From 3d29fedc85cd8450be0bccf96f9cc0e5321c7fc4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 21:58:06 -0800 Subject: [PATCH 0001/2301] Revert "[CIR] Upstream initial attribute support (#121069)" This reverts commit 8e329593313bb792592529ee825a52683108df99. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 11 -- clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 36 ---- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 142 --------------- .../include/clang/CIR/Dialect/IR/CIRDialect.h | 1 - clang/include/clang/CIR/Dialect/IR/CIROps.td | 54 +----- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 12 +- .../clang/CIR/Dialect/IR/CMakeLists.txt | 3 - clang/lib/CIR/CodeGen/CIRGenModule.cpp | 42 ----- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 172 +----------------- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 107 +---------- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 - clang/lib/CIR/Interfaces/CMakeLists.txt | 1 - clang/test/CIR/global-var-simple.cpp | 24 +-- 13 files changed, 23 insertions(+), 583 deletions(-) delete mode 100644 clang/include/clang/CIR/Dialect/IR/CIRAttrs.h delete mode 100644 clang/include/clang/CIR/Dialect/IR/CIRAttrs.td diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index b4a961de224a..0e414921324b 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -9,11 +9,7 @@ #ifndef LLVM_CLANG_CIR_DIALECT_BUILDER_CIRBASEBUILDER_H #define LLVM_CLANG_CIR_DIALECT_BUILDER_CIRBASEBUILDER_H -#include "clang/CIR/Dialect/IR/CIRAttrs.h" - #include "mlir/IR/Builders.h" -#include "mlir/IR/BuiltinTypes.h" -#include "mlir/IR/Types.h" namespace cir { @@ -30,13 +26,6 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { cir::PointerType getVoidPtrTy() { return getPointerTo(cir::VoidType::get(getContext())); } - - mlir::TypedAttr getConstPtrAttr(mlir::Type type, int64_t value) { - auto valueAttr = mlir::IntegerAttr::get( - mlir::IntegerType::get(type.getContext(), 64), value); - return cir::ConstPtrAttr::get( - getContext(), mlir::cast(type), valueAttr); - } }; } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h deleted file mode 100644 index 438fb7d09608..000000000000 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ /dev/null @@ -1,36 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file declares the attributes in the CIR dialect. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRATTRS_H -#define LLVM_CLANG_CIR_DIALECT_IR_CIRATTRS_H - -#include "clang/CIR/Dialect/IR/CIRTypes.h" - -#include "mlir/IR/Attributes.h" -#include "mlir/IR/BuiltinAttributeInterfaces.h" - -#include "llvm/ADT/SmallVector.h" - -//===----------------------------------------------------------------------===// -// CIR Dialect Attrs -//===----------------------------------------------------------------------===// - -namespace clang { -class FunctionDecl; -class VarDecl; -class RecordDecl; -} // namespace clang - -#define GET_ATTRDEF_CLASSES -#include "clang/CIR/Dialect/IR/CIROpsAttributes.h.inc" - -#endif // LLVM_CLANG_CIR_DIALECT_IR_CIRATTRS_H diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td deleted file mode 100644 index bd1665e1ac1a..000000000000 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ /dev/null @@ -1,142 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file declares the CIR dialect attributes. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRATTRS_TD -#define LLVM_CLANG_CIR_DIALECT_IR_CIRATTRS_TD - -include "mlir/IR/BuiltinAttributeInterfaces.td" -include "mlir/IR/EnumAttr.td" - -include "clang/CIR/Dialect/IR/CIRDialect.td" - -//===----------------------------------------------------------------------===// -// CIR Attrs -//===----------------------------------------------------------------------===// - -class CIR_Attr traits = []> - : AttrDef { - let mnemonic = attrMnemonic; -} - -class CIRUnitAttr traits = []> - : CIR_Attr { - let returnType = "bool"; - let defaultValue = "false"; - let valueType = NoneType; - let isOptional = 1; -} - -//===----------------------------------------------------------------------===// -// IntegerAttr -//===----------------------------------------------------------------------===// - -def IntAttr : CIR_Attr<"Int", "int", [TypedAttrInterface]> { - let summary = "An attribute containing an integer value"; - let description = [{ - An integer attribute is a literal attribute that represents an integral - value of the specified integer type. - }]; - let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "llvm::APInt":$value); - let builders = [ - AttrBuilderWithInferredContext<(ins "mlir::Type":$type, - "const llvm::APInt &":$value), [{ - return $_get(type.getContext(), type, value); - }]>, - AttrBuilderWithInferredContext<(ins "mlir::Type":$type, - "int64_t":$value), [{ - IntType intType = mlir::cast(type); - mlir::APInt apValue(intType.getWidth(), value, intType.isSigned()); - return $_get(intType.getContext(), intType, apValue); - }]>, - ]; - let extraClassDeclaration = [{ - int64_t getSInt() const { return getValue().getSExtValue(); } - uint64_t getUInt() const { return getValue().getZExtValue(); } - bool isNullValue() const { return getValue() == 0; } - uint64_t getBitWidth() const { - return mlir::cast(getType()).getWidth(); - } - }]; - let genVerifyDecl = 1; - let hasCustomAssemblyFormat = 1; -} - -//===----------------------------------------------------------------------===// -// FPAttr -//===----------------------------------------------------------------------===// - -def FPAttr : CIR_Attr<"FP", "fp", [TypedAttrInterface]> { - let summary = "An attribute containing a floating-point value"; - let description = [{ - An fp attribute is a literal attribute that represents a floating-point - value of the specified floating-point type. Supporting only CIR FP types. - }]; - let parameters = (ins - AttributeSelfTypeParameter<"", "::cir::CIRFPTypeInterface">:$type, - APFloatParameter<"">:$value - ); - let builders = [ - AttrBuilderWithInferredContext<(ins "mlir::Type":$type, - "const llvm::APFloat &":$value), [{ - return $_get(type.getContext(), mlir::cast(type), - value); - }]>, - AttrBuilder<(ins "mlir::Type":$type, - "const llvm::APFloat &":$value), [{ - return $_get($_ctxt, mlir::cast(type), value); - }]>, - ]; - let extraClassDeclaration = [{ - static FPAttr getZero(mlir::Type type); - }]; - let genVerifyDecl = 1; - - let assemblyFormat = [{ - `<` custom($value, ref($type)) `>` - }]; -} - -//===----------------------------------------------------------------------===// -// ConstPtrAttr -//===----------------------------------------------------------------------===// - -def ConstPtrAttr : CIR_Attr<"ConstPtr", "ptr", [TypedAttrInterface]> { - let summary = "Holds a constant pointer value"; - let parameters = (ins - AttributeSelfTypeParameter<"", "::cir::PointerType">:$type, - "mlir::IntegerAttr":$value); - let description = [{ - A pointer attribute is a literal attribute that represents an integral - value of a pointer type. - }]; - let builders = [ - AttrBuilderWithInferredContext<(ins "mlir::Type":$type, - "mlir::IntegerAttr":$value), [{ - return $_get(type.getContext(), mlir::cast(type), - value); - }]>, - AttrBuilder<(ins "mlir::Type":$type, - "mlir::IntegerAttr":$value), [{ - return $_get($_ctxt, mlir::cast(type), value); - }]>, - ]; - let extraClassDeclaration = [{ - bool isNullValue() const { return getValue().getInt() == 0; } - }]; - - let assemblyFormat = [{ - `<` custom($value) `>` - }]; -} - -#endif // LLVM_CLANG_CIR_DIALECT_IR_CIRATTRS_TD diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index 683176b139ca..0b71bdad29a3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -26,7 +26,6 @@ #include "mlir/Interfaces/MemorySlotInterfaces.h" #include "mlir/Interfaces/SideEffectInterfaces.h" -#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIROpsDialect.h.inc" // TableGen'erated files for MLIR dialects require that a macro be defined when diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b15e0415360e..0d6c65ecf410 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -16,7 +16,6 @@ include "clang/CIR/Dialect/IR/CIRDialect.td" include "clang/CIR/Dialect/IR/CIRTypes.td" -include "clang/CIR/Dialect/IR/CIRAttrs.td" include "mlir/IR/BuiltinAttributeInterfaces.td" include "mlir/IR/EnumAttr.td" @@ -76,45 +75,6 @@ class LLVMLoweringInfo { class CIR_Op traits = []> : Op, LLVMLoweringInfo; -//===----------------------------------------------------------------------===// -// ConstantOp -//===----------------------------------------------------------------------===// - -def ConstantOp : CIR_Op<"const", - [ConstantLike, Pure, AllTypesMatch<["value", "res"]>]> { - let summary = "Defines a CIR constant"; - let description = [{ - The `cir.const` operation turns a literal into an SSA value. The data is - attached to the operation as an attribute. - - ```mlir - %0 = cir.const 42 : i32 - %1 = cir.const 4.2 : f32 - %2 = cir.const nullptr : !cir.ptr - ``` - }]; - - // The constant operation takes an attribute as the only input. - let arguments = (ins TypedAttrInterface:$value); - - // The constant operation returns a single value of CIR_AnyType. - let results = (outs CIR_AnyType:$res); - - let assemblyFormat = "attr-dict $value"; - - let hasVerifier = 1; - - let extraClassDeclaration = [{ - bool isNullPtr() { - if (const auto ptrAttr = mlir::dyn_cast(getValue())) - return ptrAttr.isNullValue(); - return false; - } - }]; - - let hasFolder = 1; -} - //===----------------------------------------------------------------------===// // GlobalOp //===----------------------------------------------------------------------===// @@ -132,19 +92,9 @@ def GlobalOp : CIR_Op<"global"> { described by the type of the variable. }]; - let arguments = (ins SymbolNameAttr:$sym_name, TypeAttr:$sym_type, - OptionalAttr:$initial_value); - - let assemblyFormat = [{ - $sym_name - custom($sym_type, $initial_value) - attr-dict - }]; + let arguments = (ins SymbolNameAttr:$sym_name, TypeAttr:$sym_type); - let extraClassDeclaration = [{ - bool isDeclaration() { return !getInitialValue(); } - bool hasInitializer() { return !isDeclaration(); } - }]; + let assemblyFormat = [{ $sym_name `:` $sym_type attr-dict }]; let skipDefaultBuilders = 1; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index a32fb3c80111..ef00b26c1fd9 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -220,8 +220,8 @@ def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { // Constraints -def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_FP80, CIR_FP128, - CIR_LongDouble, CIR_FP16, CIR_BFloat16]>; +def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_FP80, CIR_FP128, CIR_LongDouble, + CIR_FP16, CIR_BFloat16]>; def CIR_AnyIntOrFloat: AnyTypeOf<[CIR_AnyFloat, CIR_IntType]>; //===----------------------------------------------------------------------===// @@ -350,12 +350,4 @@ def VoidPtr : Type< "cir::VoidType::get($_builder.getContext()))"> { } -//===----------------------------------------------------------------------===// -// Global type constraints -//===----------------------------------------------------------------------===// - -def CIR_AnyType : AnyTypeOf<[ - CIR_VoidType, CIR_IntType, CIR_AnyFloat, CIR_PointerType, CIR_FuncType -]>; - #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt index 1fdbc24ba6b4..28ae30dab8df 100644 --- a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt @@ -14,6 +14,3 @@ mlir_tablegen(CIROpsDialect.cpp.inc -gen-dialect-defs) add_public_tablegen_target(MLIRCIROpsIncGen) add_dependencies(mlir-headers MLIRCIROpsIncGen) -mlir_tablegen(CIROpsAttributes.h.inc -gen-attrdef-decls) -mlir_tablegen(CIROpsAttributes.cpp.inc -gen-attrdef-defs) -add_public_tablegen_target(MLIRCIRAttrsEnumsGen) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 2615ae382cb8..416d532028d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -115,48 +115,6 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd, if (clang::IdentifierInfo *identifier = vd->getIdentifier()) { auto varOp = builder.create(getLoc(vd->getSourceRange()), identifier->getName(), type); - // TODO(CIR): This code for processing initial values is a placeholder - // until class ConstantEmitter is upstreamed and the code for processing - // constant expressions is filled out. Only the most basic handling of - // certain constant expressions is implemented for now. - const VarDecl *initDecl; - const Expr *initExpr = vd->getAnyInitializer(initDecl); - if (initExpr) { - mlir::Attribute initializer; - if (APValue *value = initDecl->evaluateValue()) { - switch (value->getKind()) { - case APValue::Int: { - initializer = builder.getAttr(type, value->getInt()); - break; - } - case APValue::Float: { - initializer = builder.getAttr(type, value->getFloat()); - break; - } - case APValue::LValue: { - if (value->getLValueBase()) { - errorNYI(initExpr->getSourceRange(), - "non-null pointer initialization"); - } else { - if (auto ptrType = mlir::dyn_cast(type)) { - initializer = builder.getConstPtrAttr( - ptrType, value->getLValueOffset().getQuantity()); - } else { - llvm_unreachable( - "non-pointer variable initialized with a pointer"); - } - } - break; - } - default: - errorNYI(initExpr->getSourceRange(), "unsupported initializer kind"); - break; - } - } else { - errorNYI(initExpr->getSourceRange(), "non-constant initializer"); - } - varOp.setInitialValueAttr(initializer); - } theModule.push_back(varOp); } else { errorNYI(vd->getSourceRange().getBegin(), diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 8e8f7d5b7d7c..7d42da1ab20d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -12,24 +12,6 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "mlir/IR/DialectImplementation.h" -#include "llvm/ADT/TypeSwitch.h" - -static void printFloatLiteral(mlir::AsmPrinter &p, llvm::APFloat value, - mlir::Type ty); -static mlir::ParseResult -parseFloatLiteral(mlir::AsmParser &parser, - mlir::FailureOr &value, - cir::CIRFPTypeInterface fpType); - -static mlir::ParseResult parseConstPtr(mlir::AsmParser &parser, - mlir::IntegerAttr &value); - -static void printConstPtr(mlir::AsmPrinter &p, mlir::IntegerAttr value); - -#define GET_ATTRDEF_CLASSES -#include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" - using namespace mlir; using namespace cir; @@ -39,155 +21,12 @@ using namespace cir; Attribute CIRDialect::parseAttribute(DialectAsmParser &parser, Type type) const { - llvm::SMLoc typeLoc = parser.getCurrentLocation(); - llvm::StringRef mnemonic; - Attribute genAttr; - OptionalParseResult parseResult = - generatedAttributeParser(parser, &mnemonic, type, genAttr); - if (parseResult.has_value()) - return genAttr; - parser.emitError(typeLoc, "unknown attribute in CIR dialect"); - return Attribute(); + // No attributes yet to parse + return Attribute{}; } void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const { - if (failed(generatedAttributePrinter(attr, os))) - llvm_unreachable("unexpected CIR type kind"); -} - -//===----------------------------------------------------------------------===// -// ConstPtrAttr definitions -//===----------------------------------------------------------------------===// - -// TODO(CIR): Consider encoding the null value differently and use conditional -// assembly format instead of custom parsing/printing. -static ParseResult parseConstPtr(AsmParser &parser, mlir::IntegerAttr &value) { - - if (parser.parseOptionalKeyword("null").succeeded()) { - value = mlir::IntegerAttr::get( - mlir::IntegerType::get(parser.getContext(), 64), 0); - return success(); - } - - return parser.parseAttribute(value); -} - -static void printConstPtr(AsmPrinter &p, mlir::IntegerAttr value) { - if (!value.getInt()) - p << "null"; - else - p << value; -} - -//===----------------------------------------------------------------------===// -// IntAttr definitions -//===----------------------------------------------------------------------===// - -Attribute IntAttr::parse(AsmParser &parser, Type odsType) { - mlir::APInt apValue; - - if (!mlir::isa(odsType)) - return {}; - auto type = mlir::cast(odsType); - - // Consume the '<' symbol. - if (parser.parseLess()) - return {}; - - // Fetch arbitrary precision integer value. - if (type.isSigned()) { - int64_t value = 0; - if (parser.parseInteger(value)) { - parser.emitError(parser.getCurrentLocation(), "expected integer value"); - } else { - apValue = mlir::APInt(type.getWidth(), value, type.isSigned(), - /*implicitTrunc=*/true); - if (apValue.getSExtValue() != value) - parser.emitError(parser.getCurrentLocation(), - "integer value too large for the given type"); - } - } else { - uint64_t value = 0; - if (parser.parseInteger(value)) { - parser.emitError(parser.getCurrentLocation(), "expected integer value"); - } else { - apValue = mlir::APInt(type.getWidth(), value, type.isSigned(), - /*implicitTrunc=*/true); - if (apValue.getZExtValue() != value) - parser.emitError(parser.getCurrentLocation(), - "integer value too large for the given type"); - } - } - - // Consume the '>' symbol. - if (parser.parseGreater()) - return {}; - - return IntAttr::get(type, apValue); -} - -void IntAttr::print(AsmPrinter &printer) const { - auto type = mlir::cast(getType()); - printer << '<'; - if (type.isSigned()) - printer << getSInt(); - else - printer << getUInt(); - printer << '>'; -} - -LogicalResult IntAttr::verify(function_ref emitError, - Type type, APInt value) { - if (!mlir::isa(type)) { - emitError() << "expected 'simple.int' type"; - return failure(); - } - - auto intType = mlir::cast(type); - if (value.getBitWidth() != intType.getWidth()) { - emitError() << "type and value bitwidth mismatch: " << intType.getWidth() - << " != " << value.getBitWidth(); - return failure(); - } - - return success(); -} - -//===----------------------------------------------------------------------===// -// FPAttr definitions -//===----------------------------------------------------------------------===// - -static void printFloatLiteral(AsmPrinter &p, APFloat value, Type ty) { - p << value; -} - -static ParseResult parseFloatLiteral(AsmParser &parser, - FailureOr &value, - CIRFPTypeInterface fpType) { - - APFloat parsedValue(0.0); - if (parser.parseFloat(fpType.getFloatSemantics(), parsedValue)) - return failure(); - - value.emplace(parsedValue); - return success(); -} - -FPAttr FPAttr::getZero(Type type) { - return get(type, - APFloat::getZero( - mlir::cast(type).getFloatSemantics())); -} - -LogicalResult FPAttr::verify(function_ref emitError, - CIRFPTypeInterface fpType, APFloat value) { - if (APFloat::SemanticsToEnum(fpType.getFloatSemantics()) != - APFloat::SemanticsToEnum(value.getSemantics())) { - emitError() << "floating-point semantics mismatch"; - return failure(); - } - - return success(); + // No attributes yet to print } //===----------------------------------------------------------------------===// @@ -195,8 +34,5 @@ LogicalResult FPAttr::verify(function_ref emitError, //===----------------------------------------------------------------------===// void CIRDialect::registerAttributes() { - addAttributes< -#define GET_ATTRDEF_LIST -#include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" - >(); + // No attributes yet to register } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f98d8b60f6ff..dbdca1f84016 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -12,8 +12,6 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" - #include "mlir/Support/LogicalResult.h" #include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc" @@ -34,73 +32,13 @@ void cir::CIRDialect::initialize() { >(); } -//===----------------------------------------------------------------------===// -// ConstantOp -//===----------------------------------------------------------------------===// - -static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, - mlir::Attribute attrType) { - if (isa(attrType)) { - if (!mlir::isa(opType)) - return op->emitOpError( - "pointer constant initializing a non-pointer type"); - return success(); - } - - if (mlir::isa(attrType)) { - auto at = cast(attrType); - if (at.getType() != opType) { - return op->emitOpError("result type (") - << opType << ") does not match value type (" << at.getType() - << ")"; - } - return success(); - } - - assert(isa(attrType) && "What else could we be looking at here?"); - return op->emitOpError("global with type ") - << cast(attrType).getType() << " not yet supported"; -} - -LogicalResult cir::ConstantOp::verify() { - // ODS already generates checks to make sure the result type is valid. We just - // need to additionally check that the value's attribute type is consistent - // with the result type. - return checkConstantTypes(getOperation(), getType(), getValue()); -} - -OpFoldResult cir::ConstantOp::fold(FoldAdaptor /*adaptor*/) { - return getValue(); -} - //===----------------------------------------------------------------------===// // GlobalOp //===----------------------------------------------------------------------===// -static ParseResult parseConstantValue(OpAsmParser &parser, - mlir::Attribute &valueAttr) { - NamedAttrList attr; - return parser.parseAttribute(valueAttr, "value", attr); -} - -static void printConstant(OpAsmPrinter &p, Attribute value) { - p.printAttribute(value); -} - -mlir::LogicalResult cir::GlobalOp::verify() { - // Verify that the initial value, if present, is either a unit attribute or - // an attribute CIR supports. - if (getInitialValue().has_value()) { - if (checkConstantTypes(getOperation(), getSymType(), *getInitialValue()) - .failed()) - return failure(); - } - - // TODO(CIR): Many other checks for properties that haven't been upstreamed - // yet. - - return success(); -} +// TODO(CIR): The properties of global variables that require verification +// haven't been implemented yet. +mlir::LogicalResult cir::GlobalOp::verify() { return success(); } void cir::GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, llvm::StringRef sym_name, mlir::Type sym_type) { @@ -110,45 +48,6 @@ void cir::GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, mlir::TypeAttr::get(sym_type)); } -static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, cir::GlobalOp op, - TypeAttr type, - Attribute initAttr) { - if (!op.isDeclaration()) { - p << "= "; - // This also prints the type... - if (initAttr) - printConstant(p, initAttr); - } else { - p << ": " << type; - } -} - -static ParseResult -parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, - Attribute &initialValueAttr) { - mlir::Type opTy; - if (parser.parseOptionalEqual().failed()) { - // Absence of equal means a declaration, so we need to parse the type. - // cir.global @a : !cir.int - if (parser.parseColonType(opTy)) - return failure(); - } else { - // Parse constant with initializer, examples: - // cir.global @y = #cir.fp<1.250000e+00> : !cir.double - // cir.global @rgb = #cir.const_array<[...] : !cir.array> - if (parseConstantValue(parser, initialValueAttr).failed()) - return failure(); - - assert(mlir::isa(initialValueAttr) && - "Non-typed attrs shouldn't appear here."); - auto typedAttr = mlir::cast(initialValueAttr); - opTy = typedAttr.getType(); - } - - typeAttr = TypeAttr::get(opTy); - return success(); -} - //===----------------------------------------------------------------------===// // FuncOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index baf8bff18522..df60f69df6fc 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -5,7 +5,6 @@ add_clang_library(MLIRCIR DEPENDS MLIRCIROpsIncGen - MLIRCIRAttrsEnumsGen LINK_LIBS PUBLIC MLIRIR diff --git a/clang/lib/CIR/Interfaces/CMakeLists.txt b/clang/lib/CIR/Interfaces/CMakeLists.txt index b826bf612cc3..fcd8b6963d06 100644 --- a/clang/lib/CIR/Interfaces/CMakeLists.txt +++ b/clang/lib/CIR/Interfaces/CMakeLists.txt @@ -5,7 +5,6 @@ add_clang_library(MLIRCIRInterfaces ${MLIR_MAIN_INCLUDE_DIR}/mlir/Interfaces DEPENDS - MLIRCIRAttrsEnumsGen MLIRCIRFPTypeInterfaceIncGen LINK_LIBS diff --git a/clang/test/CIR/global-var-simple.cpp b/clang/test/CIR/global-var-simple.cpp index ffcc3ef71a6c..bbd452655a01 100644 --- a/clang/test/CIR/global-var-simple.cpp +++ b/clang/test/CIR/global-var-simple.cpp @@ -13,11 +13,11 @@ unsigned char uc; short ss; // CHECK: cir.global @ss : !cir.int -unsigned short us = 100; -// CHECK: cir.global @us = #cir.int<100> : !cir.int +unsigned short us; +// CHECK: cir.global @us : !cir.int -int si = 42; -// CHECK: cir.global @si = #cir.int<42> : !cir.int +int si; +// CHECK: cir.global @si : !cir.int unsigned ui; // CHECK: cir.global @ui : !cir.int @@ -31,8 +31,8 @@ unsigned long ul; long long sll; // CHECK: cir.global @sll : !cir.int -unsigned long long ull = 123456; -// CHECK: cir.global @ull = #cir.int<123456> : !cir.int +unsigned long long ull; +// CHECK: cir.global @ull : !cir.int __int128 s128; // CHECK: cir.global @s128 : !cir.int @@ -67,8 +67,8 @@ __bf16 bf16; float f; // CHECK: cir.global @f : !cir.float -double d = 1.25; -// CHECK: cir.global @d = #cir.fp<1.250000e+00> : !cir.double +double d; +// CHECK: cir.global @d : !cir.double long double ld; // CHECK: cir.global @ld : !cir.long_double @@ -79,8 +79,8 @@ __float128 f128; void *vp; // CHECK: cir.global @vp : !cir.ptr -int *ip = 0; -// CHECK: cir.global @ip = #cir.ptr : !cir.ptr> +int *ip; +// CHECK: cir.global @ip : !cir.ptr> double *dp; // CHECK: cir.global @dp : !cir.ptr @@ -91,8 +91,8 @@ char **cpp; void (*fp)(); // CHECK: cir.global @fp : !cir.ptr> -int (*fpii)(int) = 0; -// CHECK: cir.global @fpii = #cir.ptr : !cir.ptr (!cir.int)>> +int (*fpii)(int); +// CHECK: cir.global @fpii : !cir.ptr (!cir.int)>> void (*fpvar)(int, ...); // CHECK: cir.global @fpvar : !cir.ptr, ...)>> From aa21de361cb8378931a4cf368efdd10bd53adace Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 21:58:13 -0800 Subject: [PATCH 0002/2301] Revert "[CIR] floating-point, pointer, and function types (#120484)" This reverts commit 8ae8a905855ca1b07a72059d8225ab1f9cae65dc. --- clang/include/clang/CIR/CMakeLists.txt | 1 - .../CIR/Dialect/Builder/CIRBaseBuilder.h | 8 - clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 7 - .../include/clang/CIR/Dialect/IR/CIRTypes.td | 221 -------------- .../clang/CIR/Interfaces/CIRFPTypeInterface.h | 22 -- .../CIR/Interfaces/CIRFPTypeInterface.td | 56 ---- .../clang/CIR/Interfaces/CMakeLists.txt | 14 - clang/lib/CIR/CMakeLists.txt | 1 - clang/lib/CIR/CodeGen/CIRGenBuilder.h | 12 - clang/lib/CIR/CodeGen/CIRGenModule.cpp | 7 - clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 11 - clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 132 --------- clang/lib/CIR/CodeGen/CIRGenTypes.h | 9 - clang/lib/CIR/CodeGen/CMakeLists.txt | 1 - clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 280 ------------------ clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 - .../lib/CIR/Interfaces/CIRFPTypeInterface.cpp | 18 -- clang/lib/CIR/Interfaces/CMakeLists.txt | 14 - clang/test/CIR/global-var-simple.cpp | 39 --- 19 files changed, 854 deletions(-) delete mode 100644 clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h delete mode 100644 clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td delete mode 100644 clang/include/clang/CIR/Interfaces/CMakeLists.txt delete mode 100644 clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp delete mode 100644 clang/lib/CIR/Interfaces/CMakeLists.txt diff --git a/clang/include/clang/CIR/CMakeLists.txt b/clang/include/clang/CIR/CMakeLists.txt index e20c896171c9..f8d6f407a03d 100644 --- a/clang/include/clang/CIR/CMakeLists.txt +++ b/clang/include/clang/CIR/CMakeLists.txt @@ -4,4 +4,3 @@ include_directories(${MLIR_INCLUDE_DIR}) include_directories(${MLIR_TABLEGEN_OUTPUT_DIR}) add_subdirectory(Dialect) -add_subdirectory(Interfaces) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 0e414921324b..75ae74e926fb 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -18,14 +18,6 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { public: CIRBaseBuilderTy(mlir::MLIRContext &mlirContext) : mlir::OpBuilder(&mlirContext) {} - - cir::PointerType getPointerTo(mlir::Type ty) { - return cir::PointerType::get(getContext(), ty); - } - - cir::PointerType getVoidPtrTy() { - return getPointerTo(cir::VoidType::get(getContext())); - } }; } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 5d1eb17e146d..2bc7d77b2bc8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -16,13 +16,6 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Types.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" -#include "clang/CIR/Interfaces/CIRFPTypeInterface.h" - -namespace cir { - -bool isAnyFloatingPointType(mlir::Type t); - -} // namespace cir //===----------------------------------------------------------------------===// // CIR Dialect Tablegen'd Types diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index ef00b26c1fd9..ce0b6ba1d68c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -14,7 +14,6 @@ #define MLIR_CIR_DIALECT_CIR_TYPES include "clang/CIR/Dialect/IR/CIRDialect.td" -include "clang/CIR/Interfaces/CIRFPTypeInterface.td" include "mlir/Interfaces/DataLayoutInterfaces.td" include "mlir/IR/AttrTypeBase.td" @@ -130,224 +129,4 @@ def PrimitiveInt : AnyTypeOf<[UInt8, UInt16, UInt32, UInt64, SInt8, SInt16, SInt32, SInt64], "primitive int", "::cir::IntType">; -//===----------------------------------------------------------------------===// -// FloatType -//===----------------------------------------------------------------------===// - -class CIR_FloatType - : CIR_Type, - DeclareTypeInterfaceMethods, - ]> {} - -def CIR_Single : CIR_FloatType<"Single", "float"> { - let summary = "CIR single-precision 32-bit float type"; - let description = [{ - A 32-bit floating-point type whose format is IEEE-754 `binary32`. It - represents the types `float`, `_Float32`, and `std::float32_t` in C and C++. - }]; -} - -def CIR_Double : CIR_FloatType<"Double", "double"> { - let summary = "CIR double-precision 64-bit float type"; - let description = [{ - A 64-bit floating-point type whose format is IEEE-754 `binary64`. It - represents the types `double', '_Float64`, `std::float64_t`, and `_Float32x` - in C and C++. This is the underlying type for `long double` on some - platforms, including Windows. - }]; -} - -def CIR_FP16 : CIR_FloatType<"FP16", "f16"> { - let summary = "CIR half-precision 16-bit float type"; - let description = [{ - A 16-bit floating-point type whose format is IEEE-754 `binary16`. It - represents the types '_Float16` and `std::float16_t` in C and C++. - }]; -} - -def CIR_BFloat16 : CIR_FloatType<"BF16", "bf16"> { - let summary = "CIR bfloat16 16-bit float type"; - let description = [{ - A 16-bit floating-point type in the bfloat16 format, which is the same as - IEEE `binary32` except that the lower 16 bits of the mantissa are missing. - It represents the type `std::bfloat16_t` in C++, also spelled `__bf16` in - some implementations. - }]; -} - -def CIR_FP80 : CIR_FloatType<"FP80", "f80"> { - let summary = "CIR x87 80-bit float type"; - let description = [{ - An 80-bit floating-point type in the x87 extended precision format. The - size and alignment of the type are both 128 bits, even though only 80 of - those bits are used. This is the underlying type for `long double` on Linux - x86 platforms, and it is available as an extension in some implementations. - }]; -} - -def CIR_FP128 : CIR_FloatType<"FP128", "f128"> { - let summary = "CIR quad-precision 128-bit float type"; - let description = [{ - A 128-bit floating-point type whose format is IEEE-754 `binary128`. It - represents the types `_Float128` and `std::float128_t` in C and C++, and the - extension `__float128` in some implementations. This is the underlying type - for `long double` on some platforms including Linux Arm. - }]; -} - -def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { - let summary = "CIR float type for `long double`"; - let description = [{ - A floating-point type that represents the `long double` type in C and C++. - - The underlying floating-point format of a `long double` value depends on the - target platform and the implementation. The `underlying` parameter specifies - the CIR floating-point type that corresponds to this format. Underlying - types of IEEE 64-bit, IEEE 128-bit, x87 80-bit, and IBM's double-double - format are all in use. - }]; - - let parameters = (ins "mlir::Type":$underlying); - - let assemblyFormat = [{ - `<` $underlying `>` - }]; - - let genVerifyDecl = 1; -} - -// Constraints - -def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_FP80, CIR_FP128, CIR_LongDouble, - CIR_FP16, CIR_BFloat16]>; -def CIR_AnyIntOrFloat: AnyTypeOf<[CIR_AnyFloat, CIR_IntType]>; - -//===----------------------------------------------------------------------===// -// PointerType -//===----------------------------------------------------------------------===// - -def CIR_PointerType : CIR_Type<"Pointer", "ptr", - [DeclareTypeInterfaceMethods]> { - - let summary = "CIR pointer type"; - let description = [{ - The `cir.ptr` type represents C and C++ pointer types and C++ reference - types, other than pointers-to-members. The `pointee` type is the type - pointed to. - - TODO(CIR): The address space attribute is not yet implemented. - }]; - - let parameters = (ins "mlir::Type":$pointee); - - let builders = [ - TypeBuilderWithInferredContext<(ins "mlir::Type":$pointee), [{ - return $_get(pointee.getContext(), pointee); - }]>, - TypeBuilder<(ins "mlir::Type":$pointee), [{ - return $_get($_ctxt, pointee); - }]> - ]; - - let assemblyFormat = [{ - `<` $pointee `>` - }]; - - let genVerifyDecl = 1; - - let skipDefaultBuilders = 1; - - let extraClassDeclaration = [{ - bool isVoidPtr() const { - return mlir::isa(getPointee()); - } - }]; -} - -//===----------------------------------------------------------------------===// -// FuncType -//===----------------------------------------------------------------------===// - -def CIR_FuncType : CIR_Type<"Func", "func"> { - let summary = "CIR function type"; - let description = [{ - The `!cir.func` is a function type. It consists of a single return type, a - list of parameter types and can optionally be variadic. - - Example: - - ```mlir - !cir.func - !cir.func - !cir.func - ``` - }]; - - let parameters = (ins ArrayRefParameter<"mlir::Type">:$inputs, - "mlir::Type":$returnType, "bool":$varArg); - let assemblyFormat = [{ - `<` $returnType ` ` `(` custom($inputs, $varArg) `>` - }]; - - let builders = [ - TypeBuilderWithInferredContext<(ins - "llvm::ArrayRef":$inputs, "mlir::Type":$returnType, - CArg<"bool", "false">:$isVarArg), [{ - return $_get(returnType.getContext(), inputs, returnType, isVarArg); - }]> - ]; - - let extraClassDeclaration = [{ - /// Returns whether the function is variadic. - bool isVarArg() const { return getVarArg(); } - - /// Returns the `i`th input operand type. Asserts if out of bounds. - mlir::Type getInput(unsigned i) const { return getInputs()[i]; } - - /// Returns the number of arguments to the function. - unsigned getNumInputs() const { return getInputs().size(); } - - /// Returns the result type of the function as an ArrayRef, enabling better - /// integration with generic MLIR utilities. - llvm::ArrayRef getReturnTypes() const; - - /// Returns whether the function is returns void. - bool isVoid() const; - - /// Returns a clone of this function type with the given argument - /// and result types. - FuncType clone(mlir::TypeRange inputs, mlir::TypeRange results) const; - }]; -} - -//===----------------------------------------------------------------------===// -// Void type -//===----------------------------------------------------------------------===// - -def CIR_VoidType : CIR_Type<"Void", "void"> { - let summary = "CIR void type"; - let description = [{ - The `!cir.void` type represents the C and C++ `void` type. - }]; - let extraClassDeclaration = [{ - std::string getAlias() const { return "void"; }; - }]; -} - -// Constraints - -// Pointer to void -def VoidPtr : Type< - And<[ - CPred<"::mlir::isa<::cir::PointerType>($_self)">, - CPred<"::mlir::isa<::cir::VoidType>(" - "::mlir::cast<::cir::PointerType>($_self).getPointee())">, - ]>, "void*">, - BuildableType< - "cir::PointerType::get($_builder.getContext()," - "cir::VoidType::get($_builder.getContext()))"> { -} - #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h deleted file mode 100644 index 40b85ef6cfb6..000000000000 --- a/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h +++ /dev/null @@ -1,22 +0,0 @@ -//===---------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===---------------------------------------------------------------------===// -// -// Defines the interface to generically handle CIR floating-point types. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_INCLUDE_CLANG_CIR_INTERFACES_CIRFPTYPEINTERFACE_H -#define LLVM_CLANG_INCLUDE_CLANG_CIR_INTERFACES_CIRFPTYPEINTERFACE_H - -#include "mlir/IR/Types.h" -#include "llvm/ADT/APFloat.h" - -/// Include the tablegen'd interface declarations. -#include "clang/CIR/Interfaces/CIRFPTypeInterface.h.inc" - -#endif // LLVM_CLANG_INCLUDE_CLANG_CIR_INTERFACES_CIRFPTYPEINTERFACE_H diff --git a/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td deleted file mode 100644 index 973851b61444..000000000000 --- a/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td +++ /dev/null @@ -1,56 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// Defines the interface to generically handle CIR floating-point types. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_INCLUDE_CLANG_CIR_INTERFACES_CIRFPTYPEINTERFACE_TD -#define LLVM_CLANG_INCLUDE_CLANG_CIR_INTERFACES_CIRFPTYPEINTERFACE_TD - -include "mlir/IR/OpBase.td" - -def CIRFPTypeInterface : TypeInterface<"CIRFPTypeInterface"> { - let description = [{ - Contains helper functions to query properties about a floating-point type. - }]; - let cppNamespace = "::cir"; - - let methods = [ - InterfaceMethod<[{ - Returns the bit width of this floating-point type. - }], - /*retTy=*/"unsigned", - /*methodName=*/"getWidth", - /*args=*/(ins), - /*methodBody=*/"", - /*defaultImplementation=*/[{ - return llvm::APFloat::semanticsSizeInBits($_type.getFloatSemantics()); - }] - >, - InterfaceMethod<[{ - Return the mantissa width. - }], - /*retTy=*/"unsigned", - /*methodName=*/"getFPMantissaWidth", - /*args=*/(ins), - /*methodBody=*/"", - /*defaultImplementation=*/[{ - return llvm::APFloat::semanticsPrecision($_type.getFloatSemantics()); - }] - >, - InterfaceMethod<[{ - Return the float semantics of this floating-point type. - }], - /*retTy=*/"const llvm::fltSemantics &", - /*methodName=*/"getFloatSemantics" - >, - ]; -} - -#endif // LLVM_CLANG_INCLUDE_CLANG_CIR_INTERFACES_CIRFPTYPEINTERFACE_TD diff --git a/clang/include/clang/CIR/Interfaces/CMakeLists.txt b/clang/include/clang/CIR/Interfaces/CMakeLists.txt deleted file mode 100644 index 1c90b6b5a23c..000000000000 --- a/clang/include/clang/CIR/Interfaces/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -# This replicates part of the add_mlir_interface cmake function from MLIR that -# cannot be used here. This happens because it expects to be run inside MLIR -# directory which is not the case for CIR (and also FIR, both have similar -# workarounds). - -function(add_clang_mlir_type_interface interface) - set(LLVM_TARGET_DEFINITIONS ${interface}.td) - mlir_tablegen(${interface}.h.inc -gen-type-interface-decls) - mlir_tablegen(${interface}.cpp.inc -gen-type-interface-defs) - add_public_tablegen_target(MLIR${interface}IncGen) - add_dependencies(mlir-generic-headers MLIR${interface}IncGen) -endfunction() - -add_clang_mlir_type_interface(CIRFPTypeInterface) diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index f3ef8525e15c..11cca734808d 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -4,4 +4,3 @@ include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) add_subdirectory(Dialect) add_subdirectory(CodeGen) add_subdirectory(FrontendAction) -add_subdirectory(Interfaces) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 01d56963883c..92115778518d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -21,18 +21,6 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { public: CIRGenBuilderTy(mlir::MLIRContext &mlirContext, const CIRGenTypeCache &tc) : CIRBaseBuilderTy(mlirContext), typeCache(tc) {} - - cir::LongDoubleType getLongDoubleTy(const llvm::fltSemantics &format) const { - if (&format == &llvm::APFloat::IEEEdouble()) - return cir::LongDoubleType::get(getContext(), typeCache.DoubleTy); - if (&format == &llvm::APFloat::x87DoubleExtended()) - return cir::LongDoubleType::get(getContext(), typeCache.FP80Ty); - if (&format == &llvm::APFloat::IEEEquad()) - return cir::LongDoubleType::get(getContext(), typeCache.FP128Ty); - if (&format == &llvm::APFloat::PPCDoubleDouble()) - llvm_unreachable("NYI: PPC double-double format for long double"); - llvm_unreachable("Unsupported format for long double"); - } }; } // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 416d532028d0..0db24c3b41d1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -35,7 +35,6 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, diags(diags), target(astContext.getTargetInfo()), genTypes(*this) { // Initialize cached types - VoidTy = cir::VoidType::get(&getMLIRContext()); SInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true); SInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/true); SInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/true); @@ -46,12 +45,6 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, UInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false); UInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false); UInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/false); - FP16Ty = cir::FP16Type::get(&getMLIRContext()); - BFloat16Ty = cir::BF16Type::get(&getMLIRContext()); - FloatTy = cir::SingleType::get(&getMLIRContext()); - DoubleTy = cir::DoubleType::get(&getMLIRContext()); - FP80Ty = cir::FP80Type::get(&getMLIRContext()); - FP128Ty = cir::FP128Type::get(&getMLIRContext()); } mlir::Location CIRGenModule::getLoc(SourceLocation cLoc) { diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index 99c0123c64b2..a357663c33e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -23,9 +23,6 @@ namespace clang::CIRGen { struct CIRGenTypeCache { CIRGenTypeCache() = default; - // ClangIR void type - cir::VoidType VoidTy; - // ClangIR signed integral types of common sizes cir::IntType SInt8Ty; cir::IntType SInt16Ty; @@ -39,14 +36,6 @@ struct CIRGenTypeCache { cir::IntType UInt32Ty; cir::IntType UInt64Ty; cir::IntType UInt128Ty; - - // ClangIR floating-point types with fixed formats - cir::FP16Type FP16Ty; - cir::BF16Type BFloat16Ty; - cir::SingleType FloatTy; - cir::DoubleType DoubleTy; - cir::FP80Type FP80Ty; - cir::FP128Type FP128Ty; }; } // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 8519854556b1..181af1898baf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -4,9 +4,6 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/Type.h" -#include "clang/Basic/TargetInfo.h" - -#include using namespace clang; using namespace clang::CIRGen; @@ -21,70 +18,6 @@ mlir::MLIRContext &CIRGenTypes::getMLIRContext() const { return *builder.getContext(); } -/// Return true if the specified type in a function parameter or result position -/// can be converted to a CIR type at this point. This boils down to being -/// whether it is complete, as well as whether we've temporarily deferred -/// expanding the type because we're in a recursive context. -bool CIRGenTypes::isFuncParamTypeConvertible(clang::QualType type) { - // Some ABIs cannot have their member pointers represented in LLVM IR unless - // certain circumstances have been reached. - assert(!type->getAs() && "NYI"); - - // If this isn't a tag type, we can convert it. - const TagType *tagType = type->getAs(); - if (!tagType) - return true; - - // Function types involving incomplete class types are problematic in MLIR. - return !tagType->isIncompleteType(); -} - -/// Code to verify a given function type is complete, i.e. the return type and -/// all of the parameter types are complete. Also check to see if we are in a -/// RS_StructPointer context, and if so whether any struct types have been -/// pended. If so, we don't want to ask the ABI lowering code to handle a type -/// that cannot be converted to a CIR type. -bool CIRGenTypes::isFuncTypeConvertible(const FunctionType *ft) { - if (!isFuncParamTypeConvertible(ft->getReturnType())) - return false; - - if (const auto *fpt = dyn_cast(ft)) - for (unsigned i = 0, e = fpt->getNumParams(); i != e; i++) - if (!isFuncParamTypeConvertible(fpt->getParamType(i))) - return false; - - return true; -} - -mlir::Type CIRGenTypes::ConvertFunctionTypeInternal(QualType qft) { - assert(qft.isCanonical()); - const FunctionType *ft = cast(qft.getTypePtr()); - // First, check whether we can build the full fucntion type. If the function - // type depends on an incomplete type (e.g. a struct or enum), we cannot lower - // the function type. - if (!isFuncTypeConvertible(ft)) { - cgm.errorNYI(SourceLocation(), "function type involving an incomplete type", - qft); - return cir::FuncType::get(SmallVector{}, cgm.VoidTy); - } - - // TODO(CIR): This is a stub of what the final code will be. See the - // implementation of this function and the implementation of class - // CIRGenFunction in the ClangIR incubator project. - - if (const auto *fpt = dyn_cast(ft)) { - SmallVector mlirParamTypes; - for (unsigned i = 0; i < fpt->getNumParams(); ++i) { - mlirParamTypes.push_back(convertType(fpt->getParamType(i))); - } - return cir::FuncType::get( - mlirParamTypes, convertType(fpt->getReturnType().getUnqualifiedType()), - fpt->isVariadic()); - } - cgm.errorNYI(SourceLocation(), "non-prototype function type", qft); - return cir::FuncType::get(SmallVector{}, cgm.VoidTy); -} - mlir::Type CIRGenTypes::convertType(QualType type) { type = astContext.getCanonicalType(type); const Type *ty = type.getTypePtr(); @@ -101,12 +34,6 @@ mlir::Type CIRGenTypes::convertType(QualType type) { switch (ty->getTypeClass()) { case Type::Builtin: { switch (cast(ty)->getKind()) { - - // void - case BuiltinType::Void: - resultType = cgm.VoidTy; - break; - // Signed integral types. case BuiltinType::Char_S: case BuiltinType::Int: @@ -136,47 +63,6 @@ mlir::Type CIRGenTypes::convertType(QualType type) { cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty), /*isSigned=*/false); break; - - // Floating-point types - case BuiltinType::Float16: - resultType = cgm.FP16Ty; - break; - case BuiltinType::Half: - if (astContext.getLangOpts().NativeHalfType || - !astContext.getTargetInfo().useFP16ConversionIntrinsics()) { - resultType = cgm.FP16Ty; - } else { - cgm.errorNYI(SourceLocation(), "processing of built-in type", type); - resultType = cgm.SInt32Ty; - } - break; - case BuiltinType::BFloat16: - resultType = cgm.BFloat16Ty; - break; - case BuiltinType::Float: - assert(&astContext.getFloatTypeSemantics(type) == - &llvm::APFloat::IEEEsingle() && - "ClangIR NYI: 'float' in a format other than IEEE 32-bit"); - resultType = cgm.FloatTy; - break; - case BuiltinType::Double: - assert(&astContext.getFloatTypeSemantics(type) == - &llvm::APFloat::IEEEdouble() && - "ClangIR NYI: 'double' in a format other than IEEE 64-bit"); - resultType = cgm.DoubleTy; - break; - case BuiltinType::LongDouble: - resultType = - builder.getLongDoubleTy(astContext.getFloatTypeSemantics(type)); - break; - case BuiltinType::Float128: - resultType = cgm.FP128Ty; - break; - case BuiltinType::Ibm128: - cgm.errorNYI(SourceLocation(), "processing of built-in type", type); - resultType = cgm.SInt32Ty; - break; - default: cgm.errorNYI(SourceLocation(), "processing of built-in type", type); resultType = cgm.SInt32Ty; @@ -184,23 +70,6 @@ mlir::Type CIRGenTypes::convertType(QualType type) { } break; } - - case Type::Pointer: { - const PointerType *ptrTy = cast(ty); - QualType elemTy = ptrTy->getPointeeType(); - assert(!elemTy->isConstantMatrixType() && "not implemented"); - - mlir::Type pointeeType = convertType(elemTy); - - resultType = builder.getPointerTo(pointeeType); - break; - } - - case Type::FunctionNoProto: - case Type::FunctionProto: - resultType = ConvertFunctionTypeInternal(type); - break; - case Type::BitInt: { const auto *bitIntTy = cast(type); if (bitIntTy->getNumBits() > cir::IntType::maxBitwidth()) { @@ -212,7 +81,6 @@ mlir::Type CIRGenTypes::convertType(QualType type) { } break; } - default: cgm.errorNYI(SourceLocation(), "processing of type", type); resultType = cgm.SInt32Ty; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 71427e120002..563d7759831f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -19,7 +19,6 @@ namespace clang { class ASTContext; -class FunctionType; class QualType; class Type; } // namespace clang @@ -40,18 +39,10 @@ class CIRGenTypes { clang::ASTContext &astContext; CIRGenBuilderTy &builder; - /// Heper for ConvertType. - mlir::Type ConvertFunctionTypeInternal(clang::QualType ft); - public: CIRGenTypes(CIRGenModule &cgm); ~CIRGenTypes(); - /// Utility to check whether a function type can be converted to a CIR type - /// (i.e. doesn't depend on an incomplete tag type). - bool isFuncTypeConvertible(const clang::FunctionType *ft); - bool isFuncParamTypeConvertible(clang::QualType type); - /// This map of clang::Type to mlir::Type (which includes CIR type) is a /// cache of types that have already been processed. using TypeCacheTy = llvm::DenseMap; diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 782b814d75da..9ada31c11de9 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -21,5 +21,4 @@ add_clang_library(clangCIR clangLex ${dialect_libs} MLIRCIR - MLIRCIRInterfaces ) diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 48be11ba4e24..de38337057d3 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -16,16 +16,6 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "llvm/ADT/TypeSwitch.h" -//===----------------------------------------------------------------------===// -// CIR Custom Parser/Printer Signatures -//===----------------------------------------------------------------------===// - -static mlir::ParseResult -parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, - bool &isVarArg); -static void printFuncTypeArgs(mlir::AsmPrinter &p, - mlir::ArrayRef params, bool isVarArg); - //===----------------------------------------------------------------------===// // Get autogenerated stuff //===----------------------------------------------------------------------===// @@ -143,276 +133,6 @@ IntType::verify(llvm::function_ref emitError, return mlir::success(); } -//===----------------------------------------------------------------------===// -// Floating-point type definitions -//===----------------------------------------------------------------------===// - -const llvm::fltSemantics &SingleType::getFloatSemantics() const { - return llvm::APFloat::IEEEsingle(); -} - -llvm::TypeSize -SingleType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return llvm::TypeSize::getFixed(getWidth()); -} - -uint64_t -SingleType::getABIAlignment(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return (uint64_t)(getWidth() / 8); -} - -uint64_t -SingleType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - return (uint64_t)(getWidth() / 8); -} - -const llvm::fltSemantics &DoubleType::getFloatSemantics() const { - return llvm::APFloat::IEEEdouble(); -} - -llvm::TypeSize -DoubleType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return llvm::TypeSize::getFixed(getWidth()); -} - -uint64_t -DoubleType::getABIAlignment(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return (uint64_t)(getWidth() / 8); -} - -uint64_t -DoubleType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - return (uint64_t)(getWidth() / 8); -} - -const llvm::fltSemantics &FP16Type::getFloatSemantics() const { - return llvm::APFloat::IEEEhalf(); -} - -llvm::TypeSize -FP16Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return llvm::TypeSize::getFixed(getWidth()); -} - -uint64_t FP16Type::getABIAlignment(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return (uint64_t)(getWidth() / 8); -} - -uint64_t -FP16Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - return (uint64_t)(getWidth() / 8); -} - -const llvm::fltSemantics &BF16Type::getFloatSemantics() const { - return llvm::APFloat::BFloat(); -} - -llvm::TypeSize -BF16Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return llvm::TypeSize::getFixed(getWidth()); -} - -uint64_t BF16Type::getABIAlignment(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return (uint64_t)(getWidth() / 8); -} - -uint64_t -BF16Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - return (uint64_t)(getWidth() / 8); -} - -const llvm::fltSemantics &FP80Type::getFloatSemantics() const { - return llvm::APFloat::x87DoubleExtended(); -} - -llvm::TypeSize -FP80Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - // Though only 80 bits are used for the value, the type is 128 bits in size. - return llvm::TypeSize::getFixed(128); -} - -uint64_t FP80Type::getABIAlignment(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return 16; -} - -uint64_t -FP80Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - return 16; -} - -const llvm::fltSemantics &FP128Type::getFloatSemantics() const { - return llvm::APFloat::IEEEquad(); -} - -llvm::TypeSize -FP128Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return llvm::TypeSize::getFixed(getWidth()); -} - -uint64_t FP128Type::getABIAlignment(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return 16; -} - -uint64_t -FP128Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - return 16; -} - -const llvm::fltSemantics &LongDoubleType::getFloatSemantics() const { - return mlir::cast(getUnderlying()) - .getFloatSemantics(); -} - -llvm::TypeSize -LongDoubleType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return mlir::cast(getUnderlying()) - .getTypeSizeInBits(dataLayout, params); -} - -uint64_t -LongDoubleType::getABIAlignment(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return mlir::cast(getUnderlying()) - .getABIAlignment(dataLayout, params); -} - -uint64_t LongDoubleType::getPreferredAlignment( - const ::mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return mlir::cast(getUnderlying()) - .getPreferredAlignment(dataLayout, params); -} - -LogicalResult -LongDoubleType::verify(function_ref emitError, - mlir::Type underlying) { - if (!mlir::isa(underlying)) { - emitError() << "invalid underlying type for long double"; - return failure(); - } - - return success(); -} - -//===----------------------------------------------------------------------===// -// Floating-point type helpers -//===----------------------------------------------------------------------===// - -bool cir::isAnyFloatingPointType(mlir::Type t) { - return isa(t); -} - -//===----------------------------------------------------------------------===// -// FuncType Definitions -//===----------------------------------------------------------------------===// - -FuncType FuncType::clone(TypeRange inputs, TypeRange results) const { - assert(results.size() == 1 && "expected exactly one result type"); - return get(llvm::to_vector(inputs), results[0], isVarArg()); -} - -mlir::ParseResult parseFuncTypeArgs(mlir::AsmParser &p, - llvm::SmallVector ¶ms, - bool &isVarArg) { - isVarArg = false; - // `(` `)` - if (succeeded(p.parseOptionalRParen())) - return mlir::success(); - - // `(` `...` `)` - if (succeeded(p.parseOptionalEllipsis())) { - isVarArg = true; - return p.parseRParen(); - } - - // type (`,` type)* (`,` `...`)? - mlir::Type type; - if (p.parseType(type)) - return mlir::failure(); - params.push_back(type); - while (succeeded(p.parseOptionalComma())) { - if (succeeded(p.parseOptionalEllipsis())) { - isVarArg = true; - return p.parseRParen(); - } - if (p.parseType(type)) - return mlir::failure(); - params.push_back(type); - } - - return p.parseRParen(); -} - -void printFuncTypeArgs(mlir::AsmPrinter &p, mlir::ArrayRef params, - bool isVarArg) { - llvm::interleaveComma(params, p, - [&p](mlir::Type type) { p.printType(type); }); - if (isVarArg) { - if (!params.empty()) - p << ", "; - p << "..."; - } - p << ')'; -} - -llvm::ArrayRef FuncType::getReturnTypes() const { - return static_cast(getImpl())->returnType; -} - -bool FuncType::isVoid() const { return mlir::isa(getReturnType()); } - -//===----------------------------------------------------------------------===// -// PointerType Definitions -//===----------------------------------------------------------------------===// - -llvm::TypeSize -PointerType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - // FIXME: improve this in face of address spaces - return llvm::TypeSize::getFixed(64); -} - -uint64_t -PointerType::getABIAlignment(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - // FIXME: improve this in face of address spaces - return 8; -} - -uint64_t PointerType::getPreferredAlignment( - const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - // FIXME: improve this in face of address spaces - return 8; -} - -mlir::LogicalResult -PointerType::verify(llvm::function_ref emitError, - mlir::Type pointee) { - // TODO(CIR): Verification of the address space goes here. - return mlir::success(); -} - //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index df60f69df6fc..7ddc4ce50190 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -11,6 +11,5 @@ add_clang_library(MLIRCIR MLIRDLTIDialect MLIRDataLayoutInterfaces MLIRFuncDialect - MLIRCIRInterfaces clangAST ) diff --git a/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp b/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp deleted file mode 100644 index 41817e90b523..000000000000 --- a/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp +++ /dev/null @@ -1,18 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// Defines the interface to generically handle CIR floating-point types. -// -//===----------------------------------------------------------------------===// - -#include "clang/CIR/Interfaces/CIRFPTypeInterface.h" - -using namespace cir; - -/// Include the generated interfaces. -#include "clang/CIR/Interfaces/CIRFPTypeInterface.cpp.inc" diff --git a/clang/lib/CIR/Interfaces/CMakeLists.txt b/clang/lib/CIR/Interfaces/CMakeLists.txt deleted file mode 100644 index fcd8b6963d06..000000000000 --- a/clang/lib/CIR/Interfaces/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -add_clang_library(MLIRCIRInterfaces - CIRFPTypeInterface.cpp - - ADDITIONAL_HEADER_DIRS - ${MLIR_MAIN_INCLUDE_DIR}/mlir/Interfaces - - DEPENDS - MLIRCIRFPTypeInterfaceIncGen - - LINK_LIBS - ${dialect_libs} - MLIRIR - MLIRSupport - ) diff --git a/clang/test/CIR/global-var-simple.cpp b/clang/test/CIR/global-var-simple.cpp index bbd452655a01..5230ff53f87d 100644 --- a/clang/test/CIR/global-var-simple.cpp +++ b/clang/test/CIR/global-var-simple.cpp @@ -57,42 +57,3 @@ _BitInt(20) sb20; unsigned _BitInt(48) ub48; // CHECK: cir.global @ub48 : !cir.int - -_Float16 f16; -// CHECK: cir.global @f16 : !cir.f16 - -__bf16 bf16; -// CHECK: cir.global @bf16 : !cir.bf16 - -float f; -// CHECK: cir.global @f : !cir.float - -double d; -// CHECK: cir.global @d : !cir.double - -long double ld; -// CHECK: cir.global @ld : !cir.long_double - -__float128 f128; -// CHECK: cir.global @f128 : !cir.f128 - -void *vp; -// CHECK: cir.global @vp : !cir.ptr - -int *ip; -// CHECK: cir.global @ip : !cir.ptr> - -double *dp; -// CHECK: cir.global @dp : !cir.ptr - -char **cpp; -// CHECK: cir.global @cpp : !cir.ptr>> - -void (*fp)(); -// CHECK: cir.global @fp : !cir.ptr> - -int (*fpii)(int); -// CHECK: cir.global @fpii : !cir.ptr (!cir.int)>> - -void (*fpvar)(int, ...); -// CHECK: cir.global @fpvar : !cir.ptr, ...)>> From a44cc52ce3942607dda05cb0921bcc8c66e7581a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 21:58:19 -0800 Subject: [PATCH 0003/2301] Revert "[CIR] Cleanup: mlirContext and astContext (#119450)" This reverts commit 7eb73b95cb336cde14d5c755a09cd880bd3d5df9. --- clang/include/clang/CIR/CIRGenerator.h | 6 +++--- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 13 ++++++------- clang/lib/CIR/CodeGen/CIRGenModule.h | 6 +++--- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 14 ++++++-------- clang/lib/CIR/CodeGen/CIRGenTypes.h | 2 +- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 12 ++++++------ 6 files changed, 25 insertions(+), 28 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 414eba80b88b..c8ca7e4bfa72 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -37,14 +37,14 @@ namespace cir { class CIRGenerator : public clang::ASTConsumer { virtual void anchor(); clang::DiagnosticsEngine &diags; - clang::ASTContext *astContext; + clang::ASTContext *astCtx; // Only used for debug info. llvm::IntrusiveRefCntPtr fs; const clang::CodeGenOptions &codeGenOpts; protected: - std::unique_ptr mlirContext; + std::unique_ptr mlirCtx; std::unique_ptr cgm; public: @@ -52,7 +52,7 @@ class CIRGenerator : public clang::ASTConsumer { llvm::IntrusiveRefCntPtr fs, const clang::CodeGenOptions &cgo); ~CIRGenerator() override; - void Initialize(clang::ASTContext &astContext) override; + void Initialize(clang::ASTContext &astCtx) override; bool HandleTopLevelDecl(clang::DeclGroupRef group) override; mlir::ModuleOp getModule() const; }; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 0db24c3b41d1..e7c9512dcd3d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -25,14 +25,13 @@ using namespace clang; using namespace clang::CIRGen; -CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, - clang::ASTContext &astContext, +CIRGenModule::CIRGenModule(mlir::MLIRContext &context, + clang::ASTContext &astctx, const clang::CodeGenOptions &cgo, DiagnosticsEngine &diags) - : builder(mlirContext, *this), astContext(astContext), - langOpts(astContext.getLangOpts()), - theModule{mlir::ModuleOp::create(mlir::UnknownLoc::get(&mlirContext))}, - diags(diags), target(astContext.getTargetInfo()), genTypes(*this) { + : builder(context, *this), astCtx(astctx), langOpts(astctx.getLangOpts()), + theModule{mlir::ModuleOp::create(mlir::UnknownLoc::get(&context))}, + diags(diags), target(astctx.getTargetInfo()), genTypes(*this) { // Initialize cached types SInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true); @@ -49,7 +48,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, mlir::Location CIRGenModule::getLoc(SourceLocation cLoc) { assert(cLoc.isValid() && "expected valid source location"); - const SourceManager &sm = astContext.getSourceManager(); + const SourceManager &sm = astCtx.getSourceManager(); PresumedLoc pLoc = sm.getPresumedLoc(cLoc); StringRef filename = pLoc.getFilename(); return mlir::FileLineColLoc::get(builder.getStringAttr(filename), diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 1c7ed6377390..397e501fd4e8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -41,7 +41,7 @@ class CIRGenModule : public CIRGenTypeCache { CIRGenModule &operator=(CIRGenModule &) = delete; public: - CIRGenModule(mlir::MLIRContext &mlirContext, clang::ASTContext &astContext, + CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &cgo, clang::DiagnosticsEngine &diags); @@ -51,7 +51,7 @@ class CIRGenModule : public CIRGenTypeCache { CIRGenBuilderTy builder; /// Hold Clang AST information. - clang::ASTContext &astContext; + clang::ASTContext &astCtx; const clang::LangOptions &langOpts; @@ -67,7 +67,7 @@ class CIRGenModule : public CIRGenTypeCache { public: mlir::ModuleOp getModule() const { return theModule; } CIRGenBuilderTy &getBuilder() { return builder; } - clang::ASTContext &getASTContext() const { return astContext; } + clang::ASTContext &getASTContext() const { return astCtx; } CIRGenTypes &getTypes() { return genTypes; } mlir::MLIRContext &getMLIRContext() { return *builder.getContext(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 181af1898baf..e93bf93b1cb7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -9,7 +9,7 @@ using namespace clang; using namespace clang::CIRGen; CIRGenTypes::CIRGenTypes(CIRGenModule &genModule) - : cgm(genModule), astContext(genModule.getASTContext()), + : cgm(genModule), context(genModule.getASTContext()), builder(cgm.getBuilder()) {} CIRGenTypes::~CIRGenTypes() {} @@ -19,7 +19,7 @@ mlir::MLIRContext &CIRGenTypes::getMLIRContext() const { } mlir::Type CIRGenTypes::convertType(QualType type) { - type = astContext.getCanonicalType(type); + type = context.getCanonicalType(type); const Type *ty = type.getTypePtr(); // Has the type already been processed? @@ -43,9 +43,8 @@ mlir::Type CIRGenTypes::convertType(QualType type) { case BuiltinType::SChar: case BuiltinType::Short: case BuiltinType::WChar_S: - resultType = - cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty), - /*isSigned=*/true); + resultType = cir::IntType::get(&getMLIRContext(), context.getTypeSize(ty), + /*isSigned=*/true); break; // Unsigned integral types. case BuiltinType::Char8: @@ -59,9 +58,8 @@ mlir::Type CIRGenTypes::convertType(QualType type) { case BuiltinType::ULongLong: case BuiltinType::UShort: case BuiltinType::WChar_U: - resultType = - cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty), - /*isSigned=*/false); + resultType = cir::IntType::get(&getMLIRContext(), context.getTypeSize(ty), + /*isSigned=*/false); break; default: cgm.errorNYI(SourceLocation(), "processing of built-in type", type); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 563d7759831f..b5039b6d4a81 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -36,7 +36,7 @@ class CIRGenModule; /// AST types to CIR types. class CIRGenTypes { CIRGenModule &cgm; - clang::ASTContext &astContext; + clang::ASTContext &context; CIRGenBuilderTy &builder; public: diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 91070eda7d45..8f3370c0041a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -29,15 +29,15 @@ CIRGenerator::CIRGenerator(clang::DiagnosticsEngine &diags, : diags(diags), fs(std::move(vfs)), codeGenOpts{cgo} {} CIRGenerator::~CIRGenerator() = default; -void CIRGenerator::Initialize(ASTContext &astContext) { +void CIRGenerator::Initialize(ASTContext &astCtx) { using namespace llvm; - this->astContext = &astContext; + this->astCtx = &astCtx; - mlirContext = std::make_unique(); - mlirContext->loadDialect(); - cgm = std::make_unique( - *mlirContext.get(), astContext, codeGenOpts, diags); + mlirCtx = std::make_unique(); + mlirCtx->loadDialect(); + cgm = std::make_unique(*mlirCtx.get(), astCtx, + codeGenOpts, diags); } mlir::ModuleOp CIRGenerator::getModule() const { return cgm->getModule(); } From faab3792acd239991183991f3ada250351a25a71 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 21:58:35 -0800 Subject: [PATCH 0004/2301] Revert "[CIR] Infrastructure: class CIRGenBuilderTy; cache CIR types (#119037)" This reverts commit ffb19f4018e38ba7ff034b78914d5a8d2890a603. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 25 ------------- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 28 -------------- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 17 +-------- clang/lib/CIR/CodeGen/CIRGenModule.h | 8 ++-- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 16 -------- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 37 ++++++++----------- clang/lib/CIR/CodeGen/CIRGenTypes.h | 12 ------ 7 files changed, 22 insertions(+), 121 deletions(-) delete mode 100644 clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h delete mode 100644 clang/lib/CIR/CodeGen/CIRGenBuilder.h diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h deleted file mode 100644 index 75ae74e926fb..000000000000 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ /dev/null @@ -1,25 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_CIR_DIALECT_BUILDER_CIRBASEBUILDER_H -#define LLVM_CLANG_CIR_DIALECT_BUILDER_CIRBASEBUILDER_H - -#include "mlir/IR/Builders.h" - -namespace cir { - -class CIRBaseBuilderTy : public mlir::OpBuilder { - -public: - CIRBaseBuilderTy(mlir::MLIRContext &mlirContext) - : mlir::OpBuilder(&mlirContext) {} -}; - -} // namespace cir - -#endif diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h deleted file mode 100644 index 92115778518d..000000000000 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ /dev/null @@ -1,28 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENBUILDER_H -#define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENBUILDER_H - -#include "CIRGenTypeCache.h" - -#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" - -namespace clang::CIRGen { - -class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { - const CIRGenTypeCache &typeCache; - -public: - CIRGenBuilderTy(mlir::MLIRContext &mlirContext, const CIRGenTypeCache &tc) - : CIRBaseBuilderTy(mlirContext), typeCache(tc) {} -}; - -} // namespace clang::CIRGen - -#endif diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index e7c9512dcd3d..b44f66493254 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -29,22 +29,9 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &cgo, DiagnosticsEngine &diags) - : builder(context, *this), astCtx(astctx), langOpts(astctx.getLangOpts()), + : builder(&context), astCtx(astctx), langOpts(astctx.getLangOpts()), theModule{mlir::ModuleOp::create(mlir::UnknownLoc::get(&context))}, - diags(diags), target(astctx.getTargetInfo()), genTypes(*this) { - - // Initialize cached types - SInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true); - SInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/true); - SInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/true); - SInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true); - SInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true); - UInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false); - UInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false); - UInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false); - UInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false); - UInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/false); -} + diags(diags), target(astCtx.getTargetInfo()), genTypes(*this) {} mlir::Location CIRGenModule::getLoc(SourceLocation cLoc) { assert(cLoc.isValid() && "expected valid source location"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 397e501fd4e8..7a84c942af49 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -13,7 +13,6 @@ #ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENMODULE_H #define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENMODULE_H -#include "CIRGenBuilder.h" #include "CIRGenTypeCache.h" #include "CIRGenTypes.h" @@ -48,7 +47,9 @@ class CIRGenModule : public CIRGenTypeCache { ~CIRGenModule() = default; private: - CIRGenBuilderTy builder; + // TODO(CIR) 'builder' will change to CIRGenBuilderTy once that type is + // defined + mlir::OpBuilder builder; /// Hold Clang AST information. clang::ASTContext &astCtx; @@ -66,10 +67,9 @@ class CIRGenModule : public CIRGenTypeCache { public: mlir::ModuleOp getModule() const { return theModule; } - CIRGenBuilderTy &getBuilder() { return builder; } + mlir::OpBuilder &getBuilder() { return builder; } clang::ASTContext &getASTContext() const { return astCtx; } CIRGenTypes &getTypes() { return genTypes; } - mlir::MLIRContext &getMLIRContext() { return *builder.getContext(); } /// Helpers to convert the presumed location of Clang's SourceLocation to an /// MLIR Location. diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index a357663c33e0..fde9a355f524 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -13,8 +13,6 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENTYPECACHE_H #define LLVM_CLANG_LIB_CIR_CIRGENTYPECACHE_H -#include "clang/CIR/Dialect/IR/CIRTypes.h" - namespace clang::CIRGen { /// This structure provides a set of types that are commonly used @@ -22,20 +20,6 @@ namespace clang::CIRGen { /// constructor and then copied around into new CIRGenFunction's. struct CIRGenTypeCache { CIRGenTypeCache() = default; - - // ClangIR signed integral types of common sizes - cir::IntType SInt8Ty; - cir::IntType SInt16Ty; - cir::IntType SInt32Ty; - cir::IntType SInt64Ty; - cir::IntType SInt128Ty; - - // ClangIR unsigned integral type of common sizes - cir::IntType UInt8Ty; - cir::IntType UInt16Ty; - cir::IntType UInt32Ty; - cir::IntType UInt64Ty; - cir::IntType UInt128Ty; }; } // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index e93bf93b1cb7..e3fcbacf5f81 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -9,24 +9,14 @@ using namespace clang; using namespace clang::CIRGen; CIRGenTypes::CIRGenTypes(CIRGenModule &genModule) - : cgm(genModule), context(genModule.getASTContext()), - builder(cgm.getBuilder()) {} + : cgm(genModule), context(genModule.getASTContext()) {} CIRGenTypes::~CIRGenTypes() {} -mlir::MLIRContext &CIRGenTypes::getMLIRContext() const { - return *builder.getContext(); -} - mlir::Type CIRGenTypes::convertType(QualType type) { type = context.getCanonicalType(type); const Type *ty = type.getTypePtr(); - // Has the type already been processed? - TypeCacheTy::iterator tci = typeCache.find(ty); - if (tci != typeCache.end()) - return tci->second; - // For types that haven't been implemented yet or are otherwise unsupported, // report an error and return 'int'. @@ -34,7 +24,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) { switch (ty->getTypeClass()) { case Type::Builtin: { switch (cast(ty)->getKind()) { - // Signed integral types. + // Signed types. case BuiltinType::Char_S: case BuiltinType::Int: case BuiltinType::Int128: @@ -43,10 +33,11 @@ mlir::Type CIRGenTypes::convertType(QualType type) { case BuiltinType::SChar: case BuiltinType::Short: case BuiltinType::WChar_S: - resultType = cir::IntType::get(&getMLIRContext(), context.getTypeSize(ty), + resultType = cir::IntType::get(cgm.getBuilder().getContext(), + context.getTypeSize(ty), /*isSigned=*/true); break; - // Unsigned integral types. + // Unsigned types. case BuiltinType::Char8: case BuiltinType::Char16: case BuiltinType::Char32: @@ -58,12 +49,14 @@ mlir::Type CIRGenTypes::convertType(QualType type) { case BuiltinType::ULongLong: case BuiltinType::UShort: case BuiltinType::WChar_U: - resultType = cir::IntType::get(&getMLIRContext(), context.getTypeSize(ty), + resultType = cir::IntType::get(cgm.getBuilder().getContext(), + context.getTypeSize(ty), /*isSigned=*/false); break; default: cgm.errorNYI(SourceLocation(), "processing of built-in type", type); - resultType = cgm.SInt32Ty; + resultType = cir::IntType::get(cgm.getBuilder().getContext(), 32, + /*isSigned=*/true); break; } break; @@ -72,21 +65,23 @@ mlir::Type CIRGenTypes::convertType(QualType type) { const auto *bitIntTy = cast(type); if (bitIntTy->getNumBits() > cir::IntType::maxBitwidth()) { cgm.errorNYI(SourceLocation(), "large _BitInt type", type); - resultType = cgm.SInt32Ty; + resultType = cir::IntType::get(cgm.getBuilder().getContext(), 32, + /*isSigned=*/true); } else { - resultType = cir::IntType::get(&getMLIRContext(), bitIntTy->getNumBits(), - bitIntTy->isSigned()); + resultType = + cir::IntType::get(cgm.getBuilder().getContext(), + bitIntTy->getNumBits(), bitIntTy->isSigned()); } break; } default: cgm.errorNYI(SourceLocation(), "processing of type", type); - resultType = cgm.SInt32Ty; + resultType = + cir::IntType::get(cgm.getBuilder().getContext(), 32, /*isSigned=*/true); break; } assert(resultType && "Type conversion not yet implemented"); - typeCache[ty] = resultType; return resultType; } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index b5039b6d4a81..b37738c770de 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -15,12 +15,9 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "llvm/ADT/SmallPtrSet.h" - namespace clang { class ASTContext; class QualType; -class Type; } // namespace clang namespace mlir { @@ -29,7 +26,6 @@ class Type; namespace clang::CIRGen { -class CIRGenBuilderTy; class CIRGenModule; /// This class organizes the cross-module state that is used while lowering @@ -37,19 +33,11 @@ class CIRGenModule; class CIRGenTypes { CIRGenModule &cgm; clang::ASTContext &context; - CIRGenBuilderTy &builder; public: CIRGenTypes(CIRGenModule &cgm); ~CIRGenTypes(); - /// This map of clang::Type to mlir::Type (which includes CIR type) is a - /// cache of types that have already been processed. - using TypeCacheTy = llvm::DenseMap; - TypeCacheTy typeCache; - - mlir::MLIRContext &getMLIRContext() const; - /// Convert a Clang type into a mlir::Type. mlir::Type convertType(clang::QualType type); }; From 8f75c811069a551ecbb2f8036075d4f0cefd68c7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 21:58:43 -0800 Subject: [PATCH 0005/2301] Revert "[CIR] Integral types; simple global variables (#118743)" This reverts commit a43b2e13f9cc69ec7077ea9c74a972e178a2d8f7. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 35 +---- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 27 ---- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 132 ------------------ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 58 ++++---- clang/lib/CIR/CodeGen/CIRGenModule.h | 33 +---- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 87 ------------ clang/lib/CIR/CodeGen/CIRGenTypes.h | 47 ------- clang/lib/CIR/CodeGen/CMakeLists.txt | 1 - clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 18 --- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 120 +--------------- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 4 - clang/test/CIR/global-var-simple.cpp | 59 -------- 12 files changed, 38 insertions(+), 583 deletions(-) delete mode 100644 clang/include/clang/CIR/Dialect/IR/CIRTypes.h delete mode 100644 clang/include/clang/CIR/Dialect/IR/CIRTypes.td delete mode 100644 clang/lib/CIR/CodeGen/CIRGenTypes.cpp delete mode 100644 clang/lib/CIR/CodeGen/CIRGenTypes.h delete mode 100644 clang/test/CIR/global-var-simple.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0d6c65ecf410..4462eb6fc00b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -15,7 +15,6 @@ #define LLVM_CLANG_CIR_DIALECT_IR_CIROPS include "clang/CIR/Dialect/IR/CIRDialect.td" -include "clang/CIR/Dialect/IR/CIRTypes.td" include "mlir/IR/BuiltinAttributeInterfaces.td" include "mlir/IR/EnumAttr.td" @@ -75,35 +74,6 @@ class LLVMLoweringInfo { class CIR_Op traits = []> : Op, LLVMLoweringInfo; -//===----------------------------------------------------------------------===// -// GlobalOp -//===----------------------------------------------------------------------===// - -// TODO(CIR): For starters, cir.global has only name and type. The other -// properties of a global variable will be added over time as more of ClangIR -// is upstreamed. - -def GlobalOp : CIR_Op<"global"> { - let summary = "Declare or define a global variable"; - let description = [{ - The `cir.global` operation declares or defines a named global variable. - - The backing memory for the variable is allocated statically and is - described by the type of the variable. - }]; - - let arguments = (ins SymbolNameAttr:$sym_name, TypeAttr:$sym_type); - - let assemblyFormat = [{ $sym_name `:` $sym_type attr-dict }]; - - let skipDefaultBuilders = 1; - - let builders = [OpBuilder<(ins "llvm::StringRef":$sym_name, - "mlir::Type":$sym_type)>]; - - let hasVerifier = 1; -} - //===----------------------------------------------------------------------===// // FuncOp //===----------------------------------------------------------------------===// @@ -115,15 +85,14 @@ def GlobalOp : CIR_Op<"global"> { def FuncOp : CIR_Op<"func"> { let summary = "Declare or define a function"; let description = [{ - The `cir.func` operation defines a function, similar to the `mlir::FuncOp` - built-in. + ... lots of text to be added later ... }]; let arguments = (ins SymbolNameAttr:$sym_name); let skipDefaultBuilders = 1; - let builders = [OpBuilder<(ins "llvm::StringRef":$sym_name)>]; + let builders = [OpBuilder<(ins "llvm::StringRef":$name)>]; let hasCustomAssemblyFormat = 1; let hasVerifier = 1; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h deleted file mode 100644 index 2bc7d77b2bc8..000000000000 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ /dev/null @@ -1,27 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file declares the types in the CIR dialect. -// -//===----------------------------------------------------------------------===// - -#ifndef MLIR_DIALECT_CIR_IR_CIRTYPES_H_ -#define MLIR_DIALECT_CIR_IR_CIRTYPES_H_ - -#include "mlir/IR/BuiltinAttributes.h" -#include "mlir/IR/Types.h" -#include "mlir/Interfaces/DataLayoutInterfaces.h" - -//===----------------------------------------------------------------------===// -// CIR Dialect Tablegen'd Types -//===----------------------------------------------------------------------===// - -#define GET_TYPEDEF_CLASSES -#include "clang/CIR/Dialect/IR/CIROpsTypes.h.inc" - -#endif // MLIR_DIALECT_CIR_IR_CIRTYPES_H_ diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td deleted file mode 100644 index ce0b6ba1d68c..000000000000 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ /dev/null @@ -1,132 +0,0 @@ -//===- CIRTypes.td - CIR dialect types ---------------------*- tablegen -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file declares the CIR dialect types. -// -//===----------------------------------------------------------------------===// - -#ifndef MLIR_CIR_DIALECT_CIR_TYPES -#define MLIR_CIR_DIALECT_CIR_TYPES - -include "clang/CIR/Dialect/IR/CIRDialect.td" -include "mlir/Interfaces/DataLayoutInterfaces.td" -include "mlir/IR/AttrTypeBase.td" - -//===----------------------------------------------------------------------===// -// CIR Types -//===----------------------------------------------------------------------===// - -class CIR_Type traits = [], - string baseCppClass = "::mlir::Type"> - : TypeDef { - let mnemonic = typeMnemonic; -} - -//===----------------------------------------------------------------------===// -// IntType -//===----------------------------------------------------------------------===// - -def CIR_IntType : CIR_Type<"Int", "int", - [DeclareTypeInterfaceMethods]> { - let summary = "Integer type with arbitrary precision up to a fixed limit"; - let description = [{ - CIR type that represents integer types with arbitrary precision, including - standard integral types such as `int` and `long`, extended integral types - such as `__int128`, and arbitrary width types such as `_BitInt(n)`. - - Those integer types that are directly available in C/C++ standard are called - primitive integer types. Said types are: `signed char`, `short`, `int`, - `long`, `long long`, and their unsigned variations. - }]; - let parameters = (ins "unsigned":$width, "bool":$isSigned); - let hasCustomAssemblyFormat = 1; - let extraClassDeclaration = [{ - /// Return true if this is a signed integer type. - bool isSigned() const { return getIsSigned(); } - /// Return true if this is an unsigned integer type. - bool isUnsigned() const { return !getIsSigned(); } - /// Return type alias. - std::string getAlias() const { - return (isSigned() ? 's' : 'u') + std::to_string(getWidth()) + 'i'; - } - /// Return true if this is a primitive integer type (i.e. signed or unsigned - /// integer types whose bit width is 8, 16, 32, or 64). - bool isPrimitive() const { - return isValidPrimitiveIntBitwidth(getWidth()); - } - bool isSignedPrimitive() const { - return isPrimitive() && isSigned(); - } - - /// Returns a minimum bitwidth of cir::IntType - static unsigned minBitwidth() { return 1; } - /// Returns a maximum bitwidth of cir::IntType - static unsigned maxBitwidth() { return 128; } - - /// Returns true if cir::IntType that represents a primitive integer type - /// can be constructed from the provided bitwidth. - static bool isValidPrimitiveIntBitwidth(unsigned width) { - return width == 8 || width == 16 || width == 32 || width == 64; - } - }]; - let genVerifyDecl = 1; -} - -// Constraints - -// Unsigned integer type of a specific width. -class UInt - : Type($_self)">, - CPred<"::mlir::cast<::cir::IntType>($_self).isUnsigned()">, - CPred<"::mlir::cast<::cir::IntType>($_self).getWidth() == " # width> - ]>, width # "-bit unsigned integer", "::cir::IntType">, - BuildableType< - "cir::IntType::get($_builder.getContext(), " - # width # ", /*isSigned=*/false)"> { - int bitwidth = width; -} - -def UInt1 : UInt<1>; -def UInt8 : UInt<8>; -def UInt16 : UInt<16>; -def UInt32 : UInt<32>; -def UInt64 : UInt<64>; - -// Signed integer type of a specific width. -class SInt - : Type($_self)">, - CPred<"::mlir::cast<::cir::IntType>($_self).isSigned()">, - CPred<"::mlir::cast<::cir::IntType>($_self).getWidth() == " # width> - ]>, width # "-bit signed integer", "::cir::IntType">, - BuildableType< - "cir::IntType::get($_builder.getContext(), " - # width # ", /*isSigned=*/true)"> { - int bitwidth = width; -} - -def SInt1 : SInt<1>; -def SInt8 : SInt<8>; -def SInt16 : SInt<16>; -def SInt32 : SInt<32>; -def SInt64 : SInt<64>; - -def PrimitiveUInt - : AnyTypeOf<[UInt8, UInt16, UInt32, UInt64], "primitive unsigned int", - "::cir::IntType">; - -def PrimitiveSInt - : AnyTypeOf<[SInt8, SInt16, SInt32, SInt64], "primitive signed int", - "::cir::IntType">; - -def PrimitiveInt - : AnyTypeOf<[UInt8, UInt16, UInt32, UInt64, SInt8, SInt16, SInt32, SInt64], - "primitive int", "::cir::IntType">; - -#endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index b44f66493254..5963d43bb967 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -31,7 +31,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, DiagnosticsEngine &diags) : builder(&context), astCtx(astctx), langOpts(astctx.getLangOpts()), theModule{mlir::ModuleOp::create(mlir::UnknownLoc::get(&context))}, - diags(diags), target(astCtx.getTargetInfo()), genTypes(*this) {} + diags(diags), target(astCtx.getTargetInfo()) {} mlir::Location CIRGenModule::getLoc(SourceLocation cLoc) { assert(cLoc.isValid() && "expected valid source location"); @@ -67,8 +67,7 @@ void CIRGenModule::emitGlobal(clang::GlobalDecl gd) { return; } } else { - assert(cast(global)->isFileVarDecl() && - "Cannot emit local var decl as global"); + errorNYI(global->getSourceRange(), "global variable declaration"); } // TODO(CIR): Defer emitting some global definitions until later @@ -78,27 +77,9 @@ void CIRGenModule::emitGlobal(clang::GlobalDecl gd) { void CIRGenModule::emitGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op) { auto const *funcDecl = cast(gd.getDecl()); - if (clang::IdentifierInfo *identifier = funcDecl->getIdentifier()) { - auto funcOp = builder.create( - getLoc(funcDecl->getSourceRange()), identifier->getName()); - theModule.push_back(funcOp); - } else { - errorNYI(funcDecl->getSourceRange().getBegin(), - "function definition with a non-identifier for a name"); - } -} - -void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd, - bool isTentative) { - mlir::Type type = getTypes().convertType(vd->getType()); - if (clang::IdentifierInfo *identifier = vd->getIdentifier()) { - auto varOp = builder.create(getLoc(vd->getSourceRange()), - identifier->getName(), type); - theModule.push_back(varOp); - } else { - errorNYI(vd->getSourceRange().getBegin(), - "variable definition with a non-identifier for a name"); - } + auto funcOp = builder.create( + getLoc(funcDecl->getSourceRange()), funcDecl->getIdentifier()->getName()); + theModule.push_back(funcOp); } void CIRGenModule::emitGlobalDefinition(clang::GlobalDecl gd, @@ -122,9 +103,6 @@ void CIRGenModule::emitGlobalDefinition(clang::GlobalDecl gd, return; } - if (const auto *vd = dyn_cast(decl)) - return emitGlobalVarDefinition(vd, !vd->hasDefinition()); - llvm_unreachable("Invalid argument to CIRGenModule::emitGlobalDefinition"); } @@ -148,15 +126,15 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) { emitGlobal(fd); break; } - - case Decl::Var: { - auto *vd = cast(decl); - emitGlobal(vd); - break; - } } } +DiagnosticBuilder CIRGenModule::errorNYI(llvm::StringRef feature) { + unsigned diagID = diags.getCustomDiagID( + DiagnosticsEngine::Error, "ClangIR code gen Not Yet Implemented: %0"); + return diags.Report(diagID) << feature; +} + DiagnosticBuilder CIRGenModule::errorNYI(SourceLocation loc, llvm::StringRef feature) { unsigned diagID = diags.getCustomDiagID( @@ -164,7 +142,21 @@ DiagnosticBuilder CIRGenModule::errorNYI(SourceLocation loc, return diags.Report(loc, diagID) << feature; } +DiagnosticBuilder CIRGenModule::errorNYI(SourceLocation loc, + llvm::StringRef feature, + llvm::StringRef name) { + unsigned diagID = diags.getCustomDiagID( + DiagnosticsEngine::Error, "ClangIR code gen Not Yet Implemented: %0: %1"); + return diags.Report(loc, diagID) << feature << name; +} + DiagnosticBuilder CIRGenModule::errorNYI(SourceRange loc, llvm::StringRef feature) { return errorNYI(loc.getBegin(), feature) << loc; } + +DiagnosticBuilder CIRGenModule::errorNYI(SourceRange loc, + llvm::StringRef feature, + llvm::StringRef name) { + return errorNYI(loc.getBegin(), feature, name) << loc; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 7a84c942af49..aaded92e6a63 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -14,22 +14,23 @@ #define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENMODULE_H #include "CIRGenTypeCache.h" -#include "CIRGenTypes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" -#include "clang/Basic/SourceManager.h" #include "llvm/ADT/StringRef.h" namespace clang { class ASTContext; class CodeGenOptions; class Decl; +class DiagnosticBuilder; +class DiagnosticsEngine; class GlobalDecl; class LangOptions; +class SourceLocation; +class SourceRange; class TargetInfo; -class VarDecl; namespace CIRGen { @@ -63,13 +64,8 @@ class CIRGenModule : public CIRGenTypeCache { const clang::TargetInfo ⌖ - CIRGenTypes genTypes; - public: mlir::ModuleOp getModule() const { return theModule; } - mlir::OpBuilder &getBuilder() { return builder; } - clang::ASTContext &getASTContext() const { return astCtx; } - CIRGenTypes &getTypes() { return genTypes; } /// Helpers to convert the presumed location of Clang's SourceLocation to an /// MLIR Location. @@ -85,28 +81,13 @@ class CIRGenModule : public CIRGenTypeCache { void emitGlobalDefinition(clang::GlobalDecl gd, mlir::Operation *op = nullptr); void emitGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op); - void emitGlobalVarDefinition(const clang::VarDecl *vd, - bool isTentative = false); /// Helpers to emit "not yet implemented" error diagnostics + DiagnosticBuilder errorNYI(llvm::StringRef); DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef); - - template - DiagnosticBuilder errorNYI(SourceLocation loc, llvm::StringRef feature, - const T &name) { - unsigned diagID = - diags.getCustomDiagID(DiagnosticsEngine::Error, - "ClangIR code gen Not Yet Implemented: %0: %1"); - return diags.Report(loc, diagID) << feature << name; - } - + DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef, llvm::StringRef); DiagnosticBuilder errorNYI(SourceRange, llvm::StringRef); - - template - DiagnosticBuilder errorNYI(SourceRange loc, llvm::StringRef feature, - const T &name) { - return errorNYI(loc.getBegin(), feature, name) << loc; - } + DiagnosticBuilder errorNYI(SourceRange, llvm::StringRef, llvm::StringRef); }; } // namespace CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp deleted file mode 100644 index e3fcbacf5f81..000000000000 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ /dev/null @@ -1,87 +0,0 @@ -#include "CIRGenTypes.h" - -#include "CIRGenModule.h" - -#include "clang/AST/ASTContext.h" -#include "clang/AST/Type.h" - -using namespace clang; -using namespace clang::CIRGen; - -CIRGenTypes::CIRGenTypes(CIRGenModule &genModule) - : cgm(genModule), context(genModule.getASTContext()) {} - -CIRGenTypes::~CIRGenTypes() {} - -mlir::Type CIRGenTypes::convertType(QualType type) { - type = context.getCanonicalType(type); - const Type *ty = type.getTypePtr(); - - // For types that haven't been implemented yet or are otherwise unsupported, - // report an error and return 'int'. - - mlir::Type resultType = nullptr; - switch (ty->getTypeClass()) { - case Type::Builtin: { - switch (cast(ty)->getKind()) { - // Signed types. - case BuiltinType::Char_S: - case BuiltinType::Int: - case BuiltinType::Int128: - case BuiltinType::Long: - case BuiltinType::LongLong: - case BuiltinType::SChar: - case BuiltinType::Short: - case BuiltinType::WChar_S: - resultType = cir::IntType::get(cgm.getBuilder().getContext(), - context.getTypeSize(ty), - /*isSigned=*/true); - break; - // Unsigned types. - case BuiltinType::Char8: - case BuiltinType::Char16: - case BuiltinType::Char32: - case BuiltinType::Char_U: - case BuiltinType::UChar: - case BuiltinType::UInt: - case BuiltinType::UInt128: - case BuiltinType::ULong: - case BuiltinType::ULongLong: - case BuiltinType::UShort: - case BuiltinType::WChar_U: - resultType = cir::IntType::get(cgm.getBuilder().getContext(), - context.getTypeSize(ty), - /*isSigned=*/false); - break; - default: - cgm.errorNYI(SourceLocation(), "processing of built-in type", type); - resultType = cir::IntType::get(cgm.getBuilder().getContext(), 32, - /*isSigned=*/true); - break; - } - break; - } - case Type::BitInt: { - const auto *bitIntTy = cast(type); - if (bitIntTy->getNumBits() > cir::IntType::maxBitwidth()) { - cgm.errorNYI(SourceLocation(), "large _BitInt type", type); - resultType = cir::IntType::get(cgm.getBuilder().getContext(), 32, - /*isSigned=*/true); - } else { - resultType = - cir::IntType::get(cgm.getBuilder().getContext(), - bitIntTy->getNumBits(), bitIntTy->isSigned()); - } - break; - } - default: - cgm.errorNYI(SourceLocation(), "processing of type", type); - resultType = - cir::IntType::get(cgm.getBuilder().getContext(), 32, /*isSigned=*/true); - break; - } - - assert(resultType && "Type conversion not yet implemented"); - - return resultType; -} diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h deleted file mode 100644 index b37738c770de..000000000000 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ /dev/null @@ -1,47 +0,0 @@ -//===--- CIRGenTypes.h - Type translation for CIR CodeGen -------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This is the code that handles AST -> CIR type lowering. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H -#define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H - -#include "clang/CIR/Dialect/IR/CIRTypes.h" - -namespace clang { -class ASTContext; -class QualType; -} // namespace clang - -namespace mlir { -class Type; -} - -namespace clang::CIRGen { - -class CIRGenModule; - -/// This class organizes the cross-module state that is used while lowering -/// AST types to CIR types. -class CIRGenTypes { - CIRGenModule &cgm; - clang::ASTContext &context; - -public: - CIRGenTypes(CIRGenModule &cgm); - ~CIRGenTypes(); - - /// Convert a Clang type into a mlir::Type. - mlir::Type convertType(clang::QualType type); -}; - -} // namespace clang::CIRGen - -#endif diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 9ada31c11de9..17a3aabfbd7f 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -9,7 +9,6 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR CIRGenerator.cpp CIRGenModule.cpp - CIRGenTypes.cpp DEPENDS MLIRCIR diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index dbdca1f84016..f666e5ab4b99 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -32,22 +32,6 @@ void cir::CIRDialect::initialize() { >(); } -//===----------------------------------------------------------------------===// -// GlobalOp -//===----------------------------------------------------------------------===// - -// TODO(CIR): The properties of global variables that require verification -// haven't been implemented yet. -mlir::LogicalResult cir::GlobalOp::verify() { return success(); } - -void cir::GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, - llvm::StringRef sym_name, mlir::Type sym_type) { - odsState.addAttribute(getSymNameAttrName(odsState.name), - odsBuilder.getStringAttr(sym_name)); - odsState.addAttribute(getSymTypeAttrName(odsState.name), - mlir::TypeAttr::get(sym_type)); -} - //===----------------------------------------------------------------------===// // FuncOp //===----------------------------------------------------------------------===// @@ -72,8 +56,6 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p.printSymbolName(getSymName()); } -// TODO(CIR): The properties of functions that require verification haven't -// been implemented yet. mlir::LogicalResult cir::FuncOp::verify() { return success(); } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index de38337057d3..4eeb70f06f5f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -10,18 +10,7 @@ // //===----------------------------------------------------------------------===// -#include "clang/CIR/Dialect/IR/CIRTypes.h" - -#include "mlir/IR/DialectImplementation.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "llvm/ADT/TypeSwitch.h" - -//===----------------------------------------------------------------------===// -// Get autogenerated stuff -//===----------------------------------------------------------------------===// - -#define GET_TYPEDEF_CLASSES -#include "clang/CIR/Dialect/IR/CIROpsTypes.cpp.inc" using namespace mlir; using namespace cir; @@ -31,106 +20,12 @@ using namespace cir; //===----------------------------------------------------------------------===// Type CIRDialect::parseType(DialectAsmParser &parser) const { - llvm::SMLoc typeLoc = parser.getCurrentLocation(); - llvm::StringRef mnemonic; - Type genType; - - // Try to parse as a tablegen'd type. - OptionalParseResult parseResult = - generatedTypeParser(parser, &mnemonic, genType); - if (parseResult.has_value()) - return genType; - - // TODO(CIR) Attempt to parse as a raw C++ type. - parser.emitError(typeLoc) << "unknown CIR type: " << mnemonic; - return Type(); + // No types yet to parse + return Type{}; } void CIRDialect::printType(Type type, DialectAsmPrinter &os) const { - // Try to print as a tablegen'd type. - if (generatedTypePrinter(type, os).succeeded()) - return; - - // TODO(CIR) Attempt to print as a raw C++ type. - llvm::report_fatal_error("printer is missing a handler for this type"); -} - -//===----------------------------------------------------------------------===// -// IntType Definitions -//===----------------------------------------------------------------------===// - -Type IntType::parse(mlir::AsmParser &parser) { - mlir::MLIRContext *context = parser.getBuilder().getContext(); - llvm::SMLoc loc = parser.getCurrentLocation(); - bool isSigned; - unsigned width; - - if (parser.parseLess()) - return {}; - - // Fetch integer sign. - llvm::StringRef sign; - if (parser.parseKeyword(&sign)) - return {}; - if (sign == "s") - isSigned = true; - else if (sign == "u") - isSigned = false; - else { - parser.emitError(loc, "expected 's' or 'u'"); - return {}; - } - - if (parser.parseComma()) - return {}; - - // Fetch integer size. - if (parser.parseInteger(width)) - return {}; - if (width < IntType::minBitwidth() || width > IntType::maxBitwidth()) { - parser.emitError(loc, "expected integer width to be from ") - << IntType::minBitwidth() << " up to " << IntType::maxBitwidth(); - return {}; - } - - if (parser.parseGreater()) - return {}; - - return IntType::get(context, width, isSigned); -} - -void IntType::print(mlir::AsmPrinter &printer) const { - char sign = isSigned() ? 's' : 'u'; - printer << '<' << sign << ", " << getWidth() << '>'; -} - -llvm::TypeSize -IntType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return llvm::TypeSize::getFixed(getWidth()); -} - -uint64_t IntType::getABIAlignment(const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { - return (uint64_t)(getWidth() / 8); -} - -uint64_t -IntType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - return (uint64_t)(getWidth() / 8); -} - -mlir::LogicalResult -IntType::verify(llvm::function_ref emitError, - unsigned width, bool isSigned) { - if (width < IntType::minBitwidth() || width > IntType::maxBitwidth()) { - emitError() << "IntType only supports widths from " - << IntType::minBitwidth() << " up to " - << IntType::maxBitwidth(); - return mlir::failure(); - } - return mlir::success(); + // No types yet to print } //===----------------------------------------------------------------------===// @@ -138,12 +33,5 @@ IntType::verify(llvm::function_ref emitError, //===----------------------------------------------------------------------===// void CIRDialect::registerTypes() { - // Register tablegen'd types. - addTypes< -#define GET_TYPEDEF_LIST -#include "clang/CIR/Dialect/IR/CIROpsTypes.cpp.inc" - >(); - - // Register raw C++ types. - // TODO(CIR) addTypes(); + // No types yet to register } diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 7ddc4ce50190..75cee3f87113 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -8,8 +8,4 @@ add_clang_library(MLIRCIR LINK_LIBS PUBLIC MLIRIR - MLIRDLTIDialect - MLIRDataLayoutInterfaces - MLIRFuncDialect - clangAST ) diff --git a/clang/test/CIR/global-var-simple.cpp b/clang/test/CIR/global-var-simple.cpp deleted file mode 100644 index 5230ff53f87d..000000000000 --- a/clang/test/CIR/global-var-simple.cpp +++ /dev/null @@ -1,59 +0,0 @@ -// Global variables of intergal types -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s - -char c; -// CHECK: cir.global @c : !cir.int - -signed char sc; -// CHECK: cir.global @sc : !cir.int - -unsigned char uc; -// CHECK: cir.global @uc : !cir.int - -short ss; -// CHECK: cir.global @ss : !cir.int - -unsigned short us; -// CHECK: cir.global @us : !cir.int - -int si; -// CHECK: cir.global @si : !cir.int - -unsigned ui; -// CHECK: cir.global @ui : !cir.int - -long sl; -// CHECK: cir.global @sl : !cir.int - -unsigned long ul; -// CHECK: cir.global @ul : !cir.int - -long long sll; -// CHECK: cir.global @sll : !cir.int - -unsigned long long ull; -// CHECK: cir.global @ull : !cir.int - -__int128 s128; -// CHECK: cir.global @s128 : !cir.int - -unsigned __int128 u128; -// CHECK: cir.global @u128 : !cir.int - -wchar_t wc; -// CHECK: cir.global @wc : !cir.int - -char8_t c8; -// CHECK: cir.global @c8 : !cir.int - -char16_t c16; -// CHECK: cir.global @c16 : !cir.int - -char32_t c32; -// CHECK: cir.global @c32 : !cir.int - -_BitInt(20) sb20; -// CHECK: cir.global @sb20 : !cir.int - -unsigned _BitInt(48) ub48; -// CHECK: cir.global @ub48 : !cir.int From 0e4601613897bb284e37a6d48fc63f66a2e23a8d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 21:58:52 -0800 Subject: [PATCH 0006/2301] Revert "[CIR] Fix warning in CIRGenAction.cpp (#118389)" This reverts commit eaa4eb281d4a0e34c7b4c2dac50cd151c3cb27e6. --- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 21b6bc56ed05..5a31e2070819 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -66,6 +66,9 @@ class CIRGenConsumer : public clang::ASTConsumer { MlirModule->print(*OutputStream, Flags); } break; + default: + llvm_unreachable("NYI: CIRGenAction other than EmitCIR"); + break; } } }; From c8424f63819bf4a2812cbfb501821e7aedd64d15 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:06:18 -0800 Subject: [PATCH 0007/2301] Revert "[clang][CIR] Fix missing dependency of MLIRCIR (#116221)" This reverts commit 56e56c9e6673cc17f4bc7cdb3a5dbffc1557b446. --- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 75cee3f87113..1518e8c76060 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -3,9 +3,6 @@ add_clang_library(MLIRCIR CIRDialect.cpp CIRTypes.cpp - DEPENDS - MLIRCIROpsIncGen - LINK_LIBS PUBLIC MLIRIR ) From 699714a4ec6e577d1272edc0e0cb77824960d695 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:06:25 -0800 Subject: [PATCH 0008/2301] Revert "[clang][CIR] Change buildX functions to emitX (#115568)" This reverts commit 1791b25f43f4e6a0b21284ce8076cfab160cb61a. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 20 ++++++++++---------- clang/lib/CIR/CodeGen/CIRGenModule.h | 10 +++++----- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 2 +- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 5963d43bb967..4e8a8cc3f4c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -50,7 +50,7 @@ mlir::Location CIRGenModule::getLoc(SourceRange cRange) { return mlir::FusedLoc::get({begin, end}, metadata, builder.getContext()); } -void CIRGenModule::emitGlobal(clang::GlobalDecl gd) { +void CIRGenModule::buildGlobal(clang::GlobalDecl gd) { const auto *global = cast(gd.getDecl()); if (const auto *fd = dyn_cast(global)) { @@ -71,19 +71,19 @@ void CIRGenModule::emitGlobal(clang::GlobalDecl gd) { } // TODO(CIR): Defer emitting some global definitions until later - emitGlobalDefinition(gd); + buildGlobalDefinition(gd); } -void CIRGenModule::emitGlobalFunctionDefinition(clang::GlobalDecl gd, - mlir::Operation *op) { +void CIRGenModule::buildGlobalFunctionDefinition(clang::GlobalDecl gd, + mlir::Operation *op) { auto const *funcDecl = cast(gd.getDecl()); auto funcOp = builder.create( getLoc(funcDecl->getSourceRange()), funcDecl->getIdentifier()->getName()); theModule.push_back(funcOp); } -void CIRGenModule::emitGlobalDefinition(clang::GlobalDecl gd, - mlir::Operation *op) { +void CIRGenModule::buildGlobalDefinition(clang::GlobalDecl gd, + mlir::Operation *op) { const auto *decl = cast(gd.getDecl()); if (const auto *fd = dyn_cast(decl)) { // TODO(CIR): Skip generation of CIR for functions with available_externally @@ -99,15 +99,15 @@ void CIRGenModule::emitGlobalDefinition(clang::GlobalDecl gd, if (fd->isMultiVersion()) errorNYI(fd->getSourceRange(), "multiversion functions"); - emitGlobalFunctionDefinition(gd, op); + buildGlobalFunctionDefinition(gd, op); return; } - llvm_unreachable("Invalid argument to CIRGenModule::emitGlobalDefinition"); + llvm_unreachable("Invalid argument to CIRGenModule::buildGlobalDefinition"); } // Emit code for a single top level declaration. -void CIRGenModule::emitTopLevelDecl(Decl *decl) { +void CIRGenModule::buildTopLevelDecl(Decl *decl) { // Ignore dependent declarations. if (decl->isTemplated()) @@ -123,7 +123,7 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) { auto *fd = cast(decl); // Consteval functions shouldn't be emitted. if (!fd->isConsteval()) - emitGlobal(fd); + buildGlobal(fd); break; } } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index aaded92e6a63..9e5950ff71c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -72,15 +72,15 @@ class CIRGenModule : public CIRGenTypeCache { mlir::Location getLoc(clang::SourceLocation cLoc); mlir::Location getLoc(clang::SourceRange cRange); - void emitTopLevelDecl(clang::Decl *decl); + void buildTopLevelDecl(clang::Decl *decl); /// Emit code for a single global function or variable declaration. Forward /// declarations are emitted lazily. - void emitGlobal(clang::GlobalDecl gd); + void buildGlobal(clang::GlobalDecl gd); - void emitGlobalDefinition(clang::GlobalDecl gd, - mlir::Operation *op = nullptr); - void emitGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op); + void buildGlobalDefinition(clang::GlobalDecl gd, + mlir::Operation *op = nullptr); + void buildGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op); /// Helpers to emit "not yet implemented" error diagnostics DiagnosticBuilder errorNYI(llvm::StringRef); diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 8f3370c0041a..85367a916ef7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -45,7 +45,7 @@ mlir::ModuleOp CIRGenerator::getModule() const { return cgm->getModule(); } bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef group) { for (Decl *decl : group) - cgm->emitTopLevelDecl(decl); + cgm->buildTopLevelDecl(decl); return true; } From 28af588b9a5d1a2fe05141e94f27e9ec8e44ce19 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:06:38 -0800 Subject: [PATCH 0009/2301] Revert "[clang][CIR] Merge the mlir::cir namespace into cir (#115386)" This reverts commit c72389d4feef9eafc902f99c41f85ed218b5bedf. --- clang/include/clang/CIR/Dialect/IR/CIRDialect.td | 14 ++++++-------- clang/include/clang/CIR/Dialect/IR/CIROps.td | 8 ++++---- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 10 +++++----- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 2 +- .../lib/FrontendTool/ExecuteCompilerInvocation.cpp | 2 +- 8 files changed, 20 insertions(+), 22 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index 305a06427ed0..69d6e9774942 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -22,7 +22,7 @@ def CIR_Dialect : Dialect { let summary = "A high-level dialect for analyzing and optimizing Clang " "supported languages"; - let cppNamespace = "::cir"; + let cppNamespace = "::mlir::cir"; let useDefaultAttributePrinterParser = 0; let useDefaultTypePrinterParser = 0; @@ -31,15 +31,13 @@ def CIR_Dialect : Dialect { void registerAttributes(); void registerTypes(); - mlir::Type parseType(mlir::DialectAsmParser &parser) const override; - void printType(mlir::Type type, - mlir::DialectAsmPrinter &printer) const override; + Type parseType(DialectAsmParser &parser) const override; + void printType(Type type, DialectAsmPrinter &printer) const override; - mlir::Attribute parseAttribute(mlir::DialectAsmParser &parser, - mlir::Type type) const override; + Attribute parseAttribute(DialectAsmParser &parser, + Type type) const override; - void printAttribute(mlir::Attribute attr, - mlir::DialectAsmPrinter &os) const override; + void printAttribute(Attribute attr, DialectAsmPrinter &os) const override; }]; } diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 4462eb6fc00b..c0440faa3c7b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -51,12 +51,12 @@ include "mlir/Interfaces/SideEffectInterfaces.td" // following: // // class CIRFooOpLowering -// : public mlir::OpConversionPattern { +// : public mlir::OpConversionPattern { // public: -// using OpConversionPattern::OpConversionPattern; +// using OpConversionPattern::OpConversionPattern; // // mlir::LogicalResult matchAndRewrite( -// cir::FooOp op, +// mlir::cir::FooOp op, // OpAdaptor adaptor, // mlir::ConversionPatternRewriter &rewriter) const override { // rewriter.replaceOpWithNewOp( @@ -92,7 +92,7 @@ def FuncOp : CIR_Op<"func"> { let skipDefaultBuilders = 1; - let builders = [OpBuilder<(ins "llvm::StringRef":$name)>]; + let builders = [OpBuilder<(ins "StringRef":$name)>]; let hasCustomAssemblyFormat = 1; let hasVerifier = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 4e8a8cc3f4c5..5a6fc27a130c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -77,7 +77,7 @@ void CIRGenModule::buildGlobal(clang::GlobalDecl gd) { void CIRGenModule::buildGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op) { auto const *funcDecl = cast(gd.getDecl()); - auto funcOp = builder.create( + auto funcOp = builder.create( getLoc(funcDecl->getSourceRange()), funcDecl->getIdentifier()->getName()); theModule.push_back(funcOp); } diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 85367a916ef7..825f78d32e76 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -35,7 +35,7 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { this->astCtx = &astCtx; mlirCtx = std::make_unique(); - mlirCtx->loadDialect(); + mlirCtx->loadDialect(); cgm = std::make_unique(*mlirCtx.get(), astCtx, codeGenOpts, diags); } diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 7d42da1ab20d..6d74d72b77dc 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -13,7 +13,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" using namespace mlir; -using namespace cir; +using namespace mlir::cir; //===----------------------------------------------------------------------===// // General CIR parsing / printing diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f666e5ab4b99..e0b38a2902bd 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -17,13 +17,13 @@ #include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc" using namespace mlir; -using namespace cir; +using namespace mlir::cir; //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// -void cir::CIRDialect::initialize() { +void mlir::cir::CIRDialect::initialize() { registerTypes(); registerAttributes(); addOperations< @@ -36,8 +36,8 @@ void cir::CIRDialect::initialize() { // FuncOp //===----------------------------------------------------------------------===// -void cir::FuncOp::build(OpBuilder &builder, OperationState &result, - StringRef name) { +void mlir::cir::FuncOp::build(OpBuilder &builder, OperationState &result, + StringRef name) { result.addAttribute(SymbolTable::getSymbolAttrName(), builder.getStringAttr(name)); } @@ -56,7 +56,7 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p.printSymbolName(getSymName()); } -mlir::LogicalResult cir::FuncOp::verify() { return success(); } +mlir::LogicalResult mlir::cir::FuncOp::verify() { return success(); } //===----------------------------------------------------------------------===// // TableGen'd op method definitions diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 4eeb70f06f5f..167c237ae551 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -13,7 +13,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" using namespace mlir; -using namespace cir; +using namespace mlir::cir; //===----------------------------------------------------------------------===// // General CIR parsing / printing diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 3f95a1efb2ee..60fde03289cf 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -67,7 +67,7 @@ CreateFrontendBaseAction(CompilerInstance &CI) { case EmitBC: return std::make_unique(); case EmitCIR: #if CLANG_ENABLE_CIR - return std::make_unique(); + return std::make_unique<::cir::EmitCIRAction>(); #else llvm_unreachable("CIR suppport not built into clang"); #endif From 220471e6064986970c3c5dd25298329670107270 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:06:46 -0800 Subject: [PATCH 0010/2301] Revert "[clang][CIR] Move CIRGen types into clang::CIRGen (#115385)" This reverts commit 40e545098e8bb5a18988316331e46c4557378afa. --- clang/include/clang/CIR/CIRGenerator.h | 7 +++---- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 +--- clang/lib/CIR/CodeGen/CIRGenModule.h | 8 ++++---- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 4 ++-- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 4 ++-- 5 files changed, 12 insertions(+), 15 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index c8ca7e4bfa72..aa1a7e64459b 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -25,15 +25,14 @@ namespace clang { class DeclGroupRef; class DiagnosticsEngine; -namespace CIRGen { -class CIRGenModule; -} // namespace CIRGen } // namespace clang namespace mlir { class MLIRContext; } // namespace mlir namespace cir { +class CIRGenModule; + class CIRGenerator : public clang::ASTConsumer { virtual void anchor(); clang::DiagnosticsEngine &diags; @@ -45,7 +44,7 @@ class CIRGenerator : public clang::ASTConsumer { protected: std::unique_ptr mlirCtx; - std::unique_ptr cgm; + std::unique_ptr cgm; public: CIRGenerator(clang::DiagnosticsEngine &diags, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 5a6fc27a130c..c1adc7ecbf74 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -22,9 +22,7 @@ #include "mlir/IR/Location.h" #include "mlir/IR/MLIRContext.h" -using namespace clang; -using namespace clang::CIRGen; - +using namespace cir; CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &cgo, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 9e5950ff71c5..2bf6a5d9c8f5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -31,8 +31,10 @@ class LangOptions; class SourceLocation; class SourceRange; class TargetInfo; +} // namespace clang -namespace CIRGen { +using namespace clang; +namespace cir { /// This class organizes the cross-function state that is used while generating /// CIR code. @@ -89,8 +91,6 @@ class CIRGenModule : public CIRGenTypeCache { DiagnosticBuilder errorNYI(SourceRange, llvm::StringRef); DiagnosticBuilder errorNYI(SourceRange, llvm::StringRef, llvm::StringRef); }; -} // namespace CIRGen - -} // namespace clang +} // namespace cir #endif // LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENMODULE_H diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index fde9a355f524..6478e0a07809 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -13,7 +13,7 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENTYPECACHE_H #define LLVM_CLANG_LIB_CIR_CIRGENTYPECACHE_H -namespace clang::CIRGen { +namespace cir { /// This structure provides a set of types that are commonly used /// during IR emission. It's initialized once in CodeGenModule's @@ -22,6 +22,6 @@ struct CIRGenTypeCache { CIRGenTypeCache() = default; }; -} // namespace clang::CIRGen +} // namespace cir #endif // LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENTYPECACHE_H diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 825f78d32e76..152124a00b2b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -36,8 +36,8 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { mlirCtx = std::make_unique(); mlirCtx->loadDialect(); - cgm = std::make_unique(*mlirCtx.get(), astCtx, - codeGenOpts, diags); + cgm = std::make_unique(*mlirCtx.get(), astCtx, codeGenOpts, + diags); } mlir::ModuleOp CIRGenerator::getModule() const { return cgm->getModule(); } From f38305140a4e21a85f9df5598d92448ee6c684b8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:06:52 -0800 Subject: [PATCH 0011/2301] Revert "[CIR] Call code gen; create empty cir.func op (#113483)" This reverts commit c695a32576525b047f92b90de71eb707c152e29c. --- clang/include/clang/CIR/CIRGenerator.h | 1 - .../include/clang/CIR/Dialect/IR/CIRDialect.h | 21 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 82 ----------- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 136 +----------------- clang/lib/CIR/CodeGen/CIRGenModule.h | 34 ----- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 10 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 38 ----- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 55 +------ clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 37 ----- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 5 - clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 41 +----- clang/lib/Driver/ToolChains/Clang.cpp | 2 - clang/test/CIR/hello.c | 6 +- 13 files changed, 10 insertions(+), 458 deletions(-) delete mode 100644 clang/lib/CIR/Dialect/IR/CIRAttrs.cpp delete mode 100644 clang/lib/CIR/Dialect/IR/CIRTypes.cpp diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index aa1a7e64459b..9a8930ac46ea 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -53,7 +53,6 @@ class CIRGenerator : public clang::ASTConsumer { ~CIRGenerator() override; void Initialize(clang::ASTContext &astCtx) override; bool HandleTopLevelDecl(clang::DeclGroupRef group) override; - mlir::ModuleOp getModule() const; }; } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index 0b71bdad29a3..d53e5d1663d6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -13,25 +13,4 @@ #ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H #define LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H -#include "mlir/IR/Builders.h" -#include "mlir/IR/BuiltinOps.h" -#include "mlir/IR/BuiltinTypes.h" -#include "mlir/IR/Dialect.h" -#include "mlir/IR/OpDefinition.h" -#include "mlir/Interfaces/CallInterfaces.h" -#include "mlir/Interfaces/ControlFlowInterfaces.h" -#include "mlir/Interfaces/FunctionInterfaces.h" -#include "mlir/Interfaces/InferTypeOpInterface.h" -#include "mlir/Interfaces/LoopLikeInterface.h" -#include "mlir/Interfaces/MemorySlotInterfaces.h" -#include "mlir/Interfaces/SideEffectInterfaces.h" - -#include "clang/CIR/Dialect/IR/CIROpsDialect.h.inc" - -// TableGen'erated files for MLIR dialects require that a macro be defined when -// they are included. GET_OP_CLASSES tells the file to define the classes for -// the operations of that dialect. -#define GET_OP_CLASSES -#include "clang/CIR/Dialect/IR/CIROps.h.inc" - #endif // LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index c0440faa3c7b..7311c8db783e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -16,86 +16,4 @@ include "clang/CIR/Dialect/IR/CIRDialect.td" -include "mlir/IR/BuiltinAttributeInterfaces.td" -include "mlir/IR/EnumAttr.td" -include "mlir/IR/SymbolInterfaces.td" -include "mlir/IR/CommonAttrConstraints.td" -include "mlir/Interfaces/ControlFlowInterfaces.td" -include "mlir/Interfaces/FunctionInterfaces.td" -include "mlir/Interfaces/InferTypeOpInterface.td" -include "mlir/Interfaces/LoopLikeInterface.td" -include "mlir/Interfaces/MemorySlotInterfaces.td" -include "mlir/Interfaces/SideEffectInterfaces.td" - -//===----------------------------------------------------------------------===// -// CIR Ops -//===----------------------------------------------------------------------===// - -// LLVMLoweringInfo is used by cir-tablegen to generate LLVM lowering logic -// automatically for CIR operations. The `llvmOp` field gives the name of the -// LLVM IR dialect operation that the CIR operation will be lowered to. The -// input arguments of the CIR operation will be passed in the same order to the -// lowered LLVM IR operation. -// -// Example: -// -// For the following CIR operation definition: -// -// def FooOp : CIR_Op<"foo"> { -// // ... -// let arguments = (ins CIR_AnyType:$arg1, CIR_AnyType:$arg2); -// let llvmOp = "BarOp"; -// } -// -// cir-tablegen will generate LLVM lowering code for the FooOp similar to the -// following: -// -// class CIRFooOpLowering -// : public mlir::OpConversionPattern { -// public: -// using OpConversionPattern::OpConversionPattern; -// -// mlir::LogicalResult matchAndRewrite( -// mlir::cir::FooOp op, -// OpAdaptor adaptor, -// mlir::ConversionPatternRewriter &rewriter) const override { -// rewriter.replaceOpWithNewOp( -// op, adaptor.getOperands()[0], adaptor.getOperands()[1]); -// return mlir::success(); -// } -// } -// -// If you want fully customized LLVM IR lowering logic, simply exclude the -// `llvmOp` field from your CIR operation definition. -class LLVMLoweringInfo { - string llvmOp = ""; -} - -class CIR_Op traits = []> : - Op, LLVMLoweringInfo; - -//===----------------------------------------------------------------------===// -// FuncOp -//===----------------------------------------------------------------------===// - -// TODO(CIR): For starters, cir.func has only name, nothing else. The other -// properties of a function will be added over time as more of ClangIR is -// upstreamed. - -def FuncOp : CIR_Op<"func"> { - let summary = "Declare or define a function"; - let description = [{ - ... lots of text to be added later ... - }]; - - let arguments = (ins SymbolNameAttr:$sym_name); - - let skipDefaultBuilders = 1; - - let builders = [OpBuilder<(ins "StringRef":$name)>]; - - let hasCustomAssemblyFormat = 1; - let hasVerifier = 1; -} - #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c1adc7ecbf74..95e62326939f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -14,9 +14,6 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/DeclBase.h" -#include "clang/AST/GlobalDecl.h" -#include "clang/Basic/SourceManager.h" -#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/Location.h" @@ -27,134 +24,9 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &cgo, DiagnosticsEngine &diags) - : builder(&context), astCtx(astctx), langOpts(astctx.getLangOpts()), - theModule{mlir::ModuleOp::create(mlir::UnknownLoc::get(&context))}, - diags(diags), target(astCtx.getTargetInfo()) {} - -mlir::Location CIRGenModule::getLoc(SourceLocation cLoc) { - assert(cLoc.isValid() && "expected valid source location"); - const SourceManager &sm = astCtx.getSourceManager(); - PresumedLoc pLoc = sm.getPresumedLoc(cLoc); - StringRef filename = pLoc.getFilename(); - return mlir::FileLineColLoc::get(builder.getStringAttr(filename), - pLoc.getLine(), pLoc.getColumn()); -} - -mlir::Location CIRGenModule::getLoc(SourceRange cRange) { - assert(cRange.isValid() && "expected a valid source range"); - mlir::Location begin = getLoc(cRange.getBegin()); - mlir::Location end = getLoc(cRange.getEnd()); - mlir::Attribute metadata; - return mlir::FusedLoc::get({begin, end}, metadata, builder.getContext()); -} - -void CIRGenModule::buildGlobal(clang::GlobalDecl gd) { - const auto *global = cast(gd.getDecl()); - - if (const auto *fd = dyn_cast(global)) { - // Update deferred annotations with the latest declaration if the function - // was already used or defined. - if (fd->hasAttr()) - errorNYI(fd->getSourceRange(), "deferredAnnotations"); - if (!fd->doesThisDeclarationHaveABody()) { - if (!fd->doesDeclarationForceExternallyVisibleDefinition()) - return; - - errorNYI(fd->getSourceRange(), - "function declaration that forces code gen"); - return; - } - } else { - errorNYI(global->getSourceRange(), "global variable declaration"); - } - - // TODO(CIR): Defer emitting some global definitions until later - buildGlobalDefinition(gd); -} - -void CIRGenModule::buildGlobalFunctionDefinition(clang::GlobalDecl gd, - mlir::Operation *op) { - auto const *funcDecl = cast(gd.getDecl()); - auto funcOp = builder.create( - getLoc(funcDecl->getSourceRange()), funcDecl->getIdentifier()->getName()); - theModule.push_back(funcOp); -} - -void CIRGenModule::buildGlobalDefinition(clang::GlobalDecl gd, - mlir::Operation *op) { - const auto *decl = cast(gd.getDecl()); - if (const auto *fd = dyn_cast(decl)) { - // TODO(CIR): Skip generation of CIR for functions with available_externally - // linkage at -O0. - - if (const auto *method = dyn_cast(decl)) { - // Make sure to emit the definition(s) before we emit the thunks. This is - // necessary for the generation of certain thunks. - (void)method; - errorNYI(method->getSourceRange(), "member function"); - return; - } - - if (fd->isMultiVersion()) - errorNYI(fd->getSourceRange(), "multiversion functions"); - buildGlobalFunctionDefinition(gd, op); - return; - } - - llvm_unreachable("Invalid argument to CIRGenModule::buildGlobalDefinition"); -} + : astCtx(astctx), langOpts(astctx.getLangOpts()), + theModule{mlir::ModuleOp::create(mlir::UnknownLoc())}, + target(astCtx.getTargetInfo()) {} // Emit code for a single top level declaration. -void CIRGenModule::buildTopLevelDecl(Decl *decl) { - - // Ignore dependent declarations. - if (decl->isTemplated()) - return; - - switch (decl->getKind()) { - default: - errorNYI(decl->getBeginLoc(), "declaration of kind", - decl->getDeclKindName()); - break; - - case Decl::Function: { - auto *fd = cast(decl); - // Consteval functions shouldn't be emitted. - if (!fd->isConsteval()) - buildGlobal(fd); - break; - } - } -} - -DiagnosticBuilder CIRGenModule::errorNYI(llvm::StringRef feature) { - unsigned diagID = diags.getCustomDiagID( - DiagnosticsEngine::Error, "ClangIR code gen Not Yet Implemented: %0"); - return diags.Report(diagID) << feature; -} - -DiagnosticBuilder CIRGenModule::errorNYI(SourceLocation loc, - llvm::StringRef feature) { - unsigned diagID = diags.getCustomDiagID( - DiagnosticsEngine::Error, "ClangIR code gen Not Yet Implemented: %0"); - return diags.Report(loc, diagID) << feature; -} - -DiagnosticBuilder CIRGenModule::errorNYI(SourceLocation loc, - llvm::StringRef feature, - llvm::StringRef name) { - unsigned diagID = diags.getCustomDiagID( - DiagnosticsEngine::Error, "ClangIR code gen Not Yet Implemented: %0: %1"); - return diags.Report(loc, diagID) << feature << name; -} - -DiagnosticBuilder CIRGenModule::errorNYI(SourceRange loc, - llvm::StringRef feature) { - return errorNYI(loc.getBegin(), feature) << loc; -} - -DiagnosticBuilder CIRGenModule::errorNYI(SourceRange loc, - llvm::StringRef feature, - llvm::StringRef name) { - return errorNYI(loc.getBegin(), feature, name) << loc; -} +void CIRGenModule::buildTopLevelDecl(Decl *decl) {} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 2bf6a5d9c8f5..ab2a1d886465 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -15,21 +15,15 @@ #include "CIRGenTypeCache.h" -#include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" -#include "llvm/ADT/StringRef.h" namespace clang { class ASTContext; class CodeGenOptions; class Decl; -class DiagnosticBuilder; class DiagnosticsEngine; -class GlobalDecl; class LangOptions; -class SourceLocation; -class SourceRange; class TargetInfo; } // namespace clang @@ -50,10 +44,6 @@ class CIRGenModule : public CIRGenTypeCache { ~CIRGenModule() = default; private: - // TODO(CIR) 'builder' will change to CIRGenBuilderTy once that type is - // defined - mlir::OpBuilder builder; - /// Hold Clang AST information. clang::ASTContext &astCtx; @@ -62,34 +52,10 @@ class CIRGenModule : public CIRGenTypeCache { /// A "module" matches a c/cpp source file: containing a list of functions. mlir::ModuleOp theModule; - clang::DiagnosticsEngine &diags; - const clang::TargetInfo ⌖ public: - mlir::ModuleOp getModule() const { return theModule; } - - /// Helpers to convert the presumed location of Clang's SourceLocation to an - /// MLIR Location. - mlir::Location getLoc(clang::SourceLocation cLoc); - mlir::Location getLoc(clang::SourceRange cRange); - void buildTopLevelDecl(clang::Decl *decl); - - /// Emit code for a single global function or variable declaration. Forward - /// declarations are emitted lazily. - void buildGlobal(clang::GlobalDecl gd); - - void buildGlobalDefinition(clang::GlobalDecl gd, - mlir::Operation *op = nullptr); - void buildGlobalFunctionDefinition(clang::GlobalDecl gd, mlir::Operation *op); - - /// Helpers to emit "not yet implemented" error diagnostics - DiagnosticBuilder errorNYI(llvm::StringRef); - DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef); - DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef, llvm::StringRef); - DiagnosticBuilder errorNYI(SourceRange, llvm::StringRef); - DiagnosticBuilder errorNYI(SourceRange, llvm::StringRef, llvm::StringRef); }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 152124a00b2b..159355a99ece 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -12,11 +12,8 @@ #include "CIRGenModule.h" -#include "mlir/IR/MLIRContext.h" - #include "clang/AST/DeclGroup.h" #include "clang/CIR/CIRGenerator.h" -#include "clang/CIR/Dialect/IR/CIRDialect.h" using namespace cir; using namespace clang; @@ -34,14 +31,9 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { this->astCtx = &astCtx; - mlirCtx = std::make_unique(); - mlirCtx->loadDialect(); - cgm = std::make_unique(*mlirCtx.get(), astCtx, codeGenOpts, - diags); + cgm = std::make_unique(*mlirCtx, astCtx, codeGenOpts, diags); } -mlir::ModuleOp CIRGenerator::getModule() const { return cgm->getModule(); } - bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef group) { for (Decl *decl : group) diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp deleted file mode 100644 index 6d74d72b77dc..000000000000 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ /dev/null @@ -1,38 +0,0 @@ -//===- CIRAttrs.cpp - MLIR CIR Attributes ---------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file defines the attributes in the CIR dialect. -// -//===----------------------------------------------------------------------===// - -#include "clang/CIR/Dialect/IR/CIRDialect.h" - -using namespace mlir; -using namespace mlir::cir; - -//===----------------------------------------------------------------------===// -// General CIR parsing / printing -//===----------------------------------------------------------------------===// - -Attribute CIRDialect::parseAttribute(DialectAsmParser &parser, - Type type) const { - // No attributes yet to parse - return Attribute{}; -} - -void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const { - // No attributes yet to print -} - -//===----------------------------------------------------------------------===// -// CIR Dialect -//===----------------------------------------------------------------------===// - -void CIRDialect::registerAttributes() { - // No attributes yet to register -} diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index e0b38a2902bd..c2829c3ff2af 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -10,57 +10,4 @@ // //===----------------------------------------------------------------------===// -#include "clang/CIR/Dialect/IR/CIRDialect.h" - -#include "mlir/Support/LogicalResult.h" - -#include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc" - -using namespace mlir; -using namespace mlir::cir; - -//===----------------------------------------------------------------------===// -// CIR Dialect -//===----------------------------------------------------------------------===// - -void mlir::cir::CIRDialect::initialize() { - registerTypes(); - registerAttributes(); - addOperations< -#define GET_OP_LIST -#include "clang/CIR/Dialect/IR/CIROps.cpp.inc" - >(); -} - -//===----------------------------------------------------------------------===// -// FuncOp -//===----------------------------------------------------------------------===// - -void mlir::cir::FuncOp::build(OpBuilder &builder, OperationState &result, - StringRef name) { - result.addAttribute(SymbolTable::getSymbolAttrName(), - builder.getStringAttr(name)); -} - -ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { - StringAttr nameAttr; - if (parser.parseSymbolName(nameAttr, SymbolTable::getSymbolAttrName(), - state.attributes)) - return failure(); - return success(); -} - -void cir::FuncOp::print(OpAsmPrinter &p) { - p << ' '; - // For now the only property a function has is its name - p.printSymbolName(getSymName()); -} - -mlir::LogicalResult mlir::cir::FuncOp::verify() { return success(); } - -//===----------------------------------------------------------------------===// -// TableGen'd op method definitions -//===----------------------------------------------------------------------===// - -#define GET_OP_CLASSES -#include "clang/CIR/Dialect/IR/CIROps.cpp.inc" +#include diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp deleted file mode 100644 index 167c237ae551..000000000000 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ /dev/null @@ -1,37 +0,0 @@ -//===- CIRTypes.cpp - MLIR CIR Types --------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file defines the types in the CIR dialect. -// -//===----------------------------------------------------------------------===// - -#include "clang/CIR/Dialect/IR/CIRDialect.h" - -using namespace mlir; -using namespace mlir::cir; - -//===----------------------------------------------------------------------===// -// General CIR parsing / printing -//===----------------------------------------------------------------------===// - -Type CIRDialect::parseType(DialectAsmParser &parser) const { - // No types yet to parse - return Type{}; -} - -void CIRDialect::printType(Type type, DialectAsmPrinter &os) const { - // No types yet to print -} - -//===----------------------------------------------------------------------===// -// CIR Dialect -//===----------------------------------------------------------------------===// - -void CIRDialect::registerTypes() { - // No types yet to register -} diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 1518e8c76060..0d7476b55570 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -1,8 +1,3 @@ add_clang_library(MLIRCIR - CIRAttrs.cpp CIRDialect.cpp - CIRTypes.cpp - - LINK_LIBS PUBLIC - MLIRIR ) diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 5a31e2070819..72b9fa0c13c5 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -22,11 +22,8 @@ class CIRGenConsumer : public clang::ASTConsumer { virtual void anchor(); - CIRGenAction::OutputType Action; - std::unique_ptr OutputStream; - ASTContext *Context{nullptr}; IntrusiveRefCntPtr FS; std::unique_ptr Gen; @@ -40,37 +37,14 @@ class CIRGenConsumer : public clang::ASTConsumer { const LangOptions &LangOptions, const FrontendOptions &FEOptions, std::unique_ptr OS) - : Action(Action), OutputStream(std::move(OS)), FS(VFS), + : OutputStream(std::move(OS)), FS(VFS), Gen(std::make_unique(DiagnosticsEngine, std::move(VFS), CodeGenOptions)) {} - void Initialize(ASTContext &Ctx) override { - assert(!Context && "initialized multiple times"); - Context = &Ctx; - Gen->Initialize(Ctx); - } - bool HandleTopLevelDecl(DeclGroupRef D) override { Gen->HandleTopLevelDecl(D); return true; } - - void HandleTranslationUnit(ASTContext &C) override { - Gen->HandleTranslationUnit(C); - mlir::ModuleOp MlirModule = Gen->getModule(); - switch (Action) { - case CIRGenAction::OutputType::EmitCIR: - if (OutputStream && MlirModule) { - mlir::OpPrintingFlags Flags; - Flags.enableDebugInfo(/*enable=*/true, /*prettyForm=*/false); - MlirModule->print(*OutputStream, Flags); - } - break; - default: - llvm_unreachable("NYI: CIRGenAction other than EmitCIR"); - break; - } - } }; } // namespace cir @@ -81,23 +55,10 @@ CIRGenAction::CIRGenAction(OutputType Act, mlir::MLIRContext *MLIRCtx) CIRGenAction::~CIRGenAction() { MLIRMod.release(); } -static std::unique_ptr -getOutputStream(CompilerInstance &CI, StringRef InFile, - CIRGenAction::OutputType Action) { - switch (Action) { - case CIRGenAction::OutputType::EmitCIR: - return CI.createDefaultOutputFile(false, InFile, "cir"); - } - llvm_unreachable("Invalid CIRGenAction::OutputType"); -} - std::unique_ptr CIRGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { std::unique_ptr Out = CI.takeOutputStream(); - if (!Out) - Out = getOutputStream(CI, InFile, Action); - auto Result = std::make_unique( Action, CI.getDiagnostics(), &CI.getVirtualFileSystem(), CI.getHeaderSearchOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(), diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 518113e20cb0..5f5421e72eb6 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5412,8 +5412,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } else if (JA.getType() == types::TY_RewrittenLegacyObjC) { CmdArgs.push_back("-rewrite-objc"); rewriteKind = RK_Fragile; - } else if (JA.getType() == types::TY_CIR) { - CmdArgs.push_back("-emit-cir"); } else { assert(JA.getType() == types::TY_PP_Asm && "Unexpected output type!"); } diff --git a/clang/test/CIR/hello.c b/clang/test/CIR/hello.c index 4b07c04994aa..61f38d0a5bd0 100644 --- a/clang/test/CIR/hello.c +++ b/clang/test/CIR/hello.c @@ -1,5 +1,5 @@ -// Smoke test for ClangIR code generation -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s | FileCheck --allow-empty %s +// just confirm that we don't crash +// CHECK-NOT: * void foo() {} -// CHECK: cir.func @foo From 73cb737cb7c21614edbec128a530173ffce14315 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:06:59 -0800 Subject: [PATCH 0012/2301] Revert "[CIR] Build out AST consumer patterns to reach the entry point into CIRGen" This reverts commit 1bb52e94621d2cba4f34504697cb0ea83805cb98. --- clang/include/clang/CIR/CIRGenerator.h | 60 ---------------- .../clang/CIR/FrontendAction/CIRGenAction.h | 60 ---------------- clang/include/clang/Driver/Options.td | 2 +- clang/lib/CIR/CMakeLists.txt | 2 - clang/lib/CIR/CodeGen/CIRGenModule.cpp | 32 --------- clang/lib/CIR/CodeGen/CIRGenModule.h | 62 ---------------- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 27 ------- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 43 ----------- clang/lib/CIR/CodeGen/CMakeLists.txt | 23 ------ clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 72 ------------------- clang/lib/CIR/FrontendAction/CMakeLists.txt | 17 ----- clang/lib/Driver/ToolChains/Clang.cpp | 3 - clang/lib/FrontendTool/CMakeLists.txt | 15 ---- .../ExecuteCompilerInvocation.cpp | 16 ----- clang/test/CIR/hello.c | 5 -- clang/test/CIR/lit.local.cfg | 2 - 16 files changed, 1 insertion(+), 440 deletions(-) delete mode 100644 clang/include/clang/CIR/CIRGenerator.h delete mode 100644 clang/include/clang/CIR/FrontendAction/CIRGenAction.h delete mode 100644 clang/lib/CIR/CodeGen/CIRGenModule.cpp delete mode 100644 clang/lib/CIR/CodeGen/CIRGenModule.h delete mode 100644 clang/lib/CIR/CodeGen/CIRGenTypeCache.h delete mode 100644 clang/lib/CIR/CodeGen/CIRGenerator.cpp delete mode 100644 clang/lib/CIR/CodeGen/CMakeLists.txt delete mode 100644 clang/lib/CIR/FrontendAction/CIRGenAction.cpp delete mode 100644 clang/lib/CIR/FrontendAction/CMakeLists.txt delete mode 100644 clang/test/CIR/hello.c delete mode 100644 clang/test/CIR/lit.local.cfg diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h deleted file mode 100644 index 9a8930ac46ea..000000000000 --- a/clang/include/clang/CIR/CIRGenerator.h +++ /dev/null @@ -1,60 +0,0 @@ -//===- CIRGenerator.h - CIR Generation from Clang AST ---------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file declares a simple interface to perform CIR generation from Clang -// AST -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_CIR_CIRGENERATOR_H -#define LLVM_CLANG_CIR_CIRGENERATOR_H - -#include "clang/AST/ASTConsumer.h" -#include "clang/Basic/CodeGenOptions.h" - -#include "llvm/ADT/IntrusiveRefCntPtr.h" -#include "llvm/Support/VirtualFileSystem.h" - -#include - -namespace clang { -class DeclGroupRef; -class DiagnosticsEngine; -} // namespace clang - -namespace mlir { -class MLIRContext; -} // namespace mlir -namespace cir { -class CIRGenModule; - -class CIRGenerator : public clang::ASTConsumer { - virtual void anchor(); - clang::DiagnosticsEngine &diags; - clang::ASTContext *astCtx; - // Only used for debug info. - llvm::IntrusiveRefCntPtr fs; - - const clang::CodeGenOptions &codeGenOpts; - -protected: - std::unique_ptr mlirCtx; - std::unique_ptr cgm; - -public: - CIRGenerator(clang::DiagnosticsEngine &diags, - llvm::IntrusiveRefCntPtr fs, - const clang::CodeGenOptions &cgo); - ~CIRGenerator() override; - void Initialize(clang::ASTContext &astCtx) override; - bool HandleTopLevelDecl(clang::DeclGroupRef group) override; -}; - -} // namespace cir - -#endif // LLVM_CLANG_CIR_CIRGENERATOR_H diff --git a/clang/include/clang/CIR/FrontendAction/CIRGenAction.h b/clang/include/clang/CIR/FrontendAction/CIRGenAction.h deleted file mode 100644 index 2ab612613b73..000000000000 --- a/clang/include/clang/CIR/FrontendAction/CIRGenAction.h +++ /dev/null @@ -1,60 +0,0 @@ -//===---- CIRGenAction.h - CIR Code Generation Frontend Action -*- C++ -*--===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_CIR_CIRGENACTION_H -#define LLVM_CLANG_CIR_CIRGENACTION_H - -#include "clang/Frontend/FrontendAction.h" - -#include "mlir/IR/BuiltinOps.h" -#include "mlir/IR/OwningOpRef.h" - -namespace mlir { -class MLIRContext; -class ModuleOp; -} // namespace mlir - -namespace cir { -class CIRGenConsumer; - -class CIRGenAction : public clang::ASTFrontendAction { -public: - enum class OutputType { - EmitCIR, - }; - -private: - friend class CIRGenConsumer; - - mlir::OwningOpRef MLIRMod; - - mlir::MLIRContext *MLIRCtx; - -protected: - CIRGenAction(OutputType Action, mlir::MLIRContext *MLIRCtx = nullptr); - - std::unique_ptr - CreateASTConsumer(clang::CompilerInstance &CI, - llvm::StringRef InFile) override; - -public: - ~CIRGenAction() override; - - OutputType Action; -}; - -class EmitCIRAction : public CIRGenAction { - virtual void anchor(); - -public: - EmitCIRAction(mlir::MLIRContext *MLIRCtx = nullptr); -}; - -} // namespace cir - -#endif diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index df705104d9ea..56ab3823f656 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3038,7 +3038,7 @@ defm clangir : BoolFOption<"clangir", PosFlag, NegFlag LLVM pipeline to compile">, BothFlags<[], [ClangOption, CC1Option], "">>; -def emit_cir : Flag<["-"], "emit-cir">, Visibility<[ClangOption, CC1Option]>, +def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, Group, HelpText<"Build ASTs and then lower to ClangIR">; /// ClangIR-specific options - END diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 11cca734808d..d2ff200e0da5 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -2,5 +2,3 @@ include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) add_subdirectory(Dialect) -add_subdirectory(CodeGen) -add_subdirectory(FrontendAction) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp deleted file mode 100644 index 95e62326939f..000000000000 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ /dev/null @@ -1,32 +0,0 @@ -//===- CIRGenModule.cpp - Per-Module state for CIR generation -------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This is the internal per-translation-unit state used for CIR translation. -// -//===----------------------------------------------------------------------===// - -#include "CIRGenModule.h" - -#include "clang/AST/ASTContext.h" -#include "clang/AST/DeclBase.h" - -#include "mlir/IR/BuiltinOps.h" -#include "mlir/IR/Location.h" -#include "mlir/IR/MLIRContext.h" - -using namespace cir; -CIRGenModule::CIRGenModule(mlir::MLIRContext &context, - clang::ASTContext &astctx, - const clang::CodeGenOptions &cgo, - DiagnosticsEngine &diags) - : astCtx(astctx), langOpts(astctx.getLangOpts()), - theModule{mlir::ModuleOp::create(mlir::UnknownLoc())}, - target(astCtx.getTargetInfo()) {} - -// Emit code for a single top level declaration. -void CIRGenModule::buildTopLevelDecl(Decl *decl) {} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h deleted file mode 100644 index ab2a1d886465..000000000000 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ /dev/null @@ -1,62 +0,0 @@ -//===--- CIRGenModule.h - Per-Module state for CIR gen ----------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This is the internal per-translation-unit state used for CIR translation. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENMODULE_H -#define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENMODULE_H - -#include "CIRGenTypeCache.h" - -#include "mlir/IR/BuiltinOps.h" -#include "mlir/IR/MLIRContext.h" - -namespace clang { -class ASTContext; -class CodeGenOptions; -class Decl; -class DiagnosticsEngine; -class LangOptions; -class TargetInfo; -} // namespace clang - -using namespace clang; -namespace cir { - -/// This class organizes the cross-function state that is used while generating -/// CIR code. -class CIRGenModule : public CIRGenTypeCache { - CIRGenModule(CIRGenModule &) = delete; - CIRGenModule &operator=(CIRGenModule &) = delete; - -public: - CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, - const clang::CodeGenOptions &cgo, - clang::DiagnosticsEngine &diags); - - ~CIRGenModule() = default; - -private: - /// Hold Clang AST information. - clang::ASTContext &astCtx; - - const clang::LangOptions &langOpts; - - /// A "module" matches a c/cpp source file: containing a list of functions. - mlir::ModuleOp theModule; - - const clang::TargetInfo ⌖ - -public: - void buildTopLevelDecl(clang::Decl *decl); -}; -} // namespace cir - -#endif // LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENMODULE_H diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h deleted file mode 100644 index 6478e0a07809..000000000000 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ /dev/null @@ -1,27 +0,0 @@ -//===--- CIRGenTypeCache.h - Commonly used LLVM types and info -*- C++ --*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This structure provides a set of common types useful during CIR emission. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_LIB_CIR_CIRGENTYPECACHE_H -#define LLVM_CLANG_LIB_CIR_CIRGENTYPECACHE_H - -namespace cir { - -/// This structure provides a set of types that are commonly used -/// during IR emission. It's initialized once in CodeGenModule's -/// constructor and then copied around into new CIRGenFunction's. -struct CIRGenTypeCache { - CIRGenTypeCache() = default; -}; - -} // namespace cir - -#endif // LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENTYPECACHE_H diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp deleted file mode 100644 index 159355a99ece..000000000000 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ /dev/null @@ -1,43 +0,0 @@ -//===--- CIRGenerator.cpp - Emit CIR from ASTs ----------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This builds an AST and converts it to CIR. -// -//===----------------------------------------------------------------------===// - -#include "CIRGenModule.h" - -#include "clang/AST/DeclGroup.h" -#include "clang/CIR/CIRGenerator.h" - -using namespace cir; -using namespace clang; - -void CIRGenerator::anchor() {} - -CIRGenerator::CIRGenerator(clang::DiagnosticsEngine &diags, - llvm::IntrusiveRefCntPtr vfs, - const CodeGenOptions &cgo) - : diags(diags), fs(std::move(vfs)), codeGenOpts{cgo} {} -CIRGenerator::~CIRGenerator() = default; - -void CIRGenerator::Initialize(ASTContext &astCtx) { - using namespace llvm; - - this->astCtx = &astCtx; - - cgm = std::make_unique(*mlirCtx, astCtx, codeGenOpts, diags); -} - -bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef group) { - - for (Decl *decl : group) - cgm->buildTopLevelDecl(decl); - - return true; -} diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt deleted file mode 100644 index 17a3aabfbd7f..000000000000 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ /dev/null @@ -1,23 +0,0 @@ -set( - LLVM_LINK_COMPONENTS - Core - Support -) - -get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) - -add_clang_library(clangCIR - CIRGenerator.cpp - CIRGenModule.cpp - - DEPENDS - MLIRCIR - ${dialect_libs} - - LINK_LIBS - clangAST - clangBasic - clangLex - ${dialect_libs} - MLIRCIR -) diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp deleted file mode 100644 index 72b9fa0c13c5..000000000000 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ /dev/null @@ -1,72 +0,0 @@ -//===--- CIRGenAction.cpp - LLVM Code generation Frontend Action ---------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "clang/CIR/FrontendAction/CIRGenAction.h" -#include "clang/CIR/CIRGenerator.h" -#include "clang/Frontend/CompilerInstance.h" - -#include "mlir/IR/MLIRContext.h" -#include "mlir/IR/OwningOpRef.h" - -using namespace cir; -using namespace clang; - -namespace cir { - -class CIRGenConsumer : public clang::ASTConsumer { - - virtual void anchor(); - - std::unique_ptr OutputStream; - - IntrusiveRefCntPtr FS; - std::unique_ptr Gen; - -public: - CIRGenConsumer(CIRGenAction::OutputType Action, - DiagnosticsEngine &DiagnosticsEngine, - IntrusiveRefCntPtr VFS, - const HeaderSearchOptions &HeaderSearchOptions, - const CodeGenOptions &CodeGenOptions, - const TargetOptions &TargetOptions, - const LangOptions &LangOptions, - const FrontendOptions &FEOptions, - std::unique_ptr OS) - : OutputStream(std::move(OS)), FS(VFS), - Gen(std::make_unique(DiagnosticsEngine, std::move(VFS), - CodeGenOptions)) {} - - bool HandleTopLevelDecl(DeclGroupRef D) override { - Gen->HandleTopLevelDecl(D); - return true; - } -}; -} // namespace cir - -void CIRGenConsumer::anchor() {} - -CIRGenAction::CIRGenAction(OutputType Act, mlir::MLIRContext *MLIRCtx) - : MLIRCtx(MLIRCtx ? MLIRCtx : new mlir::MLIRContext), Action(Act) {} - -CIRGenAction::~CIRGenAction() { MLIRMod.release(); } - -std::unique_ptr -CIRGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { - std::unique_ptr Out = CI.takeOutputStream(); - - auto Result = std::make_unique( - Action, CI.getDiagnostics(), &CI.getVirtualFileSystem(), - CI.getHeaderSearchOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(), - CI.getLangOpts(), CI.getFrontendOpts(), std::move(Out)); - - return Result; -} - -void EmitCIRAction::anchor() {} -EmitCIRAction::EmitCIRAction(mlir::MLIRContext *MLIRCtx) - : CIRGenAction(OutputType::EmitCIR, MLIRCtx) {} diff --git a/clang/lib/CIR/FrontendAction/CMakeLists.txt b/clang/lib/CIR/FrontendAction/CMakeLists.txt deleted file mode 100644 index b0616ab5d64b..000000000000 --- a/clang/lib/CIR/FrontendAction/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -set(LLVM_LINK_COMPONENTS - Core - Support - ) - -get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) - -add_clang_library(clangCIRFrontendAction - CIRGenAction.cpp - - LINK_LIBS - clangAST - clangFrontend - clangCIR - MLIRCIR - MLIRIR - ) diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 5f5421e72eb6..07ceab47112b 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5241,9 +5241,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } } - if (Args.hasArg(options::OPT_fclangir)) - CmdArgs.push_back("-fclangir"); - if (IsOpenMPDevice) { // We have to pass the triple of the host if compiling for an OpenMP device. std::string NormalizedTriple = diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index bfc7652b4c11..51c379ade270 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -12,15 +12,6 @@ set(link_libs clangRewriteFrontend ) -set(deps) - -if(CLANG_ENABLE_CIR) - list(APPEND link_libs - clangCIRFrontendAction - MLIRIR - ) -endif() - if(CLANG_ENABLE_ARCMT) list(APPEND link_libs clangARCMigrate @@ -38,13 +29,7 @@ add_clang_library(clangFrontendTool DEPENDS ClangDriverOptions - ${deps} LINK_LIBS ${link_libs} ) - -if(CLANG_ENABLE_CIR) - target_include_directories(clangFrontendTool PRIVATE ${LLVM_MAIN_SRC_DIR}/../mlir/include) - target_include_directories(clangFrontendTool PRIVATE ${CMAKE_BINARY_DIR}/tools/mlir/include) -endif() diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 60fde03289cf..7476b1076d10 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -31,11 +31,6 @@ #include "llvm/Support/BuryPointer.h" #include "llvm/Support/DynamicLibrary.h" #include "llvm/Support/ErrorHandling.h" - -#if CLANG_ENABLE_CIR -#include "clang/CIR/FrontendAction/CIRGenAction.h" -#endif - using namespace clang; using namespace llvm::opt; @@ -47,13 +42,6 @@ CreateFrontendBaseAction(CompilerInstance &CI) { StringRef Action("unknown"); (void)Action; - unsigned UseCIR = CI.getFrontendOpts().UseClangIRPipeline; - frontend::ActionKind Act = CI.getFrontendOpts().ProgramAction; - bool EmitsCIR = Act == EmitCIR; - - if (!UseCIR && EmitsCIR) - llvm::report_fatal_error("-emit-cir and only valid when using -fclangir"); - switch (CI.getFrontendOpts().ProgramAction) { case ASTDeclList: return std::make_unique(); case ASTDump: return std::make_unique(); @@ -66,11 +54,7 @@ CreateFrontendBaseAction(CompilerInstance &CI) { case EmitAssembly: return std::make_unique(); case EmitBC: return std::make_unique(); case EmitCIR: -#if CLANG_ENABLE_CIR - return std::make_unique<::cir::EmitCIRAction>(); -#else llvm_unreachable("CIR suppport not built into clang"); -#endif case EmitHTML: return std::make_unique(); case EmitLLVM: return std::make_unique(); case EmitLLVMOnly: return std::make_unique(); diff --git a/clang/test/CIR/hello.c b/clang/test/CIR/hello.c deleted file mode 100644 index 61f38d0a5bd0..000000000000 --- a/clang/test/CIR/hello.c +++ /dev/null @@ -1,5 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s | FileCheck --allow-empty %s - -// just confirm that we don't crash -// CHECK-NOT: * -void foo() {} diff --git a/clang/test/CIR/lit.local.cfg b/clang/test/CIR/lit.local.cfg deleted file mode 100644 index 6afd60f47bff..000000000000 --- a/clang/test/CIR/lit.local.cfg +++ /dev/null @@ -1,2 +0,0 @@ -if not config.root.clang_enable_cir: - config.unsupported = True From f1e77f22402f1a41408648b50bd03d961e3c79a2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:07:07 -0800 Subject: [PATCH 0013/2301] Revert "[CIR] Add .clang-tidy files for ClangIR specific coding style rules" This reverts commit dd0fe4fb7440182d8101135bfd694b2d84893c2e. --- clang/include/clang/CIR/.clang-tidy | 62 ------------------- .../clang/CIRFrontendAction/.clang-tidy | 53 ---------------- clang/lib/CIR/.clang-tidy | 62 ------------------- 3 files changed, 177 deletions(-) delete mode 100644 clang/include/clang/CIR/.clang-tidy delete mode 100644 clang/include/clang/CIRFrontendAction/.clang-tidy delete mode 100644 clang/lib/CIR/.clang-tidy diff --git a/clang/include/clang/CIR/.clang-tidy b/clang/include/clang/CIR/.clang-tidy deleted file mode 100644 index aaba4585494d..000000000000 --- a/clang/include/clang/CIR/.clang-tidy +++ /dev/null @@ -1,62 +0,0 @@ -InheritParentConfig: true -Checks: > - -misc-const-correctness, - -llvm-header-guard, - bugprone-argument-comment, - bugprone-assert-side-effect, - bugprone-branch-clone, - bugprone-copy-constructor-init, - bugprone-dangling-handle, - bugprone-dynamic-static-initializers, - bugprone-macro-parentheses, - bugprone-macro-repeated-side-effects, - bugprone-misplaced-widening-cast, - bugprone-move-forwarding-reference, - bugprone-multiple-statement-macro, - bugprone-suspicious-semicolon, - bugprone-swapped-arguments, - bugprone-terminating-continue, - bugprone-unused-raii, - bugprone-unused-return-value, - misc-redundant-expression, - misc-static-assert, - misc-unused-using-decls, - modernize-use-bool-literals, - modernize-loop-convert, - modernize-make-unique, - modernize-raw-string-literal, - modernize-use-equals-default, - modernize-use-default-member-init, - modernize-use-emplace, - modernize-use-nullptr, - modernize-use-override, - modernize-use-using, - performance-for-range-copy, - performance-implicit-conversion-in-loop, - performance-inefficient-algorithm, - performance-inefficient-vector-operation, - performance-move-const-arg, - performance-no-automatic-move, - performance-trivially-destructible, - performance-unnecessary-copy-initialization, - performance-unnecessary-value-param, - readability-avoid-const-params-in-decls, - readability-const-return-type, - readability-container-size-empty, - readability-identifier-naming, - readability-inconsistent-declaration-parameter-name, - readability-misleading-indentation, - readability-redundant-control-flow, - readability-redundant-smartptr-get, - readability-simplify-boolean-expr, - readability-simplify-subscript-expr, - readability-use-anyofallof - - -CheckOptions: - - key: readability-identifier-naming.MemberCase - value: camelBack - - key: readability-identifier-naming.ParameterCase - value: camelBack - - key: readability-identifier-naming.VariableCase - value: camelBack diff --git a/clang/include/clang/CIRFrontendAction/.clang-tidy b/clang/include/clang/CIRFrontendAction/.clang-tidy deleted file mode 100644 index ef88dbcec488..000000000000 --- a/clang/include/clang/CIRFrontendAction/.clang-tidy +++ /dev/null @@ -1,53 +0,0 @@ -InheritParentConfig: true -Checks: > - -misc-const-correctness, - -llvm-header-guard, - bugprone-argument-comment, - bugprone-assert-side-effect, - bugprone-branch-clone, - bugprone-copy-constructor-init, - bugprone-dangling-handle, - bugprone-dynamic-static-initializers, - bugprone-macro-parentheses, - bugprone-macro-repeated-side-effects, - bugprone-misplaced-widening-cast, - bugprone-move-forwarding-reference, - bugprone-multiple-statement-macro, - bugprone-suspicious-semicolon, - bugprone-swapped-arguments, - bugprone-terminating-continue, - bugprone-unused-raii, - bugprone-unused-return-value, - misc-redundant-expression, - misc-static-assert, - misc-unused-using-decls, - modernize-use-bool-literals, - modernize-loop-convert, - modernize-make-unique, - modernize-raw-string-literal, - modernize-use-equals-default, - modernize-use-default-member-init, - modernize-use-emplace, - modernize-use-nullptr, - modernize-use-override, - modernize-use-using, - performance-for-range-copy, - performance-implicit-conversion-in-loop, - performance-inefficient-algorithm, - performance-inefficient-vector-operation, - performance-move-const-arg, - performance-no-automatic-move, - performance-trivially-destructible, - performance-unnecessary-copy-initialization, - performance-unnecessary-value-param, - readability-avoid-const-params-in-decls, - readability-const-return-type, - readability-container-size-empty, - readability-identifier-naming, - readability-inconsistent-declaration-parameter-name, - readability-misleading-indentation, - readability-redundant-control-flow, - readability-redundant-smartptr-get, - readability-simplify-boolean-expr, - readability-simplify-subscript-expr, - readability-use-anyofallof diff --git a/clang/lib/CIR/.clang-tidy b/clang/lib/CIR/.clang-tidy deleted file mode 100644 index aaba4585494d..000000000000 --- a/clang/lib/CIR/.clang-tidy +++ /dev/null @@ -1,62 +0,0 @@ -InheritParentConfig: true -Checks: > - -misc-const-correctness, - -llvm-header-guard, - bugprone-argument-comment, - bugprone-assert-side-effect, - bugprone-branch-clone, - bugprone-copy-constructor-init, - bugprone-dangling-handle, - bugprone-dynamic-static-initializers, - bugprone-macro-parentheses, - bugprone-macro-repeated-side-effects, - bugprone-misplaced-widening-cast, - bugprone-move-forwarding-reference, - bugprone-multiple-statement-macro, - bugprone-suspicious-semicolon, - bugprone-swapped-arguments, - bugprone-terminating-continue, - bugprone-unused-raii, - bugprone-unused-return-value, - misc-redundant-expression, - misc-static-assert, - misc-unused-using-decls, - modernize-use-bool-literals, - modernize-loop-convert, - modernize-make-unique, - modernize-raw-string-literal, - modernize-use-equals-default, - modernize-use-default-member-init, - modernize-use-emplace, - modernize-use-nullptr, - modernize-use-override, - modernize-use-using, - performance-for-range-copy, - performance-implicit-conversion-in-loop, - performance-inefficient-algorithm, - performance-inefficient-vector-operation, - performance-move-const-arg, - performance-no-automatic-move, - performance-trivially-destructible, - performance-unnecessary-copy-initialization, - performance-unnecessary-value-param, - readability-avoid-const-params-in-decls, - readability-const-return-type, - readability-container-size-empty, - readability-identifier-naming, - readability-inconsistent-declaration-parameter-name, - readability-misleading-indentation, - readability-redundant-control-flow, - readability-redundant-smartptr-get, - readability-simplify-boolean-expr, - readability-simplify-subscript-expr, - readability-use-anyofallof - - -CheckOptions: - - key: readability-identifier-naming.MemberCase - value: camelBack - - key: readability-identifier-naming.ParameterCase - value: camelBack - - key: readability-identifier-naming.VariableCase - value: camelBack From 79ca74a045d301ccf20bf625c2a022597b183c65 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:08:06 -0800 Subject: [PATCH 0014/2301] Revert "[CIR] Add options to emit ClangIR and enable the ClangIR pipeline" This reverts commit 359ab3aebba302fb4c37373b9907bc8880be7363. --- clang/include/clang/Driver/Options.td | 11 ----------- clang/include/clang/Frontend/FrontendOptions.h | 10 +--------- clang/lib/Driver/Driver.cpp | 3 --- clang/lib/Frontend/CompilerInvocation.cpp | 4 ---- clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp | 2 -- 5 files changed, 1 insertion(+), 29 deletions(-) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 56ab3823f656..a70335f7bbfb 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3031,17 +3031,6 @@ def flax_vector_conversions : Flag<["-"], "flax-vector-conversions">, Group, Group, HelpText<"Force linking the clang builtins runtime library">; - -/// ClangIR-specific options - BEGIN -defm clangir : BoolFOption<"clangir", - FrontendOpts<"UseClangIRPipeline">, DefaultFalse, - PosFlag, - NegFlag LLVM pipeline to compile">, - BothFlags<[], [ClangOption, CC1Option], "">>; -def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, - Group, HelpText<"Build ASTs and then lower to ClangIR">; -/// ClangIR-specific options - END - def flto_EQ : Joined<["-"], "flto=">, Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>, Group, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 8241925c9847..f374ba9c3734 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -65,9 +65,6 @@ enum ActionKind { /// Translate input source into HTML. EmitHTML, - /// Emit a .cir file - EmitCIR, - /// Emit a .ll file. EmitLLVM, @@ -415,10 +412,6 @@ class FrontendOptions { LLVM_PREFERRED_TYPE(bool) unsigned GenReducedBMI : 1; - /// Use Clang IR pipeline to emit code - LLVM_PREFERRED_TYPE(bool) - unsigned UseClangIRPipeline : 1; - CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. @@ -606,8 +599,7 @@ class FrontendOptions { EmitSymbolGraph(false), EmitExtensionSymbolGraphs(false), EmitSymbolGraphSymbolLabelsForTesting(false), EmitPrettySymbolGraphs(false), GenReducedBMI(false), - UseClangIRPipeline(false), TimeTraceGranularity(500), - TimeTraceVerbose(false) {} + TimeTraceGranularity(500), TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file /// extension. For example, "c" would return Language::C. diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 87855fdb7997..39fa20ad9881 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -420,7 +420,6 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL, (PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) || (PhaseArg = DAL.getLastArg(options::OPT__migrate)) || (PhaseArg = DAL.getLastArg(options::OPT__analyze)) || - (PhaseArg = DAL.getLastArg(options::OPT_emit_cir)) || (PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) { FinalPhase = phases::Compile; @@ -5082,8 +5081,6 @@ Action *Driver::ConstructPhaseAction( return C.MakeAction(Input, types::TY_Remap); if (Args.hasArg(options::OPT_emit_ast)) return C.MakeAction(Input, types::TY_AST); - if (Args.hasArg(options::OPT_emit_cir)) - return C.MakeAction(Input, types::TY_CIR); if (Args.hasArg(options::OPT_module_file_info)) return C.MakeAction(Input, types::TY_ModuleFile); if (Args.hasArg(options::OPT_verify_pch)) diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 3bf124e4827b..5410bf1fbd78 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -2727,7 +2727,6 @@ static const auto &getFrontendActionTable() { {frontend::DumpTokens, OPT_dump_tokens}, {frontend::EmitAssembly, OPT_S}, {frontend::EmitBC, OPT_emit_llvm_bc}, - {frontend::EmitCIR, OPT_emit_cir}, {frontend::EmitHTML, OPT_emit_html}, {frontend::EmitLLVM, OPT_emit_llvm}, {frontend::EmitLLVMOnly, OPT_emit_llvm_only}, @@ -3091,8 +3090,6 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Opts.ProgramAction != frontend::GenerateModule && Opts.IsSystemModule) Diags.Report(diag::err_drv_argument_only_allowed_with) << "-fsystem-module" << "-emit-module"; - if (Args.hasArg(OPT_fclangir) || Args.hasArg(OPT_emit_cir)) - Opts.UseClangIRPipeline = true; if (Args.hasArg(OPT_aux_target_cpu)) Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu)); @@ -4613,7 +4610,6 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) { case frontend::ASTView: case frontend::EmitAssembly: case frontend::EmitBC: - case frontend::EmitCIR: case frontend::EmitHTML: case frontend::EmitLLVM: case frontend::EmitLLVMOnly: diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 7476b1076d10..f85f0365616f 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -53,8 +53,6 @@ CreateFrontendBaseAction(CompilerInstance &CI) { case DumpTokens: return std::make_unique(); case EmitAssembly: return std::make_unique(); case EmitBC: return std::make_unique(); - case EmitCIR: - llvm_unreachable("CIR suppport not built into clang"); case EmitHTML: return std::make_unique(); case EmitLLVM: return std::make_unique(); case EmitLLVMOnly: return std::make_unique(); From 276eb42b6209f457797cfb7d71985a0b7153a052 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:08:18 -0800 Subject: [PATCH 0015/2301] Revert "[CIR][NFC] Add scaffolding for the CIR dialect and CIROps.td" This reverts commit 10661ba2403f73cd2c4b76ebd177fdcf9261cbf2. --- clang/CMakeLists.txt | 4 -- clang/include/clang/CIR/CMakeLists.txt | 6 --- .../include/clang/CIR/Dialect/CMakeLists.txt | 1 - .../include/clang/CIR/Dialect/IR/CIRDialect.h | 16 ------- .../clang/CIR/Dialect/IR/CIRDialect.td | 44 ------------------- clang/include/clang/CIR/Dialect/IR/CIROps.td | 19 -------- .../clang/CIR/Dialect/IR/CMakeLists.txt | 16 ------- clang/lib/CIR/CMakeLists.txt | 4 -- clang/lib/CIR/Dialect/CMakeLists.txt | 1 - clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 13 ------ clang/lib/CIR/Dialect/IR/CMakeLists.txt | 3 -- 11 files changed, 127 deletions(-) delete mode 100644 clang/include/clang/CIR/Dialect/CMakeLists.txt delete mode 100644 clang/include/clang/CIR/Dialect/IR/CIRDialect.h delete mode 100644 clang/include/clang/CIR/Dialect/IR/CIRDialect.td delete mode 100644 clang/include/clang/CIR/Dialect/IR/CIROps.td delete mode 100644 clang/include/clang/CIR/Dialect/IR/CMakeLists.txt delete mode 100644 clang/lib/CIR/Dialect/CMakeLists.txt delete mode 100644 clang/lib/CIR/Dialect/IR/CIRDialect.cpp delete mode 100644 clang/lib/CIR/Dialect/IR/CMakeLists.txt diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt index b79e570667b2..f54cec7d4f4e 100644 --- a/clang/CMakeLists.txt +++ b/clang/CMakeLists.txt @@ -167,10 +167,6 @@ if(CLANG_ENABLE_LIBXML2) endif() if(CLANG_ENABLE_CIR) - if (CLANG_BUILT_STANDALONE) - message(FATAL_ERROR - "ClangIR is not yet supported in the standalone build.") - endif() if (NOT "${LLVM_ENABLE_PROJECTS}" MATCHES "MLIR|mlir") message(FATAL_ERROR "Cannot build ClangIR without MLIR in LLVM_ENABLE_PROJECTS") diff --git a/clang/include/clang/CIR/CMakeLists.txt b/clang/include/clang/CIR/CMakeLists.txt index f8d6f407a03d..e69de29bb2d1 100644 --- a/clang/include/clang/CIR/CMakeLists.txt +++ b/clang/include/clang/CIR/CMakeLists.txt @@ -1,6 +0,0 @@ -set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir -set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include) -include_directories(${MLIR_INCLUDE_DIR}) -include_directories(${MLIR_TABLEGEN_OUTPUT_DIR}) - -add_subdirectory(Dialect) diff --git a/clang/include/clang/CIR/Dialect/CMakeLists.txt b/clang/include/clang/CIR/Dialect/CMakeLists.txt deleted file mode 100644 index f33061b2d87c..000000000000 --- a/clang/include/clang/CIR/Dialect/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -add_subdirectory(IR) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h deleted file mode 100644 index d53e5d1663d6..000000000000 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ /dev/null @@ -1,16 +0,0 @@ -//===- CIRDialect.h - CIR dialect -------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file declares the CIR dialect. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H -#define LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H - -#endif // LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td deleted file mode 100644 index 69d6e9774942..000000000000 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ /dev/null @@ -1,44 +0,0 @@ -//===- CIRDialect.td - CIR dialect -------------------------*- tablegen -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file declares the CIR dialect. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT -#define LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT - -include "mlir/IR/OpBase.td" - -def CIR_Dialect : Dialect { - let name = "cir"; - - // A short one-line summary of our dialect. - let summary = "A high-level dialect for analyzing and optimizing Clang " - "supported languages"; - - let cppNamespace = "::mlir::cir"; - - let useDefaultAttributePrinterParser = 0; - let useDefaultTypePrinterParser = 0; - - let extraClassDeclaration = [{ - void registerAttributes(); - void registerTypes(); - - Type parseType(DialectAsmParser &parser) const override; - void printType(Type type, DialectAsmPrinter &printer) const override; - - Attribute parseAttribute(DialectAsmParser &parser, - Type type) const override; - - void printAttribute(Attribute attr, DialectAsmPrinter &os) const override; - }]; -} - -#endif // LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td deleted file mode 100644 index 7311c8db783e..000000000000 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ /dev/null @@ -1,19 +0,0 @@ -//===-- CIROps.td - CIR dialect definition -----------------*- tablegen -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file -/// Definition of the CIR dialect -/// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIROPS -#define LLVM_CLANG_CIR_DIALECT_IR_CIROPS - -include "clang/CIR/Dialect/IR/CIRDialect.td" - -#endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt deleted file mode 100644 index 28ae30dab8df..000000000000 --- a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt +++ /dev/null @@ -1,16 +0,0 @@ -# This replicates part of the add_mlir_dialect cmake function from MLIR that -# cannot be used here. This happens because it expects to be run inside MLIR -# directory which is not the case for CIR (and also FIR, both have similar -# workarounds). - -# Equivalent to add_mlir_dialect(CIROps cir) -set(LLVM_TARGET_DEFINITIONS CIROps.td) -mlir_tablegen(CIROps.h.inc -gen-op-decls) -mlir_tablegen(CIROps.cpp.inc -gen-op-defs) -mlir_tablegen(CIROpsTypes.h.inc -gen-typedef-decls) -mlir_tablegen(CIROpsTypes.cpp.inc -gen-typedef-defs) -mlir_tablegen(CIROpsDialect.h.inc -gen-dialect-decls) -mlir_tablegen(CIROpsDialect.cpp.inc -gen-dialect-defs) -add_public_tablegen_target(MLIRCIROpsIncGen) -add_dependencies(mlir-headers MLIRCIROpsIncGen) - diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index d2ff200e0da5..e69de29bb2d1 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -1,4 +0,0 @@ -include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) -include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) - -add_subdirectory(Dialect) diff --git a/clang/lib/CIR/Dialect/CMakeLists.txt b/clang/lib/CIR/Dialect/CMakeLists.txt deleted file mode 100644 index f33061b2d87c..000000000000 --- a/clang/lib/CIR/Dialect/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -add_subdirectory(IR) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp deleted file mode 100644 index c2829c3ff2af..000000000000 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ /dev/null @@ -1,13 +0,0 @@ -//===- CIRDialect.cpp - MLIR CIR ops implementation -----------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file implements the CIR dialect and its operations. -// -//===----------------------------------------------------------------------===// - -#include diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt deleted file mode 100644 index 0d7476b55570..000000000000 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -add_clang_library(MLIRCIR - CIRDialect.cpp - ) From b18d0d13a41a5c0920d1fd19eba6541d7a34b45c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:08:26 -0800 Subject: [PATCH 0016/2301] Revert "[CIR][cmake] Add support for cmake variable CLANG_ENABLE_CIR" This reverts commit 44de2bb6949f0ca62a2e16506fe3d91be14e6d23. --- clang/CMakeLists.txt | 7 ------- clang/include/clang/CIR/CMakeLists.txt | 0 clang/include/clang/CMakeLists.txt | 3 --- clang/include/clang/Config/config.h.cmake | 3 --- clang/lib/CIR/CMakeLists.txt | 0 clang/lib/CMakeLists.txt | 4 ---- clang/test/CMakeLists.txt | 1 - clang/test/lit.site.cfg.py.in | 1 - 8 files changed, 19 deletions(-) delete mode 100644 clang/include/clang/CIR/CMakeLists.txt delete mode 100644 clang/lib/CIR/CMakeLists.txt diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt index f54cec7d4f4e..df3a770f22f9 100644 --- a/clang/CMakeLists.txt +++ b/clang/CMakeLists.txt @@ -166,13 +166,6 @@ if(CLANG_ENABLE_LIBXML2) endif() endif() -if(CLANG_ENABLE_CIR) - if (NOT "${LLVM_ENABLE_PROJECTS}" MATCHES "MLIR|mlir") - message(FATAL_ERROR - "Cannot build ClangIR without MLIR in LLVM_ENABLE_PROJECTS") - endif() -endif() - include(CheckIncludeFile) check_include_file(sys/resource.h CLANG_HAVE_RLIMITS) diff --git a/clang/include/clang/CIR/CMakeLists.txt b/clang/include/clang/CIR/CMakeLists.txt deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/clang/include/clang/CMakeLists.txt b/clang/include/clang/CMakeLists.txt index 47ac70cd2169..0dc9ea5ed8ac 100644 --- a/clang/include/clang/CMakeLists.txt +++ b/clang/include/clang/CMakeLists.txt @@ -1,8 +1,5 @@ add_subdirectory(AST) add_subdirectory(Basic) -if(CLANG_ENABLE_CIR) - add_subdirectory(CIR) -endif() add_subdirectory(Driver) add_subdirectory(Parse) add_subdirectory(Sema) diff --git a/clang/include/clang/Config/config.h.cmake b/clang/include/clang/Config/config.h.cmake index 27ed69e21562..4015ac804086 100644 --- a/clang/include/clang/Config/config.h.cmake +++ b/clang/include/clang/Config/config.h.cmake @@ -83,7 +83,4 @@ /* Spawn a new process clang.exe for the CC1 tool invocation, when necessary */ #cmakedefine01 CLANG_SPAWN_CC1 -/* Whether CIR is built into Clang */ -#cmakedefine01 CLANG_ENABLE_CIR - #endif diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/clang/lib/CMakeLists.txt b/clang/lib/CMakeLists.txt index 14ba55360fe0..0cac86451f39 100644 --- a/clang/lib/CMakeLists.txt +++ b/clang/lib/CMakeLists.txt @@ -31,7 +31,3 @@ if(CLANG_INCLUDE_TESTS) endif() add_subdirectory(Interpreter) add_subdirectory(Support) - -if(CLANG_ENABLE_CIR) - add_subdirectory(CIR) -endif() diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt index 5369dc92f69e..b18614a6cbb1 100644 --- a/clang/test/CMakeLists.txt +++ b/clang/test/CMakeLists.txt @@ -9,7 +9,6 @@ llvm_canonicalize_cmake_booleans( CLANG_ENABLE_STATIC_ANALYZER CLANG_PLUGIN_SUPPORT CLANG_SPAWN_CC1 - CLANG_ENABLE_CIR ENABLE_BACKTRACES LLVM_BUILD_EXAMPLES LLVM_BYE_LINK_INTO_TOOLS diff --git a/clang/test/lit.site.cfg.py.in b/clang/test/lit.site.cfg.py.in index 1cbd876ac5bb..7972686d199d 100644 --- a/clang/test/lit.site.cfg.py.in +++ b/clang/test/lit.site.cfg.py.in @@ -27,7 +27,6 @@ config.clang_default_pie_on_linux = @CLANG_DEFAULT_PIE_ON_LINUX@ config.clang_default_cxx_stdlib = "@CLANG_DEFAULT_CXX_STDLIB@" config.clang_staticanalyzer = @CLANG_ENABLE_STATIC_ANALYZER@ config.clang_staticanalyzer_z3 = @LLVM_WITH_Z3@ -config.clang_enable_cir = @CLANG_ENABLE_CIR@ config.clang_examples = @CLANG_BUILD_EXAMPLES@ config.enable_shared = @ENABLE_SHARED@ config.enable_backtrace = @ENABLE_BACKTRACES@ From 76701919edfd334d22e674b02ff9a86fc5607ec6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Jan 2025 22:08:34 -0800 Subject: [PATCH 0017/2301] Revert "[CIR][Basic][NFC] Add the CIR language to the Language enum" This reverts commit e66b670f3bf9312f696e66c31152ae535207d6bb. --- clang/include/clang/Basic/LangStandard.h | 5 ++--- clang/include/clang/Driver/Types.def | 1 - clang/lib/Basic/LangStandards.cpp | 3 --- .../Serialization/SymbolGraphSerializer.cpp | 1 - clang/lib/Frontend/CompilerInvocation.cpp | 13 ++----------- clang/lib/Frontend/FrontendActions.cpp | 1 - clang/lib/Frontend/FrontendOptions.cpp | 1 - 7 files changed, 4 insertions(+), 21 deletions(-) diff --git a/clang/include/clang/Basic/LangStandard.h b/clang/include/clang/Basic/LangStandard.h index 49412232c9c5..35361880c371 100644 --- a/clang/include/clang/Basic/LangStandard.h +++ b/clang/include/clang/Basic/LangStandard.h @@ -26,9 +26,8 @@ enum class Language : uint8_t { /// Assembly: we accept this only so that we can preprocess it. Asm, - /// LLVM IR & CIR: we accept these so that we can run the optimizer on them, - /// and compile them to assembly or object code (or LLVM for CIR). - CIR, + /// LLVM IR: we accept this so that we can run the optimizer on it, + /// and compile it to assembly or object code. LLVM_IR, ///@{ Languages that the frontend can parse and compile. diff --git a/clang/include/clang/Driver/Types.def b/clang/include/clang/Driver/Types.def index 214c5e7a789f..2a59c1302f27 100644 --- a/clang/include/clang/Driver/Types.def +++ b/clang/include/clang/Driver/Types.def @@ -99,7 +99,6 @@ TYPE("ir", LLVM_BC, INVALID, "bc", phases TYPE("lto-ir", LTO_IR, INVALID, "s", phases::Compile, phases::Backend, phases::Assemble, phases::Link) TYPE("lto-bc", LTO_BC, INVALID, "o", phases::Compile, phases::Backend, phases::Assemble, phases::Link) -TYPE("cir", CIR, INVALID, "cir", phases::Compile, phases::Backend, phases::Assemble, phases::Link) // Misc. TYPE("ast", AST, INVALID, "ast", phases::Compile, phases::Backend, phases::Assemble, phases::Link) TYPE("ifs", IFS, INVALID, "ifs", phases::IfsMerge) diff --git a/clang/lib/Basic/LangStandards.cpp b/clang/lib/Basic/LangStandards.cpp index c49d095018b2..aa01e25baeca 100644 --- a/clang/lib/Basic/LangStandards.cpp +++ b/clang/lib/Basic/LangStandards.cpp @@ -21,8 +21,6 @@ StringRef clang::languageToString(Language L) { return "Asm"; case Language::LLVM_IR: return "LLVM IR"; - case Language::CIR: - return "ClangIR"; case Language::C: return "C"; case Language::CXX: @@ -93,7 +91,6 @@ LangStandard::Kind clang::getDefaultLanguageStandard(clang::Language Lang, switch (Lang) { case Language::Unknown: case Language::LLVM_IR: - case Language::CIR: llvm_unreachable("Invalid input kind!"); case Language::OpenCL: return LangStandard::lang_opencl12; diff --git a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp index e881d56258e5..8d874f226cb1 100644 --- a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp +++ b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp @@ -220,7 +220,6 @@ StringRef getLanguageName(Language Lang) { case Language::Unknown: case Language::Asm: case Language::LLVM_IR: - case Language::CIR: llvm_unreachable("Unsupported language kind"); } diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 5410bf1fbd78..6130b54410d7 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -2937,9 +2937,6 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts, case Language::HLSL: Lang = "hlsl"; break; - case Language::CIR: - Lang = "cir"; - break; } GenerateArg(Consumer, OPT_x, @@ -3164,7 +3161,6 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, .Cases("ast", "pcm", "precompiled-header", InputKind(Language::Unknown, InputKind::Precompiled)) .Case("ir", Language::LLVM_IR) - .Case("cir", Language::CIR) .Default(Language::Unknown); if (DashX.isUnknown()) @@ -3554,7 +3550,6 @@ static bool IsInputCompatibleWithStandard(InputKind IK, switch (IK.getLanguage()) { case Language::Unknown: case Language::LLVM_IR: - case Language::CIR: llvm_unreachable("should not parse language flags for this input"); case Language::C: @@ -3617,8 +3612,6 @@ static StringRef GetInputKindName(InputKind IK) { return "Asm"; case Language::LLVM_IR: return "LLVM IR"; - case Language::CIR: - return "Clang IR"; case Language::HLSL: return "HLSL"; @@ -3634,8 +3627,7 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts, const llvm::Triple &T, InputKind IK) { if (IK.getFormat() == InputKind::Precompiled || - IK.getLanguage() == Language::LLVM_IR || - IK.getLanguage() == Language::CIR) { + IK.getLanguage() == Language::LLVM_IR) { if (Opts.ObjCAutoRefCount) GenerateArg(Consumer, OPT_fobjc_arc); if (Opts.PICLevel != 0) @@ -3933,8 +3925,7 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args, unsigned NumErrorsBefore = Diags.getNumErrors(); if (IK.getFormat() == InputKind::Precompiled || - IK.getLanguage() == Language::LLVM_IR || - IK.getLanguage() == Language::CIR) { + IK.getLanguage() == Language::LLVM_IR) { // ObjCAAutoRefCount and Sanitize LangOpts are used to setup the // PassManager in BackendUtil.cpp. They need to be initialized no matter // what the input type is. diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp index 1ea4a2e9e88c..0e10268e024c 100644 --- a/clang/lib/Frontend/FrontendActions.cpp +++ b/clang/lib/Frontend/FrontendActions.cpp @@ -1106,7 +1106,6 @@ void PrintPreambleAction::ExecuteAction() { case Language::CUDA: case Language::HIP: case Language::HLSL: - case Language::CIR: break; case Language::Unknown: diff --git a/clang/lib/Frontend/FrontendOptions.cpp b/clang/lib/Frontend/FrontendOptions.cpp index 32ed99571e85..bf83b27c1367 100644 --- a/clang/lib/Frontend/FrontendOptions.cpp +++ b/clang/lib/Frontend/FrontendOptions.cpp @@ -34,6 +34,5 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) { .Case("hip", Language::HIP) .Cases("ll", "bc", Language::LLVM_IR) .Case("hlsl", Language::HLSL) - .Case("cir", Language::CIR) .Default(Language::Unknown); } From e464ad298626f0f563e5d037a794632b7981267f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 2 Jun 2022 12:34:11 -0700 Subject: [PATCH 0018/2301] [CIR] Add stub test for test directory --- clang/test/CIR/CodeGen/basic.c | 1 + 1 file changed, 1 insertion(+) create mode 100644 clang/test/CIR/CodeGen/basic.c diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c new file mode 100644 index 000000000000..5c1031cccc41 --- /dev/null +++ b/clang/test/CIR/CodeGen/basic.c @@ -0,0 +1 @@ +// RUN: true From 9b1ae840c03453fb25af3d6b8891a18a576f05bb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 Aug 2021 20:17:09 -0700 Subject: [PATCH 0019/2301] [CIR] Initial commit: setup cmake, driver flags and MLIR initial files Plumb pieces together but no functionality. --- clang/CMakeLists.txt | 13 + clang/include/clang/Basic/LangOptions.def | 2 + clang/include/clang/Driver/Options.td | 6 + .../clang/Sema/AnalysisBasedWarnings.h | 4 + clang/include/clang/Sema/CIRBasedWarnings.h | 62 +++++ clang/include/clang/Sema/Sema.h | 1 + clang/lib/Driver/ToolChains/Clang.cpp | 4 + clang/lib/Sema/CIRBasedWarnings.cpp | 261 ++++++++++++++++++ clang/lib/Sema/CMakeLists.txt | 16 ++ clang/test/Driver/cir.c | 12 + llvm/docs/CIR.rst | 24 ++ mlir/include/mlir/Dialect/CIR/CMakeLists.txt | 1 + mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 34 +++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 74 +++++ .../mlir/Dialect/CIR/IR/CMakeLists.txt | 2 + mlir/include/mlir/Dialect/CMakeLists.txt | 1 + mlir/lib/Dialect/CIR/CMakeLists.txt | 1 + mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 51 ++++ mlir/lib/Dialect/CIR/IR/CMakeLists.txt | 15 + mlir/lib/Dialect/CMakeLists.txt | 1 + 20 files changed, 585 insertions(+) create mode 100644 clang/include/clang/Sema/CIRBasedWarnings.h create mode 100644 clang/lib/Sema/CIRBasedWarnings.cpp create mode 100644 clang/test/Driver/cir.c create mode 100644 llvm/docs/CIR.rst create mode 100644 mlir/include/mlir/Dialect/CIR/CMakeLists.txt create mode 100644 mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h create mode 100644 mlir/include/mlir/Dialect/CIR/IR/CIROps.td create mode 100644 mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt create mode 100644 mlir/lib/Dialect/CIR/CMakeLists.txt create mode 100644 mlir/lib/Dialect/CIR/IR/CIRDialect.cpp create mode 100644 mlir/lib/Dialect/CIR/IR/CMakeLists.txt diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt index df3a770f22f9..d0a828029561 100644 --- a/clang/CMakeLists.txt +++ b/clang/CMakeLists.txt @@ -166,6 +166,19 @@ if(CLANG_ENABLE_LIBXML2) endif() endif() +set(CLANG_ENABLE_CIR FALSE) + +if(CLANG_ENABLE_CIR) + if (CLANG_BUILT_STANDALONE) + message(FATAL_ERROR + "ClangIR is not yet supported in the standalone build.") + endif() + if (NOT "${LLVM_ENABLE_PROJECTS}" MATCHES "MLIR|mlir") + message(FATAL_ERROR + "Cannot build ClangIR without MLIR in LLVM_ENABLE_PROJECTS") + endif() +endif() + include(CheckIncludeFile) check_include_file(sys/resource.h CLANG_HAVE_RLIMITS) diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def index a980be853d53..36e303fa26f2 100644 --- a/clang/include/clang/Basic/LangOptions.def +++ b/clang/include/clang/Basic/LangOptions.def @@ -436,6 +436,8 @@ COMPATIBLE_LANGOPT(RetainCommentsFromSystemHeaders, 1, 0, "retain documentation LANGOPT(APINotes, 1, 0, "use external API notes") LANGOPT(APINotesModules, 1, 0, "use module-based external API notes") +LANGOPT(CIRWarnings, 1, 0, "emit warnings with ClangIR") + LANGOPT(SanitizeAddressFieldPadding, 2, 0, "controls how aggressive is ASan " "field padding (0: none, 1:least " "aggressive, 2: more aggressive)") diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index a70335f7bbfb..6fe719db3f1b 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -1955,6 +1955,12 @@ defm bounds_safety : BoolFOption< BothFlags<[], [CC1Option], " experimental bounds safety extension for C">>; +defm cir_warnings : BoolFOption<"cir-warnings", + LangOpts<"CIRWarnings">, DefaultFalse, + PosFlag, + NegFlag, + BothFlags<[], [ClangOption, CC1Option], " CIR to emit (analysis based) warnings">>; + defm addrsig : BoolFOption<"addrsig", CodeGenOpts<"Addrsig">, DefaultFalse, PosFlag, diff --git a/clang/include/clang/Sema/AnalysisBasedWarnings.h b/clang/include/clang/Sema/AnalysisBasedWarnings.h index aafe227b8408..2c18350c72b0 100644 --- a/clang/include/clang/Sema/AnalysisBasedWarnings.h +++ b/clang/include/clang/Sema/AnalysisBasedWarnings.h @@ -19,10 +19,13 @@ namespace clang { +class BlockExpr; +class CIRBasedWarnings; class Decl; class FunctionDecl; class QualType; class Sema; + namespace sema { class FunctionScopeInfo; } @@ -33,6 +36,7 @@ class AnalysisBasedWarnings { public: class Policy { friend class AnalysisBasedWarnings; + friend class CIRBasedWarnings; // The warnings to run. LLVM_PREFERRED_TYPE(bool) unsigned enableCheckFallThrough : 1; diff --git a/clang/include/clang/Sema/CIRBasedWarnings.h b/clang/include/clang/Sema/CIRBasedWarnings.h new file mode 100644 index 000000000000..839e9f100b5c --- /dev/null +++ b/clang/include/clang/Sema/CIRBasedWarnings.h @@ -0,0 +1,62 @@ +//=- CIRBasedWarnings.h - Sema warnings based on libAnalysis -*- C++ -*-=// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines CIRBasedWarnings, a worker object used by Sema +// that issues warnings based on dataflow-analysis. +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_SEMA_CIRBASEDWARNINGS_H +#define LLVM_CLANG_SEMA_CIRBASEDWARNINGS_H + +#include "clang/Sema/AnalysisBasedWarnings.h" +#include "llvm/ADT/DenseMap.h" +#include + +namespace clang { + +class BlockExpr; +class Decl; +class FunctionDecl; +class ObjCMethodDecl; +class QualType; +class Sema; +namespace sema { + class FunctionScopeInfo; +} + +namespace sema { + +class CIRBasedWarnings { +private: + Sema &S; + AnalysisBasedWarnings::Policy DefaultPolicy; + + //class InterProceduralData; + //std::unique_ptr IPData; + + enum VisitFlag { NotVisited = 0, Visited = 1, Pending = 2 }; + llvm::DenseMap VisitedFD; + + /// @} + +public: + CIRBasedWarnings(Sema &s); + ~CIRBasedWarnings(); + + void IssueWarnings(AnalysisBasedWarnings::Policy P, FunctionScopeInfo *fscope, + const Decl *D, QualType BlockType); + + //Policy getDefaultPolicy() { return DefaultPolicy; } + + void PrintStats() const; +}; + +} // namespace sema +} // namespace clang + +#endif diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index 4d6e02fe2956..aebba1afed0c 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -55,6 +55,7 @@ #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/Attr.h" +#include "clang/Sema/CIRBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 07ceab47112b..5a20b1b3acd0 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -8095,6 +8095,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-fmv"); } + if (Args.hasFlag(options::OPT_fcir_warnings, options::OPT_fno_cir_warnings, + false)) + CmdArgs.push_back("-fcir-warnings"); + if (Args.hasFlag(options::OPT_faddrsig, options::OPT_fno_addrsig, (TC.getTriple().isOSBinFormatELF() || TC.getTriple().isOSBinFormatCOFF()) && diff --git a/clang/lib/Sema/CIRBasedWarnings.cpp b/clang/lib/Sema/CIRBasedWarnings.cpp new file mode 100644 index 000000000000..e14c05699253 --- /dev/null +++ b/clang/lib/Sema/CIRBasedWarnings.cpp @@ -0,0 +1,261 @@ +//=- CIRBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ ----*-=// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines analysis_warnings::[Policy,Executor]. +// Together they are used by Sema to issue warnings based on inexpensive +// static analysis algorithms using ClangIR. +// +//===----------------------------------------------------------------------===// + +#include "clang/Sema/CIRBasedWarnings.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/EvaluatedExprVisitor.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/ExprObjC.h" +#include "clang/AST/ParentMap.h" +#include "clang/AST/RecursiveASTVisitor.h" +#include "clang/AST/StmtCXX.h" +#include "clang/AST/StmtObjC.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/Basic/SourceLocation.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Lex/Preprocessor.h" +#include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/SemaInternal.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/MapVector.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/Casting.h" + +#include "mlir/Dialect/CIR/IR/CIRDialect.h" + +#include +#include +#include + +using namespace clang; + +namespace { +/// +/// Helpers +/// +class reverse_children { + llvm::SmallVector childrenBuf; + ArrayRef children; + +public: + reverse_children(Stmt *S); + + using iterator = ArrayRef::reverse_iterator; + + iterator begin() const { return children.rbegin(); } + iterator end() const { return children.rend(); } +}; + +// FIXME: we might not even need this. +reverse_children::reverse_children(Stmt *S) { + if (CallExpr *CE = dyn_cast(S)) { + children = CE->getRawSubExprs(); + return; + } + switch (S->getStmtClass()) { + // Note: Fill in this switch with more cases we want to optimize. + case Stmt::InitListExprClass: { + InitListExpr *IE = cast(S); + children = llvm::ArrayRef(reinterpret_cast(IE->getInits()), + IE->getNumInits()); + return; + } + default: + break; + } + + // Default case for all other statements. + for (Stmt *SubStmt : S->children()) + childrenBuf.push_back(SubStmt); + + // This needs to be done *after* childrenBuf has been populated. + children = childrenBuf; +} + +/// +/// CIRBuilder +/// + +/// CIRBuilder - This class implements CIR construction from an AST. +class CIRBuilder { +public: + typedef int CIRUnit; + explicit CIRBuilder(ASTContext *astContext) : Context(astContext) {} + + ASTContext *Context; + + // buildCFG - Used by external clients to construct the CFG. + // std::unique_ptr buildCIR(const Decl *D, Stmt *Statement); + void buildCIR(const Decl *D, Stmt *Statement); + +private: + // Visitors to walk an AST and construct CIR. + CIRUnit *VisitImplicitCastExpr(ImplicitCastExpr *E); + CIRUnit *VisitCompoundStmt(CompoundStmt *C); + CIRUnit *VisitDeclStmt(DeclStmt *DS); + + // Basic components + CIRUnit *Visit(Stmt *S); + CIRUnit *VisitStmt(Stmt *S); + CIRUnit *VisitChildren(Stmt *S); +}; + +using CIRUnit = CIRBuilder::CIRUnit; + +/// +/// Basic visitors +/// + +/// Visit - Walk the subtree of a statement and add extra +/// blocks for ternary operators, &&, and ||. We also process "," and +/// DeclStmts (which may contain nested control-flow). +CIRUnit *CIRBuilder::Visit(Stmt *S) { + if (!S) { + return nullptr; + } + + // if (Expr *E = dyn_cast(S)) + // S = E->IgnoreParens(); + + switch (S->getStmtClass()) { + default: + return VisitStmt(S); + + case Stmt::CompoundStmtClass: + return VisitCompoundStmt(cast(S)); + + case Stmt::ImplicitCastExprClass: + return VisitImplicitCastExpr(cast(S)); + + case Stmt::DeclStmtClass: + return VisitDeclStmt(cast(S)); + } +} + +CIRUnit *CIRBuilder::VisitStmt(Stmt *S) { + // FIXME: do work. + return VisitChildren(S); +} + +/// VisitChildren - Visit the children of a Stmt. +CIRUnit *CIRBuilder::VisitChildren(Stmt *S) { + // Visit the children in their reverse order so that they appear in + // left-to-right (natural) order in the CFG. + // reverse_children RChildren(S); + // for (Stmt *Child : RChildren) { + // if (Child) + // if (CIRUnit *R = Visit(Child)) + // B = R; + // } + return nullptr; // B; +} + +/// +/// Other visitors +/// +CIRUnit *CIRBuilder::VisitImplicitCastExpr(ImplicitCastExpr *E) { + // FIXME: do work. + return nullptr; +} + +CIRUnit *CIRBuilder::VisitCompoundStmt(CompoundStmt *C) { + // FIXME: do work. + return nullptr; +} + +CIRUnit *CIRBuilder::VisitDeclStmt(DeclStmt *DS) { + // FIXME: do work. + return nullptr; +} + +} // namespace + +/// +/// CIRBasedWarnings +/// +static unsigned isEnabled(DiagnosticsEngine &D, unsigned diag) { + return (unsigned)!D.isIgnored(diag, SourceLocation()); +} + +sema::CIRBasedWarnings::CIRBasedWarnings(Sema &s) : S(s) { + + using namespace diag; + DiagnosticsEngine &D = S.getDiagnostics(); + + DefaultPolicy.enableCheckUnreachable = + isEnabled(D, warn_unreachable) || isEnabled(D, warn_unreachable_break) || + isEnabled(D, warn_unreachable_return) || + isEnabled(D, warn_unreachable_loop_increment); + + DefaultPolicy.enableThreadSafetyAnalysis = isEnabled(D, warn_double_lock); + + DefaultPolicy.enableConsumedAnalysis = + isEnabled(D, warn_use_in_invalid_state); +} + +// We need this here for unique_ptr with forward declared class. +sema::CIRBasedWarnings::~CIRBasedWarnings() = default; + +static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) { + for (const auto &D : fscope->PossiblyUnreachableDiags) + S.Diag(D.Loc, D.PD); +} + +void clang::sema::CIRBasedWarnings::IssueWarnings( + sema::AnalysisBasedWarnings::Policy P, sema::FunctionScopeInfo *fscope, + const Decl *D, QualType BlockType) { + // We avoid doing analysis-based warnings when there are errors for + // two reasons: + // (1) The CFGs often can't be constructed (if the body is invalid), so + // don't bother trying. + // (2) The code already has problems; running the analysis just takes more + // time. + DiagnosticsEngine &Diags = S.getDiagnostics(); + + // Do not do any analysis if we are going to just ignore them. + if (Diags.getIgnoreAllWarnings() || + (Diags.getSuppressSystemWarnings() && + S.SourceMgr.isInSystemHeader(D->getLocation()))) + return; + + // For code in dependent contexts, we'll do this at instantiation time. + if (cast(D)->isDependentContext()) + return; + + if (S.hasUncompilableErrorOccurred()) { + // Flush out any possibly unreachable diagnostics. + flushDiagnostics(S, fscope); + return; + } + + const Stmt *Body = D->getBody(); + assert(Body); + + // TODO: up to this point this behaves the same as + // AnalysisBasedWarnings::IssueWarnings + + // Unlike Clang CFG, we share CIR state between each analyzed function, + // retrieve or create a new context. + mlir::MLIRContext context; + // Load our Dialect in this MLIR Context. + context.getOrLoadDialect(); +} + +void clang::sema::CIRBasedWarnings::PrintStats() const { + llvm::errs() << "\n*** CIR Based Warnings Stats:\n"; +} diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt index 19cf3a2db00f..d63d39dd361d 100644 --- a/clang/lib/Sema/CMakeLists.txt +++ b/clang/lib/Sema/CMakeLists.txt @@ -13,10 +13,16 @@ clang_tablegen(OpenCLBuiltins.inc -gen-clang-opencl-builtins TARGET ClangOpenCLBuiltinsImpl ) +include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) +include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + add_clang_library(clangSema AnalysisBasedWarnings.cpp CheckExprLifetime.cpp CodeCompleteConsumer.cpp + CIRBasedWarnings.cpp DeclSpec.cpp DelayedDiagnostic.cpp HeuristicResolver.cpp @@ -100,6 +106,7 @@ add_clang_library(clangSema ClangOpenCLBuiltinsImpl omp_gen ClangDriverOptions + MLIRCIROpsIncGen LINK_LIBS clangAPINotes @@ -109,4 +116,13 @@ add_clang_library(clangSema clangEdit clangLex clangSupport + + ${dialect_libs} + MLIRCIR + MLIRAnalysis + MLIRIR + MLIRParser + MLIRSideEffectInterfaces + MLIRTransforms + MLIRSupport ) diff --git a/clang/test/Driver/cir.c b/clang/test/Driver/cir.c new file mode 100644 index 000000000000..ab8dfbb28277 --- /dev/null +++ b/clang/test/Driver/cir.c @@ -0,0 +1,12 @@ +// RUN: %clang -### -target x86_64-unknown-linux -c %s 2>&1 | FileCheck -check-prefix=NO-CIR %s +// RUN: %clang -### -target x86_64-pc-win32 -c %s 2>&1 | FileCheck -check-prefix=NO-CIR %s +// RUN: %clang -### -target x86_64-scei-ps4 -c %s 2>&1 | FileCheck -check-prefix=NO-CIR %s +// RUN: %clang -### -target x86_64-linux-android21 -c %s 2>&1 | FileCheck -check-prefix=NO-CIR %s + +// RUN: %clang -### -target x86_64-unknown-linux -c -fcir-warnings %s 2>&1 | FileCheck -check-prefix=CIR %s +// RUN: %clang -### -target x86_64-pc-win32 -c -fcir-warnings %s 2>&1 | FileCheck -check-prefix=CIR %s +// RUN: %clang -### -target x86_64-scei-ps4 -c -fcir-warnings %s 2>&1 | FileCheck -check-prefix=CIR %s +// RUN: %clang -### -target x86_64-linux-android21 -c -fcir-warnings %s 2>&1 | FileCheck -check-prefix=CIR %s + +// CIR: -fcir-warnings +// NO-CIR-NOT: -fcir-warnings diff --git a/llvm/docs/CIR.rst b/llvm/docs/CIR.rst new file mode 100644 index 000000000000..b829eaf3ac99 --- /dev/null +++ b/llvm/docs/CIR.rst @@ -0,0 +1,24 @@ +=============================== +CIR - Clang IR Design and Implementation +=============================== + +.. contents:: + :local: + +Introduction +============ + +This document aims to provide an overview of the design and +implementation of a Clang IR, a high level IR allowing more +analysis and future optimizations. + +Usage in Clang +============== + + +Usage in Clang happens right now as part of replacing current +IssueWarnings +AnalysisWarnings.IssueWarnings + +CFG usage in ``AnalysisBasedWarning.cpp`` to use CIR instead of +Clang's CFG, as part of ``PopFunctionScopeInfo``. \ No newline at end of file diff --git a/mlir/include/mlir/Dialect/CIR/CMakeLists.txt b/mlir/include/mlir/Dialect/CIR/CMakeLists.txt new file mode 100644 index 000000000000..f33061b2d87c --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(IR) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h new file mode 100644 index 000000000000..61fdc39c0c81 --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -0,0 +1,34 @@ +//===- CIRDialect.h - MLIR Dialect for CIR ----------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the Target dialect for CIR in MLIR. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_CIRDIALECT_H_ +#define MLIR_DIALECT_CIR_CIRDIALECT_H_ + +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Dialect.h" +#include "mlir/IR/OpDefinition.h" +#include "mlir/Interfaces/SideEffectInterfaces.h" + +namespace mlir { +namespace func { +class FuncOp; +} // namespace func +using FuncOp = func::FuncOp; +} // namespace mlir + +#include "mlir/Dialect/CIR/IR/CIROpsDialect.h.inc" + +#define GET_OP_CLASSES +#include "mlir/Dialect/CIR/IR/CIROps.h.inc" + +#endif // MLIR_DIALECT_CIR_CIRDIALECT_H_ diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td new file mode 100644 index 000000000000..9394f7fc1ce5 --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -0,0 +1,74 @@ +//===-- CIRDialect.td - CIR dialect definition -------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// Definition of the CIR dialect +/// +//===----------------------------------------------------------------------===// + +#ifndef CIR_DIALECT_CIR_DIALECT +#define CIR_DIALECT_CIR_DIALECT + +include "mlir/IR/OpBase.td" +include "mlir/Interfaces/SideEffectInterfaces.td" + +def CIR_Dialect : Dialect { + let name = "cir"; + + // A short one-line summary of our dialect. + let summary = "A high-level dialect for analyzing and optimizing Clang " + "compiled languages"; + + let cppNamespace = "::mlir::cir"; +} + +class CIR_Op traits = []> : + Op; + +//===----------------------------------------------------------------------===// +// CIR Operations +//===----------------------------------------------------------------------===// + +def ReturnOp : CIR_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { + let summary = "return operation"; + let description = [{ + The "return" operation represents a return operation within a function. + The operation takes an optional tensor operand and produces no results. + The operand type must match the signature of the function that contains + the operation. For example: + + ```mlir + func @foo() -> AnyType { + ... + cir.return %0 : AnyType + } + ``` + }]; + + // The return operation takes an optional input operand to return. This + // value must match the return type of the enclosing function. + let arguments = (ins Variadic:$input); + + // The return operation only emits the input in the format if it is present. + let assemblyFormat = "($input^ `:` type($input))? attr-dict "; + + // Allow building a ReturnOp with no return operand. + let builders = [ + OpBuilder<(ins), [{ build($_builder, $_state, std::nullopt); }]> + ]; + + // Provide extra utility definitions on the c++ operation class definition. + let extraClassDeclaration = [{ + bool hasOperand() { return getNumOperands() != 0; } + }]; + + let hasVerifier = 1; +} + + +#endif // CIR_DIALECT_CIR_DIALECT diff --git a/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt new file mode 100644 index 000000000000..8c2c20a31f9c --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt @@ -0,0 +1,2 @@ +add_mlir_dialect(CIROps cir) +add_mlir_doc(CIROps CIROps Dialects/ -gen-op-doc) diff --git a/mlir/include/mlir/Dialect/CMakeLists.txt b/mlir/include/mlir/Dialect/CMakeLists.txt index f71023519733..aebf25d02830 100644 --- a/mlir/include/mlir/Dialect/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/CMakeLists.txt @@ -7,6 +7,7 @@ add_subdirectory(ArmSME) add_subdirectory(ArmSVE) add_subdirectory(Async) add_subdirectory(Bufferization) +add_subdirectory(CIR) add_subdirectory(Complex) add_subdirectory(ControlFlow) add_subdirectory(DLTI) diff --git a/mlir/lib/Dialect/CIR/CMakeLists.txt b/mlir/lib/Dialect/CIR/CMakeLists.txt new file mode 100644 index 000000000000..f33061b2d87c --- /dev/null +++ b/mlir/lib/Dialect/CIR/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(IR) diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp new file mode 100644 index 000000000000..f3bf5dfe1a87 --- /dev/null +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -0,0 +1,51 @@ +//===- CIRDialect.cpp - MLIR CIR ops implementation -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the CIR dialect and its operations. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/CIR/IR/CIRDialect.h" + +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMTypes.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/OpImplementation.h" +#include "mlir/IR/TypeUtilities.h" + +using namespace mlir; +using namespace mlir::cir; + +#include "mlir/Dialect/CIR/IR/CIROpsDialect.cpp.inc" + +//===----------------------------------------------------------------------===// +// CIR Dialect +//===----------------------------------------------------------------------===// + +/// Dialect initialization, the instance will be owned by the context. This is +/// the point of registration of types and operations for the dialect. +void cir::CIRDialect::initialize() { + addOperations< +#define GET_OP_LIST +#include "mlir/Dialect/CIR/IR/CIROps.cpp.inc" + >(); +} + +//===----------------------------------------------------------------------===// +// ReturnOp + +mlir::LogicalResult ReturnOp::verify() { + return getOperation()->emitError() << "not implemented"; +} + +//===----------------------------------------------------------------------===// +// TableGen'd op method definitions +//===----------------------------------------------------------------------===// + +#define GET_OP_CLASSES +#include "mlir/Dialect/CIR/IR/CIROps.cpp.inc" diff --git a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt new file mode 100644 index 000000000000..905031aefe52 --- /dev/null +++ b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt @@ -0,0 +1,15 @@ +add_mlir_dialect_library(MLIRCIR + CIRDialect.cpp + + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/CIR + + DEPENDS + MLIRCIROpsIncGen + + LINK_LIBS PUBLIC + MLIRIR + MLIRFuncDialect + MLIRLLVMDialect + MLIRSideEffectInterfaces + ) diff --git a/mlir/lib/Dialect/CMakeLists.txt b/mlir/lib/Dialect/CMakeLists.txt index 80b0ef068d96..9f4ed94f244d 100644 --- a/mlir/lib/Dialect/CMakeLists.txt +++ b/mlir/lib/Dialect/CMakeLists.txt @@ -7,6 +7,7 @@ add_subdirectory(ArmSME) add_subdirectory(ArmSVE) add_subdirectory(Async) add_subdirectory(Bufferization) +add_subdirectory(CIR) add_subdirectory(Complex) add_subdirectory(ControlFlow) add_subdirectory(DLTI) From bd31d89eece32b4f9986fc9424bd66d3b0902c6f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 23 Aug 2021 17:47:11 -0700 Subject: [PATCH 0020/2301] [CIR] Add more dialect bits for a return op, add more builder content - Update cmake files - More boilerplate on AST handling - This allows us to build a simple C file, but no testcase yet. --- clang/include/clang/CIR/CIRBuilder.h | 50 ++ clang/include/clang/CIR/CIRCodeGenFunction.h | 98 ++++ clang/include/clang/Sema/CIRBasedWarnings.h | 9 +- clang/lib/CIR/CIRBuilder.cpp | 526 +++++++++++++++++++ clang/lib/CIR/CMakeLists.txt | 32 ++ clang/lib/CIR/CodeGen/CMakeLists.txt | 8 + clang/lib/CIR/Dialect/IR/CMakeLists.txt | 0 clang/lib/CMakeLists.txt | 4 + clang/lib/Sema/CIRBasedWarnings.cpp | 152 +----- clang/lib/Sema/CMakeLists.txt | 1 + mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 2 +- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 30 +- 12 files changed, 761 insertions(+), 151 deletions(-) create mode 100644 clang/include/clang/CIR/CIRBuilder.h create mode 100644 clang/include/clang/CIR/CIRCodeGenFunction.h create mode 100644 clang/lib/CIR/CIRBuilder.cpp create mode 100644 clang/lib/CIR/CMakeLists.txt create mode 100644 clang/lib/CIR/CodeGen/CMakeLists.txt create mode 100644 clang/lib/CIR/Dialect/IR/CMakeLists.txt diff --git a/clang/include/clang/CIR/CIRBuilder.h b/clang/include/clang/CIR/CIRBuilder.h new file mode 100644 index 000000000000..a5420f986b22 --- /dev/null +++ b/clang/include/clang/CIR/CIRBuilder.h @@ -0,0 +1,50 @@ +//===- CIRBuilder.h - CIR Generation from Clang AST -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares a simple interface to perform CIR generation from Clang +// AST +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CIRBUILDER_H_ +#define CLANG_CIRBUILDER_H_ + +#include + +namespace mlir { +class MLIRContext; +class OwningModuleRef; +} // namespace mlir + +namespace clang { +class ASTContext; +class FunctionDecl; +} // namespace clang + +namespace cir { +class CIRBuildImpl; +} + +namespace cir { + +class CIRContext { +public: + ~CIRContext(); + CIRContext(clang::ASTContext &AC); + void Init(); + bool EmitFunction(const clang::FunctionDecl *FD); + +private: + std::unique_ptr mlirCtx; + std::unique_ptr builder; + clang::ASTContext &astCtx; +}; + +} // namespace cir + +#endif // CLANG_CIRBUILDER_H_ \ No newline at end of file diff --git a/clang/include/clang/CIR/CIRCodeGenFunction.h b/clang/include/clang/CIR/CIRCodeGenFunction.h new file mode 100644 index 000000000000..2bc43ebfedc7 --- /dev/null +++ b/clang/include/clang/CIR/CIRCodeGenFunction.h @@ -0,0 +1,98 @@ +//===-- CIRCIRCodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ +//-*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is the internal per-function state used for cir translation. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CIRCODEGENFUNCTION_H +#define LLVM_CLANG_LIB_CIR_CIRCODEGENFUNCTION_H + +#include "mlir/IR/Value.h" +#include "clang/AST/Type.h" + +namespace clang { +class Expr; +} + +using namespace clang; + +namespace cir { + +// FIXME: for now we are reusing this from lib/Clang/CodeGenFunction.h, which +// isn't available in the include dir. Same for getEvaluationKind below. +enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; + +/// The source of the alignment of an l-value; an expression of +/// confidence in the alignment actually matching the estimate. +enum class AlignmentSource { + /// The l-value was an access to a declared entity or something + /// equivalently strong, like the address of an array allocated by a + /// language runtime. + Decl, + + /// The l-value was considered opaque, so the alignment was + /// determined from a type, but that type was an explicitly-aligned + /// typedef. + AttributedType, + + /// The l-value was considered opaque, so the alignment was + /// determined from a type. + Type +}; + +/// Given that the base address has the given alignment source, what's +/// our confidence in the alignment of the field? +static inline AlignmentSource getFieldAlignmentSource(AlignmentSource Source) { + // For now, we don't distinguish fields of opaque pointers from + // top-level declarations, but maybe we should. + return AlignmentSource::Decl; +} + +class LValueBaseInfo { + AlignmentSource AlignSource; + +public: + explicit LValueBaseInfo(AlignmentSource Source = AlignmentSource::Type) + : AlignSource(Source) {} + AlignmentSource getAlignmentSource() const { return AlignSource; } + void setAlignmentSource(AlignmentSource Source) { AlignSource = Source; } + + void mergeForCast(const LValueBaseInfo &Info) { + setAlignmentSource(Info.getAlignmentSource()); + } +}; + +class CIRCodeGenFunction { +public: + /// If a return statement is being visited, this holds the return statment's + /// result expression. + const Expr *RetExpr = nullptr; + + mlir::Value RetValue = nullptr; + mlir::Type FnRetTy; + clang::QualType FnRetQualTy; + + /// Return the TypeEvaluationKind of QualType \c T. + static TypeEvaluationKind getEvaluationKind(QualType T); + + static bool hasScalarEvaluationKind(QualType T) { + return getEvaluationKind(T) == TEK_Scalar; + } + + static bool hasAggregateEvaluationKind(QualType T) { + return getEvaluationKind(T) == TEK_Aggregate; + } + + CIRCodeGenFunction(); +}; + +} // namespace cir + +#endif \ No newline at end of file diff --git a/clang/include/clang/Sema/CIRBasedWarnings.h b/clang/include/clang/Sema/CIRBasedWarnings.h index 839e9f100b5c..3afdb9294415 100644 --- a/clang/include/clang/Sema/CIRBasedWarnings.h +++ b/clang/include/clang/Sema/CIRBasedWarnings.h @@ -17,6 +17,9 @@ #include "llvm/ADT/DenseMap.h" #include +namespace cir { +class CIRContext; +} // namespace cir namespace clang { class BlockExpr; @@ -25,16 +28,16 @@ class FunctionDecl; class ObjCMethodDecl; class QualType; class Sema; -namespace sema { - class FunctionScopeInfo; -} namespace sema { +class FunctionScopeInfo; + class CIRBasedWarnings { private: Sema &S; AnalysisBasedWarnings::Policy DefaultPolicy; + std::unique_ptr CIRCtx; //class InterProceduralData; //std::unique_ptr IPData; diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp new file mode 100644 index 000000000000..e242c1c6f71c --- /dev/null +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -0,0 +1,526 @@ +//===- CIRBuilder.cpp - MLIR Generation from a Toy AST --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements a simple IR generation targeting MLIR from a Module AST +// for the Toy language. +// +//===----------------------------------------------------------------------===// + +#include "clang/CIR/CIRBuilder.h" +#include "clang/CIR/CIRCodeGenFunction.h" + +#include "mlir/Dialect/CIR/IR/CIRDialect.h" + +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/IR/Verifier.h" + +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/EvaluatedExprVisitor.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/ExprObjC.h" +#include "clang/AST/ParentMap.h" +#include "clang/AST/RecursiveASTVisitor.h" +#include "clang/AST/StmtCXX.h" +#include "clang/AST/StmtObjC.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/Basic/SourceLocation.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Lex/Preprocessor.h" + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/MapVector.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/ScopedHashTable.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/raw_ostream.h" +#include + +using namespace mlir::cir; +using namespace cir; +using namespace clang; + +using llvm::ArrayRef; +using llvm::cast; +using llvm::dyn_cast; +using llvm::isa; +using llvm::ScopedHashTableScope; +using llvm::SmallVector; +using llvm::StringRef; +using llvm::Twine; + +CIRContext::~CIRContext() {} +CIRContext::CIRContext(clang::ASTContext &AC) : astCtx(AC) { Init(); } + +CIRCodeGenFunction::CIRCodeGenFunction() = default; +TypeEvaluationKind CIRCodeGenFunction::getEvaluationKind(QualType type) { + type = type.getCanonicalType(); + while (true) { + switch (type->getTypeClass()) { +#define TYPE(name, parent) +#define ABSTRACT_TYPE(name, parent) +#define NON_CANONICAL_TYPE(name, parent) case Type::name: +#define DEPENDENT_TYPE(name, parent) case Type::name: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("non-canonical or dependent type in IR-generation"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("undeduced type in IR-generation"); + + case Type::ArrayParameter: + llvm_unreachable("NYI"); + + // Various scalar types. + case Type::Builtin: + case Type::Pointer: + case Type::BlockPointer: + case Type::LValueReference: + case Type::RValueReference: + case Type::MemberPointer: + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::FunctionProto: + case Type::FunctionNoProto: + case Type::Enum: + case Type::ObjCObjectPointer: + case Type::Pipe: + case Type::BitInt: + return TEK_Scalar; + + // Complexes. + case Type::Complex: + return TEK_Complex; + + // Arrays, records, and Objective-C objects. + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + case Type::Record: + case Type::ObjCObject: + case Type::ObjCInterface: + return TEK_Aggregate; + + // We operate on atomic values according to their underlying type. + case Type::Atomic: + type = cast(type)->getValueType(); + continue; + } + llvm_unreachable("unknown type kind!"); + } +} + +namespace cir { + +/// Implementation of a CIR/MLIR emission from Clang AST. +/// +/// This will emit operations that are specific to C(++)/ObjC(++) language, +/// preserving the semantics of the language and (hopefully) allow to perform +/// accurate analysis and transformation based on these high level semantics. +class CIRBuildImpl { +public: + CIRBuildImpl(mlir::MLIRContext &context, clang::ASTContext &astctx) + : builder(&context), astCtx(astctx) { + theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); + } + CIRBuildImpl(CIRBuildImpl &) = delete; + CIRBuildImpl &operator=(CIRBuildImpl &) = delete; + ~CIRBuildImpl() = default; + + using SymTableTy = llvm::ScopedHashTable; + using SymTableScopeTy = ScopedHashTableScope; + +private: + /// A "module" matches a c/cpp source file: containing a list of functions. + mlir::ModuleOp theModule; + + /// The builder is a helper class to create IR inside a function. The + /// builder is stateful, in particular it keeps an "insertion point": this + /// is where the next operations will be introduced. + mlir::OpBuilder builder; + + /// The symbol table maps a variable name to a value in the current scope. + /// Entering a function creates a new scope, and the function arguments are + /// added to the mapping. When the processing of a function is terminated, + /// the scope is destroyed and the mappings created in this scope are + /// dropped. + SymTableTy symbolTable; + + /// Hold Clang AST information. + clang::ASTContext &astCtx; + + /// Per-function codegen information. Updated everytime buildCIR is called + /// for FunctionDecls's. + CIRCodeGenFunction *CurCCGF = nullptr; + + /// Helper conversion from Clang source location to an MLIR location. + mlir::Location getLoc(SourceLocation SLoc) { + const SourceManager &SM = astCtx.getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(SLoc); + StringRef Filename = PLoc.getFilename(); + return mlir::FileLineColLoc::get(builder.getStringAttr(Filename), + PLoc.getLine(), PLoc.getColumn()); + } + + /// Declare a variable in the current scope, return success if the variable + /// wasn't declared yet. + mlir::LogicalResult declare(StringRef var, mlir::Value value) { + if (symbolTable.count(var)) + return mlir::failure(); + symbolTable.insert(var, value); + return mlir::success(); + } + +public: + mlir::ModuleOp getModule() { return theModule; } + + class ScalarExprEmitter : public StmtVisitor { + CIRCodeGenFunction &CGF; + CIRBuildImpl &Builder; + + public: + ScalarExprEmitter(CIRCodeGenFunction &cgf, CIRBuildImpl &builder) + : CGF(cgf), Builder(builder) { + (void)CGF; + } + + mlir::Value Visit(Expr *E) { + return StmtVisitor::Visit(E); + } + + class RawAddress { + mlir::Value Pointer; + CharUnits Alignment; + + public: + RawAddress(mlir::Value pointer, CharUnits alignment) + : Pointer(pointer), Alignment(alignment) { + assert((!alignment.isZero() || pointer == nullptr) && + "creating valid address with invalid alignment"); + } + + static RawAddress invalid() { return RawAddress(nullptr, CharUnits()); } + bool isValid() const { return Pointer != nullptr; } + + mlir::Value getPointer() const { + // assert(isValid()); + return Pointer; + } + + /// Return the alignment of this pointer. + CharUnits getAlignment() const { + // assert(isValid()); + return Alignment; + } + }; + class LValue { + private: + void Initialize(CharUnits Alignment, LValueBaseInfo BaseInfo) { + // assert((!Alignment.isZero()) && // || Type->isIncompleteType()) && + // "initializing l-value with zero alignment!"); + + const unsigned MaxAlign = 1U << 31; + this->Alignment = Alignment.getQuantity() <= MaxAlign + ? Alignment.getQuantity() + : MaxAlign; + assert(this->Alignment == Alignment.getQuantity() && + "Alignment exceeds allowed max!"); + this->BaseInfo = BaseInfo; + } + + // The alignment to use when accessing this lvalue. (For vector elements, + // this is the alignment of the whole vector) + unsigned Alignment; + mlir::Value V; + LValueBaseInfo BaseInfo; + + public: + mlir::Value getPointer() const { return V; } + + CharUnits getAlignment() const { + return CharUnits::fromQuantity(Alignment); + } + + RawAddress getAddress() const { + return RawAddress(getPointer(), getAlignment()); + } + + LValueBaseInfo getBaseInfo() const { return BaseInfo; } + void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; } + + static LValue makeAddr(RawAddress address, + AlignmentSource Source = AlignmentSource::Type) { + LValue R; + R.V = address.getPointer(); + R.Initialize(address.getAlignment(), LValueBaseInfo(Source)); + return R; + } + }; + + LValue EmitDeclRefLValue(const DeclRefExpr *E) { + const NamedDecl *ND = E->getDecl(); + + assert(E->isNonOdrUse() != NOUR_Unevaluated && + "should not emit an unevaluated operand"); + + if (const auto *VD = dyn_cast(ND)) { + // Global Named registers access via intrinsics only + assert(VD->getStorageClass() != SC_Register && "not implemented"); + assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); + assert(!E->refersToEnclosingVariableOrCapture() && "not implemented"); + assert(!(VD->hasLinkage() || VD->isStaticDataMember()) && + "not implemented"); + assert(!VD->isEscapingByref() && "not implemented"); + assert(!VD->getType()->isReferenceType() && "not implemented"); + assert(Builder.symbolTable.count(VD->getName()) && + "should be already mapped"); + + mlir::Value V = Builder.symbolTable.lookup(VD->getName()); + assert(V && "Name lookup must succeed"); + + LValue LV = LValue::makeAddr(RawAddress(V, CharUnits::fromQuantity(4)), + AlignmentSource::Decl); + return LV; + } + + llvm_unreachable("Unhandled DeclRefExpr?"); + } + + LValue EmitLValue(const Expr *E) { + switch (E->getStmtClass()) { + case Expr::DeclRefExprClass: + return EmitDeclRefLValue(cast(E)); + default: + emitError(Builder.getLoc(E->getExprLoc()), + "l-value not implemented for '") + << E->getStmtClassName() << "'"; + break; + } + return LValue::makeAddr(RawAddress::invalid()); + } + + /// Emits the address of the l-value, then loads and returns the result. + mlir::Value buildLoadOfLValue(const Expr *E) { + LValue LV = EmitLValue(E); + // mlir::Value V = EmitLoadOfLValue(LV, E->getExprLoc()); + + // EmitLValueAlignmentAssumption(E, V); + // return V; + return LV.getPointer(); + } + + // Handle l-values. + mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { + // FIXME: we could try to emit this as constant, similar to LLVM IR + // codegen. + return buildLoadOfLValue(E); + } + + // Emit code for an explicit or implicit cast. Implicit + // casts have to handle a more broad range of conversions than explicit + // casts, as they handle things like function to ptr-to-function decay + // etc. + mlir::Value VisitCastExpr(CastExpr *CE) { + Expr *E = CE->getSubExpr(); + QualType DestTy = CE->getType(); + CastKind Kind = CE->getCastKind(); + switch (Kind) { + case CK_LValueToRValue: + assert(Builder.astCtx.hasSameUnqualifiedType(E->getType(), DestTy)); + assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); + return Visit(const_cast(E)); + default: + emitError(Builder.getLoc(CE->getExprLoc()), + "cast kind not implemented: '") + << CE->getCastKindName() << "'"; + return nullptr; + } + } + + mlir::Value VisitExpr(Expr *E) { + emitError(Builder.getLoc(E->getExprLoc()), "scalar exp no implemented: '") + << E->getStmtClassName() << "'"; + if (E->getType()->isVoidType()) + return nullptr; + // FIXME: find a way to return "undef"... + // return llvm::UndefValue::get(CGF.ConvertType(E->getType())); + return nullptr; + } + }; + + /// Emit the computation of the specified expression of scalar type, + /// ignoring the result. + mlir::Value buildScalarExpr(const Expr *E) { + assert(E && CIRCodeGenFunction::hasScalarEvaluationKind(E->getType()) && + "Invalid scalar expression to emit"); + + return ScalarExprEmitter(*CurCCGF, *this).Visit(const_cast(E)); + } + + mlir::LogicalResult buildReturnStmt(const ReturnStmt &S) { + // Emit the result value, even if unused, to evaluate the side effects. + const Expr *RV = S.getRetValue(); + assert(!isa(RV) && "unimplemented"); + assert(!(astCtx.getLangOpts().ElideConstructors && S.getNRVOCandidate() && + S.getNRVOCandidate()->isNRVOVariable()) && + "unimplemented"); + assert(!CurCCGF->FnRetQualTy->isReferenceType() && "unimplemented"); + + if (!RV) // Do nothing (return value is left uninitialized) + return mlir::success(); + + mlir::Value V = nullptr; + switch (CIRCodeGenFunction::getEvaluationKind(RV->getType())) { + case TEK_Scalar: + V = buildScalarExpr(RV); + // Builder.CreateStore(EmitScalarExpr(RV), ReturnValue); + break; + case TEK_Complex: + case TEK_Aggregate: + llvm::errs() << "ReturnStmt EvaluationKind not implemented\n"; + return mlir::failure(); + } + + CurCCGF->RetValue = V; + // Otherwise, this return operation has zero operands. + if (!V || (RV && RV->getType()->isVoidType())) { + // FIXME: evaluate for side effects. + } + + builder.create(getLoc(RV->getExprLoc()), + V ? ArrayRef(V) : ArrayRef()); + return mlir::success(); + } + + mlir::LogicalResult buildCompoundStmt(const CompoundStmt &S) { + // Create a scope in the symbol table to hold variable declarations local + // to this compound statement. + SymTableScopeTy varScope(symbolTable); + for (auto *CurStmt : S.body()) + if (buildStmt(CurStmt).failed()) + return mlir::failure(); + + return mlir::success(); + } + + mlir::LogicalResult buildStmt(const Stmt *S) { + switch (S->getStmtClass()) { + default: + llvm::errs() << "CIR codegen for '" << S->getStmtClassName() + << "' not implemented\n"; + return mlir::failure(); + case Stmt::CompoundStmtClass: + return buildCompoundStmt(cast(*S)); + case Stmt::ReturnStmtClass: + return buildReturnStmt(cast(*S)); + } + + return mlir::success(); + } + + // Emit a new function and add it to the MLIR module. + mlir::FuncOp buildCIR(CIRCodeGenFunction *CCGF, const FunctionDecl *FD) { + CurCCGF = CCGF; + + // Create a scope in the symbol table to hold variable declarations. + SymTableScopeTy varScope(symbolTable); + + const CXXMethodDecl *MD = dyn_cast(FD); + assert(!MD && "methods not implemented"); + auto loc = getLoc(FD->getLocation()); + + // Create an MLIR function for the given prototype. + llvm::SmallVector argTypes; + + for (auto *Param : FD->parameters()) + argTypes.push_back(getType(Param->getType())); + + CurCCGF->FnRetQualTy = FD->getReturnType(); + CurCCGF->FnRetTy = getType(CurCCGF->FnRetQualTy); + auto funcType = builder.getFunctionType(argTypes, CurCCGF->FnRetTy); + mlir::FuncOp function = mlir::FuncOp::create(loc, FD->getName(), funcType); + if (!function) + return nullptr; + + // In MLIR the entry block of the function is special: it must have the + // same argument list as the function itself. + auto &entryBlock = *function.addEntryBlock(); + + // Declare all the function arguments in the symbol table. + for (const auto nameValue : + llvm::zip(FD->parameters(), entryBlock.getArguments())) { + if (failed(declare(std::get<0>(nameValue)->getName(), + std::get<1>(nameValue)))) + return nullptr; + } + + // Set the insertion point in the builder to the beginning of the + // function body, it will be used throughout the codegen to create + // operations in this function. + builder.setInsertionPointToStart(&entryBlock); + + // Emit the body of the function. + if (mlir::failed(buildStmt(FD->getBody()))) { + function.erase(); + return nullptr; + } + + ReturnOp returnOp; + if (!entryBlock.empty()) + returnOp = dyn_cast(entryBlock.back()); + if (!returnOp) + builder.create(loc); + + assert(function.verifyBody().succeeded()); + theModule.push_back(function); + return function; + } + + mlir::Type getType(const QualType &type) { + // FIXME: actually map into the appropriated types. + return builder.getI32Type(); + } + + void verifyModule() { + // Verify the module after we have finished constructing it, this will + // check the structural properties of the IR and invoke any specific + // verifiers we have on the CIR operations. + if (failed(mlir::verify(theModule))) + theModule.emitError("module verification error"); + } +}; +} // namespace cir + +void CIRContext::Init() { + mlirCtx = std::make_unique(); + mlirCtx->getOrLoadDialect(); + builder = std::make_unique(*mlirCtx.get(), astCtx); +} + +bool CIRContext::EmitFunction(const FunctionDecl *FD) { + CIRCodeGenFunction CCGF{}; + auto func = builder->buildCIR(&CCGF, FD); + assert(func && "should emit function"); + + // FIXME: currently checked after emitting every function, should + // only run when the consumer of the context shutdowns. + builder->verifyModule(); + builder->getModule().dump(); + return true; +} diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt new file mode 100644 index 000000000000..2f003a624c3e --- /dev/null +++ b/clang/lib/CIR/CMakeLists.txt @@ -0,0 +1,32 @@ +set(LLVM_LINK_COMPONENTS + Core + Support + ) + +add_subdirectory(CodeGen) + +include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) +include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_clang_library(clangCIR + CIRBuilder.cpp + + DEPENDS + MLIRCIROpsIncGen + + LINK_LIBS + clangAST + clangBasic + clangEdit + clangLex + ${dialect_libs} + MLIRCIR + MLIRAnalysis + MLIRIR + MLIRParser + MLIRSideEffectInterfaces + MLIRTransforms + MLIRSupport + ) diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt new file mode 100644 index 000000000000..2ab3cbe1df4b --- /dev/null +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -0,0 +1,8 @@ +set( + LLVM_LINK_COMPONENTS + Core + Support +) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/CMakeLists.txt b/clang/lib/CMakeLists.txt index 0cac86451f39..c4f28b8b3c41 100644 --- a/clang/lib/CMakeLists.txt +++ b/clang/lib/CMakeLists.txt @@ -31,3 +31,7 @@ if(CLANG_INCLUDE_TESTS) endif() add_subdirectory(Interpreter) add_subdirectory(Support) + +#if(CLANG_ENABLE_CIR) + add_subdirectory(CIR) +#endif() diff --git a/clang/lib/Sema/CIRBasedWarnings.cpp b/clang/lib/Sema/CIRBasedWarnings.cpp index e14c05699253..de3370e540ca 100644 --- a/clang/lib/Sema/CIRBasedWarnings.cpp +++ b/clang/lib/Sema/CIRBasedWarnings.cpp @@ -37,6 +37,7 @@ #include "llvm/Support/Casting.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "clang/CIR/CIRBuilder.h" #include #include @@ -44,147 +45,6 @@ using namespace clang; -namespace { -/// -/// Helpers -/// -class reverse_children { - llvm::SmallVector childrenBuf; - ArrayRef children; - -public: - reverse_children(Stmt *S); - - using iterator = ArrayRef::reverse_iterator; - - iterator begin() const { return children.rbegin(); } - iterator end() const { return children.rend(); } -}; - -// FIXME: we might not even need this. -reverse_children::reverse_children(Stmt *S) { - if (CallExpr *CE = dyn_cast(S)) { - children = CE->getRawSubExprs(); - return; - } - switch (S->getStmtClass()) { - // Note: Fill in this switch with more cases we want to optimize. - case Stmt::InitListExprClass: { - InitListExpr *IE = cast(S); - children = llvm::ArrayRef(reinterpret_cast(IE->getInits()), - IE->getNumInits()); - return; - } - default: - break; - } - - // Default case for all other statements. - for (Stmt *SubStmt : S->children()) - childrenBuf.push_back(SubStmt); - - // This needs to be done *after* childrenBuf has been populated. - children = childrenBuf; -} - -/// -/// CIRBuilder -/// - -/// CIRBuilder - This class implements CIR construction from an AST. -class CIRBuilder { -public: - typedef int CIRUnit; - explicit CIRBuilder(ASTContext *astContext) : Context(astContext) {} - - ASTContext *Context; - - // buildCFG - Used by external clients to construct the CFG. - // std::unique_ptr buildCIR(const Decl *D, Stmt *Statement); - void buildCIR(const Decl *D, Stmt *Statement); - -private: - // Visitors to walk an AST and construct CIR. - CIRUnit *VisitImplicitCastExpr(ImplicitCastExpr *E); - CIRUnit *VisitCompoundStmt(CompoundStmt *C); - CIRUnit *VisitDeclStmt(DeclStmt *DS); - - // Basic components - CIRUnit *Visit(Stmt *S); - CIRUnit *VisitStmt(Stmt *S); - CIRUnit *VisitChildren(Stmt *S); -}; - -using CIRUnit = CIRBuilder::CIRUnit; - -/// -/// Basic visitors -/// - -/// Visit - Walk the subtree of a statement and add extra -/// blocks for ternary operators, &&, and ||. We also process "," and -/// DeclStmts (which may contain nested control-flow). -CIRUnit *CIRBuilder::Visit(Stmt *S) { - if (!S) { - return nullptr; - } - - // if (Expr *E = dyn_cast(S)) - // S = E->IgnoreParens(); - - switch (S->getStmtClass()) { - default: - return VisitStmt(S); - - case Stmt::CompoundStmtClass: - return VisitCompoundStmt(cast(S)); - - case Stmt::ImplicitCastExprClass: - return VisitImplicitCastExpr(cast(S)); - - case Stmt::DeclStmtClass: - return VisitDeclStmt(cast(S)); - } -} - -CIRUnit *CIRBuilder::VisitStmt(Stmt *S) { - // FIXME: do work. - return VisitChildren(S); -} - -/// VisitChildren - Visit the children of a Stmt. -CIRUnit *CIRBuilder::VisitChildren(Stmt *S) { - // Visit the children in their reverse order so that they appear in - // left-to-right (natural) order in the CFG. - // reverse_children RChildren(S); - // for (Stmt *Child : RChildren) { - // if (Child) - // if (CIRUnit *R = Visit(Child)) - // B = R; - // } - return nullptr; // B; -} - -/// -/// Other visitors -/// -CIRUnit *CIRBuilder::VisitImplicitCastExpr(ImplicitCastExpr *E) { - // FIXME: do work. - return nullptr; -} - -CIRUnit *CIRBuilder::VisitCompoundStmt(CompoundStmt *C) { - // FIXME: do work. - return nullptr; -} - -CIRUnit *CIRBuilder::VisitDeclStmt(DeclStmt *DS) { - // FIXME: do work. - return nullptr; -} - -} // namespace - /// /// CIRBasedWarnings /// @@ -206,6 +66,8 @@ sema::CIRBasedWarnings::CIRBasedWarnings(Sema &s) : S(s) { DefaultPolicy.enableConsumedAnalysis = isEnabled(D, warn_use_in_invalid_state); + + CIRCtx = std::make_unique(S.getASTContext()); } // We need this here for unique_ptr with forward declared class. @@ -243,17 +105,15 @@ void clang::sema::CIRBasedWarnings::IssueWarnings( return; } - const Stmt *Body = D->getBody(); - assert(Body); + const FunctionDecl *FD = dyn_cast(D); + assert(FD && "Only know how to handle functions"); // TODO: up to this point this behaves the same as // AnalysisBasedWarnings::IssueWarnings // Unlike Clang CFG, we share CIR state between each analyzed function, // retrieve or create a new context. - mlir::MLIRContext context; - // Load our Dialect in this MLIR Context. - context.getOrLoadDialect(); + CIRCtx->EmitFunction(FD); } void clang::sema::CIRBasedWarnings::PrintStats() const { diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt index d63d39dd361d..7c0399b681fc 100644 --- a/clang/lib/Sema/CMakeLists.txt +++ b/clang/lib/Sema/CMakeLists.txt @@ -117,6 +117,7 @@ add_clang_library(clangSema clangLex clangSupport + clangCIR ${dialect_libs} MLIRCIR MLIRAnalysis diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 9394f7fc1ce5..3fc9ce62ed88 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -38,7 +38,7 @@ def ReturnOp : CIR_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { let summary = "return operation"; let description = [{ The "return" operation represents a return operation within a function. - The operation takes an optional tensor operand and produces no results. + The operation takes an optional operand and produces no results. The operand type must match the signature of the function that contains the operation. For example: diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index f3bf5dfe1a87..4976daeedbfe 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -40,7 +40,35 @@ void cir::CIRDialect::initialize() { // ReturnOp mlir::LogicalResult ReturnOp::verify() { - return getOperation()->emitError() << "not implemented"; + // We know that the parent operation is a function, because of the 'HasParent' + // trait attached to the operation definition. + auto function = cast(getOperation()->getParentOp()); + + /// ReturnOps can only have a single optional operand. + if (getNumOperands() > 1) + return emitOpError() << "expects at most 1 return operand"; + + // The operand number and types must match the function signature. + const auto &results = function.getFunctionType().getResults(); + if (getNumOperands() != results.size()) + return emitOpError() << "does not return the same number of values (" + << getNumOperands() << ") as the enclosing function (" + << results.size() << ")"; + + // If the operation does not have an input, we are done. + if (!hasOperand()) + return mlir::success(); + + auto inputType = *operand_type_begin(); + auto resultType = results.front(); + + // Check that the result type of the function matches the operand type. + if (inputType == resultType) + return mlir::success(); + + return emitError() << "type of return operand (" << inputType + << ") doesn't match function result type (" << resultType + << ")"; } //===----------------------------------------------------------------------===// From 31b834b737856041840f6f24676d17b3441f5d1c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 27 Aug 2021 01:16:06 -0700 Subject: [PATCH 0021/2301] [CIR] Add -fcir-output for writing clang IR output to a file Two modes available: - `-fcir-output` creates a new file with suffixed with .cir on top of the source file. - `-fcir-output=` uses user provided file path. Currently only supported when the driver takes a single source input. --- .../clang/Basic/DiagnosticDriverKinds.td | 2 ++ clang/include/clang/Basic/LangOptions.h | 3 +++ clang/include/clang/CIR/CIRBuilder.h | 2 ++ clang/include/clang/Driver/Options.td | 8 ++++++- .../clang/Sema/AnalysisBasedWarnings.h | 4 ++-- clang/lib/CIR/CIRBuilder.cpp | 22 +++++++++++++++++-- clang/lib/Driver/ToolChains/Clang.cpp | 22 +++++++++++++++++++ clang/lib/Frontend/CompilerInvocation.cpp | 3 +++ 8 files changed, 61 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td index 612f7e330ba5..9fc98fc29b34 100644 --- a/clang/include/clang/Basic/DiagnosticDriverKinds.td +++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td @@ -355,6 +355,8 @@ def err_drv_incompatible_omp_arch : Error<"OpenMP target architecture '%0' point def err_drv_omp_host_ir_file_not_found : Error< "provided host compiler IR file '%0' is required to generate code for OpenMP " "target regions but cannot be found">; +def err_drv_cir_multiple_input : Error< + "clangir (cir) generation requires exactly one input source file">; def err_drv_omp_host_target_not_supported : Error< "target '%0' is not a supported OpenMP host target">; def err_drv_expecting_fopenmp_with_fopenmp_targets : Error< diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h index 949c8f5d448b..485b58632003 100644 --- a/clang/include/clang/Basic/LangOptions.h +++ b/clang/include/clang/Basic/LangOptions.h @@ -576,6 +576,9 @@ class LangOptions : public LangOptionsBase { /// host code generation. std::string OMPHostIRFile; + /// Name of the CIR file to output to disk. + std::string CIRFile; + /// The user provided compilation unit ID, if non-empty. This is used to /// externalize static variables which is needed to support accessing static /// device variables in host code for single source offloading languages diff --git a/clang/include/clang/CIR/CIRBuilder.h b/clang/include/clang/CIR/CIRBuilder.h index a5420f986b22..641f9ce3d8a7 100644 --- a/clang/include/clang/CIR/CIRBuilder.h +++ b/clang/include/clang/CIR/CIRBuilder.h @@ -14,6 +14,7 @@ #ifndef CLANG_CIRBUILDER_H_ #define CLANG_CIRBUILDER_H_ +#include "llvm/Support/ToolOutputFile.h" #include namespace mlir { @@ -42,6 +43,7 @@ class CIRContext { private: std::unique_ptr mlirCtx; std::unique_ptr builder; + std::unique_ptr cirOut; clang::ASTContext &astCtx; }; diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 6fe719db3f1b..1aa12080da55 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -1959,7 +1959,13 @@ defm cir_warnings : BoolFOption<"cir-warnings", LangOpts<"CIRWarnings">, DefaultFalse, PosFlag, NegFlag, - BothFlags<[], [ClangOption, CC1Option], " CIR to emit (analysis based) warnings">>; + BothFlags<[], [CC1Option], " CIR to emit (analysis based) warnings">>; +def fcir_output_EQ : Joined<["-"], "fcir-output=">, + Group, HelpText<"Write clang IR (cir) to output file">, + Visibility<[ClangOption, CC1Option]>, MarshallingInfoString>, + MetaVarName<"">; +def fcir_output : Flag<["-"], "fcir-output">, + Group, Visibility<[ClangOption, CC1Option]>; defm addrsig : BoolFOption<"addrsig", CodeGenOpts<"Addrsig">, DefaultFalse, diff --git a/clang/include/clang/Sema/AnalysisBasedWarnings.h b/clang/include/clang/Sema/AnalysisBasedWarnings.h index 2c18350c72b0..92430f1982d0 100644 --- a/clang/include/clang/Sema/AnalysisBasedWarnings.h +++ b/clang/include/clang/Sema/AnalysisBasedWarnings.h @@ -20,14 +20,14 @@ namespace clang { class BlockExpr; -class CIRBasedWarnings; class Decl; class FunctionDecl; class QualType; class Sema; namespace sema { - class FunctionScopeInfo; +class CIRBasedWarnings; +class FunctionScopeInfo; } namespace sema { diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index e242c1c6f71c..0048a48c7781 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -47,6 +47,8 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/FileSystem.h" #include "llvm/Support/raw_ostream.h" #include @@ -63,7 +65,6 @@ using llvm::SmallVector; using llvm::StringRef; using llvm::Twine; -CIRContext::~CIRContext() {} CIRContext::CIRContext(clang::ASTContext &AC) : astCtx(AC) { Init(); } CIRCodeGenFunction::CIRCodeGenFunction() = default; @@ -507,10 +508,28 @@ class CIRBuildImpl { }; } // namespace cir +CIRContext::~CIRContext() { + if (cirOut) { + // FIXME: pick a more verbose level. + builder->getModule()->print(cirOut->os()); + cirOut->keep(); + } +} + void CIRContext::Init() { + using namespace llvm; + mlirCtx = std::make_unique(); mlirCtx->getOrLoadDialect(); builder = std::make_unique(*mlirCtx.get(), astCtx); + + std::error_code EC; + StringRef outFile = astCtx.getLangOpts().CIRFile; + if (outFile.empty()) + return; + cirOut = std::make_unique(outFile, EC, sys::fs::OF_None); + if (EC) + report_fatal_error("Failed to open " + outFile + ": " + EC.message()); } bool CIRContext::EmitFunction(const FunctionDecl *FD) { @@ -521,6 +540,5 @@ bool CIRContext::EmitFunction(const FunctionDecl *FD) { // FIXME: currently checked after emitting every function, should // only run when the consumer of the context shutdowns. builder->verifyModule(); - builder->getModule().dump(); return true; } diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 5a20b1b3acd0..80423d5d0e7a 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -8155,6 +8155,28 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Input.getInputArg().renderAsInput(Args, CmdArgs); } + if (Arg *A = Args.getLastArg(options::OPT_fcir_output_EQ, + options::OPT_fcir_output)) { + if (A->getOption().matches(options::OPT_fcir_output_EQ)) { + StringRef Value = A->getValue(); + CmdArgs.push_back(Args.MakeArgString("-fcir-output=" + Value)); + } else { + std::string OutFile; + for (const InputInfo &Input : FrontendInputs) { + if (!Input.isFilename()) + continue; + OutFile = Input.getFilename(); + OutFile.append(".cir"); + StringRef Value = OutFile; + CmdArgs.push_back(Args.MakeArgString("-fcir-output=" + Value)); + break; + } + + if (OutFile.empty()) + D.Diag(diag::err_drv_cir_multiple_input); + } + } + if (D.CC1Main && !D.CCGenDiagnostics) { // Invoke the CC1 directly in this process C.addCommand(std::make_unique( diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 6130b54410d7..47d3a0780185 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -4315,6 +4315,9 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args, << Opts.OMPHostIRFile; } + if (Arg *A = Args.getLastArg(options::OPT_fcir_output_EQ)) + Opts.CIRFile = A->getValue(); + // Set CUDA mode for OpenMP target NVPTX/AMDGCN if specified in options Opts.OpenMPCUDAMode = Opts.OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN()) && From 69adc1e51bb8a2dae7e96fbde9e4ffda3fcf10ab Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 20 Sep 2021 16:37:01 -0700 Subject: [PATCH 0022/2301] [CIR] Add simple test for ClangIR codegen - Codegen of ClangIR out of clang AST, ClangIR down to anything else isn't yet implemented. - Right now -fcir-warnings -fcir-output must be used, in the future we should add an independent codegen flag and use it to drive tests such as these. --- clang/lib/CIR/CIRBuilder.cpp | 1 + clang/test/CIR/CodeGen/basic.c | 14 +++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 0048a48c7781..80b9fa2789ff 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -520,6 +520,7 @@ void CIRContext::Init() { using namespace llvm; mlirCtx = std::make_unique(); + mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); builder = std::make_unique(*mlirCtx.get(), astCtx); diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 5c1031cccc41..d65701fa2329 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -1 +1,13 @@ -// RUN: true +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -fcir-warnings %s -fcir-output=%t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +int foo(int i) { + return i; +} + +// CHECK: module { +// CHECK-NEXT: func @foo(%arg0: i32) -> i32 { +// CHECK-NEXT: cir.return %arg0 : i32 +// CHECK-NEXT: } +// CHECK-NEXT: } From 379c1e3c59e84ee9b9b0f820ca4b9a8dbd5d6f73 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 20 Sep 2021 17:06:32 -0700 Subject: [PATCH 0023/2301] [CIR] Update doc a tiny bit to describe initial behavior --- llvm/docs/CIR.rst | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/llvm/docs/CIR.rst b/llvm/docs/CIR.rst index b829eaf3ac99..a965f2e1ebe0 100644 --- a/llvm/docs/CIR.rst +++ b/llvm/docs/CIR.rst @@ -12,13 +12,32 @@ This document aims to provide an overview of the design and implementation of a Clang IR, a high level IR allowing more analysis and future optimizations. +CIR is used as a short for ClangIR over commit messages and +other related code. + Usage in Clang ============== +Current strategy is to replace analysis based warnings with +analysis on top of CIR, using ``-fcir-warnings`` turns on such +analysis (current none). + +The ``-fcir-output`` and ``-fcir-output=`` flags can be used +to output the generated CIR (currently needs to be combined with +``-fcir-warnings`` to work). + +Implementation Notes +==================== + +- ``PopFunctionScopeInfo`` is the currentt entry point for CFG usage +in ``AnalysisBasedWarning.cpp``. The same entry point is used by the +CIR builder to emit functions. -Usage in Clang happens right now as part of replacing current -IssueWarnings -AnalysisWarnings.IssueWarnings +TODO's +====== -CFG usage in ``AnalysisBasedWarning.cpp`` to use CIR instead of -Clang's CFG, as part of ``PopFunctionScopeInfo``. \ No newline at end of file +- Other module related emission besides functions (and all currently +end of translation defered stuff). +- Some data structures used for LLVM codegen can be made more +generic and be reused from CIRBuilder. Duplicating content right +now to prevent potential frequent merge conflicts. \ No newline at end of file From 4cc9538e0b1787c64436420d1d76783d93da94c3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 20 Sep 2021 17:46:32 -0700 Subject: [PATCH 0024/2301] [CIR] Verify module only before shutdown --- clang/lib/CIR/CIRBuilder.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 80b9fa2789ff..1c157c6f27e5 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -509,6 +509,9 @@ class CIRBuildImpl { } // namespace cir CIRContext::~CIRContext() { + // Run module verifier before shutdown + builder->verifyModule(); + if (cirOut) { // FIXME: pick a more verbose level. builder->getModule()->print(cirOut->os()); @@ -537,9 +540,5 @@ bool CIRContext::EmitFunction(const FunctionDecl *FD) { CIRCodeGenFunction CCGF{}; auto func = builder->buildCIR(&CCGF, FD); assert(func && "should emit function"); - - // FIXME: currently checked after emitting every function, should - // only run when the consumer of the context shutdowns. - builder->verifyModule(); return true; } From 0579a22bd1c9b4ba5ac13950d933fe934892635d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 20 Sep 2021 17:57:16 -0700 Subject: [PATCH 0025/2301] [CIR][NFC] Fix build warnings --- clang/lib/CIR/CIRBuilder.cpp | 7 ++++--- llvm/docs/CIR.rst | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 1c157c6f27e5..8cc264501e3d 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -192,7 +192,7 @@ class CIRBuildImpl { mlir::ModuleOp getModule() { return theModule; } class ScalarExprEmitter : public StmtVisitor { - CIRCodeGenFunction &CGF; + LLVM_ATTRIBUTE_UNUSED CIRCodeGenFunction &CGF; CIRBuildImpl &Builder; public: @@ -488,7 +488,8 @@ class CIRBuildImpl { if (!returnOp) builder.create(loc); - assert(function.verifyBody().succeeded()); + if (mlir::failed(function.verifyBody())) + return nullptr; theModule.push_back(function); return function; } @@ -509,7 +510,7 @@ class CIRBuildImpl { } // namespace cir CIRContext::~CIRContext() { - // Run module verifier before shutdown + // Run module verifier before shutdown. builder->verifyModule(); if (cirOut) { diff --git a/llvm/docs/CIR.rst b/llvm/docs/CIR.rst index a965f2e1ebe0..aa178c7b812b 100644 --- a/llvm/docs/CIR.rst +++ b/llvm/docs/CIR.rst @@ -40,4 +40,5 @@ TODO's end of translation defered stuff). - Some data structures used for LLVM codegen can be made more generic and be reused from CIRBuilder. Duplicating content right -now to prevent potential frequent merge conflicts. \ No newline at end of file +now to prevent potential frequent merge conflicts. + - Split out into header files all potential common code. \ No newline at end of file From 30db11dff88bbe104215cec79038dbb14e04f5a7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 22 Sep 2021 20:48:04 -0700 Subject: [PATCH 0026/2301] [CIR] Add memref to handle allocas We might decide to use something custom later and lower that to memref's instead, but for now use this for the convenience of mapping lvalues. --- clang/lib/CIR/CIRBuilder.cpp | 49 +++++++++++++++++++++++----------- clang/lib/CIR/CMakeLists.txt | 1 + clang/test/CIR/CodeGen/basic.c | 7 +++-- llvm/docs/CIR.rst | 3 ++- 4 files changed, 41 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 8cc264501e3d..abcf3f6add74 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -17,6 +17,7 @@ #include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" @@ -181,10 +182,20 @@ class CIRBuildImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(StringRef var, mlir::Value value) { + mlir::LogicalResult declare(StringRef var, mlir::Value value, + mlir::Location loc) { if (symbolTable.count(var)) return mlir::failure(); - symbolTable.insert(var, value); + + mlir::MemRefType type = mlir::MemRefType::get({}, builder.getI32Type()); + auto alloc = builder.create(loc, type); + auto *parentBlock = alloc->getBlock(); + alloc->moveBefore(&parentBlock->front()); + + // Insert into the symbol table, allocate some stack space in the + // function entry block. + symbolTable.insert(var, alloc); + return mlir::success(); } @@ -319,17 +330,16 @@ class CIRBuildImpl { /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { LValue LV = EmitLValue(E); - // mlir::Value V = EmitLoadOfLValue(LV, E->getExprLoc()); - - // EmitLValueAlignmentAssumption(E, V); - // return V; - return LV.getPointer(); + auto load = Builder.builder.create( + Builder.getLoc(E->getExprLoc()), LV.getPointer()); + // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); + return load; } // Handle l-values. mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { - // FIXME: we could try to emit this as constant, similar to LLVM IR - // codegen. + // FIXME: we could try to emit this as constant first, see + // CGF.tryEmitAsConstant(E) return buildLoadOfLValue(E); } @@ -463,19 +473,25 @@ class CIRBuildImpl { // same argument list as the function itself. auto &entryBlock = *function.addEntryBlock(); + // Set the insertion point in the builder to the beginning of the + // function body, it will be used throughout the codegen to create + // operations in this function. + builder.setInsertionPointToStart(&entryBlock); + // Declare all the function arguments in the symbol table. for (const auto nameValue : llvm::zip(FD->parameters(), entryBlock.getArguments())) { - if (failed(declare(std::get<0>(nameValue)->getName(), - std::get<1>(nameValue)))) + auto *paramVar = std::get<0>(nameValue); + auto paramVal = std::get<1>(nameValue); + if (failed(declare(paramVar->getName(), paramVal, + getLoc(paramVar->getSourceRange().getBegin())))) return nullptr; + // Store params in local storage. FIXME: is this really needed + // at this level of representation? + mlir::Value addr = symbolTable.lookup(paramVar->getName()); + builder.create(loc, paramVal, addr); } - // Set the insertion point in the builder to the beginning of the - // function body, it will be used throughout the codegen to create - // operations in this function. - builder.setInsertionPointToStart(&entryBlock); - // Emit the body of the function. if (mlir::failed(buildStmt(FD->getBody()))) { function.erase(); @@ -526,6 +542,7 @@ void CIRContext::Init() { mlirCtx = std::make_unique(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); + mlirCtx->getOrLoadDialect(); builder = std::make_unique(*mlirCtx.get(), astCtx); std::error_code EC; diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 2f003a624c3e..38955b93dd76 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -29,4 +29,5 @@ add_clang_library(clangCIR MLIRSideEffectInterfaces MLIRTransforms MLIRSupport + MLIRMemRefDialect ) diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index d65701fa2329..093245635196 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -6,8 +6,11 @@ int foo(int i) { return i; } -// CHECK: module { +// CHECK: module { // CHECK-NEXT: func @foo(%arg0: i32) -> i32 { -// CHECK-NEXT: cir.return %arg0 : i32 +// CHECK-NEXT: %0 = memref.alloca() : memref +// CHECK-NEXT: memref.store %arg0, %0[] : memref +// CHECK-NEXT: %1 = memref.load %0[] : memref +// CHECK-NEXT: cir.return %1 : i32 // CHECK-NEXT: } // CHECK-NEXT: } diff --git a/llvm/docs/CIR.rst b/llvm/docs/CIR.rst index aa178c7b812b..2bfcb89035dc 100644 --- a/llvm/docs/CIR.rst +++ b/llvm/docs/CIR.rst @@ -35,7 +35,8 @@ CIR builder to emit functions. TODO's ====== - +- LValues + - Add proper alignment information - Other module related emission besides functions (and all currently end of translation defered stuff). - Some data structures used for LLVM codegen can be made more From 57cc54c7b15372d2cc45a5e4f535bb4a75f7a485 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 22 Sep 2021 21:45:48 -0700 Subject: [PATCH 0027/2301] [CIR][NFC] Change symbol table key to plain const Decl* --- clang/lib/CIR/CIRBuilder.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index abcf3f6add74..92fa203960d9 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -145,8 +145,9 @@ class CIRBuildImpl { CIRBuildImpl &operator=(CIRBuildImpl &) = delete; ~CIRBuildImpl() = default; - using SymTableTy = llvm::ScopedHashTable; - using SymTableScopeTy = ScopedHashTableScope; + // FIXME: instead of mlir::Value, hold a RawAddress here. + using SymTableTy = llvm::ScopedHashTable; + using SymTableScopeTy = ScopedHashTableScope; private: /// A "module" matches a c/cpp source file: containing a list of functions. @@ -182,7 +183,7 @@ class CIRBuildImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(StringRef var, mlir::Value value, + mlir::LogicalResult declare(const Decl *var, mlir::Value value, mlir::Location loc) { if (symbolTable.count(var)) return mlir::failure(); @@ -300,10 +301,9 @@ class CIRBuildImpl { "not implemented"); assert(!VD->isEscapingByref() && "not implemented"); assert(!VD->getType()->isReferenceType() && "not implemented"); - assert(Builder.symbolTable.count(VD->getName()) && - "should be already mapped"); + assert(Builder.symbolTable.count(VD) && "should be already mapped"); - mlir::Value V = Builder.symbolTable.lookup(VD->getName()); + mlir::Value V = Builder.symbolTable.lookup(VD); assert(V && "Name lookup must succeed"); LValue LV = LValue::makeAddr(RawAddress(V, CharUnits::fromQuantity(4)), @@ -483,12 +483,12 @@ class CIRBuildImpl { llvm::zip(FD->parameters(), entryBlock.getArguments())) { auto *paramVar = std::get<0>(nameValue); auto paramVal = std::get<1>(nameValue); - if (failed(declare(paramVar->getName(), paramVal, + if (failed(declare(paramVar, paramVal, getLoc(paramVar->getSourceRange().getBegin())))) return nullptr; // Store params in local storage. FIXME: is this really needed // at this level of representation? - mlir::Value addr = symbolTable.lookup(paramVar->getName()); + mlir::Value addr = symbolTable.lookup(paramVar); builder.create(loc, paramVal, addr); } From 88e68f6ecb65a5700a2b80ca86a88582b7bb3ddd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 22 Sep 2021 23:37:43 -0700 Subject: [PATCH 0028/2301] [CIR] Add CIRGenTypes to track type conversion from clang::QualType -> mlir::Type Very similar to CodeGenTypes. - Create skeleton for the type mapping, add cache. - Support most native types and assert for anything fancy. - i32 the only one tested so far, tests for other types will be added in followup commits. --- clang/include/clang/CIR/CIRBuilder.h | 5 +- clang/lib/CIR/CIRBuilder.cpp | 22 +- clang/lib/CIR/CIRGenTypes.cpp | 326 +++++++++++++++++++++++++++ clang/lib/CIR/CIRGenTypes.h | 84 +++++++ clang/lib/CIR/CMakeLists.txt | 1 + 5 files changed, 427 insertions(+), 11 deletions(-) create mode 100644 clang/lib/CIR/CIRGenTypes.cpp create mode 100644 clang/lib/CIR/CIRGenTypes.h diff --git a/clang/include/clang/CIR/CIRBuilder.h b/clang/include/clang/CIR/CIRBuilder.h index 641f9ce3d8a7..cb0a574f24fc 100644 --- a/clang/include/clang/CIR/CIRBuilder.h +++ b/clang/include/clang/CIR/CIRBuilder.h @@ -29,9 +29,7 @@ class FunctionDecl; namespace cir { class CIRBuildImpl; -} - -namespace cir { +class CIRGenTypes; class CIRContext { public: @@ -44,6 +42,7 @@ class CIRContext { std::unique_ptr mlirCtx; std::unique_ptr builder; std::unique_ptr cirOut; + clang::ASTContext &astCtx; }; diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 92fa203960d9..2484a2c42ad0 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -11,6 +11,8 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenTypes.h" + #include "clang/CIR/CIRBuilder.h" #include "clang/CIR/CIRCodeGenFunction.h" @@ -140,6 +142,7 @@ class CIRBuildImpl { CIRBuildImpl(mlir::MLIRContext &context, clang::ASTContext &astctx) : builder(&context), astCtx(astctx) { theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); + genTypes = std::make_unique(astCtx, this->getBuilder()); } CIRBuildImpl(CIRBuildImpl &) = delete; CIRBuildImpl &operator=(CIRBuildImpl &) = delete; @@ -172,6 +175,9 @@ class CIRBuildImpl { /// for FunctionDecls's. CIRCodeGenFunction *CurCCGF = nullptr; + /// Per-module type mapping from clang AST to CIR. + std::unique_ptr genTypes; + /// Helper conversion from Clang source location to an MLIR location. mlir::Location getLoc(SourceLocation SLoc) { const SourceManager &SM = astCtx.getSourceManager(); @@ -183,12 +189,12 @@ class CIRBuildImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(const Decl *var, mlir::Value value, + mlir::LogicalResult declare(const Decl *var, QualType T, mlir::Value value, mlir::Location loc) { if (symbolTable.count(var)) return mlir::failure(); - mlir::MemRefType type = mlir::MemRefType::get({}, builder.getI32Type()); + mlir::MemRefType type = mlir::MemRefType::get({}, getCIRType(T)); auto alloc = builder.create(loc, type); auto *parentBlock = alloc->getBlock(); alloc->moveBefore(&parentBlock->front()); @@ -202,6 +208,7 @@ class CIRBuildImpl { public: mlir::ModuleOp getModule() { return theModule; } + mlir::OpBuilder &getBuilder() { return builder; } class ScalarExprEmitter : public StmtVisitor { LLVM_ATTRIBUTE_UNUSED CIRCodeGenFunction &CGF; @@ -460,10 +467,10 @@ class CIRBuildImpl { llvm::SmallVector argTypes; for (auto *Param : FD->parameters()) - argTypes.push_back(getType(Param->getType())); + argTypes.push_back(getCIRType(Param->getType())); CurCCGF->FnRetQualTy = FD->getReturnType(); - CurCCGF->FnRetTy = getType(CurCCGF->FnRetQualTy); + CurCCGF->FnRetTy = getCIRType(CurCCGF->FnRetQualTy); auto funcType = builder.getFunctionType(argTypes, CurCCGF->FnRetTy); mlir::FuncOp function = mlir::FuncOp::create(loc, FD->getName(), funcType); if (!function) @@ -483,7 +490,7 @@ class CIRBuildImpl { llvm::zip(FD->parameters(), entryBlock.getArguments())) { auto *paramVar = std::get<0>(nameValue); auto paramVal = std::get<1>(nameValue); - if (failed(declare(paramVar, paramVal, + if (failed(declare(paramVar, paramVar->getType(), paramVal, getLoc(paramVar->getSourceRange().getBegin())))) return nullptr; // Store params in local storage. FIXME: is this really needed @@ -510,9 +517,8 @@ class CIRBuildImpl { return function; } - mlir::Type getType(const QualType &type) { - // FIXME: actually map into the appropriated types. - return builder.getI32Type(); + mlir::Type getCIRType(const QualType &type) { + return genTypes->ConvertType(type); } void verifyModule() { diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp new file mode 100644 index 000000000000..b434a9a267e2 --- /dev/null +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -0,0 +1,326 @@ +#include "CIRGenTypes.h" + +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinTypes.h" + +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/Expr.h" +#include "clang/AST/RecordLayout.h" + +using namespace clang; +using namespace cir; + +CIRGenTypes::CIRGenTypes(ASTContext &Ctx, mlir::OpBuilder &B) + : Context(Ctx), Builder(B) {} +CIRGenTypes::~CIRGenTypes() = default; + +/// ConvertType - Convert the specified type to its LLVM form. +mlir::Type CIRGenTypes::ConvertType(QualType T) { + T = Context.getCanonicalType(T); + const Type *Ty = T.getTypePtr(); + + // For the device-side compilation, CUDA device builtin surface/texture types + // may be represented in different types. + assert(!Context.getLangOpts().CUDAIsDevice && "not implemented"); + + // RecordTypes are cached and processed specially. + assert(!dyn_cast(Ty) && "not implemented"); + + // See if type is already cached. + TypeCacheTy::iterator TCI = TypeCache.find(Ty); + // If type is found in map then use it. Otherwise, convert type T. + if (TCI != TypeCache.end()) + return TCI->second; + + // If we don't have it in the cache, convert it now. + mlir::Type ResultType = nullptr; + switch (Ty->getTypeClass()) { + case Type::Record: // Handled above. +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical or dependent types aren't possible."); + + case Type::ArrayParameter: + llvm_unreachable("NYI"); + + case Type::Builtin: { + switch (cast(Ty)->getKind()) { + case BuiltinType::WasmExternRef: + case BuiltinType::SveBoolx2: + case BuiltinType::SveBoolx4: + case BuiltinType::SveCount: + llvm_unreachable("NYI"); + case BuiltinType::Void: + case BuiltinType::ObjCId: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: + // LLVM void type can only be used as the result of a function call. Just + // map to the same as char. + ResultType = Builder.getI8Type(); + break; + + case BuiltinType::Bool: + // Note that we always return bool as i1 for use as a scalar type. + ResultType = Builder.getI1Type(); + break; + + case BuiltinType::Char_S: + case BuiltinType::Char_U: + case BuiltinType::SChar: + case BuiltinType::UChar: + case BuiltinType::Short: + case BuiltinType::UShort: + case BuiltinType::Int: + case BuiltinType::UInt: + case BuiltinType::Long: + case BuiltinType::ULong: + case BuiltinType::LongLong: + case BuiltinType::ULongLong: + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + case BuiltinType::Char8: + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::ShortAccum: + case BuiltinType::Accum: + case BuiltinType::LongAccum: + case BuiltinType::UShortAccum: + case BuiltinType::UAccum: + case BuiltinType::ULongAccum: + case BuiltinType::ShortFract: + case BuiltinType::Fract: + case BuiltinType::LongFract: + case BuiltinType::UShortFract: + case BuiltinType::UFract: + case BuiltinType::ULongFract: + case BuiltinType::SatShortAccum: + case BuiltinType::SatAccum: + case BuiltinType::SatLongAccum: + case BuiltinType::SatUShortAccum: + case BuiltinType::SatUAccum: + case BuiltinType::SatULongAccum: + case BuiltinType::SatShortFract: + case BuiltinType::SatFract: + case BuiltinType::SatLongFract: + case BuiltinType::SatUShortFract: + case BuiltinType::SatUFract: + case BuiltinType::SatULongFract: + // FIXME: break this in s/u and also pass signed param. + ResultType = + Builder.getIntegerType(static_cast(Context.getTypeSize(T))); + break; + + case BuiltinType::Float16: + ResultType = Builder.getF16Type(); + break; + case BuiltinType::Half: + // Should be the same as above? + assert("not implemented"); + break; + case BuiltinType::BFloat16: + ResultType = Builder.getBF16Type(); + break; + case BuiltinType::Float: + ResultType = Builder.getF32Type(); + break; + case BuiltinType::Double: + ResultType = Builder.getF32Type(); + break; + case BuiltinType::LongDouble: + case BuiltinType::Float128: + case BuiltinType::Ibm128: + // FIXME: look at Context.getFloatTypeSemantics(T) and getTypeForFormat + // on LLVM codegen. + assert("not implemented"); + break; + + case BuiltinType::NullPtr: + // Model std::nullptr_t as i8* + // ResultType = llvm::Type::getInt8PtrTy(getLLVMContext()); + assert("not implemented"); + break; + + case BuiltinType::UInt128: + case BuiltinType::Int128: + ResultType = Builder.getIntegerType(128); + break; + +#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + case BuiltinType::Id: +#include "clang/Basic/OpenCLImageTypes.def" +#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) case BuiltinType::Id: +#include "clang/Basic/OpenCLExtensionTypes.def" + case BuiltinType::OCLSampler: + case BuiltinType::OCLEvent: + case BuiltinType::OCLClkEvent: + case BuiltinType::OCLQueue: + case BuiltinType::OCLReserveID: + assert("not implemented"); + break; + case BuiltinType::SveInt8: + case BuiltinType::SveUint8: + case BuiltinType::SveInt8x2: + case BuiltinType::SveUint8x2: + case BuiltinType::SveInt8x3: + case BuiltinType::SveUint8x3: + case BuiltinType::SveInt8x4: + case BuiltinType::SveUint8x4: + case BuiltinType::SveInt16: + case BuiltinType::SveUint16: + case BuiltinType::SveInt16x2: + case BuiltinType::SveUint16x2: + case BuiltinType::SveInt16x3: + case BuiltinType::SveUint16x3: + case BuiltinType::SveInt16x4: + case BuiltinType::SveUint16x4: + case BuiltinType::SveInt32: + case BuiltinType::SveUint32: + case BuiltinType::SveInt32x2: + case BuiltinType::SveUint32x2: + case BuiltinType::SveInt32x3: + case BuiltinType::SveUint32x3: + case BuiltinType::SveInt32x4: + case BuiltinType::SveUint32x4: + case BuiltinType::SveInt64: + case BuiltinType::SveUint64: + case BuiltinType::SveInt64x2: + case BuiltinType::SveUint64x2: + case BuiltinType::SveInt64x3: + case BuiltinType::SveUint64x3: + case BuiltinType::SveInt64x4: + case BuiltinType::SveUint64x4: + case BuiltinType::SveBool: + case BuiltinType::SveFloat16: + case BuiltinType::SveFloat16x2: + case BuiltinType::SveFloat16x3: + case BuiltinType::SveFloat16x4: + case BuiltinType::SveFloat32: + case BuiltinType::SveFloat32x2: + case BuiltinType::SveFloat32x3: + case BuiltinType::SveFloat32x4: + case BuiltinType::SveFloat64: + case BuiltinType::SveFloat64x2: + case BuiltinType::SveFloat64x3: + case BuiltinType::SveFloat64x4: + case BuiltinType::SveBFloat16: + case BuiltinType::SveBFloat16x2: + case BuiltinType::SveBFloat16x3: + case BuiltinType::SveBFloat16x4: { + assert("not implemented"); + break; + } +#define PPC_VECTOR_TYPE(Name, Id, Size) \ + case BuiltinType::Id: \ + assert("not implemented"); \ + break; +#include "clang/Basic/PPCTypes.def" +#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/RISCVVTypes.def" + { + assert("not implemented"); + break; + } + case BuiltinType::Dependent: +#define BUILTIN_TYPE(Id, SingletonId) +#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id: +#include "clang/AST/BuiltinTypes.def" + llvm_unreachable("Unexpected placeholder builtin type!"); + } + break; + } + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("Unexpected undeduced type!"); + case Type::Complex: { + assert("not implemented"); + break; + } + case Type::LValueReference: + case Type::RValueReference: { + assert("not implemented"); + break; + } + case Type::Pointer: { + assert("not implemented"); + break; + } + + case Type::VariableArray: { + assert("not implemented"); + break; + } + case Type::IncompleteArray: { + assert("not implemented"); + break; + } + case Type::ConstantArray: { + assert("not implemented"); + break; + } + case Type::ExtVector: + case Type::Vector: { + assert("not implemented"); + break; + } + case Type::ConstantMatrix: { + assert("not implemented"); + break; + } + case Type::FunctionNoProto: + case Type::FunctionProto: + assert("not implemented"); + break; + case Type::ObjCObject: + assert("not implemented"); + break; + + case Type::ObjCInterface: { + assert("not implemented"); + break; + } + + case Type::ObjCObjectPointer: { + assert("not implemented"); + break; + } + + case Type::Enum: { + assert("not implemented"); + break; + } + + case Type::BlockPointer: { + assert("not implemented"); + break; + } + + case Type::MemberPointer: { + assert("not implemented"); + break; + } + + case Type::Atomic: { + assert("not implemented"); + break; + } + case Type::Pipe: { + assert("not implemented"); + break; + } + case Type::BitInt: { + assert("not implemented"); + break; + } + } + + assert(ResultType && "Didn't convert a type?"); + + TypeCache[Ty] = ResultType; + return ResultType; +} diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h new file mode 100644 index 000000000000..f1951e858686 --- /dev/null +++ b/clang/lib/CIR/CIRGenTypes.h @@ -0,0 +1,84 @@ +//===--- CIRGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is the code that handles AST -> CIR type lowering. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H +#define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H + +#include "clang/CodeGen/CGFunctionInfo.h" +#include "llvm/ADT/DenseMap.h" + +namespace llvm { +class FunctionType; +class DataLayout; +class Type; +class LLVMContext; +class StructType; +} // namespace llvm + +namespace clang { +class ASTContext; +template class CanQual; +class CXXConstructorDecl; +class CXXDestructorDecl; +class CXXMethodDecl; +class CodeGenOptions; +class FieldDecl; +class FunctionProtoType; +class ObjCInterfaceDecl; +class ObjCIvarDecl; +class PointerType; +class QualType; +class RecordDecl; +class TagDecl; +class TargetInfo; +class Type; +typedef CanQual CanQualType; +class GlobalDecl; + +namespace CodeGen { +class ABIInfo; +class CGCXXABI; +class CGRecordLayout; +class CodeGenModule; +class RequiredArgs; +} // end namespace CodeGen +} // end namespace clang + +namespace mlir { +class Type; +class OpBuilder; +} // namespace mlir + +/// This class organizes the cross-module state that is used while lowering +/// AST types to CIR types. +namespace cir { +class CIRGenTypes { + clang::ASTContext &Context; + mlir::OpBuilder &Builder; + +public: + CIRGenTypes(clang::ASTContext &Ctx, mlir::OpBuilder &B); + ~CIRGenTypes(); + + /// This map keeps cache of llvm::Types and maps clang::Type to + /// corresponding llvm::Type. + using TypeCacheTy = llvm::DenseMap; + TypeCacheTy TypeCache; + + clang::ASTContext &getContext() const { return Context; } + + /// ConvertType - Convert type T into a mlir::Type. + mlir::Type ConvertType(clang::QualType T); +}; +} // namespace cir + +#endif diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 38955b93dd76..860ab25640e4 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -12,6 +12,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR CIRBuilder.cpp + CIRGenTypes.cpp DEPENDS MLIRCIROpsIncGen From 8c1e1e99e6601142be28182bcd8ba0cebdbbe001 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 23 Sep 2021 20:02:41 -0700 Subject: [PATCH 0029/2301] [CIR] Fix return void, change some of the type mapping and add tests --- clang/lib/CIR/CIRBuilder.cpp | 12 ++++++---- clang/lib/CIR/CIRGenTypes.cpp | 10 ++++---- clang/test/CIR/CodeGen/types.c | 44 ++++++++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 10 deletions(-) create mode 100644 clang/test/CIR/CodeGen/types.c diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 2484a2c42ad0..7601430ca156 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -392,16 +392,16 @@ class CIRBuildImpl { } mlir::LogicalResult buildReturnStmt(const ReturnStmt &S) { - // Emit the result value, even if unused, to evaluate the side effects. - const Expr *RV = S.getRetValue(); - assert(!isa(RV) && "unimplemented"); assert(!(astCtx.getLangOpts().ElideConstructors && S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) && "unimplemented"); assert(!CurCCGF->FnRetQualTy->isReferenceType() && "unimplemented"); + // Emit the result value, even if unused, to evaluate the side effects. + const Expr *RV = S.getRetValue(); if (!RV) // Do nothing (return value is left uninitialized) return mlir::success(); + assert(!isa(RV) && "unimplemented"); mlir::Value V = nullptr; switch (CIRCodeGenFunction::getEvaluationKind(RV->getType())) { @@ -470,8 +470,10 @@ class CIRBuildImpl { argTypes.push_back(getCIRType(Param->getType())); CurCCGF->FnRetQualTy = FD->getReturnType(); - CurCCGF->FnRetTy = getCIRType(CurCCGF->FnRetQualTy); - auto funcType = builder.getFunctionType(argTypes, CurCCGF->FnRetTy); + auto funcType = builder.getFunctionType( + argTypes, CurCCGF->FnRetQualTy->isVoidType() + ? mlir::TypeRange() + : getCIRType(CurCCGF->FnRetQualTy)); mlir::FuncOp function = mlir::FuncOp::create(loc, FD->getName(), funcType); if (!function) return nullptr; diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index b434a9a267e2..8d71a9b781bc 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -60,9 +60,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::ObjCId: case BuiltinType::ObjCClass: case BuiltinType::ObjCSel: - // LLVM void type can only be used as the result of a function call. Just - // map to the same as char. - ResultType = Builder.getI8Type(); + // FIXME: if we emit like LLVM we probably wanna use i8. + assert("not implemented"); break; case BuiltinType::Bool: @@ -130,7 +129,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { ResultType = Builder.getF32Type(); break; case BuiltinType::Double: - ResultType = Builder.getF32Type(); + ResultType = Builder.getF64Type(); break; case BuiltinType::LongDouble: case BuiltinType::Float128: @@ -148,7 +147,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::UInt128: case BuiltinType::Int128: - ResultType = Builder.getIntegerType(128); + assert("not implemented"); + // FIXME: ResultType = Builder.getIntegerType(128); break; #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c new file mode 100644 index 000000000000..2372a54f9f0a --- /dev/null +++ b/clang/test/CIR/CodeGen/types.c @@ -0,0 +1,44 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -fcir-warnings %s -fcir-output=%t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fsyntax-only -fcir-warnings %s -fcir-output=%t.cpp.cir +// RUN: FileCheck --input-file=%t.cpp.cir --check-prefix=CHECK-CPP %s +// XFAIL: * + +int t0(int i) { return i; } +unsigned int t1(unsigned int i) { return i; } + +char t2(char i) { return i; } +unsigned char t3(unsigned char i) { return i; } + +short t4(short i) { return i; } +unsigned short t5(unsigned short i) { return i; } + +float t6(float i) { return i; } +double t7(double i) { return i; } + +void t8() {} + +#ifdef __cplusplus +bool t9(bool b) { return b; } +#endif + +// CHECK: func @t0(%arg0: i32) -> i32 { +// CHECK: func @t1(%arg0: i32) -> i32 { +// CHECK: func @t2(%arg0: i8) -> i8 { +// CHECK: func @t3(%arg0: i8) -> i8 { +// CHECK: func @t4(%arg0: i16) -> i16 { +// CHECK: func @t5(%arg0: i16) -> i16 { +// CHECK: func @t6(%arg0: f32) -> f32 { +// CHECK: func @t7(%arg0: f64) -> f64 { +// CHECK: func @t8() { + +// CHECK-CPP: func @t0(%arg0: i32) -> i32 { +// CHECK-CPP: func @t1(%arg0: i32) -> i32 { +// CHECK-CPP: func @t2(%arg0: i8) -> i8 { +// CHECK-CPP: func @t3(%arg0: i8) -> i8 { +// CHECK-CPP: func @t4(%arg0: i16) -> i16 { +// CHECK-CPP: func @t5(%arg0: i16) -> i16 { +// CHECK-CPP: func @t6(%arg0: f32) -> f32 { +// CHECK-CPP: func @t7(%arg0: f64) -> f64 { +// CHECK-CPP: func @t8() { +// CHECK-CPP: func @t9(%arg0: i1) -> i1 { From 34326a91828e86bf8d177b74cdfc7fed317a7491 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 21 Sep 2021 03:48:27 -0400 Subject: [PATCH 0030/2301] [CIR] Register CIR in `registerAllDialects` --- mlir/include/mlir/InitAllDialects.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h index 0da82825c828..97841338e32f 100644 --- a/mlir/include/mlir/InitAllDialects.h +++ b/mlir/include/mlir/InitAllDialects.h @@ -29,6 +29,7 @@ #include "mlir/Dialect/Async/IR/Async.h" #include "mlir/Dialect/Bufferization/IR/Bufferization.h" #include "mlir/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Complex/IR/Complex.h" #include "mlir/Dialect/ControlFlow/IR/ControlFlow.h" #include "mlir/Dialect/ControlFlow/Transforms/BufferDeallocationOpInterfaceImpl.h" @@ -117,6 +118,7 @@ inline void registerAllDialects(DialectRegistry ®istry) { async::AsyncDialect, bufferization::BufferizationDialect, cf::ControlFlowDialect, + cir::CIRDialect, complex::ComplexDialect, DLTIDialect, emitc::EmitCDialect, From 43a34ff8df02ae9dfdcdcaff2c3ef558219ea359 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 30 Sep 2021 18:25:47 -0700 Subject: [PATCH 0031/2301] [CIR] Add a pointer type and tests While here make some effort to tablegen as much as possible from start. --- clang/lib/CIR/CIRBuilder.cpp | 1 + clang/lib/CIR/CIRGenTypes.cpp | 15 +++- mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 1 + .../include/mlir/Dialect/CIR/IR/CIRDialect.td | 39 ++++++++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 24 +++---- mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h | 25 +++++++ mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td | 51 +++++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 2 + mlir/lib/Dialect/CIR/IR/CIRTypes.cpp | 72 +++++++++++++++++++ mlir/lib/Dialect/CIR/IR/CMakeLists.txt | 1 + 10 files changed, 216 insertions(+), 15 deletions(-) create mode 100644 mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td create mode 100644 mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h create mode 100644 mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td create mode 100644 mlir/lib/Dialect/CIR/IR/CIRTypes.cpp diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 7601430ca156..1d3801aab5c8 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -17,6 +17,7 @@ #include "clang/CIR/CIRCodeGenFunction.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 8d71a9b781bc..def51ecce9bb 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -1,5 +1,6 @@ #include "CIRGenTypes.h" +#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" @@ -247,7 +248,19 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; } case Type::Pointer: { - assert("not implemented"); + const PointerType *PTy = cast(Ty); + QualType ETy = PTy->getPointeeType(); + assert(!ETy->isConstantMatrixType() && "not implemented"); + + mlir::Type PointeeType = ConvertType(ETy); + + // Treat effectively as a *i8. + // if (PointeeType->isVoidTy()) + // PointeeType = Builder.getI8Type(); + + // FIXME: add address specifier to cir::PointerType? + ResultType = + ::mlir::cir::PointerType::get(Builder.getContext(), PointeeType); break; } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index 61fdc39c0c81..c7746f8801e8 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -27,6 +27,7 @@ using FuncOp = func::FuncOp; } // namespace mlir #include "mlir/Dialect/CIR/IR/CIROpsDialect.h.inc" +#include "mlir/Dialect/CIR/IR/CIRTypes.h" #define GET_OP_CLASSES #include "mlir/Dialect/CIR/IR/CIROps.h.inc" diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td new file mode 100644 index 000000000000..899ab712649e --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td @@ -0,0 +1,39 @@ +//===- CIRTypes.td - CIR dialect types ---------------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the CIR dialect. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_CIR_DIALECT_CIR +#define MLIR_CIR_DIALECT_CIR + +include "mlir/IR/OpBase.td" + +def CIR_Dialect : Dialect { + let name = "cir"; + + // A short one-line summary of our dialect. + let summary = "A high-level dialect for analyzing and optimizing Clang " + "supported languages"; + + let cppNamespace = "::mlir::cir"; + + let useDefaultAttributePrinterParser = 0; + let useDefaultTypePrinterParser = 0; + + let extraClassDeclaration = [{ + void registerTypes(); + + ::mlir::Type parseType(::mlir::DialectAsmParser &parser) const override; + void printType(::mlir::Type type, + ::mlir::DialectAsmPrinter &printer) const override; + }]; +} + +#endif // MLIR_CIR_DIALECT_CIR diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 3fc9ce62ed88..ae622318aeb3 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -1,4 +1,4 @@ -//===-- CIRDialect.td - CIR dialect definition -------------*- tablegen -*-===// +//===-- CIROps.td - CIR dialect definition -----------------*- tablegen -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -11,21 +11,17 @@ /// //===----------------------------------------------------------------------===// -#ifndef CIR_DIALECT_CIR_DIALECT -#define CIR_DIALECT_CIR_DIALECT +#ifndef MLIR_CIR_DIALECT_CIR_OPS +#define MLIR_CIR_DIALECT_CIR_OPS -include "mlir/IR/OpBase.td" +include "mlir/Dialect/CIR/IR/CIRDialect.td" +include "mlir/Dialect/CIR/IR/CIRTypes.td" +include "mlir/IR/SymbolInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" -def CIR_Dialect : Dialect { - let name = "cir"; - - // A short one-line summary of our dialect. - let summary = "A high-level dialect for analyzing and optimizing Clang " - "compiled languages"; - - let cppNamespace = "::mlir::cir"; -} +//===----------------------------------------------------------------------===// +// CIR Ops +//===----------------------------------------------------------------------===// class CIR_Op traits = []> : Op; @@ -71,4 +67,4 @@ def ReturnOp : CIR_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { } -#endif // CIR_DIALECT_CIR_DIALECT +#endif // MLIR_CIR_DIALECT_CIR_OPS diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h new file mode 100644 index 000000000000..e5ffc3ff54c8 --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h @@ -0,0 +1,25 @@ +//===- CIRTypes.h - MLIR SPIR-V Types -------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the types in the SPIR-V dialect. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_IR_CIRTYPES_H_ +#define MLIR_DIALECT_CIR_IR_CIRTYPES_H_ + +#include "mlir/IR/Types.h" + +//===----------------------------------------------------------------------===// +// CIR Dialect Types +//===----------------------------------------------------------------------===// + +#define GET_TYPEDEF_CLASSES +#include "mlir/Dialect/CIR/IR/CIROpsTypes.h.inc" + +#endif // MLIR_DIALECT_CIR_IR_CIRTYPES_H_ \ No newline at end of file diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td new file mode 100644 index 000000000000..7b04e08d3ae1 --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td @@ -0,0 +1,51 @@ +//===- CIRTypes.td - CIR dialect types ---------------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the CIR dialect types. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_CIR_DIALECT_CIR_TYPES +#define MLIR_CIR_DIALECT_CIR_TYPES + +include "mlir/Dialect/CIR/IR/CIRDialect.td" +include "mlir/IR/AttrTypeBase.td" + +//===----------------------------------------------------------------------===// +// CIR Types +//===----------------------------------------------------------------------===// + +class CIR_Type : TypeDef { + let mnemonic = typeMnemonic; +} + +//===----------------------------------------------------------------------===// +// PointerType +//===----------------------------------------------------------------------===// + +def CIR_PointerType : + CIR_Type<"Pointer", "ptr"> { + + let summary = "CIR pointer type"; + let description = [{ + `CIR.ptr` is a type returned by any op generating a pointer in C++. + }]; + + let parameters = (ins "mlir::Type":$pointee); + + let hasCustomAssemblyFormat = 1; +} + +//===----------------------------------------------------------------------===// +// One type to bind them all +//===----------------------------------------------------------------------===// + +def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType]>; + +#endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 4976daeedbfe..704701d0220b 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" @@ -30,6 +31,7 @@ using namespace mlir::cir; /// Dialect initialization, the instance will be owned by the context. This is /// the point of registration of types and operations for the dialect. void cir::CIRDialect::initialize() { + registerTypes(); addOperations< #define GET_OP_LIST #include "mlir/Dialect/CIR/IR/CIROps.cpp.inc" diff --git a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp new file mode 100644 index 000000000000..3379bd8ca124 --- /dev/null +++ b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp @@ -0,0 +1,72 @@ +//===- CIRTypes.cpp - MLIR CIR Types --------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the types in the CIR dialect. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/DialectImplementation.h" + +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/TypeSwitch.h" + +#define GET_TYPEDEF_CLASSES +#include "mlir/Dialect/CIR/IR/CIROpsTypes.cpp.inc" + +using namespace mlir; +using namespace mlir::cir; + +//===----------------------------------------------------------------------===// +// General CIR parsing / printing +//===----------------------------------------------------------------------===// + +Type CIRDialect::parseType(DialectAsmParser &parser) const { + llvm::SMLoc typeLoc = parser.getCurrentLocation(); + StringRef mnemonic; + if (parser.parseKeyword(&mnemonic)) + return Type(); + Type genType; + OptionalParseResult parseResult = + generatedTypeParser(parser, &mnemonic, genType); + if (parseResult.has_value()) + return genType; + parser.emitError(typeLoc, "unknown type in CIR dialect"); + return Type(); +} + +void CIRDialect::printType(Type type, DialectAsmPrinter &os) const { + if (failed(generatedTypePrinter(type, os))) + llvm_unreachable("unexpected CIR type kind"); +} + +Type PointerType::parse(mlir::AsmParser &parser) { + if (parser.parseLess()) + return Type(); + Type pointeeType; + if (parser.parseType(pointeeType)) + return Type(); + if (parser.parseGreater()) + return Type(); + return get(parser.getContext(), pointeeType); +} + +void PointerType::print(mlir::AsmPrinter &printer) const { + printer << getMnemonic() << "<"; + printer.printType(getPointee()); + printer << '>'; +} + +//===----------------------------------------------------------------------===// +// CIR Dialect +//===----------------------------------------------------------------------===// + +void CIRDialect::registerTypes() { addTypes(); } diff --git a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt index 905031aefe52..854a9fa5357b 100644 --- a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt @@ -1,5 +1,6 @@ add_mlir_dialect_library(MLIRCIR CIRDialect.cpp + CIRTypes.cpp ADDITIONAL_HEADER_DIRS ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/CIR From 5bc989f623d4fa073b3e79dee2dae9694451073e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Oct 2021 17:47:33 -0700 Subject: [PATCH 0032/2301] [CIR] Add first set of CIR operations: local, get_addr, load and store These are used for using and tracking lvalues, conversions and local variables. TODOS from here: - Add constrains for the types allowed and verification. - Change current codegen to emit CIR instead of memref - Add a new pass to lowere these to memrefs. - Add docs to load/store --- clang/lib/CIR/CIRBuilder.cpp | 28 +++-- clang/test/CIR/CodeGen/basic.c | 11 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 116 ++++++++++++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 49 +++++++++ mlir/lib/Dialect/CIR/IR/CIRTypes.cpp | 2 +- mlir/lib/Dialect/CIR/IR/CMakeLists.txt | 1 + 6 files changed, 191 insertions(+), 16 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 1d3801aab5c8..20287a395c05 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -192,17 +192,28 @@ class CIRBuildImpl { /// wasn't declared yet. mlir::LogicalResult declare(const Decl *var, QualType T, mlir::Value value, mlir::Location loc) { + const auto *namedVar = dyn_cast_or_null(var); + assert(namedVar && "Needs a named decl"); + if (symbolTable.count(var)) return mlir::failure(); - mlir::MemRefType type = mlir::MemRefType::get({}, getCIRType(T)); - auto alloc = builder.create(loc, type); - auto *parentBlock = alloc->getBlock(); - alloc->moveBefore(&parentBlock->front()); + // TODO: track "constant" + auto localVarTy = getCIRType(T); + auto localVarPtrTy = + mlir::cir::PointerType::get(builder.getContext(), localVarTy); + + auto localVarAddr = builder.create( + loc, /*addr type*/ localVarPtrTy, /*var type*/ localVarTy, + /*initial_value*/ mlir::UnitAttr::get(builder.getContext()), + /*constant*/ false); + + auto *parentBlock = localVarAddr->getBlock(); + localVarAddr->moveBefore(&parentBlock->front()); // Insert into the symbol table, allocate some stack space in the // function entry block. - symbolTable.insert(var, alloc); + symbolTable.insert(var, localVarAddr); return mlir::success(); } @@ -338,8 +349,9 @@ class CIRBuildImpl { /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { LValue LV = EmitLValue(E); - auto load = Builder.builder.create( - Builder.getLoc(E->getExprLoc()), LV.getPointer()); + auto load = Builder.builder.create( + Builder.getLoc(E->getExprLoc()), Builder.getCIRType(E->getType()), + LV.getPointer()); // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); return load; } @@ -499,7 +511,7 @@ class CIRBuildImpl { // Store params in local storage. FIXME: is this really needed // at this level of representation? mlir::Value addr = symbolTable.lookup(paramVar); - builder.create(loc, paramVal, addr); + builder.create(loc, paramVal, addr); } // Emit the body of the function. diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 093245635196..3b84b9ff323f 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -7,10 +7,9 @@ int foo(int i) { } // CHECK: module { -// CHECK-NEXT: func @foo(%arg0: i32) -> i32 { -// CHECK-NEXT: %0 = memref.alloca() : memref -// CHECK-NEXT: memref.store %arg0, %0[] : memref -// CHECK-NEXT: %1 = memref.load %0[] : memref -// CHECK-NEXT: cir.return %1 : i32 -// CHECK-NEXT: } +// CHECK-NEXT: func @foo(%arg0: i32) -> i32 { +// CHECK-NEXT: %0 = cir.alloca i32 = uninitialized, cir.ptr +// CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.return %1 : i32 // CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index ae622318aeb3..0c0070f48d6e 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -18,6 +18,7 @@ include "mlir/Dialect/CIR/IR/CIRDialect.td" include "mlir/Dialect/CIR/IR/CIRTypes.td" include "mlir/IR/SymbolInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" +include "mlir/IR/SymbolInterfaces.td" //===----------------------------------------------------------------------===// // CIR Ops @@ -27,7 +28,120 @@ class CIR_Op traits = []> : Op; //===----------------------------------------------------------------------===// -// CIR Operations +// AllocaOp +//===----------------------------------------------------------------------===// + +// FIXME: add alignment, bool attr on being param, automatic scope?. +def AllocaOp : CIR_Op<"alloca", []> { + let summary = "define a local variable"; + let description = [{ + The `cir.alloca` operation defines a local variable. The `initial_value` + can either be a unit attribute to represent a definition of an uninitialized + local variable, or constant to represent the definition of a + variable with an initial value. It can also be marked constant using the + `constant` unit attribute. + + The result is a pointer type for the original input type. + + Example: + + ```mlir + // Local variable with an initial value. + %0 = cir.alloca i32 = 1, !cir.ptr + + // Uninitialized local variable. + %0 = cir.alloca f32 = uninitialized, !cir.ptr + + // Constant local variable. + %0 = cir.alloca constant i8 = 3, !cir.ptr + ``` + }]; + + let arguments = (ins + TypeAttr:$type, + AnyAttr:$initial_value, + UnitAttr:$constant + ); + + let results = (outs Res]>:$addr); + + let assemblyFormat = [{ + (`constant` $constant^)? + custom($type, $initial_value) + attr-dict `,` `cir.ptr` type($addr) + }]; + + let extraClassDeclaration = [{ + bool isUninitialized() { + return getInitialValue().isa(); + } + }]; + + let hasVerifier = 1; +} + +//===----------------------------------------------------------------------===// +// LoadOp +//===----------------------------------------------------------------------===// + +def LoadOp : CIR_Op<"load", [ + TypesMatchWith<"type of 'result' matches pointee type of 'addr'", + "addr", "result", + "$_self.cast().getPointee()">]> { + + let summary = "load operation"; + let description = [{ + `cir.load` reads a variable using a pointer type. + + Example: + + ```mlir + + // Read from local variable, address in %0. + %1 = cir.load %0 : !cir.ptr, i32 + ``` + }]; + + let arguments = (ins Arg:$addr); + let results = (outs AnyType:$result); + + let assemblyFormat = + "$addr attr-dict `:` `cir.ptr` type($addr) `,` type($result)"; +} + +//===----------------------------------------------------------------------===// +// StoreOp +//===----------------------------------------------------------------------===// + +def StoreOp : CIR_Op<"store", [ + TypesMatchWith<"type of 'value' matches pointee type of 'addr'", + "addr", "value", + "$_self.cast().getPointee()">]> { + + let summary = "store operation"; + let description = [{ + `cir.load` reads a variable using a pointer type. + + Example: + + ```mlir + + // Store to local variable, address in %0. + cir.store %arg0, %0 : i32, !cir.ptr + }]; + + let arguments = (ins AnyType:$value, + Arg:$addr); + + let assemblyFormat = + "$value `,` $addr attr-dict `:` type($value) `,` `cir.ptr` type($addr)"; +} + +//===----------------------------------------------------------------------===// +// ReturnOp //===----------------------------------------------------------------------===// def ReturnOp : CIR_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 704701d0220b..e59de6f4dd84 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -40,6 +40,7 @@ void cir::CIRDialect::initialize() { //===----------------------------------------------------------------------===// // ReturnOp +//===----------------------------------------------------------------------===// mlir::LogicalResult ReturnOp::verify() { // We know that the parent operation is a function, because of the 'HasParent' @@ -73,6 +74,54 @@ mlir::LogicalResult ReturnOp::verify() { << ")"; } +//===----------------------------------------------------------------------===// +// AllocaOp +//===----------------------------------------------------------------------===// + +static void printAllocaOpTypeAndInitialValue(OpAsmPrinter &p, AllocaOp op, + TypeAttr type, + Attribute initialValue) { + p << type; + p << " = "; + if (op.isUninitialized()) + p << "uninitialized"; + else + p.printAttributeWithoutType(initialValue); +} + +static ParseResult parseAllocaOpTypeAndInitialValue(OpAsmParser &parser, + TypeAttr &typeAttr, + Attribute &initialValue) { + Type type; + if (parser.parseType(type)) + return failure(); + typeAttr = TypeAttr::get(type); + + if (parser.parseEqual()) + return success(); + + if (succeeded(parser.parseOptionalKeyword("uninitialized"))) + initialValue = UnitAttr::get(parser.getBuilder().getContext()); + + if (!initialValue.isa()) + return parser.emitError(parser.getNameLoc()) + << "constant operation not implemented yet"; + + return success(); +} + +LogicalResult AllocaOp::verify() { + // Verify that the initial value, is either a unit attribute or + // an elements attribute. + Attribute initValue = getInitialValue(); + if (!initValue.isa()) + return emitOpError("initial value should be a unit " + "attribute, but got ") + << initValue; + + return success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp index 3379bd8ca124..c42a7567ea5e 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp @@ -60,7 +60,7 @@ Type PointerType::parse(mlir::AsmParser &parser) { } void PointerType::print(mlir::AsmPrinter &printer) const { - printer << getMnemonic() << "<"; + printer << "<"; printer.printType(getPointee()); printer << '>'; } diff --git a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt index 854a9fa5357b..63e1b234b90a 100644 --- a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt @@ -7,6 +7,7 @@ add_mlir_dialect_library(MLIRCIR DEPENDS MLIRCIROpsIncGen + MLIRSymbolInterfacesIncGen LINK_LIBS PUBLIC MLIRIR From d8f6d6f615c27744cef900ffdda33a96355a6aa9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 5 Oct 2021 22:00:58 -0700 Subject: [PATCH 0033/2301] [CIR] Add cir-tool and first cir parse/print test --- clang/test/CIR/IR/cir-ops.cir | 20 +++++++++++++++++++ clang/test/CMakeLists.txt | 1 + clang/test/lit.cfg.py | 2 ++ clang/tools/CMakeLists.txt | 1 + clang/tools/cir-tool/CMakeLists.txt | 21 ++++++++++++++++++++ clang/tools/cir-tool/cir-tool.cpp | 30 +++++++++++++++++++++++++++++ 6 files changed, 75 insertions(+) create mode 100644 clang/test/CIR/IR/cir-ops.cir create mode 100644 clang/tools/cir-tool/CMakeLists.txt create mode 100644 clang/tools/cir-tool/cir-tool.cpp diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir new file mode 100644 index 000000000000..56727b002355 --- /dev/null +++ b/clang/test/CIR/IR/cir-ops.cir @@ -0,0 +1,20 @@ +// Test the CIR operations can parse and print correctly + +// RUN: cir-tool %s | cir-tool | FileCheck %s +module { + func.func @foo(%arg0: i32) -> i32 { + %0 = cir.alloca i32 = uninitialized, cir.ptr + cir.store %arg0, %0 : i32, cir.ptr + %1 = cir.load %0 : cir.ptr , i32 + cir.return %1 : i32 + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo(%arg0: i32) -> i32 { +// CHECK-NEXT: %0 = cir.alloca i32 = uninitialized, cir.ptr +// CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.return %1 : i32 +// CHECK-NEXT: } +// CHECK-NEXT: } diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt index b18614a6cbb1..7aa776f70ae8 100644 --- a/clang/test/CMakeLists.txt +++ b/clang/test/CMakeLists.txt @@ -64,6 +64,7 @@ endif () list(APPEND CLANG_TEST_DEPS apinotes-test c-index-test + cir-tool clang clang-fuzzer-dictionary clang-resource-headers diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py index e4b39c4f7159..ff3b88b941b0 100644 --- a/clang/test/lit.cfg.py +++ b/clang/test/lit.cfg.py @@ -29,6 +29,7 @@ ".c", ".cpp", ".i", + ".cir", ".cppm", ".m", ".mm", @@ -85,6 +86,7 @@ tools = [ "apinotes-test", "c-index-test", + "cir-tool", "clang-diff", "clang-format", "clang-repl", diff --git a/clang/tools/CMakeLists.txt b/clang/tools/CMakeLists.txt index 98c018e96848..82217b8e0395 100644 --- a/clang/tools/CMakeLists.txt +++ b/clang/tools/CMakeLists.txt @@ -3,6 +3,7 @@ create_subdirectory_options(CLANG TOOL) add_clang_subdirectory(diagtool) add_clang_subdirectory(driver) add_clang_subdirectory(apinotes-test) +add_clang_subdirectory(cir-tool) add_clang_subdirectory(clang-diff) add_clang_subdirectory(clang-format) add_clang_subdirectory(clang-fuzzer) diff --git a/clang/tools/cir-tool/CMakeLists.txt b/clang/tools/cir-tool/CMakeLists.txt new file mode 100644 index 000000000000..4c1f4b2f85ef --- /dev/null +++ b/clang/tools/cir-tool/CMakeLists.txt @@ -0,0 +1,21 @@ +add_clang_tool(cir-tool cir-tool.cpp) +llvm_update_compile_flags(cir-tool) +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) +include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + +target_link_libraries(cir-tool PRIVATE + ${dialect_libs} + ${conversion_libs} + + MLIRAnalysis + MLIRIR + MLIROptLib + MLIRCIR + MLIRMemRefDialect + MLIRParser + MLIRPass + MLIRSideEffectInterfaces + MLIRTransforms +) diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp new file mode 100644 index 000000000000..a0ed1447b46f --- /dev/null +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -0,0 +1,30 @@ +//===- cir-tool.cpp - CIR optimizationa and analysis driver -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Similar to MLIR/LLVM's "opt" tools but also deals with analysis and custom +// arguments. TODO: this is basically a copy from MlirOptMain.cpp, but capable +// of module emission as specified by the user. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Tools/mlir-opt/MlirOptMain.h" + +using namespace mlir; + +int main(int argc, char **argv) { + // TODO: register needed MLIR passes for CIR? + mlir::DialectRegistry registry; + // TODO: add memref::MemRefDialect> when we add lowering + registry.insert(); + registry.insert(); + + return failed(MlirOptMain( + argc, argv, "Clang IR analysis and optimization tool\n", registry)); +} From 78201ec321c7e593618a281557742a2a895ba0aa Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Oct 2021 00:11:02 -0400 Subject: [PATCH 0034/2301] [CIR] Implement cc1 support for a CIR pipeline This patch implements trivial cc1 support that'll let you pass `-fenable-clangir` to run clang with the clangir pipeline inserted. This also adds `-emit-cir` which will output a cir file and `-emit-cir-only` which will run the pipeline stopping after CIRGen. $ clang -cc1 test.c -fenable-clangir -emit-cir-only (no output) $ clang -cc1 test.c -fenable-clangir -emit-cir (outputs test.cir) $ clang -cc1 test.c -fenable-clangir -emit-cir -o here.cir (outputs here.cir) The following invocation: $ clang -cc1 test.c -fenable-clangir will crash. But it will eventually work as stanard clang does honoring your standard permutations of `-emit-llvm`, `-S`, `-c`, `-emit-llvm-only` etc. --- clang/include/clang/Basic/LangOptions.h | 3 - clang/include/clang/CIR/CIRBuilder.h | 18 +- .../clang/CIRFrontendAction/CIRGenAction.h | 90 ++++++++++ clang/include/clang/Driver/Options.td | 30 ++-- .../include/clang/Frontend/FrontendOptions.h | 26 +-- clang/lib/CIR/CIRBuilder.cpp | 41 +++-- clang/lib/CIR/CMakeLists.txt | 1 - clang/lib/CIR/FrontendAction/CMakeLists.txt | 7 + clang/lib/CIRFrontendAction/CIRGenAction.cpp | 162 ++++++++++++++++++ clang/lib/CIRFrontendAction/CMakeLists.txt | 32 ++++ clang/lib/CMakeLists.txt | 1 + clang/lib/Driver/ToolChains/Clang.cpp | 22 --- clang/lib/Frontend/CompilerInvocation.cpp | 7 +- clang/lib/FrontendTool/CMakeLists.txt | 1 + .../ExecuteCompilerInvocation.cpp | 17 ++ clang/lib/Sema/CIRBasedWarnings.cpp | 3 +- clang/test/CIR/CodeGen/basic.c | 3 +- clang/test/CIR/CodeGen/types.c | 5 +- llvm/docs/CIR.rst | 15 +- 19 files changed, 403 insertions(+), 81 deletions(-) create mode 100644 clang/include/clang/CIRFrontendAction/CIRGenAction.h create mode 100644 clang/lib/CIR/FrontendAction/CMakeLists.txt create mode 100644 clang/lib/CIRFrontendAction/CIRGenAction.cpp create mode 100644 clang/lib/CIRFrontendAction/CMakeLists.txt diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h index 485b58632003..949c8f5d448b 100644 --- a/clang/include/clang/Basic/LangOptions.h +++ b/clang/include/clang/Basic/LangOptions.h @@ -576,9 +576,6 @@ class LangOptions : public LangOptionsBase { /// host code generation. std::string OMPHostIRFile; - /// Name of the CIR file to output to disk. - std::string CIRFile; - /// The user provided compilation unit ID, if non-empty. This is used to /// externalize static variables which is needed to support accessing static /// device variables in host code for single source offloading languages diff --git a/clang/include/clang/CIR/CIRBuilder.h b/clang/include/clang/CIR/CIRBuilder.h index cb0a574f24fc..4ac0b39ac9b5 100644 --- a/clang/include/clang/CIR/CIRBuilder.h +++ b/clang/include/clang/CIR/CIRBuilder.h @@ -14,6 +14,7 @@ #ifndef CLANG_CIRBUILDER_H_ #define CLANG_CIRBUILDER_H_ +#include "clang/AST/ASTConsumer.h" #include "llvm/Support/ToolOutputFile.h" #include @@ -24,6 +25,7 @@ class OwningModuleRef; namespace clang { class ASTContext; +class DeclGroupRef; class FunctionDecl; } // namespace clang @@ -31,21 +33,25 @@ namespace cir { class CIRBuildImpl; class CIRGenTypes; -class CIRContext { +class CIRContext : public clang::ASTConsumer { public: + CIRContext(); + CIRContext(std::unique_ptr os); ~CIRContext(); - CIRContext(clang::ASTContext &AC); - void Init(); + void Initialize(clang::ASTContext &Context) override; bool EmitFunction(const clang::FunctionDecl *FD); + bool HandleTopLevelDecl(clang::DeclGroupRef D) override; + void HandleTranslationUnit(clang::ASTContext &Ctx) override; + private: + std::unique_ptr outStream; std::unique_ptr mlirCtx; std::unique_ptr builder; - std::unique_ptr cirOut; - clang::ASTContext &astCtx; + clang::ASTContext *astCtx; }; } // namespace cir -#endif // CLANG_CIRBUILDER_H_ \ No newline at end of file +#endif // CLANG_CIRBUILDER_H_ diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h new file mode 100644 index 000000000000..04fea51a40a8 --- /dev/null +++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h @@ -0,0 +1,90 @@ +//===---- CIRGenAction.h - CIR Code Generation Frontend Action -*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_CIR_CIRGENACTION_H +#define LLVM_CLANG_CIR_CIRGENACTION_H + +#include "clang/Frontend/FrontendAction.h" +#include + +namespace llvm { +class LLVMIRContext; +} + +namespace mlir { +class MLIRContext; +class ModuleOp; +} // namespace mlir + +namespace cir { +class CIRGenConsumer; +class CIRGenerator; + +class CIRGenAction : public clang::ASTFrontendAction { +public: + enum class OutputType { EmitAssembly, EmitCIR, EmitLLVM, None }; + +private: + friend class CIRGenConsumer; + + std::unique_ptr TheModule; + + mlir::MLIRContext *MLIRContext; + bool OwnsVMContext; + + std::unique_ptr loadModule(llvm::MemoryBufferRef MBRef); + +protected: + CIRGenAction(OutputType action, mlir::MLIRContext *_MLIRContext = nullptr); + + std::unique_ptr + CreateASTConsumer(clang::CompilerInstance &CI, + llvm::StringRef InFile) override; + + void ExecuteAction() override; + + void EndSourceFileAction() override; + +public: + ~CIRGenAction() override; + + CIRGenConsumer *cgConsumer; + OutputType action; +}; + +class EmitLLVMAction : public CIRGenAction { + virtual void anchor(); + +public: + EmitLLVMAction(mlir::MLIRContext *mlirCtx = nullptr); +}; + +class EmitCIRAction : public CIRGenAction { + virtual void anchor(); + +public: + EmitCIRAction(mlir::MLIRContext *mlirCtx = nullptr); +}; + +class EmitCIROnlyAction : public CIRGenAction { + virtual void anchor(); + +public: + EmitCIROnlyAction(mlir::MLIRContext *mlirCtx = nullptr); +}; + +class EmitAssemblyAction : public CIRGenAction { + virtual void anchor(); + +public: + EmitAssemblyAction(mlir::MLIRContext *mlirCtx = nullptr); +}; + +} // namespace cir + +#endif diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 1aa12080da55..420e919ce2d2 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -1955,18 +1955,6 @@ defm bounds_safety : BoolFOption< BothFlags<[], [CC1Option], " experimental bounds safety extension for C">>; -defm cir_warnings : BoolFOption<"cir-warnings", - LangOpts<"CIRWarnings">, DefaultFalse, - PosFlag, - NegFlag, - BothFlags<[], [CC1Option], " CIR to emit (analysis based) warnings">>; -def fcir_output_EQ : Joined<["-"], "fcir-output=">, - Group, HelpText<"Write clang IR (cir) to output file">, - Visibility<[ClangOption, CC1Option]>, MarshallingInfoString>, - MetaVarName<"">; -def fcir_output : Flag<["-"], "fcir-output">, - Group, Visibility<[ClangOption, CC1Option]>; - defm addrsig : BoolFOption<"addrsig", CodeGenOpts<"Addrsig">, DefaultFalse, PosFlag, @@ -3043,6 +3031,24 @@ def flax_vector_conversions : Flag<["-"], "flax-vector-conversions">, Group, Group, HelpText<"Force linking the clang builtins runtime library">; + +/// ClangIR-specific options - BEGIN +defm clangir : BoolFOption<"clangir", + FrontendOpts<"UseClangIRPipeline">, DefaultFalse, + PosFlag, + NegFlag LLVM pipeline to compile">, + BothFlags<[], [ClangOption, CC1Option], "">>; +def emit_cir : Flag<["-"], "emit-cir">, Visibility<[ClangOption, CC1Option]>, + Group, HelpText<"Build ASTs and then lower to ClangIR">; +def emit_cir_only : Flag<["-"], "emit-cir-only">, + HelpText<"Build ASTs and convert to CIR, discarding output">; +defm cir_warnings : BoolFOption<"cir-warnings", + LangOpts<"CIRWarnings">, DefaultFalse, + PosFlag, + NegFlag, + BothFlags<[], [CC1Option], " CIR to emit (analysis based) warnings">>; +/// ClangIR-specific options - END + def flto_EQ : Joined<["-"], "flto=">, Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>, Group, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index f374ba9c3734..c756ed99319d 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -65,6 +65,12 @@ enum ActionKind { /// Translate input source into HTML. EmitHTML, + /// Emit a .cir file + EmitCIR, + + /// Generate CIR, bud don't emit anything. + EmitCIROnly, + /// Emit a .ll file. EmitLLVM, @@ -154,11 +160,7 @@ enum ActionKind { class InputKind { public: /// The input file format. - enum Format { - Source, - ModuleMap, - Precompiled - }; + enum Format { Source, ModuleMap, Precompiled }; // If we are building a header unit, what kind it is; this affects whether // we look for the file in the user or system include search paths before @@ -412,6 +414,10 @@ class FrontendOptions { LLVM_PREFERRED_TYPE(bool) unsigned GenReducedBMI : 1; + /// Use Clang IR pipeline to emit code + LLVM_PREFERRED_TYPE(bool) + unsigned UseClangIRPipeline : 1; + CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. @@ -469,11 +475,11 @@ class FrontendOptions { /// Enable converting setter/getter expressions to property-dot syntx. ObjCMT_PropertyDotSyntax = 0x1000, - ObjCMT_MigrateDecls = (ObjCMT_ReadonlyProperty | ObjCMT_ReadwriteProperty | - ObjCMT_Annotation | ObjCMT_Instancetype | - ObjCMT_NsMacros | ObjCMT_ProtocolConformance | - ObjCMT_NsAtomicIOSOnlyProperty | - ObjCMT_DesignatedInitializer), + ObjCMT_MigrateDecls = + (ObjCMT_ReadonlyProperty | ObjCMT_ReadwriteProperty | + ObjCMT_Annotation | ObjCMT_Instancetype | ObjCMT_NsMacros | + ObjCMT_ProtocolConformance | ObjCMT_NsAtomicIOSOnlyProperty | + ObjCMT_DesignatedInitializer), ObjCMT_MigrateAll = (ObjCMT_Literals | ObjCMT_Subscripting | ObjCMT_MigrateDecls | ObjCMT_PropertyDotSyntax) }; diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 20287a395c05..cbe61e09ebb9 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -29,6 +29,7 @@ #include "mlir/IR/Verifier.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclGroup.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/ExprCXX.h" @@ -69,8 +70,6 @@ using llvm::SmallVector; using llvm::StringRef; using llvm::Twine; -CIRContext::CIRContext(clang::ASTContext &AC) : astCtx(AC) { Init(); } - CIRCodeGenFunction::CIRCodeGenFunction() = default; TypeEvaluationKind CIRCodeGenFunction::getEvaluationKind(QualType type) { type = type.getCanonicalType(); @@ -546,33 +545,26 @@ class CIRBuildImpl { }; } // namespace cir +CIRContext::CIRContext() {} + +CIRContext::CIRContext(std::unique_ptr os) + : outStream(std::move(os)) {} + CIRContext::~CIRContext() { // Run module verifier before shutdown. builder->verifyModule(); - - if (cirOut) { - // FIXME: pick a more verbose level. - builder->getModule()->print(cirOut->os()); - cirOut->keep(); - } } -void CIRContext::Init() { +void CIRContext::Initialize(clang::ASTContext &astCtx) { using namespace llvm; + this->astCtx = &astCtx; + mlirCtx = std::make_unique(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); builder = std::make_unique(*mlirCtx.get(), astCtx); - - std::error_code EC; - StringRef outFile = astCtx.getLangOpts().CIRFile; - if (outFile.empty()) - return; - cirOut = std::make_unique(outFile, EC, sys::fs::OF_None); - if (EC) - report_fatal_error("Failed to open " + outFile + ": " + EC.message()); } bool CIRContext::EmitFunction(const FunctionDecl *FD) { @@ -581,3 +573,18 @@ bool CIRContext::EmitFunction(const FunctionDecl *FD) { assert(func && "should emit function"); return true; } + +bool CIRContext::HandleTopLevelDecl(clang::DeclGroupRef D) { + for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { + auto *FD = cast(*I); + assert(FD && "We can't handle anything else yet"); + EmitFunction(FD); + } + + return true; +} + +void CIRContext::HandleTranslationUnit(ASTContext &C) { + if (outStream) + builder->getModule()->print(*outStream); +} diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 860ab25640e4..c9f3b2d35a68 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -20,7 +20,6 @@ add_clang_library(clangCIR LINK_LIBS clangAST clangBasic - clangEdit clangLex ${dialect_libs} MLIRCIR diff --git a/clang/lib/CIR/FrontendAction/CMakeLists.txt b/clang/lib/CIR/FrontendAction/CMakeLists.txt new file mode 100644 index 000000000000..21f5355f65af --- /dev/null +++ b/clang/lib/CIR/FrontendAction/CMakeLists.txt @@ -0,0 +1,7 @@ +set(LLVM_LINK_COMPONENTS + Core + Support + ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp new file mode 100644 index 000000000000..c074c31a0548 --- /dev/null +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -0,0 +1,162 @@ +//===--- CIRGenAction.cpp - LLVM Code generation Frontend Action ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "clang/CIRFrontendAction/CIRGenAction.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclGroup.h" +#include "clang/Basic/DiagnosticFrontend.h" +#include "clang/Basic/FileManager.h" +#include "clang/Basic/LangStandard.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/CIR/CIRBuilder.h" +#include "clang/CodeGen/BackendUtil.h" +#include "clang/CodeGen/ModuleBuilder.h" +#include "clang/Driver/DriverDiagnostic.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "clang/Lex/Preprocessor.h" +#include "llvm/Bitcode/BitcodeReader.h" +#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" +#include "llvm/IR/DebugInfo.h" +#include "llvm/IR/DiagnosticInfo.h" +#include "llvm/IR/DiagnosticPrinter.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/LLVMRemarkStreamer.h" +#include "llvm/IR/Module.h" +#include "llvm/IRReader/IRReader.h" +#include "llvm/LTO/LTOBackend.h" +#include "llvm/Linker/Linker.h" +#include "llvm/Pass.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/SourceMgr.h" +#include "llvm/Support/TimeProfiler.h" +#include "llvm/Support/Timer.h" +#include "llvm/Support/ToolOutputFile.h" +#include "llvm/Support/YAMLTraits.h" +#include "llvm/Transforms/IPO/Internalize.h" + +#include + +using namespace cir; +using namespace clang; + +namespace cir { +class CIRGenConsumer : public clang::ASTConsumer { + + virtual void anchor(); + ASTContext *astContext{nullptr}; + + std::unique_ptr gen; + +public: + CIRGenConsumer(std::unique_ptr os) + : gen(std::make_unique(std::move(os))) {} + + void Initialize(ASTContext &ctx) override { + assert(!astContext && "initialized multiple times"); + + astContext = &ctx; + + gen->Initialize(ctx); + } + + bool HandleTopLevelDecl(DeclGroupRef D) override { + PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), + astContext->getSourceManager(), + "LLVM IR generation of declaration"); + gen->HandleTopLevelDecl(D); + + return true; + } + + void HandleInlineFunctionDefinition(FunctionDecl *D) override {} + + void HandleInterestingDecl(DeclGroupRef D) override { HandleTopLevelDecl(D); } + + void HandleTranslationUnit(ASTContext &C) override { + gen->HandleTranslationUnit(C); + // TODO: Have context emit file here + } + + void HandleTagDeclDefinition(TagDecl *D) override {} + + void HandleTagDeclRequiredDefinition(const TagDecl *D) override {} + + void CompleteTentativeDefinition(VarDecl *D) override {} + + void CompleteExternalDeclaration(DeclaratorDecl *D) override {} + + void AssignInheritanceModel(CXXRecordDecl *RD) override {} + + void HandleVTable(CXXRecordDecl *RD) override {} +}; +} // namespace cir + +void CIRGenConsumer::anchor() {} + +CIRGenAction::CIRGenAction(OutputType act, mlir::MLIRContext *_MLIRContext) + : MLIRContext(_MLIRContext ? _MLIRContext : new mlir::MLIRContext), + OwnsVMContext(!_MLIRContext), action(act) {} + +CIRGenAction::~CIRGenAction() { + TheModule.reset(); + if (OwnsVMContext) + delete MLIRContext; +} + +void CIRGenAction::EndSourceFileAction() {} + +static std::unique_ptr +getOutputStream(CompilerInstance &ci, StringRef inFile, + CIRGenAction::OutputType action) { + switch (action) { + case CIRGenAction::OutputType::EmitAssembly: + return ci.createDefaultOutputFile(false, inFile, "s"); + case CIRGenAction::OutputType::EmitCIR: + return ci.createDefaultOutputFile(false, inFile, "cir"); + case CIRGenAction::OutputType::EmitLLVM: + return ci.createDefaultOutputFile(true, inFile, "llvm"); + case CIRGenAction::OutputType::None: + return nullptr; + } + + llvm_unreachable("Invalid action!"); +} + +std::unique_ptr +CIRGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { + auto out = CI.takeOutputStream(); + if (!out) + out = getOutputStream(CI, InFile, action); + return std::make_unique(std::move(out)); +} + +std::unique_ptr +CIRGenAction::loadModule(llvm::MemoryBufferRef MBRef) { + return {}; +} + +void CIRGenAction::ExecuteAction() { ASTFrontendAction::ExecuteAction(); } + +void EmitAssemblyAction::anchor() {} +EmitAssemblyAction::EmitAssemblyAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitAssembly, _MLIRContext) {} + +void EmitCIRAction::anchor() {} +EmitCIRAction::EmitCIRAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitCIR, _MLIRContext) {} + +void EmitCIROnlyAction::anchor() {} +EmitCIROnlyAction::EmitCIROnlyAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::None, _MLIRContext) {} diff --git a/clang/lib/CIRFrontendAction/CMakeLists.txt b/clang/lib/CIRFrontendAction/CMakeLists.txt new file mode 100644 index 000000000000..60430e5b4ef4 --- /dev/null +++ b/clang/lib/CIRFrontendAction/CMakeLists.txt @@ -0,0 +1,32 @@ +set(LLVM_LINK_COMPONENTS + Core + Support + ) + +include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) +include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_clang_library(clangCIRFrontendAction + CIRGenAction.cpp + + DEPENDS + MLIRCIROpsIncGen + + LINK_LIBS + clangAST + clangBasic + clangLex + clangFrontend + clangCIR + ${dialect_libs} + MLIRCIR + MLIRAnalysis + MLIRIR + MLIRParser + MLIRSideEffectInterfaces + MLIRTransforms + MLIRSupport + MLIRMemRefDialect + ) diff --git a/clang/lib/CMakeLists.txt b/clang/lib/CMakeLists.txt index c4f28b8b3c41..1ce6e24acff8 100644 --- a/clang/lib/CMakeLists.txt +++ b/clang/lib/CMakeLists.txt @@ -34,4 +34,5 @@ add_subdirectory(Support) #if(CLANG_ENABLE_CIR) add_subdirectory(CIR) + add_subdirectory(CIRFrontendAction) #endif() diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 80423d5d0e7a..5a20b1b3acd0 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -8155,28 +8155,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Input.getInputArg().renderAsInput(Args, CmdArgs); } - if (Arg *A = Args.getLastArg(options::OPT_fcir_output_EQ, - options::OPT_fcir_output)) { - if (A->getOption().matches(options::OPT_fcir_output_EQ)) { - StringRef Value = A->getValue(); - CmdArgs.push_back(Args.MakeArgString("-fcir-output=" + Value)); - } else { - std::string OutFile; - for (const InputInfo &Input : FrontendInputs) { - if (!Input.isFilename()) - continue; - OutFile = Input.getFilename(); - OutFile.append(".cir"); - StringRef Value = OutFile; - CmdArgs.push_back(Args.MakeArgString("-fcir-output=" + Value)); - break; - } - - if (OutFile.empty()) - D.Diag(diag::err_drv_cir_multiple_input); - } - } - if (D.CC1Main && !D.CCGenDiagnostics) { // Invoke the CC1 directly in this process C.addCommand(std::make_unique( diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 47d3a0780185..55e6ac91b00f 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -2727,6 +2727,8 @@ static const auto &getFrontendActionTable() { {frontend::DumpTokens, OPT_dump_tokens}, {frontend::EmitAssembly, OPT_S}, {frontend::EmitBC, OPT_emit_llvm_bc}, + {frontend::EmitCIR, OPT_emit_cir}, + {frontend::EmitCIROnly, OPT_emit_cir_only}, {frontend::EmitHTML, OPT_emit_html}, {frontend::EmitLLVM, OPT_emit_llvm}, {frontend::EmitLLVMOnly, OPT_emit_llvm_only}, @@ -4315,9 +4317,6 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args, << Opts.OMPHostIRFile; } - if (Arg *A = Args.getLastArg(options::OPT_fcir_output_EQ)) - Opts.CIRFile = A->getValue(); - // Set CUDA mode for OpenMP target NVPTX/AMDGCN if specified in options Opts.OpenMPCUDAMode = Opts.OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN()) && @@ -4604,6 +4603,8 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) { case frontend::ASTView: case frontend::EmitAssembly: case frontend::EmitBC: + case frontend::EmitCIR: + case frontend::EmitCIROnly: case frontend::EmitHTML: case frontend::EmitLLVM: case frontend::EmitLLVMOnly: diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index 51c379ade270..37d6aec93a1f 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -9,6 +9,7 @@ set(link_libs clangDriver clangExtractAPI clangFrontend + clangCIRFrontendAction clangRewriteFrontend ) diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index f85f0365616f..217a6d4b8008 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "clang/ARCMigrate/ARCMTActions.h" +#include "clang/CIRFrontendAction/CIRGenAction.h" #include "clang/CodeGen/CodeGenAction.h" #include "clang/Config/config.h" #include "clang/Driver/Options.h" @@ -32,6 +33,7 @@ #include "llvm/Support/DynamicLibrary.h" #include "llvm/Support/ErrorHandling.h" using namespace clang; +using namespace cir; using namespace llvm::opt; namespace clang { @@ -42,6 +44,19 @@ CreateFrontendBaseAction(CompilerInstance &CI) { StringRef Action("unknown"); (void)Action; + auto UseCIR = CI.getFrontendOpts().UseClangIRPipeline; + auto Act = CI.getFrontendOpts().ProgramAction; + + auto EmitsCIR = Act == EmitCIR || Act == EmitCIROnly; + auto IsImplementedCIROutput = EmitsCIR || Act == EmitLLVM; + + if (UseCIR && !IsImplementedCIROutput) + llvm::report_fatal_error("-fclangir currently only works with -emit-cir, " + "-emit-cir-only and -emit-llvm"); + if (!UseCIR && EmitsCIR) + llvm::report_fatal_error( + "-emit-cir and -emit-cir-only only valid when using -fenable"); + switch (CI.getFrontendOpts().ProgramAction) { case ASTDeclList: return std::make_unique(); case ASTDump: return std::make_unique(); @@ -53,6 +68,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) { case DumpTokens: return std::make_unique(); case EmitAssembly: return std::make_unique(); case EmitBC: return std::make_unique(); + case EmitCIR: return std::make_unique(); + case EmitCIROnly: return std::make_unique(); case EmitHTML: return std::make_unique(); case EmitLLVM: return std::make_unique(); case EmitLLVMOnly: return std::make_unique(); diff --git a/clang/lib/Sema/CIRBasedWarnings.cpp b/clang/lib/Sema/CIRBasedWarnings.cpp index de3370e540ca..f259fca6d95b 100644 --- a/clang/lib/Sema/CIRBasedWarnings.cpp +++ b/clang/lib/Sema/CIRBasedWarnings.cpp @@ -67,7 +67,8 @@ sema::CIRBasedWarnings::CIRBasedWarnings(Sema &s) : S(s) { DefaultPolicy.enableConsumedAnalysis = isEnabled(D, warn_use_in_invalid_state); - CIRCtx = std::make_unique(S.getASTContext()); + CIRCtx = std::make_unique(); + CIRCtx->Initialize(S.getASTContext()); } // We need this here for unique_ptr with forward declared class. diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 3b84b9ff323f..9e79987a8fcd 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -1,6 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -fcir-warnings %s -fcir-output=%t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * int foo(int i) { return i; diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index 2372a54f9f0a..5403ade061a2 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -1,8 +1,7 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -fcir-warnings %s -fcir-output=%t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fsyntax-only -fcir-warnings %s -fcir-output=%t.cpp.cir +// RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cpp.cir // RUN: FileCheck --input-file=%t.cpp.cir --check-prefix=CHECK-CPP %s -// XFAIL: * int t0(int i) { return i; } unsigned int t1(unsigned int i) { return i; } diff --git a/llvm/docs/CIR.rst b/llvm/docs/CIR.rst index 2bfcb89035dc..da5830338d3a 100644 --- a/llvm/docs/CIR.rst +++ b/llvm/docs/CIR.rst @@ -26,6 +26,19 @@ The ``-fcir-output`` and ``-fcir-output=`` flags can be used to output the generated CIR (currently needs to be combined with ``-fcir-warnings`` to work). +Additionally, clang can run it's full compilation pipeline with +the CIR phase inserted between clang and llvm. Passing +``-fclangir`` to ``clang -cc1`` will opt in to clang generating +CIR which is lowered to LLVMIR and continued through the +backend. (WIP -- the backend is not yet functional). + +A new flag ``-emit-cir`` can be used in combination with +``-fclangir`` to emit pristine CIR right out of the CIRGen phase. + +Adding flags to select between different levels of lowerings +between MLIR dialects (e.g.to STD/Affine/SCF) are a WIP. + + Implementation Notes ==================== @@ -42,4 +55,4 @@ end of translation defered stuff). - Some data structures used for LLVM codegen can be made more generic and be reused from CIRBuilder. Duplicating content right now to prevent potential frequent merge conflicts. - - Split out into header files all potential common code. \ No newline at end of file + - Split out into header files all potential common code. From d87e2b1b3b17dda702181712bedeaf2afdbf7ec4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 19 Oct 2021 17:09:57 -0700 Subject: [PATCH 0035/2301] [CIR] Add 'cir.cst' to represent constants Add verifier and test --- clang/test/CIR/CodeGen/basic.c | 6 +++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 31 ++++++++++++++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 22 +++++++++++++++ 3 files changed, 59 insertions(+) diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 9e79987a8fcd..7dcf3205e763 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * int foo(int i) { return i; @@ -12,3 +13,8 @@ int foo(int i) { // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %1 : i32 // CHECK-NEXT: } + +int f2() { return 3; } + +// CHECK: func @f2() -> i32 { +// CHECK-NEXT: cir.return diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 0c0070f48d6e..ad53fc6e1861 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -17,6 +17,7 @@ include "mlir/Dialect/CIR/IR/CIRDialect.td" include "mlir/Dialect/CIR/IR/CIRTypes.td" include "mlir/IR/SymbolInterfaces.td" +include "mlir/IR/BuiltinAttributeInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/IR/SymbolInterfaces.td" @@ -27,6 +28,36 @@ include "mlir/IR/SymbolInterfaces.td" class CIR_Op traits = []> : Op; +//===----------------------------------------------------------------------===// +// ConstantOp +//===----------------------------------------------------------------------===// +def ConstantOp : CIR_Op<"cst", + [ConstantLike, Pure]> { + + let summary = "constant"; + let description = [{ + Constant operation turns a literal into an SSA value. The data is attached + to the operation as an attribute. For example: + + ```mlir + %0 = cir.cst 42 : i32 + %1 = cir.cst 4.2 : f32 + ``` + }]; + + // The constant operation takes an attribute as the only input. + let arguments = (ins TypedAttrInterface:$value); + + // The constant operation returns a single value of AnyType. + let results = (outs AnyType:$res); + + let assemblyFormat = "`(` $value `)` attr-dict `:` type($res)"; + + let hasVerifier = 1; + + // TODO: hasFolder, etc +} + //===----------------------------------------------------------------------===// // AllocaOp //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index e59de6f4dd84..98027363b2d0 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -38,6 +38,28 @@ void cir::CIRDialect::initialize() { >(); } +//===----------------------------------------------------------------------===// +// ConstantOp +//===----------------------------------------------------------------------===// + +LogicalResult ConstantOp::verify() { + auto opType = getType(); + auto val = getValue(); + auto valueType = val.getType(); + + // ODS already generates checks to make sure the result type is valid. We just + // need to additionally check that the value's attribute type is consistent + // with the result type. + if (val.isa()) { + if (valueType != opType) + return emitOpError("result type (") + << opType << ") does not match value type (" << valueType << ")"; + return success(); + } + + return emitOpError("cannot have value of type ") << valueType; +} + //===----------------------------------------------------------------------===// // ReturnOp //===----------------------------------------------------------------------===// From 2ef8b551b9b755a7233acaddd8983a38d56d6ba0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 15 Oct 2021 17:52:07 -0700 Subject: [PATCH 0036/2301] [CIR] Support for simple variable declaration and initialization - Add attributes for initialization style. - Add buildAutoVarDecl with scaffolding for var init and cleanup (TBD) - Cleanup alloca definition (some stuff might get re-introduced later) - Add constraints using tablegen and eliminate cpp verifier - Tests --- clang/lib/CIR/CIRBuilder.cpp | 754 ++++++++++++++++-- clang/test/CIR/CodeGen/basic.c | 16 +- clang/test/CIR/IR/cir-ops.cir | 25 +- mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 1 + mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 71 +- .../mlir/Dialect/CIR/IR/CMakeLists.txt | 5 + mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 50 +- mlir/lib/Dialect/CIR/IR/CMakeLists.txt | 1 + 8 files changed, 762 insertions(+), 161 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index cbe61e09ebb9..512a293bfe6c 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -148,7 +148,6 @@ class CIRBuildImpl { CIRBuildImpl &operator=(CIRBuildImpl &) = delete; ~CIRBuildImpl() = default; - // FIXME: instead of mlir::Value, hold a RawAddress here. using SymTableTy = llvm::ScopedHashTable; using SymTableScopeTy = ScopedHashTableScope; @@ -189,23 +188,21 @@ class CIRBuildImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(const Decl *var, QualType T, mlir::Value value, - mlir::Location loc) { + mlir::LogicalResult declare(const Decl *var, QualType T, mlir::Location loc, + mlir::Value &addr, bool IsParam = false) { const auto *namedVar = dyn_cast_or_null(var); assert(namedVar && "Needs a named decl"); if (symbolTable.count(var)) return mlir::failure(); - // TODO: track "constant" auto localVarTy = getCIRType(T); auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), localVarTy); auto localVarAddr = builder.create( loc, /*addr type*/ localVarPtrTy, /*var type*/ localVarTy, - /*initial_value*/ mlir::UnitAttr::get(builder.getContext()), - /*constant*/ false); + IsParam ? InitStyle::paraminit : InitStyle::uninitialized); auto *parentBlock = localVarAddr->getBlock(); localVarAddr->moveBefore(&parentBlock->front()); @@ -213,6 +210,7 @@ class CIRBuildImpl { // Insert into the symbol table, allocate some stack space in the // function entry block. symbolTable.insert(var, localVarAddr); + addr = localVarAddr; return mlir::success(); } @@ -221,88 +219,183 @@ class CIRBuildImpl { mlir::ModuleOp getModule() { return theModule; } mlir::OpBuilder &getBuilder() { return builder; } - class ScalarExprEmitter : public StmtVisitor { - LLVM_ATTRIBUTE_UNUSED CIRCodeGenFunction &CGF; - CIRBuildImpl &Builder; + class RawAddress { + mlir::Value Pointer; + CharUnits Alignment; public: - ScalarExprEmitter(CIRCodeGenFunction &cgf, CIRBuildImpl &builder) - : CGF(cgf), Builder(builder) { - (void)CGF; + RawAddress(mlir::Value pointer, CharUnits alignment) + : Pointer(pointer), Alignment(alignment) { + assert((!alignment.isZero() || pointer == nullptr) && + "creating valid address with invalid alignment"); } - mlir::Value Visit(Expr *E) { - return StmtVisitor::Visit(E); + static RawAddress invalid() { return RawAddress(nullptr, CharUnits()); } + bool isValid() const { return Pointer != nullptr; } + + mlir::Value getPointer() const { + // assert(isValid()); + return Pointer; } - class RawAddress { - mlir::Value Pointer; - CharUnits Alignment; + /// Return the alignment of this pointer. + CharUnits getAlignment() const { + // assert(isValid()); + return Alignment; + } + }; - public: - RawAddress(mlir::Value pointer, CharUnits alignment) - : Pointer(pointer), Alignment(alignment) { - assert((!alignment.isZero() || pointer == nullptr) && - "creating valid address with invalid alignment"); - } + class LValue { + enum { + Simple, // This is a normal l-value, use getAddress(). + VectorElt, // This is a vector element l-value (V[i]), use getVector* + BitField, // This is a bitfield l-value, use getBitfield*. + ExtVectorElt, // This is an extended vector subset, use getExtVectorComp + GlobalReg, // This is a register l-value, use getGlobalReg() + MatrixElt // This is a matrix element, use getVector* + } LVType; + QualType Type; + + private: + void Initialize(CharUnits Alignment, QualType Type, + LValueBaseInfo BaseInfo) { + // assert((!Alignment.isZero()) && // || Type->isIncompleteType()) && + // "initializing l-value with zero alignment!"); + this->Type = Type; + // This flag shows if a nontemporal load/stores should be used when + // accessing this lvalue. + const unsigned MaxAlign = 1U << 31; + this->Alignment = Alignment.getQuantity() <= MaxAlign + ? Alignment.getQuantity() + : MaxAlign; + assert(this->Alignment == Alignment.getQuantity() && + "Alignment exceeds allowed max!"); + this->BaseInfo = BaseInfo; + } - static RawAddress invalid() { return RawAddress(nullptr, CharUnits()); } - bool isValid() const { return Pointer != nullptr; } + // The alignment to use when accessing this lvalue. (For vector elements, + // this is the alignment of the whole vector) + unsigned Alignment; + mlir::Value V; + LValueBaseInfo BaseInfo; - mlir::Value getPointer() const { - // assert(isValid()); - return Pointer; - } + public: + bool isSimple() const { return LVType == Simple; } + bool isVectorElt() const { return LVType == VectorElt; } + bool isBitField() const { return LVType == BitField; } + bool isExtVectorElt() const { return LVType == ExtVectorElt; } + bool isGlobalReg() const { return LVType == GlobalReg; } + bool isMatrixElt() const { return LVType == MatrixElt; } - /// Return the alignment of this pointer. - CharUnits getAlignment() const { - // assert(isValid()); - return Alignment; - } - }; - class LValue { - private: - void Initialize(CharUnits Alignment, LValueBaseInfo BaseInfo) { - // assert((!Alignment.isZero()) && // || Type->isIncompleteType()) && - // "initializing l-value with zero alignment!"); - - const unsigned MaxAlign = 1U << 31; - this->Alignment = Alignment.getQuantity() <= MaxAlign - ? Alignment.getQuantity() - : MaxAlign; - assert(this->Alignment == Alignment.getQuantity() && - "Alignment exceeds allowed max!"); - this->BaseInfo = BaseInfo; - } + QualType getType() const { return Type; } - // The alignment to use when accessing this lvalue. (For vector elements, - // this is the alignment of the whole vector) - unsigned Alignment; - mlir::Value V; - LValueBaseInfo BaseInfo; + mlir::Value getPointer() const { return V; } - public: - mlir::Value getPointer() const { return V; } + CharUnits getAlignment() const { + return CharUnits::fromQuantity(Alignment); + } - CharUnits getAlignment() const { - return CharUnits::fromQuantity(Alignment); - } + RawAddress getAddress() const { + return RawAddress(getPointer(), getAlignment()); + } - RawAddress getAddress() const { - return RawAddress(getPointer(), getAlignment()); - } + LValueBaseInfo getBaseInfo() const { return BaseInfo; } + void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; } - LValueBaseInfo getBaseInfo() const { return BaseInfo; } - void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; } + static LValue makeAddr(RawAddress address, QualType T, + AlignmentSource Source = AlignmentSource::Type) { + LValue R; + R.V = address.getPointer(); + R.Initialize(address.getAlignment(), T, LValueBaseInfo(Source)); + R.LVType = Simple; + return R; + } + }; - static LValue makeAddr(RawAddress address, - AlignmentSource Source = AlignmentSource::Type) { - LValue R; - R.V = address.getPointer(); - R.Initialize(address.getAlignment(), LValueBaseInfo(Source)); - return R; - } - }; + /// This trivial value class is used to represent the result of an + /// expression that is evaluated. It can be one of three things: either a + /// simple MLIR SSA value, a pair of SSA values for complex numbers, or the + /// address of an aggregate value in memory. + class RValue { + enum Flavor { Scalar, Complex, Aggregate }; + + // The shift to make to an aggregate's alignment to make it look + // like a pointer. + enum { AggAlignShift = 4 }; + + // Stores first value and flavor. + llvm::PointerIntPair V1; + // Stores second value and volatility. + llvm::PointerIntPair V2; + + public: + bool isScalar() const { return V1.getInt() == Scalar; } + bool isComplex() const { return V1.getInt() == Complex; } + bool isAggregate() const { return V1.getInt() == Aggregate; } + + bool isVolatileQualified() const { return V2.getInt(); } + + /// getScalarVal() - Return the Value* of this scalar value. + mlir::Value getScalarVal() const { + assert(isScalar() && "Not a scalar!"); + return V1.getPointer(); + } + + /// getComplexVal - Return the real/imag components of this complex value. + /// + std::pair getComplexVal() const { + assert(0 && "not implemented"); + return {}; + } + + /// getAggregateAddr() - Return the Value* of the address of the + /// aggregate. + RawAddress getAggregateAddress() const { + assert(0 && "not implemented"); + return RawAddress::invalid(); + } + + static RValue getIgnored() { + // FIXME: should we make this a more explicit state? + return get(nullptr); + } + + static RValue get(mlir::Value V) { + RValue ER; + ER.V1.setPointer(V); + ER.V1.setInt(Scalar); + ER.V2.setInt(false); + return ER; + } + static RValue getComplex(mlir::Value V1, mlir::Value V2) { + assert(0 && "not implemented"); + return RValue{}; + } + static RValue getComplex(const std::pair &C) { + assert(0 && "not implemented"); + return RValue{}; + } + // FIXME: Aggregate rvalues need to retain information about whether they + // are volatile or not. Remove default to find all places that probably + // get this wrong. + static RValue getAggregate(RawAddress addr, bool isVolatile = false) { + assert(0 && "not implemented"); + return RValue{}; + } + }; + class ScalarExprEmitter : public StmtVisitor { + LLVM_ATTRIBUTE_UNUSED CIRCodeGenFunction &CGF; + CIRBuildImpl &Builder; + + public: + ScalarExprEmitter(CIRCodeGenFunction &cgf, CIRBuildImpl &builder) + : CGF(cgf), Builder(builder) { + (void)CGF; + } + + mlir::Value Visit(Expr *E) { + return StmtVisitor::Visit(E); + } LValue EmitDeclRefLValue(const DeclRefExpr *E) { const NamedDecl *ND = E->getDecl(); @@ -325,7 +418,7 @@ class CIRBuildImpl { assert(V && "Name lookup must succeed"); LValue LV = LValue::makeAddr(RawAddress(V, CharUnits::fromQuantity(4)), - AlignmentSource::Decl); + VD->getType(), AlignmentSource::Decl); return LV; } @@ -342,7 +435,7 @@ class CIRBuildImpl { << E->getStmtClassName() << "'"; break; } - return LValue::makeAddr(RawAddress::invalid()); + return LValue::makeAddr(RawAddress::invalid(), E->getType()); } /// Emits the address of the l-value, then loads and returns the result. @@ -392,8 +485,500 @@ class CIRBuildImpl { // return llvm::UndefValue::get(CGF.ConvertType(E->getType())); return nullptr; } + + // Leaves. + mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { + mlir::Type Ty = Builder.getCIRType(E->getType()); + return Builder.builder.create( + Builder.getLoc(E->getExprLoc()), Ty, + Builder.builder.getIntegerAttr(Ty, E->getValue())); + } }; + struct AutoVarEmission { + const VarDecl *Variable; + /// The address of the alloca for languages with explicit address space + /// (e.g. OpenCL) or alloca casted to generic pointer for address space + /// agnostic languages (e.g. C++). Invalid if the variable was emitted + /// as a global constant. + RawAddress Addr; + + /// True if the variable is of aggregate type and has a constant + /// initializer. + bool IsConstantAggregate; + + struct Invalid {}; + AutoVarEmission(Invalid) : Variable(nullptr), Addr(RawAddress::invalid()) {} + + AutoVarEmission(const VarDecl &variable) + : Variable(&variable), Addr(RawAddress::invalid()), + IsConstantAggregate(false) {} + + static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } + /// Returns the raw, allocated address, which is not necessarily + /// the address of the object itself. It is casted to default + /// address space for address space agnostic languages. + RawAddress getAllocatedAddress() const { return Addr; } + }; + + /// Determine whether an object of this type can be emitted + /// as a constant. + /// + /// If ExcludeCtor is true, the duration when the object's constructor runs + /// will not be considered. The caller will need to verify that the object is + /// not written to during its construction. + /// FIXME: in LLVM codegen path this is part of CGM, which doesn't seem + /// like necessary, since (1) it doesn't use CGM at all and (2) is AST type + /// query specific. + bool isTypeConstant(QualType Ty, bool ExcludeCtor) { + if (!Ty.isConstant(astCtx) && !Ty->isReferenceType()) + return false; + + if (astCtx.getLangOpts().CPlusPlus) { + if (const CXXRecordDecl *Record = + astCtx.getBaseElementType(Ty)->getAsCXXRecordDecl()) + return ExcludeCtor && !Record->hasMutableFields() && + Record->hasTrivialDestructor(); + } + + return true; + } + + /// Emit the alloca and debug information for a + /// local variable. Does not emit initialization or destruction. + AutoVarEmission buildAutoVarAlloca(const VarDecl &D) { + QualType Ty = D.getType(); + // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private && + // getLangOpts().OpenCL)) + assert(Ty.getAddressSpace() == LangAS::Default); + + assert(!D.isEscapingByref() && "not implemented"); + assert(!Ty->isVariablyModifiedType() && "not implemented"); + assert(!astCtx.getLangOpts().OpenMP && // !CGM.getLangOpts().OpenMPIRBuilder + "not implemented"); + bool NRVO = astCtx.getLangOpts().ElideConstructors && D.isNRVOVariable(); + assert(!NRVO && "not implemented"); + assert(Ty->isConstantSizeType() && "not implemented"); + assert(!D.hasAttr() && "not implemented"); + + AutoVarEmission emission(D); + CharUnits alignment = astCtx.getDeclAlign(&D); + // TODO: debug info + // TODO: use CXXABI + + // If this value is an array or struct with a statically determinable + // constant initializer, there are optimizations we can do. + // + // TODO: We should constant-evaluate the initializer of any variable, + // as long as it is initialized by a constant expression. Currently, + // isConstantInitializer produces wrong answers for structs with + // reference or bitfield members, and a few other cases, and checking + // for POD-ness protects us from some of these. + if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) && + (D.isConstexpr() || + ((Ty.isPODType(astCtx) || + astCtx.getBaseElementType(Ty)->isObjCObjectPointerType()) && + D.getInit()->isConstantInitializer(astCtx, false)))) { + + // If the variable's a const type, and it's neither an NRVO + // candidate nor a __block variable and has no mutable members, + // emit it as a global instead. + // Exception is if a variable is located in non-constant address space + // in OpenCL. + // TODO: deal with CGM.getCodeGenOpts().MergeAllConstants + // TODO: perhaps we don't need this at all at CIR since this can + // be done as part of lowering down to LLVM. + if ((!astCtx.getLangOpts().OpenCL || + Ty.getAddressSpace() == LangAS::opencl_constant) && + (!NRVO && !D.isEscapingByref() && isTypeConstant(Ty, true))) + assert(0 && "not implemented"); + + // Otherwise, tell the initialization code that we're in this case. + emission.IsConstantAggregate = true; + } + + // TODO: track source location range... + mlir::Value addr; + if (failed(declare(&D, Ty, getLoc(D.getSourceRange().getBegin()), addr))) { + theModule.emitError("Cannot declare variable"); + return emission; + } + + // TODO: what about emitting lifetime markers for MSVC catch parameters? + // TODO: something like @llvm.lifetime.start/end here? revisit this later. + emission.Addr = RawAddress{addr, alignment}; + return emission; + } + + /// Determine whether the given initializer is trivial in the sense + /// that it requires no code to be generated. + bool isTrivialInitializer(const Expr *Init) { + if (!Init) + return true; + + if (const CXXConstructExpr *Construct = dyn_cast(Init)) + if (CXXConstructorDecl *Constructor = Construct->getConstructor()) + if (Constructor->isTrivial() && Constructor->isDefaultConstructor() && + !Construct->requiresZeroInitialization()) + return true; + + return false; + } + + // TODO: this can also be abstrated into common AST helpers + bool hasBooleanRepresentation(QualType Ty) { + if (Ty->isBooleanType()) + return true; + + if (const EnumType *ET = Ty->getAs()) + return ET->getDecl()->getIntegerType()->isBooleanType(); + + if (const AtomicType *AT = Ty->getAs()) + return hasBooleanRepresentation(AT->getValueType()); + + return false; + } + + mlir::Value buildToMemory(mlir::Value Value, QualType Ty) { + // Bool has a different representation in memory than in registers. + if (hasBooleanRepresentation(Ty)) + assert(0 && "not implemented"); + return Value; + } + + void buildStoreOfScalar(mlir::Value value, LValue lvalue, const Decl *D, + bool isInit) { + // TODO: constant matrix type, volatile, non temporal, TBAA + buildStoreOfScalar(value, lvalue.getAddress(), false, lvalue.getType(), + lvalue.getBaseInfo(), D, isInit, false); + } + + void buildStoreOfScalar(mlir::Value Value, RawAddress Addr, bool Volatile, + QualType Ty, LValueBaseInfo BaseInfo, const Decl *D, + bool isInit, bool isNontemporal) { + // TODO: PreserveVec3Type + // TODO: LValueIsSuitableForInlineAtomic ? + // TODO: TBAA + Value = buildToMemory(Value, Ty); + if (Ty->isAtomicType() || isNontemporal) { + assert(0 && "not implemented"); + } + + // Update the alloca with more info on initialization. + auto SrcAlloca = dyn_cast_or_null( + Addr.getPointer().getDefiningOp()); + if (isInit) { + InitStyle IS; + const VarDecl *VD = dyn_cast_or_null(D); + assert(VD && "VarDecl expected"); + if (VD->hasInit()) { + switch (VD->getInitStyle()) { + case VarDecl::ParenListInit: + llvm_unreachable("NYI"); + case VarDecl::CInit: + IS = InitStyle::cinit; + break; + case VarDecl::CallInit: + IS = InitStyle::callinit; + break; + case VarDecl::ListInit: + IS = InitStyle::listinit; + break; + } + SrcAlloca.setInitAttr(InitStyleAttr::get(builder.getContext(), IS)); + } + } + assert(SrcAlloca && "find a better way to retrieve source location"); + builder.create(SrcAlloca.getLoc(), Value, + Addr.getPointer()); + } + + /// Store the specified rvalue into the specified + /// lvalue, where both are guaranteed to the have the same type, and that type + /// is 'Ty'. + void buldStoreThroughLValue(RValue Src, LValue Dst, const Decl *D, + bool isInit) { + assert(Dst.isSimple() && "only implemented simple"); + // TODO: ObjC lifetime. + assert(Src.isScalar() && "Can't emit an agg store with this method"); + buildStoreOfScalar(Src.getScalarVal(), Dst, D, isInit); + } + + void buildScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue) { + // TODO: this is where a lot of ObjC lifetime stuff would be done. + mlir::Value value = buildScalarExpr(init); + buldStoreThroughLValue(RValue::get(value), lvalue, D, true); + return; + } + + /// Emit an expression as an initializer for an object (variable, field, etc.) + /// at the given location. The expression is not necessarily the normal + /// initializer for the object, and the address is not necessarily + /// its normal location. + /// + /// \param init the initializing expression + /// \param D the object to act as if we're initializing + /// \param lvalue the lvalue to initialize + void buildExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue) { + QualType type = D->getType(); + + if (type->isReferenceType()) { + assert(0 && "not implemented"); + return; + } + switch (CIRCodeGenFunction::getEvaluationKind(type)) { + case TEK_Scalar: + buildScalarInit(init, D, lvalue); + return; + case TEK_Complex: { + assert(0 && "not implemented"); + return; + } + case TEK_Aggregate: + assert(0 && "not implemented"); + return; + } + llvm_unreachable("bad evaluation kind"); + } + + void buildAutoVarInit(const AutoVarEmission &emission) { + assert(emission.Variable && "emission was not valid!"); + + const VarDecl &D = *emission.Variable; + QualType type = D.getType(); + + // If this local has an initializer, emit it now. + const Expr *Init = D.getInit(); + + // TODO: in LLVM codegen if we are at an unreachable point, the initializer + // isn't emitted unless it contains a label. What we want for CIR? + assert(builder.getInsertionBlock()); + + // Initialize the variable here if it doesn't have a initializer and it is a + // C struct that is non-trivial to initialize or an array containing such a + // struct. + if (!Init && type.isNonTrivialToPrimitiveDefaultInitialize() == + QualType::PDIK_Struct) { + assert(0 && "not implemented"); + return; + } + + const RawAddress Loc = emission.Addr; + + // Note: constexpr already initializes everything correctly. + LangOptions::TrivialAutoVarInitKind trivialAutoVarInit = + (D.isConstexpr() + ? LangOptions::TrivialAutoVarInitKind::Uninitialized + : (D.getAttr() + ? LangOptions::TrivialAutoVarInitKind::Uninitialized + : astCtx.getLangOpts().getTrivialAutoVarInit())); + + auto initializeWhatIsTechnicallyUninitialized = [&](RawAddress Loc) { + if (trivialAutoVarInit == + LangOptions::TrivialAutoVarInitKind::Uninitialized) + return; + + assert(0 && "unimplemented"); + }; + + if (isTrivialInitializer(Init)) + return initializeWhatIsTechnicallyUninitialized(Loc); + + if (emission.IsConstantAggregate || + D.mightBeUsableInConstantExpressions(astCtx)) { + assert(0 && "not implemented"); + } + + initializeWhatIsTechnicallyUninitialized(Loc); + LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); + return buildExprAsInit(Init, &D, lv); + } + + void buildAutoVarCleanups(const AutoVarEmission &emission) { + assert(emission.Variable && "emission was not valid!"); + + // TODO: in LLVM codegen if we are at an unreachable point codgen + // is ignored. What we want for CIR? + assert(builder.getInsertionBlock()); + const VarDecl &D = *emission.Variable; + + // Check the type for a cleanup. + // TODO: something like emitAutoVarTypeCleanup + if (QualType::DestructionKind dtorKind = D.needsDestruction(astCtx)) + assert(0 && "not implemented"); + + // In GC mode, honor objc_precise_lifetime. + if (astCtx.getLangOpts().getGC() != LangOptions::NonGC && + D.hasAttr()) + assert(0 && "not implemented"); + + // Handle the cleanup attribute. + if (const CleanupAttr *CA = D.getAttr()) + assert(0 && "not implemented"); + + // TODO: handle block variable + } + + /// Emit code and set up symbol table for a variable declaration with auto, + /// register, or no storage class specifier. These turn into simple stack + /// objects, globals depending on target. + void buildAutoVarDecl(const VarDecl &D) { + AutoVarEmission emission = buildAutoVarAlloca(D); + buildAutoVarInit(emission); + buildAutoVarCleanups(emission); + } + + /// This method handles emission of any variable declaration + /// inside a function, including static vars etc. + void buildVarDecl(const VarDecl &D) { + if (D.hasExternalStorage()) { + assert(0 && "should we just returns is there something to track?"); + // Don't emit it now, allow it to be emitted lazily on its first use. + return; + } + + // Some function-scope variable does not have static storage but still + // needs to be emitted like a static variable, e.g. a function-scope + // variable in constant address space in OpenCL. + if (D.getStorageDuration() != SD_Automatic) + assert(0 && "not implemented"); + + if (D.getType().getAddressSpace() == LangAS::opencl_local) + assert(0 && "not implemented"); + + assert(D.hasLocalStorage()); + return buildAutoVarDecl(D); + } + + void buildDecl(const Decl &D) { + switch (D.getKind()) { + case Decl::TopLevelStmt: + case Decl::ImplicitConceptSpecialization: + case Decl::HLSLBuffer: + case Decl::UnnamedGlobalConstant: + llvm_unreachable("NYI"); + case Decl::BuiltinTemplate: + case Decl::TranslationUnit: + case Decl::ExternCContext: + case Decl::Namespace: + case Decl::UnresolvedUsingTypename: + case Decl::ClassTemplateSpecialization: + case Decl::ClassTemplatePartialSpecialization: + case Decl::VarTemplateSpecialization: + case Decl::VarTemplatePartialSpecialization: + case Decl::TemplateTypeParm: + case Decl::UnresolvedUsingValue: + case Decl::NonTypeTemplateParm: + case Decl::CXXDeductionGuide: + case Decl::CXXMethod: + case Decl::CXXConstructor: + case Decl::CXXDestructor: + case Decl::CXXConversion: + case Decl::Field: + case Decl::MSProperty: + case Decl::IndirectField: + case Decl::ObjCIvar: + case Decl::ObjCAtDefsField: + case Decl::ParmVar: + case Decl::ImplicitParam: + case Decl::ClassTemplate: + case Decl::VarTemplate: + case Decl::FunctionTemplate: + case Decl::TypeAliasTemplate: + case Decl::TemplateTemplateParm: + case Decl::ObjCMethod: + case Decl::ObjCCategory: + case Decl::ObjCProtocol: + case Decl::ObjCInterface: + case Decl::ObjCCategoryImpl: + case Decl::ObjCImplementation: + case Decl::ObjCProperty: + case Decl::ObjCCompatibleAlias: + case Decl::PragmaComment: + case Decl::PragmaDetectMismatch: + case Decl::AccessSpec: + case Decl::LinkageSpec: + case Decl::Export: + case Decl::ObjCPropertyImpl: + case Decl::FileScopeAsm: + case Decl::Friend: + case Decl::FriendTemplate: + case Decl::Block: + case Decl::Captured: + case Decl::UsingShadow: + case Decl::ConstructorUsingShadow: + case Decl::ObjCTypeParam: + case Decl::Binding: + case Decl::UnresolvedUsingIfExists: + llvm_unreachable("Declaration should not be in declstmts!"); + case Decl::Record: // struct/union/class X; + case Decl::CXXRecord: // struct/union/class X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::Enum: // enum X; + assert(0 && "Not implemented"); + return; + case Decl::Function: // void X(); + case Decl::EnumConstant: // enum ? { X = ? } + case Decl::StaticAssert: // static_assert(X, ""); [C++0x] + case Decl::Label: // __label__ x; + case Decl::Import: + case Decl::MSGuid: // __declspec(uuid("...")) + case Decl::TemplateParamObject: + case Decl::OMPThreadPrivate: + case Decl::OMPAllocate: + case Decl::OMPCapturedExpr: + case Decl::OMPRequires: + case Decl::Empty: + case Decl::Concept: + case Decl::LifetimeExtendedTemporary: + case Decl::RequiresExprBody: + // None of these decls require codegen support. + return; + + case Decl::NamespaceAlias: + assert(0 && "Not implemented"); + return; + case Decl::Using: // using X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::UsingEnum: // using enum X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::UsingPack: + assert(0 && "Not implemented"); + return; + case Decl::UsingDirective: // using namespace X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::Var: + case Decl::Decomposition: { + const VarDecl &VD = cast(D); + assert(VD.isLocalVarDecl() && + "Should not see file-scope variables inside a function!"); + buildVarDecl(VD); + if (auto *DD = dyn_cast(&VD)) + assert(0 && "Not implemented"); + + // FIXME: add this + // if (auto *DD = dyn_cast(&VD)) + // for (auto *B : DD->bindings()) + // if (auto *HD = B->getHoldingVar()) + // EmitVarDecl(*HD); + return; + } + + case Decl::OMPDeclareReduction: + case Decl::OMPDeclareMapper: + assert(0 && "Not implemented"); + + case Decl::Typedef: // typedef int X; + case Decl::TypeAlias: { // using X = int; [C++0x] + assert(0 && "Not implemented"); + } + } + } + /// Emit the computation of the specified expression of scalar type, /// ignoring the result. mlir::Value buildScalarExpr(const Expr *E) { @@ -438,6 +1023,18 @@ class CIRBuildImpl { return mlir::success(); } + mlir::LogicalResult buildDeclStmt(const DeclStmt &S) { + if (!builder.getInsertionBlock()) + theModule.emitError( + "Seems like this is unreachable code, what should we do?"); + + for (const auto *I : S.decls()) { + buildDecl(*I); + } + + return mlir::success(); + } + mlir::LogicalResult buildCompoundStmt(const CompoundStmt &S) { // Create a scope in the symbol table to hold variable declarations local // to this compound statement. @@ -455,6 +1052,8 @@ class CIRBuildImpl { llvm::errs() << "CIR codegen for '" << S->getStmtClassName() << "' not implemented\n"; return mlir::failure(); + case Stmt::DeclStmtClass: + return buildDeclStmt(cast(*S)); case Stmt::CompoundStmtClass: return buildCompoundStmt(cast(*S)); case Stmt::ReturnStmtClass: @@ -504,12 +1103,13 @@ class CIRBuildImpl { llvm::zip(FD->parameters(), entryBlock.getArguments())) { auto *paramVar = std::get<0>(nameValue); auto paramVal = std::get<1>(nameValue); - if (failed(declare(paramVar, paramVar->getType(), paramVal, - getLoc(paramVar->getSourceRange().getBegin())))) + mlir::Value addr; + if (failed(declare(paramVar, paramVar->getType(), + getLoc(paramVar->getSourceRange().getBegin()), addr, + true /*param*/))) return nullptr; // Store params in local storage. FIXME: is this really needed // at this level of representation? - mlir::Value addr = symbolTable.lookup(paramVar); builder.create(loc, paramVal, addr); } diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 7dcf3205e763..7a6a30c085d4 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * int foo(int i) { return i; @@ -8,7 +7,7 @@ int foo(int i) { // CHECK: module { // CHECK-NEXT: func @foo(%arg0: i32) -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32 = uninitialized, cir.ptr +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [paraminit] // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %1 : i32 @@ -17,4 +16,15 @@ int foo(int i) { int f2() { return 3; } // CHECK: func @f2() -> i32 { -// CHECK-NEXT: cir.return +// CHECK-NEXT: %0 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: cir.return %0 : i32 + +int f3() { + int i = 3; + return i; +} + +// CHECK: func @f3() -> i32 { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [cinit] +// CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 56727b002355..0873fb033079 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -1,20 +1,37 @@ -// Test the CIR operations can parse and print correctly +// Test the CIR operations can parse and print correctly (roundtrip) // RUN: cir-tool %s | cir-tool | FileCheck %s module { func.func @foo(%arg0: i32) -> i32 { - %0 = cir.alloca i32 = uninitialized, cir.ptr + %0 = cir.alloca i32, cir.ptr , [paraminit] cir.store %arg0, %0 : i32, cir.ptr %1 = cir.load %0 : cir.ptr , i32 cir.return %1 : i32 } + + func.func @f3() -> i32 { + %0 = cir.alloca i32, cir.ptr , [cinit] + %1 = cir.cst(3 : i32) : i32 + cir.store %1, %0 : i32, cir.ptr + %2 = cir.load %0 : cir.ptr , i32 + cir.return %2 : i32 + } } // CHECK: module { // CHECK-NEXT: func.func @foo(%arg0: i32) -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32 = uninitialized, cir.ptr +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [paraminit] // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %1 : i32 // CHECK-NEXT: } -// CHECK-NEXT: } + +// CHECK-NEXT: func.func @f3() -> i32 { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [cinit] +// CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.return %2 : i32 +// CHECK-NEXT: } + +// CHECK: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index c7746f8801e8..373a845bf854 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -27,6 +27,7 @@ using FuncOp = func::FuncOp; } // namespace mlir #include "mlir/Dialect/CIR/IR/CIROpsDialect.h.inc" +#include "mlir/Dialect/CIR/IR/CIROpsEnums.h.inc" #include "mlir/Dialect/CIR/IR/CIRTypes.h" #define GET_OP_CLASSES diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index ad53fc6e1861..c3873aaa9a34 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -16,10 +16,10 @@ include "mlir/Dialect/CIR/IR/CIRDialect.td" include "mlir/Dialect/CIR/IR/CIRTypes.td" +include "mlir/IR/EnumAttr.td" include "mlir/IR/SymbolInterfaces.td" include "mlir/IR/BuiltinAttributeInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" -include "mlir/IR/SymbolInterfaces.td" //===----------------------------------------------------------------------===// // CIR Ops @@ -62,54 +62,67 @@ def ConstantOp : CIR_Op<"cst", // AllocaOp //===----------------------------------------------------------------------===// +def InitStyle_None : I32EnumAttrCase<"uninitialized", 1>; +def InitStyle_ParamInit : I32EnumAttrCase<"paraminit", 2>; +def InitStyle_CInit : I32EnumAttrCase<"cinit", 3>; +def InitStyle_CallInit : I32EnumAttrCase<"callinit", 4>; +def InitStyle_ListInit : I32EnumAttrCase<"listinit", 5>; + +def InitStyle : I32EnumAttr< + "InitStyle", + "variable initialization style", + [InitStyle_None, InitStyle_ParamInit, + InitStyle_CInit, InitStyle_CallInit, + InitStyle_ListInit]> { + let cppNamespace = "::mlir::cir"; +} + +class AllocaTypesMatchWith + : PredOpTrait> { + string lhs = lhsArg; + string rhs = rhsArg; + string transformer = transform; +} + // FIXME: add alignment, bool attr on being param, automatic scope?. -def AllocaOp : CIR_Op<"alloca", []> { - let summary = "define a local variable"; +def AllocaOp : CIR_Op<"alloca", [ + AllocaTypesMatchWith<"'type' matches pointee type of 'addr'", + "addr", "type", + "$_self.cast().getPointee()">]> { + let summary = "local variable"; let description = [{ - The `cir.alloca` operation defines a local variable. The `initial_value` - can either be a unit attribute to represent a definition of an uninitialized - local variable, or constant to represent the definition of a - variable with an initial value. It can also be marked constant using the - `constant` unit attribute. + The `cir.alloca` operation defines a local variable. + + Possible initialization styles are: uninitialized, paraminit, + callinit, cinit and listinit. The result is a pointer type for the original input type. Example: ```mlir - // Local variable with an initial value. - %0 = cir.alloca i32 = 1, !cir.ptr - - // Uninitialized local variable. - %0 = cir.alloca f32 = uninitialized, !cir.ptr - - // Constant local variable. - %0 = cir.alloca constant i8 = 3, !cir.ptr + // Local variable with uninitialized value. + %0 = cir.alloca i32, !cir.ptr, [cinit] ``` }]; let arguments = (ins - TypeAttr:$type, - AnyAttr:$initial_value, - UnitAttr:$constant + TypeAttr:$type, + Arg:$init ); let results = (outs Res]>:$addr); let assemblyFormat = [{ - (`constant` $constant^)? - custom($type, $initial_value) - attr-dict `,` `cir.ptr` type($addr) - }]; - - let extraClassDeclaration = [{ - bool isUninitialized() { - return getInitialValue().isa(); - } + $type `,` `cir.ptr` type($addr) `,` `[` $init `]` attr-dict }]; - let hasVerifier = 1; + let hasVerifier = 0; } //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt index 8c2c20a31f9c..cb39458cadd1 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt @@ -1,2 +1,7 @@ add_mlir_dialect(CIROps cir) add_mlir_doc(CIROps CIROps Dialects/ -gen-op-doc) + +set(LLVM_TARGET_DEFINITIONS CIROps.td) +mlir_tablegen(CIROpsEnums.h.inc -gen-enum-decls) +mlir_tablegen(CIROpsEnums.cpp.inc -gen-enum-defs) +add_public_tablegen_target(MLIRCIREnumsGen) \ No newline at end of file diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 98027363b2d0..ef4413a8d6e1 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -22,6 +22,8 @@ using namespace mlir; using namespace mlir::cir; +#include "mlir/Dialect/CIR/IR/CIROpsEnums.cpp.inc" + #include "mlir/Dialect/CIR/IR/CIROpsDialect.cpp.inc" //===----------------------------------------------------------------------===// @@ -96,54 +98,6 @@ mlir::LogicalResult ReturnOp::verify() { << ")"; } -//===----------------------------------------------------------------------===// -// AllocaOp -//===----------------------------------------------------------------------===// - -static void printAllocaOpTypeAndInitialValue(OpAsmPrinter &p, AllocaOp op, - TypeAttr type, - Attribute initialValue) { - p << type; - p << " = "; - if (op.isUninitialized()) - p << "uninitialized"; - else - p.printAttributeWithoutType(initialValue); -} - -static ParseResult parseAllocaOpTypeAndInitialValue(OpAsmParser &parser, - TypeAttr &typeAttr, - Attribute &initialValue) { - Type type; - if (parser.parseType(type)) - return failure(); - typeAttr = TypeAttr::get(type); - - if (parser.parseEqual()) - return success(); - - if (succeeded(parser.parseOptionalKeyword("uninitialized"))) - initialValue = UnitAttr::get(parser.getBuilder().getContext()); - - if (!initialValue.isa()) - return parser.emitError(parser.getNameLoc()) - << "constant operation not implemented yet"; - - return success(); -} - -LogicalResult AllocaOp::verify() { - // Verify that the initial value, is either a unit attribute or - // an elements attribute. - Attribute initValue = getInitialValue(); - if (!initValue.isa()) - return emitOpError("initial value should be a unit " - "attribute, but got ") - << initValue; - - return success(); -} - //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt index 63e1b234b90a..d6787e798d18 100644 --- a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt @@ -7,6 +7,7 @@ add_mlir_dialect_library(MLIRCIR DEPENDS MLIRCIROpsIncGen + MLIRCIREnumsGen MLIRSymbolInterfacesIncGen LINK_LIBS PUBLIC From 7416be61ce55d40c41952f515c3a403f14e5f646 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 22 Oct 2021 17:35:41 -0700 Subject: [PATCH 0037/2301] [CIR] Add lvalue_to_rvalue attribute on loads Still a bit unclear if this is the best place for this attribute to sit, but until we get more clear knowledge about how the other conversions will play this is a good start. --- clang/lib/CIR/CIRBuilder.cpp | 2 +- clang/test/CIR/CodeGen/basic.c | 2 +- clang/test/CIR/IR/cir-ops.cir | 4 ++-- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 9 ++++++--- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 512a293bfe6c..80cae3f5e656 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -443,7 +443,7 @@ class CIRBuildImpl { LValue LV = EmitLValue(E); auto load = Builder.builder.create( Builder.getLoc(E->getExprLoc()), Builder.getCIRType(E->getType()), - LV.getPointer()); + LV.getPointer(), mlir::UnitAttr::get(Builder.builder.getContext())); // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); return load; } diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 7a6a30c085d4..60a07642687d 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -9,7 +9,7 @@ int foo(int i) { // CHECK-NEXT: func @foo(%arg0: i32) -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [paraminit] // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: %1 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 // CHECK-NEXT: cir.return %1 : i32 // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 0873fb033079..6ba90aa9c4e0 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -13,7 +13,7 @@ module { %0 = cir.alloca i32, cir.ptr , [cinit] %1 = cir.cst(3 : i32) : i32 cir.store %1, %0 : i32, cir.ptr - %2 = cir.load %0 : cir.ptr , i32 + %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 cir.return %2 : i32 } } @@ -30,7 +30,7 @@ module { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [cinit] // CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 // CHECK-NEXT: cir.return %2 : i32 // CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index c3873aaa9a34..7a2db498646b 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -112,6 +112,7 @@ def AllocaOp : CIR_Op<"alloca", [ let arguments = (ins TypeAttr:$type, + // FIXME: add "uninitialzed" as default mode Arg:$init ); @@ -148,11 +149,13 @@ def LoadOp : CIR_Op<"load", [ }]; let arguments = (ins Arg:$addr); + [MemRead]>:$addr, + UnitAttr:$conv); let results = (outs AnyType:$result); - let assemblyFormat = - "$addr attr-dict `:` `cir.ptr` type($addr) `,` type($result)"; + let assemblyFormat = [{ + $addr (`lvalue_to_rvalue` $conv^)? attr-dict `:` `cir.ptr` type($addr) `,` type($result) + }]; } //===----------------------------------------------------------------------===// From 3a7906859eec68b6c8fc607a19311e10c1e28dcd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 25 Oct 2021 17:21:49 -0700 Subject: [PATCH 0038/2301] [CIR] Fix asserts and make them actually work --- clang/lib/CIR/CIRGenTypes.cpp | 55 ++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index def51ecce9bb..523390a3bc89 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -17,7 +17,7 @@ CIRGenTypes::CIRGenTypes(ASTContext &Ctx, mlir::OpBuilder &B) : Context(Ctx), Builder(B) {} CIRGenTypes::~CIRGenTypes() = default; -/// ConvertType - Convert the specified type to its LLVM form. +/// ConvertType - Convert the specified type to its MLIR form. mlir::Type CIRGenTypes::ConvertType(QualType T) { T = Context.getCanonicalType(T); const Type *Ty = T.getTypePtr(); @@ -62,7 +62,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::ObjCClass: case BuiltinType::ObjCSel: // FIXME: if we emit like LLVM we probably wanna use i8. - assert("not implemented"); + assert(0 && "not implemented"); break; case BuiltinType::Bool: @@ -121,7 +121,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; case BuiltinType::Half: // Should be the same as above? - assert("not implemented"); + assert(0 && "not implemented"); break; case BuiltinType::BFloat16: ResultType = Builder.getBF16Type(); @@ -137,18 +137,18 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::Ibm128: // FIXME: look at Context.getFloatTypeSemantics(T) and getTypeForFormat // on LLVM codegen. - assert("not implemented"); + assert(0 && "not implemented"); break; case BuiltinType::NullPtr: // Model std::nullptr_t as i8* // ResultType = llvm::Type::getInt8PtrTy(getLLVMContext()); - assert("not implemented"); + assert(0 && "not implemented"); break; case BuiltinType::UInt128: case BuiltinType::Int128: - assert("not implemented"); + assert(0 && "not implemented"); // FIXME: ResultType = Builder.getIntegerType(128); break; @@ -162,7 +162,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::OCLClkEvent: case BuiltinType::OCLQueue: case BuiltinType::OCLReserveID: - assert("not implemented"); + assert(0 && "not implemented"); break; case BuiltinType::SveInt8: case BuiltinType::SveUint8: @@ -213,18 +213,18 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::SveBFloat16x2: case BuiltinType::SveBFloat16x3: case BuiltinType::SveBFloat16x4: { - assert("not implemented"); + assert(0 && "not implemented"); break; } #define PPC_VECTOR_TYPE(Name, Id, Size) \ case BuiltinType::Id: \ - assert("not implemented"); \ + assert(0 && "not implemented"); \ break; #include "clang/Basic/PPCTypes.def" #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/RISCVVTypes.def" { - assert("not implemented"); + assert(0 && "not implemented"); break; } case BuiltinType::Dependent: @@ -239,12 +239,12 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case Type::DeducedTemplateSpecialization: llvm_unreachable("Unexpected undeduced type!"); case Type::Complex: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::LValueReference: case Type::RValueReference: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::Pointer: { @@ -261,73 +261,74 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { // FIXME: add address specifier to cir::PointerType? ResultType = ::mlir::cir::PointerType::get(Builder.getContext(), PointeeType); + assert(ResultType && "Cannot get pointer type?"); break; } case Type::VariableArray: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::IncompleteArray: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::ConstantArray: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::ExtVector: case Type::Vector: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::ConstantMatrix: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::FunctionNoProto: case Type::FunctionProto: - assert("not implemented"); + assert(0 && "not implemented"); break; case Type::ObjCObject: - assert("not implemented"); + assert(0 && "not implemented"); break; case Type::ObjCInterface: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::ObjCObjectPointer: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::Enum: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::BlockPointer: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::MemberPointer: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::Atomic: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::Pipe: { - assert("not implemented"); + assert(0 && "not implemented"); break; } case Type::BitInt: { - assert("not implemented"); + assert(0 && "not implemented"); break; } } From 2f56be774359649649b5397b14e74ad6c43473ef Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 26 Aug 2022 15:17:17 -0700 Subject: [PATCH 0039/2301] [CIR] Introduce NullAttr This is more-or-less just a typed version of UnitAttr. We previously used `UnitAttr` to represent the case of passing nullptr to ConstantOp, but the removal of the mlir::Type from mlir::Attribute broke this as ConstantOp's value requires a type. --- mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h | 28 +++++++++ mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td | 40 ++++++++++++ .../include/mlir/Dialect/CIR/IR/CIRDialect.td | 7 +++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 2 + .../mlir/Dialect/CIR/IR/CMakeLists.txt | 7 ++- mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp | 61 +++++++++++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 2 + mlir/lib/Dialect/CIR/IR/CIRTypes.cpp | 2 - mlir/lib/Dialect/CIR/IR/CMakeLists.txt | 2 + 9 files changed, 148 insertions(+), 3 deletions(-) create mode 100644 mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h create mode 100644 mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td create mode 100644 mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h new file mode 100644 index 000000000000..a9e098edbfc8 --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h @@ -0,0 +1,28 @@ +//===- CIRAttrs.h - MLIR CIR Attrs ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the attributes in the CIR dialect. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_IR_CIRATTRS_H_ +#define MLIR_DIALECT_CIR_IR_CIRATTRS_H_ + +#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/BuiltinAttributeInterfaces.h" + +//===----------------------------------------------------------------------===// +// CIR Dialect Attrs +//===----------------------------------------------------------------------===// + +#define GET_ATTRDEF_CLASSES +#include "mlir/Dialect/CIR/IR/CIROpsAttributes.h.inc" + +#endif // MLIR_DIALECT_CIR_IR_CIRATTRS_H_ + diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td new file mode 100644 index 000000000000..cc8e72b50902 --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td @@ -0,0 +1,40 @@ +//===- CIRAttrs.td - CIR dialect types ---------------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the CIR dialect attributes. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_CIR_DIALECT_CIR_ATTRS +#define MLIR_CIR_DIALECT_CIR_ATTRS + +include "mlir/IR/BuiltinAttributeInterfaces.td" +include "mlir/Dialect/CIR/IR/CIRDialect.td" +include "mlir/Dialect/CIR/IR/CIRTypes.td" + +//===----------------------------------------------------------------------===// +// CIR Attrs +//===----------------------------------------------------------------------===// + +class CIR_Attr traits = []> + : AttrDef { + let mnemonic = attrMnemonic; +} + +def NullAttr : CIR_Attr<"Null", "null", [TypedAttrInterface]> { + let summary = "A simple attr to represent nullptr"; + let description = [{ + The NullAttr represents the value of nullptr within cir. + }]; + + let parameters = (ins AttributeSelfTypeParameter<"">:$type); + + let assemblyFormat = [{}]; +} + +#endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td index 899ab712649e..8f756fa422e5 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td @@ -28,11 +28,18 @@ def CIR_Dialect : Dialect { let useDefaultTypePrinterParser = 0; let extraClassDeclaration = [{ + void registerAttributes(); void registerTypes(); ::mlir::Type parseType(::mlir::DialectAsmParser &parser) const override; void printType(::mlir::Type type, ::mlir::DialectAsmPrinter &printer) const override; + + ::mlir::Attribute parseAttribute(::mlir::DialectAsmParser &parser, + ::mlir::Type type) const override; + + void printAttribute(::mlir::Attribute attr, + ::mlir::DialectAsmPrinter &os) const override; }]; } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 7a2db498646b..90d8e6729d7c 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -16,6 +16,8 @@ include "mlir/Dialect/CIR/IR/CIRDialect.td" include "mlir/Dialect/CIR/IR/CIRTypes.td" +include "mlir/Dialect/CIR/IR/CIRAttrs.td" + include "mlir/IR/EnumAttr.td" include "mlir/IR/SymbolInterfaces.td" include "mlir/IR/BuiltinAttributeInterfaces.td" diff --git a/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt index cb39458cadd1..a31abcdcfea4 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt @@ -1,7 +1,12 @@ add_mlir_dialect(CIROps cir) +add_mlir_doc(CIRDialect CIRDialect Dialects/ -gen-dialect-doc) add_mlir_doc(CIROps CIROps Dialects/ -gen-op-doc) +add_mlir_doc(CIRAttrs CIRAttrs Dialects/ -gen-attrdef-doc) +add_mlir_doc(CIRTypes CIRTypes Dialects/ -gen-typedef-doc) set(LLVM_TARGET_DEFINITIONS CIROps.td) mlir_tablegen(CIROpsEnums.h.inc -gen-enum-decls) mlir_tablegen(CIROpsEnums.cpp.inc -gen-enum-defs) -add_public_tablegen_target(MLIRCIREnumsGen) \ No newline at end of file +mlir_tablegen(CIROpsAttributes.h.inc -gen-attrdef-decls) +mlir_tablegen(CIROpsAttributes.cpp.inc -gen-attrdef-defs) +add_public_tablegen_target(MLIRCIREnumsGen) diff --git a/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp b/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp new file mode 100644 index 000000000000..bab679bafc67 --- /dev/null +++ b/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp @@ -0,0 +1,61 @@ +//===- CIRTypes.cpp - MLIR CIR Types --------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the types in the CIR dialect. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/CIR/IR/CIRAttrs.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributeInterfaces.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/DialectImplementation.h" + +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/TypeSwitch.h" + +#define GET_ATTRDEF_CLASSES +#include "mlir/Dialect/CIR/IR/CIROpsAttributes.cpp.inc" + +using namespace mlir; +using namespace mlir::cir; + +//===----------------------------------------------------------------------===// +// General CIR parsing / printing +//===----------------------------------------------------------------------===// + +Attribute CIRDialect::parseAttribute(DialectAsmParser &parser, + Type type) const { + llvm::SMLoc typeLoc = parser.getCurrentLocation(); + StringRef mnemonic; + Attribute genAttr; + OptionalParseResult parseResult = + generatedAttributeParser(parser, &mnemonic, type, genAttr); + if (parseResult.has_value()) + return genAttr; + parser.emitError(typeLoc, "unknown attribute in CIR dialect"); + return Attribute(); +} + +void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const { + if (failed(generatedAttributePrinter(attr, os))) + llvm_unreachable("unexpected CIR type kind"); +} + +//===----------------------------------------------------------------------===// +// CIR Dialect +//===----------------------------------------------------------------------===// + +void CIRDialect::registerAttributes() { + addAttributes< +#define GET_ATTRDEF_LIST +#include "mlir/Dialect/CIR/IR/CIROpsAttributes.cpp.inc" + >(); +} diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index ef4413a8d6e1..59e459365f8d 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -34,6 +35,7 @@ using namespace mlir::cir; /// the point of registration of types and operations for the dialect. void cir::CIRDialect::initialize() { registerTypes(); + registerAttributes(); addOperations< #define GET_OP_LIST #include "mlir/Dialect/CIR/IR/CIROps.cpp.inc" diff --git a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp index c42a7567ea5e..60476a892ab1 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp @@ -32,8 +32,6 @@ using namespace mlir::cir; Type CIRDialect::parseType(DialectAsmParser &parser) const { llvm::SMLoc typeLoc = parser.getCurrentLocation(); StringRef mnemonic; - if (parser.parseKeyword(&mnemonic)) - return Type(); Type genType; OptionalParseResult parseResult = generatedTypeParser(parser, &mnemonic, genType); diff --git a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt index d6787e798d18..9b7d5391b9b6 100644 --- a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt @@ -1,4 +1,5 @@ add_mlir_dialect_library(MLIRCIR + CIRAttrs.cpp CIRDialect.cpp CIRTypes.cpp @@ -6,6 +7,7 @@ add_mlir_dialect_library(MLIRCIR ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/CIR DEPENDS + MLIRBuiltinLocationAttributesIncGen MLIRCIROpsIncGen MLIRCIREnumsGen MLIRSymbolInterfacesIncGen From 47328908842f1047b7efe8e5747451ae5ffc2d5b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 25 Oct 2021 17:22:34 -0700 Subject: [PATCH 0040/2301] [CIR] Add support for 'nullptr' initialization - Describe as UnitAttr, add custom parser/printer - Add verifier for types used with nullptr - Tests --- clang/lib/CIR/CIRBuilder.cpp | 10 ++++++++++ clang/test/CIR/CodeGen/basic.cpp | 11 +++++++++++ clang/test/CIR/IR/invalid.cir | 11 +++++++++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 15 ++++++++++++--- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 22 +++++++++++++++++++++- 5 files changed, 65 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/basic.cpp create mode 100644 clang/test/CIR/IR/invalid.cir diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 80cae3f5e656..bf3edddf2081 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -16,6 +16,7 @@ #include "clang/CIR/CIRBuilder.h" #include "clang/CIR/CIRCodeGenFunction.h" +#include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/CIR/IR/CIRTypes.h" @@ -468,6 +469,15 @@ class CIRBuildImpl { assert(Builder.astCtx.hasSameUnqualifiedType(E->getType(), DestTy)); assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); return Visit(const_cast(E)); + case CK_NullToPointer: { + // FIXME: use MustVisitNullValue(E) and evaluate expr. + // Note that DestTy is used as the MLIR type instead of a custom + // nullptr type. + mlir::Type Ty = Builder.getCIRType(DestTy); + return Builder.builder.create( + Builder.getLoc(E->getExprLoc()), Ty, + mlir::cir::NullAttr::get(Builder.builder.getContext(), Ty)); + } default: emitError(Builder.getLoc(CE->getExprLoc()), "cast kind not implemented: '") diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp new file mode 100644 index 000000000000..6f04c37396da --- /dev/null +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int *p0() { + int *p = nullptr; + return p; +} + +// CHECK: func @p0() -> !cir.ptr { +// CHECK: %1 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir new file mode 100644 index 000000000000..a4c2768bd5e2 --- /dev/null +++ b/clang/test/CIR/IR/invalid.cir @@ -0,0 +1,11 @@ +// Test attempts to build bogus CIR + +// RUN: cir-tool -verify-diagnostics %s +module { + func.func @p0() { + // expected-error@+1 {{'cir.cst' op nullptr expects pointer type}} + %1 = cir.cst(#cir.null : !cir.ptr) : i32 + + cir.return + } +} diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 90d8e6729d7c..b3a1a9b4077b 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -33,6 +33,8 @@ class CIR_Op traits = []> : //===----------------------------------------------------------------------===// // ConstantOp //===----------------------------------------------------------------------===// + + def ConstantOp : CIR_Op<"cst", [ConstantLike, Pure]> { @@ -42,8 +44,9 @@ def ConstantOp : CIR_Op<"cst", to the operation as an attribute. For example: ```mlir - %0 = cir.cst 42 : i32 - %1 = cir.cst 4.2 : f32 + %0 = cir.cst(42 : i32) + %1 = cir.cst(4.2 : f32) + %2 = cir.cst(nullptr : !cir.ptr) ``` }]; @@ -53,10 +56,16 @@ def ConstantOp : CIR_Op<"cst", // The constant operation returns a single value of AnyType. let results = (outs AnyType:$res); - let assemblyFormat = "`(` $value `)` attr-dict `:` type($res)"; + let assemblyFormat = "`(` custom($value) `)` attr-dict `:` type($res)"; let hasVerifier = 1; + let extraClassDeclaration = [{ + bool isNullPtr() { + return getValue().isa(); + } + }]; + // TODO: hasFolder, etc } diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 59e459365f8d..c0fc1a08a04b 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -51,10 +51,16 @@ LogicalResult ConstantOp::verify() { auto val = getValue(); auto valueType = val.getType(); + if (val.isa()) { + if (opType.isa<::mlir::cir::PointerType>()) + return success(); + return emitOpError("nullptr expects pointer type"); + } + // ODS already generates checks to make sure the result type is valid. We just // need to additionally check that the value's attribute type is consistent // with the result type. - if (val.isa()) { + if (opType.isa()) { if (valueType != opType) return emitOpError("result type (") << opType << ") does not match value type (" << valueType << ")"; @@ -64,6 +70,20 @@ LogicalResult ConstantOp::verify() { return emitOpError("cannot have value of type ") << valueType; } +static ParseResult parseConstantValue(OpAsmParser &parser, + mlir::Attribute &valueAttr) { + NamedAttrList attr; + if (parser.parseAttribute(valueAttr, "value", attr)) + return ::mlir::failure(); + + return success(); +} + +static void printConstantValue(OpAsmPrinter &p, cir::ConstantOp op, + Attribute value) { + p.printAttribute(value); +} + //===----------------------------------------------------------------------===// // ReturnOp //===----------------------------------------------------------------------===// From 445d662d3944f1b46c8f6f294df2dfe1372f0f9a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 25 Oct 2021 18:30:02 -0700 Subject: [PATCH 0041/2301] [CIR][NFC] Refactor and introduce more layers for building Stmts This is prep work for codegen'ing expressions as part of walking a compound statement. --- clang/lib/CIR/CIRBuilder.cpp | 55 ++++++++++++++++++++++++++---------- 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index bf3edddf2081..24f6ab282550 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1045,22 +1045,9 @@ class CIRBuildImpl { return mlir::success(); } - mlir::LogicalResult buildCompoundStmt(const CompoundStmt &S) { - // Create a scope in the symbol table to hold variable declarations local - // to this compound statement. - SymTableScopeTy varScope(symbolTable); - for (auto *CurStmt : S.body()) - if (buildStmt(CurStmt).failed()) - return mlir::failure(); - - return mlir::success(); - } - - mlir::LogicalResult buildStmt(const Stmt *S) { + mlir::LogicalResult buildSimpleStmt(const Stmt *S) { switch (S->getStmtClass()) { default: - llvm::errs() << "CIR codegen for '" << S->getStmtClassName() - << "' not implemented\n"; return mlir::failure(); case Stmt::DeclStmtClass: return buildDeclStmt(cast(*S)); @@ -1068,11 +1055,49 @@ class CIRBuildImpl { return buildCompoundStmt(cast(*S)); case Stmt::ReturnStmtClass: return buildReturnStmt(cast(*S)); + case Stmt::NullStmtClass: + break; + + case Stmt::LabelStmtClass: + case Stmt::AttributedStmtClass: + case Stmt::GotoStmtClass: + case Stmt::BreakStmtClass: + case Stmt::ContinueStmtClass: + case Stmt::DefaultStmtClass: + case Stmt::CaseStmtClass: + case Stmt::SEHLeaveStmtClass: + llvm::errs() << "CIR codegen for '" << S->getStmtClassName() + << "' not implemented\n"; + assert(0 && "not implemented"); } return mlir::success(); } + mlir::LogicalResult buildStmt(const Stmt *S) { + if (mlir::succeeded(buildSimpleStmt(S))) + return mlir::success(); + assert(0 && "not implemented"); + return mlir::failure(); + } + + mlir::LogicalResult buildFunctionBody(const Stmt *Body) { + const CompoundStmt *S = dyn_cast(Body); + assert(S && "expected compound stmt"); + return buildCompoundStmt(*S); + } + + mlir::LogicalResult buildCompoundStmt(const CompoundStmt &S) { + // Create a scope in the symbol table to hold variable declarations local + // to this compound statement. + SymTableScopeTy varScope(symbolTable); + for (auto *CurStmt : S.body()) + if (buildStmt(CurStmt).failed()) + return mlir::failure(); + + return mlir::success(); + } + // Emit a new function and add it to the MLIR module. mlir::FuncOp buildCIR(CIRCodeGenFunction *CCGF, const FunctionDecl *FD) { CurCCGF = CCGF; @@ -1124,7 +1149,7 @@ class CIRBuildImpl { } // Emit the body of the function. - if (mlir::failed(buildStmt(FD->getBody()))) { + if (mlir::failed(buildFunctionBody(FD->getBody()))) { function.erase(); return nullptr; } From add3f85f5cbe00ff21d0ed71d1cb719692ca368d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 25 Oct 2021 19:18:39 -0700 Subject: [PATCH 0042/2301] [CIR][NFC] Add infra to support building arbitrary expressions The only change here is that we assert a bit different now. This also unifies handling from scalar exp back to the builder. --- clang/lib/CIR/CIRBuilder.cpp | 266 +++++++++++++++++++++++++++++------ 1 file changed, 223 insertions(+), 43 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 24f6ab282550..e3ea4271f19f 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -398,50 +398,9 @@ class CIRBuildImpl { return StmtVisitor::Visit(E); } - LValue EmitDeclRefLValue(const DeclRefExpr *E) { - const NamedDecl *ND = E->getDecl(); - - assert(E->isNonOdrUse() != NOUR_Unevaluated && - "should not emit an unevaluated operand"); - - if (const auto *VD = dyn_cast(ND)) { - // Global Named registers access via intrinsics only - assert(VD->getStorageClass() != SC_Register && "not implemented"); - assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); - assert(!E->refersToEnclosingVariableOrCapture() && "not implemented"); - assert(!(VD->hasLinkage() || VD->isStaticDataMember()) && - "not implemented"); - assert(!VD->isEscapingByref() && "not implemented"); - assert(!VD->getType()->isReferenceType() && "not implemented"); - assert(Builder.symbolTable.count(VD) && "should be already mapped"); - - mlir::Value V = Builder.symbolTable.lookup(VD); - assert(V && "Name lookup must succeed"); - - LValue LV = LValue::makeAddr(RawAddress(V, CharUnits::fromQuantity(4)), - VD->getType(), AlignmentSource::Decl); - return LV; - } - - llvm_unreachable("Unhandled DeclRefExpr?"); - } - - LValue EmitLValue(const Expr *E) { - switch (E->getStmtClass()) { - case Expr::DeclRefExprClass: - return EmitDeclRefLValue(cast(E)); - default: - emitError(Builder.getLoc(E->getExprLoc()), - "l-value not implemented for '") - << E->getStmtClassName() << "'"; - break; - } - return LValue::makeAddr(RawAddress::invalid(), E->getType()); - } - /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { - LValue LV = EmitLValue(E); + LValue LV = Builder.buildLValue(E); auto load = Builder.builder.create( Builder.getLoc(E->getExprLoc()), Builder.getCIRType(E->getType()), LV.getPointer(), mlir::UnitAttr::get(Builder.builder.getContext())); @@ -1074,10 +1033,231 @@ class CIRBuildImpl { return mlir::success(); } + LValue buildDeclRefLValue(const DeclRefExpr *E) { + const NamedDecl *ND = E->getDecl(); + + assert(E->isNonOdrUse() != NOUR_Unevaluated && + "should not emit an unevaluated operand"); + + if (const auto *VD = dyn_cast(ND)) { + // Global Named registers access via intrinsics only + assert(VD->getStorageClass() != SC_Register && "not implemented"); + assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); + assert(!E->refersToEnclosingVariableOrCapture() && "not implemented"); + assert(!(VD->hasLinkage() || VD->isStaticDataMember()) && + "not implemented"); + assert(!VD->isEscapingByref() && "not implemented"); + assert(!VD->getType()->isReferenceType() && "not implemented"); + assert(symbolTable.count(VD) && "should be already mapped"); + + mlir::Value V = symbolTable.lookup(VD); + assert(V && "Name lookup must succeed"); + + LValue LV = LValue::makeAddr(RawAddress(V, CharUnits::fromQuantity(4)), + VD->getType(), AlignmentSource::Decl); + return LV; + } + + llvm_unreachable("Unhandled DeclRefExpr?"); + } + + /// Emit code to compute a designator that specifies the location + /// of the expression. + /// FIXME: document this function better. + LValue buildLValue(const Expr *E) { + // FIXME: ApplyDebugLocation DL(*this, E); + switch (E->getStmtClass()) { + default: { + emitError(getLoc(E->getExprLoc()), "l-value not implemented for '") + << E->getStmtClassName() << "'"; + assert(0 && "not implemented"); + } + case Expr::ObjCPropertyRefExprClass: + llvm_unreachable("cannot emit a property reference directly"); + case Expr::DeclRefExprClass: + return buildDeclRefLValue(cast(E)); + } + + return LValue::makeAddr(RawAddress::invalid(), E->getType()); + } + + /// EmitIgnoredExpr - Emit code to compute the specified expression, + /// ignoring the result. + void buildIgnoredExpr(const Expr *E) { + assert(!E->isPRValue() && "not implemented"); + + // Just emit it as an l-value and drop the result. + buildLValue(E); + } + mlir::LogicalResult buildStmt(const Stmt *S) { if (mlir::succeeded(buildSimpleStmt(S))) return mlir::success(); - assert(0 && "not implemented"); + + if (astCtx.getLangOpts().OpenMP && astCtx.getLangOpts().OpenMPSimd) + assert(0 && "not implemented"); + + switch (S->getStmtClass()) { + case Stmt::OpenACCComputeConstructClass: + case Stmt::OMPScopeDirectiveClass: + case Stmt::OMPTeamsGenericLoopDirectiveClass: + case Stmt::OMPParallelMaskedDirectiveClass: + case Stmt::OMPTargetTeamsGenericLoopDirectiveClass: + case Stmt::OMPErrorDirectiveClass: + case Stmt::OMPGenericLoopDirectiveClass: + case Stmt::OMPMaskedTaskLoopDirectiveClass: + case Stmt::OMPMaskedTaskLoopSimdDirectiveClass: + case Stmt::OMPParallelGenericLoopDirectiveClass: + case Stmt::OMPParallelMaskedTaskLoopDirectiveClass: + case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass: + case Stmt::OMPTargetParallelGenericLoopDirectiveClass: + llvm_unreachable("NYI"); + case Stmt::NoStmtClass: + case Stmt::CXXCatchStmtClass: + case Stmt::SEHExceptStmtClass: + case Stmt::SEHFinallyStmtClass: + case Stmt::MSDependentExistsStmtClass: + llvm_unreachable("invalid statement class to emit generically"); + case Stmt::NullStmtClass: + case Stmt::CompoundStmtClass: + case Stmt::DeclStmtClass: + case Stmt::LabelStmtClass: + case Stmt::AttributedStmtClass: + case Stmt::GotoStmtClass: + case Stmt::BreakStmtClass: + case Stmt::ContinueStmtClass: + case Stmt::DefaultStmtClass: + case Stmt::CaseStmtClass: + case Stmt::SEHLeaveStmtClass: + llvm_unreachable("should have emitted these statements as simple"); + +#define STMT(Type, Base) +#define ABSTRACT_STMT(Op) +#define EXPR(Type, Base) case Stmt::Type##Class: +#include "clang/AST/StmtNodes.inc" + { + // Remember the block we came in on. + mlir::Block *incoming = builder.getInsertionBlock(); + assert(incoming && "expression emission must have an insertion point"); + + buildIgnoredExpr(cast(S)); + + mlir::Block *outgoing = builder.getInsertionBlock(); + assert(outgoing && "expression emission cleared block!"); + + // FIXME: Should we mimic LLVM emission here? + // The expression emitters assume (reasonably!) that the insertion + // point is always set. To maintain that, the call-emission code + // for noreturn functions has to enter a new block with no + // predecessors. We want to kill that block and mark the current + // insertion point unreachable in the common case of a call like + // "exit();". Since expression emission doesn't otherwise create + // blocks with no predecessors, we can just test for that. + // However, we must be careful not to do this to our incoming + // block, because *statement* emission does sometimes create + // reachable blocks which will have no predecessors until later in + // the function. This occurs with, e.g., labels that are not + // reachable by fallthrough. + if (incoming != outgoing && outgoing->use_empty()) + assert(0 && "not implemented"); + break; + } + + case Stmt::IndirectGotoStmtClass: + case Stmt::IfStmtClass: + case Stmt::WhileStmtClass: + case Stmt::DoStmtClass: + case Stmt::ForStmtClass: + case Stmt::ReturnStmtClass: + case Stmt::SwitchStmtClass: + // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. + case Stmt::GCCAsmStmtClass: + case Stmt::MSAsmStmtClass: + case Stmt::CoroutineBodyStmtClass: + case Stmt::CoreturnStmtClass: + case Stmt::CapturedStmtClass: + case Stmt::ObjCAtTryStmtClass: + case Stmt::ObjCAtThrowStmtClass: + case Stmt::ObjCAtSynchronizedStmtClass: + case Stmt::ObjCForCollectionStmtClass: + case Stmt::ObjCAutoreleasePoolStmtClass: + case Stmt::CXXTryStmtClass: + case Stmt::CXXForRangeStmtClass: + case Stmt::SEHTryStmtClass: + case Stmt::OMPMetaDirectiveClass: + case Stmt::OMPCanonicalLoopClass: + case Stmt::OMPParallelDirectiveClass: + case Stmt::OMPSimdDirectiveClass: + case Stmt::OMPTileDirectiveClass: + case Stmt::OMPUnrollDirectiveClass: + case Stmt::OMPForDirectiveClass: + case Stmt::OMPForSimdDirectiveClass: + case Stmt::OMPSectionsDirectiveClass: + case Stmt::OMPSectionDirectiveClass: + case Stmt::OMPSingleDirectiveClass: + case Stmt::OMPMasterDirectiveClass: + case Stmt::OMPCriticalDirectiveClass: + case Stmt::OMPParallelForDirectiveClass: + case Stmt::OMPParallelForSimdDirectiveClass: + case Stmt::OMPParallelMasterDirectiveClass: + case Stmt::OMPParallelSectionsDirectiveClass: + case Stmt::OMPTaskDirectiveClass: + case Stmt::OMPTaskyieldDirectiveClass: + case Stmt::OMPBarrierDirectiveClass: + case Stmt::OMPTaskwaitDirectiveClass: + case Stmt::OMPTaskgroupDirectiveClass: + case Stmt::OMPFlushDirectiveClass: + case Stmt::OMPDepobjDirectiveClass: + case Stmt::OMPScanDirectiveClass: + case Stmt::OMPOrderedDirectiveClass: + case Stmt::OMPAtomicDirectiveClass: + case Stmt::OMPTargetDirectiveClass: + case Stmt::OMPTeamsDirectiveClass: + case Stmt::OMPCancellationPointDirectiveClass: + case Stmt::OMPCancelDirectiveClass: + case Stmt::OMPTargetDataDirectiveClass: + case Stmt::OMPTargetEnterDataDirectiveClass: + case Stmt::OMPTargetExitDataDirectiveClass: + case Stmt::OMPTargetParallelDirectiveClass: + case Stmt::OMPTargetParallelForDirectiveClass: + case Stmt::OMPTaskLoopDirectiveClass: + case Stmt::OMPTaskLoopSimdDirectiveClass: + case Stmt::OMPMasterTaskLoopDirectiveClass: + case Stmt::OMPMasterTaskLoopSimdDirectiveClass: + case Stmt::OMPParallelMasterTaskLoopDirectiveClass: + case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass: + case Stmt::OMPDistributeDirectiveClass: + case Stmt::OMPTargetUpdateDirectiveClass: + case Stmt::OMPDistributeParallelForDirectiveClass: + case Stmt::OMPDistributeParallelForSimdDirectiveClass: + case Stmt::OMPDistributeSimdDirectiveClass: + case Stmt::OMPTargetParallelForSimdDirectiveClass: + case Stmt::OMPTargetSimdDirectiveClass: + case Stmt::OMPTeamsDistributeDirectiveClass: + case Stmt::OMPTeamsDistributeSimdDirectiveClass: + case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass: + case Stmt::OMPTeamsDistributeParallelForDirectiveClass: + case Stmt::OMPTargetTeamsDirectiveClass: + case Stmt::OMPTargetTeamsDistributeDirectiveClass: + case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass: + case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass: + case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass: + case Stmt::OMPInteropDirectiveClass: + case Stmt::OMPDispatchDirectiveClass: + case Stmt::OMPMaskedDirectiveClass: { + llvm::errs() << "CIR codegen for '" << S->getStmtClassName() + << "' not implemented\n"; + assert(0 && "not implemented"); + break; + } + case Stmt::ObjCAtCatchStmtClass: + llvm_unreachable( + "@catch statements should be handled by EmitObjCAtTryStmt"); + case Stmt::ObjCAtFinallyStmtClass: + llvm_unreachable( + "@finally statements should be handled by EmitObjCAtTryStmt"); + } + return mlir::failure(); } From f602a1d24a445ca0417dd7774c85f18118222ccd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 12 Oct 2021 14:39:24 -0400 Subject: [PATCH 0043/2301] [CIR] Implement simple lowering of CIR to LLVMIR, currently only supporting cir.ret This patch adds the simple infrastructure necessary to enable the `-emit-llvm` flag such that the following invocation will do what you'd expect: ``` $ clang -cc1 -fenable-clangir -emit-llvm -o out.ll foo.c ``` There is only one operation currently supported, `cir.ret`, and it is only supported for the case of returning nothing. --- clang/include/clang/CIR/CIRBuilder.h | 10 +- clang/include/clang/CIR/LowerToLLVM.h | 37 +++++ clang/include/clang/CIR/Passes.h | 30 ++++ .../clang/CIRFrontendAction/CIRGenAction.h | 14 +- clang/lib/CIR/CIRBuilder.cpp | 21 ++- clang/lib/CIR/CMakeLists.txt | 13 +- clang/lib/CIR/LowerToLLVM.cpp | 152 ++++++++++++++++++ clang/lib/CIRFrontendAction/CIRGenAction.cpp | 52 +++++- clang/lib/CIRLowering/CMakeLists.txt | 0 .../ExecuteCompilerInvocation.cpp | 21 ++- clang/test/CIR/IRGen/basic.c | 9 ++ 11 files changed, 320 insertions(+), 39 deletions(-) create mode 100644 clang/include/clang/CIR/LowerToLLVM.h create mode 100644 clang/include/clang/CIR/Passes.h create mode 100644 clang/lib/CIR/LowerToLLVM.cpp create mode 100644 clang/lib/CIRLowering/CMakeLists.txt create mode 100644 clang/test/CIR/IRGen/basic.c diff --git a/clang/include/clang/CIR/CIRBuilder.h b/clang/include/clang/CIR/CIRBuilder.h index 4ac0b39ac9b5..1d085ddc0059 100644 --- a/clang/include/clang/CIR/CIRBuilder.h +++ b/clang/include/clang/CIR/CIRBuilder.h @@ -20,6 +20,7 @@ namespace mlir { class MLIRContext; +class ModuleOp; class OwningModuleRef; } // namespace mlir @@ -36,7 +37,6 @@ class CIRGenTypes; class CIRContext : public clang::ASTConsumer { public: CIRContext(); - CIRContext(std::unique_ptr os); ~CIRContext(); void Initialize(clang::ASTContext &Context) override; bool EmitFunction(const clang::FunctionDecl *FD); @@ -44,8 +44,14 @@ class CIRContext : public clang::ASTConsumer { bool HandleTopLevelDecl(clang::DeclGroupRef D) override; void HandleTranslationUnit(clang::ASTContext &Ctx) override; + mlir::ModuleOp getModule(); + std::unique_ptr takeContext() { + return std::move(mlirCtx); + }; + + void verifyModule(); + private: - std::unique_ptr outStream; std::unique_ptr mlirCtx; std::unique_ptr builder; diff --git a/clang/include/clang/CIR/LowerToLLVM.h b/clang/include/clang/CIR/LowerToLLVM.h new file mode 100644 index 000000000000..529c25763961 --- /dev/null +++ b/clang/include/clang/CIR/LowerToLLVM.h @@ -0,0 +1,37 @@ +//====- LowerToLLVM.h- Lowering from CIR to LLVM --------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares an interface for converting CIR modules to LLVM IR. +// +//===----------------------------------------------------------------------===// +#ifndef CLANG_CIR_LOWERTOLLVM_H +#define CLANG_CIR_LOWERTOLLVM_H + +#include + +namespace llvm { +class LLVMContext; +class Module; +} // namespace llvm + +namespace mlir { +class MLIRContext; +class ModuleOp; +} // namespace mlir + +namespace cir { + +// Lower directly from pristine CIR to LLVMIR. +std::unique_ptr +lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, + std::unique_ptr mlirCtx, + llvm::LLVMContext &llvmCtx); + +} // namespace cir + +#endif // CLANG_CIR_LOWERTOLLVM_H_ diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h new file mode 100644 index 000000000000..3979bd08a772 --- /dev/null +++ b/clang/include/clang/CIR/Passes.h @@ -0,0 +1,30 @@ +//===- Passes.h - CIR Passes Definition -----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file exposes the entry points to create compiler passes for ClangIR. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CIR_PASSES_H +#define CLANG_CIR_PASSES_H + +#include "mlir/Pass/Pass.h" + +#include + +namespace cir { +class Pass; + +/// Create a pass for lowering from `CIR` operations well as `Affine` and `Std`, +/// to the LLVM dialect for codegen. We'll want to separate this eventually into +/// different phases instead of doing it all at once. +std::unique_ptr createLowerToLLVMIRPass(); + +} // end namespace cir + +#endif // CLANG_CIR_PASSES_H diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h index 04fea51a40a8..2ec64a091d3f 100644 --- a/clang/include/clang/CIRFrontendAction/CIRGenAction.h +++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h @@ -27,7 +27,7 @@ class CIRGenerator; class CIRGenAction : public clang::ASTFrontendAction { public: - enum class OutputType { EmitAssembly, EmitCIR, EmitLLVM, None }; + enum class OutputType { EmitAssembly, EmitCIR, EmitLLVM, EmitObject, None }; private: friend class CIRGenConsumer; @@ -57,25 +57,25 @@ class CIRGenAction : public clang::ASTFrontendAction { OutputType action; }; -class EmitLLVMAction : public CIRGenAction { +class EmitCIRAction : public CIRGenAction { virtual void anchor(); public: - EmitLLVMAction(mlir::MLIRContext *mlirCtx = nullptr); + EmitCIRAction(mlir::MLIRContext *mlirCtx = nullptr); }; -class EmitCIRAction : public CIRGenAction { +class EmitCIROnlyAction : public CIRGenAction { virtual void anchor(); public: - EmitCIRAction(mlir::MLIRContext *mlirCtx = nullptr); + EmitCIROnlyAction(mlir::MLIRContext *mlirCtx = nullptr); }; -class EmitCIROnlyAction : public CIRGenAction { +class EmitLLVMAction : public CIRGenAction { virtual void anchor(); public: - EmitCIROnlyAction(mlir::MLIRContext *mlirCtx = nullptr); + EmitLLVMAction(mlir::MLIRContext *mlirCtx = nullptr); }; class EmitAssemblyAction : public CIRGenAction { diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index e3ea4271f19f..a348f846a7a8 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -13,8 +13,10 @@ #include "CIRGenTypes.h" +#include "clang/AST/ASTConsumer.h" #include "clang/CIR/CIRBuilder.h" #include "clang/CIR/CIRCodeGenFunction.h" +#include "clang/CIR/LowerToLLVM.h" #include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" @@ -1360,15 +1362,8 @@ class CIRBuildImpl { }; } // namespace cir -CIRContext::CIRContext() {} - -CIRContext::CIRContext(std::unique_ptr os) - : outStream(std::move(os)) {} - -CIRContext::~CIRContext() { - // Run module verifier before shutdown. - builder->verifyModule(); -} +CIRContext::CIRContext() = default; +CIRContext::~CIRContext() = default; void CIRContext::Initialize(clang::ASTContext &astCtx) { using namespace llvm; @@ -1382,6 +1377,10 @@ void CIRContext::Initialize(clang::ASTContext &astCtx) { builder = std::make_unique(*mlirCtx.get(), astCtx); } +void CIRContext::verifyModule() { + builder->verifyModule(); +} + bool CIRContext::EmitFunction(const FunctionDecl *FD) { CIRCodeGenFunction CCGF{}; auto func = builder->buildCIR(&CCGF, FD); @@ -1389,6 +1388,8 @@ bool CIRContext::EmitFunction(const FunctionDecl *FD) { return true; } +mlir::ModuleOp CIRContext::getModule() { return builder->getModule(); } + bool CIRContext::HandleTopLevelDecl(clang::DeclGroupRef D) { for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { auto *FD = cast(*I); @@ -1400,6 +1401,4 @@ bool CIRContext::HandleTopLevelDecl(clang::DeclGroupRef D) { } void CIRContext::HandleTranslationUnit(ASTContext &C) { - if (outStream) - builder->getModule()->print(*outStream); } diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index c9f3b2d35a68..635d5e2c39b1 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -13,6 +13,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR CIRBuilder.cpp CIRGenTypes.cpp + LowerToLLVM.cpp DEPENDS MLIRCIROpsIncGen @@ -23,11 +24,21 @@ add_clang_library(clangCIR clangLex ${dialect_libs} MLIRCIR + MLIRAffineToStandard MLIRAnalysis MLIRIR + MLIRLLVMCommonConversion + MLIRLLVMDialect + MLIRLLVMToLLVMIRTranslation + MLIRMemRefDialect + MLIRMemRefToLLVM MLIRParser + MLIRPass MLIRSideEffectInterfaces - MLIRTransforms + MLIRSCFToControlFlow + MLIRFuncToLLVM MLIRSupport MLIRMemRefDialect + MLIRTargetLLVMIRExport + MLIRTransforms ) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp new file mode 100644 index 000000000000..376cfd68b66d --- /dev/null +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -0,0 +1,152 @@ +//====- LowerToLLVM.cpp - Lowering from CIR to LLVM -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements full lowering of CIR operations to LLVMIR. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Conversion/AffineToStandard/AffineToStandard.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" +#include "mlir/Conversion/LLVMCommon/ConversionTarget.h" +#include "mlir/Conversion/LLVMCommon/TypeConverter.h" +#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h" +#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" +#include "mlir/Dialect/Affine/IR/AffineOps.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/SCF/IR/SCF.h" +#include "mlir/Dialect/SCF/Transforms/Passes.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Export.h" +#include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Passes.h" +#include "llvm/ADT/Sequence.h" + +using namespace cir; + +namespace cir { + +struct CIRToLLVMIRLoweringPass + : public mlir::PassWrapper> { + void getDependentDialects(mlir::DialectRegistry ®istry) const override { + registry.insert(); + } + void runOnOperation() final; +}; + +class CIRReturnLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ReturnOp op, + mlir::PatternRewriter &rewriter) const override { + assert(op.getNumOperands() == 0 && + "we aren't handling non-zero operand count returns yet"); + rewriter.replaceOpWithNewOp(op); + return mlir::LogicalResult::success(); + } +}; + +class CIRAllocaLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AllocaOp op, + mlir::PatternRewriter &rewriter) const override { + assert(false && "NYI"); + auto ty = mlir::MemRefType::get({}, op.getType()); + rewriter.replaceOpWithNewOp(op, ty); + return mlir::LogicalResult::success(); + } +}; + +class CIRLoadLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::LoadOp op, + mlir::PatternRewriter &rewriter) const override { + assert(false && "NYI"); + return mlir::LogicalResult::success(); + } +}; + +class CIRStoreLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::StoreOp op, + mlir::PatternRewriter &rewriter) const override { + assert(false && "NYI"); + return mlir::LogicalResult::success(); + } +}; + +void populateCIRToStdConversionPatterns(mlir::RewritePatternSet &patterns) { + patterns.add(patterns.getContext()); +} + +void CIRToLLVMIRLoweringPass::runOnOperation() { + mlir::LLVMConversionTarget target(getContext()); + target.addLegalOp(); + + mlir::LLVMTypeConverter typeConverter(&getContext()); + + mlir::RewritePatternSet patterns(&getContext()); + populateCIRToStdConversionPatterns(patterns); + populateAffineToStdConversionPatterns(patterns); + populateSCFToControlFlowConversionPatterns(patterns); + populateFinalizeMemRefToLLVMConversionPatterns(typeConverter, patterns); + populateFuncToLLVMConversionPatterns(typeConverter, patterns); + + auto module = getOperation(); + if (failed(applyFullConversion(module, target, std::move(patterns)))) + signalPassFailure(); +} + +std::unique_ptr +lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, + std::unique_ptr mlirCtx, + llvm::LLVMContext &llvmCtx) { + mlir::PassManager pm(mlirCtx.get()); + + pm.addPass(createLowerToLLVMIRPass()); + + // TODO: Handle this error + if (mlir::failed(pm.run(theModule))) + ; + + mlir::registerLLVMDialectTranslation(*mlirCtx); + + llvm::LLVMContext llvmContext; + auto llvmModule = mlir::translateModuleToLLVMIR(theModule, llvmCtx); + + // TODO: Handle this error + if (!llvmModule) + ; + + return llvmModule; +} + +std::unique_ptr createLowerToLLVMIRPass() { + return std::make_unique(); +} + +} // namespace cir diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index c074c31a0548..a81a1e2a0776 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -19,6 +19,7 @@ #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/CIRBuilder.h" +#include "clang/CIR/LowerToLLVM.h" #include "clang/CodeGen/BackendUtil.h" #include "clang/CodeGen/ModuleBuilder.h" #include "clang/Driver/DriverDiagnostic.h" @@ -55,13 +56,20 @@ namespace cir { class CIRGenConsumer : public clang::ASTConsumer { virtual void anchor(); - ASTContext *astContext{nullptr}; + + std::unique_ptr outputStream; std::unique_ptr gen; + CIRGenAction::OutputType action; + + ASTContext *astContext{nullptr}; + public: - CIRGenConsumer(std::unique_ptr os) - : gen(std::make_unique(std::move(os))) {} + CIRGenConsumer(std::unique_ptr os, + CIRGenAction::OutputType action) + : outputStream(std::move(os)), gen(std::make_unique()), + action(action) {} void Initialize(ASTContext &ctx) override { assert(!astContext && "initialized multiple times"); @@ -76,7 +84,6 @@ class CIRGenConsumer : public clang::ASTConsumer { astContext->getSourceManager(), "LLVM IR generation of declaration"); gen->HandleTopLevelDecl(D); - return true; } @@ -86,7 +93,32 @@ class CIRGenConsumer : public clang::ASTConsumer { void HandleTranslationUnit(ASTContext &C) override { gen->HandleTranslationUnit(C); - // TODO: Have context emit file here + + gen->verifyModule(); + + auto mlirMod = gen->getModule(); + auto mlirCtx = gen->takeContext(); + + switch (action) { + case CIRGenAction::OutputType::EmitCIR: + if (outputStream) + mlirMod->print(*outputStream); + break; + case CIRGenAction::OutputType::EmitLLVM: { + llvm::LLVMContext llvmCtx; + auto llvmModule = + lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + if (outputStream) + llvmModule->print(*outputStream, nullptr); + break; + } + case CIRGenAction::OutputType::EmitAssembly: + case CIRGenAction::OutputType::EmitObject: + assert(false && "Not yet implemented"); + break; + case CIRGenAction::OutputType::None: + break; + } } void HandleTagDeclDefinition(TagDecl *D) override {} @@ -126,7 +158,9 @@ getOutputStream(CompilerInstance &ci, StringRef inFile, case CIRGenAction::OutputType::EmitCIR: return ci.createDefaultOutputFile(false, inFile, "cir"); case CIRGenAction::OutputType::EmitLLVM: - return ci.createDefaultOutputFile(true, inFile, "llvm"); + return ci.createDefaultOutputFile(false, inFile, "llvm"); + case CIRGenAction::OutputType::EmitObject: + return ci.createDefaultOutputFile(true, inFile, "o"); case CIRGenAction::OutputType::None: return nullptr; } @@ -139,7 +173,7 @@ CIRGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { auto out = CI.takeOutputStream(); if (!out) out = getOutputStream(CI, InFile, action); - return std::make_unique(std::move(out)); + return std::make_unique(std::move(out), action); } std::unique_ptr @@ -160,3 +194,7 @@ EmitCIRAction::EmitCIRAction(mlir::MLIRContext *_MLIRContext) void EmitCIROnlyAction::anchor() {} EmitCIROnlyAction::EmitCIROnlyAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::None, _MLIRContext) {} + +void EmitLLVMAction::anchor() {} +EmitLLVMAction::EmitLLVMAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitLLVM, _MLIRContext) {} diff --git a/clang/lib/CIRLowering/CMakeLists.txt b/clang/lib/CIRLowering/CMakeLists.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 217a6d4b8008..f6d18acd4cf7 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -44,18 +44,13 @@ CreateFrontendBaseAction(CompilerInstance &CI) { StringRef Action("unknown"); (void)Action; - auto UseCIR = CI.getFrontendOpts().UseClangIRPipeline; + auto CIR = CI.getFrontendOpts().UseClangIRPipeline; auto Act = CI.getFrontendOpts().ProgramAction; - auto EmitsCIR = Act == EmitCIR || Act == EmitCIROnly; - auto IsImplementedCIROutput = EmitsCIR || Act == EmitLLVM; - - if (UseCIR && !IsImplementedCIROutput) - llvm::report_fatal_error("-fclangir currently only works with -emit-cir, " - "-emit-cir-only and -emit-llvm"); - if (!UseCIR && EmitsCIR) - llvm::report_fatal_error( - "-emit-cir and -emit-cir-only only valid when using -fenable"); + auto UsesCIR = Act == EmitCIR || Act == EmitCIROnly || Act == EmitLLVM; + if ((CIR && !UsesCIR) || (!CIR && UsesCIR)) + llvm::report_fatal_error("-fenable currently only works with " + "-emit-cir, -emit-cir-only and -emit-llvm"); switch (CI.getFrontendOpts().ProgramAction) { case ASTDeclList: return std::make_unique(); @@ -71,7 +66,11 @@ CreateFrontendBaseAction(CompilerInstance &CI) { case EmitCIR: return std::make_unique(); case EmitCIROnly: return std::make_unique(); case EmitHTML: return std::make_unique(); - case EmitLLVM: return std::make_unique(); + case EmitLLVM: { + if (CIR) + return std::make_unique(); + return std::make_unique(); + } case EmitLLVMOnly: return std::make_unique(); case EmitCodeGenOnly: return std::make_unique(); case EmitObj: return std::make_unique(); diff --git a/clang/test/CIR/IRGen/basic.c b/clang/test/CIR/IRGen/basic.c new file mode 100644 index 000000000000..9c31f7be6939 --- /dev/null +++ b/clang/test/CIR/IRGen/basic.c @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s +// XFAIL: * + +void foo() {} + +// CHECK: define void @foo() +// CHECK-NEXT: ret void, +// CHECK-NEXT: } From ee6dc8a54d4fca409545e1788c35991ac09265ef Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 9 Nov 2021 21:32:32 -0500 Subject: [PATCH 0044/2301] [CIR] Fix misplaced check for EmitLLVM That check doens't make sense as it has nothing to do with Emitting CIR. --- .../lib/FrontendTool/ExecuteCompilerInvocation.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index f6d18acd4cf7..781dc60d8c62 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -44,13 +44,18 @@ CreateFrontendBaseAction(CompilerInstance &CI) { StringRef Action("unknown"); (void)Action; - auto CIR = CI.getFrontendOpts().UseClangIRPipeline; + auto UseCIR = CI.getFrontendOpts().UseClangIRPipeline; auto Act = CI.getFrontendOpts().ProgramAction; - auto UsesCIR = Act == EmitCIR || Act == EmitCIROnly || Act == EmitLLVM; - if ((CIR && !UsesCIR) || (!CIR && UsesCIR)) + auto EmitsCIR = Act == EmitCIR || Act == EmitCIROnly; + auto IsImplementedCIROutput = EmitsCIR || Act == EmitLLVM; + + if (UseCIR && !IsImplementedCIROutput) llvm::report_fatal_error("-fenable currently only works with " "-emit-cir, -emit-cir-only and -emit-llvm"); + if (!UseCIR && EmitsCIR) + llvm::report_fatal_error( + "-emit-cir and -emit-cir-only only valid when using -fclangir"); switch (CI.getFrontendOpts().ProgramAction) { case ASTDeclList: return std::make_unique(); @@ -67,7 +72,7 @@ CreateFrontendBaseAction(CompilerInstance &CI) { case EmitCIROnly: return std::make_unique(); case EmitHTML: return std::make_unique(); case EmitLLVM: { - if (CIR) + if (UseCIR) return std::make_unique(); return std::make_unique(); } From 809136ad68d8a45bf375ea2ff690e4aab1678f53 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 9 Nov 2021 21:52:54 -0800 Subject: [PATCH 0045/2301] [CIR] Simplify interface and fix buildStmt default return value --- clang/lib/CIR/CIRBuilder.cpp | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index a348f846a7a8..627cd431b76c 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -617,16 +617,16 @@ class CIRBuildImpl { return Value; } - void buildStoreOfScalar(mlir::Value value, LValue lvalue, const Decl *D, - bool isInit) { + void buildStoreOfScalar(mlir::Value value, LValue lvalue, + const Decl *InitDecl) { // TODO: constant matrix type, volatile, non temporal, TBAA buildStoreOfScalar(value, lvalue.getAddress(), false, lvalue.getType(), - lvalue.getBaseInfo(), D, isInit, false); + lvalue.getBaseInfo(), InitDecl, false); } void buildStoreOfScalar(mlir::Value Value, RawAddress Addr, bool Volatile, - QualType Ty, LValueBaseInfo BaseInfo, const Decl *D, - bool isInit, bool isNontemporal) { + QualType Ty, LValueBaseInfo BaseInfo, + const Decl *InitDecl, bool isNontemporal) { // TODO: PreserveVec3Type // TODO: LValueIsSuitableForInlineAtomic ? // TODO: TBAA @@ -638,9 +638,9 @@ class CIRBuildImpl { // Update the alloca with more info on initialization. auto SrcAlloca = dyn_cast_or_null( Addr.getPointer().getDefiningOp()); - if (isInit) { + if (InitDecl) { InitStyle IS; - const VarDecl *VD = dyn_cast_or_null(D); + const VarDecl *VD = dyn_cast_or_null(InitDecl); assert(VD && "VarDecl expected"); if (VD->hasInit()) { switch (VD->getInitStyle()) { @@ -667,18 +667,17 @@ class CIRBuildImpl { /// Store the specified rvalue into the specified /// lvalue, where both are guaranteed to the have the same type, and that type /// is 'Ty'. - void buldStoreThroughLValue(RValue Src, LValue Dst, const Decl *D, - bool isInit) { + void buldStoreThroughLValue(RValue Src, LValue Dst, const Decl *InitDecl) { assert(Dst.isSimple() && "only implemented simple"); // TODO: ObjC lifetime. assert(Src.isScalar() && "Can't emit an agg store with this method"); - buildStoreOfScalar(Src.getScalarVal(), Dst, D, isInit); + buildStoreOfScalar(Src.getScalarVal(), Dst, InitDecl); } void buildScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue) { // TODO: this is where a lot of ObjC lifetime stuff would be done. mlir::Value value = buildScalarExpr(init); - buldStoreThroughLValue(RValue::get(value), lvalue, D, true); + buldStoreThroughLValue(RValue::get(value), lvalue, D); return; } @@ -1260,7 +1259,7 @@ class CIRBuildImpl { "@finally statements should be handled by EmitObjCAtTryStmt"); } - return mlir::failure(); + return mlir::success(); } mlir::LogicalResult buildFunctionBody(const Stmt *Body) { From 5401be7d870e76eaec20d7f568c20a411de19672 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 9 Nov 2021 21:54:53 -0800 Subject: [PATCH 0046/2301] [CIR] Add BinaryOperator codegen for assignments --- clang/lib/CIR/CIRBuilder.cpp | 57 ++++++++++++++++++++++++++++++-- clang/test/CIR/CodeGen/basic.cpp | 11 ++++++ 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 627cd431b76c..7581eef32956 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1062,6 +1062,57 @@ class CIRBuildImpl { llvm_unreachable("Unhandled DeclRefExpr?"); } + /// Emit code to compute the specified expression which + /// can have any type. The result is returned as an RValue struct. + /// TODO: if this is an aggregate expression, add a AggValueSlot to indicate + /// where the result should be returned. + RValue buildAnyExpr(const Expr *E) { + switch (CIRCodeGenFunction::getEvaluationKind(E->getType())) { + case TEK_Scalar: + return RValue::get(buildScalarExpr(E)); + case TEK_Complex: + assert(0 && "not implemented"); + case TEK_Aggregate: + assert(0 && "not implemented"); + } + llvm_unreachable("bad evaluation kind"); + } + + LValue buildBinaryOperatorLValue(const BinaryOperator *E) { + // Comma expressions just emit their LHS then their RHS as an l-value. + if (E->getOpcode() == BO_Comma) { + assert(0 && "not implemented"); + } + + if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) + assert(0 && "not implemented"); + + assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); + + // Note that in all of these cases, __block variables need the RHS + // evaluated first just in case the variable gets moved by the RHS. + + switch (CIRCodeGenFunction::getEvaluationKind(E->getType())) { + case TEK_Scalar: { + assert(E->getLHS()->getType().getObjCLifetime() == + clang::Qualifiers::ObjCLifetime::OCL_None && + "not implemented"); + + RValue RV = buildAnyExpr(E->getRHS()); + LValue LV = buildLValue(E->getLHS()); + buldStoreThroughLValue(RV, LV, nullptr /*InitDecl*/); + assert(!astCtx.getLangOpts().OpenMP && "last priv cond not implemented"); + return LV; + } + + case TEK_Complex: + assert(0 && "not implemented"); + case TEK_Aggregate: + assert(0 && "not implemented"); + } + llvm_unreachable("bad evaluation kind"); + } + /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. @@ -1073,10 +1124,12 @@ class CIRBuildImpl { << E->getStmtClassName() << "'"; assert(0 && "not implemented"); } - case Expr::ObjCPropertyRefExprClass: - llvm_unreachable("cannot emit a property reference directly"); + case Expr::BinaryOperatorClass: + return buildBinaryOperatorLValue(cast(E)); case Expr::DeclRefExprClass: return buildDeclRefLValue(cast(E)); + case Expr::ObjCPropertyRefExprClass: + llvm_unreachable("cannot emit a property reference directly"); } return LValue::makeAddr(RawAddress::invalid(), E->getType()); diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 6f04c37396da..3c29a826aa6b 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -9,3 +9,14 @@ int *p0() { // CHECK: func @p0() -> !cir.ptr { // CHECK: %1 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > + +int *p1() { + int *p; + p = nullptr; + return p; +} + +// CHECK: func @p1() -> !cir.ptr { +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, [uninitialized] +// CHECK: %1 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > From 41820731678d1f5b1925e1fe0881065c32ee5e8d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 9 Nov 2021 22:30:04 -0800 Subject: [PATCH 0047/2301] [CIR] Add missing stmt class to silence warning --- clang/lib/CIR/CIRBuilder.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 7581eef32956..569732aab16b 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1158,7 +1158,6 @@ class CIRBuildImpl { case Stmt::OMPParallelMaskedDirectiveClass: case Stmt::OMPTargetTeamsGenericLoopDirectiveClass: case Stmt::OMPErrorDirectiveClass: - case Stmt::OMPGenericLoopDirectiveClass: case Stmt::OMPMaskedTaskLoopDirectiveClass: case Stmt::OMPMaskedTaskLoopSimdDirectiveClass: case Stmt::OMPParallelGenericLoopDirectiveClass: @@ -1298,6 +1297,7 @@ class CIRBuildImpl { case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass: case Stmt::OMPInteropDirectiveClass: case Stmt::OMPDispatchDirectiveClass: + case Stmt::OMPGenericLoopDirectiveClass: case Stmt::OMPMaskedDirectiveClass: { llvm::errs() << "CIR codegen for '" << S->getStmtClassName() << "' not implemented\n"; From fbf15c31ccf22afa7a4d986f3bfebe9e7a4c745d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 9 Nov 2021 22:42:29 -0800 Subject: [PATCH 0048/2301] [CIR] Simplify VisitExpr in scalar emitter to prevent random crashes --- clang/lib/CIR/CIRBuilder.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 569732aab16b..dc54f8636140 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -448,13 +448,12 @@ class CIRBuildImpl { } mlir::Value VisitExpr(Expr *E) { + // Crashing here for "ScalarExprClassName"? Please implement + // VisitScalarExprClassName(...) to get this working. emitError(Builder.getLoc(E->getExprLoc()), "scalar exp no implemented: '") << E->getStmtClassName() << "'"; - if (E->getType()->isVoidType()) - return nullptr; - // FIXME: find a way to return "undef"... - // return llvm::UndefValue::get(CGF.ConvertType(E->getType())); - return nullptr; + assert(0 && "shouldn't be here!"); + return {}; } // Leaves. From 83a853599ae4e9f9c43471fdf99e2f1fda21acaa Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 9 Nov 2021 23:45:22 -0800 Subject: [PATCH 0049/2301] [CIR] Visit unary adddress of as part of scalar emission --- clang/lib/CIR/CIRBuilder.cpp | 5 +++++ clang/test/CIR/CodeGen/basic.cpp | 12 ++++++++++++ 2 files changed, 17 insertions(+) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index dc54f8636140..a82ad5de6249 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -447,6 +447,11 @@ class CIRBuildImpl { } } + mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { + assert(!isa(E->getType()) && "not implemented"); + return Builder.buildLValue(E->getSubExpr()).getPointer(); + } + mlir::Value VisitExpr(Expr *E) { // Crashing here for "ScalarExprClassName"? Please implement // VisitScalarExprClassName(...) to get this working. diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 3c29a826aa6b..ed8ff40691a5 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -20,3 +20,15 @@ int *p1() { // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, [uninitialized] // CHECK: %1 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > + +int *p2() { + int *p = nullptr; + int x = 0; + p = &x; + return p; +} + +// CHECK: func @p2() -> !cir.ptr { +// CHECK: %0 = cir.alloca i32, cir.ptr , [cinit] +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, [cinit] +// CHECK: cir.store %0, %1 : !cir.ptr, cir.ptr > From 802076f2ea90c7a239d6fc8de66bbd77caaf1ec2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 10 Nov 2021 13:16:39 -0500 Subject: [PATCH 0050/2301] [CIR] Convert a few empty `if` statements to llvm::report_fatal_errors --- clang/lib/CIR/LowerToLLVM.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 376cfd68b66d..61fd8e6c3c44 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -129,18 +129,18 @@ lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, pm.addPass(createLowerToLLVMIRPass()); - // TODO: Handle this error - if (mlir::failed(pm.run(theModule))) - ; + auto result = !mlir::failed(pm.run(theModule)); + if (!result) + llvm::report_fatal_error( + "The pass manager failed to lower CIR to llvm IR!"); mlir::registerLLVMDialectTranslation(*mlirCtx); llvm::LLVMContext llvmContext; auto llvmModule = mlir::translateModuleToLLVMIR(theModule, llvmCtx); - // TODO: Handle this error if (!llvmModule) - ; + llvm::report_fatal_error("Lowering from llvm dialect to llvm IR failed!"); return llvmModule; } From 49d32604d64ce680219a9250954634fdb70d8270 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 10 Nov 2021 16:16:31 -0500 Subject: [PATCH 0051/2301] [CIR][NFC] Rename LowerToLLVMIRPass to LowerToLLVMPass This doesn't actually lower to LLVMIR. It did originally but I ripped that out to match the rest of MLIR but forgot to rename it. --- clang/include/clang/CIR/Passes.h | 2 +- clang/lib/CIR/LowerToLLVM.cpp | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index 3979bd08a772..206506c9aff5 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -23,7 +23,7 @@ class Pass; /// Create a pass for lowering from `CIR` operations well as `Affine` and `Std`, /// to the LLVM dialect for codegen. We'll want to separate this eventually into /// different phases instead of doing it all at once. -std::unique_ptr createLowerToLLVMIRPass(); +std::unique_ptr createLowerToLLVMPass(); } // end namespace cir diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 61fd8e6c3c44..7355f0f7e544 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -36,8 +36,8 @@ using namespace cir; namespace cir { -struct CIRToLLVMIRLoweringPass - : public mlir::PassWrapper> { void getDependentDialects(mlir::DialectRegistry ®istry) const override { registry.insert(patterns.getContext()); } -void CIRToLLVMIRLoweringPass::runOnOperation() { +void CIRToLLVMLoweringPass::runOnOperation() { mlir::LLVMConversionTarget target(getContext()); target.addLegalOp(); @@ -127,7 +127,7 @@ lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, llvm::LLVMContext &llvmCtx) { mlir::PassManager pm(mlirCtx.get()); - pm.addPass(createLowerToLLVMIRPass()); + pm.addPass(createLowerToLLVMPass()); auto result = !mlir::failed(pm.run(theModule)); if (!result) @@ -145,8 +145,8 @@ lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, return llvmModule; } -std::unique_ptr createLowerToLLVMIRPass() { - return std::make_unique(); +std::unique_ptr createLowerToLLVMPass() { + return std::make_unique(); } } // namespace cir From db39a890e05a19e811674e44dabd3904cb302001 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 9 Nov 2021 23:46:38 -0500 Subject: [PATCH 0052/2301] [CIR] Implementing support for parsing and lowering CIR files For the purpose of iterating on lowering individual CIR instructions it's much easier to just operate on a sample CIR file with one new instruction at a time instead of having to go through CIRGen. --- clang/include/clang/Basic/LangStandard.h | 1 + .../clang/CIRFrontendAction/CIRGenAction.h | 16 +++-- clang/include/clang/Frontend/FrontendAction.h | 3 + clang/lib/CIRFrontendAction/CIRGenAction.cpp | 62 +++++++++++++++---- clang/lib/Frontend/FrontendAction.cpp | 20 ++++++ clang/test/CIR/IRGen/basic.cir | 23 +++++++ 6 files changed, 109 insertions(+), 16 deletions(-) create mode 100644 clang/test/CIR/IRGen/basic.cir diff --git a/clang/include/clang/Basic/LangStandard.h b/clang/include/clang/Basic/LangStandard.h index 35361880c371..cd3356815f1f 100644 --- a/clang/include/clang/Basic/LangStandard.h +++ b/clang/include/clang/Basic/LangStandard.h @@ -28,6 +28,7 @@ enum class Language : uint8_t { /// LLVM IR: we accept this so that we can run the optimizer on it, /// and compile it to assembly or object code. + CIR, LLVM_IR, ///@{ Languages that the frontend can parse and compile. diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h index 2ec64a091d3f..f644a7e5a6d2 100644 --- a/clang/include/clang/CIRFrontendAction/CIRGenAction.h +++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h @@ -14,11 +14,13 @@ namespace llvm { class LLVMIRContext; -} +class Module; +} // namespace llvm namespace mlir { class MLIRContext; class ModuleOp; +template class OwningOpRef; } // namespace mlir namespace cir { @@ -32,12 +34,14 @@ class CIRGenAction : public clang::ASTFrontendAction { private: friend class CIRGenConsumer; - std::unique_ptr TheModule; + // TODO: this is redundant but just using the OwningModuleRef requires more of + // clang against MLIR. Hide this somewhere else. + std::unique_ptr> mlirModule; + std::unique_ptr llvmModule; - mlir::MLIRContext *MLIRContext; - bool OwnsVMContext; + mlir::MLIRContext *mlirContext; - std::unique_ptr loadModule(llvm::MemoryBufferRef MBRef); + mlir::OwningOpRef loadModule(llvm::MemoryBufferRef mbRef); protected: CIRGenAction(OutputType action, mlir::MLIRContext *_MLIRContext = nullptr); @@ -53,6 +57,8 @@ class CIRGenAction : public clang::ASTFrontendAction { public: ~CIRGenAction() override; + virtual bool hasCIRSupport() const override { return true; } + CIRGenConsumer *cgConsumer; OutputType action; }; diff --git a/clang/include/clang/Frontend/FrontendAction.h b/clang/include/clang/Frontend/FrontendAction.h index 718684a67771..a04557f51dc5 100644 --- a/clang/include/clang/Frontend/FrontendAction.h +++ b/clang/include/clang/Frontend/FrontendAction.h @@ -202,6 +202,9 @@ class FrontendAction { /// Does this action support use with IR files? virtual bool hasIRSupport() const { return false; } + /// Does this action support use with CIR files? + virtual bool hasCIRSupport() const { return false; } + /// Does this action support use with code completion? virtual bool hasCodeCompletionSupport() const { return false; } diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index a81a1e2a0776..c22112bae7f5 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -7,8 +7,12 @@ //===----------------------------------------------------------------------===// #include "clang/CIRFrontendAction/CIRGenAction.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" +#include "mlir/Parser/Parser.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" @@ -138,14 +142,10 @@ class CIRGenConsumer : public clang::ASTConsumer { void CIRGenConsumer::anchor() {} CIRGenAction::CIRGenAction(OutputType act, mlir::MLIRContext *_MLIRContext) - : MLIRContext(_MLIRContext ? _MLIRContext : new mlir::MLIRContext), - OwnsVMContext(!_MLIRContext), action(act) {} + : mlirContext(_MLIRContext ? _MLIRContext : new mlir::MLIRContext), + action(act) {} -CIRGenAction::~CIRGenAction() { - TheModule.reset(); - if (OwnsVMContext) - delete MLIRContext; -} +CIRGenAction::~CIRGenAction() { mlirModule.reset(); } void CIRGenAction::EndSourceFileAction() {} @@ -176,12 +176,52 @@ CIRGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { return std::make_unique(std::move(out), action); } -std::unique_ptr -CIRGenAction::loadModule(llvm::MemoryBufferRef MBRef) { - return {}; +mlir::OwningOpRef +CIRGenAction::loadModule(llvm::MemoryBufferRef mbRef) { + auto module = + mlir::parseSourceString(mbRef.getBuffer(), mlirContext); + assert(module && "Failed to parse ClangIR module"); + return module; } -void CIRGenAction::ExecuteAction() { ASTFrontendAction::ExecuteAction(); } +void CIRGenAction::ExecuteAction() { + if (getCurrentFileKind().getLanguage() != Language::CIR) { + this->ASTFrontendAction::ExecuteAction(); + return; + } + + // If this is a CIR file we have to treat it specially. + auto &ci = getCompilerInstance(); + std::unique_ptr outstream = + getOutputStream(ci, getCurrentFile(), action); + if (action != OutputType::None && !outstream) + return; + + auto &sourceManager = ci.getSourceManager(); + auto fileID = sourceManager.getMainFileID(); + auto mainFile = sourceManager.getBufferOrNone(fileID); + + if (!mainFile) + return; + + mlirContext->getOrLoadDialect(); + mlirContext->getOrLoadDialect(); + mlirContext->getOrLoadDialect(); + + // TODO: unwrap this -- this exists because including the `OwningModuleRef` in + // CIRGenAction's header would require linking the Frontend against MLIR. + // Let's avoid that for now. + auto mlirModule = loadModule(*mainFile); + if (!mlirModule) + return; + + llvm::LLVMContext llvmCtx; + auto llvmModule = lowerFromCIRToLLVMIR( + *mlirModule, std::unique_ptr(mlirContext), llvmCtx); + + if (outstream) + llvmModule->print(*outstream, nullptr); +} void EmitAssemblyAction::anchor() {} EmitAssemblyAction::EmitAssemblyAction(mlir::MLIRContext *_MLIRContext) diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp index 9f789f093f55..2b1fcec5c94f 100644 --- a/clang/lib/Frontend/FrontendAction.cpp +++ b/clang/lib/Frontend/FrontendAction.cpp @@ -778,6 +778,26 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, return true; } + // TODO: blindly duplicating for now + if (Input.getKind().getLanguage() == Language::CIR) { + assert(hasCIRSupport() && "This action does not have CIR file support!"); + + // Inform the diagnostic client we are processing a source file. + CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), nullptr); + HasBegunSourceFile = true; + + // Initialize the action. + if (!BeginSourceFileAction(CI)) + return false; + + // Initialize the main file entry. + if (!CI.InitializeSourceManager(CurrentInput)) + return false; + + FailureCleanup.release(); + return true; + } + // If the implicit PCH include is actually a directory, rather than // a single file, search for a suitable PCH file in that directory. if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) { diff --git a/clang/test/CIR/IRGen/basic.cir b/clang/test/CIR/IRGen/basic.cir new file mode 100644 index 000000000000..679acbdf8df8 --- /dev/null +++ b/clang/test/CIR/IRGen/basic.cir @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR +// RUN: mlir-translate -mlir-to-llvmir %t.mlir -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// XFAIL: * + +module { + func.func @foo() { + cir.return + } +} + +// LLVM: define void @foo() +// LLVM-NEXT: ret void, +// LLVM-NEXT: } + +// MLIR: module { +// MLIR-NEXT: llvm.func @foo() { +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } From 005af0ad9a03b736e88e3b25cb8ab311536351ca Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 10 Nov 2021 16:17:43 -0500 Subject: [PATCH 0053/2301] [CIR] Add the cir-to-llvm pass to cir-tool This enables `$ cir-tool -cir-to-llvm foo.cir` and `$ cir-tool -cir-to-llvm foo.cir | mlir-translate -mlir-to-llvm` --- clang/include/clang/CIR/LowerToLLVM.h | 3 ++- clang/include/clang/CIR/Passes.h | 4 +--- clang/lib/CIR/LowerToLLVM.cpp | 24 +++++++++++++----------- clang/test/CIR/IRGen/basic.c | 4 ++-- clang/test/CIR/IRGen/basic.cir | 2 +- clang/tools/cir-tool/CMakeLists.txt | 1 + clang/tools/cir-tool/cir-tool.cpp | 12 ++++++++---- 7 files changed, 28 insertions(+), 22 deletions(-) diff --git a/clang/include/clang/CIR/LowerToLLVM.h b/clang/include/clang/CIR/LowerToLLVM.h index 529c25763961..139e3fc93aec 100644 --- a/clang/include/clang/CIR/LowerToLLVM.h +++ b/clang/include/clang/CIR/LowerToLLVM.h @@ -12,6 +12,8 @@ #ifndef CLANG_CIR_LOWERTOLLVM_H #define CLANG_CIR_LOWERTOLLVM_H +#include "mlir/Pass/Pass.h" + #include namespace llvm { @@ -31,7 +33,6 @@ std::unique_ptr lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, std::unique_ptr mlirCtx, llvm::LLVMContext &llvmCtx); - } // namespace cir #endif // CLANG_CIR_LOWERTOLLVM_H_ diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index 206506c9aff5..4dec0b53981c 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -18,13 +18,11 @@ #include namespace cir { -class Pass; /// Create a pass for lowering from `CIR` operations well as `Affine` and `Std`, /// to the LLVM dialect for codegen. We'll want to separate this eventually into /// different phases instead of doing it all at once. -std::unique_ptr createLowerToLLVMPass(); - +std::unique_ptr createConvertCIRToLLVMPass(); } // end namespace cir #endif // CLANG_CIR_PASSES_H diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 7355f0f7e544..36ea97a4c8ab 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -33,17 +33,20 @@ #include "llvm/ADT/Sequence.h" using namespace cir; +using namespace llvm; namespace cir { -struct CIRToLLVMLoweringPass - : public mlir::PassWrapper> { void getDependentDialects(mlir::DialectRegistry ®istry) const override { registry.insert(); } void runOnOperation() final; + + virtual StringRef getArgument() const override { return "cir-to-llvm"; } }; class CIRReturnLowering : public mlir::OpRewritePattern { @@ -103,7 +106,7 @@ void populateCIRToStdConversionPatterns(mlir::RewritePatternSet &patterns) { CIRStoreLowering>(patterns.getContext()); } -void CIRToLLVMLoweringPass::runOnOperation() { +void ConvertCIRToLLVMPass::runOnOperation() { mlir::LLVMConversionTarget target(getContext()); target.addLegalOp(); @@ -124,29 +127,28 @@ void CIRToLLVMLoweringPass::runOnOperation() { std::unique_ptr lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, std::unique_ptr mlirCtx, - llvm::LLVMContext &llvmCtx) { + LLVMContext &llvmCtx) { mlir::PassManager pm(mlirCtx.get()); - pm.addPass(createLowerToLLVMPass()); + pm.addPass(createConvertCIRToLLVMPass()); auto result = !mlir::failed(pm.run(theModule)); if (!result) - llvm::report_fatal_error( - "The pass manager failed to lower CIR to llvm IR!"); + report_fatal_error("The pass manager failed to lower CIR to llvm IR!"); mlir::registerLLVMDialectTranslation(*mlirCtx); - llvm::LLVMContext llvmContext; + LLVMContext llvmContext; auto llvmModule = mlir::translateModuleToLLVMIR(theModule, llvmCtx); if (!llvmModule) - llvm::report_fatal_error("Lowering from llvm dialect to llvm IR failed!"); + report_fatal_error("Lowering from llvm dialect to llvm IR failed!"); return llvmModule; } -std::unique_ptr createLowerToLLVMPass() { - return std::make_unique(); +std::unique_ptr createConvertCIRToLLVMPass() { + return std::make_unique(); } } // namespace cir diff --git a/clang/test/CIR/IRGen/basic.c b/clang/test/CIR/IRGen/basic.c index 9c31f7be6939..5085b85de8ae 100644 --- a/clang/test/CIR/IRGen/basic.c +++ b/clang/test/CIR/IRGen/basic.c @@ -4,6 +4,6 @@ void foo() {} -// CHECK: define void @foo() -// CHECK-NEXT: ret void, +// CHECK: define void @foo() +// CHECK-NEXT: ret void, // CHECK-NEXT: } diff --git a/clang/test/CIR/IRGen/basic.cir b/clang/test/CIR/IRGen/basic.cir index 679acbdf8df8..8afcf2dbba6e 100644 --- a/clang/test/CIR/IRGen/basic.cir +++ b/clang/test/CIR/IRGen/basic.cir @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fenable-clangir -emit-llvm %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=LLVM // RUN: cir-tool %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR diff --git a/clang/tools/cir-tool/CMakeLists.txt b/clang/tools/cir-tool/CMakeLists.txt index 4c1f4b2f85ef..f55786351d20 100644 --- a/clang/tools/cir-tool/CMakeLists.txt +++ b/clang/tools/cir-tool/CMakeLists.txt @@ -9,6 +9,7 @@ target_link_libraries(cir-tool PRIVATE ${dialect_libs} ${conversion_libs} + clangCIR MLIRAnalysis MLIRIR MLIROptLib diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index a0ed1447b46f..dda8de2af31c 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -14,16 +14,20 @@ #include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Pass/PassRegistry.h" #include "mlir/Tools/mlir-opt/MlirOptMain.h" - -using namespace mlir; +#include "clang/CIR/Passes.h" int main(int argc, char **argv) { // TODO: register needed MLIR passes for CIR? mlir::DialectRegistry registry; // TODO: add memref::MemRefDialect> when we add lowering - registry.insert(); - registry.insert(); + registry.insert(); + registry.insert(); + + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return cir::createConvertCIRToLLVMPass(); + }); return failed(MlirOptMain( argc, argv, "Clang IR analysis and optimization tool\n", registry)); From 11b4b4452f8115f6ce6675f25b371df2fe1e0880 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 11 Nov 2021 20:56:34 -0500 Subject: [PATCH 0054/2301] [CIR] Add a ConvertCIRToMemRefPass pass for lowering the memref-like insns CIR contains a set of memref-like instructions (alloca/load/etc) that map relatively simply to memref. Create a pass that can be used with cir-tool to lower them directly. --- clang/include/clang/CIR/Passes.h | 4 +++ clang/lib/CIR/LowerToLLVM.cpp | 48 +++++++++++++++++++++++++++++-- clang/tools/cir-tool/cir-tool.cpp | 9 ++++-- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index 4dec0b53981c..18d0e9a9e6b1 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -23,6 +23,10 @@ namespace cir { /// to the LLVM dialect for codegen. We'll want to separate this eventually into /// different phases instead of doing it all at once. std::unique_ptr createConvertCIRToLLVMPass(); + +/// Create a pass that only lowers a subset of `CIR` memref-like operations to +/// MemRef specific versions. +std::unique_ptr createConvertCIRToMemRefPass(); } // end namespace cir #endif // CLANG_CIR_PASSES_H diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 36ea97a4c8ab..292e1344a60b 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -18,6 +18,7 @@ #include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h" #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" +#include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" @@ -49,6 +50,18 @@ struct ConvertCIRToLLVMPass virtual StringRef getArgument() const override { return "cir-to-llvm"; } }; +struct ConvertCIRToMemRefPass + : public mlir::PassWrapper> { + void getDependentDialects(mlir::DialectRegistry ®istry) const override { + registry.insert(); + } + void runOnOperation() final; + + virtual StringRef getArgument() const override { return "cir-to-memref"; } +}; + class CIRReturnLowering : public mlir::OpRewritePattern { public: using OpRewritePattern::OpRewritePattern; @@ -101,9 +114,13 @@ class CIRStoreLowering : public mlir::OpRewritePattern { } }; +void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { + patterns.add( + patterns.getContext()); +} + void populateCIRToStdConversionPatterns(mlir::RewritePatternSet &patterns) { - patterns.add(patterns.getContext()); + patterns.add(patterns.getContext()); } void ConvertCIRToLLVMPass::runOnOperation() { @@ -114,6 +131,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { mlir::RewritePatternSet patterns(&getContext()); populateCIRToStdConversionPatterns(patterns); + populateCIRToMemRefConversionPatterns(patterns); populateAffineToStdConversionPatterns(patterns); populateSCFToControlFlowConversionPatterns(patterns); populateFinalizeMemRefToLLVMConversionPatterns(typeConverter, patterns); @@ -124,6 +142,28 @@ void ConvertCIRToLLVMPass::runOnOperation() { signalPassFailure(); } +void ConvertCIRToMemRefPass::runOnOperation() { + mlir::ConversionTarget target(getContext()); + + // TODO: Should this be a wholesale conversion? It's a bit ambiguous on + // whether we should have micro-conversions that do the minimal amount of work + // or macro conversions that entiirely remove a dialect. + target.addLegalOp(); + target.addLegalDialect(); + target.addIllegalOp(); + + mlir::RewritePatternSet patterns(&getContext()); + populateCIRToMemRefConversionPatterns(patterns); + // populateAffineToStdConversionPatterns(patterns); + // populateLoopToStdConversionPatterns(patterns); + + auto module = getOperation(); + if (failed(applyPartialConversion(module, target, std::move(patterns)))) + signalPassFailure(); +} + std::unique_ptr lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, std::unique_ptr mlirCtx, @@ -151,4 +191,8 @@ std::unique_ptr createConvertCIRToLLVMPass() { return std::make_unique(); } +std::unique_ptr createConvertCIRToMemRefPass() { + return std::make_unique(); +} + } // namespace cir diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index dda8de2af31c..de2a7574c559 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -14,6 +14,7 @@ #include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Pass/PassRegistry.h" #include "mlir/Tools/mlir-opt/MlirOptMain.h" #include "clang/CIR/Passes.h" @@ -21,13 +22,15 @@ int main(int argc, char **argv) { // TODO: register needed MLIR passes for CIR? mlir::DialectRegistry registry; - // TODO: add memref::MemRefDialect> when we add lowering - registry.insert(); - registry.insert(); + registry.insert(); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertCIRToLLVMPass(); }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return cir::createConvertCIRToMemRefPass(); + }); return failed(MlirOptMain( argc, argv, "Clang IR analysis and optimization tool\n", registry)); From d5936ea8d3b7751aa6a12f92a4401b1d7b7d4d1c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 11 Nov 2021 20:57:04 -0500 Subject: [PATCH 0055/2301] [CIR] Enable AllocaOp conversion This is a pretty naieve implementation for now. I plan on iterating on this as I find new requirements from CIRGen. --- clang/lib/CIR/LowerToLLVM.cpp | 1 - clang/test/CIR/IRGen/memref.cir | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/IRGen/memref.cir diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 292e1344a60b..9b239643536c 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -83,7 +83,6 @@ class CIRAllocaLowering : public mlir::OpRewritePattern { mlir::LogicalResult matchAndRewrite(mlir::cir::AllocaOp op, mlir::PatternRewriter &rewriter) const override { - assert(false && "NYI"); auto ty = mlir::MemRefType::get({}, op.getType()); rewriter.replaceOpWithNewOp(op, ty); return mlir::LogicalResult::success(); diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir new file mode 100644 index 000000000000..f167086e7b28 --- /dev/null +++ b/clang/test/CIR/IRGen/memref.cir @@ -0,0 +1,17 @@ +// RUN: cir-tool %s -cir-to-memref -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s +// XFAIL: * + +module { + func.func @foo() { + %0 = cir.alloca i32, cir.ptr , [cinit] + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %0 = memref.alloc() : memref +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: } From 4f416a742d77ceedaadcbd9c50522e6542ad774f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 11 Nov 2021 21:11:14 -0500 Subject: [PATCH 0056/2301] [CIR] Implement trivial lowering of cir::ConstantOp to arith::ConstantOp Only supported and tested on i32s atm. --- clang/lib/CIR/LowerToLLVM.cpp | 21 ++++++++++++++++++--- clang/test/CIR/IRGen/memref.cir | 2 ++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 9b239643536c..2e447f756328 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -113,9 +113,24 @@ class CIRStoreLowering : public mlir::OpRewritePattern { } }; +class CIRConstantLowering + : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ConstantOp op, + mlir::PatternRewriter &rewriter) const override { + auto result = rewriter.replaceOpWithNewOp( + op, op.getType(), op.getValue()); + (void)result; + return mlir::LogicalResult::success(); + } +}; + void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { - patterns.add( - patterns.getContext()); + patterns.add(patterns.getContext()); } void populateCIRToStdConversionPatterns(mlir::RewritePatternSet &patterns) { @@ -151,7 +166,7 @@ void ConvertCIRToMemRefPass::runOnOperation() { target.addLegalDialect(); - target.addIllegalOp(); + target.addIllegalOp(); mlir::RewritePatternSet patterns(&getContext()); populateCIRToMemRefConversionPatterns(patterns); diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index f167086e7b28..b23c4b4381e7 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -5,6 +5,7 @@ module { func.func @foo() { %0 = cir.alloca i32, cir.ptr , [cinit] + %1 = cir.cst(1 : i32) : i32 cir.return } } @@ -12,6 +13,7 @@ module { // CHECK: module { // CHECK-NEXT: func.func @foo() { // CHECK-NEXT: %0 = memref.alloc() : memref +// CHECK-NEXT: {{.*}}arith.constant 1 : i32 // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: } From bf740368f4e28a0e86ab973743c5abb2254d98ce Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 12 Nov 2021 00:20:32 -0500 Subject: [PATCH 0057/2301] [CIR] Support -emit-obj in the -fenable-clangir pipeline --- .../clang/CIRFrontendAction/CIRGenAction.h | 9 ++- clang/lib/CIRFrontendAction/CIRGenAction.cpp | 70 +++++++++++++++---- clang/lib/CIRFrontendAction/CMakeLists.txt | 1 + .../ExecuteCompilerInvocation.cpp | 8 ++- clang/test/CIR/IRGen/basic.c | 12 ++-- 5 files changed, 80 insertions(+), 20 deletions(-) diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h index f644a7e5a6d2..4ac4ed1b5fcb 100644 --- a/clang/include/clang/CIRFrontendAction/CIRGenAction.h +++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h @@ -29,7 +29,7 @@ class CIRGenerator; class CIRGenAction : public clang::ASTFrontendAction { public: - enum class OutputType { EmitAssembly, EmitCIR, EmitLLVM, EmitObject, None }; + enum class OutputType { EmitAssembly, EmitCIR, EmitLLVM, EmitObj, None }; private: friend class CIRGenConsumer; @@ -91,6 +91,13 @@ class EmitAssemblyAction : public CIRGenAction { EmitAssemblyAction(mlir::MLIRContext *mlirCtx = nullptr); }; +class EmitObjAction : public CIRGenAction { + virtual void anchor(); + +public: + EmitObjAction(mlir::MLIRContext *mlirCtx = nullptr); +}; + } // namespace cir #endif diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index c22112bae7f5..4d1b84288b2b 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -61,19 +61,41 @@ class CIRGenConsumer : public clang::ASTConsumer { virtual void anchor(); - std::unique_ptr outputStream; + CIRGenAction::OutputType action; - std::unique_ptr gen; + CompilerInstance &compilerInstance; + DiagnosticsEngine &diagnosticsEngine; + const HeaderSearchOptions &headerSearchOptions; + CodeGenOptions &codeGenOptions; + const TargetOptions &targetOptions; + const LangOptions &langOptions; - CIRGenAction::OutputType action; + std::unique_ptr outputStream; ASTContext *astContext{nullptr}; + std::unique_ptr gen; public: - CIRGenConsumer(std::unique_ptr os, - CIRGenAction::OutputType action) - : outputStream(std::move(os)), gen(std::make_unique()), - action(action) {} + CIRGenConsumer(CIRGenAction::OutputType action, + CompilerInstance &compilerInstance, + DiagnosticsEngine &diagnosticsEngine, + const HeaderSearchOptions &headerSearchOptions, + CodeGenOptions &codeGenOptions, + const TargetOptions &targetOptions, + const LangOptions &langOptions, + std::unique_ptr os) + : action(action), compilerInstance(compilerInstance), + diagnosticsEngine(diagnosticsEngine), + headerSearchOptions(headerSearchOptions), + codeGenOptions(codeGenOptions), targetOptions(targetOptions), + langOptions(langOptions), outputStream(std::move(os)), + gen(std::make_unique()) { + // This is required to match the constructors used during + // CodeGenAction. Ultimately, this is required because we want to use + // the same utility functions in BackendUtil.h for handling llvm + // optimization and codegen + (void)this->codeGenOptions; + } void Initialize(ASTContext &ctx) override { assert(!astContext && "initialized multiple times"); @@ -116,8 +138,21 @@ class CIRGenConsumer : public clang::ASTConsumer { llvmModule->print(*outputStream, nullptr); break; } + case CIRGenAction::OutputType::EmitObj: { + // TODO: Don't duplicate this from above + llvm::LLVMContext llvmCtx; + auto llvmModule = + lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + + llvmModule->setTargetTriple(targetOptions.Triple); + + emitBackendOutput(compilerInstance, codeGenOptions, + C.getTargetInfo().getDataLayoutString(), + llvmModule.get(), BackendAction::Backend_EmitObj, + nullptr, std::move(outputStream)); + break; + } case CIRGenAction::OutputType::EmitAssembly: - case CIRGenAction::OutputType::EmitObject: assert(false && "Not yet implemented"); break; case CIRGenAction::OutputType::None: @@ -159,7 +194,7 @@ getOutputStream(CompilerInstance &ci, StringRef inFile, return ci.createDefaultOutputFile(false, inFile, "cir"); case CIRGenAction::OutputType::EmitLLVM: return ci.createDefaultOutputFile(false, inFile, "llvm"); - case CIRGenAction::OutputType::EmitObject: + case CIRGenAction::OutputType::EmitObj: return ci.createDefaultOutputFile(true, inFile, "o"); case CIRGenAction::OutputType::None: return nullptr; @@ -169,11 +204,14 @@ getOutputStream(CompilerInstance &ci, StringRef inFile, } std::unique_ptr -CIRGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { - auto out = CI.takeOutputStream(); +CIRGenAction::CreateASTConsumer(CompilerInstance &ci, StringRef inputFile) { + auto out = ci.takeOutputStream(); if (!out) - out = getOutputStream(CI, InFile, action); - return std::make_unique(std::move(out), action); + out = getOutputStream(ci, inputFile, action); + return std::make_unique( + action, ci, ci.getDiagnostics(), ci.getHeaderSearchOpts(), + ci.getCodeGenOpts(), ci.getTargetOpts(), ci.getLangOpts(), + std::move(out)); } mlir::OwningOpRef @@ -191,6 +229,8 @@ void CIRGenAction::ExecuteAction() { } // If this is a CIR file we have to treat it specially. + // TODO: This could be done more logically. This is just modeled at the moment + // mimicing CodeGenAction but this is clearly suboptimal. auto &ci = getCompilerInstance(); std::unique_ptr outstream = getOutputStream(ci, getCurrentFile(), action); @@ -238,3 +278,7 @@ EmitCIROnlyAction::EmitCIROnlyAction(mlir::MLIRContext *_MLIRContext) void EmitLLVMAction::anchor() {} EmitLLVMAction::EmitLLVMAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitLLVM, _MLIRContext) {} + +void EmitObjAction::anchor() {} +EmitObjAction::EmitObjAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitObj, _MLIRContext) {} diff --git a/clang/lib/CIRFrontendAction/CMakeLists.txt b/clang/lib/CIRFrontendAction/CMakeLists.txt index 60430e5b4ef4..558787eb3a86 100644 --- a/clang/lib/CIRFrontendAction/CMakeLists.txt +++ b/clang/lib/CIRFrontendAction/CMakeLists.txt @@ -17,6 +17,7 @@ add_clang_library(clangCIRFrontendAction LINK_LIBS clangAST clangBasic + clangCodeGen clangLex clangFrontend clangCIR diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 781dc60d8c62..898ba773ca7e 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -48,7 +48,7 @@ CreateFrontendBaseAction(CompilerInstance &CI) { auto Act = CI.getFrontendOpts().ProgramAction; auto EmitsCIR = Act == EmitCIR || Act == EmitCIROnly; - auto IsImplementedCIROutput = EmitsCIR || Act == EmitLLVM; + auto IsImplementedCIROutput = EmitsCIR || Act == EmitLLVM || Act == EmitObj; if (UseCIR && !IsImplementedCIROutput) llvm::report_fatal_error("-fenable currently only works with " @@ -78,7 +78,11 @@ CreateFrontendBaseAction(CompilerInstance &CI) { } case EmitLLVMOnly: return std::make_unique(); case EmitCodeGenOnly: return std::make_unique(); - case EmitObj: return std::make_unique(); + case EmitObj: { + if (UseCIR) + return std::make_unique(); + return std::make_unique(); + } case ExtractAPI: return std::make_unique(); case FixIt: return std::make_unique(); diff --git a/clang/test/CIR/IRGen/basic.c b/clang/test/CIR/IRGen/basic.c index 5085b85de8ae..7314cc4263e7 100644 --- a/clang/test/CIR/IRGen/basic.c +++ b/clang/test/CIR/IRGen/basic.c @@ -1,9 +1,13 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -// RUN: FileCheck --input-file=%t.ll %s +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-obj %s -o %t.o +// RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ // XFAIL: * void foo() {} -// CHECK: define void @foo() -// CHECK-NEXT: ret void, -// CHECK-NEXT: } +// LLVM: define void @foo() +// LLVM-NEXT: ret void, +// LLVM-NEXT: } + +// OBJ: 0: c3 retq From 064b61775ba8fffba478fc7e667fc235cf988026 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 12 Nov 2021 01:08:34 -0500 Subject: [PATCH 0058/2301] [CIR] Support -fenable-clangir and -emit-cir via the driver This is a minimal implementation of support for -fenable-clangir and -emit-cir for the driver such that you can invoke ``` $ clang test.c -fenable-clangir -c $ clang test.c -fenable-clangir -S -emit-llvm $ clang test.c -fenable-clangir -emit-cir ``` --- clang/include/clang/Driver/Types.def | 2 ++ clang/lib/Driver/Driver.cpp | 5 +++++ clang/lib/Driver/ToolChains/Clang.cpp | 2 ++ clang/test/CIR/IRGen/basic.c | 4 ++++ 4 files changed, 13 insertions(+) diff --git a/clang/include/clang/Driver/Types.def b/clang/include/clang/Driver/Types.def index 2a59c1302f27..16dc446b1750 100644 --- a/clang/include/clang/Driver/Types.def +++ b/clang/include/clang/Driver/Types.def @@ -99,6 +99,8 @@ TYPE("ir", LLVM_BC, INVALID, "bc", phases TYPE("lto-ir", LTO_IR, INVALID, "s", phases::Compile, phases::Backend, phases::Assemble, phases::Link) TYPE("lto-bc", LTO_BC, INVALID, "o", phases::Compile, phases::Backend, phases::Assemble, phases::Link) +TYPE("cir", CIR, INVALID, "cir", phases::Compile, phases::Backend, phases::Assemble, phases::Link) + // Misc. TYPE("ast", AST, INVALID, "ast", phases::Compile, phases::Backend, phases::Assemble, phases::Link) TYPE("ifs", IFS, INVALID, "ifs", phases::IfsMerge) diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 39fa20ad9881..c0f10d7a6af0 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -5081,6 +5081,11 @@ Action *Driver::ConstructPhaseAction( return C.MakeAction(Input, types::TY_Remap); if (Args.hasArg(options::OPT_emit_ast)) return C.MakeAction(Input, types::TY_AST); + if (Args.hasArg(options::OPT_emit_cir)) { + assert(Args.hasArg(options::OPT_fclangir) && + "Clang only uses ClangIR with the -fclangir flag"); + return C.MakeAction(Input, types::TY_CIR); + } if (Args.hasArg(options::OPT_module_file_info)) return C.MakeAction(Input, types::TY_ModuleFile); if (Args.hasArg(options::OPT_verify_pch)) diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 5a20b1b3acd0..3b64eb113fbd 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5379,6 +5379,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } else if (JA.getType() == types::TY_LLVM_IR || JA.getType() == types::TY_LTO_IR) { CmdArgs.push_back("-emit-llvm"); + } else if (JA.getType() == types::TY_CIR) { + CmdArgs.push_back("-emit-cir"); } else if (JA.getType() == types::TY_LLVM_BC || JA.getType() == types::TY_LTO_BC) { // Emit textual llvm IR for AMDGPU offloading for -emit-llvm -S diff --git a/clang/test/CIR/IRGen/basic.c b/clang/test/CIR/IRGen/basic.c index 7314cc4263e7..d19003bcb36b 100644 --- a/clang/test/CIR/IRGen/basic.c +++ b/clang/test/CIR/IRGen/basic.c @@ -2,6 +2,10 @@ // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-obj %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o +// RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ // XFAIL: * void foo() {} From a696939819da51f4f1d7979605bf50138541b8da Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 12 Nov 2021 17:36:20 -0500 Subject: [PATCH 0059/2301] [CIR][NFC] Remove unused variable that was included for debugging --- clang/lib/CIR/LowerToLLVM.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 2e447f756328..7a0a9c0b5a97 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -121,9 +121,8 @@ class CIRConstantLowering mlir::LogicalResult matchAndRewrite(mlir::cir::ConstantOp op, mlir::PatternRewriter &rewriter) const override { - auto result = rewriter.replaceOpWithNewOp( - op, op.getType(), op.getValue()); - (void)result; + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getValue()); return mlir::LogicalResult::success(); } }; From d64f31ea4ac50ff1cd9a40b97ed194161730e4be Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 12 Nov 2021 17:33:11 -0500 Subject: [PATCH 0060/2301] [CIR] Copy the ConstantOp::fold from the tutorial There is an assert that a ConstantLike operation is foldable during lowering. For now just implement a trivial stubbed out version that assumes only one operand exists. --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 2 +- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index b3a1a9b4077b..1210bb74532f 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -66,7 +66,7 @@ def ConstantOp : CIR_Op<"cst", } }]; - // TODO: hasFolder, etc + let hasFolder = 1; } //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index c0fc1a08a04b..a54f8d630212 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -84,6 +84,13 @@ static void printConstantValue(OpAsmPrinter &p, cir::ConstantOp op, p.printAttribute(value); } +/// Trivial folding of constants from the tutorial. +OpFoldResult ConstantOp::fold(FoldAdaptor adaptor) { + assert(adaptor.getOperands().size() == 1 && + "ConstantOp::fold is only trivially implemented for single operands"); + return getValue(); +} + //===----------------------------------------------------------------------===// // ReturnOp //===----------------------------------------------------------------------===// From 1cd534310ce5f99ed336a569316f08ff6092a604 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 12 Nov 2021 17:35:51 -0500 Subject: [PATCH 0061/2301] [CIR] Also lower arithmetic to LLVM in ConverCIRToLLVMPass For some reason the output from mlir's lowering to LLVM Dialect is really strange. I see nothing wrong with the MLIR that's generated, though. func @foo() { %0 = memref.alloc() : memref %c1_i32 = arith.constant 1 : i32 cir.return } llvm.func @malloc(i64) -> !llvm.ptr llvm.func @foo() { %0 = llvm.mlir.constant(1 : index) : i64 %1 = llvm.mlir.null : !llvm.ptr %2 = llvm.getelementptr %1[%0] : (!llvm.ptr, i64) -> !llvm.ptr %3 = llvm.ptrtoint %2 : !llvm.ptr to i64 %4 = llvm.call @malloc(%3) : (i64) -> !llvm.ptr %5 = llvm.bitcast %4 : !llvm.ptr to !llvm.ptr %6 = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64)> %7 = llvm.insertvalue %5, %6[0] : !llvm.struct<(ptr, ptr, i64)> %8 = llvm.insertvalue %5, %7[1] : !llvm.struct<(ptr, ptr, i64)> %9 = llvm.mlir.constant(0 : index) : i64 %10 = llvm.insertvalue %9, %8[2] : !llvm.struct<(ptr, ptr, i64)> %11 = llvm.mlir.constant(1 : i32) : i32 llvm.return } --- clang/lib/CIR/LowerToLLVM.cpp | 2 ++ clang/test/CIR/IRGen/memref.cir | 23 ++++++++++++++--------- clang/tools/cir-tool/cir-tool.cpp | 7 +++++-- 3 files changed, 21 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 7a0a9c0b5a97..e09017749f6e 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" +#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" #include "mlir/Conversion/LLVMCommon/ConversionTarget.h" @@ -146,6 +147,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { populateCIRToStdConversionPatterns(patterns); populateCIRToMemRefConversionPatterns(patterns); populateAffineToStdConversionPatterns(patterns); + mlir::arith::populateArithToLLVMConversionPatterns(typeConverter, patterns); populateSCFToControlFlowConversionPatterns(patterns); populateFinalizeMemRefToLLVMConversionPatterns(typeConverter, patterns); populateFuncToLLVMConversionPatterns(typeConverter, patterns); diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index b23c4b4381e7..e650d8d75f6d 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-memref -o %t.mlir -// RUN: FileCheck --input-file=%t.mlir %s +// RUN: cir-tool %s -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=LLVM // XFAIL: * module { @@ -10,10 +10,15 @@ module { } } -// CHECK: module { -// CHECK-NEXT: func.func @foo() { -// CHECK-NEXT: %0 = memref.alloc() : memref -// CHECK-NEXT: {{.*}}arith.constant 1 : i32 -// CHECK-NEXT: cir.return -// CHECK-NEXT: } -// CHECK-NEXT: } +// MLIR: module { +// MLIR-NEXT: func.func @foo() { +// MLIR-NEXT: %0 = memref.alloc() : memref +// MLIR-NEXT: {{.*}}arith.constant 1 : i32 +// MLIR-NEXT: cir.return +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: module { +// LLVM-NEXT: llvm.func @malloc(i64) +// LLVM-NEXT: llvm.func @foo() { +// LLVM-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index de2a7574c559..b41112a68ebc 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -12,8 +12,10 @@ // //===----------------------------------------------------------------------===// +#include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Pass/PassRegistry.h" #include "mlir/Tools/mlir-opt/MlirOptMain.h" @@ -22,8 +24,9 @@ int main(int argc, char **argv) { // TODO: register needed MLIR passes for CIR? mlir::DialectRegistry registry; - registry.insert(); + registry.insert(); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertCIRToLLVMPass(); From 16e9429b37807b37f8ab437b0bfc006a47c8b5df Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 12 Nov 2021 17:34:46 -0500 Subject: [PATCH 0062/2301] [CIR] Propagate the types and operands of the cir.return instruction to mlir::ReturnOp This might be to flexible since we (probably) don't want to have multiple returns in CIR given the lack of such functionality in c++. But it was straight forward enough to include so just allow it for now. --- clang/lib/CIR/LowerToLLVM.cpp | 5 ++--- clang/test/CIR/IRGen/memref.cir | 16 ++++++++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 6 +----- 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index e09017749f6e..8b81c4a4b459 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -70,9 +70,8 @@ class CIRReturnLowering : public mlir::OpRewritePattern { mlir::LogicalResult matchAndRewrite(mlir::cir::ReturnOp op, mlir::PatternRewriter &rewriter) const override { - assert(op.getNumOperands() == 0 && - "we aren't handling non-zero operand count returns yet"); - rewriter.replaceOpWithNewOp(op); + rewriter.replaceOpWithNewOp(op, op->getResultTypes(), + op->getOperands()); return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index e650d8d75f6d..5a5979454cfa 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -8,6 +8,11 @@ module { %1 = cir.cst(1 : i32) : i32 cir.return } + + func.func @main() -> i32 { + %0 = cir.cst(1 : i32) : i32 + cir.return %0 : i32 + } } // MLIR: module { @@ -16,9 +21,20 @@ module { // MLIR-NEXT: {{.*}}arith.constant 1 : i32 // MLIR-NEXT: cir.return // MLIR-NEXT: } +// MLIR-NEXT: func @main() -> i32 { +// MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 +// MLIR-NEXT: cir.return %c1_i32 : i32 +// MLIR-NEXT: } // MLIR-NEXT: } // LLVM: module { // LLVM-NEXT: llvm.func @malloc(i64) // LLVM-NEXT: llvm.func @foo() { // LLVM-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// LLVM: llvm.func @main() -> i32 { +// LLVM-NEXT: %0 = llvm.mlir.constant(1 : i32) : i32 +// LLVM-NEXT: llvm.return %0 : i32 +// LLVM-NEXT: } +// LLVM-NEXT: } + + diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index a54f8d630212..989271f0114f 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -85,11 +85,7 @@ static void printConstantValue(OpAsmPrinter &p, cir::ConstantOp op, } /// Trivial folding of constants from the tutorial. -OpFoldResult ConstantOp::fold(FoldAdaptor adaptor) { - assert(adaptor.getOperands().size() == 1 && - "ConstantOp::fold is only trivially implemented for single operands"); - return getValue(); -} +OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } //===----------------------------------------------------------------------===// // ReturnOp From db81c90119925d38d282263e18992ba0ad29d14c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 12 Nov 2021 17:36:44 -0500 Subject: [PATCH 0063/2301] [CIR] Lower cir.return during CIRToMemRef MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I guess this pass is more along the lines of LowerCIRToX ∀ X other than llvm and then the other pass should be XToLLVM. --- clang/lib/CIR/LowerToLLVM.cpp | 5 +++-- clang/test/CIR/IRGen/memref.cir | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 8b81c4a4b459..ae184020dfa5 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -129,7 +129,7 @@ class CIRConstantLowering void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { patterns.add(patterns.getContext()); + CIRConstantLowering, CIRReturnLowering>(patterns.getContext()); } void populateCIRToStdConversionPatterns(mlir::RewritePatternSet &patterns) { @@ -166,7 +166,8 @@ void ConvertCIRToMemRefPass::runOnOperation() { target.addLegalDialect(); - target.addIllegalOp(); + target.addIllegalOp(); mlir::RewritePatternSet patterns(&getContext()); populateCIRToMemRefConversionPatterns(patterns); diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index 5a5979454cfa..e16b9502b97c 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -19,11 +19,11 @@ module { // MLIR-NEXT: func.func @foo() { // MLIR-NEXT: %0 = memref.alloc() : memref // MLIR-NEXT: {{.*}}arith.constant 1 : i32 -// MLIR-NEXT: cir.return +// MLIR-NEXT: return // MLIR-NEXT: } // MLIR-NEXT: func @main() -> i32 { // MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 -// MLIR-NEXT: cir.return %c1_i32 : i32 +// MLIR-NEXT: return %c1_i32 : i32 // MLIR-NEXT: } // MLIR-NEXT: } From d9c4f20db280871b6171e51083dc47b648f28db7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 12 Nov 2021 20:07:00 -0500 Subject: [PATCH 0064/2301] [CIR] Pull cir-to-memref out of cir-to-llvm and require them both for full lowering This mirrors the toy tutorial a little more closely where they lower to the in-tree dialects first and then from the in-tree dialects to llvm. --- clang/lib/CIR/LowerToLLVM.cpp | 15 ++++----------- clang/test/CIR/IRGen/basic.cir | 2 +- clang/test/CIR/IRGen/memref.cir | 2 +- 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index ae184020dfa5..96a8d091855a 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -132,10 +132,6 @@ void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { CIRConstantLowering, CIRReturnLowering>(patterns.getContext()); } -void populateCIRToStdConversionPatterns(mlir::RewritePatternSet &patterns) { - patterns.add(patterns.getContext()); -} - void ConvertCIRToLLVMPass::runOnOperation() { mlir::LLVMConversionTarget target(getContext()); target.addLegalOp(); @@ -143,8 +139,6 @@ void ConvertCIRToLLVMPass::runOnOperation() { mlir::LLVMTypeConverter typeConverter(&getContext()); mlir::RewritePatternSet patterns(&getContext()); - populateCIRToStdConversionPatterns(patterns); - populateCIRToMemRefConversionPatterns(patterns); populateAffineToStdConversionPatterns(patterns); mlir::arith::populateArithToLLVMConversionPatterns(typeConverter, patterns); populateSCFToControlFlowConversionPatterns(patterns); @@ -163,11 +157,9 @@ void ConvertCIRToMemRefPass::runOnOperation() { // whether we should have micro-conversions that do the minimal amount of work // or macro conversions that entiirely remove a dialect. target.addLegalOp(); - target.addLegalDialect(); - target.addIllegalOp(); + target + .addLegalDialect(); mlir::RewritePatternSet patterns(&getContext()); populateCIRToMemRefConversionPatterns(patterns); @@ -185,6 +177,7 @@ lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx) { mlir::PassManager pm(mlirCtx.get()); + pm.addPass(createConvertCIRToMemRefPass()); pm.addPass(createConvertCIRToLLVMPass()); auto result = !mlir::failed(pm.run(theModule)); diff --git a/clang/test/CIR/IRGen/basic.cir b/clang/test/CIR/IRGen/basic.cir index 8afcf2dbba6e..445d25dc8376 100644 --- a/clang/test/CIR/IRGen/basic.cir +++ b/clang/test/CIR/IRGen/basic.cir @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fenable-clangir -emit-llvm %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=LLVM -// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: cir-tool %s -cir-to-memref -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: mlir-translate -mlir-to-llvmir %t.mlir -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index e16b9502b97c..29b54a1a6ea1 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-memref -cir-to-llvm -o - | FileCheck %s -check-prefix=LLVM // XFAIL: * module { From ea6dca014ed4ed148a869032eae55cc223f773d7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 12 Nov 2021 20:09:09 -0500 Subject: [PATCH 0065/2301] [CIR] Implement simple CIRStore lowering This just simply takes the operands and forwards them to the memref variant for now. --- clang/lib/CIR/LowerToLLVM.cpp | 13 ++++++++----- clang/test/CIR/IRGen/memref.cir | 27 +++++++-------------------- 2 files changed, 15 insertions(+), 25 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 96a8d091855a..a52256e69557 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -101,14 +101,17 @@ class CIRLoadLowering : public mlir::OpRewritePattern { } }; -class CIRStoreLowering : public mlir::OpRewritePattern { +class CIRStoreLowering : public mlir::ConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + CIRStoreLowering(mlir::MLIRContext *ctx) + : mlir::ConversionPattern(mlir::cir::StoreOp::getOperationName(), 1, + ctx) {} mlir::LogicalResult - matchAndRewrite(mlir::cir::StoreOp op, - mlir::PatternRewriter &rewriter) const override { - assert(false && "NYI"); + matchAndRewrite(mlir::Operation *op, ArrayRef operands, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, operands[0], + operands[1]); return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index 29b54a1a6ea1..a6316df5ea10 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -3,38 +3,25 @@ // XFAIL: * module { - func.func @foo() { + func.func @foo() -> i32 { %0 = cir.alloca i32, cir.ptr , [cinit] %1 = cir.cst(1 : i32) : i32 - cir.return - } - - func.func @main() -> i32 { - %0 = cir.cst(1 : i32) : i32 - cir.return %0 : i32 + cir.store %1, %0 : i32, cir.ptr + cir.return %1 : i32 } } // MLIR: module { -// MLIR-NEXT: func.func @foo() { +// MLIR-NEXT: func.func @foo() -> i32 { // MLIR-NEXT: %0 = memref.alloc() : memref -// MLIR-NEXT: {{.*}}arith.constant 1 : i32 -// MLIR-NEXT: return -// MLIR-NEXT: } -// MLIR-NEXT: func @main() -> i32 { // MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 +// MLIR-NEXT: memref.store %c1_i32, %0[] : memref // MLIR-NEXT: return %c1_i32 : i32 // MLIR-NEXT: } // MLIR-NEXT: } // LLVM: module { -// LLVM-NEXT: llvm.func @malloc(i64) -// LLVM-NEXT: llvm.func @foo() { +// LLVM-NEXT: llvm.func @malloc(i64) -> !llvm.ptr +// LLVM-NEXT: llvm.func @foo() -> i32 { // LLVM-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 -// LLVM: llvm.func @main() -> i32 { -// LLVM-NEXT: %0 = llvm.mlir.constant(1 : i32) : i32 -// LLVM-NEXT: llvm.return %0 : i32 -// LLVM-NEXT: } -// LLVM-NEXT: } - From f5fb05d0368b9a2b41fdf5e9e8b7c8c36f39e4ac Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 12 Nov 2021 20:09:37 -0500 Subject: [PATCH 0066/2301] [CIR] Implement lowering of cir.load This just does the obvious thing and moves it's argument for cir over to memref. --- clang/lib/CIR/LowerToLLVM.cpp | 12 +++++++----- clang/test/CIR/IRGen/memref.cir | 13 ++++--------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index a52256e69557..7f10a9a85b23 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -89,14 +89,16 @@ class CIRAllocaLowering : public mlir::OpRewritePattern { } }; -class CIRLoadLowering : public mlir::OpRewritePattern { +class CIRLoadLowering : public mlir::ConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + CIRLoadLowering(mlir::MLIRContext *ctx) + : mlir::ConversionPattern(mlir::cir::LoadOp::getOperationName(), 1, ctx) { + } mlir::LogicalResult - matchAndRewrite(mlir::cir::LoadOp op, - mlir::PatternRewriter &rewriter) const override { - assert(false && "NYI"); + matchAndRewrite(mlir::Operation *op, ArrayRef operands, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, operands[0]); return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index a6316df5ea10..9822163706ca 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -1,5 +1,4 @@ // RUN: cir-tool %s -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-memref -cir-to-llvm -o - | FileCheck %s -check-prefix=LLVM // XFAIL: * module { @@ -7,7 +6,8 @@ module { %0 = cir.alloca i32, cir.ptr , [cinit] %1 = cir.cst(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr - cir.return %1 : i32 + %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 + cir.return %2 : i32 } } @@ -16,12 +16,7 @@ module { // MLIR-NEXT: %0 = memref.alloc() : memref // MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 // MLIR-NEXT: memref.store %c1_i32, %0[] : memref -// MLIR-NEXT: return %c1_i32 : i32 +// MLIR-NEXT: %1 = memref.load %0[] : memref +// MLIR-NEXT: return %1 : i32 // MLIR-NEXT: } // MLIR-NEXT: } - -// LLVM: module { -// LLVM-NEXT: llvm.func @malloc(i64) -> !llvm.ptr -// LLVM-NEXT: llvm.func @foo() -> i32 { -// LLVM-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 - From 5dcd7f9dbf21e4127c28a994b8a17deef6d18bc3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 18 Nov 2021 10:30:00 -0800 Subject: [PATCH 0067/2301] [CIR] Introduce SourceLocRAIIObject to track source locations while building CIR --- clang/lib/CIR/CIRBuilder.cpp | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index a82ad5de6249..4a6d20721fdb 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -180,6 +180,26 @@ class CIRBuildImpl { /// Per-module type mapping from clang AST to CIR. std::unique_ptr genTypes; + /// Use to track source locations across nested visitor traversals. + /// Always use a `SourceLocRAIIObject` to change currSrcLoc. + std::optional currSrcLoc; + class SourceLocRAIIObject { + CIRBuildImpl &P; + std::optional OldVal; + + public: + SourceLocRAIIObject(CIRBuildImpl &p, mlir::Location Value) : P(p) { + if (P.currSrcLoc) + OldVal = P.currSrcLoc; + P.currSrcLoc = Value; + } + + /// Can be used to restore the state early, before the dtor + /// is run. + void restore() { P.currSrcLoc = OldVal; } + ~SourceLocRAIIObject() { restore(); } + }; + /// Helper conversion from Clang source location to an MLIR location. mlir::Location getLoc(SourceLocation SLoc) { const SourceManager &SM = astCtx.getSourceManager(); @@ -663,9 +683,8 @@ class CIRBuildImpl { SrcAlloca.setInitAttr(InitStyleAttr::get(builder.getContext(), IS)); } } - assert(SrcAlloca && "find a better way to retrieve source location"); - builder.create(SrcAlloca.getLoc(), Value, - Addr.getPointer()); + assert(currSrcLoc && "must pass in source location"); + builder.create(*currSrcLoc, Value, Addr.getPointer()); } /// Store the specified rvalue into the specified @@ -681,6 +700,7 @@ class CIRBuildImpl { void buildScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue) { // TODO: this is where a lot of ObjC lifetime stuff would be done. mlir::Value value = buildScalarExpr(init); + SourceLocRAIIObject Loc{*this, getLoc(D->getSourceRange().getBegin())}; buldStoreThroughLValue(RValue::get(value), lvalue, D); return; } @@ -1104,6 +1124,8 @@ class CIRBuildImpl { RValue RV = buildAnyExpr(E->getRHS()); LValue LV = buildLValue(E->getLHS()); + + SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange().getBegin())}; buldStoreThroughLValue(RV, LV, nullptr /*InitDecl*/); assert(!astCtx.getLangOpts().OpenMP && "last priv cond not implemented"); return LV; From 6b2a73042e2932708e79f02ddfbeb37806f94aed Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 Nov 2021 18:38:37 -0800 Subject: [PATCH 0068/2301] [CIR] Add support for building LValues from UnaryOperator deref's --- clang/lib/CIR/CIRBuilder.cpp | 174 +++++++++++++++++++++++++++++++ clang/test/CIR/CodeGen/basic.cpp | 4 + 2 files changed, 178 insertions(+) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 4a6d20721fdb..e525e95fff85 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -38,6 +38,7 @@ #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ParentMap.h" +#include "clang/AST/RecordLayout.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" @@ -333,6 +334,15 @@ class CIRBuildImpl { R.LVType = Simple; return R; } + + // FIXME: only have one of these static methods. + static LValue makeAddr(RawAddress address, QualType T, LValueBaseInfo LBI) { + LValue R; + R.V = address.getPointer(); + R.Initialize(address.getAlignment(), T, LBI); + R.LVType = Simple; + return R; + } }; /// This trivial value class is used to represent the result of an @@ -1139,6 +1149,168 @@ class CIRBuildImpl { llvm_unreachable("bad evaluation kind"); } + /// FIXME: this could likely be a common helper and not necessarily related + /// with codegen. + /// Return the best known alignment for an unknown pointer to a + /// particular class. + CharUnits getClassPointerAlignment(const CXXRecordDecl *RD) { + if (!RD->hasDefinition()) + return CharUnits::One(); // Hopefully won't be used anywhere. + + auto &layout = astCtx.getASTRecordLayout(RD); + + // If the class is final, then we know that the pointer points to an + // object of that type and can use the full alignment. + if (RD->isEffectivelyFinal()) + return layout.getAlignment(); + + // Otherwise, we have to assume it could be a subclass. + return layout.getNonVirtualAlignment(); + } + + /// FIXME: this could likely be a common helper and not necessarily related + /// with codegen. + /// TODO: Add TBAAAccessInfo + CharUnits getNaturalPointeeTypeAlignment(QualType T, + LValueBaseInfo *BaseInfo) { + return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, + /* forPointeeType= */ true); + } + + /// FIXME: this could likely be a common helper and not necessarily related + /// with codegen. + /// TODO: Add TBAAAccessInfo + CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo, + bool forPointeeType) { + // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But + // that doesn't return the information we need to compute BaseInfo. + + // Honor alignment typedef attributes even on incomplete types. + // We also honor them straight for C++ class types, even as pointees; + // there's an expressivity gap here. + if (auto TT = T->getAs()) { + if (auto Align = TT->getDecl()->getMaxAlignment()) { + if (BaseInfo) + *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType); + return astCtx.toCharUnitsFromBits(Align); + } + } + + bool AlignForArray = T->isArrayType(); + + // Analyze the base element type, so we don't get confused by incomplete + // array types. + T = astCtx.getBaseElementType(T); + + if (T->isIncompleteType()) { + // We could try to replicate the logic from + // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the + // type is incomplete, so it's impossible to test. We could try to reuse + // getTypeAlignIfKnown, but that doesn't return the information we need + // to set BaseInfo. So just ignore the possibility that the alignment is + // greater than one. + if (BaseInfo) + *BaseInfo = LValueBaseInfo(AlignmentSource::Type); + return CharUnits::One(); + } + + if (BaseInfo) + *BaseInfo = LValueBaseInfo(AlignmentSource::Type); + + CharUnits Alignment; + const CXXRecordDecl *RD; + if (T.getQualifiers().hasUnaligned()) { + Alignment = CharUnits::One(); + } else if (forPointeeType && !AlignForArray && + (RD = T->getAsCXXRecordDecl())) { + // For C++ class pointees, we don't know whether we're pointing at a + // base or a complete object, so we generally need to use the + // non-virtual alignment. + Alignment = getClassPointerAlignment(RD); + } else { + Alignment = astCtx.getTypeAlignInChars(T); + } + + // Cap to the global maximum type alignment unless the alignment + // was somehow explicit on the type. + if (unsigned MaxAlign = astCtx.getLangOpts().MaxTypeAlign) { + if (Alignment.getQuantity() > MaxAlign && !astCtx.isAlignmentRequired(T)) + Alignment = CharUnits::fromQuantity(MaxAlign); + } + return Alignment; + } + + /// Given an expression of pointer type, try to + /// derive a more accurate bound on the alignment of the pointer. + RawAddress buildPointerWithAlignment(const Expr *E, + LValueBaseInfo *BaseInfo) { + // We allow this with ObjC object pointers because of fragile ABIs. + assert(E->getType()->isPointerType() || + E->getType()->isObjCObjectPointerType()); + E = E->IgnoreParens(); + + // Casts: + if (const CastExpr *CE = dyn_cast(E)) { + if (const auto *ECE = dyn_cast(CE)) + assert(0 && "not implemented"); + + switch (CE->getCastKind()) { + default: + assert(0 && "not implemented"); + // Nothing to do here... + case CK_LValueToRValue: + break; + } + } + + // Unary &. + if (const UnaryOperator *UO = dyn_cast(E)) { + assert(0 && "not implemented"); + // if (UO->getOpcode() == UO_AddrOf) { + // LValue LV = buildLValue(UO->getSubExpr()); + // if (BaseInfo) + // *BaseInfo = LV.getBaseInfo(); + // // TODO: TBBA info + // return LV.getAddress(); + // } + } + + // TODO: conditional operators, comma. + // Otherwise, use the alignment of the type. + CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), BaseInfo); + return RawAddress(buildScalarExpr(E), Align); + } + + LValue buildUnaryOpLValue(const UnaryOperator *E) { + // __extension__ doesn't affect lvalue-ness. + assert(E->getOpcode() != UO_Extension && "not implemented"); + + switch (E->getOpcode()) { + default: + llvm_unreachable("Unknown unary operator lvalue!"); + case UO_Deref: { + QualType T = E->getSubExpr()->getType()->getPointeeType(); + assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); + + LValueBaseInfo BaseInfo; + // TODO: add TBAAInfo + RawAddress Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo); + LValue LV = LValue::makeAddr(Addr, T, BaseInfo); + // TODO: set addr space + // TODO: ObjC/GC/__weak write barrier stuff. + return LV; + } + case UO_Real: + case UO_Imag: { + assert(0 && "not implemented"); + } + case UO_PreInc: + case UO_PreDec: { + assert(0 && "not implemented"); + } + } + } + /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. @@ -1154,6 +1326,8 @@ class CIRBuildImpl { return buildBinaryOperatorLValue(cast(E)); case Expr::DeclRefExprClass: return buildDeclRefLValue(cast(E)); + case Expr::UnaryOperatorClass: + return buildUnaryOpLValue(cast(E)); case Expr::ObjCPropertyRefExprClass: llvm_unreachable("cannot emit a property reference directly"); } diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index ed8ff40691a5..91fcb64dffb2 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -25,6 +25,7 @@ int *p2() { int *p = nullptr; int x = 0; p = &x; + *p = 42; return p; } @@ -32,3 +33,6 @@ int *p2() { // CHECK: %0 = cir.alloca i32, cir.ptr , [cinit] // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, [cinit] // CHECK: cir.store %0, %1 : !cir.ptr, cir.ptr > +// CHECK: %4 = cir.cst(42 : i32) : i32 +// CHECK-NEXT: %5 = cir.load %1 lvalue_to_rvalue : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %4, %5 : i32, cir.ptr From 26832e46165993cd34d64b48f21dee28247525ce Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 23 Nov 2021 23:43:30 -0500 Subject: [PATCH 0069/2301] [CIR] Implement alignment for cir.alloca and pass alignment for VarDecls * Add an alignment attribute to the cir.alloca instruction in CIROps.td * Add an argument to `CIRBuildImpl::declare` to accept the alignment. * Get an `mlir::IntegerAttr` from the `alignment` and pass it to the alloca. * Ask the `ASTContext` for the alignment for `ParmVarDecl`s. * Fix the tests to account for this change. --- clang/lib/CIR/CIRBuilder.cpp | 25 +++++++++++++--------- clang/lib/CIR/LowerToLLVM.cpp | 2 +- clang/test/CIR/CodeGen/basic.c | 4 ++-- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 11 +++++----- 4 files changed, 24 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index e525e95fff85..b093ebcfb84c 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -213,7 +213,8 @@ class CIRBuildImpl { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. mlir::LogicalResult declare(const Decl *var, QualType T, mlir::Location loc, - mlir::Value &addr, bool IsParam = false) { + CharUnits alignment, mlir::Value &addr, + bool IsParam = false) { const auto *namedVar = dyn_cast_or_null(var); assert(namedVar && "Needs a named decl"); @@ -224,9 +225,14 @@ class CIRBuildImpl { auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), localVarTy); + auto alignIntAttr = + mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), + alignment.getQuantity()); + auto localVarAddr = builder.create( loc, /*addr type*/ localVarPtrTy, /*var type*/ localVarTy, - IsParam ? InitStyle::paraminit : InitStyle::uninitialized); + IsParam ? InitStyle::paraminit : InitStyle::uninitialized, + alignIntAttr); auto *parentBlock = localVarAddr->getBlock(); localVarAddr->moveBefore(&parentBlock->front()); @@ -604,7 +610,8 @@ class CIRBuildImpl { // TODO: track source location range... mlir::Value addr; - if (failed(declare(&D, Ty, getLoc(D.getSourceRange().getBegin()), addr))) { + if (failed(declare(&D, Ty, getLoc(D.getSourceRange().getBegin()), alignment, + addr))) { theModule.emitError("Cannot declare variable"); return emission; } @@ -1572,10 +1579,11 @@ class CIRBuildImpl { llvm::zip(FD->parameters(), entryBlock.getArguments())) { auto *paramVar = std::get<0>(nameValue); auto paramVal = std::get<1>(nameValue); + auto alignment = astCtx.getDeclAlign(paramVar); mlir::Value addr; if (failed(declare(paramVar, paramVar->getType(), - getLoc(paramVar->getSourceRange().getBegin()), addr, - true /*param*/))) + getLoc(paramVar->getSourceRange().getBegin()), + alignment, addr, true /*param*/))) return nullptr; // Store params in local storage. FIXME: is this really needed // at this level of representation? @@ -1629,9 +1637,7 @@ void CIRContext::Initialize(clang::ASTContext &astCtx) { builder = std::make_unique(*mlirCtx.get(), astCtx); } -void CIRContext::verifyModule() { - builder->verifyModule(); -} +void CIRContext::verifyModule() { builder->verifyModule(); } bool CIRContext::EmitFunction(const FunctionDecl *FD) { CIRCodeGenFunction CCGF{}; @@ -1652,5 +1658,4 @@ bool CIRContext::HandleTopLevelDecl(clang::DeclGroupRef D) { return true; } -void CIRContext::HandleTranslationUnit(ASTContext &C) { -} +void CIRContext::HandleTranslationUnit(ASTContext &C) {} diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 7f10a9a85b23..f144a72f20b8 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -83,7 +83,7 @@ class CIRAllocaLowering : public mlir::OpRewritePattern { mlir::LogicalResult matchAndRewrite(mlir::cir::AllocaOp op, mlir::PatternRewriter &rewriter) const override { - auto ty = mlir::MemRefType::get({}, op.getType()); + auto ty = mlir::MemRefType::get({}, op.getAllocaType()); rewriter.replaceOpWithNewOp(op, ty); return mlir::LogicalResult::success(); } diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 60a07642687d..f23ae56aef92 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -7,7 +7,7 @@ int foo(int i) { // CHECK: module { // CHECK-NEXT: func @foo(%arg0: i32) -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [paraminit] +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT: %1 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 // CHECK-NEXT: cir.return %1 : i32 @@ -25,6 +25,6 @@ int f3() { } // CHECK: func @f3() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [cinit] +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 1210bb74532f..f7824a82f230 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -101,8 +101,8 @@ class AllocaTypesMatchWith().getPointee()">]> { let summary = "local variable"; let description = [{ @@ -122,16 +122,17 @@ def AllocaOp : CIR_Op<"alloca", [ }]; let arguments = (ins - TypeAttr:$type, + TypeAttr:$allocaType, // FIXME: add "uninitialzed" as default mode - Arg:$init + Arg:$init, + ConfinedAttr, [IntMinValue<0>]>:$alignment ); let results = (outs Res]>:$addr); let assemblyFormat = [{ - $type `,` `cir.ptr` type($addr) `,` `[` $init `]` attr-dict + $allocaType `,` `cir.ptr` type($addr) `,` `[` $init `]` attr-dict }]; let hasVerifier = 0; From 084fd7bd0ab2f30fa40fdfafedccb3a64c0ff5b2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 24 Nov 2021 00:10:32 -0500 Subject: [PATCH 0070/2301] [CIR] Propagate the alignment information to llvm dialect lowering This is pretty straightforward and all we have to do here is to pass the alignment information along. --- clang/lib/CIR/LowerToLLVM.cpp | 3 ++- clang/test/CIR/IRGen/memref.cir | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index f144a72f20b8..94beb723dd1d 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -84,7 +84,8 @@ class CIRAllocaLowering : public mlir::OpRewritePattern { matchAndRewrite(mlir::cir::AllocaOp op, mlir::PatternRewriter &rewriter) const override { auto ty = mlir::MemRefType::get({}, op.getAllocaType()); - rewriter.replaceOpWithNewOp(op, ty); + rewriter.replaceOpWithNewOp(op, ty, + op.getAlignmentAttr()); return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index 9822163706ca..776ba92504ab 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -3,7 +3,7 @@ module { func.func @foo() -> i32 { - %0 = cir.alloca i32, cir.ptr , [cinit] + %0 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} %1 = cir.cst(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 @@ -13,7 +13,7 @@ module { // MLIR: module { // MLIR-NEXT: func.func @foo() -> i32 { -// MLIR-NEXT: %0 = memref.alloc() : memref +// MLIR-NEXT: %0 = memref.alloc() {alignment = 4 : i64} : memref // MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 // MLIR-NEXT: memref.store %c1_i32, %0[] : memref // MLIR-NEXT: %1 = memref.load %0[] : memref From 4490a243900e3fbe1a9c24fb6f783b5a3dae19dd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 24 Nov 2021 02:10:35 -0500 Subject: [PATCH 0071/2301] [CIR][NFC] Remove meaningless comment --- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 989271f0114f..6ec25ab4af93 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -84,7 +84,6 @@ static void printConstantValue(OpAsmPrinter &p, cir::ConstantOp op, p.printAttribute(value); } -/// Trivial folding of constants from the tutorial. OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } //===----------------------------------------------------------------------===// From 4713bf0c61844048303ec5f5bfc31432aa6854ec Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 22 Nov 2021 19:15:51 -0800 Subject: [PATCH 0072/2301] [CIR] Add cir.if operation --- mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 8 +++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 61 +++++++++++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 52 ++++++++++++++++ 3 files changed, 121 insertions(+) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index 373a845bf854..590c67e9ed62 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -13,10 +13,12 @@ #ifndef MLIR_DIALECT_CIR_CIRDIALECT_H_ #define MLIR_DIALECT_CIR_CIRDIALECT_H_ +#include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Dialect.h" #include "mlir/IR/OpDefinition.h" +#include "mlir/Interfaces/ControlFlowInterfaces.h" #include "mlir/Interfaces/SideEffectInterfaces.h" namespace mlir { @@ -30,6 +32,12 @@ using FuncOp = func::FuncOp; #include "mlir/Dialect/CIR/IR/CIROpsEnums.h.inc" #include "mlir/Dialect/CIR/IR/CIRTypes.h" +namespace mlir { +namespace cir { +void buildTerminatedBody(OpBuilder &builder, Location loc); +} // namespace cir +} // namespace mlir + #define GET_OP_CLASSES #include "mlir/Dialect/CIR/IR/CIROps.h.inc" diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index f7824a82f230..89167bd4bc7a 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -21,6 +21,7 @@ include "mlir/Dialect/CIR/IR/CIRAttrs.td" include "mlir/IR/EnumAttr.td" include "mlir/IR/SymbolInterfaces.td" include "mlir/IR/BuiltinAttributeInterfaces.td" +include "mlir/Interfaces/ControlFlowInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" //===----------------------------------------------------------------------===// @@ -239,5 +240,65 @@ def ReturnOp : CIR_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// IfOp +//===----------------------------------------------------------------------===// +def IfOp : CIR_Op<"if", + [DeclareOpInterfaceMethods, + SingleBlock, RecursivelySpeculatable, AutomaticAllocationScope, + NoRegionArguments]> { + let summary = "if-then-else operation"; + let description = [{ + The `scf.if` operation represents an if-then-else construct for + conditionally executing two regions of code. The operand to an if operation + is a boolean value. For example: + + ```mlir + cir.if %b { + ... + } else { + ... + } + ``` + + "cir.if" defines no values and the 'else' can be omitted. + + Example: + + ```mlir + cir.if %b { + ... + } + ``` + }]; + let arguments = (ins I1:$condition); + let regions = (region SizedRegion<1>:$thenRegion, AnyRegion:$elseRegion); + + let skipDefaultBuilders = 1; + let builders = [ + OpBuilder<(ins "Value":$cond, "bool":$withElseRegion)>, + OpBuilder<(ins "Value":$cond, + CArg<"function_ref", + "buildTerminatedBody">:$thenBuilder, + CArg<"function_ref", + "nullptr">:$elseBuilder)> + ]; + + let extraClassDeclaration = [{ + OpBuilder getThenBodyBuilder(OpBuilder::Listener *listener = nullptr) { + Block* body = getBody(0); + return OpBuilder::atBlockTerminator(body, listener); + } + OpBuilder getElseBodyBuilder(OpBuilder::Listener *listener = nullptr) { + Block* body = getBody(1); + return OpBuilder::atBlockTerminator(body, listener); + } + Block* thenBlock(); + Block* elseBlock(); + }]; + + // TODO: let hasCanonicalizer = 1; +} + #endif // MLIR_CIR_DIALECT_CIR_OPS diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 6ec25ab4af93..b9ef45417666 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -122,6 +122,58 @@ mlir::LogicalResult ReturnOp::verify() { << ")"; } +//===----------------------------------------------------------------------===// +// IfOp +//===----------------------------------------------------------------------===// + +Block *IfOp::thenBlock() { return &getThenRegion().back(); } +Block *IfOp::elseBlock() { + Region &r = getElseRegion(); + if (r.empty()) + return nullptr; + return &r.back(); +} + +/// Default callback for IfOp builders. Inserts nothing for now. +void mlir::cir::buildTerminatedBody(OpBuilder &builder, Location loc) {} + +/// Given the region at `index`, or the parent operation if `index` is None, +/// return the successor regions. These are the regions that may be selected +/// during the flow of control. `operands` is a set of optional attributes that +/// correspond to a constant value for each operand, or null if that operand is +/// not a constant. +void IfOp::getSuccessorRegions(mlir::RegionBranchPoint point, + SmallVectorImpl ®ions) { + // The `then` and the `else` region branch back to the parent operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor()); + return; + } + + // Don't consider the else region if it is empty. + Region *elseRegion = &this->getElseRegion(); + if (elseRegion->empty()) + elseRegion = nullptr; + + // Otherwise, the successor is dependent on the condition. + // bool condition; + // if (auto condAttr = operands.front().dyn_cast_or_null()) { + // assert(0 && "not implemented"); + // condition = condAttr.getValue().isOneValue(); + // Add the successor regions using the condition. + // regions.push_back(RegionSuccessor(condition ? &thenRegion() : + // elseRegion)); + // return; + // } + + // If the condition isn't constant, both regions may be executed. + regions.push_back(RegionSuccessor(&getThenRegion())); + // If the else region does not exist, it is not a viable successor. + if (elseRegion) + regions.push_back(RegionSuccessor(elseRegion)); + return; +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From eab21502f14f5ccdd2eddc2f0f9f1372bb61200c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 22 Nov 2021 19:14:50 -0800 Subject: [PATCH 0073/2301] [CIR] Add initial codegen for if statements - Implement more parts of cir.if, like builders. - Add logic to emit code for if statement AST node. - Add skeleton to implement IntegralToBoolean cast. This isn't completed yet and currently crashes. Next step here is to implement the cast and introduce tests for this. --- clang/lib/CIR/CIRBuilder.cpp | 282 ++++++++++++++++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 20 ++ 2 files changed, 301 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index b093ebcfb84c..dc425491c203 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -475,10 +475,14 @@ class CIRBuildImpl { Builder.getLoc(E->getExprLoc()), Ty, mlir::cir::NullAttr::get(Builder.builder.getContext(), Ty)); } + case CK_IntegralToBoolean: { + return buildIntToBoolConversion(Visit(E)); + } default: emitError(Builder.getLoc(CE->getExprLoc()), "cast kind not implemented: '") << CE->getCastKindName() << "'"; + assert(0 && "not implemented"); return nullptr; } } @@ -497,6 +501,124 @@ class CIRBuildImpl { return {}; } + mlir::Value buildIntToBoolConversion(mlir::Value V) { + // Because of the type rules of C, we often end up computing a + // logical value, then zero extending it to int, then wanting it + // as a logical value again. TODO: optimize this common case here + // or leave it for later CIR passes? + assert(0 && "not implemented"); + // return Builder.CreateIsNotNull(V, "tobool"); + return nullptr; + } + + /// EmitConversionToBool - Convert the specified expression value to a + /// boolean (i1) truth value. This is equivalent to "Val != 0". + mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType) { + assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); + + if (SrcType->isRealFloatingType()) + assert(0 && "not implemented"); + + if (const MemberPointerType *MPT = dyn_cast(SrcType)) + assert(0 && "not implemented"); + + assert((SrcType->isIntegerType() || + Src.getType().isa<::mlir::cir::PointerType>()) && + "Unknown scalar type to convert"); + + assert(Src.getType().isa() && + "pointer source not implemented"); + return buildIntToBoolConversion(Src); + } + + /// Emit a conversion from the specified type to the specified destination + /// type, both of which are CIR scalar types. + /// TODO: do we need ScalarConversionOpts here? Should be done in another + /// pass. + mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcType, + QualType DstType, SourceLocation Loc) { + if (SrcType->isFixedPointType()) { + assert(0 && "not implemented"); + } else if (DstType->isFixedPointType()) { + assert(0 && "not implemented"); + } + + SrcType = Builder.astCtx.getCanonicalType(SrcType); + DstType = Builder.astCtx.getCanonicalType(DstType); + if (SrcType == DstType) + return Src; + + if (DstType->isVoidType()) + return nullptr; + mlir::Type SrcTy = Src.getType(); + + // Handle conversions to bool first, they are special: comparisons against + // 0. + if (DstType->isBooleanType()) + return buildConversionToBool(Src, SrcType); + + mlir::Type DstTy = Builder.getCIRType(DstType); + + // Cast from half through float if half isn't a native type. + if (SrcType->isHalfType() && + !Builder.astCtx.getLangOpts().NativeHalfType) { + assert(0 && "not implemented"); + } + + // LLVM codegen ignore conversions like int -> uint, we should probably + // emit it here in case lowering to sanitizers dialect at some point. + if (SrcTy == DstTy) { + assert(0 && "not implemented"); + } + + // Handle pointer conversions next: pointers can only be converted to/from + // other pointers and integers. + if (DstTy.isa<::mlir::cir::PointerType>()) { + assert(0 && "not implemented"); + } + + if (SrcTy.isa<::mlir::cir::PointerType>()) { + // Must be an ptr to int cast. + assert(DstTy.isa() && "not ptr->int?"); + assert(0 && "not implemented"); + } + + // A scalar can be splatted to an extended vector of the same element type + if (DstType->isExtVectorType() && !SrcType->isVectorType()) { + // Sema should add casts to make sure that the source expression's type + // is the same as the vector's element type (sans qualifiers) + assert( + DstType->castAs()->getElementType().getTypePtr() == + SrcType.getTypePtr() && + "Splatted expr doesn't match with vector element type?"); + + assert(0 && "not implemented"); + } + + if (SrcType->isMatrixType() && DstType->isMatrixType()) + assert(0 && "not implemented"); + + // Finally, we have the arithmetic types: real int/float. + assert(0 && "not implemented"); + mlir::Value Res = nullptr; + mlir::Type ResTy = DstTy; + + // TODO: implement CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) + + // Cast to half through float if half isn't a native type. + if (DstType->isHalfType() && + !Builder.astCtx.getLangOpts().NativeHalfType) { + assert(0 && "not implemented"); + } + + // TODO: Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); + if (DstTy != ResTy) { + assert(0 && "not implemented"); + } + + return Res; + } + // Leaves. mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { mlir::Type Ty = Builder.getCIRType(E->getType()); @@ -999,6 +1121,17 @@ class CIRBuildImpl { return ScalarExprEmitter(*CurCCGF, *this).Visit(const_cast(E)); } + /// Emit a conversion from the specified type to the specified destination + /// type, both of which are CIR scalar types. + mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcTy, + QualType DstTy, SourceLocation Loc) { + assert(CIRCodeGenFunction::hasScalarEvaluationKind(SrcTy) && + CIRCodeGenFunction::hasScalarEvaluationKind(DstTy) && + "Invalid scalar expression to emit"); + return ScalarExprEmitter(*CurCCGF, *this) + .buildScalarConversion(Src, SrcTy, DstTy, Loc); + } + mlir::LogicalResult buildReturnStmt(const ReturnStmt &S) { assert(!(astCtx.getLangOpts().ElideConstructors && S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) && @@ -1351,6 +1484,143 @@ class CIRBuildImpl { buildLValue(E); } + /// If the specified expression does not fold + /// to a constant, or if it does but contains a label, return false. If it + /// constant folds return true and set the boolean result in Result. + bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &ResultBool, + bool AllowLabels) { + llvm::APSInt ResultInt; + if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels)) + return false; + + ResultBool = ResultInt.getBoolValue(); + return true; + } + + /// Return true if the statement contains a label in it. If + /// this statement is not executed normally, it not containing a label means + /// that we can just remove the code. + bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false) { + // Null statement, not a label! + if (!S) + return false; + + // If this is a label, we have to emit the code, consider something like: + // if (0) { ... foo: bar(); } goto foo; + // + // TODO: If anyone cared, we could track __label__'s, since we know that you + // can't jump to one from outside their declared region. + if (isa(S)) + return true; + + // If this is a case/default statement, and we haven't seen a switch, we + // have to emit the code. + if (isa(S) && !IgnoreCaseStmts) + return true; + + // If this is a switch statement, we want to ignore cases below it. + if (isa(S)) + IgnoreCaseStmts = true; + + // Scan subexpressions for verboten labels. + for (const Stmt *SubStmt : S->children()) + if (ContainsLabel(SubStmt, IgnoreCaseStmts)) + return true; + + return false; + } + + /// If the specified expression does not fold + /// to a constant, or if it does but contains a label, return false. If it + /// constant folds return true and set the folded value. + bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt, + bool AllowLabels) { + // FIXME: Rename and handle conversion of other evaluatable things + // to bool. + Expr::EvalResult Result; + if (!Cond->EvaluateAsInt(Result, astCtx)) + return false; // Not foldable, not integer or not fully evaluatable. + + llvm::APSInt Int = Result.Val.getInt(); + if (!AllowLabels && ContainsLabel(Cond)) + return false; // Contains a label. + + ResultInt = Int; + return true; + } + + /// Perform the usual unary conversions on the specified + /// expression and compare the result against zero, returning an Int1Ty value. + mlir::Value evaluateExprAsBool(const Expr *E) { + // TODO: PGO + if (const MemberPointerType *MPT = + E->getType()->getAs()) { + assert(0 && "not implemented"); + } + + QualType BoolTy = astCtx.BoolTy; + SourceLocation Loc = E->getExprLoc(); + // TODO: CGFPOptionsRAII for FP stuff. + assert(!E->getType()->isAnyComplexType() && + "complex to scalar not implemented"); + return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc); + } + + /// Emit an if on a boolean condition to the specified blocks. + /// FIXME: Based on the condition, this might try to simplify the codegen of + /// the conditional based on the branch. TrueCount should be the number of + /// times we expect the condition to evaluate to true based on PGO data. We + /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr + /// for extra ideas). + void buildIfOnBoolExpr(const Expr *cond, mlir::Location loc, + const Stmt *thenS, const Stmt *elseS) { + // TODO: scoped ApplyDebugLocation DL(*this, Cond); + // TODO: __builtin_unpredictable and profile counts? + cond = cond->IgnoreParens(); + mlir::Value condV = evaluateExprAsBool(cond); + builder.create( + loc, condV, + /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { (void)buildStmt(thenS); }, + /*elseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + if (elseS) + return; + (void)buildStmt(elseS); + }); + } + + mlir::LogicalResult buildIfStmt(const IfStmt &S) { + // The else branch of a consteval if statement is always the only branch + // that can be runtime evaluated. + assert(!S.isConsteval() && "not implemented"); + + // C99 6.8.4.1: The first substatement is executed if the expression + // compares unequal to 0. The condition must be a scalar type. + // TODO: add cir.scope, add a new scoped symbol table as well. + // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); + if (S.getInit()) + if (buildStmt(S.getInit()).failed()) + return mlir::failure(); + + if (S.getConditionVariable()) + buildDecl(*S.getConditionVariable()); + + // If the condition constant folds and can be elided, try to avoid emitting + // the condition and the dead arm of the if/else. + // FIXME: should this be done as part of a constant folder pass instead? + bool CondConstant; + if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, + S.isConstexpr())) { + assert(0 && "not implemented"); + } + + // TODO: PGO and likelihood. + buildIfOnBoolExpr(S.getCond(), getLoc(S.getSourceRange().getBegin()), + S.getThen(), S.getElse()); + return mlir::success(); + } + mlir::LogicalResult buildStmt(const Stmt *S) { if (mlir::succeeded(buildSimpleStmt(S))) return mlir::success(); @@ -1423,8 +1693,11 @@ class CIRBuildImpl { break; } - case Stmt::IndirectGotoStmtClass: case Stmt::IfStmtClass: + if (buildIfStmt(cast(*S)).failed()) + return mlir::failure(); + break; + case Stmt::IndirectGotoStmtClass: case Stmt::WhileStmtClass: case Stmt::DoStmtClass: case Stmt::ForStmtClass: @@ -1532,6 +1805,13 @@ class CIRBuildImpl { // Create a scope in the symbol table to hold variable declarations local // to this compound statement. SymTableScopeTy varScope(symbolTable); + if (buildCompoundStmtWithoutScope(S).failed()) + return mlir::failure(); + + return mlir::success(); + } + + mlir::LogicalResult buildCompoundStmtWithoutScope(const CompoundStmt &S) { for (auto *CurStmt : S.body()) if (buildStmt(CurStmt).failed()) return mlir::failure(); diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index b9ef45417666..764852eb2c00 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -174,6 +174,26 @@ void IfOp::getSuccessorRegions(mlir::RegionBranchPoint point, return; } +void IfOp::build(OpBuilder &builder, OperationState &result, Value cond, + function_ref thenBuilder, + function_ref elseBuilder) { + assert(thenBuilder && "the builder callback for 'then' must be present"); + + result.addOperands(cond); + + OpBuilder::InsertionGuard guard(builder); + Region *thenRegion = result.addRegion(); + builder.createBlock(thenRegion); + thenBuilder(builder, result.location); + + Region *elseRegion = result.addRegion(); + if (!elseBuilder) + return; + + builder.createBlock(elseRegion); + elseBuilder(builder, result.location); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From a2cdb36aee09b06f9dfe9bd9e003b11d61625886 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 24 Nov 2021 02:52:55 -0500 Subject: [PATCH 0074/2301] [CIR] Register transform passes for cir-tool This lets you pass args such as `-canonicalize` for experimentation. --- clang/tools/cir-tool/CMakeLists.txt | 6 ++++-- clang/tools/cir-tool/cir-tool.cpp | 3 +++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/clang/tools/cir-tool/CMakeLists.txt b/clang/tools/cir-tool/CMakeLists.txt index f55786351d20..ca65769d5455 100644 --- a/clang/tools/cir-tool/CMakeLists.txt +++ b/clang/tools/cir-tool/CMakeLists.txt @@ -11,12 +11,14 @@ target_link_libraries(cir-tool PRIVATE clangCIR MLIRAnalysis - MLIRIR - MLIROptLib MLIRCIR + MLIRDialect + MLIRIR MLIRMemRefDialect + MLIROptLib MLIRParser MLIRPass MLIRSideEffectInterfaces MLIRTransforms + MLIRTransformUtils ) diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index b41112a68ebc..d43576f1490c 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -17,6 +17,7 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/InitAllPasses.h" #include "mlir/Pass/PassRegistry.h" #include "mlir/Tools/mlir-opt/MlirOptMain.h" #include "clang/CIR/Passes.h" @@ -35,6 +36,8 @@ int main(int argc, char **argv) { return cir::createConvertCIRToMemRefPass(); }); + mlir::registerTransformsPasses(); + return failed(MlirOptMain( argc, argv, "Clang IR analysis and optimization tool\n", registry)); } From 1ae70e1f7b1c6668ea35731e5b4271ad361c5288 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 24 Nov 2021 02:54:22 -0500 Subject: [PATCH 0075/2301] [CIR] Switch cir.alloca to lower to memref.alloca instead of memref.alloc This was a typo, simply fix it here. --- clang/lib/CIR/LowerToLLVM.cpp | 4 ++-- clang/test/CIR/IRGen/memref.cir | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 94beb723dd1d..d640b30ef08f 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -84,8 +84,8 @@ class CIRAllocaLowering : public mlir::OpRewritePattern { matchAndRewrite(mlir::cir::AllocaOp op, mlir::PatternRewriter &rewriter) const override { auto ty = mlir::MemRefType::get({}, op.getAllocaType()); - rewriter.replaceOpWithNewOp(op, ty, - op.getAlignmentAttr()); + rewriter.replaceOpWithNewOp(op, ty, + op.getAlignmentAttr()); return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index 776ba92504ab..1e96a36100f8 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -13,7 +13,7 @@ module { // MLIR: module { // MLIR-NEXT: func.func @foo() -> i32 { -// MLIR-NEXT: %0 = memref.alloc() {alignment = 4 : i64} : memref +// MLIR-NEXT: %0 = memref.alloca() {alignment = 4 : i64} : memref // MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 // MLIR-NEXT: memref.store %c1_i32, %0[] : memref // MLIR-NEXT: %1 = memref.load %0[] : memref From 640f0c12ee5985861daeb5c47997e62c25cb4f04 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 24 Nov 2021 02:55:26 -0500 Subject: [PATCH 0076/2301] [CIR][NFC] Rip driver-specific testing out to a new test file --- clang/test/CIR/IRGen/basic.c | 4 ---- clang/test/CIR/driver.c | 21 +++++++++++++++++++++ 2 files changed, 21 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/driver.c diff --git a/clang/test/CIR/IRGen/basic.c b/clang/test/CIR/IRGen/basic.c index d19003bcb36b..7314cc4263e7 100644 --- a/clang/test/CIR/IRGen/basic.c +++ b/clang/test/CIR/IRGen/basic.c @@ -2,10 +2,6 @@ // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-obj %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ -// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o %t.ll -// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o -// RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ // XFAIL: * void foo() {} diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c new file mode 100644 index 000000000000..d1457d73d848 --- /dev/null +++ b/clang/test/CIR/driver.c @@ -0,0 +1,21 @@ +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -S -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o +// RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ +// XFAIL: * + +void foo() {} + +// CIR: module { +// CIR-NEXT: func @foo() { +// CIR-NEXT: cir.return +// CIR-NEXT: } +// CIR-NEXT: } + +// LLVM: define void @foo() +// LLVM-NEXT: ret void, +// LLVM-NEXT: } + +// OBJ: 0: c3 retq From 69780ecbfbe4fef612374ae94966ee371ef5f743 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 24 Nov 2021 02:56:06 -0500 Subject: [PATCH 0077/2301] [CIR] Add a test to confirm that `clang` can handle `cir` input --- clang/test/CIR/clang-handles-cir-input.cir | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 clang/test/CIR/clang-handles-cir-input.cir diff --git a/clang/test/CIR/clang-handles-cir-input.cir b/clang/test/CIR/clang-handles-cir-input.cir new file mode 100644 index 000000000000..f1dd31346298 --- /dev/null +++ b/clang/test/CIR/clang-handles-cir-input.cir @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=LLVM +// XFAIL: * + +module { + func.func @foo() { + cir.return + } +} + +// LLVM: define void @foo() +// LLVM-NEXT: ret void, +// LLVM-NEXT: } From 94d544f23bf8e3ac5d2eaea6d6b997fedd5944ca Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 29 Nov 2021 17:32:04 -0500 Subject: [PATCH 0078/2301] [CIR][NFC] Restructure IRGen and driver related tests --- clang/test/CIR/IRGen/memref.cir | 12 ++++++++++++ clang/test/CIR/{IRGen/basic.c => cc1.c} | 0 .../CIR/{clang-handles-cir-input.cir => cc1.cir} | 0 clang/test/CIR/{IRGen/basic.cir => cirtool.cir} | 2 -- 4 files changed, 12 insertions(+), 2 deletions(-) rename clang/test/CIR/{IRGen/basic.c => cc1.c} (100%) rename clang/test/CIR/{clang-handles-cir-input.cir => cc1.cir} (100%) rename clang/test/CIR/{IRGen/basic.cir => cirtool.cir} (76%) diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index 1e96a36100f8..1d8f09609bdc 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -1,4 +1,5 @@ // RUN: cir-tool %s -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM // XFAIL: * module { @@ -20,3 +21,14 @@ module { // MLIR-NEXT: return %1 : i32 // MLIR-NEXT: } // MLIR-NEXT: } + +// LLVM: define i32 @foo() +// LLVM-NEXT: %1 = alloca i32, i64 +// LLVM-NEXT: %2 = insertvalue { ptr, ptr, i64 } undef, ptr %1, 0 +// LLVM-NEXT: %3 = insertvalue { ptr, ptr, i64 } %2, ptr %1, 1 +// LLVM-NEXT: %4 = insertvalue { ptr, ptr, i64 } %3, i64 0, 2 +// LLVM-NEXT: %5 = extractvalue { ptr, ptr, i64 } %4, 1 +// LLVM-NEXT: store i32 1, ptr %5, align 4 +// LLVM-NEXT: %6 = extractvalue { ptr, ptr, i64 } %4, 1 +// LLVM-NEXT: %7 = load i32, ptr %6, align 4 +// LLVM-NEXT: ret i32 %7 diff --git a/clang/test/CIR/IRGen/basic.c b/clang/test/CIR/cc1.c similarity index 100% rename from clang/test/CIR/IRGen/basic.c rename to clang/test/CIR/cc1.c diff --git a/clang/test/CIR/clang-handles-cir-input.cir b/clang/test/CIR/cc1.cir similarity index 100% rename from clang/test/CIR/clang-handles-cir-input.cir rename to clang/test/CIR/cc1.cir diff --git a/clang/test/CIR/IRGen/basic.cir b/clang/test/CIR/cirtool.cir similarity index 76% rename from clang/test/CIR/IRGen/basic.cir rename to clang/test/CIR/cirtool.cir index 445d25dc8376..bbd3778fabc8 100644 --- a/clang/test/CIR/IRGen/basic.cir +++ b/clang/test/CIR/cirtool.cir @@ -1,5 +1,3 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fenable-clangir -emit-llvm %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s -check-prefix=LLVM // RUN: cir-tool %s -cir-to-memref -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: mlir-translate -mlir-to-llvmir %t.mlir -o %t.ll From 78b9f4053134e14381a01ad1fef7b379f62e12bd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 24 Nov 2021 18:03:07 -0800 Subject: [PATCH 0079/2301] [CIR] Minor cleanups and add new assertion --- clang/lib/CIR/CIRBuilder.cpp | 2 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 2 -- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 2 ++ 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index dc425491c203..6cd323ae93fc 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -460,7 +460,7 @@ class CIRBuildImpl { mlir::Value VisitCastExpr(CastExpr *CE) { Expr *E = CE->getSubExpr(); QualType DestTy = CE->getType(); - CastKind Kind = CE->getCastKind(); + clang::CastKind Kind = CE->getCastKind(); switch (Kind) { case CK_LValueToRValue: assert(Builder.astCtx.hasSameUnqualifiedType(E->getType(), DestTy)); diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 89167bd4bc7a..dc2045b42e40 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -35,7 +35,6 @@ class CIR_Op traits = []> : // ConstantOp //===----------------------------------------------------------------------===// - def ConstantOp : CIR_Op<"cst", [ConstantLike, Pure]> { @@ -100,7 +99,6 @@ class AllocaTypesMatchWith ®ions) { + assert(0 && "not implemented"); + // The `then` and the `else` region branch back to the parent operation. if (!point.isParent()) { regions.push_back(RegionSuccessor()); From 49dbd8b87116a8f895c62a78976fd0bde019f676 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 24 Nov 2021 18:03:48 -0800 Subject: [PATCH 0080/2301] [CIR] Add cir.cast to cover initial conversions - Only added integreal to boolean so far. - Implement visitors and add actual builder codegen. No testcases just yet, getting there. This currently helps with getting cir.if useful. --- clang/lib/CIR/CIRBuilder.cpp | 24 ++++++++------ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 37 ++++++++++++++++++++++ 2 files changed, 51 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 6cd323ae93fc..ef7f857db7a0 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -476,7 +476,8 @@ class CIRBuildImpl { mlir::cir::NullAttr::get(Builder.builder.getContext(), Ty)); } case CK_IntegralToBoolean: { - return buildIntToBoolConversion(Visit(E)); + return buildIntToBoolConversion( + Visit(E), Builder.getLoc(CE->getSourceRange().getBegin())); } default: emitError(Builder.getLoc(CE->getExprLoc()), @@ -501,19 +502,22 @@ class CIRBuildImpl { return {}; } - mlir::Value buildIntToBoolConversion(mlir::Value V) { + mlir::Value buildIntToBoolConversion(mlir::Value srcVal, + mlir::Location loc) { // Because of the type rules of C, we often end up computing a // logical value, then zero extending it to int, then wanting it - // as a logical value again. TODO: optimize this common case here - // or leave it for later CIR passes? - assert(0 && "not implemented"); - // return Builder.CreateIsNotNull(V, "tobool"); - return nullptr; + // as a logical value again. + // TODO: optimize this common case here or leave it for later + // CIR passes? + mlir::Type boolTy = Builder.getCIRType(Builder.astCtx.BoolTy); + return Builder.builder.create( + loc, boolTy, mlir::cir::CastKind::int_to_bool, srcVal); } /// EmitConversionToBool - Convert the specified expression value to a /// boolean (i1) truth value. This is equivalent to "Val != 0". - mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType) { + mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType, + mlir::Location loc) { assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); if (SrcType->isRealFloatingType()) @@ -528,7 +532,7 @@ class CIRBuildImpl { assert(Src.getType().isa() && "pointer source not implemented"); - return buildIntToBoolConversion(Src); + return buildIntToBoolConversion(Src, loc); } /// Emit a conversion from the specified type to the specified destination @@ -555,7 +559,7 @@ class CIRBuildImpl { // Handle conversions to bool first, they are special: comparisons against // 0. if (DstType->isBooleanType()) - return buildConversionToBool(Src, SrcType); + return buildConversionToBool(Src, SrcType, Builder.getLoc(Loc)); mlir::Type DstTy = Builder.getCIRType(DstType); diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index dc2045b42e40..52ff8885236a 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -31,6 +31,43 @@ include "mlir/Interfaces/SideEffectInterfaces.td" class CIR_Op traits = []> : Op; +//===----------------------------------------------------------------------===// +// CastOp +//===----------------------------------------------------------------------===// + +// The enumaration value isn't in sync with clang. +def CK_IntegralToBoolean : I32EnumAttrCase<"int_to_bool", 1>; + +def CastKind : I32EnumAttr< + "CastKind", + "cast kind", + [CK_IntegralToBoolean]> { + let cppNamespace = "::mlir::cir"; +} + +def CastOp : CIR_Op<"cast", [Pure]> { + // FIXME: not all conversions are free of side effects. + let summary = "cast"; + let description = [{ + Apply C/C++ usual conversions rules between types. The full list of those + can be seen in clang/include/clang/AST/OperationKinds.def, but note that some + of the conversions aren't implemented in terms of cir.cast, lvalue-to-rvalue + for instance is modeled as a load. + + ```mlir + %4 = cir.cast (int_to_bool, %3 : i32), i1 + ``` + }]; + + let arguments = (ins CastKind:$kind, AnyType:$src); + let results = (outs AnyType:$res); + + let assemblyFormat = "`(` $kind `,` $src `:` type($src) `)` `,` type($res) attr-dict"; + + // The input and output types should match the cast kind. + //let verifier = [{ return ::verify(*this); }]; +} + //===----------------------------------------------------------------------===// // ConstantOp //===----------------------------------------------------------------------===// From c24e4181d9d88f871acd0bcf237c521db73362ae Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 24 Nov 2021 18:08:01 -0800 Subject: [PATCH 0081/2301] [CIR] Fix codegen of else stmt in if stmt --- clang/lib/CIR/CIRBuilder.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index ef7f857db7a0..35f1ba3a2530 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1588,7 +1588,7 @@ class CIRBuildImpl { [&](mlir::OpBuilder &b, mlir::Location loc) { (void)buildStmt(thenS); }, /*elseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - if (elseS) + if (!elseS) return; (void)buildStmt(elseS); }); From 2d5abcd7d5806ee0e8fcd1bbc7106bc9d384c448 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Nov 2021 18:15:17 -0800 Subject: [PATCH 0082/2301] [CIR] Add cir.bool for use with booleans (instead of i1) - Add new CIR type. - Codegen support for constants and int-to-bool conversion. - Tests. --- clang/lib/CIR/CIRBuilder.cpp | 9 +++++++-- clang/lib/CIR/CIRGenTypes.cpp | 3 +-- clang/test/CIR/CodeGen/basic.cpp | 13 +++++++++++++ clang/test/CIR/CodeGen/types.c | 2 +- clang/test/CIR/IR/invalid.cir | 19 ++++++++++++------- mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td | 20 +++++++++++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 7 +++++++ mlir/lib/Dialect/CIR/IR/CIRTypes.cpp | 9 ++++++++- 8 files changed, 68 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 35f1ba3a2530..93a7d01a40dd 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -493,6 +493,13 @@ class CIRBuildImpl { return Builder.buildLValue(E->getSubExpr()).getPointer(); } + mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { + mlir::Type Ty = Builder.getCIRType(E->getType()); + return Builder.builder.create( + Builder.getLoc(E->getExprLoc()), Ty, + Builder.builder.getBoolAttr(E->getValue())); + } + mlir::Value VisitExpr(Expr *E) { // Crashing here for "ScalarExprClassName"? Please implement // VisitScalarExprClassName(...) to get this working. @@ -779,8 +786,6 @@ class CIRBuildImpl { mlir::Value buildToMemory(mlir::Value Value, QualType Ty) { // Bool has a different representation in memory than in registers. - if (hasBooleanRepresentation(Ty)) - assert(0 && "not implemented"); return Value; } diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 523390a3bc89..e9463a13c613 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -66,8 +66,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; case BuiltinType::Bool: - // Note that we always return bool as i1 for use as a scalar type. - ResultType = Builder.getI1Type(); + ResultType = ::mlir::cir::BoolType::get(Builder.getContext()); break; case BuiltinType::Char_S: diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 91fcb64dffb2..f0147df1cf9e 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -36,3 +36,16 @@ int *p2() { // CHECK: %4 = cir.cst(42 : i32) : i32 // CHECK-NEXT: %5 = cir.load %1 lvalue_to_rvalue : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.store %4, %5 : i32, cir.ptr + +void b0() { bool x = true, y = false; } + +// CHECK: func @b0() { +// CHECK: %2 = cir.cst(true) : !cir.bool +// CHECK: %3 = cir.cst(false) : !cir.bool + +void b1(int a) { bool b = a; } + +// CHECK: func @b1(%arg0: i32) { +// CHECK: %2 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %3 = cir.cast(int_to_bool, %2 : i32), !cir.bool +// CHECK: cir.store %3, %0 : !cir.bool, cir.ptr diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index 5403ade061a2..81618b83231c 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -40,4 +40,4 @@ bool t9(bool b) { return b; } // CHECK-CPP: func @t6(%arg0: f32) -> f32 { // CHECK-CPP: func @t7(%arg0: f64) -> f64 { // CHECK-CPP: func @t8() { -// CHECK-CPP: func @t9(%arg0: i1) -> i1 { +// CHECK-CPP: func @t9(%arg0: !cir.bool) -> !cir.bool { diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index a4c2768bd5e2..8921aaa8a306 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1,11 +1,16 @@ // Test attempts to build bogus CIR +// RUN: cir-tool %s -verify-diagnostics -split-input-file -// RUN: cir-tool -verify-diagnostics %s -module { - func.func @p0() { - // expected-error@+1 {{'cir.cst' op nullptr expects pointer type}} - %1 = cir.cst(#cir.null : !cir.ptr) : i32 +// expected-error@+2 {{'cir.cst' op nullptr expects pointer type}} +func.func @p0() { + %1 = cir.cst(#cir.null : !cir.ptr) : i32 + cir.return +} + +// ----- - cir.return - } +// expected-error@+2 {{'cir.cst' op result type ('i32') must be '!cir.bool' for 'true'}} +func.func @b0() { + %1 = cir.cst(true) : i32 + cir.return } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td index 7b04e08d3ae1..48ce0227ce12 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td @@ -42,10 +42,28 @@ def CIR_PointerType : let hasCustomAssemblyFormat = 1; } +//===----------------------------------------------------------------------===// +// BoolType +// +// An alternative here is to represent bool as mlir::i1, but let's be more +// generic. +// +//===----------------------------------------------------------------------===// +def CIR_BoolType : + CIR_Type<"Bool", "bool"> { + + let summary = "CIR bool type"; + let description = [{ + `cir.bool` represent's C++ bool type. + }]; + + let hasCustomAssemblyFormat = 1; +} + //===----------------------------------------------------------------------===// // One type to bind them all //===----------------------------------------------------------------------===// -def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType]>; +def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType, CIR_BoolType]>; #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index ea98a4bd5a05..f8eb11fdbeb5 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -60,6 +60,13 @@ LogicalResult ConstantOp::verify() { // ODS already generates checks to make sure the result type is valid. We just // need to additionally check that the value's attribute type is consistent // with the result type. + if (val.isa()) { + if (!opType.isa()) + return emitOpError("result type (") + << opType << ") must be '!cir.bool' for '" << val << "'"; + return success(); + } + if (opType.isa()) { if (valueType != opType) return emitOpError("result type (") diff --git a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp index 60476a892ab1..5fba027a1433 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp @@ -63,8 +63,15 @@ void PointerType::print(mlir::AsmPrinter &printer) const { printer << '>'; } +Type BoolType::parse(mlir::AsmParser &parser) { + return get(parser.getContext()); +} + +void BoolType::print(mlir::AsmPrinter &printer) const { +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// -void CIRDialect::registerTypes() { addTypes(); } +void CIRDialect::registerTypes() { addTypes(); } From f5251f2c8c121ec03fef1dcf5d4485e5521844b1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Nov 2021 23:54:07 -0800 Subject: [PATCH 0083/2301] [CIR] Teach IfOp about cir.bool --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 52ff8885236a..ec82994ddf67 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -306,7 +306,7 @@ def IfOp : CIR_Op<"if", } ``` }]; - let arguments = (ins I1:$condition); + let arguments = (ins CIR_BoolType:$condition); let regions = (region SizedRegion<1>:$thenRegion, AnyRegion:$elseRegion); let skipDefaultBuilders = 1; From cf319b0d7b6e1da3cf9273759651630134974b74 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 1 Dec 2021 18:21:02 -0800 Subject: [PATCH 0084/2301] [CIR] Add cir.yield op and finish first version of cir.if. - Add cir.yield op. Should be used in scoped ops, right now "return"s from ifs. While here add verifier. - Add a proper parser and printer to IfOp - Add tests for cir.cast, cir.if and cir.scope. --- clang/lib/CIR/CIRBuilder.cpp | 6 +- clang/test/CIR/CodeGen/basic.cpp | 21 +++++++ clang/test/CIR/IR/cir-ops.cir | 30 ++++++++++ clang/test/CIR/IR/invalid.cir | 12 ++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 41 +++++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 64 ++++++++++++++++++++++ mlir/lib/Dialect/CIR/IR/CIRTypes.cpp | 3 +- 7 files changed, 172 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 93a7d01a40dd..6353a7916c81 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1590,12 +1590,16 @@ class CIRBuildImpl { builder.create( loc, condV, /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { (void)buildStmt(thenS); }, + [&](mlir::OpBuilder &b, mlir::Location loc) { + (void)buildStmt(thenS); + builder.create(getLoc(thenS->getSourceRange().getEnd())); + }, /*elseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { if (!elseS) return; (void)buildStmt(elseS); + builder.create(getLoc(elseS->getSourceRange().getEnd())); }); } diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index f0147df1cf9e..6acadec1d32e 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * int *p0() { int *p = nullptr; @@ -49,3 +50,23 @@ void b1(int a) { bool b = a; } // CHECK: %2 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 // CHECK: %3 = cir.cast(int_to_bool, %2 : i32), !cir.bool // CHECK: cir.store %3, %0 : !cir.bool, cir.ptr + +int if0(int a) { + int x = 0; + if (a) { + x = 3; + } else { + x = 4; + } + return x; +} + +// CHECK: func @if0(%arg0: i32) -> i32 { +// CHECK: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool +// CHECK-NEXT: cir.if %4 { +// CHECK-NEXT: %6 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: } else { +// CHECK-NEXT: %6 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: } diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 6ba90aa9c4e0..ccf3e900d33f 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -1,4 +1,5 @@ // Test the CIR operations can parse and print correctly (roundtrip) +// XFAIL: * // RUN: cir-tool %s | cir-tool | FileCheck %s module { @@ -16,6 +17,25 @@ module { %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 cir.return %2 : i32 } + + func.func @if0(%arg0: i32) -> i32 { + %0 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} + cir.store %arg0, %1 : i32, cir.ptr + %2 = cir.cst(0 : i32) : i32 + cir.store %2, %0 : i32, cir.ptr + %3 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 + %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool + cir.if %4 { + %6 = cir.cst(3 : i32) : i32 + cir.store %6, %0 : i32, cir.ptr + } else { + %6 = cir.cst(4 : i32) : i32 + cir.store %6, %0 : i32, cir.ptr + } + %5 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 + cir.return %5 : i32 + } } // CHECK: module { @@ -34,4 +54,14 @@ module { // CHECK-NEXT: cir.return %2 : i32 // CHECK-NEXT: } +// CHECK: @if0(%arg0: i32) -> i32 { +// CHECK: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool +// CHECK-NEXT: cir.if %4 { +// CHECK-NEXT: %6 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: } else { +// CHECK-NEXT: %6 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: } + // CHECK: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 8921aaa8a306..8421b4180721 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -14,3 +14,15 @@ func.func @b0() { %1 = cir.cst(true) : i32 cir.return } + +// ----- + +func.func @if0() { + %0 = cir.cst(true) : !cir.bool + cir.if %0 { + %6 = cir.cst(3 : i32) : i32 + // expected-error@+1 {{'cir.yield' op must not produce results in 'if' operation}} + cir.yield %6 : i32 + } + cir.return +} diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index ec82994ddf67..8ffac19eda01 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -278,10 +278,11 @@ def ReturnOp : CIR_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { //===----------------------------------------------------------------------===// // IfOp //===----------------------------------------------------------------------===// + def IfOp : CIR_Op<"if", [DeclareOpInterfaceMethods, - SingleBlock, RecursivelySpeculatable, AutomaticAllocationScope, - NoRegionArguments]> { + SingleBlockImplicitTerminator<"cir::YieldOp">, RecursivelySpeculatable, + AutomaticAllocationScope, NoRegionArguments]> { let summary = "if-then-else operation"; let description = [{ The `scf.if` operation represents an if-then-else construct for @@ -307,8 +308,14 @@ def IfOp : CIR_Op<"if", ``` }]; let arguments = (ins CIR_BoolType:$condition); + + // FIXME: for now the "then" region only has one block, that should change + // soon as building CIR becomes more complex. let regions = (region SizedRegion<1>:$thenRegion, AnyRegion:$elseRegion); + // FIXME: unify these within CIR_Ops. + let hasCustomAssemblyFormat = 1; + let skipDefaultBuilders = 1; let builders = [ OpBuilder<(ins "Value":$cond, "bool":$withElseRegion)>, @@ -335,5 +342,35 @@ def IfOp : CIR_Op<"if", // TODO: let hasCanonicalizer = 1; } +//===----------------------------------------------------------------------===// +// YieldOp +//===----------------------------------------------------------------------===// + +def YieldOp : CIR_Op<"yield", [Pure, ReturnLike, Terminator, + ParentOneOf<["IfOp"]>]> { + let summary = "termination operation for regions inside if, for, scope, etc"; + let description = [{ + "cir.yield" yields an SSA value from a CIR dialect op region and + terminates the regions. The semantics of how the values are yielded is + defined by the parent operation. + + Currently, there are not parents where "cir.yield" has any operands, + but it will be useful to represent lifetime extension in the future. In + that case the operands must match the parent operation's results. + + If the parent operation defines no values, then the "cir.yield" may be + left out in the custom syntax and the builders will insert one implicitly. + Otherwise, it has to be present in the syntax to indicate which values are + yielded. + }]; + + let arguments = (ins Variadic:$results); + let builders = [OpBuilder<(ins), [{ /* nothing to do */ }]>]; + + let assemblyFormat = + [{ attr-dict ($results^ `:` type($results))? }]; + + let hasVerifier = 1; +} #endif // MLIR_CIR_DIALECT_CIR_OPS diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index f8eb11fdbeb5..e936efa6320f 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -133,6 +133,56 @@ mlir::LogicalResult ReturnOp::verify() { // IfOp //===----------------------------------------------------------------------===// +ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { + // Create the regions for 'then'. + result.regions.reserve(2); + Region *thenRegion = result.addRegion(); + Region *elseRegion = result.addRegion(); + + auto &builder = parser.getBuilder(); + OpAsmParser::UnresolvedOperand cond; + Type boolType = ::mlir::cir::BoolType::get(builder.getContext()); + + if (parser.parseOperand(cond) || + parser.resolveOperand(cond, boolType, result.operands)) + return failure(); + + // Parse the 'then' region. + if (parser.parseRegion(*thenRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + IfOp::ensureTerminator(*thenRegion, parser.getBuilder(), result.location); + + // If we find an 'else' keyword then parse the 'else' region. + if (!parser.parseOptionalKeyword("else")) { + if (parser.parseRegion(*elseRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + IfOp::ensureTerminator(*elseRegion, parser.getBuilder(), result.location); + } + + // Parse the optional attribute list. + if (parser.parseOptionalAttrDict(result.attributes)) + return failure(); + return success(); +} + +void IfOp::print(OpAsmPrinter &p) { + p << " " << getCondition() << " "; + p.printRegion(getThenRegion(), + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/false); + + // Print the 'else' regions if it exists and has a block. + auto &elseRegion = this->getElseRegion(); + if (!elseRegion.empty()) { + p << " else "; + p.printRegion(elseRegion, + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/false); + } + + p.printOptionalAttrDict(getOperation()->getAttrs()); +} + Block *IfOp::thenBlock() { return &getThenRegion().back(); } Block *IfOp::elseBlock() { Region &r = getElseRegion(); @@ -203,6 +253,20 @@ void IfOp::build(OpBuilder &builder, OperationState &result, Value cond, elseBuilder(builder, result.location); } +//===----------------------------------------------------------------------===// +// YieldOp +//===----------------------------------------------------------------------===// + +mlir::LogicalResult YieldOp::verify() { + if (!llvm::isa(getOperation()->getParentOp())) + return emitOpError() << "expects 'if' as the parent operation'"; + + if (!getResults().empty()) + return emitOpError() << "must not produce results in 'if' operation"; + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp index 5fba027a1433..b20661931874 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp @@ -67,8 +67,7 @@ Type BoolType::parse(mlir::AsmParser &parser) { return get(parser.getContext()); } -void BoolType::print(mlir::AsmPrinter &printer) const { -} +void BoolType::print(mlir::AsmPrinter &printer) const {} //===----------------------------------------------------------------------===// // CIR Dialect From 3820444067973044f60ab79828ad7c10c03d995b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 1 Dec 2021 23:06:37 -0800 Subject: [PATCH 0085/2301] [CIR] More improvements on cir.if generation - Do proper detection of 'else' when building CIR. - Fix getSuccessorRegions to get proper verification. - Chained if's now work too. - More tests --- clang/lib/CIR/CIRBuilder.cpp | 4 +-- clang/test/CIR/CodeGen/basic.cpp | 39 +++++++++++++++++++++- clang/test/CIR/IR/cir-ops.cir | 1 - clang/test/CIR/IR/invalid.cir | 2 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 4 +-- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 10 +++--- 6 files changed, 46 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 6353a7916c81..c9634ce39a42 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1588,7 +1588,7 @@ class CIRBuildImpl { cond = cond->IgnoreParens(); mlir::Value condV = evaluateExprAsBool(cond); builder.create( - loc, condV, + loc, condV, elseS, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { (void)buildStmt(thenS); @@ -1596,8 +1596,6 @@ class CIRBuildImpl { }, /*elseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - if (!elseS) - return; (void)buildStmt(elseS); builder.create(getLoc(elseS->getSourceRange().getEnd())); }); diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 6acadec1d32e..a78332a712f3 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * int *p0() { int *p = nullptr; @@ -70,3 +69,41 @@ int if0(int a) { // CHECK-NEXT: %6 = cir.cst(4 : i32) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: } + +int if1(int a, bool b, bool c) { + int x = 0; + if (a) { + x = 3; + if (b) { + x = 8; + } + } else { + if (c) { + x = 14; + } + x = 4; + } + return x; +} + +// CHECK: func @if1(%arg0: i32, %arg1: !cir.bool, %arg2: !cir.bool) -> i32 { +// CHECK: cir.if %6 { +// CHECK: %8 = cir.cst(3 : i32) : i32 +// CHECK: cir.store %8, %0 : i32, cir.ptr +// CHECK: %9 = cir.load %2 lvalue_to_rvalue : cir.ptr , !cir.bool +// CHECK: cir.if %9 { +// CHECK: %10 = cir.cst(8 : i32) : i32 +// CHECK: cir.store %10, %0 : i32, cir.ptr +// CHECK: } +// CHECK: } else { +// CHECK: %8 = cir.load %1 lvalue_to_rvalue : cir.ptr , !cir.bool +// CHECK: cir.if %8 { +// CHECK: %10 = cir.cst(14 : i32) : i32 +// CHECK: cir.store %10, %0 : i32, cir.ptr +// CHECK: } +// CHECK: %9 = cir.cst(4 : i32) : i32 +// CHECK: cir.store %9, %0 : i32, cir.ptr +// CHECK: } +// CHECK: %7 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: cir.return %7 : i32 +// CHECK: } diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index ccf3e900d33f..15d95ad75457 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -1,5 +1,4 @@ // Test the CIR operations can parse and print correctly (roundtrip) -// XFAIL: * // RUN: cir-tool %s | cir-tool | FileCheck %s module { diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 8421b4180721..ef637771a7ce 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -19,9 +19,9 @@ func.func @b0() { func.func @if0() { %0 = cir.cst(true) : !cir.bool + // expected-error@+1 {{'cir.if' op region control flow edge from Region #0 to parent results: source has 1 operands, but target successor needs 0}} cir.if %0 { %6 = cir.cst(3 : i32) : i32 - // expected-error@+1 {{'cir.yield' op must not produce results in 'if' operation}} cir.yield %6 : i32 } cir.return diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 8ffac19eda01..dcb76dca543b 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -315,11 +315,11 @@ def IfOp : CIR_Op<"if", // FIXME: unify these within CIR_Ops. let hasCustomAssemblyFormat = 1; + let hasVerifier = 1; let skipDefaultBuilders = 1; let builders = [ - OpBuilder<(ins "Value":$cond, "bool":$withElseRegion)>, - OpBuilder<(ins "Value":$cond, + OpBuilder<(ins "Value":$cond, "bool":$withElseRegion, CArg<"function_ref", "buildTerminatedBody">:$thenBuilder, CArg<"function_ref", diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index e936efa6320f..0a366d350587 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -201,8 +201,6 @@ void mlir::cir::buildTerminatedBody(OpBuilder &builder, Location loc) {} /// not a constant. void IfOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { - assert(0 && "not implemented"); - // The `then` and the `else` region branch back to the parent operation. if (!point.isParent()) { regions.push_back(RegionSuccessor()); @@ -234,6 +232,7 @@ void IfOp::getSuccessorRegions(mlir::RegionBranchPoint point, } void IfOp::build(OpBuilder &builder, OperationState &result, Value cond, + bool withElseRegion, function_ref thenBuilder, function_ref elseBuilder) { assert(thenBuilder && "the builder callback for 'then' must be present"); @@ -246,13 +245,15 @@ void IfOp::build(OpBuilder &builder, OperationState &result, Value cond, thenBuilder(builder, result.location); Region *elseRegion = result.addRegion(); - if (!elseBuilder) + if (!withElseRegion) return; builder.createBlock(elseRegion); elseBuilder(builder, result.location); } +LogicalResult IfOp::verify() { return success(); } + //===----------------------------------------------------------------------===// // YieldOp //===----------------------------------------------------------------------===// @@ -261,9 +262,6 @@ mlir::LogicalResult YieldOp::verify() { if (!llvm::isa(getOperation()->getParentOp())) return emitOpError() << "expects 'if' as the parent operation'"; - if (!getResults().empty()) - return emitOpError() << "must not produce results in 'if' operation"; - return mlir::success(); } From b7ccfad2646e7e1258054e26792ddff106f6cd14 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 1 Dec 2021 23:57:10 -0800 Subject: [PATCH 0086/2301] [CIR] Add cir.scope operation - Add operation interface - Builders, verifiers, printers --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 36 ++++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 61 ++++++++++++++++++++++ 2 files changed, 96 insertions(+), 1 deletion(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index dcb76dca543b..a730b4d5d726 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -347,7 +347,7 @@ def IfOp : CIR_Op<"if", //===----------------------------------------------------------------------===// def YieldOp : CIR_Op<"yield", [Pure, ReturnLike, Terminator, - ParentOneOf<["IfOp"]>]> { + ParentOneOf<["IfOp", "ScopeOp"]>]> { let summary = "termination operation for regions inside if, for, scope, etc"; let description = [{ "cir.yield" yields an SSA value from a CIR dialect op region and @@ -373,4 +373,38 @@ def YieldOp : CIR_Op<"yield", [Pure, ReturnLike, Terminator, let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// ScopeOp +//===----------------------------------------------------------------------===// + +def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods, + SingleBlockImplicitTerminator<"cir::YieldOp">, RecursivelySpeculatable, + AutomaticAllocationScope, NoRegionArguments]> { + let summary = ""; + let description = [{ + "cir.scope" contains one region and defines a strict "scope" for all new + values produced within its blocks. + + "cir.yield" is required as a terminator and can have results, in which case + it can be omitted. Not used anywhere just yet but might be used to explicitly + model lifetime extension. + }]; + + let results = (outs Variadic:$results); + let regions = (region SizedRegion<1>:$scopeRegion); + + let hasCustomAssemblyFormat = 1; + let hasVerifier = 1; + + let skipDefaultBuilders = 1; + let builders = [ + OpBuilder<(ins "TypeRange":$resultTypes, + "function_ref":$scopeBuilder)> + ]; + + let extraClassDeclaration = [{ + Block* scopeBlock(); + }]; +} + #endif // MLIR_CIR_DIALECT_CIR_OPS diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 0a366d350587..8e353fe8074f 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -254,6 +254,67 @@ void IfOp::build(OpBuilder &builder, OperationState &result, Value cond, LogicalResult IfOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// ScopeOp +//===----------------------------------------------------------------------===// + +ParseResult ScopeOp::parse(OpAsmParser &parser, OperationState &result) { + // Create one region within 'scope'. + result.regions.reserve(1); + Region *scopeRegion = result.addRegion(); + + // Parse the scope region. + if (parser.parseRegion(*scopeRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + ScopeOp::ensureTerminator(*scopeRegion, parser.getBuilder(), result.location); + + // Parse the optional attribute list. + if (parser.parseOptionalAttrDict(result.attributes)) + return failure(); + return success(); +} + +void ScopeOp::print(OpAsmPrinter &p) { + p.printRegion(getScopeRegion(), + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/false); + + p.printOptionalAttrDict(getOperation()->getAttrs()); +} + +Block *ScopeOp::scopeBlock() { return &getScopeRegion().back(); } + +/// Given the region at `index`, or the parent operation if `index` is None, +/// return the successor regions. These are the regions that may be selected +/// during the flow of control. `operands` is a set of optional attributes that +/// correspond to a constant value for each operand, or null if that operand is +/// not a constant. +void ScopeOp::getSuccessorRegions(mlir::RegionBranchPoint point, + SmallVectorImpl ®ions) { + // The only region always branch back to the parent operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor()); + return; + } + + // If the condition isn't constant, both regions may be executed. + regions.push_back(RegionSuccessor(&getScopeRegion())); +} + +void ScopeOp::build(OpBuilder &builder, OperationState &result, + TypeRange resultTypes, + function_ref scopeBuilder) { + assert(scopeBuilder && "the builder callback for 'then' must be present"); + result.addTypes(resultTypes); + + OpBuilder::InsertionGuard guard(builder); + Region *scopeRegion = result.addRegion(); + builder.createBlock(scopeRegion); + scopeBuilder(builder, result.location); +} + +LogicalResult ScopeOp::verify() { return success(); } + //===----------------------------------------------------------------------===// // YieldOp //===----------------------------------------------------------------------===// From 4040fa1e1e25165c776a9cda64bd0178472accca Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 2 Dec 2021 00:36:40 -0800 Subject: [PATCH 0087/2301] [CIR][Builder] Codegen a cir.scope around cir.if - This should limit the lifetime of both if initializers and conditions. - Update ifstmt tests to check for a scope. --- clang/lib/CIR/CIRBuilder.cpp | 52 ++++++++++++++++---------- clang/test/CIR/CodeGen/basic.cpp | 44 +++++++++++++--------- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 6 ++- 3 files changed, 63 insertions(+), 39 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index c9634ce39a42..7f8a09f4b521 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1605,31 +1605,45 @@ class CIRBuildImpl { // The else branch of a consteval if statement is always the only branch // that can be runtime evaluated. assert(!S.isConsteval() && "not implemented"); + mlir::LogicalResult res = mlir::success(); // C99 6.8.4.1: The first substatement is executed if the expression // compares unequal to 0. The condition must be a scalar type. - // TODO: add cir.scope, add a new scoped symbol table as well. - // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); - if (S.getInit()) - if (buildStmt(S.getInit()).failed()) - return mlir::failure(); + auto ifStmtBuilder = [&]() -> mlir::LogicalResult { + if (S.getInit()) + if (buildStmt(S.getInit()).failed()) + return mlir::failure(); + + if (S.getConditionVariable()) + buildDecl(*S.getConditionVariable()); + + // If the condition constant folds and can be elided, try to avoid + // emitting the condition and the dead arm of the if/else. + // FIXME: should this be done as part of a constant folder pass instead? + bool CondConstant; + if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, + S.isConstexpr())) { + assert(0 && "not implemented"); + } - if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); + // TODO: PGO and likelihood. + buildIfOnBoolExpr(S.getCond(), getLoc(S.getSourceRange().getBegin()), + S.getThen(), S.getElse()); + return mlir::success(); + }; - // If the condition constant folds and can be elided, try to avoid emitting - // the condition and the dead arm of the if/else. - // FIXME: should this be done as part of a constant folder pass instead? - bool CondConstant; - if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, - S.isConstexpr())) { - assert(0 && "not implemented"); - } + // TODO: Add a new scoped symbol table. + // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); + auto locBegin = getLoc(S.getSourceRange().getBegin()); + auto locEnd = getLoc(S.getSourceRange().getEnd()); + builder.create( + locBegin, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + res = ifStmtBuilder(); + builder.create(locEnd); + }); - // TODO: PGO and likelihood. - buildIfOnBoolExpr(S.getCond(), getLoc(S.getSourceRange().getBegin()), - S.getThen(), S.getElse()); - return mlir::success(); + return res; } mlir::LogicalResult buildStmt(const Stmt *S) { diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index a78332a712f3..132b52a1bbd2 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -61,14 +61,17 @@ int if0(int a) { } // CHECK: func @if0(%arg0: i32) -> i32 { -// CHECK: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool -// CHECK-NEXT: cir.if %4 { -// CHECK-NEXT: %6 = cir.cst(3 : i32) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr -// CHECK-NEXT: } else { -// CHECK-NEXT: %6 = cir.cst(4 : i32) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr -// CHECK-NEXT: } +// CHECK: cir.scope { +// CHECK: %4 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %5 = cir.cast(int_to_bool, %4 : i32), !cir.bool +// CHECK: cir.if %5 { +// CHECK: %6 = cir.cst(3 : i32) : i32 +// CHECK: cir.store %6, %0 : i32, cir.ptr +// CHECK: } else { +// CHECK: %6 = cir.cst(4 : i32) : i32 +// CHECK: cir.store %6, %0 : i32, cir.ptr +// CHECK: } +// CHECK: } int if1(int a, bool b, bool c) { int x = 0; @@ -87,23 +90,28 @@ int if1(int a, bool b, bool c) { } // CHECK: func @if1(%arg0: i32, %arg1: !cir.bool, %arg2: !cir.bool) -> i32 { -// CHECK: cir.if %6 { -// CHECK: %8 = cir.cst(3 : i32) : i32 -// CHECK: cir.store %8, %0 : i32, cir.ptr +// CHECK: cir.scope { +// CHECK: %6 = cir.load %3 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %7 = cir.cast(int_to_bool, %6 : i32), !cir.bool +// CHECK: cir.if %7 { +// CHECK: %8 = cir.cst(3 : i32) : i32 +// CHECK: cir.store %8, %0 : i32, cir.ptr +// CHECK: cir.scope { // CHECK: %9 = cir.load %2 lvalue_to_rvalue : cir.ptr , !cir.bool // CHECK: cir.if %9 { // CHECK: %10 = cir.cst(8 : i32) : i32 // CHECK: cir.store %10, %0 : i32, cir.ptr // CHECK: } -// CHECK: } else { -// CHECK: %8 = cir.load %1 lvalue_to_rvalue : cir.ptr , !cir.bool -// CHECK: cir.if %8 { +// CHECK: } +// CHECK: } else { +// CHECK: cir.scope { +// CHECK: %9 = cir.load %1 lvalue_to_rvalue : cir.ptr , !cir.bool +// CHECK: cir.if %9 { // CHECK: %10 = cir.cst(14 : i32) : i32 // CHECK: cir.store %10, %0 : i32, cir.ptr // CHECK: } -// CHECK: %9 = cir.cst(4 : i32) : i32 -// CHECK: cir.store %9, %0 : i32, cir.ptr // CHECK: } -// CHECK: %7 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 -// CHECK: cir.return %7 : i32 +// CHECK: %8 = cir.cst(4 : i32) : i32 +// CHECK: cir.store %8, %0 : i32, cir.ptr // CHECK: } +// CHECK: } diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 8e353fe8074f..9117a5ba622d 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -275,6 +275,7 @@ ParseResult ScopeOp::parse(OpAsmParser &parser, OperationState &result) { } void ScopeOp::print(OpAsmPrinter &p) { + p << ' '; p.printRegion(getScopeRegion(), /*printEntryBlockArgs=*/false, /*printBlockTerminators=*/false); @@ -320,8 +321,9 @@ LogicalResult ScopeOp::verify() { return success(); } //===----------------------------------------------------------------------===// mlir::LogicalResult YieldOp::verify() { - if (!llvm::isa(getOperation()->getParentOp())) - return emitOpError() << "expects 'if' as the parent operation'"; + if (!llvm::isa(getOperation()->getParentOp())) + return emitOpError() + << "expects 'cir.if' or 'cir.scope' as the parent operation'"; return mlir::success(); } From c6860cda795ea104d5752c6d685115b23085a32c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 2 Dec 2021 00:47:07 -0800 Subject: [PATCH 0088/2301] [CIR][Builder] Improve error handling for then/else codegen --- clang/lib/CIR/CIRBuilder.cpp | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 7f8a09f4b521..0c239a7b705b 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1581,24 +1581,29 @@ class CIRBuildImpl { /// times we expect the condition to evaluate to true based on PGO data. We /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr /// for extra ideas). - void buildIfOnBoolExpr(const Expr *cond, mlir::Location loc, - const Stmt *thenS, const Stmt *elseS) { + mlir::LogicalResult buildIfOnBoolExpr(const Expr *cond, mlir::Location loc, + const Stmt *thenS, const Stmt *elseS) { // TODO: scoped ApplyDebugLocation DL(*this, Cond); // TODO: __builtin_unpredictable and profile counts? cond = cond->IgnoreParens(); mlir::Value condV = evaluateExprAsBool(cond); + mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); + builder.create( loc, condV, elseS, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - (void)buildStmt(thenS); + resThen = buildStmt(thenS); builder.create(getLoc(thenS->getSourceRange().getEnd())); }, /*elseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - (void)buildStmt(elseS); + resElse = buildStmt(elseS); builder.create(getLoc(elseS->getSourceRange().getEnd())); }); + + return mlir::LogicalResult::success(resThen.succeeded() && + resElse.succeeded()); } mlir::LogicalResult buildIfStmt(const IfStmt &S) { @@ -1627,9 +1632,9 @@ class CIRBuildImpl { } // TODO: PGO and likelihood. - buildIfOnBoolExpr(S.getCond(), getLoc(S.getSourceRange().getBegin()), - S.getThen(), S.getElse()); - return mlir::success(); + return buildIfOnBoolExpr(S.getCond(), + getLoc(S.getSourceRange().getBegin()), + S.getThen(), S.getElse()); }; // TODO: Add a new scoped symbol table. From 628fb95820088109bb92d4516e3b2ba777338a59 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 3 Dec 2021 23:12:01 -0800 Subject: [PATCH 0089/2301] [CIR] Model raw compound statemnts using cir.scope --- clang/lib/CIR/CIRBuilder.cpp | 29 +++++++++++++++++++++++------ clang/test/CIR/CodeGen/basic.cpp | 31 +++++++++++++++++++++++-------- 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 0c239a7b705b..e1e6dd43dcde 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1828,17 +1828,34 @@ class CIRBuildImpl { mlir::LogicalResult buildFunctionBody(const Stmt *Body) { const CompoundStmt *S = dyn_cast(Body); assert(S && "expected compound stmt"); - return buildCompoundStmt(*S); + + // We start with function level scope for variables. + SymTableScopeTy varScope(symbolTable); + return buildCompoundStmtWithoutScope(*S); } mlir::LogicalResult buildCompoundStmt(const CompoundStmt &S) { - // Create a scope in the symbol table to hold variable declarations local - // to this compound statement. + mlir::LogicalResult res = mlir::success(); + + auto compoundStmtBuilder = [&]() -> mlir::LogicalResult { + if (buildCompoundStmtWithoutScope(S).failed()) + return mlir::failure(); + + return mlir::success(); + }; + + // Add local scope to track new declared variables. SymTableScopeTy varScope(symbolTable); - if (buildCompoundStmtWithoutScope(S).failed()) - return mlir::failure(); + auto locBegin = getLoc(S.getSourceRange().getBegin()); + auto locEnd = getLoc(S.getSourceRange().getEnd()); + builder.create( + locBegin, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + res = compoundStmtBuilder(); + builder.create(locEnd); + }); - return mlir::success(); + return res; } mlir::LogicalResult buildCompoundStmtWithoutScope(const CompoundStmt &S) { diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 132b52a1bbd2..31a69888c10c 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -23,19 +23,34 @@ int *p1() { int *p2() { int *p = nullptr; - int x = 0; - p = &x; + { + int x = 0; + p = &x; + *p = 42; + } *p = 42; return p; } // CHECK: func @p2() -> !cir.ptr { -// CHECK: %0 = cir.alloca i32, cir.ptr , [cinit] -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, [cinit] -// CHECK: cir.store %0, %1 : !cir.ptr, cir.ptr > -// CHECK: %4 = cir.cst(42 : i32) : i32 -// CHECK-NEXT: %5 = cir.load %1 lvalue_to_rvalue : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.store %4, %5 : i32, cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, [cinit] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.store %1, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %5 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} +// CHECK-NEXT: %6 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: cir.store %6, %5 : i32, cir.ptr +// CHECK-NEXT: cir.store %5, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %7 = cir.cst(42 : i32) : i32 +// CHECK-NEXT: %8 = cir.load %0 lvalue_to_rvalue : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %7, %8 : i32, cir.ptr +// CHECK-NEXT: } +// CHECK-NEXT: %2 = cir.cst(42 : i32) : i32 +// CHECK-NEXT: %3 = cir.load %0 lvalue_to_rvalue : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %2, %3 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %0 lvalue_to_rvalue : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return %4 : !cir.ptr +// CHECK-NEXT: } void b0() { bool x = true, y = false; } From 260b50175c07a3e21b011d38817dc7f5e5c4618f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Sat, 4 Dec 2021 00:04:20 -0800 Subject: [PATCH 0090/2301] [CIR] Remove redundant scope instructions (e.g. then/else statements) --- clang/lib/CIR/CIRBuilder.cpp | 21 +++++++++++++-------- clang/test/CIR/CodeGen/basic.cpp | 30 +++++++++++++++--------------- 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index e1e6dd43dcde..c6bdde23ba0a 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1188,14 +1188,16 @@ class CIRBuildImpl { return mlir::success(); } - mlir::LogicalResult buildSimpleStmt(const Stmt *S) { + mlir::LogicalResult buildSimpleStmt(const Stmt *S, bool useCurrentScope) { switch (S->getStmtClass()) { default: return mlir::failure(); case Stmt::DeclStmtClass: return buildDeclStmt(cast(*S)); case Stmt::CompoundStmtClass: - return buildCompoundStmt(cast(*S)); + return useCurrentScope + ? buildCompoundStmtWithoutScope(cast(*S)) + : buildCompoundStmt(cast(*S)); case Stmt::ReturnStmtClass: return buildReturnStmt(cast(*S)); case Stmt::NullStmtClass: @@ -1593,12 +1595,12 @@ class CIRBuildImpl { loc, condV, elseS, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - resThen = buildStmt(thenS); + resThen = buildStmt(thenS, /*useCurrentScope=*/true); builder.create(getLoc(thenS->getSourceRange().getEnd())); }, /*elseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - resElse = buildStmt(elseS); + resElse = buildStmt(elseS, /*useCurrentScope=*/true); builder.create(getLoc(elseS->getSourceRange().getEnd())); }); @@ -1616,7 +1618,7 @@ class CIRBuildImpl { // compares unequal to 0. The condition must be a scalar type. auto ifStmtBuilder = [&]() -> mlir::LogicalResult { if (S.getInit()) - if (buildStmt(S.getInit()).failed()) + if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); if (S.getConditionVariable()) @@ -1651,8 +1653,10 @@ class CIRBuildImpl { return res; } - mlir::LogicalResult buildStmt(const Stmt *S) { - if (mlir::succeeded(buildSimpleStmt(S))) + // Build CIR for a statement. useCurrentScope should be true if no + // new scopes need be created when finding a compound statement. + mlir::LogicalResult buildStmt(const Stmt *S, bool useCurrentScope) { + if (mlir::succeeded(buildSimpleStmt(S, useCurrentScope))) return mlir::success(); if (astCtx.getLangOpts().OpenMP && astCtx.getLangOpts().OpenMPSimd) @@ -1860,7 +1864,7 @@ class CIRBuildImpl { mlir::LogicalResult buildCompoundStmtWithoutScope(const CompoundStmt &S) { for (auto *CurStmt : S.body()) - if (buildStmt(CurStmt).failed()) + if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed()) return mlir::failure(); return mlir::success(); @@ -1969,6 +1973,7 @@ void CIRContext::verifyModule() { builder->verifyModule(); } bool CIRContext::EmitFunction(const FunctionDecl *FD) { CIRCodeGenFunction CCGF{}; auto func = builder->buildCIR(&CCGF, FD); + func->dump(); assert(func && "should emit function"); return true; } diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 31a69888c10c..d8b31929aeb9 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -79,13 +79,13 @@ int if0(int a) { // CHECK: cir.scope { // CHECK: %4 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 // CHECK: %5 = cir.cast(int_to_bool, %4 : i32), !cir.bool -// CHECK: cir.if %5 { -// CHECK: %6 = cir.cst(3 : i32) : i32 -// CHECK: cir.store %6, %0 : i32, cir.ptr -// CHECK: } else { -// CHECK: %6 = cir.cst(4 : i32) : i32 -// CHECK: cir.store %6, %0 : i32, cir.ptr -// CHECK: } +// CHECK-NEXT: cir.if %5 { +// CHECK-NEXT: %6 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: } else { +// CHECK-NEXT: %6 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: } // CHECK: } int if1(int a, bool b, bool c) { @@ -113,18 +113,18 @@ int if1(int a, bool b, bool c) { // CHECK: cir.store %8, %0 : i32, cir.ptr // CHECK: cir.scope { // CHECK: %9 = cir.load %2 lvalue_to_rvalue : cir.ptr , !cir.bool -// CHECK: cir.if %9 { -// CHECK: %10 = cir.cst(8 : i32) : i32 -// CHECK: cir.store %10, %0 : i32, cir.ptr -// CHECK: } +// CHECK-NEXT: cir.if %9 { +// CHECK-NEXT: %10 = cir.cst(8 : i32) : i32 +// CHECK-NEXT: cir.store %10, %0 : i32, cir.ptr +// CHECK-NEXT: } // CHECK: } // CHECK: } else { // CHECK: cir.scope { // CHECK: %9 = cir.load %1 lvalue_to_rvalue : cir.ptr , !cir.bool -// CHECK: cir.if %9 { -// CHECK: %10 = cir.cst(14 : i32) : i32 -// CHECK: cir.store %10, %0 : i32, cir.ptr -// CHECK: } +// CHECK-NEXT: cir.if %9 { +// CHECK-NEXT: %10 = cir.cst(14 : i32) : i32 +// CHECK-NEXT: cir.store %10, %0 : i32, cir.ptr +// CHECK-NEXT: } // CHECK: } // CHECK: %8 = cir.cst(4 : i32) : i32 // CHECK: cir.store %8, %0 : i32, cir.ptr From c5789e10b1d42660a82bd391c70d7bbbc30a56e7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Sat, 4 Dec 2021 00:15:05 -0800 Subject: [PATCH 0091/2301] [CIR] Remove accidental dump() leftover --- clang/lib/CIR/CIRBuilder.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index c6bdde23ba0a..20dd53389303 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -1973,7 +1973,6 @@ void CIRContext::verifyModule() { builder->verifyModule(); } bool CIRContext::EmitFunction(const FunctionDecl *FD) { CIRCodeGenFunction CCGF{}; auto func = builder->buildCIR(&CCGF, FD); - func->dump(); assert(func && "should emit function"); return true; } From 62845e3f4bc29e5f0f09bb3f50dde69c12378a05 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 6 Dec 2021 15:44:37 -0800 Subject: [PATCH 0092/2301] [CIR] Update docs on cir.if --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index a730b4d5d726..35c331bd25ab 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -285,7 +285,7 @@ def IfOp : CIR_Op<"if", AutomaticAllocationScope, NoRegionArguments]> { let summary = "if-then-else operation"; let description = [{ - The `scf.if` operation represents an if-then-else construct for + The `cir.if` operation represents an if-then-else construct for conditionally executing two regions of code. The operand to an if operation is a boolean value. For example: @@ -297,7 +297,8 @@ def IfOp : CIR_Op<"if", } ``` - "cir.if" defines no values and the 'else' can be omitted. + `cir.if` defines no values and the 'else' can be omitted. Every region + must be terminated by `cir.yield`, which is implicit in the asm form. Example: From 65d8ad0b9394d9950e2087fbe5c378dc6bfca2c3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 7 Dec 2021 01:11:01 -0800 Subject: [PATCH 0093/2301] [CIR] Add cir.binop and a series of binop kinds - Add builder support and tests. --- clang/lib/CIR/CIRBuilder.cpp | 112 ++++++++++++++++++ mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 1 + mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 42 +++++++ 3 files changed, 155 insertions(+) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 20dd53389303..15843436a06a 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -500,6 +500,118 @@ class CIRBuildImpl { Builder.builder.getBoolAttr(E->getValue())); } + struct BinOpInfo { + mlir::Value LHS; + mlir::Value RHS; + SourceRange Loc; + QualType Ty; // Computation Type. + BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform + FPOptions FPFeatures; + const Expr *E; // Entire expr, for error unsupported. May not be binop. + + /// Check if the binop computes a division or a remainder. + bool isDivremOp() const { + return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign || + Opcode == BO_RemAssign; + } + + /// Check if at least one operand is a fixed point type. In such cases, + /// this operation did not follow usual arithmetic conversion and both + /// operands might not be of the same type. + bool isFixedPointOp() const { + // We cannot simply check the result type since comparison operations + // return an int. + if (const auto *BinOp = dyn_cast(E)) { + QualType LHSType = BinOp->getLHS()->getType(); + QualType RHSType = BinOp->getRHS()->getType(); + return LHSType->isFixedPointType() || RHSType->isFixedPointType(); + } + if (const auto *UnOp = dyn_cast(E)) + return UnOp->getSubExpr()->getType()->isFixedPointType(); + return false; + } + }; + + BinOpInfo buildBinOps(const BinaryOperator *E) { + BinOpInfo Result; + Result.LHS = Visit(E->getLHS()); + Result.RHS = Visit(E->getRHS()); + Result.Ty = E->getType(); + Result.Opcode = E->getOpcode(); + Result.Loc = E->getSourceRange(); + // TODO: Result.FPFeatures + Result.E = E; + return Result; + } + + mlir::Value buildMul(const BinOpInfo &Ops) { + return Builder.builder.create( + Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); + } + mlir::Value buildDiv(const BinOpInfo &Ops) { + return Builder.builder.create( + Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Div, Ops.LHS, Ops.RHS); + } + mlir::Value buildRem(const BinOpInfo &Ops) { + return Builder.builder.create( + Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); + } + mlir::Value buildAdd(const BinOpInfo &Ops) { + return Builder.builder.create( + Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS); + } + mlir::Value buildSub(const BinOpInfo &Ops) { + return Builder.builder.create( + Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); + } + mlir::Value buildShl(const BinOpInfo &Ops) { + return Builder.builder.create( + Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Shl, Ops.LHS, Ops.RHS); + } + mlir::Value buildShr(const BinOpInfo &Ops) { + return Builder.builder.create( + Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Shr, Ops.LHS, Ops.RHS); + } + mlir::Value buildAnd(const BinOpInfo &Ops) { + return Builder.builder.create( + Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::And, Ops.LHS, Ops.RHS); + } + mlir::Value buildXor(const BinOpInfo &Ops) { + return Builder.builder.create( + Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); + } + mlir::Value buildOr(const BinOpInfo &Ops) { + return Builder.builder.create( + Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Or, Ops.LHS, Ops.RHS); + } + + // Binary operators and binary compound assignment operators. +#define HANDLEBINOP(OP) \ + mlir::Value VisitBin##OP(const BinaryOperator *E) { \ + return build##OP(buildBinOps(E)); \ + } + HANDLEBINOP(Mul) + HANDLEBINOP(Div) + HANDLEBINOP(Rem) + HANDLEBINOP(Add) + HANDLEBINOP(Sub) + HANDLEBINOP(Shl) + HANDLEBINOP(Shr) + HANDLEBINOP(And) + HANDLEBINOP(Xor) + HANDLEBINOP(Or) +#undef HANDLEBINOP + mlir::Value VisitExpr(Expr *E) { // Crashing here for "ScalarExprClassName"? Please implement // VisitScalarExprClassName(...) to get this working. diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index 590c67e9ed62..ede8899ab04c 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -19,6 +19,7 @@ #include "mlir/IR/Dialect.h" #include "mlir/IR/OpDefinition.h" #include "mlir/Interfaces/ControlFlowInterfaces.h" +#include "mlir/Interfaces/InferTypeOpInterface.h" #include "mlir/Interfaces/SideEffectInterfaces.h" namespace mlir { diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 35c331bd25ab..4114d479d73d 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -22,6 +22,7 @@ include "mlir/IR/EnumAttr.td" include "mlir/IR/SymbolInterfaces.td" include "mlir/IR/BuiltinAttributeInterfaces.td" include "mlir/Interfaces/ControlFlowInterfaces.td" +include "mlir/Interfaces/InferTypeOpInterface.td" include "mlir/Interfaces/SideEffectInterfaces.td" //===----------------------------------------------------------------------===// @@ -408,4 +409,45 @@ def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods; +def BinOpKind_Div : I32EnumAttrCase<"Div", 2>; +def BinOpKind_Rem : I32EnumAttrCase<"Rem", 3>; +def BinOpKind_Add : I32EnumAttrCase<"Add", 4>; +def BinOpKind_Sub : I32EnumAttrCase<"Sub", 5>; +def BinOpKind_Shl : I32EnumAttrCase<"Shl", 6>; +def BinOpKind_Shr : I32EnumAttrCase<"Shr", 7>; +def BinOpKind_And : I32EnumAttrCase<"And", 8>; +def BinOpKind_Xor : I32EnumAttrCase<"Xor", 9>; +def BinOpKind_Or : I32EnumAttrCase<"Or", 10>; + +def BinOpKind : I32EnumAttr< + "BinOpKind", + "binary operation (arith and logic) kind", + [BinOpKind_Mul, BinOpKind_Div, BinOpKind_Rem, + BinOpKind_Add, BinOpKind_Sub, BinOpKind_Shl, + BinOpKind_Shr, BinOpKind_And, BinOpKind_Xor, + BinOpKind_Or]> { + let cppNamespace = "::mlir::cir"; +} + +def BinOp : CIR_Op<"binop", [Pure, + SameTypeOperands, SameOperandsAndResultType]> { + // TODO: get more accurate than AnyType + let results = (outs AnyType:$result); + let arguments = (ins Arg:$kind, + AnyType:$lhs, AnyType:$rhs); + + let assemblyFormat = [{ + `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) attr-dict + }]; + + // Already covered by the traits + let hasVerifier = 0; +} + #endif // MLIR_CIR_DIALECT_CIR_OPS From 32d8e461011ae5037737ae551bd78e7eb71bdb49 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 7 Dec 2021 22:10:13 -0800 Subject: [PATCH 0094/2301] [CIR] Write custom parsing/printing for binop kinds and add binops tests Also improve docs --- clang/test/CIR/CodeGen/binop.cpp | 26 +++++++++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 18 +++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 54 ++++++++++++++++++++++ 3 files changed, 97 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/binop.cpp diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp new file mode 100644 index 000000000000..c6ce278060d6 --- /dev/null +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void b0(int a, int b) { + int x = a * b; + x = x / b; + x = x % b; + x = x + b; + x = x - b; + x = x >> b; + x = x << b; + x = x & b; + x = x ^ b; + x = x | b; +} + +// CHECK: = cir.binop(mul, %3, %4) : i32 +// CHECK: = cir.binop(div, %6, %7) : i32 +// CHECK: = cir.binop(rem, %9, %10) : i32 +// CHECK: = cir.binop(add, %12, %13) : i32 +// CHECK: = cir.binop(sub, %15, %16) : i32 +// CHECK: = cir.binop(shr, %18, %19) : i32 +// CHECK: = cir.binop(shl, %21, %22) : i32 +// CHECK: = cir.binop(and, %24, %25) : i32 +// CHECK: = cir.binop(xor, %27, %28) : i32 +// CHECK: = cir.binop(or, %30, %31) : i32 \ No newline at end of file diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 4114d479d73d..b172cd26b23f 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -435,15 +435,31 @@ def BinOpKind : I32EnumAttr< let cppNamespace = "::mlir::cir"; } +// FIXME: Pure won't work when we add overloading. def BinOp : CIR_Op<"binop", [Pure, SameTypeOperands, SameOperandsAndResultType]> { + + let summary = "binary operations (arith and logic)"; + let description = [{ + "cir.binop performs the binary operation according to + the specified kind/opcode: [mul, div, rem, add, sub, shl, + shr, and, xor, or]. It accepts to input operands and the + result type must match both types. + + Example + ``` + %7 = binop(add, %1, %2) + %7 = binop(mul, %1, %2) + ``` + }]; + // TODO: get more accurate than AnyType let results = (outs AnyType:$result); let arguments = (ins Arg:$kind, AnyType:$lhs, AnyType:$rhs); let assemblyFormat = [{ - `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) attr-dict + `(` custom($kind) `,` $lhs `,` $rhs `)` `:` type($lhs) attr-dict }]; // Already covered by the traits diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 9117a5ba622d..d687a5b403bc 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -328,6 +328,60 @@ mlir::LogicalResult YieldOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// BinOp +//===----------------------------------------------------------------------===// + +ParseResult parseBinOpKind(OpAsmParser &parser, BinOpKindAttr &kindAttr) { + ::llvm::StringRef attrStr; + ::mlir::NamedAttrList attrStorage; + auto loc = parser.getCurrentLocation(); + + // FIXME: since a few names can't be used as enum (and, or, xor) we declared + // them in CIROps.td capitalized, but we really wanna use lower case on + // clang IR asm form. + if (parser.parseOptionalKeyword(&attrStr, + {"mul", "div", "rem", "add", "sub", "shl", + "shr", "and", "xor", "or"})) { + ::mlir::StringAttr attrVal; + ::mlir::OptionalParseResult parseResult = parser.parseOptionalAttribute( + attrVal, parser.getBuilder().getNoneType(), "kind", attrStorage); + if (parseResult.has_value()) { + if (failed(*parseResult)) + return ::mlir::failure(); + attrStr = attrVal.getValue(); + } else { + return parser.emitError( + loc, "expected string or keyword containing one of the following " + "enum values for attribute 'kind' [mul, div, rem, add, sub, " + "shl, shr, and, xor, or]"); + } + } + if (!attrStr.empty()) { + std::string attrString = attrStr.str(); + attrString[0] = attrString[0] + 'A' - 'a'; + attrStr = attrString; + auto attrOptional = ::mlir::cir::symbolizeBinOpKind(attrStr); + if (!attrOptional) + return parser.emitError(loc, "invalid ") + << "kind attribute specification: \"" << attrStr << '"'; + ; + + kindAttr = ::mlir::cir::BinOpKindAttr::get(parser.getBuilder().getContext(), + attrOptional.value()); + } + + return ::mlir::success(); +} + +void printBinOpKind(OpAsmPrinter &p, BinOp binOp, BinOpKindAttr kindAttr) { + auto caseValueStr = stringifyBinOpKind(kindAttr.getValue()); + std::string attrString = caseValueStr.str(); + attrString[0] = attrString[0] + 'a' - 'A'; + caseValueStr = attrString; + p << caseValueStr; +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From 83e0ac1dba3d5e87e710b3eed28ae5349a90dddd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 7 Dec 2021 23:47:37 -0800 Subject: [PATCH 0095/2301] [CIR] Add cir.cmp instruction and builder support - Support most basic cmp kinds: gt, lt, le, ge, ne, eq. - Add tests. - Add coodegen support for such simple binops. --- clang/lib/CIR/CIRBuilder.cpp | 87 ++++++++++++++++++++++ clang/test/CIR/CodeGen/cmp.cpp | 18 +++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 51 ++++++++++++- 3 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/cmp.cpp diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 15843436a06a..eff2d4267787 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -612,6 +612,93 @@ class CIRBuildImpl { HANDLEBINOP(Or) #undef HANDLEBINOP + mlir::Value buildCmp(const BinaryOperator *E) { + mlir::Value Result; + QualType LHSTy = E->getLHS()->getType(); + QualType RHSTy = E->getRHS()->getType(); + + if (const MemberPointerType *MPT = LHSTy->getAs()) { + assert(0 && "not implemented"); + } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { + BinOpInfo BOInfo = buildBinOps(E); + mlir::Value LHS = BOInfo.LHS; + mlir::Value RHS = BOInfo.RHS; + + if (LHSTy->isVectorType()) { + // Cannot handle any vector just yet. + assert(0 && "not implemented"); + // If AltiVec, the comparison results in a numeric type, so we use + // intrinsics comparing vectors and giving 0 or 1 as a result + if (!E->getType()->isVectorType()) + assert(0 && "not implemented"); + } + if (BOInfo.isFixedPointOp()) { + assert(0 && "not implemented"); + } else { + // TODO: when we add proper basic types to CIR we + // probably won't need to handle + // LHSTy->hasSignedIntegerRepresentation() + + // Unsigned integers and pointers. + if (LHS.getType().isa() || + RHS.getType().isa()) { + // TODO: Handle StrictVTablePointers and + // mayBeDynamicClass/invariant group. + assert(0 && "not implemented"); + } + + mlir::cir::CmpOpKind Kind; + switch (E->getOpcode()) { + case BO_LT: + Kind = mlir::cir::CmpOpKind::lt; + break; + case BO_GT: + Kind = mlir::cir::CmpOpKind::gt; + break; + case BO_LE: + Kind = mlir::cir::CmpOpKind::le; + break; + case BO_GE: + Kind = mlir::cir::CmpOpKind::ge; + break; + case BO_EQ: + Kind = mlir::cir::CmpOpKind::eq; + break; + case BO_NE: + Kind = mlir::cir::CmpOpKind::ne; + break; + default: + llvm_unreachable("unsupported"); + } + + return Builder.builder.create( + Builder.getLoc(BOInfo.Loc.getBegin()), + Builder.getCIRType(BOInfo.Ty), Kind, BOInfo.LHS, BOInfo.RHS); + } + + // If this is a vector comparison, sign extend the result to the + // appropriate vector integer type and return it (don't convert to + // bool). + if (LHSTy->isVectorType()) + assert(0 && "not implemented"); + } else { // Complex Comparison: can only be an equality comparison. + assert(0 && "not implemented"); + } + + return buildScalarConversion(Result, Builder.astCtx.BoolTy, E->getType(), + E->getExprLoc()); + } + +#define VISITCOMP(CODE) \ + mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); } + VISITCOMP(LT) + VISITCOMP(GT) + VISITCOMP(LE) + VISITCOMP(GE) + VISITCOMP(EQ) + VISITCOMP(NE) +#undef VISITCOMP + mlir::Value VisitExpr(Expr *E) { // Crashing here for "ScalarExprClassName"? Please implement // VisitScalarExprClassName(...) to get this working. diff --git a/clang/test/CIR/CodeGen/cmp.cpp b/clang/test/CIR/CodeGen/cmp.cpp new file mode 100644 index 000000000000..f0c3c78f89a3 --- /dev/null +++ b/clang/test/CIR/CodeGen/cmp.cpp @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void c0(int a, int b) { + bool x = a > b; + x = a < b; + x = a <= b; + x = a >= b; + x = a != b; + x = a == b; +} + +// CHECK: = cir.cmp(gt, %3, %4) : i32, !cir.bool +// CHECK: = cir.cmp(lt, %6, %7) : i32, !cir.bool +// CHECK: = cir.cmp(le, %9, %10) : i32, !cir.bool +// CHECK: = cir.cmp(ge, %12, %13) : i32, !cir.bool +// CHECK: = cir.cmp(ne, %15, %16) : i32, !cir.bool +// CHECK: = cir.cmp(eq, %18, %19) : i32, !cir.bool \ No newline at end of file diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index b172cd26b23f..f3d820026b03 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -448,8 +448,8 @@ def BinOp : CIR_Op<"binop", [Pure, Example ``` - %7 = binop(add, %1, %2) - %7 = binop(mul, %1, %2) + %7 = binop(add, %1, %2) : i32 + %7 = binop(mul, %1, %2) : i8 ``` }]; @@ -466,4 +466,51 @@ def BinOp : CIR_Op<"binop", [Pure, let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// CmpOp +//===----------------------------------------------------------------------===// + +def CmpOpKind_LT : I32EnumAttrCase<"lt", 1>; +def CmpOpKind_LE : I32EnumAttrCase<"le", 2>; +def CmpOpKind_GT : I32EnumAttrCase<"gt", 3>; +def CmpOpKind_GE : I32EnumAttrCase<"ge", 4>; +def CmpOpKind_EQ : I32EnumAttrCase<"eq", 5>; +def CmpOpKind_NE : I32EnumAttrCase<"ne", 6>; + +def CmpOpKind : I32EnumAttr< + "CmpOpKind", + "compare operation kind", + [CmpOpKind_LT, CmpOpKind_LE, CmpOpKind_GT, + CmpOpKind_GE, CmpOpKind_EQ, CmpOpKind_NE]> { + let cppNamespace = "::mlir::cir"; +} + +// FIXME: Pure might not work when we add overloading. +def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { + + let summary = "comapre operation"; + let description = [{ + "cir.cmp compares two input operands and produces a bool result. The input + operands must have the same type. The kinds of comparison available are: + [lt,gt,ge,eq,ne] + + Example + ``` + %7 = cir.cmp(gt, %1, %2) : i32, !cir.bool + ``` + }]; + + // TODO: get more accurate than AnyType + let results = (outs AnyType:$result); + let arguments = (ins Arg:$kind, + AnyType:$lhs, AnyType:$rhs); + + let assemblyFormat = [{ + `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,` type($result) attr-dict + }]; + + // Already covered by the traits + let hasVerifier = 0; +} + #endif // MLIR_CIR_DIALECT_CIR_OPS From a98b47476aa4c0a93df4d16be7ff94e40697ad57 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 8 Dec 2021 17:01:29 -0800 Subject: [PATCH 0096/2301] [CIR] Enable debug printing and improve source location handling - Enable debug printing as the default way to print CIR in order to easily test source location accuracy. Note that pretty printing there doesn't round trip yet so hasn't been enabled. - Add a helper to convert between SourceRanges and FusedLocs. - Update places that were passing the beginning of SourceRange to pass in the full range. - Fix few places where the wrong source location was being used. - Update tests to skip source location bits when is uninteresting. - Add a new source location specific test. --- clang/lib/CIR/CIRBuilder.cpp | 91 ++++++++++++-------- clang/lib/CIRFrontendAction/CIRGenAction.cpp | 9 +- clang/test/CIR/CodeGen/basic.c | 3 +- clang/test/CIR/CodeGen/basic.cpp | 7 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 60 +++++++++++++ clang/test/CIR/CodeGen/types.c | 35 ++++---- 6 files changed, 147 insertions(+), 58 deletions(-) create mode 100644 clang/test/CIR/CodeGen/sourcelocation.cpp diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index eff2d4267787..70d40706e58f 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -201,7 +201,7 @@ class CIRBuildImpl { ~SourceLocRAIIObject() { restore(); } }; - /// Helper conversion from Clang source location to an MLIR location. + /// Helpers to convert Clang's SourceLocation to a MLIR Location. mlir::Location getLoc(SourceLocation SLoc) { const SourceManager &SM = astCtx.getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(SLoc); @@ -210,6 +210,20 @@ class CIRBuildImpl { PLoc.getLine(), PLoc.getColumn()); } + mlir::Location getLoc(SourceRange SLoc) { + mlir::Location B = getLoc(SLoc.getBegin()); + mlir::Location E = getLoc(SLoc.getEnd()); + SmallVector locs = {B, E}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); + } + + mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs) { + SmallVector locs = {lhs, rhs}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); + } + /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. mlir::LogicalResult declare(const Decl *var, QualType T, mlir::Location loc, @@ -476,8 +490,8 @@ class CIRBuildImpl { mlir::cir::NullAttr::get(Builder.builder.getContext(), Ty)); } case CK_IntegralToBoolean: { - return buildIntToBoolConversion( - Visit(E), Builder.getLoc(CE->getSourceRange().getBegin())); + return buildIntToBoolConversion(Visit(E), + Builder.getLoc(CE->getSourceRange())); } default: emitError(Builder.getLoc(CE->getExprLoc()), @@ -546,52 +560,52 @@ class CIRBuildImpl { mlir::Value buildMul(const BinOpInfo &Ops) { return Builder.builder.create( - Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); } mlir::Value buildDiv(const BinOpInfo &Ops) { return Builder.builder.create( - Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Div, Ops.LHS, Ops.RHS); } mlir::Value buildRem(const BinOpInfo &Ops) { return Builder.builder.create( - Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); } mlir::Value buildAdd(const BinOpInfo &Ops) { return Builder.builder.create( - Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS); } mlir::Value buildSub(const BinOpInfo &Ops) { return Builder.builder.create( - Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); } mlir::Value buildShl(const BinOpInfo &Ops) { return Builder.builder.create( - Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shl, Ops.LHS, Ops.RHS); } mlir::Value buildShr(const BinOpInfo &Ops) { return Builder.builder.create( - Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shr, Ops.LHS, Ops.RHS); } mlir::Value buildAnd(const BinOpInfo &Ops) { return Builder.builder.create( - Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), mlir::cir::BinOpKind::And, Ops.LHS, Ops.RHS); } mlir::Value buildXor(const BinOpInfo &Ops) { return Builder.builder.create( - Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); } mlir::Value buildOr(const BinOpInfo &Ops) { return Builder.builder.create( - Builder.getLoc(Ops.Loc.getBegin()), Builder.getCIRType(Ops.Ty), + Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, Ops.LHS, Ops.RHS); } @@ -672,8 +686,8 @@ class CIRBuildImpl { } return Builder.builder.create( - Builder.getLoc(BOInfo.Loc.getBegin()), - Builder.getCIRType(BOInfo.Ty), Kind, BOInfo.LHS, BOInfo.RHS); + Builder.getLoc(BOInfo.Loc), Builder.getCIRType(BOInfo.Ty), Kind, + BOInfo.LHS, BOInfo.RHS); } // If this is a vector comparison, sign extend the result to the @@ -942,8 +956,7 @@ class CIRBuildImpl { // TODO: track source location range... mlir::Value addr; - if (failed(declare(&D, Ty, getLoc(D.getSourceRange().getBegin()), alignment, - addr))) { + if (failed(declare(&D, Ty, getLoc(D.getSourceRange()), alignment, addr))) { theModule.emitError("Cannot declare variable"); return emission; } @@ -1047,7 +1060,7 @@ class CIRBuildImpl { void buildScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue) { // TODO: this is where a lot of ObjC lifetime stuff would be done. mlir::Value value = buildScalarExpr(init); - SourceLocRAIIObject Loc{*this, getLoc(D->getSourceRange().getBegin())}; + SourceLocRAIIObject Loc{*this, getLoc(D->getSourceRange())}; buldStoreThroughLValue(RValue::get(value), lvalue, D); return; } @@ -1370,7 +1383,7 @@ class CIRBuildImpl { // FIXME: evaluate for side effects. } - builder.create(getLoc(RV->getExprLoc()), + builder.create(getLoc(S.getSourceRange()), V ? ArrayRef(V) : ArrayRef()); return mlir::success(); } @@ -1485,7 +1498,7 @@ class CIRBuildImpl { RValue RV = buildAnyExpr(E->getRHS()); LValue LV = buildLValue(E->getLHS()); - SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange().getBegin())}; + SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; buldStoreThroughLValue(RV, LV, nullptr /*InitDecl*/); assert(!astCtx.getLangOpts().OpenMP && "last priv cond not implemented"); return LV; @@ -1833,20 +1846,24 @@ class CIRBuildImpl { } // TODO: PGO and likelihood. - return buildIfOnBoolExpr(S.getCond(), - getLoc(S.getSourceRange().getBegin()), + // The mlir::Location for cir.if skips the init/cond part of IfStmt, + // and effectively spans from "then-begin" to "else-end||then-end". + auto ifLocStart = getLoc(S.getThen()->getSourceRange().getBegin()); + auto ifLocEnd = getLoc(S.getSourceRange().getEnd()); + return buildIfOnBoolExpr(S.getCond(), getLoc(ifLocStart, ifLocEnd), S.getThen(), S.getElse()); }; // TODO: Add a new scoped symbol table. // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); - auto locBegin = getLoc(S.getSourceRange().getBegin()); - auto locEnd = getLoc(S.getSourceRange().getEnd()); + // The if scope contains the full source range for IfStmt. + auto scopeLoc = getLoc(S.getSourceRange()); + auto scopeLocEnd = getLoc(S.getSourceRange().getEnd()); builder.create( - locBegin, mlir::TypeRange(), /*scopeBuilder=*/ + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { res = ifStmtBuilder(); - builder.create(locEnd); + builder.create(scopeLocEnd); }); return res; @@ -2078,7 +2095,7 @@ class CIRBuildImpl { const CXXMethodDecl *MD = dyn_cast(FD); assert(!MD && "methods not implemented"); - auto loc = getLoc(FD->getLocation()); + auto fnLoc = getLoc(FD->getSourceRange()); // Create an MLIR function for the given prototype. llvm::SmallVector argTypes; @@ -2091,7 +2108,8 @@ class CIRBuildImpl { argTypes, CurCCGF->FnRetQualTy->isVoidType() ? mlir::TypeRange() : getCIRType(CurCCGF->FnRetQualTy)); - mlir::FuncOp function = mlir::FuncOp::create(loc, FD->getName(), funcType); + mlir::FuncOp function = + mlir::FuncOp::create(fnLoc, FD->getName(), funcType); if (!function) return nullptr; @@ -2110,14 +2128,17 @@ class CIRBuildImpl { auto *paramVar = std::get<0>(nameValue); auto paramVal = std::get<1>(nameValue); auto alignment = astCtx.getDeclAlign(paramVar); + auto paramLoc = getLoc(paramVar->getSourceRange()); + paramVal.setLoc(paramLoc); + mlir::Value addr; - if (failed(declare(paramVar, paramVar->getType(), - getLoc(paramVar->getSourceRange().getBegin()), - alignment, addr, true /*param*/))) + if (failed(declare(paramVar, paramVar->getType(), paramLoc, alignment, + addr, true /*param*/))) return nullptr; - // Store params in local storage. FIXME: is this really needed - // at this level of representation? - builder.create(loc, paramVal, addr); + // Location of the store to the param storage tracked as beginning of + // the function body. + auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); + builder.create(fnBodyBegin, paramVal, addr); } // Emit the body of the function. @@ -2130,7 +2151,7 @@ class CIRBuildImpl { if (!entryBlock.empty()) returnOp = dyn_cast(entryBlock.back()); if (!returnOp) - builder.create(loc); + builder.create(getLoc(FD->getBody()->getEndLoc())); if (mlir::failed(function.verifyBody())) return nullptr; diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index 4d1b84288b2b..051aa273619f 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -13,6 +13,7 @@ #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Parser/Parser.h" +#include "mlir/IR/OperationSupport.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" @@ -127,8 +128,12 @@ class CIRGenConsumer : public clang::ASTConsumer { switch (action) { case CIRGenAction::OutputType::EmitCIR: - if (outputStream) - mlirMod->print(*outputStream); + if (outputStream) { + mlir::OpPrintingFlags flags; + // FIXME: we cannot roundtrip prettyForm=true right now. + flags.enableDebugInfo(/*prettyForm=*/false); + mlirMod->print(*outputStream, flags); + } break; case CIRGenAction::OutputType::EmitLLVM: { llvm::LLVMContext llvmCtx; diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index f23ae56aef92..4d485ca13762 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -1,12 +1,13 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * int foo(int i) { return i; } // CHECK: module { -// CHECK-NEXT: func @foo(%arg0: i32) -> i32 { +// CHECK-NEXT: func @foo(%arg0: i32 loc({{.*}})) -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT: %1 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index d8b31929aeb9..f41c3ea90ddb 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * int *p0() { int *p = nullptr; @@ -60,7 +61,7 @@ void b0() { bool x = true, y = false; } void b1(int a) { bool b = a; } -// CHECK: func @b1(%arg0: i32) { +// CHECK: func @b1(%arg0: i32 loc({{.*}})) { // CHECK: %2 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 // CHECK: %3 = cir.cast(int_to_bool, %2 : i32), !cir.bool // CHECK: cir.store %3, %0 : !cir.bool, cir.ptr @@ -75,7 +76,7 @@ int if0(int a) { return x; } -// CHECK: func @if0(%arg0: i32) -> i32 { +// CHECK: func @if0(%arg0: i32 loc({{.*}})) -> i32 { // CHECK: cir.scope { // CHECK: %4 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 // CHECK: %5 = cir.cast(int_to_bool, %4 : i32), !cir.bool @@ -104,7 +105,7 @@ int if1(int a, bool b, bool c) { return x; } -// CHECK: func @if1(%arg0: i32, %arg1: !cir.bool, %arg2: !cir.bool) -> i32 { +// CHECK: func @if1(%arg0: i32 loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) -> i32 { // CHECK: cir.scope { // CHECK: %6 = cir.load %3 lvalue_to_rvalue : cir.ptr , i32 // CHECK: %7 = cir.cast(int_to_bool, %6 : i32), !cir.bool diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp new file mode 100644 index 000000000000..881df14559ee --- /dev/null +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -0,0 +1,60 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +int s0(int a, int b) { + int x = a + b; + if (x > 0) + x = 0; + else + x = 1; + return x; +} + +// CHECK: #[[loc2:loc[0-9]+]] = loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]) +// CHECK: #[[loc3:loc[0-9]+]] = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) +// CHECK: module { +// CHECK: func @s0(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { +// CHECK: %0 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} loc(#[[loc4:loc[0-9]+]]) +// CHECK: %1 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} loc(#[[loc3]]) +// CHECK: %2 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} loc(#[[loc2]]) +// CHECK: cir.store %arg0, %2 : i32, cir.ptr loc(#[[loc5:loc[0-9]+]]) +// CHECK: cir.store %arg1, %1 : i32, cir.ptr loc(#[[loc5]]) +// CHECK: %3 = cir.load %2 lvalue_to_rvalue : cir.ptr , i32 loc(#[[loc6:loc[0-9]+]]) +// CHECK: %4 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 loc(#[[loc7:loc[0-9]+]]) +// CHECK: %5 = cir.binop(add, %3, %4) : i32 loc(#[[loc8:loc[0-9]+]]) +// CHECK: cir.store %5, %0 : i32, cir.ptr loc(#[[loc4]]) +// CHECK: cir.scope { +// CHECK: %7 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 loc(#[[loc10:loc[0-9]+]]) +// CHECK: %8 = cir.cst(0 : i32) : i32 loc(#[[loc11:loc[0-9]+]]) +// CHECK: %9 = cir.cmp(gt, %7, %8) : i32, !cir.bool loc(#[[loc12:loc[0-9]+]]) +// CHECK: cir.if %9 { +// CHECK: %10 = cir.cst(0 : i32) : i32 loc(#[[loc14:loc[0-9]+]]) +// CHECK: cir.store %10, %0 : i32, cir.ptr loc(#[[loc15:loc[0-9]+]]) +// CHECK: } else { +// CHECK: %10 = cir.cst(1 : i32) : i32 loc(#[[loc16:loc[0-9]+]]) +// CHECK: cir.store %10, %0 : i32, cir.ptr loc(#[[loc17:loc[0-9]+]]) +// CHECK: } loc(#[[loc13:loc[0-9]+]]) +// CHECK: } loc(#[[loc9:loc[0-9]+]]) +// CHECK: %6 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 loc(#[[loc18:loc[0-9]+]]) +// CHECK: cir.return %6 : i32 loc(#[[loc19:loc[0-9]+]]) +// CHECK: } loc(#[[loc1:loc[0-9]+]]) +// CHECK: } loc(#[[loc0:loc[0-9]+]]) +// CHECK: #[[loc0]] = loc(unknown) +// CHECK: #[[loc1]] = loc(fused["{{.*}}sourcelocation.cpp":4:1, "{{.*}}sourcelocation.cpp":11:1]) +// CHECK: #[[loc4]] = loc(fused["{{.*}}sourcelocation.cpp":5:3, "{{.*}}sourcelocation.cpp":5:15]) +// CHECK: #[[loc5]] = loc("{{.*}}sourcelocation.cpp":4:22) +// CHECK: #[[loc6]] = loc("{{.*}}sourcelocation.cpp":5:11) +// CHECK: #[[loc7]] = loc("{{.*}}sourcelocation.cpp":5:15) +// CHECK: #[[loc8]] = loc(fused["{{.*}}sourcelocation.cpp":5:11, "{{.*}}sourcelocation.cpp":5:15]) +// CHECK: #[[loc9]] = loc(fused["{{.*}}sourcelocation.cpp":6:3, "{{.*}}sourcelocation.cpp":9:9]) +// CHECK: #[[loc10]] = loc("{{.*}}sourcelocation.cpp":6:7) +// CHECK: #[[loc11]] = loc("{{.*}}sourcelocation.cpp":6:11) +// CHECK: #[[loc12]] = loc(fused["{{.*}}sourcelocation.cpp":6:7, "{{.*}}sourcelocation.cpp":6:11]) +// CHECK: #[[loc13]] = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":9:9]) +// CHECK: #[[loc14]] = loc("{{.*}}sourcelocation.cpp":7:9) +// CHECK: #[[loc15]] = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9]) +// CHECK: #[[loc16]] = loc("{{.*}}sourcelocation.cpp":9:9) +// CHECK: #[[loc17]] = loc(fused["{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) +// CHECK: #[[loc18]] = loc("{{.*}}sourcelocation.cpp":10:10) +// CHECK: #[[loc19]] = loc(fused["{{.*}}sourcelocation.cpp":10:3, "{{.*}}sourcelocation.cpp":10:10]) diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index 81618b83231c..78c9f3ce8444 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -2,6 +2,7 @@ // RUN: FileCheck --input-file=%t.cir %s // RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cpp.cir // RUN: FileCheck --input-file=%t.cpp.cir --check-prefix=CHECK-CPP %s +// XFAIL: * int t0(int i) { return i; } unsigned int t1(unsigned int i) { return i; } @@ -21,23 +22,23 @@ void t8() {} bool t9(bool b) { return b; } #endif -// CHECK: func @t0(%arg0: i32) -> i32 { -// CHECK: func @t1(%arg0: i32) -> i32 { -// CHECK: func @t2(%arg0: i8) -> i8 { -// CHECK: func @t3(%arg0: i8) -> i8 { -// CHECK: func @t4(%arg0: i16) -> i16 { -// CHECK: func @t5(%arg0: i16) -> i16 { -// CHECK: func @t6(%arg0: f32) -> f32 { -// CHECK: func @t7(%arg0: f64) -> f64 { +// CHECK: func @t0(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK: func @t1(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK: func @t2(%arg0: i8 loc({{.*}})) -> i8 { +// CHECK: func @t3(%arg0: i8 loc({{.*}})) -> i8 { +// CHECK: func @t4(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK: func @t5(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK: func @t6(%arg0: f32 loc({{.*}})) -> f32 { +// CHECK: func @t7(%arg0: f64 loc({{.*}})) -> f64 { // CHECK: func @t8() { -// CHECK-CPP: func @t0(%arg0: i32) -> i32 { -// CHECK-CPP: func @t1(%arg0: i32) -> i32 { -// CHECK-CPP: func @t2(%arg0: i8) -> i8 { -// CHECK-CPP: func @t3(%arg0: i8) -> i8 { -// CHECK-CPP: func @t4(%arg0: i16) -> i16 { -// CHECK-CPP: func @t5(%arg0: i16) -> i16 { -// CHECK-CPP: func @t6(%arg0: f32) -> f32 { -// CHECK-CPP: func @t7(%arg0: f64) -> f64 { +// CHECK-CPP: func @t0(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK-CPP: func @t1(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK-CPP: func @t2(%arg0: i8 loc({{.*}})) -> i8 { +// CHECK-CPP: func @t3(%arg0: i8 loc({{.*}})) -> i8 { +// CHECK-CPP: func @t4(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK-CPP: func @t5(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK-CPP: func @t6(%arg0: f32 loc({{.*}})) -> f32 { +// CHECK-CPP: func @t7(%arg0: f64 loc({{.*}})) -> f64 { // CHECK-CPP: func @t8() { -// CHECK-CPP: func @t9(%arg0: !cir.bool) -> !cir.bool { +// CHECK-CPP: func @t9(%arg0: !cir.bool loc({{.*}})) -> !cir.bool { From 5bd6a55499110940febc4d9db3a0f6e13886a73b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 10 Dec 2021 17:42:44 -0500 Subject: [PATCH 0097/2301] [CIR] Refactor buildCIR into buildTopLevelDecl to account for non-functions Make a `buildTopLevelDecl` entry point for usage by the ASTConsumer that'll be able to switch over the Decl kind. Leave EmitFunction behind for the CIRBasedWarnings usage. --- clang/lib/CIR/CIRBuilder.cpp | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 70d40706e58f..7efe1a5b7fd2 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -2086,9 +2086,20 @@ class CIRBuildImpl { return mlir::success(); } + void buildTopLevelDecl(Decl *decl) { + switch (decl->getKind()) { + default: + assert(false && "Not yet implemented"); + case Decl::Function: + buildFunction(cast(decl)); + break; + } + } + // Emit a new function and add it to the MLIR module. - mlir::FuncOp buildCIR(CIRCodeGenFunction *CCGF, const FunctionDecl *FD) { - CurCCGF = CCGF; + mlir::FuncOp buildFunction(const FunctionDecl *FD) { + CIRCodeGenFunction CCGF; + CurCCGF = &CCGF; // Create a scope in the symbol table to hold variable declarations. SymTableScopeTy varScope(symbolTable); @@ -2191,19 +2202,16 @@ void CIRContext::Initialize(clang::ASTContext &astCtx) { void CIRContext::verifyModule() { builder->verifyModule(); } bool CIRContext::EmitFunction(const FunctionDecl *FD) { - CIRCodeGenFunction CCGF{}; - auto func = builder->buildCIR(&CCGF, FD); + auto func = builder->buildFunction(FD); assert(func && "should emit function"); - return true; + return func.getOperation() != nullptr; } mlir::ModuleOp CIRContext::getModule() { return builder->getModule(); } bool CIRContext::HandleTopLevelDecl(clang::DeclGroupRef D) { for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { - auto *FD = cast(*I); - assert(FD && "We can't handle anything else yet"); - EmitFunction(FD); + builder->buildTopLevelDecl(*I); } return true; From 4d30c00f0f6713e56b57169c0d790fae0da3ff14 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 10 Dec 2021 17:43:31 -0500 Subject: [PATCH 0098/2301] [CIR] Implement basic support for RecordDecl and add a cir::StructType This is a pretty bare bones implementation for handling top level RecordDecls and VarDecls that have RecordTypes as their type. A cir::StructType is created to serve as the storage. The record layout process follows closely from CodeGen and makes heavy use of asserts to point out divergences. --- clang/lib/CIR/CIRBuilder.cpp | 5 + clang/lib/CIR/CIRGenTypes.cpp | 76 +++++++- clang/lib/CIR/CIRGenTypes.h | 26 ++- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 173 +++++++++++++++++++ clang/lib/CIR/CMakeLists.txt | 1 + clang/test/CIR/CodeGen/struct.c | 17 ++ mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h | 3 +- mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td | 30 ++++ mlir/lib/Dialect/CIR/IR/CIRTypes.cpp | 25 ++- 9 files changed, 350 insertions(+), 6 deletions(-) create mode 100644 clang/lib/CIR/CIRRecordLayoutBuilder.cpp create mode 100644 clang/test/CIR/CodeGen/struct.c diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 7efe1a5b7fd2..97dd5fc04fb5 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -2093,6 +2093,11 @@ class CIRBuildImpl { case Decl::Function: buildFunction(cast(decl)); break; + case Decl::Record: + // There's nothing to do here, we emit everything pertaining to `Record`s + // lazily. + // TODO: handle debug info here? See clang's CodeGenModule::EmitTopLevelDecl + break; } } diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index e9463a13c613..2765fb5e0c14 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -17,6 +17,78 @@ CIRGenTypes::CIRGenTypes(ASTContext &Ctx, mlir::OpBuilder &B) : Context(Ctx), Builder(B) {} CIRGenTypes::~CIRGenTypes() = default; +std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, + StringRef suffix) { + llvm::SmallString<256> typeName; + llvm::raw_svector_ostream outStream(typeName); + + outStream << recordDecl->getKindName() << '.'; + + PrintingPolicy policy = recordDecl->getASTContext().getPrintingPolicy(); + policy.SuppressInlineNamespace = false; + + if (recordDecl->getIdentifier()) { + if (recordDecl->getDeclContext()) + recordDecl->printQualifiedName(outStream, policy); + else + recordDecl->DeclaratorDecl::printName(outStream); + } else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl()) { + if (typedefNameDecl->getDeclContext()) + typedefNameDecl->printQualifiedName(outStream, policy); + else + typedefNameDecl->printName(outStream); + } else { + outStream << "anon"; + } + + if (!suffix.empty()) + outStream << suffix; + + return std::string(typeName); +} + +mlir::Type +CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { + const auto *key = Context.getTagDeclType(recordDecl).getTypePtr(); + mlir::cir::StructType &entry = recordDeclTypes[key]; + + recordDecl = recordDecl->getDefinition(); + // TODO: clang checks here whether the type is known to be opaque. This is + // equivalent to a forward decl. Is checking for a non-null entry close enough + // of a match? + if (!recordDecl || !recordDecl->isCompleteDefinition() || entry) + return entry; + + // TODO: Implement checking for whether or not this type is safe to convert. + // Clang CodeGen has issues with infinitely looping on recursive types that + // has to be worked around + + assert(!dyn_cast_or_null(recordDecl) && + "CXXRecordDecl not yet finished"); + + entry = computeRecordLayout(recordDecl); + + // TODO: handle whether or not layout was skipped + + return entry; +} + +mlir::Type CIRGenTypes::convertTypeForMem(clang::QualType qualType, + bool forBitField) { + assert(!qualType->isConstantMatrixType() && "Matrix types NYI"); + + mlir::Type convertedType = ConvertType(qualType); + + assert(!forBitField && "Bit fields NYI"); + assert(!qualType->isBitIntType() && "BitIntType NYI"); + + return convertedType; +} + +mlir::MLIRContext &CIRGenTypes::getMLIRContext() const { + return *Builder.getContext(); +} + /// ConvertType - Convert the specified type to its MLIR form. mlir::Type CIRGenTypes::ConvertType(QualType T) { T = Context.getCanonicalType(T); @@ -26,8 +98,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { // may be represented in different types. assert(!Context.getLangOpts().CUDAIsDevice && "not implemented"); - // RecordTypes are cached and processed specially. - assert(!dyn_cast(Ty) && "not implemented"); + if (const auto *recordType = dyn_cast(T)) + return convertRecordDeclType(recordType->getDecl()); // See if type is already cached. TypeCacheTy::iterator TCI = TypeCache.find(Ty); diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index f1951e858686..84d1d938f1f3 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -13,9 +13,13 @@ #ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H #define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H +#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "mlir/IR/MLIRContext.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "llvm/ADT/DenseMap.h" +#include + namespace llvm { class FunctionType; class DataLayout; @@ -47,8 +51,6 @@ class GlobalDecl; namespace CodeGen { class ABIInfo; class CGCXXABI; -class CGRecordLayout; -class CodeGenModule; class RequiredArgs; } // end namespace CodeGen } // end namespace clang @@ -56,6 +58,9 @@ class RequiredArgs; namespace mlir { class Type; class OpBuilder; +namespace cir { +class StructType; +} } // namespace mlir /// This class organizes the cross-module state that is used while lowering @@ -65,6 +70,8 @@ class CIRGenTypes { clang::ASTContext &Context; mlir::OpBuilder &Builder; + llvm::DenseMap recordDeclTypes; + public: CIRGenTypes(clang::ASTContext &Ctx, mlir::OpBuilder &B); ~CIRGenTypes(); @@ -75,9 +82,24 @@ class CIRGenTypes { TypeCacheTy TypeCache; clang::ASTContext &getContext() const { return Context; } + mlir::MLIRContext &getMLIRContext() const; /// ConvertType - Convert type T into a mlir::Type. mlir::Type ConvertType(clang::QualType T); + + mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl); + + mlir::cir::StructType computeRecordLayout(const clang::RecordDecl *); + + std::string getRecordTypeName(const clang::RecordDecl *, + llvm::StringRef suffix); + + /// convertTypeForMem - Convert type T into an mlir::Type. This differs from + /// convertType in that it is used to convert to the memory representation for + /// a type. For example, the scalar representation for _Bool is i1, but the + /// memory representation is usually i8 or i32, depending on the target. + // TODO: convert this comment to account for MLIR's equivalence + mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); }; } // namespace cir diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp new file mode 100644 index 000000000000..4470d2ba5355 --- /dev/null +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -0,0 +1,173 @@ + +#include "CIRGenTypes.h" +#include "mlir/IR/BuiltinTypes.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/RecordLayout.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" + +#include + +using namespace cir; + +namespace { +struct CIRRecordLowering final { + + // MemberInfo is a helper structure that contains information about a record + // member. In addition to the standard member types, there exists a sentinel + // member type that ensures correct rounding. + struct MemberInfo final { + clang::CharUnits offset; + enum class InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } kind; + mlir::Type data; + const clang::FieldDecl *fieldDecl; + MemberInfo(clang::CharUnits offset, InfoKind kind, mlir::Type data, + const clang::FieldDecl *fieldDecl = nullptr) + : offset{offset}, kind{kind}, data{data}, fieldDecl{fieldDecl} {}; + bool operator<(const MemberInfo &other) const { + return offset < other.offset; + } + }; + CIRRecordLowering(CIRGenTypes &cirGenTypes, + const clang::RecordDecl *recordDecl, bool isPacked); + + void lower(bool nonVirtualBaseType); + + void accumulateFields(); + + clang::CharUnits bitsToCharUnits(uint64_t bitOffset) { + return astContext.toCharUnitsFromBits(bitOffset); + } + + void calculateZeroInit(); + + mlir::Type getCharType() { + return mlir::IntegerType::get(&cirGenTypes.getMLIRContext(), + astContext.getCharWidth()); + } + + mlir::Type getByteArrayType(clang::CharUnits numberOfChars) { + assert(!numberOfChars.isZero() && "Empty byte arrays aren't allowed."); + mlir::Type type = getCharType(); + return numberOfChars == clang::CharUnits::One() + ? type + : mlir::RankedTensorType::get({0, numberOfChars.getQuantity()}, + type); + } + + mlir::Type getStorageType(const clang::FieldDecl *fieldDecl) { + auto type = cirGenTypes.convertTypeForMem(fieldDecl->getType()); + assert(!fieldDecl->isBitField() && "bit fields NYI"); + if (!fieldDecl->isBitField()) + return type; + + // if (isDiscreteBitFieldABI()) + // return type; + + // return getIntNType(std::min(fielddecl->getBitWidthValue(astContext), + // static_cast(astContext.toBits(getSize(type))))); + llvm_unreachable("getStorageType only supports nonBitFields at this point"); + } + + uint64_t getFieldBitOffset(const clang::FieldDecl *fieldDecl) { + return astRecordLayout.getFieldOffset(fieldDecl->getFieldIndex()); + } + + /// Fills out the structures that are ultimately consumed. + void fillOutputFields(); + + CIRGenTypes &cirGenTypes; + const clang::ASTContext &astContext; + const clang::RecordDecl *recordDecl; + const clang::CXXRecordDecl *cxxRecordDecl; + const clang::ASTRecordLayout &astRecordLayout; + // Helpful intermediate data-structures + std::vector members; + // Output fields, consumed by CIRGenTypes::computeRecordLayout + llvm::SmallVector fieldTypes; + llvm::DenseMap fields; + bool isPacked : 1; + +private: + CIRRecordLowering(const CIRRecordLowering &) = delete; + void operator=(const CIRRecordLowering &) = delete; +}; +} // namespace + +CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, + const clang::RecordDecl *recordDecl, + bool isPacked) + : cirGenTypes{cirGenTypes}, astContext{cirGenTypes.getContext()}, + recordDecl{recordDecl}, + cxxRecordDecl{llvm::dyn_cast(recordDecl)}, + astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, + isPacked{isPacked} {} + +void CIRRecordLowering::lower(bool nonVirtualBaseType) { + assert(!cxxRecordDecl && "CXXRecordDecl NYI"); + assert(!recordDecl->isUnion() && "unions NYI"); + + accumulateFields(); + llvm::stable_sort(members); + + // TODO: implement clipTailPadding once bitfields are implemented + // TODO: implemented packed structs + // TODO: implement padding + // TODO: support zeroInit + fillOutputFields(); + // TODO: implement volatile bit fields +} + +void CIRRecordLowering::fillOutputFields() { + for (auto &member : members) { + assert(member.data && "member.data should be valid"); + fieldTypes.push_back(member.data); + assert(member.kind == MemberInfo::InfoKind::Field && + "Bit fields and inheritance are not NYI"); + assert(member.fieldDecl && "member.fieldDecl should be valid"); + fields[member.fieldDecl->getCanonicalDecl()] = fieldTypes.size() - 1; + + // A field without storage must be a bitfield. + assert(member.data && "Bitfields NYI"); + assert(member.kind != MemberInfo::InfoKind::Base && "Base classes NYI"); + assert(member.kind != MemberInfo::InfoKind::VBase && "Base classes NYI"); + } +} + +void CIRRecordLowering::accumulateFields() { + for (auto *field : recordDecl->fields()) { + assert(!field->isBitField() && "bit fields NYI"); + assert(!field->isZeroSize(astContext) && "zero size members NYI"); + members.push_back(MemberInfo{bitsToCharUnits(getFieldBitOffset(field)), + MemberInfo::InfoKind::Field, + getStorageType(field), field}); + } +} + +mlir::cir::StructType +CIRGenTypes::computeRecordLayout(const clang::RecordDecl *recordDecl) { + CIRRecordLowering builder(*this, recordDecl, /*packed=*/false); + builder.lower(/*nonVirtualBaseType=*/false); + + assert(!llvm::isa(recordDecl) && "NYI"); + assert(!builder.isPacked && "Packed structs NYI"); + // TODO: figure out the corresponding `opaque`ness mapping from llvm -> + // mlir::cir for this comment. Comment lifted from CodeGen + // Fill in the struct *after* computing the base type. Filling in the body + // signifies that the type is no longer opaque and record layout is complete, + // but we may need to recursively layout D while laying D out as a base type. + auto name = getRecordTypeName(recordDecl, ""); + auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); + auto structType = mlir::cir::StructType::get(&getMLIRContext(), + builder.fieldTypes, identifier); + + assert(!getContext().getLangOpts().DumpRecordLayouts && + "RecordLayouts dumping NYI"); + + // TODO: implement verification phase + + return structType; +} diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 635d5e2c39b1..de41b7e3b445 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -13,6 +13,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR CIRBuilder.cpp CIRGenTypes.cpp + CIRRecordLayoutBuilder.cpp LowerToLLVM.cpp DEPENDS diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c new file mode 100644 index 000000000000..a88285e42c0a --- /dev/null +++ b/clang/test/CIR/CodeGen/struct.c @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Foo { + int a; + char b; +}; + +void bar() { + struct Foo f; +} + +// CHECK: module { +// CHECK-NEXT: func @bar() { +// CHECK-NEXT: %0 = cir.alloca !cir.struct<"struct.Foo", i32, i8>, cir.ptr >, [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h index e5ffc3ff54c8..19921c11a927 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h @@ -13,6 +13,7 @@ #ifndef MLIR_DIALECT_CIR_IR_CIRTYPES_H_ #define MLIR_DIALECT_CIR_IR_CIRTYPES_H_ +#include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Types.h" //===----------------------------------------------------------------------===// @@ -22,4 +23,4 @@ #define GET_TYPEDEF_CLASSES #include "mlir/Dialect/CIR/IR/CIROpsTypes.h.inc" -#endif // MLIR_DIALECT_CIR_IR_CIRTYPES_H_ \ No newline at end of file +#endif // MLIR_DIALECT_CIR_IR_CIRTYPES_H_ diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td index 48ce0227ce12..3141d599c103 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td @@ -66,4 +66,34 @@ def CIR_BoolType : def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType, CIR_BoolType]>; + +//===----------------------------------------------------------------------===// +// The base type for all RecordDecls. +//===----------------------------------------------------------------------===// + +def CIR_StructType : CIR_Type<"Struct", "struct"> { + + let summary = "CIR struct type"; + let description = [{ + Each unique clang::RecordDecl is mapped to a `cir.struct` and any object in + C/C++ that has a struct type will have a `cir.struct` in CIR. + }]; + + let parameters = (ins + ArrayRefParameter<"mlir::Type", "members">:$members, + "mlir::StringAttr":$typeName + ); + + let builders = [ + TypeBuilder<(ins + "ArrayRef":$members, "StringRef":$typeName + ), [{ + auto id = mlir::StringAttr::get(context, typeName); + return StructType::get(context, members, id); + }]> + ]; + + let hasCustomAssemblyFormat = 1; +} + #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp index b20661931874..27a09fa75626 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp @@ -69,8 +69,31 @@ Type BoolType::parse(mlir::AsmParser &parser) { void BoolType::print(mlir::AsmPrinter &printer) const {} +Type StructType::parse(mlir::AsmParser &parser) { + if (parser.parseLess()) + return Type(); + std::string typeName; + if (parser.parseString(&typeName)) + return Type(); + llvm::SmallVector members; + Type nextMember; + while (mlir::succeeded(parser.parseType(nextMember))) + members.push_back(nextMember); + if (parser.parseGreater()) + return Type(); + return get(parser.getContext(), members, typeName); +} + +void StructType::print(mlir::AsmPrinter &printer) const { + printer << '<' << getTypeName() << ", "; + llvm::interleaveComma(getMembers(), printer); + printer << '>'; +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// -void CIRDialect::registerTypes() { addTypes(); } +void CIRDialect::registerTypes() { + addTypes(); +} From 965fb7b44fb0d1a4cca115b4a4acd0db00d8c7bd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 17 Dec 2021 16:49:12 -0500 Subject: [PATCH 0099/2301] [CIR] Implement simple handling of CXXRecordDecl Most of the differences between RecordDecl and and CXXRecordDecl is still unimplemented (inheriance, virtual methods, etc). This just asserts that we have a CXXRecordDecl more-or-less the same as a C RecordDecl. The only other aspect needed for this was already done in CIRBuilder -- handling of simple CXXConstructExpr. --- clang/lib/CIR/CIRBuilder.cpp | 8 +++++++ clang/lib/CIR/CIRGenTypes.cpp | 12 +++++----- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 29 ++++++++++++++++-------- clang/test/CIR/CodeGen/struct.c | 14 +++++++++--- clang/test/CIR/CodeGen/struct.cpp | 25 ++++++++++++++++++++ 5 files changed, 69 insertions(+), 19 deletions(-) create mode 100644 clang/test/CIR/CodeGen/struct.cpp diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 97dd5fc04fb5..32418f66edad 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -2093,6 +2093,14 @@ class CIRBuildImpl { case Decl::Function: buildFunction(cast(decl)); break; + case Decl::CXXRecord: { + CXXRecordDecl *crd = cast(decl); + // TODO: Handle debug info as CodeGenModule.cpp does + for (auto *childDecl : crd->decls()) + if (isa(childDecl) || isa(childDecl)) + buildTopLevelDecl(childDecl); + break; + } case Decl::Record: // There's nothing to do here, we emit everything pertaining to `Record`s // lazily. diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 2765fb5e0c14..ee34a802eec7 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -60,15 +60,15 @@ CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { return entry; // TODO: Implement checking for whether or not this type is safe to convert. - // Clang CodeGen has issues with infinitely looping on recursive types that - // has to be worked around - assert(!dyn_cast_or_null(recordDecl) && - "CXXRecordDecl not yet finished"); + // TODO: handle whether or not layout was skipped and recursive record layout - entry = computeRecordLayout(recordDecl); + if (const auto *cxxRecordDecl = dyn_cast(recordDecl)) { + assert(cxxRecordDecl->bases().begin() == cxxRecordDecl->bases().end() && + "Base clases NYI"); + } - // TODO: handle whether or not layout was skipped + entry = computeRecordLayout(recordDecl); return entry; } diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp index 4470d2ba5355..99a544ec678f 100644 --- a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -107,12 +107,20 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, isPacked{isPacked} {} void CIRRecordLowering::lower(bool nonVirtualBaseType) { - assert(!cxxRecordDecl && "CXXRecordDecl NYI"); - assert(!recordDecl->isUnion() && "unions NYI"); + assert(!recordDecl->isUnion() && "NYI"); accumulateFields(); - llvm::stable_sort(members); + if (cxxRecordDecl) { + assert(!astRecordLayout.hasOwnVFPtr() && "accumulateVPtrs() NYI"); + assert(cxxRecordDecl->bases().begin() == cxxRecordDecl->bases().end() && + "Inheritance NYI"); + + assert(!members.empty() && "Empty CXXRecordDecls NYI"); + assert(!nonVirtualBaseType && "non-irtual base type handling NYI"); + } + + llvm::stable_sort(members); // TODO: implement clipTailPadding once bitfields are implemented // TODO: implemented packed structs // TODO: implement padding @@ -152,13 +160,14 @@ CIRGenTypes::computeRecordLayout(const clang::RecordDecl *recordDecl) { CIRRecordLowering builder(*this, recordDecl, /*packed=*/false); builder.lower(/*nonVirtualBaseType=*/false); - assert(!llvm::isa(recordDecl) && "NYI"); + if (llvm::isa(recordDecl)) { + assert(builder.astRecordLayout.getNonVirtualSize() == + builder.astRecordLayout.getSize() && + "Virtual base objects NYI"); + } + assert(!builder.isPacked && "Packed structs NYI"); - // TODO: figure out the corresponding `opaque`ness mapping from llvm -> - // mlir::cir for this comment. Comment lifted from CodeGen - // Fill in the struct *after* computing the base type. Filling in the body - // signifies that the type is no longer opaque and record layout is complete, - // but we may need to recursively layout D while laying D out as a base type. + auto name = getRecordTypeName(recordDecl, ""); auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); auto structType = mlir::cir::StructType::get(&getMLIRContext(), @@ -167,7 +176,7 @@ CIRGenTypes::computeRecordLayout(const clang::RecordDecl *recordDecl) { assert(!getContext().getLangOpts().DumpRecordLayouts && "RecordLayouts dumping NYI"); - // TODO: implement verification phase + // TODO: implement verification return structType; } diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index a88285e42c0a..eb66558e9f38 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -1,17 +1,25 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +struct Bar { + int a; + char b; +}; + struct Foo { int a; char b; + struct Bar z; }; -void bar() { +void baz() { + struct Bar b; struct Foo f; } // CHECK: module { -// CHECK-NEXT: func @bar() { -// CHECK-NEXT: %0 = cir.alloca !cir.struct<"struct.Foo", i32, i8>, cir.ptr >, [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: func @baz() { +// CHECK-NEXT: %0 = cir.alloca !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>>, cir.ptr >>, [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.struct<"struct.Bar", i32, i8>, cir.ptr >, [uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp new file mode 100644 index 000000000000..a584850d265f --- /dev/null +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Bar { + int a; + char b; +}; + +struct Foo { + int a; + char b; + Bar z; +}; + +void baz() { + Bar b; + Foo f; +} + +// CHECK: module { +// CHECK-NEXT: func @baz() { +// CHECK-NEXT: %0 = cir.alloca !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>>, cir.ptr >>, [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.struct<"struct.Bar", i32, i8>, cir.ptr >, [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: cir.return +// CHECK-NEXT: } From e57424c429e2dc56c5dd65703bd3e484bd5129b8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 18 Dec 2021 21:02:01 -0500 Subject: [PATCH 0100/2301] [CIR] Add a OpAsmDialectInterface sublcass for CIR to enable aliasing types This small interface let's us define aliases for StructTypes via the small hook member function `getAlias`. This significantly cleans up our IR for struct types. --- clang/test/CIR/CodeGen/struct.c | 13 ++++++++----- clang/test/CIR/CodeGen/struct.cpp | 9 ++++++--- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 16 ++++++++++++++++ 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index eb66558e9f38..594e2d77d5ea 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -17,9 +17,12 @@ void baz() { struct Foo f; } -// CHECK: module { -// CHECK-NEXT: func @baz() { -// CHECK-NEXT: %0 = cir.alloca !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>>, cir.ptr >>, [uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.struct<"struct.Bar", i32, i8>, cir.ptr >, [uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: cir.return +// CHECK: !_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> +// CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> +// CHECK-NEXT: module { +// CHECK-NEXT: func @baz() { +// CHECK-NEXT: %0 = cir.alloca !_22struct2EFoo22, cir.ptr , [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !_22struct2EBar22, cir.ptr , [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: cir.return +// CHECK-NEXT: } // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index a584850d265f..38dba398a9f5 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -17,9 +17,12 @@ void baz() { Foo f; } -// CHECK: module { +// CHECK: !_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> +// CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> +// CHECK-NEXT: module { // CHECK-NEXT: func @baz() { -// CHECK-NEXT: %0 = cir.alloca !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>>, cir.ptr >>, [uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.struct<"struct.Bar", i32, i8>, cir.ptr >, [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !_22struct2EFoo22, cir.ptr , [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !_22struct2EBar22, cir.ptr , [uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.return +// CHECK-NEXT: } // CHECK-NEXT: } diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index d687a5b403bc..e62d9a416cd1 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -31,6 +31,21 @@ using namespace mlir::cir; // CIR Dialect //===----------------------------------------------------------------------===// +namespace { +struct CIROpAsmDialectInterface : public OpAsmDialectInterface { + using OpAsmDialectInterface::OpAsmDialectInterface; + + AliasResult getAlias(Type type, raw_ostream &os) const final { + if (auto structType = type.dyn_cast()) { + os << structType.getTypeName(); + return AliasResult::OverridableAlias; + } + + return AliasResult::NoAlias; + } +}; +} // namespace + /// Dialect initialization, the instance will be owned by the context. This is /// the point of registration of types and operations for the dialect. void cir::CIRDialect::initialize() { @@ -40,6 +55,7 @@ void cir::CIRDialect::initialize() { #define GET_OP_LIST #include "mlir/Dialect/CIR/IR/CIROps.cpp.inc" >(); + addInterfaces(); } //===----------------------------------------------------------------------===// From fa79a0a4797aaa6dc631265d11dff5e22ebbd720 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 23 Dec 2021 20:08:54 -0500 Subject: [PATCH 0101/2301] [CIR][NFC] Refactor some types to CIRGenValue.h that'll be used elsewhere These types are defined in CGValue.h for clang codegen and used across numerous files. I'll be using them in subsequent patches for some constructor related code. Refactor them out here for similar usages in CIRGen. --- clang/include/clang/CIR/CIRCodeGenFunction.h | 44 +--- clang/lib/CIR/CIRBuilder.cpp | 174 +------------ clang/lib/CIR/CIRGenValue.h | 248 +++++++++++++++++++ 3 files changed, 252 insertions(+), 214 deletions(-) create mode 100644 clang/lib/CIR/CIRGenValue.h diff --git a/clang/include/clang/CIR/CIRCodeGenFunction.h b/clang/include/clang/CIR/CIRCodeGenFunction.h index 2bc43ebfedc7..c6763062074b 100644 --- a/clang/include/clang/CIR/CIRCodeGenFunction.h +++ b/clang/include/clang/CIR/CIRCodeGenFunction.h @@ -14,6 +14,8 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRCODEGENFUNCTION_H #define LLVM_CLANG_LIB_CIR_CIRCODEGENFUNCTION_H +#include "CIRGenValue.h" + #include "mlir/IR/Value.h" #include "clang/AST/Type.h" @@ -29,46 +31,6 @@ namespace cir { // isn't available in the include dir. Same for getEvaluationKind below. enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; -/// The source of the alignment of an l-value; an expression of -/// confidence in the alignment actually matching the estimate. -enum class AlignmentSource { - /// The l-value was an access to a declared entity or something - /// equivalently strong, like the address of an array allocated by a - /// language runtime. - Decl, - - /// The l-value was considered opaque, so the alignment was - /// determined from a type, but that type was an explicitly-aligned - /// typedef. - AttributedType, - - /// The l-value was considered opaque, so the alignment was - /// determined from a type. - Type -}; - -/// Given that the base address has the given alignment source, what's -/// our confidence in the alignment of the field? -static inline AlignmentSource getFieldAlignmentSource(AlignmentSource Source) { - // For now, we don't distinguish fields of opaque pointers from - // top-level declarations, but maybe we should. - return AlignmentSource::Decl; -} - -class LValueBaseInfo { - AlignmentSource AlignSource; - -public: - explicit LValueBaseInfo(AlignmentSource Source = AlignmentSource::Type) - : AlignSource(Source) {} - AlignmentSource getAlignmentSource() const { return AlignSource; } - void setAlignmentSource(AlignmentSource Source) { AlignSource = Source; } - - void mergeForCast(const LValueBaseInfo &Info) { - setAlignmentSource(Info.getAlignmentSource()); - } -}; - class CIRCodeGenFunction { public: /// If a return statement is being visited, this holds the return statment's @@ -95,4 +57,4 @@ class CIRCodeGenFunction { } // namespace cir -#endif \ No newline at end of file +#endif diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRBuilder.cpp index 32418f66edad..bea5fe012c17 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRBuilder.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "CIRGenTypes.h" +#include "CIRGenValue.h" #include "clang/AST/ASTConsumer.h" #include "clang/CIR/CIRBuilder.h" @@ -263,179 +264,6 @@ class CIRBuildImpl { mlir::ModuleOp getModule() { return theModule; } mlir::OpBuilder &getBuilder() { return builder; } - class RawAddress { - mlir::Value Pointer; - CharUnits Alignment; - - public: - RawAddress(mlir::Value pointer, CharUnits alignment) - : Pointer(pointer), Alignment(alignment) { - assert((!alignment.isZero() || pointer == nullptr) && - "creating valid address with invalid alignment"); - } - - static RawAddress invalid() { return RawAddress(nullptr, CharUnits()); } - bool isValid() const { return Pointer != nullptr; } - - mlir::Value getPointer() const { - // assert(isValid()); - return Pointer; - } - - /// Return the alignment of this pointer. - CharUnits getAlignment() const { - // assert(isValid()); - return Alignment; - } - }; - - class LValue { - enum { - Simple, // This is a normal l-value, use getAddress(). - VectorElt, // This is a vector element l-value (V[i]), use getVector* - BitField, // This is a bitfield l-value, use getBitfield*. - ExtVectorElt, // This is an extended vector subset, use getExtVectorComp - GlobalReg, // This is a register l-value, use getGlobalReg() - MatrixElt // This is a matrix element, use getVector* - } LVType; - QualType Type; - - private: - void Initialize(CharUnits Alignment, QualType Type, - LValueBaseInfo BaseInfo) { - // assert((!Alignment.isZero()) && // || Type->isIncompleteType()) && - // "initializing l-value with zero alignment!"); - this->Type = Type; - // This flag shows if a nontemporal load/stores should be used when - // accessing this lvalue. - const unsigned MaxAlign = 1U << 31; - this->Alignment = Alignment.getQuantity() <= MaxAlign - ? Alignment.getQuantity() - : MaxAlign; - assert(this->Alignment == Alignment.getQuantity() && - "Alignment exceeds allowed max!"); - this->BaseInfo = BaseInfo; - } - - // The alignment to use when accessing this lvalue. (For vector elements, - // this is the alignment of the whole vector) - unsigned Alignment; - mlir::Value V; - LValueBaseInfo BaseInfo; - - public: - bool isSimple() const { return LVType == Simple; } - bool isVectorElt() const { return LVType == VectorElt; } - bool isBitField() const { return LVType == BitField; } - bool isExtVectorElt() const { return LVType == ExtVectorElt; } - bool isGlobalReg() const { return LVType == GlobalReg; } - bool isMatrixElt() const { return LVType == MatrixElt; } - - QualType getType() const { return Type; } - - mlir::Value getPointer() const { return V; } - - CharUnits getAlignment() const { - return CharUnits::fromQuantity(Alignment); - } - - RawAddress getAddress() const { - return RawAddress(getPointer(), getAlignment()); - } - - LValueBaseInfo getBaseInfo() const { return BaseInfo; } - void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; } - - static LValue makeAddr(RawAddress address, QualType T, - AlignmentSource Source = AlignmentSource::Type) { - LValue R; - R.V = address.getPointer(); - R.Initialize(address.getAlignment(), T, LValueBaseInfo(Source)); - R.LVType = Simple; - return R; - } - - // FIXME: only have one of these static methods. - static LValue makeAddr(RawAddress address, QualType T, LValueBaseInfo LBI) { - LValue R; - R.V = address.getPointer(); - R.Initialize(address.getAlignment(), T, LBI); - R.LVType = Simple; - return R; - } - }; - - /// This trivial value class is used to represent the result of an - /// expression that is evaluated. It can be one of three things: either a - /// simple MLIR SSA value, a pair of SSA values for complex numbers, or the - /// address of an aggregate value in memory. - class RValue { - enum Flavor { Scalar, Complex, Aggregate }; - - // The shift to make to an aggregate's alignment to make it look - // like a pointer. - enum { AggAlignShift = 4 }; - - // Stores first value and flavor. - llvm::PointerIntPair V1; - // Stores second value and volatility. - llvm::PointerIntPair V2; - - public: - bool isScalar() const { return V1.getInt() == Scalar; } - bool isComplex() const { return V1.getInt() == Complex; } - bool isAggregate() const { return V1.getInt() == Aggregate; } - - bool isVolatileQualified() const { return V2.getInt(); } - - /// getScalarVal() - Return the Value* of this scalar value. - mlir::Value getScalarVal() const { - assert(isScalar() && "Not a scalar!"); - return V1.getPointer(); - } - - /// getComplexVal - Return the real/imag components of this complex value. - /// - std::pair getComplexVal() const { - assert(0 && "not implemented"); - return {}; - } - - /// getAggregateAddr() - Return the Value* of the address of the - /// aggregate. - RawAddress getAggregateAddress() const { - assert(0 && "not implemented"); - return RawAddress::invalid(); - } - - static RValue getIgnored() { - // FIXME: should we make this a more explicit state? - return get(nullptr); - } - - static RValue get(mlir::Value V) { - RValue ER; - ER.V1.setPointer(V); - ER.V1.setInt(Scalar); - ER.V2.setInt(false); - return ER; - } - static RValue getComplex(mlir::Value V1, mlir::Value V2) { - assert(0 && "not implemented"); - return RValue{}; - } - static RValue getComplex(const std::pair &C) { - assert(0 && "not implemented"); - return RValue{}; - } - // FIXME: Aggregate rvalues need to retain information about whether they - // are volatile or not. Remove default to find all places that probably - // get this wrong. - static RValue getAggregate(RawAddress addr, bool isVolatile = false) { - assert(0 && "not implemented"); - return RValue{}; - } - }; class ScalarExprEmitter : public StmtVisitor { LLVM_ATTRIBUTE_UNUSED CIRCodeGenFunction &CGF; CIRBuildImpl &Builder; diff --git a/clang/lib/CIR/CIRGenValue.h b/clang/lib/CIR/CIRGenValue.h new file mode 100644 index 000000000000..287b1582d315 --- /dev/null +++ b/clang/lib/CIR/CIRGenValue.h @@ -0,0 +1,248 @@ +//===-- CIRGenValue.h - CIRGen something TODO this desc* --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// IDK yet +// TODO: +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CIRGENVALUE_H +#define LLVM_CLANG_LIB_CIR_CIRGENVALUE_H + +#include "mlir/IR/Value.h" +#include "clang/AST/CharUnits.h" +#include "clang/AST/Type.h" +#include "clang/CIR/CIRCodeGenFunction.h" +#include "llvm/ADT/PointerIntPair.h" + +namespace cir { + +class RawAddress { + mlir::Value Pointer; + clang::CharUnits Alignment; + +public: + RawAddress(mlir::Value pointer, clang::CharUnits alignment) + : Pointer(pointer), Alignment(alignment) { + assert((!alignment.isZero() || pointer == nullptr) && + "creating valid address with invalid alignment"); + } + + static RawAddress invalid() { + return RawAddress(nullptr, clang::CharUnits()); + } + bool isValid() const { return Pointer != nullptr; } + + mlir::Value getPointer() const { + // assert(isValid()); + return Pointer; + } + + /// Return the alignment of this pointer. + clang::CharUnits getAlignment() const { + // assert(isValid()); + return Alignment; + } +}; + +/// This trivial value class is used to represent the result of an +/// expression that is evaluated. It can be one of three things: either a +/// simple MLIR SSA value, a pair of SSA values for complex numbers, or the +/// address of an aggregate value in memory. +class RValue { + enum Flavor { Scalar, Complex, Aggregate }; + + // The shift to make to an aggregate's alignment to make it look + // like a pointer. + enum { AggAlignShift = 4 }; + + // Stores first value and flavor. + llvm::PointerIntPair V1; + // Stores second value and volatility. + llvm::PointerIntPair V2; + +public: + bool isScalar() const { return V1.getInt() == Scalar; } + bool isComplex() const { return V1.getInt() == Complex; } + bool isAggregate() const { return V1.getInt() == Aggregate; } + + bool isVolatileQualified() const { return V2.getInt(); } + + /// getScalarVal() - Return the Value* of this scalar value. + mlir::Value getScalarVal() const { + assert(isScalar() && "Not a scalar!"); + return V1.getPointer(); + } + + /// getComplexVal - Return the real/imag components of this complex value. + /// + std::pair getComplexVal() const { + assert(0 && "not implemented"); + return {}; + } + + /// getAggregateAddr() - Return the Value* of the address of the + /// aggregate. + RawAddress getAggregateAddress() const { + assert(0 && "not implemented"); + return RawAddress::invalid(); + } + + static RValue getIgnored() { + // FIXME: should we make this a more explicit state? + return get(nullptr); + } + + static RValue get(mlir::Value V) { + RValue ER; + ER.V1.setPointer(V); + ER.V1.setInt(Scalar); + ER.V2.setInt(false); + return ER; + } + static RValue getComplex(mlir::Value V1, mlir::Value V2) { + assert(0 && "not implemented"); + return RValue{}; + } + static RValue getComplex(const std::pair &C) { + assert(0 && "not implemented"); + return RValue{}; + } + // FIXME: Aggregate rvalues need to retain information about whether they + // are volatile or not. Remove default to find all places that probably + // get this wrong. + static RValue getAggregate(RawAddress addr, bool isVolatile = false) { + assert(0 && "not implemented"); + return RValue{}; + } +}; + +/// The source of the alignment of an l-value; an expression of +/// confidence in the alignment actually matching the estimate. +enum class AlignmentSource { + /// The l-value was an access to a declared entity or something + /// equivalently strong, like the address of an array allocated by a + /// language runtime. + Decl, + + /// The l-value was considered opaque, so the alignment was + /// determined from a type, but that type was an explicitly-aligned + /// typedef. + AttributedType, + + /// The l-value was considered opaque, so the alignment was + /// determined from a type. + Type +}; + +/// Given that the base address has the given alignment source, what's +/// our confidence in the alignment of the field? +static inline AlignmentSource getFieldAlignmentSource(AlignmentSource Source) { + // For now, we don't distinguish fields of opaque pointers from + // top-level declarations, but maybe we should. + return AlignmentSource::Decl; +} + +class LValueBaseInfo { + AlignmentSource AlignSource; + +public: + explicit LValueBaseInfo(AlignmentSource Source = AlignmentSource::Type) + : AlignSource(Source) {} + AlignmentSource getAlignmentSource() const { return AlignSource; } + void setAlignmentSource(AlignmentSource Source) { AlignSource = Source; } + + void mergeForCast(const LValueBaseInfo &Info) { + setAlignmentSource(Info.getAlignmentSource()); + } +}; + +class LValue { + enum { + Simple, // This is a normal l-value, use getAddress(). + VectorElt, // This is a vector element l-value (V[i]), use getVector* + BitField, // This is a bitfield l-value, use getBitfield*. + ExtVectorElt, // This is an extended vector subset, use getExtVectorComp + GlobalReg, // This is a register l-value, use getGlobalReg() + MatrixElt // This is a matrix element, use getVector* + } LVType; + clang::QualType Type; + clang::Qualifiers Quals; + +private: + void Initialize(clang::CharUnits Alignment, clang::QualType Type, + LValueBaseInfo BaseInfo) { + // assert((!Alignment.isZero()) && // || Type->isIncompleteType()) && + // "initializing l-value with zero alignment!"); + this->Type = Type; + // This flag shows if a nontemporal load/stores should be used when + // accessing this lvalue. + const unsigned MaxAlign = 1U << 31; + this->Alignment = Alignment.getQuantity() <= MaxAlign + ? Alignment.getQuantity() + : MaxAlign; + assert(this->Alignment == Alignment.getQuantity() && + "Alignment exceeds allowed max!"); + this->BaseInfo = BaseInfo; + } + + // The alignment to use when accessing this lvalue. (For vector elements, + // this is the alignment of the whole vector) + unsigned Alignment; + mlir::Value V; + LValueBaseInfo BaseInfo; + +public: + bool isSimple() const { return LVType == Simple; } + bool isVectorElt() const { return LVType == VectorElt; } + bool isBitField() const { return LVType == BitField; } + bool isExtVectorElt() const { return LVType == ExtVectorElt; } + bool isGlobalReg() const { return LVType == GlobalReg; } + bool isMatrixElt() const { return LVType == MatrixElt; } + + clang::QualType getType() const { return Type; } + + mlir::Value getPointer() const { return V; } + + clang::CharUnits getAlignment() const { + return clang::CharUnits::fromQuantity(Alignment); + } + + RawAddress getAddress() const { + return RawAddress(getPointer(), getAlignment()); + } + + LValueBaseInfo getBaseInfo() const { return BaseInfo; } + void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; } + + static LValue makeAddr(RawAddress address, clang::QualType T, + AlignmentSource Source = AlignmentSource::Type) { + LValue R; + R.V = address.getPointer(); + R.Initialize(address.getAlignment(), T, LValueBaseInfo(Source)); + R.LVType = Simple; + return R; + } + + // FIXME: only have one of these static methods. + static LValue makeAddr(RawAddress address, clang::QualType T, + LValueBaseInfo LBI) { + LValue R; + R.V = address.getPointer(); + R.Initialize(address.getAlignment(), T, LBI); + R.LVType = Simple; + return R; + } + + const clang::Qualifiers &getQuals() const { return Quals; } + clang::Qualifiers &getQuals() { return Quals; } +}; + +} // namespace cir + +#endif From 4e4179a0248eb03bc396426e40ec07990b12915d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 24 Jan 2022 23:44:42 -0500 Subject: [PATCH 0102/2301] [CIR] Rename CIRBuildImpl->CIRGenModule, CIRBuildImpl->CIRGenModule, etc CIRBuildImpl -> CIRGenModule CIRContext -> CIRGenerator CIRCodeGenFunction -> CIRGenFunction This is to match clang's CodeGen a bit better. We'll need to refactor some of the inline namespacing and private code in order to provide access to members in the same exact fashion that codegen does and so we might as well match their class patterns. --- .../CIR/{CIRBuilder.h => CIRGenerator.h} | 18 +- clang/include/clang/Sema/CIRBasedWarnings.h | 4 +- .../CIR/CIRGenFunction.h} | 17 +- .../CIR/{CIRBuilder.cpp => CIRGenModule.cpp} | 205 +++++++++--------- clang/lib/CIR/CIRGenValue.h | 3 +- clang/lib/CIR/CMakeLists.txt | 2 +- clang/lib/CIRFrontendAction/CIRGenAction.cpp | 6 +- clang/lib/Sema/CIRBasedWarnings.cpp | 8 +- 8 files changed, 130 insertions(+), 133 deletions(-) rename clang/include/clang/CIR/{CIRBuilder.h => CIRGenerator.h} (80%) rename clang/{include/clang/CIR/CIRCodeGenFunction.h => lib/CIR/CIRGenFunction.h} (77%) rename clang/lib/CIR/{CIRBuilder.cpp => CIRGenModule.cpp} (92%) diff --git a/clang/include/clang/CIR/CIRBuilder.h b/clang/include/clang/CIR/CIRGenerator.h similarity index 80% rename from clang/include/clang/CIR/CIRBuilder.h rename to clang/include/clang/CIR/CIRGenerator.h index 1d085ddc0059..1c61ad57e1bb 100644 --- a/clang/include/clang/CIR/CIRBuilder.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -1,4 +1,4 @@ -//===- CIRBuilder.h - CIR Generation from Clang AST -----------------------===// +//===- CIRGenerator.h - CIR Generation from Clang AST ---------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -11,8 +11,8 @@ // //===----------------------------------------------------------------------===// -#ifndef CLANG_CIRBUILDER_H_ -#define CLANG_CIRBUILDER_H_ +#ifndef CLANG_CIRGENERATOR_H_ +#define CLANG_CIRGENERATOR_H_ #include "clang/AST/ASTConsumer.h" #include "llvm/Support/ToolOutputFile.h" @@ -31,13 +31,13 @@ class FunctionDecl; } // namespace clang namespace cir { -class CIRBuildImpl; +class CIRGenModule; class CIRGenTypes; -class CIRContext : public clang::ASTConsumer { +class CIRGenerator : public clang::ASTConsumer { public: - CIRContext(); - ~CIRContext(); + CIRGenerator(); + ~CIRGenerator(); void Initialize(clang::ASTContext &Context) override; bool EmitFunction(const clang::FunctionDecl *FD); @@ -53,11 +53,11 @@ class CIRContext : public clang::ASTConsumer { private: std::unique_ptr mlirCtx; - std::unique_ptr builder; + std::unique_ptr CGM; clang::ASTContext *astCtx; }; } // namespace cir -#endif // CLANG_CIRBUILDER_H_ +#endif // LLVM_CLANG_CIR_CIRGENERATOR_H diff --git a/clang/include/clang/Sema/CIRBasedWarnings.h b/clang/include/clang/Sema/CIRBasedWarnings.h index 3afdb9294415..ea08e24ad6ea 100644 --- a/clang/include/clang/Sema/CIRBasedWarnings.h +++ b/clang/include/clang/Sema/CIRBasedWarnings.h @@ -18,7 +18,7 @@ #include namespace cir { -class CIRContext; +class CIRGenerator; } // namespace cir namespace clang { @@ -37,7 +37,7 @@ class CIRBasedWarnings { private: Sema &S; AnalysisBasedWarnings::Policy DefaultPolicy; - std::unique_ptr CIRCtx; + std::unique_ptr CIRGen; //class InterProceduralData; //std::unique_ptr IPData; diff --git a/clang/include/clang/CIR/CIRCodeGenFunction.h b/clang/lib/CIR/CIRGenFunction.h similarity index 77% rename from clang/include/clang/CIR/CIRCodeGenFunction.h rename to clang/lib/CIR/CIRGenFunction.h index c6763062074b..31407d55db2a 100644 --- a/clang/include/clang/CIR/CIRCodeGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -1,5 +1,4 @@ -//===-- CIRCIRCodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -//-*-===// +//===-- CIRGenFunction.h - Per-Function state for CIR gen -------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -7,17 +6,19 @@ // //===----------------------------------------------------------------------===// // -// This is the internal per-function state used for cir translation. +// This is the internal per-function state used for CIR translation. // //===----------------------------------------------------------------------===// -#ifndef LLVM_CLANG_LIB_CIR_CIRCODEGENFUNCTION_H -#define LLVM_CLANG_LIB_CIR_CIRCODEGENFUNCTION_H +#ifndef LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H +#define LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H #include "CIRGenValue.h" #include "mlir/IR/Value.h" +#include "clang/AST/ExprCXX.h" #include "clang/AST/Type.h" +#include "clang/CIR/CIRGenerator.h" namespace clang { class Expr; @@ -31,7 +32,7 @@ namespace cir { // isn't available in the include dir. Same for getEvaluationKind below. enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; -class CIRCodeGenFunction { +class CIRGenFunction { public: /// If a return statement is being visited, this holds the return statment's /// result expression. @@ -52,9 +53,9 @@ class CIRCodeGenFunction { return getEvaluationKind(T) == TEK_Aggregate; } - CIRCodeGenFunction(); + CIRGenFunction(); }; } // namespace cir -#endif +#endif // LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H diff --git a/clang/lib/CIR/CIRBuilder.cpp b/clang/lib/CIR/CIRGenModule.cpp similarity index 92% rename from clang/lib/CIR/CIRBuilder.cpp rename to clang/lib/CIR/CIRGenModule.cpp index bea5fe012c17..5c604d8e9a1a 100644 --- a/clang/lib/CIR/CIRBuilder.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1,4 +1,4 @@ -//===- CIRBuilder.cpp - MLIR Generation from a Toy AST --------------------===// +//===- CIRGenModule.cpp - Per-Module state for CIR generation -------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -6,17 +6,16 @@ // //===----------------------------------------------------------------------===// // -// This file implements a simple IR generation targeting MLIR from a Module AST -// for the Toy language. +// This is the internal per-translation-unit state used for CIR translation. // //===----------------------------------------------------------------------===// +#include "CIRGenFunction.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" #include "clang/AST/ASTConsumer.h" -#include "clang/CIR/CIRBuilder.h" -#include "clang/CIR/CIRCodeGenFunction.h" +#include "clang/CIR/CIRGenerator.h" #include "clang/CIR/LowerToLLVM.h" #include "mlir/Dialect/CIR/IR/CIRAttrs.h" @@ -75,8 +74,8 @@ using llvm::SmallVector; using llvm::StringRef; using llvm::Twine; -CIRCodeGenFunction::CIRCodeGenFunction() = default; -TypeEvaluationKind CIRCodeGenFunction::getEvaluationKind(QualType type) { +CIRGenFunction::CIRGenFunction() = default; +TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { type = type.getCanonicalType(); while (true) { switch (type->getTypeClass()) { @@ -142,16 +141,16 @@ namespace cir { /// This will emit operations that are specific to C(++)/ObjC(++) language, /// preserving the semantics of the language and (hopefully) allow to perform /// accurate analysis and transformation based on these high level semantics. -class CIRBuildImpl { +class CIRGenModule { public: - CIRBuildImpl(mlir::MLIRContext &context, clang::ASTContext &astctx) + CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx) : builder(&context), astCtx(astctx) { theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); genTypes = std::make_unique(astCtx, this->getBuilder()); } - CIRBuildImpl(CIRBuildImpl &) = delete; - CIRBuildImpl &operator=(CIRBuildImpl &) = delete; - ~CIRBuildImpl() = default; + CIRGenModule(CIRGenModule &) = delete; + CIRGenModule &operator=(CIRGenModule &) = delete; + ~CIRGenModule() = default; using SymTableTy = llvm::ScopedHashTable; using SymTableScopeTy = ScopedHashTableScope; @@ -177,7 +176,7 @@ class CIRBuildImpl { /// Per-function codegen information. Updated everytime buildCIR is called /// for FunctionDecls's. - CIRCodeGenFunction *CurCCGF = nullptr; + CIRGenFunction *CurCGF = nullptr; /// Per-module type mapping from clang AST to CIR. std::unique_ptr genTypes; @@ -186,11 +185,11 @@ class CIRBuildImpl { /// Always use a `SourceLocRAIIObject` to change currSrcLoc. std::optional currSrcLoc; class SourceLocRAIIObject { - CIRBuildImpl &P; + CIRGenModule &P; std::optional OldVal; public: - SourceLocRAIIObject(CIRBuildImpl &p, mlir::Location Value) : P(p) { + SourceLocRAIIObject(CIRGenModule &p, mlir::Location Value) : P(p) { if (P.currSrcLoc) OldVal = P.currSrcLoc; P.currSrcLoc = Value; @@ -265,14 +264,12 @@ class CIRBuildImpl { mlir::OpBuilder &getBuilder() { return builder; } class ScalarExprEmitter : public StmtVisitor { - LLVM_ATTRIBUTE_UNUSED CIRCodeGenFunction &CGF; - CIRBuildImpl &Builder; + LLVM_ATTRIBUTE_UNUSED CIRGenFunction &CGF; + CIRGenModule &CGM; public: - ScalarExprEmitter(CIRCodeGenFunction &cgf, CIRBuildImpl &builder) - : CGF(cgf), Builder(builder) { - (void)CGF; - } + ScalarExprEmitter(CIRGenFunction &cgf, CIRGenModule &cgm) + : CGF(cgf), CGM(cgm) {} mlir::Value Visit(Expr *E) { return StmtVisitor::Visit(E); @@ -280,10 +277,10 @@ class CIRBuildImpl { /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { - LValue LV = Builder.buildLValue(E); - auto load = Builder.builder.create( - Builder.getLoc(E->getExprLoc()), Builder.getCIRType(E->getType()), - LV.getPointer(), mlir::UnitAttr::get(Builder.builder.getContext())); + LValue LV = CGM.buildLValue(E); + auto load = CGM.builder.create( + CGM.getLoc(E->getExprLoc()), CGM.getCIRType(E->getType()), + LV.getPointer(), mlir::UnitAttr::get(CGM.builder.getContext())); // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); return load; } @@ -305,25 +302,24 @@ class CIRBuildImpl { clang::CastKind Kind = CE->getCastKind(); switch (Kind) { case CK_LValueToRValue: - assert(Builder.astCtx.hasSameUnqualifiedType(E->getType(), DestTy)); + assert(CGM.astCtx.hasSameUnqualifiedType(E->getType(), DestTy)); assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); return Visit(const_cast(E)); case CK_NullToPointer: { // FIXME: use MustVisitNullValue(E) and evaluate expr. // Note that DestTy is used as the MLIR type instead of a custom // nullptr type. - mlir::Type Ty = Builder.getCIRType(DestTy); - return Builder.builder.create( - Builder.getLoc(E->getExprLoc()), Ty, - mlir::cir::NullAttr::get(Builder.builder.getContext(), Ty)); + mlir::Type Ty = CGM.getCIRType(DestTy); + return CGM.builder.create( + CGM.getLoc(E->getExprLoc()), Ty, + mlir::cir::NullAttr::get(CGM.builder.getContext(), Ty)); } case CK_IntegralToBoolean: { return buildIntToBoolConversion(Visit(E), - Builder.getLoc(CE->getSourceRange())); + CGM.getLoc(CE->getSourceRange())); } default: - emitError(Builder.getLoc(CE->getExprLoc()), - "cast kind not implemented: '") + emitError(CGM.getLoc(CE->getExprLoc()), "cast kind not implemented: '") << CE->getCastKindName() << "'"; assert(0 && "not implemented"); return nullptr; @@ -332,14 +328,14 @@ class CIRBuildImpl { mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { assert(!isa(E->getType()) && "not implemented"); - return Builder.buildLValue(E->getSubExpr()).getPointer(); + return CGM.buildLValue(E->getSubExpr()).getPointer(); } mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { - mlir::Type Ty = Builder.getCIRType(E->getType()); - return Builder.builder.create( - Builder.getLoc(E->getExprLoc()), Ty, - Builder.builder.getBoolAttr(E->getValue())); + mlir::Type Ty = CGM.getCIRType(E->getType()); + return CGM.builder.create( + CGM.getLoc(E->getExprLoc()), Ty, + CGM.builder.getBoolAttr(E->getValue())); } struct BinOpInfo { @@ -387,54 +383,54 @@ class CIRBuildImpl { } mlir::Value buildMul(const BinOpInfo &Ops) { - return Builder.builder.create( - Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); } mlir::Value buildDiv(const BinOpInfo &Ops) { - return Builder.builder.create( - Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Div, Ops.LHS, Ops.RHS); } mlir::Value buildRem(const BinOpInfo &Ops) { - return Builder.builder.create( - Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); } mlir::Value buildAdd(const BinOpInfo &Ops) { - return Builder.builder.create( - Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS); } mlir::Value buildSub(const BinOpInfo &Ops) { - return Builder.builder.create( - Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); } mlir::Value buildShl(const BinOpInfo &Ops) { - return Builder.builder.create( - Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shl, Ops.LHS, Ops.RHS); } mlir::Value buildShr(const BinOpInfo &Ops) { - return Builder.builder.create( - Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shr, Ops.LHS, Ops.RHS); } mlir::Value buildAnd(const BinOpInfo &Ops) { - return Builder.builder.create( - Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::And, Ops.LHS, Ops.RHS); } mlir::Value buildXor(const BinOpInfo &Ops) { - return Builder.builder.create( - Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); } mlir::Value buildOr(const BinOpInfo &Ops) { - return Builder.builder.create( - Builder.getLoc(Ops.Loc), Builder.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Or, Ops.LHS, Ops.RHS); + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, + Ops.LHS, Ops.RHS); } // Binary operators and binary compound assignment operators. @@ -513,8 +509,8 @@ class CIRBuildImpl { llvm_unreachable("unsupported"); } - return Builder.builder.create( - Builder.getLoc(BOInfo.Loc), Builder.getCIRType(BOInfo.Ty), Kind, + return CGM.builder.create( + CGM.getLoc(BOInfo.Loc), CGM.getCIRType(BOInfo.Ty), Kind, BOInfo.LHS, BOInfo.RHS); } @@ -527,7 +523,7 @@ class CIRBuildImpl { assert(0 && "not implemented"); } - return buildScalarConversion(Result, Builder.astCtx.BoolTy, E->getType(), + return buildScalarConversion(Result, CGM.astCtx.BoolTy, E->getType(), E->getExprLoc()); } @@ -544,7 +540,7 @@ class CIRBuildImpl { mlir::Value VisitExpr(Expr *E) { // Crashing here for "ScalarExprClassName"? Please implement // VisitScalarExprClassName(...) to get this working. - emitError(Builder.getLoc(E->getExprLoc()), "scalar exp no implemented: '") + emitError(CGM.getLoc(E->getExprLoc()), "scalar exp no implemented: '") << E->getStmtClassName() << "'"; assert(0 && "shouldn't be here!"); return {}; @@ -557,8 +553,8 @@ class CIRBuildImpl { // as a logical value again. // TODO: optimize this common case here or leave it for later // CIR passes? - mlir::Type boolTy = Builder.getCIRType(Builder.astCtx.BoolTy); - return Builder.builder.create( + mlir::Type boolTy = CGM.getCIRType(CGM.astCtx.BoolTy); + return CGM.builder.create( loc, boolTy, mlir::cir::CastKind::int_to_bool, srcVal); } @@ -595,8 +591,8 @@ class CIRBuildImpl { assert(0 && "not implemented"); } - SrcType = Builder.astCtx.getCanonicalType(SrcType); - DstType = Builder.astCtx.getCanonicalType(DstType); + SrcType = CGM.astCtx.getCanonicalType(SrcType); + DstType = CGM.astCtx.getCanonicalType(DstType); if (SrcType == DstType) return Src; @@ -607,13 +603,12 @@ class CIRBuildImpl { // Handle conversions to bool first, they are special: comparisons against // 0. if (DstType->isBooleanType()) - return buildConversionToBool(Src, SrcType, Builder.getLoc(Loc)); + return buildConversionToBool(Src, SrcType, CGM.getLoc(Loc)); - mlir::Type DstTy = Builder.getCIRType(DstType); + mlir::Type DstTy = CGM.getCIRType(DstType); // Cast from half through float if half isn't a native type. - if (SrcType->isHalfType() && - !Builder.astCtx.getLangOpts().NativeHalfType) { + if (SrcType->isHalfType() && !CGM.astCtx.getLangOpts().NativeHalfType) { assert(0 && "not implemented"); } @@ -658,8 +653,7 @@ class CIRBuildImpl { // TODO: implement CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) // Cast to half through float if half isn't a native type. - if (DstType->isHalfType() && - !Builder.astCtx.getLangOpts().NativeHalfType) { + if (DstType->isHalfType() && !CGM.astCtx.getLangOpts().NativeHalfType) { assert(0 && "not implemented"); } @@ -673,10 +667,10 @@ class CIRBuildImpl { // Leaves. mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { - mlir::Type Ty = Builder.getCIRType(E->getType()); - return Builder.builder.create( - Builder.getLoc(E->getExprLoc()), Ty, - Builder.builder.getIntegerAttr(Ty, E->getValue())); + mlir::Type Ty = CGM.getCIRType(E->getType()); + return CGM.builder.create( + CGM.getLoc(E->getExprLoc()), Ty, + CGM.builder.getIntegerAttr(Ty, E->getValue())); } }; @@ -908,7 +902,7 @@ class CIRBuildImpl { assert(0 && "not implemented"); return; } - switch (CIRCodeGenFunction::getEvaluationKind(type)) { + switch (CIRGenFunction::getEvaluationKind(type)) { case TEK_Scalar: buildScalarInit(init, D, lvalue); return; @@ -1164,20 +1158,20 @@ class CIRBuildImpl { /// Emit the computation of the specified expression of scalar type, /// ignoring the result. mlir::Value buildScalarExpr(const Expr *E) { - assert(E && CIRCodeGenFunction::hasScalarEvaluationKind(E->getType()) && + assert(E && CIRGenFunction::hasScalarEvaluationKind(E->getType()) && "Invalid scalar expression to emit"); - return ScalarExprEmitter(*CurCCGF, *this).Visit(const_cast(E)); + return ScalarExprEmitter(*CurCGF, *this).Visit(const_cast(E)); } /// Emit a conversion from the specified type to the specified destination /// type, both of which are CIR scalar types. mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcTy, QualType DstTy, SourceLocation Loc) { - assert(CIRCodeGenFunction::hasScalarEvaluationKind(SrcTy) && - CIRCodeGenFunction::hasScalarEvaluationKind(DstTy) && + assert(CIRGenFunction::hasScalarEvaluationKind(SrcTy) && + CIRGenFunction::hasScalarEvaluationKind(DstTy) && "Invalid scalar expression to emit"); - return ScalarExprEmitter(*CurCCGF, *this) + return ScalarExprEmitter(*CurCGF, *this) .buildScalarConversion(Src, SrcTy, DstTy, Loc); } @@ -1185,7 +1179,7 @@ class CIRBuildImpl { assert(!(astCtx.getLangOpts().ElideConstructors && S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) && "unimplemented"); - assert(!CurCCGF->FnRetQualTy->isReferenceType() && "unimplemented"); + assert(!CurCGF->FnRetQualTy->isReferenceType() && "unimplemented"); // Emit the result value, even if unused, to evaluate the side effects. const Expr *RV = S.getRetValue(); @@ -1194,7 +1188,7 @@ class CIRBuildImpl { assert(!isa(RV) && "unimplemented"); mlir::Value V = nullptr; - switch (CIRCodeGenFunction::getEvaluationKind(RV->getType())) { + switch (CIRGenFunction::getEvaluationKind(RV->getType())) { case TEK_Scalar: V = buildScalarExpr(RV); // Builder.CreateStore(EmitScalarExpr(RV), ReturnValue); @@ -1205,7 +1199,7 @@ class CIRBuildImpl { return mlir::failure(); } - CurCCGF->RetValue = V; + CurCGF->RetValue = V; // Otherwise, this return operation has zero operands. if (!V || (RV && RV->getType()->isVoidType())) { // FIXME: evaluate for side effects. @@ -1292,7 +1286,7 @@ class CIRBuildImpl { /// TODO: if this is an aggregate expression, add a AggValueSlot to indicate /// where the result should be returned. RValue buildAnyExpr(const Expr *E) { - switch (CIRCodeGenFunction::getEvaluationKind(E->getType())) { + switch (CIRGenFunction::getEvaluationKind(E->getType())) { case TEK_Scalar: return RValue::get(buildScalarExpr(E)); case TEK_Complex: @@ -1317,7 +1311,7 @@ class CIRBuildImpl { // Note that in all of these cases, __block variables need the RHS // evaluated first just in case the variable gets moved by the RHS. - switch (CIRCodeGenFunction::getEvaluationKind(E->getType())) { + switch (CIRGenFunction::getEvaluationKind(E->getType())) { case TEK_Scalar: { assert(E->getLHS()->getType().getObjCLifetime() == clang::Qualifiers::ObjCLifetime::OCL_None && @@ -1932,15 +1926,16 @@ class CIRBuildImpl { case Decl::Record: // There's nothing to do here, we emit everything pertaining to `Record`s // lazily. - // TODO: handle debug info here? See clang's CodeGenModule::EmitTopLevelDecl + // TODO: handle debug info here? See clang's + // CodeGenModule::EmitTopLevelDecl break; } } // Emit a new function and add it to the MLIR module. mlir::FuncOp buildFunction(const FunctionDecl *FD) { - CIRCodeGenFunction CCGF; - CurCCGF = &CCGF; + CIRGenFunction CGF; + CurCGF = &CGF; // Create a scope in the symbol table to hold variable declarations. SymTableScopeTy varScope(symbolTable); @@ -1955,11 +1950,11 @@ class CIRBuildImpl { for (auto *Param : FD->parameters()) argTypes.push_back(getCIRType(Param->getType())); - CurCCGF->FnRetQualTy = FD->getReturnType(); + CurCGF->FnRetQualTy = FD->getReturnType(); auto funcType = builder.getFunctionType( - argTypes, CurCCGF->FnRetQualTy->isVoidType() + argTypes, CurCGF->FnRetQualTy->isVoidType() ? mlir::TypeRange() - : getCIRType(CurCCGF->FnRetQualTy)); + : getCIRType(CurCGF->FnRetQualTy)); mlir::FuncOp function = mlir::FuncOp::create(fnLoc, FD->getName(), funcType); if (!function) @@ -2025,10 +2020,10 @@ class CIRBuildImpl { }; } // namespace cir -CIRContext::CIRContext() = default; -CIRContext::~CIRContext() = default; +CIRGenerator::CIRGenerator() = default; +CIRGenerator::~CIRGenerator() = default; -void CIRContext::Initialize(clang::ASTContext &astCtx) { +void CIRGenerator::Initialize(clang::ASTContext &astCtx) { using namespace llvm; this->astCtx = &astCtx; @@ -2037,25 +2032,25 @@ void CIRContext::Initialize(clang::ASTContext &astCtx) { mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); - builder = std::make_unique(*mlirCtx.get(), astCtx); + CGM = std::make_unique(*mlirCtx.get(), astCtx); } -void CIRContext::verifyModule() { builder->verifyModule(); } +void CIRGenerator::verifyModule() { CGM->verifyModule(); } -bool CIRContext::EmitFunction(const FunctionDecl *FD) { - auto func = builder->buildFunction(FD); +bool CIRGenerator::EmitFunction(const FunctionDecl *FD) { + auto func = CGM->buildFunction(FD); assert(func && "should emit function"); return func.getOperation() != nullptr; } -mlir::ModuleOp CIRContext::getModule() { return builder->getModule(); } +mlir::ModuleOp CIRGenerator::getModule() { return CGM->getModule(); } -bool CIRContext::HandleTopLevelDecl(clang::DeclGroupRef D) { +bool CIRGenerator::HandleTopLevelDecl(clang::DeclGroupRef D) { for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { - builder->buildTopLevelDecl(*I); + CGM->buildTopLevelDecl(*I); } return true; } -void CIRContext::HandleTranslationUnit(ASTContext &C) {} +void CIRGenerator::HandleTranslationUnit(ASTContext &C) {} diff --git a/clang/lib/CIR/CIRGenValue.h b/clang/lib/CIR/CIRGenValue.h index 287b1582d315..172456d90e03 100644 --- a/clang/lib/CIR/CIRGenValue.h +++ b/clang/lib/CIR/CIRGenValue.h @@ -14,10 +14,11 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENVALUE_H #define LLVM_CLANG_LIB_CIR_CIRGENVALUE_H +#include "CIRGenFunction.h" + #include "mlir/IR/Value.h" #include "clang/AST/CharUnits.h" #include "clang/AST/Type.h" -#include "clang/CIR/CIRCodeGenFunction.h" #include "llvm/ADT/PointerIntPair.h" namespace cir { diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index de41b7e3b445..2a2d40a81924 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -11,7 +11,7 @@ include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR - CIRBuilder.cpp + CIRGenModule.cpp CIRGenTypes.cpp CIRRecordLayoutBuilder.cpp LowerToLLVM.cpp diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index 051aa273619f..10ffd86bcf31 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -23,7 +23,7 @@ #include "clang/Basic/LangStandard.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" -#include "clang/CIR/CIRBuilder.h" +#include "clang/CIR/CIRGenerator.h" #include "clang/CIR/LowerToLLVM.h" #include "clang/CodeGen/BackendUtil.h" #include "clang/CodeGen/ModuleBuilder.h" @@ -74,7 +74,7 @@ class CIRGenConsumer : public clang::ASTConsumer { std::unique_ptr outputStream; ASTContext *astContext{nullptr}; - std::unique_ptr gen; + std::unique_ptr gen; public: CIRGenConsumer(CIRGenAction::OutputType action, @@ -90,7 +90,7 @@ class CIRGenConsumer : public clang::ASTConsumer { headerSearchOptions(headerSearchOptions), codeGenOptions(codeGenOptions), targetOptions(targetOptions), langOptions(langOptions), outputStream(std::move(os)), - gen(std::make_unique()) { + gen(std::make_unique()) { // This is required to match the constructors used during // CodeGenAction. Ultimately, this is required because we want to use // the same utility functions in BackendUtil.h for handling llvm diff --git a/clang/lib/Sema/CIRBasedWarnings.cpp b/clang/lib/Sema/CIRBasedWarnings.cpp index f259fca6d95b..9b263c32560c 100644 --- a/clang/lib/Sema/CIRBasedWarnings.cpp +++ b/clang/lib/Sema/CIRBasedWarnings.cpp @@ -37,7 +37,7 @@ #include "llvm/Support/Casting.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" -#include "clang/CIR/CIRBuilder.h" +#include "clang/CIR/CIRGenerator.h" #include #include @@ -67,8 +67,8 @@ sema::CIRBasedWarnings::CIRBasedWarnings(Sema &s) : S(s) { DefaultPolicy.enableConsumedAnalysis = isEnabled(D, warn_use_in_invalid_state); - CIRCtx = std::make_unique(); - CIRCtx->Initialize(S.getASTContext()); + CIRGen = std::make_unique(); + CIRGen->Initialize(S.getASTContext()); } // We need this here for unique_ptr with forward declared class. @@ -114,7 +114,7 @@ void clang::sema::CIRBasedWarnings::IssueWarnings( // Unlike Clang CFG, we share CIR state between each analyzed function, // retrieve or create a new context. - CIRCtx->EmitFunction(FD); + CIRGen->EmitFunction(FD); } void clang::sema::CIRBasedWarnings::PrintStats() const { From a5b0960e4444bccfd17ca8db5ae61c8c155e3968 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 27 Jan 2022 19:29:59 -0500 Subject: [PATCH 0103/2301] [CIR][NFC] Move CIRGenFunction code to its own file --- clang/lib/CIR/CIRGenFunction.cpp | 77 + clang/lib/CIR/CIRGenFunction.h | 5 +- clang/lib/CIR/CIRGenModule.cpp | 2963 ++++++++++++------------------ clang/lib/CIR/CIRGenModule.h | 727 ++++++++ clang/lib/CIR/CMakeLists.txt | 1 + 5 files changed, 1997 insertions(+), 1776 deletions(-) create mode 100644 clang/lib/CIR/CIRGenFunction.cpp create mode 100644 clang/lib/CIR/CIRGenModule.h diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp new file mode 100644 index 000000000000..cf8be74fe477 --- /dev/null +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -0,0 +1,77 @@ +//===- CIRGenFunction.cpp - Emit CIR from ASTs for a Function -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This coordinates the per-function state used while generating code +// +//===----------------------------------------------------------------------===// + +#include "CIRGenFunction.h" + +using namespace cir; +using namespace clang; + +CIRGenFunction::CIRGenFunction() = default; + +TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { + type = type.getCanonicalType(); + while (true) { + switch (type->getTypeClass()) { +#define TYPE(name, parent) +#define ABSTRACT_TYPE(name, parent) +#define NON_CANONICAL_TYPE(name, parent) case Type::name: +#define DEPENDENT_TYPE(name, parent) case Type::name: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("non-canonical or dependent type in IR-generation"); + + case Type::ArrayParameter: + llvm_unreachable("NYI"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("undeduced type in IR-generation"); + + // Various scalar types. + case Type::Builtin: + case Type::Pointer: + case Type::BlockPointer: + case Type::LValueReference: + case Type::RValueReference: + case Type::MemberPointer: + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::FunctionProto: + case Type::FunctionNoProto: + case Type::Enum: + case Type::ObjCObjectPointer: + case Type::Pipe: + case Type::BitInt: + return TEK_Scalar; + + // Complexes. + case Type::Complex: + return TEK_Complex; + + // Arrays, records, and Objective-C objects. + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + case Type::Record: + case Type::ObjCObject: + case Type::ObjCInterface: + return TEK_Aggregate; + + // We operate on atomic values according to their underlying type. + case Type::Atomic: + type = cast(type)->getValueType(); + continue; + } + llvm_unreachable("unknown type kind!"); + } +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 31407d55db2a..97402d72204d 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -13,16 +13,13 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H #define LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H -#include "CIRGenValue.h" - #include "mlir/IR/Value.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/Type.h" -#include "clang/CIR/CIRGenerator.h" namespace clang { class Expr; -} +} // namespace clang using namespace clang; diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 5c604d8e9a1a..8ccbbf452c43 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -10,18 +10,12 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenModule.h" + #include "CIRGenFunction.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" -#include "clang/AST/ASTConsumer.h" -#include "clang/CIR/CIRGenerator.h" -#include "clang/CIR/LowerToLLVM.h" - -#include "mlir/Dialect/CIR/IR/CIRAttrs.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" - #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/Attributes.h" @@ -31,6 +25,8 @@ #include "mlir/IR/MLIRContext.h" #include "mlir/IR/Verifier.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclGroup.h" #include "clang/AST/DeclObjC.h" @@ -42,9 +38,9 @@ #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" -#include "clang/AST/StmtVisitor.h" #include "clang/Basic/SourceLocation.h" -#include "clang/Basic/SourceManager.h" +#include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/LowerToLLVM.h" #include "clang/Lex/Preprocessor.h" #include "llvm/ADT/ArrayRef.h" @@ -72,1953 +68,1376 @@ using llvm::isa; using llvm::ScopedHashTableScope; using llvm::SmallVector; using llvm::StringRef; -using llvm::Twine; - -CIRGenFunction::CIRGenFunction() = default; -TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { - type = type.getCanonicalType(); - while (true) { - switch (type->getTypeClass()) { -#define TYPE(name, parent) -#define ABSTRACT_TYPE(name, parent) -#define NON_CANONICAL_TYPE(name, parent) case Type::name: -#define DEPENDENT_TYPE(name, parent) case Type::name: -#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name: -#include "clang/AST/TypeNodes.inc" - llvm_unreachable("non-canonical or dependent type in IR-generation"); - - case Type::Auto: - case Type::DeducedTemplateSpecialization: - llvm_unreachable("undeduced type in IR-generation"); - - case Type::ArrayParameter: - llvm_unreachable("NYI"); - - // Various scalar types. - case Type::Builtin: - case Type::Pointer: - case Type::BlockPointer: - case Type::LValueReference: - case Type::RValueReference: - case Type::MemberPointer: - case Type::Vector: - case Type::ExtVector: - case Type::ConstantMatrix: - case Type::FunctionProto: - case Type::FunctionNoProto: - case Type::Enum: - case Type::ObjCObjectPointer: - case Type::Pipe: - case Type::BitInt: - return TEK_Scalar; - - // Complexes. - case Type::Complex: - return TEK_Complex; - - // Arrays, records, and Objective-C objects. - case Type::ConstantArray: - case Type::IncompleteArray: - case Type::VariableArray: - case Type::Record: - case Type::ObjCObject: - case Type::ObjCInterface: - return TEK_Aggregate; - - // We operate on atomic values according to their underlying type. - case Type::Atomic: - type = cast(type)->getValueType(); - continue; - } - llvm_unreachable("unknown type kind!"); - } + +CIRGenModule::CIRGenModule(mlir::MLIRContext &context, + clang::ASTContext &astctx) + : builder(&context), astCtx(astctx) { + theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); + genTypes = std::make_unique(astCtx, this->getBuilder()); } -namespace cir { - -/// Implementation of a CIR/MLIR emission from Clang AST. -/// -/// This will emit operations that are specific to C(++)/ObjC(++) language, -/// preserving the semantics of the language and (hopefully) allow to perform -/// accurate analysis and transformation based on these high level semantics. -class CIRGenModule { -public: - CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx) - : builder(&context), astCtx(astctx) { - theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); - genTypes = std::make_unique(astCtx, this->getBuilder()); - } - CIRGenModule(CIRGenModule &) = delete; - CIRGenModule &operator=(CIRGenModule &) = delete; - ~CIRGenModule() = default; - - using SymTableTy = llvm::ScopedHashTable; - using SymTableScopeTy = ScopedHashTableScope; - -private: - /// A "module" matches a c/cpp source file: containing a list of functions. - mlir::ModuleOp theModule; - - /// The builder is a helper class to create IR inside a function. The - /// builder is stateful, in particular it keeps an "insertion point": this - /// is where the next operations will be introduced. - mlir::OpBuilder builder; - - /// The symbol table maps a variable name to a value in the current scope. - /// Entering a function creates a new scope, and the function arguments are - /// added to the mapping. When the processing of a function is terminated, - /// the scope is destroyed and the mappings created in this scope are - /// dropped. - SymTableTy symbolTable; - - /// Hold Clang AST information. - clang::ASTContext &astCtx; - - /// Per-function codegen information. Updated everytime buildCIR is called - /// for FunctionDecls's. - CIRGenFunction *CurCGF = nullptr; - - /// Per-module type mapping from clang AST to CIR. - std::unique_ptr genTypes; - - /// Use to track source locations across nested visitor traversals. - /// Always use a `SourceLocRAIIObject` to change currSrcLoc. - std::optional currSrcLoc; - class SourceLocRAIIObject { - CIRGenModule &P; - std::optional OldVal; - - public: - SourceLocRAIIObject(CIRGenModule &p, mlir::Location Value) : P(p) { - if (P.currSrcLoc) - OldVal = P.currSrcLoc; - P.currSrcLoc = Value; - } +mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { + const SourceManager &SM = astCtx.getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(SLoc); + StringRef Filename = PLoc.getFilename(); + return mlir::FileLineColLoc::get(builder.getStringAttr(Filename), + PLoc.getLine(), PLoc.getColumn()); +} - /// Can be used to restore the state early, before the dtor - /// is run. - void restore() { P.currSrcLoc = OldVal; } - ~SourceLocRAIIObject() { restore(); } - }; +mlir::Location CIRGenModule::getLoc(SourceRange SLoc) { + mlir::Location B = getLoc(SLoc.getBegin()); + mlir::Location E = getLoc(SLoc.getEnd()); + SmallVector locs = {B, E}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); +} - /// Helpers to convert Clang's SourceLocation to a MLIR Location. - mlir::Location getLoc(SourceLocation SLoc) { - const SourceManager &SM = astCtx.getSourceManager(); - PresumedLoc PLoc = SM.getPresumedLoc(SLoc); - StringRef Filename = PLoc.getFilename(); - return mlir::FileLineColLoc::get(builder.getStringAttr(Filename), - PLoc.getLine(), PLoc.getColumn()); - } +mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { + SmallVector locs = {lhs, rhs}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); +} - mlir::Location getLoc(SourceRange SLoc) { - mlir::Location B = getLoc(SLoc.getBegin()); - mlir::Location E = getLoc(SLoc.getEnd()); - SmallVector locs = {B, E}; - mlir::Attribute metadata; - return mlir::FusedLoc::get(locs, metadata, builder.getContext()); - } +mlir::LogicalResult CIRGenModule::declare(const Decl *var, QualType T, + mlir::Location loc, + CharUnits alignment, + mlir::Value &addr, bool IsParam) { + const auto *namedVar = dyn_cast_or_null(var); + assert(namedVar && "Needs a named decl"); - mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs) { - SmallVector locs = {lhs, rhs}; - mlir::Attribute metadata; - return mlir::FusedLoc::get(locs, metadata, builder.getContext()); - } + if (symbolTable.count(var)) + return mlir::failure(); - /// Declare a variable in the current scope, return success if the variable - /// wasn't declared yet. - mlir::LogicalResult declare(const Decl *var, QualType T, mlir::Location loc, - CharUnits alignment, mlir::Value &addr, - bool IsParam = false) { - const auto *namedVar = dyn_cast_or_null(var); - assert(namedVar && "Needs a named decl"); + auto localVarTy = getCIRType(T); + auto localVarPtrTy = + mlir::cir::PointerType::get(builder.getContext(), localVarTy); - if (symbolTable.count(var)) - return mlir::failure(); + auto alignIntAttr = + mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), + alignment.getQuantity()); - auto localVarTy = getCIRType(T); - auto localVarPtrTy = - mlir::cir::PointerType::get(builder.getContext(), localVarTy); + auto localVarAddr = builder.create( + loc, /*addr type*/ localVarPtrTy, /*var type*/ localVarTy, + IsParam ? InitStyle::paraminit : InitStyle::uninitialized, alignIntAttr); - auto alignIntAttr = - mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), - alignment.getQuantity()); + auto *parentBlock = localVarAddr->getBlock(); + localVarAddr->moveBefore(&parentBlock->front()); - auto localVarAddr = builder.create( - loc, /*addr type*/ localVarPtrTy, /*var type*/ localVarTy, - IsParam ? InitStyle::paraminit : InitStyle::uninitialized, - alignIntAttr); + // Insert into the symbol table, allocate some stack space in the + // function entry block. + symbolTable.insert(var, localVarAddr); + addr = localVarAddr; - auto *parentBlock = localVarAddr->getBlock(); - localVarAddr->moveBefore(&parentBlock->front()); + return mlir::success(); +} - // Insert into the symbol table, allocate some stack space in the - // function entry block. - symbolTable.insert(var, localVarAddr); - addr = localVarAddr; +bool CIRGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) { + if (!Ty.isConstant(astCtx) && !Ty->isReferenceType()) + return false; - return mlir::success(); + if (astCtx.getLangOpts().CPlusPlus) { + if (const CXXRecordDecl *Record = + astCtx.getBaseElementType(Ty)->getAsCXXRecordDecl()) + return ExcludeCtor && !Record->hasMutableFields() && + Record->hasTrivialDestructor(); } -public: - mlir::ModuleOp getModule() { return theModule; } - mlir::OpBuilder &getBuilder() { return builder; } - - class ScalarExprEmitter : public StmtVisitor { - LLVM_ATTRIBUTE_UNUSED CIRGenFunction &CGF; - CIRGenModule &CGM; - - public: - ScalarExprEmitter(CIRGenFunction &cgf, CIRGenModule &cgm) - : CGF(cgf), CGM(cgm) {} - - mlir::Value Visit(Expr *E) { - return StmtVisitor::Visit(E); - } - - /// Emits the address of the l-value, then loads and returns the result. - mlir::Value buildLoadOfLValue(const Expr *E) { - LValue LV = CGM.buildLValue(E); - auto load = CGM.builder.create( - CGM.getLoc(E->getExprLoc()), CGM.getCIRType(E->getType()), - LV.getPointer(), mlir::UnitAttr::get(CGM.builder.getContext())); - // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); - return load; - } - - // Handle l-values. - mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { - // FIXME: we could try to emit this as constant first, see - // CGF.tryEmitAsConstant(E) - return buildLoadOfLValue(E); - } - - // Emit code for an explicit or implicit cast. Implicit - // casts have to handle a more broad range of conversions than explicit - // casts, as they handle things like function to ptr-to-function decay - // etc. - mlir::Value VisitCastExpr(CastExpr *CE) { - Expr *E = CE->getSubExpr(); - QualType DestTy = CE->getType(); - clang::CastKind Kind = CE->getCastKind(); - switch (Kind) { - case CK_LValueToRValue: - assert(CGM.astCtx.hasSameUnqualifiedType(E->getType(), DestTy)); - assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); - return Visit(const_cast(E)); - case CK_NullToPointer: { - // FIXME: use MustVisitNullValue(E) and evaluate expr. - // Note that DestTy is used as the MLIR type instead of a custom - // nullptr type. - mlir::Type Ty = CGM.getCIRType(DestTy); - return CGM.builder.create( - CGM.getLoc(E->getExprLoc()), Ty, - mlir::cir::NullAttr::get(CGM.builder.getContext(), Ty)); - } - case CK_IntegralToBoolean: { - return buildIntToBoolConversion(Visit(E), - CGM.getLoc(CE->getSourceRange())); - } - default: - emitError(CGM.getLoc(CE->getExprLoc()), "cast kind not implemented: '") - << CE->getCastKindName() << "'"; - assert(0 && "not implemented"); - return nullptr; - } - } - - mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { - assert(!isa(E->getType()) && "not implemented"); - return CGM.buildLValue(E->getSubExpr()).getPointer(); - } - - mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { - mlir::Type Ty = CGM.getCIRType(E->getType()); - return CGM.builder.create( - CGM.getLoc(E->getExprLoc()), Ty, - CGM.builder.getBoolAttr(E->getValue())); - } - - struct BinOpInfo { - mlir::Value LHS; - mlir::Value RHS; - SourceRange Loc; - QualType Ty; // Computation Type. - BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform - FPOptions FPFeatures; - const Expr *E; // Entire expr, for error unsupported. May not be binop. - - /// Check if the binop computes a division or a remainder. - bool isDivremOp() const { - return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign || - Opcode == BO_RemAssign; - } - - /// Check if at least one operand is a fixed point type. In such cases, - /// this operation did not follow usual arithmetic conversion and both - /// operands might not be of the same type. - bool isFixedPointOp() const { - // We cannot simply check the result type since comparison operations - // return an int. - if (const auto *BinOp = dyn_cast(E)) { - QualType LHSType = BinOp->getLHS()->getType(); - QualType RHSType = BinOp->getRHS()->getType(); - return LHSType->isFixedPointType() || RHSType->isFixedPointType(); - } - if (const auto *UnOp = dyn_cast(E)) - return UnOp->getSubExpr()->getType()->isFixedPointType(); - return false; - } - }; - - BinOpInfo buildBinOps(const BinaryOperator *E) { - BinOpInfo Result; - Result.LHS = Visit(E->getLHS()); - Result.RHS = Visit(E->getRHS()); - Result.Ty = E->getType(); - Result.Opcode = E->getOpcode(); - Result.Loc = E->getSourceRange(); - // TODO: Result.FPFeatures - Result.E = E; - return Result; - } + return true; +} - mlir::Value buildMul(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); - } - mlir::Value buildDiv(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Div, Ops.LHS, Ops.RHS); - } - mlir::Value buildRem(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); - } - mlir::Value buildAdd(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS); - } - mlir::Value buildSub(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); - } - mlir::Value buildShl(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Shl, Ops.LHS, Ops.RHS); - } - mlir::Value buildShr(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Shr, Ops.LHS, Ops.RHS); - } - mlir::Value buildAnd(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::And, Ops.LHS, Ops.RHS); - } - mlir::Value buildXor(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); - } - mlir::Value buildOr(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, - Ops.LHS, Ops.RHS); - } +CIRGenModule::AutoVarEmission +CIRGenModule::buildAutoVarAlloca(const VarDecl &D) { + QualType Ty = D.getType(); + // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private && + // getLangOpts().OpenCL)) + assert(Ty.getAddressSpace() == LangAS::Default); + + assert(!D.isEscapingByref() && "not implemented"); + assert(!Ty->isVariablyModifiedType() && "not implemented"); + assert(!astCtx.getLangOpts().OpenMP && // !CGM.getLangOpts().OpenMPIRBuilder + "not implemented"); + bool NRVO = astCtx.getLangOpts().ElideConstructors && D.isNRVOVariable(); + assert(!NRVO && "not implemented"); + assert(Ty->isConstantSizeType() && "not implemented"); + assert(!D.hasAttr() && "not implemented"); + + AutoVarEmission emission(D); + CharUnits alignment = astCtx.getDeclAlign(&D); + // TODO: debug info + // TODO: use CXXABI + + // If this value is an array or struct with a statically determinable + // constant initializer, there are optimizations we can do. + // + // TODO: We should constant-evaluate the initializer of any variable, + // as long as it is initialized by a constant expression. Currently, + // isConstantInitializer produces wrong answers for structs with + // reference or bitfield members, and a few other cases, and checking + // for POD-ness protects us from some of these. + if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) && + (D.isConstexpr() || + ((Ty.isPODType(astCtx) || + astCtx.getBaseElementType(Ty)->isObjCObjectPointerType()) && + D.getInit()->isConstantInitializer(astCtx, false)))) { + + // If the variable's a const type, and it's neither an NRVO + // candidate nor a __block variable and has no mutable members, + // emit it as a global instead. + // Exception is if a variable is located in non-constant address space + // in OpenCL. + // TODO: deal with CGM.getCodeGenOpts().MergeAllConstants + // TODO: perhaps we don't need this at all at CIR since this can + // be done as part of lowering down to LLVM. + if ((!astCtx.getLangOpts().OpenCL || + Ty.getAddressSpace() == LangAS::opencl_constant) && + (!NRVO && !D.isEscapingByref() && isTypeConstant(Ty, true))) + assert(0 && "not implemented"); - // Binary operators and binary compound assignment operators. -#define HANDLEBINOP(OP) \ - mlir::Value VisitBin##OP(const BinaryOperator *E) { \ - return build##OP(buildBinOps(E)); \ + // Otherwise, tell the initialization code that we're in this case. + emission.IsConstantAggregate = true; } - HANDLEBINOP(Mul) - HANDLEBINOP(Div) - HANDLEBINOP(Rem) - HANDLEBINOP(Add) - HANDLEBINOP(Sub) - HANDLEBINOP(Shl) - HANDLEBINOP(Shr) - HANDLEBINOP(And) - HANDLEBINOP(Xor) - HANDLEBINOP(Or) -#undef HANDLEBINOP - - mlir::Value buildCmp(const BinaryOperator *E) { - mlir::Value Result; - QualType LHSTy = E->getLHS()->getType(); - QualType RHSTy = E->getRHS()->getType(); - - if (const MemberPointerType *MPT = LHSTy->getAs()) { - assert(0 && "not implemented"); - } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { - BinOpInfo BOInfo = buildBinOps(E); - mlir::Value LHS = BOInfo.LHS; - mlir::Value RHS = BOInfo.RHS; - - if (LHSTy->isVectorType()) { - // Cannot handle any vector just yet. - assert(0 && "not implemented"); - // If AltiVec, the comparison results in a numeric type, so we use - // intrinsics comparing vectors and giving 0 or 1 as a result - if (!E->getType()->isVectorType()) - assert(0 && "not implemented"); - } - if (BOInfo.isFixedPointOp()) { - assert(0 && "not implemented"); - } else { - // TODO: when we add proper basic types to CIR we - // probably won't need to handle - // LHSTy->hasSignedIntegerRepresentation() - - // Unsigned integers and pointers. - if (LHS.getType().isa() || - RHS.getType().isa()) { - // TODO: Handle StrictVTablePointers and - // mayBeDynamicClass/invariant group. - assert(0 && "not implemented"); - } - - mlir::cir::CmpOpKind Kind; - switch (E->getOpcode()) { - case BO_LT: - Kind = mlir::cir::CmpOpKind::lt; - break; - case BO_GT: - Kind = mlir::cir::CmpOpKind::gt; - break; - case BO_LE: - Kind = mlir::cir::CmpOpKind::le; - break; - case BO_GE: - Kind = mlir::cir::CmpOpKind::ge; - break; - case BO_EQ: - Kind = mlir::cir::CmpOpKind::eq; - break; - case BO_NE: - Kind = mlir::cir::CmpOpKind::ne; - break; - default: - llvm_unreachable("unsupported"); - } - - return CGM.builder.create( - CGM.getLoc(BOInfo.Loc), CGM.getCIRType(BOInfo.Ty), Kind, - BOInfo.LHS, BOInfo.RHS); - } - - // If this is a vector comparison, sign extend the result to the - // appropriate vector integer type and return it (don't convert to - // bool). - if (LHSTy->isVectorType()) - assert(0 && "not implemented"); - } else { // Complex Comparison: can only be an equality comparison. - assert(0 && "not implemented"); - } - return buildScalarConversion(Result, CGM.astCtx.BoolTy, E->getType(), - E->getExprLoc()); - } + // TODO: track source location range... + mlir::Value addr; + if (failed(declare(&D, Ty, getLoc(D.getSourceRange()), alignment, addr))) { + theModule.emitError("Cannot declare variable"); + return emission; + } -#define VISITCOMP(CODE) \ - mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); } - VISITCOMP(LT) - VISITCOMP(GT) - VISITCOMP(LE) - VISITCOMP(GE) - VISITCOMP(EQ) - VISITCOMP(NE) -#undef VISITCOMP - - mlir::Value VisitExpr(Expr *E) { - // Crashing here for "ScalarExprClassName"? Please implement - // VisitScalarExprClassName(...) to get this working. - emitError(CGM.getLoc(E->getExprLoc()), "scalar exp no implemented: '") - << E->getStmtClassName() << "'"; - assert(0 && "shouldn't be here!"); - return {}; - } + // TODO: what about emitting lifetime markers for MSVC catch parameters? + // TODO: something like @llvm.lifetime.start/end here? revisit this later. + emission.Addr = RawAddress{addr, alignment}; + return emission; +} - mlir::Value buildIntToBoolConversion(mlir::Value srcVal, - mlir::Location loc) { - // Because of the type rules of C, we often end up computing a - // logical value, then zero extending it to int, then wanting it - // as a logical value again. - // TODO: optimize this common case here or leave it for later - // CIR passes? - mlir::Type boolTy = CGM.getCIRType(CGM.astCtx.BoolTy); - return CGM.builder.create( - loc, boolTy, mlir::cir::CastKind::int_to_bool, srcVal); - } +/// Determine whether the given initializer is trivial in the sense +/// that it requires no code to be generated. +bool CIRGenModule::isTrivialInitializer(const Expr *Init) { + if (!Init) + return true; - /// EmitConversionToBool - Convert the specified expression value to a - /// boolean (i1) truth value. This is equivalent to "Val != 0". - mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType, - mlir::Location loc) { - assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); + if (const CXXConstructExpr *Construct = dyn_cast(Init)) + if (CXXConstructorDecl *Constructor = Construct->getConstructor()) + if (Constructor->isTrivial() && Constructor->isDefaultConstructor() && + !Construct->requiresZeroInitialization()) + return true; - if (SrcType->isRealFloatingType()) - assert(0 && "not implemented"); + return false; +} - if (const MemberPointerType *MPT = dyn_cast(SrcType)) - assert(0 && "not implemented"); +// TODO: this can also be abstrated into common AST helpers +bool CIRGenModule::hasBooleanRepresentation(QualType Ty) { - assert((SrcType->isIntegerType() || - Src.getType().isa<::mlir::cir::PointerType>()) && - "Unknown scalar type to convert"); + if (Ty->isBooleanType()) + return true; - assert(Src.getType().isa() && - "pointer source not implemented"); - return buildIntToBoolConversion(Src, loc); - } + if (const EnumType *ET = Ty->getAs()) + return ET->getDecl()->getIntegerType()->isBooleanType(); - /// Emit a conversion from the specified type to the specified destination - /// type, both of which are CIR scalar types. - /// TODO: do we need ScalarConversionOpts here? Should be done in another - /// pass. - mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcType, - QualType DstType, SourceLocation Loc) { - if (SrcType->isFixedPointType()) { - assert(0 && "not implemented"); - } else if (DstType->isFixedPointType()) { - assert(0 && "not implemented"); - } + if (const AtomicType *AT = Ty->getAs()) + return hasBooleanRepresentation(AT->getValueType()); - SrcType = CGM.astCtx.getCanonicalType(SrcType); - DstType = CGM.astCtx.getCanonicalType(DstType); - if (SrcType == DstType) - return Src; + return false; +} - if (DstType->isVoidType()) - return nullptr; - mlir::Type SrcTy = Src.getType(); +mlir::Value CIRGenModule::buildToMemory(mlir::Value Value, QualType Ty) { + // Bool has a different representation in memory than in registers. + return Value; +} - // Handle conversions to bool first, they are special: comparisons against - // 0. - if (DstType->isBooleanType()) - return buildConversionToBool(Src, SrcType, CGM.getLoc(Loc)); +void CIRGenModule::buildStoreOfScalar(mlir::Value value, LValue lvalue, + const Decl *InitDecl) { + // TODO: constant matrix type, volatile, non temporal, TBAA + buildStoreOfScalar(value, lvalue.getAddress(), false, lvalue.getType(), + lvalue.getBaseInfo(), InitDecl, false); +} - mlir::Type DstTy = CGM.getCIRType(DstType); +void CIRGenModule::buildStoreOfScalar(mlir::Value Value, RawAddress Addr, + bool Volatile, QualType Ty, + LValueBaseInfo BaseInfo, + const Decl *InitDecl, + bool isNontemporal) { + // TODO: PreserveVec3Type + // TODO: LValueIsSuitableForInlineAtomic ? + // TODO: TBAA + Value = buildToMemory(Value, Ty); + if (Ty->isAtomicType() || isNontemporal) { + assert(0 && "not implemented"); + } - // Cast from half through float if half isn't a native type. - if (SrcType->isHalfType() && !CGM.astCtx.getLangOpts().NativeHalfType) { - assert(0 && "not implemented"); + // Update the alloca with more info on initialization. + auto SrcAlloca = + dyn_cast_or_null(Addr.getPointer().getDefiningOp()); + if (InitDecl) { + InitStyle IS; + const VarDecl *VD = dyn_cast_or_null(InitDecl); + assert(VD && "VarDecl expected"); + if (VD->hasInit()) { + switch (VD->getInitStyle()) { + case VarDecl::ParenListInit: + llvm_unreachable("NYI"); + case VarDecl::CInit: + IS = InitStyle::cinit; + break; + case VarDecl::CallInit: + IS = InitStyle::callinit; + break; + case VarDecl::ListInit: + IS = InitStyle::listinit; + break; } + SrcAlloca.setInitAttr(InitStyleAttr::get(builder.getContext(), IS)); + } + } + assert(currSrcLoc && "must pass in source location"); + builder.create(*currSrcLoc, Value, Addr.getPointer()); +} - // LLVM codegen ignore conversions like int -> uint, we should probably - // emit it here in case lowering to sanitizers dialect at some point. - if (SrcTy == DstTy) { - assert(0 && "not implemented"); - } +void CIRGenModule::buldStoreThroughLValue(RValue Src, LValue Dst, + const Decl *InitDecl) { + assert(Dst.isSimple() && "only implemented simple"); + // TODO: ObjC lifetime. + assert(Src.isScalar() && "Can't emit an agg store with this method"); + buildStoreOfScalar(Src.getScalarVal(), Dst, InitDecl); +} - // Handle pointer conversions next: pointers can only be converted to/from - // other pointers and integers. - if (DstTy.isa<::mlir::cir::PointerType>()) { - assert(0 && "not implemented"); - } +void CIRGenModule::buildScalarInit(const Expr *init, const ValueDecl *D, + LValue lvalue) { + // TODO: this is where a lot of ObjC lifetime stuff would be done. + mlir::Value value = buildScalarExpr(init); + SourceLocRAIIObject Loc{*this, getLoc(D->getSourceRange())}; + buldStoreThroughLValue(RValue::get(value), lvalue, D); + return; +} - if (SrcTy.isa<::mlir::cir::PointerType>()) { - // Must be an ptr to int cast. - assert(DstTy.isa() && "not ptr->int?"); - assert(0 && "not implemented"); - } +void CIRGenModule::buildExprAsInit(const Expr *init, const ValueDecl *D, + LValue lvalue) { + QualType type = D->getType(); - // A scalar can be splatted to an extended vector of the same element type - if (DstType->isExtVectorType() && !SrcType->isVectorType()) { - // Sema should add casts to make sure that the source expression's type - // is the same as the vector's element type (sans qualifiers) - assert( - DstType->castAs()->getElementType().getTypePtr() == - SrcType.getTypePtr() && - "Splatted expr doesn't match with vector element type?"); + if (type->isReferenceType()) { + assert(0 && "not implemented"); + return; + } + switch (CIRGenFunction::getEvaluationKind(type)) { + case TEK_Scalar: + buildScalarInit(init, D, lvalue); + return; + case TEK_Complex: { + assert(0 && "not implemented"); + return; + } + case TEK_Aggregate: + assert(0 && "not implemented"); + return; + } + llvm_unreachable("bad evaluation kind"); +} - assert(0 && "not implemented"); - } +void CIRGenModule::buildAutoVarInit(const AutoVarEmission &emission) { + assert(emission.Variable && "emission was not valid!"); - if (SrcType->isMatrixType() && DstType->isMatrixType()) - assert(0 && "not implemented"); + const VarDecl &D = *emission.Variable; + QualType type = D.getType(); - // Finally, we have the arithmetic types: real int/float. - assert(0 && "not implemented"); - mlir::Value Res = nullptr; - mlir::Type ResTy = DstTy; + // If this local has an initializer, emit it now. + const Expr *Init = D.getInit(); - // TODO: implement CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) + // TODO: in LLVM codegen if we are at an unreachable point, the initializer + // isn't emitted unless it contains a label. What we want for CIR? + assert(builder.getInsertionBlock()); - // Cast to half through float if half isn't a native type. - if (DstType->isHalfType() && !CGM.astCtx.getLangOpts().NativeHalfType) { - assert(0 && "not implemented"); - } + // Initialize the variable here if it doesn't have a initializer and it is a + // C struct that is non-trivial to initialize or an array containing such a + // struct. + if (!Init && type.isNonTrivialToPrimitiveDefaultInitialize() == + QualType::PDIK_Struct) { + assert(0 && "not implemented"); + return; + } - // TODO: Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); - if (DstTy != ResTy) { - assert(0 && "not implemented"); - } + const RawAddress Loc = emission.Addr; - return Res; - } + // Note: constexpr already initializes everything correctly. + LangOptions::TrivialAutoVarInitKind trivialAutoVarInit = + (D.isConstexpr() + ? LangOptions::TrivialAutoVarInitKind::Uninitialized + : (D.getAttr() + ? LangOptions::TrivialAutoVarInitKind::Uninitialized + : astCtx.getLangOpts().getTrivialAutoVarInit())); - // Leaves. - mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { - mlir::Type Ty = CGM.getCIRType(E->getType()); - return CGM.builder.create( - CGM.getLoc(E->getExprLoc()), Ty, - CGM.builder.getIntegerAttr(Ty, E->getValue())); - } - }; + auto initializeWhatIsTechnicallyUninitialized = [&](RawAddress Loc) { + if (trivialAutoVarInit == + LangOptions::TrivialAutoVarInitKind::Uninitialized) + return; - struct AutoVarEmission { - const VarDecl *Variable; - /// The address of the alloca for languages with explicit address space - /// (e.g. OpenCL) or alloca casted to generic pointer for address space - /// agnostic languages (e.g. C++). Invalid if the variable was emitted - /// as a global constant. - RawAddress Addr; - - /// True if the variable is of aggregate type and has a constant - /// initializer. - bool IsConstantAggregate; - - struct Invalid {}; - AutoVarEmission(Invalid) : Variable(nullptr), Addr(RawAddress::invalid()) {} - - AutoVarEmission(const VarDecl &variable) - : Variable(&variable), Addr(RawAddress::invalid()), - IsConstantAggregate(false) {} - - static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } - /// Returns the raw, allocated address, which is not necessarily - /// the address of the object itself. It is casted to default - /// address space for address space agnostic languages. - RawAddress getAllocatedAddress() const { return Addr; } + assert(0 && "unimplemented"); }; - /// Determine whether an object of this type can be emitted - /// as a constant. - /// - /// If ExcludeCtor is true, the duration when the object's constructor runs - /// will not be considered. The caller will need to verify that the object is - /// not written to during its construction. - /// FIXME: in LLVM codegen path this is part of CGM, which doesn't seem - /// like necessary, since (1) it doesn't use CGM at all and (2) is AST type - /// query specific. - bool isTypeConstant(QualType Ty, bool ExcludeCtor) { - if (!Ty.isConstant(astCtx) && !Ty->isReferenceType()) - return false; - - if (astCtx.getLangOpts().CPlusPlus) { - if (const CXXRecordDecl *Record = - astCtx.getBaseElementType(Ty)->getAsCXXRecordDecl()) - return ExcludeCtor && !Record->hasMutableFields() && - Record->hasTrivialDestructor(); - } + if (isTrivialInitializer(Init)) + return initializeWhatIsTechnicallyUninitialized(Loc); - return true; + if (emission.IsConstantAggregate || + D.mightBeUsableInConstantExpressions(astCtx)) { + assert(0 && "not implemented"); } - /// Emit the alloca and debug information for a - /// local variable. Does not emit initialization or destruction. - AutoVarEmission buildAutoVarAlloca(const VarDecl &D) { - QualType Ty = D.getType(); - // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private && - // getLangOpts().OpenCL)) - assert(Ty.getAddressSpace() == LangAS::Default); - - assert(!D.isEscapingByref() && "not implemented"); - assert(!Ty->isVariablyModifiedType() && "not implemented"); - assert(!astCtx.getLangOpts().OpenMP && // !CGM.getLangOpts().OpenMPIRBuilder - "not implemented"); - bool NRVO = astCtx.getLangOpts().ElideConstructors && D.isNRVOVariable(); - assert(!NRVO && "not implemented"); - assert(Ty->isConstantSizeType() && "not implemented"); - assert(!D.hasAttr() && "not implemented"); - - AutoVarEmission emission(D); - CharUnits alignment = astCtx.getDeclAlign(&D); - // TODO: debug info - // TODO: use CXXABI - - // If this value is an array or struct with a statically determinable - // constant initializer, there are optimizations we can do. - // - // TODO: We should constant-evaluate the initializer of any variable, - // as long as it is initialized by a constant expression. Currently, - // isConstantInitializer produces wrong answers for structs with - // reference or bitfield members, and a few other cases, and checking - // for POD-ness protects us from some of these. - if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) && - (D.isConstexpr() || - ((Ty.isPODType(astCtx) || - astCtx.getBaseElementType(Ty)->isObjCObjectPointerType()) && - D.getInit()->isConstantInitializer(astCtx, false)))) { - - // If the variable's a const type, and it's neither an NRVO - // candidate nor a __block variable and has no mutable members, - // emit it as a global instead. - // Exception is if a variable is located in non-constant address space - // in OpenCL. - // TODO: deal with CGM.getCodeGenOpts().MergeAllConstants - // TODO: perhaps we don't need this at all at CIR since this can - // be done as part of lowering down to LLVM. - if ((!astCtx.getLangOpts().OpenCL || - Ty.getAddressSpace() == LangAS::opencl_constant) && - (!NRVO && !D.isEscapingByref() && isTypeConstant(Ty, true))) - assert(0 && "not implemented"); - - // Otherwise, tell the initialization code that we're in this case. - emission.IsConstantAggregate = true; - } - - // TODO: track source location range... - mlir::Value addr; - if (failed(declare(&D, Ty, getLoc(D.getSourceRange()), alignment, addr))) { - theModule.emitError("Cannot declare variable"); - return emission; - } + initializeWhatIsTechnicallyUninitialized(Loc); + LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); + return buildExprAsInit(Init, &D, lv); +} - // TODO: what about emitting lifetime markers for MSVC catch parameters? - // TODO: something like @llvm.lifetime.start/end here? revisit this later. - emission.Addr = RawAddress{addr, alignment}; - return emission; - } +void CIRGenModule::buildAutoVarCleanups(const AutoVarEmission &emission) { + assert(emission.Variable && "emission was not valid!"); - /// Determine whether the given initializer is trivial in the sense - /// that it requires no code to be generated. - bool isTrivialInitializer(const Expr *Init) { - if (!Init) - return true; + // TODO: in LLVM codegen if we are at an unreachable point codgen + // is ignored. What we want for CIR? + assert(builder.getInsertionBlock()); + const VarDecl &D = *emission.Variable; - if (const CXXConstructExpr *Construct = dyn_cast(Init)) - if (CXXConstructorDecl *Constructor = Construct->getConstructor()) - if (Constructor->isTrivial() && Constructor->isDefaultConstructor() && - !Construct->requiresZeroInitialization()) - return true; + // Check the type for a cleanup. + // TODO: something like emitAutoVarTypeCleanup + if (QualType::DestructionKind dtorKind = D.needsDestruction(astCtx)) + assert(0 && "not implemented"); - return false; - } + // In GC mode, honor objc_precise_lifetime. + if (astCtx.getLangOpts().getGC() != LangOptions::NonGC && + D.hasAttr()) + assert(0 && "not implemented"); - // TODO: this can also be abstrated into common AST helpers - bool hasBooleanRepresentation(QualType Ty) { - if (Ty->isBooleanType()) - return true; + // Handle the cleanup attribute. + if (const CleanupAttr *CA = D.getAttr()) + assert(0 && "not implemented"); - if (const EnumType *ET = Ty->getAs()) - return ET->getDecl()->getIntegerType()->isBooleanType(); + // TODO: handle block variable +} - if (const AtomicType *AT = Ty->getAs()) - return hasBooleanRepresentation(AT->getValueType()); +/// Emit code and set up symbol table for a variable declaration with auto, +/// register, or no storage class specifier. These turn into simple stack +/// objects, globals depending on target. +void CIRGenModule::buildAutoVarDecl(const VarDecl &D) { + AutoVarEmission emission = buildAutoVarAlloca(D); + buildAutoVarInit(emission); + buildAutoVarCleanups(emission); +} - return false; +void CIRGenModule::buildVarDecl(const VarDecl &D) { + if (D.hasExternalStorage()) { + assert(0 && "should we just returns is there something to track?"); + // Don't emit it now, allow it to be emitted lazily on its first use. + return; } - mlir::Value buildToMemory(mlir::Value Value, QualType Ty) { - // Bool has a different representation in memory than in registers. - return Value; - } + // Some function-scope variable does not have static storage but still + // needs to be emitted like a static variable, e.g. a function-scope + // variable in constant address space in OpenCL. + if (D.getStorageDuration() != SD_Automatic) + assert(0 && "not implemented"); - void buildStoreOfScalar(mlir::Value value, LValue lvalue, - const Decl *InitDecl) { - // TODO: constant matrix type, volatile, non temporal, TBAA - buildStoreOfScalar(value, lvalue.getAddress(), false, lvalue.getType(), - lvalue.getBaseInfo(), InitDecl, false); - } + if (D.getType().getAddressSpace() == LangAS::opencl_local) + assert(0 && "not implemented"); - void buildStoreOfScalar(mlir::Value Value, RawAddress Addr, bool Volatile, - QualType Ty, LValueBaseInfo BaseInfo, - const Decl *InitDecl, bool isNontemporal) { - // TODO: PreserveVec3Type - // TODO: LValueIsSuitableForInlineAtomic ? - // TODO: TBAA - Value = buildToMemory(Value, Ty); - if (Ty->isAtomicType() || isNontemporal) { - assert(0 && "not implemented"); - } + assert(D.hasLocalStorage()); + return buildAutoVarDecl(D); +} - // Update the alloca with more info on initialization. - auto SrcAlloca = dyn_cast_or_null( - Addr.getPointer().getDefiningOp()); - if (InitDecl) { - InitStyle IS; - const VarDecl *VD = dyn_cast_or_null(InitDecl); - assert(VD && "VarDecl expected"); - if (VD->hasInit()) { - switch (VD->getInitStyle()) { - case VarDecl::ParenListInit: - llvm_unreachable("NYI"); - case VarDecl::CInit: - IS = InitStyle::cinit; - break; - case VarDecl::CallInit: - IS = InitStyle::callinit; - break; - case VarDecl::ListInit: - IS = InitStyle::listinit; - break; - } - SrcAlloca.setInitAttr(InitStyleAttr::get(builder.getContext(), IS)); - } - } - assert(currSrcLoc && "must pass in source location"); - builder.create(*currSrcLoc, Value, Addr.getPointer()); - } +void CIRGenModule::buildDecl(const Decl &D) { + switch (D.getKind()) { + case Decl::ImplicitConceptSpecialization: + case Decl::TopLevelStmt: + case Decl::HLSLBuffer: + case Decl::UnnamedGlobalConstant: + llvm_unreachable("NYI"); + case Decl::BuiltinTemplate: + case Decl::TranslationUnit: + case Decl::ExternCContext: + case Decl::Namespace: + case Decl::UnresolvedUsingTypename: + case Decl::ClassTemplateSpecialization: + case Decl::ClassTemplatePartialSpecialization: + case Decl::VarTemplateSpecialization: + case Decl::VarTemplatePartialSpecialization: + case Decl::TemplateTypeParm: + case Decl::UnresolvedUsingValue: + case Decl::NonTypeTemplateParm: + case Decl::CXXDeductionGuide: + case Decl::CXXMethod: + case Decl::CXXConstructor: + case Decl::CXXDestructor: + case Decl::CXXConversion: + case Decl::Field: + case Decl::MSProperty: + case Decl::IndirectField: + case Decl::ObjCIvar: + case Decl::ObjCAtDefsField: + case Decl::ParmVar: + case Decl::ImplicitParam: + case Decl::ClassTemplate: + case Decl::VarTemplate: + case Decl::FunctionTemplate: + case Decl::TypeAliasTemplate: + case Decl::TemplateTemplateParm: + case Decl::ObjCMethod: + case Decl::ObjCCategory: + case Decl::ObjCProtocol: + case Decl::ObjCInterface: + case Decl::ObjCCategoryImpl: + case Decl::ObjCImplementation: + case Decl::ObjCProperty: + case Decl::ObjCCompatibleAlias: + case Decl::PragmaComment: + case Decl::PragmaDetectMismatch: + case Decl::AccessSpec: + case Decl::LinkageSpec: + case Decl::Export: + case Decl::ObjCPropertyImpl: + case Decl::FileScopeAsm: + case Decl::Friend: + case Decl::FriendTemplate: + case Decl::Block: + case Decl::Captured: + case Decl::UsingShadow: + case Decl::ConstructorUsingShadow: + case Decl::ObjCTypeParam: + case Decl::Binding: + case Decl::UnresolvedUsingIfExists: + llvm_unreachable("Declaration should not be in declstmts!"); + case Decl::Record: // struct/union/class X; + case Decl::CXXRecord: // struct/union/class X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::Enum: // enum X; + assert(0 && "Not implemented"); + return; + case Decl::Function: // void X(); + case Decl::EnumConstant: // enum ? { X = ? } + case Decl::StaticAssert: // static_assert(X, ""); [C++0x] + case Decl::Label: // __label__ x; + case Decl::Import: + case Decl::MSGuid: // __declspec(uuid("...")) + case Decl::TemplateParamObject: + case Decl::OMPThreadPrivate: + case Decl::OMPAllocate: + case Decl::OMPCapturedExpr: + case Decl::OMPRequires: + case Decl::Empty: + case Decl::Concept: + case Decl::LifetimeExtendedTemporary: + case Decl::RequiresExprBody: + // None of these decls require codegen support. + return; - /// Store the specified rvalue into the specified - /// lvalue, where both are guaranteed to the have the same type, and that type - /// is 'Ty'. - void buldStoreThroughLValue(RValue Src, LValue Dst, const Decl *InitDecl) { - assert(Dst.isSimple() && "only implemented simple"); - // TODO: ObjC lifetime. - assert(Src.isScalar() && "Can't emit an agg store with this method"); - buildStoreOfScalar(Src.getScalarVal(), Dst, InitDecl); - } + case Decl::NamespaceAlias: + assert(0 && "Not implemented"); + return; + case Decl::Using: // using X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::UsingEnum: // using enum X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::UsingPack: + assert(0 && "Not implemented"); + return; + case Decl::UsingDirective: // using namespace X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::Var: + case Decl::Decomposition: { + const VarDecl &VD = cast(D); + assert(VD.isLocalVarDecl() && + "Should not see file-scope variables inside a function!"); + buildVarDecl(VD); + if (auto *DD = dyn_cast(&VD)) + assert(0 && "Not implemented"); - void buildScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue) { - // TODO: this is where a lot of ObjC lifetime stuff would be done. - mlir::Value value = buildScalarExpr(init); - SourceLocRAIIObject Loc{*this, getLoc(D->getSourceRange())}; - buldStoreThroughLValue(RValue::get(value), lvalue, D); + // FIXME: add this + // if (auto *DD = dyn_cast(&VD)) + // for (auto *B : DD->bindings()) + // if (auto *HD = B->getHoldingVar()) + // EmitVarDecl(*HD); return; } - /// Emit an expression as an initializer for an object (variable, field, etc.) - /// at the given location. The expression is not necessarily the normal - /// initializer for the object, and the address is not necessarily - /// its normal location. - /// - /// \param init the initializing expression - /// \param D the object to act as if we're initializing - /// \param lvalue the lvalue to initialize - void buildExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue) { - QualType type = D->getType(); - - if (type->isReferenceType()) { - assert(0 && "not implemented"); - return; - } - switch (CIRGenFunction::getEvaluationKind(type)) { - case TEK_Scalar: - buildScalarInit(init, D, lvalue); - return; - case TEK_Complex: { - assert(0 && "not implemented"); - return; - } - case TEK_Aggregate: - assert(0 && "not implemented"); - return; - } - llvm_unreachable("bad evaluation kind"); - } + case Decl::OMPDeclareReduction: + case Decl::OMPDeclareMapper: + assert(0 && "Not implemented"); - void buildAutoVarInit(const AutoVarEmission &emission) { - assert(emission.Variable && "emission was not valid!"); + case Decl::Typedef: // typedef int X; + case Decl::TypeAlias: { // using X = int; [C++0x] + assert(0 && "Not implemented"); + } + } +} - const VarDecl &D = *emission.Variable; - QualType type = D.getType(); +/// Emit the computation of the specified expression of scalar type, +/// ignoring the result. +mlir::Value CIRGenModule::buildScalarExpr(const Expr *E) { + assert(E && CIRGenFunction::hasScalarEvaluationKind(E->getType()) && + "Invalid scalar expression to emit"); - // If this local has an initializer, emit it now. - const Expr *Init = D.getInit(); + return ScalarExprEmitter(*CurCGF, *this).Visit(const_cast(E)); +} - // TODO: in LLVM codegen if we are at an unreachable point, the initializer - // isn't emitted unless it contains a label. What we want for CIR? - assert(builder.getInsertionBlock()); +/// Emit a conversion from the specified type to the specified destination +/// type, both of which are CIR scalar types. +mlir::Value CIRGenModule::buildScalarConversion(mlir::Value Src, QualType SrcTy, + QualType DstTy, + SourceLocation Loc) { + assert(CIRGenFunction::hasScalarEvaluationKind(SrcTy) && + CIRGenFunction::hasScalarEvaluationKind(DstTy) && + "Invalid scalar expression to emit"); + return ScalarExprEmitter(*CurCGF, *this) + .buildScalarConversion(Src, SrcTy, DstTy, Loc); +} - // Initialize the variable here if it doesn't have a initializer and it is a - // C struct that is non-trivial to initialize or an array containing such a - // struct. - if (!Init && type.isNonTrivialToPrimitiveDefaultInitialize() == - QualType::PDIK_Struct) { - assert(0 && "not implemented"); - return; - } +mlir::LogicalResult CIRGenModule::buildReturnStmt(const ReturnStmt &S) { + assert(!(astCtx.getLangOpts().ElideConstructors && S.getNRVOCandidate() && + S.getNRVOCandidate()->isNRVOVariable()) && + "unimplemented"); + assert(!CurCGF->FnRetQualTy->isReferenceType() && "unimplemented"); - const RawAddress Loc = emission.Addr; + // Emit the result value, even if unused, to evaluate the side effects. + const Expr *RV = S.getRetValue(); + if (!RV) // Do nothing (return value is left uninitialized) + return mlir::success(); + assert(!isa(RV) && "unimplemented"); + + mlir::Value V = nullptr; + switch (CIRGenFunction::getEvaluationKind(RV->getType())) { + case TEK_Scalar: + V = buildScalarExpr(RV); + // Builder.CreateStore(EmitScalarExpr(RV), ReturnValue); + break; + case TEK_Complex: + case TEK_Aggregate: + llvm::errs() << "ReturnStmt EvaluationKind not implemented\n"; + return mlir::failure(); + } - // Note: constexpr already initializes everything correctly. - LangOptions::TrivialAutoVarInitKind trivialAutoVarInit = - (D.isConstexpr() - ? LangOptions::TrivialAutoVarInitKind::Uninitialized - : (D.getAttr() - ? LangOptions::TrivialAutoVarInitKind::Uninitialized - : astCtx.getLangOpts().getTrivialAutoVarInit())); + CurCGF->RetValue = V; + // Otherwise, this return operation has zero operands. + if (!V || (RV && RV->getType()->isVoidType())) { + // FIXME: evaluate for side effects. + } - auto initializeWhatIsTechnicallyUninitialized = [&](RawAddress Loc) { - if (trivialAutoVarInit == - LangOptions::TrivialAutoVarInitKind::Uninitialized) - return; + builder.create(getLoc(S.getSourceRange()), + V ? ArrayRef(V) : ArrayRef()); + return mlir::success(); +} - assert(0 && "unimplemented"); - }; +mlir::LogicalResult CIRGenModule::buildDeclStmt(const DeclStmt &S) { + if (!builder.getInsertionBlock()) + theModule.emitError( + "Seems like this is unreachable code, what should we do?"); - if (isTrivialInitializer(Init)) - return initializeWhatIsTechnicallyUninitialized(Loc); + for (const auto *I : S.decls()) { + buildDecl(*I); + } - if (emission.IsConstantAggregate || - D.mightBeUsableInConstantExpressions(astCtx)) { - assert(0 && "not implemented"); - } + return mlir::success(); +} - initializeWhatIsTechnicallyUninitialized(Loc); - LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); - return buildExprAsInit(Init, &D, lv); +mlir::LogicalResult CIRGenModule::buildSimpleStmt(const Stmt *S, + bool useCurrentScope) { + switch (S->getStmtClass()) { + default: + return mlir::failure(); + case Stmt::DeclStmtClass: + return buildDeclStmt(cast(*S)); + case Stmt::CompoundStmtClass: + return useCurrentScope + ? buildCompoundStmtWithoutScope(cast(*S)) + : buildCompoundStmt(cast(*S)); + case Stmt::ReturnStmtClass: + return buildReturnStmt(cast(*S)); + case Stmt::NullStmtClass: + break; + + case Stmt::LabelStmtClass: + case Stmt::AttributedStmtClass: + case Stmt::GotoStmtClass: + case Stmt::BreakStmtClass: + case Stmt::ContinueStmtClass: + case Stmt::DefaultStmtClass: + case Stmt::CaseStmtClass: + case Stmt::SEHLeaveStmtClass: + llvm::errs() << "CIR codegen for '" << S->getStmtClassName() + << "' not implemented\n"; + assert(0 && "not implemented"); } - void buildAutoVarCleanups(const AutoVarEmission &emission) { - assert(emission.Variable && "emission was not valid!"); - - // TODO: in LLVM codegen if we are at an unreachable point codgen - // is ignored. What we want for CIR? - assert(builder.getInsertionBlock()); - const VarDecl &D = *emission.Variable; + return mlir::success(); +} - // Check the type for a cleanup. - // TODO: something like emitAutoVarTypeCleanup - if (QualType::DestructionKind dtorKind = D.needsDestruction(astCtx)) - assert(0 && "not implemented"); +LValue CIRGenModule::buildDeclRefLValue(const DeclRefExpr *E) { + const NamedDecl *ND = E->getDecl(); - // In GC mode, honor objc_precise_lifetime. - if (astCtx.getLangOpts().getGC() != LangOptions::NonGC && - D.hasAttr()) - assert(0 && "not implemented"); + assert(E->isNonOdrUse() != NOUR_Unevaluated && + "should not emit an unevaluated operand"); - // Handle the cleanup attribute. - if (const CleanupAttr *CA = D.getAttr()) - assert(0 && "not implemented"); + if (const auto *VD = dyn_cast(ND)) { + // Global Named registers access via intrinsics only + assert(VD->getStorageClass() != SC_Register && "not implemented"); + assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); + assert(!E->refersToEnclosingVariableOrCapture() && "not implemented"); + assert(!(VD->hasLinkage() || VD->isStaticDataMember()) && + "not implemented"); + assert(!VD->isEscapingByref() && "not implemented"); + assert(!VD->getType()->isReferenceType() && "not implemented"); + assert(symbolTable.count(VD) && "should be already mapped"); - // TODO: handle block variable - } + mlir::Value V = symbolTable.lookup(VD); + assert(V && "Name lookup must succeed"); - /// Emit code and set up symbol table for a variable declaration with auto, - /// register, or no storage class specifier. These turn into simple stack - /// objects, globals depending on target. - void buildAutoVarDecl(const VarDecl &D) { - AutoVarEmission emission = buildAutoVarAlloca(D); - buildAutoVarInit(emission); - buildAutoVarCleanups(emission); + LValue LV = LValue::makeAddr(RawAddress(V, CharUnits::fromQuantity(4)), + VD->getType(), AlignmentSource::Decl); + return LV; } - /// This method handles emission of any variable declaration - /// inside a function, including static vars etc. - void buildVarDecl(const VarDecl &D) { - if (D.hasExternalStorage()) { - assert(0 && "should we just returns is there something to track?"); - // Don't emit it now, allow it to be emitted lazily on its first use. - return; - } - - // Some function-scope variable does not have static storage but still - // needs to be emitted like a static variable, e.g. a function-scope - // variable in constant address space in OpenCL. - if (D.getStorageDuration() != SD_Automatic) - assert(0 && "not implemented"); + llvm_unreachable("Unhandled DeclRefExpr?"); +} - if (D.getType().getAddressSpace() == LangAS::opencl_local) - assert(0 && "not implemented"); +/// Emit code to compute the specified expression which +/// can have any type. The result is returned as an RValue struct. +/// TODO: if this is an aggregate expression, add a AggValueSlot to indicate +/// where the result should be returned. +RValue CIRGenModule::buildAnyExpr(const Expr *E) { + switch (CIRGenFunction::getEvaluationKind(E->getType())) { + case TEK_Scalar: + return RValue::get(buildScalarExpr(E)); + case TEK_Complex: + assert(0 && "not implemented"); + case TEK_Aggregate: + assert(0 && "not implemented"); + } + llvm_unreachable("bad evaluation kind"); +} - assert(D.hasLocalStorage()); - return buildAutoVarDecl(D); +LValue CIRGenModule::buildBinaryOperatorLValue(const BinaryOperator *E) { + // Comma expressions just emit their LHS then their RHS as an l-value. + if (E->getOpcode() == BO_Comma) { + assert(0 && "not implemented"); } - void buildDecl(const Decl &D) { - switch (D.getKind()) { - case Decl::TopLevelStmt: - case Decl::ImplicitConceptSpecialization: - case Decl::HLSLBuffer: - case Decl::UnnamedGlobalConstant: - llvm_unreachable("NYI"); - case Decl::BuiltinTemplate: - case Decl::TranslationUnit: - case Decl::ExternCContext: - case Decl::Namespace: - case Decl::UnresolvedUsingTypename: - case Decl::ClassTemplateSpecialization: - case Decl::ClassTemplatePartialSpecialization: - case Decl::VarTemplateSpecialization: - case Decl::VarTemplatePartialSpecialization: - case Decl::TemplateTypeParm: - case Decl::UnresolvedUsingValue: - case Decl::NonTypeTemplateParm: - case Decl::CXXDeductionGuide: - case Decl::CXXMethod: - case Decl::CXXConstructor: - case Decl::CXXDestructor: - case Decl::CXXConversion: - case Decl::Field: - case Decl::MSProperty: - case Decl::IndirectField: - case Decl::ObjCIvar: - case Decl::ObjCAtDefsField: - case Decl::ParmVar: - case Decl::ImplicitParam: - case Decl::ClassTemplate: - case Decl::VarTemplate: - case Decl::FunctionTemplate: - case Decl::TypeAliasTemplate: - case Decl::TemplateTemplateParm: - case Decl::ObjCMethod: - case Decl::ObjCCategory: - case Decl::ObjCProtocol: - case Decl::ObjCInterface: - case Decl::ObjCCategoryImpl: - case Decl::ObjCImplementation: - case Decl::ObjCProperty: - case Decl::ObjCCompatibleAlias: - case Decl::PragmaComment: - case Decl::PragmaDetectMismatch: - case Decl::AccessSpec: - case Decl::LinkageSpec: - case Decl::Export: - case Decl::ObjCPropertyImpl: - case Decl::FileScopeAsm: - case Decl::Friend: - case Decl::FriendTemplate: - case Decl::Block: - case Decl::Captured: - case Decl::UsingShadow: - case Decl::ConstructorUsingShadow: - case Decl::ObjCTypeParam: - case Decl::Binding: - case Decl::UnresolvedUsingIfExists: - llvm_unreachable("Declaration should not be in declstmts!"); - case Decl::Record: // struct/union/class X; - case Decl::CXXRecord: // struct/union/class X; [C++] - assert(0 && "Not implemented"); - return; - case Decl::Enum: // enum X; - assert(0 && "Not implemented"); - return; - case Decl::Function: // void X(); - case Decl::EnumConstant: // enum ? { X = ? } - case Decl::StaticAssert: // static_assert(X, ""); [C++0x] - case Decl::Label: // __label__ x; - case Decl::Import: - case Decl::MSGuid: // __declspec(uuid("...")) - case Decl::TemplateParamObject: - case Decl::OMPThreadPrivate: - case Decl::OMPAllocate: - case Decl::OMPCapturedExpr: - case Decl::OMPRequires: - case Decl::Empty: - case Decl::Concept: - case Decl::LifetimeExtendedTemporary: - case Decl::RequiresExprBody: - // None of these decls require codegen support. - return; + if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) + assert(0 && "not implemented"); - case Decl::NamespaceAlias: - assert(0 && "Not implemented"); - return; - case Decl::Using: // using X; [C++] - assert(0 && "Not implemented"); - return; - case Decl::UsingEnum: // using enum X; [C++] - assert(0 && "Not implemented"); - return; - case Decl::UsingPack: - assert(0 && "Not implemented"); - return; - case Decl::UsingDirective: // using namespace X; [C++] - assert(0 && "Not implemented"); - return; - case Decl::Var: - case Decl::Decomposition: { - const VarDecl &VD = cast(D); - assert(VD.isLocalVarDecl() && - "Should not see file-scope variables inside a function!"); - buildVarDecl(VD); - if (auto *DD = dyn_cast(&VD)) - assert(0 && "Not implemented"); - - // FIXME: add this - // if (auto *DD = dyn_cast(&VD)) - // for (auto *B : DD->bindings()) - // if (auto *HD = B->getHoldingVar()) - // EmitVarDecl(*HD); - return; - } + assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); - case Decl::OMPDeclareReduction: - case Decl::OMPDeclareMapper: - assert(0 && "Not implemented"); + // Note that in all of these cases, __block variables need the RHS + // evaluated first just in case the variable gets moved by the RHS. - case Decl::Typedef: // typedef int X; - case Decl::TypeAlias: { // using X = int; [C++0x] - assert(0 && "Not implemented"); - } - } - } + switch (CIRGenFunction::getEvaluationKind(E->getType())) { + case TEK_Scalar: { + assert(E->getLHS()->getType().getObjCLifetime() == + clang::Qualifiers::ObjCLifetime::OCL_None && + "not implemented"); - /// Emit the computation of the specified expression of scalar type, - /// ignoring the result. - mlir::Value buildScalarExpr(const Expr *E) { - assert(E && CIRGenFunction::hasScalarEvaluationKind(E->getType()) && - "Invalid scalar expression to emit"); + RValue RV = buildAnyExpr(E->getRHS()); + LValue LV = buildLValue(E->getLHS()); - return ScalarExprEmitter(*CurCGF, *this).Visit(const_cast(E)); + SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; + buldStoreThroughLValue(RV, LV, nullptr /*InitDecl*/); + assert(!astCtx.getLangOpts().OpenMP && "last priv cond not implemented"); + return LV; } - /// Emit a conversion from the specified type to the specified destination - /// type, both of which are CIR scalar types. - mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcTy, - QualType DstTy, SourceLocation Loc) { - assert(CIRGenFunction::hasScalarEvaluationKind(SrcTy) && - CIRGenFunction::hasScalarEvaluationKind(DstTy) && - "Invalid scalar expression to emit"); - return ScalarExprEmitter(*CurCGF, *this) - .buildScalarConversion(Src, SrcTy, DstTy, Loc); + case TEK_Complex: + assert(0 && "not implemented"); + case TEK_Aggregate: + assert(0 && "not implemented"); } + llvm_unreachable("bad evaluation kind"); +} - mlir::LogicalResult buildReturnStmt(const ReturnStmt &S) { - assert(!(astCtx.getLangOpts().ElideConstructors && S.getNRVOCandidate() && - S.getNRVOCandidate()->isNRVOVariable()) && - "unimplemented"); - assert(!CurCGF->FnRetQualTy->isReferenceType() && "unimplemented"); - - // Emit the result value, even if unused, to evaluate the side effects. - const Expr *RV = S.getRetValue(); - if (!RV) // Do nothing (return value is left uninitialized) - return mlir::success(); - assert(!isa(RV) && "unimplemented"); - - mlir::Value V = nullptr; - switch (CIRGenFunction::getEvaluationKind(RV->getType())) { - case TEK_Scalar: - V = buildScalarExpr(RV); - // Builder.CreateStore(EmitScalarExpr(RV), ReturnValue); - break; - case TEK_Complex: - case TEK_Aggregate: - llvm::errs() << "ReturnStmt EvaluationKind not implemented\n"; - return mlir::failure(); - } +/// FIXME: this could likely be a common helper and not necessarily related +/// with codegen. +/// Return the best known alignment for an unknown pointer to a +/// particular class. +CharUnits CIRGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) { + if (!RD->hasDefinition()) + return CharUnits::One(); // Hopefully won't be used anywhere. - CurCGF->RetValue = V; - // Otherwise, this return operation has zero operands. - if (!V || (RV && RV->getType()->isVoidType())) { - // FIXME: evaluate for side effects. - } + auto &layout = astCtx.getASTRecordLayout(RD); - builder.create(getLoc(S.getSourceRange()), - V ? ArrayRef(V) : ArrayRef()); - return mlir::success(); - } + // If the class is final, then we know that the pointer points to an + // object of that type and can use the full alignment. + if (RD->isEffectivelyFinal()) + return layout.getAlignment(); + + // Otherwise, we have to assume it could be a subclass. + return layout.getNonVirtualAlignment(); +} - mlir::LogicalResult buildDeclStmt(const DeclStmt &S) { - if (!builder.getInsertionBlock()) - theModule.emitError( - "Seems like this is unreachable code, what should we do?"); +/// FIXME: this could likely be a common helper and not necessarily related +/// with codegen. +/// TODO: Add TBAAAccessInfo +CharUnits +CIRGenModule::getNaturalPointeeTypeAlignment(QualType T, + LValueBaseInfo *BaseInfo) { + return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, + /* forPointeeType= */ true); +} - for (const auto *I : S.decls()) { - buildDecl(*I); +/// FIXME: this could likely be a common helper and not necessarily related +/// with codegen. +/// TODO: Add TBAAAccessInfo +CharUnits CIRGenModule::getNaturalTypeAlignment(QualType T, + LValueBaseInfo *BaseInfo, + bool forPointeeType) { + // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But + // that doesn't return the information we need to compute BaseInfo. + + // Honor alignment typedef attributes even on incomplete types. + // We also honor them straight for C++ class types, even as pointees; + // there's an expressivity gap here. + if (auto TT = T->getAs()) { + if (auto Align = TT->getDecl()->getMaxAlignment()) { + if (BaseInfo) + *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType); + return astCtx.toCharUnitsFromBits(Align); } - - return mlir::success(); } - mlir::LogicalResult buildSimpleStmt(const Stmt *S, bool useCurrentScope) { - switch (S->getStmtClass()) { - default: - return mlir::failure(); - case Stmt::DeclStmtClass: - return buildDeclStmt(cast(*S)); - case Stmt::CompoundStmtClass: - return useCurrentScope - ? buildCompoundStmtWithoutScope(cast(*S)) - : buildCompoundStmt(cast(*S)); - case Stmt::ReturnStmtClass: - return buildReturnStmt(cast(*S)); - case Stmt::NullStmtClass: - break; + bool AlignForArray = T->isArrayType(); - case Stmt::LabelStmtClass: - case Stmt::AttributedStmtClass: - case Stmt::GotoStmtClass: - case Stmt::BreakStmtClass: - case Stmt::ContinueStmtClass: - case Stmt::DefaultStmtClass: - case Stmt::CaseStmtClass: - case Stmt::SEHLeaveStmtClass: - llvm::errs() << "CIR codegen for '" << S->getStmtClassName() - << "' not implemented\n"; - assert(0 && "not implemented"); - } + // Analyze the base element type, so we don't get confused by incomplete + // array types. + T = astCtx.getBaseElementType(T); - return mlir::success(); + if (T->isIncompleteType()) { + // We could try to replicate the logic from + // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the + // type is incomplete, so it's impossible to test. We could try to reuse + // getTypeAlignIfKnown, but that doesn't return the information we need + // to set BaseInfo. So just ignore the possibility that the alignment is + // greater than one. + if (BaseInfo) + *BaseInfo = LValueBaseInfo(AlignmentSource::Type); + return CharUnits::One(); } - LValue buildDeclRefLValue(const DeclRefExpr *E) { - const NamedDecl *ND = E->getDecl(); - - assert(E->isNonOdrUse() != NOUR_Unevaluated && - "should not emit an unevaluated operand"); - - if (const auto *VD = dyn_cast(ND)) { - // Global Named registers access via intrinsics only - assert(VD->getStorageClass() != SC_Register && "not implemented"); - assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); - assert(!E->refersToEnclosingVariableOrCapture() && "not implemented"); - assert(!(VD->hasLinkage() || VD->isStaticDataMember()) && - "not implemented"); - assert(!VD->isEscapingByref() && "not implemented"); - assert(!VD->getType()->isReferenceType() && "not implemented"); - assert(symbolTable.count(VD) && "should be already mapped"); - - mlir::Value V = symbolTable.lookup(VD); - assert(V && "Name lookup must succeed"); - - LValue LV = LValue::makeAddr(RawAddress(V, CharUnits::fromQuantity(4)), - VD->getType(), AlignmentSource::Decl); - return LV; - } - - llvm_unreachable("Unhandled DeclRefExpr?"); + if (BaseInfo) + *BaseInfo = LValueBaseInfo(AlignmentSource::Type); + + CharUnits Alignment; + const CXXRecordDecl *RD; + if (T.getQualifiers().hasUnaligned()) { + Alignment = CharUnits::One(); + } else if (forPointeeType && !AlignForArray && + (RD = T->getAsCXXRecordDecl())) { + // For C++ class pointees, we don't know whether we're pointing at a + // base or a complete object, so we generally need to use the + // non-virtual alignment. + Alignment = getClassPointerAlignment(RD); + } else { + Alignment = astCtx.getTypeAlignInChars(T); } - /// Emit code to compute the specified expression which - /// can have any type. The result is returned as an RValue struct. - /// TODO: if this is an aggregate expression, add a AggValueSlot to indicate - /// where the result should be returned. - RValue buildAnyExpr(const Expr *E) { - switch (CIRGenFunction::getEvaluationKind(E->getType())) { - case TEK_Scalar: - return RValue::get(buildScalarExpr(E)); - case TEK_Complex: - assert(0 && "not implemented"); - case TEK_Aggregate: - assert(0 && "not implemented"); - } - llvm_unreachable("bad evaluation kind"); + // Cap to the global maximum type alignment unless the alignment + // was somehow explicit on the type. + if (unsigned MaxAlign = astCtx.getLangOpts().MaxTypeAlign) { + if (Alignment.getQuantity() > MaxAlign && !astCtx.isAlignmentRequired(T)) + Alignment = CharUnits::fromQuantity(MaxAlign); } + return Alignment; +} - LValue buildBinaryOperatorLValue(const BinaryOperator *E) { - // Comma expressions just emit their LHS then their RHS as an l-value. - if (E->getOpcode() == BO_Comma) { - assert(0 && "not implemented"); - } - - if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) +/// Given an expression of pointer type, try to +/// derive a more accurate bound on the alignment of the pointer. +RawAddress CIRGenModule::buildPointerWithAlignment(const Expr *E, + LValueBaseInfo *BaseInfo) { + // We allow this with ObjC object pointers because of fragile ABIs. + assert(E->getType()->isPointerType() || + E->getType()->isObjCObjectPointerType()); + E = E->IgnoreParens(); + + // Casts: + if (const CastExpr *CE = dyn_cast(E)) { + if (const auto *ECE = dyn_cast(CE)) assert(0 && "not implemented"); - assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); - - // Note that in all of these cases, __block variables need the RHS - // evaluated first just in case the variable gets moved by the RHS. - - switch (CIRGenFunction::getEvaluationKind(E->getType())) { - case TEK_Scalar: { - assert(E->getLHS()->getType().getObjCLifetime() == - clang::Qualifiers::ObjCLifetime::OCL_None && - "not implemented"); - - RValue RV = buildAnyExpr(E->getRHS()); - LValue LV = buildLValue(E->getLHS()); - - SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; - buldStoreThroughLValue(RV, LV, nullptr /*InitDecl*/); - assert(!astCtx.getLangOpts().OpenMP && "last priv cond not implemented"); - return LV; - } - - case TEK_Complex: - assert(0 && "not implemented"); - case TEK_Aggregate: + switch (CE->getCastKind()) { + default: assert(0 && "not implemented"); + // Nothing to do here... + case CK_LValueToRValue: + break; } - llvm_unreachable("bad evaluation kind"); } - /// FIXME: this could likely be a common helper and not necessarily related - /// with codegen. - /// Return the best known alignment for an unknown pointer to a - /// particular class. - CharUnits getClassPointerAlignment(const CXXRecordDecl *RD) { - if (!RD->hasDefinition()) - return CharUnits::One(); // Hopefully won't be used anywhere. - - auto &layout = astCtx.getASTRecordLayout(RD); + // Unary &. + if (const UnaryOperator *UO = dyn_cast(E)) { + assert(0 && "not implemented"); + // if (UO->getOpcode() == UO_AddrOf) { + // LValue LV = buildLValue(UO->getSubExpr()); + // if (BaseInfo) + // *BaseInfo = LV.getBaseInfo(); + // // TODO: TBBA info + // return LV.getAddress(); + // } + } - // If the class is final, then we know that the pointer points to an - // object of that type and can use the full alignment. - if (RD->isEffectivelyFinal()) - return layout.getAlignment(); + // TODO: conditional operators, comma. + // Otherwise, use the alignment of the type. + CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), BaseInfo); + return RawAddress(buildScalarExpr(E), Align); +} - // Otherwise, we have to assume it could be a subclass. - return layout.getNonVirtualAlignment(); +LValue CIRGenModule::buildUnaryOpLValue(const UnaryOperator *E) { + // __extension__ doesn't affect lvalue-ness. + assert(E->getOpcode() != UO_Extension && "not implemented"); + + switch (E->getOpcode()) { + default: + llvm_unreachable("Unknown unary operator lvalue!"); + case UO_Deref: { + QualType T = E->getSubExpr()->getType()->getPointeeType(); + assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); + + LValueBaseInfo BaseInfo; + // TODO: add TBAAInfo + RawAddress Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo); + LValue LV = LValue::makeAddr(Addr, T, BaseInfo); + // TODO: set addr space + // TODO: ObjC/GC/__weak write barrier stuff. + return LV; } - - /// FIXME: this could likely be a common helper and not necessarily related - /// with codegen. - /// TODO: Add TBAAAccessInfo - CharUnits getNaturalPointeeTypeAlignment(QualType T, - LValueBaseInfo *BaseInfo) { - return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, - /* forPointeeType= */ true); + case UO_Real: + case UO_Imag: { + assert(0 && "not implemented"); } - - /// FIXME: this could likely be a common helper and not necessarily related - /// with codegen. - /// TODO: Add TBAAAccessInfo - CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo, - bool forPointeeType) { - // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But - // that doesn't return the information we need to compute BaseInfo. - - // Honor alignment typedef attributes even on incomplete types. - // We also honor them straight for C++ class types, even as pointees; - // there's an expressivity gap here. - if (auto TT = T->getAs()) { - if (auto Align = TT->getDecl()->getMaxAlignment()) { - if (BaseInfo) - *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType); - return astCtx.toCharUnitsFromBits(Align); - } - } - - bool AlignForArray = T->isArrayType(); - - // Analyze the base element type, so we don't get confused by incomplete - // array types. - T = astCtx.getBaseElementType(T); - - if (T->isIncompleteType()) { - // We could try to replicate the logic from - // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the - // type is incomplete, so it's impossible to test. We could try to reuse - // getTypeAlignIfKnown, but that doesn't return the information we need - // to set BaseInfo. So just ignore the possibility that the alignment is - // greater than one. - if (BaseInfo) - *BaseInfo = LValueBaseInfo(AlignmentSource::Type); - return CharUnits::One(); - } - - if (BaseInfo) - *BaseInfo = LValueBaseInfo(AlignmentSource::Type); - - CharUnits Alignment; - const CXXRecordDecl *RD; - if (T.getQualifiers().hasUnaligned()) { - Alignment = CharUnits::One(); - } else if (forPointeeType && !AlignForArray && - (RD = T->getAsCXXRecordDecl())) { - // For C++ class pointees, we don't know whether we're pointing at a - // base or a complete object, so we generally need to use the - // non-virtual alignment. - Alignment = getClassPointerAlignment(RD); - } else { - Alignment = astCtx.getTypeAlignInChars(T); - } - - // Cap to the global maximum type alignment unless the alignment - // was somehow explicit on the type. - if (unsigned MaxAlign = astCtx.getLangOpts().MaxTypeAlign) { - if (Alignment.getQuantity() > MaxAlign && !astCtx.isAlignmentRequired(T)) - Alignment = CharUnits::fromQuantity(MaxAlign); - } - return Alignment; + case UO_PreInc: + case UO_PreDec: { + assert(0 && "not implemented"); } + } +} - /// Given an expression of pointer type, try to - /// derive a more accurate bound on the alignment of the pointer. - RawAddress buildPointerWithAlignment(const Expr *E, - LValueBaseInfo *BaseInfo) { - // We allow this with ObjC object pointers because of fragile ABIs. - assert(E->getType()->isPointerType() || - E->getType()->isObjCObjectPointerType()); - E = E->IgnoreParens(); - - // Casts: - if (const CastExpr *CE = dyn_cast(E)) { - if (const auto *ECE = dyn_cast(CE)) - assert(0 && "not implemented"); - - switch (CE->getCastKind()) { - default: - assert(0 && "not implemented"); - // Nothing to do here... - case CK_LValueToRValue: - break; - } - } - - // Unary &. - if (const UnaryOperator *UO = dyn_cast(E)) { - assert(0 && "not implemented"); - // if (UO->getOpcode() == UO_AddrOf) { - // LValue LV = buildLValue(UO->getSubExpr()); - // if (BaseInfo) - // *BaseInfo = LV.getBaseInfo(); - // // TODO: TBBA info - // return LV.getAddress(); - // } - } - - // TODO: conditional operators, comma. - // Otherwise, use the alignment of the type. - CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), BaseInfo); - return RawAddress(buildScalarExpr(E), Align); +/// Emit code to compute a designator that specifies the location +/// of the expression. +/// FIXME: document this function better. +LValue CIRGenModule::buildLValue(const Expr *E) { + // FIXME: ApplyDebugLocation DL(*this, E); + switch (E->getStmtClass()) { + default: { + emitError(getLoc(E->getExprLoc()), "l-value not implemented for '") + << E->getStmtClassName() << "'"; + assert(0 && "not implemented"); + } + case Expr::BinaryOperatorClass: + return buildBinaryOperatorLValue(cast(E)); + case Expr::DeclRefExprClass: + return buildDeclRefLValue(cast(E)); + case Expr::UnaryOperatorClass: + return buildUnaryOpLValue(cast(E)); + case Expr::ObjCPropertyRefExprClass: + llvm_unreachable("cannot emit a property reference directly"); } - LValue buildUnaryOpLValue(const UnaryOperator *E) { - // __extension__ doesn't affect lvalue-ness. - assert(E->getOpcode() != UO_Extension && "not implemented"); + return LValue::makeAddr(RawAddress::invalid(), E->getType()); +} - switch (E->getOpcode()) { - default: - llvm_unreachable("Unknown unary operator lvalue!"); - case UO_Deref: { - QualType T = E->getSubExpr()->getType()->getPointeeType(); - assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); - - LValueBaseInfo BaseInfo; - // TODO: add TBAAInfo - RawAddress Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo); - LValue LV = LValue::makeAddr(Addr, T, BaseInfo); - // TODO: set addr space - // TODO: ObjC/GC/__weak write barrier stuff. - return LV; - } - case UO_Real: - case UO_Imag: { - assert(0 && "not implemented"); - } - case UO_PreInc: - case UO_PreDec: { - assert(0 && "not implemented"); - } - } - } +/// EmitIgnoredExpr - Emit code to compute the specified expression, +/// ignoring the result. +void CIRGenModule::buildIgnoredExpr(const Expr *E) { + assert(!E->isPRValue() && "not implemented"); - /// Emit code to compute a designator that specifies the location - /// of the expression. - /// FIXME: document this function better. - LValue buildLValue(const Expr *E) { - // FIXME: ApplyDebugLocation DL(*this, E); - switch (E->getStmtClass()) { - default: { - emitError(getLoc(E->getExprLoc()), "l-value not implemented for '") - << E->getStmtClassName() << "'"; - assert(0 && "not implemented"); - } - case Expr::BinaryOperatorClass: - return buildBinaryOperatorLValue(cast(E)); - case Expr::DeclRefExprClass: - return buildDeclRefLValue(cast(E)); - case Expr::UnaryOperatorClass: - return buildUnaryOpLValue(cast(E)); - case Expr::ObjCPropertyRefExprClass: - llvm_unreachable("cannot emit a property reference directly"); - } + // Just emit it as an l-value and drop the result. + buildLValue(E); +} - return LValue::makeAddr(RawAddress::invalid(), E->getType()); - } +/// If the specified expression does not fold +/// to a constant, or if it does but contains a label, return false. If it +/// constant folds return true and set the boolean result in Result. +bool CIRGenModule::ConstantFoldsToSimpleInteger(const Expr *Cond, + bool &ResultBool, + bool AllowLabels) { + llvm::APSInt ResultInt; + if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels)) + return false; - /// EmitIgnoredExpr - Emit code to compute the specified expression, - /// ignoring the result. - void buildIgnoredExpr(const Expr *E) { - assert(!E->isPRValue() && "not implemented"); + ResultBool = ResultInt.getBoolValue(); + return true; +} - // Just emit it as an l-value and drop the result. - buildLValue(E); - } +/// Return true if the statement contains a label in it. If +/// this statement is not executed normally, it not containing a label means +/// that we can just remove the code. +bool CIRGenModule::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) { + // Null statement, not a label! + if (!S) + return false; - /// If the specified expression does not fold - /// to a constant, or if it does but contains a label, return false. If it - /// constant folds return true and set the boolean result in Result. - bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &ResultBool, - bool AllowLabels) { - llvm::APSInt ResultInt; - if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels)) - return false; + // If this is a label, we have to emit the code, consider something like: + // if (0) { ... foo: bar(); } goto foo; + // + // TODO: If anyone cared, we could track __label__'s, since we know that you + // can't jump to one from outside their declared region. + if (isa(S)) + return true; - ResultBool = ResultInt.getBoolValue(); + // If this is a case/default statement, and we haven't seen a switch, we + // have to emit the code. + if (isa(S) && !IgnoreCaseStmts) return true; - } - /// Return true if the statement contains a label in it. If - /// this statement is not executed normally, it not containing a label means - /// that we can just remove the code. - bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false) { - // Null statement, not a label! - if (!S) - return false; - - // If this is a label, we have to emit the code, consider something like: - // if (0) { ... foo: bar(); } goto foo; - // - // TODO: If anyone cared, we could track __label__'s, since we know that you - // can't jump to one from outside their declared region. - if (isa(S)) - return true; + // If this is a switch statement, we want to ignore cases below it. + if (isa(S)) + IgnoreCaseStmts = true; - // If this is a case/default statement, and we haven't seen a switch, we - // have to emit the code. - if (isa(S) && !IgnoreCaseStmts) + // Scan subexpressions for verboten labels. + for (const Stmt *SubStmt : S->children()) + if (ContainsLabel(SubStmt, IgnoreCaseStmts)) return true; - // If this is a switch statement, we want to ignore cases below it. - if (isa(S)) - IgnoreCaseStmts = true; + return false; +} - // Scan subexpressions for verboten labels. - for (const Stmt *SubStmt : S->children()) - if (ContainsLabel(SubStmt, IgnoreCaseStmts)) - return true; +/// If the specified expression does not fold +/// to a constant, or if it does but contains a label, return false. If it +/// constant folds return true and set the folded value. +bool CIRGenModule::ConstantFoldsToSimpleInteger(const Expr *Cond, + llvm::APSInt &ResultInt, + bool AllowLabels) { + // FIXME: Rename and handle conversion of other evaluatable things + // to bool. + Expr::EvalResult Result; + if (!Cond->EvaluateAsInt(Result, astCtx)) + return false; // Not foldable, not integer or not fully evaluatable. + + llvm::APSInt Int = Result.Val.getInt(); + if (!AllowLabels && ContainsLabel(Cond)) + return false; // Contains a label. + + ResultInt = Int; + return true; +} - return false; +/// Perform the usual unary conversions on the specified +/// expression and compare the result against zero, returning an Int1Ty value. +mlir::Value CIRGenModule::evaluateExprAsBool(const Expr *E) { + // TODO: PGO + if (const MemberPointerType *MPT = E->getType()->getAs()) { + assert(0 && "not implemented"); } - /// If the specified expression does not fold - /// to a constant, or if it does but contains a label, return false. If it - /// constant folds return true and set the folded value. - bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt, - bool AllowLabels) { - // FIXME: Rename and handle conversion of other evaluatable things - // to bool. - Expr::EvalResult Result; - if (!Cond->EvaluateAsInt(Result, astCtx)) - return false; // Not foldable, not integer or not fully evaluatable. - - llvm::APSInt Int = Result.Val.getInt(); - if (!AllowLabels && ContainsLabel(Cond)) - return false; // Contains a label. - - ResultInt = Int; - return true; - } + QualType BoolTy = astCtx.BoolTy; + SourceLocation Loc = E->getExprLoc(); + // TODO: CGFPOptionsRAII for FP stuff. + assert(!E->getType()->isAnyComplexType() && + "complex to scalar not implemented"); + return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc); +} - /// Perform the usual unary conversions on the specified - /// expression and compare the result against zero, returning an Int1Ty value. - mlir::Value evaluateExprAsBool(const Expr *E) { - // TODO: PGO - if (const MemberPointerType *MPT = - E->getType()->getAs()) { - assert(0 && "not implemented"); - } +/// Emit an if on a boolean condition to the specified blocks. +/// FIXME: Based on the condition, this might try to simplify the codegen of +/// the conditional based on the branch. TrueCount should be the number of +/// times we expect the condition to evaluate to true based on PGO data. We +/// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr +/// for extra ideas). +mlir::LogicalResult CIRGenModule::buildIfOnBoolExpr(const Expr *cond, + mlir::Location loc, + const Stmt *thenS, + const Stmt *elseS) { + // TODO: scoped ApplyDebugLocation DL(*this, Cond); + // TODO: __builtin_unpredictable and profile counts? + cond = cond->IgnoreParens(); + mlir::Value condV = evaluateExprAsBool(cond); + mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); + + builder.create( + loc, condV, elseS, + /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + resThen = buildStmt(thenS, /*useCurrentScope=*/true); + builder.create(getLoc(thenS->getSourceRange().getEnd())); + }, + /*elseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + resElse = buildStmt(elseS, /*useCurrentScope=*/true); + builder.create(getLoc(elseS->getSourceRange().getEnd())); + }); + + return mlir::LogicalResult::success(resThen.succeeded() && + resElse.succeeded()); +} - QualType BoolTy = astCtx.BoolTy; - SourceLocation Loc = E->getExprLoc(); - // TODO: CGFPOptionsRAII for FP stuff. - assert(!E->getType()->isAnyComplexType() && - "complex to scalar not implemented"); - return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc); - } +mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { + // The else branch of a consteval if statement is always the only branch + // that can be runtime evaluated. + assert(!S.isConsteval() && "not implemented"); + mlir::LogicalResult res = mlir::success(); + + // C99 6.8.4.1: The first substatement is executed if the expression + // compares unequal to 0. The condition must be a scalar type. + auto ifStmtBuilder = [&]() -> mlir::LogicalResult { + if (S.getInit()) + if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); - /// Emit an if on a boolean condition to the specified blocks. - /// FIXME: Based on the condition, this might try to simplify the codegen of - /// the conditional based on the branch. TrueCount should be the number of - /// times we expect the condition to evaluate to true based on PGO data. We - /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr - /// for extra ideas). - mlir::LogicalResult buildIfOnBoolExpr(const Expr *cond, mlir::Location loc, - const Stmt *thenS, const Stmt *elseS) { - // TODO: scoped ApplyDebugLocation DL(*this, Cond); - // TODO: __builtin_unpredictable and profile counts? - cond = cond->IgnoreParens(); - mlir::Value condV = evaluateExprAsBool(cond); - mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); - - builder.create( - loc, condV, elseS, - /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - resThen = buildStmt(thenS, /*useCurrentScope=*/true); - builder.create(getLoc(thenS->getSourceRange().getEnd())); - }, - /*elseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - resElse = buildStmt(elseS, /*useCurrentScope=*/true); - builder.create(getLoc(elseS->getSourceRange().getEnd())); - }); - - return mlir::LogicalResult::success(resThen.succeeded() && - resElse.succeeded()); - } + if (S.getConditionVariable()) + buildDecl(*S.getConditionVariable()); - mlir::LogicalResult buildIfStmt(const IfStmt &S) { - // The else branch of a consteval if statement is always the only branch - // that can be runtime evaluated. - assert(!S.isConsteval() && "not implemented"); - mlir::LogicalResult res = mlir::success(); - - // C99 6.8.4.1: The first substatement is executed if the expression - // compares unequal to 0. The condition must be a scalar type. - auto ifStmtBuilder = [&]() -> mlir::LogicalResult { - if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) - return mlir::failure(); - - if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); - - // If the condition constant folds and can be elided, try to avoid - // emitting the condition and the dead arm of the if/else. - // FIXME: should this be done as part of a constant folder pass instead? - bool CondConstant; - if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, - S.isConstexpr())) { - assert(0 && "not implemented"); - } + // If the condition constant folds and can be elided, try to avoid + // emitting the condition and the dead arm of the if/else. + // FIXME: should this be done as part of a constant folder pass instead? + bool CondConstant; + if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, + S.isConstexpr())) { + assert(0 && "not implemented"); + } - // TODO: PGO and likelihood. - // The mlir::Location for cir.if skips the init/cond part of IfStmt, - // and effectively spans from "then-begin" to "else-end||then-end". - auto ifLocStart = getLoc(S.getThen()->getSourceRange().getBegin()); - auto ifLocEnd = getLoc(S.getSourceRange().getEnd()); - return buildIfOnBoolExpr(S.getCond(), getLoc(ifLocStart, ifLocEnd), - S.getThen(), S.getElse()); - }; - - // TODO: Add a new scoped symbol table. - // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); - // The if scope contains the full source range for IfStmt. - auto scopeLoc = getLoc(S.getSourceRange()); - auto scopeLocEnd = getLoc(S.getSourceRange().getEnd()); - builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - res = ifStmtBuilder(); - builder.create(scopeLocEnd); - }); - - return res; - } + // TODO: PGO and likelihood. + // The mlir::Location for cir.if skips the init/cond part of IfStmt, + // and effectively spans from "then-begin" to "else-end||then-end". + auto ifLocStart = getLoc(S.getThen()->getSourceRange().getBegin()); + auto ifLocEnd = getLoc(S.getSourceRange().getEnd()); + return buildIfOnBoolExpr(S.getCond(), getLoc(ifLocStart, ifLocEnd), + S.getThen(), S.getElse()); + }; - // Build CIR for a statement. useCurrentScope should be true if no - // new scopes need be created when finding a compound statement. - mlir::LogicalResult buildStmt(const Stmt *S, bool useCurrentScope) { - if (mlir::succeeded(buildSimpleStmt(S, useCurrentScope))) - return mlir::success(); + // TODO: Add a new scoped symbol table. + // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); + // The if scope contains the full source range for IfStmt. + auto scopeLoc = getLoc(S.getSourceRange()); + auto scopeLocEnd = getLoc(S.getSourceRange().getEnd()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + res = ifStmtBuilder(); + builder.create(scopeLocEnd); + }); + + return res; +} - if (astCtx.getLangOpts().OpenMP && astCtx.getLangOpts().OpenMPSimd) - assert(0 && "not implemented"); +// Build CIR for a statement. useCurrentScope should be true if no +// new scopes need be created when finding a compound statement. +mlir::LogicalResult CIRGenModule::buildStmt(const Stmt *S, + bool useCurrentScope) { + if (mlir::succeeded(buildSimpleStmt(S, useCurrentScope))) + return mlir::success(); - switch (S->getStmtClass()) { - case Stmt::OpenACCComputeConstructClass: - case Stmt::OMPScopeDirectiveClass: - case Stmt::OMPTeamsGenericLoopDirectiveClass: - case Stmt::OMPParallelMaskedDirectiveClass: - case Stmt::OMPTargetTeamsGenericLoopDirectiveClass: - case Stmt::OMPErrorDirectiveClass: - case Stmt::OMPMaskedTaskLoopDirectiveClass: - case Stmt::OMPMaskedTaskLoopSimdDirectiveClass: - case Stmt::OMPParallelGenericLoopDirectiveClass: - case Stmt::OMPParallelMaskedTaskLoopDirectiveClass: - case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass: - case Stmt::OMPTargetParallelGenericLoopDirectiveClass: - llvm_unreachable("NYI"); - case Stmt::NoStmtClass: - case Stmt::CXXCatchStmtClass: - case Stmt::SEHExceptStmtClass: - case Stmt::SEHFinallyStmtClass: - case Stmt::MSDependentExistsStmtClass: - llvm_unreachable("invalid statement class to emit generically"); - case Stmt::NullStmtClass: - case Stmt::CompoundStmtClass: - case Stmt::DeclStmtClass: - case Stmt::LabelStmtClass: - case Stmt::AttributedStmtClass: - case Stmt::GotoStmtClass: - case Stmt::BreakStmtClass: - case Stmt::ContinueStmtClass: - case Stmt::DefaultStmtClass: - case Stmt::CaseStmtClass: - case Stmt::SEHLeaveStmtClass: - llvm_unreachable("should have emitted these statements as simple"); + if (astCtx.getLangOpts().OpenMP && astCtx.getLangOpts().OpenMPSimd) + assert(0 && "not implemented"); + + switch (S->getStmtClass()) { + case Stmt::OpenACCComputeConstructClass: + case Stmt::OMPScopeDirectiveClass: + case Stmt::OMPParallelMaskedDirectiveClass: + case Stmt::OMPTargetTeamsGenericLoopDirectiveClass: + case Stmt::OMPTeamsGenericLoopDirectiveClass: + case Stmt::OMPTargetParallelGenericLoopDirectiveClass: + case Stmt::OMPParallelGenericLoopDirectiveClass: + case Stmt::OMPParallelMaskedTaskLoopDirectiveClass: + case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass: + case Stmt::OMPErrorDirectiveClass: + case Stmt::OMPMaskedTaskLoopDirectiveClass: + case Stmt::OMPMaskedTaskLoopSimdDirectiveClass: + llvm_unreachable("NYI"); + case Stmt::NoStmtClass: + case Stmt::CXXCatchStmtClass: + case Stmt::SEHExceptStmtClass: + case Stmt::SEHFinallyStmtClass: + case Stmt::MSDependentExistsStmtClass: + llvm_unreachable("invalid statement class to emit generically"); + case Stmt::NullStmtClass: + case Stmt::CompoundStmtClass: + case Stmt::DeclStmtClass: + case Stmt::LabelStmtClass: + case Stmt::AttributedStmtClass: + case Stmt::GotoStmtClass: + case Stmt::BreakStmtClass: + case Stmt::ContinueStmtClass: + case Stmt::DefaultStmtClass: + case Stmt::CaseStmtClass: + case Stmt::SEHLeaveStmtClass: + llvm_unreachable("should have emitted these statements as simple"); #define STMT(Type, Base) #define ABSTRACT_STMT(Op) #define EXPR(Type, Base) case Stmt::Type##Class: #include "clang/AST/StmtNodes.inc" - { - // Remember the block we came in on. - mlir::Block *incoming = builder.getInsertionBlock(); - assert(incoming && "expression emission must have an insertion point"); - - buildIgnoredExpr(cast(S)); - - mlir::Block *outgoing = builder.getInsertionBlock(); - assert(outgoing && "expression emission cleared block!"); - - // FIXME: Should we mimic LLVM emission here? - // The expression emitters assume (reasonably!) that the insertion - // point is always set. To maintain that, the call-emission code - // for noreturn functions has to enter a new block with no - // predecessors. We want to kill that block and mark the current - // insertion point unreachable in the common case of a call like - // "exit();". Since expression emission doesn't otherwise create - // blocks with no predecessors, we can just test for that. - // However, we must be careful not to do this to our incoming - // block, because *statement* emission does sometimes create - // reachable blocks which will have no predecessors until later in - // the function. This occurs with, e.g., labels that are not - // reachable by fallthrough. - if (incoming != outgoing && outgoing->use_empty()) - assert(0 && "not implemented"); - break; - } - - case Stmt::IfStmtClass: - if (buildIfStmt(cast(*S)).failed()) - return mlir::failure(); - break; - case Stmt::IndirectGotoStmtClass: - case Stmt::WhileStmtClass: - case Stmt::DoStmtClass: - case Stmt::ForStmtClass: - case Stmt::ReturnStmtClass: - case Stmt::SwitchStmtClass: - // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. - case Stmt::GCCAsmStmtClass: - case Stmt::MSAsmStmtClass: - case Stmt::CoroutineBodyStmtClass: - case Stmt::CoreturnStmtClass: - case Stmt::CapturedStmtClass: - case Stmt::ObjCAtTryStmtClass: - case Stmt::ObjCAtThrowStmtClass: - case Stmt::ObjCAtSynchronizedStmtClass: - case Stmt::ObjCForCollectionStmtClass: - case Stmt::ObjCAutoreleasePoolStmtClass: - case Stmt::CXXTryStmtClass: - case Stmt::CXXForRangeStmtClass: - case Stmt::SEHTryStmtClass: - case Stmt::OMPMetaDirectiveClass: - case Stmt::OMPCanonicalLoopClass: - case Stmt::OMPParallelDirectiveClass: - case Stmt::OMPSimdDirectiveClass: - case Stmt::OMPTileDirectiveClass: - case Stmt::OMPUnrollDirectiveClass: - case Stmt::OMPForDirectiveClass: - case Stmt::OMPForSimdDirectiveClass: - case Stmt::OMPSectionsDirectiveClass: - case Stmt::OMPSectionDirectiveClass: - case Stmt::OMPSingleDirectiveClass: - case Stmt::OMPMasterDirectiveClass: - case Stmt::OMPCriticalDirectiveClass: - case Stmt::OMPParallelForDirectiveClass: - case Stmt::OMPParallelForSimdDirectiveClass: - case Stmt::OMPParallelMasterDirectiveClass: - case Stmt::OMPParallelSectionsDirectiveClass: - case Stmt::OMPTaskDirectiveClass: - case Stmt::OMPTaskyieldDirectiveClass: - case Stmt::OMPBarrierDirectiveClass: - case Stmt::OMPTaskwaitDirectiveClass: - case Stmt::OMPTaskgroupDirectiveClass: - case Stmt::OMPFlushDirectiveClass: - case Stmt::OMPDepobjDirectiveClass: - case Stmt::OMPScanDirectiveClass: - case Stmt::OMPOrderedDirectiveClass: - case Stmt::OMPAtomicDirectiveClass: - case Stmt::OMPTargetDirectiveClass: - case Stmt::OMPTeamsDirectiveClass: - case Stmt::OMPCancellationPointDirectiveClass: - case Stmt::OMPCancelDirectiveClass: - case Stmt::OMPTargetDataDirectiveClass: - case Stmt::OMPTargetEnterDataDirectiveClass: - case Stmt::OMPTargetExitDataDirectiveClass: - case Stmt::OMPTargetParallelDirectiveClass: - case Stmt::OMPTargetParallelForDirectiveClass: - case Stmt::OMPTaskLoopDirectiveClass: - case Stmt::OMPTaskLoopSimdDirectiveClass: - case Stmt::OMPMasterTaskLoopDirectiveClass: - case Stmt::OMPMasterTaskLoopSimdDirectiveClass: - case Stmt::OMPParallelMasterTaskLoopDirectiveClass: - case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass: - case Stmt::OMPDistributeDirectiveClass: - case Stmt::OMPTargetUpdateDirectiveClass: - case Stmt::OMPDistributeParallelForDirectiveClass: - case Stmt::OMPDistributeParallelForSimdDirectiveClass: - case Stmt::OMPDistributeSimdDirectiveClass: - case Stmt::OMPTargetParallelForSimdDirectiveClass: - case Stmt::OMPTargetSimdDirectiveClass: - case Stmt::OMPTeamsDistributeDirectiveClass: - case Stmt::OMPTeamsDistributeSimdDirectiveClass: - case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass: - case Stmt::OMPTeamsDistributeParallelForDirectiveClass: - case Stmt::OMPTargetTeamsDirectiveClass: - case Stmt::OMPTargetTeamsDistributeDirectiveClass: - case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass: - case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass: - case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass: - case Stmt::OMPInteropDirectiveClass: - case Stmt::OMPDispatchDirectiveClass: - case Stmt::OMPGenericLoopDirectiveClass: - case Stmt::OMPMaskedDirectiveClass: { - llvm::errs() << "CIR codegen for '" << S->getStmtClassName() - << "' not implemented\n"; - assert(0 && "not implemented"); + { + // Remember the block we came in on. + mlir::Block *incoming = builder.getInsertionBlock(); + assert(incoming && "expression emission must have an insertion point"); + + buildIgnoredExpr(cast(S)); + + mlir::Block *outgoing = builder.getInsertionBlock(); + assert(outgoing && "expression emission cleared block!"); + + // FIXME: Should we mimic LLVM emission here? + // The expression emitters assume (reasonably!) that the insertion + // point is always set. To maintain that, the call-emission code + // for noreturn functions has to enter a new block with no + // predecessors. We want to kill that block and mark the current + // insertion point unreachable in the common case of a call like + // "exit();". Since expression emission doesn't otherwise create + // blocks with no predecessors, we can just test for that. + // However, we must be careful not to do this to our incoming + // block, because *statement* emission does sometimes create + // reachable blocks which will have no predecessors until later in + // the function. This occurs with, e.g., labels that are not + // reachable by fallthrough. + if (incoming != outgoing && outgoing->use_empty()) + assert(0 && "not implemented"); break; } - case Stmt::ObjCAtCatchStmtClass: - llvm_unreachable( - "@catch statements should be handled by EmitObjCAtTryStmt"); - case Stmt::ObjCAtFinallyStmtClass: - llvm_unreachable( - "@finally statements should be handled by EmitObjCAtTryStmt"); - } - return mlir::success(); + case Stmt::IfStmtClass: + if (buildIfStmt(cast(*S)).failed()) + return mlir::failure(); + break; + case Stmt::IndirectGotoStmtClass: + case Stmt::WhileStmtClass: + case Stmt::DoStmtClass: + case Stmt::ForStmtClass: + case Stmt::ReturnStmtClass: + case Stmt::SwitchStmtClass: + // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. + case Stmt::GCCAsmStmtClass: + case Stmt::MSAsmStmtClass: + case Stmt::CoroutineBodyStmtClass: + case Stmt::CoreturnStmtClass: + case Stmt::CapturedStmtClass: + case Stmt::ObjCAtTryStmtClass: + case Stmt::ObjCAtThrowStmtClass: + case Stmt::ObjCAtSynchronizedStmtClass: + case Stmt::ObjCForCollectionStmtClass: + case Stmt::ObjCAutoreleasePoolStmtClass: + case Stmt::CXXTryStmtClass: + case Stmt::CXXForRangeStmtClass: + case Stmt::SEHTryStmtClass: + case Stmt::OMPMetaDirectiveClass: + case Stmt::OMPCanonicalLoopClass: + case Stmt::OMPParallelDirectiveClass: + case Stmt::OMPSimdDirectiveClass: + case Stmt::OMPTileDirectiveClass: + case Stmt::OMPUnrollDirectiveClass: + case Stmt::OMPForDirectiveClass: + case Stmt::OMPForSimdDirectiveClass: + case Stmt::OMPSectionsDirectiveClass: + case Stmt::OMPSectionDirectiveClass: + case Stmt::OMPSingleDirectiveClass: + case Stmt::OMPMasterDirectiveClass: + case Stmt::OMPCriticalDirectiveClass: + case Stmt::OMPParallelForDirectiveClass: + case Stmt::OMPParallelForSimdDirectiveClass: + case Stmt::OMPParallelMasterDirectiveClass: + case Stmt::OMPParallelSectionsDirectiveClass: + case Stmt::OMPTaskDirectiveClass: + case Stmt::OMPTaskyieldDirectiveClass: + case Stmt::OMPBarrierDirectiveClass: + case Stmt::OMPTaskwaitDirectiveClass: + case Stmt::OMPTaskgroupDirectiveClass: + case Stmt::OMPFlushDirectiveClass: + case Stmt::OMPDepobjDirectiveClass: + case Stmt::OMPScanDirectiveClass: + case Stmt::OMPOrderedDirectiveClass: + case Stmt::OMPAtomicDirectiveClass: + case Stmt::OMPTargetDirectiveClass: + case Stmt::OMPTeamsDirectiveClass: + case Stmt::OMPCancellationPointDirectiveClass: + case Stmt::OMPCancelDirectiveClass: + case Stmt::OMPTargetDataDirectiveClass: + case Stmt::OMPTargetEnterDataDirectiveClass: + case Stmt::OMPTargetExitDataDirectiveClass: + case Stmt::OMPTargetParallelDirectiveClass: + case Stmt::OMPTargetParallelForDirectiveClass: + case Stmt::OMPTaskLoopDirectiveClass: + case Stmt::OMPTaskLoopSimdDirectiveClass: + case Stmt::OMPMasterTaskLoopDirectiveClass: + case Stmt::OMPMasterTaskLoopSimdDirectiveClass: + case Stmt::OMPParallelMasterTaskLoopDirectiveClass: + case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass: + case Stmt::OMPDistributeDirectiveClass: + case Stmt::OMPTargetUpdateDirectiveClass: + case Stmt::OMPDistributeParallelForDirectiveClass: + case Stmt::OMPDistributeParallelForSimdDirectiveClass: + case Stmt::OMPDistributeSimdDirectiveClass: + case Stmt::OMPTargetParallelForSimdDirectiveClass: + case Stmt::OMPTargetSimdDirectiveClass: + case Stmt::OMPTeamsDistributeDirectiveClass: + case Stmt::OMPTeamsDistributeSimdDirectiveClass: + case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass: + case Stmt::OMPTeamsDistributeParallelForDirectiveClass: + case Stmt::OMPTargetTeamsDirectiveClass: + case Stmt::OMPTargetTeamsDistributeDirectiveClass: + case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass: + case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass: + case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass: + case Stmt::OMPInteropDirectiveClass: + case Stmt::OMPDispatchDirectiveClass: + case Stmt::OMPGenericLoopDirectiveClass: + case Stmt::OMPMaskedDirectiveClass: { + llvm::errs() << "CIR codegen for '" << S->getStmtClassName() + << "' not implemented\n"; + assert(0 && "not implemented"); + break; } - - mlir::LogicalResult buildFunctionBody(const Stmt *Body) { - const CompoundStmt *S = dyn_cast(Body); - assert(S && "expected compound stmt"); - - // We start with function level scope for variables. - SymTableScopeTy varScope(symbolTable); - return buildCompoundStmtWithoutScope(*S); + case Stmt::ObjCAtCatchStmtClass: + llvm_unreachable( + "@catch statements should be handled by EmitObjCAtTryStmt"); + case Stmt::ObjCAtFinallyStmtClass: + llvm_unreachable( + "@finally statements should be handled by EmitObjCAtTryStmt"); } - mlir::LogicalResult buildCompoundStmt(const CompoundStmt &S) { - mlir::LogicalResult res = mlir::success(); + return mlir::success(); +} - auto compoundStmtBuilder = [&]() -> mlir::LogicalResult { - if (buildCompoundStmtWithoutScope(S).failed()) - return mlir::failure(); +mlir::LogicalResult CIRGenModule::buildFunctionBody(const Stmt *Body) { + const CompoundStmt *S = dyn_cast(Body); + assert(S && "expected compound stmt"); - return mlir::success(); - }; - - // Add local scope to track new declared variables. - SymTableScopeTy varScope(symbolTable); - auto locBegin = getLoc(S.getSourceRange().getBegin()); - auto locEnd = getLoc(S.getSourceRange().getEnd()); - builder.create( - locBegin, mlir::TypeRange(), /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - res = compoundStmtBuilder(); - builder.create(locEnd); - }); - - return res; - } + // We start with function level scope for variables. + SymTableScopeTy varScope(symbolTable); + return buildCompoundStmtWithoutScope(*S); +} - mlir::LogicalResult buildCompoundStmtWithoutScope(const CompoundStmt &S) { - for (auto *CurStmt : S.body()) - if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed()) - return mlir::failure(); +mlir::LogicalResult CIRGenModule::buildCompoundStmt(const CompoundStmt &S) { + mlir::LogicalResult res = mlir::success(); - return mlir::success(); - } + auto compoundStmtBuilder = [&]() -> mlir::LogicalResult { + if (buildCompoundStmtWithoutScope(S).failed()) + return mlir::failure(); - void buildTopLevelDecl(Decl *decl) { - switch (decl->getKind()) { - default: - assert(false && "Not yet implemented"); - case Decl::Function: - buildFunction(cast(decl)); - break; - case Decl::CXXRecord: { - CXXRecordDecl *crd = cast(decl); - // TODO: Handle debug info as CodeGenModule.cpp does - for (auto *childDecl : crd->decls()) - if (isa(childDecl) || isa(childDecl)) - buildTopLevelDecl(childDecl); - break; - } - case Decl::Record: - // There's nothing to do here, we emit everything pertaining to `Record`s - // lazily. - // TODO: handle debug info here? See clang's - // CodeGenModule::EmitTopLevelDecl - break; - } - } + return mlir::success(); + }; - // Emit a new function and add it to the MLIR module. - mlir::FuncOp buildFunction(const FunctionDecl *FD) { - CIRGenFunction CGF; - CurCGF = &CGF; + // Add local scope to track new declared variables. + SymTableScopeTy varScope(symbolTable); + auto locBegin = getLoc(S.getSourceRange().getBegin()); + auto locEnd = getLoc(S.getSourceRange().getEnd()); + builder.create( + locBegin, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + res = compoundStmtBuilder(); + builder.create(locEnd); + }); + + return res; +} - // Create a scope in the symbol table to hold variable declarations. - SymTableScopeTy varScope(symbolTable); +mlir::LogicalResult +CIRGenModule::buildCompoundStmtWithoutScope(const CompoundStmt &S) { + for (auto *CurStmt : S.body()) + if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed()) + return mlir::failure(); - const CXXMethodDecl *MD = dyn_cast(FD); - assert(!MD && "methods not implemented"); - auto fnLoc = getLoc(FD->getSourceRange()); + return mlir::success(); +} - // Create an MLIR function for the given prototype. - llvm::SmallVector argTypes; +void CIRGenModule::buildTopLevelDecl(Decl *decl) { + switch (decl->getKind()) { + default: + assert(false && "Not yet implemented"); + case Decl::Function: + buildFunction(cast(decl)); + break; + case Decl::CXXRecord: { + CXXRecordDecl *crd = cast(decl); + // TODO: Handle debug info as CodeGenModule.cpp does + for (auto *childDecl : crd->decls()) + if (isa(childDecl) || isa(childDecl)) + buildTopLevelDecl(childDecl); + break; + } + case Decl::Record: + // There's nothing to do here, we emit everything pertaining to `Record`s + // lazily. + // TODO: handle debug info here? See clang's + // CodeGenModule::EmitTopLevelDecl + break; + } +} - for (auto *Param : FD->parameters()) - argTypes.push_back(getCIRType(Param->getType())); +mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { + CIRGenFunction CGF; + CurCGF = &CGF; + + // Create a scope in the symbol table to hold variable declarations. + SymTableScopeTy varScope(symbolTable); + + const CXXMethodDecl *MD = dyn_cast(FD); + assert(!MD && "methods not implemented"); + auto fnLoc = getLoc(FD->getSourceRange()); + + // Create an MLIR function for the given prototype. + llvm::SmallVector argTypes; + + for (auto *Param : FD->parameters()) + argTypes.push_back(getCIRType(Param->getType())); + + CurCGF->FnRetQualTy = FD->getReturnType(); + auto funcType = + builder.getFunctionType(argTypes, CurCGF->FnRetQualTy->isVoidType() + ? mlir::TypeRange() + : getCIRType(CurCGF->FnRetQualTy)); + mlir::FuncOp function = mlir::FuncOp::create(fnLoc, FD->getName(), funcType); + if (!function) + return nullptr; + + // In MLIR the entry block of the function is special: it must have the + // same argument list as the function itself. + auto &entryBlock = *function.addEntryBlock(); + + // Set the insertion point in the builder to the beginning of the + // function body, it will be used throughout the codegen to create + // operations in this function. + builder.setInsertionPointToStart(&entryBlock); + + // Declare all the function arguments in the symbol table. + for (const auto nameValue : + llvm::zip(FD->parameters(), entryBlock.getArguments())) { + auto *paramVar = std::get<0>(nameValue); + auto paramVal = std::get<1>(nameValue); + auto alignment = astCtx.getDeclAlign(paramVar); + auto paramLoc = getLoc(paramVar->getSourceRange()); + paramVal.setLoc(paramLoc); - CurCGF->FnRetQualTy = FD->getReturnType(); - auto funcType = builder.getFunctionType( - argTypes, CurCGF->FnRetQualTy->isVoidType() - ? mlir::TypeRange() - : getCIRType(CurCGF->FnRetQualTy)); - mlir::FuncOp function = - mlir::FuncOp::create(fnLoc, FD->getName(), funcType); - if (!function) + mlir::Value addr; + if (failed(declare(paramVar, paramVar->getType(), paramLoc, alignment, addr, + true /*param*/))) return nullptr; + // Location of the store to the param storage tracked as beginning of + // the function body. + auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); + builder.create(fnBodyBegin, paramVal, addr); + } - // In MLIR the entry block of the function is special: it must have the - // same argument list as the function itself. - auto &entryBlock = *function.addEntryBlock(); - - // Set the insertion point in the builder to the beginning of the - // function body, it will be used throughout the codegen to create - // operations in this function. - builder.setInsertionPointToStart(&entryBlock); - - // Declare all the function arguments in the symbol table. - for (const auto nameValue : - llvm::zip(FD->parameters(), entryBlock.getArguments())) { - auto *paramVar = std::get<0>(nameValue); - auto paramVal = std::get<1>(nameValue); - auto alignment = astCtx.getDeclAlign(paramVar); - auto paramLoc = getLoc(paramVar->getSourceRange()); - paramVal.setLoc(paramLoc); - - mlir::Value addr; - if (failed(declare(paramVar, paramVar->getType(), paramLoc, alignment, - addr, true /*param*/))) - return nullptr; - // Location of the store to the param storage tracked as beginning of - // the function body. - auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); - builder.create(fnBodyBegin, paramVal, addr); - } - - // Emit the body of the function. - if (mlir::failed(buildFunctionBody(FD->getBody()))) { - function.erase(); - return nullptr; - } + // Emit the body of the function. + if (mlir::failed(buildFunctionBody(FD->getBody()))) { + function.erase(); + return nullptr; + } - ReturnOp returnOp; - if (!entryBlock.empty()) - returnOp = dyn_cast(entryBlock.back()); - if (!returnOp) - builder.create(getLoc(FD->getBody()->getEndLoc())); + ReturnOp returnOp; + if (!entryBlock.empty()) + returnOp = dyn_cast(entryBlock.back()); + if (!returnOp) + builder.create(getLoc(FD->getBody()->getEndLoc())); - if (mlir::failed(function.verifyBody())) - return nullptr; - theModule.push_back(function); - return function; - } + if (mlir::failed(function.verifyBody())) + return nullptr; + theModule.push_back(function); + return function; +} - mlir::Type getCIRType(const QualType &type) { - return genTypes->ConvertType(type); - } +mlir::Type CIRGenModule::getCIRType(const QualType &type) { + return genTypes->ConvertType(type); +} - void verifyModule() { - // Verify the module after we have finished constructing it, this will - // check the structural properties of the IR and invoke any specific - // verifiers we have on the CIR operations. - if (failed(mlir::verify(theModule))) - theModule.emitError("module verification error"); - } -}; -} // namespace cir +void CIRGenModule::verifyModule() { + // Verify the module after we have finished constructing it, this will + // check the structural properties of the IR and invoke any specific + // verifiers we have on the CIR operations. + if (failed(mlir::verify(theModule))) + theModule.emitError("module verification error"); +} CIRGenerator::CIRGenerator() = default; CIRGenerator::~CIRGenerator() = default; diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h new file mode 100644 index 000000000000..b7dd7ae445ab --- /dev/null +++ b/clang/lib/CIR/CIRGenModule.h @@ -0,0 +1,727 @@ +//===--- CIRGenModule.h - Per-Module state for CIR gen ----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is the internal per-translation-unit state used for CIR translation. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H +#define LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H + +#include "CIRGenFunction.h" +#include "CIRGenTypes.h" +#include "CIRGenValue.h" + +#include "clang/AST/ASTContext.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/Basic/SourceManager.h" + +#include "llvm/ADT/ScopedHashTable.h" + +#include "mlir/Dialect/CIR/IR/CIRAttrs.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/IR/Value.h" + +namespace cir { + +/// Implementation of a CIR/MLIR emission from Clang AST. +/// +/// This will emit operations that are specific to C(++)/ObjC(++) language, +/// preserving the semantics of the language and (hopefully) allow to perform +/// accurate analysis and transformation based on these high level semantics. +class CIRGenModule { +public: + CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx); + CIRGenModule(CIRGenModule &) = delete; + CIRGenModule &operator=(CIRGenModule &) = delete; + ~CIRGenModule() = default; + + using SymTableTy = llvm::ScopedHashTable; + using SymTableScopeTy = + llvm::ScopedHashTableScope; + +private: + /// A "module" matches a c/cpp source file: containing a list of functions. + mlir::ModuleOp theModule; + + /// The builder is a helper class to create IR inside a function. The + /// builder is stateful, in particular it keeps an "insertion point": this + /// is where the next operations will be introduced. + mlir::OpBuilder builder; + + /// The symbol table maps a variable name to a value in the current scope. + /// Entering a function creates a new scope, and the function arguments are + /// added to the mapping. When the processing of a function is terminated, + /// the scope is destroyed and the mappings created in this scope are + /// dropped. + SymTableTy symbolTable; + + /// Hold Clang AST information. + clang::ASTContext &astCtx; + + /// Per-function codegen information. Updated everytime buildCIR is called + /// for FunctionDecls's. + CIRGenFunction *CurCGF = nullptr; + + /// Per-module type mapping from clang AST to CIR. + std::unique_ptr genTypes; + + /// Use to track source locations across nested visitor traversals. + /// Always use a `SourceLocRAIIObject` to change currSrcLoc. + std::optional currSrcLoc; + class SourceLocRAIIObject { + CIRGenModule &P; + std::optional OldVal; + + public: + SourceLocRAIIObject(CIRGenModule &p, mlir::Location Value) : P(p) { + if (P.currSrcLoc) + OldVal = P.currSrcLoc; + P.currSrcLoc = Value; + } + + /// Can be used to restore the state early, before the dtor + /// is run. + void restore() { P.currSrcLoc = OldVal; } + ~SourceLocRAIIObject() { restore(); } + }; + + /// Helpers to convert Clang's SourceLocation to a MLIR Location. + mlir::Location getLoc(SourceLocation SLoc); + + mlir::Location getLoc(SourceRange SLoc); + + mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs); + + /// Declare a variable in the current scope, return success if the variable + /// wasn't declared yet. + mlir::LogicalResult declare(const Decl *var, QualType T, mlir::Location loc, + CharUnits alignment, mlir::Value &addr, + bool IsParam = false); + +public: + mlir::ModuleOp getModule() { return theModule; } + mlir::OpBuilder &getBuilder() { return builder; } + + class ScalarExprEmitter + : public clang::StmtVisitor { + LLVM_ATTRIBUTE_UNUSED CIRGenFunction &CGF; + CIRGenModule &CGM; + + public: + ScalarExprEmitter(CIRGenFunction &cgf, CIRGenModule &cgm) + : CGF(cgf), CGM(cgm) {} + + mlir::Value Visit(Expr *E) { + return StmtVisitor::Visit(E); + } + + /// Emits the address of the l-value, then loads and returns the result. + mlir::Value buildLoadOfLValue(const Expr *E) { + LValue LV = CGM.buildLValue(E); + auto load = CGM.builder.create( + CGM.getLoc(E->getExprLoc()), CGM.getCIRType(E->getType()), + LV.getPointer(), mlir::UnitAttr::get(CGM.builder.getContext())); + // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); + return load; + } + + // Handle l-values. + mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { + // FIXME: we could try to emit this as constant first, see + // CGF.tryEmitAsConstant(E) + return buildLoadOfLValue(E); + } + + // Emit code for an explicit or implicit cast. Implicit + // casts have to handle a more broad range of conversions than explicit + // casts, as they handle things like function to ptr-to-function decay + // etc. + mlir::Value VisitCastExpr(CastExpr *CE) { + Expr *E = CE->getSubExpr(); + QualType DestTy = CE->getType(); + clang::CastKind Kind = CE->getCastKind(); + switch (Kind) { + case CK_LValueToRValue: + assert(CGM.astCtx.hasSameUnqualifiedType(E->getType(), DestTy)); + assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); + return Visit(const_cast(E)); + case CK_NullToPointer: { + // FIXME: use MustVisitNullValue(E) and evaluate expr. + // Note that DestTy is used as the MLIR type instead of a custom + // nullptr type. + mlir::Type Ty = CGM.getCIRType(DestTy); + return CGM.builder.create( + CGM.getLoc(E->getExprLoc()), Ty, + mlir::cir::NullAttr::get(CGM.builder.getContext(), Ty)); + } + case CK_IntegralToBoolean: { + return buildIntToBoolConversion(Visit(E), + CGM.getLoc(CE->getSourceRange())); + } + default: + emitError(CGM.getLoc(CE->getExprLoc()), "cast kind not implemented: '") + << CE->getCastKindName() << "'"; + assert(0 && "not implemented"); + return nullptr; + } + } + + mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { + assert(!isa(E->getType()) && "not implemented"); + return CGM.buildLValue(E->getSubExpr()).getPointer(); + } + + mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { + mlir::Type Ty = CGM.getCIRType(E->getType()); + return CGM.builder.create( + CGM.getLoc(E->getExprLoc()), Ty, + CGM.builder.getBoolAttr(E->getValue())); + } + + struct BinOpInfo { + mlir::Value LHS; + mlir::Value RHS; + SourceRange Loc; + QualType Ty; // Computation Type. + BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform + FPOptions FPFeatures; + const Expr *E; // Entire expr, for error unsupported. May not be binop. + + /// Check if the binop computes a division or a remainder. + bool isDivremOp() const { + return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign || + Opcode == BO_RemAssign; + } + + /// Check if at least one operand is a fixed point type. In such cases, + /// this operation did not follow usual arithmetic conversion and both + /// operands might not be of the same type. + bool isFixedPointOp() const { + // We cannot simply check the result type since comparison operations + // return an int. + if (const auto *BinOp = dyn_cast(E)) { + QualType LHSType = BinOp->getLHS()->getType(); + QualType RHSType = BinOp->getRHS()->getType(); + return LHSType->isFixedPointType() || RHSType->isFixedPointType(); + } + if (const auto *UnOp = dyn_cast(E)) + return UnOp->getSubExpr()->getType()->isFixedPointType(); + return false; + } + }; + + BinOpInfo buildBinOps(const BinaryOperator *E) { + BinOpInfo Result; + Result.LHS = Visit(E->getLHS()); + Result.RHS = Visit(E->getRHS()); + Result.Ty = E->getType(); + Result.Opcode = E->getOpcode(); + Result.Loc = E->getSourceRange(); + // TODO: Result.FPFeatures + Result.E = E; + return Result; + } + + mlir::Value buildMul(const BinOpInfo &Ops) { + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); + } + mlir::Value buildDiv(const BinOpInfo &Ops) { + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Div, Ops.LHS, Ops.RHS); + } + mlir::Value buildRem(const BinOpInfo &Ops) { + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); + } + mlir::Value buildAdd(const BinOpInfo &Ops) { + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS); + } + mlir::Value buildSub(const BinOpInfo &Ops) { + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); + } + mlir::Value buildShl(const BinOpInfo &Ops) { + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Shl, Ops.LHS, Ops.RHS); + } + mlir::Value buildShr(const BinOpInfo &Ops) { + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Shr, Ops.LHS, Ops.RHS); + } + mlir::Value buildAnd(const BinOpInfo &Ops) { + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::And, Ops.LHS, Ops.RHS); + } + mlir::Value buildXor(const BinOpInfo &Ops) { + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); + } + mlir::Value buildOr(const BinOpInfo &Ops) { + return CGM.builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, + Ops.LHS, Ops.RHS); + } + + // Binary operators and binary compound assignment operators. +#define HANDLEBINOP(OP) \ + mlir::Value VisitBin##OP(const BinaryOperator *E) { \ + return build##OP(buildBinOps(E)); \ + } + HANDLEBINOP(Mul) + HANDLEBINOP(Div) + HANDLEBINOP(Rem) + HANDLEBINOP(Add) + HANDLEBINOP(Sub) + HANDLEBINOP(Shl) + HANDLEBINOP(Shr) + HANDLEBINOP(And) + HANDLEBINOP(Xor) + HANDLEBINOP(Or) +#undef HANDLEBINOP + + mlir::Value buildCmp(const BinaryOperator *E) { + mlir::Value Result; + QualType LHSTy = E->getLHS()->getType(); + QualType RHSTy = E->getRHS()->getType(); + + if (const MemberPointerType *MPT = LHSTy->getAs()) { + assert(0 && "not implemented"); + } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { + BinOpInfo BOInfo = buildBinOps(E); + mlir::Value LHS = BOInfo.LHS; + mlir::Value RHS = BOInfo.RHS; + + if (LHSTy->isVectorType()) { + // Cannot handle any vector just yet. + assert(0 && "not implemented"); + // If AltiVec, the comparison results in a numeric type, so we use + // intrinsics comparing vectors and giving 0 or 1 as a result + if (!E->getType()->isVectorType()) + assert(0 && "not implemented"); + } + if (BOInfo.isFixedPointOp()) { + assert(0 && "not implemented"); + } else { + // TODO: when we add proper basic types to CIR we + // probably won't need to handle + // LHSTy->hasSignedIntegerRepresentation() + + // Unsigned integers and pointers. + if (LHS.getType().isa() || + RHS.getType().isa()) { + // TODO: Handle StrictVTablePointers and + // mayBeDynamicClass/invariant group. + assert(0 && "not implemented"); + } + + mlir::cir::CmpOpKind Kind; + switch (E->getOpcode()) { + case BO_LT: + Kind = mlir::cir::CmpOpKind::lt; + break; + case BO_GT: + Kind = mlir::cir::CmpOpKind::gt; + break; + case BO_LE: + Kind = mlir::cir::CmpOpKind::le; + break; + case BO_GE: + Kind = mlir::cir::CmpOpKind::ge; + break; + case BO_EQ: + Kind = mlir::cir::CmpOpKind::eq; + break; + case BO_NE: + Kind = mlir::cir::CmpOpKind::ne; + break; + default: + llvm_unreachable("unsupported"); + } + + return CGM.builder.create( + CGM.getLoc(BOInfo.Loc), CGM.getCIRType(BOInfo.Ty), Kind, + BOInfo.LHS, BOInfo.RHS); + } + + // If this is a vector comparison, sign extend the result to the + // appropriate vector integer type and return it (don't convert to + // bool). + if (LHSTy->isVectorType()) + assert(0 && "not implemented"); + } else { // Complex Comparison: can only be an equality comparison. + assert(0 && "not implemented"); + } + + return buildScalarConversion(Result, CGM.astCtx.BoolTy, E->getType(), + E->getExprLoc()); + } + +#define VISITCOMP(CODE) \ + mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); } + VISITCOMP(LT) + VISITCOMP(GT) + VISITCOMP(LE) + VISITCOMP(GE) + VISITCOMP(EQ) + VISITCOMP(NE) +#undef VISITCOMP + + mlir::Value VisitExpr(Expr *E) { + // Crashing here for "ScalarExprClassName"? Please implement + // VisitScalarExprClassName(...) to get this working. + emitError(CGM.getLoc(E->getExprLoc()), "scalar exp no implemented: '") + << E->getStmtClassName() << "'"; + assert(0 && "shouldn't be here!"); + return {}; + } + + mlir::Value buildIntToBoolConversion(mlir::Value srcVal, + mlir::Location loc) { + // Because of the type rules of C, we often end up computing a + // logical value, then zero extending it to int, then wanting it + // as a logical value again. + // TODO: optimize this common case here or leave it for later + // CIR passes? + mlir::Type boolTy = CGM.getCIRType(CGM.astCtx.BoolTy); + return CGM.builder.create( + loc, boolTy, mlir::cir::CastKind::int_to_bool, srcVal); + } + + /// EmitConversionToBool - Convert the specified expression value to a + /// boolean (i1) truth value. This is equivalent to "Val != 0". + mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType, + mlir::Location loc) { + assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); + + if (SrcType->isRealFloatingType()) + assert(0 && "not implemented"); + + if (const MemberPointerType *MPT = dyn_cast(SrcType)) + assert(0 && "not implemented"); + + assert((SrcType->isIntegerType() || + Src.getType().isa<::mlir::cir::PointerType>()) && + "Unknown scalar type to convert"); + + assert(Src.getType().isa() && + "pointer source not implemented"); + return buildIntToBoolConversion(Src, loc); + } + + /// Emit a conversion from the specified type to the specified destination + /// type, both of which are CIR scalar types. + /// TODO: do we need ScalarConversionOpts here? Should be done in another + /// pass. + mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcType, + QualType DstType, SourceLocation Loc) { + if (SrcType->isFixedPointType()) { + assert(0 && "not implemented"); + } else if (DstType->isFixedPointType()) { + assert(0 && "not implemented"); + } + + SrcType = CGM.astCtx.getCanonicalType(SrcType); + DstType = CGM.astCtx.getCanonicalType(DstType); + if (SrcType == DstType) + return Src; + + if (DstType->isVoidType()) + return nullptr; + mlir::Type SrcTy = Src.getType(); + + // Handle conversions to bool first, they are special: comparisons against + // 0. + if (DstType->isBooleanType()) + return buildConversionToBool(Src, SrcType, CGM.getLoc(Loc)); + + mlir::Type DstTy = CGM.getCIRType(DstType); + + // Cast from half through float if half isn't a native type. + if (SrcType->isHalfType() && !CGM.astCtx.getLangOpts().NativeHalfType) { + assert(0 && "not implemented"); + } + + // LLVM codegen ignore conversions like int -> uint, we should probably + // emit it here in case lowering to sanitizers dialect at some point. + if (SrcTy == DstTy) { + assert(0 && "not implemented"); + } + + // Handle pointer conversions next: pointers can only be converted to/from + // other pointers and integers. + if (DstTy.isa<::mlir::cir::PointerType>()) { + assert(0 && "not implemented"); + } + + if (SrcTy.isa<::mlir::cir::PointerType>()) { + // Must be an ptr to int cast. + assert(DstTy.isa() && "not ptr->int?"); + assert(0 && "not implemented"); + } + + // A scalar can be splatted to an extended vector of the same element type + if (DstType->isExtVectorType() && !SrcType->isVectorType()) { + // Sema should add casts to make sure that the source expression's type + // is the same as the vector's element type (sans qualifiers) + assert( + DstType->castAs()->getElementType().getTypePtr() == + SrcType.getTypePtr() && + "Splatted expr doesn't match with vector element type?"); + + assert(0 && "not implemented"); + } + + if (SrcType->isMatrixType() && DstType->isMatrixType()) + assert(0 && "not implemented"); + + // Finally, we have the arithmetic types: real int/float. + assert(0 && "not implemented"); + mlir::Value Res = nullptr; + mlir::Type ResTy = DstTy; + + // TODO: implement CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) + + // Cast to half through float if half isn't a native type. + if (DstType->isHalfType() && !CGM.astCtx.getLangOpts().NativeHalfType) { + assert(0 && "not implemented"); + } + + // TODO: Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); + if (DstTy != ResTy) { + assert(0 && "not implemented"); + } + + return Res; + } + + // Leaves. + mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { + mlir::Type Ty = CGM.getCIRType(E->getType()); + return CGM.builder.create( + CGM.getLoc(E->getExprLoc()), Ty, + CGM.builder.getIntegerAttr(Ty, E->getValue())); + } + }; + + struct AutoVarEmission { + const VarDecl *Variable; + /// The address of the alloca for languages with explicit address space + /// (e.g. OpenCL) or alloca casted to generic pointer for address space + /// agnostic languages (e.g. C++). Invalid if the variable was emitted + /// as a global constant. + RawAddress Addr; + + /// True if the variable is of aggregate type and has a constant + /// initializer. + bool IsConstantAggregate; + + struct Invalid {}; + AutoVarEmission(Invalid) : Variable(nullptr), Addr(RawAddress::invalid()) {} + + AutoVarEmission(const VarDecl &variable) + : Variable(&variable), Addr(RawAddress::invalid()), + IsConstantAggregate(false) {} + + static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } + /// Returns the raw, allocated address, which is not necessarily + /// the address of the object itself. It is casted to default + /// address space for address space agnostic languages. + RawAddress getAllocatedAddress() const { return Addr; } + }; + + /// Determine whether an object of this type can be emitted + /// as a constant. + /// + /// If ExcludeCtor is true, the duration when the object's constructor runs + /// will not be considered. The caller will need to verify that the object is + /// not written to during its construction. + /// FIXME: in LLVM codegen path this is part of CGM, which doesn't seem + /// like necessary, since (1) it doesn't use CGM at all and (2) is AST type + /// query specific. + bool isTypeConstant(QualType Ty, bool ExcludeCtor); + + /// Emit the alloca and debug information for a + /// local variable. Does not emit initialization or destruction. + AutoVarEmission buildAutoVarAlloca(const VarDecl &D); + + /// Determine whether the given initializer is trivial in the sense + /// that it requires no code to be generated. + bool isTrivialInitializer(const Expr *Init); + + // TODO: this can also be abstrated into common AST helpers + bool hasBooleanRepresentation(QualType Ty); + + mlir::Value buildToMemory(mlir::Value Value, QualType Ty); + + void buildStoreOfScalar(mlir::Value value, LValue lvalue, + const Decl *InitDecl); + + void buildStoreOfScalar(mlir::Value Value, RawAddress Addr, bool Volatile, + QualType Ty, LValueBaseInfo BaseInfo, + const Decl *InitDecl, bool isNontemporal); + + /// Store the specified rvalue into the specified + /// lvalue, where both are guaranteed to the have the same type, and that type + /// is 'Ty'. + void buldStoreThroughLValue(RValue Src, LValue Dst, const Decl *InitDecl); + + void buildScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue); + + /// Emit an expression as an initializer for an object (variable, field, etc.) + /// at the given location. The expression is not necessarily the normal + /// initializer for the object, and the address is not necessarily + /// its normal location. + /// + /// \param init the initializing expression + /// \param D the object to act as if we're initializing + /// \param lvalue the lvalue to initialize + void buildExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue); + + void buildAutoVarInit(const AutoVarEmission &emission); + + void buildAutoVarCleanups(const AutoVarEmission &emission); + + /// Emit code and set up symbol table for a variable declaration with auto, + /// register, or no storage class specifier. These turn into simple stack + /// objects, globals depending on target. + void buildAutoVarDecl(const VarDecl &D); + + /// This method handles emission of any variable declaration + /// inside a function, including static vars etc. + void buildVarDecl(const VarDecl &D); + + void buildDecl(const Decl &D); + + /// Emit the computation of the specified expression of scalar type, + /// ignoring the result. + mlir::Value buildScalarExpr(const Expr *E); + + /// Emit a conversion from the specified type to the specified destination + /// type, both of which are CIR scalar types. + mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcTy, + QualType DstTy, SourceLocation Loc); + + mlir::LogicalResult buildReturnStmt(const ReturnStmt &S); + + mlir::LogicalResult buildDeclStmt(const DeclStmt &S); + + mlir::LogicalResult buildSimpleStmt(const Stmt *S, bool useCurrentScope); + + LValue buildDeclRefLValue(const DeclRefExpr *E); + + /// Emit code to compute the specified expression which + /// can have any type. The result is returned as an RValue struct. + /// TODO: if this is an aggregate expression, add a AggValueSlot to indicate + /// where the result should be returned. + RValue buildAnyExpr(const Expr *E); + + LValue buildBinaryOperatorLValue(const BinaryOperator *E); + + /// FIXME: this could likely be a common helper and not necessarily related + /// with codegen. + /// Return the best known alignment for an unknown pointer to a + /// particular class. + CharUnits getClassPointerAlignment(const CXXRecordDecl *RD); + + /// FIXME: this could likely be a common helper and not necessarily related + /// with codegen. + /// TODO: Add TBAAAccessInfo + CharUnits getNaturalPointeeTypeAlignment(QualType T, + LValueBaseInfo *BaseInfo); + + /// FIXME: this could likely be a common helper and not necessarily related + /// with codegen. + /// TODO: Add TBAAAccessInfo + CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo, + bool forPointeeType); + + /// Given an expression of pointer type, try to + /// derive a more accurate bound on the alignment of the pointer. + RawAddress buildPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo); + + LValue buildUnaryOpLValue(const UnaryOperator *E); + + /// Emit code to compute a designator that specifies the location + /// of the expression. + /// FIXME: document this function better. + LValue buildLValue(const Expr *E); + + /// EmitIgnoredExpr - Emit code to compute the specified expression, + /// ignoring the result. + void buildIgnoredExpr(const Expr *E); + + /// If the specified expression does not fold + /// to a constant, or if it does but contains a label, return false. If it + /// constant folds return true and set the boolean result in Result. + bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &ResultBool, + bool AllowLabels); + + /// Return true if the statement contains a label in it. If + /// this statement is not executed normally, it not containing a label means + /// that we can just remove the code. + bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false); + + /// If the specified expression does not fold + /// to a constant, or if it does but contains a label, return false. If it + /// constant folds return true and set the folded value. + bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt, + bool AllowLabels); + + /// Perform the usual unary conversions on the specified + /// expression and compare the result against zero, returning an Int1Ty value. + mlir::Value evaluateExprAsBool(const Expr *E); + + /// Emit an if on a boolean condition to the specified blocks. + /// FIXME: Based on the condition, this might try to simplify the codegen of + /// the conditional based on the branch. TrueCount should be the number of + /// times we expect the condition to evaluate to true based on PGO data. We + /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr + /// for extra ideas). + mlir::LogicalResult buildIfOnBoolExpr(const Expr *cond, mlir::Location loc, + const Stmt *thenS, const Stmt *elseS); + + mlir::LogicalResult buildIfStmt(const IfStmt &S); + + // Build CIR for a statement. useCurrentScope should be true if no + // new scopes need be created when finding a compound statement. + mlir::LogicalResult buildStmt(const Stmt *S, bool useCurrentScope); + + mlir::LogicalResult buildFunctionBody(const Stmt *Body); + + mlir::LogicalResult buildCompoundStmt(const CompoundStmt &S); + + mlir::LogicalResult buildCompoundStmtWithoutScope(const CompoundStmt &S); + + void buildTopLevelDecl(Decl *decl); + + // Emit a new function and add it to the MLIR module. + mlir::FuncOp buildFunction(const FunctionDecl *FD); + + mlir::Type getCIRType(const QualType &type); + + void verifyModule(); +}; +} // namespace cir + +#endif // LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 2a2d40a81924..62f8e787d642 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -11,6 +11,7 @@ include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR + CIRGenFunction.cpp CIRGenModule.cpp CIRGenTypes.cpp CIRRecordLayoutBuilder.cpp From 42f79c2544c35be1bebe9b94fb2df5c15063f822 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 27 Jan 2022 21:31:45 -0500 Subject: [PATCH 0104/2301] [CIR][NFC] Refactor CIRGenerator into it's own file --- clang/lib/CIR/CIRGenModule.cpp | 35 -------------------- clang/lib/CIR/CIRGenerator.cpp | 59 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CMakeLists.txt | 1 + 3 files changed, 60 insertions(+), 35 deletions(-) create mode 100644 clang/lib/CIR/CIRGenerator.cpp diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 8ccbbf452c43..ca8c8d8369a1 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1438,38 +1438,3 @@ void CIRGenModule::verifyModule() { if (failed(mlir::verify(theModule))) theModule.emitError("module verification error"); } - -CIRGenerator::CIRGenerator() = default; -CIRGenerator::~CIRGenerator() = default; - -void CIRGenerator::Initialize(clang::ASTContext &astCtx) { - using namespace llvm; - - this->astCtx = &astCtx; - - mlirCtx = std::make_unique(); - mlirCtx->getOrLoadDialect(); - mlirCtx->getOrLoadDialect(); - mlirCtx->getOrLoadDialect(); - CGM = std::make_unique(*mlirCtx.get(), astCtx); -} - -void CIRGenerator::verifyModule() { CGM->verifyModule(); } - -bool CIRGenerator::EmitFunction(const FunctionDecl *FD) { - auto func = CGM->buildFunction(FD); - assert(func && "should emit function"); - return func.getOperation() != nullptr; -} - -mlir::ModuleOp CIRGenerator::getModule() { return CGM->getModule(); } - -bool CIRGenerator::HandleTopLevelDecl(clang::DeclGroupRef D) { - for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { - CGM->buildTopLevelDecl(*I); - } - - return true; -} - -void CIRGenerator::HandleTranslationUnit(ASTContext &C) {} diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp new file mode 100644 index 000000000000..dc4948b6bace --- /dev/null +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -0,0 +1,59 @@ +//===--- CIRGenerator.cpp - Emit CIR from ASTs ----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This builds an AST and converts it to CIR. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenModule.h" + +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/IR/MLIRContext.h" + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/CIR/CIRGenerator.h" + +using namespace cir; + +CIRGenerator::CIRGenerator() = default; +CIRGenerator::~CIRGenerator() = default; + +void CIRGenerator::Initialize(clang::ASTContext &astCtx) { + using namespace llvm; + + this->astCtx = &astCtx; + + mlirCtx = std::make_unique(); + mlirCtx->getOrLoadDialect(); + mlirCtx->getOrLoadDialect(); + mlirCtx->getOrLoadDialect(); + CGM = std::make_unique(*mlirCtx.get(), astCtx); +} + +void CIRGenerator::verifyModule() { CGM->verifyModule(); } + +bool CIRGenerator::EmitFunction(const FunctionDecl *FD) { + auto func = CGM->buildFunction(FD); + assert(func && "should emit function"); + return func.getOperation() != nullptr; +} + +mlir::ModuleOp CIRGenerator::getModule() { return CGM->getModule(); } + +bool CIRGenerator::HandleTopLevelDecl(clang::DeclGroupRef D) { + for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { + CGM->buildTopLevelDecl(*I); + } + + return true; +} + +void CIRGenerator::HandleTranslationUnit(ASTContext &C) {} diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 62f8e787d642..9cb05a59caa1 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -11,6 +11,7 @@ include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR + CIRGenerator.cpp CIRGenFunction.cpp CIRGenModule.cpp CIRGenTypes.cpp From b83edfd75d461c1a528498adfbc97c2a1d162215 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 28 Jan 2022 20:04:15 -0500 Subject: [PATCH 0105/2301] [CIR][NFC] Move RawAddress to it's own file and rename it to match clang In a future patch I'm working on this gets a lot more Address-specific code. So just move it to it's own header instead of putting it in CIRGenValue.h --- clang/lib/CIR/Address.h | 53 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenModule.cpp | 20 ++++++------- clang/lib/CIR/CIRGenModule.h | 12 ++++---- clang/lib/CIR/CIRGenValue.h | 43 +++++---------------------- 4 files changed, 76 insertions(+), 52 deletions(-) create mode 100644 clang/lib/CIR/Address.h diff --git a/clang/lib/CIR/Address.h b/clang/lib/CIR/Address.h new file mode 100644 index 000000000000..3fe52d41f211 --- /dev/null +++ b/clang/lib/CIR/Address.h @@ -0,0 +1,53 @@ +//===-- Address.h - An aligned address -------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This class provides a simple wrapper for a pair of a pointer and an +// alignment. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_ADDRESS_H +#define LLVM_CLANG_LIB_CIR_ADDRESS_H + +#include "clang/AST/CharUnits.h" + +#include "llvm/IR/Constants.h" + +#include "mlir/IR/Value.h" + +namespace cir { + +class Address { + mlir::Value Pointer; + clang::CharUnits Alignment; + +public: + Address(mlir::Value pointer, clang::CharUnits alignment) + : Pointer(pointer), Alignment(alignment) { + assert((!alignment.isZero() || pointer == nullptr) && + "creating valid address with invalid alignment"); + } + + static Address invalid() { return Address(nullptr, clang::CharUnits()); } + bool isValid() const { return Pointer != nullptr; } + + mlir::Value getPointer() const { + // assert(isValid()); + return Pointer; + } + + /// Return the alignment of this pointer. + clang::CharUnits getAlignment() const { + // assert(isValid()); + return Alignment; + } +}; + +} // namespace cir + +#endif // LLVM_CLANG_LIB_CIR_ADDRESS_H diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index ca8c8d8369a1..6e401d28f9d4 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -206,7 +206,7 @@ CIRGenModule::buildAutoVarAlloca(const VarDecl &D) { // TODO: what about emitting lifetime markers for MSVC catch parameters? // TODO: something like @llvm.lifetime.start/end here? revisit this later. - emission.Addr = RawAddress{addr, alignment}; + emission.Addr = Address{addr, alignment}; return emission; } @@ -252,7 +252,7 @@ void CIRGenModule::buildStoreOfScalar(mlir::Value value, LValue lvalue, lvalue.getBaseInfo(), InitDecl, false); } -void CIRGenModule::buildStoreOfScalar(mlir::Value Value, RawAddress Addr, +void CIRGenModule::buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, QualType Ty, LValueBaseInfo BaseInfo, const Decl *InitDecl, @@ -355,7 +355,7 @@ void CIRGenModule::buildAutoVarInit(const AutoVarEmission &emission) { return; } - const RawAddress Loc = emission.Addr; + const Address Loc = emission.Addr; // Note: constexpr already initializes everything correctly. LangOptions::TrivialAutoVarInitKind trivialAutoVarInit = @@ -365,7 +365,7 @@ void CIRGenModule::buildAutoVarInit(const AutoVarEmission &emission) { ? LangOptions::TrivialAutoVarInitKind::Uninitialized : astCtx.getLangOpts().getTrivialAutoVarInit())); - auto initializeWhatIsTechnicallyUninitialized = [&](RawAddress Loc) { + auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) { if (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Uninitialized) return; @@ -689,7 +689,7 @@ LValue CIRGenModule::buildDeclRefLValue(const DeclRefExpr *E) { mlir::Value V = symbolTable.lookup(VD); assert(V && "Name lookup must succeed"); - LValue LV = LValue::makeAddr(RawAddress(V, CharUnits::fromQuantity(4)), + LValue LV = LValue::makeAddr(Address(V, CharUnits::fromQuantity(4)), VD->getType(), AlignmentSource::Decl); return LV; } @@ -845,8 +845,8 @@ CharUnits CIRGenModule::getNaturalTypeAlignment(QualType T, /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. -RawAddress CIRGenModule::buildPointerWithAlignment(const Expr *E, - LValueBaseInfo *BaseInfo) { +Address CIRGenModule::buildPointerWithAlignment(const Expr *E, + LValueBaseInfo *BaseInfo) { // We allow this with ObjC object pointers because of fragile ABIs. assert(E->getType()->isPointerType() || E->getType()->isObjCObjectPointerType()); @@ -881,7 +881,7 @@ RawAddress CIRGenModule::buildPointerWithAlignment(const Expr *E, // TODO: conditional operators, comma. // Otherwise, use the alignment of the type. CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), BaseInfo); - return RawAddress(buildScalarExpr(E), Align); + return Address(buildScalarExpr(E), Align); } LValue CIRGenModule::buildUnaryOpLValue(const UnaryOperator *E) { @@ -897,7 +897,7 @@ LValue CIRGenModule::buildUnaryOpLValue(const UnaryOperator *E) { LValueBaseInfo BaseInfo; // TODO: add TBAAInfo - RawAddress Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo); + Address Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo); LValue LV = LValue::makeAddr(Addr, T, BaseInfo); // TODO: set addr space // TODO: ObjC/GC/__weak write barrier stuff. @@ -935,7 +935,7 @@ LValue CIRGenModule::buildLValue(const Expr *E) { llvm_unreachable("cannot emit a property reference directly"); } - return LValue::makeAddr(RawAddress::invalid(), E->getType()); + return LValue::makeAddr(Address::invalid(), E->getType()); } /// EmitIgnoredExpr - Emit code to compute the specified expression, diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index b7dd7ae445ab..ac90546f4e4e 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -530,24 +530,24 @@ class CIRGenModule { /// (e.g. OpenCL) or alloca casted to generic pointer for address space /// agnostic languages (e.g. C++). Invalid if the variable was emitted /// as a global constant. - RawAddress Addr; + Address Addr; /// True if the variable is of aggregate type and has a constant /// initializer. bool IsConstantAggregate; struct Invalid {}; - AutoVarEmission(Invalid) : Variable(nullptr), Addr(RawAddress::invalid()) {} + AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {} AutoVarEmission(const VarDecl &variable) - : Variable(&variable), Addr(RawAddress::invalid()), + : Variable(&variable), Addr(Address::invalid()), IsConstantAggregate(false) {} static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } /// Returns the raw, allocated address, which is not necessarily /// the address of the object itself. It is casted to default /// address space for address space agnostic languages. - RawAddress getAllocatedAddress() const { return Addr; } + Address getAllocatedAddress() const { return Addr; } }; /// Determine whether an object of this type can be emitted @@ -577,7 +577,7 @@ class CIRGenModule { void buildStoreOfScalar(mlir::Value value, LValue lvalue, const Decl *InitDecl); - void buildStoreOfScalar(mlir::Value Value, RawAddress Addr, bool Volatile, + void buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, QualType Ty, LValueBaseInfo BaseInfo, const Decl *InitDecl, bool isNontemporal); @@ -658,7 +658,7 @@ class CIRGenModule { /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. - RawAddress buildPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo); + Address buildPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo); LValue buildUnaryOpLValue(const UnaryOperator *E); diff --git a/clang/lib/CIR/CIRGenValue.h b/clang/lib/CIR/CIRGenValue.h index 172456d90e03..61341f3a973b 100644 --- a/clang/lib/CIR/CIRGenValue.h +++ b/clang/lib/CIR/CIRGenValue.h @@ -14,6 +14,7 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENVALUE_H #define LLVM_CLANG_LIB_CIR_CIRGENVALUE_H +#include "Address.h" #include "CIRGenFunction.h" #include "mlir/IR/Value.h" @@ -23,34 +24,6 @@ namespace cir { -class RawAddress { - mlir::Value Pointer; - clang::CharUnits Alignment; - -public: - RawAddress(mlir::Value pointer, clang::CharUnits alignment) - : Pointer(pointer), Alignment(alignment) { - assert((!alignment.isZero() || pointer == nullptr) && - "creating valid address with invalid alignment"); - } - - static RawAddress invalid() { - return RawAddress(nullptr, clang::CharUnits()); - } - bool isValid() const { return Pointer != nullptr; } - - mlir::Value getPointer() const { - // assert(isValid()); - return Pointer; - } - - /// Return the alignment of this pointer. - clang::CharUnits getAlignment() const { - // assert(isValid()); - return Alignment; - } -}; - /// This trivial value class is used to represent the result of an /// expression that is evaluated. It can be one of three things: either a /// simple MLIR SSA value, a pair of SSA values for complex numbers, or the @@ -89,9 +62,9 @@ class RValue { /// getAggregateAddr() - Return the Value* of the address of the /// aggregate. - RawAddress getAggregateAddress() const { + Address getAggregateAddress() const { assert(0 && "not implemented"); - return RawAddress::invalid(); + return Address::invalid(); } static RValue getIgnored() { @@ -117,7 +90,7 @@ class RValue { // FIXME: Aggregate rvalues need to retain information about whether they // are volatile or not. Remove default to find all places that probably // get this wrong. - static RValue getAggregate(RawAddress addr, bool isVolatile = false) { + static RValue getAggregate(Address addr, bool isVolatile = false) { assert(0 && "not implemented"); return RValue{}; } @@ -214,14 +187,12 @@ class LValue { return clang::CharUnits::fromQuantity(Alignment); } - RawAddress getAddress() const { - return RawAddress(getPointer(), getAlignment()); - } + Address getAddress() const { return Address(getPointer(), getAlignment()); } LValueBaseInfo getBaseInfo() const { return BaseInfo; } void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; } - static LValue makeAddr(RawAddress address, clang::QualType T, + static LValue makeAddr(Address address, clang::QualType T, AlignmentSource Source = AlignmentSource::Type) { LValue R; R.V = address.getPointer(); @@ -231,7 +202,7 @@ class LValue { } // FIXME: only have one of these static methods. - static LValue makeAddr(RawAddress address, clang::QualType T, + static LValue makeAddr(Address address, clang::QualType T, LValueBaseInfo LBI) { LValue R; R.V = address.getPointer(); From 7ab36c08bd213233f4a846ed9582c4217a30f509 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 28 Jan 2022 21:55:10 -0500 Subject: [PATCH 0106/2301] [CIR][NFC] Refactor ScalarExprEmitter into it's own file This is more due to a `using namespace clang;` collision. `CIRGenFunction` has a residual `using namespace` and this header was importing it incidentally and thus allowed to use non-namespaced decls from clang. It was either move it to it's own file (where we can define it in an anonymous namespace) or preserve no usings in headers. --- clang/lib/CIR/CIRGenExprScalar.cpp | 456 +++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenModule.cpp | 21 -- clang/lib/CIR/CIRGenModule.h | 421 +------------------------- clang/lib/CIR/CMakeLists.txt | 1 + 4 files changed, 462 insertions(+), 437 deletions(-) create mode 100644 clang/lib/CIR/CIRGenExprScalar.cpp diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp new file mode 100644 index 000000000000..9cb3a0d101f3 --- /dev/null +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -0,0 +1,456 @@ +#include "CIRGenFunction.h" +#include "CIRGenModule.h" + +#include "clang/AST/StmtVisitor.h" + +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "mlir/IR/Value.h" + +using namespace cir; + +namespace { + +class ScalarExprEmitter + : public clang::StmtVisitor { + LLVM_ATTRIBUTE_UNUSED CIRGenFunction &CGF; + CIRGenModule &CGM; + mlir::OpBuilder &Builder; + +public: + ScalarExprEmitter(CIRGenFunction &cgf, CIRGenModule &cgm, + mlir::OpBuilder &builder) + : CGF(cgf), CGM(cgm), Builder(builder) {} + + mlir::Value Visit(clang::Expr *E) { + return StmtVisitor::Visit(E); + } + + /// Emits the address of the l-value, then loads and returns the result. + mlir::Value buildLoadOfLValue(const clang::Expr *E) { + LValue LV = CGM.buildLValue(E); + auto load = Builder.create( + CGM.getLoc(E->getExprLoc()), CGM.getCIRType(E->getType()), + LV.getPointer(), mlir::UnitAttr::get(Builder.getContext())); + // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); + return load; + } + + // Handle l-values. + mlir::Value VisitDeclRefExpr(clang::DeclRefExpr *E) { + // FIXME: we could try to emit this as constant first, see + // CGF.tryEmitAsConstant(E) + return buildLoadOfLValue(E); + } + + // Emit code for an explicit or implicit cast. Implicit + // casts have to handle a more broad range of conversions than explicit + // casts, as they handle things like function to ptr-to-function decay + // etc. + mlir::Value VisitCastExpr(clang::CastExpr *CE) { + clang::Expr *E = CE->getSubExpr(); + clang::QualType DestTy = CE->getType(); + clang::CastKind Kind = CE->getCastKind(); + switch (Kind) { + case clang::CK_LValueToRValue: + assert(CGM.getASTContext().hasSameUnqualifiedType(E->getType(), DestTy)); + assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); + return Visit(const_cast(E)); + case clang::CK_NullToPointer: { + // FIXME: use MustVisitNullValue(E) and evaluate expr. + // Note that DestTy is used as the MLIR type instead of a custom + // nullptr type. + mlir::Type Ty = CGM.getCIRType(DestTy); + return Builder.create( + CGM.getLoc(E->getExprLoc()), Ty, + mlir::cir::NullAttr::get(Builder.getContext(), Ty)); + } + case clang::CK_IntegralToBoolean: { + return buildIntToBoolConversion(Visit(E), + CGM.getLoc(CE->getSourceRange())); + } + default: + emitError(CGM.getLoc(CE->getExprLoc()), "cast kind not implemented: '") + << CE->getCastKindName() << "'"; + assert(0 && "not implemented"); + return nullptr; + } + } + + mlir::Value VisitUnaryAddrOf(const clang::UnaryOperator *E) { + assert(!llvm::isa(E->getType()) && + "not implemented"); + return CGM.buildLValue(E->getSubExpr()).getPointer(); + } + + mlir::Value VisitCXXBoolLiteralExpr(const clang::CXXBoolLiteralExpr *E) { + mlir::Type Ty = CGM.getCIRType(E->getType()); + return Builder.create( + CGM.getLoc(E->getExprLoc()), Ty, Builder.getBoolAttr(E->getValue())); + } + + struct BinOpInfo { + mlir::Value LHS; + mlir::Value RHS; + clang::SourceRange Loc; + clang::QualType Ty; // Computation Type. + clang::BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform + clang::FPOptions FPFeatures; + const clang::Expr + *E; // Entire expr, for error unsupported. May not be binop. + + /// Check if the binop computes a division or a remainder. + bool isDivremOp() const { + return Opcode == clang::BO_Div || Opcode == clang::BO_Rem || + Opcode == clang::BO_DivAssign || Opcode == clang::BO_RemAssign; + } + + /// Check if at least one operand is a fixed point type. In such cases, + /// this operation did not follow usual arithmetic conversion and both + /// operands might not be of the same type. + bool isFixedPointOp() const { + // We cannot simply check the result type since comparison operations + // return an int. + if (const auto *BinOp = llvm::dyn_cast(E)) { + clang::QualType LHSType = BinOp->getLHS()->getType(); + clang::QualType RHSType = BinOp->getRHS()->getType(); + return LHSType->isFixedPointType() || RHSType->isFixedPointType(); + } + if (const auto *UnOp = llvm::dyn_cast(E)) + return UnOp->getSubExpr()->getType()->isFixedPointType(); + return false; + } + }; + + BinOpInfo buildBinOps(const clang::BinaryOperator *E) { + BinOpInfo Result; + Result.LHS = Visit(E->getLHS()); + Result.RHS = Visit(E->getRHS()); + Result.Ty = E->getType(); + Result.Opcode = E->getOpcode(); + Result.Loc = E->getSourceRange(); + // TODO: Result.FPFeatures + Result.E = E; + return Result; + } + + mlir::Value buildMul(const BinOpInfo &Ops) { + return Builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Mul, + Ops.LHS, Ops.RHS); + } + mlir::Value buildDiv(const BinOpInfo &Ops) { + return Builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Div, + Ops.LHS, Ops.RHS); + } + mlir::Value buildRem(const BinOpInfo &Ops) { + return Builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Rem, + Ops.LHS, Ops.RHS); + } + mlir::Value buildAdd(const BinOpInfo &Ops) { + return Builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Add, + Ops.LHS, Ops.RHS); + } + mlir::Value buildSub(const BinOpInfo &Ops) { + return Builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, + Ops.LHS, Ops.RHS); + } + mlir::Value buildShl(const BinOpInfo &Ops) { + return Builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shl, + Ops.LHS, Ops.RHS); + } + mlir::Value buildShr(const BinOpInfo &Ops) { + return Builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shr, + Ops.LHS, Ops.RHS); + } + mlir::Value buildAnd(const BinOpInfo &Ops) { + return Builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::And, + Ops.LHS, Ops.RHS); + } + mlir::Value buildXor(const BinOpInfo &Ops) { + return Builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Xor, + Ops.LHS, Ops.RHS); + } + mlir::Value buildOr(const BinOpInfo &Ops) { + return Builder.create( + CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, + Ops.LHS, Ops.RHS); + } + + // Binary operators and binary compound assignment operators. +#define HANDLEBINOP(OP) \ + mlir::Value VisitBin##OP(const clang::BinaryOperator *E) { \ + return build##OP(buildBinOps(E)); \ + } + HANDLEBINOP(Mul) + HANDLEBINOP(Div) + HANDLEBINOP(Rem) + HANDLEBINOP(Add) + HANDLEBINOP(Sub) + HANDLEBINOP(Shl) + HANDLEBINOP(Shr) + HANDLEBINOP(And) + HANDLEBINOP(Xor) + HANDLEBINOP(Or) +#undef HANDLEBINOP + + mlir::Value buildCmp(const clang::BinaryOperator *E) { + mlir::Value Result; + clang::QualType LHSTy = E->getLHS()->getType(); + clang::QualType RHSTy = E->getRHS()->getType(); + + if (const clang::MemberPointerType *MPT = + LHSTy->getAs()) { + assert(0 && "not implemented"); + } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { + BinOpInfo BOInfo = buildBinOps(E); + mlir::Value LHS = BOInfo.LHS; + mlir::Value RHS = BOInfo.RHS; + + if (LHSTy->isVectorType()) { + // Cannot handle any vector just yet. + assert(0 && "not implemented"); + // If AltiVec, the comparison results in a numeric type, so we use + // intrinsics comparing vectors and giving 0 or 1 as a result + if (!E->getType()->isVectorType()) + assert(0 && "not implemented"); + } + if (BOInfo.isFixedPointOp()) { + assert(0 && "not implemented"); + } else { + // TODO: when we add proper basic types to CIR we + // probably won't need to handle + // LHSTy->hasSignedIntegerRepresentation() + + // Unsigned integers and pointers. + if (LHS.getType().isa() || + RHS.getType().isa()) { + // TODO: Handle StrictVTablePointers and + // mayBeDynamicClass/invariant group. + assert(0 && "not implemented"); + } + + mlir::cir::CmpOpKind Kind; + switch (E->getOpcode()) { + case clang::BO_LT: + Kind = mlir::cir::CmpOpKind::lt; + break; + case clang::BO_GT: + Kind = mlir::cir::CmpOpKind::gt; + break; + case clang::BO_LE: + Kind = mlir::cir::CmpOpKind::le; + break; + case clang::BO_GE: + Kind = mlir::cir::CmpOpKind::ge; + break; + case clang::BO_EQ: + Kind = mlir::cir::CmpOpKind::eq; + break; + case clang::BO_NE: + Kind = mlir::cir::CmpOpKind::ne; + break; + default: + llvm_unreachable("unsupported"); + } + + return Builder.create(CGM.getLoc(BOInfo.Loc), + CGM.getCIRType(BOInfo.Ty), Kind, + BOInfo.LHS, BOInfo.RHS); + } + + // If this is a vector comparison, sign extend the result to the + // appropriate vector integer type and return it (don't convert to + // bool). + if (LHSTy->isVectorType()) + assert(0 && "not implemented"); + } else { // Complex Comparison: can only be an equality comparison. + assert(0 && "not implemented"); + } + + return buildScalarConversion(Result, CGM.getASTContext().BoolTy, + E->getType(), E->getExprLoc()); + } + +#define VISITCOMP(CODE) \ + mlir::Value VisitBin##CODE(const clang::BinaryOperator *E) { \ + return buildCmp(E); \ + } + VISITCOMP(LT) + VISITCOMP(GT) + VISITCOMP(LE) + VISITCOMP(GE) + VISITCOMP(EQ) + VISITCOMP(NE) +#undef VISITCOMP + + mlir::Value VisitExpr(clang::Expr *E) { + // Crashing here for "ScalarExprClassName"? Please implement + // VisitScalarExprClassName(...) to get this working. + emitError(CGM.getLoc(E->getExprLoc()), "scalar exp no implemented: '") + << E->getStmtClassName() << "'"; + assert(0 && "shouldn't be here!"); + return {}; + } + + mlir::Value buildIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) { + // Because of the type rules of C, we often end up computing a + // logical value, then zero extending it to int, then wanting it + // as a logical value again. + // TODO: optimize this common case here or leave it for later + // CIR passes? + mlir::Type boolTy = CGM.getCIRType(CGM.getASTContext().BoolTy); + return Builder.create( + loc, boolTy, mlir::cir::CastKind::int_to_bool, srcVal); + } + + /// EmitConversionToBool - Convert the specified expression value to a + /// boolean (i1) truth value. This is equivalent to "Val != 0". + mlir::Value buildConversionToBool(mlir::Value Src, clang::QualType SrcType, + mlir::Location loc) { + assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); + + if (SrcType->isRealFloatingType()) + assert(0 && "not implemented"); + + if (auto *MPT = llvm::dyn_cast(SrcType)) + assert(0 && "not implemented"); + + assert((SrcType->isIntegerType() || + Src.getType().isa<::mlir::cir::PointerType>()) && + "Unknown scalar type to convert"); + + assert(Src.getType().isa() && + "pointer source not implemented"); + return buildIntToBoolConversion(Src, loc); + } + + /// Emit a conversion from the specified type to the specified destination + /// type, both of which are CIR scalar types. + /// TODO: do we need ScalarConversionOpts here? Should be done in another + /// pass. + mlir::Value buildScalarConversion(mlir::Value Src, clang::QualType SrcType, + clang::QualType DstType, + clang::SourceLocation Loc) { + if (SrcType->isFixedPointType()) { + assert(0 && "not implemented"); + } else if (DstType->isFixedPointType()) { + assert(0 && "not implemented"); + } + + SrcType = CGM.getASTContext().getCanonicalType(SrcType); + DstType = CGM.getASTContext().getCanonicalType(DstType); + if (SrcType == DstType) + return Src; + + if (DstType->isVoidType()) + return nullptr; + mlir::Type SrcTy = Src.getType(); + + // Handle conversions to bool first, they are special: comparisons against + // 0. + if (DstType->isBooleanType()) + return buildConversionToBool(Src, SrcType, CGM.getLoc(Loc)); + + mlir::Type DstTy = CGM.getCIRType(DstType); + + // Cast from half through float if half isn't a native type. + if (SrcType->isHalfType() && + !CGM.getASTContext().getLangOpts().NativeHalfType) { + assert(0 && "not implemented"); + } + + // LLVM codegen ignore conversions like int -> uint, we should probably + // emit it here in case lowering to sanitizers dialect at some point. + if (SrcTy == DstTy) { + assert(0 && "not implemented"); + } + + // Handle pointer conversions next: pointers can only be converted to/from + // other pointers and integers. + if (DstTy.isa<::mlir::cir::PointerType>()) { + assert(0 && "not implemented"); + } + + if (SrcTy.isa<::mlir::cir::PointerType>()) { + // Must be a ptr to int cast. + assert(DstTy.isa() && "not ptr->int?"); + assert(0 && "not implemented"); + } + + // A scalar can be splatted to an extended vector of the same element type + if (DstType->isExtVectorType() && !SrcType->isVectorType()) { + // Sema should add casts to make sure that the source expression's type + // is the same as the vector's element type (sans qualifiers) + assert(DstType->castAs() + ->getElementType() + .getTypePtr() == SrcType.getTypePtr() && + "Splatted expr doesn't match with vector element type?"); + + assert(0 && "not implemented"); + } + + if (SrcType->isMatrixType() && DstType->isMatrixType()) + assert(0 && "not implemented"); + + // Finally, we have the arithmetic types: real int/float. + assert(0 && "not implemented"); + mlir::Value Res = nullptr; + mlir::Type ResTy = DstTy; + + // TODO: implement CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) + + // Cast to half through float if half isn't a native type. + if (DstType->isHalfType() && + !CGM.getASTContext().getLangOpts().NativeHalfType) { + assert(0 && "not implemented"); + } + + // TODO: Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); + if (DstTy != ResTy) { + assert(0 && "not implemented"); + } + + return Res; + } + + // Leaves. + mlir::Value VisitIntegerLiteral(const clang::IntegerLiteral *E) { + mlir::Type Ty = CGM.getCIRType(E->getType()); + return Builder.create( + CGM.getLoc(E->getExprLoc()), Ty, + Builder.getIntegerAttr(Ty, E->getValue())); + } +}; + +} // namespace + +/// Emit the computation of the specified expression of scalar type, +/// ignoring the result. +mlir::Value CIRGenModule::buildScalarExpr(const Expr *E) { + assert(E && CIRGenFunction::hasScalarEvaluationKind(E->getType()) && + "Invalid scalar expression to emit"); + + return ScalarExprEmitter(*CurCGF, *this, builder) + .Visit(const_cast(E)); +} + +/// Emit a conversion from the specified type to the specified destination +/// type, both of which are CIR scalar types. +mlir::Value CIRGenModule::buildScalarConversion(mlir::Value Src, QualType SrcTy, + QualType DstTy, + SourceLocation Loc) { + assert(CIRGenFunction::hasScalarEvaluationKind(SrcTy) && + CIRGenFunction::hasScalarEvaluationKind(DstTy) && + "Invalid scalar expression to emit"); + return ScalarExprEmitter(*CurCGF, *this, builder) + .buildScalarConversion(Src, SrcTy, DstTy, Loc); +} diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 6e401d28f9d4..2a67196af4b3 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -569,27 +569,6 @@ void CIRGenModule::buildDecl(const Decl &D) { } } -/// Emit the computation of the specified expression of scalar type, -/// ignoring the result. -mlir::Value CIRGenModule::buildScalarExpr(const Expr *E) { - assert(E && CIRGenFunction::hasScalarEvaluationKind(E->getType()) && - "Invalid scalar expression to emit"); - - return ScalarExprEmitter(*CurCGF, *this).Visit(const_cast(E)); -} - -/// Emit a conversion from the specified type to the specified destination -/// type, both of which are CIR scalar types. -mlir::Value CIRGenModule::buildScalarConversion(mlir::Value Src, QualType SrcTy, - QualType DstTy, - SourceLocation Loc) { - assert(CIRGenFunction::hasScalarEvaluationKind(SrcTy) && - CIRGenFunction::hasScalarEvaluationKind(DstTy) && - "Invalid scalar expression to emit"); - return ScalarExprEmitter(*CurCGF, *this) - .buildScalarConversion(Src, SrcTy, DstTy, Loc); -} - mlir::LogicalResult CIRGenModule::buildReturnStmt(const ReturnStmt &S) { assert(!(astCtx.getLangOpts().ElideConstructors && S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) && diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index ac90546f4e4e..254c5b17c7fd 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -95,13 +95,6 @@ class CIRGenModule { ~SourceLocRAIIObject() { restore(); } }; - /// Helpers to convert Clang's SourceLocation to a MLIR Location. - mlir::Location getLoc(SourceLocation SLoc); - - mlir::Location getLoc(SourceRange SLoc); - - mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs); - /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. mlir::LogicalResult declare(const Decl *var, QualType T, mlir::Location loc, @@ -111,418 +104,14 @@ class CIRGenModule { public: mlir::ModuleOp getModule() { return theModule; } mlir::OpBuilder &getBuilder() { return builder; } + clang::ASTContext &getASTContext() { return astCtx; } - class ScalarExprEmitter - : public clang::StmtVisitor { - LLVM_ATTRIBUTE_UNUSED CIRGenFunction &CGF; - CIRGenModule &CGM; - - public: - ScalarExprEmitter(CIRGenFunction &cgf, CIRGenModule &cgm) - : CGF(cgf), CGM(cgm) {} - - mlir::Value Visit(Expr *E) { - return StmtVisitor::Visit(E); - } - - /// Emits the address of the l-value, then loads and returns the result. - mlir::Value buildLoadOfLValue(const Expr *E) { - LValue LV = CGM.buildLValue(E); - auto load = CGM.builder.create( - CGM.getLoc(E->getExprLoc()), CGM.getCIRType(E->getType()), - LV.getPointer(), mlir::UnitAttr::get(CGM.builder.getContext())); - // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); - return load; - } - - // Handle l-values. - mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { - // FIXME: we could try to emit this as constant first, see - // CGF.tryEmitAsConstant(E) - return buildLoadOfLValue(E); - } - - // Emit code for an explicit or implicit cast. Implicit - // casts have to handle a more broad range of conversions than explicit - // casts, as they handle things like function to ptr-to-function decay - // etc. - mlir::Value VisitCastExpr(CastExpr *CE) { - Expr *E = CE->getSubExpr(); - QualType DestTy = CE->getType(); - clang::CastKind Kind = CE->getCastKind(); - switch (Kind) { - case CK_LValueToRValue: - assert(CGM.astCtx.hasSameUnqualifiedType(E->getType(), DestTy)); - assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); - return Visit(const_cast(E)); - case CK_NullToPointer: { - // FIXME: use MustVisitNullValue(E) and evaluate expr. - // Note that DestTy is used as the MLIR type instead of a custom - // nullptr type. - mlir::Type Ty = CGM.getCIRType(DestTy); - return CGM.builder.create( - CGM.getLoc(E->getExprLoc()), Ty, - mlir::cir::NullAttr::get(CGM.builder.getContext(), Ty)); - } - case CK_IntegralToBoolean: { - return buildIntToBoolConversion(Visit(E), - CGM.getLoc(CE->getSourceRange())); - } - default: - emitError(CGM.getLoc(CE->getExprLoc()), "cast kind not implemented: '") - << CE->getCastKindName() << "'"; - assert(0 && "not implemented"); - return nullptr; - } - } - - mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { - assert(!isa(E->getType()) && "not implemented"); - return CGM.buildLValue(E->getSubExpr()).getPointer(); - } - - mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { - mlir::Type Ty = CGM.getCIRType(E->getType()); - return CGM.builder.create( - CGM.getLoc(E->getExprLoc()), Ty, - CGM.builder.getBoolAttr(E->getValue())); - } - - struct BinOpInfo { - mlir::Value LHS; - mlir::Value RHS; - SourceRange Loc; - QualType Ty; // Computation Type. - BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform - FPOptions FPFeatures; - const Expr *E; // Entire expr, for error unsupported. May not be binop. - - /// Check if the binop computes a division or a remainder. - bool isDivremOp() const { - return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign || - Opcode == BO_RemAssign; - } - - /// Check if at least one operand is a fixed point type. In such cases, - /// this operation did not follow usual arithmetic conversion and both - /// operands might not be of the same type. - bool isFixedPointOp() const { - // We cannot simply check the result type since comparison operations - // return an int. - if (const auto *BinOp = dyn_cast(E)) { - QualType LHSType = BinOp->getLHS()->getType(); - QualType RHSType = BinOp->getRHS()->getType(); - return LHSType->isFixedPointType() || RHSType->isFixedPointType(); - } - if (const auto *UnOp = dyn_cast(E)) - return UnOp->getSubExpr()->getType()->isFixedPointType(); - return false; - } - }; - - BinOpInfo buildBinOps(const BinaryOperator *E) { - BinOpInfo Result; - Result.LHS = Visit(E->getLHS()); - Result.RHS = Visit(E->getRHS()); - Result.Ty = E->getType(); - Result.Opcode = E->getOpcode(); - Result.Loc = E->getSourceRange(); - // TODO: Result.FPFeatures - Result.E = E; - return Result; - } - - mlir::Value buildMul(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); - } - mlir::Value buildDiv(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Div, Ops.LHS, Ops.RHS); - } - mlir::Value buildRem(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); - } - mlir::Value buildAdd(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS); - } - mlir::Value buildSub(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); - } - mlir::Value buildShl(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Shl, Ops.LHS, Ops.RHS); - } - mlir::Value buildShr(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Shr, Ops.LHS, Ops.RHS); - } - mlir::Value buildAnd(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::And, Ops.LHS, Ops.RHS); - } - mlir::Value buildXor(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), - mlir::cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); - } - mlir::Value buildOr(const BinOpInfo &Ops) { - return CGM.builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, - Ops.LHS, Ops.RHS); - } - - // Binary operators and binary compound assignment operators. -#define HANDLEBINOP(OP) \ - mlir::Value VisitBin##OP(const BinaryOperator *E) { \ - return build##OP(buildBinOps(E)); \ - } - HANDLEBINOP(Mul) - HANDLEBINOP(Div) - HANDLEBINOP(Rem) - HANDLEBINOP(Add) - HANDLEBINOP(Sub) - HANDLEBINOP(Shl) - HANDLEBINOP(Shr) - HANDLEBINOP(And) - HANDLEBINOP(Xor) - HANDLEBINOP(Or) -#undef HANDLEBINOP - - mlir::Value buildCmp(const BinaryOperator *E) { - mlir::Value Result; - QualType LHSTy = E->getLHS()->getType(); - QualType RHSTy = E->getRHS()->getType(); - - if (const MemberPointerType *MPT = LHSTy->getAs()) { - assert(0 && "not implemented"); - } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { - BinOpInfo BOInfo = buildBinOps(E); - mlir::Value LHS = BOInfo.LHS; - mlir::Value RHS = BOInfo.RHS; - - if (LHSTy->isVectorType()) { - // Cannot handle any vector just yet. - assert(0 && "not implemented"); - // If AltiVec, the comparison results in a numeric type, so we use - // intrinsics comparing vectors and giving 0 or 1 as a result - if (!E->getType()->isVectorType()) - assert(0 && "not implemented"); - } - if (BOInfo.isFixedPointOp()) { - assert(0 && "not implemented"); - } else { - // TODO: when we add proper basic types to CIR we - // probably won't need to handle - // LHSTy->hasSignedIntegerRepresentation() - - // Unsigned integers and pointers. - if (LHS.getType().isa() || - RHS.getType().isa()) { - // TODO: Handle StrictVTablePointers and - // mayBeDynamicClass/invariant group. - assert(0 && "not implemented"); - } - - mlir::cir::CmpOpKind Kind; - switch (E->getOpcode()) { - case BO_LT: - Kind = mlir::cir::CmpOpKind::lt; - break; - case BO_GT: - Kind = mlir::cir::CmpOpKind::gt; - break; - case BO_LE: - Kind = mlir::cir::CmpOpKind::le; - break; - case BO_GE: - Kind = mlir::cir::CmpOpKind::ge; - break; - case BO_EQ: - Kind = mlir::cir::CmpOpKind::eq; - break; - case BO_NE: - Kind = mlir::cir::CmpOpKind::ne; - break; - default: - llvm_unreachable("unsupported"); - } - - return CGM.builder.create( - CGM.getLoc(BOInfo.Loc), CGM.getCIRType(BOInfo.Ty), Kind, - BOInfo.LHS, BOInfo.RHS); - } - - // If this is a vector comparison, sign extend the result to the - // appropriate vector integer type and return it (don't convert to - // bool). - if (LHSTy->isVectorType()) - assert(0 && "not implemented"); - } else { // Complex Comparison: can only be an equality comparison. - assert(0 && "not implemented"); - } - - return buildScalarConversion(Result, CGM.astCtx.BoolTy, E->getType(), - E->getExprLoc()); - } - -#define VISITCOMP(CODE) \ - mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); } - VISITCOMP(LT) - VISITCOMP(GT) - VISITCOMP(LE) - VISITCOMP(GE) - VISITCOMP(EQ) - VISITCOMP(NE) -#undef VISITCOMP - - mlir::Value VisitExpr(Expr *E) { - // Crashing here for "ScalarExprClassName"? Please implement - // VisitScalarExprClassName(...) to get this working. - emitError(CGM.getLoc(E->getExprLoc()), "scalar exp no implemented: '") - << E->getStmtClassName() << "'"; - assert(0 && "shouldn't be here!"); - return {}; - } - - mlir::Value buildIntToBoolConversion(mlir::Value srcVal, - mlir::Location loc) { - // Because of the type rules of C, we often end up computing a - // logical value, then zero extending it to int, then wanting it - // as a logical value again. - // TODO: optimize this common case here or leave it for later - // CIR passes? - mlir::Type boolTy = CGM.getCIRType(CGM.astCtx.BoolTy); - return CGM.builder.create( - loc, boolTy, mlir::cir::CastKind::int_to_bool, srcVal); - } - - /// EmitConversionToBool - Convert the specified expression value to a - /// boolean (i1) truth value. This is equivalent to "Val != 0". - mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType, - mlir::Location loc) { - assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); - - if (SrcType->isRealFloatingType()) - assert(0 && "not implemented"); - - if (const MemberPointerType *MPT = dyn_cast(SrcType)) - assert(0 && "not implemented"); - - assert((SrcType->isIntegerType() || - Src.getType().isa<::mlir::cir::PointerType>()) && - "Unknown scalar type to convert"); - - assert(Src.getType().isa() && - "pointer source not implemented"); - return buildIntToBoolConversion(Src, loc); - } + /// Helpers to convert Clang's SourceLocation to a MLIR Location. + mlir::Location getLoc(SourceLocation SLoc); - /// Emit a conversion from the specified type to the specified destination - /// type, both of which are CIR scalar types. - /// TODO: do we need ScalarConversionOpts here? Should be done in another - /// pass. - mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcType, - QualType DstType, SourceLocation Loc) { - if (SrcType->isFixedPointType()) { - assert(0 && "not implemented"); - } else if (DstType->isFixedPointType()) { - assert(0 && "not implemented"); - } - - SrcType = CGM.astCtx.getCanonicalType(SrcType); - DstType = CGM.astCtx.getCanonicalType(DstType); - if (SrcType == DstType) - return Src; - - if (DstType->isVoidType()) - return nullptr; - mlir::Type SrcTy = Src.getType(); - - // Handle conversions to bool first, they are special: comparisons against - // 0. - if (DstType->isBooleanType()) - return buildConversionToBool(Src, SrcType, CGM.getLoc(Loc)); - - mlir::Type DstTy = CGM.getCIRType(DstType); - - // Cast from half through float if half isn't a native type. - if (SrcType->isHalfType() && !CGM.astCtx.getLangOpts().NativeHalfType) { - assert(0 && "not implemented"); - } - - // LLVM codegen ignore conversions like int -> uint, we should probably - // emit it here in case lowering to sanitizers dialect at some point. - if (SrcTy == DstTy) { - assert(0 && "not implemented"); - } - - // Handle pointer conversions next: pointers can only be converted to/from - // other pointers and integers. - if (DstTy.isa<::mlir::cir::PointerType>()) { - assert(0 && "not implemented"); - } - - if (SrcTy.isa<::mlir::cir::PointerType>()) { - // Must be an ptr to int cast. - assert(DstTy.isa() && "not ptr->int?"); - assert(0 && "not implemented"); - } - - // A scalar can be splatted to an extended vector of the same element type - if (DstType->isExtVectorType() && !SrcType->isVectorType()) { - // Sema should add casts to make sure that the source expression's type - // is the same as the vector's element type (sans qualifiers) - assert( - DstType->castAs()->getElementType().getTypePtr() == - SrcType.getTypePtr() && - "Splatted expr doesn't match with vector element type?"); - - assert(0 && "not implemented"); - } - - if (SrcType->isMatrixType() && DstType->isMatrixType()) - assert(0 && "not implemented"); - - // Finally, we have the arithmetic types: real int/float. - assert(0 && "not implemented"); - mlir::Value Res = nullptr; - mlir::Type ResTy = DstTy; - - // TODO: implement CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) - - // Cast to half through float if half isn't a native type. - if (DstType->isHalfType() && !CGM.astCtx.getLangOpts().NativeHalfType) { - assert(0 && "not implemented"); - } - - // TODO: Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); - if (DstTy != ResTy) { - assert(0 && "not implemented"); - } - - return Res; - } + mlir::Location getLoc(SourceRange SLoc); - // Leaves. - mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { - mlir::Type Ty = CGM.getCIRType(E->getType()); - return CGM.builder.create( - CGM.getLoc(E->getExprLoc()), Ty, - CGM.builder.getIntegerAttr(Ty, E->getValue())); - } - }; + mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs); struct AutoVarEmission { const VarDecl *Variable; diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 9cb05a59caa1..78e08d267d92 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -12,6 +12,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR CIRGenerator.cpp + CIRGenExprScalar.cpp CIRGenFunction.cpp CIRGenModule.cpp CIRGenTypes.cpp From a9d6b944c742e3e62cde054616c5082419cc56de Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 28 Jan 2022 21:58:56 -0500 Subject: [PATCH 0107/2301] [CIR][NFC] Remove erroneous using namespace clang and fix usages of it I guess this got left behind while moving CIRGenFunction around earlier. Simply remove it and then correct any behaviors that depended on it. --- clang/lib/CIR/CIRGenExprScalar.cpp | 103 ++++++++++++------------- clang/lib/CIR/CIRGenFunction.h | 10 +-- clang/lib/CIR/CIRGenModule.h | 118 ++++++++++++++++------------- clang/lib/CIR/CIRGenerator.cpp | 5 +- 4 files changed, 119 insertions(+), 117 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 9cb3a0d101f3..dcd845b518a9 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -8,11 +8,11 @@ #include "mlir/IR/Value.h" using namespace cir; +using namespace clang; namespace { -class ScalarExprEmitter - : public clang::StmtVisitor { +class ScalarExprEmitter : public StmtVisitor { LLVM_ATTRIBUTE_UNUSED CIRGenFunction &CGF; CIRGenModule &CGM; mlir::OpBuilder &Builder; @@ -22,12 +22,12 @@ class ScalarExprEmitter mlir::OpBuilder &builder) : CGF(cgf), CGM(cgm), Builder(builder) {} - mlir::Value Visit(clang::Expr *E) { + mlir::Value Visit(Expr *E) { return StmtVisitor::Visit(E); } /// Emits the address of the l-value, then loads and returns the result. - mlir::Value buildLoadOfLValue(const clang::Expr *E) { + mlir::Value buildLoadOfLValue(const Expr *E) { LValue LV = CGM.buildLValue(E); auto load = Builder.create( CGM.getLoc(E->getExprLoc()), CGM.getCIRType(E->getType()), @@ -37,7 +37,7 @@ class ScalarExprEmitter } // Handle l-values. - mlir::Value VisitDeclRefExpr(clang::DeclRefExpr *E) { + mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { // FIXME: we could try to emit this as constant first, see // CGF.tryEmitAsConstant(E) return buildLoadOfLValue(E); @@ -47,16 +47,16 @@ class ScalarExprEmitter // casts have to handle a more broad range of conversions than explicit // casts, as they handle things like function to ptr-to-function decay // etc. - mlir::Value VisitCastExpr(clang::CastExpr *CE) { - clang::Expr *E = CE->getSubExpr(); - clang::QualType DestTy = CE->getType(); - clang::CastKind Kind = CE->getCastKind(); + mlir::Value VisitCastExpr(CastExpr *CE) { + Expr *E = CE->getSubExpr(); + QualType DestTy = CE->getType(); + CastKind Kind = CE->getCastKind(); switch (Kind) { - case clang::CK_LValueToRValue: + case CK_LValueToRValue: assert(CGM.getASTContext().hasSameUnqualifiedType(E->getType(), DestTy)); assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); - return Visit(const_cast(E)); - case clang::CK_NullToPointer: { + return Visit(const_cast(E)); + case CK_NullToPointer: { // FIXME: use MustVisitNullValue(E) and evaluate expr. // Note that DestTy is used as the MLIR type instead of a custom // nullptr type. @@ -65,7 +65,7 @@ class ScalarExprEmitter CGM.getLoc(E->getExprLoc()), Ty, mlir::cir::NullAttr::get(Builder.getContext(), Ty)); } - case clang::CK_IntegralToBoolean: { + case CK_IntegralToBoolean: { return buildIntToBoolConversion(Visit(E), CGM.getLoc(CE->getSourceRange())); } @@ -77,13 +77,12 @@ class ScalarExprEmitter } } - mlir::Value VisitUnaryAddrOf(const clang::UnaryOperator *E) { - assert(!llvm::isa(E->getType()) && - "not implemented"); + mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { + assert(!llvm::isa(E->getType()) && "not implemented"); return CGM.buildLValue(E->getSubExpr()).getPointer(); } - mlir::Value VisitCXXBoolLiteralExpr(const clang::CXXBoolLiteralExpr *E) { + mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { mlir::Type Ty = CGM.getCIRType(E->getType()); return Builder.create( CGM.getLoc(E->getExprLoc()), Ty, Builder.getBoolAttr(E->getValue())); @@ -92,17 +91,16 @@ class ScalarExprEmitter struct BinOpInfo { mlir::Value LHS; mlir::Value RHS; - clang::SourceRange Loc; - clang::QualType Ty; // Computation Type. - clang::BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform - clang::FPOptions FPFeatures; - const clang::Expr - *E; // Entire expr, for error unsupported. May not be binop. + SourceRange Loc; + QualType Ty; // Computation Type. + BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform + FPOptions FPFeatures; + const Expr *E; // Entire expr, for error unsupported. May not be binop. /// Check if the binop computes a division or a remainder. bool isDivremOp() const { - return Opcode == clang::BO_Div || Opcode == clang::BO_Rem || - Opcode == clang::BO_DivAssign || Opcode == clang::BO_RemAssign; + return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign || + Opcode == BO_RemAssign; } /// Check if at least one operand is a fixed point type. In such cases, @@ -111,18 +109,18 @@ class ScalarExprEmitter bool isFixedPointOp() const { // We cannot simply check the result type since comparison operations // return an int. - if (const auto *BinOp = llvm::dyn_cast(E)) { - clang::QualType LHSType = BinOp->getLHS()->getType(); - clang::QualType RHSType = BinOp->getRHS()->getType(); + if (const auto *BinOp = llvm::dyn_cast(E)) { + QualType LHSType = BinOp->getLHS()->getType(); + QualType RHSType = BinOp->getRHS()->getType(); return LHSType->isFixedPointType() || RHSType->isFixedPointType(); } - if (const auto *UnOp = llvm::dyn_cast(E)) + if (const auto *UnOp = llvm::dyn_cast(E)) return UnOp->getSubExpr()->getType()->isFixedPointType(); return false; } }; - BinOpInfo buildBinOps(const clang::BinaryOperator *E) { + BinOpInfo buildBinOps(const BinaryOperator *E) { BinOpInfo Result; Result.LHS = Visit(E->getLHS()); Result.RHS = Visit(E->getRHS()); @@ -187,7 +185,7 @@ class ScalarExprEmitter // Binary operators and binary compound assignment operators. #define HANDLEBINOP(OP) \ - mlir::Value VisitBin##OP(const clang::BinaryOperator *E) { \ + mlir::Value VisitBin##OP(const BinaryOperator *E) { \ return build##OP(buildBinOps(E)); \ } HANDLEBINOP(Mul) @@ -202,13 +200,12 @@ class ScalarExprEmitter HANDLEBINOP(Or) #undef HANDLEBINOP - mlir::Value buildCmp(const clang::BinaryOperator *E) { + mlir::Value buildCmp(const BinaryOperator *E) { mlir::Value Result; - clang::QualType LHSTy = E->getLHS()->getType(); - clang::QualType RHSTy = E->getRHS()->getType(); + QualType LHSTy = E->getLHS()->getType(); + QualType RHSTy = E->getRHS()->getType(); - if (const clang::MemberPointerType *MPT = - LHSTy->getAs()) { + if (const MemberPointerType *MPT = LHSTy->getAs()) { assert(0 && "not implemented"); } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { BinOpInfo BOInfo = buildBinOps(E); @@ -240,22 +237,22 @@ class ScalarExprEmitter mlir::cir::CmpOpKind Kind; switch (E->getOpcode()) { - case clang::BO_LT: + case BO_LT: Kind = mlir::cir::CmpOpKind::lt; break; - case clang::BO_GT: + case BO_GT: Kind = mlir::cir::CmpOpKind::gt; break; - case clang::BO_LE: + case BO_LE: Kind = mlir::cir::CmpOpKind::le; break; - case clang::BO_GE: + case BO_GE: Kind = mlir::cir::CmpOpKind::ge; break; - case clang::BO_EQ: + case BO_EQ: Kind = mlir::cir::CmpOpKind::eq; break; - case clang::BO_NE: + case BO_NE: Kind = mlir::cir::CmpOpKind::ne; break; default: @@ -281,9 +278,7 @@ class ScalarExprEmitter } #define VISITCOMP(CODE) \ - mlir::Value VisitBin##CODE(const clang::BinaryOperator *E) { \ - return buildCmp(E); \ - } + mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); } VISITCOMP(LT) VISITCOMP(GT) VISITCOMP(LE) @@ -292,7 +287,7 @@ class ScalarExprEmitter VISITCOMP(NE) #undef VISITCOMP - mlir::Value VisitExpr(clang::Expr *E) { + mlir::Value VisitExpr(Expr *E) { // Crashing here for "ScalarExprClassName"? Please implement // VisitScalarExprClassName(...) to get this working. emitError(CGM.getLoc(E->getExprLoc()), "scalar exp no implemented: '") @@ -314,14 +309,14 @@ class ScalarExprEmitter /// EmitConversionToBool - Convert the specified expression value to a /// boolean (i1) truth value. This is equivalent to "Val != 0". - mlir::Value buildConversionToBool(mlir::Value Src, clang::QualType SrcType, + mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType, mlir::Location loc) { assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); if (SrcType->isRealFloatingType()) assert(0 && "not implemented"); - if (auto *MPT = llvm::dyn_cast(SrcType)) + if (auto *MPT = llvm::dyn_cast(SrcType)) assert(0 && "not implemented"); assert((SrcType->isIntegerType() || @@ -337,9 +332,8 @@ class ScalarExprEmitter /// type, both of which are CIR scalar types. /// TODO: do we need ScalarConversionOpts here? Should be done in another /// pass. - mlir::Value buildScalarConversion(mlir::Value Src, clang::QualType SrcType, - clang::QualType DstType, - clang::SourceLocation Loc) { + mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcType, + QualType DstType, SourceLocation Loc) { if (SrcType->isFixedPointType()) { assert(0 && "not implemented"); } else if (DstType->isFixedPointType()) { @@ -390,9 +384,8 @@ class ScalarExprEmitter if (DstType->isExtVectorType() && !SrcType->isVectorType()) { // Sema should add casts to make sure that the source expression's type // is the same as the vector's element type (sans qualifiers) - assert(DstType->castAs() - ->getElementType() - .getTypePtr() == SrcType.getTypePtr() && + assert(DstType->castAs()->getElementType().getTypePtr() == + SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"); assert(0 && "not implemented"); @@ -423,7 +416,7 @@ class ScalarExprEmitter } // Leaves. - mlir::Value VisitIntegerLiteral(const clang::IntegerLiteral *E) { + mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { mlir::Type Ty = CGM.getCIRType(E->getType()); return Builder.create( CGM.getLoc(E->getExprLoc()), Ty, diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 97402d72204d..394498f2e1dd 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -21,8 +21,6 @@ namespace clang { class Expr; } // namespace clang -using namespace clang; - namespace cir { // FIXME: for now we are reusing this from lib/Clang/CodeGenFunction.h, which @@ -33,20 +31,20 @@ class CIRGenFunction { public: /// If a return statement is being visited, this holds the return statment's /// result expression. - const Expr *RetExpr = nullptr; + const clang::Expr *RetExpr = nullptr; mlir::Value RetValue = nullptr; mlir::Type FnRetTy; clang::QualType FnRetQualTy; /// Return the TypeEvaluationKind of QualType \c T. - static TypeEvaluationKind getEvaluationKind(QualType T); + static TypeEvaluationKind getEvaluationKind(clang::QualType T); - static bool hasScalarEvaluationKind(QualType T) { + static bool hasScalarEvaluationKind(clang::QualType T) { return getEvaluationKind(T) == TEK_Scalar; } - static bool hasAggregateEvaluationKind(QualType T) { + static bool hasAggregateEvaluationKind(clang::QualType T) { return getEvaluationKind(T) == TEK_Aggregate; } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 254c5b17c7fd..eb1cc26572be 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -97,9 +97,9 @@ class CIRGenModule { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(const Decl *var, QualType T, mlir::Location loc, - CharUnits alignment, mlir::Value &addr, - bool IsParam = false); + mlir::LogicalResult declare(const clang::Decl *var, clang::QualType T, + mlir::Location loc, clang::CharUnits alignment, + mlir::Value &addr, bool IsParam = false); public: mlir::ModuleOp getModule() { return theModule; } @@ -107,14 +107,14 @@ class CIRGenModule { clang::ASTContext &getASTContext() { return astCtx; } /// Helpers to convert Clang's SourceLocation to a MLIR Location. - mlir::Location getLoc(SourceLocation SLoc); + mlir::Location getLoc(clang::SourceLocation SLoc); - mlir::Location getLoc(SourceRange SLoc); + mlir::Location getLoc(clang::SourceRange SLoc); mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs); struct AutoVarEmission { - const VarDecl *Variable; + const clang::VarDecl *Variable; /// The address of the alloca for languages with explicit address space /// (e.g. OpenCL) or alloca casted to generic pointer for address space /// agnostic languages (e.g. C++). Invalid if the variable was emitted @@ -128,7 +128,7 @@ class CIRGenModule { struct Invalid {}; AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {} - AutoVarEmission(const VarDecl &variable) + AutoVarEmission(const clang::VarDecl &variable) : Variable(&variable), Addr(Address::invalid()), IsConstantAggregate(false) {} @@ -148,34 +148,36 @@ class CIRGenModule { /// FIXME: in LLVM codegen path this is part of CGM, which doesn't seem /// like necessary, since (1) it doesn't use CGM at all and (2) is AST type /// query specific. - bool isTypeConstant(QualType Ty, bool ExcludeCtor); + bool isTypeConstant(clang::QualType Ty, bool ExcludeCtor); /// Emit the alloca and debug information for a /// local variable. Does not emit initialization or destruction. - AutoVarEmission buildAutoVarAlloca(const VarDecl &D); + AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D); /// Determine whether the given initializer is trivial in the sense /// that it requires no code to be generated. - bool isTrivialInitializer(const Expr *Init); + bool isTrivialInitializer(const clang::Expr *Init); // TODO: this can also be abstrated into common AST helpers - bool hasBooleanRepresentation(QualType Ty); + bool hasBooleanRepresentation(clang::QualType Ty); - mlir::Value buildToMemory(mlir::Value Value, QualType Ty); + mlir::Value buildToMemory(mlir::Value Value, clang::QualType Ty); void buildStoreOfScalar(mlir::Value value, LValue lvalue, - const Decl *InitDecl); + const clang::Decl *InitDecl); void buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, - QualType Ty, LValueBaseInfo BaseInfo, - const Decl *InitDecl, bool isNontemporal); + clang::QualType Ty, LValueBaseInfo BaseInfo, + const clang::Decl *InitDecl, bool isNontemporal); /// Store the specified rvalue into the specified /// lvalue, where both are guaranteed to the have the same type, and that type /// is 'Ty'. - void buldStoreThroughLValue(RValue Src, LValue Dst, const Decl *InitDecl); + void buldStoreThroughLValue(RValue Src, LValue Dst, + const clang::Decl *InitDecl); - void buildScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue); + void buildScalarInit(const clang::Expr *init, const clang::ValueDecl *D, + LValue lvalue); /// Emit an expression as an initializer for an object (variable, field, etc.) /// at the given location. The expression is not necessarily the normal @@ -185,7 +187,8 @@ class CIRGenModule { /// \param init the initializing expression /// \param D the object to act as if we're initializing /// \param lvalue the lvalue to initialize - void buildExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue); + void buildExprAsInit(const clang::Expr *init, const clang::ValueDecl *D, + LValue lvalue); void buildAutoVarInit(const AutoVarEmission &emission); @@ -194,92 +197,96 @@ class CIRGenModule { /// Emit code and set up symbol table for a variable declaration with auto, /// register, or no storage class specifier. These turn into simple stack /// objects, globals depending on target. - void buildAutoVarDecl(const VarDecl &D); + void buildAutoVarDecl(const clang::VarDecl &D); /// This method handles emission of any variable declaration /// inside a function, including static vars etc. - void buildVarDecl(const VarDecl &D); + void buildVarDecl(const clang::VarDecl &D); - void buildDecl(const Decl &D); + void buildDecl(const clang::Decl &D); /// Emit the computation of the specified expression of scalar type, /// ignoring the result. - mlir::Value buildScalarExpr(const Expr *E); + mlir::Value buildScalarExpr(const clang::Expr *E); /// Emit a conversion from the specified type to the specified destination /// type, both of which are CIR scalar types. - mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcTy, - QualType DstTy, SourceLocation Loc); + mlir::Value buildScalarConversion(mlir::Value Src, clang::QualType SrcTy, + clang::QualType DstTy, + clang::SourceLocation Loc); - mlir::LogicalResult buildReturnStmt(const ReturnStmt &S); + mlir::LogicalResult buildReturnStmt(const clang::ReturnStmt &S); - mlir::LogicalResult buildDeclStmt(const DeclStmt &S); + mlir::LogicalResult buildDeclStmt(const clang::DeclStmt &S); - mlir::LogicalResult buildSimpleStmt(const Stmt *S, bool useCurrentScope); + mlir::LogicalResult buildSimpleStmt(const clang::Stmt *S, + bool useCurrentScope); - LValue buildDeclRefLValue(const DeclRefExpr *E); + LValue buildDeclRefLValue(const clang::DeclRefExpr *E); /// Emit code to compute the specified expression which /// can have any type. The result is returned as an RValue struct. /// TODO: if this is an aggregate expression, add a AggValueSlot to indicate /// where the result should be returned. - RValue buildAnyExpr(const Expr *E); + RValue buildAnyExpr(const clang::Expr *E); - LValue buildBinaryOperatorLValue(const BinaryOperator *E); + LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E); /// FIXME: this could likely be a common helper and not necessarily related /// with codegen. /// Return the best known alignment for an unknown pointer to a /// particular class. - CharUnits getClassPointerAlignment(const CXXRecordDecl *RD); + clang::CharUnits getClassPointerAlignment(const clang::CXXRecordDecl *RD); /// FIXME: this could likely be a common helper and not necessarily related /// with codegen. /// TODO: Add TBAAAccessInfo - CharUnits getNaturalPointeeTypeAlignment(QualType T, - LValueBaseInfo *BaseInfo); + clang::CharUnits getNaturalPointeeTypeAlignment(clang::QualType T, + LValueBaseInfo *BaseInfo); /// FIXME: this could likely be a common helper and not necessarily related /// with codegen. /// TODO: Add TBAAAccessInfo - CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo, - bool forPointeeType); + clang::CharUnits getNaturalTypeAlignment(clang::QualType T, + LValueBaseInfo *BaseInfo, + bool forPointeeType); /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. - Address buildPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo); + Address buildPointerWithAlignment(const clang::Expr *E, + LValueBaseInfo *BaseInfo); - LValue buildUnaryOpLValue(const UnaryOperator *E); + LValue buildUnaryOpLValue(const clang::UnaryOperator *E); /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. - LValue buildLValue(const Expr *E); + LValue buildLValue(const clang::Expr *E); /// EmitIgnoredExpr - Emit code to compute the specified expression, /// ignoring the result. - void buildIgnoredExpr(const Expr *E); + void buildIgnoredExpr(const clang::Expr *E); /// If the specified expression does not fold /// to a constant, or if it does but contains a label, return false. If it /// constant folds return true and set the boolean result in Result. - bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &ResultBool, + bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond, bool &ResultBool, bool AllowLabels); /// Return true if the statement contains a label in it. If /// this statement is not executed normally, it not containing a label means /// that we can just remove the code. - bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false); + bool ContainsLabel(const clang::Stmt *S, bool IgnoreCaseStmts = false); /// If the specified expression does not fold /// to a constant, or if it does but contains a label, return false. If it /// constant folds return true and set the folded value. - bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt, - bool AllowLabels); + bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond, + llvm::APSInt &ResultInt, bool AllowLabels); /// Perform the usual unary conversions on the specified /// expression and compare the result against zero, returning an Int1Ty value. - mlir::Value evaluateExprAsBool(const Expr *E); + mlir::Value evaluateExprAsBool(const clang::Expr *E); /// Emit an if on a boolean condition to the specified blocks. /// FIXME: Based on the condition, this might try to simplify the codegen of @@ -287,27 +294,30 @@ class CIRGenModule { /// times we expect the condition to evaluate to true based on PGO data. We /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr /// for extra ideas). - mlir::LogicalResult buildIfOnBoolExpr(const Expr *cond, mlir::Location loc, - const Stmt *thenS, const Stmt *elseS); + mlir::LogicalResult buildIfOnBoolExpr(const clang::Expr *cond, + mlir::Location loc, + const clang::Stmt *thenS, + const clang::Stmt *elseS); - mlir::LogicalResult buildIfStmt(const IfStmt &S); + mlir::LogicalResult buildIfStmt(const clang::IfStmt &S); // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. - mlir::LogicalResult buildStmt(const Stmt *S, bool useCurrentScope); + mlir::LogicalResult buildStmt(const clang::Stmt *S, bool useCurrentScope); - mlir::LogicalResult buildFunctionBody(const Stmt *Body); + mlir::LogicalResult buildFunctionBody(const clang::Stmt *Body); - mlir::LogicalResult buildCompoundStmt(const CompoundStmt &S); + mlir::LogicalResult buildCompoundStmt(const clang::CompoundStmt &S); - mlir::LogicalResult buildCompoundStmtWithoutScope(const CompoundStmt &S); + mlir::LogicalResult + buildCompoundStmtWithoutScope(const clang::CompoundStmt &S); - void buildTopLevelDecl(Decl *decl); + void buildTopLevelDecl(clang::Decl *decl); // Emit a new function and add it to the MLIR module. - mlir::FuncOp buildFunction(const FunctionDecl *FD); + mlir::FuncOp buildFunction(const clang::FunctionDecl *FD); - mlir::Type getCIRType(const QualType &type); + mlir::Type getCIRType(const clang::QualType &type); void verifyModule(); }; diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index dc4948b6bace..4c05bea15f9b 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -22,11 +22,12 @@ #include "clang/CIR/CIRGenerator.h" using namespace cir; +using namespace clang; CIRGenerator::CIRGenerator() = default; CIRGenerator::~CIRGenerator() = default; -void CIRGenerator::Initialize(clang::ASTContext &astCtx) { +void CIRGenerator::Initialize(ASTContext &astCtx) { using namespace llvm; this->astCtx = &astCtx; @@ -48,7 +49,7 @@ bool CIRGenerator::EmitFunction(const FunctionDecl *FD) { mlir::ModuleOp CIRGenerator::getModule() { return CGM->getModule(); } -bool CIRGenerator::HandleTopLevelDecl(clang::DeclGroupRef D) { +bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef D) { for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { CGM->buildTopLevelDecl(*I); } From ba574b95cb7e9c319906332c9ca38d2d54c0ebf4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Feb 2022 17:18:46 -0800 Subject: [PATCH 0108/2301] [CIR] Fix some CMAKE dep issues for CIRBasedWarnings --- clang/lib/Sema/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt index 7c0399b681fc..ad545fda4dad 100644 --- a/clang/lib/Sema/CMakeLists.txt +++ b/clang/lib/Sema/CMakeLists.txt @@ -107,6 +107,7 @@ add_clang_library(clangSema omp_gen ClangDriverOptions MLIRCIROpsIncGen + MLIRCIR LINK_LIBS clangAPINotes From 460af0a2f6f6d023ce8ebe5bf9c31505f00ac565 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Feb 2022 17:48:36 -0800 Subject: [PATCH 0109/2301] [CIR] Add skeleton for GotoStmt codegen --- clang/lib/CIR/CIRGenModule.cpp | 16 +++++++++++++++- clang/lib/CIR/CIRGenModule.h | 1 + 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 2a67196af4b3..67c6eabf99cc 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -616,6 +616,18 @@ mlir::LogicalResult CIRGenModule::buildDeclStmt(const DeclStmt &S) { return mlir::success(); } +mlir::LogicalResult CIRGenModule::buildGotoStmt(const GotoStmt &S) { + // FIXME: LLVM codegen inserts emit stop point here for debug info + // sake when the insertion point is available, but doesn't do + // anything special when there isn't. We haven't implemented debug + // info support just yet, look at this again once we have it. + assert(builder.getInsertionBlock() && "not yet implemented"); + assert(0 && "not implemented"); + // FIXME: add something like + // EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel())); + return mlir::success(); +} + mlir::LogicalResult CIRGenModule::buildSimpleStmt(const Stmt *S, bool useCurrentScope) { switch (S->getStmtClass()) { @@ -629,12 +641,14 @@ mlir::LogicalResult CIRGenModule::buildSimpleStmt(const Stmt *S, : buildCompoundStmt(cast(*S)); case Stmt::ReturnStmtClass: return buildReturnStmt(cast(*S)); + case Stmt::GotoStmtClass: + return buildGotoStmt(cast(*S)); + case Stmt::NullStmtClass: break; case Stmt::LabelStmtClass: case Stmt::AttributedStmtClass: - case Stmt::GotoStmtClass: case Stmt::BreakStmtClass: case Stmt::ContinueStmtClass: case Stmt::DefaultStmtClass: diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index eb1cc26572be..234c7fe2e469 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -216,6 +216,7 @@ class CIRGenModule { clang::SourceLocation Loc); mlir::LogicalResult buildReturnStmt(const clang::ReturnStmt &S); + mlir::LogicalResult buildGotoStmt(const clang::GotoStmt &S); mlir::LogicalResult buildDeclStmt(const clang::DeclStmt &S); From 51100f568aad46fbce5383778be371bd3f165f44 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 2 Feb 2022 19:57:49 -0800 Subject: [PATCH 0110/2301] CIR] Fix a missing proper return code --- clang/lib/CIR/CIRGenModule.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 67c6eabf99cc..73bdf6aace89 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -605,9 +605,11 @@ mlir::LogicalResult CIRGenModule::buildReturnStmt(const ReturnStmt &S) { } mlir::LogicalResult CIRGenModule::buildDeclStmt(const DeclStmt &S) { - if (!builder.getInsertionBlock()) + if (!builder.getInsertionBlock()) { theModule.emitError( "Seems like this is unreachable code, what should we do?"); + return mlir::failure(); + } for (const auto *I : S.decls()) { buildDecl(*I); From bb77d173f59fa32537a67c017b6c2275f9f29a36 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 2 Feb 2022 20:10:23 -0800 Subject: [PATCH 0111/2301] [CIR] Lift restriction on sized regions on IfOp and ScopeOp Also clean up the docs a bit. Note that this lifts the restriction but doesn't implement regions with more than one BB just yet. This paves the way to represent goto's and returns within a scope. --- clang/test/CIR/IR/cir-ops.cir | 14 +++++++++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 25 +++++++++---------- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 29 +++++++++++++++++++--- 3 files changed, 51 insertions(+), 17 deletions(-) diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 15d95ad75457..20467205b51e 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -35,6 +35,14 @@ module { %5 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 cir.return %5 : i32 } + + func.func @s0() { + %0 = cir.alloca i32, cir.ptr , [uninitialized] {alignment = 4 : i64} + cir.scope { + %1 = cir.alloca i32, cir.ptr , [uninitialized] {alignment = 4 : i64} + } + cir.return + } } // CHECK: module { @@ -63,4 +71,10 @@ module { // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: } +// CHECK: func.func @s0() { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: } + // CHECK: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index f3d820026b03..905b4c32d381 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -282,13 +282,17 @@ def ReturnOp : CIR_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { def IfOp : CIR_Op<"if", [DeclareOpInterfaceMethods, - SingleBlockImplicitTerminator<"cir::YieldOp">, RecursivelySpeculatable, - AutomaticAllocationScope, NoRegionArguments]> { + RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { let summary = "if-then-else operation"; let description = [{ The `cir.if` operation represents an if-then-else construct for conditionally executing two regions of code. The operand to an if operation - is a boolean value. For example: + is a boolean value. + + Each region can contain an arbitrary number of blocks but there usually be + only one block in each region unless the presence of return and goto. + + Examples: ```mlir cir.if %b { @@ -313,7 +317,7 @@ def IfOp : CIR_Op<"if", // FIXME: for now the "then" region only has one block, that should change // soon as building CIR becomes more complex. - let regions = (region SizedRegion<1>:$thenRegion, AnyRegion:$elseRegion); + let regions = (region AnyRegion:$thenRegion, AnyRegion:$elseRegion); // FIXME: unify these within CIR_Ops. let hasCustomAssemblyFormat = 1; @@ -329,14 +333,6 @@ def IfOp : CIR_Op<"if", ]; let extraClassDeclaration = [{ - OpBuilder getThenBodyBuilder(OpBuilder::Listener *listener = nullptr) { - Block* body = getBody(0); - return OpBuilder::atBlockTerminator(body, listener); - } - OpBuilder getElseBodyBuilder(OpBuilder::Listener *listener = nullptr) { - Block* body = getBody(1); - return OpBuilder::atBlockTerminator(body, listener); - } Block* thenBlock(); Block* elseBlock(); }]; @@ -387,13 +383,16 @@ def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods:$results); - let regions = (region SizedRegion<1>:$scopeRegion); + let regions = (region AnyRegion:$scopeRegion); let hasCustomAssemblyFormat = 1; let hasVerifier = 1; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index e62d9a416cd1..55cf1bc99c48 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -17,6 +17,7 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/IR/Builders.h" +#include "mlir/IR/OpDefinition.h" #include "mlir/IR/OpImplementation.h" #include "mlir/IR/TypeUtilities.h" @@ -163,16 +164,29 @@ ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { parser.resolveOperand(cond, boolType, result.operands)) return failure(); + auto getOrInsertTerminator = [&](Region *r) { + ::mlir::impl::ensureRegionTerminator( + *r, parser.getBuilder(), result.location, + [](OpBuilder &builder, Location loc) { + OperationState state(loc, YieldOp::getOperationName()); + YieldOp::build(builder, state); + return Operation::create(state); + }); + }; + // Parse the 'then' region. - if (parser.parseRegion(*thenRegion, /*arguments=*/{}, /*argTypes=*/{})) + if (parser.parseRegion(*thenRegion, /*arguments=*/{}, + /*argTypes=*/{})) return failure(); - IfOp::ensureTerminator(*thenRegion, parser.getBuilder(), result.location); + assert(thenRegion->hasOneBlock() && "not yet implemented"); + getOrInsertTerminator(thenRegion); // If we find an 'else' keyword then parse the 'else' region. if (!parser.parseOptionalKeyword("else")) { if (parser.parseRegion(*elseRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - IfOp::ensureTerminator(*elseRegion, parser.getBuilder(), result.location); + assert(elseRegion->hasOneBlock() && "not yet implemented"); + getOrInsertTerminator(elseRegion); } // Parse the optional attribute list. @@ -282,7 +296,14 @@ ParseResult ScopeOp::parse(OpAsmParser &parser, OperationState &result) { // Parse the scope region. if (parser.parseRegion(*scopeRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - ScopeOp::ensureTerminator(*scopeRegion, parser.getBuilder(), result.location); + assert(scopeRegion->hasOneBlock() && "not yet implemented"); + ::mlir::impl::ensureRegionTerminator( + *scopeRegion, parser.getBuilder(), result.location, + [](OpBuilder &builder, Location loc) { + OperationState state(loc, YieldOp::getOperationName()); + YieldOp::build(builder, state); + return Operation::create(state); + }); // Parse the optional attribute list. if (parser.parseOptionalAttrDict(result.attributes)) From 698d92ccca3767b89becf3983f77db10b5e176c1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 3 Feb 2022 10:34:57 -0800 Subject: [PATCH 0112/2301] [CIR] Eliminate constrains on single block implicit terminator for ScopeOp Missing from previous commit but already effective --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 905b4c32d381..1109e5a79747 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -376,8 +376,7 @@ def YieldOp : CIR_Op<"yield", [Pure, ReturnLike, Terminator, //===----------------------------------------------------------------------===// def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods, - SingleBlockImplicitTerminator<"cir::YieldOp">, RecursivelySpeculatable, - AutomaticAllocationScope, NoRegionArguments]> { + RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { let summary = ""; let description = [{ "cir.scope" contains one region and defines a strict "scope" for all new From b14e4edee74a5b98e8c38947828d49afad251c18 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 8 Feb 2022 22:46:21 -0800 Subject: [PATCH 0113/2301] [CIR] Add simple cir.br instruction, no testcases yet --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 28 ++++++++++++++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 14 +++++++++++ 2 files changed, 42 insertions(+) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 1109e5a79747..86c5576b8a7a 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -511,4 +511,32 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// BrOp +//===----------------------------------------------------------------------===// + +def BrOp : CIR_Op<"br", + [DeclareOpInterfaceMethods, + Pure, Terminator]> { + let summary = "branch operation"; + let description = [{ + The `cir.br` branches unconditionally to a block. + + Example: + + ```mlir + ... + cir.br ^bb3 + ^bb3: // pred: ^bb2 + cir.return + ``` + }]; + + let successors = (successor AnySuccessor:$dest); + let assemblyFormat = [{ + $dest attr-dict + }]; +} + + #endif // MLIR_CIR_DIALECT_CIR_OPS diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 55cf1bc99c48..bc7e3466944b 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -419,6 +419,20 @@ void printBinOpKind(OpAsmPrinter &p, BinOp binOp, BinOpKindAttr kindAttr) { p << caseValueStr; } +//===----------------------------------------------------------------------===// +// BrOp +//===----------------------------------------------------------------------===// + +mlir::SuccessorOperands BrOp::getSuccessorOperands(unsigned index) { + assert(index == 0 && "invalid successor index"); + // Current block targets do not have operands. + // TODO(CIR): This is a hacky avoidance of actually implementing this since + // MLIR moved it "because nobody used the llvm::Optional::None case.........." + return mlir::SuccessorOperands(MutableOperandRange(getOperation(), 0, 0)); +} + +Block *BrOp::getSuccessorForOperands(ArrayRef) { return getDest(); } + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From 73e06c4f69bfb7857babbb1efa1d3c7afb1041bd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 8 Feb 2022 22:56:25 -0800 Subject: [PATCH 0114/2301] [CIR] Support GotoStmt and LabelStmt - Added codegen bit for simple goto and label interaction (intra-scope only for now). - Introduced a lexical scope tracker, which among other things, allow us to track cleanup blocks. - Change the current way we handle returns. --- clang/lib/CIR/CIRGenFunction.h | 2 + clang/lib/CIR/CIRGenModule.cpp | 169 +++++++++++++++++++++++++++++--- clang/lib/CIR/CIRGenModule.h | 79 +++++++++++++++ clang/test/CIR/CodeGen/goto.cpp | 32 ++++++ 4 files changed, 269 insertions(+), 13 deletions(-) create mode 100644 clang/test/CIR/CodeGen/goto.cpp diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 394498f2e1dd..8f8c2d984cb9 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -34,6 +34,8 @@ class CIRGenFunction { const clang::Expr *RetExpr = nullptr; mlir::Value RetValue = nullptr; + std::optional RetLoc; + mlir::Type FnRetTy; clang::QualType FnRetQualTy; diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 73bdf6aace89..97aa624f30ad 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -593,14 +593,21 @@ mlir::LogicalResult CIRGenModule::buildReturnStmt(const ReturnStmt &S) { return mlir::failure(); } + // FIXME: there might be multiple return values in a function, fix this + // once we add support for arbitraty returns. CurCGF->RetValue = V; + CurCGF->RetLoc = getLoc(S.getSourceRange()); + // Otherwise, this return operation has zero operands. if (!V || (RV && RV->getType()->isVoidType())) { // FIXME: evaluate for side effects. } - builder.create(getLoc(S.getSourceRange()), - V ? ArrayRef(V) : ArrayRef()); + // FIXME: this currently assumes only a return stmt as the last + // on in a function, make this generic. + if (!builder.getInsertionBlock()->isEntryBlock()) + builder.create(getLoc(S.getSourceRange()), + currLexScope->CleanupBlock); return mlir::success(); } @@ -618,18 +625,112 @@ mlir::LogicalResult CIRGenModule::buildDeclStmt(const DeclStmt &S) { return mlir::success(); } +/// Build a unconditional branch to the lexical scope cleanup block +/// or with the labeled blocked if already solved. +/// +/// Track on scope basis, goto's we need to fix later. +mlir::LogicalResult +CIRGenModule::buildBranchThroughCleanup(JumpDest &Dest, LabelDecl *L, + mlir::Location Loc) { + // Remove this once we go for making sure unreachable code is + // well modeled (or not). + assert(builder.getInsertionBlock() && "not yet implemented"); + + // Insert a branch: to the cleanup block (unsolved) or to the already + // materialized label. Keep track of unsolved goto's. + mlir::Block *DstBlock = Dest.getBlock(); + auto G = builder.create( + Loc, Dest.isValid() ? DstBlock : currLexScope->CleanupBlock); + if (!Dest.isValid()) + currLexScope->PendingGotos.push_back(std::make_pair(G, L)); + + return mlir::success(); +} + +/// All scope related cleanup needed: +/// - Patching up unsolved goto's. +void CIRGenModule::LexicalScopeRAIIContext::cleanup() { + while (!PendingGotos.empty()) { + auto gotoInfo = PendingGotos.back(); + // FIXME: Currently only support resolving goto labels inside the + // same lexical ecope. + assert(SolvedLabels.count(gotoInfo.second) && + "goto across scopes not yet supported"); + + // The goto in this lexical context actually maps to a basic + // block. + auto g = cast(gotoInfo.first); + g.setSuccessor(P.LabelMap[gotoInfo.second].getBlock()); + PendingGotos.pop_back(); + } + + SolvedLabels.clear(); +} + mlir::LogicalResult CIRGenModule::buildGotoStmt(const GotoStmt &S) { // FIXME: LLVM codegen inserts emit stop point here for debug info // sake when the insertion point is available, but doesn't do // anything special when there isn't. We haven't implemented debug // info support just yet, look at this again once we have it. assert(builder.getInsertionBlock() && "not yet implemented"); - assert(0 && "not implemented"); - // FIXME: add something like - // EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel())); + + // A goto marks the end of a block, create a new one for codegen after + // buildGotoStmt can resume building in that block. + + // Build a cir.br to the target label. + auto &JD = LabelMap[S.getLabel()]; + if (buildBranchThroughCleanup(JD, S.getLabel(), getLoc(S.getSourceRange())) + .failed()) + return mlir::failure(); + + // Insert the new block to continue codegen after goto. + builder.createBlock(currLexScope->CleanupBlock); + + // What here... return mlir::success(); } +mlir::LogicalResult CIRGenModule::buildLabel(const LabelDecl *D) { + JumpDest &Dest = LabelMap[D]; + + // Create a new block to tag with a label and add a branch from + // the current one to it. + mlir::Block *currBlock = builder.getBlock(); + if (!currBlock->empty()) { + mlir::Operation *lastOp = nullptr; + if (!currBlock->back().hasTrait()) + lastOp = builder.create(getLoc(D->getSourceRange()), + currLexScope->CleanupBlock); + + currBlock = builder.createBlock(currLexScope->CleanupBlock); + if (lastOp) { + auto g = cast(lastOp); + g.setSuccessor(currBlock); + } + } + + if (!Dest.isValid()) { + Dest.Block = currBlock; + currLexScope->SolvedLabels.insert(D); + // FIXME: add a label attribute to block... + } else { + assert(0 && "unimplemented"); + } + + // FIXME: emit debug info for labels, incrementProfileCounter + return mlir::success(); +} + +mlir::LogicalResult CIRGenModule::buildLabelStmt(const clang::LabelStmt &S) { + if (buildLabel(S.getDecl()).failed()) + return mlir::failure(); + + // IsEHa: not implemented. + assert(!(astCtx.getLangOpts().EHAsynch && S.isSideEntry())); + + return buildStmt(S.getSubStmt(), /* useCurrentScope */ true); +} + mlir::LogicalResult CIRGenModule::buildSimpleStmt(const Stmt *S, bool useCurrentScope) { switch (S->getStmtClass()) { @@ -650,6 +751,8 @@ mlir::LogicalResult CIRGenModule::buildSimpleStmt(const Stmt *S, break; case Stmt::LabelStmtClass: + return buildLabelStmt(cast(*S)); + case Stmt::AttributedStmtClass: case Stmt::BreakStmtClass: case Stmt::ContinueStmtClass: @@ -1378,16 +1481,30 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { // In MLIR the entry block of the function is special: it must have the // same argument list as the function itself. - auto &entryBlock = *function.addEntryBlock(); + mlir::Block *entryBlock = function.addEntryBlock(); // Set the insertion point in the builder to the beginning of the // function body, it will be used throughout the codegen to create // operations in this function. - builder.setInsertionPointToStart(&entryBlock); + builder.setInsertionPointToStart(entryBlock); + auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); + + // Initialize lexical scope information. + LexicalScopeRAIIContext lexScope{*this, nullptr}; + currLexScope = &lexScope; + + { + // Create the cleanup block but dont hook it up around just + // yet. + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::Block *entryBlock = builder.getBlock(); + currLexScope->CleanupBlock = builder.createBlock(entryBlock->getParent()); + } + assert(builder.getInsertionBlock() && "Should be valid"); // Declare all the function arguments in the symbol table. for (const auto nameValue : - llvm::zip(FD->parameters(), entryBlock.getArguments())) { + llvm::zip(FD->parameters(), entryBlock->getArguments())) { auto *paramVar = std::get<0>(nameValue); auto paramVal = std::get<1>(nameValue); auto alignment = astCtx.getDeclAlign(paramVar); @@ -1403,22 +1520,48 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); builder.create(fnBodyBegin, paramVal, addr); } + assert(builder.getInsertionBlock() && "Should be valid"); // Emit the body of the function. if (mlir::failed(buildFunctionBody(FD->getBody()))) { function.erase(); return nullptr; } + assert(builder.getInsertionBlock() && "Should be valid"); + + // Do not insert the cleanup block unecessarily, this doesn't really need + // to be here (should be a separate pass), but it helps keeping small + // testcases minimal for now. + if (!builder.getInsertionBlock()->isEntryBlock()) { + // If the current block doesn't have a terminator, add a branch to the + // cleanup block, where the actual cir.return happens (cleanup block). + if (!builder.getBlock()->back().hasTrait()) + builder.create(FnEndLoc, currLexScope->CleanupBlock); + + // Set the insertion point to the end of the cleanup block and insert + // the return instruction. + // FIXME: this currently assumes only one cir.return in the function. + builder.setInsertionPointToEnd(currLexScope->CleanupBlock); + } else { + // Do not even emit cleanup block + assert(currLexScope->CleanupBlock->empty() && "not empty"); + assert( + (builder.getBlock()->empty() || + !builder.getBlock()->back().hasTrait()) && + "entry basic block already has a terminator?"); + currLexScope->CleanupBlock->erase(); + } - ReturnOp returnOp; - if (!entryBlock.empty()) - returnOp = dyn_cast(entryBlock.back()); - if (!returnOp) - builder.create(getLoc(FD->getBody()->getEndLoc())); + builder.create(CurCGF->RetLoc ? *(CurCGF->RetLoc) : FnEndLoc, + CurCGF->RetValue ? ArrayRef(CurCGF->RetValue) + : ArrayRef()); if (mlir::failed(function.verifyBody())) return nullptr; theModule.push_back(function); + + CurCGF = nullptr; + currLexScope = nullptr; return function; } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 234c7fe2e469..a3301f840e46 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -22,6 +22,7 @@ #include "clang/Basic/SourceManager.h" #include "llvm/ADT/ScopedHashTable.h" +#include "llvm/ADT/SmallPtrSet.h" #include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" @@ -75,6 +76,72 @@ class CIRGenModule { /// Per-module type mapping from clang AST to CIR. std::unique_ptr genTypes; + /// ------- + /// Goto + /// ------- + + /// A jump destination is an abstract label, branching to which may + /// require a jump out through normal cleanups. + struct JumpDest { + JumpDest() = default; + JumpDest(mlir::Block *Block) : Block(Block) {} + + bool isValid() const { return Block != nullptr; } + mlir::Block *getBlock() const { return Block; } + mlir::Block *Block = nullptr; + }; + + /// Track mlir Blocks for each C/C++ label. + llvm::DenseMap LabelMap; + JumpDest &getJumpDestForLabel(const clang::LabelDecl *D); + + /// ------- + /// Lexical Scope: to be read as in the meaning in CIR, a scope is always + /// related with initialization and destruction of objects. + /// ------- + + // Represents a cir.scope, cir.if, and then/else regions. I.e. lexical + // scopes that require cleanups. + struct LexicalScopeRAIIContext { + CIRGenModule &P; + LexicalScopeRAIIContext *OldVal = nullptr; + unsigned Depth = 0; + + public: + LexicalScopeRAIIContext(CIRGenModule &p, LexicalScopeRAIIContext *Value) + : P(p) { + if (P.currLexScope) + OldVal = P.currLexScope; + P.currLexScope = Value; + if (Value) + Depth++; + } + + // Block containing cleanup code for things initialized in this + // lexical context (scope). + mlir::Block *CleanupBlock = nullptr; + + // Goto's introduced in this scope but didn't get fixed. + llvm::SmallVector, 4> + PendingGotos; + + // Labels solved inside this scope. + llvm::SmallPtrSet SolvedLabels; + + void restore() { P.currLexScope = OldVal; } + void cleanup(); + ~LexicalScopeRAIIContext() { + cleanup(); + restore(); + } + }; + + LexicalScopeRAIIContext *currLexScope = nullptr; + + /// ------- + /// Source Location tracking + /// ------- + /// Use to track source locations across nested visitor traversals. /// Always use a `SourceLocRAIIObject` to change currSrcLoc. std::optional currSrcLoc; @@ -95,6 +162,10 @@ class CIRGenModule { ~SourceLocRAIIObject() { restore(); } }; + /// ------- + /// Declaring variables + /// ------- + /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. mlir::LogicalResult declare(const clang::Decl *var, clang::QualType T, @@ -215,7 +286,15 @@ class CIRGenModule { clang::QualType DstTy, clang::SourceLocation Loc); + mlir::LogicalResult buildBranchThroughCleanup(JumpDest &Dest, + clang::LabelDecl *L, + mlir::Location Loc); + mlir::LogicalResult buildReturnStmt(const clang::ReturnStmt &S); + + mlir::LogicalResult buildLabel(const clang::LabelDecl *D); + mlir::LogicalResult buildLabelStmt(const clang::LabelStmt &S); + mlir::LogicalResult buildGotoStmt(const clang::GotoStmt &S); mlir::LogicalResult buildDeclStmt(const clang::DeclStmt &S); diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp new file mode 100644 index 000000000000..dab5abd760cb --- /dev/null +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void g0(int a) { + int b = a; + goto end; + b = b + 1; +end: + b = b + 2; +} + +// CHECK: func @g0 +// CHECK: %0 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %1 : i32, cir.ptr +// CHECK: %2 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: cir.store %2, %0 : i32, cir.ptr +// CHECK: cir.br ^bb2 +// CHECK: ^bb1: // no predecessors +// CHECK: %3 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %4 = cir.cst(1 : i32) : i32 +// CHECK: %5 = cir.binop(add, %3, %4) : i32 +// CHECK: cir.store %5, %0 : i32, cir.ptr +// CHECK: cir.br ^bb2 +// CHECK: ^bb2: // 2 preds: ^bb0, ^bb1 +// CHECK: %6 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %7 = cir.cst(2 : i32) : i32 +// CHECK: %8 = cir.binop(add, %6, %7) : i32 +// CHECK: cir.store %8, %0 : i32, cir.ptr +// CHECK: cir.br ^bb3 +// CHECK: ^bb3: // pred: ^bb2 +// CHECK: cir.return \ No newline at end of file From 03d8ba34c02751720c34bdf1ac904d01e763efe2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 9 Feb 2022 16:04:00 -0800 Subject: [PATCH 0115/2301] [CIR] Split lexical context handling into two new classes --- clang/lib/CIR/CIRGenModule.cpp | 21 ++++++++++--------- clang/lib/CIR/CIRGenModule.h | 38 ++++++++++++++++++++-------------- 2 files changed, 33 insertions(+), 26 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 97aa624f30ad..4639aaa9bc76 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -649,22 +649,24 @@ CIRGenModule::buildBranchThroughCleanup(JumpDest &Dest, LabelDecl *L, /// All scope related cleanup needed: /// - Patching up unsolved goto's. -void CIRGenModule::LexicalScopeRAIIContext::cleanup() { - while (!PendingGotos.empty()) { - auto gotoInfo = PendingGotos.back(); +void CIRGenModule::LexicalScopeGuard::cleanup() { + auto *localScope = CGM.currLexScope; + + while (!localScope->PendingGotos.empty()) { + auto gotoInfo = localScope->PendingGotos.back(); // FIXME: Currently only support resolving goto labels inside the // same lexical ecope. - assert(SolvedLabels.count(gotoInfo.second) && + assert(localScope->SolvedLabels.count(gotoInfo.second) && "goto across scopes not yet supported"); // The goto in this lexical context actually maps to a basic // block. auto g = cast(gotoInfo.first); - g.setSuccessor(P.LabelMap[gotoInfo.second].getBlock()); - PendingGotos.pop_back(); + g.setSuccessor(CGM.LabelMap[gotoInfo.second].getBlock()); + localScope->PendingGotos.pop_back(); } - SolvedLabels.clear(); + localScope->SolvedLabels.clear(); } mlir::LogicalResult CIRGenModule::buildGotoStmt(const GotoStmt &S) { @@ -1490,8 +1492,8 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); // Initialize lexical scope information. - LexicalScopeRAIIContext lexScope{*this, nullptr}; - currLexScope = &lexScope; + LexicalScopeContext lexScope; + LexicalScopeGuard scopeGuard{*this, &lexScope}; { // Create the cleanup block but dont hook it up around just @@ -1561,7 +1563,6 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { theModule.push_back(function); CurCGF = nullptr; - currLexScope = nullptr; return function; } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index a3301f840e46..28513aeb5448 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -102,20 +102,10 @@ class CIRGenModule { // Represents a cir.scope, cir.if, and then/else regions. I.e. lexical // scopes that require cleanups. - struct LexicalScopeRAIIContext { - CIRGenModule &P; - LexicalScopeRAIIContext *OldVal = nullptr; + struct LexicalScopeContext { unsigned Depth = 0; - - public: - LexicalScopeRAIIContext(CIRGenModule &p, LexicalScopeRAIIContext *Value) - : P(p) { - if (P.currLexScope) - OldVal = P.currLexScope; - P.currLexScope = Value; - if (Value) - Depth++; - } + LexicalScopeContext() = default; + ~LexicalScopeContext() = default; // Block containing cleanup code for things initialized in this // lexical context (scope). @@ -127,16 +117,32 @@ class CIRGenModule { // Labels solved inside this scope. llvm::SmallPtrSet SolvedLabels; + }; + + class LexicalScopeGuard { + CIRGenModule &CGM; + LexicalScopeContext *OldVal = nullptr; + + public: + LexicalScopeGuard(CIRGenModule &c, LexicalScopeContext *L) : CGM(c) { + if (CGM.currLexScope) + OldVal = CGM.currLexScope; + CGM.currLexScope = L; + } + + LexicalScopeGuard(const LexicalScopeGuard &) = delete; + LexicalScopeGuard &operator=(const LexicalScopeGuard &) = delete; + LexicalScopeGuard &operator=(LexicalScopeGuard &&other) = delete; - void restore() { P.currLexScope = OldVal; } void cleanup(); - ~LexicalScopeRAIIContext() { + void restore() { CGM.currLexScope = OldVal; } + ~LexicalScopeGuard() { cleanup(); restore(); } }; - LexicalScopeRAIIContext *currLexScope = nullptr; + LexicalScopeContext *currLexScope = nullptr; /// ------- /// Source Location tracking From d36ccc259c7963fbccb903fad260127c4bdbfefb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 9 Feb 2022 16:25:17 -0800 Subject: [PATCH 0116/2301] [CIR] Use a more strict/shorter guard for lexical scopes --- clang/lib/CIR/CIRGenModule.cpp | 125 +++++++++++++++++---------------- 1 file changed, 64 insertions(+), 61 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 4639aaa9bc76..9b35c281876f 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1492,71 +1492,74 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); // Initialize lexical scope information. - LexicalScopeContext lexScope; - LexicalScopeGuard scopeGuard{*this, &lexScope}; - { - // Create the cleanup block but dont hook it up around just - // yet. - mlir::OpBuilder::InsertionGuard guard(builder); - mlir::Block *entryBlock = builder.getBlock(); - currLexScope->CleanupBlock = builder.createBlock(entryBlock->getParent()); - } - assert(builder.getInsertionBlock() && "Should be valid"); - - // Declare all the function arguments in the symbol table. - for (const auto nameValue : - llvm::zip(FD->parameters(), entryBlock->getArguments())) { - auto *paramVar = std::get<0>(nameValue); - auto paramVal = std::get<1>(nameValue); - auto alignment = astCtx.getDeclAlign(paramVar); - auto paramLoc = getLoc(paramVar->getSourceRange()); - paramVal.setLoc(paramLoc); - - mlir::Value addr; - if (failed(declare(paramVar, paramVar->getType(), paramLoc, alignment, addr, - true /*param*/))) + LexicalScopeContext lexScope; + LexicalScopeGuard scopeGuard{*this, &lexScope}; + + { + // Create the cleanup block but dont hook it up around just + // yet. + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::Block *entryBlock = builder.getBlock(); + currLexScope->CleanupBlock = builder.createBlock(entryBlock->getParent()); + } + assert(builder.getInsertionBlock() && "Should be valid"); + + // Declare all the function arguments in the symbol table. + for (const auto nameValue : + llvm::zip(FD->parameters(), entryBlock->getArguments())) { + auto *paramVar = std::get<0>(nameValue); + auto paramVal = std::get<1>(nameValue); + auto alignment = astCtx.getDeclAlign(paramVar); + auto paramLoc = getLoc(paramVar->getSourceRange()); + paramVal.setLoc(paramLoc); + + mlir::Value addr; + if (failed(declare(paramVar, paramVar->getType(), paramLoc, alignment, + addr, true /*param*/))) + return nullptr; + // Location of the store to the param storage tracked as beginning of + // the function body. + auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); + builder.create(fnBodyBegin, paramVal, addr); + } + assert(builder.getInsertionBlock() && "Should be valid"); + + // Emit the body of the function. + if (mlir::failed(buildFunctionBody(FD->getBody()))) { + function.erase(); return nullptr; - // Location of the store to the param storage tracked as beginning of - // the function body. - auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); - builder.create(fnBodyBegin, paramVal, addr); - } - assert(builder.getInsertionBlock() && "Should be valid"); + } + assert(builder.getInsertionBlock() && "Should be valid"); + + // Do not insert the cleanup block unecessarily, this doesn't really need + // to be here (should be a separate pass), but it helps keeping small + // testcases minimal for now. + if (!builder.getInsertionBlock()->isEntryBlock()) { + // If the current block doesn't have a terminator, add a branch to the + // cleanup block, where the actual cir.return happens (cleanup block). + if (!builder.getBlock()->back().hasTrait()) + builder.create(FnEndLoc, currLexScope->CleanupBlock); + + // Set the insertion point to the end of the cleanup block and insert + // the return instruction. + // FIXME: this currently assumes only one cir.return in the function. + builder.setInsertionPointToEnd(currLexScope->CleanupBlock); + } else { + // Do not even emit cleanup block + assert(currLexScope->CleanupBlock->empty() && "not empty"); + assert((builder.getBlock()->empty() || + !builder.getBlock() + ->back() + .hasTrait()) && + "entry basic block already has a terminator?"); + currLexScope->CleanupBlock->erase(); + } - // Emit the body of the function. - if (mlir::failed(buildFunctionBody(FD->getBody()))) { - function.erase(); - return nullptr; + builder.create(CurCGF->RetLoc ? *(CurCGF->RetLoc) : FnEndLoc, + CurCGF->RetValue ? ArrayRef(CurCGF->RetValue) + : ArrayRef()); } - assert(builder.getInsertionBlock() && "Should be valid"); - - // Do not insert the cleanup block unecessarily, this doesn't really need - // to be here (should be a separate pass), but it helps keeping small - // testcases minimal for now. - if (!builder.getInsertionBlock()->isEntryBlock()) { - // If the current block doesn't have a terminator, add a branch to the - // cleanup block, where the actual cir.return happens (cleanup block). - if (!builder.getBlock()->back().hasTrait()) - builder.create(FnEndLoc, currLexScope->CleanupBlock); - - // Set the insertion point to the end of the cleanup block and insert - // the return instruction. - // FIXME: this currently assumes only one cir.return in the function. - builder.setInsertionPointToEnd(currLexScope->CleanupBlock); - } else { - // Do not even emit cleanup block - assert(currLexScope->CleanupBlock->empty() && "not empty"); - assert( - (builder.getBlock()->empty() || - !builder.getBlock()->back().hasTrait()) && - "entry basic block already has a terminator?"); - currLexScope->CleanupBlock->erase(); - } - - builder.create(CurCGF->RetLoc ? *(CurCGF->RetLoc) : FnEndLoc, - CurCGF->RetValue ? ArrayRef(CurCGF->RetValue) - : ArrayRef()); if (mlir::failed(function.verifyBody())) return nullptr; From ce84c4b5042f76a0426636b47964c1674d71d9e8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 9 Feb 2022 17:39:32 -0800 Subject: [PATCH 0117/2301] [CIR][NFC] Move cleanup building logic as part of lexical scope creation --- clang/lib/CIR/CIRGenModule.cpp | 11 +---------- clang/lib/CIR/CIRGenModule.h | 10 +++++++++- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 9b35c281876f..764aa8c23503 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1493,18 +1493,9 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { // Initialize lexical scope information. { - LexicalScopeContext lexScope; + LexicalScopeContext lexScope{builder}; LexicalScopeGuard scopeGuard{*this, &lexScope}; - { - // Create the cleanup block but dont hook it up around just - // yet. - mlir::OpBuilder::InsertionGuard guard(builder); - mlir::Block *entryBlock = builder.getBlock(); - currLexScope->CleanupBlock = builder.createBlock(entryBlock->getParent()); - } - assert(builder.getInsertionBlock() && "Should be valid"); - // Declare all the function arguments in the symbol table. for (const auto nameValue : llvm::zip(FD->parameters(), entryBlock->getArguments())) { diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 28513aeb5448..3aedca20bde5 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -104,7 +104,15 @@ class CIRGenModule { // scopes that require cleanups. struct LexicalScopeContext { unsigned Depth = 0; - LexicalScopeContext() = default; + LexicalScopeContext(mlir::OpBuilder &builder) { + { + // Create the cleanup block but dont hook it up around just + // yet. + mlir::OpBuilder::InsertionGuard guard(builder); + CleanupBlock = builder.createBlock(builder.getBlock()->getParent()); + } + assert(builder.getInsertionBlock() && "Should be valid"); + } ~LexicalScopeContext() = default; // Block containing cleanup code for things initialized in this From 2a8f445d459502c6f905449da457aafe34de87fe Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 9 Feb 2022 18:45:38 -0800 Subject: [PATCH 0118/2301] [CIR] Add cleanup for all lexical scopes - Implemented in a way that folds the extra cleanup blocks away so as to keep current testing functionality pristine - Unifies and return/yield insertion as part of the cleanup - New testcases to be added once we complete if parsing support (which currently assumes only one basic block) --- clang/lib/CIR/CIRGenModule.cpp | 85 ++++++++++++++++++++-------------- clang/lib/CIR/CIRGenModule.h | 10 +++- 2 files changed, 59 insertions(+), 36 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 764aa8c23503..1e0e9543b39f 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -652,6 +652,7 @@ CIRGenModule::buildBranchThroughCleanup(JumpDest &Dest, LabelDecl *L, void CIRGenModule::LexicalScopeGuard::cleanup() { auto *localScope = CGM.currLexScope; + // Handle pending gotos and the solved labels in this scope. while (!localScope->PendingGotos.empty()) { auto gotoInfo = localScope->PendingGotos.back(); // FIXME: Currently only support resolving goto labels inside the @@ -665,8 +666,42 @@ void CIRGenModule::LexicalScopeGuard::cleanup() { g.setSuccessor(CGM.LabelMap[gotoInfo.second].getBlock()); localScope->PendingGotos.pop_back(); } - localScope->SolvedLabels.clear(); + + // Do not insert the cleanup block unecessarily, this doesn't really need + // to be here (should be a separate pass), but it helps keeping small + // testcases minimal for now. + auto &builder = CGM.builder; + if (!builder.getInsertionBlock()->isEntryBlock()) { + // If the current block doesn't have a terminator, add a branch to the + // cleanup block, where the actual cir.return/yield happens (cleanup block). + if (!builder.getBlock()->back().hasTrait()) + builder.create(builder.getBlock()->back().getLoc(), + localScope->CleanupBlock); + + // Set the insertion point to the end of the cleanup block and insert + // the return instruction. + builder.setInsertionPointToEnd(localScope->CleanupBlock); + } else { + assert(localScope->CleanupBlock->empty() && "not empty"); + assert( + (builder.getBlock()->empty() || + !builder.getBlock()->back().hasTrait()) && + "entry basic block already has a terminator?"); + // Do not even emit cleanup blocks. + localScope->CleanupBlock->erase(); + } + + auto *CurFn = CGM.CurCGF; + if (localScope->Depth == 0) { // end of function + // FIXME: this currently assumes only one cir.return in the function. + builder.create(CurFn->RetLoc ? *(CurFn->RetLoc) + : localScope->EndLoc, + CurFn->RetValue ? ArrayRef(CurFn->RetValue) + : ArrayRef()); + } else { // end of other local scope + builder.create(localScope->EndLoc); + } } mlir::LogicalResult CIRGenModule::buildGotoStmt(const GotoStmt &S) { @@ -1150,13 +1185,19 @@ mlir::LogicalResult CIRGenModule::buildIfOnBoolExpr(const Expr *cond, loc, condV, elseS, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { + auto bLoc = getLoc(thenS->getSourceRange().getBegin()); + auto eLoc = getLoc(thenS->getSourceRange().getEnd()); + LexicalScopeContext lexScope{builder, bLoc, eLoc}; + LexicalScopeGuard lexThenGuard{*this, &lexScope}; resThen = buildStmt(thenS, /*useCurrentScope=*/true); - builder.create(getLoc(thenS->getSourceRange().getEnd())); }, /*elseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { + auto bLoc = getLoc(elseS->getSourceRange().getBegin()); + auto eLoc = getLoc(elseS->getSourceRange().getEnd()); + LexicalScopeContext lexScope{builder, bLoc, eLoc}; + LexicalScopeGuard lexElseGuard{*this, &lexScope}; resElse = buildStmt(elseS, /*useCurrentScope=*/true); - builder.create(getLoc(elseS->getSourceRange().getEnd())); }); return mlir::LogicalResult::success(resThen.succeeded() && @@ -1201,12 +1242,14 @@ mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); // The if scope contains the full source range for IfStmt. auto scopeLoc = getLoc(S.getSourceRange()); + auto scopeLocBegin = getLoc(S.getSourceRange().getBegin()); auto scopeLocEnd = getLoc(S.getSourceRange().getEnd()); builder.create( scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { + LexicalScopeContext lexScope{builder, scopeLocBegin, scopeLocEnd}; + LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; res = ifStmtBuilder(); - builder.create(scopeLocEnd); }); return res; @@ -1415,8 +1458,9 @@ mlir::LogicalResult CIRGenModule::buildCompoundStmt(const CompoundStmt &S) { builder.create( locBegin, mlir::TypeRange(), /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { + LexicalScopeContext lexScope{builder, locBegin, locEnd}; + LexicalScopeGuard lexScopeGuard{*this, &lexScope}; res = compoundStmtBuilder(); - builder.create(locEnd); }); return res; @@ -1489,11 +1533,12 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { // function body, it will be used throughout the codegen to create // operations in this function. builder.setInsertionPointToStart(entryBlock); + auto FnBeginLoc = getLoc(FD->getBody()->getEndLoc()); auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); // Initialize lexical scope information. { - LexicalScopeContext lexScope{builder}; + LexicalScopeContext lexScope{builder, FnBeginLoc, FnEndLoc}; LexicalScopeGuard scopeGuard{*this, &lexScope}; // Declare all the function arguments in the symbol table. @@ -1522,34 +1567,6 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { return nullptr; } assert(builder.getInsertionBlock() && "Should be valid"); - - // Do not insert the cleanup block unecessarily, this doesn't really need - // to be here (should be a separate pass), but it helps keeping small - // testcases minimal for now. - if (!builder.getInsertionBlock()->isEntryBlock()) { - // If the current block doesn't have a terminator, add a branch to the - // cleanup block, where the actual cir.return happens (cleanup block). - if (!builder.getBlock()->back().hasTrait()) - builder.create(FnEndLoc, currLexScope->CleanupBlock); - - // Set the insertion point to the end of the cleanup block and insert - // the return instruction. - // FIXME: this currently assumes only one cir.return in the function. - builder.setInsertionPointToEnd(currLexScope->CleanupBlock); - } else { - // Do not even emit cleanup block - assert(currLexScope->CleanupBlock->empty() && "not empty"); - assert((builder.getBlock()->empty() || - !builder.getBlock() - ->back() - .hasTrait()) && - "entry basic block already has a terminator?"); - currLexScope->CleanupBlock->erase(); - } - - builder.create(CurCGF->RetLoc ? *(CurCGF->RetLoc) : FnEndLoc, - CurCGF->RetValue ? ArrayRef(CurCGF->RetValue) - : ArrayRef()); } if (mlir::failed(function.verifyBody())) diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 3aedca20bde5..d53deb8e70ba 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -104,7 +104,9 @@ class CIRGenModule { // scopes that require cleanups. struct LexicalScopeContext { unsigned Depth = 0; - LexicalScopeContext(mlir::OpBuilder &builder) { + LexicalScopeContext(mlir::OpBuilder &builder, mlir::Location b, + mlir::Location e) + : BeginLoc(b), EndLoc(e) { { // Create the cleanup block but dont hook it up around just // yet. @@ -125,6 +127,8 @@ class CIRGenModule { // Labels solved inside this scope. llvm::SmallPtrSet SolvedLabels; + + mlir::Location BeginLoc, EndLoc; }; class LexicalScopeGuard { @@ -133,8 +137,10 @@ class CIRGenModule { public: LexicalScopeGuard(CIRGenModule &c, LexicalScopeContext *L) : CGM(c) { - if (CGM.currLexScope) + if (CGM.currLexScope) { OldVal = CGM.currLexScope; + L->Depth++; + } CGM.currLexScope = L; } From 4c2ac6fbce73697d52647e5980f52692de79727f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 10 Feb 2022 14:29:08 -0800 Subject: [PATCH 0119/2301] [CIR] Add skeleton for lifetime analysis checks This hooks up -cir-lifetime-check to cir-tool, by introducing a simple pass that only prints hello. The check is implemented as a transform and depends on a Lifetime Analysis which is about to be introduced. Lots of boilerplate and cmake changes too. --- clang/test/CIR/Transforms/lifetime-check.cpp | 15 ++++++++ clang/tools/cir-tool/CMakeLists.txt | 2 ++ clang/tools/cir-tool/cir-tool.cpp | 4 +++ mlir/docs/Passes.md | 4 +++ .../Dialect/CIR/Analysis/LifetimeAnalysis.h | 0 mlir/include/mlir/Dialect/CIR/CMakeLists.txt | 8 +++++ mlir/include/mlir/Dialect/CIR/Passes.h | 32 +++++++++++++++++ mlir/include/mlir/Dialect/CIR/Passes.td | 24 +++++++++++++ .../Dialect/CIR/Transforms/CMakeLists.txt | 0 mlir/lib/Dialect/CIR/Analysis/CMakeLists.txt | 11 ++++++ mlir/lib/Dialect/CIR/Analysis/Lifetime.cpp | 0 mlir/lib/Dialect/CIR/CMakeLists.txt | 2 ++ .../lib/Dialect/CIR/Transforms/CMakeLists.txt | 17 +++++++++ .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 36 +++++++++++++++++++ mlir/lib/Dialect/CIR/Transforms/PassDetail.h | 29 +++++++++++++++ 15 files changed, 184 insertions(+) create mode 100644 clang/test/CIR/Transforms/lifetime-check.cpp create mode 100644 mlir/include/mlir/Dialect/CIR/Analysis/LifetimeAnalysis.h create mode 100644 mlir/include/mlir/Dialect/CIR/Passes.h create mode 100644 mlir/include/mlir/Dialect/CIR/Passes.td create mode 100644 mlir/include/mlir/Dialect/CIR/Transforms/CMakeLists.txt create mode 100644 mlir/lib/Dialect/CIR/Analysis/CMakeLists.txt create mode 100644 mlir/lib/Dialect/CIR/Analysis/Lifetime.cpp create mode 100644 mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt create mode 100644 mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp create mode 100644 mlir/lib/Dialect/CIR/Transforms/PassDetail.h diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp new file mode 100644 index 000000000000..fa755fca0385 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: cir-tool %t.cir -cir-lifetime-check -o %t-out.cir 2>&1 | FileCheck %s + +int *basic() { + int *p = nullptr; + { + int x = 0; + p = &x; + *p = 42; + } + *p = 42; + return p; +} + +// CHECK: Hello Lifetime World \ No newline at end of file diff --git a/clang/tools/cir-tool/CMakeLists.txt b/clang/tools/cir-tool/CMakeLists.txt index ca65769d5455..aeea0bd3be36 100644 --- a/clang/tools/cir-tool/CMakeLists.txt +++ b/clang/tools/cir-tool/CMakeLists.txt @@ -12,6 +12,8 @@ target_link_libraries(cir-tool PRIVATE clangCIR MLIRAnalysis MLIRCIR + MLIRCIRAnalysis + MLIRCIRTransforms MLIRDialect MLIRIR MLIRMemRefDialect diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index d43576f1490c..00fb59388166 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -14,6 +14,7 @@ #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/CIR/Passes.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" @@ -35,6 +36,9 @@ int main(int argc, char **argv) { ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertCIRToMemRefPass(); }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return mlir::createLifetimeCheckPass(); + }); mlir::registerTransformsPasses(); diff --git a/mlir/docs/Passes.md b/mlir/docs/Passes.md index 6a18e06593e8..242b11a824c1 100644 --- a/mlir/docs/Passes.md +++ b/mlir/docs/Passes.md @@ -123,3 +123,7 @@ This document describes the available MLIR passes and their contracts. ## XeGPU Dialect Passes [include "XeGPUPasses.md"] + +## CIR Dialect Passes + +[include "CIRPasses.md"] diff --git a/mlir/include/mlir/Dialect/CIR/Analysis/LifetimeAnalysis.h b/mlir/include/mlir/Dialect/CIR/Analysis/LifetimeAnalysis.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/mlir/include/mlir/Dialect/CIR/CMakeLists.txt b/mlir/include/mlir/Dialect/CIR/CMakeLists.txt index f33061b2d87c..ece82e6a3676 100644 --- a/mlir/include/mlir/Dialect/CIR/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/CIR/CMakeLists.txt @@ -1 +1,9 @@ add_subdirectory(IR) + +set(LLVM_TARGET_DEFINITIONS Passes.td) +mlir_tablegen(Passes.h.inc -gen-pass-decls -name CIR) +mlir_tablegen(Passes.capi.h.inc -gen-pass-capi-header --prefix CIR) +mlir_tablegen(Passes.capi.cpp.inc -gen-pass-capi-impl --prefix CIR) +add_public_tablegen_target(MLIRCIRPassIncGen) + +add_mlir_doc(Passes CIRPasses ./ -gen-pass-doc) diff --git a/mlir/include/mlir/Dialect/CIR/Passes.h b/mlir/include/mlir/Dialect/CIR/Passes.h new file mode 100644 index 000000000000..1357a3f3422d --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/Passes.h @@ -0,0 +1,32 @@ +//===- Passes.h - CIR pass entry points -------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This header file defines prototypes that expose pass constructors. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_PASSES_H_ +#define MLIR_DIALECT_CIR_PASSES_H_ + +#include "mlir/Pass/Pass.h" + +namespace mlir { + +std::unique_ptr createLifetimeCheckPass(); + +//===----------------------------------------------------------------------===// +// Registration +//===----------------------------------------------------------------------===// + +/// Generate the code for registering passes. +#define GEN_PASS_REGISTRATION +#include "mlir/Dialect/CIR/Passes.h.inc" + +} // namespace mlir + +#endif // MLIR_DIALECT_CIR_PASSES_H_ diff --git a/mlir/include/mlir/Dialect/CIR/Passes.td b/mlir/include/mlir/Dialect/CIR/Passes.td new file mode 100644 index 000000000000..f17f52e440e4 --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/Passes.td @@ -0,0 +1,24 @@ +//===-- Passes.td - CIR pass definition file ---------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_PASSES +#define MLIR_DIALECT_CIR_PASSES + +include "mlir/Pass/PassBase.td" + +def LifetimeCheck : Pass<"cir-lifetime-check"> { + let summary = "Check lifetime safety and generate diagnostics"; + let description = [{ + This pass relies on a lifetime analysis pass and uses the diagnostics + mechanism to report to the user. It does not change any code. + }]; + let constructor = "mlir::createLifetimeCheckPass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + +#endif // MLIR_DIALECT_CIR_PASSES diff --git a/mlir/include/mlir/Dialect/CIR/Transforms/CMakeLists.txt b/mlir/include/mlir/Dialect/CIR/Transforms/CMakeLists.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/mlir/lib/Dialect/CIR/Analysis/CMakeLists.txt b/mlir/lib/Dialect/CIR/Analysis/CMakeLists.txt new file mode 100644 index 000000000000..7094c3d54caf --- /dev/null +++ b/mlir/lib/Dialect/CIR/Analysis/CMakeLists.txt @@ -0,0 +1,11 @@ +add_mlir_dialect_library(MLIRCIRAnalysis + Lifetime.cpp + + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/CIR + + LINK_LIBS PUBLIC + MLIRAnalysis + MLIRIR + ) + diff --git a/mlir/lib/Dialect/CIR/Analysis/Lifetime.cpp b/mlir/lib/Dialect/CIR/Analysis/Lifetime.cpp new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/mlir/lib/Dialect/CIR/CMakeLists.txt b/mlir/lib/Dialect/CIR/CMakeLists.txt index f33061b2d87c..b78bf46d6d90 100644 --- a/mlir/lib/Dialect/CIR/CMakeLists.txt +++ b/mlir/lib/Dialect/CIR/CMakeLists.txt @@ -1 +1,3 @@ add_subdirectory(IR) +add_subdirectory(Analysis) +add_subdirectory(Transforms) \ No newline at end of file diff --git a/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt b/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt new file mode 100644 index 000000000000..11a7c49d04f3 --- /dev/null +++ b/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt @@ -0,0 +1,17 @@ +add_mlir_dialect_library(MLIRCIRTransforms + LifetimeCheck.cpp + + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/CIR + + DEPENDS + MLIRCIRPassIncGen + + LINK_LIBS PUBLIC + + MLIRAnalysis + MLIRIR + MLIRCIR + MLIRCIRAnalysis + MLIRPass +) diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp new file mode 100644 index 000000000000..433fc2d790c9 --- /dev/null +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -0,0 +1,36 @@ +//===- Lifetimecheck.cpp - emit diagnostic checks for lifetime violations -===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/CIR/Passes.h" + +#include "PassDetail.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" + +using namespace mlir; + +namespace { +struct LifetimeCheckPass : public LifetimeCheckBase { + explicit LifetimeCheckPass(raw_ostream &os = llvm::errs()) : os(os) {} + + // Prints the resultant operation statistics post iterating over the module. + void runOnOperation() override; + + // Print lifetime diagnostics + void printDiagnostics(); + +private: + raw_ostream &os; +}; +} // namespace + +void LifetimeCheckPass::runOnOperation() { printDiagnostics(); } +void LifetimeCheckPass::printDiagnostics() { os << "Hello Lifetime World\n"; } + +std::unique_ptr mlir::createLifetimeCheckPass() { + return std::make_unique(); +} \ No newline at end of file diff --git a/mlir/lib/Dialect/CIR/Transforms/PassDetail.h b/mlir/lib/Dialect/CIR/Transforms/PassDetail.h new file mode 100644 index 000000000000..4942e34f284b --- /dev/null +++ b/mlir/lib/Dialect/CIR/Transforms/PassDetail.h @@ -0,0 +1,29 @@ +//===- PassDetail.h - CIR Pass class details --------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef DIALECT_CIR_TRANSFORMS_PASSDETAIL_H_ +#define DIALECT_CIR_TRANSFORMS_PASSDETAIL_H_ + +#include "mlir/IR/Dialect.h" +#include "mlir/Pass/Pass.h" + +namespace mlir { +// Forward declaration from Dialect.h +template +void registerDialect(DialectRegistry ®istry); + +namespace cir { +class CIRDialect; +} // namespace cir + +#define GEN_PASS_CLASSES +#include "mlir/Dialect/CIR/Passes.h.inc" + +} // namespace mlir + +#endif // DIALECT_CIR_TRANSFORMS_PASSDETAIL_H_ From 6d33c3cdafc5d1c359fdf54216f3dc56a25e2030 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 10 Feb 2022 22:12:19 -0500 Subject: [PATCH 0120/2301] [CIR] Pass in CodeGenOptions to CIRGenerator and on down We'll have future uses of this and thus might as well slot it in now. Include a usage just asserting that CoverageMapping isn't enabled since we also obviously don't support that either. --- clang/include/clang/CIR/CIRGenerator.h | 5 ++++- clang/lib/CIR/CIRGenModule.cpp | 6 ++++-- clang/lib/CIR/CIRGenModule.h | 5 ++++- clang/lib/CIR/CIRGenerator.cpp | 4 ++-- clang/lib/CIRFrontendAction/CIRGenAction.cpp | 2 +- clang/lib/Sema/CIRBasedWarnings.cpp | 4 +++- 6 files changed, 18 insertions(+), 8 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 1c61ad57e1bb..c1e65a173c7e 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -15,6 +15,7 @@ #define CLANG_CIRGENERATOR_H_ #include "clang/AST/ASTConsumer.h" +#include "clang/Basic/CodeGenOptions.h" #include "llvm/Support/ToolOutputFile.h" #include @@ -36,7 +37,7 @@ class CIRGenTypes; class CIRGenerator : public clang::ASTConsumer { public: - CIRGenerator(); + CIRGenerator(const clang::CodeGenOptions &CGO); ~CIRGenerator(); void Initialize(clang::ASTContext &Context) override; bool EmitFunction(const clang::FunctionDecl *FD); @@ -55,6 +56,8 @@ class CIRGenerator : public clang::ASTConsumer { std::unique_ptr mlirCtx; std::unique_ptr CGM; + const clang::CodeGenOptions codeGenOpts; // Intentionally copied in. + clang::ASTContext *astCtx; }; diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 1e0e9543b39f..862badeaa0ff 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -70,8 +70,9 @@ using llvm::SmallVector; using llvm::StringRef; CIRGenModule::CIRGenModule(mlir::MLIRContext &context, - clang::ASTContext &astctx) - : builder(&context), astCtx(astctx) { + clang::ASTContext &astctx, + const clang::CodeGenOptions &CGO) + : builder(&context), astCtx(astctx), codeGenOpts(CGO) { theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); genTypes = std::make_unique(astCtx, this->getBuilder()); } @@ -1481,6 +1482,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { assert(false && "Not yet implemented"); case Decl::Function: buildFunction(cast(decl)); + assert(!codeGenOpts.CoverageMapping && "Coverage Mapping NYI"); break; case Decl::CXXRecord: { CXXRecordDecl *crd = cast(decl); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index d53deb8e70ba..cd2e7bdbb01d 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -41,7 +41,8 @@ namespace cir { /// accurate analysis and transformation based on these high level semantics. class CIRGenModule { public: - CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx); + CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, + const clang::CodeGenOptions &CGO); CIRGenModule(CIRGenModule &) = delete; CIRGenModule &operator=(CIRGenModule &) = delete; ~CIRGenModule() = default; @@ -73,6 +74,8 @@ class CIRGenModule { /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; + const clang::CodeGenOptions &codeGenOpts; + /// Per-module type mapping from clang AST to CIR. std::unique_ptr genTypes; diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index 4c05bea15f9b..04df9125c90d 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -24,7 +24,7 @@ using namespace cir; using namespace clang; -CIRGenerator::CIRGenerator() = default; +CIRGenerator::CIRGenerator(const CodeGenOptions &CGO) : codeGenOpts{CGO} {} CIRGenerator::~CIRGenerator() = default; void CIRGenerator::Initialize(ASTContext &astCtx) { @@ -36,7 +36,7 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); - CGM = std::make_unique(*mlirCtx.get(), astCtx); + CGM = std::make_unique(*mlirCtx.get(), astCtx, codeGenOpts); } void CIRGenerator::verifyModule() { CGM->verifyModule(); } diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index 10ffd86bcf31..feb86f3d6a8e 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -90,7 +90,7 @@ class CIRGenConsumer : public clang::ASTConsumer { headerSearchOptions(headerSearchOptions), codeGenOptions(codeGenOptions), targetOptions(targetOptions), langOptions(langOptions), outputStream(std::move(os)), - gen(std::make_unique()) { + gen(std::make_unique(codeGenOptions)) { // This is required to match the constructors used during // CodeGenAction. Ultimately, this is required because we want to use // the same utility functions in BackendUtil.h for handling llvm diff --git a/clang/lib/Sema/CIRBasedWarnings.cpp b/clang/lib/Sema/CIRBasedWarnings.cpp index 9b263c32560c..0964bc2f8b4b 100644 --- a/clang/lib/Sema/CIRBasedWarnings.cpp +++ b/clang/lib/Sema/CIRBasedWarnings.cpp @@ -67,7 +67,9 @@ sema::CIRBasedWarnings::CIRBasedWarnings(Sema &s) : S(s) { DefaultPolicy.enableConsumedAnalysis = isEnabled(D, warn_use_in_invalid_state); - CIRGen = std::make_unique(); + // TODO: figure out a way to get this properly. This isn't actually reasonable + // to ask for prior to codegen, so we're just subbing in a blank one. + CIRGen = std::make_unique(CodeGenOptions()); CIRGen->Initialize(S.getASTContext()); } From eb6e831d8fd5da16eab3aa652198242ca2c82e15 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 10 Feb 2022 22:30:06 -0500 Subject: [PATCH 0121/2301] [CIR] Pass the CIRGenModule to the CIRGenFunction and use it to get the ASTContext This will be used in a future patch. This is split up for readability purposes for the larger diff. --- clang/lib/CIR/CIRGenFunction.cpp | 7 ++++++- clang/lib/CIR/CIRGenFunction.h | 6 +++++- clang/lib/CIR/CIRGenModule.cpp | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index cf8be74fe477..55514b2b32f6 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -11,11 +11,16 @@ //===----------------------------------------------------------------------===// #include "CIRGenFunction.h" +#include "CIRGenModule.h" using namespace cir; using namespace clang; -CIRGenFunction::CIRGenFunction() = default; +CIRGenFunction::CIRGenFunction(CIRGenModule &CGM) : CGM{CGM} {} + +clang::ASTContext &CIRGenFunction::getContext() const { + return CGM.getASTContext(); +} TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { type = type.getCanonicalType(); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 8f8c2d984cb9..4cdffdd0a11d 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -22,6 +22,7 @@ class Expr; } // namespace clang namespace cir { +class CIRGenModule; // FIXME: for now we are reusing this from lib/Clang/CodeGenFunction.h, which // isn't available in the include dir. Same for getEvaluationKind below. @@ -39,6 +40,9 @@ class CIRGenFunction { mlir::Type FnRetTy; clang::QualType FnRetQualTy; + CIRGenModule &CGM; + clang::ASTContext &getContext() const; + /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); @@ -50,7 +54,7 @@ class CIRGenFunction { return getEvaluationKind(T) == TEK_Aggregate; } - CIRGenFunction(); + CIRGenFunction(CIRGenModule &CGM); }; } // namespace cir diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 862badeaa0ff..85f39aad7bda 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1502,7 +1502,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { } mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { - CIRGenFunction CGF; + CIRGenFunction CGF{*this}; CurCGF = &CGF; // Create a scope in the symbol table to hold variable declarations. From 14259ab4fdf69229e4ebaf8b51590f783a7307bc Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 10 Feb 2022 22:45:11 -0500 Subject: [PATCH 0122/2301] [CIR] Add a reference to the LangOptions for CIRGenModule This'll be used in a later patch and is split apart for readability purposes. --- clang/lib/CIR/CIRGenModule.cpp | 3 ++- clang/lib/CIR/CIRGenModule.h | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 85f39aad7bda..98724347e107 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -72,7 +72,8 @@ using llvm::StringRef; CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &CGO) - : builder(&context), astCtx(astctx), codeGenOpts(CGO) { + : builder(&context), astCtx(astctx), codeGenOpts(CGO), + langOpts(astctx.getLangOpts()) { theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); genTypes = std::make_unique(astCtx, this->getBuilder()); } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index cd2e7bdbb01d..4cfd21b5b5ee 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -79,6 +79,7 @@ class CIRGenModule { /// Per-module type mapping from clang AST to CIR. std::unique_ptr genTypes; + const clang::LangOptions &langOpts; /// ------- /// Goto /// ------- @@ -199,6 +200,7 @@ class CIRGenModule { mlir::ModuleOp getModule() { return theModule; } mlir::OpBuilder &getBuilder() { return builder; } clang::ASTContext &getASTContext() { return astCtx; } + const clang::LangOptions &getLangOpts() const { return langOpts; } /// Helpers to convert Clang's SourceLocation to a MLIR Location. mlir::Location getLoc(clang::SourceLocation SLoc); From bf3970b7c410be235fb90d6b6c403012b3f7641b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 10 Feb 2022 22:45:50 -0500 Subject: [PATCH 0123/2301] [CIR] Add a copy of the SanitizerSet to CIRGenFunction This'll be used in a future patch and is split apart for readability. --- clang/lib/CIR/CIRGenFunction.cpp | 3 ++- clang/lib/CIR/CIRGenFunction.h | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 55514b2b32f6..1b43a96be767 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -16,7 +16,8 @@ using namespace cir; using namespace clang; -CIRGenFunction::CIRGenFunction(CIRGenModule &CGM) : CGM{CGM} {} +CIRGenFunction::CIRGenFunction(CIRGenModule &CGM) + : CGM{CGM}, SanOpts(CGM.getLangOpts().Sanitize) {} clang::ASTContext &CIRGenFunction::getContext() const { return CGM.getASTContext(); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 4cdffdd0a11d..b7725bb387a3 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -43,6 +43,9 @@ class CIRGenFunction { CIRGenModule &CGM; clang::ASTContext &getContext() const; + /// Sanitizers enabled for this function. + clang::SanitizerSet SanOpts; + /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); From 7f7e8cedf9559092e5624042affc25cd7c39e543 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 10 Feb 2022 22:48:47 -0500 Subject: [PATCH 0124/2301] [CIR] Add a referene to the clang::TargetInfo for CIRGenModule This'll be used in a future patch and is split off for readability's sake. --- clang/lib/CIR/CIRGenModule.cpp | 4 ++-- clang/lib/CIR/CIRGenModule.h | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 98724347e107..f5e0c6d3113a 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -72,8 +72,8 @@ using llvm::StringRef; CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &CGO) - : builder(&context), astCtx(astctx), codeGenOpts(CGO), - langOpts(astctx.getLangOpts()) { + : builder(&context), astCtx(astctx), target(astCtx.getTargetInfo()), + codeGenOpts(CGO), langOpts(astctx.getLangOpts()) { theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); genTypes = std::make_unique(astCtx, this->getBuilder()); } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 4cfd21b5b5ee..4b7d8d0fb9f6 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -74,6 +74,7 @@ class CIRGenModule { /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; + const clang::TargetInfo ⌖ const clang::CodeGenOptions &codeGenOpts; /// Per-module type mapping from clang AST to CIR. @@ -200,6 +201,7 @@ class CIRGenModule { mlir::ModuleOp getModule() { return theModule; } mlir::OpBuilder &getBuilder() { return builder; } clang::ASTContext &getASTContext() { return astCtx; } + const clang::TargetInfo &getTarget() const { return target; } const clang::LangOptions &getLangOpts() const { return langOpts; } /// Helpers to convert Clang's SourceLocation to a MLIR Location. From d7401212c4b1c1c74a3fa57200cd42644e3af653 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 10 Feb 2022 22:52:23 -0500 Subject: [PATCH 0125/2301] [CIR] Keep a ref to the CIRGenModule in CIRGenTypes CIRGenTypes will need to know module specific information in a coming patch. --- clang/lib/CIR/CIRGenModule.cpp | 2 +- clang/lib/CIR/CIRGenTypes.cpp | 5 +++-- clang/lib/CIR/CIRGenTypes.h | 9 ++++++--- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index f5e0c6d3113a..d575c907c2e9 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -75,7 +75,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, : builder(&context), astCtx(astctx), target(astCtx.getTargetInfo()), codeGenOpts(CGO), langOpts(astctx.getLangOpts()) { theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); - genTypes = std::make_unique(astCtx, this->getBuilder()); + genTypes = std::make_unique(*this); } mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index ee34a802eec7..575898664557 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -1,4 +1,5 @@ #include "CIRGenTypes.h" +#include "CIRGenModule.h" #include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Builders.h" @@ -13,8 +14,8 @@ using namespace clang; using namespace cir; -CIRGenTypes::CIRGenTypes(ASTContext &Ctx, mlir::OpBuilder &B) - : Context(Ctx), Builder(B) {} +CIRGenTypes::CIRGenTypes(CIRGenModule &cgm) + : Context(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm} {} CIRGenTypes::~CIRGenTypes() = default; std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 84d1d938f1f3..261d20de0014 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -60,20 +60,23 @@ class Type; class OpBuilder; namespace cir { class StructType; -} +} // namespace cir } // namespace mlir +namespace cir { +class CIRGenModule; + /// This class organizes the cross-module state that is used while lowering /// AST types to CIR types. -namespace cir { class CIRGenTypes { clang::ASTContext &Context; mlir::OpBuilder &Builder; + CIRGenModule &CGM; llvm::DenseMap recordDeclTypes; public: - CIRGenTypes(clang::ASTContext &Ctx, mlir::OpBuilder &B); + CIRGenTypes(CIRGenModule &cgm); ~CIRGenTypes(); /// This map keeps cache of llvm::Types and maps clang::Type to From 2b03360b7f0b426909da28c201a44845a2f336b5 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 10 Feb 2022 23:16:58 -0500 Subject: [PATCH 0126/2301] [CIR] Add a buildGlobal func to CIRGenModule to support function prototypes This mostly just wraps buildFunction for now while asserting that we weren't trying to compile anything we don't yet support. But we also weren't checking for whether or not this was just a declaration prior to this and thus would fail on a simple function declaration. --- clang/lib/CIR/CIRGenModule.cpp | 41 +++++++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenModule.h | 14 ++++++++++++ clang/test/CIR/CodeGen/basic.c | 2 ++ 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index d575c907c2e9..dbab8ed04a40 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -33,6 +33,7 @@ #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" +#include "clang/AST/GlobalDecl.h" #include "clang/AST/ParentMap.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/RecursiveASTVisitor.h" @@ -1477,12 +1478,50 @@ CIRGenModule::buildCompoundStmtWithoutScope(const CompoundStmt &S) { return mlir::success(); } +bool CIRGenModule::MustBeEmitted(const ValueDecl *Global) { + // Never defer when EmitAllDecls is specified. + assert(!langOpts.EmitAllDecls && "EmitAllDecls NYI"); + assert(!codeGenOpts.KeepStaticConsts && "KeepStaticConsts NYI"); + + return getASTContext().DeclMustBeEmitted(Global); +} + +bool CIRGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { + assert(!langOpts.OpenMP && "NYI"); + auto const *FD = dyn_cast(Global); + assert(FD && "Only FunctionDecl should hit this path so far."); + assert(!FD->isTemplated() && "Templates NYI"); + + return true; +} + +void CIRGenModule::buildGlobal(GlobalDecl GD) { + const auto *Global = cast(GD.getDecl()); + + assert(!Global->hasAttr() && "NYI"); + assert(!Global->hasAttr() && "NYI"); + assert(!Global->hasAttr() && "NYI"); + assert(!langOpts.CUDA && "NYI"); + assert(!langOpts.OpenMP && "NYI"); + + const auto *FD = dyn_cast(Global); + assert(FD && "Only FunctionDecl supported as of here"); + if (!FD->doesThisDeclarationHaveABody()) { + assert(!FD->doesDeclarationForceExternallyVisibleDefinition() && "NYI"); + return; + } + + assert(MustBeEmitted(Global) || + MayBeEmittedEagerly(Global) && "Delayed emission NYI"); + + buildFunction(cast(GD.getDecl())); +} void CIRGenModule::buildTopLevelDecl(Decl *decl) { switch (decl->getKind()) { default: assert(false && "Not yet implemented"); case Decl::Function: - buildFunction(cast(decl)); + buildGlobal(cast(decl)); assert(!codeGenOpts.CoverageMapping && "Coverage Mapping NYI"); break; case Decl::CXXRecord: { diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 4b7d8d0fb9f6..498825106e47 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -421,11 +421,25 @@ class CIRGenModule { void buildTopLevelDecl(clang::Decl *decl); + /// Emit code for a single global function or var decl. Forward declarations + /// are emitted lazily. + void buildGlobal(clang::GlobalDecl D); + // Emit a new function and add it to the MLIR module. mlir::FuncOp buildFunction(const clang::FunctionDecl *FD); mlir::Type getCIRType(const clang::QualType &type); + /// Determine whether the definition must be emitted; if this returns \c + /// false, the definition can be emitted lazily if it's used. + bool MustBeEmitted(const clang::ValueDecl *D); + + /// Determine whether the definition can be emitted eagerly, or should be + /// delayed until the end of the translation unit. This is relevant for + /// definitions whose linkage can change, e.g. implicit function instantions + /// which may later be explicitly instantiated. + bool MayBeEmittedEagerly(const clang::ValueDecl *D); + void verifyModule(); }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 4d485ca13762..6f9859bf8f02 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -2,6 +2,8 @@ // RUN: FileCheck --input-file=%t.cir %s // XFAIL: * +int foo(int i); + int foo(int i) { return i; } From 0670512b54019ac74fad6890391641cde1e83f0d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Feb 2022 19:01:03 -0500 Subject: [PATCH 0127/2301] [CIR] Have -emit-cir imply -fenable-clangir If you are requesting cir from clang you know you're using clangir's CIRGen pipeline. It doesn't make sense to require both flags here. --- clang/lib/Driver/Driver.cpp | 5 +---- clang/lib/Driver/ToolChains/Clang.cpp | 3 +++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index c0f10d7a6af0..93c8dc4ad16d 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -5081,11 +5081,8 @@ Action *Driver::ConstructPhaseAction( return C.MakeAction(Input, types::TY_Remap); if (Args.hasArg(options::OPT_emit_ast)) return C.MakeAction(Input, types::TY_AST); - if (Args.hasArg(options::OPT_emit_cir)) { - assert(Args.hasArg(options::OPT_fclangir) && - "Clang only uses ClangIR with the -fclangir flag"); + if (Args.hasArg(options::OPT_emit_cir)) return C.MakeAction(Input, types::TY_CIR); - } if (Args.hasArg(options::OPT_module_file_info)) return C.MakeAction(Input, types::TY_ModuleFile); if (Args.hasArg(options::OPT_verify_pch)) diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 3b64eb113fbd..7d302f70af94 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5241,6 +5241,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } } + if (Args.hasArg(options::OPT_fclangir) || Args.hasArg(options::OPT_emit_cir)) + CmdArgs.push_back("-fclangir"); + if (IsOpenMPDevice) { // We have to pass the triple of the host if compiling for an OpenMP device. std::string NormalizedTriple = From 7aa00a1e3da59e7d60be2e4fbe19072bb51400e9 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 17 Feb 2022 00:33:05 -0500 Subject: [PATCH 0128/2301] [CIR] Disable CIRBasedWarnings for now CIRGenerator needs a CodeGenOptions (and potentially other things in the future) that don't exist here for Sema to pass in. We'll have to think about how to supplement this. But since we aren't yet using it just go ahead and remove it for now. --- clang/lib/Sema/CIRBasedWarnings.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/clang/lib/Sema/CIRBasedWarnings.cpp b/clang/lib/Sema/CIRBasedWarnings.cpp index 0964bc2f8b4b..88ba1013d047 100644 --- a/clang/lib/Sema/CIRBasedWarnings.cpp +++ b/clang/lib/Sema/CIRBasedWarnings.cpp @@ -67,8 +67,6 @@ sema::CIRBasedWarnings::CIRBasedWarnings(Sema &s) : S(s) { DefaultPolicy.enableConsumedAnalysis = isEnabled(D, warn_use_in_invalid_state); - // TODO: figure out a way to get this properly. This isn't actually reasonable - // to ask for prior to codegen, so we're just subbing in a blank one. CIRGen = std::make_unique(CodeGenOptions()); CIRGen->Initialize(S.getASTContext()); } From 13cdf2a2130e0fe8f7542ca760b5603f753b4311 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 17 Feb 2022 00:34:12 -0500 Subject: [PATCH 0129/2301] [CIR][NFC] Enable StandardOps dialect Just enable the StandardOps dialect so that we can build CallOps. AFAIK this is actually being removed upstream so this will be fun to rebase. --- clang/lib/CIR/CIRGenerator.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index 04df9125c90d..aeac8e003a3f 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -35,6 +35,7 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { mlirCtx = std::make_unique(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); + mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); CGM = std::make_unique(*mlirCtx.get(), astCtx, codeGenOpts); } From 246e9540c534d2abf73120da139a5c7ebba0b6cb Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 17 Feb 2022 00:42:40 -0500 Subject: [PATCH 0130/2301] [CIR] Add a getter for CIRGenModule's codeGenOpts --- clang/lib/CIR/CIRGenModule.h | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 498825106e47..739f0bfe84db 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -202,6 +202,7 @@ class CIRGenModule { mlir::OpBuilder &getBuilder() { return builder; } clang::ASTContext &getASTContext() { return astCtx; } const clang::TargetInfo &getTarget() const { return target; } + const clang::CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; } const clang::LangOptions &getLangOpts() const { return langOpts; } /// Helpers to convert Clang's SourceLocation to a MLIR Location. From f0d80c86fbd0def73dace3e34fba8fcc212189ff Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 17 Feb 2022 00:59:33 -0500 Subject: [PATCH 0131/2301] [CIR][NFC] Have CIRGenModule own the CIRGenTypes by value CIRGenModule's lifetime matches CIRGenTypes, so it makes sense to have it by held by value. This also helps fix an issue with include ordering in a later commit. --- clang/lib/CIR/CIRGenModule.cpp | 5 ++--- clang/lib/CIR/CIRGenModule.h | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index dbab8ed04a40..0b2f3009dc5b 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -74,9 +74,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &CGO) : builder(&context), astCtx(astctx), target(astCtx.getTargetInfo()), - codeGenOpts(CGO), langOpts(astctx.getLangOpts()) { + codeGenOpts(CGO), genTypes(*this), langOpts(astctx.getLangOpts()) { theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); - genTypes = std::make_unique(*this); } mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { @@ -1620,7 +1619,7 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { } mlir::Type CIRGenModule::getCIRType(const QualType &type) { - return genTypes->ConvertType(type); + return genTypes.ConvertType(type); } void CIRGenModule::verifyModule() { diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 739f0bfe84db..20f9580fb680 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -78,7 +78,7 @@ class CIRGenModule { const clang::CodeGenOptions &codeGenOpts; /// Per-module type mapping from clang AST to CIR. - std::unique_ptr genTypes; + CIRGenTypes genTypes; const clang::LangOptions &langOpts; /// ------- @@ -203,6 +203,7 @@ class CIRGenModule { clang::ASTContext &getASTContext() { return astCtx; } const clang::TargetInfo &getTarget() const { return target; } const clang::CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; } + CIRGenTypes &getTypes() { return genTypes; } const clang::LangOptions &getLangOpts() const { return langOpts; } /// Helpers to convert Clang's SourceLocation to a MLIR Location. From 4ada98f98d927990842f78cf58d9de5e072ae082 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 17 Feb 2022 01:10:14 -0500 Subject: [PATCH 0132/2301] [CIR][NFC] Reorder some declarations to match clang a bit better This is superficial for now, but in a later diff there becomes a dependency on the order in which CIRGenModule builds it's components. So organize it here while it's NFC. --- clang/lib/CIR/CIRGenModule.cpp | 8 ++++---- clang/lib/CIR/CIRGenModule.h | 33 ++++++++++++++++++--------------- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 0b2f3009dc5b..142d59d67edd 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -73,10 +73,10 @@ using llvm::StringRef; CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &CGO) - : builder(&context), astCtx(astctx), target(astCtx.getTargetInfo()), - codeGenOpts(CGO), genTypes(*this), langOpts(astctx.getLangOpts()) { - theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); -} + : builder(&context), astCtx(astctx), langOpts(astctx.getLangOpts()), + codeGenOpts(CGO), theModule{mlir::ModuleOp::create( + builder.getUnknownLoc())}, + target(astCtx.getTargetInfo()), genTypes{*this} {} mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { const SourceManager &SM = astCtx.getSourceManager(); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 20f9580fb680..6d967e939599 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -40,11 +40,13 @@ namespace cir { /// preserving the semantics of the language and (hopefully) allow to perform /// accurate analysis and transformation based on these high level semantics. class CIRGenModule { + CIRGenModule(CIRGenModule &) = delete; + CIRGenModule &operator=(CIRGenModule &) = delete; + public: CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &CGO); - CIRGenModule(CIRGenModule &) = delete; - CIRGenModule &operator=(CIRGenModule &) = delete; + ~CIRGenModule() = default; using SymTableTy = llvm::ScopedHashTable; @@ -52,14 +54,25 @@ class CIRGenModule { llvm::ScopedHashTableScope; private: - /// A "module" matches a c/cpp source file: containing a list of functions. - mlir::ModuleOp theModule; - /// The builder is a helper class to create IR inside a function. The /// builder is stateful, in particular it keeps an "insertion point": this /// is where the next operations will be introduced. mlir::OpBuilder builder; + /// Hold Clang AST information. + clang::ASTContext &astCtx; + + const clang::LangOptions &langOpts; + + const clang::CodeGenOptions &codeGenOpts; + + /// A "module" matches a c/cpp source file: containing a list of functions. + mlir::ModuleOp theModule; + + const clang::TargetInfo ⌖ + /// Per-module type mapping from clang AST to CIR. + CIRGenTypes genTypes; + /// The symbol table maps a variable name to a value in the current scope. /// Entering a function creates a new scope, and the function arguments are /// added to the mapping. When the processing of a function is terminated, @@ -67,20 +80,10 @@ class CIRGenModule { /// dropped. SymTableTy symbolTable; - /// Hold Clang AST information. - clang::ASTContext &astCtx; - /// Per-function codegen information. Updated everytime buildCIR is called /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; - const clang::TargetInfo ⌖ - const clang::CodeGenOptions &codeGenOpts; - - /// Per-module type mapping from clang AST to CIR. - CIRGenTypes genTypes; - - const clang::LangOptions &langOpts; /// ------- /// Goto /// ------- From e7582ac3ee006207ddac3ac38b83276c22803fc6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 17 Feb 2022 01:16:50 -0500 Subject: [PATCH 0133/2301] [CIR][NFC] Move buildAnyExpr to CIRGenFunction This and a handful of other member functions of CIRGenModule properly belong in CIRGenFunction to prevent upwards calling (CIRGenFunction -> CIRGenModule). --- clang/lib/CIR/CIRGenFunction.cpp | 15 +++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 9 +++++++++ clang/lib/CIR/CIRGenModule.cpp | 18 +----------------- clang/lib/CIR/CIRGenModule.h | 6 ------ 4 files changed, 25 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 1b43a96be767..cf4328dbb2ed 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -81,3 +81,18 @@ TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { llvm_unreachable("unknown type kind!"); } } +/// Emit code to compute the specified expression which +/// can have any type. The result is returned as an RValue struct. +/// TODO: if this is an aggregate expression, add a AggValueSlot to indicate +/// where the result should be returned. +RValue CIRGenFunction::buildAnyExpr(const Expr *E) { + switch (CIRGenFunction::getEvaluationKind(E->getType())) { + case TEK_Scalar: + return RValue::get(CGM.buildScalarExpr(E)); + case TEK_Complex: + assert(0 && "not implemented"); + case TEK_Aggregate: + assert(0 && "not implemented"); + } + llvm_unreachable("bad evaluation kind"); +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index b7725bb387a3..f38a410c6722 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H #define LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H +#include "CIRGenValue.h" #include "mlir/IR/Value.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/Type.h" @@ -58,6 +59,14 @@ class CIRGenFunction { } CIRGenFunction(CIRGenModule &CGM); + + /// buildAnyExpr - Emit code to compute the specified expression which can + /// have any type. The result is returned as an RValue struct. If this is an + /// aggregate expression, the aggloc/agglocvolatile arguments indicate where + /// the result should be returned. + /// TODO: if this is an aggregate expression, add a AggValueSlot to indicate + /// where the result should be returned. + RValue buildAnyExpr(const clang::Expr *E); }; } // namespace cir diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 142d59d67edd..cf4d949e6ad7 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -834,22 +834,6 @@ LValue CIRGenModule::buildDeclRefLValue(const DeclRefExpr *E) { llvm_unreachable("Unhandled DeclRefExpr?"); } -/// Emit code to compute the specified expression which -/// can have any type. The result is returned as an RValue struct. -/// TODO: if this is an aggregate expression, add a AggValueSlot to indicate -/// where the result should be returned. -RValue CIRGenModule::buildAnyExpr(const Expr *E) { - switch (CIRGenFunction::getEvaluationKind(E->getType())) { - case TEK_Scalar: - return RValue::get(buildScalarExpr(E)); - case TEK_Complex: - assert(0 && "not implemented"); - case TEK_Aggregate: - assert(0 && "not implemented"); - } - llvm_unreachable("bad evaluation kind"); -} - LValue CIRGenModule::buildBinaryOperatorLValue(const BinaryOperator *E) { // Comma expressions just emit their LHS then their RHS as an l-value. if (E->getOpcode() == BO_Comma) { @@ -870,7 +854,7 @@ LValue CIRGenModule::buildBinaryOperatorLValue(const BinaryOperator *E) { clang::Qualifiers::ObjCLifetime::OCL_None && "not implemented"); - RValue RV = buildAnyExpr(E->getRHS()); + RValue RV = CurCGF->buildAnyExpr(E->getRHS()); LValue LV = buildLValue(E->getLHS()); SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 6d967e939599..ee4f0bf4cf41 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -336,12 +336,6 @@ class CIRGenModule { LValue buildDeclRefLValue(const clang::DeclRefExpr *E); - /// Emit code to compute the specified expression which - /// can have any type. The result is returned as an RValue struct. - /// TODO: if this is an aggregate expression, add a AggValueSlot to indicate - /// where the result should be returned. - RValue buildAnyExpr(const clang::Expr *E); - LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E); /// FIXME: this could likely be a common helper and not necessarily related From 2d74a5591cd0695ebc85eef2b4c3d4d8eecebb77 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 17 Feb 2022 01:20:51 -0500 Subject: [PATCH 0134/2301] [CIR][NFC] Add a dumb stubbed out `getDebugInfo` method to assert against This is pretty gross, but it's certainly worse to just leave points where we should be inserting debuginfo neglected. --- clang/lib/CIR/CIRGenFunction.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index f38a410c6722..a38777061e4c 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -60,6 +60,11 @@ class CIRGenFunction { CIRGenFunction(CIRGenModule &CGM); + // TODO: This is currently just a dumb stub. But we want to be able to clearly + // assert where we arne't doing things that we know we should and will crash + // as soon as we add a DebugInfo type to this class. + std::nullptr_t *getDebugInfo() { return nullptr; } + /// buildAnyExpr - Emit code to compute the specified expression which can /// have any type. The result is returned as an RValue struct. If this is an /// aggregate expression, the aggloc/agglocvolatile arguments indicate where From 3e600e187c818faa220930ab558ff46836cf2938 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 17 Feb 2022 01:28:00 -0500 Subject: [PATCH 0135/2301] [CIR] Allow unused results from expressions We assert that an expression is not a PRValue in `buildIgnoredExpr`. But the codegen seems fine via just building via buildAnyExpr. --- clang/lib/CIR/CIRGenModule.cpp | 3 ++- clang/test/CIR/CodeGen/basic.c | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index cf4d949e6ad7..1a23fab8aa6f 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1062,7 +1062,8 @@ LValue CIRGenModule::buildLValue(const Expr *E) { /// EmitIgnoredExpr - Emit code to compute the specified expression, /// ignoring the result. void CIRGenModule::buildIgnoredExpr(const Expr *E) { - assert(!E->isPRValue() && "not implemented"); + if (E->isPRValue()) + return (void)CurCGF->buildAnyExpr(E); // Just emit it as an l-value and drop the result. buildLValue(E); diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 6f9859bf8f02..f3bd584720d5 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -5,6 +5,7 @@ int foo(int i); int foo(int i) { + i; return i; } @@ -13,7 +14,8 @@ int foo(int i) { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT: %1 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 -// CHECK-NEXT: cir.return %1 : i32 +// CHECK-NEXT: %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 +// CHECK-NEXT: cir.return %2 : i32 // CHECK-NEXT: } int f2() { return 3; } From 7413eeb47cbca195cd2af903fa4424b0e5cea8da Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 17 Feb 2022 01:52:23 -0500 Subject: [PATCH 0136/2301] [CIR] Add a helper class, ABIArgInfo, for working with function arguments This is directly lifted with minimal translation from clang. For the time being we only really care about two valid types -- Direct (for simple args such as ints) and Ignore (void). The rest of the class is to provide the opportunity to assert at places where clang has differing behavior. e.g. if clang's codegen does `if (isInAlloca) doSomething();` we want to be able to assert that we aren't seeing an `isInAlloca` so that we don't forget to handle that situation later. This logic is pervasive through many of the following diffs. Most of the code is redundant and lifted from clang solely so we don't end up with things going down codepaths we don't anticipate and that we can enforce it with liberal usages of asserts. There is also the question of whether or not we need ABIArgInfo at the moment or anything ABI related at all. We could reason that we should prefer minimizing ABI interference in the highest levels of CIR. The hardest part of CIR is going to be getting it correct and working at all. Minimizing the divergences from clang seems like the easiest way to do this. If/when we get to the question of making it faster to compile and better at optimizing code we can focus on luxury things like lowering ABI decisions to mid-level CIR. --- clang/lib/CIR/CIRGenFunctionInfo.h | 180 +++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) create mode 100644 clang/lib/CIR/CIRGenFunctionInfo.h diff --git a/clang/lib/CIR/CIRGenFunctionInfo.h b/clang/lib/CIR/CIRGenFunctionInfo.h new file mode 100644 index 000000000000..25b12df156c8 --- /dev/null +++ b/clang/lib/CIR/CIRGenFunctionInfo.h @@ -0,0 +1,180 @@ +//==-- CIRGenFunctionInfo.h - Representation of fn argument/return types ---==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Defines CIRGenFunctionInfo and associated types used in representing the +// CIR source types and ABI-coerced types for function arguments and +// return values. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_CIR_CIRGENFUNCTIONINFO_H +#define LLVM_CLANG_CIR_CIRGENFUNCTIONINFO_H + +#include "clang/AST/CanonicalType.h" + +#include "llvm/ADT/FoldingSet.h" +#include "llvm/Support/TrailingObjects.h" + +#include "mlir/Dialect/CIR/IR/CIRTypes.h" + +namespace cir { + +/// ABIArgInfo - Helper class to encapsulate information about how a specific C +/// type should be passed to or returned from a function. +class ABIArgInfo { +public: + enum Kind : uint8_t { + /// Direct - Pass the argument directly using the normal converted CIR type, + /// or by coercing to another specified type stored in 'CoerceToType'). If + /// an offset is specified (in UIntData), then the argument passed is offset + /// by some number of bytes in the memory representation. A dummy argument + /// is emitted before the real argument if the specified type stored in + /// "PaddingType" is not zero. + Direct, + + /// Extend - Valid only for integer argument types. Same as 'direct' but + /// also emit a zer/sign extension attribute. + Extend, + + /// Indirect - Pass the argument indirectly via a hidden pointer with the + /// specified alignment (0 indicates default alignment) and address space. + Indirect, + + /// IndirectAliased - Similar to Indirect, but the pointer may be to an + /// object that is otherwise referenced. The object is known to not be + /// modified through any other references for the duration of the call, and + /// the callee must not itself modify the object. Because C allows parameter + /// variables to be modified and guarantees that they have unique addresses, + /// the callee must defensively copy the object into a local variable if it + /// might be modified or its address might be compared. Since those are + /// uncommon, in principle this convention allows programs to avoid copies + /// in more situations. However, it may introduce *extra* copies if the + /// callee fails to prove that a copy is unnecessary and the caller + /// naturally produces an unaliased object for the argument. + IndirectAliased, + + /// Ignore - Ignore the argument (treat as void). Useful for void and empty + /// structs. + Ignore, + + /// Expand - Only valid for aggregate argument types. The structure should + /// be expanded into consecutive arguments for its constituent fields. + /// Currently expand is only allowed on structures whose fields are all + /// scalar types or are themselves expandable types. + Expand, + + /// CoerceAndExpand - Only valid for aggregate argument types. The structure + /// should be expanded into consecutive arguments corresponding to the + /// non-array elements of the type stored in CoerceToType. + /// Array elements in the type are assumed to be padding and skipped. + CoerceAndExpand, + + // TODO: translate this idea to CIR! Define it for now just to ensure that + // we can assert it not being used + InAlloca, + KindFirst = Direct, + KindLast = InAlloca + }; + +private: + mlir::Type TypeData; // canHaveCoerceToType(); + union { + mlir::Type PaddingType; // canHavePaddingType() + mlir::Type UnpaddedCoerceAndExpandType; // isCoerceAndExpand() + }; + struct DirectAttrInfo { + unsigned Offset; + unsigned Align; + }; + struct IndirectAttrInfo { + unsigned Align; + unsigned AddrSpace; + }; + union { + DirectAttrInfo DirectAttr; // isDirect() || isExtend() + IndirectAttrInfo IndirectAttr; // isIndirect() + unsigned AllocaFieldIndex; // isInAlloca() + }; + Kind TheKind; + bool CanBeFlattened : 1; // isDirect() + + bool canHavePaddingType() const { + return isDirect() || isExtend() || isIndirect() || isIndirectAliased() || + isExpand(); + } + + void setPaddingType(mlir::Type T) { + assert(canHavePaddingType()); + PaddingType = T; + } + +public: + ABIArgInfo(Kind K = Direct) + : TypeData(nullptr), PaddingType(nullptr), DirectAttr{0, 0}, TheKind(K), + CanBeFlattened(false) {} + + static ABIArgInfo getDirect(mlir::Type T = nullptr, unsigned Offset = 0, + mlir::Type Padding = nullptr, + bool CanBeFlattened = true, unsigned Align = 0) { + auto AI = ABIArgInfo(Direct); + AI.setCoerceToType(T); + AI.setPaddingType(Padding); + AI.setDirectOffset(Offset); + AI.setDirectAlign(Align); + AI.setCanBeFlattened(CanBeFlattened); + return AI; + } + + static ABIArgInfo getIgnore() { return ABIArgInfo(Ignore); } + + Kind getKind() const { return TheKind; } + bool isDirect() const { return TheKind == Direct; } + bool isInAlloca() const { return TheKind == InAlloca; } + bool isExtend() const { return TheKind == Extend; } + bool isIndirect() const { return TheKind == Indirect; } + bool isIndirectAliased() const { return TheKind == IndirectAliased; } + bool isExpand() const { return TheKind == Expand; } + bool isCoerceAndExpand() const { return TheKind == CoerceAndExpand; } + + bool canHaveCoerceToType() const { + return isDirect() || isExtend() || isCoerceAndExpand(); + } + + void setDirectOffset(unsigned Offset) { + assert((isDirect() || isExtend()) && "Not a direct or extend kind"); + DirectAttr.Offset = Offset; + } + + void setDirectAlign(unsigned Align) { + assert((isDirect() || isExtend()) && "Not a direct or extend kind"); + DirectAttr.Align = Align; + } + + void setCanBeFlattened(bool Flatten) { + assert(isDirect() && "Invalid kind!"); + CanBeFlattened = Flatten; + } + + mlir::Type getPaddingType() const { + return (canHavePaddingType() ? PaddingType : nullptr); + } + + mlir::Type getCoerceToType() const { + assert(canHaveCoerceToType() && "Invalid kind!"); + return TypeData; + } + + void setCoerceToType(mlir::Type T) { + assert(canHaveCoerceToType() && "Invalid kind!"); + TypeData = T; + } +}; + +} // namespace cir + +#endif From 76f54665cc3d00053a062cc89bb9905967813c73 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 24 Feb 2022 23:28:50 -0800 Subject: [PATCH 0137/2301] [CIR] Add StrAttr to alloca for local symbol name representation While here add an extra class method for checking weather the storage type is a pointer type. Both of these are going to be used by the lifetime check. --- clang/lib/CIR/CIRGenModule.cpp | 1 + clang/test/CIR/CodeGen/basic.c | 4 ++-- clang/test/CIR/CodeGen/basic.cpp | 6 ++--- clang/test/CIR/CodeGen/goto.cpp | 4 ++-- clang/test/CIR/CodeGen/sourcelocation.cpp | 6 ++--- clang/test/CIR/CodeGen/struct.c | 4 ++-- clang/test/CIR/CodeGen/struct.cpp | 4 ++-- clang/test/CIR/IR/cir-ops.cir | 26 +++++++++++----------- clang/test/CIR/IRGen/memref.cir | 2 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 11 +++++++-- 10 files changed, 38 insertions(+), 30 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 1a23fab8aa6f..e7100a9a3975 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -120,6 +120,7 @@ mlir::LogicalResult CIRGenModule::declare(const Decl *var, QualType T, auto localVarAddr = builder.create( loc, /*addr type*/ localVarPtrTy, /*var type*/ localVarTy, + namedVar->getName(), IsParam ? InitStyle::paraminit : InitStyle::uninitialized, alignIntAttr); auto *parentBlock = localVarAddr->getBlock(); diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index f3bd584720d5..a4ee062d699c 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -11,7 +11,7 @@ int foo(int i) { // CHECK: module { // CHECK-NEXT: func @foo(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", paraminit] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT: %1 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 // CHECK-NEXT: %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 @@ -30,6 +30,6 @@ int f3() { } // CHECK: func @f3() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index f41c3ea90ddb..69852928d348 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -18,7 +18,7 @@ int *p1() { } // CHECK: func @p1() -> !cir.ptr { -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, [uninitialized] +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["p", uninitialized] // CHECK: %1 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > @@ -34,11 +34,11 @@ int *p2() { } // CHECK: func @p2() -> !cir.ptr { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, [cinit] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %1, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %5 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} +// CHECK-NEXT: %5 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} // CHECK-NEXT: %6 = cir.cst(0 : i32) : i32 // CHECK-NEXT: cir.store %6, %5 : i32, cir.ptr // CHECK-NEXT: cir.store %5, %0 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index dab5abd760cb..6c962c6975c1 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -10,8 +10,8 @@ void g0(int a) { } // CHECK: func @g0 -// CHECK: %0 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} // CHECK: cir.store %arg0, %1 : i32, cir.ptr // CHECK: %2 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 // CHECK: cir.store %2, %0 : i32, cir.ptr diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 881df14559ee..e0e1e53a5698 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -15,9 +15,9 @@ int s0(int a, int b) { // CHECK: #[[loc3:loc[0-9]+]] = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) // CHECK: module { // CHECK: func @s0(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} loc(#[[loc4:loc[0-9]+]]) -// CHECK: %1 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} loc(#[[loc3]]) -// CHECK: %2 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} loc(#[[loc2]]) +// CHECK: %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} loc(#[[loc4:loc[0-9]+]]) +// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] {alignment = 4 : i64} loc(#[[loc3]]) +// CHECK: %2 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} loc(#[[loc2]]) // CHECK: cir.store %arg0, %2 : i32, cir.ptr loc(#[[loc5:loc[0-9]+]]) // CHECK: cir.store %arg1, %1 : i32, cir.ptr loc(#[[loc5]]) // CHECK: %3 = cir.load %2 lvalue_to_rvalue : cir.ptr , i32 loc(#[[loc6:loc[0-9]+]]) diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 594e2d77d5ea..456b68bfbcf0 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -21,8 +21,8 @@ void baz() { // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> // CHECK-NEXT: module { // CHECK-NEXT: func @baz() { -// CHECK-NEXT: %0 = cir.alloca !_22struct2EFoo22, cir.ptr , [uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !_22struct2EBar22, cir.ptr , [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 38dba398a9f5..c357c3ada9c8 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -21,8 +21,8 @@ void baz() { // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> // CHECK-NEXT: module { // CHECK-NEXT: func @baz() { -// CHECK-NEXT: %0 = cir.alloca !_22struct2EFoo22, cir.ptr , [uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !_22struct2EBar22, cir.ptr , [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 20467205b51e..2ba56b47f119 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -3,14 +3,14 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s module { func.func @foo(%arg0: i32) -> i32 { - %0 = cir.alloca i32, cir.ptr , [paraminit] + %0 = cir.alloca i32, cir.ptr , ["x", paraminit] cir.store %arg0, %0 : i32, cir.ptr %1 = cir.load %0 : cir.ptr , i32 cir.return %1 : i32 } func.func @f3() -> i32 { - %0 = cir.alloca i32, cir.ptr , [cinit] + %0 = cir.alloca i32, cir.ptr , ["x", cinit] %1 = cir.cst(3 : i32) : i32 cir.store %1, %0 : i32, cir.ptr %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 @@ -18,8 +18,8 @@ module { } func.func @if0(%arg0: i32) -> i32 { - %0 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , [paraminit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} cir.store %arg0, %1 : i32, cir.ptr %2 = cir.cst(0 : i32) : i32 cir.store %2, %0 : i32, cir.ptr @@ -37,24 +37,24 @@ module { } func.func @s0() { - %0 = cir.alloca i32, cir.ptr , [uninitialized] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["x", uninitialized] {alignment = 4 : i64} cir.scope { - %1 = cir.alloca i32, cir.ptr , [uninitialized] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["y", uninitialized] {alignment = 4 : i64} } cir.return } } // CHECK: module { -// CHECK-NEXT: func.func @foo(%arg0: i32) -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [paraminit] +// CHECK-NEXT: func @foo(%arg0: i32) -> i32 { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", paraminit] // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %1 : i32 // CHECK-NEXT: } -// CHECK-NEXT: func.func @f3() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [cinit] +// CHECK-NEXT: func @f3() -> i32 { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", cinit] // CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr // CHECK-NEXT: %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 @@ -71,10 +71,10 @@ module { // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: } -// CHECK: func.func @s0() { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , [uninitialized] {alignment = 4 : i64} +// CHECK: func @s0() { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , [uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["y", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: } // CHECK: } diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index 1d8f09609bdc..7f82dceb9668 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -4,7 +4,7 @@ module { func.func @foo() -> i32 { - %0 = cir.alloca i32, cir.ptr , [cinit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} %1 = cir.cst(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 86c5576b8a7a..54070fc41909 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -154,12 +154,14 @@ def AllocaOp : CIR_Op<"alloca", [ ```mlir // Local variable with uninitialized value. - %0 = cir.alloca i32, !cir.ptr, [cinit] + // int count = ... + %0 = cir.alloca i32, !cir.ptr, ["count", cinit] ``` }]; let arguments = (ins TypeAttr:$allocaType, + StrAttr:$name, // FIXME: add "uninitialzed" as default mode Arg:$init, ConfinedAttr, [IntMinValue<0>]>:$alignment @@ -168,8 +170,13 @@ def AllocaOp : CIR_Op<"alloca", [ let results = (outs Res]>:$addr); + let extraClassDeclaration = [{ + // Whether the alloca input type is a pointer. + bool isPointerType() { return getAllocaType().isa<::mlir::cir::PointerType>(); } + }]; + let assemblyFormat = [{ - $allocaType `,` `cir.ptr` type($addr) `,` `[` $init `]` attr-dict + $allocaType `,` `cir.ptr` type($addr) `,` `[` $name `,` $init `]` attr-dict }]; let hasVerifier = 0; From 5f45303427b42564106567b28807cb3483dab730 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 9 Sep 2022 13:41:19 -0700 Subject: [PATCH 0138/2301] [CIR] Fix `isNullPtr` test on ConstantOp to check for `NullAttr` This is leftover from before the Attr Type removal. --- mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 1 + mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index ede8899ab04c..a65ce777cbed 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -29,6 +29,7 @@ class FuncOp; using FuncOp = func::FuncOp; } // namespace mlir +#include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIROpsDialect.h.inc" #include "mlir/Dialect/CIR/IR/CIROpsEnums.h.inc" #include "mlir/Dialect/CIR/IR/CIRTypes.h" diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 54070fc41909..4b52d1717314 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -100,7 +100,7 @@ def ConstantOp : CIR_Op<"cst", let extraClassDeclaration = [{ bool isNullPtr() { - return getValue().isa(); + return getValue().isa(); } }]; From 0358b362d037c1c2447521dede6de992a838dcc0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 24 Feb 2022 23:30:14 -0800 Subject: [PATCH 0139/2301] [CIR] Add a lifetime check pass - Implement the core logic from Herb's wg21.link/p1179, for now only implement basic tracking, no multi-level ownership just yet. - In the example below it's capable of diagnosing a bad uses of "p": ``` void basic() { int *p = nullptr; { int x = 0; p = &x; *p = 42; } // x dies in the end of scope *p = 42; // emit a warning on the bad use of 'p' } ``` There's a lot to be done still, but this paves the road for supporting more C++ along side with more accurate tests. --- clang/test/CIR/Transforms/lifetime-check.cpp | 9 +- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 323 +++++++++++++++++- 2 files changed, 319 insertions(+), 13 deletions(-) diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index fa755fca0385..880aea4f1bd5 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: cir-tool %t.cir -cir-lifetime-check -o %t-out.cir 2>&1 | FileCheck %s +// RUN: cir-tool %t.cir -cir-lifetime-check -verify-diagnostics -o %t-out.cir +// XFAIL: * int *basic() { int *p = nullptr; @@ -8,8 +9,6 @@ int *basic() { p = &x; *p = 42; } - *p = 42; - return p; + *p = 42; // expected-warning {{Found invalid use of pointer 'p'}} + return p; // expected-warning {{Found invalid use of pointer 'p'}} } - -// CHECK: Hello Lifetime World \ No newline at end of file diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 433fc2d790c9..7a9a83fa9fca 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -10,27 +10,334 @@ #include "PassDetail.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" + +#include "llvm/ADT/SmallSet.h" using namespace mlir; namespace { struct LifetimeCheckPass : public LifetimeCheckBase { - explicit LifetimeCheckPass(raw_ostream &os = llvm::errs()) : os(os) {} + LifetimeCheckPass() = default; // Prints the resultant operation statistics post iterating over the module. void runOnOperation() override; - // Print lifetime diagnostics - void printDiagnostics(); + void handleOperation(Operation *op); + void handleBlock(Block &block); + void handleRegion(Region ®ion); + + struct State { + using DataTy = enum { Invalid, NullPtr, LocalValue }; + DataTy data = Invalid; + State() = default; + State(DataTy d) : data(d) {} + State(mlir::Value v) : data(LocalValue), value(v) {} + // FIXME: use int/ptr pair to save space + std::optional value = std::nullopt; + + /// Provide less/equal than operator for sorting / set ops. + bool operator<(const State &RHS) const { + // FIXME: note that this makes the ordering non-deterministic, do + // we really care? + if (data == LocalValue && RHS.data == LocalValue) + return value->getAsOpaquePointer() < RHS.value->getAsOpaquePointer(); + else + return data < RHS.data; + } + bool operator==(const State &RHS) const { + if (data == LocalValue && RHS.data == LocalValue) + return *value == *RHS.value; + else + return data == RHS.data; + } + + void dump(); + + static State getInvalid() { return {}; } + static State getNullPtr() { return {NullPtr}; } + static State getLocalValue(mlir::Value v) { return {v}; } + }; + + using PSetType = llvm::SmallSet; + + // FIXME: this should be a ScopedHashTable for consistency. + using PMapType = llvm::DenseMap; + + PMapType pmap; + SmallPtrSet ptrs; + + // Represents the scope context for IR operations (cir.scope, cir.if, + // then/else regions, etc). Tracks the declaration of variables in the current + // local scope. + struct LexicalScopeContext { + unsigned Depth = 0; + LexicalScopeContext() = default; + ~LexicalScopeContext() = default; + + // Track all local values added in this scope + llvm::SmallVector localValues; + + void dumpLocalValues(); + }; -private: - raw_ostream &os; + class LexicalScopeGuard { + LifetimeCheckPass &Pass; + LexicalScopeContext *OldVal = nullptr; + + public: + LexicalScopeGuard(LifetimeCheckPass &p, LexicalScopeContext *L) : Pass(p) { + if (Pass.currScope) { + OldVal = Pass.currScope; + L->Depth++; + } + Pass.currScope = L; + } + + LexicalScopeGuard(const LexicalScopeGuard &) = delete; + LexicalScopeGuard &operator=(const LexicalScopeGuard &) = delete; + LexicalScopeGuard &operator=(LexicalScopeGuard &&other) = delete; + + void cleanup(); + void restore() { Pass.currScope = OldVal; } + ~LexicalScopeGuard() { + cleanup(); + restore(); + } + }; + + LexicalScopeContext *currScope = nullptr; + void dumpPset(PSetType &pset); + void dumpPmap(); }; } // namespace -void LifetimeCheckPass::runOnOperation() { printDiagnostics(); } -void LifetimeCheckPass::printDiagnostics() { os << "Hello Lifetime World\n"; } +static StringRef getVarNameFromValue(mlir::Value v) { + if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(v.getDefiningOp())) + return allocaOp.getName(); + assert(0 && "how did it get here?"); + return ""; +} + +void LifetimeCheckPass::LexicalScopeGuard::cleanup() { + auto *localScope = Pass.currScope; + auto &pmap = Pass.pmap; + // If we are cleaning up at the function level, nothing + // to do here cause we are past all possible deference points + if (localScope->Depth == 0) + return; + + // 2.3 - KILL(x) means to replace all occurrences of x and x' and x'' (etc.) + // in the pmap with invalid. For example, if pmap is {(p1,{a}), (p2,{a'})}, + // KILL(a') would invalidate only p2, and KILL(a) would invalidate both p1 and + // p2. + for (auto value : localScope->localValues) { + for (auto &mapEntry : pmap) { + + // We are deleting this entry anyways, nothing to do here. + if (value == mapEntry.first) + continue; + + // If the local value is part of this pset, it means + // we need to invalidate it, otherwise keep searching. + auto &pset = mapEntry.second; + State valState = State::getLocalValue(value); + if (!pset.contains(valState)) + continue; + + // Erase the reference and mark this invalid. + // FIXME: add a way to just mutate the state. + // FIXME: right now we are piling up invalids, if it's already + // invalid we don't need to add again? only if tracking the path. + pset.erase(valState); + pset.insert(State::getInvalid()); + } + // Delete the local value from pmap, since its gone now. + pmap.erase(value); + } +} + +void LifetimeCheckPass::handleBlock(Block &block) { + // Block main role is to hold a list of Operations: let's recurse. + for (Operation &op : block.getOperations()) + handleOperation(&op); +} + +void LifetimeCheckPass::handleRegion(Region ®ion) { + // FIXME: if else-then blocks have their own scope too. + for (Block &block : region.getBlocks()) + handleBlock(block); +} + +void LifetimeCheckPass::handleOperation(Operation *op) { + // FIXME: allow "isScopeLike" queries so that we can unify this type + // of handling in a generic way. + if (isa<::mlir::ModuleOp>(op)) { + for (Region ®ion : op->getRegions()) + handleRegion(region); + return; + } + + if (isa<::mlir::FuncOp>(op)) { + // Add a new scope. Note that as part of the scope cleanup process + // we apply section 2.3 KILL(x) functionality, turning relevant + // references invalid. + { + LexicalScopeContext lexScope{}; + LexicalScopeGuard scopeGuard{*this, &lexScope}; + for (Region ®ion : op->getRegions()) + handleRegion(region); + } + return; + } + + if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(op)) { + auto addr = allocaOp.getAddr(); + assert(!pmap.count(addr) && "only one alloca for any given address"); + + pmap[addr] = {}; + if (!allocaOp.isPointerType()) { + // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. + pmap[addr].insert(State::getLocalValue(addr)); + currScope->localValues.push_back(addr); + return; + } + + // 2.4.2 - When a non-parameter non-member Pointer p is declared, add + // (p, {invalid}) to pmap. + ptrs.insert(addr); + pmap[addr].insert(State::getInvalid()); + + // If other styles of initialization gets added, required to add support + // here. + assert(allocaOp.getInitAttr().getValue() == mlir::cir::InitStyle::cinit && + "other init styles tbd"); + return; + } + + if (auto storeOp = dyn_cast<::mlir::cir::StoreOp>(op)) { + auto addr = storeOp.getAddr(); + + // We only care about stores that change local pointers, local values + // are not interesting here (just yet). + if (!ptrs.count(addr)) + return; + + auto data = storeOp.getValue(); + // 2.4.2 - If the declaration includes an initialization, the + // initialization is treated as a separate operation + if (auto cstOp = dyn_cast<::mlir::cir::ConstantOp>(data.getDefiningOp())) { + assert(cstOp.isNullPtr() && "not implemented"); + // 2.4.2 - If the initialization is default initialization or zero + // initialization, set pset(p) = {null}; for example: + // int* p; => pset(p) == {invalid} + // int* p{}; or string_view p; => pset(p) == {null}. + // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} + pmap[addr] = {}; + pmap[addr].insert(State::getNullPtr()); + return; + } + + if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(data.getDefiningOp())) { + // p = &x; + pmap[addr] = {}; + pmap[addr].insert(State::getLocalValue(data)); + return; + } + + storeOp.dump(); + // FIXME: asserts here should become remarks for non-implemented parts. + assert(0 && "not implemented"); + } + + if (auto loadOp = dyn_cast<::mlir::cir::LoadOp>(op)) { + auto addr = loadOp.getAddr(); + // Only interested in checking deference on top of pointer types. + if (!pmap.count(addr) || !ptrs.count(addr)) + return; + // 2.4.2 - On every dereference of a Pointer p, enforce that p is not + // invalid. + if (!pmap[addr].count(State::getInvalid())) { + // FIXME: perhaps add a remark that we got a valid dereference + return; + } + + // Looks like we found a invalid path leading to this deference point, + // diagnose it. + emitWarning(loadOp.getLoc()) + << "Found invalid use of pointer '" << getVarNameFromValue(addr) << "'"; + return; + } + + // FIXME: allow "isScopeLike" queries so that we can unify this type + // of handling in a generic way. + if (auto ScopeOp = dyn_cast<::mlir::cir::ScopeOp>(op)) { + // Add a new scope. Note that as part of the scope cleanup process + // we apply section 2.3 KILL(x) functionality, turning relevant + // references invalid. + LexicalScopeContext lexScope{}; + LexicalScopeGuard scopeGuard{*this, &lexScope}; + for (Region ®ion : op->getRegions()) + handleRegion(region); + return; + } +} + +void LifetimeCheckPass::runOnOperation() { + Operation *op = getOperation(); + handleOperation(op); +} std::unique_ptr mlir::createLifetimeCheckPass() { return std::make_unique(); -} \ No newline at end of file +} + +//===----------------------------------------------------------------------===// +// Dump helpers +//===----------------------------------------------------------------------===// + +void LifetimeCheckPass::LexicalScopeContext::dumpLocalValues() { + llvm::errs() << "Local values: { "; + for (auto value : localValues) { + llvm::errs() << getVarNameFromValue(value); + llvm::errs() << ", "; + } + llvm::errs() << "}\n"; +} + +void LifetimeCheckPass::State::dump() { + switch (data) { + case Invalid: + llvm::errs() << "invalid"; + break; + case NullPtr: + llvm::errs() << "nullptr"; + break; + case LocalValue: + llvm::errs() << getVarNameFromValue(*value); + break; + } +} + +void LifetimeCheckPass::dumpPset(PSetType &pset) { + llvm::errs() << "{ "; + for (auto s : pset) { + s.dump(); + llvm::errs() << ", "; + } + llvm::errs() << "}"; +} + +void LifetimeCheckPass::dumpPmap() { + llvm::errs() << "pmap {\n"; + int entry = 0; + for (auto &mapEntry : pmap) { + llvm::errs() << " " << entry << ": " << getVarNameFromValue(mapEntry.first) + << " " + << "=> "; + dumpPset(mapEntry.second); + llvm::errs() << "\n"; + entry++; + } + llvm::errs() << "}\n"; +} From 7d03add4e97be9c07ce1b75bf3c81eaa62bbc416 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 25 Feb 2022 18:07:06 -0800 Subject: [PATCH 0140/2301] [CIR] Cleanup cir.load, remove lvaluetorvalue tag and add deference tag in its place --- clang/lib/CIR/CIRGenExprScalar.cpp | 6 +++--- clang/lib/CIR/CIRGenModule.cpp | 7 +++++++ clang/test/CIR/CodeGen/basic.c | 4 ++-- clang/test/CIR/CodeGen/basic.cpp | 16 ++++++++-------- clang/test/CIR/CodeGen/goto.cpp | 6 +++--- clang/test/CIR/CodeGen/sourcelocation.cpp | 8 ++++---- clang/test/CIR/IR/cir-ops.cir | 8 ++++---- clang/test/CIR/IRGen/memref.cir | 2 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 13 +++++++++---- 9 files changed, 41 insertions(+), 29 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index dcd845b518a9..f21974b9518b 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -29,9 +29,9 @@ class ScalarExprEmitter : public StmtVisitor { /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { LValue LV = CGM.buildLValue(E); - auto load = Builder.create( - CGM.getLoc(E->getExprLoc()), CGM.getCIRType(E->getType()), - LV.getPointer(), mlir::UnitAttr::get(Builder.getContext())); + auto load = Builder.create(CGM.getLoc(E->getExprLoc()), + CGM.getCIRType(E->getType()), + LV.getPointer()); // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); return load; } diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index e7100a9a3975..52b99a8e7f83 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1020,6 +1020,13 @@ LValue CIRGenModule::buildUnaryOpLValue(const UnaryOperator *E) { LValueBaseInfo BaseInfo; // TODO: add TBAAInfo Address Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo); + + // Tag 'load' with deref attribute. + if (auto loadOp = + dyn_cast<::mlir::cir::LoadOp>(Addr.getPointer().getDefiningOp())) { + loadOp.setIsDerefAttr(mlir::UnitAttr::get(builder.getContext())); + } + LValue LV = LValue::makeAddr(Addr, T, BaseInfo); // TODO: set addr space // TODO: ObjC/GC/__weak write barrier stuff. diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index a4ee062d699c..bd5a4b942139 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -13,8 +13,8 @@ int foo(int i) { // CHECK-NEXT: func @foo(%arg0: i32 loc({{.*}})) -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", paraminit] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr -// CHECK-NEXT: %1 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 -// CHECK-NEXT: %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %2 : i32 // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 69852928d348..3bd12b96571e 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -43,13 +43,13 @@ int *p2() { // CHECK-NEXT: cir.store %6, %5 : i32, cir.ptr // CHECK-NEXT: cir.store %5, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %7 = cir.cst(42 : i32) : i32 -// CHECK-NEXT: %8 = cir.load %0 lvalue_to_rvalue : cir.ptr >, !cir.ptr +// CHECK-NEXT: %8 = cir.load deref %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.store %7, %8 : i32, cir.ptr // CHECK-NEXT: } // CHECK-NEXT: %2 = cir.cst(42 : i32) : i32 -// CHECK-NEXT: %3 = cir.load %0 lvalue_to_rvalue : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = cir.load deref %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.store %2, %3 : i32, cir.ptr -// CHECK-NEXT: %4 = cir.load %0 lvalue_to_rvalue : cir.ptr >, !cir.ptr +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return %4 : !cir.ptr // CHECK-NEXT: } @@ -62,7 +62,7 @@ void b0() { bool x = true, y = false; } void b1(int a) { bool b = a; } // CHECK: func @b1(%arg0: i32 loc({{.*}})) { -// CHECK: %2 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %2 = cir.load %1 : cir.ptr , i32 // CHECK: %3 = cir.cast(int_to_bool, %2 : i32), !cir.bool // CHECK: cir.store %3, %0 : !cir.bool, cir.ptr @@ -78,7 +78,7 @@ int if0(int a) { // CHECK: func @if0(%arg0: i32 loc({{.*}})) -> i32 { // CHECK: cir.scope { -// CHECK: %4 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %4 = cir.load %1 : cir.ptr , i32 // CHECK: %5 = cir.cast(int_to_bool, %4 : i32), !cir.bool // CHECK-NEXT: cir.if %5 { // CHECK-NEXT: %6 = cir.cst(3 : i32) : i32 @@ -107,13 +107,13 @@ int if1(int a, bool b, bool c) { // CHECK: func @if1(%arg0: i32 loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) -> i32 { // CHECK: cir.scope { -// CHECK: %6 = cir.load %3 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %6 = cir.load %3 : cir.ptr , i32 // CHECK: %7 = cir.cast(int_to_bool, %6 : i32), !cir.bool // CHECK: cir.if %7 { // CHECK: %8 = cir.cst(3 : i32) : i32 // CHECK: cir.store %8, %0 : i32, cir.ptr // CHECK: cir.scope { -// CHECK: %9 = cir.load %2 lvalue_to_rvalue : cir.ptr , !cir.bool +// CHECK: %9 = cir.load %2 : cir.ptr , !cir.bool // CHECK-NEXT: cir.if %9 { // CHECK-NEXT: %10 = cir.cst(8 : i32) : i32 // CHECK-NEXT: cir.store %10, %0 : i32, cir.ptr @@ -121,7 +121,7 @@ int if1(int a, bool b, bool c) { // CHECK: } // CHECK: } else { // CHECK: cir.scope { -// CHECK: %9 = cir.load %1 lvalue_to_rvalue : cir.ptr , !cir.bool +// CHECK: %9 = cir.load %1 : cir.ptr , !cir.bool // CHECK-NEXT: cir.if %9 { // CHECK-NEXT: %10 = cir.cst(14 : i32) : i32 // CHECK-NEXT: cir.store %10, %0 : i32, cir.ptr diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 6c962c6975c1..e6b013f3cb7d 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -13,17 +13,17 @@ void g0(int a) { // CHECK: %0 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} // CHECK: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} // CHECK: cir.store %arg0, %1 : i32, cir.ptr -// CHECK: %2 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %2 = cir.load %1 : cir.ptr , i32 // CHECK: cir.store %2, %0 : i32, cir.ptr // CHECK: cir.br ^bb2 // CHECK: ^bb1: // no predecessors -// CHECK: %3 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %3 = cir.load %0 : cir.ptr , i32 // CHECK: %4 = cir.cst(1 : i32) : i32 // CHECK: %5 = cir.binop(add, %3, %4) : i32 // CHECK: cir.store %5, %0 : i32, cir.ptr // CHECK: cir.br ^bb2 // CHECK: ^bb2: // 2 preds: ^bb0, ^bb1 -// CHECK: %6 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 +// CHECK: %6 = cir.load %0 : cir.ptr , i32 // CHECK: %7 = cir.cst(2 : i32) : i32 // CHECK: %8 = cir.binop(add, %6, %7) : i32 // CHECK: cir.store %8, %0 : i32, cir.ptr diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index e0e1e53a5698..ba412baa5088 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -20,12 +20,12 @@ int s0(int a, int b) { // CHECK: %2 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} loc(#[[loc2]]) // CHECK: cir.store %arg0, %2 : i32, cir.ptr loc(#[[loc5:loc[0-9]+]]) // CHECK: cir.store %arg1, %1 : i32, cir.ptr loc(#[[loc5]]) -// CHECK: %3 = cir.load %2 lvalue_to_rvalue : cir.ptr , i32 loc(#[[loc6:loc[0-9]+]]) -// CHECK: %4 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 loc(#[[loc7:loc[0-9]+]]) +// CHECK: %3 = cir.load %2 : cir.ptr , i32 loc(#[[loc6:loc[0-9]+]]) +// CHECK: %4 = cir.load %1 : cir.ptr , i32 loc(#[[loc7:loc[0-9]+]]) // CHECK: %5 = cir.binop(add, %3, %4) : i32 loc(#[[loc8:loc[0-9]+]]) // CHECK: cir.store %5, %0 : i32, cir.ptr loc(#[[loc4]]) // CHECK: cir.scope { -// CHECK: %7 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 loc(#[[loc10:loc[0-9]+]]) +// CHECK: %7 = cir.load %0 : cir.ptr , i32 loc(#[[loc10:loc[0-9]+]]) // CHECK: %8 = cir.cst(0 : i32) : i32 loc(#[[loc11:loc[0-9]+]]) // CHECK: %9 = cir.cmp(gt, %7, %8) : i32, !cir.bool loc(#[[loc12:loc[0-9]+]]) // CHECK: cir.if %9 { @@ -36,7 +36,7 @@ int s0(int a, int b) { // CHECK: cir.store %10, %0 : i32, cir.ptr loc(#[[loc17:loc[0-9]+]]) // CHECK: } loc(#[[loc13:loc[0-9]+]]) // CHECK: } loc(#[[loc9:loc[0-9]+]]) -// CHECK: %6 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 loc(#[[loc18:loc[0-9]+]]) +// CHECK: %6 = cir.load %0 : cir.ptr , i32 loc(#[[loc18:loc[0-9]+]]) // CHECK: cir.return %6 : i32 loc(#[[loc19:loc[0-9]+]]) // CHECK: } loc(#[[loc1:loc[0-9]+]]) // CHECK: } loc(#[[loc0:loc[0-9]+]]) diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 2ba56b47f119..7918d5927f3b 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -13,7 +13,7 @@ module { %0 = cir.alloca i32, cir.ptr , ["x", cinit] %1 = cir.cst(3 : i32) : i32 cir.store %1, %0 : i32, cir.ptr - %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 + %2 = cir.load %0 : cir.ptr , i32 cir.return %2 : i32 } @@ -23,7 +23,7 @@ module { cir.store %arg0, %1 : i32, cir.ptr %2 = cir.cst(0 : i32) : i32 cir.store %2, %0 : i32, cir.ptr - %3 = cir.load %1 lvalue_to_rvalue : cir.ptr , i32 + %3 = cir.load %1 : cir.ptr , i32 %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool cir.if %4 { %6 = cir.cst(3 : i32) : i32 @@ -32,7 +32,7 @@ module { %6 = cir.cst(4 : i32) : i32 cir.store %6, %0 : i32, cir.ptr } - %5 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 + %5 = cir.load %0 : cir.ptr , i32 cir.return %5 : i32 } @@ -57,7 +57,7 @@ module { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", cinit] // CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %2 : i32 // CHECK-NEXT: } diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/IRGen/memref.cir index 7f82dceb9668..c49793d4831d 100644 --- a/clang/test/CIR/IRGen/memref.cir +++ b/clang/test/CIR/IRGen/memref.cir @@ -7,7 +7,7 @@ module { %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} %1 = cir.cst(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr - %2 = cir.load %0 lvalue_to_rvalue : cir.ptr , i32 + %2 = cir.load %0 : cir.ptr , i32 cir.return %2 : i32 } } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 4b52d1717314..102823751ad1 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -193,7 +193,8 @@ def LoadOp : CIR_Op<"load", [ let summary = "load operation"; let description = [{ - `cir.load` reads a variable using a pointer type. + `cir.load` reads a variable (lvalue to rvalue conversion) given an address + backed up by a `cir.ptr` type. Example: @@ -201,16 +202,20 @@ def LoadOp : CIR_Op<"load", [ // Read from local variable, address in %0. %1 = cir.load %0 : !cir.ptr, i32 + + // Load address from memory at address %0. %3 provides + // the address used while dereferecing a pointer. + %3 = cir.load deref %0 : cir.ptr > ``` }]; let arguments = (ins Arg:$addr, - UnitAttr:$conv); + [MemRead]>:$addr, UnitAttr:$isDeref); let results = (outs AnyType:$result); let assemblyFormat = [{ - $addr (`lvalue_to_rvalue` $conv^)? attr-dict `:` `cir.ptr` type($addr) `,` type($result) + (`deref` $isDeref^)? $addr `:` `cir.ptr` type($addr) `,` + type($result) attr-dict }]; } From 64a1ce51511968673cff8d433726a54a69b08f86 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 25 Feb 2022 19:05:18 -0800 Subject: [PATCH 0141/2301] [CIR][LifetimeCheck] Only look at derefs for now and update testcase --- clang/test/CIR/Transforms/lifetime-check.cpp | 4 ++-- mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp | 9 ++++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index 880aea4f1bd5..5a54e7730c79 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -9,6 +9,6 @@ int *basic() { p = &x; *p = 42; } - *p = 42; // expected-warning {{Found invalid use of pointer 'p'}} - return p; // expected-warning {{Found invalid use of pointer 'p'}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + return p; } diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 7a9a83fa9fca..56ce0e16b49d 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -255,6 +255,10 @@ void LifetimeCheckPass::handleOperation(Operation *op) { // Only interested in checking deference on top of pointer types. if (!pmap.count(addr) || !ptrs.count(addr)) return; + + if (!loadOp.getIsDeref()) + return; + // 2.4.2 - On every dereference of a Pointer p, enforce that p is not // invalid. if (!pmap[addr].count(State::getInvalid())) { @@ -264,8 +268,11 @@ void LifetimeCheckPass::handleOperation(Operation *op) { // Looks like we found a invalid path leading to this deference point, // diagnose it. + // + // Note that usually the use of the invalid address happens at the + // load or store using the result of this loadOp. emitWarning(loadOp.getLoc()) - << "Found invalid use of pointer '" << getVarNameFromValue(addr) << "'"; + << "use of invalid pointer '" << getVarNameFromValue(addr) << "'"; return; } From 4388e31457bc2469b7f6a068b7caedb795d2aa92 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 25 Feb 2022 19:08:53 -0800 Subject: [PATCH 0142/2301] [CIR][LifetimeCheck][NFC] Rename pass methods --- clang/test/CIR/Transforms/lifetime-check.cpp | 1 - .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 189 ++++++++---------- 2 files changed, 88 insertions(+), 102 deletions(-) diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index 5a54e7730c79..7a1d0652f31a 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: cir-tool %t.cir -cir-lifetime-check -verify-diagnostics -o %t-out.cir -// XFAIL: * int *basic() { int *p = nullptr; diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 56ce0e16b49d..31c3a1371564 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -23,9 +23,13 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Prints the resultant operation statistics post iterating over the module. void runOnOperation() override; - void handleOperation(Operation *op); - void handleBlock(Block &block); - void handleRegion(Region ®ion); + void checkOperation(Operation *op); + void checkBlock(Block &block); + void checkRegion(Region ®ion); + + void checkOperation(mlir::cir::AllocaOp *allocaOp); + void checkOperation(mlir::cir::StoreOp *storeOp); + void checkOperation(mlir::cir::LoadOp *loadOp); struct State { using DataTy = enum { Invalid, NullPtr, LocalValue }; @@ -157,142 +161,125 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { } } -void LifetimeCheckPass::handleBlock(Block &block) { +void LifetimeCheckPass::checkBlock(Block &block) { // Block main role is to hold a list of Operations: let's recurse. for (Operation &op : block.getOperations()) - handleOperation(&op); + checkOperation(&op); } -void LifetimeCheckPass::handleRegion(Region ®ion) { +void LifetimeCheckPass::checkRegion(Region ®ion) { // FIXME: if else-then blocks have their own scope too. for (Block &block : region.getBlocks()) - handleBlock(block); + checkBlock(block); } -void LifetimeCheckPass::handleOperation(Operation *op) { - // FIXME: allow "isScopeLike" queries so that we can unify this type - // of handling in a generic way. - if (isa<::mlir::ModuleOp>(op)) { - for (Region ®ion : op->getRegions()) - handleRegion(region); - return; - } +void LifetimeCheckPass::checkOperation(mlir::cir::AllocaOp *allocaOp) { + auto addr = allocaOp->getAddr(); + assert(!pmap.count(addr) && "only one alloca for any given address"); - if (isa<::mlir::FuncOp>(op)) { - // Add a new scope. Note that as part of the scope cleanup process - // we apply section 2.3 KILL(x) functionality, turning relevant - // references invalid. - { - LexicalScopeContext lexScope{}; - LexicalScopeGuard scopeGuard{*this, &lexScope}; - for (Region ®ion : op->getRegions()) - handleRegion(region); - } + pmap[addr] = {}; + if (!allocaOp->isPointerType()) { + // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. + pmap[addr].insert(State::getLocalValue(addr)); + currScope->localValues.push_back(addr); return; } - if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(op)) { - auto addr = allocaOp.getAddr(); - assert(!pmap.count(addr) && "only one alloca for any given address"); + // 2.4.2 - When a non-parameter non-member Pointer p is declared, add + // (p, {invalid}) to pmap. + ptrs.insert(addr); + pmap[addr].insert(State::getInvalid()); - pmap[addr] = {}; - if (!allocaOp.isPointerType()) { - // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. - pmap[addr].insert(State::getLocalValue(addr)); - currScope->localValues.push_back(addr); - return; - } + // If other styles of initialization gets added, required to add support + // here. + assert(allocaOp->getInitAttr().getValue() == mlir::cir::InitStyle::cinit && + "other init styles tbd"); +} - // 2.4.2 - When a non-parameter non-member Pointer p is declared, add - // (p, {invalid}) to pmap. - ptrs.insert(addr); - pmap[addr].insert(State::getInvalid()); +void LifetimeCheckPass::checkOperation(mlir::cir::StoreOp *storeOp) { + auto addr = storeOp->getAddr(); - // If other styles of initialization gets added, required to add support - // here. - assert(allocaOp.getInitAttr().getValue() == mlir::cir::InitStyle::cinit && - "other init styles tbd"); + // We only care about stores that change local pointers, local values + // are not interesting here (just yet). + if (!ptrs.count(addr)) + return; + + auto data = storeOp->getValue(); + // 2.4.2 - If the declaration includes an initialization, the + // initialization is treated as a separate operation + if (auto cstOp = dyn_cast<::mlir::cir::ConstantOp>(data.getDefiningOp())) { + assert(cstOp.isNullPtr() && "not implemented"); + // 2.4.2 - If the initialization is default initialization or zero + // initialization, set pset(p) = {null}; for example: + // int* p; => pset(p) == {invalid} + // int* p{}; or string_view p; => pset(p) == {null}. + // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} + pmap[addr] = {}; + pmap[addr].insert(State::getNullPtr()); return; } - if (auto storeOp = dyn_cast<::mlir::cir::StoreOp>(op)) { - auto addr = storeOp.getAddr(); - - // We only care about stores that change local pointers, local values - // are not interesting here (just yet). - if (!ptrs.count(addr)) - return; - - auto data = storeOp.getValue(); - // 2.4.2 - If the declaration includes an initialization, the - // initialization is treated as a separate operation - if (auto cstOp = dyn_cast<::mlir::cir::ConstantOp>(data.getDefiningOp())) { - assert(cstOp.isNullPtr() && "not implemented"); - // 2.4.2 - If the initialization is default initialization or zero - // initialization, set pset(p) = {null}; for example: - // int* p; => pset(p) == {invalid} - // int* p{}; or string_view p; => pset(p) == {null}. - // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} - pmap[addr] = {}; - pmap[addr].insert(State::getNullPtr()); - return; - } + if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(data.getDefiningOp())) { + // p = &x; + pmap[addr] = {}; + pmap[addr].insert(State::getLocalValue(data)); + return; + } - if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(data.getDefiningOp())) { - // p = &x; - pmap[addr] = {}; - pmap[addr].insert(State::getLocalValue(data)); - return; - } + storeOp->dump(); + // FIXME: asserts here should become remarks for non-implemented parts. + assert(0 && "not implemented"); +} - storeOp.dump(); - // FIXME: asserts here should become remarks for non-implemented parts. - assert(0 && "not implemented"); - } +void LifetimeCheckPass::checkOperation(mlir::cir::LoadOp *loadOp) { + auto addr = loadOp->getAddr(); + // Only interested in checking deference on top of pointer types. + if (!pmap.count(addr) || !ptrs.count(addr)) + return; - if (auto loadOp = dyn_cast<::mlir::cir::LoadOp>(op)) { - auto addr = loadOp.getAddr(); - // Only interested in checking deference on top of pointer types. - if (!pmap.count(addr) || !ptrs.count(addr)) - return; + if (!loadOp->getIsDeref()) + return; - if (!loadOp.getIsDeref()) - return; + // 2.4.2 - On every dereference of a Pointer p, enforce that p is not + // invalid. + if (!pmap[addr].count(State::getInvalid())) { + // FIXME: perhaps add a remark that we got a valid dereference + return; + } - // 2.4.2 - On every dereference of a Pointer p, enforce that p is not - // invalid. - if (!pmap[addr].count(State::getInvalid())) { - // FIXME: perhaps add a remark that we got a valid dereference - return; - } + // Looks like we found a invalid path leading to this deference point, + // diagnose it. + // + // Note that usually the use of the invalid address happens at the + // load or store using the result of this loadOp. + emitWarning(loadOp->getLoc()) + << "use of invalid pointer '" << getVarNameFromValue(addr) << "'"; + return; +} - // Looks like we found a invalid path leading to this deference point, - // diagnose it. - // - // Note that usually the use of the invalid address happens at the - // load or store using the result of this loadOp. - emitWarning(loadOp.getLoc()) - << "use of invalid pointer '" << getVarNameFromValue(addr) << "'"; +void LifetimeCheckPass::checkOperation(Operation *op) { + if (isa<::mlir::ModuleOp>(op)) { + for (Region ®ion : op->getRegions()) + checkRegion(region); return; } - // FIXME: allow "isScopeLike" queries so that we can unify this type - // of handling in a generic way. - if (auto ScopeOp = dyn_cast<::mlir::cir::ScopeOp>(op)) { + bool isLexicalScopeOp = + isa<::mlir::FuncOp>(op) || isa<::mlir::cir::ScopeOp>(op); + if (isLexicalScopeOp) { // Add a new scope. Note that as part of the scope cleanup process // we apply section 2.3 KILL(x) functionality, turning relevant // references invalid. LexicalScopeContext lexScope{}; LexicalScopeGuard scopeGuard{*this, &lexScope}; for (Region ®ion : op->getRegions()) - handleRegion(region); - return; + checkRegion(region); } } void LifetimeCheckPass::runOnOperation() { Operation *op = getOperation(); - handleOperation(op); + checkOperation(op); } std::unique_ptr mlir::createLifetimeCheckPass() { From b002ff0ac50a44f43e78824dcc7f9f14859546ac Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 25 Feb 2022 19:52:50 -0800 Subject: [PATCH 0143/2301] [CIR][NFC] Remove unused files for Analysis/lifetime, not necessary for now --- clang/tools/cir-tool/CMakeLists.txt | 1 - .../mlir/Dialect/CIR/Analysis/LifetimeAnalysis.h | 0 mlir/lib/Dialect/CIR/Analysis/CMakeLists.txt | 11 ----------- mlir/lib/Dialect/CIR/Analysis/Lifetime.cpp | 0 mlir/lib/Dialect/CIR/CMakeLists.txt | 3 +-- mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt | 1 - 6 files changed, 1 insertion(+), 15 deletions(-) delete mode 100644 mlir/include/mlir/Dialect/CIR/Analysis/LifetimeAnalysis.h delete mode 100644 mlir/lib/Dialect/CIR/Analysis/CMakeLists.txt delete mode 100644 mlir/lib/Dialect/CIR/Analysis/Lifetime.cpp diff --git a/clang/tools/cir-tool/CMakeLists.txt b/clang/tools/cir-tool/CMakeLists.txt index aeea0bd3be36..83f2075a5586 100644 --- a/clang/tools/cir-tool/CMakeLists.txt +++ b/clang/tools/cir-tool/CMakeLists.txt @@ -12,7 +12,6 @@ target_link_libraries(cir-tool PRIVATE clangCIR MLIRAnalysis MLIRCIR - MLIRCIRAnalysis MLIRCIRTransforms MLIRDialect MLIRIR diff --git a/mlir/include/mlir/Dialect/CIR/Analysis/LifetimeAnalysis.h b/mlir/include/mlir/Dialect/CIR/Analysis/LifetimeAnalysis.h deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/mlir/lib/Dialect/CIR/Analysis/CMakeLists.txt b/mlir/lib/Dialect/CIR/Analysis/CMakeLists.txt deleted file mode 100644 index 7094c3d54caf..000000000000 --- a/mlir/lib/Dialect/CIR/Analysis/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -add_mlir_dialect_library(MLIRCIRAnalysis - Lifetime.cpp - - ADDITIONAL_HEADER_DIRS - ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/CIR - - LINK_LIBS PUBLIC - MLIRAnalysis - MLIRIR - ) - diff --git a/mlir/lib/Dialect/CIR/Analysis/Lifetime.cpp b/mlir/lib/Dialect/CIR/Analysis/Lifetime.cpp deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/mlir/lib/Dialect/CIR/CMakeLists.txt b/mlir/lib/Dialect/CIR/CMakeLists.txt index b78bf46d6d90..9f57627c321f 100644 --- a/mlir/lib/Dialect/CIR/CMakeLists.txt +++ b/mlir/lib/Dialect/CIR/CMakeLists.txt @@ -1,3 +1,2 @@ add_subdirectory(IR) -add_subdirectory(Analysis) -add_subdirectory(Transforms) \ No newline at end of file +add_subdirectory(Transforms) diff --git a/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt b/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt index 11a7c49d04f3..bd27fb0fb173 100644 --- a/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt @@ -12,6 +12,5 @@ add_mlir_dialect_library(MLIRCIRTransforms MLIRAnalysis MLIRIR MLIRCIR - MLIRCIRAnalysis MLIRPass ) From 434a5b17b5b9886fb81265ac342bd10b68e3f614 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 25 Feb 2022 20:42:36 -0800 Subject: [PATCH 0144/2301] [CIR] Change the order allocas are inserted into local parent block They are now inserted in the same order they are declared. --- clang/lib/CIR/CIRGenModule.cpp | 11 ++++++++++- clang/test/CIR/CodeGen/basic.cpp | 24 +++++++++++------------ clang/test/CIR/CodeGen/goto.cpp | 18 ++++++++--------- clang/test/CIR/CodeGen/sourcelocation.cpp | 18 ++++++++--------- 4 files changed, 40 insertions(+), 31 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 52b99a8e7f83..7e3bc934f14b 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -56,6 +56,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/raw_ostream.h" +#include #include using namespace mlir::cir; @@ -123,8 +124,16 @@ mlir::LogicalResult CIRGenModule::declare(const Decl *var, QualType T, namedVar->getName(), IsParam ? InitStyle::paraminit : InitStyle::uninitialized, alignIntAttr); + // Allocas are expected to be in the beginning of the entry block + // in whatever region they show up. auto *parentBlock = localVarAddr->getBlock(); - localVarAddr->moveBefore(&parentBlock->front()); + auto lastAlloca = std::find_if_not( + parentBlock->begin(), parentBlock->end(), + [](mlir::Operation &op) { return isa(&op); }); + if (lastAlloca != std::end(*parentBlock)) + localVarAddr->moveBefore(&*lastAlloca); + else + localVarAddr->moveBefore(&parentBlock->front()); // Insert into the symbol table, allocate some stack space in the // function entry block. diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 3bd12b96571e..9674b6ba1a83 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -62,9 +62,9 @@ void b0() { bool x = true, y = false; } void b1(int a) { bool b = a; } // CHECK: func @b1(%arg0: i32 loc({{.*}})) { -// CHECK: %2 = cir.load %1 : cir.ptr , i32 +// CHECK: %2 = cir.load %0 : cir.ptr , i32 // CHECK: %3 = cir.cast(int_to_bool, %2 : i32), !cir.bool -// CHECK: cir.store %3, %0 : !cir.bool, cir.ptr +// CHECK: cir.store %3, %1 : !cir.bool, cir.ptr int if0(int a) { int x = 0; @@ -78,14 +78,14 @@ int if0(int a) { // CHECK: func @if0(%arg0: i32 loc({{.*}})) -> i32 { // CHECK: cir.scope { -// CHECK: %4 = cir.load %1 : cir.ptr , i32 +// CHECK: %4 = cir.load %0 : cir.ptr , i32 // CHECK: %5 = cir.cast(int_to_bool, %4 : i32), !cir.bool // CHECK-NEXT: cir.if %5 { // CHECK-NEXT: %6 = cir.cst(3 : i32) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: cir.store %6, %1 : i32, cir.ptr // CHECK-NEXT: } else { // CHECK-NEXT: %6 = cir.cst(4 : i32) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: cir.store %6, %1 : i32, cir.ptr // CHECK-NEXT: } // CHECK: } @@ -107,27 +107,27 @@ int if1(int a, bool b, bool c) { // CHECK: func @if1(%arg0: i32 loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) -> i32 { // CHECK: cir.scope { -// CHECK: %6 = cir.load %3 : cir.ptr , i32 +// CHECK: %6 = cir.load %0 : cir.ptr , i32 // CHECK: %7 = cir.cast(int_to_bool, %6 : i32), !cir.bool // CHECK: cir.if %7 { // CHECK: %8 = cir.cst(3 : i32) : i32 -// CHECK: cir.store %8, %0 : i32, cir.ptr +// CHECK: cir.store %8, %3 : i32, cir.ptr // CHECK: cir.scope { -// CHECK: %9 = cir.load %2 : cir.ptr , !cir.bool +// CHECK: %9 = cir.load %1 : cir.ptr , !cir.bool // CHECK-NEXT: cir.if %9 { // CHECK-NEXT: %10 = cir.cst(8 : i32) : i32 -// CHECK-NEXT: cir.store %10, %0 : i32, cir.ptr +// CHECK-NEXT: cir.store %10, %3 : i32, cir.ptr // CHECK-NEXT: } // CHECK: } // CHECK: } else { // CHECK: cir.scope { -// CHECK: %9 = cir.load %1 : cir.ptr , !cir.bool +// CHECK: %9 = cir.load %2 : cir.ptr , !cir.bool // CHECK-NEXT: cir.if %9 { // CHECK-NEXT: %10 = cir.cst(14 : i32) : i32 -// CHECK-NEXT: cir.store %10, %0 : i32, cir.ptr +// CHECK-NEXT: cir.store %10, %3 : i32, cir.ptr // CHECK-NEXT: } // CHECK: } // CHECK: %8 = cir.cst(4 : i32) : i32 -// CHECK: cir.store %8, %0 : i32, cir.ptr +// CHECK: cir.store %8, %3 : i32, cir.ptr // CHECK: } // CHECK: } diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index e6b013f3cb7d..42f4effc7bca 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -10,23 +10,23 @@ void g0(int a) { } // CHECK: func @g0 -// CHECK: %0 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} -// CHECK: cir.store %arg0, %1 : i32, cir.ptr -// CHECK: %2 = cir.load %1 : cir.ptr , i32 -// CHECK: cir.store %2, %0 : i32, cir.ptr +// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : i32, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , i32 +// CHECK: cir.store %2, %1 : i32, cir.ptr // CHECK: cir.br ^bb2 // CHECK: ^bb1: // no predecessors -// CHECK: %3 = cir.load %0 : cir.ptr , i32 +// CHECK: %3 = cir.load %1 : cir.ptr , i32 // CHECK: %4 = cir.cst(1 : i32) : i32 // CHECK: %5 = cir.binop(add, %3, %4) : i32 -// CHECK: cir.store %5, %0 : i32, cir.ptr +// CHECK: cir.store %5, %1 : i32, cir.ptr // CHECK: cir.br ^bb2 // CHECK: ^bb2: // 2 preds: ^bb0, ^bb1 -// CHECK: %6 = cir.load %0 : cir.ptr , i32 +// CHECK: %6 = cir.load %1 : cir.ptr , i32 // CHECK: %7 = cir.cst(2 : i32) : i32 // CHECK: %8 = cir.binop(add, %6, %7) : i32 -// CHECK: cir.store %8, %0 : i32, cir.ptr +// CHECK: cir.store %8, %1 : i32, cir.ptr // CHECK: cir.br ^bb3 // CHECK: ^bb3: // pred: ^bb2 // CHECK: cir.return \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index ba412baa5088..52300180a3a5 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -15,28 +15,28 @@ int s0(int a, int b) { // CHECK: #[[loc3:loc[0-9]+]] = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) // CHECK: module { // CHECK: func @s0(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} loc(#[[loc4:loc[0-9]+]]) +// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} loc(#[[loc2]]) // CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] {alignment = 4 : i64} loc(#[[loc3]]) -// CHECK: %2 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} loc(#[[loc2]]) -// CHECK: cir.store %arg0, %2 : i32, cir.ptr loc(#[[loc5:loc[0-9]+]]) +// CHECK: %2 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} loc(#[[loc4:loc[0-9]+]]) +// CHECK: cir.store %arg0, %0 : i32, cir.ptr loc(#[[loc5:loc[0-9]+]]) // CHECK: cir.store %arg1, %1 : i32, cir.ptr loc(#[[loc5]]) -// CHECK: %3 = cir.load %2 : cir.ptr , i32 loc(#[[loc6:loc[0-9]+]]) +// CHECK: %3 = cir.load %0 : cir.ptr , i32 loc(#[[loc6:loc[0-9]+]]) // CHECK: %4 = cir.load %1 : cir.ptr , i32 loc(#[[loc7:loc[0-9]+]]) // CHECK: %5 = cir.binop(add, %3, %4) : i32 loc(#[[loc8:loc[0-9]+]]) -// CHECK: cir.store %5, %0 : i32, cir.ptr loc(#[[loc4]]) +// CHECK: cir.store %5, %2 : i32, cir.ptr loc(#[[loc4]]) // CHECK: cir.scope { -// CHECK: %7 = cir.load %0 : cir.ptr , i32 loc(#[[loc10:loc[0-9]+]]) +// CHECK: %7 = cir.load %2 : cir.ptr , i32 loc(#[[loc10:loc[0-9]+]]) // CHECK: %8 = cir.cst(0 : i32) : i32 loc(#[[loc11:loc[0-9]+]]) // CHECK: %9 = cir.cmp(gt, %7, %8) : i32, !cir.bool loc(#[[loc12:loc[0-9]+]]) // CHECK: cir.if %9 { // CHECK: %10 = cir.cst(0 : i32) : i32 loc(#[[loc14:loc[0-9]+]]) -// CHECK: cir.store %10, %0 : i32, cir.ptr loc(#[[loc15:loc[0-9]+]]) +// CHECK: cir.store %10, %2 : i32, cir.ptr loc(#[[loc15:loc[0-9]+]]) // CHECK: } else { // CHECK: %10 = cir.cst(1 : i32) : i32 loc(#[[loc16:loc[0-9]+]]) -// CHECK: cir.store %10, %0 : i32, cir.ptr loc(#[[loc17:loc[0-9]+]]) +// CHECK: cir.store %10, %2 : i32, cir.ptr loc(#[[loc17:loc[0-9]+]]) // CHECK: } loc(#[[loc13:loc[0-9]+]]) // CHECK: } loc(#[[loc9:loc[0-9]+]]) -// CHECK: %6 = cir.load %0 : cir.ptr , i32 loc(#[[loc18:loc[0-9]+]]) +// CHECK: %6 = cir.load %2 : cir.ptr , i32 loc(#[[loc18:loc[0-9]+]]) // CHECK: cir.return %6 : i32 loc(#[[loc19:loc[0-9]+]]) // CHECK: } loc(#[[loc1:loc[0-9]+]]) // CHECK: } loc(#[[loc0:loc[0-9]+]]) From b9b1a5563cbbeee97d3db280d98af5ebf8ce8340 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 28 Feb 2022 16:58:45 -0800 Subject: [PATCH 0145/2301] Revert "[CIR][LifetimeCheck][NFC] Cleanup and tide up overall pass" This reverts commit 5f048dfabd970d3bd0a7c4551c63b5bf8c0978a3. --- clang/test/CIR/Transforms/lifetime-check.cpp | 1 + .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 171 ++++++++++-------- 2 files changed, 93 insertions(+), 79 deletions(-) diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index 7a1d0652f31a..5a54e7730c79 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: cir-tool %t.cir -cir-lifetime-check -verify-diagnostics -o %t-out.cir +// XFAIL: * int *basic() { int *p = nullptr; diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 31c3a1371564..ffe94af03470 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -27,10 +27,6 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkBlock(Block &block); void checkRegion(Region ®ion); - void checkOperation(mlir::cir::AllocaOp *allocaOp); - void checkOperation(mlir::cir::StoreOp *storeOp); - void checkOperation(mlir::cir::LoadOp *loadOp); - struct State { using DataTy = enum { Invalid, NullPtr, LocalValue }; DataTy data = Invalid; @@ -173,100 +169,116 @@ void LifetimeCheckPass::checkRegion(Region ®ion) { checkBlock(block); } -void LifetimeCheckPass::checkOperation(mlir::cir::AllocaOp *allocaOp) { - auto addr = allocaOp->getAddr(); - assert(!pmap.count(addr) && "only one alloca for any given address"); - - pmap[addr] = {}; - if (!allocaOp->isPointerType()) { - // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. - pmap[addr].insert(State::getLocalValue(addr)); - currScope->localValues.push_back(addr); +void LifetimeCheckPass::checkOperation(Operation *op) { + // FIXME: allow "isScopeLike" queries so that we can unify this type + // of handling in a generic way. + if (isa<::mlir::ModuleOp>(op)) { + for (Region ®ion : op->getRegions()) + checkRegion(region); return; } - // 2.4.2 - When a non-parameter non-member Pointer p is declared, add - // (p, {invalid}) to pmap. - ptrs.insert(addr); - pmap[addr].insert(State::getInvalid()); + if (isa<::mlir::FuncOp>(op)) { + // Add a new scope. Note that as part of the scope cleanup process + // we apply section 2.3 KILL(x) functionality, turning relevant + // references invalid. + { + LexicalScopeContext lexScope{}; + LexicalScopeGuard scopeGuard{*this, &lexScope}; + for (Region ®ion : op->getRegions()) + checkRegion(region); + } + return; + } - // If other styles of initialization gets added, required to add support - // here. - assert(allocaOp->getInitAttr().getValue() == mlir::cir::InitStyle::cinit && - "other init styles tbd"); -} + if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(op)) { + auto addr = allocaOp.getAddr(); + assert(!pmap.count(addr) && "only one alloca for any given address"); -void LifetimeCheckPass::checkOperation(mlir::cir::StoreOp *storeOp) { - auto addr = storeOp->getAddr(); + pmap[addr] = {}; + if (!allocaOp.isPointerType()) { + // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. + pmap[addr].insert(State::getLocalValue(addr)); + currScope->localValues.push_back(addr); + return; + } - // We only care about stores that change local pointers, local values - // are not interesting here (just yet). - if (!ptrs.count(addr)) - return; + // 2.4.2 - When a non-parameter non-member Pointer p is declared, add + // (p, {invalid}) to pmap. + ptrs.insert(addr); + pmap[addr].insert(State::getInvalid()); - auto data = storeOp->getValue(); - // 2.4.2 - If the declaration includes an initialization, the - // initialization is treated as a separate operation - if (auto cstOp = dyn_cast<::mlir::cir::ConstantOp>(data.getDefiningOp())) { - assert(cstOp.isNullPtr() && "not implemented"); - // 2.4.2 - If the initialization is default initialization or zero - // initialization, set pset(p) = {null}; for example: - // int* p; => pset(p) == {invalid} - // int* p{}; or string_view p; => pset(p) == {null}. - // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} - pmap[addr] = {}; - pmap[addr].insert(State::getNullPtr()); + // If other styles of initialization gets added, required to add support + // here. + assert(allocaOp.getInitAttr().getValue() == mlir::cir::InitStyle::cinit && + "other init styles tbd"); return; } - if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(data.getDefiningOp())) { - // p = &x; - pmap[addr] = {}; - pmap[addr].insert(State::getLocalValue(data)); - return; - } + if (auto storeOp = dyn_cast<::mlir::cir::StoreOp>(op)) { + auto addr = storeOp.getAddr(); + + // We only care about stores that change local pointers, local values + // are not interesting here (just yet). + if (!ptrs.count(addr)) + return; + + auto data = storeOp.getValue(); + // 2.4.2 - If the declaration includes an initialization, the + // initialization is treated as a separate operation + if (auto cstOp = dyn_cast<::mlir::cir::ConstantOp>(data.getDefiningOp())) { + assert(cstOp.isNullPtr() && "not implemented"); + // 2.4.2 - If the initialization is default initialization or zero + // initialization, set pset(p) = {null}; for example: + // int* p; => pset(p) == {invalid} + // int* p{}; or string_view p; => pset(p) == {null}. + // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} + pmap[addr] = {}; + pmap[addr].insert(State::getNullPtr()); + return; + } - storeOp->dump(); - // FIXME: asserts here should become remarks for non-implemented parts. - assert(0 && "not implemented"); -} + if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(data.getDefiningOp())) { + // p = &x; + pmap[addr] = {}; + pmap[addr].insert(State::getLocalValue(data)); + return; + } -void LifetimeCheckPass::checkOperation(mlir::cir::LoadOp *loadOp) { - auto addr = loadOp->getAddr(); - // Only interested in checking deference on top of pointer types. - if (!pmap.count(addr) || !ptrs.count(addr)) - return; + storeOp.dump(); + // FIXME: asserts here should become remarks for non-implemented parts. + assert(0 && "not implemented"); + } - if (!loadOp->getIsDeref()) - return; + if (auto loadOp = dyn_cast<::mlir::cir::LoadOp>(op)) { + auto addr = loadOp.getAddr(); + // Only interested in checking deference on top of pointer types. + if (!pmap.count(addr) || !ptrs.count(addr)) + return; - // 2.4.2 - On every dereference of a Pointer p, enforce that p is not - // invalid. - if (!pmap[addr].count(State::getInvalid())) { - // FIXME: perhaps add a remark that we got a valid dereference - return; - } + if (!loadOp.getIsDeref()) + return; - // Looks like we found a invalid path leading to this deference point, - // diagnose it. - // - // Note that usually the use of the invalid address happens at the - // load or store using the result of this loadOp. - emitWarning(loadOp->getLoc()) - << "use of invalid pointer '" << getVarNameFromValue(addr) << "'"; - return; -} + // 2.4.2 - On every dereference of a Pointer p, enforce that p is not + // invalid. + if (!pmap[addr].count(State::getInvalid())) { + // FIXME: perhaps add a remark that we got a valid dereference + return; + } -void LifetimeCheckPass::checkOperation(Operation *op) { - if (isa<::mlir::ModuleOp>(op)) { - for (Region ®ion : op->getRegions()) - checkRegion(region); + // Looks like we found a invalid path leading to this deference point, + // diagnose it. + // + // Note that usually the use of the invalid address happens at the + // load or store using the result of this loadOp. + emitWarning(loadOp.getLoc()) + << "use of invalid pointer '" << getVarNameFromValue(addr) << "'"; return; } - bool isLexicalScopeOp = - isa<::mlir::FuncOp>(op) || isa<::mlir::cir::ScopeOp>(op); - if (isLexicalScopeOp) { + // FIXME: allow "isScopeLike" queries so that we can unify this type + // of handling in a generic way. + if (auto ScopeOp = dyn_cast<::mlir::cir::ScopeOp>(op)) { // Add a new scope. Note that as part of the scope cleanup process // we apply section 2.3 KILL(x) functionality, turning relevant // references invalid. @@ -274,6 +286,7 @@ void LifetimeCheckPass::checkOperation(Operation *op) { LexicalScopeGuard scopeGuard{*this, &lexScope}; for (Region ®ion : op->getRegions()) checkRegion(region); + return; } } From 6b4774bc45f7f2d44905f8fe24139d6666afd785 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 28 Feb 2022 17:06:23 -0800 Subject: [PATCH 0146/2301] [CIR][LifetimeCheck][NFC] Handle lexical scope more generically --- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 29 +++++-------------- 1 file changed, 7 insertions(+), 22 deletions(-) diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index ffe94af03470..4694adae3c16 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -170,24 +170,22 @@ void LifetimeCheckPass::checkRegion(Region ®ion) { } void LifetimeCheckPass::checkOperation(Operation *op) { - // FIXME: allow "isScopeLike" queries so that we can unify this type - // of handling in a generic way. if (isa<::mlir::ModuleOp>(op)) { for (Region ®ion : op->getRegions()) checkRegion(region); return; } - if (isa<::mlir::FuncOp>(op)) { + bool isLexicalScopeOp = + isa<::mlir::FuncOp>(op) || isa<::mlir::cir::ScopeOp>(op); + if (isLexicalScopeOp) { // Add a new scope. Note that as part of the scope cleanup process // we apply section 2.3 KILL(x) functionality, turning relevant // references invalid. - { - LexicalScopeContext lexScope{}; - LexicalScopeGuard scopeGuard{*this, &lexScope}; - for (Region ®ion : op->getRegions()) - checkRegion(region); - } + LexicalScopeContext lexScope{}; + LexicalScopeGuard scopeGuard{*this, &lexScope}; + for (Region ®ion : op->getRegions()) + checkRegion(region); return; } @@ -275,19 +273,6 @@ void LifetimeCheckPass::checkOperation(Operation *op) { << "use of invalid pointer '" << getVarNameFromValue(addr) << "'"; return; } - - // FIXME: allow "isScopeLike" queries so that we can unify this type - // of handling in a generic way. - if (auto ScopeOp = dyn_cast<::mlir::cir::ScopeOp>(op)) { - // Add a new scope. Note that as part of the scope cleanup process - // we apply section 2.3 KILL(x) functionality, turning relevant - // references invalid. - LexicalScopeContext lexScope{}; - LexicalScopeGuard scopeGuard{*this, &lexScope}; - for (Region ®ion : op->getRegions()) - checkRegion(region); - return; - } } void LifetimeCheckPass::runOnOperation() { From b9b59c468bc62b257f76d45dc86f5b4d6d582c14 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 28 Feb 2022 17:10:46 -0800 Subject: [PATCH 0147/2301] [CIR][LifetimeCheck][NFC] Split out alloca into its own handling --- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 51 ++++++++++--------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 4694adae3c16..6e2bd2dea6c9 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -15,6 +15,7 @@ #include "llvm/ADT/SmallSet.h" using namespace mlir; +using namespace cir; namespace { struct LifetimeCheckPass : public LifetimeCheckBase { @@ -27,6 +28,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkBlock(Block &block); void checkRegion(Region ®ion); + void checkAlloca(AllocaOp op); + struct State { using DataTy = enum { Invalid, NullPtr, LocalValue }; DataTy data = Invalid; @@ -169,6 +172,29 @@ void LifetimeCheckPass::checkRegion(Region ®ion) { checkBlock(block); } +void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { + auto addr = allocaOp.getAddr(); + assert(!pmap.count(addr) && "only one alloca for any given address"); + + pmap[addr] = {}; + if (!allocaOp.isPointerType()) { + // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. + pmap[addr].insert(State::getLocalValue(addr)); + currScope->localValues.push_back(addr); + return; + } + + // 2.4.2 - When a non-parameter non-member Pointer p is declared, add + // (p, {invalid}) to pmap. + ptrs.insert(addr); + pmap[addr].insert(State::getInvalid()); + + // If other styles of initialization gets added, required to add support + // here. + assert(allocaOp.getInitAttr().getValue() == mlir::cir::InitStyle::cinit && + "other init styles tbd"); +} + void LifetimeCheckPass::checkOperation(Operation *op) { if (isa<::mlir::ModuleOp>(op)) { for (Region ®ion : op->getRegions()) @@ -189,29 +215,8 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return; } - if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(op)) { - auto addr = allocaOp.getAddr(); - assert(!pmap.count(addr) && "only one alloca for any given address"); - - pmap[addr] = {}; - if (!allocaOp.isPointerType()) { - // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. - pmap[addr].insert(State::getLocalValue(addr)); - currScope->localValues.push_back(addr); - return; - } - - // 2.4.2 - When a non-parameter non-member Pointer p is declared, add - // (p, {invalid}) to pmap. - ptrs.insert(addr); - pmap[addr].insert(State::getInvalid()); - - // If other styles of initialization gets added, required to add support - // here. - assert(allocaOp.getInitAttr().getValue() == mlir::cir::InitStyle::cinit && - "other init styles tbd"); - return; - } + if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(op)) + return checkAlloca(allocaOp); if (auto storeOp = dyn_cast<::mlir::cir::StoreOp>(op)) { auto addr = storeOp.getAddr(); From ff91999c385f95004321ae8956a149a6c5486688 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 28 Feb 2022 17:16:47 -0800 Subject: [PATCH 0148/2301] [CIR][LifetimeCheck][NFC] Split out load/store into its own handling --- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 128 +++++++++--------- 1 file changed, 67 insertions(+), 61 deletions(-) diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 6e2bd2dea6c9..efdfe2e6e1ba 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -29,6 +29,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkRegion(Region ®ion); void checkAlloca(AllocaOp op); + void checkStore(StoreOp op); + void checkLoad(LoadOp op); struct State { using DataTy = enum { Invalid, NullPtr, LocalValue }; @@ -195,6 +197,67 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { "other init styles tbd"); } +void LifetimeCheckPass::checkStore(StoreOp storeOp) { + auto addr = storeOp.getAddr(); + + // We only care about stores that change local pointers, local values + // are not interesting here (just yet). + if (!ptrs.count(addr)) + return; + + auto data = storeOp.getValue(); + // 2.4.2 - If the declaration includes an initialization, the + // initialization is treated as a separate operation + if (auto cstOp = dyn_cast<::mlir::cir::ConstantOp>(data.getDefiningOp())) { + assert(cstOp.isNullPtr() && "not implemented"); + // 2.4.2 - If the initialization is default initialization or zero + // initialization, set pset(p) = {null}; for example: + // + // int* p; => pset(p) == {invalid} + // int* p{}; or string_view p; => pset(p) == {null}. + // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} + pmap[addr] = {}; + pmap[addr].insert(State::getNullPtr()); + return; + } + + if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(data.getDefiningOp())) { + // p = &x; + pmap[addr] = {}; + pmap[addr].insert(State::getLocalValue(data)); + return; + } + + storeOp.dump(); + // FIXME: asserts here should become remarks for non-implemented parts. + assert(0 && "not implemented"); +} + +void LifetimeCheckPass::checkLoad(LoadOp loadOp) { + auto addr = loadOp.getAddr(); + // Only interested in checking deference on top of pointer types. + if (!pmap.count(addr) || !ptrs.count(addr)) + return; + + if (!loadOp.getIsDeref()) + return; + + // 2.4.2 - On every dereference of a Pointer p, enforce that p is not + // invalid. + if (!pmap[addr].count(State::getInvalid())) { + // FIXME: perhaps add a remark that we got a valid dereference + return; + } + + // Looks like we found a invalid path leading to this deference point, + // diagnose it. + // + // Note that usually the use of the invalid address happens at the + // load or store using the result of this loadOp. + emitWarning(loadOp.getLoc()) + << "use of invalid pointer '" << getVarNameFromValue(addr) << "'"; +} + void LifetimeCheckPass::checkOperation(Operation *op) { if (isa<::mlir::ModuleOp>(op)) { for (Region ®ion : op->getRegions()) @@ -217,67 +280,10 @@ void LifetimeCheckPass::checkOperation(Operation *op) { if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(op)) return checkAlloca(allocaOp); - - if (auto storeOp = dyn_cast<::mlir::cir::StoreOp>(op)) { - auto addr = storeOp.getAddr(); - - // We only care about stores that change local pointers, local values - // are not interesting here (just yet). - if (!ptrs.count(addr)) - return; - - auto data = storeOp.getValue(); - // 2.4.2 - If the declaration includes an initialization, the - // initialization is treated as a separate operation - if (auto cstOp = dyn_cast<::mlir::cir::ConstantOp>(data.getDefiningOp())) { - assert(cstOp.isNullPtr() && "not implemented"); - // 2.4.2 - If the initialization is default initialization or zero - // initialization, set pset(p) = {null}; for example: - // int* p; => pset(p) == {invalid} - // int* p{}; or string_view p; => pset(p) == {null}. - // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} - pmap[addr] = {}; - pmap[addr].insert(State::getNullPtr()); - return; - } - - if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(data.getDefiningOp())) { - // p = &x; - pmap[addr] = {}; - pmap[addr].insert(State::getLocalValue(data)); - return; - } - - storeOp.dump(); - // FIXME: asserts here should become remarks for non-implemented parts. - assert(0 && "not implemented"); - } - - if (auto loadOp = dyn_cast<::mlir::cir::LoadOp>(op)) { - auto addr = loadOp.getAddr(); - // Only interested in checking deference on top of pointer types. - if (!pmap.count(addr) || !ptrs.count(addr)) - return; - - if (!loadOp.getIsDeref()) - return; - - // 2.4.2 - On every dereference of a Pointer p, enforce that p is not - // invalid. - if (!pmap[addr].count(State::getInvalid())) { - // FIXME: perhaps add a remark that we got a valid dereference - return; - } - - // Looks like we found a invalid path leading to this deference point, - // diagnose it. - // - // Note that usually the use of the invalid address happens at the - // load or store using the result of this loadOp. - emitWarning(loadOp.getLoc()) - << "use of invalid pointer '" << getVarNameFromValue(addr) << "'"; - return; - } + if (auto storeOp = dyn_cast<::mlir::cir::StoreOp>(op)) + return checkStore(storeOp); + if (auto loadOp = dyn_cast<::mlir::cir::LoadOp>(op)) + return checkLoad(loadOp); } void LifetimeCheckPass::runOnOperation() { From 1d795f463b4730ade29a65a4917fa0ab992b38f7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 28 Feb 2022 18:02:28 -0800 Subject: [PATCH 0149/2301] [CIR][LifetimeCheck] Use llvm::PointerIntPair to track each state - While here add the `global` state, even though we don't really support it just yet. --- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 42 ++++++++++++------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index efdfe2e6e1ba..e55ff1cdd32c 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -33,28 +33,37 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkLoad(LoadOp op); struct State { - using DataTy = enum { Invalid, NullPtr, LocalValue }; - DataTy data = Invalid; - State() = default; - State(DataTy d) : data(d) {} - State(mlir::Value v) : data(LocalValue), value(v) {} - // FIXME: use int/ptr pair to save space - std::optional value = std::nullopt; + using DataTy = enum { + Invalid, + NullPtr, + Global, + LocalValue, + NumKindsMinusOne = LocalValue + }; + State() { val.setInt(Invalid); } + State(DataTy d) { val.setInt(d); } + State(mlir::Value v) { val.setPointerAndInt(v, LocalValue); } + + static constexpr int KindBits = 2; + static_assert((1 << KindBits) > NumKindsMinusOne, + "Not enough room for kind!"); + llvm::PointerIntPair val; /// Provide less/equal than operator for sorting / set ops. bool operator<(const State &RHS) const { // FIXME: note that this makes the ordering non-deterministic, do // we really care? - if (data == LocalValue && RHS.data == LocalValue) - return value->getAsOpaquePointer() < RHS.value->getAsOpaquePointer(); + if (val.getInt() == LocalValue && RHS.val.getInt() == LocalValue) + return val.getPointer().getAsOpaquePointer() < + RHS.val.getPointer().getAsOpaquePointer(); else - return data < RHS.data; + return val.getInt() < RHS.val.getInt(); } bool operator==(const State &RHS) const { - if (data == LocalValue && RHS.data == LocalValue) - return *value == *RHS.value; + if (val.getInt() == LocalValue && RHS.val.getInt() == LocalValue) + return val.getPointer() == RHS.val.getPointer(); else - return data == RHS.data; + return val.getInt() == RHS.val.getInt(); } void dump(); @@ -309,15 +318,18 @@ void LifetimeCheckPass::LexicalScopeContext::dumpLocalValues() { } void LifetimeCheckPass::State::dump() { - switch (data) { + switch (val.getInt()) { case Invalid: llvm::errs() << "invalid"; break; case NullPtr: llvm::errs() << "nullptr"; break; + case Global: + llvm::errs() << "global"; + break; case LocalValue: - llvm::errs() << getVarNameFromValue(*value); + llvm::errs() << getVarNameFromValue(val.getPointer()); break; } } From f8c832a25e75b04319039e56bb9316627fb20f9f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 2 Mar 2022 16:15:37 -0800 Subject: [PATCH 0150/2301] [CIR][LifetimeCheck] Add options to allow remarks to be emitted --- mlir/include/mlir/Dialect/CIR/Passes.td | 6 ++++++ .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 18 ++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/mlir/include/mlir/Dialect/CIR/Passes.td b/mlir/include/mlir/Dialect/CIR/Passes.td index f17f52e440e4..f288e0eeeb3b 100644 --- a/mlir/include/mlir/Dialect/CIR/Passes.td +++ b/mlir/include/mlir/Dialect/CIR/Passes.td @@ -19,6 +19,12 @@ def LifetimeCheck : Pass<"cir-lifetime-check"> { }]; let constructor = "mlir::createLifetimeCheckPass()"; let dependentDialects = ["cir::CIRDialect"]; + + let options = [ + ListOption<"remarksList", "remarks", "std::string", + "List of remark styles to enable as part of diagnostics." + " Supported styles: {all|pset}", "llvm::cl::ZeroOrMore"> + ]; } #endif // MLIR_DIALECT_CIR_PASSES diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index e55ff1cdd32c..22fd7e6d5881 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -32,6 +32,23 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkStore(StoreOp op); void checkLoad(LoadOp op); + struct Options { + enum : unsigned { None = 0, RemarkPset = 1, RemarkAll = 1 << 1 }; + unsigned val = None; + + void parseOptions(LifetimeCheckPass &pass) { + for (auto &remark : pass.remarksList) { + val |= StringSwitch(remark) + .Case("pset", RemarkPset) + .Case("all", RemarkAll) + .Default(None); + } + } + + bool emitRemarkAll() { return val & RemarkAll; } + bool emitRemarkPset() { return emitRemarkAll() || val & RemarkPset; } + } opts; + struct State { using DataTy = enum { Invalid, @@ -296,6 +313,7 @@ void LifetimeCheckPass::checkOperation(Operation *op) { } void LifetimeCheckPass::runOnOperation() { + opts.parseOptions(*this); Operation *op = getOperation(); checkOperation(op); } From 7b55c88bbfa52b60e11870e35666dfcf4fd200dc Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 2 Mar 2022 16:16:34 -0800 Subject: [PATCH 0151/2301] [CIR][LifetimeCheck] Add support for if operation - Add more scope handling to account for if's then/else. - Recognize control flow divergence. - Hide the pmap and add pmap guards to better maintain and handle pmaps. - Implement join operation between pmaps. - Add a remark to print the pset, this should help with writing useful tests. - Testcases --- .../CIR/Transforms/lifetime-check-remarks.cpp | 27 +++ clang/test/CIR/Transforms/lifetime-check.cpp | 13 +- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 203 ++++++++++++++---- 3 files changed, 200 insertions(+), 43 deletions(-) create mode 100644 clang/test/CIR/Transforms/lifetime-check-remarks.cpp diff --git a/clang/test/CIR/Transforms/lifetime-check-remarks.cpp b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp new file mode 100644 index 000000000000..68df5ec89f2b --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: cir-tool %t.cir -cir-lifetime-check="remarks=pset" -verify-diagnostics -o %t-out.cir +// XFAIL: * + +int *p0() { + int *p = nullptr; + { + int x = 0; + p = &x; + *p = 42; + } + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} + return p; +} + +int *p1(bool b = true) { + int *p = nullptr; + if (b) { + int x = 0; + p = &x; + *p = 42; + } + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid, nullptr }}} + return p; +} diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index 5a54e7730c79..8f94f6a335e6 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -2,7 +2,7 @@ // RUN: cir-tool %t.cir -cir-lifetime-check -verify-diagnostics -o %t-out.cir // XFAIL: * -int *basic() { +int *p0() { int *p = nullptr; { int x = 0; @@ -12,3 +12,14 @@ int *basic() { *p = 42; // expected-warning {{use of invalid pointer 'p'}} return p; } + +int *p1(bool b = true) { + int *p = nullptr; + if (b) { + int x = 0; + p = &x; + *p = 42; + } + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + return p; +} diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 22fd7e6d5881..153bb3f07c55 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -12,6 +12,7 @@ #include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SmallSet.h" using namespace mlir; @@ -25,9 +26,13 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void runOnOperation() override; void checkOperation(Operation *op); + void checkFunc(Operation *op); void checkBlock(Block &block); + + void checkRegionWithScope(Region ®ion); void checkRegion(Region ®ion); + void checkIf(IfOp op); void checkAlloca(AllocaOp op); void checkStore(StoreOp op); void checkLoad(LoadOp op); @@ -83,7 +88,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { return val.getInt() == RHS.val.getInt(); } - void dump(); + void dump(llvm::raw_ostream &OS = llvm::errs()); static State getInvalid() { return {}; } static State getNullPtr() { return {NullPtr}; } @@ -91,11 +96,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { }; using PSetType = llvm::SmallSet; - // FIXME: this should be a ScopedHashTable for consistency. using PMapType = llvm::DenseMap; - PMapType pmap; SmallPtrSet ptrs; // Represents the scope context for IR operations (cir.scope, cir.if, @@ -137,14 +140,38 @@ struct LifetimeCheckPass : public LifetimeCheckBase { } }; + class PmapGuard { + LifetimeCheckPass &Pass; + PMapType *OldVal = nullptr; + + public: + PmapGuard(LifetimeCheckPass &lcp, PMapType *L) : Pass(lcp) { + if (Pass.currPmap) { + OldVal = Pass.currPmap; + } + Pass.currPmap = L; + } + + PmapGuard(const PmapGuard &) = delete; + PmapGuard &operator=(const PmapGuard &) = delete; + PmapGuard &operator=(PmapGuard &&other) = delete; + + void restore() { Pass.currPmap = OldVal; } + ~PmapGuard() { restore(); } + }; + LexicalScopeContext *currScope = nullptr; - void dumpPset(PSetType &pset); - void dumpPmap(); + PMapType *currPmap = nullptr; + PMapType &getPmap() { return *currPmap; } + + void joinPmaps(SmallVectorImpl &pmaps); + void printPset(PSetType &pset, llvm::raw_ostream &OS = llvm::errs()); + void dumpPmap(PMapType &pmap); }; } // namespace static StringRef getVarNameFromValue(mlir::Value v) { - if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(v.getDefiningOp())) + if (auto allocaOp = dyn_cast(v.getDefiningOp())) return allocaOp.getName(); assert(0 && "how did it get here?"); return ""; @@ -152,7 +179,7 @@ static StringRef getVarNameFromValue(mlir::Value v) { void LifetimeCheckPass::LexicalScopeGuard::cleanup() { auto *localScope = Pass.currScope; - auto &pmap = Pass.pmap; + auto &pmap = Pass.getPmap(); // If we are cleaning up at the function level, nothing // to do here cause we are past all possible deference points if (localScope->Depth == 0) @@ -189,25 +216,101 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { } void LifetimeCheckPass::checkBlock(Block &block) { - // Block main role is to hold a list of Operations: let's recurse. + // Block main role is to hold a list of Operations. for (Operation &op : block.getOperations()) checkOperation(&op); } void LifetimeCheckPass::checkRegion(Region ®ion) { - // FIXME: if else-then blocks have their own scope too. for (Block &block : region.getBlocks()) checkBlock(block); } +void LifetimeCheckPass::checkRegionWithScope(Region ®ion) { + // Add a new scope. Note that as part of the scope cleanup process + // we apply section 2.3 KILL(x) functionality, turning relevant + // references invalid. + LexicalScopeContext lexScope{}; + LexicalScopeGuard scopeGuard{*this, &lexScope}; + for (Block &block : region.getBlocks()) + checkBlock(block); +} + +void LifetimeCheckPass::checkFunc(Operation *op) { + // Add a new scope. Note that as part of the scope cleanup process + // we apply section 2.3 KILL(x) functionality, turning relevant + // references invalid. + LexicalScopeContext lexScope{}; + LexicalScopeGuard scopeGuard{*this, &lexScope}; + + // Create a new pmap for this function. + PMapType localPmap{}; + PmapGuard pmapGuard{*this, &localPmap}; + + for (Region ®ion : op->getRegions()) + checkRegion(region); + + // FIXME: store the pmap result for this function, we + // could do some interesting IPA stuff using this info. +} + +// The join operation between pmap as described in section 2.3. +// +// JOIN({pmap1,...,pmapN}) => +// { (p, pset1(p) U ... U psetN(p) | (p,*) U pmap1 U ... U pmapN }. +// +void LifetimeCheckPass::joinPmaps(SmallVectorImpl &pmaps) { + for (auto &mapEntry : getPmap()) { + auto &val = mapEntry.first; + + PSetType joinPset; + for (auto &pmapOp : pmaps) + llvm::set_union(joinPset, pmapOp[val]); + + getPmap()[val] = joinPset; + } +} + +void LifetimeCheckPass::checkIf(IfOp ifOp) { + // Both then and else create their own lexical scopes, take that into account + // while checking then/else. + // + // This is also the moment where pmaps are joined because flow forks: + // pmap(ifOp) = JOIN( pmap(then), pmap(else) ) + // + // To that intent the pmap is copied out before checking each region and + // pmap(ifOp) computed after analysing both paths. + SmallVector pmapOps; + + { + PMapType localThenPmap = getPmap(); + PmapGuard pmapGuard{*this, &localThenPmap}; + checkRegionWithScope(ifOp.getThenRegion()); + pmapOps.push_back(localThenPmap); + } + + // In case there's no 'else' branch, the 'else' pmap is the same as + // prior to the if condition. + if (!ifOp.getElseRegion().empty()) { + PMapType localElsePmap = getPmap(); + PmapGuard pmapGuard{*this, &localElsePmap}; + checkRegionWithScope(ifOp.getElseRegion()); + pmapOps.push_back(localElsePmap); + } else { + pmapOps.push_back(getPmap()); + } + + joinPmaps(pmapOps); +} + void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { auto addr = allocaOp.getAddr(); - assert(!pmap.count(addr) && "only one alloca for any given address"); + assert(!getPmap().count(addr) && "only one alloca for any given address"); - pmap[addr] = {}; + getPmap()[addr] = {}; if (!allocaOp.isPointerType()) { // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. - pmap[addr].insert(State::getLocalValue(addr)); + getPmap()[addr].insert(State::getLocalValue(addr)); currScope->localValues.push_back(addr); return; } @@ -215,7 +318,7 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { // 2.4.2 - When a non-parameter non-member Pointer p is declared, add // (p, {invalid}) to pmap. ptrs.insert(addr); - pmap[addr].insert(State::getInvalid()); + getPmap()[addr].insert(State::getInvalid()); // If other styles of initialization gets added, required to add support // here. @@ -234,7 +337,7 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { auto data = storeOp.getValue(); // 2.4.2 - If the declaration includes an initialization, the // initialization is treated as a separate operation - if (auto cstOp = dyn_cast<::mlir::cir::ConstantOp>(data.getDefiningOp())) { + if (auto cstOp = dyn_cast(data.getDefiningOp())) { assert(cstOp.isNullPtr() && "not implemented"); // 2.4.2 - If the initialization is default initialization or zero // initialization, set pset(p) = {null}; for example: @@ -242,15 +345,15 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { // int* p; => pset(p) == {invalid} // int* p{}; or string_view p; => pset(p) == {null}. // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} - pmap[addr] = {}; - pmap[addr].insert(State::getNullPtr()); + getPmap()[addr] = {}; + getPmap()[addr].insert(State::getNullPtr()); return; } - if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(data.getDefiningOp())) { + if (auto allocaOp = dyn_cast(data.getDefiningOp())) { // p = &x; - pmap[addr] = {}; - pmap[addr].insert(State::getLocalValue(data)); + getPmap()[addr] = {}; + getPmap()[addr].insert(State::getLocalValue(data)); return; } @@ -262,7 +365,7 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { void LifetimeCheckPass::checkLoad(LoadOp loadOp) { auto addr = loadOp.getAddr(); // Only interested in checking deference on top of pointer types. - if (!pmap.count(addr) || !ptrs.count(addr)) + if (!getPmap().count(addr) || !ptrs.count(addr)) return; if (!loadOp.getIsDeref()) @@ -270,7 +373,7 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { // 2.4.2 - On every dereference of a Pointer p, enforce that p is not // invalid. - if (!pmap[addr].count(State::getInvalid())) { + if (!getPmap()[addr].count(State::getInvalid())) { // FIXME: perhaps add a remark that we got a valid dereference return; } @@ -280,8 +383,15 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { // // Note that usually the use of the invalid address happens at the // load or store using the result of this loadOp. - emitWarning(loadOp.getLoc()) - << "use of invalid pointer '" << getVarNameFromValue(addr) << "'"; + StringRef varName = getVarNameFromValue(addr); + emitWarning(loadOp.getLoc()) << "use of invalid pointer '" << varName << "'"; + + llvm::SmallString<128> psetStr; + llvm::raw_svector_ostream Out(psetStr); + printPset(getPmap()[addr], Out); + + if (opts.emitRemarkPset()) + emitRemark(loadOp.getLoc()) << "pset => " << Out.str(); } void LifetimeCheckPass::checkOperation(Operation *op) { @@ -291,12 +401,14 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return; } - bool isLexicalScopeOp = - isa<::mlir::FuncOp>(op) || isa<::mlir::cir::ScopeOp>(op); - if (isLexicalScopeOp) { + if (isa(op)) { // Add a new scope. Note that as part of the scope cleanup process // we apply section 2.3 KILL(x) functionality, turning relevant // references invalid. + // + // No need to create a new pmap when entering a new scope since it + // doesn't cause control flow to diverge (as it does in presence + // of cir::IfOp). LexicalScopeContext lexScope{}; LexicalScopeGuard scopeGuard{*this, &lexScope}; for (Region ®ion : op->getRegions()) @@ -304,11 +416,15 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return; } - if (auto allocaOp = dyn_cast<::mlir::cir::AllocaOp>(op)) + if (isa(op)) + return checkFunc(op); + if (auto ifOp = dyn_cast(op)) + return checkIf(ifOp); + if (auto allocaOp = dyn_cast(op)) return checkAlloca(allocaOp); - if (auto storeOp = dyn_cast<::mlir::cir::StoreOp>(op)) + if (auto storeOp = dyn_cast(op)) return checkStore(storeOp); - if (auto loadOp = dyn_cast<::mlir::cir::LoadOp>(op)) + if (auto loadOp = dyn_cast(op)) return checkLoad(loadOp); } @@ -323,7 +439,7 @@ std::unique_ptr mlir::createLifetimeCheckPass() { } //===----------------------------------------------------------------------===// -// Dump helpers +// Dump & print helpers //===----------------------------------------------------------------------===// void LifetimeCheckPass::LexicalScopeContext::dumpLocalValues() { @@ -335,40 +451,43 @@ void LifetimeCheckPass::LexicalScopeContext::dumpLocalValues() { llvm::errs() << "}\n"; } -void LifetimeCheckPass::State::dump() { +void LifetimeCheckPass::State::dump(llvm::raw_ostream &OS) { switch (val.getInt()) { case Invalid: - llvm::errs() << "invalid"; + OS << "invalid"; break; case NullPtr: - llvm::errs() << "nullptr"; + OS << "nullptr"; break; case Global: - llvm::errs() << "global"; + OS << "global"; break; case LocalValue: - llvm::errs() << getVarNameFromValue(val.getPointer()); + OS << getVarNameFromValue(val.getPointer()); break; } } -void LifetimeCheckPass::dumpPset(PSetType &pset) { - llvm::errs() << "{ "; +void LifetimeCheckPass::printPset(PSetType &pset, llvm::raw_ostream &OS) { + OS << "{ "; + auto size = pset.size(); for (auto s : pset) { - s.dump(); - llvm::errs() << ", "; + s.dump(OS); + size--; + if (size > 0) + OS << ", "; } - llvm::errs() << "}"; + OS << " }"; } -void LifetimeCheckPass::dumpPmap() { +void LifetimeCheckPass::dumpPmap(PMapType &pmap) { llvm::errs() << "pmap {\n"; int entry = 0; for (auto &mapEntry : pmap) { llvm::errs() << " " << entry << ": " << getVarNameFromValue(mapEntry.first) << " " << "=> "; - dumpPset(mapEntry.second); + printPset(mapEntry.second); llvm::errs() << "\n"; entry++; } From 26460a130b745284cac644da58442a214ebb9c8f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 2 Mar 2022 16:29:59 -0800 Subject: [PATCH 0152/2301] [CIR][LifetimeCheck] Add a pass option for tracking pointer invalid/null history --- mlir/include/mlir/Dialect/CIR/Passes.td | 3 +++ .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 22 ++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/mlir/include/mlir/Dialect/CIR/Passes.td b/mlir/include/mlir/Dialect/CIR/Passes.td index f288e0eeeb3b..13e639bfb785 100644 --- a/mlir/include/mlir/Dialect/CIR/Passes.td +++ b/mlir/include/mlir/Dialect/CIR/Passes.td @@ -21,6 +21,9 @@ def LifetimeCheck : Pass<"cir-lifetime-check"> { let dependentDialects = ["cir::CIRDialect"]; let options = [ + ListOption<"historyList", "history", "std::string", + "List of history styles to emit as part of diagnostics." + " Supported styles: {all|null|invalid}", "llvm::cl::ZeroOrMore">, ListOption<"remarksList", "remarks", "std::string", "List of remark styles to enable as part of diagnostics." " Supported styles: {all|pset}", "llvm::cl::ZeroOrMore"> diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 153bb3f07c55..60aa19cfa2ee 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -38,7 +38,14 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkLoad(LoadOp op); struct Options { - enum : unsigned { None = 0, RemarkPset = 1, RemarkAll = 1 << 1 }; + enum : unsigned { + None = 0, + RemarkPset = 1, + RemarkAll = 1 << 1, + HistoryNull = 1 << 2, + HistoryInvalid = 1 << 3, + HistoryAll = 1 << 4, + }; unsigned val = None; void parseOptions(LifetimeCheckPass &pass) { @@ -48,10 +55,23 @@ struct LifetimeCheckPass : public LifetimeCheckBase { .Case("all", RemarkAll) .Default(None); } + for (auto &h : pass.historyList) { + val |= StringSwitch(h) + .Case("invalid", HistoryInvalid) + .Case("null", HistoryNull) + .Case("all", HistoryAll) + .Default(None); + } } bool emitRemarkAll() { return val & RemarkAll; } bool emitRemarkPset() { return emitRemarkAll() || val & RemarkPset; } + + bool emitHistoryAll() { return val & HistoryAll; } + bool emitHistoryNull() { return emitHistoryAll() || val & HistoryNull; } + bool emitHistoryInvalid() { + return emitHistoryAll() || val & HistoryInvalid; + } } opts; struct State { From e94de1a769db09c9b9768249f17f804169d5e624 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 2 Mar 2022 17:15:04 -0800 Subject: [PATCH 0153/2301] [CIR][LifetimeCheck] Add history data structures and improve clean up for every function --- mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 60aa19cfa2ee..b46a58c7bbb8 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -119,6 +119,11 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // FIXME: this should be a ScopedHashTable for consistency. using PMapType = llvm::DenseMap; + using PSetHistType = llvm::SetVector; + using PMapHistType = llvm::DenseMap; + PMapHistType pmapNullHist; + PMapHistType pmapInvalidHist; + SmallPtrSet ptrs; // Represents the scope context for IR operations (cir.scope, cir.if, @@ -257,6 +262,13 @@ void LifetimeCheckPass::checkRegionWithScope(Region ®ion) { } void LifetimeCheckPass::checkFunc(Operation *op) { + // FIXME: perhaps this should be a function pass, but for now make + // sure we reset the state before looking at other functions. + if (currPmap) + getPmap().clear(); + pmapNullHist.clear(); + pmapInvalidHist.clear(); + // Add a new scope. Note that as part of the scope cleanup process // we apply section 2.3 KILL(x) functionality, turning relevant // references invalid. From 397c30cd735f05e9c246cdc66cbd5374a282471c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 3 Mar 2022 20:50:01 -0800 Subject: [PATCH 0154/2301] [CIR] Fix source location of plain cir.scope's --- clang/lib/CIR/CIRGenModule.cpp | 13 ++++++++----- clang/test/CIR/CodeGen/basic.cpp | 4 +++- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 7e3bc934f14b..98adf8245bdd 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1246,11 +1246,12 @@ mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); // The if scope contains the full source range for IfStmt. auto scopeLoc = getLoc(S.getSourceRange()); - auto scopeLocBegin = getLoc(S.getSourceRange().getBegin()); - auto scopeLocEnd = getLoc(S.getSourceRange().getEnd()); builder.create( scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; LexicalScopeContext lexScope{builder, scopeLocBegin, scopeLocEnd}; LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; res = ifStmtBuilder(); @@ -1457,11 +1458,13 @@ mlir::LogicalResult CIRGenModule::buildCompoundStmt(const CompoundStmt &S) { // Add local scope to track new declared variables. SymTableScopeTy varScope(symbolTable); - auto locBegin = getLoc(S.getSourceRange().getBegin()); - auto locEnd = getLoc(S.getSourceRange().getEnd()); + auto scopeLoc = getLoc(S.getSourceRange()); builder.create( - locBegin, mlir::TypeRange(), /*scopeBuilder=*/ + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto locBegin = fusedLoc.getLocations()[0]; + auto locEnd = fusedLoc.getLocations()[1]; LexicalScopeContext lexScope{builder, locBegin, locEnd}; LexicalScopeGuard lexScopeGuard{*this, &lexScope}; res = compoundStmtBuilder(); diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 9674b6ba1a83..fda4f2e87d6c 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -45,7 +45,7 @@ int *p2() { // CHECK-NEXT: %7 = cir.cst(42 : i32) : i32 // CHECK-NEXT: %8 = cir.load deref %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.store %7, %8 : i32, cir.ptr -// CHECK-NEXT: } +// CHECK-NEXT: } loc(#loc15) // CHECK-NEXT: %2 = cir.cst(42 : i32) : i32 // CHECK-NEXT: %3 = cir.load deref %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.store %2, %3 : i32, cir.ptr @@ -131,3 +131,5 @@ int if1(int a, bool b, bool c) { // CHECK: cir.store %8, %3 : i32, cir.ptr // CHECK: } // CHECK: } + +// CHECK: #loc15 = loc(fused["{{.*}}basic.cpp":26:3, "{{.*}}basic.cpp":30:3]) From 82d161cc214611a6151c20a1e15a5684fceb61ac Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 3 Mar 2022 20:51:01 -0800 Subject: [PATCH 0155/2301] [CIR] Update the rules on emitting cir.yield: necessary for region with multiple blocks --- clang/test/CIR/IR/invalid.cir | 11 +++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 25 +++--- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 100 +++++++++++++++------ 3 files changed, 96 insertions(+), 40 deletions(-) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index ef637771a7ce..42ebc47700bd 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -26,3 +26,14 @@ func.func @if0() { } cir.return } + +// ----- + +func.func @yield0() { + %0 = cir.cst(true) : !cir.bool + cir.if %0 { // expected-error {{custom op 'cir.if' expected at least one block with cir.yield}} + cir.br ^a + ^a: + } + cir.return +} diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 102823751ad1..e9523161ec56 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -312,23 +312,23 @@ def IfOp : CIR_Op<"if", } else { ... } - ``` - - `cir.if` defines no values and the 'else' can be omitted. Every region - must be terminated by `cir.yield`, which is implicit in the asm form. - Example: + cir.if %c { + ... + } - ```mlir - cir.if %b { + cir.if %c { ... + br ^a + ^a: + cir.yield } ``` + + `cir.if` defines no values and the 'else' can be omitted. `cir.yield` must + explicitly terminate the region if it has more than one block. }]; let arguments = (ins CIR_BoolType:$condition); - - // FIXME: for now the "then" region only has one block, that should change - // soon as building CIR becomes more complex. let regions = (region AnyRegion:$thenRegion, AnyRegion:$elseRegion); // FIXME: unify these within CIR_Ops. @@ -368,10 +368,7 @@ def YieldOp : CIR_Op<"yield", [Pure, ReturnLike, Terminator, but it will be useful to represent lifetime extension in the future. In that case the operands must match the parent operation's results. - If the parent operation defines no values, then the "cir.yield" may be - left out in the custom syntax and the builders will insert one implicitly. - Otherwise, it has to be present in the syntax to indicate which values are - yielded. + `cir.yield` be present whenever the region has more than one block. }]; let arguments = (ins Variadic:$results); diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index bc7e3466944b..eeac8afac01c 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -31,7 +31,6 @@ using namespace mlir::cir; //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// - namespace { struct CIROpAsmDialectInterface : public OpAsmDialectInterface { using OpAsmDialectInterface::OpAsmDialectInterface; @@ -155,6 +154,7 @@ ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { result.regions.reserve(2); Region *thenRegion = result.addRegion(); Region *elseRegion = result.addRegion(); + auto loc = parser.getCurrentLocation(); auto &builder = parser.getBuilder(); OpAsmParser::UnresolvedOperand cond; @@ -164,29 +164,49 @@ ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { parser.resolveOperand(cond, boolType, result.operands)) return failure(); - auto getOrInsertTerminator = [&](Region *r) { - ::mlir::impl::ensureRegionTerminator( - *r, parser.getBuilder(), result.location, - [](OpBuilder &builder, Location loc) { - OperationState state(loc, YieldOp::getOperationName()); - YieldOp::build(builder, state); - return Operation::create(state); - }); + auto checkYieldTerminator = [&](Region *r) { + if (r->hasOneBlock()) { + ::mlir::impl::ensureRegionTerminator( + *r, parser.getBuilder(), result.location, + [](OpBuilder &builder, Location loc) { + OperationState state(loc, YieldOp::getOperationName()); + YieldOp::build(builder, state); + return Operation::create(state); + }); + return success(); + } + + // Soft verification: test that at least one block has a yield terminator. + bool foundYield = false; + for (Block &block : r->getBlocks()) { + if (block.empty()) + continue; + auto &op = block.back(); + if (op.hasTrait() && isa(op)) { + foundYield = true; + break; + } + } + if (!foundYield) { + parser.emitError(loc, "expected at least one block with cir.yield"); + return failure(); + } + return success(); }; // Parse the 'then' region. if (parser.parseRegion(*thenRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - assert(thenRegion->hasOneBlock() && "not yet implemented"); - getOrInsertTerminator(thenRegion); + if (checkYieldTerminator(thenRegion).failed()) + return failure(); // If we find an 'else' keyword then parse the 'else' region. if (!parser.parseOptionalKeyword("else")) { if (parser.parseRegion(*elseRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - assert(elseRegion->hasOneBlock() && "not yet implemented"); - getOrInsertTerminator(elseRegion); + if (checkYieldTerminator(elseRegion).failed()) + return failure(); } // Parse the optional attribute list. @@ -197,9 +217,10 @@ ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { void IfOp::print(OpAsmPrinter &p) { p << " " << getCondition() << " "; - p.printRegion(getThenRegion(), + auto &thenRegion = this->getThenRegion(); + p.printRegion(thenRegion, /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/false); + /*printBlockTerminators=*/!thenRegion.hasOneBlock()); // Print the 'else' regions if it exists and has a block. auto &elseRegion = this->getElseRegion(); @@ -207,7 +228,7 @@ void IfOp::print(OpAsmPrinter &p) { p << " else "; p.printRegion(elseRegion, /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/false); + /*printBlockTerminators=*/!thenRegion.hasOneBlock()); } p.printOptionalAttrDict(getOperation()->getAttrs()); @@ -292,18 +313,44 @@ ParseResult ScopeOp::parse(OpAsmParser &parser, OperationState &result) { // Create one region within 'scope'. result.regions.reserve(1); Region *scopeRegion = result.addRegion(); + auto loc = parser.getCurrentLocation(); // Parse the scope region. if (parser.parseRegion(*scopeRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - assert(scopeRegion->hasOneBlock() && "not yet implemented"); - ::mlir::impl::ensureRegionTerminator( - *scopeRegion, parser.getBuilder(), result.location, - [](OpBuilder &builder, Location loc) { - OperationState state(loc, YieldOp::getOperationName()); - YieldOp::build(builder, state); - return Operation::create(state); - }); + + auto checkYieldTerminator = [&](Region *r) { + if (r->hasOneBlock()) { + ::mlir::impl::ensureRegionTerminator( + *r, parser.getBuilder(), result.location, + [](OpBuilder &builder, Location loc) { + OperationState state(loc, YieldOp::getOperationName()); + YieldOp::build(builder, state); + return Operation::create(state); + }); + return success(); + } + + // Soft verification: test that at least one block has a yield terminator. + bool foundYield = false; + for (Block &block : r->getBlocks()) { + if (block.empty()) + continue; + auto &op = block.back(); + if (op.hasTrait() && isa(op)) { + foundYield = true; + break; + } + } + if (!foundYield) { + parser.emitError(loc, "expected at least one block with cir.yield"); + return failure(); + } + return success(); + }; + + if (checkYieldTerminator(scopeRegion).failed()) + return failure(); // Parse the optional attribute list. if (parser.parseOptionalAttrDict(result.attributes)) @@ -313,9 +360,10 @@ ParseResult ScopeOp::parse(OpAsmParser &parser, OperationState &result) { void ScopeOp::print(OpAsmPrinter &p) { p << ' '; - p.printRegion(getScopeRegion(), + auto &scopeRegion = this->getScopeRegion(); + p.printRegion(scopeRegion, /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/false); + /*printBlockTerminators=*/!scopeRegion.hasOneBlock()); p.printOptionalAttrDict(getOperation()->getAttrs()); } From 79fc3c70c10277baccaba5e7cbf7d34621454581 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 3 Mar 2022 21:38:14 -0800 Subject: [PATCH 0156/2301] [CIR] Add at least 2 at most 4 locations to ifOp (and finally track then/else properly) --- clang/lib/CIR/CIRGenModule.cpp | 34 +++++++++++++++-------- clang/test/CIR/CodeGen/sourcelocation.cpp | 2 +- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 98adf8245bdd..206e2d1ec949 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1189,17 +1189,19 @@ mlir::LogicalResult CIRGenModule::buildIfOnBoolExpr(const Expr *cond, loc, condV, elseS, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto bLoc = getLoc(thenS->getSourceRange().getBegin()); - auto eLoc = getLoc(thenS->getSourceRange().getEnd()); - LexicalScopeContext lexScope{builder, bLoc, eLoc}; + auto fusedLoc = loc.cast(); + auto locBegin = fusedLoc.getLocations()[0]; + auto locEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{builder, locBegin, locEnd}; LexicalScopeGuard lexThenGuard{*this, &lexScope}; resThen = buildStmt(thenS, /*useCurrentScope=*/true); }, /*elseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto bLoc = getLoc(elseS->getSourceRange().getBegin()); - auto eLoc = getLoc(elseS->getSourceRange().getEnd()); - LexicalScopeContext lexScope{builder, bLoc, eLoc}; + auto fusedLoc = loc.cast(); + auto locBegin = fusedLoc.getLocations()[2]; + auto locEnd = fusedLoc.getLocations()[3]; + LexicalScopeContext lexScope{builder, locBegin, locEnd}; LexicalScopeGuard lexElseGuard{*this, &lexScope}; resElse = buildStmt(elseS, /*useCurrentScope=*/true); }); @@ -1234,12 +1236,20 @@ mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { } // TODO: PGO and likelihood. - // The mlir::Location for cir.if skips the init/cond part of IfStmt, - // and effectively spans from "then-begin" to "else-end||then-end". - auto ifLocStart = getLoc(S.getThen()->getSourceRange().getBegin()); - auto ifLocEnd = getLoc(S.getSourceRange().getEnd()); - return buildIfOnBoolExpr(S.getCond(), getLoc(ifLocStart, ifLocEnd), - S.getThen(), S.getElse()); + // Attempt to be more accurate as possible with IfOp location, generate + // one fused location that has either 2 or 4 total locations, depending + // on else's availability. + SmallVector ifLocs; + mlir::Attribute metadata; + ifLocs.push_back(getLoc(S.getThen()->getSourceRange().getBegin())); + ifLocs.push_back(getLoc(S.getThen()->getSourceRange().getEnd())); + if (S.getElse()) { + ifLocs.push_back(getLoc(S.getElse()->getSourceRange().getBegin())); + ifLocs.push_back(getLoc(S.getElse()->getSourceRange().getEnd())); + } + + auto ifLoc = mlir::FusedLoc::get(ifLocs, metadata, builder.getContext()); + return buildIfOnBoolExpr(S.getCond(), ifLoc, S.getThen(), S.getElse()); }; // TODO: Add a new scoped symbol table. diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 52300180a3a5..be4dd0650c68 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -51,7 +51,7 @@ int s0(int a, int b) { // CHECK: #[[loc10]] = loc("{{.*}}sourcelocation.cpp":6:7) // CHECK: #[[loc11]] = loc("{{.*}}sourcelocation.cpp":6:11) // CHECK: #[[loc12]] = loc(fused["{{.*}}sourcelocation.cpp":6:7, "{{.*}}sourcelocation.cpp":6:11]) -// CHECK: #[[loc13]] = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":9:9]) +// CHECK: #[[loc13]] = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9, "{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) // CHECK: #[[loc14]] = loc("{{.*}}sourcelocation.cpp":7:9) // CHECK: #[[loc15]] = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9]) // CHECK: #[[loc16]] = loc("{{.*}}sourcelocation.cpp":9:9) From baba31af0ee5ccc868c987fcf6d4c9e8f8f7e6dc Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 3 Mar 2022 21:52:18 -0800 Subject: [PATCH 0157/2301] [CIR][LifetimeCheck] Add history tracking to point where pointer got invalidated This uses a "note" and in the future should also show which local variable lifetime's ended in the scope. --- clang/test/CIR/Transforms/lifetime-check.cpp | 6 +- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 60 ++++++++++++++++--- 2 files changed, 55 insertions(+), 11 deletions(-) diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index 8f94f6a335e6..959853d3988a 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: cir-tool %t.cir -cir-lifetime-check -verify-diagnostics -o %t-out.cir +// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid" -verify-diagnostics -o %t-out.cir // XFAIL: * int *p0() { @@ -8,7 +8,7 @@ int *p0() { int x = 0; p = &x; *p = 42; - } + } // expected-note {{invalidated at end of scope}} *p = 42; // expected-warning {{use of invalid pointer 'p'}} return p; } @@ -19,7 +19,7 @@ int *p1(bool b = true) { int x = 0; p = &x; *p = 42; - } + } // expected-note {{invalidated at end of scope}} *p = 42; // expected-warning {{use of invalid pointer 'p'}} return p; } diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index b46a58c7bbb8..9a820f5f5629 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -131,7 +131,11 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // local scope. struct LexicalScopeContext { unsigned Depth = 0; - LexicalScopeContext() = default; + LexicalScopeContext() = delete; + + llvm::PointerUnion parent; + LexicalScopeContext(mlir::Region *R) : parent(R) {} + LexicalScopeContext(mlir::Operation *Op) : parent(Op) {} ~LexicalScopeContext() = default; // Track all local values added in this scope @@ -202,6 +206,34 @@ static StringRef getVarNameFromValue(mlir::Value v) { return ""; } +static Location getEndLoc(Location loc, int idx = 1) { + auto fusedLoc = loc.dyn_cast(); + if (!fusedLoc) + return loc; + return fusedLoc.getLocations()[idx]; +} + +static Location getEndLocForHist(Operation *Op) { + return getEndLoc(Op->getLoc()); +} + +static Location getEndLocForHist(Region *R) { + auto ifOp = dyn_cast(R->getParentOp()); + assert(ifOp && "what other regions create their own scope?"); + if (&ifOp.getThenRegion() == R) + return getEndLoc(ifOp.getLoc()); + return getEndLoc(ifOp.getLoc(), /*idx=*/3); +} + +static Location getEndLocForHist(LifetimeCheckPass::LexicalScopeContext &lsc) { + assert(!lsc.parent.isNull() && "shouldn't be null"); + if (lsc.parent.is()) + return getEndLocForHist(lsc.parent.get()); + assert(lsc.parent.is() && + "Only support operation beyond this point"); + return getEndLocForHist(lsc.parent.get()); +} + void LifetimeCheckPass::LexicalScopeGuard::cleanup() { auto *localScope = Pass.currScope; auto &pmap = Pass.getPmap(); @@ -216,13 +248,15 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { // p2. for (auto value : localScope->localValues) { for (auto &mapEntry : pmap) { + auto ptr = mapEntry.first; // We are deleting this entry anyways, nothing to do here. - if (value == mapEntry.first) + if (value == ptr) continue; // If the local value is part of this pset, it means // we need to invalidate it, otherwise keep searching. + // FIXME: add support for x', x'', etc... auto &pset = mapEntry.second; State valState = State::getLocalValue(value); if (!pset.contains(valState)) @@ -230,10 +264,11 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { // Erase the reference and mark this invalid. // FIXME: add a way to just mutate the state. - // FIXME: right now we are piling up invalids, if it's already - // invalid we don't need to add again? only if tracking the path. pset.erase(valState); pset.insert(State::getInvalid()); + if (!Pass.pmapInvalidHist.count(ptr)) + Pass.pmapInvalidHist[ptr] = {}; + Pass.pmapInvalidHist[ptr].insert(getEndLocForHist(*Pass.currScope)); } // Delete the local value from pmap, since its gone now. pmap.erase(value); @@ -255,7 +290,7 @@ void LifetimeCheckPass::checkRegionWithScope(Region ®ion) { // Add a new scope. Note that as part of the scope cleanup process // we apply section 2.3 KILL(x) functionality, turning relevant // references invalid. - LexicalScopeContext lexScope{}; + LexicalScopeContext lexScope{®ion}; LexicalScopeGuard scopeGuard{*this, &lexScope}; for (Block &block : region.getBlocks()) checkBlock(block); @@ -272,7 +307,7 @@ void LifetimeCheckPass::checkFunc(Operation *op) { // Add a new scope. Note that as part of the scope cleanup process // we apply section 2.3 KILL(x) functionality, turning relevant // references invalid. - LexicalScopeContext lexScope{}; + LexicalScopeContext lexScope{op}; LexicalScopeGuard scopeGuard{*this, &lexScope}; // Create a new pmap for this function. @@ -416,12 +451,18 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { // Note that usually the use of the invalid address happens at the // load or store using the result of this loadOp. StringRef varName = getVarNameFromValue(addr); - emitWarning(loadOp.getLoc()) << "use of invalid pointer '" << varName << "'"; + auto D = emitWarning(loadOp.getLoc()); + D << "use of invalid pointer '" << varName << "'"; llvm::SmallString<128> psetStr; llvm::raw_svector_ostream Out(psetStr); printPset(getPmap()[addr], Out); + if (opts.emitHistoryInvalid()) { + for (auto note : pmapInvalidHist[addr]) + D.attachNote(note) << "invalidated at end of scope"; + } + if (opts.emitRemarkPset()) emitRemark(loadOp.getLoc()) << "pset => " << Out.str(); } @@ -441,7 +482,10 @@ void LifetimeCheckPass::checkOperation(Operation *op) { // No need to create a new pmap when entering a new scope since it // doesn't cause control flow to diverge (as it does in presence // of cir::IfOp). - LexicalScopeContext lexScope{}; + // + // Also note that for dangling pointers coming from if init stmts + // should be caught just fine, given that a ScopeOp embraces a IfOp. + LexicalScopeContext lexScope{op}; LexicalScopeGuard scopeGuard{*this, &lexScope}; for (Region ®ion : op->getRegions()) checkRegion(region); From 896eb8125719422d0daa005d3b8f4aa8ac8b12ff Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 4 Mar 2022 15:39:25 -0800 Subject: [PATCH 0158/2301] [CIR][LifetimeCheck] When emitting invalid history, include the pointee name --- clang/test/CIR/Transforms/lifetime-check.cpp | 4 ++-- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 22 ++++++++++++------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index 959853d3988a..dfd05687d1fe 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -8,7 +8,7 @@ int *p0() { int x = 0; p = &x; *p = 42; - } // expected-note {{invalidated at end of scope}} + } // expected-note {{pointee 'x' invalidated at end of scope}} *p = 42; // expected-warning {{use of invalid pointer 'p'}} return p; } @@ -19,7 +19,7 @@ int *p1(bool b = true) { int x = 0; p = &x; *p = 42; - } // expected-note {{invalidated at end of scope}} + } // expected-note {{pointee 'x' invalidated at end of scope}} *p = 42; // expected-warning {{use of invalid pointer 'p'}} return p; } diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 9a820f5f5629..abdc83f75b82 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -119,7 +119,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // FIXME: this should be a ScopedHashTable for consistency. using PMapType = llvm::DenseMap; - using PSetHistType = llvm::SetVector; + using PSetHistType = llvm::SetVector>; using PMapHistType = llvm::DenseMap; PMapHistType pmapNullHist; PMapHistType pmapInvalidHist; @@ -246,19 +246,19 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { // in the pmap with invalid. For example, if pmap is {(p1,{a}), (p2,{a'})}, // KILL(a') would invalidate only p2, and KILL(a) would invalidate both p1 and // p2. - for (auto value : localScope->localValues) { + for (auto pointee : localScope->localValues) { for (auto &mapEntry : pmap) { auto ptr = mapEntry.first; // We are deleting this entry anyways, nothing to do here. - if (value == ptr) + if (pointee == ptr) continue; // If the local value is part of this pset, it means // we need to invalidate it, otherwise keep searching. // FIXME: add support for x', x'', etc... auto &pset = mapEntry.second; - State valState = State::getLocalValue(value); + State valState = State::getLocalValue(pointee); if (!pset.contains(valState)) continue; @@ -268,10 +268,11 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { pset.insert(State::getInvalid()); if (!Pass.pmapInvalidHist.count(ptr)) Pass.pmapInvalidHist[ptr] = {}; - Pass.pmapInvalidHist[ptr].insert(getEndLocForHist(*Pass.currScope)); + Pass.pmapInvalidHist[ptr].insert( + std::make_pair(getEndLocForHist(*Pass.currScope), pointee)); } // Delete the local value from pmap, since its gone now. - pmap.erase(value); + pmap.erase(pointee); } } @@ -459,8 +460,13 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { printPset(getPmap()[addr], Out); if (opts.emitHistoryInvalid()) { - for (auto note : pmapInvalidHist[addr]) - D.attachNote(note) << "invalidated at end of scope"; + for (auto &info : pmapInvalidHist[addr]) { + auto ¬e = info.first; + auto &pointee = info.second; + StringRef pointeeName = getVarNameFromValue(pointee); + D.attachNote(note) << "pointee '" << pointeeName + << "' invalidated at end of scope"; + } } if (opts.emitRemarkPset()) From 71c2951c0ccf01bc6bd1d7bcd13ca4529dc325d6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 4 Mar 2022 18:25:22 -0800 Subject: [PATCH 0159/2301] [CIR] Fix test --- clang/test/CIR/CodeGen/basic.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index fda4f2e87d6c..684796ac249a 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -45,7 +45,7 @@ int *p2() { // CHECK-NEXT: %7 = cir.cst(42 : i32) : i32 // CHECK-NEXT: %8 = cir.load deref %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.store %7, %8 : i32, cir.ptr -// CHECK-NEXT: } loc(#loc15) +// CHECK-NEXT: } loc(#[[loc15:loc[0-9]+]]) // CHECK-NEXT: %2 = cir.cst(42 : i32) : i32 // CHECK-NEXT: %3 = cir.load deref %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.store %2, %3 : i32, cir.ptr From 79989ee452236a08cad4a3c2eb12220010dd7b1a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 4 Mar 2022 18:40:04 -0800 Subject: [PATCH 0160/2301] [CIR][LifetimeCheck] Add support for null history notes --- clang/test/CIR/Transforms/lifetime-check.cpp | 4 +- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 58 ++++++++++++------- 2 files changed, 38 insertions(+), 24 deletions(-) diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index dfd05687d1fe..acb622df4a1e 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid" -verify-diagnostics -o %t-out.cir +// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null" -verify-diagnostics -o %t-out.cir // XFAIL: * int *p0() { @@ -14,7 +14,7 @@ int *p0() { } int *p1(bool b = true) { - int *p = nullptr; + int *p = nullptr; // expected-note {{invalidated here}} if (b) { int x = 0; p = &x; diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index abdc83f75b82..768b28aff949 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -119,10 +119,14 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // FIXME: this should be a ScopedHashTable for consistency. using PMapType = llvm::DenseMap; - using PSetHistType = llvm::SetVector>; - using PMapHistType = llvm::DenseMap; - PMapHistType pmapNullHist; - PMapHistType pmapInvalidHist; + using PSetInvalidHistType = + llvm::SetVector>; + using PMapInvalidHistType = llvm::DenseMap; + PMapInvalidHistType pmapInvalidHist; + + using PMapNullHistType = + llvm::DenseMap>; + PMapNullHistType pmapNullHist; SmallPtrSet ptrs; @@ -390,7 +394,9 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { // If other styles of initialization gets added, required to add support // here. - assert(allocaOp.getInitAttr().getValue() == mlir::cir::InitStyle::cinit && + assert((allocaOp.getInitAttr().getValue() == mlir::cir::InitStyle::cinit || + allocaOp.getInitAttr().getValue() == + mlir::cir::InitStyle::uninitialized) && "other init styles tbd"); } @@ -407,20 +413,22 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { // initialization is treated as a separate operation if (auto cstOp = dyn_cast(data.getDefiningOp())) { assert(cstOp.isNullPtr() && "not implemented"); + assert(getPmap().count(addr) && "address should always be valid"); // 2.4.2 - If the initialization is default initialization or zero // initialization, set pset(p) = {null}; for example: // // int* p; => pset(p) == {invalid} // int* p{}; or string_view p; => pset(p) == {null}. // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} - getPmap()[addr] = {}; + getPmap()[addr].clear(); getPmap()[addr].insert(State::getNullPtr()); + pmapNullHist[addr] = storeOp.getValue().getLoc(); return; } if (auto allocaOp = dyn_cast(data.getDefiningOp())) { // p = &x; - getPmap()[addr] = {}; + getPmap()[addr].clear(); getPmap()[addr].insert(State::getLocalValue(data)); return; } @@ -433,33 +441,29 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { void LifetimeCheckPass::checkLoad(LoadOp loadOp) { auto addr = loadOp.getAddr(); // Only interested in checking deference on top of pointer types. + // Note that usually the use of the invalid address happens at the + // load or store using the result of this loadOp. if (!getPmap().count(addr) || !ptrs.count(addr)) return; if (!loadOp.getIsDeref()) return; - // 2.4.2 - On every dereference of a Pointer p, enforce that p is not - // invalid. - if (!getPmap()[addr].count(State::getInvalid())) { - // FIXME: perhaps add a remark that we got a valid dereference + bool hasInvalid = getPmap()[addr].count(State::getInvalid()); + bool hasNullptr = getPmap()[addr].count(State::getNullPtr()); + + // 2.4.2 - On every dereference of a Pointer p, enforce that p is valid. + if (!hasInvalid && !hasNullptr) return; - } - // Looks like we found a invalid path leading to this deference point, + // Looks like we found a bad path leading to this deference point, // diagnose it. - // - // Note that usually the use of the invalid address happens at the - // load or store using the result of this loadOp. StringRef varName = getVarNameFromValue(addr); auto D = emitWarning(loadOp.getLoc()); D << "use of invalid pointer '" << varName << "'"; - llvm::SmallString<128> psetStr; - llvm::raw_svector_ostream Out(psetStr); - printPset(getPmap()[addr], Out); - - if (opts.emitHistoryInvalid()) { + if (hasInvalid && opts.emitHistoryInvalid()) { + assert(pmapInvalidHist.count(addr) && "expected invalid hist"); for (auto &info : pmapInvalidHist[addr]) { auto ¬e = info.first; auto &pointee = info.second; @@ -469,8 +473,18 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { } } - if (opts.emitRemarkPset()) + if (hasNullptr && opts.emitHistoryNull()) { + assert(pmapNullHist.count(addr) && "expected nullptr hist"); + auto ¬e = pmapNullHist[addr]; + D.attachNote(*note) << "invalidated here"; + } + + if (opts.emitRemarkPset()) { + llvm::SmallString<128> psetStr; + llvm::raw_svector_ostream Out(psetStr); + printPset(getPmap()[addr], Out); emitRemark(loadOp.getLoc()) << "pset => " << Out.str(); + } } void LifetimeCheckPass::checkOperation(Operation *op) { From 503ab8227889297889a7fc5e98354706107b34ef Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 4 Mar 2022 18:51:56 -0800 Subject: [PATCH 0161/2301] [CIR][LifetimeCheck] Change history tracking to only keep the last --- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 768b28aff949..28eac27f7fe4 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -119,9 +119,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // FIXME: this should be a ScopedHashTable for consistency. using PMapType = llvm::DenseMap; - using PSetInvalidHistType = - llvm::SetVector>; - using PMapInvalidHistType = llvm::DenseMap; + using PMapInvalidHistType = + llvm::DenseMap, mlir::Value>>; PMapInvalidHistType pmapInvalidHist; using PMapNullHistType = @@ -270,10 +270,8 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { // FIXME: add a way to just mutate the state. pset.erase(valState); pset.insert(State::getInvalid()); - if (!Pass.pmapInvalidHist.count(ptr)) - Pass.pmapInvalidHist[ptr] = {}; - Pass.pmapInvalidHist[ptr].insert( - std::make_pair(getEndLocForHist(*Pass.currScope), pointee)); + Pass.pmapInvalidHist[ptr] = + std::make_pair(getEndLocForHist(*Pass.currScope), pointee); } // Delete the local value from pmap, since its gone now. pmap.erase(pointee); @@ -464,13 +462,12 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { if (hasInvalid && opts.emitHistoryInvalid()) { assert(pmapInvalidHist.count(addr) && "expected invalid hist"); - for (auto &info : pmapInvalidHist[addr]) { - auto ¬e = info.first; - auto &pointee = info.second; - StringRef pointeeName = getVarNameFromValue(pointee); - D.attachNote(note) << "pointee '" << pointeeName - << "' invalidated at end of scope"; - } + auto &info = pmapInvalidHist[addr]; + auto ¬e = info.first; + auto &pointee = info.second; + StringRef pointeeName = getVarNameFromValue(pointee); + D.attachNote(note) << "pointee '" << pointeeName + << "' invalidated at end of scope"; } if (hasNullptr && opts.emitHistoryNull()) { From a6ae967ca7269e22de0d7c1e60a9e12fa0cbf90b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 4 Mar 2022 19:05:29 -0800 Subject: [PATCH 0162/2301] [CIR][LifetimeCheck] Improve invalid loc to account for uninitialized pointers --- clang/test/CIR/Transforms/lifetime-check.cpp | 16 ++++++++++++++++ .../lib/Dialect/CIR/Transforms/LifetimeCheck.cpp | 16 +++++++++++----- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index acb622df4a1e..ab77d3440e01 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -23,3 +23,19 @@ int *p1(bool b = true) { *p = 42; // expected-warning {{use of invalid pointer 'p'}} return p; } + +void p2() { + int *p = nullptr; // expected-note {{invalidated here}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} + +void p3() { + int *p; + p = nullptr; // expected-note {{invalidated here}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} + +void p4() { + int *p; // expected-note {{uninitialized here}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 28eac27f7fe4..8e16373321a9 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -120,8 +120,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { using PMapType = llvm::DenseMap; using PMapInvalidHistType = - llvm::DenseMap, mlir::Value>>; + llvm::DenseMap, + std::optional>>; PMapInvalidHistType pmapInvalidHist; using PMapNullHistType = @@ -389,6 +389,7 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { // (p, {invalid}) to pmap. ptrs.insert(addr); getPmap()[addr].insert(State::getInvalid()); + pmapInvalidHist[addr] = std::make_pair(allocaOp.getLoc(), std::nullopt); // If other styles of initialization gets added, required to add support // here. @@ -465,9 +466,14 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { auto &info = pmapInvalidHist[addr]; auto ¬e = info.first; auto &pointee = info.second; - StringRef pointeeName = getVarNameFromValue(pointee); - D.attachNote(note) << "pointee '" << pointeeName - << "' invalidated at end of scope"; + + if (pointee.has_value()) { + StringRef pointeeName = getVarNameFromValue(*pointee); + D.attachNote(note) << "pointee '" << pointeeName + << "' invalidated at end of scope"; + } else { + D.attachNote(note) << "uninitialized here"; + } } if (hasNullptr && opts.emitHistoryNull()) { From df30a2e3f632d5985b5638107c6f9b4af355c174 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 27 Feb 2022 07:58:02 -0500 Subject: [PATCH 0163/2301] [CIR][NFC] Add helper classes for argument arrangement for call instructions This diff adds from clang codegen the CIRGenFunctionInfoArgInfo, RequiredArgs and CIRGenFunctionInfo. These are almost entirely clones of the codegen types with the names ported to CIR. --- clang/lib/CIR/CIRGenFunctionInfo.h | 235 +++++++++++++++++++++++++++++ 1 file changed, 235 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunctionInfo.h b/clang/lib/CIR/CIRGenFunctionInfo.h index 25b12df156c8..84ab86fd4a35 100644 --- a/clang/lib/CIR/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CIRGenFunctionInfo.h @@ -175,6 +175,241 @@ class ABIArgInfo { } }; +struct CIRGenFunctionInfoArgInfo { + clang::CanQualType type; + ABIArgInfo info; +}; + +/// A class for recording the number of arguments that a function signature +/// requires. +class RequiredArgs { + /// The number of required arguments, or ~0 if the signature does not permit + /// optional arguments. + unsigned NumRequired; + +public: + enum All_t { All }; + + RequiredArgs(All_t _) : NumRequired(~0U) {} + explicit RequiredArgs(unsigned n) : NumRequired(n) { assert(n != ~0U); } + + unsigned getOpaqueData() const { return NumRequired; } + + bool allowsOptionalArgs() const { return NumRequired != ~0U; } + + /// Compute the arguments required by the given formal prototype, given that + /// there may be some additional, non-formal arguments in play. + /// + /// If FD is not null, this will consider pass_object_size params in FD. + static RequiredArgs + forPrototypePlus(const clang::FunctionProtoType *prototype, + unsigned additional) { + assert(!prototype->isVariadic() && "NYI"); + return All; + } + + static RequiredArgs + forPrototypePlus(clang::CanQual prototype, + unsigned additional) { + return forPrototypePlus(prototype.getTypePtr(), additional); + } + + unsigned getNumRequiredArgs() const { + assert(allowsOptionalArgs()); + return NumRequired; + } +}; + +class CIRGenFunctionInfo final + : public llvm::FoldingSetNode, + private llvm::TrailingObjects< + CIRGenFunctionInfo, CIRGenFunctionInfoArgInfo, + clang::FunctionProtoType::ExtParameterInfo> { + + typedef CIRGenFunctionInfoArgInfo ArgInfo; + typedef clang::FunctionProtoType::ExtParameterInfo ExtParameterInfo; + + /// The cir::CallingConv to use for this function (as specified by the user). + unsigned CallingConvention : 8; + + /// The cir::CallingConv to actually use for this function, which may depend + /// on the ABI. + unsigned EffectiveCallingConvention : 8; + + /// The clang::CallingConv that this was originally created with. + unsigned ASTCallingConvention : 6; + + /// Whether this is an instance method. + unsigned InstanceMethod : 1; + + /// Whether this is a chain call. + unsigned ChainCall : 1; + + /// Whether this function is a CMSE nonsecure call + unsigned CmseNSCall : 1; + + /// Whether this function is noreturn. + unsigned NoReturn : 1; + + /// Whether this function is returns-retained. + unsigned ReturnsRetained : 1; + + /// Whether this function saved caller registers. + unsigned NoCallerSavedRegs : 1; + + /// How many arguments to pass inreg. + unsigned HasRegParm : 1; + unsigned RegParm : 3; + + /// Whether this function has nocf_check attribute. + unsigned NoCfCheck : 1; + + RequiredArgs Required; + + /// The struct representing all arguments passed in memory. Only used when + /// passing non-trivial types with inalloca. Not part of the profile. + /// TODO: think about modeling this properly, this is just a dumb subsitution + /// for now since we arent supporting anything other than arguments in + /// registers atm + mlir::cir::StructType *ArgStruct; + unsigned ArgStructAlign : 31; + unsigned HasExtParameterInfos : 1; + + unsigned NumArgs; + + ArgInfo *getArgsBuffer() { return getTrailingObjects(); } + + const ArgInfo *getArgsBuffer() const { return getTrailingObjects(); } + + ExtParameterInfo *getExtParameterInfosBuffer() { + return getTrailingObjects(); + } + + const ExtParameterInfo *getExtParameterInfosBuffer() const { + return getTrailingObjects(); + } + + CIRGenFunctionInfo() : Required(RequiredArgs::All) {} + +public: + static CIRGenFunctionInfo *create(unsigned cirCC, bool instanceMethod, + bool chainCall, + const clang::FunctionType::ExtInfo &extInfo, + llvm::ArrayRef paramInfos, + clang::CanQualType resultType, + llvm::ArrayRef argTypes, + RequiredArgs required); + void operator delete(void *p) { ::operator delete(p); } + + // Friending class TrailingObjects is apparantly not good enough for MSVC, so + // these have to be public. + friend class TrailingObjects; + size_t numTrailingObjects(OverloadToken) const { + return NumArgs + 1; + } + size_t numTrailingObjects(OverloadToken) const { + return (HasExtParameterInfos ? NumArgs : 0); + } + + using const_arg_iterator = const ArgInfo *; + using arg_iterator = ArgInfo *; + + static void Profile(llvm::FoldingSetNodeID &ID, bool InstanceMethod, + bool ChainCall, const clang::FunctionType::ExtInfo &info, + llvm::ArrayRef paramInfos, + RequiredArgs required, clang::CanQualType resultType, + llvm::ArrayRef argTypes) { + ID.AddInteger(info.getCC()); + ID.AddBoolean(InstanceMethod); + ID.AddBoolean(info.getNoReturn()); + ID.AddBoolean(info.getProducesResult()); + ID.AddBoolean(info.getNoCallerSavedRegs()); + ID.AddBoolean(info.getHasRegParm()); + ID.AddBoolean(info.getRegParm()); + ID.AddBoolean(info.getNoCfCheck()); + ID.AddBoolean(info.getCmseNSCall()); + ID.AddBoolean(required.getOpaqueData()); + ID.AddBoolean(!paramInfos.empty()); + if (!paramInfos.empty()) { + for (auto paramInfo : paramInfos) + ID.AddInteger(paramInfo.getOpaqueValue()); + } + resultType.Profile(ID); + for (auto i : argTypes) + i.Profile(ID); + } + + /// getASTCallingConvention() - Return the AST-specified calling convention + clang::CallingConv getASTCallingConvention() const { + return clang::CallingConv(ASTCallingConvention); + } + + void Profile(llvm::FoldingSetNodeID &ID) { + ID.AddInteger(getASTCallingConvention()); + ID.AddBoolean(InstanceMethod); + ID.AddBoolean(ChainCall); + ID.AddBoolean(NoReturn); + ID.AddBoolean(ReturnsRetained); + ID.AddBoolean(NoCallerSavedRegs); + ID.AddBoolean(HasRegParm); + ID.AddBoolean(RegParm); + ID.AddBoolean(NoCfCheck); + ID.AddBoolean(CmseNSCall); + ID.AddInteger(Required.getOpaqueData()); + ID.AddBoolean(HasExtParameterInfos); + if (HasExtParameterInfos) { + for (auto paramInfo : getExtParameterInfos()) + ID.AddInteger(paramInfo.getOpaqueValue()); + } + getReturnType().Profile(ID); + for (const auto &I : arguments()) + I.type.Profile(ID); + } + + llvm::MutableArrayRef arguments() { + return llvm::MutableArrayRef(arg_begin(), NumArgs); + } + llvm::ArrayRef arguments() const { + return llvm::ArrayRef(arg_begin(), NumArgs); + } + + const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; } + const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; } + arg_iterator arg_begin() { return getArgsBuffer() + 1; } + arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; } + + unsigned arg_size() const { return NumArgs; } + + llvm::ArrayRef getExtParameterInfos() const { + if (!HasExtParameterInfos) + return {}; + return llvm::makeArrayRef(getExtParameterInfosBuffer(), NumArgs); + } + + /// getCallingConvention - REturn the user specified calling convention, which + /// has been translated into a CIR CC. + unsigned getCallingConvention() const { return CallingConvention; } + + clang::CanQualType getReturnType() const { return getArgsBuffer()[0].type; } + + ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; } + const ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; } + + bool isChainCall() const { return ChainCall; } + + bool isVariadic() const { return Required.allowsOptionalArgs(); } + RequiredArgs getRequiredArgs() const { return Required; } + unsigned getNumRequiredArgs() const { + assert(!isVariadic() && "Variadic NYI"); + return isVariadic() ? getRequiredArgs().getNumRequiredArgs() : arg_size(); + } + + mlir::cir::StructType *getArgStruct() const { return ArgStruct; } + + /// Return true if this function uses inalloca arguments. + bool usesInAlloca() const { return ArgStruct; } +}; + } // namespace cir #endif From 77421448bf5ccc93776b520f14a46eeb697f3255 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 27 Feb 2022 08:11:53 -0500 Subject: [PATCH 0164/2301] [CIR] Add CIRGenCall.{h,cpp} which contains helper classes for call gen This is mostly boilerplate import from CodeGen --- clang/lib/CIR/CIRGenCall.cpp | 160 ++++++++++++++++++++++++ clang/lib/CIR/CIRGenCall.h | 194 +++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunctionInfo.h | 2 +- clang/lib/CIR/CMakeLists.txt | 1 + 4 files changed, 356 insertions(+), 1 deletion(-) create mode 100644 clang/lib/CIR/CIRGenCall.cpp create mode 100644 clang/lib/CIR/CIRGenCall.h diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp new file mode 100644 index 000000000000..3b4d3a05f6a3 --- /dev/null +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -0,0 +1,160 @@ +#include "CIRGenFunction.h" +#include "CIRGenFunctionInfo.h" +#include "CIRGenTypes.h" + +#include "clang/AST/GlobalDecl.h" + +#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Types.h" + +using namespace cir; +using namespace clang; + +CIRGenFunctionInfo *CIRGenFunctionInfo::create( + unsigned cirCC, bool instanceMethod, bool chainCall, + const clang::FunctionType::ExtInfo &info, + llvm::ArrayRef paramInfos, clang::CanQualType resultType, + llvm::ArrayRef argTypes, RequiredArgs required) { + assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); + assert(!required.allowsOptionalArgs() || + required.getNumRequiredArgs() <= argTypes.size()); + + void *buffer = operator new(totalSizeToAlloc( + argTypes.size() + 1, paramInfos.size())); + + CIRGenFunctionInfo *FI = new (buffer) CIRGenFunctionInfo(); + FI->CallingConvention = cirCC; + FI->EffectiveCallingConvention = cirCC; + FI->ASTCallingConvention = info.getCC(); + FI->InstanceMethod = instanceMethod; + FI->ChainCall = chainCall; + FI->CmseNSCall = info.getCmseNSCall(); + FI->NoReturn = info.getNoReturn(); + FI->ReturnsRetained = info.getProducesResult(); + FI->NoCallerSavedRegs = info.getNoCallerSavedRegs(); + FI->NoCfCheck = info.getNoCfCheck(); + FI->Required = required; + FI->HasRegParm = info.getHasRegParm(); + FI->RegParm = info.getRegParm(); + FI->ArgStruct = nullptr; + FI->ArgStructAlign = 0; + FI->NumArgs = argTypes.size(); + FI->HasExtParameterInfos = !paramInfos.empty(); + FI->getArgsBuffer()[0].type = resultType; + for (unsigned i = 0; i < argTypes.size(); ++i) + FI->getArgsBuffer()[i + 1].type = argTypes[i]; + for (unsigned i = 0; i < paramInfos.size(); ++i) + FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; + + return FI; +} + +namespace { + +/// Encapsulates information about hte way function arguments from +/// CIRGenFunctionInfo should be passed to actual CIR function. +class ClangToCIRArgMapping { + static const unsigned InvalidIndex = ~0U; + unsigned InallocaArgNo; + unsigned SRetArgNo; + unsigned TotalCIRArgs; + + /// Arguments of CIR function corresponding to single Clang argument. + struct CIRArgs { + unsigned PaddingArgIndex; + // Argument is expanded to CIR arguments at positions + // [FirstArgIndex, FirstArgIndex + NumberOfArgs). + unsigned FirstArgIndex; + unsigned NumberOfArgs; + + CIRArgs() + : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), + NumberOfArgs(0) {} + }; + + SmallVector ArgInfo; + +public: + ClangToCIRArgMapping(const ASTContext &Context, const CIRGenFunctionInfo &FI, + bool OnlyRequiredArgs = false) + : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalCIRArgs(0), + ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { + construct(Context, FI, OnlyRequiredArgs); + } + + bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } + + bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } + + unsigned totalCIRArgs() const { return TotalCIRArgs; } + + bool hasPaddingArg(unsigned ArgNo) const { + assert(ArgNo < ArgInfo.size()); + return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; + } + + /// Returns index of first CIR argument corresponding to ArgNo, and their + /// quantity. + std::pair getCIRArgs(unsigned ArgNo) const { + assert(ArgNo < ArgInfo.size()); + return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, + ArgInfo[ArgNo].NumberOfArgs); + } + +private: + void construct(const ASTContext &Context, const CIRGenFunctionInfo &FI, + bool OnlyRequiredArgs); +}; + +void ClangToCIRArgMapping::construct(const ASTContext &Context, + const CIRGenFunctionInfo &FI, + bool OnlyRequiredArgs) { + unsigned CIRArgNo = 0; + bool SwapThisWithSRet = false; + const ABIArgInfo &RetAI = FI.getReturnInfo(); + + assert(RetAI.getKind() != ABIArgInfo::Indirect && "NYI"); + + unsigned ArgNo = 0; + unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); + for (CIRGenFunctionInfo::const_arg_iterator I = FI.arg_begin(); + ArgNo < NumArgs; ++I, ++ArgNo) { + assert(I != FI.arg_end()); + const ABIArgInfo &AI = I->info; + // Collect data about CIR arguments corresponding to Clang argument ArgNo. + auto &CIRArgs = ArgInfo[ArgNo]; + + assert(!AI.getPaddingType() && "NYI"); + + switch (AI.getKind()) { + default: + assert(false && "NYI"); + case ABIArgInfo::Direct: { + assert(!AI.getCoerceToType().dyn_cast() && "NYI"); + // FIXME: handle sseregparm someday... + // FIXME: handle structs + CIRArgs.NumberOfArgs = 1; + break; + } + } + + if (CIRArgs.NumberOfArgs > 0) { + CIRArgs.FirstArgIndex = CIRArgNo; + CIRArgNo += CIRArgs.NumberOfArgs; + } + + assert(!SwapThisWithSRet && "NYI"); + } + assert(ArgNo == ArgInfo.size()); + + assert(!FI.usesInAlloca() && "NYI"); + + TotalCIRArgs = CIRArgNo; +} + +} // namespace + diff --git a/clang/lib/CIR/CIRGenCall.h b/clang/lib/CIR/CIRGenCall.h new file mode 100644 index 000000000000..b73d25b05610 --- /dev/null +++ b/clang/lib/CIR/CIRGenCall.h @@ -0,0 +1,194 @@ +//===----- CIRGenCall.h - Encapsulate calling convention details ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// These classes wrap the information about a call or function +// definition used to handle ABI compliancy. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CODEGEN_CIRGENCALL_H +#define LLVM_CLANG_LIB_CODEGEN_CIRGENCALL_H + +#include "CIRGenValue.h" + +#include "clang/AST/GlobalDecl.h" +#include "clang/AST/Type.h" + +#include "llvm/ADT/SmallVector.h" + +#include "mlir/IR/BuiltinOps.h" + +namespace cir { +class CIRGenFunction; + +/// Abstract information about a function or function prototype. +class CIRGenCalleeInfo { + const clang::FunctionProtoType *CalleeProtoTy; + clang::GlobalDecl CalleeDecl; + +public: + explicit CIRGenCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl() {} + CIRGenCalleeInfo(clang::GlobalDecl calleeDecl) + : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {} + + const clang::FunctionProtoType *getCalleeFunctionProtoType() const { + return CalleeProtoTy; + } + const clang::GlobalDecl getCalleeDecl() const { return CalleeDecl; } +}; + +/// All available information about a concrete callee. +class CIRGenCallee { + enum class SpecialKind : uintptr_t { + Invalid, + Builtin, + PsuedoDestructor, + Virtual, + + Last = Virtual + }; + + struct BuiltinInfoStorage { + const clang::FunctionDecl *Decl; + unsigned ID; + }; + struct PseudoDestructorInfoStorage { + const clang::CXXPseudoDestructorExpr *Expr; + }; + struct VirtualInfoStorage { + const clang::CallExpr *CE; + clang::GlobalDecl MD; + Address Addr; + mlir::FunctionType FTy; + }; + + SpecialKind KindOrFunctionPointer; + + union { + CIRGenCalleeInfo AbstractInfo; + BuiltinInfoStorage BuiltinInfo; + PseudoDestructorInfoStorage PseudoDestructorInfo; + VirtualInfoStorage VirtualInfo; + }; + + explicit CIRGenCallee(SpecialKind kind) : KindOrFunctionPointer(kind) {} + +public: + CIRGenCallee() : KindOrFunctionPointer(SpecialKind::Invalid) {} + + // Construct a callee. Call this constructor directly when this isn't a direct + // call. + CIRGenCallee(const CIRGenCalleeInfo &abstractInfo, mlir::FuncOp functionPtr) + : KindOrFunctionPointer(SpecialKind( + reinterpret_cast(functionPtr.getAsOpaquePointer()))) { + AbstractInfo = abstractInfo; + assert(functionPtr && "configuring callee without function pointer"); + // TODO: codegen asserts functionPtr is a pointer + // TODO: codegen asserts functionPtr is either an opaque pointer type or a + // pointer to a function + } + + static CIRGenCallee + forDirect(mlir::FuncOp functionPtr, + const CIRGenCalleeInfo &abstractInfo = CIRGenCalleeInfo()) { + return CIRGenCallee(abstractInfo, functionPtr); + } + + bool isBuiltin() const { + return KindOrFunctionPointer == SpecialKind::Builtin; + } + + bool isPsuedoDestructor() const { + return KindOrFunctionPointer == SpecialKind::PsuedoDestructor; + } + + bool isOrdinary() const { + return uintptr_t(KindOrFunctionPointer) > uintptr_t(SpecialKind::Last); + } + + /// If this is a delayed callee computation of some sort, prepare a concrete + /// callee + CIRGenCallee prepareConcreteCallee(CIRGenFunction &CGF) const; + + mlir::FuncOp getFunctionPointer() const { + assert(isOrdinary()); + return mlir::FuncOp::getFromOpaquePointer( + reinterpret_cast(KindOrFunctionPointer)); + } + + CIRGenCalleeInfo getAbstractInfo() const { + assert(!isVirtual() && "Virtual NYI"); + assert(isOrdinary()); + return AbstractInfo; + } + + bool isVirtual() const { + return KindOrFunctionPointer == SpecialKind::Virtual; + } +}; + +struct CallArg { +private: + union { + RValue RV; + LValue LV; /// This argument is semantically a load from this l-value + }; + bool HasLV; + + /// A data-flow flag to make sure getRValue and/or copyInto are not + /// called twice for duplicated IR emission. + mutable bool IsUsed; + +public: + clang::QualType Ty; + CallArg(RValue rv, clang::QualType ty) + : RV(rv), HasLV(false), IsUsed(false), Ty(ty) { + (void)HasLV; + (void)IsUsed; + } +}; + +class CallArgList : public llvm::SmallVector { +public: + CallArgList() {} + + struct Writeback { + LValue Source; + }; + + void add(RValue rvalue, clang::QualType type) { + push_back(CallArg(rvalue, type)); + } +}; + +/// FunctionArgList - Type for representing both the decl and type of parameters +/// to a function. The decl must be either a ParmVarDecl or ImplicitParamDecl. +class FunctionArgList : public llvm::SmallVector {}; + +/// ReturnValueSlot - Contains the address where the return value of a function +/// can be stored, and whether the address is volatile or not. +class ReturnValueSlot { + Address Addr = Address::invalid(); + + // Return value slot flags + // unsigned IsVolatile : 1; + // unsigned IsUnused : 1; + // unsigned IsExternallyDestructed : 1; + +public: + // : + ReturnValueSlot() + // IsVolatile(false), + // IsUnused(false), + // IsExternallyDestructed(false) + {} +}; + +} // namespace cir + +#endif diff --git a/clang/lib/CIR/CIRGenFunctionInfo.h b/clang/lib/CIR/CIRGenFunctionInfo.h index 84ab86fd4a35..be508a1ebfeb 100644 --- a/clang/lib/CIR/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CIRGenFunctionInfo.h @@ -383,7 +383,7 @@ class CIRGenFunctionInfo final llvm::ArrayRef getExtParameterInfos() const { if (!HasExtParameterInfos) return {}; - return llvm::makeArrayRef(getExtParameterInfosBuffer(), NumArgs); + return llvm::ArrayRef(getExtParameterInfosBuffer(), NumArgs); } /// getCallingConvention - REturn the user specified calling convention, which diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 78e08d267d92..fb0f816f68f0 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -11,6 +11,7 @@ include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR + CIRGenCall.cpp CIRGenerator.cpp CIRGenExprScalar.cpp CIRGenFunction.cpp From ce2f9fed3a5829776e8b6305b4e9f02b45cbd2c6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 27 Feb 2022 07:51:58 -0500 Subject: [PATCH 0165/2301] [CIR] Add CIRGenCXXABI This type is a helper type for dealing with CXX ABI related issues. A later patch will add a ItaniumCXXABI subclass. --- clang/lib/CIR/CIRGenCXXABI.cpp | 29 ++++++++++ clang/lib/CIR/CIRGenCXXABI.h | 100 +++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenCall.h | 2 + clang/lib/CIR/CIRGenValue.h | 1 - clang/lib/CIR/CMakeLists.txt | 1 + 5 files changed, 132 insertions(+), 1 deletion(-) create mode 100644 clang/lib/CIR/CIRGenCXXABI.cpp create mode 100644 clang/lib/CIR/CIRGenCXXABI.h diff --git a/clang/lib/CIR/CIRGenCXXABI.cpp b/clang/lib/CIR/CIRGenCXXABI.cpp new file mode 100644 index 000000000000..b4c5de488da2 --- /dev/null +++ b/clang/lib/CIR/CIRGenCXXABI.cpp @@ -0,0 +1,29 @@ +// TODO: ADD HEADER + +#include "CIRGenCXXABI.h" + +#include "clang/AST/GlobalDecl.h" +#include "clang/AST/Mangle.h" + +using namespace cir; +using namespace clang; + +CIRGenCXXABI::~CIRGenCXXABI() {} + +CIRGenCXXABI::AddedStructorArgCounts CIRGenCXXABI::addImplicitConstructorArgs( + CIRGenFunction &CGF, const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating, + CallArgList &Args) { + auto AddedArgs = + getImplicitConstructorArgs(CGF, D, Type, ForVirtualBase, Delegating); + for (size_t i = 0; i < AddedArgs.Prefix.size(); ++i) + Args.insert(Args.begin() + 1 + i, + CallArg(RValue::get(AddedArgs.Prefix[i].Value), + AddedArgs.Prefix[i].Type)); + for (const auto &arg : AddedArgs.Suffix) + Args.add(RValue::get(arg.Value), arg.Type); + return AddedStructorArgCounts(AddedArgs.Prefix.size(), + AddedArgs.Suffix.size()); +} + +bool CIRGenCXXABI::NeedsVTTParameter(GlobalDecl GD) { return false; } diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h new file mode 100644 index 000000000000..8bead2ac02e3 --- /dev/null +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -0,0 +1,100 @@ +//===----- CIRGenCXXABI.h - Interface to C++ ABIs ---------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides an abstract class for C++ code generation. Concrete subclasses +// of this implement code generation for specific C++ ABIs. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CIRGENCXXABI_H +#define LLVM_CLANG_LIB_CIR_CIRGENCXXABI_H + +#include "CIRGenCall.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" + +#include "clang/AST/Mangle.h" + +namespace cir { + +class CIRGenFunction; +class CIRGenFunctionInfo; + +/// Implements C++ ABI-specific code generation functions. +class CIRGenCXXABI { +protected: + cir::CIRGenModule &CGM; + std::unique_ptr MangleCtx; + + CIRGenCXXABI(CIRGenModule &CGM) + : CGM{CGM}, MangleCtx(CGM.getASTContext().createMangleContext()) {} + +public: + /// Similar to AddedStructorArgs, but only notes the number of additional + /// arguments. + struct AddedStructorArgCounts { + unsigned Prefix = 0; + unsigned Suffix = 0; + AddedStructorArgCounts() = default; + AddedStructorArgCounts(unsigned P, unsigned S) : Prefix(P), Suffix(S) {} + static AddedStructorArgCounts prefix(unsigned N) { return {N, 0}; } + static AddedStructorArgCounts suffix(unsigned N) { return {0, N}; } + }; + + /// Additional implicit arguments to add to the beginning (Prefix) and end + /// (Suffix) of a constructor / destructor arg list. + /// + /// Note that Prefix should actually be inserted *after* the first existing + /// arg; `this` arguments always come first. + struct AddedStructorArgs { + struct Arg { + mlir::Value Value; + clang::QualType Type; + }; + llvm::SmallVector Prefix; + llvm::SmallVector Suffix; + AddedStructorArgs() = default; + AddedStructorArgs(llvm::SmallVector P, llvm::SmallVector S) + : Prefix(std::move(P)), Suffix(std::move(S)) {} + static AddedStructorArgs prefix(llvm::SmallVector Args) { + return {std::move(Args), {}}; + } + static AddedStructorArgs suffix(llvm::SmallVector Args) { + return {{}, std::move(Args)}; + } + }; + + AddedStructorArgCounts + addImplicitConstructorArgs(CIRGenFunction &CGF, + const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, bool ForVirtualBase, + bool Delegating, CallArgList &Args); + + virtual AddedStructorArgs getImplicitConstructorArgs( + CIRGenFunction &CGF, const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating) = 0; + + /// Return whether the given global decl needs a VTT parameter. + virtual bool NeedsVTTParameter(clang::GlobalDecl GD); + + /// If the C++ ABI requires the given type be returned in a particular way, + /// this method sets RetAI and returns true. + virtual bool classifyReturnType(CIRGenFunctionInfo &FI) const = 0; + + /// Gets the mangle context. + clang::MangleContext &getMangleContext() { return *MangleCtx; } + + virtual ~CIRGenCXXABI(); +}; + +/// Creates and Itanium-family ABI +CIRGenCXXABI *CreateItaniumCXXABI(CIRGenModule &CGM); + +} // namespace cir + +#endif diff --git a/clang/lib/CIR/CIRGenCall.h b/clang/lib/CIR/CIRGenCall.h index b73d25b05610..57257722e00e 100644 --- a/clang/lib/CIR/CIRGenCall.h +++ b/clang/lib/CIR/CIRGenCall.h @@ -21,6 +21,8 @@ #include "llvm/ADT/SmallVector.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinOps.h" namespace cir { diff --git a/clang/lib/CIR/CIRGenValue.h b/clang/lib/CIR/CIRGenValue.h index 61341f3a973b..b35420660823 100644 --- a/clang/lib/CIR/CIRGenValue.h +++ b/clang/lib/CIR/CIRGenValue.h @@ -15,7 +15,6 @@ #define LLVM_CLANG_LIB_CIR_CIRGENVALUE_H #include "Address.h" -#include "CIRGenFunction.h" #include "mlir/IR/Value.h" #include "clang/AST/CharUnits.h" diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index fb0f816f68f0..5da0f7be06d6 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -13,6 +13,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR CIRGenCall.cpp CIRGenerator.cpp + CIRGenCXXABI.cpp CIRGenExprScalar.cpp CIRGenFunction.cpp CIRGenModule.cpp From 2c01b015ba5e259efe03b202c5844b56d6570817 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 27 Feb 2022 08:20:29 -0500 Subject: [PATCH 0166/2301] [CIR][NFC] Clean up some unneeded includes and forward decls --- clang/lib/CIR/CIRGenTypes.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 261d20de0014..c081de6ede53 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -15,8 +15,6 @@ #include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/MLIRContext.h" -#include "clang/CodeGen/CGFunctionInfo.h" -#include "llvm/ADT/DenseMap.h" #include @@ -48,11 +46,6 @@ class Type; typedef CanQual CanQualType; class GlobalDecl; -namespace CodeGen { -class ABIInfo; -class CGCXXABI; -class RequiredArgs; -} // end namespace CodeGen } // end namespace clang namespace mlir { From ca7fa4b677a57153511e0443fed1ac6a53e264f6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 11:57:27 -0500 Subject: [PATCH 0167/2301] [CIR] Import CallingConv.h from CodeGen --- clang/lib/CIR/CallingConv.h | 43 +++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 clang/lib/CIR/CallingConv.h diff --git a/clang/lib/CIR/CallingConv.h b/clang/lib/CIR/CallingConv.h new file mode 100644 index 000000000000..e6b41cdb550c --- /dev/null +++ b/clang/lib/CIR/CallingConv.h @@ -0,0 +1,43 @@ +//===- CallingConv.h - CIR Calling Conventions ------------*- C++ -------*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines CIR's set of calling conventions. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CIR_CALLINGCONV_H +#define CLANG_CIR_CALLINGCONV_H + +// TODO: This whole file needs translated to CIR + +namespace cir { + +/// CallingConv Namespace - This namespace contains an enum with a value for the +/// well-known calling conventions. +namespace CallingConv { + +/// LLVM IR allows to use arbitrary numbers as calling convention identifiers. +/// TODO: What should we do for this for CIR +using ID = unsigned; + +/// A set of enums which specify the assigned numeric values for known llvm +/// calling conventions. +/// LLVM Calling Convention Represetnation +enum { + /// C - The default llvm calling convention, compatible with C. This + /// convention is the only calling convention that supports varargs calls. As + /// with typical C calling conventions, the callee/caller have to tolerate + /// certain amounts of prototype mismatch. + C = 0, +}; + +} // namespace CallingConv + +} // namespace cir + +#endif From a1b440749271a84450396c2e55496f34061dfb16 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 12:05:27 -0500 Subject: [PATCH 0168/2301] [CIR] Import ABIInfo header from CodeGen --- clang/lib/CIR/ABIInfo.h | 43 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 clang/lib/CIR/ABIInfo.h diff --git a/clang/lib/CIR/ABIInfo.h b/clang/lib/CIR/ABIInfo.h new file mode 100644 index 000000000000..d77b126f36d6 --- /dev/null +++ b/clang/lib/CIR/ABIInfo.h @@ -0,0 +1,43 @@ +//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_ABIINFO_H +#define LLVM_CLANG_LIB_CIR_ABIINFO_H + +#include "clang/AST/Type.h" + +namespace cir { + +class CIRGenCXXABI; +class CIRGenFunctionInfo; +class CIRGenTypes; + +/// ABIInfo - Target specific hooks for defining how a type should be passed or +/// returned from functions. +class ABIInfo { + ABIInfo() = delete; + +public: + CIRGenTypes &CGT; + + ABIInfo(CIRGenTypes &cgt) : CGT{cgt} {} + + virtual ~ABIInfo(); + + CIRGenCXXABI &getCXXABI() const; + + virtual void computeInfo(CIRGenFunctionInfo &FI) const = 0; + + // Implement the Type::IsPromotableIntegerType for ABI specific needs. The + // only difference is that this consideres bit-precise integer types as well. + bool isPromotableIntegerTypeForABI(clang::QualType Ty) const; +}; + +} // namespace cir + +#endif From dc608dbae6119e8e960d76f1edc4091c5a3d2898 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 12:09:43 -0500 Subject: [PATCH 0169/2301] [CIR] Import some of ItaniumCXXABI from CodeGen --- clang/lib/CIR/CMakeLists.txt | 1 + clang/lib/CIR/ItaniumCXXABI.cpp | 93 +++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 clang/lib/CIR/ItaniumCXXABI.cpp diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 5da0f7be06d6..38ddc4f3ec22 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -19,6 +19,7 @@ add_clang_library(clangCIR CIRGenModule.cpp CIRGenTypes.cpp CIRRecordLayoutBuilder.cpp + ItaniumCXXABI.cpp LowerToLLVM.cpp DEPENDS diff --git a/clang/lib/CIR/ItaniumCXXABI.cpp b/clang/lib/CIR/ItaniumCXXABI.cpp new file mode 100644 index 000000000000..90718e195860 --- /dev/null +++ b/clang/lib/CIR/ItaniumCXXABI.cpp @@ -0,0 +1,93 @@ +//===------- ItaniumCXXABI.cpp - Emit CIR from ASTs for a Module ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides C++ code generation targeting the Itanium C++ ABI. The class +// in this file generates structures that follow the Itanium C++ ABI, which is +// documented at: +// https://itanium-cxx-abi.github.io/cxx-abi/abi.html +// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html +// +// It also supports the closely-related ARM ABI, documented at: +// https://developer.arm.com/documentation/ihi0041/g/ +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" +#include "CIRGenFunctionInfo.h" + +#include "clang/AST/GlobalDecl.h" +#include "clang/Basic/TargetInfo.h" + +using namespace cir; +using namespace clang; + +namespace { +class ItaniumCXXABI : public cir::CIRGenCXXABI { +protected: + bool UseARMMethodPtrABI; + bool UseARMGuardVarABI; + bool Use32BitVTableOffsetABI; + +public: + ItaniumCXXABI(CIRGenModule &CGM, bool UseARMMethodPtrABI = false, + bool UseARMGuardVarABI = false) + : CIRGenCXXABI(CGM), UseARMMethodPtrABI{UseARMMethodPtrABI}, + UseARMGuardVarABI{UseARMGuardVarABI}, Use32BitVTableOffsetABI{false} { + assert(!UseARMMethodPtrABI && "NYI"); + assert(!UseARMGuardVarABI && "NYI"); + } + AddedStructorArgs getImplicitConstructorArgs(CIRGenFunction &CGF, + const CXXConstructorDecl *D, + CXXCtorType Type, + bool ForVirtualBase, + bool Delegating) override; + + bool NeedsVTTParameter(GlobalDecl GD) override; + + bool classifyReturnType(CIRGenFunctionInfo &FI) const override; +}; +} // namespace + +CIRGenCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs( + CIRGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type, + bool ForVirtualBase, bool Delegating) { + assert(!NeedsVTTParameter(GlobalDecl(D, Type)) && "VTT NYI"); + + return {}; +} + +bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { + auto *MD = cast(GD.getDecl()); + + assert(!MD->getParent()->getNumVBases() && "virtual bases NYI"); + + assert(isa(MD) && GD.getCtorType() == Ctor_Base && + "No other reason we should hit this function yet."); + if (isa(MD) && GD.getCtorType() == Ctor_Base) + return true; + + assert(!isa(MD) && "Destructors NYI"); + + return false; +} + +CIRGenCXXABI *cir::CreateItaniumCXXABI(CIRGenModule &CGM) { + switch (CGM.getASTContext().getCXXABIKind()) { + case TargetCXXABI::GenericItanium: + return new ItaniumCXXABI(CGM); + + default: + llvm_unreachable("bad or NYI ABI kind"); + } +} + +bool ItaniumCXXABI::classifyReturnType(CIRGenFunctionInfo &FI) const { + auto *RD = FI.getReturnType()->getAsCXXRecordDecl(); + assert(!RD && "RecordDecl return types NYI"); + return false; +} From e674bb870cc8dece7798664583a52c60568fee68 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 12:13:10 -0500 Subject: [PATCH 0170/2301] [CIR] Add a CIRGenCXXABI member and getter to CIRGenModule --- clang/lib/CIR/CIRGenModule.cpp | 15 ++++++++++++++- clang/lib/CIR/CIRGenModule.h | 8 +++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 206e2d1ec949..78b8289da6a4 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -12,6 +12,7 @@ #include "CIRGenModule.h" +#include "CIRGenCXXABI.h" #include "CIRGenFunction.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" @@ -71,13 +72,25 @@ using llvm::ScopedHashTableScope; using llvm::SmallVector; using llvm::StringRef; +static CIRGenCXXABI *createCXXABI(CIRGenModule &CGM) { + switch (CGM.getASTContext().getCXXABIKind()) { + case TargetCXXABI::GenericItanium: + return CreateItaniumCXXABI(CGM); + default: + llvm_unreachable("invalid C++ ABI kind"); + } +} + CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &CGO) : builder(&context), astCtx(astctx), langOpts(astctx.getLangOpts()), codeGenOpts(CGO), theModule{mlir::ModuleOp::create( builder.getUnknownLoc())}, - target(astCtx.getTargetInfo()), genTypes{*this} {} + target(astCtx.getTargetInfo()), + ABI(createCXXABI(*this)), genTypes{*this} {} + +CIRGenModule::~CIRGenModule() {} mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { const SourceManager &SM = astCtx.getSourceManager(); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index ee4f0bf4cf41..4644176f82c2 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -34,6 +34,8 @@ namespace cir { +class CIRGenCXXABI; + /// Implementation of a CIR/MLIR emission from Clang AST. /// /// This will emit operations that are specific to C(++)/ObjC(++) language, @@ -47,7 +49,7 @@ class CIRGenModule { CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &CGO); - ~CIRGenModule() = default; + ~CIRGenModule(); using SymTableTy = llvm::ScopedHashTable; using SymTableScopeTy = @@ -70,6 +72,8 @@ class CIRGenModule { mlir::ModuleOp theModule; const clang::TargetInfo ⌖ + + std::unique_ptr ABI; /// Per-module type mapping from clang AST to CIR. CIRGenTypes genTypes; @@ -209,6 +213,8 @@ class CIRGenModule { CIRGenTypes &getTypes() { return genTypes; } const clang::LangOptions &getLangOpts() const { return langOpts; } + CIRGenCXXABI &getCXXABI() const { return *ABI; } + /// Helpers to convert Clang's SourceLocation to a MLIR Location. mlir::Location getLoc(clang::SourceLocation SLoc); From 57730be8b5f91054f186c82bd2b25674902bf6c4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 12:13:34 -0500 Subject: [PATCH 0171/2301] [CIR] Add CIRGenCXXABI member and getter to CIRGenTypes --- clang/lib/CIR/CIRGenTypes.cpp | 3 ++- clang/lib/CIR/CIRGenTypes.h | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 575898664557..352dd0cfdcbd 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -15,7 +15,8 @@ using namespace clang; using namespace cir; CIRGenTypes::CIRGenTypes(CIRGenModule &cgm) - : Context(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm} {} + : Context(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm}, + TheCXXABI(cgm.getCXXABI()) {} CIRGenTypes::~CIRGenTypes() = default; std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index c081de6ede53..a42c663fb86a 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -57,6 +57,7 @@ class StructType; } // namespace mlir namespace cir { +class CIRGenCXXABI; class CIRGenModule; /// This class organizes the cross-module state that is used while lowering @@ -65,6 +66,7 @@ class CIRGenTypes { clang::ASTContext &Context; mlir::OpBuilder &Builder; CIRGenModule &CGM; + CIRGenCXXABI &TheCXXABI; llvm::DenseMap recordDeclTypes; @@ -80,6 +82,7 @@ class CIRGenTypes { clang::ASTContext &getContext() const { return Context; } mlir::MLIRContext &getMLIRContext() const; + CIRGenCXXABI &getCXXABI() const { return TheCXXABI; } /// ConvertType - Convert type T into a mlir::Type. mlir::Type ConvertType(clang::QualType T); From d859de293d0c4249151d04711de34a0e96cdb0ea Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 12:14:51 -0500 Subject: [PATCH 0172/2301] [CIR] Import TargetInfo.h header from CodeGen --- clang/lib/CIR/TargetInfo.h | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 clang/lib/CIR/TargetInfo.h diff --git a/clang/lib/CIR/TargetInfo.h b/clang/lib/CIR/TargetInfo.h new file mode 100644 index 000000000000..b679c978dfd6 --- /dev/null +++ b/clang/lib/CIR/TargetInfo.h @@ -0,0 +1,36 @@ +//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// These classes wrap the information about a call or function +// definition used to handle ABI compliancy. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_TARGETINFO_H +#define LLVM_CLANG_LIB_CIR_TARGETINFO_H + +#include + +namespace cir { +class ABIInfo; + +/// TargetCIRGenInfo - This class organizes various target-specific +/// codegeneration issues, like target-specific attributes, builtins and so on. +class TargetCIRGenInfo { + std::unique_ptr Info = nullptr; + +public: + TargetCIRGenInfo(std::unique_ptr Info) : Info(std::move(Info)) {} + + /// getABIInfo() - Returns ABI info helper for the target. + const ABIInfo &getABIInfo() const { return *Info; } +}; + +} // namespace cir + +#endif From 194c05037b176aa4d6fc5fe56f129859e2861600 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 12:16:03 -0500 Subject: [PATCH 0173/2301] [CIR] Import TargetInfo.cpp from CodeGen --- clang/lib/CIR/CIRGenModule.cpp | 1 + clang/lib/CIR/CIRGenModule.h | 7 + clang/lib/CIR/CMakeLists.txt | 1 + clang/lib/CIR/TargetInfo.cpp | 313 +++++++++++++++++++++++++++++++++ clang/lib/CIR/TargetInfo.h | 3 +- 5 files changed, 324 insertions(+), 1 deletion(-) create mode 100644 clang/lib/CIR/TargetInfo.cpp diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 78b8289da6a4..cb56b4ffac63 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -16,6 +16,7 @@ #include "CIRGenFunction.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" +#include "TargetInfo.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 4644176f82c2..1eb108088208 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -35,6 +35,7 @@ namespace cir { class CIRGenCXXABI; +class TargetCIRGenInfo; /// Implementation of a CIR/MLIR emission from Clang AST. /// @@ -74,6 +75,7 @@ class CIRGenModule { const clang::TargetInfo ⌖ std::unique_ptr ABI; + /// Per-module type mapping from clang AST to CIR. CIRGenTypes genTypes; @@ -88,6 +90,8 @@ class CIRGenModule { /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; + mutable std::unique_ptr TheTargetCIRGenInfo; + /// ------- /// Goto /// ------- @@ -215,6 +219,9 @@ class CIRGenModule { CIRGenCXXABI &getCXXABI() const { return *ABI; } + // TODO: this obviously overlaps with + const TargetCIRGenInfo &getTargetCIRGenInfo(); + /// Helpers to convert Clang's SourceLocation to a MLIR Location. mlir::Location getLoc(clang::SourceLocation SLoc); diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 38ddc4f3ec22..dee5b4e6115f 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -21,6 +21,7 @@ add_clang_library(clangCIR CIRRecordLayoutBuilder.cpp ItaniumCXXABI.cpp LowerToLLVM.cpp + TargetInfo.cpp DEPENDS MLIRCIROpsIncGen diff --git a/clang/lib/CIR/TargetInfo.cpp b/clang/lib/CIR/TargetInfo.cpp new file mode 100644 index 000000000000..c53fe2831347 --- /dev/null +++ b/clang/lib/CIR/TargetInfo.cpp @@ -0,0 +1,313 @@ +#include "TargetInfo.h" +#include "ABIInfo.h" +#include "CIRGenCXXABI.h" +#include "CIRGenFunctionInfo.h" +#include "CIRGenTypes.h" +#include "CallingConv.h" + +#include "clang/Basic/TargetInfo.h" + +using namespace cir; +using namespace clang; + +namespace { + +/// The AVX ABI leel for X86 targets. +enum class X86AVXABILevel { None, AVX, AVX512 }; + +class X86_64ABIInfo : public ABIInfo { + enum Class { + Integer = 0, + SSE, + SSEUp, + X87, + X87Up, + ComplexX87, + NoClass, + Memory + }; + + // X86AVXABILevel AVXLevel; + // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 64-bit + // hardware. + // bool Has64BitPointers; + +public: + X86_64ABIInfo(CIRGenTypes &CGT, X86AVXABILevel AVXLevel) + : ABIInfo(CGT) + // , AVXLevel(AVXLevel) + // , Has64BitPointers(CGT.getDataLayout().getPointeSize(0) == 8) + {} + + virtual void computeInfo(CIRGenFunctionInfo &FI) const override; + + /// classify - Determine the x86_64 register classes in which the given type T + /// should be passed. + /// + /// \param Lo - The classification for the parts of the type residing in the + /// low word of the containing object. + /// + /// \param Hi - The classification for the parts of the type residing in the + /// high word of the containing object. + /// + /// \param OffsetBase - The bit offset of this type in the containing object. + /// Some parameters are classified different depending on whether they + /// straddle an eightbyte boundary. + /// + /// \param isNamedArg - Whether the argument in question is a "named" + /// argument, as used in AMD64-ABI 3.5.7. + /// + /// If a word is unused its result will be NoClass; if a type should be passed + /// in Memory then at least the classification of \arg Lo will be Memory. + /// + /// The \arg Lo class will be NoClass iff the argument is ignored. + /// + /// If the \arg Lo class is ComplexX87, then the \arg Hi class will also be + /// ComplexX87. + void classify(clang::QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi, + bool isNamedArg) const; + + ABIArgInfo classifyReturnType(QualType RetTy) const; + + ABIArgInfo classifyArgumentType(clang::QualType Ty, unsigned freeIntRegs, + unsigned &neededInt, unsigned &neededSSE, + bool isNamedArg) const; + + mlir::Type GetINTEGERTypeAtOffset(mlir::Type CIRType, unsigned CIROffset, + QualType SourceTy, + unsigned SourceOffset) const; + + /// getIndirectResult - Give a source type \arg Ty, return a suitable result + /// such that the argument will be passed in memory. + /// + /// \param freeIntRegs - The number of free integer registers remaining + /// available. + ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; +}; + +class X86_64TargetCIRGenInfo : public TargetCIRGenInfo { +public: + X86_64TargetCIRGenInfo(CIRGenTypes &CGT, X86AVXABILevel AVXLevel) + : TargetCIRGenInfo(std::make_unique(CGT, AVXLevel)) {} +}; +} // namespace + +static bool classifyReturnType(const CIRGenCXXABI &CXXABI, + CIRGenFunctionInfo &FI, const ABIInfo &Info) { + QualType Ty = FI.getReturnType(); + + assert(!Ty->getAs() && "RecordType returns NYI"); + + return CXXABI.classifyReturnType(FI); +} + +CIRGenCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); } + +ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, + unsigned freeIntRegs) const { + assert(false && "NYI"); +} + +void X86_64ABIInfo::computeInfo(CIRGenFunctionInfo &FI) const { + const unsigned CallingConv = FI.getCallingConvention(); + + assert(CallingConv == cir::CallingConv::C && "C is the only supported CC"); + + unsigned FreeIntRegs = 6; + unsigned FreeSSERegs = 8; + unsigned NeededInt, NeededSSE; + + assert(!::classifyReturnType(getCXXABI(), FI, *this) && "NYI"); + FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); + + assert(!FI.getReturnInfo().isIndirect() && "Indirect return NYI"); + + assert(!FI.isChainCall() && "Chain call NYI"); + + unsigned NumRequiredArgs = FI.getNumRequiredArgs(); + // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers get + // assigned (in left-to-right order) for passing as follows... + unsigned ArgNo = 0; + for (CIRGenFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it, ++ArgNo) { + bool IsNamedArg = ArgNo < NumRequiredArgs; + + assert(!it->type->isStructureOrClassType() && "NYI"); + + it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, NeededSSE, + IsNamedArg); + + // AMD64-ABI 3.2.3p3: If there are no registers available for any eightbyte + // of an argument, the whole argument is passed on the stack. If registers + // have already been assigned for some eightbytes of such an argument, the + // assignments get reverted. + if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { + FreeIntRegs -= NeededInt; + FreeSSERegs -= NeededSSE; + } else { + it->info = getIndirectResult(it->type, FreeIntRegs); + } + } +} + +/// Pass transparent unions as if they were the type of the first element. Sema +/// should ensure that all elements of the union have the same "machine type". +static QualType useFirstFieldIfTransparentUnion(QualType Ty) { + assert(!Ty->getAsUnionType() && "NYI"); + return Ty; +} + +/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in +/// an 8-byte GPR. This means that we either have a scalar or we are talking +/// about the high or low part of an up-to-16-byte struct. This routine picks +/// the best CIR type to represent this, which may be i64 or may be anything +/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, +/// etc). +/// +/// PrefType is a CIR type that corresponds to (part of) the IR type for the +/// source type. CIROffset is an offset in bytes into the CIR type taht the +/// 8-byte value references. PrefType may be null. +/// +/// SourceTy is the source-level type for the entire argument. SourceOffset is +/// an offset into this that we're processing (which is always either 0 or 8). +/// +mlir::Type X86_64ABIInfo::GetINTEGERTypeAtOffset(mlir::Type CIRType, + unsigned CIROffset, + QualType SourceTy, + unsigned SourceOffset) const { + assert(CIROffset == 0 && "NYI"); + assert(SourceOffset == 0 && "NYI"); + // TODO: this entire function. It's safe to now just to let the integer type + // be used as is since we aren't actually generating anything. + return CIRType; +} + +ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, + unsigned int freeIntRegs, + unsigned int &neededInt, + unsigned int &neededSSE, + bool isNamedArg) const { + Ty = useFirstFieldIfTransparentUnion(Ty); + + X86_64ABIInfo::Class Lo, Hi; + classify(Ty, 0, Lo, Hi, isNamedArg); + + // Check some invariants + // FIXME: Enforce these by construction. + assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); + + neededInt = 0; + neededSSE = 0; + mlir::Type ResType = nullptr; + switch (Lo) { + default: + assert(false && "NYI"); + + // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next available + // register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 and %r9 is used. + case Integer: + ++neededInt; + + // Pick an 8-byte type based on the preferred type. + ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); + + // If we have a sign or zero extended integer, make sure to return Extend so + // that the parameter gets the right LLVM IR attributes. + if (Hi == NoClass && ResType.isa()) { + assert(!Ty->getAs() && "NYI"); + assert(!isPromotableIntegerTypeForABI(Ty) && "NYI"); + } + + break; + } + + mlir::Type HighPart = nullptr; + switch (Hi) { + default: + assert(false && "NYI"); + case NoClass: + break; + } + + assert(!HighPart && "NYI"); + + return ABIArgInfo::getDirect(ResType); +} + +ABIInfo::~ABIInfo() {} + +bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { + assert(false && "NYI"); + + assert(!Ty->getAs() && "NYI"); + + return false; +} + +void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo, + Class &Hi, bool isNamedArg) const { + Lo = Hi = NoClass; + Class &Current = OffsetBase < 64 ? Lo : Hi; + Current = Memory; + + auto *BT = Ty->getAs(); + assert(BT && "Only builtin types implemented."); + BuiltinType::Kind k = BT->getKind(); + if (k == BuiltinType::Void) + Current = NoClass; + else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { + Current = Integer; + } else { + assert(false && "Only void and Integer supported so far"); + } + return; +} + +ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { + // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the classification + // algorithm. + X86_64ABIInfo::Class Lo, Hi; + classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); + + // Check some invariants. + assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); + + // mlir::Type ResType = nullptr; + assert(Lo == NoClass && "Only NoClass Supported so far"); + assert(Hi == NoClass && "Only NoClass Supported so far"); + + return ABIArgInfo::getIgnore(); +} + +const TargetCIRGenInfo &CIRGenModule::getTargetCIRGenInfo() { + if (TheTargetCIRGenInfo) + return *TheTargetCIRGenInfo; + + // Helper to set the unique_ptr while still keeping the return value. + auto SetCIRGenInfo = [&](TargetCIRGenInfo *P) -> const TargetCIRGenInfo & { + this->TheTargetCIRGenInfo.reset(P); + return *P; + }; + + const llvm::Triple &Triple = getTarget().getTriple(); + + switch (Triple.getArch()) { + default: + assert(false && "Target not yet supported!"); + case llvm::Triple::x86_64: { + StringRef ABI = getTarget().getABI(); + X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 + : ABI == "avx" ? X86AVXABILevel::AVX + : X86AVXABILevel::None); + + switch (Triple.getOS()) { + default: + assert(false && "OSType NYI"); + case llvm::Triple::Linux: + return SetCIRGenInfo(new X86_64TargetCIRGenInfo(genTypes, AVXLevel)); + } + } + } +} diff --git a/clang/lib/CIR/TargetInfo.h b/clang/lib/CIR/TargetInfo.h index b679c978dfd6..f437d55b622d 100644 --- a/clang/lib/CIR/TargetInfo.h +++ b/clang/lib/CIR/TargetInfo.h @@ -14,10 +14,11 @@ #ifndef LLVM_CLANG_LIB_CIR_TARGETINFO_H #define LLVM_CLANG_LIB_CIR_TARGETINFO_H +#include "ABIInfo.h" + #include namespace cir { -class ABIInfo; /// TargetCIRGenInfo - This class organizes various target-specific /// codegeneration issues, like target-specific attributes, builtins and so on. From 684bd93b2bfd6f3746af05eadbee7ee562c2d847 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 15:27:23 -0500 Subject: [PATCH 0174/2301] [CIR] Add member var and a getter for the ABIInfo to CIRGenTypes --- clang/lib/CIR/CIRGenModule.h | 4 ++-- clang/lib/CIR/CIRGenTypes.cpp | 4 +++- clang/lib/CIR/CIRGenTypes.h | 8 ++++++++ clang/lib/CIR/TargetInfo.h | 6 +++--- 4 files changed, 16 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 1eb108088208..8b88051529bd 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -57,6 +57,8 @@ class CIRGenModule { llvm::ScopedHashTableScope; private: + mutable std::unique_ptr TheTargetCIRGenInfo; + /// The builder is a helper class to create IR inside a function. The /// builder is stateful, in particular it keeps an "insertion point": this /// is where the next operations will be introduced. @@ -90,8 +92,6 @@ class CIRGenModule { /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; - mutable std::unique_ptr TheTargetCIRGenInfo; - /// ------- /// Goto /// ------- diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 352dd0cfdcbd..d420d81d6a54 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -1,5 +1,6 @@ #include "CIRGenTypes.h" #include "CIRGenModule.h" +#include "TargetInfo.h" #include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Builders.h" @@ -16,7 +17,8 @@ using namespace cir; CIRGenTypes::CIRGenTypes(CIRGenModule &cgm) : Context(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm}, - TheCXXABI(cgm.getCXXABI()) {} + TheCXXABI(cgm.getCXXABI()), + TheABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) {} CIRGenTypes::~CIRGenTypes() = default; std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index a42c663fb86a..b4d14dd1fad8 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H #define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H +#include "ABIInfo.h" #include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/MLIRContext.h" @@ -68,6 +69,11 @@ class CIRGenTypes { CIRGenModule &CGM; CIRGenCXXABI &TheCXXABI; + // This should not be moved earlier, since its initialization depends on some + // of the previous reference members being already initialized + const ABIInfo &TheABIInfo; + + /// Contains the CIR type for any converted RecordDecl llvm::DenseMap recordDeclTypes; public: @@ -82,7 +88,9 @@ class CIRGenTypes { clang::ASTContext &getContext() const { return Context; } mlir::MLIRContext &getMLIRContext() const; + const ABIInfo &getABIInfo() const { return TheABIInfo; } CIRGenCXXABI &getCXXABI() const { return TheCXXABI; } + /// ConvertType - Convert type T into a mlir::Type. mlir::Type ConvertType(clang::QualType T); diff --git a/clang/lib/CIR/TargetInfo.h b/clang/lib/CIR/TargetInfo.h index f437d55b622d..b4e47d5f9b20 100644 --- a/clang/lib/CIR/TargetInfo.h +++ b/clang/lib/CIR/TargetInfo.h @@ -20,15 +20,15 @@ namespace cir { -/// TargetCIRGenInfo - This class organizes various target-specific -/// codegeneration issues, like target-specific attributes, builtins and so on. +/// This class organizes various target-specific codegeneration issues, like +/// target-specific attributes, builtins and so on. class TargetCIRGenInfo { std::unique_ptr Info = nullptr; public: TargetCIRGenInfo(std::unique_ptr Info) : Info(std::move(Info)) {} - /// getABIInfo() - Returns ABI info helper for the target. + /// Returns ABI info helper for the target. const ABIInfo &getABIInfo() const { return *Info; } }; From d1b29bb7af6f71d85b41e05f7b931a8d4b27ecc8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 27 Feb 2022 08:41:43 -0500 Subject: [PATCH 0175/2301] [CIR] Add CIRGenTypes::arrangeCIRFunctionInfo This function builds a helper type CIRGenFunctionInfo for each function. This is mostly directly lifted from CodeGen. --- clang/lib/CIR/CIRGenTypes.cpp | 63 ++++++++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenTypes.h | 26 +++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index d420d81d6a54..b16a16696714 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -1,5 +1,7 @@ -#include "CIRGenTypes.h" +#include "CIRGenFunctionInfo.h" #include "CIRGenModule.h" +#include "CIRGenTypes.h" +#include "CallingConv.h" #include "TargetInfo.h" #include "mlir/Dialect/CIR/IR/CIRTypes.h" @@ -15,6 +17,11 @@ using namespace clang; using namespace cir; +unsigned CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { + assert(CC == CC_C && "No other calling conventions implemented."); + return cir::CallingConv::C; +} + CIRGenTypes::CIRGenTypes(CIRGenModule &cgm) : Context(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm}, TheCXXABI(cgm.getCXXABI()), @@ -413,3 +420,57 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { TypeCache[Ty] = ResultType; return ResultType; } + +const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( + CanQualType resultType, bool instanceMethod, bool chainCall, + llvm::ArrayRef argTypes, FunctionType::ExtInfo info, + llvm::ArrayRef paramInfos, + RequiredArgs required) { + assert(llvm::all_of(argTypes, + [](CanQualType T) { return T.isCanonicalAsParam(); })); + + // Lookup or create unique function info. + llvm::FoldingSetNodeID ID; + CIRGenFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos, + required, resultType, argTypes); + + void *insertPos = nullptr; + CIRGenFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); + if (FI) + return *FI; + + unsigned CC = ClangCallConvToCIRCallConv(info.getCC()); + + // Construction the function info. We co-allocate the ArgInfos. + FI = CIRGenFunctionInfo::create(CC, instanceMethod, chainCall, info, + paramInfos, resultType, argTypes, required); + FunctionInfos.InsertNode(FI, insertPos); + + bool inserted = FunctionsBeingProcessed.insert(FI).second; + (void)inserted; + assert(inserted && "Recursively being processed?"); + + // Compute ABI inforamtion. + assert(info.getCC() != clang::CallingConv::CC_SpirFunction && "NYI"); + assert(info.getCC() != CC_Swift && info.getCC() != CC_SwiftAsync && + "Swift NYI"); + getABIInfo().computeInfo(*FI); + + // Loop over all of the computed argument and return value info. If any of + // them are direct or extend without a specified coerce type, specify the + // default now. + ABIArgInfo &retInfo = FI->getReturnInfo(); + if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) + retInfo.setCoerceToType(ConvertType(FI->getReturnType())); + + for (auto &I : FI->arguments()) + if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) + I.info.setCoerceToType(ConvertType(I.type)); + + bool erased = FunctionsBeingProcessed.erase(FI); + (void)erased; + assert(erased && "Not in set?"); + + return *FI; +} + diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index b4d14dd1fad8..85828484c8a1 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -14,6 +14,13 @@ #define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H #include "ABIInfo.h" +#include "CIRGenFunctionInfo.h" + +#include "clang/Basic/ABI.h" +#include "clang/AST/Type.h" + +#include "llvm/ADT/SmallPtrSet.h" + #include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/MLIRContext.h" @@ -60,6 +67,7 @@ class StructType; namespace cir { class CIRGenCXXABI; class CIRGenModule; +class CIRGenFunctionInfo; /// This class organizes the cross-module state that is used while lowering /// AST types to CIR types. @@ -76,10 +84,17 @@ class CIRGenTypes { /// Contains the CIR type for any converted RecordDecl llvm::DenseMap recordDeclTypes; + /// Hold memoized CIRGenFunctionInfo results + llvm::FoldingSet FunctionInfos; + + llvm::SmallPtrSet FunctionsBeingProcessed; public: CIRGenTypes(CIRGenModule &cgm); ~CIRGenTypes(); + /// Convert clang calling convention to LLVM calling convention. + unsigned ClangCallConvToCIRCallConv(clang::CallingConv CC); + /// This map keeps cache of llvm::Types and maps clang::Type to /// corresponding llvm::Type. using TypeCacheTy = llvm::DenseMap; @@ -107,6 +122,17 @@ class CIRGenTypes { /// memory representation is usually i8 or i32, depending on the target. // TODO: convert this comment to account for MLIR's equivalence mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); + /// "Arrange" the LLVM information for a call or type with the given + /// signature. This is largely an internal method; other clients should use + /// one of the above routines, which ultimatley defer to this. + /// + /// \param argTypes - must all actually be canonical as params + const CIRGenFunctionInfo &arrangeCIRFunctionInfo( + clang::CanQualType returnType, bool instanceMethod, bool chainCall, + llvm::ArrayRef argTypes, + clang::FunctionType::ExtInfo info, + llvm::ArrayRef paramInfos, + RequiredArgs args); }; } // namespace cir From adcdc17a159cb368dcbfa0603a0c66207674e7ae Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 16:36:27 -0500 Subject: [PATCH 0176/2301] [CIR] Add arrangeFreeFunctionType This does what it says, it generates a CIRGenFunctionInfo for a free function. --- clang/lib/CIR/CIRGenTypes.cpp | 42 +++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenTypes.h | 4 ++++ 2 files changed, 46 insertions(+) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index b16a16696714..4c40e77080d4 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -474,3 +474,45 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( return *FI; } +/// Adds the formal parameters in FPT to the given prefix. If any parameter in +/// FPT has pass_object_size_attrs, then we'll add parameters for those, too. +static void appendParameterTypes( + const CIRGenTypes &CGT, SmallVectorImpl &prefix, + SmallVectorImpl ¶mInfos, + CanQual FPT) { + // Fast path: don't touch param info if we don't need to. + if (!FPT->hasExtParameterInfos()) { + assert(paramInfos.empty() && + "We have paramInfos, but the prototype doesn't?"); + prefix.append(FPT->param_type_begin(), FPT->param_type_end()); + return; + } + + assert(false && "params NYI"); +} + +/// Arrange the CIR function layout for a value of the given function type, on +/// top of any implicit parameters already stored. +static const CIRGenFunctionInfo & +arrangeCIRFunctionInfo(CIRGenTypes &CGT, bool instanceMethod, + SmallVectorImpl &prefix, + CanQual FTP) { + SmallVector paramInfos; + RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); + // FIXME: Kill copy. -- from codegen + appendParameterTypes(CGT, prefix, paramInfos, FTP); + CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); + + return CGT.arrangeCIRFunctionInfo(resultType, instanceMethod, + /*chainCall=*/false, prefix, + FTP->getExtInfo(), paramInfos, Required); +} + +/// Arrange the argument and result information for a value of the given +/// freestanding function type. +const CIRGenFunctionInfo & +CIRGenTypes::arrangeFreeFunctionType(CanQual FTP) { + SmallVector argTypes; + return ::arrangeCIRFunctionInfo(*this, /*instanceMethod=*/false, argTypes, + FTP); +} diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 85828484c8a1..1b0beeacaa6c 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -122,6 +122,10 @@ class CIRGenTypes { /// memory representation is usually i8 or i32, depending on the target. // TODO: convert this comment to account for MLIR's equivalence mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); + + const CIRGenFunctionInfo & + arrangeFreeFunctionType(clang::CanQual Ty); + /// "Arrange" the LLVM information for a call or type with the given /// signature. This is largely an internal method; other clients should use /// one of the above routines, which ultimatley defer to this. From 92f2f87ec4798cb49d4dd0662c9295e7f0a55fc6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 16:38:28 -0500 Subject: [PATCH 0177/2301] [CIR] Add CIRGenTypes::arrangeFunctionDeclaration This method mostly just wraps arrangeFreeFunctionType but also will dispatch to a helper if the function doesn't have a prototype. (Which includes anything declared as `f()` where there are no arguments in between the parens. Void arguments requries that saying `f(void)`) --- clang/lib/CIR/CIRGenTypes.cpp | 27 +++++++++++++++++++++++++-- clang/lib/CIR/CIRGenTypes.h | 5 +++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 4c40e77080d4..39aff7242f7a 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -1,6 +1,6 @@ +#include "CIRGenTypes.h" #include "CIRGenFunctionInfo.h" #include "CIRGenModule.h" -#include "CIRGenTypes.h" #include "CallingConv.h" #include "TargetInfo.h" @@ -139,7 +139,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::SveBoolx2: case BuiltinType::SveBoolx4: case BuiltinType::SveCount: - llvm_unreachable("NYI"); + llvm_unreachable("NYI"); case BuiltinType::Void: case BuiltinType::ObjCId: case BuiltinType::ObjCClass: @@ -474,6 +474,29 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( return *FI; } +/// Arrange the argument and result information for the declaration or +/// definition of the given function. +const CIRGenFunctionInfo & +CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { + assert(!dyn_cast(FD) && "NYI"); + + auto FTy = FD->getType()->getCanonicalTypeUnqualified(); + + assert(isa(FTy)); + // TODO: setCUDAKernelCallingConvention + + // When declaring a function without a prototype, always use a non-variadic + // type. + if (CanQual noProto = FTy.getAs()) { + return arrangeCIRFunctionInfo(noProto->getReturnType(), + /*instanceMethod=*/false, + /*chainCall=*/false, std::nullopt, + noProto->getExtInfo(), {}, RequiredArgs::All); + } + + return arrangeFreeFunctionType(FTy.castAs()); +} + /// Adds the formal parameters in FPT to the given prefix. If any parameter in /// FPT has pass_object_size_attrs, then we'll add parameters for those, too. static void appendParameterTypes( diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 1b0beeacaa6c..1365d10a1ee1 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -123,6 +123,11 @@ class CIRGenTypes { // TODO: convert this comment to account for MLIR's equivalence mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); + /// Free functions are functions that are compatible with an ordinary C + /// function pointer type. + const CIRGenFunctionInfo & + arrangeFunctionDeclaration(const clang::FunctionDecl *FD); + const CIRGenFunctionInfo & arrangeFreeFunctionType(clang::CanQual Ty); From 60ee1f0f3f87f63ad41af1e970d49c71c2e3d6fb Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 16:42:06 -0500 Subject: [PATCH 0178/2301] [CIR] Add arrangeGlobalDeclaration This currently does nothing and just dispatches to arrangeFunctionDeclaration, but it's included to enforce asserts on what we're cirgen'ing --- clang/lib/CIR/CIRGenTypes.cpp | 11 +++++++++++ clang/lib/CIR/CIRGenTypes.h | 3 +++ 2 files changed, 14 insertions(+) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 39aff7242f7a..442738e29631 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -474,6 +474,17 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( return *FI; } +const CIRGenFunctionInfo &CIRGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { + assert(!dyn_cast(GD.getDecl()) && + "This is reported as a FIXME in codegen"); + const auto *FD = cast(GD.getDecl()); + + assert(!isa(GD.getDecl()) && + !isa(GD.getDecl()) && "NYI"); + + return arrangeFunctionDeclaration(FD); +} + /// Arrange the argument and result information for the declaration or /// definition of the given function. const CIRGenFunctionInfo & diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 1365d10a1ee1..53159d2467bd 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -17,6 +17,7 @@ #include "CIRGenFunctionInfo.h" #include "clang/Basic/ABI.h" +#include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "llvm/ADT/SmallPtrSet.h" @@ -123,6 +124,8 @@ class CIRGenTypes { // TODO: convert this comment to account for MLIR's equivalence mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); + const CIRGenFunctionInfo &arrangeGlobalDeclaration(clang::GlobalDecl GD); + /// Free functions are functions that are compatible with an ordinary C /// function pointer type. const CIRGenFunctionInfo & From 1dcb37442ff86ee6a9e99e40a4ed5e4015fd408d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 16:05:43 -0500 Subject: [PATCH 0179/2301] [CIR] Add CIRGenTypes::GetFunctionType This will be used to replace the simplified function type getting in CIRGenModule::buildFunction. --- clang/lib/CIR/CIRGenCall.cpp | 60 ++++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenTypes.h | 5 +++ 2 files changed, 65 insertions(+) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 3b4d3a05f6a3..377c2577ded5 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -158,3 +158,63 @@ void ClangToCIRArgMapping::construct(const ASTContext &Context, } // namespace +mlir::FunctionType CIRGenTypes::GetFunctionType(clang::GlobalDecl GD) { + const CIRGenFunctionInfo &FI = arrangeGlobalDeclaration(GD); + return GetFunctionType(FI); +} + +mlir::FunctionType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { + bool Inserted = FunctionsBeingProcessed.insert(&FI).second; + (void)Inserted; + assert(Inserted && "Recursively being processed?"); + + mlir::Type resultType = nullptr; + const ABIArgInfo &retAI = FI.getReturnInfo(); + switch (retAI.getKind()) { + case ABIArgInfo::Ignore: + // TODO: where to get VoidTy? + resultType = nullptr; + break; + default: + assert(false && "NYI"); + } + + ClangToCIRArgMapping CIRFunctionArgs(getContext(), FI, true); + SmallVector ArgTypes(CIRFunctionArgs.totalCIRArgs()); + + assert(!CIRFunctionArgs.hasSRetArg() && "NYI"); + assert(!CIRFunctionArgs.hasInallocaArg() && "NYI"); + + // Add in all of the required arguments. + unsigned ArgNo = 0; + CIRGenFunctionInfo::const_arg_iterator it = FI.arg_begin(), + ie = it + FI.getNumRequiredArgs(); + + for (; it != ie; ++it, ++ArgNo) { + const auto &ArgInfo = it->info; + + assert(!CIRFunctionArgs.hasPaddingArg(ArgNo) && "NYI"); + + unsigned FirstCIRArg, NumCIRArgs; + std::tie(FirstCIRArg, NumCIRArgs) = CIRFunctionArgs.getCIRArgs(ArgNo); + + switch (ArgInfo.getKind()) { + default: + assert(false && "NYI"); + case ABIArgInfo::Direct: { + mlir::Type argType = ArgInfo.getCoerceToType(); + // TODO: handle the test against llvm::StructType from codegen + assert(NumCIRArgs == 1); + ArgTypes[FirstCIRArg] = argType; + break; + } + } + } + + bool Erased = FunctionsBeingProcessed.erase(&FI); + (void)Erased; + assert(Erased && "Not in set?"); + + return Builder.getFunctionType(ArgTypes, + resultType ? resultType : mlir::TypeRange()); +} diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 53159d2467bd..795357d28ac1 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -124,6 +124,11 @@ class CIRGenTypes { // TODO: convert this comment to account for MLIR's equivalence mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); + /// GetFunctionType - Get the LLVM function type for \arg Info. + mlir::FunctionType GetFunctionType(const CIRGenFunctionInfo &Info); + + mlir::FunctionType GetFunctionType(clang::GlobalDecl GD); + const CIRGenFunctionInfo &arrangeGlobalDeclaration(clang::GlobalDecl GD); /// Free functions are functions that are compatible with an ordinary C From 8a8bdf3273ec6a5b0539cd96918c62da01fa0298 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 16:55:52 -0500 Subject: [PATCH 0180/2301] [CIR][NFC] Add comment describing the "Arrange" family of functions --- clang/lib/CIR/CIRGenTypes.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 795357d28ac1..d789e7c00dd8 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -129,6 +129,25 @@ class CIRGenTypes { mlir::FunctionType GetFunctionType(clang::GlobalDecl GD); + // The arrangement methods are split into three families: + // - those meant to drive the signature and prologue/epilogue + // of a function declaration or definition, + // - those meant for the computation fo the CIR type for an abstract + // appearance of a function, and + // - those meant for performing the CIR-generation of a call. + // They differ mainly in how they deal with optional (i.e. variadic) + // arguments, as well as unprototyped functions. + // + // Key points: + // - The CIRGenFunctionInfo for emitting a specific call site must include + // entries for the optional arguments. + // - The function type used at the call site must reflect the formal signature + // of the declaration being called, or else the call will go away. + // - For the most part, unprototyped functions are called by casting to a + // formal signature inferred from the specific argument types used at the + // call-site. However, some targets (e.g. x86-64) screw with this for + // compatability reasons. + const CIRGenFunctionInfo &arrangeGlobalDeclaration(clang::GlobalDecl GD); /// Free functions are functions that are compatible with an ordinary C From 04a9e23478b21a34f9cac291b39fd0c6ce4c0e0e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 17:02:35 -0500 Subject: [PATCH 0181/2301] [CIR] Add a simple wrapper around getting a symbol from the module symtab This member func exists in clang::CodeGen for getting a symbol from the actuall llvm Module. MLIR doesn't support globalvalue lookup the same way and thus we are using this symbol table. So just wrap the symtab lookup for API equivalence for now. --- clang/lib/CIR/CIRGenModule.cpp | 3 +++ clang/lib/CIR/CIRGenModule.h | 2 ++ 2 files changed, 5 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index cb56b4ffac63..83e5eba6208e 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1658,3 +1658,6 @@ void CIRGenModule::verifyModule() { if (failed(mlir::verify(theModule))) theModule.emitError("module verification error"); } +mlir::Value CIRGenModule::GetGlobalValue(const Decl *D) { + return symbolTable.lookup(D); +} diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 8b88051529bd..a6c63f50c2b8 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -453,6 +453,8 @@ class CIRGenModule { bool MayBeEmittedEagerly(const clang::ValueDecl *D); void verifyModule(); + + mlir::Value GetGlobalValue(const clang::Decl *D); }; } // namespace cir From f3940f52e308e7808c64ae3bb968ec8551ddf570 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 8 Mar 2022 17:08:41 -0500 Subject: [PATCH 0182/2301] [CIR] Add a helper method to create the FuncOp for a given name/ty/decl This method takes the name and will look it up in the symbol table and return it if found. Otherwise it'll go about creating the actual FuncOp. --- clang/lib/CIR/CIRGenModule.cpp | 72 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenModule.h | 11 ++++++ 2 files changed, 83 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 83e5eba6208e..086f2e27949a 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1658,6 +1658,78 @@ void CIRGenModule::verifyModule() { if (failed(mlir::verify(theModule))) theModule.emitError("module verification error"); } + mlir::Value CIRGenModule::GetGlobalValue(const Decl *D) { return symbolTable.lookup(D); } + +/// GetOrCreateCIRFunction - If the specified mangled name is not in the module, +/// create and return a CIR Function with the specified type. If there is +/// something in the module with the specified name, return it potentially +/// bitcasted to the right type. +/// +/// If D is non-null, it specifies a decl that corresponded to this. This is +/// used to set the attributes on the function when it is first created. +mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( + StringRef MangledName, mlir::Type Ty, GlobalDecl GD, bool ForVTable, + bool DontDefer, bool IsThunk, ForDefinition_t IsForDefinition) { + assert(!ForVTable && "NYI"); + assert(!IsThunk && "NYI"); + + const auto *D = GD.getDecl(); + + // Any attempts to use a MultiVersion function should result in retrieving the + // iFunc instead. Name mangling will handle the rest of the changes. + auto const *FD = cast_or_null(D); + assert(FD && "Only FD supported so far"); + + if (getLangOpts().OpenMP) + llvm_unreachable("NYI"); + if (FD->isMultiVersion()) + llvm_unreachable("NYI"); + + mlir::Value Entry = GetGlobalValue(GD.getDecl()); + + if (Entry) + assert(false && "Code path NYI since we're not yet using this for " + "generating fucntion decls"); + + // This function doesn't have a complete type (for example, the return type is + // an incompmlete struct). Use a fake type instead, and make sure not to try + // to set attributes. + bool IsIncompleteFunction = false; + + mlir::FunctionType FTy; + if (Ty.isa()) { + FTy = Ty.cast(); + } else { + assert(false && "NYI"); + // FTy = mlir::FunctionType::get(VoidTy, false); + IsIncompleteFunction = true; + } + + auto fnLoc = getLoc(FD->getSourceRange()); + // TODO: CodeGen includeds the linkage (ExternalLinkage) and only passes the + // mangledname if Entry is nullptr + mlir::FuncOp F = mlir::FuncOp::create(fnLoc, MangledName, FTy); + + assert(!Entry && "NYI"); + + // TODO: This might not be valid, seems the uniqueing system doesn't make + // sense for MLIR + // assert(F->getName().getStringRef() == MangledName && "name was uniqued!"); + + // TODO: set function attributes from the declaration + // TODO: set function attributes from the missing attributes param + + // TODO: Handle extra attributes + + assert(!DontDefer && "Only not DontDefer supported so far"); + + if (!IsIncompleteFunction) { + assert(F.getFunctionType() == Ty); + return F; + } + + assert(false && "Incompmlete functions NYI"); +} diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index a6c63f50c2b8..c50e3fddc624 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -37,6 +37,8 @@ namespace cir { class CIRGenCXXABI; class TargetCIRGenInfo; +enum ForDefinition_t : bool { NotForDefinition = false, ForDefinition = true }; + /// Implementation of a CIR/MLIR emission from Clang AST. /// /// This will emit operations that are specific to C(++)/ObjC(++) language, @@ -455,6 +457,15 @@ class CIRGenModule { void verifyModule(); mlir::Value GetGlobalValue(const clang::Decl *D); + +private: + // TODO: CodeGen also passes an AttributeList here. We'll have to match that + // in CIR + mlir::FuncOp + GetOrCreateCIRFunction(llvm::StringRef MangledName, mlir::Type Ty, + clang::GlobalDecl D, bool ForVTable, + bool DontDefer = false, bool IsThunk = false, + ForDefinition_t IsForDefinition = NotForDefinition); }; } // namespace cir From 1150e31ad01cd8087e0142ff04c239cb789bee8a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 16:48:34 -0500 Subject: [PATCH 0183/2301] [CIR] Add a helper for getting mangled names in CIRGenModule --- clang/lib/CIR/CIRGenModule.cpp | 49 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenModule.h | 6 +++++ 2 files changed, 55 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 086f2e27949a..0b9adeac03a4 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1663,6 +1663,55 @@ mlir::Value CIRGenModule::GetGlobalValue(const Decl *D) { return symbolTable.lookup(D); } +static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, + const NamedDecl *ND, + bool OmitMultiVersionMangling = false) { + assert(!OmitMultiVersionMangling && "NYI"); + + SmallString<256> Buffer; + + llvm::raw_svector_ostream Out(Buffer); + MangleContext &MC = CGM.getCXXABI().getMangleContext(); + + // TODO: support the module name hash + auto ShouldMangle = MC.shouldMangleDeclName(ND); + assert(!ShouldMangle && "Mangling not actually implemented yet."); + + auto *II = ND->getIdentifier(); + assert(II && "Attempt to mangle unnamed decl."); + + const auto *FD = dyn_cast(ND); + assert(FD && "Only FunctionDecl supported"); + assert(FD->getType()->castAs()->getCallConv() != + CC_X86RegCall && + "NYI"); + assert(!FD->hasAttr() && "NYI"); + + Out << II->getName(); + + assert(!ShouldMangle && "Mangling not actually implemented yet."); + + if (const auto *FD = dyn_cast(ND)) { + assert(!FD->isMultiVersion() && "NYI"); + } + assert(!CGM.getLangOpts().GPURelocatableDeviceCode && "NYI"); + + return std::string(Out.str()); +} + +StringRef CIRGenModule::getMangledName(GlobalDecl GD) { + auto CanonicalGD = GD.getCanonicalDecl(); + assert(!dyn_cast(CanonicalGD.getDecl()) && "NYI"); + assert(!langOpts.CUDAIsDevice && "NYI"); + + // Keep the first result in the case of a mangling collision. + const auto *ND = cast(GD.getDecl()); + std::string MangledName = getMangledNameImpl(*this, GD, ND); + + auto Result = Manglings.insert(std::make_pair(MangledName, GD)); + return MangledDeclNames[CanonicalGD] = Result.first->first(); +} + /// GetOrCreateCIRFunction - If the specified mangled name is not in the module, /// create and return a CIR Function with the specified type. If there is /// something in the module with the specified name, return it potentially diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index c50e3fddc624..6ef26e0ae0b3 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -456,6 +456,8 @@ class CIRGenModule { void verifyModule(); + llvm::StringRef getMangledName(clang::GlobalDecl GD); + mlir::Value GetGlobalValue(const clang::Decl *D); private: @@ -466,6 +468,10 @@ class CIRGenModule { clang::GlobalDecl D, bool ForVTable, bool DontDefer = false, bool IsThunk = false, ForDefinition_t IsForDefinition = NotForDefinition); + + // An ordered map of canonical GlobalDecls to their mangled names. + llvm::MapVector MangledDeclNames; + llvm::StringMap Manglings; }; } // namespace cir From 67e04df9caf1d19ec5b52d7a1795104968da0f50 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 16:49:19 -0500 Subject: [PATCH 0184/2301] [CIR] Add CIRGenModule::GetAddrOfFunction This takes in a decl and it's type (with some optional other paramaters currently unused) and returns the corresponding mlir::FuncOp. --- clang/lib/CIR/CIRGenModule.cpp | 26 ++++++++++++++++++++++++++ clang/lib/CIR/CIRGenModule.h | 8 ++++++++ 2 files changed, 34 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 0b9adeac03a4..060f0d0465d9 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1663,6 +1663,31 @@ mlir::Value CIRGenModule::GetGlobalValue(const Decl *D) { return symbolTable.lookup(D); } +mlir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, + mlir::Type Ty, bool ForVTable, + bool DontDefer, + ForDefinition_t IsForDefinition) { + assert(!ForVTable && "NYI"); + assert(!DontDefer && "NYI"); + + assert(!cast(GD.getDecl())->isConsteval() && + "consteval function should never be emitted"); + + assert(!Ty && "No code paths implemented that have this set yet"); + const auto *FD = cast(GD.getDecl()); + Ty = getTypes().ConvertType(FD->getType()); + + assert(!dyn_cast(GD.getDecl()) && "NYI"); + + StringRef MangledName = getMangledName(GD); + auto F = GetOrCreateCIRFunction(MangledName, Ty, GD, ForVTable, DontDefer, + /*IsThunk=*/false, IsForDefinition); + + assert(!langOpts.CUDA && "NYI"); + + return F; +} + static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, const NamedDecl *ND, bool OmitMultiVersionMangling = false) { @@ -1782,3 +1807,4 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( assert(false && "Incompmlete functions NYI"); } + diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 6ef26e0ae0b3..d6bd661bae49 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -456,6 +456,14 @@ class CIRGenModule { void verifyModule(); + /// Return the address of the given function. If Ty is non-null, then this + /// function will use the specified type if it has to create it. + // TODO: this is a bit weird as `GetAddr` given we give back a FuncOp? + mlir::FuncOp + GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty = nullptr, + bool ForVTable = false, bool Dontdefer = false, + ForDefinition_t IsForDefinition = NotForDefinition); + llvm::StringRef getMangledName(clang::GlobalDecl GD); mlir::Value GetGlobalValue(const clang::Decl *D); From c13812fac56fed351fbd770c3cc7f66089651261 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 17:09:30 -0500 Subject: [PATCH 0185/2301] [CIR][NFC] Reorder a function to match clang's ordering --- clang/lib/CIR/CIRGenModule.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 060f0d0465d9..c61a8f96ec6b 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1659,10 +1659,6 @@ void CIRGenModule::verifyModule() { theModule.emitError("module verification error"); } -mlir::Value CIRGenModule::GetGlobalValue(const Decl *D) { - return symbolTable.lookup(D); -} - mlir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, bool ForVTable, bool DontDefer, @@ -1808,3 +1804,6 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( assert(false && "Incompmlete functions NYI"); } +mlir::Value CIRGenModule::GetGlobalValue(const Decl *D) { + return symbolTable.lookup(D); +} From bded1f5a277a3f5f5d3348d6c444588c9ec4b221 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 17:12:30 -0500 Subject: [PATCH 0186/2301] [CIR] Delete CIRGenTypes' FunctionInfos in the destructor --- clang/lib/CIR/CIRGenTypes.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 442738e29631..43b2f460e3c2 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -26,7 +26,12 @@ CIRGenTypes::CIRGenTypes(CIRGenModule &cgm) : Context(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm}, TheCXXABI(cgm.getCXXABI()), TheABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) {} -CIRGenTypes::~CIRGenTypes() = default; +CIRGenTypes::~CIRGenTypes() { + for (llvm::FoldingSet::iterator I = FunctionInfos.begin(), + E = FunctionInfos.end(); + I != E;) + delete &*I++; +} std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, StringRef suffix) { From b1be5444f4ac186ec2b6f09c5ce374f065b575fc Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 17:21:49 -0500 Subject: [PATCH 0187/2301] [CIR] Add a simple wrapper that gets the "extra-canonicalized" return type --- clang/lib/CIR/CIRGenTypes.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 43b2f460e3c2..6c9089f74ca3 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -426,6 +426,14 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { return ResultType; } +/// Returns the "extra-canonicalized" return type, which discards qualifiers on +/// the return type. Codegen doesn't care about them, and it makes ABI code a +/// little easier to be able to assume that all parameter and return types are +/// top-level unqualified. +// static CanQualType GetReturnType(QualType RetTy) { +// return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); +// } + const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( CanQualType resultType, bool instanceMethod, bool chainCall, llvm::ArrayRef argTypes, FunctionType::ExtInfo info, From c90feebe85f069a4eff517d6952d6315fd7b5ed2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 17:20:39 -0500 Subject: [PATCH 0188/2301] [CIR] Add function to arrange a call to a free fn into a CIRGenFunctionInfo --- clang/lib/CIR/CIRGenTypes.cpp | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 6c9089f74ca3..96db89a29bfb 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -1,4 +1,5 @@ #include "CIRGenTypes.h" +#include "CIRGenCall.h" #include "CIRGenFunctionInfo.h" #include "CIRGenModule.h" #include "CallingConv.h" @@ -434,6 +435,37 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { // return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); // } +/// Arrange a call as unto a free function, except possibly with an additional +/// number of formal parameters considered required. +// static const CIRGenFunctionInfo & +// arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, +// const CallArgList &args, const FunctionType *fnType, +// unsigned numExtraRequiredArgs, bool chainCall) { +// assert(args.size() >= numExtraRequiredArgs); +// assert(!chainCall && "Chain call NYI"); + +// llvm::SmallVector paramInfos; + +// // In most cases, there are no optional arguments. +// RequiredArgs required = RequiredArgs::All; + +// // if we have a variadic prototype, the required arguments are the extra +// // prefix plus the arguments in the prototype. +// auto *proto = dyn_cast(fnType); +// assert(proto && "Only FunctionProtoType supported so far"); +// assert(dyn_cast(fnType) && +// "Only FunctionProtoType supported so far"); +// assert(!proto->isVariadic() && "Variadic NYI"); +// assert(!proto->hasExtParameterInfos() && "extparameterinfos NYI"); + +// // FIXME: Kill copy. +// SmallVector argTypes; +// assert(args.size() == 0 && "Args NYI"); +// return CGT.arrangeCIRFunctionInfo( +// GetReturnType(fnType->getReturnType()), /*instanceMethod=*/false, +// chainCall, argTypes, fnType->getExtInfo(), paramInfos, required); +// } + const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( CanQualType resultType, bool instanceMethod, bool chainCall, llvm::ArrayRef argTypes, FunctionType::ExtInfo info, From 2cfa76208f4867793e47e866b8c1f6da2deac2c3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 17:16:59 -0500 Subject: [PATCH 0189/2301] [CIR] Add CIRGenTypes::arrangeFreeFunctionCall This just wraps arrangeFreeFunctionLikeCall atm, but the CodeGen version behaves different depending on whether or not ChainCall is different. So preserve it just for the assert. --- clang/lib/CIR/CIRGenTypes.cpp | 72 ++++++++++++++++++++--------------- clang/lib/CIR/CIRGenTypes.h | 8 +++- 2 files changed, 48 insertions(+), 32 deletions(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 96db89a29bfb..0d49b593159f 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -431,40 +431,40 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { /// the return type. Codegen doesn't care about them, and it makes ABI code a /// little easier to be able to assume that all parameter and return types are /// top-level unqualified. -// static CanQualType GetReturnType(QualType RetTy) { -// return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); -// } +static CanQualType GetReturnType(QualType RetTy) { + return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); +} /// Arrange a call as unto a free function, except possibly with an additional /// number of formal parameters considered required. -// static const CIRGenFunctionInfo & -// arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, -// const CallArgList &args, const FunctionType *fnType, -// unsigned numExtraRequiredArgs, bool chainCall) { -// assert(args.size() >= numExtraRequiredArgs); -// assert(!chainCall && "Chain call NYI"); - -// llvm::SmallVector paramInfos; - -// // In most cases, there are no optional arguments. -// RequiredArgs required = RequiredArgs::All; - -// // if we have a variadic prototype, the required arguments are the extra -// // prefix plus the arguments in the prototype. -// auto *proto = dyn_cast(fnType); -// assert(proto && "Only FunctionProtoType supported so far"); -// assert(dyn_cast(fnType) && -// "Only FunctionProtoType supported so far"); -// assert(!proto->isVariadic() && "Variadic NYI"); -// assert(!proto->hasExtParameterInfos() && "extparameterinfos NYI"); - -// // FIXME: Kill copy. -// SmallVector argTypes; -// assert(args.size() == 0 && "Args NYI"); -// return CGT.arrangeCIRFunctionInfo( -// GetReturnType(fnType->getReturnType()), /*instanceMethod=*/false, -// chainCall, argTypes, fnType->getExtInfo(), paramInfos, required); -// } +static const CIRGenFunctionInfo & +arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, + const CallArgList &args, const FunctionType *fnType, + unsigned numExtraRequiredArgs, bool chainCall) { + assert(args.size() >= numExtraRequiredArgs); + assert(!chainCall && "Chain call NYI"); + + llvm::SmallVector paramInfos; + + // In most cases, there are no optional arguments. + RequiredArgs required = RequiredArgs::All; + + // if we have a variadic prototype, the required arguments are the extra + // prefix plus the arguments in the prototype. + auto *proto = dyn_cast(fnType); + assert(proto && "Only FunctionProtoType supported so far"); + assert(dyn_cast(fnType) && + "Only FunctionProtoType supported so far"); + assert(!proto->isVariadic() && "Variadic NYI"); + assert(!proto->hasExtParameterInfos() && "extparameterinfos NYI"); + + // FIXME: Kill copy. + SmallVector argTypes; + assert(args.size() == 0 && "Args NYI"); + return CGT.arrangeCIRFunctionInfo( + GetReturnType(fnType->getReturnType()), /*instanceMethod=*/false, + chainCall, argTypes, fnType->getExtInfo(), paramInfos, required); +} const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( CanQualType resultType, bool instanceMethod, bool chainCall, @@ -595,3 +595,13 @@ CIRGenTypes::arrangeFreeFunctionType(CanQual FTP) { return ::arrangeCIRFunctionInfo(*this, /*instanceMethod=*/false, argTypes, FTP); } + +/// Figure out the rules for calling a function with the given formal type using +/// the given arguments. The arguments are necessary because the function might +/// be unprototyped, in which case it's target-dependent in crazy ways. +const CIRGenFunctionInfo &CIRGenTypes::arrangeFreeFunctionCall( + const CallArgList &args, const FunctionType *fnType, bool ChainCall) { + assert(!ChainCall && "ChainCall NYI"); + return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, + ChainCall ? 1 : 0, ChainCall); +} diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index d789e7c00dd8..7db6b161a7e5 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -16,9 +16,9 @@ #include "ABIInfo.h" #include "CIRGenFunctionInfo.h" -#include "clang/Basic/ABI.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" +#include "clang/Basic/ABI.h" #include "llvm/ADT/SmallPtrSet.h" @@ -66,6 +66,7 @@ class StructType; } // namespace mlir namespace cir { +class CallArgList; class CIRGenCXXABI; class CIRGenModule; class CIRGenFunctionInfo; @@ -89,6 +90,7 @@ class CIRGenTypes { llvm::FoldingSet FunctionInfos; llvm::SmallPtrSet FunctionsBeingProcessed; + public: CIRGenTypes(CIRGenModule &cgm); ~CIRGenTypes(); @@ -155,6 +157,10 @@ class CIRGenTypes { const CIRGenFunctionInfo & arrangeFunctionDeclaration(const clang::FunctionDecl *FD); + const CIRGenFunctionInfo & + arrangeFreeFunctionCall(const CallArgList &Args, + const clang::FunctionType *Ty, bool ChainCall); + const CIRGenFunctionInfo & arrangeFreeFunctionType(clang::CanQual Ty); From 0985031a45ae89ab5554eb536c31ad3a76f15d24 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 17:33:18 -0500 Subject: [PATCH 0190/2301] [CIR] Support converting fns in CIRGenTypes::ConvertType This patch adds a few functions to support converting AST types to MLIR types for functions --- clang/lib/CIR/CIRGenTypes.cpp | 74 ++++++++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenTypes.h | 20 ++++++++++ 2 files changed, 93 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 0d49b593159f..e7d21eebd250 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -106,6 +106,78 @@ mlir::MLIRContext &CIRGenTypes::getMLIRContext() const { return *Builder.getContext(); } +mlir::Type CIRGenTypes::ConvertFunctionTypeInternal(QualType QFT) { + assert(QFT.isCanonical()); + const Type *Ty = QFT.getTypePtr(); + const FunctionType *FT = cast(QFT.getTypePtr()); + // First, check whether we can build the full fucntion type. If the function + // type depends on an incomplete type (e.g. a struct or enum), we cannot lower + // the function type. + assert(isFuncTypeConvertible(FT) && "NYI"); + + // While we're converting the parameter types for a function, we don't want to + // recursively convert any pointed-to structs. Converting directly-used + // structs is ok though. + assert(RecordsBeingLaidOut.insert(Ty).second && "NYI"); + + // The function type can be built; call the appropriate routines to build it + const CIRGenFunctionInfo *FI; + const auto *FPT = dyn_cast(FT); + assert(FPT && "FunctionNonPrototype NIY"); + FI = &arrangeFreeFunctionType( + CanQual::CreateUnsafe(QualType(FPT, 0))); + + mlir::Type ResultType = nullptr; + // If there is something higher level prodding our CIRGenFunctionInfo, then + // don't recurse into it again. + assert(!FunctionsBeingProcessed.count(FI) && "NYI"); + + // Otherwise, we're good to go, go ahead and convert it. + ResultType = GetFunctionType(*FI); + + RecordsBeingLaidOut.erase(Ty); + + assert(!SkippedLayout && "Shouldn't have skipped anything yet"); + + assert(RecordsBeingLaidOut.empty() && "Deferral NYI"); + assert(DeferredRecords.empty() && "Deferral NYI"); + + return ResultType; +} + +/// isFuncParamTypeConvertible - Return true if the specified type in a function +/// parameter or result position can be converted to a CIR type at this point. +/// This boils down to being whether it is complete, as well as whether we've +/// temporarily deferred expanding the type because we're in a recursive +/// context. +bool CIRGenTypes::isFuncParamTypeConvertible(clang::QualType Ty) { + // Some ABIs cannot have their member pointers represented in LLVM IR unless + // certain circumstances have been reached. + assert(!Ty->getAs() && "NYI"); + + // If this isn't a tagged type, we can convert it! + auto *TT = Ty->getAs(); + assert(!TT && "Only non-TagTypes implemented atm."); + return true; +} + +/// Code to verify a given function type is complete, i.e. the return type and +/// all of the parameter types are complete. Also check to see if we are in a +/// RS_StructPointer context, and if so whether any struct types have been +/// pended. If so, we don't want to ask the ABI lowering code to handle a type +/// that cannot be converted to a CIR type. +bool CIRGenTypes::isFuncTypeConvertible(const FunctionType *FT) { + if (!isFuncParamTypeConvertible(FT->getReturnType())) + return false; + + if (const auto *FPT = dyn_cast(FT)) + for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++) + if (!isFuncParamTypeConvertible(FPT->getParamType(i))) + return false; + + return true; +} + /// ConvertType - Convert the specified type to its MLIR form. mlir::Type CIRGenTypes::ConvertType(QualType T) { T = Context.getCanonicalType(T); @@ -376,7 +448,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } case Type::FunctionNoProto: case Type::FunctionProto: - assert(0 && "not implemented"); + ResultType = ConvertFunctionTypeInternal(T); break; case Type::ObjCObject: assert(0 && "not implemented"); diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 7db6b161a7e5..e007271f30b2 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -89,12 +89,32 @@ class CIRGenTypes { /// Hold memoized CIRGenFunctionInfo results llvm::FoldingSet FunctionInfos; + /// This set keeps track of records that we're currently converting to a CIR + /// type. For example, when converting: + /// struct A { struct B { int x; } } when processing 'x', the 'A' and 'B' + /// types will be in this set. + llvm::SmallPtrSet RecordsBeingLaidOut; + llvm::SmallPtrSet FunctionsBeingProcessed; + /// True if we didn't layout a function due to being inside a recursive struct + /// conversion, set this to true. + bool SkippedLayout; + + llvm::SmallVector DeferredRecords; + + /// Heper for ConvertType. + mlir::Type ConvertFunctionTypeInternal(clang::QualType FT); + public: CIRGenTypes(CIRGenModule &cgm); ~CIRGenTypes(); + /// isFuncTypeConvertible - Utility to check whether a function type can be + /// converted to a CIR type (i.e. doesn't depend on an incomplete tag type). + bool isFuncTypeConvertible(const clang::FunctionType *FT); + bool isFuncParamTypeConvertible(clang::QualType Ty); + /// Convert clang calling convention to LLVM calling convention. unsigned ClangCallConvToCIRCallConv(clang::CallingConv CC); From cf96cacd3220f45c1643d3955426ac8aeedc83fb Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 18:20:05 -0500 Subject: [PATCH 0191/2301] [CIR] Add a mostly stubbed out buildCallArgs method This is mostly just asserts at the moment to preserve behaviorial checks. But a later diff will include implementation supporting actual argument generation. --- clang/lib/CIR/CIRGenFunction.cpp | 70 ++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 57 ++++++++++++++++++++++++++ 2 files changed, 127 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index cf4328dbb2ed..78da5890269d 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -13,6 +13,8 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "clang/Basic/TargetInfo.h" + using namespace cir; using namespace clang; @@ -81,6 +83,74 @@ TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { llvm_unreachable("unknown type kind!"); } } + +static bool hasInAllocaArgs(CIRGenModule &CGM, CallingConv ExplicitCC, + ArrayRef ArgTypes) { + assert(ExplicitCC != CC_Swift && ExplicitCC != CC_SwiftAsync && "Swift NYI"); + assert(!CGM.getTarget().getCXXABI().isMicrosoft() && "MSABI NYI"); + + return false; +} + +void CIRGenFunction::buildCallArgs( + CallArgList &Args, PrototypeWrapper Prototype, + llvm::iterator_range ArgRange, + AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { + + llvm::SmallVector ArgTypes; + + assert((ParamsToSkip == 0 || Prototype.P) && + "Can't skip parameters if type info is not provided"); + + // This variable only captures *explicitly* written conventions, not those + // applied by default via command line flags or target defaults, such as + // thiscall, appcs, stdcall via -mrtd, etc. Computing that correctly would + // require knowing if this is a C++ instance method or being able to see + // unprotyped FunctionTypes. + CallingConv ExplicitCC = CC_C; + + // First, if a prototype was provided, use those argument types. + bool IsVariadic = false; + if (Prototype.P) { + const auto *MD = Prototype.P.dyn_cast(); + assert(!MD && "ObjCMethodDecl NYI"); + + const auto *FPT = Prototype.P.get(); + IsVariadic = FPT->isVariadic(); + assert(!IsVariadic && "Variadic functions NYI"); + ExplicitCC = FPT->getExtInfo().getCC(); + ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, + FPT->param_type_end()); + } + + // If we still have any arguments, emit them using the type of the argument. + for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size())) { + assert(!IsVariadic && "Variadic functions NYI"); + ArgTypes.push_back(A->getType()); + }; + assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); + + // We must evaluate arguments from right to left in the MS C++ ABI, because + // arguments are destroyed left to right in the callee. As a special case, + // there are certain language constructs taht require left-to-right + // evaluation, and in those cases we consider the evaluation order requirement + // to trump the "destruction order is reverse construction order" guarantee. + bool LeftToRight = true; + assert(!CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() && + "MSABI NYI"); + assert(!hasInAllocaArgs(CGM, ExplicitCC, ArgTypes) && "NYI"); + + // Evaluate each argument in the appropriate order. + size_t CallArgsStart = Args.size(); + assert(ArgTypes.size() == 0 && "Args NYI"); + + if (!LeftToRight) { + // Un-reverse the arguments we just evaluated so they match up with the CIR + // function. + std::reverse(Args.begin() + CallArgsStart, Args.end()); + } +} + /// Emit code to compute the specified expression which /// can have any type. The result is returned as an RValue struct. /// TODO: if this is an aggregate expression, add a AggValueSlot to indicate diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index a38777061e4c..29ae137176b4 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -13,10 +13,15 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H #define LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H +#include "CIRGenCall.h" #include "CIRGenValue.h" + #include "mlir/IR/Value.h" +#include "clang/AST/DeclObjC.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/Type.h" +#include "clang/Basic/ABI.h" +#include "clang/Basic/TargetInfo.h" namespace clang { class Expr; @@ -31,6 +36,15 @@ enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; class CIRGenFunction { public: + enum class EvaluationOrder { + ///! No langauge constraints on evaluation order. + Default, + ///! Language semantics requrie left-to-right evaluation + ForceLeftToRight, + ///! Language semantics require right-to-left evaluation. + ForceRightToLeft + }; + /// If a return statement is being visited, this holds the return statment's /// result expression. const clang::Expr *RetExpr = nullptr; @@ -65,6 +79,49 @@ class CIRGenFunction { // as soon as we add a DebugInfo type to this class. std::nullptr_t *getDebugInfo() { return nullptr; } + // Wrapper for function prototype sources. Wraps either a FunctionProtoType or + // an ObjCMethodDecl. + struct PrototypeWrapper { + llvm::PointerUnion + P; + + PrototypeWrapper(const clang::FunctionProtoType *FT) : P(FT) {} + PrototypeWrapper(const clang::ObjCMethodDecl *MD) : P(MD) {} + }; + + /// An abstract representation of regular/ObjC call/message targets. + class AbstractCallee { + /// The function declaration of the callee. + const clang::Decl *CalleeDecl; + + public: + AbstractCallee() : CalleeDecl(nullptr) {} + AbstractCallee(const clang::FunctionDecl *FD) : CalleeDecl(FD) {} + AbstractCallee(const clang::ObjCMethodDecl *OMD) : CalleeDecl(OMD) {} + bool hasFunctionDecl() const { + return llvm::isa_and_nonnull(CalleeDecl); + } + const clang::Decl *getDecl() const { return CalleeDecl; } + unsigned getNumParams() const { + if (const auto *FD = llvm::dyn_cast(CalleeDecl)) + return FD->getNumParams(); + return llvm::cast(CalleeDecl)->param_size(); + } + const clang::ParmVarDecl *getParamDecl(unsigned I) const { + if (const auto *FD = llvm::dyn_cast(CalleeDecl)) + return FD->getParamDecl(I); + return *(llvm::cast(CalleeDecl)->param_begin() + + I); + } + }; + + void buildCallArgs( + CallArgList &Args, PrototypeWrapper Prototype, + llvm::iterator_range ArgRange, + AbstractCallee AC = AbstractCallee(), unsigned ParamsToSkip = 0, + EvaluationOrder Order = EvaluationOrder::Default); + /// buildAnyExpr - Emit code to compute the specified expression which can /// have any type. The result is returned as an RValue struct. If this is an /// aggregate expression, the aggloc/agglocvolatile arguments indicate where From b65e08d0d982c0841507825d708bd35c189aa792 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 18:31:54 -0500 Subject: [PATCH 0192/2301] [CIR] Add buildCallee for generating a CIRGenCallee from a DeclRefExpr --- clang/lib/CIR/CIRGenExpr.cpp | 56 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 2 ++ clang/lib/CIR/CMakeLists.txt | 1 + 3 files changed, 59 insertions(+) create mode 100644 clang/lib/CIR/CIRGenExpr.cpp diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp new file mode 100644 index 000000000000..a2dce48b4d8e --- /dev/null +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -0,0 +1,56 @@ +#include "CIRGenCall.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" + +#include "clang/AST/GlobalDecl.h" + +#include "mlir/IR/Value.h" + +using namespace cir; +using namespace clang; + +static mlir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { + const auto *FD = cast(GD.getDecl()); + assert(!FD->hasAttr() && "NYI"); + + auto V = CGM.GetAddrOfFunction(GD); + assert(FD->hasPrototype() && + "Only prototyped functions are currently callable"); + + return V; +} + +static CIRGenCallee buildDirectCallee(CIRGenFunction &CGF, GlobalDecl GD) { + const auto *FD = cast(GD.getDecl()); + + assert(!FD->getBuiltinID() && "Builtins NYI"); + + auto CalleePtr = buildFunctionDeclPointer(CGF.CGM, GD); + + assert(!CGF.CGM.getLangOpts().CUDA && "NYI"); + + return CIRGenCallee::forDirect(CalleePtr, GD); +} + +CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { + E = E->IgnoreParens(); + + if (auto ICE = dyn_cast(E)) { + assert(ICE && "Only ICE supported so far!"); + assert(ICE->getCastKind() == CK_FunctionToPointerDecay && + "No other casts supported yet"); + + return buildCallee(ICE->getSubExpr()); + } else if (auto DRE = dyn_cast(E)) { + auto FD = dyn_cast(DRE->getDecl()); + assert(FD && + "DeclRef referring to FunctionDecl onlything supported so far"); + return buildDirectCallee(*this, FD); + } + + assert(!dyn_cast(E) && "NYI"); + assert(!dyn_cast(E) && "NYI"); + assert(!dyn_cast(E) && "NYI"); + + assert(false && "Nothing else supported yet!"); +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 29ae137176b4..57c2d11a7992 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -122,6 +122,8 @@ class CIRGenFunction { AbstractCallee AC = AbstractCallee(), unsigned ParamsToSkip = 0, EvaluationOrder Order = EvaluationOrder::Default); + CIRGenCallee buildCallee(const clang::Expr *E); + /// buildAnyExpr - Emit code to compute the specified expression which can /// have any type. The result is returned as an RValue struct. If this is an /// aggregate expression, the aggloc/agglocvolatile arguments indicate where diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index dee5b4e6115f..b201fc1ae617 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -14,6 +14,7 @@ add_clang_library(clangCIR CIRGenCall.cpp CIRGenerator.cpp CIRGenCXXABI.cpp + CIRGenExpr.cpp CIRGenExprScalar.cpp CIRGenFunction.cpp CIRGenModule.cpp From 938368f12e6c258218371c2efe2cbc15ff7107dc Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 18:41:29 -0500 Subject: [PATCH 0193/2301] [CIR] Add getters for the CIRGenTypes and LangOptions for CIRGenFunction --- clang/lib/CIR/CIRGenFunction.h | 5 +++++ clang/lib/CIR/CIRGenModule.h | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 57c2d11a7992..44e49c8aa3a2 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -14,6 +14,7 @@ #define LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H #include "CIRGenCall.h" +#include "CIRGenModule.h" #include "CIRGenValue.h" #include "mlir/IR/Value.h" @@ -74,6 +75,10 @@ class CIRGenFunction { CIRGenFunction(CIRGenModule &CGM); + CIRGenTypes &getTypes() const { return CGM.getTypes(); } + + const clang::LangOptions &getLangOpts() const { return CGM.getLangOpts(); } + // TODO: This is currently just a dumb stub. But we want to be able to clearly // assert where we arne't doing things that we know we should and will crash // as soon as we add a DebugInfo type to this class. diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index d6bd661bae49..5c58aeb83c2e 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -13,7 +13,6 @@ #ifndef LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H #define LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H -#include "CIRGenFunction.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" @@ -34,6 +33,7 @@ namespace cir { +class CIRGenFunction; class CIRGenCXXABI; class TargetCIRGenInfo; From a14ff5a71f6b06e426fcbe3a154efd6f46a20e6d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Mar 2022 18:42:16 -0500 Subject: [PATCH 0194/2301] [CIR] Add CIRGenFunction::buildCall This implements two versions of buildCall and a few helpers that take a CIRGenFunctionInfo, CIRGenCallee and other related information and build out and return the RValue for the call. --- clang/lib/CIR/CIRGenCall.cpp | 156 +++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.cpp | 84 +++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 26 +++++- 3 files changed, 265 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 377c2577ded5..d1e1c9c6274b 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -218,3 +218,159 @@ mlir::FunctionType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { return Builder.getFunctionType(ArgTypes, resultType ? resultType : mlir::TypeRange()); } + +CIRGenCallee CIRGenCallee::prepareConcreteCallee(CIRGenFunction &CGF) const { + assert(!isVirtual() && "Virtual NYI"); + return *this; +} + +RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, + const CIRGenCallee &Callee, + ReturnValueSlot ReturnValue, + const CallArgList &CallArgs, + mlir::func::CallOp &callOrInvoke, + bool IsMustTail, clang::SourceLocation Loc) { + // FIXME: We no longer need the types from CallArgs; lift up and simplify + + assert(Callee.isOrdinary() || Callee.isVirtual()); + + // Handle struct-return functions by passing a pointer to the location that we + // would like to return info. + QualType RetTy = CallInfo.getReturnType(); + const auto &RetAI = CallInfo.getReturnInfo(); + + const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); + + const FunctionDecl *FD = dyn_cast_or_null(TargetDecl); + assert(FD && "Only functiondecl supported so far"); + // We can only guarantee that a function is called from the correct + // context/function based on the appropriate target attributes, so only check + // in hte case where we have both always_inline and target since otherwise we + // could be making a conditional call after a check for the proper cpu + // features (and it won't cause code generation issues due to function based + // code generation). + assert(!TargetDecl->hasAttr() && "NYI"); + assert(!TargetDecl->hasAttr() && "NYI"); + + // Some architectures (such as x86-64) have the ABI changed based on + // attribute-target/features. Give them a chance to diagnose. + // TODO: support this eventually, just assume the trivial result for now + // !CGM.getTargetCIRGenInfo().checkFunctionCallABI( + // CGM, Loc, dyn_cast_or_null(CurCodeDecl), FD, CallArgs); + + // TODO: add DNEBUG code + + // 1. Set up the arguments + + // If we're using inalloca, insert the allocation after the stack save. + // FIXME: Do this earlier rather than hacking it in here! + Address ArgMemory = Address::invalid(); + assert(!CallInfo.getArgStruct() && "NYI"); + + ClangToCIRArgMapping CIRFunctionArgs(CGM.getASTContext(), CallInfo); + SmallVector CIRCallArgs(CIRFunctionArgs.totalCIRArgs()); + + // If the call returns a temporary with struct return, create a temporary + // alloca to hold the result, unless one is given to us. + assert(!RetAI.isIndirect() && !RetAI.isInAlloca() && + !RetAI.isCoerceAndExpand() && "NYI"); + + // When passing arguments using temporary allocas, we need to add the + // appropriate lifetime markers. This vector keeps track of all the lifetime + // markers that need to be ended right after the call. + assert(CallArgs.size() == 0 && + "Args not yet supported. When they are we'll need to consider " + "supporting temporary allocas for passed args"); + + // Translate all of the arguments as necessary to match the CIR lowering. + assert(CallInfo.arg_size() == CallArgs.size() && + "Mismatch between function signature & arguments."); + unsigned ArgNo = 0; + CIRGenFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); + for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); + I != E; ++I, ++info_it, ++ArgNo) { + assert(false && "Nothing to see here!"); + } + + const CIRGenCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); + mlir::FuncOp CalleePtr = ConcreteCallee.getFunctionPointer(); + + // If we're using inalloca, set up that argument. + assert(!ArgMemory.isValid() && "inalloca NYI"); + + // TODO: simplifyVariadicCallee + + // 3. Perform the actual call. + + // Deactivate any cleanups that we're supposed to do immediately before the + // call. + // TODO: do this + + // TODO: Update the largest vector width if any arguments have vector types. + // TODO: Compute the calling convention and attributes. + assert(!FD->hasAttr() && "NYI"); + + // TODO: InNoMergeAttributedStmt + // assert(!CurCodeDecl->hasAttr() && + // !TargetDecl->hasAttr() && "NYI"); + + // TODO: isSEHTryScope + + // TODO: currentFunctionUsesSEHTry + // TODO: isCleanupPadScope + + // TODO: UnusedReturnSizePtr + + assert(!FD->hasAttr() && "NYI"); + + // TODO: alignment attributes + + auto callLoc = CGM.getLoc(Loc); + auto theCall = CGM.getBuilder().create(callLoc, CalleePtr, + CIRCallArgs); + + if (callOrInvoke) + callOrInvoke = theCall; + + if (const auto *FD = dyn_cast_or_null(CurFuncDecl)) { + assert(!FD->getAttr() && "NYI"); + } + + // TODO: set attributes on callop + + // assert(!theCall.getResults().getType().front().isSignlessInteger() && + // "Vector NYI"); + + // TODO: LLVM models indirect calls via a null callee, how should we do this? + + assert(!CGM.getLangOpts().ObjCAutoRefCount && "Not supported"); + + assert(!TargetDecl->hasAttr() && "NYI"); + + assert(!getDebugInfo() && "No debug info yet"); + + assert(!TargetDecl->hasAttr() && "NYI"); + + // 4. Finish the call. + + // If the call doesn't return, finish the basic block and clear the insertion + // point; this allows the rest of CIRGen to discard unreachable code. + // TODO: figure out how to support doesNotReturn + + assert(!IsMustTail && "NYI"); + + // TODO: figure out writebacks? seems like ObjC only __autorelease + + // TODO: cleanup argument memory at the end + + // TODO: implement genuine returns + + // TODO: implement assumed_aligned + + // TODO: implement lifetime extensions + + assert(RetTy.isDestructedType() != QualType::DK_nontrivial_c_struct && "NYI"); + + assert(theCall.getNumResults() == 0 && "Returns NYI"); + return RValue::get(nullptr); +} diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 78da5890269d..0db26103325f 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -15,6 +15,8 @@ #include "clang/Basic/TargetInfo.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" + using namespace cir; using namespace clang; @@ -166,3 +168,85 @@ RValue CIRGenFunction::buildAnyExpr(const Expr *E) { } llvm_unreachable("bad evaluation kind"); } +RValue CIRGenFunction::buildCall(clang::QualType CalleeType, + const CIRGenCallee &OrigCallee, + const clang::CallExpr *E, + ReturnValueSlot ReturnValue, + mlir::Value Chain) { + // Get the actual function type. The callee type will always be a pointer to + // function type or a block pointer type. + assert(CalleeType->isFunctionPointerType() && + "Call must have function pointer type!"); + + CalleeType = getContext().getCanonicalType(CalleeType); + + auto PointeeType = cast(CalleeType)->getPointeeType(); + + CIRGenCallee Callee = OrigCallee; + + if (getLangOpts().CPlusPlus) + assert(!SanOpts.has(SanitizerKind::Function) && "Sanitizers NYI"); + + const auto *FnType = cast(PointeeType); + + assert(!SanOpts.has(SanitizerKind::CFIICall) && "Sanitizers NYI"); + + CallArgList Args; + + assert(!Chain && "FIX THIS"); + + // C++17 requires that we evaluate arguments to a call using assignment syntax + // right-to-left, and that we evaluate arguments to certain other operators + // left-to-right. Note that we allow this to override the order dictated by + // the calling convention on the MS ABI, which means that parameter + // destruction order is not necessarily reverse construction order. + // FIXME: Revisit this based on C++ committee response to unimplementability. + EvaluationOrder Order = EvaluationOrder::Default; + assert(!dyn_cast(E) && "Operators NYI"); + + buildCallArgs(Args, dyn_cast(FnType), E->arguments(), + E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); + + const CIRGenFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( + Args, FnType, /*ChainCall=*/Chain.getAsOpaquePointer()); + + // C99 6.5.2.2p6: + // If the expression that denotes the called function has a type that does + // not include a prototype, [the default argument promotions are performed]. + // If the number of arguments does not equal the number of parameters, the + // behavior is undefined. If the function is defined with at type that + // includes a prototype, and either the prototype ends with an ellipsis (, + // ...) or the types of the arguments after promotion are not compatible + // with the types of the parameters, the behavior is undefined. If the + // function is defined with a type that does not include a prototype, and + // the types of the arguments after promotion are not compatible with those + // of the parameters after promotion, the behavior is undefined [except in + // some trivial cases]. + // That is, in the general case, we should assume that a call through an + // unprototyped function type works like a *non-variadic* call. The way we + // make this work is to cast to the exxact type fo the promoted arguments. + // + // Chain calls use the same code path to add the inviisble chain parameter to + // the function type. + assert(!isa(FnType) && "NYI"); + // if (isa(FnType) || Chain) { + // mlir::FunctionType CalleeTy = getTypes().GetFunctionType(FnInfo); + // int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace(); + // CalleeTy = CalleeTy->getPointerTo(AS); + + // llvm::Value *CalleePtr = Callee.getFunctionPointer(); + // CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast"); + // Callee.setFunctionPointer(CalleePtr); + // } + + assert(!CGM.getLangOpts().HIP && "HIP NYI"); + + assert(!MustTailCall && "Must tail NYI"); + mlir::func::CallOp callOP = nullptr; + RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, callOP, + E == MustTailCall, E->getExprLoc()); + + assert(!getDebugInfo() && "Debug Info NYI"); + + return Call; +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 44e49c8aa3a2..bc7989b017c0 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -28,8 +28,13 @@ namespace clang { class Expr; } // namespace clang +namespace mlir { +namespace func { +class CallOp; +} +} // namespace mlir + namespace cir { -class CIRGenModule; // FIXME: for now we are reusing this from lib/Clang/CodeGenFunction.h, which // isn't available in the include dir. Same for getEvaluationKind below. @@ -57,6 +62,14 @@ class CIRGenFunction { clang::QualType FnRetQualTy; CIRGenModule &CGM; + + // CurFuncDecl - Holds the Decl for the current outermost non-closure context + const clang::Decl *CurFuncDecl; + + // The CallExpr within the current statement that the musttail attribute + // applies to. nullptr if there is no 'musttail' on the current statement. + const clang::CallExpr *MustTailCall = nullptr; + clang::ASTContext &getContext() const; /// Sanitizers enabled for this function. @@ -127,6 +140,17 @@ class CIRGenFunction { AbstractCallee AC = AbstractCallee(), unsigned ParamsToSkip = 0, EvaluationOrder Order = EvaluationOrder::Default); + /// buildCall - Generate a call of the given function, expecting the given + /// result type, and using the given argument list which specifies both the + /// LLVM arguments and the types they were derived from. + RValue buildCall(const CIRGenFunctionInfo &CallInfo, + const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, + const CallArgList &Args, mlir::func::CallOp &callOrInvoke, + bool IsMustTail, clang::SourceLocation Loc); + RValue buildCall(clang::QualType FnType, const CIRGenCallee &Callee, + const clang::CallExpr *E, ReturnValueSlot returnValue, + mlir::Value Chain = nullptr); + CIRGenCallee buildCallee(const clang::Expr *E); /// buildAnyExpr - Emit code to compute the specified expression which can From 09683c429499d371bdd5ec77fdcdf05351341e3c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 14 Mar 2022 19:34:07 -0400 Subject: [PATCH 0195/2301] [CIR] Add CIRGenFunction::buildCallExpr Simply add a member func to CIRGenFunction that takes in the current CallExpr and delegates to buildCallee and buildCall. --- clang/lib/CIR/CIRGenFunction.cpp | 16 ++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 3 +++ 2 files changed, 19 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 0db26103325f..d1404f44dfc1 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -168,6 +168,22 @@ RValue CIRGenFunction::buildAnyExpr(const Expr *E) { } llvm_unreachable("bad evaluation kind"); } + +RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, + ReturnValueSlot ReturnValue) { + assert(!E->getCallee()->getType()->isBlockPointerType() && "ObjC Blocks NYI"); + assert(!dyn_cast(E) && "NYI"); + assert(!dyn_cast(E) && "CUDA NYI"); + assert(!dyn_cast(E) && "NYI"); + + CIRGenCallee callee = buildCallee(E->getCallee()); + + assert(!callee.isBuiltin() && "builtins NYI"); + assert(!callee.isPsuedoDestructor() && "NYI"); + + return buildCall(E->getCallee()->getType(), callee, E, ReturnValue); +} + RValue CIRGenFunction::buildCall(clang::QualType CalleeType, const CIRGenCallee &OrigCallee, const clang::CallExpr *E, diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index bc7989b017c0..ddc96041d277 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -151,6 +151,9 @@ class CIRGenFunction { const clang::CallExpr *E, ReturnValueSlot returnValue, mlir::Value Chain = nullptr); + RValue buildCallExpr(const clang::CallExpr *E, + ReturnValueSlot ReturnValue = ReturnValueSlot()); + CIRGenCallee buildCallee(const clang::Expr *E); /// buildAnyExpr - Emit code to compute the specified expression which can From 93c400912d618fff9286cba2a401c65ffa95e7f3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 14 Mar 2022 19:35:58 -0400 Subject: [PATCH 0196/2301] [CIR] Add VisitCallExpr to ScalarExprEmitter Add a visitor member func for VisitCallExpr that'll delegate to buildCallExpr. --- clang/lib/CIR/CIRGenExprScalar.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index f21974b9518b..62f45b002178 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -77,6 +77,15 @@ class ScalarExprEmitter : public StmtVisitor { } } + mlir::Value VisitCallExpr(const CallExpr *E) { + assert(!E->getCallReturnType(CGF.getContext())->isReferenceType() && "NYI"); + + auto V = CGF.buildCallExpr(E).getScalarVal(); + + // TODO: buildLValueAlignmentAssumption + return V; + } + mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { assert(!llvm::isa(E->getType()) && "not implemented"); return CGM.buildLValue(E->getSubExpr()).getPointer(); From bb0cdfd495a9078ef80eb1cbb7f13c45450a276a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 14 Mar 2022 19:40:30 -0400 Subject: [PATCH 0197/2301] [CIR] Add AggValueSlot helper class for collecting info about aggregate slots --- clang/lib/CIR/CIRGenValue.h | 94 +++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/clang/lib/CIR/CIRGenValue.h b/clang/lib/CIR/CIRGenValue.h index b35420660823..0431064aa0f7 100644 --- a/clang/lib/CIR/CIRGenValue.h +++ b/clang/lib/CIR/CIRGenValue.h @@ -214,6 +214,100 @@ class LValue { clang::Qualifiers &getQuals() { return Quals; } }; +/// An aggregate value slot. +class AggValueSlot { + /// The address. + Address Addr; + + // Qualifiers + clang::Qualifiers Quals; + + /// This is set to true if the tail padding of this slot might overlap another + /// object that may have already been initialized (and whose value must be + /// preserved by this initialization). If so, we may only store up to the + /// dsize of the type. Otherwise we can widen stores to the size of the type. + bool OverlapFlag : 1; + + /// DestructedFlags - This is set to true if some external code is responsible + /// for setting up a destructor for the slot. Otherwise the code which + /// constructs it shoudl push the appropriate cleanup. + // bool DestructedFlag : 1; + + /// If is set to true, sanitizer checks are already generated for this address + /// or not required. For instance, if this address represents an object + /// created in 'new' expression, sanitizer checks for memory is made as a part + /// of 'operator new' emission and object constructor should not generate + /// them. + bool SanitizerCheckedFlag : 1; + + // TODO: Add the rest of these things + + AggValueSlot(Address Addr, clang::Qualifiers Quals, bool DestructedFlag, + bool ObjCGCFlag, bool ZeroedFlag, bool AliasedFlag, + bool OverlapFlag, bool SanitizerCheckedFlag) + : Addr(Addr), Quals(Quals) + // ,DestructedFlag(DestructedFlag) + // ,ObjCGCFlag(ObjCGCFlag) + // ,ZeroedFlag(ZeroedFlag) + // ,AliasedFlag(AliasedFlag) + // ,OverlapFlag(OverlapFlag) + // ,SanitizerCheckedFlag(SanitizerCheckedFlag) + {} + +public: + enum IsAliased_t { IsNotAliased, IsAliased }; + enum IsDestructed_t { IsNotDestructed, IsDestructed }; + enum IsZeroed_t { IsNotZeroed, IsZeroed }; + enum Overlap_t { DoesNotOverlap, MayOverlap }; + enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers }; + enum IsSanitizerChecked_t { IsNotSanitizerChecked, IsSanitizerChecked }; + + /// ignored - Returns an aggregate value slot indicating that the aggregate + /// value is being ignored. + static AggValueSlot ignored() { + return forAddr(Address::invalid(), clang::Qualifiers(), IsNotDestructed, + DoesNotNeedGCBarriers, IsNotAliased, DoesNotOverlap); + } + + /// forAddr - Make a slot for an aggregate value. + /// + /// \param quals - The qualifiers that dictate how the slot should be + /// initialized. Only 'volatile' and the Objective-C lifetime qualifiers + /// matter. + /// + /// \param isDestructed - true if something else is responsible for calling + /// destructors on this object + /// \param needsGC - true fi the slot is potentially located somewhere that + /// ObjC GC calls should be emitted for + static AggValueSlot + forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, + NeedsGCBarriers_t needsGC, IsAliased_t isAliased, + Overlap_t mayOverlap, IsZeroed_t isZeroed = IsNotZeroed, + IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) { + return AggValueSlot(addr, quals, isDestructed, needsGC, isZeroed, isAliased, + mayOverlap, isChecked); + } + + static AggValueSlot + forLValue(const LValue &LV, IsDestructed_t isDestructed, + NeedsGCBarriers_t needsGC, IsAliased_t isAliased, + Overlap_t mayOverlap, IsZeroed_t isZeroed = IsNotZeroed, + IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) { + return forAddr(LV.getAddress(), LV.getQuals(), isDestructed, needsGC, + isAliased, mayOverlap, isZeroed, isChecked); + } + + clang::Qualifiers getQualifiers() const { return Quals; } + + Address getAddress() const { return Addr; } + + bool isIgnored() const { return !Addr.isValid(); } + + Overlap_t mayOverlap() const { return Overlap_t(OverlapFlag); } + + bool isSanitizerChecked() const { return SanitizerCheckedFlag; } +}; + } // namespace cir #endif From bf0647a51ad775b7022ee6416e82fc354a2a2f9d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 14 Mar 2022 20:28:41 -0400 Subject: [PATCH 0198/2301] [CIR] Add test for call gen --- clang/test/CIR/CodeGen/call.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 clang/test/CIR/CodeGen/call.c diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c new file mode 100644 index 000000000000..501304c56208 --- /dev/null +++ b/clang/test/CIR/CodeGen/call.c @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void a(void) {} + +void c(void) { + a(); +} + +// CHECK: module { +// CHECK: func @a() { +// CHECK: cir.return +// CHECK: } +// CHECK: func @c() { +// CHECK: call @a() : () -> () +// CHECK: cir.return +// CHECK: } + From 9967a771ac0b482e2c411c48fa5fe185f9c4f696 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 11 Mar 2022 17:24:13 -0800 Subject: [PATCH 0199/2301] [CIR][LifetimeCheck][NFC] Ignore unkonwn stores --- mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 8e16373321a9..addda79c86da 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -21,8 +21,6 @@ using namespace cir; namespace { struct LifetimeCheckPass : public LifetimeCheckBase { LifetimeCheckPass() = default; - - // Prints the resultant operation statistics post iterating over the module. void runOnOperation() override; void checkOperation(Operation *op); @@ -432,9 +430,7 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { return; } - storeOp.dump(); - // FIXME: asserts here should become remarks for non-implemented parts. - assert(0 && "not implemented"); + // From here on, some uninterestring store (for now?) } void LifetimeCheckPass::checkLoad(LoadOp loadOp) { From 9e9b1ef9ceb6bfd68946cc7a7a5b74a203a3c5d1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 11 Mar 2022 17:20:21 -0800 Subject: [PATCH 0200/2301] [CIR] Add MergeCleanupsPass to merge blocks with cleanup+ret/yield - Add to the default clang CIR generation pipeline. - Add pass that applies the transformations. - No testcases yet (next commit will add return support, those tests assume this pass to work in order to pass). - Turned off (will be turned on by the same commit mentioned above). --- clang/include/clang/CIR/CIRToCIRPasses.h | 30 ++++ clang/lib/CIR/CIRPasses.cpp | 29 +++ clang/lib/CIR/CMakeLists.txt | 2 + clang/lib/CIRFrontendAction/CIRGenAction.cpp | 4 +- mlir/include/mlir/Dialect/CIR/Passes.h | 1 + mlir/include/mlir/Dialect/CIR/Passes.td | 11 ++ .../lib/Dialect/CIR/Transforms/CMakeLists.txt | 2 + .../Dialect/CIR/Transforms/MergeCleanups.cpp | 167 ++++++++++++++++++ 8 files changed, 245 insertions(+), 1 deletion(-) create mode 100644 clang/include/clang/CIR/CIRToCIRPasses.h create mode 100644 clang/lib/CIR/CIRPasses.cpp create mode 100644 mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h new file mode 100644 index 000000000000..6ffedc5cd9f1 --- /dev/null +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -0,0 +1,30 @@ +//====- CIRToCIRPasses.h- Lowering from CIR to LLVM -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares an interface for converting CIR modules to LLVM IR. +// +//===----------------------------------------------------------------------===// +#ifndef CLANG_CIR_CIRTOCIRPASSES_H +#define CLANG_CIR_CIRTOCIRPASSES_H + +#include "mlir/Pass/Pass.h" + +#include + +namespace mlir { +class MLIRContext; +class ModuleOp; +} // namespace mlir + +namespace cir { + +// Run set of cleanup/prepare/etc passes CIR <-> CIR. +void runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx); +} // namespace cir + +#endif // CLANG_CIR_CIRTOCIRPASSES_H_ diff --git a/clang/lib/CIR/CIRPasses.cpp b/clang/lib/CIR/CIRPasses.cpp new file mode 100644 index 000000000000..242c1628897b --- /dev/null +++ b/clang/lib/CIR/CIRPasses.cpp @@ -0,0 +1,29 @@ +//====- CIRPasses.cpp - Lowering from CIR to LLVM -------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements machinery for any CIR <-> CIR passes used by clang. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/CIR/Passes.h" + +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" + +namespace cir { +void runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx) { + mlir::PassManager pm(mlirCtx); + pm.addPass(mlir::createMergeCleanupsPass()); + + auto result = !mlir::failed(pm.run(theModule)); + if (!result) + llvm::report_fatal_error( + "The pass manager failed to lower CIR to llvm IR!"); +} +} // namespace cir \ No newline at end of file diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index b201fc1ae617..d8c738deeebb 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -19,6 +19,7 @@ add_clang_library(clangCIR CIRGenFunction.cpp CIRGenModule.cpp CIRGenTypes.cpp + CIRPasses.cpp CIRRecordLayoutBuilder.cpp ItaniumCXXABI.cpp LowerToLLVM.cpp @@ -33,6 +34,7 @@ add_clang_library(clangCIR clangLex ${dialect_libs} MLIRCIR + MLIRCIRTransforms MLIRAffineToStandard MLIRAnalysis MLIRIR diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index feb86f3d6a8e..bb8ddc022c45 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -24,6 +24,7 @@ #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/CIRToCIRPasses.h" #include "clang/CIR/LowerToLLVM.h" #include "clang/CodeGen/BackendUtil.h" #include "clang/CodeGen/ModuleBuilder.h" @@ -128,7 +129,8 @@ class CIRGenConsumer : public clang::ASTConsumer { switch (action) { case CIRGenAction::OutputType::EmitCIR: - if (outputStream) { + if (outputStream && mlirMod) { + // runCIRToCIRPasses(mlirMod, mlirCtx.get()); mlir::OpPrintingFlags flags; // FIXME: we cannot roundtrip prettyForm=true right now. flags.enableDebugInfo(/*prettyForm=*/false); diff --git a/mlir/include/mlir/Dialect/CIR/Passes.h b/mlir/include/mlir/Dialect/CIR/Passes.h index 1357a3f3422d..fe6512eab798 100644 --- a/mlir/include/mlir/Dialect/CIR/Passes.h +++ b/mlir/include/mlir/Dialect/CIR/Passes.h @@ -18,6 +18,7 @@ namespace mlir { std::unique_ptr createLifetimeCheckPass(); +std::unique_ptr createMergeCleanupsPass(); //===----------------------------------------------------------------------===// // Registration diff --git a/mlir/include/mlir/Dialect/CIR/Passes.td b/mlir/include/mlir/Dialect/CIR/Passes.td index 13e639bfb785..5bef3081bd69 100644 --- a/mlir/include/mlir/Dialect/CIR/Passes.td +++ b/mlir/include/mlir/Dialect/CIR/Passes.td @@ -11,6 +11,17 @@ include "mlir/Pass/PassBase.td" +def MergeCleanups : Pass<"cir-merge-cleanups"> { + let summary = "Remove unnecessary branches to cleanup blocks"; + let description = [{ + Canonicalize pass is too aggressive for CIR when the pipeline is + used for C/C++ analysis. This pass runs some rewrites for scopes, + merging some blocks and eliminating unnecessary control-flow. + }]; + let constructor = "mlir::createMergeCleanupsPass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + def LifetimeCheck : Pass<"cir-lifetime-check"> { let summary = "Check lifetime safety and generate diagnostics"; let description = [{ diff --git a/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt b/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt index bd27fb0fb173..89b335608117 100644 --- a/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt @@ -1,5 +1,6 @@ add_mlir_dialect_library(MLIRCIRTransforms LifetimeCheck.cpp + MergeCleanups.cpp ADDITIONAL_HEADER_DIRS ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/CIR @@ -13,4 +14,5 @@ add_mlir_dialect_library(MLIRCIRTransforms MLIRIR MLIRCIR MLIRPass + MLIRTransformUtils ) diff --git a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp new file mode 100644 index 000000000000..7b0ccb48385f --- /dev/null +++ b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp @@ -0,0 +1,167 @@ +//===- MergeCleanups.cpp - merge simple return/yield blocks ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/CIR/Passes.h" + +#include "PassDetail.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" + +#include "mlir/IR/Matchers.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" + +using namespace mlir; +using namespace cir; + +namespace { + +template +struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + mlir::LogicalResult replaceScopeLikeOp(PatternRewriter &rewriter, + ScopeLikeOpTy scopeLikeOp) const; + + SimplifyRetYieldBlocks(mlir::MLIRContext *context) + : OpRewritePattern(context, /*benefit=*/1) {} + + mlir::LogicalResult + checkAndRewriteRegion(mlir::Region &r, + mlir::PatternRewriter &rewriter) const { + auto &blocks = r.getBlocks(); + + if (blocks.size() <= 1) + return failure(); + + // Rewrite something like this: + // + // cir.if %2 { + // %3 = cir.cst(3 : i32) : i32 + // cir.br ^bb1 + // ^bb1: // pred: ^bb0 + // cir.return %3 : i32 + // } + // + // to this: + // + // cir.if %2 { + // %3 = cir.cst(3 : i32) : i32 + // cir.return %3 : i32 + // } + // + SmallPtrSet candidateBlocks; + for (Block &block : blocks) { + if (block.isEntryBlock()) + continue; + + auto yieldVars = block.getOps(); + for (cir::YieldOp yield : yieldVars) + candidateBlocks.insert(yield.getOperation()->getBlock()); + + auto retVars = block.getOps(); + for (cir::ReturnOp ret : retVars) + candidateBlocks.insert(ret.getOperation()->getBlock()); + } + + bool Changed = false; + for (auto *mergeSource : candidateBlocks) { + if (!(mergeSource->hasNoSuccessors() && mergeSource->hasOneUse())) + continue; + auto *mergeDest = mergeSource->getSinglePredecessor(); + if (!mergeDest || mergeDest->getNumSuccessors() != 1) + continue; + rewriter.eraseOp(mergeDest->getTerminator()); + rewriter.mergeBlocks(mergeSource, mergeDest); + Changed = true; + } + + return Changed ? success() : failure(); + } + + mlir::LogicalResult + matchAndRewrite(ScopeLikeOpTy op, + mlir::PatternRewriter &rewriter) const override { + return replaceScopeLikeOp(rewriter, op); + } +}; + +// Specialize the template to account for the different build signatures for +// IfOp, ScopeOp and FuncOp. +template <> +mlir::LogicalResult +SimplifyRetYieldBlocks::replaceScopeLikeOp(PatternRewriter &rewriter, + IfOp ifOp) const { + bool regionChanged = false; + if (checkAndRewriteRegion(ifOp.getThenRegion(), rewriter).succeeded()) + regionChanged = true; + if (checkAndRewriteRegion(ifOp.getElseRegion(), rewriter).succeeded()) + regionChanged = true; + return regionChanged ? success() : failure(); +} + +template <> +mlir::LogicalResult +SimplifyRetYieldBlocks::replaceScopeLikeOp(PatternRewriter &rewriter, + ScopeOp scopeOp) const { + bool regionChanged = false; + if (checkAndRewriteRegion(scopeOp.getRegion(), rewriter).succeeded()) + regionChanged = true; + return regionChanged ? success() : failure(); +} + +template <> +mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( + PatternRewriter &rewriter, mlir::FuncOp funcOp) const { + bool regionChanged = false; + if (checkAndRewriteRegion(funcOp.getRegion(), rewriter).succeeded()) + regionChanged = true; + return regionChanged ? success() : failure(); +} + +void getMergeCleanupsPatterns(RewritePatternSet &results, + MLIRContext *context) { + results.add, SimplifyRetYieldBlocks, + SimplifyRetYieldBlocks>(context); +} + +struct MergeCleanupsPass : public MergeCleanupsBase { + MergeCleanupsPass() = default; + void runOnOperation() override; +}; + +// The same operation rewriting done here could have been performed +// by CanonicalizerPass (adding hasCanonicalizer for target Ops and implementing +// the same from above in CIRDialects.cpp). However, it's currently too +// aggressive for static analysis purposes, since it might remove things where +// a diagnostic can be generated. +// +// FIXME: perhaps we can add one more mode to GreedyRewriteConfig to +// disable this behavior. +void MergeCleanupsPass::runOnOperation() { + auto op = getOperation(); + mlir::RewritePatternSet patterns(&getContext()); + getMergeCleanupsPatterns(patterns, &getContext()); + FrozenRewritePatternSet frozenPatterns(std::move(patterns)); + + SmallVector opsToSimplify; + op->walk([&](Operation *op) { + if (isa(op)) + opsToSimplify.push_back(op); + }); + + for (auto *o : opsToSimplify) { + bool erase = false; + (void)applyOpPatternsAndFold(o, frozenPatterns, GreedyRewriteConfig(), + &erase); + } +} +} // namespace + +std::unique_ptr mlir::createMergeCleanupsPass() { + return std::make_unique(); +} From 64268d8f3f14df173faac13857c0b933e985b3da Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 7 Mar 2022 17:30:57 -0800 Subject: [PATCH 0201/2301] [CIR] Add more general support for 'return' statements - Decided to emit cir.return in place within other regions (if, scopes, etc) instead of fully doing all the branch dance to navigate in cleanup blocks. Every region now has a cleanup block, that ends up being merged back to the entry block in most cases. Lowering to LLVM will requiring chaining all these cleanup blocks. - Create an alloca to hold the return. - Always generate a branch instruction and a return block. - Clean that up as part of the MergeCleanups pass. - Update tons of tests. - Fix some of the verifiers scope/if (needed for early returns). - Enable MergeCleanupsPass, which is needed by this test rewrites. --- clang/lib/CIR/CIRGenFunction.h | 11 +- clang/lib/CIR/CIRGenModule.cpp | 300 ++++++++++++------- clang/lib/CIR/CIRGenModule.h | 68 ++++- clang/lib/CIRFrontendAction/CIRGenAction.cpp | 2 +- clang/test/CIR/CodeGen/basic.c | 34 ++- clang/test/CIR/CodeGen/basic.cpp | 102 +++---- clang/test/CIR/CodeGen/goto.cpp | 38 ++- clang/test/CIR/CodeGen/sourcelocation.cpp | 90 +++--- clang/test/CIR/IR/invalid.cir | 2 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 27 +- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 185 ++++++------ 11 files changed, 492 insertions(+), 367 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index ddc96041d277..4e212ce0da6a 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -17,6 +17,7 @@ #include "CIRGenModule.h" #include "CIRGenValue.h" +#include "mlir/IR/TypeRange.h" #include "mlir/IR/Value.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/ExprCXX.h" @@ -51,15 +52,9 @@ class CIRGenFunction { ForceRightToLeft }; - /// If a return statement is being visited, this holds the return statment's - /// result expression. - const clang::Expr *RetExpr = nullptr; - - mlir::Value RetValue = nullptr; - std::optional RetLoc; - - mlir::Type FnRetTy; clang::QualType FnRetQualTy; + std::optional FnRetTy; + std::optional FnRetAlloca; CIRGenModule &CGM; diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index c61a8f96ec6b..ae3bad12b495 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -86,10 +86,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &CGO) : builder(&context), astCtx(astctx), langOpts(astctx.getLangOpts()), - codeGenOpts(CGO), theModule{mlir::ModuleOp::create( - builder.getUnknownLoc())}, - target(astCtx.getTargetInfo()), - ABI(createCXXABI(*this)), genTypes{*this} {} + codeGenOpts(CGO), + theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, + target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), + genTypes{*this} {} CIRGenModule::~CIRGenModule() {} @@ -115,6 +115,36 @@ mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { return mlir::FusedLoc::get(locs, metadata, builder.getContext()); } +// Allocas are expected to be in the beginning of the entry block +// in whatever region they show up. +static void updateAllocaInEntryBlock(AllocaOp localVarAddr) { + auto *parentBlock = localVarAddr->getBlock(); + auto lastAlloca = std::find_if_not( + parentBlock->begin(), parentBlock->end(), + [](mlir::Operation &op) { return isa(&op); }); + if (lastAlloca != std::end(*parentBlock)) + localVarAddr->moveBefore(&*lastAlloca); + else + localVarAddr->moveBefore(&parentBlock->front()); +} + +void CIRGenModule::buildAndUpdateRetAlloca(QualType T, mlir::Location loc, + CharUnits alignment) { + auto localVarTy = getCIRType(T); + auto localVarPtrTy = + mlir::cir::PointerType::get(builder.getContext(), localVarTy); + + auto alignIntAttr = + mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), + alignment.getQuantity()); + auto addr = builder.create( + loc, /*addr type*/ localVarPtrTy, + /*var type*/ localVarTy, "__retval", InitStyle::uninitialized, + alignIntAttr); + updateAllocaInEntryBlock(addr); + CurCGF->FnRetAlloca = addr; +} + mlir::LogicalResult CIRGenModule::declare(const Decl *var, QualType T, mlir::Location loc, CharUnits alignment, @@ -137,17 +167,7 @@ mlir::LogicalResult CIRGenModule::declare(const Decl *var, QualType T, loc, /*addr type*/ localVarPtrTy, /*var type*/ localVarTy, namedVar->getName(), IsParam ? InitStyle::paraminit : InitStyle::uninitialized, alignIntAttr); - - // Allocas are expected to be in the beginning of the entry block - // in whatever region they show up. - auto *parentBlock = localVarAddr->getBlock(); - auto lastAlloca = std::find_if_not( - parentBlock->begin(), parentBlock->end(), - [](mlir::Operation &op) { return isa(&op); }); - if (lastAlloca != std::end(*parentBlock)) - localVarAddr->moveBefore(&*lastAlloca); - else - localVarAddr->moveBefore(&parentBlock->front()); + updateAllocaInEntryBlock(localVarAddr); // Insert into the symbol table, allocate some stack space in the // function entry block. @@ -600,40 +620,42 @@ mlir::LogicalResult CIRGenModule::buildReturnStmt(const ReturnStmt &S) { S.getNRVOCandidate()->isNRVOVariable()) && "unimplemented"); assert(!CurCGF->FnRetQualTy->isReferenceType() && "unimplemented"); + auto loc = getLoc(S.getSourceRange()); // Emit the result value, even if unused, to evaluate the side effects. const Expr *RV = S.getRetValue(); - if (!RV) // Do nothing (return value is left uninitialized) - return mlir::success(); - assert(!isa(RV) && "unimplemented"); + if (RV) { + assert(!isa(RV) && "unimplemented"); + + mlir::Value V = nullptr; + switch (CIRGenFunction::getEvaluationKind(RV->getType())) { + case TEK_Scalar: + V = buildScalarExpr(RV); + builder.create(loc, V, *CurCGF->FnRetAlloca); + break; + case TEK_Complex: + case TEK_Aggregate: + llvm::errs() << "ReturnStmt EvaluationKind not implemented\n"; + return mlir::failure(); + } - mlir::Value V = nullptr; - switch (CIRGenFunction::getEvaluationKind(RV->getType())) { - case TEK_Scalar: - V = buildScalarExpr(RV); - // Builder.CreateStore(EmitScalarExpr(RV), ReturnValue); - break; - case TEK_Complex: - case TEK_Aggregate: - llvm::errs() << "ReturnStmt EvaluationKind not implemented\n"; - return mlir::failure(); + // Otherwise, this return operation has zero operands. + if (!V || (RV && RV->getType()->isVoidType())) { + // FIXME: evaluate for side effects. + } + } else { + // Do nothing (return value is left uninitialized), this is also + // the path when returning from void functions. } - // FIXME: there might be multiple return values in a function, fix this - // once we add support for arbitraty returns. - CurCGF->RetValue = V; - CurCGF->RetLoc = getLoc(S.getSourceRange()); - - // Otherwise, this return operation has zero operands. - if (!V || (RV && RV->getType()->isVoidType())) { - // FIXME: evaluate for side effects. - } + // Create a new return block (if not existent) and add a branch to + // it. The actual return instruction is only inserted during current + // scope cleanup handling. + auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); + builder.create(loc, retBlock); - // FIXME: this currently assumes only a return stmt as the last - // on in a function, make this generic. - if (!builder.getInsertionBlock()->isEntryBlock()) - builder.create(getLoc(S.getSourceRange()), - currLexScope->CleanupBlock); + // Insert the new block to continue codegen after branch to ret block. + builder.createBlock(builder.getBlock()->getParent()); return mlir::success(); } @@ -666,7 +688,8 @@ CIRGenModule::buildBranchThroughCleanup(JumpDest &Dest, LabelDecl *L, // materialized label. Keep track of unsolved goto's. mlir::Block *DstBlock = Dest.getBlock(); auto G = builder.create( - Loc, Dest.isValid() ? DstBlock : currLexScope->CleanupBlock); + Loc, Dest.isValid() ? DstBlock + : currLexScope->getOrCreateCleanupBlock(builder)); if (!Dest.isValid()) currLexScope->PendingGotos.push_back(std::make_pair(G, L)); @@ -675,7 +698,9 @@ CIRGenModule::buildBranchThroughCleanup(JumpDest &Dest, LabelDecl *L, /// All scope related cleanup needed: /// - Patching up unsolved goto's. +/// - Build all cleanup code and insert yield/returns. void CIRGenModule::LexicalScopeGuard::cleanup() { + auto &builder = CGM.builder; auto *localScope = CGM.currLexScope; // Handle pending gotos and the solved labels in this scope. @@ -694,40 +719,69 @@ void CIRGenModule::LexicalScopeGuard::cleanup() { } localScope->SolvedLabels.clear(); - // Do not insert the cleanup block unecessarily, this doesn't really need - // to be here (should be a separate pass), but it helps keeping small - // testcases minimal for now. - auto &builder = CGM.builder; - if (!builder.getInsertionBlock()->isEntryBlock()) { - // If the current block doesn't have a terminator, add a branch to the - // cleanup block, where the actual cir.return/yield happens (cleanup block). - if (!builder.getBlock()->back().hasTrait()) - builder.create(builder.getBlock()->back().getLoc(), - localScope->CleanupBlock); - - // Set the insertion point to the end of the cleanup block and insert - // the return instruction. - builder.setInsertionPointToEnd(localScope->CleanupBlock); - } else { - assert(localScope->CleanupBlock->empty() && "not empty"); - assert( - (builder.getBlock()->empty() || - !builder.getBlock()->back().hasTrait()) && - "entry basic block already has a terminator?"); - // Do not even emit cleanup blocks. - localScope->CleanupBlock->erase(); + // Cleanup are done right before codegen resume a scope. This is where + // objects are destroyed. + if (localScope->RetBlock) { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPointToEnd(localScope->RetBlock); + + // TODO: insert actual scope cleanup HERE (dtors and etc) + + // If there's anything to return, load it first. + if (CGM.CurCGF->FnRetTy.has_value()) { + auto val = builder.create( + *localScope->RetLoc, *CGM.CurCGF->FnRetTy, *CGM.CurCGF->FnRetAlloca); + builder.create(*localScope->RetLoc, ArrayRef(val.getResult())); + } else { + builder.create(*localScope->RetLoc); + } } - auto *CurFn = CGM.CurCGF; - if (localScope->Depth == 0) { // end of function - // FIXME: this currently assumes only one cir.return in the function. - builder.create(CurFn->RetLoc ? *(CurFn->RetLoc) - : localScope->EndLoc, - CurFn->RetValue ? ArrayRef(CurFn->RetValue) - : ArrayRef()); - } else { // end of other local scope - builder.create(localScope->EndLoc); + auto insertCleanupAndLeave = [&](mlir::Block *InsPt) { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPointToEnd(InsPt); + // TODO: insert actual scope cleanup (dtors and etc) + if (localScope->Depth != 0) // end of any local scope != function + builder.create(localScope->EndLoc); + else + builder.create(localScope->EndLoc); + }; + + // If a cleanup block has been created at some point, branch to it + // and set the insertion point to continue at the cleanup block. + // Terminators are then inserted either in the cleanup block or + // inline in this current block. + auto *cleanupBlock = localScope->getCleanupBlock(builder); + if (cleanupBlock) + insertCleanupAndLeave(cleanupBlock); + + // Now deal with any pending block wrap up like implicit end of + // scope. + + // If a terminator is already present in the current block, nothing + // else to do here. + bool entryBlock = builder.getInsertionBlock()->isEntryBlock(); + auto *currBlock = builder.getBlock(); + bool hasTerminator = + !currBlock->empty() && + currBlock->back().hasTrait(); + if (hasTerminator) + return; + + // An empty non-entry block has nothing to offer. + if (!entryBlock && currBlock->empty()) { + currBlock->erase(); + return; } + + // If there's a cleanup block, branch to it, nothing else to do. + if (cleanupBlock) { + builder.create(currBlock->back().getLoc(), cleanupBlock); + return; + } + + // No pre-existent cleanup block, emit cleanup code and yield/return. + insertCleanupAndLeave(currBlock); } mlir::LogicalResult CIRGenModule::buildGotoStmt(const GotoStmt &S) { @@ -747,7 +801,7 @@ mlir::LogicalResult CIRGenModule::buildGotoStmt(const GotoStmt &S) { return mlir::failure(); // Insert the new block to continue codegen after goto. - builder.createBlock(currLexScope->CleanupBlock); + builder.createBlock(builder.getBlock()->getParent()); // What here... return mlir::success(); @@ -757,23 +811,23 @@ mlir::LogicalResult CIRGenModule::buildLabel(const LabelDecl *D) { JumpDest &Dest = LabelMap[D]; // Create a new block to tag with a label and add a branch from - // the current one to it. + // the current one to it. If the block is empty just call attach it + // to this label. mlir::Block *currBlock = builder.getBlock(); + mlir::Block *labelBlock = currBlock; if (!currBlock->empty()) { - mlir::Operation *lastOp = nullptr; - if (!currBlock->back().hasTrait()) - lastOp = builder.create(getLoc(D->getSourceRange()), - currLexScope->CleanupBlock); - - currBlock = builder.createBlock(currLexScope->CleanupBlock); - if (lastOp) { - auto g = cast(lastOp); - g.setSuccessor(currBlock); + + { + mlir::OpBuilder::InsertionGuard guard(builder); + labelBlock = builder.createBlock(builder.getBlock()->getParent()); } + + builder.create(getLoc(D->getSourceRange()), labelBlock); + builder.setInsertionPointToEnd(labelBlock); } if (!Dest.isValid()) { - Dest.Block = currBlock; + Dest.Block = labelBlock; currLexScope->SolvedLabels.insert(D); // FIXME: add a label attribute to block... } else { @@ -1203,10 +1257,17 @@ mlir::LogicalResult CIRGenModule::buildIfOnBoolExpr(const Expr *cond, loc, condV, elseS, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto locBegin = fusedLoc.getLocations()[0]; - auto locEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{builder, locBegin, locEnd}; + // FIXME: abstract all this massive location handling elsewhere. + SmallVector locs; + if (loc.isa()) { + locs.push_back(loc); + locs.push_back(loc); + } else if (loc.isa()) { + auto fusedLoc = loc.cast(); + locs.push_back(fusedLoc.getLocations()[0]); + locs.push_back(fusedLoc.getLocations()[1]); + } + LexicalScopeContext lexScope{locs[0], locs[1]}; LexicalScopeGuard lexThenGuard{*this, &lexScope}; resThen = buildStmt(thenS, /*useCurrentScope=*/true); }, @@ -1215,7 +1276,7 @@ mlir::LogicalResult CIRGenModule::buildIfOnBoolExpr(const Expr *cond, auto fusedLoc = loc.cast(); auto locBegin = fusedLoc.getLocations()[2]; auto locEnd = fusedLoc.getLocations()[3]; - LexicalScopeContext lexScope{builder, locBegin, locEnd}; + LexicalScopeContext lexScope{locBegin, locEnd}; LexicalScopeGuard lexElseGuard{*this, &lexScope}; resElse = buildStmt(elseS, /*useCurrentScope=*/true); }); @@ -1224,6 +1285,26 @@ mlir::LogicalResult CIRGenModule::buildIfOnBoolExpr(const Expr *cond, resElse.succeeded()); } +static mlir::Location getIfLocs(CIRGenModule &CGM, const clang::Stmt *thenS, + const clang::Stmt *elseS) { + // Attempt to be more accurate as possible with IfOp location, generate + // one fused location that has either 2 or 4 total locations, depending + // on else's availability. + SmallVector ifLocs; + mlir::Attribute metadata; + + clang::SourceRange t = thenS->getSourceRange(); + ifLocs.push_back(CGM.getLoc(t.getBegin())); + ifLocs.push_back(CGM.getLoc(t.getEnd())); + if (elseS) { + clang::SourceRange e = elseS->getSourceRange(); + ifLocs.push_back(CGM.getLoc(e.getBegin())); + ifLocs.push_back(CGM.getLoc(e.getEnd())); + } + + return mlir::FusedLoc::get(ifLocs, metadata, CGM.getBuilder().getContext()); +} + mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { // The else branch of a consteval if statement is always the only branch // that can be runtime evaluated. @@ -1250,19 +1331,7 @@ mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { } // TODO: PGO and likelihood. - // Attempt to be more accurate as possible with IfOp location, generate - // one fused location that has either 2 or 4 total locations, depending - // on else's availability. - SmallVector ifLocs; - mlir::Attribute metadata; - ifLocs.push_back(getLoc(S.getThen()->getSourceRange().getBegin())); - ifLocs.push_back(getLoc(S.getThen()->getSourceRange().getEnd())); - if (S.getElse()) { - ifLocs.push_back(getLoc(S.getElse()->getSourceRange().getBegin())); - ifLocs.push_back(getLoc(S.getElse()->getSourceRange().getEnd())); - } - - auto ifLoc = mlir::FusedLoc::get(ifLocs, metadata, builder.getContext()); + auto ifLoc = getIfLocs(*this, S.getThen(), S.getElse()); return buildIfOnBoolExpr(S.getCond(), ifLoc, S.getThen(), S.getElse()); }; @@ -1276,7 +1345,7 @@ mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { auto fusedLoc = loc.cast(); auto scopeLocBegin = fusedLoc.getLocations()[0]; auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{builder, scopeLocBegin, scopeLocEnd}; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd}; LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; res = ifStmtBuilder(); }); @@ -1489,7 +1558,7 @@ mlir::LogicalResult CIRGenModule::buildCompoundStmt(const CompoundStmt &S) { auto fusedLoc = loc.cast(); auto locBegin = fusedLoc.getLocations()[0]; auto locEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{builder, locBegin, locEnd}; + LexicalScopeContext lexScope{locBegin, locEnd}; LexicalScopeGuard lexScopeGuard{*this, &lexScope}; res = compoundStmtBuilder(); }); @@ -1587,10 +1656,13 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { argTypes.push_back(getCIRType(Param->getType())); CurCGF->FnRetQualTy = FD->getReturnType(); - auto funcType = - builder.getFunctionType(argTypes, CurCGF->FnRetQualTy->isVoidType() - ? mlir::TypeRange() - : getCIRType(CurCGF->FnRetQualTy)); + mlir::TypeRange FnTyRange = {}; + if (!CurCGF->FnRetQualTy->isVoidType()) { + CurCGF->FnRetTy = getCIRType(CurCGF->FnRetQualTy); + FnTyRange = mlir::TypeRange{*CurCGF->FnRetTy}; + } + + auto funcType = builder.getFunctionType(argTypes, FnTyRange); mlir::FuncOp function = mlir::FuncOp::create(fnLoc, FD->getName(), funcType); if (!function) return nullptr; @@ -1608,7 +1680,7 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { // Initialize lexical scope information. { - LexicalScopeContext lexScope{builder, FnBeginLoc, FnEndLoc}; + LexicalScopeContext lexScope{FnBeginLoc, FnEndLoc}; LexicalScopeGuard scopeGuard{*this, &lexScope}; // Declare all the function arguments in the symbol table. @@ -1631,6 +1703,12 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { } assert(builder.getInsertionBlock() && "Should be valid"); + // When the current function is not void, create an address to store the + // result value. + if (CurCGF->FnRetTy.has_value()) + buildAndUpdateRetAlloca(CurCGF->FnRetQualTy, FnEndLoc, + getNaturalTypeAlignment(CurCGF->FnRetQualTy)); + // Emit the body of the function. if (mlir::failed(buildFunctionBody(FD->getBody()))) { function.erase(); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 5c58aeb83c2e..952ef3321ee5 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -121,23 +121,41 @@ class CIRGenModule { // Represents a cir.scope, cir.if, and then/else regions. I.e. lexical // scopes that require cleanups. struct LexicalScopeContext { + private: + // Block containing cleanup code for things initialized in this + // lexical context (scope). + mlir::Block *CleanupBlock = nullptr; + + public: unsigned Depth = 0; - LexicalScopeContext(mlir::OpBuilder &builder, mlir::Location b, - mlir::Location e) - : BeginLoc(b), EndLoc(e) { + bool HasReturn = false; + LexicalScopeContext(mlir::Location b, mlir::Location e) + : BeginLoc(b), EndLoc(e) {} + ~LexicalScopeContext() = default; + + // --- + // Goto handling + // --- + + // Lazy create cleanup block or return what's available. + mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) { + if (CleanupBlock) + return getCleanupBlock(builder); + return createCleanupBlock(builder); + } + + mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) { + return CleanupBlock; + } + mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) { { - // Create the cleanup block but dont hook it up around just - // yet. + // Create the cleanup block but dont hook it up around just yet. mlir::OpBuilder::InsertionGuard guard(builder); CleanupBlock = builder.createBlock(builder.getBlock()->getParent()); } assert(builder.getInsertionBlock() && "Should be valid"); + return CleanupBlock; } - ~LexicalScopeContext() = default; - - // Block containing cleanup code for things initialized in this - // lexical context (scope). - mlir::Block *CleanupBlock = nullptr; // Goto's introduced in this scope but didn't get fixed. llvm::SmallVector, 4> @@ -146,6 +164,30 @@ class CIRGenModule { // Labels solved inside this scope. llvm::SmallPtrSet SolvedLabels; + // --- + // Return handling + // --- + + // Return block info for this scope. + mlir::Block *RetBlock = nullptr; + std::optional RetLoc; + + // There's usually only one ret block per scope, but this needs to be + // get or create because of potential unreachable return statements, note + // that for those, all source location maps to the first one found. + mlir::Block *getOrCreateRetBlock(CIRGenModule &CGM, mlir::Location loc) { + if (RetBlock) + return RetBlock; + RetLoc = loc; + { + // Create the cleanup block but dont hook it up around just yet. + mlir::OpBuilder::InsertionGuard guard(CGM.builder); + RetBlock = CGM.builder.createBlock(CGM.builder.getBlock()->getParent()); + } + assert(CGM.builder.getInsertionBlock() && "Should be valid"); + return RetBlock; + } + mlir::Location BeginLoc, EndLoc; }; @@ -209,6 +251,8 @@ class CIRGenModule { mlir::LogicalResult declare(const clang::Decl *var, clang::QualType T, mlir::Location loc, clang::CharUnits alignment, mlir::Value &addr, bool IsParam = false); + void buildAndUpdateRetAlloca(clang::QualType T, mlir::Location loc, + clang::CharUnits alignment); public: mlir::ModuleOp getModule() { return theModule; } @@ -369,8 +413,8 @@ class CIRGenModule { /// with codegen. /// TODO: Add TBAAAccessInfo clang::CharUnits getNaturalTypeAlignment(clang::QualType T, - LValueBaseInfo *BaseInfo, - bool forPointeeType); + LValueBaseInfo *BaseInfo = nullptr, + bool forPointeeType = false); /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index bb8ddc022c45..ad113e1fad4d 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -130,7 +130,7 @@ class CIRGenConsumer : public clang::ASTConsumer { switch (action) { case CIRGenAction::OutputType::EmitCIR: if (outputStream && mlirMod) { - // runCIRToCIRPasses(mlirMod, mlirCtx.get()); + runCIRToCIRPasses(mlirMod, mlirCtx.get()); mlir::OpPrintingFlags flags; // FIXME: we cannot roundtrip prettyForm=true right now. flags.enableDebugInfo(/*prettyForm=*/false); diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index bd5a4b942139..8a8d2379f6f3 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // XFAIL: * @@ -11,18 +11,23 @@ int foo(int i) { // CHECK: module { // CHECK-NEXT: func @foo(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", paraminit] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.return %2 : i32 -// CHECK-NEXT: } +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", paraminit] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.store %3, %1 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: cir.return %4 : i32 int f2() { return 3; } // CHECK: func @f2() -> i32 { -// CHECK-NEXT: %0 = cir.cst(3 : i32) : i32 -// CHECK-NEXT: cir.return %0 : i32 +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.return %2 : i32 int f3() { int i = 3; @@ -30,6 +35,11 @@ int f3() { } // CHECK: func @f3() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 -// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.store %3, %1 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: cir.return %4 : i32 diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 684796ac249a..9aa2485951e7 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -8,8 +8,8 @@ int *p0() { } // CHECK: func @p0() -> !cir.ptr { -// CHECK: %1 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: cir.store %2, %0 : !cir.ptr, cir.ptr > int *p1() { int *p; @@ -19,8 +19,8 @@ int *p1() { // CHECK: func @p1() -> !cir.ptr { // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["p", uninitialized] -// CHECK: %1 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: cir.store %2, %0 : !cir.ptr, cir.ptr > int *p2() { int *p = nullptr; @@ -34,24 +34,26 @@ int *p2() { } // CHECK: func @p2() -> !cir.ptr { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr -// CHECK-NEXT: cir.store %1, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.scope { -// CHECK-NEXT: %5 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} -// CHECK-NEXT: %6 = cir.cst(0 : i32) : i32 -// CHECK-NEXT: cir.store %6, %5 : i32, cir.ptr -// CHECK-NEXT: cir.store %5, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %7 = cir.cst(42 : i32) : i32 -// CHECK-NEXT: %8 = cir.load deref %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.store %7, %8 : i32, cir.ptr -// CHECK-NEXT: } loc(#[[loc15:loc[0-9]+]]) -// CHECK-NEXT: %2 = cir.cst(42 : i32) : i32 -// CHECK-NEXT: %3 = cir.load deref %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.store %2, %3 : i32, cir.ptr -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.return %4 : !cir.ptr -// CHECK-NEXT: } +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] {alignment = 8 : i64} loc(#loc15) +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} loc(#loc16) +// CHECK-NEXT: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr loc(#loc17) +// CHECK-NEXT: cir.store %2, %0 : !cir.ptr, cir.ptr > loc(#loc15) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %7 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} loc(#loc19) +// CHECK-NEXT: %8 = cir.cst(0 : i32) : i32 loc(#loc20) +// CHECK-NEXT: cir.store %8, %7 : i32, cir.ptr loc(#loc19) +// CHECK-NEXT: cir.store %7, %0 : !cir.ptr, cir.ptr > loc(#loc21) +// CHECK-NEXT: %9 = cir.cst(42 : i32) : i32 loc(#loc22) +// CHECK-NEXT: %10 = cir.load deref %0 : cir.ptr >, !cir.ptr loc(#loc23) +// CHECK-NEXT: cir.store %9, %10 : i32, cir.ptr loc(#loc24) +// CHECK-NEXT: } loc(#[[locScope:loc[0-9]+]]) +// CHECK-NEXT: %3 = cir.cst(42 : i32) : i32 loc(#loc25) +// CHECK-NEXT: %4 = cir.load deref %0 : cir.ptr >, !cir.ptr loc(#loc26) +// CHECK-NEXT: cir.store %3, %4 : i32, cir.ptr loc(#loc27) +// CHECK-NEXT: %5 = cir.load %0 : cir.ptr >, !cir.ptr loc(#loc28) +// CHECK-NEXT: cir.store %5, %1 : !cir.ptr, cir.ptr > loc(#loc29) +// CHECK-NEXT: %6 = cir.load %1 : cir.ptr >, !cir.ptr loc(#loc29) +// CHECK-NEXT: cir.return %6 : !cir.ptr loc(#loc29) void b0() { bool x = true, y = false; } @@ -66,30 +68,29 @@ void b1(int a) { bool b = a; } // CHECK: %3 = cir.cast(int_to_bool, %2 : i32), !cir.bool // CHECK: cir.store %3, %1 : !cir.bool, cir.ptr -int if0(int a) { +void if0(int a) { int x = 0; if (a) { x = 3; } else { x = 4; } - return x; } -// CHECK: func @if0(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK: func @if0(%arg0: i32 loc({{.*}})) // CHECK: cir.scope { -// CHECK: %4 = cir.load %0 : cir.ptr , i32 -// CHECK: %5 = cir.cast(int_to_bool, %4 : i32), !cir.bool -// CHECK-NEXT: cir.if %5 { -// CHECK-NEXT: %6 = cir.cst(3 : i32) : i32 -// CHECK-NEXT: cir.store %6, %1 : i32, cir.ptr +// CHECK: %3 = cir.load %0 : cir.ptr , i32 +// CHECK: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool +// CHECK-NEXT: cir.if %4 { +// CHECK-NEXT: %5 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: } else { -// CHECK-NEXT: %6 = cir.cst(4 : i32) : i32 -// CHECK-NEXT: cir.store %6, %1 : i32, cir.ptr +// CHECK-NEXT: %5 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: } // CHECK: } -int if1(int a, bool b, bool c) { +void if1(int a, bool b, bool c) { int x = 0; if (a) { x = 3; @@ -102,34 +103,33 @@ int if1(int a, bool b, bool c) { } x = 4; } - return x; } -// CHECK: func @if1(%arg0: i32 loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) -> i32 { +// CHECK: func @if1(%arg0: i32 loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) // CHECK: cir.scope { -// CHECK: %6 = cir.load %0 : cir.ptr , i32 -// CHECK: %7 = cir.cast(int_to_bool, %6 : i32), !cir.bool -// CHECK: cir.if %7 { -// CHECK: %8 = cir.cst(3 : i32) : i32 -// CHECK: cir.store %8, %3 : i32, cir.ptr +// CHECK: %5 = cir.load %0 : cir.ptr , i32 +// CHECK: %6 = cir.cast(int_to_bool, %5 : i32), !cir.bool +// CHECK: cir.if %6 { +// CHECK: %7 = cir.cst(3 : i32) : i32 +// CHECK: cir.store %7, %3 : i32, cir.ptr // CHECK: cir.scope { -// CHECK: %9 = cir.load %1 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.if %9 { -// CHECK-NEXT: %10 = cir.cst(8 : i32) : i32 -// CHECK-NEXT: cir.store %10, %3 : i32, cir.ptr +// CHECK: %8 = cir.load %1 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.if %8 { +// CHECK-NEXT: %9 = cir.cst(8 : i32) : i32 +// CHECK-NEXT: cir.store %9, %3 : i32, cir.ptr // CHECK-NEXT: } // CHECK: } // CHECK: } else { // CHECK: cir.scope { -// CHECK: %9 = cir.load %2 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.if %9 { -// CHECK-NEXT: %10 = cir.cst(14 : i32) : i32 -// CHECK-NEXT: cir.store %10, %3 : i32, cir.ptr +// CHECK: %8 = cir.load %2 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.if %8 { +// CHECK-NEXT: %9 = cir.cst(14 : i32) : i32 +// CHECK-NEXT: cir.store %9, %3 : i32, cir.ptr // CHECK-NEXT: } // CHECK: } -// CHECK: %8 = cir.cst(4 : i32) : i32 -// CHECK: cir.store %8, %3 : i32, cir.ptr +// CHECK: %7 = cir.cst(4 : i32) : i32 +// CHECK: cir.store %7, %3 : i32, cir.ptr // CHECK: } // CHECK: } -// CHECK: #loc15 = loc(fused["{{.*}}basic.cpp":26:3, "{{.*}}basic.cpp":30:3]) +// CHECK: #[[locScope]] = loc(fused["{{.*}}basic.cpp":26:3, "{{.*}}basic.cpp":30:3]) diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 42f4effc7bca..202486354314 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -10,23 +10,21 @@ void g0(int a) { } // CHECK: func @g0 -// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} -// CHECK: cir.store %arg0, %0 : i32, cir.ptr -// CHECK: %2 = cir.load %0 : cir.ptr , i32 -// CHECK: cir.store %2, %1 : i32, cir.ptr -// CHECK: cir.br ^bb2 -// CHECK: ^bb1: // no predecessors -// CHECK: %3 = cir.load %1 : cir.ptr , i32 -// CHECK: %4 = cir.cst(1 : i32) : i32 -// CHECK: %5 = cir.binop(add, %3, %4) : i32 -// CHECK: cir.store %5, %1 : i32, cir.ptr -// CHECK: cir.br ^bb2 -// CHECK: ^bb2: // 2 preds: ^bb0, ^bb1 -// CHECK: %6 = cir.load %1 : cir.ptr , i32 -// CHECK: %7 = cir.cst(2 : i32) : i32 -// CHECK: %8 = cir.binop(add, %6, %7) : i32 -// CHECK: cir.store %8, %1 : i32, cir.ptr -// CHECK: cir.br ^bb3 -// CHECK: ^bb3: // pred: ^bb2 -// CHECK: cir.return \ No newline at end of file +// CHECK-NEXT %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} +// CHECK-NEXT %1 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} +// CHECK-NEXT cir.store %arg0, %0 : i32, cir.ptr +// CHECK-NEXT %2 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT cir.store %2, %1 : i32, cir.ptr +// CHECK-NEXT cir.br ^bb2 +// CHECK-NEXT ^bb1: // no predecessors +// CHECK-NEXT %3 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT %5 = cir.binop(add, %3, %4) : i32 +// CHECK-NEXT cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT cir.br ^bb2 +// CHECK-NEXT ^bb2: // 2 preds: ^bb0, ^bb1 +// CHECK-NEXT %6 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT %7 = cir.cst(2 : i32) : i32 +// CHECK-NEXT %8 = cir.binop(add, %6, %7) : i32 +// CHECK-NEXT cir.store %8, %1 : i32, cir.ptr +// CHECK-NEXT cir.return \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index be4dd0650c68..6a30611ae324 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -11,50 +11,54 @@ int s0(int a, int b) { return x; } -// CHECK: #[[loc2:loc[0-9]+]] = loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]) -// CHECK: #[[loc3:loc[0-9]+]] = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) +// CHECK: #loc2 = loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]) +// CHECK: #loc3 = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) // CHECK: module { // CHECK: func @s0(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} loc(#[[loc2]]) -// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] {alignment = 4 : i64} loc(#[[loc3]]) -// CHECK: %2 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} loc(#[[loc4:loc[0-9]+]]) -// CHECK: cir.store %arg0, %0 : i32, cir.ptr loc(#[[loc5:loc[0-9]+]]) -// CHECK: cir.store %arg1, %1 : i32, cir.ptr loc(#[[loc5]]) -// CHECK: %3 = cir.load %0 : cir.ptr , i32 loc(#[[loc6:loc[0-9]+]]) -// CHECK: %4 = cir.load %1 : cir.ptr , i32 loc(#[[loc7:loc[0-9]+]]) -// CHECK: %5 = cir.binop(add, %3, %4) : i32 loc(#[[loc8:loc[0-9]+]]) -// CHECK: cir.store %5, %2 : i32, cir.ptr loc(#[[loc4]]) +// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} loc(#loc2) +// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] {alignment = 4 : i64} loc(#loc3) +// CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} loc(#loc4) +// CHECK: %3 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} loc(#loc5) +// CHECK: cir.store %arg0, %0 : i32, cir.ptr loc(#loc6) +// CHECK: cir.store %arg1, %1 : i32, cir.ptr loc(#loc6) +// CHECK: %4 = cir.load %0 : cir.ptr , i32 loc(#loc7) +// CHECK: %5 = cir.load %1 : cir.ptr , i32 loc(#loc8) +// CHECK: %6 = cir.binop(add, %4, %5) : i32 loc(#loc9) +// CHECK: cir.store %6, %3 : i32, cir.ptr loc(#loc5) // CHECK: cir.scope { -// CHECK: %7 = cir.load %2 : cir.ptr , i32 loc(#[[loc10:loc[0-9]+]]) -// CHECK: %8 = cir.cst(0 : i32) : i32 loc(#[[loc11:loc[0-9]+]]) -// CHECK: %9 = cir.cmp(gt, %7, %8) : i32, !cir.bool loc(#[[loc12:loc[0-9]+]]) -// CHECK: cir.if %9 { -// CHECK: %10 = cir.cst(0 : i32) : i32 loc(#[[loc14:loc[0-9]+]]) -// CHECK: cir.store %10, %2 : i32, cir.ptr loc(#[[loc15:loc[0-9]+]]) +// CHECK: %9 = cir.load %3 : cir.ptr , i32 loc(#loc11) +// CHECK: %10 = cir.cst(0 : i32) : i32 loc(#loc12) +// CHECK: %11 = cir.cmp(gt, %9, %10) : i32, !cir.bool loc(#loc13) +// CHECK: cir.if %11 { +// CHECK: %12 = cir.cst(0 : i32) : i32 loc(#loc15) +// CHECK: cir.store %12, %3 : i32, cir.ptr loc(#loc16) // CHECK: } else { -// CHECK: %10 = cir.cst(1 : i32) : i32 loc(#[[loc16:loc[0-9]+]]) -// CHECK: cir.store %10, %2 : i32, cir.ptr loc(#[[loc17:loc[0-9]+]]) -// CHECK: } loc(#[[loc13:loc[0-9]+]]) -// CHECK: } loc(#[[loc9:loc[0-9]+]]) -// CHECK: %6 = cir.load %2 : cir.ptr , i32 loc(#[[loc18:loc[0-9]+]]) -// CHECK: cir.return %6 : i32 loc(#[[loc19:loc[0-9]+]]) -// CHECK: } loc(#[[loc1:loc[0-9]+]]) -// CHECK: } loc(#[[loc0:loc[0-9]+]]) -// CHECK: #[[loc0]] = loc(unknown) -// CHECK: #[[loc1]] = loc(fused["{{.*}}sourcelocation.cpp":4:1, "{{.*}}sourcelocation.cpp":11:1]) -// CHECK: #[[loc4]] = loc(fused["{{.*}}sourcelocation.cpp":5:3, "{{.*}}sourcelocation.cpp":5:15]) -// CHECK: #[[loc5]] = loc("{{.*}}sourcelocation.cpp":4:22) -// CHECK: #[[loc6]] = loc("{{.*}}sourcelocation.cpp":5:11) -// CHECK: #[[loc7]] = loc("{{.*}}sourcelocation.cpp":5:15) -// CHECK: #[[loc8]] = loc(fused["{{.*}}sourcelocation.cpp":5:11, "{{.*}}sourcelocation.cpp":5:15]) -// CHECK: #[[loc9]] = loc(fused["{{.*}}sourcelocation.cpp":6:3, "{{.*}}sourcelocation.cpp":9:9]) -// CHECK: #[[loc10]] = loc("{{.*}}sourcelocation.cpp":6:7) -// CHECK: #[[loc11]] = loc("{{.*}}sourcelocation.cpp":6:11) -// CHECK: #[[loc12]] = loc(fused["{{.*}}sourcelocation.cpp":6:7, "{{.*}}sourcelocation.cpp":6:11]) -// CHECK: #[[loc13]] = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9, "{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) -// CHECK: #[[loc14]] = loc("{{.*}}sourcelocation.cpp":7:9) -// CHECK: #[[loc15]] = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9]) -// CHECK: #[[loc16]] = loc("{{.*}}sourcelocation.cpp":9:9) -// CHECK: #[[loc17]] = loc(fused["{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) -// CHECK: #[[loc18]] = loc("{{.*}}sourcelocation.cpp":10:10) -// CHECK: #[[loc19]] = loc(fused["{{.*}}sourcelocation.cpp":10:3, "{{.*}}sourcelocation.cpp":10:10]) +// CHECK: %12 = cir.cst(1 : i32) : i32 loc(#loc17) +// CHECK: cir.store %12, %3 : i32, cir.ptr loc(#loc18) +// CHECK: } loc(#loc14) +// CHECK: } loc(#loc10) +// CHECK: %7 = cir.load %3 : cir.ptr , i32 loc(#loc19) +// CHECK: cir.store %7, %2 : i32, cir.ptr loc(#loc20) +// CHECK: %8 = cir.load %2 : cir.ptr , i32 loc(#loc20) +// CHECK: cir.return %8 : i32 loc(#loc20) +// CHECK: } loc(#loc1) +// CHECK: } loc(#loc0) +// CHECK: #loc0 = loc(unknown) +// CHECK: #loc1 = loc(fused["{{.*}}sourcelocation.cpp":4:1, "{{.*}}sourcelocation.cpp":11:1]) +// CHECK: #loc4 = loc("{{.*}}sourcelocation.cpp":11:1) +// CHECK: #loc5 = loc(fused["{{.*}}sourcelocation.cpp":5:3, "{{.*}}sourcelocation.cpp":5:15]) +// CHECK: #loc6 = loc("{{.*}}sourcelocation.cpp":4:22) +// CHECK: #loc7 = loc("{{.*}}sourcelocation.cpp":5:11) +// CHECK: #loc8 = loc("{{.*}}sourcelocation.cpp":5:15) +// CHECK: #loc9 = loc(fused["{{.*}}sourcelocation.cpp":5:11, "{{.*}}sourcelocation.cpp":5:15]) +// CHECK: #loc10 = loc(fused["{{.*}}sourcelocation.cpp":6:3, "{{.*}}sourcelocation.cpp":9:9]) +// CHECK: #loc11 = loc("{{.*}}sourcelocation.cpp":6:7) +// CHECK: #loc12 = loc("{{.*}}sourcelocation.cpp":6:11) +// CHECK: #loc13 = loc(fused["{{.*}}sourcelocation.cpp":6:7, "{{.*}}sourcelocation.cpp":6:11]) +// CHECK: #loc14 = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9, "{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) +// CHECK: #loc15 = loc("{{.*}}sourcelocation.cpp":7:9) +// CHECK: #loc16 = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9]) +// CHECK: #loc17 = loc("{{.*}}sourcelocation.cpp":9:9) +// CHECK: #loc18 = loc(fused["{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) +// CHECK: #loc19 = loc("{{.*}}sourcelocation.cpp":10:10) +// CHECK: #loc20 = loc(fused["{{.*}}sourcelocation.cpp":10:3, "{{.*}}sourcelocation.cpp":10:10]) \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 42ebc47700bd..6a1c5bcebc35 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -31,7 +31,7 @@ func.func @if0() { func.func @yield0() { %0 = cir.cst(true) : !cir.bool - cir.if %0 { // expected-error {{custom op 'cir.if' expected at least one block with cir.yield}} + cir.if %0 { // expected-error {{custom op 'cir.if' if.then expected at least one block with cir.yield or cir.return}} cir.br ^a ^a: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index e9523161ec56..53f781119dde 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -252,7 +252,8 @@ def StoreOp : CIR_Op<"store", [ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : CIR_Op<"return", [Pure, HasParent<"FuncOp">, Terminator]> { +def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp">, + Terminator]> { let summary = "return operation"; let description = [{ The "return" operation represents a return operation within a function. @@ -343,20 +344,13 @@ def IfOp : CIR_Op<"if", CArg<"function_ref", "nullptr">:$elseBuilder)> ]; - - let extraClassDeclaration = [{ - Block* thenBlock(); - Block* elseBlock(); - }]; - - // TODO: let hasCanonicalizer = 1; } //===----------------------------------------------------------------------===// // YieldOp //===----------------------------------------------------------------------===// -def YieldOp : CIR_Op<"yield", [Pure, ReturnLike, Terminator, +def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp"]>]> { let summary = "termination operation for regions inside if, for, scope, etc"; let description = [{ @@ -410,10 +404,6 @@ def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods":$scopeBuilder)> ]; - - let extraClassDeclaration = [{ - Block* scopeBlock(); - }]; } //===----------------------------------------------------------------------===// @@ -541,9 +531,18 @@ def BrOp : CIR_Op<"br", ``` }]; + let builders = [ + OpBuilder<(ins "Block *":$dest, + CArg<"ValueRange", "{}">:$destOperands), [{ + $_state.addSuccessors(dest); + $_state.addOperands(destOperands); + }]> + ]; + + let arguments = (ins Variadic:$destOperands); let successors = (successor AnySuccessor:$dest); let assemblyFormat = [{ - $dest attr-dict + $dest (`(` $destOperands^ `:` type($destOperands) `)`)? attr-dict }]; } diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index eeac8afac01c..fff51a42f5e1 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -113,42 +113,87 @@ OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } // ReturnOp //===----------------------------------------------------------------------===// -mlir::LogicalResult ReturnOp::verify() { - // We know that the parent operation is a function, because of the 'HasParent' - // trait attached to the operation definition. - auto function = cast(getOperation()->getParentOp()); - - /// ReturnOps can only have a single optional operand. - if (getNumOperands() > 1) - return emitOpError() << "expects at most 1 return operand"; +static mlir::LogicalResult checkReturnAndFunction(ReturnOp op, + FuncOp function) { + // ReturnOps currently only have a single optional operand. + if (op.getNumOperands() > 1) + return op.emitOpError() << "expects at most 1 return operand"; // The operand number and types must match the function signature. const auto &results = function.getFunctionType().getResults(); - if (getNumOperands() != results.size()) - return emitOpError() << "does not return the same number of values (" - << getNumOperands() << ") as the enclosing function (" - << results.size() << ")"; + if (op.getNumOperands() != results.size()) + return op.emitOpError() + << "does not return the same number of values (" + << op.getNumOperands() << ") as the enclosing function (" + << results.size() << ")"; // If the operation does not have an input, we are done. - if (!hasOperand()) + if (!op.hasOperand()) return mlir::success(); - auto inputType = *operand_type_begin(); + auto inputType = *op.operand_type_begin(); auto resultType = results.front(); // Check that the result type of the function matches the operand type. if (inputType == resultType) return mlir::success(); - return emitError() << "type of return operand (" << inputType - << ") doesn't match function result type (" << resultType - << ")"; + return op.emitError() << "type of return operand (" << inputType + << ") doesn't match function result type (" + << resultType << ")"; +} + +mlir::LogicalResult ReturnOp::verify() { + // Returns can be present in multiple different scopes, get the + // wrapping function and start from there. + auto *fnOp = getOperation()->getParentOp(); + while (!isa(fnOp)) + fnOp = fnOp->getParentOp(); + + // Make sure return types match function return type. + if (checkReturnAndFunction(*this, cast(fnOp)).failed()) + return failure(); + + return success(); } //===----------------------------------------------------------------------===// // IfOp //===----------------------------------------------------------------------===// +static LogicalResult checkScopeTerminator(OpAsmParser &parser, + OperationState &result, Region *r) { + if (r->hasOneBlock()) { + ::mlir::impl::ensureRegionTerminator( + *r, parser.getBuilder(), result.location, + [](OpBuilder &builder, Location loc) { + OperationState state(loc, YieldOp::getOperationName()); + YieldOp::build(builder, state); + return Operation::create(state); + }); + return success(); + } + + // Empty regions don't need any handling. + auto &blocks = r->getBlocks(); + if (blocks.size() == 0) + return success(); + + // Test that at least one block has a yield/return terminator. We can + // probably make this a bit more strict. + for (Block &block : blocks) { + if (block.empty()) + continue; + auto &op = block.back(); + if (op.hasTrait() && + isa(op)) { + return success(); + } + } + + return failure(); +} + ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { // Create the regions for 'then'. result.regions.reserve(2); @@ -164,49 +209,27 @@ ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { parser.resolveOperand(cond, boolType, result.operands)) return failure(); - auto checkYieldTerminator = [&](Region *r) { - if (r->hasOneBlock()) { - ::mlir::impl::ensureRegionTerminator( - *r, parser.getBuilder(), result.location, - [](OpBuilder &builder, Location loc) { - OperationState state(loc, YieldOp::getOperationName()); - YieldOp::build(builder, state); - return Operation::create(state); - }); - return success(); - } - - // Soft verification: test that at least one block has a yield terminator. - bool foundYield = false; - for (Block &block : r->getBlocks()) { - if (block.empty()) - continue; - auto &op = block.back(); - if (op.hasTrait() && isa(op)) { - foundYield = true; - break; - } - } - if (!foundYield) { - parser.emitError(loc, "expected at least one block with cir.yield"); - return failure(); - } - return success(); - }; - // Parse the 'then' region. if (parser.parseRegion(*thenRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkYieldTerminator(thenRegion).failed()) + if (checkScopeTerminator(parser, result, thenRegion).failed()) { + parser.emitError( + loc, + "if.then expected at least one block with cir.yield or cir.return"); return failure(); + } - // If we find an 'else' keyword then parse the 'else' region. + // If we find an 'else' keyword, parse the 'else' region. if (!parser.parseOptionalKeyword("else")) { if (parser.parseRegion(*elseRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkYieldTerminator(elseRegion).failed()) + if (checkScopeTerminator(parser, result, elseRegion).failed()) { + parser.emitError( + loc, + "if.else expected at least one block with cir.yield or cir.return"); return failure(); + } } // Parse the optional attribute list. @@ -215,12 +238,23 @@ ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { return success(); } +bool shouldPrintTerm(mlir::Region &r) { + if (!r.hasOneBlock()) + return true; + auto *entryBlock = &r.front(); + if (entryBlock->empty()) + return false; + if (isa(entryBlock->back())) + return true; + return false; +} + void IfOp::print(OpAsmPrinter &p) { p << " " << getCondition() << " "; auto &thenRegion = this->getThenRegion(); p.printRegion(thenRegion, /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/!thenRegion.hasOneBlock()); + /*printBlockTerminators=*/shouldPrintTerm(thenRegion)); // Print the 'else' regions if it exists and has a block. auto &elseRegion = this->getElseRegion(); @@ -228,20 +262,12 @@ void IfOp::print(OpAsmPrinter &p) { p << " else "; p.printRegion(elseRegion, /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/!thenRegion.hasOneBlock()); + /*printBlockTerminators=*/shouldPrintTerm(elseRegion)); } p.printOptionalAttrDict(getOperation()->getAttrs()); } -Block *IfOp::thenBlock() { return &getThenRegion().back(); } -Block *IfOp::elseBlock() { - Region &r = getElseRegion(); - if (r.empty()) - return nullptr; - return &r.back(); -} - /// Default callback for IfOp builders. Inserts nothing for now. void mlir::cir::buildTerminatedBody(OpBuilder &builder, Location loc) {} @@ -319,38 +345,11 @@ ParseResult ScopeOp::parse(OpAsmParser &parser, OperationState &result) { if (parser.parseRegion(*scopeRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - auto checkYieldTerminator = [&](Region *r) { - if (r->hasOneBlock()) { - ::mlir::impl::ensureRegionTerminator( - *r, parser.getBuilder(), result.location, - [](OpBuilder &builder, Location loc) { - OperationState state(loc, YieldOp::getOperationName()); - YieldOp::build(builder, state); - return Operation::create(state); - }); - return success(); - } - - // Soft verification: test that at least one block has a yield terminator. - bool foundYield = false; - for (Block &block : r->getBlocks()) { - if (block.empty()) - continue; - auto &op = block.back(); - if (op.hasTrait() && isa(op)) { - foundYield = true; - break; - } - } - if (!foundYield) { - parser.emitError(loc, "expected at least one block with cir.yield"); - return failure(); - } - return success(); - }; - - if (checkYieldTerminator(scopeRegion).failed()) + if (checkScopeTerminator(parser, result, scopeRegion).failed()) { + parser.emitError( + loc, "expected at least one block with cir.yield or cir.return"); return failure(); + } // Parse the optional attribute list. if (parser.parseOptionalAttrDict(result.attributes)) @@ -363,13 +362,11 @@ void ScopeOp::print(OpAsmPrinter &p) { auto &scopeRegion = this->getScopeRegion(); p.printRegion(scopeRegion, /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/!scopeRegion.hasOneBlock()); + /*printBlockTerminators=*/shouldPrintTerm(scopeRegion)); p.printOptionalAttrDict(getOperation()->getAttrs()); } -Block *ScopeOp::scopeBlock() { return &getScopeRegion().back(); } - /// Given the region at `index`, or the parent operation if `index` is None, /// return the successor regions. These are the regions that may be selected /// during the flow of control. `operands` is a set of optional attributes that From 6030c97fe5123818f8f00c1f1e955cf2190132de Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 14 Mar 2022 15:12:34 -0700 Subject: [PATCH 0202/2301] [CIR][NFC] Move type comments around --- mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td index 3141d599c103..8d28419a65fb 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td @@ -61,14 +61,10 @@ def CIR_BoolType : } //===----------------------------------------------------------------------===// -// One type to bind them all -//===----------------------------------------------------------------------===// - -def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType, CIR_BoolType]>; - - -//===----------------------------------------------------------------------===// +// StructType +// // The base type for all RecordDecls. +// //===----------------------------------------------------------------------===// def CIR_StructType : CIR_Type<"Struct", "struct"> { @@ -96,4 +92,10 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { let hasCustomAssemblyFormat = 1; } -#endif // MLIR_CIR_DIALECT_CIR_TYPES +//===----------------------------------------------------------------------===// +// One type to bind them all +//===----------------------------------------------------------------------===// + +def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType, CIR_BoolType, CIR_StructType]>; + +#endif // MLIR_CIR_DIALECT_CIR_TYPES \ No newline at end of file From 547268caada8057dd46c37fd5afcab35a71e1395 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 14 Mar 2022 21:12:08 -0400 Subject: [PATCH 0203/2301] [CIR] Support args in CIRGenFunction::buildCallArgs This doens't work entirely yet as `arrangeFreeFunctionLikeCall` still needs ammended to support args. --- clang/lib/CIR/CIRGenCall.cpp | 32 ++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenCall.h | 2 +- clang/lib/CIR/CIRGenFunction.cpp | 25 +++++++++++++++++++++---- clang/lib/CIR/CIRGenFunction.h | 13 ++++++++++--- 4 files changed, 64 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index d1e1c9c6274b..ab2215cc7cb5 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -374,3 +374,35 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, assert(theCall.getNumResults() == 0 && "Returns NYI"); return RValue::get(nullptr); } + +void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, + QualType type) { + // TODO: Add the DisableDebugLocationUpdates helper + assert(!dyn_cast(E) && "NYI"); + + assert(type->isReferenceType() == E->isGLValue() && + "reference binding to unmaterialized r-value!"); + + assert(!E->isGLValue() && "NYI"); + + bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); + + // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. + // However, we still have to push an EH-only cleanup in case we unwind before + // we make it to the call. + assert(!type->isRecordType() && "Record type args NYI"); + + assert(!HasAggregateEvalKind && "aggregate args NYI"); + assert(!isa(E) && "Casted args NYI"); + + args.add(buildAnyExprToTemp(E), type); +} + +/// buildAnyExprToTemp - Similar to buildAnyExpr(), however, the result will +/// always be accessible even if no aggregate location is provided. +RValue CIRGenFunction::buildAnyExprToTemp(const Expr *E) { + AggValueSlot AggSlot = AggValueSlot::ignored(); + + assert(!hasAggregateEvaluationKind(E->getType()) && "aggregate args NYI"); + return buildAnyExpr(E, AggSlot); +} diff --git a/clang/lib/CIR/CIRGenCall.h b/clang/lib/CIR/CIRGenCall.h index 57257722e00e..1f76b3bf78be 100644 --- a/clang/lib/CIR/CIRGenCall.h +++ b/clang/lib/CIR/CIRGenCall.h @@ -150,9 +150,9 @@ struct CallArg { clang::QualType Ty; CallArg(RValue rv, clang::QualType ty) : RV(rv), HasLV(false), IsUsed(false), Ty(ty) { - (void)HasLV; (void)IsUsed; } + bool hasLValue() const { return HasLV; } }; class CallArgList : public llvm::SmallVector { diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index d1404f44dfc1..6c6bf6e88858 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -13,6 +13,7 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "clang/AST/ExprObjC.h" #include "clang/Basic/TargetInfo.h" #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -144,7 +145,24 @@ void CIRGenFunction::buildCallArgs( // Evaluate each argument in the appropriate order. size_t CallArgsStart = Args.size(); - assert(ArgTypes.size() == 0 && "Args NYI"); + for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { + unsigned Idx = LeftToRight ? I : E - I - 1; + CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; + unsigned InitialArgSize = Args.size(); + assert(!isa(*Arg) && "NYI"); + assert(!isa(AC.getDecl()) && "NYI"); + + buildCallArg(Args, *Arg, ArgTypes[Idx]); + // In particular, we depend on it being the last arg in Args, and the + // objectsize bits depend on there only being one arg if !LeftToRight. + assert(InitialArgSize + 1 == Args.size() && + "The code below depends on only adding one arg per buildCallArg"); + (void)InitialArgSize; + // Since pointer argument are never emitted as LValue, it is safe to emit + // non-null argument check for r-value only. + assert(!SanOpts.has(SanitizerKind::NonnullAttribute) && "Sanitizers NYI"); + assert(!SanOpts.has(SanitizerKind::NullabilityArg) && "Sanitizers NYI"); + } if (!LeftToRight) { // Un-reverse the arguments we just evaluated so they match up with the CIR @@ -155,9 +173,8 @@ void CIRGenFunction::buildCallArgs( /// Emit code to compute the specified expression which /// can have any type. The result is returned as an RValue struct. -/// TODO: if this is an aggregate expression, add a AggValueSlot to indicate -/// where the result should be returned. -RValue CIRGenFunction::buildAnyExpr(const Expr *E) { +RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot, + bool ignoreResult) { switch (CIRGenFunction::getEvaluationKind(E->getType())) { case TEK_Scalar: return RValue::get(CGM.buildScalarExpr(E)); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 4e212ce0da6a..44fc997f2adf 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -149,15 +149,22 @@ class CIRGenFunction { RValue buildCallExpr(const clang::CallExpr *E, ReturnValueSlot ReturnValue = ReturnValueSlot()); + void buildCallArg(CallArgList &args, const clang::Expr *E, + clang::QualType ArgType); + + /// buildAnyExprToTemp - Similarly to buildAnyExpr(), however, the result will + /// always be accessible even if no aggregate location is provided. + RValue buildAnyExprToTemp(const clang::Expr *E); + CIRGenCallee buildCallee(const clang::Expr *E); /// buildAnyExpr - Emit code to compute the specified expression which can /// have any type. The result is returned as an RValue struct. If this is an /// aggregate expression, the aggloc/agglocvolatile arguments indicate where /// the result should be returned. - /// TODO: if this is an aggregate expression, add a AggValueSlot to indicate - /// where the result should be returned. - RValue buildAnyExpr(const clang::Expr *E); + RValue buildAnyExpr(const clang::Expr *E, + AggValueSlot aggSlot = AggValueSlot::ignored(), + bool ignoreResult = false); }; } // namespace cir From ac6406a5267ca91c8c96fca76a1af458c15ad53d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 14 Mar 2022 22:09:49 -0400 Subject: [PATCH 0204/2301] [CIR] Support args in arrangeFreeFunctionLikeCall This was a trivial addition, not sure why I left it out in the first place? --- clang/lib/CIR/CIRGenTypes.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index e7d21eebd250..065109f1df59 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -532,7 +532,8 @@ arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, // FIXME: Kill copy. SmallVector argTypes; - assert(args.size() == 0 && "Args NYI"); + for (const auto &arg : args) + argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); return CGT.arrangeCIRFunctionInfo( GetReturnType(fnType->getReturnType()), /*instanceMethod=*/false, chainCall, argTypes, fnType->getExtInfo(), paramInfos, required); From 6c2595ed0f964a2772a934813ab7249b688c6c4b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 14 Mar 2022 22:42:23 -0400 Subject: [PATCH 0205/2301] [CIR] Support args in buildCall This only currently supports trivial things like prvalue integers being passed in. We can continue to expand it bit by bit as we go. --- clang/lib/CIR/CIRGenCall.cpp | 45 +++++++++++++++++++++++++++--- clang/lib/CIR/CIRGenCall.h | 7 +++++ clang/lib/CIR/CIRGenFunction.cpp | 4 +++ clang/lib/CIR/CIRGenFunction.h | 2 ++ clang/lib/CIR/CIRGenFunctionInfo.h | 12 ++++++++ clang/test/CIR/CodeGen/call.c | 11 +++++++- 6 files changed, 76 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index ab2215cc7cb5..610cb40f648d 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -278,9 +278,6 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // When passing arguments using temporary allocas, we need to add the // appropriate lifetime markers. This vector keeps track of all the lifetime // markers that need to be ended right after the call. - assert(CallArgs.size() == 0 && - "Args not yet supported. When they are we'll need to consider " - "supporting temporary allocas for passed args"); // Translate all of the arguments as necessary to match the CIR lowering. assert(CallInfo.arg_size() == CallArgs.size() && @@ -289,7 +286,47 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, CIRGenFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); I != E; ++I, ++info_it, ++ArgNo) { - assert(false && "Nothing to see here!"); + const ABIArgInfo &ArgInfo = info_it->info; + + // Insert a padding argument to ensure proper alignment. + assert(!CIRFunctionArgs.hasPaddingArg(ArgNo) && "Padding args NYI"); + + unsigned FirstCIRArg, NumCIRArgs; + std::tie(FirstCIRArg, NumCIRArgs) = CIRFunctionArgs.getCIRArgs(ArgNo); + + switch (ArgInfo.getKind()) { + case ABIArgInfo::Direct: { + if (!ArgInfo.getCoerceToType().isa() && + ArgInfo.getCoerceToType() == convertType(info_it->type) && + ArgInfo.getDirectOffset() == 0) { + assert(NumCIRArgs == 1); + mlir::Value V; + assert(!I->isAggregate() && "Aggregate NYI"); + V = I->getKnownRValue().getScalarVal(); + + assert(CallInfo.getExtParameterInfo(ArgNo).getABI() != + ParameterABI::SwiftErrorResult && + "swift NYI"); + + // We might have to widen integers, but we should never truncate. + assert(ArgInfo.getCoerceToType() == V.getType() && "widening NYI"); + + mlir::FunctionType CIRFuncTy = getTypes().GetFunctionType(CallInfo); + + // If the argument doesn't match, perform a bitcast to coerce it. This + // can happen due to trivial type mismatches. + if (FirstCIRArg < CIRFuncTy.getNumInputs() && + V.getType() != CIRFuncTy.getInput(FirstCIRArg)) + assert(false && "Shouldn't have to bitcast anything yet"); + + CIRCallArgs[FirstCIRArg] = V; + break; + } + assert(false && "this code path shouldn't be hit yet"); + } + default: + assert(false && "Only Direct support so far"); + } } const CIRGenCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); diff --git a/clang/lib/CIR/CIRGenCall.h b/clang/lib/CIR/CIRGenCall.h index 1f76b3bf78be..45bc056535a3 100644 --- a/clang/lib/CIR/CIRGenCall.h +++ b/clang/lib/CIR/CIRGenCall.h @@ -153,6 +153,13 @@ struct CallArg { (void)IsUsed; } bool hasLValue() const { return HasLV; } + + RValue getKnownRValue() const { + assert(!HasLV && !IsUsed); + return RV; + } + + bool isAggregate() const { return HasLV || RV.isAggregate(); } }; class CallArgList : public llvm::SmallVector { diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 6c6bf6e88858..5a5a02d0f88b 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -283,3 +283,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, return Call; } + +mlir::Type CIRGenFunction::convertType(QualType T) { + return CGM.getTypes().ConvertType(T); +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 44fc997f2adf..712563194b40 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -165,6 +165,8 @@ class CIRGenFunction { RValue buildAnyExpr(const clang::Expr *E, AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); + + mlir::Type convertType(clang::QualType T); }; } // namespace cir diff --git a/clang/lib/CIR/CIRGenFunctionInfo.h b/clang/lib/CIR/CIRGenFunctionInfo.h index be508a1ebfeb..275b77848313 100644 --- a/clang/lib/CIR/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CIRGenFunctionInfo.h @@ -145,6 +145,12 @@ class ABIArgInfo { return isDirect() || isExtend() || isCoerceAndExpand(); } + // Direct/Extend accessors + unsigned getDirectOffset() const { + assert((isDirect() || isExtend()) && "Not a direct or extend kind"); + return DirectAttr.Offset; + } + void setDirectOffset(unsigned Offset) { assert((isDirect() || isExtend()) && "Not a direct or extend kind"); DirectAttr.Offset = Offset; @@ -385,6 +391,12 @@ class CIRGenFunctionInfo final return {}; return llvm::ArrayRef(getExtParameterInfosBuffer(), NumArgs); } + ExtParameterInfo getExtParameterInfo(unsigned argIndex) const { + assert(argIndex <= NumArgs); + if (!HasExtParameterInfos) + return ExtParameterInfo(); + return getExtParameterInfos()[argIndex]; + } /// getCallingConvention - REturn the user specified calling convention, which /// has been translated into a CIR CC. diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 501304c56208..c34d401f05de 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -1,18 +1,27 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * void a(void) {} +void b(int a) {} void c(void) { a(); + b(0); } // CHECK: module { // CHECK: func @a() { // CHECK: cir.return // CHECK: } +// CHECK: func @b(%arg0: i32 {{.*}} { +// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] +// CHECK: cir.store %arg0, %0 : i32, cir.ptr +// CHECK: cir.return +// CHECK: } // CHECK: func @c() { // CHECK: call @a() : () -> () +// CHECK: %0 = cir.cst(0 : i32) : i32 +// CHECK: call @b(%0) : (i32) -> () // CHECK: cir.return // CHECK: } - From 231886c92ac12678d0f4e9f7c42155c51b4860b2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 15 Mar 2022 20:58:43 -0400 Subject: [PATCH 0206/2301] [CIR] Expand call.c test to include multiple args --- clang/test/CIR/CodeGen/call.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index c34d401f05de..671a8ea17a31 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -3,11 +3,11 @@ // XFAIL: * void a(void) {} -void b(int a) {} +void b(int a, int b) {} void c(void) { a(); - b(0); + b(0, 1); } // CHECK: module { @@ -16,12 +16,15 @@ void c(void) { // CHECK: } // CHECK: func @b(%arg0: i32 {{.*}} { // CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] +// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] // CHECK: cir.store %arg0, %0 : i32, cir.ptr +// CHECK: cir.store %arg1, %1 : i32, cir.ptr // CHECK: cir.return // CHECK: } // CHECK: func @c() { // CHECK: call @a() : () -> () // CHECK: %0 = cir.cst(0 : i32) : i32 -// CHECK: call @b(%0) : (i32) -> () +// CHECK: %1 = cir.cst(1 : i32) : i32 +// CHECK: call @b(%0, %1) : (i32, i32) -> () // CHECK: cir.return // CHECK: } From 6c97d5042656a5359e8270e0f78fd087201a61f7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 15 Mar 2022 22:37:05 -0400 Subject: [PATCH 0207/2301] [CIR] Support Integer class types for X86_64 classifyReturnType --- clang/lib/CIR/TargetInfo.cpp | 42 ++++++++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/TargetInfo.cpp b/clang/lib/CIR/TargetInfo.cpp index c53fe2831347..5bf5b2abec68 100644 --- a/clang/lib/CIR/TargetInfo.cpp +++ b/clang/lib/CIR/TargetInfo.cpp @@ -274,11 +274,45 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); - // mlir::Type ResType = nullptr; - assert(Lo == NoClass && "Only NoClass Supported so far"); - assert(Hi == NoClass && "Only NoClass Supported so far"); + mlir::Type ResType = nullptr; + assert(Lo == NoClass || + Lo == Integer && "Only NoClass and Integer supported so far"); + + switch (Lo) { + case NoClass: + assert(Hi == NoClass && "Only NoClass supported so far for Hi"); + return ABIArgInfo::getIgnore(); + + // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next available + // register of the sequence %rax, %rdx is used. + case Integer: + ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); + + // If we have a sign or zero extended integer, make sure to return Extend so + // that the parameter gets the right LLVM IR attributes. + // TODO: extend the above consideration to MLIR + if (Hi == NoClass && ResType.isa()) { + // Treat an enum type as its underlying type. + if (const auto *EnumTy = RetTy->getAs()) + RetTy = EnumTy->getDecl()->getIntegerType(); + + if (RetTy->isIntegralOrEnumerationType() && + isPromotableIntegerTypeForABI(RetTy)) { + assert(false && "extended types NYI"); + } + break; + } + llvm_unreachable("ResType as intenger is only case currently implemented."); + default: + llvm_unreachable("NYI"); + } - return ABIArgInfo::getIgnore(); + mlir::Type HighPart = nullptr; + + if (HighPart) + assert(false && "NYI"); + + return ABIArgInfo::getDirect(ResType); } const TargetCIRGenInfo &CIRGenModule::getTargetCIRGenInfo() { From a896b25ea19d0978d1b3251aeccd7111d1764bec Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 15 Mar 2022 22:38:55 -0400 Subject: [PATCH 0208/2301] [CIR] Support Direct arg types in CIRGenTypes::GetFunctionType --- clang/lib/CIR/CIRGenCall.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 610cb40f648d..4caff63496f1 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -175,6 +175,10 @@ mlir::FunctionType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { // TODO: where to get VoidTy? resultType = nullptr; break; + case ABIArgInfo::Direct: + resultType = retAI.getCoerceToType(); + break; + default: assert(false && "NYI"); } From fbec74a7ec2120cc702a1ef7436650f9a6e3fc0d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 15 Mar 2022 22:39:27 -0400 Subject: [PATCH 0209/2301] [CIR] Support return types in buildCall --- clang/lib/CIR/CIRGenCall.cpp | 43 ++++++++++++++++++++++++++++++++-- clang/lib/CIR/CIRGenFunction.h | 6 +++++ clang/test/CIR/CodeGen/call.c | 16 +++++++++---- 3 files changed, 59 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 4caff63496f1..8d0ddd0b2658 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -366,6 +366,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: alignment attributes + // Emit the actual call op. auto callLoc = CGM.getLoc(Loc); auto theCall = CGM.getBuilder().create(callLoc, CalleePtr, CIRCallArgs); @@ -404,7 +405,41 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: cleanup argument memory at the end - // TODO: implement genuine returns + // Extract the return value. + RValue Ret = [&] { + switch (RetAI.getKind()) { + case ABIArgInfo::Direct: { + mlir::Type RetCIRTy = convertType(RetTy); + if (RetAI.getCoerceToType() == RetCIRTy && RetAI.getDirectOffset() == 0) { + switch (getEvaluationKind(RetTy)) { + case TEK_Scalar: { + // If the argument doesn't match, perform a bitcast to coerce it. This + // can happen due to trivial type mismatches. + auto Results = theCall.getResults(); + assert(Results.size() <= 1 && "multiple returns NYI"); + assert(Results[0].getType() == RetCIRTy && "Bitcast support NYI"); + return RValue::get(Results[0]); + } + default: + llvm_unreachable("NYI"); + } + } else { + llvm_unreachable("No other forms implemented yet."); + } + } + + case ABIArgInfo::Ignore: + // If we are ignoring an argument that had a result, make sure to + // construct the appropriate return value for our caller. + return GetUndefRValue(RetTy); + + default: + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); + return RValue{}; + }(); // TODO: implement assumed_aligned @@ -412,7 +447,11 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, assert(RetTy.isDestructedType() != QualType::DK_nontrivial_c_struct && "NYI"); - assert(theCall.getNumResults() == 0 && "Returns NYI"); + return Ret; +} + +RValue CIRGenFunction::GetUndefRValue(QualType Ty) { + assert(Ty->isVoidType() && "Only VoidType supported so far."); return RValue::get(nullptr); } diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 712563194b40..4068642697b5 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -166,6 +166,12 @@ class CIRGenFunction { AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); + /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type. + /// TODO: What's the equivalent for MLIR? Currently we're only using this for + /// void types so it just returns RValue::get(nullptr) but it'll need + /// addressed later. + RValue GetUndefRValue(clang::QualType Ty); + mlir::Type convertType(clang::QualType T); }; diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 671a8ea17a31..0851f099f5e8 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -3,7 +3,9 @@ // XFAIL: * void a(void) {} -void b(int a, int b) {} +int b(int a, int b) { + return a + b; +} void c(void) { a(); @@ -14,17 +16,23 @@ void c(void) { // CHECK: func @a() { // CHECK: cir.return // CHECK: } -// CHECK: func @b(%arg0: i32 {{.*}} { +// CHECK: func @b(%arg0: i32 {{.*}}, %arg1: i32 {{.*}}) -> i32 { // CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] // CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] +// CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] // CHECK: cir.store %arg0, %0 : i32, cir.ptr // CHECK: cir.store %arg1, %1 : i32, cir.ptr -// CHECK: cir.return +// CHECK: %3 = cir.load %0 : cir.ptr , i32 +// CHECK: %4 = cir.load %1 : cir.ptr , i32 +// CHECK: %5 = cir.binop(add, %3, %4) : i32 +// CHECK: cir.store %5, %2 : i32, cir.ptr +// CHECK: %6 = cir.load %2 : cir.ptr , i32 +// CHECK: cir.return %6 // CHECK: } // CHECK: func @c() { // CHECK: call @a() : () -> () // CHECK: %0 = cir.cst(0 : i32) : i32 // CHECK: %1 = cir.cst(1 : i32) : i32 -// CHECK: call @b(%0, %1) : (i32, i32) -> () +// CHECK: call @b(%0, %1) : (i32, i32) -> i32 // CHECK: cir.return // CHECK: } From 205edd79c725017f8396d503760816a22f684099 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 24 Mar 2022 19:04:44 -0700 Subject: [PATCH 0210/2301] [CIR] Fix silly spelling (cherry picked from commit de49c4c37be7d1f393ba0a37fcc3da5ba294c538) --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 53f781119dde..1511464b2ff6 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -485,7 +485,7 @@ def CmpOpKind : I32EnumAttr< // FIXME: Pure might not work when we add overloading. def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { - let summary = "comapre operation"; + let summary = "compare operation"; let description = [{ "cir.cmp compares two input operands and produces a bool result. The input operands must have the same type. The kinds of comparison available are: From b35c09e4e8784d3ac895fa82eb60cdc956cd0660 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 21 Mar 2022 17:04:51 -0700 Subject: [PATCH 0211/2301] [CIR] Add boilerplate for cir.switch operation No tests just yet, but it builds fine. (cherry picked from commit 0473385c9ea4c852ce4f529ce0bdf1393de1c27c) --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 68 ++++++++++++++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 53 +++++++++++++++++ 2 files changed, 121 insertions(+) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 1511464b2ff6..b77fabd1b4c1 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -510,6 +510,74 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// SwitchOp +//===----------------------------------------------------------------------===// + +// TODO: use this once Variadic<...> supports CaseAttr and CaseOpKind... +// +// def CaseOpKind_DT : I32EnumAttrCase<"_default_", 1>; +// def CaseOpKind_EQ : I32EnumAttrCase<"equal", 2>; +// +// def CaseOpKind : I32EnumAttr< +// "CaseOpKind", +// "case kind", +// [CaseOpKind_DT, CaseOpKind_EQ]> { +// let cppNamespace = "::mlir::cir"; +// } +// +// def CaseAttr : StructAttr<"CaseAttr", CIR_Dialect, [ +// StructFieldAttr<"match", AnyInteger>, +// StructFieldAttr<"kind", CaseOpKind> +// ]> {} + +def SwitchOp : CIR_Op<"switch", + [SameVariadicOperandSize, SameTypeOperands, + DeclareOpInterfaceMethods, + RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { + let summary = "a switch operation"; + let description = [{ + The `cir.switch` operation represents a C/C++ switch stmt for conditionally + executing multiple regions of code. The operand to an switch is an integral + value. + + Each region contains only one block and only accepts cir.case instructions. + The op body must be terminated with cir.yield, though it's never printed or + needed for parsing. + + Examples: + + ```mlir + cir.switch %b { + cir.case #equal, 20 { + ... + cir.yield #break + }, + cir.case #default { + ... + cir.yield #fallthrough + }, + } + ``` + }]; + + // TODO: use CaseOpKind + let arguments = (ins AnyInteger:$condition, + Variadic:$case_vals, + Variadic:$case_kinds); + let regions = (region VariadicRegion>:$regions); + + let hasVerifier = 0; + + let assemblyFormat = [{ + `(` $condition `:` type($condition) `)` `{` + custom( + $regions, $case_vals, $case_kinds + ) + `}` attr-dict + }]; +} + //===----------------------------------------------------------------------===// // BrOp //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index fff51a42f5e1..b66f0d071149 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -478,6 +478,59 @@ mlir::SuccessorOperands BrOp::getSuccessorOperands(unsigned index) { Block *BrOp::getSuccessorForOperands(ArrayRef) { return getDest(); } +//===----------------------------------------------------------------------===// +// SwitchOp +//===----------------------------------------------------------------------===// + +ParseResult parseSwitchOp( + OpAsmParser &parser, + llvm::SmallVectorImpl> ®ionsRegions, + mlir::SmallVectorImpl<::mlir::OpAsmParser::UnresolvedOperand> + &case_valsOperands, + mlir::SmallVectorImpl<::mlir::OpAsmParser::UnresolvedOperand> + &case_kindsOperands) { + return ::mlir::success(); +} + +void printSwitchOp(OpAsmPrinter &p, SwitchOp op, + mlir::MutableArrayRef<::mlir::Region> regions, + mlir::Operation::operand_range case_vals, + mlir::Operation::operand_range case_kinds) {} + +/// Given the region at `index`, or the parent operation if `index` is None, +/// return the successor regions. These are the regions that may be selected +/// during the flow of control. `operands` is a set of optional attributes that +/// correspond to a constant value for each operand, or null if that operand is +/// not a constant. +void SwitchOp::getSuccessorRegions(mlir::RegionBranchPoint point, + SmallVectorImpl ®ions) { + // If any index all the underlying regions branch back to the parent + // operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor()); + return; + } + + // for (auto &r : this->getRegions()) { + // If we can figure out the case stmt we are landing, this can be + // overly simplified. + // bool condition; + // if (auto condAttr = operands.front().dyn_cast_or_null()) { + // assert(0 && "not implemented"); + // (void)r; + // condition = condAttr.getValue().isOneValue(); + // Add the successor regions using the condition. + // regions.push_back(RegionSuccessor(condition ? &thenRegion() : + // elseRegion)); + // return; + // } + // } + + // If the condition isn't constant, all regions may be executed. + for (auto &r : this->getRegions()) + regions.push_back(RegionSuccessor(&r)); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From cb11f368c67cc84c438315ad571d327a433b6d1f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 25 Mar 2022 18:15:29 -0700 Subject: [PATCH 0212/2301] [CIR] Change cir.switch to use optional attribute arrays with some nice enums (cherry picked from commit 577947c829fc8c415542f9aeeaab54666a87bdc1) --- mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h | 1 + mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 2 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 59 +++++++++++-------- .../include/mlir/Dialect/CIR/IR/CIROpsEnums.h | 21 +++++++ .../mlir/Dialect/CIR/IR/CMakeLists.txt | 2 + mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp | 2 + mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 16 +++-- 7 files changed, 68 insertions(+), 35 deletions(-) create mode 100644 mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h index a9e098edbfc8..4205aa7bb906 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h @@ -13,6 +13,7 @@ #ifndef MLIR_DIALECT_CIR_IR_CIRATTRS_H_ #define MLIR_DIALECT_CIR_IR_CIRATTRS_H_ +#include "mlir/Dialect/CIR/IR/CIROpsEnums.h" #include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index a65ce777cbed..bdc3e931db72 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -31,7 +31,7 @@ using FuncOp = func::FuncOp; #include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIROpsDialect.h.inc" -#include "mlir/Dialect/CIR/IR/CIROpsEnums.h.inc" +#include "mlir/Dialect/CIR/IR/CIROpsStructs.h.inc" #include "mlir/Dialect/CIR/IR/CIRTypes.h" namespace mlir { diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index b77fabd1b4c1..e62ca4f88445 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -514,25 +514,36 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { // SwitchOp //===----------------------------------------------------------------------===// -// TODO: use this once Variadic<...> supports CaseAttr and CaseOpKind... -// -// def CaseOpKind_DT : I32EnumAttrCase<"_default_", 1>; -// def CaseOpKind_EQ : I32EnumAttrCase<"equal", 2>; -// -// def CaseOpKind : I32EnumAttr< -// "CaseOpKind", -// "case kind", -// [CaseOpKind_DT, CaseOpKind_EQ]> { -// let cppNamespace = "::mlir::cir"; -// } -// -// def CaseAttr : StructAttr<"CaseAttr", CIR_Dialect, [ -// StructFieldAttr<"match", AnyInteger>, -// StructFieldAttr<"kind", CaseOpKind> -// ]> {} +// FIXME: even though printed/parsed names assume lowercase, we capitalize here +// because "default" is a reserved keyword and can't show up in a enum. +def CaseOpKind_DT : I32EnumAttrCase<"Default", 1>; +def CaseOpKind_EQ : I32EnumAttrCase<"Equal", 2>; + +def CaseOpKind : I32EnumAttr< + "CaseOpKind", + "case kind", + [CaseOpKind_DT, CaseOpKind_EQ]> { + let cppNamespace = "::mlir::cir"; +} + +def CaseEltValueListAttr : + TypedArrayAttrBase { + let constBuilderCall = ?; +} + +def CaseAttr : AttrDef { + let parameters = (ins "ArrayAttr":$value, "CaseOpKindAttr":$kind); + let mnemonic = "case"; + let assemblyFormat = "`<` struct(params) `>`"; +} + +def CaseArrayAttr : + TypedArrayAttrBase { + let constBuilderCall = ?; +} def SwitchOp : CIR_Op<"switch", - [SameVariadicOperandSize, SameTypeOperands, + [SameVariadicOperandSize, DeclareOpInterfaceMethods, RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { let summary = "a switch operation"; @@ -549,11 +560,11 @@ def SwitchOp : CIR_Op<"switch", ```mlir cir.switch %b { - cir.case #equal, 20 { + case (#equal, 20) { ... cir.yield #break }, - cir.case #default { + case (#default) { ... cir.yield #fallthrough }, @@ -561,20 +572,18 @@ def SwitchOp : CIR_Op<"switch", ``` }]; - // TODO: use CaseOpKind let arguments = (ins AnyInteger:$condition, - Variadic:$case_vals, - Variadic:$case_kinds); + OptionalAttr:$cases); let regions = (region VariadicRegion>:$regions); let hasVerifier = 0; let assemblyFormat = [{ - `(` $condition `:` type($condition) `)` `{` + `(` $condition `:` type($condition) `)` custom( - $regions, $case_vals, $case_kinds + $regions, $cases ) - `}` attr-dict + attr-dict }]; } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h b/mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h new file mode 100644 index 000000000000..f61d2b3a60a4 --- /dev/null +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h @@ -0,0 +1,21 @@ +//===- CIROpsEnumsDialect.h - MLIR Dialect for CIR ----------------------*- C++ +//-*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the Target dialect for CIR in MLIR. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_CIR_CIROPSENUMS_H_ +#define MLIR_DIALECT_CIR_CIROPSENUMS_H_ + +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/Dialect/CIR/IR/CIROpsEnums.h.inc" + +#endif // MLIR_DIALECT_CIR_CIROPSENUMS_H_ + diff --git a/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt index a31abcdcfea4..e476b8a77b9c 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt @@ -9,4 +9,6 @@ mlir_tablegen(CIROpsEnums.h.inc -gen-enum-decls) mlir_tablegen(CIROpsEnums.cpp.inc -gen-enum-defs) mlir_tablegen(CIROpsAttributes.h.inc -gen-attrdef-decls) mlir_tablegen(CIROpsAttributes.cpp.inc -gen-attrdef-defs) +mlir_tablegen(CIROpsStructs.h.inc -gen-attrdef-decls) +mlir_tablegen(CIROpsStructs.cpp.inc -gen-attrdef-defs) add_public_tablegen_target(MLIRCIREnumsGen) diff --git a/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp b/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp index bab679bafc67..be80d42f0a00 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp @@ -12,6 +12,8 @@ #include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/CIR/IR/CIROpsEnums.h" +#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index b66f0d071149..a17c494e4fe7 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -25,6 +25,7 @@ using namespace mlir; using namespace mlir::cir; #include "mlir/Dialect/CIR/IR/CIROpsEnums.cpp.inc" +#include "mlir/Dialect/CIR/IR/CIROpsStructs.cpp.inc" #include "mlir/Dialect/CIR/IR/CIROpsDialect.cpp.inc" @@ -482,20 +483,17 @@ Block *BrOp::getSuccessorForOperands(ArrayRef) { return getDest(); } // SwitchOp //===----------------------------------------------------------------------===// -ParseResult parseSwitchOp( - OpAsmParser &parser, - llvm::SmallVectorImpl> ®ionsRegions, - mlir::SmallVectorImpl<::mlir::OpAsmParser::UnresolvedOperand> - &case_valsOperands, - mlir::SmallVectorImpl<::mlir::OpAsmParser::UnresolvedOperand> - &case_kindsOperands) { +ParseResult +parseSwitchOp(OpAsmParser &parser, + llvm::SmallVectorImpl> ®ions, + ::mlir::ArrayAttr &casesAttr) { + return ::mlir::success(); } void printSwitchOp(OpAsmPrinter &p, SwitchOp op, mlir::MutableArrayRef<::mlir::Region> regions, - mlir::Operation::operand_range case_vals, - mlir::Operation::operand_range case_kinds) {} + ::mlir::ArrayAttr casesAttr) {} /// Given the region at `index`, or the parent operation if `index` is None, /// return the successor regions. These are the regions that may be selected From 2da00993d7f4f03acb4c113489d055715b83c09b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 25 Mar 2022 20:01:26 -0700 Subject: [PATCH 0213/2301] [CIR] Add parsing for cir.switch and an initial testcase - Allow cir.return within SwitchOp. (cherry picked from commit 095f7fdbdc100eca56b9cc8fcd488c6d47556f78) --- clang/test/CIR/IR/switch.cir | 14 +++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 8 +- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 124 ++++++++++++++++++--- 3 files changed, 126 insertions(+), 20 deletions(-) create mode 100644 clang/test/CIR/IR/switch.cir diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir new file mode 100644 index 000000000000..03d96bf251da --- /dev/null +++ b/clang/test/CIR/IR/switch.cir @@ -0,0 +1,14 @@ +// RUN: cir-tool %s + +func.func @s0() { + %1 = cir.cst(2 : i32) : i32 + cir.switch (%1 : i32) [ + case (default) { + cir.return + }, + case (equal, 3) { + cir.return + } + ] + cir.return +} diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index e62ca4f88445..6de08ce5a9da 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -252,7 +252,7 @@ def StoreOp : CIR_Op<"store", [ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp">, +def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp">, Terminator]> { let summary = "return operation"; let description = [{ @@ -559,7 +559,7 @@ def SwitchOp : CIR_Op<"switch", Examples: ```mlir - cir.switch %b { + cir.switch (%b : i32) [ case (#equal, 20) { ... cir.yield #break @@ -567,8 +567,8 @@ def SwitchOp : CIR_Op<"switch", case (#default) { ... cir.yield #fallthrough - }, - } + } + ] ``` }]; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index a17c494e4fe7..029582b23665 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -177,7 +177,7 @@ static LogicalResult checkScopeTerminator(OpAsmParser &parser, // Empty regions don't need any handling. auto &blocks = r->getBlocks(); - if (blocks.size() == 0) + if (blocks.empty()) return success(); // Test that at least one block has a yield/return terminator. We can @@ -488,6 +488,98 @@ parseSwitchOp(OpAsmParser &parser, llvm::SmallVectorImpl> ®ions, ::mlir::ArrayAttr &casesAttr) { + SmallVector cases; + auto parseRegion = [&]() -> ParseResult { + // Parse region attached to case + regions.emplace_back(new Region); + Region &currRegion = *regions.back().get(); + if (parser.parseRegion(currRegion, /*arguments=*/{}, /*argTypes=*/{})) { + regions.clear(); + return failure(); + } + return success(); + }; + + auto parseCase = [&]() -> ParseResult { + auto loc = parser.getCurrentLocation(); + if (parser.parseKeyword("case").failed()) + return parser.emitError(loc, "expected 'case' keyword here"); + + if (parser.parseLParen().failed()) + return parser.emitError(parser.getCurrentLocation(), "expected '('"); + + ::llvm::StringRef attrStr; + ::mlir::NamedAttrList attrStorage; + + // case (equal, 20) { + // ... + // 1. Get the case kind + // 2. Get the value (next in list) + + // FIXME: since a few names can't be used as enum (default) we declared + // them in CIROps.td capitalized, but we really wanna use lower case on + // clang IR asm form. + if (parser.parseOptionalKeyword(&attrStr, {"default", "equal"})) { + ::mlir::StringAttr attrVal; + ::mlir::OptionalParseResult parseResult = parser.parseOptionalAttribute( + attrVal, parser.getBuilder().getNoneType(), "kind", attrStorage); + if (parseResult.has_value()) { + if (failed(*parseResult)) + return ::mlir::failure(); + attrStr = attrVal.getValue(); + } + } + + if (attrStr.empty()) { + return parser.emitError( + loc, "expected string or keyword containing one of the following " + "enum values for attribute 'kind' [default, equal]"); + } + + std::string attrString = attrStr.str(); + attrString[0] = attrString[0] + 'A' - 'a'; + attrStr = attrString; + auto attrOptional = ::mlir::cir::symbolizeCaseOpKind(attrStr); + if (!attrOptional) + return parser.emitError(loc, "invalid ") + << "kind attribute specification: \"" << attrStr << '"'; + ; + + mlir::Type intType = mlir::IntegerType::get(parser.getContext(), 64, + mlir::IntegerType::Signed); + auto kindAttr = ::mlir::cir::CaseOpKindAttr::get( + parser.getBuilder().getContext(), attrOptional.value()); + + if (parser.parseOptionalComma().failed() && + kindAttr.getValue() == cir::CaseOpKind::Default) { + if (parser.parseRParen().failed()) + return parser.emitError(parser.getCurrentLocation(), "expected ')'"); + cases.push_back(cir::CaseAttr::get( + parser.getContext(), parser.getBuilder().getArrayAttr({}), kindAttr)); + return parseRegion(); + } + + // `,` value comes next (in the future perhaps a list?) + int64_t val = 0; + if (parser.parseInteger(val).failed()) + return ::mlir::failure(); + cases.push_back( + cir::CaseAttr::get(parser.getContext(), + parser.getBuilder().getArrayAttr( + {mlir::IntegerAttr::get(intType, val)}), + kindAttr)); + if (parser.parseRParen().failed()) + return parser.emitError(parser.getCurrentLocation(), "expected ')'"); + return parseRegion(); + }; + + if (parser + .parseCommaSeparatedList(OpAsmParser::Delimiter::Square, parseCase, + " in cases list") + .failed()) + return failure(); + + casesAttr = parser.getBuilder().getArrayAttr(cases); return ::mlir::success(); } @@ -497,9 +589,9 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, /// Given the region at `index`, or the parent operation if `index` is None, /// return the successor regions. These are the regions that may be selected -/// during the flow of control. `operands` is a set of optional attributes that -/// correspond to a constant value for each operand, or null if that operand is -/// not a constant. +/// during the flow of control. `operands` is a set of optional attributes +/// that correspond to a constant value for each operand, or null if that +/// operand is not a constant. void SwitchOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // If any index all the underlying regions branch back to the parent @@ -510,18 +602,18 @@ void SwitchOp::getSuccessorRegions(mlir::RegionBranchPoint point, } // for (auto &r : this->getRegions()) { - // If we can figure out the case stmt we are landing, this can be - // overly simplified. - // bool condition; - // if (auto condAttr = operands.front().dyn_cast_or_null()) { - // assert(0 && "not implemented"); - // (void)r; - // condition = condAttr.getValue().isOneValue(); - // Add the successor regions using the condition. - // regions.push_back(RegionSuccessor(condition ? &thenRegion() : - // elseRegion)); - // return; - // } + // If we can figure out the case stmt we are landing, this can be + // overly simplified. + // bool condition; + // if (auto condAttr = operands.front().dyn_cast_or_null()) { + // assert(0 && "not implemented"); + // (void)r; + // condition = condAttr.getValue().isOneValue(); + // Add the successor regions using the condition. + // regions.push_back(RegionSuccessor(condition ? &thenRegion() : + // elseRegion)); + // return; + // } // } // If the condition isn't constant, all regions may be executed. From 44329afed46ac5c7d78676c4742e991ce56b77f8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 29 Mar 2022 14:00:30 -0700 Subject: [PATCH 0214/2301] [CIR] Allow cir.yield within cir.switch regions --- clang/test/CIR/IR/switch.cir | 2 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 2 +- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index 03d96bf251da..689f63524242 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -7,7 +7,7 @@ func.func @s0() { cir.return }, case (equal, 3) { - cir.return + cir.yield } ] cir.return diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 6de08ce5a9da..867f32fab6e0 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -351,7 +351,7 @@ def IfOp : CIR_Op<"if", //===----------------------------------------------------------------------===// def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, - ParentOneOf<["IfOp", "ScopeOp"]>]> { + ParentOneOf<["IfOp", "ScopeOp", "SwitchOp"]>]> { let summary = "termination operation for regions inside if, for, scope, etc"; let description = [{ "cir.yield" yields an SSA value from a CIR dialect op region and diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 029582b23665..b1dcabc77153 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -404,7 +404,7 @@ LogicalResult ScopeOp::verify() { return success(); } //===----------------------------------------------------------------------===// mlir::LogicalResult YieldOp::verify() { - if (!llvm::isa(getOperation()->getParentOp())) + if (!llvm::isa(getOperation()->getParentOp())) return emitOpError() << "expects 'cir.if' or 'cir.scope' as the parent operation'"; From ac7b01bfd3ec7160ec436469fafc2af213fba733 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 29 Mar 2022 14:33:27 -0700 Subject: [PATCH 0215/2301] [CIR] Change cir.yield to allow for fallthrough behaviors on cir.switch regions --- clang/test/CIR/IR/invalid.cir | 10 +++++ clang/test/CIR/IR/switch.cir | 3 ++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 43 ++++++++++++++++------ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 10 +++-- 4 files changed, 52 insertions(+), 14 deletions(-) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 6a1c5bcebc35..bc411953c97f 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -37,3 +37,13 @@ func.func @yield0() { } cir.return } + +// ----- + +func.func @yieldfallthrough() { + %0 = cir.cst(true) : !cir.bool + cir.if %0 { + cir.yield fallthrough // expected-error {{'cir.yield' op fallthrough only expected within 'cir.switch'}} + } + cir.return +} diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index 689f63524242..8b9655bfcd79 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -7,6 +7,9 @@ func.func @s0() { cir.return }, case (equal, 3) { + cir.yield fallthrough + }, + case (equal, 5) { cir.yield } ] diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 867f32fab6e0..71c65f9f00bf 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -358,18 +358,40 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, terminates the regions. The semantics of how the values are yielded is defined by the parent operation. - Currently, there are not parents where "cir.yield" has any operands, - but it will be useful to represent lifetime extension in the future. In - that case the operands must match the parent operation's results. + Currently, there are not parents where `cir.yield` has any operands, + but it will be useful to represent lifetime extension in the future. - `cir.yield` be present whenever the region has more than one block. + When used to leave `cir.switch` regions there are two possible meanings: + 1. Plain `cir.yield` has "breaking out of a switch" semantics. + 2. `cir.yield fallthrough` means the next region in the case list should + be executed. + + The `cir.yield` must be explicitly used whenever a region has more than + one block, or within `cir.switch` regions not `cir.return` terminated. + + Example: + ``` + cir.if %4 { + ... + cir.yield + } + + cir.switch (%5) [ + case (equal, 3) { + ... + cir.yield fallthrough + }, ... + ] + ``` }]; - let arguments = (ins Variadic:$results); + let arguments = (ins UnitAttr:$fallthrough, Variadic:$results); let builders = [OpBuilder<(ins), [{ /* nothing to do */ }]>]; - let assemblyFormat = - [{ attr-dict ($results^ `:` type($results))? }]; + let assemblyFormat = [{ + (`fallthrough` $fallthrough^)? ($results^ `:` type($results))? + attr-dict + }]; let hasVerifier = 1; } @@ -552,9 +574,8 @@ def SwitchOp : CIR_Op<"switch", executing multiple regions of code. The operand to an switch is an integral value. - Each region contains only one block and only accepts cir.case instructions. - The op body must be terminated with cir.yield, though it's never printed or - needed for parsing. + Each region contains only one block and must be explicitly terminated with + a cir.yield operation. Examples: @@ -562,7 +583,7 @@ def SwitchOp : CIR_Op<"switch", cir.switch (%b : i32) [ case (#equal, 20) { ... - cir.yield #break + cir.yield // break semantics }, case (#default) { ... diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index b1dcabc77153..045f4a2fd72a 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -404,9 +404,13 @@ LogicalResult ScopeOp::verify() { return success(); } //===----------------------------------------------------------------------===// mlir::LogicalResult YieldOp::verify() { - if (!llvm::isa(getOperation()->getParentOp())) - return emitOpError() - << "expects 'cir.if' or 'cir.scope' as the parent operation'"; + if (llvm::isa(getOperation()->getParentOp())) + return mlir::success(); + + assert((llvm::isa(getOperation()->getParentOp())) && + "unknown parent op"); + if (getFallthrough()) + return emitOpError() << "fallthrough only expected within 'cir.switch'"; return mlir::success(); } From 49d27774db8346a9c30d4c9fdeb5fde6e1511377 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 30 Mar 2022 09:52:17 -0700 Subject: [PATCH 0216/2301] [CIR] Cleanup checkScopeTerminator and rename --- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 045f4a2fd72a..0032ad4c197a 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -162,12 +162,11 @@ mlir::LogicalResult ReturnOp::verify() { // IfOp //===----------------------------------------------------------------------===// -static LogicalResult checkScopeTerminator(OpAsmParser &parser, - OperationState &result, Region *r) { +static LogicalResult checkBlockTerminator(mlir::Builder &builder, Location l, + Region *r) { if (r->hasOneBlock()) { ::mlir::impl::ensureRegionTerminator( - *r, parser.getBuilder(), result.location, - [](OpBuilder &builder, Location loc) { + *r, builder, l, [](OpBuilder &builder, Location loc) { OperationState state(loc, YieldOp::getOperationName()); YieldOp::build(builder, state); return Operation::create(state); @@ -214,7 +213,8 @@ ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { if (parser.parseRegion(*thenRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkScopeTerminator(parser, result, thenRegion).failed()) { + if (checkBlockTerminator(parser.getBuilder(), result.location, thenRegion) + .failed()) { parser.emitError( loc, "if.then expected at least one block with cir.yield or cir.return"); @@ -225,7 +225,8 @@ ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { if (!parser.parseOptionalKeyword("else")) { if (parser.parseRegion(*elseRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkScopeTerminator(parser, result, elseRegion).failed()) { + if (checkBlockTerminator(parser.getBuilder(), result.location, elseRegion) + .failed()) { parser.emitError( loc, "if.else expected at least one block with cir.yield or cir.return"); @@ -346,7 +347,8 @@ ParseResult ScopeOp::parse(OpAsmParser &parser, OperationState &result) { if (parser.parseRegion(*scopeRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkScopeTerminator(parser, result, scopeRegion).failed()) { + if (checkBlockTerminator(parser.getBuilder(), result.location, scopeRegion) + .failed()) { parser.emitError( loc, "expected at least one block with cir.yield or cir.return"); return failure(); From 8a196fe04df5bbb203fbb2a77af7416f0ef62b27 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 30 Mar 2022 11:12:55 -0700 Subject: [PATCH 0217/2301] [CIR] Add print support, enable verifier and enhance parsing rules for cir.switch --- clang/test/CIR/IR/invalid.cir | 23 ++++++ clang/test/CIR/IR/switch.cir | 15 +++- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 7 +- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 92 +++++++++++++++++++--- 4 files changed, 123 insertions(+), 14 deletions(-) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index bc411953c97f..4f4d947e11d3 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -47,3 +47,26 @@ func.func @yieldfallthrough() { } cir.return } + +// ----- + +func.func @s0() { + %1 = cir.cst(2 : i32) : i32 + cir.switch (%1 : i32) [ + case (equal, 5) { + %2 = cir.cst(3 : i32) : i32 + } + ] // expected-error {{blocks are expected to be explicitly terminated}} + cir.return +} + +// ----- + +func.func @s1() { + %1 = cir.cst(2 : i32) : i32 + cir.switch (%1 : i32) [ + case (equal, 5) { + } + ] // expected-error {{case regions expected to have one terminated block}} + cir.return +} diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index 8b9655bfcd79..41273afd6353 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -1,4 +1,5 @@ -// RUN: cir-tool %s +// RUN: cir-tool %s | FileCheck %s +// XFAIL: * func.func @s0() { %1 = cir.cst(2 : i32) : i32 @@ -15,3 +16,15 @@ func.func @s0() { ] cir.return } + +// CHECK: cir.switch (%0 : i32) [ +// CHECK-NEXT: case (default, 0 : i32) { +// CHECK-NEXT: cir.return +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 3 : i32) { +// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 5 : i32) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: ] diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 71c65f9f00bf..e662c4639bfb 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -554,6 +554,8 @@ def CaseEltValueListAttr : } def CaseAttr : AttrDef { + // FIXME: value should probably be optional for more clear "default" + // representation. let parameters = (ins "ArrayAttr":$value, "CaseOpKindAttr":$kind); let mnemonic = "case"; let assemblyFormat = "`<` struct(params) `>`"; @@ -597,12 +599,11 @@ def SwitchOp : CIR_Op<"switch", OptionalAttr:$cases); let regions = (region VariadicRegion>:$regions); - let hasVerifier = 0; + let hasVerifier = 1; let assemblyFormat = [{ - `(` $condition `:` type($condition) `)` custom( - $regions, $cases + $regions, $cases, $condition, type($condition) ) attr-dict }]; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 0032ad4c197a..4f4358804d2c 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -492,10 +492,13 @@ Block *BrOp::getSuccessorForOperands(ArrayRef) { return getDest(); } ParseResult parseSwitchOp(OpAsmParser &parser, llvm::SmallVectorImpl> ®ions, - ::mlir::ArrayAttr &casesAttr) { - + ::mlir::ArrayAttr &casesAttr, + mlir::OpAsmParser::UnresolvedOperand &cond, + mlir::Type &condType) { + ::mlir::IntegerType intCondType; SmallVector cases; - auto parseRegion = [&]() -> ParseResult { + + auto parseAndCheckRegion = [&]() -> ParseResult { // Parse region attached to case regions.emplace_back(new Region); Region &currRegion = *regions.back().get(); @@ -503,6 +506,23 @@ parseSwitchOp(OpAsmParser &parser, regions.clear(); return failure(); } + + if (currRegion.empty()) { + return parser.emitError( + parser.getCurrentLocation(), + "case regions expected to have one terminated block"); + } + + // Region trait in CIROps.td already verifies that, but make sure not + // mistakes happen. + assert(currRegion.hasOneBlock() && "expected only one block"); + Block &block = currRegion.back(); + if (block.empty() || !block.back().hasTrait()) { + return parser.emitError( + parser.getCurrentLocation(), + "blocks are expected to be explicitly terminated"); + } + return success(); }; @@ -549,10 +569,7 @@ parseSwitchOp(OpAsmParser &parser, if (!attrOptional) return parser.emitError(loc, "invalid ") << "kind attribute specification: \"" << attrStr << '"'; - ; - mlir::Type intType = mlir::IntegerType::get(parser.getContext(), 64, - mlir::IntegerType::Signed); auto kindAttr = ::mlir::cir::CaseOpKindAttr::get( parser.getBuilder().getContext(), attrOptional.value()); @@ -562,7 +579,7 @@ parseSwitchOp(OpAsmParser &parser, return parser.emitError(parser.getCurrentLocation(), "expected ')'"); cases.push_back(cir::CaseAttr::get( parser.getContext(), parser.getBuilder().getArrayAttr({}), kindAttr)); - return parseRegion(); + return parseAndCheckRegion(); } // `,` value comes next (in the future perhaps a list?) @@ -572,13 +589,26 @@ parseSwitchOp(OpAsmParser &parser, cases.push_back( cir::CaseAttr::get(parser.getContext(), parser.getBuilder().getArrayAttr( - {mlir::IntegerAttr::get(intType, val)}), + {mlir::IntegerAttr::get(intCondType, val)}), kindAttr)); if (parser.parseRParen().failed()) return parser.emitError(parser.getCurrentLocation(), "expected ')'"); - return parseRegion(); + return parseAndCheckRegion(); }; + if (parser.parseLParen()) + return ::mlir::failure(); + + if (parser.parseOperand(cond)) + return ::mlir::failure(); + if (parser.parseColon()) + return ::mlir::failure(); + if (parser.parseCustomTypeWithFallback(intCondType)) + return ::mlir::failure(); + condType = intCondType; + if (parser.parseRParen()) + return ::mlir::failure(); + if (parser .parseCommaSeparatedList(OpAsmParser::Delimiter::Square, parseCase, " in cases list") @@ -591,7 +621,47 @@ parseSwitchOp(OpAsmParser &parser, void printSwitchOp(OpAsmPrinter &p, SwitchOp op, mlir::MutableArrayRef<::mlir::Region> regions, - ::mlir::ArrayAttr casesAttr) {} + mlir::ArrayAttr casesAttr, mlir::Value condition, + mlir::Type condType) { + int idx = 0, lastIdx = regions.size() - 1; + + p << "("; + p << condition; + p << " : "; + p.printStrippedAttrOrType(condType); + p << ") ["; + // FIXME: ideally we want some extra indentation for "cases" but too + // cumbersome to pull it out now, since most handling is private. Perhaps + // better improve overall mechanism. + p.printNewline(); + for (auto &r : regions) { + p << "case ("; + + auto attr = casesAttr[idx].cast(); + auto kind = attr.getKind().getValue(); + assert((kind == CaseOpKind::Default || kind == CaseOpKind::Equal) && + "unknown case"); + + // Case kind + auto caseValueStr = stringifyCaseOpKind(kind); + std::string attrString = caseValueStr.str(); + attrString[0] = attrString[0] + 'a' - 'A'; + caseValueStr = attrString; + p << caseValueStr << ", "; + + // Case value + p.printStrippedAttrOrType(attr.getValue()); + + p << ") "; + p.printRegion(r, /*printEntryBLockArgs=*/false, + /*printBlockTerminators=*/true); + if (idx < lastIdx) + p << ","; + p.printNewline(); + idx++; + } + p << "]"; +} /// Given the region at `index`, or the parent operation if `index` is None, /// return the successor regions. These are the regions that may be selected @@ -627,6 +697,8 @@ void SwitchOp::getSuccessorRegions(mlir::RegionBranchPoint point, regions.push_back(RegionSuccessor(&r)); } +LogicalResult SwitchOp::verify() { return success(); } + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From d40f2cc659887bd2b4176f8808e5f8207e9f528f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 31 Mar 2022 14:55:21 -0700 Subject: [PATCH 0218/2301] [CIR] Initial support for switch stmt codegen in clang --- clang/lib/CIR/CIRGenModule.cpp | 52 +++++++++++++++++++++- clang/lib/CIR/CIRGenModule.h | 1 + clang/test/CIR/CodeGen/switch.cpp | 12 +++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 6 +++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 8 ++++ 5 files changed, 78 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/switch.cpp diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index ae3bad12b495..57b27ace7ff8 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1305,6 +1305,53 @@ static mlir::Location getIfLocs(CIRGenModule &CGM, const clang::Stmt *thenS, return mlir::FusedLoc::get(ifLocs, metadata, CGM.getBuilder().getContext()); } +mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { + // FIXME: track switch to handle nested stmts. + + // TODO: LLVM codegen does some early optimization to fold the condition and + // only emit live cases. CIR should use MLIR to achieve similar things, + // nothing to be done here. + // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))... + mlir::LogicalResult res = mlir::success(); + + // C99 6.8.4.1: The first substatement is executed if the expression + // compares unequal to 0. The condition must be a scalar type. + auto switchStmtBuilder = [&]() -> mlir::LogicalResult { + if (S.getInit()) + if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + + if (S.getConditionVariable()) + buildDecl(*S.getConditionVariable()); + + mlir::Value condV = buildScalarExpr(S.getCond()); + + // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts()) + // TODO: if the switch has a condition wrapped by __builtin_unpredictable? + builder.create( + getLoc(S.getBeginLoc()), condV, + /*switchBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) { + res = buildStmt(S.getBody(), /*useCurrentScope=*/true); + }); + return res; + }; + + // The switch scope contains the full source range for SwitchStmt. + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd}; + LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; + res = switchStmtBuilder(); + }); + + return res; +} + mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { // The else branch of a consteval if statement is always the only branch // that can be runtime evaluated. @@ -1432,12 +1479,15 @@ mlir::LogicalResult CIRGenModule::buildStmt(const Stmt *S, if (buildIfStmt(cast(*S)).failed()) return mlir::failure(); break; + case Stmt::SwitchStmtClass: + if (buildSwitchStmt(cast(*S)).failed()) + return mlir::failure(); + break; case Stmt::IndirectGotoStmtClass: case Stmt::WhileStmtClass: case Stmt::DoStmtClass: case Stmt::ForStmtClass: case Stmt::ReturnStmtClass: - case Stmt::SwitchStmtClass: // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. case Stmt::GCCAsmStmtClass: case Stmt::MSAsmStmtClass: diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 952ef3321ee5..2cd941207adf 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -465,6 +465,7 @@ class CIRGenModule { const clang::Stmt *elseS); mlir::LogicalResult buildIfStmt(const clang::IfStmt &S); + mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp new file mode 100644 index 000000000000..50cc5fb8a6da --- /dev/null +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void sw0(int a) { + switch (a) {} +} + +// CHECK: cir.scope { +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.switch (%1 : i32) [ +// CHECK-NEXT: ] +// CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index e662c4639bfb..a781ab658056 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -601,6 +601,12 @@ def SwitchOp : CIR_Op<"switch", let hasVerifier = 1; + let skipDefaultBuilders = 1; + let builders = [ + OpBuilder<(ins "Value":$condition, + "function_ref":$switchBuilder)> + ]; + let assemblyFormat = [{ custom( $regions, $cases, $condition, type($condition) diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 4f4358804d2c..a32edb2db30a 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -699,6 +699,14 @@ void SwitchOp::getSuccessorRegions(mlir::RegionBranchPoint point, LogicalResult SwitchOp::verify() { return success(); } +void SwitchOp::build(OpBuilder &builder, OperationState &result, Value cond, + function_ref switchBuilder) { + assert(switchBuilder && "the builder callback for regions must be present"); + result.addOperands({cond}); + OpBuilder::InsertionGuard guard(builder); + switchBuilder(builder, result.location); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From 4a32a2d6425d11fb1a525df6b66083904ca5a1a3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 3 Aug 2022 11:45:03 -0700 Subject: [PATCH 0219/2301] [TO BE DROPPED] Remove dead switch case for now --- clang/test/CIR/CodeGen/switch.cpp | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 50cc5fb8a6da..4b572bd24f0a 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -1,12 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -void sw0(int a) { - switch (a) {} -} +void sw0(int a) {} -// CHECK: cir.scope { -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.switch (%1 : i32) [ -// CHECK-NEXT: ] -// CHECK-NEXT: } +// CHECK: func.func From ffe87e887f4e87aac703e96cea92b96118c3472d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 31 Mar 2022 14:55:44 -0700 Subject: [PATCH 0220/2301] [CIR] Add case and break stmt codegen --- clang/lib/CIR/CIRGenFunction.h | 2 +- clang/lib/CIR/CIRGenModule.cpp | 84 +++++++++++++++++++--- clang/lib/CIR/CIRGenModule.h | 5 ++ clang/test/CIR/CodeGen/switch.cpp | 26 ++++++- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 4 +- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 9 +-- 6 files changed, 111 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 4068642697b5..9e4731881724 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -58,7 +58,7 @@ class CIRGenFunction { CIRGenModule &CGM; - // CurFuncDecl - Holds the Decl for the current outermost non-closure context + // Holds the Decl for the current outermost non-closure context const clang::Decl *CurFuncDecl; // The CallExpr within the current statement that the musttail attribute diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 57b27ace7ff8..96f76d738a09 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -870,11 +870,17 @@ mlir::LogicalResult CIRGenModule::buildSimpleStmt(const Stmt *S, case Stmt::LabelStmtClass: return buildLabelStmt(cast(*S)); - case Stmt::AttributedStmtClass: + case Stmt::CaseStmtClass: + assert(0 && + "Should not get here, currently handled directly from SwitchStmt"); + break; + case Stmt::BreakStmtClass: + return buildBreakStmt(cast(*S)); + + case Stmt::AttributedStmtClass: case Stmt::ContinueStmtClass: case Stmt::DefaultStmtClass: - case Stmt::CaseStmtClass: case Stmt::SEHLeaveStmtClass: llvm::errs() << "CIR codegen for '" << S->getStmtClassName() << "' not implemented\n"; @@ -1305,17 +1311,41 @@ static mlir::Location getIfLocs(CIRGenModule &CGM, const clang::Stmt *thenS, return mlir::FusedLoc::get(ifLocs, metadata, CGM.getBuilder().getContext()); } -mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { - // FIXME: track switch to handle nested stmts. +mlir::LogicalResult CIRGenModule::buildBreakStmt(const clang::BreakStmt &S) { + // FIXME: add proper tracking for "break" in yield. + builder.create(getLoc(S.getBreakLoc())); + return mlir::success(); +} +mlir::LogicalResult CIRGenModule::buildCaseStmt(const CaseStmt &S, + mlir::Type condType, + CaseAttr &caseEntry) { + assert((!S.getRHS() || !S.caseStmtIsGNURange()) && + "case ranges not implemented"); + auto res = mlir::success(); + + auto intVal = S.getLHS()->EvaluateKnownConstInt(getASTContext()); + auto *ctx = builder.getContext(); + caseEntry = mlir::cir::CaseAttr::get( + ctx, builder.getArrayAttr({}), + CaseOpKindAttr::get(ctx, mlir::cir::CaseOpKind::Equal)); + { + mlir::OpBuilder::InsertionGuard guardCase(builder); + res = buildStmt(S.getSubStmt(), + /*useCurrentScope=*/!isa(S.getSubStmt())); + } + + // TODO: likelihood + return res; +} + +mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { // TODO: LLVM codegen does some early optimization to fold the condition and // only emit live cases. CIR should use MLIR to achieve similar things, // nothing to be done here. // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))... - mlir::LogicalResult res = mlir::success(); - // C99 6.8.4.1: The first substatement is executed if the expression - // compares unequal to 0. The condition must be a scalar type. + auto res = mlir::success(); auto switchStmtBuilder = [&]() -> mlir::LogicalResult { if (S.getInit()) if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) @@ -1328,10 +1358,46 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts()) // TODO: if the switch has a condition wrapped by __builtin_unpredictable? + + // FIXME: track switch to handle nested stmts. builder.create( getLoc(S.getBeginLoc()), condV, - /*switchBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) { - res = buildStmt(S.getBody(), /*useCurrentScope=*/true); + /*switchBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { + auto *cs = dyn_cast(S.getBody()); + assert(cs && "expected compound stmt"); + SmallVector caseAttrs; + + mlir::Block *lastCaseBlock = nullptr; + for (auto *c : cs->body()) { + auto *newCase = dyn_cast(c); + if (!newCase) { + // This means it's a random stmt following up a case, just + // emit it as part of previous known case. + assert(lastCaseBlock && "expects pre-existing case block"); + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(lastCaseBlock); + res = buildStmt(c, /*useCurrentScope=*/!isa(c)); + continue; + } + assert(newCase && "expected case stmt"); + const CaseStmt *nestedCase = + dyn_cast(newCase->getSubStmt()); + assert(!nestedCase && "empty case fallthrough NYI"); + + CaseAttr caseAttr; + { + mlir::OpBuilder::InsertionGuard guardCase(builder); + mlir::Region *caseRegion = os.addRegion(); + lastCaseBlock = builder.createBlock(caseRegion); + res = buildCaseStmt(*newCase, condV.getType(), caseAttr); + if (res.failed()) + break; + } + caseAttrs.push_back(caseAttr); + } + + os.addAttribute("cases", builder.getArrayAttr(caseAttrs)); }); return res; }; diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 2cd941207adf..11ad244b02f1 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -465,6 +465,11 @@ class CIRGenModule { const clang::Stmt *elseS); mlir::LogicalResult buildIfStmt(const clang::IfStmt &S); + mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S, + mlir::Type condType, + mlir::cir::CaseAttr &caseEntry); + + mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); // Build CIR for a statement. useCurrentScope should be true if no diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 4b572bd24f0a..b8bea1b4578e 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -1,6 +1,26 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * -void sw0(int a) {} +void sw1(int a) { + switch (int b = 1; a) { + case 0: + b = b + 1; + break; + case 1: + break; + } +} -// CHECK: func.func +// CHECK: func @sw1 +// CHECK: cir.switch (%3 : i32) [ +// CHECK-NEXT: case (equal, 0 : i32) { +// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 +// CHECK-NEXT: cir.store %6, %1 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 1 : i32) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index a781ab658056..fa59ad9f3fb6 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -537,7 +537,7 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { //===----------------------------------------------------------------------===// // FIXME: even though printed/parsed names assume lowercase, we capitalize here -// because "default" is a reserved keyword and can't show up in a enum. +// because "default" is a C++ reserved keyword and can't show up in a enum. def CaseOpKind_DT : I32EnumAttrCase<"Default", 1>; def CaseOpKind_EQ : I32EnumAttrCase<"Equal", 2>; @@ -604,7 +604,7 @@ def SwitchOp : CIR_Op<"switch", let skipDefaultBuilders = 1; let builders = [ OpBuilder<(ins "Value":$condition, - "function_ref":$switchBuilder)> + "function_ref":$switchBuilder)> ]; let assemblyFormat = [{ diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index a32edb2db30a..34b240078058 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -699,12 +699,13 @@ void SwitchOp::getSuccessorRegions(mlir::RegionBranchPoint point, LogicalResult SwitchOp::verify() { return success(); } -void SwitchOp::build(OpBuilder &builder, OperationState &result, Value cond, - function_ref switchBuilder) { +void SwitchOp::build( + OpBuilder &builder, OperationState &result, Value cond, + function_ref switchBuilder) { assert(switchBuilder && "the builder callback for regions must be present"); + OpBuilder::InsertionGuard guardSwitch(builder); result.addOperands({cond}); - OpBuilder::InsertionGuard guard(builder); - switchBuilder(builder, result.location); + switchBuilder(builder, result.location, result); } //===----------------------------------------------------------------------===// From c018d6ae0d7a0444c89bd5d685c290518283692a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 5 Apr 2022 10:16:17 -0700 Subject: [PATCH 0221/2301] [CIR] Parse optional type for case integral values, fix docs --- clang/test/CIR/IR/switch.cir | 2 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 16 +++++++++++----- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 8 ++++++++ 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index 41273afd6353..9ea616943058 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -10,7 +10,7 @@ func.func @s0() { case (equal, 3) { cir.yield fallthrough }, - case (equal, 5) { + case (equal, 5 : i32) { cir.yield } ] diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index fa59ad9f3fb6..ca0f9ad85788 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -572,9 +572,15 @@ def SwitchOp : CIR_Op<"switch", RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { let summary = "a switch operation"; let description = [{ - The `cir.switch` operation represents a C/C++ switch stmt for conditionally - executing multiple regions of code. The operand to an switch is an integral - value. + The `cir.switch` operation represents C/C++ switch functionality for + conditionally executing multiple regions of code. The operand to an switch + is an integral condition value. + + A variadic list of "case" attribute operands and regions track the possible + control flow within `cir.switch`. Each "case" first operand is either + "equal" (meaning equality comparision against the condition) and "default" + for any other value. An optional second operand denotes the actual value, + its type should match the condition and can be optionally present. Each region contains only one block and must be explicitly terminated with a cir.yield operation. @@ -583,11 +589,11 @@ def SwitchOp : CIR_Op<"switch", ```mlir cir.switch (%b : i32) [ - case (#equal, 20) { + case (equal, 20) { ... cir.yield // break semantics }, - case (#default) { + case (default) { ... cir.yield #fallthrough } diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 34b240078058..df58cc9a97aa 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -591,6 +591,14 @@ parseSwitchOp(OpAsmParser &parser, parser.getBuilder().getArrayAttr( {mlir::IntegerAttr::get(intCondType, val)}), kindAttr)); + if (succeeded(parser.parseOptionalColon())) { + Type caseIntTy; + if (parser.parseType(caseIntTy).failed()) + return parser.emitError(parser.getCurrentLocation(), "expected type"); + if (intCondType != caseIntTy) + return parser.emitError(parser.getCurrentLocation(), + "expected a match with the condition type"); + } if (parser.parseRParen().failed()) return parser.emitError(parser.getCurrentLocation(), "expected ')'"); return parseAndCheckRegion(); From a755f1eb8af7d2ea7a91e2ffb02253e82adc67ab Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 5 Apr 2022 11:07:58 -0700 Subject: [PATCH 0222/2301] [CIR] Add a cir.yield flavor for break - Rewrite optional kind to use an enum attribute instead. - Do not use break just yet --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 35 ++++++++++++++++++---- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 2 +- 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index ca0f9ad85788..4fed08a6e9c4 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -350,6 +350,16 @@ def IfOp : CIR_Op<"if", // YieldOp //===----------------------------------------------------------------------===// +def YieldOpKind_BK : I32EnumAttrCase<"Break", 1, "break">; +def YieldOpKind_FT : I32EnumAttrCase<"Fallthrough", 2, "fallthrough">; + +def YieldOpKind : I32EnumAttr< + "YieldOpKind", + "yield kind", + [YieldOpKind_BK, YieldOpKind_FT]> { + let cppNamespace = "::mlir::cir"; +} + def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp"]>]> { let summary = "termination operation for regions inside if, for, scope, etc"; @@ -362,7 +372,7 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, but it will be useful to represent lifetime extension in the future. When used to leave `cir.switch` regions there are two possible meanings: - 1. Plain `cir.yield` has "breaking out of a switch" semantics. + 1. `cir.yield break` has "breaking out of the outermost" switch semantics. 2. `cir.yield fallthrough` means the next region in the case list should be executed. @@ -385,12 +395,25 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ``` }]; - let arguments = (ins UnitAttr:$fallthrough, Variadic:$results); + let arguments = (ins OptionalAttr:$kind, + Variadic:$results); let builders = [OpBuilder<(ins), [{ /* nothing to do */ }]>]; let assemblyFormat = [{ - (`fallthrough` $fallthrough^)? ($results^ `:` type($results))? - attr-dict + ($kind^)? ($results^ `:` type($results))? attr-dict + }]; + + let extraClassDeclaration = [{ + // None of the below + bool isPlain() { + return !getKind(); + } + bool isFallthrough() { + return !isPlain() && *getKind() == YieldOpKind::Fallthrough; + } + bool isBreak() { + return !isPlain() && *getKind() == YieldOpKind::Break; + } }]; let hasVerifier = 1; @@ -591,11 +614,11 @@ def SwitchOp : CIR_Op<"switch", cir.switch (%b : i32) [ case (equal, 20) { ... - cir.yield // break semantics + cir.yield break }, case (default) { ... - cir.yield #fallthrough + cir.yield fallthrough } ] ``` diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index df58cc9a97aa..046943e5962f 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -411,7 +411,7 @@ mlir::LogicalResult YieldOp::verify() { assert((llvm::isa(getOperation()->getParentOp())) && "unknown parent op"); - if (getFallthrough()) + if (isFallthrough()) return emitOpError() << "fallthrough only expected within 'cir.switch'"; return mlir::success(); From 4edbb9f365acc11ae13f3e03d5b1ba8629a77d92 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 5 Apr 2022 11:20:01 -0700 Subject: [PATCH 0223/2301] [CIR][CodeGen] Use cir.yield break variant for break stmt codegen --- clang/lib/CIR/CIRGenModule.cpp | 7 +++++-- clang/test/CIR/CodeGen/switch.cpp | 4 ++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 96f76d738a09..75ea25fb7ce2 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1312,8 +1312,11 @@ static mlir::Location getIfLocs(CIRGenModule &CGM, const clang::Stmt *thenS, } mlir::LogicalResult CIRGenModule::buildBreakStmt(const clang::BreakStmt &S) { - // FIXME: add proper tracking for "break" in yield. - builder.create(getLoc(S.getBreakLoc())); + builder.create( + getLoc(S.getBreakLoc()), + mlir::cir::YieldOpKindAttr::get(builder.getContext(), + mlir::cir::YieldOpKind::Break), + mlir::ValueRange({})); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index b8bea1b4578e..02aa28a82048 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -19,8 +19,8 @@ void sw1(int a) { // CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %1 : i32, cir.ptr -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield break // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 1 : i32) { -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield break // CHECK-NEXT: } From abba9ac6ef0744302fb98e950a96fd5c36715ad4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 5 Apr 2022 13:59:27 -0700 Subject: [PATCH 0224/2301] [CIR][CodeGen] Make sure switch case regions are properly terminated - Make sure we print non-plain terminators (so that the semantics are quite obvious). - Add a test that considers scoped cases. --- clang/lib/CIR/CIRGenModule.cpp | 27 +++++++++++++++++++++++++- clang/test/CIR/CodeGen/switch.cpp | 20 ++++++++++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 3 +++ 3 files changed, 48 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 75ea25fb7ce2..fbea52e90788 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1362,8 +1362,27 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts()) // TODO: if the switch has a condition wrapped by __builtin_unpredictable? + auto terminateCaseRegion = [&](mlir::Region &r, mlir::Location loc) { + assert(r.getBlocks().size() <= 1 && "not implemented"); + if (r.empty()) + return; + + auto &block = r.back(); + + if (block.empty() || + !block.back().hasTrait()) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(&block); + builder.create( + loc, + mlir::cir::YieldOpKindAttr::get( + builder.getContext(), mlir::cir::YieldOpKind::Fallthrough), + mlir::ValueRange({})); + } + }; + // FIXME: track switch to handle nested stmts. - builder.create( + auto swop = builder.create( getLoc(S.getBeginLoc()), condV, /*switchBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { @@ -1402,6 +1421,12 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { os.addAttribute("cases", builder.getArrayAttr(caseAttrs)); }); + + // Make sure all case regions are terminated by inserting + // fallthroughs when necessary. + // FIXME: find a better way to get accurante with location here. + for (auto &r : swop.getRegions()) + terminateCaseRegion(r, swop.getLoc()); return res; }; diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 02aa28a82048..e6aba5ccd393 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -9,6 +9,11 @@ void sw1(int a) { break; case 1: break; + case 2: { + b = b + 1; + int yolo = 100; + break; + } } } @@ -23,4 +28,17 @@ void sw1(int a) { // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 1 : i32) { // CHECK-NEXT: cir.yield break -// CHECK-NEXT: } +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 2 : i32) { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %4 = cir.alloca i32, cir.ptr , ["yolo", cinit] +// CHECK-NEXT: %5 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %6 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %7 = cir.binop(add, %5, %6) : i32 +// CHECK-NEXT: cir.store %7, %1 : i32, cir.ptr +// CHECK-NEXT: %8 = cir.cst(100 : i32) : i32 +// CHECK-NEXT: cir.store %8, %4 : i32, cir.ptr +// CHECK-NEXT: cir.yield break +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: } diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 046943e5962f..f8375265348f 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -248,6 +248,9 @@ bool shouldPrintTerm(mlir::Region &r) { return false; if (isa(entryBlock->back())) return true; + YieldOp y = dyn_cast(entryBlock->back()); + if (y && !y.isPlain()) + return true; return false; } From e2796b06019a3da13c23d750060509ffe731dac5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 5 Apr 2022 14:18:26 -0700 Subject: [PATCH 0225/2301] [CIR][CodeGen][NFC] Refactor alloca building logic While here also lowercase some name down. --- clang/lib/CIR/CIRGenModule.cpp | 41 +++++++++++++++------------------- clang/lib/CIR/CIRGenModule.h | 9 +++++--- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index fbea52e90788..060c5b992026 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -128,9 +128,10 @@ static void updateAllocaInEntryBlock(AllocaOp localVarAddr) { localVarAddr->moveBefore(&parentBlock->front()); } -void CIRGenModule::buildAndUpdateRetAlloca(QualType T, mlir::Location loc, - CharUnits alignment) { - auto localVarTy = getCIRType(T); +mlir::Value CIRGenModule::buildAlloca(StringRef name, InitStyle initStyle, + QualType ty, mlir::Location loc, + CharUnits alignment) { + auto localVarTy = getCIRType(ty); auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), localVarTy); @@ -139,41 +140,35 @@ void CIRGenModule::buildAndUpdateRetAlloca(QualType T, mlir::Location loc, alignment.getQuantity()); auto addr = builder.create( loc, /*addr type*/ localVarPtrTy, - /*var type*/ localVarTy, "__retval", InitStyle::uninitialized, - alignIntAttr); + /*var type*/ localVarTy, name, initStyle, alignIntAttr); updateAllocaInEntryBlock(addr); + return addr; +} + +void CIRGenModule::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, + CharUnits alignment) { + auto addr = + buildAlloca("__retval", InitStyle::uninitialized, ty, loc, alignment); CurCGF->FnRetAlloca = addr; } -mlir::LogicalResult CIRGenModule::declare(const Decl *var, QualType T, +mlir::LogicalResult CIRGenModule::declare(const Decl *var, QualType ty, mlir::Location loc, CharUnits alignment, - mlir::Value &addr, bool IsParam) { + mlir::Value &addr, bool isParam) { const auto *namedVar = dyn_cast_or_null(var); assert(namedVar && "Needs a named decl"); if (symbolTable.count(var)) return mlir::failure(); - auto localVarTy = getCIRType(T); - auto localVarPtrTy = - mlir::cir::PointerType::get(builder.getContext(), localVarTy); - - auto alignIntAttr = - mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), - alignment.getQuantity()); - - auto localVarAddr = builder.create( - loc, /*addr type*/ localVarPtrTy, /*var type*/ localVarTy, - namedVar->getName(), - IsParam ? InitStyle::paraminit : InitStyle::uninitialized, alignIntAttr); - updateAllocaInEntryBlock(localVarAddr); + addr = buildAlloca(namedVar->getName(), + isParam ? InitStyle::paraminit : InitStyle::uninitialized, + ty, loc, alignment); // Insert into the symbol table, allocate some stack space in the // function entry block. - symbolTable.insert(var, localVarAddr); - addr = localVarAddr; - + symbolTable.insert(var, addr); return mlir::success(); } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 11ad244b02f1..89486c50c885 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -248,10 +248,13 @@ class CIRGenModule { /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. - mlir::LogicalResult declare(const clang::Decl *var, clang::QualType T, + mlir::LogicalResult declare(const clang::Decl *var, clang::QualType ty, mlir::Location loc, clang::CharUnits alignment, - mlir::Value &addr, bool IsParam = false); - void buildAndUpdateRetAlloca(clang::QualType T, mlir::Location loc, + mlir::Value &addr, bool isParam = false); + mlir::Value buildAlloca(llvm::StringRef name, mlir::cir::InitStyle initStyle, + clang::QualType ty, mlir::Location loc, + clang::CharUnits alignment); + void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, clang::CharUnits alignment); public: From 8c390c74887de1f32b0089dec7e8bd36e4acf267 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 5 Apr 2022 16:35:39 -0700 Subject: [PATCH 0226/2301] [CIR][CodeGen] Rewrite alloca placement - Use the order of declaration for local variables - Otherwise the order is: param, __retval and local variables --- clang/lib/CIR/CIRGenModule.cpp | 60 +++++++++++++++++++------------ clang/test/CIR/CodeGen/basic.c | 12 +++---- clang/test/CIR/CodeGen/basic.cpp | 45 +++++++++++------------ clang/test/CIR/CodeGen/struct.c | 4 +-- clang/test/CIR/CodeGen/struct.cpp | 6 ++-- 5 files changed, 71 insertions(+), 56 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 060c5b992026..771ea2567148 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -115,22 +115,26 @@ mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { return mlir::FusedLoc::get(locs, metadata, builder.getContext()); } -// Allocas are expected to be in the beginning of the entry block -// in whatever region they show up. -static void updateAllocaInEntryBlock(AllocaOp localVarAddr) { - auto *parentBlock = localVarAddr->getBlock(); - auto lastAlloca = std::find_if_not( - parentBlock->begin(), parentBlock->end(), - [](mlir::Operation &op) { return isa(&op); }); - if (lastAlloca != std::end(*parentBlock)) - localVarAddr->moveBefore(&*lastAlloca); - else - localVarAddr->moveBefore(&parentBlock->front()); -} - mlir::Value CIRGenModule::buildAlloca(StringRef name, InitStyle initStyle, QualType ty, mlir::Location loc, CharUnits alignment) { + // Allocas are expected to be in the beginning of the entry block + // for most of the regions. + // FIXME: for non-scoped C/C++ switch case regions, alloca's should + // go to the entry block of the switch scope, not of the case region. + auto getAllocaInsertPositionOp = + [&](mlir::Block **insertBlock) -> mlir::Operation * { + auto *parentBlock = builder.getInsertionBlock(); + auto lastAlloca = std::find_if( + parentBlock->rbegin(), parentBlock->rend(), + [](mlir::Operation &op) { return isa(&op); }); + + *insertBlock = parentBlock; + if (lastAlloca == parentBlock->rend()) + return nullptr; + return &*lastAlloca; + }; + auto localVarTy = getCIRType(ty); auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), localVarTy); @@ -138,10 +142,26 @@ mlir::Value CIRGenModule::buildAlloca(StringRef name, InitStyle initStyle, auto alignIntAttr = mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), alignment.getQuantity()); - auto addr = builder.create( - loc, /*addr type*/ localVarPtrTy, - /*var type*/ localVarTy, name, initStyle, alignIntAttr); - updateAllocaInEntryBlock(addr); + + mlir::Value addr; + { + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::Block *insertBlock = nullptr; + mlir::Operation *insertOp = getAllocaInsertPositionOp(&insertBlock); + + if (insertOp) + builder.setInsertionPointAfter(insertOp); + else { + assert(insertBlock && "expected valid insertion block"); + // No previous alloca found, place this one in the beginning + // of the block. + builder.setInsertionPointToStart(insertBlock); + } + + addr = builder.create(loc, /*addr type*/ localVarPtrTy, + /*var type*/ localVarTy, name, + initStyle, alignIntAttr); + } return addr; } @@ -158,16 +178,12 @@ mlir::LogicalResult CIRGenModule::declare(const Decl *var, QualType ty, mlir::Value &addr, bool isParam) { const auto *namedVar = dyn_cast_or_null(var); assert(namedVar && "Needs a named decl"); - - if (symbolTable.count(var)) - return mlir::failure(); + assert(!symbolTable.count(var) && "not supposed to be available just yet"); addr = buildAlloca(namedVar->getName(), isParam ? InitStyle::paraminit : InitStyle::uninitialized, ty, loc, alignment); - // Insert into the symbol table, allocate some stack space in the - // function entry block. symbolTable.insert(var, addr); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 8a8d2379f6f3..16d2a173de6a 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -35,11 +35,11 @@ int f3() { } // CHECK: func @f3() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.cst(3 : i32) : i32 -// CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr -// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.store %3, %1 : i32, cir.ptr -// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: cir.store %2, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: cir.store %3, %0 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %4 : i32 diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 9aa2485951e7..4dc39fe1312e 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -8,8 +8,9 @@ int *p0() { } // CHECK: func @p0() -> !cir.ptr { +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] // CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: cir.store %2, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > int *p1() { int *p; @@ -18,9 +19,9 @@ int *p1() { } // CHECK: func @p1() -> !cir.ptr { -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["p", uninitialized] +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", uninitialized] // CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: cir.store %2, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > int *p2() { int *p = nullptr; @@ -34,26 +35,26 @@ int *p2() { } // CHECK: func @p2() -> !cir.ptr { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] {alignment = 8 : i64} loc(#loc15) -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} loc(#loc16) -// CHECK-NEXT: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr loc(#loc17) -// CHECK-NEXT: cir.store %2, %0 : !cir.ptr, cir.ptr > loc(#loc15) +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] {alignment = 8 : i64} +// CHECK-NEXT: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.store %2, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %7 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} loc(#loc19) -// CHECK-NEXT: %8 = cir.cst(0 : i32) : i32 loc(#loc20) -// CHECK-NEXT: cir.store %8, %7 : i32, cir.ptr loc(#loc19) -// CHECK-NEXT: cir.store %7, %0 : !cir.ptr, cir.ptr > loc(#loc21) -// CHECK-NEXT: %9 = cir.cst(42 : i32) : i32 loc(#loc22) -// CHECK-NEXT: %10 = cir.load deref %0 : cir.ptr >, !cir.ptr loc(#loc23) -// CHECK-NEXT: cir.store %9, %10 : i32, cir.ptr loc(#loc24) +// CHECK-NEXT: %7 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %8 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: cir.store %8, %7 : i32, cir.ptr +// CHECK-NEXT: cir.store %7, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %9 = cir.cst(42 : i32) : i32 +// CHECK-NEXT: %10 = cir.load deref %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %9, %10 : i32, cir.ptr // CHECK-NEXT: } loc(#[[locScope:loc[0-9]+]]) -// CHECK-NEXT: %3 = cir.cst(42 : i32) : i32 loc(#loc25) -// CHECK-NEXT: %4 = cir.load deref %0 : cir.ptr >, !cir.ptr loc(#loc26) -// CHECK-NEXT: cir.store %3, %4 : i32, cir.ptr loc(#loc27) -// CHECK-NEXT: %5 = cir.load %0 : cir.ptr >, !cir.ptr loc(#loc28) -// CHECK-NEXT: cir.store %5, %1 : !cir.ptr, cir.ptr > loc(#loc29) -// CHECK-NEXT: %6 = cir.load %1 : cir.ptr >, !cir.ptr loc(#loc29) -// CHECK-NEXT: cir.return %6 : !cir.ptr loc(#loc29) +// CHECK-NEXT: %3 = cir.cst(42 : i32) : i32 +// CHECK-NEXT: %4 = cir.load deref %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %3, %4 : i32, cir.ptr +// CHECK-NEXT: %5 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %5, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %6 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return %6 : !cir.ptr void b0() { bool x = true, y = false; } @@ -132,4 +133,4 @@ void if1(int a, bool b, bool c) { // CHECK: } // CHECK: } -// CHECK: #[[locScope]] = loc(fused["{{.*}}basic.cpp":26:3, "{{.*}}basic.cpp":30:3]) +// CHECK: #[[locScope]] = loc(fused["{{.*}}basic.cpp":27:3, "{{.*}}basic.cpp":31:3]) diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 456b68bfbcf0..d87d039689da 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -21,8 +21,8 @@ void baz() { // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> // CHECK-NEXT: module { // CHECK-NEXT: func @baz() { -// CHECK-NEXT: %0 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index c357c3ada9c8..5ab67876ab8f 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -21,8 +21,6 @@ void baz() { // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> // CHECK-NEXT: module { // CHECK-NEXT: func @baz() { -// CHECK-NEXT: %0 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.return -// CHECK-NEXT: } -// CHECK-NEXT: } From 713455f9bfcd6653dded88d913b927c93cafdf7e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 5 Apr 2022 17:34:04 -0700 Subject: [PATCH 0227/2301] [CIR][CodeGen] Ensure cir.alloca is always in the first block of a region --- clang/lib/CIR/CIRGenModule.cpp | 7 +++++++ clang/test/CIR/CodeGen/goto.cpp | 16 +++++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 771ea2567148..d454dd6697e8 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -125,6 +125,13 @@ mlir::Value CIRGenModule::buildAlloca(StringRef name, InitStyle initStyle, auto getAllocaInsertPositionOp = [&](mlir::Block **insertBlock) -> mlir::Operation * { auto *parentBlock = builder.getInsertionBlock(); + mlir::Region *r = parentBlock->getParent(); + assert(r->getBlocks().size() > 0 && "assume at least one block exists"); + mlir::Block &entryBlock = *r->begin(); + + if (parentBlock != &entryBlock) + parentBlock = &entryBlock; + auto lastAlloca = std::find_if( parentBlock->rbegin(), parentBlock->rend(), [](mlir::Operation &op) { return isa(&op); }); diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 202486354314..2ac1c8718b7f 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -27,4 +27,18 @@ void g0(int a) { // CHECK-NEXT %7 = cir.cst(2 : i32) : i32 // CHECK-NEXT %8 = cir.binop(add, %6, %7) : i32 // CHECK-NEXT cir.store %8, %1 : i32, cir.ptr -// CHECK-NEXT cir.return \ No newline at end of file +// CHECK-NEXT cir.return + +void g1(int a) { + int x = 0; + goto end; +end: + int y = a + 2; +} + +// Make sure alloca for "y" shows up in the entry block +// CHECK: func @g1(%arg0: i32 +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["y", cinit] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr \ No newline at end of file From b30a3291ab79d6823e381103d602aa087c0a3e39 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Apr 2022 11:12:57 -0700 Subject: [PATCH 0228/2301] [CIR][CodeGen] Track scope based entry blocks --- clang/lib/CIR/CIRGenModule.cpp | 18 ++++++++++++------ clang/lib/CIR/CIRGenModule.h | 14 ++++++++++++-- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index d454dd6697e8..5d0669aba17e 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1291,7 +1291,8 @@ mlir::LogicalResult CIRGenModule::buildIfOnBoolExpr(const Expr *cond, locs.push_back(fusedLoc.getLocations()[0]); locs.push_back(fusedLoc.getLocations()[1]); } - LexicalScopeContext lexScope{locs[0], locs[1]}; + LexicalScopeContext lexScope{locs[0], locs[1], + builder.getInsertionBlock()}; LexicalScopeGuard lexThenGuard{*this, &lexScope}; resThen = buildStmt(thenS, /*useCurrentScope=*/true); }, @@ -1300,7 +1301,8 @@ mlir::LogicalResult CIRGenModule::buildIfOnBoolExpr(const Expr *cond, auto fusedLoc = loc.cast(); auto locBegin = fusedLoc.getLocations()[2]; auto locEnd = fusedLoc.getLocations()[3]; - LexicalScopeContext lexScope{locBegin, locEnd}; + LexicalScopeContext lexScope{locBegin, locEnd, + builder.getInsertionBlock()}; LexicalScopeGuard lexElseGuard{*this, &lexScope}; resElse = buildStmt(elseS, /*useCurrentScope=*/true); }); @@ -1456,7 +1458,8 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { auto fusedLoc = loc.cast(); auto scopeLocBegin = fusedLoc.getLocations()[0]; auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd}; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; res = switchStmtBuilder(); }); @@ -1504,7 +1507,8 @@ mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { auto fusedLoc = loc.cast(); auto scopeLocBegin = fusedLoc.getLocations()[0]; auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd}; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; res = ifStmtBuilder(); }); @@ -1720,7 +1724,8 @@ mlir::LogicalResult CIRGenModule::buildCompoundStmt(const CompoundStmt &S) { auto fusedLoc = loc.cast(); auto locBegin = fusedLoc.getLocations()[0]; auto locEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{locBegin, locEnd}; + LexicalScopeContext lexScope{locBegin, locEnd, + builder.getInsertionBlock()}; LexicalScopeGuard lexScopeGuard{*this, &lexScope}; res = compoundStmtBuilder(); }); @@ -1842,7 +1847,8 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { // Initialize lexical scope information. { - LexicalScopeContext lexScope{FnBeginLoc, FnEndLoc}; + LexicalScopeContext lexScope{FnBeginLoc, FnEndLoc, + builder.getInsertionBlock()}; LexicalScopeGuard scopeGuard{*this, &lexScope}; // Declare all the function arguments in the symbol table. diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 89486c50c885..ef3b8be8e183 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -126,11 +126,16 @@ class CIRGenModule { // lexical context (scope). mlir::Block *CleanupBlock = nullptr; + // Points to scope entry block. This is useful, for instance, for + // helping to insert allocas before finalizing any recursive codegen + // from switches. + mlir::Block *EntryBlock; + public: unsigned Depth = 0; bool HasReturn = false; - LexicalScopeContext(mlir::Location b, mlir::Location e) - : BeginLoc(b), EndLoc(e) {} + LexicalScopeContext(mlir::Location b, mlir::Location e, mlir::Block *eb) + : EntryBlock(eb), BeginLoc(b), EndLoc(e) {} ~LexicalScopeContext() = default; // --- @@ -188,6 +193,11 @@ class CIRGenModule { return RetBlock; } + // --- + // Scope entry block tracking + // --- + mlir::Block *getEntryBlock() { return EntryBlock; } + mlir::Location BeginLoc, EndLoc; }; From 24edfe7dbeb390a555ba6bd790f06bc2f116d25d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Apr 2022 11:20:26 -0700 Subject: [PATCH 0229/2301] [CIR][CodeGen] Small changes to error reporting while building switch stmts --- clang/lib/CIR/CIRGenModule.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 5d0669aba17e..07343b96fd1c 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1420,6 +1420,8 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { mlir::OpBuilder::InsertionGuard guardCase(builder); builder.setInsertionPointToEnd(lastCaseBlock); res = buildStmt(c, /*useCurrentScope=*/!isa(c)); + if (res.failed()) + break; continue; } assert(newCase && "expected case stmt"); @@ -1442,12 +1444,15 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { os.addAttribute("cases", builder.getArrayAttr(caseAttrs)); }); + if (res.failed()) + return res; + // Make sure all case regions are terminated by inserting // fallthroughs when necessary. // FIXME: find a better way to get accurante with location here. for (auto &r : swop.getRegions()) terminateCaseRegion(r, swop.getLoc()); - return res; + return mlir::success(); }; // The switch scope contains the full source range for SwitchStmt. From e317acb91df115f52ba30510ad5cb718dd15e120 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Apr 2022 11:33:48 -0700 Subject: [PATCH 0230/2301] [CIR][CodeGen] Simplify buildAlloca logic based on scope entry block tracking This allows local variable declaration on non-scoped case stmts to land in the appropriate block (switch's scope entry block). --- clang/lib/CIR/CIRGenModule.cpp | 12 +----------- clang/test/CIR/CodeGen/switch.cpp | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 07343b96fd1c..d708357e7259 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -118,19 +118,9 @@ mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { mlir::Value CIRGenModule::buildAlloca(StringRef name, InitStyle initStyle, QualType ty, mlir::Location loc, CharUnits alignment) { - // Allocas are expected to be in the beginning of the entry block - // for most of the regions. - // FIXME: for non-scoped C/C++ switch case regions, alloca's should - // go to the entry block of the switch scope, not of the case region. auto getAllocaInsertPositionOp = [&](mlir::Block **insertBlock) -> mlir::Operation * { - auto *parentBlock = builder.getInsertionBlock(); - mlir::Region *r = parentBlock->getParent(); - assert(r->getBlocks().size() > 0 && "assume at least one block exists"); - mlir::Block &entryBlock = *r->begin(); - - if (parentBlock != &entryBlock) - parentBlock = &entryBlock; + auto *parentBlock = currLexScope->getEntryBlock(); auto lastAlloca = std::find_if( parentBlock->rbegin(), parentBlock->rend(), diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index e6aba5ccd393..4ab511d047e7 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -42,3 +42,22 @@ void sw1(int a) { // CHECK-NEXT: } // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: } + +void sw2(int a) { + switch (int yolo = 2; a) { + case 3: + // "fomo" has the same lifetime as "yolo" + int fomo = 0; + yolo = yolo + fomo; + break; + } +} + +// CHECK: func @sw2 +// CHECK: cir.scope { +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["yolo", cinit] +// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["fomo", cinit] +// CHECK: cir.switch (%4 : i32) [ +// CHECK-NEXT: case (equal, 3 : i32) { +// CHECK-NEXT: %5 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: cir.store %5, %2 : i32, cir.ptr \ No newline at end of file From a3bf745dd6a2cef316dc950fd1ee842ac00a6245 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Apr 2022 12:14:51 -0700 Subject: [PATCH 0231/2301] [CIR] Fix printing for cir.switch default cases --- clang/test/CIR/IR/switch.cir | 2 +- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index 9ea616943058..caa671100539 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -18,7 +18,7 @@ func.func @s0() { } // CHECK: cir.switch (%0 : i32) [ -// CHECK-NEXT: case (default, 0 : i32) { +// CHECK-NEXT: case (default) { // CHECK-NEXT: cir.return // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 3 : i32) { diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index f8375265348f..2a393eadd71b 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -658,10 +658,13 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, std::string attrString = caseValueStr.str(); attrString[0] = attrString[0] + 'a' - 'A'; caseValueStr = attrString; - p << caseValueStr << ", "; + p << caseValueStr; // Case value - p.printStrippedAttrOrType(attr.getValue()); + if (kind != cir::CaseOpKind::Default) { + p << ", "; + p.printStrippedAttrOrType(attr.getValue()); + } p << ") "; p.printRegion(r, /*printEntryBLockArgs=*/false, From 1cd1f7fa23e2da0135b6fa907aef489b981501cb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Apr 2022 12:18:56 -0700 Subject: [PATCH 0232/2301] [CIR][CodeGen] Add support for default stmts --- clang/lib/CIR/CIRGenModule.cpp | 46 +++++++++++++++++++++++++------ clang/lib/CIR/CIRGenModule.h | 3 ++ clang/test/CIR/CodeGen/switch.cpp | 18 +++++++++++- 3 files changed, 58 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index d708357e7259..d80b1892bb56 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -879,6 +879,7 @@ mlir::LogicalResult CIRGenModule::buildSimpleStmt(const Stmt *S, return buildLabelStmt(cast(*S)); case Stmt::CaseStmtClass: + case Stmt::DefaultStmtClass: assert(0 && "Should not get here, currently handled directly from SwitchStmt"); break; @@ -888,7 +889,6 @@ mlir::LogicalResult CIRGenModule::buildSimpleStmt(const Stmt *S, case Stmt::AttributedStmtClass: case Stmt::ContinueStmtClass: - case Stmt::DefaultStmtClass: case Stmt::SEHLeaveStmtClass: llvm::errs() << "CIR codegen for '" << S->getStmtClassName() << "' not implemented\n"; @@ -1330,6 +1330,24 @@ mlir::LogicalResult CIRGenModule::buildBreakStmt(const clang::BreakStmt &S) { return mlir::success(); } +mlir::LogicalResult CIRGenModule::buildDefaultStmt(const DefaultStmt &S, + mlir::Type condType, + CaseAttr &caseEntry) { + auto res = mlir::success(); + auto *ctx = builder.getContext(); + caseEntry = mlir::cir::CaseAttr::get( + ctx, builder.getArrayAttr({}), + CaseOpKindAttr::get(ctx, mlir::cir::CaseOpKind::Default)); + { + mlir::OpBuilder::InsertionGuard guardCase(builder); + res = buildStmt(S.getSubStmt(), + /*useCurrentScope=*/!isa(S.getSubStmt())); + } + + // TODO: likelihood + return res; +} + mlir::LogicalResult CIRGenModule::buildCaseStmt(const CaseStmt &S, mlir::Type condType, CaseAttr &caseEntry) { @@ -1402,8 +1420,8 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { mlir::Block *lastCaseBlock = nullptr; for (auto *c : cs->body()) { - auto *newCase = dyn_cast(c); - if (!newCase) { + bool caseLike = isa(c); + if (!caseLike) { // This means it's a random stmt following up a case, just // emit it as part of previous known case. assert(lastCaseBlock && "expects pre-existing case block"); @@ -1414,17 +1432,29 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { break; continue; } - assert(newCase && "expected case stmt"); - const CaseStmt *nestedCase = - dyn_cast(newCase->getSubStmt()); - assert(!nestedCase && "empty case fallthrough NYI"); + + // FIXME: add support for empty case fallthrough + auto *caseStmt = dyn_cast(c); + if (caseStmt) { + const CaseStmt *nestedCase = + dyn_cast(caseStmt->getSubStmt()); + assert(!nestedCase && "empty case fallthrough NYI"); + } CaseAttr caseAttr; { mlir::OpBuilder::InsertionGuard guardCase(builder); mlir::Region *caseRegion = os.addRegion(); lastCaseBlock = builder.createBlock(caseRegion); - res = buildCaseStmt(*newCase, condV.getType(), caseAttr); + + if (caseStmt) + res = buildCaseStmt(*caseStmt, condV.getType(), caseAttr); + else { + auto *defaultStmt = dyn_cast(c); + assert(defaultStmt && "expected default stmt"); + res = buildDefaultStmt(*defaultStmt, condV.getType(), caseAttr); + } + if (res.failed()) break; } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index ef3b8be8e183..efbea37f7015 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -481,6 +481,9 @@ class CIRGenModule { mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S, mlir::Type condType, mlir::cir::CaseAttr &caseEntry); + mlir::LogicalResult buildDefaultStmt(const clang::DefaultStmt &S, + mlir::Type condType, + mlir::cir::CaseAttr &caseEntry); mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 4ab511d047e7..4c2a6971464c 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -60,4 +60,20 @@ void sw2(int a) { // CHECK: cir.switch (%4 : i32) [ // CHECK-NEXT: case (equal, 3 : i32) { // CHECK-NEXT: %5 = cir.cst(0 : i32) : i32 -// CHECK-NEXT: cir.store %5, %2 : i32, cir.ptr \ No newline at end of file +// CHECK-NEXT: cir.store %5, %2 : i32, cir.ptr + +void sw3(int a) { + switch (a) { + default: + break; + } +} + +// CHECK: func @sw3 +// CHECK: cir.scope { +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.switch (%1 : i32) [ +// CHECK-NEXT: case (default) { +// CHECK-NEXT: cir.yield break +// CHECK-NEXT: } +// CHECK-NEXT: ] From ba4174d8a919e0e13a737e4e50c8de7fc5ad5ae9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Apr 2022 12:58:50 -0700 Subject: [PATCH 0233/2301] [CIR] Lift the restriction on one sized regions for cir.switch and improve parser errors --- clang/test/CIR/IR/invalid.cir | 4 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 3 +- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 71 ++++++++++++---------- 3 files changed, 43 insertions(+), 35 deletions(-) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 4f4d947e11d3..09ca4e4a6adf 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -31,7 +31,7 @@ func.func @if0() { func.func @yield0() { %0 = cir.cst(true) : !cir.bool - cir.if %0 { // expected-error {{custom op 'cir.if' if.then expected at least one block with cir.yield or cir.return}} + cir.if %0 { // expected-error {{custom op 'cir.if' expected at least one block with cir.yield or cir.return}} cir.br ^a ^a: } @@ -67,6 +67,6 @@ func.func @s1() { cir.switch (%1 : i32) [ case (equal, 5) { } - ] // expected-error {{case regions expected to have one terminated block}} + ] // expected-error {{case region shall not be empty}} cir.return } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 4fed08a6e9c4..95bf50e2b29e 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -626,7 +626,8 @@ def SwitchOp : CIR_Op<"switch", let arguments = (ins AnyInteger:$condition, OptionalAttr:$cases); - let regions = (region VariadicRegion>:$regions); + + let regions = (region VariadicRegion:$regions); let hasVerifier = 1; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 2a393eadd71b..1e6a5159647c 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -162,15 +162,28 @@ mlir::LogicalResult ReturnOp::verify() { // IfOp //===----------------------------------------------------------------------===// -static LogicalResult checkBlockTerminator(mlir::Builder &builder, Location l, - Region *r) { +static LogicalResult checkBlockTerminator(OpAsmParser &parser, + llvm::SMLoc parserLoc, + std::optional l, Region *r, + bool ensureTerm = true) { + mlir::Builder &builder = parser.getBuilder(); if (r->hasOneBlock()) { - ::mlir::impl::ensureRegionTerminator( - *r, builder, l, [](OpBuilder &builder, Location loc) { - OperationState state(loc, YieldOp::getOperationName()); - YieldOp::build(builder, state); - return Operation::create(state); - }); + if (ensureTerm) { + ::mlir::impl::ensureRegionTerminator( + *r, builder, *l, [](OpBuilder &builder, Location loc) { + OperationState state(loc, YieldOp::getOperationName()); + YieldOp::build(builder, state); + return Operation::create(state); + }); + } else { + assert(r && "region must not be empty"); + Block &block = r->back(); + if (block.empty() || !block.back().hasTrait()) { + return parser.emitError( + parser.getCurrentLocation(), + "blocks are expected to be explicitly terminated"); + } + } return success(); } @@ -191,6 +204,8 @@ static LogicalResult checkBlockTerminator(mlir::Builder &builder, Location l, } } + parser.emitError(parserLoc, + "expected at least one block with cir.yield or cir.return"); return failure(); } @@ -199,7 +214,6 @@ ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { result.regions.reserve(2); Region *thenRegion = result.addRegion(); Region *elseRegion = result.addRegion(); - auto loc = parser.getCurrentLocation(); auto &builder = parser.getBuilder(); OpAsmParser::UnresolvedOperand cond; @@ -210,28 +224,22 @@ ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { return failure(); // Parse the 'then' region. + auto parseThenLoc = parser.getCurrentLocation(); if (parser.parseRegion(*thenRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkBlockTerminator(parser.getBuilder(), result.location, thenRegion) - .failed()) { - parser.emitError( - loc, - "if.then expected at least one block with cir.yield or cir.return"); + if (checkBlockTerminator(parser, parseThenLoc, result.location, thenRegion) + .failed()) return failure(); - } // If we find an 'else' keyword, parse the 'else' region. if (!parser.parseOptionalKeyword("else")) { + auto parseElseLoc = parser.getCurrentLocation(); if (parser.parseRegion(*elseRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkBlockTerminator(parser.getBuilder(), result.location, elseRegion) - .failed()) { - parser.emitError( - loc, - "if.else expected at least one block with cir.yield or cir.return"); + if (checkBlockTerminator(parser, parseElseLoc, result.location, elseRegion) + .failed()) return failure(); - } } // Parse the optional attribute list. @@ -350,12 +358,8 @@ ParseResult ScopeOp::parse(OpAsmParser &parser, OperationState &result) { if (parser.parseRegion(*scopeRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkBlockTerminator(parser.getBuilder(), result.location, scopeRegion) - .failed()) { - parser.emitError( - loc, "expected at least one block with cir.yield or cir.return"); + if (checkBlockTerminator(parser, loc, result.location, scopeRegion).failed()) return failure(); - } // Parse the optional attribute list. if (parser.parseOptionalAttrDict(result.attributes)) @@ -482,7 +486,7 @@ mlir::SuccessorOperands BrOp::getSuccessorOperands(unsigned index) { assert(index == 0 && "invalid successor index"); // Current block targets do not have operands. // TODO(CIR): This is a hacky avoidance of actually implementing this since - // MLIR moved it "because nobody used the llvm::Optional::None case.........." + // MLIR moved it "because nobody used the std::optional::None case.........." return mlir::SuccessorOperands(MutableOperandRange(getOperation(), 0, 0)); } @@ -505,19 +509,22 @@ parseSwitchOp(OpAsmParser &parser, // Parse region attached to case regions.emplace_back(new Region); Region &currRegion = *regions.back().get(); + auto parserLoc = parser.getCurrentLocation(); if (parser.parseRegion(currRegion, /*arguments=*/{}, /*argTypes=*/{})) { regions.clear(); return failure(); } if (currRegion.empty()) { - return parser.emitError( - parser.getCurrentLocation(), - "case regions expected to have one terminated block"); + return parser.emitError(parser.getCurrentLocation(), + "case region shall not be empty"); } - // Region trait in CIROps.td already verifies that, but make sure not - // mistakes happen. + if (checkBlockTerminator(parser, parserLoc, std::nullopt, &currRegion, + /*ensureTerm=*/false) + .failed()) + return failure(); + assert(currRegion.hasOneBlock() && "expected only one block"); Block &block = currRegion.back(); if (block.empty() || !block.back().hasTrait()) { From 7b84f8f7b7fc85d405c6533ad80ab1580f9964b4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Apr 2022 17:23:15 -0700 Subject: [PATCH 0234/2301] [CIR][CodeGen] Support multiple blocks in case regions and add return support --- clang/lib/CIR/CIRGenModule.cpp | 92 +++++++++++++++++--------- clang/lib/CIR/CIRGenModule.h | 57 ++++++++++++---- clang/test/CIR/CodeGen/switch.cpp | 39 +++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 9 --- 4 files changed, 142 insertions(+), 55 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index d80b1892bb56..6c3badaed0b6 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -729,19 +729,22 @@ void CIRGenModule::LexicalScopeGuard::cleanup() { // Cleanup are done right before codegen resume a scope. This is where // objects are destroyed. - if (localScope->RetBlock) { + unsigned curLoc = 0; + for (auto *retBlock : localScope->getRetBlocks()) { mlir::OpBuilder::InsertionGuard guard(builder); - builder.setInsertionPointToEnd(localScope->RetBlock); + builder.setInsertionPointToEnd(retBlock); + mlir::Location retLoc = *localScope->getRetLocs()[curLoc]; + curLoc++; // TODO: insert actual scope cleanup HERE (dtors and etc) // If there's anything to return, load it first. if (CGM.CurCGF->FnRetTy.has_value()) { - auto val = builder.create( - *localScope->RetLoc, *CGM.CurCGF->FnRetTy, *CGM.CurCGF->FnRetAlloca); - builder.create(*localScope->RetLoc, ArrayRef(val.getResult())); + auto val = builder.create(retLoc, *CGM.CurCGF->FnRetTy, + *CGM.CurCGF->FnRetAlloca); + builder.create(retLoc, ArrayRef(val.getResult())); } else { - builder.create(*localScope->RetLoc); + builder.create(retLoc); } } @@ -1377,6 +1380,8 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))... auto res = mlir::success(); + SwitchOp swop; + auto switchStmtBuilder = [&]() -> mlir::LogicalResult { if (S.getInit()) if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) @@ -1390,27 +1395,8 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts()) // TODO: if the switch has a condition wrapped by __builtin_unpredictable? - auto terminateCaseRegion = [&](mlir::Region &r, mlir::Location loc) { - assert(r.getBlocks().size() <= 1 && "not implemented"); - if (r.empty()) - return; - - auto &block = r.back(); - - if (block.empty() || - !block.back().hasTrait()) { - mlir::OpBuilder::InsertionGuard guardCase(builder); - builder.setInsertionPointToEnd(&block); - builder.create( - loc, - mlir::cir::YieldOpKindAttr::get( - builder.getContext(), mlir::cir::YieldOpKind::Fallthrough), - mlir::ValueRange({})); - } - }; - // FIXME: track switch to handle nested stmts. - auto swop = builder.create( + swop = builder.create( getLoc(S.getBeginLoc()), condV, /*switchBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { @@ -1418,6 +1404,7 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { assert(cs && "expected compound stmt"); SmallVector caseAttrs; + currLexScope->setAsSwitch(); mlir::Block *lastCaseBlock = nullptr; for (auto *c : cs->body()) { bool caseLike = isa(c); @@ -1444,9 +1431,14 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { CaseAttr caseAttr; { mlir::OpBuilder::InsertionGuard guardCase(builder); + + // Update scope information with the current region we are + // emitting code for. This is useful to allow return blocks to be + // automatically and properly placed during cleanup. mlir::Region *caseRegion = os.addRegion(); - lastCaseBlock = builder.createBlock(caseRegion); + currLexScope->updateCurrentSwitchCaseRegion(); + lastCaseBlock = builder.createBlock(caseRegion); if (caseStmt) res = buildCaseStmt(*caseStmt, condV.getType(), caseAttr); else { @@ -1467,11 +1459,6 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { if (res.failed()) return res; - // Make sure all case regions are terminated by inserting - // fallthroughs when necessary. - // FIXME: find a better way to get accurante with location here. - for (auto &r : swop.getRegions()) - terminateCaseRegion(r, swop.getLoc()); return mlir::success(); }; @@ -1489,7 +1476,46 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { res = switchStmtBuilder(); }); - return res; + if (res.failed()) + return res; + + // Any block in a case region without a terminator is considered a + // fallthrough yield. In practice there shouldn't be more than one + // block without a terminator, we patch any block we see though and + // let mlir's SwitchOp verifier enforce rules. + auto terminateCaseRegion = [&](mlir::Region &r, mlir::Location loc) { + if (r.empty()) + return; + + SmallVector eraseBlocks; + for (auto &block : r.getBlocks()) { + // Already cleanup after return operations, which might create + // empty blocks if emitted as last stmt. + if (block.empty() && block.hasNoPredecessors() && block.hasNoSuccessors()) + eraseBlocks.push_back(&block); + + if (block.empty() || + !block.back().hasTrait()) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(&block); + builder.create( + loc, + mlir::cir::YieldOpKindAttr::get( + builder.getContext(), mlir::cir::YieldOpKind::Fallthrough), + mlir::ValueRange({})); + } + } + + for (auto *b : eraseBlocks) + b->erase(); + }; + + // Make sure all case regions are terminated by inserting fallthroughs + // when necessary. + // FIXME: find a better way to get accurante with location here. + for (auto &r : swop.getRegions()) + terminateCaseRegion(r, swop.getLoc()); + return mlir::success(); } mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index efbea37f7015..5140ef85b5d7 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -131,6 +131,12 @@ class CIRGenModule { // from switches. mlir::Block *EntryBlock; + // FIXME: perhaps we can use some info encoded in operations. + enum Kind { + Regular, // cir.if, cir.scope, if_regions + Switch // cir.switch + } ScopeKind = Regular; + public: unsigned Depth = 0; bool HasReturn = false; @@ -138,6 +144,13 @@ class CIRGenModule { : EntryBlock(eb), BeginLoc(b), EndLoc(e) {} ~LexicalScopeContext() = default; + // --- + // Kind + // --- + bool isRegular() { return ScopeKind == Kind::Regular; } + bool isSwitch() { return ScopeKind == Kind::Switch; } + void setAsSwitch() { ScopeKind = Kind::Switch; } + // --- // Goto handling // --- @@ -173,24 +186,42 @@ class CIRGenModule { // Return handling // --- - // Return block info for this scope. - mlir::Block *RetBlock = nullptr; - std::optional RetLoc; + private: + // On switches we need one return block per region, since cases don't + // have their own scopes but are distinct regions nonetheless. + llvm::SmallVector RetBlocks; + llvm::SmallVector> RetLocs; + unsigned int CurrentSwitchRegionIdx = -1; // There's usually only one ret block per scope, but this needs to be // get or create because of potential unreachable return statements, note // that for those, all source location maps to the first one found. + mlir::Block *createRetBlock(CIRGenModule &CGM, mlir::Location loc) { + assert((isSwitch() || RetBlocks.size() == 0) && + "only switches can hold more than one ret block"); + + // Create the cleanup block but dont hook it up around just yet. + mlir::OpBuilder::InsertionGuard guard(CGM.builder); + auto *b = CGM.builder.createBlock(CGM.builder.getBlock()->getParent()); + RetBlocks.push_back(b); + RetLocs.push_back(loc); + return b; + } + + public: + void updateCurrentSwitchCaseRegion() { CurrentSwitchRegionIdx++; } + llvm::ArrayRef getRetBlocks() { return RetBlocks; } + llvm::ArrayRef> getRetLocs() { + return RetLocs; + } + mlir::Block *getOrCreateRetBlock(CIRGenModule &CGM, mlir::Location loc) { - if (RetBlock) - return RetBlock; - RetLoc = loc; - { - // Create the cleanup block but dont hook it up around just yet. - mlir::OpBuilder::InsertionGuard guard(CGM.builder); - RetBlock = CGM.builder.createBlock(CGM.builder.getBlock()->getParent()); - } - assert(CGM.builder.getInsertionBlock() && "Should be valid"); - return RetBlock; + unsigned int regionIdx = 0; + if (isSwitch()) + regionIdx = CurrentSwitchRegionIdx; + if (regionIdx >= RetBlocks.size()) + return createRetBlock(CGM, loc); + return &*RetBlocks.back(); } // --- diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 4c2a6971464c..01d8e00aca34 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -77,3 +77,42 @@ void sw3(int a) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: } // CHECK-NEXT: ] + +int sw4(int a) { + switch (a) { + case 42: { + return 3; + } + default: + return 2; + } + return 0; +} + +// CHECK: func @sw4( +// CHECK: cir.switch (%4 : i32) [ +// CHECK-NEXT: case (equal, 42 : i32) { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %5 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: cir.return %6 : i32 +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: }, +// CHECK-NEXT: case (default) { +// CHECK-NEXT: %5 = cir.cst(2 : i32) : i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: cir.br ^bb1 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: cir.return %6 : i32 +// CHECK-NEXT: } +// CHECK-NEXT: ] +// CHECK-NEXT: } +// CHECK-NEXT: %2 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: cir.store %2, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: cir.return %3 : i32 +// CHECK-NEXT: } +// CHECK-NEXT: } \ No newline at end of file diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 1e6a5159647c..85e3555ea960 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -524,15 +524,6 @@ parseSwitchOp(OpAsmParser &parser, /*ensureTerm=*/false) .failed()) return failure(); - - assert(currRegion.hasOneBlock() && "expected only one block"); - Block &block = currRegion.back(); - if (block.empty() || !block.back().hasTrait()) { - return parser.emitError( - parser.getCurrentLocation(), - "blocks are expected to be explicitly terminated"); - } - return success(); }; From d396a1ee3dc761452773627de14e704bafabe5a7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Apr 2022 17:46:18 -0700 Subject: [PATCH 0235/2301] [CIR] Add support for cir.switch on MergeCleanups pass Add one more integration test. We still need to add pass specific tests. --- clang/test/CIR/CodeGen/switch.cpp | 11 +--------- .../Dialect/CIR/Transforms/MergeCleanups.cpp | 21 +++++++++++++++---- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 01d8e00aca34..2411170c5eec 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -89,7 +89,7 @@ int sw4(int a) { return 0; } -// CHECK: func @sw4( +// CHECK: func @sw4 // CHECK: cir.switch (%4 : i32) [ // CHECK-NEXT: case (equal, 42 : i32) { // CHECK-NEXT: cir.scope { @@ -103,16 +103,7 @@ int sw4(int a) { // CHECK-NEXT: case (default) { // CHECK-NEXT: %5 = cir.cst(2 : i32) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr -// CHECK-NEXT: cir.br ^bb1 -// CHECK-NEXT: ^bb1: // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: cir.return %6 : i32 // CHECK-NEXT: } // CHECK-NEXT: ] -// CHECK-NEXT: } -// CHECK-NEXT: %2 = cir.cst(0 : i32) : i32 -// CHECK-NEXT: cir.store %2, %1 : i32, cir.ptr -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: cir.return %3 : i32 -// CHECK-NEXT: } -// CHECK-NEXT: } \ No newline at end of file diff --git a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp index 7b0ccb48385f..82824aa5d7db 100644 --- a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp @@ -52,7 +52,7 @@ struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { // cir.if %2 { // %3 = cir.cst(3 : i32) : i32 // cir.return %3 : i32 - // } + // } // SmallPtrSet candidateBlocks; for (Block &block : blocks) { @@ -91,7 +91,7 @@ struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { }; // Specialize the template to account for the different build signatures for -// IfOp, ScopeOp and FuncOp. +// IfOp, ScopeOp, FuncOp and SwitchOp. template <> mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp(PatternRewriter &rewriter, @@ -123,10 +123,23 @@ mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( return regionChanged ? success() : failure(); } +template <> +mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( + PatternRewriter &rewriter, cir::SwitchOp switchOp) const { + bool regionChanged = false; + for (auto &r : switchOp.getRegions()) { + if (checkAndRewriteRegion(r, rewriter).succeeded()) + regionChanged = true; + } + + return regionChanged ? success() : failure(); +} + void getMergeCleanupsPatterns(RewritePatternSet &results, MLIRContext *context) { results.add, SimplifyRetYieldBlocks, - SimplifyRetYieldBlocks>(context); + SimplifyRetYieldBlocks, + SimplifyRetYieldBlocks>(context); } struct MergeCleanupsPass : public MergeCleanupsBase { @@ -150,7 +163,7 @@ void MergeCleanupsPass::runOnOperation() { SmallVector opsToSimplify; op->walk([&](Operation *op) { - if (isa(op)) + if (isa(op)) opsToSimplify.push_back(op); }); From 6affd23b548d4b6da068470b7badcaaf74b322dd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Apr 2022 13:35:51 -0700 Subject: [PATCH 0236/2301] [CIR][Clang] Introduce -disable-cir-passes knob This allows us to stop before running MergeCleanups, which should make it easier to write CIR to CIR tests to that specific pass, while allowing us to test some raw output from AST to CIR codegen. --- clang/include/clang/Driver/Options.td | 4 ++++ clang/include/clang/Frontend/FrontendOptions.h | 4 ++++ clang/lib/CIRFrontendAction/CIRGenAction.cpp | 10 +++++++--- clang/lib/Driver/ToolChains/Clang.cpp | 3 +++ clang/lib/Frontend/CompilerInvocation.cpp | 3 +++ clang/test/CIR/driver.c | 2 ++ 6 files changed, 23 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 420e919ce2d2..1a76cf0bd6e7 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3055,6 +3055,10 @@ def flto_EQ : Joined<["-"], "flto=">, HelpText<"Set LTO mode">, Values<"thin,full">; def flto_EQ_jobserver : Flag<["-"], "flto=jobserver">, Visibility<[ClangOption, FlangOption]>, Group, Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; +def disable_cir_passes : Flag<["-"], "disable-cir-passes">, + Visibility<[ClangOption, CC1Option]>, + HelpText<"Disable CIR transformations pipeline">, + MarshallingInfoFlag>; def flto_EQ_auto : Flag<["-"], "flto=auto">, Visibility<[ClangOption, FlangOption]>, Group, Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; def flto : Flag<["-"], "flto">, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index c756ed99319d..6fec975934ad 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -418,6 +418,9 @@ class FrontendOptions { LLVM_PREFERRED_TYPE(bool) unsigned UseClangIRPipeline : 1; + /// Disable Clang IR specific (CIR) passes + unsigned DisableCIRPasses : 1; + CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. @@ -605,6 +608,7 @@ class FrontendOptions { EmitSymbolGraph(false), EmitExtensionSymbolGraphs(false), EmitSymbolGraphSymbolLabelsForTesting(false), EmitPrettySymbolGraphs(false), GenReducedBMI(false), + UseClangIRPipeline(false), DisableCIRPasses(false), TimeTraceGranularity(500), TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index ad113e1fad4d..4b524180d0f1 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -71,6 +71,7 @@ class CIRGenConsumer : public clang::ASTConsumer { CodeGenOptions &codeGenOptions; const TargetOptions &targetOptions; const LangOptions &langOptions; + const FrontendOptions &feOptions; std::unique_ptr outputStream; @@ -85,12 +86,14 @@ class CIRGenConsumer : public clang::ASTConsumer { CodeGenOptions &codeGenOptions, const TargetOptions &targetOptions, const LangOptions &langOptions, + const FrontendOptions &feOptions, std::unique_ptr os) : action(action), compilerInstance(compilerInstance), diagnosticsEngine(diagnosticsEngine), headerSearchOptions(headerSearchOptions), codeGenOptions(codeGenOptions), targetOptions(targetOptions), - langOptions(langOptions), outputStream(std::move(os)), + langOptions(langOptions), feOptions(feOptions), + outputStream(std::move(os)), gen(std::make_unique(codeGenOptions)) { // This is required to match the constructors used during // CodeGenAction. Ultimately, this is required because we want to use @@ -130,7 +133,8 @@ class CIRGenConsumer : public clang::ASTConsumer { switch (action) { case CIRGenAction::OutputType::EmitCIR: if (outputStream && mlirMod) { - runCIRToCIRPasses(mlirMod, mlirCtx.get()); + if (!feOptions.DisableCIRPasses) + runCIRToCIRPasses(mlirMod, mlirCtx.get()); mlir::OpPrintingFlags flags; // FIXME: we cannot roundtrip prettyForm=true right now. flags.enableDebugInfo(/*prettyForm=*/false); @@ -218,7 +222,7 @@ CIRGenAction::CreateASTConsumer(CompilerInstance &ci, StringRef inputFile) { return std::make_unique( action, ci, ci.getDiagnostics(), ci.getHeaderSearchOpts(), ci.getCodeGenOpts(), ci.getTargetOpts(), ci.getLangOpts(), - std::move(out)); + ci.getFrontendOpts(), std::move(out)); } mlir::OwningOpRef diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 7d302f70af94..f0c94daafeaa 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5244,6 +5244,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_fclangir) || Args.hasArg(options::OPT_emit_cir)) CmdArgs.push_back("-fclangir"); + if (Args.hasArg(options::OPT_disable_cir_passes)) + CmdArgs.push_back("-disable-cir-passes"); + if (IsOpenMPDevice) { // We have to pass the triple of the host if compiling for an OpenMP device. std::string NormalizedTriple = diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 55e6ac91b00f..af8225ade74e 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3090,6 +3090,9 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diags.Report(diag::err_drv_argument_only_allowed_with) << "-fsystem-module" << "-emit-module"; + if (Args.hasArg(OPT_disable_cir_passes)) + Opts.DisableCIRPasses = true; + if (Args.hasArg(OPT_aux_target_cpu)) Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu)); if (Args.hasArg(OPT_aux_target_feature)) diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index d1457d73d848..97bef90abb1f 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -4,6 +4,8 @@ // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -disable-cir-passes -S -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR // XFAIL: * void foo() {} From f064b13291ba8a38100c5932b2df15ef6b388467 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Apr 2022 13:47:10 -0700 Subject: [PATCH 0237/2301] [CIR][CodeGen] Fix small corner case for empty cases --- clang/lib/CIR/CIRGenModule.cpp | 4 +++- clang/test/CIR/CodeGen/switch.cpp | 11 +++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 6c3badaed0b6..fdb75b9c85c2 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1488,10 +1488,12 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { return; SmallVector eraseBlocks; + unsigned numBlocks = r.getBlocks().size(); for (auto &block : r.getBlocks()) { // Already cleanup after return operations, which might create // empty blocks if emitted as last stmt. - if (block.empty() && block.hasNoPredecessors() && block.hasNoSuccessors()) + if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() && + block.hasNoSuccessors()) eraseBlocks.push_back(&block); if (block.empty() || diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 2411170c5eec..d556f359de88 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -107,3 +107,14 @@ int sw4(int a) { // CHECK-NEXT: cir.return %6 : i32 // CHECK-NEXT: } // CHECK-NEXT: ] + +void sw5(int a) { + switch (a) { + case 1:; + } +} + +// CHECK: func @sw5 +// CHECK: cir.switch (%1 : i32) [ +// CHECK-NEXT: case (equal, 1 : i32) { +// CHECK-NEXT: cir.yield fallthrough From 6065771183c682d0dc565fd2ffd9a4e610fa8d0a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Apr 2022 14:32:05 -0700 Subject: [PATCH 0238/2301] [CIR][MergeCleanups] Add CIR to CIR tests for merge cleanups --- clang/test/CIR/Transforms/merge-cleanups.cir | 98 ++++++++++++++++++++ clang/tools/cir-tool/cir-tool.cpp | 3 + 2 files changed, 101 insertions(+) create mode 100644 clang/test/CIR/Transforms/merge-cleanups.cir diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir new file mode 100644 index 000000000000..3f73f3f7e46d --- /dev/null +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -0,0 +1,98 @@ +// RUN: cir-tool %s -cir-merge-cleanups -o %t.out.cir +// RUN: FileCheck --input-file=%t.out.cir %s +// XFAIL: * + +module { + func.func @sw1(%arg0: i32, %arg1: i32) { + %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["c", paraminit] {alignment = 4 : i64} + cir.store %arg0, %0 : i32, cir.ptr + cir.store %arg1, %1 : i32, cir.ptr + cir.scope { + %2 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} + %3 = cir.cst(1 : i32) : i32 + cir.store %3, %2 : i32, cir.ptr + %4 = cir.load %0 : cir.ptr , i32 + cir.switch (%4 : i32) [ + case (equal, 0 : i32) { + %5 = cir.load %2 : cir.ptr , i32 + %6 = cir.cst(1 : i32) : i32 + %7 = cir.binop(add, %5, %6) : i32 + cir.store %7, %2 : i32, cir.ptr + cir.br ^bb1 + ^bb1: // pred: ^bb0 + cir.return + }, + case (equal, 1 : i32) { + cir.scope { + cir.scope { + %5 = cir.load %1 : cir.ptr , i32 + %6 = cir.cst(3 : i32) : i32 + %7 = cir.cmp(eq, %5, %6) : i32, !cir.bool + cir.if %7 { + cir.br ^bb1 + ^bb1: // pred: ^bb0 + cir.return + } + } + cir.yield break + } + cir.yield fallthrough + }, + case (equal, 2 : i32) { + cir.scope { + %5 = cir.alloca i32, cir.ptr , ["yolo", cinit] {alignment = 4 : i64} + %6 = cir.load %2 : cir.ptr , i32 + %7 = cir.cst(1 : i32) : i32 + %8 = cir.binop(add, %6, %7) : i32 + cir.store %8, %2 : i32, cir.ptr + %9 = cir.cst(100 : i32) : i32 + cir.store %9, %5 : i32, cir.ptr + cir.br ^bb1 + ^bb1: // pred: ^bb0 + cir.return + } + cir.yield fallthrough + } + ] + } + cir.return + } +} + +// CHECK: cir.switch (%4 : i32) [ +// CHECK-NEXT: case (equal, 0 : i32) { +// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , i32 +// CHECK-NEXT: %6 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %7 = cir.binop(add, %5, %6) : i32 +// CHECK-NEXT: cir.store %7, %2 : i32, cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 1 : i32) { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %5 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %6 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: %7 = cir.cmp(eq, %5, %6) : i32, !cir.bool +// CHECK-NEXT: cir.if %7 { +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield break +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 2 : i32) { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %5 = cir.alloca i32, cir.ptr , ["yolo", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %6 = cir.load %2 : cir.ptr , i32 +// CHECK-NEXT: %7 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %8 = cir.binop(add, %6, %7) : i32 +// CHECK-NEXT: cir.store %8, %2 : i32, cir.ptr +// CHECK-NEXT: %9 = cir.cst(100 : i32) : i32 +// CHECK-NEXT: cir.store %9, %5 : i32, cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: } +// CHECK-NEXT: ] diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index 00fb59388166..a83edb85b86f 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -39,6 +39,9 @@ int main(int argc, char **argv) { ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return mlir::createLifetimeCheckPass(); }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return mlir::createMergeCleanupsPass(); + }); mlir::registerTransformsPasses(); From 57f966e20bf324745b2480ae66a5b7357a2cae7c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Apr 2022 18:40:27 -0700 Subject: [PATCH 0239/2301] [CIR][LifetimeCheck] Add basic switch stmt support Still need to go over corner cases and cover cascading non-empty case fallthroughs --- clang/test/CIR/Transforms/lifetime-switch.cpp | 36 ++++++++++ .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 67 ++++++++++++++++++- 2 files changed, 102 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Transforms/lifetime-switch.cpp diff --git a/clang/test/CIR/Transforms/lifetime-switch.cpp b/clang/test/CIR/Transforms/lifetime-switch.cpp new file mode 100644 index 000000000000..ab10a4a94f94 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-switch.cpp @@ -0,0 +1,36 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null" -verify-diagnostics -o %t-out.cir +// XFAIL: * + +void s0(int b) { + int *p = nullptr; + switch (b) { + default: { + int x = 0; + p = &x; + *p = 42; + } // expected-note {{pointee 'x' invalidated at end of scope}} + } + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} + +void s1(int b) { + int *p = nullptr; + switch (b) { + default: + int x = 0; + p = &x; + *p = 42; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} + +void s2(int b) { + int *p = nullptr; + switch (int x = 0; b) { + default: + p = &x; + *p = 42; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index addda79c86da..24210fec3626 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -31,6 +31,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkRegion(Region ®ion); void checkIf(IfOp op); + void checkSwitch(SwitchOp op); void checkAlloca(AllocaOp op); void checkStore(StoreOp op); void checkLoad(LoadOp op); @@ -339,6 +340,68 @@ void LifetimeCheckPass::joinPmaps(SmallVectorImpl &pmaps) { } } +void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { + // 2.4.7. A switch(cond) is treated as if it were an equivalent series of + // non-nested if statements with single evaluation of cond; for example: + // + // switch (a) { + // case 1:/*1*/ + // case 2:/*2*/ break; + // default:/*3*/ + // } + // + // is treated as: + // + // if (auto& a=a; a==1) {/*1*/} + // else if (a==1 || a==2) {/*2*/} + // else {/*3*/}. + // + // See checkIf for additional explanations. + SmallVector pmapOps; + + // If there are no regions, pmap is the same. + if (switchOp.getRegions().empty()) + return; + + auto isCaseFallthroughTerminated = [&](Region &r) { + assert(r.getBlocks().size() == 1 && "cannot yet handle branches"); + Block &block = r.back(); + assert(!block.empty() && "case regions cannot be empty"); + + // FIXME: do something special about return terminated? + YieldOp y = dyn_cast(block.back()); + if (!y) + return false; + if (y.isFallthrough()) + return true; + return false; + }; + + auto regions = switchOp.getRegions(); + for (unsigned regionCurrent = 0, regionPastEnd = regions.size(); + regionCurrent != regionPastEnd; ++regionCurrent) { + // Intentional pmap copy, basis to start new path. + PMapType locaCasePmap = getPmap(); + PmapGuard pmapGuard{*this, &locaCasePmap}; + + // At any given point, fallbacks (if not empty) will increase the + // number of control-flow possibilities. For each region ending up + // with a fallback, keep computing the pmap until we hit a region + // that has a non-fallback terminator for the region. + unsigned idx = regionCurrent; + while (idx < regionPastEnd && isCaseFallthroughTerminated(regions[idx])) { + // Note that for 'if' regions we use checkRegionWithScope, since + // there are lexical scopes associated with each region, this is + // not the case for switch's. + checkRegion(regions[idx]); + idx++; + } + pmapOps.push_back(locaCasePmap); + } + + joinPmaps(pmapOps); +} + void LifetimeCheckPass::checkIf(IfOp ifOp) { // Both then and else create their own lexical scopes, take that into account // while checking then/else. @@ -500,7 +563,7 @@ void LifetimeCheckPass::checkOperation(Operation *op) { // // No need to create a new pmap when entering a new scope since it // doesn't cause control flow to diverge (as it does in presence - // of cir::IfOp). + // of cir::IfOp or cir::SwitchOp). // // Also note that for dangling pointers coming from if init stmts // should be caught just fine, given that a ScopeOp embraces a IfOp. @@ -515,6 +578,8 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return checkFunc(op); if (auto ifOp = dyn_cast(op)) return checkIf(ifOp); + if (auto switchOp = dyn_cast(op)) + return checkSwitch(switchOp); if (auto allocaOp = dyn_cast(op)) return checkAlloca(allocaOp); if (auto storeOp = dyn_cast(op)) From 7b30a97296d5ba50972439fad9d7e42409c3d161 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Apr 2022 15:28:02 -0700 Subject: [PATCH 0240/2301] [CIR][NFC] Change CaseAttr first element to be a ArrayAttr instead This also simplifies the way to handle default cases. --- clang/lib/CIR/CIRGenModule.cpp | 6 +++++- clang/test/CIR/CodeGen/switch.cpp | 1 - clang/test/CIR/IR/switch.cir | 1 - clang/test/CIR/Transforms/merge-cleanups.cir | 1 - mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 19 +++++++++++++------ 5 files changed, 18 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index fdb75b9c85c2..e1201158fb34 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1358,10 +1358,14 @@ mlir::LogicalResult CIRGenModule::buildCaseStmt(const CaseStmt &S, "case ranges not implemented"); auto res = mlir::success(); + SmallVector caseEltValueListAttr; auto intVal = S.getLHS()->EvaluateKnownConstInt(getASTContext()); + caseEltValueListAttr.push_back(mlir::IntegerAttr::get(condType, intVal)); + auto caseValueList = builder.getArrayAttr(caseEltValueListAttr); + auto *ctx = builder.getContext(); caseEntry = mlir::cir::CaseAttr::get( - ctx, builder.getArrayAttr({}), + ctx, caseValueList, CaseOpKindAttr::get(ctx, mlir::cir::CaseOpKind::Equal)); { mlir::OpBuilder::InsertionGuard guardCase(builder); diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index d556f359de88..a3ae0fd4dc6f 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * void sw1(int a) { switch (int b = 1; a) { diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index caa671100539..11aa5fe78081 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -1,5 +1,4 @@ // RUN: cir-tool %s | FileCheck %s -// XFAIL: * func.func @s0() { %1 = cir.cst(2 : i32) : i32 diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 3f73f3f7e46d..49fd61296bb6 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-merge-cleanups -o %t.out.cir // RUN: FileCheck --input-file=%t.out.cir %s -// XFAIL: * module { func.func @sw1(%arg0: i32, %arg1: i32) { diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 85e3555ea960..d3c60ed61bf6 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -587,11 +587,13 @@ parseSwitchOp(OpAsmParser &parser, int64_t val = 0; if (parser.parseInteger(val).failed()) return ::mlir::failure(); + + SmallVector caseEltValueListAttr; + caseEltValueListAttr.push_back(mlir::IntegerAttr::get(intCondType, val)); + auto caseValueList = parser.getBuilder().getArrayAttr(caseEltValueListAttr); + cases.push_back( - cir::CaseAttr::get(parser.getContext(), - parser.getBuilder().getArrayAttr( - {mlir::IntegerAttr::get(intCondType, val)}), - kindAttr)); + cir::CaseAttr::get(parser.getContext(), caseValueList, kindAttr)); if (succeeded(parser.parseOptionalColon())) { Type caseIntTy; if (parser.parseType(caseIntTy).failed()) @@ -659,9 +661,14 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, p << caseValueStr; // Case value - if (kind != cir::CaseOpKind::Default) { + switch (kind) { + case cir::CaseOpKind::Equal: { p << ", "; - p.printStrippedAttrOrType(attr.getValue()); + p.printStrippedAttrOrType(attr.getValue()[0]); + break; + } + case cir::CaseOpKind::Default: + break; } p << ") "; From 779fd0bad9a3e5b2e96174f1f039bdf5459abc0c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Apr 2022 15:29:12 -0700 Subject: [PATCH 0241/2301] [CIR] Introduce a new case kind, for handling multiple case values at once --- clang/test/CIR/IR/switch.cir | 6 ++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 22 +++++--- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 65 +++++++++++++++++----- 3 files changed, 72 insertions(+), 21 deletions(-) diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index 11aa5fe78081..0bd62fd924d1 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -9,6 +9,9 @@ func.func @s0() { case (equal, 3) { cir.yield fallthrough }, + case (anyof, [6, 7, 8] : i32) { + cir.yield break + }, case (equal, 5 : i32) { cir.yield } @@ -23,6 +26,9 @@ func.func @s0() { // CHECK-NEXT: case (equal, 3 : i32) { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, +// CHECK-NEXT: case (anyof, [6, 7, 8] : i32) { +// CHECK-NEXT: cir.yield break +// CHECK-NEXT: }, // CHECK-NEXT: case (equal, 5 : i32) { // CHECK-NEXT: cir.yield // CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 95bf50e2b29e..6f21dd7d6cc1 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -563,11 +563,12 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { // because "default" is a C++ reserved keyword and can't show up in a enum. def CaseOpKind_DT : I32EnumAttrCase<"Default", 1>; def CaseOpKind_EQ : I32EnumAttrCase<"Equal", 2>; +def CaseOpKind_AO : I32EnumAttrCase<"Anyof", 3>; def CaseOpKind : I32EnumAttr< "CaseOpKind", "case kind", - [CaseOpKind_DT, CaseOpKind_EQ]> { + [CaseOpKind_DT, CaseOpKind_EQ, CaseOpKind_AO]> { let cppNamespace = "::mlir::cir"; } @@ -600,13 +601,16 @@ def SwitchOp : CIR_Op<"switch", is an integral condition value. A variadic list of "case" attribute operands and regions track the possible - control flow within `cir.switch`. Each "case" first operand is either - "equal" (meaning equality comparision against the condition) and "default" - for any other value. An optional second operand denotes the actual value, - its type should match the condition and can be optionally present. + control flow within `cir.switch`. Each "case" first operand could be: + - "equal": equality check against the condition. + - "anyof": equals to any of the values in a following list. + - "default": any other value. - Each region contains only one block and must be explicitly terminated with - a cir.yield operation. + An optional second operand denotes the actual value (or list of). + Types value(s) should match the condition and among themselves (in the list + case). + + Each case region must be explicitly terminated with a cir.yield operation. Examples: @@ -616,6 +620,10 @@ def SwitchOp : CIR_Op<"switch", ... cir.yield break }, + case (anyof, [1, 2, 3] : i32) { + ... + cir.return ... + } case (default) { ... cir.yield fallthrough diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index d3c60ed61bf6..de680e60318b 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -546,7 +546,7 @@ parseSwitchOp(OpAsmParser &parser, // FIXME: since a few names can't be used as enum (default) we declared // them in CIROps.td capitalized, but we really wanna use lower case on // clang IR asm form. - if (parser.parseOptionalKeyword(&attrStr, {"default", "equal"})) { + if (parser.parseOptionalKeyword(&attrStr, {"default", "equal", "anyof"})) { ::mlir::StringAttr attrVal; ::mlir::OptionalParseResult parseResult = parser.parseOptionalAttribute( attrVal, parser.getBuilder().getNoneType(), "kind", attrStorage); @@ -560,7 +560,7 @@ parseSwitchOp(OpAsmParser &parser, if (attrStr.empty()) { return parser.emitError( loc, "expected string or keyword containing one of the following " - "enum values for attribute 'kind' [default, equal]"); + "enum values for attribute 'kind' [default, equal, anyof]"); } std::string attrString = attrStr.str(); @@ -574,24 +574,49 @@ parseSwitchOp(OpAsmParser &parser, auto kindAttr = ::mlir::cir::CaseOpKindAttr::get( parser.getBuilder().getContext(), attrOptional.value()); - if (parser.parseOptionalComma().failed() && - kindAttr.getValue() == cir::CaseOpKind::Default) { + // `,` value or `,` [values,...] + SmallVector caseEltValueListAttr; + mlir::ArrayAttr caseValueList; + + switch (kindAttr.getValue()) { + case cir::CaseOpKind::Equal: { + if (parser.parseComma().failed()) + return mlir::failure(); + int64_t val = 0; + if (parser.parseInteger(val).failed()) + return ::mlir::failure(); + caseEltValueListAttr.push_back(mlir::IntegerAttr::get(intCondType, val)); + break; + } + case cir::CaseOpKind::Anyof: { + if (parser.parseComma().failed()) + return mlir::failure(); + if (parser.parseLSquare().failed()) + return mlir::failure(); + auto result = parser.parseCommaSeparatedList([&]() { + int64_t val = 0; + if (parser.parseInteger(val).failed()) + return ::mlir::failure(); + caseEltValueListAttr.push_back( + mlir::IntegerAttr::get(intCondType, val)); + return ::mlir::success(); + }); + if (result.failed()) + return mlir::failure(); + if (parser.parseRSquare().failed()) + return mlir::failure(); + break; + } + case cir::CaseOpKind::Default: { if (parser.parseRParen().failed()) return parser.emitError(parser.getCurrentLocation(), "expected ')'"); cases.push_back(cir::CaseAttr::get( parser.getContext(), parser.getBuilder().getArrayAttr({}), kindAttr)); return parseAndCheckRegion(); } + } - // `,` value comes next (in the future perhaps a list?) - int64_t val = 0; - if (parser.parseInteger(val).failed()) - return ::mlir::failure(); - - SmallVector caseEltValueListAttr; - caseEltValueListAttr.push_back(mlir::IntegerAttr::get(intCondType, val)); - auto caseValueList = parser.getBuilder().getArrayAttr(caseEltValueListAttr); - + caseValueList = parser.getBuilder().getArrayAttr(caseEltValueListAttr); cases.push_back( cir::CaseAttr::get(parser.getContext(), caseValueList, kindAttr)); if (succeeded(parser.parseOptionalColon())) { @@ -650,7 +675,8 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, auto attr = casesAttr[idx].cast(); auto kind = attr.getKind().getValue(); - assert((kind == CaseOpKind::Default || kind == CaseOpKind::Equal) && + assert((kind == CaseOpKind::Default || kind == CaseOpKind::Equal || + kind == CaseOpKind::Anyof) && "unknown case"); // Case kind @@ -667,6 +693,17 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, p.printStrippedAttrOrType(attr.getValue()[0]); break; } + case cir::CaseOpKind::Anyof: { + p << ", ["; + llvm::interleaveComma(attr.getValue(), p, [&](const Attribute &a) { + p.printAttributeWithoutType(a); + }); + p << "] : "; + auto typedAttr = attr.getValue()[0].dyn_cast(); + assert(typedAttr && "this should never not have a type!"); + p.printType(typedAttr.getType()); + break; + } case cir::CaseOpKind::Default: break; } From 8d639eb6419d7f80d3d96104e3bea508a50e5ed8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Apr 2022 20:27:13 -0700 Subject: [PATCH 0242/2301] [CIR][CodeGen] Use case 'anyof' form and add support for cascading cases --- clang/lib/CIR/CIRGenModule.cpp | 29 ++++++++++++--------- clang/test/CIR/CodeGen/switch.cpp | 43 +++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index e1201158fb34..5cd923dd10d0 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1358,19 +1358,31 @@ mlir::LogicalResult CIRGenModule::buildCaseStmt(const CaseStmt &S, "case ranges not implemented"); auto res = mlir::success(); + const CaseStmt *caseStmt = &S; SmallVector caseEltValueListAttr; - auto intVal = S.getLHS()->EvaluateKnownConstInt(getASTContext()); - caseEltValueListAttr.push_back(mlir::IntegerAttr::get(condType, intVal)); + // Fold cascading cases whenever possible to simplify codegen a bit. + while (true) { + auto intVal = caseStmt->getLHS()->EvaluateKnownConstInt(getASTContext()); + caseEltValueListAttr.push_back(mlir::IntegerAttr::get(condType, intVal)); + if (isa(caseStmt->getSubStmt())) + caseStmt = dyn_cast_or_null(caseStmt->getSubStmt()); + else + break; + } + auto caseValueList = builder.getArrayAttr(caseEltValueListAttr); auto *ctx = builder.getContext(); caseEntry = mlir::cir::CaseAttr::get( ctx, caseValueList, - CaseOpKindAttr::get(ctx, mlir::cir::CaseOpKind::Equal)); + CaseOpKindAttr::get(ctx, caseEltValueListAttr.size() > 1 + ? mlir::cir::CaseOpKind::Anyof + : mlir::cir::CaseOpKind::Equal)); { mlir::OpBuilder::InsertionGuard guardCase(builder); - res = buildStmt(S.getSubStmt(), - /*useCurrentScope=*/!isa(S.getSubStmt())); + res = buildStmt( + caseStmt->getSubStmt(), + /*useCurrentScope=*/!isa(caseStmt->getSubStmt())); } // TODO: likelihood @@ -1424,14 +1436,7 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { continue; } - // FIXME: add support for empty case fallthrough auto *caseStmt = dyn_cast(c); - if (caseStmt) { - const CaseStmt *nestedCase = - dyn_cast(caseStmt->getSubStmt()); - assert(!nestedCase && "empty case fallthrough NYI"); - } - CaseAttr caseAttr; { mlir::OpBuilder::InsertionGuard guardCase(builder); diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index a3ae0fd4dc6f..497f39034647 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -117,3 +117,46 @@ void sw5(int a) { // CHECK: cir.switch (%1 : i32) [ // CHECK-NEXT: case (equal, 1 : i32) { // CHECK-NEXT: cir.yield fallthrough + +void sw6(int a) { + switch (a) { + case 0: + case 1: + case 2: + break; + case 3: + case 4: + case 5: + break; + } +} + +// CHECK: func @sw6 +// CHECK: cir.switch (%1 : i32) [ +// CHECK-NEXT: case (anyof, [0, 1, 2] : i32) { +// CHECK-NEXT: cir.yield break +// CHECK-NEXT: }, +// CHECK-NEXT: case (anyof, [3, 4, 5] : i32) { +// CHECK-NEXT: cir.yield break +// CHECK-NEXT: } + +void sw7(int a) { + switch (a) { + case 0: + case 1: + case 2: + int x; + case 3: + case 4: + case 5: + break; + } +} + +// CHECK: func @sw7 +// CHECK: case (anyof, [0, 1, 2] : i32) { +// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: }, +// CHECK-NEXT: case (anyof, [3, 4, 5] : i32) { +// CHECK-NEXT: cir.yield break +// CHECK-NEXT: } From 398427a6c82cbd40e011019df07db8f871b1d4f1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 11 Apr 2022 11:21:09 -0700 Subject: [PATCH 0243/2301] [CIR][LifetimeCheck] Do not stop before checking region with break yield --- .../CIR/Transforms/lifetime-check-remarks.cpp | 16 +++++++++++++++- clang/test/CIR/Transforms/lifetime-switch.cpp | 12 ++++++++++++ .../lib/Dialect/CIR/Transforms/LifetimeCheck.cpp | 4 +++- 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/clang/test/CIR/Transforms/lifetime-check-remarks.cpp b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp index 68df5ec89f2b..ea48469b20b6 100644 --- a/clang/test/CIR/Transforms/lifetime-check-remarks.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: cir-tool %t.cir -cir-lifetime-check="remarks=pset" -verify-diagnostics -o %t-out.cir // XFAIL: * @@ -25,3 +25,17 @@ int *p1(bool b = true) { // expected-remark@-1 {{pset => { invalid, nullptr }}} return p; } + +void p2(int b) { + int *p = nullptr; + switch (int x = 0; b) { + case 1: + p = &x; + case 2: + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { nullptr }}} + break; + } + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { nullptr, invalid }}} +} diff --git a/clang/test/CIR/Transforms/lifetime-switch.cpp b/clang/test/CIR/Transforms/lifetime-switch.cpp index ab10a4a94f94..15e91cc76a6d 100644 --- a/clang/test/CIR/Transforms/lifetime-switch.cpp +++ b/clang/test/CIR/Transforms/lifetime-switch.cpp @@ -34,3 +34,15 @@ void s2(int b) { } // expected-note {{pointee 'x' invalidated at end of scope}} *p = 42; // expected-warning {{use of invalid pointer 'p'}} } + +void s3(int b) { + int *p = nullptr; // expected-note {{invalidated here}} + switch (int x = 0; b) { + case 1: + p = &x; + case 2: + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + break; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 24210fec3626..69e1f211f6c1 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -389,11 +389,13 @@ void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { // with a fallback, keep computing the pmap until we hit a region // that has a non-fallback terminator for the region. unsigned idx = regionCurrent; - while (idx < regionPastEnd && isCaseFallthroughTerminated(regions[idx])) { + while (idx < regionPastEnd) { // Note that for 'if' regions we use checkRegionWithScope, since // there are lexical scopes associated with each region, this is // not the case for switch's. checkRegion(regions[idx]); + if (!isCaseFallthroughTerminated(regions[idx])) + break; idx++; } pmapOps.push_back(locaCasePmap); From 4db8177d8d1fbc62b9ee1b2ecd63cd66996065de Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 13 Apr 2022 15:35:36 -0400 Subject: [PATCH 0244/2301] [CIR] Explicitly depend on MLIRCIR for clangCIR library I'm not sure if this is the key, but we're definitely missing some MLIR dependencies here. This did fix one build issue for me. --- clang/lib/CIR/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index d8c738deeebb..b2c31280d9af 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -26,6 +26,7 @@ add_clang_library(clangCIR TargetInfo.cpp DEPENDS + MLIRCIR MLIRCIROpsIncGen LINK_LIBS From 2725da8bff7831cf603701757fc015d9988fe827 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 13 Apr 2022 15:36:31 -0400 Subject: [PATCH 0245/2301] [CIR] Explicitly set a var to nullptr in the constructor This was being left to a random value given that it didn't have an initializer. The patch that actually sets the proper value for CGF comes up later. This fixes call.c test with optimizations. --- clang/lib/CIR/CIRGenFunction.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 5a5a02d0f88b..773d3f8d2e9b 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -22,7 +22,7 @@ using namespace cir; using namespace clang; CIRGenFunction::CIRGenFunction(CIRGenModule &CGM) - : CGM{CGM}, SanOpts(CGM.getLangOpts().Sanitize) {} + : CGM{CGM}, CurFuncDecl(nullptr), SanOpts(CGM.getLangOpts().Sanitize) {} clang::ASTContext &CIRGenFunction::getContext() const { return CGM.getASTContext(); From ad3e10bf82ad013db072a86db323f469263d666c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 21 Mar 2022 22:03:43 -0400 Subject: [PATCH 0246/2301] [CIR] Support returning and argument floating point types This adds some of the infrastructure for floating point arg/return types. The GetSSE type method is stubbed out and just returns the input. This propagates upwards in a later patch to avoid doing any ABI concerns at this high of a level. Keep it around for now, though, for the same reason as always: this helps us know what's done in clang. --- clang/lib/CIR/CIRGenCall.cpp | 4 ++ clang/lib/CIR/CIRGenFunctionInfo.h | 37 ++++++++++++ clang/lib/CIR/TargetInfo.cpp | 94 ++++++++++++++++++++++++------ clang/test/CIR/CodeGen/call.c | 20 ++++++- 4 files changed, 136 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 8d0ddd0b2658..501fac42d52e 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -133,6 +133,7 @@ void ClangToCIRArgMapping::construct(const ASTContext &Context, switch (AI.getKind()) { default: assert(false && "NYI"); + case ABIArgInfo::Extend: case ABIArgInfo::Direct: { assert(!AI.getCoerceToType().dyn_cast() && "NYI"); // FIXME: handle sseregparm someday... @@ -175,6 +176,8 @@ mlir::FunctionType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { // TODO: where to get VoidTy? resultType = nullptr; break; + + case ABIArgInfo::Extend: case ABIArgInfo::Direct: resultType = retAI.getCoerceToType(); break; @@ -205,6 +208,7 @@ mlir::FunctionType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { switch (ArgInfo.getKind()) { default: assert(false && "NYI"); + case ABIArgInfo::Extend: case ABIArgInfo::Direct: { mlir::Type argType = ArgInfo.getCoerceToType(); // TODO: handle the test against llvm::StructType from codegen diff --git a/clang/lib/CIR/CIRGenFunctionInfo.h b/clang/lib/CIR/CIRGenFunctionInfo.h index 275b77848313..312836667fe7 100644 --- a/clang/lib/CIR/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CIRGenFunctionInfo.h @@ -102,6 +102,7 @@ class ABIArgInfo { }; Kind TheKind; bool CanBeFlattened : 1; // isDirect() + bool SignExt : 1; // isExtend() bool canHavePaddingType() const { return isDirect() || isExtend() || isIndirect() || isIndirectAliased() || @@ -130,6 +131,37 @@ class ABIArgInfo { return AI; } + static ABIArgInfo getSignExtend(clang::QualType Ty, mlir::Type T = nullptr) { + assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); + auto AI = ABIArgInfo(Extend); + AI.setCoerceToType(T); + AI.setPaddingType(nullptr); + AI.setDirectOffset(0); + AI.setDirectAlign(0); + AI.setSignExt(true); + return AI; + } + + static ABIArgInfo getZeroExtend(clang::QualType Ty, mlir::Type T = nullptr) { + assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); + auto AI = ABIArgInfo(Extend); + AI.setCoerceToType(T); + AI.setPaddingType(nullptr); + AI.setDirectOffset(0); + AI.setDirectAlign(0); + AI.setSignExt(false); + return AI; + } + + // ABIArgInfo will record the argument as being extended based on the sign of + // it's type. + static ABIArgInfo getExtend(clang::QualType Ty, mlir::Type T = nullptr) { + assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); + if (Ty->hasSignedIntegerRepresentation()) + return getSignExtend(Ty, T); + return getZeroExtend(Ty, T); + } + static ABIArgInfo getIgnore() { return ABIArgInfo(Ignore); } Kind getKind() const { return TheKind; } @@ -161,6 +193,11 @@ class ABIArgInfo { DirectAttr.Align = Align; } + void setSignExt(bool SExt) { + assert(isExtend() && "Invalid kind!"); + SignExt = SExt; + } + void setCanBeFlattened(bool Flatten) { assert(isDirect() && "Invalid kind!"); CanBeFlattened = Flatten; diff --git a/clang/lib/CIR/TargetInfo.cpp b/clang/lib/CIR/TargetInfo.cpp index 5bf5b2abec68..63b42e75882c 100644 --- a/clang/lib/CIR/TargetInfo.cpp +++ b/clang/lib/CIR/TargetInfo.cpp @@ -67,6 +67,10 @@ class X86_64ABIInfo : public ABIInfo { void classify(clang::QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi, bool isNamedArg) const; + mlir::Type GetSSETypeAtOffset(mlir::Type CIRType, unsigned CIROffset, + clang::QualType SourceTy, + unsigned SourceOffset) const; + ABIArgInfo classifyReturnType(QualType RetTy) const; ABIArgInfo classifyArgumentType(clang::QualType Ty, unsigned freeIntRegs, @@ -175,10 +179,9 @@ mlir::Type X86_64ABIInfo::GetINTEGERTypeAtOffset(mlir::Type CIRType, unsigned CIROffset, QualType SourceTy, unsigned SourceOffset) const { + // TODO: entirely stubbed out assert(CIROffset == 0 && "NYI"); assert(SourceOffset == 0 && "NYI"); - // TODO: this entire function. It's safe to now just to let the integer type - // be used as is since we aren't actually generating anything. return CIRType; } @@ -216,10 +219,22 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, // that the parameter gets the right LLVM IR attributes. if (Hi == NoClass && ResType.isa()) { assert(!Ty->getAs() && "NYI"); - assert(!isPromotableIntegerTypeForABI(Ty) && "NYI"); + if (Ty->isSignedIntegerOrEnumerationType() && + isPromotableIntegerTypeForABI(Ty)) + return ABIArgInfo::getExtend(Ty); } break; + + // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next available SSE + // register is used, the registers are taken in the order from %xmm0 to + // %xmm7. + case SSE: { + mlir::Type CIRType = CGT.ConvertType(Ty); + ResType = GetSSETypeAtOffset(CIRType, 0, Ty, 0); + ++neededSSE; + break; + } } mlir::Type HighPart = nullptr; @@ -247,21 +262,60 @@ bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, bool isNamedArg) const { + // FIXME: This code can be simplified by introducing a simple value class for + // Class pairs with appropriate constructor methods for the various + // situations. + + // FIXME: Some of the split computations are wrong; unaligned vectors + // shouldn't be passed in registers for example, so there is no chance they + // can straddle an eightbyte. Verify & simplify. + Lo = Hi = NoClass; Class &Current = OffsetBase < 64 ? Lo : Hi; Current = Memory; - auto *BT = Ty->getAs(); - assert(BT && "Only builtin types implemented."); - BuiltinType::Kind k = BT->getKind(); - if (k == BuiltinType::Void) - Current = NoClass; - else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { + if (const auto *BT = Ty->getAs()) { + BuiltinType::Kind k = BT->getKind(); + if (k == BuiltinType::Void) { + Current = NoClass; + } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { + assert(false && "NYI"); + Lo = Integer; + Hi = Integer; + } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { + Current = Integer; + } else if (k == BuiltinType::Float || k == BuiltinType::Double || + k == BuiltinType::Float16) { + Current = SSE; + } else if (k == BuiltinType::LongDouble) { + assert(false && "NYI"); + } else + assert(false && + "Only void and Integer supported so far for builtin types"); + // FIXME: _Decimal32 and _Decimal64 are SSE. + // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). + return; + } + + assert(!Ty->getAs() && "Enums NYI"); + if (Ty->hasPointerRepresentation()) { Current = Integer; - } else { - assert(false && "Only void and Integer supported so far"); + return; } - return; + + assert(false && "Nothing else implemented yet"); +} + +/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the +/// low 8 bytes of an XMM register, corresponding to the SSE class. +mlir::Type X86_64ABIInfo::GetSSETypeAtOffset(mlir::Type CIRType, + unsigned int CIROffset, + clang::QualType SourceTy, + unsigned int SourceOffset) const { + // TODO: entirely stubbed out + assert(CIROffset == 0 && "NYI"); + assert(SourceOffset == 0 && "NYI"); + return CIRType; } ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { @@ -275,8 +329,8 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); mlir::Type ResType = nullptr; - assert(Lo == NoClass || - Lo == Integer && "Only NoClass and Integer supported so far"); + assert(Lo == NoClass || Lo == Integer || + Lo == SSE && "Only NoClass and Integer supported so far"); switch (Lo) { case NoClass: @@ -298,11 +352,17 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isIntegralOrEnumerationType() && isPromotableIntegerTypeForABI(RetTy)) { - assert(false && "extended types NYI"); + return ABIArgInfo::getExtend(RetTy); } - break; } - llvm_unreachable("ResType as intenger is only case currently implemented."); + break; + + // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next available SSE + // register of the sequence %xmm0, %xmm1 is used. + case SSE: + ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); + break; + default: llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 0851f099f5e8..148225820f8f 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -6,8 +6,11 @@ void a(void) {} int b(int a, int b) { return a + b; } +double c(double a, double b) { + return a + b; +} -void c(void) { +void d(void) { a(); b(0, 1); } @@ -29,7 +32,20 @@ void c(void) { // CHECK: %6 = cir.load %2 : cir.ptr , i32 // CHECK: cir.return %6 // CHECK: } -// CHECK: func @c() { +// CHECK: func @c(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { +// CHECK: %0 = cir.alloca f64, cir.ptr , ["a", paraminit] +// CHECK: %1 = cir.alloca f64, cir.ptr , ["b", paraminit] +// CHECK: %2 = cir.alloca f64, cir.ptr , ["__retval", uninitialized] +// CHECK: cir.store %arg0, %0 : f64, cir.ptr +// CHECK: cir.store %arg1, %1 : f64, cir.ptr +// CHECK: %3 = cir.load %0 : cir.ptr , f64 +// CHECK: %4 = cir.load %1 : cir.ptr , f64 +// CHECK: %5 = cir.binop(add, %3, %4) : f64 +// CHECK: cir.store %5, %2 : f64, cir.ptr +// CHECK: %6 = cir.load %2 : cir.ptr , f64 +// CHECK: cir.return %6 : f64 +// CHECK: } +// CHECK: func @d() { // CHECK: call @a() : () -> () // CHECK: %0 = cir.cst(0 : i32) : i32 // CHECK: %1 = cir.cst(1 : i32) : i32 From bdc1568bd0e86cb36983ca50bd6dfe1dae9bc603 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 21 Mar 2022 22:07:53 -0400 Subject: [PATCH 0247/2301] [CIR] Switch buildFunc to using CIRGenTypes::GetFunctionType Replace the simplified version of the function type getter with the one modeled from CodeGen --- clang/lib/CIR/ABIInfo.h | 1 + clang/lib/CIR/CIRGenModule.cpp | 9 +-------- clang/lib/CIR/TargetInfo.cpp | 5 ++++- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/ABIInfo.h b/clang/lib/CIR/ABIInfo.h index d77b126f36d6..99dbac14e209 100644 --- a/clang/lib/CIR/ABIInfo.h +++ b/clang/lib/CIR/ABIInfo.h @@ -30,6 +30,7 @@ class ABIInfo { virtual ~ABIInfo(); CIRGenCXXABI &getCXXABI() const; + clang::ASTContext &getContext() const; virtual void computeInfo(CIRGenFunctionInfo &FI) const = 0; diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 5cd923dd10d0..18779323fc37 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1878,20 +1878,13 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { assert(!MD && "methods not implemented"); auto fnLoc = getLoc(FD->getSourceRange()); - // Create an MLIR function for the given prototype. - llvm::SmallVector argTypes; - - for (auto *Param : FD->parameters()) - argTypes.push_back(getCIRType(Param->getType())); - CurCGF->FnRetQualTy = FD->getReturnType(); mlir::TypeRange FnTyRange = {}; if (!CurCGF->FnRetQualTy->isVoidType()) { CurCGF->FnRetTy = getCIRType(CurCGF->FnRetQualTy); - FnTyRange = mlir::TypeRange{*CurCGF->FnRetTy}; } + auto funcType = getTypes().GetFunctionType(GlobalDecl(FD)); - auto funcType = builder.getFunctionType(argTypes, FnTyRange); mlir::FuncOp function = mlir::FuncOp::create(fnLoc, FD->getName(), funcType); if (!function) return nullptr; diff --git a/clang/lib/CIR/TargetInfo.cpp b/clang/lib/CIR/TargetInfo.cpp index 63b42e75882c..10ecd506a5b6 100644 --- a/clang/lib/CIR/TargetInfo.cpp +++ b/clang/lib/CIR/TargetInfo.cpp @@ -107,6 +107,8 @@ static bool classifyReturnType(const CIRGenCXXABI &CXXABI, CIRGenCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); } +clang::ASTContext &ABIInfo::getContext() const { return CGT.getContext(); } + ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, unsigned freeIntRegs) const { assert(false && "NYI"); @@ -253,7 +255,8 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ABIInfo::~ABIInfo() {} bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { - assert(false && "NYI"); + if (getContext().isPromotableIntegerType(Ty)) + return true; assert(!Ty->getAs() && "NYI"); From 5aa01d0848a269e50e4022016faa67170a7b34c3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 21 Mar 2022 22:08:55 -0400 Subject: [PATCH 0248/2301] [CIR] Shortcut `computeInfo` to avoid making ABI decisions We don't want to make ABI decisions this early on. We want to delay that to a lowering phase. So instead of actually doing the ABI considerations here via the X86_64ABIInfo machinery just shortcut and assume all arguments and returns are the direct ABI types. --- clang/lib/CIR/TargetInfo.cpp | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/clang/lib/CIR/TargetInfo.cpp b/clang/lib/CIR/TargetInfo.cpp index 10ecd506a5b6..d38c3992ebbf 100644 --- a/clang/lib/CIR/TargetInfo.cpp +++ b/clang/lib/CIR/TargetInfo.cpp @@ -114,7 +114,37 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, assert(false && "NYI"); } +static bool testIfIsVoidTy(QualType Ty) { + const auto *BT = Ty->getAs(); + if (!BT) + return false; + + BuiltinType::Kind k = BT->getKind(); + return k == BuiltinType::Void; +} + void X86_64ABIInfo::computeInfo(CIRGenFunctionInfo &FI) const { + // Top leevl CIR has unlimited arguments and return types. Lowering for ABI + // specific concerns should happen during a lowering phase. Assume everything + // is direct for now. + for (CIRGenFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it) { + if (testIfIsVoidTy(it->type)) + it->info = ABIArgInfo::getIgnore(); + else + it->info = ABIArgInfo::getDirect(CGT.ConvertType(it->type)); + } + auto RetTy = FI.getReturnType(); + if (testIfIsVoidTy(RetTy)) + FI.getReturnInfo() = ABIArgInfo::getIgnore(); + else + FI.getReturnInfo() = ABIArgInfo::getDirect(CGT.ConvertType(RetTy)); + + return; + + // TODO: + llvm_unreachable("Everything below here is from codegen. We shouldn't be " + "computing ABI info until lowering"); const unsigned CallingConv = FI.getCallingConvention(); assert(CallingConv == cir::CallingConv::C && "C is the only supported CC"); From b34d5b7a9747a96eb509e62d289542360040f3ba Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 11 Apr 2022 14:23:51 -0700 Subject: [PATCH 0249/2301] [CIR] Introduce cir.loop and skeleton for ForStmt codegen - Add cir.loop and implement some loop and branch traits - Add cir.loop test and few additions to dialect verifier --- clang/lib/CIR/CIRGenModule.cpp | 157 +++++++++++++++++- clang/lib/CIR/CIRGenModule.h | 1 + clang/test/CIR/IR/loop.cir | 52 ++++++ mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 1 + mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 43 ++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 50 ++++++ 6 files changed, 301 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/IR/loop.cir diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 18779323fc37..486729e32c75 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1529,6 +1529,158 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { return mlir::success(); } +mlir::LogicalResult CIRGenModule::buildForStmt(const ForStmt &S) { + // TODO: pass in array of attributes. + + auto res = mlir::success(); + + auto forStmtBuilder = [&]() -> mlir::LogicalResult { + auto forRes = mlir::success(); + // Evaluate the first part before the loop. + if (S.getInit()) + forRes = buildStmt(S.getInit(), /*useCurrentScope=*/true); + + return forRes; + }; + + // The switch scope contains the full source range for SwitchStmt. + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + res = forStmtBuilder(); + }); + + if (res.failed()) + return res; + + assert(0 && "unimplemented"); + + // JumpDest LoopExit = getJumpDestInCurrentScope("for.end"); + + // LexicalScope ForScope(*this, S.getSourceRange()); + + // // Evaluate the first part before the loop. + // if (S.getInit()) + // EmitStmt(S.getInit()); + + // // Start the loop with a block that tests the condition. + // // If there's an increment, the continue scope will be overwritten + // // later. + // JumpDest CondDest = getJumpDestInCurrentScope("for.cond"); + // llvm::BasicBlock *CondBlock = CondDest.getBlock(); + // EmitBlock(CondBlock); + + // Expr::EvalResult Result; + // bool CondIsConstInt = + // !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext()); + + // const SourceRange &R = S.getSourceRange(); + // LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs, + // SourceLocToDebugLoc(R.getBegin()), + // SourceLocToDebugLoc(R.getEnd()), + // checkIfLoopMustProgress(CondIsConstInt)); + + // // Create a cleanup scope for the condition variable cleanups. + // LexicalScope ConditionScope(*this, S.getSourceRange()); + + // // If the for loop doesn't have an increment we can just use the condition + // as + // // the continue block. Otherwise, if there is no condition variable, we can + // // form the continue block now. If there is a condition variable, we can't + // // form the continue block until after we've emitted the condition, because + // // the condition is in scope in the increment, but Sema's jump diagnostics + // // ensure that there are no continues from the condition variable that jump + // // to the loop increment. + // JumpDest Continue; + // if (!S.getInc()) + // Continue = CondDest; + // else if (!S.getConditionVariable()) + // Continue = getJumpDestInCurrentScope("for.inc"); + // BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); + + // if (S.getCond()) { + // // If the for statement has a condition scope, emit the local variable + // // declaration. + // if (S.getConditionVariable()) { + // EmitDecl(*S.getConditionVariable()); + + // // We have entered the condition variable's scope, so we're now able to + // // jump to the continue block. + // Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : + // CondDest; BreakContinueStack.back().ContinueBlock = Continue; + // } + + // llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); + // // If there are any cleanups between here and the loop-exit scope, + // // create a block to stage a loop exit along. + // if (ForScope.requiresCleanups()) + // ExitBlock = createBasicBlock("for.cond.cleanup"); + + // // As long as the condition is true, iterate the loop. + // llvm::BasicBlock *ForBody = createBasicBlock("for.body"); + + // // C99 6.8.5p2/p4: The first substatement is executed if the expression + // // compares unequal to 0. The condition must be a scalar type. + // llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); + // llvm::MDNode *Weights = + // createProfileWeightsForLoop(S.getCond(), + // getProfileCount(S.getBody())); + // if (!Weights && CGM.getCodeGenOpts().OptimizationLevel) + // BoolCondVal = emitCondLikelihoodViaExpectIntrinsic( + // BoolCondVal, Stmt::getLikelihood(S.getBody())); + + // Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights); + + // if (ExitBlock != LoopExit.getBlock()) { + // EmitBlock(ExitBlock); + // EmitBranchThroughCleanup(LoopExit); + // } + + // EmitBlock(ForBody); + // } else { + // // Treat it as a non-zero constant. Don't even create a new block for + // the + // // body, just fall into it. + // } + // incrementProfileCounter(&S); + + // { + // // Create a separate cleanup scope for the body, in case it is not + // // a compound statement. + // RunCleanupsScope BodyScope(*this); + // EmitStmt(S.getBody()); + // } + + // // If there is an increment, emit it next. + // if (S.getInc()) { + // EmitBlock(Continue.getBlock()); + // EmitStmt(S.getInc()); + // } + + // BreakContinueStack.pop_back(); + + // ConditionScope.ForceCleanup(); + + // EmitStopPoint(&S); + // EmitBranch(CondBlock); + + // ForScope.ForceCleanup(); + + // LoopStack.pop(); + + // // Emit the fall-through block. + // EmitBlock(LoopExit.getBlock(), true); + + return mlir::success(); +} + mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { // The else branch of a consteval if statement is always the only branch // that can be runtime evaluated. @@ -1661,10 +1813,13 @@ mlir::LogicalResult CIRGenModule::buildStmt(const Stmt *S, if (buildSwitchStmt(cast(*S)).failed()) return mlir::failure(); break; + case Stmt::ForStmtClass: + if (buildForStmt(cast(*S)).failed()) + return mlir::failure(); + break; case Stmt::IndirectGotoStmtClass: case Stmt::WhileStmtClass: case Stmt::DoStmtClass: - case Stmt::ForStmtClass: case Stmt::ReturnStmtClass: // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. case Stmt::GCCAsmStmtClass: diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 5140ef85b5d7..c878fe027a2b 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -518,6 +518,7 @@ class CIRGenModule { mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); + mlir::LogicalResult buildForStmt(const clang::ForStmt &S); // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir new file mode 100644 index 000000000000..7e8429dcda64 --- /dev/null +++ b/clang/test/CIR/IR/loop.cir @@ -0,0 +1,52 @@ +// RUN: cir-tool %s | FileCheck %s +// XFAIL: * + +func @l0() { + %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} + %1 = cir.cst(0 : i32) : i32 + cir.store %1, %0 : i32, cir.ptr + cir.scope { + %2 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} + %3 = cir.cst(0 : i32) : i32 + cir.store %3, %2 : i32, cir.ptr + cir.loop(cond : { + %4 = cir.load %2 : cir.ptr , i32 + %5 = cir.cst(10 : i32) : i32 + %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool + cir.yield + }, step : { + %4 = cir.load %2 : cir.ptr , i32 + %5 = cir.cst(1 : i32) : i32 + %6 = cir.binop(add, %4, %5) : i32 + cir.store %6, %2 : i32, cir.ptr + cir.yield + }) { + %4 = cir.load %0 : cir.ptr , i32 + %5 = cir.cst(1 : i32) : i32 + %6 = cir.binop(add, %4, %5) : i32 + cir.store %6, %0 : i32, cir.ptr + cir.yield + } + } + cir.return +} + +// CHECK: func @l0 +// CHECK: cir.loop(cond : { +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 +// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, step : { +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 +// CHECK-NEXT: cir.store %6, %2 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index bdc3e931db72..0cd883fd63b3 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -20,6 +20,7 @@ #include "mlir/IR/OpDefinition.h" #include "mlir/Interfaces/ControlFlowInterfaces.h" #include "mlir/Interfaces/InferTypeOpInterface.h" +#include "mlir/Interfaces/LoopLikeInterface.h" #include "mlir/Interfaces/SideEffectInterfaces.h" namespace mlir { diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 6f21dd7d6cc1..f568d8d3bd89 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -22,6 +22,7 @@ include "mlir/IR/EnumAttr.td" include "mlir/IR/SymbolInterfaces.td" include "mlir/IR/BuiltinAttributeInterfaces.td" include "mlir/Interfaces/ControlFlowInterfaces.td" +include "mlir/Interfaces/LoopLikeInterface.td" include "mlir/Interfaces/InferTypeOpInterface.td" include "mlir/Interfaces/SideEffectInterfaces.td" @@ -252,7 +253,7 @@ def StoreOp : CIR_Op<"store", [ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp">, +def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp, LoopOp">, Terminator]> { let summary = "return operation"; let description = [{ @@ -361,7 +362,8 @@ def YieldOpKind : I32EnumAttr< } def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, - ParentOneOf<["IfOp", "ScopeOp", "SwitchOp"]>]> { + ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", + "LoopOp"]>]> { let summary = "termination operation for regions inside if, for, scope, etc"; let description = [{ "cir.yield" yields an SSA value from a CIR dialect op region and @@ -689,5 +691,42 @@ def BrOp : CIR_Op<"br", }]; } +//===----------------------------------------------------------------------===// +// LoopOp +//===----------------------------------------------------------------------===// + +def LoopOp : CIR_Op<"loop", + [DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, + RecursivelySpeculatable, NoRegionArguments]> { + let summary = "loop operation"; + let description = [{ + }]; + let regions = (region SizedRegion<1>:$cond, AnyRegion:$body, + SizedRegion<1>:$step); + + let assemblyFormat = [{ + `(` + `cond` `{` $cond `}` `,` + `step` `{` $step `}` + `)` `{` + $body + `}` + attr-dict + }]; + + let skipDefaultBuilders = 1; + let builders = [ + OpBuilder<(ins + CArg<"function_ref", + "nullptr">:$condBuilder, + CArg<"function_ref", + "nullptr">:$bodyBuilder, + CArg<"function_ref", + "nullptr">:$stepBuilder + )> + ]; +} + #endif // MLIR_CIR_DIALECT_CIR_OPS diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index de680e60318b..21fe9ce9c2b9 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -416,6 +416,10 @@ mlir::LogicalResult YieldOp::verify() { if (llvm::isa(getOperation()->getParentOp())) return mlir::success(); + // FIXME: check for cir.yield continue + if (llvm::isa(getOperation()->getParentOp())) + return mlir::success(); + assert((llvm::isa(getOperation()->getParentOp())) && "unknown parent op"); if (isFallthrough()) @@ -764,6 +768,52 @@ void SwitchOp::build( switchBuilder(builder, result.location, result); } +//===----------------------------------------------------------------------===// +// LoopOp +//===----------------------------------------------------------------------===// + +void LoopOp::build(OpBuilder &builder, OperationState &result, + function_ref condBuilder, + function_ref bodyBuilder, + function_ref stepBuilder) { + OpBuilder::InsertionGuard guard(builder); + + Region *condRegion = result.addRegion(); + builder.createBlock(condRegion); + condBuilder(builder, result.location); + + Region *bodyRegion = result.addRegion(); + builder.createBlock(bodyRegion); + bodyBuilder(builder, result.location); + + Region *stepRegion = result.addRegion(); + builder.createBlock(stepRegion); + stepBuilder(builder, result.location); +} + +/// Given the region at `index`, or the parent operation if `index` is None, +/// return the successor regions. These are the regions that may be selected +/// during the flow of control. `operands` is a set of optional attributes +/// that correspond to a constant value for each operand, or null if that +/// operand is not a constant. +void LoopOp::getSuccessorRegions(mlir::RegionBranchPoint point, + SmallVectorImpl ®ions) { + // If any index all the underlying regions branch back to the parent + // operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor()); + return; + } + + // FIXME: we want to look at cond region for getting more accurate results + // if the other regions will get a chance to execute. + regions.push_back(RegionSuccessor(&this->getCond())); + regions.push_back(RegionSuccessor(&this->getBody())); + regions.push_back(RegionSuccessor(&this->getStep())); +} + +llvm::SmallVector LoopOp::getLoopRegions() { return {&getBody()}; } + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From 29ed7818ec922575464bbba3e6f63855fe74247e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 13 Apr 2022 12:15:01 -0700 Subject: [PATCH 0250/2301] [CIR] Add a loopcondition form for cir.yield out of loop conditions --- clang/test/CIR/IR/loop.cir | 4 +-- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 29 +++++++++++++++++++--- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 7e8429dcda64..a6838eed0c3a 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -13,7 +13,7 @@ func @l0() { %4 = cir.load %2 : cir.ptr , i32 %5 = cir.cst(10 : i32) : i32 %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool - cir.yield + cir.yield loopcondition %6 : !cir.bool }, step : { %4 = cir.load %2 : cir.ptr , i32 %5 = cir.cst(1 : i32) : i32 @@ -36,7 +36,7 @@ func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield loopcondition %6 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index f568d8d3bd89..d2229aa920e6 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -353,11 +353,12 @@ def IfOp : CIR_Op<"if", def YieldOpKind_BK : I32EnumAttrCase<"Break", 1, "break">; def YieldOpKind_FT : I32EnumAttrCase<"Fallthrough", 2, "fallthrough">; +def YieldOpKind_LC : I32EnumAttrCase<"Loopcondition", 3, "loopcondition">; def YieldOpKind : I32EnumAttr< "YieldOpKind", "yield kind", - [YieldOpKind_BK, YieldOpKind_FT]> { + [YieldOpKind_BK, YieldOpKind_FT, YieldOpKind_LC]> { let cppNamespace = "::mlir::cir"; } @@ -378,6 +379,9 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, 2. `cir.yield fallthrough` means the next region in the case list should be executed. + `cir.yield loopcondition %val` is another form that must terminate cond + regions within `cir.loop`s. + The `cir.yield` must be explicitly used whenever a region has more than one block, or within `cir.switch` regions not `cir.return` terminated. @@ -394,15 +398,29 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, cir.yield fallthrough }, ... ] + + cir.loop (cond : { + ... + %4 = ... : cir.bool + cir.yield loopcondition %4 + } ... ) {} ``` }]; let arguments = (ins OptionalAttr:$kind, - Variadic:$results); - let builders = [OpBuilder<(ins), [{ /* nothing to do */ }]>]; + Variadic:$args); + let builders = [ + OpBuilder<(ins), [{ /* nothing to do */ }]>, + OpBuilder<(ins "Value":$cond), [{ + $_state.addOperands(cond); + mlir::cir::YieldOpKindAttr kind = mlir::cir::YieldOpKindAttr::get( + $_builder.getContext(), mlir::cir::YieldOpKind::Loopcondition); + $_state.addAttribute(getKindAttrName($_state.name), kind); + }]> + ]; let assemblyFormat = [{ - ($kind^)? ($results^ `:` type($results))? attr-dict + ($kind^)? ($args^ `:` type($args))? attr-dict }]; let extraClassDeclaration = [{ @@ -416,6 +434,9 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, bool isBreak() { return !isPlain() && *getKind() == YieldOpKind::Break; } + bool isLoopCondition() { + return !isPlain() && *getKind() == YieldOpKind::Loopcondition; + } }]; let hasVerifier = 1; From 82e515b5a9979dfa59f266abbb1e6112b005b1d4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 11 Apr 2022 17:07:16 -0700 Subject: [PATCH 0251/2301] [CIR][CodeGen] Implement bulk of ForStmt - Insert yield's to wrap blocks and add testcases. --- clang/lib/CIR/CIRGenModule.cpp | 193 ++++++++------------- clang/test/CIR/CodeGen/loop.cpp | 45 +++++ clang/test/CIR/IR/loop.cir | 2 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 7 +- 4 files changed, 120 insertions(+), 127 deletions(-) create mode 100644 clang/test/CIR/CodeGen/loop.cpp diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 486729e32c75..6a9c3d9da945 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1530,20 +1530,57 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { } mlir::LogicalResult CIRGenModule::buildForStmt(const ForStmt &S) { - // TODO: pass in array of attributes. - - auto res = mlir::success(); + mlir::cir::LoopOp loopOp; + // TODO: pass in array of attributes. auto forStmtBuilder = [&]() -> mlir::LogicalResult { auto forRes = mlir::success(); // Evaluate the first part before the loop. if (S.getInit()) - forRes = buildStmt(S.getInit(), /*useCurrentScope=*/true); + if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + loopOp = builder.create( + getLoc(S.getSourceRange()), + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // TODO: branch weigths, likelyhood, profile counter, etc. + mlir::Value condVal; + if (S.getCond()) { + // If the for statement has a condition scope, + // emit the local variable declaration. + if (S.getConditionVariable()) + buildDecl(*S.getConditionVariable()); + // C99 6.8.5p2/p4: The first substatement is executed if the + // expression compares unequal to 0. The condition must be a + // scalar type. + condVal = evaluateExprAsBool(S.getCond()); + } else { + condVal = b.create( + loc, mlir::cir::BoolType::get(b.getContext()), + b.getBoolAttr(true)); + } + b.create(loc, condVal); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // FIXME: in C we need to open a new scope here. Do we also need it + // for C++ in case it's a compound statement? + if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + forRes = mlir::failure(); + builder.create(loc); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + if (S.getInc()) + if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) + forRes = mlir::failure(); + builder.create(loc); + }); return forRes; }; - // The switch scope contains the full source range for SwitchStmt. + auto res = mlir::success(); auto scopeLoc = getLoc(S.getSourceRange()); builder.create( scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ @@ -1560,123 +1597,34 @@ mlir::LogicalResult CIRGenModule::buildForStmt(const ForStmt &S) { if (res.failed()) return res; - assert(0 && "unimplemented"); - - // JumpDest LoopExit = getJumpDestInCurrentScope("for.end"); - - // LexicalScope ForScope(*this, S.getSourceRange()); - - // // Evaluate the first part before the loop. - // if (S.getInit()) - // EmitStmt(S.getInit()); - - // // Start the loop with a block that tests the condition. - // // If there's an increment, the continue scope will be overwritten - // // later. - // JumpDest CondDest = getJumpDestInCurrentScope("for.cond"); - // llvm::BasicBlock *CondBlock = CondDest.getBlock(); - // EmitBlock(CondBlock); - - // Expr::EvalResult Result; - // bool CondIsConstInt = - // !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext()); - - // const SourceRange &R = S.getSourceRange(); - // LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs, - // SourceLocToDebugLoc(R.getBegin()), - // SourceLocToDebugLoc(R.getEnd()), - // checkIfLoopMustProgress(CondIsConstInt)); - - // // Create a cleanup scope for the condition variable cleanups. - // LexicalScope ConditionScope(*this, S.getSourceRange()); - - // // If the for loop doesn't have an increment we can just use the condition - // as - // // the continue block. Otherwise, if there is no condition variable, we can - // // form the continue block now. If there is a condition variable, we can't - // // form the continue block until after we've emitted the condition, because - // // the condition is in scope in the increment, but Sema's jump diagnostics - // // ensure that there are no continues from the condition variable that jump - // // to the loop increment. - // JumpDest Continue; - // if (!S.getInc()) - // Continue = CondDest; - // else if (!S.getConditionVariable()) - // Continue = getJumpDestInCurrentScope("for.inc"); - // BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); - - // if (S.getCond()) { - // // If the for statement has a condition scope, emit the local variable - // // declaration. - // if (S.getConditionVariable()) { - // EmitDecl(*S.getConditionVariable()); - - // // We have entered the condition variable's scope, so we're now able to - // // jump to the continue block. - // Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : - // CondDest; BreakContinueStack.back().ContinueBlock = Continue; - // } - - // llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); - // // If there are any cleanups between here and the loop-exit scope, - // // create a block to stage a loop exit along. - // if (ForScope.requiresCleanups()) - // ExitBlock = createBasicBlock("for.cond.cleanup"); - - // // As long as the condition is true, iterate the loop. - // llvm::BasicBlock *ForBody = createBasicBlock("for.body"); - - // // C99 6.8.5p2/p4: The first substatement is executed if the expression - // // compares unequal to 0. The condition must be a scalar type. - // llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); - // llvm::MDNode *Weights = - // createProfileWeightsForLoop(S.getCond(), - // getProfileCount(S.getBody())); - // if (!Weights && CGM.getCodeGenOpts().OptimizationLevel) - // BoolCondVal = emitCondLikelihoodViaExpectIntrinsic( - // BoolCondVal, Stmt::getLikelihood(S.getBody())); - - // Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights); - - // if (ExitBlock != LoopExit.getBlock()) { - // EmitBlock(ExitBlock); - // EmitBranchThroughCleanup(LoopExit); - // } - - // EmitBlock(ForBody); - // } else { - // // Treat it as a non-zero constant. Don't even create a new block for - // the - // // body, just fall into it. - // } - // incrementProfileCounter(&S); - - // { - // // Create a separate cleanup scope for the body, in case it is not - // // a compound statement. - // RunCleanupsScope BodyScope(*this); - // EmitStmt(S.getBody()); - // } - - // // If there is an increment, emit it next. - // if (S.getInc()) { - // EmitBlock(Continue.getBlock()); - // EmitStmt(S.getInc()); - // } - - // BreakContinueStack.pop_back(); - - // ConditionScope.ForceCleanup(); - - // EmitStopPoint(&S); - // EmitBranch(CondBlock); - - // ForScope.ForceCleanup(); - - // LoopStack.pop(); - - // // Emit the fall-through block. - // EmitBlock(LoopExit.getBlock(), true); + // Add terminating yield on loop body region in case there are not + // other terminators used. + // FIXME: unify this with terminateCaseRegion. + auto terminateLoopBody = [&](mlir::Region &r, mlir::Location loc) { + if (r.empty()) + return; + + SmallVector eraseBlocks; + unsigned numBlocks = r.getBlocks().size(); + for (auto &block : r.getBlocks()) { + // Already cleanup after return operations, which might create + // empty blocks if emitted as last stmt. + if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() && + block.hasNoSuccessors()) + eraseBlocks.push_back(&block); + + if (block.empty() || + !block.back().hasTrait()) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(&block); + builder.create(loc); + } + } + + for (auto *b : eraseBlocks) + b->erase(); + }; + terminateLoopBody(loopOp.getBody(), getLoc(S.getEndLoc())); return mlir::success(); } @@ -2095,6 +2043,7 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { assert(builder.getInsertionBlock() && "Should be valid"); } + function.dump(); if (mlir::failed(function.verifyBody())) return nullptr; theModule.push_back(function); diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp new file mode 100644 index 000000000000..4dd3f0270dd5 --- /dev/null +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -0,0 +1,45 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +void l0() { + for (;;) { + } +} + +// CHECK: func @l0 +// CHECK: cir.loop(cond : { +// CHECK-NEXT: %0 = cir.cst(true) : !cir.bool +// CHECK-NEXT: cir.yield loopcondition %0 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } + +void l1() { + int x = 0; + for (int i = 0; i < 10; i = i + 1) { + x = x + 1; + } +} + +// CHECK: func @l1 +// CHECK: cir.loop(cond : { +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 +// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool +// CHECK-NEXT: cir.yield loopcondition %6 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 +// CHECK-NEXT: cir.store %6, %2 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index a6838eed0c3a..6edbaff98431 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -1,7 +1,7 @@ // RUN: cir-tool %s | FileCheck %s // XFAIL: * -func @l0() { +func.func @l0() { %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} %1 = cir.cst(0 : i32) : i32 cir.store %1, %0 : i32, cir.ptr diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index d2229aa920e6..571f5ca01741 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -728,11 +728,10 @@ def LoopOp : CIR_Op<"loop", let assemblyFormat = [{ `(` - `cond` `{` $cond `}` `,` - `step` `{` $step `}` - `)` `{` + `cond` `:` $cond `,` + `step` `:` $step + `)` $body - `}` attr-dict }]; From a831ab7f12c1a5b136909a3be00f87445f939d4d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 13 Apr 2022 15:22:29 -0700 Subject: [PATCH 0252/2301] [CIR][CodeGen] Add support for WhileStmt --- clang/lib/CIR/CIRGenModule.cpp | 122 ++++++++++++++++++++++++-------- clang/lib/CIR/CIRGenModule.h | 1 + clang/test/CIR/CodeGen/loop.cpp | 58 +++++++++++++++ 3 files changed, 151 insertions(+), 30 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 6a9c3d9da945..62e3f335886b 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1529,6 +1529,92 @@ mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { return mlir::success(); } +// Add terminating yield on body regions (loops, ...) in case there are +// not other terminators used. +// FIXME: make terminateCaseRegion use this too. +static void terminateBody(mlir::OpBuilder &builder, mlir::Region &r, + mlir::Location loc) { + if (r.empty()) + return; + + SmallVector eraseBlocks; + unsigned numBlocks = r.getBlocks().size(); + for (auto &block : r.getBlocks()) { + // Already cleanup after return operations, which might create + // empty blocks if emitted as last stmt. + if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() && + block.hasNoSuccessors()) + eraseBlocks.push_back(&block); + + if (block.empty() || + !block.back().hasTrait()) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(&block); + builder.create(loc); + } + } + + for (auto *b : eraseBlocks) + b->erase(); +} + +mlir::LogicalResult CIRGenModule::buildWhileStmt(const WhileStmt &S) { + mlir::cir::LoopOp loopOp; + + // TODO: pass in array of attributes. + auto whileStmtBuilder = [&]() -> mlir::LogicalResult { + auto forRes = mlir::success(); + + loopOp = builder.create( + getLoc(S.getSourceRange()), + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // TODO: branch weigths, likelyhood, profile counter, etc. + mlir::Value condVal; + // If the for statement has a condition scope, + // emit the local variable declaration. + if (S.getConditionVariable()) + buildDecl(*S.getConditionVariable()); + // C99 6.8.5p2/p4: The first substatement is executed if the + // expression compares unequal to 0. The condition must be a + // scalar type. + condVal = evaluateExprAsBool(S.getCond()); + b.create(loc, condVal); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + forRes = mlir::failure(); + builder.create(loc); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + builder.create(loc); + }); + return forRes; + }; + + auto res = mlir::success(); + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + res = whileStmtBuilder(); + }); + + if (res.failed()) + return res; + + terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); + return mlir::success(); +} + mlir::LogicalResult CIRGenModule::buildForStmt(const ForStmt &S) { mlir::cir::LoopOp loopOp; @@ -1597,35 +1683,7 @@ mlir::LogicalResult CIRGenModule::buildForStmt(const ForStmt &S) { if (res.failed()) return res; - // Add terminating yield on loop body region in case there are not - // other terminators used. - // FIXME: unify this with terminateCaseRegion. - auto terminateLoopBody = [&](mlir::Region &r, mlir::Location loc) { - if (r.empty()) - return; - - SmallVector eraseBlocks; - unsigned numBlocks = r.getBlocks().size(); - for (auto &block : r.getBlocks()) { - // Already cleanup after return operations, which might create - // empty blocks if emitted as last stmt. - if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() && - block.hasNoSuccessors()) - eraseBlocks.push_back(&block); - - if (block.empty() || - !block.back().hasTrait()) { - mlir::OpBuilder::InsertionGuard guardCase(builder); - builder.setInsertionPointToEnd(&block); - builder.create(loc); - } - } - - for (auto *b : eraseBlocks) - b->erase(); - }; - terminateLoopBody(loopOp.getBody(), getLoc(S.getEndLoc())); - + terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); return mlir::success(); } @@ -1765,8 +1823,12 @@ mlir::LogicalResult CIRGenModule::buildStmt(const Stmt *S, if (buildForStmt(cast(*S)).failed()) return mlir::failure(); break; - case Stmt::IndirectGotoStmtClass: case Stmt::WhileStmtClass: + if (buildWhileStmt(cast(*S)).failed()) + return mlir::failure(); + break; + + case Stmt::IndirectGotoStmtClass: case Stmt::DoStmtClass: case Stmt::ReturnStmtClass: // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index c878fe027a2b..f3a824a63fe7 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -519,6 +519,7 @@ class CIRGenModule { mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); mlir::LogicalResult buildForStmt(const clang::ForStmt &S); + mlir::LogicalResult buildWhileStmt(const clang::WhileStmt &S); // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 4dd3f0270dd5..2a180b2f346a 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -43,3 +43,61 @@ void l1() { // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } + +void l2(bool cond) { + int i = 0; + while (cond) { + i = i + 1; + } + while (true) { + i = i + 1; + } + while (1) { + i = i + 1; + } +} + +// CHECK: func @l2 +// CHECK: cir.scope { +// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: %3 = cir.cst(true) : !cir.bool +// CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: %3 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool +// CHECK-NEXT: cir.yield loopcondition %4 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } From ba5bf649745c81e8168aacead9f5e13e2174db95 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 13 Apr 2022 15:23:00 -0700 Subject: [PATCH 0253/2301] [CIR][CodeGen] Fix silly dump method invocation that slip through --- clang/lib/CIR/CIRGenModule.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 62e3f335886b..9741c9bbf10d 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -2105,7 +2105,6 @@ mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { assert(builder.getInsertionBlock() && "Should be valid"); } - function.dump(); if (mlir::failed(function.verifyBody())) return nullptr; theModule.push_back(function); From 443545f86e401f5965449dc2ee0a3d275a31c065 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 13 Apr 2022 15:59:23 -0700 Subject: [PATCH 0254/2301] [CIR] Remove extra yield, this should be added already by terminateBody calls --- clang/lib/CIR/CIRGenModule.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 9741c9bbf10d..aff69c1030c0 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1585,7 +1585,6 @@ mlir::LogicalResult CIRGenModule::buildWhileStmt(const WhileStmt &S) { [&](mlir::OpBuilder &b, mlir::Location loc) { if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) forRes = mlir::failure(); - builder.create(loc); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -1654,7 +1653,6 @@ mlir::LogicalResult CIRGenModule::buildForStmt(const ForStmt &S) { // for C++ in case it's a compound statement? if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) forRes = mlir::failure(); - builder.create(loc); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { From 242a596f9962cf55c23d44995cb7fa91f784336d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 13 Apr 2022 16:00:26 -0700 Subject: [PATCH 0255/2301] [CIR][CodeGen] Add support for DoStmt --- clang/lib/CIR/CIRGenModule.cpp | 56 ++++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenModule.h | 1 + clang/test/CIR/CodeGen/loop.cpp | 58 +++++++++++++++++++++++++++++++++ 3 files changed, 114 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index aff69c1030c0..2d7738f54979 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1558,6 +1558,57 @@ static void terminateBody(mlir::OpBuilder &builder, mlir::Region &r, b->erase(); } +mlir::LogicalResult CIRGenModule::buildDoStmt(const DoStmt &S) { + mlir::cir::LoopOp loopOp; + + // TODO: pass in array of attributes. + auto doStmtBuilder = [&]() -> mlir::LogicalResult { + auto forRes = mlir::success(); + + loopOp = builder.create( + getLoc(S.getSourceRange()), + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // TODO: branch weigths, likelyhood, profile counter, etc. + // C99 6.8.5p2/p4: The first substatement is executed if the + // expression compares unequal to 0. The condition must be a + // scalar type. + mlir::Value condVal = evaluateExprAsBool(S.getCond()); + b.create(loc, condVal); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + forRes = mlir::failure(); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + builder.create(loc); + }); + return forRes; + }; + + auto res = mlir::success(); + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + res = doStmtBuilder(); + }); + + if (res.failed()) + return res; + + terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); + return mlir::success(); +} + mlir::LogicalResult CIRGenModule::buildWhileStmt(const WhileStmt &S) { mlir::cir::LoopOp loopOp; @@ -1825,9 +1876,12 @@ mlir::LogicalResult CIRGenModule::buildStmt(const Stmt *S, if (buildWhileStmt(cast(*S)).failed()) return mlir::failure(); break; + case Stmt::DoStmtClass: + if (buildDoStmt(cast(*S)).failed()) + return mlir::failure(); + break; case Stmt::IndirectGotoStmtClass: - case Stmt::DoStmtClass: case Stmt::ReturnStmtClass: // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. case Stmt::GCCAsmStmtClass: diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index f3a824a63fe7..e4fe1d355ea6 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -520,6 +520,7 @@ class CIRGenModule { mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); mlir::LogicalResult buildForStmt(const clang::ForStmt &S); mlir::LogicalResult buildWhileStmt(const clang::WhileStmt &S); + mlir::LogicalResult buildDoStmt(const clang::DoStmt &S); // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 2a180b2f346a..267dab427613 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -101,3 +101,61 @@ void l2(bool cond) { // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } + +void l3(bool cond) { + int i = 0; + do { + i = i + 1; + } while (cond); + do { + i = i + 1; + } while (true); + do { + i = i + 1; + } while (1); +} + +// CHECK: func @l3 +// CHECK: cir.scope { +// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: %3 = cir.cst(true) : !cir.bool +// CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: %3 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool +// CHECK-NEXT: cir.yield loopcondition %4 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } From 9c39641499c737bd5264b6ace023eb3a27559610 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 13 Apr 2022 16:37:04 -0700 Subject: [PATCH 0256/2301] [CIR] Add LoopOpKind to tag types of loops, add codegen support --- clang/lib/CIR/CIRGenModule.cpp | 6 +- clang/test/CIR/CodeGen/loop.cpp | 16 ++--- clang/test/CIR/IR/loop.cir | 73 +++++++++++++++++++++- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 15 +++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 4 ++ 5 files changed, 101 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 2d7738f54979..8d2323eea325 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1566,7 +1566,7 @@ mlir::LogicalResult CIRGenModule::buildDoStmt(const DoStmt &S) { auto forRes = mlir::success(); loopOp = builder.create( - getLoc(S.getSourceRange()), + getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::DoWhile, /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { // TODO: branch weigths, likelyhood, profile counter, etc. @@ -1617,7 +1617,7 @@ mlir::LogicalResult CIRGenModule::buildWhileStmt(const WhileStmt &S) { auto forRes = mlir::success(); loopOp = builder.create( - getLoc(S.getSourceRange()), + getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::While, /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { // TODO: branch weigths, likelyhood, profile counter, etc. @@ -1677,7 +1677,7 @@ mlir::LogicalResult CIRGenModule::buildForStmt(const ForStmt &S) { return mlir::failure(); loopOp = builder.create( - getLoc(S.getSourceRange()), + getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::For, /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { // TODO: branch weigths, likelyhood, profile counter, etc. diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 267dab427613..0c317f4a9887 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -8,7 +8,7 @@ void l0() { } // CHECK: func @l0 -// CHECK: cir.loop(cond : { +// CHECK: cir.loop for(cond : { // CHECK-NEXT: %0 = cir.cst(true) : !cir.bool // CHECK-NEXT: cir.yield loopcondition %0 : !cir.bool // CHECK-NEXT: }, step : { @@ -25,7 +25,7 @@ void l1() { } // CHECK: func @l1 -// CHECK: cir.loop(cond : { +// CHECK: cir.loop for(cond : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool @@ -59,7 +59,7 @@ void l2(bool cond) { // CHECK: func @l2 // CHECK: cir.scope { -// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool // CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool // CHECK-NEXT: }, step : { @@ -73,7 +73,7 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %3 = cir.cst(true) : !cir.bool // CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool // CHECK-NEXT: }, step : { @@ -87,7 +87,7 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %3 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool // CHECK-NEXT: cir.yield loopcondition %4 : !cir.bool @@ -117,7 +117,7 @@ void l3(bool cond) { // CHECK: func @l3 // CHECK: cir.scope { -// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool // CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool // CHECK-NEXT: }, step : { @@ -131,7 +131,7 @@ void l3(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.cst(true) : !cir.bool // CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool // CHECK-NEXT: }, step : { @@ -145,7 +145,7 @@ void l3(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop(cond : { +// CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool // CHECK-NEXT: cir.yield loopcondition %4 : !cir.bool diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 6edbaff98431..a906731aabb3 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -9,7 +9,7 @@ func.func @l0() { %2 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} %3 = cir.cst(0 : i32) : i32 cir.store %3, %2 : i32, cir.ptr - cir.loop(cond : { + cir.loop for(cond : { %4 = cir.load %2 : cir.ptr , i32 %5 = cir.cst(10 : i32) : i32 %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool @@ -28,11 +28,50 @@ func.func @l0() { cir.yield } } + cir.scope { + %2 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} + %3 = cir.cst(0 : i32) : i32 + cir.store %3, %2 : i32, cir.ptr + cir.loop while(cond : { + %4 = cir.load %2 : cir.ptr , i32 + %5 = cir.cst(10 : i32) : i32 + %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool + cir.yield loopcondition %6 : !cir.bool + }, step : { + cir.yield + }) { + %4 = cir.load %0 : cir.ptr , i32 + %5 = cir.cst(1 : i32) : i32 + %6 = cir.binop(add, %4, %5) : i32 + cir.store %6, %0 : i32, cir.ptr + cir.yield + } + } + + cir.scope { + %2 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} + %3 = cir.cst(0 : i32) : i32 + cir.store %3, %2 : i32, cir.ptr + cir.loop dowhile(cond : { + %4 = cir.load %2 : cir.ptr , i32 + %5 = cir.cst(10 : i32) : i32 + %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool + cir.yield loopcondition %6 : !cir.bool + }, step : { + cir.yield + }) { + %4 = cir.load %0 : cir.ptr , i32 + %5 = cir.cst(1 : i32) : i32 + %6 = cir.binop(add, %4, %5) : i32 + cir.store %6, %0 : i32, cir.ptr + cir.yield + } + } cir.return } // CHECK: func @l0 -// CHECK: cir.loop(cond : { +// CHECK: cir.loop for(cond : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool @@ -50,3 +89,33 @@ func.func @l0() { // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } + +// CHECK: cir.loop while(cond : { +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 +// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool +// CHECK-NEXT: cir.yield loopcondition %6 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } + +// CHECK: cir.loop dowhile(cond : { +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 +// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool +// CHECK-NEXT: cir.yield loopcondition %6 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } \ No newline at end of file diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 571f5ca01741..e3f23bdf766a 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -716,6 +716,17 @@ def BrOp : CIR_Op<"br", // LoopOp //===----------------------------------------------------------------------===// +def LoopOpKind_For : I32EnumAttrCase<"For", 1, "for">; +def LoopOpKind_While : I32EnumAttrCase<"While", 2, "while">; +def LoopOpKind_DoWhile : I32EnumAttrCase<"DoWhile", 3, "dowhile">; + +def LoopOpKind : I32EnumAttr< + "LoopOpKind", + "Loop kind", + [LoopOpKind_For, LoopOpKind_While, LoopOpKind_DoWhile]> { + let cppNamespace = "::mlir::cir"; +} + def LoopOp : CIR_Op<"loop", [DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, @@ -723,10 +734,13 @@ def LoopOp : CIR_Op<"loop", let summary = "loop operation"; let description = [{ }]; + + let arguments = (ins Arg:$kind); let regions = (region SizedRegion<1>:$cond, AnyRegion:$body, SizedRegion<1>:$step); let assemblyFormat = [{ + $kind `(` `cond` `:` $cond `,` `step` `:` $step @@ -738,6 +752,7 @@ def LoopOp : CIR_Op<"loop", let skipDefaultBuilders = 1; let builders = [ OpBuilder<(ins + "cir::LoopOpKind":$kind, CArg<"function_ref", "nullptr">:$condBuilder, CArg<"function_ref", diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 21fe9ce9c2b9..aac34bb63378 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -773,10 +773,14 @@ void SwitchOp::build( //===----------------------------------------------------------------------===// void LoopOp::build(OpBuilder &builder, OperationState &result, + cir::LoopOpKind kind, function_ref condBuilder, function_ref bodyBuilder, function_ref stepBuilder) { OpBuilder::InsertionGuard guard(builder); + ::mlir::cir::LoopOpKindAttr kindAttr = + cir::LoopOpKindAttr::get(builder.getContext(), kind); + result.addAttribute("kind", kindAttr); Region *condRegion = result.addRegion(); builder.createBlock(condRegion); From 070760f85c9550129d657aadc8ec72d9f71af326 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 11 Oct 2024 12:09:40 -0700 Subject: [PATCH 0257/2301] [CIR][NFC] Refactor CIRGenModule's member fns to CIRGenFunction Also, distribute the new CIRGenFunction member fns to their corresponding locations that match CodeGen. e.g. CG{Decl,Stmt,Expr}. Still to be done: * order the functions both more logically as well as more alike how codegen's functions are ordered. * clean up access specifiers. I didn't do a great job tracking where things should go as far as access. --- clang/CMakeLists.txt | 2 - .../clang/CIR/Dialect/IR/CMakeLists.txt | 0 clang/lib/CIR/CIRGenCall.cpp | 84 + clang/lib/CIR/CIRGenCleanup.cpp | 46 + clang/lib/CIR/CIRGenDecl.cpp | 368 ++++ clang/lib/CIR/CIRGenExpr.cpp | 492 ++++- clang/lib/CIR/CIRGenExprScalar.cpp | 108 +- clang/lib/CIR/CIRGenFunction.cpp | 445 ++-- clang/lib/CIR/CIRGenFunction.h | 422 +++- clang/lib/CIR/CIRGenModule.cpp | 1927 +---------------- clang/lib/CIR/CIRGenModule.h | 400 +--- clang/lib/CIR/CIRGenStmt.cpp | 878 ++++++++ clang/lib/CIR/CIRGenerator.cpp | 4 +- clang/lib/CIR/CMakeLists.txt | 19 +- clang/lib/Sema/CIRBasedWarnings.cpp | 2 +- clang/test/CIR/global-var-simple.cpp | 60 + clang/test/CIR/hello.c | 5 + 17 files changed, 2710 insertions(+), 2552 deletions(-) create mode 100644 clang/include/clang/CIR/Dialect/IR/CMakeLists.txt create mode 100644 clang/lib/CIR/CIRGenCleanup.cpp create mode 100644 clang/lib/CIR/CIRGenDecl.cpp create mode 100644 clang/lib/CIR/CIRGenStmt.cpp create mode 100644 clang/test/CIR/global-var-simple.cpp create mode 100644 clang/test/CIR/hello.c diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt index d0a828029561..b79e570667b2 100644 --- a/clang/CMakeLists.txt +++ b/clang/CMakeLists.txt @@ -166,8 +166,6 @@ if(CLANG_ENABLE_LIBXML2) endif() endif() -set(CLANG_ENABLE_CIR FALSE) - if(CLANG_ENABLE_CIR) if (CLANG_BUILT_STANDALONE) message(FATAL_ERROR diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 501fac42d52e..9a290f69cd3c 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -159,6 +159,14 @@ void ClangToCIRArgMapping::construct(const ASTContext &Context, } // namespace +static bool hasInAllocaArgs(CIRGenModule &CGM, CallingConv ExplicitCC, + ArrayRef ArgTypes) { + assert(ExplicitCC != CC_Swift && ExplicitCC != CC_SwiftAsync && "Swift NYI"); + assert(!CGM.getTarget().getCXXABI().isMicrosoft() && "MSABI NYI"); + + return false; +} + mlir::FunctionType CIRGenTypes::GetFunctionType(clang::GlobalDecl GD) { const CIRGenFunctionInfo &FI = arrangeGlobalDeclaration(GD); return GetFunctionType(FI); @@ -490,3 +498,79 @@ RValue CIRGenFunction::buildAnyExprToTemp(const Expr *E) { assert(!hasAggregateEvaluationKind(E->getType()) && "aggregate args NYI"); return buildAnyExpr(E, AggSlot); } + +void CIRGenFunction::buildCallArgs( + CallArgList &Args, PrototypeWrapper Prototype, + llvm::iterator_range ArgRange, + AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { + + llvm::SmallVector ArgTypes; + + assert((ParamsToSkip == 0 || Prototype.P) && + "Can't skip parameters if type info is not provided"); + + // This variable only captures *explicitly* written conventions, not those + // applied by default via command line flags or target defaults, such as + // thiscall, appcs, stdcall via -mrtd, etc. Computing that correctly would + // require knowing if this is a C++ instance method or being able to see + // unprotyped FunctionTypes. + CallingConv ExplicitCC = CC_C; + + // First, if a prototype was provided, use those argument types. + bool IsVariadic = false; + if (Prototype.P) { + const auto *MD = Prototype.P.dyn_cast(); + assert(!MD && "ObjCMethodDecl NYI"); + + const auto *FPT = Prototype.P.get(); + IsVariadic = FPT->isVariadic(); + assert(!IsVariadic && "Variadic functions NYI"); + ExplicitCC = FPT->getExtInfo().getCC(); + ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, + FPT->param_type_end()); + } + + // If we still have any arguments, emit them using the type of the argument. + for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size())) { + assert(!IsVariadic && "Variadic functions NYI"); + ArgTypes.push_back(A->getType()); + }; + assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); + + // We must evaluate arguments from right to left in the MS C++ ABI, because + // arguments are destroyed left to right in the callee. As a special case, + // there are certain language constructs taht require left-to-right + // evaluation, and in those cases we consider the evaluation order requirement + // to trump the "destruction order is reverse construction order" guarantee. + bool LeftToRight = true; + assert(!CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() && + "MSABI NYI"); + assert(!hasInAllocaArgs(CGM, ExplicitCC, ArgTypes) && "NYI"); + + // Evaluate each argument in the appropriate order. + size_t CallArgsStart = Args.size(); + for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { + unsigned Idx = LeftToRight ? I : E - I - 1; + CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; + unsigned InitialArgSize = Args.size(); + assert(!isa(*Arg) && "NYI"); + assert(!isa(AC.getDecl()) && "NYI"); + + buildCallArg(Args, *Arg, ArgTypes[Idx]); + // In particular, we depend on it being the last arg in Args, and the + // objectsize bits depend on there only being one arg if !LeftToRight. + assert(InitialArgSize + 1 == Args.size() && + "The code below depends on only adding one arg per buildCallArg"); + (void)InitialArgSize; + // Since pointer argument are never emitted as LValue, it is safe to emit + // non-null argument check for r-value only. + assert(!SanOpts.has(SanitizerKind::NonnullAttribute) && "Sanitizers NYI"); + assert(!SanOpts.has(SanitizerKind::NullabilityArg) && "Sanitizers NYI"); + } + + if (!LeftToRight) { + // Un-reverse the arguments we just evaluated so they match up with the CIR + // function. + std::reverse(Args.begin() + CallArgsStart, Args.end()); + } +} diff --git a/clang/lib/CIR/CIRGenCleanup.cpp b/clang/lib/CIR/CIRGenCleanup.cpp new file mode 100644 index 000000000000..3ebaafac066d --- /dev/null +++ b/clang/lib/CIR/CIRGenCleanup.cpp @@ -0,0 +1,46 @@ +//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains code dealing with the IR generation for cleanups +// and related information. +// +// A "cleanup" is a piece of code which needs to be executed whenever +// control transfers out of a particular scope. This can be +// conditionalized to occur only on exceptional control flow, only on +// normal control flow, or both. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenFunction.h" + +using namespace cir; +using namespace clang; +using namespace mlir::cir; + +/// Build a unconditional branch to the lexical scope cleanup block +/// or with the labeled blocked if already solved. +/// +/// Track on scope basis, goto's we need to fix later. +mlir::LogicalResult +CIRGenFunction::buildBranchThroughCleanup(JumpDest &Dest, LabelDecl *L, + mlir::Location Loc) { + // Remove this once we go for making sure unreachable code is + // well modeled (or not). + assert(builder.getInsertionBlock() && "not yet implemented"); + + // Insert a branch: to the cleanup block (unsolved) or to the already + // materialized label. Keep track of unsolved goto's. + mlir::Block *DstBlock = Dest.getBlock(); + auto G = builder.create( + Loc, Dest.isValid() ? DstBlock + : currLexScope->getOrCreateCleanupBlock(builder)); + if (!Dest.isValid()) + currLexScope->PendingGotos.push_back(std::make_pair(G, L)); + + return mlir::success(); +} diff --git a/clang/lib/CIR/CIRGenDecl.cpp b/clang/lib/CIR/CIRGenDecl.cpp new file mode 100644 index 000000000000..b31f9376660f --- /dev/null +++ b/clang/lib/CIR/CIRGenDecl.cpp @@ -0,0 +1,368 @@ +//===--- CIRGenDecl.cpp - Emit CIR Code for declarations ------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Decl nodes as CIR code. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenFunction.h" + +#include "clang/AST/Decl.h" + +using namespace cir; +using namespace clang; + +CIRGenFunction::AutoVarEmission +CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { + QualType Ty = D.getType(); + // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private && + // getLangOpts().OpenCL)) + assert(Ty.getAddressSpace() == LangAS::Default); + + assert(!D.isEscapingByref() && "not implemented"); + assert(!Ty->isVariablyModifiedType() && "not implemented"); + assert(!getContext() + .getLangOpts() + .OpenMP && // !CGF.getLangOpts().OpenMPIRBuilder + "not implemented"); + bool NRVO = + getContext().getLangOpts().ElideConstructors && D.isNRVOVariable(); + assert(!NRVO && "not implemented"); + assert(Ty->isConstantSizeType() && "not implemented"); + assert(!D.hasAttr() && "not implemented"); + + AutoVarEmission emission(D); + CharUnits alignment = getContext().getDeclAlign(&D); + // TODO: debug info + // TODO: use CXXABI + + // If this value is an array or struct with a statically determinable + // constant initializer, there are optimizations we can do. + // + // TODO: We should constant-evaluate the initializer of any variable, + // as long as it is initialized by a constant expression. Currently, + // isConstantInitializer produces wrong answers for structs with + // reference or bitfield members, and a few other cases, and checking + // for POD-ness protects us from some of these. + if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) && + (D.isConstexpr() || + ((Ty.isPODType(getContext()) || + getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) && + D.getInit()->isConstantInitializer(getContext(), false)))) { + + // If the variable's a const type, and it's neither an NRVO + // candidate nor a __block variable and has no mutable members, + // emit it as a global instead. + // Exception is if a variable is located in non-constant address space + // in OpenCL. + // TODO: deal with CGM.getCodeGenOpts().MergeAllConstants + // TODO: perhaps we don't need this at all at CIR since this can + // be done as part of lowering down to LLVM. + if ((!getContext().getLangOpts().OpenCL || + Ty.getAddressSpace() == LangAS::opencl_constant) && + (!NRVO && !D.isEscapingByref() && CGM.isTypeConstant(Ty, true))) + assert(0 && "not implemented"); + + // Otherwise, tell the initialization code that we're in this case. + emission.IsConstantAggregate = true; + } + + // TODO: track source location range... + mlir::Value addr; + if (failed(declare(&D, Ty, getLoc(D.getSourceRange()), alignment, addr))) { + CGM.emitError("Cannot declare variable"); + return emission; + } + + // TODO: what about emitting lifetime markers for MSVC catch parameters? + // TODO: something like @llvm.lifetime.start/end here? revisit this later. + emission.Addr = Address{addr, alignment}; + return emission; +} + +/// Determine whether the given initializer is trivial in the sense +/// that it requires no code to be generated. +bool CIRGenFunction::isTrivialInitializer(const Expr *Init) { + if (!Init) + return true; + + if (const CXXConstructExpr *Construct = dyn_cast(Init)) + if (CXXConstructorDecl *Constructor = Construct->getConstructor()) + if (Constructor->isTrivial() && Constructor->isDefaultConstructor() && + !Construct->requiresZeroInitialization()) + return true; + + return false; +} +void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { + assert(emission.Variable && "emission was not valid!"); + + const VarDecl &D = *emission.Variable; + QualType type = D.getType(); + + // If this local has an initializer, emit it now. + const Expr *Init = D.getInit(); + + // TODO: in LLVM codegen if we are at an unreachable point, the initializer + // isn't emitted unless it contains a label. What we want for CIR? + assert(builder.getInsertionBlock()); + + // Initialize the variable here if it doesn't have a initializer and it is a + // C struct that is non-trivial to initialize or an array containing such a + // struct. + if (!Init && type.isNonTrivialToPrimitiveDefaultInitialize() == + QualType::PDIK_Struct) { + assert(0 && "not implemented"); + return; + } + + const Address Loc = emission.Addr; + + // Note: constexpr already initializes everything correctly. + LangOptions::TrivialAutoVarInitKind trivialAutoVarInit = + (D.isConstexpr() + ? LangOptions::TrivialAutoVarInitKind::Uninitialized + : (D.getAttr() + ? LangOptions::TrivialAutoVarInitKind::Uninitialized + : getContext().getLangOpts().getTrivialAutoVarInit())); + + auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) { + if (trivialAutoVarInit == + LangOptions::TrivialAutoVarInitKind::Uninitialized) + return; + + assert(0 && "unimplemented"); + }; + + if (isTrivialInitializer(Init)) + return initializeWhatIsTechnicallyUninitialized(Loc); + + if (emission.IsConstantAggregate || + D.mightBeUsableInConstantExpressions(getContext())) { + assert(0 && "not implemented"); + } + + initializeWhatIsTechnicallyUninitialized(Loc); + LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); + return buildExprAsInit(Init, &D, lv); +} + +void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) { + assert(emission.Variable && "emission was not valid!"); + + // TODO: in LLVM codegen if we are at an unreachable point codgen + // is ignored. What we want for CIR? + assert(builder.getInsertionBlock()); + const VarDecl &D = *emission.Variable; + + // Check the type for a cleanup. + // TODO: something like emitAutoVarTypeCleanup + if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext())) + assert(0 && "not implemented"); + + // In GC mode, honor objc_precise_lifetime. + if (getContext().getLangOpts().getGC() != LangOptions::NonGC && + D.hasAttr()) + assert(0 && "not implemented"); + + // Handle the cleanup attribute. + if (const CleanupAttr *CA = D.getAttr()) + assert(0 && "not implemented"); + + // TODO: handle block variable +} + +/// Emit code and set up symbol table for a variable declaration with auto, +/// register, or no storage class specifier. These turn into simple stack +/// objects, globals depending on target. +void CIRGenFunction::buildAutoVarDecl(const VarDecl &D) { + AutoVarEmission emission = buildAutoVarAlloca(D); + buildAutoVarInit(emission); + buildAutoVarCleanups(emission); +} + +void CIRGenFunction::buildVarDecl(const VarDecl &D) { + if (D.hasExternalStorage()) { + assert(0 && "should we just returns is there something to track?"); + // Don't emit it now, allow it to be emitted lazily on its first use. + return; + } + + // Some function-scope variable does not have static storage but still + // needs to be emitted like a static variable, e.g. a function-scope + // variable in constant address space in OpenCL. + if (D.getStorageDuration() != SD_Automatic) + assert(0 && "not implemented"); + + if (D.getType().getAddressSpace() == LangAS::opencl_local) + assert(0 && "not implemented"); + + assert(D.hasLocalStorage()); + return buildAutoVarDecl(D); +} + +void CIRGenFunction::buildScalarInit(const Expr *init, const ValueDecl *D, + LValue lvalue) { + // TODO: this is where a lot of ObjC lifetime stuff would be done. + mlir::Value value = buildScalarExpr(init); + SourceLocRAIIObject Loc{*this, getLoc(D->getSourceRange())}; + buldStoreThroughLValue(RValue::get(value), lvalue, D); + return; +} + +void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, + LValue lvalue) { + QualType type = D->getType(); + + if (type->isReferenceType()) { + assert(0 && "not implemented"); + return; + } + switch (CIRGenFunction::getEvaluationKind(type)) { + case TEK_Scalar: + buildScalarInit(init, D, lvalue); + return; + case TEK_Complex: { + assert(0 && "not implemented"); + return; + } + case TEK_Aggregate: + assert(0 && "not implemented"); + return; + } + llvm_unreachable("bad evaluation kind"); +} + +void CIRGenFunction::buildDecl(const Decl &D) { + switch (D.getKind()) { + case Decl::ImplicitConceptSpecialization: + case Decl::HLSLBuffer: + case Decl::UnnamedGlobalConstant: + case Decl::TopLevelStmt: + llvm_unreachable("NYI"); + case Decl::BuiltinTemplate: + case Decl::TranslationUnit: + case Decl::ExternCContext: + case Decl::Namespace: + case Decl::UnresolvedUsingTypename: + case Decl::ClassTemplateSpecialization: + case Decl::ClassTemplatePartialSpecialization: + case Decl::VarTemplateSpecialization: + case Decl::VarTemplatePartialSpecialization: + case Decl::TemplateTypeParm: + case Decl::UnresolvedUsingValue: + case Decl::NonTypeTemplateParm: + case Decl::CXXDeductionGuide: + case Decl::CXXMethod: + case Decl::CXXConstructor: + case Decl::CXXDestructor: + case Decl::CXXConversion: + case Decl::Field: + case Decl::MSProperty: + case Decl::IndirectField: + case Decl::ObjCIvar: + case Decl::ObjCAtDefsField: + case Decl::ParmVar: + case Decl::ImplicitParam: + case Decl::ClassTemplate: + case Decl::VarTemplate: + case Decl::FunctionTemplate: + case Decl::TypeAliasTemplate: + case Decl::TemplateTemplateParm: + case Decl::ObjCMethod: + case Decl::ObjCCategory: + case Decl::ObjCProtocol: + case Decl::ObjCInterface: + case Decl::ObjCCategoryImpl: + case Decl::ObjCImplementation: + case Decl::ObjCProperty: + case Decl::ObjCCompatibleAlias: + case Decl::PragmaComment: + case Decl::PragmaDetectMismatch: + case Decl::AccessSpec: + case Decl::LinkageSpec: + case Decl::Export: + case Decl::ObjCPropertyImpl: + case Decl::FileScopeAsm: + case Decl::Friend: + case Decl::FriendTemplate: + case Decl::Block: + case Decl::Captured: + case Decl::UsingShadow: + case Decl::ConstructorUsingShadow: + case Decl::ObjCTypeParam: + case Decl::Binding: + case Decl::UnresolvedUsingIfExists: + llvm_unreachable("Declaration should not be in declstmts!"); + case Decl::Record: // struct/union/class X; + case Decl::CXXRecord: // struct/union/class X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::Enum: // enum X; + assert(0 && "Not implemented"); + return; + case Decl::Function: // void X(); + case Decl::EnumConstant: // enum ? { X = ? } + case Decl::StaticAssert: // static_assert(X, ""); [C++0x] + case Decl::Label: // __label__ x; + case Decl::Import: + case Decl::MSGuid: // __declspec(uuid("...")) + case Decl::TemplateParamObject: + case Decl::OMPThreadPrivate: + case Decl::OMPAllocate: + case Decl::OMPCapturedExpr: + case Decl::OMPRequires: + case Decl::Empty: + case Decl::Concept: + case Decl::LifetimeExtendedTemporary: + case Decl::RequiresExprBody: + // None of these decls require codegen support. + return; + + case Decl::NamespaceAlias: + assert(0 && "Not implemented"); + return; + case Decl::Using: // using X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::UsingEnum: // using enum X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::UsingPack: + assert(0 && "Not implemented"); + return; + case Decl::UsingDirective: // using namespace X; [C++] + assert(0 && "Not implemented"); + return; + case Decl::Var: + case Decl::Decomposition: { + const VarDecl &VD = cast(D); + assert(VD.isLocalVarDecl() && + "Should not see file-scope variables inside a function!"); + buildVarDecl(VD); + if (auto *DD = dyn_cast(&VD)) + assert(0 && "Not implemented"); + + // FIXME: add this + // if (auto *DD = dyn_cast(&VD)) + // for (auto *B : DD->bindings()) + // if (auto *HD = B->getHoldingVar()) + // EmitVarDecl(*HD); + return; + } + + case Decl::OMPDeclareReduction: + case Decl::OMPDeclareMapper: + assert(0 && "Not implemented"); + + case Decl::Typedef: // typedef int X; + case Decl::TypeAlias: { // using X = int; [C++0x] + assert(0 && "Not implemented"); + } + } +} diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index a2dce48b4d8e..0beef73bc899 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -4,10 +4,12 @@ #include "clang/AST/GlobalDecl.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Value.h" using namespace cir; using namespace clang; +using namespace mlir::cir; static mlir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); @@ -20,18 +22,33 @@ static mlir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { return V; } -static CIRGenCallee buildDirectCallee(CIRGenFunction &CGF, GlobalDecl GD) { +static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); assert(!FD->getBuiltinID() && "Builtins NYI"); - auto CalleePtr = buildFunctionDeclPointer(CGF.CGM, GD); + auto CalleePtr = buildFunctionDeclPointer(CGM, GD); - assert(!CGF.CGM.getLangOpts().CUDA && "NYI"); + assert(!CGM.getLangOpts().CUDA && "NYI"); return CIRGenCallee::forDirect(CalleePtr, GD); } +// TODO: this can also be abstrated into common AST helpers +bool CIRGenFunction::hasBooleanRepresentation(QualType Ty) { + + if (Ty->isBooleanType()) + return true; + + if (const EnumType *ET = Ty->getAs()) + return ET->getDecl()->getIntegerType()->isBooleanType(); + + if (const AtomicType *AT = Ty->getAs()) + return hasBooleanRepresentation(AT->getValueType()); + + return false; +} + CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { E = E->IgnoreParens(); @@ -45,7 +62,7 @@ CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { auto FD = dyn_cast(DRE->getDecl()); assert(FD && "DeclRef referring to FunctionDecl onlything supported so far"); - return buildDirectCallee(*this, FD); + return buildDirectCallee(CGM, FD); } assert(!dyn_cast(E) && "NYI"); @@ -54,3 +71,470 @@ CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { assert(false && "Nothing else supported yet!"); } + +mlir::Value CIRGenFunction::buildToMemory(mlir::Value Value, QualType Ty) { + // Bool has a different representation in memory than in registers. + return Value; +} + +void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, + const Decl *InitDecl) { + // TODO: constant matrix type, volatile, non temporal, TBAA + buildStoreOfScalar(value, lvalue.getAddress(), false, lvalue.getType(), + lvalue.getBaseInfo(), InitDecl, false); +} + +void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, + bool Volatile, QualType Ty, + LValueBaseInfo BaseInfo, + const Decl *InitDecl, + bool isNontemporal) { + // TODO: PreserveVec3Type + // TODO: LValueIsSuitableForInlineAtomic ? + // TODO: TBAA + Value = buildToMemory(Value, Ty); + if (Ty->isAtomicType() || isNontemporal) { + assert(0 && "not implemented"); + } + + // Update the alloca with more info on initialization. + auto SrcAlloca = + dyn_cast_or_null(Addr.getPointer().getDefiningOp()); + if (InitDecl) { + InitStyle IS; + const VarDecl *VD = dyn_cast_or_null(InitDecl); + assert(VD && "VarDecl expected"); + if (VD->hasInit()) { + switch (VD->getInitStyle()) { + case VarDecl::ParenListInit: + llvm_unreachable("NYI"); + case VarDecl::CInit: + IS = InitStyle::cinit; + break; + case VarDecl::CallInit: + IS = InitStyle::callinit; + break; + case VarDecl::ListInit: + IS = InitStyle::listinit; + break; + } + SrcAlloca.setInitAttr(InitStyleAttr::get(builder.getContext(), IS)); + } + } + assert(currSrcLoc && "must pass in source location"); + builder.create(*currSrcLoc, Value, Addr.getPointer()); +} +void CIRGenFunction::buldStoreThroughLValue(RValue Src, LValue Dst, + const Decl *InitDecl) { + assert(Dst.isSimple() && "only implemented simple"); + // TODO: ObjC lifetime. + assert(Src.isScalar() && "Can't emit an agg store with this method"); + buildStoreOfScalar(Src.getScalarVal(), Dst, InitDecl); +} + +LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { + const NamedDecl *ND = E->getDecl(); + + assert(E->isNonOdrUse() != NOUR_Unevaluated && + "should not emit an unevaluated operand"); + + if (const auto *VD = dyn_cast(ND)) { + // Global Named registers access via intrinsics only + assert(VD->getStorageClass() != SC_Register && "not implemented"); + assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); + assert(!E->refersToEnclosingVariableOrCapture() && "not implemented"); + assert(!(VD->hasLinkage() || VD->isStaticDataMember()) && + "not implemented"); + assert(!VD->isEscapingByref() && "not implemented"); + assert(!VD->getType()->isReferenceType() && "not implemented"); + assert(symbolTable.count(VD) && "should be already mapped"); + + mlir::Value V = symbolTable.lookup(VD); + assert(V && "Name lookup must succeed"); + + LValue LV = LValue::makeAddr(Address(V, CharUnits::fromQuantity(4)), + VD->getType(), AlignmentSource::Decl); + return LV; + } + + llvm_unreachable("Unhandled DeclRefExpr?"); +} + +LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { + // Comma expressions just emit their LHS then their RHS as an l-value. + if (E->getOpcode() == BO_Comma) { + assert(0 && "not implemented"); + } + + if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) + assert(0 && "not implemented"); + + assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); + + // Note that in all of these cases, __block variables need the RHS + // evaluated first just in case the variable gets moved by the RHS. + + switch (CIRGenFunction::getEvaluationKind(E->getType())) { + case TEK_Scalar: { + assert(E->getLHS()->getType().getObjCLifetime() == + clang::Qualifiers::ObjCLifetime::OCL_None && + "not implemented"); + + RValue RV = buildAnyExpr(E->getRHS()); + LValue LV = buildLValue(E->getLHS()); + + SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; + buldStoreThroughLValue(RV, LV, nullptr /*InitDecl*/); + assert(!getContext().getLangOpts().OpenMP && + "last priv cond not implemented"); + return LV; + } + + case TEK_Complex: + assert(0 && "not implemented"); + case TEK_Aggregate: + assert(0 && "not implemented"); + } + llvm_unreachable("bad evaluation kind"); +} + +/// Given an expression of pointer type, try to +/// derive a more accurate bound on the alignment of the pointer. +Address CIRGenFunction::buildPointerWithAlignment(const Expr *E, + LValueBaseInfo *BaseInfo) { + // We allow this with ObjC object pointers because of fragile ABIs. + assert(E->getType()->isPointerType() || + E->getType()->isObjCObjectPointerType()); + E = E->IgnoreParens(); + + // Casts: + if (const CastExpr *CE = dyn_cast(E)) { + if (const auto *ECE = dyn_cast(CE)) + assert(0 && "not implemented"); + + switch (CE->getCastKind()) { + default: + assert(0 && "not implemented"); + // Nothing to do here... + case CK_LValueToRValue: + break; + } + } + + // Unary &. + if (const UnaryOperator *UO = dyn_cast(E)) { + assert(0 && "not implemented"); + // if (UO->getOpcode() == UO_AddrOf) { + // LValue LV = buildLValue(UO->getSubExpr()); + // if (BaseInfo) + // *BaseInfo = LV.getBaseInfo(); + // // TODO: TBBA info + // return LV.getAddress(); + // } + } + + // TODO: conditional operators, comma. + // Otherwise, use the alignment of the type. + CharUnits Align = CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo); + return Address(buildScalarExpr(E), Align); +} + +/// Perform the usual unary conversions on the specified +/// expression and compare the result against zero, returning an Int1Ty value. +mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *E) { + // TODO: PGO + if (const MemberPointerType *MPT = E->getType()->getAs()) { + assert(0 && "not implemented"); + } + + QualType BoolTy = getContext().BoolTy; + SourceLocation Loc = E->getExprLoc(); + // TODO: CGFPOptionsRAII for FP stuff. + assert(!E->getType()->isAnyComplexType() && + "complex to scalar not implemented"); + return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc); +} + +LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { + // __extension__ doesn't affect lvalue-ness. + assert(E->getOpcode() != UO_Extension && "not implemented"); + + switch (E->getOpcode()) { + default: + llvm_unreachable("Unknown unary operator lvalue!"); + case UO_Deref: { + QualType T = E->getSubExpr()->getType()->getPointeeType(); + assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); + + LValueBaseInfo BaseInfo; + // TODO: add TBAAInfo + Address Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo); + + // Tag 'load' with deref attribute. + if (auto loadOp = + dyn_cast<::mlir::cir::LoadOp>(Addr.getPointer().getDefiningOp())) { + loadOp.setIsDerefAttr(mlir::UnitAttr::get(builder.getContext())); + } + + LValue LV = LValue::makeAddr(Addr, T, BaseInfo); + // TODO: set addr space + // TODO: ObjC/GC/__weak write barrier stuff. + return LV; + } + case UO_Real: + case UO_Imag: { + assert(0 && "not implemented"); + } + case UO_PreInc: + case UO_PreDec: { + assert(0 && "not implemented"); + } + } +} + +/// Emit code to compute the specified expression which +/// can have any type. The result is returned as an RValue struct. +RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot, + bool ignoreResult) { + switch (CIRGenFunction::getEvaluationKind(E->getType())) { + case TEK_Scalar: + return RValue::get(buildScalarExpr(E)); + case TEK_Complex: + assert(0 && "not implemented"); + case TEK_Aggregate: + assert(0 && "not implemented"); + } + llvm_unreachable("bad evaluation kind"); +} + +RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, + ReturnValueSlot ReturnValue) { + assert(!E->getCallee()->getType()->isBlockPointerType() && "ObjC Blocks NYI"); + assert(!dyn_cast(E) && "NYI"); + assert(!dyn_cast(E) && "CUDA NYI"); + assert(!dyn_cast(E) && "NYI"); + + CIRGenCallee callee = buildCallee(E->getCallee()); + + assert(!callee.isBuiltin() && "builtins NYI"); + assert(!callee.isPsuedoDestructor() && "NYI"); + + return buildCall(E->getCallee()->getType(), callee, E, ReturnValue); +} + +RValue CIRGenFunction::buildCall(clang::QualType CalleeType, + const CIRGenCallee &OrigCallee, + const clang::CallExpr *E, + ReturnValueSlot ReturnValue, + mlir::Value Chain) { + // Get the actual function type. The callee type will always be a pointer to + // function type or a block pointer type. + assert(CalleeType->isFunctionPointerType() && + "Call must have function pointer type!"); + + auto *TargetDecl = OrigCallee.getAbstractInfo().getCalleeDecl().getDecl(); + (void)TargetDecl; + + CalleeType = getContext().getCanonicalType(CalleeType); + + auto PointeeType = cast(CalleeType)->getPointeeType(); + + CIRGenCallee Callee = OrigCallee; + + if (getLangOpts().CPlusPlus) + assert(!SanOpts.has(SanitizerKind::Function) && "Sanitizers NYI"); + + const auto *FnType = cast(PointeeType); + + assert(!SanOpts.has(SanitizerKind::CFIICall) && "Sanitizers NYI"); + + CallArgList Args; + + assert(!Chain && "FIX THIS"); + + // C++17 requires that we evaluate arguments to a call using assignment syntax + // right-to-left, and that we evaluate arguments to certain other operators + // left-to-right. Note that we allow this to override the order dictated by + // the calling convention on the MS ABI, which means that parameter + // destruction order is not necessarily reverse construction order. + // FIXME: Revisit this based on C++ committee response to unimplementability. + EvaluationOrder Order = EvaluationOrder::Default; + assert(!dyn_cast(E) && "Operators NYI"); + + buildCallArgs(Args, dyn_cast(FnType), E->arguments(), + E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); + + const CIRGenFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( + Args, FnType, /*ChainCall=*/Chain.getAsOpaquePointer()); + + // C99 6.5.2.2p6: + // If the expression that denotes the called function has a type that does + // not include a prototype, [the default argument promotions are performed]. + // If the number of arguments does not equal the number of parameters, the + // behavior is undefined. If the function is defined with at type that + // includes a prototype, and either the prototype ends with an ellipsis (, + // ...) or the types of the arguments after promotion are not compatible + // with the types of the parameters, the behavior is undefined. If the + // function is defined with a type that does not include a prototype, and + // the types of the arguments after promotion are not compatible with those + // of the parameters after promotion, the behavior is undefined [except in + // some trivial cases]. + // That is, in the general case, we should assume that a call through an + // unprototyped function type works like a *non-variadic* call. The way we + // make this work is to cast to the exxact type fo the promoted arguments. + // + // Chain calls use the same code path to add the inviisble chain parameter to + // the function type. + assert(!isa(FnType) && "NYI"); + // if (isa(FnType) || Chain) { + // mlir::FunctionType CalleeTy = getTypes().GetFunctionType(FnInfo); + // int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace(); + // CalleeTy = CalleeTy->getPointerTo(AS); + + // llvm::Value *CalleePtr = Callee.getFunctionPointer(); + // CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast"); + // Callee.setFunctionPointer(CalleePtr); + // } + + assert(!CGM.getLangOpts().HIP && "HIP NYI"); + + assert(!MustTailCall && "Must tail NYI"); + mlir::func::CallOp callOP = nullptr; + RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, callOP, + E == MustTailCall, E->getExprLoc()); + + assert(!getDebugInfo() && "Debug Info NYI"); + + return Call; +} + +/// EmitIgnoredExpr - Emit code to compute the specified expression, +/// ignoring the result. +void CIRGenFunction::buildIgnoredExpr(const Expr *E) { + if (E->isPRValue()) + return (void)buildAnyExpr(E); + + // Just emit it as an l-value and drop the result. + buildLValue(E); +} + +/// Emit code to compute a designator that specifies the location +/// of the expression. +/// FIXME: document this function better. +LValue CIRGenFunction::buildLValue(const Expr *E) { + // FIXME: ApplyDebugLocation DL(*this, E); + switch (E->getStmtClass()) { + default: { + emitError(getLoc(E->getExprLoc()), "l-value not implemented for '") + << E->getStmtClassName() << "'"; + assert(0 && "not implemented"); + } + case Expr::BinaryOperatorClass: + return buildBinaryOperatorLValue(cast(E)); + case Expr::DeclRefExprClass: + return buildDeclRefLValue(cast(E)); + case Expr::UnaryOperatorClass: + return buildUnaryOpLValue(cast(E)); + case Expr::ObjCPropertyRefExprClass: + llvm_unreachable("cannot emit a property reference directly"); + } + + return LValue::makeAddr(Address::invalid(), E->getType()); +} + +/// Emit an if on a boolean condition to the specified blocks. +/// FIXME: Based on the condition, this might try to simplify the codegen of +/// the conditional based on the branch. TrueCount should be the number of +/// times we expect the condition to evaluate to true based on PGO data. We +/// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr +/// for extra ideas). +mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, + mlir::Location loc, + const Stmt *thenS, + const Stmt *elseS) { + // TODO: scoped ApplyDebugLocation DL(*this, Cond); + // TODO: __builtin_unpredictable and profile counts? + cond = cond->IgnoreParens(); + mlir::Value condV = evaluateExprAsBool(cond); + mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); + + builder.create( + loc, condV, elseS, + /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // FIXME: abstract all this massive location handling elsewhere. + SmallVector locs; + if (loc.isa()) { + locs.push_back(loc); + locs.push_back(loc); + } else if (loc.isa()) { + auto fusedLoc = loc.cast(); + locs.push_back(fusedLoc.getLocations()[0]); + locs.push_back(fusedLoc.getLocations()[1]); + } + LexicalScopeContext lexScope{locs[0], locs[1], + builder.getInsertionBlock()}; + LexicalScopeGuard lexThenGuard{*this, &lexScope}; + resThen = buildStmt(thenS, /*useCurrentScope=*/true); + }, + /*elseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto locBegin = fusedLoc.getLocations()[2]; + auto locEnd = fusedLoc.getLocations()[3]; + LexicalScopeContext lexScope{locBegin, locEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexElseGuard{*this, &lexScope}; + resElse = buildStmt(elseS, /*useCurrentScope=*/true); + }); + + return mlir::LogicalResult::success(resThen.succeeded() && + resElse.succeeded()); +} + +mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, + QualType ty, mlir::Location loc, + CharUnits alignment) { + auto getAllocaInsertPositionOp = + [&](mlir::Block **insertBlock) -> mlir::Operation * { + auto *parentBlock = currLexScope->getEntryBlock(); + + auto lastAlloca = std::find_if( + parentBlock->rbegin(), parentBlock->rend(), + [](mlir::Operation &op) { return isa(&op); }); + + *insertBlock = parentBlock; + if (lastAlloca == parentBlock->rend()) + return nullptr; + return &*lastAlloca; + }; + + auto localVarTy = getCIRType(ty); + auto localVarPtrTy = + mlir::cir::PointerType::get(builder.getContext(), localVarTy); + + auto alignIntAttr = + mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), + alignment.getQuantity()); + + mlir::Value addr; + { + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::Block *insertBlock = nullptr; + mlir::Operation *insertOp = getAllocaInsertPositionOp(&insertBlock); + + if (insertOp) + builder.setInsertionPointAfter(insertOp); + else { + assert(insertBlock && "expected valid insertion block"); + // No previous alloca found, place this one in the beginning + // of the block. + builder.setInsertionPointToStart(insertBlock); + } + + addr = builder.create(loc, /*addr type*/ localVarPtrTy, + /*var type*/ localVarTy, name, + initStyle, alignIntAttr); + } + return addr; +} diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 62f45b002178..c63dc34f8f1e 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -13,14 +13,12 @@ using namespace clang; namespace { class ScalarExprEmitter : public StmtVisitor { - LLVM_ATTRIBUTE_UNUSED CIRGenFunction &CGF; - CIRGenModule &CGM; + CIRGenFunction &CGF; mlir::OpBuilder &Builder; public: - ScalarExprEmitter(CIRGenFunction &cgf, CIRGenModule &cgm, - mlir::OpBuilder &builder) - : CGF(cgf), CGM(cgm), Builder(builder) {} + ScalarExprEmitter(CIRGenFunction &cgf, mlir::OpBuilder &builder) + : CGF(cgf), Builder(builder) {} mlir::Value Visit(Expr *E) { return StmtVisitor::Visit(E); @@ -28,9 +26,9 @@ class ScalarExprEmitter : public StmtVisitor { /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { - LValue LV = CGM.buildLValue(E); - auto load = Builder.create(CGM.getLoc(E->getExprLoc()), - CGM.getCIRType(E->getType()), + LValue LV = CGF.buildLValue(E); + auto load = Builder.create(CGF.getLoc(E->getExprLoc()), + CGF.getCIRType(E->getType()), LV.getPointer()); // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); return load; @@ -53,24 +51,24 @@ class ScalarExprEmitter : public StmtVisitor { CastKind Kind = CE->getCastKind(); switch (Kind) { case CK_LValueToRValue: - assert(CGM.getASTContext().hasSameUnqualifiedType(E->getType(), DestTy)); + assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)); assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); return Visit(const_cast(E)); case CK_NullToPointer: { // FIXME: use MustVisitNullValue(E) and evaluate expr. // Note that DestTy is used as the MLIR type instead of a custom // nullptr type. - mlir::Type Ty = CGM.getCIRType(DestTy); + mlir::Type Ty = CGF.getCIRType(DestTy); return Builder.create( - CGM.getLoc(E->getExprLoc()), Ty, + CGF.getLoc(E->getExprLoc()), Ty, mlir::cir::NullAttr::get(Builder.getContext(), Ty)); } case CK_IntegralToBoolean: { return buildIntToBoolConversion(Visit(E), - CGM.getLoc(CE->getSourceRange())); + CGF.getLoc(CE->getSourceRange())); } default: - emitError(CGM.getLoc(CE->getExprLoc()), "cast kind not implemented: '") + emitError(CGF.getLoc(CE->getExprLoc()), "cast kind not implemented: '") << CE->getCastKindName() << "'"; assert(0 && "not implemented"); return nullptr; @@ -88,13 +86,13 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { assert(!llvm::isa(E->getType()) && "not implemented"); - return CGM.buildLValue(E->getSubExpr()).getPointer(); + return CGF.buildLValue(E->getSubExpr()).getPointer(); } mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { - mlir::Type Ty = CGM.getCIRType(E->getType()); + mlir::Type Ty = CGF.getCIRType(E->getType()); return Builder.create( - CGM.getLoc(E->getExprLoc()), Ty, Builder.getBoolAttr(E->getValue())); + CGF.getLoc(E->getExprLoc()), Ty, Builder.getBoolAttr(E->getValue())); } struct BinOpInfo { @@ -143,52 +141,52 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value buildMul(const BinOpInfo &Ops) { return Builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Mul, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); } mlir::Value buildDiv(const BinOpInfo &Ops) { return Builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Div, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Div, Ops.LHS, Ops.RHS); } mlir::Value buildRem(const BinOpInfo &Ops) { return Builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Rem, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); } mlir::Value buildAdd(const BinOpInfo &Ops) { return Builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Add, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS); } mlir::Value buildSub(const BinOpInfo &Ops) { return Builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); } mlir::Value buildShl(const BinOpInfo &Ops) { return Builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shl, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shl, Ops.LHS, Ops.RHS); } mlir::Value buildShr(const BinOpInfo &Ops) { return Builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shr, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shr, Ops.LHS, Ops.RHS); } mlir::Value buildAnd(const BinOpInfo &Ops) { return Builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::And, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::And, Ops.LHS, Ops.RHS); } mlir::Value buildXor(const BinOpInfo &Ops) { return Builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Xor, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); } mlir::Value buildOr(const BinOpInfo &Ops) { return Builder.create( - CGM.getLoc(Ops.Loc), CGM.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, Ops.LHS, Ops.RHS); } @@ -268,8 +266,8 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("unsupported"); } - return Builder.create(CGM.getLoc(BOInfo.Loc), - CGM.getCIRType(BOInfo.Ty), Kind, + return Builder.create(CGF.getLoc(BOInfo.Loc), + CGF.getCIRType(BOInfo.Ty), Kind, BOInfo.LHS, BOInfo.RHS); } @@ -282,8 +280,8 @@ class ScalarExprEmitter : public StmtVisitor { assert(0 && "not implemented"); } - return buildScalarConversion(Result, CGM.getASTContext().BoolTy, - E->getType(), E->getExprLoc()); + return buildScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), + E->getExprLoc()); } #define VISITCOMP(CODE) \ @@ -299,7 +297,7 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitExpr(Expr *E) { // Crashing here for "ScalarExprClassName"? Please implement // VisitScalarExprClassName(...) to get this working. - emitError(CGM.getLoc(E->getExprLoc()), "scalar exp no implemented: '") + emitError(CGF.getLoc(E->getExprLoc()), "scalar exp no implemented: '") << E->getStmtClassName() << "'"; assert(0 && "shouldn't be here!"); return {}; @@ -311,7 +309,7 @@ class ScalarExprEmitter : public StmtVisitor { // as a logical value again. // TODO: optimize this common case here or leave it for later // CIR passes? - mlir::Type boolTy = CGM.getCIRType(CGM.getASTContext().BoolTy); + mlir::Type boolTy = CGF.getCIRType(CGF.getContext().BoolTy); return Builder.create( loc, boolTy, mlir::cir::CastKind::int_to_bool, srcVal); } @@ -349,8 +347,8 @@ class ScalarExprEmitter : public StmtVisitor { assert(0 && "not implemented"); } - SrcType = CGM.getASTContext().getCanonicalType(SrcType); - DstType = CGM.getASTContext().getCanonicalType(DstType); + SrcType = CGF.getContext().getCanonicalType(SrcType); + DstType = CGF.getContext().getCanonicalType(DstType); if (SrcType == DstType) return Src; @@ -361,13 +359,13 @@ class ScalarExprEmitter : public StmtVisitor { // Handle conversions to bool first, they are special: comparisons against // 0. if (DstType->isBooleanType()) - return buildConversionToBool(Src, SrcType, CGM.getLoc(Loc)); + return buildConversionToBool(Src, SrcType, CGF.getLoc(Loc)); - mlir::Type DstTy = CGM.getCIRType(DstType); + mlir::Type DstTy = CGF.getCIRType(DstType); // Cast from half through float if half isn't a native type. if (SrcType->isHalfType() && - !CGM.getASTContext().getLangOpts().NativeHalfType) { + !CGF.getContext().getLangOpts().NativeHalfType) { assert(0 && "not implemented"); } @@ -412,7 +410,7 @@ class ScalarExprEmitter : public StmtVisitor { // Cast to half through float if half isn't a native type. if (DstType->isHalfType() && - !CGM.getASTContext().getLangOpts().NativeHalfType) { + !CGF.getContext().getLangOpts().NativeHalfType) { assert(0 && "not implemented"); } @@ -426,9 +424,9 @@ class ScalarExprEmitter : public StmtVisitor { // Leaves. mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { - mlir::Type Ty = CGM.getCIRType(E->getType()); + mlir::Type Ty = CGF.getCIRType(E->getType()); return Builder.create( - CGM.getLoc(E->getExprLoc()), Ty, + CGF.getLoc(E->getExprLoc()), Ty, Builder.getIntegerAttr(Ty, E->getValue())); } }; @@ -437,22 +435,36 @@ class ScalarExprEmitter : public StmtVisitor { /// Emit the computation of the specified expression of scalar type, /// ignoring the result. -mlir::Value CIRGenModule::buildScalarExpr(const Expr *E) { - assert(E && CIRGenFunction::hasScalarEvaluationKind(E->getType()) && +mlir::Value CIRGenFunction::buildScalarExpr(const Expr *E) { + assert(E && hasScalarEvaluationKind(E->getType()) && "Invalid scalar expression to emit"); - return ScalarExprEmitter(*CurCGF, *this, builder) - .Visit(const_cast(E)); + return ScalarExprEmitter(*this, builder).Visit(const_cast(E)); } /// Emit a conversion from the specified type to the specified destination /// type, both of which are CIR scalar types. -mlir::Value CIRGenModule::buildScalarConversion(mlir::Value Src, QualType SrcTy, - QualType DstTy, - SourceLocation Loc) { +mlir::Value CIRGenFunction::buildScalarConversion(mlir::Value Src, + QualType SrcTy, + QualType DstTy, + SourceLocation Loc) { assert(CIRGenFunction::hasScalarEvaluationKind(SrcTy) && CIRGenFunction::hasScalarEvaluationKind(DstTy) && "Invalid scalar expression to emit"); - return ScalarExprEmitter(*CurCGF, *this, builder) + return ScalarExprEmitter(*this, builder) .buildScalarConversion(Src, SrcTy, DstTy, Loc); } + +/// If the specified expression does not fold +/// to a constant, or if it does but contains a label, return false. If it +/// constant folds return true and set the boolean result in Result. +bool CIRGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, + bool &ResultBool, + bool AllowLabels) { + llvm::APSInt ResultInt; + if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels)) + return false; + + ResultBool = ResultInt.getBoolValue(); + return true; +} diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 773d3f8d2e9b..8f3141d79d18 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -16,13 +16,16 @@ #include "clang/AST/ExprObjC.h" #include "clang/Basic/TargetInfo.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" using namespace cir; using namespace clang; +using namespace mlir::cir; -CIRGenFunction::CIRGenFunction(CIRGenModule &CGM) - : CGM{CGM}, CurFuncDecl(nullptr), SanOpts(CGM.getLangOpts().Sanitize) {} +CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, mlir::OpBuilder &builder) + : CGM{CGM}, builder(builder), CurFuncDecl(nullptr), + SanOpts(CGM.getLangOpts().Sanitize) {} clang::ASTContext &CIRGenFunction::getContext() const { return CGM.getASTContext(); @@ -87,203 +90,283 @@ TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { } } -static bool hasInAllocaArgs(CIRGenModule &CGM, CallingConv ExplicitCC, - ArrayRef ArgTypes) { - assert(ExplicitCC != CC_Swift && ExplicitCC != CC_SwiftAsync && "Swift NYI"); - assert(!CGM.getTarget().getCXXABI().isMicrosoft() && "MSABI NYI"); - - return false; +mlir::Type CIRGenFunction::convertType(QualType T) { + return CGM.getTypes().ConvertType(T); } -void CIRGenFunction::buildCallArgs( - CallArgList &Args, PrototypeWrapper Prototype, - llvm::iterator_range ArgRange, - AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { - - llvm::SmallVector ArgTypes; - - assert((ParamsToSkip == 0 || Prototype.P) && - "Can't skip parameters if type info is not provided"); - - // This variable only captures *explicitly* written conventions, not those - // applied by default via command line flags or target defaults, such as - // thiscall, appcs, stdcall via -mrtd, etc. Computing that correctly would - // require knowing if this is a C++ instance method or being able to see - // unprotyped FunctionTypes. - CallingConv ExplicitCC = CC_C; - - // First, if a prototype was provided, use those argument types. - bool IsVariadic = false; - if (Prototype.P) { - const auto *MD = Prototype.P.dyn_cast(); - assert(!MD && "ObjCMethodDecl NYI"); - - const auto *FPT = Prototype.P.get(); - IsVariadic = FPT->isVariadic(); - assert(!IsVariadic && "Variadic functions NYI"); - ExplicitCC = FPT->getExtInfo().getCC(); - ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, - FPT->param_type_end()); - } +mlir::LogicalResult CIRGenFunction::buildFunctionBody(const Stmt *Body) { + const CompoundStmt *S = dyn_cast(Body); + assert(S && "expected compound stmt"); - // If we still have any arguments, emit them using the type of the argument. - for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size())) { - assert(!IsVariadic && "Variadic functions NYI"); - ArgTypes.push_back(A->getType()); - }; - assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); - - // We must evaluate arguments from right to left in the MS C++ ABI, because - // arguments are destroyed left to right in the callee. As a special case, - // there are certain language constructs taht require left-to-right - // evaluation, and in those cases we consider the evaluation order requirement - // to trump the "destruction order is reverse construction order" guarantee. - bool LeftToRight = true; - assert(!CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() && - "MSABI NYI"); - assert(!hasInAllocaArgs(CGM, ExplicitCC, ArgTypes) && "NYI"); - - // Evaluate each argument in the appropriate order. - size_t CallArgsStart = Args.size(); - for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { - unsigned Idx = LeftToRight ? I : E - I - 1; - CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; - unsigned InitialArgSize = Args.size(); - assert(!isa(*Arg) && "NYI"); - assert(!isa(AC.getDecl()) && "NYI"); - - buildCallArg(Args, *Arg, ArgTypes[Idx]); - // In particular, we depend on it being the last arg in Args, and the - // objectsize bits depend on there only being one arg if !LeftToRight. - assert(InitialArgSize + 1 == Args.size() && - "The code below depends on only adding one arg per buildCallArg"); - (void)InitialArgSize; - // Since pointer argument are never emitted as LValue, it is safe to emit - // non-null argument check for r-value only. - assert(!SanOpts.has(SanitizerKind::NonnullAttribute) && "Sanitizers NYI"); - assert(!SanOpts.has(SanitizerKind::NullabilityArg) && "Sanitizers NYI"); - } + // We start with function level scope for variables. + SymTableScopeTy varScope(symbolTable); + return buildCompoundStmtWithoutScope(*S); +} - if (!LeftToRight) { - // Un-reverse the arguments we just evaluated so they match up with the CIR - // function. - std::reverse(Args.begin() + CallArgsStart, Args.end()); - } +mlir::Location CIRGenFunction::getLoc(SourceLocation SLoc) { + const SourceManager &SM = getContext().getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(SLoc); + StringRef Filename = PLoc.getFilename(); + return mlir::FileLineColLoc::get(builder.getStringAttr(Filename), + PLoc.getLine(), PLoc.getColumn()); } -/// Emit code to compute the specified expression which -/// can have any type. The result is returned as an RValue struct. -RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot, - bool ignoreResult) { - switch (CIRGenFunction::getEvaluationKind(E->getType())) { - case TEK_Scalar: - return RValue::get(CGM.buildScalarExpr(E)); - case TEK_Complex: - assert(0 && "not implemented"); - case TEK_Aggregate: - assert(0 && "not implemented"); - } - llvm_unreachable("bad evaluation kind"); +mlir::Location CIRGenFunction::getLoc(SourceRange SLoc) { + mlir::Location B = getLoc(SLoc.getBegin()); + mlir::Location E = getLoc(SLoc.getEnd()); + SmallVector locs = {B, E}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); } -RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, - ReturnValueSlot ReturnValue) { - assert(!E->getCallee()->getType()->isBlockPointerType() && "ObjC Blocks NYI"); - assert(!dyn_cast(E) && "NYI"); - assert(!dyn_cast(E) && "CUDA NYI"); - assert(!dyn_cast(E) && "NYI"); +mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) { + SmallVector locs = {lhs, rhs}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); +} - CIRGenCallee callee = buildCallee(E->getCallee()); +/// Return true if the statement contains a label in it. If +/// this statement is not executed normally, it not containing a label means +/// that we can just remove the code. +bool CIRGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) { + // Null statement, not a label! + if (!S) + return false; - assert(!callee.isBuiltin() && "builtins NYI"); - assert(!callee.isPsuedoDestructor() && "NYI"); + // If this is a label, we have to emit the code, consider something like: + // if (0) { ... foo: bar(); } goto foo; + // + // TODO: If anyone cared, we could track __label__'s, since we know that you + // can't jump to one from outside their declared region. + if (isa(S)) + return true; + + // If this is a case/default statement, and we haven't seen a switch, we + // have to emit the code. + if (isa(S) && !IgnoreCaseStmts) + return true; + + // If this is a switch statement, we want to ignore cases below it. + if (isa(S)) + IgnoreCaseStmts = true; - return buildCall(E->getCallee()->getType(), callee, E, ReturnValue); + // Scan subexpressions for verboten labels. + for (const Stmt *SubStmt : S->children()) + if (ContainsLabel(SubStmt, IgnoreCaseStmts)) + return true; + + return false; } -RValue CIRGenFunction::buildCall(clang::QualType CalleeType, - const CIRGenCallee &OrigCallee, - const clang::CallExpr *E, - ReturnValueSlot ReturnValue, - mlir::Value Chain) { - // Get the actual function type. The callee type will always be a pointer to - // function type or a block pointer type. - assert(CalleeType->isFunctionPointerType() && - "Call must have function pointer type!"); - - CalleeType = getContext().getCanonicalType(CalleeType); - - auto PointeeType = cast(CalleeType)->getPointeeType(); - - CIRGenCallee Callee = OrigCallee; - - if (getLangOpts().CPlusPlus) - assert(!SanOpts.has(SanitizerKind::Function) && "Sanitizers NYI"); - - const auto *FnType = cast(PointeeType); - - assert(!SanOpts.has(SanitizerKind::CFIICall) && "Sanitizers NYI"); - - CallArgList Args; - - assert(!Chain && "FIX THIS"); - - // C++17 requires that we evaluate arguments to a call using assignment syntax - // right-to-left, and that we evaluate arguments to certain other operators - // left-to-right. Note that we allow this to override the order dictated by - // the calling convention on the MS ABI, which means that parameter - // destruction order is not necessarily reverse construction order. - // FIXME: Revisit this based on C++ committee response to unimplementability. - EvaluationOrder Order = EvaluationOrder::Default; - assert(!dyn_cast(E) && "Operators NYI"); - - buildCallArgs(Args, dyn_cast(FnType), E->arguments(), - E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); - - const CIRGenFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( - Args, FnType, /*ChainCall=*/Chain.getAsOpaquePointer()); - - // C99 6.5.2.2p6: - // If the expression that denotes the called function has a type that does - // not include a prototype, [the default argument promotions are performed]. - // If the number of arguments does not equal the number of parameters, the - // behavior is undefined. If the function is defined with at type that - // includes a prototype, and either the prototype ends with an ellipsis (, - // ...) or the types of the arguments after promotion are not compatible - // with the types of the parameters, the behavior is undefined. If the - // function is defined with a type that does not include a prototype, and - // the types of the arguments after promotion are not compatible with those - // of the parameters after promotion, the behavior is undefined [except in - // some trivial cases]. - // That is, in the general case, we should assume that a call through an - // unprototyped function type works like a *non-variadic* call. The way we - // make this work is to cast to the exxact type fo the promoted arguments. - // - // Chain calls use the same code path to add the inviisble chain parameter to - // the function type. - assert(!isa(FnType) && "NYI"); - // if (isa(FnType) || Chain) { - // mlir::FunctionType CalleeTy = getTypes().GetFunctionType(FnInfo); - // int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace(); - // CalleeTy = CalleeTy->getPointerTo(AS); +/// If the specified expression does not fold +/// to a constant, or if it does but contains a label, return false. If it +/// constant folds return true and set the folded value. +bool CIRGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, + llvm::APSInt &ResultInt, + bool AllowLabels) { + // FIXME: Rename and handle conversion of other evaluatable things + // to bool. + Expr::EvalResult Result; + if (!Cond->EvaluateAsInt(Result, getContext())) + return false; // Not foldable, not integer or not fully evaluatable. + + llvm::APSInt Int = Result.Val.getInt(); + if (!AllowLabels && ContainsLabel(Cond)) + return false; // Contains a label. + + ResultInt = Int; + return true; +} - // llvm::Value *CalleePtr = Callee.getFunctionPointer(); - // CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast"); - // Callee.setFunctionPointer(CalleePtr); - // } +mlir::Type CIRGenFunction::getCIRType(const QualType &type) { + return CGM.getCIRType(type); +} - assert(!CGM.getLangOpts().HIP && "HIP NYI"); +void CIRGenFunction::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, + CharUnits alignment) { + auto addr = + buildAlloca("__retval", InitStyle::uninitialized, ty, loc, alignment); + FnRetAlloca = addr; +} - assert(!MustTailCall && "Must tail NYI"); - mlir::func::CallOp callOP = nullptr; - RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, callOP, - E == MustTailCall, E->getExprLoc()); +mlir::LogicalResult CIRGenFunction::declare(const Decl *var, QualType ty, + mlir::Location loc, + CharUnits alignment, + mlir::Value &addr, bool isParam) { + const auto *namedVar = dyn_cast_or_null(var); + assert(namedVar && "Needs a named decl"); + assert(!symbolTable.count(var) && "not supposed to be available just yet"); - assert(!getDebugInfo() && "Debug Info NYI"); + addr = buildAlloca(namedVar->getName(), + isParam ? InitStyle::paraminit : InitStyle::uninitialized, + ty, loc, alignment); - return Call; + symbolTable.insert(var, addr); + return mlir::success(); } -mlir::Type CIRGenFunction::convertType(QualType T) { - return CGM.getTypes().ConvertType(T); +/// All scope related cleanup needed: +/// - Patching up unsolved goto's. +/// - Build all cleanup code and insert yield/returns. +void CIRGenFunction::LexicalScopeGuard::cleanup() { + auto &builder = CGF.builder; + auto *localScope = CGF.currLexScope; + + // Handle pending gotos and the solved labels in this scope. + while (!localScope->PendingGotos.empty()) { + auto gotoInfo = localScope->PendingGotos.back(); + // FIXME: Currently only support resolving goto labels inside the + // same lexical ecope. + assert(localScope->SolvedLabels.count(gotoInfo.second) && + "goto across scopes not yet supported"); + + // The goto in this lexical context actually maps to a basic + // block. + auto g = cast(gotoInfo.first); + g.setSuccessor(CGF.LabelMap[gotoInfo.second].getBlock()); + localScope->PendingGotos.pop_back(); + } + localScope->SolvedLabels.clear(); + + // Cleanup are done right before codegen resume a scope. This is where + // objects are destroyed. + unsigned curLoc = 0; + for (auto *retBlock : localScope->getRetBlocks()) { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPointToEnd(retBlock); + mlir::Location retLoc = *localScope->getRetLocs()[curLoc]; + curLoc++; + + // TODO: insert actual scope cleanup HERE (dtors and etc) + + // If there's anything to return, load it first. + if (CGF.FnRetTy.has_value()) { + auto val = builder.create(retLoc, *CGF.FnRetTy, *CGF.FnRetAlloca); + builder.create(retLoc, llvm::ArrayRef(val.getResult())); + } else { + builder.create(retLoc); + } + } + + auto insertCleanupAndLeave = [&](mlir::Block *InsPt) { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPointToEnd(InsPt); + // TODO: insert actual scope cleanup (dtors and etc) + if (localScope->Depth != 0) // end of any local scope != function + builder.create(localScope->EndLoc); + else + builder.create(localScope->EndLoc); + }; + + // If a cleanup block has been created at some point, branch to it + // and set the insertion point to continue at the cleanup block. + // Terminators are then inserted either in the cleanup block or + // inline in this current block. + auto *cleanupBlock = localScope->getCleanupBlock(builder); + if (cleanupBlock) + insertCleanupAndLeave(cleanupBlock); + + // Now deal with any pending block wrap up like implicit end of + // scope. + + // If a terminator is already present in the current block, nothing + // else to do here. + bool entryBlock = builder.getInsertionBlock()->isEntryBlock(); + auto *currBlock = builder.getBlock(); + bool hasTerminator = + !currBlock->empty() && + currBlock->back().hasTrait(); + if (hasTerminator) + return; + + // An empty non-entry block has nothing to offer. + if (!entryBlock && currBlock->empty()) { + currBlock->erase(); + return; + } + + // If there's a cleanup block, branch to it, nothing else to do. + if (cleanupBlock) { + builder.create(currBlock->back().getLoc(), cleanupBlock); + return; + } + + // No pre-existent cleanup block, emit cleanup code and yield/return. + insertCleanupAndLeave(currBlock); +} + +mlir::FuncOp CIRGenFunction::buildFunction(const FunctionDecl *FD) { + // Create a scope in the symbol table to hold variable declarations. + SymTableScopeTy varScope(symbolTable); + + const CXXMethodDecl *MD = dyn_cast(FD); + assert(!MD && "methods not implemented"); + auto fnLoc = getLoc(FD->getSourceRange()); + + FnRetQualTy = FD->getReturnType(); + mlir::TypeRange FnTyRange = {}; + if (!FnRetQualTy->isVoidType()) { + FnRetTy = getCIRType(FnRetQualTy); + } + auto funcType = getTypes().GetFunctionType(GlobalDecl(FD)); + + mlir::FuncOp function = mlir::FuncOp::create(fnLoc, FD->getName(), funcType); + if (!function) + return nullptr; + + // In MLIR the entry block of the function is special: it must have the + // same argument list as the function itself. + mlir::Block *entryBlock = function.addEntryBlock(); + + // Set the insertion point in the builder to the beginning of the + // function body, it will be used throughout the codegen to create + // operations in this function. + builder.setInsertionPointToStart(entryBlock); + auto FnBeginLoc = getLoc(FD->getBody()->getEndLoc()); + auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); + + // Initialize lexical scope information. + { + LexicalScopeContext lexScope{FnBeginLoc, FnEndLoc, + builder.getInsertionBlock()}; + LexicalScopeGuard scopeGuard{*this, &lexScope}; + + // Declare all the function arguments in the symbol table. + for (const auto nameValue : + llvm::zip(FD->parameters(), entryBlock->getArguments())) { + auto *paramVar = std::get<0>(nameValue); + auto paramVal = std::get<1>(nameValue); + auto alignment = getContext().getDeclAlign(paramVar); + auto paramLoc = getLoc(paramVar->getSourceRange()); + paramVal.setLoc(paramLoc); + + mlir::Value addr; + if (failed(declare(paramVar, paramVar->getType(), paramLoc, alignment, + addr, true /*param*/))) + return nullptr; + // Location of the store to the param storage tracked as beginning of + // the function body. + auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); + builder.create(fnBodyBegin, paramVal, addr); + } + assert(builder.getInsertionBlock() && "Should be valid"); + + // When the current function is not void, create an address to store the + // result value. + if (FnRetTy.has_value()) + buildAndUpdateRetAlloca(FnRetQualTy, FnEndLoc, + CGM.getNaturalTypeAlignment(FnRetQualTy)); + + // Emit the body of the function. + if (mlir::failed(buildFunctionBody(FD->getBody()))) { + function.erase(); + return nullptr; + } + assert(builder.getInsertionBlock() && "Should be valid"); + } + + if (mlir::failed(function.verifyBody())) + return nullptr; + + return function; } diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 9e4731881724..f7ebc4f61f3f 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -42,7 +42,216 @@ namespace cir { enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; class CIRGenFunction { + CIRGenModule &CGM; + /// The builder is a helper class to create IR inside a function. The + /// builder is stateful, in particular it keeps an "insertion point": this + /// is where the next operations will be introduced. + mlir::OpBuilder &builder; + + /// ------- + /// Goto + /// ------- + + /// A jump destination is an abstract label, branching to which may + /// require a jump out through normal cleanups. + struct JumpDest { + JumpDest() = default; + JumpDest(mlir::Block *Block) : Block(Block) {} + + bool isValid() const { return Block != nullptr; } + mlir::Block *getBlock() const { return Block; } + mlir::Block *Block = nullptr; + }; + + /// Track mlir Blocks for each C/C++ label. + llvm::DenseMap LabelMap; + JumpDest &getJumpDestForLabel(const clang::LabelDecl *D); + + /// ------- + /// Lexical Scope: to be read as in the meaning in CIR, a scope is always + /// related with initialization and destruction of objects. + /// ------- + + // Represents a cir.scope, cir.if, and then/else regions. I.e. lexical + // scopes that require cleanups. + struct LexicalScopeContext { + private: + // Block containing cleanup code for things initialized in this + // lexical context (scope). + mlir::Block *CleanupBlock = nullptr; + + // Points to scope entry block. This is useful, for instance, for + // helping to insert allocas before finalizing any recursive codegen + // from switches. + mlir::Block *EntryBlock; + + // FIXME: perhaps we can use some info encoded in operations. + enum Kind { + Regular, // cir.if, cir.scope, if_regions + Switch // cir.switch + } ScopeKind = Regular; + + public: + unsigned Depth = 0; + bool HasReturn = false; + LexicalScopeContext(mlir::Location b, mlir::Location e, mlir::Block *eb) + : EntryBlock(eb), BeginLoc(b), EndLoc(e) {} + ~LexicalScopeContext() = default; + + // --- + // Kind + // --- + bool isRegular() { return ScopeKind == Kind::Regular; } + bool isSwitch() { return ScopeKind == Kind::Switch; } + void setAsSwitch() { ScopeKind = Kind::Switch; } + + // --- + // Goto handling + // --- + + // Lazy create cleanup block or return what's available. + mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) { + if (CleanupBlock) + return getCleanupBlock(builder); + return createCleanupBlock(builder); + } + + mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) { + return CleanupBlock; + } + mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) { + { + // Create the cleanup block but dont hook it up around just yet. + mlir::OpBuilder::InsertionGuard guard(builder); + CleanupBlock = builder.createBlock(builder.getBlock()->getParent()); + } + assert(builder.getInsertionBlock() && "Should be valid"); + return CleanupBlock; + } + + // Goto's introduced in this scope but didn't get fixed. + llvm::SmallVector, 4> + PendingGotos; + + // Labels solved inside this scope. + llvm::SmallPtrSet SolvedLabels; + + // --- + // Return handling + // --- + + private: + // On switches we need one return block per region, since cases don't + // have their own scopes but are distinct regions nonetheless. + llvm::SmallVector RetBlocks; + llvm::SmallVector> RetLocs; + unsigned int CurrentSwitchRegionIdx = -1; + + // There's usually only one ret block per scope, but this needs to be + // get or create because of potential unreachable return statements, note + // that for those, all source location maps to the first one found. + mlir::Block *createRetBlock(CIRGenFunction &CGF, mlir::Location loc) { + assert((isSwitch() || RetBlocks.size() == 0) && + "only switches can hold more than one ret block"); + + // Create the cleanup block but dont hook it up around just yet. + mlir::OpBuilder::InsertionGuard guard(CGF.builder); + auto *b = CGF.builder.createBlock(CGF.builder.getBlock()->getParent()); + RetBlocks.push_back(b); + RetLocs.push_back(loc); + return b; + } + + public: + void updateCurrentSwitchCaseRegion() { CurrentSwitchRegionIdx++; } + llvm::ArrayRef getRetBlocks() { return RetBlocks; } + llvm::ArrayRef> getRetLocs() { + return RetLocs; + } + + mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) { + unsigned int regionIdx = 0; + if (isSwitch()) + regionIdx = CurrentSwitchRegionIdx; + if (regionIdx >= RetBlocks.size()) + return createRetBlock(CGF, loc); + return &*RetBlocks.back(); + } + + // --- + // Scope entry block tracking + // --- + mlir::Block *getEntryBlock() { return EntryBlock; } + + mlir::Location BeginLoc, EndLoc; + }; + + class LexicalScopeGuard { + CIRGenFunction &CGF; + LexicalScopeContext *OldVal = nullptr; + + public: + LexicalScopeGuard(CIRGenFunction &c, LexicalScopeContext *L) : CGF(c) { + if (CGF.currLexScope) { + OldVal = CGF.currLexScope; + L->Depth++; + } + CGF.currLexScope = L; + } + + LexicalScopeGuard(const LexicalScopeGuard &) = delete; + LexicalScopeGuard &operator=(const LexicalScopeGuard &) = delete; + LexicalScopeGuard &operator=(LexicalScopeGuard &&other) = delete; + + void cleanup(); + void restore() { CGF.currLexScope = OldVal; } + ~LexicalScopeGuard() { + cleanup(); + restore(); + } + }; + + LexicalScopeContext *currLexScope = nullptr; + + /// Declare a variable in the current scope, return success if the variable + /// wasn't declared yet. + mlir::LogicalResult declare(const clang::Decl *var, clang::QualType ty, + mlir::Location loc, clang::CharUnits alignment, + mlir::Value &addr, bool isParam = false); + mlir::Value buildAlloca(llvm::StringRef name, mlir::cir::InitStyle initStyle, + clang::QualType ty, mlir::Location loc, + clang::CharUnits alignment); + void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, + clang::CharUnits alignment); + + /// ------- + /// Source Location tracking + /// ------- + + /// Use to track source locations across nested visitor traversals. + /// Always use a `SourceLocRAIIObject` to change currSrcLoc. + std::optional currSrcLoc; + class SourceLocRAIIObject { + CIRGenFunction &P; + std::optional OldVal; + + public: + SourceLocRAIIObject(CIRGenFunction &p, mlir::Location Value) : P(p) { + if (P.currSrcLoc) + OldVal = P.currSrcLoc; + P.currSrcLoc = Value; + } + + /// Can be used to restore the state early, before the dtor + /// is run. + void restore() { P.currSrcLoc = OldVal; } + ~SourceLocRAIIObject() { restore(); } + }; + public: + using SymTableScopeTy = + llvm::ScopedHashTableScope; + enum class EvaluationOrder { ///! No langauge constraints on evaluation order. Default, @@ -56,8 +265,6 @@ class CIRGenFunction { std::optional FnRetTy; std::optional FnRetAlloca; - CIRGenModule &CGM; - // Holds the Decl for the current outermost non-closure context const clang::Decl *CurFuncDecl; @@ -67,9 +274,19 @@ class CIRGenFunction { clang::ASTContext &getContext() const; + mlir::OpBuilder &getBuilder() { return builder; } + /// Sanitizers enabled for this function. clang::SanitizerSet SanOpts; + /// The symbol table maps a variable name to a value in the current scope. + /// Entering a function creates a new scope, and the function arguments are + /// added to the mapping. When the processing of a function is terminated, + /// the scope is destroyed and the mappings created in this scope are + /// dropped. + using SymTableTy = llvm::ScopedHashTable; + SymTableTy symbolTable; + /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); @@ -81,10 +298,17 @@ class CIRGenFunction { return getEvaluationKind(T) == TEK_Aggregate; } - CIRGenFunction(CIRGenModule &CGM); + CIRGenFunction(CIRGenModule &CGM, mlir::OpBuilder &builder); CIRGenTypes &getTypes() const { return CGM.getTypes(); } + /// Helpers to convert Clang's SourceLocation to a MLIR Location. + mlir::Location getLoc(clang::SourceLocation SLoc); + + mlir::Location getLoc(clang::SourceRange SLoc); + + mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs); + const clang::LangOptions &getLangOpts() const { return CGM.getLangOpts(); } // TODO: This is currently just a dumb stub. But we want to be able to clearly @@ -166,6 +390,30 @@ class CIRGenFunction { AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); + mlir::LogicalResult buildFunctionBody(const clang::Stmt *Body); + + // Build CIR for a statement. useCurrentScope should be true if no + // new scopes need be created when finding a compound statement. + mlir::LogicalResult buildStmt(const clang::Stmt *S, bool useCurrentScope); + + mlir::LogicalResult buildSimpleStmt(const clang::Stmt *S, + bool useCurrentScope); + + mlir::LogicalResult buildWhileStmt(const clang::WhileStmt &S); + mlir::LogicalResult buildDoStmt(const clang::DoStmt &S); + mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); + + mlir::LogicalResult buildCompoundStmt(const clang::CompoundStmt &S); + + mlir::LogicalResult + buildCompoundStmtWithoutScope(const clang::CompoundStmt &S); + + /// EmitIgnoredExpr - Emit code to compute the specified expression, + /// ignoring the result. + void buildIgnoredExpr(const clang::Expr *E); + + mlir::LogicalResult buildDeclStmt(const clang::DeclStmt &S); + /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type. /// TODO: What's the equivalent for MLIR? Currently we're only using this for /// void types so it just returns RValue::get(nullptr) but it'll need @@ -173,6 +421,174 @@ class CIRGenFunction { RValue GetUndefRValue(clang::QualType Ty); mlir::Type convertType(clang::QualType T); + + mlir::LogicalResult buildIfStmt(const clang::IfStmt &S); + + mlir::LogicalResult buildReturnStmt(const clang::ReturnStmt &S); + + mlir::LogicalResult buildGotoStmt(const clang::GotoStmt &S); + + mlir::LogicalResult buildLabel(const clang::LabelDecl *D); + mlir::LogicalResult buildLabelStmt(const clang::LabelStmt &S); + + mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); + + /// Emit code to compute a designator that specifies the location + /// of the expression. + /// FIXME: document this function better. + LValue buildLValue(const clang::Expr *E); + + void buildDecl(const clang::Decl &D); + + /// If the specified expression does not fold + /// to a constant, or if it does but contains a label, return false. If it + /// constant folds return true and set the boolean result in Result. + bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond, bool &ResultBool, + bool AllowLabels); + + /// Return true if the statement contains a label in it. If + /// this statement is not executed normally, it not containing a label means + /// that we can just remove the code. + bool ContainsLabel(const clang::Stmt *S, bool IgnoreCaseStmts = false); + + /// If the specified expression does not fold + /// to a constant, or if it does but contains a label, return false. If it + /// constant folds return true and set the folded value. + bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond, + llvm::APSInt &ResultInt, bool AllowLabels); + + /// Emit an if on a boolean condition to the specified blocks. + /// FIXME: Based on the condition, this might try to simplify the codegen of + /// the conditional based on the branch. TrueCount should be the number of + /// times we expect the condition to evaluate to true based on PGO data. We + /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr + /// for extra ideas). + mlir::LogicalResult buildIfOnBoolExpr(const clang::Expr *cond, + mlir::Location loc, + const clang::Stmt *thenS, + const clang::Stmt *elseS); + + /// Emit the computation of the specified expression of scalar type, + /// ignoring the result. + mlir::Value buildScalarExpr(const clang::Expr *E); + + mlir::Type getCIRType(const clang::QualType &type); + + mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S, + mlir::Type condType, + mlir::cir::CaseAttr &caseEntry); + + mlir::LogicalResult buildDefaultStmt(const clang::DefaultStmt &S, + mlir::Type condType, + mlir::cir::CaseAttr &caseEntry); + + struct AutoVarEmission { + const clang::VarDecl *Variable; + /// The address of the alloca for languages with explicit address space + /// (e.g. OpenCL) or alloca casted to generic pointer for address space + /// agnostic languages (e.g. C++). Invalid if the variable was emitted + /// as a global constant. + Address Addr; + + /// True if the variable is of aggregate type and has a constant + /// initializer. + bool IsConstantAggregate; + + struct Invalid {}; + AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {} + + AutoVarEmission(const clang::VarDecl &variable) + : Variable(&variable), Addr(Address::invalid()), + IsConstantAggregate(false) {} + + static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } + /// Returns the raw, allocated address, which is not necessarily + /// the address of the object itself. It is casted to default + /// address space for address space agnostic languages. + Address getAllocatedAddress() const { return Addr; } + }; + /// Emit the alloca and debug information for a + /// local variable. Does not emit initialization or destruction. + AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D); + + void buildAutoVarInit(const AutoVarEmission &emission); + + void buildAutoVarCleanups(const AutoVarEmission &emission); + + void buildStoreOfScalar(mlir::Value value, LValue lvalue, + const clang::Decl *InitDecl); + + void buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, + clang::QualType Ty, LValueBaseInfo BaseInfo, + const clang::Decl *InitDecl, bool isNontemporal); + + mlir::Value buildToMemory(mlir::Value Value, clang::QualType Ty); + + /// Store the specified rvalue into the specified + /// lvalue, where both are guaranteed to the have the same type, and that type + /// is 'Ty'. + void buldStoreThroughLValue(RValue Src, LValue Dst, + const clang::Decl *InitDecl); + + mlir::LogicalResult buildBranchThroughCleanup(JumpDest &Dest, + clang::LabelDecl *L, + mlir::Location Loc); + + void buildScalarInit(const clang::Expr *init, const clang::ValueDecl *D, + LValue lvalue); + + LValue buildDeclRefLValue(const clang::DeclRefExpr *E); + + LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E); + + LValue buildUnaryOpLValue(const clang::UnaryOperator *E); + + /// Given an expression of pointer type, try to + /// derive a more accurate bound on the alignment of the pointer. + Address buildPointerWithAlignment(const clang::Expr *E, + LValueBaseInfo *BaseInfo); + + /// Emit an expression as an initializer for an object (variable, field, etc.) + /// at the given location. The expression is not necessarily the normal + /// initializer for the object, and the address is not necessarily + /// its normal location. + /// + /// \param init the initializing expression + /// \param D the object to act as if we're initializing + /// \param lvalue the lvalue to initialize + void buildExprAsInit(const clang::Expr *init, const clang::ValueDecl *D, + LValue lvalue); + + /// Emit code and set up symbol table for a variable declaration with auto, + /// register, or no storage class specifier. These turn into simple stack + /// objects, globals depending on target. + void buildAutoVarDecl(const clang::VarDecl &D); + + /// This method handles emission of any variable declaration + /// inside a function, including static vars etc. + void buildVarDecl(const clang::VarDecl &D); + + /// Perform the usual unary conversions on the specified + /// expression and compare the result against zero, returning an Int1Ty value. + mlir::Value evaluateExprAsBool(const clang::Expr *E); + + /// Emit a conversion from the specified type to the specified destination + /// type, both of which are CIR scalar types. + mlir::Value buildScalarConversion(mlir::Value Src, clang::QualType SrcTy, + clang::QualType DstTy, + clang::SourceLocation Loc); + + // Emit a new function and add it to the MLIR module. + mlir::FuncOp buildFunction(const clang::FunctionDecl *FD); + + /// Determine whether the given initializer is trivial in the sense + /// that it requires no code to be generated. + bool isTrivialInitializer(const clang::Expr *Init); + + // TODO: this can also be abstrated into common AST helpers + bool hasBooleanRepresentation(clang::QualType Ty); + + mlir::LogicalResult buildForStmt(const clang::ForStmt &S); }; } // namespace cir diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 8d2323eea325..8282cac5eba5 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -93,98 +93,6 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, CIRGenModule::~CIRGenModule() {} -mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { - const SourceManager &SM = astCtx.getSourceManager(); - PresumedLoc PLoc = SM.getPresumedLoc(SLoc); - StringRef Filename = PLoc.getFilename(); - return mlir::FileLineColLoc::get(builder.getStringAttr(Filename), - PLoc.getLine(), PLoc.getColumn()); -} - -mlir::Location CIRGenModule::getLoc(SourceRange SLoc) { - mlir::Location B = getLoc(SLoc.getBegin()); - mlir::Location E = getLoc(SLoc.getEnd()); - SmallVector locs = {B, E}; - mlir::Attribute metadata; - return mlir::FusedLoc::get(locs, metadata, builder.getContext()); -} - -mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { - SmallVector locs = {lhs, rhs}; - mlir::Attribute metadata; - return mlir::FusedLoc::get(locs, metadata, builder.getContext()); -} - -mlir::Value CIRGenModule::buildAlloca(StringRef name, InitStyle initStyle, - QualType ty, mlir::Location loc, - CharUnits alignment) { - auto getAllocaInsertPositionOp = - [&](mlir::Block **insertBlock) -> mlir::Operation * { - auto *parentBlock = currLexScope->getEntryBlock(); - - auto lastAlloca = std::find_if( - parentBlock->rbegin(), parentBlock->rend(), - [](mlir::Operation &op) { return isa(&op); }); - - *insertBlock = parentBlock; - if (lastAlloca == parentBlock->rend()) - return nullptr; - return &*lastAlloca; - }; - - auto localVarTy = getCIRType(ty); - auto localVarPtrTy = - mlir::cir::PointerType::get(builder.getContext(), localVarTy); - - auto alignIntAttr = - mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), - alignment.getQuantity()); - - mlir::Value addr; - { - mlir::OpBuilder::InsertionGuard guard(builder); - mlir::Block *insertBlock = nullptr; - mlir::Operation *insertOp = getAllocaInsertPositionOp(&insertBlock); - - if (insertOp) - builder.setInsertionPointAfter(insertOp); - else { - assert(insertBlock && "expected valid insertion block"); - // No previous alloca found, place this one in the beginning - // of the block. - builder.setInsertionPointToStart(insertBlock); - } - - addr = builder.create(loc, /*addr type*/ localVarPtrTy, - /*var type*/ localVarTy, name, - initStyle, alignIntAttr); - } - return addr; -} - -void CIRGenModule::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, - CharUnits alignment) { - auto addr = - buildAlloca("__retval", InitStyle::uninitialized, ty, loc, alignment); - CurCGF->FnRetAlloca = addr; -} - -mlir::LogicalResult CIRGenModule::declare(const Decl *var, QualType ty, - mlir::Location loc, - CharUnits alignment, - mlir::Value &addr, bool isParam) { - const auto *namedVar = dyn_cast_or_null(var); - assert(namedVar && "Needs a named decl"); - assert(!symbolTable.count(var) && "not supposed to be available just yet"); - - addr = buildAlloca(namedVar->getName(), - isParam ? InitStyle::paraminit : InitStyle::uninitialized, - ty, loc, alignment); - - symbolTable.insert(var, addr); - return mlir::success(); -} - bool CIRGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) { if (!Ty.isConstant(astCtx) && !Ty->isReferenceType()) return false; @@ -199,773 +107,6 @@ bool CIRGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) { return true; } -CIRGenModule::AutoVarEmission -CIRGenModule::buildAutoVarAlloca(const VarDecl &D) { - QualType Ty = D.getType(); - // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private && - // getLangOpts().OpenCL)) - assert(Ty.getAddressSpace() == LangAS::Default); - - assert(!D.isEscapingByref() && "not implemented"); - assert(!Ty->isVariablyModifiedType() && "not implemented"); - assert(!astCtx.getLangOpts().OpenMP && // !CGM.getLangOpts().OpenMPIRBuilder - "not implemented"); - bool NRVO = astCtx.getLangOpts().ElideConstructors && D.isNRVOVariable(); - assert(!NRVO && "not implemented"); - assert(Ty->isConstantSizeType() && "not implemented"); - assert(!D.hasAttr() && "not implemented"); - - AutoVarEmission emission(D); - CharUnits alignment = astCtx.getDeclAlign(&D); - // TODO: debug info - // TODO: use CXXABI - - // If this value is an array or struct with a statically determinable - // constant initializer, there are optimizations we can do. - // - // TODO: We should constant-evaluate the initializer of any variable, - // as long as it is initialized by a constant expression. Currently, - // isConstantInitializer produces wrong answers for structs with - // reference or bitfield members, and a few other cases, and checking - // for POD-ness protects us from some of these. - if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) && - (D.isConstexpr() || - ((Ty.isPODType(astCtx) || - astCtx.getBaseElementType(Ty)->isObjCObjectPointerType()) && - D.getInit()->isConstantInitializer(astCtx, false)))) { - - // If the variable's a const type, and it's neither an NRVO - // candidate nor a __block variable and has no mutable members, - // emit it as a global instead. - // Exception is if a variable is located in non-constant address space - // in OpenCL. - // TODO: deal with CGM.getCodeGenOpts().MergeAllConstants - // TODO: perhaps we don't need this at all at CIR since this can - // be done as part of lowering down to LLVM. - if ((!astCtx.getLangOpts().OpenCL || - Ty.getAddressSpace() == LangAS::opencl_constant) && - (!NRVO && !D.isEscapingByref() && isTypeConstant(Ty, true))) - assert(0 && "not implemented"); - - // Otherwise, tell the initialization code that we're in this case. - emission.IsConstantAggregate = true; - } - - // TODO: track source location range... - mlir::Value addr; - if (failed(declare(&D, Ty, getLoc(D.getSourceRange()), alignment, addr))) { - theModule.emitError("Cannot declare variable"); - return emission; - } - - // TODO: what about emitting lifetime markers for MSVC catch parameters? - // TODO: something like @llvm.lifetime.start/end here? revisit this later. - emission.Addr = Address{addr, alignment}; - return emission; -} - -/// Determine whether the given initializer is trivial in the sense -/// that it requires no code to be generated. -bool CIRGenModule::isTrivialInitializer(const Expr *Init) { - if (!Init) - return true; - - if (const CXXConstructExpr *Construct = dyn_cast(Init)) - if (CXXConstructorDecl *Constructor = Construct->getConstructor()) - if (Constructor->isTrivial() && Constructor->isDefaultConstructor() && - !Construct->requiresZeroInitialization()) - return true; - - return false; -} - -// TODO: this can also be abstrated into common AST helpers -bool CIRGenModule::hasBooleanRepresentation(QualType Ty) { - - if (Ty->isBooleanType()) - return true; - - if (const EnumType *ET = Ty->getAs()) - return ET->getDecl()->getIntegerType()->isBooleanType(); - - if (const AtomicType *AT = Ty->getAs()) - return hasBooleanRepresentation(AT->getValueType()); - - return false; -} - -mlir::Value CIRGenModule::buildToMemory(mlir::Value Value, QualType Ty) { - // Bool has a different representation in memory than in registers. - return Value; -} - -void CIRGenModule::buildStoreOfScalar(mlir::Value value, LValue lvalue, - const Decl *InitDecl) { - // TODO: constant matrix type, volatile, non temporal, TBAA - buildStoreOfScalar(value, lvalue.getAddress(), false, lvalue.getType(), - lvalue.getBaseInfo(), InitDecl, false); -} - -void CIRGenModule::buildStoreOfScalar(mlir::Value Value, Address Addr, - bool Volatile, QualType Ty, - LValueBaseInfo BaseInfo, - const Decl *InitDecl, - bool isNontemporal) { - // TODO: PreserveVec3Type - // TODO: LValueIsSuitableForInlineAtomic ? - // TODO: TBAA - Value = buildToMemory(Value, Ty); - if (Ty->isAtomicType() || isNontemporal) { - assert(0 && "not implemented"); - } - - // Update the alloca with more info on initialization. - auto SrcAlloca = - dyn_cast_or_null(Addr.getPointer().getDefiningOp()); - if (InitDecl) { - InitStyle IS; - const VarDecl *VD = dyn_cast_or_null(InitDecl); - assert(VD && "VarDecl expected"); - if (VD->hasInit()) { - switch (VD->getInitStyle()) { - case VarDecl::ParenListInit: - llvm_unreachable("NYI"); - case VarDecl::CInit: - IS = InitStyle::cinit; - break; - case VarDecl::CallInit: - IS = InitStyle::callinit; - break; - case VarDecl::ListInit: - IS = InitStyle::listinit; - break; - } - SrcAlloca.setInitAttr(InitStyleAttr::get(builder.getContext(), IS)); - } - } - assert(currSrcLoc && "must pass in source location"); - builder.create(*currSrcLoc, Value, Addr.getPointer()); -} - -void CIRGenModule::buldStoreThroughLValue(RValue Src, LValue Dst, - const Decl *InitDecl) { - assert(Dst.isSimple() && "only implemented simple"); - // TODO: ObjC lifetime. - assert(Src.isScalar() && "Can't emit an agg store with this method"); - buildStoreOfScalar(Src.getScalarVal(), Dst, InitDecl); -} - -void CIRGenModule::buildScalarInit(const Expr *init, const ValueDecl *D, - LValue lvalue) { - // TODO: this is where a lot of ObjC lifetime stuff would be done. - mlir::Value value = buildScalarExpr(init); - SourceLocRAIIObject Loc{*this, getLoc(D->getSourceRange())}; - buldStoreThroughLValue(RValue::get(value), lvalue, D); - return; -} - -void CIRGenModule::buildExprAsInit(const Expr *init, const ValueDecl *D, - LValue lvalue) { - QualType type = D->getType(); - - if (type->isReferenceType()) { - assert(0 && "not implemented"); - return; - } - switch (CIRGenFunction::getEvaluationKind(type)) { - case TEK_Scalar: - buildScalarInit(init, D, lvalue); - return; - case TEK_Complex: { - assert(0 && "not implemented"); - return; - } - case TEK_Aggregate: - assert(0 && "not implemented"); - return; - } - llvm_unreachable("bad evaluation kind"); -} - -void CIRGenModule::buildAutoVarInit(const AutoVarEmission &emission) { - assert(emission.Variable && "emission was not valid!"); - - const VarDecl &D = *emission.Variable; - QualType type = D.getType(); - - // If this local has an initializer, emit it now. - const Expr *Init = D.getInit(); - - // TODO: in LLVM codegen if we are at an unreachable point, the initializer - // isn't emitted unless it contains a label. What we want for CIR? - assert(builder.getInsertionBlock()); - - // Initialize the variable here if it doesn't have a initializer and it is a - // C struct that is non-trivial to initialize or an array containing such a - // struct. - if (!Init && type.isNonTrivialToPrimitiveDefaultInitialize() == - QualType::PDIK_Struct) { - assert(0 && "not implemented"); - return; - } - - const Address Loc = emission.Addr; - - // Note: constexpr already initializes everything correctly. - LangOptions::TrivialAutoVarInitKind trivialAutoVarInit = - (D.isConstexpr() - ? LangOptions::TrivialAutoVarInitKind::Uninitialized - : (D.getAttr() - ? LangOptions::TrivialAutoVarInitKind::Uninitialized - : astCtx.getLangOpts().getTrivialAutoVarInit())); - - auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) { - if (trivialAutoVarInit == - LangOptions::TrivialAutoVarInitKind::Uninitialized) - return; - - assert(0 && "unimplemented"); - }; - - if (isTrivialInitializer(Init)) - return initializeWhatIsTechnicallyUninitialized(Loc); - - if (emission.IsConstantAggregate || - D.mightBeUsableInConstantExpressions(astCtx)) { - assert(0 && "not implemented"); - } - - initializeWhatIsTechnicallyUninitialized(Loc); - LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); - return buildExprAsInit(Init, &D, lv); -} - -void CIRGenModule::buildAutoVarCleanups(const AutoVarEmission &emission) { - assert(emission.Variable && "emission was not valid!"); - - // TODO: in LLVM codegen if we are at an unreachable point codgen - // is ignored. What we want for CIR? - assert(builder.getInsertionBlock()); - const VarDecl &D = *emission.Variable; - - // Check the type for a cleanup. - // TODO: something like emitAutoVarTypeCleanup - if (QualType::DestructionKind dtorKind = D.needsDestruction(astCtx)) - assert(0 && "not implemented"); - - // In GC mode, honor objc_precise_lifetime. - if (astCtx.getLangOpts().getGC() != LangOptions::NonGC && - D.hasAttr()) - assert(0 && "not implemented"); - - // Handle the cleanup attribute. - if (const CleanupAttr *CA = D.getAttr()) - assert(0 && "not implemented"); - - // TODO: handle block variable -} - -/// Emit code and set up symbol table for a variable declaration with auto, -/// register, or no storage class specifier. These turn into simple stack -/// objects, globals depending on target. -void CIRGenModule::buildAutoVarDecl(const VarDecl &D) { - AutoVarEmission emission = buildAutoVarAlloca(D); - buildAutoVarInit(emission); - buildAutoVarCleanups(emission); -} - -void CIRGenModule::buildVarDecl(const VarDecl &D) { - if (D.hasExternalStorage()) { - assert(0 && "should we just returns is there something to track?"); - // Don't emit it now, allow it to be emitted lazily on its first use. - return; - } - - // Some function-scope variable does not have static storage but still - // needs to be emitted like a static variable, e.g. a function-scope - // variable in constant address space in OpenCL. - if (D.getStorageDuration() != SD_Automatic) - assert(0 && "not implemented"); - - if (D.getType().getAddressSpace() == LangAS::opencl_local) - assert(0 && "not implemented"); - - assert(D.hasLocalStorage()); - return buildAutoVarDecl(D); -} - -void CIRGenModule::buildDecl(const Decl &D) { - switch (D.getKind()) { - case Decl::ImplicitConceptSpecialization: - case Decl::TopLevelStmt: - case Decl::HLSLBuffer: - case Decl::UnnamedGlobalConstant: - llvm_unreachable("NYI"); - case Decl::BuiltinTemplate: - case Decl::TranslationUnit: - case Decl::ExternCContext: - case Decl::Namespace: - case Decl::UnresolvedUsingTypename: - case Decl::ClassTemplateSpecialization: - case Decl::ClassTemplatePartialSpecialization: - case Decl::VarTemplateSpecialization: - case Decl::VarTemplatePartialSpecialization: - case Decl::TemplateTypeParm: - case Decl::UnresolvedUsingValue: - case Decl::NonTypeTemplateParm: - case Decl::CXXDeductionGuide: - case Decl::CXXMethod: - case Decl::CXXConstructor: - case Decl::CXXDestructor: - case Decl::CXXConversion: - case Decl::Field: - case Decl::MSProperty: - case Decl::IndirectField: - case Decl::ObjCIvar: - case Decl::ObjCAtDefsField: - case Decl::ParmVar: - case Decl::ImplicitParam: - case Decl::ClassTemplate: - case Decl::VarTemplate: - case Decl::FunctionTemplate: - case Decl::TypeAliasTemplate: - case Decl::TemplateTemplateParm: - case Decl::ObjCMethod: - case Decl::ObjCCategory: - case Decl::ObjCProtocol: - case Decl::ObjCInterface: - case Decl::ObjCCategoryImpl: - case Decl::ObjCImplementation: - case Decl::ObjCProperty: - case Decl::ObjCCompatibleAlias: - case Decl::PragmaComment: - case Decl::PragmaDetectMismatch: - case Decl::AccessSpec: - case Decl::LinkageSpec: - case Decl::Export: - case Decl::ObjCPropertyImpl: - case Decl::FileScopeAsm: - case Decl::Friend: - case Decl::FriendTemplate: - case Decl::Block: - case Decl::Captured: - case Decl::UsingShadow: - case Decl::ConstructorUsingShadow: - case Decl::ObjCTypeParam: - case Decl::Binding: - case Decl::UnresolvedUsingIfExists: - llvm_unreachable("Declaration should not be in declstmts!"); - case Decl::Record: // struct/union/class X; - case Decl::CXXRecord: // struct/union/class X; [C++] - assert(0 && "Not implemented"); - return; - case Decl::Enum: // enum X; - assert(0 && "Not implemented"); - return; - case Decl::Function: // void X(); - case Decl::EnumConstant: // enum ? { X = ? } - case Decl::StaticAssert: // static_assert(X, ""); [C++0x] - case Decl::Label: // __label__ x; - case Decl::Import: - case Decl::MSGuid: // __declspec(uuid("...")) - case Decl::TemplateParamObject: - case Decl::OMPThreadPrivate: - case Decl::OMPAllocate: - case Decl::OMPCapturedExpr: - case Decl::OMPRequires: - case Decl::Empty: - case Decl::Concept: - case Decl::LifetimeExtendedTemporary: - case Decl::RequiresExprBody: - // None of these decls require codegen support. - return; - - case Decl::NamespaceAlias: - assert(0 && "Not implemented"); - return; - case Decl::Using: // using X; [C++] - assert(0 && "Not implemented"); - return; - case Decl::UsingEnum: // using enum X; [C++] - assert(0 && "Not implemented"); - return; - case Decl::UsingPack: - assert(0 && "Not implemented"); - return; - case Decl::UsingDirective: // using namespace X; [C++] - assert(0 && "Not implemented"); - return; - case Decl::Var: - case Decl::Decomposition: { - const VarDecl &VD = cast(D); - assert(VD.isLocalVarDecl() && - "Should not see file-scope variables inside a function!"); - buildVarDecl(VD); - if (auto *DD = dyn_cast(&VD)) - assert(0 && "Not implemented"); - - // FIXME: add this - // if (auto *DD = dyn_cast(&VD)) - // for (auto *B : DD->bindings()) - // if (auto *HD = B->getHoldingVar()) - // EmitVarDecl(*HD); - return; - } - - case Decl::OMPDeclareReduction: - case Decl::OMPDeclareMapper: - assert(0 && "Not implemented"); - - case Decl::Typedef: // typedef int X; - case Decl::TypeAlias: { // using X = int; [C++0x] - assert(0 && "Not implemented"); - } - } -} - -mlir::LogicalResult CIRGenModule::buildReturnStmt(const ReturnStmt &S) { - assert(!(astCtx.getLangOpts().ElideConstructors && S.getNRVOCandidate() && - S.getNRVOCandidate()->isNRVOVariable()) && - "unimplemented"); - assert(!CurCGF->FnRetQualTy->isReferenceType() && "unimplemented"); - auto loc = getLoc(S.getSourceRange()); - - // Emit the result value, even if unused, to evaluate the side effects. - const Expr *RV = S.getRetValue(); - if (RV) { - assert(!isa(RV) && "unimplemented"); - - mlir::Value V = nullptr; - switch (CIRGenFunction::getEvaluationKind(RV->getType())) { - case TEK_Scalar: - V = buildScalarExpr(RV); - builder.create(loc, V, *CurCGF->FnRetAlloca); - break; - case TEK_Complex: - case TEK_Aggregate: - llvm::errs() << "ReturnStmt EvaluationKind not implemented\n"; - return mlir::failure(); - } - - // Otherwise, this return operation has zero operands. - if (!V || (RV && RV->getType()->isVoidType())) { - // FIXME: evaluate for side effects. - } - } else { - // Do nothing (return value is left uninitialized), this is also - // the path when returning from void functions. - } - - // Create a new return block (if not existent) and add a branch to - // it. The actual return instruction is only inserted during current - // scope cleanup handling. - auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); - builder.create(loc, retBlock); - - // Insert the new block to continue codegen after branch to ret block. - builder.createBlock(builder.getBlock()->getParent()); - return mlir::success(); -} - -mlir::LogicalResult CIRGenModule::buildDeclStmt(const DeclStmt &S) { - if (!builder.getInsertionBlock()) { - theModule.emitError( - "Seems like this is unreachable code, what should we do?"); - return mlir::failure(); - } - - for (const auto *I : S.decls()) { - buildDecl(*I); - } - - return mlir::success(); -} - -/// Build a unconditional branch to the lexical scope cleanup block -/// or with the labeled blocked if already solved. -/// -/// Track on scope basis, goto's we need to fix later. -mlir::LogicalResult -CIRGenModule::buildBranchThroughCleanup(JumpDest &Dest, LabelDecl *L, - mlir::Location Loc) { - // Remove this once we go for making sure unreachable code is - // well modeled (or not). - assert(builder.getInsertionBlock() && "not yet implemented"); - - // Insert a branch: to the cleanup block (unsolved) or to the already - // materialized label. Keep track of unsolved goto's. - mlir::Block *DstBlock = Dest.getBlock(); - auto G = builder.create( - Loc, Dest.isValid() ? DstBlock - : currLexScope->getOrCreateCleanupBlock(builder)); - if (!Dest.isValid()) - currLexScope->PendingGotos.push_back(std::make_pair(G, L)); - - return mlir::success(); -} - -/// All scope related cleanup needed: -/// - Patching up unsolved goto's. -/// - Build all cleanup code and insert yield/returns. -void CIRGenModule::LexicalScopeGuard::cleanup() { - auto &builder = CGM.builder; - auto *localScope = CGM.currLexScope; - - // Handle pending gotos and the solved labels in this scope. - while (!localScope->PendingGotos.empty()) { - auto gotoInfo = localScope->PendingGotos.back(); - // FIXME: Currently only support resolving goto labels inside the - // same lexical ecope. - assert(localScope->SolvedLabels.count(gotoInfo.second) && - "goto across scopes not yet supported"); - - // The goto in this lexical context actually maps to a basic - // block. - auto g = cast(gotoInfo.first); - g.setSuccessor(CGM.LabelMap[gotoInfo.second].getBlock()); - localScope->PendingGotos.pop_back(); - } - localScope->SolvedLabels.clear(); - - // Cleanup are done right before codegen resume a scope. This is where - // objects are destroyed. - unsigned curLoc = 0; - for (auto *retBlock : localScope->getRetBlocks()) { - mlir::OpBuilder::InsertionGuard guard(builder); - builder.setInsertionPointToEnd(retBlock); - mlir::Location retLoc = *localScope->getRetLocs()[curLoc]; - curLoc++; - - // TODO: insert actual scope cleanup HERE (dtors and etc) - - // If there's anything to return, load it first. - if (CGM.CurCGF->FnRetTy.has_value()) { - auto val = builder.create(retLoc, *CGM.CurCGF->FnRetTy, - *CGM.CurCGF->FnRetAlloca); - builder.create(retLoc, ArrayRef(val.getResult())); - } else { - builder.create(retLoc); - } - } - - auto insertCleanupAndLeave = [&](mlir::Block *InsPt) { - mlir::OpBuilder::InsertionGuard guard(builder); - builder.setInsertionPointToEnd(InsPt); - // TODO: insert actual scope cleanup (dtors and etc) - if (localScope->Depth != 0) // end of any local scope != function - builder.create(localScope->EndLoc); - else - builder.create(localScope->EndLoc); - }; - - // If a cleanup block has been created at some point, branch to it - // and set the insertion point to continue at the cleanup block. - // Terminators are then inserted either in the cleanup block or - // inline in this current block. - auto *cleanupBlock = localScope->getCleanupBlock(builder); - if (cleanupBlock) - insertCleanupAndLeave(cleanupBlock); - - // Now deal with any pending block wrap up like implicit end of - // scope. - - // If a terminator is already present in the current block, nothing - // else to do here. - bool entryBlock = builder.getInsertionBlock()->isEntryBlock(); - auto *currBlock = builder.getBlock(); - bool hasTerminator = - !currBlock->empty() && - currBlock->back().hasTrait(); - if (hasTerminator) - return; - - // An empty non-entry block has nothing to offer. - if (!entryBlock && currBlock->empty()) { - currBlock->erase(); - return; - } - - // If there's a cleanup block, branch to it, nothing else to do. - if (cleanupBlock) { - builder.create(currBlock->back().getLoc(), cleanupBlock); - return; - } - - // No pre-existent cleanup block, emit cleanup code and yield/return. - insertCleanupAndLeave(currBlock); -} - -mlir::LogicalResult CIRGenModule::buildGotoStmt(const GotoStmt &S) { - // FIXME: LLVM codegen inserts emit stop point here for debug info - // sake when the insertion point is available, but doesn't do - // anything special when there isn't. We haven't implemented debug - // info support just yet, look at this again once we have it. - assert(builder.getInsertionBlock() && "not yet implemented"); - - // A goto marks the end of a block, create a new one for codegen after - // buildGotoStmt can resume building in that block. - - // Build a cir.br to the target label. - auto &JD = LabelMap[S.getLabel()]; - if (buildBranchThroughCleanup(JD, S.getLabel(), getLoc(S.getSourceRange())) - .failed()) - return mlir::failure(); - - // Insert the new block to continue codegen after goto. - builder.createBlock(builder.getBlock()->getParent()); - - // What here... - return mlir::success(); -} - -mlir::LogicalResult CIRGenModule::buildLabel(const LabelDecl *D) { - JumpDest &Dest = LabelMap[D]; - - // Create a new block to tag with a label and add a branch from - // the current one to it. If the block is empty just call attach it - // to this label. - mlir::Block *currBlock = builder.getBlock(); - mlir::Block *labelBlock = currBlock; - if (!currBlock->empty()) { - - { - mlir::OpBuilder::InsertionGuard guard(builder); - labelBlock = builder.createBlock(builder.getBlock()->getParent()); - } - - builder.create(getLoc(D->getSourceRange()), labelBlock); - builder.setInsertionPointToEnd(labelBlock); - } - - if (!Dest.isValid()) { - Dest.Block = labelBlock; - currLexScope->SolvedLabels.insert(D); - // FIXME: add a label attribute to block... - } else { - assert(0 && "unimplemented"); - } - - // FIXME: emit debug info for labels, incrementProfileCounter - return mlir::success(); -} - -mlir::LogicalResult CIRGenModule::buildLabelStmt(const clang::LabelStmt &S) { - if (buildLabel(S.getDecl()).failed()) - return mlir::failure(); - - // IsEHa: not implemented. - assert(!(astCtx.getLangOpts().EHAsynch && S.isSideEntry())); - - return buildStmt(S.getSubStmt(), /* useCurrentScope */ true); -} - -mlir::LogicalResult CIRGenModule::buildSimpleStmt(const Stmt *S, - bool useCurrentScope) { - switch (S->getStmtClass()) { - default: - return mlir::failure(); - case Stmt::DeclStmtClass: - return buildDeclStmt(cast(*S)); - case Stmt::CompoundStmtClass: - return useCurrentScope - ? buildCompoundStmtWithoutScope(cast(*S)) - : buildCompoundStmt(cast(*S)); - case Stmt::ReturnStmtClass: - return buildReturnStmt(cast(*S)); - case Stmt::GotoStmtClass: - return buildGotoStmt(cast(*S)); - - case Stmt::NullStmtClass: - break; - - case Stmt::LabelStmtClass: - return buildLabelStmt(cast(*S)); - - case Stmt::CaseStmtClass: - case Stmt::DefaultStmtClass: - assert(0 && - "Should not get here, currently handled directly from SwitchStmt"); - break; - - case Stmt::BreakStmtClass: - return buildBreakStmt(cast(*S)); - - case Stmt::AttributedStmtClass: - case Stmt::ContinueStmtClass: - case Stmt::SEHLeaveStmtClass: - llvm::errs() << "CIR codegen for '" << S->getStmtClassName() - << "' not implemented\n"; - assert(0 && "not implemented"); - } - - return mlir::success(); -} - -LValue CIRGenModule::buildDeclRefLValue(const DeclRefExpr *E) { - const NamedDecl *ND = E->getDecl(); - - assert(E->isNonOdrUse() != NOUR_Unevaluated && - "should not emit an unevaluated operand"); - - if (const auto *VD = dyn_cast(ND)) { - // Global Named registers access via intrinsics only - assert(VD->getStorageClass() != SC_Register && "not implemented"); - assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); - assert(!E->refersToEnclosingVariableOrCapture() && "not implemented"); - assert(!(VD->hasLinkage() || VD->isStaticDataMember()) && - "not implemented"); - assert(!VD->isEscapingByref() && "not implemented"); - assert(!VD->getType()->isReferenceType() && "not implemented"); - assert(symbolTable.count(VD) && "should be already mapped"); - - mlir::Value V = symbolTable.lookup(VD); - assert(V && "Name lookup must succeed"); - - LValue LV = LValue::makeAddr(Address(V, CharUnits::fromQuantity(4)), - VD->getType(), AlignmentSource::Decl); - return LV; - } - - llvm_unreachable("Unhandled DeclRefExpr?"); -} - -LValue CIRGenModule::buildBinaryOperatorLValue(const BinaryOperator *E) { - // Comma expressions just emit their LHS then their RHS as an l-value. - if (E->getOpcode() == BO_Comma) { - assert(0 && "not implemented"); - } - - if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) - assert(0 && "not implemented"); - - assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); - - // Note that in all of these cases, __block variables need the RHS - // evaluated first just in case the variable gets moved by the RHS. - - switch (CIRGenFunction::getEvaluationKind(E->getType())) { - case TEK_Scalar: { - assert(E->getLHS()->getType().getObjCLifetime() == - clang::Qualifiers::ObjCLifetime::OCL_None && - "not implemented"); - - RValue RV = CurCGF->buildAnyExpr(E->getRHS()); - LValue LV = buildLValue(E->getLHS()); - - SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; - buldStoreThroughLValue(RV, LV, nullptr /*InitDecl*/); - assert(!astCtx.getLangOpts().OpenMP && "last priv cond not implemented"); - return LV; - } - - case TEK_Complex: - assert(0 && "not implemented"); - case TEK_Aggregate: - assert(0 && "not implemented"); - } - llvm_unreachable("bad evaluation kind"); -} - /// FIXME: this could likely be a common helper and not necessarily related /// with codegen. /// Return the best known alignment for an unknown pointer to a @@ -1059,968 +200,6 @@ CharUnits CIRGenModule::getNaturalTypeAlignment(QualType T, return Alignment; } -/// Given an expression of pointer type, try to -/// derive a more accurate bound on the alignment of the pointer. -Address CIRGenModule::buildPointerWithAlignment(const Expr *E, - LValueBaseInfo *BaseInfo) { - // We allow this with ObjC object pointers because of fragile ABIs. - assert(E->getType()->isPointerType() || - E->getType()->isObjCObjectPointerType()); - E = E->IgnoreParens(); - - // Casts: - if (const CastExpr *CE = dyn_cast(E)) { - if (const auto *ECE = dyn_cast(CE)) - assert(0 && "not implemented"); - - switch (CE->getCastKind()) { - default: - assert(0 && "not implemented"); - // Nothing to do here... - case CK_LValueToRValue: - break; - } - } - - // Unary &. - if (const UnaryOperator *UO = dyn_cast(E)) { - assert(0 && "not implemented"); - // if (UO->getOpcode() == UO_AddrOf) { - // LValue LV = buildLValue(UO->getSubExpr()); - // if (BaseInfo) - // *BaseInfo = LV.getBaseInfo(); - // // TODO: TBBA info - // return LV.getAddress(); - // } - } - - // TODO: conditional operators, comma. - // Otherwise, use the alignment of the type. - CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), BaseInfo); - return Address(buildScalarExpr(E), Align); -} - -LValue CIRGenModule::buildUnaryOpLValue(const UnaryOperator *E) { - // __extension__ doesn't affect lvalue-ness. - assert(E->getOpcode() != UO_Extension && "not implemented"); - - switch (E->getOpcode()) { - default: - llvm_unreachable("Unknown unary operator lvalue!"); - case UO_Deref: { - QualType T = E->getSubExpr()->getType()->getPointeeType(); - assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); - - LValueBaseInfo BaseInfo; - // TODO: add TBAAInfo - Address Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo); - - // Tag 'load' with deref attribute. - if (auto loadOp = - dyn_cast<::mlir::cir::LoadOp>(Addr.getPointer().getDefiningOp())) { - loadOp.setIsDerefAttr(mlir::UnitAttr::get(builder.getContext())); - } - - LValue LV = LValue::makeAddr(Addr, T, BaseInfo); - // TODO: set addr space - // TODO: ObjC/GC/__weak write barrier stuff. - return LV; - } - case UO_Real: - case UO_Imag: { - assert(0 && "not implemented"); - } - case UO_PreInc: - case UO_PreDec: { - assert(0 && "not implemented"); - } - } -} - -/// Emit code to compute a designator that specifies the location -/// of the expression. -/// FIXME: document this function better. -LValue CIRGenModule::buildLValue(const Expr *E) { - // FIXME: ApplyDebugLocation DL(*this, E); - switch (E->getStmtClass()) { - default: { - emitError(getLoc(E->getExprLoc()), "l-value not implemented for '") - << E->getStmtClassName() << "'"; - assert(0 && "not implemented"); - } - case Expr::BinaryOperatorClass: - return buildBinaryOperatorLValue(cast(E)); - case Expr::DeclRefExprClass: - return buildDeclRefLValue(cast(E)); - case Expr::UnaryOperatorClass: - return buildUnaryOpLValue(cast(E)); - case Expr::ObjCPropertyRefExprClass: - llvm_unreachable("cannot emit a property reference directly"); - } - - return LValue::makeAddr(Address::invalid(), E->getType()); -} - -/// EmitIgnoredExpr - Emit code to compute the specified expression, -/// ignoring the result. -void CIRGenModule::buildIgnoredExpr(const Expr *E) { - if (E->isPRValue()) - return (void)CurCGF->buildAnyExpr(E); - - // Just emit it as an l-value and drop the result. - buildLValue(E); -} - -/// If the specified expression does not fold -/// to a constant, or if it does but contains a label, return false. If it -/// constant folds return true and set the boolean result in Result. -bool CIRGenModule::ConstantFoldsToSimpleInteger(const Expr *Cond, - bool &ResultBool, - bool AllowLabels) { - llvm::APSInt ResultInt; - if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels)) - return false; - - ResultBool = ResultInt.getBoolValue(); - return true; -} - -/// Return true if the statement contains a label in it. If -/// this statement is not executed normally, it not containing a label means -/// that we can just remove the code. -bool CIRGenModule::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) { - // Null statement, not a label! - if (!S) - return false; - - // If this is a label, we have to emit the code, consider something like: - // if (0) { ... foo: bar(); } goto foo; - // - // TODO: If anyone cared, we could track __label__'s, since we know that you - // can't jump to one from outside their declared region. - if (isa(S)) - return true; - - // If this is a case/default statement, and we haven't seen a switch, we - // have to emit the code. - if (isa(S) && !IgnoreCaseStmts) - return true; - - // If this is a switch statement, we want to ignore cases below it. - if (isa(S)) - IgnoreCaseStmts = true; - - // Scan subexpressions for verboten labels. - for (const Stmt *SubStmt : S->children()) - if (ContainsLabel(SubStmt, IgnoreCaseStmts)) - return true; - - return false; -} - -/// If the specified expression does not fold -/// to a constant, or if it does but contains a label, return false. If it -/// constant folds return true and set the folded value. -bool CIRGenModule::ConstantFoldsToSimpleInteger(const Expr *Cond, - llvm::APSInt &ResultInt, - bool AllowLabels) { - // FIXME: Rename and handle conversion of other evaluatable things - // to bool. - Expr::EvalResult Result; - if (!Cond->EvaluateAsInt(Result, astCtx)) - return false; // Not foldable, not integer or not fully evaluatable. - - llvm::APSInt Int = Result.Val.getInt(); - if (!AllowLabels && ContainsLabel(Cond)) - return false; // Contains a label. - - ResultInt = Int; - return true; -} - -/// Perform the usual unary conversions on the specified -/// expression and compare the result against zero, returning an Int1Ty value. -mlir::Value CIRGenModule::evaluateExprAsBool(const Expr *E) { - // TODO: PGO - if (const MemberPointerType *MPT = E->getType()->getAs()) { - assert(0 && "not implemented"); - } - - QualType BoolTy = astCtx.BoolTy; - SourceLocation Loc = E->getExprLoc(); - // TODO: CGFPOptionsRAII for FP stuff. - assert(!E->getType()->isAnyComplexType() && - "complex to scalar not implemented"); - return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc); -} - -/// Emit an if on a boolean condition to the specified blocks. -/// FIXME: Based on the condition, this might try to simplify the codegen of -/// the conditional based on the branch. TrueCount should be the number of -/// times we expect the condition to evaluate to true based on PGO data. We -/// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr -/// for extra ideas). -mlir::LogicalResult CIRGenModule::buildIfOnBoolExpr(const Expr *cond, - mlir::Location loc, - const Stmt *thenS, - const Stmt *elseS) { - // TODO: scoped ApplyDebugLocation DL(*this, Cond); - // TODO: __builtin_unpredictable and profile counts? - cond = cond->IgnoreParens(); - mlir::Value condV = evaluateExprAsBool(cond); - mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); - - builder.create( - loc, condV, elseS, - /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - // FIXME: abstract all this massive location handling elsewhere. - SmallVector locs; - if (loc.isa()) { - locs.push_back(loc); - locs.push_back(loc); - } else if (loc.isa()) { - auto fusedLoc = loc.cast(); - locs.push_back(fusedLoc.getLocations()[0]); - locs.push_back(fusedLoc.getLocations()[1]); - } - LexicalScopeContext lexScope{locs[0], locs[1], - builder.getInsertionBlock()}; - LexicalScopeGuard lexThenGuard{*this, &lexScope}; - resThen = buildStmt(thenS, /*useCurrentScope=*/true); - }, - /*elseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto locBegin = fusedLoc.getLocations()[2]; - auto locEnd = fusedLoc.getLocations()[3]; - LexicalScopeContext lexScope{locBegin, locEnd, - builder.getInsertionBlock()}; - LexicalScopeGuard lexElseGuard{*this, &lexScope}; - resElse = buildStmt(elseS, /*useCurrentScope=*/true); - }); - - return mlir::LogicalResult::success(resThen.succeeded() && - resElse.succeeded()); -} - -static mlir::Location getIfLocs(CIRGenModule &CGM, const clang::Stmt *thenS, - const clang::Stmt *elseS) { - // Attempt to be more accurate as possible with IfOp location, generate - // one fused location that has either 2 or 4 total locations, depending - // on else's availability. - SmallVector ifLocs; - mlir::Attribute metadata; - - clang::SourceRange t = thenS->getSourceRange(); - ifLocs.push_back(CGM.getLoc(t.getBegin())); - ifLocs.push_back(CGM.getLoc(t.getEnd())); - if (elseS) { - clang::SourceRange e = elseS->getSourceRange(); - ifLocs.push_back(CGM.getLoc(e.getBegin())); - ifLocs.push_back(CGM.getLoc(e.getEnd())); - } - - return mlir::FusedLoc::get(ifLocs, metadata, CGM.getBuilder().getContext()); -} - -mlir::LogicalResult CIRGenModule::buildBreakStmt(const clang::BreakStmt &S) { - builder.create( - getLoc(S.getBreakLoc()), - mlir::cir::YieldOpKindAttr::get(builder.getContext(), - mlir::cir::YieldOpKind::Break), - mlir::ValueRange({})); - return mlir::success(); -} - -mlir::LogicalResult CIRGenModule::buildDefaultStmt(const DefaultStmt &S, - mlir::Type condType, - CaseAttr &caseEntry) { - auto res = mlir::success(); - auto *ctx = builder.getContext(); - caseEntry = mlir::cir::CaseAttr::get( - ctx, builder.getArrayAttr({}), - CaseOpKindAttr::get(ctx, mlir::cir::CaseOpKind::Default)); - { - mlir::OpBuilder::InsertionGuard guardCase(builder); - res = buildStmt(S.getSubStmt(), - /*useCurrentScope=*/!isa(S.getSubStmt())); - } - - // TODO: likelihood - return res; -} - -mlir::LogicalResult CIRGenModule::buildCaseStmt(const CaseStmt &S, - mlir::Type condType, - CaseAttr &caseEntry) { - assert((!S.getRHS() || !S.caseStmtIsGNURange()) && - "case ranges not implemented"); - auto res = mlir::success(); - - const CaseStmt *caseStmt = &S; - SmallVector caseEltValueListAttr; - // Fold cascading cases whenever possible to simplify codegen a bit. - while (true) { - auto intVal = caseStmt->getLHS()->EvaluateKnownConstInt(getASTContext()); - caseEltValueListAttr.push_back(mlir::IntegerAttr::get(condType, intVal)); - if (isa(caseStmt->getSubStmt())) - caseStmt = dyn_cast_or_null(caseStmt->getSubStmt()); - else - break; - } - - auto caseValueList = builder.getArrayAttr(caseEltValueListAttr); - - auto *ctx = builder.getContext(); - caseEntry = mlir::cir::CaseAttr::get( - ctx, caseValueList, - CaseOpKindAttr::get(ctx, caseEltValueListAttr.size() > 1 - ? mlir::cir::CaseOpKind::Anyof - : mlir::cir::CaseOpKind::Equal)); - { - mlir::OpBuilder::InsertionGuard guardCase(builder); - res = buildStmt( - caseStmt->getSubStmt(), - /*useCurrentScope=*/!isa(caseStmt->getSubStmt())); - } - - // TODO: likelihood - return res; -} - -mlir::LogicalResult CIRGenModule::buildSwitchStmt(const SwitchStmt &S) { - // TODO: LLVM codegen does some early optimization to fold the condition and - // only emit live cases. CIR should use MLIR to achieve similar things, - // nothing to be done here. - // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))... - - auto res = mlir::success(); - SwitchOp swop; - - auto switchStmtBuilder = [&]() -> mlir::LogicalResult { - if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) - return mlir::failure(); - - if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); - - mlir::Value condV = buildScalarExpr(S.getCond()); - - // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts()) - // TODO: if the switch has a condition wrapped by __builtin_unpredictable? - - // FIXME: track switch to handle nested stmts. - swop = builder.create( - getLoc(S.getBeginLoc()), condV, - /*switchBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { - auto *cs = dyn_cast(S.getBody()); - assert(cs && "expected compound stmt"); - SmallVector caseAttrs; - - currLexScope->setAsSwitch(); - mlir::Block *lastCaseBlock = nullptr; - for (auto *c : cs->body()) { - bool caseLike = isa(c); - if (!caseLike) { - // This means it's a random stmt following up a case, just - // emit it as part of previous known case. - assert(lastCaseBlock && "expects pre-existing case block"); - mlir::OpBuilder::InsertionGuard guardCase(builder); - builder.setInsertionPointToEnd(lastCaseBlock); - res = buildStmt(c, /*useCurrentScope=*/!isa(c)); - if (res.failed()) - break; - continue; - } - - auto *caseStmt = dyn_cast(c); - CaseAttr caseAttr; - { - mlir::OpBuilder::InsertionGuard guardCase(builder); - - // Update scope information with the current region we are - // emitting code for. This is useful to allow return blocks to be - // automatically and properly placed during cleanup. - mlir::Region *caseRegion = os.addRegion(); - currLexScope->updateCurrentSwitchCaseRegion(); - - lastCaseBlock = builder.createBlock(caseRegion); - if (caseStmt) - res = buildCaseStmt(*caseStmt, condV.getType(), caseAttr); - else { - auto *defaultStmt = dyn_cast(c); - assert(defaultStmt && "expected default stmt"); - res = buildDefaultStmt(*defaultStmt, condV.getType(), caseAttr); - } - - if (res.failed()) - break; - } - caseAttrs.push_back(caseAttr); - } - - os.addAttribute("cases", builder.getArrayAttr(caseAttrs)); - }); - - if (res.failed()) - return res; - - return mlir::success(); - }; - - // The switch scope contains the full source range for SwitchStmt. - auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; - LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; - res = switchStmtBuilder(); - }); - - if (res.failed()) - return res; - - // Any block in a case region without a terminator is considered a - // fallthrough yield. In practice there shouldn't be more than one - // block without a terminator, we patch any block we see though and - // let mlir's SwitchOp verifier enforce rules. - auto terminateCaseRegion = [&](mlir::Region &r, mlir::Location loc) { - if (r.empty()) - return; - - SmallVector eraseBlocks; - unsigned numBlocks = r.getBlocks().size(); - for (auto &block : r.getBlocks()) { - // Already cleanup after return operations, which might create - // empty blocks if emitted as last stmt. - if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() && - block.hasNoSuccessors()) - eraseBlocks.push_back(&block); - - if (block.empty() || - !block.back().hasTrait()) { - mlir::OpBuilder::InsertionGuard guardCase(builder); - builder.setInsertionPointToEnd(&block); - builder.create( - loc, - mlir::cir::YieldOpKindAttr::get( - builder.getContext(), mlir::cir::YieldOpKind::Fallthrough), - mlir::ValueRange({})); - } - } - - for (auto *b : eraseBlocks) - b->erase(); - }; - - // Make sure all case regions are terminated by inserting fallthroughs - // when necessary. - // FIXME: find a better way to get accurante with location here. - for (auto &r : swop.getRegions()) - terminateCaseRegion(r, swop.getLoc()); - return mlir::success(); -} - -// Add terminating yield on body regions (loops, ...) in case there are -// not other terminators used. -// FIXME: make terminateCaseRegion use this too. -static void terminateBody(mlir::OpBuilder &builder, mlir::Region &r, - mlir::Location loc) { - if (r.empty()) - return; - - SmallVector eraseBlocks; - unsigned numBlocks = r.getBlocks().size(); - for (auto &block : r.getBlocks()) { - // Already cleanup after return operations, which might create - // empty blocks if emitted as last stmt. - if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() && - block.hasNoSuccessors()) - eraseBlocks.push_back(&block); - - if (block.empty() || - !block.back().hasTrait()) { - mlir::OpBuilder::InsertionGuard guardCase(builder); - builder.setInsertionPointToEnd(&block); - builder.create(loc); - } - } - - for (auto *b : eraseBlocks) - b->erase(); -} - -mlir::LogicalResult CIRGenModule::buildDoStmt(const DoStmt &S) { - mlir::cir::LoopOp loopOp; - - // TODO: pass in array of attributes. - auto doStmtBuilder = [&]() -> mlir::LogicalResult { - auto forRes = mlir::success(); - - loopOp = builder.create( - getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::DoWhile, - /*condBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - // TODO: branch weigths, likelyhood, profile counter, etc. - // C99 6.8.5p2/p4: The first substatement is executed if the - // expression compares unequal to 0. The condition must be a - // scalar type. - mlir::Value condVal = evaluateExprAsBool(S.getCond()); - b.create(loc, condVal); - }, - /*bodyBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) - forRes = mlir::failure(); - }, - /*stepBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - builder.create(loc); - }); - return forRes; - }; - - auto res = mlir::success(); - auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; - LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; - res = doStmtBuilder(); - }); - - if (res.failed()) - return res; - - terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); - return mlir::success(); -} - -mlir::LogicalResult CIRGenModule::buildWhileStmt(const WhileStmt &S) { - mlir::cir::LoopOp loopOp; - - // TODO: pass in array of attributes. - auto whileStmtBuilder = [&]() -> mlir::LogicalResult { - auto forRes = mlir::success(); - - loopOp = builder.create( - getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::While, - /*condBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - // TODO: branch weigths, likelyhood, profile counter, etc. - mlir::Value condVal; - // If the for statement has a condition scope, - // emit the local variable declaration. - if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); - // C99 6.8.5p2/p4: The first substatement is executed if the - // expression compares unequal to 0. The condition must be a - // scalar type. - condVal = evaluateExprAsBool(S.getCond()); - b.create(loc, condVal); - }, - /*bodyBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) - forRes = mlir::failure(); - }, - /*stepBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - builder.create(loc); - }); - return forRes; - }; - - auto res = mlir::success(); - auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; - LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; - res = whileStmtBuilder(); - }); - - if (res.failed()) - return res; - - terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); - return mlir::success(); -} - -mlir::LogicalResult CIRGenModule::buildForStmt(const ForStmt &S) { - mlir::cir::LoopOp loopOp; - - // TODO: pass in array of attributes. - auto forStmtBuilder = [&]() -> mlir::LogicalResult { - auto forRes = mlir::success(); - // Evaluate the first part before the loop. - if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) - return mlir::failure(); - - loopOp = builder.create( - getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::For, - /*condBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - // TODO: branch weigths, likelyhood, profile counter, etc. - mlir::Value condVal; - if (S.getCond()) { - // If the for statement has a condition scope, - // emit the local variable declaration. - if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); - // C99 6.8.5p2/p4: The first substatement is executed if the - // expression compares unequal to 0. The condition must be a - // scalar type. - condVal = evaluateExprAsBool(S.getCond()); - } else { - condVal = b.create( - loc, mlir::cir::BoolType::get(b.getContext()), - b.getBoolAttr(true)); - } - b.create(loc, condVal); - }, - /*bodyBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - // FIXME: in C we need to open a new scope here. Do we also need it - // for C++ in case it's a compound statement? - if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) - forRes = mlir::failure(); - }, - /*stepBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - if (S.getInc()) - if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) - forRes = mlir::failure(); - builder.create(loc); - }); - return forRes; - }; - - auto res = mlir::success(); - auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; - LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; - res = forStmtBuilder(); - }); - - if (res.failed()) - return res; - - terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); - return mlir::success(); -} - -mlir::LogicalResult CIRGenModule::buildIfStmt(const IfStmt &S) { - // The else branch of a consteval if statement is always the only branch - // that can be runtime evaluated. - assert(!S.isConsteval() && "not implemented"); - mlir::LogicalResult res = mlir::success(); - - // C99 6.8.4.1: The first substatement is executed if the expression - // compares unequal to 0. The condition must be a scalar type. - auto ifStmtBuilder = [&]() -> mlir::LogicalResult { - if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) - return mlir::failure(); - - if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); - - // If the condition constant folds and can be elided, try to avoid - // emitting the condition and the dead arm of the if/else. - // FIXME: should this be done as part of a constant folder pass instead? - bool CondConstant; - if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, - S.isConstexpr())) { - assert(0 && "not implemented"); - } - - // TODO: PGO and likelihood. - auto ifLoc = getIfLocs(*this, S.getThen(), S.getElse()); - return buildIfOnBoolExpr(S.getCond(), ifLoc, S.getThen(), S.getElse()); - }; - - // TODO: Add a new scoped symbol table. - // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); - // The if scope contains the full source range for IfStmt. - auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; - LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; - res = ifStmtBuilder(); - }); - - return res; -} - -// Build CIR for a statement. useCurrentScope should be true if no -// new scopes need be created when finding a compound statement. -mlir::LogicalResult CIRGenModule::buildStmt(const Stmt *S, - bool useCurrentScope) { - if (mlir::succeeded(buildSimpleStmt(S, useCurrentScope))) - return mlir::success(); - - if (astCtx.getLangOpts().OpenMP && astCtx.getLangOpts().OpenMPSimd) - assert(0 && "not implemented"); - - switch (S->getStmtClass()) { - case Stmt::OpenACCComputeConstructClass: - case Stmt::OMPScopeDirectiveClass: - case Stmt::OMPParallelMaskedDirectiveClass: - case Stmt::OMPTargetTeamsGenericLoopDirectiveClass: - case Stmt::OMPTeamsGenericLoopDirectiveClass: - case Stmt::OMPTargetParallelGenericLoopDirectiveClass: - case Stmt::OMPParallelGenericLoopDirectiveClass: - case Stmt::OMPParallelMaskedTaskLoopDirectiveClass: - case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass: - case Stmt::OMPErrorDirectiveClass: - case Stmt::OMPMaskedTaskLoopDirectiveClass: - case Stmt::OMPMaskedTaskLoopSimdDirectiveClass: - llvm_unreachable("NYI"); - case Stmt::NoStmtClass: - case Stmt::CXXCatchStmtClass: - case Stmt::SEHExceptStmtClass: - case Stmt::SEHFinallyStmtClass: - case Stmt::MSDependentExistsStmtClass: - llvm_unreachable("invalid statement class to emit generically"); - case Stmt::NullStmtClass: - case Stmt::CompoundStmtClass: - case Stmt::DeclStmtClass: - case Stmt::LabelStmtClass: - case Stmt::AttributedStmtClass: - case Stmt::GotoStmtClass: - case Stmt::BreakStmtClass: - case Stmt::ContinueStmtClass: - case Stmt::DefaultStmtClass: - case Stmt::CaseStmtClass: - case Stmt::SEHLeaveStmtClass: - llvm_unreachable("should have emitted these statements as simple"); - -#define STMT(Type, Base) -#define ABSTRACT_STMT(Op) -#define EXPR(Type, Base) case Stmt::Type##Class: -#include "clang/AST/StmtNodes.inc" - { - // Remember the block we came in on. - mlir::Block *incoming = builder.getInsertionBlock(); - assert(incoming && "expression emission must have an insertion point"); - - buildIgnoredExpr(cast(S)); - - mlir::Block *outgoing = builder.getInsertionBlock(); - assert(outgoing && "expression emission cleared block!"); - - // FIXME: Should we mimic LLVM emission here? - // The expression emitters assume (reasonably!) that the insertion - // point is always set. To maintain that, the call-emission code - // for noreturn functions has to enter a new block with no - // predecessors. We want to kill that block and mark the current - // insertion point unreachable in the common case of a call like - // "exit();". Since expression emission doesn't otherwise create - // blocks with no predecessors, we can just test for that. - // However, we must be careful not to do this to our incoming - // block, because *statement* emission does sometimes create - // reachable blocks which will have no predecessors until later in - // the function. This occurs with, e.g., labels that are not - // reachable by fallthrough. - if (incoming != outgoing && outgoing->use_empty()) - assert(0 && "not implemented"); - break; - } - - case Stmt::IfStmtClass: - if (buildIfStmt(cast(*S)).failed()) - return mlir::failure(); - break; - case Stmt::SwitchStmtClass: - if (buildSwitchStmt(cast(*S)).failed()) - return mlir::failure(); - break; - case Stmt::ForStmtClass: - if (buildForStmt(cast(*S)).failed()) - return mlir::failure(); - break; - case Stmt::WhileStmtClass: - if (buildWhileStmt(cast(*S)).failed()) - return mlir::failure(); - break; - case Stmt::DoStmtClass: - if (buildDoStmt(cast(*S)).failed()) - return mlir::failure(); - break; - - case Stmt::IndirectGotoStmtClass: - case Stmt::ReturnStmtClass: - // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. - case Stmt::GCCAsmStmtClass: - case Stmt::MSAsmStmtClass: - case Stmt::CoroutineBodyStmtClass: - case Stmt::CoreturnStmtClass: - case Stmt::CapturedStmtClass: - case Stmt::ObjCAtTryStmtClass: - case Stmt::ObjCAtThrowStmtClass: - case Stmt::ObjCAtSynchronizedStmtClass: - case Stmt::ObjCForCollectionStmtClass: - case Stmt::ObjCAutoreleasePoolStmtClass: - case Stmt::CXXTryStmtClass: - case Stmt::CXXForRangeStmtClass: - case Stmt::SEHTryStmtClass: - case Stmt::OMPMetaDirectiveClass: - case Stmt::OMPCanonicalLoopClass: - case Stmt::OMPParallelDirectiveClass: - case Stmt::OMPSimdDirectiveClass: - case Stmt::OMPTileDirectiveClass: - case Stmt::OMPUnrollDirectiveClass: - case Stmt::OMPForDirectiveClass: - case Stmt::OMPForSimdDirectiveClass: - case Stmt::OMPSectionsDirectiveClass: - case Stmt::OMPSectionDirectiveClass: - case Stmt::OMPSingleDirectiveClass: - case Stmt::OMPMasterDirectiveClass: - case Stmt::OMPCriticalDirectiveClass: - case Stmt::OMPParallelForDirectiveClass: - case Stmt::OMPParallelForSimdDirectiveClass: - case Stmt::OMPParallelMasterDirectiveClass: - case Stmt::OMPParallelSectionsDirectiveClass: - case Stmt::OMPTaskDirectiveClass: - case Stmt::OMPTaskyieldDirectiveClass: - case Stmt::OMPBarrierDirectiveClass: - case Stmt::OMPTaskwaitDirectiveClass: - case Stmt::OMPTaskgroupDirectiveClass: - case Stmt::OMPFlushDirectiveClass: - case Stmt::OMPDepobjDirectiveClass: - case Stmt::OMPScanDirectiveClass: - case Stmt::OMPOrderedDirectiveClass: - case Stmt::OMPAtomicDirectiveClass: - case Stmt::OMPTargetDirectiveClass: - case Stmt::OMPTeamsDirectiveClass: - case Stmt::OMPCancellationPointDirectiveClass: - case Stmt::OMPCancelDirectiveClass: - case Stmt::OMPTargetDataDirectiveClass: - case Stmt::OMPTargetEnterDataDirectiveClass: - case Stmt::OMPTargetExitDataDirectiveClass: - case Stmt::OMPTargetParallelDirectiveClass: - case Stmt::OMPTargetParallelForDirectiveClass: - case Stmt::OMPTaskLoopDirectiveClass: - case Stmt::OMPTaskLoopSimdDirectiveClass: - case Stmt::OMPMasterTaskLoopDirectiveClass: - case Stmt::OMPMasterTaskLoopSimdDirectiveClass: - case Stmt::OMPParallelMasterTaskLoopDirectiveClass: - case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass: - case Stmt::OMPDistributeDirectiveClass: - case Stmt::OMPTargetUpdateDirectiveClass: - case Stmt::OMPDistributeParallelForDirectiveClass: - case Stmt::OMPDistributeParallelForSimdDirectiveClass: - case Stmt::OMPDistributeSimdDirectiveClass: - case Stmt::OMPTargetParallelForSimdDirectiveClass: - case Stmt::OMPTargetSimdDirectiveClass: - case Stmt::OMPTeamsDistributeDirectiveClass: - case Stmt::OMPTeamsDistributeSimdDirectiveClass: - case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass: - case Stmt::OMPTeamsDistributeParallelForDirectiveClass: - case Stmt::OMPTargetTeamsDirectiveClass: - case Stmt::OMPTargetTeamsDistributeDirectiveClass: - case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass: - case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass: - case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass: - case Stmt::OMPInteropDirectiveClass: - case Stmt::OMPDispatchDirectiveClass: - case Stmt::OMPGenericLoopDirectiveClass: - case Stmt::OMPMaskedDirectiveClass: { - llvm::errs() << "CIR codegen for '" << S->getStmtClassName() - << "' not implemented\n"; - assert(0 && "not implemented"); - break; - } - case Stmt::ObjCAtCatchStmtClass: - llvm_unreachable( - "@catch statements should be handled by EmitObjCAtTryStmt"); - case Stmt::ObjCAtFinallyStmtClass: - llvm_unreachable( - "@finally statements should be handled by EmitObjCAtTryStmt"); - } - - return mlir::success(); -} - -mlir::LogicalResult CIRGenModule::buildFunctionBody(const Stmt *Body) { - const CompoundStmt *S = dyn_cast(Body); - assert(S && "expected compound stmt"); - - // We start with function level scope for variables. - SymTableScopeTy varScope(symbolTable); - return buildCompoundStmtWithoutScope(*S); -} - -mlir::LogicalResult CIRGenModule::buildCompoundStmt(const CompoundStmt &S) { - mlir::LogicalResult res = mlir::success(); - - auto compoundStmtBuilder = [&]() -> mlir::LogicalResult { - if (buildCompoundStmtWithoutScope(S).failed()) - return mlir::failure(); - - return mlir::success(); - }; - - // Add local scope to track new declared variables. - SymTableScopeTy varScope(symbolTable); - auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto locBegin = fusedLoc.getLocations()[0]; - auto locEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{locBegin, locEnd, - builder.getInsertionBlock()}; - LexicalScopeGuard lexScopeGuard{*this, &lexScope}; - res = compoundStmtBuilder(); - }); - - return res; -} - -mlir::LogicalResult -CIRGenModule::buildCompoundStmtWithoutScope(const CompoundStmt &S) { - for (auto *CurStmt : S.body()) - if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed()) - return mlir::failure(); - - return mlir::success(); -} - bool CIRGenModule::MustBeEmitted(const ValueDecl *Global) { // Never defer when EmitAllDecls is specified. assert(!langOpts.EmitAllDecls && "EmitAllDecls NYI"); @@ -2057,8 +236,13 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { assert(MustBeEmitted(Global) || MayBeEmittedEagerly(Global) && "Delayed emission NYI"); - buildFunction(cast(GD.getDecl())); + CIRGenFunction CGF{*this, builder}; + CurCGF = &CGF; + auto fn = CGF.buildFunction(cast(GD.getDecl())); + theModule.push_back(fn); + CurCGF = nullptr; } + void CIRGenModule::buildTopLevelDecl(Decl *decl) { switch (decl->getKind()) { default: @@ -2084,87 +268,6 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { } } -mlir::FuncOp CIRGenModule::buildFunction(const FunctionDecl *FD) { - CIRGenFunction CGF{*this}; - CurCGF = &CGF; - - // Create a scope in the symbol table to hold variable declarations. - SymTableScopeTy varScope(symbolTable); - - const CXXMethodDecl *MD = dyn_cast(FD); - assert(!MD && "methods not implemented"); - auto fnLoc = getLoc(FD->getSourceRange()); - - CurCGF->FnRetQualTy = FD->getReturnType(); - mlir::TypeRange FnTyRange = {}; - if (!CurCGF->FnRetQualTy->isVoidType()) { - CurCGF->FnRetTy = getCIRType(CurCGF->FnRetQualTy); - } - auto funcType = getTypes().GetFunctionType(GlobalDecl(FD)); - - mlir::FuncOp function = mlir::FuncOp::create(fnLoc, FD->getName(), funcType); - if (!function) - return nullptr; - - // In MLIR the entry block of the function is special: it must have the - // same argument list as the function itself. - mlir::Block *entryBlock = function.addEntryBlock(); - - // Set the insertion point in the builder to the beginning of the - // function body, it will be used throughout the codegen to create - // operations in this function. - builder.setInsertionPointToStart(entryBlock); - auto FnBeginLoc = getLoc(FD->getBody()->getEndLoc()); - auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); - - // Initialize lexical scope information. - { - LexicalScopeContext lexScope{FnBeginLoc, FnEndLoc, - builder.getInsertionBlock()}; - LexicalScopeGuard scopeGuard{*this, &lexScope}; - - // Declare all the function arguments in the symbol table. - for (const auto nameValue : - llvm::zip(FD->parameters(), entryBlock->getArguments())) { - auto *paramVar = std::get<0>(nameValue); - auto paramVal = std::get<1>(nameValue); - auto alignment = astCtx.getDeclAlign(paramVar); - auto paramLoc = getLoc(paramVar->getSourceRange()); - paramVal.setLoc(paramLoc); - - mlir::Value addr; - if (failed(declare(paramVar, paramVar->getType(), paramLoc, alignment, - addr, true /*param*/))) - return nullptr; - // Location of the store to the param storage tracked as beginning of - // the function body. - auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); - builder.create(fnBodyBegin, paramVal, addr); - } - assert(builder.getInsertionBlock() && "Should be valid"); - - // When the current function is not void, create an address to store the - // result value. - if (CurCGF->FnRetTy.has_value()) - buildAndUpdateRetAlloca(CurCGF->FnRetQualTy, FnEndLoc, - getNaturalTypeAlignment(CurCGF->FnRetQualTy)); - - // Emit the body of the function. - if (mlir::failed(buildFunctionBody(FD->getBody()))) { - function.erase(); - return nullptr; - } - assert(builder.getInsertionBlock() && "Should be valid"); - } - - if (mlir::failed(function.verifyBody())) - return nullptr; - theModule.push_back(function); - - CurCGF = nullptr; - return function; -} - mlir::Type CIRGenModule::getCIRType(const QualType &type) { return genTypes.ConvertType(type); } @@ -2323,5 +426,21 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( } mlir::Value CIRGenModule::GetGlobalValue(const Decl *D) { - return symbolTable.lookup(D); + assert(CurCGF); + return CurCGF->symbolTable.lookup(D); +} + +mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { + assert(CurCGF); + return CurCGF->getLoc(SLoc); +} + +mlir::Location CIRGenModule::getLoc(SourceRange SLoc) { + assert(CurCGF); + return CurCGF->getLoc(SLoc); +} + +mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { + assert(CurCGF); + return CurCGF->getLoc(lhs, rhs); } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index e4fe1d355ea6..dd3cf25677b0 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -54,10 +54,6 @@ class CIRGenModule { ~CIRGenModule(); - using SymTableTy = llvm::ScopedHashTable; - using SymTableScopeTy = - llvm::ScopedHashTableScope; - private: mutable std::unique_ptr TheTargetCIRGenInfo; @@ -83,221 +79,14 @@ class CIRGenModule { /// Per-module type mapping from clang AST to CIR. CIRGenTypes genTypes; - /// The symbol table maps a variable name to a value in the current scope. - /// Entering a function creates a new scope, and the function arguments are - /// added to the mapping. When the processing of a function is terminated, - /// the scope is destroyed and the mappings created in this scope are - /// dropped. - SymTableTy symbolTable; - /// Per-function codegen information. Updated everytime buildCIR is called /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; - /// ------- - /// Goto - /// ------- - - /// A jump destination is an abstract label, branching to which may - /// require a jump out through normal cleanups. - struct JumpDest { - JumpDest() = default; - JumpDest(mlir::Block *Block) : Block(Block) {} - - bool isValid() const { return Block != nullptr; } - mlir::Block *getBlock() const { return Block; } - mlir::Block *Block = nullptr; - }; - - /// Track mlir Blocks for each C/C++ label. - llvm::DenseMap LabelMap; - JumpDest &getJumpDestForLabel(const clang::LabelDecl *D); - - /// ------- - /// Lexical Scope: to be read as in the meaning in CIR, a scope is always - /// related with initialization and destruction of objects. - /// ------- - - // Represents a cir.scope, cir.if, and then/else regions. I.e. lexical - // scopes that require cleanups. - struct LexicalScopeContext { - private: - // Block containing cleanup code for things initialized in this - // lexical context (scope). - mlir::Block *CleanupBlock = nullptr; - - // Points to scope entry block. This is useful, for instance, for - // helping to insert allocas before finalizing any recursive codegen - // from switches. - mlir::Block *EntryBlock; - - // FIXME: perhaps we can use some info encoded in operations. - enum Kind { - Regular, // cir.if, cir.scope, if_regions - Switch // cir.switch - } ScopeKind = Regular; - - public: - unsigned Depth = 0; - bool HasReturn = false; - LexicalScopeContext(mlir::Location b, mlir::Location e, mlir::Block *eb) - : EntryBlock(eb), BeginLoc(b), EndLoc(e) {} - ~LexicalScopeContext() = default; - - // --- - // Kind - // --- - bool isRegular() { return ScopeKind == Kind::Regular; } - bool isSwitch() { return ScopeKind == Kind::Switch; } - void setAsSwitch() { ScopeKind = Kind::Switch; } - - // --- - // Goto handling - // --- - - // Lazy create cleanup block or return what's available. - mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) { - if (CleanupBlock) - return getCleanupBlock(builder); - return createCleanupBlock(builder); - } - - mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) { - return CleanupBlock; - } - mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) { - { - // Create the cleanup block but dont hook it up around just yet. - mlir::OpBuilder::InsertionGuard guard(builder); - CleanupBlock = builder.createBlock(builder.getBlock()->getParent()); - } - assert(builder.getInsertionBlock() && "Should be valid"); - return CleanupBlock; - } - - // Goto's introduced in this scope but didn't get fixed. - llvm::SmallVector, 4> - PendingGotos; - - // Labels solved inside this scope. - llvm::SmallPtrSet SolvedLabels; - - // --- - // Return handling - // --- - - private: - // On switches we need one return block per region, since cases don't - // have their own scopes but are distinct regions nonetheless. - llvm::SmallVector RetBlocks; - llvm::SmallVector> RetLocs; - unsigned int CurrentSwitchRegionIdx = -1; - - // There's usually only one ret block per scope, but this needs to be - // get or create because of potential unreachable return statements, note - // that for those, all source location maps to the first one found. - mlir::Block *createRetBlock(CIRGenModule &CGM, mlir::Location loc) { - assert((isSwitch() || RetBlocks.size() == 0) && - "only switches can hold more than one ret block"); - - // Create the cleanup block but dont hook it up around just yet. - mlir::OpBuilder::InsertionGuard guard(CGM.builder); - auto *b = CGM.builder.createBlock(CGM.builder.getBlock()->getParent()); - RetBlocks.push_back(b); - RetLocs.push_back(loc); - return b; - } - - public: - void updateCurrentSwitchCaseRegion() { CurrentSwitchRegionIdx++; } - llvm::ArrayRef getRetBlocks() { return RetBlocks; } - llvm::ArrayRef> getRetLocs() { - return RetLocs; - } - - mlir::Block *getOrCreateRetBlock(CIRGenModule &CGM, mlir::Location loc) { - unsigned int regionIdx = 0; - if (isSwitch()) - regionIdx = CurrentSwitchRegionIdx; - if (regionIdx >= RetBlocks.size()) - return createRetBlock(CGM, loc); - return &*RetBlocks.back(); - } - - // --- - // Scope entry block tracking - // --- - mlir::Block *getEntryBlock() { return EntryBlock; } - - mlir::Location BeginLoc, EndLoc; - }; - - class LexicalScopeGuard { - CIRGenModule &CGM; - LexicalScopeContext *OldVal = nullptr; - - public: - LexicalScopeGuard(CIRGenModule &c, LexicalScopeContext *L) : CGM(c) { - if (CGM.currLexScope) { - OldVal = CGM.currLexScope; - L->Depth++; - } - CGM.currLexScope = L; - } - - LexicalScopeGuard(const LexicalScopeGuard &) = delete; - LexicalScopeGuard &operator=(const LexicalScopeGuard &) = delete; - LexicalScopeGuard &operator=(LexicalScopeGuard &&other) = delete; - - void cleanup(); - void restore() { CGM.currLexScope = OldVal; } - ~LexicalScopeGuard() { - cleanup(); - restore(); - } - }; - - LexicalScopeContext *currLexScope = nullptr; - - /// ------- - /// Source Location tracking - /// ------- - - /// Use to track source locations across nested visitor traversals. - /// Always use a `SourceLocRAIIObject` to change currSrcLoc. - std::optional currSrcLoc; - class SourceLocRAIIObject { - CIRGenModule &P; - std::optional OldVal; - - public: - SourceLocRAIIObject(CIRGenModule &p, mlir::Location Value) : P(p) { - if (P.currSrcLoc) - OldVal = P.currSrcLoc; - P.currSrcLoc = Value; - } - - /// Can be used to restore the state early, before the dtor - /// is run. - void restore() { P.currSrcLoc = OldVal; } - ~SourceLocRAIIObject() { restore(); } - }; - /// ------- /// Declaring variables /// ------- - /// Declare a variable in the current scope, return success if the variable - /// wasn't declared yet. - mlir::LogicalResult declare(const clang::Decl *var, clang::QualType ty, - mlir::Location loc, clang::CharUnits alignment, - mlir::Value &addr, bool isParam = false); - mlir::Value buildAlloca(llvm::StringRef name, mlir::cir::InitStyle initStyle, - clang::QualType ty, mlir::Location loc, - clang::CharUnits alignment); - void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, - clang::CharUnits alignment); - public: mlir::ModuleOp getModule() { return theModule; } mlir::OpBuilder &getBuilder() { return builder; } @@ -319,32 +108,6 @@ class CIRGenModule { mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs); - struct AutoVarEmission { - const clang::VarDecl *Variable; - /// The address of the alloca for languages with explicit address space - /// (e.g. OpenCL) or alloca casted to generic pointer for address space - /// agnostic languages (e.g. C++). Invalid if the variable was emitted - /// as a global constant. - Address Addr; - - /// True if the variable is of aggregate type and has a constant - /// initializer. - bool IsConstantAggregate; - - struct Invalid {}; - AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {} - - AutoVarEmission(const clang::VarDecl &variable) - : Variable(&variable), Addr(Address::invalid()), - IsConstantAggregate(false) {} - - static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } - /// Returns the raw, allocated address, which is not necessarily - /// the address of the object itself. It is casted to default - /// address space for address space agnostic languages. - Address getAllocatedAddress() const { return Addr; } - }; - /// Determine whether an object of this type can be emitted /// as a constant. /// @@ -356,91 +119,6 @@ class CIRGenModule { /// query specific. bool isTypeConstant(clang::QualType Ty, bool ExcludeCtor); - /// Emit the alloca and debug information for a - /// local variable. Does not emit initialization or destruction. - AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D); - - /// Determine whether the given initializer is trivial in the sense - /// that it requires no code to be generated. - bool isTrivialInitializer(const clang::Expr *Init); - - // TODO: this can also be abstrated into common AST helpers - bool hasBooleanRepresentation(clang::QualType Ty); - - mlir::Value buildToMemory(mlir::Value Value, clang::QualType Ty); - - void buildStoreOfScalar(mlir::Value value, LValue lvalue, - const clang::Decl *InitDecl); - - void buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, - clang::QualType Ty, LValueBaseInfo BaseInfo, - const clang::Decl *InitDecl, bool isNontemporal); - - /// Store the specified rvalue into the specified - /// lvalue, where both are guaranteed to the have the same type, and that type - /// is 'Ty'. - void buldStoreThroughLValue(RValue Src, LValue Dst, - const clang::Decl *InitDecl); - - void buildScalarInit(const clang::Expr *init, const clang::ValueDecl *D, - LValue lvalue); - - /// Emit an expression as an initializer for an object (variable, field, etc.) - /// at the given location. The expression is not necessarily the normal - /// initializer for the object, and the address is not necessarily - /// its normal location. - /// - /// \param init the initializing expression - /// \param D the object to act as if we're initializing - /// \param lvalue the lvalue to initialize - void buildExprAsInit(const clang::Expr *init, const clang::ValueDecl *D, - LValue lvalue); - - void buildAutoVarInit(const AutoVarEmission &emission); - - void buildAutoVarCleanups(const AutoVarEmission &emission); - - /// Emit code and set up symbol table for a variable declaration with auto, - /// register, or no storage class specifier. These turn into simple stack - /// objects, globals depending on target. - void buildAutoVarDecl(const clang::VarDecl &D); - - /// This method handles emission of any variable declaration - /// inside a function, including static vars etc. - void buildVarDecl(const clang::VarDecl &D); - - void buildDecl(const clang::Decl &D); - - /// Emit the computation of the specified expression of scalar type, - /// ignoring the result. - mlir::Value buildScalarExpr(const clang::Expr *E); - - /// Emit a conversion from the specified type to the specified destination - /// type, both of which are CIR scalar types. - mlir::Value buildScalarConversion(mlir::Value Src, clang::QualType SrcTy, - clang::QualType DstTy, - clang::SourceLocation Loc); - - mlir::LogicalResult buildBranchThroughCleanup(JumpDest &Dest, - clang::LabelDecl *L, - mlir::Location Loc); - - mlir::LogicalResult buildReturnStmt(const clang::ReturnStmt &S); - - mlir::LogicalResult buildLabel(const clang::LabelDecl *D); - mlir::LogicalResult buildLabelStmt(const clang::LabelStmt &S); - - mlir::LogicalResult buildGotoStmt(const clang::GotoStmt &S); - - mlir::LogicalResult buildDeclStmt(const clang::DeclStmt &S); - - mlir::LogicalResult buildSimpleStmt(const clang::Stmt *S, - bool useCurrentScope); - - LValue buildDeclRefLValue(const clang::DeclRefExpr *E); - - LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E); - /// FIXME: this could likely be a common helper and not necessarily related /// with codegen. /// Return the best known alignment for an unknown pointer to a @@ -460,88 +138,12 @@ class CIRGenModule { LValueBaseInfo *BaseInfo = nullptr, bool forPointeeType = false); - /// Given an expression of pointer type, try to - /// derive a more accurate bound on the alignment of the pointer. - Address buildPointerWithAlignment(const clang::Expr *E, - LValueBaseInfo *BaseInfo); - - LValue buildUnaryOpLValue(const clang::UnaryOperator *E); - - /// Emit code to compute a designator that specifies the location - /// of the expression. - /// FIXME: document this function better. - LValue buildLValue(const clang::Expr *E); - - /// EmitIgnoredExpr - Emit code to compute the specified expression, - /// ignoring the result. - void buildIgnoredExpr(const clang::Expr *E); - - /// If the specified expression does not fold - /// to a constant, or if it does but contains a label, return false. If it - /// constant folds return true and set the boolean result in Result. - bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond, bool &ResultBool, - bool AllowLabels); - - /// Return true if the statement contains a label in it. If - /// this statement is not executed normally, it not containing a label means - /// that we can just remove the code. - bool ContainsLabel(const clang::Stmt *S, bool IgnoreCaseStmts = false); - - /// If the specified expression does not fold - /// to a constant, or if it does but contains a label, return false. If it - /// constant folds return true and set the folded value. - bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond, - llvm::APSInt &ResultInt, bool AllowLabels); - - /// Perform the usual unary conversions on the specified - /// expression and compare the result against zero, returning an Int1Ty value. - mlir::Value evaluateExprAsBool(const clang::Expr *E); - - /// Emit an if on a boolean condition to the specified blocks. - /// FIXME: Based on the condition, this might try to simplify the codegen of - /// the conditional based on the branch. TrueCount should be the number of - /// times we expect the condition to evaluate to true based on PGO data. We - /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr - /// for extra ideas). - mlir::LogicalResult buildIfOnBoolExpr(const clang::Expr *cond, - mlir::Location loc, - const clang::Stmt *thenS, - const clang::Stmt *elseS); - - mlir::LogicalResult buildIfStmt(const clang::IfStmt &S); - mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S, - mlir::Type condType, - mlir::cir::CaseAttr &caseEntry); - mlir::LogicalResult buildDefaultStmt(const clang::DefaultStmt &S, - mlir::Type condType, - mlir::cir::CaseAttr &caseEntry); - - mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); - mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); - mlir::LogicalResult buildForStmt(const clang::ForStmt &S); - mlir::LogicalResult buildWhileStmt(const clang::WhileStmt &S); - mlir::LogicalResult buildDoStmt(const clang::DoStmt &S); - - // Build CIR for a statement. useCurrentScope should be true if no - // new scopes need be created when finding a compound statement. - mlir::LogicalResult buildStmt(const clang::Stmt *S, bool useCurrentScope); - - mlir::LogicalResult buildFunctionBody(const clang::Stmt *Body); - - mlir::LogicalResult buildCompoundStmt(const clang::CompoundStmt &S); - - mlir::LogicalResult - buildCompoundStmtWithoutScope(const clang::CompoundStmt &S); - void buildTopLevelDecl(clang::Decl *decl); /// Emit code for a single global function or var decl. Forward declarations /// are emitted lazily. void buildGlobal(clang::GlobalDecl D); - // Emit a new function and add it to the MLIR module. - mlir::FuncOp buildFunction(const clang::FunctionDecl *FD); - mlir::Type getCIRType(const clang::QualType &type); /// Determine whether the definition must be emitted; if this returns \c @@ -568,6 +170,8 @@ class CIRGenModule { mlir::Value GetGlobalValue(const clang::Decl *D); + void emitError(const llvm::Twine &message) { theModule.emitError(message); } + private: // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR diff --git a/clang/lib/CIR/CIRGenStmt.cpp b/clang/lib/CIR/CIRGenStmt.cpp new file mode 100644 index 000000000000..084031d5780a --- /dev/null +++ b/clang/lib/CIR/CIRGenStmt.cpp @@ -0,0 +1,878 @@ +//===--- CIRGenStmt.cpp - Emit CIR Code from Statements -------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Stmt nodes as CIR code. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenFunction.h" + +using namespace cir; +using namespace clang; +using namespace mlir::cir; + +mlir::LogicalResult +CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S) { + for (auto *CurStmt : S.body()) + if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed()) + return mlir::failure(); + + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildCompoundStmt(const CompoundStmt &S) { + mlir::LogicalResult res = mlir::success(); + + auto compoundStmtBuilder = [&]() -> mlir::LogicalResult { + if (buildCompoundStmtWithoutScope(S).failed()) + return mlir::failure(); + + return mlir::success(); + }; + + // Add local scope to track new declared variables. + SymTableScopeTy varScope(symbolTable); + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto locBegin = fusedLoc.getLocations()[0]; + auto locEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{locBegin, locEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexScopeGuard{*this, &lexScope}; + res = compoundStmtBuilder(); + }); + + return res; +} + +// Build CIR for a statement. useCurrentScope should be true if no +// new scopes need be created when finding a compound statement. +mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, + bool useCurrentScope) { + if (mlir::succeeded(buildSimpleStmt(S, useCurrentScope))) + return mlir::success(); + + if (getContext().getLangOpts().OpenMP && + getContext().getLangOpts().OpenMPSimd) + assert(0 && "not implemented"); + + switch (S->getStmtClass()) { + default: + llvm_unreachable("unknown statement class"); + case Stmt::NoStmtClass: + case Stmt::CXXCatchStmtClass: + case Stmt::SEHExceptStmtClass: + case Stmt::SEHFinallyStmtClass: + case Stmt::MSDependentExistsStmtClass: + llvm_unreachable("invalid statement class to emit generically"); + case Stmt::NullStmtClass: + case Stmt::CompoundStmtClass: + case Stmt::DeclStmtClass: + case Stmt::LabelStmtClass: + case Stmt::AttributedStmtClass: + case Stmt::GotoStmtClass: + case Stmt::BreakStmtClass: + case Stmt::ContinueStmtClass: + case Stmt::DefaultStmtClass: + case Stmt::CaseStmtClass: + case Stmt::SEHLeaveStmtClass: + llvm_unreachable("should have emitted these statements as simple"); + +#define STMT(Type, Base) +#define ABSTRACT_STMT(Op) +#define EXPR(Type, Base) case Stmt::Type##Class: +#include "clang/AST/StmtNodes.inc" + { + // Remember the block we came in on. + mlir::Block *incoming = builder.getInsertionBlock(); + assert(incoming && "expression emission must have an insertion point"); + + buildIgnoredExpr(cast(S)); + + mlir::Block *outgoing = builder.getInsertionBlock(); + assert(outgoing && "expression emission cleared block!"); + + // FIXME: Should we mimic LLVM emission here? + // The expression emitters assume (reasonably!) that the insertion + // point is always set. To maintain that, the call-emission code + // for noreturn functions has to enter a new block with no + // predecessors. We want to kill that block and mark the current + // insertion point unreachable in the common case of a call like + // "exit();". Since expression emission doesn't otherwise create + // blocks with no predecessors, we can just test for that. + // However, we must be careful not to do this to our incoming + // block, because *statement* emission does sometimes create + // reachable blocks which will have no predecessors until later in + // the function. This occurs with, e.g., labels that are not + // reachable by fallthrough. + if (incoming != outgoing && outgoing->use_empty()) + assert(0 && "not implemented"); + break; + } + + case Stmt::IfStmtClass: + if (buildIfStmt(cast(*S)).failed()) + return mlir::failure(); + break; + case Stmt::SwitchStmtClass: + if (buildSwitchStmt(cast(*S)).failed()) + return mlir::failure(); + break; + case Stmt::ForStmtClass: + if (buildForStmt(cast(*S)).failed()) + return mlir::failure(); + break; + case Stmt::WhileStmtClass: + if (buildWhileStmt(cast(*S)).failed()) + return mlir::failure(); + break; + case Stmt::DoStmtClass: + if (buildDoStmt(cast(*S)).failed()) + return mlir::failure(); + break; + + case Stmt::IndirectGotoStmtClass: + case Stmt::ReturnStmtClass: + // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. + case Stmt::GCCAsmStmtClass: + case Stmt::MSAsmStmtClass: + case Stmt::CoroutineBodyStmtClass: + case Stmt::CoreturnStmtClass: + case Stmt::CapturedStmtClass: + case Stmt::ObjCAtTryStmtClass: + case Stmt::ObjCAtThrowStmtClass: + case Stmt::ObjCAtSynchronizedStmtClass: + case Stmt::ObjCForCollectionStmtClass: + case Stmt::ObjCAutoreleasePoolStmtClass: + case Stmt::CXXTryStmtClass: + case Stmt::CXXForRangeStmtClass: + case Stmt::SEHTryStmtClass: + case Stmt::OMPMetaDirectiveClass: + case Stmt::OMPCanonicalLoopClass: + case Stmt::OMPParallelDirectiveClass: + case Stmt::OMPSimdDirectiveClass: + case Stmt::OMPTileDirectiveClass: + case Stmt::OMPUnrollDirectiveClass: + case Stmt::OMPForDirectiveClass: + case Stmt::OMPForSimdDirectiveClass: + case Stmt::OMPSectionsDirectiveClass: + case Stmt::OMPSectionDirectiveClass: + case Stmt::OMPSingleDirectiveClass: + case Stmt::OMPMasterDirectiveClass: + case Stmt::OMPCriticalDirectiveClass: + case Stmt::OMPParallelForDirectiveClass: + case Stmt::OMPParallelForSimdDirectiveClass: + case Stmt::OMPParallelMasterDirectiveClass: + case Stmt::OMPParallelSectionsDirectiveClass: + case Stmt::OMPTaskDirectiveClass: + case Stmt::OMPTaskyieldDirectiveClass: + case Stmt::OMPBarrierDirectiveClass: + case Stmt::OMPTaskwaitDirectiveClass: + case Stmt::OMPTaskgroupDirectiveClass: + case Stmt::OMPFlushDirectiveClass: + case Stmt::OMPDepobjDirectiveClass: + case Stmt::OMPScanDirectiveClass: + case Stmt::OMPOrderedDirectiveClass: + case Stmt::OMPAtomicDirectiveClass: + case Stmt::OMPTargetDirectiveClass: + case Stmt::OMPTeamsDirectiveClass: + case Stmt::OMPCancellationPointDirectiveClass: + case Stmt::OMPCancelDirectiveClass: + case Stmt::OMPTargetDataDirectiveClass: + case Stmt::OMPTargetEnterDataDirectiveClass: + case Stmt::OMPTargetExitDataDirectiveClass: + case Stmt::OMPTargetParallelDirectiveClass: + case Stmt::OMPTargetParallelForDirectiveClass: + case Stmt::OMPTaskLoopDirectiveClass: + case Stmt::OMPTaskLoopSimdDirectiveClass: + case Stmt::OMPMasterTaskLoopDirectiveClass: + case Stmt::OMPMasterTaskLoopSimdDirectiveClass: + case Stmt::OMPParallelMasterTaskLoopDirectiveClass: + case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass: + case Stmt::OMPDistributeDirectiveClass: + case Stmt::OMPTargetUpdateDirectiveClass: + case Stmt::OMPDistributeParallelForDirectiveClass: + case Stmt::OMPDistributeParallelForSimdDirectiveClass: + case Stmt::OMPDistributeSimdDirectiveClass: + case Stmt::OMPTargetParallelForSimdDirectiveClass: + case Stmt::OMPTargetSimdDirectiveClass: + case Stmt::OMPTeamsDistributeDirectiveClass: + case Stmt::OMPTeamsDistributeSimdDirectiveClass: + case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass: + case Stmt::OMPTeamsDistributeParallelForDirectiveClass: + case Stmt::OMPTargetTeamsDirectiveClass: + case Stmt::OMPTargetTeamsDistributeDirectiveClass: + case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass: + case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass: + case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass: + case Stmt::OMPInteropDirectiveClass: + case Stmt::OMPDispatchDirectiveClass: + case Stmt::OMPGenericLoopDirectiveClass: + case Stmt::OMPMaskedDirectiveClass: { + llvm::errs() << "CIR codegen for '" << S->getStmtClassName() + << "' not implemented\n"; + assert(0 && "not implemented"); + break; + } + case Stmt::ObjCAtCatchStmtClass: + llvm_unreachable( + "@catch statements should be handled by EmitObjCAtTryStmt"); + case Stmt::ObjCAtFinallyStmtClass: + llvm_unreachable( + "@finally statements should be handled by EmitObjCAtTryStmt"); + } + + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, + bool useCurrentScope) { + switch (S->getStmtClass()) { + default: + return mlir::failure(); + case Stmt::DeclStmtClass: + return buildDeclStmt(cast(*S)); + case Stmt::CompoundStmtClass: + return useCurrentScope + ? buildCompoundStmtWithoutScope(cast(*S)) + : buildCompoundStmt(cast(*S)); + case Stmt::ReturnStmtClass: + return buildReturnStmt(cast(*S)); + case Stmt::GotoStmtClass: + return buildGotoStmt(cast(*S)); + + case Stmt::NullStmtClass: + break; + + case Stmt::LabelStmtClass: + return buildLabelStmt(cast(*S)); + + case Stmt::CaseStmtClass: + case Stmt::DefaultStmtClass: + assert(0 && + "Should not get here, currently handled directly from SwitchStmt"); + break; + + case Stmt::BreakStmtClass: + return buildBreakStmt(cast(*S)); + + case Stmt::AttributedStmtClass: + case Stmt::ContinueStmtClass: + case Stmt::SEHLeaveStmtClass: + llvm::errs() << "CIR codegen for '" << S->getStmtClassName() + << "' not implemented\n"; + assert(0 && "not implemented"); + } + + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildLabelStmt(const clang::LabelStmt &S) { + if (buildLabel(S.getDecl()).failed()) + return mlir::failure(); + + // IsEHa: not implemented. + assert(!(getContext().getLangOpts().EHAsynch && S.isSideEntry())); + + return buildStmt(S.getSubStmt(), /* useCurrentScope */ true); +} + +// Add terminating yield on body regions (loops, ...) in case there are +// not other terminators used. +// FIXME: make terminateCaseRegion use this too. +static void terminateBody(mlir::OpBuilder &builder, mlir::Region &r, + mlir::Location loc) { + if (r.empty()) + return; + + SmallVector eraseBlocks; + unsigned numBlocks = r.getBlocks().size(); + for (auto &block : r.getBlocks()) { + // Already cleanup after return operations, which might create + // empty blocks if emitted as last stmt. + if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() && + block.hasNoSuccessors()) + eraseBlocks.push_back(&block); + + if (block.empty() || + !block.back().hasTrait()) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(&block); + builder.create(loc); + } + } + + for (auto *b : eraseBlocks) + b->erase(); +} + +static mlir::Location getIfLocs(CIRGenFunction &CGF, const clang::Stmt *thenS, + const clang::Stmt *elseS) { + // Attempt to be more accurate as possible with IfOp location, generate + // one fused location that has either 2 or 4 total locations, depending + // on else's availability. + SmallVector ifLocs; + mlir::Attribute metadata; + + clang::SourceRange t = thenS->getSourceRange(); + ifLocs.push_back(CGF.getLoc(t.getBegin())); + ifLocs.push_back(CGF.getLoc(t.getEnd())); + if (elseS) { + clang::SourceRange e = elseS->getSourceRange(); + ifLocs.push_back(CGF.getLoc(e.getBegin())); + ifLocs.push_back(CGF.getLoc(e.getEnd())); + } + + return mlir::FusedLoc::get(ifLocs, metadata, CGF.getBuilder().getContext()); +} + +mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { + // The else branch of a consteval if statement is always the only branch + // that can be runtime evaluated. + assert(!S.isConsteval() && "not implemented"); + mlir::LogicalResult res = mlir::success(); + + // C99 6.8.4.1: The first substatement is executed if the expression + // compares unequal to 0. The condition must be a scalar type. + auto ifStmtBuilder = [&]() -> mlir::LogicalResult { + if (S.getInit()) + if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + + if (S.getConditionVariable()) + buildDecl(*S.getConditionVariable()); + + // If the condition constant folds and can be elided, try to avoid + // emitting the condition and the dead arm of the if/else. + // FIXME: should this be done as part of a constant folder pass instead? + bool CondConstant; + if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, + S.isConstexpr())) { + assert(0 && "not implemented"); + } + + // TODO: PGO and likelihood. + auto ifLoc = getIfLocs(*this, S.getThen(), S.getElse()); + return buildIfOnBoolExpr(S.getCond(), ifLoc, S.getThen(), S.getElse()); + }; + + // TODO: Add a new scoped symbol table. + // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); + // The if scope contains the full source range for IfStmt. + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; + res = ifStmtBuilder(); + }); + + return res; +} + +mlir::LogicalResult CIRGenFunction::buildDeclStmt(const DeclStmt &S) { + if (!builder.getInsertionBlock()) { + CGM.emitError("Seems like this is unreachable code, what should we do?"); + return mlir::failure(); + } + + for (const auto *I : S.decls()) { + buildDecl(*I); + } + + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { + assert(!(getContext().getLangOpts().ElideConstructors && + S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) && + "unimplemented"); + assert(!FnRetQualTy->isReferenceType() && "unimplemented"); + auto loc = getLoc(S.getSourceRange()); + + // Emit the result value, even if unused, to evaluate the side effects. + const Expr *RV = S.getRetValue(); + if (RV) { + assert(!isa(RV) && "unimplemented"); + + mlir::Value V = nullptr; + switch (CIRGenFunction::getEvaluationKind(RV->getType())) { + case TEK_Scalar: + V = buildScalarExpr(RV); + builder.create(loc, V, *FnRetAlloca); + break; + case TEK_Complex: + case TEK_Aggregate: + llvm::errs() << "ReturnStmt EvaluationKind not implemented\n"; + return mlir::failure(); + } + + // Otherwise, this return operation has zero operands. + if (!V || (RV && RV->getType()->isVoidType())) { + // FIXME: evaluate for side effects. + } + } else { + // Do nothing (return value is left uninitialized), this is also + // the path when returning from void functions. + } + + // Create a new return block (if not existent) and add a branch to + // it. The actual return instruction is only inserted during current + // scope cleanup handling. + auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); + builder.create(loc, retBlock); + + // Insert the new block to continue codegen after branch to ret block. + builder.createBlock(builder.getBlock()->getParent()); + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { + // FIXME: LLVM codegen inserts emit stop point here for debug info + // sake when the insertion point is available, but doesn't do + // anything special when there isn't. We haven't implemented debug + // info support just yet, look at this again once we have it. + assert(builder.getInsertionBlock() && "not yet implemented"); + + // A goto marks the end of a block, create a new one for codegen after + // buildGotoStmt can resume building in that block. + + // Build a cir.br to the target label. + auto &JD = LabelMap[S.getLabel()]; + if (buildBranchThroughCleanup(JD, S.getLabel(), getLoc(S.getSourceRange())) + .failed()) + return mlir::failure(); + + // Insert the new block to continue codegen after goto. + builder.createBlock(builder.getBlock()->getParent()); + + // What here... + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildLabel(const LabelDecl *D) { + JumpDest &Dest = LabelMap[D]; + + // Create a new block to tag with a label and add a branch from + // the current one to it. If the block is empty just call attach it + // to this label. + mlir::Block *currBlock = builder.getBlock(); + mlir::Block *labelBlock = currBlock; + if (!currBlock->empty()) { + + { + mlir::OpBuilder::InsertionGuard guard(builder); + labelBlock = builder.createBlock(builder.getBlock()->getParent()); + } + + builder.create(getLoc(D->getSourceRange()), labelBlock); + builder.setInsertionPointToEnd(labelBlock); + } + + if (!Dest.isValid()) { + Dest.Block = labelBlock; + currLexScope->SolvedLabels.insert(D); + // FIXME: add a label attribute to block... + } else { + assert(0 && "unimplemented"); + } + + // FIXME: emit debug info for labels, incrementProfileCounter + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) { + builder.create( + getLoc(S.getBreakLoc()), + mlir::cir::YieldOpKindAttr::get(builder.getContext(), + mlir::cir::YieldOpKind::Break), + mlir::ValueRange({})); + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildCaseStmt(const CaseStmt &S, + mlir::Type condType, + CaseAttr &caseEntry) { + assert((!S.getRHS() || !S.caseStmtIsGNURange()) && + "case ranges not implemented"); + auto res = mlir::success(); + + const CaseStmt *caseStmt = &S; + SmallVector caseEltValueListAttr; + // Fold cascading cases whenever possible to simplify codegen a bit. + while (true) { + auto intVal = caseStmt->getLHS()->EvaluateKnownConstInt(getContext()); + caseEltValueListAttr.push_back(mlir::IntegerAttr::get(condType, intVal)); + if (isa(caseStmt->getSubStmt())) + caseStmt = dyn_cast_or_null(caseStmt->getSubStmt()); + else + break; + } + + auto caseValueList = builder.getArrayAttr(caseEltValueListAttr); + + auto *ctx = builder.getContext(); + caseEntry = mlir::cir::CaseAttr::get( + ctx, caseValueList, + CaseOpKindAttr::get(ctx, caseEltValueListAttr.size() > 1 + ? mlir::cir::CaseOpKind::Anyof + : mlir::cir::CaseOpKind::Equal)); + + { + mlir::OpBuilder::InsertionGuard guardCase(builder); + res = buildStmt( + caseStmt->getSubStmt(), + /*useCurrentScope=*/!isa(caseStmt->getSubStmt())); + } + + // TODO: likelihood + return res; +} + +mlir::LogicalResult CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, + mlir::Type condType, + CaseAttr &caseEntry) { + auto res = mlir::success(); + auto *ctx = builder.getContext(); + caseEntry = mlir::cir::CaseAttr::get( + ctx, builder.getArrayAttr({}), + CaseOpKindAttr::get(ctx, mlir::cir::CaseOpKind::Default)); + { + mlir::OpBuilder::InsertionGuard guardCase(builder); + res = buildStmt(S.getSubStmt(), + /*useCurrentScope=*/!isa(S.getSubStmt())); + } + + // TODO: likelihood + return res; +} + +mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { + mlir::cir::LoopOp loopOp; + + // TODO: pass in array of attributes. + auto forStmtBuilder = [&]() -> mlir::LogicalResult { + auto forRes = mlir::success(); + // Evaluate the first part before the loop. + if (S.getInit()) + if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + + loopOp = builder.create( + getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::For, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // TODO: branch weigths, likelyhood, profile counter, etc. + mlir::Value condVal; + if (S.getCond()) { + // If the for statement has a condition scope, + // emit the local variable declaration. + if (S.getConditionVariable()) + buildDecl(*S.getConditionVariable()); + // C99 6.8.5p2/p4: The first substatement is executed if the + // expression compares unequal to 0. The condition must be a + // scalar type. + condVal = evaluateExprAsBool(S.getCond()); + } else { + condVal = b.create( + loc, mlir::cir::BoolType::get(b.getContext()), + b.getBoolAttr(true)); + } + b.create(loc, condVal); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // FIXME: in C we need to open a new scope here. Do we also need it + // for C++ in case it's a compound statement? + if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + forRes = mlir::failure(); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + if (S.getInc()) + if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) + forRes = mlir::failure(); + builder.create(loc); + }); + return forRes; + }; + + auto res = mlir::success(); + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + res = forStmtBuilder(); + }); + + if (res.failed()) + return res; + + terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { + mlir::cir::LoopOp loopOp; + + // TODO: pass in array of attributes. + auto doStmtBuilder = [&]() -> mlir::LogicalResult { + auto forRes = mlir::success(); + + loopOp = builder.create( + getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::DoWhile, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // TODO: branch weigths, likelyhood, profile counter, etc. + // C99 6.8.5p2/p4: The first substatement is executed if the + // expression compares unequal to 0. The condition must be a + // scalar type. + mlir::Value condVal = evaluateExprAsBool(S.getCond()); + b.create(loc, condVal); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + forRes = mlir::failure(); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + builder.create(loc); + }); + return forRes; + }; + + auto res = mlir::success(); + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + res = doStmtBuilder(); + }); + + if (res.failed()) + return res; + + terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { + mlir::cir::LoopOp loopOp; + + // TODO: pass in array of attributes. + auto whileStmtBuilder = [&]() -> mlir::LogicalResult { + auto forRes = mlir::success(); + + loopOp = builder.create( + getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::While, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // TODO: branch weigths, likelyhood, profile counter, etc. + mlir::Value condVal; + // If the for statement has a condition scope, + // emit the local variable declaration. + if (S.getConditionVariable()) + buildDecl(*S.getConditionVariable()); + // C99 6.8.5p2/p4: The first substatement is executed if the + // expression compares unequal to 0. The condition must be a + // scalar type. + condVal = evaluateExprAsBool(S.getCond()); + b.create(loc, condVal); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + forRes = mlir::failure(); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + builder.create(loc); + }); + return forRes; + }; + + auto res = mlir::success(); + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + res = whileStmtBuilder(); + }); + + if (res.failed()) + return res; + + terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); + return mlir::success(); +} + +mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { + // TODO: LLVM codegen does some early optimization to fold the condition and + // only emit live cases. CIR should use MLIR to achieve similar things, + // nothing to be done here. + // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))... + + auto res = mlir::success(); + SwitchOp swop; + + auto switchStmtBuilder = [&]() -> mlir::LogicalResult { + if (S.getInit()) + if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + + if (S.getConditionVariable()) + buildDecl(*S.getConditionVariable()); + + mlir::Value condV = buildScalarExpr(S.getCond()); + + // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts()) + // TODO: if the switch has a condition wrapped by __builtin_unpredictable? + + // FIXME: track switch to handle nested stmts. + swop = builder.create( + getLoc(S.getBeginLoc()), condV, + /*switchBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { + auto *cs = dyn_cast(S.getBody()); + assert(cs && "expected compound stmt"); + SmallVector caseAttrs; + + currLexScope->setAsSwitch(); + mlir::Block *lastCaseBlock = nullptr; + for (auto *c : cs->body()) { + bool caseLike = isa(c); + if (!caseLike) { + // This means it's a random stmt following up a case, just + // emit it as part of previous known case. + assert(lastCaseBlock && "expects pre-existing case block"); + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(lastCaseBlock); + res = buildStmt(c, /*useCurrentScope=*/!isa(c)); + if (res.failed()) + break; + continue; + } + + auto *caseStmt = dyn_cast(c); + CaseAttr caseAttr; + { + mlir::OpBuilder::InsertionGuard guardCase(builder); + + // Update scope information with the current region we are + // emitting code for. This is useful to allow return blocks to be + // automatically and properly placed during cleanup. + mlir::Region *caseRegion = os.addRegion(); + currLexScope->updateCurrentSwitchCaseRegion(); + + lastCaseBlock = builder.createBlock(caseRegion); + if (caseStmt) + res = buildCaseStmt(*caseStmt, condV.getType(), caseAttr); + else { + auto *defaultStmt = dyn_cast(c); + assert(defaultStmt && "expected default stmt"); + res = buildDefaultStmt(*defaultStmt, condV.getType(), caseAttr); + } + + if (res.failed()) + break; + } + caseAttrs.push_back(caseAttr); + } + + os.addAttribute("cases", builder.getArrayAttr(caseAttrs)); + }); + + if (res.failed()) + return res; + return mlir::success(); + }; + + // The switch scope contains the full source range for SwitchStmt. + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; + res = switchStmtBuilder(); + }); + + if (res.failed()) + return res; + + // Any block in a case region without a terminator is considered a + // fallthrough yield. In practice there shouldn't be more than one + // block without a terminator, we patch any block we see though and + // let mlir's SwitchOp verifier enforce rules. + auto terminateCaseRegion = [&](mlir::Region &r, mlir::Location loc) { + if (r.empty()) + return; + + SmallVector eraseBlocks; + unsigned numBlocks = r.getBlocks().size(); + for (auto &block : r.getBlocks()) { + // Already cleanup after return operations, which might create + // empty blocks if emitted as last stmt. + if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() && + block.hasNoSuccessors()) + eraseBlocks.push_back(&block); + + if (block.empty() || + !block.back().hasTrait()) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(&block); + builder.create( + loc, + mlir::cir::YieldOpKindAttr::get( + builder.getContext(), mlir::cir::YieldOpKind::Fallthrough), + mlir::ValueRange({})); + } + } + + for (auto *b : eraseBlocks) + b->erase(); + }; + + // Make sure all case regions are terminated by inserting fallthroughs + // when necessary. + // FIXME: find a better way to get accurante with location here. + for (auto &r : swop.getRegions()) + terminateCaseRegion(r, swop.getLoc()); + return mlir::success(); +} diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index aeac8e003a3f..51988f71ac5a 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -43,9 +43,7 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { void CIRGenerator::verifyModule() { CGM->verifyModule(); } bool CIRGenerator::EmitFunction(const FunctionDecl *FD) { - auto func = CGM->buildFunction(FD); - assert(func && "should emit function"); - return func.getOperation() != nullptr; + llvm_unreachable("NYI"); } mlir::ModuleOp CIRGenerator::getModule() { return CGM->getModule(); } diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index b2c31280d9af..15daaa6d55bb 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -1,23 +1,26 @@ -set(LLVM_LINK_COMPONENTS +set( + LLVM_LINK_COMPONENTS Core Support - ) +) -add_subdirectory(CodeGen) - -include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) -include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) +include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) +include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) -add_clang_library(clangCIR +add_clang_library( + clangCIR CIRGenCall.cpp CIRGenerator.cpp + CIRGenCleanup.cpp CIRGenCXXABI.cpp + CIRGenDecl.cpp CIRGenExpr.cpp CIRGenExprScalar.cpp CIRGenFunction.cpp CIRGenModule.cpp + CIRGenStmt.cpp CIRGenTypes.cpp CIRPasses.cpp CIRRecordLayoutBuilder.cpp @@ -53,4 +56,4 @@ add_clang_library(clangCIR MLIRMemRefDialect MLIRTargetLLVMIRExport MLIRTransforms - ) +) diff --git a/clang/lib/Sema/CIRBasedWarnings.cpp b/clang/lib/Sema/CIRBasedWarnings.cpp index 88ba1013d047..ae7e3e29055e 100644 --- a/clang/lib/Sema/CIRBasedWarnings.cpp +++ b/clang/lib/Sema/CIRBasedWarnings.cpp @@ -114,7 +114,7 @@ void clang::sema::CIRBasedWarnings::IssueWarnings( // Unlike Clang CFG, we share CIR state between each analyzed function, // retrieve or create a new context. - CIRGen->EmitFunction(FD); + // CIRGen->EmitFunction(FD); } void clang::sema::CIRBasedWarnings::PrintStats() const { diff --git a/clang/test/CIR/global-var-simple.cpp b/clang/test/CIR/global-var-simple.cpp new file mode 100644 index 000000000000..ae0056f9f7c1 --- /dev/null +++ b/clang/test/CIR/global-var-simple.cpp @@ -0,0 +1,60 @@ +// Global variables of intergal types +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// XFAIL: * + +char c; +// CHECK: cir.global @c : !cir.int + +signed char sc; +// CHECK: cir.global @sc : !cir.int + +unsigned char uc; +// CHECK: cir.global @uc : !cir.int + +short ss; +// CHECK: cir.global @ss : !cir.int + +unsigned short us; +// CHECK: cir.global @us : !cir.int + +int si; +// CHECK: cir.global @si : !cir.int + +unsigned ui; +// CHECK: cir.global @ui : !cir.int + +long sl; +// CHECK: cir.global @sl : !cir.int + +unsigned long ul; +// CHECK: cir.global @ul : !cir.int + +long long sll; +// CHECK: cir.global @sll : !cir.int + +unsigned long long ull; +// CHECK: cir.global @ull : !cir.int + +__int128 s128; +// CHECK: cir.global @s128 : !cir.int + +unsigned __int128 u128; +// CHECK: cir.global @u128 : !cir.int + +wchar_t wc; +// CHECK: cir.global @wc : !cir.int + +char8_t c8; +// CHECK: cir.global @c8 : !cir.int + +char16_t c16; +// CHECK: cir.global @c16 : !cir.int + +char32_t c32; +// CHECK: cir.global @c32 : !cir.int + +_BitInt(20) sb20; +// CHECK: cir.global @sb20 : !cir.int + +unsigned _BitInt(48) ub48; +// CHECK: cir.global @ub48 : !cir.int diff --git a/clang/test/CIR/hello.c b/clang/test/CIR/hello.c new file mode 100644 index 000000000000..9de0bb3194c2 --- /dev/null +++ b/clang/test/CIR/hello.c @@ -0,0 +1,5 @@ +// Smoke test for ClangIR code generation +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +void foo() {} +// CHECK: func.func @foo From 505796a77779de67825ea0177a37bd7f4ef9cfa5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 13 Apr 2022 17:52:04 -0700 Subject: [PATCH 0258/2301] [CIR][CodeGen] Handle different scopes in C vs C++ on ForStmts --- clang/lib/CIR/CIRGenStmt.cpp | 10 ++++++--- clang/test/CIR/CodeGen/loop-scope.cpp | 30 +++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/loop-scope.cpp diff --git a/clang/lib/CIR/CIRGenStmt.cpp b/clang/lib/CIR/CIRGenStmt.cpp index 084031d5780a..e415e9ec41a0 100644 --- a/clang/lib/CIR/CIRGenStmt.cpp +++ b/clang/lib/CIR/CIRGenStmt.cpp @@ -595,9 +595,13 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - // FIXME: in C we need to open a new scope here. Do we also need it - // for C++ in case it's a compound statement? - if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + // https://en.cppreference.com/w/cpp/language/for + // While in C++, the scope of the init-statement and the scope of + // statement are one and the same, in C the scope of statement is + // nested within the scope of init-statement. + bool useCurrentScope = + CGM.getASTContext().getLangOpts().CPlusPlus ? true : false; + if (buildStmt(S.getBody(), useCurrentScope).failed()) forRes = mlir::failure(); }, /*stepBuilder=*/ diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp new file mode 100644 index 000000000000..84cdd6eef1d8 --- /dev/null +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cpp.cir +// RUN: FileCheck --input-file=%t.cpp.cir %s --check-prefix=CPPSCOPE +// RUN: %clang_cc1 -x c -std=c11 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.c.cir +// RUN: FileCheck --input-file=%t.c.cir %s --check-prefix=CSCOPE +// XFAIL: * + +void l0() { + for (int i = 0;;) { + int j = 0; + } +} + +// CPPSCOPE: func @l0() { +// CPPSCOPE-NEXT: cir.scope { +// CPPSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} +// CPPSCOPE-NEXT: %1 = cir.alloca i32, cir.ptr , ["j", cinit] {alignment = 4 : i64} +// CPPSCOPE-NEXT: %2 = cir.cst(0 : i32) : i32 +// CPPSCOPE-NEXT: cir.store %2, %0 : i32, cir.ptr +// CPPSCOPE-NEXT: cir.loop for(cond : { + +// CSCOPE: func @l0() { +// CSCOPE-NEXT: cir.scope { +// CSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} +// CSCOPE-NEXT: %1 = cir.cst(0 : i32) : i32 +// CSCOPE-NEXT: cir.store %1, %0 : i32, cir.ptr +// CSCOPE-NEXT: cir.loop for(cond : { + +// CSCOPE: }) { +// CSCOPE-NEXT: cir.scope { +// CSCOPE-NEXT: %2 = cir.alloca i32, cir.ptr , ["j", cinit] {alignment = 4 : i64} From 904c4a24337fa7edb1b449a16534a257d8f56fbb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Apr 2022 11:19:50 -0700 Subject: [PATCH 0259/2301] [CIR] Add verifier for 'cir.yield break' Make sure this operation cannot be used without a matching loop or switch. --- clang/test/CIR/IR/invalid.cir | 10 ++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 26 ++++++++++++++++++-------- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 09ca4e4a6adf..7e49fd82c875 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -50,6 +50,16 @@ func.func @yieldfallthrough() { // ----- +func.func @yieldbreak() { + %0 = cir.cst(true) : !cir.bool + cir.if %0 { + cir.yield break // expected-error {{shall be dominated by 'cir.loop' or 'cir.switch'}} + } + cir.return +} + +// ----- + func.func @s0() { %1 = cir.cst(2 : i32) : i32 cir.switch (%1 : i32) [ diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index aac34bb63378..6a0adf40745c 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -413,17 +413,27 @@ LogicalResult ScopeOp::verify() { return success(); } //===----------------------------------------------------------------------===// mlir::LogicalResult YieldOp::verify() { - if (llvm::isa(getOperation()->getParentOp())) - return mlir::success(); + auto isDominatedByLoopOrSwitch = [](Operation *parentOp) { + while (!llvm::isa(parentOp)) { + if (llvm::isa(parentOp)) + return true; + parentOp = parentOp->getParentOp(); + } + return false; + }; - // FIXME: check for cir.yield continue - if (llvm::isa(getOperation()->getParentOp())) + if (isBreak()) { + if (!isDominatedByLoopOrSwitch(getOperation()->getParentOp())) + return emitOpError() + << "shall be dominated by 'cir.loop' or 'cir.switch'"; return mlir::success(); + } - assert((llvm::isa(getOperation()->getParentOp())) && - "unknown parent op"); - if (isFallthrough()) - return emitOpError() << "fallthrough only expected within 'cir.switch'"; + if (isFallthrough()) { + if (!llvm::isa(getOperation()->getParentOp())) + return emitOpError() << "fallthrough only expected within 'cir.switch'"; + return mlir::success(); + } return mlir::success(); } From 521c84a8f376e3f842e1e327f04532164a8e09d8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Apr 2022 11:33:04 -0700 Subject: [PATCH 0260/2301] [CIR] Add 'cir.yield continue' to later handle continue stmts --- clang/test/CIR/IR/invalid.cir | 10 ++++++++++ clang/test/CIR/IR/loop.cir | 16 ++++++++++++++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 11 ++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 18 +++++++++++++++++- 4 files changed, 53 insertions(+), 2 deletions(-) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 7e49fd82c875..77758e7f4bac 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -60,6 +60,16 @@ func.func @yieldbreak() { // ----- +func.func @yieldcontinue() { + %0 = cir.cst(true) : !cir.bool + cir.if %0 { + cir.yield continue // expected-error {{shall be dominated by 'cir.loop'}} + } + cir.return +} + +// ----- + func.func @s0() { %1 = cir.cst(2 : i32) : i32 cir.switch (%1 : i32) [ diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index a906731aabb3..dbabcf17a728 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -25,6 +25,10 @@ func.func @l0() { %5 = cir.cst(1 : i32) : i32 %6 = cir.binop(add, %4, %5) : i32 cir.store %6, %0 : i32, cir.ptr + %7 = cir.cst(true) : !cir.bool + cir.if %7 { + cir.yield break + } cir.yield } } @@ -44,6 +48,10 @@ func.func @l0() { %5 = cir.cst(1 : i32) : i32 %6 = cir.binop(add, %4, %5) : i32 cir.store %6, %0 : i32, cir.ptr + %7 = cir.cst(true) : !cir.bool + cir.if %7 { + cir.yield continue + } cir.yield } } @@ -87,6 +95,10 @@ func.func @l0() { // CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: %7 = cir.cst(true) : !cir.bool +// CHECK-NEXT: cir.if %7 { +// CHECK-NEXT: cir.yield break +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } @@ -102,6 +114,10 @@ func.func @l0() { // CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: %7 = cir.cst(true) : !cir.bool +// CHECK-NEXT: cir.if %7 { +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index e3f23bdf766a..d1b4d7bcf2a0 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -354,11 +354,12 @@ def IfOp : CIR_Op<"if", def YieldOpKind_BK : I32EnumAttrCase<"Break", 1, "break">; def YieldOpKind_FT : I32EnumAttrCase<"Fallthrough", 2, "fallthrough">; def YieldOpKind_LC : I32EnumAttrCase<"Loopcondition", 3, "loopcondition">; +def YieldOpKind_CE : I32EnumAttrCase<"Continue", 4, "continue">; def YieldOpKind : I32EnumAttr< "YieldOpKind", "yield kind", - [YieldOpKind_BK, YieldOpKind_FT, YieldOpKind_LC]> { + [YieldOpKind_BK, YieldOpKind_FT, YieldOpKind_LC, YieldOpKind_CE]> { let cppNamespace = "::mlir::cir"; } @@ -404,6 +405,11 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, %4 = ... : cir.bool cir.yield loopcondition %4 } ... ) {} + + cir.loop (cond : {...}, step : {...}) { + ... + cir.yield continue + } ``` }]; @@ -437,6 +443,9 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, bool isLoopCondition() { return !isPlain() && *getKind() == YieldOpKind::Loopcondition; } + bool isContinue() { + return !isPlain() && *getKind() == YieldOpKind::Continue; + } }]; let hasVerifier = 1; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 6a0adf40745c..fc94c724a55c 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -422,6 +422,15 @@ mlir::LogicalResult YieldOp::verify() { return false; }; + auto isDominatedByLoop = [](Operation *parentOp) { + while (!llvm::isa(parentOp)) { + if (llvm::isa(parentOp)) + return true; + parentOp = parentOp->getParentOp(); + } + return false; + }; + if (isBreak()) { if (!isDominatedByLoopOrSwitch(getOperation()->getParentOp())) return emitOpError() @@ -429,9 +438,16 @@ mlir::LogicalResult YieldOp::verify() { return mlir::success(); } + if (isContinue()) { + if (!isDominatedByLoop(getOperation()->getParentOp())) + return emitOpError() << "shall be dominated by 'cir.loop'"; + return mlir::success(); + } + if (isFallthrough()) { if (!llvm::isa(getOperation()->getParentOp())) - return emitOpError() << "fallthrough only expected within 'cir.switch'"; + return emitOpError() + << "fallthrough only expected within 'cir.switch'"; return mlir::success(); } From 09ff4179306a099270a30edd6bf82938f15534b5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Apr 2022 11:43:15 -0700 Subject: [PATCH 0261/2301] [CIR][CodeGen] Gen code for continue stmt using cir.yield --- clang/lib/CIR/CIRGenFunction.h | 1 + clang/lib/CIR/CIRGenStmt.cpp | 13 ++++++++++++- clang/test/CIR/CodeGen/loop.cpp | 30 ++++++++++++++++++++++++++++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index f7ebc4f61f3f..f2b9d1050cb1 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -432,6 +432,7 @@ class CIRGenFunction { mlir::LogicalResult buildLabelStmt(const clang::LabelStmt &S); mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); + mlir::LogicalResult buildContinueStmt(const clang::ContinueStmt &S); /// Emit code to compute a designator that specifies the location /// of the expression. diff --git a/clang/lib/CIR/CIRGenStmt.cpp b/clang/lib/CIR/CIRGenStmt.cpp index e415e9ec41a0..d17150073d6d 100644 --- a/clang/lib/CIR/CIRGenStmt.cpp +++ b/clang/lib/CIR/CIRGenStmt.cpp @@ -248,6 +248,8 @@ mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, return buildReturnStmt(cast(*S)); case Stmt::GotoStmtClass: return buildGotoStmt(cast(*S)); + case Stmt::ContinueStmtClass: + return buildContinueStmt(cast(*S)); case Stmt::NullStmtClass: break; @@ -265,7 +267,6 @@ mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, return buildBreakStmt(cast(*S)); case Stmt::AttributedStmtClass: - case Stmt::ContinueStmtClass: case Stmt::SEHLeaveStmtClass: llvm::errs() << "CIR codegen for '" << S->getStmtClassName() << "' not implemented\n"; @@ -494,6 +495,16 @@ mlir::LogicalResult CIRGenFunction::buildLabel(const LabelDecl *D) { return mlir::success(); } +mlir::LogicalResult +CIRGenFunction::buildContinueStmt(const clang::ContinueStmt &S) { + builder.create( + getLoc(S.getContinueLoc()), + mlir::cir::YieldOpKindAttr::get(builder.getContext(), + mlir::cir::YieldOpKind::Continue), + mlir::ValueRange({})); + return mlir::success(); +} + mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) { builder.create( getLoc(S.getBreakLoc()), diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 0c317f4a9887..d91cb1c9aa24 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -159,3 +159,33 @@ void l3(bool cond) { // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } + +void l4() { + int i = 0, y = 100; + while (true) { + i = i + 1; + if (i < 10) + continue; + y = y - 20; + } +} + +// CHECK: func @l4 +// CHECK: cir.loop while(cond : { +// CHECK-NEXT: %4 = cir.cst(true) : !cir.bool +// CHECK-NEXT: cir.yield loopcondition %4 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 +// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %10 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: %11 = cir.cst(10 : i32) : i32 +// CHECK-NEXT: %12 = cir.cmp(lt, %10, %11) : i32, !cir.bool +// CHECK-NEXT: cir.if %12 { +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: } +// CHECK-NEXT: } \ No newline at end of file From 4a7d6f1deb03bf442032639a1fe814d5b80d2759 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Apr 2022 12:13:43 -0700 Subject: [PATCH 0262/2301] [CIR][MergeCleanups] Add support for cir.loop --- clang/test/CIR/Transforms/merge-cleanups.cir | 31 +++++++++++++++++++ .../Dialect/CIR/Transforms/MergeCleanups.cpp | 14 +++++++-- 2 files changed, 42 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 49fd61296bb6..e97c9c035372 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -1,5 +1,6 @@ // RUN: cir-tool %s -cir-merge-cleanups -o %t.out.cir // RUN: FileCheck --input-file=%t.out.cir %s +// XFAIL: * module { func.func @sw1(%arg0: i32, %arg1: i32) { @@ -57,6 +58,22 @@ module { } cir.return } + + func.func @l7() { + cir.scope { + cir.loop while(cond : { + %0 = cir.cst(true) : !cir.bool + cir.yield loopcondition %0 : !cir.bool + }, step : { + cir.yield + }) { + cir.br ^bb1 + ^bb1: + cir.return + } + } + cir.return + } } // CHECK: cir.switch (%4 : i32) [ @@ -95,3 +112,17 @@ module { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: } // CHECK-NEXT: ] + +// CHECK: func @l7 +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop while(cond : { +// CHECK-NEXT: %0 = cir.cst(true) : !cir.bool +// CHECK-NEXT: cir.yield loopcondition %0 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp index 82824aa5d7db..99c9deb6171e 100644 --- a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp @@ -91,7 +91,7 @@ struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { }; // Specialize the template to account for the different build signatures for -// IfOp, ScopeOp, FuncOp and SwitchOp. +// IfOp, ScopeOp, FuncOp, SwitchOp, LoopOp. template <> mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp(PatternRewriter &rewriter, @@ -135,11 +135,18 @@ mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( return regionChanged ? success() : failure(); } +template <> +mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( + PatternRewriter &rewriter, cir::LoopOp loopOp) const { + return checkAndRewriteRegion(loopOp.getBody(), rewriter); +} + void getMergeCleanupsPatterns(RewritePatternSet &results, MLIRContext *context) { results.add, SimplifyRetYieldBlocks, SimplifyRetYieldBlocks, - SimplifyRetYieldBlocks>(context); + SimplifyRetYieldBlocks, + SimplifyRetYieldBlocks>(context); } struct MergeCleanupsPass : public MergeCleanupsBase { @@ -163,7 +170,8 @@ void MergeCleanupsPass::runOnOperation() { SmallVector opsToSimplify; op->walk([&](Operation *op) { - if (isa(op)) + if (isa( + op)) opsToSimplify.push_back(op); }); From fbacfca183b593ece6ba3ac7125b401e8987f287 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Apr 2022 12:17:04 -0700 Subject: [PATCH 0263/2301] [CIR] Add integration tests for new MergeCleanup loop support --- clang/test/CIR/CodeGen/loop.cpp | 42 ++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index d91cb1c9aa24..8569aee5303a 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -188,4 +188,44 @@ void l4() { // CHECK-NEXT: cir.if %12 { // CHECK-NEXT: cir.yield continue // CHECK-NEXT: } -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } + +void l5() { + do { + } while (0); +} + +// CHECK: func @l5() { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop dowhile(cond : { +// CHECK-NEXT: %0 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : i32), !cir.bool +// CHECK-NEXT: cir.yield loopcondition %1 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +void l6() { + while (true) { + return; + } +} + +// CHECK: func @l6() { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop while(cond : { +// CHECK-NEXT: %0 = cir.cst(true) : !cir.bool +// CHECK-NEXT: cir.yield loopcondition %0 : !cir.bool +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } \ No newline at end of file From c124a4068639f956edbda680c429ca21d8bf24ed Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Apr 2022 12:25:21 -0700 Subject: [PATCH 0264/2301] [CIR][LifetimeCheck] Add skeleton for cir.loop --- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 69e1f211f6c1..8c9ef0562433 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -32,6 +32,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkIf(IfOp op); void checkSwitch(SwitchOp op); + void checkLoop(LoopOp op); void checkAlloca(AllocaOp op); void checkStore(StoreOp op); void checkLoad(LoadOp op); @@ -340,6 +341,23 @@ void LifetimeCheckPass::joinPmaps(SmallVectorImpl &pmaps) { } } +void LifetimeCheckPass::checkLoop(LoopOp loopOp) { + // 2.4.9. Loops + // A loop is treated as if it were the first two loop iterations unrolled + // using an if. For example: + // + // for (/*init*/; /*cond*/; /*incr*/) + // { /*body*/ } + // + // is treated as: + // + // if (/*init*/; /*cond*/) + // { /*body*/; /*incr*/ } + // if (/*cond*/) + // { /*body*/ } + // +} + void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { // 2.4.7. A switch(cond) is treated as if it were an equivalent series of // non-nested if statements with single evaluation of cond; for example: @@ -582,6 +600,8 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return checkIf(ifOp); if (auto switchOp = dyn_cast(op)) return checkSwitch(switchOp); + if (auto loopOp = dyn_cast(op)) + return checkLoop(loopOp); if (auto allocaOp = dyn_cast(op)) return checkAlloca(allocaOp); if (auto storeOp = dyn_cast(op)) From 51a78ac1ab0adac733f694e68cc6d6c0d616023c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Apr 2022 14:16:27 -0700 Subject: [PATCH 0265/2301] [CIR][LifetimeCheck] Implement cir.loop support and add tests --- clang/test/CIR/Transforms/lifetime-loop.cpp | 40 ++++++++++ .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 73 +++++++++++++++++++ 2 files changed, 113 insertions(+) create mode 100644 clang/test/CIR/Transforms/lifetime-loop.cpp diff --git a/clang/test/CIR/Transforms/lifetime-loop.cpp b/clang/test/CIR/Transforms/lifetime-loop.cpp new file mode 100644 index 000000000000..ec649c860e84 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-loop.cpp @@ -0,0 +1,40 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null remarks=pset" -verify-diagnostics -o %t-out.cir +// XFAIL: * + +void loop_basic_for() { + int *p = nullptr; // expected-note {{invalidated here}} + for (int i = 0; i < 10; i = i + 1) { + int x = 0; + p = &x; + *p = 42; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { nullptr, invalid }}} +} + +void loop_basic_while() { + int *p = nullptr; // expected-note {{invalidated here}} + int i = 0; + while (i < 10) { + int x = 0; + p = &x; + *p = 42; + i = i + 1; + } // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { nullptr, invalid }}} +} + +void loop_basic_dowhile() { + int *p = nullptr; // expected-note {{invalidated here}} + int i = 0; + do { + int x = 0; + p = &x; + *p = 42; + i = i + 1; + } while (i < 10); // expected-note {{pointee 'x' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { nullptr, invalid }}} +} diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 8c9ef0562433..5d431ceb45f2 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -343,6 +343,7 @@ void LifetimeCheckPass::joinPmaps(SmallVectorImpl &pmaps) { void LifetimeCheckPass::checkLoop(LoopOp loopOp) { // 2.4.9. Loops + // // A loop is treated as if it were the first two loop iterations unrolled // using an if. For example: // @@ -356,6 +357,78 @@ void LifetimeCheckPass::checkLoop(LoopOp loopOp) { // if (/*cond*/) // { /*body*/ } // + // See checkIf for additional explanations. + SmallVector pmapOps; + SmallVector regionsToCheck; + + auto setupLoopRegionsToCheck = [&](bool isSubsequentTaken = false) { + regionsToCheck.clear(); + switch (loopOp.getKind()) { + case LoopOpKind::For: { + regionsToCheck.push_back(&loopOp.getCond()); + regionsToCheck.push_back(&loopOp.getBody()); + if (!isSubsequentTaken) + regionsToCheck.push_back(&loopOp.getStep()); + break; + } + case LoopOpKind::While: { + regionsToCheck.push_back(&loopOp.getCond()); + regionsToCheck.push_back(&loopOp.getBody()); + break; + } + case LoopOpKind::DoWhile: { + // Note this is the reverse order from While above. + regionsToCheck.push_back(&loopOp.getBody()); + regionsToCheck.push_back(&loopOp.getCond()); + break; + } + } + }; + + // From 2.4.9 "Note": + // + // There are only three paths to analyze: + // (1) never taken (the loop body was not entered) + pmapOps.push_back(getPmap()); + + // (2) first taken (the first pass through the loop body, which begins + // with the loop entry pmap) + PMapType loopExitPmap; + { + // Intentional copy from loop entry map + loopExitPmap = getPmap(); + PmapGuard pmapGuard{*this, &loopExitPmap}; + setupLoopRegionsToCheck(); + for (auto *r : regionsToCheck) + checkRegion(*r); + pmapOps.push_back(loopExitPmap); + } + + // (3) and subsequent taken (second or later iteration, which begins with the + // loop body exit pmap and so takes into account any invalidations performed + // in the loop body on any path that could affect the next loop). + // + // This ensures that a subsequent loop iteration does not use a Pointer that + // was invalidated during a previous loop iteration. + // + // Because this analysis gives the same answer for each block of code (always + // converges), all loop iterations after the first get the same answer and + // so we only need to consider the second iteration, and so the analysis + // algorithm remains linear, single-pass. As an optimization, if the loop + // entry pmap is the same as the first loop body exit pmap, there is no need + // to perform the analysis on the second loop iteration; the answer will be + // the same. + if (getPmap() != loopExitPmap) { + // Intentional copy from first taken loop exit pmap + PMapType otherTakenPmap = loopExitPmap; + PmapGuard pmapGuard{*this, &otherTakenPmap}; + setupLoopRegionsToCheck(/*isSubsequentTaken=*/true); + for (auto *r : regionsToCheck) + checkRegion(*r); + pmapOps.push_back(otherTakenPmap); + } + + joinPmaps(pmapOps); } void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { From 12eb7767942ee1aa7a6a24d525eeb3c50c4f3386 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Apr 2022 17:52:55 -0700 Subject: [PATCH 0266/2301] [CIR] Add 'cir.array' type --- clang/test/CIR/IR/types.cir | 11 +++++++ mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td | 22 ++++++++++++-- mlir/lib/Dialect/CIR/IR/CIRTypes.cpp | 30 +++++++++++++++++++- 3 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/IR/types.cir diff --git a/clang/test/CIR/IR/types.cir b/clang/test/CIR/IR/types.cir new file mode 100644 index 000000000000..5d7ebae40c60 --- /dev/null +++ b/clang/test/CIR/IR/types.cir @@ -0,0 +1,11 @@ +// RUN: cir-tool %s | cir-tool | FileCheck %s + +module { + func.func @arrays() { + %0 = cir.alloca !cir.array, cir.ptr>, ["x", paraminit] + cir.return + } +} + +// CHECK: func @arrays() { +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td index 8d28419a65fb..f610e9c2ebd4 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td @@ -92,10 +92,28 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { let hasCustomAssemblyFormat = 1; } +//===----------------------------------------------------------------------===// +// ArrayType +//===----------------------------------------------------------------------===// + +def CIR_ArrayType : + CIR_Type<"Array", "array"> { + + let summary = "CIR array type"; + let description = [{ + `CIR.array` represents C/C++ constant arrays. + }]; + + let parameters = (ins "mlir::Type":$eltType, "uint64_t":$size); + + let hasCustomAssemblyFormat = 1; +} + //===----------------------------------------------------------------------===// // One type to bind them all //===----------------------------------------------------------------------===// -def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType, CIR_BoolType, CIR_StructType]>; +def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType, CIR_BoolType, CIR_StructType, + CIR_ArrayType]>; -#endif // MLIR_CIR_DIALECT_CIR_TYPES \ No newline at end of file +#endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp index 27a09fa75626..b0daa75752d7 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp @@ -90,10 +90,38 @@ void StructType::print(mlir::AsmPrinter &printer) const { printer << '>'; } +Type ArrayType::parse(mlir::AsmParser &parser) { + if (parser.parseLess()) + return Type(); + Type eltType; + if (parser.parseType(eltType)) + return Type(); + if (parser.parseKeyword("x")) + return Type(); + + uint64_t val = 0; + if (parser.parseInteger(val).failed()) + return Type(); + + if (parser.parseGreater()) + return Type(); + return get(parser.getContext(), eltType, val); +} + +void ArrayType::print(mlir::AsmPrinter &printer) const { + printer << '<'; + printer.printType(getEltType()); + printer << " x " << getSize(); + printer << '>'; +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// void CIRDialect::registerTypes() { - addTypes(); + addTypes< +#define GET_TYPEDEF_LIST +#include "mlir/Dialect/CIR/IR/CIROpsTypes.cpp.inc" + >(); } From 6a80839090515fee45bb62295f5cfd3252db067c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 15 Apr 2022 12:24:52 -0700 Subject: [PATCH 0267/2301] [CIR][CodeGen] Implement ConstantArray codegen --- clang/lib/CIR/CIRGenTypes.cpp | 18 +++++++++++++++++- clang/test/CIR/CodeGen/array.cpp | 9 +++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/array.cpp diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 065109f1df59..70c614204c09 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -434,7 +434,23 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; } case Type::ConstantArray: { - assert(0 && "not implemented"); + const ConstantArrayType *A = cast(Ty); + auto EltTy = convertTypeForMem(A->getElementType()); + + auto isSized = [&](mlir::Type ty) { + if (ty.isIntOrFloat() || + ty.isa()) + return true; + assert(0 && "not implemented"); + return false; + }; + + // FIXME: In LLVM, "lower arrays of undefined struct type to arrays of + // i8 just to have a concrete type". Not sure this makes sense in CIR yet. + assert(isSized(EltTy) && "not implemented"); + ResultType = ::mlir::cir::ArrayType::get(Builder.getContext(), EltTy, + A->getSize().getZExtValue()); break; } case Type::ExtVector: diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp new file mode 100644 index 000000000000..326ea4edd1b9 --- /dev/null +++ b/clang/test/CIR/CodeGen/array.cpp @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void a0() { + int a[10]; +} + +// CHECK: func @a0() { +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} \ No newline at end of file From 2f12447bad81bb6784c2cc7edb6b2bfbdc47f204 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 18 Apr 2022 16:52:30 -0700 Subject: [PATCH 0268/2301] [CIR] Add cir.array testcase --- clang/test/CIR/IR/array.cir | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 clang/test/CIR/IR/array.cir diff --git a/clang/test/CIR/IR/array.cir b/clang/test/CIR/IR/array.cir new file mode 100644 index 000000000000..5d7ebae40c60 --- /dev/null +++ b/clang/test/CIR/IR/array.cir @@ -0,0 +1,11 @@ +// RUN: cir-tool %s | cir-tool | FileCheck %s + +module { + func.func @arrays() { + %0 = cir.alloca !cir.array, cir.ptr>, ["x", paraminit] + cir.return + } +} + +// CHECK: func @arrays() { +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] From 0a4726918a0debf68ba7b07fd93b0fcee635069f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 18 Apr 2022 16:54:55 -0700 Subject: [PATCH 0269/2301] [CIR] Introduce new cast kind: array_to_ptrdecay --- clang/test/CIR/IR/cast.cir | 16 ++++++++++++++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 8 ++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/IR/cast.cir diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir new file mode 100644 index 000000000000..8d6736c82afe --- /dev/null +++ b/clang/test/CIR/IR/cast.cir @@ -0,0 +1,16 @@ +// RUN: cir-tool %s | cir-tool | FileCheck %s + +module { + func.func @yolo(%arg0 : i32) { + %0 = cir.alloca !cir.array, cir.ptr>, ["x", paraminit] + %a = cir.cast (int_to_bool, %arg0 : i32), !cir.bool + + %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr + %4 = cir.cst(0 : i32) : i32 + cir.return + } +} + +// CHECK: func @yolo(%arg0: i32) +// CHECK: %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool +// CHECK: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index d1b4d7bcf2a0..0957fe55e6c2 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -39,11 +39,12 @@ class CIR_Op traits = []> : // The enumaration value isn't in sync with clang. def CK_IntegralToBoolean : I32EnumAttrCase<"int_to_bool", 1>; +def CK_ArrayToPointerDecay : I32EnumAttrCase<"array_to_ptrdecay", 2>; def CastKind : I32EnumAttr< "CastKind", "cast kind", - [CK_IntegralToBoolean]> { + [CK_IntegralToBoolean, CK_ArrayToPointerDecay]> { let cppNamespace = "::mlir::cir"; } @@ -57,7 +58,10 @@ def CastOp : CIR_Op<"cast", [Pure]> { for instance is modeled as a load. ```mlir - %4 = cir.cast (int_to_bool, %3 : i32), i1 + %4 = cir.cast (int_to_bool, %3 : i32), !cir.bool + ... + %x = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr + ``` }]; From c5d2f12f69aa5b7761631a85e5f1ea443f42feb2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 3 Aug 2022 18:26:56 -0700 Subject: [PATCH 0270/2301] [DO NOT LAND] XFAIL all the tests broken by verifiers --- clang/test/CIR/CodeGen/loop.cpp | 2 +- clang/test/CIR/IR/loop.cir | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 8569aee5303a..a9d43fbad553 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -228,4 +228,4 @@ void l6() { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index dbabcf17a728..b1177cc22a04 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -134,4 +134,4 @@ func.func @l0() { // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: cir.yield -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } From cf06e232d3533765a5b84e3a74e08531a66238e2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 18 Apr 2022 16:58:41 -0700 Subject: [PATCH 0271/2301] [CIR] Add 'cir.ptr_stride' operation This is used to compute ptr offsets out of a base pointer. Right now only used for array subscripts. In the future we might change this to something else, as we move forward and find more opportunities and other things to be represented. --- clang/test/CIR/IR/ptr_stride.cir | 21 +++++++++++++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 30 +++++++++++++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 12 +++++++++ 3 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/IR/ptr_stride.cir diff --git a/clang/test/CIR/IR/ptr_stride.cir b/clang/test/CIR/IR/ptr_stride.cir new file mode 100644 index 000000000000..4040f4067215 --- /dev/null +++ b/clang/test/CIR/IR/ptr_stride.cir @@ -0,0 +1,21 @@ +// RUN: cir-tool %s | cir-tool | FileCheck %s + +module { + func.func @arraysubscript(%arg0: i32) { + %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] + %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool + %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr + %3 = cir.cst(0 : i32) : i32 + %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr + cir.return + } +} + +// CHECK: func @arraysubscript(%arg0: i32) { +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] +// CHECK-NEXT: %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 0957fe55e6c2..9170f66b727b 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -71,7 +71,35 @@ def CastOp : CIR_Op<"cast", [Pure]> { let assemblyFormat = "`(` $kind `,` $src `:` type($src) `)` `,` type($res) attr-dict"; // The input and output types should match the cast kind. - //let verifier = [{ return ::verify(*this); }]; + let hasVerifier = 1; +} + +//===----------------------------------------------------------------------===// +// PtrStrideOp +//===----------------------------------------------------------------------===// + +def PtrStrideOp : CIR_Op<"ptr_stride", [Pure]> { + let summary = "pointer access with stride"; + let description = [{ + Given a base pointer as operand, provides a new pointer after applying + a stride. Used for array subscripts, vectors, etc. + + ```mlir + %3 = cir.cst(0 : i32) : i32 + %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr + ``` + }]; + + let arguments = (ins AnyType:$base, AnyInteger:$stride); + let results = (outs AnyType:$result); + + let assemblyFormat = [{ + `(` $base `:` type($base) `,` $stride `:` type($stride) `)` + `,` type($result) attr-dict + }]; + + // The input and output types should match the cast kind. + let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index fc94c724a55c..d6214368095a 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -110,6 +110,18 @@ static void printConstantValue(OpAsmPrinter &p, cir::ConstantOp op, OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } +//===----------------------------------------------------------------------===// +// CastOp +//===----------------------------------------------------------------------===// + +LogicalResult CastOp::verify() { return success(); } + +//===----------------------------------------------------------------------===// +// PtrStrideOp +//===----------------------------------------------------------------------===// + +LogicalResult PtrStrideOp::verify() { return success(); } + //===----------------------------------------------------------------------===// // ReturnOp //===----------------------------------------------------------------------===// From a58b7375aef79b86d77ae38d372982724f1bede6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 18 Apr 2022 17:01:35 -0700 Subject: [PATCH 0272/2301] [CIR][CodeGen] Add support for array subscripts This uses recently introduced 'cir.cast' and 'cir.ptr_stride' to build the final pointer used to load/store to/from locations in an array. --- clang/lib/CIR/CIRGenExpr.cpp | 124 ++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenExprScalar.cpp | 12 +++ clang/lib/CIR/CIRGenFunction.h | 7 +- clang/test/CIR/CodeGen/array.cpp | 33 +++++++- 4 files changed, 170 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 0beef73bc899..aae5c89b885e 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -98,6 +98,7 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, } // Update the alloca with more info on initialization. + assert(Addr.getPointer() && "expected pointer to exist"); auto SrcAlloca = dyn_cast_or_null(Addr.getPointer().getDefiningOp()); if (InitDecl) { @@ -408,8 +409,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, return Call; } -/// EmitIgnoredExpr - Emit code to compute the specified expression, -/// ignoring the result. +/// Emit code to compute the specified expression, ignoring the result. void CIRGenFunction::buildIgnoredExpr(const Expr *E) { if (E->isPRValue()) return (void)buildAnyExpr(E); @@ -418,6 +418,124 @@ void CIRGenFunction::buildIgnoredExpr(const Expr *E) { buildLValue(E); } +/// If the specified expr is a simple decay from an array to pointer, +/// return the array subexpression. +/// FIXME: this could be abstracted into a commeon AST helper. +static const Expr *isSimpleArrayDecayOperand(const Expr *E) { + // If this isn't just an array->pointer decay, bail out. + const auto *CE = dyn_cast(E); + if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay) + return nullptr; + + // If this is a decay from variable width array, bail out. + const Expr *SubExpr = CE->getSubExpr(); + if (SubExpr->getType()->isVariableArrayType()) + return nullptr; + + return SubExpr; +} + +LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, + bool Accessed) { + // The index must always be an integer, which is not an aggregate. Emit it + // in lexical order (this complexity is, sadly, required by C++17). + // llvm::Value *IdxPre = + // (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr; + assert(E->getLHS() != E->getIdx() && "not implemented"); + bool SignedIndices = false; + auto EmitIdxAfterBase = [&](bool Promote) -> mlir::Value { + mlir::Value Idx; + if (E->getLHS() != E->getIdx()) { + assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS"); + Idx = buildScalarExpr(E->getIdx()); + } + + QualType IdxTy = E->getIdx()->getType(); + bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); + SignedIndices |= IdxSigned; + + assert(!SanOpts.has(SanitizerKind::ArrayBounds) && "not implemented"); + + // TODO: Extend or truncate the index type to 32 or 64-bits. + // if (Promote && !Idx.getType().isa<::mlir::cir::PointerType>()) { + // Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); + // } + + return Idx; + }; + + // If the base is a vector type, then we are forming a vector element + // with this subscript. + if (E->getBase()->getType()->isVectorType() && + !isa(E->getBase())) { + assert(0 && "not implemented"); + } + + // All the other cases basically behave like simple offsetting. + + // Handle the extvector case we ignored above. + if (isa(E->getBase())) { + assert(0 && "not implemented"); + } + + // TODO: TBAAAccessInfo + LValueBaseInfo EltBaseInfo; + Address Addr = Address::invalid(); + if (const VariableArrayType *vla = + getContext().getAsVariableArrayType(E->getType())) { + assert(0 && "not implemented"); + } else if (const ObjCObjectType *OIT = + E->getType()->getAs()) { + assert(0 && "not implemented"); + } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { + // If this is A[i] where A is an array, the frontend will have decayed + // the base to be a ArrayToPointerDecay implicit cast. While correct, it is + // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then + // a "gep x, i" here. Emit one "gep A, 0, i". + assert(Array->getType()->isArrayType() && + "Array to pointer decay must have array source type!"); + LValue ArrayLV; + // For simple multidimensional array indexing, set the 'accessed' flag + // for better bounds-checking of the base expression. + // if (const auto *ASE = dyn_cast(Array)) + // ArrayLV = buildArraySubscriptExpr(ASE, /*Accessed*/ true); + assert(!llvm::isa(Array) && + "multidimensional array indexing not implemented"); + + ArrayLV = buildLValue(Array); + auto arrayPtrTy = + ArrayLV.getPointer().getType().dyn_cast<::mlir::cir::PointerType>(); + assert(arrayPtrTy && "expected pointer type"); + auto arrayTy = arrayPtrTy.getPointee().dyn_cast<::mlir::cir::ArrayType>(); + assert(arrayTy && "expected array type"); + + auto flatPtrTy = + mlir::cir::PointerType::get(builder.getContext(), arrayTy.getEltType()); + auto loc = getLoc(Array->getBeginLoc()); + auto basePtr = builder.create( + loc, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, + ArrayLV.getPointer()); + + loc = getLoc(Array->getEndLoc()); + auto stride = builder.create( + loc, flatPtrTy, basePtr, EmitIdxAfterBase(/*Promote=*/true)); + // Propagate the alignment from the array itself to the result. + Addr = Address(stride.getResult(), ArrayLV.getAlignment()); + EltBaseInfo = ArrayLV.getBaseInfo(); + // TODO: EltTBAAInfo + } else { + // The base must be a pointer; emit it with an estimate of its alignment. + assert(0 && "not implemented"); + } + + LValue LV = LValue::makeAddr(Addr, E->getType(), EltBaseInfo); + + if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) { + assert(0 && "not implemented"); + } + return LV; +} + /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. @@ -429,6 +547,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { << E->getStmtClassName() << "'"; assert(0 && "not implemented"); } + case Expr::ArraySubscriptExprClass: + return buildArraySubscriptExpr(cast(E)); case Expr::BinaryOperatorClass: return buildBinaryOperatorLValue(cast(E)); case Expr::DeclRefExprClass: diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index c63dc34f8f1e..43e3cf39daf5 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -41,6 +41,18 @@ class ScalarExprEmitter : public StmtVisitor { return buildLoadOfLValue(E); } + mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *E) { + // Do we need anything like TestAndClearIgnoreResultAssign()? + assert(!E->getBase()->getType()->isVectorType() && + "vector types not implemented"); + + // Emit subscript expressions in rvalue context's. For most cases, this + // just loads the lvalue formed by the subscript expr. However, we have to + // be careful, because the base of a vector subscript is occasionally an + // rvalue, so we can't get it as an lvalue. + return buildLoadOfLValue(E); + } + // Emit code for an explicit or implicit cast. Implicit // casts have to handle a more broad range of conversions than explicit // casts, as they handle things like function to ptr-to-function decay diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index f2b9d1050cb1..e211ba232b48 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -408,13 +408,16 @@ class CIRGenFunction { mlir::LogicalResult buildCompoundStmtWithoutScope(const clang::CompoundStmt &S); - /// EmitIgnoredExpr - Emit code to compute the specified expression, + /// Emit code to compute the specified expression, /// ignoring the result. void buildIgnoredExpr(const clang::Expr *E); + LValue buildArraySubscriptExpr(const clang::ArraySubscriptExpr *E, + bool Accessed = false); + mlir::LogicalResult buildDeclStmt(const clang::DeclStmt &S); - /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type. + /// Get an appropriate 'undef' rvalue for the given type. /// TODO: What's the equivalent for MLIR? Currently we're only using this for /// void types so it just returns RValue::get(nullptr) but it'll need /// addressed later. diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 326ea4edd1b9..369a5f6fc189 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -Wno-return-stack-address -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s void a0() { @@ -6,4 +6,33 @@ void a0() { } // CHECK: func @a0() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} \ No newline at end of file +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} + +void a1() { + int a[10]; + a[0] = 1; +} + +// CHECK: func @a1() { +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} +// CHECK-NEXT: %1 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr +// CHECK-NEXT: cir.store %1, %4 : i32, cir.ptr + +int *a2() { + int a[4]; + return &a[0]; +} + +// CHECK: func @a2() -> !cir.ptr { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr +// CHECK-NEXT: cir.store %4, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %5 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.return %5 : !cir.ptr +// CHECK: } From f062c107f8d13f900e4c11f6d958ea584f774f7f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 18 Apr 2022 17:43:33 -0700 Subject: [PATCH 0273/2301] [CIR][LifetimeCheck] Teach the analysis how to recognize C/C++ arrays --- clang/test/CIR/Transforms/lifetime-check.cpp | 9 +++++++++ .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 19 +++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index ab77d3440e01..f0831159232b 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -39,3 +39,12 @@ void p4() { int *p; // expected-note {{uninitialized here}} *p = 42; // expected-warning {{use of invalid pointer 'p'}} } + +void p5() { + int *p = nullptr; + { + int a[10]; + p = &a[0]; + } // expected-note {{pointee 'a' invalidated at end of scope}} + *p = 42; // expected-warning {{use of invalid pointer 'p'}} +} diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 5d431ceb45f2..b90b602262d2 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -561,6 +561,15 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { if (!ptrs.count(addr)) return; + auto getArrayFromSubscript = [&](PtrStrideOp strideOp) -> mlir::Value { + auto castOp = dyn_cast(strideOp.getBase().getDefiningOp()); + if (!castOp) + return {}; + if (castOp.getKind() != cir::CastKind::array_to_ptrdecay) + return {}; + return castOp.getSrc(); + }; + auto data = storeOp.getValue(); // 2.4.2 - If the declaration includes an initialization, the // initialization is treated as a separate operation @@ -586,6 +595,16 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { return; } + if (auto ptrStrideOp = dyn_cast(data.getDefiningOp())) { + // p = &a[0]; + auto array = getArrayFromSubscript(ptrStrideOp); + if (array) { + getPmap()[addr].clear(); + getPmap()[addr].insert(State::getLocalValue(array)); + } + return; + } + // From here on, some uninterestring store (for now?) } From 763d7aae59048130ac1c792849981df740526dd5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 18 Apr 2022 18:08:03 -0700 Subject: [PATCH 0274/2301] [CIR][LifetimeCheck][NFC] Rename pset remark to pset-invalid --- clang/test/CIR/Transforms/lifetime-check-remarks.cpp | 2 +- clang/test/CIR/Transforms/lifetime-loop.cpp | 2 +- mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp | 10 ++++++---- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/clang/test/CIR/Transforms/lifetime-check-remarks.cpp b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp index ea48469b20b6..deecf9bbd6a5 100644 --- a/clang/test/CIR/Transforms/lifetime-check-remarks.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: cir-tool %t.cir -cir-lifetime-check="remarks=pset" -verify-diagnostics -o %t-out.cir +// RUN: cir-tool %t.cir -cir-lifetime-check="remarks=pset-invalid" -verify-diagnostics -o %t-out.cir // XFAIL: * int *p0() { diff --git a/clang/test/CIR/Transforms/lifetime-loop.cpp b/clang/test/CIR/Transforms/lifetime-loop.cpp index ec649c860e84..7ab25c19b8b4 100644 --- a/clang/test/CIR/Transforms/lifetime-loop.cpp +++ b/clang/test/CIR/Transforms/lifetime-loop.cpp @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null remarks=pset" -verify-diagnostics -o %t-out.cir +// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null remarks=pset-invalid" -verify-diagnostics -o %t-out.cir // XFAIL: * void loop_basic_for() { diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index b90b602262d2..844292515641 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -40,7 +40,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { struct Options { enum : unsigned { None = 0, - RemarkPset = 1, + RemarkPsetInvalid = 1, RemarkAll = 1 << 1, HistoryNull = 1 << 2, HistoryInvalid = 1 << 3, @@ -51,7 +51,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void parseOptions(LifetimeCheckPass &pass) { for (auto &remark : pass.remarksList) { val |= StringSwitch(remark) - .Case("pset", RemarkPset) + .Case("pset-invalid", RemarkPsetInvalid) .Case("all", RemarkAll) .Default(None); } @@ -65,7 +65,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { } bool emitRemarkAll() { return val & RemarkAll; } - bool emitRemarkPset() { return emitRemarkAll() || val & RemarkPset; } + bool emitRemarkPsetInvalid() { + return emitRemarkAll() || val & RemarkPsetInvalid; + } bool emitHistoryAll() { return val & HistoryAll; } bool emitHistoryNull() { return emitHistoryAll() || val & HistoryNull; } @@ -653,7 +655,7 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { D.attachNote(*note) << "invalidated here"; } - if (opts.emitRemarkPset()) { + if (opts.emitRemarkPsetInvalid()) { llvm::SmallString<128> psetStr; llvm::raw_svector_ostream Out(psetStr); printPset(getPmap()[addr], Out); From 91fd9f8ad8ec7c65f4a746c514ceed29885fa716 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 18 Apr 2022 18:17:20 -0700 Subject: [PATCH 0275/2301] [CIR][LifetimeCheck] Add 'pset-always' mode to always print psets (instead of only on bad derefs) --- .../CIR/Transforms/lifetime-loop-valid.cpp | 41 +++++++++++++++++++ .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 36 +++++++++++----- 2 files changed, 67 insertions(+), 10 deletions(-) create mode 100644 clang/test/CIR/Transforms/lifetime-loop-valid.cpp diff --git a/clang/test/CIR/Transforms/lifetime-loop-valid.cpp b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp new file mode 100644 index 000000000000..ea1269a6b49b --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp @@ -0,0 +1,41 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null remarks=pset-always" -verify-diagnostics -o %t-out.cir +// XFAIL: * + +// +// Loops that do not change psets + +// p1179r1: 2.4.9.1 +// No diagnostic needed, pset(p) = {a} before and after the loop +void valid0(bool b, int j) { + int a[10]; + int *p = &a[0]; + while (j) { + if (b) { + p = &a[j]; + } + j = j - 1; + } + *p = 12; // expected-remark {{pset => { a }}} +} + +// p1179r1: 2.4.9.2 +void valid1(bool b, int j) { + int a[4], c[5]; + int *p = &a[0]; + while (j) { + if (b) { + p = &c[j]; + } + j = j - 1; + } + *p = 0; // expected-remark {{pset => { a, c }}} + + while (j) { + if (b) { + p = &c[j]; + } + j = j - 1; + } + *p = 0; // expected-remark {{pset => { a, c }}} +} diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 844292515641..182ffc15ebb7 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -40,11 +40,14 @@ struct LifetimeCheckPass : public LifetimeCheckBase { struct Options { enum : unsigned { None = 0, + // Emit pset remarks only detecting invalid derefs RemarkPsetInvalid = 1, - RemarkAll = 1 << 1, - HistoryNull = 1 << 2, - HistoryInvalid = 1 << 3, - HistoryAll = 1 << 4, + // Emit pset remarks for all derefs + RemarkPsetAlways = 1 << 1, + RemarkAll = 1 << 2, + HistoryNull = 1 << 3, + HistoryInvalid = 1 << 4, + HistoryAll = 1 << 5, }; unsigned val = None; @@ -52,6 +55,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { for (auto &remark : pass.remarksList) { val |= StringSwitch(remark) .Case("pset-invalid", RemarkPsetInvalid) + .Case("pset-always", RemarkPsetAlways) .Case("all", RemarkAll) .Default(None); } @@ -68,6 +72,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { bool emitRemarkPsetInvalid() { return emitRemarkAll() || val & RemarkPsetInvalid; } + bool emitRemarkPsetAlways() { + return emitRemarkAll() || val & RemarkPsetAlways; + } bool emitHistoryAll() { return val & HistoryAll; } bool emitHistoryNull() { return emitHistoryAll() || val & HistoryNull; } @@ -624,6 +631,19 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { bool hasInvalid = getPmap()[addr].count(State::getInvalid()); bool hasNullptr = getPmap()[addr].count(State::getNullPtr()); + auto emitPsetRemark = [&] { + llvm::SmallString<128> psetStr; + llvm::raw_svector_ostream Out(psetStr); + printPset(getPmap()[addr], Out); + emitRemark(loadOp.getLoc()) << "pset => " << Out.str(); + }; + + bool psetRemarkEmitted = false; + if (opts.emitRemarkPsetAlways()) { + emitPsetRemark(); + psetRemarkEmitted = true; + } + // 2.4.2 - On every dereference of a Pointer p, enforce that p is valid. if (!hasInvalid && !hasNullptr) return; @@ -655,12 +675,8 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { D.attachNote(*note) << "invalidated here"; } - if (opts.emitRemarkPsetInvalid()) { - llvm::SmallString<128> psetStr; - llvm::raw_svector_ostream Out(psetStr); - printPset(getPmap()[addr], Out); - emitRemark(loadOp.getLoc()) << "pset => " << Out.str(); - } + if (!psetRemarkEmitted && opts.emitRemarkPsetInvalid()) + emitPsetRemark(); } void LifetimeCheckPass::checkOperation(Operation *op) { From f683ce0c949a52490f51056072d9a8380737b668 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 19 Apr 2022 13:31:44 -0700 Subject: [PATCH 0276/2301] [CIR][LifetimeCheck] Add an invalid loop + array access testcase --- clang/test/CIR/Transforms/lifetime-loop.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/clang/test/CIR/Transforms/lifetime-loop.cpp b/clang/test/CIR/Transforms/lifetime-loop.cpp index 7ab25c19b8b4..1e615c3e0ce8 100644 --- a/clang/test/CIR/Transforms/lifetime-loop.cpp +++ b/clang/test/CIR/Transforms/lifetime-loop.cpp @@ -38,3 +38,21 @@ void loop_basic_dowhile() { *p = 42; // expected-warning {{use of invalid pointer 'p'}} // expected-remark@-1 {{pset => { nullptr, invalid }}} } + +// p1179r1: 2.4.9.3 +void loop0(bool b, int j) { + int a[4], c[4]; + int *p = &a[0]; + while (j) { + // This access is invalidated after the first iteration + *p = 42; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { c, nullptr }}} + p = nullptr; // expected-note {{invalidated here}} + if (b) { + p = &c[j]; + } + j = j - 1; + } + *p = 0; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { a, c, nullptr }}} +} From 0485a7e5559a25a383187e0e0e063c9298bb54ea Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 19 Apr 2022 14:08:23 -0700 Subject: [PATCH 0277/2301] [CIR] Define trait to check ptr_stride operand and result type --- clang/test/CIR/IR/invalid.cir | 8 ++++++ mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 24 ++++++++++++++++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 10 +++++--- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 25 ++++++++++++++----- 4 files changed, 58 insertions(+), 9 deletions(-) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 77758e7f4bac..9474d8733269 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -90,3 +90,11 @@ func.func @s1() { ] // expected-error {{case region shall not be empty}} cir.return } + +// ----- + +func.func @s1(%x: !cir.ptr) { + %idx = cir.cst(2 : i32) : i32 + %4 = cir.ptr_stride(%x : !cir.ptr, %idx : i32), !cir.ptr // expected-error {{requires the same type for first operand and result}} + cir.return +} diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index 0cd883fd63b3..e3e43dd9b221 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -36,9 +36,33 @@ using FuncOp = func::FuncOp; #include "mlir/Dialect/CIR/IR/CIRTypes.h" namespace mlir { +namespace OpTrait { + +namespace impl { +// These functions are out-of-line implementations of the methods in the +// corresponding trait classes. This avoids them being template +// instantiated/duplicated. +LogicalResult verifySameFirstOperandAndResultType(Operation *op); +} // namespace impl + +/// This class provides verification for ops that are known to have the same +/// first operand and result type. +/// +template +class SameFirstOperandAndResultType + : public TraitBase { +public: + static LogicalResult verifyTrait(Operation *op) { + return impl::verifySameFirstOperandAndResultType(op); + } +}; + +} // namespace OpTrait + namespace cir { void buildTerminatedBody(OpBuilder &builder, Location loc); } // namespace cir + } // namespace mlir #define GET_OP_CLASSES diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 9170f66b727b..0eed50fdfa48 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -78,7 +78,11 @@ def CastOp : CIR_Op<"cast", [Pure]> { // PtrStrideOp //===----------------------------------------------------------------------===// -def PtrStrideOp : CIR_Op<"ptr_stride", [Pure]> { +def SameFirstOperandAndResultType : + NativeOpTrait<"SameFirstOperandAndResultType">; + +def PtrStrideOp : CIR_Op<"ptr_stride", + [Pure, SameFirstOperandAndResultType]> { let summary = "pointer access with stride"; let description = [{ Given a base pointer as operand, provides a new pointer after applying @@ -98,8 +102,8 @@ def PtrStrideOp : CIR_Op<"ptr_stride", [Pure]> { `,` type($result) attr-dict }]; - // The input and output types should match the cast kind. - let hasVerifier = 1; + // SameFirstOperandAndResultType already checks all we need. + let hasVerifier = 0; } //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index d6214368095a..7a5d3abe9a76 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -116,12 +116,6 @@ OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } LogicalResult CastOp::verify() { return success(); } -//===----------------------------------------------------------------------===// -// PtrStrideOp -//===----------------------------------------------------------------------===// - -LogicalResult PtrStrideOp::verify() { return success(); } - //===----------------------------------------------------------------------===// // ReturnOp //===----------------------------------------------------------------------===// @@ -856,6 +850,25 @@ void LoopOp::getSuccessorRegions(mlir::RegionBranchPoint point, llvm::SmallVector LoopOp::getLoopRegions() { return {&getBody()}; } +//===----------------------------------------------------------------------===// +// CIR defined traits +//===----------------------------------------------------------------------===// + +LogicalResult +mlir::OpTrait::impl::verifySameFirstOperandAndResultType(Operation *op) { + if (failed(verifyAtLeastNOperands(op, 1)) || failed(verifyOneResult(op))) + return failure(); + + auto type = op->getResult(0).getType(); + auto opType = op->getOperand(0).getType(); + + if (type != opType) + return op->emitOpError() + << "requires the same type for first operand and result"; + + return success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From 9c5d4816450cebd13fce205eb793102c8ba54762 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 19 Apr 2022 14:43:06 -0700 Subject: [PATCH 0278/2301] [CIR] Add verifiers for both available CastOp kinds --- clang/test/CIR/IR/invalid.cir | 31 ++++++++++++- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 8 ++-- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 52 +++++++++++++++++----- 3 files changed, 75 insertions(+), 16 deletions(-) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 9474d8733269..4fd6cf260efa 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -93,8 +93,37 @@ func.func @s1() { // ----- -func.func @s1(%x: !cir.ptr) { +func.func @badstride(%x: !cir.ptr) { %idx = cir.cst(2 : i32) : i32 %4 = cir.ptr_stride(%x : !cir.ptr, %idx : i32), !cir.ptr // expected-error {{requires the same type for first operand and result}} cir.return } + +// ----- + +func.func @cast0(%arg0: i32) { + %1 = cir.cast(int_to_bool, %arg0 : i32), i32 // expected-error {{requires !cir.bool type for result}} + cir.return +} + +// ----- + +func.func @cast1(%arg1: f32) { + %1 = cir.cast(int_to_bool, %arg1 : f32), !cir.bool // expected-error {{requires integral type for result}} + cir.return +} + +// ----- + +func.func @cast2(%p: !cir.ptr) { + %2 = cir.cast(array_to_ptrdecay, %p : !cir.ptr), !cir.ptr // expected-error {{requires !cir.array pointee}} + cir.return +} + +// ----- + +func.func @cast3(%p: !cir.ptr) { + %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] + %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // expected-error {{requires same type for array element and pointee result}} + cir.return +} diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 0eed50fdfa48..f9ab3b549b25 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -61,14 +61,16 @@ def CastOp : CIR_Op<"cast", [Pure]> { %4 = cir.cast (int_to_bool, %3 : i32), !cir.bool ... %x = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr - ``` }]; let arguments = (ins CastKind:$kind, AnyType:$src); - let results = (outs AnyType:$res); + let results = (outs AnyType:$result); - let assemblyFormat = "`(` $kind `,` $src `:` type($src) `)` `,` type($res) attr-dict"; + let assemblyFormat = [{ + `(` $kind `,` $src `:` type($src) `)` + `,` type($result) attr-dict + }]; // The input and output types should match the cast kind. let hasVerifier = 1; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 7a5d3abe9a76..26fc7dc5508b 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -114,7 +114,37 @@ OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } // CastOp //===----------------------------------------------------------------------===// -LogicalResult CastOp::verify() { return success(); } +LogicalResult CastOp::verify() { + auto resType = getResult().getType(); + auto srcType = getSrc().getType(); + + switch (getKind()) { + case cir::CastKind::int_to_bool: { + if (!resType.isa()) + return emitOpError() << "requires !cir.bool type for result"; + if (!(srcType.isInteger(32) || srcType.isInteger(64))) + return emitOpError() << "requires integral type for result"; + return success(); + } + case cir::CastKind::array_to_ptrdecay: { + auto arrayPtrTy = srcType.dyn_cast(); + auto flatPtrTy = resType.dyn_cast(); + if (!arrayPtrTy || !flatPtrTy) + return emitOpError() << "requires !cir.ptr type for source and result"; + + auto arrayTy = arrayPtrTy.getPointee().dyn_cast(); + if (!arrayTy) + return emitOpError() << "requires !cir.array pointee"; + + if (arrayTy.getEltType() != flatPtrTy.getPointee()) + return emitOpError() + << "requires same type for array element and pointee result"; + return success(); + } + } + + return success(); +} //===----------------------------------------------------------------------===// // ReturnOp @@ -452,8 +482,7 @@ mlir::LogicalResult YieldOp::verify() { if (isFallthrough()) { if (!llvm::isa(getOperation()->getParentOp())) - return emitOpError() - << "fallthrough only expected within 'cir.switch'"; + return emitOpError() << "fallthrough only expected within 'cir.switch'"; return mlir::success(); } @@ -629,15 +658,14 @@ parseSwitchOp(OpAsmParser &parser, return mlir::failure(); if (parser.parseLSquare().failed()) return mlir::failure(); - auto result = parser.parseCommaSeparatedList([&]() { - int64_t val = 0; - if (parser.parseInteger(val).failed()) - return ::mlir::failure(); - caseEltValueListAttr.push_back( - mlir::IntegerAttr::get(intCondType, val)); - return ::mlir::success(); - }); - if (result.failed()) + if (parser.parseCommaSeparatedList([&]() { + int64_t val = 0; + if (parser.parseInteger(val).failed()) + return ::mlir::failure(); + caseEltValueListAttr.push_back( + mlir::IntegerAttr::get(intCondType, val)); + return ::mlir::success(); + })) return mlir::failure(); if (parser.parseRSquare().failed()) return mlir::failure(); From a28a60405c3ced30e772122638f66fbcf19b72aa Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 21 Apr 2022 17:11:23 -0700 Subject: [PATCH 0279/2301] [CIR] Add 'cir.brcond' operation and tests While here relax for loop cond region size restrictions to allow for already writing tests with loops. --- clang/test/CIR/IR/branch.cir | 42 +++++++++++++++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 52 +++++++++++++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 18 +++++++- 3 files changed, 109 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/IR/branch.cir diff --git a/clang/test/CIR/IR/branch.cir b/clang/test/CIR/IR/branch.cir new file mode 100644 index 000000000000..38cb0b4cbf5c --- /dev/null +++ b/clang/test/CIR/IR/branch.cir @@ -0,0 +1,42 @@ +// RUN: cir-tool %s | FileCheck %s + + +func.func @b0() { + cir.scope { + cir.loop while(cond : { + %0 = cir.cst(true) : !cir.bool + cir.brcond %0 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield + }, step : { + cir.yield + }) { + cir.br ^bb1 + ^bb1: + cir.return + } + } + cir.return +} + +// CHECK: func @b0 +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop while(cond : { +// CHECK-NEXT: %0 = cir.cst(true) : !cir.bool +// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: cir.br ^bb1 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index f9ab3b549b25..4495e2c67d44 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -759,6 +759,56 @@ def BrOp : CIR_Op<"br", }]; } +//===----------------------------------------------------------------------===// +// BrCondOp +//===----------------------------------------------------------------------===// + +def BrCondOp : CIR_Op<"brcond", + [DeclareOpInterfaceMethods, + Pure, Terminator, SameVariadicOperandSize]> { + let summary = "conditional branch operation"; + let description = [{ + The `cir.brcond %cond, ^bb0, ^bb1` branches to 'bb0' block in case + %cond (which must be a !cir.bool type) evaluates to true, otherwise + it branches to 'bb1'. + + Example: + + ```mlir + ... + cir.brcond %a, ^bb3, ^bb4 + ^bb3: + cir.return + ^bb4: + cir.yield + ``` + }]; + + let builders = [ + OpBuilder<(ins "Value":$cond, "Block *":$destTrue, "Block *":$destFalse, + CArg<"ValueRange", "{}">:$destOperandsTrue, + CArg<"ValueRange", "{}">:$destOperandsFalse), [{ + $_state.addOperands(cond); + $_state.addSuccessors(destTrue); + $_state.addSuccessors(destFalse); + $_state.addOperands(destOperandsTrue); + $_state.addOperands(destOperandsFalse); + }]> + ]; + + let arguments = (ins CIR_BoolType:$cond, + Variadic:$destOperandsTrue, + Variadic:$destOperandsFalse); + let successors = (successor AnySuccessor:$destTrue, AnySuccessor:$destFalse); + let assemblyFormat = [{ + $cond + $destTrue (`(` $destOperandsTrue^ `:` type($destOperandsTrue) `)`)? + `,` + $destFalse (`(` $destOperandsFalse^ `:` type($destOperandsFalse) `)`)? + attr-dict + }]; +} + //===----------------------------------------------------------------------===// // LoopOp //===----------------------------------------------------------------------===// @@ -783,7 +833,7 @@ def LoopOp : CIR_Op<"loop", }]; let arguments = (ins Arg:$kind); - let regions = (region SizedRegion<1>:$cond, AnyRegion:$body, + let regions = (region AnyRegion:$cond, AnyRegion:$body, SizedRegion<1>:$step); let assemblyFormat = [{ diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 26fc7dc5508b..044938bb24de 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -550,13 +550,27 @@ void printBinOpKind(OpAsmPrinter &p, BinOp binOp, BinOpKindAttr kindAttr) { mlir::SuccessorOperands BrOp::getSuccessorOperands(unsigned index) { assert(index == 0 && "invalid successor index"); // Current block targets do not have operands. - // TODO(CIR): This is a hacky avoidance of actually implementing this since - // MLIR moved it "because nobody used the std::optional::None case.........." return mlir::SuccessorOperands(MutableOperandRange(getOperation(), 0, 0)); } Block *BrOp::getSuccessorForOperands(ArrayRef) { return getDest(); } +//===----------------------------------------------------------------------===// +// BrCondOp +//===----------------------------------------------------------------------===// + +mlir::SuccessorOperands BrCondOp::getSuccessorOperands(unsigned index) { + assert(index < getNumSuccessors() && "invalid successor index"); + return SuccessorOperands(index == 0 ? getDestOperandsTrueMutable() + : getDestOperandsFalseMutable()); +} + +Block *BrCondOp::getSuccessorForOperands(ArrayRef operands) { + if (IntegerAttr condAttr = operands.front().dyn_cast_or_null()) + return condAttr.getValue().isOne() ? getDestTrue() : getDestFalse(); + return nullptr; +} + //===----------------------------------------------------------------------===// // SwitchOp //===----------------------------------------------------------------------===// From ec6709b02343fd00a570aa2cc0ce3912cc74399d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 21 Apr 2022 17:16:41 -0700 Subject: [PATCH 0280/2301] [CIR] Remove 'cir.yield loopcondition' and use 'cir.brcond' on cond regions This will temporarily break the build in order to allow this change to be more clear and separated from Clang codegen (which need API updates). Since 'cir.yield loopcondition' was removed, update to use cir.brconds. This change was initially done because yielding a value out with loopcondition require adding arguments to all regions involved, and the approach was somewhat brittle. More importantly it was breaking branchop interfaces assumptions, which was preventing the verifier to be used. --- clang/lib/CIR/CIRGenFunction.h | 3 +- clang/lib/CIR/CIRGenStmt.cpp | 49 ++++++++++---- clang/test/CIR/CodeGen/loop-scope.cpp | 1 - clang/test/CIR/CodeGen/loop.cpp | 67 ++++++++++++++++---- clang/test/CIR/IR/loop.cir | 37 +++++++++-- clang/test/CIR/Transforms/merge-cleanups.cir | 13 +++- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 23 ++----- 7 files changed, 138 insertions(+), 55 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index e211ba232b48..09efac2f0be3 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -399,6 +399,7 @@ class CIRGenFunction { mlir::LogicalResult buildSimpleStmt(const clang::Stmt *S, bool useCurrentScope); + mlir::LogicalResult buildForStmt(const clang::ForStmt &S); mlir::LogicalResult buildWhileStmt(const clang::WhileStmt &S); mlir::LogicalResult buildDoStmt(const clang::DoStmt &S); mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); @@ -591,8 +592,6 @@ class CIRGenFunction { // TODO: this can also be abstrated into common AST helpers bool hasBooleanRepresentation(clang::QualType Ty); - - mlir::LogicalResult buildForStmt(const clang::ForStmt &S); }; } // namespace cir diff --git a/clang/lib/CIR/CIRGenStmt.cpp b/clang/lib/CIR/CIRGenStmt.cpp index d17150073d6d..e0816c55252f 100644 --- a/clang/lib/CIR/CIRGenStmt.cpp +++ b/clang/lib/CIR/CIRGenStmt.cpp @@ -571,12 +571,32 @@ mlir::LogicalResult CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, return res; } +static mlir::LogicalResult buildLoopCondYield(mlir::OpBuilder &builder, + mlir::Location loc, + mlir::Value cond) { + mlir::Block *trueBB = nullptr, *falseBB = nullptr; + { + mlir::OpBuilder::InsertionGuard guard(builder); + trueBB = builder.createBlock(builder.getBlock()->getParent()); + builder.create(loc, YieldOpKind::Continue); + } + { + mlir::OpBuilder::InsertionGuard guard(builder); + falseBB = builder.createBlock(builder.getBlock()->getParent()); + builder.create(loc); + } + + assert((trueBB && falseBB) && "expected both blocks to exist"); + builder.create(loc, cond, trueBB, falseBB); + return mlir::success(); +} + mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { mlir::cir::LoopOp loopOp; // TODO: pass in array of attributes. auto forStmtBuilder = [&]() -> mlir::LogicalResult { - auto forRes = mlir::success(); + auto loopRes = mlir::success(); // Evaluate the first part before the loop. if (S.getInit()) if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) @@ -602,7 +622,8 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { loc, mlir::cir::BoolType::get(b.getContext()), b.getBoolAttr(true)); } - b.create(loc, condVal); + if (buildLoopCondYield(b, loc, condVal).failed()) + loopRes = mlir::failure(); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -613,16 +634,16 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { bool useCurrentScope = CGM.getASTContext().getLangOpts().CPlusPlus ? true : false; if (buildStmt(S.getBody(), useCurrentScope).failed()) - forRes = mlir::failure(); + loopRes = mlir::failure(); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { if (S.getInc()) if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) - forRes = mlir::failure(); + loopRes = mlir::failure(); builder.create(loc); }); - return forRes; + return loopRes; }; auto res = mlir::success(); @@ -651,7 +672,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { // TODO: pass in array of attributes. auto doStmtBuilder = [&]() -> mlir::LogicalResult { - auto forRes = mlir::success(); + auto loopRes = mlir::success(); loopOp = builder.create( getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::DoWhile, @@ -662,18 +683,19 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { // expression compares unequal to 0. The condition must be a // scalar type. mlir::Value condVal = evaluateExprAsBool(S.getCond()); - b.create(loc, condVal); + if (buildLoopCondYield(b, loc, condVal).failed()) + loopRes = mlir::failure(); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) - forRes = mlir::failure(); + loopRes = mlir::failure(); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { builder.create(loc); }); - return forRes; + return loopRes; }; auto res = mlir::success(); @@ -702,7 +724,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // TODO: pass in array of attributes. auto whileStmtBuilder = [&]() -> mlir::LogicalResult { - auto forRes = mlir::success(); + auto loopRes = mlir::success(); loopOp = builder.create( getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::While, @@ -718,18 +740,19 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // expression compares unequal to 0. The condition must be a // scalar type. condVal = evaluateExprAsBool(S.getCond()); - b.create(loc, condVal); + if (buildLoopCondYield(b, loc, condVal).failed()) + loopRes = mlir::failure(); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) - forRes = mlir::failure(); + loopRes = mlir::failure(); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { builder.create(loc); }); - return forRes; + return loopRes; }; auto res = mlir::success(); diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index 84cdd6eef1d8..2b4830f25ce2 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -2,7 +2,6 @@ // RUN: FileCheck --input-file=%t.cpp.cir %s --check-prefix=CPPSCOPE // RUN: %clang_cc1 -x c -std=c11 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.c.cir // RUN: FileCheck --input-file=%t.c.cir %s --check-prefix=CSCOPE -// XFAIL: * void l0() { for (int i = 0;;) { diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index a9d43fbad553..808cec2380dd 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * void l0() { for (;;) { @@ -10,7 +9,11 @@ void l0() { // CHECK: func @l0 // CHECK: cir.loop for(cond : { // CHECK-NEXT: %0 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.yield loopcondition %0 : !cir.bool +// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -29,7 +32,11 @@ void l1() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool -// CHECK-NEXT: cir.yield loopcondition %6 : !cir.bool +// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 @@ -61,7 +68,11 @@ void l2(bool cond) { // CHECK: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool +// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -75,7 +86,11 @@ void l2(bool cond) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %3 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool +// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -90,7 +105,11 @@ void l2(bool cond) { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %3 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool -// CHECK-NEXT: cir.yield loopcondition %4 : !cir.bool +// CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -119,7 +138,11 @@ void l3(bool cond) { // CHECK: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool +// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -133,7 +156,11 @@ void l3(bool cond) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.yield loopcondition %3 : !cir.bool +// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -148,7 +175,11 @@ void l3(bool cond) { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool -// CHECK-NEXT: cir.yield loopcondition %4 : !cir.bool +// CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -173,7 +204,11 @@ void l4() { // CHECK: func @l4 // CHECK: cir.loop while(cond : { // CHECK-NEXT: %4 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.yield loopcondition %4 : !cir.bool +// CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -200,7 +235,11 @@ void l5() { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %0 = cir.cst(0 : i32) : i32 // CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : i32), !cir.bool -// CHECK-NEXT: cir.yield loopcondition %1 : !cir.bool +// CHECK-NEXT: cir.brcond %1 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -220,7 +259,11 @@ void l6() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %0 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.yield loopcondition %0 : !cir.bool +// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index b1177cc22a04..49a2f365b539 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -1,5 +1,4 @@ // RUN: cir-tool %s | FileCheck %s -// XFAIL: * func.func @l0() { %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} @@ -13,7 +12,11 @@ func.func @l0() { %4 = cir.load %2 : cir.ptr , i32 %5 = cir.cst(10 : i32) : i32 %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool - cir.yield loopcondition %6 : !cir.bool + cir.brcond %6 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield }, step : { %4 = cir.load %2 : cir.ptr , i32 %5 = cir.cst(1 : i32) : i32 @@ -40,7 +43,11 @@ func.func @l0() { %4 = cir.load %2 : cir.ptr , i32 %5 = cir.cst(10 : i32) : i32 %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool - cir.yield loopcondition %6 : !cir.bool + cir.brcond %6 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield }, step : { cir.yield }) { @@ -64,7 +71,11 @@ func.func @l0() { %4 = cir.load %2 : cir.ptr , i32 %5 = cir.cst(10 : i32) : i32 %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool - cir.yield loopcondition %6 : !cir.bool + cir.brcond %6 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield }, step : { cir.yield }) { @@ -83,7 +94,11 @@ func.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool -// CHECK-NEXT: cir.yield loopcondition %6 : !cir.bool +// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 @@ -106,7 +121,11 @@ func.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool -// CHECK-NEXT: cir.yield loopcondition %6 : !cir.bool +// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -125,7 +144,11 @@ func.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool -// CHECK-NEXT: cir.yield loopcondition %6 : !cir.bool +// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index e97c9c035372..9748e4a94219 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-merge-cleanups -o %t.out.cir // RUN: FileCheck --input-file=%t.out.cir %s -// XFAIL: * module { func.func @sw1(%arg0: i32, %arg1: i32) { @@ -63,7 +62,11 @@ module { cir.scope { cir.loop while(cond : { %0 = cir.cst(true) : !cir.bool - cir.yield loopcondition %0 : !cir.bool + cir.brcond %0 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield }, step : { cir.yield }) { @@ -117,7 +120,11 @@ module { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %0 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.yield loopcondition %0 : !cir.bool +// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 4495e2c67d44..f12ab2909d97 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -391,13 +391,12 @@ def IfOp : CIR_Op<"if", def YieldOpKind_BK : I32EnumAttrCase<"Break", 1, "break">; def YieldOpKind_FT : I32EnumAttrCase<"Fallthrough", 2, "fallthrough">; -def YieldOpKind_LC : I32EnumAttrCase<"Loopcondition", 3, "loopcondition">; -def YieldOpKind_CE : I32EnumAttrCase<"Continue", 4, "continue">; +def YieldOpKind_CE : I32EnumAttrCase<"Continue", 3, "continue">; def YieldOpKind : I32EnumAttr< "YieldOpKind", "yield kind", - [YieldOpKind_BK, YieldOpKind_FT, YieldOpKind_LC, YieldOpKind_CE]> { + [YieldOpKind_BK, YieldOpKind_FT, YieldOpKind_CE]> { let cppNamespace = "::mlir::cir"; } @@ -438,12 +437,6 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, }, ... ] - cir.loop (cond : { - ... - %4 = ... : cir.bool - cir.yield loopcondition %4 - } ... ) {} - cir.loop (cond : {...}, step : {...}) { ... cir.yield continue @@ -455,11 +448,10 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, Variadic:$args); let builders = [ OpBuilder<(ins), [{ /* nothing to do */ }]>, - OpBuilder<(ins "Value":$cond), [{ - $_state.addOperands(cond); - mlir::cir::YieldOpKindAttr kind = mlir::cir::YieldOpKindAttr::get( - $_builder.getContext(), mlir::cir::YieldOpKind::Loopcondition); - $_state.addAttribute(getKindAttrName($_state.name), kind); + OpBuilder<(ins "YieldOpKind":$kind), [{ + mlir::cir::YieldOpKindAttr kattr = mlir::cir::YieldOpKindAttr::get( + $_builder.getContext(), kind); + $_state.addAttribute(getKindAttrName($_state.name), kattr); }]> ]; @@ -478,9 +470,6 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, bool isBreak() { return !isPlain() && *getKind() == YieldOpKind::Break; } - bool isLoopCondition() { - return !isPlain() && *getKind() == YieldOpKind::Loopcondition; - } bool isContinue() { return !isPlain() && *getKind() == YieldOpKind::Continue; } From 4b45de3679760d973f4c73a135cf9b5ef85dc973 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 3 Aug 2022 18:42:55 -0700 Subject: [PATCH 0281/2301] Revert "[DO NOT LAND] XFAIL another test witih verification issues" This reverts commit a63e15fab69d092f3a82767162289e3cb99c86af. --- clang/test/CIR/Transforms/lifetime-loop-valid.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/test/CIR/Transforms/lifetime-loop-valid.cpp b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp index ea1269a6b49b..9bbd33c4b702 100644 --- a/clang/test/CIR/Transforms/lifetime-loop-valid.cpp +++ b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp @@ -38,4 +38,4 @@ void valid1(bool b, int j) { j = j - 1; } *p = 0; // expected-remark {{pset => { a, c }}} -} +} \ No newline at end of file From fe899cef84b7ac46a298b0e7ad3e8310db4eaa5e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 3 Aug 2022 18:43:40 -0700 Subject: [PATCH 0282/2301] Revert "[DO NOT LAND] XFAIL all the tests broken by verifiers" This reverts commit 8ec8c3e88a6533ea8dc488fa3cade144a5521b37. --- clang/test/CIR/CodeGen/loop-scope.cpp | 2 +- clang/test/CIR/CodeGen/loop.cpp | 2 +- clang/test/CIR/IR/loop.cir | 2 +- clang/test/CIR/Transforms/merge-cleanups.cir | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index 2b4830f25ce2..13a0292bb086 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -26,4 +26,4 @@ void l0() { // CSCOPE: }) { // CSCOPE-NEXT: cir.scope { -// CSCOPE-NEXT: %2 = cir.alloca i32, cir.ptr , ["j", cinit] {alignment = 4 : i64} +// CSCOPE-NEXT: %2 = cir.alloca i32, cir.ptr , ["j", cinit] {alignment = 4 : i64} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 808cec2380dd..10463aab8268 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -271,4 +271,4 @@ void l6() { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return -// CHECK-NEXT: } +// CHECK-NEXT: } \ No newline at end of file diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 49a2f365b539..add9c47732ea 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -157,4 +157,4 @@ func.func @l0() { // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: cir.yield -// CHECK-NEXT: } +// CHECK-NEXT: } \ No newline at end of file diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 9748e4a94219..b15be865f1d1 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -132,4 +132,4 @@ module { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return -// CHECK-NEXT: } +// CHECK-NEXT: } \ No newline at end of file From 47bfc7e48107c49faff651b4ebc157f7f14ed048 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 21 Apr 2022 17:46:00 -0700 Subject: [PATCH 0283/2301] [CIR] Enable a proper cir.loop verifier and add more tests --- clang/test/CIR/IR/invalid.cir | 22 +++++++++ clang/test/CIR/IR/loop.cir | 54 +++++++++++++++++++++- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 2 + mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 25 ++++++++++ 4 files changed, 102 insertions(+), 1 deletion(-) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 4fd6cf260efa..a1301521ca96 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -127,3 +127,25 @@ func.func @cast3(%p: !cir.ptr) { %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // expected-error {{requires same type for array element and pointee result}} cir.return } + +// ----- + +func.func @b0() { + cir.scope { + cir.loop while(cond : { // expected-error {{cond region must be terminated with 'cir.yield' or 'cir.yield continue'}} + %0 = cir.cst(true) : !cir.bool + cir.brcond %0 ^bb1, ^bb2 + ^bb1: + cir.yield break + ^bb2: + cir.yield + }, step : { + cir.yield + }) { + cir.br ^bb1 + ^bb1: + cir.return + } + } + cir.return +} diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index add9c47732ea..14b89c7257e5 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -157,4 +157,56 @@ func.func @l0() { // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: cir.yield -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } + +func.func @l1() { + cir.scope { + cir.loop while(cond : { + cir.yield continue + }, step : { + cir.yield + }) { + cir.yield + } + } + cir.return +} + +// CHECK: func @l1 +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop while(cond : { +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +func.func @l2() { + cir.scope { + cir.loop while(cond : { + cir.yield + }, step : { + cir.yield + }) { + cir.yield + } + } + cir.return +} + +// CHECK: func @l2 +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop while(cond : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index f12ab2909d97..fac024eb831a 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -847,6 +847,8 @@ def LoopOp : CIR_Op<"loop", "nullptr">:$stepBuilder )> ]; + + let hasVerifier = 1; } diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 044938bb24de..7627efc613bd 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -892,6 +892,31 @@ void LoopOp::getSuccessorRegions(mlir::RegionBranchPoint point, llvm::SmallVector LoopOp::getLoopRegions() { return {&getBody()}; } +LogicalResult LoopOp::verify() { + // Cond regions should only terminate with plain 'cir.yield' or + // 'cir.yield continue'. + auto terminateError = [&]() { + return emitOpError() << "cond region must be terminated with " + "'cir.yield' or 'cir.yield continue'"; + }; + + auto &blocks = getCond().getBlocks(); + for (Block &block : blocks) { + if (block.empty()) + continue; + auto &op = block.back(); + if (isa(op)) + continue; + if (!isa(op)) + terminateError(); + auto y = cast(op); + if (!(y.isPlain() || y.isContinue())) + terminateError(); + } + + return success(); +} + //===----------------------------------------------------------------------===// // CIR defined traits //===----------------------------------------------------------------------===// From 9bf44eb56a7209ce6dd759996cdd21110dee7529 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 16:42:10 -0400 Subject: [PATCH 0284/2301] [CIR][NFC] Disable CIRBasedWarnings Up coming changes to CIRGenerator, CIRGenFunction and CIRGenModule break this functionality. We'll have to figure out how to rewrire this. We're going to need to have a system where the CIRBasedWarnings owns the full CIRGen pipeline. We're currently just asking CIRGenerator to ask CIRGenModule to build a function, but given that CGM has state that is required to exist for CIRGenFunction (which has it's own state) this no longer makes sense. Simply disable for now. --- clang/include/clang/Sema/CIRBasedWarnings.h | 2 +- clang/lib/Sema/CIRBasedWarnings.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/Sema/CIRBasedWarnings.h b/clang/include/clang/Sema/CIRBasedWarnings.h index ea08e24ad6ea..b050e8940215 100644 --- a/clang/include/clang/Sema/CIRBasedWarnings.h +++ b/clang/include/clang/Sema/CIRBasedWarnings.h @@ -37,7 +37,7 @@ class CIRBasedWarnings { private: Sema &S; AnalysisBasedWarnings::Policy DefaultPolicy; - std::unique_ptr CIRGen; + // std::unique_ptr CIRGen; //class InterProceduralData; //std::unique_ptr IPData; diff --git a/clang/lib/Sema/CIRBasedWarnings.cpp b/clang/lib/Sema/CIRBasedWarnings.cpp index ae7e3e29055e..1ad5e8b2e25f 100644 --- a/clang/lib/Sema/CIRBasedWarnings.cpp +++ b/clang/lib/Sema/CIRBasedWarnings.cpp @@ -67,8 +67,8 @@ sema::CIRBasedWarnings::CIRBasedWarnings(Sema &s) : S(s) { DefaultPolicy.enableConsumedAnalysis = isEnabled(D, warn_use_in_invalid_state); - CIRGen = std::make_unique(CodeGenOptions()); - CIRGen->Initialize(S.getASTContext()); + // CIRGen = std::make_unique(D, CodeGenOptions()); + // CIRGen->Initialize(S.getASTContext()); } // We need this here for unique_ptr with forward declared class. From 1a7d2514993d0e338f997b0109e2d696776f2a31 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 16:51:53 -0400 Subject: [PATCH 0285/2301] [CIR] Restructure Address to also have an ElementType This is technically just sourced immediately from the Pointer for now. However, LLVM just moved to opaque pointers and perhaps we'll need them too at some point. So just maintain the usages where we allow the user to pass both the pointer and the pointee. --- clang/lib/CIR/Address.h | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Address.h b/clang/lib/CIR/Address.h index 3fe52d41f211..8f371f13f746 100644 --- a/clang/lib/CIR/Address.h +++ b/clang/lib/CIR/Address.h @@ -18,22 +18,40 @@ #include "llvm/IR/Constants.h" +#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Value.h" namespace cir { class Address { mlir::Value Pointer; + mlir::Type ElementType; clang::CharUnits Alignment; +protected: + Address(std::nullptr_t) : Pointer(nullptr), ElementType(nullptr) {} + public: + Address(mlir::Value pointer, mlir::Type elementType, + clang::CharUnits alignment) + : Pointer(pointer), ElementType(elementType), Alignment(alignment) { + assert(pointer != nullptr && "Pointer cannot be null"); + assert(elementType != nullptr && "Pointer cannot be null"); + assert(pointer.getType().cast().getPointee() == + ElementType && + "Incorrect pointer element type"); + assert(!alignment.isZero() && "Alignment cannot be zero"); + } Address(mlir::Value pointer, clang::CharUnits alignment) - : Pointer(pointer), Alignment(alignment) { + : Address(pointer, + pointer.getType().cast().getPointee(), + alignment) { + assert((!alignment.isZero() || pointer == nullptr) && "creating valid address with invalid alignment"); } - static Address invalid() { return Address(nullptr, clang::CharUnits()); } + static Address invalid() { return Address(nullptr); } bool isValid() const { return Pointer != nullptr; } mlir::Value getPointer() const { @@ -46,6 +64,11 @@ class Address { // assert(isValid()); return Alignment; } + + mlir::Type getElementType() const { + assert(isValid()); + return ElementType; + } }; } // namespace cir From bb84c8d240164760ece86dd9b9607fa5d3fa339e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 17:12:16 -0400 Subject: [PATCH 0286/2301] [CIR] Give CIRGenerator a DiagnosticsEngine --- clang/include/clang/CIR/CIRGenerator.h | 4 +++- clang/lib/CIR/CIRGenerator.cpp | 19 +++++++++++++++++-- clang/lib/CIRFrontendAction/CIRGenAction.cpp | 9 +++------ 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index c1e65a173c7e..292fc94bd151 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -36,8 +36,10 @@ class CIRGenModule; class CIRGenTypes; class CIRGenerator : public clang::ASTConsumer { + clang::DiagnosticsEngine &Diags; public: - CIRGenerator(const clang::CodeGenOptions &CGO); + CIRGenerator(clang::DiagnosticsEngine &diags, + const clang::CodeGenOptions &CGO); ~CIRGenerator(); void Initialize(clang::ASTContext &Context) override; bool EmitFunction(const clang::FunctionDecl *FD); diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index 51988f71ac5a..e010429f5554 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -24,8 +24,10 @@ using namespace cir; using namespace clang; -CIRGenerator::CIRGenerator(const CodeGenOptions &CGO) : codeGenOpts{CGO} {} CIRGenerator::~CIRGenerator() = default; +CIRGenerator::CIRGenerator(clang::DiagnosticsEngine &diags, + const CodeGenOptions &CGO) + : Diags(diags), codeGenOpts{CGO} {} void CIRGenerator::Initialize(ASTContext &astCtx) { using namespace llvm; @@ -49,6 +51,9 @@ bool CIRGenerator::EmitFunction(const FunctionDecl *FD) { mlir::ModuleOp CIRGenerator::getModule() { return CGM->getModule(); } bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef D) { + if (Diags.hasErrorOccurred()) + return true; + for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { CGM->buildTopLevelDecl(*I); } @@ -56,4 +61,14 @@ bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef D) { return true; } -void CIRGenerator::HandleTranslationUnit(ASTContext &C) {} +void CIRGenerator::HandleTranslationUnit(ASTContext &C) { + // If there are errors before or when releasing the CGM, reset the module to + // stop here before invoking the backend. + if (Diags.hasErrorOccurred()) { + if (CGM) + // TODO: CGM->clear(); + // TODO: M.reset(); + return; + } +} + diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index 4b524180d0f1..22af096626dd 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -93,13 +93,10 @@ class CIRGenConsumer : public clang::ASTConsumer { headerSearchOptions(headerSearchOptions), codeGenOptions(codeGenOptions), targetOptions(targetOptions), langOptions(langOptions), feOptions(feOptions), + outputStream(std::move(os)), - gen(std::make_unique(codeGenOptions)) { - // This is required to match the constructors used during - // CodeGenAction. Ultimately, this is required because we want to use - // the same utility functions in BackendUtil.h for handling llvm - // optimization and codegen - (void)this->codeGenOptions; + + gen(std::make_unique(diagnosticsEngine, codeGenOptions)) { } void Initialize(ASTContext &ctx) override { From 7e82f2ff73aeaeae45ec5328a69200ae34757eee Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 17:17:33 -0400 Subject: [PATCH 0287/2301] [CIR] Give CIRGenModule a DiagnosticsEngine --- clang/lib/CIR/CIRGenModule.cpp | 13 ++++++++----- clang/lib/CIR/CIRGenModule.h | 6 +++++- clang/lib/CIR/CIRGenerator.cpp | 3 ++- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 8282cac5eba5..f1a6a6699936 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -41,9 +41,11 @@ #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" +#include "clang/Basic/Diagnostic.h" #include "clang/Basic/SourceLocation.h" #include "clang/CIR/CIRGenerator.h" #include "clang/CIR/LowerToLLVM.h" +#include "clang/Frontend/FrontendDiagnostic.h" #include "clang/Lex/Preprocessor.h" #include "llvm/ADT/ArrayRef.h" @@ -84,12 +86,13 @@ static CIRGenCXXABI *createCXXABI(CIRGenModule &CGM) { CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, - const clang::CodeGenOptions &CGO) + const clang::CodeGenOptions &CGO, + DiagnosticsEngine &Diags) : builder(&context), astCtx(astctx), langOpts(astctx.getLangOpts()), - codeGenOpts(CGO), - theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, - target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), - genTypes{*this} {} + codeGenOpts(CGO), theModule{mlir::ModuleOp::create( + builder.getUnknownLoc())}, + Diags(Diags), target(astCtx.getTargetInfo()), + ABI(createCXXABI(*this)), genTypes{*this} {} CIRGenModule::~CIRGenModule() {} diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index dd3cf25677b0..0949101195b4 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -50,7 +50,8 @@ class CIRGenModule { public: CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, - const clang::CodeGenOptions &CGO); + const clang::CodeGenOptions &CGO, + clang::DiagnosticsEngine &Diags); ~CIRGenModule(); @@ -72,6 +73,8 @@ class CIRGenModule { /// A "module" matches a c/cpp source file: containing a list of functions. mlir::ModuleOp theModule; + clang::DiagnosticsEngine &Diags; + const clang::TargetInfo ⌖ std::unique_ptr ABI; @@ -93,6 +96,7 @@ class CIRGenModule { clang::ASTContext &getASTContext() { return astCtx; } const clang::TargetInfo &getTarget() const { return target; } const clang::CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; } + clang::DiagnosticsEngine &getDiags() const { return Diags; } CIRGenTypes &getTypes() { return genTypes; } const clang::LangOptions &getLangOpts() const { return langOpts; } diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index e010429f5554..2b1ae967ab02 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -39,7 +39,8 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); - CGM = std::make_unique(*mlirCtx.get(), astCtx, codeGenOpts); + CGM = std::make_unique(*mlirCtx.get(), astCtx, codeGenOpts, + Diags); } void CIRGenerator::verifyModule() { CGM->verifyModule(); } From cb9b1086785994a9e44662f5fc79c09ca6cbea3b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 17:19:57 -0400 Subject: [PATCH 0288/2301] [CIR][NFC] Formatting for CIRGenerator --- clang/include/clang/CIR/CIRGenerator.h | 19 ++++++++++++------- clang/lib/CIR/CIRGenerator.cpp | 2 ++ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 292fc94bd151..4328da129f62 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -16,7 +16,9 @@ #include "clang/AST/ASTConsumer.h" #include "clang/Basic/CodeGenOptions.h" + #include "llvm/Support/ToolOutputFile.h" + #include namespace mlir { @@ -36,7 +38,17 @@ class CIRGenModule; class CIRGenTypes; class CIRGenerator : public clang::ASTConsumer { + virtual void anchor(); clang::DiagnosticsEngine &Diags; + clang::ASTContext *astCtx; + + const clang::CodeGenOptions codeGenOpts; // Intentionally copied in. + +protected: + std::unique_ptr mlirCtx; + std::unique_ptr CGM; + +private: public: CIRGenerator(clang::DiagnosticsEngine &diags, const clang::CodeGenOptions &CGO); @@ -54,13 +66,6 @@ class CIRGenerator : public clang::ASTConsumer { void verifyModule(); -private: - std::unique_ptr mlirCtx; - std::unique_ptr CGM; - - const clang::CodeGenOptions codeGenOpts; // Intentionally copied in. - - clang::ASTContext *astCtx; }; } // namespace cir diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index 2b1ae967ab02..d9d76df53add 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -25,6 +25,8 @@ using namespace cir; using namespace clang; CIRGenerator::~CIRGenerator() = default; +void CIRGenerator::anchor() {} + CIRGenerator::CIRGenerator(clang::DiagnosticsEngine &diags, const CodeGenOptions &CGO) : Diags(diags), codeGenOpts{CGO} {} From 721056235669379275b3d106801722f6f62fb87a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 17:22:22 -0400 Subject: [PATCH 0289/2301] [CIR][NFC] Add stubbed out C++ files to ease CMake iteration While splitting a big diff into smaller diffs, if you transition back and forth between diffsets with different cmake status you'll have to run incremental builds which regenerate your build files each time. So simply avoid this by adding stubbed out versions of the full build. --- clang/lib/CIR/CIRGenCXX.cpp | 0 clang/lib/CIR/CIRGenClass.cpp | 0 clang/lib/CIR/CIRGenExprAgg.cpp | 0 clang/lib/CIR/CIRGenTBAA.cpp | 0 clang/lib/CIR/CIRGenTBAA.h | 0 clang/lib/CIR/CMakeLists.txt | 11 +++++++---- 6 files changed, 7 insertions(+), 4 deletions(-) create mode 100644 clang/lib/CIR/CIRGenCXX.cpp create mode 100644 clang/lib/CIR/CIRGenClass.cpp create mode 100644 clang/lib/CIR/CIRGenExprAgg.cpp create mode 100644 clang/lib/CIR/CIRGenTBAA.cpp create mode 100644 clang/lib/CIR/CIRGenTBAA.h diff --git a/clang/lib/CIR/CIRGenCXX.cpp b/clang/lib/CIR/CIRGenCXX.cpp new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/CIR/CIRGenTBAA.cpp b/clang/lib/CIR/CIRGenTBAA.cpp new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/CIR/CIRGenTBAA.h b/clang/lib/CIR/CIRGenTBAA.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 15daaa6d55bb..7ec9fcea3d0a 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -9,19 +9,22 @@ include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) -add_clang_library( - clangCIR +add_clang_library(clangCIR + CIRGenCXX.cpp + CIRGenCXXABI.cpp CIRGenCall.cpp - CIRGenerator.cpp + CIRGenClass.cpp CIRGenCleanup.cpp - CIRGenCXXABI.cpp CIRGenDecl.cpp CIRGenExpr.cpp + CIRGenExprAgg.cpp CIRGenExprScalar.cpp CIRGenFunction.cpp CIRGenModule.cpp CIRGenStmt.cpp + CIRGenTBAA.cpp CIRGenTypes.cpp + CIRGenerator.cpp CIRPasses.cpp CIRRecordLayoutBuilder.cpp ItaniumCXXABI.cpp From db9044206f0ba86dd72b5850fa6cf9786fc99ddd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 17:27:51 -0400 Subject: [PATCH 0290/2301] [CIR][NFC] Rename ItaniumCXXABI -> CIRGenItaniumCXXABI Using the original name had too many naming collisions with the two versions of the same class in AST and CodeGen. So rename to avoid that issue. --- clang/lib/CIR/CIRGenCXXABI.h | 2 +- ...aniumCXXABI.cpp => CIRGenItaniumCXXABI.cpp} | 18 +++++++++--------- clang/lib/CIR/CIRGenModule.cpp | 2 +- clang/lib/CIR/CMakeLists.txt | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) rename clang/lib/CIR/{ItaniumCXXABI.cpp => CIRGenItaniumCXXABI.cpp} (81%) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 8bead2ac02e3..bf1dce9ede3d 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -93,7 +93,7 @@ class CIRGenCXXABI { }; /// Creates and Itanium-family ABI -CIRGenCXXABI *CreateItaniumCXXABI(CIRGenModule &CGM); +CIRGenCXXABI *CreateCIRGenItaniumCXXABI(CIRGenModule &CGM); } // namespace cir diff --git a/clang/lib/CIR/ItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp similarity index 81% rename from clang/lib/CIR/ItaniumCXXABI.cpp rename to clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 90718e195860..0d9d4194e51a 100644 --- a/clang/lib/CIR/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -1,4 +1,4 @@ -//===------- ItaniumCXXABI.cpp - Emit CIR from ASTs for a Module ----------===// +//===----- CIRGenItaniumCXXABI.cpp - Emit CIR from ASTs for a Module ------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -27,15 +27,15 @@ using namespace cir; using namespace clang; namespace { -class ItaniumCXXABI : public cir::CIRGenCXXABI { +class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { protected: bool UseARMMethodPtrABI; bool UseARMGuardVarABI; bool Use32BitVTableOffsetABI; public: - ItaniumCXXABI(CIRGenModule &CGM, bool UseARMMethodPtrABI = false, - bool UseARMGuardVarABI = false) + CIRGenItaniumCXXABI(CIRGenModule &CGM, bool UseARMMethodPtrABI = false, + bool UseARMGuardVarABI = false) : CIRGenCXXABI(CGM), UseARMMethodPtrABI{UseARMMethodPtrABI}, UseARMGuardVarABI{UseARMGuardVarABI}, Use32BitVTableOffsetABI{false} { assert(!UseARMMethodPtrABI && "NYI"); @@ -53,7 +53,7 @@ class ItaniumCXXABI : public cir::CIRGenCXXABI { }; } // namespace -CIRGenCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs( +CIRGenCXXABI::AddedStructorArgs CIRGenItaniumCXXABI::getImplicitConstructorArgs( CIRGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating) { assert(!NeedsVTTParameter(GlobalDecl(D, Type)) && "VTT NYI"); @@ -61,7 +61,7 @@ CIRGenCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs( return {}; } -bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { +bool CIRGenItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { auto *MD = cast(GD.getDecl()); assert(!MD->getParent()->getNumVBases() && "virtual bases NYI"); @@ -76,17 +76,17 @@ bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { return false; } -CIRGenCXXABI *cir::CreateItaniumCXXABI(CIRGenModule &CGM) { +CIRGenCXXABI *cir::CreateCIRGenItaniumCXXABI(CIRGenModule &CGM) { switch (CGM.getASTContext().getCXXABIKind()) { case TargetCXXABI::GenericItanium: - return new ItaniumCXXABI(CGM); + return new CIRGenItaniumCXXABI(CGM); default: llvm_unreachable("bad or NYI ABI kind"); } } -bool ItaniumCXXABI::classifyReturnType(CIRGenFunctionInfo &FI) const { +bool CIRGenItaniumCXXABI::classifyReturnType(CIRGenFunctionInfo &FI) const { auto *RD = FI.getReturnType()->getAsCXXRecordDecl(); assert(!RD && "RecordDecl return types NYI"); return false; diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index f1a6a6699936..390848f10188 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -78,7 +78,7 @@ using llvm::StringRef; static CIRGenCXXABI *createCXXABI(CIRGenModule &CGM) { switch (CGM.getASTContext().getCXXABIKind()) { case TargetCXXABI::GenericItanium: - return CreateItaniumCXXABI(CGM); + return CreateCIRGenItaniumCXXABI(CGM); default: llvm_unreachable("invalid C++ ABI kind"); } diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 7ec9fcea3d0a..9d11bce5b215 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -20,6 +20,7 @@ add_clang_library(clangCIR CIRGenExprAgg.cpp CIRGenExprScalar.cpp CIRGenFunction.cpp + CIRGenItaniumCXXABI.cpp CIRGenModule.cpp CIRGenStmt.cpp CIRGenTBAA.cpp @@ -27,7 +28,6 @@ add_clang_library(clangCIR CIRGenerator.cpp CIRPasses.cpp CIRRecordLayoutBuilder.cpp - ItaniumCXXABI.cpp LowerToLLVM.cpp TargetInfo.cpp From b6cd8478632f089f3dfb982b5fa5ab285fc6d1d9 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 17:33:53 -0400 Subject: [PATCH 0291/2301] [CIR] Handle some Parser callbacks from CIRGenerator and CIRGenConsumer The parser has been calling these member fns without us doing anything with them. CodeGen requries them to be able to properly handle deferred decl generation. We'll need to flesh out these implemetnations. But for now just stub them out and assert that we don't hit a few that we've yet to see. --- clang/include/clang/CIR/CIRGenerator.h | 3 + clang/lib/CIR/CIRGenerator.cpp | 18 ++++++ clang/lib/CIRFrontendAction/CIRGenAction.cpp | 61 ++++++++++++++++---- 3 files changed, 71 insertions(+), 11 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 4328da129f62..ae9d65f4a8f6 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -58,6 +58,9 @@ class CIRGenerator : public clang::ASTConsumer { bool HandleTopLevelDecl(clang::DeclGroupRef D) override; void HandleTranslationUnit(clang::ASTContext &Ctx) override; + void HandleInlineFunctionDefinition(clang::FunctionDecl *D) override; + void HandleTagDeclDefinition(clang::TagDecl *D) override; + void HandleTagDeclRequiredDefinition(const clang::TagDecl *D) override; mlir::ModuleOp getModule(); std::unique_ptr takeContext() { diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index d9d76df53add..4501b7001127 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -75,3 +75,21 @@ void CIRGenerator::HandleTranslationUnit(ASTContext &C) { } } +void CIRGenerator::HandleInlineFunctionDefinition(FunctionDecl *D) { + if (Diags.hasErrorOccurred()) + return; +} + +/// HandleTagDeclDefinition - This callback is invoked each time a TagDecl to +/// (e.g. struct, union, enum, class) is completed. This allows the client hack +/// on the type, which can occur at any point in the file (because these can be +/// defined in declspecs). +void CIRGenerator::HandleTagDeclDefinition(TagDecl *D) { + if (Diags.hasErrorOccurred()) + return; +} + +void CIRGenerator::HandleTagDeclRequiredDefinition(const TagDecl *D) { + if (Diags.hasErrorOccurred()) + return; +} diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index 22af096626dd..2e9d978ec5d0 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -12,8 +12,8 @@ #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" -#include "mlir/Parser/Parser.h" #include "mlir/IR/OperationSupport.h" +#include "mlir/Parser/Parser.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" @@ -115,9 +115,17 @@ class CIRGenConsumer : public clang::ASTConsumer { return true; } - void HandleInlineFunctionDefinition(FunctionDecl *D) override {} + void HandleCXXStaticMemberVarInstantiation(clang::VarDecl *VD) override { + llvm_unreachable("NYI"); + } + + void HandleInlineFunctionDefinition(FunctionDecl *D) override { + gen->HandleInlineFunctionDefinition(D); + } - void HandleInterestingDecl(DeclGroupRef D) override { HandleTopLevelDecl(D); } + void HandleInterestingDecl(DeclGroupRef D) override { + llvm_unreachable("NYI"); + } void HandleTranslationUnit(ASTContext &C) override { gen->HandleTranslationUnit(C); @@ -168,17 +176,30 @@ class CIRGenConsumer : public clang::ASTConsumer { } } - void HandleTagDeclDefinition(TagDecl *D) override {} + void HandleTagDeclDefinition(TagDecl *D) override { + PrettyStackTraceDecl CrashInfo(D, SourceLocation(), + astContext->getSourceManager(), + "CIR generation of declaration"); + gen->HandleTagDeclDefinition(D); + } - void HandleTagDeclRequiredDefinition(const TagDecl *D) override {} + void HandleTagDeclRequiredDefinition(const TagDecl *D) override { + gen->HandleTagDeclRequiredDefinition(D); + } - void CompleteTentativeDefinition(VarDecl *D) override {} + void CompleteTentativeDefinition(VarDecl *D) override { + llvm_unreachable("NYI"); + } - void CompleteExternalDeclaration(DeclaratorDecl *D) override {} + void CompleteExternalDeclaration(DeclaratorDecl *D) override { + llvm_unreachable("NYI"); + } - void AssignInheritanceModel(CXXRecordDecl *RD) override {} + void AssignInheritanceModel(CXXRecordDecl *RD) override { + llvm_unreachable("NYI"); + } - void HandleVTable(CXXRecordDecl *RD) override {} + void HandleVTable(CXXRecordDecl *RD) override { llvm_unreachable("NYI"); } }; } // namespace cir @@ -190,7 +211,14 @@ CIRGenAction::CIRGenAction(OutputType act, mlir::MLIRContext *_MLIRContext) CIRGenAction::~CIRGenAction() { mlirModule.reset(); } -void CIRGenAction::EndSourceFileAction() {} +void CIRGenAction::EndSourceFileAction() { + // If the consumer creation failed, do nothing. + if (!getCompilerInstance().hasASTConsumer()) + return; + + // TODO: pass the module around + // module = cgConsumer->takeModule(); +} static std::unique_ptr getOutputStream(CompilerInstance &ci, StringRef inFile, @@ -216,10 +244,21 @@ CIRGenAction::CreateASTConsumer(CompilerInstance &ci, StringRef inputFile) { auto out = ci.takeOutputStream(); if (!out) out = getOutputStream(ci, inputFile, action); - return std::make_unique( + + auto Result = std::make_unique( action, ci, ci.getDiagnostics(), ci.getHeaderSearchOpts(), ci.getCodeGenOpts(), ci.getTargetOpts(), ci.getLangOpts(), ci.getFrontendOpts(), std::move(out)); + cgConsumer = Result.get(); + + // Enable generating macro debug info only when debug info is not disabled and + // also macrod ebug info is enabled + if (ci.getCodeGenOpts().getDebugInfo() != llvm::codegenoptions::NoDebugInfo && + ci.getCodeGenOpts().MacroDebugInfo) { + llvm_unreachable("NYI"); + } + + return std::move(Result); } mlir::OwningOpRef From 1f495cbff9b2c495ebe2ab52d307fbce4b1cf631 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 17:38:52 -0400 Subject: [PATCH 0292/2301] [CIR] Add a RAII type for handling inline method definition deferment Sema calls `HandleInlineFunctionDefinition` which will ping us to generate a member function definition. But we don't know enough at the time of invocation to correctly do so. So this mechanism delays generation via an RAII type that will undo this stack at the end of HandleTopLevelDecl. It's currently not complete but later commits will finish it off. --- clang/include/clang/CIR/CIRGenerator.h | 23 ++++++++++++ clang/lib/CIR/CIRGenerator.cpp | 52 +++++++++++++++++++++++++- 2 files changed, 73 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index ae9d65f4a8f6..7e87e01b4cc6 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -15,6 +15,7 @@ #define CLANG_CIRGENERATOR_H_ #include "clang/AST/ASTConsumer.h" +#include "clang/AST/Decl.h" #include "clang/Basic/CodeGenOptions.h" #include "llvm/Support/ToolOutputFile.h" @@ -44,11 +45,32 @@ class CIRGenerator : public clang::ASTConsumer { const clang::CodeGenOptions codeGenOpts; // Intentionally copied in. + unsigned HandlingTopLevelDecls; + + /// Use this when emitting decls to block re-entrant decl emission. It will + /// emit all deferred decls on scope exit. Set EmitDeferred to false if decl + /// emission must be deferred longer, like at the end of a tag definition. + struct HandlingTopLevelDeclRAII { + CIRGenerator &Self; + bool EmitDeferred; + HandlingTopLevelDeclRAII(CIRGenerator &Self, bool EmitDeferred = true) + : Self{Self}, EmitDeferred{EmitDeferred} { + ++Self.HandlingTopLevelDecls; + } + ~HandlingTopLevelDeclRAII() { + unsigned Level = --Self.HandlingTopLevelDecls; + if (Level == 0 && EmitDeferred) + Self.buildDeferredDecls(); + } + }; + protected: std::unique_ptr mlirCtx; std::unique_ptr CGM; private: + llvm::SmallVector DeferredInlineMemberFuncDefs; + public: CIRGenerator(clang::DiagnosticsEngine &diags, const clang::CodeGenOptions &CGO); @@ -69,6 +91,7 @@ class CIRGenerator : public clang::ASTConsumer { void verifyModule(); + void buildDeferredDecls(); }; } // namespace cir diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index 4501b7001127..3533e1ad6b3c 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -19,17 +19,21 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" +#include "clang/Basic/TargetInfo.h" #include "clang/CIR/CIRGenerator.h" using namespace cir; using namespace clang; -CIRGenerator::~CIRGenerator() = default; void CIRGenerator::anchor() {} CIRGenerator::CIRGenerator(clang::DiagnosticsEngine &diags, const CodeGenOptions &CGO) - : Diags(diags), codeGenOpts{CGO} {} + : Diags(diags), codeGenOpts{CGO}, HandlingTopLevelDecls(0) {} +CIRGenerator::~CIRGenerator() { + // There should normally not be any leftover inline method definitions. + assert(DeferredInlineMemberFuncDefs.empty() || Diags.hasErrorOccurred()); +} void CIRGenerator::Initialize(ASTContext &astCtx) { using namespace llvm; @@ -57,6 +61,8 @@ bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef D) { if (Diags.hasErrorOccurred()) return true; + HandlingTopLevelDeclRAII HandlingDecl(*this); + for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { CGM->buildTopLevelDecl(*I); } @@ -78,6 +84,30 @@ void CIRGenerator::HandleTranslationUnit(ASTContext &C) { void CIRGenerator::HandleInlineFunctionDefinition(FunctionDecl *D) { if (Diags.hasErrorOccurred()) return; + + assert(D->doesThisDeclarationHaveABody()); + + // We may want to emit this definition. However, that decision might be + // based on computing the linkage, and we have to defer that in case we are + // inside of something that will chagne the method's final linkage, e.g. + // typedef struct { + // void bar(); + // void foo() { bar(); } + // } A; + DeferredInlineMemberFuncDefs.push_back(D); +} + +void CIRGenerator::buildDeferredDecls() { + if (DeferredInlineMemberFuncDefs.empty()) + return; + + // Emit any deferred inline method definitions. Note that more deferred + // methods may be added during this loop, since ASTConsumer callbacks can be + // invoked if AST inspection results in declarations being added. + HandlingTopLevelDeclRAII HandlingDecls(*this); + for (unsigned I = 0; I != DeferredInlineMemberFuncDefs.size(); ++I) + CGM->buildTopLevelDecl(DeferredInlineMemberFuncDefs[I]); + DeferredInlineMemberFuncDefs.clear(); } /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl to @@ -87,9 +117,27 @@ void CIRGenerator::HandleInlineFunctionDefinition(FunctionDecl *D) { void CIRGenerator::HandleTagDeclDefinition(TagDecl *D) { if (Diags.hasErrorOccurred()) return; + + // Don't allow re-entrant calls to CIRGen triggered by PCH deserialization to + // emit deferred decls. + HandlingTopLevelDeclRAII HandlingDecl(*this, /*EmitDeferred=*/false); + + // For MSVC compatibility, treat declarations of static data members with + // inline initializers as definitions. + if (astCtx->getTargetInfo().getCXXABI().isMicrosoft()) { + llvm_unreachable("NYI"); + } + // For OpenMP emit declare reduction functions, if required. + if (astCtx->getLangOpts().OpenMP) { + llvm_unreachable("NYI"); + } } void CIRGenerator::HandleTagDeclRequiredDefinition(const TagDecl *D) { if (Diags.hasErrorOccurred()) return; + + // Don't allow re-entrant calls to CIRGen triggered by PCH deserialization to + // emit deferred decls. + HandlingTopLevelDeclRAII HandlingDecl(*this, /*EmitDeferred=*/false); } From 74635fb776644f1388ef06a500fb00b47ababa4b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 17:45:49 -0400 Subject: [PATCH 0293/2301] [CIR][NFC] Stub out getModuleDebugInfo so we can assert against it We want to explicitly reject supporting debug info when requested at the moment. Eventually when this method gets properly implemented and built with including moduleDebugInfo we'll have a handful of sites that assert and fail that immediately call out where we need to implement moduleDebugInfo. --- clang/lib/CIR/CIRGenModule.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 0949101195b4..279e7ebd0ec1 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -19,6 +19,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/StmtVisitor.h" #include "clang/Basic/SourceManager.h" +#include "clang/Basic/TargetInfo.h" #include "llvm/ADT/ScopedHashTable.h" #include "llvm/ADT/SmallPtrSet.h" @@ -173,6 +174,8 @@ class CIRGenModule { llvm::StringRef getMangledName(clang::GlobalDecl GD); mlir::Value GetGlobalValue(const clang::Decl *D); + std::nullptr_t getModuleDebugInfo() { return nullptr; } + void emitError(const llvm::Twine &message) { theModule.emitError(message); } From 8cf8e6dc6c238b071ac1e0bc35e822f38ae0ec34 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 17:56:41 -0400 Subject: [PATCH 0294/2301] [CIR][NFC] Mark some getters const for usage from const refs to a CGM --- clang/lib/CIR/CIRGenModule.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 279e7ebd0ec1..647127d5a902 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -92,9 +92,9 @@ class CIRGenModule { /// ------- public: - mlir::ModuleOp getModule() { return theModule; } + mlir::ModuleOp getModule() const { return theModule; } mlir::OpBuilder &getBuilder() { return builder; } - clang::ASTContext &getASTContext() { return astCtx; } + clang::ASTContext &getASTContext() const { return astCtx; } const clang::TargetInfo &getTarget() const { return target; } const clang::CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; } clang::DiagnosticsEngine &getDiags() const { return Diags; } From e556107f0626fe791adf0150684ad45ffd61b3c4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 18:04:52 -0400 Subject: [PATCH 0295/2301] [CIR] Add a CGModule::Release method for cleanup from HandleTranslationUnit At the end of parsing the frontend action will call HandleTranslationUnit. Add a `Release` method to handle any deferrment or module finishing actions that we'll need to take. --- clang/lib/CIR/CIRGenModule.cpp | 47 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenModule.h | 3 +++ clang/lib/CIR/CIRGenerator.cpp | 4 +++ 3 files changed, 54 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 390848f10188..0d12754dee06 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -447,3 +447,50 @@ mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { assert(CurCGF); return CurCGF->getLoc(lhs, rhs); } + +void CIRGenModule::Release() { + // TODO: buildVTablesOpportunistically(); + // TODO: applyGlobalValReplacements(); + // TODO: applyReplacements(); + // TODO: checkAliases(); + // TODO: buildMultiVersionFunctions(); + // TODO: buildCXXGlobalInitFunc(); + // TODO: buildCXXGlobalCleanUpFunc(); + // TODO: registerGlobalDtorsWithAtExit(); + // TODO: buildCXXThreadLocalInitFunc(); + // TODO: ObjCRuntime + if (astCtx.getLangOpts().CUDA) { + llvm_unreachable("NYI"); + } + // TODO: OpenMPRuntime + // TODO: PGOReader + // TODO: buildCtorList(GlobalCtors); + // TODO: builtCtorList(GlobalDtors); + // TODO: buildGlobalAnnotations(); + // TODO: buildDeferredUnusedCoverageMappings(); + // TODO: CIRGenPGO + // TODO: CoverageMapping + if (getCodeGenOpts().SanitizeCfiCrossDso) { + llvm_unreachable("NYI"); + } + // TODO: buildAtAvailableLinkGuard(); + if (astCtx.getTargetInfo().getTriple().isWasm() && + !astCtx.getTargetInfo().getTriple().isOSEmscripten()) { + llvm_unreachable("NYI"); + } + + // Emit reference of __amdgpu_device_library_preserve_asan_functions to + // preserve ASAN functions in bitcode libraries. + if (getLangOpts().Sanitize.has(SanitizerKind::Address)) { + llvm_unreachable("NYI"); + } + + // TODO: buildLLVMUsed(); + // TODO: SanStats + + if (getCodeGenOpts().Autolink) { + // TODO: buildModuleLinkOptions + } + + // TODO: FINISH THE REST OF THIS +} diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 647127d5a902..8cd3d4ba3a4d 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -176,6 +176,9 @@ class CIRGenModule { mlir::Value GetGlobalValue(const clang::Decl *D); std::nullptr_t getModuleDebugInfo() { return nullptr; } + // Finalize CIR code generation. + void Release(); + void emitError(const llvm::Twine &message) { theModule.emitError(message); } diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index 3533e1ad6b3c..9287abce40e6 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -71,6 +71,10 @@ bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef D) { } void CIRGenerator::HandleTranslationUnit(ASTContext &C) { + // Release the Builder when there is no error. + if (!Diags.hasErrorOccurred() && CGM) + CGM->Release(); + // If there are errors before or when releasing the CGM, reset the module to // stop here before invoking the backend. if (Diags.hasErrorOccurred()) { From 5516f021b959af397106f805deb56b79b0e05acf Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 18:08:14 -0400 Subject: [PATCH 0296/2301] [CIR][NFC] Clean up some clang namespace usage in CIRRecordLayoutBuilder I forgot to use the namespace here. Simply fix that up. --- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 52 +++++++++++++----------- 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp index 99a544ec678f..8ad369f611cf 100644 --- a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -12,6 +12,7 @@ #include using namespace cir; +using namespace clang; namespace { struct CIRRecordLowering final { @@ -20,25 +21,25 @@ struct CIRRecordLowering final { // member. In addition to the standard member types, there exists a sentinel // member type that ensures correct rounding. struct MemberInfo final { - clang::CharUnits offset; + CharUnits offset; enum class InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } kind; mlir::Type data; - const clang::FieldDecl *fieldDecl; - MemberInfo(clang::CharUnits offset, InfoKind kind, mlir::Type data, - const clang::FieldDecl *fieldDecl = nullptr) + const FieldDecl *fieldDecl; + MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data, + const FieldDecl *fieldDecl = nullptr) : offset{offset}, kind{kind}, data{data}, fieldDecl{fieldDecl} {}; bool operator<(const MemberInfo &other) const { return offset < other.offset; } }; - CIRRecordLowering(CIRGenTypes &cirGenTypes, - const clang::RecordDecl *recordDecl, bool isPacked); + CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl, + bool isPacked); void lower(bool nonVirtualBaseType); void accumulateFields(); - clang::CharUnits bitsToCharUnits(uint64_t bitOffset) { + CharUnits bitsToCharUnits(uint64_t bitOffset) { return astContext.toCharUnitsFromBits(bitOffset); } @@ -49,16 +50,16 @@ struct CIRRecordLowering final { astContext.getCharWidth()); } - mlir::Type getByteArrayType(clang::CharUnits numberOfChars) { + mlir::Type getByteArrayType(CharUnits numberOfChars) { assert(!numberOfChars.isZero() && "Empty byte arrays aren't allowed."); mlir::Type type = getCharType(); - return numberOfChars == clang::CharUnits::One() + return numberOfChars == CharUnits::One() ? type : mlir::RankedTensorType::get({0, numberOfChars.getQuantity()}, type); } - mlir::Type getStorageType(const clang::FieldDecl *fieldDecl) { + mlir::Type getStorageType(const FieldDecl *fieldDecl) { auto type = cirGenTypes.convertTypeForMem(fieldDecl->getType()); assert(!fieldDecl->isBitField() && "bit fields NYI"); if (!fieldDecl->isBitField()) @@ -72,7 +73,7 @@ struct CIRRecordLowering final { llvm_unreachable("getStorageType only supports nonBitFields at this point"); } - uint64_t getFieldBitOffset(const clang::FieldDecl *fieldDecl) { + uint64_t getFieldBitOffset(const FieldDecl *fieldDecl) { return astRecordLayout.getFieldOffset(fieldDecl->getFieldIndex()); } @@ -80,15 +81,15 @@ struct CIRRecordLowering final { void fillOutputFields(); CIRGenTypes &cirGenTypes; - const clang::ASTContext &astContext; - const clang::RecordDecl *recordDecl; - const clang::CXXRecordDecl *cxxRecordDecl; - const clang::ASTRecordLayout &astRecordLayout; + const ASTContext &astContext; + const RecordDecl *recordDecl; + const CXXRecordDecl *cxxRecordDecl; + const ASTRecordLayout &astRecordLayout; // Helpful intermediate data-structures std::vector members; // Output fields, consumed by CIRGenTypes::computeRecordLayout llvm::SmallVector fieldTypes; - llvm::DenseMap fields; + llvm::DenseMap fields; bool isPacked : 1; private: @@ -98,11 +99,11 @@ struct CIRRecordLowering final { } // namespace CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, - const clang::RecordDecl *recordDecl, + const RecordDecl *recordDecl, bool isPacked) : cirGenTypes{cirGenTypes}, astContext{cirGenTypes.getContext()}, - recordDecl{recordDecl}, - cxxRecordDecl{llvm::dyn_cast(recordDecl)}, + recordDecl{recordDecl}, cxxRecordDecl{llvm::dyn_cast( + recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, isPacked{isPacked} {} @@ -156,14 +157,17 @@ void CIRRecordLowering::accumulateFields() { } mlir::cir::StructType -CIRGenTypes::computeRecordLayout(const clang::RecordDecl *recordDecl) { +CIRGenTypes::computeRecordLayout(const RecordDecl *recordDecl) { CIRRecordLowering builder(*this, recordDecl, /*packed=*/false); builder.lower(/*nonVirtualBaseType=*/false); - if (llvm::isa(recordDecl)) { - assert(builder.astRecordLayout.getNonVirtualSize() == - builder.astRecordLayout.getSize() && - "Virtual base objects NYI"); + // If we're in C++, compute the base subobject type. + if (llvm::isa(recordDecl) && !recordDecl->isUnion() && + !recordDecl->hasAttr()) { + if (builder.astRecordLayout.getNonVirtualSize() != + builder.astRecordLayout.getSize()) { + llvm_unreachable("NYI"); + } } assert(!builder.isPacked && "Packed structs NYI"); From b029455acc3ede079264bc863a95e859a82f22cd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 18:12:16 -0400 Subject: [PATCH 0297/2301] [CIR] Support emitting deferred decls at Release time for CIRGenModule We don't currently have this hooked into anything, but C++ constructors use deferrment with Ctor_Base and Ctor_Complete constructors and this will be necessary. So just build the infrastructure first and consume later. --- clang/lib/CIR/CIRGenModule.cpp | 36 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenModule.h | 19 ++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 0d12754dee06..8034895e56ec 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -448,7 +448,43 @@ mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { return CurCGF->getLoc(lhs, rhs); } +void CIRGenModule::buildDeferred() { + // Emit deferred declare target declarations + if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) + llvm_unreachable("NYI"); + + // Emit code for any potentially referenced deferred decls. Since a previously + // unused static decl may become used during the generation of code for a + // static function, iterate until no changes are made. + + if (!DeferredVTables.empty()) { + llvm_unreachable("NYI"); + } + + // Emit CUDA/HIP static device variables referenced by host code only. Note we + // should not clear CUDADeviceVarODRUsedByHost since it is still needed for + // further handling. + if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { + llvm_unreachable("NYI"); + } + + // Stop if we're out of both deferred vtables and deferred declarations. + if (DeferredDeclsToEmit.empty()) + return; + + // Grab the list of decls to emit. If buildGlobalDefinition schedules more + // work, it will not interfere with this. + std::vector CurDeclsToEmit; + CurDeclsToEmit.swap(DeferredDeclsToEmit); + + for (auto &D : CurDeclsToEmit) { + (void)D; + llvm_unreachable("NYI"); + } +} + void CIRGenModule::Release() { + buildDeferred(); // TODO: buildVTablesOpportunistically(); // TODO: applyGlobalValReplacements(); // TODO: applyReplacements(); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 8cd3d4ba3a4d..cc0b2bb3acf8 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -143,6 +143,22 @@ class CIRGenModule { LValueBaseInfo *BaseInfo = nullptr, bool forPointeeType = false); + /// A queue of (optional) vtables to consider emitting. + std::vector DeferredVTables; + + /// This contains all the decls which have definitions but which are deferred + /// for emission and therefore should only be output if they are actually + /// used. If a decl is in this, then it is known to have not been referenced + /// yet. + std::map DeferredDecls; + + // This is a list of deferred decls which we have seen that *are* actually + // referenced. These get code generated when the module is done. + std::vector DeferredDeclsToEmit; + void addDeferredDeclToEmit(clang::GlobalDecl GD) { + DeferredDeclsToEmit.emplace_back(GD); + } + void buildTopLevelDecl(clang::Decl *decl); /// Emit code for a single global function or var decl. Forward declarations @@ -176,6 +192,9 @@ class CIRGenModule { mlir::Value GetGlobalValue(const clang::Decl *D); std::nullptr_t getModuleDebugInfo() { return nullptr; } + /// Emit any needed decls for which code generation was deferred. + void buildDeferred(); + // Finalize CIR code generation. void Release(); From 5f249bd3c4c5506ed3311aa19afdfde572d6b003 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 18:43:27 -0400 Subject: [PATCH 0298/2301] [CIR] Add a stubbed out func to support deferred coverage This is for the previously described assertability principle. --- clang/lib/CIR/CIRGenModule.cpp | 8 ++++++++ clang/lib/CIR/CIRGenModule.h | 5 +++++ clang/lib/CIR/CIRGenerator.cpp | 6 ++++++ 3 files changed, 19 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 8034895e56ec..99caa96fdc18 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -530,3 +530,11 @@ void CIRGenModule::Release() { // TODO: FINISH THE REST OF THIS } +void CIRGenModule::AddDeferredUnusedCoverageMapping(Decl *D) { + // Do we need to generate coverage mapping? + if (!codeGenOpts.CoverageMapping) + return; + + llvm_unreachable("NYI"); +} + diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index cc0b2bb3acf8..0bb1a701b9c2 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -190,6 +190,11 @@ class CIRGenModule { llvm::StringRef getMangledName(clang::GlobalDecl GD); mlir::Value GetGlobalValue(const clang::Decl *D); + + /// Stored a deferred empty coverage mapping for an unused and thus + /// uninstrumented top level declaration. + void AddDeferredUnusedCoverageMapping(clang::Decl *D); + std::nullptr_t getModuleDebugInfo() { return nullptr; } /// Emit any needed decls for which code generation was deferred. diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index 9287abce40e6..93daf9047c25 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -99,6 +99,12 @@ void CIRGenerator::HandleInlineFunctionDefinition(FunctionDecl *D) { // void foo() { bar(); } // } A; DeferredInlineMemberFuncDefs.push_back(D); + + // Provide some coverage mapping even for methods that aren't emitted. + // Don't do this for templated classes though, as they may not be + // instantiable. + if (!D->getLexicalDeclContext()->isDependentContext()) + CGM->AddDeferredUnusedCoverageMapping(D); } void CIRGenerator::buildDeferredDecls() { From ea52ba6e6b4444e1cbd5ed22e836f9bad04565e3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 18:45:31 -0400 Subject: [PATCH 0299/2301] [CIR] Support TagDecl definitions for RecordDecls from Sema When Sema calls HandleTagDeclDefinition, add a func to convert the TagDecl type to a CIR type. Only support RecordDecl atm since we don't support Enums yet. --- clang/lib/CIR/CIRGenModule.cpp | 5 +++++ clang/lib/CIR/CIRGenModule.h | 3 +++ clang/lib/CIR/CIRGenTypes.cpp | 27 +++++++++++++++++++++++++++ clang/lib/CIR/CIRGenTypes.h | 4 ++++ clang/lib/CIR/CIRGenerator.cpp | 2 ++ 5 files changed, 41 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 99caa96fdc18..ef2528bb1c58 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -538,3 +538,8 @@ void CIRGenModule::AddDeferredUnusedCoverageMapping(Decl *D) { llvm_unreachable("NYI"); } +void CIRGenModule::UpdateCompletedType(const TagDecl *TD) { + // Make sure that this type is translated. + genTypes.UpdateCompletedType(TD); +} + diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 0bb1a701b9c2..2ec51f1ebb94 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -191,6 +191,9 @@ class CIRGenModule { mlir::Value GetGlobalValue(const clang::Decl *D); + // Make sure that this type is translated. + void UpdateCompletedType(const clang::TagDecl *TD); + /// Stored a deferred empty coverage mapping for an unused and thus /// uninstrumented top level declaration. void AddDeferredUnusedCoverageMapping(clang::Decl *D); diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 70c614204c09..c0afd6326607 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -694,3 +694,30 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeFreeFunctionCall( return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, ChainCall ? 1 : 0, ChainCall); } + +// UpdateCompletedType - When we find the full definition for a TagDecl, +// replace the 'opaque' type we previously made for it if applicable. +void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { + // If this is an enum being completed, then we flush all non-struct types + // from the cache. This allows function types and other things that may be + // derived from the enum to be recomputed. + if (const auto *ED = dyn_cast(TD)) { + llvm_unreachable("NYI"); + } + + // If we completed a RecordDecl that we previously used and converted to an + // anonymous type, then go ahead and complete it now. + const auto *RD = cast(TD); + if (RD->isDependentType()) + return; + + // Only complete if we converted it already. If we haven't converted it yet, + // we'll just do it lazily. + if (recordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr())) + convertRecordDeclType(RD); + + // If necessary, provide the full definition of a type only used with a + // declaration so far. + if (CGM.getModuleDebugInfo()) + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index e007271f30b2..df39d42afdc0 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -172,6 +172,10 @@ class CIRGenTypes { const CIRGenFunctionInfo &arrangeGlobalDeclaration(clang::GlobalDecl GD); + /// UpdateCompletedType - when we find the full definition for a TagDecl, + /// replace the 'opaque' type we previously made for it if applicable. + void UpdateCompletedType(const clang::TagDecl *TD); + /// Free functions are functions that are compatible with an ordinary C /// function pointer type. const CIRGenFunctionInfo & diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index 93daf9047c25..182f5a8f0fcd 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -132,6 +132,8 @@ void CIRGenerator::HandleTagDeclDefinition(TagDecl *D) { // emit deferred decls. HandlingTopLevelDeclRAII HandlingDecl(*this, /*EmitDeferred=*/false); + CGM->UpdateCompletedType(D); + // For MSVC compatibility, treat declarations of static data members with // inline initializers as definitions. if (astCtx->getTargetInfo().getCXXABI().isMicrosoft()) { From d1dbb66e7ad23360d140518877b446417b22db9b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 18:55:52 -0400 Subject: [PATCH 0300/2301] [CIR][NFC] Strip clang:: namespace from some usages in CIRGenCall --- clang/lib/CIR/CIRGenCall.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 9a290f69cd3c..1f328866865e 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -16,9 +16,9 @@ using namespace clang; CIRGenFunctionInfo *CIRGenFunctionInfo::create( unsigned cirCC, bool instanceMethod, bool chainCall, - const clang::FunctionType::ExtInfo &info, - llvm::ArrayRef paramInfos, clang::CanQualType resultType, - llvm::ArrayRef argTypes, RequiredArgs required) { + const FunctionType::ExtInfo &info, + llvm::ArrayRef paramInfos, CanQualType resultType, + llvm::ArrayRef argTypes, RequiredArgs required) { assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); assert(!required.allowsOptionalArgs() || required.getNumRequiredArgs() <= argTypes.size()); @@ -167,7 +167,7 @@ static bool hasInAllocaArgs(CIRGenModule &CGM, CallingConv ExplicitCC, return false; } -mlir::FunctionType CIRGenTypes::GetFunctionType(clang::GlobalDecl GD) { +mlir::FunctionType CIRGenTypes::GetFunctionType(GlobalDecl GD) { const CIRGenFunctionInfo &FI = arrangeGlobalDeclaration(GD); return GetFunctionType(FI); } @@ -244,8 +244,8 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, - mlir::func::CallOp &callOrInvoke, - bool IsMustTail, clang::SourceLocation Loc) { + mlir::func::CallOp &callOrInvoke, bool IsMustTail, + SourceLocation Loc) { // FIXME: We no longer need the types from CallArgs; lift up and simplify assert(Callee.isOrdinary() || Callee.isVirtual()); From b439bdea0a99398fb182e11c082ef2f59eb1af17 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 18:57:38 -0400 Subject: [PATCH 0301/2301] [CIR] Add a mapping from Decl's to cir::Address's for CIRGenFunction This'll be used for, obviously, getting the previously generated cir::Address from the Decl. It's currently unused but will receive usages in a later patch. This also currently doesn't capture all local variables. Notably it isn't generating mappings for parameters. But a later patch rebuilds the parameter generation and thus adding it there would just be redundant. --- clang/lib/CIR/CIRGenDecl.cpp | 2 ++ clang/lib/CIR/CIRGenFunction.h | 11 +++++++++++ 2 files changed, 13 insertions(+) diff --git a/clang/lib/CIR/CIRGenDecl.cpp b/clang/lib/CIR/CIRGenDecl.cpp index b31f9376660f..47a9c89a76ae 100644 --- a/clang/lib/CIR/CIRGenDecl.cpp +++ b/clang/lib/CIR/CIRGenDecl.cpp @@ -82,6 +82,8 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { // TODO: what about emitting lifetime markers for MSVC catch parameters? // TODO: something like @llvm.lifetime.start/end here? revisit this later. emission.Addr = Address{addr, alignment}; + + setAddrOfLocalVar(&D, emission.Addr); return emission; } diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 09efac2f0be3..5f128010d70f 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -286,6 +286,11 @@ class CIRGenFunction { /// dropped. using SymTableTy = llvm::ScopedHashTable; SymTableTy symbolTable; + using DeclMapTy = llvm::DenseMap; + /// LocalDeclMap - This keeps track of the CIR allocas or globals for local C + /// delcs. + DeclMapTy LocalDeclMap; + /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); @@ -316,6 +321,12 @@ class CIRGenFunction { // as soon as we add a DebugInfo type to this class. std::nullptr_t *getDebugInfo() { return nullptr; } + /// Set the address of a local variable. + void setAddrOfLocalVar(const clang::VarDecl *VD, Address Addr) { + assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!"); + LocalDeclMap.insert({VD, Addr}); + } + // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. struct PrototypeWrapper { From 0835856096905b9523ee3df7fd83e1bfacaab6d2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:02:41 -0400 Subject: [PATCH 0302/2301] [CIR][NFC] Add a stubbed out test and stubbed out MLIR td file These will be filled out later, they currently are just a nuisance while diff splitting. --- clang/test/CIR/CodeGen/ctor.cpp | 1 + mlir/include/mlir/Dialect/CIR/IR/CIRAttrDefs.td | 0 2 files changed, 1 insertion(+) create mode 100644 clang/test/CIR/CodeGen/ctor.cpp create mode 100644 mlir/include/mlir/Dialect/CIR/IR/CIRAttrDefs.td diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp new file mode 100644 index 000000000000..5c1031cccc41 --- /dev/null +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -0,0 +1 @@ +// RUN: true diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrDefs.td b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrDefs.td new file mode 100644 index 000000000000..e69de29bb2d1 From e51052e7ec846acdb9d0b59a95e6efc82b240107 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:22:41 -0400 Subject: [PATCH 0303/2301] [CIR] Add a stubbed out `accumulateVBases` for CIRRecordLayoutBuilder Another feature just for the assertion principle --- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp index 8ad369f611cf..ebd7d66dde54 100644 --- a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -38,6 +38,7 @@ struct CIRRecordLowering final { void lower(bool nonVirtualBaseType); void accumulateFields(); + void accumulateVBases(); CharUnits bitsToCharUnits(uint64_t bitOffset) { return astContext.toCharUnitsFromBits(bitOffset); @@ -118,7 +119,9 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) { "Inheritance NYI"); assert(!members.empty() && "Empty CXXRecordDecls NYI"); - assert(!nonVirtualBaseType && "non-irtual base type handling NYI"); + + if (!nonVirtualBaseType) + accumulateVBases(); } llvm::stable_sort(members); @@ -130,6 +133,13 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) { // TODO: implement volatile bit fields } +void CIRRecordLowering::accumulateVBases() { + if (astRecordLayout.hasOwnVFPtr()) + llvm_unreachable("NYI"); + if (astRecordLayout.hasOwnVBPtr()) + llvm_unreachable("NYI"); +} + void CIRRecordLowering::fillOutputFields() { for (auto &member : members) { assert(member.data && "member.data should be valid"); From 1377ad855209431c8da3ba283a327d06bd08c7f3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:24:24 -0400 Subject: [PATCH 0304/2301] [CIR] Accept memberless structs in CIRRecordLayoutBuilder This just needed to insert padding bytes. So just simply fill it out. --- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp index ebd7d66dde54..795e6a30fdf4 100644 --- a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -81,6 +81,11 @@ struct CIRRecordLowering final { /// Fills out the structures that are ultimately consumed. void fillOutputFields(); + void appendPaddingBytes(CharUnits Size) { + if (!Size.isZero()) + fieldTypes.push_back(getByteArrayType(Size)); + } + CIRGenTypes &cirGenTypes; const ASTContext &astContext; const RecordDecl *recordDecl; @@ -110,6 +115,8 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, void CIRRecordLowering::lower(bool nonVirtualBaseType) { assert(!recordDecl->isUnion() && "NYI"); + CharUnits Size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize() + : astRecordLayout.getSize(); accumulateFields(); @@ -118,7 +125,11 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) { assert(cxxRecordDecl->bases().begin() == cxxRecordDecl->bases().end() && "Inheritance NYI"); - assert(!members.empty() && "Empty CXXRecordDecls NYI"); + if (members.empty()) { + appendPaddingBytes(Size); + // TODO: computeVolatileBitFields(); + return; + } if (!nonVirtualBaseType) accumulateVBases(); From bd1cc438e398cf78b6a992bcd8372966a8882af5 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:25:12 -0400 Subject: [PATCH 0305/2301] [CIR][NFC] Do some simple cleanup in CIRRecordLayoutBuilder --- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp index 795e6a30fdf4..4adb73bbf410 100644 --- a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -114,12 +114,16 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, isPacked{isPacked} {} void CIRRecordLowering::lower(bool nonVirtualBaseType) { - assert(!recordDecl->isUnion() && "NYI"); + if (recordDecl->isUnion()) { + llvm_unreachable("NYI"); + } + CharUnits Size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize() : astRecordLayout.getSize(); accumulateFields(); + // RD implies C++ if (cxxRecordDecl) { assert(!astRecordLayout.hasOwnVFPtr() && "accumulateVPtrs() NYI"); assert(cxxRecordDecl->bases().begin() == cxxRecordDecl->bases().end() && From 56cde2d3e3cca62bb56253b1919c7903bc65bd4b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:33:02 -0400 Subject: [PATCH 0306/2301] [CIR] Assert against having module debug info in HandleTagDeclReqDef --- clang/lib/CIR/CIRGenerator.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index 182f5a8f0fcd..26a06ab0dd51 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -19,7 +19,6 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" -#include "clang/Basic/TargetInfo.h" #include "clang/CIR/CIRGenerator.h" using namespace cir; @@ -152,4 +151,7 @@ void CIRGenerator::HandleTagDeclRequiredDefinition(const TagDecl *D) { // Don't allow re-entrant calls to CIRGen triggered by PCH deserialization to // emit deferred decls. HandlingTopLevelDeclRAII HandlingDecl(*this, /*EmitDeferred=*/false); + + if (CGM->getModuleDebugInfo()) + llvm_unreachable("NYI"); } From 28d08f24f6fbb389478f65397337b8cfac3ab670 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:37:13 -0400 Subject: [PATCH 0307/2301] [CIR][NFC] Add a getter for the ASTContext for CIRGenCXXABI --- clang/lib/CIR/CIRGenCXXABI.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index bf1dce9ede3d..5a5d6e30813a 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -34,6 +34,8 @@ class CIRGenCXXABI { CIRGenCXXABI(CIRGenModule &CGM) : CGM{CGM}, MangleCtx(CGM.getASTContext().createMangleContext()) {} + clang::ASTContext &getContext() const { return CGM.getASTContext(); } + public: /// Similar to AddedStructorArgs, but only notes the number of additional /// arguments. From 26d2818242671a021f69ac42f47ce6e2e18c25a7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:39:14 -0400 Subject: [PATCH 0308/2301] [CIR] Add some extra assertions to buildTopLevelDecl --- clang/lib/CIR/CIRGenModule.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index ef2528bb1c58..e88f21059468 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -246,7 +246,17 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { CurCGF = nullptr; } +// buildTopLevelDecl - Emit code for a single top level declaration. void CIRGenModule::buildTopLevelDecl(Decl *decl) { + // Ignore dependent declarations + if (decl->isTemplated()) + return; + + // Consteval function shouldn't be emitted. + if (auto *FD = dyn_cast(decl)) + if (FD->isConsteval()) + return; + switch (decl->getKind()) { default: assert(false && "Not yet implemented"); From dfb7834834d138bdccbc0b99cf9a48087e8a9da5 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:39:49 -0400 Subject: [PATCH 0309/2301] [CIR] Add extra assertion to buildGlobal --- clang/lib/CIR/CIRGenModule.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index e88f21059468..9b3e31a2ac42 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -224,6 +224,7 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { const auto *Global = cast(GD.getDecl()); assert(!Global->hasAttr() && "NYI"); + assert(!Global->hasAttr() && "NYI"); assert(!Global->hasAttr() && "NYI"); assert(!Global->hasAttr() && "NYI"); assert(!langOpts.CUDA && "NYI"); From 20d8eed298d4998838b7f287a811218bbd2175c7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:42:59 -0400 Subject: [PATCH 0310/2301] [CIR] buildGlobal -- don't return early for decls This was a mistake, we only wanted to return early here for non-forced externally visible definitions. --- clang/lib/CIR/CIRGenModule.cpp | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 9b3e31a2ac42..df716f650253 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -230,11 +230,15 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { assert(!langOpts.CUDA && "NYI"); assert(!langOpts.OpenMP && "NYI"); - const auto *FD = dyn_cast(Global); - assert(FD && "Only FunctionDecl supported as of here"); - if (!FD->doesThisDeclarationHaveABody()) { - assert(!FD->doesDeclarationForceExternallyVisibleDefinition() && "NYI"); - return; + // Ignore declarations, they will be emitted on their first use. + if (const auto *FD = dyn_cast(Global)) { + // Forward declarations are emitted lazily on first use. + if (!FD->doesThisDeclarationHaveABody()) { + if (!FD->doesDeclarationForceExternallyVisibleDefinition()) + return; + } + } else { + llvm_unreachable("NYI"); } assert(MustBeEmitted(Global) || @@ -245,6 +249,7 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { auto fn = CGF.buildFunction(cast(GD.getDecl())); theModule.push_back(fn); CurCGF = nullptr; + return; } // buildTopLevelDecl - Emit code for a single top level declaration. @@ -553,4 +558,3 @@ void CIRGenModule::UpdateCompletedType(const TagDecl *TD) { // Make sure that this type is translated. genTypes.UpdateCompletedType(TD); } - From 618ab7df90291f5464b472260d64835cfbcda859 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:48:58 -0400 Subject: [PATCH 0311/2301] [CIR] Add a hacky temporary GetGlobalValue that gets Fns from the module This is kinda gross, but it'll suffice for now. When generating functions later in `buildGlobalFunctionDefinition` we should add the functions as they are generated to a mapping from their mangled name to their FuncOp. --- clang/lib/CIR/CIRGenModule.cpp | 12 ++++++++++++ clang/lib/CIR/CIRGenModule.h | 3 ++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index df716f650253..0eabdd28490d 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -499,6 +499,18 @@ void CIRGenModule::buildDeferred() { } } +// TODO: this is gross, make a map +mlir::Operation *CIRGenModule::GetGlobalValue(StringRef Name) { + for (auto const &op : + theModule.getBodyRegion().front().getOps()) + if (auto Fn = llvm::cast(op)) { + if (Name == Fn.getName()) + return Fn; + } + + return nullptr; +} + void CIRGenModule::Release() { buildDeferred(); // TODO: buildVTablesOpportunistically(); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 2ec51f1ebb94..a6977a31f1fc 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -191,6 +191,8 @@ class CIRGenModule { mlir::Value GetGlobalValue(const clang::Decl *D); + mlir::Operation *GetGlobalValue(llvm::StringRef Ref); + // Make sure that this type is translated. void UpdateCompletedType(const clang::TagDecl *TD); @@ -206,7 +208,6 @@ class CIRGenModule { // Finalize CIR code generation. void Release(); - void emitError(const llvm::Twine &message) { theModule.emitError(message); } private: From f9059467c11aa8067ea26d5e9d546484166297e5 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 19:58:59 -0400 Subject: [PATCH 0312/2301] [CIR] Only emit a global if we must and assert that that's the only case For the assertion principle. Once we have VarDecls here we'll need to flesh that out as well. --- clang/lib/CIR/CIRGenModule.cpp | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 0eabdd28490d..94ff47f2303e 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -241,15 +241,24 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { llvm_unreachable("NYI"); } - assert(MustBeEmitted(Global) || - MayBeEmittedEagerly(Global) && "Delayed emission NYI"); - - CIRGenFunction CGF{*this, builder}; - CurCGF = &CGF; - auto fn = CGF.buildFunction(cast(GD.getDecl())); - theModule.push_back(fn); - CurCGF = nullptr; - return; + // Defer code generation to first use when possible, e.g. if this is an inline + // function. If the global mjust always be emitted, do it eagerly if possible + // to benefit from cache locality. + if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) { + CIRGenFunction CGF{*this, builder}; + CurCGF = &CGF; + auto fn = CGF.buildFunction(cast(GD.getDecl())); + theModule.push_back(fn); + CurCGF = nullptr; + return; + } + + // If we're deferring emission of a C++ variable with an initializer, remember + // the order in which it appeared on the file. + if (getLangOpts().CPlusPlus && isa(Global) && + cast(Global)->hasInit()) { + llvm_unreachable("NYI"); + } } // buildTopLevelDecl - Emit code for a single top level declaration. From 33a6cd0b37b5d77a089e7f2f40627eb0b40596fa Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 20:00:56 -0400 Subject: [PATCH 0313/2301] [CIR] Cover any extra decls we might see in buildGlobal with deferrment If anything gets through the previous cases just add it to the list of DeferredDeclsToEmit (if must be emitted) or DeferredDecls if not. --- clang/lib/CIR/CIRGenModule.cpp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 94ff47f2303e..b718b43004b5 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -259,6 +259,20 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { cast(Global)->hasInit()) { llvm_unreachable("NYI"); } + + llvm::StringRef MangledName = getMangledName(GD); + if (GetGlobalValue(MangledName) != nullptr) { + // The value has already been used and should therefore be emitted. + addDeferredDeclToEmit(GD); + } else if (MustBeEmitted(Global)) { + // The value must be emitted, but cannot be emitted eagerly. + assert(!MayBeEmittedEagerly(Global)); + addDeferredDeclToEmit(GD); + } else { + // Otherwise, remember that we saw a deferred decl with this name. The first + // use of the mangled name will cause it to move into DeferredDeclsToEmit. + DeferredDecls[MangledName] = GD; + } } // buildTopLevelDecl - Emit code for a single top level declaration. From 674a4d130c702eb56dba720a8c90fbd90e500709 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 20:25:10 -0400 Subject: [PATCH 0314/2301] [CIR] Add buildGlobalDefinition and call it from a required emitted global This covers a few more required assertions and will be a point of further forking of behavior when VarDecls, CXXMethodDecls, etc are implemented. --- clang/lib/CIR/CIRGenModule.cpp | 43 ++++++++++++++++++++++++++++++---- clang/lib/CIR/CIRGenModule.h | 5 ++++ 2 files changed, 43 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index b718b43004b5..409b54ff6998 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -245,11 +245,8 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { // function. If the global mjust always be emitted, do it eagerly if possible // to benefit from cache locality. if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) { - CIRGenFunction CGF{*this, builder}; - CurCGF = &CGF; - auto fn = CGF.buildFunction(cast(GD.getDecl())); - theModule.push_back(fn); - CurCGF = nullptr; + // Emit the definition if it can't be deferred. + buildGlobalDefinition(GD); return; } @@ -275,6 +272,36 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { } } +void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { + const auto *D = cast(GD.getDecl()); + + if (const auto *FD = dyn_cast(D)) { + // At -O0, don't generate CIR for functions with available_externally + // linkage. + if (!shouldEmitFunction(GD)) + return; + + if (const auto *Method = dyn_cast(D)) { + llvm_unreachable("NYI"); + } + + if (FD->isMultiVersion()) + llvm_unreachable("NYI"); + + CIRGenFunction CGF{*this, builder}; + CurCGF = &CGF; + auto fn = CGF.buildFunction(cast(GD.getDecl())); + theModule.push_back(fn); + CurCGF = nullptr; + return; + } + + if (const auto *VD = dyn_cast(D)) + llvm_unreachable("NYI"); + + llvm_unreachable("Invalid argument to buildGlobalDefinition()"); +} + // buildTopLevelDecl - Emit code for a single top level declaration. void CIRGenModule::buildTopLevelDecl(Decl *decl) { // Ignore dependent declarations @@ -581,6 +608,12 @@ void CIRGenModule::Release() { // TODO: FINISH THE REST OF THIS } + +bool CIRGenModule::shouldEmitFunction(GlobalDecl GD) { + // TODO: implement this -- requires defining linkage for CIR + return true; +} + void CIRGenModule::AddDeferredUnusedCoverageMapping(Decl *D) { // Do we need to generate coverage mapping? if (!codeGenOpts.CoverageMapping) diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index a6977a31f1fc..26e6909c63f2 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -196,6 +196,9 @@ class CIRGenModule { // Make sure that this type is translated. void UpdateCompletedType(const clang::TagDecl *TD); + void buildGlobalDefinition(clang::GlobalDecl D, + mlir::Operation *Op = nullptr); + /// Stored a deferred empty coverage mapping for an unused and thus /// uninstrumented top level declaration. void AddDeferredUnusedCoverageMapping(clang::Decl *D); @@ -208,6 +211,8 @@ class CIRGenModule { // Finalize CIR code generation. void Release(); + bool shouldEmitFunction(clang::GlobalDecl GD); + void emitError(const llvm::Twine &message) { theModule.emitError(message); } private: From 02731221644aa2242e4603398c1d30c8dbf6ff78 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 20:46:31 -0400 Subject: [PATCH 0315/2301] [CIR][NFC] Reflow a comment and an assertion --- clang/lib/CIR/CIRGenModule.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 409b54ff6998..e791dceaf951 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -423,9 +423,9 @@ StringRef CIRGenModule::getMangledName(GlobalDecl GD) { return MangledDeclNames[CanonicalGD] = Result.first->first(); } -/// GetOrCreateCIRFunction - If the specified mangled name is not in the module, -/// create and return a CIR Function with the specified type. If there is -/// something in the module with the specified name, return it potentially +/// GetOrCreateCIRFunction - If the specified mangled name is not in the +/// module, create and return a CIR Function with the specified type. If there +/// is something in the module with the specified name, return it potentially /// bitcasted to the right type. /// /// If D is non-null, it specifies a decl that corresponded to this. This is @@ -479,7 +479,9 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // sense for MLIR // assert(F->getName().getStringRef() == MangledName && "name was uniqued!"); - // TODO: set function attributes from the declaration + if (D) + ; // TODO: set function attributes from the declaration + // TODO: set function attributes from the missing attributes param // TODO: Handle extra attributes From 2705733ceedeedc52234c0309eb74a32eb36a0b8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 20:46:56 -0400 Subject: [PATCH 0316/2301] [CIR] Implement getLoc for CGM instead of delegating to CGF We currently need this before the CGF exists, so just implement it out right. We probably could (and should) invert this and have the CGF versions just delgate. TODO --- clang/lib/CIR/CIRGenModule.cpp | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index e791dceaf951..d62de8f3071f 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -502,18 +502,25 @@ mlir::Value CIRGenModule::GetGlobalValue(const Decl *D) { } mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { - assert(CurCGF); - return CurCGF->getLoc(SLoc); + const SourceManager &SM = astCtx.getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(SLoc); + StringRef Filename = PLoc.getFilename(); + return mlir::FileLineColLoc::get(builder.getStringAttr(Filename), + PLoc.getLine(), PLoc.getColumn()); } mlir::Location CIRGenModule::getLoc(SourceRange SLoc) { - assert(CurCGF); - return CurCGF->getLoc(SLoc); + mlir::Location B = getLoc(SLoc.getBegin()); + mlir::Location E = getLoc(SLoc.getEnd()); + SmallVector locs = {B, E}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); } mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { - assert(CurCGF); - return CurCGF->getLoc(lhs, rhs); + SmallVector locs = {lhs, rhs}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); } void CIRGenModule::buildDeferred() { From 4fbd5d3ac213322e421493e43c36e51baa75853d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 20:48:47 -0400 Subject: [PATCH 0317/2301] [CIR] Allow deferral of emisison for GetOrCreateCIRFunction If we've already marked it Deferred then promote it to DeferredToEmit. Else, if it's C++ we might depend on non-buildTopLevelDecl decls and thus need to walk the FunctionDecl upwards to see if we're in a CXXRecordDecl to find one to mark as deferred and to emit. --- clang/lib/CIR/CIRGenModule.cpp | 44 ++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index d62de8f3071f..a76871629bf2 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -354,7 +354,6 @@ mlir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, bool DontDefer, ForDefinition_t IsForDefinition) { assert(!ForVTable && "NYI"); - assert(!DontDefer && "NYI"); assert(!cast(GD.getDecl())->isConsteval() && "consteval function should never be emitted"); @@ -486,7 +485,48 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // TODO: Handle extra attributes - assert(!DontDefer && "Only not DontDefer supported so far"); + if (!DontDefer) { + // All MSVC dtors other than the base dtor are linkonce_odr and delegate to + // each other bottoming out wiht the base dtor. Therefore we emit non-base + // dtors on usage, even if there is no dtor definition in the TU. + if (D && isa(D)) + llvm_unreachable("NYI"); + + // This is the first use or definition of a mangled name. If there is a + // deferred decl with this name, remember that we need to emit it at the end + // of the file. + auto DDI = DeferredDecls.find(MangledName); + if (DDI != DeferredDecls.end()) { + // Move the potentially referenced deferred decl to the + // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we + // don't need it anymore). + addDeferredDeclToEmit(DDI->second); + DeferredDecls.erase(DDI); + + // Otherwise, there are cases we have to worry about where we're using a + // declaration for which we must emit a definition but where we might not + // find a top-level definition. + // - member functions defined inline in their classes + // - friend functions defined inline in some class + // - special member functions with implicit definitions + // If we ever change our AST traversal to walk into class methods, this + // will be unnecessary. + // + // We also don't emit a definition for a function if it's going to be an + // entry in a vtable, unless it's already marked as used. + } else if (getLangOpts().CPlusPlus && D) { + // Look for a declaration that's lexically in a record. + for (const auto *FD = cast(D)->getMostRecentDecl(); FD; + FD = FD->getPreviousDecl()) { + if (isa(FD->getLexicalDeclContext())) { + if (FD->doesThisDeclarationHaveABody()) { + addDeferredDeclToEmit(GD.getWithDecl(FD)); + break; + } + } + } + } + } if (!IsIncompleteFunction) { assert(F.getFunctionType() == Ty); From 159b8f03b0ff4009b291615ec4e60e0cb6337fec Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 20:54:19 -0400 Subject: [PATCH 0318/2301] [CIR] Fix an assertion that was misplaced This doesn't make sense, this either shouldn't have the Ty getter or it should be wrapped in the else case. We eventually use this so just implement it properly instead of removing it. --- clang/lib/CIR/CIRGenModule.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index a76871629bf2..3dc437fad495 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -358,9 +358,10 @@ mlir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, assert(!cast(GD.getDecl())->isConsteval() && "consteval function should never be emitted"); - assert(!Ty && "No code paths implemented that have this set yet"); - const auto *FD = cast(GD.getDecl()); - Ty = getTypes().ConvertType(FD->getType()); + if (!Ty) { + const auto *FD = cast(GD.getDecl()); + Ty = getTypes().ConvertType(FD->getType()); + } assert(!dyn_cast(GD.getDecl()) && "NYI"); From dfac3b408a3b21dbedaa7977d4a4e6a8d4e1cb01 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 20:55:39 -0400 Subject: [PATCH 0319/2301] [CIR][NFC] Restructure some asserts to be a bit clearer --- clang/lib/CIR/CIRGenModule.cpp | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 3dc437fad495..c8a0e94d0776 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -440,13 +440,12 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // Any attempts to use a MultiVersion function should result in retrieving the // iFunc instead. Name mangling will handle the rest of the changes. - auto const *FD = cast_or_null(D); - assert(FD && "Only FD supported so far"); - - if (getLangOpts().OpenMP) - llvm_unreachable("NYI"); - if (FD->isMultiVersion()) - llvm_unreachable("NYI"); + if (const auto *FD = cast_or_null(D)) { + if (getLangOpts().OpenMP) + llvm_unreachable("open MP NYI"); + if (FD->isMultiVersion()) + llvm_unreachable("NYI"); + } mlir::Value Entry = GetGlobalValue(GD.getDecl()); @@ -468,12 +467,16 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( IsIncompleteFunction = true; } + auto *FD = llvm::cast(D); + assert(FD && "Only FunctionDecl supported so far."); auto fnLoc = getLoc(FD->getSourceRange()); // TODO: CodeGen includeds the linkage (ExternalLinkage) and only passes the // mangledname if Entry is nullptr mlir::FuncOp F = mlir::FuncOp::create(fnLoc, MangledName, FTy); - assert(!Entry && "NYI"); + if (Entry) { + llvm_unreachable("NYI"); + } // TODO: This might not be valid, seems the uniqueing system doesn't make // sense for MLIR From 9574e4ec5be0eb5c58fa1dcbae462e6391ae4609 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 21:03:24 -0400 Subject: [PATCH 0320/2301] [CIR] Add buildGlobalFunctionDefinition with new asserts Add another fn from the buildTopLevelDecl walk. This currently is pretty bare bones and only adds some TODOs and asserts but will be more fleshed out later. --- clang/lib/CIR/CIRGenModule.cpp | 31 +++++++++++++++++++++++++------ clang/lib/CIR/CIRGenModule.h | 1 + 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index c8a0e94d0776..ffc17399bdb3 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -272,6 +272,30 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { } } +void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, + mlir::Operation *Op) { + auto const *D = cast(GD.getDecl()); + + // TODO: setFunctionLinkage + // TODO: setGVProperties + // TODO: MaubeHandleStaticInExternC + // TODO: maybeSetTrivialComdat + // TODO: setLLVMFunctionFEnvAttributes + + CIRGenFunction CGF{*this, builder}; + CurCGF = &CGF; + auto fn = CGF.buildFunction(cast(GD.getDecl())); + theModule.push_back(fn); + CurCGF = nullptr; + + // TODO: setNonAliasAttributes + // TODO: SetLLVMFunctionAttributesForDeclaration + + assert(!D->getAttr() && "NYI"); + assert(!D->getAttr() && "NYI"); + assert(!D->getAttr() && "NYI"); +} + void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { const auto *D = cast(GD.getDecl()); @@ -287,12 +311,7 @@ void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { if (FD->isMultiVersion()) llvm_unreachable("NYI"); - - CIRGenFunction CGF{*this, builder}; - CurCGF = &CGF; - auto fn = CGF.buildFunction(cast(GD.getDecl())); - theModule.push_back(fn); - CurCGF = nullptr; + buildGlobalFunctionDefinition(GD, Op); return; } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 26e6909c63f2..fe264cd03e5b 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -198,6 +198,7 @@ class CIRGenModule { void buildGlobalDefinition(clang::GlobalDecl D, mlir::Operation *Op = nullptr); + void buildGlobalFunctionDefinition(clang::GlobalDecl D, mlir::Operation *Op); /// Stored a deferred empty coverage mapping for an unused and thus /// uninstrumented top level declaration. From 94c7b178b08923d60474466d19d1b04a584b6bdc Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 21:09:14 -0400 Subject: [PATCH 0321/2301] [CIR] If we've already created a fn just return it in GetOrCreateCIRFn As the title says, just lazily leave and don't attempt to recreate here. --- clang/lib/CIR/CIRGenModule.cpp | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index ffc17399bdb3..d97ecc44e535 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -466,11 +466,23 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( llvm_unreachable("NYI"); } - mlir::Value Entry = GetGlobalValue(GD.getDecl()); + // Lookup the entry, lazily creating it if necessary. + mlir::Operation *Entry = GetGlobalValue(MangledName); + if (Entry) { + // TODO: WeakRefReferences + // TODO: Handle dropped DLL attributes. + // TODO: If there are two attempts to define the same mangled name, issue an + // error. + + auto Fn = cast(Entry); + if (Fn && Fn.getFunctionType() == Ty) { + return Fn; + } + llvm_unreachable("NYI"); - if (Entry) - assert(false && "Code path NYI since we're not yet using this for " - "generating fucntion decls"); + // TODO: clang checks here if this is a llvm::GlobalAlias... how will we + // support this? + } // This function doesn't have a complete type (for example, the return type is // an incompmlete struct). Use a fake type instead, and make sure not to try From 1e8d788b0fdfdcc92b555fd28cb6f7b81c8cbe4e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 21:10:10 -0400 Subject: [PATCH 0322/2301] [CIR] Flesh out getMangledNameImpl but explicitly opt out The only real change is the explicitly opted out call to MC.mangeName. Once it's in real usage we'll just remove the `&& false`. --- clang/lib/CIR/CIRGenModule.cpp | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index d97ecc44e535..84fb48f36dec 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -405,21 +405,32 @@ static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, // TODO: support the module name hash auto ShouldMangle = MC.shouldMangleDeclName(ND); - assert(!ShouldMangle && "Mangling not actually implemented yet."); - auto *II = ND->getIdentifier(); - assert(II && "Attempt to mangle unnamed decl."); + // Explicit ignore mangling for now + if (ShouldMangle && false) { + MC.mangleName(GD.getWithDecl(ND), Out); + } else { + auto *II = ND->getIdentifier(); + assert(II && "Attempt to mangle unnamed decl."); - const auto *FD = dyn_cast(ND); - assert(FD && "Only FunctionDecl supported"); - assert(FD->getType()->castAs()->getCallConv() != - CC_X86RegCall && - "NYI"); - assert(!FD->hasAttr() && "NYI"); + const auto *FD = dyn_cast(ND); + assert(FD && "Only FunctionDecl supported"); + assert(FD->getType()->castAs()->getCallConv() != + CC_X86RegCall && + "NYI"); + assert(!FD->hasAttr() && "NYI"); - Out << II->getName(); + Out << II->getName(); + } - assert(!ShouldMangle && "Mangling not actually implemented yet."); + // Check if the module name hash should be appended for internal linkage + // symbols. This should come before multi-version target suffixes are + // appendded. This is to keep the name and module hash suffix of the internal + // linkage function together. The unique suffix should only be added when name + // mangling is done to make sure that the final name can be properly + // demangled. For example, for C functions without prototypes, name mangling + // is not done and the unique suffix should not be appended then. + // TODO: assert(!isUniqueInternalLinkageDecl(GD, CGM) && "NYI"); if (const auto *FD = dyn_cast(ND)) { assert(!FD->isMultiVersion() && "NYI"); From b0d2018e075846acac7a4fe9acc88d3b19936bd9 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 21:13:59 -0400 Subject: [PATCH 0323/2301] [CIR] Add an unused Fn to check if a Fn is to be sanitized or not --- clang/lib/CIR/CIRGenModule.cpp | 33 +++++++++++++++++++++++++++++---- clang/lib/CIR/CIRGenModule.h | 3 +++ 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 84fb48f36dec..8a4573da9def 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -42,6 +42,7 @@ #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/Basic/Diagnostic.h" +#include "clang/Basic/NoSanitizeList.h" #include "clang/Basic/SourceLocation.h" #include "clang/CIR/CIRGenerator.h" #include "clang/CIR/LowerToLLVM.h" @@ -89,10 +90,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, const clang::CodeGenOptions &CGO, DiagnosticsEngine &Diags) : builder(&context), astCtx(astctx), langOpts(astctx.getLangOpts()), - codeGenOpts(CGO), theModule{mlir::ModuleOp::create( - builder.getUnknownLoc())}, - Diags(Diags), target(astCtx.getTargetInfo()), - ABI(createCXXABI(*this)), genTypes{*this} {} + codeGenOpts(CGO), + theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), + target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), + genTypes{*this} {} CIRGenModule::~CIRGenModule() {} @@ -709,6 +710,30 @@ bool CIRGenModule::shouldEmitFunction(GlobalDecl GD) { return true; } +bool CIRGenModule::isInNoSanitizeList(SanitizerMask Kind, mlir::FuncOp Fn, + SourceLocation Loc) const { + const auto &NoSanitizeL = getASTContext().getNoSanitizeList(); + // NoSanitize by function name. + if (NoSanitizeL.containsFunction(Kind, Fn.getName())) + llvm_unreachable("NYI"); + // NoSanitize by location. + if (Loc.isValid()) + return NoSanitizeL.containsLocation(Kind, Loc); + // If location is unknown, this may be a compiler-generated function. Assume + // it's located in the main file. + auto &SM = getASTContext().getSourceManager(); + FileEntryRef MainFile = *SM.getFileEntryRefForID(SM.getMainFileID()); + if (NoSanitizeL.containsFile(Kind, MainFile.getName())) + return true; + + // Check "src" prefix. + if (Loc.isValid()) + return NoSanitizeL.containsLocation(Kind, Loc); + // If location is unknown, this may be a compiler-generated function. Assume + // it's located in the main file. + return NoSanitizeL.containsFile(Kind, MainFile.getName()); +} + void CIRGenModule::AddDeferredUnusedCoverageMapping(Decl *D) { // Do we need to generate coverage mapping? if (!codeGenOpts.CoverageMapping) diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index fe264cd03e5b..53893770dfa3 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -171,6 +171,9 @@ class CIRGenModule { /// false, the definition can be emitted lazily if it's used. bool MustBeEmitted(const clang::ValueDecl *D); + bool isInNoSanitizeList(clang::SanitizerMask Kind, mlir::FuncOp Fn, + clang::SourceLocation) const; + /// Determine whether the definition can be emitted eagerly, or should be /// delayed until the end of the translation unit. This is relevant for /// definitions whose linkage can change, e.g. implicit function instantions From 95fac7679648f3c78f3bd233842c62df338ab9d1 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 21:15:22 -0400 Subject: [PATCH 0324/2301] [CIR] Add a stubbed out fn for setting DSO local on an Op We'll need to implement a bunch of mlir attributes soon. --- clang/lib/CIR/CIRGenModule.cpp | 4 ++++ clang/lib/CIR/CIRGenModule.h | 2 ++ 2 files changed, 6 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 8a4573da9def..2eb17234c1de 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -454,6 +454,10 @@ StringRef CIRGenModule::getMangledName(GlobalDecl GD) { return MangledDeclNames[CanonicalGD] = Result.first->first(); } +void CIRGenModule::setDSOLocal(mlir::Operation *Op) const { + // TODO: Op->setDSOLocal +} + /// GetOrCreateCIRFunction - If the specified mangled name is not in the /// module, create and return a CIR Function with the specified type. If there /// is something in the module with the specified name, return it potentially diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 53893770dfa3..a8b73a99771f 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -167,6 +167,8 @@ class CIRGenModule { mlir::Type getCIRType(const clang::QualType &type); + void setDSOLocal(mlir::Operation *Op) const; + /// Determine whether the definition must be emitted; if this returns \c /// false, the definition can be emitted lazily if it's used. bool MustBeEmitted(const clang::ValueDecl *D); From d6127574d2ce4a058f2ee8806ea95934f6a2cc4e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 21:38:02 -0400 Subject: [PATCH 0325/2301] [CIR] Rename buildFunction to generateCode to match CodeGen The signature and implementation will be changing as well incrementally to match CodeGen's behavior. --- clang/lib/CIR/CIRGenFunction.cpp | 4 +++- clang/lib/CIR/CIRGenFunction.h | 7 +++---- clang/lib/CIR/CIRGenModule.cpp | 7 +++++-- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 8f3141d79d18..3dccb5f06678 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -295,7 +295,9 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { insertCleanupAndLeave(currBlock); } -mlir::FuncOp CIRGenFunction::buildFunction(const FunctionDecl *FD) { +mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, + const CIRGenFunctionInfo &FnInfo) { + auto *FD = cast(GD.getDecl()); // Create a scope in the symbol table to hold variable declarations. SymTableScopeTy varScope(symbolTable); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 5f128010d70f..1833ef32cc28 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -291,7 +291,6 @@ class CIRGenFunction { /// delcs. DeclMapTy LocalDeclMap; - /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); @@ -498,6 +497,9 @@ class CIRGenFunction { mlir::Type condType, mlir::cir::CaseAttr &caseEntry); + mlir::FuncOp generateCode(clang::GlobalDecl GD, + const CIRGenFunctionInfo &FnInfo); + struct AutoVarEmission { const clang::VarDecl *Variable; /// The address of the alloca for languages with explicit address space @@ -594,9 +596,6 @@ class CIRGenFunction { clang::QualType DstTy, clang::SourceLocation Loc); - // Emit a new function and add it to the MLIR module. - mlir::FuncOp buildFunction(const clang::FunctionDecl *FD); - /// Determine whether the given initializer is trivial in the sense /// that it requires no code to be generated. bool isTrivialInitializer(const clang::Expr *Init); diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 2eb17234c1de..8fef52e48427 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -277,6 +277,9 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, mlir::Operation *Op) { auto const *D = cast(GD.getDecl()); + // Compute the function info and CIR type. + const CIRGenFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); + // TODO: setFunctionLinkage // TODO: setGVProperties // TODO: MaubeHandleStaticInExternC @@ -285,8 +288,8 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, CIRGenFunction CGF{*this, builder}; CurCGF = &CGF; - auto fn = CGF.buildFunction(cast(GD.getDecl())); - theModule.push_back(fn); + auto Fn = CGF.generateCode(GD, FI); + theModule.push_back(Fn); CurCGF = nullptr; // TODO: setNonAliasAttributes From 3df38b20d8977846ae9f34a70cf8af97fa45dce8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 21:52:53 -0400 Subject: [PATCH 0326/2301] [CIR][NFC] Rename FnRetTy to FnRetCIRTy CodeGen uses the same variable name to track the QualType. So rename this type to avoid the conflict later. --- clang/lib/CIR/CIRGenFunction.cpp | 9 +++++---- clang/lib/CIR/CIRGenFunction.h | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 3dccb5f06678..1766d05393f0 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -240,8 +240,9 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { // TODO: insert actual scope cleanup HERE (dtors and etc) // If there's anything to return, load it first. - if (CGF.FnRetTy.has_value()) { - auto val = builder.create(retLoc, *CGF.FnRetTy, *CGF.FnRetAlloca); + if (CGF.FnRetCIRTy.has_value()) { + auto val = + builder.create(retLoc, *CGF.FnRetCIRTy, *CGF.FnRetAlloca); builder.create(retLoc, llvm::ArrayRef(val.getResult())); } else { builder.create(retLoc); @@ -308,7 +309,7 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, FnRetQualTy = FD->getReturnType(); mlir::TypeRange FnTyRange = {}; if (!FnRetQualTy->isVoidType()) { - FnRetTy = getCIRType(FnRetQualTy); + FnRetCIRTy = getCIRType(FnRetQualTy); } auto funcType = getTypes().GetFunctionType(GlobalDecl(FD)); @@ -355,7 +356,7 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, // When the current function is not void, create an address to store the // result value. - if (FnRetTy.has_value()) + if (FnRetCIRTy.has_value()) buildAndUpdateRetAlloca(FnRetQualTy, FnEndLoc, CGM.getNaturalTypeAlignment(FnRetQualTy)); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 1833ef32cc28..6e0fd3955f72 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -262,7 +262,7 @@ class CIRGenFunction { }; clang::QualType FnRetQualTy; - std::optional FnRetTy; + std::optional FnRetCIRTy; std::optional FnRetAlloca; // Holds the Decl for the current outermost non-closure context From 2c4c42c4ce0a835f28136dc6168e0e29672d7e06 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 21:58:26 -0400 Subject: [PATCH 0327/2301] [CIR] Add stubbed out HasThisReturn for CIRGenCXXABI This is only used for MSVC things, so we just rely on the default false implementation. --- clang/lib/CIR/CIRGenCXXABI.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 5a5d6e30813a..7c99b13a863d 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -91,6 +91,14 @@ class CIRGenCXXABI { /// Gets the mangle context. clang::MangleContext &getMangleContext() { return *MangleCtx; } + /// Returns true if the given constructor or destructor is one of the kinds + /// that the ABI says returns 'this' (only applies when called non-virtually + /// for destructors). + /// + /// There currently is no way to indicate if a destructor returns 'this' when + /// called virtually, and CIR generation does not support this case. + virtual bool HasThisReturn(clang::GlobalDecl GD) const { return false; } + virtual ~CIRGenCXXABI(); }; From 9dff661b8087d82c7afcd310de59dbee2dbba90a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 21:59:09 -0400 Subject: [PATCH 0328/2301] [CIR] Add hasMostDerivedReturn for CIRGenCXXABI Again, another MSVC thing that defaults to fasle. --- clang/lib/CIR/CIRGenCXXABI.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 7c99b13a863d..7b74b247e181 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -99,6 +99,10 @@ class CIRGenCXXABI { /// called virtually, and CIR generation does not support this case. virtual bool HasThisReturn(clang::GlobalDecl GD) const { return false; } + virtual bool hasMostDerivedReturn(clang::GlobalDecl GD) const { + return false; + } + virtual ~CIRGenCXXABI(); }; From 3d04645b2a32118cfed64ef200cde7c5bc08d17c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:01:24 -0400 Subject: [PATCH 0329/2301] [CIR] Add buildFunctionArgList for generating an arglist from a GlobalDecl Currently this just passes back what it started with from the GlobalDecl pretty straightforwardly. But these unreachables will be fleshed out as more C++ stuff is added. --- clang/lib/CIR/CIRGenFunction.cpp | 34 ++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 3 +++ 2 files changed, 37 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 1766d05393f0..abe3e4747ec0 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "CIRGenFunction.h" +#include "CIRGenCXXABI.h" #include "CIRGenModule.h" #include "clang/AST/ExprObjC.h" @@ -373,3 +374,36 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, return function; } + +clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, + FunctionArgList &Args) { + const auto *FD = cast(GD.getDecl()); + QualType ResTy = FD->getReturnType(); + + const auto *MD = dyn_cast(FD); + if (MD && MD->isInstance()) { + llvm_unreachable("NYI"); + } + + // The base version of an inheriting constructor whose constructed base is a + // virtual base is not passed any arguments (because it doesn't actually + // call the inherited constructor). + bool PassedParams = true; + if (const auto *CD = dyn_cast(FD)) + llvm_unreachable("NYI"); + + if (PassedParams) { + for (auto *Param : FD->parameters()) { + Args.push_back(Param); + if (!Param->hasAttr()) + continue; + + llvm_unreachable("PassObjectSizeAttr NYI"); + } + } + + if (MD && (isa(MD) || isa(MD))) + llvm_unreachable("NYI"); + + return ResTy; +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 6e0fd3955f72..beb898db7471 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -500,6 +500,9 @@ class CIRGenFunction { mlir::FuncOp generateCode(clang::GlobalDecl GD, const CIRGenFunctionInfo &FnInfo); + clang::QualType buildFunctionArgList(clang::GlobalDecl GD, + FunctionArgList &Args); + struct AutoVarEmission { const clang::VarDecl *Variable; /// The address of the alloca for languages with explicit address space From d6a411ada1958cc62f3fac4d8f355fecd91c8a60 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:07:39 -0400 Subject: [PATCH 0330/2301] [CIR] Get the FnOp from GetOrCreateCIRFn instead of creating inline Rely on the purpose specific GetOrCreateCIRFunction method to get a FuncOp instead of the simpler creation in generateCode --- clang/lib/CIR/CIRGenFunction.cpp | 16 +++++----------- clang/lib/CIR/CIRGenFunction.h | 2 +- clang/lib/CIR/CIRGenModule.cpp | 23 ++++++++++++++++++----- 3 files changed, 24 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index abe3e4747ec0..02a0ae205e9f 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -297,7 +297,7 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { insertCleanupAndLeave(currBlock); } -mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, +mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp fn, const CIRGenFunctionInfo &FnInfo) { auto *FD = cast(GD.getDecl()); // Create a scope in the symbol table to hold variable declarations. @@ -305,22 +305,16 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, const CXXMethodDecl *MD = dyn_cast(FD); assert(!MD && "methods not implemented"); - auto fnLoc = getLoc(FD->getSourceRange()); FnRetQualTy = FD->getReturnType(); mlir::TypeRange FnTyRange = {}; if (!FnRetQualTy->isVoidType()) { FnRetCIRTy = getCIRType(FnRetQualTy); } - auto funcType = getTypes().GetFunctionType(GlobalDecl(FD)); - - mlir::FuncOp function = mlir::FuncOp::create(fnLoc, FD->getName(), funcType); - if (!function) - return nullptr; // In MLIR the entry block of the function is special: it must have the // same argument list as the function itself. - mlir::Block *entryBlock = function.addEntryBlock(); + mlir::Block *entryBlock = fn.addEntryBlock(); // Set the insertion point in the builder to the beginning of the // function body, it will be used throughout the codegen to create @@ -363,16 +357,16 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, // Emit the body of the function. if (mlir::failed(buildFunctionBody(FD->getBody()))) { - function.erase(); + fn.erase(); return nullptr; } assert(builder.getInsertionBlock() && "Should be valid"); } - if (mlir::failed(function.verifyBody())) + if (mlir::failed(fn.verifyBody())) return nullptr; - return function; + return fn; } clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index beb898db7471..6c95091823ef 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -497,7 +497,7 @@ class CIRGenFunction { mlir::Type condType, mlir::cir::CaseAttr &caseEntry); - mlir::FuncOp generateCode(clang::GlobalDecl GD, + mlir::FuncOp generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo); clang::QualType buildFunctionArgList(clang::GlobalDecl GD, diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 8fef52e48427..9946d7f0e5a8 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -279,6 +279,19 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, // Compute the function info and CIR type. const CIRGenFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); + mlir::FunctionType Ty = getTypes().GetFunctionType(FI); + + // Get or create the prototype for the function. + // if (!V || (V.getValueType() != Ty)) + // TODO: Figure out what to do here? llvm uses a GlobalValue for the FuncOp in + // mlir + Op = GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/true, + ForDefinition); + + auto Fn = cast(Op); + // Already emitted. + if (!Fn.isDeclaration()) + return; // TODO: setFunctionLinkage // TODO: setGVProperties @@ -288,8 +301,7 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, CIRGenFunction CGF{*this, builder}; CurCGF = &CGF; - auto Fn = CGF.generateCode(GD, FI); - theModule.push_back(Fn); + CGF.generateCode(GD, Fn, FI); CurCGF = nullptr; // TODO: setNonAliasAttributes @@ -461,9 +473,9 @@ void CIRGenModule::setDSOLocal(mlir::Operation *Op) const { // TODO: Op->setDSOLocal } -/// GetOrCreateCIRFunction - If the specified mangled name is not in the -/// module, create and return a CIR Function with the specified type. If there -/// is something in the module with the specified name, return it potentially +/// GetOrCreateCIRFunction - If the specified mangled name is not in the module, +/// create and return a CIR Function with the specified type. If there is +/// something in the module with the specified name, return it potentially /// bitcasted to the right type. /// /// If D is non-null, it specifies a decl that corresponded to this. This is @@ -523,6 +535,7 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // TODO: CodeGen includeds the linkage (ExternalLinkage) and only passes the // mangledname if Entry is nullptr mlir::FuncOp F = mlir::FuncOp::create(fnLoc, MangledName, FTy); + theModule.push_back(F); if (Entry) { llvm_unreachable("NYI"); From a7e6917e9b81bcce33c81dff11324573f9b90e19 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:10:17 -0400 Subject: [PATCH 0331/2301] [CIR] Tentatively call buildStmt from buildFnBody if non-compound Some case down the road includes a non-compound statement here. Support it here. --- clang/lib/CIR/CIRGenFunction.cpp | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 02a0ae205e9f..6ee0ee096077 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -95,15 +95,6 @@ mlir::Type CIRGenFunction::convertType(QualType T) { return CGM.getTypes().ConvertType(T); } -mlir::LogicalResult CIRGenFunction::buildFunctionBody(const Stmt *Body) { - const CompoundStmt *S = dyn_cast(Body); - assert(S && "expected compound stmt"); - - // We start with function level scope for variables. - SymTableScopeTy varScope(symbolTable); - return buildCompoundStmtWithoutScope(*S); -} - mlir::Location CIRGenFunction::getLoc(SourceLocation SLoc) { const SourceManager &SM = getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(SLoc); @@ -369,6 +360,25 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp fn, return fn; } +mlir::LogicalResult CIRGenFunction::buildFunctionBody(const clang::Stmt *Body) { + // TODO: incrementProfileCounter(Body); + + // We start with function level scope for variables. + SymTableScopeTy varScope(symbolTable); + + auto result = mlir::LogicalResult::success(); + if (const CompoundStmt *S = dyn_cast(Body)) + result = buildCompoundStmtWithoutScope(*S); + else + result = buildStmt(Body, /*useCurrentScope*/ true); + + // This is checked after emitting the function body so we know if there are + // any permitted infinite loops. + // TODO: if (checkIfFunctionMustProgress()) + // CurFn->addFnAttr(llvm::Attribute::MustProgress); + return result; +} + clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, FunctionArgList &Args) { const auto *FD = cast(GD.getDecl()); From 4c969480f47d0a05cb17901cd5332ce62e8442e0 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:12:36 -0400 Subject: [PATCH 0332/2301] [CIR] Add a stubbed out ShouldInstrumentFunction helper method --- clang/lib/CIR/CIRGenFunction.cpp | 11 +++++++++++ clang/lib/CIR/CIRGenFunction.h | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 6ee0ee096077..c67953f5df73 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -360,6 +360,17 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp fn, return fn; } +/// ShouldInstrumentFunction - Return true if the current function should be +/// instrumented with __cyg_profile_func_* calls +bool CIRGenFunction::ShouldInstrumentFunction() { + if (!CGM.getCodeGenOpts().InstrumentFunctions && + !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining && + !CGM.getCodeGenOpts().InstrumentFunctionEntryBare) + return false; + + llvm_unreachable("NYI"); +} + mlir::LogicalResult CIRGenFunction::buildFunctionBody(const clang::Stmt *Body) { // TODO: incrementProfileCounter(Body); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 6c95091823ef..588cc4509bf2 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -605,6 +605,10 @@ class CIRGenFunction { // TODO: this can also be abstrated into common AST helpers bool hasBooleanRepresentation(clang::QualType Ty); + + /// ShouldInstrumentFunction - Return true if the current function should be + /// instrumented with __cyg_profile_func_* calls + bool ShouldInstrumentFunction(); }; } // namespace cir From 1d8ebe7d41cda08c8775d4cc453bd038a529b999 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:13:14 -0400 Subject: [PATCH 0333/2301] [CIR] Add a helper method to lookup from LocalDeclMap --- clang/lib/CIR/CIRGenFunction.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 588cc4509bf2..b3990148d5dd 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -606,6 +606,14 @@ class CIRGenFunction { // TODO: this can also be abstrated into common AST helpers bool hasBooleanRepresentation(clang::QualType Ty); + /// GetAddrOfLocalVar - Return the address of a local variable. + Address GetAddrOfLocalVar(const clang::VarDecl *VD) { + auto it = LocalDeclMap.find(VD); + assert(it != LocalDeclMap.end() && + "Invalid argument to GetAddrOfLocalVar(), no decl!"); + return it->second; + } + /// ShouldInstrumentFunction - Return true if the current function should be /// instrumented with __cyg_profile_func_* calls bool ShouldInstrumentFunction(); From c8d65cb6a0b9f0fceb27d9b675e7f7e4fa176593 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:16:43 -0400 Subject: [PATCH 0334/2301] [CIR] Add some assertions for generateCode --- clang/lib/CIR/CIRGenFunction.cpp | 47 ++++++++++++++++++++++++++++---- 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index c67953f5df73..97cce82547cf 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -288,9 +288,44 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { insertCleanupAndLeave(currBlock); } -mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp fn, +mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo) { - auto *FD = cast(GD.getDecl()); + assert(Fn && "generating code for a null function"); + const auto FD = cast(GD.getDecl()); + if (FD->isInlineBuiltinDeclaration()) { + llvm_unreachable("NYI"); + } else { + // Detect the unusual situation where an inline version is shadowed by a + // non-inline version. In that case we should pick the external one + // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way + // to detect that situation before we reach codegen, so do some late + // replacement. + for (const auto *PD = FD->getPreviousDecl(); PD; + PD = PD->getPreviousDecl()) { + if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) { + llvm_unreachable("NYI"); + } + } + } + + // Check if we should generate debug info for this function. + if (FD->hasAttr()) { + llvm_unreachable("NYI"); + } + + // If this is a function specialization then use the pattern body as the + // location for the function. + if (const auto *SpecDecl = FD->getTemplateInstantiationPattern()) + llvm_unreachable("NYI"); + + Stmt *Body = FD->getBody(); + + if (Body) { + // Coroutines always emit lifetime markers + if (isa(Body)) + llvm_unreachable("Coroutines NYI"); + } + // Create a scope in the symbol table to hold variable declarations. SymTableScopeTy varScope(symbolTable); @@ -305,7 +340,7 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp fn, // In MLIR the entry block of the function is special: it must have the // same argument list as the function itself. - mlir::Block *entryBlock = fn.addEntryBlock(); + mlir::Block *entryBlock = Fn.addEntryBlock(); // Set the insertion point in the builder to the beginning of the // function body, it will be used throughout the codegen to create @@ -348,16 +383,16 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp fn, // Emit the body of the function. if (mlir::failed(buildFunctionBody(FD->getBody()))) { - fn.erase(); + Fn.erase(); return nullptr; } assert(builder.getInsertionBlock() && "Should be valid"); } - if (mlir::failed(fn.verifyBody())) + if (mlir::failed(Fn.verifyBody())) return nullptr; - return fn; + return Fn; } /// ShouldInstrumentFunction - Return true if the current function should be From e14a9ec6e0c7d6cd441e9b3474a0121e82d8e552 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:23:33 -0400 Subject: [PATCH 0335/2301] [CIR] Track the current GlobalDecl in CIRGenFunction --- clang/lib/CIR/CIRGenFunction.cpp | 2 ++ clang/lib/CIR/CIRGenFunction.h | 3 +++ 2 files changed, 5 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 97cce82547cf..d7ddc873045b 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -292,6 +292,8 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo) { assert(Fn && "generating code for a null function"); const auto FD = cast(GD.getDecl()); + CurGD = GD; + if (FD->isInlineBuiltinDeclaration()) { llvm_unreachable("NYI"); } else { diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index b3990148d5dd..ecb58de72505 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -261,6 +261,9 @@ class CIRGenFunction { ForceRightToLeft }; + /// CurGD - The GlobalDecl for the current function being compiled. + clang::GlobalDecl CurGD; + clang::QualType FnRetQualTy; std::optional FnRetCIRTy; std::optional FnRetAlloca; From 8e3284e0662f30598b33c11440704c142aae0f3a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:24:15 -0400 Subject: [PATCH 0336/2301] [CIR] Set the funtion return QualType in generateCode --- clang/lib/CIR/CIRGenFunction.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index d7ddc873045b..d3072e107ad6 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -294,6 +294,7 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, const auto FD = cast(GD.getDecl()); CurGD = GD; + FnRetQualTy = FD->getReturnType(); if (FD->isInlineBuiltinDeclaration()) { llvm_unreachable("NYI"); } else { From 4c5ca6a310885a145b8d43bf3376fb5bf19fbcf0 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:32:34 -0400 Subject: [PATCH 0337/2301] [CIR] Consider the alternatives other than a Fn for dispatch in generateCode Assert on all the other cases, but consider if we're looking at a constrcutor, destructor, etc. --- clang/lib/CIR/CIRGenFunction.cpp | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index d3072e107ad6..4e2b4a6d22dc 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -384,11 +384,30 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, buildAndUpdateRetAlloca(FnRetQualTy, FnEndLoc, CGM.getNaturalTypeAlignment(FnRetQualTy)); - // Emit the body of the function. - if (mlir::failed(buildFunctionBody(FD->getBody()))) { - Fn.erase(); - return nullptr; - } + // Generate the body of the function. + // TODO: PGO.assignRegionCounters + if (isa(FD)) + llvm_unreachable("NYI"); + else if (isa(FD)) + llvm_unreachable("NYI"); + else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice && + FD->hasAttr()) + llvm_unreachable("NYI"); + else if (isa(FD) && + cast(FD)->isLambdaStaticInvoker()) { + llvm_unreachable("NYI"); + } else if (FD->isDefaulted() && isa(FD) && + (cast(FD)->isCopyAssignmentOperator() || + cast(FD)->isMoveAssignmentOperator())) { + llvm_unreachable("NYI"); + } else if (Body) { + if (mlir::failed(buildFunctionBody(Body))) { + Fn.erase(); + return nullptr; + } + } else + llvm_unreachable("no definition for emitted function"); + assert(builder.getInsertionBlock() && "Should be valid"); } From e48ab735e6e894a48289681f4831a22b422ec995 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:34:55 -0400 Subject: [PATCH 0338/2301] [CIR][NFC] Add some TODOs for generateCode --- clang/lib/CIR/CIRGenFunction.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 4e2b4a6d22dc..cd4831f67b57 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -414,6 +414,13 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, if (mlir::failed(Fn.verifyBody())) return nullptr; + // Emit the standard function epilogue. + // TODO: finishFunction(BodyRange.getEnd()); + + // If we haven't marked the function nothrow through other means, do a quick + // pass now to see if we can. + // TODO: if (!CurFn->doesNotThrow()) TryMarkNoThrow(CurFn); + return Fn; } From cd47b62b8dd8a051c08bc1f1c4f88c51fec2c75a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:35:33 -0400 Subject: [PATCH 0339/2301] [CIR] Move some code within the nested scope in generateCode This isn't meaningful here, but later code moves some of this logic into "StartFunction" which handles the prologue and other boilerplate, so move it here now for simplicity. --- clang/lib/CIR/CIRGenFunction.cpp | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index cd4831f67b57..82efb650b074 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -341,19 +341,20 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, FnRetCIRTy = getCIRType(FnRetQualTy); } - // In MLIR the entry block of the function is special: it must have the - // same argument list as the function itself. - mlir::Block *entryBlock = Fn.addEntryBlock(); - - // Set the insertion point in the builder to the beginning of the - // function body, it will be used throughout the codegen to create - // operations in this function. - builder.setInsertionPointToStart(entryBlock); - auto FnBeginLoc = getLoc(FD->getBody()->getEndLoc()); - auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); - - // Initialize lexical scope information. { + auto FnBeginLoc = getLoc(FD->getBody()->getEndLoc()); + auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); + + // In MLIR the entry block of the function is special: it must have the + // same argument list as the function itself. + mlir::Block *entryBlock = Fn.addEntryBlock(); + + // Set the insertion point in the builder to the beginning of the + // function body, it will be used throughout the codegen to create + // operations in this function. + builder.setInsertionPointToStart(entryBlock); + + // Initialize lexical scope information. LexicalScopeContext lexScope{FnBeginLoc, FnEndLoc, builder.getInsertionBlock()}; LexicalScopeGuard scopeGuard{*this, &lexScope}; From b02d4d938664b8bfd649321032601904081bf4a4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:36:22 -0400 Subject: [PATCH 0340/2301] [CIR] Assert we aren't looking at a coroutine in generateCode --- clang/lib/CIR/CIRGenFunction.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 82efb650b074..d875325807ce 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -385,6 +385,10 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, buildAndUpdateRetAlloca(FnRetQualTy, FnEndLoc, CGM.getNaturalTypeAlignment(FnRetQualTy)); + // Save parameters for coroutine function. + if (Body && isa_and_nonnull(Body)) + llvm_unreachable("Coroutines NYI"); + // Generate the body of the function. // TODO: PGO.assignRegionCounters if (isa(FD)) From 4c0862dc4c849dbf8791963ec365ca3e8b267765 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:37:42 -0400 Subject: [PATCH 0341/2301] [CIR][NFC] Add an assert that the Fn isn't being generated twice --- clang/lib/CIR/CIRGenFunction.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index d875325807ce..7eda2d3f89c0 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -345,6 +345,7 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, auto FnBeginLoc = getLoc(FD->getBody()->getEndLoc()); auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); + assert(Fn.isDeclaration() && "Function already has body?"); // In MLIR the entry block of the function is special: it must have the // same argument list as the function itself. mlir::Block *entryBlock = Fn.addEntryBlock(); From ae3c0666ec01b70e7dd0e44ac2848dd3e4ea165d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:38:40 -0400 Subject: [PATCH 0342/2301] [CIR] Add a helper fn to check if we shoudl be doing TypeChecks (in the future) --- clang/lib/CIR/CIRGenFunction.cpp | 7 +++++++ clang/lib/CIR/CIRGenFunction.h | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 7eda2d3f89c0..94ed4c0affca 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -150,6 +150,13 @@ bool CIRGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) { return false; } +bool CIRGenFunction::sanitizePerformTypeCheck() const { + return SanOpts.has(SanitizerKind::Null) || + SanOpts.has(SanitizerKind::Alignment) || + SanOpts.has(SanitizerKind::ObjectSize) || + SanOpts.has(SanitizerKind::Vptr); +} + /// If the specified expression does not fold /// to a constant, or if it does but contains a label, return false. If it /// constant folds return true and set the folded value. diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index ecb58de72505..75a2e02ed9e8 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -329,6 +329,10 @@ class CIRGenFunction { LocalDeclMap.insert({VD, Addr}); } + /// Whether any type-checking sanitizers are enabled. If \c false, calls to + /// buildTypeCheck can be skipped. + bool sanitizePerformTypeCheck() const; + // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. struct PrototypeWrapper { From f898c4069b046bdf1e66aa4c685043828cf29367 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:42:54 -0400 Subject: [PATCH 0343/2301] [CIR] Dispatch to buildAggExpr for TEK_Aggregate in buildExprAsInit As the title says, just add a fn to build an aggregate expression. Currently stubbed out. --- clang/lib/CIR/CIRGenDecl.cpp | 14 +++++++++++++- clang/lib/CIR/CIRGenExprAgg.cpp | 32 ++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 2 ++ 3 files changed, 47 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenDecl.cpp b/clang/lib/CIR/CIRGenDecl.cpp index 47a9c89a76ae..d518d423ce62 100644 --- a/clang/lib/CIR/CIRGenDecl.cpp +++ b/clang/lib/CIR/CIRGenDecl.cpp @@ -234,7 +234,19 @@ void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, return; } case TEK_Aggregate: - assert(0 && "not implemented"); + assert(!type->isAtomicType() && "NYI"); + AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap; + if (isa(D)) + Overlap = AggValueSlot::DoesNotOverlap; + else if (auto *FD = dyn_cast(D)) + assert(false && "Field decl NYI"); + else + assert(false && "Only VarDecl implemented so far"); + // TODO: how can we delay here if D is captured by its initializer? + buildAggExpr(init, + AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, Overlap)); return; } llvm_unreachable("bad evaluation kind"); diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index e69de29bb2d1..f3a42928f973 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -0,0 +1,32 @@ +//===--- CIRGenExprAgg.cpp - Emit CIR Code from Aggregate Expressions -----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Aggregate Expr nodes as CIR code. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenFunction.h" +using namespace cir; +using namespace clang; +void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { + assert(E && CIRGenFunction::hasAggregateEvaluationKind(E->getType()) && + "Invalid aggregate expression to emit"); + assert((Slot.getAddress().isValid() || Slot.isIgnored()) && + "slot has bits but no address"); + + // TODO: assert(false && "Figure out how to assert we're in c++"); + if (const RecordType *RT = CGM.getASTContext() + .getBaseElementType(E->getType()) + ->getAs()) { + auto *RD = cast(RT->getDecl()); + assert(RD->hasUserDeclaredConstructor() && + "default constructors aren't expected here YET"); + } + + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 75a2e02ed9e8..281c38b16bea 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -333,6 +333,8 @@ class CIRGenFunction { /// buildTypeCheck can be skipped. bool sanitizePerformTypeCheck() const; + void buildAggExpr(const clang::Expr *E, AggValueSlot Slot); + // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. struct PrototypeWrapper { From 78c1e82f8eaaa7da1c23f41d579398d052848023 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:44:53 -0400 Subject: [PATCH 0344/2301] [CIR] Set the FnRetCIRTy in generateCode if the FnRetQualTy isn't void --- clang/lib/CIR/CIRGenFunction.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 94ed4c0affca..df1a5a21d619 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -302,6 +302,9 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, CurGD = GD; FnRetQualTy = FD->getReturnType(); + if (!FnRetQualTy->isVoidType()) + FnRetCIRTy = getCIRType(FnRetQualTy); + if (FD->isInlineBuiltinDeclaration()) { llvm_unreachable("NYI"); } else { From ec8a161cbc950c2b2b2acfb4123ad6f065ff9e2b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:46:45 -0400 Subject: [PATCH 0345/2301] [CIR] Add a visitor that will handle aggregate expressions Currently it only has a stubbed out fn for CXXConstructorExprs --- clang/lib/CIR/CIRGenExprAgg.cpp | 43 ++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index f3a42928f973..92f769ef3e84 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -11,8 +11,49 @@ //===----------------------------------------------------------------------===// #include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "CIRGenTypes.h" +#include "CIRGenValue.h" + +#include "clang/AST/StmtVisitor.h" + using namespace cir; using namespace clang; + +namespace { +class AggExprEmitter : public StmtVisitor { + CIRGenFunction &CGF; + AggValueSlot Dest; + // bool IsResultUnused; + + AggValueSlot EnsureSlot(QualType T) { + assert(!Dest.isIgnored() && "ignored slots NYI"); + return Dest; + } + +public: + AggExprEmitter(CIRGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused) + : CGF{cgf}, Dest(Dest) + // ,IsResultUnused(IsResultUnused) + {} + + void Visit(Expr *E) { + // TODO: CodeGen does ApplyDebugLocation here + assert(cast(E) && "Only CXXConstructExpr implemented"); + StmtVisitor::Visit(E); + } + + void VisitCXXConstructExpr(const CXXConstructExpr *E); +}; +} // namespace + +void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { + AggValueSlot Slot = EnsureSlot(E->getType()); + llvm_unreachable("NYI"); + (void)CGF; + (void)Slot; +} + void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { assert(E && CIRGenFunction::hasAggregateEvaluationKind(E->getType()) && "Invalid aggregate expression to emit"); @@ -28,5 +69,5 @@ void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { "default constructors aren't expected here YET"); } - llvm_unreachable("NYI"); + AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast(E)); } From efdd1d980642adc22a199c766c56c77fa25fbccf Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:48:11 -0400 Subject: [PATCH 0346/2301] [CIR] Add a stubbed out fn for building type checks --- clang/lib/CIR/CIRGenFunction.cpp | 12 ++++++++++++ clang/lib/CIR/CIRGenFunction.h | 28 ++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index df1a5a21d619..e0fb3acee4cb 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -157,6 +157,18 @@ bool CIRGenFunction::sanitizePerformTypeCheck() const { SanOpts.has(SanitizerKind::Vptr); } +void CIRGenFunction::buildTypeCheck(TypeCheckKind TCK, + clang::SourceLocation Loc, mlir::Value V, + clang::QualType Type, + clang::CharUnits Alignment, + clang::SanitizerSet SkippedChecks, + std::optional ArraySize) { + if (!sanitizePerformTypeCheck()) + return; + + assert(false && "type check NYI"); +} + /// If the specified expression does not fold /// to a constant, or if it does but contains a label, return false. If it /// constant folds return true and set the folded value. diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 281c38b16bea..de4499e04dc2 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -261,6 +261,28 @@ class CIRGenFunction { ForceRightToLeft }; + /// Situations in which we might emit a check for the suitability of a pointer + /// or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in + /// compiler-rt. + enum TypeCheckKind { + /// Checking hte operand of a load. Must be suitably sized and aligned. + TCK_Load, + /// Checking the destination of a store. Must be suitably sized and aligned. + TCK_Store, + /// Checking the bound value in a reference binding. Must be suitably sized + /// and aligned, but is not required to refer to an object (until the + /// reference is used), per core issue 453. + TCK_ReferenceBinding, + /// Checking the object expression in a non-static data member access. Must + /// be an object within its lifetime. + TCK_MemberAccess, + /// Checking the 'this' pointer for a call to a non-static member function. + /// Must be an object within its lifetime. + TCK_MemberCall, + /// Checking the 'this' pointer for a constructor call. + TCK_ConstructorCall, + }; + /// CurGD - The GlobalDecl for the current function being compiled. clang::GlobalDecl CurGD; @@ -333,6 +355,12 @@ class CIRGenFunction { /// buildTypeCheck can be skipped. bool sanitizePerformTypeCheck() const; + void buildTypeCheck(TypeCheckKind TCK, clang::SourceLocation Loc, + mlir::Value V, clang::QualType Type, + clang::CharUnits Alignment = clang::CharUnits::Zero(), + clang::SanitizerSet SkippedChecks = clang::SanitizerSet(), + std::optional ArraySize = std::nullopt); + void buildAggExpr(const clang::Expr *E, AggValueSlot Slot); // Wrapper for function prototype sources. Wraps either a FunctionProtoType or From 6414c23fb9fbd6d096c0cd40cf51eb9649b85135 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:49:33 -0400 Subject: [PATCH 0347/2301] [CIR] Add a member var to assert against for lifetime markers --- clang/lib/CIR/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CIRGenFunction.h | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index e0fb3acee4cb..e9f1f6eb3631 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -26,7 +26,7 @@ using namespace mlir::cir; CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, mlir::OpBuilder &builder) : CGM{CGM}, builder(builder), CurFuncDecl(nullptr), - SanOpts(CGM.getLangOpts().Sanitize) {} + SanOpts(CGM.getLangOpts().Sanitize), ShouldEmitLifetimeMarkers(false) {} clang::ASTContext &CIRGenFunction::getContext() const { return CGM.getASTContext(); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index de4499e04dc2..3a52286dc85b 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -311,6 +311,10 @@ class CIRGenFunction { /// dropped. using SymTableTy = llvm::ScopedHashTable; SymTableTy symbolTable; + /// True if we need to emit the life-time markers. This is initially set in + /// the constructor, but could be overwrriten to true if this is a coroutine. + bool ShouldEmitLifetimeMarkers; + using DeclMapTy = llvm::DenseMap; /// LocalDeclMap - This keeps track of the CIR allocas or globals for local C /// delcs. From ae21305c228f4260f9a42ee2befa3c551b22f5e1 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 22:56:28 -0400 Subject: [PATCH 0348/2301] [CIR] Move arg gen into new fn StartFunction and liberally guard the path This diff does three main things: * Move argument cirgen into StartFunction. * Assert against a million different things in the StartFunction fn. * Set a bunch of CIRGenFunction state specific to the function being generated --- clang/lib/CIR/CIRGenFunction.cpp | 314 ++++++++++++++++++++++++++----- clang/lib/CIR/CIRGenFunction.h | 18 ++ 2 files changed, 288 insertions(+), 44 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index e9f1f6eb3631..28062871ec52 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -317,6 +317,9 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, if (!FnRetQualTy->isVoidType()) FnRetCIRTy = getCIRType(FnRetQualTy); + FunctionArgList Args; + QualType ResTy = buildFunctionArgList(GD, Args); + if (FD->isInlineBuiltinDeclaration()) { llvm_unreachable("NYI"); } else { @@ -338,6 +341,22 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, llvm_unreachable("NYI"); } + // The function might not have a body if we're generating thunks for a + // function declaration. + SourceRange BodyRange; + if (Stmt *Body = FD->getBody()) + BodyRange = Body->getSourceRange(); + else + BodyRange = FD->getLocation(); + // TODO: CurEHLocation + + // Use the location of the start of the function to determine where the + // function definition is located. By default we use the location of the + // declaration as the location for the subprogram. A function may lack a + // declaration in the source code if it is created by code gen. (examples: + // _GLOBAL__I_a, __cxx_global_array_dtor, thunk). + SourceLocation Loc = FD->getLocation(); + // If this is a function specialization then use the pattern body as the // location for the function. if (const auto *SpecDecl = FD->getTemplateInstantiationPattern()) @@ -354,59 +373,21 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, // Create a scope in the symbol table to hold variable declarations. SymTableScopeTy varScope(symbolTable); - const CXXMethodDecl *MD = dyn_cast(FD); - assert(!MD && "methods not implemented"); - - FnRetQualTy = FD->getReturnType(); - mlir::TypeRange FnTyRange = {}; - if (!FnRetQualTy->isVoidType()) { - FnRetCIRTy = getCIRType(FnRetQualTy); - } - { auto FnBeginLoc = getLoc(FD->getBody()->getEndLoc()); auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); assert(Fn.isDeclaration() && "Function already has body?"); - // In MLIR the entry block of the function is special: it must have the - // same argument list as the function itself. - mlir::Block *entryBlock = Fn.addEntryBlock(); + mlir::Block *EntryBB = Fn.addEntryBlock(); + builder.setInsertionPointToStart(EntryBB); - // Set the insertion point in the builder to the beginning of the - // function body, it will be used throughout the codegen to create - // operations in this function. - builder.setInsertionPointToStart(entryBlock); - - // Initialize lexical scope information. - LexicalScopeContext lexScope{FnBeginLoc, FnEndLoc, - builder.getInsertionBlock()}; + LexicalScopeContext lexScope{FnBeginLoc, FnEndLoc, EntryBB}; LexicalScopeGuard scopeGuard{*this, &lexScope}; - // Declare all the function arguments in the symbol table. - for (const auto nameValue : - llvm::zip(FD->parameters(), entryBlock->getArguments())) { - auto *paramVar = std::get<0>(nameValue); - auto paramVal = std::get<1>(nameValue); - auto alignment = getContext().getDeclAlign(paramVar); - auto paramLoc = getLoc(paramVar->getSourceRange()); - paramVal.setLoc(paramLoc); - - mlir::Value addr; - if (failed(declare(paramVar, paramVar->getType(), paramLoc, alignment, - addr, true /*param*/))) - return nullptr; - // Location of the store to the param storage tracked as beginning of - // the function body. - auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); - builder.create(fnBodyBegin, paramVal, addr); - } - assert(builder.getInsertionBlock() && "Should be valid"); + // Emit the standard function prologue. + StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin()); - // When the current function is not void, create an address to store the - // result value. - if (FnRetCIRTy.has_value()) - buildAndUpdateRetAlloca(FnRetQualTy, FnEndLoc, - CGM.getNaturalTypeAlignment(FnRetQualTy)); + // Initialize lexical scope information. // Save parameters for coroutine function. if (Body && isa_and_nonnull(Body)) @@ -452,6 +433,251 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, return Fn; } +void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, + mlir::FuncOp Fn, + const CIRGenFunctionInfo &FnInfo, + const FunctionArgList &Args, + SourceLocation Loc, + SourceLocation StartLoc) { + assert(!CurFn && + "Do not use a CIRGenFunction object for more than one function"); + + const auto *D = GD.getDecl(); + + DidCallStackSave = false; + CurCodeDecl = D; + const auto *FD = dyn_cast_or_null(D); + if (FD && FD->usesSEHTry()) + llvm_unreachable("NYI"); + CurFuncDecl = (D ? D->getNonClosureContext() : nullptr); + FnRetTy = RetTy; + CurFn = Fn; + CurFnInfo = &FnInfo; + + // If this function is ignored for any of the enabled sanitizers, disable + // the sanitizer for the function. + do { +#define SANITIZER(NAME, ID) \ + if (SanOpts.empty()) \ + break; \ + if (SanOpts.has(SanitizerKind::ID)) \ + if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc)) \ + SanOpts.set(SanitizerKind::ID, false); + +#include "clang/Basic/Sanitizers.def" +#undef SANITIZER + } while (0); + + if (D) { + bool NoSanitizeCoverage = false; + (void)NoSanitizeCoverage; + + for (auto Attr : D->specific_attrs()) { + (void)Attr; + llvm_unreachable("NYI"); + } + + // SanitizeCoverage is not handled by SanOpts + if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage()) + llvm_unreachable("NYI"); + } + + // Apply sanitizer attributes to the function. + if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress | + SanitizerKind::HWAddress | + SanitizerKind::KernelHWAddress | SanitizerKind::MemTag | + SanitizerKind::Thread | SanitizerKind::Memory | + SanitizerKind::KernelMemory | SanitizerKind::SafeStack | + SanitizerKind::ShadowCallStack | SanitizerKind::Fuzzer | + SanitizerKind::FuzzerNoLink | + SanitizerKind::CFIUnrelatedCast | SanitizerKind::Null)) + llvm_unreachable("NYI"); + + // TODO: XRay + // TODO: PGO + + unsigned Count, Offset; + if (const auto *Attr = + D ? D->getAttr() : nullptr) { + llvm_unreachable("NYI"); + } else { + Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount; + Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset; + } + if (Count && Offset <= Count) { + llvm_unreachable("NYI"); + } + + // Add no-jump-tables value. + if (CGM.getCodeGenOpts().NoUseJumpTables) + llvm_unreachable("NYI"); + + // Add no-inline-line-tables value. + if (CGM.getCodeGenOpts().NoInlineLineTables) + llvm_unreachable("NYI"); + + // Add profile-sample-accurate value. + if (CGM.getCodeGenOpts().ProfileSampleAccurate) + llvm_unreachable("NYI"); + + if (!CGM.getCodeGenOpts().SampleProfileFile.empty()) + llvm_unreachable("NYI"); + + if (D && D->hasAttr()) + llvm_unreachable("NYI"); + + if (D && D->hasAttr()) + llvm_unreachable("NYI"); + + if (FD && getLangOpts().OpenCL) { + llvm_unreachable("NYI"); + } + + // If we are checking function types, emit a function type signature as + // prologue data. + if (FD && getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) { + llvm_unreachable("NYI"); + } + + // If we're checking nullability, we need to know whether we can check the + // return value. Initialize the falg to 'true' and refine it in + // buildParmDecl. + if (SanOpts.has(SanitizerKind::NullabilityReturn)) { + llvm_unreachable("NYI"); + } + + // If we're in C++ mode and the function name is "main", it is guaranteed to + // be norecurse by the standard (3.6.1.3 "The function main shall not be + // used within a program"). + // + // OpenCL C 2.0 v2.2-11 s6.9.i: + // Recursion is not supported. + // + // SYCL v1.2.1 s3.10: + // kernels cannot include RTTI information, exception cases, recursive + // code, virtual functions or make use of C++ libraries that are not + // compiled for the device. + if (FD && + ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL || + getLangOpts().SYCLIsDevice | + (getLangOpts().CUDA && FD->hasAttr()))) + ; // TODO: support norecurse attr + + // TODO: rounding mode and strict floating point + + // TODO: stackrealign attr + + mlir::Block *EntryBB = &Fn.getBlocks().front(); + + // TODO: allocapt insertion? probably don't need for CIR + + // TODO: return value checking + + if (getDebugInfo()) { + llvm_unreachable("NYI"); + } + + if (ShouldInstrumentFunction()) { + llvm_unreachable("NYI"); + } + + // Since emitting the mcount call here impacts optimizations such as + // function inlining, we just add an attribute to insert a mcount call in + // backend. The attribute "counting-function" is set to mcount function name + // which is architecture dependent. + if (CGM.getCodeGenOpts().InstrumentForProfiling) { + llvm_unreachable("NYI"); + } + + if (CGM.getCodeGenOpts().PackedStack) { + llvm_unreachable("NYI"); + } + + if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX) { + llvm_unreachable("NYI"); + } + + // TODO: emitstartehspec + + // TODO: prologuecleanupdepth + + if (getLangOpts().OpenMP && CurCodeDecl) + llvm_unreachable("NYI"); + + // TODO: buildFunctionProlog + + { + // Set the insertion point in the builder to the beginning of the + // function body, it will be used throughout the codegen to create + // operations in this function. + builder.setInsertionPointToStart(EntryBB); + + // TODO: this should live in `buildFunctionProlog + // Declare all the function arguments in the symbol table. + for (const auto nameValue : llvm::zip(Args, EntryBB->getArguments())) { + auto *paramVar = std::get<0>(nameValue); + auto paramVal = std::get<1>(nameValue); + auto alignment = getContext().getDeclAlign(paramVar); + auto paramLoc = getLoc(paramVar->getSourceRange()); + paramVal.setLoc(paramLoc); + + mlir::Value addr; + if (failed(declare(paramVar, paramVar->getType(), paramLoc, alignment, + addr, true /*param*/))) + return; + + auto address = Address(addr, alignment); + setAddrOfLocalVar(paramVar, address); + + // Location of the store to the param storage tracked as beginning of + // the function body. + auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); + builder.create(fnBodyBegin, paramVal, addr); + } + assert(builder.getInsertionBlock() && "Should be valid"); + + auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); + + // When the current function is not void, create an address to store the + // result value. + if (FnRetCIRTy.has_value()) + buildAndUpdateRetAlloca(FnRetQualTy, FnEndLoc, + CGM.getNaturalTypeAlignment(FnRetQualTy)); + } + + if (D && isa(D) && cast(D)->isInstance()) { + llvm_unreachable("NYI"); + } + + // If any of the arguments have a variably modified type, make sure to emit + // the type size. + for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e; + ++i) { + const VarDecl *VD = *i; + + // Dig out the type as written from ParmVarDecls; it's unclear whether the + // standard (C99 6.9.1p10) requires this, but we're following the + // precedent set by gcc. + QualType Ty; + if (const auto *PVD = dyn_cast(VD)) + Ty = PVD->getOriginalType(); + else + Ty = VD->getType(); + + if (Ty->isVariablyModifiedType()) + llvm_unreachable("NYI"); + } + // Emit a location at the end of the prologue. + if (getDebugInfo()) + llvm_unreachable("NYI"); + + // TODO: Do we need to handle this in two places like we do with + // target-features/target-cpu? + if (CurFuncDecl) + if (const auto *VecWidth = CurFuncDecl->getAttr()) + llvm_unreachable("NYI"); +} + /// ShouldInstrumentFunction - Return true if the current function should be /// instrumented with __cyg_profile_func_* calls bool CIRGenFunction::ShouldInstrumentFunction() { diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 3a52286dc85b..ca26382ef28d 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -292,6 +292,11 @@ class CIRGenFunction { // Holds the Decl for the current outermost non-closure context const clang::Decl *CurFuncDecl; + /// CurCodeDecl - This is the inner-most code context, which includes blocks. + const clang::Decl *CurCodeDecl; + const CIRGenFunctionInfo *CurFnInfo; + clang::QualType FnRetTy; + mlir::FuncOp CurFn = nullptr; // The CallExpr within the current statement that the musttail attribute // applies to. nullptr if there is no 'musttail' on the current statement. @@ -320,6 +325,11 @@ class CIRGenFunction { /// delcs. DeclMapTy LocalDeclMap; + /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid + /// calling llvm.stacksave for multiple VLAs in the same scope. + /// TODO: Translate to MLIR + bool DidCallStackSave = false; + /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); @@ -634,6 +644,14 @@ class CIRGenFunction { /// expression and compare the result against zero, returning an Int1Ty value. mlir::Value evaluateExprAsBool(const clang::Expr *E); + /// Emit code for the start of a function. + /// \param Loc The location to be associated with the function. + /// \param StartLoc The location of the function body. + void StartFunction(clang::GlobalDecl GD, clang::QualType RetTy, + mlir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, + const FunctionArgList &Args, clang::SourceLocation Loc, + clang::SourceLocation StartLoc); + /// Emit a conversion from the specified type to the specified destination /// type, both of which are CIR scalar types. mlir::Value buildScalarConversion(mlir::Value Src, clang::QualType SrcTy, From 8b38ddae307eace920023159f6d0d08db7a52473 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:00:14 -0400 Subject: [PATCH 0349/2301] [CIR] Assert we aren't seeing supposed to be emitting lifetime markers --- clang/lib/CIR/CIRGenFunction.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 28062871ec52..3734326b2bd0 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -368,6 +368,11 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, // Coroutines always emit lifetime markers if (isa(Body)) llvm_unreachable("Coroutines NYI"); + + // Initialize helper which will detect jumps which can cause invalid + // lifetime markers. + if (ShouldEmitLifetimeMarkers) + llvm_unreachable("Lifetime markers NYI"); } // Create a scope in the symbol table to hold variable declarations. From 53592370965cd0bd6bd83ee2e60a385287588c7a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:06:40 -0400 Subject: [PATCH 0350/2301] [CIR] Add some boilerplate for emitting traps for flowing off the end of a fn This is incomplete as we don't yet have a trap or unreachable instruction, but add some code that checks for the right conditions and mark the missing instruction insertion as TODO. --- clang/lib/CIR/CIRGenCall.cpp | 14 ++++++++++++++ clang/lib/CIR/CIRGenFunction.cpp | 24 ++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 4 ++++ clang/lib/CIR/CIRGenModule.h | 5 +++++ 4 files changed, 47 insertions(+) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 1f328866865e..efaac52df442 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -574,3 +574,17 @@ void CIRGenFunction::buildCallArgs( std::reverse(Args.begin() + CallArgsStart, Args.end()); } } + +bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context, + QualType ReturnType) { + // We can't just disard the return value for a record type with a complex + // destructor or a non-trivially copyable type. + if (const RecordType *RT = + ReturnType.getCanonicalType()->getAs()) { + llvm_unreachable("NYI"); + } + + return ReturnType.isTriviallyCopyableType(Context); +} + + diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 3734326b2bd0..7cfed2f00b3a 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -428,6 +428,30 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, if (mlir::failed(Fn.verifyBody())) return nullptr; + // C++11 [stmt.return]p2: + // Flowing off the end of a function [...] results in undefined behavior + // in a value-returning function. + // C11 6.9.1p12: + // If the '}' that terminates a function is reached, and the value of the + // function call is used by the caller, the behavior is undefined. + if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock && + !FD->getReturnType()->isVoidType() && builder.getInsertionBlock()) { + bool shouldEmitUnreachable = + CGM.getCodeGenOpts().StrictReturn || + !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType()); + + if (SanOpts.has(SanitizerKind::Return)) { + llvm_unreachable("NYI"); + } else if (shouldEmitUnreachable) { + if (CGM.getCodeGenOpts().OptimizationLevel == 0) + ; // TODO: buildTrapCall(llvm::Intrinsic::trap); + } + if (SanOpts.has(SanitizerKind::Return) || shouldEmitUnreachable) { + // TODO: builder.createUnreachable(); + builder.clearInsertionPoint(); + } + } + // Emit the standard function epilogue. // TODO: finishFunction(BodyRange.getEnd()); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index ca26382ef28d..556619e80066 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -330,6 +330,10 @@ class CIRGenFunction { /// TODO: Translate to MLIR bool DidCallStackSave = false; + /// Whether we processed a Microsoft-style asm block during CIRGen. These can + /// potentially set the return value. + bool SawAsmBlock = false; + /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index a8b73a99771f..96b96dfe30fd 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -173,6 +173,11 @@ class CIRGenModule { /// false, the definition can be emitted lazily if it's used. bool MustBeEmitted(const clang::ValueDecl *D); + /// Whether this function's return type has no side effects, and thus may be + /// trivially discared if it is unused. + bool MayDropFunctionReturn(const clang::ASTContext &Context, + clang::QualType ReturnType); + bool isInNoSanitizeList(clang::SanitizerMask Kind, mlir::FuncOp Fn, clang::SourceLocation) const; From c67d518518bf2e642f5188c4e4ae3cfc8ad064c1 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:10:08 -0400 Subject: [PATCH 0351/2301] [CIR] Add some fns to support conditional codegen regarding COMDAT --- clang/lib/CIR/CIRGenModule.cpp | 37 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenModule.h | 5 +++++ 2 files changed, 42 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 9946d7f0e5a8..14096a8211ca 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -730,6 +730,43 @@ bool CIRGenModule::shouldEmitFunction(GlobalDecl GD) { return true; } +bool CIRGenModule::supportsCOMDAT() const { + return getTriple().supportsCOMDAT(); +} + +static bool shouldBeInCOMDAT(CIRGenModule &CGM, const Decl &D) { + if (!CGM.supportsCOMDAT()) + return false; + + if (D.hasAttr()) + return true; + + GVALinkage Linkage; + if (auto *VD = dyn_cast(&D)) + Linkage = CGM.getASTContext().GetGVALinkageForVariable(VD); + else + Linkage = + CGM.getASTContext().GetGVALinkageForFunction(cast(&D)); + + switch (Linkage) { + case clang::GVA_Internal: + case clang::GVA_AvailableExternally: + case clang::GVA_StrongExternal: + return false; + case clang::GVA_DiscardableODR: + case clang::GVA_StrongODR: + return true; + } + llvm_unreachable("No such linkage"); +} + +void CIRGenModule::maybeSetTrivialComdat(const Decl &D, mlir::Operation *Op) { + if (!shouldBeInCOMDAT(*this, D)) + return; + + // TODO: Op.setComdat +} + bool CIRGenModule::isInNoSanitizeList(SanitizerMask Kind, mlir::FuncOp Fn, SourceLocation Loc) const { const auto &NoSanitizeL = getASTContext().getNoSanitizeList(); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 96b96dfe30fd..5b3814d669c3 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -219,11 +219,16 @@ class CIRGenModule { /// Emit any needed decls for which code generation was deferred. void buildDeferred(); + const llvm::Triple &getTriple() const { return target.getTriple(); } + // Finalize CIR code generation. void Release(); bool shouldEmitFunction(clang::GlobalDecl GD); + bool supportsCOMDAT() const; + void maybeSetTrivialComdat(const clang::Decl &D, mlir::Operation *Op); + void emitError(const llvm::Twine &message) { theModule.emitError(message); } private: From 39d646dc2bb442733d5783c44f1e55b42a1c5d54 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:11:08 -0400 Subject: [PATCH 0352/2301] [CIR] Add a fn that will eventually be used to get fns and global vars This is just a stub ATM. It's first upcoming usage will be for CXXConstructorDecls and this'll delegate to the getAddrOfCXXStructor fn. --- clang/lib/CIR/CIRGenModule.cpp | 10 ++++++++++ clang/lib/CIR/CIRGenModule.h | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 14096a8211ca..09fd08377024 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -677,6 +677,16 @@ mlir::Operation *CIRGenModule::GetGlobalValue(StringRef Name) { return nullptr; } +mlir::Operation * +CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { + const Decl *D = GD.getDecl(); + + if (isa(D) || isa(D)) + llvm_unreachable("NYI"); + + llvm_unreachable("NYI"); +} + void CIRGenModule::Release() { buildDeferred(); // TODO: buildVTablesOpportunistically(); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 5b3814d669c3..83138783e810 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -197,6 +197,10 @@ class CIRGenModule { bool ForVTable = false, bool Dontdefer = false, ForDefinition_t IsForDefinition = NotForDefinition); + mlir::Operation * + GetAddrOfGlobal(clang::GlobalDecl GD, + ForDefinition_t IsForDefinition = NotForDefinition); + llvm::StringRef getMangledName(clang::GlobalDecl GD); mlir::Value GetGlobalValue(const clang::Decl *D); From 1de4051e099a1ee40325fbc0416c8f90819df508 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:17:27 -0400 Subject: [PATCH 0353/2301] [CIR] Add CGTypes::arrangeCXXStructorDeclaration This currently does very minimal actual work and just delegates to arrangeCIRFunctionInfo. The new codepaths are all asserted against for now. --- clang/lib/CIR/CIRGenCall.cpp | 97 +++++++++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenTypes.h | 7 +++ 2 files changed, 102 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index efaac52df442..7becb01b3033 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -1,3 +1,17 @@ +//===--- CIRGenCall.cpp - Encapsulate calling convention details ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// These classes wrap the information about a call or function +// definition used to handle ABI compliancy. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" #include "CIRGenFunction.h" #include "CIRGenFunctionInfo.h" #include "CIRGenTypes.h" @@ -575,6 +589,87 @@ void CIRGenFunction::buildCallArgs( } } +/// Returns the canonical formal type of the given C++ method. +static CanQual GetFormalType(const CXXMethodDecl *MD) { + return MD->getType() + ->getCanonicalTypeUnqualified() + .getAs(); +} + +/// Adds the formal parameters in FPT to the given prefix. If any parameter in +/// FPT has pass_object_size_attrs, then we'll add parameters for those, too. +static void appendParameterTypes( + const CIRGenTypes &CGT, SmallVectorImpl &prefix, + SmallVectorImpl ¶mInfos, + CanQual FPT) { + // Fast path: don't touch param info if we don't need to. + if (!FPT->hasExtParameterInfos()) { + assert(paramInfos.empty() && + "We have paramInfos, but the prototype doesn't?"); + prefix.append(FPT->param_type_begin(), FPT->param_type_end()); + return; + } + + assert(false && "params NYI"); +} + +const CIRGenFunctionInfo & +CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { + auto *MD = cast(GD.getDecl()); + + llvm::SmallVector argTypes; + SmallVector paramInfos; + argTypes.push_back(DeriveThisType(MD->getParent(), MD)); + + bool PassParams = true; + + if (auto *CD = dyn_cast(MD)) { + // A base class inheriting constructor doesn't get forwarded arguments + // needed to construct a virtual base (or base class thereof) + assert(!CD->getInheritedConstructor() && "Inheritance NYI"); + } + + CanQual FTP = GetFormalType(MD); + + if (PassParams) + appendParameterTypes(*this, argTypes, paramInfos, FTP); + + assert(paramInfos.empty() && "NYI"); + + assert(!MD->isVariadic() && "Variadic fns NYI"); + RequiredArgs required = RequiredArgs::All; + (void)required; + + FunctionType::ExtInfo extInfo = FTP->getExtInfo(); + + assert(!TheCXXABI.HasThisReturn(GD) && "NYI"); + + CanQualType resultType = Context.VoidTy; + (void)resultType; + + return arrangeCIRFunctionInfo(resultType, /*instanceMethod=*/true, + /*chainCall=*/false, argTypes, extInfo, + paramInfos, required); +} + +/// Derives the 'this' type for CIRGen purposes, i.e. ignoring method CVR +/// qualification. Either or both of RD and MD may be null. A null RD indicates +/// that there is no meaningful 'this' type, and a null MD can occur when +/// calling a method pointer. +CanQualType CIRGenTypes::DeriveThisType(const CXXRecordDecl *RD, + const CXXMethodDecl *MD) { + QualType RecTy; + if (RD) + RecTy = getContext().getTagDeclType(RD)->getCanonicalTypeInternal(); + else + assert(false && "CXXMethodDecl NYI"); + + if (MD) + RecTy = getContext().getAddrSpaceQualType( + RecTy, MD->getMethodQualifiers().getAddressSpace()); + return getContext().getPointerType(CanQualType::CreateUnsafe(RecTy)); +} + bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) { // We can't just disard the return value for a record type with a complex @@ -586,5 +681,3 @@ bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context, return ReturnType.isTriviallyCopyableType(Context); } - - diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index df39d42afdc0..4b3900acbbac 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -118,6 +118,11 @@ class CIRGenTypes { /// Convert clang calling convention to LLVM calling convention. unsigned ClangCallConvToCIRCallConv(clang::CallingConv CC); + /// Derives the 'this' type for CIRGen purposes, i.e. ignoring method CVR + /// qualification. + clang::CanQualType DeriveThisType(const clang::CXXRecordDecl *RD, + const clang::CXXMethodDecl *MD); + /// This map keeps cache of llvm::Types and maps clang::Type to /// corresponding llvm::Type. using TypeCacheTy = llvm::DenseMap; @@ -181,6 +186,8 @@ class CIRGenTypes { const CIRGenFunctionInfo & arrangeFunctionDeclaration(const clang::FunctionDecl *FD); + const CIRGenFunctionInfo &arrangeCXXStructorDeclaration(clang::GlobalDecl GD); + const CIRGenFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const clang::FunctionType *Ty, bool ChainCall); From 19a3c89788c2c5bb69855b7baa0f1530d8f5e7e1 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:21:39 -0400 Subject: [PATCH 0354/2301] [CIR][NFC] Move some code from CIRGenTypes to CIRGenCall --- clang/lib/CIR/CIRGenCall.cpp | 27 ++++++++++++++++++++++ clang/lib/CIR/CIRGenTypes.cpp | 43 ----------------------------------- 2 files changed, 27 insertions(+), 43 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 7becb01b3033..13c0d845a09d 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -670,6 +670,33 @@ CanQualType CIRGenTypes::DeriveThisType(const CXXRecordDecl *RD, return getContext().getPointerType(CanQualType::CreateUnsafe(RecTy)); } +/// Arrange the CIR function layout for a value of the given function type, on +/// top of any implicit parameters already stored. +static const CIRGenFunctionInfo & +arrangeCIRFunctionInfo(CIRGenTypes &CGT, bool instanceMethod, + SmallVectorImpl &prefix, + CanQual FTP) { + SmallVector paramInfos; + RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); + // FIXME: Kill copy. -- from codegen + appendParameterTypes(CGT, prefix, paramInfos, FTP); + CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); + + return CGT.arrangeCIRFunctionInfo(resultType, instanceMethod, + /*chainCall=*/false, prefix, + FTP->getExtInfo(), paramInfos, Required); +} + +/// Arrange the argument and result information for a value of the given +/// freestanding function type. +const CIRGenFunctionInfo & +CIRGenTypes::arrangeFreeFunctionType(CanQual FTP) { + SmallVector argTypes; + return ::arrangeCIRFunctionInfo(*this, /*instanceMethod=*/false, argTypes, + FTP); +} + + bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) { // We can't just disard the return value for a record type with a complex diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index c0afd6326607..a5a27aa35538 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -642,49 +642,6 @@ CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { return arrangeFreeFunctionType(FTy.castAs()); } -/// Adds the formal parameters in FPT to the given prefix. If any parameter in -/// FPT has pass_object_size_attrs, then we'll add parameters for those, too. -static void appendParameterTypes( - const CIRGenTypes &CGT, SmallVectorImpl &prefix, - SmallVectorImpl ¶mInfos, - CanQual FPT) { - // Fast path: don't touch param info if we don't need to. - if (!FPT->hasExtParameterInfos()) { - assert(paramInfos.empty() && - "We have paramInfos, but the prototype doesn't?"); - prefix.append(FPT->param_type_begin(), FPT->param_type_end()); - return; - } - - assert(false && "params NYI"); -} - -/// Arrange the CIR function layout for a value of the given function type, on -/// top of any implicit parameters already stored. -static const CIRGenFunctionInfo & -arrangeCIRFunctionInfo(CIRGenTypes &CGT, bool instanceMethod, - SmallVectorImpl &prefix, - CanQual FTP) { - SmallVector paramInfos; - RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); - // FIXME: Kill copy. -- from codegen - appendParameterTypes(CGT, prefix, paramInfos, FTP); - CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); - - return CGT.arrangeCIRFunctionInfo(resultType, instanceMethod, - /*chainCall=*/false, prefix, - FTP->getExtInfo(), paramInfos, Required); -} - -/// Arrange the argument and result information for a value of the given -/// freestanding function type. -const CIRGenFunctionInfo & -CIRGenTypes::arrangeFreeFunctionType(CanQual FTP) { - SmallVector argTypes; - return ::arrangeCIRFunctionInfo(*this, /*instanceMethod=*/false, argTypes, - FTP); -} - /// Figure out the rules for calling a function with the given formal type using /// the given arguments. The arguments are necessary because the function might /// be unprototyped, in which case it's target-dependent in crazy ways. From efab19bb3532cf2b5da714cc88bace6e5b392d97 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:22:34 -0400 Subject: [PATCH 0355/2301] [CIR] Add CGTypes::arrangeCXXConstructorCalls Again, this largely does nothing atm. The behavior is mostly the same as arrangeFreeFunctionLikeCall, but is covered with asserts against the constructor specific things we don't yet support. --- clang/lib/CIR/CIRGenCall.cpp | 42 ++++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenTypes.h | 5 +++++ 2 files changed, 47 insertions(+) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 13c0d845a09d..5000ed36b590 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -696,6 +696,48 @@ CIRGenTypes::arrangeFreeFunctionType(CanQual FTP) { FTP); } +/// Arrange a call to a C++ method, passing the given arguments. +/// +/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` +/// parameter. +/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of +/// args. +/// PassProtoArgs indicates whether `args` has args for the parameters in the +/// given CXXConstructorDecl. +const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXConstructorCall( + const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, + unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs) { + + // FIXME: Kill copy. + llvm::SmallVector ArgTypes; + for (const auto &Arg : Args) + ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); + + // +1 for implicit this, which should always be args[0] + unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs; + + CanQual FPT = GetFormalType(D); + RequiredArgs Required = PassProtoArgs + ? RequiredArgs::forPrototypePlus( + FPT, TotalPrefixArgs + ExtraSuffixArgs) + : RequiredArgs::All; + + GlobalDecl GD(D, CtorKind); + assert(!TheCXXABI.HasThisReturn(GD) && "ThisReturn NYI"); + assert(!TheCXXABI.hasMostDerivedReturn(GD) && "Most derived return NYI"); + CanQualType ResultType = Context.VoidTy; + + FunctionType::ExtInfo Info = FPT->getExtInfo(); + llvm::SmallVector ParamInfos; + // If the prototype args are elided, we should onlyy have ABI-specific args, + // which never have param info. + assert(!FPT->hasExtParameterInfos() && "NYI"); + + return arrangeCIRFunctionInfo(ResultType, /*instanceMethod=*/true, + /*chainCall=*/false, ArgTypes, Info, ParamInfos, + Required); +} + bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) { diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 4b3900acbbac..0489b1a01dc4 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -186,6 +186,11 @@ class CIRGenTypes { const CIRGenFunctionInfo & arrangeFunctionDeclaration(const clang::FunctionDecl *FD); + const CIRGenFunctionInfo &arrangeCXXConstructorCall( + const CallArgList &Args, const clang::CXXConstructorDecl *D, + clang::CXXCtorType CtorKind, unsigned ExtraPrefixArgs, + unsigned ExtraSuffixArgs, bool PassProtoArgs = true); + const CIRGenFunctionInfo &arrangeCXXStructorDeclaration(clang::GlobalDecl GD); const CIRGenFunctionInfo & From 0b761b254046e7f5e07fae236fdf5c46c03a7c7b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:25:32 -0400 Subject: [PATCH 0356/2301] [CIR][NFC] Give CGTypes it's own TargetInfo to be used later. --- clang/lib/CIR/CIRGenTypes.cpp | 2 +- clang/lib/CIR/CIRGenTypes.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index a5a27aa35538..d4a8de3f7930 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -25,7 +25,7 @@ unsigned CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { CIRGenTypes::CIRGenTypes(CIRGenModule &cgm) : Context(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm}, - TheCXXABI(cgm.getCXXABI()), + Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()), TheABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) {} CIRGenTypes::~CIRGenTypes() { for (llvm::FoldingSet::iterator I = FunctionInfos.begin(), diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 0489b1a01dc4..1fa453c1e82e 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -77,6 +77,7 @@ class CIRGenTypes { clang::ASTContext &Context; mlir::OpBuilder &Builder; CIRGenModule &CGM; + const clang::TargetInfo &Target; CIRGenCXXABI &TheCXXABI; // This should not be moved earlier, since its initialization depends on some From 154302a2f92673c30fcfa6af9dc0ae1d1509ab0d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:30:26 -0400 Subject: [PATCH 0357/2301] [CIR] Add getAddrOfCXXStructor This fn just arranges the structor declaration, gets the function type and then calls GetOrCreate. --- clang/lib/CIR/CIRGenModule.cpp | 26 +++++++++++++++++++++++++- clang/lib/CIR/CIRGenModule.h | 16 ++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 09fd08377024..9324a24f2eca 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -384,6 +384,29 @@ void CIRGenModule::verifyModule() { theModule.emitError("module verification error"); } +std::pair +CIRGenModule::getAddrAndTypeOfCXXStructor(GlobalDecl GD, + const CIRGenFunctionInfo *FnInfo, + mlir::FunctionType FnType, + bool Dontdefer, + ForDefinition_t IsForDefinition) { + auto *MD = cast(GD.getDecl()); + + assert(!isa(MD) && "Destructors NYI"); + + if (!FnType) { + if (!FnInfo) + FnInfo = &getTypes().arrangeCXXStructorDeclaration(GD); + FnType = getTypes().GetFunctionType(*FnInfo); + } + + auto Fn = GetOrCreateCIRFunction(getMangledName(GD), FnType, GD, + /*ForVtable=*/false, Dontdefer, + /*IsThunk=*/false, IsForDefinition); + + return {FnType, Fn}; +} + mlir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, bool ForVTable, bool DontDefer, @@ -682,7 +705,8 @@ CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { const Decl *D = GD.getDecl(); if (isa(D) || isa(D)) - llvm_unreachable("NYI"); + return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr, + /*DontDefer=*/false, IsForDefinition); llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 83138783e810..ce437bb0c4ce 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -27,6 +27,7 @@ #include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" @@ -143,6 +144,16 @@ class CIRGenModule { LValueBaseInfo *BaseInfo = nullptr, bool forPointeeType = false); + mlir::FuncOp getAddrOfCXXStructor( + clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, + mlir::FunctionType FnType = nullptr, bool DontDefer = false, + ForDefinition_t IsForDefinition = NotForDefinition) { + + return getAddrAndTypeOfCXXStructor(GD, FnInfo, FnType, DontDefer, + IsForDefinition) + .second; + } + /// A queue of (optional) vtables to consider emitting. std::vector DeferredVTables; @@ -159,6 +170,11 @@ class CIRGenModule { DeferredDeclsToEmit.emplace_back(GD); } + std::pair getAddrAndTypeOfCXXStructor( + clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, + mlir::FunctionType FnType = nullptr, bool Dontdefer = false, + ForDefinition_t IsForDefinition = NotForDefinition); + void buildTopLevelDecl(clang::Decl *decl); /// Emit code for a single global function or var decl. Forward declarations From 0b40d906910024480ad1fbaf12c9bfe68e2a8be6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:32:03 -0400 Subject: [PATCH 0358/2301] [CIR] Add codegenCXXStructor This just gets the addr of the structor and then delegates to the CIRGenFunction machinery. --- clang/lib/CIR/CIRGenCXX.cpp | 35 +++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenModule.h | 5 +++++ 2 files changed, 40 insertions(+) diff --git a/clang/lib/CIR/CIRGenCXX.cpp b/clang/lib/CIR/CIRGenCXX.cpp index e69de29bb2d1..83d3b754847f 100644 --- a/clang/lib/CIR/CIRGenCXX.cpp +++ b/clang/lib/CIR/CIRGenCXX.cpp @@ -0,0 +1,35 @@ + +//===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with C++ code generation. +// +//===----------------------------------------------------------------------===// + +// We might split this into multiple files if it gets too unwieldy + +#include "CIRGenFunction.h" +#include "CIRGenModule.h" + +#include "clang/AST/GlobalDecl.h" + +using namespace clang; +using namespace cir; + +mlir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { + const auto &FnInfo = getTypes().arrangeCXXStructorDeclaration(GD); + auto Fn = getAddrOfCXXStructor(GD, &FnInfo, /*FnType=*/nullptr, + /*DontDefer=*/true, ForDefinition); + + // TODO: setFunctionLinkage + CIRGenFunction(*this, builder).generateCode(GD, Fn, FnInfo); + + // TODO: setNonAliasAttributes + // TODO: SetLLVMFunctionAttributesForDefinition + return Fn; +} diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index ce437bb0c4ce..b81c5578f3f6 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -246,6 +246,11 @@ class CIRGenModule { bool shouldEmitFunction(clang::GlobalDecl GD); + // Produce code for this constructor/destructor. This method doesn't try to + // apply any ABI rules about which other constructors/destructors are needed + // or if they are alias to each other. + mlir::FuncOp codegenCXXStructor(clang::GlobalDecl GD); + bool supportsCOMDAT() const; void maybeSetTrivialComdat(const clang::Decl &D, mlir::Operation *Op); From be931318657badb5e22acaca3e70de16af20da28 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:33:02 -0400 Subject: [PATCH 0359/2301] [CIR][NFC] Add a missing include --- clang/lib/CIR/CIRGenCall.cpp | 2 +- clang/lib/CIR/CIRGenTypes.cpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 5000ed36b590..f52685b3b78f 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -16,6 +16,7 @@ #include "CIRGenFunctionInfo.h" #include "CIRGenTypes.h" +#include "clang/AST/DeclCXX.h" #include "clang/AST/GlobalDecl.h" #include "mlir/Dialect/CIR/IR/CIRTypes.h" @@ -738,7 +739,6 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXConstructorCall( Required); } - bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) { // We can't just disard the return value for a record type with a complex diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index d4a8de3f7930..3eed096c3e13 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -13,6 +13,7 @@ #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" +#include "clang/AST/GlobalDecl.h" #include "clang/AST/RecordLayout.h" using namespace clang; From 213b065551bfdae29aec9c6d0ea54bfc33455cd5 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:38:55 -0400 Subject: [PATCH 0360/2301] [CIR] Add buildCXXConstructorCall This is pretty simple -- * Build the list of arguments as well as the `this` pointer * call buildCallArgs on them * delegate to other buildCXXConstructorCall * assert against some C++ isms that we don't support yet * ask the ABI to add implicit args if needed * get the addr of the fn to call * arrange the call * build a normal call instruction --- clang/lib/CIR/CIRGenFunction.cpp | 87 ++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 12 +++++ 2 files changed, 99 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 7cfed2f00b3a..ac2e2932b736 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -462,6 +462,93 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, return Fn; } +static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) { + auto *CD = llvm::dyn_cast(D); + if (!(CD && CD->isCopyOrMoveConstructor()) && + !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator()) + return false; + + // We can emit a memcpy for a trivial copy or move constructor/assignment + if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) + return true; + + if (D->getParent()->isUnion() && D->isDefaulted()) + return true; + + return false; +} + +void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, + bool ForVirtualBase, + bool Delegating, + AggValueSlot ThisAVS, + const clang::CXXConstructExpr *E) { + CallArgList Args; + Address This = ThisAVS.getAddress(); + LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace(); + QualType ThisType = D->getThisType(); + LangAS ThisAS = ThisType.getTypePtr()->getPointeeType().getAddressSpace(); + mlir::Value ThisPtr = This.getPointer(); + + assert(SlotAS == ThisAS && "This edge case NYI"); + + Args.add(RValue::get(ThisPtr), D->getThisType()); + + assert(!isMemcpyEquivalentSpecialMember(D) && "NYI"); + + const FunctionProtoType *FPT = D->getType()->castAs(); + EvaluationOrder Order = E->isListInitialization() + ? EvaluationOrder::ForceLeftToRight + : EvaluationOrder::Default; + + buildCallArgs(Args, FPT, E->arguments(), E->getConstructor(), + /*ParamsToSkip*/ 0, Order); + + buildCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args, + ThisAVS.mayOverlap(), E->getExprLoc(), + ThisAVS.isSanitizerChecked()); +} + +void CIRGenFunction::buildCXXConstructorCall( + const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, + bool Delegating, Address This, CallArgList &Args, + AggValueSlot::Overlap_t Overlap, SourceLocation Loc, + bool NewPointerIsChecked) { + + const auto *ClassDecl = D->getParent(); + + if (!NewPointerIsChecked) + buildTypeCheck(CIRGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), + getContext().getRecordType(ClassDecl), CharUnits::Zero()); + + assert(!D->isTrivial() && "Trivial ctor decl NYI"); + + assert(!isMemcpyEquivalentSpecialMember(D) && "NYI"); + + bool PassPrototypeArgs = true; + + assert(!D->getInheritedConstructor() && "inheritance NYI"); + + // Insert any ABI-specific implicit constructor arguments. + CIRGenCXXABI::AddedStructorArgCounts ExtraArgs = + CGM.getCXXABI().addImplicitConstructorArgs(*this, D, Type, ForVirtualBase, + Delegating, Args); + + // Emit the call. + auto CalleePtr = CGM.getAddrOfCXXStructor(GlobalDecl(D, Type)); + const CIRGenFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall( + Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); + CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); + mlir::func::CallOp C; + buildCall(Info, Callee, ReturnValueSlot(), Args, C, false, Loc); + + assert(CGM.getCodeGenOpts().OptimizationLevel == 0 || + ClassDecl->isDynamicClass() || Type == Ctor_Base || + !CGM.getCodeGenOpts().StrictVTablePointers && + "vtable assumption loads NYI"); +} + void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, mlir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 556619e80066..0ac0ccf52fe0 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -381,6 +381,18 @@ class CIRGenFunction { void buildAggExpr(const clang::Expr *E, AggValueSlot Slot); + void buildCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, bool ForVirtualBase, + bool Delegating, AggValueSlot ThisAVS, + const clang::CXXConstructExpr *E); + + void buildCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, bool ForVirtualBase, + bool Delegating, Address This, CallArgList &Args, + AggValueSlot::Overlap_t Overlap, + clang::SourceLocation Loc, + bool NewPointerIsChecked); + // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. struct PrototypeWrapper { From 762b29f48f0dcfcb103639f9eb0923262a50f555 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:43:08 -0400 Subject: [PATCH 0361/2301] [CIR] Add buildCXXConstructExpr This takes a CXXConstructExpr and asserts against doing some C++ things we don't support and then simply calls buildCXXConstructorCall. --- clang/lib/CIR/CIRGenExpr.cpp | 33 +++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 3 +++ 2 files changed, 36 insertions(+) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index aae5c89b885e..7b8484d40e01 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -658,3 +658,36 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, } return addr; } + +void CIRGenFunction::buildCXXConstructExpr(const clang::CXXConstructExpr *E, + AggValueSlot Dest) { + assert(!Dest.isIgnored() && "Must have a destination!"); + const auto *CD = E->getConstructor(); + + assert(!E->requiresZeroInitialization() && "zero initialization NYI"); + + // If this is a call to a trivial default constructor, do nothing. + if (CD->isTrivial() && CD->isDefaultConstructor()) + assert(!CD->isTrivial() && "trivial constructors NYI"); + + assert(!E->isElidable() && "elidable constructors NYI"); + + assert(!CGM.getASTContext().getAsArrayType(E->getType()) && + "array types NYI"); + + clang::CXXCtorType Type = Ctor_Complete; + bool ForVirtualBase = false; + bool Delegating = false; + + switch (E->getConstructionKind()) { + case CXXConstructionKind::Complete: + Type = Ctor_Complete; + break; + case CXXConstructionKind::Delegating: + case CXXConstructionKind::VirtualBase: + case CXXConstructionKind::NonVirtualBase: + assert(false && "Delegating, Virtualbae and NonVirtualBase ctorkind NYI"); + } + + buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 0ac0ccf52fe0..9ba303c2e3f6 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -381,6 +381,9 @@ class CIRGenFunction { void buildAggExpr(const clang::Expr *E, AggValueSlot Slot); + void buildCXXConstructExpr(const clang::CXXConstructExpr *E, + AggValueSlot Dest); + void buildCXXConstructorCall(const clang::CXXConstructorDecl *D, clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating, AggValueSlot ThisAVS, From b7605692df6eae8b731976c37e67735ffce09492 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 14 Apr 2022 23:44:38 -0400 Subject: [PATCH 0362/2301] [CIR][NFC] Formatting... --- clang/lib/CIR/CIRGenFunction.h | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 9ba303c2e3f6..3b1795b8972b 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -598,6 +598,7 @@ class CIRGenFunction { /// address space for address space agnostic languages. Address getAllocatedAddress() const { return Addr; } }; + /// Emit the alloca and debug information for a /// local variable. Does not emit initialization or destruction. AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D); From e7142a6e9d72b4c28ffb454e9009a0b9d1344d34 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 00:11:24 -0400 Subject: [PATCH 0363/2301] [CIR] Add ElementType, Nontemporal and new make method to LValue This is technically redundant since the pointer alwyas holds the ElementType. But I guess at some point we might have opaque pointers given LLVM's transition? So follow CodeGen's lead here and just use the redundancy. --- clang/lib/CIR/CIRGenValue.h | 63 +++++++++++++++++++++++++++++++------ 1 file changed, 54 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CIRGenValue.h b/clang/lib/CIR/CIRGenValue.h index 0431064aa0f7..30391526b3b5 100644 --- a/clang/lib/CIR/CIRGenValue.h +++ b/clang/lib/CIR/CIRGenValue.h @@ -16,11 +16,15 @@ #include "Address.h" -#include "mlir/IR/Value.h" +#include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" #include "clang/AST/Type.h" + #include "llvm/ADT/PointerIntPair.h" +#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "mlir/IR/Value.h" + namespace cir { /// This trivial value class is used to represent the result of an @@ -147,12 +151,24 @@ class LValue { clang::QualType Type; clang::Qualifiers Quals; + // This flag shows if a nontemporal load/stores should be used when accessing + // this lvalue. + bool Nontemporal : 1; + private: - void Initialize(clang::CharUnits Alignment, clang::QualType Type, - LValueBaseInfo BaseInfo) { - // assert((!Alignment.isZero()) && // || Type->isIncompleteType()) && - // "initializing l-value with zero alignment!"); + void Initialize(clang::QualType Type, clang::Qualifiers Quals, + clang::CharUnits Alignment, LValueBaseInfo BaseInfo) { + assert((!Alignment.isZero() || Type->isIncompleteType()) && + "initializing l-value with zero alignment!"); + if (isGlobalReg()) + assert(ElementType == nullptr && "Glboal reg does not store elem type"); + else + assert(V.getType().cast().getPointee() == + ElementType && + "Pointer element type mismatch"); + this->Type = Type; + this->Quals = Quals; // This flag shows if a nontemporal load/stores should be used when // accessing this lvalue. const unsigned MaxAlign = 1U << 31; @@ -162,12 +178,17 @@ class LValue { assert(this->Alignment == Alignment.getQuantity() && "Alignment exceeds allowed max!"); this->BaseInfo = BaseInfo; + + // TODO: ObjC flags + // Initialize Objective-C flags. + this->Nontemporal = false; } // The alignment to use when accessing this lvalue. (For vector elements, // this is the alignment of the whole vector) unsigned Alignment; mlir::Value V; + mlir::Type ElementType; LValueBaseInfo BaseInfo; public: @@ -178,6 +199,10 @@ class LValue { bool isGlobalReg() const { return LVType == GlobalReg; } bool isMatrixElt() const { return LVType == MatrixElt; } + bool isVolatile() const { return Quals.hasVolatile(); } + + bool isNontemporal() const { return Nontemporal; } + clang::QualType getType() const { return Type; } mlir::Value getPointer() const { return V; } @@ -186,7 +211,9 @@ class LValue { return clang::CharUnits::fromQuantity(Alignment); } - Address getAddress() const { return Address(getPointer(), getAlignment()); } + Address getAddress() const { + return Address(getPointer(), ElementType, getAlignment()); + } LValueBaseInfo getBaseInfo() const { return BaseInfo; } void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; } @@ -194,9 +221,11 @@ class LValue { static LValue makeAddr(Address address, clang::QualType T, AlignmentSource Source = AlignmentSource::Type) { LValue R; - R.V = address.getPointer(); - R.Initialize(address.getAlignment(), T, LValueBaseInfo(Source)); R.LVType = Simple; + R.V = address.getPointer(); + R.ElementType = address.getElementType(); + R.Initialize(T, T.getQualifiers(), address.getAlignment(), + LValueBaseInfo(Source)); return R; } @@ -204,9 +233,25 @@ class LValue { static LValue makeAddr(Address address, clang::QualType T, LValueBaseInfo LBI) { LValue R; + R.LVType = Simple; R.V = address.getPointer(); - R.Initialize(address.getAlignment(), T, LBI); + R.ElementType = address.getElementType(); + R.Initialize(T, T.getQualifiers(), address.getAlignment(), LBI); + return R; + } + + static LValue makeAddr(Address address, clang::QualType type, + clang::ASTContext &Context, LValueBaseInfo BaseInfo) { + clang::Qualifiers qs = type.getQualifiers(); + qs.setObjCGCAttr(Context.getObjCGCAttrKind(type)); + + LValue R; R.LVType = Simple; + assert(address.getPointer().getType().cast()); + R.V = address.getPointer(); + R.ElementType = address.getElementType(); + R.Initialize(type, qs, address.getAlignment(), + BaseInfo); // TODO: TBAAInfo); return R; } From 0d437aa3713d2352db7ee3930c7657f9b56f1902 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 00:14:16 -0400 Subject: [PATCH 0364/2301] [CIR] Add a helper to covnert a temp to an rvalue This just delegates to buildLoadOfScalar and onyl supports scalar values ATM. --- clang/lib/CIR/CIRGenExpr.cpp | 67 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 26 +++++++++++++ 2 files changed, 93 insertions(+) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 7b8484d40e01..fa6ab10fbf06 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -562,6 +562,31 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return LValue::makeAddr(Address::invalid(), E->getType()); } +/// Given the address of a temporary variable, produce an r-value of its type. +RValue CIRGenFunction::convertTempToRValue(Address addr, clang::QualType type, + clang::SourceLocation loc) { + LValue lvalue = makeAddrLValue(addr, type, AlignmentSource::Decl); + switch (getEvaluationKind(type)) { + case TEK_Complex: + llvm_unreachable("NYI"); + case TEK_Aggregate: + llvm_unreachable("NYI"); + case TEK_Scalar: + return RValue::get(buildLoadOfScalar(lvalue, loc)); + } + llvm_unreachable("NYI"); +} + +/// An LValue is a candidate for having its loads and stores be made atomic if +/// we are operating under /volatile:ms *and* the LValue itself is volatile and +/// performing such an operation can be performed without a libcall. +bool CIRGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) { + if (!CGM.getLangOpts().MSVolatile) + return false; + + llvm_unreachable("NYI"); +} + /// Emit an if on a boolean condition to the specified blocks. /// FIXME: Based on the condition, this might try to simplify the codegen of /// the conditional based on the branch. TrueCount should be the number of @@ -659,6 +684,48 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, return addr; } +mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, + SourceLocation Loc) { + return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), Loc, lvalue.getBaseInfo(), + lvalue.isNontemporal()); +} + +mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) { + // Bool has a different representation in memory than in registers. + if (hasBooleanRepresentation(Ty)) { + llvm_unreachable("NYI"); + } + + return Value; +} + +mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, + QualType Ty, SourceLocation Loc, + LValueBaseInfo BaseInfo, + bool isNontemporal) { + // TODO(CIR): this has fallen out of sync with codegen + + // Atomic operations have to be done on integral types + LValue AtomicLValue = LValue::makeAddr(Addr, Ty, getContext(), BaseInfo); + if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) { + llvm_unreachable("NYI"); + } + + mlir::cir::LoadOp Load = builder.create( + getLoc(Loc), Addr.getElementType(), Addr.getPointer()); + + if (isNontemporal) { + llvm_unreachable("NYI"); + } + + // TODO: TBAA + + // TODO: buildScalarRangeCheck + + return buildFromMemory(Load, Ty); +} + void CIRGenFunction::buildCXXConstructExpr(const clang::CXXConstructExpr *E, AggValueSlot Dest) { assert(!Dest.isIgnored() && "Must have a destination!"); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 3b1795b8972b..d6757485697c 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -407,6 +407,8 @@ class CIRGenFunction { PrototypeWrapper(const clang::ObjCMethodDecl *MD) : P(MD) {} }; + bool LValueIsSuitableForInlineAtomic(LValue Src); + /// An abstract representation of regular/ObjC call/message targets. class AbstractCallee { /// The function declaration of the callee. @@ -433,6 +435,18 @@ class CIRGenFunction { } }; + RValue convertTempToRValue(Address addr, clang::QualType type, + clang::SourceLocation Loc); + + mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, + clang::SourceLocation Loc, + LValueBaseInfo BaseInfo, + bool isNontemporal = false); + + /// buildLoadOfScalar - Load a scalar value from an address, taking care to + /// appropriately convert form the memory representation to the CIR value + /// representation. The l-value must be a simple l-value. + mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); void buildCallArgs( CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range ArgRange, @@ -504,6 +518,8 @@ class CIRGenFunction { /// addressed later. RValue GetUndefRValue(clang::QualType Ty); + mlir::Value buildFromMemory(mlir::Value Value, clang::QualType Ty); + mlir::Type convertType(clang::QualType T); mlir::LogicalResult buildIfStmt(const clang::IfStmt &S); @@ -678,6 +694,16 @@ class CIRGenFunction { clang::QualType DstTy, clang::SourceLocation Loc); + LValue makeAddrLValue(Address Addr, clang::QualType T, + LValueBaseInfo BaseInfo) { + return LValue::makeAddr(Addr, T, getContext(), BaseInfo); + } + + LValue makeAddrLValue(Address Addr, clang::QualType T, + AlignmentSource Source = AlignmentSource::Type) { + return LValue::makeAddr(Addr, T, getContext(), LValueBaseInfo(Source)); + } + /// Determine whether the given initializer is trivial in the sense /// that it requires no code to be generated. bool isTrivialInitializer(const clang::Expr *Init); From 08e7c11837fdaa4c37eb3db206f0a2dc14e6d486 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 00:19:23 -0400 Subject: [PATCH 0365/2301] [CIR] Add a simple helper to get the ABI of C++ record passing --- clang/lib/CIR/CIRGenCXXABI.h | 19 +++++++++++++++++++ clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 8 ++++++++ 2 files changed, 27 insertions(+) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 7b74b247e181..d8929aa85426 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -91,6 +91,25 @@ class CIRGenCXXABI { /// Gets the mangle context. clang::MangleContext &getMangleContext() { return *MangleCtx; } + /// Specify how one should pass an argument of a record type. + enum class RecordArgABI { + /// Pass it using the normal C aggregate rules for the ABI, potentially + /// introducing extra copies and passing some or all of it in registers. + Default = 0, + + /// Pass it on the stack using its defined layout. The argument must be + /// evaluated directly into the correct stack position in the arguments + /// area, and the call machinery must not move it or introduce extra copies. + DirectInMemory, + + /// Pass it as a pointer to temporary memory. + Indirect + }; + + /// Returns how an argument of the given record type should be passed. + virtual RecordArgABI + getRecordArgABI(const clang::CXXRecordDecl *RD) const = 0; + /// Returns true if the given constructor or destructor is one of the kinds /// that the ABI says returns 'this' (only applies when called non-virtually /// for destructors). diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 0d9d4194e51a..fcb6c7506ec1 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -49,6 +49,14 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { bool NeedsVTTParameter(GlobalDecl GD) override; + RecordArgABI getRecordArgABI(const clang::CXXRecordDecl *RD) const override { + // If C++ prohibits us from making a copy, pass by address. + if (!RD->canPassInRegisters()) + return RecordArgABI::Indirect; + else + return RecordArgABI::Default; + } + bool classifyReturnType(CIRGenFunctionInfo &FI) const override; }; } // namespace From b6030c39f37dbee96f3d4afc161c46b55ed61df3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 00:20:44 -0400 Subject: [PATCH 0366/2301] [CIR] Add a simple helper for finding if structors initialize vptrs yup --- clang/lib/CIR/CIRGenCXXABI.h | 4 ++++ clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 3 +++ 2 files changed, 7 insertions(+) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index d8929aa85426..059d4ccd7ea5 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -110,6 +110,10 @@ class CIRGenCXXABI { virtual RecordArgABI getRecordArgABI(const clang::CXXRecordDecl *RD) const = 0; + /// Checks if ABI requires to initialize vptrs for given dynamic class. + virtual bool + doStructorsInitializeVPtrs(const clang::CXXRecordDecl *VTableClass) = 0; + /// Returns true if the given constructor or destructor is one of the kinds /// that the ABI says returns 'this' (only applies when called non-virtually /// for destructors). diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index fcb6c7506ec1..8bcb62e88928 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -58,6 +58,9 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { } bool classifyReturnType(CIRGenFunctionInfo &FI) const override; + bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { + return true; + } }; } // namespace From 1e498b623f48580c15d20995542fc862c0f49fd6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 00:26:21 -0400 Subject: [PATCH 0367/2301] [CIR] Add a helper class to get vtable pointers from a record --- clang/lib/CIR/CIRGenClass.cpp | 56 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 25 +++++++++++++-- 2 files changed, 79 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index e69de29bb2d1..461ce5a4ba5f 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -0,0 +1,56 @@ +//===--- CIRGenClass.cpp - Emit CIR Code for C++ classes --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with C++ code generation of classes +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" +#include "CIRGenFunction.h" + +#include "clang/AST/RecordLayout.h" + +using namespace clang; +using namespace cir; + +CIRGenFunction::VPtrsVector +CIRGenFunction::getVTablePointers(const CXXRecordDecl *VTableClass) { + CIRGenFunction::VPtrsVector VPtrsResult; + VisitedVirtualBasesSetTy VBases; + getVTablePointers(BaseSubobject(VTableClass, CharUnits::Zero()), + /*NearestVBase=*/nullptr, + /*OffsetFromNearestVBase=*/CharUnits::Zero(), + /*BaseIsNonVirtualPrimaryBase=*/false, VTableClass, VBases, + VPtrsResult); + return VPtrsResult; +} + +void CIRGenFunction::getVTablePointers(BaseSubobject Base, + const CXXRecordDecl *NearestVBase, + CharUnits OffsetFromNearestVBase, + bool BaseIsNonVirtualPrimaryBase, + const CXXRecordDecl *VTableClass, + VisitedVirtualBasesSetTy &VBases, + VPtrsVector &Vptrs) { + // If this base is a non-virtual primary base the address point has already + // been set. + if (!BaseIsNonVirtualPrimaryBase) { + // Initialize the vtable pointer for this base. + VPtr Vptr = {Base, NearestVBase, OffsetFromNearestVBase, VTableClass}; + Vptrs.push_back(Vptr); + } + + const CXXRecordDecl *RD = Base.getBase(); + + // Traverse bases. + for (const auto &I : RD->bases()) { + (void)I; + llvm_unreachable("NYI"); + } +} + diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index d6757485697c..d5e610ea7807 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -17,14 +17,16 @@ #include "CIRGenModule.h" #include "CIRGenValue.h" -#include "mlir/IR/TypeRange.h" -#include "mlir/IR/Value.h" +#include "clang/AST/BaseSubobject.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/Type.h" #include "clang/Basic/ABI.h" #include "clang/Basic/TargetInfo.h" +#include "mlir/IR/TypeRange.h" +#include "mlir/IR/Value.h" + namespace clang { class Expr; } // namespace clang @@ -680,6 +682,25 @@ class CIRGenFunction { /// expression and compare the result against zero, returning an Int1Ty value. mlir::Value evaluateExprAsBool(const clang::Expr *E); + struct VPtr { + clang::BaseSubobject Base; + const clang::CXXRecordDecl *NearestVBase; + clang::CharUnits OffsetFromNearestVBase; + const clang::CXXRecordDecl *VTableClass; + }; + + using VisitedVirtualBasesSetTy = + llvm::SmallPtrSet; + + using VPtrsVector = llvm::SmallVector; + VPtrsVector getVTablePointers(const clang::CXXRecordDecl *VTableClass); + void getVTablePointers(clang::BaseSubobject Base, + const clang::CXXRecordDecl *NearestVBase, + clang::CharUnits OffsetFromNearestVBase, + bool BaseIsNonVirtualPrimaryBase, + const clang::CXXRecordDecl *VTableClass, + VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs); + /// Emit code for the start of a function. /// \param Loc The location to be associated with the function. /// \param StartLoc The location of the function body. From 8300f0dbef86e24813e0b4d9c2fc8b21c0a82eec Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 00:33:57 -0400 Subject: [PATCH 0368/2301] [CIR] Add buildDelegateCallArg This just converts the local variables created in StartFunction into rvalues via convertTempToRValue. We assert against any of the other possible behaviors. --- clang/lib/CIR/CIRGenCall.cpp | 41 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 11 +++++++++ 2 files changed, 52 insertions(+) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index f52685b3b78f..af96078b219c 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -750,3 +750,44 @@ bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context, return ReturnType.isTriviallyCopyableType(Context); } + +static bool isInAllocaArgument(CIRGenCXXABI &ABI, QualType type) { + const auto *RD = type->getAsCXXRecordDecl(); + return RD && + ABI.getRecordArgABI(RD) == CIRGenCXXABI::RecordArgABI::DirectInMemory; +} + +void CIRGenFunction::buildDelegateCallArg(CallArgList &args, + const VarDecl *param, + SourceLocation loc) { + // StartFunction converted the ABI-lowered parameter(s) into a local alloca. + // We need to turn that into an r-value suitable for buildCall + Address local = GetAddrOfLocalVar(param); + + QualType type = param->getType(); + + if (isInAllocaArgument(CGM.getCXXABI(), type)) { + llvm_unreachable("NYI"); + } + + // GetAddrOfLocalVar returns a pointer-to-pointer for references, but the + // argument needs to be the original pointer. + if (type->isReferenceType()) { + + llvm_unreachable("NYI"); + } else if (getLangOpts().ObjCAutoRefCount) { + llvm_unreachable("NYI"); + + // For the most part, we just need to load the alloca, except that aggregate + // r-values are actually pointers to temporaries. + } else { + args.add(convertTempToRValue(local, type, loc), type); + } + + // Deactivate the cleanup for the callee-destructed param that was pushed. + if (type->isRecordType() && !CurFuncIsThunk && + type->castAs()->getDecl()->isParamDestroyedInCallee() && + param->needsDestruction(getContext())) { + llvm_unreachable("NYI"); + } +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index d5e610ea7807..b6b19f1dc2da 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -336,6 +336,10 @@ class CIRGenFunction { /// potentially set the return value. bool SawAsmBlock = false; + /// In C++, whether we are code generating a thunk. This controls whether we + /// should emit cleanups. + bool CurFuncIsThunk = false; + /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); @@ -449,6 +453,7 @@ class CIRGenFunction { /// appropriately convert form the memory representation to the CIR value /// representation. The l-value must be a simple l-value. mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); + void buildCallArgs( CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range ArgRange, @@ -740,6 +745,12 @@ class CIRGenFunction { return it->second; } + /// buildDelegatingCallArg - We are performing a delegate call; that is, the + /// current function is delegating to another one. Produce a r-value suitable + /// for passing the given parameter. + void buildDelegateCallArg(CallArgList &args, const clang::VarDecl *param, + clang::SourceLocation loc); + /// ShouldInstrumentFunction - Return true if the current function should be /// instrumented with __cyg_profile_func_* calls bool ShouldInstrumentFunction(); From fc0dc6ad8f13d394878d893aae6647d530493db1 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 00:37:10 -0400 Subject: [PATCH 0369/2301] [CIR] Add a helper that checks if we can perform the complete-to-base xform --- clang/lib/CIR/CIRGenClass.cpp | 34 ++++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 3 +++ 2 files changed, 37 insertions(+) diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index 461ce5a4ba5f..dd9021d5e8be 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -18,6 +18,40 @@ using namespace clang; using namespace cir; +/// Checks whether the given constructor is a valid subject for the +/// complete-to-base constructor delgation optimization, i.e. emitting the +/// complete constructor as a simple call to the base constructor. +bool CIRGenFunction::IsConstructorDelegationValid( + const CXXConstructorDecl *Ctor) { + + // Currently we disable the optimization for classes with virtual bases + // because (1) the address of parameter variables need to be consistent across + // all initializers but (2) the delegate function call necessarily creates a + // second copy of the parameter variable. + // + // The limiting example (purely theoretical AFAIK): + // struct A { A(int &c) { c++; } }; + // struct A : virtual A { + // B(int count) : A(count) { printf("%d\n", count); } + // }; + // ...although even this example could in principle be emitted as a delegation + // since the address of the parameter doesn't escape. + if (Ctor->getParent()->getNumVBases()) { + llvm_unreachable("NYI"); + } + + // We also disable the optimization for variadic functions because it's + // impossible to "re-pass" varargs. + if (Ctor->getType()->castAs()->isVariadic()) + return false; + + // FIXME: Decide if we can do a delegation of a delegating constructor. + if (Ctor->isDelegatingConstructor()) + llvm_unreachable("NYI"); + + return true; +} + CIRGenFunction::VPtrsVector CIRGenFunction::getVTablePointers(const CXXRecordDecl *VTableClass) { CIRGenFunction::VPtrsVector VPtrsResult; diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index b6b19f1dc2da..404b9a1228ba 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -687,6 +687,9 @@ class CIRGenFunction { /// expression and compare the result against zero, returning an Int1Ty value. mlir::Value evaluateExprAsBool(const clang::Expr *E); + static bool + IsConstructorDelegationValid(const clang::CXXConstructorDecl *Ctor); + struct VPtr { clang::BaseSubobject Base; const clang::CXXRecordDecl *NearestVBase; From 3d75ac4c111fcbbe82e506dea94d3a56f9de67e2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 00:39:41 -0400 Subject: [PATCH 0370/2301] [CIR] Add helper fn to initialize vtable pointers This is just stubbed out for now since we aren't yet supporting virtual classes or inheritance. --- clang/lib/CIR/CIRGenClass.cpp | 16 ++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 2 ++ 2 files changed, 18 insertions(+) diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index dd9021d5e8be..66453930c49d 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -52,6 +52,22 @@ bool CIRGenFunction::IsConstructorDelegationValid( return true; } +void CIRGenFunction::initializeVTablePointers(const CXXRecordDecl *RD) { + // Ignore classes without a vtable. + if (!RD->isDynamicClass()) + return; + + // Initialize the vtable pointers for this class and all of its bases. + if (CGM.getCXXABI().doStructorsInitializeVPtrs(RD)) + for (const auto &Vptr : getVTablePointers(RD)) { + llvm_unreachable("NYI"); + (void)Vptr; + } + + if (RD->getNumVBases()) + llvm_unreachable("NYI"); +} + CIRGenFunction::VPtrsVector CIRGenFunction::getVTablePointers(const CXXRecordDecl *VTableClass) { CIRGenFunction::VPtrsVector VPtrsResult; diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 404b9a1228ba..465feb102cd5 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -733,6 +733,8 @@ class CIRGenFunction { return LValue::makeAddr(Addr, T, getContext(), LValueBaseInfo(Source)); } + void initializeVTablePointers(const clang::CXXRecordDecl *RD); + /// Determine whether the given initializer is trivial in the sense /// that it requires no code to be generated. bool isTrivialInitializer(const clang::Expr *Init); From fe0b2fb4cd726c3bfbb8e00ff8105da4608883ce Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 01:15:03 -0400 Subject: [PATCH 0371/2301] [CIR] Add a helper to turn the saved CXXThisValue into an Address --- clang/lib/CIR/CIRGenClass.cpp | 17 +++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 16 ++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index 66453930c49d..844b0fbe4266 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -104,3 +104,20 @@ void CIRGenFunction::getVTablePointers(BaseSubobject Base, } } +Address CIRGenFunction::LoadCXXThisAddress() { + assert(CurFuncDecl && "loading 'this' without a func declaration?"); + assert(isa(CurFuncDecl)); + + // Lazily compute CXXThisAlignment. + if (CXXThisAlignment.isZero()) { + // Just use the best known alignment for the parent. + // TODO: if we're currently emitting a complete-object ctor/dtor, we can + // always use the complete-object alignment. + auto RD = cast(CurFuncDecl)->getParent(); + CXXThisAlignment = CGM.getClassPointerAlignment(RD); + } + + // Consider how to do this if we ever have multiple returns + auto Result = LoadCXXThis()->getOpResult(0); + return Address(Result, CXXThisAlignment); +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 465feb102cd5..c88bf31e7753 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -293,6 +293,14 @@ class CIRGenFunction { std::optional FnRetAlloca; // Holds the Decl for the current outermost non-closure context + mlir::Operation *CXXThisValue = nullptr; + clang::CharUnits CXXThisAlignment; + + /// The value of 'this' to sue when evaluating CXXDefaultInitExprs within this + /// expression. + Address CXXDefaultInitExprThis = Address::invalid(); + + // CurFuncDecl - Holds the Decl for the current outermost non-closure context const clang::Decl *CurFuncDecl; /// CurCodeDecl - This is the inner-most code context, which includes blocks. const clang::Decl *CurCodeDecl; @@ -709,6 +717,14 @@ class CIRGenFunction { const clang::CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs); + /// LoadCXXThis - Load the value for 'this'. This function is only valid while + /// generating code for an C++ member function. + mlir::Operation *LoadCXXThis() { + assert(CXXThisValue && "no 'this' value for this function"); + return CXXThisValue; + } + Address LoadCXXThisAddress(); + /// Emit code for the start of a function. /// \param Loc The location to be associated with the function. /// \param StartLoc The location of the function body. From b202b51da4e5f0d85beb5af19766f46ea56a3693 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 01:29:12 -0400 Subject: [PATCH 0372/2301] [CIR] Add buildDelegateCXXConstructorCall This fn just pushes the this arg first and then calls buildDelegateCallArgs. After that it calls buildCXXConstructorCall. --- clang/lib/CIR/CIRGenClass.cpp | 32 ++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 5 +++++ 2 files changed, 37 insertions(+) diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index 844b0fbe4266..7d171b2fb194 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -121,3 +121,35 @@ Address CIRGenFunction::LoadCXXThisAddress() { auto Result = LoadCXXThis()->getOpResult(0); return Address(Result, CXXThisAlignment); } + +void CIRGenFunction::buildDelegateCXXConstructorCall( + const CXXConstructorDecl *Ctor, CXXCtorType CtorType, + const FunctionArgList &Args, SourceLocation Loc) { + CallArgList DelegateArgs; + + FunctionArgList::const_iterator I = Args.begin(), E = Args.end(); + assert(I != E && "no parameters to constructor"); + + // this + Address This = LoadCXXThisAddress(); + DelegateArgs.add(RValue::get(This.getPointer()), (*I)->getType()); + ++I; + + // FIXME: The location of the VTT parameter in the parameter list is specific + // to the Itanium ABI and shouldn't be hardcoded here. + if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { + llvm_unreachable("NYI"); + } + + // Explicit arguments. + for (; I != E; ++I) { + const VarDecl *param = *I; + // FIXME: per-argument source location + buildDelegateCallArg(DelegateArgs, param, Loc); + } + + buildCXXConstructorCall(Ctor, CtorType, /*ForVirtualBase=*/false, + /*Delegating=*/true, This, DelegateArgs, + AggValueSlot::MayOverlap, Loc, + /*NewPointerIsChecked=*/true); +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index c88bf31e7753..7a53e3ab2574 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -766,6 +766,11 @@ class CIRGenFunction { return it->second; } + void buildDelegateCXXConstructorCall(const clang::CXXConstructorDecl *Ctor, + clang::CXXCtorType CtorType, + const FunctionArgList &Args, + clang::SourceLocation Loc); + /// buildDelegatingCallArg - We are performing a delegate call; that is, the /// current function is delegating to another one. Produce a r-value suitable /// for passing the given parameter. From f1a118753027ab722504f519ec9dc9595fe37069 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 00:43:28 -0400 Subject: [PATCH 0373/2301] [CIR] Add buildCtorPrologue This fn basically does nothing so far other. It initializes vtable pointers (which does nothing) and initializes member variables (which also does nothing) and then returns. However, we have to include some heavy RAII machinery to do so, which also does nothing. But there aren't really great ways to assert against doing so so far, so just include them since my next chunk of work will be member vars, member funcs and arguments anyways. --- clang/lib/CIR/CIRGenClass.cpp | 250 +++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 21 +++ 2 files changed, 271 insertions(+) diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index 7d171b2fb194..7d43c6f046ce 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -52,6 +52,256 @@ bool CIRGenFunction::IsConstructorDelegationValid( return true; } +namespace { +class FieldMemcpyizer { +public: + FieldMemcpyizer(CIRGenFunction &CGF, const CXXRecordDecl *ClassDecl, + const VarDecl *SrcRec) + : CGF(CGF), ClassDecl(ClassDecl), + // SrcRec(SrcRec), + RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)), + FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0), + LastFieldOffset(0), LastAddedFieldIndex(0) { + (void)SrcRec; + } + + bool isMemcpyableField(FieldDecl *F) const { + // Never memcpy fields when we are adding poised paddings. + if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding) + return false; + Qualifiers Qual = F->getType().getQualifiers(); + if (Qual.hasVolatile() || Qual.hasObjCLifetime()) + return false; + + return true; + } + + void addMemcpyableField(FieldDecl *F) { + if (F->isZeroSize(CGF.getContext())) + return; + if (!FirstField) + addInitialField(F); + else + addNextField(F); + } + + CharUnits getMemcpySize(uint64_t FirstByteOffset) const { + ASTContext &Ctx = CGF.getContext(); + unsigned LastFieldSize = + LastField->isBitField() + ? LastField->getBitWidthValue() + : Ctx.toBits( + Ctx.getTypeInfoDataSizeInChars(LastField->getType()).Width); + uint64_t MemcpySizeBits = LastFieldOffset + LastFieldSize - + FirstByteOffset + Ctx.getCharWidth() - 1; + CharUnits MemcpySize = Ctx.toCharUnitsFromBits(MemcpySizeBits); + return MemcpySize; + } + + void buildMemcpy() { + // Give the subclass a chance to bail out if it feels the memcpy isn't worth + // it (e.g. Hasn't aggregated enough data). + if (!FirstField) { + return; + } + + llvm_unreachable("NYI"); + } + + void reset() { FirstField = nullptr; } + +protected: + CIRGenFunction &CGF; + const CXXRecordDecl *ClassDecl; + +private: + void buildMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) { + llvm_unreachable("NYI"); + } + + void addInitialField(FieldDecl *F) { + FirstField = F; + LastField = F; + FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex()); + LastFieldOffset = FirstFieldOffset; + LastAddedFieldIndex = F->getFieldIndex(); + } + + void addNextField(FieldDecl *F) { + // For the most part, the following invariant will hold: + // F->getFieldIndex() == LastAddedFieldIndex + 1 + // The one exception is that Sema won't add a copy-initializer for an + // unnamed bitfield, which will show up here as a gap in the sequence. + assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 && + "Cannot aggregate fields out of order."); + LastAddedFieldIndex = F->getFieldIndex(); + + // The 'first' and 'last' fields are chosen by offset, rather than field + // index. This allows the code to support bitfields, as well as regular + // fields. + uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex()); + if (FOffset < FirstFieldOffset) { + FirstField = F; + FirstFieldOffset = FOffset; + } else if (FOffset >= LastFieldOffset) { + LastField = F; + LastFieldOffset = FOffset; + } + } + + // const VarDecl *SrcRec; + const ASTRecordLayout &RecLayout; + FieldDecl *FirstField; + FieldDecl *LastField; + uint64_t FirstFieldOffset, LastFieldOffset; + unsigned LastAddedFieldIndex; +}; + +class ConstructorMemcpyizer : public FieldMemcpyizer { +private: + /// Get source argument for copy constructor. Returns null if not a copy + /// constructor. + static const VarDecl *getTrivialCopySource(CIRGenFunction &CGF, + const CXXConstructorDecl *CD, + FunctionArgList &Args) { + if (CD->isCopyOrMoveConstructor() && CD->isDefaulted()) + llvm_unreachable("NYI"); + + return nullptr; + } + + // Returns true if a CXXCtorInitializer represents a member initialization + // that can be rolled into a memcpy + bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const { + if (!MemcpyableCtor) + return false; + + llvm_unreachable("NYI"); + } + +public: + ConstructorMemcpyizer(CIRGenFunction &CGF, const CXXConstructorDecl *CD, + FunctionArgList &Args) + : FieldMemcpyizer(CGF, CD->getParent(), + getTrivialCopySource(CGF, CD, Args)), + MemcpyableCtor(CD->isDefaulted() && CD->isCopyOrMoveConstructor() && + CGF.getLangOpts().getGC() == LangOptions::NonGC) {} + + void addMemberInitializer(CXXCtorInitializer *MemberInit) { + if (isMemberInitMemcpyable(MemberInit)) { + llvm_unreachable("NYI"); + } else { + llvm_unreachable("NYI"); + } + } + + void buildAggregatedInits() { + if (AggregatedInits.size() <= 1) { + // This memcpy is too small to be worthwhile. Fall back on default + // codegen. + if (!AggregatedInits.empty()) { + llvm_unreachable("NYI"); + } + reset(); + return; + } + + pushEHDestructors(); + buildMemcpy(); + AggregatedInits.clear(); + } + + void pushEHDestructors() { + Address ThisPtr = CGF.LoadCXXThisAddress(); + QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); + LValue LHS = CGF.makeAddrLValue(ThisPtr, RecordTy); + (void)LHS; + + for (unsigned i = 0; i < AggregatedInits.size(); ++i) { + llvm_unreachable("NYI"); + } + } + + void finish() { buildAggregatedInits(); } + +private: + bool MemcpyableCtor; + SmallVector AggregatedInits; +}; + +} // namespace + +/// buildCtorPrologue - This routine generates necessary code to initialize base +/// classes and non-static data members belonging to this constructor. +void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, + CXXCtorType CtorType, + FunctionArgList &Args) { + if (CD->isDelegatingConstructor()) + llvm_unreachable("NYI"); + + const CXXRecordDecl *ClassDecl = CD->getParent(); + + CXXConstructorDecl::init_const_iterator B = CD->init_begin(), + E = CD->init_end(); + + // Virtual base initializers first, if any. They aren't needed if: + // - This is a base ctor variant + // - There are no vbases + // - The class is abstract, so a complete object of it cannot be constructed + // + // The check for an abstract class is necessary because sema may not have + // marked virtual base destructors referenced. + bool ConstructVBases = CtorType != Ctor_Base && + ClassDecl->getNumVBases() != 0 && + !ClassDecl->isAbstract(); + + // In the Microsoft C++ ABI, there are no constructor variants. Instead, the + // constructor of a class with virtual bases takes an additional parameter to + // conditionally construct the virtual bases. Emit that check here. + mlir::Block *BaseCtorContinueBB = nullptr; + if (ConstructVBases && + !CGM.getTarget().getCXXABI().hasConstructorVariants()) { + llvm_unreachable("NYI"); + } + + mlir::Operation *const OldThis = CXXThisValue; + for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) { + if (!ConstructVBases) + continue; + llvm_unreachable("NYI"); + } + + if (BaseCtorContinueBB) { + llvm_unreachable("NYI"); + } + + // Then, non-virtual base initializers. + for (; B != E && (*B)->isBaseInitializer(); B++) { + assert(!(*B)->isBaseVirtual()); + + if (CGM.getCodeGenOpts().StrictVTablePointers) + llvm_unreachable("NYI"); + + llvm_unreachable("NYI"); + } + + CXXThisValue = OldThis; + + initializeVTablePointers(ClassDecl); + + // And finally, initialize class members. + FieldConstructionScope FCS(*this, LoadCXXThisAddress()); + ConstructorMemcpyizer CM(*this, CD, Args); + for (; B != E; B++) { + CXXCtorInitializer *Member = (*B); + assert(!Member->isBaseInitializer()); + assert(Member->isAnyMemberInitializer() && + "Delegating initializer on non-delegating constructor"); + CM.addMemberInitializer(Member); + } + CM.finish(); +} + void CIRGenFunction::initializeVTablePointers(const CXXRecordDecl *RD) { // Ignore classes without a vtable. if (!RD->isDynamicClass()) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 7a53e3ab2574..1de4aee70f49 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -695,6 +695,9 @@ class CIRGenFunction { /// expression and compare the result against zero, returning an Int1Ty value. mlir::Value evaluateExprAsBool(const clang::Expr *E); + void buildCtorPrologue(const clang::CXXConstructorDecl *CD, + clang::CXXCtorType Type, FunctionArgList &Args); + static bool IsConstructorDelegationValid(const clang::CXXConstructorDecl *Ctor); @@ -717,6 +720,24 @@ class CIRGenFunction { const clang::CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs); + /// A scoep within which we are constructing the fields of an object which + /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if + /// we need to evaluate the CXXDefaultInitExpr within the evaluation. + class FieldConstructionScope { + public: + FieldConstructionScope(CIRGenFunction &CGF, Address This) + : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) { + CGF.CXXDefaultInitExprThis = This; + } + ~FieldConstructionScope() { + CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis; + } + + private: + CIRGenFunction &CGF; + Address OldCXXDefaultInitExprThis; + }; + /// LoadCXXThis - Load the value for 'this'. This function is only valid while /// generating code for an C++ member function. mlir::Operation *LoadCXXThis() { From 7287212179cf8a4c777f68d8da0480501cbb2ac1 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 15 Apr 2022 00:41:23 -0400 Subject: [PATCH 0374/2301] [CIR] Add CGF::buildConstructorBody This will delegate to buildCXXConstructorCall if we are in a valid fn for doing so. If not it'll assert against a bunch of things and then eventually buildStmt. --- clang/lib/CIR/CIRGenFunction.cpp | 56 ++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 2 ++ 2 files changed, 58 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index ac2e2932b736..87d2ec7d2b9f 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -549,6 +549,62 @@ void CIRGenFunction::buildCXXConstructorCall( "vtable assumption loads NYI"); } +void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { + // TODO: EmitAsanPrologueOrEpilogue(true); + const auto *Ctor = cast(CurGD.getDecl()); + auto CtorType = CurGD.getCtorType(); + + assert((CGM.getTarget().getCXXABI().hasConstructorVariants() || + CtorType == Ctor_Complete) && + "can only generate complete ctor for this ABI"); + + // Before we go any further, try the complete->base constructor delegation + // optimization. + if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) && + CGM.getTarget().getCXXABI().hasConstructorVariants()) { + buildDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getEndLoc()); + return; + } + + const FunctionDecl *Definition = nullptr; + Stmt *Body = Ctor->getBody(Definition); + assert(Definition == Ctor && "emitting wrong constructor body"); + + // Enter the function-try-block before the constructor prologue if + // applicable. + bool IsTryBody = (Body && isa(Body)); + if (IsTryBody) + llvm_unreachable("NYI"); + + // TODO: incrementProfileCounter + + // TODO: RunClenaupCcope RunCleanups(*this); + + // TODO: in restricted cases, we can emit the vbase initializers of a + // complete ctor and then delegate to the base ctor. + + // Emit the constructor prologue, i.e. the base and member initializers. + buildCtorPrologue(Ctor, CtorType, Args); + + // Emit the body of the statement. + if (IsTryBody) + llvm_unreachable("NYI"); + else { + // TODO: propagate this result via mlir::logical result. Just unreachable + // now just to have it handled. + if (mlir::failed(buildStmt(Body, true))) + llvm_unreachable("NYI"); + } + + // Emit any cleanup blocks associated with the member or base initializers, + // which inlcudes (along the exceptional path) the destructors for those + // members and bases that were fully constructed. + /// TODO: RunCleanups.ForceCleanup(); + + if (IsTryBody) + llvm_unreachable("NYI"); +} + void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, mlir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 1de4aee70f49..dddab613f32f 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -698,6 +698,8 @@ class CIRGenFunction { void buildCtorPrologue(const clang::CXXConstructorDecl *CD, clang::CXXCtorType Type, FunctionArgList &Args); + void buildConstructorBody(FunctionArgList &Args); + static bool IsConstructorDelegationValid(const clang::CXXConstructorDecl *Ctor); From fa20f5e964662d3fcdb2cc34e00e07cdf80fd54f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 18:22:15 -0400 Subject: [PATCH 0375/2301] [CIR] Clean up some assert paths and edge cases for GetOrCreateCIRFn --- clang/lib/CIR/CIRGenModule.cpp | 16 ++++++++++++---- clang/lib/CIR/CIRGenModule.h | 5 +++++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 9324a24f2eca..a171b020ff23 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -523,12 +523,20 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // Lookup the entry, lazily creating it if necessary. mlir::Operation *Entry = GetGlobalValue(MangledName); if (Entry) { - // TODO: WeakRefReferences - // TODO: Handle dropped DLL attributes. - // TODO: If there are two attempts to define the same mangled name, issue an - // error. + if (WeakRefReferences.erase(Entry)) { + llvm_unreachable("NYI"); + } + // Handle dropped DLL attributes. + if (D && !D->hasAttr() && !D->hasAttr()) { + // TODO(CIR): Entry->setDLLStorageClass + setDSOLocal(Entry); + } + + // TODO(CIR): If there are two attempts to define the same mangled name, + // issue an error. auto Fn = cast(Entry); + if (Fn && Fn.getFunctionType() == Ty) { return Fn; } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index b81c5578f3f6..6123c3df2d12 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -88,6 +88,11 @@ class CIRGenModule { /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; + // A set of references that have only been set via a weakref so far. This is + // used to remove the weak of the reference if we ever see a direct reference + // or a definition. + llvm::SmallPtrSet WeakRefReferences; + /// ------- /// Declaring variables /// ------- From 7bc152bae6d7f8836e141c8d8d2ee3f56d9fa44d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 18:40:55 -0400 Subject: [PATCH 0376/2301] [CIR] Check and error out if we are repeatedly defining the same fn --- clang/lib/CIR/CIRGenModule.cpp | 26 ++++++++++++++++++++++++-- clang/lib/CIR/CIRGenModule.h | 8 ++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index a171b020ff23..bfd803bd0722 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -496,6 +496,15 @@ void CIRGenModule::setDSOLocal(mlir::Operation *Op) const { // TODO: Op->setDSOLocal } +bool CIRGenModule::lookupRepresentativeDecl(StringRef MangledName, + GlobalDecl &Result) const { + auto Res = Manglings.find(MangledName); + if (Res == Manglings.end()) + return false; + Result = Res->getValue(); + return true; +} + /// GetOrCreateCIRFunction - If the specified mangled name is not in the module, /// create and return a CIR Function with the specified type. If there is /// something in the module with the specified name, return it potentially @@ -533,9 +542,22 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( setDSOLocal(Entry); } - // TODO(CIR): If there are two attempts to define the same mangled name, - // issue an error. + // If there are two attempts to define the same mangled name, issue an + // error. auto Fn = cast(Entry); + if (IsForDefinition && Fn && !Fn.isDeclaration()) { + GlobalDecl OtherGD; + // CHeck that GD is not yet in DiagnosedConflictingDefinitions is required + // to make sure that we issue and error only once. + if (lookupRepresentativeDecl(MangledName, OtherGD) && + (GD.getCanonicalDecl().getDecl()) && + DiagnosedConflictingDefinitions.insert(GD).second) { + getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name) + << MangledName; + getDiags().Report(OtherGD.getDecl()->getLocation(), + diag::note_previous_definition); + } + } if (Fn && Fn.getFunctionType() == Ty) { return Fn; diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 6123c3df2d12..5c158063d8d2 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -97,6 +97,11 @@ class CIRGenModule { /// Declaring variables /// ------- + /// Set of global decls for which we already diagnosed mangled name conflict. + /// Required to not issue a warning (on a mangling conflict) multiple times + /// for the same decl. + llvm::DenseSet DiagnosedConflictingDefinitions; + public: mlir::ModuleOp getModule() const { return theModule; } mlir::OpBuilder &getBuilder() { return builder; } @@ -256,6 +261,9 @@ class CIRGenModule { // or if they are alias to each other. mlir::FuncOp codegenCXXStructor(clang::GlobalDecl GD); + bool lookupRepresentativeDecl(llvm::StringRef MangledName, + clang::GlobalDecl &Result) const; + bool supportsCOMDAT() const; void maybeSetTrivialComdat(const clang::Decl &D, mlir::Operation *Op); From f0eb0db21c6327ae0f00f4f7fbda61cd3c049834 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 18:44:48 -0400 Subject: [PATCH 0377/2301] [CIR] Reset the MangleContext when creating a new CGFn unelss opted out The ManglingContext needs set to a new function for each CGFn created except in some specific cases (which we don't use yet and thus defaults to false). --- clang/lib/CIR/CIRGenFunction.cpp | 11 +++++++++-- clang/lib/CIR/CIRGenFunction.h | 6 +++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 87d2ec7d2b9f..e2e5d45cee67 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -24,9 +24,16 @@ using namespace cir; using namespace clang; using namespace mlir::cir; -CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, mlir::OpBuilder &builder) +CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, mlir::OpBuilder &builder, + bool suppressNewContext) : CGM{CGM}, builder(builder), CurFuncDecl(nullptr), - SanOpts(CGM.getLangOpts().Sanitize), ShouldEmitLifetimeMarkers(false) {} + SanOpts(CGM.getLangOpts().Sanitize), ShouldEmitLifetimeMarkers(false) { + if (!suppressNewContext) + CGM.getCXXABI().getMangleContext().startNewFunction(); + // TODO(CIR): EHStack.setCGF(this); + + // TODO(CIR): SetFastMathFlags(CurFPFeatures); +} clang::ASTContext &CIRGenFunction::getContext() const { return CGM.getASTContext(); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index dddab613f32f..1382fd7e5f84 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -359,7 +359,8 @@ class CIRGenFunction { return getEvaluationKind(T) == TEK_Aggregate; } - CIRGenFunction(CIRGenModule &CGM, mlir::OpBuilder &builder); + CIRGenFunction(CIRGenModule &CGM, mlir::OpBuilder &builder, + bool suppressNewContext = false); CIRGenTypes &getTypes() const { return CGM.getTypes(); } @@ -774,6 +775,9 @@ class CIRGenFunction { void initializeVTablePointers(const clang::CXXRecordDecl *RD); + void buildInitializerForField(clang::FieldDecl *Field, LValue LHS, + clang::Expr *Init); + /// Determine whether the given initializer is trivial in the sense /// that it requires no code to be generated. bool isTrivialInitializer(const clang::Expr *Init); From 838b705ff43b430ebae8a67c8850a43759407abc Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 18:47:39 -0400 Subject: [PATCH 0378/2301] [CIR] Call buildConsructorBody from generateCode when were in a ctor --- clang/lib/CIR/CIRGenFunction.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index e2e5d45cee67..bedc2e724e5f 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -410,7 +410,7 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, if (isa(FD)) llvm_unreachable("NYI"); else if (isa(FD)) - llvm_unreachable("NYI"); + buildConstructorBody(Args); else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice && FD->hasAttr()) llvm_unreachable("NYI"); From de807f83ce6fcd1a1a1d2208cad56d36b7690d25 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 18:49:51 -0400 Subject: [PATCH 0379/2301] [CIR] Restructure the asserts in NeedsVTTParameter This should have early returned from the beginning. Enabling ctors pointed out this mistake. --- clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 8bcb62e88928..0ae8d81ff84b 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -72,17 +72,22 @@ CIRGenCXXABI::AddedStructorArgs CIRGenItaniumCXXABI::getImplicitConstructorArgs( return {}; } +/// Return whether the given global decl needs a VTT parameter, which it does if +/// it's a base constructor or destructor with virtual bases. bool CIRGenItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { auto *MD = cast(GD.getDecl()); - assert(!MD->getParent()->getNumVBases() && "virtual bases NYI"); + // We don't have any virtual bases, just return early. + if (!MD->getParent()->getNumVBases()) + return false; - assert(isa(MD) && GD.getCtorType() == Ctor_Base && - "No other reason we should hit this function yet."); + // Check if we have a base constructor. if (isa(MD) && GD.getCtorType() == Ctor_Base) return true; - assert(!isa(MD) && "Destructors NYI"); + // Check if we have a base destructor. + if (isa(MD) && GD.getDtorType() == Dtor_Base) + llvm_unreachable("NYI"); return false; } From c10ff61e289a0ed62b29cfdbdf556c1f8a5ae6db Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 18:52:44 -0400 Subject: [PATCH 0380/2301] [CIR] Call buildCXXConstructExpr from CGExprAgg's ctorexpr visitor --- clang/lib/CIR/CIRGenExprAgg.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index 92f769ef3e84..a355fde3b175 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -49,9 +49,7 @@ class AggExprEmitter : public StmtVisitor { void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { AggValueSlot Slot = EnsureSlot(E->getType()); - llvm_unreachable("NYI"); - (void)CGF; - (void)Slot; + CGF.buildCXXConstructExpr(E, Slot); } void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { From f4fda769daf8e6fb6450e414bfbb005c067778ef Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 18:56:26 -0400 Subject: [PATCH 0381/2301] [CIR] Fill out buildDeclRefLValue's asserts There are numerous extra things we need to assert on here, so just add a bunch of them. --- clang/lib/CIR/CIRGenExpr.cpp | 72 +++++++++++++++++++++++++++++++++--- 1 file changed, 66 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index fa6ab10fbf06..b1a372cc1ab8 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -125,6 +125,7 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, assert(currSrcLoc && "must pass in source location"); builder.create(*currSrcLoc, Value, Addr.getPointer()); } + void CIRGenFunction::buldStoreThroughLValue(RValue Src, LValue Dst, const Decl *InitDecl) { assert(Dst.isSimple() && "only implemented simple"); @@ -135,6 +136,7 @@ void CIRGenFunction::buldStoreThroughLValue(RValue Src, LValue Dst, LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { const NamedDecl *ND = E->getDecl(); + QualType T = E->getType(); assert(E->isNonOdrUse() != NOUR_Unevaluated && "should not emit an unevaluated operand"); @@ -144,17 +146,75 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { assert(VD->getStorageClass() != SC_Register && "not implemented"); assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); assert(!E->refersToEnclosingVariableOrCapture() && "not implemented"); - assert(!(VD->hasLinkage() || VD->isStaticDataMember()) && - "not implemented"); - assert(!VD->isEscapingByref() && "not implemented"); - assert(!VD->getType()->isReferenceType() && "not implemented"); + } + + // FIXME(CIR): We should be able to assert this for FunctionDecls as well! + // FIXME(CIR): We should be able to assert this for all DeclRefExprs, not just + // those with a valid source location. + assert((ND->isUsed(false) || !isa(ND) || E->isNonOdrUse() || + !E->getLocation().isValid()) && + "Should not use decl without marking it used!"); + + if (ND->hasAttr()) { + llvm_unreachable("NYI"); + } + + if (const auto *VD = dyn_cast(ND)) { + // Check if this is a global variable + if (VD->hasLinkage() || VD->isStaticDataMember()) + llvm_unreachable("not implemented"); + + Address addr = Address::invalid(); + + // The variable should generally be present in the local decl map. + auto iter = LocalDeclMap.find(VD); + if (iter != LocalDeclMap.end()) { + addr = iter->second; + } + // Otherwise, it might be static local we haven't emitted yet for some + // reason; most likely, because it's in an outer function. + else if (VD->isStaticLocal()) { + llvm_unreachable("NYI"); + } else { + llvm_unreachable("DeclRefExpr for decl not entered in LocalDeclMap?"); + } + + // Check for OpenMP threadprivate variables. + if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && + VD->hasAttr()) { + llvm_unreachable("NYI"); + } + + // Drill into block byref variables. + bool isBlockByref = VD->isEscapingByref(); + if (isBlockByref) { + llvm_unreachable("NYI"); + } + + // Drill into reference types. + assert(!VD->getType()->isReferenceType() && "NYI"); + LValue LV = makeAddrLValue(addr, T, AlignmentSource::Decl); + assert(symbolTable.count(VD) && "should be already mapped"); + bool isLocalStorage = VD->hasLocalStorage(); + + bool NonGCable = + isLocalStorage && !VD->getType()->isReferenceType() && !isBlockByref; + + if (NonGCable) { + // TODO: nongcable + } + + bool isImpreciseLifetime = + (isLocalStorage && !VD->hasAttr()); + if (isImpreciseLifetime) + ; // TODO: LV.setARCPreciseLifetime + // TODO: setObjCGCLValueClass(getContext(), E, LV); + mlir::Value V = symbolTable.lookup(VD); assert(V && "Name lookup must succeed"); - LValue LV = LValue::makeAddr(Address(V, CharUnits::fromQuantity(4)), - VD->getType(), AlignmentSource::Decl); return LV; } From b1d7aeeb308526accbe7f6ebe80f0503fb2b1941 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:04:32 -0400 Subject: [PATCH 0382/2301] [CIR][NFC] Delete blank line at top of CIRGenCXX.cpp --- clang/lib/CIR/CIRGenCXX.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCXX.cpp b/clang/lib/CIR/CIRGenCXX.cpp index 83d3b754847f..19cfdbd936f1 100644 --- a/clang/lib/CIR/CIRGenCXX.cpp +++ b/clang/lib/CIR/CIRGenCXX.cpp @@ -1,4 +1,3 @@ - //===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. From 0ad931c8c174f373095431360b2f6684bfb29389 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:04:51 -0400 Subject: [PATCH 0383/2301] [CIR] Add buildCXXConstructors to CIRGenCXXABI This is called by buildTopLevelDecl upon finding a cxx{c,d}tor. --- clang/lib/CIR/CIRGenCXXABI.h | 3 +++ clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 19 +++++++++++++++++++ clang/lib/CIR/CIRGenModule.cpp | 3 +++ 3 files changed, 25 insertions(+) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 059d4ccd7ea5..7d911329c74c 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -91,6 +91,9 @@ class CIRGenCXXABI { /// Gets the mangle context. clang::MangleContext &getMangleContext() { return *MangleCtx; } + /// Emit constructor variants required by this ABI. + virtual void buildCXXConstructors(const clang::CXXConstructorDecl *D) = 0; + /// Specify how one should pass an argument of a record type. enum class RecordArgABI { /// Pass it using the normal C aggregate rules for the ABI, potentially diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 0ae8d81ff84b..68b7bf7cf7f8 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -58,6 +58,9 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { } bool classifyReturnType(CIRGenFunctionInfo &FI) const override; + + void buildCXXConstructors(const clang::CXXConstructorDecl *D) override; + bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { return true; } @@ -107,3 +110,19 @@ bool CIRGenItaniumCXXABI::classifyReturnType(CIRGenFunctionInfo &FI) const { assert(!RD && "RecordDecl return types NYI"); return false; } + +void CIRGenItaniumCXXABI::buildCXXConstructors(const CXXConstructorDecl *D) { + // Just make sure we're in sync with TargetCXXABI. + assert(CGM.getTarget().getCXXABI().hasConstructorVariants()); + + // The constructor used for constructing this as a base class; + // ignores virtual bases. + CGM.buildGlobal(GlobalDecl(D, Ctor_Base)); + + // The constructor used for constructing this as a complete class; + // constructs the virtual bases, then calls the base constructor. + if (!D->getParent()->isAbstract()) { + // We don't need to emit the complete ctro if the class is abstract. + CGM.buildGlobal(GlobalDecl(D, Ctor_Complete)); + } +} diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index bfd803bd0722..f2cd232dde46 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -363,6 +363,9 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { buildTopLevelDecl(childDecl); break; } + case Decl::CXXConstructor: + getCXXABI().buildCXXConstructors(cast(decl)); + break; case Decl::Record: // There's nothing to do here, we emit everything pertaining to `Record`s // lazily. From ac1f1c68cf897ea9fb27ff1ef1c0b100377adc7a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:07:06 -0400 Subject: [PATCH 0384/2301] [CIR] Add a ModuleNameHash to CGM to assert against for the UniqueInternalLinkageNames argument --- clang/lib/CIR/CIRGenModule.cpp | 13 +++++++++++-- clang/lib/CIR/CIRGenModule.h | 5 +++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index f2cd232dde46..6f94d672ac45 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -435,6 +435,15 @@ mlir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, return F; } +// Returns true if GD is a function decl with internal linkage and needs a +// unique suffix after the mangled name. +static bool isUniqueInternalLinkageDecl(GlobalDecl GD, CIRGenModule &CGM) { + assert(CGM.getModuleNameHash().empty() && + "Unique internal linkage names NYI"); + + return false; +} + static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, const NamedDecl *ND, bool OmitMultiVersionMangling = false) { @@ -445,7 +454,7 @@ static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, llvm::raw_svector_ostream Out(Buffer); MangleContext &MC = CGM.getCXXABI().getMangleContext(); - // TODO: support the module name hash + assert(CGM.getModuleNameHash().empty() && "NYI"); auto ShouldMangle = MC.shouldMangleDeclName(ND); // Explicit ignore mangling for now @@ -472,7 +481,7 @@ static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, // mangling is done to make sure that the final name can be properly // demangled. For example, for C functions without prototypes, name mangling // is not done and the unique suffix should not be appended then. - // TODO: assert(!isUniqueInternalLinkageDecl(GD, CGM) && "NYI"); + assert(!isUniqueInternalLinkageDecl(GD, CGM) && "NYI"); if (const auto *FD = dyn_cast(ND)) { assert(!FD->isMultiVersion() && "NYI"); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 5c158063d8d2..e01e805e03a4 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -57,6 +57,8 @@ class CIRGenModule { ~CIRGenModule(); + const std::string &getModuleNameHash() const { return ModuleNameHash; } + private: mutable std::unique_ptr TheTargetCIRGenInfo; @@ -81,6 +83,9 @@ class CIRGenModule { std::unique_ptr ABI; + /// Used for `UniqueInternalLinkageNames` option + std::string ModuleNameHash = ""; + /// Per-module type mapping from clang AST to CIR. CIRGenTypes genTypes; From 8d898ff14580603eb94e0467d422d8c2eba96069 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:08:49 -0400 Subject: [PATCH 0385/2301] [CIR] Opt into mangling and fix all tests This becomes relevant shortly as we attempt to mangle complete and base ctors which will collide without turning this on. --- clang/lib/CIR/CIRGenModule.cpp | 3 +-- clang/test/CIR/CodeGen/array.cpp | 6 +++--- clang/test/CIR/CodeGen/basic.cpp | 14 +++++++------- clang/test/CIR/CodeGen/goto.cpp | 6 +++--- clang/test/CIR/CodeGen/loop-scope.cpp | 4 ++-- clang/test/CIR/CodeGen/loop.cpp | 16 ++++++++-------- clang/test/CIR/CodeGen/sourcelocation.cpp | 4 ++-- clang/test/CIR/CodeGen/struct.cpp | 2 +- clang/test/CIR/CodeGen/switch.cpp | 14 +++++++------- clang/test/CIR/CodeGen/types.c | 20 ++++++++++---------- 10 files changed, 44 insertions(+), 45 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 6f94d672ac45..3b3a2ad7fdd1 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -457,8 +457,7 @@ static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, assert(CGM.getModuleNameHash().empty() && "NYI"); auto ShouldMangle = MC.shouldMangleDeclName(ND); - // Explicit ignore mangling for now - if (ShouldMangle && false) { + if (ShouldMangle) { MC.mangleName(GD.getWithDecl(ND), Out); } else { auto *II = ND->getIdentifier(); diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 369a5f6fc189..e2fc1ae88056 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -5,7 +5,7 @@ void a0() { int a[10]; } -// CHECK: func @a0() { +// CHECK: func @_Z2a0v() { // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} void a1() { @@ -13,7 +13,7 @@ void a1() { a[0] = 1; } -// CHECK: func @a1() { +// CHECK: func @_Z2a1v() { // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} // CHECK-NEXT: %1 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr @@ -26,7 +26,7 @@ int *a2() { return &a[0]; } -// CHECK: func @a2() -> !cir.ptr { +// CHECK: func @_Z2a2v() -> !cir.ptr { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 4dc39fe1312e..f85e88140ce6 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -7,7 +7,7 @@ int *p0() { return p; } -// CHECK: func @p0() -> !cir.ptr { +// CHECK: func @_Z2p0v() -> !cir.ptr { // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] // CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > @@ -18,7 +18,7 @@ int *p1() { return p; } -// CHECK: func @p1() -> !cir.ptr { +// CHECK: func @_Z2p1v() -> !cir.ptr { // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", uninitialized] // CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > @@ -34,7 +34,7 @@ int *p2() { return p; } -// CHECK: func @p2() -> !cir.ptr { +// CHECK: func @_Z2p2v() -> !cir.ptr { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] {alignment = 8 : i64} // CHECK-NEXT: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr @@ -58,13 +58,13 @@ int *p2() { void b0() { bool x = true, y = false; } -// CHECK: func @b0() { +// CHECK: func @_Z2b0v() { // CHECK: %2 = cir.cst(true) : !cir.bool // CHECK: %3 = cir.cst(false) : !cir.bool void b1(int a) { bool b = a; } -// CHECK: func @b1(%arg0: i32 loc({{.*}})) { +// CHECK: func @_Z2b1i(%arg0: i32 loc({{.*}})) { // CHECK: %2 = cir.load %0 : cir.ptr , i32 // CHECK: %3 = cir.cast(int_to_bool, %2 : i32), !cir.bool // CHECK: cir.store %3, %1 : !cir.bool, cir.ptr @@ -78,7 +78,7 @@ void if0(int a) { } } -// CHECK: func @if0(%arg0: i32 loc({{.*}})) +// CHECK: func @_Z3if0i(%arg0: i32 loc({{.*}})) // CHECK: cir.scope { // CHECK: %3 = cir.load %0 : cir.ptr , i32 // CHECK: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool @@ -106,7 +106,7 @@ void if1(int a, bool b, bool c) { } } -// CHECK: func @if1(%arg0: i32 loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) +// CHECK: func @_Z3if1ibb(%arg0: i32 loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) // CHECK: cir.scope { // CHECK: %5 = cir.load %0 : cir.ptr , i32 // CHECK: %6 = cir.cast(int_to_bool, %5 : i32), !cir.bool diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 2ac1c8718b7f..1252e02ff2b0 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -9,7 +9,7 @@ void g0(int a) { b = b + 2; } -// CHECK: func @g0 +// CHECK: func @_Z2g0i // CHECK-NEXT %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} // CHECK-NEXT %1 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} // CHECK-NEXT cir.store %arg0, %0 : i32, cir.ptr @@ -37,8 +37,8 @@ void g1(int a) { } // Make sure alloca for "y" shows up in the entry block -// CHECK: func @g1(%arg0: i32 +// CHECK: func @_Z2g1i(%arg0: i32 // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["y", cinit] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr \ No newline at end of file +// CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index 13a0292bb086..6e4346f0302a 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -9,7 +9,7 @@ void l0() { } } -// CPPSCOPE: func @l0() { +// CPPSCOPE: func @_Z2l0v() { // CPPSCOPE-NEXT: cir.scope { // CPPSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} // CPPSCOPE-NEXT: %1 = cir.alloca i32, cir.ptr , ["j", cinit] {alignment = 4 : i64} @@ -26,4 +26,4 @@ void l0() { // CSCOPE: }) { // CSCOPE-NEXT: cir.scope { -// CSCOPE-NEXT: %2 = cir.alloca i32, cir.ptr , ["j", cinit] {alignment = 4 : i64} \ No newline at end of file +// CSCOPE-NEXT: %2 = cir.alloca i32, cir.ptr , ["j", cinit] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 10463aab8268..16b29c9b8723 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -6,7 +6,7 @@ void l0() { } } -// CHECK: func @l0 +// CHECK: func @_Z2l0v // CHECK: cir.loop for(cond : { // CHECK-NEXT: %0 = cir.cst(true) : !cir.bool // CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 @@ -27,7 +27,7 @@ void l1() { } } -// CHECK: func @l1 +// CHECK: func @_Z2l1v // CHECK: cir.loop for(cond : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 @@ -64,7 +64,7 @@ void l2(bool cond) { } } -// CHECK: func @l2 +// CHECK: func @_Z2l2b // CHECK: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool @@ -134,7 +134,7 @@ void l3(bool cond) { } while (1); } -// CHECK: func @l3 +// CHECK: func @_Z2l3b // CHECK: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool @@ -201,7 +201,7 @@ void l4() { } } -// CHECK: func @l4 +// CHECK: func @_Z2l4v // CHECK: cir.loop while(cond : { // CHECK-NEXT: %4 = cir.cst(true) : !cir.bool // CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 @@ -230,7 +230,7 @@ void l5() { } while (0); } -// CHECK: func @l5() { +// CHECK: func @_Z2l5v() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %0 = cir.cst(0 : i32) : i32 @@ -255,7 +255,7 @@ void l6() { } } -// CHECK: func @l6() { +// CHECK: func @_Z2l6v() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %0 = cir.cst(true) : !cir.bool @@ -271,4 +271,4 @@ void l6() { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 6a30611ae324..e6920c72b5ea 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -14,7 +14,7 @@ int s0(int a, int b) { // CHECK: #loc2 = loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]) // CHECK: #loc3 = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) // CHECK: module { -// CHECK: func @s0(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { +// CHECK: func @_Z2s0ii(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { // CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} loc(#loc2) // CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] {alignment = 4 : i64} loc(#loc3) // CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} loc(#loc4) @@ -61,4 +61,4 @@ int s0(int a, int b) { // CHECK: #loc17 = loc("{{.*}}sourcelocation.cpp":9:9) // CHECK: #loc18 = loc(fused["{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) // CHECK: #loc19 = loc("{{.*}}sourcelocation.cpp":10:10) -// CHECK: #loc20 = loc(fused["{{.*}}sourcelocation.cpp":10:3, "{{.*}}sourcelocation.cpp":10:10]) \ No newline at end of file +// CHECK: #loc20 = loc(fused["{{.*}}sourcelocation.cpp":10:3, "{{.*}}sourcelocation.cpp":10:10]) diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 5ab67876ab8f..85635923758b 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -20,7 +20,7 @@ void baz() { // CHECK: !_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> // CHECK-NEXT: module { -// CHECK-NEXT: func @baz() { +// CHECK-NEXT: func @_Z3bazv() { // CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 497f39034647..ba32de695205 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -16,7 +16,7 @@ void sw1(int a) { } } -// CHECK: func @sw1 +// CHECK: func @_Z3sw1i // CHECK: cir.switch (%3 : i32) [ // CHECK-NEXT: case (equal, 0 : i32) { // CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 @@ -52,7 +52,7 @@ void sw2(int a) { } } -// CHECK: func @sw2 +// CHECK: func @_Z3sw2i // CHECK: cir.scope { // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["yolo", cinit] // CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["fomo", cinit] @@ -68,7 +68,7 @@ void sw3(int a) { } } -// CHECK: func @sw3 +// CHECK: func @_Z3sw3i // CHECK: cir.scope { // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.switch (%1 : i32) [ @@ -88,7 +88,7 @@ int sw4(int a) { return 0; } -// CHECK: func @sw4 +// CHECK: func @_Z3sw4i // CHECK: cir.switch (%4 : i32) [ // CHECK-NEXT: case (equal, 42 : i32) { // CHECK-NEXT: cir.scope { @@ -113,7 +113,7 @@ void sw5(int a) { } } -// CHECK: func @sw5 +// CHECK: func @_Z3sw5i // CHECK: cir.switch (%1 : i32) [ // CHECK-NEXT: case (equal, 1 : i32) { // CHECK-NEXT: cir.yield fallthrough @@ -131,7 +131,7 @@ void sw6(int a) { } } -// CHECK: func @sw6 +// CHECK: func @_Z3sw6i // CHECK: cir.switch (%1 : i32) [ // CHECK-NEXT: case (anyof, [0, 1, 2] : i32) { // CHECK-NEXT: cir.yield break @@ -153,7 +153,7 @@ void sw7(int a) { } } -// CHECK: func @sw7 +// CHECK: func @_Z3sw7i // CHECK: case (anyof, [0, 1, 2] : i32) { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index 78c9f3ce8444..e536c9b423f9 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -32,13 +32,13 @@ bool t9(bool b) { return b; } // CHECK: func @t7(%arg0: f64 loc({{.*}})) -> f64 { // CHECK: func @t8() { -// CHECK-CPP: func @t0(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK-CPP: func @t1(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK-CPP: func @t2(%arg0: i8 loc({{.*}})) -> i8 { -// CHECK-CPP: func @t3(%arg0: i8 loc({{.*}})) -> i8 { -// CHECK-CPP: func @t4(%arg0: i16 loc({{.*}})) -> i16 { -// CHECK-CPP: func @t5(%arg0: i16 loc({{.*}})) -> i16 { -// CHECK-CPP: func @t6(%arg0: f32 loc({{.*}})) -> f32 { -// CHECK-CPP: func @t7(%arg0: f64 loc({{.*}})) -> f64 { -// CHECK-CPP: func @t8() { -// CHECK-CPP: func @t9(%arg0: !cir.bool loc({{.*}})) -> !cir.bool { +// CHECK-CPP: func @_Z2t0i(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK-CPP: func @_Z2t1j(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK-CPP: func @_Z2t2c(%arg0: i8 loc({{.*}})) -> i8 { +// CHECK-CPP: func @_Z2t3h(%arg0: i8 loc({{.*}})) -> i8 { +// CHECK-CPP: func @_Z2t4s(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK-CPP: func @_Z2t5t(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK-CPP: func @_Z2t6f(%arg0: f32 loc({{.*}})) -> f32 { +// CHECK-CPP: func @_Z2t7d(%arg0: f64 loc({{.*}})) -> f64 { +// CHECK-CPP: func @_Z2t8v() { +// CHECK-CPP: func @_Z2t9b(%arg0: !cir.bool loc({{.*}})) -> !cir.bool { From 86a618c5d659cfbd485d5341c3ce1422a596fe13 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:23:02 -0400 Subject: [PATCH 0386/2301] [CIR] Assert we have constructor variants since we're doing Itanium first And also guard against the case we're looking at MSVC. --- clang/lib/CIR/CIRGenModule.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 3b3a2ad7fdd1..d7280daa9f76 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -492,7 +492,15 @@ static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, StringRef CIRGenModule::getMangledName(GlobalDecl GD) { auto CanonicalGD = GD.getCanonicalDecl(); - assert(!dyn_cast(CanonicalGD.getDecl()) && "NYI"); + + // Some ABIs don't have constructor variants. Make sure that base and complete + // constructors get mangled the same. + if (const auto *CD = dyn_cast(CanonicalGD.getDecl())) { + if (!getTarget().getCXXABI().hasConstructorVariants()) { + assert(false && "NYI"); + } + } + assert(!langOpts.CUDAIsDevice && "NYI"); // Keep the first result in the case of a mangling collision. From 1a4245b465db237ae02a0ee2667b21a34fb40202 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:25:00 -0400 Subject: [PATCH 0387/2301] [CIR] Implement the actual handling of building deferred decls This doesn't have anything testing it's functionality yet, but soon we'll have the full ctor pipeline in with ctor.cpp as it's test. --- clang/lib/CIR/CIRGenModule.cpp | 43 ++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index d7280daa9f76..5e938f10a070 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -732,8 +732,47 @@ void CIRGenModule::buildDeferred() { CurDeclsToEmit.swap(DeferredDeclsToEmit); for (auto &D : CurDeclsToEmit) { - (void)D; - llvm_unreachable("NYI"); + // We should call GetAddrOfGlobal with IsForDefinition set to true in order + // to get a Value with exactly the type we need, not something that might + // have been created for another decl with the same mangled name but + // different type. + auto *Op = GetAddrOfGlobal(D, ForDefinition); + + // In case of different address spaces, we may still get a cast, even with + // IsForDefinition equal to true. Query mangled names table to get + // GlobalValue. + if (!Op) { + Op = GetGlobalValue(getMangledName(D)); + } + + // Make sure GetGlobalValue returned non-null. + assert(Op); + + // Check to see if we've already emitted this. This is necessary for a + // couple of reasons: first, decls can end up in deferred-decls queue + // multiple times, and second, decls can end up with definitions in unusual + // ways (e.g. by an extern inline function acquiring a strong function + // redefinition). Just ignore those cases. + // TODO: Not sure what to map this to for MLIR + if (auto Fn = cast(Op)) + if (!Fn.isDeclaration()) + continue; + + // If this is OpenMP, check if it is legal to emit this global normally. + if (getLangOpts().OpenMP) { + llvm_unreachable("NYI"); + } + + // Otherwise, emit the definition and move on to the next one. + buildGlobalDefinition(D, Op); + + // If we found out that we need to emit more decls, do that recursively. + // This has the advantage that the decls are emitted in a DFS and related + // ones are close together, which is convenient for testing. + if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) { + buildDeferred(); + assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty()); + } } } From b2f00310d8348c82fe716300661e40f0d211eb30 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:27:08 -0400 Subject: [PATCH 0388/2301] [CIR] Handle {c,d}tors in buildGlobalDefinition by dispatching to buildCXXStructor This is currently stubbed out and will be implemented in the coming patches. --- clang/lib/CIR/CIRGenCXXABI.h | 4 ++++ clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 6 ++++++ clang/lib/CIR/CIRGenModule.cpp | 14 +++++++++++++- 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 7d911329c74c..254c787a2c2e 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -130,6 +130,10 @@ class CIRGenCXXABI { } virtual ~CIRGenCXXABI(); + + /// Emit a single constructor/destructor with the gien type from a C++ + /// constructor Decl. + virtual void buildCXXStructor(clang::GlobalDecl GD) = 0; }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 68b7bf7cf7f8..0fd64b9c13a1 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -61,6 +61,8 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { void buildCXXConstructors(const clang::CXXConstructorDecl *D) override; + void buildCXXStructor(clang::GlobalDecl GD) override; + bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { return true; } @@ -111,6 +113,10 @@ bool CIRGenItaniumCXXABI::classifyReturnType(CIRGenFunctionInfo &FI) const { return false; } +void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { + llvm_unreachable("NYI"); +} + void CIRGenItaniumCXXABI::buildCXXConstructors(const CXXConstructorDecl *D) { // Just make sure we're in sync with TargetCXXABI. assert(CGM.getTarget().getCXXABI().hasConstructorVariants()); diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 5e938f10a070..41ac802950e9 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -322,7 +322,19 @@ void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { return; if (const auto *Method = dyn_cast(D)) { - llvm_unreachable("NYI"); + // Make sure to emit the definition(s) before we emit the thunks. This is + // necessary for the generation of certain thunks. + if (isa(Method) || isa(Method)) + ABI->buildCXXStructor(GD); + else if (FD->isMultiVersion()) + llvm_unreachable("NYI"); + else + buildGlobalFunctionDefinition(GD, Op); + + if (Method->isVirtual()) + llvm_unreachable("NYI"); + + return; } if (FD->isMultiVersion()) From c68ce5447c7d01713c46e3473aa59f755b0b05be Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:28:50 -0400 Subject: [PATCH 0389/2301] [CIR] Implement buildCXXStructor This basically just calls some asserts and dispatches right back to the CGM which called it. --- clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 65 ++++++++++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 0fd64b9c13a1..dc74a09ca86f 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -113,8 +113,71 @@ bool CIRGenItaniumCXXABI::classifyReturnType(CIRGenFunctionInfo &FI) const { return false; } +// Find out how to cirgen the complete destructor and constructor +namespace { +enum class StructorCIRGen { Emit, RAUW, Alias, COMDAT }; +} + +static StructorCIRGen getCIRGenToUse(CIRGenModule &CGM, + const CXXMethodDecl *MD) { + if (!CGM.getCodeGenOpts().CXXCtorDtorAliases) + return StructorCIRGen::Emit; + + llvm_unreachable("Nothing else implemented yet"); +} + void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { - llvm_unreachable("NYI"); + auto *MD = cast(GD.getDecl()); + auto *CD = dyn_cast(MD); + const CXXDestructorDecl *DD = CD ? nullptr : cast(MD); + + StructorCIRGen CIRGenType = getCIRGenToUse(CGM, MD); + + if (CD ? GD.getCtorType() == Ctor_Complete + : GD.getDtorType() == Dtor_Complete) { + GlobalDecl BaseDecl; + if (CD) + BaseDecl = GD.getWithCtorType(Ctor_Base); + else + BaseDecl = GD.getWithDtorType(Dtor_Base); + + if (CIRGenType == StructorCIRGen::Alias || + CIRGenType == StructorCIRGen::COMDAT) { + llvm_unreachable("NYI"); + } + + if (CIRGenType == StructorCIRGen::RAUW) { + llvm_unreachable("NYI"); + } + } + + // The base destructor is equivalent to the base destructor of its base class + // if there is exactly one non-virtual base class with a non-trivial + // destructor, there are no fields with a non-trivial destructor, and the body + // of the destructor is trivial. + if (DD && GD.getDtorType() == Dtor_Base && + CIRGenType != StructorCIRGen::COMDAT) + llvm_unreachable("NYI"); + + // FIXME: The deleting destructor is equivalent to the selected operator + // delete if: + // * either the delete is a destroying operator delete or the destructor + // would be trivial if it weren't virtual. + // * the conversion from the 'this' parameter to the first parameter of the + // destructor is equivalent to a bitcast, + // * the destructor does not have an implicit "this" return, and + // * the operator delete has the same calling convention and CIR function + // type as the destructor. + // In such cases we should try to emit the deleting dtor as an alias to the + // selected 'operator delete'. + + mlir::FuncOp Fn = CGM.codegenCXXStructor(GD); + + if (CIRGenType == StructorCIRGen::COMDAT) { + llvm_unreachable("NYI"); + } else { + CGM.maybeSetTrivialComdat(*MD, Fn); + } } void CIRGenItaniumCXXABI::buildCXXConstructors(const CXXConstructorDecl *D) { From acaf20c2acdd309d8b4093f7b58d6646175e3d21 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:34:09 -0400 Subject: [PATCH 0390/2301] [CIR][NFC] Add header to CIRGenCXXABI.cpp --- clang/lib/CIR/CIRGenCXXABI.cpp | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCXXABI.cpp b/clang/lib/CIR/CIRGenCXXABI.cpp index b4c5de488da2..da2415c50a16 100644 --- a/clang/lib/CIR/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CIRGenCXXABI.cpp @@ -1,4 +1,15 @@ -// TODO: ADD HEADER +//===----- CirGenCXXABI.cpp - Interface to C++ ABIs -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides an abstract class for C++ code generation. Concrete subclasses +// of this implement code generation for specific C++ ABIs. +// +//===----------------------------------------------------------------------===// #include "CIRGenCXXABI.h" From cc766cf299a16b2d116c7e05510979a1ed662031 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:36:09 -0400 Subject: [PATCH 0391/2301] [CIR] Build the this param during buildFunctionArgList This is basically a big dance to do nothing more than add a decl to the paramlist and store it on the CGF. As per usual this adds a bunch of assertions to guard against uncovered situations. --- clang/lib/CIR/CIRGenCXXABI.cpp | 27 ++++++++++++++++++++++++++ clang/lib/CIR/CIRGenCXXABI.h | 9 +++++++++ clang/lib/CIR/CIRGenFunction.cpp | 6 +++++- clang/lib/CIR/CIRGenFunction.h | 10 +++++++++- clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 28 +++++++++++++++++++++++++++ 5 files changed, 78 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenCXXABI.cpp b/clang/lib/CIR/CIRGenCXXABI.cpp index da2415c50a16..5ab994227951 100644 --- a/clang/lib/CIR/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CIRGenCXXABI.cpp @@ -13,8 +13,10 @@ #include "CIRGenCXXABI.h" +#include "clang/AST/Decl.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Mangle.h" +#include "clang/AST/RecordLayout.h" using namespace cir; using namespace clang; @@ -38,3 +40,28 @@ CIRGenCXXABI::AddedStructorArgCounts CIRGenCXXABI::addImplicitConstructorArgs( } bool CIRGenCXXABI::NeedsVTTParameter(GlobalDecl GD) { return false; } + +void CIRGenCXXABI::buildThisParam(CIRGenFunction &CGF, + FunctionArgList ¶ms) { + const auto *MD = cast(CGF.CurGD.getDecl()); + + // FIXME: I'm not entirely sure I like using a fake decl just for code + // generation. Maybe we can come up with a better way? + auto *ThisDecl = + ImplicitParamDecl::Create(CGM.getASTContext(), nullptr, MD->getLocation(), + &CGM.getASTContext().Idents.get("this"), + MD->getThisType(), ImplicitParamKind::CXXThis); + params.push_back(ThisDecl); + CGF.CXXABIThisDecl = ThisDecl; + + // Compute the presumed alignment of 'this', which basically comes down to + // whether we know it's a complete object or not. + auto &Layout = CGF.getContext().getASTRecordLayout(MD->getParent()); + if (MD->getParent()->getNumVBases() == 0 || + MD->getParent()->isEffectivelyFinal() || + isThisCompleteObject(CGF.CurGD)) { + CGF.CXXABIThisAlignment = Layout.getAlignment(); + } else { + llvm_unreachable("NYI"); + } +} diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 254c787a2c2e..c3d7d4753f57 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -91,6 +91,15 @@ class CIRGenCXXABI { /// Gets the mangle context. clang::MangleContext &getMangleContext() { return *MangleCtx; } + /// Build a parameter variable suitable for 'this'. + void buildThisParam(CIRGenFunction &CGF, FunctionArgList &Params); + + /// Determine whether there's something special about the rules of the ABI + /// tell us that 'this' is a complete object within the given function. + /// Obvious common logic like being defined on a final class will have been + /// taken care of by the caller. + virtual bool isThisCompleteObject(clang::GlobalDecl GD) const = 0; + /// Emit constructor variants required by this ABI. virtual void buildCXXConstructors(const clang::CXXConstructorDecl *D) = 0; diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index bedc2e724e5f..ae9aba6eb1f8 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -894,7 +894,11 @@ clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, const auto *MD = dyn_cast(FD); if (MD && MD->isInstance()) { - llvm_unreachable("NYI"); + if (CGM.getCXXABI().HasThisReturn(GD)) + llvm_unreachable("NYI"); + else if (CGM.getCXXABI().hasMostDerivedReturn(GD)) + llvm_unreachable("NYI"); + CGM.getCXXABI().buildThisParam(*this, Args); } // The base version of an inheriting constructor whose constructed base is a diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 1382fd7e5f84..4f7a3ed8ed72 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -292,8 +292,12 @@ class CIRGenFunction { std::optional FnRetCIRTy; std::optional FnRetAlloca; - // Holds the Decl for the current outermost non-closure context + /// CXXThisDecl - When generating code for a C++ member function, this will + /// hold the implicit 'this' declaration. + clang::ImplicitParamDecl *CXXABIThisDecl = nullptr; + mlir::Operation *CXXABIThisValue = nullptr; mlir::Operation *CXXThisValue = nullptr; + clang::CharUnits CXXABIThisAlignment; clang::CharUnits CXXThisAlignment; /// The value of 'this' to sue when evaluating CXXDefaultInitExprs within this @@ -308,6 +312,10 @@ class CIRGenFunction { clang::QualType FnRetTy; mlir::FuncOp CurFn = nullptr; + /// CXXStructorImplicitParamDecl - When generating code for a constructor or + /// destructor, this will hold the implicit argument (e.g. VTT). + clang::ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr; + // The CallExpr within the current statement that the musttail attribute // applies to. nullptr if there is no 'musttail' on the current statement. const clang::CallExpr *MustTailCall = nullptr; diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index dc74a09ca86f..224bfe285c48 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -59,6 +59,34 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { bool classifyReturnType(CIRGenFunctionInfo &FI) const override; + bool isThisCompleteObject(GlobalDecl GD) const override { + // The Itanium ABI has separate complete-object vs. base-object variants of + // both constructors and destructors. + if (isa(GD.getDecl())) { + llvm_unreachable("NYI"); + } + if (isa(GD.getDecl())) { + switch (GD.getCtorType()) { + case Ctor_Complete: + return true; + + case Ctor_Base: + return false; + + case Ctor_CopyingClosure: + case Ctor_DefaultClosure: + llvm_unreachable("closure ctors in Itanium ABI?"); + + case Ctor_Comdat: + llvm_unreachable("emitting ctor comdat as function?"); + } + llvm_unreachable("bad dtor kind"); + } + + // No other kinds. + return false; + } + void buildCXXConstructors(const clang::CXXConstructorDecl *D) override; void buildCXXStructor(clang::GlobalDecl GD) override; From 842324fd16c7de61a1abb53c34c4110c11d160b0 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:40:30 -0400 Subject: [PATCH 0392/2301] [CIR] Sink an assert into a check for inheritance instead of ctor --- clang/lib/CIR/CIRGenFunction.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index ae9aba6eb1f8..89d06bdfe085 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -906,7 +906,8 @@ clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, // call the inherited constructor). bool PassedParams = true; if (const auto *CD = dyn_cast(FD)) - llvm_unreachable("NYI"); + if (auto Inherited = CD->getInheritedConstructor()) + llvm_unreachable("NYI"); if (PassedParams) { for (auto *Param : FD->parameters()) { From 92e8a2ec887eedacc889216333188b265cb0a8c8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:41:54 -0400 Subject: [PATCH 0393/2301] [CIR] Call addImplicitStructorParams from buildFunctionArgList All this does at the moment is assert that we aren't in a situation where we need a VTT. --- clang/lib/CIR/CIRGenCXXABI.h | 11 +++++++++++ clang/lib/CIR/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 15 +++++++++++++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index c3d7d4753f57..166457350ae0 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -122,6 +122,17 @@ class CIRGenCXXABI { virtual RecordArgABI getRecordArgABI(const clang::CXXRecordDecl *RD) const = 0; + /// Insert any ABI-specific implicit parameters into the parameter list for a + /// function. This generally involves extra data for constructors and + /// destructors. + /// + /// ABIs may also choose to override the return type, which has been + /// initialized with the type of 'this' if HasThisReturn(CGF.CurGD) is true or + /// the formal return type of the function otherwise. + virtual void addImplicitStructorParams(CIRGenFunction &CGF, + clang::QualType &ResTy, + FunctionArgList &Params) = 0; + /// Checks if ABI requires to initialize vptrs for given dynamic class. virtual bool doStructorsInitializeVPtrs(const clang::CXXRecordDecl *VTableClass) = 0; diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 89d06bdfe085..e315428f22b8 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -920,7 +920,7 @@ clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, } if (MD && (isa(MD) || isa(MD))) - llvm_unreachable("NYI"); + CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args); return ResTy; } diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 224bfe285c48..df73522af167 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -87,6 +87,9 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { return false; } + void addImplicitStructorParams(CIRGenFunction &CGF, QualType &ResTy, + FunctionArgList &Params) override; + void buildCXXConstructors(const clang::CXXConstructorDecl *D) override; void buildCXXStructor(clang::GlobalDecl GD) override; @@ -208,6 +211,18 @@ void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { } } +void CIRGenItaniumCXXABI::addImplicitStructorParams(CIRGenFunction &CGF, + QualType &ResTY, + FunctionArgList &Params) { + const auto *MD = cast(CGF.CurGD.getDecl()); + assert(isa(MD) || isa(MD)); + + // Check if we need a VTT parameter as well. + if (NeedsVTTParameter(CGF.CurGD)) { + llvm_unreachable("NYI"); + } +} + void CIRGenItaniumCXXABI::buildCXXConstructors(const CXXConstructorDecl *D) { // Just make sure we're in sync with TargetCXXABI. assert(CGM.getTarget().getCXXABI().hasConstructorVariants()); From 6256b6c498d4b3f28558efdb2573c74801d48d6f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:48:10 -0400 Subject: [PATCH 0394/2301] [CIR] Call buildInstanceFunctionProlog from CGF::StartFunction This only loads the `this` ptr from the saved one from the CGF. Everything else is an assertion check against unimplemented codepaths. This also adds some handling of the decls/CIR ops that represent the this ptr in various different ways. --- clang/lib/CIR/CIRGenCXXABI.h | 16 +++++++++++ clang/lib/CIR/CIRGenFunction.cpp | 40 ++++++++++++++++++++++++++- clang/lib/CIR/CIRGenFunction.h | 2 ++ clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 39 ++++++++++++++++++++++++++ 4 files changed, 96 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 166457350ae0..0ea8501751ef 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -77,10 +77,17 @@ class CIRGenCXXABI { clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating, CallArgList &Args); + clang::ImplicitParamDecl *getThisDecl(CIRGenFunction &CGF) { + return CGF.CXXABIThisDecl; + } + virtual AddedStructorArgs getImplicitConstructorArgs( CIRGenFunction &CGF, const clang::CXXConstructorDecl *D, clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating) = 0; + /// Emit the ABI-specific prolog for the function + virtual void buildInstanceFunctionProlog(CIRGenFunction &CGF) = 0; + /// Return whether the given global decl needs a VTT parameter. virtual bool NeedsVTTParameter(clang::GlobalDecl GD); @@ -91,9 +98,16 @@ class CIRGenCXXABI { /// Gets the mangle context. clang::MangleContext &getMangleContext() { return *MangleCtx; } + clang::ImplicitParamDecl *&getStructorImplicitParamDecl(CIRGenFunction &CGF) { + return CGF.CXXStructorImplicitParamDecl; + } + /// Build a parameter variable suitable for 'this'. void buildThisParam(CIRGenFunction &CGF, FunctionArgList &Params); + /// Loads the incoming C++ this pointer as it was passed by the caller. + mlir::Operation *loadIncomingCXXThis(CIRGenFunction &CGF); + /// Determine whether there's something special about the rules of the ABI /// tell us that 'this' is a complete object within the given function. /// Obvious common logic like being defined on a final class will have been @@ -151,6 +165,8 @@ class CIRGenCXXABI { virtual ~CIRGenCXXABI(); + void setCXXABIThisValue(CIRGenFunction &CGF, mlir::Operation *ThisPtr); + /// Emit a single constructor/destructor with the gien type from a C++ /// constructor Decl. virtual void buildCXXStructor(clang::GlobalDecl GD) = 0; diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index e315428f22b8..137dea0fc668 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -14,6 +14,7 @@ #include "CIRGenCXXABI.h" #include "CIRGenModule.h" +#include "clang/AST/ASTLambda.h" #include "clang/AST/ExprObjC.h" #include "clang/Basic/TargetInfo.h" @@ -469,6 +470,15 @@ mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, return Fn; } +mlir::Operation *CIRGenFunction::createLoad(const VarDecl *VD, + const char *Name) { + auto addr = GetAddrOfLocalVar(VD); + auto ret = builder.create(getLoc(VD->getLocation()), + addr.getElementType(), addr.getPointer()); + + return ret; +} + static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) { auto *CD = llvm::dyn_cast(D); if (!(CD && CD->isCopyOrMoveConstructor()) && @@ -825,7 +835,35 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, } if (D && isa(D) && cast(D)->isInstance()) { - llvm_unreachable("NYI"); + CGM.getCXXABI().buildInstanceFunctionProlog(*this); + + const auto *MD = cast(D); + if (MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call) { + llvm_unreachable("NYI"); + } else { + // Not in a lambda; just use 'this' from the method. + // FIXME: Should we generate a new load for each use of 'this'? The fast + // register allocator would be happier... + CXXThisValue = CXXABIThisValue; + } + + // Check the 'this' pointer once per function, if it's available + if (CXXABIThisValue) { + SanitizerSet SkippedChecks; + SkippedChecks.set(SanitizerKind::ObjectSize, true); + QualType ThisTy = MD->getThisType(); + (void)ThisTy; + + // If this is the call operator of a lambda with no capture-default, it + // may have a staic invoker function, which may call this operator with + // a null 'this' pointer. + if (isLambdaCallOperator(MD) && + MD->getParent()->getLambdaCaptureDefault() == LCD_None) + llvm_unreachable("NYI"); + ; + + // TODO(CIR): buildTypeCheck + } } // If any of the arguments have a variably modified type, make sure to emit diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 4f7a3ed8ed72..a5632a2f7a1c 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -419,6 +419,8 @@ class CIRGenFunction { clang::SourceLocation Loc, bool NewPointerIsChecked); + mlir::Operation *createLoad(const clang::VarDecl *VD, const char *Name); + // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. struct PrototypeWrapper { diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index df73522af167..b85486fbea9c 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -87,6 +87,8 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { return false; } + void buildInstanceFunctionProlog(CIRGenFunction &CGF) override; + void addImplicitStructorParams(CIRGenFunction &CGF, QualType &ResTy, FunctionArgList &Params) override; @@ -223,6 +225,43 @@ void CIRGenItaniumCXXABI::addImplicitStructorParams(CIRGenFunction &CGF, } } +mlir::Operation *CIRGenCXXABI::loadIncomingCXXThis(CIRGenFunction &CGF) { + return CGF.createLoad(getThisDecl(CGF), "this"); +} + +void CIRGenCXXABI::setCXXABIThisValue(CIRGenFunction &CGF, + mlir::Operation *ThisPtr) { + /// Initialize the 'this' slot. + assert(getThisDecl(CGF) && "no 'this' variable for function"); + CGF.CXXABIThisValue = ThisPtr; +} + +void CIRGenItaniumCXXABI::buildInstanceFunctionProlog(CIRGenFunction &CGF) { + // Naked functions have no prolog. + if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr()) + llvm_unreachable("NYI"); + + /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue + /// adjustments are required, because they are all handled by thunks. + setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF)); + + /// Initialize the 'vtt' slot if needed. + if (getStructorImplicitParamDecl(CGF)) { + llvm_unreachable("NYI"); + } + + /// If this is a function that the ABI specifies returns 'this', initialize + /// the return slot to this' at the start of the function. + /// + /// Unlike the setting of return types, this is done within the ABI + /// implementation instead of by clients of CIRGenCXXBI because: + /// 1) getThisValue is currently protected + /// 2) in theory, an ABI could implement 'this' returns some other way; + /// HasThisReturn only specifies a contract, not the implementation + if (HasThisReturn(CGF.CurGD)) + llvm_unreachable("NYI"); +} + void CIRGenItaniumCXXABI::buildCXXConstructors(const CXXConstructorDecl *D) { // Just make sure we're in sync with TargetCXXABI. assert(CGM.getTarget().getCXXABI().hasConstructorVariants()); From c6eb9cfc269874122b65881300bf9dc1a80cb955 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:51:14 -0400 Subject: [PATCH 0395/2301] [CIR] Add test validating the functionality of ctors! --- clang/test/CIR/CodeGen/ctor.cpp | 34 ++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index 5c1031cccc41..56165dc970c9 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -1 +1,33 @@ -// RUN: true +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Struk { + int a; + Struk() {} +}; + +void baz() { + Struk s; +} + +// CHECK: !_22struct2EStruk22 = !cir.struct<"struct.Struk", i32> +// CHECK-NEXT: module { +// CHECK-NEXT: func @_Z3bazv() +// CHECK-NEXT: %0 = cir.alloca !_22struct2EStruk22, cir.ptr , ["s", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: func @_ZN5StrukC1Ev(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: func @_ZN5StrukC2Ev(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: } From 81a9895128182c35c2e27f2885c7252941763a56 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:54:44 -0400 Subject: [PATCH 0396/2301] [CIR][NFC] Add cir annotation to a couple TODOs --- clang/lib/CIR/CIRGenExpr.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index b1a372cc1ab8..2029598f3d05 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -657,8 +657,8 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, mlir::Location loc, const Stmt *thenS, const Stmt *elseS) { - // TODO: scoped ApplyDebugLocation DL(*this, Cond); - // TODO: __builtin_unpredictable and profile counts? + // TODO(CIR): scoped ApplyDebugLocation DL(*this, Cond); + // TODO(CIR): __builtin_unpredictable and profile counts? cond = cond->IgnoreParens(); mlir::Value condV = evaluateExprAsBool(cond); mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); From 5214cae0b32dd170df74b549862e1c79f6b5d901 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 19:55:42 -0400 Subject: [PATCH 0397/2301] [CIR][NFC] Give two unused members some *really dumb* usages This is just to shut the warning up for now as they will be used shortly. --- clang/lib/CIR/CIRGenClass.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index 7d43c6f046ce..aafa24fb256e 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -222,7 +222,9 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { } } - void finish() { buildAggregatedInits(); } + void finish() { + buildAggregatedInits(); + } private: bool MemcpyableCtor; From 70360eee945f31a478e8a2a89031262da843969d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 20:02:36 -0400 Subject: [PATCH 0398/2301] [CIR] Add buildStructorSignature This does nothing at all, just exists to assert against VTTs. --- clang/lib/CIR/CIRGenCXXABI.h | 7 +++++++ clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 23 +++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 0ea8501751ef..2fe3163b2745 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -71,6 +71,13 @@ class CIRGenCXXABI { } }; + /// Build the signature of the given constructor or destructor vairant by + /// adding any required parameters. For convenience, ArgTys has been + /// initialized with the type of 'this'. + virtual AddedStructorArgCounts + buildStructorSignature(clang::GlobalDecl GD, + llvm::SmallVectorImpl &ArgTys) = 0; + AddedStructorArgCounts addImplicitConstructorArgs(CIRGenFunction &CGF, const clang::CXXConstructorDecl *D, diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index b85486fbea9c..a976ced2433f 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -59,6 +59,10 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { bool classifyReturnType(CIRGenFunctionInfo &FI) const override; + AddedStructorArgCounts + buildStructorSignature(GlobalDecl GD, + llvm::SmallVectorImpl &ArgTys) override; + bool isThisCompleteObject(GlobalDecl GD) const override { // The Itanium ABI has separate complete-object vs. base-object variants of // both constructors and destructors. @@ -146,6 +150,25 @@ bool CIRGenItaniumCXXABI::classifyReturnType(CIRGenFunctionInfo &FI) const { return false; } +CIRGenCXXABI::AddedStructorArgCounts +CIRGenItaniumCXXABI::buildStructorSignature( + GlobalDecl GD, llvm::SmallVectorImpl &ArgTys) { + auto &Context = getContext(); + + // All parameters are already in place except VTT, which goes after 'this'. + // These are clang types, so we don't need to worry about sret yet. + + // Check if we need to add a VTT parameter (which has type void **). + if ((isa(GD.getDecl()) ? GD.getCtorType() == Ctor_Base + : GD.getDtorType() == Dtor_Base) && + cast(GD.getDecl())->getParent()->getNumVBases() != 0) { + llvm_unreachable("NYI"); + (void)Context; + } + + return AddedStructorArgCounts{}; +} + // Find out how to cirgen the complete destructor and constructor namespace { enum class StructorCIRGen { Emit, RAUW, Alias, COMDAT }; From ccecf9e66738ae1968d81a7d70af72375a917bac Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 20:05:46 -0400 Subject: [PATCH 0399/2301] [CIR][NFC] Fix header for CIRGenValue.h --- clang/lib/CIR/CIRGenValue.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRGenValue.h b/clang/lib/CIR/CIRGenValue.h index 30391526b3b5..b54edbb50337 100644 --- a/clang/lib/CIR/CIRGenValue.h +++ b/clang/lib/CIR/CIRGenValue.h @@ -1,4 +1,4 @@ -//===-- CIRGenValue.h - CIRGen something TODO this desc* --------*- C++ -*-===// +//===-- CIRGenValue.h - CIRGen wrappers for mlir::Value ---------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -6,8 +6,8 @@ // //===----------------------------------------------------------------------===// // -// IDK yet -// TODO: +// These classes implement wrappers around mlir::Value in order to fully +// represent the range of values for C L- and R- values. // //===----------------------------------------------------------------------===// From 6909218068023bbd0ae90263230a245d11e63dfd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 22 Apr 2022 20:08:15 -0400 Subject: [PATCH 0400/2301] [CIR] Implement inheritingCtorHasParams If we're in inheritance situation and the ctor matches a few different cases then we have an inheriting ctro with params. This is unused atm but will be needed for inheritance. --- clang/lib/CIR/CIRGenCall.cpp | 10 ++++++++++ clang/lib/CIR/CIRGenFunction.cpp | 3 ++- clang/lib/CIR/CIRGenTypes.h | 5 +++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index af96078b219c..499bb3e7124d 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -739,6 +739,16 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXConstructorCall( Required); } +bool CIRGenTypes::inheritingCtorHasParams(const InheritedConstructor &Inherited, + CXXCtorType Type) { + + // Parameters are unnecessary if we're constructing a base class subobject and + // the inherited constructor lives in a virtual base. + return Type == Ctor_Complete || + !Inherited.getShadowDecl()->constructsVirtualBase() || + !Target.getCXXABI().hasConstructorVariants(); +} + bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) { // We can't just disard the return value for a record type with a complex diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 137dea0fc668..eea34560f6ce 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -945,7 +945,8 @@ clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, bool PassedParams = true; if (const auto *CD = dyn_cast(FD)) if (auto Inherited = CD->getInheritedConstructor()) - llvm_unreachable("NYI"); + PassedParams = + getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType()); if (PassedParams) { for (auto *Param : FD->parameters()) { diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 1fa453c1e82e..ec48d61433a3 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -145,6 +145,11 @@ class CIRGenTypes { std::string getRecordTypeName(const clang::RecordDecl *, llvm::StringRef suffix); + /// Determine if a C++ inheriting constructor should have parameters matching + /// those of its inherited constructor. + bool inheritingCtorHasParams(const clang::InheritedConstructor &Inherited, + clang::CXXCtorType Type); + /// convertTypeForMem - Convert type T into an mlir::Type. This differs from /// convertType in that it is used to convert to the memory representation for /// a type. For example, the scalar representation for _Bool is i1, but the From eec17c6fd771d575c0525666879f0688862f6f14 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 14 Mar 2022 19:32:20 -0400 Subject: [PATCH 0401/2301] [CIR] Add minimal support for Darwin aarch64 triples --- clang/lib/CIR/ABIInfo.h | 1 + clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 4 ++ clang/lib/CIR/CIRGenModule.cpp | 2 + clang/lib/CIR/TargetInfo.cpp | 81 ++++++++++++++++++++++++--- clang/test/CIR/driver.c | 1 + 5 files changed, 80 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/ABIInfo.h b/clang/lib/CIR/ABIInfo.h index 99dbac14e209..5a2e3ff56ca4 100644 --- a/clang/lib/CIR/ABIInfo.h +++ b/clang/lib/CIR/ABIInfo.h @@ -13,6 +13,7 @@ namespace cir { +class ABIArgInfo; class CIRGenCXXABI; class CIRGenFunctionInfo; class CIRGenTypes; diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index a976ced2433f..2d37098bb70d 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -137,6 +137,10 @@ bool CIRGenItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { CIRGenCXXABI *cir::CreateCIRGenItaniumCXXABI(CIRGenModule &CGM) { switch (CGM.getASTContext().getCXXABIKind()) { case TargetCXXABI::GenericItanium: + case TargetCXXABI::GenericAArch64: + case TargetCXXABI::AppleARM64: + // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits + // from ARMCXXABI. We'll have to follow suit. return new CIRGenItaniumCXXABI(CGM); default: diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 41ac802950e9..e78d95075ec7 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -79,6 +79,8 @@ using llvm::StringRef; static CIRGenCXXABI *createCXXABI(CIRGenModule &CGM) { switch (CGM.getASTContext().getCXXABIKind()) { case TargetCXXABI::GenericItanium: + case TargetCXXABI::GenericAArch64: + case TargetCXXABI::AppleARM64: return CreateCIRGenItaniumCXXABI(CGM); default: llvm_unreachable("invalid C++ ABI kind"); diff --git a/clang/lib/CIR/TargetInfo.cpp b/clang/lib/CIR/TargetInfo.cpp index d38c3992ebbf..1241a73f07e0 100644 --- a/clang/lib/CIR/TargetInfo.cpp +++ b/clang/lib/CIR/TargetInfo.cpp @@ -10,6 +10,69 @@ using namespace cir; using namespace clang; +static bool testIfIsVoidTy(QualType Ty) { + const auto *BT = Ty->getAs(); + if (!BT) + return false; + + BuiltinType::Kind k = BT->getKind(); + return k == BuiltinType::Void; +} + +//===----------------------------------------------------------------------===// +// AArch64 ABI Implementation +//===----------------------------------------------------------------------===// + +namespace { + +class AArch64ABIInfo : public ABIInfo { +public: + enum ABIKind { AAPCS = 0, DarwinPCS, Win64 }; + +private: + ABIKind Kind; + +public: + AArch64ABIInfo(CIRGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {} + +private: + ABIKind getABIKind() const { return Kind; } + bool isDarwinPCS() const { return Kind == DarwinPCS; } + + ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const; + ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic, + unsigned CallingConvention) const; + + void computeInfo(CIRGenFunctionInfo &FI) const override { + // Top leevl CIR has unlimited arguments and return types. Lowering for ABI + // specific concerns should happen during a lowering phase. Assume + // everything is direct for now. + for (CIRGenFunctionInfo::arg_iterator it = FI.arg_begin(), + ie = FI.arg_end(); + it != ie; ++it) { + if (testIfIsVoidTy(it->type)) + it->info = ABIArgInfo::getIgnore(); + else + it->info = ABIArgInfo::getDirect(CGT.ConvertType(it->type)); + } + auto RetTy = FI.getReturnType(); + if (testIfIsVoidTy(RetTy)) + FI.getReturnInfo() = ABIArgInfo::getIgnore(); + else + FI.getReturnInfo() = ABIArgInfo::getDirect(CGT.ConvertType(RetTy)); + + return; + } +}; + +class AArch64TargetCIRGenInfo : public TargetCIRGenInfo { +public: + AArch64TargetCIRGenInfo(CIRGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) + : TargetCIRGenInfo(std::make_unique(CGT, Kind)) {} +}; + +} // namespace + namespace { /// The AVX ABI leel for X86 targets. @@ -114,15 +177,6 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, assert(false && "NYI"); } -static bool testIfIsVoidTy(QualType Ty) { - const auto *BT = Ty->getAs(); - if (!BT) - return false; - - BuiltinType::Kind k = BT->getKind(); - return k == BuiltinType::Void; -} - void X86_64ABIInfo::computeInfo(CIRGenFunctionInfo &FI) const { // Top leevl CIR has unlimited arguments and return types. Lowering for ABI // specific concerns should happen during a lowering phase. Assume everything @@ -423,6 +477,15 @@ const TargetCIRGenInfo &CIRGenModule::getTargetCIRGenInfo() { switch (Triple.getArch()) { default: assert(false && "Target not yet supported!"); + case llvm::Triple::aarch64: { + AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; + assert(getTarget().getABI() == "aapcs" || + getTarget().getABI() == "darwinpcs" && + "Only Darwin supported for aarch64"); + Kind = AArch64ABIInfo::DarwinPCS; + return SetCIRGenInfo(new AArch64TargetCIRGenInfo(genTypes, Kind)); + } + case llvm::Triple::x86_64: { StringRef ABI = getTarget().getABI(); X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index 97bef90abb1f..1e805f5b4ea9 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -5,6 +5,7 @@ // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -disable-cir-passes -S -emit-cir %s -o %t.cir +// RUN: %clang -target arm64-apple-macosx12.0.0 -fclangir -S -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR // XFAIL: * From c2b0996785d680193637bac42c1de90dbc33e8fb Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 26 Apr 2022 17:58:00 -0400 Subject: [PATCH 0402/2301] [CIR] Add `buildMemberInitializer` to use from ConstructorMemcpyizer Currently we just call unreachable from buildCtorPrologue -> CM.addMemberInitializer if we have any members. Start fleshing out this pipeline by implementing some of the functions. For the most part this just delays the unreachable failure but still does not work properly. --- clang/lib/CIR/CIRGenClass.cpp | 72 +++++++++++++++++++++++++++++--- clang/lib/CIR/CIRGenExpr.cpp | 15 +++++++ clang/lib/CIR/CIRGenFunction.cpp | 10 +++++ clang/lib/CIR/CIRGenFunction.h | 11 +++++ 4 files changed, 103 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index aafa24fb256e..65417dee7581 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -157,6 +157,59 @@ class FieldMemcpyizer { unsigned LastAddedFieldIndex; }; +static void buildLValueForAnyFieldInitialization(CIRGenFunction &CGF, + CXXCtorInitializer *MemberInit, + LValue &LHS) { + FieldDecl *Field = MemberInit->getAnyMember(); + if (MemberInit->isIndirectMemberInitializer()) { + llvm_unreachable("NYI"); + } else { + LHS = CGF.buildLValueForFieldInitialization(LHS, Field); + } +} + +static void buildMemberInitializer(CIRGenFunction &CGF, + const CXXRecordDecl *ClassDecl, + CXXCtorInitializer *MemberInit, + const CXXConstructorDecl *Constructor, + FunctionArgList &Args) { + // TODO: ApplyDebugLocation + assert(MemberInit->isAnyMemberInitializer() && + "Mush have member initializer!"); + assert(MemberInit->getInit() && "Must have initializer!"); + + // non-static data member initializers + FieldDecl *Field = MemberInit->getAnyMember(); + QualType FieldType = Field->getType(); + + mlir::Operation *ThisPtr = CGF.LoadCXXThis(); + QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); + LValue LHS; + + // If a base constructor is being emitted, create an LValue that has the + // non-virtual alignment. + if (CGF.CurGD.getCtorType() == Ctor_Base) + LHS = CGF.MakeNaturalAlignPointeeAddrLValue(ThisPtr, RecordTy); + else + llvm_unreachable("NYI"); + + buildLValueForAnyFieldInitialization(CGF, MemberInit, LHS); + + // Special case: If we are in a copy or move constructor, and we are copying + // an array off PODs or classes with tirival copy constructors, ignore the AST + // and perform the copy we know is equivalent. + // FIXME: This is hacky at best... if we had a bit more explicit information + // in the AST, we could generalize it more easily. + const ConstantArrayType *Array = + CGF.getContext().getAsConstantArrayType(FieldType); + if (Array && Constructor->isDefaulted() && + Constructor->isCopyOrMoveConstructor()) { + llvm_unreachable("NYI"); + } + + CGF.buildInitializerForField(Field, LHS, MemberInit->getInit()); +} + class ConstructorMemcpyizer : public FieldMemcpyizer { private: /// Get source argument for copy constructor. Returns null if not a copy @@ -184,14 +237,18 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { FunctionArgList &Args) : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)), + ConstructorDecl(CD), MemcpyableCtor(CD->isDefaulted() && CD->isCopyOrMoveConstructor() && - CGF.getLangOpts().getGC() == LangOptions::NonGC) {} + CGF.getLangOpts().getGC() == LangOptions::NonGC), + Args(Args) {} void addMemberInitializer(CXXCtorInitializer *MemberInit) { if (isMemberInitMemcpyable(MemberInit)) { llvm_unreachable("NYI"); } else { - llvm_unreachable("NYI"); + buildAggregatedInits(); + buildMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit, + ConstructorDecl, Args); } } @@ -222,12 +279,12 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { } } - void finish() { - buildAggregatedInits(); - } + void finish() { buildAggregatedInits(); } private: + const CXXConstructorDecl *ConstructorDecl; bool MemcpyableCtor; + FunctionArgList &Args; SmallVector AggregatedInits; }; @@ -374,6 +431,11 @@ Address CIRGenFunction::LoadCXXThisAddress() { return Address(Result, CXXThisAlignment); } +void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, + Expr *Init) { + llvm_unreachable("NYI"); +} + void CIRGenFunction::buildDelegateCXXConstructorCall( const CXXConstructorDecl *Ctor, CXXCtorType CtorType, const FunctionArgList &Args, SourceLocation Loc) { diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 2029598f3d05..674a2d6c0a43 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -22,6 +22,21 @@ static mlir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { return V; } +LValue CIRGenFunction::buildLValueForField(LValue base, + const FieldDecl *field) { + llvm_unreachable("NYI"); +} + +LValue CIRGenFunction::buildLValueForFieldInitialization( + LValue Base, const clang::FieldDecl *Field) { + QualType FieldType = Field->getType(); + + if (!FieldType->isReferenceType()) + return buildLValueForField(Base, Field); + + llvm_unreachable("NYI"); +} + static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index eea34560f6ce..4c3b6d011ea6 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -622,6 +622,16 @@ void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { llvm_unreachable("NYI"); } +/// Given a value of type T* that may not be to a complete object, construct +/// an l-vlaue withi the natural pointee alignment of T. +LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Operation *Op, + QualType T) { + LValueBaseInfo BaseInfo; + CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, + /* for PointeeType= */ true); + return makeAddrLValue(Address(Op->getResult(0), Align), T, BaseInfo); +} + void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, mlir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index a5632a2f7a1c..a7e74f39bc10 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -751,6 +751,9 @@ class CIRGenFunction { Address OldCXXDefaultInitExprThis; }; + LValue MakeNaturalAlignPointeeAddrLValue(mlir::Operation *Op, + clang::QualType T); + /// LoadCXXThis - Load the value for 'this'. This function is only valid while /// generating code for an C++ member function. mlir::Operation *LoadCXXThis() { @@ -785,6 +788,14 @@ class CIRGenFunction { void initializeVTablePointers(const clang::CXXRecordDecl *RD); + LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); + + /// buildLValueForFieldInitialization - like buildLValueForField, excpet that + /// if the Field is a reference, this will return the address of the reference + /// and not the address of the value stored in the reference. + LValue buildLValueForFieldInitialization(LValue Base, + const clang::FieldDecl *Field); + void buildInitializerForField(clang::FieldDecl *Field, LValue LHS, clang::Expr *Init); From 09a0165650a9d4bd087f50c58cf6b560437fe164 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 26 Apr 2022 18:39:12 -0400 Subject: [PATCH 0403/2301] [CIR] Cover some asserts from CodeGen's ret emission --- clang/lib/CIR/CIRGenFunction.cpp | 45 +++++++++++++++++++++++++++++--- clang/lib/CIR/CIRGenFunction.h | 7 +++++ 2 files changed, 49 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 4c3b6d011ea6..d69dae8cd89e 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -201,11 +201,50 @@ mlir::Type CIRGenFunction::getCIRType(const QualType &type) { return CGM.getCIRType(type); } +/// Determine whether the function F ends with a return stmt. +static bool endsWithReturn(const Decl *F) { + const Stmt *Body = nullptr; + if (auto *FD = dyn_cast_or_null(F)) + Body = FD->getBody(); + else if (auto *OMD = dyn_cast_or_null(F)) + llvm_unreachable("NYI"); + + if (auto *CS = dyn_cast_or_null(Body)) { + auto LastStmt = CS->body_rbegin(); + if (LastStmt != CS->body_rend()) + return isa(*LastStmt); + } + return false; +} + void CIRGenFunction::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, CharUnits alignment) { - auto addr = - buildAlloca("__retval", InitStyle::uninitialized, ty, loc, alignment); - FnRetAlloca = addr; + + if (ty->isVoidType()) { + // Void type; nothing to return. + ReturnValue = Address::invalid(); + + // Count the implicit return. + if (!endsWithReturn(CurFuncDecl)) + ++NumReturnExprs; + } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) { + // TODO(CIR): Consider this implementation in CIRtoLLVM + llvm_unreachable("NYI"); + // TODO(CIR): Consider this implementation in CIRtoLLVM + } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca) { + llvm_unreachable("NYI"); + } else { + auto addr = + buildAlloca("__retval", InitStyle::uninitialized, ty, loc, alignment); + FnRetAlloca = addr; + ReturnValue = Address(addr, alignment); + + // Tell the epilog emitter to autorelease the result. We do this now so + // that various specialized functions can suppress it during their IR - + // generation + if (getLangOpts().ObjCAutoRefCount) + llvm_unreachable("NYI"); + } } mlir::LogicalResult CIRGenFunction::declare(const Decl *var, QualType ty, diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index a7e74f39bc10..b1e85e54ee54 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -288,6 +288,13 @@ class CIRGenFunction { /// CurGD - The GlobalDecl for the current function being compiled. clang::GlobalDecl CurGD; + /// ReturnValue - The temporary alloca to hold the return value. This is + /// invalid iff the function has no return value. + Address ReturnValue = Address::invalid(); + + /// Counts of the number return expressions in the function. + unsigned NumReturnExprs = 0; + clang::QualType FnRetQualTy; std::optional FnRetCIRTy; std::optional FnRetAlloca; From 74ec7117d4f4b7babf6e595780a65581e95c522b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 26 Apr 2022 11:40:23 -0700 Subject: [PATCH 0404/2301] [CIR] Fix several hacks and cleanup custom parsers for enums --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 30 +++++----- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 69 +--------------------- 2 files changed, 17 insertions(+), 82 deletions(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index fac024eb831a..e9f13cc8da35 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -515,16 +515,16 @@ def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods; -def BinOpKind_Div : I32EnumAttrCase<"Div", 2>; -def BinOpKind_Rem : I32EnumAttrCase<"Rem", 3>; -def BinOpKind_Add : I32EnumAttrCase<"Add", 4>; -def BinOpKind_Sub : I32EnumAttrCase<"Sub", 5>; -def BinOpKind_Shl : I32EnumAttrCase<"Shl", 6>; -def BinOpKind_Shr : I32EnumAttrCase<"Shr", 7>; -def BinOpKind_And : I32EnumAttrCase<"And", 8>; -def BinOpKind_Xor : I32EnumAttrCase<"Xor", 9>; -def BinOpKind_Or : I32EnumAttrCase<"Or", 10>; +def BinOpKind_Mul : I32EnumAttrCase<"Mul", 1, "mul">; +def BinOpKind_Div : I32EnumAttrCase<"Div", 2, "div">; +def BinOpKind_Rem : I32EnumAttrCase<"Rem", 3, "rem">; +def BinOpKind_Add : I32EnumAttrCase<"Add", 4, "add">; +def BinOpKind_Sub : I32EnumAttrCase<"Sub", 5, "sub">; +def BinOpKind_Shl : I32EnumAttrCase<"Shl", 6, "shl">; +def BinOpKind_Shr : I32EnumAttrCase<"Shr", 7, "shr">; +def BinOpKind_And : I32EnumAttrCase<"And", 8, "and">; +def BinOpKind_Xor : I32EnumAttrCase<"Xor", 9, "xor">; +def BinOpKind_Or : I32EnumAttrCase<"Or", 10, "or">; def BinOpKind : I32EnumAttr< "BinOpKind", @@ -560,7 +560,7 @@ def BinOp : CIR_Op<"binop", [Pure, AnyType:$lhs, AnyType:$rhs); let assemblyFormat = [{ - `(` custom($kind) `,` $lhs `,` $rhs `)` `:` type($lhs) attr-dict + `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) attr-dict }]; // Already covered by the traits @@ -618,11 +618,9 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { // SwitchOp //===----------------------------------------------------------------------===// -// FIXME: even though printed/parsed names assume lowercase, we capitalize here -// because "default" is a C++ reserved keyword and can't show up in a enum. -def CaseOpKind_DT : I32EnumAttrCase<"Default", 1>; -def CaseOpKind_EQ : I32EnumAttrCase<"Equal", 2>; -def CaseOpKind_AO : I32EnumAttrCase<"Anyof", 3>; +def CaseOpKind_DT : I32EnumAttrCase<"Default", 1, "default">; +def CaseOpKind_EQ : I32EnumAttrCase<"Equal", 2, "equal">; +def CaseOpKind_AO : I32EnumAttrCase<"Anyof", 3, "anyof">; def CaseOpKind : I32EnumAttr< "CaseOpKind", diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 7627efc613bd..28a3c96e1195 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -489,60 +489,6 @@ mlir::LogicalResult YieldOp::verify() { return mlir::success(); } -//===----------------------------------------------------------------------===// -// BinOp -//===----------------------------------------------------------------------===// - -ParseResult parseBinOpKind(OpAsmParser &parser, BinOpKindAttr &kindAttr) { - ::llvm::StringRef attrStr; - ::mlir::NamedAttrList attrStorage; - auto loc = parser.getCurrentLocation(); - - // FIXME: since a few names can't be used as enum (and, or, xor) we declared - // them in CIROps.td capitalized, but we really wanna use lower case on - // clang IR asm form. - if (parser.parseOptionalKeyword(&attrStr, - {"mul", "div", "rem", "add", "sub", "shl", - "shr", "and", "xor", "or"})) { - ::mlir::StringAttr attrVal; - ::mlir::OptionalParseResult parseResult = parser.parseOptionalAttribute( - attrVal, parser.getBuilder().getNoneType(), "kind", attrStorage); - if (parseResult.has_value()) { - if (failed(*parseResult)) - return ::mlir::failure(); - attrStr = attrVal.getValue(); - } else { - return parser.emitError( - loc, "expected string or keyword containing one of the following " - "enum values for attribute 'kind' [mul, div, rem, add, sub, " - "shl, shr, and, xor, or]"); - } - } - if (!attrStr.empty()) { - std::string attrString = attrStr.str(); - attrString[0] = attrString[0] + 'A' - 'a'; - attrStr = attrString; - auto attrOptional = ::mlir::cir::symbolizeBinOpKind(attrStr); - if (!attrOptional) - return parser.emitError(loc, "invalid ") - << "kind attribute specification: \"" << attrStr << '"'; - ; - - kindAttr = ::mlir::cir::BinOpKindAttr::get(parser.getBuilder().getContext(), - attrOptional.value()); - } - - return ::mlir::success(); -} - -void printBinOpKind(OpAsmPrinter &p, BinOp binOp, BinOpKindAttr kindAttr) { - auto caseValueStr = stringifyBinOpKind(kindAttr.getValue()); - std::string attrString = caseValueStr.str(); - attrString[0] = attrString[0] + 'a' - 'A'; - caseValueStr = attrString; - p << caseValueStr; -} - //===----------------------------------------------------------------------===// // BrOp //===----------------------------------------------------------------------===// @@ -622,9 +568,7 @@ parseSwitchOp(OpAsmParser &parser, // 1. Get the case kind // 2. Get the value (next in list) - // FIXME: since a few names can't be used as enum (default) we declared - // them in CIROps.td capitalized, but we really wanna use lower case on - // clang IR asm form. + // These needs to be in sync with CIROps.td if (parser.parseOptionalKeyword(&attrStr, {"default", "equal", "anyof"})) { ::mlir::StringAttr attrVal; ::mlir::OptionalParseResult parseResult = parser.parseOptionalAttribute( @@ -642,10 +586,7 @@ parseSwitchOp(OpAsmParser &parser, "enum values for attribute 'kind' [default, equal, anyof]"); } - std::string attrString = attrStr.str(); - attrString[0] = attrString[0] + 'A' - 'a'; - attrStr = attrString; - auto attrOptional = ::mlir::cir::symbolizeCaseOpKind(attrStr); + auto attrOptional = ::mlir::cir::symbolizeCaseOpKind(attrStr.str()); if (!attrOptional) return parser.emitError(loc, "invalid ") << "kind attribute specification: \"" << attrStr << '"'; @@ -758,11 +699,7 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, "unknown case"); // Case kind - auto caseValueStr = stringifyCaseOpKind(kind); - std::string attrString = caseValueStr.str(); - attrString[0] = attrString[0] + 'a' - 'A'; - caseValueStr = attrString; - p << caseValueStr; + p << stringifyCaseOpKind(kind); // Case value switch (kind) { From 6c8e5ecd3f0b1c0678ad7db8f50927d7f1790cb3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 26 Apr 2022 14:27:52 -0700 Subject: [PATCH 0405/2301] [CIR][MergeCleanups] Simplify loop condition region when possible --- clang/test/CIR/CodeGen/loop.cpp | 33 ++---------- clang/test/CIR/Transforms/merge-cleanups.cir | 46 ++++++++++++---- .../Dialect/CIR/Transforms/MergeCleanups.cpp | 54 ++++++++++++++++++- 3 files changed, 94 insertions(+), 39 deletions(-) diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 16b29c9b8723..cab759221a28 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -8,12 +8,7 @@ void l0() { // CHECK: func @_Z2l0v // CHECK: cir.loop for(cond : { -// CHECK-NEXT: %0 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -85,12 +80,7 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %3 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -155,12 +145,7 @@ void l3(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: %3 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: // CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -203,12 +188,7 @@ void l4() { // CHECK: func @_Z2l4v // CHECK: cir.loop while(cond : { -// CHECK-NEXT: %4 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -258,12 +238,7 @@ void l6() { // CHECK: func @_Z2l6v() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index b15be865f1d1..d040c792b8bd 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -58,7 +58,7 @@ module { cir.return } - func.func @l7() { + func.func @l0() { cir.scope { cir.loop while(cond : { %0 = cir.cst(true) : !cir.bool @@ -77,6 +77,26 @@ module { } cir.return } + + func.func @l1() { + cir.scope { + cir.loop while(cond : { + %0 = cir.cst(false) : !cir.bool + cir.brcond %0 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield + }, step : { + cir.yield + }) { + cir.br ^bb1 + ^bb1: + cir.return + } + } + cir.return + } } // CHECK: cir.switch (%4 : i32) [ @@ -116,15 +136,23 @@ module { // CHECK-NEXT: } // CHECK-NEXT: ] -// CHECK: func @l7 +// CHECK: func @l0 +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.loop while(cond : { +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: }, step : { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }) { +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK: func @l1 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.cst(true) : !cir.bool -// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -132,4 +160,4 @@ module { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } diff --git a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp index 99c9deb6171e..5117f5aaaf3a 100644 --- a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp @@ -83,6 +83,53 @@ struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { return Changed ? success() : failure(); } + mlir::LogicalResult + checkAndRewriteLoopCond(mlir::Region &condRegion, + mlir::PatternRewriter &rewriter) const { + SmallVector opsToSimplify; + condRegion.walk([&](Operation *op) { + if (isa(op)) + opsToSimplify.push_back(op); + }); + + // Blocks should only contain one "yield" operation. + auto trivialYield = [&](Block *b) { + if (&b->front() != &b->back()) + return false; + return isa(b->getTerminator()); + }; + + if (opsToSimplify.size() != 1) + return failure(); + BrCondOp brCondOp = cast(opsToSimplify[0]); + + // TODO: leverage SCCP to get improved results. + auto cstOp = dyn_cast(brCondOp.getCond().getDefiningOp()); + if (!cstOp || !cstOp.getValue().isa() || + !trivialYield(brCondOp.getDestTrue()) || + !trivialYield(brCondOp.getDestFalse())) + return failure(); + + // If the condition is constant, no need to use brcond, just yield + // properly, "yield" for false and "yield continue" for true. + auto boolAttr = cstOp.getValue().cast(); + auto *falseBlock = brCondOp.getDestFalse(); + auto *trueBlock = brCondOp.getDestTrue(); + auto *currBlock = brCondOp.getOperation()->getBlock(); + if (boolAttr.getValue()) { + rewriter.eraseOp(opsToSimplify[0]); + rewriter.mergeBlocks(trueBlock, currBlock); + falseBlock->erase(); + } else { + rewriter.eraseOp(opsToSimplify[0]); + rewriter.mergeBlocks(falseBlock, currBlock); + trueBlock->erase(); + } + if (cstOp.use_empty()) + rewriter.eraseOp(cstOp); + return success(); + } + mlir::LogicalResult matchAndRewrite(ScopeLikeOpTy op, mlir::PatternRewriter &rewriter) const override { @@ -138,7 +185,12 @@ mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( template <> mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( PatternRewriter &rewriter, cir::LoopOp loopOp) const { - return checkAndRewriteRegion(loopOp.getBody(), rewriter); + bool regionChanged = false; + if (checkAndRewriteRegion(loopOp.getBody(), rewriter).succeeded()) + regionChanged = true; + if (checkAndRewriteLoopCond(loopOp.getCond(), rewriter).succeeded()) + regionChanged = true; + return regionChanged ? success() : failure(); } void getMergeCleanupsPatterns(RewritePatternSet &results, From 8326d27031d6cf7975ea10621be8f36b6f8c26ff Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 26 Apr 2022 18:46:40 -0400 Subject: [PATCH 0406/2301] [CIR] build the fn in buildGlobal if it has a body This doesn't seem to make a difference yet, but codegen does it. --- clang/lib/CIR/CIRGenModule.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index e78d95075ec7..56837af15906 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -239,6 +239,16 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { if (!FD->doesThisDeclarationHaveABody()) { if (!FD->doesDeclarationForceExternallyVisibleDefinition()) return; + + llvm::StringRef MangledName = getMangledName(GD); + + // Compute the function info and CIR type. + const auto &FI = getTypes().arrangeGlobalDeclaration(GD); + mlir::Type Ty = getTypes().GetFunctionType(FI); + + GetOrCreateCIRFunction(MangledName, Ty, GD, /*ForVTable=*/false, + /*DontDefer=*/false); + return; } } else { llvm_unreachable("NYI"); From bd4ae6e12f85ff1b67792658247060d2e7d38116 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 26 Apr 2022 15:53:51 -0700 Subject: [PATCH 0407/2301] [CIR][MergeCleanups][NFC] Simplify change detection a tiny bit --- .../Dialect/CIR/Transforms/MergeCleanups.cpp | 41 +++++++++---------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp index 5117f5aaaf3a..9443b36555f6 100644 --- a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp @@ -68,7 +68,7 @@ struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { candidateBlocks.insert(ret.getOperation()->getBlock()); } - bool Changed = false; + auto changed = mlir::failure(); for (auto *mergeSource : candidateBlocks) { if (!(mergeSource->hasNoSuccessors() && mergeSource->hasOneUse())) continue; @@ -77,10 +77,10 @@ struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { continue; rewriter.eraseOp(mergeDest->getTerminator()); rewriter.mergeBlocks(mergeSource, mergeDest); - Changed = true; + changed = mlir::success(); } - return Changed ? success() : failure(); + return changed; } mlir::LogicalResult @@ -143,54 +143,53 @@ template <> mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp(PatternRewriter &rewriter, IfOp ifOp) const { - bool regionChanged = false; + auto regionChanged = mlir::failure(); if (checkAndRewriteRegion(ifOp.getThenRegion(), rewriter).succeeded()) - regionChanged = true; + regionChanged = mlir::success(); if (checkAndRewriteRegion(ifOp.getElseRegion(), rewriter).succeeded()) - regionChanged = true; - return regionChanged ? success() : failure(); + regionChanged = mlir::success(); + return regionChanged; } template <> mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp(PatternRewriter &rewriter, ScopeOp scopeOp) const { - bool regionChanged = false; + auto regionChanged = mlir::failure(); if (checkAndRewriteRegion(scopeOp.getRegion(), rewriter).succeeded()) - regionChanged = true; - return regionChanged ? success() : failure(); + regionChanged = mlir::success(); + return regionChanged; } template <> mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( PatternRewriter &rewriter, mlir::FuncOp funcOp) const { - bool regionChanged = false; + auto regionChanged = mlir::failure(); if (checkAndRewriteRegion(funcOp.getRegion(), rewriter).succeeded()) - regionChanged = true; - return regionChanged ? success() : failure(); + regionChanged = mlir::success(); + return regionChanged; } template <> mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( PatternRewriter &rewriter, cir::SwitchOp switchOp) const { - bool regionChanged = false; + auto regionChanged = mlir::failure(); for (auto &r : switchOp.getRegions()) { if (checkAndRewriteRegion(r, rewriter).succeeded()) - regionChanged = true; + regionChanged = mlir::success(); } - - return regionChanged ? success() : failure(); + return regionChanged; } template <> mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( PatternRewriter &rewriter, cir::LoopOp loopOp) const { - bool regionChanged = false; + auto regionChanged = mlir::failure(); if (checkAndRewriteRegion(loopOp.getBody(), rewriter).succeeded()) - regionChanged = true; + regionChanged = mlir::success(); if (checkAndRewriteLoopCond(loopOp.getCond(), rewriter).succeeded()) - regionChanged = true; - return regionChanged ? success() : failure(); + regionChanged = mlir::success(); + return regionChanged; } void getMergeCleanupsPatterns(RewritePatternSet &results, From 9704588ce876b3a59f5c701524818166822677e3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 26 Apr 2022 19:02:18 -0400 Subject: [PATCH 0408/2301] [CIR] Enable top level handling of CXXMethods This is trivial and just prevents an assert when the method isn't used. --- clang/lib/CIR/CIRGenModule.cpp | 1 + clang/test/CIR/CodeGen/ctor.cpp | 1 + 2 files changed, 2 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 56837af15906..9ced75355d46 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -375,6 +375,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { switch (decl->getKind()) { default: assert(false && "Not yet implemented"); + case Decl::CXXMethod: case Decl::Function: buildGlobal(cast(decl)); assert(!codeGenOpts.CoverageMapping && "Coverage Mapping NYI"); diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index 56165dc970c9..ba795bfd0f13 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -4,6 +4,7 @@ struct Struk { int a; Struk() {} + void test() {} }; void baz() { From 6b400f593d9f1734a26e66ac0637c6c24b7424de Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 26 Apr 2022 19:29:44 -0400 Subject: [PATCH 0409/2301] [CIR] Add buildCXXMemberCallExpr to be called from buildCallExpr As per usual, this mostly constraints the sitautions that we can see in asserting against things such as BinaryOperators and static methods and moves on. --- clang/lib/CIR/CIRGenExpr.cpp | 38 +++++++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenFunction.h | 8 +++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 674a2d6c0a43..6d94e822b5d5 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -386,7 +386,10 @@ RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot, RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, ReturnValueSlot ReturnValue) { assert(!E->getCallee()->getType()->isBlockPointerType() && "ObjC Blocks NYI"); - assert(!dyn_cast(E) && "NYI"); + + if (const auto *CE = dyn_cast(E)) + return buildCXXMemberCallExpr(CE, ReturnValue); + assert(!dyn_cast(E) && "CUDA NYI"); assert(!dyn_cast(E) && "NYI"); @@ -833,3 +836,36 @@ void CIRGenFunction::buildCXXConstructExpr(const clang::CXXConstructExpr *E, buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); } + +// Note: this function also emit constructor calls to support a MSVC extensions +// allowing explicit constructor function call. +RValue CIRGenFunction::buildCXXMemberCallExpr(const CXXMemberCallExpr *CE, + ReturnValueSlot ReturnValue) { + + const Expr *callee = CE->getCallee()->IgnoreParens(); + + if (isa(callee)) + llvm_unreachable("NYI"); + + const auto *ME = cast(callee); + const auto *MD = cast(ME->getMemberDecl()); + + if (MD->isStatic()) { + llvm_unreachable("NYI"); + } + + bool HasQualifier = ME->hasQualifier(); + NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr; + bool IsArrow = ME->isArrow(); + const Expr *Base = ME->getBase(); + + return buildCXXMemberOrOperatorMemberCallExpr( + CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base); +} + +RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( + const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, + bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, + const Expr *Base) { + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index b1e85e54ee54..2a6845897640 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -426,6 +426,14 @@ class CIRGenFunction { clang::SourceLocation Loc, bool NewPointerIsChecked); + RValue buildCXXMemberCallExpr(const clang::CXXMemberCallExpr *E, + ReturnValueSlot ReturnValue); + RValue buildCXXMemberOrOperatorMemberCallExpr( + const clang::CallExpr *CE, const clang::CXXMethodDecl *MD, + ReturnValueSlot ReturnValue, bool HasQualifier, + clang::NestedNameSpecifier *Qualifier, bool IsArrow, + const clang::Expr *Base); + mlir::Operation *createLoad(const clang::VarDecl *VD, const char *Name); // Wrapper for function prototype sources. Wraps either a FunctionProtoType or From c1afdd3c4676cc8b82d27e5e3469da3d45d715cb Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 26 Apr 2022 19:56:52 -0400 Subject: [PATCH 0410/2301] [CIR][NFC] Add a UnimplementedFeature class to use for guarding against NYIs Some features in CodeGen aren't really guardable against very well. A simple example is linkage. We haven't defined anything to do with linkage yet. So we can't even assert that we don't have a certain type of it as we do with decl types, paramater count, virtual etc. So instead introduce a class that explicitly contains a list of static fns that will return false that you can guard against. If and when a feature becomes implemented simply changing this return to true will cause compilation to fail at all the points in which we noted that we needed to address. This is a much more explicit way to handle "TODO"s. --- clang/lib/CIR/UnimplementedFeatureGuarding.h | 26 ++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 clang/lib/CIR/UnimplementedFeatureGuarding.h diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h new file mode 100644 index 000000000000..15c624b5a27f --- /dev/null +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -0,0 +1,26 @@ +//===---- UnimplementedFeatureGuarding.h - Checks against NYI ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file introduces some helper classes to guard against features that +// CodeGen supports that we do not have and also do not have great ways to +// assert against. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_UFG +#define LLVM_CLANG_LIB_CIR_UFG + +namespace cir { +struct UnimplementedFeature { + // TODO(CIR): Implement the CIRGenFunction::buildTypeCheck method that handles + // sanitizer related type check features + static bool buildTypeCheck() { return false; } +}; +} // namespace cir + +#endif From a44a629f99103ab995ec016ddc6bb512a9230019 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 26 Apr 2022 19:59:51 -0400 Subject: [PATCH 0411/2301] [CIR] Flesh out buildCXXMemberOrOperatorMemberCallExpr Again, this mostly guards to constrain the things we support. All this patch effecitvely does is delay the crash that you'll see when you run into `struct S s; s.method();`. --- clang/lib/CIR/CIRGenCXX.cpp | 7 ++ clang/lib/CIR/CIRGenExpr.cpp | 143 ++++++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenFunction.h | 9 +++ 3 files changed, 158 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCXX.cpp b/clang/lib/CIR/CIRGenCXX.cpp index 19cfdbd936f1..4781a46851ba 100644 --- a/clang/lib/CIR/CIRGenCXX.cpp +++ b/clang/lib/CIR/CIRGenCXX.cpp @@ -32,3 +32,10 @@ mlir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { // TODO: SetLLVMFunctionAttributesForDefinition return Fn; } + +RValue CIRGenFunction::buildCXXMemberOrOperatorCall( + const CXXMethodDecl *MD, const CIRGenCallee &Callee, + ReturnValueSlot ReturnValue, mlir::Value This, mlir::Value ImplicitParam, + QualType ImplicitParamTy, const CallExpr *CE, CallArgList *RtlArgs) { + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 6d94e822b5d5..211d1e8056b8 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -1,6 +1,7 @@ #include "CIRGenCall.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "UnimplementedFeatureGuarding.h" #include "clang/AST/GlobalDecl.h" @@ -863,9 +864,149 @@ RValue CIRGenFunction::buildCXXMemberCallExpr(const CXXMemberCallExpr *CE, CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base); } +bool CIRGenFunction::isWrappedCXXThis(const Expr *object) { + const Expr *base = object; + while (!isa(base)) { + // The result of a dynamic_cast can be null. + if (isa(base)) + return false; + + if (const auto *ce = dyn_cast(base)) { + (void)ce; + llvm_unreachable("NYI"); + } else if (const auto *pe = dyn_cast(base)) { + (void)pe; + llvm_unreachable("NYI"); + } else if (const auto *uo = dyn_cast(base)) { + (void)uo; + llvm_unreachable("NYI"); + } else { + return false; + } + } + return true; +} + RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, const Expr *Base) { - llvm_unreachable("NYI"); + assert(isa(CE) || isa(CE)); + + // Compute the object pointer. + bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier; + assert(!CanUseVirtualCall && "NYI"); + + const CXXMethodDecl *DevirtualizedMethod = nullptr; + if (CanUseVirtualCall && + MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) { + llvm_unreachable("NYI"); + } + + bool TrivialForCodegen = + MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion()); + bool TrivialAssignment = + TrivialForCodegen && + (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) && + !MD->getParent()->mayInsertExtraPadding(); + (void)TrivialAssignment; + + // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment + // operator before the LHS. + CallArgList RtlArgStorage; + CallArgList *RtlArgs = nullptr; + LValue TrivialAssignmentRHS; + if (auto *OCE = dyn_cast(CE)) { + llvm_unreachable("NYI"); + } + + LValue This; + if (IsArrow) { + llvm_unreachable("NYI"); + } else { + This = buildLValue(Base); + } + + if (const CXXConstructorDecl *Ctor = dyn_cast(MD)) { + llvm_unreachable("NYI"); + } + + if (TrivialForCodegen) { + llvm_unreachable("NYI"); + } + + // Compute the function type we're calling + const CXXMethodDecl *CalleeDecl = + DevirtualizedMethod ? DevirtualizedMethod : MD; + const CIRGenFunctionInfo *FInfo = nullptr; + if (const auto *Dtor = dyn_cast(CalleeDecl)) + llvm_unreachable("NYI"); + else + llvm_unreachable("NYI"); + // FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl); + + mlir::FunctionType Ty = CGM.getTypes().GetFunctionType(*FInfo); + + // C++11 [class.mfct.non-static]p2: + // If a non-static member function of a class X is called for an object that + // is not of type X, or of a type derived from X, the behavior is undefined. + SourceLocation CallLoc; + ASTContext &C = getContext(); + (void)C; + if (CE) + CallLoc = CE->getExprLoc(); + + SanitizerSet SkippedChecks; + if (const auto *cmce = dyn_cast(CE)) { + auto *ioa = cmce->getImplicitObjectArgument(); + auto isImplicitObjectCXXThis = isWrappedCXXThis(ioa); + if (isImplicitObjectCXXThis) + SkippedChecks.set(SanitizerKind::Alignment, true); + if (isImplicitObjectCXXThis || isa(ioa)) + SkippedChecks.set(SanitizerKind::Null, true); + } + + if (UnimplementedFeature::buildTypeCheck()) + llvm_unreachable("NYI"); + + // C++ [class.virtual]p12: + // Explicit qualification with the scope operator (5.1) suppresses the + // virtual call mechanism. + // + // We also don't emit a virtual call if the base expression has a record type + // because then we know what the type is. + bool useVirtualCall = CanUseVirtualCall && !DevirtualizedMethod; + + if (const auto *dtor = dyn_cast(CalleeDecl)) { + llvm_unreachable("NYI"); + } + + // FIXME: Uses of 'MD' past this point need to be audited. We may need to use + // 'CalleeDecl' instead. + + CIRGenCallee Callee; + if (useVirtualCall) { + llvm_unreachable("NYI"); + } else { + if (SanOpts.has(SanitizerKind::CFINVCall)) { + llvm_unreachable("NYI"); + } + + if (getLangOpts().AppleKext) + llvm_unreachable("NYI"); + else if (!DevirtualizedMethod) + Callee = CIRGenCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), + GlobalDecl(MD)); + else { + llvm_unreachable("NYI"); + } + } + + if (MD->isVirtual()) { + llvm_unreachable("NYI"); + } + + return buildCXXMemberOrOperatorCall( + CalleeDecl, Callee, ReturnValue, This.getPointer(), + /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs); } diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 2a6845897640..5eead169a2ef 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -426,6 +426,12 @@ class CIRGenFunction { clang::SourceLocation Loc, bool NewPointerIsChecked); + RValue buildCXXMemberOrOperatorCall( + const clang::CXXMethodDecl *Method, const CIRGenCallee &Callee, + ReturnValueSlot ReturnValue, mlir::Value This, mlir::Value ImplicitParam, + clang::QualType ImplicitParamTy, const clang::CallExpr *E, + CallArgList *RtlArgs); + RValue buildCXXMemberCallExpr(const clang::CXXMemberCallExpr *E, ReturnValueSlot ReturnValue); RValue buildCXXMemberOrOperatorMemberCallExpr( @@ -829,6 +835,9 @@ class CIRGenFunction { return it->second; } + /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts. + static bool isWrappedCXXThis(const clang::Expr *E); + void buildDelegateCXXConstructorCall(const clang::CXXConstructorDecl *Ctor, clang::CXXCtorType CtorType, const FunctionArgList &Args, From d7f3c6be34f8f4a3634fdec91a4f1c7946534511 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 26 Apr 2022 17:04:46 -0700 Subject: [PATCH 0412/2301] [CIR][CodeGen][NFC] Start paving the road for global VarDecl's For now this only allows global VarDecl's to crash with unimplemented further down the road. --- clang/lib/CIR/CIRGenModule.cpp | 53 ++++++++++++++++++++++++++++++---- 1 file changed, 48 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 9ced75355d46..e6fe93061253 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -216,10 +216,27 @@ bool CIRGenModule::MustBeEmitted(const ValueDecl *Global) { bool CIRGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { assert(!langOpts.OpenMP && "NYI"); - auto const *FD = dyn_cast(Global); - assert(FD && "Only FunctionDecl should hit this path so far."); - assert(!FD->isTemplated() && "Templates NYI"); + const auto *FD = dyn_cast(Global); + if (FD) { + // Implicit template instantiations may change linkage if they are later + // explicitly instantiated, so they should not be emitted eagerly. + // TODO(cir): do we care? + assert(FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation && + "not implemented"); + assert(!FD->isTemplated() && "Templates NYI"); + } + const auto *VD = dyn_cast(Global); + if (VD) + // A definition of an inline constexpr static data member may change + // linkage later if it's redeclared outside the class. + // TODO(cir): do we care? + assert(astCtx.getInlineVariableDefinitionKind(VD) != + ASTContext::InlineVariableDefinitionKind::WeakUnknown && + "not implemented"); + + assert((FD || VD) && + "Only FunctionDecl and VarDecl should hit this path so far."); return true; } @@ -251,7 +268,19 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { return; } } else { - llvm_unreachable("NYI"); + const auto *VD = cast(Global); + assert(VD->isFileVarDecl() && "Cannot emit local var decl as global."); + if (VD->isThisDeclarationADefinition() != VarDecl::Definition && + !astCtx.isMSStaticDataMemberInlineDefinition(VD)) { + assert(!getLangOpts().OpenMP && "not implemented"); + // If this declaration may have caused an inline variable definition + // to change linkage, make sure that it's emitted. + // TODO(cir): probably use GetAddrOfGlobalVar(VD) below? + assert((astCtx.getInlineVariableDefinitionKind(VD) != + ASTContext::InlineVariableDefinitionKind::Strong) && + "not implemented"); + return; + } } // Defer code generation to first use when possible, e.g. if this is an inline @@ -361,7 +390,7 @@ void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { llvm_unreachable("Invalid argument to buildGlobalDefinition()"); } -// buildTopLevelDecl - Emit code for a single top level declaration. +// Emit code for a single top level declaration. void CIRGenModule::buildTopLevelDecl(Decl *decl) { // Ignore dependent declarations if (decl->isTemplated()) @@ -374,7 +403,21 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { switch (decl->getKind()) { default: + llvm::errs() << "buildTopLevelDecl codegen for decl kind '" + << decl->getDeclKindName() << "' not implemented\n"; assert(false && "Not yet implemented"); + + case Decl::Var: + case Decl::Decomposition: + case Decl::VarTemplateSpecialization: + buildGlobal(cast(decl)); + assert(!isa(decl) && "not implemented"); + // if (auto *DD = dyn_cast(decl)) + // for (auto *B : DD->bindings()) + // if (auto *HD = B->getHoldingVar()) + // EmitGlobal(HD); + break; + case Decl::CXXMethod: case Decl::Function: buildGlobal(cast(decl)); From ef644f045455c3550bf139bc71ca30640a7cfa77 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 27 Apr 2022 01:00:50 -0400 Subject: [PATCH 0413/2301] [CIR][NFC] Fix some comments --- clang/lib/CIR/CIRGenCall.cpp | 3 ++- clang/lib/CIR/CIRGenExpr.cpp | 8 ++++---- clang/lib/CIR/CIRGenFunction.h | 2 +- clang/lib/CIR/CIRGenModule.cpp | 4 ++-- clang/lib/CIR/CIRGenTypes.cpp | 1 + clang/lib/CIR/CIRGenTypes.h | 6 +++--- 6 files changed, 13 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 499bb3e7124d..92f670373951 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -196,7 +196,8 @@ mlir::FunctionType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { const ABIArgInfo &retAI = FI.getReturnInfo(); switch (retAI.getKind()) { case ABIArgInfo::Ignore: - // TODO: where to get VoidTy? + // TODO(CIR): This should probably be the None type from the builtin + // dialect. resultType = nullptr; break; diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 211d1e8056b8..37920faa5ed6 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -68,16 +68,16 @@ bool CIRGenFunction::hasBooleanRepresentation(QualType Ty) { CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { E = E->IgnoreParens(); - if (auto ICE = dyn_cast(E)) { + if (const auto *ICE = dyn_cast(E)) { assert(ICE && "Only ICE supported so far!"); assert(ICE->getCastKind() == CK_FunctionToPointerDecay && "No other casts supported yet"); return buildCallee(ICE->getSubExpr()); - } else if (auto DRE = dyn_cast(E)) { - auto FD = dyn_cast(DRE->getDecl()); + } else if (const auto *DRE = dyn_cast(E)) { + const auto *FD = dyn_cast(DRE->getDecl()); assert(FD && - "DeclRef referring to FunctionDecl onlything supported so far"); + "DeclRef referring to FunctionDecl only thing supported so far"); return buildDirectCallee(CGM, FD); } diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 5eead169a2ef..0e75e812f6f2 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -257,7 +257,7 @@ class CIRGenFunction { enum class EvaluationOrder { ///! No langauge constraints on evaluation order. Default, - ///! Language semantics requrie left-to-right evaluation + ///! Language semantics require left-to-right evaluation ForceLeftToRight, ///! Language semantics require right-to-left evaluation. ForceRightToLeft diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index e6fe93061253..1521e030b8b1 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -656,8 +656,8 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( } // This function doesn't have a complete type (for example, the return type is - // an incompmlete struct). Use a fake type instead, and make sure not to try - // to set attributes. + // an incomplete struct). Use a fake type instead, and make sure not to try to + // set attributes. bool IsIncompleteFunction = false; mlir::FunctionType FTy; diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 3eed096c3e13..4366524fc03b 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -520,6 +520,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { /// the return type. Codegen doesn't care about them, and it makes ABI code a /// little easier to be able to assume that all parameter and return types are /// top-level unqualified. +/// FIXME(CIR): This should be a common helper extracted from CodeGen static CanQualType GetReturnType(QualType RetTy) { return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); } diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index ec48d61433a3..8a4a80f967d9 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -1,4 +1,4 @@ -//===--- CIRGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===// +//===--- CIRGenTypes.h - Type translation for CIR CodeGen -------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -165,7 +165,7 @@ class CIRGenTypes { // The arrangement methods are split into three families: // - those meant to drive the signature and prologue/epilogue // of a function declaration or definition, - // - those meant for the computation fo the CIR type for an abstract + // - those meant for the computation of the CIR type for an abstract // appearance of a function, and // - those meant for performing the CIR-generation of a call. // They differ mainly in how they deal with optional (i.e. variadic) @@ -206,7 +206,7 @@ class CIRGenTypes { const CIRGenFunctionInfo & arrangeFreeFunctionType(clang::CanQual Ty); - /// "Arrange" the LLVM information for a call or type with the given + /// "Arrange" the CIR information for a call or type with the given /// signature. This is largely an internal method; other clients should use /// one of the above routines, which ultimatley defer to this. /// From 4ab71f8139d0844230b0db31ba56e8cc06558bd4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 27 Apr 2022 02:02:40 -0400 Subject: [PATCH 0414/2301] [CIR] Add C++ test to call.c --- clang/test/CIR/CodeGen/call.c | 42 +++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 148225820f8f..3d16e6e0e605 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=CXX // XFAIL: * void a(void) {} @@ -52,3 +52,41 @@ void d(void) { // CHECK: call @b(%0, %1) : (i32, i32) -> i32 // CHECK: cir.return // CHECK: } +// +// CXX: module { +// CXX-NEXT: func @_Z1av() { +// CXX-NEXT: cir.return +// CXX-NEXT: } +// CXX-NEXT: func @_Z1bii(%arg0: i32 {{.*}}, %arg1: i32 {{.*}}) -> i32 { +// CXX-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] +// CXX-NEXT: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] +// CXX-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] +// CXX-NEXT: cir.store %arg0, %0 : i32, cir.ptr +// CXX-NEXT: cir.store %arg1, %1 : i32, cir.ptr +// CXX-NEXT: %3 = cir.load %0 : cir.ptr , i32 +// CXX-NEXT: %4 = cir.load %1 : cir.ptr , i32 +// CXX-NEXT: %5 = cir.binop(add, %3, %4) : i32 +// CXX-NEXT: cir.store %5, %2 : i32, cir.ptr +// CXX-NEXT: %6 = cir.load %2 : cir.ptr , i32 +// CXX-NEXT: cir.return %6 +// CXX-NEXT: } +// CXX-NEXT: func @_Z1cdd(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { +// CXX-NEXT: %0 = cir.alloca f64, cir.ptr , ["a", paraminit] +// CXX-NEXT: %1 = cir.alloca f64, cir.ptr , ["b", paraminit] +// CXX-NEXT: %2 = cir.alloca f64, cir.ptr , ["__retval", uninitialized] +// CXX-NEXT: cir.store %arg0, %0 : f64, cir.ptr +// CXX-NEXT: cir.store %arg1, %1 : f64, cir.ptr +// CXX-NEXT: %3 = cir.load %0 : cir.ptr , f64 +// CXX-NEXT: %4 = cir.load %1 : cir.ptr , f64 +// CXX-NEXT: %5 = cir.binop(add, %3, %4) : f64 +// CXX-NEXT: cir.store %5, %2 : f64, cir.ptr +// CXX-NEXT: %6 = cir.load %2 : cir.ptr , f64 +// CXX-NEXT: cir.return %6 : f64 +// CXX-NEXT: } +// CXX-NEXT: func @_Z1dv() { +// CXX-NEXT: call @_Z1av() : () -> () +// CXX-NEXT: %0 = cir.cst(0 : i32) : i32 +// CXX-NEXT: %1 = cir.cst(1 : i32) : i32 +// CXX-NEXT: call @_Z1bii(%0, %1) : (i32, i32) -> i32 +// CXX-NEXT: cir.return +// CXX-NEXT: } From 36c3a6363b316b06193a24fada23f73704d7e80e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 27 Apr 2022 02:10:55 -0400 Subject: [PATCH 0415/2301] [CIR][NFC] Fix comment --- clang/lib/CIR/TargetInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/TargetInfo.cpp b/clang/lib/CIR/TargetInfo.cpp index 1241a73f07e0..feecebd3eadb 100644 --- a/clang/lib/CIR/TargetInfo.cpp +++ b/clang/lib/CIR/TargetInfo.cpp @@ -178,7 +178,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, } void X86_64ABIInfo::computeInfo(CIRGenFunctionInfo &FI) const { - // Top leevl CIR has unlimited arguments and return types. Lowering for ABI + // Top level CIR has unlimited arguments and return types. Lowering for ABI // specific concerns should happen during a lowering phase. Assume everything // is direct for now. for (CIRGenFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); From dabac7857951fe295ff2aae3806926fcfd9eaa79 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 27 Apr 2022 02:11:04 -0400 Subject: [PATCH 0416/2301] [CIR][NFC] Fix naming of a lambda --- clang/lib/CIR/CIRGenCall.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 92f670373951..764fcf2b0e06 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -434,7 +434,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: cleanup argument memory at the end // Extract the return value. - RValue Ret = [&] { + RValue ret = [&] { switch (RetAI.getKind()) { case ABIArgInfo::Direct: { mlir::Type RetCIRTy = convertType(RetTy); @@ -475,7 +475,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, assert(RetTy.isDestructedType() != QualType::DK_nontrivial_c_struct && "NYI"); - return Ret; + return ret; } RValue CIRGenFunction::GetUndefRValue(QualType Ty) { From 5ce3358c89f67330c08881321afe145fde7bc0b3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 27 Apr 2022 02:11:19 -0400 Subject: [PATCH 0417/2301] [CIR][NFC] Gut some unused code We early return here and just use direct. I left that for future TODO status but I figure we can just refer clang's codegen for a better reference anyways once we get there. --- clang/lib/CIR/TargetInfo.cpp | 54 ------------------------------------ 1 file changed, 54 deletions(-) diff --git a/clang/lib/CIR/TargetInfo.cpp b/clang/lib/CIR/TargetInfo.cpp index feecebd3eadb..4d48b8fda95b 100644 --- a/clang/lib/CIR/TargetInfo.cpp +++ b/clang/lib/CIR/TargetInfo.cpp @@ -159,15 +159,6 @@ class X86_64TargetCIRGenInfo : public TargetCIRGenInfo { }; } // namespace -static bool classifyReturnType(const CIRGenCXXABI &CXXABI, - CIRGenFunctionInfo &FI, const ABIInfo &Info) { - QualType Ty = FI.getReturnType(); - - assert(!Ty->getAs() && "RecordType returns NYI"); - - return CXXABI.classifyReturnType(FI); -} - CIRGenCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); } clang::ASTContext &ABIInfo::getContext() const { return CGT.getContext(); } @@ -193,51 +184,6 @@ void X86_64ABIInfo::computeInfo(CIRGenFunctionInfo &FI) const { FI.getReturnInfo() = ABIArgInfo::getIgnore(); else FI.getReturnInfo() = ABIArgInfo::getDirect(CGT.ConvertType(RetTy)); - - return; - - // TODO: - llvm_unreachable("Everything below here is from codegen. We shouldn't be " - "computing ABI info until lowering"); - const unsigned CallingConv = FI.getCallingConvention(); - - assert(CallingConv == cir::CallingConv::C && "C is the only supported CC"); - - unsigned FreeIntRegs = 6; - unsigned FreeSSERegs = 8; - unsigned NeededInt, NeededSSE; - - assert(!::classifyReturnType(getCXXABI(), FI, *this) && "NYI"); - FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); - - assert(!FI.getReturnInfo().isIndirect() && "Indirect return NYI"); - - assert(!FI.isChainCall() && "Chain call NYI"); - - unsigned NumRequiredArgs = FI.getNumRequiredArgs(); - // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers get - // assigned (in left-to-right order) for passing as follows... - unsigned ArgNo = 0; - for (CIRGenFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); - it != ie; ++it, ++ArgNo) { - bool IsNamedArg = ArgNo < NumRequiredArgs; - - assert(!it->type->isStructureOrClassType() && "NYI"); - - it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, NeededSSE, - IsNamedArg); - - // AMD64-ABI 3.2.3p3: If there are no registers available for any eightbyte - // of an argument, the whole argument is passed on the stack. If registers - // have already been assigned for some eightbytes of such an argument, the - // assignments get reverted. - if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { - FreeIntRegs -= NeededInt; - FreeSSERegs -= NeededSSE; - } else { - it->info = getIndirectResult(it->type, FreeIntRegs); - } - } } /// Pass transparent unions as if they were the type of the first element. Sema From d55417128af4cecbaad90a540a3ba15ff6dfff86 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 20:58:11 -0400 Subject: [PATCH 0418/2301] [CIR] Pass an arg to buildCall by ptr rather than ref We follow codegen and pass nullptr at some point. So change this to avoid the nullptr reference. --- clang/lib/CIR/CIRGenCall.cpp | 6 +++--- clang/lib/CIR/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CIRGenFunction.h | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 764fcf2b0e06..df9484f7344c 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -260,8 +260,8 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, - mlir::func::CallOp &callOrInvoke, bool IsMustTail, - SourceLocation Loc) { + mlir::func::CallOp *callOrInvoke, + bool IsMustTail, SourceLocation Loc) { // FIXME: We no longer need the types from CallArgs; lift up and simplify assert(Callee.isOrdinary() || Callee.isVirtual()); @@ -400,7 +400,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, CIRCallArgs); if (callOrInvoke) - callOrInvoke = theCall; + callOrInvoke = &theCall; if (const auto *FD = dyn_cast_or_null(CurFuncDecl)) { assert(!FD->getAttr() && "NYI"); diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 37920faa5ed6..b389f61aa84e 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -480,7 +480,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, assert(!MustTailCall && "Must tail NYI"); mlir::func::CallOp callOP = nullptr; - RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, callOP, + RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, &callOP, E == MustTailCall, E->getExprLoc()); assert(!getDebugInfo() && "Debug Info NYI"); diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index d69dae8cd89e..549764377eaf 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -597,7 +597,7 @@ void CIRGenFunction::buildCXXConstructorCall( Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); mlir::func::CallOp C; - buildCall(Info, Callee, ReturnValueSlot(), Args, C, false, Loc); + buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, Loc); assert(CGM.getCodeGenOpts().OptimizationLevel == 0 || ClassDecl->isDynamicClass() || Type == Ctor_Base || diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 0e75e812f6f2..283cdf354205 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -505,7 +505,7 @@ class CIRGenFunction { /// LLVM arguments and the types they were derived from. RValue buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, - const CallArgList &Args, mlir::func::CallOp &callOrInvoke, + const CallArgList &Args, mlir::func::CallOp *callOrInvoke, bool IsMustTail, clang::SourceLocation Loc); RValue buildCall(clang::QualType FnType, const CIRGenCallee &Callee, const clang::CallExpr *E, ReturnValueSlot returnValue, From 68f8a123dfc5ac8497af40b70067a83a5c1fac3e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 26 Apr 2022 20:01:44 -0400 Subject: [PATCH 0419/2301] [CIR] Implement buildCXXMemberOrOperatorCall This delegates to a helper fn to setup the call arguments that is outlined from other similar functions that we'll impelment later. We then call arrangeCXXMethodCall. It is stubbed out with an unreachable and will be handled next. Last, we delegate to the standard buildCall fn. --- clang/lib/CIR/CIRGenCXX.cpp | 6 --- clang/lib/CIR/CIRGenCXXABI.h | 9 ++++ clang/lib/CIR/CIRGenCall.cpp | 11 +++++ clang/lib/CIR/CIRGenExprCXX.cpp | 86 +++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 3 ++ clang/lib/CIR/CIRGenTypes.h | 5 ++ clang/lib/CIR/CMakeLists.txt | 1 + 7 files changed, 115 insertions(+), 6 deletions(-) create mode 100644 clang/lib/CIR/CIRGenExprCXX.cpp diff --git a/clang/lib/CIR/CIRGenCXX.cpp b/clang/lib/CIR/CIRGenCXX.cpp index 4781a46851ba..961c84307c7f 100644 --- a/clang/lib/CIR/CIRGenCXX.cpp +++ b/clang/lib/CIR/CIRGenCXX.cpp @@ -33,9 +33,3 @@ mlir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { return Fn; } -RValue CIRGenFunction::buildCXXMemberOrOperatorCall( - const CXXMethodDecl *MD, const CIRGenCallee &Callee, - ReturnValueSlot ReturnValue, mlir::Value This, mlir::Value ImplicitParam, - QualType ImplicitParamTy, const CallExpr *CE, CallArgList *RtlArgs) { - llvm_unreachable("NYI"); -} diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 2fe3163b2745..0d4bb51831b5 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -95,6 +95,15 @@ class CIRGenCXXABI { /// Emit the ABI-specific prolog for the function virtual void buildInstanceFunctionProlog(CIRGenFunction &CGF) = 0; + /// Get the type of the implicit "this" parameter used by a method. May return + /// zero if no specific type is applicable, e.g. if the ABI expects the "this" + /// parameter to point to some artificial offset in a complete object due to + /// vbases being reordered. + virtual const clang::CXXRecordDecl * + getThisArgumentTypeForMethod(const clang::CXXMethodDecl *MD) { + return MD->getParent(); + } + /// Return whether the given global decl needs a VTT parameter. virtual bool NeedsVTTParameter(clang::GlobalDecl GD); diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index df9484f7344c..91fe19816795 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -802,3 +802,14 @@ void CIRGenFunction::buildDelegateCallArg(CallArgList &args, llvm_unreachable("NYI"); } } + +/// Arrange a call to a C++ method, passing the given arguments. +/// +/// numPrefixArgs is the number of the ABI-specific prefix arguments we have. It +/// does not count `this`. +const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXMethodCall( + const CallArgList &args, const FunctionProtoType *proto, + RequiredArgs required, unsigned numPrefixArgs) { + llvm_unreachable("NYI"); +} + diff --git a/clang/lib/CIR/CIRGenExprCXX.cpp b/clang/lib/CIR/CIRGenExprCXX.cpp new file mode 100644 index 000000000000..e5fdfed1477c --- /dev/null +++ b/clang/lib/CIR/CIRGenExprCXX.cpp @@ -0,0 +1,86 @@ +//===--- CIRGenExprCXX.cpp - Emit CIR Code for C++ expressions ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with code generation of C++ expressions +// +//===----------------------------------------------------------------------===// + +#include +#include +#include +#include + +#include + +using namespace cir; +using namespace clang; + +namespace { +struct MemberCallInfo { + RequiredArgs ReqArgs; + // Number of prefix arguments for the call. Ignores the `this` pointer. + unsigned PrefixSize; +}; +} // namespace + +static MemberCallInfo +commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD, + mlir::Value This, mlir::Value ImplicitParam, + QualType ImplicitParamTy, const CallExpr *CE, + CallArgList &Args, CallArgList *RtlArgs) { + assert(CE == nullptr || isa(CE) || + isa(CE)); + assert(MD->isInstance() && + "Trying to emit a member or operator call expr on a static method!"); + + // Push the this ptr. + const CXXRecordDecl *RD = + CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD); + Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD)); + + // If there is an implicit parameter (e.g. VTT), emit it. + if (ImplicitParam) { + llvm_unreachable("NYI"); + } + + const auto *FPT = MD->getType()->castAs(); + RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size()); + unsigned PrefixSize = Args.size() - 1; + + // Add the rest of the call args + if (RtlArgs) { + llvm_unreachable("NYI"); + } else if (CE) { + // Special case: skip first argument of CXXOperatorCall (it is "this"). + unsigned ArgsToSkip = isa(CE) ? 1 : 0; + CGF.buildCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip), + CE->getDirectCallee()); + } else { + assert( + FPT->getNumParams() == 0 && + "No CallExpr specified for function with non-zero number of arguments"); + } + + return {required, PrefixSize}; +} + +RValue CIRGenFunction::buildCXXMemberOrOperatorCall( + const CXXMethodDecl *MD, const CIRGenCallee &Callee, + ReturnValueSlot ReturnValue, mlir::Value This, mlir::Value ImplicitParam, + QualType ImplicitParamTy, const CallExpr *CE, CallArgList *RtlArgs) { + + const auto *FPT = MD->getType()->castAs(); + CallArgList Args; + MemberCallInfo CallInfo = commonBuildCXXMemberOrOperatorCall( + *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs); + auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall( + Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize); + return buildCall(FnInfo, Callee, ReturnValue, Args, nullptr, + CE && CE == MustTailCall, + CE ? CE->getExprLoc() : SourceLocation()); +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 283cdf354205..99e7a5de2b7e 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -44,7 +44,10 @@ namespace cir { enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; class CIRGenFunction { +public: CIRGenModule &CGM; + +private: /// The builder is a helper class to create IR inside a function. The /// builder is stateful, in particular it keeps an "insertion point": this /// is where the next operations will be introduced. diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 8a4a80f967d9..02b2b25de22b 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -197,6 +197,11 @@ class CIRGenTypes { clang::CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs = true); + const CIRGenFunctionInfo & + arrangeCXXMethodCall(const CallArgList &args, + const clang::FunctionProtoType *type, + RequiredArgs required, unsigned numPrefixArgs); + const CIRGenFunctionInfo &arrangeCXXStructorDeclaration(clang::GlobalDecl GD); const CIRGenFunctionInfo & diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 9d11bce5b215..0399814350e7 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -18,6 +18,7 @@ add_clang_library(clangCIR CIRGenDecl.cpp CIRGenExpr.cpp CIRGenExprAgg.cpp + CIRGenExprCXX.cpp CIRGenExprScalar.cpp CIRGenFunction.cpp CIRGenItaniumCXXABI.cpp From 9901252b47d3d92ea6c2744fb420aeaef159c684 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 21:49:00 -0400 Subject: [PATCH 0420/2301] [CIR] Implement arrangeCXXMethodCall This is just a pretty simple helper that delegates to getExtParameterInfosForCal, getArgTypesForCall and finally arrangeCIRFunctionInfo. The first helper is stubbed out to do nothing with an unreachable. The second is trivial. The third is the standard fn for arranging calls. --- clang/lib/CIR/CIRGenCall.cpp | 83 ++++++++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenTypes.cpp | 51 --------------------- 2 files changed, 82 insertions(+), 52 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 91fe19816795..d7532bbb471a 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -803,6 +803,65 @@ void CIRGenFunction::buildDelegateCallArg(CallArgList &args, } } +/// Returns the "extra-canonicalized" return type, which discards qualifiers on +/// the return type. Codegen doesn't care about them, and it makes ABI code a +/// little easier to be able to assume that all parameter and return types are +/// top-level unqualified. +/// FIXME(CIR): This should be a common helper extracted from CodeGen +static CanQualType GetReturnType(QualType RetTy) { + return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); +} + +/// Arrange a call as unto a free function, except possibly with an additional +/// number of formal parameters considered required. +static const CIRGenFunctionInfo & +arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, + const CallArgList &args, const FunctionType *fnType, + unsigned numExtraRequiredArgs, bool chainCall) { + assert(args.size() >= numExtraRequiredArgs); + assert(!chainCall && "Chain call NYI"); + + llvm::SmallVector paramInfos; + + // In most cases, there are no optional arguments. + RequiredArgs required = RequiredArgs::All; + + // if we have a variadic prototype, the required arguments are the extra + // prefix plus the arguments in the prototype. + auto *proto = dyn_cast(fnType); + assert(proto && "Only FunctionProtoType supported so far"); + assert(dyn_cast(fnType) && + "Only FunctionProtoType supported so far"); + assert(!proto->isVariadic() && "Variadic NYI"); + assert(!proto->hasExtParameterInfos() && "extparameterinfos NYI"); + + // FIXME: Kill copy. + SmallVector argTypes; + for (const auto &arg : args) + argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); + return CGT.arrangeCIRFunctionInfo( + GetReturnType(fnType->getReturnType()), /*instanceMethod=*/false, + chainCall, argTypes, fnType->getExtInfo(), paramInfos, required); +} + +static llvm::SmallVector +getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { + llvm::SmallVector argTypes; + for (auto &arg : args) + argTypes.push_back(ctx.getCanonicalParamType(arg.Ty)); + return argTypes; +} + +static llvm::SmallVector +getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, + unsigned totalArgs) { + llvm::SmallVector result; + if (proto->hasExtParameterInfos()) { + llvm_unreachable("NYI"); + } + return result; +} + /// Arrange a call to a C++ method, passing the given arguments. /// /// numPrefixArgs is the number of the ABI-specific prefix arguments we have. It @@ -810,6 +869,28 @@ void CIRGenFunction::buildDelegateCallArg(CallArgList &args, const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXMethodCall( const CallArgList &args, const FunctionProtoType *proto, RequiredArgs required, unsigned numPrefixArgs) { - llvm_unreachable("NYI"); + assert(numPrefixArgs + 1 <= args.size() && + "Emitting a call with less args than the required prefix?"); + // Add one to account for `this`. It is a bit awkard here, but we don't count + // `this` in similar places elsewhere. + auto paramInfos = + getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size()); + + // FIXME: Kill copy. + auto argTypes = getArgTypesForCall(Context, args); + + auto info = proto->getExtInfo(); + return arrangeCIRFunctionInfo( + GetReturnType(proto->getReturnType()), /*instanceMethod=*/true, + /*chainCall=*/false, argTypes, info, paramInfos, required); } +/// Figure out the rules for calling a function with the given formal type using +/// the given arguments. The arguments are necessary because the function might +/// be unprototyped, in which case it's target-dependent in crazy ways. +const CIRGenFunctionInfo &CIRGenTypes::arrangeFreeFunctionCall( + const CallArgList &args, const FunctionType *fnType, bool ChainCall) { + assert(!ChainCall && "ChainCall NYI"); + return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, + ChainCall ? 1 : 0, ChainCall); +} diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 4366524fc03b..c7c2d4c0aa04 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -516,47 +516,6 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { return ResultType; } -/// Returns the "extra-canonicalized" return type, which discards qualifiers on -/// the return type. Codegen doesn't care about them, and it makes ABI code a -/// little easier to be able to assume that all parameter and return types are -/// top-level unqualified. -/// FIXME(CIR): This should be a common helper extracted from CodeGen -static CanQualType GetReturnType(QualType RetTy) { - return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); -} - -/// Arrange a call as unto a free function, except possibly with an additional -/// number of formal parameters considered required. -static const CIRGenFunctionInfo & -arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, - const CallArgList &args, const FunctionType *fnType, - unsigned numExtraRequiredArgs, bool chainCall) { - assert(args.size() >= numExtraRequiredArgs); - assert(!chainCall && "Chain call NYI"); - - llvm::SmallVector paramInfos; - - // In most cases, there are no optional arguments. - RequiredArgs required = RequiredArgs::All; - - // if we have a variadic prototype, the required arguments are the extra - // prefix plus the arguments in the prototype. - auto *proto = dyn_cast(fnType); - assert(proto && "Only FunctionProtoType supported so far"); - assert(dyn_cast(fnType) && - "Only FunctionProtoType supported so far"); - assert(!proto->isVariadic() && "Variadic NYI"); - assert(!proto->hasExtParameterInfos() && "extparameterinfos NYI"); - - // FIXME: Kill copy. - SmallVector argTypes; - for (const auto &arg : args) - argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); - return CGT.arrangeCIRFunctionInfo( - GetReturnType(fnType->getReturnType()), /*instanceMethod=*/false, - chainCall, argTypes, fnType->getExtInfo(), paramInfos, required); -} - const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( CanQualType resultType, bool instanceMethod, bool chainCall, llvm::ArrayRef argTypes, FunctionType::ExtInfo info, @@ -644,16 +603,6 @@ CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { return arrangeFreeFunctionType(FTy.castAs()); } -/// Figure out the rules for calling a function with the given formal type using -/// the given arguments. The arguments are necessary because the function might -/// be unprototyped, in which case it's target-dependent in crazy ways. -const CIRGenFunctionInfo &CIRGenTypes::arrangeFreeFunctionCall( - const CallArgList &args, const FunctionType *fnType, bool ChainCall) { - assert(!ChainCall && "ChainCall NYI"); - return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, - ChainCall ? 1 : 0, ChainCall); -} - // UpdateCompletedType - When we find the full definition for a TagDecl, // replace the 'opaque' type we previously made for it if applicable. void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { From 223d82a1a09012b6da4984c4444a3393e12e3ae8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 22:10:24 -0400 Subject: [PATCH 0421/2301] [CIR][NFC] Move a fn to CIRGenExprCXX to match CodeGen --- clang/lib/CIR/CIRGenExpr.cpp | 124 ------------------------------- clang/lib/CIR/CIRGenExprCXX.cpp | 125 ++++++++++++++++++++++++++++++++ 2 files changed, 125 insertions(+), 124 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index b389f61aa84e..91de850680d0 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -886,127 +886,3 @@ bool CIRGenFunction::isWrappedCXXThis(const Expr *object) { } return true; } - -RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( - const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, - bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, - const Expr *Base) { - assert(isa(CE) || isa(CE)); - - // Compute the object pointer. - bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier; - assert(!CanUseVirtualCall && "NYI"); - - const CXXMethodDecl *DevirtualizedMethod = nullptr; - if (CanUseVirtualCall && - MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) { - llvm_unreachable("NYI"); - } - - bool TrivialForCodegen = - MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion()); - bool TrivialAssignment = - TrivialForCodegen && - (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) && - !MD->getParent()->mayInsertExtraPadding(); - (void)TrivialAssignment; - - // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment - // operator before the LHS. - CallArgList RtlArgStorage; - CallArgList *RtlArgs = nullptr; - LValue TrivialAssignmentRHS; - if (auto *OCE = dyn_cast(CE)) { - llvm_unreachable("NYI"); - } - - LValue This; - if (IsArrow) { - llvm_unreachable("NYI"); - } else { - This = buildLValue(Base); - } - - if (const CXXConstructorDecl *Ctor = dyn_cast(MD)) { - llvm_unreachable("NYI"); - } - - if (TrivialForCodegen) { - llvm_unreachable("NYI"); - } - - // Compute the function type we're calling - const CXXMethodDecl *CalleeDecl = - DevirtualizedMethod ? DevirtualizedMethod : MD; - const CIRGenFunctionInfo *FInfo = nullptr; - if (const auto *Dtor = dyn_cast(CalleeDecl)) - llvm_unreachable("NYI"); - else - llvm_unreachable("NYI"); - // FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl); - - mlir::FunctionType Ty = CGM.getTypes().GetFunctionType(*FInfo); - - // C++11 [class.mfct.non-static]p2: - // If a non-static member function of a class X is called for an object that - // is not of type X, or of a type derived from X, the behavior is undefined. - SourceLocation CallLoc; - ASTContext &C = getContext(); - (void)C; - if (CE) - CallLoc = CE->getExprLoc(); - - SanitizerSet SkippedChecks; - if (const auto *cmce = dyn_cast(CE)) { - auto *ioa = cmce->getImplicitObjectArgument(); - auto isImplicitObjectCXXThis = isWrappedCXXThis(ioa); - if (isImplicitObjectCXXThis) - SkippedChecks.set(SanitizerKind::Alignment, true); - if (isImplicitObjectCXXThis || isa(ioa)) - SkippedChecks.set(SanitizerKind::Null, true); - } - - if (UnimplementedFeature::buildTypeCheck()) - llvm_unreachable("NYI"); - - // C++ [class.virtual]p12: - // Explicit qualification with the scope operator (5.1) suppresses the - // virtual call mechanism. - // - // We also don't emit a virtual call if the base expression has a record type - // because then we know what the type is. - bool useVirtualCall = CanUseVirtualCall && !DevirtualizedMethod; - - if (const auto *dtor = dyn_cast(CalleeDecl)) { - llvm_unreachable("NYI"); - } - - // FIXME: Uses of 'MD' past this point need to be audited. We may need to use - // 'CalleeDecl' instead. - - CIRGenCallee Callee; - if (useVirtualCall) { - llvm_unreachable("NYI"); - } else { - if (SanOpts.has(SanitizerKind::CFINVCall)) { - llvm_unreachable("NYI"); - } - - if (getLangOpts().AppleKext) - llvm_unreachable("NYI"); - else if (!DevirtualizedMethod) - Callee = CIRGenCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), - GlobalDecl(MD)); - else { - llvm_unreachable("NYI"); - } - } - - if (MD->isVirtual()) { - llvm_unreachable("NYI"); - } - - return buildCXXMemberOrOperatorCall( - CalleeDecl, Callee, ReturnValue, This.getPointer(), - /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs); -} diff --git a/clang/lib/CIR/CIRGenExprCXX.cpp b/clang/lib/CIR/CIRGenExprCXX.cpp index e5fdfed1477c..100a26ef889c 100644 --- a/clang/lib/CIR/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CIRGenExprCXX.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include @@ -84,3 +85,127 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorCall( CE && CE == MustTailCall, CE ? CE->getExprLoc() : SourceLocation()); } + +RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( + const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, + bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, + const Expr *Base) { + assert(isa(CE) || isa(CE)); + + // Compute the object pointer. + bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier; + assert(!CanUseVirtualCall && "NYI"); + + const CXXMethodDecl *DevirtualizedMethod = nullptr; + if (CanUseVirtualCall && + MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) { + llvm_unreachable("NYI"); + } + + bool TrivialForCodegen = + MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion()); + bool TrivialAssignment = + TrivialForCodegen && + (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) && + !MD->getParent()->mayInsertExtraPadding(); + (void)TrivialAssignment; + + // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment + // operator before the LHS. + CallArgList RtlArgStorage; + CallArgList *RtlArgs = nullptr; + LValue TrivialAssignmentRHS; + if (auto *OCE = dyn_cast(CE)) { + llvm_unreachable("NYI"); + } + + LValue This; + if (IsArrow) { + llvm_unreachable("NYI"); + } else { + This = buildLValue(Base); + } + + if (const CXXConstructorDecl *Ctor = dyn_cast(MD)) { + llvm_unreachable("NYI"); + } + + if (TrivialForCodegen) { + llvm_unreachable("NYI"); + } + + // Compute the function type we're calling + const CXXMethodDecl *CalleeDecl = + DevirtualizedMethod ? DevirtualizedMethod : MD; + const CIRGenFunctionInfo *FInfo = nullptr; + if (const auto *Dtor = dyn_cast(CalleeDecl)) + llvm_unreachable("NYI"); + else + llvm_unreachable("NYI"); + // FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl); + + mlir::FunctionType Ty = CGM.getTypes().GetFunctionType(*FInfo); + + // C++11 [class.mfct.non-static]p2: + // If a non-static member function of a class X is called for an object that + // is not of type X, or of a type derived from X, the behavior is undefined. + SourceLocation CallLoc; + ASTContext &C = getContext(); + (void)C; + if (CE) + CallLoc = CE->getExprLoc(); + + SanitizerSet SkippedChecks; + if (const auto *cmce = dyn_cast(CE)) { + auto *ioa = cmce->getImplicitObjectArgument(); + auto isImplicitObjectCXXThis = isWrappedCXXThis(ioa); + if (isImplicitObjectCXXThis) + SkippedChecks.set(SanitizerKind::Alignment, true); + if (isImplicitObjectCXXThis || isa(ioa)) + SkippedChecks.set(SanitizerKind::Null, true); + } + + if (UnimplementedFeature::buildTypeCheck()) + llvm_unreachable("NYI"); + + // C++ [class.virtual]p12: + // Explicit qualification with the scope operator (5.1) suppresses the + // virtual call mechanism. + // + // We also don't emit a virtual call if the base expression has a record type + // because then we know what the type is. + bool useVirtualCall = CanUseVirtualCall && !DevirtualizedMethod; + + if (const auto *dtor = dyn_cast(CalleeDecl)) { + llvm_unreachable("NYI"); + } + + // FIXME: Uses of 'MD' past this point need to be audited. We may need to use + // 'CalleeDecl' instead. + + CIRGenCallee Callee; + if (useVirtualCall) { + llvm_unreachable("NYI"); + } else { + if (SanOpts.has(SanitizerKind::CFINVCall)) { + llvm_unreachable("NYI"); + } + + if (getLangOpts().AppleKext) + llvm_unreachable("NYI"); + else if (!DevirtualizedMethod) + Callee = CIRGenCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), + GlobalDecl(MD)); + else { + llvm_unreachable("NYI"); + } + } + + if (MD->isVirtual()) { + llvm_unreachable("NYI"); + } + + return buildCXXMemberOrOperatorCall( + CalleeDecl, Callee, ReturnValue, This.getPointer(), + /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs); +} From e12fa07b97a3f7df28e6e0a2e391411cda9ff4c7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 22:18:42 -0400 Subject: [PATCH 0422/2301] [CIR] Remove an unreachable with arrangeCXXMethodDeclaration and impl it We marked a codepath as unreachable due to not being used before that will be used coming up. The fn only checks if we're in an instance function and, if so, delegates to a currently unreachable'd fn that will do the full chunk of work with the this ptr available. --- clang/lib/CIR/CIRGenCall.cpp | 36 +++++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenExprCXX.cpp | 3 +-- clang/lib/CIR/CIRGenTypes.h | 8 ++++++++ 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index d7532bbb471a..f35b3a8f0477 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -894,3 +894,39 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeFreeFunctionCall( return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, ChainCall ? 1 : 0, ChainCall); } + +/// Set calling convention for CUDA/HIP kernel. +static void setCUDAKernelCallingConvention(CanQualType &FTy, CIRGenModule &CGM, + const FunctionDecl *FD) { + if (FD->hasAttr()) { + llvm_unreachable("NYI"); + } +} + +/// Arrange the argument and result information for a declaration or definition +/// of the given C++ non-static member function. The member function must be an +/// ordinary function, i.e. not a constructor or destructor. +const CIRGenFunctionInfo & +CIRGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { + assert(!isa(MD) && "wrong method for constructors!"); + assert(!isa(MD) && "wrong method for destructors!"); + + CanQualType FT = GetFormalType(MD).getAs(); + setCUDAKernelCallingConvention(FT, CGM, MD); + auto prototype = FT.getAs(); + + if (MD->isInstance()) { + // The abstarct case is perfectly fine. + auto *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); + return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD); + } + + llvm_unreachable("NYI"); +} + +const CIRGenFunctionInfo & +CIRGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, + const FunctionProtoType *FTP, + const CXXMethodDecl *MD) { + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/CIRGenExprCXX.cpp b/clang/lib/CIR/CIRGenExprCXX.cpp index 100a26ef889c..72462f616678 100644 --- a/clang/lib/CIR/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CIRGenExprCXX.cpp @@ -141,8 +141,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( if (const auto *Dtor = dyn_cast(CalleeDecl)) llvm_unreachable("NYI"); else - llvm_unreachable("NYI"); - // FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl); + FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl); mlir::FunctionType Ty = CGM.getTypes().GetFunctionType(*FInfo); diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 02b2b25de22b..77ad4151f78c 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -202,8 +202,16 @@ class CIRGenTypes { const clang::FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs); + /// C++ methods have some special rules and also have implicit parameters. + const CIRGenFunctionInfo & + arrangeCXXMethodDeclaration(const clang::CXXMethodDecl *MD); const CIRGenFunctionInfo &arrangeCXXStructorDeclaration(clang::GlobalDecl GD); + const CIRGenFunctionInfo & + arrangeCXXMethodType(const clang::CXXRecordDecl *RD, + const clang::FunctionProtoType *FTP, + const clang::CXXMethodDecl *MD); + const CIRGenFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const clang::FunctionType *Ty, bool ChainCall); From 3c67d1f50a6145b37d81903179643efa04645851 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 22:26:15 -0400 Subject: [PATCH 0423/2301] [CIR] Flesh out arrangeCXXMethodType This is pretty simple, just add the this to the argTypes and delegate to the standard arrangeCIRFunctionInfo --- clang/lib/CIR/CIRGenCall.cpp | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index f35b3a8f0477..82b642fe1797 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -924,9 +924,21 @@ CIRGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { llvm_unreachable("NYI"); } +/// Arrange the argument and result information for a call to an unknown C++ +/// non-static member function of the given abstract type. (A null RD means we +/// don't have any meaningful "this" argument type, so fall back to a generic +/// pointer type). The member fucntion must be an ordinary function, i.e. not a +/// constructor or destructor. const CIRGenFunctionInfo & CIRGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD) { - llvm_unreachable("NYI"); + llvm::SmallVector argTypes; + + // Add the 'this' pointer. + argTypes.push_back(DeriveThisType(RD, MD)); + + return ::arrangeCIRFunctionInfo( + *this, true, argTypes, + FTP->getCanonicalTypeUnqualified().getAs()); } From f4cf2731321e75607b3ed90b14396ff2a9cf9b3f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 22:31:36 -0400 Subject: [PATCH 0424/2301] [CIR] Add a branch for CXXMethodDecls in CGM::GetAddrOfGlobal --- clang/lib/CIR/CIRGenModule.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 1521e030b8b1..fc3de3ba5ab0 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -864,6 +864,14 @@ CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr, /*DontDefer=*/false, IsForDefinition); + if (isa(D)) { + auto FInfo = + &getTypes().arrangeCXXMethodDeclaration(cast(D)); + auto Ty = getTypes().GetFunctionType(*FInfo); + return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false, + IsForDefinition); + } + llvm_unreachable("NYI"); } From 6fd930798ad7b8950ee8a5de40dbfd0536aa87fd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 22:33:05 -0400 Subject: [PATCH 0425/2301] [CIR] Move a fn from CIRGenTypes.cpp to CIRGenCall.cpp Again, following CodeGen. Not sure why I made this mistake in the first place. --- clang/lib/CIR/CIRGenCall.cpp | 23 +++++++++++++++++++++++ clang/lib/CIR/CIRGenTypes.cpp | 23 ----------------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 82b642fe1797..726bc7c3fad5 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -942,3 +942,26 @@ CIRGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, *this, true, argTypes, FTP->getCanonicalTypeUnqualified().getAs()); } + +/// Arrange the argument and result information for the declaration or +/// definition of the given function. +const CIRGenFunctionInfo & +CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { + assert(!dyn_cast(FD) && "NYI"); + + auto FTy = FD->getType()->getCanonicalTypeUnqualified(); + + assert(isa(FTy)); + // TODO: setCUDAKernelCallingConvention + + // When declaring a function without a prototype, always use a non-variadic + // type. + if (CanQual noProto = FTy.getAs()) { + return arrangeCIRFunctionInfo(noProto->getReturnType(), + /*instanceMethod=*/false, + /*chainCall=*/false, std::nullopt, + noProto->getExtInfo(), {}, RequiredArgs::All); + } + + return arrangeFreeFunctionType(FTy.castAs()); +} diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index c7c2d4c0aa04..958e4a4e79b5 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -580,29 +580,6 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { return arrangeFunctionDeclaration(FD); } -/// Arrange the argument and result information for the declaration or -/// definition of the given function. -const CIRGenFunctionInfo & -CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { - assert(!dyn_cast(FD) && "NYI"); - - auto FTy = FD->getType()->getCanonicalTypeUnqualified(); - - assert(isa(FTy)); - // TODO: setCUDAKernelCallingConvention - - // When declaring a function without a prototype, always use a non-variadic - // type. - if (CanQual noProto = FTy.getAs()) { - return arrangeCIRFunctionInfo(noProto->getReturnType(), - /*instanceMethod=*/false, - /*chainCall=*/false, std::nullopt, - noProto->getExtInfo(), {}, RequiredArgs::All); - } - - return arrangeFreeFunctionType(FTy.castAs()); -} - // UpdateCompletedType - When we find the full definition for a TagDecl, // replace the 'opaque' type we previously made for it if applicable. void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { From 9d4890ce5c392c1c6b5f5b48359a5b46b898302a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 22:39:56 -0400 Subject: [PATCH 0426/2301] [CIR] Delegate to arrangeCXXMethodDeclaration from arrangeFunctionDecl If we invoke arrangeFunctionDecl with a CXXMethodDecl just delegate to arrangeCXXMethodDeclaration instead. --- clang/lib/CIR/CIRGenCall.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 726bc7c3fad5..fc1fb2e4799c 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -947,7 +947,9 @@ CIRGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, /// definition of the given function. const CIRGenFunctionInfo & CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { - assert(!dyn_cast(FD) && "NYI"); + if (const auto *MD = dyn_cast(FD)) + if (MD->isInstance()) + return arrangeCXXMethodDeclaration(MD); auto FTy = FD->getType()->getCanonicalTypeUnqualified(); From f82bbc551c569c879466e0c68936a4f80d104896 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 22:40:39 -0400 Subject: [PATCH 0427/2301] [CIR][NFC] Add a test for C++ method support Simply add a method declaration and a call to it! --- clang/test/CIR/CodeGen/struct.cpp | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 85635923758b..8d148bc04461 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -1,9 +1,11 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * struct Bar { int a; char b; + void method() {} }; struct Foo { @@ -14,13 +16,20 @@ struct Foo { void baz() { Bar b; + b.method(); Foo f; } // CHECK: !_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> -// CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> -// CHECK-NEXT: module { -// CHECK-NEXT: func @_Z3bazv() { -// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: cir.return +// CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> +// CHECK: func @_Z3bazv() +// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK: func @_ZN3Bar6methodEv(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return From 9ffbc403a9c0b4efe04d515cfd2f1733cf5e931f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 22:45:43 -0400 Subject: [PATCH 0428/2301] [CIR][NFC] Add a test for a method that takes an argument Simply add a test in struct.cpp that adds a method that takes an argument and later invokes it! --- clang/test/CIR/CodeGen/struct.cpp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 8d148bc04461..bc1950ad9116 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -6,6 +6,7 @@ struct Bar { int a; char b; void method() {} + void method2(int a) {} }; struct Foo { @@ -17,6 +18,7 @@ struct Foo { void baz() { Bar b; b.method(); + b.method2(4); Foo f; } @@ -26,6 +28,8 @@ void baz() { // CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () +// CHECK-NEXT: %2 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: call @_ZN3Bar7method2Ei(%0, %2) : (!cir.ptr, i32) -> () // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: func @_ZN3Bar6methodEv(%arg0: !cir.ptr @@ -33,3 +37,12 @@ void baz() { // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK: func @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : i32, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } From 53d88e29b0b5f5257c1ef3cd151b81512ad636a4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 22:52:19 -0400 Subject: [PATCH 0429/2301] [CIR][NFC] Add a test method that takes an arg and returns it Just another simple test to verify method arg functionality. --- clang/test/CIR/CodeGen/struct.cpp | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index bc1950ad9116..0d7fe4ec7189 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -7,6 +7,7 @@ struct Bar { char b; void method() {} void method2(int a) {} + int method3(int a) { return a; } }; struct Foo { @@ -19,6 +20,7 @@ void baz() { Bar b; b.method(); b.method2(4); + int result = b.method3(4); Foo f; } @@ -26,10 +28,14 @@ void baz() { // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> // CHECK: func @_Z3bazv() // CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["result", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () -// CHECK-NEXT: %2 = cir.cst(4 : i32) : i32 -// CHECK-NEXT: call @_ZN3Bar7method2Ei(%0, %2) : (!cir.ptr, i32) -> () +// CHECK-NEXT: %3 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, i32) -> () +// CHECK-NEXT: %4 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: %5 = call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, i32) -> i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: func @_ZN3Bar6methodEv(%arg0: !cir.ptr @@ -46,3 +52,15 @@ void baz() { // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } +// CHECK: func @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: cir.store %4, %2 : i32, cir.ptr +// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , i32 +// CHECK-NEXT: cir.return %5 +// CHECK-NEXT: } From 03a0d6b29a4b966aa14d2287f1e78394463e8d2a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 23:15:02 -0400 Subject: [PATCH 0430/2301] [CIR] Start fleshing out CGF::buildLValueForField This is called as part of the buildMemberInitializer pipeline when you have a c++ constructor with memberwise initialization. e.g. struct C { int a; C() :a{0} {} }; This is the next step in the process to implementing the library type `fb::String` (or whatever we end up calling it). --- clang/lib/CIR/CIRGenExpr.cpp | 72 ++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 3 + clang/lib/CIR/CIRGenValue.h | 4 ++ clang/lib/CIR/UnimplementedFeatureGuarding.h | 1 + 4 files changed, 80 insertions(+) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 91de850680d0..271ad2abd261 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -23,9 +23,81 @@ static mlir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { return V; } +static Address buildPreserveStructAccess(CIRGenFunction &CGF, LValue base, + Address addr, const FieldDecl *field) { + llvm_unreachable("NYI"); +} + LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { + LValueBaseInfo BaseInfo = base.getBaseInfo(); + + if (field->isBitField()) { + llvm_unreachable("NYI"); + } + + // Fields of may-alias structures are may-alais themselves. + // FIXME: this hould get propagated down through anonymous structs and unions. + QualType FieldType = field->getType(); + const RecordDecl *rec = field->getParent(); + AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); + LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource)); + if (UnimplementedFeature::tbaa() || rec->hasAttr() || + FieldType->isVectorType()) { + // TODO(CIR): TBAAAccessInfo FieldTBAAInfo + llvm_unreachable("NYI"); + } else if (rec->isUnion()) { + llvm_unreachable("NYI"); + } else { + // If no base type been assigned for the base access, then try to generate + // one for this base lvalue. + assert(!UnimplementedFeature::tbaa() && "NYI"); + } + + Address addr = base.getAddress(); + if (auto *ClassDef = dyn_cast(rec)) { + if (CGM.getCodeGenOpts().StrictVTablePointers && + ClassDef->isDynamicClass()) { + llvm_unreachable("NYI"); + } + } + + unsigned RecordCVR = base.getVRQualifiers(); + if (rec->isUnion()) { + llvm_unreachable("NYI"); + } else { + if (!IsInPreservedAIRegion && + (!getDebugInfo() || !rec->hasAttr())) + llvm_unreachable("NYI"); + else + // Remember the original struct field index + addr = buildPreserveStructAccess(*this, base, addr, field); + } + + // If this is a reference field, load the reference right now. + if (FieldType->isReferenceType()) { + llvm_unreachable("NYI"); + } + + // Make sure that the address is pointing to the right type. This is critical + // for both unions and structs. A union needs a bitcast, a struct element will + // need a bitcast if the CIR type laid out doesn't match the desired type. llvm_unreachable("NYI"); + + if (field->hasAttr()) + llvm_unreachable("NYI"); + + if (UnimplementedFeature::tbaa()) + // Next line should take a TBAA object + llvm_unreachable("NYI"); + LValue LV = makeAddrLValue(addr, FieldType, FieldBaseInfo); + LV.getQuals().addCVRQualifiers(RecordCVR); + + // __weak attribute on a field is ignored. + if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) + llvm_unreachable("NYI"); + + return LV; } LValue CIRGenFunction::buildLValueForFieldInitialization( diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 99e7a5de2b7e..d7a752e08ab4 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -362,6 +362,9 @@ class CIRGenFunction { /// potentially set the return value. bool SawAsmBlock = false; + /// True if CodeGen currently emits code inside preserved access index region. + bool IsInPreservedAIRegion = false; + /// In C++, whether we are code generating a thunk. This controls whether we /// should emit cleanups. bool CurFuncIsThunk = false; diff --git a/clang/lib/CIR/CIRGenValue.h b/clang/lib/CIR/CIRGenValue.h index b54edbb50337..5ce234ce1d46 100644 --- a/clang/lib/CIR/CIRGenValue.h +++ b/clang/lib/CIR/CIRGenValue.h @@ -199,6 +199,10 @@ class LValue { bool isGlobalReg() const { return LVType == GlobalReg; } bool isMatrixElt() const { return LVType == MatrixElt; } + unsigned getVRQualifiers() const { + return Quals.getCVRQualifiers() & ~clang::Qualifiers::Const; + } + bool isVolatile() const { return Quals.hasVolatile(); } bool isNontemporal() const { return Nontemporal; } diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 15c624b5a27f..70203422864f 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -20,6 +20,7 @@ struct UnimplementedFeature { // TODO(CIR): Implement the CIRGenFunction::buildTypeCheck method that handles // sanitizer related type check features static bool buildTypeCheck() { return false; } + static bool tbaa() { return false; } }; } // namespace cir From eb4bf9cfaf6b1454db2e5c23f082fe387ba4ff02 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 2 May 2022 23:21:06 -0400 Subject: [PATCH 0431/2301] [CIR][NFC] Add an XFAIL test for the fb::String example This is the target for our RFC proposal. Keep it checked in and build it incrementally as features are completed. --- clang/test/CIR/CodeGen/String.cpp | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 clang/test/CIR/CodeGen/String.cpp diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp new file mode 100644 index 000000000000..4ff90ea2ecc9 --- /dev/null +++ b/clang/test/CIR/CodeGen/String.cpp @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// XFAIL: * + +class String { + char *storage; + long size; + long capacity; + +public: + String() : size{size} {} +}; + +void test() { + String s; +} From d0e84e8adc0649e4303979ea77dc9a8d143c312b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 4 May 2022 16:59:27 -0700 Subject: [PATCH 0432/2301] [CIR][CodeGen] Add skeleton for buildGlobalVarDefinition --- clang/lib/CIR/CIRGenModule.cpp | 7 ++++++- clang/lib/CIR/CIRGenModule.h | 2 ++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index fc3de3ba5ab0..0c6a8236a7da 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -353,6 +353,11 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, assert(!D->getAttr() && "NYI"); } +void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, + bool IsTentative) { + assert(0 && "not implemented"); +} + void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { const auto *D = cast(GD.getDecl()); @@ -385,7 +390,7 @@ void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { } if (const auto *VD = dyn_cast(D)) - llvm_unreachable("NYI"); + return buildGlobalVarDefinition(VD, !VD->hasDefinition()); llvm_unreachable("Invalid argument to buildGlobalDefinition()"); } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index e01e805e03a4..8db78a889df1 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -244,6 +244,8 @@ class CIRGenModule { void buildGlobalDefinition(clang::GlobalDecl D, mlir::Operation *Op = nullptr); void buildGlobalFunctionDefinition(clang::GlobalDecl D, mlir::Operation *Op); + void buildGlobalVarDefinition(const clang::VarDecl *D, + bool IsTentative = false); /// Stored a deferred empty coverage mapping for an unused and thus /// uninstrumented top level declaration. From da3d0fe67f59e8d1ff10560b013abbe67ecdae5e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 4 May 2022 16:59:27 -0700 Subject: [PATCH 0433/2301] [CIR][CodeGen] Globals: Add skeleton for ConstantEmitter, ConstExprEmitter and visitors --- clang/lib/CIR/CIRGenCstEmitter.h | 184 ++++++++++++++ clang/lib/CIR/CIRGenExprCst.cpp | 416 +++++++++++++++++++++++++++++++ clang/lib/CIR/CMakeLists.txt | 1 + 3 files changed, 601 insertions(+) create mode 100644 clang/lib/CIR/CIRGenCstEmitter.h create mode 100644 clang/lib/CIR/CIRGenExprCst.cpp diff --git a/clang/lib/CIR/CIRGenCstEmitter.h b/clang/lib/CIR/CIRGenCstEmitter.h new file mode 100644 index 000000000000..c0cddca33d97 --- /dev/null +++ b/clang/lib/CIR/CIRGenCstEmitter.h @@ -0,0 +1,184 @@ +//===--- CIRGenCstEmitter.h - CIR constant emission -------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// A helper class for emitting expressions and values as mlir::cir::ConstantOp +// and as initializers for global variables. +// +// Note: this is based on LLVM's codegen in ConstantEmitter.h, reusing this +// class interface makes it easier move forward with bringing CIR codegen +// to completion. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CODEGEN_CIRGEN_CONSTANTEMITTER_H +#define LLVM_CLANG_LIB_CODEGEN_CIRGEN_CONSTANTEMITTER_H + +#include "CIRGenFunction.h" +#include "CIRGenModule.h" + +namespace cir { + +class ConstantEmitter { +public: + CIRGenModule &CGM; + CIRGenFunction *const CGF; + +private: + bool Abstract = false; + + /// Whether non-abstract components of the emitter have been initialized. + bool InitializedNonAbstract = false; + + /// Whether the emitter has been finalized. + bool Finalized = false; + + /// Whether the constant-emission failed. + bool Failed = false; + + /// Whether we're in a constant context. + bool InConstantContext = false; + + /// The AST address space where this (non-abstract) initializer is going. + /// Used for generating appropriate placeholders. + clang::LangAS DestAddressSpace; + + llvm::SmallVector, 4> + PlaceholderAddresses; + +public: + ConstantEmitter(CIRGenModule &CGM, CIRGenFunction *CGF = nullptr) + : CGM(CGM), CGF(CGF) {} + + /// Initialize this emission in the context of the given function. + /// Use this if the expression might contain contextual references like + /// block addresses or PredefinedExprs. + ConstantEmitter(CIRGenFunction &CGF) : CGM(CGF.CGM), CGF(&CGF) {} + + ConstantEmitter(const ConstantEmitter &other) = delete; + ConstantEmitter &operator=(const ConstantEmitter &other) = delete; + + ~ConstantEmitter(); + + /// Is the current emission context abstract? + bool isAbstract() const { return Abstract; } + + /// Try to emit the initiaizer of the given declaration as an abstract + /// constant. If this succeeds, the emission must be finalized. + mlir::Value tryEmitForInitializer(const clang::VarDecl &D); + mlir::Value tryEmitForInitializer(const clang::Expr *E, + clang::LangAS destAddrSpace, + clang::QualType destType); + // llvm::Constant *emitForInitializer(const APValue &value, LangAS + // destAddrSpace, + // QualType destType); + + // void finalize(llvm::GlobalVariable *global); + + // All of the "abstract" emission methods below permit the emission to + // be immediately discarded without finalizing anything. Therefore, they + // must also promise not to do anything that will, in the future, require + // finalization: + // + // - using the CGF (if present) for anything other than establishing + // semantic context; for example, an expression with ignored + // side-effects must not be emitted as an abstract expression + // + // - doing anything that would not be safe to duplicate within an + // initializer or to propagate to another context; for example, + // side effects, or emitting an initialization that requires a + // reference to its current location. + + /// Try to emit the initializer of the given declaration as an abstract + /// constant. + // llvm::Constant *tryEmitAbstractForInitializer(const VarDecl &D); + + /// Emit the result of the given expression as an abstract constant, + /// asserting that it succeeded. This is only safe to do when the + /// expression is known to be a constant expression with either a fairly + /// simple type or a known simple form. + // llvm::Constant *emitAbstract(const Expr *E, QualType T); + // llvm::Constant *emitAbstract(SourceLocation loc, const APValue &value, + // QualType T); + + /// Try to emit the result of the given expression as an abstract constant. + // llvm::Constant *tryEmitAbstract(const Expr *E, QualType T); + // llvm::Constant *tryEmitAbstractForMemory(const Expr *E, QualType T); + + // llvm::Constant *tryEmitAbstract(const APValue &value, QualType T); + // llvm::Constant *tryEmitAbstractForMemory(const APValue &value, QualType T); + + // llvm::Constant *tryEmitConstantExpr(const ConstantExpr *CE); + + // llvm::Constant *emitNullForMemory(QualType T) { + // return emitNullForMemory(CGM, T); + // } + mlir::Value emitForMemory(mlir::Value C, clang::QualType T) { + return emitForMemory(CGM, C, T); + } + + // static llvm::Constant *emitNullForMemory(CodeGenModule &CGM, QualType T); + static mlir::Value emitForMemory(CIRGenModule &CGM, mlir::Value C, + clang::QualType T); + + // These are private helper routines of the constant emitter that + // can't actually be private because things are split out into helper + // functions and classes. + + mlir::Value tryEmitPrivateForVarInit(const clang::VarDecl &D); + mlir::Value tryEmitPrivate(const clang::Expr *E, clang::QualType T); + mlir::Value tryEmitPrivateForMemory(const clang::Expr *E, clang::QualType T); + + mlir::Value tryEmitPrivate(const clang::APValue &value, clang::QualType T); + mlir::Value tryEmitPrivateForMemory(const clang::APValue &value, + clang::QualType T); + + /// Get the address of the current location. This is a constant + /// that will resolve, after finalization, to the address of the + /// 'signal' value that is registered with the emitter later. + // llvm::GlobalValue *getCurrentAddrPrivate(); + + /// Register a 'signal' value with the emitter to inform it where to + /// resolve a placeholder. The signal value must be unique in the + /// initializer; it might, for example, be the address of a global that + /// refers to the current-address value in its own initializer. + /// + /// Uses of the placeholder must be properly anchored before finalizing + /// the emitter, e.g. by being installed as the initializer of a global + /// variable. That is, it must be possible to replaceAllUsesWith + /// the placeholder with the proper address of the signal. + // void registerCurrentAddrPrivate(llvm::Constant *signal, + // llvm::GlobalValue *placeholder); + +private: + void initializeNonAbstract(clang::LangAS destAS) { + assert(!InitializedNonAbstract); + InitializedNonAbstract = true; + DestAddressSpace = destAS; + } + mlir::Value markIfFailed(mlir::Value init) { + if (!init) + Failed = true; + return init; + } + + struct AbstractState { + bool OldValue; + size_t OldPlaceholdersSize; + }; + AbstractState pushAbstract() { + AbstractState saved = {Abstract, PlaceholderAddresses.size()}; + Abstract = true; + return saved; + } + // llvm::Constant *validateAndPopAbstract(llvm::Constant *C, AbstractState + // save); +}; + +} // namespace cir + +#endif diff --git a/clang/lib/CIR/CIRGenExprCst.cpp b/clang/lib/CIR/CIRGenExprCst.cpp new file mode 100644 index 000000000000..a0844d450eb8 --- /dev/null +++ b/clang/lib/CIR/CIRGenExprCst.cpp @@ -0,0 +1,416 @@ +//===---- CIRGenExprCst.cpp - Emit LLVM Code from Constant Expressions ----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Constant Expr nodes as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCstEmitter.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "clang/AST/APValue.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/OperationKinds.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/Basic/Builtins.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/Sequence.h" + +using namespace clang; +using namespace cir; + +//===----------------------------------------------------------------------===// +// ConstExprEmitter +//===----------------------------------------------------------------------===// + +namespace { + +// This class only needs to handle arrays, structs and unions. +// +// In LLVM codegen, when outside C++11 mode, those types are not constant +// folded, while all other types are handled by constant folding. +// +// In CIR codegen, instead of folding things here, we should defer that work +// to MLIR: do not attempt to do much here. +class ConstExprEmitter + : public StmtVisitor { + CIRGenModule &CGM; + LLVM_ATTRIBUTE_UNUSED ConstantEmitter &Emitter; + +public: + ConstExprEmitter(ConstantEmitter &emitter) + : CGM(emitter.CGM), Emitter(emitter) {} + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + mlir::Value VisitStmt(Stmt *S, QualType T) { return nullptr; } + + mlir::Value VisitConstantExpr(ConstantExpr *CE, QualType T) { + assert(0 && "unimplemented"); + // if (mlir::Value Result = Emitter.tryEmitConstantExpr(CE)) + // return Result; + // return Visit(CE->getSubExpr(), T); + return {}; + } + + mlir::Value VisitParenExpr(ParenExpr *PE, QualType T) { + return Visit(PE->getSubExpr(), T); + } + + mlir::Value + VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE, + QualType T) { + return Visit(PE->getReplacement(), T); + } + + mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *GE, QualType T) { + return Visit(GE->getResultExpr(), T); + } + + mlir::Value VisitChooseExpr(ChooseExpr *CE, QualType T) { + return Visit(CE->getChosenSubExpr(), T); + } + + mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *E, QualType T) { + return Visit(E->getInitializer(), T); + } + + mlir::Value VisitCastExpr(CastExpr *E, QualType destType) { + if (const auto *ECE = dyn_cast(E)) + assert(0 && "not implemented"); + Expr *subExpr = E->getSubExpr(); + + switch (E->getCastKind()) { + case CK_HLSLArrayRValue: + case CK_HLSLVectorTruncation: + case CK_ToUnion: { + assert(0 && "not implemented"); + } + + case CK_AddressSpaceConversion: { + assert(0 && "not implemented"); + } + + case CK_LValueToRValue: + case CK_AtomicToNonAtomic: + case CK_NonAtomicToAtomic: + case CK_NoOp: + case CK_ConstructorConversion: + return Visit(subExpr, destType); + + case CK_IntToOCLSampler: + llvm_unreachable("global sampler variables are not generated"); + + case CK_Dependent: + llvm_unreachable("saw dependent cast!"); + + case CK_BuiltinFnToFnPtr: + llvm_unreachable("builtin functions are handled elsewhere"); + + case CK_ReinterpretMemberPointer: + case CK_DerivedToBaseMemberPointer: + case CK_BaseToDerivedMemberPointer: { + assert(0 && "not implemented"); + } + + // These will never be supported. + case CK_ObjCObjectLValueCast: + case CK_ARCProduceObject: + case CK_ARCConsumeObject: + case CK_ARCReclaimReturnedObject: + case CK_ARCExtendBlockObject: + case CK_CopyAndAutoreleaseBlockObject: + return nullptr; + + // These don't need to be handled here because Evaluate knows how to + // evaluate them in the cases where they can be folded. + case CK_BitCast: + case CK_ToVoid: + case CK_Dynamic: + case CK_LValueBitCast: + case CK_LValueToRValueBitCast: + case CK_NullToMemberPointer: + case CK_UserDefinedConversion: + case CK_CPointerToObjCPointerCast: + case CK_BlockPointerToObjCPointerCast: + case CK_AnyPointerToBlockPointerCast: + case CK_ArrayToPointerDecay: + case CK_FunctionToPointerDecay: + case CK_BaseToDerived: + case CK_DerivedToBase: + case CK_UncheckedDerivedToBase: + case CK_MemberPointerToBoolean: + case CK_VectorSplat: + case CK_FloatingRealToComplex: + case CK_FloatingComplexToReal: + case CK_FloatingComplexToBoolean: + case CK_FloatingComplexCast: + case CK_FloatingComplexToIntegralComplex: + case CK_IntegralRealToComplex: + case CK_IntegralComplexToReal: + case CK_IntegralComplexToBoolean: + case CK_IntegralComplexCast: + case CK_IntegralComplexToFloatingComplex: + case CK_PointerToIntegral: + case CK_PointerToBoolean: + case CK_NullToPointer: + case CK_IntegralCast: + case CK_BooleanToSignedIntegral: + case CK_IntegralToPointer: + case CK_IntegralToBoolean: + case CK_IntegralToFloating: + case CK_FloatingToIntegral: + case CK_FloatingToBoolean: + case CK_FloatingCast: + case CK_FloatingToFixedPoint: + case CK_FixedPointToFloating: + case CK_FixedPointCast: + case CK_FixedPointToBoolean: + case CK_FixedPointToIntegral: + case CK_IntegralToFixedPoint: + case CK_ZeroToOCLOpaqueType: + case CK_MatrixCast: + return nullptr; + } + llvm_unreachable("Invalid CastKind"); + } + + mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE, QualType T) { + // TODO(cir): figure out CIR story here... + // No need for a DefaultInitExprScope: we don't handle 'this' in a + // constant expression. + return Visit(DIE->getExpr(), T); + } + + mlir::Value VisitExprWithCleanups(ExprWithCleanups *E, QualType T) { + return Visit(E->getSubExpr(), T); + } + + mlir::Value VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E, + QualType T) { + return Visit(E->getSubExpr(), T); + } + + mlir::Value EmitArrayInitialization(InitListExpr *ILE, QualType T) { + assert(0 && "not implemented"); + return {}; + } + + mlir::Value EmitRecordInitialization(InitListExpr *ILE, QualType T) { + assert(0 && "not implemented"); + return {}; + } + + mlir::Value VisitImplicitValueInitExpr(ImplicitValueInitExpr *E, QualType T) { + assert(0 && "not implemented"); + return {}; + } + + mlir::Value VisitInitListExpr(InitListExpr *ILE, QualType T) { + if (ILE->isTransparent()) + return Visit(ILE->getInit(0), T); + + if (ILE->getType()->isArrayType()) + return EmitArrayInitialization(ILE, T); + + if (ILE->getType()->isRecordType()) + return EmitRecordInitialization(ILE, T); + + return nullptr; + } + + mlir::Value VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E, + QualType destType) { + auto C = Visit(E->getBase(), destType); + if (!C) + return nullptr; + + assert(0 && "not implemented"); + return {}; + } + + mlir::Value VisitCXXConstructExpr(CXXConstructExpr *E, QualType Ty) { + if (!E->getConstructor()->isTrivial()) + return nullptr; + + // Only default and copy/move constructors can be trivial. + if (E->getNumArgs()) { + assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument"); + assert(E->getConstructor()->isCopyOrMoveConstructor() && + "trivial ctor has argument but isn't a copy/move ctor"); + + Expr *Arg = E->getArg(0); + assert(CGM.getASTContext().hasSameUnqualifiedType(Ty, Arg->getType()) && + "argument to copy ctor is of wrong type"); + + return Visit(Arg, Ty); + } + + assert(0 && "not implemented"); + return {}; + } + + mlir::Value VisitStringLiteral(StringLiteral *E, QualType T) { + // This is a string literal initializing an array in an initializer. + assert(0 && "not implemented"); + return {}; + } + + mlir::Value VisitObjCEncodeExpr(ObjCEncodeExpr *E, QualType T) { + assert(0 && "not implemented"); + return {}; + } + + mlir::Value VisitUnaryExtension(const UnaryOperator *E, QualType T) { + return Visit(E->getSubExpr(), T); + } + + // Utility methods + mlir::Type ConvertType(QualType T) { return CGM.getTypes().ConvertType(T); } +}; + +} // end anonymous namespace. + +//===----------------------------------------------------------------------===// +// ConstantEmitter +//===----------------------------------------------------------------------===// + +mlir::Value ConstantEmitter::tryEmitForInitializer(const VarDecl &D) { + initializeNonAbstract(D.getType().getAddressSpace()); + return markIfFailed(tryEmitPrivateForVarInit(D)); +} + +mlir::Value ConstantEmitter::tryEmitForInitializer(const Expr *E, + LangAS destAddrSpace, + QualType destType) { + initializeNonAbstract(destAddrSpace); + return markIfFailed(tryEmitPrivateForMemory(E, destType)); +} + +// mlir::Value ConstantEmitter::emitForInitializer(const APValue &value, +// LangAS destAddrSpace, +// QualType destType) { +// initializeNonAbstract(destAddrSpace); +// auto C = tryEmitPrivateForMemory(value, destType); +// assert(C && "couldn't emit constant value non-abstractly?"); +// return C; +// } + +// void ConstantEmitter::finalize(llvm::GlobalVariable *global) { +// assert(InitializedNonAbstract && +// "finalizing emitter that was used for abstract emission?"); +// assert(!Finalized && "finalizing emitter multiple times"); +// assert(global->getInitializer()); + +// // Note that we might also be Failed. +// Finalized = true; + +// if (!PlaceholderAddresses.empty()) { +// assert(0 && "not implemented"); +// } +// } + +ConstantEmitter::~ConstantEmitter() { + assert((!InitializedNonAbstract || Finalized || Failed) && + "not finalized after being initialized for non-abstract emission"); + assert(PlaceholderAddresses.empty() && "unhandled placeholders"); +} + +// TODO(cir): this can be shared with LLVM's codegen +static QualType getNonMemoryType(CIRGenModule &CGM, QualType type) { + if (auto AT = type->getAs()) { + return CGM.getASTContext().getQualifiedType(AT->getValueType(), + type.getQualifiers()); + } + return type; +} + +mlir::Value ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { + // Make a quick check if variable can be default NULL initialized + // and avoid going through rest of code which may do, for c++11, + // initialization of memory to all NULLs. + if (!D.hasLocalStorage()) { + QualType Ty = CGM.getASTContext().getBaseElementType(D.getType()); + if (Ty->isRecordType()) + if (const CXXConstructExpr *E = + dyn_cast_or_null(D.getInit())) { + const CXXConstructorDecl *CD = E->getConstructor(); + if (CD->isTrivial() && CD->isDefaultConstructor()) + assert(0 && "not implemented"); + } + } + InConstantContext = D.hasConstantInitialization(); + + QualType destType = D.getType(); + + // Try to emit the initializer. Note that this can allow some things that + // are not allowed by tryEmitPrivateForMemory alone. + if (auto value = D.evaluateValue()) { + return tryEmitPrivateForMemory(*value, destType); + } + + assert(0 && "not implemented"); + return {}; +} + +mlir::Value ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, + QualType destType) { + auto nonMemoryDestType = getNonMemoryType(CGM, destType); + auto C = tryEmitPrivate(value, nonMemoryDestType); + return (C ? emitForMemory(C, destType) : nullptr); +} + +mlir::Value ConstantEmitter::tryEmitPrivateForMemory(const clang::Expr *E, + clang::QualType T) { + llvm_unreachable("NYI"); +} + +mlir::Value ConstantEmitter::emitForMemory(CIRGenModule &CGM, mlir::Value C, + QualType destType) { + // For an _Atomic-qualified constant, we may need to add tail padding. + if (auto AT = destType->getAs()) { + assert(0 && "not implemented"); + } + + // Zero-extend bool. + if (C.getType().isa()) { + assert(0 && "not implemented"); + } + + return C; +} + +mlir::Value ConstantEmitter::tryEmitPrivate(const APValue &Value, + QualType DestType) { + switch (Value.getKind()) { + case APValue::None: + case APValue::Indeterminate: + // TODO(cir): LLVM models out-of-lifetime and indeterminate values as + // 'undef'. Find out what's better for CIR. + assert(0 && "not implemented"); + case APValue::Int: + assert(0 && "not implemented"); + case APValue::LValue: + case APValue::FixedPoint: + case APValue::ComplexInt: + case APValue::Float: + case APValue::ComplexFloat: + case APValue::Vector: + case APValue::AddrLabelDiff: + case APValue::Struct: + case APValue::Union: + case APValue::Array: + case APValue::MemberPointer: + assert(0 && "not implemented"); + } + llvm_unreachable("Unknown APValue kind"); +} diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 0399814350e7..b84f798229f7 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -17,6 +17,7 @@ add_clang_library(clangCIR CIRGenCleanup.cpp CIRGenDecl.cpp CIRGenExpr.cpp + CIRGenExprCst.cpp CIRGenExprAgg.cpp CIRGenExprCXX.cpp CIRGenExprScalar.cpp From 6b3744a5c48d38ac9c9c41225b1be6b2f8c51005 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 4 May 2022 16:59:27 -0700 Subject: [PATCH 0434/2301] [CIR][CodeGen] Introduce few helpers and module level tracking needed for global constant emission --- clang/lib/CIR/CIRGenFunction.h | 2 ++ clang/lib/CIR/CIRGenModule.h | 11 +++++++++++ 2 files changed, 13 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index d7a752e08ab4..a1347e1ebe55 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -334,6 +334,8 @@ class CIRGenFunction { mlir::OpBuilder &getBuilder() { return builder; } + CIRGenModule &getCIRGenModule() { return CGM; } + /// Sanitizers enabled for this function. clang::SanitizerSet SanOpts; diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 8db78a889df1..3e81a2e96f6c 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -33,6 +33,7 @@ #include "mlir/IR/MLIRContext.h" #include "mlir/IR/Value.h" +using namespace clang; namespace cir { class CIRGenFunction; @@ -98,6 +99,16 @@ class CIRGenModule { // or a definition. llvm::SmallPtrSet WeakRefReferences; + // TODO(cir): does this really need to be a state for CIR emission? + GlobalDecl initializedGlobalDecl; + + /// When a C++ decl with an initializer is deferred, null is + /// appended to CXXGlobalInits, and the index of that null is placed + /// here so that the initializer will be performed in the correct + /// order. Once the decl is emitted, the index is replaced with ~0U to ensure + /// that we don't re-emit the initializer. + llvm::DenseMap DelayedCXXInitPosition; + /// ------- /// Declaring variables /// ------- From 0261bbef9de2a2db1981277ac4d5a5fd7827a31b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 4 May 2022 16:59:27 -0700 Subject: [PATCH 0435/2301] [CIR] Add cir.global operation - Add operation. - Reuse some cir.cst parsing and printing for initial value support. - Add all relevant methods to parse/print/verify. --- clang/test/CIR/IR/global.cir | 10 ++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 73 +++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 128 +++++++++++++++++---- 3 files changed, 183 insertions(+), 28 deletions(-) create mode 100644 clang/test/CIR/IR/global.cir diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir new file mode 100644 index 000000000000..5138aa91a4be --- /dev/null +++ b/clang/test/CIR/IR/global.cir @@ -0,0 +1,10 @@ +// RUN: cir-tool %s | FileCheck %s + +module { + cir.global @a : i32 = 3 + func.func @use_global() { + cir.return + } +} + +// CHECK: cir.global @a : i32 = 3 diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index e9f13cc8da35..de38f61dccfd 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -114,16 +114,18 @@ def PtrStrideOp : CIR_Op<"ptr_stride", def ConstantOp : CIR_Op<"cst", [ConstantLike, Pure]> { + // FIXME: Use SameOperandsAndResultType or similar and prevent eye bleeding + // type repetition in the assembly form. - let summary = "constant"; + let summary = "constant operation"; let description = [{ Constant operation turns a literal into an SSA value. The data is attached to the operation as an attribute. For example: ```mlir - %0 = cir.cst(42 : i32) - %1 = cir.cst(4.2 : f32) - %2 = cir.cst(nullptr : !cir.ptr) + %0 = cir.cst(42 : i32) : i32 + %1 = cir.cst(4.2 : f32) : f32 + %2 = cir.cst(nullptr : !cir.ptr) : !cir.ptr ``` }]; @@ -133,7 +135,9 @@ def ConstantOp : CIR_Op<"cst", // The constant operation returns a single value of AnyType. let results = (outs AnyType:$res); - let assemblyFormat = "`(` custom($value) `)` attr-dict `:` type($res)"; + let assemblyFormat = [{ + `(` custom($value) `)` attr-dict `:` type($res) + }]; let hasVerifier = 1; @@ -849,5 +853,64 @@ def LoopOp : CIR_Op<"loop", let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// GlobalOp +//===----------------------------------------------------------------------===// + +def GlobalOp : CIR_Op<"global", [Symbol]> { + let summary = "declare or define a global variable"; + let description = [{ + The `cir.global` operation declares or defines a named global variable. + + The backing memory for the variable is allocated statically and is + described by the type of the variable. + + The operation is a declaration if no `inital_value` is + specified, else it is a definition. + + The global variable can also be marked constant using the + `constant` unit attribute. Writing to such constant global variables is + undefined. + + Example: + + ```mlir + // Externally available and constant variable with initial value. + cir.global public constant @c : i32 = 4; + ``` + }]; + + // Note that both sym_name and sym_visibility are tied to Symbol trait. + let arguments = (ins SymbolNameAttr:$sym_name, + OptionalAttr:$sym_visibility, + TypeAttr:$sym_type, + OptionalAttr:$initial_value, + UnitAttr:$constant, + OptionalAttr:$alignment); + + let assemblyFormat = [{ + ($sym_visibility^)? + (`constant` $constant^)? + $sym_name `:` + custom($sym_type, $initial_value) + attr-dict + }]; + + let extraClassDeclaration = [{ + bool isDeclaration() { + return !getInitialValue(); + } + }]; + + let skipDefaultBuilders = 1; + let builders = [ + OpBuilder<(ins + "StringRef":$sym_name, + "Type":$sym_type + )> + ]; + + let hasVerifier = 1; +} #endif // MLIR_CIR_DIALECT_CIR_OPS diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 28a3c96e1195..4fa96e6eead1 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -63,49 +63,74 @@ void cir::CIRDialect::initialize() { // ConstantOp //===----------------------------------------------------------------------===// -LogicalResult ConstantOp::verify() { - auto opType = getType(); - auto val = getValue(); - auto valueType = val.getType(); - - if (val.isa()) { +static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, + mlir::Attribute attrType) { + if (attrType.isa()) { if (opType.isa<::mlir::cir::PointerType>()) return success(); - return emitOpError("nullptr expects pointer type"); + return op->emitOpError("nullptr expects pointer type"); } - // ODS already generates checks to make sure the result type is valid. We just - // need to additionally check that the value's attribute type is consistent - // with the result type. - if (val.isa()) { + if (attrType.isa()) { if (!opType.isa()) - return emitOpError("result type (") - << opType << ") must be '!cir.bool' for '" << val << "'"; + return op->emitOpError("result type (") + << opType << ") must be '!cir.bool' for '" << attrType << "'"; return success(); } - if (opType.isa()) { - if (valueType != opType) - return emitOpError("result type (") - << opType << ") does not match value type (" << valueType << ")"; + if (attrType.isa()) { + auto at = attrType.cast(); + if (at.getType() != opType) { + return op->emitOpError("result type (") + << opType << ") does not match value type (" << at.getType() + << ")"; + } return success(); } - return emitOpError("cannot have value of type ") << valueType; + assert(attrType.isa() && "What else could we be looking at here?"); + return op->emitOpError("cannot have value of type ") + << attrType.cast().getType(); +} + +LogicalResult ConstantOp::verify() { + // ODS already generates checks to make sure the result type is valid. We just + // need to additionally check that the value's attribute type is consistent + // with the result type. + return checkConstantTypes(getOperation(), getType(), getValue()); } static ParseResult parseConstantValue(OpAsmParser &parser, - mlir::Attribute &valueAttr) { + mlir::Attribute &valueAttr, + mlir::Type ty = {}) { + if (succeeded(parser.parseOptionalKeyword("nullptr"))) { + valueAttr = UnitAttr::get(parser.getContext()); + return success(); + } + NamedAttrList attr; - if (parser.parseAttribute(valueAttr, "value", attr)) - return ::mlir::failure(); + + if (parser.parseAttribute(valueAttr, ty, "value", attr).failed()) { + return parser.emitError(parser.getCurrentLocation(), + "expected constant attribute to match type"); + } return success(); } +// FIXME: create a CIRCstAttr and hide this away for both global +// initialization and cir.cst operation. +static void printConstant(OpAsmPrinter &p, Attribute value, + bool omitType = false) { + if (omitType) + p.printAttributeWithoutType(value); + else + p.printAttribute(value); +} + static void printConstantValue(OpAsmPrinter &p, cir::ConstantOp op, Attribute value) { - p.printAttribute(value); + printConstant(p, value); } OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } @@ -834,7 +859,7 @@ LogicalResult LoopOp::verify() { // 'cir.yield continue'. auto terminateError = [&]() { return emitOpError() << "cond region must be terminated with " - "'cir.yield' or 'cir.yield continue'"; + "'cir.yield' or 'cir.yield continue'"; }; auto &blocks = getCond().getBlocks(); @@ -854,6 +879,63 @@ LogicalResult LoopOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// GlobalOp +//===----------------------------------------------------------------------===// + +static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, + TypeAttr type, + Attribute initAttr) { + p << type; + if (!op.isDeclaration()) { + p << " = "; + printConstant(p, initAttr, /*omitType=*/true); + } +} + +static ParseResult +parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, + Attribute &initialValueAttr) { + Type type; + if (parser.parseType(type)) + return failure(); + typeAttr = TypeAttr::get(type); + + if (parser.parseOptionalEqual().failed()) + return success(); + + if (parseConstantValue(parser, initialValueAttr, type).failed()) + return failure(); + + return success(); +} + +LogicalResult GlobalOp::verify() { + // Verify that the initial value, if present, is either a unit attribute or + // an attribute CIR supports. + if (getInitialValue().has_value()) + return checkConstantTypes(getOperation(), getSymType(), + getInitialValue().value()); + + if (std::optional alignAttr = getAlignment()) { + uint64_t alignment = alignAttr.value(); + if (!llvm::isPowerOf2_64(alignment)) + return emitError() << "alignment attribute value " << alignment + << " is not a power of 2"; + } + + // TODO: verify visibility for declarations? + return success(); +} + +void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, + StringRef sym_name, Type sym_type) { + odsState.addAttribute(getSymNameAttrName(odsState.name), + odsBuilder.getStringAttr(sym_name)); + odsState.addAttribute(getSymTypeAttrName(odsState.name), + ::mlir::TypeAttr::get(sym_type)); +} + //===----------------------------------------------------------------------===// // CIR defined traits //===----------------------------------------------------------------------===// From 8f777151aeeb1d4675d5e299fc37cedd901b34f7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 4 May 2022 16:59:27 -0700 Subject: [PATCH 0436/2301] [CIR][CodeGen] Globals: initial support - Add codegen for simple global example. - Rewrite ConstExprEmitter to use mlir::Attribute. - Implement all logic to build globals, assert for everything not simple. - We don't handle any linkage just yet, public/external by default. --- clang/lib/CIR/CIRGenCstEmitter.h | 75 +---- clang/lib/CIR/CIRGenExprCst.cpp | 127 ++++---- clang/lib/CIR/CIRGenModule.cpp | 447 ++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenModule.h | 34 +++ clang/test/CIR/CodeGen/globals.cpp | 7 + 5 files changed, 544 insertions(+), 146 deletions(-) create mode 100644 clang/test/CIR/CodeGen/globals.cpp diff --git a/clang/lib/CIR/CIRGenCstEmitter.h b/clang/lib/CIR/CIRGenCstEmitter.h index c0cddca33d97..74eb103c3abc 100644 --- a/clang/lib/CIR/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CIRGenCstEmitter.h @@ -69,15 +69,9 @@ class ConstantEmitter { /// Try to emit the initiaizer of the given declaration as an abstract /// constant. If this succeeds, the emission must be finalized. - mlir::Value tryEmitForInitializer(const clang::VarDecl &D); - mlir::Value tryEmitForInitializer(const clang::Expr *E, - clang::LangAS destAddrSpace, - clang::QualType destType); - // llvm::Constant *emitForInitializer(const APValue &value, LangAS - // destAddrSpace, - // QualType destType); + mlir::Attribute tryEmitForInitializer(const VarDecl &D); - // void finalize(llvm::GlobalVariable *global); + void finalize(mlir::cir::GlobalOp global); // All of the "abstract" emission methods below permit the emission to // be immediately discarded without finalizing anything. Therefore, they @@ -92,67 +86,24 @@ class ConstantEmitter { // initializer or to propagate to another context; for example, // side effects, or emitting an initialization that requires a // reference to its current location. - - /// Try to emit the initializer of the given declaration as an abstract - /// constant. - // llvm::Constant *tryEmitAbstractForInitializer(const VarDecl &D); - - /// Emit the result of the given expression as an abstract constant, - /// asserting that it succeeded. This is only safe to do when the - /// expression is known to be a constant expression with either a fairly - /// simple type or a known simple form. - // llvm::Constant *emitAbstract(const Expr *E, QualType T); - // llvm::Constant *emitAbstract(SourceLocation loc, const APValue &value, - // QualType T); - - /// Try to emit the result of the given expression as an abstract constant. - // llvm::Constant *tryEmitAbstract(const Expr *E, QualType T); - // llvm::Constant *tryEmitAbstractForMemory(const Expr *E, QualType T); - - // llvm::Constant *tryEmitAbstract(const APValue &value, QualType T); - // llvm::Constant *tryEmitAbstractForMemory(const APValue &value, QualType T); - - // llvm::Constant *tryEmitConstantExpr(const ConstantExpr *CE); - - // llvm::Constant *emitNullForMemory(QualType T) { - // return emitNullForMemory(CGM, T); - // } - mlir::Value emitForMemory(mlir::Value C, clang::QualType T) { + mlir::Attribute emitForMemory(mlir::TypedAttr C, QualType T) { return emitForMemory(CGM, C, T); } // static llvm::Constant *emitNullForMemory(CodeGenModule &CGM, QualType T); - static mlir::Value emitForMemory(CIRGenModule &CGM, mlir::Value C, - clang::QualType T); + static mlir::Attribute emitForMemory(CIRGenModule &CGM, mlir::TypedAttr C, + clang::QualType T); // These are private helper routines of the constant emitter that // can't actually be private because things are split out into helper // functions and classes. - mlir::Value tryEmitPrivateForVarInit(const clang::VarDecl &D); - mlir::Value tryEmitPrivate(const clang::Expr *E, clang::QualType T); - mlir::Value tryEmitPrivateForMemory(const clang::Expr *E, clang::QualType T); - - mlir::Value tryEmitPrivate(const clang::APValue &value, clang::QualType T); - mlir::Value tryEmitPrivateForMemory(const clang::APValue &value, - clang::QualType T); - - /// Get the address of the current location. This is a constant - /// that will resolve, after finalization, to the address of the - /// 'signal' value that is registered with the emitter later. - // llvm::GlobalValue *getCurrentAddrPrivate(); - - /// Register a 'signal' value with the emitter to inform it where to - /// resolve a placeholder. The signal value must be unique in the - /// initializer; it might, for example, be the address of a global that - /// refers to the current-address value in its own initializer. - /// - /// Uses of the placeholder must be properly anchored before finalizing - /// the emitter, e.g. by being installed as the initializer of a global - /// variable. That is, it must be possible to replaceAllUsesWith - /// the placeholder with the proper address of the signal. - // void registerCurrentAddrPrivate(llvm::Constant *signal, - // llvm::GlobalValue *placeholder); + mlir::TypedAttr tryEmitPrivateForVarInit(const VarDecl &D); + mlir::TypedAttr tryEmitPrivate(const Expr *E, QualType T); + mlir::TypedAttr tryEmitPrivateForMemory(const Expr *E, QualType T); + + mlir::TypedAttr tryEmitPrivate(const APValue &value, QualType T); + mlir::TypedAttr tryEmitPrivateForMemory(const APValue &value, QualType T); private: void initializeNonAbstract(clang::LangAS destAS) { @@ -160,7 +111,7 @@ class ConstantEmitter { InitializedNonAbstract = true; DestAddressSpace = destAS; } - mlir::Value markIfFailed(mlir::Value init) { + mlir::Attribute markIfFailed(mlir::Attribute init) { if (!init) Failed = true; return init; @@ -175,8 +126,6 @@ class ConstantEmitter { Abstract = true; return saved; } - // llvm::Constant *validateAndPopAbstract(llvm::Constant *C, AbstractState - // save); }; } // namespace cir diff --git a/clang/lib/CIR/CIRGenExprCst.cpp b/clang/lib/CIR/CIRGenExprCst.cpp index a0844d450eb8..8390888a5400 100644 --- a/clang/lib/CIR/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CIRGenExprCst.cpp @@ -40,7 +40,7 @@ namespace { // In CIR codegen, instead of folding things here, we should defer that work // to MLIR: do not attempt to do much here. class ConstExprEmitter - : public StmtVisitor { + : public StmtVisitor { CIRGenModule &CGM; LLVM_ATTRIBUTE_UNUSED ConstantEmitter &Emitter; @@ -52,39 +52,37 @@ class ConstExprEmitter // Visitor Methods //===--------------------------------------------------------------------===// - mlir::Value VisitStmt(Stmt *S, QualType T) { return nullptr; } + mlir::Attribute VisitStmt(Stmt *S, QualType T) { return nullptr; } - mlir::Value VisitConstantExpr(ConstantExpr *CE, QualType T) { + mlir::Attribute VisitConstantExpr(ConstantExpr *CE, QualType T) { assert(0 && "unimplemented"); - // if (mlir::Value Result = Emitter.tryEmitConstantExpr(CE)) - // return Result; - // return Visit(CE->getSubExpr(), T); return {}; } - mlir::Value VisitParenExpr(ParenExpr *PE, QualType T) { + mlir::Attribute VisitParenExpr(ParenExpr *PE, QualType T) { return Visit(PE->getSubExpr(), T); } - mlir::Value + mlir::Attribute VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE, QualType T) { return Visit(PE->getReplacement(), T); } - mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *GE, QualType T) { + mlir::Attribute VisitGenericSelectionExpr(GenericSelectionExpr *GE, + QualType T) { return Visit(GE->getResultExpr(), T); } - mlir::Value VisitChooseExpr(ChooseExpr *CE, QualType T) { + mlir::Attribute VisitChooseExpr(ChooseExpr *CE, QualType T) { return Visit(CE->getChosenSubExpr(), T); } - mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *E, QualType T) { + mlir::Attribute VisitCompoundLiteralExpr(CompoundLiteralExpr *E, QualType T) { return Visit(E->getInitializer(), T); } - mlir::Value VisitCastExpr(CastExpr *E, QualType destType) { + mlir::Attribute VisitCastExpr(CastExpr *E, QualType destType) { if (const auto *ECE = dyn_cast(E)) assert(0 && "not implemented"); Expr *subExpr = E->getSubExpr(); @@ -184,38 +182,39 @@ class ConstExprEmitter llvm_unreachable("Invalid CastKind"); } - mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE, QualType T) { + mlir::Attribute VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE, QualType T) { // TODO(cir): figure out CIR story here... // No need for a DefaultInitExprScope: we don't handle 'this' in a // constant expression. return Visit(DIE->getExpr(), T); } - mlir::Value VisitExprWithCleanups(ExprWithCleanups *E, QualType T) { + mlir::Attribute VisitExprWithCleanups(ExprWithCleanups *E, QualType T) { return Visit(E->getSubExpr(), T); } - mlir::Value VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E, - QualType T) { + mlir::Attribute VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E, + QualType T) { return Visit(E->getSubExpr(), T); } - mlir::Value EmitArrayInitialization(InitListExpr *ILE, QualType T) { + mlir::Attribute EmitArrayInitialization(InitListExpr *ILE, QualType T) { assert(0 && "not implemented"); return {}; } - mlir::Value EmitRecordInitialization(InitListExpr *ILE, QualType T) { + mlir::Attribute EmitRecordInitialization(InitListExpr *ILE, QualType T) { assert(0 && "not implemented"); return {}; } - mlir::Value VisitImplicitValueInitExpr(ImplicitValueInitExpr *E, QualType T) { + mlir::Attribute VisitImplicitValueInitExpr(ImplicitValueInitExpr *E, + QualType T) { assert(0 && "not implemented"); return {}; } - mlir::Value VisitInitListExpr(InitListExpr *ILE, QualType T) { + mlir::Attribute VisitInitListExpr(InitListExpr *ILE, QualType T) { if (ILE->isTransparent()) return Visit(ILE->getInit(0), T); @@ -228,8 +227,8 @@ class ConstExprEmitter return nullptr; } - mlir::Value VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E, - QualType destType) { + mlir::Attribute VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E, + QualType destType) { auto C = Visit(E->getBase(), destType); if (!C) return nullptr; @@ -238,7 +237,7 @@ class ConstExprEmitter return {}; } - mlir::Value VisitCXXConstructExpr(CXXConstructExpr *E, QualType Ty) { + mlir::Attribute VisitCXXConstructExpr(CXXConstructExpr *E, QualType Ty) { if (!E->getConstructor()->isTrivial()) return nullptr; @@ -259,18 +258,18 @@ class ConstExprEmitter return {}; } - mlir::Value VisitStringLiteral(StringLiteral *E, QualType T) { + mlir::Attribute VisitStringLiteral(StringLiteral *E, QualType T) { // This is a string literal initializing an array in an initializer. assert(0 && "not implemented"); return {}; } - mlir::Value VisitObjCEncodeExpr(ObjCEncodeExpr *E, QualType T) { + mlir::Attribute VisitObjCEncodeExpr(ObjCEncodeExpr *E, QualType T) { assert(0 && "not implemented"); return {}; } - mlir::Value VisitUnaryExtension(const UnaryOperator *E, QualType T) { + mlir::Attribute VisitUnaryExtension(const UnaryOperator *E, QualType T) { return Visit(E->getSubExpr(), T); } @@ -284,40 +283,24 @@ class ConstExprEmitter // ConstantEmitter //===----------------------------------------------------------------------===// -mlir::Value ConstantEmitter::tryEmitForInitializer(const VarDecl &D) { +mlir::Attribute ConstantEmitter::tryEmitForInitializer(const VarDecl &D) { initializeNonAbstract(D.getType().getAddressSpace()); return markIfFailed(tryEmitPrivateForVarInit(D)); } -mlir::Value ConstantEmitter::tryEmitForInitializer(const Expr *E, - LangAS destAddrSpace, - QualType destType) { - initializeNonAbstract(destAddrSpace); - return markIfFailed(tryEmitPrivateForMemory(E, destType)); -} +void ConstantEmitter::finalize(mlir::cir::GlobalOp global) { + assert(InitializedNonAbstract && + "finalizing emitter that was used for abstract emission?"); + assert(!Finalized && "finalizing emitter multiple times"); + assert(!global.isDeclaration()); -// mlir::Value ConstantEmitter::emitForInitializer(const APValue &value, -// LangAS destAddrSpace, -// QualType destType) { -// initializeNonAbstract(destAddrSpace); -// auto C = tryEmitPrivateForMemory(value, destType); -// assert(C && "couldn't emit constant value non-abstractly?"); -// return C; -// } - -// void ConstantEmitter::finalize(llvm::GlobalVariable *global) { -// assert(InitializedNonAbstract && -// "finalizing emitter that was used for abstract emission?"); -// assert(!Finalized && "finalizing emitter multiple times"); -// assert(global->getInitializer()); - -// // Note that we might also be Failed. -// Finalized = true; - -// if (!PlaceholderAddresses.empty()) { -// assert(0 && "not implemented"); -// } -// } + // Note that we might also be Failed. + Finalized = true; + + if (!PlaceholderAddresses.empty()) { + assert(0 && "not implemented"); + } +} ConstantEmitter::~ConstantEmitter() { assert((!InitializedNonAbstract || Finalized || Failed) && @@ -334,7 +317,7 @@ static QualType getNonMemoryType(CIRGenModule &CGM, QualType type) { return type; } -mlir::Value ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { +mlir::TypedAttr ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { // Make a quick check if variable can be default NULL initialized // and avoid going through rest of code which may do, for c++11, // initialization of memory to all NULLs. @@ -362,20 +345,24 @@ mlir::Value ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { return {}; } -mlir::Value ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, - QualType destType) { +mlir::TypedAttr ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, + QualType destType) { auto nonMemoryDestType = getNonMemoryType(CGM, destType); auto C = tryEmitPrivate(value, nonMemoryDestType); - return (C ? emitForMemory(C, destType) : nullptr); -} + if (C) { + auto attr = emitForMemory(C, destType); + auto typedAttr = llvm::dyn_cast(attr); + if (!typedAttr) + llvm_unreachable("this should always be typed"); + return typedAttr; + } -mlir::Value ConstantEmitter::tryEmitPrivateForMemory(const clang::Expr *E, - clang::QualType T) { - llvm_unreachable("NYI"); + return nullptr; } -mlir::Value ConstantEmitter::emitForMemory(CIRGenModule &CGM, mlir::Value C, - QualType destType) { +mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, + mlir::TypedAttr C, + QualType destType) { // For an _Atomic-qualified constant, we may need to add tail padding. if (auto AT = destType->getAs()) { assert(0 && "not implemented"); @@ -389,16 +376,18 @@ mlir::Value ConstantEmitter::emitForMemory(CIRGenModule &CGM, mlir::Value C, return C; } -mlir::Value ConstantEmitter::tryEmitPrivate(const APValue &Value, - QualType DestType) { +mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const APValue &Value, + QualType DestType) { switch (Value.getKind()) { case APValue::None: case APValue::Indeterminate: // TODO(cir): LLVM models out-of-lifetime and indeterminate values as // 'undef'. Find out what's better for CIR. assert(0 && "not implemented"); - case APValue::Int: - assert(0 && "not implemented"); + case APValue::Int: { + mlir::Type ty = CGM.getCIRType(DestType); + return CGM.getBuilder().getIntegerAttr(ty, Value.getInt()); + } case APValue::LValue: case APValue::FixedPoint: case APValue::ComplexInt: diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 0c6a8236a7da..cc91ed265c7d 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -13,6 +13,7 @@ #include "CIRGenModule.h" #include "CIRGenCXXABI.h" +#include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" @@ -324,8 +325,8 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, // Get or create the prototype for the function. // if (!V || (V.getValueType() != Ty)) - // TODO: Figure out what to do here? llvm uses a GlobalValue for the FuncOp in - // mlir + // TODO(cir): Figure out what to do here? llvm uses a GlobalValue for the + // FuncOp in mlir Op = GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/true, ForDefinition); @@ -334,11 +335,11 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, if (!Fn.isDeclaration()) return; - // TODO: setFunctionLinkage - // TODO: setGVProperties - // TODO: MaubeHandleStaticInExternC - // TODO: maybeSetTrivialComdat - // TODO: setLLVMFunctionFEnvAttributes + // TODO(cir): setFunctionLinkage + // TODO(cir): setGVProperties + // TODO(cir): MaubeHandleStaticInExternC + // TODO(cir): maybeSetTrivialComdat + // TODO(cir): setLLVMFunctionFEnvAttributes CIRGenFunction CGF{*this, builder}; CurCGF = &CGF; @@ -353,9 +354,424 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, assert(!D->getAttr() && "NYI"); } +/// FIXME: implement +mlir::cir::GlobalOp CIRGenModule::getGlobalValue(StringRef Name) { return {}; } + +/// If the specified mangled name is not in the module, +/// create and return an mlir GlobalOp with the specified type (TODO(cir): +/// address space). +/// +/// TODO(cir): +/// 1. If there is something in the module with the specified name, return +/// it potentially bitcasted to the right type. +/// +/// 2. If D is non-null, it specifies a decl that correspond to this. This is +/// used to set the attributes on the global when it is first created. +/// +/// 3. If IsForDefinition is true, it is guaranteed that an actual global with +/// type Ty will be returned, not conversion of a variable with the same +/// mangled name but some other type. +mlir::cir::GlobalOp +CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, + LangAS AddrSpace, const VarDecl *D, + ForDefinition_t IsForDefinition) { + // Lookup the entry, lazily creating it if necessary. + mlir::cir::GlobalOp Entry = getGlobalValue(MangledName); + + // unsigned TargetAS = astCtx.getTargetAddressSpace(AddrSpace); + if (Entry) { + if (WeakRefReferences.erase(Entry)) { + assert(0 && "not implemented"); + // if (D && !D->hasAttr()) + // Entry->setLinkage(llvm::Function::ExternalLinkage); + } + + // Handle dropped DLL attributes. + // FIXME: Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); + if (D && !D->hasAttr() && + !D->hasAttr()) + assert(0 && "not implemented"); + + if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && D) + assert(0 && "not implemented"); + + // TODO(cir): check address space matches + if (Entry.getSymType() == Ty) + return Entry; + + // If there are two attempts to define the same mangled name, issue an + // error. + // + // TODO(cir): look at mlir::GlobalValue::isDeclaration for all aspects of + // recognizing the global as a declaration, for now only check if + // initializer is present. + if (IsForDefinition && !Entry.isDeclaration()) { + GlobalDecl OtherGD; + const VarDecl *OtherD; + + // Check that D is not yet in DiagnosedConflictingDefinitions is required + // to make sure that we issue an error only once. + if (D && lookupRepresentativeDecl(MangledName, OtherGD) && + (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) && + (OtherD = dyn_cast(OtherGD.getDecl())) && + OtherD->hasInit() && + DiagnosedConflictingDefinitions.insert(D).second) { + getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name) + << MangledName; + getDiags().Report(OtherGD.getDecl()->getLocation(), + diag::note_previous_definition); + } + } + + // TODO(cir): LLVM codegen makes sure the result is of the correct type + // by issuing a address space cast. + + // TODO(cir): + // (In LLVM codgen, if global is requested for a definition, we always need + // to create a new global, otherwise return a bitcast.) + if (!IsForDefinition) + assert(0 && "not implemented"); + } + + // TODO(cir): auto DAddrSpace = GetGlobalVarAddressSpace(D); + // TODO(cir): do we need to strip pointer casts for Entry? + + auto loc = getLoc(D->getSourceRange()); + + // mlir::SymbolTable::Visibility::Public is the default, no need to explicitly + // mark it as such. + auto GV = builder.create(loc, MangledName, Ty); + theModule.push_back(GV); + + // If we already created a global with the same mangled name (but different + // type) before, take its name and remove it from its parent. + assert(!Entry && "not implemented"); + + // This is the first use or definition of a mangled name. If there is a + // deferred decl with this name, remember that we need to emit it at the end + // of the file. + auto DDI = DeferredDecls.find(MangledName); + if (DDI != DeferredDecls.end()) { + // Move the potentially referenced deferred decl to the DeferredDeclsToEmit + // list, and remove it from DeferredDecls (since we don't need it anymore). + addDeferredDeclToEmit(DDI->second); + DeferredDecls.erase(DDI); + } + + // Handle things which are present even on external declarations. + auto &LangOpts = getLangOpts(); + if (D) { + if (LangOpts.OpenMP && !LangOpts.OpenMPSimd) + assert(0 && "not implemented"); + + // FIXME: This code is overly simple and should be merged with other global + // handling. + + // TODO(cir): + // GV->setConstant(isTypeConstant(D->getType(), false)); + // GV->setAlignment(getContext().getDeclAlign(D).getAsAlign()); + // setLinkageForGV(GV, D); + + if (D->getTLSKind()) { + assert(0 && "not implemented"); + } + + // TODO(cir): + // setGVProperties(GV, D); + + // If required by the ABI, treat declarations of static data members with + // inline initializers as definitions. + if (astCtx.isMSStaticDataMemberInlineDefinition(D)) { + assert(0 && "not implemented"); + } + + // Emit section information for extern variables. + if (D->hasExternalStorage()) + assert(0 && "not implemented"); + + // Handle XCore specific ABI requirements. + if (getTriple().getArch() == llvm::Triple::xcore) + assert(0 && "not implemented"); + + // Check if we a have a const declaration with an initializer, we maybe + // able to emit it as available_externally to expose it's value to the + // optimizer. + if (getLangOpts().CPlusPlus && GV.isPublic() && + D->getType().isConstQualified() && GV.isDeclaration() && + !D->hasDefinition() && D->hasInit() && !D->hasAttr()) { + assert(0 && "not implemented"); + } + } + + // TODO(cir): if this method is used to handle functions we must have + // something closer to GlobalValue::isDeclaration instead of checking for + // initializer. + if (GV.isDeclaration()) { + // TODO(cir): set target attributes + + // External HIP managed variables needed to be recorded for transformation + // in both device and host compilations. + if (getLangOpts().CUDA) + assert(0 && "not implemented"); + } + + // TODO(cir): address space cast when needed for DAddrSpace. + return GV; +} + +mlir::cir::GlobalOp CIRGenModule::buildGlobal(const VarDecl *D, + std::optional Ty, + ForDefinition_t IsForDefinition) { + assert(D->hasGlobalStorage() && "Not a global variable"); + QualType ASTTy = D->getType(); + if (!Ty) + Ty = getTypes().convertTypeForMem(ASTTy); + + StringRef MangledName = getMangledName(D); + return getOrCreateCIRGlobal(MangledName, *Ty, ASTTy.getAddressSpace(), D, + IsForDefinition); +} + +/// Return the mlir::Value for the address of the given global variable. If Ty +/// is non-null and if the global doesn't exist, then it will be created with +/// the specified type instead of whatever the normal requested type would be. +/// If IsForDefinition is true, it is guaranteed that an actual global with type +/// Ty will be returned, not conversion of a variable with the same mangled name +/// but some other type. +mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, + std::optional Ty, + ForDefinition_t IsForDefinition) { + auto g = buildGlobal(D, Ty, IsForDefinition); + (void)g; + // FIXME: create an operation to get the address of the global. + assert(0 && "not implemented"); + return {}; +} + +/// TODO(cir): looks like part of this code can be part of a common AST +/// helper betweem CIR and LLVM codegen. +template +void CIRGenModule::maybeHandleStaticInExternC(const SomeDecl *D, + mlir::cir::GlobalOp GV) { + if (!getLangOpts().CPlusPlus) + return; + + // Must have 'used' attribute, or else inline assembly can't rely on + // the name existing. + if (!D->template hasAttr()) + return; + + // Must have internal linkage and an ordinary name. + if (!D->getIdentifier() || D->getFormalLinkage() != Linkage::Internal) + return; + + // Must be in an extern "C" context. Entities declared directly within + // a record are not extern "C" even if the record is in such a context. + const SomeDecl *First = D->getFirstDecl(); + if (First->getDeclContext()->isRecord() || !First->isInExternCContext()) + return; + + // TODO(cir): + // OK, this is an internal linkage entity inside an extern "C" linkage + // specification. Make a note of that so we can give it the "expected" + // mangled name if nothing else is using that name. + // + // If we have multiple internal linkage entities with the same name + // in extern "C" regions, none of them gets that name. + assert(0 && "not implemented"); +} + void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, bool IsTentative) { - assert(0 && "not implemented"); + // TODO(cir): + // OpenCL global variables of sampler type are translated to function calls, + // therefore no need to be translated. + // If this is OpenMP device, check if it is legal to emit this global + // normally. + QualType ASTTy = D->getType(); + assert(!(getLangOpts().OpenCL || getLangOpts().OpenMP) && "not implemented"); + + // TODO(cir): LLVM's codegen uses a llvm::TrackingVH here. Is that + // necessary here for CIR gen? + mlir::Attribute Init; + [[maybe_unused]] bool NeedsGlobalCtor = false; + bool NeedsGlobalDtor = + D->needsDestruction(astCtx) == QualType::DK_cxx_destructor; + + const VarDecl *InitDecl; + const Expr *InitExpr = D->getAnyInitializer(InitDecl); + + std::optional emitter; + + // CUDA E.2.4.1 "__shared__ variables cannot have an initialization + // as part of their declaration." Sema has already checked for + // error cases, so we just need to set Init to UndefValue. + bool IsCUDASharedVar = + getLangOpts().CUDAIsDevice && D->hasAttr(); + // Shadows of initialized device-side global variables are also left + // undefined. + // Managed Variables should be initialized on both host side and device side. + bool IsCUDAShadowVar = + !getLangOpts().CUDAIsDevice && !D->hasAttr() && + (D->hasAttr() || D->hasAttr() || + D->hasAttr()); + bool IsCUDADeviceShadowVar = + getLangOpts().CUDAIsDevice && !D->hasAttr() && + (D->getType()->isCUDADeviceBuiltinSurfaceType() || + D->getType()->isCUDADeviceBuiltinTextureType()); + if (getLangOpts().CUDA && + (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar)) + assert(0 && "not implemented"); + else if (D->hasAttr()) + assert(0 && "not implemented"); + else if (!InitExpr) { + // This is a tentative definition; tentative definitions are + // implicitly initialized with { 0 }. + // + // Note that tentative definitions are only emitted at the end of + // a translation unit, so they should never have incomplete + // type. In addition, EmitTentativeDefinition makes sure that we + // never attempt to emit a tentative definition if a real one + // exists. A use may still exists, however, so we still may need + // to do a RAUW. + assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type"); + assert(0 && "not implemented"); + } else { + initializedGlobalDecl = GlobalDecl(D); + emitter.emplace(*this); + auto Initializer = emitter->tryEmitForInitializer(*InitDecl); + if (!Initializer) { + assert(0 && "not implemented"); + } else { + Init = Initializer; + // We don't need an initializer, so remove the entry for the delayed + // initializer position (just in case this entry was delayed) if we + // also don't need to register a destructor. + if (getLangOpts().CPlusPlus && !NeedsGlobalDtor) + DelayedCXXInitPosition.erase(D); + } + } + + assert(Init.isa() && "This should have a type"); + auto TypedInitAttr = Init.cast(); + auto InitType = TypedInitAttr.getType(); + auto Entry = buildGlobal(D, InitType, ForDefinition_t(!IsTentative)); + // TODO(cir): Strip off pointer casts from Entry if we get them? + + // TODO(cir): LLVM codegen used GlobalValue to handle both Function or + // GlobalVariable here. We currently only support GlobalOp, should this be + // used for FuncOp? + assert(dyn_cast(&Entry) && "FuncOp not supported here"); + auto GV = Entry; + + // We have a definition after a declaration with the wrong type. + // We must make a new GlobalVariable* and update everything that used OldGV + // (a declaration or tentative definition) with the new GlobalVariable* + // (which will be a definition). + // + // This happens if there is a prototype for a global (e.g. + // "extern int x[];") and then a definition of a different type (e.g. + // "int x[10];"). This also happens when an initializer has a different type + // from the type of the global (this happens with unions). + if (!GV || GV.getSymType() != InitType) { + // TODO(cir): this should include an address space check as well. + assert(0 && "not implemented"); + } + + maybeHandleStaticInExternC(D, GV); + + if (D->hasAttr()) + assert(0 && "not implemented"); + + // TODO(cir): + // Set the llvm linkage type as appropriate. + // llvm::GlobalValue::LinkageTypes Linkage = + // getLLVMLinkageVarDefinition(D, GV->isConstant()); + + // TODO(cir): + // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on + // the device. [...]" + // CUDA B.2.2 "The __constant__ qualifier, optionally used together with + // __device__, declares a variable that: [...] + if (GV && getLangOpts().CUDA) { + assert(0 && "not implemented"); + } + + // Set initializer and finalize emission + GV.setInitialValueAttr(Init); + if (emitter) + emitter->finalize(GV); + + // TODO(cir): If it is safe to mark the global 'constant', do so now. + // GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor && + // isTypeConstant(D->getType(), true)); + + // If it is in a read-only section, mark it 'constant'. + if (const SectionAttr *SA = D->getAttr()) { + assert(0 && "not implemented"); + } + + // TODO(cir): + // GV->setAlignment(getContext().getDeclAlign(D).getAsAlign()); + + // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper + // function is only defined alongside the variable, not also alongside + // callers. Normally, all accesses to a thread_local go through the + // thread-wrapper in order to ensure initialization has occurred, underlying + // variable will never be used other than the thread-wrapper, so it can be + // converted to internal linkage. + // + // However, if the variable has the 'constinit' attribute, it _can_ be + // referenced directly, without calling the thread-wrapper, so the linkage + // must not be changed. + // + // Additionally, if the variable isn't plain external linkage, e.g. if it's + // weak or linkonce, the de-duplication semantics are important to preserve, + // so we don't change the linkage. + if (D->getTLSKind() == VarDecl::TLS_Dynamic && GV.isPublic() && + astCtx.getTargetInfo().getTriple().isOSDarwin() && + !D->hasAttr()) { + // TODO(cir): set to mlir::SymbolTable::Visibility::Private once we have + // testcases. + assert(0 && "not implemented"); + } + + // TODO(cir): set linkage, dll stuff and common linkage + // GV->setLinkage(Linkage); + // if (D->hasAttr()) + // GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); + // else if (D->hasAttr()) + // GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass); + // else + // GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass); + // + // if (Linkage == llvm::GlobalVariable::CommonLinkage) { + // // common vars aren't constant even if declared const. + // GV->setConstant(false); + // // Tentative definition of global variables may be initialized with + // // non-zero null pointers. In this case they should have weak linkage + // // since common linkage must have zero initializer and must not have + // // explicit section therefore cannot have non-zero initial value. + // if (!GV->getInitializer()->isNullValue()) + // GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage); + // } + + // TODO(cir): setNonAliasAttributes(D, GV); + + // TODO(cir): handle TLSKind if GV is not thread local + if (D->getTLSKind()) { // && !GV->isThreadLocal()) + assert(0 && "not implemented"); + } + + // TODO(cir): maybeSetTrivialComdat(*D, *GV); + + // TODO(cir): + // Emit the initializer function if necessary. + // if (NeedsGlobalCtor || NeedsGlobalDtor) + // EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor); + + // TODO(cir): sanitizers (reportGlobalToASan) and global variable debug + // information. } void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { @@ -537,13 +953,16 @@ static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, assert(II && "Attempt to mangle unnamed decl."); const auto *FD = dyn_cast(ND); - assert(FD && "Only FunctionDecl supported"); - assert(FD->getType()->castAs()->getCallConv() != - CC_X86RegCall && - "NYI"); - assert(!FD->hasAttr() && "NYI"); - Out << II->getName(); + if (FD && + FD->getType()->castAs()->getCallConv() == CC_X86RegCall) { + assert(0 && "NYI"); + } else if (FD && FD->hasAttr() && + GD.getKernelReferenceKind() == KernelReferenceKind::Stub) { + assert(0 && "NYI"); + } else { + Out << II->getName(); + } } // Check if the module name hash should be appended for internal linkage diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 3e81a2e96f6c..869b217aea66 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -130,6 +130,40 @@ class CIRGenModule { CIRGenCXXABI &getCXXABI() const { return *ABI; } + /// ------- + /// Handling globals + /// ------- + + /// If the declaration has internal linkage but is inside an + /// extern "C" linkage specification, prepare to emit an alias for it + /// to the expected name. + template + void maybeHandleStaticInExternC(const SomeDecl *D, mlir::cir::GlobalOp GV); + + llvm::DenseMap Globals; + mlir::cir::GlobalOp getGlobalValue(StringRef Ref); + + /// If the specified mangled name is not in the module, create and return an + /// mlir::GlobalOp value + mlir::cir::GlobalOp + getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, LangAS AddrSpace, + const VarDecl *D, + ForDefinition_t IsForDefinition = NotForDefinition); + + mlir::cir::GlobalOp buildGlobal(const VarDecl *D, + std::optional Ty, + ForDefinition_t IsForDefinition); + + /// Return the mlir::Value for the address of the given global variable. + /// If Ty is non-null and if the global doesn't exist, then it will be created + /// with the specified type instead of whatever the normal requested type + /// would be. If IsForDefinition is true, it is guaranteed that an actual + /// global with type Ty will be returned, not conversion of a variable with + /// the same mangled name but some other type. + mlir::Value + getAddrOfGlobalVar(const VarDecl *D, std::optional Ty, + ForDefinition_t IsForDefinition = NotForDefinition); + // TODO: this obviously overlaps with const TargetCIRGenInfo &getTargetCIRGenInfo(); diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp new file mode 100644 index 000000000000..18f924153848 --- /dev/null +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -0,0 +1,7 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int a = 3; + +// CHECK: module { +// CHECK-NEXT: cir.global @a : i32 = 3 \ No newline at end of file From cf8bf99ead7baa701ac7a5f50e395949d2a6bede Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 4 May 2022 22:12:57 -0700 Subject: [PATCH 0437/2301] [CIR][CodeGen] Implement basic const global, only emitted when used --- clang/lib/CIR/CIRGenDeclCXX.cpp | 31 ++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenModule.cpp | 5 +++-- clang/lib/CIR/CIRGenModule.h | 29 ++++++++++++++++++---------- clang/lib/CIR/CMakeLists.txt | 1 + clang/test/CIR/CodeGen/globals.cpp | 4 +++- 5 files changed, 57 insertions(+), 13 deletions(-) create mode 100644 clang/lib/CIR/CIRGenDeclCXX.cpp diff --git a/clang/lib/CIR/CIRGenDeclCXX.cpp b/clang/lib/CIR/CIRGenDeclCXX.cpp new file mode 100644 index 000000000000..594ce748d472 --- /dev/null +++ b/clang/lib/CIR/CIRGenDeclCXX.cpp @@ -0,0 +1,31 @@ +//===--- CIRGenDeclCXX.cpp - Build CIR Code for C++ declarations ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with code generation of C++ declarations +// +//===----------------------------------------------------------------------===// + +#include "CIRGenModule.h" +#include "TargetInfo.h" +#include "clang/AST/Attr.h" +#include "clang/Basic/LangOptions.h" + +using namespace clang; +using namespace mlir::cir; +using namespace cir; + +void CIRGenModule::buildCXXGlobalInitFunc() { + while (!CXXGlobalInits.empty() && !CXXGlobalInits.back()) + CXXGlobalInits.pop_back(); + + if (CXXGlobalInits.empty()) // TODO(cir): && + // PrioritizedCXXGlobalInits.empty()) + return; + + assert(0 && "NYE"); +} \ No newline at end of file diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index cc91ed265c7d..6dc592fda276 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -297,7 +297,8 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { // the order in which it appeared on the file. if (getLangOpts().CPlusPlus && isa(Global) && cast(Global)->hasInit()) { - llvm_unreachable("NYI"); + DelayedCXXInitPosition[Global] = CXXGlobalInits.size(); + CXXGlobalInits.push_back(nullptr); } llvm::StringRef MangledName = getMangledName(GD); @@ -1306,7 +1307,7 @@ void CIRGenModule::Release() { // TODO: applyReplacements(); // TODO: checkAliases(); // TODO: buildMultiVersionFunctions(); - // TODO: buildCXXGlobalInitFunc(); + buildCXXGlobalInitFunc(); // TODO: buildCXXGlobalCleanUpFunc(); // TODO: registerGlobalDtorsWithAtExit(); // TODO: buildCXXThreadLocalInitFunc(); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 869b217aea66..9b900467030c 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -99,16 +99,6 @@ class CIRGenModule { // or a definition. llvm::SmallPtrSet WeakRefReferences; - // TODO(cir): does this really need to be a state for CIR emission? - GlobalDecl initializedGlobalDecl; - - /// When a C++ decl with an initializer is deferred, null is - /// appended to CXXGlobalInits, and the index of that null is placed - /// here so that the initializer will be performed in the correct - /// order. Once the decl is emitted, the index is replaced with ~0U to ensure - /// that we don't re-emit the initializer. - llvm::DenseMap DelayedCXXInitPosition; - /// ------- /// Declaring variables /// ------- @@ -134,6 +124,25 @@ class CIRGenModule { /// Handling globals /// ------- + // TODO(cir): does this really need to be a state for CIR emission? + GlobalDecl initializedGlobalDecl; + + /// Global variables with initializers that need to run before main. + /// TODO(cir): for now track a generation operation, this is so far only + /// used to sync with DelayedCXXInitPosition. Improve it when we actually + /// use function calls for initialization + std::vector CXXGlobalInits; + + /// Emit the function that initializes C++ globals. + void buildCXXGlobalInitFunc(); + + /// When a C++ decl with an initializer is deferred, null is + /// appended to CXXGlobalInits, and the index of that null is placed + /// here so that the initializer will be performed in the correct + /// order. Once the decl is emitted, the index is replaced with ~0U to ensure + /// that we don't re-emit the initializer. + llvm::DenseMap DelayedCXXInitPosition; + /// If the declaration has internal linkage but is inside an /// extern "C" linkage specification, prepare to emit an alias for it /// to the expected name. diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index b84f798229f7..dc922e6ba18e 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -16,6 +16,7 @@ add_clang_library(clangCIR CIRGenClass.cpp CIRGenCleanup.cpp CIRGenDecl.cpp + CIRGenDeclCXX.cpp CIRGenExpr.cpp CIRGenExprCst.cpp CIRGenExprAgg.cpp diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 18f924153848..58419ce44ff8 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -2,6 +2,8 @@ // RUN: FileCheck --input-file=%t.cir %s int a = 3; +const int b = 4; // unless used wont be generated // CHECK: module { -// CHECK-NEXT: cir.global @a : i32 = 3 \ No newline at end of file +// CHECK-NEXT: cir.global @a : i32 = 3 +// CHECK-NOT: cir.global @b \ No newline at end of file From 3467f6ab6baf2b1af489b51449eecfffe4536927 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 4 May 2022 22:38:17 -0700 Subject: [PATCH 0438/2301] [CIR][CodeGen] Global: float/double constant initialization --- clang/lib/CIR/CIRGenExprCst.cpp | 12 +++++++++++- clang/test/CIR/CodeGen/globals.cpp | 10 ++++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprCst.cpp b/clang/lib/CIR/CIRGenExprCst.cpp index 8390888a5400..8ff373dab56b 100644 --- a/clang/lib/CIR/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CIRGenExprCst.cpp @@ -388,10 +388,20 @@ mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const APValue &Value, mlir::Type ty = CGM.getCIRType(DestType); return CGM.getBuilder().getIntegerAttr(ty, Value.getInt()); } + case APValue::Float: { + const llvm::APFloat &Init = Value.getFloat(); + if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf() && + !CGM.getASTContext().getLangOpts().NativeHalfType && + CGM.getASTContext().getTargetInfo().useFP16ConversionIntrinsics()) + assert(0 && "not implemented"); + else { + mlir::Type ty = CGM.getCIRType(DestType); + return CGM.getBuilder().getFloatAttr(ty, Init); + } + } case APValue::LValue: case APValue::FixedPoint: case APValue::ComplexInt: - case APValue::Float: case APValue::ComplexFloat: case APValue::Vector: case APValue::AddrLabelDiff: diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 58419ce44ff8..14807aa0edda 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -4,6 +4,12 @@ int a = 3; const int b = 4; // unless used wont be generated +unsigned long int c = 2; +float y = 3.4; +double w = 4.3; + // CHECK: module { -// CHECK-NEXT: cir.global @a : i32 = 3 -// CHECK-NOT: cir.global @b \ No newline at end of file +// CHECK-NEXT: cir.global @a : i32 = 3 +// CHECK-NEXT: cir.global @c : i64 = 2 +// CHECK-NEXT: cir.global @y : f32 = 3.400000e+00 +// CHECK-NEXT: cir.global @w : f64 = 4.300000e+00 \ No newline at end of file From 18ed087da519d9376e8867cdf996bda2ee190669 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 17:45:58 -0400 Subject: [PATCH 0439/2301] [CIR][NFC] Add some comments to CIRRecordLayoutBuilder --- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp index 4adb73bbf410..2755f079c2d8 100644 --- a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -15,6 +15,9 @@ using namespace cir; using namespace clang; namespace { +/// The CIRRecordLowering is responsible for lowering an ASTRecordLayout to a +/// mlir::Type. Some of the lowering is straightforward, some is not. Here we +/// detail some of the complexities and weirdnesses here. struct CIRRecordLowering final { // MemberInfo is a helper structure that contains information about a record @@ -28,12 +31,15 @@ struct CIRRecordLowering final { MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data, const FieldDecl *fieldDecl = nullptr) : offset{offset}, kind{kind}, data{data}, fieldDecl{fieldDecl} {}; + // MemberInfos are sorted so we define a < operator. bool operator<(const MemberInfo &other) const { return offset < other.offset; } }; + // The constructor. CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl, bool isPacked); + // Short helper routines. void lower(bool nonVirtualBaseType); From ba6b9c3129e03569f25288d9bab8032bf64e354e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 17:51:34 -0400 Subject: [PATCH 0440/2301] [CIR] Confirm that it's safe to convert a RecordDecl before attempting Evidently things can blow up in the case of recursive structs. So just add a fn to confirm but just fail if we hit it for now. We can support it genuinely later. --- clang/lib/CIR/CIRGenTypes.cpp | 17 ++++++++++++++++- clang/lib/CIR/CIRGenTypes.h | 2 ++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 958e4a4e79b5..55f6aceedf00 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -65,9 +65,21 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, return std::string(typeName); } +// isSafeToConvert - Return true if it is safe to convert the specified record +// decl to CIR and lay it out, false if doing so would cause us to get into a +// recursive compilation mess. +static bool isSafeToConvert(const RecordDecl *RD, CIRGenTypes &CGT) { + // If no structs are being laid out, we can certainly do this one. + if (CGT.noRecordsBeingLaidOut()) + return true; + + llvm_unreachable("NYI"); +} + mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { const auto *key = Context.getTagDeclType(recordDecl).getTypePtr(); + mlir::cir::StructType &entry = recordDeclTypes[key]; recordDecl = recordDecl->getDefinition(); @@ -77,7 +89,10 @@ CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { if (!recordDecl || !recordDecl->isCompleteDefinition() || entry) return entry; - // TODO: Implement checking for whether or not this type is safe to convert. + // If converting this type would cause us to infinitely loop, don't do it! + if (!isSafeToConvert(recordDecl, *this)) { + llvm_unreachable("NYI"); + } // TODO: handle whether or not layout was skipped and recursive record layout diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 77ad4151f78c..16bef876d300 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -230,6 +230,8 @@ class CIRGenTypes { clang::FunctionType::ExtInfo info, llvm::ArrayRef paramInfos, RequiredArgs args); + + bool noRecordsBeingLaidOut() const { return RecordsBeingLaidOut.empty(); } }; } // namespace cir From 37abc0dc8648282f033914a497cc26d7aefe84b6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 18:48:20 -0400 Subject: [PATCH 0441/2301] [CIR][NFC] Add more members to CIRRecordLayoutBuilder These members exist on the corresponding CG type and will be used in a subsequent patch. --- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp index 2755f079c2d8..1fa2fac52de4 100644 --- a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -102,6 +102,11 @@ struct CIRRecordLowering final { // Output fields, consumed by CIRGenTypes::computeRecordLayout llvm::SmallVector fieldTypes; llvm::DenseMap fields; + llvm::DenseMap bitFields; + llvm::DenseMap nonVirtualBases; + llvm::DenseMap virtualBases; + bool IsZeroInitializable : 1; + bool IsZeroInitializableAsBase : 1; bool isPacked : 1; private: @@ -117,7 +122,8 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, recordDecl{recordDecl}, cxxRecordDecl{llvm::dyn_cast( recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, - isPacked{isPacked} {} + IsZeroInitializable(true), + IsZeroInitializableAsBase(true), isPacked{isPacked} {} void CIRRecordLowering::lower(bool nonVirtualBaseType) { if (recordDecl->isUnion()) { From 480a4c4065e57cdfe0c6da5d709e79d3332552a1 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 18:49:54 -0400 Subject: [PATCH 0442/2301] [CIR][NFC] Formatting of a few comments --- clang/lib/CIR/CIRGenTypes.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 16bef876d300..8c2b504c6d37 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -151,9 +151,9 @@ class CIRGenTypes { clang::CXXCtorType Type); /// convertTypeForMem - Convert type T into an mlir::Type. This differs from - /// convertType in that it is used to convert to the memory representation for - /// a type. For example, the scalar representation for _Bool is i1, but the - /// memory representation is usually i8 or i32, depending on the target. + /// convertType in that it is used to convert to the memory representation + /// for a type. For example, the scalar representation for _Bool is i1, but + /// the memory representation is usually i8 or i32, depending on the target. // TODO: convert this comment to account for MLIR's equivalence mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); @@ -174,7 +174,8 @@ class CIRGenTypes { // Key points: // - The CIRGenFunctionInfo for emitting a specific call site must include // entries for the optional arguments. - // - The function type used at the call site must reflect the formal signature + // - The function type used at the call site must reflect the formal + // signature // of the declaration being called, or else the call will go away. // - For the most part, unprototyped functions are called by casting to a // formal signature inferred from the specific argument types used at the From abe6e6a715e33980e07c76cb0ac431d57083c29e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 18:50:38 -0400 Subject: [PATCH 0443/2301] [CIR] Add CIRGenRecordLayout type This is the proxy between the cir::StructType and the AST RecordDecl that has more information on the type being looked at than is recorded in the StructType (e.g. the member name). Currently this is only used as a proxy during codegen, but eventually we'll be using this type to also build the CIR type with more rich type information for CIR-level optimizations. --- clang/lib/CIR/CIRGenRecordLayout.h | 84 ++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 clang/lib/CIR/CIRGenRecordLayout.h diff --git a/clang/lib/CIR/CIRGenRecordLayout.h b/clang/lib/CIR/CIRGenRecordLayout.h new file mode 100644 index 000000000000..ceb5ea6d29eb --- /dev/null +++ b/clang/lib/CIR/CIRGenRecordLayout.h @@ -0,0 +1,84 @@ +//===--- CIRGenRecordLayout.h - CIR Record Layout Information ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CIRGENRECORDLAYOUT_H +#define LLVM_CLANG_LIB_CIR_CIRGENRECORDLAYOUT_H + +#include "CIRGenTypes.h" + +#include "clang/AST/Decl.h" + +#include "mlir/Dialect/CIR/IR/CIRTypes.h" + +namespace cir { + +/// CIRGenRecordLayout - This class handles struct and union layout info while +/// lowering AST types to CIR types. +/// +/// These layout objects are only created on demand as CIR generation requires. +class CIRGenRecordLayout { + friend class CIRGenTypes; + + CIRGenRecordLayout(const CIRGenRecordLayout &) = delete; + void operator=(const CIRGenRecordLayout &) = delete; + +private: + /// The CIR type corresponding to this record layout; used when laying it out + /// as a complete object. + mlir::cir::StructType CompleteObjectType; + + /// The CIR type for the non-virtual part of this record layout; used when + /// laying it out as a base subobject. + mlir::cir::StructType BaseSubobjectType; + + /// Map from (non-bit-field) struct field to the corresponding cir struct type + /// field no. This info is populated by the record builder. + llvm::DenseMap FieldInfo; + + /// Map from (bit-field) struct field to the corresponding CIR struct type + /// field no. This info is populated by record builder. + /// TODO(CIR): value is an int for now, fix when we support bitfields + llvm::DenseMap BitFields; + + // FIXME: Maybe we could use CXXBaseSpecifier as the key and use a single map + // for both virtual and non-virtual bases. + llvm::DenseMap NonVirtualBases; + + /// Map from virtual bases to their field index in the complete object. + llvm::DenseMap + CompleteObjectVirtualBases; + + /// False if any direct or indirect subobject of this class, when considered + /// as a complete object, requires a non-zero bitpattern when + /// zero-initialized. + bool IsZeroInitializable : 1; + + /// False if any direct or indirect subobject of this class, when considered + /// as a base subobject, requires a non-zero bitpattern when zero-initialized. + bool IsZeroInitializableAsBase : 1; + +public: + CIRGenRecordLayout(mlir::cir::StructType CompleteObjectType, + mlir::cir::StructType BaseSubobjectType, + bool IsZeroInitializable, bool IsZeroInitializableAsBase) + : CompleteObjectType(CompleteObjectType), + BaseSubobjectType(BaseSubobjectType), + IsZeroInitializable(IsZeroInitializable), + IsZeroInitializableAsBase(IsZeroInitializableAsBase) {} + + /// Return cir::StructType element number that corresponds to the field FD. + unsigned getCIRFieldNo(const clang::FieldDecl *FD) const { + FD = FD->getCanonicalDecl(); + assert(FieldInfo.count(FD) && "Invalid field for record!"); + return FieldInfo.lookup(FD); + } +}; + +} // namespace cir + +#endif From 6f8582a75ebf9bdea476fd5b5b16dcbd1af98f74 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 18:53:07 -0400 Subject: [PATCH 0444/2301] [CIR] Add a cache for CIRGenRecordLayouts to CIRGenTypes This type will be used as a proxy to refer to member information for cir::StructTypes from clang::RecordDecls --- clang/lib/CIR/CIRGenTypes.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 8c2b504c6d37..8a3c6c36fb8d 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -15,6 +15,7 @@ #include "ABIInfo.h" #include "CIRGenFunctionInfo.h" +#include "CIRGenRecordLayout.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" @@ -84,6 +85,10 @@ class CIRGenTypes { // of the previous reference members being already initialized const ABIInfo &TheABIInfo; + /// Contains the CIR type for any converted RecordDecl. + llvm::DenseMap> + CIRGenRecordLayouts; + /// Contains the CIR type for any converted RecordDecl llvm::DenseMap recordDeclTypes; From f8f3ed9cbb03d57396284cd6b92d5cc20aa60589 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 18:54:50 -0400 Subject: [PATCH 0445/2301] [CIR] Add a fn to get the CIRGenRecordLayout from a RecordDecl This just looks up the cached version and lazily computes if it isn't found. Currently this doesn't function properly as `convertRecordDeclType` does not yet compute it. But the next patch implements that. --- clang/lib/CIR/CIRGenTypes.cpp | 20 ++++++++++++++++++++ clang/lib/CIR/CIRGenTypes.h | 2 ++ 2 files changed, 22 insertions(+) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 55f6aceedf00..64eabca064c1 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -621,3 +621,23 @@ void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { if (CGM.getModuleDebugInfo()) llvm_unreachable("NYI"); } + +/// getCIRGenRecordLayout - Return record layout info for the given record decl. +const CIRGenRecordLayout & +CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *RD) { + const auto *Key = Context.getTagDeclType(RD).getTypePtr(); + + auto I = CIRGenRecordLayouts.find(Key); + if (I != CIRGenRecordLayouts.end()) + return *I->second; + + // Compute the type information. + convertRecordDeclType(RD); + + // Now try again. + I = CIRGenRecordLayouts.find(Key); + + assert(I != CIRGenRecordLayouts.end() && + "Unable to find record layout information for type"); + return *I->second; +} diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 8a3c6c36fb8d..9ac089d9be37 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -155,6 +155,8 @@ class CIRGenTypes { bool inheritingCtorHasParams(const clang::InheritedConstructor &Inherited, clang::CXXCtorType Type); + const CIRGenRecordLayout &getCIRGenRecordLayout(const clang::RecordDecl *RD); + /// convertTypeForMem - Convert type T into an mlir::Type. This differs from /// convertType in that it is used to convert to the memory representation /// for a type. For example, the scalar representation for _Bool is i1, but From d5d3cea44303887260767bd6c6413faeae6d7094 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 19:41:11 -0400 Subject: [PATCH 0446/2301] [CIR] Handle some edge cases after converting a RecordDecl * Remove the struct from RecordsBeingLaidOut * Clear the TypeCache if we SkippedLayout as evidently we might a FunctionType that is now stale. * Handle deferred decls. We currently aren't deferring, but put it in anyways so that we don't forget later. --- clang/lib/CIR/CIRGenTypes.cpp | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 64eabca064c1..e82d87ed1942 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -94,7 +94,10 @@ CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { llvm_unreachable("NYI"); } - // TODO: handle whether or not layout was skipped and recursive record layout + // Okay, this is a definition of a type. Compile the implementation now. + bool InsertResult = RecordsBeingLaidOut.insert(key).second; + (void)InsertResult; + assert(InsertResult && "Recursively compiling a struct?"); if (const auto *cxxRecordDecl = dyn_cast(recordDecl)) { assert(cxxRecordDecl->bases().begin() == cxxRecordDecl->bases().end() && @@ -102,6 +105,22 @@ CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { } entry = computeRecordLayout(recordDecl); + // We're done laying out this struct. + bool EraseResult = RecordsBeingLaidOut.erase(key); + (void)EraseResult; + assert(EraseResult && "struct not in RecordsBeingLaidOut set?"); + + // If this struct blocked a FunctionType conversion, then recompute whatever + // was derived from that. + // FIXME: This is hugely overconservative. + if (SkippedLayout) + TypeCache.clear(); + + // If we're done converting the outer-most record, then convert any deferred + // structs as well. + if (RecordsBeingLaidOut.empty()) + while (!DeferredRecords.empty()) + convertRecordDeclType(DeferredRecords.pop_back_val()); return entry; } From 568499b047e5c6ceb88fd87e768f9f20cf5c2e18 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 19:52:34 -0400 Subject: [PATCH 0447/2301] [CIR][NFC] Restructure an assert to an unreachable --- clang/lib/CIR/CIRGenTypes.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index e82d87ed1942..2bc0b489d53d 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -99,9 +99,12 @@ CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { (void)InsertResult; assert(InsertResult && "Recursively compiling a struct?"); + // Force conversion of non-virtual base classes recursively. if (const auto *cxxRecordDecl = dyn_cast(recordDecl)) { - assert(cxxRecordDecl->bases().begin() == cxxRecordDecl->bases().end() && - "Base clases NYI"); + for (const auto &I : cxxRecordDecl->bases()) { + (void)I; + llvm_unreachable("NYI"); + } } entry = computeRecordLayout(recordDecl); From 7b4ca4bef5b5fb1f7c92ab3a2115518988792864 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 19:55:58 -0400 Subject: [PATCH 0448/2301] [CIR] Have computeRecordLayout cache the CIRGenRecordLayout Use the computed information to generate a CIRGenRecordLayout in addition to the cir::StructType and cache it as well. Change the function signature to match CG's and include the CIRGenRecordLayout. --- clang/lib/CIR/CIRGenTypes.cpp | 6 +++- clang/lib/CIR/CIRGenTypes.h | 3 +- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 36 +++++++++++++++++++----- 3 files changed, 36 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 2bc0b489d53d..a4433b16d967 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -107,7 +107,11 @@ CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { } } - entry = computeRecordLayout(recordDecl); + // Layout fields. + std::unique_ptr Layout = + computeRecordLayout(recordDecl, entry); + CIRGenRecordLayouts[key] = std::move(Layout); + // We're done laying out this struct. bool EraseResult = RecordsBeingLaidOut.erase(key); (void)EraseResult; diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CIRGenTypes.h index 9ac089d9be37..0e9446b09280 100644 --- a/clang/lib/CIR/CIRGenTypes.h +++ b/clang/lib/CIR/CIRGenTypes.h @@ -145,7 +145,8 @@ class CIRGenTypes { mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl); - mlir::cir::StructType computeRecordLayout(const clang::RecordDecl *); + std::unique_ptr + computeRecordLayout(const clang::RecordDecl *D, mlir::cir::StructType& Ty); std::string getRecordTypeName(const clang::RecordDecl *, llvm::StringRef suffix); diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp index 1fa2fac52de4..0116d5583c7e 100644 --- a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -193,14 +193,18 @@ void CIRRecordLowering::accumulateFields() { } } -mlir::cir::StructType -CIRGenTypes::computeRecordLayout(const RecordDecl *recordDecl) { +std::unique_ptr +CIRGenTypes::computeRecordLayout(const RecordDecl *recordDecl, + mlir::cir::StructType &Ty) { CIRRecordLowering builder(*this, recordDecl, /*packed=*/false); + builder.lower(/*nonVirtualBaseType=*/false); // If we're in C++, compute the base subobject type. + mlir::cir::StructType BaseTy = nullptr; if (llvm::isa(recordDecl) && !recordDecl->isUnion() && !recordDecl->hasAttr()) { + BaseTy = Ty; if (builder.astRecordLayout.getNonVirtualSize() != builder.astRecordLayout.getSize()) { llvm_unreachable("NYI"); @@ -211,13 +215,31 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *recordDecl) { auto name = getRecordTypeName(recordDecl, ""); auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); - auto structType = mlir::cir::StructType::get(&getMLIRContext(), - builder.fieldTypes, identifier); - assert(!getContext().getLangOpts().DumpRecordLayouts && - "RecordLayouts dumping NYI"); + Ty = mlir::cir::StructType::get(&getMLIRContext(), builder.fieldTypes, + identifier); + + auto RL = std::make_unique( + Ty, BaseTy, (bool)builder.IsZeroInitializable, + (bool)builder.IsZeroInitializableAsBase); + + RL->NonVirtualBases.swap(builder.nonVirtualBases); + RL->CompleteObjectVirtualBases.swap(builder.virtualBases); + + // Add all the field numbers. + RL->FieldInfo.swap(builder.fields); + + // Add bitfield info. + RL->BitFields.swap(builder.bitFields); + + // Dump the layout, if requested. + if (getContext().getLangOpts().DumpRecordLayouts) { + llvm_unreachable("NYI"); + } // TODO: implement verification - return structType; + assert(!builder.isPacked && "Packed structs NYI"); + + return RL; } From 9728482cb60f7de4bd8bf4e938bc2e62aa6a0d98 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 5 May 2022 19:59:54 -0400 Subject: [PATCH 0449/2301] [CIR] Restructure an or to unreachable if a else return early This if statement was wrong from the beginning. Luckily we weren't hitting the first two cases. We only want to return in the latter and fail due to current lack of support for the first two. --- clang/lib/CIR/CIRGenTypes.cpp | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index a4433b16d967..3730dc379d90 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -76,21 +76,27 @@ static bool isSafeToConvert(const RecordDecl *RD, CIRGenTypes &CGT) { llvm_unreachable("NYI"); } -mlir::Type -CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { - const auto *key = Context.getTagDeclType(recordDecl).getTypePtr(); +/// convertRecordDeclType - Lay out a tagged decl type like struct or union. +mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { + // TagDecl's are not necessarily unique, instead use the (clang) type + // connected to the decl. + const auto *key = Context.getTagDeclType(RD).getTypePtr(); mlir::cir::StructType &entry = recordDeclTypes[key]; - recordDecl = recordDecl->getDefinition(); - // TODO: clang checks here whether the type is known to be opaque. This is - // equivalent to a forward decl. Is checking for a non-null entry close enough - // of a match? - if (!recordDecl || !recordDecl->isCompleteDefinition() || entry) + RD = RD->getDefinition(); + + // TODO(CIR): clang checks here whether the type is known to be opaque. This + // is equivalent to a forward decl. So far we don't need to support + // opaque/forward-declared record decls. If/when we do we might need to have + // temporary cir::StructType with no members as stand-ins. + if (!RD || !RD->isCompleteDefinition()) + llvm_unreachable("NYI"); + if (entry) return entry; // If converting this type would cause us to infinitely loop, don't do it! - if (!isSafeToConvert(recordDecl, *this)) { + if (!isSafeToConvert(RD, *this)) { llvm_unreachable("NYI"); } @@ -100,7 +106,7 @@ CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { assert(InsertResult && "Recursively compiling a struct?"); // Force conversion of non-virtual base classes recursively. - if (const auto *cxxRecordDecl = dyn_cast(recordDecl)) { + if (const auto *cxxRecordDecl = dyn_cast(RD)) { for (const auto &I : cxxRecordDecl->bases()) { (void)I; llvm_unreachable("NYI"); @@ -108,8 +114,7 @@ CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *recordDecl) { } // Layout fields. - std::unique_ptr Layout = - computeRecordLayout(recordDecl, entry); + std::unique_ptr Layout = computeRecordLayout(RD, entry); CIRGenRecordLayouts[key] = std::move(Layout); // We're done laying out this struct. From e14aad16d6ccf3582ac7bba0a2c14f1945766819 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 17:25:39 -0400 Subject: [PATCH 0450/2301] [CIR] Stub out all the AggExprEmitter fns Instead of hitting a failure at the top level Visit that gives you a vague idea of what to look for just stub everything out with unreachables so that we know what we have to implement next. --- clang/lib/CIR/CIRGenExprAgg.cpp | 102 ++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index a355fde3b175..bebf85e3d06e 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -37,13 +37,115 @@ class AggExprEmitter : public StmtVisitor { // ,IsResultUnused(IsResultUnused) {} + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + void Visit(Expr *E) { // TODO: CodeGen does ApplyDebugLocation here assert(cast(E) && "Only CXXConstructExpr implemented"); StmtVisitor::Visit(E); } + void VisitStmt(Stmt *S) { llvm_unreachable("NYI"); } + void VisitParenExpr(ParenExpr *PE) { llvm_unreachable("NYI"); } + void VisitGenericSelectionExpr(GenericSelectionExpr *GE) { + llvm_unreachable("NYI"); + } + void VisitCoawaitExpr(CoawaitExpr *E) { llvm_unreachable("NYI"); } + void VisitCoyieldExpr(CoyieldExpr *E) { llvm_unreachable("NYI"); } + void VisitUnaryCoawait(UnaryOperator *E) { llvm_unreachable("NYI"); } + void VisitUnaryExtension(UnaryOperator *E) { llvm_unreachable("NYI"); } + void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) { + llvm_unreachable("NYI"); + } + void VisitConstantExpr(ConstantExpr *E) { llvm_unreachable("NYI"); } + + // l-values + void VisitDeclRefExpr(DeclRefExpr *E) { llvm_unreachable("NYI"); } + void VisitMemberExpr(MemberExpr *E) { llvm_unreachable("NYI"); } + void VisitUnaryDeref(UnaryOperator *E) { llvm_unreachable("NYI"); } + void VisitStringLiteral(StringLiteral *E) { llvm_unreachable("NYI"); } + void VisitCompoundLIteralExpr(CompoundLiteralExpr *E) { + llvm_unreachable("NYI"); + } + void VisitArraySubscriptExpr(ArraySubscriptExpr *E) { + llvm_unreachable("NYI"); + } + void VisitPredefinedExpr(const PredefinedExpr *E) { llvm_unreachable("NYI"); } + + // Operators. + void VisitCastExpr(CastExpr *E) { llvm_unreachable("NYI"); } + void VisitCallExpr(const CallExpr *E) { llvm_unreachable("NYI"); } + void VisitStmtExpr(const StmtExpr *E) { llvm_unreachable("NYI"); } + void VisitBinaryOperator(const BinaryOperator *E) { llvm_unreachable("NYI"); } + void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *E) { + llvm_unreachable("NYI"); + } + void VisitBinAssign(const BinaryOperator *E) { llvm_unreachable("NYI"); } + void VisitBinComma(const BinaryOperator *E) { llvm_unreachable("NYI"); } + void VisitBinCmp(const BinaryOperator *E) { llvm_unreachable("NYI"); } + void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { + llvm_unreachable("NYI"); + } + + void VisitObjCMessageExpr(ObjCMessageExpr *E) { llvm_unreachable("NYI"); } + void VisitObjCIVarRefExpr(ObjCIvarRefExpr *E) { llvm_unreachable("NYI"); } + + void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) { + llvm_unreachable("NYI"); + } + void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { + llvm_unreachable("NYI"); + } + void VisitChooseExpr(const ChooseExpr *E) { llvm_unreachable("NYI"); } + void VisitInitListExpr(InitListExpr *E) { llvm_unreachable("NYI"); } + void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, + llvm::Value *outerBegin = nullptr) { + llvm_unreachable("NYI"); + } + void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { + llvm_unreachable("NYI"); + } + void VisitNoInitExpr(NoInitExpr *E) { llvm_unreachable("NYI"); } + void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { llvm_unreachable("NYI"); } + void VisitXCXDefaultInitExpr(CXXDefaultInitExpr *E) { + llvm_unreachable("NYI"); + } + void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { + llvm_unreachable("NYI"); + } void VisitCXXConstructExpr(const CXXConstructExpr *E); + void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E) { + llvm_unreachable("NYI"); + } + void VisitLambdaExpr(LambdaExpr *E) { llvm_unreachable("NYI"); } + void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { + llvm_unreachable("NYI"); + } + void VisitExprWithCleanups(ExprWithCleanups *E) { llvm_unreachable("NYI"); } + void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { + llvm_unreachable("NYI"); + } + void VisitCXXTypeidExpr(CXXTypeidExpr *E) { llvm_unreachable("NYI"); } + void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) { + llvm_unreachable("NYI"); + } + void VisitOpaqueValueExpr(OpaqueValueExpr *E) { llvm_unreachable("NYI"); } + + void VisitPseudoObjectExpr(PseudoObjectExpr *E) { llvm_unreachable("NYI"); } + + void VisitVAArgExpr(VAArgExpr *E) { llvm_unreachable("NYI"); } + + void EmitInitializationToLValue(Expr *E, LValue Address) { + llvm_unreachable("NYI"); + } + void EmitNullInitializationToLValue(LValue Address) { + llvm_unreachable("NYI"); + } + // case Expr::ChoseExprClass: + void VisitCXXThrowExpr(const CXXThrowExpr *E) { llvm_unreachable("NYI"); } + void VisitAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); } }; } // namespace From 2d453753c29755a5390257bb2d400bdd7b90271c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 17:51:24 -0400 Subject: [PATCH 0451/2301] [CIR] Add zeroed flag for AggValueSlot and getters for some properties --- clang/lib/CIR/CIRGenValue.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clang/lib/CIR/CIRGenValue.h b/clang/lib/CIR/CIRGenValue.h index 5ce234ce1d46..35c8a072e271 100644 --- a/clang/lib/CIR/CIRGenValue.h +++ b/clang/lib/CIR/CIRGenValue.h @@ -271,6 +271,11 @@ class AggValueSlot { // Qualifiers clang::Qualifiers Quals; + /// ZeroedFlag - This is set to true if the memory in the slot is known to be + /// zero before the assignment into it. This means that zero field don't need + /// to be set. + bool ZeroedFlag : 1; + /// This is set to true if the tail padding of this slot might overlap another /// object that may have already been initialized (and whose value must be /// preserved by this initialization). If so, we may only store up to the @@ -348,6 +353,8 @@ class AggValueSlot { clang::Qualifiers getQualifiers() const { return Quals; } + bool isVolatile() const { return Quals.hasVolatile(); } + Address getAddress() const { return Addr; } bool isIgnored() const { return !Addr.isValid(); } @@ -355,6 +362,8 @@ class AggValueSlot { Overlap_t mayOverlap() const { return Overlap_t(OverlapFlag); } bool isSanitizerChecked() const { return SanitizerCheckedFlag; } + + IsZeroed_t isZeroed() const { return IsZeroed_t(ZeroedFlag); } }; } // namespace cir From 7195caac804804773257e2a01ab1904703323ddd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 18:10:39 -0400 Subject: [PATCH 0452/2301] [CIR] Support generating a base subobject type in computeRecordLayout If we have a RecordDecl where the nonVirtualSize is not the same as the size then compute the layout for the base type and generating a cir::StructType for it as well. --- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp index 0116d5583c7e..2da825155847 100644 --- a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -194,28 +194,36 @@ void CIRRecordLowering::accumulateFields() { } std::unique_ptr -CIRGenTypes::computeRecordLayout(const RecordDecl *recordDecl, +CIRGenTypes::computeRecordLayout(const RecordDecl *D, mlir::cir::StructType &Ty) { - CIRRecordLowering builder(*this, recordDecl, /*packed=*/false); + CIRRecordLowering builder(*this, D, /*packed=*/false); builder.lower(/*nonVirtualBaseType=*/false); + auto name = getRecordTypeName(D, ""); + auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); + // If we're in C++, compute the base subobject type. mlir::cir::StructType BaseTy = nullptr; - if (llvm::isa(recordDecl) && !recordDecl->isUnion() && - !recordDecl->hasAttr()) { + if (llvm::isa(D) && !D->isUnion() && + !D->hasAttr()) { BaseTy = Ty; if (builder.astRecordLayout.getNonVirtualSize() != builder.astRecordLayout.getSize()) { - llvm_unreachable("NYI"); + CIRRecordLowering baseBuilder(*this, D, /*Packed=*/builder.isPacked); + auto baseIdentifier = + mlir::StringAttr::get(&getMLIRContext(), name + ".base"); + BaseTy = mlir::cir::StructType::get( + &getMLIRContext(), baseBuilder.fieldTypes, baseIdentifier); + // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work + // on both of them with the same index. + assert(builder.isPacked == baseBuilder.isPacked && + "Non-virtual and complete types must agree on packedness"); } } assert(!builder.isPacked && "Packed structs NYI"); - auto name = getRecordTypeName(recordDecl, ""); - auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); - Ty = mlir::cir::StructType::get(&getMLIRContext(), builder.fieldTypes, identifier); From 8a54971470ccf3b97e9c0633ad8c89d97f610f33 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 17:29:16 -0400 Subject: [PATCH 0453/2301] [CIR] Remove the assert in AggExprEmitter::Visit that we only have a ctor --- clang/lib/CIR/CIRGenExprAgg.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index bebf85e3d06e..0d31d5e15ec1 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -42,8 +42,9 @@ class AggExprEmitter : public StmtVisitor { //===--------------------------------------------------------------------===// void Visit(Expr *E) { - // TODO: CodeGen does ApplyDebugLocation here - assert(cast(E) && "Only CXXConstructExpr implemented"); + if (CGF.getDebugInfo()) { + llvm_unreachable("NYI"); + } StmtVisitor::Visit(E); } From dda814fdc651b4f5ad6cf640fc23e7df8c5810d6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 17:31:59 -0400 Subject: [PATCH 0454/2301] [CIR] Add cleanups to UnimplementedFeatureGuarding --- clang/lib/CIR/UnimplementedFeatureGuarding.h | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 70203422864f..dc21780f6638 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -21,6 +21,7 @@ struct UnimplementedFeature { // sanitizer related type check features static bool buildTypeCheck() { return false; } static bool tbaa() { return false; } + static bool cleanups() { return false; } }; } // namespace cir From e676e388ab00f4705a3155e3640e5296fbc362d0 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 18:16:13 -0400 Subject: [PATCH 0455/2301] [CIR] Handle a corner case where CodeGen will opt into using a memset If cg detects that it can simplify some work here it'll memset the memory. We currently don't support any of those cases and, in particular, we just see zeroed out memory here. So just honor the check but assume we're in too simple of a case and move on. --- clang/lib/CIR/CIRGenExprAgg.cpp | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index 0d31d5e15ec1..3c769d878208 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -155,20 +155,36 @@ void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { CGF.buildCXXConstructExpr(E, Slot); } +/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of +/// zeros in it, emit a memset and avoid storing the individual zeros. +static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, + CIRGenFunction &CGF) { + // If the slot is arleady known to be zeroed, nothing to do. Don't mess with + // volatile stores. + if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid()) + return; + + // C++ objects with a user-declared constructor don't need zero'ing. + if (CGF.getLangOpts().CPlusPlus) + if (const auto *RT = CGF.getContext() + .getBaseElementType(E->getType()) + ->getAs()) { + const auto *RD = cast(RT->getDecl()); + if (RD->hasUserDeclaredConstructor()) + return; + } + + llvm_unreachable("NYI"); +} + void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { assert(E && CIRGenFunction::hasAggregateEvaluationKind(E->getType()) && "Invalid aggregate expression to emit"); assert((Slot.getAddress().isValid() || Slot.isIgnored()) && "slot has bits but no address"); - // TODO: assert(false && "Figure out how to assert we're in c++"); - if (const RecordType *RT = CGM.getASTContext() - .getBaseElementType(E->getType()) - ->getAs()) { - auto *RD = cast(RT->getDecl()); - assert(RD->hasUserDeclaredConstructor() && - "default constructors aren't expected here YET"); - } + // Optimize the slot if possible. + CheckAggExprForMemSetUse(Slot, E, *this); AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast(E)); } From 460ae36c536637dfe2902fcf58db247f6e0392f2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 17:32:30 -0400 Subject: [PATCH 0456/2301] [CIR] Handle ExprWithCleanups hackily for now Just dispatch to the subexpression with an unreachable for the cleanups. This might generate wrong code that we'll have to fix. --- clang/lib/CIR/CIRGenExprAgg.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index 3c769d878208..94ee7c461e69 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -14,6 +14,7 @@ #include "CIRGenModule.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" +#include "UnimplementedFeatureGuarding.h" #include "clang/AST/StmtVisitor.h" @@ -124,7 +125,7 @@ class AggExprEmitter : public StmtVisitor { void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { llvm_unreachable("NYI"); } - void VisitExprWithCleanups(ExprWithCleanups *E) { llvm_unreachable("NYI"); } + void VisitExprWithCleanups(ExprWithCleanups *E); void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { llvm_unreachable("NYI"); } @@ -155,6 +156,12 @@ void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { CGF.buildCXXConstructExpr(E, Slot); } +void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { + if (UnimplementedFeature::cleanups()) + llvm_unreachable("NYI"); + Visit(E->getSubExpr()); +} + /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of /// zeros in it, emit a memset and avoid storing the individual zeros. static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, From 5bba56851db0053e7188512818c3afee7790cd61 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 17:34:38 -0400 Subject: [PATCH 0457/2301] [CIR][NFC] Move buildCXXConstructExpr from CIRGenExpr to CIRGenExprCXX --- clang/lib/CIR/CIRGenExpr.cpp | 33 --------------------------------- clang/lib/CIR/CIRGenExprCXX.cpp | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 271ad2abd261..b3bfdd5fdfa6 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -877,39 +877,6 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, return buildFromMemory(Load, Ty); } -void CIRGenFunction::buildCXXConstructExpr(const clang::CXXConstructExpr *E, - AggValueSlot Dest) { - assert(!Dest.isIgnored() && "Must have a destination!"); - const auto *CD = E->getConstructor(); - - assert(!E->requiresZeroInitialization() && "zero initialization NYI"); - - // If this is a call to a trivial default constructor, do nothing. - if (CD->isTrivial() && CD->isDefaultConstructor()) - assert(!CD->isTrivial() && "trivial constructors NYI"); - - assert(!E->isElidable() && "elidable constructors NYI"); - - assert(!CGM.getASTContext().getAsArrayType(E->getType()) && - "array types NYI"); - - clang::CXXCtorType Type = Ctor_Complete; - bool ForVirtualBase = false; - bool Delegating = false; - - switch (E->getConstructionKind()) { - case CXXConstructionKind::Complete: - Type = Ctor_Complete; - break; - case CXXConstructionKind::Delegating: - case CXXConstructionKind::VirtualBase: - case CXXConstructionKind::NonVirtualBase: - assert(false && "Delegating, Virtualbae and NonVirtualBase ctorkind NYI"); - } - - buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); -} - // Note: this function also emit constructor calls to support a MSVC extensions // allowing explicit constructor function call. RValue CIRGenFunction::buildCXXMemberCallExpr(const CXXMemberCallExpr *CE, diff --git a/clang/lib/CIR/CIRGenExprCXX.cpp b/clang/lib/CIR/CIRGenExprCXX.cpp index 72462f616678..34c41b6b6365 100644 --- a/clang/lib/CIR/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CIRGenExprCXX.cpp @@ -208,3 +208,36 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( CalleeDecl, Callee, ReturnValue, This.getPointer(), /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs); } + +void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, + AggValueSlot Dest) { + assert(!Dest.isIgnored() && "Must have a destination!"); + const auto *CD = E->getConstructor(); + + assert(!E->requiresZeroInitialization() && "zero initialization NYI"); + + // If this is a call to a trivial default constructor, do nothing. + if (CD->isTrivial() && CD->isDefaultConstructor()) + assert(!CD->isTrivial() && "trivial constructors NYI"); + + assert(!E->isElidable() && "elidable constructors NYI"); + + assert(!CGM.getASTContext().getAsArrayType(E->getType()) && + "array types NYI"); + + clang::CXXCtorType Type = Ctor_Complete; + bool ForVirtualBase = false; + bool Delegating = false; + + switch (E->getConstructionKind()) { + case CXXConstructionKind::Complete: + Type = Ctor_Complete; + break; + case CXXConstructionKind::Delegating: + case CXXConstructionKind::VirtualBase: + case CXXConstructionKind::NonVirtualBase: + assert(false && "Delegating, Virtualbae and NonVirtualBase ctorkind NYI"); + } + + buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); +} From acea3562476e13133b0811aa21f1d6bf10869c97 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 17:38:18 -0400 Subject: [PATCH 0458/2301] [CIR] Support elidable constructors in buildCXXConstructExpr Lambdas are elidable. We technically don't need this anytime soon and could just opt out of supporitng eliding constructors for now, but since it just unwraps and forwards to buildAggExpr this is simple enough to continue with. --- clang/lib/CIR/CIRGenExprCXX.cpp | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenExprCXX.cpp b/clang/lib/CIR/CIRGenExprCXX.cpp index 34c41b6b6365..83357330cbfc 100644 --- a/clang/lib/CIR/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CIRGenExprCXX.cpp @@ -220,7 +220,20 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, if (CD->isTrivial() && CD->isDefaultConstructor()) assert(!CD->isTrivial() && "trivial constructors NYI"); - assert(!E->isElidable() && "elidable constructors NYI"); + // Elide the constructor if we're constructing from a temporary + if (getLangOpts().ElideConstructors && E->isElidable()) { + // FIXME: This only handles the simplest case, where the source object is + // passed directly as the first argument to the constructor. This + // should also handle stepping through implicit casts and conversion + // sequences which involve two steps, with a conversion operator + // follwed by a converting constructor. + const auto *SrcObj = E->getArg(0); + assert(SrcObj->isTemporaryObject(getContext(), CD->getParent())); + assert( + getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType())); + buildAggExpr(SrcObj, Dest); + return; + } assert(!CGM.getASTContext().getAsArrayType(E->getType()) && "array types NYI"); From e7cac497a608c076ac0ccb3b3676104d5969a9c2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 17:40:58 -0400 Subject: [PATCH 0459/2301] [CIR] Support visiting MaterializeTemporaryExpr in AggExprEmitter This is trivial and just dispatches to the subexpr --- clang/lib/CIR/CIRGenExprAgg.cpp | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index 94ee7c461e69..df29789ce285 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -130,9 +130,7 @@ class AggExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } void VisitCXXTypeidExpr(CXXTypeidExpr *E) { llvm_unreachable("NYI"); } - void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) { - llvm_unreachable("NYI"); - } + void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E); void VisitOpaqueValueExpr(OpaqueValueExpr *E) { llvm_unreachable("NYI"); } void VisitPseudoObjectExpr(PseudoObjectExpr *E) { llvm_unreachable("NYI"); } @@ -151,6 +149,15 @@ class AggExprEmitter : public StmtVisitor { }; } // namespace +//===----------------------------------------------------------------------===// +// Visitor Methods +//===----------------------------------------------------------------------===// + +void AggExprEmitter::VisitMaterializeTemporaryExpr( + MaterializeTemporaryExpr *E) { + Visit(E->getSubExpr()); +} + void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { AggValueSlot Slot = EnsureSlot(E->getType()); CGF.buildCXXConstructExpr(E, Slot); From afb22a1972cc0e207f20114923a818e3a7d04591 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 17:50:26 -0400 Subject: [PATCH 0460/2301] [CIR] Implement AggExprEmitter::VisitLambdaExpr This is mostly stubbed out and not properly functional. Notably we don't support cleanups yet and this explicitly excludes captures. --- clang/lib/CIR/CIRGenExprAgg.cpp | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index df29789ce285..584c18ce473d 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -121,7 +121,7 @@ class AggExprEmitter : public StmtVisitor { void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E) { llvm_unreachable("NYI"); } - void VisitLambdaExpr(LambdaExpr *E) { llvm_unreachable("NYI"); } + void VisitLambdaExpr(LambdaExpr *E); void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { llvm_unreachable("NYI"); } @@ -169,6 +169,30 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { Visit(E->getSubExpr()); } +void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { + // We'll need to enter cleanup scopes in case any of the element initializers + // throws an exception. + if (UnimplementedFeature::cleanups()) + llvm_unreachable("NYI"); + mlir::Operation *CleanupDominator = nullptr; + + CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin(); + for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(), + e = E->capture_init_end(); + i != e; ++i, ++CurField) { + llvm_unreachable("NYI"); + } + + // Deactivate all the partial cleanups in reverse order, which generally means + // popping them. + if (UnimplementedFeature::cleanups()) + llvm_unreachable("NYI"); + + // Destroy the placeholder if we made one. + if (CleanupDominator) + CleanupDominator->erase(); +} + /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of /// zeros in it, emit a memset and avoid storing the individual zeros. static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, From 9aa8c0151dc63e59776929b137e8b1aa54e9b214 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 May 2022 17:52:28 -0400 Subject: [PATCH 0461/2301] [CIR] Add a simple test to codegen a trivial lambda --- clang/test/CIR/CodeGen/lambda.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 clang/test/CIR/CodeGen/lambda.cpp diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp new file mode 100644 index 000000000000..bba4d1618c8a --- /dev/null +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +void fn() { + auto a = [](){}; +} + +// CHECK: !22class2Eanon22 = type !cir.struct<"class.anon", i8> +// CHECK-NEXT: module +// CHECK-NEXT: func @_Z2fnv() +// CHECK-NEXT: %0 = cir.alloca !22class2Eanon22, cir.ptr , ["a", uninitialized] From e7db542a11aea343ca6f609bd34374057cc16381 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:07:20 -0400 Subject: [PATCH 0462/2301] [CIR] Add `capturedByInit` to buildExprAsInit Also assert that it's never true as we aren't yet covering it. But leave it possible to be true for assertion purposes. --- clang/lib/CIR/CIRGenDecl.cpp | 5 ++++- clang/lib/CIR/CIRGenFunction.h | 4 +++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenDecl.cpp b/clang/lib/CIR/CIRGenDecl.cpp index d518d423ce62..c704ef976c6a 100644 --- a/clang/lib/CIR/CIRGenDecl.cpp +++ b/clang/lib/CIR/CIRGenDecl.cpp @@ -218,7 +218,10 @@ void CIRGenFunction::buildScalarInit(const Expr *init, const ValueDecl *D, } void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, - LValue lvalue) { + LValue lvalue, bool capturedByInit) { + if (capturedByInit) + llvm_unreachable("NYI"); + QualType type = D->getType(); if (type->isReferenceType()) { diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index a1347e1ebe55..fc700fbd69de 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -719,8 +719,10 @@ class CIRGenFunction { /// \param init the initializing expression /// \param D the object to act as if we're initializing /// \param lvalue the lvalue to initialize + /// \param capturedByInit true if \p D is a __block variable whose address is + /// potentially changed by the initializer void buildExprAsInit(const clang::Expr *init, const clang::ValueDecl *D, - LValue lvalue); + LValue lvalue, bool capturedByInit = false); /// Emit code and set up symbol table for a variable declaration with auto, /// register, or no storage class specifier. These turn into simple stack From c10624bc184f3b080aecb8c26e671bb3522ce936 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 18:08:37 -0400 Subject: [PATCH 0463/2301] [CIR] Explicitly set SkippedLayout in CIRGenTypes to false during ctor --- clang/lib/CIR/CIRGenTypes.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 3730dc379d90..4cacaf406ddb 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -27,7 +27,10 @@ unsigned CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { CIRGenTypes::CIRGenTypes(CIRGenModule &cgm) : Context(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm}, Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()), - TheABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) {} + TheABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) { + SkippedLayout = false; +} + CIRGenTypes::~CIRGenTypes() { for (llvm::FoldingSet::iterator I = FunctionInfos.begin(), E = FunctionInfos.end(); From 5ccb1e1ebc60f2cc260657dd129ea85f9e1de339 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:08:04 -0400 Subject: [PATCH 0464/2301] [CIR][NFC] Fail if we hit an unknown CastOp during verification --- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 4fa96e6eead1..024bf9bc3a82 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -168,7 +168,7 @@ LogicalResult CastOp::verify() { } } - return success(); + llvm_unreachable("Unknown CastOp kind?"); } //===----------------------------------------------------------------------===// From aaa5ab74881051f667dbdfc5df559a22be672417 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:09:34 -0400 Subject: [PATCH 0465/2301] [CIR] Add an integral cast for converting between integer sizes This simply introduces a new cast kind for converting from, for example, i32 to i64. --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 3 ++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index de38f61dccfd..329da65b9b68 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -40,11 +40,12 @@ class CIR_Op traits = []> : // The enumaration value isn't in sync with clang. def CK_IntegralToBoolean : I32EnumAttrCase<"int_to_bool", 1>; def CK_ArrayToPointerDecay : I32EnumAttrCase<"array_to_ptrdecay", 2>; +def CK_IntegralCast : I32EnumAttrCase<"integral", 3>; def CastKind : I32EnumAttr< "CastKind", "cast kind", - [CK_IntegralToBoolean, CK_ArrayToPointerDecay]> { + [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast]> { let cppNamespace = "::mlir::cir"; } diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 024bf9bc3a82..4942356127b0 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -151,6 +151,13 @@ LogicalResult CastOp::verify() { return emitOpError() << "requires integral type for result"; return success(); } + case cir::CastKind::integral: { + if (!resType.isa()) + return emitOpError() << "requires !IntegerType for result"; + if (!srcType.isa()) + return emitOpError() << "requires !IntegerType for source"; + return success(); + } case cir::CastKind::array_to_ptrdecay: { auto arrayPtrTy = srcType.dyn_cast(); auto flatPtrTy = resType.dyn_cast(); From 763f63cac84cfb45ce0c6ea5b696f58fd093ee7d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:11:43 -0400 Subject: [PATCH 0466/2301] [CIR] Add an UnimplementedFeature guard for mlir::cir::VectorType --- clang/lib/CIR/UnimplementedFeatureGuarding.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index dc21780f6638..19c73fffe5b1 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -22,6 +22,9 @@ struct UnimplementedFeature { static bool buildTypeCheck() { return false; } static bool tbaa() { return false; } static bool cleanups() { return false; } + // This is for whether or not we've implemented a cir::VectorType + // corresponding to `llvm::VectorType` + static bool cirVectorType() { return false; } }; } // namespace cir From c004068e7eca9b4070d8d38a310624fb09ca25eb Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:12:09 -0400 Subject: [PATCH 0467/2301] [CIR][NFC] Add a header comment for CIRGenExprScalar.cpp --- clang/lib/CIR/CIRGenExprScalar.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 43e3cf39daf5..342ee7c07259 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -1,3 +1,15 @@ +//===--- CIRGenExprScalar.cpp - Emit CIR Code for Scalar Exprs ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Expr nodes with scalar CIR types as CIR code. +// +//===----------------------------------------------------------------------===// + #include "CIRGenFunction.h" #include "CIRGenModule.h" From af2d1e40da3c94d6189907219dbbc02a3d5200d3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:34:05 -0400 Subject: [PATCH 0468/2301] [CIR] Remove unnecessary assert for computeRecordLayout This is actually incorrect at this point as we have since added support for packed layouts (and asserts elsewhere where corner cases aren't supported). --- clang/lib/CIR/CIRRecordLayoutBuilder.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp index 2da825155847..48c3e0b6c04f 100644 --- a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CIRRecordLayoutBuilder.cpp @@ -222,8 +222,6 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, } } - assert(!builder.isPacked && "Packed structs NYI"); - Ty = mlir::cir::StructType::get(&getMLIRContext(), builder.fieldTypes, identifier); @@ -247,7 +245,5 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, // TODO: implement verification - assert(!builder.isPacked && "Packed structs NYI"); - return RL; } From 6289d0d8df77f1cac2fbf0410dae7abca6ca4774 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:36:04 -0400 Subject: [PATCH 0469/2301] [CIR][NFC] Add a few unreachables for unimplemented paths in buildStoreOfSclar --- clang/lib/CIR/CIRGenExpr.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index b3bfdd5fdfa6..9f9055503f64 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -212,6 +212,12 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, } assert(currSrcLoc && "must pass in source location"); builder.create(*currSrcLoc, Value, Addr.getPointer()); + if (isNontemporal) { + llvm_unreachable("NYI"); + } + + if (UnimplementedFeature::tbaa()) + llvm_unreachable("NYI"); } void CIRGenFunction::buldStoreThroughLValue(RValue Src, LValue Dst, From 48ce158e5e957199f03b03d955416a1711c40979 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:38:48 -0400 Subject: [PATCH 0470/2301] [CIR] Add a property for ScalarExprEmitter that is used during a few visitors This is NFC as of now but will be used in a following commit. Add it here since it's standalone. --- clang/lib/CIR/CIRGenExprScalar.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 342ee7c07259..5357585bfc6a 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -29,7 +29,8 @@ class ScalarExprEmitter : public StmtVisitor { mlir::OpBuilder &Builder; public: - ScalarExprEmitter(CIRGenFunction &cgf, mlir::OpBuilder &builder) + ScalarExprEmitter(CIRGenFunction &cgf, mlir::OpBuilder &builder, + bool ira = false) : CGF(cgf), Builder(builder) {} mlir::Value Visit(Expr *E) { From f75c6ac75318e92900a10a059c8499fc0b3c4e63 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:40:07 -0400 Subject: [PATCH 0471/2301] [CIR] Add an unreachable for cleanups in buildInitializerForField --- clang/lib/CIR/CIRGenClass.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index 65417dee7581..6f81a7ecaf4b 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -12,6 +12,7 @@ #include "CIRGenCXXABI.h" #include "CIRGenFunction.h" +#include "UnimplementedFeatureGuarding.h" #include "clang/AST/RecordLayout.h" @@ -434,6 +435,14 @@ Address CIRGenFunction::LoadCXXThisAddress() { void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init) { llvm_unreachable("NYI"); + QualType FieldType = Field->getType(); + + // Ensure that we destroy this object if an exception is thrown later in the + // constructor. + QualType::DestructionKind dtorKind = FieldType.isDestructedType(); + (void)dtorKind; + if (UnimplementedFeature::cleanups()) + llvm_unreachable("NYI"); } void CIRGenFunction::buildDelegateCXXConstructorCall( From 78bc35b20a5d49c83abba9dab7fa4b1ae0967189 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:40:47 -0400 Subject: [PATCH 0472/2301] [CIR] Add a type holding options for ScalarConversion During scalar conversion there are times when sanitizers will mandate adding extra checks. This is tracked in codegen with this struct. We don't support sanitizers yet, but we can track the usages with asserts using this struct. --- clang/lib/CIR/CIRGenExprScalar.cpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 5357585bfc6a..eb6619c6ebe4 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -66,6 +66,25 @@ class ScalarExprEmitter : public StmtVisitor { return buildLoadOfLValue(E); } + // Emit a conversion from the specified type to the specified destination + // type, both of which are CIR scalar types. + struct ScalarConversionOpts { + bool TreatBooleanAsSigned; + bool EmitImplicitIntegerTruncationChecks; + bool EmitImplicitIntegerSignChangeChecks; + + ScalarConversionOpts() + : TreatBooleanAsSigned(false), + EmitImplicitIntegerTruncationChecks(false), + EmitImplicitIntegerSignChangeChecks(false) {} + + ScalarConversionOpts(clang::SanitizerSet SanOpts) + : TreatBooleanAsSigned(false), + EmitImplicitIntegerTruncationChecks( + SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)), + EmitImplicitIntegerSignChangeChecks( + SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {} + }; // Emit code for an explicit or implicit cast. Implicit // casts have to handle a more broad range of conversions than explicit // casts, as they handle things like function to ptr-to-function decay From 0ec8a2c080be9b806f7ba791561894c3a2a424be Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:43:58 -0400 Subject: [PATCH 0473/2301] [CIR] Add all CK cast kinds in ScalarExprEmitter::VisitCastExpr This is super redundant for now with the excessive unreachables, but it's easier during development to just crash at a specific line number to know what we need to implement instead of having to run in the debugger to see which cast was used. --- clang/lib/CIR/CIRGenExprScalar.cpp | 140 +++++++++++++++++++++++++++-- 1 file changed, 134 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index eb6619c6ebe4..d9cb74754ba6 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -93,11 +93,50 @@ class ScalarExprEmitter : public StmtVisitor { Expr *E = CE->getSubExpr(); QualType DestTy = CE->getType(); CastKind Kind = CE->getCastKind(); + // Since almost all cast kinds apply to scalars, this switch doesn't have a + // default case, so the compiler will warn on a missing case. The cases are + // in the same order as in the CastKind enum. switch (Kind) { - case CK_LValueToRValue: - assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)); - assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); - return Visit(const_cast(E)); + case clang::CK_Dependent: + llvm_unreachable("dependent cast kind in CIR gen!"); + case clang::CK_BuiltinFnToFnPtr: + llvm_unreachable("builtin functions are handled elsewhere"); + + case CK_LValueBitCast: + llvm_unreachable("NYI"); + case CK_ObjCObjectLValueCast: + llvm_unreachable("NYI"); + case CK_LValueToRValueBitCast: + llvm_unreachable("NYI"); + case CK_CPointerToObjCPointerCast: + llvm_unreachable("NYI"); + case CK_BlockPointerToObjCPointerCast: + llvm_unreachable("NYI"); + case CK_AnyPointerToBlockPointerCast: + llvm_unreachable("NYI"); + case CK_BitCast: + llvm_unreachable("NYI"); + case CK_AddressSpaceConversion: + llvm_unreachable("NYI"); + case CK_AtomicToNonAtomic: + llvm_unreachable("NYI"); + case CK_NonAtomicToAtomic: + llvm_unreachable("NYI"); + case CK_UserDefinedConversion: + llvm_unreachable("NYI"); + case CK_NoOp: + llvm_unreachable("NYI"); + case CK_BaseToDerived: + llvm_unreachable("NYI"); + case CK_DerivedToBase: + llvm_unreachable("NYI"); + case CK_Dynamic: + llvm_unreachable("NYI"); + case CK_ArrayToPointerDecay: + llvm_unreachable("NYI"); + case CK_FunctionToPointerDecay: + llvm_unreachable("NYI"); + case CK_NullToPointer: { // FIXME: use MustVisitNullValue(E) and evaluate expr. // Note that DestTy is used as the MLIR type instead of a custom @@ -107,16 +146,105 @@ class ScalarExprEmitter : public StmtVisitor { CGF.getLoc(E->getExprLoc()), Ty, mlir::cir::NullAttr::get(Builder.getContext(), Ty)); } + case CK_NullToMemberPointer: + llvm_unreachable("NYI"); + case CK_ReinterpretMemberPointer: + llvm_unreachable("NYI"); + case CK_BaseToDerivedMemberPointer: + llvm_unreachable("NYI"); + case CK_DerivedToBaseMemberPointer: + llvm_unreachable("NYI"); + case CK_ARCProduceObject: + llvm_unreachable("NYI"); + case CK_ARCConsumeObject: + llvm_unreachable("NYI"); + case CK_ARCReclaimReturnedObject: + llvm_unreachable("NYI"); + case CK_ARCExtendBlockObject: + llvm_unreachable("NYI"); + case CK_CopyAndAutoreleaseBlockObject: + llvm_unreachable("NYI"); + case CK_FloatingRealToComplex: + llvm_unreachable("NYI"); + case CK_FloatingComplexCast: + llvm_unreachable("NYI"); + case CK_IntegralComplexToFloatingComplex: + llvm_unreachable("NYI"); + case CK_FloatingComplexToIntegralComplex: + llvm_unreachable("NYI"); + case CK_ConstructorConversion: + llvm_unreachable("NYI"); + case CK_ToUnion: + llvm_unreachable("NYI"); + + case CK_LValueToRValue: + assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)); + assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); + return Visit(const_cast(E)); + + case CK_IntegralToPointer: + llvm_unreachable("NYI"); + case CK_PointerToIntegral: + llvm_unreachable("NYI"); + case CK_ToVoid: + llvm_unreachable("NYI"); + case CK_MatrixCast: + llvm_unreachable("NYI"); + case CK_VectorSplat: + llvm_unreachable("NYI"); + case CK_FixedPointCast: + llvm_unreachable("NYI"); + case CK_FixedPointToBoolean: + llvm_unreachable("NYI"); + case CK_FixedPointToIntegral: + llvm_unreachable("NYI"); + case CK_IntegralToFixedPoint: + llvm_unreachable("NYI"); + + case CK_IntegralToFloating: + llvm_unreachable("NYI"); + case CK_FloatingToIntegral: + llvm_unreachable("NYI"); + case CK_FloatingCast: + llvm_unreachable("NYI"); + case CK_FixedPointToFloating: + llvm_unreachable("NYI"); + case CK_FloatingToFixedPoint: + llvm_unreachable("NYI"); + case CK_BooleanToSignedIntegral: + llvm_unreachable("NYI"); + case CK_IntegralToBoolean: { return buildIntToBoolConversion(Visit(E), CGF.getLoc(CE->getSourceRange())); } + + case CK_PointerToBoolean: + llvm_unreachable("NYI"); + case CK_FloatingToBoolean: + llvm_unreachable("NYI"); + case CK_MemberPointerToBoolean: + llvm_unreachable("NYI"); + case CK_FloatingComplexToReal: + llvm_unreachable("NYI"); + case CK_IntegralComplexToReal: + llvm_unreachable("NYI"); + case CK_FloatingComplexToBoolean: + llvm_unreachable("NYI"); + case CK_IntegralComplexToBoolean: + llvm_unreachable("NYI"); + case CK_ZeroToOCLOpaqueType: + llvm_unreachable("NYI"); + case CK_IntToOCLSampler: + llvm_unreachable("NYI"); + default: emitError(CGF.getLoc(CE->getExprLoc()), "cast kind not implemented: '") << CE->getCastKindName() << "'"; - assert(0 && "not implemented"); return nullptr; - } + } // end of switch + + llvm_unreachable("unknown scalar cast"); } mlir::Value VisitCallExpr(const CallExpr *E) { From 2d00423b739c799d8aad139889b1ae6265bc0d3d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:46:31 -0400 Subject: [PATCH 0474/2301] [CIR] Add a ScalarConversionOpts arg to buildScalarConversion ...with a default constructor as the default argument to not break current usages while extended it's API for future uses. --- clang/lib/CIR/CIRGenExprScalar.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index d9cb74754ba6..1708a83b1782 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -511,8 +511,10 @@ class ScalarExprEmitter : public StmtVisitor { /// type, both of which are CIR scalar types. /// TODO: do we need ScalarConversionOpts here? Should be done in another /// pass. - mlir::Value buildScalarConversion(mlir::Value Src, QualType SrcType, - QualType DstType, SourceLocation Loc) { + mlir::Value + buildScalarConversion(mlir::Value Src, QualType SrcType, QualType DstType, + SourceLocation Loc, + ScalarConversionOpts Opts = ScalarConversionOpts()) { if (SrcType->isFixedPointType()) { assert(0 && "not implemented"); } else if (DstType->isFixedPointType()) { From 12330493f5c9d2a6e327deb988f98d7ed6035967 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:47:20 -0400 Subject: [PATCH 0475/2301] [CIR] Add a call to TestAndClearIgnoreResultAssign in VisitCastExpr This is used in a few different cases, add it here early with a void cast to not have a warning. --- clang/lib/CIR/CIRGenExprScalar.cpp | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 1708a83b1782..5ebff2d02a63 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -27,11 +27,22 @@ namespace { class ScalarExprEmitter : public StmtVisitor { CIRGenFunction &CGF; mlir::OpBuilder &Builder; + bool IgnoreResultAssign; public: ScalarExprEmitter(CIRGenFunction &cgf, mlir::OpBuilder &builder, bool ira = false) - : CGF(cgf), Builder(builder) {} + : CGF(cgf), Builder(builder), IgnoreResultAssign(ira) {} + + //===--------------------------------------------------------------------===// + // Utilities + //===--------------------------------------------------------------------===// + + bool TestAndClearIgnoreResultAssign() { + bool I = IgnoreResultAssign; + IgnoreResultAssign = false; + return I; + } mlir::Value Visit(Expr *E) { return StmtVisitor::Visit(E); @@ -93,6 +104,12 @@ class ScalarExprEmitter : public StmtVisitor { Expr *E = CE->getSubExpr(); QualType DestTy = CE->getType(); CastKind Kind = CE->getCastKind(); + + // These cases are generally not written to ignore the result of evaluating + // their sub-expressions, so we clear this now. + bool Ignored = TestAndClearIgnoreResultAssign(); + (void)Ignored; + // Since almost all cast kinds apply to scalars, this switch doesn't have a // default case, so the compiler will warn on a missing case. The cases are // in the same order as in the CastKind enum. From b98f24bc01aa1ce074de3097d929bcec5edfd26d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:49:17 -0400 Subject: [PATCH 0476/2301] [CIR] Add ExprScalarEmitter::VisitStmt This just errors out trivially in accordance with codegen. --- clang/lib/CIR/CIRGenExprScalar.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 5ebff2d02a63..c0f280a79f13 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -48,6 +48,11 @@ class ScalarExprEmitter : public StmtVisitor { return StmtVisitor::Visit(E); } + mlir::Value VisitStmt(Stmt *S) { + S->dump(llvm::errs(), CGF.getContext()); + llvm_unreachable("Stmt can't have complex result type!"); + } + /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { LValue LV = CGF.buildLValue(E); From b2aa72ed7e390123eef1ef16703450076201fdb8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:49:53 -0400 Subject: [PATCH 0477/2301] [CIR][NFC] Add a comment banner for Visitors in ScalarExprEmitter --- clang/lib/CIR/CIRGenExprScalar.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index c0f280a79f13..85b65191ca9f 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -44,6 +44,10 @@ class ScalarExprEmitter : public StmtVisitor { return I; } + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + mlir::Value Visit(Expr *E) { return StmtVisitor::Visit(E); } From b6add1b721ba6b8a67d0f1c0abc7a46512922eef Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:50:15 -0400 Subject: [PATCH 0478/2301] [CIR] Convert a TODO to an unreacable with a proper test --- clang/lib/CIR/CIRGenExprScalar.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 85b65191ca9f..2d986e87b81d 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -606,7 +606,13 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value Res = nullptr; mlir::Type ResTy = DstTy; - // TODO: implement CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) + // An overflowing conversion has undefined behavior if eitehr the source + // type or the destination type is a floating-point type. However, we + // consider the range of representable values for all floating-point types + // to be [-inf,+inf], so no overflow can ever happen when the destination + // type is a floating-point type. + if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow)) + llvm_unreachable("NYI"); // Cast to half through float if half isn't a native type. if (DstType->isHalfType() && From 5ce8cc6040c65eddd2bffd9ef64cd0188e01dc76 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:50:40 -0400 Subject: [PATCH 0479/2301] [CIR][NFC] Convert an assert to an unreachable --- clang/lib/CIR/CIRGenExprScalar.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 2d986e87b81d..3aca76023f79 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -617,12 +617,12 @@ class ScalarExprEmitter : public StmtVisitor { // Cast to half through float if half isn't a native type. if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { - assert(0 && "not implemented"); + llvm_unreachable("NYI"); } // TODO: Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); if (DstTy != ResTy) { - assert(0 && "not implemented"); + llvm_unreachable("NYI"); } return Res; From 78740bdb43f97fc9d45e3bb1ab825fdb1b2266cd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:51:06 -0400 Subject: [PATCH 0480/2301] [CIR] Fail on two cases where we use the ScalarConversionOpts As mentioned previously, this is not used, just implemented purely to handle these two assertion cases (and a few others). --- clang/lib/CIR/CIRGenExprScalar.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 3aca76023f79..736414494ba8 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -625,6 +625,12 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } + if (Opts.EmitImplicitIntegerTruncationChecks) + llvm_unreachable("NYI"); + + if (Opts.EmitImplicitIntegerSignChangeChecks) + llvm_unreachable("NYI"); + return Res; } From c4cdd5f255a129049adfcce9f0ee3fbdcdb91b4c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:52:27 -0400 Subject: [PATCH 0481/2301] [CIR] Implement buildInitializerForField This trivially delegates to buildExprAsInit for TEK_Scalars where LHS.isSimple() and fails elsewise. --- clang/lib/CIR/CIRGenClass.cpp | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index 6f81a7ecaf4b..3e0a27e1ae9f 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -434,8 +434,23 @@ Address CIRGenFunction::LoadCXXThisAddress() { void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init) { - llvm_unreachable("NYI"); QualType FieldType = Field->getType(); + switch (getEvaluationKind(FieldType)) { + case TEK_Scalar: + if (LHS.isSimple()) { + buildExprAsInit(Init, Field, LHS, false); + } else { + llvm_unreachable("NYI"); + } + break; + case TEK_Complex: + llvm_unreachable("NYI"); + break; + case TEK_Aggregate: { + llvm_unreachable("NYI"); + break; + } + } // Ensure that we destroy this object if an exception is thrown later in the // constructor. From eda1e3c577a8da92a8c6fc8b9177448f1ea8751d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:54:01 -0400 Subject: [PATCH 0482/2301] [CIR] Add ScalarExprEmitter::VisitInitListExpr This just adds three unreachables on uncovered cases and delegates back to visit. --- clang/lib/CIR/CIRGenExprScalar.cpp | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 736414494ba8..c08c87b8df4c 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -12,6 +12,7 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "UnimplementedFeatureGuarding.h" #include "clang/AST/StmtVisitor.h" @@ -57,6 +58,8 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("Stmt can't have complex result type!"); } + mlir::Value VisitInitListExpr(InitListExpr *E); + /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { LValue LV = CGF.buildLValue(E); @@ -680,3 +683,24 @@ bool CIRGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, ResultBool = ResultInt.getBoolValue(); return true; } + +mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { + bool Ignore = TestAndClearIgnoreResultAssign(); + (void)Ignore; + assert(Ignore == false && "init list ignored"); + unsigned NumInitElements = E->getNumInits(); + + if (E->hadArrayRangeDesignator()) + llvm_unreachable("NYI"); + + if (UnimplementedFeature::cirVectorType()) + llvm_unreachable("NYI"); + + if (NumInitElements == 0) { + // C++11 value-initialization for the scalar. + llvm_unreachable("NYI"); + } + + return Visit(E->getInit(0)); +} + From 45f2028d6ae3f521b6f5b82c6f81b25294adbd89 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 17:55:00 -0400 Subject: [PATCH 0483/2301] [CIR] Add StructElementAddr operation to get the offset of a member in a struct This is a first pass that'll be iterated on later. Just getting it in to have a working version first. TODO: * Add a printer and parser * Rename it to (probably) get_member --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 26 ++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 329da65b9b68..7f17f530fd82 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -914,4 +914,30 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { let hasVerifier = 1; } +def StructElementAddr : CIR_Op<"struct_element_addr"> { + let summary = "get the address of a member of a struct"; + let description = [{ + The `cir.struct_element_addr` operaration gets the address of a particular + named member from the input struct. + + Example: + ```mlir + !22struct2EBar22 = type !cir.struct<"struct.Bar", i32, i8> + ... + %0 = cir.alloca !22struct2EBar22, cir.ptr + ... + %1 = cir.struct_element_addr %0, "Bar.a" + %2 = cir.load %1 : cir.ptr , int + WIP + ``` + }]; + + let arguments = (ins + Arg:$struct_addr, + StrAttr:$member_name); + + let results = (outs Res:$result); +} + #endif // MLIR_CIR_DIALECT_CIR_OPS + From b1daf9794d291e7c09b7bda266e8b34cea3c0d65 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 18:26:55 -0400 Subject: [PATCH 0484/2301] [CIR] Don't attempt to set init for non-VarDecls in buildStoreOfScalar We hit this codepath for FieldDecl construction which obviously doesn't have it's own alloca since it's allocated as part of the struct itself. --- clang/lib/CIR/CIRGenExpr.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 9f9055503f64..7c7be1a9cafc 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -189,7 +189,7 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, assert(Addr.getPointer() && "expected pointer to exist"); auto SrcAlloca = dyn_cast_or_null(Addr.getPointer().getDefiningOp()); - if (InitDecl) { + if (InitDecl && SrcAlloca) { InitStyle IS; const VarDecl *VD = dyn_cast_or_null(InitDecl); assert(VD && "VarDecl expected"); @@ -210,8 +210,10 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, SrcAlloca.setInitAttr(InitStyleAttr::get(builder.getContext(), IS)); } } + assert(currSrcLoc && "must pass in source location"); builder.create(*currSrcLoc, Value, Addr.getPointer()); + if (isNontemporal) { llvm_unreachable("NYI"); } From fd523560c3c562e1ff82b3c0bdbf5c4d378156b2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 18:27:48 -0400 Subject: [PATCH 0485/2301] [CIR] Convert an assert to an unreachable Note that the isNontemporal was added below in an earlier commit to match the ordering from clang codegen. --- clang/lib/CIR/CIRGenExpr.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 7c7be1a9cafc..d8580840e077 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -181,8 +181,9 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, // TODO: LValueIsSuitableForInlineAtomic ? // TODO: TBAA Value = buildToMemory(Value, Ty); - if (Ty->isAtomicType() || isNontemporal) { - assert(0 && "not implemented"); + + if (Ty->isAtomicType()) { + llvm_unreachable("NYI"); } // Update the alloca with more info on initialization. From 2b818ea54b37b59fcb968cd03fece544521ff28f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 18:28:20 -0400 Subject: [PATCH 0486/2301] [CIR] Convert a TODO to an unreachable --- clang/lib/CIR/CIRGenExpr.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index d8580840e077..68a89ea0d618 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -177,9 +177,8 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, LValueBaseInfo BaseInfo, const Decl *InitDecl, bool isNontemporal) { - // TODO: PreserveVec3Type - // TODO: LValueIsSuitableForInlineAtomic ? - // TODO: TBAA + // TODO(CIR): this has fallen out of date with codegen + Value = buildToMemory(Value, Ty); if (Ty->isAtomicType()) { From b9d82398a508e03d3f2a8a440344dd5708c19451 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 18:28:49 -0400 Subject: [PATCH 0487/2301] [CIR] Dispatch to buildScalarConversion when visiting an IntegralCast When we hit an CK_IntegralCast simply buildScalarConversion --- clang/lib/CIR/CIRGenExprScalar.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index c08c87b8df4c..e697b38bb82a 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -230,6 +230,16 @@ class ScalarExprEmitter : public StmtVisitor { case CK_IntegralToFixedPoint: llvm_unreachable("NYI"); + case CK_IntegralCast: { + ScalarConversionOpts Opts; + if (auto *ICE = dyn_cast(CE)) { + if (!ICE->isPartOfExplicitCast()) + Opts = ScalarConversionOpts(CGF.SanOpts); + } + return buildScalarConversion(Visit(E), E->getType(), DestTy, + CE->getExprLoc(), Opts); + } + case CK_IntegralToFloating: llvm_unreachable("NYI"); case CK_FloatingToIntegral: From 28a21f66e6ba541cce1d92aebb4e3d7aa59bfde9 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 18:30:25 -0400 Subject: [PATCH 0488/2301] [CIR] Implement buildScalarCast This simply checks a few conditions and guards against them with unreachables and builds a cir::CastOp of kind Integral for CK_IntegralCasts --- clang/lib/CIR/CIRGenExprScalar.cpp | 52 ++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index e697b38bb82a..e26e316eda01 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -108,6 +108,10 @@ class ScalarExprEmitter : public StmtVisitor { EmitImplicitIntegerSignChangeChecks( SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {} }; + mlir::Value buildScalarCast(mlir::Value Src, QualType SrcType, + QualType DstType, mlir::Type SrcTy, + mlir::Type DstTy, ScalarConversionOpts Opts); + // Emit code for an explicit or implicit cast. Implicit // casts have to handle a more broad range of conversions than explicit // casts, as they handle things like function to ptr-to-function decay @@ -175,6 +179,7 @@ class ScalarExprEmitter : public StmtVisitor { CGF.getLoc(E->getExprLoc()), Ty, mlir::cir::NullAttr::get(Builder.getContext(), Ty)); } + case CK_NullToMemberPointer: llvm_unreachable("NYI"); case CK_ReinterpretMemberPointer: @@ -615,7 +620,6 @@ class ScalarExprEmitter : public StmtVisitor { assert(0 && "not implemented"); // Finally, we have the arithmetic types: real int/float. - assert(0 && "not implemented"); mlir::Value Res = nullptr; mlir::Type ResTy = DstTy; @@ -633,7 +637,8 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } - // TODO: Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); + Res = buildScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); + if (DstTy != ResTy) { llvm_unreachable("NYI"); } @@ -714,3 +719,46 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { return Visit(E->getInit(0)); } +mlir::Value ScalarExprEmitter::buildScalarCast( + mlir::Value Src, QualType SrcType, QualType DstType, mlir::Type SrcTy, + mlir::Type DstTy, ScalarConversionOpts Opts) { + // The Element types determine the type of cast to perform. + mlir::Type SrcElementTy; + mlir::Type DstElementTy; + QualType SrcElementType; + QualType DstElementType; + if (SrcType->isMatrixType() || DstType->isMatrixType()) { + llvm_unreachable("NYI"); + } else { + assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && + "cannot cast between matrix and non-matrix types"); + SrcElementTy = SrcTy; + DstElementTy = DstTy; + SrcElementType = SrcType; + DstElementType = DstType; + } + + if (SrcElementTy.isa()) { + bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType(); + if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) { + llvm_unreachable("NYI"); + } + + if (DstElementTy.isa()) + return Builder.create( + Src.getLoc(), DstTy, mlir::cir::CastKind::integral, Src); + if (InputSigned) + llvm_unreachable("NYI"); + + llvm_unreachable("NYI"); + } + + if (DstElementTy.isa()) { + llvm_unreachable("NYI"); + } + + // if (DstElementTy.getTypeID() < SrcElementTy.getTypeID()) + // llvm_unreachable("NYI"); + + llvm_unreachable("NYI"); +} From f32177665e8a2786a57827217f6fc41b258c50bd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 18:32:11 -0400 Subject: [PATCH 0489/2301] [CIR] Add buildAddrOfFieldStorage to get the address of a member var When building a ctor memberwise initialization, we need to get the address of the individual members. This function simply constructs the parameters to build a StructElementAddr (name pending) and creates it. --- clang/lib/CIR/CIRGenExpr.cpp | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 68a89ea0d618..f389162a78ef 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -5,6 +5,7 @@ #include "clang/AST/GlobalDecl.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Value.h" @@ -28,6 +29,28 @@ static Address buildPreserveStructAccess(CIRGenFunction &CGF, LValue base, llvm_unreachable("NYI"); } +/// Get the address of a zero-sized field within a record. The resulting address +/// doesn't necessarily have the right type. +static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, + const FieldDecl *field) { + if (field->isZeroSize(CGF.getContext())) + llvm_unreachable("NYI"); + + auto loc = CGF.getLoc(field->getLocation()); + + auto fieldType = CGF.convertType(field->getType()); + auto fieldPtr = + mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fieldType); + auto sea = CGF.getBuilder().create( + loc, fieldPtr, CGF.CXXThisValue->getOperand(0), field->getName()); + + // TODO: We could get the alignment from the CIRGenRecordLayout, but given the + // member name based lookup of the member here we probably shouldn't be. We'll + // have to consider this later. + auto addr = Address(sea->getResult(0), CharUnits::One()); + return addr; +} + LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { LValueBaseInfo BaseInfo = base.getBaseInfo(); @@ -68,7 +91,7 @@ LValue CIRGenFunction::buildLValueForField(LValue base, } else { if (!IsInPreservedAIRegion && (!getDebugInfo() || !rec->hasAttr())) - llvm_unreachable("NYI"); + addr = buildAddrOfFieldStorage(*this, addr, field); else // Remember the original struct field index addr = buildPreserveStructAccess(*this, base, addr, field); @@ -82,7 +105,9 @@ LValue CIRGenFunction::buildLValueForField(LValue base, // Make sure that the address is pointing to the right type. This is critical // for both unions and structs. A union needs a bitcast, a struct element will // need a bitcast if the CIR type laid out doesn't match the desired type. - llvm_unreachable("NYI"); + // TODO(CIR): CodeGen requires a bitcast here for unions or for structs where + // the LLVM type doesn't match the desired type. No idea when the latter might + // occur, though. if (field->hasAttr()) llvm_unreachable("NYI"); From 8184e35f6ff2ae8129ce7f21e3ff353d0274d2c0 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 May 2022 18:34:47 -0400 Subject: [PATCH 0490/2301] [CIR] Fix String.cpp to test memberwise initializers --- clang/test/CIR/CodeGen/String.cpp | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 4ff90ea2ecc9..248013df0cd5 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -1,5 +1,4 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -// XFAIL: * class String { char *storage; @@ -7,9 +6,20 @@ class String { long capacity; public: - String() : size{size} {} + String() : size{0} {} }; void test() { String s; } + +// CHECK: func @_ZN6StringC2Ev +// CHECK-NEXT: %0 = cir.alloca !cir.ptr +// CHECK-NEXT: cir.store %arg0, %0 +// CHECK-NEXT: %1 = cir.load %0 +// CHECK-NEXT: %2 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr +// CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %4 = cir.cast(integral, %3 : i32), i64 +// CHECK-NEXT: cir.store %4, %2 : i64, cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } From 6dda78b8af9d496a4b7166e915e64e6c4c15c8d6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 5 May 2022 15:32:28 -0700 Subject: [PATCH 0491/2301] [CIR] Add building pieces for CIR based attributes - Cmake and tablegen bits - Base class for CIR attribute - Hook it up with other things in the dialect - Add a dummy/incomplete cst array to make sure all appropriated generated methods can be tested, but no real functionality just yet. --- mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h | 1 - mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td | 9 +++++++++ mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h | 4 ++-- mlir/lib/Dialect/CIR/IR/CMakeLists.txt | 1 + 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h index 4205aa7bb906..9dfa8184f941 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h @@ -26,4 +26,3 @@ #include "mlir/Dialect/CIR/IR/CIROpsAttributes.h.inc" #endif // MLIR_DIALECT_CIR_IR_CIRATTRS_H_ - diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td index cc8e72b50902..85cc10f1862d 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td @@ -37,4 +37,13 @@ def NullAttr : CIR_Attr<"Null", "null", [TypedAttrInterface]> { let assemblyFormat = [{}]; } +def CstArrayAttr : CIR_Attr<"CstArray", "cst_array"> { + let summary = "An Attribute containing a mlir::ArrayAttr"; + let description = [{ + An CIR array attribute is an array of literals of the specified attr types. + }]; + + let assemblyFormat = "`<` `>`"; +} + #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h index 19921c11a927..f2e4f5bdd2da 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h @@ -1,4 +1,4 @@ -//===- CIRTypes.h - MLIR SPIR-V Types -------------------------*- C++ -*-===// +//===- CIRTypes.h - MLIR CIR Types ------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file declares the types in the SPIR-V dialect. +// This file declares the types in the CIR dialect. // //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt index 9b7d5391b9b6..fbfb52f6113d 100644 --- a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt @@ -2,6 +2,7 @@ add_mlir_dialect_library(MLIRCIR CIRAttrs.cpp CIRDialect.cpp CIRTypes.cpp + CIRAttrs.cpp ADDITIONAL_HEADER_DIRS ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/CIR From d9f0d406b07f98abc0d03cc462b8e8f26e0f4e12 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 12 May 2022 15:31:05 -0700 Subject: [PATCH 0492/2301] [CIR] Change the type order on global constants Add one more test while here. --- clang/lib/CIR/CIRGenCstEmitter.h | 10 +- clang/lib/CIR/CIRGenExprCst.cpp | 118 ++++++++++++++++++- clang/test/CIR/CodeGen/globals.cpp | 12 +- clang/test/CIR/IR/global.cir | 6 +- mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td | 25 +++- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 2 +- mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp | 1 - mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 55 +++++---- 8 files changed, 184 insertions(+), 45 deletions(-) diff --git a/clang/lib/CIR/CIRGenCstEmitter.h b/clang/lib/CIR/CIRGenCstEmitter.h index 74eb103c3abc..8878aba2c476 100644 --- a/clang/lib/CIR/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CIRGenCstEmitter.h @@ -86,24 +86,24 @@ class ConstantEmitter { // initializer or to propagate to another context; for example, // side effects, or emitting an initialization that requires a // reference to its current location. - mlir::Attribute emitForMemory(mlir::TypedAttr C, QualType T) { + mlir::Attribute emitForMemory(mlir::Attribute C, QualType T) { return emitForMemory(CGM, C, T); } // static llvm::Constant *emitNullForMemory(CodeGenModule &CGM, QualType T); - static mlir::Attribute emitForMemory(CIRGenModule &CGM, mlir::TypedAttr C, + static mlir::Attribute emitForMemory(CIRGenModule &CGM, mlir::Attribute C, clang::QualType T); // These are private helper routines of the constant emitter that // can't actually be private because things are split out into helper // functions and classes. - mlir::TypedAttr tryEmitPrivateForVarInit(const VarDecl &D); + mlir::Attribute tryEmitPrivateForVarInit(const VarDecl &D); mlir::TypedAttr tryEmitPrivate(const Expr *E, QualType T); mlir::TypedAttr tryEmitPrivateForMemory(const Expr *E, QualType T); - mlir::TypedAttr tryEmitPrivate(const APValue &value, QualType T); - mlir::TypedAttr tryEmitPrivateForMemory(const APValue &value, QualType T); + mlir::Attribute tryEmitPrivate(const APValue &value, QualType T); + mlir::Attribute tryEmitPrivateForMemory(const APValue &value, QualType T); private: void initializeNonAbstract(clang::LangAS destAS) { diff --git a/clang/lib/CIR/CIRGenExprCst.cpp b/clang/lib/CIR/CIRGenExprCst.cpp index 8ff373dab56b..fc03bbe76fcf 100644 --- a/clang/lib/CIR/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CIRGenExprCst.cpp @@ -317,7 +317,7 @@ static QualType getNonMemoryType(CIRGenModule &CGM, QualType type) { return type; } -mlir::TypedAttr ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { +mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { // Make a quick check if variable can be default NULL initialized // and avoid going through rest of code which may do, for c++11, // initialization of memory to all NULLs. @@ -345,7 +345,7 @@ mlir::TypedAttr ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { return {}; } -mlir::TypedAttr ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, +mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, QualType destType) { auto nonMemoryDestType = getNonMemoryType(CGM, destType); auto C = tryEmitPrivate(value, nonMemoryDestType); @@ -361,7 +361,7 @@ mlir::TypedAttr ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, } mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, - mlir::TypedAttr C, + mlir::Attribute C, QualType destType) { // For an _Atomic-qualified constant, we may need to add tail padding. if (auto AT = destType->getAs()) { @@ -369,14 +369,73 @@ mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, } // Zero-extend bool. - if (C.getType().isa()) { + auto typed = C.dyn_cast(); + if (typed && typed.getType().isa()) { assert(0 && "not implemented"); } return C; } -mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const APValue &Value, +static mlir::Attribute +buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, + mlir::Type CommonElementType, unsigned ArrayBound, + SmallVectorImpl &Elements, + mlir::Attribute Filler) { + auto isFillerNullVal = [&](mlir::Attribute f) { + // TODO(cir): introduce a CIR type for null and check for the + // attribute type here. For now assume the filler isn't null. + if (!f) + return true; + return false; + }; + + // Figure out how long the initial prefix of non-zero elements is. + unsigned NonzeroLength = ArrayBound; + if (Elements.size() < NonzeroLength && isFillerNullVal(Filler)) + NonzeroLength = Elements.size(); + if (NonzeroLength == Elements.size()) { + while (NonzeroLength > 0 && isFillerNullVal(Elements[NonzeroLength - 1])) + --NonzeroLength; + } + + if (NonzeroLength == 0) + assert(0 && "NYE"); + + // Add a zeroinitializer array filler if we have lots of trailing zeroes. + unsigned TrailingZeroes = ArrayBound - NonzeroLength; + if (TrailingZeroes >= 8) { + assert(0 && "NYE"); + assert(Elements.size() >= NonzeroLength && + "missing initializer for non-zero element"); + + // TODO(cir): If all the elements had the same type up to the trailing + // zeroes, emit a struct of two arrays (the nonzero data and the + // zeroinitializer). Use DesiredType to get the element type. + } else if (Elements.size() != ArrayBound) { + // Otherwise pad to the right size with the filler if necessary. + assert(0 && "NYE"); + } + + // If all elements have the same type, just emit an array constant. + if (CommonElementType) { + SmallVector Eles; + Eles.reserve(Elements.size()); + for (auto const &Element : Elements) + Eles.push_back(Element); + + return mlir::cir::CstArrayAttr::get( + mlir::cir::ArrayType::get(CGM.getBuilder().getContext(), + CommonElementType, ArrayBound), + mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Eles)); + } + + // We have mixed types. Use a packed struct. + assert(0 && "NYE"); + return {}; +} + +mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, QualType DestType) { switch (Value.getKind()) { case APValue::None: @@ -399,6 +458,54 @@ mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const APValue &Value, return CGM.getBuilder().getFloatAttr(ty, Init); } } + case APValue::Array: { + const ArrayType *ArrayTy = CGM.getASTContext().getAsArrayType(DestType); + unsigned NumElements = Value.getArraySize(); + unsigned NumInitElts = Value.getArrayInitializedElts(); + auto isFillerNullVal = [&](mlir::Attribute f) { + // TODO(cir): introduce a CIR type for null and check for the + // attribute type here. For now assume that if there's a filler, + // it's a null one. + return true; + }; + + // Emit array filler, if there is one. + mlir::Attribute Filler; + if (Value.hasArrayFiller()) { + assert(0 && "NYI"); + } + + // Emit initializer elements. + SmallVector Elts; + if (Filler && isFillerNullVal(Filler)) + Elts.reserve(NumInitElts + 1); + else + Elts.reserve(NumElements); + + mlir::Type CommonElementType; + for (unsigned I = 0; I < NumInitElts; ++I) { + auto C = tryEmitPrivateForMemory(Value.getArrayInitializedElt(I), + ArrayTy->getElementType()); + if (!C) + return nullptr; + + assert(C.isa() && "This should always be a TypedAttr."); + auto CTyped = C.cast(); + + if (I == 0) + CommonElementType = CTyped.getType(); + else if (CTyped.getType() != CommonElementType) + CommonElementType = {}; + auto typedC = llvm::dyn_cast(C); + if (!typedC) + llvm_unreachable("this should always be typed"); + Elts.push_back(typedC); + } + + auto Desired = CGM.getTypes().ConvertType(DestType); + return buildArrayConstant(CGM, Desired, CommonElementType, NumElements, + Elts, Filler); + } case APValue::LValue: case APValue::FixedPoint: case APValue::ComplexInt: @@ -407,7 +514,6 @@ mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const APValue &Value, case APValue::AddrLabelDiff: case APValue::Struct: case APValue::Union: - case APValue::Array: case APValue::MemberPointer: assert(0 && "not implemented"); } diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 14807aa0edda..6569428f9d44 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -7,9 +7,13 @@ const int b = 4; // unless used wont be generated unsigned long int c = 2; float y = 3.4; double w = 4.3; +char x = '3'; +unsigned char rgb[3] = {0, 233, 33}; // CHECK: module { -// CHECK-NEXT: cir.global @a : i32 = 3 -// CHECK-NEXT: cir.global @c : i64 = 2 -// CHECK-NEXT: cir.global @y : f32 = 3.400000e+00 -// CHECK-NEXT: cir.global @w : f64 = 4.300000e+00 \ No newline at end of file +// CHECK-NEXT: cir.global @a = 3 : i32 +// CHECK-NEXT: cir.global @c = 2 : i64 +// CHECK-NEXT: cir.global @y = 3.400000e+00 : f32 +// CHECK-NEXT: cir.global @w = 4.300000e+00 : f64 +// CHECK-NEXT: cir.global @x = 51 : i8 +// CHECK-NEXT: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 5138aa91a4be..e3cd96903835 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -1,10 +1,12 @@ // RUN: cir-tool %s | FileCheck %s module { - cir.global @a : i32 = 3 + cir.global @a = 3 : i32 + cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array func.func @use_global() { cir.return } } -// CHECK: cir.global @a : i32 = 3 +// CHECK: cir.global @a = 3 : i32 +// CHECK: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td index 85cc10f1862d..1630a8db9166 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td @@ -37,13 +37,34 @@ def NullAttr : CIR_Attr<"Null", "null", [TypedAttrInterface]> { let assemblyFormat = [{}]; } -def CstArrayAttr : CIR_Attr<"CstArray", "cst_array"> { +def CstArrayAttr : CIR_Attr<"CstArray", "cst_array", [TypedAttrInterface]> { let summary = "An Attribute containing a mlir::ArrayAttr"; let description = [{ An CIR array attribute is an array of literals of the specified attr types. }]; - let assemblyFormat = "`<` `>`"; + // `$type` is the `self` type of the attribute (i.e. the type of the + // Attribute itself). + // + // `arrayAttr` is the actual attribute array with elements for this constant + // array, there's yet no need to own these elements. + let parameters = (ins AttributeSelfTypeParameter<"">:$type, + "ArrayAttr":$value); + + // Define a custom builder for the type; that removes the need to pass + // in an MLIRContext instance, as it can be infered from the `type`. + let builders = [ + AttrBuilderWithInferredContext<(ins "mlir::cir::ArrayType":$type, + "ArrayAttr":$value), [{ + return $_get(type.getContext(), type, value); + }]> + ]; + + // Generate parser and printer logic, example: + // + // #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> + // + let assemblyFormat = "`<` $value `>`"; } #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 7f17f530fd82..baffe3b71aa7 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -892,7 +892,7 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { let assemblyFormat = [{ ($sym_visibility^)? (`constant` $constant^)? - $sym_name `:` + $sym_name custom($sym_type, $initial_value) attr-dict }]; diff --git a/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp b/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp index be80d42f0a00..70e253c159c6 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp @@ -13,7 +13,6 @@ #include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/CIR/IR/CIROpsEnums.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 4942356127b0..e869cf9f2222 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -88,6 +88,11 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return success(); } + if (attrType.isa()) { + // CstArrayAttr is already verified to bing with cir.array type. + return success(); + } + assert(attrType.isa() && "What else could we be looking at here?"); return op->emitOpError("cannot have value of type ") << attrType.cast().getType(); @@ -101,16 +106,9 @@ LogicalResult ConstantOp::verify() { } static ParseResult parseConstantValue(OpAsmParser &parser, - mlir::Attribute &valueAttr, - mlir::Type ty = {}) { - if (succeeded(parser.parseOptionalKeyword("nullptr"))) { - valueAttr = UnitAttr::get(parser.getContext()); - return success(); - } - + mlir::Attribute &valueAttr) { NamedAttrList attr; - - if (parser.parseAttribute(valueAttr, ty, "value", attr).failed()) { + if (parser.parseAttribute(valueAttr, "value", attr).failed()) { return parser.emitError(parser.getCurrentLocation(), "expected constant attribute to match type"); } @@ -120,12 +118,8 @@ static ParseResult parseConstantValue(OpAsmParser &parser, // FIXME: create a CIRCstAttr and hide this away for both global // initialization and cir.cst operation. -static void printConstant(OpAsmPrinter &p, Attribute value, - bool omitType = false) { - if (omitType) - p.printAttributeWithoutType(value); - else - p.printAttribute(value); +static void printConstant(OpAsmPrinter &p, Attribute value) { + p.printAttribute(value); } static void printConstantValue(OpAsmPrinter &p, cir::ConstantOp op, @@ -893,10 +887,12 @@ LogicalResult LoopOp::verify() { static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, TypeAttr type, Attribute initAttr) { - p << type; if (!op.isDeclaration()) { - p << " = "; - printConstant(p, initAttr, /*omitType=*/true); + p << "= "; + // This also prints the type... + printConstant(p, initAttr); + } else { + p << type; } } @@ -904,16 +900,27 @@ static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, Attribute &initialValueAttr) { Type type; - if (parser.parseType(type)) - return failure(); - typeAttr = TypeAttr::get(type); - - if (parser.parseOptionalEqual().failed()) + if (parser.parseOptionalEqual().failed()) { + // Absence of equal means a declaration, so we need to parse the type. + // cir.global @a : i32 + if (parser.parseColonType(type)) + return failure(); + typeAttr = TypeAttr::get(type); return success(); + } - if (parseConstantValue(parser, initialValueAttr, type).failed()) + // Parse constant with initializer, examples: + // cir.global @y = 3.400000e+00 : f32 + // cir.global @rgb = #cir.cst_array<[...] : !cir.array> + Attribute attr; + if (parseConstantValue(parser, attr).failed()) return failure(); + assert(attr.isa() && + "Non-typed attrs shouldn't appear here."); + initialValueAttr = attr.cast(); + typeAttr = TypeAttr::get(attr.cast().getType()); + return success(); } From 4ae87b3177b8f7320fa453b93aceab6a6c840c81 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 12 May 2022 16:35:00 -0700 Subject: [PATCH 0493/2301] [CIR] Add verifier for cst array attributes --- clang/test/CIR/IR/invalid.cir | 12 +++++++++ mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td | 3 +++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 27 ++++++++++++++++++++ 3 files changed, 42 insertions(+) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index a1301521ca96..dd0d0b406211 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -149,3 +149,15 @@ func.func @b0() { } cir.return } + +// ----- + +module { + cir.global @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array // expected-error {{cst array element should match array element type}} +} // expected-error {{expected constant attribute to match type}} + +// ----- + +module { + cir.global @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array // expected-error {{cst array size should match type size}} +} // expected-error {{expected constant attribute to match type}} diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td index 1630a8db9166..4e53d74598b8 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td @@ -65,6 +65,9 @@ def CstArrayAttr : CIR_Attr<"CstArray", "cst_array", [TypedAttrInterface]> { // #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // let assemblyFormat = "`<` $value `>`"; + + // Enable verifier. + let genVerifyDecl = 1; } #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index e869cf9f2222..8a71858aa9ac 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -969,6 +969,33 @@ mlir::OpTrait::impl::verifySameFirstOperandAndResultType(Operation *op) { return success(); } +//===----------------------------------------------------------------------===// +// CIR attributes +//===----------------------------------------------------------------------===// + +LogicalResult mlir::cir::CstArrayAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::mlir::Type type, ArrayAttr value) { + // Make sure both number of elements and subelement types match type. + mlir::cir::ArrayType at = type.cast(); + if (at.getSize() != value.size()) + return emitError() << "cst array size should match type size"; + LogicalResult eltTypeCheck = success(); + value.walkImmediateSubElements( + [&](Attribute attr) { + // Once we find a mismatch, stop there. + if (eltTypeCheck.failed()) + return; + auto typedAttr = attr.dyn_cast(); + if (!typedAttr || typedAttr.getType() != at.getEltType()) { + eltTypeCheck = failure(); + emitError() << "cst array element should match array element type"; + } + }, + [&](Type type) {}); + return eltTypeCheck; +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From 32835807165ad2d6a61bc3abe25e3138f28b21f9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 May 2022 00:20:11 -0700 Subject: [PATCH 0494/2301] [CIR][CodeGen] ConstantEmitter: support char array literals Pretty stringish form to be added later. --- clang/lib/CIR/CIRGenCstEmitter.h | 5 ++ clang/lib/CIR/CIRGenExprCst.cpp | 73 +++++++++++++++++++++++------- clang/test/CIR/CodeGen/globals.cpp | 2 + 3 files changed, 63 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CIRGenCstEmitter.h b/clang/lib/CIR/CIRGenCstEmitter.h index 8878aba2c476..6b31f6c7c155 100644 --- a/clang/lib/CIR/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CIRGenCstEmitter.h @@ -105,6 +105,10 @@ class ConstantEmitter { mlir::Attribute tryEmitPrivate(const APValue &value, QualType T); mlir::Attribute tryEmitPrivateForMemory(const APValue &value, QualType T); + mlir::Attribute tryEmitAbstract(const APValue &value, QualType destType); + mlir::Attribute tryEmitAbstractForMemory(const APValue &value, + QualType destType); + private: void initializeNonAbstract(clang::LangAS destAS) { assert(!InitializedNonAbstract); @@ -126,6 +130,7 @@ class ConstantEmitter { Abstract = true; return saved; } + mlir::Attribute validateAndPopAbstract(mlir::Attribute C, AbstractState save); }; } // namespace cir diff --git a/clang/lib/CIR/CIRGenExprCst.cpp b/clang/lib/CIR/CIRGenExprCst.cpp index fc03bbe76fcf..361a12ae263d 100644 --- a/clang/lib/CIR/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CIRGenExprCst.cpp @@ -283,6 +283,18 @@ class ConstExprEmitter // ConstantEmitter //===----------------------------------------------------------------------===// +mlir::Attribute ConstantEmitter::validateAndPopAbstract(mlir::Attribute C, + AbstractState saved) { + Abstract = saved.OldValue; + + assert(saved.OldPlaceholdersSize == PlaceholderAddresses.size() && + "created a placeholder while doing an abstract emission?"); + + // No validation necessary for now. + // No cleanup to do for now. + return C; +} + mlir::Attribute ConstantEmitter::tryEmitForInitializer(const VarDecl &D) { initializeNonAbstract(D.getType().getAddressSpace()); return markIfFailed(tryEmitPrivateForVarInit(D)); @@ -345,6 +357,20 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { return {}; } +mlir::Attribute ConstantEmitter::tryEmitAbstract(const APValue &value, + QualType destType) { + auto state = pushAbstract(); + auto C = tryEmitPrivate(value, destType); + return validateAndPopAbstract(C, state); +} + +mlir::Attribute ConstantEmitter::tryEmitAbstractForMemory(const APValue &value, + QualType destType) { + auto nonMemoryDestType = getNonMemoryType(CGM, destType); + auto C = tryEmitAbstract(value, nonMemoryDestType); + return (C ? emitForMemory(C, destType) : nullptr); +} + mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, QualType destType) { auto nonMemoryDestType = getNonMemoryType(CGM, destType); @@ -381,21 +407,22 @@ static mlir::Attribute buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, mlir::Type CommonElementType, unsigned ArrayBound, SmallVectorImpl &Elements, - mlir::Attribute Filler) { - auto isFillerNullVal = [&](mlir::Attribute f) { - // TODO(cir): introduce a CIR type for null and check for the - // attribute type here. For now assume the filler isn't null. - if (!f) + mlir::TypedAttr Filler) { + auto isNullValue = [&](mlir::Attribute f) { + // TODO(cir): introduce char type in CIR and check for that instead. + auto intVal = f.dyn_cast_or_null(); + assert(intVal && "not implemented"); + if (intVal.getInt() == 0) return true; return false; }; // Figure out how long the initial prefix of non-zero elements is. unsigned NonzeroLength = ArrayBound; - if (Elements.size() < NonzeroLength && isFillerNullVal(Filler)) + if (Elements.size() < NonzeroLength && isNullValue(Filler)) NonzeroLength = Elements.size(); if (NonzeroLength == Elements.size()) { - while (NonzeroLength > 0 && isFillerNullVal(Elements[NonzeroLength - 1])) + while (NonzeroLength > 0 && isNullValue(Elements[NonzeroLength - 1])) --NonzeroLength; } @@ -414,7 +441,9 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, // zeroinitializer). Use DesiredType to get the element type. } else if (Elements.size() != ArrayBound) { // Otherwise pad to the right size with the filler if necessary. - assert(0 && "NYE"); + Elements.resize(ArrayBound, Filler); + if (Filler.getType() != CommonElementType) + CommonElementType = {}; } // If all elements have the same type, just emit an array constant. @@ -462,22 +491,27 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, const ArrayType *ArrayTy = CGM.getASTContext().getAsArrayType(DestType); unsigned NumElements = Value.getArraySize(); unsigned NumInitElts = Value.getArrayInitializedElts(); - auto isFillerNullVal = [&](mlir::Attribute f) { - // TODO(cir): introduce a CIR type for null and check for the - // attribute type here. For now assume that if there's a filler, - // it's a null one. - return true; + auto isNullValue = [&](mlir::Attribute f) { + // TODO(cir): introduce char type in CIR and check for that instead. + auto intVal = f.dyn_cast_or_null(); + assert(intVal && "not implemented"); + if (intVal.getInt() == 0) + return true; + return false; }; // Emit array filler, if there is one. mlir::Attribute Filler; if (Value.hasArrayFiller()) { - assert(0 && "NYI"); + Filler = tryEmitAbstractForMemory(Value.getArrayFiller(), + ArrayTy->getElementType()); + if (!Filler) + return {}; } // Emit initializer elements. SmallVector Elts; - if (Filler && isFillerNullVal(Filler)) + if (Filler && isNullValue(Filler)) Elts.reserve(NumInitElts + 1); else Elts.reserve(NumElements); @@ -487,7 +521,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, auto C = tryEmitPrivateForMemory(Value.getArrayInitializedElt(I), ArrayTy->getElementType()); if (!C) - return nullptr; + return {}; assert(C.isa() && "This should always be a TypedAttr."); auto CTyped = C.cast(); @@ -503,8 +537,13 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, } auto Desired = CGM.getTypes().ConvertType(DestType); + + auto typedFiller = llvm::dyn_cast_or_null(Filler); + if (Filler && !typedFiller) + llvm_unreachable("this should always be typed"); + return buildArrayConstant(CGM, Desired, CommonElementType, NumElements, - Elts, Filler); + Elts, typedFiller); } case APValue::LValue: case APValue::FixedPoint: diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 6569428f9d44..75b3feac74f2 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -9,6 +9,7 @@ float y = 3.4; double w = 4.3; char x = '3'; unsigned char rgb[3] = {0, 233, 33}; +char alpha[4] = "abc"; // CHECK: module { // CHECK-NEXT: cir.global @a = 3 : i32 @@ -17,3 +18,4 @@ unsigned char rgb[3] = {0, 233, 33}; // CHECK-NEXT: cir.global @w = 4.300000e+00 : f64 // CHECK-NEXT: cir.global @x = 51 : i8 // CHECK-NEXT: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array +// CHECK-NEXT: cir.global @alpha = #cir.cst_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8]> : !cir.array From f1275827c9748e52651759ef8a48fd8fbb9d16b1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 May 2022 11:56:44 -0700 Subject: [PATCH 0495/2301] [CIR][CodeGen] Globals: add ConstantLValueEmitter visitor skeleton and some basic logic This is part of emitting globals initializers for constant addresses (e.g. const char * string literals) --- clang/lib/CIR/Address.h | 30 ++++++++++++++++++++++++++++++ clang/lib/CIR/CIRGenExprCst.cpp | 1 + 2 files changed, 31 insertions(+) diff --git a/clang/lib/CIR/Address.h b/clang/lib/CIR/Address.h index 8f371f13f746..815692fc8646 100644 --- a/clang/lib/CIR/Address.h +++ b/clang/lib/CIR/Address.h @@ -71,6 +71,36 @@ class Address { } }; +/// A specialization of Address that requires the address to be an +/// MLIR attribute +class ConstantAddress : public Address { + ConstantAddress(std::nullptr_t) : Address(nullptr) {} + +public: + ConstantAddress(mlir::Value pointer, mlir::Type elementType, + clang::CharUnits alignment) + : Address(pointer, elementType, alignment) {} + + static ConstantAddress invalid() { return ConstantAddress(nullptr); } + + mlir::Value getPointer() const { return Address::getPointer(); } + + ConstantAddress getElementBitCast(mlir::Type ElemTy) const { + assert(0 && "NYI"); + } + + static bool isaImpl(Address addr) { + return addr.getPointer() ? true : false; + // TODO(cir): in LLVM codegen this (and other methods) are implemented via + // llvm::isa, decide on what abstraction to use here. + // return llvm::isa(addr.getPointer()); + } + static ConstantAddress castImpl(Address addr) { + return ConstantAddress(addr.getPointer(), addr.getElementType(), + addr.getAlignment()); + } +}; + } // namespace cir #endif // LLVM_CLANG_LIB_CIR_ADDRESS_H diff --git a/clang/lib/CIR/CIRGenExprCst.cpp b/clang/lib/CIR/CIRGenExprCst.cpp index 361a12ae263d..0d7df1dc924a 100644 --- a/clang/lib/CIR/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CIRGenExprCst.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "Address.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" From 5eaa4621ac7250b738c3b33a7015440ae65bb0c6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 May 2022 14:30:37 -0700 Subject: [PATCH 0496/2301] [CIR] Add cir.get_global operation Out of a symbol reference produces a SSA value representing the address of such global. --- clang/test/CIR/IR/global.cir | 4 +++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 37 ++++++++++++++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 22 +++++++++++++ 3 files changed, 63 insertions(+) diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index e3cd96903835..a789a8fffe82 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -4,9 +4,13 @@ module { cir.global @a = 3 : i32 cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array func.func @use_global() { + %0 = cir.get_global @a : cir.ptr cir.return } } // CHECK: cir.global @a = 3 : i32 // CHECK: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array + +// CHECK: func @use_global() +// CHECK-NEXT: %0 = cir.get_global @a : cir.ptr diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index baffe3b71aa7..8ab7a2d16fca 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -219,6 +219,8 @@ def AllocaOp : CIR_Op<"alloca", [ bool isPointerType() { return getAllocaType().isa<::mlir::cir::PointerType>(); } }]; + // FIXME: we should not be printing `cir.ptr` below, that should come + // from the pointer type directly. let assemblyFormat = [{ $allocaType `,` `cir.ptr` type($addr) `,` `[` $name `,` $init `]` attr-dict }]; @@ -257,6 +259,8 @@ def LoadOp : CIR_Op<"load", [ [MemRead]>:$addr, UnitAttr:$isDeref); let results = (outs AnyType:$result); + // FIXME: we should not be printing `cir.ptr` below, that should come + // from the pointer type directly. let assemblyFormat = [{ (`deref` $isDeref^)? $addr `:` `cir.ptr` type($addr) `,` type($result) attr-dict @@ -288,6 +292,8 @@ def StoreOp : CIR_Op<"store", [ Arg:$addr); + // FIXME: we should not be printing `cir.ptr` below, that should come + // from the pointer type directly. let assemblyFormat = "$value `,` $addr attr-dict `:` type($value) `,` `cir.ptr` type($addr)"; } @@ -914,6 +920,37 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// GetGlobalOp +//===----------------------------------------------------------------------===// + +def GetGlobalOp : CIR_Op<"get_global", + [Pure, DeclareOpInterfaceMethods]> { + let summary = "get the memref pointing to a global variable"; + let description = [{ + The `cir.get_global` operation retrieves the address pointing to a + named global variable. If the global variable is marked constant, writing + to the resulting address (such as through a `cir.store` operation) is + undefined. Resulting type must always be a !cir.ptr<...> type. + + Example: + + ```mlir + %x = cir.get_global @foo : !cir.ptr + ``` + }]; + + let arguments = (ins FlatSymbolRefAttr:$name); + let results = (outs Res:$addr); + + // FIXME: we should not be printing `cir.ptr` below, that should come + // from the pointer type directly. + let assemblyFormat = "$name `:` `cir.ptr` type($addr) attr-dict"; + + // `GetGlobalOp` is fully verified by its traits. + let hasVerifier = 0; +} + def StructElementAddr : CIR_Op<"struct_element_addr"> { let summary = "get the address of a member of a struct"; let description = [{ diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 8a71858aa9ac..df6eba8e4db0 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -950,6 +950,28 @@ void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, ::mlir::TypeAttr::get(sym_type)); } +//===----------------------------------------------------------------------===// +// GetGlobalOp +//===----------------------------------------------------------------------===// + +LogicalResult +GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + // Verify that the result type underlying pointer type matches the type of the + // referenced cir.global op. + auto global = + symbolTable.lookupNearestSymbolFrom(*this, getNameAttr()); + if (!global) + return emitOpError("'") + << getName() << "' does not reference a valid cir.global"; + + auto resultType = getAddr().getType().dyn_cast(); + if (!resultType || global.getSymType() != resultType.getPointee()) + return emitOpError("result type pointee type '") + << resultType.getPointee() << "' does not match type " + << global.getSymType() << " of the global @" << getName(); + return success(); +} + //===----------------------------------------------------------------------===// // CIR defined traits //===----------------------------------------------------------------------===// From 7bfaa37c7bf2702d7100cce6ff8497aa7f427f93 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 May 2022 15:44:21 -0700 Subject: [PATCH 0497/2301] [CIR] Change CstArrayAttr to accept both ArrayAttr and StringAttr - Enhance verification to cope for the change - Testcases --- clang/test/CIR/CodeGen/globals.cpp | 4 +- clang/test/CIR/IR/global.cir | 6 +- clang/test/CIR/IR/invalid.cir | 10 ++- mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td | 16 ++-- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 87 ++++++++++++++++++-- 5 files changed, 103 insertions(+), 20 deletions(-) diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 75b3feac74f2..c839fc041b13 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -17,5 +17,5 @@ char alpha[4] = "abc"; // CHECK-NEXT: cir.global @y = 3.400000e+00 : f32 // CHECK-NEXT: cir.global @w = 4.300000e+00 : f64 // CHECK-NEXT: cir.global @x = 51 : i8 -// CHECK-NEXT: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array -// CHECK-NEXT: cir.global @alpha = #cir.cst_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8]> : !cir.array +// CHECK-NEXT: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> +// CHECK-NEXT: cir.global @alpha = #cir.cst_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8] : !cir.array> diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index a789a8fffe82..499b01ea6c56 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -2,7 +2,8 @@ module { cir.global @a = 3 : i32 - cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array + cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> + cir.global @b = #cir.cst_array<"example\00" : !cir.array> func.func @use_global() { %0 = cir.get_global @a : cir.ptr cir.return @@ -10,7 +11,8 @@ module { } // CHECK: cir.global @a = 3 : i32 -// CHECK: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array +// CHECK: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> +// CHECK: cir.global @b = #cir.cst_array<"example\00" : !cir.array> // CHECK: func @use_global() // CHECK-NEXT: %0 = cir.get_global @a : cir.ptr diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index dd0d0b406211..4e857ffa4945 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -153,11 +153,17 @@ func.func @b0() { // ----- module { - cir.global @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array // expected-error {{cst array element should match array element type}} + cir.global @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array element should match array element type}} } // expected-error {{expected constant attribute to match type}} // ----- module { - cir.global @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array // expected-error {{cst array size should match type size}} + cir.global @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array size should match type size}} +} // expected-error {{expected constant attribute to match type}} + +// ----- + +module { + cir.global @b = #cir.cst_array<"example\00" : !cir.array> // expected-error {{constant array element for string literals expects i8 array element type}} } // expected-error {{expected constant attribute to match type}} diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td index 4e53d74598b8..fc310567af83 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td @@ -38,7 +38,7 @@ def NullAttr : CIR_Attr<"Null", "null", [TypedAttrInterface]> { } def CstArrayAttr : CIR_Attr<"CstArray", "cst_array", [TypedAttrInterface]> { - let summary = "An Attribute containing a mlir::ArrayAttr"; + let summary = "A constant array from ArrayAttr or StringRefAttr"; let description = [{ An CIR array attribute is an array of literals of the specified attr types. }]; @@ -48,23 +48,23 @@ def CstArrayAttr : CIR_Attr<"CstArray", "cst_array", [TypedAttrInterface]> { // // `arrayAttr` is the actual attribute array with elements for this constant // array, there's yet no need to own these elements. + // + // TODO: create a trait for ArrayAttrOrStringAttr value instead of relying + // on verifier. let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "ArrayAttr":$value); + "Attribute":$value); // Define a custom builder for the type; that removes the need to pass // in an MLIRContext instance, as it can be infered from the `type`. let builders = [ AttrBuilderWithInferredContext<(ins "mlir::cir::ArrayType":$type, - "ArrayAttr":$value), [{ + "Attribute":$value), [{ return $_get(type.getContext(), type, value); }]> ]; - // Generate parser and printer logic, example: - // - // #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> - // - let assemblyFormat = "`<` $value `>`"; + // Printing and parsing available in CIRDialect.cpp + let hasCustomAssemblyFormat = 1; // Enable verifier. let genVerifyDecl = 1; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index df6eba8e4db0..51c3d900f545 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -17,6 +17,7 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/IR/Builders.h" +#include "mlir/IR/DialectImplementation.h" #include "mlir/IR/OpDefinition.h" #include "mlir/IR/OpImplementation.h" #include "mlir/IR/TypeUtilities.h" @@ -997,13 +998,31 @@ mlir::OpTrait::impl::verifySameFirstOperandAndResultType(Operation *op) { LogicalResult mlir::cir::CstArrayAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ::mlir::Type type, ArrayAttr value) { - // Make sure both number of elements and subelement types match type. + ::mlir::Type type, Attribute attr) { + mlir::cir::ArrayType at = type.cast(); - if (at.getSize() != value.size()) - return emitError() << "cst array size should match type size"; + if (!(attr.isa() || attr.isa())) + return emitError() << "constant array expects ArrayAttr or StringAttr"; + + if (auto strAttr = attr.dyn_cast()) { + auto intTy = at.getEltType().dyn_cast(); + // TODO: add CIR type for char. + if (!intTy || intTy.getWidth() != 8) { + emitError() << "constant array element for string literals expects i8 " + "array element type"; + return failure(); + } + return success(); + } + + assert(attr.isa()); + auto arrayAttr = attr.cast(); + + // Make sure both number of elements and subelement types match type. + if (at.getSize() != arrayAttr.size()) + return emitError() << "constant array size should match type size"; LogicalResult eltTypeCheck = success(); - value.walkImmediateSubElements( + arrayAttr.walkImmediateSubElements( [&](Attribute attr) { // Once we find a mismatch, stop there. if (eltTypeCheck.failed()) @@ -1011,13 +1030,69 @@ LogicalResult mlir::cir::CstArrayAttr::verify( auto typedAttr = attr.dyn_cast(); if (!typedAttr || typedAttr.getType() != at.getEltType()) { eltTypeCheck = failure(); - emitError() << "cst array element should match array element type"; + emitError() + << "constant array element should match array element type"; } }, [&](Type type) {}); return eltTypeCheck; } +::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { + ::mlir::FailureOr<::mlir::Type> resultTy; + ::mlir::FailureOr resultVal; + ::llvm::SMLoc loc = parser.getCurrentLocation(); + (void)loc; + // Parse literal '<' + if (parser.parseLess()) + return {}; + + // Parse variable 'value' + resultVal = ::mlir::FieldParser::parse(parser); + if (failed(resultVal)) { + parser.emitError(parser.getCurrentLocation(), + "failed to parse CstArrayAttr parameter 'value' which is " + "to be a `Attribute`"); + return {}; + } + + // ArrayAttrs have per-element type, not the type of the array... + if (resultVal->isa()) { + // Parse literal ':' + if (parser.parseColon()) + return {}; + + // Parse variable 'type' + resultTy = ::mlir::FieldParser<::mlir::Type>::parse(parser); + if (failed(resultTy)) { + parser.emitError(parser.getCurrentLocation(), + "failed to parse CstArrayAttr parameter 'type' which is " + "to be a `::mlir::Type`"); + return {}; + } + } else { + resultTy = resultVal->cast().getType(); + } + + // Parse literal '>' + if (parser.parseGreater()) + return {}; + return parser.getChecked( + loc, parser.getContext(), resultTy.value(), resultVal.value()); +} + +void CstArrayAttr::print(::mlir::AsmPrinter &printer) const { + printer << "<"; + printer.printStrippedAttrOrType(getValue()); + if (getValue().isa()) { + printer << ' ' << ":"; + printer << ' '; + printer.printStrippedAttrOrType(getType()); + } + printer << ">"; +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From d3804e77ed264a9e61b5de59cb084d0dd0ec059d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 May 2022 14:42:09 -0700 Subject: [PATCH 0498/2301] [CIR][CodeGen] Globals: more work towards constant lvalue emission for string literals - Move ConstantLValueEmitter around, and start using it for StringLiteral codegen. - Add getConstantArrayFromStringLiteral to emit our cir::CstArrayAttr. - Add incomplete getAddrOfConstantStringFromLiteral, which uses the method above. - Add ConstantStringMap to unique global string data, but don't use it yet. - No testcase added yet since it currently asserts. --- clang/lib/CIR/CIRGenExprCst.cpp | 206 +++++++++++++++++++++ clang/lib/CIR/CIRGenModule.cpp | 54 ++++++ clang/lib/CIR/CIRGenModule.h | 9 + mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 1 + mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 14 +- 5 files changed, 277 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprCst.cpp b/clang/lib/CIR/CIRGenExprCst.cpp index 0d7df1dc924a..36388cf0688d 100644 --- a/clang/lib/CIR/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CIRGenExprCst.cpp @@ -280,6 +280,211 @@ class ConstExprEmitter } // end anonymous namespace. +//===----------------------------------------------------------------------===// +// ConstantLValueEmitter +//===----------------------------------------------------------------------===// + +namespace { +/// A struct which can be used to peephole certain kinds of finalization +/// that normally happen during l-value emission. +struct ConstantLValue { + mlir::Value Value; + bool HasOffsetApplied; + + /*implicit*/ ConstantLValue(mlir::Value value, bool hasOffsetApplied = false) + : Value(value), HasOffsetApplied(hasOffsetApplied) {} + + /*implicit*/ ConstantLValue(ConstantAddress address) + : ConstantLValue(address.getPointer()) {} + + ConstantLValue(std::nullptr_t) : ConstantLValue({}, false) {} +}; + +/// A helper class for emitting constant l-values. +class ConstantLValueEmitter + : public ConstStmtVisitor { + CIRGenModule &CGM; + ConstantEmitter &Emitter; + const APValue &Value; + QualType DestType; + + // Befriend StmtVisitorBase so that we don't have to expose Visit*. + friend StmtVisitorBase; + +public: + ConstantLValueEmitter(ConstantEmitter &emitter, const APValue &value, + QualType destType) + : CGM(emitter.CGM), Emitter(emitter), Value(value), DestType(destType) {} + + mlir::Attribute tryEmit(); + +private: + mlir::Attribute tryEmitAbsolute(mlir::Type destTy); + ConstantLValue tryEmitBase(const APValue::LValueBase &base); + + ConstantLValue VisitStmt(const Stmt *S) { return nullptr; } + ConstantLValue VisitConstantExpr(const ConstantExpr *E); + ConstantLValue VisitCompoundLiteralExpr(const CompoundLiteralExpr *E); + ConstantLValue VisitStringLiteral(const StringLiteral *E); + ConstantLValue VisitObjCBoxedExpr(const ObjCBoxedExpr *E); + ConstantLValue VisitObjCEncodeExpr(const ObjCEncodeExpr *E); + ConstantLValue VisitObjCStringLiteral(const ObjCStringLiteral *E); + ConstantLValue VisitPredefinedExpr(const PredefinedExpr *E); + ConstantLValue VisitAddrLabelExpr(const AddrLabelExpr *E); + ConstantLValue VisitCallExpr(const CallExpr *E); + ConstantLValue VisitBlockExpr(const BlockExpr *E); + ConstantLValue VisitCXXTypeidExpr(const CXXTypeidExpr *E); + ConstantLValue + VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); + + bool hasNonZeroOffset() const { return !Value.getLValueOffset().isZero(); } + + /// Return the value offset. + mlir::Attribute getOffset() { assert(0 && "NYI"); } + + /// Apply the value offset to the given constant. + mlir::Attribute applyOffset(mlir::Attribute C) { + if (!hasNonZeroOffset()) + return C; + assert(0 && "NYI"); + } +}; + +} // namespace + +mlir::Attribute ConstantLValueEmitter::tryEmit() { + const APValue::LValueBase &base = Value.getLValueBase(); + + // The destination type should be a pointer or reference + // type, but it might also be a cast thereof. + // + // FIXME: the chain of casts required should be reflected in the APValue. + // We need this in order to correctly handle things like a ptrtoint of a + // non-zero null pointer and addrspace casts that aren't trivially + // represented in LLVM IR. + auto destTy = CGM.getTypes().convertTypeForMem(DestType); + assert(destTy.isa()); + + // If there's no base at all, this is a null or absolute pointer, + // possibly cast back to an integer type. + if (!base) { + assert(0 && "NYI"); + } + + // Otherwise, try to emit the base. + ConstantLValue result = tryEmitBase(base); + + // If that failed, we're done. + auto value = result.Value; + if (!value) + return {}; + + // Apply the offset if necessary and not already done. + if (!result.HasOffsetApplied) { + // TODO(cir): use ptr_stride, or something... + // value = applyOffset(value); + } + + // Convert to the appropriate type; this could be an lvalue for + // an integer. FIXME: performAddrSpaceCast + if (destTy.isa()) + assert(0 && + "NYI"); // return llvm::ConstantExpr::getPointerCast(value, destTy); + + assert(0 && "NYI"); +} + +/// Try to emit an absolute l-value, such as a null pointer or an integer +/// bitcast to pointer type. +mlir::Attribute ConstantLValueEmitter::tryEmitAbsolute(mlir::Type destTy) { + assert(0 && "NYI"); + return {}; +} + +ConstantLValue +ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) { + // Handle values. + if (const ValueDecl *D = base.dyn_cast()) { + assert(0 && "NYI"); + } + + // Handle typeid(T). + if (TypeInfoLValue TI = base.dyn_cast()) { + assert(0 && "NYI"); + } + + // Otherwise, it must be an expression. + return Visit(base.get()); +} + +ConstantLValue ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) { + assert(0 && "NYI"); + return Visit(E->getSubExpr()); +} + +ConstantLValue +ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitStringLiteral(const StringLiteral *E) { + return CGM.getAddrOfConstantStringFromLiteral(E); +} + +ConstantLValue +ConstantLValueEmitter::VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitObjCStringLiteral(const ObjCStringLiteral *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitPredefinedExpr(const PredefinedExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitAddrLabelExpr(const AddrLabelExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue ConstantLValueEmitter::VisitBlockExpr(const BlockExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue +ConstantLValueEmitter::VisitCXXTypeidExpr(const CXXTypeidExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + +ConstantLValue ConstantLValueEmitter::VisitMaterializeTemporaryExpr( + const MaterializeTemporaryExpr *E) { + assert(0 && "NYI"); + return nullptr; +} + //===----------------------------------------------------------------------===// // ConstantEmitter //===----------------------------------------------------------------------===// @@ -547,6 +752,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, Elts, typedFiller); } case APValue::LValue: + return ConstantLValueEmitter(*this, Value, DestType).tryEmit(); case APValue::FixedPoint: case APValue::ComplexInt: case APValue::ComplexFloat: diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 6dc592fda276..9f44ae08a366 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -812,6 +812,60 @@ void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { llvm_unreachable("Invalid argument to buildGlobalDefinition()"); } +mlir::Attribute +CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { + assert(!E->getType()->isPointerType() && "Strings are always arrays"); + + // Don't emit it as the address of the string, emit the string data itself + // as an inline array. + if (E->getCharByteWidth() == 1) { + SmallString<64> Str(E->getString()); + + // Resize the string to the right size, which is indicated by its type. + const ConstantArrayType *CAT = astCtx.getAsConstantArrayType(E->getType()); + auto finalSize = CAT->getSize().getZExtValue(); + Str.resize(finalSize); + + auto eltTy = getTypes().ConvertType(CAT->getElementType()); + auto cstArray = mlir::cir::CstArrayAttr::get( + mlir::cir::ArrayType::get(builder.getContext(), eltTy, finalSize), + mlir::StringAttr::get(builder.getContext(), Str)); + cstArray.dump(); + return cstArray; + } + + assert(0 && "not implemented"); + return {}; +} + +/// Return a pointer to a constant array for the given string literal. +ConstantAddress +CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, + StringRef Name) { + mlir::Attribute C = getConstantArrayFromStringLiteral(S); + mlir::cir::GlobalOp Entry; + if (!getLangOpts().WritableStrings) { + if (ConstantStringMap.count(C)) + assert(0 && "not implemented"); + } + + SmallString<256> MangledNameBuffer; + StringRef GlobalVariableName; + + // Mangle the string literal if that's how the ABI merges duplicate strings. + // Don't do it if they are writable, since we don't want writes in one TU to + // affect strings in another. + if (getCXXABI().getMangleContext().shouldMangleStringLiteral(S) && + !getLangOpts().WritableStrings) { + assert(0 && "not implemented"); + } else { + GlobalVariableName = Name; + } + + assert(0 && "not implemented"); + return ConstantAddress::invalid(); +} + // Emit code for a single top level declaration. void CIRGenModule::buildTopLevelDecl(Decl *decl) { // Ignore dependent declarations diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 9b900467030c..887a0f48fb75 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -173,6 +173,15 @@ class CIRGenModule { getAddrOfGlobalVar(const VarDecl *D, std::optional Ty, ForDefinition_t IsForDefinition = NotForDefinition); + llvm::DenseMap ConstantStringMap; + + /// Return a constant array for the given string. + mlir::Attribute getConstantArrayFromStringLiteral(const StringLiteral *E); + + /// Return a pointer to a constant array for the given string literal. + ConstantAddress getAddrOfConstantStringFromLiteral(const StringLiteral *S, + StringRef Name = ".str"); + // TODO: this obviously overlaps with const TargetCIRGenInfo &getTargetCIRGenInfo(); diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 8ab7a2d16fca..f6c645246937 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -891,6 +891,7 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { let arguments = (ins SymbolNameAttr:$sym_name, OptionalAttr:$sym_visibility, TypeAttr:$sym_type, + // Note this can also be a FlatSymbolRefAttr OptionalAttr:$initial_value, UnitAttr:$constant, OptionalAttr:$alignment); diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 51c3d900f545..79b3077e9e41 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -913,14 +913,14 @@ parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, // Parse constant with initializer, examples: // cir.global @y = 3.400000e+00 : f32 // cir.global @rgb = #cir.cst_array<[...] : !cir.array> - Attribute attr; - if (parseConstantValue(parser, attr).failed()) + if (parseConstantValue(parser, initialValueAttr).failed()) return failure(); - assert(attr.isa() && + assert(initialValueAttr.isa() && "Non-typed attrs shouldn't appear here."); - initialValueAttr = attr.cast(); - typeAttr = TypeAttr::get(attr.cast().getType()); + auto typedAttr = initialValueAttr.cast(); + + typeAttr = TypeAttr::get(typedAttr.getType()); return success(); } @@ -1078,8 +1078,8 @@ ::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, // Parse literal '>' if (parser.parseGreater()) return {}; - return parser.getChecked( - loc, parser.getContext(), resultTy.value(), resultVal.value()); + return parser.getChecked(loc, parser.getContext(), + resultTy.value(), resultVal.value()); } void CstArrayAttr::print(::mlir::AsmPrinter &printer) const { From 203bb40744fc3a12e31b81939684e9ecda86b7d7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 16 May 2022 15:37:19 -0700 Subject: [PATCH 0499/2301] [CIR] Globals: Allow building constant globals and fix type printing for uninitialized --- clang/lib/CIR/CIRGenModule.cpp | 3 ++- clang/test/CIR/IR/global.cir | 2 ++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 3 ++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 6 ++++-- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 9f44ae08a366..1faffa47add1 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -441,7 +441,8 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // mlir::SymbolTable::Visibility::Public is the default, no need to explicitly // mark it as such. - auto GV = builder.create(loc, MangledName, Ty); + auto GV = builder.create(loc, MangledName, Ty, + /*isConstant=*/false); theModule.push_back(GV); // If we already created a global with the same mangled name (but different diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 499b01ea6c56..e5368885bf7c 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -4,6 +4,7 @@ module { cir.global @a = 3 : i32 cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> cir.global @b = #cir.cst_array<"example\00" : !cir.array> + cir.global "private" constant @".str" : !cir.array {alignment = 1 : i64} func.func @use_global() { %0 = cir.get_global @a : cir.ptr cir.return @@ -13,6 +14,7 @@ module { // CHECK: cir.global @a = 3 : i32 // CHECK: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // CHECK: cir.global @b = #cir.cst_array<"example\00" : !cir.array> +// CHECK: cir.global "private" constant @".str" : !cir.array {alignment = 1 : i64} // CHECK: func @use_global() // CHECK-NEXT: %0 = cir.get_global @a : cir.ptr diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index f6c645246937..a99232109a62 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -914,7 +914,8 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { let builders = [ OpBuilder<(ins "StringRef":$sym_name, - "Type":$sym_type + "Type":$sym_type, + CArg<"bool", "false">:$isConstant )> ]; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 79b3077e9e41..fb2ca983a0cf 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -893,7 +893,7 @@ static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, // This also prints the type... printConstant(p, initAttr); } else { - p << type; + p << ": " << type; } } @@ -944,11 +944,13 @@ LogicalResult GlobalOp::verify() { } void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, - StringRef sym_name, Type sym_type) { + StringRef sym_name, Type sym_type, bool isConstant) { odsState.addAttribute(getSymNameAttrName(odsState.name), odsBuilder.getStringAttr(sym_name)); odsState.addAttribute(getSymTypeAttrName(odsState.name), ::mlir::TypeAttr::get(sym_type)); + if (isConstant) + odsState.addAttribute("constant", odsBuilder.getUnitAttr()); } //===----------------------------------------------------------------------===// From c72b895e2d921f44d90356924f82ef80866f6049 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 16 May 2022 15:38:28 -0700 Subject: [PATCH 0500/2301] [CIR][CodeGen] Add a helper for getting alignment in CIR terms --- clang/lib/CIR/CIRGenExpr.cpp | 5 +---- clang/lib/CIR/CIRGenModule.cpp | 6 ++++++ clang/lib/CIR/CIRGenModule.h | 5 +++-- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index f389162a78ef..241b00fd17b7 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -841,10 +841,7 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, auto localVarTy = getCIRType(ty); auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), localVarTy); - - auto alignIntAttr = - mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), - alignment.getQuantity()); + auto alignIntAttr = CGM.getAlignment(alignment); mlir::Value addr; { diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 1faffa47add1..174b2753fc0a 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1324,6 +1324,12 @@ void CIRGenModule::buildDeferred() { } } +mlir::IntegerAttr CIRGenModule::getAlignment(CharUnits &alignment) { + return mlir::IntegerAttr::get( + mlir::IntegerType::get(builder.getContext(), 64), + alignment.getQuantity()); +} + // TODO: this is gross, make a map mlir::Operation *CIRGenModule::GetGlobalValue(StringRef Name) { for (auto const &op : diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 887a0f48fb75..5aa4eeb14873 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -187,11 +187,12 @@ class CIRGenModule { /// Helpers to convert Clang's SourceLocation to a MLIR Location. mlir::Location getLoc(clang::SourceLocation SLoc); - mlir::Location getLoc(clang::SourceRange SLoc); - mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs); + /// Helper to convert Clang's alignment to CIR alignment + mlir::IntegerAttr getAlignment(CharUnits &alignment); + /// Determine whether an object of this type can be emitted /// as a constant. /// From 02c5580be895c855a235cef06427868fdce58c93 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 16 May 2022 15:40:07 -0700 Subject: [PATCH 0501/2301] [CIR][NFC] Populate UnimplementedFeatureGuarding.h with unsupported global bits --- clang/lib/CIR/CIRGenModule.h | 1 + clang/lib/CIR/UnimplementedFeatureGuarding.h | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 5aa4eeb14873..fbdc3ac73043 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -15,6 +15,7 @@ #include "CIRGenTypes.h" #include "CIRGenValue.h" +#include "UnimplementedFeatureGuarding.h" #include "clang/AST/ASTContext.h" #include "clang/AST/StmtVisitor.h" diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 19c73fffe5b1..0efb109b9146 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -25,6 +25,15 @@ struct UnimplementedFeature { // This is for whether or not we've implemented a cir::VectorType // corresponding to `llvm::VectorType` static bool cirVectorType() { return false; } + + // CIR still unware of address space + static bool addressSpace() { return false; } + + // Unhandled global information. + static bool unnamedAddr() { return false; } + static bool isWeakForLinker() { return false; } + static bool setDSOLocal() { return false; } + static bool threadLocal() { return false; } }; } // namespace cir From 2baa931b4204a645f60b618acb22c6341df9add1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 16 May 2022 16:38:01 -0700 Subject: [PATCH 0502/2301] [CIR] Fix somple constant parsing issues and add tests --- clang/test/CIR/IR/global.cir | 4 ++++ clang/test/CIR/IR/invalid.cir | 6 ++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 21 +++++++++++++++------ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index e5368885bf7c..5a0f6cfaf5ed 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -5,6 +5,8 @@ module { cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> cir.global @b = #cir.cst_array<"example\00" : !cir.array> cir.global "private" constant @".str" : !cir.array {alignment = 1 : i64} + cir.global "private" @c : i32 + cir.global "private" constant @".str2" = #cir.cst_array<"example\00" : !cir.array> {alignment = 1 : i64} func.func @use_global() { %0 = cir.get_global @a : cir.ptr cir.return @@ -15,6 +17,8 @@ module { // CHECK: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // CHECK: cir.global @b = #cir.cst_array<"example\00" : !cir.array> // CHECK: cir.global "private" constant @".str" : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" @c : i32 +// CHECK: cir.global "private" constant @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: func @use_global() // CHECK-NEXT: %0 = cir.get_global @a : cir.ptr diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 4e857ffa4945..0993812981ea 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -167,3 +167,9 @@ module { module { cir.global @b = #cir.cst_array<"example\00" : !cir.array> // expected-error {{constant array element for string literals expects i8 array element type}} } // expected-error {{expected constant attribute to match type}} + +// ----- + +module { + cir.global "private" constant @".str2" = #cir.cst_array<"example\00"> {alignment = 1 : i64} // expected-error {{expected type declaration for string literal}} +} // expected-error@-1 {{expected constant attribute to match type}} diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index fb2ca983a0cf..2cab485482a1 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -912,7 +912,7 @@ parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, // Parse constant with initializer, examples: // cir.global @y = 3.400000e+00 : f32 - // cir.global @rgb = #cir.cst_array<[...] : !cir.array> + // cir.global @rgb = #cir.cst_array<[...] : !cir.array> if (parseConstantValue(parser, initialValueAttr).failed()) return failure(); @@ -1002,12 +1002,13 @@ LogicalResult mlir::cir::CstArrayAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::Type type, Attribute attr) { - mlir::cir::ArrayType at = type.cast(); if (!(attr.isa() || attr.isa())) return emitError() << "constant array expects ArrayAttr or StringAttr"; if (auto strAttr = attr.dyn_cast()) { + mlir::cir::ArrayType at = type.cast(); auto intTy = at.getEltType().dyn_cast(); + // TODO: add CIR type for char. if (!intTy || intTy.getWidth() != 8) { emitError() << "constant array element for string literals expects i8 " @@ -1019,6 +1020,7 @@ LogicalResult mlir::cir::CstArrayAttr::verify( assert(attr.isa()); auto arrayAttr = attr.cast(); + auto at = type.cast(); // Make sure both number of elements and subelement types match type. if (at.getSize() != arrayAttr.size()) @@ -1059,8 +1061,8 @@ ::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, return {}; } - // ArrayAttrs have per-element type, not the type of the array... - if (resultVal->isa()) { + // ArrayAttrrs have per-element type, not the type of the array... + if (resultVal->dyn_cast()) { // Parse literal ':' if (parser.parseColon()) return {}; @@ -1074,7 +1076,14 @@ ::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, return {}; } } else { - resultTy = resultVal->cast().getType(); + assert(resultVal->isa() && "IDK"); + auto ta = resultVal->cast(); + resultTy = ta.getType(); + if (resultTy->isa()) { + parser.emitError(parser.getCurrentLocation(), + "expected type declaration for string literal"); + return {}; + } } // Parse literal '>' @@ -1087,7 +1096,7 @@ ::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, void CstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << "<"; printer.printStrippedAttrOrType(getValue()); - if (getValue().isa()) { + if (getValue().isa()) { printer << ' ' << ":"; printer << ' '; printer.printStrippedAttrOrType(getType()); From 296bee68c4d4c8b74d3f1d889d7a3c29ac288ea1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 17 May 2022 11:50:20 -0700 Subject: [PATCH 0503/2301] [CIR] Support using SymbolRefAttr on cir::GlobalOp Useful for supporting initializers that are themselves constant globals --- clang/test/CIR/IR/global.cir | 2 ++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 30 ++++++++++++++++++++------ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 5a0f6cfaf5ed..4d36fdb96d82 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -7,6 +7,7 @@ module { cir.global "private" constant @".str" : !cir.array {alignment = 1 : i64} cir.global "private" @c : i32 cir.global "private" constant @".str2" = #cir.cst_array<"example\00" : !cir.array> {alignment = 1 : i64} + cir.global @s = @".str2": !cir.ptr func.func @use_global() { %0 = cir.get_global @a : cir.ptr cir.return @@ -19,6 +20,7 @@ module { // CHECK: cir.global "private" constant @".str" : !cir.array {alignment = 1 : i64} // CHECK: cir.global "private" @c : i32 // CHECK: cir.global "private" constant @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global @s = @".str2": !cir.ptr // CHECK: func @use_global() // CHECK-NEXT: %0 = cir.get_global @a : cir.ptr diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 2cab485482a1..389e21d4063a 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -94,6 +94,12 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return success(); } + if (attrType.isa()) { + if (opType.isa<::mlir::cir::PointerType>()) + return success(); + return op->emitOpError("symbolref expects pointer type"); + } + assert(attrType.isa() && "What else could we be looking at here?"); return op->emitOpError("cannot have value of type ") << attrType.cast().getType(); @@ -888,22 +894,25 @@ LogicalResult LoopOp::verify() { static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, TypeAttr type, Attribute initAttr) { + auto printType = [&]() { p << ": " << type; }; if (!op.isDeclaration()) { p << "= "; // This also prints the type... printConstant(p, initAttr); + if (initAttr.isa()) + printType(); } else { - p << ": " << type; + printType(); } } static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, Attribute &initialValueAttr) { - Type type; if (parser.parseOptionalEqual().failed()) { // Absence of equal means a declaration, so we need to parse the type. // cir.global @a : i32 + Type type; if (parser.parseColonType(type)) return failure(); typeAttr = TypeAttr::get(type); @@ -916,12 +925,19 @@ parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, if (parseConstantValue(parser, initialValueAttr).failed()) return failure(); - assert(initialValueAttr.isa() && - "Non-typed attrs shouldn't appear here."); - auto typedAttr = initialValueAttr.cast(); - - typeAttr = TypeAttr::get(typedAttr.getType()); + mlir::Type opTy; + if (auto sra = initialValueAttr.dyn_cast()) { + if (parser.parseColonType(opTy)) + return failure(); + } else { + // Handle StringAttrs + assert(initialValueAttr.isa() && + "Non-typed attrs shouldn't appear here."); + auto typedAttr = initialValueAttr.cast(); + opTy = typedAttr.getType(); + } + typeAttr = TypeAttr::get(opTy); return success(); } From c616c317b580ead085c716bfc32ace85bb5fe1f5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 17 May 2022 11:55:40 -0700 Subject: [PATCH 0504/2301] [CIR][CodeGen] Globals: const char *s = "example" now works --- clang/lib/CIR/Address.h | 30 ----- clang/lib/CIR/CIRGenExprCst.cpp | 22 ++-- clang/lib/CIR/CIRGenModule.cpp | 112 +++++++++++++++++-- clang/lib/CIR/CIRGenModule.h | 15 ++- clang/lib/CIR/UnimplementedFeatureGuarding.h | 3 + clang/test/CIR/CodeGen/globals.cpp | 5 + clang/test/CIR/IR/global.cir | 2 +- 7 files changed, 136 insertions(+), 53 deletions(-) diff --git a/clang/lib/CIR/Address.h b/clang/lib/CIR/Address.h index 815692fc8646..8f371f13f746 100644 --- a/clang/lib/CIR/Address.h +++ b/clang/lib/CIR/Address.h @@ -71,36 +71,6 @@ class Address { } }; -/// A specialization of Address that requires the address to be an -/// MLIR attribute -class ConstantAddress : public Address { - ConstantAddress(std::nullptr_t) : Address(nullptr) {} - -public: - ConstantAddress(mlir::Value pointer, mlir::Type elementType, - clang::CharUnits alignment) - : Address(pointer, elementType, alignment) {} - - static ConstantAddress invalid() { return ConstantAddress(nullptr); } - - mlir::Value getPointer() const { return Address::getPointer(); } - - ConstantAddress getElementBitCast(mlir::Type ElemTy) const { - assert(0 && "NYI"); - } - - static bool isaImpl(Address addr) { - return addr.getPointer() ? true : false; - // TODO(cir): in LLVM codegen this (and other methods) are implemented via - // llvm::isa, decide on what abstraction to use here. - // return llvm::isa(addr.getPointer()); - } - static ConstantAddress castImpl(Address addr) { - return ConstantAddress(addr.getPointer(), addr.getElementType(), - addr.getAlignment()); - } -}; - } // namespace cir #endif // LLVM_CLANG_LIB_CIR_ADDRESS_H diff --git a/clang/lib/CIR/CIRGenExprCst.cpp b/clang/lib/CIR/CIRGenExprCst.cpp index 36388cf0688d..fbfbc0731416 100644 --- a/clang/lib/CIR/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CIRGenExprCst.cpp @@ -288,14 +288,14 @@ namespace { /// A struct which can be used to peephole certain kinds of finalization /// that normally happen during l-value emission. struct ConstantLValue { - mlir::Value Value; + using SymbolTy = mlir::SymbolRefAttr; + llvm::PointerUnion Value; bool HasOffsetApplied; /*implicit*/ ConstantLValue(mlir::Value value, bool hasOffsetApplied = false) : Value(value), HasOffsetApplied(hasOffsetApplied) {} - /*implicit*/ ConstantLValue(ConstantAddress address) - : ConstantLValue(address.getPointer()) {} + /*implicit*/ ConstantLValue(SymbolTy address) : Value(address) {} ConstantLValue(std::nullptr_t) : ConstantLValue({}, false) {} }; @@ -346,6 +346,7 @@ class ConstantLValueEmitter mlir::Attribute applyOffset(mlir::Attribute C) { if (!hasNonZeroOffset()) return C; + // TODO(cir): use ptr_stride, or something... assert(0 && "NYI"); } }; @@ -375,21 +376,22 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { ConstantLValue result = tryEmitBase(base); // If that failed, we're done. - auto value = result.Value; + auto &value = result.Value; if (!value) return {}; // Apply the offset if necessary and not already done. - if (!result.HasOffsetApplied) { - // TODO(cir): use ptr_stride, or something... - // value = applyOffset(value); + if (!result.HasOffsetApplied && !value.is()) { + assert(0 && "NYI"); } // Convert to the appropriate type; this could be an lvalue for // an integer. FIXME: performAddrSpaceCast - if (destTy.isa()) - assert(0 && - "NYI"); // return llvm::ConstantExpr::getPointerCast(value, destTy); + if (destTy.isa()) { + if (value.is()) + return value.get(); + assert(0 && "NYI"); + } assert(0 && "NYI"); } diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 174b2753fc0a..723299c03c54 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -654,9 +654,30 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, } } - assert(Init.isa() && "This should have a type"); - auto TypedInitAttr = Init.cast(); - auto InitType = TypedInitAttr.getType(); + mlir::Type InitType; + // If the initializer attribute is a SymbolRefAttr it means we are + // initializing the global based on a global constant. + // + // TODO(cir): create another attribute to contain the final type and abstract + // away SymbolRefAttr. + if (auto symAttr = Init.dyn_cast()) { + auto cstGlobal = mlir::SymbolTable::lookupSymbolIn(theModule, symAttr); + assert(isa(cstGlobal) && + "unaware of other symbol providers"); + auto g = cast(cstGlobal); + auto arrayTy = g.getSymType().dyn_cast(); + // TODO(cir): pointer to array decay. Should this be modeled explicitly in + // CIR? + if (arrayTy) + InitType = mlir::cir::PointerType::get(builder.getContext(), + arrayTy.getEltType()); + } else { + assert(Init.isa() && "This should have a type"); + auto TypedInitAttr = Init.cast(); + InitType = TypedInitAttr.getType(); + } + assert(!InitType.isa() && "Should have a type by now"); + auto Entry = buildGlobal(D, InitType, ForDefinition_t(!IsTentative)); // TODO(cir): Strip off pointer casts from Entry if we get them? @@ -828,10 +849,10 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { Str.resize(finalSize); auto eltTy = getTypes().ConvertType(CAT->getElementType()); + auto TheType = + mlir::cir::ArrayType::get(builder.getContext(), eltTy, finalSize); auto cstArray = mlir::cir::CstArrayAttr::get( - mlir::cir::ArrayType::get(builder.getContext(), eltTy, finalSize), - mlir::StringAttr::get(builder.getContext(), Str)); - cstArray.dump(); + TheType, mlir::StringAttr::get(Str, TheType)); return cstArray; } @@ -839,10 +860,72 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { return {}; } +// TODO(cir): this could be a common AST helper for both CIR and LLVM codegen. +LangAS CIRGenModule::getGlobalConstantAddressSpace() const { + // OpenCL v1.2 s6.5.3: a string literal is in the constant address space. + if (getLangOpts().OpenCL) + return LangAS::opencl_constant; + if (getLangOpts().SYCLIsDevice) + return LangAS::sycl_global; + if (auto AS = getTarget().getConstantAddressSpace()) + return AS.value(); + return LangAS::Default; +} + +static mlir::cir::GlobalOp +generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, + mlir::SymbolTable::Visibility LT, CIRGenModule &CGM, + StringRef GlobalName, CharUnits Alignment) { + unsigned AddrSpace = CGM.getASTContext().getTargetAddressSpace( + CGM.getGlobalConstantAddressSpace()); + assert((AddrSpace == 0 && !cir::UnimplementedFeature::addressSpace()) && + "NYI"); + + // Create a global variable for this string + // FIXME(cir): check for insertion point in module level. + auto GV = CGM.getBuilder().create( + loc, GlobalName, C.getType(), !CGM.getLangOpts().WritableStrings); + + // Set up extra information and add to the module + GV.setAlignmentAttr(CGM.getAlignment(Alignment)); + mlir::SymbolTable::setSymbolVisibility(GV, LT); + GV.setInitialValueAttr(C); + + CGM.getModule().push_back(GV); + + // TODO(cir) + assert(!cir::UnimplementedFeature::threadLocal() && "NYI"); + assert(!cir::UnimplementedFeature::unnamedAddr() && "NYI"); + assert(!cir::UnimplementedFeature::isWeakForLinker() && "NYI"); + assert(!cir::UnimplementedFeature::setDSOLocal() && "NYI"); + return GV; +} + +// In address space agnostic languages, string literals are in default address +// space in AST. However, certain targets (e.g. amdgcn) request them to be +// emitted in constant address space in LLVM IR. To be consistent with other +// parts of AST, string literal global variables in constant address space +// need to be casted to default address space before being put into address +// map and referenced by other part of CodeGen. +// In OpenCL, string literals are in constant address space in AST, therefore +// they should not be casted to default address space. +static mlir::StringAttr +castStringLiteralToDefaultAddressSpace(CIRGenModule &CGM, mlir::StringAttr GV) { + if (!CGM.getLangOpts().OpenCL) { + auto AS = CGM.getGlobalConstantAddressSpace(); + if (AS != LangAS::Default) + assert(0 && "not implemented"); + } + return GV; +} + /// Return a pointer to a constant array for the given string literal. -ConstantAddress +mlir::SymbolRefAttr CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name) { + CharUnits Alignment = + astCtx.getAlignOfGlobalVarInChars(S->getType(), /*VD=*/nullptr); + mlir::Attribute C = getConstantArrayFromStringLiteral(S); mlir::cir::GlobalOp Entry; if (!getLangOpts().WritableStrings) { @@ -852,6 +935,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, SmallString<256> MangledNameBuffer; StringRef GlobalVariableName; + auto LT = mlir::SymbolTable::Visibility::Public; // Mangle the string literal if that's how the ABI merges duplicate strings. // Don't do it if they are writable, since we don't want writes in one TU to @@ -860,11 +944,21 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, !getLangOpts().WritableStrings) { assert(0 && "not implemented"); } else { + LT = mlir::SymbolTable::Visibility::Private; GlobalVariableName = Name; } - assert(0 && "not implemented"); - return ConstantAddress::invalid(); + auto loc = getLoc(S->getSourceRange()); + auto typedC = llvm::dyn_cast(C); + if (!typedC) + llvm_unreachable("this should never be untyped at this point"); + auto GV = generateStringLiteral(loc, typedC, LT, *this, GlobalVariableName, + Alignment); + ConstantStringMap[C] = GV; + + assert(!cir::UnimplementedFeature::reportGlobalToASan() && "NYI"); + return mlir::SymbolRefAttr::get( + castStringLiteralToDefaultAddressSpace(*this, GV.getSymNameAttr())); } // Emit code for a single top level declaration. diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index fbdc3ac73043..9cd11825548b 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -179,9 +179,18 @@ class CIRGenModule { /// Return a constant array for the given string. mlir::Attribute getConstantArrayFromStringLiteral(const StringLiteral *E); - /// Return a pointer to a constant array for the given string literal. - ConstantAddress getAddrOfConstantStringFromLiteral(const StringLiteral *S, - StringRef Name = ".str"); + /// Return a global symbol reference to a constant array for the given string + /// literal. + mlir::SymbolRefAttr + getAddrOfConstantStringFromLiteral(const StringLiteral *S, + StringRef Name = ".str"); + + /// Return the AST address space of constant literal, which is used to emit + /// the constant literal as global variable in LLVM IR. + /// Note: This is not necessarily the address space of the constant literal + /// in AST. For address space agnostic language, e.g. C++, constant literal + /// in AST is always in default address space. + LangAS getGlobalConstantAddressSpace() const; // TODO: this obviously overlaps with const TargetCIRGenInfo &getTargetCIRGenInfo(); diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 0efb109b9146..3f1bf4239a92 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -34,6 +34,9 @@ struct UnimplementedFeature { static bool isWeakForLinker() { return false; } static bool setDSOLocal() { return false; } static bool threadLocal() { return false; } + + // Sanitizers + static bool reportGlobalToASan() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index c839fc041b13..7693118b9551 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * int a = 3; const int b = 4; // unless used wont be generated @@ -10,6 +11,7 @@ double w = 4.3; char x = '3'; unsigned char rgb[3] = {0, 233, 33}; char alpha[4] = "abc"; +const char *s = "example"; // CHECK: module { // CHECK-NEXT: cir.global @a = 3 : i32 @@ -19,3 +21,6 @@ char alpha[4] = "abc"; // CHECK-NEXT: cir.global @x = 51 : i8 // CHECK-NEXT: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // CHECK-NEXT: cir.global @alpha = #cir.cst_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8] : !cir.array> + +// CHECK-NEXT: cir.global "private" constant @".str" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global @s = @".str": !cir.ptr diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 4d36fdb96d82..8270b288d967 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -6,7 +6,7 @@ module { cir.global @b = #cir.cst_array<"example\00" : !cir.array> cir.global "private" constant @".str" : !cir.array {alignment = 1 : i64} cir.global "private" @c : i32 - cir.global "private" constant @".str2" = #cir.cst_array<"example\00" : !cir.array> {alignment = 1 : i64} + cir.global "private" constant @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global @s = @".str2": !cir.ptr func.func @use_global() { %0 = cir.get_global @a : cir.ptr From 33023c44ce4939ceb3e369251becef9ebc90acea Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 17 May 2022 13:55:04 -0700 Subject: [PATCH 0505/2301] [CIR][CodeGen] Global: use count to track private string literals --- clang/lib/CIR/CIRGenModule.cpp | 7 +++++++ clang/lib/CIR/CIRGenModule.h | 1 + clang/test/CIR/CodeGen/globals.cpp | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 723299c03c54..c077ffa46475 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -933,6 +933,13 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, assert(0 && "not implemented"); } + SmallString<256> StringNameBuffer = Name; + llvm::raw_svector_ostream Out(StringNameBuffer); + if (StringLiteralCnt) + Out << StringLiteralCnt; + Name = Out.str(); + StringLiteralCnt++; + SmallString<256> MangledNameBuffer; StringRef GlobalVariableName; auto LT = mlir::SymbolTable::Visibility::Public; diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 9cd11825548b..3eeb10cd279c 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -184,6 +184,7 @@ class CIRGenModule { mlir::SymbolRefAttr getAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name = ".str"); + unsigned StringLiteralCnt = 0; /// Return the AST address space of constant literal, which is used to emit /// the constant literal as global variable in LLVM IR. diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 7693118b9551..0e44738794c3 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -12,6 +12,7 @@ char x = '3'; unsigned char rgb[3] = {0, 233, 33}; char alpha[4] = "abc"; const char *s = "example"; +const char *s1 = "example1"; // CHECK: module { // CHECK-NEXT: cir.global @a = 3 : i32 @@ -24,3 +25,6 @@ const char *s = "example"; // CHECK-NEXT: cir.global "private" constant @".str" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global @s = @".str": !cir.ptr + +// CHECK-NEXT: cir.global "private" constant @".str1" = #cir.cst_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global @s1 = @".str1": !cir.ptr From 371fdd23a181e5fc37ca2a381d882e8e79d7a265 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 17 May 2022 14:05:15 -0700 Subject: [PATCH 0506/2301] [CIR][CodeGen] Globals: unique constant string literals when possible --- clang/lib/CIR/CIRGenModule.cpp | 11 +++++++++-- clang/test/CIR/CodeGen/globals.cpp | 3 +++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index c077ffa46475..5372b3e366fd 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -929,8 +929,15 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, mlir::Attribute C = getConstantArrayFromStringLiteral(S); mlir::cir::GlobalOp Entry; if (!getLangOpts().WritableStrings) { - if (ConstantStringMap.count(C)) - assert(0 && "not implemented"); + if (ConstantStringMap.count(C)) { + auto g = ConstantStringMap[C]; + // The bigger alignment always wins. + if (!g.getAlignment() || + uint64_t(Alignment.getQuantity()) > *g.getAlignment()) + g.setAlignmentAttr(getAlignment(Alignment)); + return mlir::SymbolRefAttr::get( + castStringLiteralToDefaultAddressSpace(*this, g.getSymNameAttr())); + } } SmallString<256> StringNameBuffer = Name; diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 0e44738794c3..526933c77287 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -13,6 +13,7 @@ unsigned char rgb[3] = {0, 233, 33}; char alpha[4] = "abc"; const char *s = "example"; const char *s1 = "example1"; +const char *s2 = "example"; // CHECK: module { // CHECK-NEXT: cir.global @a = 3 : i32 @@ -28,3 +29,5 @@ const char *s1 = "example1"; // CHECK-NEXT: cir.global "private" constant @".str1" = #cir.cst_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global @s1 = @".str1": !cir.ptr + +// CHECK-NEXT: cir.global @s2 = @".str": !cir.ptr From 783c08907d87f222ad1f69b5f0415b43bf01cb1d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 17 May 2022 17:53:48 -0400 Subject: [PATCH 0507/2301] [CIR] Extend String.cpp test to include a ctor with a param --- clang/test/CIR/CodeGen/String.cpp | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 248013df0cd5..beb0e9eb2407 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -7,10 +7,12 @@ class String { public: String() : size{0} {} + String(int size) : size{size} {} }; void test() { - String s; + String s1{}; + String s2{1}; } // CHECK: func @_ZN6StringC2Ev @@ -23,3 +25,15 @@ void test() { // CHECK-NEXT: cir.store %4, %2 : i64, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } +// CHECK: func @_ZN6StringC2Ei +// CHECK-NEXT: %0 = cir.alloca !cir.ptr +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["size", paraminit] +// CHECK-NEXT: cir.store %arg0, %0 +// CHECK-NEXT: cir.store %arg1, %1 +// CHECK-NEXT: %2 = cir.load %0 +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr +// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %5 = cir.cast(integral, %4 : i32), i64 +// CHECK-NEXT: cir.store %5, %3 : i64, cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } From c34ba78aea24db8845bcbb9196128782ff4b98ae Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 17 May 2022 21:38:41 -0400 Subject: [PATCH 0508/2301] [CIR][NFC] Check in the full StringView example --- clang/test/CIR/CodeGen/StringExample.cpp | 34 ++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 clang/test/CIR/CodeGen/StringExample.cpp diff --git a/clang/test/CIR/CodeGen/StringExample.cpp b/clang/test/CIR/CodeGen/StringExample.cpp new file mode 100644 index 000000000000..a2c0ef374f1c --- /dev/null +++ b/clang/test/CIR/CodeGen/StringExample.cpp @@ -0,0 +1,34 @@ +// RUN: true + +int strlen(char const *); +void puts(char const *); + +struct String { + long size; + long capacity; + char *storage; + + String() : size{0}, capacity{0}, storage{nullptr} {} + String(char const *s) : size{strlen(s)}, capacity{size}, + storage{new char[capacity]} {} +}; + +struct StringView { + long size; + char *storage; + + StringView(const String &s) : size{s.size}, storage{s.storage} {} + StringView() : size{0}, storage{nullptr} {} +}; + +int main() { + StringView sv; + { + String s = "Hi"; + sv = s; + + puts(sv.storage); + } + + puts(sv.storage); +} From dcd241bd00bbdbe22477b3babf6d273ee40ac3d9 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 17 May 2022 21:39:05 -0400 Subject: [PATCH 0509/2301] [CIR] Add stubbed out functions for all of ScalarExprEmitter As per usual, the more asserts the merrier --- clang/lib/CIR/CIRGenExprScalar.cpp | 674 +++++++++++++++++++---------- 1 file changed, 446 insertions(+), 228 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index e26e316eda01..21e412e92f97 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -58,7 +58,81 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("Stmt can't have complex result type!"); } - mlir::Value VisitInitListExpr(InitListExpr *E); + mlir::Value VisitExpr(Expr *E) { + // Crashing here for "ScalarExprClassName"? Please implement + // VisitScalarExprClassName(...) to get this working. + emitError(CGF.getLoc(E->getExprLoc()), "scalar exp no implemented: '") + << E->getStmtClassName() << "'"; + assert(0 && "shouldn't be here!"); + return {}; + } + + mlir::Value VisitConstantExpr(ConstantExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitParenExpr(ParenExpr *PE) { llvm_unreachable("NYI"); } + mlir::Value + VisitSubstnonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *GE) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCoawaitExpr(CoawaitExpr *S) { llvm_unreachable("NYI"); } + mlir::Value VisitCoyieldExpr(CoyieldExpr *S) { llvm_unreachable("NYI"); } + mlir::Value VisitUnaryCoawait(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + + // Leaves. + mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { + mlir::Type Ty = CGF.getCIRType(E->getType()); + return Builder.create( + CGF.getLoc(E->getExprLoc()), Ty, + Builder.getIntegerAttr(Ty, E->getValue())); + } + + mlir::Value VisitFixedPointLiteral(const FixedPointLiteral *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitFloatingLiteral(const FloatingLiteral *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCharacterLiteral(const CharacterLiteral *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { + mlir::Type Ty = CGF.getCIRType(E->getType()); + return Builder.create( + CGF.getLoc(E->getExprLoc()), Ty, Builder.getBoolAttr(E->getValue())); + } + + mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitGNUNullExpr(const GNUNullExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitOffsetOfExpr(OffsetOfExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitAddrLabelExpr(const AddrLabelExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitSizeOfPackExpr(SizeOfPackExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitPseudoObjectExpr(PseudoObjectExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *E) { + llvm_unreachable("NYI"); + } /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { @@ -70,13 +144,29 @@ class ScalarExprEmitter : public StmtVisitor { return load; } - // Handle l-values. + // l-values mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { // FIXME: we could try to emit this as constant first, see // CGF.tryEmitAsConstant(E) return buildLoadOfLValue(E); } + mlir::Value VisitObjCSelectorExpr(ObjCSelectorExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCProtocolExpr(ObjCProtocolExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCIVarRefExpr(ObjCIvarRefExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCMessageExpr(ObjCMessageExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCIsaExpr(ObjCIsaExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) { + llvm_unreachable("NYI"); + } mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *E) { // Do we need anything like TestAndClearIgnoreResultAssign()? assert(!E->getBase()->getType()->isVectorType() && @@ -89,6 +179,173 @@ class ScalarExprEmitter : public StmtVisitor { return buildLoadOfLValue(E); } + mlir::Value VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitMemberExpr(MemberExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitExtVectorelementExpr(Expr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitCompoundLiteralEpxr(CompoundLiteralExpr *E) { + llvm_unreachable("NYI"); + } + + mlir::Value VisitInitListExpr(InitListExpr *E); + + mlir::Value VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) { + llvm_unreachable("NYI"); + } + + mlir::Value VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCastExpr(CastExpr *E); + mlir::Value VisitCallExpr(const CallExpr *E); + mlir::Value VisitStmtExpr(StmtExpr *E) { llvm_unreachable("NYI"); } + + // Unary Operators. + mlir::Value VisitUnaryPostDec(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryPostInc(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryPreDec(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryPreInc(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + + mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { + assert(!llvm::isa(E->getType()) && "not implemented"); + return CGF.buildLValue(E->getSubExpr()).getPointer(); + } + + mlir::Value VisitUnaryDeref(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryPlus(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryMinus(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryNot(const UnaryOperator *E) { llvm_unreachable("NYI"); } + mlir::Value VisitUnaryLNot(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryReal(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryImag(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryExtension(const UnaryOperator *E) { + llvm_unreachable("NYI"); + } + + // C++ + mlir::Value VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitSourceLocExpr(SourceLocExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCXXThisExpr(CXXThisExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitExprWithCleanups(ExprWithCleanups *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCXXNewExpr(const CXXNewExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitTypeTraitExpr(const TypeTraitExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value + VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitRequiresExpr(const RequiresExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitExpressionTraitExpr(const ExpressionTraitExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCXXThrowExpr(CXXThrowExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitCXXNoexceptExpr(CXXNoexceptExpr *E) { + llvm_unreachable("NYI"); + } + + // Comparisons. +#define VISITCOMP(CODE) \ + mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); } + VISITCOMP(LT) + VISITCOMP(GT) + VISITCOMP(LE) + VISITCOMP(GE) + VISITCOMP(EQ) + VISITCOMP(NE) +#undef VISITCOMP + + mlir::Value VisitBinAssign(const BinaryOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitBinLAnd(const BinaryOperator *E) { llvm_unreachable("NYI"); } + mlir::Value VisitBinLOr(const BinaryOperator *E) { llvm_unreachable("NYI"); } + mlir::Value VisitBinComma(const BinaryOperator *E) { + llvm_unreachable("NYI"); + } + + mlir::Value VisitBinPtrMemD(const Expr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitBinPtrMemI(const Expr *E) { llvm_unreachable("NYI"); } + + mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { + llvm_unreachable("NYI"); + } + + // Other Operators. + mlir::Value VisitBlockExpr(const BlockExpr *E) { llvm_unreachable("NYI"); } + mlir::Value + VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitChooseExpr(ChooseExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitVAArgExpr(VAArgExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitObjCStringLiteral(const ObjCStringLiteral *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCBoxedExpr(ObjCBoxedExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitObjCArrayLiteral(ObjCArrayLiteral *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitAsTypeExpr(AsTypeExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); } + // Emit a conversion from the specified type to the specified destination // type, both of which are CIR scalar types. struct ScalarConversionOpts { @@ -112,205 +369,6 @@ class ScalarExprEmitter : public StmtVisitor { QualType DstType, mlir::Type SrcTy, mlir::Type DstTy, ScalarConversionOpts Opts); - // Emit code for an explicit or implicit cast. Implicit - // casts have to handle a more broad range of conversions than explicit - // casts, as they handle things like function to ptr-to-function decay - // etc. - mlir::Value VisitCastExpr(CastExpr *CE) { - Expr *E = CE->getSubExpr(); - QualType DestTy = CE->getType(); - CastKind Kind = CE->getCastKind(); - - // These cases are generally not written to ignore the result of evaluating - // their sub-expressions, so we clear this now. - bool Ignored = TestAndClearIgnoreResultAssign(); - (void)Ignored; - - // Since almost all cast kinds apply to scalars, this switch doesn't have a - // default case, so the compiler will warn on a missing case. The cases are - // in the same order as in the CastKind enum. - switch (Kind) { - case clang::CK_Dependent: - llvm_unreachable("dependent cast kind in CIR gen!"); - case clang::CK_BuiltinFnToFnPtr: - llvm_unreachable("builtin functions are handled elsewhere"); - - case CK_LValueBitCast: - llvm_unreachable("NYI"); - case CK_ObjCObjectLValueCast: - llvm_unreachable("NYI"); - case CK_LValueToRValueBitCast: - llvm_unreachable("NYI"); - case CK_CPointerToObjCPointerCast: - llvm_unreachable("NYI"); - case CK_BlockPointerToObjCPointerCast: - llvm_unreachable("NYI"); - case CK_AnyPointerToBlockPointerCast: - llvm_unreachable("NYI"); - case CK_BitCast: - llvm_unreachable("NYI"); - case CK_AddressSpaceConversion: - llvm_unreachable("NYI"); - case CK_AtomicToNonAtomic: - llvm_unreachable("NYI"); - case CK_NonAtomicToAtomic: - llvm_unreachable("NYI"); - case CK_UserDefinedConversion: - llvm_unreachable("NYI"); - case CK_NoOp: - llvm_unreachable("NYI"); - case CK_BaseToDerived: - llvm_unreachable("NYI"); - case CK_DerivedToBase: - llvm_unreachable("NYI"); - case CK_Dynamic: - llvm_unreachable("NYI"); - case CK_ArrayToPointerDecay: - llvm_unreachable("NYI"); - case CK_FunctionToPointerDecay: - llvm_unreachable("NYI"); - - case CK_NullToPointer: { - // FIXME: use MustVisitNullValue(E) and evaluate expr. - // Note that DestTy is used as the MLIR type instead of a custom - // nullptr type. - mlir::Type Ty = CGF.getCIRType(DestTy); - return Builder.create( - CGF.getLoc(E->getExprLoc()), Ty, - mlir::cir::NullAttr::get(Builder.getContext(), Ty)); - } - - case CK_NullToMemberPointer: - llvm_unreachable("NYI"); - case CK_ReinterpretMemberPointer: - llvm_unreachable("NYI"); - case CK_BaseToDerivedMemberPointer: - llvm_unreachable("NYI"); - case CK_DerivedToBaseMemberPointer: - llvm_unreachable("NYI"); - case CK_ARCProduceObject: - llvm_unreachable("NYI"); - case CK_ARCConsumeObject: - llvm_unreachable("NYI"); - case CK_ARCReclaimReturnedObject: - llvm_unreachable("NYI"); - case CK_ARCExtendBlockObject: - llvm_unreachable("NYI"); - case CK_CopyAndAutoreleaseBlockObject: - llvm_unreachable("NYI"); - case CK_FloatingRealToComplex: - llvm_unreachable("NYI"); - case CK_FloatingComplexCast: - llvm_unreachable("NYI"); - case CK_IntegralComplexToFloatingComplex: - llvm_unreachable("NYI"); - case CK_FloatingComplexToIntegralComplex: - llvm_unreachable("NYI"); - case CK_ConstructorConversion: - llvm_unreachable("NYI"); - case CK_ToUnion: - llvm_unreachable("NYI"); - - case CK_LValueToRValue: - assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)); - assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); - return Visit(const_cast(E)); - - case CK_IntegralToPointer: - llvm_unreachable("NYI"); - case CK_PointerToIntegral: - llvm_unreachable("NYI"); - case CK_ToVoid: - llvm_unreachable("NYI"); - case CK_MatrixCast: - llvm_unreachable("NYI"); - case CK_VectorSplat: - llvm_unreachable("NYI"); - case CK_FixedPointCast: - llvm_unreachable("NYI"); - case CK_FixedPointToBoolean: - llvm_unreachable("NYI"); - case CK_FixedPointToIntegral: - llvm_unreachable("NYI"); - case CK_IntegralToFixedPoint: - llvm_unreachable("NYI"); - - case CK_IntegralCast: { - ScalarConversionOpts Opts; - if (auto *ICE = dyn_cast(CE)) { - if (!ICE->isPartOfExplicitCast()) - Opts = ScalarConversionOpts(CGF.SanOpts); - } - return buildScalarConversion(Visit(E), E->getType(), DestTy, - CE->getExprLoc(), Opts); - } - - case CK_IntegralToFloating: - llvm_unreachable("NYI"); - case CK_FloatingToIntegral: - llvm_unreachable("NYI"); - case CK_FloatingCast: - llvm_unreachable("NYI"); - case CK_FixedPointToFloating: - llvm_unreachable("NYI"); - case CK_FloatingToFixedPoint: - llvm_unreachable("NYI"); - case CK_BooleanToSignedIntegral: - llvm_unreachable("NYI"); - - case CK_IntegralToBoolean: { - return buildIntToBoolConversion(Visit(E), - CGF.getLoc(CE->getSourceRange())); - } - - case CK_PointerToBoolean: - llvm_unreachable("NYI"); - case CK_FloatingToBoolean: - llvm_unreachable("NYI"); - case CK_MemberPointerToBoolean: - llvm_unreachable("NYI"); - case CK_FloatingComplexToReal: - llvm_unreachable("NYI"); - case CK_IntegralComplexToReal: - llvm_unreachable("NYI"); - case CK_FloatingComplexToBoolean: - llvm_unreachable("NYI"); - case CK_IntegralComplexToBoolean: - llvm_unreachable("NYI"); - case CK_ZeroToOCLOpaqueType: - llvm_unreachable("NYI"); - case CK_IntToOCLSampler: - llvm_unreachable("NYI"); - - default: - emitError(CGF.getLoc(CE->getExprLoc()), "cast kind not implemented: '") - << CE->getCastKindName() << "'"; - return nullptr; - } // end of switch - - llvm_unreachable("unknown scalar cast"); - } - - mlir::Value VisitCallExpr(const CallExpr *E) { - assert(!E->getCallReturnType(CGF.getContext())->isReferenceType() && "NYI"); - - auto V = CGF.buildCallExpr(E).getScalarVal(); - - // TODO: buildLValueAlignmentAssumption - return V; - } - - mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { - assert(!llvm::isa(E->getType()) && "not implemented"); - return CGF.buildLValue(E->getSubExpr()).getPointer(); - } - - mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { - mlir::Type Ty = CGF.getCIRType(E->getType()); - return Builder.create( - CGF.getLoc(E->getExprLoc()), Ty, Builder.getBoolAttr(E->getValue())); - } - struct BinOpInfo { mlir::Value LHS; mlir::Value RHS; @@ -500,25 +558,6 @@ class ScalarExprEmitter : public StmtVisitor { E->getExprLoc()); } -#define VISITCOMP(CODE) \ - mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); } - VISITCOMP(LT) - VISITCOMP(GT) - VISITCOMP(LE) - VISITCOMP(GE) - VISITCOMP(EQ) - VISITCOMP(NE) -#undef VISITCOMP - - mlir::Value VisitExpr(Expr *E) { - // Crashing here for "ScalarExprClassName"? Please implement - // VisitScalarExprClassName(...) to get this working. - emitError(CGF.getLoc(E->getExprLoc()), "scalar exp no implemented: '") - << E->getStmtClassName() << "'"; - assert(0 && "shouldn't be here!"); - return {}; - } - mlir::Value buildIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) { // Because of the type rules of C, we often end up computing a // logical value, then zero extending it to int, then wanting it @@ -651,14 +690,6 @@ class ScalarExprEmitter : public StmtVisitor { return Res; } - - // Leaves. - mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { - mlir::Type Ty = CGF.getCIRType(E->getType()); - return Builder.create( - CGF.getLoc(E->getExprLoc()), Ty, - Builder.getIntegerAttr(Ty, E->getValue())); - } }; } // namespace @@ -672,6 +703,193 @@ mlir::Value CIRGenFunction::buildScalarExpr(const Expr *E) { return ScalarExprEmitter(*this, builder).Visit(const_cast(E)); } +// Emit code for an explicit or implicit cast. Implicit +// casts have to handle a more broad range of conversions than explicit +// casts, as they handle things like function to ptr-to-function decay +// etc. +mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { + Expr *E = CE->getSubExpr(); + QualType DestTy = CE->getType(); + CastKind Kind = CE->getCastKind(); + + // These cases are generally not written to ignore the result of evaluating + // their sub-expressions, so we clear this now. + bool Ignored = TestAndClearIgnoreResultAssign(); + (void)Ignored; + + // Since almost all cast kinds apply to scalars, this switch doesn't have a + // default case, so the compiler will warn on a missing case. The cases are + // in the same order as in the CastKind enum. + switch (Kind) { + case clang::CK_Dependent: + llvm_unreachable("dependent cast kind in CIR gen!"); + case clang::CK_BuiltinFnToFnPtr: + llvm_unreachable("builtin functions are handled elsewhere"); + + case CK_LValueBitCast: + llvm_unreachable("NYI"); + case CK_ObjCObjectLValueCast: + llvm_unreachable("NYI"); + case CK_LValueToRValueBitCast: + llvm_unreachable("NYI"); + case CK_CPointerToObjCPointerCast: + llvm_unreachable("NYI"); + case CK_BlockPointerToObjCPointerCast: + llvm_unreachable("NYI"); + case CK_AnyPointerToBlockPointerCast: + llvm_unreachable("NYI"); + case CK_BitCast: + llvm_unreachable("NYI"); + case CK_AddressSpaceConversion: + llvm_unreachable("NYI"); + case CK_AtomicToNonAtomic: + llvm_unreachable("NYI"); + case CK_NonAtomicToAtomic: + llvm_unreachable("NYI"); + case CK_UserDefinedConversion: + llvm_unreachable("NYI"); + case CK_NoOp: + llvm_unreachable("NYI"); + case CK_BaseToDerived: + llvm_unreachable("NYI"); + case CK_DerivedToBase: + llvm_unreachable("NYI"); + case CK_Dynamic: + llvm_unreachable("NYI"); + case CK_ArrayToPointerDecay: + llvm_unreachable("NYI"); + case CK_FunctionToPointerDecay: + llvm_unreachable("NYI"); + + case CK_NullToPointer: { + // FIXME: use MustVisitNullValue(E) and evaluate expr. + // Note that DestTy is used as the MLIR type instead of a custom + // nullptr type. + mlir::Type Ty = CGF.getCIRType(DestTy); + return Builder.create( + CGF.getLoc(E->getExprLoc()), Ty, + mlir::cir::NullAttr::get(Builder.getContext(), Ty)); + } + + case CK_NullToMemberPointer: + llvm_unreachable("NYI"); + case CK_ReinterpretMemberPointer: + llvm_unreachable("NYI"); + case CK_BaseToDerivedMemberPointer: + llvm_unreachable("NYI"); + case CK_DerivedToBaseMemberPointer: + llvm_unreachable("NYI"); + case CK_ARCProduceObject: + llvm_unreachable("NYI"); + case CK_ARCConsumeObject: + llvm_unreachable("NYI"); + case CK_ARCReclaimReturnedObject: + llvm_unreachable("NYI"); + case CK_ARCExtendBlockObject: + llvm_unreachable("NYI"); + case CK_CopyAndAutoreleaseBlockObject: + llvm_unreachable("NYI"); + case CK_FloatingRealToComplex: + llvm_unreachable("NYI"); + case CK_FloatingComplexCast: + llvm_unreachable("NYI"); + case CK_IntegralComplexToFloatingComplex: + llvm_unreachable("NYI"); + case CK_FloatingComplexToIntegralComplex: + llvm_unreachable("NYI"); + case CK_ConstructorConversion: + llvm_unreachable("NYI"); + case CK_ToUnion: + llvm_unreachable("NYI"); + + case CK_LValueToRValue: + assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)); + assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); + return Visit(const_cast(E)); + + case CK_IntegralToPointer: + llvm_unreachable("NYI"); + case CK_PointerToIntegral: + llvm_unreachable("NYI"); + case CK_ToVoid: + llvm_unreachable("NYI"); + case CK_MatrixCast: + llvm_unreachable("NYI"); + case CK_VectorSplat: + llvm_unreachable("NYI"); + case CK_FixedPointCast: + llvm_unreachable("NYI"); + case CK_FixedPointToBoolean: + llvm_unreachable("NYI"); + case CK_FixedPointToIntegral: + llvm_unreachable("NYI"); + case CK_IntegralToFixedPoint: + llvm_unreachable("NYI"); + + case CK_IntegralCast: { + ScalarConversionOpts Opts; + if (auto *ICE = dyn_cast(CE)) { + if (!ICE->isPartOfExplicitCast()) + Opts = ScalarConversionOpts(CGF.SanOpts); + } + return buildScalarConversion(Visit(E), E->getType(), DestTy, + CE->getExprLoc(), Opts); + } + + case CK_IntegralToFloating: + llvm_unreachable("NYI"); + case CK_FloatingToIntegral: + llvm_unreachable("NYI"); + case CK_FloatingCast: + llvm_unreachable("NYI"); + case CK_FixedPointToFloating: + llvm_unreachable("NYI"); + case CK_FloatingToFixedPoint: + llvm_unreachable("NYI"); + case CK_BooleanToSignedIntegral: + llvm_unreachable("NYI"); + + case CK_IntegralToBoolean: { + return buildIntToBoolConversion(Visit(E), CGF.getLoc(CE->getSourceRange())); + } + + case CK_PointerToBoolean: + llvm_unreachable("NYI"); + case CK_FloatingToBoolean: + llvm_unreachable("NYI"); + case CK_MemberPointerToBoolean: + llvm_unreachable("NYI"); + case CK_FloatingComplexToReal: + llvm_unreachable("NYI"); + case CK_IntegralComplexToReal: + llvm_unreachable("NYI"); + case CK_FloatingComplexToBoolean: + llvm_unreachable("NYI"); + case CK_IntegralComplexToBoolean: + llvm_unreachable("NYI"); + case CK_ZeroToOCLOpaqueType: + llvm_unreachable("NYI"); + case CK_IntToOCLSampler: + llvm_unreachable("NYI"); + + default: + emitError(CGF.getLoc(CE->getExprLoc()), "cast kind not implemented: '") + << CE->getCastKindName() << "'"; + return nullptr; + } // end of switch + + llvm_unreachable("unknown scalar cast"); +} + +mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *E) { + assert(!E->getCallReturnType(CGF.getContext())->isReferenceType() && "NYI"); + + auto V = CGF.buildCallExpr(E).getScalarVal(); + + // TODO: buildLValueAlignmentAssumption + return V; +} + /// Emit a conversion from the specified type to the specified destination /// type, both of which are CIR scalar types. mlir::Value CIRGenFunction::buildScalarConversion(mlir::Value Src, From 470e5efb6c78b2b788b7d4ca0b7c2095882aa7d7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 17 May 2022 21:56:05 -0400 Subject: [PATCH 0510/2301] [CIR] Support CXXDefaultInitExprs from ctors This visitor method delegates to Visit(theContainedExpr). All that is requied new is a RAII type for setting and restoring the CXX this and alignment back to the CGF. --- clang/lib/CIR/CIRGenExprScalar.cpp | 5 +++-- clang/lib/CIR/CIRGenFunction.h | 32 ++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 21e412e92f97..c9edcbae7ac1 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -260,8 +260,9 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) { - llvm_unreachable("NYI"); + mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) { + CIRGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE); + return Visit(DIE->getExpr()); } mlir::Value VisitCXXThisExpr(CXXThisExpr *E) { llvm_unreachable("NYI"); } mlir::Value VisitExprWithCleanups(ExprWithCleanups *E) { diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index fc700fbd69de..4c800fc718a6 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -18,6 +18,7 @@ #include "CIRGenValue.h" #include "clang/AST/BaseSubobject.h" +#include "clang/AST/CurrentSourceLocExprScope.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/Type.h" @@ -764,6 +765,12 @@ class CIRGenFunction { const clang::CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs); + /// Source location information about the default argument or member + /// initializer expression we're evaluating, if any. + clang::CurrentSourceLocExprScope CurSourceLocExprScope; + using SourceLocExprScopeGuard = + clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard; + /// A scoep within which we are constructing the fields of an object which /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if /// we need to evaluate the CXXDefaultInitExpr within the evaluation. @@ -782,6 +789,31 @@ class CIRGenFunction { Address OldCXXDefaultInitExprThis; }; + /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this' + /// is overridden to be the object under construction. + class CXXDefaultInitExprScope { + public: + CXXDefaultInitExprScope(CIRGenFunction &CGF, + const clang::CXXDefaultInitExpr *E) + : CGF{CGF}, OldCXXThisValue(CGF.CXXThisValue), + OldCXXThisAlignment(CGF.CXXThisAlignment), + SourceLocScope(E, CGF.CurSourceLocExprScope) { + CGF.CXXThisValue = + CGF.CXXDefaultInitExprThis.getPointer().getDefiningOp(); + CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment(); + } + ~CXXDefaultInitExprScope() { + CGF.CXXThisValue = OldCXXThisValue; + CGF.CXXThisAlignment = OldCXXThisAlignment; + } + + public: + CIRGenFunction &CGF; + mlir::Operation *OldCXXThisValue; + clang::CharUnits OldCXXThisAlignment; + SourceLocExprScopeGuard SourceLocScope; + }; + LValue MakeNaturalAlignPointeeAddrLValue(mlir::Operation *Op, clang::QualType T); From 4c864981aec195387ff1e828e7072d0842109f35 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 17 May 2022 21:57:04 -0400 Subject: [PATCH 0511/2301] [CIR] Add a CXXDefaultInitExpr via `char *storage=nullptr` to String.cpp I actually don't think we'll use this for the final version since we want the constructor memberwise inits to cover it. But add a test for it anyways. --- clang/test/CIR/CodeGen/String.cpp | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index beb0e9eb2407..1cdfc446d260 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s class String { - char *storage; + char *storage{nullptr}; long size; long capacity; @@ -19,10 +19,13 @@ void test() { // CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 -// CHECK-NEXT: %2 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr -// CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 -// CHECK-NEXT: %4 = cir.cast(integral, %3 : i32), i64 -// CHECK-NEXT: cir.store %4, %2 : i64, cir.ptr +// CHECK-NEXT: %2 = "cir.struct_element_addr"(%0) <{member_name = "storage"}> +// CHECK-NEXT: %3 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %4 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr +// CHECK-NEXT: %5 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %6 = cir.cast(integral, %5 : i32), i64 +// CHECK-NEXT: cir.store %6, %4 : i64, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: func @_ZN6StringC2Ei @@ -31,9 +34,12 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: cir.store %arg1, %1 // CHECK-NEXT: %2 = cir.load %0 -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr -// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cast(integral, %4 : i32), i64 -// CHECK-NEXT: cir.store %5, %3 : i64, cir.ptr +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "storage"}> +// CHECK-NEXT: %4 = cir.cst(#cir.null : !cir.ptr) +// CHECK-NEXT: cir.store %4, %3 +// CHECK-NEXT: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr +// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: %7 = cir.cast(integral, %6 : i32), i64 +// CHECK-NEXT: cir.store %7, %5 : i64, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } From 8bab2f8f7e6fd27370c895dd10558d79643b67a5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 17 May 2022 17:35:57 -0700 Subject: [PATCH 0512/2301] [CIR][NFC] Isolate GlobalOp creation through a proxy function to unique assertion on bad cases --- clang/lib/CIR/CIRGenModule.cpp | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 5372b3e366fd..265489d4d57a 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -358,6 +358,19 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, /// FIXME: implement mlir::cir::GlobalOp CIRGenModule::getGlobalValue(StringRef Name) { return {}; } +static mlir::cir::GlobalOp createGlobalOp(CIRGenModule &CGM, mlir::Location loc, + StringRef name, mlir::Type t, + bool isCst = false) { + auto &builder = CGM.getBuilder(); + // TODO(cir): when/if this hits a case where globals need to be emitted while + // emitting things in a function, do a save/restore insertion dance. + assert(!builder.getInsertionBlock() && + "Globals shall only be added at the module level"); + auto g = builder.create(loc, name, t, isCst); + CGM.getModule().push_back(g); + return g; +} + /// If the specified mangled name is not in the module, /// create and return an mlir GlobalOp with the specified type (TODO(cir): /// address space). @@ -441,9 +454,8 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // mlir::SymbolTable::Visibility::Public is the default, no need to explicitly // mark it as such. - auto GV = builder.create(loc, MangledName, Ty, - /*isConstant=*/false); - theModule.push_back(GV); + auto GV = createGlobalOp(*this, loc, MangledName, Ty, + /*isConstant=*/false); // If we already created a global with the same mangled name (but different // type) before, take its name and remove it from its parent. @@ -883,16 +895,14 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, // Create a global variable for this string // FIXME(cir): check for insertion point in module level. - auto GV = CGM.getBuilder().create( - loc, GlobalName, C.getType(), !CGM.getLangOpts().WritableStrings); + auto GV = createGlobalOp(CGM, loc, GlobalName, C.getType(), + !CGM.getLangOpts().WritableStrings); // Set up extra information and add to the module GV.setAlignmentAttr(CGM.getAlignment(Alignment)); mlir::SymbolTable::setSymbolVisibility(GV, LT); GV.setInitialValueAttr(C); - CGM.getModule().push_back(GV); - // TODO(cir) assert(!cir::UnimplementedFeature::threadLocal() && "NYI"); assert(!cir::UnimplementedFeature::unnamedAddr() && "NYI"); From 9e4be4d0bf550647534011f25715a6cee3769279 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 May 2022 10:55:23 -0700 Subject: [PATCH 0513/2301] [CIR][CodeGen][NFC] Add helpers before resolving global addresses --- clang/lib/CIR/CIRGenCXXABI.h | 4 ++ clang/lib/CIR/CIRGenExpr.cpp | 13 +++++ clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 61 ++++++++++++++++++++ clang/lib/CIR/UnimplementedFeatureGuarding.h | 3 + 4 files changed, 81 insertions(+) diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CIRGenCXXABI.h index 0d4bb51831b5..5be888078891 100644 --- a/clang/lib/CIR/CIRGenCXXABI.h +++ b/clang/lib/CIR/CIRGenCXXABI.h @@ -183,6 +183,10 @@ class CIRGenCXXABI { void setCXXABIThisValue(CIRGenFunction &CGF, mlir::Operation *ThisPtr); + // Determine if references to thread_local global variables can be made + // directly or require access through a thread wrapper function. + virtual bool usesThreadWrapperFunction(const VarDecl *VD) const = 0; + /// Emit a single constructor/destructor with the gien type from a C++ /// constructor Decl. virtual void buildCXXStructor(clang::GlobalDecl GD) = 0; diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 241b00fd17b7..8ca2b1ffdab9 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -1,3 +1,16 @@ +//===--- CIRGenExpr.cpp - Emit LLVM Code from Expressions -----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Expr nodes as CIR code. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" #include "CIRGenCall.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 2d37098bb70d..f77464fa6170 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -21,6 +21,7 @@ #include "CIRGenFunctionInfo.h" #include "clang/AST/GlobalDecl.h" +#include "clang/Basic/Linkage.h" #include "clang/Basic/TargetInfo.h" using namespace cir; @@ -100,6 +101,66 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { void buildCXXStructor(clang::GlobalDecl GD) override; + /// TODO(cir): seems like could be shared between LLVM IR and CIR codegen. + bool mayNeedDestruction(const VarDecl *VD) const { + if (VD->needsDestruction(getContext())) + return true; + + // If the variable has an incomplete class type (or array thereof), it + // might need destruction. + const Type *T = VD->getType()->getBaseElementTypeUnsafe(); + if (T->getAs() && T->isIncompleteType()) + return true; + + return false; + } + + /// Determine whether we will definitely emit this variable with a constant + /// initializer, either because the language semantics demand it or because + /// we know that the initializer is a constant. + /// For weak definitions, any initializer available in the current translation + /// is not necessarily reflective of the initializer used; such initializers + /// are ignored unless if InspectInitForWeakDef is true. + /// TODO(cir): seems like could be shared between LLVM IR and CIR codegen. + bool + isEmittedWithConstantInitializer(const VarDecl *VD, + bool InspectInitForWeakDef = false) const { + VD = VD->getMostRecentDecl(); + if (VD->hasAttr()) + return true; + + // All later checks examine the initializer specified on the variable. If + // the variable is weak, such examination would not be correct. + if (!InspectInitForWeakDef && + (VD->isWeak() || VD->hasAttr())) + return false; + + const VarDecl *InitDecl = VD->getInitializingDeclaration(); + if (!InitDecl) + return false; + + // If there's no initializer to run, this is constant initialization. + if (!InitDecl->hasInit()) + return true; + + // If we have the only definition, we don't need a thread wrapper if we + // will emit the value as a constant. + if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD))) + return !mayNeedDestruction(VD) && InitDecl->evaluateValue(); + + // Otherwise, we need a thread wrapper unless we know that every + // translation unit will emit the value as a constant. We rely on the + // variable being constant-initialized in every translation unit if it's + // constant-initialized in any translation unit, which isn't actually + // guaranteed by the standard but is necessary for sanity. + return InitDecl->hasConstantInitialization(); + } + + // TODO(cir): seems like could be shared between LLVM IR and CIR codegen. + bool usesThreadWrapperFunction(const VarDecl *VD) const override { + return !isEmittedWithConstantInitializer(VD) || mayNeedDestruction(VD); + } + bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { return true; } diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 3f1bf4239a92..53e2885bae4e 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -37,6 +37,9 @@ struct UnimplementedFeature { // Sanitizers static bool reportGlobalToASan() { return false; } + + // ObjC + static bool setObjCGCLValueClass() { return false; } }; } // namespace cir From cfdfcfc0a658066a98f87791b2899c72aef2d571 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 May 2022 11:31:44 -0700 Subject: [PATCH 0514/2301] [CIR][CodeGen] Globals: Support load/store from/to globals using cir.get_global --- clang/lib/CIR/CIRGenExpr.cpp | 38 +++++++++++++++++++- clang/lib/CIR/CIRGenModule.cpp | 34 ++++++++++-------- clang/lib/CIR/CIRGenModule.h | 2 +- clang/lib/CIR/UnimplementedFeatureGuarding.h | 5 +-- clang/test/CIR/CodeGen/globals.cpp | 10 ++++++ 5 files changed, 71 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 8ca2b1ffdab9..7a23bade84fe 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -268,6 +268,42 @@ void CIRGenFunction::buldStoreThroughLValue(RValue Src, LValue Dst, buildStoreOfScalar(Src.getScalarVal(), Dst, InitDecl); } +static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, + const VarDecl *VD) { + QualType T = E->getType(); + + // If it's thread_local, emit a call to its wrapper function instead. + if (VD->getTLSKind() == VarDecl::TLS_Dynamic && + CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD)) + assert(0 && "not implemented"); + + // Check if the variable is marked as declare target with link clause in + // device codegen. + if (CGF.getLangOpts().OpenMP) { + assert(0 && "not implemented"); + } + + auto V = CGF.CGM.getAddrOfGlobalVar(VD); + auto RealVarTy = CGF.getTypes().convertTypeForMem(VD->getType()); + // TODO(cir): do we need this for CIR? + // V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); + CharUnits Alignment = CGF.getContext().getDeclAlign(VD); + Address Addr(V, RealVarTy, Alignment); + // Emit reference to the private copy of the variable if it is an OpenMP + // threadprivate variable. + if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd && + VD->hasAttr()) { + assert(0 && "NYI"); + } + LValue LV; + if (VD->getType()->isReferenceType()) + assert(0 && "NYI"); + else + LV = CGF.makeAddrLValue(Addr, T, AlignmentSource::Decl); + assert(!UnimplementedFeature::setObjCGCLValueClass() && "NYI"); + return LV; +} + LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { const NamedDecl *ND = E->getDecl(); QualType T = E->getType(); @@ -296,7 +332,7 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { if (const auto *VD = dyn_cast(ND)) { // Check if this is a global variable if (VD->hasLinkage() || VD->isStaticDataMember()) - llvm_unreachable("not implemented"); + return buildGlobalVarDeclLValue(*this, E, VD); Address addr = Address::invalid(); diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 265489d4d57a..ff067df3cd27 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -355,8 +355,13 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, assert(!D->getAttr() && "NYI"); } -/// FIXME: implement -mlir::cir::GlobalOp CIRGenModule::getGlobalValue(StringRef Name) { return {}; } +mlir::cir::GlobalOp CIRGenModule::getGlobalValue(StringRef Name) { + auto global = mlir::SymbolTable::lookupSymbolIn(theModule, Name); + if (!global) + return {}; + assert(isa(global) && "not supported"); + return cast(global); +} static mlir::cir::GlobalOp createGlobalOp(CIRGenModule &CGM, mlir::Location loc, StringRef name, mlir::Type t, @@ -395,22 +400,22 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // unsigned TargetAS = astCtx.getTargetAddressSpace(AddrSpace); if (Entry) { if (WeakRefReferences.erase(Entry)) { - assert(0 && "not implemented"); - // if (D && !D->hasAttr()) - // Entry->setLinkage(llvm::Function::ExternalLinkage); + if (D && !D->hasAttr()) + mlir::SymbolTable::setSymbolVisibility( + Entry, mlir::SymbolTable::Visibility::Public); } // Handle dropped DLL attributes. - // FIXME: Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); if (D && !D->hasAttr() && !D->hasAttr()) - assert(0 && "not implemented"); + assert(!UnimplementedFeature::setDLLStorageClass() && "NYI"); if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && D) assert(0 && "not implemented"); - // TODO(cir): check address space matches - if (Entry.getSymType() == Ty) + // TODO(cir): check TargetAS matches Entry address space + if (Entry.getSymType() == Ty && + !UnimplementedFeature::addressSpaceInGlobalVar()) return Entry; // If there are two attempts to define the same mangled name, issue an @@ -556,10 +561,10 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, std::optional Ty, ForDefinition_t IsForDefinition) { auto g = buildGlobal(D, Ty, IsForDefinition); - (void)g; - // FIXME: create an operation to get the address of the global. - assert(0 && "not implemented"); - return {}; + auto ptrTy = + mlir::cir::PointerType::get(builder.getContext(), g.getSymType()); + return builder.create(getLoc(D->getSourceRange()), + ptrTy, g.getSymName()); } /// TODO(cir): looks like part of this code can be part of a common AST @@ -890,7 +895,8 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, StringRef GlobalName, CharUnits Alignment) { unsigned AddrSpace = CGM.getASTContext().getTargetAddressSpace( CGM.getGlobalConstantAddressSpace()); - assert((AddrSpace == 0 && !cir::UnimplementedFeature::addressSpace()) && + assert((AddrSpace == 0 && + !cir::UnimplementedFeature::addressSpaceInGlobalVar()) && "NYI"); // Create a global variable for this string diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 3eeb10cd279c..5ecf7e9f9d82 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -171,7 +171,7 @@ class CIRGenModule { /// global with type Ty will be returned, not conversion of a variable with /// the same mangled name but some other type. mlir::Value - getAddrOfGlobalVar(const VarDecl *D, std::optional Ty, + getAddrOfGlobalVar(const VarDecl *D, std::optional Ty = {}, ForDefinition_t IsForDefinition = NotForDefinition); llvm::DenseMap ConstantStringMap; diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 53e2885bae4e..25491d518ab6 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -27,13 +27,14 @@ struct UnimplementedFeature { static bool cirVectorType() { return false; } // CIR still unware of address space - static bool addressSpace() { return false; } + static bool addressSpaceInGlobalVar() { return false; } - // Unhandled global information. + // Unhandled global/linkage information. static bool unnamedAddr() { return false; } static bool isWeakForLinker() { return false; } static bool setDSOLocal() { return false; } static bool threadLocal() { return false; } + static bool setDLLStorageClass() { return false; } // Sanitizers static bool reportGlobalToASan() { return false; } diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 526933c77287..b6eedce08479 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -15,6 +15,10 @@ const char *s = "example"; const char *s1 = "example1"; const char *s2 = "example"; +void use_global() { + int li = a; +} + // CHECK: module { // CHECK-NEXT: cir.global @a = 3 : i32 // CHECK-NEXT: cir.global @c = 2 : i64 @@ -31,3 +35,9 @@ const char *s2 = "example"; // CHECK-NEXT: cir.global @s1 = @".str1": !cir.ptr // CHECK-NEXT: cir.global @s2 = @".str": !cir.ptr + +// CHECK: func @_Z10use_globalv() { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["li", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.get_global @a : cir.ptr +// CHECK-NEXT: %2 = cir.load %1 : cir.ptr , i32 +// CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr From 66d817ee26cbe47f54e15281be17024cdbd53143 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 May 2022 16:50:10 -0700 Subject: [PATCH 0515/2301] [CIR][CodeGen][NFC] Make array subscript codegen more generic Now that we are building up on top of the basics for arrays, make the codegen more generic and allow us to later reuse more logic. --- clang/lib/CIR/CIRGenExpr.cpp | 163 +++++++++++++++++-- clang/lib/CIR/CIRGenModule.cpp | 9 +- clang/lib/CIR/CIRGenModule.h | 2 +- clang/lib/CIR/UnimplementedFeatureGuarding.h | 4 + clang/test/CIR/CodeGen/array.cpp | 21 ++- 5 files changed, 164 insertions(+), 35 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 7a23bade84fe..60d645eb2f64 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -668,6 +668,134 @@ static const Expr *isSimpleArrayDecayOperand(const Expr *E) { return SubExpr; } +/// Given an array base, check whether its member access belongs to a record +/// with preserve_access_index attribute or not. +/// TODO(cir): don't need to be specific to LLVM's codegen, refactor into common +/// AST helpers. +static bool isPreserveAIArrayBase(CIRGenFunction &CGF, const Expr *ArrayBase) { + if (!ArrayBase || !CGF.getDebugInfo()) + return false; + + // Only support base as either a MemberExpr or DeclRefExpr. + // DeclRefExpr to cover cases like: + // struct s { int a; int b[10]; }; + // struct s *p; + // p[1].a + // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr. + // p->b[5] is a MemberExpr example. + const Expr *E = ArrayBase->IgnoreImpCasts(); + if (const auto *ME = dyn_cast(E)) + return ME->getMemberDecl()->hasAttr(); + + if (const auto *DRE = dyn_cast(E)) { + const auto *VarDef = dyn_cast(DRE->getDecl()); + if (!VarDef) + return false; + + const auto *PtrT = VarDef->getType()->getAs(); + if (!PtrT) + return false; + + const auto *PointeeT = + PtrT->getPointeeType()->getUnqualifiedDesugaredType(); + if (const auto *RecT = dyn_cast(PointeeT)) + return RecT->getDecl()->hasAttr(); + return false; + } + + return false; +} + +static mlir::IntegerAttr getConstantIndexOrNull(mlir::Value idx) { + // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr? + if (auto constantOp = dyn_cast(idx.getDefiningOp())) + return constantOp.getValue().dyn_cast(); + return {}; +} + +static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, + CharUnits eltSize) { + // If we have a constant index, we can use the exact offset of the + // element we're accessing. + auto constantIdx = getConstantIndexOrNull(idx); + if (constantIdx) { + CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize; + return arrayAlign.alignmentAtOffset(offset); + // Otherwise, use the worst-case alignment for any element. + } else { + return arrayAlign.alignmentOfArrayElement(eltSize); + } +} + +static mlir::Value buildArrayAccessOp(mlir::OpBuilder &builder, + mlir::Location arrayLocBegin, + mlir::Location arrayLocEnd, + mlir::Value arrayPtr, mlir::Value idx) { + auto arrayPtrTy = arrayPtr.getType().dyn_cast<::mlir::cir::PointerType>(); + assert(arrayPtrTy && "expected pointer type"); + auto arrayTy = arrayPtrTy.getPointee().dyn_cast<::mlir::cir::ArrayType>(); + assert(arrayTy && "expected array type"); + + auto flatPtrTy = + mlir::cir::PointerType::get(builder.getContext(), arrayTy.getEltType()); + auto basePtr = builder.create( + arrayLocBegin, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, + arrayPtr); + + return builder.create(arrayLocEnd, flatPtrTy, basePtr, + idx); +} + +static mlir::Value buildArraySubscriptPtr( + CIRGenFunction &CGF, mlir::Location beginLoc, mlir::Location endLoc, + mlir::Value ptr, ArrayRef indices, bool inbounds, + bool signedIndices, const llvm::Twine &name = "arrayidx") { + assert(indices.size() == 1 && "cannot handle multiple indices yet"); + auto idx = indices.back(); + auto &CGM = CGF.getCIRGenModule(); + // TODO(cir): LLVM codegen emits in bound gep check here, is there anything + // that would enhance tracking this later in CIR? + if (inbounds) + assert(!UnimplementedFeature::emitCheckedInBoundsGEP() && "NYI"); + return buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, ptr, idx); +} + +static Address buildArraySubscriptPtr( + CIRGenFunction &CGF, mlir::Location beginLoc, mlir::Location endLoc, + Address addr, ArrayRef indices, QualType eltType, + bool inbounds, bool signedIndices, mlir::Location loc, + QualType *arrayType = nullptr, const Expr *Base = nullptr, + const llvm::Twine &name = "arrayidx") { + // Determine the element size of the statically-sized base. This is + // the thing that the indices are expressed in terms of. + if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) { + assert(0 && "not implemented"); + } + + // We can use that to compute the best alignment of the element. + CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType); + CharUnits eltAlign = + getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize); + + mlir::Value eltPtr; + auto LastIndex = getConstantIndexOrNull(indices.back()); + if (!LastIndex || + (!CGF.IsInPreservedAIRegion && !isPreserveAIArrayBase(CGF, Base))) { + eltPtr = buildArraySubscriptPtr(CGF, beginLoc, endLoc, addr.getPointer(), + indices, inbounds, signedIndices, name); + } else { + // assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + // assert(indices.size() == 1 && "cannot handle multiple indices yet"); + // auto idx = indices.back(); + // auto &CGM = CGF.getCIRGenModule(); + // eltPtr = buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, + // addr.getPointer(), idx); + assert(0 && "NYI"); + } + + return Address(eltPtr, CGF.getTypes().convertTypeForMem(eltType), eltAlign); +} + LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed) { // The index must always be an integer, which is not an aggregate. Emit it @@ -736,28 +864,27 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, "multidimensional array indexing not implemented"); ArrayLV = buildLValue(Array); - auto arrayPtrTy = - ArrayLV.getPointer().getType().dyn_cast<::mlir::cir::PointerType>(); - assert(arrayPtrTy && "expected pointer type"); - auto arrayTy = arrayPtrTy.getPointee().dyn_cast<::mlir::cir::ArrayType>(); - assert(arrayTy && "expected array type"); - - auto flatPtrTy = - mlir::cir::PointerType::get(builder.getContext(), arrayTy.getEltType()); - auto loc = getLoc(Array->getBeginLoc()); - auto basePtr = builder.create( - loc, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, - ArrayLV.getPointer()); - - loc = getLoc(Array->getEndLoc()); - auto stride = builder.create( - loc, flatPtrTy, basePtr, EmitIdxAfterBase(/*Promote=*/true)); + auto Idx = EmitIdxAfterBase(/*Promote=*/true); + QualType arrayType = Array->getType(); + // Propagate the alignment from the array itself to the result. - Addr = Address(stride.getResult(), ArrayLV.getAlignment()); + Addr = buildArraySubscriptPtr( + *this, CGM.getLoc(Array->getBeginLoc()), CGM.getLoc(Array->getEndLoc()), + ArrayLV.getAddress(), {Idx}, E->getType(), + !getLangOpts().isSignedOverflowDefined(), SignedIndices, + CGM.getLoc(E->getExprLoc()), &arrayType, E->getBase()); EltBaseInfo = ArrayLV.getBaseInfo(); // TODO: EltTBAAInfo } else { // The base must be a pointer; emit it with an estimate of its alignment. + // TODO(cir): EltTBAAInfo + Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo); + // auto Idx = EmitIdxAfterBase(/*Promote*/ true); + // QualType ptrType = E->getBase()->getType(); + // Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(), + // !getLangOpts().isSignedOverflowDefined(), + // SignedIndices, E->getExprLoc(), &ptrType, + // E->getBase()); assert(0 && "not implemented"); } @@ -890,7 +1017,7 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, auto localVarTy = getCIRType(ty); auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), localVarTy); - auto alignIntAttr = CGM.getAlignment(alignment); + auto alignIntAttr = CGM.getSize(alignment); mlir::Value addr; { diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index ff067df3cd27..1126dac8e1ff 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -905,7 +905,7 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, !CGM.getLangOpts().WritableStrings); // Set up extra information and add to the module - GV.setAlignmentAttr(CGM.getAlignment(Alignment)); + GV.setAlignmentAttr(CGM.getSize(Alignment)); mlir::SymbolTable::setSymbolVisibility(GV, LT); GV.setInitialValueAttr(C); @@ -950,7 +950,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, // The bigger alignment always wins. if (!g.getAlignment() || uint64_t(Alignment.getQuantity()) > *g.getAlignment()) - g.setAlignmentAttr(getAlignment(Alignment)); + g.setAlignmentAttr(getSize(Alignment)); return mlir::SymbolRefAttr::get( castStringLiteralToDefaultAddressSpace(*this, g.getSymNameAttr())); } @@ -1448,10 +1448,9 @@ void CIRGenModule::buildDeferred() { } } -mlir::IntegerAttr CIRGenModule::getAlignment(CharUnits &alignment) { +mlir::IntegerAttr CIRGenModule::getSize(CharUnits size) { return mlir::IntegerAttr::get( - mlir::IntegerType::get(builder.getContext(), 64), - alignment.getQuantity()); + mlir::IntegerType::get(builder.getContext(), 64), size.getQuantity()); } // TODO: this is gross, make a map diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 5ecf7e9f9d82..2ed511262468 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -202,7 +202,7 @@ class CIRGenModule { mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs); /// Helper to convert Clang's alignment to CIR alignment - mlir::IntegerAttr getAlignment(CharUnits &alignment); + mlir::IntegerAttr getSize(CharUnits size); /// Determine whether an object of this type can be emitted /// as a constant. diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 25491d518ab6..2db4e364d1a1 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -38,9 +38,13 @@ struct UnimplementedFeature { // Sanitizers static bool reportGlobalToASan() { return false; } + static bool emitCheckedInBoundsGEP() { return false; } // ObjC static bool setObjCGCLValueClass() { return false; } + + // Debug info + static bool generateDebugInfo() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index e2fc1ae88056..4901e51cf037 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -14,12 +14,12 @@ void a1() { } // CHECK: func @_Z2a1v() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} -// CHECK-NEXT: %1 = cir.cst(1 : i32) : i32 -// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 -// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr -// CHECK-NEXT: cir.store %1, %4 : i32, cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} +// CHECK-NEXT: %1 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %2 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : i32), !cir.ptr +// CHECK-NEXT: cir.store %1, %4 : i32, cir.ptr int *a2() { int a[4]; @@ -29,10 +29,9 @@ int *a2() { // CHECK: func @_Z2a2v() -> !cir.ptr { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} -// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 -// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr +// CHECK-NEXT: %2 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : i32), !cir.ptr // CHECK-NEXT: cir.store %4, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %5 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: cir.return %5 : !cir.ptr -// CHECK: } +// CHECK-NEXT: cir.return %5 : !cir.ptr \ No newline at end of file From 983a93226cdc3d14045a8cd2161bbb6e04f79b2a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 May 2022 18:36:37 -0700 Subject: [PATCH 0516/2301] [CIR][CodeGen] Array subscript now also works with flat pointers --- clang/lib/CIR/CIRGenExpr.cpp | 49 ++++++++++++++++++------------ clang/lib/CIR/CIRGenExprScalar.cpp | 8 +++-- clang/test/CIR/CodeGen/globals.cpp | 13 ++++++++ 3 files changed, 47 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 60d645eb2f64..ce4306e14f0e 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -730,17 +730,25 @@ static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, static mlir::Value buildArrayAccessOp(mlir::OpBuilder &builder, mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, - mlir::Value arrayPtr, mlir::Value idx) { + mlir::Value arrayPtr, mlir::Type eltTy, + mlir::Value idx) { auto arrayPtrTy = arrayPtr.getType().dyn_cast<::mlir::cir::PointerType>(); assert(arrayPtrTy && "expected pointer type"); auto arrayTy = arrayPtrTy.getPointee().dyn_cast<::mlir::cir::ArrayType>(); - assert(arrayTy && "expected array type"); - auto flatPtrTy = - mlir::cir::PointerType::get(builder.getContext(), arrayTy.getEltType()); - auto basePtr = builder.create( - arrayLocBegin, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, - arrayPtr); + mlir::cir::PointerType flatPtrTy; + mlir::Value basePtr = arrayPtr; + if (arrayTy) { + flatPtrTy = + mlir::cir::PointerType::get(builder.getContext(), arrayTy.getEltType()); + basePtr = builder.create( + arrayLocBegin, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, + arrayPtr); + } else { + assert(arrayPtrTy.getPointee() == eltTy && + "flat pointee type must match original array element type"); + flatPtrTy = arrayPtrTy; + } return builder.create(arrayLocEnd, flatPtrTy, basePtr, idx); @@ -748,8 +756,8 @@ static mlir::Value buildArrayAccessOp(mlir::OpBuilder &builder, static mlir::Value buildArraySubscriptPtr( CIRGenFunction &CGF, mlir::Location beginLoc, mlir::Location endLoc, - mlir::Value ptr, ArrayRef indices, bool inbounds, - bool signedIndices, const llvm::Twine &name = "arrayidx") { + mlir::Value ptr, mlir::Type eltTy, ArrayRef indices, + bool inbounds, bool signedIndices, const llvm::Twine &name = "arrayidx") { assert(indices.size() == 1 && "cannot handle multiple indices yet"); auto idx = indices.back(); auto &CGM = CGF.getCIRGenModule(); @@ -757,7 +765,8 @@ static mlir::Value buildArraySubscriptPtr( // that would enhance tracking this later in CIR? if (inbounds) assert(!UnimplementedFeature::emitCheckedInBoundsGEP() && "NYI"); - return buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, ptr, idx); + return buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, ptr, eltTy, + idx); } static Address buildArraySubscriptPtr( @@ -782,14 +791,16 @@ static Address buildArraySubscriptPtr( if (!LastIndex || (!CGF.IsInPreservedAIRegion && !isPreserveAIArrayBase(CGF, Base))) { eltPtr = buildArraySubscriptPtr(CGF, beginLoc, endLoc, addr.getPointer(), - indices, inbounds, signedIndices, name); + addr.getElementType(), indices, inbounds, + signedIndices, name); } else { // assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); // assert(indices.size() == 1 && "cannot handle multiple indices yet"); // auto idx = indices.back(); // auto &CGM = CGF.getCIRGenModule(); // eltPtr = buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, - // addr.getPointer(), idx); + // addr.getPointer(), addr.getElementType(), + // idx); assert(0 && "NYI"); } @@ -879,17 +890,15 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // The base must be a pointer; emit it with an estimate of its alignment. // TODO(cir): EltTBAAInfo Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo); - // auto Idx = EmitIdxAfterBase(/*Promote*/ true); - // QualType ptrType = E->getBase()->getType(); - // Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(), - // !getLangOpts().isSignedOverflowDefined(), - // SignedIndices, E->getExprLoc(), &ptrType, - // E->getBase()); - assert(0 && "not implemented"); + auto Idx = EmitIdxAfterBase(/*Promote*/ true); + QualType ptrType = E->getBase()->getType(); + Addr = buildArraySubscriptPtr( + *this, CGM.getLoc(E->getBeginLoc()), CGM.getLoc(E->getEndLoc()), Addr, + Idx, E->getType(), !getLangOpts().isSignedOverflowDefined(), + SignedIndices, CGM.getLoc(E->getExprLoc()), &ptrType, E->getBase()); } LValue LV = LValue::makeAddr(Addr, E->getType(), EltBaseInfo); - if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) { assert(0 && "not implemented"); } diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index c9edcbae7ac1..793ddbbd8b32 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -627,10 +627,12 @@ class ScalarExprEmitter : public StmtVisitor { assert(0 && "not implemented"); } - // LLVM codegen ignore conversions like int -> uint, we should probably - // emit it here in case lowering to sanitizers dialect at some point. + // TODO(cir): LLVM codegen ignore conversions like int -> uint, + // is there anything to be done for CIR here? if (SrcTy == DstTy) { - assert(0 && "not implemented"); + if (Opts.EmitImplicitIntegerSignChangeChecks) + assert(0 && "not implemented"); + return Src; } // Handle pointer conversions next: pointers can only be converted to/from diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index b6eedce08479..38bebb93c010 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -19,6 +19,10 @@ void use_global() { int li = a; } +void use_global_string() { + unsigned char c = s2[0]; +} + // CHECK: module { // CHECK-NEXT: cir.global @a = 3 : i32 // CHECK-NEXT: cir.global @c = 2 : i64 @@ -41,3 +45,12 @@ void use_global() { // CHECK-NEXT: %1 = cir.get_global @a : cir.ptr // CHECK-NEXT: %2 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr + +// CHECK: func @_Z17use_global_stringv() { +// CHECK-NEXT: %0 = cir.alloca i8, cir.ptr , ["c", cinit] {alignment = 1 : i64} +// CHECK-NEXT: %1 = cir.get_global @s2 : cir.ptr > +// CHECK-NEXT: %2 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr +// CHECK-NEXT: %5 = cir.load %4 : cir.ptr , i8 +// CHECK-NEXT: cir.store %5, %0 : i8, cir.ptr From 037a08a8c87e47beab3359ef13c822a7b6857fff Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 19 May 2022 23:11:03 -0700 Subject: [PATCH 0517/2301] [CIR][CodeGen] Globals: support emitting globals triggered during function codegen --- clang/lib/CIR/CIRGenCXX.cpp | 5 ++++- clang/lib/CIR/CIRGenModule.cpp | 22 ++++++++++++++++------ clang/lib/CIR/CIRGenModule.h | 1 + 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CIRGenCXX.cpp b/clang/lib/CIR/CIRGenCXX.cpp index 961c84307c7f..184eb8937ce0 100644 --- a/clang/lib/CIR/CIRGenCXX.cpp +++ b/clang/lib/CIR/CIRGenCXX.cpp @@ -26,7 +26,10 @@ mlir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { /*DontDefer=*/true, ForDefinition); // TODO: setFunctionLinkage - CIRGenFunction(*this, builder).generateCode(GD, Fn, FnInfo); + CIRGenFunction CGF{*this, builder}; + CurCGF = &CGF; + CGF.generateCode(GD, Fn, FnInfo); + CurCGF = nullptr; // TODO: setNonAliasAttributes // TODO: SetLLVMFunctionAttributesForDefinition diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 1126dac8e1ff..2483b976ac86 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -366,13 +366,23 @@ mlir::cir::GlobalOp CIRGenModule::getGlobalValue(StringRef Name) { static mlir::cir::GlobalOp createGlobalOp(CIRGenModule &CGM, mlir::Location loc, StringRef name, mlir::Type t, bool isCst = false) { + mlir::cir::GlobalOp g; auto &builder = CGM.getBuilder(); - // TODO(cir): when/if this hits a case where globals need to be emitted while - // emitting things in a function, do a save/restore insertion dance. - assert(!builder.getInsertionBlock() && - "Globals shall only be added at the module level"); - auto g = builder.create(loc, name, t, isCst); - CGM.getModule().push_back(g); + { + mlir::OpBuilder::InsertionGuard guard(builder); + + // Some global emissions are triggered while emitting a function, e.g. + // void s() { const char *s = "yolo"; ... } + // + // Be sure to insert global before the current function + auto *curCGF = CGM.getCurrCIRGenFun(); + if (curCGF) + builder.setInsertionPoint(curCGF->CurFn.getOperation()); + + g = builder.create(loc, name, t, isCst); + if (!curCGF) + CGM.getModule().push_back(g); + } return g; } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 2ed511262468..bddd19c820c6 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -118,6 +118,7 @@ class CIRGenModule { clang::DiagnosticsEngine &getDiags() const { return Diags; } CIRGenTypes &getTypes() { return genTypes; } const clang::LangOptions &getLangOpts() const { return langOpts; } + CIRGenFunction *getCurrCIRGenFun() const { return CurCGF; } CIRGenCXXABI &getCXXABI() const { return *ABI; } From 29ed5417cef37b8711912850c048f03ba9498c88 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 19 May 2022 23:12:42 -0700 Subject: [PATCH 0518/2301] [CIR][CodeGen] Support local use of global constant strings --- clang/lib/CIR/CIRGenExpr.cpp | 104 ++++++++++++++++++++++++----- clang/lib/CIR/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/CIRGenFunction.h | 23 ++++--- clang/test/CIR/CodeGen/array.cpp | 14 +++- 4 files changed, 114 insertions(+), 29 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index ce4306e14f0e..05eb7c4a3321 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -651,6 +651,68 @@ void CIRGenFunction::buildIgnoredExpr(const Expr *E) { buildLValue(E); } +static mlir::Value maybeBuildArrayDecay(mlir::OpBuilder &builder, + mlir::Location loc, + mlir::Value arrayPtr, + mlir::Type eltTy) { + auto arrayPtrTy = arrayPtr.getType().dyn_cast<::mlir::cir::PointerType>(); + assert(arrayPtrTy && "expected pointer type"); + auto arrayTy = arrayPtrTy.getPointee().dyn_cast<::mlir::cir::ArrayType>(); + + if (arrayTy) { + mlir::cir::PointerType flatPtrTy = + mlir::cir::PointerType::get(builder.getContext(), arrayTy.getEltType()); + return builder.create( + loc, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, arrayPtr); + } + + assert(arrayPtrTy.getPointee() == eltTy && + "flat pointee type must match original array element type"); + return arrayPtr; +} + +Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, + LValueBaseInfo *BaseInfo) { + assert(E->getType()->isArrayType() && + "Array to pointer decay must have array source type!"); + + // Expressions of array type can't be bitfields or vector elements. + LValue LV = buildLValue(E); + Address Addr = LV.getAddress(); + + // If the array type was an incomplete type, we need to make sure + // the decay ends up being the right type. + auto lvalueAddrTy = + Addr.getPointer().getType().dyn_cast(); + assert(lvalueAddrTy && "expected pointer"); + + auto pointeeTy = lvalueAddrTy.getPointee().dyn_cast(); + assert(pointeeTy && "expected array"); + + mlir::Type arrayTy = convertType(E->getType()); + assert(arrayTy.isa() && "expected array"); + assert(pointeeTy == arrayTy); + + // TODO(cir): in LLVM codegen VLA pointers are always decayed, so we don't + // need to do anything here. Revisit this for VAT when its supported in CIR. + assert(!E->getType()->isVariableArrayType() && "what now?"); + + // The result of this decay conversion points to an array element within the + // base lvalue. However, since TBAA currently does not support representing + // accesses to elements of member arrays, we conservatively represent accesses + // to the pointee object as if it had no any base lvalue specified. + // TODO: Support TBAA for member arrays. + QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); + if (BaseInfo) + *BaseInfo = LV.getBaseInfo(); + assert(!UnimplementedFeature::tbaa() && "NYI"); + + mlir::Value ptr = maybeBuildArrayDecay( + CGM.getBuilder(), CGM.getLoc(E->getSourceRange()), Addr.getPointer(), + getTypes().convertTypeForMem(EltType)); + return Address(ptr, Addr.getAlignment()); +} + /// If the specified expr is a simple decay from an array to pointer, /// return the array subexpression. /// FIXME: this could be abstracted into a commeon AST helper. @@ -732,23 +794,9 @@ static mlir::Value buildArrayAccessOp(mlir::OpBuilder &builder, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, mlir::Value idx) { - auto arrayPtrTy = arrayPtr.getType().dyn_cast<::mlir::cir::PointerType>(); - assert(arrayPtrTy && "expected pointer type"); - auto arrayTy = arrayPtrTy.getPointee().dyn_cast<::mlir::cir::ArrayType>(); - - mlir::cir::PointerType flatPtrTy; - mlir::Value basePtr = arrayPtr; - if (arrayTy) { - flatPtrTy = - mlir::cir::PointerType::get(builder.getContext(), arrayTy.getEltType()); - basePtr = builder.create( - arrayLocBegin, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, - arrayPtr); - } else { - assert(arrayPtrTy.getPointee() == eltTy && - "flat pointee type must match original array element type"); - flatPtrTy = arrayPtrTy; - } + mlir::Value basePtr = + maybeBuildArrayDecay(builder, arrayLocBegin, arrayPtr, eltTy); + mlir::Type flatPtrTy = basePtr.getType(); return builder.create(arrayLocEnd, flatPtrTy, basePtr, idx); @@ -905,6 +953,26 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, return LV; } +LValue CIRGenFunction::buildStringLiteralLValue(const StringLiteral *E) { + auto sym = CGM.getAddrOfConstantStringFromLiteral(E); + + auto cstGlobal = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), sym); + assert(cstGlobal && "Expected global"); + + auto g = dyn_cast(cstGlobal); + assert(g && "unaware of other symbol providers"); + + auto ptrTy = mlir::cir::PointerType::get(CGM.getBuilder().getContext(), + g.getSymType()); + assert(g.getAlignment() && "expected alignment for string literal"); + auto align = *g.getAlignment(); + auto addr = builder.create( + getLoc(E->getSourceRange()), ptrTy, g.getSymName()); + return makeAddrLValue( + Address(addr, g.getSymType(), CharUnits::fromQuantity(align)), + E->getType(), AlignmentSource::Decl); +} + /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. @@ -924,6 +992,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return buildDeclRefLValue(cast(E)); case Expr::UnaryOperatorClass: return buildUnaryOpLValue(cast(E)); + case Expr::StringLiteralClass: + return buildStringLiteralLValue(cast(E)); case Expr::ObjCPropertyRefExprClass: llvm_unreachable("cannot emit a property reference directly"); } diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 793ddbbd8b32..97c7aaecdb0e 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -760,7 +760,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_Dynamic: llvm_unreachable("NYI"); case CK_ArrayToPointerDecay: - llvm_unreachable("NYI"); + return CGF.buildArrayToPointerDecay(E).getPointer(); case CK_FunctionToPointerDecay: llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 4c800fc718a6..b1088bbccebc 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -702,10 +702,9 @@ class CIRGenFunction { LValue lvalue); LValue buildDeclRefLValue(const clang::DeclRefExpr *E); - LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E); - LValue buildUnaryOpLValue(const clang::UnaryOperator *E); + LValue buildStringLiteralLValue(const StringLiteral *E); /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. @@ -853,9 +852,9 @@ class CIRGenFunction { LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); - /// buildLValueForFieldInitialization - like buildLValueForField, excpet that - /// if the Field is a reference, this will return the address of the reference - /// and not the address of the value stored in the reference. + /// Like buildLValueForField, excpet that if the Field is a reference, this + /// will return the address of the reference and not the address of the value + /// stored in the reference. LValue buildLValueForFieldInitialization(LValue Base, const clang::FieldDecl *Field); @@ -885,15 +884,19 @@ class CIRGenFunction { const FunctionArgList &Args, clang::SourceLocation Loc); - /// buildDelegatingCallArg - We are performing a delegate call; that is, the - /// current function is delegating to another one. Produce a r-value suitable - /// for passing the given parameter. + /// We are performing a delegate call; that is, the current function is + /// delegating to another one. Produce a r-value suitable for passing the + /// given parameter. void buildDelegateCallArg(CallArgList &args, const clang::VarDecl *param, clang::SourceLocation loc); - /// ShouldInstrumentFunction - Return true if the current function should be - /// instrumented with __cyg_profile_func_* calls + /// Return true if the current function should be instrumented with + /// __cyg_profile_func_* calls bool ShouldInstrumentFunction(); + + /// TODO(cir): add TBAAAccessInfo + Address buildArrayToPointerDecay(const Expr *Array, + LValueBaseInfo *BaseInfo = nullptr); }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 4901e51cf037..7a89457a2cbb 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -Wno-return-stack-address -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * void a0() { int a[10]; @@ -34,4 +35,15 @@ int *a2() { // CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : i32), !cir.ptr // CHECK-NEXT: cir.store %4, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %5 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.return %5 : !cir.ptr \ No newline at end of file +// CHECK-NEXT: cir.return %5 : !cir.ptr + +void local_stringlit() { + const char *s = "whatnow"; +} + +// CHECK: cir.global "private" constant @".str" = #cir.cst_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} loc(#loc17) +// CHECK: func @_Z15local_stringlitv() { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", cinit] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: cir.store %2, %0 : !cir.ptr, cir.ptr > From f812466fabaf18ec29e4fddd2fd0b7f3c4586c2d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 May 2022 00:17:34 -0700 Subject: [PATCH 0519/2301] [CIR][CodeGen][NFC] Add cast visitor skeleton to AggrExpr emitter --- clang/lib/CIR/CIRGenExprAgg.cpp | 76 ++++++++++++++++++++++++++++++++- 1 file changed, 74 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index 584c18ce473d..f89285334f23 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -77,7 +77,7 @@ class AggExprEmitter : public StmtVisitor { void VisitPredefinedExpr(const PredefinedExpr *E) { llvm_unreachable("NYI"); } // Operators. - void VisitCastExpr(CastExpr *E) { llvm_unreachable("NYI"); } + void VisitCastExpr(CastExpr *E); void VisitCallExpr(const CallExpr *E) { llvm_unreachable("NYI"); } void VisitStmtExpr(const StmtExpr *E) { llvm_unreachable("NYI"); } void VisitBinaryOperator(const BinaryOperator *E) { llvm_unreachable("NYI"); } @@ -193,6 +193,78 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { CleanupDominator->erase(); } +void AggExprEmitter::VisitCastExpr(CastExpr *E) { + if (const auto *ECE = dyn_cast(E)) + assert(0 && "NYI"); + switch (E->getCastKind()) { + case CK_LValueBitCast: + llvm_unreachable("should not be emitting lvalue bitcast as rvalue"); + + case CK_Dependent: + case CK_BitCast: + case CK_ArrayToPointerDecay: + case CK_FunctionToPointerDecay: + case CK_NullToPointer: + case CK_NullToMemberPointer: + case CK_BaseToDerivedMemberPointer: + case CK_DerivedToBaseMemberPointer: + case CK_MemberPointerToBoolean: + case CK_ReinterpretMemberPointer: + case CK_IntegralToPointer: + case CK_PointerToIntegral: + case CK_PointerToBoolean: + case CK_ToVoid: + case CK_VectorSplat: + case CK_IntegralCast: + case CK_BooleanToSignedIntegral: + case CK_IntegralToBoolean: + case CK_IntegralToFloating: + case CK_FloatingToIntegral: + case CK_FloatingToBoolean: + case CK_FloatingCast: + case CK_CPointerToObjCPointerCast: + case CK_BlockPointerToObjCPointerCast: + case CK_AnyPointerToBlockPointerCast: + case CK_ObjCObjectLValueCast: + case CK_FloatingRealToComplex: + case CK_FloatingComplexToReal: + case CK_FloatingComplexToBoolean: + case CK_FloatingComplexCast: + case CK_FloatingComplexToIntegralComplex: + case CK_IntegralRealToComplex: + case CK_IntegralComplexToReal: + case CK_IntegralComplexToBoolean: + case CK_IntegralComplexCast: + case CK_IntegralComplexToFloatingComplex: + case CK_ARCProduceObject: + case CK_ARCConsumeObject: + case CK_ARCReclaimReturnedObject: + case CK_ARCExtendBlockObject: + case CK_CopyAndAutoreleaseBlockObject: + case CK_BuiltinFnToFnPtr: + case CK_ZeroToOCLOpaqueType: + case CK_MatrixCast: + + case CK_IntToOCLSampler: + case CK_FloatingToFixedPoint: + case CK_FixedPointToFloating: + case CK_FixedPointCast: + case CK_FixedPointToBoolean: + case CK_FixedPointToIntegral: + case CK_IntegralToFixedPoint: + llvm_unreachable("cast kind invalid for aggregate types"); + default: { + llvm::errs() << "cast kind not implemented: '" << E->getCastKindName() + << "'\n"; + break; + } + } +} + +//===----------------------------------------------------------------------===// +// Helpers and dispatcher +//===----------------------------------------------------------------------===// + /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of /// zeros in it, emit a memset and avoid storing the individual zeros. static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, @@ -225,4 +297,4 @@ void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { CheckAggExprForMemSetUse(Slot, E, *this); AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast(E)); -} +} \ No newline at end of file From e3af2f1601801cb7e517ad9e63cbff7ac0c3dd34 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 May 2022 00:19:34 -0700 Subject: [PATCH 0520/2301] [CIR][CodeGen] Add missing assert for just added cast visitor --- clang/lib/CIR/CIRGenExprAgg.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index f89285334f23..3026ea55771d 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -256,6 +256,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { default: { llvm::errs() << "cast kind not implemented: '" << E->getCastKindName() << "'\n"; + assert(0 && "not implemented"); break; } } From 273ff1345f3a85ec2ad20bcfa3ce2258f491f0cf Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 May 2022 00:31:08 -0700 Subject: [PATCH 0521/2301] [CIR][CodeGen][NFC] Add ConstructorConversion and friends to cast handling --- clang/lib/CIR/CIRGenExprAgg.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index 3026ea55771d..4a2e698400ee 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -197,6 +197,16 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { if (const auto *ECE = dyn_cast(E)) assert(0 && "NYI"); switch (E->getCastKind()) { + + case CK_NoOp: + case CK_UserDefinedConversion: + case CK_ConstructorConversion: + assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(), + E->getType()) && + "Implicit cast types must be compatible"); + Visit(E->getSubExpr()); + break; + case CK_LValueBitCast: llvm_unreachable("should not be emitting lvalue bitcast as rvalue"); From 74192860241a64798030618851d19893efefedd8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 May 2022 00:50:04 -0700 Subject: [PATCH 0522/2301] [CIR][CodeGen][NFC] Re-arrange asserts in buildCallArg --- clang/lib/CIR/CIRGenCall.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index fc1fb2e4799c..8145f5774914 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -500,8 +500,10 @@ void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, // we make it to the call. assert(!type->isRecordType() && "Record type args NYI"); - assert(!HasAggregateEvalKind && "aggregate args NYI"); - assert(!isa(E) && "Casted args NYI"); + if (HasAggregateEvalKind && isa(E) && + cast(E)->getCastKind() == CK_LValueToRValue) { + assert(0 && "NYI"); + } args.add(buildAnyExprToTemp(E), type); } From d9ddf8e223b0e19f487adbcc21bb1935acd022a6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 May 2022 15:09:38 -0700 Subject: [PATCH 0523/2301] [CIR][CodeGen][NFC] Classes: add functions to get linkage info from declarators and functions --- clang/lib/CIR/CIRGenModule.cpp | 215 ++++++++++++++++--- clang/lib/CIR/CIRGenModule.h | 9 + clang/lib/CIR/UnimplementedFeatureGuarding.h | 7 + 3 files changed, 205 insertions(+), 26 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 2483b976ac86..7be7cc2a34fb 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1054,6 +1054,194 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { } } +static bool shouldBeInCOMDAT(CIRGenModule &CGM, const Decl &D) { + if (!CGM.supportsCOMDAT()) + return false; + + if (D.hasAttr()) + return true; + + GVALinkage Linkage; + if (auto *VD = dyn_cast(&D)) + Linkage = CGM.getASTContext().GetGVALinkageForVariable(VD); + else + Linkage = + CGM.getASTContext().GetGVALinkageForFunction(cast(&D)); + + switch (Linkage) { + case clang::GVA_Internal: + case clang::GVA_AvailableExternally: + case clang::GVA_StrongExternal: + return false; + case clang::GVA_DiscardableODR: + case clang::GVA_StrongODR: + return true; + } + llvm_unreachable("No such linkage"); +} + +// TODO(cir): this could be a common method between LLVM codegen. +static bool isVarDeclStrongDefinition(const ASTContext &Context, + CIRGenModule &CGM, const VarDecl *D, + bool NoCommon) { + // Don't give variables common linkage if -fno-common was specified unless it + // was overridden by a NoCommon attribute. + if ((NoCommon || D->hasAttr()) && !D->hasAttr()) + return true; + + // C11 6.9.2/2: + // A declaration of an identifier for an object that has file scope without + // an initializer, and without a storage-class specifier or with the + // storage-class specifier static, constitutes a tentative definition. + if (D->getInit() || D->hasExternalStorage()) + return true; + + // A variable cannot be both common and exist in a section. + if (D->hasAttr()) + return true; + + // A variable cannot be both common and exist in a section. + // We don't try to determine which is the right section in the front-end. + // If no specialized section name is applicable, it will resort to default. + if (D->hasAttr() || + D->hasAttr() || + D->hasAttr() || + D->hasAttr()) + return true; + + // Thread local vars aren't considered common linkage. + if (D->getTLSKind()) + return true; + + // Tentative definitions marked with WeakImportAttr are true definitions. + if (D->hasAttr()) + return true; + + // A variable cannot be both common and exist in a comdat. + if (shouldBeInCOMDAT(CGM, *D)) + return true; + + // Declarations with a required alignment do not have common linkage in MSVC + // mode. + if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { + if (D->hasAttr()) + return true; + QualType VarType = D->getType(); + if (Context.isAlignmentRequired(VarType)) + return true; + + if (const auto *RT = VarType->getAs()) { + const RecordDecl *RD = RT->getDecl(); + for (const FieldDecl *FD : RD->fields()) { + if (FD->isBitField()) + continue; + if (FD->hasAttr()) + return true; + if (Context.isAlignmentRequired(FD->getType())) + return true; + } + } + } + + // Microsoft's link.exe doesn't support alignments greater than 32 bytes for + // common symbols, so symbols with greater alignment requirements cannot be + // common. + // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two + // alignments for common symbols via the aligncomm directive, so this + // restriction only applies to MSVC environments. + if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() && + Context.getTypeAlignIfKnown(D->getType()) > + Context.toBits(CharUnits::fromQuantity(32))) + return true; + + return false; +} + +mlir::SymbolTable::Visibility CIRGenModule::getCIRLinkageForDeclarator( + const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) { + if (Linkage == GVA_Internal) + return mlir::SymbolTable::Visibility::Private; + + if (D->hasAttr()) { + assert(UnimplementedFeature::globalWeakLinkage() && "NYI"); + } + + if (const auto *FD = D->getAsFunction()) + if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally) + assert(UnimplementedFeature::globalLinkOnceAnyLinkage() && "NYI"); + + // We are guaranteed to have a strong definition somewhere else, + // so we can use available_externally linkage. + if (Linkage == GVA_AvailableExternally) + assert(UnimplementedFeature::globalAvailableExternallyLinkage() && "NYI"); + + // Note that Apple's kernel linker doesn't support symbol + // coalescing, so we need to avoid linkonce and weak linkages there. + // Normally, this means we just map to internal, but for explicit + // instantiations we'll map to external. + + // In C++, the compiler has to emit a definition in every translation unit + // that references the function. We should use linkonce_odr because + // a) if all references in this translation unit are optimized away, we + // don't need to codegen it. b) if the function persists, it needs to be + // merged with other definitions. c) C++ has the ODR, so we know the + // definition is dependable. + if (Linkage == GVA_DiscardableODR) + assert(0 && "NYI"); + + // An explicit instantiation of a template has weak linkage, since + // explicit instantiations can occur in multiple translation units + // and must all be equivalent. However, we are not allowed to + // throw away these explicit instantiations. + // + // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU, + // so say that CUDA templates are either external (for kernels) or internal. + // This lets llvm perform aggressive inter-procedural optimizations. For + // -fgpu-rdc case, device function calls across multiple TU's are allowed, + // therefore we need to follow the normal linkage paradigm. + if (Linkage == GVA_StrongODR) { + assert(0 && "NYI"); + } + + // C++ doesn't have tentative definitions and thus cannot have common + // linkage. + if (!getLangOpts().CPlusPlus && isa(D) && + !isVarDeclStrongDefinition(astCtx, *this, cast(D), + getCodeGenOpts().NoCommon)) + assert(UnimplementedFeature::globalCommonLinkage() && "NYI"); + + // selectany symbols are externally visible, so use weak instead of + // linkonce. MSVC optimizes away references to const selectany globals, so + // all definitions should be the same and ODR linkage should be used. + // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx + if (D->hasAttr()) + assert(UnimplementedFeature::globalWeakLinkage() && "NYI"); + + // Otherwise, we have strong external linkage. + assert(Linkage == GVA_StrongExternal); + return mlir::SymbolTable::Visibility::Public; +} + +mlir::SymbolTable::Visibility CIRGenModule::getFunctionLinkage(GlobalDecl GD) { + const auto *D = cast(GD.getDecl()); + + GVALinkage Linkage = astCtx.GetGVALinkageForFunction(D); + + if (const auto *Dtor = dyn_cast(D)) + assert(0 && "NYI"); + + if (isa(D) && + cast(D)->isInheritingConstructor() && + astCtx.getTargetInfo().getCXXABI().isMicrosoft()) { + // Our approach to inheriting constructors is fundamentally different from + // that used by the MS ABI, so keep our inheriting constructor thunks + // internal rather than trying to pick an unambiguous mangling for them. + return mlir::SymbolTable::Visibility::Private; + } + + return getCIRLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false); +} + mlir::Type CIRGenModule::getCIRType(const QualType &type) { return genTypes.ConvertType(type); } @@ -1551,37 +1739,12 @@ bool CIRGenModule::supportsCOMDAT() const { return getTriple().supportsCOMDAT(); } -static bool shouldBeInCOMDAT(CIRGenModule &CGM, const Decl &D) { - if (!CGM.supportsCOMDAT()) - return false; - - if (D.hasAttr()) - return true; - - GVALinkage Linkage; - if (auto *VD = dyn_cast(&D)) - Linkage = CGM.getASTContext().GetGVALinkageForVariable(VD); - else - Linkage = - CGM.getASTContext().GetGVALinkageForFunction(cast(&D)); - - switch (Linkage) { - case clang::GVA_Internal: - case clang::GVA_AvailableExternally: - case clang::GVA_StrongExternal: - return false; - case clang::GVA_DiscardableODR: - case clang::GVA_StrongODR: - return true; - } - llvm_unreachable("No such linkage"); -} - void CIRGenModule::maybeSetTrivialComdat(const Decl &D, mlir::Operation *Op) { if (!shouldBeInCOMDAT(*this, D)) return; // TODO: Op.setComdat + assert(!UnimplementedFeature::setComdat() && "NYI"); } bool CIRGenModule::isInNoSanitizeList(SanitizerMask Kind, mlir::FuncOp Fn, diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index bddd19c820c6..6619a80952f6 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -352,6 +352,15 @@ class CIRGenModule { void emitError(const llvm::Twine &message) { theModule.emitError(message); } + /// ------- + /// Linkage + /// ------- + + mlir::SymbolTable::Visibility getFunctionLinkage(GlobalDecl GD); + mlir::SymbolTable::Visibility + getCIRLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage, + bool IsConstantVariable); + private: // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 2db4e364d1a1..5e02a1275a2b 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -31,7 +31,14 @@ struct UnimplementedFeature { // Unhandled global/linkage information. static bool unnamedAddr() { return false; } + static bool isWeakForLinker() { return false; } + static bool globalWeakLinkage() { return false; } + static bool globalLinkOnceAnyLinkage() { return false; } + static bool globalAvailableExternallyLinkage() { return false; } + static bool globalCommonLinkage() { return false; } + static bool setComdat() { return false; } + static bool setDSOLocal() { return false; } static bool threadLocal() { return false; } static bool setDLLStorageClass() { return false; } From 8e618129794cf9d289cdc7c076febfaf93f513a9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 May 2022 15:15:42 -0700 Subject: [PATCH 0524/2301] [CIR][CodeGen][NFC] Populate getCIRGenToUse with more structor info --- clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 22 +++++++++++++++++++- clang/lib/CIR/UnimplementedFeatureGuarding.h | 3 +++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index f77464fa6170..47466989b16b 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -244,7 +244,27 @@ static StructorCIRGen getCIRGenToUse(CIRGenModule &CGM, if (!CGM.getCodeGenOpts().CXXCtorDtorAliases) return StructorCIRGen::Emit; - llvm_unreachable("Nothing else implemented yet"); + // The complete and base structors are not equivalent if there are any virtual + // bases, so emit separate functions. + if (MD->getParent()->getNumVBases()) + return StructorCIRGen::Emit; + + GlobalDecl AliasDecl; + if (const auto *DD = dyn_cast(MD)) { + AliasDecl = GlobalDecl(DD, Dtor_Complete); + } else { + const auto *CD = cast(MD); + AliasDecl = GlobalDecl(CD, Ctor_Complete); + } + auto Linkage = CGM.getFunctionLinkage(AliasDecl); + (void)Linkage; + + assert(!UnimplementedFeature::globalIsDiscardableIfUnused() && "NYI"); + // // FIXME: Should we allow available_externally aliases? + assert(!UnimplementedFeature::globalIsValidLinkage() && "NYI"); + assert(!UnimplementedFeature::globalIsWeakForLinker() && "NYI"); + + return StructorCIRGen::Alias; } void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 5e02a1275a2b..6fed5a8beca6 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -37,6 +37,9 @@ struct UnimplementedFeature { static bool globalLinkOnceAnyLinkage() { return false; } static bool globalAvailableExternallyLinkage() { return false; } static bool globalCommonLinkage() { return false; } + static bool globalIsDiscardableIfUnused() { return false; } + static bool globalIsValidLinkage() { return false; } + static bool globalIsWeakForLinker() { return false; } static bool setComdat() { return false; } static bool setDSOLocal() { return false; } From 3db930864150ba39fe40461f69639868a83f80fe Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 1 Jun 2022 14:01:53 -0400 Subject: [PATCH 0525/2301] [CIR] Find MLIR as part of the runtimes builds Clang now needs MLIR and thus a clang distribution also needs MLIR. So find MLIR first before clang for building the runtimes. As noted, we'll want the default to be for this to not happen and that only clangir builds of clang opt into this. e.g. via ``` cmake -DCLANG_BUILD_CLANGIR=ON ``` This would opt into clangir and also propagate it to the runtimes build. --- runtimes/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/runtimes/CMakeLists.txt b/runtimes/CMakeLists.txt index 4a6b317a03f6..cf5b4a2aad68 100644 --- a/runtimes/CMakeLists.txt +++ b/runtimes/CMakeLists.txt @@ -62,6 +62,8 @@ function(runtime_register_component name) endfunction() find_package(LLVM PATHS "${LLVM_BINARY_DIR}" NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) +# TODO(CIR): Once we guard CIR including clang builds guard this with the same flag +find_package(MLIR PATHS "${LLVM_BINARY_DIR}" NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) find_package(Clang PATHS "${LLVM_BINARY_DIR}" NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) set(LLVM_THIRD_PARTY_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../third-party") From 197964d89ab1325b095705c150d3514450155d9b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 2 Jun 2022 10:09:59 -0700 Subject: [PATCH 0526/2301] [CIR][cir-tool] Add all the used libraries as DEPENDS for cir-tool While building new we hit an issue where `Affine/Passes.h.inc` does not exist before we attempt to use it from cir-too.cpp. So just copy the structure used for mlir-opt for the tool definition and dependency. --- clang/tools/cir-tool/CMakeLists.txt | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/clang/tools/cir-tool/CMakeLists.txt b/clang/tools/cir-tool/CMakeLists.txt index 83f2075a5586..42a734b35e92 100644 --- a/clang/tools/cir-tool/CMakeLists.txt +++ b/clang/tools/cir-tool/CMakeLists.txt @@ -1,14 +1,12 @@ -add_clang_tool(cir-tool cir-tool.cpp) -llvm_update_compile_flags(cir-tool) get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) -include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) -include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) +include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) +include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) -target_link_libraries(cir-tool PRIVATE +set(LIBS ${dialect_libs} ${conversion_libs} - clangCIR MLIRAnalysis MLIRCIR @@ -23,3 +21,13 @@ target_link_libraries(cir-tool PRIVATE MLIRTransforms MLIRTransformUtils ) + +add_clang_tool(cir-tool + cir-tool.cpp + + DEPENDS + ${LIBS} +) + +target_link_libraries(cir-tool PRIVATE ${LIBS}) +llvm_update_compile_flags(cir-tool) From ae30f7bf371aa10a2e8bba2c8d83750fe519683c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 13 Jun 2022 13:57:13 -0400 Subject: [PATCH 0527/2301] Add a snippet at the bottom of the README denoting the license --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index a9b29ecbc1a3..ec9670540d5d 100644 --- a/README.md +++ b/README.md @@ -42,3 +42,9 @@ chat](https://discord.gg/xS7Z362), The LLVM project has adopted a [code of conduct](https://llvm.org/docs/CodeOfConduct.html) for participants to all modes of communication within the project. + +### License + +ClangIR is based off https://github.com/llvm/llvm-project and uses the same +license. This ClangIR project is under the Apache License v2.0 with LLVM +Exceptions. Please see the `LICENSE.TXT` for the full details. From 8d86a7f273d889bea00a9f055aa873b18b96aa28 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 13 Jun 2022 16:31:26 -0400 Subject: [PATCH 0528/2301] Add snippets about our Contributing and CodeOfConudct at the bottom of the readme --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index ec9670540d5d..497d207a5e88 100644 --- a/README.md +++ b/README.md @@ -48,3 +48,14 @@ participants to all modes of communication within the project. ClangIR is based off https://github.com/llvm/llvm-project and uses the same license. This ClangIR project is under the Apache License v2.0 with LLVM Exceptions. Please see the `LICENSE.TXT` for the full details. + +## Contributing + +Check our [contributing guide](CONTRIBUTING.md) to learn about how to +contribute to the project. + +## Code Of Confuct + +Check our [Code Of Conduct](CODE_OF_CONDUCT.md) to learn more about our +contributor standards and expectations. + From 9e44066baecf3c39e4a3e69f61210b6da9cb0e7a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 2 Jun 2022 15:06:08 -0700 Subject: [PATCH 0529/2301] [CIR] Add mlir-translate as a dep for CLANG_TEST_DEPS We use this for a few tests and hus will need it in order to run tests at all. --- clang/test/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt index 7aa776f70ae8..f624bb4929e9 100644 --- a/clang/test/CMakeLists.txt +++ b/clang/test/CMakeLists.txt @@ -83,6 +83,7 @@ list(APPEND CLANG_TEST_DEPS clang-sycl-linker diagtool hmaptool + mlir-translate ) if(CLANG_ENABLE_STATIC_ANALYZER) From 9b09ce14b78ea4473d507ffbaa822de71208474b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 17 Jun 2022 14:26:06 -0700 Subject: [PATCH 0530/2301] [CIR][README] Update README.td for CIR disclaimer --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 497d207a5e88..a6de0311a4a0 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,10 @@ +# ClangIR (CIR) + +For more information see https://clangir.org. The rest of this document +fallbacks to llvm-project's default `README.td`. + +--- + # The LLVM Compiler Infrastructure [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/llvm/llvm-project/badge)](https://securityscorecards.dev/viewer/?uri=github.com/llvm/llvm-project) From be534a21afcd54b6ece8152a888e72cb8d409714 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 17 Jun 2022 21:19:18 -0700 Subject: [PATCH 0531/2301] [CIR] Improve AllocaOp and BinOp documentation --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 37 ++++++++++++++-------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index a99232109a62..9fdcb3d16617 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -157,13 +157,15 @@ def ConstantOp : CIR_Op<"cst", def InitStyle_None : I32EnumAttrCase<"uninitialized", 1>; def InitStyle_ParamInit : I32EnumAttrCase<"paraminit", 2>; + +// These are similar to Clang's VarDecl initialization style def InitStyle_CInit : I32EnumAttrCase<"cinit", 3>; def InitStyle_CallInit : I32EnumAttrCase<"callinit", 4>; def InitStyle_ListInit : I32EnumAttrCase<"listinit", 5>; def InitStyle : I32EnumAttr< "InitStyle", - "variable initialization style", + "initialization style", [InitStyle_None, InitStyle_ParamInit, InitStyle_CInit, InitStyle_CallInit, InitStyle_ListInit]> { @@ -185,21 +187,28 @@ def AllocaOp : CIR_Op<"alloca", [ AllocaTypesMatchWith<"'allocaType' matches pointee type of 'addr'", "addr", "allocaType", "$_self.cast().getPointee()">]> { - let summary = "local variable"; + let summary = "Defines a scope-local variable"; let description = [{ - The `cir.alloca` operation defines a local variable. + The `cir.alloca` operation defines a scope-local variable. - Possible initialization styles are: uninitialized, paraminit, - callinit, cinit and listinit. + Initialization style must be one of: + - uninitialized + - paraminit: alloca to hold a function argument + - callinit: Call-style initialization (C++98) + - cinit: C-style initialization with assignment + - listinit: Direct list-initialization (C++11) - The result is a pointer type for the original input type. + The result type is a pointer to the input's type. Example: ```mlir - // Local variable with uninitialized value. - // int count = ... - %0 = cir.alloca i32, !cir.ptr, ["count", cinit] + // int count = 3; + %0 = cir.alloca i32, !cir.ptr, ["count", cinit] {alignment = 4 : i64} + + // int *ptr; + %1 = cir.alloca !cir.ptr, cir.ptr >, ["ptr", uninitialized] {alignment = 8 : i64} + ... ``` }]; @@ -551,12 +560,14 @@ def BinOpKind : I32EnumAttr< def BinOp : CIR_Op<"binop", [Pure, SameTypeOperands, SameOperandsAndResultType]> { - let summary = "binary operations (arith and logic)"; + let summary = "Binary operations (arith and logic)"; let description = [{ "cir.binop performs the binary operation according to - the specified kind/opcode: [mul, div, rem, add, sub, shl, - shr, and, xor, or]. It accepts to input operands and the - result type must match both types. + the specified opcode kind: [mul, div, rem, add, sub, shl, + shr, and, xor, or]. + + It requires two input operands and has one result, all types + should be the same. Example ``` From ac0683715e2ef374eee36f49bacf5112b2f5ee5e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 20 Jun 2022 13:57:36 -0700 Subject: [PATCH 0532/2301] [CIR] Update overall CIROps.td documentation --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 239 ++++++++++++--------- 1 file changed, 143 insertions(+), 96 deletions(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 9fdcb3d16617..7a1beb6a0576 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -51,12 +51,18 @@ def CastKind : I32EnumAttr< def CastOp : CIR_Op<"cast", [Pure]> { // FIXME: not all conversions are free of side effects. - let summary = "cast"; + let summary = "Conversion between values of different types"; let description = [{ - Apply C/C++ usual conversions rules between types. The full list of those - can be seen in clang/include/clang/AST/OperationKinds.def, but note that some - of the conversions aren't implemented in terms of cir.cast, lvalue-to-rvalue - for instance is modeled as a load. + Apply C/C++ usual conversions rules between values. Currently supported kinds: + + - `int_to_bool` + - `array_to_ptrdecay` + - `integral` + + This is effectively a subset of the rules from + `llvm-project/clang/include/clang/AST/OperationKinds.def`; but note that some + of the conversions aren't implemented in terms of `cir.cast`, `lvalue-to-rvalue` + for instance is modeled as a regular `cir.load`. ```mlir %4 = cir.cast (int_to_bool, %3 : i32), !cir.bool @@ -86,10 +92,10 @@ def SameFirstOperandAndResultType : def PtrStrideOp : CIR_Op<"ptr_stride", [Pure, SameFirstOperandAndResultType]> { - let summary = "pointer access with stride"; + let summary = "Pointer access with stride"; let description = [{ Given a base pointer as operand, provides a new pointer after applying - a stride. Used for array subscripts, vectors, etc. + a stride. Currently only used for array subscripts. ```mlir %3 = cir.cst(0 : i32) : i32 @@ -118,10 +124,10 @@ def ConstantOp : CIR_Op<"cst", // FIXME: Use SameOperandsAndResultType or similar and prevent eye bleeding // type repetition in the assembly form. - let summary = "constant operation"; + let summary = "Defines a CIR constant"; let description = [{ - Constant operation turns a literal into an SSA value. The data is attached - to the operation as an attribute. For example: + The `cir.cst` operation turns a literal into an SSA value. The data is + attached to the operation as an attribute. ```mlir %0 = cir.cst(42 : i32) : i32 @@ -246,10 +252,12 @@ def LoadOp : CIR_Op<"load", [ "addr", "result", "$_self.cast().getPointee()">]> { - let summary = "load operation"; + let summary = "Load value from memory adddress"; let description = [{ - `cir.load` reads a variable (lvalue to rvalue conversion) given an address - backed up by a `cir.ptr` type. + `cir.load` reads a value (lvalue to rvalue conversion) given an address + backed up by a `cir.ptr` type. A unit attribute `deref` can be used to + mark the resulting value as used by another operation to dereference + a pointer. Example: @@ -258,8 +266,8 @@ def LoadOp : CIR_Op<"load", [ // Read from local variable, address in %0. %1 = cir.load %0 : !cir.ptr, i32 - // Load address from memory at address %0. %3 provides - // the address used while dereferecing a pointer. + // Load address from memory at address %0. %3 is used by at least one + // operation that dereferences a pointer. %3 = cir.load deref %0 : cir.ptr > ``` }]; @@ -274,6 +282,8 @@ def LoadOp : CIR_Op<"load", [ (`deref` $isDeref^)? $addr `:` `cir.ptr` type($addr) `,` type($result) attr-dict }]; + + // FIXME: add verifier. } //===----------------------------------------------------------------------===// @@ -285,16 +295,17 @@ def StoreOp : CIR_Op<"store", [ "addr", "value", "$_self.cast().getPointee()">]> { - let summary = "store operation"; + let summary = "Store value to memory address"; let description = [{ - `cir.load` reads a variable using a pointer type. + `cir.store` stores a value (first operand) to the memory address specified + in the second operand. Example: ```mlir - - // Store to local variable, address in %0. + // Store a function argument to local storage, address in %0. cir.store %arg0, %0 : i32, !cir.ptr + ``` }]; let arguments = (ins AnyType:$value, @@ -305,6 +316,8 @@ def StoreOp : CIR_Op<"store", [ // from the pointer type directly. let assemblyFormat = "$value `,` $addr attr-dict `:` type($value) `,` `cir.ptr` type($addr)"; + + // FIXME: add verifier. } //===----------------------------------------------------------------------===// @@ -313,17 +326,17 @@ def StoreOp : CIR_Op<"store", [ def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp, LoopOp">, Terminator]> { - let summary = "return operation"; + let summary = "Return from function"; let description = [{ The "return" operation represents a return operation within a function. The operation takes an optional operand and produces no results. The operand type must match the signature of the function that contains - the operation. For example: + the operation. ```mlir - func @foo() -> AnyType { + func @foo() -> i32 { ... - cir.return %0 : AnyType + cir.return %0 : i32 } ``` }]; @@ -355,14 +368,11 @@ def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp, Loo def IfOp : CIR_Op<"if", [DeclareOpInterfaceMethods, RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { - let summary = "if-then-else operation"; + let summary = "The if-then-else operation"; let description = [{ The `cir.if` operation represents an if-then-else construct for - conditionally executing two regions of code. The operand to an if operation - is a boolean value. - - Each region can contain an arbitrary number of blocks but there usually be - only one block in each region unless the presence of return and goto. + conditionally executing two regions of code. The operand is a `cir.bool` + type. Examples: @@ -379,7 +389,7 @@ def IfOp : CIR_Op<"if", cir.if %c { ... - br ^a + cir.br ^a ^a: cir.yield } @@ -423,28 +433,30 @@ def YieldOpKind : I32EnumAttr< def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "LoopOp"]>]> { - let summary = "termination operation for regions inside if, for, scope, etc"; + let summary = "Terminate CIR regions"; let description = [{ - "cir.yield" yields an SSA value from a CIR dialect op region and - terminates the regions. The semantics of how the values are yielded is - defined by the parent operation. - - Currently, there are not parents where `cir.yield` has any operands, - but it will be useful to represent lifetime extension in the future. - - When used to leave `cir.switch` regions there are two possible meanings: - 1. `cir.yield break` has "breaking out of the outermost" switch semantics. - 2. `cir.yield fallthrough` means the next region in the case list should - be executed. - - `cir.yield loopcondition %val` is another form that must terminate cond - regions within `cir.loop`s. - - The `cir.yield` must be explicitly used whenever a region has more than - one block, or within `cir.switch` regions not `cir.return` terminated. + The `cir.yield` operation terminates regions on different CIR operations: + `cir.if`, `cir.scope`, `cir.switch` and `cir.loop`. + + Might yield an SSA value and the semantics of how the values are yielded is + defined by the parent operation. Note: there are currently no uses of + `cir.yield` with operands - should be helpful to represent lifetime + extension out of short lived scopes in the future. + + Optionally, `cir.yield` can be annotated with extra kind specifiers: + - `break`: breaking out of the innermost `cir.switch` / `cir.loop` semantics, + cannot be used if not dominated by these parent operations. + - `fallthrough`: execution falls to the next region in `cir.switch` case list. + Only available inside `cir.switch` regions. + - `continue`: only allowed under `cir.loop`, continue execution to the next + loop step. + + As a general rule, `cir.yield` must be explicitly used whenever a region has + more than one block and no terminator, or within `cir.switch` regions not + `cir.return` terminated. Example: - ``` + ```mlir cir.if %4 { ... cir.yield @@ -504,17 +516,17 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods, RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { - let summary = ""; + let summary = "Represents a C/C++ scope"; let description = [{ - "cir.scope" contains one region and defines a strict "scope" for all new + `cir.scope` contains one region and defines a strict "scope" for all new values produced within its blocks. - The region can contain an arbitrary number of blocks but should usually be - only one block if return and goto are not present. + Its region can contain an arbitrary number of blocks but usually defaults + to one. The `cir.yield` is a required terminator and can be optionally omitted. - "cir.yield" is required as a terminator and can have results, in which case - it can be omitted. Not used anywhere just yet but might be used to explicitly - model lifetime extension. + A resulting value can also be specificed, though not currently used - together + with `cir.yield` should be helpful to represent lifetime extension out of short + lived scopes in the future. }]; let results = (outs Variadic:$results); @@ -562,18 +574,17 @@ def BinOp : CIR_Op<"binop", [Pure, let summary = "Binary operations (arith and logic)"; let description = [{ - "cir.binop performs the binary operation according to - the specified opcode kind: [mul, div, rem, add, sub, shl, - shr, and, xor, or]. + cir.binop performs the binary operation according to + the specified opcode kind: [mul, div, rem, add, sub, shl, + shr, and, xor, or]. - It requires two input operands and has one result, all types - should be the same. + It requires two input operands and has one result, all types + should be the same. - Example - ``` - %7 = binop(add, %1, %2) : i32 - %7 = binop(mul, %1, %2) : i8 - ``` + ```mlir + %7 = binop(add, %1, %2) : i32 + %7 = binop(mul, %1, %2) : i8 + ``` }]; // TODO: get more accurate than AnyType @@ -611,16 +622,15 @@ def CmpOpKind : I32EnumAttr< // FIXME: Pure might not work when we add overloading. def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { - let summary = "compare operation"; + let summary = "Compare values two values and produce a boolean result"; let description = [{ - "cir.cmp compares two input operands and produces a bool result. The input - operands must have the same type. The kinds of comparison available are: - [lt,gt,ge,eq,ne] + `cir.cmp` compares two input operands of the same type and produces a + `cir.bool` result. The kinds of comparison available are: + [lt,gt,ge,eq,ne] - Example - ``` - %7 = cir.cmp(gt, %1, %2) : i32, !cir.bool - ``` + ```mlir + %7 = cir.cmp(gt, %1, %2) : i32, !cir.bool + ``` }]; // TODO: get more accurate than AnyType @@ -673,23 +683,21 @@ def SwitchOp : CIR_Op<"switch", [SameVariadicOperandSize, DeclareOpInterfaceMethods, RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { - let summary = "a switch operation"; + let summary = "Switch operation"; let description = [{ The `cir.switch` operation represents C/C++ switch functionality for conditionally executing multiple regions of code. The operand to an switch is an integral condition value. A variadic list of "case" attribute operands and regions track the possible - control flow within `cir.switch`. Each "case" first operand could be: - - "equal": equality check against the condition. - - "anyof": equals to any of the values in a following list. - - "default": any other value. + control flow within `cir.switch`. A `case` must be in one of the following forms: + - `equal, `: equality of the second case operand against the + condition. + - `anyof, [constant-list]`: equals to any of the values in a subsequent + following list. + - `default`: any other value. - An optional second operand denotes the actual value (or list of). - Types value(s) should match the condition and among themselves (in the list - case). - - Each case region must be explicitly terminated with a cir.yield operation. + Each case region must be explicitly terminated. Examples: @@ -739,16 +747,17 @@ def SwitchOp : CIR_Op<"switch", def BrOp : CIR_Op<"br", [DeclareOpInterfaceMethods, Pure, Terminator]> { - let summary = "branch operation"; + let summary = "Unconditional branch"; let description = [{ - The `cir.br` branches unconditionally to a block. + The `cir.br` branches unconditionally to a block. Used to represent C/C++ + goto's and general block branching. Example: ```mlir ... cir.br ^bb3 - ^bb3: // pred: ^bb2 + ^bb3: cir.return ``` }]; @@ -775,7 +784,7 @@ def BrOp : CIR_Op<"br", def BrCondOp : CIR_Op<"brcond", [DeclareOpInterfaceMethods, Pure, Terminator, SameVariadicOperandSize]> { - let summary = "conditional branch operation"; + let summary = "Conditional branch"; let description = [{ The `cir.brcond %cond, ^bb0, ^bb1` branches to 'bb0' block in case %cond (which must be a !cir.bool type) evaluates to true, otherwise @@ -837,8 +846,37 @@ def LoopOp : CIR_Op<"loop", [DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, RecursivelySpeculatable, NoRegionArguments]> { - let summary = "loop operation"; + let summary = "Loop"; let description = [{ + `cir.loop` represents C/C++ loop forms. It defines 3 blocks: + - `cond`: region can contain multiple blocks, terminated by regular + `cir.yield` when control should yield back to the parent, and + `cir.yield continue` when execution continues to another region. + The region destination depends on the loop form specified. + - `step`: region with one block, containing code to compute the + loop step, must be terminated with `cir.yield`. + - `body`: region for the loop's body, can contain an arbitrary + number of blocks. + + The loop form: `for`, `while` and `dowhile` must also be specified and + each implies the loop regions execution order. + + ```mlir + // while (true) { + // i = i + 1; + // } + cir.loop while(cond : { + cir.yield continue + }, step : { + cir.yield + }) { + %3 = cir.load %1 : cir.ptr , i32 + %4 = cir.cst(1 : i32) : i32 + %5 = cir.binop(add, %3, %4) : i32 + cir.store %5, %1 : i32, cir.ptr + cir.yield + } + ``` }]; let arguments = (ins Arg:$kind); @@ -876,7 +914,7 @@ def LoopOp : CIR_Op<"loop", //===----------------------------------------------------------------------===// def GlobalOp : CIR_Op<"global", [Symbol]> { - let summary = "declare or define a global variable"; + let summary = "Declares or defines a global variable"; let description = [{ The `cir.global` operation declares or defines a named global variable. @@ -890,10 +928,13 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { `constant` unit attribute. Writing to such constant global variables is undefined. + Symbol visibility is defined in terms of MLIR's visibility, and C/C++ + linkage types are still TBD. + Example: ```mlir - // Externally available and constant variable with initial value. + // Public and constant variable with initial value. cir.global public constant @c : i32 = 4; ``` }]; @@ -939,12 +980,12 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { def GetGlobalOp : CIR_Op<"get_global", [Pure, DeclareOpInterfaceMethods]> { - let summary = "get the memref pointing to a global variable"; + let summary = "Get the address of a global variable"; let description = [{ The `cir.get_global` operation retrieves the address pointing to a named global variable. If the global variable is marked constant, writing to the resulting address (such as through a `cir.store` operation) is - undefined. Resulting type must always be a !cir.ptr<...> type. + undefined. Resulting type must always be a `!cir.ptr<...>` type. Example: @@ -964,8 +1005,13 @@ def GetGlobalOp : CIR_Op<"get_global", let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// StructElementAddr +//===----------------------------------------------------------------------===// + +// FIXME: rename this among the lines of GetGlobalOp. def StructElementAddr : CIR_Op<"struct_element_addr"> { - let summary = "get the address of a member of a struct"; + let summary = "Get the address of a member of a struct"; let description = [{ The `cir.struct_element_addr` operaration gets the address of a particular named member from the input struct. @@ -978,7 +1024,7 @@ def StructElementAddr : CIR_Op<"struct_element_addr"> { ... %1 = cir.struct_element_addr %0, "Bar.a" %2 = cir.load %1 : cir.ptr , int - WIP + ... ``` }]; @@ -987,7 +1033,8 @@ def StructElementAddr : CIR_Op<"struct_element_addr"> { StrAttr:$member_name); let results = (outs Res:$result); + + // FIXME: add verifier. } #endif // MLIR_CIR_DIALECT_CIR_OPS - From 244b72446ffe841c2974a4a6eb1ef05dc60c48d7 Mon Sep 17 00:00:00 2001 From: Jingsong Shang Date: Thu, 16 Jun 2022 13:33:07 -0700 Subject: [PATCH 0533/2301] [CIR]Renames the directory clang/test/CIR/IRGen to clang/test/CIR/CIRToLLVM --- clang/test/CIR/{IRGen => CIRToLLVM}/memref.cir | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename clang/test/CIR/{IRGen => CIRToLLVM}/memref.cir (100%) diff --git a/clang/test/CIR/IRGen/memref.cir b/clang/test/CIR/CIRToLLVM/memref.cir similarity index 100% rename from clang/test/CIR/IRGen/memref.cir rename to clang/test/CIR/CIRToLLVM/memref.cir From 665a6fb7efe3d5c33e7754ce8ace5eec853a29d1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 18 Jul 2022 14:40:32 -0700 Subject: [PATCH 0534/2301] [CIR][NFC] Silence warnings on some not yet used boilerplate --- clang/lib/CIR/CIRGenExprAgg.cpp | 6 +++++- clang/lib/CIR/CIRGenModule.cpp | 2 +- clang/lib/CIR/TargetInfo.cpp | 11 +++++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index 4a2e698400ee..0fa2c517c63e 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -170,6 +170,10 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { } void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { + AggValueSlot Slot = EnsureSlot(E->getType()); + LLVM_ATTRIBUTE_UNUSED LValue SlotLV = + CGF.makeAddrLValue(Slot.getAddress(), E->getType()); + // We'll need to enter cleanup scopes in case any of the element initializers // throws an exception. if (UnimplementedFeature::cleanups()) @@ -308,4 +312,4 @@ void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { CheckAggExprForMemSetUse(Slot, E, *this); AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast(E)); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 7be7cc2a34fb..49215d4b0a05 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -623,7 +623,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // TODO(cir): LLVM's codegen uses a llvm::TrackingVH here. Is that // necessary here for CIR gen? mlir::Attribute Init; - [[maybe_unused]] bool NeedsGlobalCtor = false; + // TODO(cir): bool NeedsGlobalCtor = false; bool NeedsGlobalDtor = D->needsDestruction(astCtx) == QualType::DK_cxx_destructor; diff --git a/clang/lib/CIR/TargetInfo.cpp b/clang/lib/CIR/TargetInfo.cpp index 4d48b8fda95b..ec73873437e7 100644 --- a/clang/lib/CIR/TargetInfo.cpp +++ b/clang/lib/CIR/TargetInfo.cpp @@ -159,6 +159,17 @@ class X86_64TargetCIRGenInfo : public TargetCIRGenInfo { }; } // namespace +// TODO(cir): remove the attribute once this gets used. +LLVM_ATTRIBUTE_UNUSED +static bool classifyReturnType(const CIRGenCXXABI &CXXABI, + CIRGenFunctionInfo &FI, const ABIInfo &Info) { + QualType Ty = FI.getReturnType(); + + assert(!Ty->getAs() && "RecordType returns NYI"); + + return CXXABI.classifyReturnType(FI); +} + CIRGenCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); } clang::ASTContext &ABIInfo::getContext() const { return CGT.getContext(); } From ba914f003af5aac21e8dcc87cb1bb9fa976a76b4 Mon Sep 17 00:00:00 2001 From: Jingsong Shang Date: Fri, 24 Jun 2022 22:11:26 -0700 Subject: [PATCH 0535/2301] [CIR][Clang] Support Compound Assignment Summary: Implemented compound assignment for AddAssign, SubAssign, MulAssign, DivAssign, RemAssign, ShlAssign, ShrAssign, AndAssign, OrAssign and XorAssign in terms of existing cir.load, cir.binop and cir.store and added binassign_cpp for test purpose. Test Plan: Test for support for compound assignment can be accomplished with those added test file: binassign.cpp using command ``` ninja check-clang-cir-codegen ``` in the build folder. Reviewers: brunolopes, #clangir Reviewed By: brunolopes Subscribers: lanza, ivanmurashko Differential Revision: https://phabricator.intern.facebook.com/D37305739 Tasks: T108616625 --- clang/lib/CIR/CIRGenExpr.cpp | 19 ++++ clang/lib/CIR/CIRGenExprScalar.cpp | 144 +++++++++++++++++++++++++++ clang/lib/CIR/CIRGenFunction.h | 11 ++ clang/test/CIR/CodeGen/binassign.cpp | 53 ++++++++++ 4 files changed, 227 insertions(+) create mode 100644 clang/test/CIR/CodeGen/binassign.cpp diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 05eb7c4a3321..0470414b5495 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -260,6 +260,18 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, llvm_unreachable("NYI"); } +/// Given an expression that represents a value lvalue, this +/// method emits the address of the lvalue, then loads the result as an rvalue, +/// returning the rvalue. +RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { + assert(LV.isSimple() && "not implemented"); + assert(!LV.getType()->isFunctionType()); + assert(!(LV.getType()->isConstantMatrixType()) && "not implemented"); + + // Everything needs a load. + return RValue::get(buildLoadOfScalar(LV, Loc)); +} + void CIRGenFunction::buldStoreThroughLValue(RValue Src, LValue Dst, const Decl *InitDecl) { assert(Dst.isSimple() && "only implemented simple"); @@ -988,6 +1000,13 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return buildArraySubscriptExpr(cast(E)); case Expr::BinaryOperatorClass: return buildBinaryOperatorLValue(cast(E)); + case Expr::CompoundAssignOperatorClass: { + QualType Ty = E->getType(); + if (const AtomicType *AT = Ty->getAs()) + assert(0 && "not yet implemented"); + assert(!Ty->isAnyComplexType() && "complex types not implemented"); + return buildCompoundAssignmentLValue(cast(E)); + } case Expr::DeclRefExprClass: return buildDeclRefLValue(cast(E)); case Expr::UnaryOperatorClass: diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 97c7aaecdb0e..59b852be0d7a 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -144,6 +144,10 @@ class ScalarExprEmitter : public StmtVisitor { return load; } + mlir::Value buildLoadOfLValue(LValue LV, SourceLocation Loc) { + return CGF.buildLoadOfLValue(LV, Loc).getScalarVal(); + } + // l-values mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { // FIXME: we could try to emit this as constant first, see @@ -465,11 +469,23 @@ class ScalarExprEmitter : public StmtVisitor { Ops.LHS, Ops.RHS); } + LValue buildCompoundAssignLValue( + const CompoundAssignOperator *E, + mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &), + mlir::Value &Result); + mlir::Value + buildCompoundAssign(const CompoundAssignOperator *E, + mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &)); + // Binary operators and binary compound assignment operators. #define HANDLEBINOP(OP) \ mlir::Value VisitBin##OP(const BinaryOperator *E) { \ return build##OP(buildBinOps(E)); \ + } \ + mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *E) { \ + return buildCompoundAssign(E, &ScalarExprEmitter::build##OP); \ } + HANDLEBINOP(Mul) HANDLEBINOP(Div) HANDLEBINOP(Rem) @@ -983,3 +999,131 @@ mlir::Value ScalarExprEmitter::buildScalarCast( llvm_unreachable("NYI"); } + +LValue +CIRGenFunction::buildCompoundAssignmentLValue(const CompoundAssignOperator *E) { + ScalarExprEmitter Scalar(*this, builder); + mlir::Value Result; + switch (E->getOpcode()) { +#define COMPOUND_OP(Op) \ + case BO_##Op##Assign: \ + return Scalar.buildCompoundAssignLValue(E, &ScalarExprEmitter::build##Op, \ + Result) + COMPOUND_OP(Mul); + COMPOUND_OP(Div); + COMPOUND_OP(Rem); + COMPOUND_OP(Add); + COMPOUND_OP(Sub); + COMPOUND_OP(Shl); + COMPOUND_OP(Shr); + COMPOUND_OP(And); + COMPOUND_OP(Xor); + COMPOUND_OP(Or); +#undef COMPOUND_OP + + case BO_PtrMemD: + case BO_PtrMemI: + case BO_Mul: + case BO_Div: + case BO_Rem: + case BO_Add: + case BO_Sub: + case BO_Shl: + case BO_Shr: + case BO_LT: + case BO_GT: + case BO_LE: + case BO_GE: + case BO_EQ: + case BO_NE: + case BO_Cmp: + case BO_And: + case BO_Xor: + case BO_Or: + case BO_LAnd: + case BO_LOr: + case BO_Assign: + case BO_Comma: + llvm_unreachable("Not valid compound assignment operators"); + } + llvm_unreachable("Unhandled compound assignment operator"); +} + +LValue ScalarExprEmitter::buildCompoundAssignLValue( + const CompoundAssignOperator *E, + mlir::Value (ScalarExprEmitter::*Func)(const BinOpInfo &), + mlir::Value &Result) { + QualType LHSTy = E->getLHS()->getType(); + BinOpInfo OpInfo; + + if (E->getComputationResultType()->isAnyComplexType()) + assert(0 && "not implemented"); + + // Emit the RHS first. __block variables need to have the rhs evaluated + // first, plus this should improve codegen a little. + OpInfo.RHS = Visit(E->getRHS()); + OpInfo.Ty = E->getComputationResultType(); + OpInfo.Opcode = E->getOpcode(); + OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); + OpInfo.E = E; + OpInfo.Loc = E->getSourceRange(); + + // Load/convert the LHS + LValue LHSLV = CGF.buildLValue(E->getLHS()); + + if (const AtomicType *atomicTy = LHSTy->getAs()) { + assert(0 && "not implemented"); + } + + OpInfo.LHS = buildLoadOfLValue(LHSLV, E->getExprLoc()); + + CIRGenFunction::SourceLocRAIIObject sourceloc{ + CGF, CGF.getLoc(E->getSourceRange())}; + SourceLocation Loc = E->getExprLoc(); + OpInfo.LHS = + buildScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc); + + // Expand the binary operator. + Result = (this->*Func)(OpInfo); + + // Convert the result back to the LHS type, + // potentially with Implicit Conversion sanitizer check. + Result = buildScalarConversion(Result, E->getComputationResultType(), LHSTy, + Loc, ScalarConversionOpts(CGF.SanOpts)); + + // Store the result value into the LHS lvalue. Bit-fields are handled + // specially because the result is altered by the store, i.e., [C99 6.5.16p1] + // 'An assignment expression has the value of the left operand after the + // assignment...'. + if (LHSLV.isBitField()) + assert(0 && "not yet implemented"); + else + CGF.buldStoreThroughLValue(RValue::get(Result), LHSLV, nullptr); + + assert(!CGF.getLangOpts().OpenMP && "Not implemented"); + return LHSLV; +} + +mlir::Value ScalarExprEmitter::buildCompoundAssign( + const CompoundAssignOperator *E, + mlir::Value (ScalarExprEmitter::*Func)(const BinOpInfo &)) { + + bool Ignore = TestAndClearIgnoreResultAssign(); + mlir::Value RHS; + LValue LHS = buildCompoundAssignLValue(E, Func, RHS); + + // If the result is clearly ignored, return now. + if (Ignore) + return {}; + + // The result of an assignment in C is the assigned r-value. + if (!CGF.getLangOpts().CPlusPlus) + return RHS; + + // If the lvalue is non-volatile, return the computed value of the assignment. + if (!LHS.isVolatile()) + return RHS; + + // Otherwise, reload the value. + return buildLoadOfLValue(LHS, E->getExprLoc()); +} diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index b1088bbccebc..dd242899cc5a 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -38,6 +38,10 @@ class CallOp; } } // namespace mlir +namespace { +class ScalarExprEmitter; +} + namespace cir { // FIXME: for now we are reusing this from lib/Clang/CodeGenFunction.h, which @@ -49,6 +53,8 @@ class CIRGenFunction { CIRGenModule &CGM; private: + friend class ::ScalarExprEmitter; + /// The builder is a helper class to create IR inside a function. The /// builder is stateful, in particular it keeps an "insertion point": this /// is where the next operations will be introduced. @@ -493,6 +499,10 @@ class CIRGenFunction { RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation Loc); + /// buildLoadOfLValue - Given an expression that represents a value lvalue, + /// this method emits the address of the lvalue, then loads the result as an + /// rvalue, returning the rvalue. + RValue buildLoadOfLValue(LValue LV, SourceLocation Loc); mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, clang::SourceLocation Loc, LValueBaseInfo BaseInfo, @@ -703,6 +713,7 @@ class CIRGenFunction { LValue buildDeclRefLValue(const clang::DeclRefExpr *E); LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E); + LValue buildCompoundAssignmentLValue(const clang::CompoundAssignOperator *E); LValue buildUnaryOpLValue(const clang::UnaryOperator *E); LValue buildStringLiteralLValue(const StringLiteral *E); diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp new file mode 100644 index 000000000000..cf32e5c5d44b --- /dev/null +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -0,0 +1,53 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +int foo(int a, int b) { + int x = a * b; + x *= b; + x /= b; + x %= b; + x += b; + x -= b; + x >>= b; + x <<= b; + x &= b; + x ^= b; + x |= b; + return x; +} + +// CHECK: [[Value:%[0-9]+]] = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} +// CHECK: = cir.binop(mul, +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(mul, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: cir.binop(div, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(rem, {{.*}} loc([[SourceLocation:#loc[0-9]+]]) +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(add, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(sub, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(shr, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(shl, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(and, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(xor, +// CHECK: cir.store {{.*}}[[Value]] +// CHECK: = cir.load {{.*}}[[Value]] +// CHECK: = cir.binop(or, +// CHECK: cir.store {{.*}}[[Value]] + +// CHECK: [[SourceLocation]] = loc(fused["{{.*}}binassign.cpp":8:3, "{{.*}}binassign.cpp":8:8]) From 976c36e155dc7bfb49ffcddb7259c8cf21d2c973 Mon Sep 17 00:00:00 2001 From: Jingsong Shang Date: Fri, 24 Jun 2022 22:22:22 -0700 Subject: [PATCH 0536/2301] [CIR][NFC] Fixed several typos in function names and comments. Summary: Fixed several typos in the comments and names of functions. It has no functional change. Test Plan: Test with command ninja check-clang-cir-codegen and should return no error. Reviewers: lanza, ivanmurashko, brunolopes Reviewed By: brunolopes Subscribers: lanza, ivanmurashko, brunolopes Differential Revision: https://phabricator.intern.facebook.com/D37436511 --- clang/lib/CIR/CIRGenDecl.cpp | 2 +- clang/lib/CIR/CIRGenExpr.cpp | 6 +++--- clang/lib/CIR/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/CIRGenFunction.h | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CIRGenDecl.cpp b/clang/lib/CIR/CIRGenDecl.cpp index c704ef976c6a..1daf0a8badbe 100644 --- a/clang/lib/CIR/CIRGenDecl.cpp +++ b/clang/lib/CIR/CIRGenDecl.cpp @@ -213,7 +213,7 @@ void CIRGenFunction::buildScalarInit(const Expr *init, const ValueDecl *D, // TODO: this is where a lot of ObjC lifetime stuff would be done. mlir::Value value = buildScalarExpr(init); SourceLocRAIIObject Loc{*this, getLoc(D->getSourceRange())}; - buldStoreThroughLValue(RValue::get(value), lvalue, D); + buildStoreThroughLValue(RValue::get(value), lvalue, D); return; } diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 0470414b5495..76f400fd12e8 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -272,8 +272,8 @@ RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { return RValue::get(buildLoadOfScalar(LV, Loc)); } -void CIRGenFunction::buldStoreThroughLValue(RValue Src, LValue Dst, - const Decl *InitDecl) { +void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst, + const Decl *InitDecl) { assert(Dst.isSimple() && "only implemented simple"); // TODO: ObjC lifetime. assert(Src.isScalar() && "Can't emit an agg store with this method"); @@ -427,7 +427,7 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { LValue LV = buildLValue(E->getLHS()); SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; - buldStoreThroughLValue(RV, LV, nullptr /*InitDecl*/); + buildStoreThroughLValue(RV, LV, nullptr /*InitDecl*/); assert(!getContext().getLangOpts().OpenMP && "last priv cond not implemented"); return LV; diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 59b852be0d7a..7582bd6b70f6 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -1098,7 +1098,7 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( if (LHSLV.isBitField()) assert(0 && "not yet implemented"); else - CGF.buldStoreThroughLValue(RValue::get(Result), LHSLV, nullptr); + CGF.buildStoreThroughLValue(RValue::get(Result), LHSLV, nullptr); assert(!CGF.getLangOpts().OpenMP && "Not implemented"); return LHSLV; diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index dd242899cc5a..755d1d1c4a83 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -277,7 +277,7 @@ class CIRGenFunction { /// or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in /// compiler-rt. enum TypeCheckKind { - /// Checking hte operand of a load. Must be suitably sized and aligned. + /// Checking the operand of a load. Must be suitably sized and aligned. TCK_Load, /// Checking the destination of a store. Must be suitably sized and aligned. TCK_Store, @@ -701,8 +701,8 @@ class CIRGenFunction { /// Store the specified rvalue into the specified /// lvalue, where both are guaranteed to the have the same type, and that type /// is 'Ty'. - void buldStoreThroughLValue(RValue Src, LValue Dst, - const clang::Decl *InitDecl); + void buildStoreThroughLValue(RValue Src, LValue Dst, + const clang::Decl *InitDecl); mlir::LogicalResult buildBranchThroughCleanup(JumpDest &Dest, clang::LabelDecl *L, From d54c50180f8571c7fb72cacf3fe725c3cad5f20d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 18 Jul 2022 18:44:47 -0700 Subject: [PATCH 0537/2301] [CIR] Add global linkage types to CIR and update clang codegen Similar to LLVM for now, should make it easier to forward linkage type into LLVM codegen (when work into that direction happens). As implementing more struct stuff, we have to teach about linkage to do more accurate CIR codegen. This change makes clang to generate linkage information and MLIR's visibility is now computed based on it. Update tests and verify that both linkage and visibility are compatible. --- clang/lib/CIR/CIRGenModule.cpp | 45 +++++++++++---- clang/lib/CIR/CIRGenModule.h | 8 ++- clang/test/CIR/CodeGen/array.cpp | 2 +- clang/test/CIR/CodeGen/globals.cpp | 24 ++++---- clang/test/CIR/IR/global.cir | 28 ++++----- clang/test/CIR/IR/invalid.cir | 26 +++++++-- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 67 +++++++++++++++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 38 ++++++++++-- 8 files changed, 184 insertions(+), 54 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 49215d4b0a05..e95772f48447 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -410,9 +410,13 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // unsigned TargetAS = astCtx.getTargetAddressSpace(AddrSpace); if (Entry) { if (WeakRefReferences.erase(Entry)) { - if (D && !D->hasAttr()) + if (D && !D->hasAttr()) { + auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage; + Entry.setLinkageAttr( + mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), LT)); mlir::SymbolTable::setSymbolVisibility( - Entry, mlir::SymbolTable::Visibility::Public); + Entry, getMLIRVisibilityFromCIRLinkage(LT)); + } } // Handle dropped DLL attributes. @@ -901,7 +905,7 @@ LangAS CIRGenModule::getGlobalConstantAddressSpace() const { static mlir::cir::GlobalOp generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, - mlir::SymbolTable::Visibility LT, CIRGenModule &CGM, + mlir::cir::GlobalLinkageKind LT, CIRGenModule &CGM, StringRef GlobalName, CharUnits Alignment) { unsigned AddrSpace = CGM.getASTContext().getTargetAddressSpace( CGM.getGlobalConstantAddressSpace()); @@ -916,7 +920,10 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, // Set up extra information and add to the module GV.setAlignmentAttr(CGM.getSize(Alignment)); - mlir::SymbolTable::setSymbolVisibility(GV, LT); + GV.setLinkageAttr( + mlir::cir::GlobalLinkageKindAttr::get(CGM.getBuilder().getContext(), LT)); + mlir::SymbolTable::setSymbolVisibility( + GV, CIRGenModule::getMLIRVisibilityFromCIRLinkage(LT)); GV.setInitialValueAttr(C); // TODO(cir) @@ -975,7 +982,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, SmallString<256> MangledNameBuffer; StringRef GlobalVariableName; - auto LT = mlir::SymbolTable::Visibility::Public; + auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage; // Mangle the string literal if that's how the ABI merges duplicate strings. // Don't do it if they are writable, since we don't want writes in one TU to @@ -984,7 +991,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, !getLangOpts().WritableStrings) { assert(0 && "not implemented"); } else { - LT = mlir::SymbolTable::Visibility::Private; + LT = mlir::cir::GlobalLinkageKind::InternalLinkage; GlobalVariableName = Name; } @@ -1157,10 +1164,25 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context, return false; } -mlir::SymbolTable::Visibility CIRGenModule::getCIRLinkageForDeclarator( +mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibilityFromCIRLinkage( + mlir::cir::GlobalLinkageKind GLK) { + switch (GLK) { + case mlir::cir::GlobalLinkageKind::InternalLinkage: + case mlir::cir::GlobalLinkageKind::PrivateLinkage: + return mlir::SymbolTable::Visibility::Private; + case mlir::cir::GlobalLinkageKind::ExternalLinkage: + case mlir::cir::GlobalLinkageKind::ExternalWeakLinkage: + return mlir::SymbolTable::Visibility::Public; + default: + assert(0 && "not implemented"); + } + llvm_unreachable("linkage should be handled above!"); +} + +mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) { if (Linkage == GVA_Internal) - return mlir::SymbolTable::Visibility::Private; + return mlir::cir::GlobalLinkageKind::InternalLinkage; if (D->hasAttr()) { assert(UnimplementedFeature::globalWeakLinkage() && "NYI"); @@ -1219,10 +1241,10 @@ mlir::SymbolTable::Visibility CIRGenModule::getCIRLinkageForDeclarator( // Otherwise, we have strong external linkage. assert(Linkage == GVA_StrongExternal); - return mlir::SymbolTable::Visibility::Public; + return mlir::cir::GlobalLinkageKind::ExternalLinkage; } -mlir::SymbolTable::Visibility CIRGenModule::getFunctionLinkage(GlobalDecl GD) { +mlir::cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { const auto *D = cast(GD.getDecl()); GVALinkage Linkage = astCtx.GetGVALinkageForFunction(D); @@ -1233,10 +1255,11 @@ mlir::SymbolTable::Visibility CIRGenModule::getFunctionLinkage(GlobalDecl GD) { if (isa(D) && cast(D)->isInheritingConstructor() && astCtx.getTargetInfo().getCXXABI().isMicrosoft()) { + // Just like in LLVM codegen: // Our approach to inheriting constructors is fundamentally different from // that used by the MS ABI, so keep our inheriting constructor thunks // internal rather than trying to pick an unambiguous mangling for them. - return mlir::SymbolTable::Visibility::Private; + return mlir::cir::GlobalLinkageKind::InternalLinkage; } return getCIRLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 6619a80952f6..c4335ea9cedf 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -353,11 +353,13 @@ class CIRGenModule { void emitError(const llvm::Twine &message) { theModule.emitError(message); } /// ------- - /// Linkage + /// Visibility and Linkage /// ------- - mlir::SymbolTable::Visibility getFunctionLinkage(GlobalDecl GD); - mlir::SymbolTable::Visibility + static mlir::SymbolTable::Visibility + getMLIRVisibilityFromCIRLinkage(mlir::cir::GlobalLinkageKind GLK); + mlir::cir::GlobalLinkageKind getFunctionLinkage(GlobalDecl GD); + mlir::cir::GlobalLinkageKind getCIRLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable); diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 7a89457a2cbb..01dcf9b53cb2 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -41,7 +41,7 @@ void local_stringlit() { const char *s = "whatnow"; } -// CHECK: cir.global "private" constant @".str" = #cir.cst_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} loc(#loc17) +// CHECK: cir.global "private" constant internal @".str" = #cir.cst_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} loc(#loc17) // CHECK: func @_Z15local_stringlitv() { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", cinit] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 38bebb93c010..94c2d12b13ac 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -24,21 +24,21 @@ void use_global_string() { } // CHECK: module { -// CHECK-NEXT: cir.global @a = 3 : i32 -// CHECK-NEXT: cir.global @c = 2 : i64 -// CHECK-NEXT: cir.global @y = 3.400000e+00 : f32 -// CHECK-NEXT: cir.global @w = 4.300000e+00 : f64 -// CHECK-NEXT: cir.global @x = 51 : i8 -// CHECK-NEXT: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> -// CHECK-NEXT: cir.global @alpha = #cir.cst_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8] : !cir.array> +// CHECK-NEXT: cir.global external @a = 3 : i32 +// CHECK-NEXT: cir.global external @c = 2 : i64 +// CHECK-NEXT: cir.global external @y = 3.400000e+00 : f32 +// CHECK-NEXT: cir.global external @w = 4.300000e+00 : f64 +// CHECK-NEXT: cir.global external @x = 51 : i8 +// CHECK-NEXT: cir.global external @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> +// CHECK-NEXT: cir.global external @alpha = #cir.cst_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8] : !cir.array> -// CHECK-NEXT: cir.global "private" constant @".str" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK-NEXT: cir.global @s = @".str": !cir.ptr +// CHECK-NEXT: cir.global "private" constant internal @".str" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global external @s = @".str": !cir.ptr -// CHECK-NEXT: cir.global "private" constant @".str1" = #cir.cst_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK-NEXT: cir.global @s1 = @".str1": !cir.ptr +// CHECK-NEXT: cir.global "private" constant internal @".str1" = #cir.cst_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global external @s1 = @".str1": !cir.ptr -// CHECK-NEXT: cir.global @s2 = @".str": !cir.ptr +// CHECK-NEXT: cir.global external @s2 = @".str": !cir.ptr // CHECK: func @_Z10use_globalv() { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["li", cinit] {alignment = 4 : i64} diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 8270b288d967..649a370f3a3f 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -1,26 +1,26 @@ // RUN: cir-tool %s | FileCheck %s module { - cir.global @a = 3 : i32 - cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> - cir.global @b = #cir.cst_array<"example\00" : !cir.array> - cir.global "private" constant @".str" : !cir.array {alignment = 1 : i64} - cir.global "private" @c : i32 - cir.global "private" constant @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} - cir.global @s = @".str2": !cir.ptr + cir.global external @a = 3 : i32 + cir.global external @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> + cir.global external @b = #cir.cst_array<"example\00" : !cir.array> + cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} + cir.global "private" internal @c : i32 + cir.global "private" constant internal @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s = @".str2": !cir.ptr func.func @use_global() { %0 = cir.get_global @a : cir.ptr cir.return } } -// CHECK: cir.global @a = 3 : i32 -// CHECK: cir.global @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> -// CHECK: cir.global @b = #cir.cst_array<"example\00" : !cir.array> -// CHECK: cir.global "private" constant @".str" : !cir.array {alignment = 1 : i64} -// CHECK: cir.global "private" @c : i32 -// CHECK: cir.global "private" constant @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK: cir.global @s = @".str2": !cir.ptr +// CHECK: cir.global external @a = 3 : i32 +// CHECK: cir.global external @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> +// CHECK: cir.global external @b = #cir.cst_array<"example\00" : !cir.array> +// CHECK: cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" internal @c : i32 +// CHECK: cir.global "private" constant internal @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global external @s = @".str2": !cir.ptr // CHECK: func @use_global() // CHECK-NEXT: %0 = cir.get_global @a : cir.ptr diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 0993812981ea..07761dd196cd 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -153,23 +153,41 @@ func.func @b0() { // ----- module { - cir.global @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array element should match array element type}} + cir.global external @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array element should match array element type}} } // expected-error {{expected constant attribute to match type}} // ----- module { - cir.global @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array size should match type size}} + cir.global external @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array size should match type size}} } // expected-error {{expected constant attribute to match type}} // ----- module { - cir.global @b = #cir.cst_array<"example\00" : !cir.array> // expected-error {{constant array element for string literals expects i8 array element type}} + cir.global external @b = #cir.cst_array<"example\00" : !cir.array> // expected-error {{constant array element for string literals expects i8 array element type}} } // expected-error {{expected constant attribute to match type}} // ----- module { - cir.global "private" constant @".str2" = #cir.cst_array<"example\00"> {alignment = 1 : i64} // expected-error {{expected type declaration for string literal}} + cir.global "private" constant external @".str2" = #cir.cst_array<"example\00"> {alignment = 1 : i64} // expected-error {{expected type declaration for string literal}} } // expected-error@-1 {{expected constant attribute to match type}} + +// ----- + +module { + cir.global @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{expected string or keyword containing one of the following enum values for attribute 'linkage' [external, available_externally, linkonce, linkonce_odr, weak, weak_odr, internal, private, extern_weak, common]}} +} + +// ----- + +module { + cir.global "private" external @v = 3 : i32 // expected-error {{private visibility not allowed with 'external' linkage}} +} + +// ----- + +module { + cir.global "public" internal @v = 3 : i32 // expected-error {{public visibility not allowed with 'internal' linkage}} +} diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 7a1beb6a0576..ea9e5f3d9e93 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -913,6 +913,58 @@ def LoopOp : CIR_Op<"loop", // GlobalOp //===----------------------------------------------------------------------===// +// Linkage types. This is currently a replay of llvm/IR/GlobalValue.h, this is +// currently handy as part of forwarding appropriate linkage types for LLVM +// lowering, specially useful for C++ support. + +// Externally visible function +def Global_ExternalLinkage : + I32EnumAttrCase<"ExternalLinkage", 0, "external">; +// Available for inspection, not emission. +def Global_AvailableExternallyLinkage : + I32EnumAttrCase<"AvailableExternallyLinkage", 1, "available_externally">; +// Keep one copy of function when linking (inline) +def Global_LinkOnceAnyLinkage : + I32EnumAttrCase<"LinkOnceAnyLinkage", 2, "linkonce">; +// Same, but only replaced by something equivalent. +def Global_LinkOnceODRLinkage : + I32EnumAttrCase<"LinkOnceODRLinkage", 3, "linkonce_odr">; +// Keep one copy of named function when linking (weak) +def Global_WeakAnyLinkage : + I32EnumAttrCase<"WeakAnyLinkage", 4, "weak">; +// Same, but only replaced by something equivalent. +def Global_WeakODRLinkage : + I32EnumAttrCase<"WeakODRLinkage", 5, "weak_odr">; +// TODO: should we add something like appending linkage too? +// Special purpose, only applies to global arrays +// def Global_AppendingLinkage : +// I32EnumAttrCase<"AppendingLinkage", 6, "appending">; +// Rename collisions when linking (static functions). +def Global_InternalLinkage : + I32EnumAttrCase<"InternalLinkage", 7, "internal">; +// Like Internal, but omit from symbol table. +def Global_PrivateLinkage : + I32EnumAttrCase<"PrivateLinkage", 8, "private">; +// ExternalWeak linkage description. +def Global_ExternalWeakLinkage : + I32EnumAttrCase<"ExternalWeakLinkage", 9, "extern_weak">; +// Tentative definitions. +def Global_CommonLinkage : + I32EnumAttrCase<"CommonLinkage", 10, "common">; + +/// An enumeration for the kinds of linkage for global values. +def GlobalLinkageKind : I32EnumAttr< + "GlobalLinkageKind", + "Linkage type/kind", + [Global_ExternalLinkage, Global_AvailableExternallyLinkage, + Global_LinkOnceAnyLinkage, Global_LinkOnceODRLinkage, + Global_WeakAnyLinkage, Global_WeakODRLinkage, + Global_InternalLinkage, Global_PrivateLinkage, + Global_ExternalWeakLinkage, Global_CommonLinkage + ]> { + let cppNamespace = "::mlir::cir"; +} + def GlobalOp : CIR_Op<"global", [Symbol]> { let summary = "Declares or defines a global variable"; let description = [{ @@ -928,8 +980,9 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { `constant` unit attribute. Writing to such constant global variables is undefined. - Symbol visibility is defined in terms of MLIR's visibility, and C/C++ - linkage types are still TBD. + The `linkage` tracks C/C++ linkage types, currently very similar to LLVM's. + Symbol visibility in `sym_visibility` is defined in terms of MLIR's visibility + and verified to be in accordance to `linkage`. Example: @@ -940,9 +993,12 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { }]; // Note that both sym_name and sym_visibility are tied to Symbol trait. + // TODO: sym_visibility can possibly be represented by implementing the + // necessary Symbol's interface in terms of linkage instead. let arguments = (ins SymbolNameAttr:$sym_name, OptionalAttr:$sym_visibility, TypeAttr:$sym_type, + Arg:$linkage, // Note this can also be a FlatSymbolRefAttr OptionalAttr:$initial_value, UnitAttr:$constant, @@ -951,6 +1007,7 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { let assemblyFormat = [{ ($sym_visibility^)? (`constant` $constant^)? + $linkage $sym_name custom($sym_type, $initial_value) attr-dict @@ -965,9 +1022,13 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { let skipDefaultBuilders = 1; let builders = [ OpBuilder<(ins + // MLIR's default visibility is public. "StringRef":$sym_name, "Type":$sym_type, - CArg<"bool", "false">:$isConstant + CArg<"bool", "false">:$isConstant, + // CIR defaults to external linkage. + CArg<"cir::GlobalLinkageKind", + "cir::GlobalLinkageKind::ExternalLinkage">:$linkage )> ]; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 389e21d4063a..a97606268350 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -944,9 +944,11 @@ parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, LogicalResult GlobalOp::verify() { // Verify that the initial value, if present, is either a unit attribute or // an attribute CIR supports. - if (getInitialValue().has_value()) - return checkConstantTypes(getOperation(), getSymType(), - getInitialValue().value()); + if (getInitialValue().has_value()) { + if (checkConstantTypes(getOperation(), getSymType(), *getInitialValue()) + .failed()) + return failure(); + } if (std::optional alignAttr = getAlignment()) { uint64_t alignment = alignAttr.value(); @@ -955,18 +957,42 @@ LogicalResult GlobalOp::verify() { << " is not a power of 2"; } + switch (getLinkage()) { + case mlir::cir::GlobalLinkageKind::InternalLinkage: + case mlir::cir::GlobalLinkageKind::PrivateLinkage: + if (isPublic()) + return emitError() << "public visibility not allowed with '" + << stringifyGlobalLinkageKind(getLinkage()) + << "' linkage"; + break; + case mlir::cir::GlobalLinkageKind::ExternalLinkage: + case mlir::cir::GlobalLinkageKind::ExternalWeakLinkage: + if (isPrivate()) + return emitError() << "private visibility not allowed with '" + << stringifyGlobalLinkageKind(getLinkage()) + << "' linkage"; + break; + default: + assert(0 && "not implemented"); + } + // TODO: verify visibility for declarations? return success(); } void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, - StringRef sym_name, Type sym_type, bool isConstant) { + StringRef sym_name, Type sym_type, bool isConstant, + cir::GlobalLinkageKind linkage) { odsState.addAttribute(getSymNameAttrName(odsState.name), odsBuilder.getStringAttr(sym_name)); odsState.addAttribute(getSymTypeAttrName(odsState.name), ::mlir::TypeAttr::get(sym_type)); if (isConstant) odsState.addAttribute("constant", odsBuilder.getUnitAttr()); + + ::mlir::cir::GlobalLinkageKindAttr linkageAttr = + cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage); + odsState.addAttribute("linkage", linkageAttr); } //===----------------------------------------------------------------------===// @@ -1105,8 +1131,8 @@ ::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, // Parse literal '>' if (parser.parseGreater()) return {}; - return parser.getChecked(loc, parser.getContext(), - resultTy.value(), resultVal.value()); + return parser.getChecked( + loc, parser.getContext(), resultTy.value(), resultVal.value()); } void CstArrayAttr::print(::mlir::AsmPrinter &printer) const { From dff7f45940285fdbfeafff2e60f4d991272d94d0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 19 Jul 2022 16:38:09 -0700 Subject: [PATCH 0538/2301] [CIR][CodeGen] Unify methods to get globals and remove hack --- clang/lib/CIR/CIRGenModule.cpp | 46 ++++++++++++++++------------------ clang/lib/CIR/CIRGenModule.h | 7 ++---- 2 files changed, 23 insertions(+), 30 deletions(-) diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index e95772f48447..4c421477f806 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -302,7 +302,7 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { } llvm::StringRef MangledName = getMangledName(GD); - if (GetGlobalValue(MangledName) != nullptr) { + if (getGlobalValue(MangledName) != nullptr) { // The value has already been used and should therefore be emitted. addDeferredDeclToEmit(GD); } else if (MustBeEmitted(Global)) { @@ -355,12 +355,16 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, assert(!D->getAttr() && "NYI"); } -mlir::cir::GlobalOp CIRGenModule::getGlobalValue(StringRef Name) { +mlir::Operation *CIRGenModule::getGlobalValue(StringRef Name) { auto global = mlir::SymbolTable::lookupSymbolIn(theModule, Name); if (!global) return {}; - assert(isa(global) && "not supported"); - return cast(global); + return global; +} + +mlir::Value CIRGenModule::getGlobalValue(const Decl *D) { + assert(CurCGF); + return CurCGF->symbolTable.lookup(D); } static mlir::cir::GlobalOp createGlobalOp(CIRGenModule &CGM, mlir::Location loc, @@ -405,7 +409,11 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, LangAS AddrSpace, const VarDecl *D, ForDefinition_t IsForDefinition) { // Lookup the entry, lazily creating it if necessary. - mlir::cir::GlobalOp Entry = getGlobalValue(MangledName); + mlir::cir::GlobalOp Entry; + if (auto *V = getGlobalValue(MangledName)) { + assert(isa(V) && "only supports GlobalOp for now"); + Entry = dyn_cast_or_null(V); + } // unsigned TargetAS = astCtx.getTargetAddressSpace(AddrSpace); if (Entry) { @@ -1442,8 +1450,11 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( } // Lookup the entry, lazily creating it if necessary. - mlir::Operation *Entry = GetGlobalValue(MangledName); + mlir::Operation *Entry = getGlobalValue(MangledName); if (Entry) { + assert(isa(Entry) && + "not implemented, only supports FuncOp for now"); + if (WeakRefReferences.erase(Entry)) { llvm_unreachable("NYI"); } @@ -1568,11 +1579,6 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( assert(false && "Incompmlete functions NYI"); } -mlir::Value CIRGenModule::GetGlobalValue(const Decl *D) { - assert(CurCGF); - return CurCGF->symbolTable.lookup(D); -} - mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { const SourceManager &SM = astCtx.getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(SLoc); @@ -1635,11 +1641,13 @@ void CIRGenModule::buildDeferred() { // IsForDefinition equal to true. Query mangled names table to get // GlobalValue. if (!Op) { - Op = GetGlobalValue(getMangledName(D)); + Op = getGlobalValue(getMangledName(D)); } - // Make sure GetGlobalValue returned non-null. + // Make sure getGlobalValue returned non-null. assert(Op); + assert(isa(Op) && + "not implemented, only supports FuncOp for now"); // Check to see if we've already emitted this. This is necessary for a // couple of reasons: first, decls can end up in deferred-decls queue @@ -1674,18 +1682,6 @@ mlir::IntegerAttr CIRGenModule::getSize(CharUnits size) { mlir::IntegerType::get(builder.getContext(), 64), size.getQuantity()); } -// TODO: this is gross, make a map -mlir::Operation *CIRGenModule::GetGlobalValue(StringRef Name) { - for (auto const &op : - theModule.getBodyRegion().front().getOps()) - if (auto Fn = llvm::cast(op)) { - if (Name == Fn.getName()) - return Fn; - } - - return nullptr; -} - mlir::Operation * CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { const Decl *D = GD.getDecl(); diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index c4335ea9cedf..25b8fb3c3f7f 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -152,7 +152,8 @@ class CIRGenModule { void maybeHandleStaticInExternC(const SomeDecl *D, mlir::cir::GlobalOp GV); llvm::DenseMap Globals; - mlir::cir::GlobalOp getGlobalValue(StringRef Ref); + mlir::Operation *getGlobalValue(StringRef Ref); + mlir::Value getGlobalValue(const clang::Decl *D); /// If the specified mangled name is not in the module, create and return an /// mlir::GlobalOp value @@ -310,10 +311,6 @@ class CIRGenModule { llvm::StringRef getMangledName(clang::GlobalDecl GD); - mlir::Value GetGlobalValue(const clang::Decl *D); - - mlir::Operation *GetGlobalValue(llvm::StringRef Ref); - // Make sure that this type is translated. void UpdateCompletedType(const clang::TagDecl *TD); From 0b62435b5b64aa0c248f68c4130d7b5bc9be5757 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 20 Jul 2022 15:50:43 -0700 Subject: [PATCH 0539/2301] [CIR][CodeGen] Add helpers for gathering linkage information While here update previously unimplemented feature guarding call sites and add change `getCIRGenToUse` to return the appropriated linkage. --- clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 18 +++- clang/lib/CIR/CIRGenModule.cpp | 28 +++-- clang/lib/CIR/UnimplementedFeatureGuarding.h | 10 -- mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 1 + .../include/mlir/Dialect/CIR/IR/CIROpsEnums.h | 102 +++++++++++++++++- 5 files changed, 136 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 47466989b16b..65a82b136732 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -259,10 +259,20 @@ static StructorCIRGen getCIRGenToUse(CIRGenModule &CGM, auto Linkage = CGM.getFunctionLinkage(AliasDecl); (void)Linkage; - assert(!UnimplementedFeature::globalIsDiscardableIfUnused() && "NYI"); - // // FIXME: Should we allow available_externally aliases? - assert(!UnimplementedFeature::globalIsValidLinkage() && "NYI"); - assert(!UnimplementedFeature::globalIsWeakForLinker() && "NYI"); + if (mlir::cir::isDiscardableIfUnused(Linkage)) + return StructorCIRGen::RAUW; + + // FIXME: Should we allow available_externally aliases? + if (!mlir::cir::isValidLinkage(Linkage)) + return StructorCIRGen::RAUW; + + if (mlir::cir::isWeakForLinker(Linkage)) { + // Only ELF and wasm support COMDATs with arbitrary names (C5/D5). + if (CGM.getTarget().getTriple().isOSBinFormatELF() || + CGM.getTarget().getTriple().isOSBinFormatWasm()) + return StructorCIRGen::COMDAT; + return StructorCIRGen::Emit; + } return StructorCIRGen::Alias; } diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 4c421477f806..d39ca0f70f43 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -937,7 +937,7 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, // TODO(cir) assert(!cir::UnimplementedFeature::threadLocal() && "NYI"); assert(!cir::UnimplementedFeature::unnamedAddr() && "NYI"); - assert(!cir::UnimplementedFeature::isWeakForLinker() && "NYI"); + assert(!mlir::cir::isWeakForLinker(LT) && "NYI"); assert(!cir::UnimplementedFeature::setDSOLocal() && "NYI"); return GV; } @@ -1193,17 +1193,20 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( return mlir::cir::GlobalLinkageKind::InternalLinkage; if (D->hasAttr()) { - assert(UnimplementedFeature::globalWeakLinkage() && "NYI"); + if (IsConstantVariable) + return mlir::cir::GlobalLinkageKind::WeakODRLinkage; + else + return mlir::cir::GlobalLinkageKind::WeakAnyLinkage; } if (const auto *FD = D->getAsFunction()) if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally) - assert(UnimplementedFeature::globalLinkOnceAnyLinkage() && "NYI"); + return mlir::cir::GlobalLinkageKind::LinkOnceAnyLinkage; // We are guaranteed to have a strong definition somewhere else, // so we can use available_externally linkage. if (Linkage == GVA_AvailableExternally) - assert(UnimplementedFeature::globalAvailableExternallyLinkage() && "NYI"); + return mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; // Note that Apple's kernel linker doesn't support symbol // coalescing, so we need to avoid linkonce and weak linkages there. @@ -1217,7 +1220,9 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( // merged with other definitions. c) C++ has the ODR, so we know the // definition is dependable. if (Linkage == GVA_DiscardableODR) - assert(0 && "NYI"); + return !astCtx.getLangOpts().AppleKext + ? mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage + : mlir::cir::GlobalLinkageKind::InternalLinkage; // An explicit instantiation of a template has weak linkage, since // explicit instantiations can occur in multiple translation units @@ -1230,7 +1235,14 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( // -fgpu-rdc case, device function calls across multiple TU's are allowed, // therefore we need to follow the normal linkage paradigm. if (Linkage == GVA_StrongODR) { - assert(0 && "NYI"); + if (getLangOpts().AppleKext) + return mlir::cir::GlobalLinkageKind::ExternalLinkage; + if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice && + !getLangOpts().GPURelocatableDeviceCode) + return D->hasAttr() + ? mlir::cir::GlobalLinkageKind::ExternalLinkage + : mlir::cir::GlobalLinkageKind::InternalLinkage; + return mlir::cir::GlobalLinkageKind::WeakODRLinkage; } // C++ doesn't have tentative definitions and thus cannot have common @@ -1238,14 +1250,14 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( if (!getLangOpts().CPlusPlus && isa(D) && !isVarDeclStrongDefinition(astCtx, *this, cast(D), getCodeGenOpts().NoCommon)) - assert(UnimplementedFeature::globalCommonLinkage() && "NYI"); + return mlir::cir::GlobalLinkageKind::CommonLinkage; // selectany symbols are externally visible, so use weak instead of // linkonce. MSVC optimizes away references to const selectany globals, so // all definitions should be the same and ODR linkage should be used. // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx if (D->hasAttr()) - assert(UnimplementedFeature::globalWeakLinkage() && "NYI"); + return mlir::cir::GlobalLinkageKind::WeakODRLinkage; // Otherwise, we have strong external linkage. assert(Linkage == GVA_StrongExternal); diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 6fed5a8beca6..54e12fdcdea6 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -31,17 +31,7 @@ struct UnimplementedFeature { // Unhandled global/linkage information. static bool unnamedAddr() { return false; } - - static bool isWeakForLinker() { return false; } - static bool globalWeakLinkage() { return false; } - static bool globalLinkOnceAnyLinkage() { return false; } - static bool globalAvailableExternallyLinkage() { return false; } - static bool globalCommonLinkage() { return false; } - static bool globalIsDiscardableIfUnused() { return false; } - static bool globalIsValidLinkage() { return false; } - static bool globalIsWeakForLinker() { return false; } static bool setComdat() { return false; } - static bool setDSOLocal() { return false; } static bool threadLocal() { return false; } static bool setDLLStorageClass() { return false; } diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index e3e43dd9b221..9f4aa30cee4d 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -32,6 +32,7 @@ using FuncOp = func::FuncOp; #include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIROpsDialect.h.inc" +#include "mlir/Dialect/CIR/IR/CIROpsEnums.h" #include "mlir/Dialect/CIR/IR/CIROpsStructs.h.inc" #include "mlir/Dialect/CIR/IR/CIRTypes.h" diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h b/mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h index f61d2b3a60a4..8583569c84e8 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h @@ -17,5 +17,105 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/Dialect/CIR/IR/CIROpsEnums.h.inc" -#endif // MLIR_DIALECT_CIR_CIROPSENUMS_H_ +namespace mlir { +namespace cir { + +static bool isExternalLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::ExternalLinkage; +} +static bool isAvailableExternallyLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::AvailableExternallyLinkage; +} +static bool isLinkOnceAnyLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::LinkOnceAnyLinkage; +} +static bool isLinkOnceODRLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::LinkOnceODRLinkage; +} +static bool isLinkOnceLinkage(GlobalLinkageKind Linkage) { + return isLinkOnceAnyLinkage(Linkage) || isLinkOnceODRLinkage(Linkage); +} +static bool isWeakAnyLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::WeakAnyLinkage; +} +static bool isWeakODRLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::WeakODRLinkage; +} +static bool isWeakLinkage(GlobalLinkageKind Linkage) { + return isWeakAnyLinkage(Linkage) || isWeakODRLinkage(Linkage); +} +static bool isInternalLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::InternalLinkage; +} +static bool isPrivateLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::PrivateLinkage; +} +static bool isLocalLinkage(GlobalLinkageKind Linkage) { + return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage); +} +static bool isExternalWeakLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::ExternalWeakLinkage; +} +LLVM_ATTRIBUTE_UNUSED static bool isCommonLinkage(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::CommonLinkage; +} +LLVM_ATTRIBUTE_UNUSED static bool +isValidDeclarationLinkage(GlobalLinkageKind Linkage) { + return isExternalWeakLinkage(Linkage) || isExternalLinkage(Linkage); +} + +/// Whether the definition of this global may be replaced by something +/// non-equivalent at link time. For example, if a function has weak linkage +/// then the code defining it may be replaced by different code. +LLVM_ATTRIBUTE_UNUSED static bool +isInterposableLinkage(GlobalLinkageKind Linkage) { + switch (Linkage) { + case GlobalLinkageKind::WeakAnyLinkage: + case GlobalLinkageKind::LinkOnceAnyLinkage: + case GlobalLinkageKind::CommonLinkage: + case GlobalLinkageKind::ExternalWeakLinkage: + return true; + + case GlobalLinkageKind::AvailableExternallyLinkage: + case GlobalLinkageKind::LinkOnceODRLinkage: + case GlobalLinkageKind::WeakODRLinkage: + // The above three cannot be overridden but can be de-refined. + + case GlobalLinkageKind::ExternalLinkage: + case GlobalLinkageKind::InternalLinkage: + case GlobalLinkageKind::PrivateLinkage: + return false; + } + llvm_unreachable("Fully covered switch above!"); +} +/// Whether the definition of this global may be discarded if it is not used +/// in its compilation unit. +LLVM_ATTRIBUTE_UNUSED static bool +isDiscardableIfUnused(GlobalLinkageKind Linkage) { + return isLinkOnceLinkage(Linkage) || isLocalLinkage(Linkage) || + isAvailableExternallyLinkage(Linkage); +} + +/// Whether the definition of this global may be replaced at link time. NB: +/// Using this method outside of the code generators is almost always a +/// mistake: when working at the IR level use isInterposable instead as it +/// knows about ODR semantics. +LLVM_ATTRIBUTE_UNUSED static bool isWeakForLinker(GlobalLinkageKind Linkage) { + return Linkage == GlobalLinkageKind::WeakAnyLinkage || + Linkage == GlobalLinkageKind::WeakODRLinkage || + Linkage == GlobalLinkageKind::LinkOnceAnyLinkage || + Linkage == GlobalLinkageKind::LinkOnceODRLinkage || + Linkage == GlobalLinkageKind::CommonLinkage || + Linkage == GlobalLinkageKind::ExternalWeakLinkage; +} + +LLVM_ATTRIBUTE_UNUSED static bool isValidLinkage(GlobalLinkageKind L) { + return isExternalLinkage(L) || isLocalLinkage(L) || isWeakLinkage(L) || + isLinkOnceLinkage(L); +} + +} // namespace cir +} // namespace mlir + +#endif // MLIR_DIALECT_CIR_CIROPSENUMS_H_ From c5682f17fb3b307638a6ae06272ddbd304911468 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 20 Jul 2022 22:27:30 -0700 Subject: [PATCH 0540/2301] [CIR] Add cir::FuncOp operation This is necessary to enforce the presence of linkage kind, which is key to continue implementing methods. While here add some helpers to apply more reliable parsing for linkage types. CodeGen does not use cir::FuncOp just yet, patch a few places that to keep using mlir::FuncOp for now. --- mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 2 + mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 114 +++++++++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 213 +++++++++++++++++- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 2 +- 4 files changed, 314 insertions(+), 17 deletions(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index 9f4aa30cee4d..aa3eeafde1b0 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -18,7 +18,9 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Dialect.h" #include "mlir/IR/OpDefinition.h" +#include "mlir/Interfaces/CallInterfaces.h" #include "mlir/Interfaces/ControlFlowInterfaces.h" +#include "mlir/Interfaces/FunctionInterfaces.h" #include "mlir/Interfaces/InferTypeOpInterface.h" #include "mlir/Interfaces/LoopLikeInterface.h" #include "mlir/Interfaces/SideEffectInterfaces.h" diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index ea9e5f3d9e93..cb74cae0ee9a 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -18,14 +18,17 @@ include "mlir/Dialect/CIR/IR/CIRDialect.td" include "mlir/Dialect/CIR/IR/CIRTypes.td" include "mlir/Dialect/CIR/IR/CIRAttrs.td" -include "mlir/IR/EnumAttr.td" -include "mlir/IR/SymbolInterfaces.td" -include "mlir/IR/BuiltinAttributeInterfaces.td" +include "mlir/Interfaces/CallInterfaces.td" include "mlir/Interfaces/ControlFlowInterfaces.td" -include "mlir/Interfaces/LoopLikeInterface.td" +include "mlir/Interfaces/FunctionInterfaces.td" include "mlir/Interfaces/InferTypeOpInterface.td" +include "mlir/Interfaces/LoopLikeInterface.td" include "mlir/Interfaces/SideEffectInterfaces.td" +include "mlir/IR/BuiltinAttributeInterfaces.td" +include "mlir/IR/EnumAttr.td" +include "mlir/IR/SymbolInterfaces.td" + //===----------------------------------------------------------------------===// // CIR Ops //===----------------------------------------------------------------------===// @@ -324,7 +327,7 @@ def StoreOp : CIR_Op<"store", [ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp, LoopOp">, +def ReturnOp : CIR_Op<"return", [HasParent<"mlir::FuncOp, ScopeOp, IfOp, SwitchOp, LoopOp">, Terminator]> { let summary = "Return from function"; let description = [{ @@ -1098,4 +1101,105 @@ def StructElementAddr : CIR_Op<"struct_element_addr"> { // FIXME: add verifier. } +//===----------------------------------------------------------------------===// +// FuncOp +//===----------------------------------------------------------------------===// + +def FuncOp : CIR_Op<"func", [ + AutomaticAllocationScope, CallableOpInterface, FunctionOpInterface, + IsolatedFromAbove, Symbol +]> { + let summary = "Declare or define a function"; + let description = [{ + + Similar to `mlir::FuncOp` built-in: + > Operations within the function cannot implicitly capture values defined + > outside of the function, i.e. Functions are `IsolatedFromAbove`. All + > external references must use function arguments or attributes that establish + > a symbolic connection (e.g. symbols referenced by name via a string + > attribute like SymbolRefAttr). An external function declaration (used when + > referring to a function declared in some other module) has no body. While + > the MLIR textual form provides a nice inline syntax for function arguments, + > they are internally represented as “block arguments” to the first block in + > the region. + > + > Only dialect attribute names may be specified in the attribute dictionaries + > for function arguments, results, or the function itself. + + The function linkage information is specified by `linkage`, as defined by + `GlobalLinkageKind` attribute. + + Example: + + ```mlir + // External function definitions. + func @abort() + + // A function with internal linkage. + func internal @count(%x: i64) -> (i64) + return %x : i64 + } + ``` + }]; + + let arguments = (ins SymbolNameAttr:$sym_name, + TypeAttrOf:$function_type, + DefaultValuedAttr:$linkage, + OptionalAttr:$sym_visibility, + OptionalAttr:$arg_attrs, + OptionalAttr:$res_attrs); + let regions = (region AnyRegion:$body); + let skipDefaultBuilders = 1; + + let builders = [OpBuilder<(ins + "StringRef":$name, "FunctionType":$type, + CArg<"GlobalLinkageKind", "GlobalLinkageKind::ExternalLinkage">:$linkage, + CArg<"ArrayRef", "{}">:$attrs, + CArg<"ArrayRef", "{}">:$argAttrs) + >]; + + let extraClassDeclaration = [{ + /// Returns the region on the current operation that is callable. This may + /// return null in the case of an external callable object, e.g. an external + /// function. + ::mlir::Region *getCallableRegion() { + return isExternal() ? nullptr : &getBody(); + } + + /// Returns the results types that the callable region produces when + /// executed. + ArrayRef getCallableResults() { + return getFunctionType().getResults(); + } + + /// Returns the argument attributes for all callable region arguments or + /// null if there are none. + ::mlir::ArrayAttr getCallableArgAttrs() { + return getArgAttrs().value_or(nullptr); + } + + /// Returns the result attributes for all callable region results or null if + /// there are none. + ::mlir::ArrayAttr getCallableResAttrs() { + return getResAttrs().value_or(nullptr); + } + + /// Returns the argument types of this function. + ArrayRef getArgumentTypes() { return getFunctionType().getInputs(); } + + /// Returns the result types of this function. + ArrayRef getResultTypes() { return getFunctionType().getResults(); } + + /// Hook for OpTrait::FunctionOpInterfaceTrait, called after verifying that + /// the 'type' attribute is present and checks if it holds a function type. + /// Ensures getType, getNumFuncArguments, and getNumFuncResults can be + /// called safely. + LogicalResult verifyType(); + }]; + + let hasCustomAssemblyFormat = 1; + let hasVerifier = 1; +} + #endif // MLIR_CIR_DIALECT_CIR_OPS diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index a97606268350..21c82f7f21a1 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -21,6 +21,8 @@ #include "mlir/IR/OpDefinition.h" #include "mlir/IR/OpImplementation.h" #include "mlir/IR/TypeUtilities.h" +#include "mlir/Interfaces/FunctionImplementation.h" +#include "mlir/Interfaces/InferTypeOpInterface.h" using namespace mlir; using namespace mlir::cir; @@ -60,6 +62,54 @@ void cir::CIRDialect::initialize() { addInterfaces(); } +//===----------------------------------------------------------------------===// +// Helpers +//===----------------------------------------------------------------------===// + +// Parses one of the keywords provided in the list `keywords` and returns the +// position of the parsed keyword in the list. If none of the keywords from the +// list is parsed, returns -1. +static int parseOptionalKeywordAlternative(OpAsmParser &parser, + ArrayRef keywords) { + for (auto en : llvm::enumerate(keywords)) { + if (succeeded(parser.parseOptionalKeyword(en.value()))) + return en.index(); + } + return -1; +} + +namespace { +template +struct EnumTraits {}; + +#define REGISTER_ENUM_TYPE(Ty) \ + template <> \ + struct EnumTraits { \ + static StringRef stringify(Ty value) { return stringify##Ty(value); } \ + static unsigned getMaxEnumVal() { return getMaxEnumValFor##Ty(); } \ + } + +REGISTER_ENUM_TYPE(GlobalLinkageKind); +} // namespace + +/// Parse an enum from the keyword, or default to the provided default value. +/// The return type is the enum type by default, unless overriden with the +/// second template argument. +/// TODO: teach other places in this file to use this function. +template +static RetTy parseOptionalCIRKeyword(OpAsmParser &parser, + OperationState &result, + EnumTy defaultValue) { + SmallVector names; + for (unsigned i = 0, e = EnumTraits::getMaxEnumVal(); i <= e; ++i) + names.push_back(EnumTraits::stringify(static_cast(i))); + + int index = parseOptionalKeywordAlternative(parser, names); + if (index == -1) + return static_cast(defaultValue); + return static_cast(index); +} + //===----------------------------------------------------------------------===// // ConstantOp //===----------------------------------------------------------------------===// @@ -184,7 +234,7 @@ LogicalResult CastOp::verify() { //===----------------------------------------------------------------------===// static mlir::LogicalResult checkReturnAndFunction(ReturnOp op, - FuncOp function) { + mlir::FuncOp function) { // ReturnOps currently only have a single optional operand. if (op.getNumOperands() > 1) return op.emitOpError() << "expects at most 1 return operand"; @@ -217,11 +267,11 @@ mlir::LogicalResult ReturnOp::verify() { // Returns can be present in multiple different scopes, get the // wrapping function and start from there. auto *fnOp = getOperation()->getParentOp(); - while (!isa(fnOp)) + while (!isa(fnOp)) fnOp = fnOp->getParentOp(); // Make sure return types match function return type. - if (checkReturnAndFunction(*this, cast(fnOp)).failed()) + if (checkReturnAndFunction(*this, cast(fnOp)).failed()) return failure(); return success(); @@ -483,7 +533,7 @@ LogicalResult ScopeOp::verify() { return success(); } mlir::LogicalResult YieldOp::verify() { auto isDominatedByLoopOrSwitch = [](Operation *parentOp) { - while (!llvm::isa(parentOp)) { + while (!llvm::isa(parentOp)) { if (llvm::isa(parentOp)) return true; parentOp = parentOp->getParentOp(); @@ -492,7 +542,7 @@ mlir::LogicalResult YieldOp::verify() { }; auto isDominatedByLoop = [](Operation *parentOp) { - while (!llvm::isa(parentOp)) { + while (!llvm::isa(parentOp)) { if (llvm::isa(parentOp)) return true; parentOp = parentOp->getParentOp(); @@ -958,15 +1008,15 @@ LogicalResult GlobalOp::verify() { } switch (getLinkage()) { - case mlir::cir::GlobalLinkageKind::InternalLinkage: - case mlir::cir::GlobalLinkageKind::PrivateLinkage: + case GlobalLinkageKind::InternalLinkage: + case GlobalLinkageKind::PrivateLinkage: if (isPublic()) return emitError() << "public visibility not allowed with '" << stringifyGlobalLinkageKind(getLinkage()) << "' linkage"; break; - case mlir::cir::GlobalLinkageKind::ExternalLinkage: - case mlir::cir::GlobalLinkageKind::ExternalWeakLinkage: + case GlobalLinkageKind::ExternalLinkage: + case GlobalLinkageKind::ExternalWeakLinkage: if (isPrivate()) return emitError() << "private visibility not allowed with '" << stringifyGlobalLinkageKind(getLinkage()) @@ -1017,6 +1067,147 @@ GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return success(); } +//===----------------------------------------------------------------------===// +// FuncOp +//===----------------------------------------------------------------------===// + +/// Returns the name used for the linkage attribute. This *must* correspond to +/// the name of the attribute in ODS. +static StringRef getLinkageAttrNameString() { return "linkage"; } + +void cir::FuncOp::build(OpBuilder &builder, OperationState &result, + StringRef name, FunctionType type, + GlobalLinkageKind linkage, + ArrayRef attrs, + ArrayRef argAttrs) { + result.addRegion(); + result.addAttribute(SymbolTable::getSymbolAttrName(), + builder.getStringAttr(name)); + result.addAttribute(getFunctionTypeAttrName(result.name), + TypeAttr::get(type)); + result.addAttribute( + getLinkageAttrNameString(), + GlobalLinkageKindAttr::get(builder.getContext(), linkage)); + result.attributes.append(attrs.begin(), attrs.end()); + if (argAttrs.empty()) + return; + + function_interface_impl::addArgAndResultAttrs( + builder, result, argAttrs, + /*resultAttrs=*/std::nullopt, getArgAttrsAttrName(result.name), + getResAttrsAttrName(result.name)); +} + +ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { + // Default to external linkage if no keyword is provided. + state.addAttribute( + getLinkageAttrNameString(), + GlobalLinkageKindAttr::get( + parser.getContext(), + parseOptionalCIRKeyword( + parser, state, GlobalLinkageKind::ExternalLinkage))); + + StringAttr nameAttr; + SmallVector arguments; + SmallVector argAttrs; + SmallVector resultAttrs; + SmallVector argTypes; + SmallVector resultTypes; + auto &builder = parser.getBuilder(); + + // Parse the name as a symbol. + if (parser.parseSymbolName(nameAttr, SymbolTable::getSymbolAttrName(), + state.attributes)) + return failure(); + + // Parse the function signature. + bool isVariadic = false; + if (function_interface_impl::parseFunctionSignature( + parser, /*allowVariadic=*/false, arguments, isVariadic, resultTypes, + resultAttrs)) + return failure(); + + auto fnType = builder.getFunctionType(argTypes, resultTypes); + state.addAttribute(getFunctionTypeAttrName(state.name), + TypeAttr::get(fnType)); + + // If additional attributes are present, parse them. + if (parser.parseOptionalAttrDictWithKeyword(state.attributes)) + return failure(); + + // Add the attributes to the function arguments. + assert(argAttrs.size() == argTypes.size()); + assert(resultAttrs.size() == resultTypes.size()); + function_interface_impl::addArgAndResultAttrs( + builder, state, arguments, resultAttrs, getArgAttrsAttrName(state.name), + getResAttrsAttrName(state.name)); + + // Parse the optional function body. + auto *body = state.addRegion(); + OptionalParseResult result = parser.parseOptionalRegion( + *body, arguments, /*enableNameShadowing=*/false); + return failure(result.has_value() && failed(*result)); +} + +void cir::FuncOp::print(OpAsmPrinter &p) { + p << ' '; + if (getLinkage() != GlobalLinkageKind::ExternalLinkage) + p << stringifyGlobalLinkageKind(getLinkage()) << ' '; + + // Print function name, signature, and control. + p.printSymbolName(getSymName()); + auto fnType = getFunctionType(); + function_interface_impl::printFunctionSignature(p, *this, fnType.getInputs(), + /*isVariadic=*/false, + fnType.getResults()); + function_interface_impl::printFunctionAttributes( + p, *this, {getFunctionTypeAttrName(), getLinkageAttrName()}); + + // Print the body if this is not an external function. + Region &body = this->getBody(); + if (!body.empty()) + p.printRegion(body, /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/true); +} + +// Hook for OpTrait::FunctionLike, called after verifying that the 'type' +// attribute is present. This can check for preconditions of the +// getNumArguments hook not failing. +LogicalResult cir::FuncOp::verifyType() { + auto type = getFunctionType(); + if (!type.isa()) + return emitOpError("requires '" + getFunctionTypeAttrName().str() + + "' attribute of function type"); + if (getFunctionType().getNumResults() > 1) + return emitOpError("cannot have more than one result"); + return success(); +} + +// Verifies linkage types, similar to LLVM: +// - functions don't have 'common' linkage +// - external functions have 'external' or 'extern_weak' linkage +LogicalResult cir::FuncOp::verify() { + if (getLinkage() == cir::GlobalLinkageKind::CommonLinkage) + return emitOpError() << "functions cannot have '" + << stringifyGlobalLinkageKind( + cir::GlobalLinkageKind::CommonLinkage) + << "' linkage"; + + if (isExternal()) { + if (getLinkage() != cir::GlobalLinkageKind::ExternalLinkage && + getLinkage() != cir::GlobalLinkageKind::ExternalWeakLinkage) + return emitOpError() << "external functions must have '" + << stringifyGlobalLinkageKind( + cir::GlobalLinkageKind::ExternalLinkage) + << "' or '" + << stringifyGlobalLinkageKind( + cir::GlobalLinkageKind::ExternalWeakLinkage) + << "' linkage"; + return success(); + } + return success(); +} + //===----------------------------------------------------------------------===// // CIR defined traits //===----------------------------------------------------------------------===// @@ -1131,8 +1322,8 @@ ::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, // Parse literal '>' if (parser.parseGreater()) return {}; - return parser.getChecked( - loc, parser.getContext(), resultTy.value(), resultVal.value()); + return parser.getChecked(loc, parser.getContext(), + resultTy.value(), resultVal.value()); } void CstArrayAttr::print(::mlir::AsmPrinter &printer) const { diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 182ffc15ebb7..3ed4a4dda2f1 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -704,7 +704,7 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return; } - if (isa(op)) + if (isa(op)) return checkFunc(op); if (auto ifOp = dyn_cast(op)) return checkIf(ifOp); From dd11abee085b7ed18bb2b334112a3c578c9df397 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Wed, 3 Aug 2022 18:18:51 -0700 Subject: [PATCH 0541/2301] Fix typo in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a6de0311a4a0..58d7e29a0cf7 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # ClangIR (CIR) For more information see https://clangir.org. The rest of this document -fallbacks to llvm-project's default `README.td`. +fallbacks to llvm-project's default `README.md`. --- From b36bdc659978633468f7b286f637dc095e188f67 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 2 Aug 2022 12:16:39 -0700 Subject: [PATCH 0542/2301] [CIR][CodeGen] Add flag to disable MLIR verifiers and improve error handling around verification --- clang/include/clang/CIR/CIRGenerator.h | 2 +- clang/include/clang/CIR/CIRToCIRPasses.h | 3 ++- clang/include/clang/Driver/Options.td | 4 ++++ clang/include/clang/Frontend/FrontendOptions.h | 6 +++++- clang/lib/CIR/CIRGenModule.cpp | 5 ++--- clang/lib/CIR/CIRGenModule.h | 2 +- clang/lib/CIR/CIRGenerator.cpp | 2 +- clang/lib/CIR/CIRPasses.cpp | 6 ++++-- clang/lib/CIRFrontendAction/CIRGenAction.cpp | 18 ++++++++++++++---- clang/lib/Frontend/CompilerInvocation.cpp | 3 +++ 10 files changed, 37 insertions(+), 14 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 7e87e01b4cc6..734865fa24ed 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -89,7 +89,7 @@ class CIRGenerator : public clang::ASTConsumer { return std::move(mlirCtx); }; - void verifyModule(); + bool verifyModule(); void buildDeferredDecls(); }; diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index 6ffedc5cd9f1..6bf0664553a2 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -24,7 +24,8 @@ class ModuleOp; namespace cir { // Run set of cleanup/prepare/etc passes CIR <-> CIR. -void runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx); +void runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + bool enableVerifier); } // namespace cir #endif // CLANG_CIR_CIRTOCIRPASSES_H_ diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 1a76cf0bd6e7..9d31355b4e39 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3059,6 +3059,10 @@ def disable_cir_passes : Flag<["-"], "disable-cir-passes">, Visibility<[ClangOption, CC1Option]>, HelpText<"Disable CIR transformations pipeline">, MarshallingInfoFlag>; +def disable_cir_verifier : Flag<["-"], "disable-cir-verifier">, + Visibility<[ClangOption, CC1Option]>, + HelpText<"Disable CIR module verifier">, + MarshallingInfoFlag>; def flto_EQ_auto : Flag<["-"], "flto=auto">, Visibility<[ClangOption, FlangOption]>, Group, Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; def flto : Flag<["-"], "flto">, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 6fec975934ad..44ccfef42226 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -421,6 +421,9 @@ class FrontendOptions { /// Disable Clang IR specific (CIR) passes unsigned DisableCIRPasses : 1; + /// Disable Clang IR (CIR) verifier + unsigned DisableCIRVerifier : 1; + CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. @@ -609,7 +612,8 @@ class FrontendOptions { EmitSymbolGraphSymbolLabelsForTesting(false), EmitPrettySymbolGraphs(false), GenReducedBMI(false), UseClangIRPipeline(false), DisableCIRPasses(false), - TimeTraceGranularity(500), TimeTraceVerbose(false) {} + DisableCIRVerifier(false), TimeTraceGranularity(500), + TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file /// extension. For example, "c" would return Language::C. diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index d39ca0f70f43..5af2ff7ae42a 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1289,12 +1289,11 @@ mlir::Type CIRGenModule::getCIRType(const QualType &type) { return genTypes.ConvertType(type); } -void CIRGenModule::verifyModule() { +bool CIRGenModule::verifyModule() { // Verify the module after we have finished constructing it, this will // check the structural properties of the IR and invoke any specific // verifiers we have on the CIR operations. - if (failed(mlir::verify(theModule))) - theModule.emitError("module verification error"); + return mlir::verify(theModule).succeeded(); } std::pair diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 25b8fb3c3f7f..7eac8a6e8f55 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -295,7 +295,7 @@ class CIRGenModule { /// which may later be explicitly instantiated. bool MayBeEmittedEagerly(const clang::ValueDecl *D); - void verifyModule(); + bool verifyModule(); /// Return the address of the given function. If Ty is non-null, then this /// function will use the specified type if it has to create it. diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CIRGenerator.cpp index 26a06ab0dd51..b33033c5441d 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CIRGenerator.cpp @@ -48,7 +48,7 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { Diags); } -void CIRGenerator::verifyModule() { CGM->verifyModule(); } +bool CIRGenerator::verifyModule() { return CGM->verifyModule(); } bool CIRGenerator::EmitFunction(const FunctionDecl *FD) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CIRPasses.cpp b/clang/lib/CIR/CIRPasses.cpp index 242c1628897b..08bd1fdaf4b7 100644 --- a/clang/lib/CIR/CIRPasses.cpp +++ b/clang/lib/CIR/CIRPasses.cpp @@ -17,13 +17,15 @@ #include "mlir/Pass/PassManager.h" namespace cir { -void runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx) { +void runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + bool enableVerifier) { mlir::PassManager pm(mlirCtx); pm.addPass(mlir::createMergeCleanupsPass()); + pm.enableVerifier(enableVerifier); auto result = !mlir::failed(pm.run(theModule)); if (!result) llvm::report_fatal_error( - "The pass manager failed to lower CIR to llvm IR!"); + "CIR codegen: MLIR pass manager fails when running CIR passes!"); } } // namespace cir \ No newline at end of file diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp index 2e9d978ec5d0..0f36a3cef2ec 100644 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIRFrontendAction/CIRGenAction.cpp @@ -128,9 +128,17 @@ class CIRGenConsumer : public clang::ASTConsumer { } void HandleTranslationUnit(ASTContext &C) override { - gen->HandleTranslationUnit(C); + // Note that this method is called after `HandleTopLevelDecl` has already + // ran all over the top level decls. Here clang mostly wraps defered and + // global codegen, followed by running CIR passes. - gen->verifyModule(); + gen->HandleTranslationUnit(C); + if (!feOptions.DisableCIRVerifier) + if (!gen->verifyModule()) { + llvm::report_fatal_error( + "CIR codegen: module verification error before running CIR passes"); + return; + } auto mlirMod = gen->getModule(); auto mlirCtx = gen->takeContext(); @@ -138,8 +146,10 @@ class CIRGenConsumer : public clang::ASTConsumer { switch (action) { case CIRGenAction::OutputType::EmitCIR: if (outputStream && mlirMod) { - if (!feOptions.DisableCIRPasses) - runCIRToCIRPasses(mlirMod, mlirCtx.get()); + if (!feOptions.DisableCIRPasses) { + runCIRToCIRPasses(mlirMod, mlirCtx.get(), + !feOptions.DisableCIRVerifier); + } mlir::OpPrintingFlags flags; // FIXME: we cannot roundtrip prettyForm=true right now. flags.enableDebugInfo(/*prettyForm=*/false); diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index af8225ade74e..130c91a7afce 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3093,6 +3093,9 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Args.hasArg(OPT_disable_cir_passes)) Opts.DisableCIRPasses = true; + if (Args.hasArg(OPT_disable_cir_verifier)) + Opts.DisableCIRVerifier = true; + if (Args.hasArg(OPT_aux_target_cpu)) Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu)); if (Args.hasArg(OPT_aux_target_feature)) From 9a7dcc8c2f3ccfec4dffb5585f1b9ff281b9aa09 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 3 Aug 2022 12:29:36 -0700 Subject: [PATCH 0543/2301] [CIR] Add cir::CallOp to CIR cir::FuncOp forces adding an equivalent cir::CallOp because the symbol verifier on func::CallOp requires the callee is defined via a func::FuncOp. --- mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 2 +- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 62 +++++++++++++++++ mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 67 +++++++++++++++++++ 3 files changed, 130 insertions(+), 1 deletion(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index aa3eeafde1b0..ed264e5d258e 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -18,9 +18,9 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Dialect.h" #include "mlir/IR/OpDefinition.h" +#include "mlir/Interfaces/FunctionInterfaces.h" #include "mlir/Interfaces/CallInterfaces.h" #include "mlir/Interfaces/ControlFlowInterfaces.h" -#include "mlir/Interfaces/FunctionInterfaces.h" #include "mlir/Interfaces/InferTypeOpInterface.h" #include "mlir/Interfaces/LoopLikeInterface.h" #include "mlir/Interfaces/SideEffectInterfaces.h" diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index cb74cae0ee9a..7be1d50bfbf1 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -1202,4 +1202,66 @@ def FuncOp : CIR_Op<"func", [ let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// CallOp +//===----------------------------------------------------------------------===// + +def CallOp : CIR_Op<"call", + [DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + let summary = "call operation"; + let description = [{ + The `call` operation represents a direct call to a function that is within + the same symbol scope as the call. The operands and result types of the + call must match the specified function type. The callee is encoded as a + symbol reference attribute named "callee". + + Since `mlir::func::CallOp` requires defining symbols to be tied with a + `mlir::func::FuncOp`, a custom `cir.call` is needed to interop with + `cir.func`. For now this is basically a simplified `mlir::func::CallOp`. + + Example: + + ```mlir + %2 = cir.call @my_add(%0, %1) : (f32, f32) -> f32 + ``` + }]; + + let arguments = (ins FlatSymbolRefAttr:$callee, Variadic:$operands); + let results = (outs Variadic); + + let builders = [ + OpBuilder<(ins "FuncOp":$callee, CArg<"ValueRange", "{}">:$operands), [{ + $_state.addOperands(operands); + $_state.addAttribute("callee", SymbolRefAttr::get(callee)); + $_state.addTypes(callee.getFunctionType().getResults()); + }]>, + OpBuilder<(ins "SymbolRefAttr":$callee, "TypeRange":$results, + CArg<"ValueRange", "{}">:$operands), [{ + $_state.addOperands(operands); + $_state.addAttribute("callee", callee); + $_state.addTypes(results); + }]>, + OpBuilder<(ins "StringAttr":$callee, "TypeRange":$results, + CArg<"ValueRange", "{}">:$operands), [{ + build($_builder, $_state, SymbolRefAttr::get(callee), results, operands); + }]>, + OpBuilder<(ins "StringRef":$callee, "TypeRange":$results, + CArg<"ValueRange", "{}">:$operands), [{ + build($_builder, $_state, StringAttr::get($_builder.getContext(), callee), + results, operands); + }]>]; + + let extraClassDeclaration = [{ + FunctionType getCalleeType(); + + operand_iterator arg_operand_begin() { return operand_begin(); } + operand_iterator arg_operand_end() { return operand_end(); } + }]; + + let assemblyFormat = [{ + $callee `(` $operands `)` attr-dict `:` functional-type($operands, results) + }]; + let hasVerifier = 0; +} + #endif // MLIR_CIR_DIALECT_CIR_OPS diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 21c82f7f21a1..d6dece644e28 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -1208,6 +1208,73 @@ LogicalResult cir::FuncOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// CallOp +//===----------------------------------------------------------------------===// + +/// Get the argument operands to the called function. +OperandRange cir::CallOp::getArgOperands() { + return {arg_operand_begin(), arg_operand_end()}; +} + +MutableOperandRange cir::CallOp::getArgOperandsMutable() { + return getOperandsMutable(); +} + +/// Return the callee of this operation +CallInterfaceCallable cir::CallOp::getCallableForCallee() { + return (*this)->getAttrOfType("callee"); +} + +/// Set the callee for this operation. +void cir::CallOp::setCalleeFromCallable(::mlir::CallInterfaceCallable callee) { + if (auto calling = + (*this)->getAttrOfType(getCalleeAttrName())) + (*this)->setAttr(getCalleeAttrName(), callee.get()); + setOperand(0, callee.get()); +} + +LogicalResult +cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + // Check that the callee attribute was specified. + auto fnAttr = (*this)->getAttrOfType("callee"); + if (!fnAttr) + return emitOpError("requires a 'callee' symbol reference attribute"); + FuncOp fn = + symbolTable.lookupNearestSymbolFrom(*this, fnAttr); + if (!fn) + return emitOpError() << "'" << fnAttr.getValue() + << "' does not reference a valid function"; + + // Verify that the operand and result types match the callee. + auto fnType = fn.getFunctionType(); + if (fnType.getNumInputs() != getNumOperands()) + return emitOpError("incorrect number of operands for callee"); + + for (unsigned i = 0, e = fnType.getNumInputs(); i != e; ++i) + if (getOperand(i).getType() != fnType.getInput(i)) + return emitOpError("operand type mismatch: expected operand type ") + << fnType.getInput(i) << ", but provided " + << getOperand(i).getType() << " for operand number " << i; + + if (fnType.getNumResults() != getNumResults()) + return emitOpError("incorrect number of results for callee"); + + for (unsigned i = 0, e = fnType.getNumResults(); i != e; ++i) + if (getResult(i).getType() != fnType.getResult(i)) { + auto diag = emitOpError("result type mismatch at index ") << i; + diag.attachNote() << " op result types: " << getResultTypes(); + diag.attachNote() << "function result types: " << fnType.getResults(); + return diag; + } + + return success(); +} + +FunctionType CallOp::getCalleeType() { + return FunctionType::get(getContext(), getOperandTypes(), getResultTypes()); +} + //===----------------------------------------------------------------------===// // CIR defined traits //===----------------------------------------------------------------------===// From 863acb4a895eb86d83ac29a36a3870708122e037 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 21 Jul 2022 00:04:50 -0700 Subject: [PATCH 0544/2301] [CIR][CodeGen] Teach codegen to use cir.func and cir.call - Change how insertion points work when adding and codegen'ing functions. This had the nice side effect of fixing an existing issue with lambda.cpp, which got its XFAIL removed. - Update codegen to use cir.func and cir.call. - Update FuncOp to deal with some recent rebase changes (parsing, printing and symbol interface handling). - Teach LowerToLLVM ro rewrite cir::FuncOp to mlir::FuncOp, same for CallOp. Note that this requires changes to RetLowering as well. - Fix tests. Conflicts: clang/lib/CIR/CIRGenCall.cpp clang/lib/CIR/CIRGenExpr.cpp clang/lib/CIR/CIRGenFunction.cpp clang/lib/CIR/CIRGenFunction.h clang/lib/CIR/LowerToLLVM.cpp clang/test/CIR/CodeGen/switch.cpp --- clang/include/clang/CIR/Passes.h | 2 + clang/lib/CIR/CIRGenCXX.cpp | 8 +- clang/lib/CIR/CIRGenCall.cpp | 11 +- clang/lib/CIR/CIRGenCall.h | 11 +- clang/lib/CIR/CIRGenExpr.cpp | 5 +- clang/lib/CIR/CIRGenFunction.cpp | 9 +- clang/lib/CIR/CIRGenFunction.h | 10 +- clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 2 +- clang/lib/CIR/CIRGenModule.cpp | 59 ++++++--- clang/lib/CIR/CIRGenModule.h | 16 ++- clang/lib/CIR/LowerToLLVM.cpp | 118 +++++++++++++++++- clang/test/CIR/CIRToLLVM/memref.cir | 8 +- clang/test/CIR/CodeGen/String.cpp | 4 +- clang/test/CIR/CodeGen/array.cpp | 8 +- clang/test/CIR/CodeGen/basic.c | 6 +- clang/test/CIR/CodeGen/basic.cpp | 14 +-- clang/test/CIR/CodeGen/call.c | 16 +-- clang/test/CIR/CodeGen/ctor.cpp | 24 ++-- clang/test/CIR/CodeGen/globals.cpp | 4 +- clang/test/CIR/CodeGen/goto.cpp | 4 +- clang/test/CIR/CodeGen/lambda.cpp | 4 +- clang/test/CIR/CodeGen/loop-scope.cpp | 4 +- clang/test/CIR/CodeGen/loop.cpp | 14 +-- clang/test/CIR/CodeGen/sourcelocation.cpp | 2 +- clang/test/CIR/CodeGen/struct.c | 2 +- clang/test/CIR/CodeGen/struct.cpp | 34 ++--- clang/test/CIR/CodeGen/switch.cpp | 14 +-- clang/test/CIR/CodeGen/types.c | 40 +++--- clang/test/CIR/IR/array.cir | 4 +- clang/test/CIR/IR/branch.cir | 6 +- clang/test/CIR/IR/cast.cir | 5 +- clang/test/CIR/IR/cir-ops.cir | 16 +-- clang/test/CIR/IR/global.cir | 4 +- clang/test/CIR/IR/invalid.cir | 31 ++--- clang/test/CIR/IR/loop.cir | 14 +-- clang/test/CIR/IR/ptr_stride.cir | 5 +- clang/test/CIR/IR/switch.cir | 2 +- clang/test/CIR/IR/types.cir | 4 +- .../CIR/Transforms/lifetime-loop-valid.cpp | 2 +- clang/test/CIR/Transforms/merge-cleanups.cir | 11 +- clang/test/CIR/cc1.cir | 2 +- clang/test/CIR/cirtool.cir | 4 +- clang/test/CIR/driver.c | 2 +- clang/test/CIR/hello.c | 2 +- clang/tools/cir-tool/cir-tool.cpp | 7 +- mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h | 7 -- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 8 +- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 28 +++-- .../Dialect/CIR/Transforms/LifetimeCheck.cpp | 2 +- .../Dialect/CIR/Transforms/MergeCleanups.cpp | 8 +- 50 files changed, 400 insertions(+), 227 deletions(-) diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index 18d0e9a9e6b1..ba4d79e88ad5 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -18,6 +18,8 @@ #include namespace cir { +/// Create a pass for lowering from `cir.func` to `func.func`. +std::unique_ptr createConvertCIRToFuncPass(); /// Create a pass for lowering from `CIR` operations well as `Affine` and `Std`, /// to the LLVM dialect for codegen. We'll want to separate this eventually into diff --git a/clang/lib/CIR/CIRGenCXX.cpp b/clang/lib/CIR/CIRGenCXX.cpp index 184eb8937ce0..1b5f451ce030 100644 --- a/clang/lib/CIR/CIRGenCXX.cpp +++ b/clang/lib/CIR/CIRGenCXX.cpp @@ -20,7 +20,7 @@ using namespace clang; using namespace cir; -mlir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { +mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { const auto &FnInfo = getTypes().arrangeCXXStructorDeclaration(GD); auto Fn = getAddrOfCXXStructor(GD, &FnInfo, /*FnType=*/nullptr, /*DontDefer=*/true, ForDefinition); @@ -28,11 +28,13 @@ mlir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { // TODO: setFunctionLinkage CIRGenFunction CGF{*this, builder}; CurCGF = &CGF; - CGF.generateCode(GD, Fn, FnInfo); + { + mlir::OpBuilder::InsertionGuard guard(builder); + CGF.generateCode(GD, Fn, FnInfo); + } CurCGF = nullptr; // TODO: setNonAliasAttributes // TODO: SetLLVMFunctionAttributesForDefinition return Fn; } - diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index 8145f5774914..f4b3e986d3f3 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -260,7 +260,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, - mlir::func::CallOp *callOrInvoke, + mlir::cir::CallOp *callOrInvoke, bool IsMustTail, SourceLocation Loc) { // FIXME: We no longer need the types from CallArgs; lift up and simplify @@ -362,7 +362,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, } const CIRGenCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); - mlir::FuncOp CalleePtr = ConcreteCallee.getFunctionPointer(); + auto CalleePtr = ConcreteCallee.getFunctionPointer(); // If we're using inalloca, set up that argument. assert(!ArgMemory.isValid() && "inalloca NYI"); @@ -396,7 +396,12 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // Emit the actual call op. auto callLoc = CGM.getLoc(Loc); - auto theCall = CGM.getBuilder().create(callLoc, CalleePtr, + + // FIXME: Used to be: + // auto theCall = CGM.getBuilder().create( + // callLoc, mlir::SymbolRefAttr::get(CalleePtr), + // CalleePtr.getType().getResults(), CIRCallArgs); + auto theCall = CGM.getBuilder().create(callLoc, CalleePtr, CIRCallArgs); if (callOrInvoke) diff --git a/clang/lib/CIR/CIRGenCall.h b/clang/lib/CIR/CIRGenCall.h index 45bc056535a3..1f29c136270f 100644 --- a/clang/lib/CIR/CIRGenCall.h +++ b/clang/lib/CIR/CIRGenCall.h @@ -22,7 +22,7 @@ #include "llvm/ADT/SmallVector.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" -#include "mlir/Dialect/Func/IR/FuncOps.h" + #include "mlir/IR/BuiltinOps.h" namespace cir { @@ -85,7 +85,8 @@ class CIRGenCallee { // Construct a callee. Call this constructor directly when this isn't a direct // call. - CIRGenCallee(const CIRGenCalleeInfo &abstractInfo, mlir::FuncOp functionPtr) + CIRGenCallee(const CIRGenCalleeInfo &abstractInfo, + mlir::cir::FuncOp functionPtr) : KindOrFunctionPointer(SpecialKind( reinterpret_cast(functionPtr.getAsOpaquePointer()))) { AbstractInfo = abstractInfo; @@ -96,7 +97,7 @@ class CIRGenCallee { } static CIRGenCallee - forDirect(mlir::FuncOp functionPtr, + forDirect(mlir::cir::FuncOp functionPtr, const CIRGenCalleeInfo &abstractInfo = CIRGenCalleeInfo()) { return CIRGenCallee(abstractInfo, functionPtr); } @@ -117,9 +118,9 @@ class CIRGenCallee { /// callee CIRGenCallee prepareConcreteCallee(CIRGenFunction &CGF) const; - mlir::FuncOp getFunctionPointer() const { + mlir::cir::FuncOp getFunctionPointer() const { assert(isOrdinary()); - return mlir::FuncOp::getFromOpaquePointer( + return mlir::cir::FuncOp::getFromOpaquePointer( reinterpret_cast(KindOrFunctionPointer)); } diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 76f400fd12e8..bf74cbd0b713 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -26,7 +26,8 @@ using namespace cir; using namespace clang; using namespace mlir::cir; -static mlir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { +static mlir::cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, + GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); assert(!FD->hasAttr() && "NYI"); @@ -645,7 +646,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, assert(!CGM.getLangOpts().HIP && "HIP NYI"); assert(!MustTailCall && "Must tail NYI"); - mlir::func::CallOp callOP = nullptr; + mlir::cir::CallOp callOP = nullptr; RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, &callOP, E == MustTailCall, E->getExprLoc()); diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 549764377eaf..518de7a548ff 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -354,8 +354,9 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { insertCleanupAndLeave(currBlock); } -mlir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, - const CIRGenFunctionInfo &FnInfo) { +mlir::cir::FuncOp +CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, + const CIRGenFunctionInfo &FnInfo) { assert(Fn && "generating code for a null function"); const auto FD = cast(GD.getDecl()); CurGD = GD; @@ -596,7 +597,7 @@ void CIRGenFunction::buildCXXConstructorCall( const CIRGenFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall( Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); - mlir::func::CallOp C; + mlir::cir::CallOp C; buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, Loc); assert(CGM.getCodeGenOpts().OptimizationLevel == 0 || @@ -672,7 +673,7 @@ LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Operation *Op, } void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, - mlir::FuncOp Fn, + mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc, diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 755d1d1c4a83..0518c1253ec4 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -327,7 +327,7 @@ class CIRGenFunction { const clang::Decl *CurCodeDecl; const CIRGenFunctionInfo *CurFnInfo; clang::QualType FnRetTy; - mlir::FuncOp CurFn = nullptr; + mlir::cir::FuncOp CurFn = nullptr; /// CXXStructorImplicitParamDecl - When generating code for a constructor or /// destructor, this will hold the implicit argument (e.g. VTT). @@ -524,7 +524,7 @@ class CIRGenFunction { /// LLVM arguments and the types they were derived from. RValue buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, - const CallArgList &Args, mlir::func::CallOp *callOrInvoke, + const CallArgList &Args, mlir::cir::CallOp *callOrInvoke, bool IsMustTail, clang::SourceLocation Loc); RValue buildCall(clang::QualType FnType, const CIRGenCallee &Callee, const clang::CallExpr *E, ReturnValueSlot returnValue, @@ -649,8 +649,8 @@ class CIRGenFunction { mlir::Type condType, mlir::cir::CaseAttr &caseEntry); - mlir::FuncOp generateCode(clang::GlobalDecl GD, mlir::FuncOp Fn, - const CIRGenFunctionInfo &FnInfo); + mlir::cir::FuncOp generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, + const CIRGenFunctionInfo &FnInfo); clang::QualType buildFunctionArgList(clang::GlobalDecl GD, FunctionArgList &Args); @@ -839,7 +839,7 @@ class CIRGenFunction { /// \param Loc The location to be associated with the function. /// \param StartLoc The location of the function body. void StartFunction(clang::GlobalDecl GD, clang::QualType RetTy, - mlir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, + mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, const FunctionArgList &Args, clang::SourceLocation Loc, clang::SourceLocation StartLoc); diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 65a82b136732..0b3e594af763 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -322,7 +322,7 @@ void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { // In such cases we should try to emit the deleting dtor as an alias to the // selected 'operator delete'. - mlir::FuncOp Fn = CGM.codegenCXXStructor(GD); + auto Fn = CGM.codegenCXXStructor(GD); if (CIRGenType == StructorCIRGen::COMDAT) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 5af2ff7ae42a..8bb3251ef2ab 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -331,7 +331,7 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, Op = GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/true, ForDefinition); - auto Fn = cast(Op); + auto Fn = cast(Op); // Already emitted. if (!Fn.isDeclaration()) return; @@ -344,7 +344,10 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, CIRGenFunction CGF{*this, builder}; CurCGF = &CGF; - CGF.generateCode(GD, Fn, FI); + { + mlir::OpBuilder::InsertionGuard guard(builder); + CGF.generateCode(GD, Fn, FI); + } CurCGF = nullptr; // TODO: setNonAliasAttributes @@ -1296,7 +1299,7 @@ bool CIRGenModule::verifyModule() { return mlir::verify(theModule).succeeded(); } -std::pair +std::pair CIRGenModule::getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CIRGenFunctionInfo *FnInfo, mlir::FunctionType FnType, @@ -1319,10 +1322,10 @@ CIRGenModule::getAddrAndTypeOfCXXStructor(GlobalDecl GD, return {FnType, Fn}; } -mlir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, - mlir::Type Ty, bool ForVTable, - bool DontDefer, - ForDefinition_t IsForDefinition) { +mlir::cir::FuncOp +CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, + bool ForVTable, bool DontDefer, + ForDefinition_t IsForDefinition) { assert(!ForVTable && "NYI"); assert(!cast(GD.getDecl())->isConsteval() && @@ -1436,14 +1439,39 @@ bool CIRGenModule::lookupRepresentativeDecl(StringRef MangledName, return true; } -/// GetOrCreateCIRFunction - If the specified mangled name is not in the module, +mlir::cir::FuncOp CIRGenModule::createCIRFunction(mlir::Location loc, + StringRef name, + mlir::FunctionType Ty) { + // At the point we need to create the function, the insertion point + // could be anywhere (e.g. callsite). Do not rely on whatever it might + // be, properly save, find the appropriate place and restore. + FuncOp f; + { + mlir::OpBuilder::InsertionGuard guard(builder); + + // Some global emissions are triggered while emitting a function, e.g. + // void s() { x.method() } + // + // Be sure to insert a new function before a current one. + auto *curCGF = getCurrCIRGenFun(); + if (curCGF) + builder.setInsertionPoint(curCGF->CurFn.getOperation()); + + f = builder.create(loc, name, Ty); + if (!curCGF) + theModule.push_back(f); + } + return f; +} + +/// If the specified mangled name is not in the module, /// create and return a CIR Function with the specified type. If there is /// something in the module with the specified name, return it potentially /// bitcasted to the right type. /// /// If D is non-null, it specifies a decl that corresponded to this. This is /// used to set the attributes on the function when it is first created. -mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( +mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( StringRef MangledName, mlir::Type Ty, GlobalDecl GD, bool ForVTable, bool DontDefer, bool IsThunk, ForDefinition_t IsForDefinition) { assert(!ForVTable && "NYI"); @@ -1463,7 +1491,7 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // Lookup the entry, lazily creating it if necessary. mlir::Operation *Entry = getGlobalValue(MangledName); if (Entry) { - assert(isa(Entry) && + assert(isa(Entry) && "not implemented, only supports FuncOp for now"); if (WeakRefReferences.erase(Entry)) { @@ -1478,7 +1506,7 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // If there are two attempts to define the same mangled name, issue an // error. - auto Fn = cast(Entry); + auto Fn = cast(Entry); if (IsForDefinition && Fn && !Fn.isDeclaration()) { GlobalDecl OtherGD; // CHeck that GD is not yet in DiagnosedConflictingDefinitions is required @@ -1521,8 +1549,7 @@ mlir::FuncOp CIRGenModule::GetOrCreateCIRFunction( auto fnLoc = getLoc(FD->getSourceRange()); // TODO: CodeGen includeds the linkage (ExternalLinkage) and only passes the // mangledname if Entry is nullptr - mlir::FuncOp F = mlir::FuncOp::create(fnLoc, MangledName, FTy); - theModule.push_back(F); + auto F = createCIRFunction(fnLoc, MangledName, FTy); if (Entry) { llvm_unreachable("NYI"); @@ -1657,7 +1684,7 @@ void CIRGenModule::buildDeferred() { // Make sure getGlobalValue returned non-null. assert(Op); - assert(isa(Op) && + assert(isa(Op) && "not implemented, only supports FuncOp for now"); // Check to see if we've already emitted this. This is necessary for a @@ -1666,7 +1693,7 @@ void CIRGenModule::buildDeferred() { // ways (e.g. by an extern inline function acquiring a strong function // redefinition). Just ignore those cases. // TODO: Not sure what to map this to for MLIR - if (auto Fn = cast(Op)) + if (auto Fn = cast(Op)) if (!Fn.isDeclaration()) continue; @@ -1777,7 +1804,7 @@ void CIRGenModule::maybeSetTrivialComdat(const Decl &D, mlir::Operation *Op) { assert(!UnimplementedFeature::setComdat() && "NYI"); } -bool CIRGenModule::isInNoSanitizeList(SanitizerMask Kind, mlir::FuncOp Fn, +bool CIRGenModule::isInNoSanitizeList(SanitizerMask Kind, mlir::cir::FuncOp Fn, SourceLocation Loc) const { const auto &NoSanitizeL = getASTContext().getNoSanitizeList(); // NoSanitize by function name. diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 7eac8a6e8f55..29dab09b25a8 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -236,7 +236,7 @@ class CIRGenModule { LValueBaseInfo *BaseInfo = nullptr, bool forPointeeType = false); - mlir::FuncOp getAddrOfCXXStructor( + mlir::cir::FuncOp getAddrOfCXXStructor( clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, mlir::FunctionType FnType = nullptr, bool DontDefer = false, ForDefinition_t IsForDefinition = NotForDefinition) { @@ -262,7 +262,7 @@ class CIRGenModule { DeferredDeclsToEmit.emplace_back(GD); } - std::pair getAddrAndTypeOfCXXStructor( + std::pair getAddrAndTypeOfCXXStructor( clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, mlir::FunctionType FnType = nullptr, bool Dontdefer = false, ForDefinition_t IsForDefinition = NotForDefinition); @@ -286,7 +286,7 @@ class CIRGenModule { bool MayDropFunctionReturn(const clang::ASTContext &Context, clang::QualType ReturnType); - bool isInNoSanitizeList(clang::SanitizerMask Kind, mlir::FuncOp Fn, + bool isInNoSanitizeList(clang::SanitizerMask Kind, mlir::cir::FuncOp Fn, clang::SourceLocation) const; /// Determine whether the definition can be emitted eagerly, or should be @@ -300,7 +300,7 @@ class CIRGenModule { /// Return the address of the given function. If Ty is non-null, then this /// function will use the specified type if it has to create it. // TODO: this is a bit weird as `GetAddr` given we give back a FuncOp? - mlir::FuncOp + mlir::cir::FuncOp GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty = nullptr, bool ForVTable = false, bool Dontdefer = false, ForDefinition_t IsForDefinition = NotForDefinition); @@ -339,7 +339,7 @@ class CIRGenModule { // Produce code for this constructor/destructor. This method doesn't try to // apply any ABI rules about which other constructors/destructors are needed // or if they are alias to each other. - mlir::FuncOp codegenCXXStructor(clang::GlobalDecl GD); + mlir::cir::FuncOp codegenCXXStructor(clang::GlobalDecl GD); bool lookupRepresentativeDecl(llvm::StringRef MangledName, clang::GlobalDecl &Result) const; @@ -363,11 +363,15 @@ class CIRGenModule { private: // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR - mlir::FuncOp + mlir::cir::FuncOp GetOrCreateCIRFunction(llvm::StringRef MangledName, mlir::Type Ty, clang::GlobalDecl D, bool ForVTable, bool DontDefer = false, bool IsThunk = false, ForDefinition_t IsForDefinition = NotForDefinition); + // Effectively create the CIR instruction, properly handling insertion + // points. + mlir::cir::FuncOp createCIRFunction(mlir::Location loc, StringRef name, + mlir::FunctionType Ty); // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index d640b30ef08f..54d3ce706064 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -26,6 +26,7 @@ #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" +#include "mlir/IR/BuiltinDialect.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" @@ -63,6 +64,20 @@ struct ConvertCIRToMemRefPass virtual StringRef getArgument() const override { return "cir-to-memref"; } }; +struct ConvertCIRToFuncPass + : public mlir::PassWrapper> { + void getDependentDialects(mlir::DialectRegistry ®istry) const override { + // FIXME: after we rebase to more recent changes, this should be + // mlir::FuncDialect instead. + registry.insert(); + } + void runOnOperation() final; + + virtual StringRef getArgument() const override { return "cir-to-func"; } +}; + class CIRReturnLowering : public mlir::OpRewritePattern { public: using OpRewritePattern::OpRewritePattern; @@ -76,6 +91,20 @@ class CIRReturnLowering : public mlir::OpRewritePattern { } }; +class CIRCallLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CallOp op, + mlir::PatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, mlir::SymbolRefAttr::get(op), op.getResultTypes(), + op.getArgOperands()); + return mlir::LogicalResult::success(); + } +}; + class CIRAllocaLowering : public mlir::OpRewritePattern { public: using OpRewritePattern::OpRewritePattern; @@ -133,9 +162,46 @@ class CIRConstantLowering } }; +class CIRFuncLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::FuncOp op, + mlir::PatternRewriter &rewriter) const override { + + auto fnType = op.getFunctionType(); + mlir::TypeConverter::SignatureConversion signatureConversion( + fnType.getNumInputs()); + + for (const auto &argType : enumerate(fnType.getInputs())) { + auto convertedType = argType.value(); + if (!convertedType) + return mlir::failure(); + signatureConversion.addInputs(argType.index(), convertedType); + } + + mlir::Type resultType; + if (fnType.getNumResults() == 1) { + resultType = fnType.getResult(0); + if (!resultType) + return mlir::failure(); + } + + auto fn = rewriter.create( + op.getLoc(), op.getName(), + rewriter.getFunctionType(signatureConversion.getConvertedTypes(), + resultType ? mlir::TypeRange(resultType) + : mlir::TypeRange())); + + rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); + return mlir::LogicalResult::success(); + } +}; + void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { patterns.add(patterns.getContext()); + CIRConstantLowering>(patterns.getContext()); } void ConvertCIRToLLVMPass::runOnOperation() { @@ -162,7 +228,7 @@ void ConvertCIRToMemRefPass::runOnOperation() { // TODO: Should this be a wholesale conversion? It's a bit ambiguous on // whether we should have micro-conversions that do the minimal amount of work // or macro conversions that entiirely remove a dialect. - target.addLegalOp(); + target.addLegalOp(); target .addLegalDialect(); @@ -177,18 +243,58 @@ void ConvertCIRToMemRefPass::runOnOperation() { signalPassFailure(); } +void ConvertCIRToFuncPass::runOnOperation() { + // End goal here is to legalize to mlir::FuncOp (builtin dialect) and + // mlir::ReturnOp (standard dialect). This is done in two steps, becase + // cir.return is a cir.func child it will be ignored in the first conversion. + // + // TODO: is there a better way to handle this? If such handling is decoupled + // from the same pass the verifier won't accept the mix between mlir::FuncOp + // and mlir::cir::ReturnOp. + + // Convert cir.func to builtin.func + mlir::ConversionTarget fnTarget(getContext()); + fnTarget.addLegalOp(); + fnTarget.addIllegalOp(); + + mlir::RewritePatternSet fnPatterns(&getContext()); + fnPatterns.add(fnPatterns.getContext()); + + auto module = getOperation(); + if (failed(applyPartialConversion(module, fnTarget, std::move(fnPatterns)))) + signalPassFailure(); + + // Convert cir.return to std.return, cir.call to std.call + mlir::ConversionTarget retTarget(getContext()); + retTarget + .addLegalOp(); + retTarget.addIllegalOp(); + + mlir::RewritePatternSet retPatterns(&getContext()); + retPatterns.add(retPatterns.getContext()); + + if (failed(applyPartialConversion(module, retTarget, std::move(retPatterns)))) + signalPassFailure(); +} + std::unique_ptr lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, std::unique_ptr mlirCtx, LLVMContext &llvmCtx) { mlir::PassManager pm(mlirCtx.get()); + pm.addPass(createConvertCIRToFuncPass()); pm.addPass(createConvertCIRToMemRefPass()); pm.addPass(createConvertCIRToLLVMPass()); auto result = !mlir::failed(pm.run(theModule)); if (!result) - report_fatal_error("The pass manager failed to lower CIR to llvm IR!"); + report_fatal_error( + "The pass manager failed to lower CIR to LLVMIR dialect!"); + + // Now that we ran all the lowering passes, verify the final output. + if (theModule.verify().failed()) + report_fatal_error("Verification of the final LLVMIR dialect failed!"); mlir::registerLLVMDialectTranslation(*mlirCtx); @@ -196,7 +302,7 @@ lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, auto llvmModule = mlir::translateModuleToLLVMIR(theModule, llvmCtx); if (!llvmModule) - report_fatal_error("Lowering from llvm dialect to llvm IR failed!"); + report_fatal_error("Lowering from LLVMIR dialect to llvm IR failed!"); return llvmModule; } @@ -209,4 +315,8 @@ std::unique_ptr createConvertCIRToMemRefPass() { return std::make_unique(); } +std::unique_ptr createConvertCIRToFuncPass() { + return std::make_unique(); +} + } // namespace cir diff --git a/clang/test/CIR/CIRToLLVM/memref.cir b/clang/test/CIR/CIRToLLVM/memref.cir index c49793d4831d..5431fb20967a 100644 --- a/clang/test/CIR/CIRToLLVM/memref.cir +++ b/clang/test/CIR/CIRToLLVM/memref.cir @@ -1,9 +1,9 @@ -// RUN: cir-tool %s -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM // XFAIL: * module { - func.func @foo() -> i32 { + cir.func @foo() -> i32 { %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} %1 = cir.cst(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr @@ -13,7 +13,7 @@ module { } // MLIR: module { -// MLIR-NEXT: func.func @foo() -> i32 { +// MLIR-NEXT: func @foo() -> i32 { // MLIR-NEXT: %0 = memref.alloca() {alignment = 4 : i64} : memref // MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 // MLIR-NEXT: memref.store %c1_i32, %0[] : memref diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 1cdfc446d260..b7a373340073 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -15,7 +15,7 @@ void test() { String s2{1}; } -// CHECK: func @_ZN6StringC2Ev +// CHECK: cir.func @_ZN6StringC2Ev // CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 @@ -28,7 +28,7 @@ void test() { // CHECK-NEXT: cir.store %6, %4 : i64, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: func @_ZN6StringC2Ei +// CHECK: cir.func @_ZN6StringC2Ei // CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["size", paraminit] // CHECK-NEXT: cir.store %arg0, %0 diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 01dcf9b53cb2..394e2ce3d081 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -6,7 +6,7 @@ void a0() { int a[10]; } -// CHECK: func @_Z2a0v() { +// CHECK: cir.func @_Z2a0v() { // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} void a1() { @@ -14,7 +14,7 @@ void a1() { a[0] = 1; } -// CHECK: func @_Z2a1v() { +// CHECK: cir.func @_Z2a1v() { // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} // CHECK-NEXT: %1 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %2 = cir.cst(0 : i32) : i32 @@ -27,7 +27,7 @@ int *a2() { return &a[0]; } -// CHECK: func @_Z2a2v() -> !cir.ptr { +// CHECK: cir.func @_Z2a2v() -> !cir.ptr { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} // CHECK-NEXT: %2 = cir.cst(0 : i32) : i32 @@ -42,7 +42,7 @@ void local_stringlit() { } // CHECK: cir.global "private" constant internal @".str" = #cir.cst_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} loc(#loc17) -// CHECK: func @_Z15local_stringlitv() { +// CHECK: cir.func @_Z15local_stringlitv() { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", cinit] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 16d2a173de6a..2ed2c007561b 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -10,7 +10,7 @@ int foo(int i) { } // CHECK: module { -// CHECK-NEXT: func @foo(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK-NEXT: cir.func @foo(%arg0: i32 loc({{.*}})) -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", paraminit] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr @@ -22,7 +22,7 @@ int foo(int i) { int f2() { return 3; } -// CHECK: func @f2() -> i32 { +// CHECK: cir.func @f2() -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr @@ -34,7 +34,7 @@ int f3() { return i; } -// CHECK: func @f3() -> i32 { +// CHECK: cir.func @f3() -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.cst(3 : i32) : i32 diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index f85e88140ce6..96608f18eafb 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -7,7 +7,7 @@ int *p0() { return p; } -// CHECK: func @_Z2p0v() -> !cir.ptr { +// CHECK: cir.func @_Z2p0v() -> !cir.ptr { // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] // CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > @@ -18,7 +18,7 @@ int *p1() { return p; } -// CHECK: func @_Z2p1v() -> !cir.ptr { +// CHECK: cir.func @_Z2p1v() -> !cir.ptr { // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", uninitialized] // CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > @@ -34,7 +34,7 @@ int *p2() { return p; } -// CHECK: func @_Z2p2v() -> !cir.ptr { +// CHECK: cir.func @_Z2p2v() -> !cir.ptr { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] {alignment = 8 : i64} // CHECK-NEXT: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr @@ -58,13 +58,13 @@ int *p2() { void b0() { bool x = true, y = false; } -// CHECK: func @_Z2b0v() { +// CHECK: cir.func @_Z2b0v() { // CHECK: %2 = cir.cst(true) : !cir.bool // CHECK: %3 = cir.cst(false) : !cir.bool void b1(int a) { bool b = a; } -// CHECK: func @_Z2b1i(%arg0: i32 loc({{.*}})) { +// CHECK: cir.func @_Z2b1i(%arg0: i32 loc({{.*}})) { // CHECK: %2 = cir.load %0 : cir.ptr , i32 // CHECK: %3 = cir.cast(int_to_bool, %2 : i32), !cir.bool // CHECK: cir.store %3, %1 : !cir.bool, cir.ptr @@ -78,7 +78,7 @@ void if0(int a) { } } -// CHECK: func @_Z3if0i(%arg0: i32 loc({{.*}})) +// CHECK: cir.func @_Z3if0i(%arg0: i32 loc({{.*}})) // CHECK: cir.scope { // CHECK: %3 = cir.load %0 : cir.ptr , i32 // CHECK: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool @@ -106,7 +106,7 @@ void if1(int a, bool b, bool c) { } } -// CHECK: func @_Z3if1ibb(%arg0: i32 loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) +// CHECK: cir.func @_Z3if1ibb(%arg0: i32 loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) // CHECK: cir.scope { // CHECK: %5 = cir.load %0 : cir.ptr , i32 // CHECK: %6 = cir.cast(int_to_bool, %5 : i32), !cir.bool diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 3d16e6e0e605..8f7ba8f782e1 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -16,10 +16,10 @@ void d(void) { } // CHECK: module { -// CHECK: func @a() { +// CHECK: cir.func @a() { // CHECK: cir.return // CHECK: } -// CHECK: func @b(%arg0: i32 {{.*}}, %arg1: i32 {{.*}}) -> i32 { +// CHECK: cir.func @b(%arg0: i32 {{.*}}, %arg1: i32 {{.*}}) -> i32 { // CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] // CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] // CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] @@ -32,7 +32,7 @@ void d(void) { // CHECK: %6 = cir.load %2 : cir.ptr , i32 // CHECK: cir.return %6 // CHECK: } -// CHECK: func @c(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { +// CHECK: cir.func @c(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { // CHECK: %0 = cir.alloca f64, cir.ptr , ["a", paraminit] // CHECK: %1 = cir.alloca f64, cir.ptr , ["b", paraminit] // CHECK: %2 = cir.alloca f64, cir.ptr , ["__retval", uninitialized] @@ -45,7 +45,7 @@ void d(void) { // CHECK: %6 = cir.load %2 : cir.ptr , f64 // CHECK: cir.return %6 : f64 // CHECK: } -// CHECK: func @d() { +// CHECK: cir.func @d() { // CHECK: call @a() : () -> () // CHECK: %0 = cir.cst(0 : i32) : i32 // CHECK: %1 = cir.cst(1 : i32) : i32 @@ -54,10 +54,10 @@ void d(void) { // CHECK: } // // CXX: module { -// CXX-NEXT: func @_Z1av() { +// CXX-NEXT: cir.func @_Z1av() { // CXX-NEXT: cir.return // CXX-NEXT: } -// CXX-NEXT: func @_Z1bii(%arg0: i32 {{.*}}, %arg1: i32 {{.*}}) -> i32 { +// CXX-NEXT: cir.func @_Z1bii(%arg0: i32 {{.*}}, %arg1: i32 {{.*}}) -> i32 { // CXX-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] // CXX-NEXT: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] // CXX-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] @@ -70,7 +70,7 @@ void d(void) { // CXX-NEXT: %6 = cir.load %2 : cir.ptr , i32 // CXX-NEXT: cir.return %6 // CXX-NEXT: } -// CXX-NEXT: func @_Z1cdd(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { +// CXX-NEXT: cir.func @_Z1cdd(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { // CXX-NEXT: %0 = cir.alloca f64, cir.ptr , ["a", paraminit] // CXX-NEXT: %1 = cir.alloca f64, cir.ptr , ["b", paraminit] // CXX-NEXT: %2 = cir.alloca f64, cir.ptr , ["__retval", uninitialized] @@ -83,7 +83,7 @@ void d(void) { // CXX-NEXT: %6 = cir.load %2 : cir.ptr , f64 // CXX-NEXT: cir.return %6 : f64 // CXX-NEXT: } -// CXX-NEXT: func @_Z1dv() { +// CXX-NEXT: cir.func @_Z1dv() { // CXX-NEXT: call @_Z1av() : () -> () // CXX-NEXT: %0 = cir.cst(0 : i32) : i32 // CXX-NEXT: %1 = cir.cst(1 : i32) : i32 diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index ba795bfd0f13..1806b0cf44aa 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -11,24 +11,22 @@ void baz() { Struk s; } -// CHECK: !_22struct2EStruk22 = !cir.struct<"struct.Struk", i32> -// CHECK-NEXT: module { -// CHECK-NEXT: func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !_22struct2EStruk22, cir.ptr , ["s", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () -// CHECK-NEXT: cir.return -// CHECK-NEXT: } -// CHECK-NEXT: func @_ZN5StrukC1Ev(%arg0: !cir.ptr +// CHECK: !_22struct2EStruk22 = !cir.struct<"struct.Struk", i32> + +// CHECK: cir.func @_ZN5StrukC2Ev(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () // CHECK-NEXT: cir.return -// CHECK-NEXT: } -// CHECK-NEXT: func @_ZN5StrukC2Ev(%arg0: !cir.ptr + +// CHECK: cir.func @_ZN5StrukC1Ev(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () +// CHECK-NEXT: cir.return + +// CHECK: cir.func @_Z3bazv() +// CHECK-NEXT: %0 = cir.alloca !_22struct2EStruk22, cir.ptr , ["s", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () // CHECK-NEXT: cir.return -// CHECK-NEXT: } -// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 94c2d12b13ac..e08c18bda949 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -40,13 +40,13 @@ void use_global_string() { // CHECK-NEXT: cir.global external @s2 = @".str": !cir.ptr -// CHECK: func @_Z10use_globalv() { +// CHECK: cir.func @_Z10use_globalv() { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["li", cinit] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.get_global @a : cir.ptr // CHECK-NEXT: %2 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr -// CHECK: func @_Z17use_global_stringv() { +// CHECK: cir.func @_Z17use_global_stringv() { // CHECK-NEXT: %0 = cir.alloca i8, cir.ptr , ["c", cinit] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @s2 : cir.ptr > // CHECK-NEXT: %2 = cir.load %1 : cir.ptr >, !cir.ptr diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 1252e02ff2b0..76b24bd0b7eb 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -9,7 +9,7 @@ void g0(int a) { b = b + 2; } -// CHECK: func @_Z2g0i +// CHECK: cir.func @_Z2g0i // CHECK-NEXT %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} // CHECK-NEXT %1 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} // CHECK-NEXT cir.store %arg0, %0 : i32, cir.ptr @@ -37,7 +37,7 @@ void g1(int a) { } // Make sure alloca for "y" shows up in the entry block -// CHECK: func @_Z2g1i(%arg0: i32 +// CHECK: cir.func @_Z2g1i(%arg0: i32 // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["y", cinit] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index bba4d1618c8a..3a779071ab03 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -6,7 +6,7 @@ void fn() { auto a = [](){}; } -// CHECK: !22class2Eanon22 = type !cir.struct<"class.anon", i8> +// CHECK: !22class2Eanon22 = !cir.struct<"class.anon", i8> // CHECK-NEXT: module -// CHECK-NEXT: func @_Z2fnv() +// CHECK-NEXT: cir.func @_Z2fnv() // CHECK-NEXT: %0 = cir.alloca !22class2Eanon22, cir.ptr , ["a", uninitialized] diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index 6e4346f0302a..297627fd791a 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -9,7 +9,7 @@ void l0() { } } -// CPPSCOPE: func @_Z2l0v() { +// CPPSCOPE: cir.func @_Z2l0v() { // CPPSCOPE-NEXT: cir.scope { // CPPSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} // CPPSCOPE-NEXT: %1 = cir.alloca i32, cir.ptr , ["j", cinit] {alignment = 4 : i64} @@ -17,7 +17,7 @@ void l0() { // CPPSCOPE-NEXT: cir.store %2, %0 : i32, cir.ptr // CPPSCOPE-NEXT: cir.loop for(cond : { -// CSCOPE: func @l0() { +// CSCOPE: cir.func @l0() { // CSCOPE-NEXT: cir.scope { // CSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} // CSCOPE-NEXT: %1 = cir.cst(0 : i32) : i32 diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index cab759221a28..9f80af591a1c 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -6,7 +6,7 @@ void l0() { } } -// CHECK: func @_Z2l0v +// CHECK: cir.func @_Z2l0v // CHECK: cir.loop for(cond : { // CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { @@ -22,7 +22,7 @@ void l1() { } } -// CHECK: func @_Z2l1v +// CHECK: cir.func @_Z2l1v // CHECK: cir.loop for(cond : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 @@ -59,7 +59,7 @@ void l2(bool cond) { } } -// CHECK: func @_Z2l2b +// CHECK: cir.func @_Z2l2b // CHECK: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool @@ -124,7 +124,7 @@ void l3(bool cond) { } while (1); } -// CHECK: func @_Z2l3b +// CHECK: cir.func @_Z2l3b // CHECK: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool @@ -186,7 +186,7 @@ void l4() { } } -// CHECK: func @_Z2l4v +// CHECK: cir.func @_Z2l4v // CHECK: cir.loop while(cond : { // CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { @@ -210,7 +210,7 @@ void l5() { } while (0); } -// CHECK: func @_Z2l5v() { +// CHECK: cir.func @_Z2l5v() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %0 = cir.cst(0 : i32) : i32 @@ -235,7 +235,7 @@ void l6() { } } -// CHECK: func @_Z2l6v() { +// CHECK: cir.func @_Z2l6v() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: cir.yield continue diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index e6920c72b5ea..0afb0066857d 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -14,7 +14,7 @@ int s0(int a, int b) { // CHECK: #loc2 = loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]) // CHECK: #loc3 = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) // CHECK: module { -// CHECK: func @_Z2s0ii(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { +// CHECK: cir.func @_Z2s0ii(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { // CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} loc(#loc2) // CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] {alignment = 4 : i64} loc(#loc3) // CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} loc(#loc4) diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index d87d039689da..7e318411de37 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -20,7 +20,7 @@ void baz() { // CHECK: !_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> // CHECK-NEXT: module { -// CHECK-NEXT: func @baz() { +// CHECK-NEXT: cir.func @baz() { // CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 0d7fe4ec7189..d4693580ab8c 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -26,25 +26,15 @@ void baz() { // CHECK: !_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> -// CHECK: func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["result", cinit] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () -// CHECK-NEXT: %3 = cir.cst(4 : i32) : i32 -// CHECK-NEXT: call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, i32) -> () -// CHECK-NEXT: %4 = cir.cst(4 : i32) : i32 -// CHECK-NEXT: %5 = call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, i32) -> i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr -// CHECK-NEXT: cir.return -// CHECK-NEXT: } -// CHECK: func @_ZN3Bar6methodEv(%arg0: !cir.ptr + +// CHECK: cir.func @_ZN3Bar6methodEv(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: func @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 + +// CHECK: cir.func @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > @@ -52,7 +42,8 @@ void baz() { // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: func @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 + +// CHECK: cir.func @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} @@ -64,3 +55,16 @@ void baz() { // CHECK-NEXT: %5 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: cir.return %5 // CHECK-NEXT: } + +// CHECK: cir.func @_Z3bazv() +// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["result", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () +// CHECK-NEXT: %3 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, i32) -> () +// CHECK-NEXT: %4 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, i32) -> i32 +// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index ba32de695205..004bbe1ee129 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -16,7 +16,7 @@ void sw1(int a) { } } -// CHECK: func @_Z3sw1i +// CHECK: cir.func @_Z3sw1i // CHECK: cir.switch (%3 : i32) [ // CHECK-NEXT: case (equal, 0 : i32) { // CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 @@ -52,7 +52,7 @@ void sw2(int a) { } } -// CHECK: func @_Z3sw2i +// CHECK: cir.func @_Z3sw2i // CHECK: cir.scope { // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["yolo", cinit] // CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["fomo", cinit] @@ -68,7 +68,7 @@ void sw3(int a) { } } -// CHECK: func @_Z3sw3i +// CHECK: cir.func @_Z3sw3i // CHECK: cir.scope { // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.switch (%1 : i32) [ @@ -88,7 +88,7 @@ int sw4(int a) { return 0; } -// CHECK: func @_Z3sw4i +// CHECK: cir.func @_Z3sw4i // CHECK: cir.switch (%4 : i32) [ // CHECK-NEXT: case (equal, 42 : i32) { // CHECK-NEXT: cir.scope { @@ -113,7 +113,7 @@ void sw5(int a) { } } -// CHECK: func @_Z3sw5i +// CHECK: cir.func @_Z3sw5i // CHECK: cir.switch (%1 : i32) [ // CHECK-NEXT: case (equal, 1 : i32) { // CHECK-NEXT: cir.yield fallthrough @@ -131,7 +131,7 @@ void sw6(int a) { } } -// CHECK: func @_Z3sw6i +// CHECK: cir.func @_Z3sw6i // CHECK: cir.switch (%1 : i32) [ // CHECK-NEXT: case (anyof, [0, 1, 2] : i32) { // CHECK-NEXT: cir.yield break @@ -153,7 +153,7 @@ void sw7(int a) { } } -// CHECK: func @_Z3sw7i +// CHECK: cir.func @_Z3sw7i // CHECK: case (anyof, [0, 1, 2] : i32) { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index e536c9b423f9..c5a535b7f038 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -22,23 +22,23 @@ void t8() {} bool t9(bool b) { return b; } #endif -// CHECK: func @t0(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK: func @t1(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK: func @t2(%arg0: i8 loc({{.*}})) -> i8 { -// CHECK: func @t3(%arg0: i8 loc({{.*}})) -> i8 { -// CHECK: func @t4(%arg0: i16 loc({{.*}})) -> i16 { -// CHECK: func @t5(%arg0: i16 loc({{.*}})) -> i16 { -// CHECK: func @t6(%arg0: f32 loc({{.*}})) -> f32 { -// CHECK: func @t7(%arg0: f64 loc({{.*}})) -> f64 { -// CHECK: func @t8() { - -// CHECK-CPP: func @_Z2t0i(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK-CPP: func @_Z2t1j(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK-CPP: func @_Z2t2c(%arg0: i8 loc({{.*}})) -> i8 { -// CHECK-CPP: func @_Z2t3h(%arg0: i8 loc({{.*}})) -> i8 { -// CHECK-CPP: func @_Z2t4s(%arg0: i16 loc({{.*}})) -> i16 { -// CHECK-CPP: func @_Z2t5t(%arg0: i16 loc({{.*}})) -> i16 { -// CHECK-CPP: func @_Z2t6f(%arg0: f32 loc({{.*}})) -> f32 { -// CHECK-CPP: func @_Z2t7d(%arg0: f64 loc({{.*}})) -> f64 { -// CHECK-CPP: func @_Z2t8v() { -// CHECK-CPP: func @_Z2t9b(%arg0: !cir.bool loc({{.*}})) -> !cir.bool { +// CHECK: cir.func @t0(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK: cir.func @t1(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK: cir.func @t2(%arg0: i8 loc({{.*}})) -> i8 { +// CHECK: cir.func @t3(%arg0: i8 loc({{.*}})) -> i8 { +// CHECK: cir.func @t4(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK: cir.func @t5(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK: cir.func @t6(%arg0: f32 loc({{.*}})) -> f32 { +// CHECK: cir.func @t7(%arg0: f64 loc({{.*}})) -> f64 { +// CHECK: cir.func @t8() { + +// CHECK-CPP: cir.func @_Z2t0i(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK-CPP: cir.func @_Z2t1j(%arg0: i32 loc({{.*}})) -> i32 { +// CHECK-CPP: cir.func @_Z2t2c(%arg0: i8 loc({{.*}})) -> i8 { +// CHECK-CPP: cir.func @_Z2t3h(%arg0: i8 loc({{.*}})) -> i8 { +// CHECK-CPP: cir.func @_Z2t4s(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK-CPP: cir.func @_Z2t5t(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK-CPP: cir.func @_Z2t6f(%arg0: f32 loc({{.*}})) -> f32 { +// CHECK-CPP: cir.func @_Z2t7d(%arg0: f64 loc({{.*}})) -> f64 { +// CHECK-CPP: cir.func @_Z2t8v() { +// CHECK-CPP: cir.func @_Z2t9b(%arg0: !cir.bool loc({{.*}})) -> !cir.bool { diff --git a/clang/test/CIR/IR/array.cir b/clang/test/CIR/IR/array.cir index 5d7ebae40c60..182082b9ba82 100644 --- a/clang/test/CIR/IR/array.cir +++ b/clang/test/CIR/IR/array.cir @@ -1,11 +1,11 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s module { - func.func @arrays() { + cir.func @arrays() { %0 = cir.alloca !cir.array, cir.ptr>, ["x", paraminit] cir.return } } -// CHECK: func @arrays() { +// CHECK: cir.func @arrays() { // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] diff --git a/clang/test/CIR/IR/branch.cir b/clang/test/CIR/IR/branch.cir index 38cb0b4cbf5c..805f630e420b 100644 --- a/clang/test/CIR/IR/branch.cir +++ b/clang/test/CIR/IR/branch.cir @@ -1,7 +1,7 @@ // RUN: cir-tool %s | FileCheck %s -func.func @b0() { +cir.func @b0() { cir.scope { cir.loop while(cond : { %0 = cir.cst(true) : !cir.bool @@ -21,7 +21,7 @@ func.func @b0() { cir.return } -// CHECK: func @b0 +// CHECK: cir.func @b0 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %0 = cir.cst(true) : !cir.bool @@ -39,4 +39,4 @@ func.func @b0() { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return -// CHECK-NEXT: } +// CHECK-NEXT: } \ No newline at end of file diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index 8d6736c82afe..1bcb0327e79c 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -1,7 +1,8 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s +// XFAIL: * module { - func.func @yolo(%arg0 : i32) { + cir.func @yolo(%arg0 : i32) { %0 = cir.alloca !cir.array, cir.ptr>, ["x", paraminit] %a = cir.cast (int_to_bool, %arg0 : i32), !cir.bool @@ -11,6 +12,6 @@ module { } } -// CHECK: func @yolo(%arg0: i32) +// CHECK: cir.func @yolo(%arg0: i32) // CHECK: %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool // CHECK: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 7918d5927f3b..54386354643d 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -1,15 +1,16 @@ // Test the CIR operations can parse and print correctly (roundtrip) // RUN: cir-tool %s | cir-tool | FileCheck %s +// XFAIL: * module { - func.func @foo(%arg0: i32) -> i32 { + cir.func @foo(%arg0: i32) -> i32 { %0 = cir.alloca i32, cir.ptr , ["x", paraminit] cir.store %arg0, %0 : i32, cir.ptr %1 = cir.load %0 : cir.ptr , i32 cir.return %1 : i32 } - func.func @f3() -> i32 { + cir.func @f3() -> i32 { %0 = cir.alloca i32, cir.ptr , ["x", cinit] %1 = cir.cst(3 : i32) : i32 cir.store %1, %0 : i32, cir.ptr @@ -17,7 +18,7 @@ module { cir.return %2 : i32 } - func.func @if0(%arg0: i32) -> i32 { + cir.func @if0(%arg0: i32) -> i32 { %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} cir.store %arg0, %1 : i32, cir.ptr @@ -36,7 +37,7 @@ module { cir.return %5 : i32 } - func.func @s0() { + cir.func @s0() { %0 = cir.alloca i32, cir.ptr , ["x", uninitialized] {alignment = 4 : i64} cir.scope { %1 = cir.alloca i32, cir.ptr , ["y", uninitialized] {alignment = 4 : i64} @@ -46,14 +47,15 @@ module { } // CHECK: module { -// CHECK-NEXT: func @foo(%arg0: i32) -> i32 { + +// CHECK-NEXT: cir.func @foo(%arg0: i32) -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", paraminit] // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %1 : i32 // CHECK-NEXT: } -// CHECK-NEXT: func @f3() -> i32 { +// CHECK-NEXT: cir.func @f3() -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", cinit] // CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr @@ -71,7 +73,7 @@ module { // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: } -// CHECK: func @s0() { +// CHECK: cir.func @s0() { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", uninitialized] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["y", uninitialized] {alignment = 4 : i64} diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 649a370f3a3f..910a78851672 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -8,7 +8,7 @@ module { cir.global "private" internal @c : i32 cir.global "private" constant internal @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global external @s = @".str2": !cir.ptr - func.func @use_global() { + cir.func @use_global() { %0 = cir.get_global @a : cir.ptr cir.return } @@ -22,5 +22,5 @@ module { // CHECK: cir.global "private" constant internal @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.global external @s = @".str2": !cir.ptr -// CHECK: func @use_global() +// CHECK: cir.func @use_global() // CHECK-NEXT: %0 = cir.get_global @a : cir.ptr diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 07761dd196cd..9d92600ac401 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1,8 +1,9 @@ // Test attempts to build bogus CIR // RUN: cir-tool %s -verify-diagnostics -split-input-file +// XFAIL: * // expected-error@+2 {{'cir.cst' op nullptr expects pointer type}} -func.func @p0() { +cir.func @p0() { %1 = cir.cst(#cir.null : !cir.ptr) : i32 cir.return } @@ -10,14 +11,14 @@ func.func @p0() { // ----- // expected-error@+2 {{'cir.cst' op result type ('i32') must be '!cir.bool' for 'true'}} -func.func @b0() { +cir.func @b0() { %1 = cir.cst(true) : i32 cir.return } // ----- -func.func @if0() { +cir.func @if0() { %0 = cir.cst(true) : !cir.bool // expected-error@+1 {{'cir.if' op region control flow edge from Region #0 to parent results: source has 1 operands, but target successor needs 0}} cir.if %0 { @@ -29,7 +30,7 @@ func.func @if0() { // ----- -func.func @yield0() { +cir.func @yield0() { %0 = cir.cst(true) : !cir.bool cir.if %0 { // expected-error {{custom op 'cir.if' expected at least one block with cir.yield or cir.return}} cir.br ^a @@ -40,7 +41,7 @@ func.func @yield0() { // ----- -func.func @yieldfallthrough() { +cir.func @yieldfallthrough() { %0 = cir.cst(true) : !cir.bool cir.if %0 { cir.yield fallthrough // expected-error {{'cir.yield' op fallthrough only expected within 'cir.switch'}} @@ -50,7 +51,7 @@ func.func @yieldfallthrough() { // ----- -func.func @yieldbreak() { +cir.func @yieldbreak() { %0 = cir.cst(true) : !cir.bool cir.if %0 { cir.yield break // expected-error {{shall be dominated by 'cir.loop' or 'cir.switch'}} @@ -60,7 +61,7 @@ func.func @yieldbreak() { // ----- -func.func @yieldcontinue() { +cir.func @yieldcontinue() { %0 = cir.cst(true) : !cir.bool cir.if %0 { cir.yield continue // expected-error {{shall be dominated by 'cir.loop'}} @@ -70,7 +71,7 @@ func.func @yieldcontinue() { // ----- -func.func @s0() { +cir.func @s0() { %1 = cir.cst(2 : i32) : i32 cir.switch (%1 : i32) [ case (equal, 5) { @@ -82,7 +83,7 @@ func.func @s0() { // ----- -func.func @s1() { +cir.func @s1() { %1 = cir.cst(2 : i32) : i32 cir.switch (%1 : i32) [ case (equal, 5) { @@ -93,7 +94,7 @@ func.func @s1() { // ----- -func.func @badstride(%x: !cir.ptr) { +cir.func @badstride(%x: !cir.ptr) { %idx = cir.cst(2 : i32) : i32 %4 = cir.ptr_stride(%x : !cir.ptr, %idx : i32), !cir.ptr // expected-error {{requires the same type for first operand and result}} cir.return @@ -101,28 +102,28 @@ func.func @badstride(%x: !cir.ptr) { // ----- -func.func @cast0(%arg0: i32) { +cir.func @cast0(%arg0: i32) { %1 = cir.cast(int_to_bool, %arg0 : i32), i32 // expected-error {{requires !cir.bool type for result}} cir.return } // ----- -func.func @cast1(%arg1: f32) { +cir.func @cast1(%arg1: f32) { %1 = cir.cast(int_to_bool, %arg1 : f32), !cir.bool // expected-error {{requires integral type for result}} cir.return } // ----- -func.func @cast2(%p: !cir.ptr) { +cir.func @cast2(%p: !cir.ptr) { %2 = cir.cast(array_to_ptrdecay, %p : !cir.ptr), !cir.ptr // expected-error {{requires !cir.array pointee}} cir.return } // ----- -func.func @cast3(%p: !cir.ptr) { +cir.func @cast3(%p: !cir.ptr) { %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // expected-error {{requires same type for array element and pointee result}} cir.return @@ -130,7 +131,7 @@ func.func @cast3(%p: !cir.ptr) { // ----- -func.func @b0() { +cir.func @b0() { cir.scope { cir.loop while(cond : { // expected-error {{cond region must be terminated with 'cir.yield' or 'cir.yield continue'}} %0 = cir.cst(true) : !cir.bool diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 14b89c7257e5..44477768154c 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -1,6 +1,6 @@ // RUN: cir-tool %s | FileCheck %s -func.func @l0() { +cir.func @l0() { %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} %1 = cir.cst(0 : i32) : i32 cir.store %1, %0 : i32, cir.ptr @@ -89,7 +89,7 @@ func.func @l0() { cir.return } -// CHECK: func @l0 +// CHECK: cir.func @l0 // CHECK: cir.loop for(cond : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 @@ -159,7 +159,7 @@ func.func @l0() { // CHECK-NEXT: cir.yield // CHECK-NEXT: } -func.func @l1() { +cir.func @l1() { cir.scope { cir.loop while(cond : { cir.yield continue @@ -172,7 +172,7 @@ func.func @l1() { cir.return } -// CHECK: func @l1 +// CHECK: cir.func @l1 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: cir.yield continue @@ -185,7 +185,7 @@ func.func @l1() { // CHECK-NEXT: cir.return // CHECK-NEXT: } -func.func @l2() { +cir.func @l2() { cir.scope { cir.loop while(cond : { cir.yield @@ -198,7 +198,7 @@ func.func @l2() { cir.return } -// CHECK: func @l2 +// CHECK: cir.func @l2 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: cir.yield @@ -209,4 +209,4 @@ func.func @l2() { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return -// CHECK-NEXT: } +// CHECK-NEXT: } \ No newline at end of file diff --git a/clang/test/CIR/IR/ptr_stride.cir b/clang/test/CIR/IR/ptr_stride.cir index 4040f4067215..938a700961b6 100644 --- a/clang/test/CIR/IR/ptr_stride.cir +++ b/clang/test/CIR/IR/ptr_stride.cir @@ -1,7 +1,8 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s +// XFAIL: * module { - func.func @arraysubscript(%arg0: i32) { + cir.func @arraysubscript(%arg0: i32) { %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr @@ -11,7 +12,7 @@ module { } } -// CHECK: func @arraysubscript(%arg0: i32) { +// CHECK: cir.func @arraysubscript(%arg0: i32) { // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] // CHECK-NEXT: %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index 0bd62fd924d1..a2c985991115 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -1,6 +1,6 @@ // RUN: cir-tool %s | FileCheck %s -func.func @s0() { +cir.func @s0() { %1 = cir.cst(2 : i32) : i32 cir.switch (%1 : i32) [ case (default) { diff --git a/clang/test/CIR/IR/types.cir b/clang/test/CIR/IR/types.cir index 5d7ebae40c60..182082b9ba82 100644 --- a/clang/test/CIR/IR/types.cir +++ b/clang/test/CIR/IR/types.cir @@ -1,11 +1,11 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s module { - func.func @arrays() { + cir.func @arrays() { %0 = cir.alloca !cir.array, cir.ptr>, ["x", paraminit] cir.return } } -// CHECK: func @arrays() { +// CHECK: cir.func @arrays() { // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] diff --git a/clang/test/CIR/Transforms/lifetime-loop-valid.cpp b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp index 9bbd33c4b702..ea1269a6b49b 100644 --- a/clang/test/CIR/Transforms/lifetime-loop-valid.cpp +++ b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp @@ -38,4 +38,4 @@ void valid1(bool b, int j) { j = j - 1; } *p = 0; // expected-remark {{pset => { a, c }}} -} \ No newline at end of file +} diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index d040c792b8bd..370d32d7ec52 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -1,8 +1,9 @@ // RUN: cir-tool %s -cir-merge-cleanups -o %t.out.cir // RUN: FileCheck --input-file=%t.out.cir %s +// XFAIL: * module { - func.func @sw1(%arg0: i32, %arg1: i32) { + cir.func @sw1(%arg0: i32, %arg1: i32) { %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} %1 = cir.alloca i32, cir.ptr , ["c", paraminit] {alignment = 4 : i64} cir.store %arg0, %0 : i32, cir.ptr @@ -58,7 +59,7 @@ module { cir.return } - func.func @l0() { + cir.func @l0() { cir.scope { cir.loop while(cond : { %0 = cir.cst(true) : !cir.bool @@ -78,7 +79,7 @@ module { cir.return } - func.func @l1() { + cir.func @l1() { cir.scope { cir.loop while(cond : { %0 = cir.cst(false) : !cir.bool @@ -136,7 +137,7 @@ module { // CHECK-NEXT: } // CHECK-NEXT: ] -// CHECK: func @l0 +// CHECK: cir.func @l0 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: cir.yield continue @@ -149,7 +150,7 @@ module { // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: func @l1 +// CHECK: cir.func @l1 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: cir.yield diff --git a/clang/test/CIR/cc1.cir b/clang/test/CIR/cc1.cir index f1dd31346298..2037dec2482a 100644 --- a/clang/test/CIR/cc1.cir +++ b/clang/test/CIR/cc1.cir @@ -3,7 +3,7 @@ // XFAIL: * module { - func.func @foo() { + cir.func @foo() { cir.return } } diff --git a/clang/test/CIR/cirtool.cir b/clang/test/CIR/cirtool.cir index bbd3778fabc8..3f23b38dd5a5 100644 --- a/clang/test/CIR/cirtool.cir +++ b/clang/test/CIR/cirtool.cir @@ -1,11 +1,11 @@ -// RUN: cir-tool %s -cir-to-memref -cir-to-llvm -o %t.mlir +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: mlir-translate -mlir-to-llvmir %t.mlir -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM // XFAIL: * module { - func.func @foo() { + cir.func @foo() { cir.return } } diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index 1e805f5b4ea9..e755b130422a 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -12,7 +12,7 @@ void foo() {} // CIR: module { -// CIR-NEXT: func @foo() { +// CIR-NEXT: cir.func @foo() { // CIR-NEXT: cir.return // CIR-NEXT: } // CIR-NEXT: } diff --git a/clang/test/CIR/hello.c b/clang/test/CIR/hello.c index 9de0bb3194c2..4b07c04994aa 100644 --- a/clang/test/CIR/hello.c +++ b/clang/test/CIR/hello.c @@ -2,4 +2,4 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s void foo() {} -// CHECK: func.func @foo +// CHECK: cir.func @foo diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index a83edb85b86f..2f5f6260a502 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -26,10 +26,13 @@ int main(int argc, char **argv) { // TODO: register needed MLIR passes for CIR? mlir::DialectRegistry registry; - registry.insert(); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return cir::createConvertCIRToFuncPass(); + }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertCIRToLLVMPass(); }); diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h index ed264e5d258e..a7baf967fc0c 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h @@ -25,13 +25,6 @@ #include "mlir/Interfaces/LoopLikeInterface.h" #include "mlir/Interfaces/SideEffectInterfaces.h" -namespace mlir { -namespace func { -class FuncOp; -} // namespace func -using FuncOp = func::FuncOp; -} // namespace mlir - #include "mlir/Dialect/CIR/IR/CIRAttrs.h" #include "mlir/Dialect/CIR/IR/CIROpsDialect.h.inc" #include "mlir/Dialect/CIR/IR/CIROpsEnums.h" diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 7be1d50bfbf1..cd333d6ad980 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -327,7 +327,7 @@ def StoreOp : CIR_Op<"store", [ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : CIR_Op<"return", [HasParent<"mlir::FuncOp, ScopeOp, IfOp, SwitchOp, LoopOp">, +def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp, LoopOp">, Terminator]> { let summary = "Return from function"; let description = [{ @@ -1196,6 +1196,12 @@ def FuncOp : CIR_Op<"func", [ /// Ensures getType, getNumFuncArguments, and getNumFuncResults can be /// called safely. LogicalResult verifyType(); + + //===------------------------------------------------------------------===// + // SymbolOpInterface Methods + //===------------------------------------------------------------------===// + + bool isDeclaration() { return isExternal(); } }]; let hasCustomAssemblyFormat = 1; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index d6dece644e28..ebb7e62d8ccb 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -234,7 +234,7 @@ LogicalResult CastOp::verify() { //===----------------------------------------------------------------------===// static mlir::LogicalResult checkReturnAndFunction(ReturnOp op, - mlir::FuncOp function) { + cir::FuncOp function) { // ReturnOps currently only have a single optional operand. if (op.getNumOperands() > 1) return op.emitOpError() << "expects at most 1 return operand"; @@ -267,11 +267,11 @@ mlir::LogicalResult ReturnOp::verify() { // Returns can be present in multiple different scopes, get the // wrapping function and start from there. auto *fnOp = getOperation()->getParentOp(); - while (!isa(fnOp)) + while (!isa(fnOp)) fnOp = fnOp->getParentOp(); // Make sure return types match function return type. - if (checkReturnAndFunction(*this, cast(fnOp)).failed()) + if (checkReturnAndFunction(*this, cast(fnOp)).failed()) return failure(); return success(); @@ -533,7 +533,7 @@ LogicalResult ScopeOp::verify() { return success(); } mlir::LogicalResult YieldOp::verify() { auto isDominatedByLoopOrSwitch = [](Operation *parentOp) { - while (!llvm::isa(parentOp)) { + while (!llvm::isa(parentOp)) { if (llvm::isa(parentOp)) return true; parentOp = parentOp->getParentOp(); @@ -542,7 +542,7 @@ mlir::LogicalResult YieldOp::verify() { }; auto isDominatedByLoop = [](Operation *parentOp) { - while (!llvm::isa(parentOp)) { + while (!llvm::isa(parentOp)) { if (llvm::isa(parentOp)) return true; parentOp = parentOp->getParentOp(); @@ -1144,9 +1144,17 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { // Parse the optional function body. auto *body = state.addRegion(); - OptionalParseResult result = parser.parseOptionalRegion( + llvm::SMLoc loc = parser.getCurrentLocation(); + OptionalParseResult parseResult = parser.parseOptionalRegion( *body, arguments, /*enableNameShadowing=*/false); - return failure(result.has_value() && failed(*result)); + if (parseResult.has_value()) { + if (failed(*parseResult)) + return failure(); + // Function body was parsed, make sure its not empty. + if (body->empty()) + return parser.emitError(loc, "expected non-empty function body"); + } + return success(); } void cir::FuncOp::print(OpAsmPrinter &p) { @@ -1164,10 +1172,12 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p, *this, {getFunctionTypeAttrName(), getLinkageAttrName()}); // Print the body if this is not an external function. - Region &body = this->getBody(); - if (!body.empty()) + Region &body = getOperation()->getRegion(0); + if (!body.empty()) { + p << ' '; p.printRegion(body, /*printEntryBlockArgs=*/false, /*printBlockTerminators=*/true); + } } // Hook for OpTrait::FunctionLike, called after verifying that the 'type' diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp index 3ed4a4dda2f1..7ed7b3ddf5ac 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp @@ -704,7 +704,7 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return; } - if (isa(op)) + if (isa(op)) return checkFunc(op); if (auto ifOp = dyn_cast(op)) return checkIf(ifOp); diff --git a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp index 9443b36555f6..9c9042f00ba3 100644 --- a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp +++ b/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp @@ -162,8 +162,8 @@ SimplifyRetYieldBlocks::replaceScopeLikeOp(PatternRewriter &rewriter, } template <> -mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( - PatternRewriter &rewriter, mlir::FuncOp funcOp) const { +mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( + PatternRewriter &rewriter, cir::FuncOp funcOp) const { auto regionChanged = mlir::failure(); if (checkAndRewriteRegion(funcOp.getRegion(), rewriter).succeeded()) regionChanged = mlir::success(); @@ -195,7 +195,7 @@ mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( void getMergeCleanupsPatterns(RewritePatternSet &results, MLIRContext *context) { results.add, SimplifyRetYieldBlocks, - SimplifyRetYieldBlocks, + SimplifyRetYieldBlocks, SimplifyRetYieldBlocks, SimplifyRetYieldBlocks>(context); } @@ -221,7 +221,7 @@ void MergeCleanupsPass::runOnOperation() { SmallVector opsToSimplify; op->walk([&](Operation *op) { - if (isa( + if (isa( op)) opsToSimplify.push_back(op); }); From a8f3bab5a9d9c18b359d6c1f0c3a577f8521cf47 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 4 Aug 2022 15:18:41 -0700 Subject: [PATCH 0545/2301] [CIR][CodeGen][NFC] Update deprecated use of parseSourceString --- clang/lib/CIR/LowerToLLVM.cpp | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 54d3ce706064..54651e092f05 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -68,8 +68,6 @@ struct ConvertCIRToFuncPass : public mlir::PassWrapper> { void getDependentDialects(mlir::DialectRegistry ®istry) const override { - // FIXME: after we rebase to more recent changes, this should be - // mlir::FuncDialect instead. registry.insert(); } @@ -244,13 +242,9 @@ void ConvertCIRToMemRefPass::runOnOperation() { } void ConvertCIRToFuncPass::runOnOperation() { - // End goal here is to legalize to mlir::FuncOp (builtin dialect) and - // mlir::ReturnOp (standard dialect). This is done in two steps, becase - // cir.return is a cir.func child it will be ignored in the first conversion. - // - // TODO: is there a better way to handle this? If such handling is decoupled - // from the same pass the verifier won't accept the mix between mlir::FuncOp - // and mlir::cir::ReturnOp. + // End goal here is to legalize to builtin.func, func.return, func.call. + // Given that children node are ignored, handle both return and call in + // a subsequent conversion. // Convert cir.func to builtin.func mlir::ConversionTarget fnTarget(getContext()); @@ -264,7 +258,7 @@ void ConvertCIRToFuncPass::runOnOperation() { if (failed(applyPartialConversion(module, fnTarget, std::move(fnPatterns)))) signalPassFailure(); - // Convert cir.return to std.return, cir.call to std.call + // Convert cir.return -> func.return, cir.call -> func.call mlir::ConversionTarget retTarget(getContext()); retTarget .addLegalOp(); From 14603c7608af5cd3953447060153933d9eebaebe Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 4 Aug 2022 15:31:55 -0700 Subject: [PATCH 0546/2301] [CIR][NFC] Modernize custom parsing and printing with hasCustomAssemblyFormat --- mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 1 - mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index cd333d6ad980..604cc7db1e75 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -404,7 +404,6 @@ def IfOp : CIR_Op<"if", let arguments = (ins CIR_BoolType:$condition); let regions = (region AnyRegion:$thenRegion, AnyRegion:$elseRegion); - // FIXME: unify these within CIR_Ops. let hasCustomAssemblyFormat = 1; let hasVerifier = 1; diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index ebb7e62d8ccb..6464bde95e55 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -328,7 +328,7 @@ static LogicalResult checkBlockTerminator(OpAsmParser &parser, return failure(); } -ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { +ParseResult cir::IfOp::parse(OpAsmParser &parser, OperationState &result) { // Create the regions for 'then'. result.regions.reserve(2); Region *thenRegion = result.addRegion(); @@ -381,7 +381,7 @@ bool shouldPrintTerm(mlir::Region &r) { return false; } -void IfOp::print(OpAsmPrinter &p) { +void cir::IfOp::print(OpAsmPrinter &p) { p << " " << getCondition() << " "; auto &thenRegion = this->getThenRegion(); p.printRegion(thenRegion, @@ -467,7 +467,7 @@ LogicalResult IfOp::verify() { return success(); } // ScopeOp //===----------------------------------------------------------------------===// -ParseResult ScopeOp::parse(OpAsmParser &parser, OperationState &result) { +ParseResult cir::ScopeOp::parse(OpAsmParser &parser, OperationState &result) { // Create one region within 'scope'. result.regions.reserve(1); Region *scopeRegion = result.addRegion(); @@ -486,7 +486,7 @@ ParseResult ScopeOp::parse(OpAsmParser &parser, OperationState &result) { return success(); } -void ScopeOp::print(OpAsmPrinter &p) { +void cir::ScopeOp::print(OpAsmPrinter &p) { p << ' '; auto &scopeRegion = this->getScopeRegion(); p.printRegion(scopeRegion, From 01142f5899cd8ef218fd012221143b3bd10ed004 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 5 Aug 2022 15:30:38 -0700 Subject: [PATCH 0547/2301] [CIR][CodeGen] Add support for -mconstructor-aliases (default on Linux codegen) --- clang/lib/CIR/CIRGenItaniumCXXABI.cpp | 5 ++++- clang/lib/CIR/CIRGenModule.cpp | 28 ++++++++++++++++++++++++- clang/lib/CIR/CIRGenModule.h | 8 +++++++ clang/test/CIR/CodeGen/String.cpp | 30 ++++++++++++++++++++++++++- clang/test/CIR/CodeGen/ctor-alias.cpp | 26 +++++++++++++++++++++++ 5 files changed, 94 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/ctor-alias.cpp diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp index 0b3e594af763..00dd44801437 100644 --- a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CIRGenItaniumCXXABI.cpp @@ -298,7 +298,10 @@ void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { } if (CIRGenType == StructorCIRGen::RAUW) { - llvm_unreachable("NYI"); + StringRef MangledName = CGM.getMangledName(GD); + auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl); + CGM.addReplacement(MangledName, Aliasee); + return; } } diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 8bb3251ef2ab..3092448f1882 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -1743,7 +1743,7 @@ void CIRGenModule::Release() { buildDeferred(); // TODO: buildVTablesOpportunistically(); // TODO: applyGlobalValReplacements(); - // TODO: applyReplacements(); + applyReplacements(); // TODO: checkAliases(); // TODO: buildMultiVersionFunctions(); buildCXXGlobalInitFunc(); @@ -1840,3 +1840,29 @@ void CIRGenModule::UpdateCompletedType(const TagDecl *TD) { // Make sure that this type is translated. genTypes.UpdateCompletedType(TD); } + +void CIRGenModule::addReplacement(StringRef Name, mlir::Operation *Op) { + Replacements[Name] = Op; +} + +void CIRGenModule::applyReplacements() { + for (auto &I : Replacements) { + StringRef MangledName = I.first(); + mlir::Operation *Replacement = I.second; + auto *Entry = getGlobalValue(MangledName); + if (!Entry) + continue; + assert(isa(Entry) && "expected function"); + auto OldF = cast(Entry); + auto NewF = dyn_cast(Replacement); + assert(NewF && "not implemented"); + + // Replace old with new, but keep the old order. + if (OldF.replaceAllSymbolUses(NewF.getSymNameAttr(), theModule).failed()) + llvm_unreachable("internal error, cannot RAUW symbol"); + if (NewF) { + NewF->moveBefore(OldF); + OldF->erase(); + } + } +} diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 29dab09b25a8..23a289acf5e8 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -360,6 +360,8 @@ class CIRGenModule { getCIRLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable); + void addReplacement(StringRef Name, mlir::Operation *Op); + private: // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR @@ -376,6 +378,12 @@ class CIRGenModule { // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; llvm::StringMap Manglings; + + // FIXME: should we use llvm::TrackingVH here? + typedef llvm::StringMap ReplacementsTy; + ReplacementsTy Replacements; + /// Call replaceAllUsesWith on all pairs in Replacements. + void applyReplacements(); }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index b7a373340073..f9627dba80f2 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s class String { char *storage{nullptr}; @@ -8,11 +8,13 @@ class String { public: String() : size{0} {} String(int size) : size{size} {} + String(const char *s) {} }; void test() { String s1{}; String s2{1}; + String s3{"abcdefghijklmnop"}; } // CHECK: cir.func @_ZN6StringC2Ev @@ -43,3 +45,29 @@ void test() { // CHECK-NEXT: cir.store %7, %5 : i64, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } + +// CHECK: cir.func @_ZN6StringC2EPKc +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "storage"}> : (!cir.ptr>) -> !cir.ptr> +// CHECK-NEXT: %4 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.return + +// CHECK: cir.func @_ZN6StringC1EPKc +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: cir.return + +// CHECK: cir.func @_Z4testv() { +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, i32) -> () +// CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp new file mode 100644 index 000000000000..3ea7eb7434dd --- /dev/null +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir %s -o - | FileCheck %s + +struct DummyString { + DummyString(const char *s) {} +}; + +void t() { + DummyString s4 = "yolo"; +} + +// CHECK: cir.func @_ZN11DummyStringC2EPKc +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return + +// CHECK-NOT: cir.fun @_ZN11DummyStringC1EPKc + +// CHECK: cir.func @_Z1tv +// CHECK-NEXT: %0 = cir.alloca !_22struct2EDummyString22, cir.ptr , ["s4", uninitialized] {alignment = 1 : i64} +// CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: cir.return From c9b4a227e0124be22032478dbe3d119d972b9ebe Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 Aug 2022 10:08:08 -0700 Subject: [PATCH 0548/2301] [CIR][CodeGen][NFC] Update switches to account for new Stmt and Decl from July rebase --- clang/lib/CIR/CIRGenDecl.cpp | 6 +++--- clang/lib/CIR/CIRGenStmt.cpp | 11 ++++++++++- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CIRGenDecl.cpp b/clang/lib/CIR/CIRGenDecl.cpp index 1daf0a8badbe..756d542ae6a5 100644 --- a/clang/lib/CIR/CIRGenDecl.cpp +++ b/clang/lib/CIR/CIRGenDecl.cpp @@ -259,7 +259,6 @@ void CIRGenFunction::buildDecl(const Decl &D) { switch (D.getKind()) { case Decl::ImplicitConceptSpecialization: case Decl::HLSLBuffer: - case Decl::UnnamedGlobalConstant: case Decl::TopLevelStmt: llvm_unreachable("NYI"); case Decl::BuiltinTemplate: @@ -318,10 +317,10 @@ void CIRGenFunction::buildDecl(const Decl &D) { llvm_unreachable("Declaration should not be in declstmts!"); case Decl::Record: // struct/union/class X; case Decl::CXXRecord: // struct/union/class X; [C++] - assert(0 && "Not implemented"); + llvm_unreachable("NYI"); return; case Decl::Enum: // enum X; - assert(0 && "Not implemented"); + llvm_unreachable("NYI"); return; case Decl::Function: // void X(); case Decl::EnumConstant: // enum ? { X = ? } @@ -338,6 +337,7 @@ void CIRGenFunction::buildDecl(const Decl &D) { case Decl::Concept: case Decl::LifetimeExtendedTemporary: case Decl::RequiresExprBody: + case Decl::UnnamedGlobalConstant: // None of these decls require codegen support. return; diff --git a/clang/lib/CIR/CIRGenStmt.cpp b/clang/lib/CIR/CIRGenStmt.cpp index e0816c55252f..44f49eb27534 100644 --- a/clang/lib/CIR/CIRGenStmt.cpp +++ b/clang/lib/CIR/CIRGenStmt.cpp @@ -193,21 +193,30 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::OMPTargetParallelForDirectiveClass: case Stmt::OMPTaskLoopDirectiveClass: case Stmt::OMPTaskLoopSimdDirectiveClass: + case Stmt::OMPMaskedTaskLoopDirectiveClass: + case Stmt::OMPMaskedTaskLoopSimdDirectiveClass: case Stmt::OMPMasterTaskLoopDirectiveClass: case Stmt::OMPMasterTaskLoopSimdDirectiveClass: + case Stmt::OMPParallelGenericLoopDirectiveClass: + case Stmt::OMPParallelMaskedDirectiveClass: + case Stmt::OMPParallelMaskedTaskLoopDirectiveClass: + case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass: case Stmt::OMPParallelMasterTaskLoopDirectiveClass: case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass: case Stmt::OMPDistributeDirectiveClass: - case Stmt::OMPTargetUpdateDirectiveClass: case Stmt::OMPDistributeParallelForDirectiveClass: case Stmt::OMPDistributeParallelForSimdDirectiveClass: case Stmt::OMPDistributeSimdDirectiveClass: + case Stmt::OMPTargetParallelGenericLoopDirectiveClass: case Stmt::OMPTargetParallelForSimdDirectiveClass: case Stmt::OMPTargetSimdDirectiveClass: + case Stmt::OMPTargetTeamsGenericLoopDirectiveClass: + case Stmt::OMPTargetUpdateDirectiveClass: case Stmt::OMPTeamsDistributeDirectiveClass: case Stmt::OMPTeamsDistributeSimdDirectiveClass: case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass: case Stmt::OMPTeamsDistributeParallelForDirectiveClass: + case Stmt::OMPTeamsGenericLoopDirectiveClass: case Stmt::OMPTargetTeamsDirectiveClass: case Stmt::OMPTargetTeamsDistributeDirectiveClass: case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass: From b75925240636e13f879452d5ee8a76b61607dbca Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 30 Aug 2022 18:04:02 -0700 Subject: [PATCH 0549/2301] [CIR] Fix issue introduced after tons of rebases and un-XFAIL several tests --- clang/test/CIR/IR/cast.cir | 1 - clang/test/CIR/IR/cir-ops.cir | 1 - clang/test/CIR/IR/invalid.cir | 1 - clang/test/CIR/IR/ptr_stride.cir | 1 - clang/test/CIR/Transforms/merge-cleanups.cir | 1 - mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 4 +++- 6 files changed, 3 insertions(+), 6 deletions(-) diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index 1bcb0327e79c..b71840833333 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -1,5 +1,4 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s -// XFAIL: * module { cir.func @yolo(%arg0 : i32) { diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 54386354643d..74eaea82b8ae 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -1,7 +1,6 @@ // Test the CIR operations can parse and print correctly (roundtrip) // RUN: cir-tool %s | cir-tool | FileCheck %s -// XFAIL: * module { cir.func @foo(%arg0: i32) -> i32 { %0 = cir.alloca i32, cir.ptr , ["x", paraminit] diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 9d92600ac401..df75178cab44 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1,6 +1,5 @@ // Test attempts to build bogus CIR // RUN: cir-tool %s -verify-diagnostics -split-input-file -// XFAIL: * // expected-error@+2 {{'cir.cst' op nullptr expects pointer type}} cir.func @p0() { diff --git a/clang/test/CIR/IR/ptr_stride.cir b/clang/test/CIR/IR/ptr_stride.cir index 938a700961b6..200e22ae1d52 100644 --- a/clang/test/CIR/IR/ptr_stride.cir +++ b/clang/test/CIR/IR/ptr_stride.cir @@ -1,5 +1,4 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s -// XFAIL: * module { cir.func @arraysubscript(%arg0: i32) { diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 370d32d7ec52..734c435dd0e1 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-merge-cleanups -o %t.out.cir // RUN: FileCheck --input-file=%t.out.cir %s -// XFAIL: * module { cir.func @sw1(%arg0: i32, %arg1: i32) { diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index 6464bde95e55..a88c7760349f 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -1127,6 +1127,9 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { resultAttrs)) return failure(); + for (auto &arg : arguments) + argTypes.push_back(arg.type); + auto fnType = builder.getFunctionType(argTypes, resultTypes); state.addAttribute(getFunctionTypeAttrName(state.name), TypeAttr::get(fnType)); @@ -1136,7 +1139,6 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { return failure(); // Add the attributes to the function arguments. - assert(argAttrs.size() == argTypes.size()); assert(resultAttrs.size() == resultTypes.size()); function_interface_impl::addArgAndResultAttrs( builder, state, arguments, resultAttrs, getArgAttrsAttrName(state.name), From 9885912c87279f96f79e11f1107891559c729500 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Aug 2022 14:38:42 -0700 Subject: [PATCH 0550/2301] [CIR] Add bitcast cast kind and CastOp test --- clang/test/CIR/IR/cast.cir | 7 +++++++ clang/test/CIR/IR/invalid.cir | 7 +++++++ mlir/include/mlir/Dialect/CIR/IR/CIROps.td | 4 +++- mlir/lib/Dialect/CIR/IR/CIRDialect.cpp | 6 ++++++ 4 files changed, 23 insertions(+), 1 deletion(-) diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index b71840833333..4af2fa936b50 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -9,8 +9,15 @@ module { %4 = cir.cst(0 : i32) : i32 cir.return } + + cir.func @bitcast(%p: !cir.ptr) { + %2 = cir.cast(bitcast, %p : !cir.ptr), !cir.ptr + cir.return + } } // CHECK: cir.func @yolo(%arg0: i32) // CHECK: %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool // CHECK: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK: cir.func @bitcast +// CHECK: %0 = cir.cast(bitcast, %arg0 : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index df75178cab44..998da7fa3fb8 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -130,6 +130,13 @@ cir.func @cast3(%p: !cir.ptr) { // ----- +cir.func @cast4(%p: !cir.ptr) { + %2 = cir.cast(bitcast, %p : !cir.ptr), i32 // expected-error {{requires !cir.ptr type for source and result}} + cir.return +} + +// ----- + cir.func @b0() { cir.scope { cir.loop while(cond : { // expected-error {{cond region must be terminated with 'cir.yield' or 'cir.yield continue'}} diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td index 604cc7db1e75..b04b7071648e 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/mlir/include/mlir/Dialect/CIR/IR/CIROps.td @@ -44,11 +44,13 @@ class CIR_Op traits = []> : def CK_IntegralToBoolean : I32EnumAttrCase<"int_to_bool", 1>; def CK_ArrayToPointerDecay : I32EnumAttrCase<"array_to_ptrdecay", 2>; def CK_IntegralCast : I32EnumAttrCase<"integral", 3>; +def CK_BitCast : I32EnumAttrCase<"bitcast", 4>; def CastKind : I32EnumAttr< "CastKind", "cast kind", - [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast]> { + [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast, + CK_BitCast]> { let cppNamespace = "::mlir::cir"; } diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp index a88c7760349f..43a630f540fe 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp @@ -224,6 +224,12 @@ LogicalResult CastOp::verify() { << "requires same type for array element and pointee result"; return success(); } + case cir::CastKind::bitcast: { + if (!srcType.dyn_cast() || + !resType.dyn_cast()) + return emitOpError() << "requires !cir.ptr type for source and result"; + return success(); + } } llvm_unreachable("Unknown CastOp kind?"); From a83d1a8cdf06efb8b6ee51b112fb066f731479dd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Aug 2022 16:17:57 -0700 Subject: [PATCH 0551/2301] [CIR][CodeGen] Fix return blocks with wrong number of return values Also make sure unused `cleanupBlocks` get erased after cleanup is generated. --- clang/lib/CIR/CIRGenFunction.cpp | 25 +++++++++++++++---------- clang/test/CIR/CodeGen/goto.cpp | 22 ++++++++++++++++++++++ 2 files changed, 37 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 518de7a548ff..610d1d667edc 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -270,6 +270,15 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { auto &builder = CGF.builder; auto *localScope = CGF.currLexScope; + auto buildReturn = [&](mlir::Location loc) { + if (CGF.FnRetCIRTy.has_value()) { + // If there's anything to return, load it first. + auto val = builder.create(loc, *CGF.FnRetCIRTy, *CGF.FnRetAlloca); + return builder.create(loc, llvm::ArrayRef(val.getResult())); + } + return builder.create(loc); + }; + // Handle pending gotos and the solved labels in this scope. while (!localScope->PendingGotos.empty()) { auto gotoInfo = localScope->PendingGotos.back(); @@ -295,16 +304,9 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { mlir::Location retLoc = *localScope->getRetLocs()[curLoc]; curLoc++; - // TODO: insert actual scope cleanup HERE (dtors and etc) + // TODO(cir): insert actual scope cleanup HERE (dtors and etc) - // If there's anything to return, load it first. - if (CGF.FnRetCIRTy.has_value()) { - auto val = - builder.create(retLoc, *CGF.FnRetCIRTy, *CGF.FnRetAlloca); - builder.create(retLoc, llvm::ArrayRef(val.getResult())); - } else { - builder.create(retLoc); - } + (void)buildReturn(retLoc); } auto insertCleanupAndLeave = [&](mlir::Block *InsPt) { @@ -314,7 +316,7 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { if (localScope->Depth != 0) // end of any local scope != function builder.create(localScope->EndLoc); else - builder.create(localScope->EndLoc); + (void)buildReturn(localScope->EndLoc); }; // If a cleanup block has been created at some point, branch to it @@ -341,6 +343,9 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { // An empty non-entry block has nothing to offer. if (!entryBlock && currBlock->empty()) { currBlock->erase(); + // Remove unused cleanup blocks. + if (cleanupBlock && cleanupBlock->hasNoPredecessors()) + cleanupBlock->erase(); return; } diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 76b24bd0b7eb..68587763d818 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -42,3 +42,25 @@ void g1(int a) { // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["y", cinit] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr + +int g2() { + int b = 1; + goto end; + b = b + 1; +end: + b = b + 2; + return 1; +} + +// Make sure (1) we don't get dangling unused cleanup blocks +// (2) generated returns consider the function type + +// CHECK: cir.func @_Z2g2v() -> i32 { + +// CHECK: cir.br ^bb2 +// CHECK-NEXT: ^bb1: // no predecessors +// CHECK: ^bb2: // 2 preds: ^bb0, ^bb1 + +// CHECK: [[R:%[0-9]+]] = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: [[R]] : i32 +// CHECK-NEXT: } \ No newline at end of file From 99eb305c57f9e1fb4095776f04682e34353fcf2e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Aug 2022 16:09:11 -0700 Subject: [PATCH 0552/2301] [CIR][CodeGen][NFC] Add skeleton for handling CXXNewExpr --- clang/lib/CIR/CIRGenExprCXX.cpp | 4 ++++ clang/lib/CIR/CIRGenExprScalar.cpp | 4 +++- clang/lib/CIR/CIRGenFunction.h | 2 ++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CIRGenExprCXX.cpp b/clang/lib/CIR/CIRGenExprCXX.cpp index 83357330cbfc..a90dcfe5349e 100644 --- a/clang/lib/CIR/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CIRGenExprCXX.cpp @@ -254,3 +254,7 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); } + +mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { + assert(0 && "not implemented"); +} diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 7582bd6b70f6..838508b35d1d 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -272,7 +272,9 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitExprWithCleanups(ExprWithCleanups *E) { llvm_unreachable("NYI"); } - mlir::Value VisitCXXNewExpr(const CXXNewExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitCXXNewExpr(const CXXNewExpr *E) { + return CGF.buildCXXNewExpr(E); + } mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *E) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 0518c1253ec4..1bc47206374c 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -455,6 +455,8 @@ class CIRGenFunction { clang::NestedNameSpecifier *Qualifier, bool IsArrow, const clang::Expr *Base); + mlir::Value buildCXXNewExpr(const CXXNewExpr *E); + mlir::Operation *createLoad(const clang::VarDecl *VD, const char *Name); // Wrapper for function prototype sources. Wraps either a FunctionProtoType or From 611080f2ec05cf105541358db32d05b6b093c80e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Aug 2022 14:52:25 -0700 Subject: [PATCH 0553/2301] [CIR][CodeGen][NFC] Cleanup and improve buildReturnStmt to handle more cases --- clang/lib/CIR/CIRGenStmt.cpp | 37 ++++++++++++-------- clang/lib/CIR/UnimplementedFeatureGuarding.h | 5 +++ 2 files changed, 28 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CIRGenStmt.cpp b/clang/lib/CIR/CIRGenStmt.cpp index 44f49eb27534..6eed97bb737e 100644 --- a/clang/lib/CIR/CIRGenStmt.cpp +++ b/clang/lib/CIR/CIRGenStmt.cpp @@ -407,17 +407,32 @@ mlir::LogicalResult CIRGenFunction::buildDeclStmt(const DeclStmt &S) { } mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { - assert(!(getContext().getLangOpts().ElideConstructors && - S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) && - "unimplemented"); - assert(!FnRetQualTy->isReferenceType() && "unimplemented"); + assert(!UnimplementedFeature::requiresReturnValueCheck()); auto loc = getLoc(S.getSourceRange()); // Emit the result value, even if unused, to evaluate the side effects. const Expr *RV = S.getRetValue(); - if (RV) { - assert(!isa(RV) && "unimplemented"); + // TODO(cir): LLVM codegen uses a RunCleanupsScope cleanupScope here, we + // should model this in face of dtors. + + if (const auto *EWC = dyn_cast_or_null(RV)) + assert(0 && "not implemented"); + + if (getContext().getLangOpts().ElideConstructors && S.getNRVOCandidate() && + S.getNRVOCandidate()->isNRVOVariable()) { + assert(0 && "not implemented"); + } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) { + // Make sure not to return anything, but evaluate the expression + // for side effects. + if (RV) { + assert(0 && "not implemented"); + } + } else if (!RV) { + // Do nothing (return value is left uninitialized) + } else if (FnRetTy->isReferenceType()) { + assert(0 && "not implemented"); + } else { mlir::Value V = nullptr; switch (CIRGenFunction::getEvaluationKind(RV->getType())) { case TEK_Scalar: @@ -429,14 +444,6 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { llvm::errs() << "ReturnStmt EvaluationKind not implemented\n"; return mlir::failure(); } - - // Otherwise, this return operation has zero operands. - if (!V || (RV && RV->getType()->isVoidType())) { - // FIXME: evaluate for side effects. - } - } else { - // Do nothing (return value is left uninitialized), this is also - // the path when returning from void functions. } // Create a new return block (if not existent) and add a branch to @@ -447,6 +454,8 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { // Insert the new block to continue codegen after branch to ret block. builder.createBlock(builder.getBlock()->getParent()); + + // TODO(cir): LLVM codegen for a cleanup on cleanupScope here. return mlir::success(); } diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/UnimplementedFeatureGuarding.h index 54e12fdcdea6..04025fefdea4 100644 --- a/clang/lib/CIR/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/UnimplementedFeatureGuarding.h @@ -45,6 +45,11 @@ struct UnimplementedFeature { // Debug info static bool generateDebugInfo() { return false; } + + static bool getASTAllocaAddressSpace() { return false; } + static bool tryEmitAsConstant() { return false; } + static bool incrementProfileCounter() { return false; } + static bool requiresReturnValueCheck() { return false; } }; } // namespace cir From 555cf8cb32efa71b2d80db6d81669203401f6e93 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 30 Aug 2022 17:46:47 -0700 Subject: [PATCH 0554/2301] [CIR][CodeGen] Introduce setFunctionLinkage and hook it up to FuncOp creation Among other things introduce linkonce_odr for relevant class methods --- clang/lib/CIR/CIRGenCXX.cpp | 2 +- clang/lib/CIR/CIRGenModule.cpp | 16 ++++++++++++++-- clang/lib/CIR/CIRGenModule.h | 7 +++++++ clang/test/CIR/CodeGen/String.cpp | 8 ++++---- clang/test/CIR/CodeGen/ctor-alias.cpp | 2 +- clang/test/CIR/CodeGen/ctor.cpp | 4 ++-- clang/test/CIR/CodeGen/struct.cpp | 6 +++--- 7 files changed, 32 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CIRGenCXX.cpp b/clang/lib/CIR/CIRGenCXX.cpp index 1b5f451ce030..45252ba732f4 100644 --- a/clang/lib/CIR/CIRGenCXX.cpp +++ b/clang/lib/CIR/CIRGenCXX.cpp @@ -25,7 +25,7 @@ mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { auto Fn = getAddrOfCXXStructor(GD, &FnInfo, /*FnType=*/nullptr, /*DontDefer=*/true, ForDefinition); - // TODO: setFunctionLinkage + setFunctionLinkage(GD, Fn); CIRGenFunction CGF{*this, builder}; CurCGF = &CGF; { diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CIRGenModule.cpp index 3092448f1882..84d6b1062628 100644 --- a/clang/lib/CIR/CIRGenModule.cpp +++ b/clang/lib/CIR/CIRGenModule.cpp @@ -336,7 +336,7 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, if (!Fn.isDeclaration()) return; - // TODO(cir): setFunctionLinkage + setFunctionLinkage(GD, Fn); // TODO(cir): setGVProperties // TODO(cir): MaubeHandleStaticInExternC // TODO(cir): maybeSetTrivialComdat @@ -1183,10 +1183,14 @@ mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibilityFromCIRLinkage( return mlir::SymbolTable::Visibility::Private; case mlir::cir::GlobalLinkageKind::ExternalLinkage: case mlir::cir::GlobalLinkageKind::ExternalWeakLinkage: + case mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage: return mlir::SymbolTable::Visibility::Public; - default: + default: { + llvm::errs() << "visibility not implemented for '" + << stringifyGlobalLinkageKind(GLK) << "'\n"; assert(0 && "not implemented"); } + } llvm_unreachable("linkage should be handled above!"); } @@ -1458,6 +1462,14 @@ mlir::cir::FuncOp CIRGenModule::createCIRFunction(mlir::Location loc, builder.setInsertionPoint(curCGF->CurFn.getOperation()); f = builder.create(loc, name, Ty); + assert(f.isDeclaration() && "expected empty body"); + + // A declaration gets private visibility by default, but external linkage + // as the default linkage. + f.setLinkageAttr(mlir::cir::GlobalLinkageKindAttr::get( + builder.getContext(), mlir::cir::GlobalLinkageKind::ExternalLinkage)); + mlir::SymbolTable::setSymbolVisibility( + f, mlir::SymbolTable::Visibility::Private); if (!curCGF) theModule.push_back(f); } diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CIRGenModule.h index 23a289acf5e8..384cd79ee2a0 100644 --- a/clang/lib/CIR/CIRGenModule.h +++ b/clang/lib/CIR/CIRGenModule.h @@ -359,6 +359,13 @@ class CIRGenModule { mlir::cir::GlobalLinkageKind getCIRLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable); + void setFunctionLinkage(GlobalDecl GD, mlir::cir::FuncOp f) { + auto L = getFunctionLinkage(GD); + f.setLinkageAttr( + mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), L)); + mlir::SymbolTable::setSymbolVisibility(f, + getMLIRVisibilityFromCIRLinkage(L)); + } void addReplacement(StringRef Name, mlir::Operation *Op); diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index f9627dba80f2..21678ab8bdb5 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -17,7 +17,7 @@ void test() { String s3{"abcdefghijklmnop"}; } -// CHECK: cir.func @_ZN6StringC2Ev +// CHECK: cir.func linkonce_odr @_ZN6StringC2Ev // CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 @@ -30,7 +30,7 @@ void test() { // CHECK-NEXT: cir.store %6, %4 : i64, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func @_ZN6StringC2Ei +// CHECK: cir.func linkonce_odr @_ZN6StringC2Ei // CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["size", paraminit] // CHECK-NEXT: cir.store %arg0, %0 @@ -46,7 +46,7 @@ void test() { // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func @_ZN6StringC2EPKc +// CHECK: cir.func linkonce_odr @_ZN6StringC2EPKc // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > @@ -57,7 +57,7 @@ void test() { // CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.return -// CHECK: cir.func @_ZN6StringC1EPKc +// CHECK: cir.func linkonce_odr @_ZN6StringC1EPKc // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp index 3ea7eb7434dd..01c698918e8a 100644 --- a/clang/test/CIR/CodeGen/ctor-alias.cpp +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -8,7 +8,7 @@ void t() { DummyString s4 = "yolo"; } -// CHECK: cir.func @_ZN11DummyStringC2EPKc +// CHECK: cir.func linkonce_odr @_ZN11DummyStringC2EPKc // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index 1806b0cf44aa..188ac76de651 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -13,13 +13,13 @@ void baz() { // CHECK: !_22struct2EStruk22 = !cir.struct<"struct.Struk", i32> -// CHECK: cir.func @_ZN5StrukC2Ev(%arg0: !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return -// CHECK: cir.func @_ZN5StrukC1Ev(%arg0: !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN5StrukC1Ev(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index d4693580ab8c..6765d38d372d 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -27,14 +27,14 @@ void baz() { // CHECK: !_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> -// CHECK: cir.func @_ZN3Bar6methodEv(%arg0: !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 +// CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > @@ -43,7 +43,7 @@ void baz() { // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 +// CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} From fd0d87fae4b296ce3804ae37a6d408844aed2666 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 Aug 2022 17:21:45 -0700 Subject: [PATCH 0555/2301] [CIR][CodeGen] Support call arguments that take lvalue references Even though we can already return the appropriate type for rvalue we still need to fix some other things before it's also supported. Add test. --- clang/lib/CIR/CIRGenCall.cpp | 5 ++++- clang/lib/CIR/CIRGenExpr.cpp | 13 +++++++++++++ clang/lib/CIR/CIRGenFunction.h | 3 +++ clang/lib/CIR/CIRGenTypes.cpp | 8 +++++++- clang/test/CIR/CodeGen/lvalue-refs.cpp | 19 +++++++++++++++++++ 5 files changed, 46 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/lvalue-refs.cpp diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CIRGenCall.cpp index f4b3e986d3f3..7030f97ea7cf 100644 --- a/clang/lib/CIR/CIRGenCall.cpp +++ b/clang/lib/CIR/CIRGenCall.cpp @@ -496,7 +496,10 @@ void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, assert(type->isReferenceType() == E->isGLValue() && "reference binding to unmaterialized r-value!"); - assert(!E->isGLValue() && "NYI"); + if (E->isGLValue()) { + assert(E->getObjectKind() == OK_Ordinary); + return args.add(buildReferenceBindingToExpr(E), type); + } bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index bf74cbd0b713..e8a2b9507b57 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -1230,3 +1230,16 @@ bool CIRGenFunction::isWrappedCXXThis(const Expr *object) { } return true; } + +RValue CIRGenFunction::buildReferenceBindingToExpr(const Expr *E) { + // Emit the expression as an lvalue. + LValue LV = buildLValue(E); + assert(LV.isSimple()); + auto Value = LV.getPointer(); + + if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) { + assert(0 && "NYI"); + } + + return RValue::get(Value); +} \ No newline at end of file diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 1bc47206374c..b449a97009a8 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -426,6 +426,9 @@ class CIRGenFunction { void buildAggExpr(const clang::Expr *E, AggValueSlot Slot); + /// Emits a reference binding to the passed in expression. + RValue buildReferenceBindingToExpr(const Expr *E); + void buildCXXConstructExpr(const clang::CXXConstructExpr *E, AggValueSlot Dest); diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CIRGenTypes.cpp index 4cacaf406ddb..e337e462c163 100644 --- a/clang/lib/CIR/CIRGenTypes.cpp +++ b/clang/lib/CIR/CIRGenTypes.cpp @@ -454,7 +454,13 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } case Type::LValueReference: case Type::RValueReference: { - assert(0 && "not implemented"); + const ReferenceType *RTy = cast(Ty); + QualType ETy = RTy->getPointeeType(); + auto PointeeType = convertTypeForMem(ETy); + // TODO(cir): use Context.getTargetAddressSpace(ETy) on pointer + ResultType = + ::mlir::cir::PointerType::get(Builder.getContext(), PointeeType); + assert(ResultType && "Cannot get pointer type?"); break; } case Type::Pointer: { diff --git a/clang/test/CIR/CodeGen/lvalue-refs.cpp b/clang/test/CIR/CodeGen/lvalue-refs.cpp new file mode 100644 index 000000000000..6a76b9954a9d --- /dev/null +++ b/clang/test/CIR/CodeGen/lvalue-refs.cpp @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +struct String { + long size; +}; + +void split(String &S) {} + +// CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["S", paraminit] + +void foo() { + String s; + split(s); +} + +// CHECK: cir.func @_Z3foov() { +// CHECK: %0 = cir.alloca !_22struct2EString22, cir.ptr , ["s", uninitialized] +// CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () \ No newline at end of file From 8fc8253f8edfb6dcd2b726285f53bb759aa07d73 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 Aug 2022 15:24:54 -0700 Subject: [PATCH 0556/2301] [CIR][CodeGen][NFC] Add skeleton on CIRGenFunction for buildMaterializeTemporaryExpr, buildAnyExprToMem and CIR alloca creation helpers --- clang/lib/CIR/CIRGenFunction.h | 64 ++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index b449a97009a8..6678c70125a7 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -913,6 +913,70 @@ class CIRGenFunction { /// TODO(cir): add TBAAAccessInfo Address buildArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo = nullptr); + + /// Emits the code necessary to evaluate an arbitrary expression into the + /// given memory location. + void buildAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, + bool IsInitializer); + + /// CIR build helpers + /// ----------------- + + /// This creates an alloca and inserts it into the entry block if \p ArraySize + /// is nullptr, + /// + /// TODO(cir): ... otherwise inserts it at the current insertion point of + /// the builder. + /// The caller is responsible for setting an appropriate alignment on + /// the alloca. + /// + /// \p ArraySize is the number of array elements to be allocated if it + /// is not nullptr. + /// + /// LangAS::Default is the address space of pointers to local variables and + /// temporaries, as exposed in the source language. In certain + /// configurations, this is not the same as the alloca address space, and a + /// cast is needed to lift the pointer from the alloca AS into + /// LangAS::Default. This can happen when the target uses a restricted + /// address space for the stack but the source language requires + /// LangAS::Default to be a generic address space. The latter condition is + /// common for most programming languages; OpenCL is an exception in that + /// LangAS::Default is the private address space, which naturally maps + /// to the stack. + /// + /// Because the address of a temporary is often exposed to the program in + /// various ways, this function will perform the cast. The original alloca + /// instruction is returned through \p Alloca if it is not nullptr. + /// + /// The cast is not performaed in CreateTempAllocaWithoutCast. This is + /// more efficient if the caller knows that the address will not be exposed. + mlir::cir::AllocaOp CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, + const Twine &Name = "tmp", + mlir::Value ArraySize = nullptr); + Address CreateTempAlloca(mlir::Type Ty, CharUnits align, mlir::Location Loc, + const Twine &Name = "tmp", + mlir::Value ArraySize = nullptr, + Address *Alloca = nullptr); + Address CreateTempAllocaWithoutCast(mlir::Type Ty, CharUnits align, + mlir::Location Loc, + const Twine &Name = "tmp", + mlir::Value ArraySize = nullptr); + + /// Create a temporary memory object of the given type, with + /// appropriate alignmen and cast it to the default address space. Returns + /// the original alloca instruction by \p Alloca if it is not nullptr. + Address CreateMemTemp(QualType T, mlir::Location Loc, + const Twine &Name = "tmp", Address *Alloca = nullptr); + Address CreateMemTemp(QualType T, CharUnits Align, mlir::Location Loc, + const Twine &Name = "tmp", Address *Alloca = nullptr); + + /// Create a temporary memory object of the given type, with + /// appropriate alignment without casting it to the default address space. + Address CreateMemTempWithoutCast(QualType T, mlir::Location Loc, + const Twine &Name = "tmp"); + Address CreateMemTempWithoutCast(QualType T, CharUnits Align, + mlir::Location Loc, + const Twine &Name = "tmp"); }; } // namespace cir From 536cae595c0d449627a1341e000f78a586e4d836 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 Aug 2022 16:56:15 -0700 Subject: [PATCH 0557/2301] [CIR][CodeGen][NFC] Add another interface for buildAlloca and track getASTAllocaAddressSpace() --- clang/lib/CIR/CIRGenExpr.cpp | 16 ++++++++++------ clang/lib/CIR/CIRGenFunction.h | 3 +++ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index e8a2b9507b57..7f673b89320a 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -1097,7 +1097,7 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, } mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, - QualType ty, mlir::Location loc, + mlir::Type ty, mlir::Location loc, CharUnits alignment) { auto getAllocaInsertPositionOp = [&](mlir::Block **insertBlock) -> mlir::Operation * { @@ -1113,9 +1113,7 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, return &*lastAlloca; }; - auto localVarTy = getCIRType(ty); - auto localVarPtrTy = - mlir::cir::PointerType::get(builder.getContext(), localVarTy); + auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), ty); auto alignIntAttr = CGM.getSize(alignment); mlir::Value addr; @@ -1134,12 +1132,18 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, } addr = builder.create(loc, /*addr type*/ localVarPtrTy, - /*var type*/ localVarTy, name, - initStyle, alignIntAttr); + /*var type*/ ty, name, initStyle, + alignIntAttr); } return addr; } +mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, + QualType ty, mlir::Location loc, + CharUnits alignment) { + return buildAlloca(name, initStyle, getCIRType(ty), loc, alignment); +} + mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, SourceLocation Loc) { return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 6678c70125a7..1efbc6c32caf 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -233,6 +233,9 @@ class CIRGenFunction { mlir::Value buildAlloca(llvm::StringRef name, mlir::cir::InitStyle initStyle, clang::QualType ty, mlir::Location loc, clang::CharUnits alignment); + mlir::Value buildAlloca(llvm::StringRef name, mlir::cir::InitStyle initStyle, + mlir::Type ty, mlir::Location loc, + clang::CharUnits alignment); void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, clang::CharUnits alignment); From 4c0fbd99f53ff04fdbb3c540902662f85b27303b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 Aug 2022 17:50:16 -0700 Subject: [PATCH 0558/2301] [CIR][CodeGen] Support a common idiom of ctor initializer This touches over a bunch of interaction between different AST nodes, mainly MemberExpr and ImplicitCastExpr. - Add general skeleton for buildCastLValue and add implementation for CK_NoOp - Teach ScalarExprEmitter about VisitMemberExpr - Handle MemberExpr while building LValue's - Support getting decl refs to references, add loading refs helpers for that - Add testcase --- clang/lib/CIR/CIRGenExpr.cpp | 266 +++++++++++++++++- clang/lib/CIR/CIRGenExprScalar.cpp | 12 +- clang/lib/CIR/CIRGenFunction.h | 31 +- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 37 +++ 4 files changed, 336 insertions(+), 10 deletions(-) create mode 100644 clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 7f673b89320a..73e0f3ca77e6 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -375,8 +375,11 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { } // Drill into reference types. - assert(!VD->getType()->isReferenceType() && "NYI"); - LValue LV = makeAddrLValue(addr, T, AlignmentSource::Decl); + LValue LV = + VD->getType()->isReferenceType() + ? buildLoadOfReferenceLValue(addr, getLoc(E->getSourceRange()), + VD->getType(), AlignmentSource::Decl) + : makeAddrLValue(addr, T, AlignmentSource::Decl); assert(symbolTable.count(VD) && "should be already mapped"); @@ -986,6 +989,224 @@ LValue CIRGenFunction::buildStringLiteralLValue(const StringLiteral *E) { E->getType(), AlignmentSource::Decl); } +/// Casts are never lvalues unless that cast is to a reference type. If the cast +/// is to a reference, we can have the usual lvalue result, otherwise if a cast +/// is needed by the code generator in an lvalue context, then it must mean that +/// we need the address of an aggregate in order to access one of its members. +/// This can happen for all the reasons that casts are permitted with aggregate +/// result, including noop aggregate casts, and cast from scalar to union. +LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { + switch (E->getCastKind()) { + case CK_HLSLArrayRValue: + case CK_HLSLVectorTruncation: + case CK_ToVoid: + case CK_BitCast: + case CK_LValueToRValueBitCast: + case CK_ArrayToPointerDecay: + case CK_FunctionToPointerDecay: + case CK_NullToMemberPointer: + case CK_NullToPointer: + case CK_IntegralToPointer: + case CK_PointerToIntegral: + case CK_PointerToBoolean: + case CK_VectorSplat: + case CK_IntegralCast: + case CK_BooleanToSignedIntegral: + case CK_IntegralToBoolean: + case CK_IntegralToFloating: + case CK_FloatingToIntegral: + case CK_FloatingToBoolean: + case CK_FloatingCast: + case CK_FloatingRealToComplex: + case CK_FloatingComplexToReal: + case CK_FloatingComplexToBoolean: + case CK_FloatingComplexCast: + case CK_FloatingComplexToIntegralComplex: + case CK_IntegralRealToComplex: + case CK_IntegralComplexToReal: + case CK_IntegralComplexToBoolean: + case CK_IntegralComplexCast: + case CK_IntegralComplexToFloatingComplex: + case CK_DerivedToBaseMemberPointer: + case CK_BaseToDerivedMemberPointer: + case CK_MemberPointerToBoolean: + case CK_ReinterpretMemberPointer: + case CK_AnyPointerToBlockPointerCast: + case CK_ARCProduceObject: + case CK_ARCConsumeObject: + case CK_ARCReclaimReturnedObject: + case CK_ARCExtendBlockObject: + case CK_CopyAndAutoreleaseBlockObject: + case CK_IntToOCLSampler: + case CK_FloatingToFixedPoint: + case CK_FixedPointToFloating: + case CK_FixedPointCast: + case CK_FixedPointToBoolean: + case CK_FixedPointToIntegral: + case CK_IntegralToFixedPoint: + case CK_MatrixCast: + llvm_unreachable("NYI"); + + case CK_Dependent: + llvm_unreachable("dependent cast kind in IR gen!"); + + case CK_BuiltinFnToFnPtr: + llvm_unreachable("builtin functions are handled elsewhere"); + + // These are never l-values; just use the aggregate emission code. + case CK_NonAtomicToAtomic: + case CK_AtomicToNonAtomic: + assert(0 && "NYI"); + + case CK_Dynamic: { + assert(0 && "NYI"); + } + + case CK_ConstructorConversion: + case CK_UserDefinedConversion: + case CK_CPointerToObjCPointerCast: + case CK_BlockPointerToObjCPointerCast: + case CK_LValueToRValue: + assert(0 && "NYI"); + + case CK_NoOp: { + // CK_NoOp can model a qualification conversion, which can remove an array + // bound and change the IR type. + LValue LV = buildLValue(E->getSubExpr()); + if (LV.isSimple()) { + Address V = LV.getAddress(); + if (V.isValid()) { + auto T = getTypes().convertTypeForMem(E->getType()); + if (V.getElementType() != T) + assert(0 && "NYI"); + } + } + return LV; + } + + case CK_UncheckedDerivedToBase: + case CK_DerivedToBase: { + assert(0 && "NYI"); + } + case CK_ToUnion: + assert(0 && "NYI"); + case CK_BaseToDerived: { + assert(0 && "NYI"); + } + case CK_LValueBitCast: { + assert(0 && "NYI"); + } + case CK_AddressSpaceConversion: { + assert(0 && "NYI"); + } + case CK_ObjCObjectLValueCast: { + assert(0 && "NYI"); + } + case CK_ZeroToOCLOpaqueType: + llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid"); + } + + llvm_unreachable("Unhandled lvalue cast kind?"); +} + +// TODO(cir): candidate for common helper between LLVM and CIR codegen. +static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &CGF, + const MemberExpr *ME) { + if (auto *VD = dyn_cast(ME->getMemberDecl())) { + // Try to emit static variable member expressions as DREs. + return DeclRefExpr::Create( + CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD, + /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(), + ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse()); + } + return nullptr; +} + +LValue CIRGenFunction::buildCheckedLValue(const Expr *E, TypeCheckKind TCK) { + LValue LV; + if (SanOpts.has(SanitizerKind::ArrayBounds) && isa(E)) + assert(0 && "not implemented"); + else + LV = buildLValue(E); + if (!isa(E) && !LV.isBitField() && LV.isSimple()) { + if (const auto *ME = dyn_cast(E)) { + assert(0 && "not implemented"); + } + // TODO(cir): EmitTypeCheck equivalent. + assert(0 && "not implemented"); + } + return LV; +} + +// TODO(cir): candidate for common AST helper for LLVM and CIR codegen +bool CIRGenFunction::IsWrappedCXXThis(const Expr *Obj) { + const Expr *Base = Obj; + while (!isa(Base)) { + // The result of a dynamic_cast can be null. + if (isa(Base)) + return false; + + if (const auto *CE = dyn_cast(Base)) { + Base = CE->getSubExpr(); + } else if (const auto *PE = dyn_cast(Base)) { + Base = PE->getSubExpr(); + } else if (const auto *UO = dyn_cast(Base)) { + if (UO->getOpcode() == UO_Extension) + Base = UO->getSubExpr(); + else + return false; + } else { + return false; + } + } + return true; +} + +LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { + if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) { + assert(0 && "enable upon testcase that validates this path"); + // buildIgnoredExpr(E->getBase()); + // return buildDeclRefLValue(DRE); + } + + Expr *BaseExpr = E->getBase(); + // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. + LValue BaseLV; + if (E->isArrow()) { + LValueBaseInfo BaseInfo; + Address Addr = buildPointerWithAlignment(BaseExpr, &BaseInfo); + QualType PtrTy = BaseExpr->getType()->getPointeeType(); + SanitizerSet SkippedChecks; + bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr); + if (IsBaseCXXThis) + SkippedChecks.set(SanitizerKind::Alignment, true); + if (IsBaseCXXThis || isa(BaseExpr)) + SkippedChecks.set(SanitizerKind::Null, true); + buildTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, + /*Alignment=*/CharUnits::Zero(), SkippedChecks); + BaseLV = makeAddrLValue(Addr, PtrTy, BaseInfo); + } else + BaseLV = buildCheckedLValue(BaseExpr, TCK_MemberAccess); + + NamedDecl *ND = E->getMemberDecl(); + if (auto *Field = dyn_cast(ND)) { + LValue LV = buildLValueForField(BaseLV, Field); + assert(!UnimplementedFeature::setObjCGCLValueClass() && "NYI"); + if (getLangOpts().OpenMP) { + // If the member was explicitly marked as nontemporal, mark it as + // nontemporal. If the base lvalue is marked as nontemporal, mark access + // to children as nontemporal too. + assert(0 && "not implemented"); + } + return LV; + } + + if (const auto *FD = dyn_cast(ND)) + assert(0 && "not implemented"); + + llvm_unreachable("Unhandled member declaration!"); +} + /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. @@ -1014,6 +1235,21 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return buildUnaryOpLValue(cast(E)); case Expr::StringLiteralClass: return buildStringLiteralLValue(cast(E)); + case Expr::MemberExprClass: + return buildMemberExpr(cast(E)); + + case Expr::CStyleCastExprClass: + case Expr::CXXFunctionalCastExprClass: + case Expr::CXXStaticCastExprClass: + case Expr::CXXDynamicCastExprClass: + case Expr::CXXReinterpretCastExprClass: + case Expr::CXXConstCastExprClass: + case Expr::CXXAddrspaceCastExprClass: + case Expr::ObjCBridgedCastExprClass: + assert(0 && "Use buildCastLValue below, remove me when adding testcase"); + case Expr::ImplicitCastExprClass: + return buildCastLValue(cast(E)); + case Expr::ObjCPropertyRefExprClass: llvm_unreachable("cannot emit a property reference directly"); } @@ -1246,4 +1482,28 @@ RValue CIRGenFunction::buildReferenceBindingToExpr(const Expr *E) { } return RValue::get(Value); -} \ No newline at end of file +} + +Address CIRGenFunction::buildLoadOfReference(LValue RefLVal, mlir::Location Loc, + LValueBaseInfo *PointeeBaseInfo) { + assert(!RefLVal.isVolatile() && "NYI"); + mlir::cir::LoadOp Load = builder.create( + Loc, RefLVal.getAddress().getElementType(), + RefLVal.getAddress().getPointer()); + + // TODO(cir): DecorateInstructionWithTBAA relevant for us? + assert(!UnimplementedFeature::tbaa()); + + QualType PointeeType = RefLVal.getType()->getPointeeType(); + CharUnits Align = CGM.getNaturalTypeAlignment(PointeeType, PointeeBaseInfo, + /* forPointeeType= */ true); + return Address(Load, getTypes().convertTypeForMem(PointeeType), Align); +} + +LValue CIRGenFunction::buildLoadOfReferenceLValue(LValue RefLVal, + mlir::Location Loc) { + LValueBaseInfo PointeeBaseInfo; + Address PointeeAddr = buildLoadOfReference(RefLVal, Loc, &PointeeBaseInfo); + return makeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(), + PointeeBaseInfo); +} diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 838508b35d1d..e001a46a7f55 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -192,7 +192,7 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitMemberExpr(MemberExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitMemberExpr(MemberExpr *E); mlir::Value VisitExtVectorelementExpr(Expr *E) { llvm_unreachable("NYI"); } mlir::Value VisitCompoundLiteralEpxr(CompoundLiteralExpr *E) { llvm_unreachable("NYI"); @@ -911,6 +911,16 @@ mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *E) { return V; } +mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { + // TODO(cir): Folding all this constants sound like work for MLIR optimizers, + // keep assertion for now. + assert(!UnimplementedFeature::tryEmitAsConstant()); + Expr::EvalResult Result; + if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) + assert(0 && "NYI"); + return buildLoadOfLValue(E); +} + /// Emit a conversion from the specified type to the specified destination /// type, both of which are CIR scalar types. mlir::Value CIRGenFunction::buildScalarConversion(mlir::Value Src, diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 1efbc6c32caf..36f8a71fca9d 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -432,6 +432,8 @@ class CIRGenFunction { /// Emits a reference binding to the passed in expression. RValue buildReferenceBindingToExpr(const Expr *E); + LValue buildCastLValue(const CastExpr *E); + void buildCXXConstructExpr(const clang::CXXConstructExpr *E, AggValueSlot Dest); @@ -507,20 +509,31 @@ class CIRGenFunction { RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation Loc); - /// buildLoadOfLValue - Given an expression that represents a value lvalue, - /// this method emits the address of the lvalue, then loads the result as an - /// rvalue, returning the rvalue. + /// Given an expression that represents a value lvalue, this method emits the + /// address of the lvalue, then loads the result as an rvalue, returning the + /// rvalue. RValue buildLoadOfLValue(LValue LV, SourceLocation Loc); mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, clang::SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal = false); - /// buildLoadOfScalar - Load a scalar value from an address, taking care to - /// appropriately convert form the memory representation to the CIR value - /// representation. The l-value must be a simple l-value. + /// Load a scalar value from an address, taking care to appropriately convert + /// form the memory representation to the CIR value representation. The + /// l-value must be a simple l-value. mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); + Address buildLoadOfReference(LValue RefLVal, mlir::Location Loc, + LValueBaseInfo *PointeeBaseInfo = nullptr); + LValue buildLoadOfReferenceLValue(LValue RefLVal, mlir::Location Loc); + LValue + buildLoadOfReferenceLValue(Address RefAddr, mlir::Location Loc, + QualType RefTy, + AlignmentSource Source = AlignmentSource::Type) { + LValue RefLVal = makeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source)); + return buildLoadOfReferenceLValue(RefLVal, Loc); + } + void buildCallArgs( CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range ArgRange, @@ -922,6 +935,12 @@ class CIRGenFunction { void buildAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer); + /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts. + static bool IsWrappedCXXThis(const Expr *E); + + LValue buildCheckedLValue(const Expr *E, TypeCheckKind TCK); + LValue buildMemberExpr(const MemberExpr *E); + /// CIR build helpers /// ----------------- diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp new file mode 100644 index 000000000000..218ffc9b0f2c --- /dev/null +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +// TODO: support -mno-constructor-aliases + +struct String { + long size; + String(const String &s) : size{s.size} {} +// CHECK: cir.func linkonce_odr @_ZN6StringC2ERKS_ +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 +// CHECK: cir.store %arg1, %1 +// CHECK: %2 = cir.load %0 +// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "size"}> +// CHECK: %4 = cir.load %1 +// CHECK: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> +// CHECK: %6 = cir.load %5 : cir.ptr , i64 +// CHECK: cir.store %6, %3 : i64, cir.ptr +// CHECK: cir.return +// CHECK: } + + String() {} +}; + +void foo() { + String s; + String s1{s}; + // FIXME: s1 shouldn't be uninitialized. + + // cir.func @_Z3foov() { + // %0 = cir.alloca !_22struct2EString22, cir.ptr , ["s", uninitialized] {alignment = 8 : i64} + // %1 = cir.alloca !_22struct2EString22, cir.ptr , ["s1", uninitialized] {alignment = 8 : i64} + // cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () + // cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () + // cir.return + // } +} From 3a314368ed025038767ef8fe1e2e2963766db378 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Aug 2022 15:01:26 -0700 Subject: [PATCH 0559/2301] [CIR][CodeGen] Support building return stmt out for reference types --- clang/lib/CIR/CIRGenStmt.cpp | 6 +++++- clang/test/CIR/CodeGen/return.cpp | 14 ++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/return.cpp diff --git a/clang/lib/CIR/CIRGenStmt.cpp b/clang/lib/CIR/CIRGenStmt.cpp index 6eed97bb737e..85f805704d50 100644 --- a/clang/lib/CIR/CIRGenStmt.cpp +++ b/clang/lib/CIR/CIRGenStmt.cpp @@ -431,7 +431,11 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { } else if (!RV) { // Do nothing (return value is left uninitialized) } else if (FnRetTy->isReferenceType()) { - assert(0 && "not implemented"); + // If this function returns a reference, take the address of the expression + // rather than the value. + RValue Result = buildReferenceBindingToExpr(RV); + builder.create(loc, Result.getScalarVal(), + ReturnValue.getPointer()); } else { mlir::Value V = nullptr; switch (CIRGenFunction::getEvaluationKind(RV->getType())) { diff --git a/clang/test/CIR/CodeGen/return.cpp b/clang/test/CIR/CodeGen/return.cpp new file mode 100644 index 000000000000..27b0dbf0bfc4 --- /dev/null +++ b/clang/test/CIR/CodeGen/return.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +int &ret0(int &x) { + return x; +} + +// CHECK: cir.func @_Z4ret0Ri +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", paraminit] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > +// CHECK: %3 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: cir.return %3 : !cir.ptr From 088c2666357fa1f8463e598fafce1d4c6804a118 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Aug 2022 14:31:35 -0700 Subject: [PATCH 0560/2301] [CIR][CodeGen] Add some support for CXXStaticCastExprClass --- clang/lib/CIR/CIRGenExpr.cpp | 4 +++- clang/lib/CIR/CIRGenExprScalar.cpp | 16 +++++++++++++--- clang/test/CIR/CodeGen/cast.cpp | 16 ++++++++++++++++ 3 files changed, 32 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/cast.cpp diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 73e0f3ca77e6..2ae9b40ca0cb 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -1240,13 +1240,15 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { case Expr::CStyleCastExprClass: case Expr::CXXFunctionalCastExprClass: - case Expr::CXXStaticCastExprClass: case Expr::CXXDynamicCastExprClass: case Expr::CXXReinterpretCastExprClass: case Expr::CXXConstCastExprClass: case Expr::CXXAddrspaceCastExprClass: case Expr::ObjCBridgedCastExprClass: + emitError(getLoc(E->getExprLoc()), "l-value not implemented for '") + << E->getStmtClassName() << "'"; assert(0 && "Use buildCastLValue below, remove me when adding testcase"); + case Expr::CXXStaticCastExprClass: case Expr::ImplicitCastExprClass: return buildCastLValue(cast(E)); diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index e001a46a7f55..db66717004a8 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -208,7 +208,7 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *E) { - llvm_unreachable("NYI"); + return VisitCastExpr(E); } mlir::Value VisitCastExpr(CastExpr *E); mlir::Value VisitCallExpr(const CallExpr *E); @@ -769,8 +769,18 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { llvm_unreachable("NYI"); case CK_UserDefinedConversion: llvm_unreachable("NYI"); - case CK_NoOp: - llvm_unreachable("NYI"); + case CK_NoOp: { + auto V = Visit(const_cast(E)); + if (V) { + // CK_NoOp can model a pointer qualification conversion, which can remove + // an array bound and change the IR type. + // FIXME: Once pointee types are removed from IR, remove this. + auto T = CGF.convertType(DestTy); + if (T != V.getType()) + assert(0 && "NYI"); + } + return V; + } case CK_BaseToDerived: llvm_unreachable("NYI"); case CK_DerivedToBase: diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp new file mode 100644 index 000000000000..23b5cdd44f75 --- /dev/null +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +unsigned char cxxstaticcast_0(unsigned int x) { + return static_cast(x); +} + +// CHECK: cir.func @_Z15cxxstaticcast_0j +// CHECK: %0 = cir.alloca i32, cir.ptr , ["x", paraminit] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca i8, cir.ptr , ["__retval", uninitialized] {alignment = 1 : i64} +// CHECK: cir.store %arg0, %0 : i32, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , i32 +// CHECK: %3 = cir.cast(integral, %2 : i32), i8 +// CHECK: cir.store %3, %1 : i8, cir.ptr +// CHECK: %4 = cir.load %1 : cir.ptr , i8 +// CHECK: cir.return %4 : i8 +// CHECK: } From 7e6bf754fe5e4b61d104581034445e48db153572 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 23 Aug 2022 14:46:11 -0700 Subject: [PATCH 0561/2301] [CIR][CodeGen] Add basic support for ExprWithCleanupsClass, CXXOperatorCallExprClass and MaterializeTemporaryExprClass This is all dep work needed in order to support copy assignment, testcase is in the next commit. - Pave the way to handle MaterializeTemporaryExpr with some basic support in terms of createReferenceTemporary and pushTemporaryCleanup. - This includes a bunch of helper methods build* - It doesn't yet invoke dtors (on the todo list). --- clang/lib/CIR/Address.h | 6 + clang/lib/CIR/CIRGenExpr.cpp | 262 ++++++++++++++++++++++++++++++- clang/lib/CIR/CIRGenExprCXX.cpp | 23 ++- clang/lib/CIR/CIRGenFunction.cpp | 2 + clang/lib/CIR/CIRGenFunction.h | 7 + 5 files changed, 298 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Address.h b/clang/lib/CIR/Address.h index 8f371f13f746..140d6d883d25 100644 --- a/clang/lib/CIR/Address.h +++ b/clang/lib/CIR/Address.h @@ -54,6 +54,12 @@ class Address { static Address invalid() { return Address(nullptr); } bool isValid() const { return Pointer != nullptr; } + /// Return address with different pointer, but same element type and + /// alignment. + Address withPointer(mlir::Value NewPointer) const { + return Address(NewPointer, getElementType(), getAlignment()); + } + mlir::Value getPointer() const { // assert(isValid()); return Pointer; diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 2ae9b40ca0cb..96d224665d25 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -562,7 +562,10 @@ RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, return buildCXXMemberCallExpr(CE, ReturnValue); assert(!dyn_cast(E) && "CUDA NYI"); - assert(!dyn_cast(E) && "NYI"); + if (const auto *CE = dyn_cast(E)) + if (const CXXMethodDecl *MD = + dyn_cast_or_null(CE->getCalleeDecl())) + return buildCXXOperatorMemberCallExpr(CE, MD, ReturnValue); CIRGenCallee callee = buildCallee(E->getCallee()); @@ -1207,6 +1210,173 @@ LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { llvm_unreachable("Unhandled member declaration!"); } +LValue CIRGenFunction::buildCallExprLValue(const CallExpr *E) { + RValue RV = buildCallExpr(E); + + if (!RV.isScalar()) + return makeAddrLValue(RV.getAggregateAddress(), E->getType(), + AlignmentSource::Decl); + + assert(E->getCallReturnType(getContext())->isReferenceType() && + "Can't have a scalar return unless the return type is a " + "reference type!"); + + assert(0 && "remove me once there's a testcase to cover this"); + return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal().getDefiningOp(), + E->getType()); +} + +/// Evaluate an expression into a given memory location. +void CIRGenFunction::buildAnyExprToMem(const Expr *E, Address Location, + Qualifiers Quals, bool IsInit) { + // FIXME: This function should take an LValue as an argument. + switch (getEvaluationKind(E->getType())) { + case TEK_Complex: + assert(0 && "NYI"); + return; + + case TEK_Aggregate: { + buildAggExpr(E, AggValueSlot::forAddr(Location, Quals, + AggValueSlot::IsDestructed_t(IsInit), + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsAliased_t(!IsInit), + AggValueSlot::MayOverlap)); + return; + } + + case TEK_Scalar: { + assert(0 && "NYI"); + return; + } + } + llvm_unreachable("bad evaluation kind"); +} + +static Address createReferenceTemporary(CIRGenFunction &CGF, + const MaterializeTemporaryExpr *M, + const Expr *Inner, + Address *Alloca = nullptr) { + // TODO(cir): CGF.getTargetHooks(); + switch (M->getStorageDuration()) { + case SD_FullExpression: + case SD_Automatic: { + // TODO(cir): probably not needed / too LLVM specific? + // If we have a constant temporary array or record try to promote it into a + // constant global under the same rules a normal constant would've been + // promoted. This is easier on the optimizer and generally emits fewer + // instructions. + QualType Ty = Inner->getType(); + if (CGF.CGM.getCodeGenOpts().MergeAllConstants && + (Ty->isArrayType() || Ty->isRecordType()) && + CGF.CGM.isTypeConstant(Ty, true)) + assert(0 && "NYI"); + return CGF.CreateMemTemp(Ty, CGF.getLoc(M->getSourceRange()), "ref.tmp", + Alloca); + } + case SD_Thread: + case SD_Static: + assert(0 && "NYI"); + + case SD_Dynamic: + llvm_unreachable("temporary can't have dynamic storage duration"); + } + llvm_unreachable("unknown storage duration"); +} + +static void pushTemporaryCleanup(CIRGenFunction &CGF, + const MaterializeTemporaryExpr *M, + const Expr *E, Address ReferenceTemporary) { + // Objective-C++ ARC: + // If we are binding a reference to a temporary that has ownership, we + // need to perform retain/release operations on the temporary. + // + // FIXME: This should be looking at E, not M. + if (auto Lifetime = M->getType().getObjCLifetime()) { + assert(0 && "NYI"); + } + + CXXDestructorDecl *ReferenceTemporaryDtor = nullptr; + if (const RecordType *RT = + E->getType()->getBaseElementTypeUnsafe()->getAs()) { + // Get the destructor for the reference temporary. + auto *ClassDecl = cast(RT->getDecl()); + if (!ClassDecl->hasTrivialDestructor()) + ReferenceTemporaryDtor = ClassDecl->getDestructor(); + } + + if (!ReferenceTemporaryDtor) + return; + + // TODO(cir): Call the destructor for the temporary. + assert(0 && "NYI"); +} + +LValue CIRGenFunction::buildMaterializeTemporaryExpr( + const MaterializeTemporaryExpr *M) { + const Expr *E = M->getSubExpr(); + + assert((!M->getExtendingDecl() || !isa(M->getExtendingDecl()) || + !cast(M->getExtendingDecl())->isARCPseudoStrong()) && + "Reference should never be pseudo-strong!"); + + // FIXME: ideally this would use buildAnyExprToMem, however, we cannot do so + // as that will cause the lifetime adjustment to be lost for ARC + auto ownership = M->getType().getObjCLifetime(); + if (ownership != Qualifiers::OCL_None && + ownership != Qualifiers::OCL_ExplicitNone) { + assert(0 && "NYI"); + } + + SmallVector CommaLHSs; + SmallVector Adjustments; + E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments); + + for (const auto &Ignored : CommaLHSs) + buildIgnoredExpr(Ignored); + + if (const auto *opaque = dyn_cast(E)) + assert(0 && "NYI"); + + // Create and initialize the reference temporary. + Address Alloca = Address::invalid(); + Address Object = createReferenceTemporary(*this, M, E, &Alloca); + + if (auto Var = + dyn_cast(Object.getPointer().getDefiningOp())) { + // TODO(cir): add something akin to stripPointerCasts() to ptr above + assert(0 && "NYI"); + } else { + switch (M->getStorageDuration()) { + case SD_Automatic: + assert(0 && "NYI"); + break; + + case SD_FullExpression: { + if (!ShouldEmitLifetimeMarkers) + break; + assert(0 && "NYI"); + break; + } + + default: + break; + } + + buildAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true); + } + pushTemporaryCleanup(*this, M, E, Object); + + // Perform derived-to-base casts and/or field accesses, to get from the + // temporary object we created (and, potentially, for which we extended + // the lifetime) to the subobject we're binding the reference to. + for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) { + (void)Adjustment; + assert(0 && "NYI"); + } + + return makeAddrLValue(Object, M->getType(), AlignmentSource::Decl); +} + /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. @@ -1229,6 +1399,27 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { assert(!Ty->isAnyComplexType() && "complex types not implemented"); return buildCompoundAssignmentLValue(cast(E)); } + case Expr::UserDefinedLiteralClass: + assert(0 && "should fallback below, remove assert when testcase available"); + case Expr::CXXOperatorCallExprClass: + return buildCallExprLValue(cast(E)); + case Expr::ExprWithCleanupsClass: { + const auto *cleanups = cast(E); + // RunCleanupsScope Scope(*this); + LValue LV = buildLValue(cleanups->getSubExpr()); + if (LV.isSimple()) { + // Defend against branches out of gnu statement expressions surrounded by + // cleanups. + Address Addr = LV.getAddress(); + auto V = Addr.getPointer(); + // Scope.ForceCleanup({&V}); + return LValue::makeAddr(Addr.withPointer(V), LV.getType(), getContext(), + LV.getBaseInfo() /*TODO(cir):TBAA*/); + } + // FIXME: Is it possible to create an ExprWithCleanups that produces a + // bitfield lvalue or some other non-simple lvalue? + return LV; + } case Expr::DeclRefExprClass: return buildDeclRefLValue(cast(E)); case Expr::UnaryOperatorClass: @@ -1252,6 +1443,9 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { case Expr::ImplicitCastExprClass: return buildCastLValue(cast(E)); + case Expr::MaterializeTemporaryExprClass: + return buildMaterializeTemporaryExpr(cast(E)); + case Expr::ObjCPropertyRefExprClass: llvm_unreachable("cannot emit a property reference directly"); } @@ -1509,3 +1703,69 @@ LValue CIRGenFunction::buildLoadOfReferenceLValue(LValue RefLVal, return makeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(), PointeeBaseInfo); } + +//===----------------------------------------------------------------------===// +// CIR builder helpers +//===----------------------------------------------------------------------===// + +Address CIRGenFunction::CreateMemTemp(QualType Ty, mlir::Location Loc, + const Twine &Name, Address *Alloca) { + // FIXME: Should we prefer the preferred type alignment here? + return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Loc, Name, + Alloca); +} + +Address CIRGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, + mlir::Location Loc, const Twine &Name, + Address *Alloca) { + Address Result = + CreateTempAlloca(getTypes().convertTypeForMem(Ty), Align, Loc, Name, + /*ArraySize=*/nullptr, Alloca); + if (Ty->isConstantMatrixType()) { + assert(0 && "NYI"); + } + return Result; +} + +/// This creates a alloca and inserts it into the entry block. +Address CIRGenFunction::CreateTempAllocaWithoutCast(mlir::Type Ty, + CharUnits Align, + mlir::Location Loc, + const Twine &Name, + mlir::Value ArraySize) { + auto Alloca = CreateTempAlloca(Ty, Loc, Name, ArraySize); + Alloca.setAlignmentAttr(CGM.getSize(Align)); + return Address(Alloca, Ty, Align); +} + +/// CreateTempAlloca - This creates a alloca and inserts it into the entry +/// block. The alloca is casted to default address space if necessary. +Address CIRGenFunction::CreateTempAlloca(mlir::Type Ty, CharUnits Align, + mlir::Location Loc, const Twine &Name, + mlir::Value ArraySize, + Address *AllocaAddr) { + auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Loc, Name, ArraySize); + if (AllocaAddr) + *AllocaAddr = Alloca; + mlir::Value V = Alloca.getPointer(); + // Alloca always returns a pointer in alloca address space, which may + // be different from the type defined by the language. For example, + // in C++ the auto variables are in the default address space. Therefore + // cast alloca to the default address space when necessary. + assert(!UnimplementedFeature::getASTAllocaAddressSpace()); + return Address(V, Ty, Align); +} + +/// CreateTempAlloca - This creates an alloca and inserts it into the entry +/// block if \p ArraySize is nullptr, otherwise inserts it at the current +/// insertion point of the builder. +mlir::cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty, + mlir::Location Loc, + const Twine &Name, + mlir::Value ArraySize) { + if (ArraySize) + assert(0 && "NYI"); + return cast( + buildAlloca(Name.str(), InitStyle::uninitialized, Ty, Loc, CharUnits()) + .getDefiningOp()); +} diff --git a/clang/lib/CIR/CIRGenExprCXX.cpp b/clang/lib/CIR/CIRGenExprCXX.cpp index a90dcfe5349e..22228de52fc6 100644 --- a/clang/lib/CIR/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CIRGenExprCXX.cpp @@ -116,7 +116,17 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( CallArgList *RtlArgs = nullptr; LValue TrivialAssignmentRHS; if (auto *OCE = dyn_cast(CE)) { - llvm_unreachable("NYI"); + if (OCE->isAssignmentOp()) { + if (TrivialAssignment) { + TrivialAssignmentRHS = buildLValue(CE->getArg(1)); + } else { + assert(0 && "remove me once there's a testcase to cover this"); + RtlArgs = &RtlArgStorage; + buildCallArgs(*RtlArgs, MD->getType()->castAs(), + drop_begin(CE->arguments(), 1), CE->getDirectCallee(), + /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft); + } + } } LValue This; @@ -209,6 +219,17 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs); } +RValue +CIRGenFunction::buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, + const CXXMethodDecl *MD, + ReturnValueSlot ReturnValue) { + assert(MD->isInstance() && + "Trying to emit a member call expr on a static method!"); + return buildCXXMemberOrOperatorMemberCallExpr( + E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr, + /*IsArrow=*/false, E->getArg(0)); +} + void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest) { assert(!Dest.isIgnored() && "Must have a destination!"); diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 610d1d667edc..9ef2443f02e2 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -671,6 +671,8 @@ void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { /// an l-vlaue withi the natural pointee alignment of T. LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Operation *Op, QualType T) { + // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps + // assert on the result type first. LValueBaseInfo BaseInfo; CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, /* for PointeeType= */ true); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 36f8a71fca9d..09cff90226df 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -462,6 +462,9 @@ class CIRGenFunction { ReturnValueSlot ReturnValue, bool HasQualifier, clang::NestedNameSpecifier *Qualifier, bool IsArrow, const clang::Expr *Base); + RValue buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, + const CXXMethodDecl *MD, + ReturnValueSlot ReturnValue); mlir::Value buildCXXNewExpr(const CXXNewExpr *E); @@ -557,6 +560,8 @@ class CIRGenFunction { void buildCallArg(CallArgList &args, const clang::Expr *E, clang::QualType ArgType); + LValue buildCallExprLValue(const CallExpr *E); + /// buildAnyExprToTemp - Similarly to buildAnyExpr(), however, the result will /// always be accessible even if no aggregate location is provided. RValue buildAnyExprToTemp(const clang::Expr *E); @@ -702,6 +707,8 @@ class CIRGenFunction { Address getAllocatedAddress() const { return Addr; } }; + LValue buildMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); + /// Emit the alloca and debug information for a /// local variable. Does not emit initialization or destruction. AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D); From 22467c5255b1707b60df6011b84a09e6db5ef756 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 Aug 2022 18:21:17 -0700 Subject: [PATCH 0562/2301] [CIR][CodeGen] Add machinery for building implicit assignment operator body Add more infra to complete some CXXOperatorCallExprClass support. - Do not optimize trivial assignment (like LLVM does). - This adds a testcase that covers this commits and previous one. - FIXME: add a testcase and insert scope (currently missing) --- clang/lib/CIR/CIRGenClass.cpp | 177 ++++++++++++++++++++- clang/lib/CIR/CIRGenExpr.cpp | 12 +- clang/lib/CIR/CIRGenExprAgg.cpp | 97 +++++++++++ clang/lib/CIR/CIRGenExprCXX.cpp | 22 ++- clang/lib/CIR/CIRGenExprScalar.cpp | 8 +- clang/lib/CIR/CIRGenFunction.cpp | 4 +- clang/lib/CIR/CIRGenFunction.h | 34 +++- clang/test/CIR/CodeGen/assign-operator.cpp | 79 +++++++++ 8 files changed, 422 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/CodeGen/assign-operator.cpp diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CIRGenClass.cpp index 3e0a27e1ae9f..2782ce631803 100644 --- a/clang/lib/CIR/CIRGenClass.cpp +++ b/clang/lib/CIR/CIRGenClass.cpp @@ -15,6 +15,7 @@ #include "UnimplementedFeatureGuarding.h" #include "clang/AST/RecordLayout.h" +#include "clang/Basic/TargetBuiltins.h" using namespace clang; using namespace cir; @@ -53,7 +54,47 @@ bool CIRGenFunction::IsConstructorDelegationValid( return true; } +/// TODO(cir): strong candidate for AST helper to be shared between LLVM and CIR +/// codegen. +static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) { + auto *CD = dyn_cast(D); + if (!(CD && CD->isCopyOrMoveConstructor()) && + !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator()) + return false; + + // We can emit a memcpy for a trivial copy or move constructor/assignment. + if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) + return true; + + // We *must* emit a memcpy for a defaulted union copy or move op. + if (D->getParent()->isUnion() && D->isDefaulted()) + return true; + + return false; +} + namespace { +/// TODO(cir): a lot of what we see under this namespace is a strong candidate +/// to be shared between LLVM and CIR codegen. + +/// RAII object to indicate that codegen is copying the value representation +/// instead of the object representation. Useful when copying a struct or +/// class which has uninitialized members and we're only performing +/// lvalue-to-rvalue conversion on the object but not its members. +class CopyingValueRepresentation { +public: + explicit CopyingValueRepresentation(CIRGenFunction &CGF) + : CGF(CGF), OldSanOpts(CGF.SanOpts) { + CGF.SanOpts.set(SanitizerKind::Bool, false); + CGF.SanOpts.set(SanitizerKind::Enum, false); + } + ~CopyingValueRepresentation() { CGF.SanOpts = OldSanOpts; } + +private: + CIRGenFunction &CGF; + SanitizerSet OldSanOpts; +}; + class FieldMemcpyizer { public: FieldMemcpyizer(CIRGenFunction &CGF, const CXXRecordDecl *ClassDecl, @@ -289,6 +330,118 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { SmallVector AggregatedInits; }; +class AssignmentMemcpyizer : public FieldMemcpyizer { +private: + // Returns the memcpyable field copied by the given statement, if one + // exists. Otherwise returns null. + FieldDecl *getMemcpyableField(Stmt *S) { + if (!AssignmentsMemcpyable) + return nullptr; + if (BinaryOperator *BO = dyn_cast(S)) { + // Recognise trivial assignments. + if (BO->getOpcode() != BO_Assign) + return nullptr; + MemberExpr *ME = dyn_cast(BO->getLHS()); + if (!ME) + return nullptr; + FieldDecl *Field = dyn_cast(ME->getMemberDecl()); + if (!Field || !isMemcpyableField(Field)) + return nullptr; + Stmt *RHS = BO->getRHS(); + if (ImplicitCastExpr *EC = dyn_cast(RHS)) + RHS = EC->getSubExpr(); + if (!RHS) + return nullptr; + if (MemberExpr *ME2 = dyn_cast(RHS)) { + if (ME2->getMemberDecl() == Field) + return Field; + } + return nullptr; + } else if (CXXMemberCallExpr *MCE = dyn_cast(S)) { + CXXMethodDecl *MD = dyn_cast(MCE->getCalleeDecl()); + if (!(MD && isMemcpyEquivalentSpecialMember(MD))) + return nullptr; + MemberExpr *IOA = dyn_cast(MCE->getImplicitObjectArgument()); + if (!IOA) + return nullptr; + FieldDecl *Field = dyn_cast(IOA->getMemberDecl()); + if (!Field || !isMemcpyableField(Field)) + return nullptr; + MemberExpr *Arg0 = dyn_cast(MCE->getArg(0)); + if (!Arg0 || Field != dyn_cast(Arg0->getMemberDecl())) + return nullptr; + return Field; + } else if (CallExpr *CE = dyn_cast(S)) { + FunctionDecl *FD = dyn_cast(CE->getCalleeDecl()); + if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy) + return nullptr; + Expr *DstPtr = CE->getArg(0); + if (ImplicitCastExpr *DC = dyn_cast(DstPtr)) + DstPtr = DC->getSubExpr(); + UnaryOperator *DUO = dyn_cast(DstPtr); + if (!DUO || DUO->getOpcode() != UO_AddrOf) + return nullptr; + MemberExpr *ME = dyn_cast(DUO->getSubExpr()); + if (!ME) + return nullptr; + FieldDecl *Field = dyn_cast(ME->getMemberDecl()); + if (!Field || !isMemcpyableField(Field)) + return nullptr; + Expr *SrcPtr = CE->getArg(1); + if (ImplicitCastExpr *SC = dyn_cast(SrcPtr)) + SrcPtr = SC->getSubExpr(); + UnaryOperator *SUO = dyn_cast(SrcPtr); + if (!SUO || SUO->getOpcode() != UO_AddrOf) + return nullptr; + MemberExpr *ME2 = dyn_cast(SUO->getSubExpr()); + if (!ME2 || Field != dyn_cast(ME2->getMemberDecl())) + return nullptr; + return Field; + } + + return nullptr; + } + + bool AssignmentsMemcpyable; + SmallVector AggregatedStmts; + +public: + AssignmentMemcpyizer(CIRGenFunction &CGF, const CXXMethodDecl *AD, + FunctionArgList &Args) + : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]), + AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) { + assert(Args.size() == 2); + } + + void emitAssignment(Stmt *S) { + FieldDecl *F = getMemcpyableField(S); + if (F) { + addMemcpyableField(F); + AggregatedStmts.push_back(S); + } else { + emitAggregatedStmts(); + if (CGF.buildStmt(S, /*useCurrentScope=*/true).failed()) + llvm_unreachable("Should not get here!"); + } + } + + void emitAggregatedStmts() { + if (AggregatedStmts.size() <= 1) { + if (!AggregatedStmts.empty()) { + CopyingValueRepresentation CVR(CGF); + if (CGF.buildStmt(AggregatedStmts[0], /*useCurrentScope=*/true) + .failed()) + llvm_unreachable("Should not get here!"); + } + reset(); + } + + buildMemcpy(); + AggregatedStmts.clear(); + } + + void finish() { emitAggregatedStmts(); } +}; } // namespace /// buildCtorPrologue - This routine generates necessary code to initialize base @@ -427,8 +580,10 @@ Address CIRGenFunction::LoadCXXThisAddress() { CXXThisAlignment = CGM.getClassPointerAlignment(RD); } - // Consider how to do this if we ever have multiple returns - auto Result = LoadCXXThis()->getOpResult(0); + // TODO(cir): consider how to do this if we ever have multiple returns + auto *t = LoadCXXThis(); + assert(t->getNumResults() == 1); + auto Result = t->getOpResult(0); return Address(Result, CXXThisAlignment); } @@ -491,3 +646,21 @@ void CIRGenFunction::buildDelegateCXXConstructorCall( AggValueSlot::MayOverlap, Loc, /*NewPointerIsChecked=*/true); } + +void CIRGenFunction::buildImplicitAssignmentOperatorBody( + FunctionArgList &Args) { + const CXXMethodDecl *AssignOp = cast(CurGD.getDecl()); + const Stmt *RootS = AssignOp->getBody(); + assert(isa(RootS) && + "Body of an implicit assignment operator should be compound stmt."); + const CompoundStmt *RootCS = cast(RootS); + + // LexicalScope Scope(*this, RootCS->getSourceRange()); + // FIXME: add all of the below under a new scope. + + assert(!UnimplementedFeature::incrementProfileCounter()); + AssignmentMemcpyizer AM(*this, AssignOp, Args); + for (auto *I : RootCS->body()) + AM.emitAssignment(I); + AM.finish(); +} diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CIRGenExpr.cpp index 96d224665d25..643344bf145a 100644 --- a/clang/lib/CIR/CIRGenExpr.cpp +++ b/clang/lib/CIR/CIRGenExpr.cpp @@ -1132,11 +1132,16 @@ LValue CIRGenFunction::buildCheckedLValue(const Expr *E, TypeCheckKind TCK) { else LV = buildLValue(E); if (!isa(E) && !LV.isBitField() && LV.isSimple()) { + SanitizerSet SkippedChecks; if (const auto *ME = dyn_cast(E)) { - assert(0 && "not implemented"); + bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase()); + if (IsBaseCXXThis) + SkippedChecks.set(SanitizerKind::Alignment, true); + if (IsBaseCXXThis || isa(ME->getBase())) + SkippedChecks.set(SanitizerKind::Null, true); } - // TODO(cir): EmitTypeCheck equivalent. - assert(0 && "not implemented"); + buildTypeCheck(TCK, E->getExprLoc(), LV.getPointer(), E->getType(), + LV.getAlignment(), SkippedChecks); } return LV; } @@ -1221,7 +1226,6 @@ LValue CIRGenFunction::buildCallExprLValue(const CallExpr *E) { "Can't have a scalar return unless the return type is a " "reference type!"); - assert(0 && "remove me once there's a testcase to cover this"); return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal().getDefiningOp(), E->getType()); } diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CIRGenExprAgg.cpp index 0fa2c517c63e..0231ba7e5df8 100644 --- a/clang/lib/CIR/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CIRGenExprAgg.cpp @@ -313,3 +313,100 @@ void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast(E)); } + +void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty, + AggValueSlot::Overlap_t MayOverlap, + bool isVolatile) { + // TODO(cir): this function needs improvements, commented code for now since + // this will be touched again soon. + assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex"); + + // Address DestPtr = Dest.getAddress(); + // Address SrcPtr = Src.getAddress(); + + if (getLangOpts().CPlusPlus) { + if (const RecordType *RT = Ty->getAs()) { + CXXRecordDecl *Record = cast(RT->getDecl()); + assert((Record->hasTrivialCopyConstructor() || + Record->hasTrivialCopyAssignment() || + Record->hasTrivialMoveConstructor() || + Record->hasTrivialMoveAssignment() || + Record->hasAttr() || Record->isUnion()) && + "Trying to aggregate-copy a type without a trivial copy/move " + "constructor or assignment operator"); + // Ignore empty classes in C++. + if (Record->isEmpty()) + return; + } + } + + if (getLangOpts().CUDAIsDevice) { + assert(0 && "NYI"); + } + + // Aggregate assignment turns into llvm.memcpy. This is almost valid per + // C99 6.5.16.1p3, which states "If the value being stored in an object is + // read from another object that overlaps in anyway the storage of the first + // object, then the overlap shall be exact and the two objects shall have + // qualified or unqualified versions of a compatible type." + // + // memcpy is not defined if the source and destination pointers are exactly + // equal, but other compilers do this optimization, and almost every memcpy + // implementation handles this case safely. If there is a libc that does not + // safely handle this, we can add a target hook. + + // Get data size info for this aggregate. Don't copy the tail padding if this + // might be a potentially-overlapping subobject, since the tail padding might + // be occupied by a different object. Otherwise, copying it is fine. + TypeInfoChars TypeInfo; + if (MayOverlap) + TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty); + else + TypeInfo = getContext().getTypeInfoInChars(Ty); + + llvm::Value *SizeVal = nullptr; + if (TypeInfo.Width.isZero()) { + assert(0 && "NYI"); + } + if (!SizeVal) { + assert(0 && "NYI"); + // SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()); + } + + // FIXME: If we have a volatile struct, the optimizer can remove what might + // appear to be `extra' memory ops: + // + // volatile struct { int i; } a, b; + // + // int main() { + // a = b; + // a = b; + // } + // + // we need to use a different call here. We use isVolatile to indicate when + // either the source or the destination is volatile. + + assert(0 && "NYI"); + // DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty); + // SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty); + + // Don't do any of the memmove_collectable tests if GC isn't set. + if (CGM.getLangOpts().getGC() == LangOptions::NonGC) { + // fall through + } else if (const RecordType *RecordTy = Ty->getAs()) { + assert(0 && "NYI"); + } else if (Ty->isArrayType()) { + assert(0 && "NYI"); + } + + assert(0 && "NYI"); + // auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile); + + // Determine the metadata to describe the position of any padding in this + // memcpy, as well as the TBAA tags for the members of the struct, in case + // the optimizer wishes to expand it in to scalar memory operations. + assert(!UnimplementedFeature::tbaa()); + if (CGM.getCodeGenOpts().NewStructPathTBAA) { + assert(0 && "NYI"); + } +} diff --git a/clang/lib/CIR/CIRGenExprCXX.cpp b/clang/lib/CIR/CIRGenExprCXX.cpp index 22228de52fc6..b53df4fa2a8d 100644 --- a/clang/lib/CIR/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CIRGenExprCXX.cpp @@ -141,7 +141,27 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( } if (TrivialForCodegen) { - llvm_unreachable("NYI"); + if (isa(MD)) + return RValue::get(nullptr); + + if (TrivialAssignment) { + // We don't like to generate the trivial copy/move assignment operator + // when it isn't necessary; just produce the proper effect here. + // It's important that we use the result of EmitLValue here rather than + // emitting call arguments, in order to preserve TBAA information from + // the RHS. + // + // TODO(cir): once there are testcases evaluate if CIR needs to abstract + // this away or optimizing is fine. + // LValue RHS = isa(CE) ? TrivialAssignmentRHS + // : + // buildLValue(*CE->arg_begin()); + // buildAggregateAssign(This, RHS, CE->getType()); + // return RValue::get(This.getPointer()); + } else { + assert(MD->getParent()->mayInsertExtraPadding() && + "unknown trivial member function"); + } } // Compute the function type we're calling diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index db66717004a8..69fc607efa9e 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -268,7 +268,13 @@ class ScalarExprEmitter : public StmtVisitor { CIRGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE); return Visit(DIE->getExpr()); } - mlir::Value VisitCXXThisExpr(CXXThisExpr *E) { llvm_unreachable("NYI"); } + + mlir::Value VisitCXXThisExpr(CXXThisExpr *TE) { + auto *t = CGF.LoadCXXThis(); + assert(t->getNumResults() == 1); + return t->getOpResult(0); + } + mlir::Value VisitExprWithCleanups(ExprWithCleanups *E) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CIRGenFunction.cpp index 9ef2443f02e2..83a707a1c927 100644 --- a/clang/lib/CIR/CIRGenFunction.cpp +++ b/clang/lib/CIR/CIRGenFunction.cpp @@ -466,7 +466,9 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, } else if (FD->isDefaulted() && isa(FD) && (cast(FD)->isCopyAssignmentOperator() || cast(FD)->isMoveAssignmentOperator())) { - llvm_unreachable("NYI"); + // Implicit copy-assignment gets the same special treatment as implicit + // copy-constructors. + buildImplicitAssignmentOperatorBody(Args); } else if (Body) { if (mlir::failed(buildFunctionBody(Body))) { Fn.erase(); diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CIRGenFunction.h index 09cff90226df..c14f4025e645 100644 --- a/clang/lib/CIR/CIRGenFunction.h +++ b/clang/lib/CIR/CIRGenFunction.h @@ -536,6 +536,7 @@ class CIRGenFunction { LValue RefLVal = makeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source)); return buildLoadOfReferenceLValue(RefLVal, Loc); } + void buildImplicitAssignmentOperatorBody(FunctionArgList &Args); void buildCallArgs( CallArgList &Args, PrototypeWrapper Prototype, @@ -855,8 +856,9 @@ class CIRGenFunction { LValue MakeNaturalAlignPointeeAddrLValue(mlir::Operation *Op, clang::QualType T); - /// LoadCXXThis - Load the value for 'this'. This function is only valid while - /// generating code for an C++ member function. + /// Load the value for 'this'. This function is only valid while generating + /// code for an C++ member function. + /// FIXME(cir): this should return a mlir::Value! mlir::Operation *LoadCXXThis() { assert(CXXThisValue && "no 'this' value for this function"); return CXXThisValue; @@ -942,12 +944,40 @@ class CIRGenFunction { void buildAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer); + /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts. static bool IsWrappedCXXThis(const Expr *E); LValue buildCheckedLValue(const Expr *E, TypeCheckKind TCK); LValue buildMemberExpr(const MemberExpr *E); + /// returns true if aggregate type has a volatile member. + /// TODO(cir): this could be a common AST helper between LLVM / CIR. + bool hasVolatileMember(QualType T) { + if (const RecordType *RT = T->getAs()) { + const RecordDecl *RD = cast(RT->getDecl()); + return RD->hasVolatileMember(); + } + return false; + } + + /// Emit an aggregate assignment. + void buildAggregateAssign(LValue Dest, LValue Src, QualType EltTy) { + bool IsVolatile = hasVolatileMember(EltTy); + buildAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile); + } + + /// Emit an aggregate copy. + /// + /// \param isVolatile \c true iff either the source or the destination is + /// volatile. + /// \param MayOverlap Whether the tail padding of the destination might be + /// occupied by some other object. More efficient code can often be + /// generated if not. + void buildAggregateCopy(LValue Dest, LValue Src, QualType EltTy, + AggValueSlot::Overlap_t MayOverlap, + bool isVolatile = false); + /// CIR build helpers /// ----------------- diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp new file mode 100644 index 000000000000..1530866091f6 --- /dev/null +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -0,0 +1,79 @@ +// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +int strlen(char const *); + +struct String { + long size; + long capacity; + + String() : size{0}, capacity{0} {} + String(char const *s) : size{strlen(s)}, capacity{size} {} + // StringView::StringView(String const&) + // + // CHECK: cir.func linkonce_odr @_ZN10StringViewC2ERK6String + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} + // CHECK: cir.store %arg0, %0 : !cir.ptr + // CHECK: cir.store %arg1, %1 : !cir.ptr + // CHECK: %2 = cir.load %0 : cir.ptr > + // CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "size"}> + // CHECK: %4 = cir.load %1 : cir.ptr > + // CHECK: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> + // CHECK: %6 = cir.load %5 : cir.ptr , i64 + // CHECK: cir.store %6, %3 : i64, cir.ptr + // CHECK: cir.return + // CHECK: } + + // StringView::operator=(StringView&&) + // + // CHECK: cir.func linkonce_odr @_ZN10StringViewaSEOS_ + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["", paraminit] {alignment = 8 : i64} + // CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} + // CHECK: cir.store %arg0, %0 : !cir.ptr + // CHECK: cir.store %arg1, %1 : !cir.ptr + // CHECK: %3 = cir.load deref %0 : cir.ptr > + // CHECK: %4 = cir.load %1 : cir.ptr > + // CHECK: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> + // CHECK: %6 = cir.load %5 : cir.ptr , i64 + // CHECK: %7 = "cir.struct_element_addr"(%0) <{member_name = "size"}> + // CHECK: cir.store %6, %7 : i64, cir.ptr + // CHECK: cir.store %3, %2 : !cir.ptr + // CHECK: %8 = cir.load %2 : cir.ptr > + // CHECK: cir.return %8 : !cir.ptr + // CHECK: } +}; + +struct StringView { + long size; + + StringView(const String &s) : size{s.size} {} + StringView() : size{0} {} +}; + +int main() { + StringView sv; + { + String s = "Hi"; + sv = s; + } +} + +// CHECK: cir.func @main() -> i32 { +// CHECK: %0 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !_22struct2EStringView22, cir.ptr , ["sv", uninitialized] {alignment = 8 : i64} +// CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () +// CHECK: cir.scope { +// CHECK: %3 = cir.alloca !_22struct2EString22, cir.ptr , ["s", uninitialized] {alignment = 8 : i64} +// CHECK: %4 = cir.alloca !_22struct2EStringView22, cir.ptr , ["ref.tmp", uninitialized] {alignment = 8 : i64} +// CHECK: %5 = cir.alloca !_22struct2EStringView22, cir.ptr , ["ref.tmp", uninitialized] {alignment = 8 : i64} +// CHECK: %6 = cir.get_global @".str" : cir.ptr > +// CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_ZN6StringC2EPKc(%3, %7) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN10StringViewC2ERK6String(%4, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN10StringViewC2ERK6String(%5, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %8 = cir.call @_ZN10StringViewaSEOS_(%1, %5) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: } +// CHECK: %2 = cir.load %0 : cir.ptr , i32 +// CHECK: cir.return %2 : i32 +// CHECK: } From 1e5d4c3fcb1e2005a4b31a402e51ff4f771338cb Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Sun, 4 Sep 2022 00:59:18 -0700 Subject: [PATCH 0563/2301] [CIR][CodeGen] Add initial support for comma operator --- clang/lib/CIR/CIRGenExprScalar.cpp | 4 +++- clang/test/CIR/CodeGen/comma.cpp | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/comma.cpp diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CIRGenExprScalar.cpp index 69fc607efa9e..b16c2ac59861 100644 --- a/clang/lib/CIR/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CIRGenExprScalar.cpp @@ -328,7 +328,9 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitBinLAnd(const BinaryOperator *E) { llvm_unreachable("NYI"); } mlir::Value VisitBinLOr(const BinaryOperator *E) { llvm_unreachable("NYI"); } mlir::Value VisitBinComma(const BinaryOperator *E) { - llvm_unreachable("NYI"); + CGF.buildIgnoredExpr(E->getLHS()); + // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen. + return Visit(E->getRHS()); } mlir::Value VisitBinPtrMemD(const Expr *E) { llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/comma.cpp b/clang/test/CIR/CodeGen/comma.cpp new file mode 100644 index 000000000000..2809ec36fdf8 --- /dev/null +++ b/clang/test/CIR/CodeGen/comma.cpp @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int c0() { + int a = 1; + int b = 2; + return b + 1, a; +} + +// CHECK: cir.func @_Z2c0v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval", uninitialized] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", cinit] +// CHECK: %[[#B:]] = cir.alloca i32, cir.ptr , ["b", cinit] +// CHECK: %[[#LOADED_B:]] = cir.load %[[#B]] : cir.ptr , i32 +// CHECK: %[[#]] = cir.binop(add, %[[#LOADED_B]], %[[#]]) : i32 +// CHECK: %[[#LOADED_A:]] = cir.load %[[#A]] : cir.ptr , i32 +// CHECK: cir.store %[[#LOADED_A]], %[[#RET]] : i32, cir.ptr From 4e303bc50a86c050139577e0bf1ac007b2fee38f Mon Sep 17 00:00:00 2001 From: Jingsong Shang Date: Wed, 3 Aug 2022 23:12:46 -0700 Subject: [PATCH 0564/2301] [CIR][MLIR] Add support to lower binary operation in ClangIR to MLIR Dialects - Add support to lower binary operations in CIR to MLIR Dialects. - Add test case to test on lowering from CIR to MLIR Dialects then to LLVM Dialects. - Check binop-int.cir for binary operations on integers and check binop-fp.cir for floating-point numbers. --- clang/lib/CIR/LowerToLLVM.cpp | 95 +++++++++++++++++++++++++- clang/test/CIR/CIRToLLVM/binop-fp.cir | 69 +++++++++++++++++++ clang/test/CIR/CIRToLLVM/binop-int.cir | 76 +++++++++++++++++++++ 3 files changed, 239 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CIRToLLVM/binop-fp.cir create mode 100644 clang/test/CIR/CIRToLLVM/binop-int.cir diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 54651e092f05..d894552fb358 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -197,9 +197,101 @@ class CIRFuncLowering : public mlir::OpRewritePattern { } }; +class CIRBinOpLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BinOp op, + mlir::PatternRewriter &rewriter) const override { + assert((op.getLhs().getType() == op.getRhs().getType()) && + "inconsistent operands' types not supported yet"); + mlir::Type type = op.getRhs().getType(); + assert((type.isa() || type.isa()) && + "operand type not supported yet"); + + switch (op.getKind()) { + case mlir::cir::BinOpKind::Add: + if (type.isa()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Sub: + if (type.isa()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Mul: + if (type.isa()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Div: + if (type.isa()) { + if (type.isSignedInteger()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + } else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Rem: + if (type.isa()) { + if (type.isSignedInteger()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + } else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::And: + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Or: + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Xor: + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Shl: + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Shr: + if (type.isSignedInteger()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + } + + return mlir::LogicalResult::success(); + } +}; + void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { patterns.add(patterns.getContext()); + CIRConstantLowering, CIRBinOpLowering>(patterns.getContext()); } void ConvertCIRToLLVMPass::runOnOperation() { @@ -230,6 +322,7 @@ void ConvertCIRToMemRefPass::runOnOperation() { target .addLegalDialect(); + target.addIllegalOp(); mlir::RewritePatternSet patterns(&getContext()); populateCIRToMemRefConversionPatterns(patterns); diff --git a/clang/test/CIR/CIRToLLVM/binop-fp.cir b/clang/test/CIR/CIRToLLVM/binop-fp.cir new file mode 100644 index 000000000000..30c958826c1a --- /dev/null +++ b/clang/test/CIR/CIRToLLVM/binop-fp.cir @@ -0,0 +1,69 @@ +// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// XFAIL: * + +module { + cir.func @foo() { + %0 = cir.alloca f32, cir.ptr , ["c", uninitialized] {alignment = 4 : i64} + %1 = cir.alloca f32, cir.ptr , ["d", uninitialized] {alignment = 4 : i64} + %2 = cir.alloca f32, cir.ptr , ["y", cinit] {alignment = 4 : i64} + %3 = cir.alloca f64, cir.ptr , ["e", uninitialized] {alignment = 8 : i64} + %4 = cir.alloca f64, cir.ptr , ["f", uninitialized] {alignment = 8 : i64} + %5 = cir.alloca f64, cir.ptr , ["g", cinit] {alignment = 8 : i64} + %6 = cir.load %0 : cir.ptr , f32 + %7 = cir.load %1 : cir.ptr , f32 + %8 = cir.binop(mul, %6, %7) : f32 + cir.store %8, %2 : f32, cir.ptr + %9 = cir.load %2 : cir.ptr , f32 + %10 = cir.load %1 : cir.ptr , f32 + %11 = cir.binop(div, %9, %10) : f32 + cir.store %11, %2 : f32, cir.ptr + %12 = cir.load %2 : cir.ptr , f32 + %13 = cir.load %1 : cir.ptr , f32 + %14 = cir.binop(add, %12, %13) : f32 + cir.store %14, %2 : f32, cir.ptr + %15 = cir.load %2 : cir.ptr , f32 + %16 = cir.load %1 : cir.ptr , f32 + %17 = cir.binop(sub, %15, %16) : f32 + cir.store %17, %2 : f32, cir.ptr + %18 = cir.load %3 : cir.ptr , f64 + %19 = cir.load %4 : cir.ptr , f64 + %20 = cir.binop(add, %18, %19) : f64 + cir.store %20, %5 : f64, cir.ptr + %21 = cir.load %3 : cir.ptr , f64 + %22 = cir.load %4 : cir.ptr , f64 + %23 = cir.binop(sub, %21, %22) : f64 + cir.store %23, %5 : f64, cir.ptr + %24 = cir.load %3 : cir.ptr , f64 + %25 = cir.load %4 : cir.ptr , f64 + %26 = cir.binop(mul, %24, %25) : f64 + cir.store %26, %5 : f64, cir.ptr + %27 = cir.load %3 : cir.ptr , f64 + %28 = cir.load %4 : cir.ptr , f64 + %29 = cir.binop(div, %27, %28) : f64 + cir.store %29, %5 : f64, cir.ptr + cir.return + } +} + +// MLIR: = memref.alloca() {alignment = 4 : i64} : memref +// MLIR: = memref.alloca() {alignment = 8 : i64} : memref +// MLIR: = arith.mulf {{.*}} : f32 +// MLIR: = arith.divf +// MLIR: = arith.addf +// MLIR: = arith.subf +// MLIR: = arith.addf {{.*}} : f64 +// MLIR: = arith.subf +// MLIR: = arith.mulf +// MLIR: = arith.divf + +// LLVM: = alloca float, i64 +// LLVM: = alloca double, i64 +// LLVM: = fmul float +// LLVM: = fdiv float +// LLVM: = fadd float +// LLVM: = fsub float +// LLVM: = fadd double +// LLVM: = fsub double +// LLVM: = fmul double +// LLVM: = fdiv double diff --git a/clang/test/CIR/CIRToLLVM/binop-int.cir b/clang/test/CIR/CIRToLLVM/binop-int.cir new file mode 100644 index 000000000000..330a4954a372 --- /dev/null +++ b/clang/test/CIR/CIRToLLVM/binop-int.cir @@ -0,0 +1,76 @@ +// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// XFAIL: * + +module { + cir.func @foo() { + %0 = cir.alloca i32, cir.ptr , ["a", cinit] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} + %2 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} + %3 = cir.cst(2 : i32) : i32 cir.store %3, %0 : i32, cir.ptr + %4 = cir.cst(1 : i32) : i32 cir.store %4, %1 : i32, cir.ptr + %5 = cir.load %0 : cir.ptr , i32 + %6 = cir.load %1 : cir.ptr , i32 + %7 = cir.binop(mul, %5, %6) : i32 + cir.store %7, %2 : i32, cir.ptr + %8 = cir.load %2 : cir.ptr , i32 + %9 = cir.load %1 : cir.ptr , i32 + %10 = cir.binop(div, %8, %9) : i32 + cir.store %10, %2 : i32, cir.ptr + %11 = cir.load %2 : cir.ptr , i32 + %12 = cir.load %1 : cir.ptr , i32 + %13 = cir.binop(rem, %11, %12) : i32 + cir.store %13, %2 : i32, cir.ptr + %14 = cir.load %2 : cir.ptr , i32 + %15 = cir.load %1 : cir.ptr , i32 + %16 = cir.binop(add, %14, %15) : i32 + cir.store %16, %2 : i32, cir.ptr + %17 = cir.load %2 : cir.ptr , i32 + %18 = cir.load %1 : cir.ptr , i32 + %19 = cir.binop(sub, %17, %18) : i32 + cir.store %19, %2 : i32, cir.ptr + %20 = cir.load %2 : cir.ptr , i32 + %21 = cir.load %1 : cir.ptr , i32 + %22 = cir.binop(shr, %20, %21) : i32 + cir.store %22, %2 : i32, cir.ptr + %23 = cir.load %2 : cir.ptr , i32 + %24 = cir.load %1 : cir.ptr , i32 + %25 = cir.binop(shl, %23, %24) : i32 + cir.store %25, %2 : i32, cir.ptr + %26 = cir.load %2 : cir.ptr , i32 + %27 = cir.load %1 : cir.ptr , i32 + %28 = cir.binop(and, %26, %27) : i32 + cir.store %28, %2 : i32, cir.ptr + %29 = cir.load %2 : cir.ptr , i32 + %30 = cir.load %1 : cir.ptr , i32 + %31 = cir.binop(xor, %29, %30) : i32 + cir.store %31, %2 : i32, cir.ptr + %32 = cir.load %2 : cir.ptr , i32 + %33 = cir.load %1 : cir.ptr , i32 + %34 = cir.binop(or, %32, %33) : i32 + cir.store %34, %2 : i32, cir.ptr + cir.return + } +} + +// MLIR: = arith.muli +// MLIR: = arith.divui +// MLIR: = arith.remui +// MLIR: = arith.addi +// MLIR: = arith.subi +// MLIR: = arith.shrui +// MLIR: = arith.shli +// MLIR: = arith.andi +// MLIR: = arith.xori +// MLIR: = arith.ori + +// LLVM: = mul i32 +// LLVM: = udiv i32 +// LLVM: = urem i32 +// LLVM: = add i32 +// LLVM: = sub i32 +// LLVM: = lshr i32 +// LLVM: = shl i32 +// LLVM: = and i32 +// LLVM: = xor i32 +// LLVM: = or i32 From d2ab67502f93d81b5d2383cd01f75868bfe9eaeb Mon Sep 17 00:00:00 2001 From: Jingsong Shang Date: Mon, 8 Aug 2022 15:55:42 -0700 Subject: [PATCH 0565/2301] [CIR][MLIR] Implement the lowering of booleans in CIR and add its test file. Implement the lowering of booleans in CIR to MLIR Dialects. Add test case to test on changes. --- clang/lib/CIR/LowerToLLVM.cpp | 24 +++++++++++++++++++++--- clang/test/CIR/CIRToLLVM/bool.cir | 22 ++++++++++++++++++++++ 2 files changed, 43 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CIRToLLVM/bool.cir diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index d894552fb358..c42b92b89c89 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -110,7 +110,14 @@ class CIRAllocaLowering : public mlir::OpRewritePattern { mlir::LogicalResult matchAndRewrite(mlir::cir::AllocaOp op, mlir::PatternRewriter &rewriter) const override { - auto ty = mlir::MemRefType::get({}, op.getAllocaType()); + mlir::MemRefType ty; + if (op.getAllocaType().isa()) { + mlir::Type integerType = + mlir::IntegerType::get(getContext(), 8, mlir::IntegerType::Signless); + ty = mlir::MemRefType::get({}, integerType); + } else { + ty = mlir::MemRefType::get({}, op.getAllocaType()); + } rewriter.replaceOpWithNewOp(op, ty, op.getAlignmentAttr()); return mlir::LogicalResult::success(); @@ -154,8 +161,19 @@ class CIRConstantLowering mlir::LogicalResult matchAndRewrite(mlir::cir::ConstantOp op, mlir::PatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getValue()); + if (op.getType().isa()) { + mlir::Type type = + mlir::IntegerType::get(getContext(), 8, mlir::IntegerType::Signless); + mlir::TypedAttr IntegerAttr; + if (op.getValue() == mlir::BoolAttr::get(getContext(), true)) + IntegerAttr = mlir::IntegerAttr::get(type, 1); + else + IntegerAttr = mlir::IntegerAttr::get(type, 0); + rewriter.replaceOpWithNewOp(op, type, + IntegerAttr); + } else + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getValue()); return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/CIRToLLVM/bool.cir b/clang/test/CIR/CIRToLLVM/bool.cir new file mode 100644 index 000000000000..067741cf37c9 --- /dev/null +++ b/clang/test/CIR/CIRToLLVM/bool.cir @@ -0,0 +1,22 @@ +// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// XFAIL: * + +module { + cir.func @foo() { + %0 = cir.alloca !cir.bool, cir.ptr , ["a", cinit] {alignment = 1 : i64} + %1 = cir.cst(true) : !cir.bool + cir.store %1, %0 : !cir.bool, cir.ptr + cir.return + } +} + +// MLIR: func @foo() { +// MLIR: [[Value:%[0-9]+]] = memref.alloca() {alignment = 1 : i64} : memref +// MLIR: = arith.constant 1 : i8 +// MLIR: memref.store {{.*}}, [[Value]][] : memref +// return + +// LLVM: = alloca i8, i64 +// LLVM: store i8 1, ptr %5 +// LLVM: ret From 834eaa7cc5309f99e68608d23a839143e0483ead Mon Sep 17 00:00:00 2001 From: Jingsong Shang Date: Mon, 8 Aug 2022 23:24:39 -0700 Subject: [PATCH 0566/2301] [CIR][MLIR] Implement lowering of comparison operators in CIR to MLIR Dialects and add test case. - Implement lowering of Comparison operators in CIR to MLIR Dialects. - Add test case to test on changes. --- clang/lib/CIR/LowerToLLVM.cpp | 159 ++++++++++++++++++++++++++++++- clang/test/CIR/CIRToLLVM/cmp.cir | 78 +++++++++++++++ 2 files changed, 235 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CIRToLLVM/cmp.cir diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index c42b92b89c89..dca22eb43b6b 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -307,9 +307,161 @@ class CIRBinOpLowering : public mlir::OpRewritePattern { } }; +class CIRCmpOpLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CmpOp op, + mlir::PatternRewriter &rewriter) const override { + auto type = op.getLhs().getType(); + auto integerType = + mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); + + switch (op.getKind()) { + case mlir::cir::CmpOpKind::gt: { + if (type.isa()) { + mlir::arith::CmpIPredicate cmpIType; + if (!type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::arith::CmpIPredicate::ugt; + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::UGT), + op.getLhs(), op.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::ge: { + if (type.isa()) { + mlir::arith::CmpIPredicate cmpIType; + if (type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::arith::CmpIPredicate::uge; + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::UGE), + op.getLhs(), op.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::lt: { + if (type.isa()) { + mlir::arith::CmpIPredicate cmpIType; + if (type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::arith::CmpIPredicate::ult; + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::ULT), + op.getLhs(), op.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::le: { + if (type.isa()) { + mlir::arith::CmpIPredicate cmpIType; + if (type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::arith::CmpIPredicate::ule; + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::ULE), + op.getLhs(), op.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::eq: { + if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), + mlir::arith::CmpIPredicate::eq), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::UEQ), + op.getLhs(), op.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::ne: { + if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpIPredicateAttr::get(getContext(), + mlir::arith::CmpIPredicate::ne), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::arith::CmpFPredicateAttr::get( + getContext(), mlir::arith::CmpFPredicate::UNE), + op.getLhs(), op.getRhs(), + mlir::arith::FastMathFlagsAttr::get( + getContext(), mlir::arith::FastMathFlags::none)); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + } + + return mlir::LogicalResult::success(); + } +}; + void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { patterns.add(patterns.getContext()); + CIRConstantLowering, CIRBinOpLowering, CIRCmpOpLowering>( + patterns.getContext()); } void ConvertCIRToLLVMPass::runOnOperation() { @@ -340,7 +492,10 @@ void ConvertCIRToMemRefPass::runOnOperation() { target .addLegalDialect(); - target.addIllegalOp(); + target + .addIllegalOp(); mlir::RewritePatternSet patterns(&getContext()); populateCIRToMemRefConversionPatterns(patterns); diff --git a/clang/test/CIR/CIRToLLVM/cmp.cir b/clang/test/CIR/CIRToLLVM/cmp.cir new file mode 100644 index 000000000000..75b584c6f762 --- /dev/null +++ b/clang/test/CIR/CIRToLLVM/cmp.cir @@ -0,0 +1,78 @@ +// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +// FIXME: after rebasing against July's 2022, this started failing with "integer type not supported in CIR yet" +// XFAIL: * + +module { + cir.func @foo() { + %0 = cir.alloca i32, cir.ptr , ["a", uninitialized] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} + %2 = cir.alloca f32, cir.ptr , ["c", uninitialized] {alignment = 4 : i64} + %3 = cir.alloca f32, cir.ptr , ["d", uninitialized] {alignment = 4 : i64} + %4 = cir.alloca !cir.bool, cir.ptr , ["e", uninitialized] {alignment = 1 : i64} + %5 = cir.load %0 : cir.ptr , i32 + %6 = cir.load %1 : cir.ptr , i32 + %7 = cir.cmp(gt, %5, %6) : i32, !cir.bool + %8 = cir.load %0 : cir.ptr , i32 + %9 = cir.load %1 : cir.ptr , i32 + %10 = cir.cmp(eq, %8, %9) : i32, !cir.bool + %11 = cir.load %0 : cir.ptr , i32 + %12 = cir.load %1 : cir.ptr , i32 + %13 = cir.cmp(lt, %11, %12) : i32, !cir.bool + %14 = cir.load %0 : cir.ptr , i32 + %15 = cir.load %1 : cir.ptr , i32 + %16 = cir.cmp(ge, %14, %15) : i32, !cir.bool + %17 = cir.load %0 : cir.ptr , i32 + %18 = cir.load %1 : cir.ptr , i32 + %19 = cir.cmp(ne, %17, %18) : i32, !cir.bool + %20 = cir.load %0 : cir.ptr , i32 + %21 = cir.load %1 : cir.ptr , i32 + %22 = cir.cmp(le, %20, %21) : i32, !cir.bool + %23 = cir.load %2 : cir.ptr , f32 + %24 = cir.load %3 : cir.ptr , f32 + %25 = cir.cmp(gt, %23, %24) : f32, !cir.bool + %26 = cir.load %2 : cir.ptr , f32 + %27 = cir.load %3 : cir.ptr , f32 + %28 = cir.cmp(eq, %26, %27) : f32, !cir.bool + %29 = cir.load %2 : cir.ptr , f32 + %30 = cir.load %3 : cir.ptr , f32 + %31 = cir.cmp(lt, %29, %30) : f32, !cir.bool + %32 = cir.load %2 : cir.ptr , f32 + %33 = cir.load %3 : cir.ptr , f32 + %34 = cir.cmp(ge, %32, %33) : f32, !cir.bool + %35 = cir.load %2 : cir.ptr , f32 + %36 = cir.load %3 : cir.ptr , f32 + %37 = cir.cmp(ne, %35, %36) : f32, !cir.bool + %38 = cir.load %2 : cir.ptr , f32 + %39 = cir.load %3 : cir.ptr , f32 + %40 = cir.cmp(le, %38, %39) : f32, !cir.bool + cir.return + } +} + +// MLIR: = arith.cmpi ugt +// MLIR: = arith.cmpi eq, +// MLIR: = arith.cmpi ult, +// MLIR: = arith.cmpi uge, +// MLIR: = arith.cmpi ne, +// MLIR: = arith.cmpi ule, +// MLIR: = arith.cmpf ugt +// MLIR: = arith.cmpf ueq, +// MLIR: = arith.cmpf ult, +// MLIR: = arith.cmpf uge, +// MLIR: = arith.cmpf une, +// MLIR: = arith.cmpf ule, + +// LLVM: icmp ugt i32 +// LLVM: icmp eq i32 +// LLVM: icmp ult i32 +// LLVM: icmp uge i32 +// LLVM: icmp ne i32 +// LLVM: icmp ule i32 +// LLVM: fcmp ugt float +// LLVM: fcmp ueq float +// LLVM: fcmp ult float +// LLVM: fcmp uge float +// LLVM: fcmp une float +// LLVM: fcmp ule float From 5ab11d2004893dfaaf1f20e4ec5a92e90dad258b Mon Sep 17 00:00:00 2001 From: Jingsong Shang Date: Thu, 18 Aug 2022 17:20:27 -0700 Subject: [PATCH 0567/2301] [CIR][MLIR] Add support of lowering array allocation in CIR to other MLIR dialects and test file Modify CIRAllocLowering function to cover array allocation in ClangIR and add its test file. --- clang/lib/CIR/LowerToLLVM.cpp | 20 ++++++++++++++------ clang/test/CIR/CIRToLLVM/array.cir | 25 +++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/CIRToLLVM/array.cir diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index dca22eb43b6b..31d5dab2c3a4 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -110,15 +110,23 @@ class CIRAllocaLowering : public mlir::OpRewritePattern { mlir::LogicalResult matchAndRewrite(mlir::cir::AllocaOp op, mlir::PatternRewriter &rewriter) const override { - mlir::MemRefType ty; - if (op.getAllocaType().isa()) { - mlir::Type integerType = + auto type = op.getAllocaType(); + mlir::MemRefType memreftype; + + if (type.isa()) { + auto integerType = mlir::IntegerType::get(getContext(), 8, mlir::IntegerType::Signless); - ty = mlir::MemRefType::get({}, integerType); + memreftype = mlir::MemRefType::get({}, integerType); + } else if (type.isa()) { + mlir::cir::ArrayType arraytype = type.dyn_cast(); + memreftype = + mlir::MemRefType::get(arraytype.getSize(), arraytype.getEltType()); + } else if (type.isa() || type.isa()) { + memreftype = mlir::MemRefType::get({}, op.getAllocaType()); } else { - ty = mlir::MemRefType::get({}, op.getAllocaType()); + llvm_unreachable("type to be allocated not supported yet"); } - rewriter.replaceOpWithNewOp(op, ty, + rewriter.replaceOpWithNewOp(op, memreftype, op.getAlignmentAttr()); return mlir::LogicalResult::success(); } diff --git a/clang/test/CIR/CIRToLLVM/array.cir b/clang/test/CIR/CIRToLLVM/array.cir new file mode 100644 index 000000000000..f3c2ba751b9f --- /dev/null +++ b/clang/test/CIR/CIRToLLVM/array.cir @@ -0,0 +1,25 @@ +// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// XFAIL: * + +module { + cir.func @foo() { + %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: func @foo() { +// MLIR-NEXT: = memref.alloca() {alignment = 16 : i64} : memref<10xi32> +// MLIR-NEXT: return +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: = alloca i32, i64 ptrtoint (ptr getelementptr (i32, ptr null, i64 10) to i64) +// LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } undef, ptr %1, 0 +// LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } %2, ptr %1, 1 +// LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } %3, i64 0, 2 +// LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } %4, i64 10, 3, 0 +// LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } %5, i64 1, 4, 0 +// LLVM-NEXT: ret void From 6b32e76f35668e081d8dada9609fb606c4b77e32 Mon Sep 17 00:00:00 2001 From: Jingsong Shang Date: Tue, 23 Aug 2022 11:01:53 -0700 Subject: [PATCH 0568/2301] [CIR][MLIR] Add support of lowering branch operations in ClangIR to other MLIR dialects and releated test case. Add support for lowering branch operations in ClangIR to other MLIR dialects and related test case. Note that: we add -canonicalize flag to testing command to make sure all unreachable blocks are removed before applying lowering conversion. --- clang/lib/CIR/LowerToLLVM.cpp | 27 ++++++++++++++++------ clang/test/CIR/CIRToLLVM/goto.cir | 38 +++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 7 deletions(-) create mode 100644 clang/test/CIR/CIRToLLVM/goto.cir diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index 31d5dab2c3a4..d24f05301a41 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -21,6 +21,7 @@ #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" @@ -57,7 +58,7 @@ struct ConvertCIRToMemRefPass mlir::OperationPass> { void getDependentDialects(mlir::DialectRegistry ®istry) const override { registry.insert(); + mlir::scf::SCFDialect, mlir::cf::ControlFlowDialect>(); } void runOnOperation() final; @@ -466,10 +467,22 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { } }; +class CIRBrOpLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BrOp op, + mlir::PatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, op.getDest()); + return mlir::LogicalResult::success(); + } +}; + void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { patterns.add( - patterns.getContext()); + CIRConstantLowering, CIRBinOpLowering, CIRCmpOpLowering, + CIRBrOpLowering>(patterns.getContext()); } void ConvertCIRToLLVMPass::runOnOperation() { @@ -497,13 +510,13 @@ void ConvertCIRToMemRefPass::runOnOperation() { // whether we should have micro-conversions that do the minimal amount of work // or macro conversions that entiirely remove a dialect. target.addLegalOp(); - target - .addLegalDialect(); + target.addLegalDialect(); target .addIllegalOp(); + mlir::cir::ConstantOp, mlir::cir::CmpOp, mlir::cir::BrOp>(); mlir::RewritePatternSet patterns(&getContext()); populateCIRToMemRefConversionPatterns(patterns); diff --git a/clang/test/CIR/CIRToLLVM/goto.cir b/clang/test/CIR/CIRToLLVM/goto.cir new file mode 100644 index 000000000000..b82303d202b3 --- /dev/null +++ b/clang/test/CIR/CIRToLLVM/goto.cir @@ -0,0 +1,38 @@ +// RUN: cir-tool %s -canonicalize -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -canonicalize -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +// FIXME: after rebasing against July's 2022 mlir, we get "failed to legalize +// operation 'cf.br'" from -cir-to-llvm +// XFAIL: * + +module { + cir.func @foo() { + %0 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} + %1 = cir.cst(1 : i32) : i32 + cir.store %1, %0 : i32, cir.ptr + cir.br ^bb2 + ^bb1: // no predecessors + %2 = cir.load %0 : cir.ptr , i32 + %3 = cir.cst(1 : i32) : i32 + %4 = cir.binop(add, %2, %3) : i32 + cir.store %4, %0 : i32, cir.ptr + cir.br ^bb2 + ^bb2: // 2 preds: ^bb0, ^bb1 + %5 = cir.load %0 : cir.ptr , i32 + %6 = cir.cst(2 : i32) : i32 + %7 = cir.binop(add, %5, %6) : i32 + cir.store %7, %0 : i32, cir.ptr + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: func @foo +// MLIR: cf.br ^bb1 +// MLIR: ^bb1: +// MLIR: return + +// LLVM: br label %[[Value:[0-9]+]], +// LLVM-EMPTY: +// LLVM-NEXT: [[Value]]: ; preds = +// LLVM: ret void From 0eb8cdb49d2c0bf8210b885e91bf750ed231b3aa Mon Sep 17 00:00:00 2001 From: Jingsong Shang Date: Wed, 31 Aug 2022 11:49:21 -0700 Subject: [PATCH 0569/2301] [CIR][MLIR] fix integer type related problems in CIRtoLLVM.cpp. Fix integer type check in CIRBinOpLowering and CIRCmpOpLowering. Add llvm_unreachable statement for integers of type singed and unsigned. --- clang/lib/CIR/LowerToLLVM.cpp | 21 +++++++++------------ clang/test/CIR/CIRToLLVM/binop-int.cir | 12 ++++++------ clang/test/CIR/CIRToLLVM/cmp.cir | 2 -- 3 files changed, 15 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/LowerToLLVM.cpp index d24f05301a41..8563d92a450f 100644 --- a/clang/lib/CIR/LowerToLLVM.cpp +++ b/clang/lib/CIR/LowerToLLVM.cpp @@ -264,24 +264,22 @@ class CIRBinOpLowering : public mlir::OpRewritePattern { break; case mlir::cir::BinOpKind::Div: if (type.isa()) { - if (type.isSignedInteger()) + if (type.isSignlessInteger()) rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); else - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + llvm_unreachable("integer type not supported in CIR yet"); } else rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); break; case mlir::cir::BinOpKind::Rem: if (type.isa()) { - if (type.isSignedInteger()) + if (type.isSignlessInteger()) rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); else - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + llvm_unreachable("integer type not supported in CIR yet"); } else rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); @@ -303,12 +301,11 @@ class CIRBinOpLowering : public mlir::OpRewritePattern { op, op.getType(), op.getLhs(), op.getRhs()); break; case mlir::cir::BinOpKind::Shr: - if (type.isSignedInteger()) + if (type.isSignlessInteger()) rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); else - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + llvm_unreachable("integer type not supported in CIR yet"); break; } @@ -354,7 +351,7 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { case mlir::cir::CmpOpKind::ge: { if (type.isa()) { mlir::arith::CmpIPredicate cmpIType; - if (type.isSignlessInteger()) + if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::arith::CmpIPredicate::uge; rewriter.replaceOpWithNewOp( @@ -377,7 +374,7 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { case mlir::cir::CmpOpKind::lt: { if (type.isa()) { mlir::arith::CmpIPredicate cmpIType; - if (type.isSignlessInteger()) + if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::arith::CmpIPredicate::ult; rewriter.replaceOpWithNewOp( @@ -401,7 +398,7 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { case mlir::cir::CmpOpKind::le: { if (type.isa()) { mlir::arith::CmpIPredicate cmpIType; - if (type.isSignlessInteger()) + if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::arith::CmpIPredicate::ule; rewriter.replaceOpWithNewOp( diff --git a/clang/test/CIR/CIRToLLVM/binop-int.cir b/clang/test/CIR/CIRToLLVM/binop-int.cir index 330a4954a372..00cd6cfb7fa2 100644 --- a/clang/test/CIR/CIRToLLVM/binop-int.cir +++ b/clang/test/CIR/CIRToLLVM/binop-int.cir @@ -54,22 +54,22 @@ module { } // MLIR: = arith.muli -// MLIR: = arith.divui -// MLIR: = arith.remui +// MLIR: = arith.divsi +// MLIR: = arith.remsi // MLIR: = arith.addi // MLIR: = arith.subi -// MLIR: = arith.shrui +// MLIR: = arith.shrsi // MLIR: = arith.shli // MLIR: = arith.andi // MLIR: = arith.xori // MLIR: = arith.ori // LLVM: = mul i32 -// LLVM: = udiv i32 -// LLVM: = urem i32 +// LLVM: = sdiv i32 +// LLVM: = srem i32 // LLVM: = add i32 // LLVM: = sub i32 -// LLVM: = lshr i32 +// LLVM: = ashr i32 // LLVM: = shl i32 // LLVM: = and i32 // LLVM: = xor i32 diff --git a/clang/test/CIR/CIRToLLVM/cmp.cir b/clang/test/CIR/CIRToLLVM/cmp.cir index 75b584c6f762..24dcb6fef8cc 100644 --- a/clang/test/CIR/CIRToLLVM/cmp.cir +++ b/clang/test/CIR/CIRToLLVM/cmp.cir @@ -1,7 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM - -// FIXME: after rebasing against July's 2022, this started failing with "integer type not supported in CIR yet" // XFAIL: * module { From bb3c83285d038b86f874a395d51be04078c79846 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 8 Sep 2022 19:31:09 -0700 Subject: [PATCH 0570/2301] [CIR][CodeGen] Remove unused dir CIRLowering --- clang/lib/CIRLowering/CMakeLists.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 clang/lib/CIRLowering/CMakeLists.txt diff --git a/clang/lib/CIRLowering/CMakeLists.txt b/clang/lib/CIRLowering/CMakeLists.txt deleted file mode 100644 index e69de29bb2d1..000000000000 From 36181b64541b20829fa995a45aff51626064830a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 8 Sep 2022 19:54:04 -0700 Subject: [PATCH 0571/2301] [CIR][CodeGen] Organize CIR directory Before clang/lib/CIR/ clang/lib/CIRFrontendAction After clang/lib/CIR/CodeGen/ clang/lib/CIR/FrontendAction --- clang/include/clang/CIR/CMakeLists.txt | 6 + clang/lib/CIR/CMakeLists.txt | 65 ---- clang/lib/CIR/{ => CodeGen}/ABIInfo.h | 0 clang/lib/CIR/{ => CodeGen}/Address.h | 0 clang/lib/CIR/{ => CodeGen}/CIRGenCXX.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenCXXABI.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenCXXABI.h | 0 clang/lib/CIR/{ => CodeGen}/CIRGenCall.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenCall.h | 0 clang/lib/CIR/{ => CodeGen}/CIRGenClass.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenCleanup.cpp | 0 .../lib/CIR/{ => CodeGen}/CIRGenCstEmitter.h | 0 clang/lib/CIR/{ => CodeGen}/CIRGenDecl.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenDeclCXX.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenExpr.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenExprAgg.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenExprCXX.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenExprCst.cpp | 0 .../CIR/{ => CodeGen}/CIRGenExprScalar.cpp | 0 .../lib/CIR/{ => CodeGen}/CIRGenFunction.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenFunction.h | 0 .../CIR/{ => CodeGen}/CIRGenFunctionInfo.h | 0 .../CIR/{ => CodeGen}/CIRGenItaniumCXXABI.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenModule.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenModule.h | 0 .../CIR/{ => CodeGen}/CIRGenRecordLayout.h | 0 clang/lib/CIR/{ => CodeGen}/CIRGenStmt.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenTBAA.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenTBAA.h | 0 clang/lib/CIR/{ => CodeGen}/CIRGenTypes.cpp | 0 clang/lib/CIR/{ => CodeGen}/CIRGenTypes.h | 0 clang/lib/CIR/{ => CodeGen}/CIRGenValue.h | 0 clang/lib/CIR/{ => CodeGen}/CIRGenerator.cpp | 1 + clang/lib/CIR/{ => CodeGen}/CIRPasses.cpp | 0 .../{ => CodeGen}/CIRRecordLayoutBuilder.cpp | 0 clang/lib/CIR/CodeGen/CMakeLists.txt | 57 +++ clang/lib/CIR/{ => CodeGen}/CallingConv.h | 0 clang/lib/CIR/{ => CodeGen}/LowerToLLVM.cpp | 0 clang/lib/CIR/{ => CodeGen}/TargetInfo.cpp | 0 clang/lib/CIR/{ => CodeGen}/TargetInfo.h | 0 .../UnimplementedFeatureGuarding.h | 0 clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 341 ++++++++++++++++++ clang/lib/CIR/FrontendAction/CMakeLists.txt | 26 ++ clang/lib/CIRFrontendAction/CMakeLists.txt | 33 -- clang/lib/CMakeLists.txt | 8 +- 45 files changed, 435 insertions(+), 102 deletions(-) create mode 100644 clang/include/clang/CIR/CMakeLists.txt delete mode 100644 clang/lib/CIR/CMakeLists.txt rename clang/lib/CIR/{ => CodeGen}/ABIInfo.h (100%) rename clang/lib/CIR/{ => CodeGen}/Address.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenCXX.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenCXXABI.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenCXXABI.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenCall.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenCall.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenClass.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenCleanup.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenCstEmitter.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenDecl.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenDeclCXX.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenExpr.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenExprAgg.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenExprCXX.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenExprCst.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenExprScalar.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenFunction.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenFunction.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenFunctionInfo.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenItaniumCXXABI.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenModule.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenModule.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenRecordLayout.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenStmt.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenTBAA.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenTBAA.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenTypes.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenTypes.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenValue.h (100%) rename clang/lib/CIR/{ => CodeGen}/CIRGenerator.cpp (99%) rename clang/lib/CIR/{ => CodeGen}/CIRPasses.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CIRRecordLayoutBuilder.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/CallingConv.h (100%) rename clang/lib/CIR/{ => CodeGen}/LowerToLLVM.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/TargetInfo.cpp (100%) rename clang/lib/CIR/{ => CodeGen}/TargetInfo.h (100%) rename clang/lib/CIR/{ => CodeGen}/UnimplementedFeatureGuarding.h (100%) create mode 100644 clang/lib/CIR/FrontendAction/CIRGenAction.cpp delete mode 100644 clang/lib/CIRFrontendAction/CMakeLists.txt diff --git a/clang/include/clang/CIR/CMakeLists.txt b/clang/include/clang/CIR/CMakeLists.txt new file mode 100644 index 000000000000..f061051f41d5 --- /dev/null +++ b/clang/include/clang/CIR/CMakeLists.txt @@ -0,0 +1,6 @@ +set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir +set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include) +include_directories(${MLIR_INCLUDE_DIR}) +include_directories(${MLIR_TABLEGEN_OUTPUT_DIR}) + +# add_subdirectory(Dialect) diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt deleted file mode 100644 index dc922e6ba18e..000000000000 --- a/clang/lib/CIR/CMakeLists.txt +++ /dev/null @@ -1,65 +0,0 @@ -set( - LLVM_LINK_COMPONENTS - Core - Support -) - -include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) -include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) - -get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) - -add_clang_library(clangCIR - CIRGenCXX.cpp - CIRGenCXXABI.cpp - CIRGenCall.cpp - CIRGenClass.cpp - CIRGenCleanup.cpp - CIRGenDecl.cpp - CIRGenDeclCXX.cpp - CIRGenExpr.cpp - CIRGenExprCst.cpp - CIRGenExprAgg.cpp - CIRGenExprCXX.cpp - CIRGenExprScalar.cpp - CIRGenFunction.cpp - CIRGenItaniumCXXABI.cpp - CIRGenModule.cpp - CIRGenStmt.cpp - CIRGenTBAA.cpp - CIRGenTypes.cpp - CIRGenerator.cpp - CIRPasses.cpp - CIRRecordLayoutBuilder.cpp - LowerToLLVM.cpp - TargetInfo.cpp - - DEPENDS - MLIRCIR - MLIRCIROpsIncGen - - LINK_LIBS - clangAST - clangBasic - clangLex - ${dialect_libs} - MLIRCIR - MLIRCIRTransforms - MLIRAffineToStandard - MLIRAnalysis - MLIRIR - MLIRLLVMCommonConversion - MLIRLLVMDialect - MLIRLLVMToLLVMIRTranslation - MLIRMemRefDialect - MLIRMemRefToLLVM - MLIRParser - MLIRPass - MLIRSideEffectInterfaces - MLIRSCFToControlFlow - MLIRFuncToLLVM - MLIRSupport - MLIRMemRefDialect - MLIRTargetLLVMIRExport - MLIRTransforms -) diff --git a/clang/lib/CIR/ABIInfo.h b/clang/lib/CIR/CodeGen/ABIInfo.h similarity index 100% rename from clang/lib/CIR/ABIInfo.h rename to clang/lib/CIR/CodeGen/ABIInfo.h diff --git a/clang/lib/CIR/Address.h b/clang/lib/CIR/CodeGen/Address.h similarity index 100% rename from clang/lib/CIR/Address.h rename to clang/lib/CIR/CodeGen/Address.h diff --git a/clang/lib/CIR/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp similarity index 100% rename from clang/lib/CIR/CIRGenCXX.cpp rename to clang/lib/CIR/CodeGen/CIRGenCXX.cpp diff --git a/clang/lib/CIR/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp similarity index 100% rename from clang/lib/CIR/CIRGenCXXABI.cpp rename to clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp diff --git a/clang/lib/CIR/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h similarity index 100% rename from clang/lib/CIR/CIRGenCXXABI.h rename to clang/lib/CIR/CodeGen/CIRGenCXXABI.h diff --git a/clang/lib/CIR/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp similarity index 100% rename from clang/lib/CIR/CIRGenCall.cpp rename to clang/lib/CIR/CodeGen/CIRGenCall.cpp diff --git a/clang/lib/CIR/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h similarity index 100% rename from clang/lib/CIR/CIRGenCall.h rename to clang/lib/CIR/CodeGen/CIRGenCall.h diff --git a/clang/lib/CIR/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp similarity index 100% rename from clang/lib/CIR/CIRGenClass.cpp rename to clang/lib/CIR/CodeGen/CIRGenClass.cpp diff --git a/clang/lib/CIR/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp similarity index 100% rename from clang/lib/CIR/CIRGenCleanup.cpp rename to clang/lib/CIR/CodeGen/CIRGenCleanup.cpp diff --git a/clang/lib/CIR/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h similarity index 100% rename from clang/lib/CIR/CIRGenCstEmitter.h rename to clang/lib/CIR/CodeGen/CIRGenCstEmitter.h diff --git a/clang/lib/CIR/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp similarity index 100% rename from clang/lib/CIR/CIRGenDecl.cpp rename to clang/lib/CIR/CodeGen/CIRGenDecl.cpp diff --git a/clang/lib/CIR/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp similarity index 100% rename from clang/lib/CIR/CIRGenDeclCXX.cpp rename to clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp diff --git a/clang/lib/CIR/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp similarity index 100% rename from clang/lib/CIR/CIRGenExpr.cpp rename to clang/lib/CIR/CodeGen/CIRGenExpr.cpp diff --git a/clang/lib/CIR/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp similarity index 100% rename from clang/lib/CIR/CIRGenExprAgg.cpp rename to clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp diff --git a/clang/lib/CIR/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp similarity index 100% rename from clang/lib/CIR/CIRGenExprCXX.cpp rename to clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp diff --git a/clang/lib/CIR/CIRGenExprCst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp similarity index 100% rename from clang/lib/CIR/CIRGenExprCst.cpp rename to clang/lib/CIR/CodeGen/CIRGenExprCst.cpp diff --git a/clang/lib/CIR/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp similarity index 100% rename from clang/lib/CIR/CIRGenExprScalar.cpp rename to clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp diff --git a/clang/lib/CIR/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp similarity index 100% rename from clang/lib/CIR/CIRGenFunction.cpp rename to clang/lib/CIR/CodeGen/CIRGenFunction.cpp diff --git a/clang/lib/CIR/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h similarity index 100% rename from clang/lib/CIR/CIRGenFunction.h rename to clang/lib/CIR/CodeGen/CIRGenFunction.h diff --git a/clang/lib/CIR/CIRGenFunctionInfo.h b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h similarity index 100% rename from clang/lib/CIR/CIRGenFunctionInfo.h rename to clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h diff --git a/clang/lib/CIR/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp similarity index 100% rename from clang/lib/CIR/CIRGenItaniumCXXABI.cpp rename to clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp diff --git a/clang/lib/CIR/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp similarity index 100% rename from clang/lib/CIR/CIRGenModule.cpp rename to clang/lib/CIR/CodeGen/CIRGenModule.cpp diff --git a/clang/lib/CIR/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h similarity index 100% rename from clang/lib/CIR/CIRGenModule.h rename to clang/lib/CIR/CodeGen/CIRGenModule.h diff --git a/clang/lib/CIR/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h similarity index 100% rename from clang/lib/CIR/CIRGenRecordLayout.h rename to clang/lib/CIR/CodeGen/CIRGenRecordLayout.h diff --git a/clang/lib/CIR/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp similarity index 100% rename from clang/lib/CIR/CIRGenStmt.cpp rename to clang/lib/CIR/CodeGen/CIRGenStmt.cpp diff --git a/clang/lib/CIR/CIRGenTBAA.cpp b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp similarity index 100% rename from clang/lib/CIR/CIRGenTBAA.cpp rename to clang/lib/CIR/CodeGen/CIRGenTBAA.cpp diff --git a/clang/lib/CIR/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h similarity index 100% rename from clang/lib/CIR/CIRGenTBAA.h rename to clang/lib/CIR/CodeGen/CIRGenTBAA.h diff --git a/clang/lib/CIR/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp similarity index 100% rename from clang/lib/CIR/CIRGenTypes.cpp rename to clang/lib/CIR/CodeGen/CIRGenTypes.cpp diff --git a/clang/lib/CIR/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h similarity index 100% rename from clang/lib/CIR/CIRGenTypes.h rename to clang/lib/CIR/CodeGen/CIRGenTypes.h diff --git a/clang/lib/CIR/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h similarity index 100% rename from clang/lib/CIR/CIRGenValue.h rename to clang/lib/CIR/CodeGen/CIRGenValue.h diff --git a/clang/lib/CIR/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp similarity index 99% rename from clang/lib/CIR/CIRGenerator.cpp rename to clang/lib/CIR/CodeGen/CIRGenerator.cpp index b33033c5441d..51c05c56c96b 100644 --- a/clang/lib/CIR/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -20,6 +20,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" #include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" using namespace cir; using namespace clang; diff --git a/clang/lib/CIR/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp similarity index 100% rename from clang/lib/CIR/CIRPasses.cpp rename to clang/lib/CIR/CodeGen/CIRPasses.cpp diff --git a/clang/lib/CIR/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp similarity index 100% rename from clang/lib/CIR/CIRRecordLayoutBuilder.cpp rename to clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 2ab3cbe1df4b..dc922e6ba18e 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -4,5 +4,62 @@ set( Support ) +include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) +include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) + get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +add_clang_library(clangCIR + CIRGenCXX.cpp + CIRGenCXXABI.cpp + CIRGenCall.cpp + CIRGenClass.cpp + CIRGenCleanup.cpp + CIRGenDecl.cpp + CIRGenDeclCXX.cpp + CIRGenExpr.cpp + CIRGenExprCst.cpp + CIRGenExprAgg.cpp + CIRGenExprCXX.cpp + CIRGenExprScalar.cpp + CIRGenFunction.cpp + CIRGenItaniumCXXABI.cpp + CIRGenModule.cpp + CIRGenStmt.cpp + CIRGenTBAA.cpp + CIRGenTypes.cpp + CIRGenerator.cpp + CIRPasses.cpp + CIRRecordLayoutBuilder.cpp + LowerToLLVM.cpp + TargetInfo.cpp + + DEPENDS + MLIRCIR + MLIRCIROpsIncGen + + LINK_LIBS + clangAST + clangBasic + clangLex + ${dialect_libs} + MLIRCIR + MLIRCIRTransforms + MLIRAffineToStandard + MLIRAnalysis + MLIRIR + MLIRLLVMCommonConversion + MLIRLLVMDialect + MLIRLLVMToLLVMIRTranslation + MLIRMemRefDialect + MLIRMemRefToLLVM + MLIRParser + MLIRPass + MLIRSideEffectInterfaces + MLIRSCFToControlFlow + MLIRFuncToLLVM + MLIRSupport + MLIRMemRefDialect + MLIRTargetLLVMIRExport + MLIRTransforms +) diff --git a/clang/lib/CIR/CallingConv.h b/clang/lib/CIR/CodeGen/CallingConv.h similarity index 100% rename from clang/lib/CIR/CallingConv.h rename to clang/lib/CIR/CodeGen/CallingConv.h diff --git a/clang/lib/CIR/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp similarity index 100% rename from clang/lib/CIR/LowerToLLVM.cpp rename to clang/lib/CIR/CodeGen/LowerToLLVM.cpp diff --git a/clang/lib/CIR/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp similarity index 100% rename from clang/lib/CIR/TargetInfo.cpp rename to clang/lib/CIR/CodeGen/TargetInfo.cpp diff --git a/clang/lib/CIR/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h similarity index 100% rename from clang/lib/CIR/TargetInfo.h rename to clang/lib/CIR/CodeGen/TargetInfo.h diff --git a/clang/lib/CIR/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h similarity index 100% rename from clang/lib/CIR/UnimplementedFeatureGuarding.h rename to clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp new file mode 100644 index 000000000000..0f36a3cef2ec --- /dev/null +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -0,0 +1,341 @@ +//===--- CIRGenAction.cpp - LLVM Code generation Frontend Action ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "clang/CIRFrontendAction/CIRGenAction.h" +#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/IR/OperationSupport.h" +#include "mlir/Parser/Parser.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclGroup.h" +#include "clang/Basic/DiagnosticFrontend.h" +#include "clang/Basic/FileManager.h" +#include "clang/Basic/LangStandard.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/CIRToCIRPasses.h" +#include "clang/CIR/LowerToLLVM.h" +#include "clang/CodeGen/BackendUtil.h" +#include "clang/CodeGen/ModuleBuilder.h" +#include "clang/Driver/DriverDiagnostic.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "clang/Lex/Preprocessor.h" +#include "llvm/Bitcode/BitcodeReader.h" +#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" +#include "llvm/IR/DebugInfo.h" +#include "llvm/IR/DiagnosticInfo.h" +#include "llvm/IR/DiagnosticPrinter.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/LLVMRemarkStreamer.h" +#include "llvm/IR/Module.h" +#include "llvm/IRReader/IRReader.h" +#include "llvm/LTO/LTOBackend.h" +#include "llvm/Linker/Linker.h" +#include "llvm/Pass.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/SourceMgr.h" +#include "llvm/Support/TimeProfiler.h" +#include "llvm/Support/Timer.h" +#include "llvm/Support/ToolOutputFile.h" +#include "llvm/Support/YAMLTraits.h" +#include "llvm/Transforms/IPO/Internalize.h" + +#include + +using namespace cir; +using namespace clang; + +namespace cir { +class CIRGenConsumer : public clang::ASTConsumer { + + virtual void anchor(); + + CIRGenAction::OutputType action; + + CompilerInstance &compilerInstance; + DiagnosticsEngine &diagnosticsEngine; + const HeaderSearchOptions &headerSearchOptions; + CodeGenOptions &codeGenOptions; + const TargetOptions &targetOptions; + const LangOptions &langOptions; + const FrontendOptions &feOptions; + + std::unique_ptr outputStream; + + ASTContext *astContext{nullptr}; + std::unique_ptr gen; + +public: + CIRGenConsumer(CIRGenAction::OutputType action, + CompilerInstance &compilerInstance, + DiagnosticsEngine &diagnosticsEngine, + const HeaderSearchOptions &headerSearchOptions, + CodeGenOptions &codeGenOptions, + const TargetOptions &targetOptions, + const LangOptions &langOptions, + const FrontendOptions &feOptions, + std::unique_ptr os) + : action(action), compilerInstance(compilerInstance), + diagnosticsEngine(diagnosticsEngine), + headerSearchOptions(headerSearchOptions), + codeGenOptions(codeGenOptions), targetOptions(targetOptions), + langOptions(langOptions), feOptions(feOptions), + + outputStream(std::move(os)), + + gen(std::make_unique(diagnosticsEngine, codeGenOptions)) { + } + + void Initialize(ASTContext &ctx) override { + assert(!astContext && "initialized multiple times"); + + astContext = &ctx; + + gen->Initialize(ctx); + } + + bool HandleTopLevelDecl(DeclGroupRef D) override { + PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), + astContext->getSourceManager(), + "LLVM IR generation of declaration"); + gen->HandleTopLevelDecl(D); + return true; + } + + void HandleCXXStaticMemberVarInstantiation(clang::VarDecl *VD) override { + llvm_unreachable("NYI"); + } + + void HandleInlineFunctionDefinition(FunctionDecl *D) override { + gen->HandleInlineFunctionDefinition(D); + } + + void HandleInterestingDecl(DeclGroupRef D) override { + llvm_unreachable("NYI"); + } + + void HandleTranslationUnit(ASTContext &C) override { + // Note that this method is called after `HandleTopLevelDecl` has already + // ran all over the top level decls. Here clang mostly wraps defered and + // global codegen, followed by running CIR passes. + + gen->HandleTranslationUnit(C); + if (!feOptions.DisableCIRVerifier) + if (!gen->verifyModule()) { + llvm::report_fatal_error( + "CIR codegen: module verification error before running CIR passes"); + return; + } + + auto mlirMod = gen->getModule(); + auto mlirCtx = gen->takeContext(); + + switch (action) { + case CIRGenAction::OutputType::EmitCIR: + if (outputStream && mlirMod) { + if (!feOptions.DisableCIRPasses) { + runCIRToCIRPasses(mlirMod, mlirCtx.get(), + !feOptions.DisableCIRVerifier); + } + mlir::OpPrintingFlags flags; + // FIXME: we cannot roundtrip prettyForm=true right now. + flags.enableDebugInfo(/*prettyForm=*/false); + mlirMod->print(*outputStream, flags); + } + break; + case CIRGenAction::OutputType::EmitLLVM: { + llvm::LLVMContext llvmCtx; + auto llvmModule = + lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + if (outputStream) + llvmModule->print(*outputStream, nullptr); + break; + } + case CIRGenAction::OutputType::EmitObj: { + // TODO: Don't duplicate this from above + llvm::LLVMContext llvmCtx; + auto llvmModule = + lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + + llvmModule->setTargetTriple(targetOptions.Triple); + + emitBackendOutput(compilerInstance, codeGenOptions, + C.getTargetInfo().getDataLayoutString(), + llvmModule.get(), BackendAction::Backend_EmitObj, + nullptr, std::move(outputStream)); + break; + } + case CIRGenAction::OutputType::EmitAssembly: + assert(false && "Not yet implemented"); + break; + case CIRGenAction::OutputType::None: + break; + } + } + + void HandleTagDeclDefinition(TagDecl *D) override { + PrettyStackTraceDecl CrashInfo(D, SourceLocation(), + astContext->getSourceManager(), + "CIR generation of declaration"); + gen->HandleTagDeclDefinition(D); + } + + void HandleTagDeclRequiredDefinition(const TagDecl *D) override { + gen->HandleTagDeclRequiredDefinition(D); + } + + void CompleteTentativeDefinition(VarDecl *D) override { + llvm_unreachable("NYI"); + } + + void CompleteExternalDeclaration(DeclaratorDecl *D) override { + llvm_unreachable("NYI"); + } + + void AssignInheritanceModel(CXXRecordDecl *RD) override { + llvm_unreachable("NYI"); + } + + void HandleVTable(CXXRecordDecl *RD) override { llvm_unreachable("NYI"); } +}; +} // namespace cir + +void CIRGenConsumer::anchor() {} + +CIRGenAction::CIRGenAction(OutputType act, mlir::MLIRContext *_MLIRContext) + : mlirContext(_MLIRContext ? _MLIRContext : new mlir::MLIRContext), + action(act) {} + +CIRGenAction::~CIRGenAction() { mlirModule.reset(); } + +void CIRGenAction::EndSourceFileAction() { + // If the consumer creation failed, do nothing. + if (!getCompilerInstance().hasASTConsumer()) + return; + + // TODO: pass the module around + // module = cgConsumer->takeModule(); +} + +static std::unique_ptr +getOutputStream(CompilerInstance &ci, StringRef inFile, + CIRGenAction::OutputType action) { + switch (action) { + case CIRGenAction::OutputType::EmitAssembly: + return ci.createDefaultOutputFile(false, inFile, "s"); + case CIRGenAction::OutputType::EmitCIR: + return ci.createDefaultOutputFile(false, inFile, "cir"); + case CIRGenAction::OutputType::EmitLLVM: + return ci.createDefaultOutputFile(false, inFile, "llvm"); + case CIRGenAction::OutputType::EmitObj: + return ci.createDefaultOutputFile(true, inFile, "o"); + case CIRGenAction::OutputType::None: + return nullptr; + } + + llvm_unreachable("Invalid action!"); +} + +std::unique_ptr +CIRGenAction::CreateASTConsumer(CompilerInstance &ci, StringRef inputFile) { + auto out = ci.takeOutputStream(); + if (!out) + out = getOutputStream(ci, inputFile, action); + + auto Result = std::make_unique( + action, ci, ci.getDiagnostics(), ci.getHeaderSearchOpts(), + ci.getCodeGenOpts(), ci.getTargetOpts(), ci.getLangOpts(), + ci.getFrontendOpts(), std::move(out)); + cgConsumer = Result.get(); + + // Enable generating macro debug info only when debug info is not disabled and + // also macrod ebug info is enabled + if (ci.getCodeGenOpts().getDebugInfo() != llvm::codegenoptions::NoDebugInfo && + ci.getCodeGenOpts().MacroDebugInfo) { + llvm_unreachable("NYI"); + } + + return std::move(Result); +} + +mlir::OwningOpRef +CIRGenAction::loadModule(llvm::MemoryBufferRef mbRef) { + auto module = + mlir::parseSourceString(mbRef.getBuffer(), mlirContext); + assert(module && "Failed to parse ClangIR module"); + return module; +} + +void CIRGenAction::ExecuteAction() { + if (getCurrentFileKind().getLanguage() != Language::CIR) { + this->ASTFrontendAction::ExecuteAction(); + return; + } + + // If this is a CIR file we have to treat it specially. + // TODO: This could be done more logically. This is just modeled at the moment + // mimicing CodeGenAction but this is clearly suboptimal. + auto &ci = getCompilerInstance(); + std::unique_ptr outstream = + getOutputStream(ci, getCurrentFile(), action); + if (action != OutputType::None && !outstream) + return; + + auto &sourceManager = ci.getSourceManager(); + auto fileID = sourceManager.getMainFileID(); + auto mainFile = sourceManager.getBufferOrNone(fileID); + + if (!mainFile) + return; + + mlirContext->getOrLoadDialect(); + mlirContext->getOrLoadDialect(); + mlirContext->getOrLoadDialect(); + + // TODO: unwrap this -- this exists because including the `OwningModuleRef` in + // CIRGenAction's header would require linking the Frontend against MLIR. + // Let's avoid that for now. + auto mlirModule = loadModule(*mainFile); + if (!mlirModule) + return; + + llvm::LLVMContext llvmCtx; + auto llvmModule = lowerFromCIRToLLVMIR( + *mlirModule, std::unique_ptr(mlirContext), llvmCtx); + + if (outstream) + llvmModule->print(*outstream, nullptr); +} + +void EmitAssemblyAction::anchor() {} +EmitAssemblyAction::EmitAssemblyAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitAssembly, _MLIRContext) {} + +void EmitCIRAction::anchor() {} +EmitCIRAction::EmitCIRAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitCIR, _MLIRContext) {} + +void EmitCIROnlyAction::anchor() {} +EmitCIROnlyAction::EmitCIROnlyAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::None, _MLIRContext) {} + +void EmitLLVMAction::anchor() {} +EmitLLVMAction::EmitLLVMAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitLLVM, _MLIRContext) {} + +void EmitObjAction::anchor() {} +EmitObjAction::EmitObjAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitObj, _MLIRContext) {} diff --git a/clang/lib/CIR/FrontendAction/CMakeLists.txt b/clang/lib/CIR/FrontendAction/CMakeLists.txt index 21f5355f65af..558787eb3a86 100644 --- a/clang/lib/CIR/FrontendAction/CMakeLists.txt +++ b/clang/lib/CIR/FrontendAction/CMakeLists.txt @@ -3,5 +3,31 @@ set(LLVM_LINK_COMPONENTS Support ) +include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) +include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +add_clang_library(clangCIRFrontendAction + CIRGenAction.cpp + + DEPENDS + MLIRCIROpsIncGen + + LINK_LIBS + clangAST + clangBasic + clangCodeGen + clangLex + clangFrontend + clangCIR + ${dialect_libs} + MLIRCIR + MLIRAnalysis + MLIRIR + MLIRParser + MLIRSideEffectInterfaces + MLIRTransforms + MLIRSupport + MLIRMemRefDialect + ) diff --git a/clang/lib/CIRFrontendAction/CMakeLists.txt b/clang/lib/CIRFrontendAction/CMakeLists.txt deleted file mode 100644 index 558787eb3a86..000000000000 --- a/clang/lib/CIRFrontendAction/CMakeLists.txt +++ /dev/null @@ -1,33 +0,0 @@ -set(LLVM_LINK_COMPONENTS - Core - Support - ) - -include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) -include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) - -get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) - -add_clang_library(clangCIRFrontendAction - CIRGenAction.cpp - - DEPENDS - MLIRCIROpsIncGen - - LINK_LIBS - clangAST - clangBasic - clangCodeGen - clangLex - clangFrontend - clangCIR - ${dialect_libs} - MLIRCIR - MLIRAnalysis - MLIRIR - MLIRParser - MLIRSideEffectInterfaces - MLIRTransforms - MLIRSupport - MLIRMemRefDialect - ) diff --git a/clang/lib/CMakeLists.txt b/clang/lib/CMakeLists.txt index 1ce6e24acff8..3cb5143e69db 100644 --- a/clang/lib/CMakeLists.txt +++ b/clang/lib/CMakeLists.txt @@ -32,7 +32,7 @@ endif() add_subdirectory(Interpreter) add_subdirectory(Support) -#if(CLANG_ENABLE_CIR) - add_subdirectory(CIR) - add_subdirectory(CIRFrontendAction) -#endif() +if(CLANG_ENABLE_CIR) + add_subdirectory(CIR/CodeGen) + add_subdirectory(CIR/FrontendAction) +endif() From 18bf272941acddbac78e5663aeff9f78f4b51a79 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 8 Sep 2022 20:05:23 -0700 Subject: [PATCH 0572/2301] [CIR] Move CIR dialect from mlir to clang This was discussed as part of the RFC and since we are going to require some AST presence in CIR soon, implement the dialect move sooner than later. --- clang/include/clang/CIR/CMakeLists.txt | 2 +- .../include/clang/CIR/Dialect/CMakeLists.txt | 35 +++++++++++++++++++ .../clang/CIR/Dialect}/IR/CIRAttrDefs.td | 0 .../include/clang/CIR/Dialect}/IR/CIRAttrs.h | 7 ++-- .../include/clang/CIR/Dialect}/IR/CIRAttrs.td | 5 +-- .../clang/CIR/Dialect}/IR/CIRDialect.h | 23 ++++++------ .../clang/CIR/Dialect}/IR/CIRDialect.td | 20 +++++------ .../include/clang/CIR/Dialect}/IR/CIROps.td | 14 ++++---- .../clang/CIR/Dialect}/IR/CIROpsEnums.h | 3 +- .../include/clang/CIR/Dialect}/IR/CIRTypes.h | 2 +- .../include/clang/CIR/Dialect}/IR/CIRTypes.td | 2 +- .../clang/CIR/Dialect/IR/CMakeLists.txt | 30 ++++++++++++++++ .../include/clang/CIR/Dialect}/Passes.h | 2 +- .../include/clang/CIR/Dialect}/Passes.td | 0 .../CIR/Dialect}/Transforms/CMakeLists.txt | 0 clang/include/clang/CMakeLists.txt | 3 ++ clang/lib/CIR/CMakeLists.txt | 3 ++ clang/lib/CIR/CodeGen/Address.h | 2 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 4 +-- clang/lib/CIR/CodeGen/CIRGenCall.h | 2 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 +-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h | 3 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 6 ++-- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 3 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypes.h | 4 +-- clang/lib/CIR/CodeGen/CIRGenValue.h | 2 +- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 1 - clang/lib/CIR/CodeGen/CIRPasses.cpp | 6 ++-- clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 2 +- clang/lib/CIR/Dialect/CMakeLists.txt | 5 +++ .../lib/CIR/Dialect}/IR/CIRAttrs.cpp | 13 +++---- .../lib/CIR/Dialect}/IR/CIRDialect.cpp | 22 ++++++------ .../lib/CIR/Dialect}/IR/CIRTypes.cpp | 9 ++--- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 17 +++++++++ .../CIR/Dialect}/Transforms/CMakeLists.txt | 5 +-- .../CIR/Dialect}/Transforms/LifetimeCheck.cpp | 7 ++-- .../CIR/Dialect}/Transforms/MergeCleanups.cpp | 7 ++-- .../lib/CIR/Dialect}/Transforms/PassDetail.h | 2 +- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 +- clang/lib/CMakeLists.txt | 3 +- clang/lib/Sema/CIRBasedWarnings.cpp | 2 +- clang/tools/cir-tool/cir-tool.cpp | 4 +-- mlir/include/mlir/Dialect/CIR/CMakeLists.txt | 9 ----- mlir/include/mlir/Dialect/CMakeLists.txt | 1 - mlir/include/mlir/InitAllDialects.h | 2 -- mlir/lib/Dialect/CIR/CMakeLists.txt | 2 -- mlir/lib/Dialect/CIR/IR/CMakeLists.txt | 21 ----------- mlir/lib/Dialect/CMakeLists.txt | 1 - 52 files changed, 193 insertions(+), 138 deletions(-) create mode 100644 clang/include/clang/CIR/Dialect/CMakeLists.txt rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/IR/CIRAttrDefs.td (100%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/IR/CIRAttrs.h (86%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/IR/CIRAttrs.td (96%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/IR/CIRDialect.h (76%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/IR/CIRDialect.td (59%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/IR/CIROps.td (99%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/IR/CIROpsEnums.h (98%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/IR/CIRTypes.h (94%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/IR/CIRTypes.td (98%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/Passes.h (96%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/Passes.td (100%) rename {mlir/include/mlir/Dialect/CIR => clang/include/clang/CIR/Dialect}/Transforms/CMakeLists.txt (100%) create mode 100644 clang/lib/CIR/CMakeLists.txt create mode 100644 clang/lib/CIR/Dialect/CMakeLists.txt rename {mlir/lib/Dialect/CIR => clang/lib/CIR/Dialect}/IR/CIRAttrs.cpp (85%) rename {mlir/lib/Dialect/CIR => clang/lib/CIR/Dialect}/IR/CIRDialect.cpp (98%) rename {mlir/lib/Dialect/CIR => clang/lib/CIR/Dialect}/IR/CIRTypes.cpp (94%) rename {mlir/lib/Dialect/CIR => clang/lib/CIR/Dialect}/Transforms/CMakeLists.txt (58%) rename {mlir/lib/Dialect/CIR => clang/lib/CIR/Dialect}/Transforms/LifetimeCheck.cpp (99%) rename {mlir/lib/Dialect/CIR => clang/lib/CIR/Dialect}/Transforms/MergeCleanups.cpp (98%) rename {mlir/lib/Dialect/CIR => clang/lib/CIR/Dialect}/Transforms/PassDetail.h (95%) delete mode 100644 mlir/include/mlir/Dialect/CIR/CMakeLists.txt delete mode 100644 mlir/lib/Dialect/CIR/CMakeLists.txt delete mode 100644 mlir/lib/Dialect/CIR/IR/CMakeLists.txt diff --git a/clang/include/clang/CIR/CMakeLists.txt b/clang/include/clang/CIR/CMakeLists.txt index f061051f41d5..f8d6f407a03d 100644 --- a/clang/include/clang/CIR/CMakeLists.txt +++ b/clang/include/clang/CIR/CMakeLists.txt @@ -3,4 +3,4 @@ set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include) include_directories(${MLIR_INCLUDE_DIR}) include_directories(${MLIR_TABLEGEN_OUTPUT_DIR}) -# add_subdirectory(Dialect) +add_subdirectory(Dialect) diff --git a/clang/include/clang/CIR/Dialect/CMakeLists.txt b/clang/include/clang/CIR/Dialect/CMakeLists.txt new file mode 100644 index 000000000000..3b066dfc15fb --- /dev/null +++ b/clang/include/clang/CIR/Dialect/CMakeLists.txt @@ -0,0 +1,35 @@ +set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --src-root +set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir +set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include) +set(MLIR_TABLEGEN_EXE $) +include_directories(SYSTEM ${MLIR_INCLUDE_DIR}) +include_directories(SYSTEM ${MLIR_TABLEGEN_OUTPUT_DIR}) + +add_custom_target(clang-cir-doc) + +# This replicates part of the add_mlir_doc cmake function from MLIR that cannot +# be used here. This happens because it expects to be run inside MLIR directory +# which is not the case for CIR (and also FIR, both have similar workarounds). +function(add_clang_mlir_doc doc_filename output_file output_directory command) + set(LLVM_TARGET_DEFINITIONS ${doc_filename}.td) + tablegen(MLIR ${output_file}.md ${command} ${ARGN} "-I${MLIR_MAIN_SRC_DIR}" "-I${MLIR_INCLUDE_DIR}") + set(GEN_DOC_FILE ${MLIR_BINARY_DIR}/docs/${output_directory}${output_file}.md) + add_custom_command( + OUTPUT ${GEN_DOC_FILE} + COMMAND ${CMAKE_COMMAND} -E copy + ${CMAKE_CURRENT_BINARY_DIR}/${output_file}.md + ${GEN_DOC_FILE} + DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${output_file}.md) + add_custom_target(${output_file}DocGen DEPENDS ${GEN_DOC_FILE}) + add_dependencies(clang-cir-doc ${output_file}DocGen) +endfunction() + +add_subdirectory(IR) + +set(LLVM_TARGET_DEFINITIONS Passes.td) +mlir_tablegen(Passes.h.inc -gen-pass-decls -name CIR) +mlir_tablegen(Passes.capi.h.inc -gen-pass-capi-header --prefix CIR) +mlir_tablegen(Passes.capi.cpp.inc -gen-pass-capi-impl --prefix CIR) +add_public_tablegen_target(MLIRCIRPassIncGen) + +add_clang_mlir_doc(Passes CIRPasses ./ -gen-pass-doc) diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrDefs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrDefs.td similarity index 100% rename from mlir/include/mlir/Dialect/CIR/IR/CIRAttrDefs.td rename to clang/include/clang/CIR/Dialect/IR/CIRAttrDefs.td diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h similarity index 86% rename from mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h rename to clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index 9dfa8184f941..fe58ad61e55b 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -13,8 +13,9 @@ #ifndef MLIR_DIALECT_CIR_IR_CIRATTRS_H_ #define MLIR_DIALECT_CIR_IR_CIRATTRS_H_ -#include "mlir/Dialect/CIR/IR/CIROpsEnums.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" + #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" @@ -23,6 +24,6 @@ //===----------------------------------------------------------------------===// #define GET_ATTRDEF_CLASSES -#include "mlir/Dialect/CIR/IR/CIROpsAttributes.h.inc" +#include "clang/CIR/Dialect/IR/CIROpsAttributes.h.inc" #endif // MLIR_DIALECT_CIR_IR_CIRATTRS_H_ diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td similarity index 96% rename from mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td rename to clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index fc310567af83..0a7fc34b0d9a 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -14,8 +14,9 @@ #define MLIR_CIR_DIALECT_CIR_ATTRS include "mlir/IR/BuiltinAttributeInterfaces.td" -include "mlir/Dialect/CIR/IR/CIRDialect.td" -include "mlir/Dialect/CIR/IR/CIRTypes.td" + +include "clang/CIR/Dialect/IR/CIRDialect.td" +include "clang/CIR/Dialect/IR/CIRTypes.td" //===----------------------------------------------------------------------===// // CIR Attrs diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h similarity index 76% rename from mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h rename to clang/include/clang/CIR/Dialect/IR/CIRDialect.h index a7baf967fc0c..ef56711fed72 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -1,4 +1,4 @@ -//===- CIRDialect.h - MLIR Dialect for CIR ----------------------*- C++ -*-===// +//===- CIRDialect.h - CIR dialect -------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -6,12 +6,12 @@ // //===----------------------------------------------------------------------===// // -// This file declares the Target dialect for CIR in MLIR. +// This file declares the CIR dialect. // //===----------------------------------------------------------------------===// -#ifndef MLIR_DIALECT_CIR_CIRDIALECT_H_ -#define MLIR_DIALECT_CIR_CIRDIALECT_H_ +#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H +#define LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" @@ -25,11 +25,11 @@ #include "mlir/Interfaces/LoopLikeInterface.h" #include "mlir/Interfaces/SideEffectInterfaces.h" -#include "mlir/Dialect/CIR/IR/CIRAttrs.h" -#include "mlir/Dialect/CIR/IR/CIROpsDialect.h.inc" -#include "mlir/Dialect/CIR/IR/CIROpsEnums.h" -#include "mlir/Dialect/CIR/IR/CIROpsStructs.h.inc" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIROpsDialect.h.inc" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIROpsStructs.h.inc" +#include "clang/CIR/Dialect/IR/CIRTypes.h" namespace mlir { namespace OpTrait { @@ -62,6 +62,7 @@ void buildTerminatedBody(OpBuilder &builder, Location loc); } // namespace mlir #define GET_OP_CLASSES -#include "mlir/Dialect/CIR/IR/CIROps.h.inc" +#include "clang/CIR/Dialect/IR/CIROps.h.inc" + +#endif // LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H -#endif // MLIR_DIALECT_CIR_CIRDIALECT_H_ diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td similarity index 59% rename from mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td rename to clang/include/clang/CIR/Dialect/IR/CIRDialect.td index 8f756fa422e5..69d6e9774942 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -1,4 +1,4 @@ -//===- CIRTypes.td - CIR dialect types ---------------------*- tablegen -*-===// +//===- CIRDialect.td - CIR dialect -------------------------*- tablegen -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -10,8 +10,8 @@ // //===----------------------------------------------------------------------===// -#ifndef MLIR_CIR_DIALECT_CIR -#define MLIR_CIR_DIALECT_CIR +#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT +#define LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT include "mlir/IR/OpBase.td" @@ -31,16 +31,14 @@ def CIR_Dialect : Dialect { void registerAttributes(); void registerTypes(); - ::mlir::Type parseType(::mlir::DialectAsmParser &parser) const override; - void printType(::mlir::Type type, - ::mlir::DialectAsmPrinter &printer) const override; + Type parseType(DialectAsmParser &parser) const override; + void printType(Type type, DialectAsmPrinter &printer) const override; - ::mlir::Attribute parseAttribute(::mlir::DialectAsmParser &parser, - ::mlir::Type type) const override; + Attribute parseAttribute(DialectAsmParser &parser, + Type type) const override; - void printAttribute(::mlir::Attribute attr, - ::mlir::DialectAsmPrinter &os) const override; + void printAttribute(Attribute attr, DialectAsmPrinter &os) const override; }]; } -#endif // MLIR_CIR_DIALECT_CIR +#endif // LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td similarity index 99% rename from mlir/include/mlir/Dialect/CIR/IR/CIROps.td rename to clang/include/clang/CIR/Dialect/IR/CIROps.td index b04b7071648e..e2b41dda3e0e 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -11,12 +11,12 @@ /// //===----------------------------------------------------------------------===// -#ifndef MLIR_CIR_DIALECT_CIR_OPS -#define MLIR_CIR_DIALECT_CIR_OPS +#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIROPS +#define LLVM_CLANG_CIR_DIALECT_IR_CIROPS -include "mlir/Dialect/CIR/IR/CIRDialect.td" -include "mlir/Dialect/CIR/IR/CIRTypes.td" -include "mlir/Dialect/CIR/IR/CIRAttrs.td" +include "clang/CIR/Dialect/IR/CIRDialect.td" +include "clang/CIR/Dialect/IR/CIRTypes.td" +include "clang/CIR/Dialect/IR/CIRAttrs.td" include "mlir/Interfaces/CallInterfaces.td" include "mlir/Interfaces/ControlFlowInterfaces.td" @@ -541,7 +541,7 @@ def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods":$scopeBuilder)> ]; } @@ -1271,4 +1271,4 @@ def CallOp : CIR_Op<"call", let hasVerifier = 0; } -#endif // MLIR_CIR_DIALECT_CIR_OPS +#endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h similarity index 98% rename from mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h rename to clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h index 8583569c84e8..7adfee6b482b 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIROpsEnums.h +++ b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h @@ -15,7 +15,8 @@ #define MLIR_DIALECT_CIR_CIROPSENUMS_H_ #include "mlir/IR/BuiltinAttributes.h" -#include "mlir/Dialect/CIR/IR/CIROpsEnums.h.inc" + +#include "clang/CIR/Dialect/IR/CIROpsEnums.h.inc" namespace mlir { namespace cir { diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h similarity index 94% rename from mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h rename to clang/include/clang/CIR/Dialect/IR/CIRTypes.h index f2e4f5bdd2da..4d0a3d77bc62 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -21,6 +21,6 @@ //===----------------------------------------------------------------------===// #define GET_TYPEDEF_CLASSES -#include "mlir/Dialect/CIR/IR/CIROpsTypes.h.inc" +#include "clang/CIR/Dialect/IR/CIROpsTypes.h.inc" #endif // MLIR_DIALECT_CIR_IR_CIRTYPES_H_ diff --git a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td similarity index 98% rename from mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td rename to clang/include/clang/CIR/Dialect/IR/CIRTypes.td index f610e9c2ebd4..1af173e235e2 100644 --- a/mlir/include/mlir/Dialect/CIR/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -13,7 +13,7 @@ #ifndef MLIR_CIR_DIALECT_CIR_TYPES #define MLIR_CIR_DIALECT_CIR_TYPES -include "mlir/Dialect/CIR/IR/CIRDialect.td" +include "clang/CIR/Dialect/IR/CIRDialect.td" include "mlir/IR/AttrTypeBase.td" //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt index e69de29bb2d1..f49d08ce63d8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt @@ -0,0 +1,30 @@ +# This replicates part of the add_mlir_dialect cmake function from MLIR that +# cannot be used here. This happens because it expects to be run inside MLIR +# directory which is not the case for CIR (and also FIR, both have similar +# workarounds). + +# Equivalent to add_mlir_dialect(CIROps cir) +set(LLVM_TARGET_DEFINITIONS CIROps.td) +mlir_tablegen(CIROps.h.inc -gen-op-decls) +mlir_tablegen(CIROps.cpp.inc -gen-op-defs) +mlir_tablegen(CIROpsTypes.h.inc -gen-typedef-decls) +mlir_tablegen(CIROpsTypes.cpp.inc -gen-typedef-defs) +mlir_tablegen(CIROpsDialect.h.inc -gen-dialect-decls) +mlir_tablegen(CIROpsDialect.cpp.inc -gen-dialect-defs) +add_public_tablegen_target(MLIRCIROpsIncGen) +add_dependencies(mlir-headers MLIRCIROpsIncGen) + +# Equivalent to add_mlir_doc +add_clang_mlir_doc(CIRDialect CIRDialect Dialects/ -gen-dialect-doc) +add_clang_mlir_doc(CIROps CIROps Dialects/ -gen-op-doc) +add_clang_mlir_doc(CIRAttrs CIRAttrs Dialects/ -gen-attrdef-doc) +add_clang_mlir_doc(CIRTypes CIRTypes Dialects/ -gen-typedef-doc) + +# Generate extra headers for custom enum and attrs. +mlir_tablegen(CIROpsEnums.h.inc -gen-enum-decls) +mlir_tablegen(CIROpsEnums.cpp.inc -gen-enum-defs) +mlir_tablegen(CIROpsStructs.h.inc -gen-attrdef-decls) +mlir_tablegen(CIROpsStructs.cpp.inc -gen-attrdef-defs) +mlir_tablegen(CIROpsAttributes.h.inc -gen-attrdef-decls) +mlir_tablegen(CIROpsAttributes.cpp.inc -gen-attrdef-defs) +add_public_tablegen_target(MLIRCIREnumsGen) diff --git a/mlir/include/mlir/Dialect/CIR/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h similarity index 96% rename from mlir/include/mlir/Dialect/CIR/Passes.h rename to clang/include/clang/CIR/Dialect/Passes.h index fe6512eab798..8aa3e6c71b3a 100644 --- a/mlir/include/mlir/Dialect/CIR/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -26,7 +26,7 @@ std::unique_ptr createMergeCleanupsPass(); /// Generate the code for registering passes. #define GEN_PASS_REGISTRATION -#include "mlir/Dialect/CIR/Passes.h.inc" +#include "clang/CIR/Dialect/Passes.h.inc" } // namespace mlir diff --git a/mlir/include/mlir/Dialect/CIR/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td similarity index 100% rename from mlir/include/mlir/Dialect/CIR/Passes.td rename to clang/include/clang/CIR/Dialect/Passes.td diff --git a/mlir/include/mlir/Dialect/CIR/Transforms/CMakeLists.txt b/clang/include/clang/CIR/Dialect/Transforms/CMakeLists.txt similarity index 100% rename from mlir/include/mlir/Dialect/CIR/Transforms/CMakeLists.txt rename to clang/include/clang/CIR/Dialect/Transforms/CMakeLists.txt diff --git a/clang/include/clang/CMakeLists.txt b/clang/include/clang/CMakeLists.txt index 0dc9ea5ed8ac..47ac70cd2169 100644 --- a/clang/include/clang/CMakeLists.txt +++ b/clang/include/clang/CMakeLists.txt @@ -1,5 +1,8 @@ add_subdirectory(AST) add_subdirectory(Basic) +if(CLANG_ENABLE_CIR) + add_subdirectory(CIR) +endif() add_subdirectory(Driver) add_subdirectory(Parse) add_subdirectory(Sema) diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt new file mode 100644 index 000000000000..abdbe92614d7 --- /dev/null +++ b/clang/lib/CIR/CMakeLists.txt @@ -0,0 +1,3 @@ +add_subdirectory(Dialect) +add_subdirectory(CodeGen) +add_subdirectory(FrontendAction) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 140d6d883d25..5a9de3098327 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -15,10 +15,10 @@ #define LLVM_CLANG_LIB_CIR_ADDRESS_H #include "clang/AST/CharUnits.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/IR/Constants.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Value.h" namespace cir { diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 7030f97ea7cf..356bd85e34b6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -18,8 +18,8 @@ #include "clang/AST/DeclCXX.h" #include "clang/AST/GlobalDecl.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" @@ -402,7 +402,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // callLoc, mlir::SymbolRefAttr::get(CalleePtr), // CalleePtr.getType().getResults(), CIRCallArgs); auto theCall = CGM.getBuilder().create(callLoc, CalleePtr, - CIRCallArgs); + CIRCallArgs); if (callOrInvoke) callOrInvoke = &theCall; diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 1f29c136270f..4e8543765536 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -21,7 +21,7 @@ #include "llvm/ADT/SmallVector.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "mlir/IR/BuiltinOps.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 643344bf145a..f5b1e423acc0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -17,8 +17,8 @@ #include "UnimplementedFeatureGuarding.h" #include "clang/AST/GlobalDecl.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Value.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index b16c2ac59861..323f7017a843 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -15,9 +15,9 @@ #include "UnimplementedFeatureGuarding.h" #include "clang/AST/StmtVisitor.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Value.h" using namespace cir; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 83a707a1c927..c293a2707b5b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -17,8 +17,8 @@ #include "clang/AST/ASTLambda.h" #include "clang/AST/ExprObjC.h" #include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" using namespace cir; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h index 312836667fe7..50ffea4e94f1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h @@ -16,12 +16,11 @@ #define LLVM_CLANG_CIR_CIRGENFUNCTIONINFO_H #include "clang/AST/CanonicalType.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/Support/TrailingObjects.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" - namespace cir { /// ABIArgInfo - Helper class to encapsulate information about how a specific C diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 384cd79ee2a0..4be37d92d8d6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -21,13 +21,13 @@ #include "clang/AST/StmtVisitor.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/ScopedHashTable.h" #include "llvm/ADT/SmallPtrSet.h" -#include "mlir/Dialect/CIR/IR/CIRAttrs.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index ceb5ea6d29eb..84ef7292f423 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -12,8 +12,7 @@ #include "CIRGenTypes.h" #include "clang/AST/Decl.h" - -#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" namespace cir { diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index e337e462c163..e7b188a049ee 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -5,7 +5,6 @@ #include "CallingConv.h" #include "TargetInfo.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" @@ -15,6 +14,7 @@ #include "clang/AST/Expr.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/RecordLayout.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" using namespace clang; using namespace cir; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 0e9446b09280..43a71c307928 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -20,10 +20,10 @@ #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/ABI.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/SmallPtrSet.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/MLIRContext.h" #include @@ -146,7 +146,7 @@ class CIRGenTypes { mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl); std::unique_ptr - computeRecordLayout(const clang::RecordDecl *D, mlir::cir::StructType& Ty); + computeRecordLayout(const clang::RecordDecl *D, mlir::cir::StructType &Ty); std::string getRecordTypeName(const clang::RecordDecl *, llvm::StringRef suffix); diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 35c8a072e271..9e3a344cc12f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -19,10 +19,10 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" #include "clang/AST/Type.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/PointerIntPair.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" #include "mlir/IR/Value.h" namespace cir { diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 51c05c56c96b..5cae13759ced 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -12,7 +12,6 @@ #include "CIRGenModule.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/MLIRContext.h" diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 08bd1fdaf4b7..3447da22d3cf 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -10,8 +10,8 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/CIR/IR/CIRDialect.h" -#include "mlir/Dialect/CIR/Passes.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" @@ -28,4 +28,4 @@ void runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, llvm::report_fatal_error( "CIR codegen: MLIR pass manager fails when running CIR passes!"); } -} // namespace cir \ No newline at end of file +} // namespace cir diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index dc922e6ba18e..509d9660dbc3 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -37,6 +37,7 @@ add_clang_library(clangCIR DEPENDS MLIRCIR MLIRCIROpsIncGen + ${dialect_libs} LINK_LIBS clangAST diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 8563d92a450f..e276417fd8de 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -20,7 +20,6 @@ #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Arith/IR/Arith.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" @@ -33,6 +32,7 @@ #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/Sequence.h" diff --git a/clang/lib/CIR/Dialect/CMakeLists.txt b/clang/lib/CIR/Dialect/CMakeLists.txt new file mode 100644 index 000000000000..5690e9b2fe61 --- /dev/null +++ b/clang/lib/CIR/Dialect/CMakeLists.txt @@ -0,0 +1,5 @@ +include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) +include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) + +add_subdirectory(IR) +add_subdirectory(Transforms) diff --git a/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp similarity index 85% rename from mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp rename to clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 70e253c159c6..2e32879ab8e7 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -1,4 +1,4 @@ -//===- CIRTypes.cpp - MLIR CIR Types --------------------------------------===// +//===- CIRAttrs.cpp - MLIR CIR Attributes ---------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -10,9 +10,10 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/CIR/IR/CIRAttrs.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" -#include "mlir/Dialect/CIR/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" + #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" @@ -23,7 +24,7 @@ #include "llvm/ADT/TypeSwitch.h" #define GET_ATTRDEF_CLASSES -#include "mlir/Dialect/CIR/IR/CIROpsAttributes.cpp.inc" +#include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" using namespace mlir; using namespace mlir::cir; @@ -57,6 +58,6 @@ void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const { void CIRDialect::registerAttributes() { addAttributes< #define GET_ATTRDEF_LIST -#include "mlir/Dialect/CIR/IR/CIROpsAttributes.cpp.inc" +#include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" >(); } diff --git a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp similarity index 98% rename from mlir/lib/Dialect/CIR/IR/CIRDialect.cpp rename to clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 43a630f540fe..5a7a34230ff8 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -10,9 +10,9 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/CIR/IR/CIRDialect.h" -#include "mlir/Dialect/CIR/IR/CIRAttrs.h" -#include "mlir/Dialect/CIR/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" @@ -27,10 +27,10 @@ using namespace mlir; using namespace mlir::cir; -#include "mlir/Dialect/CIR/IR/CIROpsEnums.cpp.inc" -#include "mlir/Dialect/CIR/IR/CIROpsStructs.cpp.inc" +#include "clang/CIR/Dialect/IR/CIROpsEnums.cpp.inc" +#include "clang/CIR/Dialect/IR/CIROpsStructs.cpp.inc" -#include "mlir/Dialect/CIR/IR/CIROpsDialect.cpp.inc" +#include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc" //===----------------------------------------------------------------------===// // CIR Dialect @@ -57,7 +57,7 @@ void cir::CIRDialect::initialize() { registerAttributes(); addOperations< #define GET_OP_LIST -#include "mlir/Dialect/CIR/IR/CIROps.cpp.inc" +#include "clang/CIR/Dialect/IR/CIROps.cpp.inc" >(); addInterfaces(); } @@ -79,12 +79,10 @@ static int parseOptionalKeywordAlternative(OpAsmParser &parser, } namespace { -template -struct EnumTraits {}; +template struct EnumTraits {}; #define REGISTER_ENUM_TYPE(Ty) \ - template <> \ - struct EnumTraits { \ + template <> struct EnumTraits { \ static StringRef stringify(Ty value) { return stringify##Ty(value); } \ static unsigned getMaxEnumVal() { return getMaxEnumValFor##Ty(); } \ } @@ -1427,4 +1425,4 @@ void CstArrayAttr::print(::mlir::AsmPrinter &printer) const { //===----------------------------------------------------------------------===// #define GET_OP_CLASSES -#include "mlir/Dialect/CIR/IR/CIROps.cpp.inc" +#include "clang/CIR/Dialect/IR/CIROps.cpp.inc" diff --git a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp similarity index 94% rename from mlir/lib/Dialect/CIR/IR/CIRTypes.cpp rename to clang/lib/CIR/Dialect/IR/CIRTypes.cpp index b0daa75752d7..a28cb6efdb34 100644 --- a/mlir/lib/Dialect/CIR/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -10,8 +10,9 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/CIR/IR/CIRTypes.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectImplementation.h" @@ -20,7 +21,7 @@ #include "llvm/ADT/TypeSwitch.h" #define GET_TYPEDEF_CLASSES -#include "mlir/Dialect/CIR/IR/CIROpsTypes.cpp.inc" +#include "clang/CIR/Dialect/IR/CIROpsTypes.cpp.inc" using namespace mlir; using namespace mlir::cir; @@ -122,6 +123,6 @@ void ArrayType::print(mlir::AsmPrinter &printer) const { void CIRDialect::registerTypes() { addTypes< #define GET_TYPEDEF_LIST -#include "mlir/Dialect/CIR/IR/CIROpsTypes.cpp.inc" +#include "clang/CIR/Dialect/IR/CIROpsTypes.cpp.inc" >(); } diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index e69de29bb2d1..fd1be8998647 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -0,0 +1,17 @@ +add_clang_library(MLIRCIR + CIRAttrs.cpp + CIRDialect.cpp + CIRTypes.cpp + + DEPENDS + MLIRBuiltinLocationAttributesIncGen + MLIRCIROpsIncGen + MLIRCIREnumsGen + MLIRSymbolInterfacesIncGen + + LINK_LIBS PUBLIC + MLIRIR + MLIRFuncDialect + MLIRLLVMDialect + MLIRSideEffectInterfaces + ) diff --git a/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt similarity index 58% rename from mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt rename to clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 89b335608117..52e47a5cb413 100644 --- a/mlir/lib/Dialect/CIR/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -1,10 +1,7 @@ -add_mlir_dialect_library(MLIRCIRTransforms +add_clang_library(MLIRCIRTransforms LifetimeCheck.cpp MergeCleanups.cpp - ADDITIONAL_HEADER_DIRS - ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/CIR - DEPENDS MLIRCIRPassIncGen diff --git a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp similarity index 99% rename from mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp rename to clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 7ed7b3ddf5ac..28415835506a 100644 --- a/mlir/lib/Dialect/CIR/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -6,10 +6,11 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/CIR/Passes.h" - #include "PassDetail.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" + +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" + #include "mlir/Dialect/Func/IR/FuncOps.h" #include "llvm/ADT/SetOperations.h" diff --git a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp similarity index 98% rename from mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp rename to clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index 9c9042f00ba3..60958e3327c8 100644 --- a/mlir/lib/Dialect/CIR/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -6,10 +6,11 @@ // //===----------------------------------------------------------------------===// -#include "mlir/Dialect/CIR/Passes.h" - #include "PassDetail.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" + +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" + #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Matchers.h" diff --git a/mlir/lib/Dialect/CIR/Transforms/PassDetail.h b/clang/lib/CIR/Dialect/Transforms/PassDetail.h similarity index 95% rename from mlir/lib/Dialect/CIR/Transforms/PassDetail.h rename to clang/lib/CIR/Dialect/Transforms/PassDetail.h index 4942e34f284b..2fdcfbda61e5 100644 --- a/mlir/lib/Dialect/CIR/Transforms/PassDetail.h +++ b/clang/lib/CIR/Dialect/Transforms/PassDetail.h @@ -22,7 +22,7 @@ class CIRDialect; } // namespace cir #define GEN_PASS_CLASSES -#include "mlir/Dialect/CIR/Passes.h.inc" +#include "clang/CIR/Dialect/Passes.h.inc" } // namespace mlir diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 0f36a3cef2ec..7e61e97814f9 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -7,7 +7,6 @@ //===----------------------------------------------------------------------===// #include "clang/CIRFrontendAction/CIRGenAction.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/BuiltinOps.h" @@ -25,6 +24,7 @@ #include "clang/Basic/TargetInfo.h" #include "clang/CIR/CIRGenerator.h" #include "clang/CIR/CIRToCIRPasses.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/LowerToLLVM.h" #include "clang/CodeGen/BackendUtil.h" #include "clang/CodeGen/ModuleBuilder.h" diff --git a/clang/lib/CMakeLists.txt b/clang/lib/CMakeLists.txt index 3cb5143e69db..14ba55360fe0 100644 --- a/clang/lib/CMakeLists.txt +++ b/clang/lib/CMakeLists.txt @@ -33,6 +33,5 @@ add_subdirectory(Interpreter) add_subdirectory(Support) if(CLANG_ENABLE_CIR) - add_subdirectory(CIR/CodeGen) - add_subdirectory(CIR/FrontendAction) + add_subdirectory(CIR) endif() diff --git a/clang/lib/Sema/CIRBasedWarnings.cpp b/clang/lib/Sema/CIRBasedWarnings.cpp index 1ad5e8b2e25f..bc66f36bbfef 100644 --- a/clang/lib/Sema/CIRBasedWarnings.cpp +++ b/clang/lib/Sema/CIRBasedWarnings.cpp @@ -36,8 +36,8 @@ #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include #include diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index 2f5f6260a502..cf3818a2f289 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -13,14 +13,14 @@ //===----------------------------------------------------------------------===// #include "mlir/Dialect/Arith/IR/Arith.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" -#include "mlir/Dialect/CIR/Passes.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/InitAllPasses.h" #include "mlir/Pass/PassRegistry.h" #include "mlir/Tools/mlir-opt/MlirOptMain.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/Passes.h" int main(int argc, char **argv) { diff --git a/mlir/include/mlir/Dialect/CIR/CMakeLists.txt b/mlir/include/mlir/Dialect/CIR/CMakeLists.txt deleted file mode 100644 index ece82e6a3676..000000000000 --- a/mlir/include/mlir/Dialect/CIR/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -add_subdirectory(IR) - -set(LLVM_TARGET_DEFINITIONS Passes.td) -mlir_tablegen(Passes.h.inc -gen-pass-decls -name CIR) -mlir_tablegen(Passes.capi.h.inc -gen-pass-capi-header --prefix CIR) -mlir_tablegen(Passes.capi.cpp.inc -gen-pass-capi-impl --prefix CIR) -add_public_tablegen_target(MLIRCIRPassIncGen) - -add_mlir_doc(Passes CIRPasses ./ -gen-pass-doc) diff --git a/mlir/include/mlir/Dialect/CMakeLists.txt b/mlir/include/mlir/Dialect/CMakeLists.txt index aebf25d02830..f71023519733 100644 --- a/mlir/include/mlir/Dialect/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/CMakeLists.txt @@ -7,7 +7,6 @@ add_subdirectory(ArmSME) add_subdirectory(ArmSVE) add_subdirectory(Async) add_subdirectory(Bufferization) -add_subdirectory(CIR) add_subdirectory(Complex) add_subdirectory(ControlFlow) add_subdirectory(DLTI) diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h index 97841338e32f..0da82825c828 100644 --- a/mlir/include/mlir/InitAllDialects.h +++ b/mlir/include/mlir/InitAllDialects.h @@ -29,7 +29,6 @@ #include "mlir/Dialect/Async/IR/Async.h" #include "mlir/Dialect/Bufferization/IR/Bufferization.h" #include "mlir/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" #include "mlir/Dialect/Complex/IR/Complex.h" #include "mlir/Dialect/ControlFlow/IR/ControlFlow.h" #include "mlir/Dialect/ControlFlow/Transforms/BufferDeallocationOpInterfaceImpl.h" @@ -118,7 +117,6 @@ inline void registerAllDialects(DialectRegistry ®istry) { async::AsyncDialect, bufferization::BufferizationDialect, cf::ControlFlowDialect, - cir::CIRDialect, complex::ComplexDialect, DLTIDialect, emitc::EmitCDialect, diff --git a/mlir/lib/Dialect/CIR/CMakeLists.txt b/mlir/lib/Dialect/CIR/CMakeLists.txt deleted file mode 100644 index 9f57627c321f..000000000000 --- a/mlir/lib/Dialect/CIR/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_subdirectory(IR) -add_subdirectory(Transforms) diff --git a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt b/mlir/lib/Dialect/CIR/IR/CMakeLists.txt deleted file mode 100644 index fbfb52f6113d..000000000000 --- a/mlir/lib/Dialect/CIR/IR/CMakeLists.txt +++ /dev/null @@ -1,21 +0,0 @@ -add_mlir_dialect_library(MLIRCIR - CIRAttrs.cpp - CIRDialect.cpp - CIRTypes.cpp - CIRAttrs.cpp - - ADDITIONAL_HEADER_DIRS - ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/CIR - - DEPENDS - MLIRBuiltinLocationAttributesIncGen - MLIRCIROpsIncGen - MLIRCIREnumsGen - MLIRSymbolInterfacesIncGen - - LINK_LIBS PUBLIC - MLIRIR - MLIRFuncDialect - MLIRLLVMDialect - MLIRSideEffectInterfaces - ) diff --git a/mlir/lib/Dialect/CMakeLists.txt b/mlir/lib/Dialect/CMakeLists.txt index 9f4ed94f244d..80b0ef068d96 100644 --- a/mlir/lib/Dialect/CMakeLists.txt +++ b/mlir/lib/Dialect/CMakeLists.txt @@ -7,7 +7,6 @@ add_subdirectory(ArmSME) add_subdirectory(ArmSVE) add_subdirectory(Async) add_subdirectory(Bufferization) -add_subdirectory(CIR) add_subdirectory(Complex) add_subdirectory(ControlFlow) add_subdirectory(DLTI) From a063517c5259a0c13c68e64818548b8e0ab692be Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 22 Sep 2022 13:01:13 -0700 Subject: [PATCH 0573/2301] [CIR] Remove CIRBasedWarnigns from Sema This is not being used and makes Sema depends on CIR/CodeGen, which feels quite odd. When we get real about AnalyzerBasedWarnings we can introduce the functionality as additional passes in the pipeline via our custom frontend actions (which did not exist when this was first done). --- .../clang/Sema/AnalysisBasedWarnings.h | 5 +- clang/include/clang/Sema/CIRBasedWarnings.h | 65 ---------- clang/include/clang/Sema/Sema.h | 1 - clang/lib/Driver/ToolChains/Clang.cpp | 4 - clang/lib/Sema/CIRBasedWarnings.cpp | 122 ------------------ clang/lib/Sema/CMakeLists.txt | 16 --- clang/test/Driver/cir.c | 12 -- llvm/docs/CIR.rst | 58 --------- 8 files changed, 1 insertion(+), 282 deletions(-) delete mode 100644 clang/include/clang/Sema/CIRBasedWarnings.h delete mode 100644 clang/lib/Sema/CIRBasedWarnings.cpp delete mode 100644 clang/test/Driver/cir.c delete mode 100644 llvm/docs/CIR.rst diff --git a/clang/include/clang/Sema/AnalysisBasedWarnings.h b/clang/include/clang/Sema/AnalysisBasedWarnings.h index 92430f1982d0..6aac70021ec7 100644 --- a/clang/include/clang/Sema/AnalysisBasedWarnings.h +++ b/clang/include/clang/Sema/AnalysisBasedWarnings.h @@ -19,15 +19,13 @@ namespace clang { -class BlockExpr; class Decl; class FunctionDecl; class QualType; class Sema; namespace sema { -class CIRBasedWarnings; -class FunctionScopeInfo; + class FunctionScopeInfo; } namespace sema { @@ -36,7 +34,6 @@ class AnalysisBasedWarnings { public: class Policy { friend class AnalysisBasedWarnings; - friend class CIRBasedWarnings; // The warnings to run. LLVM_PREFERRED_TYPE(bool) unsigned enableCheckFallThrough : 1; diff --git a/clang/include/clang/Sema/CIRBasedWarnings.h b/clang/include/clang/Sema/CIRBasedWarnings.h deleted file mode 100644 index b050e8940215..000000000000 --- a/clang/include/clang/Sema/CIRBasedWarnings.h +++ /dev/null @@ -1,65 +0,0 @@ -//=- CIRBasedWarnings.h - Sema warnings based on libAnalysis -*- C++ -*-=// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file defines CIRBasedWarnings, a worker object used by Sema -// that issues warnings based on dataflow-analysis. -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_SEMA_CIRBASEDWARNINGS_H -#define LLVM_CLANG_SEMA_CIRBASEDWARNINGS_H - -#include "clang/Sema/AnalysisBasedWarnings.h" -#include "llvm/ADT/DenseMap.h" -#include - -namespace cir { -class CIRGenerator; -} // namespace cir -namespace clang { - -class BlockExpr; -class Decl; -class FunctionDecl; -class ObjCMethodDecl; -class QualType; -class Sema; - -namespace sema { - -class FunctionScopeInfo; - -class CIRBasedWarnings { -private: - Sema &S; - AnalysisBasedWarnings::Policy DefaultPolicy; - // std::unique_ptr CIRGen; - - //class InterProceduralData; - //std::unique_ptr IPData; - - enum VisitFlag { NotVisited = 0, Visited = 1, Pending = 2 }; - llvm::DenseMap VisitedFD; - - /// @} - -public: - CIRBasedWarnings(Sema &s); - ~CIRBasedWarnings(); - - void IssueWarnings(AnalysisBasedWarnings::Policy P, FunctionScopeInfo *fscope, - const Decl *D, QualType BlockType); - - //Policy getDefaultPolicy() { return DefaultPolicy; } - - void PrintStats() const; -}; - -} // namespace sema -} // namespace clang - -#endif diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index aebba1afed0c..4d6e02fe2956 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -55,7 +55,6 @@ #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/Attr.h" -#include "clang/Sema/CIRBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index f0c94daafeaa..d3329609c9d6 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -8103,10 +8103,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-fmv"); } - if (Args.hasFlag(options::OPT_fcir_warnings, options::OPT_fno_cir_warnings, - false)) - CmdArgs.push_back("-fcir-warnings"); - if (Args.hasFlag(options::OPT_faddrsig, options::OPT_fno_addrsig, (TC.getTriple().isOSBinFormatELF() || TC.getTriple().isOSBinFormatCOFF()) && diff --git a/clang/lib/Sema/CIRBasedWarnings.cpp b/clang/lib/Sema/CIRBasedWarnings.cpp deleted file mode 100644 index bc66f36bbfef..000000000000 --- a/clang/lib/Sema/CIRBasedWarnings.cpp +++ /dev/null @@ -1,122 +0,0 @@ -//=- CIRBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ ----*-=// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file defines analysis_warnings::[Policy,Executor]. -// Together they are used by Sema to issue warnings based on inexpensive -// static analysis algorithms using ClangIR. -// -//===----------------------------------------------------------------------===// - -#include "clang/Sema/CIRBasedWarnings.h" -#include "clang/AST/DeclCXX.h" -#include "clang/AST/DeclObjC.h" -#include "clang/AST/EvaluatedExprVisitor.h" -#include "clang/AST/ExprCXX.h" -#include "clang/AST/ExprObjC.h" -#include "clang/AST/ParentMap.h" -#include "clang/AST/RecursiveASTVisitor.h" -#include "clang/AST/StmtCXX.h" -#include "clang/AST/StmtObjC.h" -#include "clang/AST/StmtVisitor.h" -#include "clang/Basic/SourceLocation.h" -#include "clang/Basic/SourceManager.h" -#include "clang/Lex/Preprocessor.h" -#include "clang/Sema/ScopeInfo.h" -#include "clang/Sema/SemaInternal.h" -#include "llvm/ADT/ArrayRef.h" -#include "llvm/ADT/BitVector.h" -#include "llvm/ADT/MapVector.h" -#include "llvm/ADT/SmallString.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/StringRef.h" -#include "llvm/Support/Casting.h" - -#include "clang/CIR/CIRGenerator.h" -#include "clang/CIR/Dialect/IR/CIRDialect.h" - -#include -#include -#include - -using namespace clang; - -/// -/// CIRBasedWarnings -/// -static unsigned isEnabled(DiagnosticsEngine &D, unsigned diag) { - return (unsigned)!D.isIgnored(diag, SourceLocation()); -} - -sema::CIRBasedWarnings::CIRBasedWarnings(Sema &s) : S(s) { - - using namespace diag; - DiagnosticsEngine &D = S.getDiagnostics(); - - DefaultPolicy.enableCheckUnreachable = - isEnabled(D, warn_unreachable) || isEnabled(D, warn_unreachable_break) || - isEnabled(D, warn_unreachable_return) || - isEnabled(D, warn_unreachable_loop_increment); - - DefaultPolicy.enableThreadSafetyAnalysis = isEnabled(D, warn_double_lock); - - DefaultPolicy.enableConsumedAnalysis = - isEnabled(D, warn_use_in_invalid_state); - - // CIRGen = std::make_unique(D, CodeGenOptions()); - // CIRGen->Initialize(S.getASTContext()); -} - -// We need this here for unique_ptr with forward declared class. -sema::CIRBasedWarnings::~CIRBasedWarnings() = default; - -static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) { - for (const auto &D : fscope->PossiblyUnreachableDiags) - S.Diag(D.Loc, D.PD); -} - -void clang::sema::CIRBasedWarnings::IssueWarnings( - sema::AnalysisBasedWarnings::Policy P, sema::FunctionScopeInfo *fscope, - const Decl *D, QualType BlockType) { - // We avoid doing analysis-based warnings when there are errors for - // two reasons: - // (1) The CFGs often can't be constructed (if the body is invalid), so - // don't bother trying. - // (2) The code already has problems; running the analysis just takes more - // time. - DiagnosticsEngine &Diags = S.getDiagnostics(); - - // Do not do any analysis if we are going to just ignore them. - if (Diags.getIgnoreAllWarnings() || - (Diags.getSuppressSystemWarnings() && - S.SourceMgr.isInSystemHeader(D->getLocation()))) - return; - - // For code in dependent contexts, we'll do this at instantiation time. - if (cast(D)->isDependentContext()) - return; - - if (S.hasUncompilableErrorOccurred()) { - // Flush out any possibly unreachable diagnostics. - flushDiagnostics(S, fscope); - return; - } - - const FunctionDecl *FD = dyn_cast(D); - assert(FD && "Only know how to handle functions"); - - // TODO: up to this point this behaves the same as - // AnalysisBasedWarnings::IssueWarnings - - // Unlike Clang CFG, we share CIR state between each analyzed function, - // retrieve or create a new context. - // CIRGen->EmitFunction(FD); -} - -void clang::sema::CIRBasedWarnings::PrintStats() const { - llvm::errs() << "\n*** CIR Based Warnings Stats:\n"; -} diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt index ad545fda4dad..fce42d60a615 100644 --- a/clang/lib/Sema/CMakeLists.txt +++ b/clang/lib/Sema/CMakeLists.txt @@ -13,16 +13,10 @@ clang_tablegen(OpenCLBuiltins.inc -gen-clang-opencl-builtins TARGET ClangOpenCLBuiltinsImpl ) -include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) -include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) - -get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) - add_clang_library(clangSema AnalysisBasedWarnings.cpp CheckExprLifetime.cpp CodeCompleteConsumer.cpp - CIRBasedWarnings.cpp DeclSpec.cpp DelayedDiagnostic.cpp HeuristicResolver.cpp @@ -117,14 +111,4 @@ add_clang_library(clangSema clangEdit clangLex clangSupport - - clangCIR - ${dialect_libs} - MLIRCIR - MLIRAnalysis - MLIRIR - MLIRParser - MLIRSideEffectInterfaces - MLIRTransforms - MLIRSupport ) diff --git a/clang/test/Driver/cir.c b/clang/test/Driver/cir.c deleted file mode 100644 index ab8dfbb28277..000000000000 --- a/clang/test/Driver/cir.c +++ /dev/null @@ -1,12 +0,0 @@ -// RUN: %clang -### -target x86_64-unknown-linux -c %s 2>&1 | FileCheck -check-prefix=NO-CIR %s -// RUN: %clang -### -target x86_64-pc-win32 -c %s 2>&1 | FileCheck -check-prefix=NO-CIR %s -// RUN: %clang -### -target x86_64-scei-ps4 -c %s 2>&1 | FileCheck -check-prefix=NO-CIR %s -// RUN: %clang -### -target x86_64-linux-android21 -c %s 2>&1 | FileCheck -check-prefix=NO-CIR %s - -// RUN: %clang -### -target x86_64-unknown-linux -c -fcir-warnings %s 2>&1 | FileCheck -check-prefix=CIR %s -// RUN: %clang -### -target x86_64-pc-win32 -c -fcir-warnings %s 2>&1 | FileCheck -check-prefix=CIR %s -// RUN: %clang -### -target x86_64-scei-ps4 -c -fcir-warnings %s 2>&1 | FileCheck -check-prefix=CIR %s -// RUN: %clang -### -target x86_64-linux-android21 -c -fcir-warnings %s 2>&1 | FileCheck -check-prefix=CIR %s - -// CIR: -fcir-warnings -// NO-CIR-NOT: -fcir-warnings diff --git a/llvm/docs/CIR.rst b/llvm/docs/CIR.rst deleted file mode 100644 index da5830338d3a..000000000000 --- a/llvm/docs/CIR.rst +++ /dev/null @@ -1,58 +0,0 @@ -=============================== -CIR - Clang IR Design and Implementation -=============================== - -.. contents:: - :local: - -Introduction -============ - -This document aims to provide an overview of the design and -implementation of a Clang IR, a high level IR allowing more -analysis and future optimizations. - -CIR is used as a short for ClangIR over commit messages and -other related code. - -Usage in Clang -============== - -Current strategy is to replace analysis based warnings with -analysis on top of CIR, using ``-fcir-warnings`` turns on such -analysis (current none). - -The ``-fcir-output`` and ``-fcir-output=`` flags can be used -to output the generated CIR (currently needs to be combined with -``-fcir-warnings`` to work). - -Additionally, clang can run it's full compilation pipeline with -the CIR phase inserted between clang and llvm. Passing -``-fclangir`` to ``clang -cc1`` will opt in to clang generating -CIR which is lowered to LLVMIR and continued through the -backend. (WIP -- the backend is not yet functional). - -A new flag ``-emit-cir`` can be used in combination with -``-fclangir`` to emit pristine CIR right out of the CIRGen phase. - -Adding flags to select between different levels of lowerings -between MLIR dialects (e.g.to STD/Affine/SCF) are a WIP. - - -Implementation Notes -==================== - -- ``PopFunctionScopeInfo`` is the currentt entry point for CFG usage -in ``AnalysisBasedWarning.cpp``. The same entry point is used by the -CIR builder to emit functions. - -TODO's -====== -- LValues - - Add proper alignment information -- Other module related emission besides functions (and all currently -end of translation defered stuff). -- Some data structures used for LLVM codegen can be made more -generic and be reused from CIRBuilder. Duplicating content right -now to prevent potential frequent merge conflicts. - - Split out into header files all potential common code. From 23183509ea818f4eef9503812d0b455fb09ad7e2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 15 Sep 2022 08:15:48 -0700 Subject: [PATCH 0574/2301] [CIR] Add 'cir' to LLVM_ENABLE_PROJECTS and make ClangIR build optional via CMAKE - Also fix the way we do testing and disable when CIR not available. --- clang/include/clang/Config/config.h.cmake | 3 +++ clang/lib/FrontendTool/CMakeLists.txt | 7 +++++- .../ExecuteCompilerInvocation.cpp | 25 ++++++++++++++----- clang/test/CMakeLists.txt | 9 +++++-- clang/tools/CMakeLists.txt | 4 ++- llvm/CMakeLists.txt | 20 ++++++++++++++- 6 files changed, 57 insertions(+), 11 deletions(-) diff --git a/clang/include/clang/Config/config.h.cmake b/clang/include/clang/Config/config.h.cmake index 4015ac804086..27ed69e21562 100644 --- a/clang/include/clang/Config/config.h.cmake +++ b/clang/include/clang/Config/config.h.cmake @@ -83,4 +83,7 @@ /* Spawn a new process clang.exe for the CC1 tool invocation, when necessary */ #cmakedefine01 CLANG_SPAWN_CC1 +/* Whether CIR is built into Clang */ +#cmakedefine01 CLANG_ENABLE_CIR + #endif diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index 37d6aec93a1f..cb70041b6914 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -9,10 +9,15 @@ set(link_libs clangDriver clangExtractAPI clangFrontend - clangCIRFrontendAction clangRewriteFrontend ) +if(CLANG_ENABLE_CIR) + list(APPEND link_libs + clangCIRFrontendAction + ) +endif() + if(CLANG_ENABLE_ARCMT) list(APPEND link_libs clangARCMigrate diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 898ba773ca7e..2ce55d1d41be 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -12,7 +12,6 @@ //===----------------------------------------------------------------------===// #include "clang/ARCMigrate/ARCMTActions.h" -#include "clang/CIRFrontendAction/CIRGenAction.h" #include "clang/CodeGen/CodeGenAction.h" #include "clang/Config/config.h" #include "clang/Driver/Options.h" @@ -32,8 +31,12 @@ #include "llvm/Support/BuryPointer.h" #include "llvm/Support/DynamicLibrary.h" #include "llvm/Support/ErrorHandling.h" + +#if CLANG_ENABLE_CIR +#include "clang/CIRFrontendAction/CIRGenAction.h" +#endif + using namespace clang; -using namespace cir; using namespace llvm::opt; namespace clang { @@ -68,19 +71,29 @@ CreateFrontendBaseAction(CompilerInstance &CI) { case DumpTokens: return std::make_unique(); case EmitAssembly: return std::make_unique(); case EmitBC: return std::make_unique(); - case EmitCIR: return std::make_unique(); - case EmitCIROnly: return std::make_unique(); +#if CLANG_ENABLE_CIR + case EmitCIR: return std::make_unique<::cir::EmitCIRAction>(); + case EmitCIROnly: return std::make_unique<::cir::EmitCIROnlyAction>(); +#else + case EmitCIR: + case EmitCIROnly: + llvm_unreachable("CIR suppport not built into clang"); +#endif case EmitHTML: return std::make_unique(); case EmitLLVM: { +#if CLANG_ENABLE_CIR if (UseCIR) - return std::make_unique(); + return std::make_unique<::cir::EmitLLVMAction>(); +#endif return std::make_unique(); } case EmitLLVMOnly: return std::make_unique(); case EmitCodeGenOnly: return std::make_unique(); case EmitObj: { +#if CLANG_ENABLE_CIR if (UseCIR) - return std::make_unique(); + return std::make_unique<::cir::EmitObjAction>(); +#endif return std::make_unique(); } case ExtractAPI: diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt index f624bb4929e9..3bacf428e27c 100644 --- a/clang/test/CMakeLists.txt +++ b/clang/test/CMakeLists.txt @@ -64,7 +64,6 @@ endif () list(APPEND CLANG_TEST_DEPS apinotes-test c-index-test - cir-tool clang clang-fuzzer-dictionary clang-resource-headers @@ -85,7 +84,13 @@ list(APPEND CLANG_TEST_DEPS hmaptool mlir-translate ) - + +if(CLANG_ENABLE_CIR) + list(APPEND CLANG_TEST_DEPS + cir-tool + ) +endif() + if(CLANG_ENABLE_STATIC_ANALYZER) list(APPEND CLANG_TEST_DEPS clang-check diff --git a/clang/tools/CMakeLists.txt b/clang/tools/CMakeLists.txt index 82217b8e0395..b9d2561dfdcb 100644 --- a/clang/tools/CMakeLists.txt +++ b/clang/tools/CMakeLists.txt @@ -3,7 +3,9 @@ create_subdirectory_options(CLANG TOOL) add_clang_subdirectory(diagtool) add_clang_subdirectory(driver) add_clang_subdirectory(apinotes-test) -add_clang_subdirectory(cir-tool) +if(CLANG_ENABLE_CIR) + add_clang_subdirectory(cir-tool) +endif() add_clang_subdirectory(clang-diff) add_clang_subdirectory(clang-format) add_clang_subdirectory(clang-fuzzer) diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt index ad12100fdb5b..0e40b398fcfb 100644 --- a/llvm/CMakeLists.txt +++ b/llvm/CMakeLists.txt @@ -116,7 +116,7 @@ endif() # LLVM_EXTERNAL_${project}_SOURCE_DIR using LLVM_ALL_PROJECTS # This allows an easy way of setting up a build directory for llvm and another # one for llvm+clang+... using the same sources. -set(LLVM_ALL_PROJECTS "bolt;clang;clang-tools-extra;compiler-rt;cross-project-tests;libc;libclc;lld;lldb;mlir;openmp;polly;pstl") +set(LLVM_ALL_PROJECTS "bolt;cir;clang;clang-tools-extra;compiler-rt;cross-project-tests;libc;libclc;lld;lldb;mlir;openmp;polly;pstl") if (${CMAKE_SYSTEM_NAME} MATCHES "AIX") # Disallow 'openmp' as a LLVM PROJECT on AIX as the supported way is to use # LLVM_ENABLE_RUNTIMES. @@ -157,6 +157,17 @@ if ("libc" IN_LIST LLVM_ENABLE_PROJECTS) "https://libc.llvm.org/ for building the runtimes.") endif() +if ("cir" IN_LIST LLVM_ENABLE_PROJECTS) + if (NOT "mlir" IN_LIST LLVM_ENABLE_PROJECTS) + message(STATUS "Enabling MLIR as a dependency to CIR") + list(APPEND LLVM_ENABLE_PROJECTS "mlir") + endif() + + if (NOT "clang" IN_LIST LLVM_ENABLE_PROJECTS) + message(FATAL_ERROR "Clang is not enabled, but is required to use CIR") + endif() +endif() + # Select the runtimes to build # # As we migrate runtimes to using the bootstrapping build, the set of default runtimes @@ -226,6 +237,13 @@ if (LLVM_ENABLE_PROJECTS_USED OR NOT LLVM_ENABLE_PROJECTS STREQUAL "") string(REGEX REPLACE "-" "_" upper_proj ${upper_proj}) if ("${proj}" IN_LIST LLVM_ENABLE_PROJECTS) message(STATUS "${proj} project is enabled") + # ClangIR is integrated inside clang and also provides the cir-tool, + # it needs some special handling. + if ("${proj}" STREQUAL "cir") + set(CLANG_ENABLE_CIR ON) + continue() + endif() + set(SHOULD_ENABLE_PROJECT TRUE) set(PROJ_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../${proj}") if(NOT EXISTS "${PROJ_DIR}" OR NOT IS_DIRECTORY "${PROJ_DIR}") From 3fc6ff6a3235cc8dfec06e22448aaf6152418ea5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 20 Sep 2022 11:39:18 -0700 Subject: [PATCH 0575/2301] [CIR][Docs] Fix cmake output dir and remove empty docs for CIRDialect --- clang/include/clang/CIR/Dialect/CMakeLists.txt | 2 +- clang/include/clang/CIR/Dialect/IR/CMakeLists.txt | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/CMakeLists.txt b/clang/include/clang/CIR/Dialect/CMakeLists.txt index 3b066dfc15fb..f4c99a2b9a8f 100644 --- a/clang/include/clang/CIR/Dialect/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/CMakeLists.txt @@ -13,7 +13,7 @@ add_custom_target(clang-cir-doc) function(add_clang_mlir_doc doc_filename output_file output_directory command) set(LLVM_TARGET_DEFINITIONS ${doc_filename}.td) tablegen(MLIR ${output_file}.md ${command} ${ARGN} "-I${MLIR_MAIN_SRC_DIR}" "-I${MLIR_INCLUDE_DIR}") - set(GEN_DOC_FILE ${MLIR_BINARY_DIR}/docs/${output_directory}${output_file}.md) + set(GEN_DOC_FILE ${CLANG_BINARY_DIR}/docs/${output_directory}${output_file}.md) add_custom_command( OUTPUT ${GEN_DOC_FILE} COMMAND ${CMAKE_COMMAND} -E copy diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt index f49d08ce63d8..c502525d30e8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt @@ -15,7 +15,6 @@ add_public_tablegen_target(MLIRCIROpsIncGen) add_dependencies(mlir-headers MLIRCIROpsIncGen) # Equivalent to add_mlir_doc -add_clang_mlir_doc(CIRDialect CIRDialect Dialects/ -gen-dialect-doc) add_clang_mlir_doc(CIROps CIROps Dialects/ -gen-op-doc) add_clang_mlir_doc(CIRAttrs CIRAttrs Dialects/ -gen-attrdef-doc) add_clang_mlir_doc(CIRTypes CIRTypes Dialects/ -gen-typedef-doc) From c90acbb51c496d3968e39664b48d5ff115424424 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 22 Sep 2022 17:52:06 -0700 Subject: [PATCH 0576/2301] [CIR] Hook the first AST node into CIR: FunctionDecl - Add the attribute that wraps the AST node and a class helper. - Do proper includes. - Optionally attachable to cir.func - Add generic verifier. Next we should teach codegen to pass those in. --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 4 +++ .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 35 +++++++++++++++++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 3 ++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 21 +++++++++++ 5 files changed, 65 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index fe58ad61e55b..bb9b425b2465 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -23,6 +23,10 @@ // CIR Dialect Attrs //===----------------------------------------------------------------------===// +namespace clang { +class FunctionDecl; +} + #define GET_ATTRDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsAttributes.h.inc" diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 0a7fc34b0d9a..2c44680948c6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -27,6 +27,10 @@ class CIR_Attr traits = []> let mnemonic = attrMnemonic; } +//===----------------------------------------------------------------------===// +// NullAttr +//===----------------------------------------------------------------------===// + def NullAttr : CIR_Attr<"Null", "null", [TypedAttrInterface]> { let summary = "A simple attr to represent nullptr"; let description = [{ @@ -38,6 +42,10 @@ def NullAttr : CIR_Attr<"Null", "null", [TypedAttrInterface]> { let assemblyFormat = [{}]; } +//===----------------------------------------------------------------------===// +// CstArrayAttr +//===----------------------------------------------------------------------===// + def CstArrayAttr : CIR_Attr<"CstArray", "cst_array", [TypedAttrInterface]> { let summary = "A constant array from ArrayAttr or StringRefAttr"; let description = [{ @@ -71,4 +79,31 @@ def CstArrayAttr : CIR_Attr<"CstArray", "cst_array", [TypedAttrInterface]> { let genVerifyDecl = 1; } +//===----------------------------------------------------------------------===// +// AST Wrappers +//===----------------------------------------------------------------------===// + +class ASTDecl traits = []> + : CIR_Attr { + string clang_name = !strconcat("clang::", name); + + let summary = !strconcat("Wraps a ", clang_name, " AST node."); + let description = [{ + Operations optionally refer to this node, they could be available depending + on the CIR lowering stage. Whether it's attached to the appropriated + CIR operation is delegated to the operation verifier. + + This always implies a non-null AST reference (verified). + }]; + let parameters = (ins !strconcat(clang_name, " *"):$astDecl); + + // Printing and parsing available in CIRDialect.cpp + let hasCustomAssemblyFormat = 1; + + // Enable verifier. + let genVerifyDecl = 1; +} + +def ASTFunctionDeclAttr : ASTDecl<"FunctionDecl">; + #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e2b41dda3e0e..b2e665e41e27 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1149,7 +1149,8 @@ def FuncOp : CIR_Op<"func", [ "GlobalLinkageKind::ExternalLinkage">:$linkage, OptionalAttr:$sym_visibility, OptionalAttr:$arg_attrs, - OptionalAttr:$res_attrs); + OptionalAttr:$res_attrs, + OptionalAttr:$ast); let regions = (region AnyRegion:$body); let skipDefaultBuilders = 1; diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 2e32879ab8e7..1621bf38e87e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -23,6 +23,9 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/TypeSwitch.h" +// ClangIR holds back AST references when available. +#include "clang/AST/Decl.h" + #define GET_ATTRDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 5a7a34230ff8..9ec99217d126 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1420,6 +1420,27 @@ void CstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << ">"; } +::mlir::Attribute ASTFunctionDeclAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { + // We cannot really parse anything AST related at this point + // since we have no serialization/JSON story. + return mlir::Attribute(); +} + +void ASTFunctionDeclAttr::print(::mlir::AsmPrinter &printer) const { + // Nothing to print besides the mnemonics. +} + +LogicalResult ASTFunctionDeclAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::clang::FunctionDecl *decl) { + if (!decl) { + emitError() << "expected non-null AST declaration"; + return failure(); + } + return success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From 7fe01a321e2593105b87b424e187ede050bf6361 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 23 Sep 2022 15:49:01 -0700 Subject: [PATCH 0577/2301] Update README.td to be more concise Remove the LLVM bits --- README.md | 67 +------------------------------------------------------ 1 file changed, 1 insertion(+), 66 deletions(-) diff --git a/README.md b/README.md index 58d7e29a0cf7..3dd79abc4b3e 100644 --- a/README.md +++ b/README.md @@ -1,68 +1,3 @@ # ClangIR (CIR) -For more information see https://clangir.org. The rest of this document -fallbacks to llvm-project's default `README.md`. - ---- - -# The LLVM Compiler Infrastructure - -[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/llvm/llvm-project/badge)](https://securityscorecards.dev/viewer/?uri=github.com/llvm/llvm-project) -[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8273/badge)](https://www.bestpractices.dev/projects/8273) -[![libc++](https://github.com/llvm/llvm-project/actions/workflows/libcxx-build-and-test.yaml/badge.svg?branch=main&event=schedule)](https://github.com/llvm/llvm-project/actions/workflows/libcxx-build-and-test.yaml?query=event%3Aschedule) - -Welcome to the LLVM project! - -This repository contains the source code for LLVM, a toolkit for the -construction of highly optimized compilers, optimizers, and run-time -environments. - -The LLVM project has multiple components. The core of the project is -itself called "LLVM". This contains all of the tools, libraries, and header -files needed to process intermediate representations and convert them into -object files. Tools include an assembler, disassembler, bitcode analyzer, and -bitcode optimizer. - -C-like languages use the [Clang](https://clang.llvm.org/) frontend. This -component compiles C, C++, Objective-C, and Objective-C++ code into LLVM bitcode --- and from there into object files, using LLVM. - -Other components include: -the [libc++ C++ standard library](https://libcxx.llvm.org), -the [LLD linker](https://lld.llvm.org), and more. - -## Getting the Source Code and Building LLVM - -Consult the -[Getting Started with LLVM](https://llvm.org/docs/GettingStarted.html#getting-the-source-code-and-building-llvm) -page for information on building and running LLVM. - -For information on how to contribute to the LLVM project, please take a look at -the [Contributing to LLVM](https://llvm.org/docs/Contributing.html) guide. - -## Getting in touch - -Join the [LLVM Discourse forums](https://discourse.llvm.org/), [Discord -chat](https://discord.gg/xS7Z362), -[LLVM Office Hours](https://llvm.org/docs/GettingInvolved.html#office-hours) or -[Regular sync-ups](https://llvm.org/docs/GettingInvolved.html#online-sync-ups). - -The LLVM project has adopted a [code of conduct](https://llvm.org/docs/CodeOfConduct.html) for -participants to all modes of communication within the project. - -### License - -ClangIR is based off https://github.com/llvm/llvm-project and uses the same -license. This ClangIR project is under the Apache License v2.0 with LLVM -Exceptions. Please see the `LICENSE.TXT` for the full details. - -## Contributing - -Check our [contributing guide](CONTRIBUTING.md) to learn about how to -contribute to the project. - -## Code Of Confuct - -Check our [Code Of Conduct](CODE_OF_CONDUCT.md) to learn more about our -contributor standards and expectations. - +Check https://clangir.org for general information, build instructions and documentation. From b8487967407128493a3207366722a3224871cb04 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 26 Sep 2022 15:57:37 -0700 Subject: [PATCH 0578/2301] [CIR][CodeGen][NFC] Fix CallOp related comments --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 5 ----- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 356bd85e34b6..3ce685deb2f0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -396,11 +396,6 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // Emit the actual call op. auto callLoc = CGM.getLoc(Loc); - - // FIXME: Used to be: - // auto theCall = CGM.getBuilder().create( - // callLoc, mlir::SymbolRefAttr::get(CalleePtr), - // CalleePtr.getType().getResults(), CIRCallArgs); auto theCall = CGM.getBuilder().create(callLoc, CalleePtr, CIRCallArgs); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index c14f4025e645..af172b1439af 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -544,7 +544,7 @@ class CIRGenFunction { AbstractCallee AC = AbstractCallee(), unsigned ParamsToSkip = 0, EvaluationOrder Order = EvaluationOrder::Default); - /// buildCall - Generate a call of the given function, expecting the given + /// Generate a call of the given function, expecting the given /// result type, and using the given argument list which specifies both the /// LLVM arguments and the types they were derived from. RValue buildCall(const CIRGenFunctionInfo &CallInfo, From 35d0ec75049594de48686031221b056d4cf4ead1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Sep 2022 12:09:08 -0700 Subject: [PATCH 0579/2301] [CIR][NFC] Break default method emission to run after CIR passes This prevents us from doing unecessary codegen until we need the body of default methods. Some analysis won't require going inside such methods. In the future we should only enable the codegen in the pipeline that goes out to LLVM codegen. --- clang/include/clang/CIR/CIRGenerator.h | 1 + clang/lib/CIR/CodeGen/CIRGenModule.cpp | 94 ++++++++++++------- clang/lib/CIR/CodeGen/CIRGenModule.h | 14 +++ clang/lib/CIR/CodeGen/CIRGenerator.cpp | 2 + clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 9 +- 5 files changed, 83 insertions(+), 37 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 734865fa24ed..5d71311f5e61 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -92,6 +92,7 @@ class CIRGenerator : public clang::ASTConsumer { bool verifyModule(); void buildDeferredDecls(); + void buildDefaultMethods(); }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 84d6b1062628..70574944a3ad 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1476,6 +1476,14 @@ mlir::cir::FuncOp CIRGenModule::createCIRFunction(mlir::Location loc, return f; } +bool isDefaultedMethod(const clang::FunctionDecl *FD) { + if (FD->isDefaulted() && isa(FD) && + (cast(FD)->isCopyAssignmentOperator() || + cast(FD)->isMoveAssignmentOperator())) + return true; + return false; +} + /// If the specified mangled name is not in the module, /// create and return a CIR Function with the specified type. If there is /// something in the module with the specified name, return it potentially @@ -1613,7 +1621,10 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( FD = FD->getPreviousDecl()) { if (isa(FD->getLexicalDeclContext())) { if (FD->doesThisDeclarationHaveABody()) { - addDeferredDeclToEmit(GD.getWithDecl(FD)); + if (isDefaultedMethod(FD)) + addDefaultMethodsToEmit(GD.getWithDecl(FD)); + else + addDeferredDeclToEmit(GD.getWithDecl(FD)); break; } } @@ -1651,6 +1662,44 @@ mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { return mlir::FusedLoc::get(locs, metadata, builder.getContext()); } +void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { + // We should call GetAddrOfGlobal with IsForDefinition set to true in order + // to get a Value with exactly the type we need, not something that might + // have been created for another decl with the same mangled name but + // different type. + auto *Op = GetAddrOfGlobal(D, ForDefinition); + + // In case of different address spaces, we may still get a cast, even with + // IsForDefinition equal to true. Query mangled names table to get + // GlobalValue. + if (!Op) { + Op = getGlobalValue(getMangledName(D)); + } + + // Make sure getGlobalValue returned non-null. + assert(Op); + assert(isa(Op) && + "not implemented, only supports FuncOp for now"); + + // Check to see if we've already emitted this. This is necessary for a + // couple of reasons: first, decls can end up in deferred-decls queue + // multiple times, and second, decls can end up with definitions in unusual + // ways (e.g. by an extern inline function acquiring a strong function + // redefinition). Just ignore those cases. + // TODO: Not sure what to map this to for MLIR + if (auto Fn = cast(Op)) + if (!Fn.isDeclaration()) + return; + + // If this is OpenMP, check if it is legal to emit this global normally. + if (getLangOpts().OpenMP) { + llvm_unreachable("NYI"); + } + + // Otherwise, emit the definition and move on to the next one. + buildGlobalDefinition(D, Op); +} + void CIRGenModule::buildDeferred() { // Emit deferred declare target declarations if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) @@ -1681,41 +1730,7 @@ void CIRGenModule::buildDeferred() { CurDeclsToEmit.swap(DeferredDeclsToEmit); for (auto &D : CurDeclsToEmit) { - // We should call GetAddrOfGlobal with IsForDefinition set to true in order - // to get a Value with exactly the type we need, not something that might - // have been created for another decl with the same mangled name but - // different type. - auto *Op = GetAddrOfGlobal(D, ForDefinition); - - // In case of different address spaces, we may still get a cast, even with - // IsForDefinition equal to true. Query mangled names table to get - // GlobalValue. - if (!Op) { - Op = getGlobalValue(getMangledName(D)); - } - - // Make sure getGlobalValue returned non-null. - assert(Op); - assert(isa(Op) && - "not implemented, only supports FuncOp for now"); - - // Check to see if we've already emitted this. This is necessary for a - // couple of reasons: first, decls can end up in deferred-decls queue - // multiple times, and second, decls can end up with definitions in unusual - // ways (e.g. by an extern inline function acquiring a strong function - // redefinition). Just ignore those cases. - // TODO: Not sure what to map this to for MLIR - if (auto Fn = cast(Op)) - if (!Fn.isDeclaration()) - continue; - - // If this is OpenMP, check if it is legal to emit this global normally. - if (getLangOpts().OpenMP) { - llvm_unreachable("NYI"); - } - - // Otherwise, emit the definition and move on to the next one. - buildGlobalDefinition(D, Op); + buildGlobalDecl(D); // If we found out that we need to emit more decls, do that recursively. // This has the advantage that the decls are emitted in a DFS and related @@ -1727,6 +1742,13 @@ void CIRGenModule::buildDeferred() { } } +void CIRGenModule::buildDefaultMethods() { + // Differently from DeferredDeclsToEmit, there's no recurrent use of + // DefaultMethodsToEmit, so use it directly for emission. + for (auto &D : DefaultMethodsToEmit) + buildGlobalDecl(D); +} + mlir::IntegerAttr CIRGenModule::getSize(CharUnits size) { return mlir::IntegerAttr::get( mlir::IntegerType::get(builder.getContext(), 64), size.getQuantity()); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 4be37d92d8d6..f7c377b5b548 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -262,6 +262,14 @@ class CIRGenModule { DeferredDeclsToEmit.emplace_back(GD); } + // After HandleTranslation finishes, differently from DeferredDeclsToEmit, + // DefaultMethodsToEmit is only called after a set of CIR passes run. See + // addDefaultMethodsToEmit usage for examples. + std::vector DefaultMethodsToEmit; + void addDefaultMethodsToEmit(clang::GlobalDecl GD) { + DefaultMethodsToEmit.emplace_back(GD); + } + std::pair getAddrAndTypeOfCXXStructor( clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, mlir::FunctionType FnType = nullptr, bool Dontdefer = false, @@ -329,6 +337,12 @@ class CIRGenModule { /// Emit any needed decls for which code generation was deferred. void buildDeferred(); + /// Helper for `buildDeferred` to apply actual codegen. + void buildGlobalDecl(clang::GlobalDecl &D); + + /// Build default methods not emitted before this point. + void buildDefaultMethods(); + const llvm::Triple &getTriple() const { return target.getTriple(); } // Finalize CIR code generation. diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 5cae13759ced..c95c1b2f2b45 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -106,6 +106,8 @@ void CIRGenerator::HandleInlineFunctionDefinition(FunctionDecl *D) { CGM->AddDeferredUnusedCoverageMapping(D); } +void CIRGenerator::buildDefaultMethods() { CGM->buildDefaultMethods(); } + void CIRGenerator::buildDeferredDecls() { if (DeferredInlineMemberFuncDefs.empty()) return; diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 7e61e97814f9..06beb0626d6a 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -146,12 +146,19 @@ class CIRGenConsumer : public clang::ASTConsumer { switch (action) { case CIRGenAction::OutputType::EmitCIR: if (outputStream && mlirMod) { + + // Run CIR cleanup, in the future also the relevent raising and + // some code analysis. if (!feOptions.DisableCIRPasses) { runCIRToCIRPasses(mlirMod, mlirCtx.get(), !feOptions.DisableCIRVerifier); } - mlir::OpPrintingFlags flags; + + // Emit remaining defaulted C++ methods + gen->buildDefaultMethods(); + // FIXME: we cannot roundtrip prettyForm=true right now. + mlir::OpPrintingFlags flags; flags.enableDebugInfo(/*prettyForm=*/false); mlirMod->print(*outputStream, flags); } From e8bc9aaeaafa35c40e77773240e62264266c6aec Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Mon, 26 Sep 2022 08:21:54 -0700 Subject: [PATCH 0580/2301] [CIR][CodeGen] Add missing conversion patterns for ControlFlowDialect Fixes #4. --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 3 +++ clang/test/CIR/CIRToLLVM/goto.cir | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index e276417fd8de..b89d82019d03 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -12,6 +12,7 @@ #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" #include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h" +#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" #include "mlir/Conversion/LLVMCommon/ConversionTarget.h" @@ -492,6 +493,8 @@ void ConvertCIRToLLVMPass::runOnOperation() { populateAffineToStdConversionPatterns(patterns); mlir::arith::populateArithToLLVMConversionPatterns(typeConverter, patterns); populateSCFToControlFlowConversionPatterns(patterns); + mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, + patterns); populateFinalizeMemRefToLLVMConversionPatterns(typeConverter, patterns); populateFuncToLLVMConversionPatterns(typeConverter, patterns); diff --git a/clang/test/CIR/CIRToLLVM/goto.cir b/clang/test/CIR/CIRToLLVM/goto.cir index b82303d202b3..a70f65b2bf88 100644 --- a/clang/test/CIR/CIRToLLVM/goto.cir +++ b/clang/test/CIR/CIRToLLVM/goto.cir @@ -1,8 +1,5 @@ // RUN: cir-tool %s -canonicalize -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -canonicalize -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM - -// FIXME: after rebasing against July's 2022 mlir, we get "failed to legalize -// operation 'cf.br'" from -cir-to-llvm // XFAIL: * module { From 2d542df54adbd912cc3fed92686211a7630ce397 Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Sat, 17 Sep 2022 22:18:43 -0700 Subject: [PATCH 0581/2301] [CIR] Add cir::UnaryOp operation For now covering increment/decrement. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 45 ++++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 27 ++++++++++++ clang/test/CIR/IR/invalid.cir | 23 ++++++++++ 3 files changed, 95 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b2e665e41e27..e6a86f68844b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -546,6 +546,51 @@ def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods; +def UnaryOpKind_Dec : I32EnumAttrCase<"Dec", 2, "dec">; + +def UnaryOpKind : I32EnumAttr< + "UnaryOpKind", + "unary operation kind", + [UnaryOpKind_Inc, + UnaryOpKind_Dec]> { + let cppNamespace = "::mlir::cir"; +} + +// FIXME: NoSideEffect won't work when we add overloading. +def UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> { + let summary = "Unary operations"; + let description = [{ + `cir.unary` performs the unary operation according to + the specified opcode kind: [inc, dec]. + + Note for inc and dec: the operation corresponds only to the + addition/subtraction, its input is expect to come from a load + and the result to be used by a corresponding store. + + It requires one input operand and has one result, both types + should be the same. + + ```mlir + %7 = cir.unary(inc, %1) : i32 -> i32 + %8 = cir.unary(dec, %2) : i32 -> i32 + ``` + }]; + + let results = (outs AnyType:$result); + let arguments = (ins Arg:$kind, Arg:$input); + + let assemblyFormat = [{ + `(` $kind `,` $input `)` `:` type($input) `,` type($result) attr-dict + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // BinOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9ec99217d126..ecf70c3982fe 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1291,6 +1291,33 @@ FunctionType CallOp::getCalleeType() { return FunctionType::get(getContext(), getOperandTypes(), getResultTypes()); } +//===----------------------------------------------------------------------===// +// UnaryOp +//===----------------------------------------------------------------------===// + +LogicalResult UnaryOp::verify() { + switch (getKind()) { + case cir::UnaryOpKind::Inc: + LLVM_FALLTHROUGH; + case cir::UnaryOpKind::Dec: { + // TODO: Consider looking at the memory interface instead of LoadOp/StoreOp. + auto loadOp = getInput().getDefiningOp(); + if (!loadOp) + return emitOpError() << "requires input to be defined by a memory load"; + + for (const auto user : getResult().getUsers()) { + auto storeOp = dyn_cast(user); + if (storeOp && storeOp.getAddr() == loadOp.getAddr()) + return success(); + } + return emitOpError() << "requires result to be used by a memory store " + "to the same address as the input memory load"; + } + } + + llvm_unreachable("Unknown UnaryOp kind?"); +} + //===----------------------------------------------------------------------===// // CIR defined traits //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 998da7fa3fb8..6bd947f4ca9e 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -198,3 +198,26 @@ module { module { cir.global "public" internal @v = 3 : i32 // expected-error {{public visibility not allowed with 'internal' linkage}} } + +// ----- + +cir.func @unary0() { + %0 = cir.alloca i32, cir.ptr , ["a", cinit] {alignment = 4 : i64} + %1 = cir.cst(2 : i32) : i32 + + %3 = cir.unary(inc, %1) : i32, i32 // expected-error {{'cir.unary' op requires input to be defined by a memory load}} + cir.store %3, %0 : i32, cir.ptr + cir.return +} + +// ----- + +cir.func @unary1() { + %0 = cir.alloca i32, cir.ptr , ["a", cinit] {alignment = 4 : i64} + %1 = cir.cst(2 : i32) : i32 + cir.store %1, %0 : i32, cir.ptr + + %2 = cir.load %0 : cir.ptr , i32 + %3 = cir.unary(dec, %2) : i32, i32 // // expected-error {{'cir.unary' op requires result to be used by a memory store to the same address as the input memory load}} + cir.return +} From 7bcb46f42bb8dcbd84ad8338b9b8265035f2bf4e Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Sun, 4 Sep 2022 00:59:18 -0700 Subject: [PATCH 0582/2301] [CIR][CodeGen] Add basic support for increment/decrement --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 112 ++++++++++++++++++++- clang/test/CIR/CodeGen/inc-dec.cpp | 55 ++++++++++ 2 files changed, 163 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/inc-dec.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 323f7017a843..619d2f63ff3b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -216,16 +216,113 @@ class ScalarExprEmitter : public StmtVisitor { // Unary Operators. mlir::Value VisitUnaryPostDec(const UnaryOperator *E) { - llvm_unreachable("NYI"); + return buildScalarPrePostIncDec(E); } mlir::Value VisitUnaryPostInc(const UnaryOperator *E) { - llvm_unreachable("NYI"); + return buildScalarPrePostIncDec(E); } mlir::Value VisitUnaryPreDec(const UnaryOperator *E) { - llvm_unreachable("NYI"); + return buildScalarPrePostIncDec(E); } mlir::Value VisitUnaryPreInc(const UnaryOperator *E) { - llvm_unreachable("NYI"); + return buildScalarPrePostIncDec(E); + } + mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E) { + QualType type = E->getSubExpr()->getType(); + + auto LV = CGF.buildLValue(E->getSubExpr()); + mlir::Value Value; + mlir::Value Input; + + if (const AtomicType *atomicTy = type->getAs()) { + assert(0 && "no atomics inc/dec yet"); + } else { + Value = buildLoadOfLValue(LV, E->getExprLoc()); + Input = Value; + } + + // NOTE: When possible, more frequent cases are handled first. + + // Special case of integer increment that we have to check first: bool++. + // Due to promotion rules, we get: + // bool++ -> bool = bool + 1 + // -> bool = (int)bool + 1 + // -> bool = ((int)bool + 1 != 0) + // An interesting aspect of this is that increment is always true. + // Decrement does not have this property. + if (E->isIncrementOp() && type->isBooleanType()) { + assert(0 && "inc simplification for booleans not implemented yet"); + + // NOTE: We likely want the code below, but loading/store booleans need to + // work first. See CIRGenFunction::buildFromMemory(). + Value = Builder.create(CGF.getLoc(E->getExprLoc()), + CGF.getCIRType(type), + Builder.getBoolAttr(true)); + } else if (type->isIntegerType()) { + bool canPerformLossyDemotionCheck = false; + if (CGF.getContext().isPromotableIntegerType(type)) { + canPerformLossyDemotionCheck = true; + assert(0 && "no promotable integer inc/dec yet"); + } + + if (CGF.SanOpts.hasOneOf( + SanitizerKind::ImplicitIntegerArithmeticValueChange) && + canPerformLossyDemotionCheck) { + assert(0 && + "perform lossy demotion case for inc/dec not implemented yet"); + } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) { + Value = buildIncDecConsiderOverflowBehavior(E, Value); + } else if (E->canOverflow() && type->isUnsignedIntegerType() && + CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) { + assert(0 && + "unsigned integer overflow sanitized inc/dec not implemented"); + } else { + auto Kind = E->isIncrementOp() ? mlir::cir::UnaryOpKind::Inc + : mlir::cir::UnaryOpKind::Dec; + Value = buildUnaryOp(E, Kind, Input); + } + } else if (const PointerType *ptr = type->getAs()) { + assert(0 && "no pointer inc/dec yet"); + } else if (type->isVectorType()) { + assert(0 && "no vector inc/dec yet"); + } else if (type->isRealFloatingType()) { + assert(0 && "no float inc/dec yet"); + } else if (type->isFixedPointType()) { + assert(0 && "no fixed point inc/dec yet"); + } else { + assert(type->castAs()); + assert(0 && "no objc pointer type inc/dec yet"); + } + + CIRGenFunction::SourceLocRAIIObject sourceloc{ + CGF, CGF.getLoc(E->getSourceRange())}; + + if (LV.isBitField()) + assert(0 && "no bitfield inc/dec yet"); + else + CGF.buildStoreThroughLValue(RValue::get(Value), LV, nullptr); + + return E->isPrefix() ? Value : Input; + } + + mlir::Value buildIncDecConsiderOverflowBehavior(const UnaryOperator *E, + mlir::Value V) { + switch (CGF.getLangOpts().getSignedOverflowBehavior()) { + case LangOptions::SOB_Defined: { + auto Kind = E->isIncrementOp() ? mlir::cir::UnaryOpKind::Inc + : mlir::cir::UnaryOpKind::Dec; + return buildUnaryOp(E, Kind, V); + break; + } + case LangOptions::SOB_Undefined: + assert(0 && + "inc/dec overflow behavior SOB_Undefined not implemented yet"); + break; + case LangOptions::SOB_Trapping: + assert(0 && "inc/dec overflow behavior SOB_Trapping not implemented yet"); + break; + } + llvm_unreachable("Unknown SignedOverflowBehaviorTy"); } mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { @@ -256,6 +353,13 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } + mlir::Value buildUnaryOp(const UnaryOperator *E, mlir::cir::UnaryOpKind kind, + mlir::Value input) { + return Builder.create( + CGF.getLoc(E->getSourceRange().getBegin()), + CGF.getCIRType(E->getType()), kind, input); + } + // C++ mlir::Value VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) { llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/inc-dec.cpp b/clang/test/CIR/CodeGen/inc-dec.cpp new file mode 100644 index 000000000000..5c65243a1cb3 --- /dev/null +++ b/clang/test/CIR/CodeGen/inc-dec.cpp @@ -0,0 +1,55 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +unsigned id0() { + unsigned a = 1; + return ++a; +} + +// CHECK: cir.func @_Z3id0v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval", uninitialized] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", cinit] +// CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] +// CHECK: %[[#AFTER_A:]] = cir.unary(inc, %[[#BEFORE_A]]) +// CHECK: cir.store %[[#AFTER_A]], %[[#A]] +// CHECK: cir.store %[[#AFTER_A]], %[[#RET]] + + +unsigned id1() { + unsigned a = 1; + return --a; +} + +// CHECK: cir.func @_Z3id1v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval", uninitialized] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", cinit] +// CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] +// CHECK: %[[#AFTER_A:]] = cir.unary(dec, %[[#BEFORE_A]]) +// CHECK: cir.store %[[#AFTER_A]], %[[#A]] +// CHECK: cir.store %[[#AFTER_A]], %[[#RET]] + +unsigned id2() { + unsigned a = 1; + return a++; +} + +// CHECK: cir.func @_Z3id2v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval", uninitialized] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", cinit] +// CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] +// CHECK: %[[#AFTER_A:]] = cir.unary(inc, %[[#BEFORE_A]]) +// CHECK: cir.store %[[#AFTER_A]], %[[#A]] +// CHECK: cir.store %[[#BEFORE_A]], %[[#RET]] + +unsigned id3() { + unsigned a = 1; + return a--; +} + +// CHECK: cir.func @_Z3id3v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval", uninitialized] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", cinit] +// CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] +// CHECK: %[[#AFTER_A:]] = cir.unary(dec, %[[#BEFORE_A]]) +// CHECK: cir.store %[[#AFTER_A]], %[[#A]] +// CHECK: cir.store %[[#BEFORE_A]], %[[#RET]] From 5cf07257b2731ed2c6fcc2001782b383e55d0d2f Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Thu, 22 Sep 2022 23:38:16 -0700 Subject: [PATCH 0583/2301] [CIR][CodeGen] Basic lowering of increment/decrement --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 35 ++++++++++++++++++++-- clang/test/CIR/CIRToLLVM/unary-inc-dec.cir | 30 +++++++++++++++++++ 2 files changed, 63 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CIRToLLVM/unary-inc-dec.cir diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index b89d82019d03..c69aba717743 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -225,6 +225,37 @@ class CIRFuncLowering : public mlir::OpRewritePattern { } }; +class CIRUnaryOpLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::UnaryOp op, + mlir::PatternRewriter &rewriter) const override { + mlir::Type type = op.getInput().getType(); + assert(type.isa() && "operand type not supported yet"); + + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Inc: { + auto One = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getInput(), One); + break; + } + case mlir::cir::UnaryOpKind::Dec: { + auto One = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getInput(), One); + break; + } + } + + return mlir::LogicalResult::success(); + } +}; + class CIRBinOpLowering : public mlir::OpRewritePattern { public: using OpRewritePattern::OpRewritePattern; @@ -479,8 +510,8 @@ class CIRBrOpLowering : public mlir::OpRewritePattern { void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { patterns.add(patterns.getContext()); + CIRConstantLowering, CIRUnaryOpLowering, CIRBinOpLowering, + CIRCmpOpLowering, CIRBrOpLowering>(patterns.getContext()); } void ConvertCIRToLLVMPass::runOnOperation() { diff --git a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir new file mode 100644 index 000000000000..319bdb0a37c2 --- /dev/null +++ b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir @@ -0,0 +1,30 @@ +// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// XFAIL: * + +module { + cir.func @foo() { + %0 = cir.alloca i32, cir.ptr , ["a", cinit] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} + %2 = cir.cst(2 : i32) : i32 + cir.store %2, %0 : i32, cir.ptr + cir.store %2, %1 : i32, cir.ptr + + %3 = cir.load %0 : cir.ptr , i32 + %4 = cir.unary(inc, %3) : i32, i32 + cir.store %4, %0 : i32, cir.ptr + + %5 = cir.load %1 : cir.ptr , i32 + %6 = cir.unary(dec, %5) : i32, i32 + cir.store %6, %1 : i32, cir.ptr + cir.return + } +} + +// MLIR: = arith.constant 1 +// MLIR: = arith.addi +// MLIR: = arith.constant 1 +// MLIR: = arith.subi + +// LLVM: = add i32 %[[#]], 1 +// LLVM: = sub i32 %[[#]], 1 From 5c921121a14b7008f3b0e905829f2233f8ba514c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Sep 2022 23:00:03 -0700 Subject: [PATCH 0584/2301] [CIR][Driver] Rename a bunch of options Make the options more uniform, easy to search, guess and use. Add one more test --- clang/include/clang/Driver/Options.td | 10 +++++----- clang/include/clang/Frontend/FrontendOptions.h | 8 ++++---- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 6 +++--- clang/lib/Driver/ToolChains/Clang.cpp | 4 ++-- clang/lib/Frontend/CompilerInvocation.cpp | 8 ++++---- clang/test/CIR/driver.c | 3 ++- 6 files changed, 20 insertions(+), 19 deletions(-) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 9d31355b4e39..aa26f6dce5c8 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3055,14 +3055,14 @@ def flto_EQ : Joined<["-"], "flto=">, HelpText<"Set LTO mode">, Values<"thin,full">; def flto_EQ_jobserver : Flag<["-"], "flto=jobserver">, Visibility<[ClangOption, FlangOption]>, Group, Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; -def disable_cir_passes : Flag<["-"], "disable-cir-passes">, +def clangir_disable_passes : Flag<["-"], "clangir-disable-passes">, Visibility<[ClangOption, CC1Option]>, HelpText<"Disable CIR transformations pipeline">, - MarshallingInfoFlag>; -def disable_cir_verifier : Flag<["-"], "disable-cir-verifier">, + MarshallingInfoFlag>; +def clangir_disable_verifier : Flag<["-"], "clangir-disable-verifier">, Visibility<[ClangOption, CC1Option]>, - HelpText<"Disable CIR module verifier">, - MarshallingInfoFlag>; + HelpText<"ClangIR: Disable MLIR module verifier">, + MarshallingInfoFlag>; def flto_EQ_auto : Flag<["-"], "flto=auto">, Visibility<[ClangOption, FlangOption]>, Group, Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; def flto : Flag<["-"], "flto">, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 44ccfef42226..16c1d69ff9fd 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -419,10 +419,10 @@ class FrontendOptions { unsigned UseClangIRPipeline : 1; /// Disable Clang IR specific (CIR) passes - unsigned DisableCIRPasses : 1; + unsigned ClangIRDisablePasses : 1; /// Disable Clang IR (CIR) verifier - unsigned DisableCIRVerifier : 1; + unsigned ClangIRDisableCIRVerifier : 1; CodeCompleteOptions CodeCompleteOpts; @@ -611,8 +611,8 @@ class FrontendOptions { EmitSymbolGraph(false), EmitExtensionSymbolGraphs(false), EmitSymbolGraphSymbolLabelsForTesting(false), EmitPrettySymbolGraphs(false), GenReducedBMI(false), - UseClangIRPipeline(false), DisableCIRPasses(false), - DisableCIRVerifier(false), TimeTraceGranularity(500), + UseClangIRPipeline(false), ClangIRDisablePasses(false), + ClangIRDisableCIRVerifier(false), TimeTraceGranularity(500), TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 06beb0626d6a..f716e60e5e11 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -133,7 +133,7 @@ class CIRGenConsumer : public clang::ASTConsumer { // global codegen, followed by running CIR passes. gen->HandleTranslationUnit(C); - if (!feOptions.DisableCIRVerifier) + if (!feOptions.ClangIRDisableCIRVerifier) if (!gen->verifyModule()) { llvm::report_fatal_error( "CIR codegen: module verification error before running CIR passes"); @@ -149,9 +149,9 @@ class CIRGenConsumer : public clang::ASTConsumer { // Run CIR cleanup, in the future also the relevent raising and // some code analysis. - if (!feOptions.DisableCIRPasses) { + if (!feOptions.ClangIRDisablePasses) { runCIRToCIRPasses(mlirMod, mlirCtx.get(), - !feOptions.DisableCIRVerifier); + !feOptions.ClangIRDisableCIRVerifier); } // Emit remaining defaulted C++ methods diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index d3329609c9d6..17967893aac4 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5244,8 +5244,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_fclangir) || Args.hasArg(options::OPT_emit_cir)) CmdArgs.push_back("-fclangir"); - if (Args.hasArg(options::OPT_disable_cir_passes)) - CmdArgs.push_back("-disable-cir-passes"); + if (Args.hasArg(options::OPT_clangir_disable_passes)) + CmdArgs.push_back("-clangir-disable-passes"); if (IsOpenMPDevice) { // We have to pass the triple of the host if compiling for an OpenMP device. diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 130c91a7afce..bd3f195d39e8 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3090,11 +3090,11 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diags.Report(diag::err_drv_argument_only_allowed_with) << "-fsystem-module" << "-emit-module"; - if (Args.hasArg(OPT_disable_cir_passes)) - Opts.DisableCIRPasses = true; + if (Args.hasArg(OPT_clangir_disable_passes)) + Opts.ClangIRDisablePasses = true; - if (Args.hasArg(OPT_disable_cir_verifier)) - Opts.DisableCIRVerifier = true; + if (Args.hasArg(OPT_clangir_disable_verifier)) + Opts.ClangIRDisableCIRVerifier = true; if (Args.hasArg(OPT_aux_target_cpu)) Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu)); diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index e755b130422a..85d6fe31f7e6 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -4,7 +4,8 @@ // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ -// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -disable-cir-passes -S -emit-cir %s -o %t.cir +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-passes -S -emit-cir %s -o %t.cir +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-verifier -S -emit-cir %s -o %t.cir // RUN: %clang -target arm64-apple-macosx12.0.0 -fclangir -S -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR // XFAIL: * From 7668300cfd00b43353ba65013c42abf25fd9ebfe Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 29 Sep 2022 10:36:05 -0700 Subject: [PATCH 0585/2301] [CIR][Driver] Add -fclangir-lifetime-check options - Add cc1 options to the driver - Hook it up as part of CIRGenAction - Teach runCIRToCIRPasses how to run lifetime pass - Sanitize pass options, also handle error handling for malformed ones - Add -clangir-verify-diagnostics to test source code when using clang - Update lifetime checks to also test clang for diagnostic --- .../clang/Basic/DiagnosticDriverKinds.td | 4 +- clang/include/clang/CIR/CIRToCIRPasses.h | 7 +- clang/include/clang/Driver/Options.td | 13 ++++ .../include/clang/Frontend/FrontendOptions.h | 12 ++- clang/lib/CIR/CodeGen/CIRPasses.cpp | 31 ++++++-- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 74 +++++++++++++++++-- clang/lib/Frontend/CompilerInvocation.cpp | 9 +++ .../CIR/Transforms/lifetime-check-remarks.cpp | 1 + clang/test/CIR/Transforms/lifetime-check.cpp | 1 + .../Transforms/lifetime-invalid-option.cpp | 3 + .../CIR/Transforms/lifetime-loop-valid.cpp | 1 + clang/test/CIR/Transforms/lifetime-loop.cpp | 1 + clang/test/CIR/Transforms/lifetime-switch.cpp | 1 + 13 files changed, 140 insertions(+), 18 deletions(-) create mode 100644 clang/test/CIR/Transforms/lifetime-invalid-option.cpp diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td index 9fc98fc29b34..f889d857a9ae 100644 --- a/clang/include/clang/Basic/DiagnosticDriverKinds.td +++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td @@ -355,8 +355,8 @@ def err_drv_incompatible_omp_arch : Error<"OpenMP target architecture '%0' point def err_drv_omp_host_ir_file_not_found : Error< "provided host compiler IR file '%0' is required to generate code for OpenMP " "target regions but cannot be found">; -def err_drv_cir_multiple_input : Error< - "clangir (cir) generation requires exactly one input source file">; +def err_drv_cir_pass_opt_parsing : Error< + "clangir pass option '%0' not recognized">; def err_drv_omp_host_target_not_supported : Error< "target '%0' is not a supported OpenMP host target">; def err_drv_expecting_fopenmp_with_fopenmp_targets : Error< diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index 6bf0664553a2..dff718bc0706 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -24,8 +24,11 @@ class ModuleOp; namespace cir { // Run set of cleanup/prepare/etc passes CIR <-> CIR. -void runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - bool enableVerifier); +mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, + mlir::MLIRContext *mlirCtx, + bool enableVerifier, bool enableLifetime, + llvm::StringRef lifetimeOpts, + bool &passOptParsingFailure); } // namespace cir #endif // CLANG_CIR_CIRTOCIRPASSES_H_ diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index aa26f6dce5c8..e0c507e6ff7f 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3065,6 +3065,19 @@ def clangir_disable_verifier : Flag<["-"], "clangir-disable-verifier">, MarshallingInfoFlag>; def flto_EQ_auto : Flag<["-"], "flto=auto">, Visibility<[ClangOption, FlangOption]>, Group, Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; +def clangir_verify_diagnostics : Flag<["-"], "clangir-verify-diagnostics">, + Visibility<[ClangOption, CC1Option]>, + HelpText<"ClangIR: Enable diagnostic verification in MLIR, similar to clang's -verify">, + MarshallingInfoFlag>; +def fclangir_lifetime_check_EQ : Joined<["-"], "fclangir-lifetime-check=">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Run lifetime checker">, + MarshallingInfoString>; +def fclangir_lifetime_check : Flag<["-"], "fclangir-lifetime-check">, + Visibility<[ClangOption, CC1Option]>, Group, + Alias, AliasArgs<["history=invalid,null"]>, + HelpText<"Run lifetime checker">; + def flto : Flag<["-"], "flto">, Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>, Group, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 16c1d69ff9fd..eb9516bc86e0 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -424,6 +424,12 @@ class FrontendOptions { /// Disable Clang IR (CIR) verifier unsigned ClangIRDisableCIRVerifier : 1; + /// Enable diagnostic verification for CIR + unsigned ClangIRVerifyDiags : 1; + + // Enable Clang IR based lifetime check + unsigned ClangIRLifetimeCheck : 1; + CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. @@ -495,6 +501,8 @@ class FrontendOptions { std::string MTMigrateDir; std::string ARCMTMigrateReportOut; + std::string ClangIRLifetimeCheckOpts; + /// The input kind, either specified via -x argument or deduced from the input /// file name. InputKind DashX; @@ -612,8 +620,8 @@ class FrontendOptions { EmitSymbolGraphSymbolLabelsForTesting(false), EmitPrettySymbolGraphs(false), GenReducedBMI(false), UseClangIRPipeline(false), ClangIRDisablePasses(false), - ClangIRDisableCIRVerifier(false), TimeTraceGranularity(500), - TimeTraceVerbose(false) {} + ClangIRDisableCIRVerifier(false), ClangIRLifetimeCheck(false), + TimeTraceGranularity(500), TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file /// extension. For example, "c" would return Language::C. diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 3447da22d3cf..b91d590bc15c 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -15,17 +15,36 @@ #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" +#include "mlir/Support/LogicalResult.h" namespace cir { -void runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - bool enableVerifier) { +mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, + mlir::MLIRContext *mlirCtx, + bool enableVerifier, bool enableLifetime, + llvm::StringRef lifetimeOpts, + bool &passOptParsingFailure) { mlir::PassManager pm(mlirCtx); + passOptParsingFailure = false; + pm.addPass(mlir::createMergeCleanupsPass()); + + // TODO(CIR): Make this actually propagate errors correctly. This is stubbed + // in to get rebases going. + auto errorHandler = [](const llvm::Twine &) -> mlir::LogicalResult { + return mlir::LogicalResult::failure(); + }; + + if (enableLifetime) { + auto lifetimePass = mlir::createLifetimeCheckPass(); + if (lifetimePass->initializeOptions(lifetimeOpts, errorHandler).failed()) { + passOptParsingFailure = true; + return mlir::failure(); + } + pm.addPass(std::move(lifetimePass)); + } + pm.enableVerifier(enableVerifier); - auto result = !mlir::failed(pm.run(theModule)); - if (!result) - llvm::report_fatal_error( - "CIR codegen: MLIR pass manager fails when running CIR passes!"); + return pm.run(theModule); } } // namespace cir diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index f716e60e5e11..de6c5a4b1c26 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -46,6 +46,7 @@ #include "llvm/Linker/Linker.h" #include "llvm/Pass.h" #include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/Signals.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/TimeProfiler.h" #include "llvm/Support/Timer.h" @@ -58,6 +59,17 @@ using namespace cir; using namespace clang; +static std::string sanitizePassOptions(llvm::StringRef o) { + std::string opts{o}; + // MLIR pass options are space separated, but we use ';' in clang since + // space aren't well supported, switch it back. + for (unsigned i = 0, e = opts.size(); i < e; ++i) + if (opts[i] == ';') + opts[i] = ' '; + // If arguments are surrounded with '"', trim them off + return llvm::StringRef(opts).trim('"').str(); +} + namespace cir { class CIRGenConsumer : public clang::ASTConsumer { @@ -131,8 +143,8 @@ class CIRGenConsumer : public clang::ASTConsumer { // Note that this method is called after `HandleTopLevelDecl` has already // ran all over the top level decls. Here clang mostly wraps defered and // global codegen, followed by running CIR passes. - gen->HandleTranslationUnit(C); + if (!feOptions.ClangIRDisableCIRVerifier) if (!gen->verifyModule()) { llvm::report_fatal_error( @@ -143,15 +155,65 @@ class CIRGenConsumer : public clang::ASTConsumer { auto mlirMod = gen->getModule(); auto mlirCtx = gen->takeContext(); + auto setupCIRPipelineAndExecute = [&] { + // Sanitize passes options. MLIR uses spaces between pass options + // and since that's hard to fly in clang, we currently use ';'. + std::string lifetimeOpts; + if (feOptions.ClangIRLifetimeCheck) + lifetimeOpts = sanitizePassOptions(feOptions.ClangIRLifetimeCheckOpts); + + // Setup and run CIR pipeline. + bool passOptParsingFailure = false; + if (runCIRToCIRPasses(mlirMod, mlirCtx.get(), + !feOptions.ClangIRDisableCIRVerifier, + feOptions.ClangIRLifetimeCheck, lifetimeOpts, + passOptParsingFailure) + .failed()) { + if (passOptParsingFailure) + diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) + << feOptions.ClangIRLifetimeCheckOpts; + else + llvm::report_fatal_error("CIR codegen: MLIR pass manager fails " + "when running CIR passes!"); + return; + } + }; + switch (action) { case CIRGenAction::OutputType::EmitCIR: if (outputStream && mlirMod) { - - // Run CIR cleanup, in the future also the relevent raising and - // some code analysis. if (!feOptions.ClangIRDisablePasses) { - runCIRToCIRPasses(mlirMod, mlirCtx.get(), - !feOptions.ClangIRDisableCIRVerifier); + // Handle source manager properly given that lifetime analysis + // might emit warnings and remarks. + auto &clangSourceMgr = C.getSourceManager(); + FileID MainFileID = clangSourceMgr.getMainFileID(); + + std::unique_ptr FileBuf = + llvm::MemoryBuffer::getMemBuffer( + clangSourceMgr.getBufferOrFake(MainFileID)); + + llvm::SourceMgr mlirSourceMgr; + mlirSourceMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); + + if (feOptions.ClangIRVerifyDiags) { + mlir::SourceMgrDiagnosticVerifierHandler sourceMgrHandler( + mlirSourceMgr, mlirCtx.get()); + mlirCtx->printOpOnDiagnostic(false); + setupCIRPipelineAndExecute(); + + // Verify the diagnostic handler to make sure that each of the + // diagnostics matched. + if (sourceMgrHandler.verify().failed()) { + // FIXME: we fail ungracefully, there's probably a better way + // to communicate non-zero return so tests can actually fail. + llvm::sys::RunInterruptHandlers(); + exit(1); + } + } else { + mlir::SourceMgrDiagnosticHandler sourceMgrHandler(mlirSourceMgr, + mlirCtx.get()); + setupCIRPipelineAndExecute(); + } } // Emit remaining defaulted C++ methods diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index bd3f195d39e8..0e7ac7ae0ffc 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3096,6 +3096,15 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Args.hasArg(OPT_clangir_disable_verifier)) Opts.ClangIRDisableCIRVerifier = true; + if (Args.hasArg(OPT_clangir_verify_diagnostics)) + Opts.ClangIRVerifyDiags = true; + + if (const Arg *A = Args.getLastArg(OPT_fclangir_lifetime_check, + OPT_fclangir_lifetime_check_EQ)) { + Opts.ClangIRLifetimeCheck = true; + Opts.ClangIRLifetimeCheckOpts = A->getValue(); + } + if (Args.hasArg(OPT_aux_target_cpu)) Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu)); if (Args.hasArg(OPT_aux_target_feature)) diff --git a/clang/test/CIR/Transforms/lifetime-check-remarks.cpp b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp index deecf9bbd6a5..7c1b1043302a 100644 --- a/clang/test/CIR/Transforms/lifetime-check-remarks.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: cir-tool %t.cir -cir-lifetime-check="remarks=pset-invalid" -verify-diagnostics -o %t-out.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="remarks=pset-invalid" -clangir-verify-diagnostics -emit-cir %s -o %t.cir // XFAIL: * int *p0() { diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index f0831159232b..1c84f736ccff 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null" -verify-diagnostics -o %t-out.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null" -clangir-verify-diagnostics -emit-cir %s -o %t.cir // XFAIL: * int *p0() { diff --git a/clang/test/CIR/Transforms/lifetime-invalid-option.cpp b/clang/test/CIR/Transforms/lifetime-invalid-option.cpp new file mode 100644 index 000000000000..64486b6aa166 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-invalid-option.cpp @@ -0,0 +1,3 @@ +// RUN: not %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="yolo=invalid,null" -emit-cir %s -o - 2>&1 | FileCheck %s + +// CHECK: clangir pass option 'yolo=invalid,null' not recognized \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-loop-valid.cpp b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp index ea1269a6b49b..95ce73ac758a 100644 --- a/clang/test/CIR/Transforms/lifetime-loop-valid.cpp +++ b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null remarks=pset-always" -verify-diagnostics -o %t-out.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null;remarks=pset-always" -clangir-verify-diagnostics -emit-cir %s -o %t.cir // XFAIL: * // diff --git a/clang/test/CIR/Transforms/lifetime-loop.cpp b/clang/test/CIR/Transforms/lifetime-loop.cpp index 1e615c3e0ce8..dd9ee7140f99 100644 --- a/clang/test/CIR/Transforms/lifetime-loop.cpp +++ b/clang/test/CIR/Transforms/lifetime-loop.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null remarks=pset-invalid" -verify-diagnostics -o %t-out.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null;remarks=pset-invalid" -clangir-verify-diagnostics -emit-cir %s -o %t.cir // XFAIL: * void loop_basic_for() { diff --git a/clang/test/CIR/Transforms/lifetime-switch.cpp b/clang/test/CIR/Transforms/lifetime-switch.cpp index 15e91cc76a6d..597eb174a13e 100644 --- a/clang/test/CIR/Transforms/lifetime-switch.cpp +++ b/clang/test/CIR/Transforms/lifetime-switch.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null" -verify-diagnostics -o %t-out.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null" -clangir-verify-diagnostics -emit-cir %s -o %t.cir // XFAIL: * void s0(int b) { From b0f141108d39664dd0563ce76fa5a409acbc70c6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 4 Oct 2022 11:25:54 -0700 Subject: [PATCH 0586/2301] [CIR][LifetimeCheck] Thread in clang::ASTContext to lifetime pass --- clang/include/clang/CIR/CIRToCIRPasses.h | 5 +++++ clang/include/clang/CIR/Dialect/Passes.h | 4 ++++ clang/lib/CIR/CodeGen/CIRPasses.cpp | 4 +++- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 12 ++++++++++-- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index dff718bc0706..06d928e5cf15 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -16,6 +16,10 @@ #include +namespace clang { +class ASTContext; +} + namespace mlir { class MLIRContext; class ModuleOp; @@ -26,6 +30,7 @@ namespace cir { // Run set of cleanup/prepare/etc passes CIR <-> CIR. mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, llvm::StringRef lifetimeOpts, bool &passOptParsingFailure); diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 8aa3e6c71b3a..6db6d4b0b301 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -15,9 +15,13 @@ #include "mlir/Pass/Pass.h" +namespace clang { +class ASTContext; +} namespace mlir { std::unique_ptr createLifetimeCheckPass(); +std::unique_ptr createLifetimeCheckPass(clang::ASTContext *astCtx); std::unique_ptr createMergeCleanupsPass(); //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index b91d590bc15c..28bde345df13 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "clang/AST/ASTContext.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" @@ -20,6 +21,7 @@ namespace cir { mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, llvm::StringRef lifetimeOpts, bool &passOptParsingFailure) { @@ -35,7 +37,7 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, }; if (enableLifetime) { - auto lifetimePass = mlir::createLifetimeCheckPass(); + auto lifetimePass = mlir::createLifetimeCheckPass(&astCtx); if (lifetimePass->initializeOptions(lifetimeOpts, errorHandler).failed()) { passOptParsingFailure = true; return mlir::failure(); diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 28415835506a..6953b40e18b8 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -8,11 +8,10 @@ #include "PassDetail.h" +#include "clang/AST/ASTContext.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "mlir/Dialect/Func/IR/FuncOps.h" - #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SmallSet.h" @@ -207,6 +206,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { PMapType *currPmap = nullptr; PMapType &getPmap() { return *currPmap; } + std::optional astCtx; + void setASTContext(clang::ASTContext *c) { astCtx = c; } + void joinPmaps(SmallVectorImpl &pmaps); void printPset(PSetType &pset, llvm::raw_ostream &OS = llvm::errs()); void dumpPmap(PMapType &pmap); @@ -731,6 +733,12 @@ std::unique_ptr mlir::createLifetimeCheckPass() { return std::make_unique(); } +std::unique_ptr mlir::createLifetimeCheckPass(clang::ASTContext *astCtx) { + auto lifetime = std::make_unique(); + lifetime->setASTContext(astCtx); + return std::move(lifetime); +} + //===----------------------------------------------------------------------===// // Dump & print helpers //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index de6c5a4b1c26..f56f07545888 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -164,7 +164,7 @@ class CIRGenConsumer : public clang::ASTConsumer { // Setup and run CIR pipeline. bool passOptParsingFailure = false; - if (runCIRToCIRPasses(mlirMod, mlirCtx.get(), + if (runCIRToCIRPasses(mlirMod, mlirCtx.get(), C, !feOptions.ClangIRDisableCIRVerifier, feOptions.ClangIRLifetimeCheck, lifetimeOpts, passOptParsingFailure) From 159013d783aa816c43f73c6f8c56311d0f312105 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 4 Oct 2022 18:10:38 -0700 Subject: [PATCH 0587/2301] [CIR] Remove lifetime checks from cir-tool - lifetime now depends on AST --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 3 +++ clang/test/CIR/Transforms/lifetime-check-remarks.cpp | 3 --- clang/test/CIR/Transforms/lifetime-check.cpp | 3 --- clang/test/CIR/Transforms/lifetime-loop-valid.cpp | 4 ---- clang/test/CIR/Transforms/lifetime-loop.cpp | 3 --- clang/test/CIR/Transforms/lifetime-switch.cpp | 3 --- clang/tools/cir-tool/cir-tool.cpp | 3 --- 7 files changed, 3 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 6953b40e18b8..62e9608e4dd2 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -730,6 +730,9 @@ void LifetimeCheckPass::runOnOperation() { } std::unique_ptr mlir::createLifetimeCheckPass() { + // FIXME: MLIR requres a default "constructor", but should never + // be used. + llvm_unreachable("Check requires clang::ASTContext, use the other ctor"); return std::make_unique(); } diff --git a/clang/test/CIR/Transforms/lifetime-check-remarks.cpp b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp index 7c1b1043302a..83cef25c54da 100644 --- a/clang/test/CIR/Transforms/lifetime-check-remarks.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-remarks.cpp @@ -1,7 +1,4 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: cir-tool %t.cir -cir-lifetime-check="remarks=pset-invalid" -verify-diagnostics -o %t-out.cir // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="remarks=pset-invalid" -clangir-verify-diagnostics -emit-cir %s -o %t.cir -// XFAIL: * int *p0() { int *p = nullptr; diff --git a/clang/test/CIR/Transforms/lifetime-check.cpp b/clang/test/CIR/Transforms/lifetime-check.cpp index 1c84f736ccff..017de9f6495d 100644 --- a/clang/test/CIR/Transforms/lifetime-check.cpp +++ b/clang/test/CIR/Transforms/lifetime-check.cpp @@ -1,7 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null" -verify-diagnostics -o %t-out.cir // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null" -clangir-verify-diagnostics -emit-cir %s -o %t.cir -// XFAIL: * int *p0() { int *p = nullptr; diff --git a/clang/test/CIR/Transforms/lifetime-loop-valid.cpp b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp index 95ce73ac758a..e7ee7aca7cf3 100644 --- a/clang/test/CIR/Transforms/lifetime-loop-valid.cpp +++ b/clang/test/CIR/Transforms/lifetime-loop-valid.cpp @@ -1,9 +1,5 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null remarks=pset-always" -verify-diagnostics -o %t-out.cir // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null;remarks=pset-always" -clangir-verify-diagnostics -emit-cir %s -o %t.cir -// XFAIL: * -// // Loops that do not change psets // p1179r1: 2.4.9.1 diff --git a/clang/test/CIR/Transforms/lifetime-loop.cpp b/clang/test/CIR/Transforms/lifetime-loop.cpp index dd9ee7140f99..cf58ddf48f73 100644 --- a/clang/test/CIR/Transforms/lifetime-loop.cpp +++ b/clang/test/CIR/Transforms/lifetime-loop.cpp @@ -1,7 +1,4 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null remarks=pset-invalid" -verify-diagnostics -o %t-out.cir // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null;remarks=pset-invalid" -clangir-verify-diagnostics -emit-cir %s -o %t.cir -// XFAIL: * void loop_basic_for() { int *p = nullptr; // expected-note {{invalidated here}} diff --git a/clang/test/CIR/Transforms/lifetime-switch.cpp b/clang/test/CIR/Transforms/lifetime-switch.cpp index 597eb174a13e..ca56b95f71a0 100644 --- a/clang/test/CIR/Transforms/lifetime-switch.cpp +++ b/clang/test/CIR/Transforms/lifetime-switch.cpp @@ -1,7 +1,4 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: cir-tool %t.cir -cir-lifetime-check="history=invalid,null" -verify-diagnostics -o %t-out.cir // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=invalid,null" -clangir-verify-diagnostics -emit-cir %s -o %t.cir -// XFAIL: * void s0(int b) { int *p = nullptr; diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index cf3818a2f289..2e23c06d648d 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -39,9 +39,6 @@ int main(int argc, char **argv) { ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertCIRToMemRefPass(); }); - ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { - return mlir::createLifetimeCheckPass(); - }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return mlir::createMergeCleanupsPass(); }); From 0a4dd1dcd34d0e4e012a903900c7038f2229d48c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Oct 2022 17:06:23 -0700 Subject: [PATCH 0588/2301] [CIR] Add ASTVarDeclAttr to hold clang::VarDecl on AllocaOp's This doesn't hook up any AST nodes with operations just yet. --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 1 + .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 7 +-- clang/include/clang/CIR/Dialect/IR/CIROps.td | 13 +++++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 1 - clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 43 +++++++++++++++++++ 5 files changed, 59 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index bb9b425b2465..a5792b6438aa 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -25,6 +25,7 @@ namespace clang { class FunctionDecl; +class VarDecl; } #define GET_ATTRDEF_CLASSES diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 2c44680948c6..eafc66cdaf74 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -83,8 +83,8 @@ def CstArrayAttr : CIR_Attr<"CstArray", "cst_array", [TypedAttrInterface]> { // AST Wrappers //===----------------------------------------------------------------------===// -class ASTDecl traits = []> - : CIR_Attr { +class ASTDecl traits = []> + : CIR_Attr { string clang_name = !strconcat("clang::", name); let summary = !strconcat("Wraps a ", clang_name, " AST node."); @@ -104,6 +104,7 @@ class ASTDecl traits = []> let genVerifyDecl = 1; } -def ASTFunctionDeclAttr : ASTDecl<"FunctionDecl">; +def ASTFunctionDeclAttr : ASTDecl<"FunctionDecl", "function.decl">; +def ASTVarDeclAttr : ASTDecl<"VarDecl", "var.decl">; #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e6a86f68844b..6c20be267069 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -228,12 +228,20 @@ def AllocaOp : CIR_Op<"alloca", [ StrAttr:$name, // FIXME: add "uninitialzed" as default mode Arg:$init, - ConfinedAttr, [IntMinValue<0>]>:$alignment + ConfinedAttr, [IntMinValue<0>]>:$alignment, + OptionalAttr:$ast ); let results = (outs Res]>:$addr); + let skipDefaultBuilders = 1; + let builders = [ + OpBuilder<(ins "Type":$addr, "Type":$allocaType, + "StringRef":$name, "mlir::cir::InitStyle":$init, + "IntegerAttr":$alignment)> + ]; + let extraClassDeclaration = [{ // Whether the alloca input type is a pointer. bool isPointerType() { return getAllocaType().isa<::mlir::cir::PointerType>(); } @@ -242,7 +250,8 @@ def AllocaOp : CIR_Op<"alloca", [ // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. let assemblyFormat = [{ - $allocaType `,` `cir.ptr` type($addr) `,` `[` $name `,` $init `]` attr-dict + $allocaType `,` `cir.ptr` type($addr) `,` `[` $name `,` $init `]` + (`ast` $ast^)? attr-dict }]; let hasVerifier = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index f5b1e423acc0..b546716cd7c2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1566,7 +1566,6 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, // of the block. builder.setInsertionPointToStart(insertBlock); } - addr = builder.create(loc, /*addr type*/ localVarPtrTy, /*var type*/ ty, name, initStyle, alignIntAttr); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index ecf70c3982fe..aaac4f274714 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -108,6 +108,28 @@ static RetTy parseOptionalCIRKeyword(OpAsmParser &parser, return static_cast(index); } +//===----------------------------------------------------------------------===// +// AllocaOp +//===----------------------------------------------------------------------===// + +void AllocaOp::build(::mlir::OpBuilder &odsBuilder, + ::mlir::OperationState &odsState, ::mlir::Type addr, + ::mlir::Type allocaType, ::llvm::StringRef name, + ::mlir::cir::InitStyle init, + ::mlir::IntegerAttr alignment) { + odsState.addAttribute(getAllocaTypeAttrName(odsState.name), + ::mlir::TypeAttr::get(allocaType)); + odsState.addAttribute(getNameAttrName(odsState.name), + odsBuilder.getStringAttr(name)); + odsState.addAttribute( + getInitAttrName(odsState.name), + ::mlir::cir::InitStyleAttr::get(odsBuilder.getContext(), init)); + if (alignment) { + odsState.addAttribute(getAlignmentAttrName(odsState.name), alignment); + } + odsState.addTypes(addr); +} + //===----------------------------------------------------------------------===// // ConstantOp //===----------------------------------------------------------------------===// @@ -1468,6 +1490,27 @@ LogicalResult ASTFunctionDeclAttr::verify( return success(); } +::mlir::Attribute ASTVarDeclAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { + // We cannot really parse anything AST related at this point + // since we have no serialization/JSON story. + return mlir::Attribute(); +} + +void ASTVarDeclAttr::print(::mlir::AsmPrinter &printer) const { + // Nothing to print besides the mnemonics. +} + +LogicalResult ASTVarDeclAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::clang::VarDecl *decl) { + if (!decl) { + emitError() << "expected non-null AST declaration"; + return failure(); + } + return success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From 49a654eafe343dbb2062ed6bbcc4f24e96cbafd6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 17 Dec 2023 12:24:55 -0800 Subject: [PATCH 0589/2301] [CIR][Dialect] Link against LoopLikeInterface --- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index fd1be8998647..7bab60b4606f 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -13,5 +13,6 @@ add_clang_library(MLIRCIR MLIRIR MLIRFuncDialect MLIRLLVMDialect + MLIRLoopLikeInterface MLIRSideEffectInterfaces ) From da93f471a105f6327cf3a416b347204b49737b0c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Oct 2022 17:42:46 -0700 Subject: [PATCH 0590/2301] [CIR][CodeGen][NFC] Track current vardecl by setting up a RAII context This doesn't change any funcionality but is a step towards setting up initialization on Alloca's for struct/class types --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 4 +++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 17 ++++++------- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 28 +++++++++++++++++----- 4 files changed, 34 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 756d542ae6a5..9eb64c2cbc8f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -205,6 +205,8 @@ void CIRGenFunction::buildVarDecl(const VarDecl &D) { assert(0 && "not implemented"); assert(D.hasLocalStorage()); + + CIRGenFunction::VarDeclContext varDeclCtx{*this, &D}; return buildAutoVarDecl(D); } @@ -213,7 +215,7 @@ void CIRGenFunction::buildScalarInit(const Expr *init, const ValueDecl *D, // TODO: this is where a lot of ObjC lifetime stuff would be done. mlir::Value value = buildScalarExpr(init); SourceLocRAIIObject Loc{*this, getLoc(D->getSourceRange())}; - buildStoreThroughLValue(RValue::get(value), lvalue, D); + buildStoreThroughLValue(RValue::get(value), lvalue); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index b546716cd7c2..ae782ed74346 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -204,17 +204,15 @@ mlir::Value CIRGenFunction::buildToMemory(mlir::Value Value, QualType Ty) { return Value; } -void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, - const Decl *InitDecl) { +void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue) { // TODO: constant matrix type, volatile, non temporal, TBAA buildStoreOfScalar(value, lvalue.getAddress(), false, lvalue.getType(), - lvalue.getBaseInfo(), InitDecl, false); + lvalue.getBaseInfo(), false); } void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, QualType Ty, LValueBaseInfo BaseInfo, - const Decl *InitDecl, bool isNontemporal) { // TODO(CIR): this has fallen out of date with codegen @@ -228,9 +226,9 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, assert(Addr.getPointer() && "expected pointer to exist"); auto SrcAlloca = dyn_cast_or_null(Addr.getPointer().getDefiningOp()); - if (InitDecl && SrcAlloca) { + if (currVarDecl && SrcAlloca) { InitStyle IS; - const VarDecl *VD = dyn_cast_or_null(InitDecl); + const VarDecl *VD = currVarDecl; assert(VD && "VarDecl expected"); if (VD->hasInit()) { switch (VD->getInitStyle()) { @@ -273,12 +271,11 @@ RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { return RValue::get(buildLoadOfScalar(LV, Loc)); } -void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst, - const Decl *InitDecl) { +void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { assert(Dst.isSimple() && "only implemented simple"); // TODO: ObjC lifetime. assert(Src.isScalar() && "Can't emit an agg store with this method"); - buildStoreOfScalar(Src.getScalarVal(), Dst, InitDecl); + buildStoreOfScalar(Src.getScalarVal(), Dst); } static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, @@ -431,7 +428,7 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { LValue LV = buildLValue(E->getLHS()); SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; - buildStoreThroughLValue(RV, LV, nullptr /*InitDecl*/); + buildStoreThroughLValue(RV, LV); assert(!getContext().getLangOpts().OpenMP && "last priv cond not implemented"); return LV; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 619d2f63ff3b..01346867f836 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -300,7 +300,7 @@ class ScalarExprEmitter : public StmtVisitor { if (LV.isBitField()) assert(0 && "no bitfield inc/dec yet"); else - CGF.buildStoreThroughLValue(RValue::get(Value), LV, nullptr); + CGF.buildStoreThroughLValue(RValue::get(Value), LV); return E->isPrefix() ? Value : Input; } @@ -1232,7 +1232,7 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( if (LHSLV.isBitField()) assert(0 && "not yet implemented"); else - CGF.buildStoreThroughLValue(RValue::get(Result), LHSLV, nullptr); + CGF.buildStoreThroughLValue(RValue::get(Result), LHSLV); assert(!CGF.getLangOpts().OpenMP && "Not implemented"); return LHSLV; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index af172b1439af..6fe6dc350322 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -239,6 +239,25 @@ class CIRGenFunction { void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, clang::CharUnits alignment); + // Track current variable initialization (if there's one) + const clang::VarDecl *currVarDecl = nullptr; + class VarDeclContext { + CIRGenFunction &P; + const clang::VarDecl *OldVal = nullptr; + + public: + VarDeclContext(CIRGenFunction &p, const VarDecl *Value) : P(p) { + if (P.currSrcLoc) + OldVal = P.currVarDecl; + P.currVarDecl = Value; + } + + /// Can be used to restore the state early, before the dtor + /// is run. + void restore() { P.currVarDecl = OldVal; } + ~VarDeclContext() { restore(); } + }; + /// ------- /// Source Location tracking /// ------- @@ -718,20 +737,17 @@ class CIRGenFunction { void buildAutoVarCleanups(const AutoVarEmission &emission); - void buildStoreOfScalar(mlir::Value value, LValue lvalue, - const clang::Decl *InitDecl); - + void buildStoreOfScalar(mlir::Value value, LValue lvalue); void buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, clang::QualType Ty, LValueBaseInfo BaseInfo, - const clang::Decl *InitDecl, bool isNontemporal); + bool isNontemporal); mlir::Value buildToMemory(mlir::Value Value, clang::QualType Ty); /// Store the specified rvalue into the specified /// lvalue, where both are guaranteed to the have the same type, and that type /// is 'Ty'. - void buildStoreThroughLValue(RValue Src, LValue Dst, - const clang::Decl *InitDecl); + void buildStoreThroughLValue(RValue Src, LValue Dst); mlir::LogicalResult buildBranchThroughCleanup(JumpDest &Dest, clang::LabelDecl *L, From a4399c407ec8313a37fc81ec968dd0803ddcf7a8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Oct 2022 18:05:40 -0700 Subject: [PATCH 0591/2301] [CIR][NFC] Constify AST references --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 6 +++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index eafc66cdaf74..56e7491187df 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -85,9 +85,9 @@ def CstArrayAttr : CIR_Attr<"CstArray", "cst_array", [TypedAttrInterface]> { class ASTDecl traits = []> : CIR_Attr { - string clang_name = !strconcat("clang::", name); + string clang_name = !strconcat("const clang::", name, " *"); - let summary = !strconcat("Wraps a ", clang_name, " AST node."); + let summary = !strconcat("Wraps a '", clang_name, "' AST node."); let description = [{ Operations optionally refer to this node, they could be available depending on the CIR lowering stage. Whether it's attached to the appropriated @@ -95,7 +95,7 @@ class ASTDecl traits = []> This always implies a non-null AST reference (verified). }]; - let parameters = (ins !strconcat(clang_name, " *"):$astDecl); + let parameters = (ins clang_name:$astDecl); // Printing and parsing available in CIRDialect.cpp let hasCustomAssemblyFormat = 1; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index aaac4f274714..5f695485a752 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1482,7 +1482,7 @@ void ASTFunctionDeclAttr::print(::mlir::AsmPrinter &printer) const { LogicalResult ASTFunctionDeclAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ::clang::FunctionDecl *decl) { + const ::clang::FunctionDecl *decl) { if (!decl) { emitError() << "expected non-null AST declaration"; return failure(); @@ -1503,7 +1503,7 @@ void ASTVarDeclAttr::print(::mlir::AsmPrinter &printer) const { LogicalResult ASTVarDeclAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ::clang::VarDecl *decl) { + const ::clang::VarDecl *decl) { if (!decl) { emitError() << "expected non-null AST declaration"; return failure(); From bafb6e4032abeab9ddcf005145abd35782e411d1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Oct 2022 18:48:56 -0700 Subject: [PATCH 0592/2301] [CIR] Add DropAST pass and hook it up in the pipeline --- clang/include/clang/CIR/Dialect/Passes.h | 1 + clang/include/clang/CIR/Dialect/Passes.td | 22 ++++++++++ clang/lib/CIR/CodeGen/CIRPasses.cpp | 3 ++ .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + clang/lib/CIR/Dialect/Transforms/DropAST.cpp | 42 +++++++++++++++++++ 5 files changed, 69 insertions(+) create mode 100644 clang/lib/CIR/Dialect/Transforms/DropAST.cpp diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 6db6d4b0b301..ade41fd1db18 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -23,6 +23,7 @@ namespace mlir { std::unique_ptr createLifetimeCheckPass(); std::unique_ptr createLifetimeCheckPass(clang::ASTContext *astCtx); std::unique_ptr createMergeCleanupsPass(); +std::unique_ptr createDropASTPass(); //===----------------------------------------------------------------------===// // Registration diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 5bef3081bd69..a4562cf10bb4 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -27,6 +27,11 @@ def LifetimeCheck : Pass<"cir-lifetime-check"> { let description = [{ This pass relies on a lifetime analysis pass and uses the diagnostics mechanism to report to the user. It does not change any code. + + A default ctor is specified but is solely in order to make + tablegen happy, since this pass requires the presence of an ASTContext, + one can set that up using `mlir::createLifetimeCheckPass(clang::ASTContext &)` + instead. }]; let constructor = "mlir::createLifetimeCheckPass()"; let dependentDialects = ["cir::CIRDialect"]; @@ -41,4 +46,21 @@ def LifetimeCheck : Pass<"cir-lifetime-check"> { ]; } +def DropAST : Pass<"cir-drop-ast"> { + let summary = "Remove clang AST nodes attached to CIR operations"; + let description = [{ + Some CIR operations have references back to Clang AST, this is + necessary to perform lots of useful checks without having to + duplicate all rich AST information in CIR. As we move down in the + pipeline (e.g. generating LLVM or other MLIR dialects), the need + for such nodes diminish and AST information can be dropped. + + Right now this is enabled by default in Clang prior to dialect + codegen from CIR, but not before lifetime check, where AST is + required to be present. + }]; + let constructor = "mlir::createDropASTPass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + #endif // MLIR_DIALECT_CIR_PASSES diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 28bde345df13..31a3e61c8020 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -45,6 +45,9 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, pm.addPass(std::move(lifetimePass)); } + // FIXME: once CIRCodenAction fixes emission other than CIR we + // need to run this right before dialect emission. + pm.addPass(mlir::createDropASTPass()); pm.enableVerifier(enableVerifier); return pm.run(theModule); diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 52e47a5cb413..61ff272d3cac 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -1,6 +1,7 @@ add_clang_library(MLIRCIRTransforms LifetimeCheck.cpp MergeCleanups.cpp + DropAST.cpp DEPENDS MLIRCIRPassIncGen diff --git a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp new file mode 100644 index 000000000000..553206a2ef62 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp @@ -0,0 +1,42 @@ +//===- DropAST.cpp - emit diagnostic checks for lifetime violations -===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "clang/CIR/Dialect/Passes.h" + +#include "PassDetail.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "clang/AST/ASTContext.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +#include "llvm/ADT/SetOperations.h" +#include "llvm/ADT/SmallSet.h" + +using namespace mlir; +using namespace cir; + +namespace { +struct DropASTPass : public DropASTBase { + DropASTPass() = default; + void runOnOperation() override; +}; +} // namespace + +void DropASTPass::runOnOperation() { + Operation *op = getOperation(); + // This needs to be updated with operations that start + // carrying AST around. + op->walk([&](Operation *op) { + if (isa(op)) { + cast(op).removeAstAttr(); + } + }); +} + +std::unique_ptr mlir::createDropASTPass() { + return std::make_unique(); +} From e24aeb5d455030b1106d5a29838f6e76e5954020 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Oct 2022 18:06:36 -0700 Subject: [PATCH 0593/2301] [CIR][CodeGen] Include VarDecl in AllocaOp when possible --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index ae782ed74346..acabceb3f62f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1566,6 +1566,10 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, addr = builder.create(loc, /*addr type*/ localVarPtrTy, /*var type*/ ty, name, initStyle, alignIntAttr); + if (currVarDecl) { + auto alloca = cast(addr.getDefiningOp()); + alloca.setAstAttr(ASTVarDeclAttr::get(builder.getContext(), currVarDecl)); + } } return addr; } From 6a6dc0ecb28e0f555250ce463f1dc3df40304acb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Oct 2022 22:55:10 -0700 Subject: [PATCH 0594/2301] [CIR] Remove InitStyle from AllocaOp add use UnitAttr to track initialization Now that alloca's are linked with matching VarDecl's, no need to keep these other annotations around. For now we keep an unit attr that just designates presence of initialization. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 41 ++++++------------- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 34 ++++----------- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 11 ++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 10 ++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 -- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 8 ++-- clang/test/CIR/CIRToLLVM/array.cir | 2 +- clang/test/CIR/CIRToLLVM/binop-fp.cir | 12 +++--- clang/test/CIR/CIRToLLVM/binop-int.cir | 6 +-- clang/test/CIR/CIRToLLVM/bool.cir | 2 +- clang/test/CIR/CIRToLLVM/cmp.cir | 10 ++--- clang/test/CIR/CIRToLLVM/goto.cir | 2 +- clang/test/CIR/CIRToLLVM/memref.cir | 2 +- clang/test/CIR/CIRToLLVM/unary-inc-dec.cir | 4 +- clang/test/CIR/CodeGen/String.cpp | 10 ++--- clang/test/CIR/CodeGen/array.cpp | 10 ++--- clang/test/CIR/CodeGen/assign-operator.cpp | 20 ++++----- clang/test/CIR/CodeGen/basic.c | 10 ++--- clang/test/CIR/CodeGen/basic.cpp | 10 ++--- clang/test/CIR/CodeGen/binassign.cpp | 2 +- clang/test/CIR/CodeGen/call.c | 24 +++++------ clang/test/CIR/CodeGen/cast.cpp | 4 +- clang/test/CIR/CodeGen/comma.cpp | 6 +-- clang/test/CIR/CodeGen/ctor-alias.cpp | 6 +-- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 8 ++-- clang/test/CIR/CodeGen/ctor.cpp | 6 +-- clang/test/CIR/CodeGen/globals.cpp | 4 +- clang/test/CIR/CodeGen/goto.cpp | 10 ++--- clang/test/CIR/CodeGen/inc-dec.cpp | 16 ++++---- clang/test/CIR/CodeGen/lambda.cpp | 2 +- clang/test/CIR/CodeGen/loop-scope.cpp | 8 ++-- clang/test/CIR/CodeGen/lvalue-refs.cpp | 4 +- clang/test/CIR/CodeGen/return.cpp | 4 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 8 ++-- clang/test/CIR/CodeGen/struct.c | 4 +- clang/test/CIR/CodeGen/struct.cpp | 18 ++++---- clang/test/CIR/CodeGen/switch.cpp | 6 +-- clang/test/CIR/IR/array.cir | 4 +- clang/test/CIR/IR/cast.cir | 2 +- clang/test/CIR/IR/cir-ops.cir | 20 ++++----- clang/test/CIR/IR/invalid.cir | 6 +-- clang/test/CIR/IR/loop.cir | 8 ++-- clang/test/CIR/IR/ptr_stride.cir | 4 +- clang/test/CIR/IR/types.cir | 4 +- clang/test/CIR/Transforms/merge-cleanups.cir | 10 ++--- 45 files changed, 184 insertions(+), 222 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 6c20be267069..32eaddca24a7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -166,23 +166,6 @@ def ConstantOp : CIR_Op<"cst", // AllocaOp //===----------------------------------------------------------------------===// -def InitStyle_None : I32EnumAttrCase<"uninitialized", 1>; -def InitStyle_ParamInit : I32EnumAttrCase<"paraminit", 2>; - -// These are similar to Clang's VarDecl initialization style -def InitStyle_CInit : I32EnumAttrCase<"cinit", 3>; -def InitStyle_CallInit : I32EnumAttrCase<"callinit", 4>; -def InitStyle_ListInit : I32EnumAttrCase<"listinit", 5>; - -def InitStyle : I32EnumAttr< - "InitStyle", - "initialization style", - [InitStyle_None, InitStyle_ParamInit, - InitStyle_CInit, InitStyle_CallInit, - InitStyle_ListInit]> { - let cppNamespace = "::mlir::cir"; -} - class AllocaTypesMatchWith : PredOpTrait, ["count", cinit] {alignment = 4 : i64} + %0 = cir.alloca i32, !cir.ptr, ["count", init] {alignment = 4 : i64} // int *ptr; - %1 = cir.alloca !cir.ptr, cir.ptr >, ["ptr", uninitialized] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr, cir.ptr >, ["ptr"] {alignment = 8 : i64} ... ``` }]; @@ -226,8 +207,7 @@ def AllocaOp : CIR_Op<"alloca", [ let arguments = (ins TypeAttr:$allocaType, StrAttr:$name, - // FIXME: add "uninitialzed" as default mode - Arg:$init, + UnitAttr:$init, ConfinedAttr, [IntMinValue<0>]>:$alignment, OptionalAttr:$ast ); @@ -238,7 +218,7 @@ def AllocaOp : CIR_Op<"alloca", [ let skipDefaultBuilders = 1; let builders = [ OpBuilder<(ins "Type":$addr, "Type":$allocaType, - "StringRef":$name, "mlir::cir::InitStyle":$init, + "StringRef":$name, "IntegerAttr":$alignment)> ]; @@ -250,7 +230,10 @@ def AllocaOp : CIR_Op<"alloca", [ // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. let assemblyFormat = [{ - $allocaType `,` `cir.ptr` type($addr) `,` `[` $name `,` $init `]` + $allocaType `,` `cir.ptr` type($addr) `,` + `[` $name + (`,` `init` $init^)? + `]` (`ast` $ast^)? attr-dict }]; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index acabceb3f62f..396d33cec200 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -227,25 +227,10 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, auto SrcAlloca = dyn_cast_or_null(Addr.getPointer().getDefiningOp()); if (currVarDecl && SrcAlloca) { - InitStyle IS; const VarDecl *VD = currVarDecl; assert(VD && "VarDecl expected"); - if (VD->hasInit()) { - switch (VD->getInitStyle()) { - case VarDecl::ParenListInit: - llvm_unreachable("NYI"); - case VarDecl::CInit: - IS = InitStyle::cinit; - break; - case VarDecl::CallInit: - IS = InitStyle::callinit; - break; - case VarDecl::ListInit: - IS = InitStyle::listinit; - break; - } - SrcAlloca.setInitAttr(InitStyleAttr::get(builder.getContext(), IS)); - } + if (VD->hasInit()) + SrcAlloca.setInitAttr(mlir::UnitAttr::get(builder.getContext())); } assert(currSrcLoc && "must pass in source location"); @@ -1529,8 +1514,8 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, resElse.succeeded()); } -mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, - mlir::Type ty, mlir::Location loc, +mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, + mlir::Location loc, CharUnits alignment) { auto getAllocaInsertPositionOp = [&](mlir::Block **insertBlock) -> mlir::Operation * { @@ -1564,7 +1549,7 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, builder.setInsertionPointToStart(insertBlock); } addr = builder.create(loc, /*addr type*/ localVarPtrTy, - /*var type*/ ty, name, initStyle, + /*var type*/ ty, name, alignIntAttr); if (currVarDecl) { auto alloca = cast(addr.getDefiningOp()); @@ -1574,10 +1559,10 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, return addr; } -mlir::Value CIRGenFunction::buildAlloca(StringRef name, InitStyle initStyle, - QualType ty, mlir::Location loc, +mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty, + mlir::Location loc, CharUnits alignment) { - return buildAlloca(name, initStyle, getCIRType(ty), loc, alignment); + return buildAlloca(name, getCIRType(ty), loc, alignment); } mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, @@ -1770,6 +1755,5 @@ mlir::cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty, if (ArraySize) assert(0 && "NYI"); return cast( - buildAlloca(Name.str(), InitStyle::uninitialized, Ty, Loc, CharUnits()) - .getDefiningOp()); + buildAlloca(Name.str(), Ty, Loc, CharUnits()).getDefiningOp()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index c293a2707b5b..fc14ce9343c6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -234,8 +234,7 @@ void CIRGenFunction::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca) { llvm_unreachable("NYI"); } else { - auto addr = - buildAlloca("__retval", InitStyle::uninitialized, ty, loc, alignment); + auto addr = buildAlloca("__retval", ty, loc, alignment); FnRetAlloca = addr; ReturnValue = Address(addr, alignment); @@ -255,9 +254,11 @@ mlir::LogicalResult CIRGenFunction::declare(const Decl *var, QualType ty, assert(namedVar && "Needs a named decl"); assert(!symbolTable.count(var) && "not supposed to be available just yet"); - addr = buildAlloca(namedVar->getName(), - isParam ? InitStyle::paraminit : InitStyle::uninitialized, - ty, loc, alignment); + addr = buildAlloca(namedVar->getName(), ty, loc, alignment); + if (isParam) { + auto allocaOp = cast(addr.getDefiningOp()); + allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext())); + } symbolTable.insert(var, addr); return mlir::success(); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 6fe6dc350322..06ff096a6cfd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -230,12 +230,10 @@ class CIRGenFunction { mlir::LogicalResult declare(const clang::Decl *var, clang::QualType ty, mlir::Location loc, clang::CharUnits alignment, mlir::Value &addr, bool isParam = false); - mlir::Value buildAlloca(llvm::StringRef name, mlir::cir::InitStyle initStyle, - clang::QualType ty, mlir::Location loc, - clang::CharUnits alignment); - mlir::Value buildAlloca(llvm::StringRef name, mlir::cir::InitStyle initStyle, - mlir::Type ty, mlir::Location loc, - clang::CharUnits alignment); + mlir::Value buildAlloca(llvm::StringRef name, clang::QualType ty, + mlir::Location loc, clang::CharUnits alignment); + mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, + mlir::Location loc, clang::CharUnits alignment); void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, clang::CharUnits alignment); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 5f695485a752..0e61b02c6d9b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -115,15 +115,11 @@ static RetTy parseOptionalCIRKeyword(OpAsmParser &parser, void AllocaOp::build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type addr, ::mlir::Type allocaType, ::llvm::StringRef name, - ::mlir::cir::InitStyle init, ::mlir::IntegerAttr alignment) { odsState.addAttribute(getAllocaTypeAttrName(odsState.name), ::mlir::TypeAttr::get(allocaType)); odsState.addAttribute(getNameAttrName(odsState.name), odsBuilder.getStringAttr(name)); - odsState.addAttribute( - getInitAttrName(odsState.name), - ::mlir::cir::InitStyleAttr::get(odsBuilder.getContext(), init)); if (alignment) { odsState.addAttribute(getAlignmentAttrName(odsState.name), alignment); } diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 62e9608e4dd2..86a8f761d781 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -559,10 +559,10 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { // If other styles of initialization gets added, required to add support // here. - assert((allocaOp.getInitAttr().getValue() == mlir::cir::InitStyle::cinit || - allocaOp.getInitAttr().getValue() == - mlir::cir::InitStyle::uninitialized) && - "other init styles tbd"); + auto varDecl = allocaOp.getAst(); + assert(!varDecl || + (!allocaOp.getInit() || !varDecl->getAstDecl()->isDirectInit()) && + "not implemented"); } void LifetimeCheckPass::checkStore(StoreOp storeOp) { diff --git a/clang/test/CIR/CIRToLLVM/array.cir b/clang/test/CIR/CIRToLLVM/array.cir index f3c2ba751b9f..5c4dffae96a2 100644 --- a/clang/test/CIR/CIRToLLVM/array.cir +++ b/clang/test/CIR/CIRToLLVM/array.cir @@ -4,7 +4,7 @@ module { cir.func @foo() { - %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} + %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} cir.return } } diff --git a/clang/test/CIR/CIRToLLVM/binop-fp.cir b/clang/test/CIR/CIRToLLVM/binop-fp.cir index 30c958826c1a..30e56b04b090 100644 --- a/clang/test/CIR/CIRToLLVM/binop-fp.cir +++ b/clang/test/CIR/CIRToLLVM/binop-fp.cir @@ -4,12 +4,12 @@ module { cir.func @foo() { - %0 = cir.alloca f32, cir.ptr , ["c", uninitialized] {alignment = 4 : i64} - %1 = cir.alloca f32, cir.ptr , ["d", uninitialized] {alignment = 4 : i64} - %2 = cir.alloca f32, cir.ptr , ["y", cinit] {alignment = 4 : i64} - %3 = cir.alloca f64, cir.ptr , ["e", uninitialized] {alignment = 8 : i64} - %4 = cir.alloca f64, cir.ptr , ["f", uninitialized] {alignment = 8 : i64} - %5 = cir.alloca f64, cir.ptr , ["g", cinit] {alignment = 8 : i64} + %0 = cir.alloca f32, cir.ptr , ["c"] {alignment = 4 : i64} + %1 = cir.alloca f32, cir.ptr , ["d"] {alignment = 4 : i64} + %2 = cir.alloca f32, cir.ptr , ["y", init] {alignment = 4 : i64} + %3 = cir.alloca f64, cir.ptr , ["e"] {alignment = 8 : i64} + %4 = cir.alloca f64, cir.ptr , ["f"] {alignment = 8 : i64} + %5 = cir.alloca f64, cir.ptr , ["g", init] {alignment = 8 : i64} %6 = cir.load %0 : cir.ptr , f32 %7 = cir.load %1 : cir.ptr , f32 %8 = cir.binop(mul, %6, %7) : f32 diff --git a/clang/test/CIR/CIRToLLVM/binop-int.cir b/clang/test/CIR/CIRToLLVM/binop-int.cir index 00cd6cfb7fa2..d5b26e443d20 100644 --- a/clang/test/CIR/CIRToLLVM/binop-int.cir +++ b/clang/test/CIR/CIRToLLVM/binop-int.cir @@ -4,9 +4,9 @@ module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a", cinit] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} - %2 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} %3 = cir.cst(2 : i32) : i32 cir.store %3, %0 : i32, cir.ptr %4 = cir.cst(1 : i32) : i32 cir.store %4, %1 : i32, cir.ptr %5 = cir.load %0 : cir.ptr , i32 diff --git a/clang/test/CIR/CIRToLLVM/bool.cir b/clang/test/CIR/CIRToLLVM/bool.cir index 067741cf37c9..a30fe73a1004 100644 --- a/clang/test/CIR/CIRToLLVM/bool.cir +++ b/clang/test/CIR/CIRToLLVM/bool.cir @@ -4,7 +4,7 @@ module { cir.func @foo() { - %0 = cir.alloca !cir.bool, cir.ptr , ["a", cinit] {alignment = 1 : i64} + %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} %1 = cir.cst(true) : !cir.bool cir.store %1, %0 : !cir.bool, cir.ptr cir.return diff --git a/clang/test/CIR/CIRToLLVM/cmp.cir b/clang/test/CIR/CIRToLLVM/cmp.cir index 24dcb6fef8cc..f7d821ad2467 100644 --- a/clang/test/CIR/CIRToLLVM/cmp.cir +++ b/clang/test/CIR/CIRToLLVM/cmp.cir @@ -4,11 +4,11 @@ module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a", uninitialized] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} - %2 = cir.alloca f32, cir.ptr , ["c", uninitialized] {alignment = 4 : i64} - %3 = cir.alloca f32, cir.ptr , ["d", uninitialized] {alignment = 4 : i64} - %4 = cir.alloca !cir.bool, cir.ptr , ["e", uninitialized] {alignment = 1 : i64} + %0 = cir.alloca i32, cir.ptr , ["a"] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b"] {alignment = 4 : i64} + %2 = cir.alloca f32, cir.ptr , ["c"] {alignment = 4 : i64} + %3 = cir.alloca f32, cir.ptr , ["d"] {alignment = 4 : i64} + %4 = cir.alloca !cir.bool, cir.ptr , ["e"] {alignment = 1 : i64} %5 = cir.load %0 : cir.ptr , i32 %6 = cir.load %1 : cir.ptr , i32 %7 = cir.cmp(gt, %5, %6) : i32, !cir.bool diff --git a/clang/test/CIR/CIRToLLVM/goto.cir b/clang/test/CIR/CIRToLLVM/goto.cir index a70f65b2bf88..696c2a9bbd17 100644 --- a/clang/test/CIR/CIRToLLVM/goto.cir +++ b/clang/test/CIR/CIRToLLVM/goto.cir @@ -4,7 +4,7 @@ module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} %1 = cir.cst(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr cir.br ^bb2 diff --git a/clang/test/CIR/CIRToLLVM/memref.cir b/clang/test/CIR/CIRToLLVM/memref.cir index 5431fb20967a..548dec008b94 100644 --- a/clang/test/CIR/CIRToLLVM/memref.cir +++ b/clang/test/CIR/CIRToLLVM/memref.cir @@ -4,7 +4,7 @@ module { cir.func @foo() -> i32 { - %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} %1 = cir.cst(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr %2 = cir.load %0 : cir.ptr , i32 diff --git a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir index 319bdb0a37c2..6c3f7917c7f9 100644 --- a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir +++ b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir @@ -4,8 +4,8 @@ module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a", cinit] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} %2 = cir.cst(2 : i32) : i32 cir.store %2, %0 : i32, cir.ptr cir.store %2, %1 : i32, cir.ptr diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 21678ab8bdb5..8ef8166ef441 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -32,7 +32,7 @@ void test() { // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2Ei // CHECK-NEXT: %0 = cir.alloca !cir.ptr -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["size", paraminit] +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["size", init] // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: cir.store %arg1, %1 // CHECK-NEXT: %2 = cir.load %0 @@ -47,8 +47,8 @@ void test() { // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr @@ -58,8 +58,8 @@ void test() { // CHECK-NEXT: cir.return // CHECK: cir.func linkonce_odr @_ZN6StringC1EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 394e2ce3d081..5c41b207aad2 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -7,7 +7,7 @@ void a0() { } // CHECK: cir.func @_Z2a0v() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} void a1() { int a[10]; @@ -15,7 +15,7 @@ void a1() { } // CHECK: cir.func @_Z2a1v() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} // CHECK-NEXT: %1 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %2 = cir.cst(0 : i32) : i32 // CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr @@ -28,8 +28,8 @@ int *a2() { } // CHECK: cir.func @_Z2a2v() -> !cir.ptr { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a", uninitialized] {alignment = 16 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} // CHECK-NEXT: %2 = cir.cst(0 : i32) : i32 // CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : i32), !cir.ptr @@ -43,7 +43,7 @@ void local_stringlit() { // CHECK: cir.global "private" constant internal @".str" = #cir.cst_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} loc(#loc17) // CHECK: cir.func @_Z15local_stringlitv() { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", cinit] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK-NEXT: cir.store %2, %0 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 1530866091f6..27ab5c7e6aec 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -11,8 +11,8 @@ struct String { // StringView::StringView(String const&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewC2ERK6String - // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr // CHECK: cir.store %arg1, %1 : !cir.ptr // CHECK: %2 = cir.load %0 : cir.ptr > @@ -27,9 +27,9 @@ struct String { // StringView::operator=(StringView&&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewaSEOS_ - // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["", paraminit] {alignment = 8 : i64} - // CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["", init] {alignment = 8 : i64} + // CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr // CHECK: cir.store %arg1, %1 : !cir.ptr // CHECK: %3 = cir.load deref %0 : cir.ptr > @@ -60,13 +60,13 @@ int main() { } // CHECK: cir.func @main() -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !_22struct2EStringView22, cir.ptr , ["sv", uninitialized] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !_22struct2EStringView22, cir.ptr , ["sv"] {alignment = 8 : i64} // CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () // CHECK: cir.scope { -// CHECK: %3 = cir.alloca !_22struct2EString22, cir.ptr , ["s", uninitialized] {alignment = 8 : i64} -// CHECK: %4 = cir.alloca !_22struct2EStringView22, cir.ptr , ["ref.tmp", uninitialized] {alignment = 8 : i64} -// CHECK: %5 = cir.alloca !_22struct2EStringView22, cir.ptr , ["ref.tmp", uninitialized] {alignment = 8 : i64} +// CHECK: %3 = cir.alloca !_22struct2EString22, cir.ptr , ["s"] {alignment = 8 : i64} +// CHECK: %4 = cir.alloca !_22struct2EStringView22, cir.ptr , ["ref.tmp"] {alignment = 8 : i64} +// CHECK: %5 = cir.alloca !_22struct2EStringView22, cir.ptr , ["ref.tmp"] {alignment = 8 : i64} // CHECK: %6 = cir.get_global @".str" : cir.ptr > // CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr // CHECK: cir.call @_ZN6StringC2EPKc(%3, %7) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 2ed2c007561b..ff9fd81d45c5 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -11,8 +11,8 @@ int foo(int i) { // CHECK: module { // CHECK-NEXT: cir.func @foo(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", paraminit] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , i32 @@ -23,7 +23,7 @@ int foo(int i) { int f2() { return 3; } // CHECK: cir.func @f2() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 @@ -35,8 +35,8 @@ int f3() { } // CHECK: cir.func @f3() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.cst(3 : i32) : i32 // CHECK-NEXT: cir.store %2, %1 : i32, cir.ptr // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 96608f18eafb..dac085e9ef95 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -8,7 +8,7 @@ int *p0() { } // CHECK: cir.func @_Z2p0v() -> !cir.ptr { -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] // CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > @@ -19,7 +19,7 @@ int *p1() { } // CHECK: cir.func @_Z2p1v() -> !cir.ptr { -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", uninitialized] +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p"] // CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > @@ -35,12 +35,12 @@ int *p2() { } // CHECK: cir.func @_Z2p2v() -> !cir.ptr { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", cinit] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] {alignment = 8 : i64} // CHECK-NEXT: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %2, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %7 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %7 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} // CHECK-NEXT: %8 = cir.cst(0 : i32) : i32 // CHECK-NEXT: cir.store %8, %7 : i32, cir.ptr // CHECK-NEXT: cir.store %7, %1 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp index cf32e5c5d44b..bd53b12bd04f 100644 --- a/clang/test/CIR/CodeGen/binassign.cpp +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -17,7 +17,7 @@ int foo(int a, int b) { return x; } -// CHECK: [[Value:%[0-9]+]] = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} +// CHECK: [[Value:%[0-9]+]] = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} // CHECK: = cir.binop(mul, // CHECK: = cir.load {{.*}}[[Value]] // CHECK: = cir.binop(mul, diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 8f7ba8f782e1..91c57e64ce16 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -20,9 +20,9 @@ void d(void) { // CHECK: cir.return // CHECK: } // CHECK: cir.func @b(%arg0: i32 {{.*}}, %arg1: i32 {{.*}}) -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] -// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] -// CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] +// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", init] +// CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval"] // CHECK: cir.store %arg0, %0 : i32, cir.ptr // CHECK: cir.store %arg1, %1 : i32, cir.ptr // CHECK: %3 = cir.load %0 : cir.ptr , i32 @@ -33,9 +33,9 @@ void d(void) { // CHECK: cir.return %6 // CHECK: } // CHECK: cir.func @c(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { -// CHECK: %0 = cir.alloca f64, cir.ptr , ["a", paraminit] -// CHECK: %1 = cir.alloca f64, cir.ptr , ["b", paraminit] -// CHECK: %2 = cir.alloca f64, cir.ptr , ["__retval", uninitialized] +// CHECK: %0 = cir.alloca f64, cir.ptr , ["a", init] +// CHECK: %1 = cir.alloca f64, cir.ptr , ["b", init] +// CHECK: %2 = cir.alloca f64, cir.ptr , ["__retval"] // CHECK: cir.store %arg0, %0 : f64, cir.ptr // CHECK: cir.store %arg1, %1 : f64, cir.ptr // CHECK: %3 = cir.load %0 : cir.ptr , f64 @@ -58,9 +58,9 @@ void d(void) { // CXX-NEXT: cir.return // CXX-NEXT: } // CXX-NEXT: cir.func @_Z1bii(%arg0: i32 {{.*}}, %arg1: i32 {{.*}}) -> i32 { -// CXX-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] -// CXX-NEXT: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] -// CXX-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] +// CXX-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", init] +// CXX-NEXT: %1 = cir.alloca i32, cir.ptr , ["b", init] +// CXX-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval"] // CXX-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CXX-NEXT: cir.store %arg1, %1 : i32, cir.ptr // CXX-NEXT: %3 = cir.load %0 : cir.ptr , i32 @@ -71,9 +71,9 @@ void d(void) { // CXX-NEXT: cir.return %6 // CXX-NEXT: } // CXX-NEXT: cir.func @_Z1cdd(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { -// CXX-NEXT: %0 = cir.alloca f64, cir.ptr , ["a", paraminit] -// CXX-NEXT: %1 = cir.alloca f64, cir.ptr , ["b", paraminit] -// CXX-NEXT: %2 = cir.alloca f64, cir.ptr , ["__retval", uninitialized] +// CXX-NEXT: %0 = cir.alloca f64, cir.ptr , ["a", init] +// CXX-NEXT: %1 = cir.alloca f64, cir.ptr , ["b", init] +// CXX-NEXT: %2 = cir.alloca f64, cir.ptr , ["__retval"] // CXX-NEXT: cir.store %arg0, %0 : f64, cir.ptr // CXX-NEXT: cir.store %arg1, %1 : f64, cir.ptr // CXX-NEXT: %3 = cir.load %0 : cir.ptr , f64 diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 23b5cdd44f75..eff79450d025 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -5,8 +5,8 @@ unsigned char cxxstaticcast_0(unsigned int x) { } // CHECK: cir.func @_Z15cxxstaticcast_0j -// CHECK: %0 = cir.alloca i32, cir.ptr , ["x", paraminit] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca i8, cir.ptr , ["__retval", uninitialized] {alignment = 1 : i64} +// CHECK: %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca i8, cir.ptr , ["__retval"] {alignment = 1 : i64} // CHECK: cir.store %arg0, %0 : i32, cir.ptr // CHECK: %2 = cir.load %0 : cir.ptr , i32 // CHECK: %3 = cir.cast(integral, %2 : i32), i8 diff --git a/clang/test/CIR/CodeGen/comma.cpp b/clang/test/CIR/CodeGen/comma.cpp index 2809ec36fdf8..679f28a8236e 100644 --- a/clang/test/CIR/CodeGen/comma.cpp +++ b/clang/test/CIR/CodeGen/comma.cpp @@ -8,9 +8,9 @@ int c0() { } // CHECK: cir.func @_Z2c0v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval", uninitialized] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", cinit] -// CHECK: %[[#B:]] = cir.alloca i32, cir.ptr , ["b", cinit] +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: %[[#B:]] = cir.alloca i32, cir.ptr , ["b", init] // CHECK: %[[#LOADED_B:]] = cir.load %[[#B]] : cir.ptr , i32 // CHECK: %[[#]] = cir.binop(add, %[[#LOADED_B]], %[[#]]) : i32 // CHECK: %[[#LOADED_A:]] = cir.load %[[#A]] : cir.ptr , i32 diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp index 01c698918e8a..555bfd3032e4 100644 --- a/clang/test/CIR/CodeGen/ctor-alias.cpp +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -9,8 +9,8 @@ void t() { } // CHECK: cir.func linkonce_odr @_ZN11DummyStringC2EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr @@ -19,7 +19,7 @@ void t() { // CHECK-NOT: cir.fun @_ZN11DummyStringC1EPKc // CHECK: cir.func @_Z1tv -// CHECK-NEXT: %0 = cir.alloca !_22struct2EDummyString22, cir.ptr , ["s4", uninitialized] {alignment = 1 : i64} +// CHECK-NEXT: %0 = cir.alloca !_22struct2EDummyString22, cir.ptr , ["s4"] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index 218ffc9b0f2c..b6d4888468be 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -6,8 +6,8 @@ struct String { long size; String(const String &s) : size{s.size} {} // CHECK: cir.func linkonce_odr @_ZN6StringC2ERKS_ -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", paraminit] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 // CHECK: cir.store %arg1, %1 // CHECK: %2 = cir.load %0 @@ -28,8 +28,8 @@ void foo() { // FIXME: s1 shouldn't be uninitialized. // cir.func @_Z3foov() { - // %0 = cir.alloca !_22struct2EString22, cir.ptr , ["s", uninitialized] {alignment = 8 : i64} - // %1 = cir.alloca !_22struct2EString22, cir.ptr , ["s1", uninitialized] {alignment = 8 : i64} + // %0 = cir.alloca !_22struct2EString22, cir.ptr , ["s"] {alignment = 8 : i64} + // %1 = cir.alloca !_22struct2EString22, cir.ptr , ["s1"] {alignment = 8 : i64} // cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () // cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () // cir.return diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index 188ac76de651..8948842014de 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -14,19 +14,19 @@ void baz() { // CHECK: !_22struct2EStruk22 = !cir.struct<"struct.Struk", i32> // CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK: cir.func linkonce_odr @_ZN5StrukC1Ev(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () // CHECK-NEXT: cir.return // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !_22struct2EStruk22, cir.ptr , ["s", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !_22struct2EStruk22, cir.ptr , ["s"] {alignment = 4 : i64} // CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index e08c18bda949..6b6f9118ab9a 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -41,13 +41,13 @@ void use_global_string() { // CHECK-NEXT: cir.global external @s2 = @".str": !cir.ptr // CHECK: cir.func @_Z10use_globalv() { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["li", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["li", init] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.get_global @a : cir.ptr // CHECK-NEXT: %2 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr // CHECK: cir.func @_Z17use_global_stringv() { -// CHECK-NEXT: %0 = cir.alloca i8, cir.ptr , ["c", cinit] {alignment = 1 : i64} +// CHECK-NEXT: %0 = cir.alloca i8, cir.ptr , ["c", init] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @s2 : cir.ptr > // CHECK-NEXT: %2 = cir.load %1 : cir.ptr >, !cir.ptr // CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 68587763d818..db79783f628e 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -10,8 +10,8 @@ void g0(int a) { } // CHECK: cir.func @_Z2g0i -// CHECK-NEXT %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} -// CHECK-NEXT %1 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} +// CHECK-NEXT %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} // CHECK-NEXT cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT %2 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT cir.store %2, %1 : i32, cir.ptr @@ -38,9 +38,9 @@ void g1(int a) { // Make sure alloca for "y" shows up in the entry block // CHECK: cir.func @_Z2g1i(%arg0: i32 -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["y", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["y", init] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr int g2() { diff --git a/clang/test/CIR/CodeGen/inc-dec.cpp b/clang/test/CIR/CodeGen/inc-dec.cpp index 5c65243a1cb3..c342a0b15400 100644 --- a/clang/test/CIR/CodeGen/inc-dec.cpp +++ b/clang/test/CIR/CodeGen/inc-dec.cpp @@ -7,8 +7,8 @@ unsigned id0() { } // CHECK: cir.func @_Z3id0v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval", uninitialized] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", cinit] +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(inc, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] @@ -21,8 +21,8 @@ unsigned id1() { } // CHECK: cir.func @_Z3id1v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval", uninitialized] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", cinit] +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(dec, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] @@ -34,8 +34,8 @@ unsigned id2() { } // CHECK: cir.func @_Z3id2v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval", uninitialized] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", cinit] +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(inc, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] @@ -47,8 +47,8 @@ unsigned id3() { } // CHECK: cir.func @_Z3id3v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval", uninitialized] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", cinit] +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(dec, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 3a779071ab03..91e72fe892be 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -9,4 +9,4 @@ void fn() { // CHECK: !22class2Eanon22 = !cir.struct<"class.anon", i8> // CHECK-NEXT: module // CHECK-NEXT: cir.func @_Z2fnv() -// CHECK-NEXT: %0 = cir.alloca !22class2Eanon22, cir.ptr , ["a", uninitialized] +// CHECK-NEXT: %0 = cir.alloca !22class2Eanon22, cir.ptr , ["a"] diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index 297627fd791a..3255f319ed0b 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -11,19 +11,19 @@ void l0() { // CPPSCOPE: cir.func @_Z2l0v() { // CPPSCOPE-NEXT: cir.scope { -// CPPSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} -// CPPSCOPE-NEXT: %1 = cir.alloca i32, cir.ptr , ["j", cinit] {alignment = 4 : i64} +// CPPSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} +// CPPSCOPE-NEXT: %1 = cir.alloca i32, cir.ptr , ["j", init] {alignment = 4 : i64} // CPPSCOPE-NEXT: %2 = cir.cst(0 : i32) : i32 // CPPSCOPE-NEXT: cir.store %2, %0 : i32, cir.ptr // CPPSCOPE-NEXT: cir.loop for(cond : { // CSCOPE: cir.func @l0() { // CSCOPE-NEXT: cir.scope { -// CSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} +// CSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} // CSCOPE-NEXT: %1 = cir.cst(0 : i32) : i32 // CSCOPE-NEXT: cir.store %1, %0 : i32, cir.ptr // CSCOPE-NEXT: cir.loop for(cond : { // CSCOPE: }) { // CSCOPE-NEXT: cir.scope { -// CSCOPE-NEXT: %2 = cir.alloca i32, cir.ptr , ["j", cinit] {alignment = 4 : i64} +// CSCOPE-NEXT: %2 = cir.alloca i32, cir.ptr , ["j", init] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/lvalue-refs.cpp b/clang/test/CIR/CodeGen/lvalue-refs.cpp index 6a76b9954a9d..89e0f6038afc 100644 --- a/clang/test/CIR/CodeGen/lvalue-refs.cpp +++ b/clang/test/CIR/CodeGen/lvalue-refs.cpp @@ -7,7 +7,7 @@ struct String { void split(String &S) {} // CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["S", paraminit] +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["S", init] void foo() { String s; @@ -15,5 +15,5 @@ void foo() { } // CHECK: cir.func @_Z3foov() { -// CHECK: %0 = cir.alloca !_22struct2EString22, cir.ptr , ["s", uninitialized] +// CHECK: %0 = cir.alloca !_22struct2EString22, cir.ptr , ["s"] // CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/return.cpp b/clang/test/CIR/CodeGen/return.cpp index 27b0dbf0bfc4..d56d3c272f7d 100644 --- a/clang/test/CIR/CodeGen/return.cpp +++ b/clang/test/CIR/CodeGen/return.cpp @@ -5,8 +5,8 @@ int &ret0(int &x) { } // CHECK: cir.func @_Z4ret0Ri -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", paraminit] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__retval", uninitialized] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 0afb0066857d..fb3b1ff933b1 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -15,10 +15,10 @@ int s0(int a, int b) { // CHECK: #loc3 = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) // CHECK: module { // CHECK: cir.func @_Z2s0ii(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} loc(#loc2) -// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", paraminit] {alignment = 4 : i64} loc(#loc3) -// CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} loc(#loc4) -// CHECK: %3 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} loc(#loc5) +// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc2) +// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc3) +// CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc4) +// CHECK: %3 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} loc(#loc5) // CHECK: cir.store %arg0, %0 : i32, cir.ptr loc(#loc6) // CHECK: cir.store %arg1, %1 : i32, cir.ptr loc(#loc6) // CHECK: %4 = cir.load %0 : cir.ptr , i32 loc(#loc7) diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 7e318411de37..5a7c2753da95 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -21,8 +21,8 @@ void baz() { // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> // CHECK-NEXT: module { // CHECK-NEXT: cir.func @baz() { -// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 6765d38d372d..3da9b459866e 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -28,15 +28,15 @@ void baz() { // CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : i32, cir.ptr // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr @@ -44,9 +44,9 @@ void baz() { // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", paraminit] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : i32, cir.ptr // CHECK-NEXT: %3 = cir.load %0 : cir.ptr >, !cir.ptr @@ -57,9 +57,9 @@ void baz() { // CHECK-NEXT: } // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b", uninitialized] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["result", cinit] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["result", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () // CHECK-NEXT: %3 = cir.cst(4 : i32) : i32 // CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, i32) -> () diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 004bbe1ee129..a7b5e40c66bb 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -30,7 +30,7 @@ void sw1(int a) { // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 2 : i32) { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %4 = cir.alloca i32, cir.ptr , ["yolo", cinit] +// CHECK-NEXT: %4 = cir.alloca i32, cir.ptr , ["yolo", init] // CHECK-NEXT: %5 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: %6 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %7 = cir.binop(add, %5, %6) : i32 @@ -54,8 +54,8 @@ void sw2(int a) { // CHECK: cir.func @_Z3sw2i // CHECK: cir.scope { -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["yolo", cinit] -// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["fomo", cinit] +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["yolo", init] +// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["fomo", init] // CHECK: cir.switch (%4 : i32) [ // CHECK-NEXT: case (equal, 3 : i32) { // CHECK-NEXT: %5 = cir.cst(0 : i32) : i32 diff --git a/clang/test/CIR/IR/array.cir b/clang/test/CIR/IR/array.cir index 182082b9ba82..f60d9c89acb6 100644 --- a/clang/test/CIR/IR/array.cir +++ b/clang/test/CIR/IR/array.cir @@ -2,10 +2,10 @@ module { cir.func @arrays() { - %0 = cir.alloca !cir.array, cir.ptr>, ["x", paraminit] + %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] cir.return } } // CHECK: cir.func @arrays() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index 4af2fa936b50..02ae51620528 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -2,7 +2,7 @@ module { cir.func @yolo(%arg0 : i32) { - %0 = cir.alloca !cir.array, cir.ptr>, ["x", paraminit] + %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] %a = cir.cast (int_to_bool, %arg0 : i32), !cir.bool %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 74eaea82b8ae..551edf7c4eec 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -3,14 +3,14 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s module { cir.func @foo(%arg0: i32) -> i32 { - %0 = cir.alloca i32, cir.ptr , ["x", paraminit] + %0 = cir.alloca i32, cir.ptr , ["x", init] cir.store %arg0, %0 : i32, cir.ptr %1 = cir.load %0 : cir.ptr , i32 cir.return %1 : i32 } cir.func @f3() -> i32 { - %0 = cir.alloca i32, cir.ptr , ["x", cinit] + %0 = cir.alloca i32, cir.ptr , ["x", init] %1 = cir.cst(3 : i32) : i32 cir.store %1, %0 : i32, cir.ptr %2 = cir.load %0 : cir.ptr , i32 @@ -18,8 +18,8 @@ module { } cir.func @if0(%arg0: i32) -> i32 { - %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} cir.store %arg0, %1 : i32, cir.ptr %2 = cir.cst(0 : i32) : i32 cir.store %2, %0 : i32, cir.ptr @@ -37,9 +37,9 @@ module { } cir.func @s0() { - %0 = cir.alloca i32, cir.ptr , ["x", uninitialized] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["x"] {alignment = 4 : i64} cir.scope { - %1 = cir.alloca i32, cir.ptr , ["y", uninitialized] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["y"] {alignment = 4 : i64} } cir.return } @@ -48,14 +48,14 @@ module { // CHECK: module { // CHECK-NEXT: cir.func @foo(%arg0: i32) -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", paraminit] +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", init] // CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %1 : i32 // CHECK-NEXT: } // CHECK-NEXT: cir.func @f3() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", cinit] +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", init] // CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 @@ -73,9 +73,9 @@ module { // CHECK-NEXT: } // CHECK: cir.func @s0() { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["y", uninitialized] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["y"] {alignment = 4 : i64} // CHECK-NEXT: } // CHECK: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 6bd947f4ca9e..f57544423e72 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -123,7 +123,7 @@ cir.func @cast2(%p: !cir.ptr) { // ----- cir.func @cast3(%p: !cir.ptr) { - %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] + %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // expected-error {{requires same type for array element and pointee result}} cir.return } @@ -202,7 +202,7 @@ module { // ----- cir.func @unary0() { - %0 = cir.alloca i32, cir.ptr , ["a", cinit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} %1 = cir.cst(2 : i32) : i32 %3 = cir.unary(inc, %1) : i32, i32 // expected-error {{'cir.unary' op requires input to be defined by a memory load}} @@ -213,7 +213,7 @@ cir.func @unary0() { // ----- cir.func @unary1() { - %0 = cir.alloca i32, cir.ptr , ["a", cinit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} %1 = cir.cst(2 : i32) : i32 cir.store %1, %0 : i32, cir.ptr diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 44477768154c..77f6d444a2a1 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -1,11 +1,11 @@ // RUN: cir-tool %s | FileCheck %s cir.func @l0() { - %0 = cir.alloca i32, cir.ptr , ["x", cinit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} %1 = cir.cst(0 : i32) : i32 cir.store %1, %0 : i32, cir.ptr cir.scope { - %2 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} + %2 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} %3 = cir.cst(0 : i32) : i32 cir.store %3, %2 : i32, cir.ptr cir.loop for(cond : { @@ -36,7 +36,7 @@ cir.func @l0() { } } cir.scope { - %2 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} + %2 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} %3 = cir.cst(0 : i32) : i32 cir.store %3, %2 : i32, cir.ptr cir.loop while(cond : { @@ -64,7 +64,7 @@ cir.func @l0() { } cir.scope { - %2 = cir.alloca i32, cir.ptr , ["i", cinit] {alignment = 4 : i64} + %2 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} %3 = cir.cst(0 : i32) : i32 cir.store %3, %2 : i32, cir.ptr cir.loop dowhile(cond : { diff --git a/clang/test/CIR/IR/ptr_stride.cir b/clang/test/CIR/IR/ptr_stride.cir index 200e22ae1d52..84d0baa4ee2d 100644 --- a/clang/test/CIR/IR/ptr_stride.cir +++ b/clang/test/CIR/IR/ptr_stride.cir @@ -2,7 +2,7 @@ module { cir.func @arraysubscript(%arg0: i32) { - %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] + %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr %3 = cir.cst(0 : i32) : i32 @@ -12,7 +12,7 @@ module { } // CHECK: cir.func @arraysubscript(%arg0: i32) { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] // CHECK-NEXT: %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 diff --git a/clang/test/CIR/IR/types.cir b/clang/test/CIR/IR/types.cir index 182082b9ba82..f60d9c89acb6 100644 --- a/clang/test/CIR/IR/types.cir +++ b/clang/test/CIR/IR/types.cir @@ -2,10 +2,10 @@ module { cir.func @arrays() { - %0 = cir.alloca !cir.array, cir.ptr>, ["x", paraminit] + %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] cir.return } } // CHECK: cir.func @arrays() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", paraminit] +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 734c435dd0e1..49aca6a3768a 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -3,12 +3,12 @@ module { cir.func @sw1(%arg0: i32, %arg1: i32) { - %0 = cir.alloca i32, cir.ptr , ["a", paraminit] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["c", paraminit] {alignment = 4 : i64} + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["c", init] {alignment = 4 : i64} cir.store %arg0, %0 : i32, cir.ptr cir.store %arg1, %1 : i32, cir.ptr cir.scope { - %2 = cir.alloca i32, cir.ptr , ["b", cinit] {alignment = 4 : i64} + %2 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} %3 = cir.cst(1 : i32) : i32 cir.store %3, %2 : i32, cir.ptr %4 = cir.load %0 : cir.ptr , i32 @@ -40,7 +40,7 @@ module { }, case (equal, 2 : i32) { cir.scope { - %5 = cir.alloca i32, cir.ptr , ["yolo", cinit] {alignment = 4 : i64} + %5 = cir.alloca i32, cir.ptr , ["yolo", init] {alignment = 4 : i64} %6 = cir.load %2 : cir.ptr , i32 %7 = cir.cst(1 : i32) : i32 %8 = cir.binop(add, %6, %7) : i32 @@ -123,7 +123,7 @@ module { // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 2 : i32) { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %5 = cir.alloca i32, cir.ptr , ["yolo", cinit] {alignment = 4 : i64} +// CHECK-NEXT: %5 = cir.alloca i32, cir.ptr , ["yolo", init] {alignment = 4 : i64} // CHECK-NEXT: %6 = cir.load %2 : cir.ptr , i32 // CHECK-NEXT: %7 = cir.cst(1 : i32) : i32 // CHECK-NEXT: %8 = cir.binop(add, %6, %7) : i32 From 793bd5cb0c39932862829dbdbcf45368315dff5d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Oct 2022 23:26:12 -0700 Subject: [PATCH 0595/2301] [CIR] Fix tests from rebase where new names are used for memref.alloca values --- clang/test/CIR/CIRToLLVM/bool.cir | 2 +- clang/test/CIR/CIRToLLVM/memref.cir | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/test/CIR/CIRToLLVM/bool.cir b/clang/test/CIR/CIRToLLVM/bool.cir index a30fe73a1004..10300d027ef7 100644 --- a/clang/test/CIR/CIRToLLVM/bool.cir +++ b/clang/test/CIR/CIRToLLVM/bool.cir @@ -12,7 +12,7 @@ module { } // MLIR: func @foo() { -// MLIR: [[Value:%[0-9]+]] = memref.alloca() {alignment = 1 : i64} : memref +// MLIR: [[Value:%[a-z0-9]+]] = memref.alloca() {alignment = 1 : i64} : memref // MLIR: = arith.constant 1 : i8 // MLIR: memref.store {{.*}}, [[Value]][] : memref // return diff --git a/clang/test/CIR/CIRToLLVM/memref.cir b/clang/test/CIR/CIRToLLVM/memref.cir index 548dec008b94..bdf7409c5929 100644 --- a/clang/test/CIR/CIRToLLVM/memref.cir +++ b/clang/test/CIR/CIRToLLVM/memref.cir @@ -14,11 +14,11 @@ module { // MLIR: module { // MLIR-NEXT: func @foo() -> i32 { -// MLIR-NEXT: %0 = memref.alloca() {alignment = 4 : i64} : memref +// MLIR-NEXT: [[alloca:%[a-z0-9]+]] = memref.alloca() {alignment = 4 : i64} : memref // MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 -// MLIR-NEXT: memref.store %c1_i32, %0[] : memref -// MLIR-NEXT: %1 = memref.load %0[] : memref -// MLIR-NEXT: return %1 : i32 +// MLIR-NEXT: memref.store %c1_i32, [[alloca]][] : memref +// MLIR-NEXT: [[load:%[a-z0-9]+]] = memref.load [[alloca]][] : memref +// MLIR-NEXT: return [[load]] : i32 // MLIR-NEXT: } // MLIR-NEXT: } From 01c3a4339ad397f77f1a1347d8034302a2ac0dc8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 6 Oct 2022 16:28:41 -0700 Subject: [PATCH 0596/2301] [CIR] Add a flag to disable CIR emission for default cxx methods This allows us to test analysis without having to CIR codegen compiler generated code. --- clang/include/clang/Driver/Options.td | 4 ++++ clang/include/clang/Frontend/FrontendOptions.h | 8 ++++++-- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 3 ++- clang/lib/Frontend/CompilerInvocation.cpp | 3 +++ clang/test/CIR/CodeGen/assign-operator.cpp | 7 +++++++ 5 files changed, 22 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index e0c507e6ff7f..214bc5a0e604 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3065,6 +3065,10 @@ def clangir_disable_verifier : Flag<["-"], "clangir-disable-verifier">, MarshallingInfoFlag>; def flto_EQ_auto : Flag<["-"], "flto=auto">, Visibility<[ClangOption, FlangOption]>, Group, Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; +def clangir_disable_emit_cxx_default : Flag<["-"], "clangir-disable-emit-cxx-default">, + Visibility<[ClangOption, CC1Option]>, + HelpText<"ClangIR: Disable emission of c++ default (compiler implemented) methods.">, + MarshallingInfoFlag>; def clangir_verify_diagnostics : Flag<["-"], "clangir-verify-diagnostics">, Visibility<[ClangOption, CC1Option]>, HelpText<"ClangIR: Enable diagnostic verification in MLIR, similar to clang's -verify">, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index eb9516bc86e0..bbee41db39f3 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -424,6 +424,9 @@ class FrontendOptions { /// Disable Clang IR (CIR) verifier unsigned ClangIRDisableCIRVerifier : 1; + /// Disable ClangIR emission for CXX default (compiler generated methods). + unsigned ClangIRDisableEmitCXXDefault : 1; + /// Enable diagnostic verification for CIR unsigned ClangIRVerifyDiags : 1; @@ -620,8 +623,9 @@ class FrontendOptions { EmitSymbolGraphSymbolLabelsForTesting(false), EmitPrettySymbolGraphs(false), GenReducedBMI(false), UseClangIRPipeline(false), ClangIRDisablePasses(false), - ClangIRDisableCIRVerifier(false), ClangIRLifetimeCheck(false), - TimeTraceGranularity(500), TimeTraceVerbose(false) {} + ClangIRDisableCIRVerifier(false), ClangIRDisableEmitCXXDefault(false), + ClangIRLifetimeCheck(false), TimeTraceGranularity(500), + TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file /// extension. For example, "c" would return Language::C. diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index f56f07545888..26580eeb094f 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -217,7 +217,8 @@ class CIRGenConsumer : public clang::ASTConsumer { } // Emit remaining defaulted C++ methods - gen->buildDefaultMethods(); + if (!feOptions.ClangIRDisableEmitCXXDefault) + gen->buildDefaultMethods(); // FIXME: we cannot roundtrip prettyForm=true right now. mlir::OpPrintingFlags flags; diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 0e7ac7ae0ffc..0b757062ad76 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3096,6 +3096,9 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Args.hasArg(OPT_clangir_disable_verifier)) Opts.ClangIRDisableCIRVerifier = true; + if (Args.hasArg(OPT_clangir_disable_emit_cxx_default)) + Opts.ClangIRDisableEmitCXXDefault = true; + if (Args.hasArg(OPT_clangir_verify_diagnostics)) Opts.ClangIRVerifyDiags = true; diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 27ab5c7e6aec..5871aaed5db8 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -clangir-disable-emit-cxx-default %s -o - | FileCheck %s --check-prefix=DISABLE int strlen(char const *); @@ -24,6 +25,9 @@ struct String { // CHECK: cir.return // CHECK: } + // DISABLE: cir.func linkonce_odr @_ZN10StringViewC2ERK6String + // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // StringView::operator=(StringView&&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewaSEOS_ @@ -42,6 +46,9 @@ struct String { // CHECK: %8 = cir.load %2 : cir.ptr > // CHECK: cir.return %8 : !cir.ptr // CHECK: } + + // DISABLE: cir.func @_ZN10StringViewaSEOS_ + // DISABLE-NEXT: cir.func @main() }; struct StringView { From 9c0028776304a8f319889bcdb3a39606c84b4888 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 10 Oct 2022 21:14:47 -0400 Subject: [PATCH 0597/2301] [CIR][NFC] Reformat some files clang-formatting these files generates these non-related changes, so just move them out to keep them separate from functional diff changes. --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 - clang/lib/CIR/CodeGen/CIRGenModule.h | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 06ff096a6cfd..dfc5559c4e77 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -958,7 +958,6 @@ class CIRGenFunction { void buildAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer); - /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts. static bool IsWrappedCXXThis(const Expr *E); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index f7c377b5b548..8c233859c0cc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -33,6 +33,9 @@ #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/Value.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" using namespace clang; namespace cir { From adcfab35a317828b4cc60f18f02a4b53f7a043e4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 10 Oct 2022 21:12:05 -0400 Subject: [PATCH 0598/2301] [CIR] Add a hush of a subclass for mlir::OpBuilder for CIR The llvm::IRBuilder class has some regarding floating point behaviors that need to be traced for lowering from C++. The behavior largely mirrors that from the lang options, so it should exist at the highest level CIR as well. This patch just builds the hush and replaces the previous mlir::OpBuilder with the subclass. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 25 ++++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 +--- clang/lib/CIR/CodeGen/CIRGenModule.h | 5 +++-- 5 files changed, 33 insertions(+), 8 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenBuilder.h diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h new file mode 100644 index 000000000000..458bdff54809 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -0,0 +1,25 @@ +//===-- CIRGenBuilder.h - CIRBuilder implementation ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H +#define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H + +#include "mlir/IR/Builders.h" + +namespace cir { + +class CIRGenFunction; + +class CIRGenBuilderTy : public mlir::OpBuilder { +public: + CIRGenBuilderTy(mlir::MLIRContext &C) : mlir::OpBuilder(&C) {} +}; + +} // namespace cir + +#endif diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index fc14ce9343c6..f543947f6605 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -25,7 +25,7 @@ using namespace cir; using namespace clang; using namespace mlir::cir; -CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, mlir::OpBuilder &builder, +CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, bool suppressNewContext) : CGM{CGM}, builder(builder), CurFuncDecl(nullptr), SanOpts(CGM.getLangOpts().Sanitize), ShouldEmitLifetimeMarkers(false) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index dfc5559c4e77..f1eb83d333bb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H #define LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H +#include "CIRGenBuilder.h" #include "CIRGenCall.h" #include "CIRGenModule.h" #include "CIRGenValue.h" @@ -58,7 +59,7 @@ class CIRGenFunction { /// The builder is a helper class to create IR inside a function. The /// builder is stateful, in particular it keeps an "insertion point": this /// is where the next operations will be introduced. - mlir::OpBuilder &builder; + CIRGenBuilderTy &builder; /// ------- /// Goto @@ -409,7 +410,7 @@ class CIRGenFunction { return getEvaluationKind(T) == TEK_Aggregate; } - CIRGenFunction(CIRGenModule &CGM, mlir::OpBuilder &builder, + CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, bool suppressNewContext = false); CIRGenTypes &getTypes() const { return CGM.getTypes(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 70574944a3ad..5a881f06aed2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -69,11 +69,9 @@ using namespace mlir::cir; using namespace cir; using namespace clang; -using llvm::ArrayRef; using llvm::cast; using llvm::dyn_cast; using llvm::isa; -using llvm::ScopedHashTableScope; using llvm::SmallVector; using llvm::StringRef; @@ -92,7 +90,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &CGO, DiagnosticsEngine &Diags) - : builder(&context), astCtx(astctx), langOpts(astctx.getLangOpts()), + : builder(context), astCtx(astctx), langOpts(astctx.getLangOpts()), codeGenOpts(CGO), theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 8c233859c0cc..dd1b783d4da3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H #define LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H +#include "CIRGenBuilder.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" #include "UnimplementedFeatureGuarding.h" @@ -70,7 +71,7 @@ class CIRGenModule { /// The builder is a helper class to create IR inside a function. The /// builder is stateful, in particular it keeps an "insertion point": this /// is where the next operations will be introduced. - mlir::OpBuilder builder; + CIRGenBuilderTy builder; /// Hold Clang AST information. clang::ASTContext &astCtx; @@ -114,7 +115,7 @@ class CIRGenModule { public: mlir::ModuleOp getModule() const { return theModule; } - mlir::OpBuilder &getBuilder() { return builder; } + CIRGenBuilderTy &getBuilder() { return builder; } clang::ASTContext &getASTContext() const { return astCtx; } const clang::TargetInfo &getTarget() const { return target; } const clang::CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; } From c8c6a1c60d6b771ae871b40348ae61d20800675e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 10 Oct 2022 21:55:23 -0400 Subject: [PATCH 0599/2301] [CIR] Add FPEnv.{h,cpp} to begin support for floating point behaviors This is just a simple enum and some simple string tools that are only used to assert against garbage values, but a few more helpers will land here eventually. --- clang/include/clang/CIR/Dialect/IR/FPEnv.h | 50 +++++++++++++++++ clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + clang/lib/CIR/Dialect/IR/FPEnv.cpp | 64 ++++++++++++++++++++++ 3 files changed, 115 insertions(+) create mode 100644 clang/include/clang/CIR/Dialect/IR/FPEnv.h create mode 100644 clang/lib/CIR/Dialect/IR/FPEnv.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/FPEnv.h b/clang/include/clang/CIR/Dialect/IR/FPEnv.h new file mode 100644 index 000000000000..aceba9ee57d0 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/FPEnv.h @@ -0,0 +1,50 @@ +//===- FPEnv.h ---- FP Environment ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// @file +/// This file contains the declarations of entities that describe floating +/// point environment and related functions. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CIR_DIALECT_IR_FPENV_H +#define CLANG_CIR_DIALECT_IR_FPENV_H + +#include "llvm/ADT/FloatingPointMode.h" + +#include + +namespace cir { + +namespace fp { + +/// Exception behavior used for floating point operations. +/// +/// Each of these values corresponds to some LLVMIR metadata argument value of a +/// constrained floating point intrinsic. See the LLVM Language Reference Manual +/// for details. +enum ExceptionBehavior : uint8_t { + ebIgnore, ///< This corresponds to "fpexcept.ignore". + ebMayTrap, ///< This corresponds to "fpexcept.maytrap". + ebStrict, ///< This corresponds to "fpexcept.strict". +}; + +} // namespace fp + +/// For any RoundingMode enumerator, returns a string valid as input in +/// constrained intrinsic rounding mode metadata. +std::optional convertRoundingModeToStr(llvm::RoundingMode); + +/// For any ExceptionBehavior enumerator, returns a string valid as input in +/// constrained intrinsic exception behavior metadata. +std::optional + convertExceptionBehaviorToStr(fp::ExceptionBehavior); + +} // namespace cir + +#endif diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 7bab60b4606f..62ccb7fe364c 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -2,6 +2,7 @@ add_clang_library(MLIRCIR CIRAttrs.cpp CIRDialect.cpp CIRTypes.cpp + FPEnv.cpp DEPENDS MLIRBuiltinLocationAttributesIncGen diff --git a/clang/lib/CIR/Dialect/IR/FPEnv.cpp b/clang/lib/CIR/Dialect/IR/FPEnv.cpp new file mode 100644 index 000000000000..01dfe1e92640 --- /dev/null +++ b/clang/lib/CIR/Dialect/IR/FPEnv.cpp @@ -0,0 +1,64 @@ +//===-- FPEnv.cpp ---- FP Environment -------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// @file +/// This file contains the implementations of entities that describe floating +/// point environment. +// +//===----------------------------------------------------------------------===// + +#include "clang/CIR/Dialect/IR/FPEnv.h" + +namespace cir { + +std::optional +convertRoundingModeToStr(llvm::RoundingMode UseRounding) { + std::optional RoundingStr; + switch (UseRounding) { + case llvm::RoundingMode::Dynamic: + RoundingStr = "round.dynamic"; + break; + case llvm::RoundingMode::NearestTiesToEven: + RoundingStr = "round.tonearest"; + break; + case llvm::RoundingMode::NearestTiesToAway: + RoundingStr = "round.tonearestaway"; + break; + case llvm::RoundingMode::TowardNegative: + RoundingStr = "round.downward"; + break; + case llvm::RoundingMode::TowardPositive: + RoundingStr = "round.upward"; + break; + case llvm::RoundingMode::TowardZero: + RoundingStr = "round.towardZero"; + break; + default: + break; + } + return RoundingStr; +} + +std::optional +convertExceptionBehaviorToStr(fp::ExceptionBehavior UseExcept) { + std::optional ExceptStr; + switch (UseExcept) { + case fp::ebStrict: + ExceptStr = "fpexcept.strict"; + break; + case fp::ebIgnore: + ExceptStr = "fpexcept.ignore"; + break; + case fp::ebMayTrap: + ExceptStr = "fpexcept.maytrap"; + break; + } + return ExceptStr; +} + +} // namespace cir From ec95bce7e22af5f3c838a480c54ef71a39d0e651 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 10 Oct 2022 21:58:23 -0400 Subject: [PATCH 0600/2301] [CIR] Begin support for fp constrained rounding and except Begin implementing the block that propagates floating point language options to the CIRBuilderTy. More support to come. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 26 ++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 28 +++++++++++++++++++++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 458bdff54809..dbf71a88080a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -9,15 +9,41 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H #define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H +#include "clang/CIR/Dialect/IR/FPEnv.h" + #include "mlir/IR/Builders.h" +#include "llvm/ADT/FloatingPointMode.h" namespace cir { class CIRGenFunction; class CIRGenBuilderTy : public mlir::OpBuilder { + fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict; + llvm::RoundingMode DefaultConstrainedRounding = llvm::RoundingMode::Dynamic; + public: CIRGenBuilderTy(mlir::MLIRContext &C) : mlir::OpBuilder(&C) {} + + /// Set the exception handling to be used with constrained floating point + void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) { +#ifndef NDEBUG + std::optional ExceptStr = + convertExceptionBehaviorToStr(NewExcept); + assert(ExceptStr && "Garbage strict exception behavior!"); +#endif + DefaultConstrainedExcept = NewExcept; + } + + /// Set the rounding mode handling to be used with constrained floating point + void setDefaultConstrainedRounding(llvm::RoundingMode NewRounding) { +#ifndef NDEBUG + std::optional RoundingStr = + convertRoundingModeToStr(NewRounding); + assert(RoundingStr && "Garbage strict rounding mode!"); +#endif + DefaultConstrainedRounding = NewRounding; + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index f543947f6605..747274d0dd97 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -18,6 +18,7 @@ #include "clang/AST/ExprObjC.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/FPEnv.h" #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -682,6 +683,22 @@ LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Operation *Op, return makeAddrLValue(Address(Op->getResult(0), Align), T, BaseInfo); } +// Map the LangOption for exception behavior into the corresponding enum in +// the IR. +cir::fp::ExceptionBehavior +ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) { + switch (Kind) { + case LangOptions::FPE_Ignore: + return cir::fp::ebIgnore; + case LangOptions::FPE_MayTrap: + return cir::fp::ebMayTrap; + case LangOptions::FPE_Strict: + return cir::fp::ebStrict; + default: + llvm_unreachable("Unsupported FP Exception Behavior"); + } +} + void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, @@ -812,7 +829,16 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, (getLangOpts().CUDA && FD->hasAttr()))) ; // TODO: support norecurse attr - // TODO: rounding mode and strict floating point + llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode(); + cir::fp::ExceptionBehavior FPExceptionBehavior = + ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode()); + builder.setDefaultConstrainedRounding(RM); + builder.setDefaultConstrainedExcept(FPExceptionBehavior); + if ((FD && (FD->UsesFPIntrin() || FD->hasAttr())) || + (!FD && (FPExceptionBehavior != cir::fp::ebIgnore || + RM != llvm::RoundingMode::NearestTiesToEven))) { + llvm_unreachable("NYI"); + } // TODO: stackrealign attr From 15a1830d93de998be53e58781dd85d8e1f8e0061 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 10 Oct 2022 22:08:30 -0400 Subject: [PATCH 0601/2301] [CIR] Implement ScalarExprEmitter::VisitCastExpr for CK_FloatingCast Create an RAII type to restore floating point state and then pass the expression off to buildScalarConversion. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 10 +++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 13 +++++---- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 32 ++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 15 ++++++++++ 4 files changed, 64 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index dbf71a88080a..24fafe874377 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -44,6 +44,16 @@ class CIRGenBuilderTy : public mlir::OpBuilder { #endif DefaultConstrainedRounding = NewRounding; } + + /// Get the exception handling used with constrained floating point + fp::ExceptionBehavior getDefaultConstrainedExcept() { + return DefaultConstrainedExcept; + } + + /// Get the rounding mode handling used with constrained floating point + llvm::RoundingMode getDefaultConstrainedRounding() { + return DefaultConstrainedRounding; + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 01346867f836..23844f675cac 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -980,15 +980,16 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } case CK_IntegralToFloating: - llvm_unreachable("NYI"); case CK_FloatingToIntegral: - llvm_unreachable("NYI"); case CK_FloatingCast: - llvm_unreachable("NYI"); case CK_FixedPointToFloating: - llvm_unreachable("NYI"); - case CK_FloatingToFixedPoint: - llvm_unreachable("NYI"); + case CK_FloatingToFixedPoint: { + if (Kind != CK_FloatingCast) + llvm_unreachable("Only FloatingCast supported so far."); + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, CE); + return buildScalarConversion(Visit(E), E->getType(), DestTy, + CE->getExprLoc()); + } case CK_BooleanToSignedIntegral: llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 747274d0dd97..d69a30f5ccd1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1049,3 +1049,35 @@ clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, return ResTy; } + +CIRGenFunction::CIRGenFPOptionsRAII::CIRGenFPOptionsRAII(CIRGenFunction &CGF, + const clang::Expr *E) + : CGF(CGF) { + ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts())); +} + +CIRGenFunction::CIRGenFPOptionsRAII::CIRGenFPOptionsRAII(CIRGenFunction &CGF, + FPOptions FPFeatures) + : CGF(CGF) { + ConstructorHelper(FPFeatures); +} + +void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper( + FPOptions FPFeatures) { + OldFPFeatures = CGF.CurFPFeatures; + CGF.CurFPFeatures = FPFeatures; + + OldExcept = CGF.builder.getDefaultConstrainedExcept(); + OldRounding = CGF.builder.getDefaultConstrainedRounding(); + + if (OldFPFeatures == FPFeatures) + return; + + llvm_unreachable("NYI"); +} + +CIRGenFunction::CIRGenFPOptionsRAII::~CIRGenFPOptionsRAII() { + CGF.CurFPFeatures = OldFPFeatures; + CGF.builder.setDefaultConstrainedExcept(OldExcept); + CGF.builder.setDefaultConstrainedRounding(OldRounding); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index f1eb83d333bb..5b0d483c6bb5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -527,6 +527,21 @@ class CIRGenFunction { } }; + class CIRGenFPOptionsRAII { + public: + CIRGenFPOptionsRAII(CIRGenFunction &CGF, FPOptions FPFeatures); + CIRGenFPOptionsRAII(CIRGenFunction &CGF, const clang::Expr *E); + ~CIRGenFPOptionsRAII(); + + private: + void ConstructorHelper(clang::FPOptions FPFeatures); + CIRGenFunction &CGF; + clang::FPOptions OldFPFeatures; + fp::ExceptionBehavior OldExcept; + llvm::RoundingMode OldRounding; + }; + clang::FPOptions CurFPFeatures; + RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation Loc); From 1989463b95e0514914c52b72b051b913c66b46be Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 10 Oct 2022 22:25:48 -0400 Subject: [PATCH 0602/2301] [CIR] Set CurFPFeatures at the CIRGenFunction ctor --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 4 +-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 32 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index d69a30f5ccd1..f1df333cb6c0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -28,8 +28,8 @@ using namespace mlir::cir; CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, bool suppressNewContext) - : CGM{CGM}, builder(builder), CurFuncDecl(nullptr), - SanOpts(CGM.getLangOpts().Sanitize), ShouldEmitLifetimeMarkers(false) { + : CGM{CGM}, builder(builder), SanOpts(CGM.getLangOpts().Sanitize), + CurFPFeatures(CGM.getLangOpts()), ShouldEmitLifetimeMarkers(false) { if (!suppressNewContext) CGM.getCXXABI().getMangleContext().startNewFunction(); // TODO(CIR): EHStack.setCGF(this); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 5b0d483c6bb5..1ddcde4f5623 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -343,7 +343,7 @@ class CIRGenFunction { Address CXXDefaultInitExprThis = Address::invalid(); // CurFuncDecl - Holds the Decl for the current outermost non-closure context - const clang::Decl *CurFuncDecl; + const clang::Decl *CurFuncDecl = nullptr; /// CurCodeDecl - This is the inner-most code context, which includes blocks. const clang::Decl *CurCodeDecl; const CIRGenFunctionInfo *CurFnInfo; @@ -367,6 +367,21 @@ class CIRGenFunction { /// Sanitizers enabled for this function. clang::SanitizerSet SanOpts; + class CIRGenFPOptionsRAII { + public: + CIRGenFPOptionsRAII(CIRGenFunction &CGF, FPOptions FPFeatures); + CIRGenFPOptionsRAII(CIRGenFunction &CGF, const clang::Expr *E); + ~CIRGenFPOptionsRAII(); + + private: + void ConstructorHelper(clang::FPOptions FPFeatures); + CIRGenFunction &CGF; + clang::FPOptions OldFPFeatures; + fp::ExceptionBehavior OldExcept; + llvm::RoundingMode OldRounding; + }; + clang::FPOptions CurFPFeatures; + /// The symbol table maps a variable name to a value in the current scope. /// Entering a function creates a new scope, and the function arguments are /// added to the mapping. When the processing of a function is terminated, @@ -527,21 +542,6 @@ class CIRGenFunction { } }; - class CIRGenFPOptionsRAII { - public: - CIRGenFPOptionsRAII(CIRGenFunction &CGF, FPOptions FPFeatures); - CIRGenFPOptionsRAII(CIRGenFunction &CGF, const clang::Expr *E); - ~CIRGenFPOptionsRAII(); - - private: - void ConstructorHelper(clang::FPOptions FPFeatures); - CIRGenFunction &CGF; - clang::FPOptions OldFPFeatures; - fp::ExceptionBehavior OldExcept; - llvm::RoundingMode OldRounding; - }; - clang::FPOptions CurFPFeatures; - RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation Loc); From e87ecdb2d49fed7d8bb916f95bf9b4e5798170cf Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 7 Oct 2022 16:55:08 -0700 Subject: [PATCH 0603/2301] [CIR][CodeGen] Visit CXXDefaultArgExpr for expr scalar emission Note that the default argument is not actually handled just yet, but this gets us unblocked for looking into the prototype. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5 +++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +++++ clang/test/CIR/CodeGen/cxx-default-arg.cpp | 12 ++++++++++++ 3 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/cxx-default-arg.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 23844f675cac..4b520948f4cc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -365,8 +365,9 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitSourceLocExpr(SourceLocExpr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { - llvm_unreachable("NYI"); + mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { + CIRGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE); + return Visit(DAE->getExpr()); } mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) { CIRGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 1ddcde4f5623..fe070ef2e53e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -883,6 +883,11 @@ class CIRGenFunction { SourceLocExprScopeGuard SourceLocScope; }; + struct CXXDefaultArgExprScope : SourceLocExprScopeGuard { + CXXDefaultArgExprScope(CIRGenFunction &CGF, const CXXDefaultArgExpr *E) + : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {} + }; + LValue MakeNaturalAlignPointeeAddrLValue(mlir::Operation *Op, clang::QualType T); diff --git a/clang/test/CIR/CodeGen/cxx-default-arg.cpp b/clang/test/CIR/CodeGen/cxx-default-arg.cpp new file mode 100644 index 000000000000..f637119a8475 --- /dev/null +++ b/clang/test/CIR/CodeGen/cxx-default-arg.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// CHECK: cir.func @_ZN12MyIntPointerC1EPi + +struct MyIntPointer { + MyIntPointer(int *p = nullptr); +}; + +void foo() { + MyIntPointer p; +} \ No newline at end of file From fd88dc472aa641bb17dbdddadaa90de5265beefe Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 7 Oct 2022 17:53:00 -0700 Subject: [PATCH 0604/2301] [CIR] Add ASTRecordDeclAttr to be used on tracking class/structs --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 1 + .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 21 +++++++++++++++++++ 3 files changed, 23 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index a5792b6438aa..5b9c43ba7fe9 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -26,6 +26,7 @@ namespace clang { class FunctionDecl; class VarDecl; +class RecordDecl; } #define GET_ATTRDEF_CLASSES diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 56e7491187df..f5f69ce3fb2f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -106,5 +106,6 @@ class ASTDecl traits = []> def ASTFunctionDeclAttr : ASTDecl<"FunctionDecl", "function.decl">; def ASTVarDeclAttr : ASTDecl<"VarDecl", "var.decl">; +def ASTRecordDeclAttr : ASTDecl<"RecordDecl", "record.decl">; #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 0e61b02c6d9b..86e64b11f750 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1507,6 +1507,27 @@ LogicalResult ASTVarDeclAttr::verify( return success(); } +::mlir::Attribute ASTRecordDeclAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { + // We cannot really parse anything AST related at this point + // since we have no serialization/JSON story. + return mlir::Attribute(); +} + +void ASTRecordDeclAttr::print(::mlir::AsmPrinter &printer) const { + // Nothing to print besides the mnemonics. +} + +LogicalResult ASTRecordDeclAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + const ::clang::RecordDecl *decl) { + if (!decl) { + emitError() << "expected non-null AST declaration"; + return failure(); + } + return success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// From 8b40993be375c0f362179236d05a34fac26a6795 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 7 Oct 2022 18:43:04 -0700 Subject: [PATCH 0605/2301] [CIR] Add optional RecordDecl node to CIR_StructType --- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 6 ++++++ clang/include/clang/CIR/Dialect/IR/CIRTypes.td | 5 +++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 4d0a3d77bc62..26423651077a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -20,6 +20,12 @@ // CIR Dialect Types //===----------------------------------------------------------------------===// +namespace mlir { +namespace cir { +class ASTRecordDeclAttr; +} // namespace cir +} // namespace mlir + #define GET_TYPEDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsTypes.h.inc" diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 1af173e235e2..09358c95cda4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -77,7 +77,8 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { let parameters = (ins ArrayRefParameter<"mlir::Type", "members">:$members, - "mlir::StringAttr":$typeName + "mlir::StringAttr":$typeName, + "std::optional<::mlir::cir::ASTRecordDeclAttr>":$ast ); let builders = [ @@ -85,7 +86,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { "ArrayRef":$members, "StringRef":$typeName ), [{ auto id = mlir::StringAttr::get(context, typeName); - return StructType::get(context, members, id); + return StructType::get(context, members, id, std::nullopt); }]> ]; From b93b8653c694cba3270e2e91cdbe9890864f8da9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 10 Oct 2022 15:50:19 -0400 Subject: [PATCH 0606/2301] [CIR][NFC] Clean up unnecessary tablegen/header inclusion of CIRTypes --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 6 ++++++ clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 2 -- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index 5b9c43ba7fe9..be0340b3b827 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -29,6 +29,12 @@ class VarDecl; class RecordDecl; } +namespace mlir { +namespace cir { +class ArrayType; +} // namespace cir +} // namespace mlir + #define GET_ATTRDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsAttributes.h.inc" diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index f5f69ce3fb2f..66532763cbb2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -14,9 +14,7 @@ #define MLIR_CIR_DIALECT_CIR_ATTRS include "mlir/IR/BuiltinAttributeInterfaces.td" - include "clang/CIR/Dialect/IR/CIRDialect.td" -include "clang/CIR/Dialect/IR/CIRTypes.td" //===----------------------------------------------------------------------===// // CIR Attrs From d554b5c8954d01c87a2bb700d37199acd630c4dd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 10 Oct 2022 15:57:15 -0400 Subject: [PATCH 0607/2301] [CIR][CodeGen] Teach computeRecordLayout to embedd RecordDecl into new StructType types Note that there are no tests for this since we currently cannot read/write ast nodes to disk as part of CIR. --- clang/include/clang/CIR/Dialect/IR/CIRTypes.td | 10 ++++++++++ clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 9 ++++++--- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 4 ++++ clang/lib/CIR/Dialect/Transforms/DropAST.cpp | 7 ++++++- 4 files changed, 26 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 09358c95cda4..ee24a155d163 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -91,6 +91,16 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { ]; let hasCustomAssemblyFormat = 1; + + let extraClassDeclaration = [{ + void dropAst(); + }]; + + let extraClassDefinition = [{ + void $cppClass::dropAst() { + getImpl()->ast = std::nullopt; + } + }]; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 48c3e0b6c04f..9b8974a99eca 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -5,6 +5,7 @@ #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/RecordLayout.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "llvm/IR/DataLayout.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -214,7 +215,8 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, auto baseIdentifier = mlir::StringAttr::get(&getMLIRContext(), name + ".base"); BaseTy = mlir::cir::StructType::get( - &getMLIRContext(), baseBuilder.fieldTypes, baseIdentifier); + &getMLIRContext(), baseBuilder.fieldTypes, baseIdentifier, + mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work // on both of them with the same index. assert(builder.isPacked == baseBuilder.isPacked && @@ -222,8 +224,9 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, } } - Ty = mlir::cir::StructType::get(&getMLIRContext(), builder.fieldTypes, - identifier); + Ty = mlir::cir::StructType::get( + &getMLIRContext(), builder.fieldTypes, identifier, + mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); auto RL = std::make_unique( Ty, BaseTy, (bool)builder.IsZeroInitializable, diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index a28cb6efdb34..aeb43bfba5d4 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -88,6 +88,10 @@ Type StructType::parse(mlir::AsmParser &parser) { void StructType::print(mlir::AsmPrinter &printer) const { printer << '<' << getTypeName() << ", "; llvm::interleaveComma(getMembers(), printer); + if (getAst()) { + printer << ", "; + printer.printAttributeWithoutType(*getAst()); + } printer << '>'; } diff --git a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp index 553206a2ef62..528fce68dfc9 100644 --- a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp +++ b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp @@ -32,7 +32,12 @@ void DropASTPass::runOnOperation() { // carrying AST around. op->walk([&](Operation *op) { if (isa(op)) { - cast(op).removeAstAttr(); + auto alloca = cast(op); + alloca.removeAstAttr(); + auto ty = alloca.getAllocaType().dyn_cast(); + if (!ty) + return; + ty.dropAst(); } }); } From d756004e3604b425aaacde785718964f475a8b96 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 10 Oct 2022 19:06:50 -0400 Subject: [PATCH 0608/2301] [CIR][Lifetime] Add skeleton for attribute based detection of Owner/Pointer type categories --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 116 ++++++++++++++++-- 1 file changed, 108 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 86a8f761d781..9576ced0f472 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -9,6 +9,7 @@ #include "PassDetail.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" @@ -539,23 +540,122 @@ void LifetimeCheckPass::checkIf(IfOp ifOp) { joinPmaps(pmapOps); } +template bool isStructAndHasAttr(mlir::Type ty) { + if (!ty.isa()) + return false; + auto sTy = ty.cast(); + auto recordDecl = sTy.getAst()->getAstDecl(); + if (recordDecl->hasAttr()) + return true; + return false; +} + +static bool isOwnerType(mlir::Type ty) { + // From 2.1: + // + // An Owner uniquely owns another object (cannot dangle). An Owner type is + // expressed using the annotation [[gsl::Owner(DerefType)]] where DerefType is + // the owned type (and (DerefType) may be omitted and deduced as below). For + // example: + // + // template class [[gsl::Owner(T)]] my_unique_smart_pointer; + // + // TODO: The following standard or other types are treated as-if annotated as + // Owners, if not otherwise annotated and if not SharedOwners: + // + // - Every type that satisfies the standard Container requirements and has a + // user-provided destructor. (Example: vector.) DerefType is ::value_type. + // - Every type that provides unary * and has a user-provided destructor. + // (Example: unique_ptr.) DerefType is the ref-unqualified return type of + // operator*. + // - Every type that has a data member or public base class of an Owner type. + // Additionally, for convenient adoption without modifying existing standard + // library headers, the following well known standard types are treated as-if + // annotated as Owners: stack, queue, priority_queue, optional, variant, any, + // and regex. + return isStructAndHasAttr(ty); +} + +static bool isPointerType(AllocaOp allocaOp) { + // From 2.1: + // + // A Pointer is not an Owner and provides indirect access to an object it does + // not own (can dangle). A Pointer type is expressed using the annotation + // [[gsl::Pointer(DerefType)]] where DerefType is the pointed-to type (and + // (Dereftype) may be omitted and deduced as below). For example: + // + // template class [[gsl::Pointer(T)]] my_span; + // + // TODO: The following standard or other types are treated as-if annotated as + // Pointer, if not otherwise annotated and if not Owners: + // + // - Every type that satisfies the standard Iterator requirements. (Example: + // regex_iterator.) DerefType is the ref-unqualified return type of operator*. + // - Every type that satisfies the Ranges TS Range concept. (Example: + // basic_string_view.) DerefType is the ref-unqualified type of *begin(). + // - Every type that satisfies the following concept. DerefType is the + // ref-unqualified return type of operator*. + // + // template concept + // TriviallyCopyableAndNonOwningAndDereferenceable = + // std::is_trivially_copyable_v && std::is_copy_constructible_v && + // std::is_copy_assignable_v && requires(T t) { *t; }; + // + // - Every closure type of a lambda that captures by reference or captures a + // Pointer by value. DerefType is void. + // - Every type that has a data member or public base class of a Pointer type. + // Additionally, for convenient adoption without modifying existing standard + // library headers, the following well- known standard types are treated as-if + // annotated as Pointers, in addition to raw pointers and references: ref- + // erence_wrapper, and vector::reference. + if (allocaOp.isPointerType()) + return true; + return isStructAndHasAttr(allocaOp.getAllocaType()); +} + void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { auto addr = allocaOp.getAddr(); assert(!getPmap().count(addr) && "only one alloca for any given address"); - getPmap()[addr] = {}; - if (!allocaOp.isPointerType()) { + + enum TypeCategory { + Unknown = 0, + SharedOwner = 1, + Owner = 1 << 2, + Pointer = 1 << 3, + Indirection = 1 << 4, + Aggregate = 1 << 5, + Value = 1 << 6, + }; + + auto localStyle = [&]() { + if (isPointerType(allocaOp)) + return TypeCategory::Pointer; + if (isOwnerType(allocaOp.getAllocaType())) + return TypeCategory::Owner; + return TypeCategory::Value; + }(); + + switch (localStyle) { + case TypeCategory::Pointer: + // 2.4.2 - When a non-parameter non-member Pointer p is declared, add + // (p, {invalid}) to pmap. + ptrs.insert(addr); + getPmap()[addr].insert(State::getInvalid()); + pmapInvalidHist[addr] = std::make_pair(allocaOp.getLoc(), std::nullopt); + break; + case TypeCategory::Owner: + llvm_unreachable("NYI"); + break; + case TypeCategory::Value: { // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. getPmap()[addr].insert(State::getLocalValue(addr)); currScope->localValues.push_back(addr); return; } - - // 2.4.2 - When a non-parameter non-member Pointer p is declared, add - // (p, {invalid}) to pmap. - ptrs.insert(addr); - getPmap()[addr].insert(State::getInvalid()); - pmapInvalidHist[addr] = std::make_pair(allocaOp.getLoc(), std::nullopt); + default: + llvm_unreachable("NYI"); + } // If other styles of initialization gets added, required to add support // here. From 13244311db8eeffd5fa64b3ca87d367192ea1305 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 10 Oct 2022 23:37:44 -0400 Subject: [PATCH 0609/2301] [CIR] Prevent crashes by making sure only actual ops are being checked on store's --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 9576ced0f472..1ed799f28fb4 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -683,9 +683,15 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { }; auto data = storeOp.getValue(); + auto defOp = data.getDefiningOp(); + + // Do not handle block arguments just yet. + if (!defOp) + return; + // 2.4.2 - If the declaration includes an initialization, the // initialization is treated as a separate operation - if (auto cstOp = dyn_cast(data.getDefiningOp())) { + if (auto cstOp = dyn_cast(defOp)) { assert(cstOp.isNullPtr() && "not implemented"); assert(getPmap().count(addr) && "address should always be valid"); // 2.4.2 - If the initialization is default initialization or zero @@ -700,14 +706,14 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { return; } - if (auto allocaOp = dyn_cast(data.getDefiningOp())) { + if (auto allocaOp = dyn_cast(defOp)) { // p = &x; getPmap()[addr].clear(); getPmap()[addr].insert(State::getLocalValue(data)); return; } - if (auto ptrStrideOp = dyn_cast(data.getDefiningOp())) { + if (auto ptrStrideOp = dyn_cast(defOp)) { // p = &a[0]; auto array = getArrayFromSubscript(ptrStrideOp); if (array) { From 341bd12f09b9e22b7e4268fe8815ff8a76f77221 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 11 Oct 2022 00:16:45 -0400 Subject: [PATCH 0610/2301] [CIR][CodeGen] Hook up FunctionDecl to cir.func codegen --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 +++++++---- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 ++- clang/lib/CIR/Dialect/Transforms/DropAST.cpp | 7 +++++-- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 5a881f06aed2..eb92e44b5f9c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1441,9 +1441,10 @@ bool CIRGenModule::lookupRepresentativeDecl(StringRef MangledName, return true; } -mlir::cir::FuncOp CIRGenModule::createCIRFunction(mlir::Location loc, - StringRef name, - mlir::FunctionType Ty) { +mlir::cir::FuncOp +CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, + mlir::FunctionType Ty, + const clang::FunctionDecl *FD) { // At the point we need to create the function, the insertion point // could be anywhere (e.g. callsite). Do not rely on whatever it might // be, properly save, find the appropriate place and restore. @@ -1460,6 +1461,8 @@ mlir::cir::FuncOp CIRGenModule::createCIRFunction(mlir::Location loc, builder.setInsertionPoint(curCGF->CurFn.getOperation()); f = builder.create(loc, name, Ty); + f.setAstAttr(mlir::cir::ASTFunctionDeclAttr::get(builder.getContext(), FD)); + assert(f.isDeclaration() && "expected empty body"); // A declaration gets private visibility by default, but external linkage @@ -1567,7 +1570,7 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( auto fnLoc = getLoc(FD->getSourceRange()); // TODO: CodeGen includeds the linkage (ExternalLinkage) and only passes the // mangledname if Entry is nullptr - auto F = createCIRFunction(fnLoc, MangledName, FTy); + auto F = createCIRFunction(fnLoc, MangledName, FTy, FD); if (Entry) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index dd1b783d4da3..456433776895 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -398,7 +398,8 @@ class CIRGenModule { // Effectively create the CIR instruction, properly handling insertion // points. mlir::cir::FuncOp createCIRFunction(mlir::Location loc, StringRef name, - mlir::FunctionType Ty); + mlir::FunctionType Ty, + const clang::FunctionDecl *FD); // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; diff --git a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp index 528fce68dfc9..b72e7a686788 100644 --- a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp +++ b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp @@ -31,14 +31,17 @@ void DropASTPass::runOnOperation() { // This needs to be updated with operations that start // carrying AST around. op->walk([&](Operation *op) { - if (isa(op)) { - auto alloca = cast(op); + if (auto alloca = dyn_cast(op)) { alloca.removeAstAttr(); auto ty = alloca.getAllocaType().dyn_cast(); if (!ty) return; ty.dropAst(); + return; } + + if (auto funcOp = dyn_cast(op)) + funcOp.removeAstAttr(); }); } From 639205d908ba23f6f2c14f285dff2f6256fd1319 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 11 Oct 2022 14:32:08 -0700 Subject: [PATCH 0611/2301] [CIR][Lifetime] Start tracking Owner types Detects owner types using methods introduced in past commits, initialize accordingly. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 1ed799f28fb4..1498b3acad35 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -89,14 +89,19 @@ struct LifetimeCheckPass : public LifetimeCheckBase { Invalid, NullPtr, Global, + // FIXME: currently only supports one level of OwnedBy! + OwnedBy, LocalValue, NumKindsMinusOne = LocalValue }; State() { val.setInt(Invalid); } State(DataTy d) { val.setInt(d); } - State(mlir::Value v) { val.setPointerAndInt(v, LocalValue); } + State(mlir::Value v, DataTy d = LocalValue) { + assert((d == LocalValue || d == OwnedBy) && "expected value or owned"); + val.setPointerAndInt(v, LocalValue); + } - static constexpr int KindBits = 2; + static constexpr int KindBits = 3; static_assert((1 << KindBits) > NumKindsMinusOne, "Not enough room for kind!"); llvm::PointerIntPair val; @@ -123,6 +128,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { static State getInvalid() { return {}; } static State getNullPtr() { return {NullPtr}; } static State getLocalValue(mlir::Value v) { return {v}; } + static State getOwnedBy(mlir::Value v) { return {v, State::OwnedBy}; } }; using PSetType = llvm::SmallSet; @@ -138,8 +144,12 @@ struct LifetimeCheckPass : public LifetimeCheckBase { llvm::DenseMap>; PMapNullHistType pmapNullHist; + // Local pointers SmallPtrSet ptrs; + // Local owners + SmallPtrSet owners; + // Represents the scope context for IR operations (cir.scope, cir.if, // then/else regions, etc). Tracks the declaration of variables in the current // local scope. @@ -645,7 +655,9 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { pmapInvalidHist[addr] = std::make_pair(allocaOp.getLoc(), std::nullopt); break; case TypeCategory::Owner: - llvm_unreachable("NYI"); + // 2.4.2 - When a local Owner x is declared, add (x, {x__1'}) to pmap. + owners.insert(addr); + getPmap()[addr].insert(State::getOwnedBy(addr)); break; case TypeCategory::Value: { // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. From b380f4b767c8d933ce68b7b1c1564deb4cd59a77 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 11 Oct 2022 14:33:43 -0700 Subject: [PATCH 0612/2301] [CIR][Lifetime] Add initial handling for cir.call This is where we gonna catch ctors, special members, etc w.r.t to changing psets, for now filter out uninteresting ctors and rely on clang::CXXMethodDecl and clang::CXXRecordDecl AST nodes for some queries. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 1498b3acad35..252261d7aec4 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -10,6 +10,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" +#include "clang/AST/DeclCXX.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" @@ -37,6 +38,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkAlloca(AllocaOp op); void checkStore(StoreOp op); void checkLoad(LoadOp op); + void checkCall(CallOp callOp); struct Options { enum : unsigned { @@ -217,7 +219,10 @@ struct LifetimeCheckPass : public LifetimeCheckBase { PMapType *currPmap = nullptr; PMapType &getPmap() { return *currPmap; } + ModuleOp theModule; + std::optional astCtx; + void setASTContext(clang::ASTContext *c) { astCtx = c; } void joinPmaps(SmallVectorImpl &pmaps); @@ -800,8 +805,55 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { emitPsetRemark(); } +const clang::CXXMethodDecl *getMethod(ModuleOp mod, StringRef name) { + auto global = mlir::SymbolTable::lookupSymbolIn(mod, name); + assert(global && "expected to find symbol"); + auto method = dyn_cast(global); + if (!method) + return nullptr; + return dyn_cast(method.getAstAttr().getAstDecl()); +} + +void LifetimeCheckPass::checkCall(CallOp callOp) { + if (callOp.getNumOperands() == 0) + return; + + auto methodDecl = getMethod(theModule, callOp.getCallee()); + if (!methodDecl) + return; + + // TODO: only ctor init implemented, assign ops and others needed. + auto ctor = dyn_cast(methodDecl); + if (!ctor) + return; + + // First argument passed is always the alloca for the 'this' ptr. + auto addr = callOp.getOperand(0); + auto allocaOp = dyn_cast_or_null(addr.getDefiningOp()); + + // Not interested in block/function arguments or other source ops for now + // and Owners don't have interesting initialization. + if (!allocaOp || owners.count(addr)) + return; + + // TODO: + // 2.4.2 if the initialization is default initialization or zero + // initialization, example: + // + // int* p{}; + // string_view p; + // + // both results in pset(p) == {null} + // + // FIXME: Implementation is simple, but only do it once we add the + // relevant testcase. Explode here since this a pretty vital one. + if (ctor->isDefaultConstructor()) + llvm_unreachable("NYI"); +} + void LifetimeCheckPass::checkOperation(Operation *op) { if (isa<::mlir::ModuleOp>(op)) { + theModule = cast<::mlir::ModuleOp>(op); for (Region ®ion : op->getRegions()) checkRegion(region); return; @@ -839,6 +891,8 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return checkStore(storeOp); if (auto loadOp = dyn_cast(op)) return checkLoad(loadOp); + if (auto callOp = dyn_cast(op)) + return checkCall(callOp); } void LifetimeCheckPass::runOnOperation() { From d059c2284f2f100086ea6e0d365afa68746abc94 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 12 Oct 2022 14:43:04 -0700 Subject: [PATCH 0613/2301] [CIR][Lifetime][NFC] Split ctor handling into its own method --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 30 +++++++++++-------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 252261d7aec4..8052de61b984 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -40,6 +40,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkLoad(LoadOp op); void checkCall(CallOp callOp); + void checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor); + struct Options { enum : unsigned { None = 0, @@ -814,19 +816,8 @@ const clang::CXXMethodDecl *getMethod(ModuleOp mod, StringRef name) { return dyn_cast(method.getAstAttr().getAstDecl()); } -void LifetimeCheckPass::checkCall(CallOp callOp) { - if (callOp.getNumOperands() == 0) - return; - - auto methodDecl = getMethod(theModule, callOp.getCallee()); - if (!methodDecl) - return; - - // TODO: only ctor init implemented, assign ops and others needed. - auto ctor = dyn_cast(methodDecl); - if (!ctor) - return; - +void LifetimeCheckPass::checkCtor(CallOp callOp, + const clang::CXXConstructorDecl *ctor) { // First argument passed is always the alloca for the 'this' ptr. auto addr = callOp.getOperand(0); auto allocaOp = dyn_cast_or_null(addr.getDefiningOp()); @@ -851,6 +842,19 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { llvm_unreachable("NYI"); } +void LifetimeCheckPass::checkCall(CallOp callOp) { + if (callOp.getNumOperands() == 0) + return; + + auto methodDecl = getMethod(theModule, callOp.getCallee()); + if (!methodDecl) + return; + + // TODO: only ctor init implemented, assign ops and others needed. + if (auto ctor = dyn_cast(methodDecl)) + return checkCtor(callOp, ctor); +} + void LifetimeCheckPass::checkOperation(Operation *op) { if (isa<::mlir::ModuleOp>(op)) { theModule = cast<::mlir::ModuleOp>(op); From 7ca839cc25dbc3aff3738eb892e463e430792ae9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 12 Oct 2022 15:06:13 -0700 Subject: [PATCH 0614/2301] [CIR][Lifetime] Default ctors calls on init should mark pset with null --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 8052de61b984..ad0fd98790cb 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -827,7 +827,7 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, if (!allocaOp || owners.count(addr)) return; - // TODO: + // TODO: zero init // 2.4.2 if the initialization is default initialization or zero // initialization, example: // @@ -835,11 +835,11 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, // string_view p; // // both results in pset(p) == {null} - // - // FIXME: Implementation is simple, but only do it once we add the - // relevant testcase. Explode here since this a pretty vital one. - if (ctor->isDefaultConstructor()) - llvm_unreachable("NYI"); + if (ctor->isDefaultConstructor()) { + getPmap()[addr].clear(); + getPmap()[addr].insert(State::getNullPtr()); + pmapNullHist[addr] = callOp.getLoc(); + } } void LifetimeCheckPass::checkCall(CallOp callOp) { From 716ff8208d2a2fd627479e3ab635cd2baa676088 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 12 Oct 2022 21:59:42 -0700 Subject: [PATCH 0615/2301] [CIR][Lifetime][NFC] Add empty checkMoveAssignment helper --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index ad0fd98790cb..83d187e01dc0 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -41,6 +41,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkCall(CallOp callOp); void checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor); + void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); struct Options { enum : unsigned { @@ -816,6 +817,12 @@ const clang::CXXMethodDecl *getMethod(ModuleOp mod, StringRef name) { return dyn_cast(method.getAstAttr().getAstDecl()); } +void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, + const clang::CXXMethodDecl *m) { + // auto srcObj = callOp.getOperand(0); + llvm_unreachable("NYI"); +} + void LifetimeCheckPass::checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor) { // First argument passed is always the alloca for the 'this' ptr. @@ -853,6 +860,8 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // TODO: only ctor init implemented, assign ops and others needed. if (auto ctor = dyn_cast(methodDecl)) return checkCtor(callOp, ctor); + if (methodDecl->isMoveAssignmentOperator()) + return checkMoveAssignment(callOp, methodDecl); } void LifetimeCheckPass::checkOperation(Operation *op) { From 0655e78a6bcf70d0655f26f32ce92b44fdab5920 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 13 Oct 2022 23:50:22 -0700 Subject: [PATCH 0616/2301] [CIR][CodeGen] Do not generate an extra temporary during materialization --- clang/lib/CIR/CodeGen/CIRGenCall.h | 8 +++++++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 27 ++++++++++++---------- clang/test/CIR/CodeGen/assign-operator.cpp | 10 ++++---- 3 files changed, 27 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 4e8543765536..00fd52ad626f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -174,6 +174,14 @@ class CallArgList : public llvm::SmallVector { void add(RValue rvalue, clang::QualType type) { push_back(CallArg(rvalue, type)); } + + /// Add all the arguments from another CallArgList to this one. After doing + /// this, the old CallArgList retains its list of arguments, but must not + /// be used to emit a call. + void addFrom(const CallArgList &other) { + insert(end(), other.begin(), other.end()); + // TODO: Writebacks, CleanupsToDeactivate, StackBase??? + } }; /// FunctionArgList - Type for representing both the decl and type of parameters diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index b53df4fa2a8d..b2677f2fa16d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -55,7 +55,10 @@ commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD, // Add the rest of the call args if (RtlArgs) { - llvm_unreachable("NYI"); + // Special case: if the caller emitted the arguments right-to-left already + // (prior to emitting the *this argument), we're done. This happens for + // assignment operators. + Args.addFrom(*RtlArgs); } else if (CE) { // Special case: skip first argument of CXXOperatorCall (it is "this"). unsigned ArgsToSkip = isa(CE) ? 1 : 0; @@ -117,15 +120,15 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( LValue TrivialAssignmentRHS; if (auto *OCE = dyn_cast(CE)) { if (OCE->isAssignmentOp()) { - if (TrivialAssignment) { - TrivialAssignmentRHS = buildLValue(CE->getArg(1)); - } else { - assert(0 && "remove me once there's a testcase to cover this"); - RtlArgs = &RtlArgStorage; - buildCallArgs(*RtlArgs, MD->getType()->castAs(), - drop_begin(CE->arguments(), 1), CE->getDirectCallee(), - /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft); - } + // See further note on TrivialAssignment, we don't handle this during + // codegen, differently than LLVM, which early optimizes like this: + // if (TrivialAssignment) { + // TrivialAssignmentRHS = buildLValue(CE->getArg(1)); + // } else { + RtlArgs = &RtlArgStorage; + buildCallArgs(*RtlArgs, MD->getType()->castAs(), + drop_begin(CE->arguments(), 1), CE->getDirectCallee(), + /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft); } } @@ -145,14 +148,14 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( return RValue::get(nullptr); if (TrivialAssignment) { + // From LLVM codegen: // We don't like to generate the trivial copy/move assignment operator // when it isn't necessary; just produce the proper effect here. // It's important that we use the result of EmitLValue here rather than // emitting call arguments, in order to preserve TBAA information from // the RHS. // - // TODO(cir): once there are testcases evaluate if CIR needs to abstract - // this away or optimizing is fine. + // We don't early optimize like LLVM does: // LValue RHS = isa(CE) ? TrivialAssignmentRHS // : // buildLValue(*CE->arg_begin()); diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 5871aaed5db8..8b788416b426 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -73,13 +73,11 @@ int main() { // CHECK: cir.scope { // CHECK: %3 = cir.alloca !_22struct2EString22, cir.ptr , ["s"] {alignment = 8 : i64} // CHECK: %4 = cir.alloca !_22struct2EStringView22, cir.ptr , ["ref.tmp"] {alignment = 8 : i64} -// CHECK: %5 = cir.alloca !_22struct2EStringView22, cir.ptr , ["ref.tmp"] {alignment = 8 : i64} -// CHECK: %6 = cir.get_global @".str" : cir.ptr > -// CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_ZN6StringC2EPKc(%3, %7) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %5 = cir.get_global @".str" : cir.ptr > +// CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_ZN6StringC2EPKc(%3, %6) : (!cir.ptr, !cir.ptr) -> () // CHECK: cir.call @_ZN10StringViewC2ERK6String(%4, %3) : (!cir.ptr, !cir.ptr) -> () -// CHECK: cir.call @_ZN10StringViewC2ERK6String(%5, %3) : (!cir.ptr, !cir.ptr) -> () -// CHECK: %8 = cir.call @_ZN10StringViewaSEOS_(%1, %5) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %4) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } // CHECK: %2 = cir.load %0 : cir.ptr , i32 // CHECK: cir.return %2 : i32 From 9d1f817e8462305dba4c84a798a144cd23e55e77 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 14 Oct 2022 15:52:36 -0700 Subject: [PATCH 0617/2301] [CIR][Lifetime] Add handling for copy ctors and move assignment This is more prep work for when operator* gets introduced, the we'll be actually able to trigger a lifetime issue and write proper testing. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 56 +++++++++++++++---- 1 file changed, 45 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 83d187e01dc0..33a027ec98d1 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -819,21 +819,24 @@ const clang::CXXMethodDecl *getMethod(ModuleOp mod, StringRef name) { void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m) { - // auto srcObj = callOp.getOperand(0); - llvm_unreachable("NYI"); + // MyIntPointer::operator=(MyIntPointer&&)(%dst, %src) + auto dst = callOp.getOperand(0); + auto src = callOp.getOperand(1); + + // Currently only handle move assignments between pointer categories. + if (!(ptrs.count(dst) && ptrs.count(src))) + return; + + // Note that the current pattern here usually comes from a xvalue in src + // where all the initialization is done, and this move assignment is + // where we finally materialize it back to the original pointer category. + // TODO: should CIR ops retain xvalue information somehow? + getPmap()[dst] = getPmap()[src]; + getPmap()[src].clear(); // TODO: should we add null to 'src' pset? } void LifetimeCheckPass::checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor) { - // First argument passed is always the alloca for the 'this' ptr. - auto addr = callOp.getOperand(0); - auto allocaOp = dyn_cast_or_null(addr.getDefiningOp()); - - // Not interested in block/function arguments or other source ops for now - // and Owners don't have interesting initialization. - if (!allocaOp || owners.count(addr)) - return; - // TODO: zero init // 2.4.2 if the initialization is default initialization or zero // initialization, example: @@ -843,9 +846,40 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, // // both results in pset(p) == {null} if (ctor->isDefaultConstructor()) { + // First argument passed is always the alloca for the 'this' ptr. + auto addr = callOp.getOperand(0); + + // Currently two possible actions: + // 1. Skip Owner category initialization. + // 2. Initialize Pointer categories. + if (owners.count(addr)) + return; + + if (!ptrs.count(addr)) + return; + + // Not interested in block/function arguments or any indirect + // provided alloca address. + if (!dyn_cast_or_null(addr.getDefiningOp())) + return; + getPmap()[addr].clear(); getPmap()[addr].insert(State::getNullPtr()); pmapNullHist[addr] = callOp.getLoc(); + return; + } + + // Copy ctor call that initializes a pointer type from an owner + // Example: + // MyIntPointer::MyIntPointer(MyIntOwner const&)(%5, %4) + if (ctor->isCopyConstructor()) { + auto addr = callOp.getOperand(0); + auto owner = callOp.getOperand(1); + + if (ptrs.count(addr) && owners.count(owner)) { + getPmap()[addr].clear(); + getPmap()[addr].insert(State::getOwnedBy(owner)); + } } } From fdfa04131c73206d90e6dfa8f14e91b46081ab61 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 14 Oct 2022 16:14:26 -0700 Subject: [PATCH 0618/2301] [CIR][Lifetime][NFC] Split checkLoad into checkPointerDeref --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 33a027ec98d1..a35e77d4dfbc 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -40,6 +40,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkLoad(LoadOp op); void checkCall(CallOp callOp); + void checkPointerDeref(mlir::Value addr, mlir::Location loc); + void checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor); void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); @@ -757,6 +759,11 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { if (!loadOp.getIsDeref()) return; + checkPointerDeref(addr, loadOp.getLoc()); +} + +void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, + mlir::Location loc) { bool hasInvalid = getPmap()[addr].count(State::getInvalid()); bool hasNullptr = getPmap()[addr].count(State::getNullPtr()); @@ -764,7 +771,7 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { llvm::SmallString<128> psetStr; llvm::raw_svector_ostream Out(psetStr); printPset(getPmap()[addr], Out); - emitRemark(loadOp.getLoc()) << "pset => " << Out.str(); + emitRemark(loc) << "pset => " << Out.str(); }; bool psetRemarkEmitted = false; @@ -780,7 +787,7 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { // Looks like we found a bad path leading to this deference point, // diagnose it. StringRef varName = getVarNameFromValue(addr); - auto D = emitWarning(loadOp.getLoc()); + auto D = emitWarning(loc); D << "use of invalid pointer '" << varName << "'"; if (hasInvalid && opts.emitHistoryInvalid()) { From 83cda8129b6086f5d2e6ddd42fd45d8120568abe Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 14 Oct 2022 16:02:13 -0700 Subject: [PATCH 0619/2301] [CIR][Lifetime] Add support for ::operator* to check dereferences We finally can diagnose some of Owner/Pointer categories as of this change, but still unreliable since the proper KILL logic hasn't been implemented, this should come next with a final testcase that wraps testing for all previous building parts. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index a35e77d4dfbc..89861cb1467d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -44,6 +44,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor); void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); + void checkOperatorStar(CallOp callOp); struct Options { enum : unsigned { @@ -668,6 +669,7 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { // 2.4.2 - When a local Owner x is declared, add (x, {x__1'}) to pmap. owners.insert(addr); getPmap()[addr].insert(State::getOwnedBy(addr)); + currScope->localValues.push_back(addr); break; case TypeCategory::Value: { // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. @@ -890,6 +892,20 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, } } +static bool isOperatorStar(const clang::CXXMethodDecl *m) { + if (!m->isOverloadedOperator()) + return false; + return m->getOverloadedOperator() == clang::OverloadedOperatorKind::OO_Star; +} + +void LifetimeCheckPass::checkOperatorStar(CallOp callOp) { + auto addr = callOp.getOperand(0); + if (!ptrs.count(addr)) + return; + + checkPointerDeref(addr, callOp.getLoc()); +} + void LifetimeCheckPass::checkCall(CallOp callOp) { if (callOp.getNumOperands() == 0) return; @@ -903,6 +919,8 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { return checkCtor(callOp, ctor); if (methodDecl->isMoveAssignmentOperator()) return checkMoveAssignment(callOp, methodDecl); + if (isOperatorStar(methodDecl)) + return checkOperatorStar(callOp); } void LifetimeCheckPass::checkOperation(Operation *op) { From 0a384c84281725111b7497f85ad91279560b245a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 17 Oct 2022 18:22:21 -0400 Subject: [PATCH 0620/2301] [CIR] Temporary tip of tree fix for NoSideEffect -> Pure --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 32eaddca24a7..f4fc6231553b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -553,7 +553,7 @@ def UnaryOpKind : I32EnumAttr< let cppNamespace = "::mlir::cir"; } -// FIXME: NoSideEffect won't work when we add overloading. +// FIXME: Pure won't work when we add overloading. def UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> { let summary = "Unary operations"; let description = [{ From 097c591a2abcddea8044787732e516f2c996727c Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Sun, 16 Oct 2022 15:02:38 -0700 Subject: [PATCH 0621/2301] [CIR] Implement more of CheckAggExprForMemSetUse() Implement another early return so the empty lambda also bail for this optimization -- in particular not performing it for small types. Fixes llvm/clangir#6. --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 5 +++++ clang/lib/CIR/CodeGen/CIRGenValue.h | 8 ++++++++ clang/test/CIR/CodeGen/lambda.cpp | 5 ++--- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 0231ba7e5df8..6c34d1e776e1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -299,6 +299,11 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, return; } + // If the type is 16-bytes or smaller, prefer individual stores over memset. + CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType()); + if (Size <= CharUnits::fromQuantity(16)) + return; + llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 9e3a344cc12f..499b55e238c6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -364,6 +364,14 @@ class AggValueSlot { bool isSanitizerChecked() const { return SanitizerCheckedFlag; } IsZeroed_t isZeroed() const { return IsZeroed_t(ZeroedFlag); } + + /// Get the preferred size to use when storing a value to this slot. This + /// is the type size unless that might overlap another object, in which + /// case it's the dsize. + clang::CharUnits getPreferredSize(clang::ASTContext &Ctx, clang::QualType Type) { + return mayOverlap() ? Ctx.getTypeInfoDataSizeInChars(Type).Width + : Ctx.getTypeSizeInChars(Type); + } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 91e72fe892be..d1ec3e4fc8a3 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -1,12 +1,11 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * void fn() { auto a = [](){}; } -// CHECK: !22class2Eanon22 = !cir.struct<"class.anon", i8> +// CHECK: !_22class2Eanon22 = !cir.struct<"class.anon", i8> // CHECK-NEXT: module // CHECK-NEXT: cir.func @_Z2fnv() -// CHECK-NEXT: %0 = cir.alloca !22class2Eanon22, cir.ptr , ["a"] +// CHECK-NEXT: %0 = cir.alloca !_22class2Eanon22, cir.ptr , ["a"] From 5f586be54218ff754e9e7f8de51cfa734873c107 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 17 Oct 2022 19:15:26 -0400 Subject: [PATCH 0622/2301] [CIR] Add clangAST as a library requirement for MLIRCIRTransforms --- clang/lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 61ff272d3cac..3a9e96715740 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -7,6 +7,7 @@ add_clang_library(MLIRCIRTransforms MLIRCIRPassIncGen LINK_LIBS PUBLIC + clangAST MLIRAnalysis MLIRIR From 370d7942f7e0e284414b359c68fa62d2f4f66072 Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Sat, 15 Oct 2022 18:10:49 -0700 Subject: [PATCH 0623/2301] [CIR] Add Plus and Minus to Unary --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 12 ++++++++---- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 3 +++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 ++++++ 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index f4fc6231553b..e0a5bca25f75 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -542,14 +542,18 @@ def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods; -def UnaryOpKind_Dec : I32EnumAttrCase<"Dec", 2, "dec">; +def UnaryOpKind_Inc : I32EnumAttrCase<"Inc", 1, "inc">; +def UnaryOpKind_Dec : I32EnumAttrCase<"Dec", 2, "dec">; +def UnaryOpKind_Plus : I32EnumAttrCase<"Plus", 3, "plus">; +def UnaryOpKind_Minus : I32EnumAttrCase<"Minus", 4, "minus">; def UnaryOpKind : I32EnumAttr< "UnaryOpKind", "unary operation kind", [UnaryOpKind_Inc, - UnaryOpKind_Dec]> { + UnaryOpKind_Dec, + UnaryOpKind_Plus, + UnaryOpKind_Minus]> { let cppNamespace = "::mlir::cir"; } @@ -558,7 +562,7 @@ def UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> { let summary = "Unary operations"; let description = [{ `cir.unary` performs the unary operation according to - the specified opcode kind: [inc, dec]. + the specified opcode kind: [inc, dec, plus, minus]. Note for inc and dec: the operation corresponds only to the addition/subtraction, its input is expect to come from a load diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index c69aba717743..61715fb4ccaa 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -236,6 +236,9 @@ class CIRUnaryOpLowering : public mlir::OpRewritePattern { assert(type.isa() && "operand type not supported yet"); switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Plus: + case mlir::cir::UnaryOpKind::Minus: + llvm_unreachable("NYI"); case mlir::cir::UnaryOpKind::Inc: { auto One = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 86e64b11f750..115ba4f94b13 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1331,6 +1331,12 @@ LogicalResult UnaryOp::verify() { return emitOpError() << "requires result to be used by a memory store " "to the same address as the input memory load"; } + case cir::UnaryOpKind::Plus: + // Nothing to verify. + return success(); + case cir::UnaryOpKind::Minus: + // Nothing to verify. + return success(); } llvm_unreachable("Unknown UnaryOp kind?"); From 1eea6e36946976198bca634c7fb92d226e3728e7 Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Sat, 15 Oct 2022 18:11:04 -0700 Subject: [PATCH 0624/2301] [CIR][CodeGen] Add basic support for unary plus/minus --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 47 +++++++++++++++++++++- clang/test/CIR/CodeGen/unary.cpp | 26 ++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/unary.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 4b520948f4cc..7a77e7f31447 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -334,11 +334,44 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitUnaryPlus(const UnaryOperator *E) { - llvm_unreachable("NYI"); + // NOTE(cir): QualType function parameter still not used, so don´t replicate + // it here yet. + QualType promotionTy = getPromotionType(E->getSubExpr()->getType()); + auto result = VisitPlus(E, promotionTy); + if (result && !promotionTy.isNull()) + assert(0 && "not implemented yet"); + return buildUnaryOp(E, mlir::cir::UnaryOpKind::Plus, result); + } + + mlir::Value VisitPlus(const UnaryOperator *E, QualType PromotionType) { + // This differs from gcc, though, most likely due to a bug in gcc. + TestAndClearIgnoreResultAssign(); + if (!PromotionType.isNull()) + assert(0 && "scalar promotion not implemented yet"); + return Visit(E->getSubExpr()); } + mlir::Value VisitUnaryMinus(const UnaryOperator *E) { - llvm_unreachable("NYI"); + // NOTE(cir): QualType function parameter still not used, so don´t replicate + // it here yet. + QualType promotionTy = getPromotionType(E->getSubExpr()->getType()); + auto result = VisitMinus(E, promotionTy); + if (result && !promotionTy.isNull()) + assert(0 && "not implemented yet"); + return buildUnaryOp(E, mlir::cir::UnaryOpKind::Minus, result); } + + mlir::Value VisitMinus(const UnaryOperator *E, QualType PromotionType) { + TestAndClearIgnoreResultAssign(); + if (!PromotionType.isNull()) + assert(0 && "scalar promotion not implemented yet"); + + // NOTE: LLVM codegen will lower this directly to either a FNeg + // or a Sub instruction. In CIR this will be handled later in LowerToLLVM. + + return Visit(E->getSubExpr()); + } + mlir::Value VisitUnaryNot(const UnaryOperator *E) { llvm_unreachable("NYI"); } mlir::Value VisitUnaryLNot(const UnaryOperator *E) { llvm_unreachable("NYI"); @@ -592,6 +625,16 @@ class ScalarExprEmitter : public StmtVisitor { buildCompoundAssign(const CompoundAssignOperator *E, mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &)); + // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM codegen. + QualType getPromotionType(QualType Ty) { + if (auto *CT = Ty->getAs()) { + llvm_unreachable("NYI"); + } + if (Ty.UseExcessPrecision(CGF.getContext())) + llvm_unreachable("NYI"); + return QualType(); + } + // Binary operators and binary compound assignment operators. #define HANDLEBINOP(OP) \ mlir::Value VisitBin##OP(const BinaryOperator *E) { \ diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp new file mode 100644 index 000000000000..fc4d888d74ad --- /dev/null +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +unsigned up0() { + unsigned a = 1; + return +a; +} + +// CHECK: cir.func @_Z3up0v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#OUTPUT:]] = cir.unary(plus, %[[#INPUT]]) +// CHECK: cir.store %[[#OUTPUT]], %[[#RET]] + +unsigned um0() { + unsigned a = 1; + return -a; +} + +// CHECK: cir.func @_Z3um0v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#OUTPUT:]] = cir.unary(minus, %[[#INPUT]]) +// CHECK: cir.store %[[#OUTPUT]], %[[#RET]] From 97fffd04878d01ace1e5c6d5e403c13a9c9e0f7f Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Sat, 15 Oct 2022 18:11:15 -0700 Subject: [PATCH 0625/2301] [CIR][CodeGen] Integer lowering of unary plus and minus --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 13 ++++++-- clang/test/CIR/CIRToLLVM/unary-plus-minus.cir | 30 +++++++++++++++++++ 2 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CIRToLLVM/unary-plus-minus.cir diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 61715fb4ccaa..bec0dd05a2a3 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -236,8 +236,6 @@ class CIRUnaryOpLowering : public mlir::OpRewritePattern { assert(type.isa() && "operand type not supported yet"); switch (op.getKind()) { - case mlir::cir::UnaryOpKind::Plus: - case mlir::cir::UnaryOpKind::Minus: llvm_unreachable("NYI"); case mlir::cir::UnaryOpKind::Inc: { auto One = rewriter.create( @@ -253,6 +251,17 @@ class CIRUnaryOpLowering : public mlir::OpRewritePattern { op.getInput(), One); break; } + case mlir::cir::UnaryOpKind::Plus: { + rewriter.replaceOp(op, op.getInput()); + break; + } + case mlir::cir::UnaryOpKind::Minus: { + auto Zero = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, 0)); + rewriter.replaceOpWithNewOp(op, op.getType(), Zero, + op.getInput()); + break; + } } return mlir::LogicalResult::success(); diff --git a/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir b/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir new file mode 100644 index 000000000000..7277df90d2c5 --- /dev/null +++ b/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir @@ -0,0 +1,30 @@ +// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// XFAIL: * + +module { + cir.func @foo() { + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.cst(2 : i32) : i32 + cir.store %2, %0 : i32, cir.ptr + cir.store %2, %1 : i32, cir.ptr + + %3 = cir.load %0 : cir.ptr , i32 + %4 = cir.unary(plus, %3) : i32, i32 + cir.store %4, %0 : i32, cir.ptr + + %5 = cir.load %1 : cir.ptr , i32 + %6 = cir.unary(minus, %5) : i32, i32 + cir.store %6, %1 : i32, cir.ptr + cir.return + } +} + +// MLIR: %[[#INPUT_PLUS:]] = memref.load +// MLIR: memref.store %[[#INPUT_PLUS]] +// MLIR: %[[#INPUT_MINUS:]] = memref.load +// MLIR: %[[ZERO:[a-z0-9_]+]] = arith.constant 0 +// MLIR: arith.subi %[[ZERO]], %[[#INPUT_MINUS]] + +// LLVM: = sub i32 0, %[[#]] From 1d39fcc5b995a04f90bddf86221b64dc26487a4a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Oct 2022 12:46:17 -0700 Subject: [PATCH 0626/2301] [CIR][Lifetime][NFC] Add more debugging facilities and account for owned types --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 89861cb1467d..3506502b2d23 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -173,7 +173,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Track all local values added in this scope llvm::SmallVector localValues; - void dumpLocalValues(); + LLVM_DUMP_METHOD void dumpLocalValues(); }; class LexicalScopeGuard { @@ -233,7 +233,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void joinPmaps(SmallVectorImpl &pmaps); void printPset(PSetType &pset, llvm::raw_ostream &OS = llvm::errs()); - void dumpPmap(PMapType &pmap); + LLVM_DUMP_METHOD void dumpPmap(PMapType &pmap); + LLVM_DUMP_METHOD void dumpCurrentPmap(); }; } // namespace @@ -1013,6 +1014,11 @@ void LifetimeCheckPass::State::dump(llvm::raw_ostream &OS) { case LocalValue: OS << getVarNameFromValue(val.getPointer()); break; + case OwnedBy: + OS << getVarNameFromValue(val.getPointer()) << "'"; + break; + default: + llvm_unreachable("Not handled"); } } @@ -1028,6 +1034,8 @@ void LifetimeCheckPass::printPset(PSetType &pset, llvm::raw_ostream &OS) { OS << " }"; } +void LifetimeCheckPass::dumpCurrentPmap() { dumpPmap(*currPmap); } + void LifetimeCheckPass::dumpPmap(PMapType &pmap) { llvm::errs() << "pmap {\n"; int entry = 0; From ff88e38c1820f1f4b72b21c1c366fd9c507b0bf3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Oct 2022 12:47:02 -0700 Subject: [PATCH 0627/2301] [CIR][Lifetime] Fix think'o and properly handle ctor init to ptr from owner --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 43 +++++++++++++++---- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 3506502b2d23..8e2ffddfe8e1 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -46,6 +46,10 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); void checkOperatorStar(CallOp callOp); + // Helpers + bool isCtorInitFromOwner(CallOp callOp, + const clang::CXXConstructorDecl *ctor); + struct Options { enum : unsigned { None = 0, @@ -106,7 +110,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { State(DataTy d) { val.setInt(d); } State(mlir::Value v, DataTy d = LocalValue) { assert((d == LocalValue || d == OwnedBy) && "expected value or owned"); - val.setPointerAndInt(v, LocalValue); + val.setPointerAndInt(v, d); } static constexpr int KindBits = 3; @@ -845,6 +849,27 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, getPmap()[src].clear(); // TODO: should we add null to 'src' pset? } +// User defined ctors that initialize from owner types is one +// way of tracking owned pointers. +// +// Example: +// MyIntPointer::MyIntPointer(MyIntOwner const&)(%5, %4) +// +bool LifetimeCheckPass::isCtorInitFromOwner( + CallOp callOp, const clang::CXXConstructorDecl *ctor) { + if (callOp.getNumOperands() < 2) + return false; + + // FIXME: should we scan all arguments past first to look for an owner? + auto addr = callOp.getOperand(0); + auto owner = callOp.getOperand(1); + + if (ptrs.count(addr) && owners.count(owner)) + return true; + + return false; +} + void LifetimeCheckPass::checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor) { // TODO: zero init @@ -879,17 +904,17 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, return; } - // Copy ctor call that initializes a pointer type from an owner - // Example: - // MyIntPointer::MyIntPointer(MyIntOwner const&)(%5, %4) + // User defined copy ctor calls ... if (ctor->isCopyConstructor()) { + llvm_unreachable("NYI"); + } + + if (isCtorInitFromOwner(callOp, ctor)) { auto addr = callOp.getOperand(0); auto owner = callOp.getOperand(1); - - if (ptrs.count(addr) && owners.count(owner)) { - getPmap()[addr].clear(); - getPmap()[addr].insert(State::getOwnedBy(owner)); - } + getPmap()[addr].clear(); + getPmap()[addr].insert(State::getOwnedBy(owner)); + return; } } From 63a0e84ce107dded629ad5489ffbb747f3c3e365 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Oct 2022 14:28:06 -0700 Subject: [PATCH 0628/2301] [CIR][Lifetime] Detect dangling pointers on Owner types First example now works and we can successfully diagnose a basic dangling ref to owner type. - Handle kills for references owners o', but kill(o') not yet implemented, should be done together with its own use/test case. - Make sure invalidating moved-from adds proper state. - Add testcase. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 43 +++++++++++++------ .../CIR/Transforms/lifetime-check-owner.cpp | 26 +++++++++++ 2 files changed, 57 insertions(+), 12 deletions(-) create mode 100644 clang/test/CIR/Transforms/lifetime-check-owner.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 8e2ffddfe8e1..3fe7f3ba0287 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -297,18 +297,34 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { if (pointee == ptr) continue; - // If the local value is part of this pset, it means - // we need to invalidate it, otherwise keep searching. - // FIXME: add support for x', x'', etc... + // If the local value is part of this pset, it means we need to invalidate + // it, otherwise keep searching. Note that this assumes a current pset + // cannot have multiple entries for values and owned values at the same + // time, for example this should not be possible: pset(s) = {o, o'}. auto &pset = mapEntry.second; - State valState = State::getLocalValue(pointee); - if (!pset.contains(valState)) - continue; - - // Erase the reference and mark this invalid. - // FIXME: add a way to just mutate the state. - pset.erase(valState); - pset.insert(State::getInvalid()); + auto killValueInPset = [&](mlir::Value v) { + State valState = State::getLocalValue(v); + if (pset.contains(valState)) { + // Erase the reference and mark this invalid. + // FIXME: add a way to just mutate the state. + pset.erase(valState); + pset.insert(State::getInvalid()); + return; + } + + if (Pass.owners.count(v)) { + valState = State::getOwnedBy(v); + if (pset.contains(valState)) { + pset.erase(valState); + pset.insert(State::getInvalid()); + return; + } + // TODO: o'', ... + } + }; + + // KILL(x) for a particular pset. + killValueInPset(pointee); Pass.pmapInvalidHist[ptr] = std::make_pair(getEndLocForHist(*Pass.currScope), pointee); } @@ -846,7 +862,10 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, // where we finally materialize it back to the original pointer category. // TODO: should CIR ops retain xvalue information somehow? getPmap()[dst] = getPmap()[src]; - getPmap()[src].clear(); // TODO: should we add null to 'src' pset? + // TODO: should this be null? or should we swap dst/src pset state? + // For now just consider moved-from state as invalid. + getPmap()[src].clear(); + getPmap()[src].insert(State::getInvalid()); } // User defined ctors that initialize from owner types is one diff --git a/clang/test/CIR/Transforms/lifetime-check-owner.cpp b/clang/test/CIR/Transforms/lifetime-check-owner.cpp new file mode 100644 index 000000000000..7c61c37e6b50 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-owner.cpp @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +struct [[gsl::Owner(int)]] MyIntOwner { + int val; + MyIntOwner(int v) : val(v) {} + int &operator*(); +}; + +struct [[gsl::Pointer(int)]] MyIntPointer { + int *ptr; + MyIntPointer(int *p = nullptr) : ptr(p) {} + MyIntPointer(const MyIntOwner &); + int &operator*(); + MyIntOwner toOwner(); +}; + +void yolo() { + MyIntPointer p; + { + MyIntOwner o(1); + p = o; + *p = 3; // expected-remark {{pset => { o' }}} + } // expected-note {{pointee 'o' invalidated at end of scope}} + *p = 4; // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} +} \ No newline at end of file From c6669bafa633c651d653d900d5484e1e1e746693 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Oct 2022 16:25:30 -0700 Subject: [PATCH 0629/2301] [CIR][CodeGen][NFC] Delete duplicated past'o for IsWrappedCXXThis --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 29 +++----------------------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 --- 2 files changed, 3 insertions(+), 29 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 396d33cec200..8d9535e178e2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1116,7 +1116,7 @@ LValue CIRGenFunction::buildCheckedLValue(const Expr *E, TypeCheckKind TCK) { if (!isa(E) && !LV.isBitField() && LV.isSimple()) { SanitizerSet SkippedChecks; if (const auto *ME = dyn_cast(E)) { - bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase()); + bool IsBaseCXXThis = isWrappedCXXThis(ME->getBase()); if (IsBaseCXXThis) SkippedChecks.set(SanitizerKind::Alignment, true); if (IsBaseCXXThis || isa(ME->getBase())) @@ -1129,7 +1129,7 @@ LValue CIRGenFunction::buildCheckedLValue(const Expr *E, TypeCheckKind TCK) { } // TODO(cir): candidate for common AST helper for LLVM and CIR codegen -bool CIRGenFunction::IsWrappedCXXThis(const Expr *Obj) { +bool CIRGenFunction::isWrappedCXXThis(const Expr *Obj) { const Expr *Base = Obj; while (!isa(Base)) { // The result of a dynamic_cast can be null. @@ -1167,7 +1167,7 @@ LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { Address Addr = buildPointerWithAlignment(BaseExpr, &BaseInfo); QualType PtrTy = BaseExpr->getType()->getPointeeType(); SanitizerSet SkippedChecks; - bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr); + bool IsBaseCXXThis = isWrappedCXXThis(BaseExpr); if (IsBaseCXXThis) SkippedChecks.set(SanitizerKind::Alignment, true); if (IsBaseCXXThis || isa(BaseExpr)) @@ -1633,29 +1633,6 @@ RValue CIRGenFunction::buildCXXMemberCallExpr(const CXXMemberCallExpr *CE, CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base); } -bool CIRGenFunction::isWrappedCXXThis(const Expr *object) { - const Expr *base = object; - while (!isa(base)) { - // The result of a dynamic_cast can be null. - if (isa(base)) - return false; - - if (const auto *ce = dyn_cast(base)) { - (void)ce; - llvm_unreachable("NYI"); - } else if (const auto *pe = dyn_cast(base)) { - (void)pe; - llvm_unreachable("NYI"); - } else if (const auto *uo = dyn_cast(base)) { - (void)uo; - llvm_unreachable("NYI"); - } else { - return false; - } - } - return true; -} - RValue CIRGenFunction::buildReferenceBindingToExpr(const Expr *E) { // Emit the expression as an lvalue. LValue LV = buildLValue(E); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index fe070ef2e53e..797f3249b77f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -979,9 +979,6 @@ class CIRGenFunction { void buildAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer); - /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts. - static bool IsWrappedCXXThis(const Expr *E); - LValue buildCheckedLValue(const Expr *E, TypeCheckKind TCK); LValue buildMemberExpr(const MemberExpr *E); From 21094f9be9160d470d77af2966aebbb81f5b2173 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Oct 2022 16:32:53 -0700 Subject: [PATCH 0630/2301] [CIR][CodeGen] Implement ScalarExpr emission for VisitUnaryDeref While here also properly honor void casts. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 10 +++++++--- clang/test/CIR/CodeGen/unary-deref.cpp | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/unary-deref.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 7a77e7f31447..2b3a1f06eb8a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -331,7 +331,9 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitUnaryDeref(const UnaryOperator *E) { - llvm_unreachable("NYI"); + if (E->getType()->isVoidType()) + return Visit(E->getSubExpr()); // the actual value should be unused + return buildLoadOfLValue(E); } mlir::Value VisitUnaryPlus(const UnaryOperator *E) { // NOTE(cir): QualType function parameter still not used, so don´t replicate @@ -998,8 +1000,10 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { llvm_unreachable("NYI"); case CK_PointerToIntegral: llvm_unreachable("NYI"); - case CK_ToVoid: - llvm_unreachable("NYI"); + case CK_ToVoid: { + CGF.buildIgnoredExpr(E); + return nullptr; + } case CK_MatrixCast: llvm_unreachable("NYI"); case CK_VectorSplat: diff --git a/clang/test/CIR/CodeGen/unary-deref.cpp b/clang/test/CIR/CodeGen/unary-deref.cpp new file mode 100644 index 000000000000..9e41f5225fac --- /dev/null +++ b/clang/test/CIR/CodeGen/unary-deref.cpp @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +struct MyIntPointer { + int *ptr = nullptr; + int read() const { return *ptr; } +}; + +void foo() { + MyIntPointer p; + (void)p.read(); +} + +// CHECK: cir.func linkonce_odr @_ZNK12MyIntPointer4readEv +// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "ptr"}> +// CHECK: %4 = cir.load deref %3 : cir.ptr > +// CHECK: %5 = cir.load %4 From 6c006f04c651c97406d0a9e33ce8b8d369c4b239 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Oct 2022 23:30:42 -0700 Subject: [PATCH 0631/2301] [CIR][LifetimeCheck][NFC] Add detection for non-const use of owners Don't apply the kill just yet, but only the detection --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 3fe7f3ba0287..ebeccf77d59d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -49,6 +49,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Helpers bool isCtorInitFromOwner(CallOp callOp, const clang::CXXConstructorDecl *ctor); + bool isNonConstUseOfOwner(CallOp callOp, const clang::CXXMethodDecl *m); struct Options { enum : unsigned { @@ -951,6 +952,16 @@ void LifetimeCheckPass::checkOperatorStar(CallOp callOp) { checkPointerDeref(addr, callOp.getLoc()); } +bool LifetimeCheckPass::isNonConstUseOfOwner(CallOp callOp, + const clang::CXXMethodDecl *m) { + if (m->isConst()) + return false; + auto addr = callOp.getOperand(0); + if (owners.count(addr)) + return true; + return false; +} + void LifetimeCheckPass::checkCall(CallOp callOp) { if (callOp.getNumOperands() == 0) return; @@ -966,6 +977,15 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { return checkMoveAssignment(callOp, methodDecl); if (isOperatorStar(methodDecl)) return checkOperatorStar(callOp); + + // For any other methods... + + // Non-const member call to a Owner invalidates any of its users. + if (isNonConstUseOfOwner(callOp, methodDecl)) { + // auto addr = callOp.getOperand(0); + // TODO: kill(a') + llvm_unreachable("NYI"); + } } void LifetimeCheckPass::checkOperation(Operation *op) { From b59ed30bb1081929dd862fd424845cc2e75eecc9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 18 Oct 2022 17:39:58 -0700 Subject: [PATCH 0632/2301] [CIR][Lifetime][NFC] Split scope cleanup into proper kill functions --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 100 ++++++++++-------- 1 file changed, 53 insertions(+), 47 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index ebeccf77d59d..12f2010f3f3d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -157,6 +157,11 @@ struct LifetimeCheckPass : public LifetimeCheckBase { llvm::DenseMap>; PMapNullHistType pmapNullHist; + // Provides p1179's 'KILL' functionality. See implementation for more + // information. + void kill(mlir::Value v); + void killInPset(PSetType &pset, mlir::Value v); + // Local pointers SmallPtrSet ptrs; @@ -278,60 +283,61 @@ static Location getEndLocForHist(LifetimeCheckPass::LexicalScopeContext &lsc) { return getEndLocForHist(lsc.parent.get()); } +void LifetimeCheckPass::killInPset(PSetType &pset, mlir::Value v) { + State valState = State::getLocalValue(v); + if (pset.contains(valState)) { + // Erase the reference and mark this invalid. + // FIXME: add a way to just mutate the state. + pset.erase(valState); + pset.insert(State::getInvalid()); + return; + } + + // Note that this assumes a current pset cannot have multiple entries for + // values and owned values at the same time, for example this should not be + // possible: pset(s) = {o, o'}. + if (owners.count(v)) { + valState = State::getOwnedBy(v); + if (pset.contains(valState)) { + pset.erase(valState); + pset.insert(State::getInvalid()); + return; + } + // TODO: o'', ... + } +} + +// 2.3 - KILL(x) means to replace all occurrences of x and x' and x'' (etc.) +// in the pmap with invalid. For example, if pmap is {(p1,{a}), (p2,{a'})}, +// KILL(a') would invalidate only p2, and KILL(a) would invalidate both p1 and +// p2. +void LifetimeCheckPass::kill(mlir::Value v) { + for (auto &mapEntry : getPmap()) { + auto ptr = mapEntry.first; + + // We are deleting this entry anyways, nothing to do here. + if (v == ptr) + continue; + + // If the local value is part of this pset, it means we need to + // invalidate it, otherwise keep searching. + killInPset(mapEntry.second, v); + pmapInvalidHist[ptr] = std::make_pair(getEndLocForHist(*currScope), v); + } + + // Delete the local value from pmap, since its now gone. + getPmap().erase(v); +} + void LifetimeCheckPass::LexicalScopeGuard::cleanup() { auto *localScope = Pass.currScope; - auto &pmap = Pass.getPmap(); // If we are cleaning up at the function level, nothing // to do here cause we are past all possible deference points if (localScope->Depth == 0) return; - // 2.3 - KILL(x) means to replace all occurrences of x and x' and x'' (etc.) - // in the pmap with invalid. For example, if pmap is {(p1,{a}), (p2,{a'})}, - // KILL(a') would invalidate only p2, and KILL(a) would invalidate both p1 and - // p2. - for (auto pointee : localScope->localValues) { - for (auto &mapEntry : pmap) { - auto ptr = mapEntry.first; - - // We are deleting this entry anyways, nothing to do here. - if (pointee == ptr) - continue; - - // If the local value is part of this pset, it means we need to invalidate - // it, otherwise keep searching. Note that this assumes a current pset - // cannot have multiple entries for values and owned values at the same - // time, for example this should not be possible: pset(s) = {o, o'}. - auto &pset = mapEntry.second; - auto killValueInPset = [&](mlir::Value v) { - State valState = State::getLocalValue(v); - if (pset.contains(valState)) { - // Erase the reference and mark this invalid. - // FIXME: add a way to just mutate the state. - pset.erase(valState); - pset.insert(State::getInvalid()); - return; - } - - if (Pass.owners.count(v)) { - valState = State::getOwnedBy(v); - if (pset.contains(valState)) { - pset.erase(valState); - pset.insert(State::getInvalid()); - return; - } - // TODO: o'', ... - } - }; - - // KILL(x) for a particular pset. - killValueInPset(pointee); - Pass.pmapInvalidHist[ptr] = - std::make_pair(getEndLocForHist(*Pass.currScope), pointee); - } - // Delete the local value from pmap, since its gone now. - pmap.erase(pointee); - } + for (auto pointee : localScope->localValues) + Pass.kill(pointee); } void LifetimeCheckPass::checkBlock(Block &block) { From ab5f360a5892e4153d3062d4cd5edb9217d228bd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 18 Oct 2022 17:54:15 -0700 Subject: [PATCH 0633/2301] [CIR][Lifetime] Also break down kill() into killInPset() --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 30 +++++++------------ 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 12f2010f3f3d..7bd531e8b6e8 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -160,7 +160,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Provides p1179's 'KILL' functionality. See implementation for more // information. void kill(mlir::Value v); - void killInPset(PSetType &pset, mlir::Value v); + void killInPset(PSetType &pset, const State &valState); // Local pointers SmallPtrSet ptrs; @@ -283,8 +283,7 @@ static Location getEndLocForHist(LifetimeCheckPass::LexicalScopeContext &lsc) { return getEndLocForHist(lsc.parent.get()); } -void LifetimeCheckPass::killInPset(PSetType &pset, mlir::Value v) { - State valState = State::getLocalValue(v); +void LifetimeCheckPass::killInPset(PSetType &pset, const State &valState) { if (pset.contains(valState)) { // Erase the reference and mark this invalid. // FIXME: add a way to just mutate the state. @@ -292,19 +291,6 @@ void LifetimeCheckPass::killInPset(PSetType &pset, mlir::Value v) { pset.insert(State::getInvalid()); return; } - - // Note that this assumes a current pset cannot have multiple entries for - // values and owned values at the same time, for example this should not be - // possible: pset(s) = {o, o'}. - if (owners.count(v)) { - valState = State::getOwnedBy(v); - if (pset.contains(valState)) { - pset.erase(valState); - pset.insert(State::getInvalid()); - return; - } - // TODO: o'', ... - } } // 2.3 - KILL(x) means to replace all occurrences of x and x' and x'' (etc.) @@ -319,9 +305,15 @@ void LifetimeCheckPass::kill(mlir::Value v) { if (v == ptr) continue; - // If the local value is part of this pset, it means we need to - // invalidate it, otherwise keep searching. - killInPset(mapEntry.second, v); + // ... replace all occurrences of x and x' and x''. Start with the primes + // so we first remove uses and then users. + // + // FIXME: right now we only support x and x' + auto &pset = mapEntry.second; + if (owners.count(v)) + killInPset(pset, State::getOwnedBy(v)); + + killInPset(pset, State::getLocalValue(v)); pmapInvalidHist[ptr] = std::make_pair(getEndLocForHist(*currScope), v); } From 8b1a6c0d183107047e5750b2e2ab9a45ca4f65d7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 18 Oct 2022 18:15:54 -0700 Subject: [PATCH 0634/2301] [CIR][Lifetime][NFC] Make kill() work based on States, allowing it to decouple for reuse --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 7bd531e8b6e8..e047e6a0daae 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -136,6 +136,15 @@ struct LifetimeCheckPass : public LifetimeCheckBase { return val.getInt() == RHS.val.getInt(); } + bool isLocalValue() const { return val.getInt() == LocalValue; } + bool isOwnedBy() const { return val.getInt() == OwnedBy; } + bool hasValue() const { return isLocalValue() || isOwnedBy(); } + + mlir::Value getData() const { + assert(hasValue() && "data type does not hold a mlir::Value"); + return val.getPointer(); + } + void dump(llvm::raw_ostream &OS = llvm::errs()); static State getInvalid() { return {}; } @@ -159,8 +168,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Provides p1179's 'KILL' functionality. See implementation for more // information. - void kill(mlir::Value v); - void killInPset(PSetType &pset, const State &valState); + void kill(const State &s); + void killInPset(PSetType &pset, const State &s); // Local pointers SmallPtrSet ptrs; @@ -283,11 +292,11 @@ static Location getEndLocForHist(LifetimeCheckPass::LexicalScopeContext &lsc) { return getEndLocForHist(lsc.parent.get()); } -void LifetimeCheckPass::killInPset(PSetType &pset, const State &valState) { - if (pset.contains(valState)) { +void LifetimeCheckPass::killInPset(PSetType &pset, const State &s) { + if (pset.contains(s)) { // Erase the reference and mark this invalid. // FIXME: add a way to just mutate the state. - pset.erase(valState); + pset.erase(s); pset.insert(State::getInvalid()); return; } @@ -297,7 +306,9 @@ void LifetimeCheckPass::killInPset(PSetType &pset, const State &valState) { // in the pmap with invalid. For example, if pmap is {(p1,{a}), (p2,{a'})}, // KILL(a') would invalidate only p2, and KILL(a) would invalidate both p1 and // p2. -void LifetimeCheckPass::kill(mlir::Value v) { +void LifetimeCheckPass::kill(const State &s) { + assert(s.hasValue() && "does not know how to kill other data types"); + mlir::Value v = s.getData(); for (auto &mapEntry : getPmap()) { auto ptr = mapEntry.first; @@ -308,12 +319,13 @@ void LifetimeCheckPass::kill(mlir::Value v) { // ... replace all occurrences of x and x' and x''. Start with the primes // so we first remove uses and then users. // - // FIXME: right now we only support x and x' auto &pset = mapEntry.second; - if (owners.count(v)) + + // FIXME: add x'', x''', etc... + if (s.isLocalValue() && owners.count(v)) killInPset(pset, State::getOwnedBy(v)); - killInPset(pset, State::getLocalValue(v)); + killInPset(pset, s); pmapInvalidHist[ptr] = std::make_pair(getEndLocForHist(*currScope), v); } @@ -329,7 +341,7 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { return; for (auto pointee : localScope->localValues) - Pass.kill(pointee); + Pass.kill(State::getLocalValue(pointee)); } void LifetimeCheckPass::checkBlock(Block &block) { From 16ee06722e083c84d94cc423399167f5df0d3ddd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 18 Oct 2022 22:02:08 -0700 Subject: [PATCH 0635/2301] [CIR][Lifetime][NFC] Rename function --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index e047e6a0daae..5422c61390a7 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -47,8 +47,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkOperatorStar(CallOp callOp); // Helpers - bool isCtorInitFromOwner(CallOp callOp, - const clang::CXXConstructorDecl *ctor); + bool isCtorInitPointerFromOwner(CallOp callOp, + const clang::CXXConstructorDecl *ctor); bool isNonConstUseOfOwner(CallOp callOp, const clang::CXXMethodDecl *m); struct Options { @@ -885,7 +885,7 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, // Example: // MyIntPointer::MyIntPointer(MyIntOwner const&)(%5, %4) // -bool LifetimeCheckPass::isCtorInitFromOwner( +bool LifetimeCheckPass::isCtorInitPointerFromOwner( CallOp callOp, const clang::CXXConstructorDecl *ctor) { if (callOp.getNumOperands() < 2) return false; @@ -939,7 +939,7 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, llvm_unreachable("NYI"); } - if (isCtorInitFromOwner(callOp, ctor)) { + if (isCtorInitPointerFromOwner(callOp, ctor)) { auto addr = callOp.getOperand(0); auto owner = callOp.getOperand(1); getPmap()[addr].clear(); From cfd18f8b99ee1c17aadaf51d19045816c8f02f7a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 18 Oct 2022 23:01:16 -0700 Subject: [PATCH 0636/2301] [CIR][Lifetime] Detect underlying changes in Owner types and invalidate tainted Ptr usage --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 57 ++++++++++++++++--- .../CIR/Transforms/lifetime-check-owner.cpp | 13 +++++ 2 files changed, 63 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 5422c61390a7..6038904155ee 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -168,7 +168,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Provides p1179's 'KILL' functionality. See implementation for more // information. - void kill(const State &s); + void kill(const State &s, std::optional killLoc = {}); void killInPset(PSetType &pset, const State &s); // Local pointers @@ -306,7 +306,8 @@ void LifetimeCheckPass::killInPset(PSetType &pset, const State &s) { // in the pmap with invalid. For example, if pmap is {(p1,{a}), (p2,{a'})}, // KILL(a') would invalidate only p2, and KILL(a) would invalidate both p1 and // p2. -void LifetimeCheckPass::kill(const State &s) { +void LifetimeCheckPass::kill(const State &s, + std::optional killLoc) { assert(s.hasValue() && "does not know how to kill other data types"); mlir::Value v = s.getData(); for (auto &mapEntry : getPmap()) { @@ -321,12 +322,23 @@ void LifetimeCheckPass::kill(const State &s) { // auto &pset = mapEntry.second; + // Record if pmap(ptr) is invalid already. + bool wasInvalid = pset.count(State::getInvalid()); + // FIXME: add x'', x''', etc... if (s.isLocalValue() && owners.count(v)) killInPset(pset, State::getOwnedBy(v)); killInPset(pset, s); - pmapInvalidHist[ptr] = std::make_pair(getEndLocForHist(*currScope), v); + + // If pset(ptr) was already invalid, do not polute the history. + if (!wasInvalid) { + // FIXME: support invalidation history and types. + if (!killLoc) + pmapInvalidHist[ptr] = std::make_pair(getEndLocForHist(*currScope), v); + else + pmapInvalidHist[ptr] = std::make_pair(killLoc, std::nullopt); + } } // Delete the local value from pmap, since its now gone. @@ -954,6 +966,14 @@ static bool isOperatorStar(const clang::CXXMethodDecl *m) { return m->getOverloadedOperator() == clang::OverloadedOperatorKind::OO_Star; } +static bool sinkUnsupportedOperator(const clang::CXXMethodDecl *m) { + if (!m->isOverloadedOperator()) + return false; + if (!isOperatorStar(m)) + llvm_unreachable("NYI"); + return false; +} + void LifetimeCheckPass::checkOperatorStar(CallOp callOp) { auto addr = callOp.getOperand(0); if (!ptrs.count(addr)) @@ -980,22 +1000,45 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { if (!methodDecl) return; - // TODO: only ctor init implemented, assign ops and others needed. if (auto ctor = dyn_cast(methodDecl)) return checkCtor(callOp, ctor); if (methodDecl->isMoveAssignmentOperator()) return checkMoveAssignment(callOp, methodDecl); + if (methodDecl->isCopyAssignmentOperator()) + llvm_unreachable("NYI"); if (isOperatorStar(methodDecl)) return checkOperatorStar(callOp); + if (sinkUnsupportedOperator(methodDecl)) + return; // For any other methods... // Non-const member call to a Owner invalidates any of its users. if (isNonConstUseOfOwner(callOp, methodDecl)) { - // auto addr = callOp.getOperand(0); - // TODO: kill(a') - llvm_unreachable("NYI"); + auto addr = callOp.getOperand(0); + // 2.4.2 - On every non-const use of a local Owner o: + // + // - For each entry e in pset(s): Remove e from pset(s), and if no other + // Owner’s pset contains only e, then KILL(e). + kill(State::getOwnedBy(addr), callOp.getLoc()); + + // - Set pset(o) = {o__N'}, where N is one higher than the highest + // previously used suffix. For example, initially pset(o) is {o__1'}, on + // o’s first non-const use pset(o) becomes {o__2'}, on o’s second non-const + // use pset(o) becomes {o__3'}, and so on. + // FIXME: for now we set pset(o) = { invalid } + auto &pset = getPmap()[addr]; + pset.clear(); + pset.insert(State::getInvalid()); + pmapInvalidHist[addr] = std::make_pair(callOp.getLoc(), std::nullopt); + return; } + + // Take a pset(Ptr) = { Ownr' } where Own got invalidated, this will become + // invalid access to Ptr if any of its methods are used. + auto addr = callOp.getOperand(0); + if (ptrs.count(addr)) + return checkPointerDeref(addr, callOp.getLoc()); } void LifetimeCheckPass::checkOperation(Operation *op) { diff --git a/clang/test/CIR/Transforms/lifetime-check-owner.cpp b/clang/test/CIR/Transforms/lifetime-check-owner.cpp index 7c61c37e6b50..9a588b9da64d 100644 --- a/clang/test/CIR/Transforms/lifetime-check-owner.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-owner.cpp @@ -3,7 +3,9 @@ struct [[gsl::Owner(int)]] MyIntOwner { int val; MyIntOwner(int v) : val(v) {} + void changeInt(int i); int &operator*(); + int read() const; }; struct [[gsl::Pointer(int)]] MyIntPointer { @@ -12,6 +14,7 @@ struct [[gsl::Pointer(int)]] MyIntPointer { MyIntPointer(const MyIntOwner &); int &operator*(); MyIntOwner toOwner(); + int read() { return *ptr; } }; void yolo() { @@ -23,4 +26,14 @@ void yolo() { } // expected-note {{pointee 'o' invalidated at end of scope}} *p = 4; // expected-warning {{use of invalid pointer 'p'}} // expected-remark@-1 {{pset => { invalid }}} +} + +void yolo2() { + MyIntPointer p; + MyIntOwner o(1); + p = o; + (void)o.read(); + o.changeInt(42); // expected-note {{uninitialized here}} + (void)p.read(); // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} } \ No newline at end of file From 86c0eadb6209bbcb01e6c78b598a89cdae48b8c4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 19 Oct 2022 10:20:39 -0700 Subject: [PATCH 0637/2301] [CIR][Lifetime][NFC] Clean up pass declaration a bit and move members around --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 64 +++++++++++++++---- 1 file changed, 50 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 6038904155ee..0ee9cecf4e9b 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -46,11 +46,18 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); void checkOperatorStar(CallOp callOp); - // Helpers + // Tracks current module. + ModuleOp theModule; + + // Helpers. bool isCtorInitPointerFromOwner(CallOp callOp, const clang::CXXConstructorDecl *ctor); bool isNonConstUseOfOwner(CallOp callOp, const clang::CXXMethodDecl *m); + /// + /// Pass options handling + /// --------------------- + struct Options { enum : unsigned { None = 0, @@ -97,6 +104,11 @@ struct LifetimeCheckPass : public LifetimeCheckBase { } } opts; + /// + /// State + /// ----- + + // Represents the state of an element in a pointer set (pset) struct State { using DataTy = enum { Invalid, @@ -147,15 +159,15 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void dump(llvm::raw_ostream &OS = llvm::errs()); - static State getInvalid() { return {}; } + static State getInvalid() { return {Invalid}; } static State getNullPtr() { return {NullPtr}; } - static State getLocalValue(mlir::Value v) { return {v}; } + static State getLocalValue(mlir::Value v) { return {v, LocalValue}; } static State getOwnedBy(mlir::Value v) { return {v, State::OwnedBy}; } }; - using PSetType = llvm::SmallSet; - // FIXME: this should be a ScopedHashTable for consistency. - using PMapType = llvm::DenseMap; + /// + /// Invalid and null history tracking + /// --------------------------------- using PMapInvalidHistType = llvm::DenseMap, @@ -166,6 +178,26 @@ struct LifetimeCheckPass : public LifetimeCheckBase { llvm::DenseMap>; PMapNullHistType pmapNullHist; + enum HistInvalidStyle { + EndOfScope, + NotInitialized, + MovedFrom, + NonConstUseOfOwner, + }; + + /// + /// Pointer Map and Pointer Set + /// --------------------------- + + using PSetType = llvm::SmallSet; + // FIXME: this should be a ScopedHashTable for consistency. + using PMapType = llvm::DenseMap; + + PMapType *currPmap = nullptr; + PMapType &getPmap() { return *currPmap; } + + void joinPmaps(SmallVectorImpl &pmaps); + // Provides p1179's 'KILL' functionality. See implementation for more // information. void kill(const State &s, std::optional killLoc = {}); @@ -177,6 +209,15 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Local owners SmallPtrSet owners; + // Useful helpers for debugging + void printPset(PSetType &pset, llvm::raw_ostream &OS = llvm::errs()); + LLVM_DUMP_METHOD void dumpPmap(PMapType &pmap); + LLVM_DUMP_METHOD void dumpCurrentPmap(); + + /// + /// Scope, context and guards + /// ------------------------- + // Represents the scope context for IR operations (cir.scope, cir.if, // then/else regions, etc). Tracks the declaration of variables in the current // local scope. @@ -241,19 +282,14 @@ struct LifetimeCheckPass : public LifetimeCheckBase { }; LexicalScopeContext *currScope = nullptr; - PMapType *currPmap = nullptr; - PMapType &getPmap() { return *currPmap; } - ModuleOp theModule; + /// + /// AST related + /// ----------- std::optional astCtx; void setASTContext(clang::ASTContext *c) { astCtx = c; } - - void joinPmaps(SmallVectorImpl &pmaps); - void printPset(PSetType &pset, llvm::raw_ostream &OS = llvm::errs()); - LLVM_DUMP_METHOD void dumpPmap(PMapType &pmap); - LLVM_DUMP_METHOD void dumpCurrentPmap(); }; } // namespace From 05e13761c5e1228c22f82ca60030ec4603ad39c7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 19 Oct 2022 13:36:02 -0700 Subject: [PATCH 0638/2301] [CIR][Lifetime] Rewrite part of invalid history tracking and give invalidated by non-const proper warning message --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 151 ++++++++++-------- .../CIR/Transforms/lifetime-check-owner.cpp | 2 +- 2 files changed, 88 insertions(+), 65 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 0ee9cecf4e9b..24e2e13b2cba 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -138,14 +138,12 @@ struct LifetimeCheckPass : public LifetimeCheckBase { if (val.getInt() == LocalValue && RHS.val.getInt() == LocalValue) return val.getPointer().getAsOpaquePointer() < RHS.val.getPointer().getAsOpaquePointer(); - else - return val.getInt() < RHS.val.getInt(); + return val.getInt() < RHS.val.getInt(); } bool operator==(const State &RHS) const { if (val.getInt() == LocalValue && RHS.val.getInt() == LocalValue) return val.getPointer() == RHS.val.getPointer(); - else - return val.getInt() == RHS.val.getInt(); + return val.getInt() == RHS.val.getInt(); } bool isLocalValue() const { return val.getInt() == LocalValue; } @@ -168,23 +166,36 @@ struct LifetimeCheckPass : public LifetimeCheckBase { /// /// Invalid and null history tracking /// --------------------------------- + enum InvalidStyle { + Unknown, + EndOfScope, + NotInitialized, + MovedFrom, + NonConstUseOfOwner, + }; - using PMapInvalidHistType = - llvm::DenseMap, - std::optional>>; + struct InvalidHistEntry { + InvalidStyle style = Unknown; + std::optional loc; + std::optional val; + InvalidHistEntry() = default; + InvalidHistEntry(InvalidStyle s, std::optional l, + std::optional v) + : style(s), loc(l), val(v) {} + }; + + using PMapInvalidHistType = llvm::DenseMap; PMapInvalidHistType pmapInvalidHist; + void addInvalidHist(mlir::Value ptr, InvalidStyle histStyle, + mlir::Location loc, std::optional val = {}) { + pmapInvalidHist[ptr] = InvalidHistEntry(histStyle, loc, val); + } + using PMapNullHistType = llvm::DenseMap>; PMapNullHistType pmapNullHist; - enum HistInvalidStyle { - EndOfScope, - NotInitialized, - MovedFrom, - NonConstUseOfOwner, - }; - /// /// Pointer Map and Pointer Set /// --------------------------- @@ -195,13 +206,27 @@ struct LifetimeCheckPass : public LifetimeCheckBase { PMapType *currPmap = nullptr; PMapType &getPmap() { return *currPmap; } + void markPsetInvalid(mlir::Value ptr, InvalidStyle histStyle, + mlir::Location loc, + std::optional extraVal = {}) { + auto &pset = getPmap()[ptr]; + + // If pset is already invalid, don't bother. + if (pset.count(State::getInvalid())) + return; + + // 2.3 - putting invalid into pset(x) is said to invalidate it + pset.insert(State::getInvalid()); + addInvalidHist(ptr, histStyle, loc, extraVal); + } void joinPmaps(SmallVectorImpl &pmaps); // Provides p1179's 'KILL' functionality. See implementation for more // information. - void kill(const State &s, std::optional killLoc = {}); - void killInPset(PSetType &pset, const State &s); + void kill(const State &s, InvalidStyle histStyle, mlir::Location loc); + void killInPset(mlir::Value ptrKey, const State &s, InvalidStyle histStyle, + mlir::Location loc, std::optional extraVal); // Local pointers SmallPtrSet ptrs; @@ -328,13 +353,13 @@ static Location getEndLocForHist(LifetimeCheckPass::LexicalScopeContext &lsc) { return getEndLocForHist(lsc.parent.get()); } -void LifetimeCheckPass::killInPset(PSetType &pset, const State &s) { +void LifetimeCheckPass::killInPset(mlir::Value ptrKey, const State &s, + InvalidStyle histStyle, mlir::Location loc, + std::optional extraVal) { + auto &pset = getPmap()[ptrKey]; if (pset.contains(s)) { - // Erase the reference and mark this invalid. - // FIXME: add a way to just mutate the state. pset.erase(s); - pset.insert(State::getInvalid()); - return; + markPsetInvalid(ptrKey, histStyle, loc, extraVal); } } @@ -342,10 +367,14 @@ void LifetimeCheckPass::killInPset(PSetType &pset, const State &s) { // in the pmap with invalid. For example, if pmap is {(p1,{a}), (p2,{a'})}, // KILL(a') would invalidate only p2, and KILL(a) would invalidate both p1 and // p2. -void LifetimeCheckPass::kill(const State &s, - std::optional killLoc) { +void LifetimeCheckPass::kill(const State &s, InvalidStyle histStyle, + mlir::Location loc) { assert(s.hasValue() && "does not know how to kill other data types"); mlir::Value v = s.getData(); + std::optional extraVal; + if (histStyle == InvalidStyle::EndOfScope) + extraVal = v; + for (auto &mapEntry : getPmap()) { auto ptr = mapEntry.first; @@ -356,29 +385,15 @@ void LifetimeCheckPass::kill(const State &s, // ... replace all occurrences of x and x' and x''. Start with the primes // so we first remove uses and then users. // - auto &pset = mapEntry.second; - - // Record if pmap(ptr) is invalid already. - bool wasInvalid = pset.count(State::getInvalid()); - // FIXME: add x'', x''', etc... if (s.isLocalValue() && owners.count(v)) - killInPset(pset, State::getOwnedBy(v)); - - killInPset(pset, s); - - // If pset(ptr) was already invalid, do not polute the history. - if (!wasInvalid) { - // FIXME: support invalidation history and types. - if (!killLoc) - pmapInvalidHist[ptr] = std::make_pair(getEndLocForHist(*currScope), v); - else - pmapInvalidHist[ptr] = std::make_pair(killLoc, std::nullopt); - } + killInPset(ptr, State::getOwnedBy(v), histStyle, loc, extraVal); + killInPset(ptr, s, histStyle, loc, extraVal); } - // Delete the local value from pmap, since its now gone. - getPmap().erase(v); + // Delete the local value from pmap, since its scope has ended. + if (histStyle == InvalidStyle::EndOfScope) + getPmap().erase(v); } void LifetimeCheckPass::LexicalScopeGuard::cleanup() { @@ -389,7 +404,8 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { return; for (auto pointee : localScope->localValues) - Pass.kill(State::getLocalValue(pointee)); + Pass.kill(State::getLocalValue(pointee), InvalidStyle::EndOfScope, + getEndLocForHist(*localScope)); } void LifetimeCheckPass::checkBlock(Block &block) { @@ -645,7 +661,7 @@ template bool isStructAndHasAttr(mlir::Type ty) { if (!ty.isa()) return false; auto sTy = ty.cast(); - auto recordDecl = sTy.getAst()->getAstDecl(); + const auto *recordDecl = sTy.getAst()->getAstDecl(); if (recordDecl->hasAttr()) return true; return false; @@ -742,8 +758,7 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { // 2.4.2 - When a non-parameter non-member Pointer p is declared, add // (p, {invalid}) to pmap. ptrs.insert(addr); - getPmap()[addr].insert(State::getInvalid()); - pmapInvalidHist[addr] = std::make_pair(allocaOp.getLoc(), std::nullopt); + markPsetInvalid(addr, InvalidStyle::NotInitialized, allocaOp.getLoc()); break; case TypeCategory::Owner: // 2.4.2 - When a local Owner x is declared, add (x, {x__1'}) to pmap. @@ -787,7 +802,7 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { }; auto data = storeOp.getValue(); - auto defOp = data.getDefiningOp(); + auto *defOp = data.getDefiningOp(); // Do not handle block arguments just yet. if (!defOp) @@ -875,15 +890,24 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, if (hasInvalid && opts.emitHistoryInvalid()) { assert(pmapInvalidHist.count(addr) && "expected invalid hist"); auto &info = pmapInvalidHist[addr]; - auto ¬e = info.first; - auto &pointee = info.second; - - if (pointee.has_value()) { - StringRef pointeeName = getVarNameFromValue(*pointee); - D.attachNote(note) << "pointee '" << pointeeName - << "' invalidated at end of scope"; - } else { - D.attachNote(note) << "uninitialized here"; + + switch (info.style) { + case InvalidStyle::NotInitialized: { + D.attachNote(info.loc) << "uninitialized here"; + break; + } + case InvalidStyle::EndOfScope: { + StringRef outOfScopeVarName = getVarNameFromValue(*info.val); + D.attachNote(info.loc) << "pointee '" << outOfScopeVarName + << "' invalidated at end of scope"; + break; + } + case InvalidStyle::NonConstUseOfOwner: { + D.attachNote(info.loc) << "invalidated by non-const use of owner type"; + break; + } + default: + llvm_unreachable("unknown history style"); } } @@ -898,7 +922,7 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, } const clang::CXXMethodDecl *getMethod(ModuleOp mod, StringRef name) { - auto global = mlir::SymbolTable::lookupSymbolIn(mod, name); + auto *global = mlir::SymbolTable::lookupSymbolIn(mod, name); assert(global && "expected to find symbol"); auto method = dyn_cast(global); if (!method) @@ -1032,11 +1056,11 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { if (callOp.getNumOperands() == 0) return; - auto methodDecl = getMethod(theModule, callOp.getCallee()); + const auto *methodDecl = getMethod(theModule, callOp.getCallee()); if (!methodDecl) return; - if (auto ctor = dyn_cast(methodDecl)) + if (const auto *ctor = dyn_cast(methodDecl)) return checkCtor(callOp, ctor); if (methodDecl->isMoveAssignmentOperator()) return checkMoveAssignment(callOp, methodDecl); @@ -1056,17 +1080,16 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // // - For each entry e in pset(s): Remove e from pset(s), and if no other // Owner’s pset contains only e, then KILL(e). - kill(State::getOwnedBy(addr), callOp.getLoc()); + kill(State::getOwnedBy(addr), InvalidStyle::NonConstUseOfOwner, + callOp.getLoc()); // - Set pset(o) = {o__N'}, where N is one higher than the highest // previously used suffix. For example, initially pset(o) is {o__1'}, on // o’s first non-const use pset(o) becomes {o__2'}, on o’s second non-const // use pset(o) becomes {o__3'}, and so on. // FIXME: for now we set pset(o) = { invalid } - auto &pset = getPmap()[addr]; - pset.clear(); - pset.insert(State::getInvalid()); - pmapInvalidHist[addr] = std::make_pair(callOp.getLoc(), std::nullopt); + // markPsetInvalid(addr, InvalidStyle::NonConstUseOfOwner, + // callOp.getLoc()); return; } diff --git a/clang/test/CIR/Transforms/lifetime-check-owner.cpp b/clang/test/CIR/Transforms/lifetime-check-owner.cpp index 9a588b9da64d..7f624ab1989a 100644 --- a/clang/test/CIR/Transforms/lifetime-check-owner.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-owner.cpp @@ -33,7 +33,7 @@ void yolo2() { MyIntOwner o(1); p = o; (void)o.read(); - o.changeInt(42); // expected-note {{uninitialized here}} + o.changeInt(42); // expected-note {{invalidated by non-const use of owner type}} (void)p.read(); // expected-warning {{use of invalid pointer 'p'}} // expected-remark@-1 {{pset => { invalid }}} } \ No newline at end of file From 4203439ad97d280064da44bea233f08fa426d6a7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 19 Oct 2022 14:17:27 -0700 Subject: [PATCH 0639/2301] [CIR][Lifetime] Add option to control the history display limit This is useful while investigating the previous invalidation history for a given pointer. --- clang/include/clang/CIR/Dialect/Passes.td | 4 +- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 81 +++++++++++-------- .../CIR/Transforms/lifetime-check-owner.cpp | 4 +- 3 files changed, 54 insertions(+), 35 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index a4562cf10bb4..ce95aea2ed29 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -42,7 +42,9 @@ def LifetimeCheck : Pass<"cir-lifetime-check"> { " Supported styles: {all|null|invalid}", "llvm::cl::ZeroOrMore">, ListOption<"remarksList", "remarks", "std::string", "List of remark styles to enable as part of diagnostics." - " Supported styles: {all|pset}", "llvm::cl::ZeroOrMore"> + " Supported styles: {all|pset}", "llvm::cl::ZeroOrMore">, + Option<"historyLimit", "history_limit", "unsigned", /*default=*/"1", + "Max amount of diagnostics to emit on pointer history"> ]; } diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 24e2e13b2cba..2f467abea558 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -49,11 +49,14 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Tracks current module. ModuleOp theModule; - // Helpers. + // Common helpers. bool isCtorInitPointerFromOwner(CallOp callOp, const clang::CXXConstructorDecl *ctor); bool isNonConstUseOfOwner(CallOp callOp, const clang::CXXMethodDecl *m); + // Diagnostic helpers. + void emitInvalidHistory(mlir::InFlightDiagnostic &D, mlir::Value histKey); + /// /// Pass options handling /// --------------------- @@ -71,6 +74,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { HistoryAll = 1 << 5, }; unsigned val = None; + unsigned histLimit = 1; void parseOptions(LifetimeCheckPass &pass) { for (auto &remark : pass.remarksList) { @@ -87,6 +91,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { .Case("all", HistoryAll) .Default(None); } + histLimit = pass.historyLimit; } bool emitRemarkAll() { return val & RemarkAll; } @@ -184,13 +189,15 @@ struct LifetimeCheckPass : public LifetimeCheckBase { : style(s), loc(l), val(v) {} }; - using PMapInvalidHistType = llvm::DenseMap; - PMapInvalidHistType pmapInvalidHist; + struct InvalidHist { + llvm::SmallVector entries; + void add(mlir::Value ptr, InvalidStyle histStyle, mlir::Location loc, + std::optional val = {}) { + entries.emplace_back(InvalidHistEntry(histStyle, loc, val)); + } + }; - void addInvalidHist(mlir::Value ptr, InvalidStyle histStyle, - mlir::Location loc, std::optional val = {}) { - pmapInvalidHist[ptr] = InvalidHistEntry(histStyle, loc, val); - } + llvm::DenseMap invalidHist; using PMapNullHistType = llvm::DenseMap>; @@ -217,7 +224,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // 2.3 - putting invalid into pset(x) is said to invalidate it pset.insert(State::getInvalid()); - addInvalidHist(ptr, histStyle, loc, extraVal); + invalidHist[ptr].add(ptr, histStyle, loc, extraVal); } void joinPmaps(SmallVectorImpl &pmaps); @@ -435,7 +442,7 @@ void LifetimeCheckPass::checkFunc(Operation *op) { if (currPmap) getPmap().clear(); pmapNullHist.clear(); - pmapInvalidHist.clear(); + invalidHist.clear(); // Add a new scope. Note that as part of the scope cleanup process // we apply section 2.3 KILL(x) functionality, turning relevant @@ -859,6 +866,37 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { checkPointerDeref(addr, loadOp.getLoc()); } +void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, + mlir::Value histKey) { + assert(invalidHist.count(histKey) && "expected invalid hist"); + auto &hist = invalidHist[histKey]; + unsigned limit = opts.histLimit; + + for (int lastIdx = hist.entries.size() - 1; limit > 0 && lastIdx >= 0; + lastIdx--, limit--) { + auto &info = hist.entries[lastIdx]; + + switch (info.style) { + case InvalidStyle::NotInitialized: { + D.attachNote(info.loc) << "uninitialized here"; + break; + } + case InvalidStyle::EndOfScope: { + StringRef outOfScopeVarName = getVarNameFromValue(*info.val); + D.attachNote(info.loc) << "pointee '" << outOfScopeVarName + << "' invalidated at end of scope"; + break; + } + case InvalidStyle::NonConstUseOfOwner: { + D.attachNote(info.loc) << "invalidated by non-const use of owner type"; + break; + } + default: + llvm_unreachable("unknown history style"); + } + } +} + void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc) { bool hasInvalid = getPmap()[addr].count(State::getInvalid()); @@ -887,29 +925,8 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, auto D = emitWarning(loc); D << "use of invalid pointer '" << varName << "'"; - if (hasInvalid && opts.emitHistoryInvalid()) { - assert(pmapInvalidHist.count(addr) && "expected invalid hist"); - auto &info = pmapInvalidHist[addr]; - - switch (info.style) { - case InvalidStyle::NotInitialized: { - D.attachNote(info.loc) << "uninitialized here"; - break; - } - case InvalidStyle::EndOfScope: { - StringRef outOfScopeVarName = getVarNameFromValue(*info.val); - D.attachNote(info.loc) << "pointee '" << outOfScopeVarName - << "' invalidated at end of scope"; - break; - } - case InvalidStyle::NonConstUseOfOwner: { - D.attachNote(info.loc) << "invalidated by non-const use of owner type"; - break; - } - default: - llvm_unreachable("unknown history style"); - } - } + if (hasInvalid && opts.emitHistoryInvalid()) + emitInvalidHistory(D, addr); if (hasNullptr && opts.emitHistoryNull()) { assert(pmapNullHist.count(addr) && "expected nullptr hist"); diff --git a/clang/test/CIR/Transforms/lifetime-check-owner.cpp b/clang/test/CIR/Transforms/lifetime-check-owner.cpp index 7f624ab1989a..cdfee6e2016d 100644 --- a/clang/test/CIR/Transforms/lifetime-check-owner.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-owner.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=all;remarks=all;history_limit=1" -clangir-verify-diagnostics -emit-cir %s -o %t.cir struct [[gsl::Owner(int)]] MyIntOwner { int val; @@ -36,4 +36,4 @@ void yolo2() { o.changeInt(42); // expected-note {{invalidated by non-const use of owner type}} (void)p.read(); // expected-warning {{use of invalid pointer 'p'}} // expected-remark@-1 {{pset => { invalid }}} -} \ No newline at end of file +} From 8f7839d6724725bbc07b1695663b786880ac882c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 19 Oct 2022 14:20:47 -0700 Subject: [PATCH 0640/2301] [CIR][Lifetime][NFC] Rename histStyle to invalidStyle --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 2f467abea558..3d9fedf7c7c7 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -191,9 +191,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { struct InvalidHist { llvm::SmallVector entries; - void add(mlir::Value ptr, InvalidStyle histStyle, mlir::Location loc, + void add(mlir::Value ptr, InvalidStyle invalidStyle, mlir::Location loc, std::optional val = {}) { - entries.emplace_back(InvalidHistEntry(histStyle, loc, val)); + entries.emplace_back(InvalidHistEntry(invalidStyle, loc, val)); } }; @@ -213,7 +213,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { PMapType *currPmap = nullptr; PMapType &getPmap() { return *currPmap; } - void markPsetInvalid(mlir::Value ptr, InvalidStyle histStyle, + void markPsetInvalid(mlir::Value ptr, InvalidStyle invalidStyle, mlir::Location loc, std::optional extraVal = {}) { auto &pset = getPmap()[ptr]; @@ -224,15 +224,15 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // 2.3 - putting invalid into pset(x) is said to invalidate it pset.insert(State::getInvalid()); - invalidHist[ptr].add(ptr, histStyle, loc, extraVal); + invalidHist[ptr].add(ptr, invalidStyle, loc, extraVal); } void joinPmaps(SmallVectorImpl &pmaps); // Provides p1179's 'KILL' functionality. See implementation for more // information. - void kill(const State &s, InvalidStyle histStyle, mlir::Location loc); - void killInPset(mlir::Value ptrKey, const State &s, InvalidStyle histStyle, + void kill(const State &s, InvalidStyle invalidStyle, mlir::Location loc); + void killInPset(mlir::Value ptrKey, const State &s, InvalidStyle invalidStyle, mlir::Location loc, std::optional extraVal); // Local pointers @@ -361,12 +361,13 @@ static Location getEndLocForHist(LifetimeCheckPass::LexicalScopeContext &lsc) { } void LifetimeCheckPass::killInPset(mlir::Value ptrKey, const State &s, - InvalidStyle histStyle, mlir::Location loc, + InvalidStyle invalidStyle, + mlir::Location loc, std::optional extraVal) { auto &pset = getPmap()[ptrKey]; if (pset.contains(s)) { pset.erase(s); - markPsetInvalid(ptrKey, histStyle, loc, extraVal); + markPsetInvalid(ptrKey, invalidStyle, loc, extraVal); } } @@ -374,12 +375,12 @@ void LifetimeCheckPass::killInPset(mlir::Value ptrKey, const State &s, // in the pmap with invalid. For example, if pmap is {(p1,{a}), (p2,{a'})}, // KILL(a') would invalidate only p2, and KILL(a) would invalidate both p1 and // p2. -void LifetimeCheckPass::kill(const State &s, InvalidStyle histStyle, +void LifetimeCheckPass::kill(const State &s, InvalidStyle invalidStyle, mlir::Location loc) { assert(s.hasValue() && "does not know how to kill other data types"); mlir::Value v = s.getData(); std::optional extraVal; - if (histStyle == InvalidStyle::EndOfScope) + if (invalidStyle == InvalidStyle::EndOfScope) extraVal = v; for (auto &mapEntry : getPmap()) { @@ -394,12 +395,12 @@ void LifetimeCheckPass::kill(const State &s, InvalidStyle histStyle, // // FIXME: add x'', x''', etc... if (s.isLocalValue() && owners.count(v)) - killInPset(ptr, State::getOwnedBy(v), histStyle, loc, extraVal); - killInPset(ptr, s, histStyle, loc, extraVal); + killInPset(ptr, State::getOwnedBy(v), invalidStyle, loc, extraVal); + killInPset(ptr, s, invalidStyle, loc, extraVal); } // Delete the local value from pmap, since its scope has ended. - if (histStyle == InvalidStyle::EndOfScope) + if (invalidStyle == InvalidStyle::EndOfScope) getPmap().erase(v); } From 8c37a683a70b5f46f0f1aa4b5211dfd97c2926a2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 19 Oct 2022 14:23:08 -0700 Subject: [PATCH 0641/2301] [CIR][Lifetime] Make sure owners and ptrs are properly tracked when the scope ends --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 3d9fedf7c7c7..aa67cdd29e30 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -400,8 +400,11 @@ void LifetimeCheckPass::kill(const State &s, InvalidStyle invalidStyle, } // Delete the local value from pmap, since its scope has ended. - if (invalidStyle == InvalidStyle::EndOfScope) + if (invalidStyle == InvalidStyle::EndOfScope) { + owners.erase(v); + ptrs.erase(v); getPmap().erase(v); + } } void LifetimeCheckPass::LexicalScopeGuard::cleanup() { From 0106a54996ff5d61c1738d24826720a48cf57766 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 19 Oct 2022 14:32:39 -0700 Subject: [PATCH 0642/2301] [CIR][Lifetime][NFC] Change owners from set to map and track current gen, but not use it just yet --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index aa67cdd29e30..3fe9b443216c 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -238,8 +238,18 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Local pointers SmallPtrSet ptrs; - // Local owners - SmallPtrSet owners; + // Local owners. We use a map instead of a set to track the current generation + // for this owner type internal pointee's. For instance, this allows tracking + // subsequent reuse of owner storage when a non-const use happens. + DenseMap owners; + void addOwner(mlir::Value o) { + assert(!owners.count(o) && "already tracked"); + owners[o] = 0; + } + void incOwner(mlir::Value o) { + assert(owners.count(o) && "entry expected"); + owners[o]++; + } // Useful helpers for debugging void printPset(PSetType &pset, llvm::raw_ostream &OS = llvm::errs()); @@ -773,7 +783,7 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { break; case TypeCategory::Owner: // 2.4.2 - When a local Owner x is declared, add (x, {x__1'}) to pmap. - owners.insert(addr); + addOwner(addr); getPmap()[addr].insert(State::getOwnedBy(addr)); currScope->localValues.push_back(addr); break; @@ -1096,19 +1106,19 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // Non-const member call to a Owner invalidates any of its users. if (isNonConstUseOfOwner(callOp, methodDecl)) { - auto addr = callOp.getOperand(0); + auto ownerAddr = callOp.getOperand(0); // 2.4.2 - On every non-const use of a local Owner o: // // - For each entry e in pset(s): Remove e from pset(s), and if no other // Owner’s pset contains only e, then KILL(e). - kill(State::getOwnedBy(addr), InvalidStyle::NonConstUseOfOwner, + kill(State::getOwnedBy(ownerAddr), InvalidStyle::NonConstUseOfOwner, callOp.getLoc()); // - Set pset(o) = {o__N'}, where N is one higher than the highest // previously used suffix. For example, initially pset(o) is {o__1'}, on // o’s first non-const use pset(o) becomes {o__2'}, on o’s second non-const // use pset(o) becomes {o__3'}, and so on. - // FIXME: for now we set pset(o) = { invalid } + incOwner(ownerAddr); // markPsetInvalid(addr, InvalidStyle::NonConstUseOfOwner, // callOp.getLoc()); return; From 45ec97a29a17cb14bb9d0884a79d9399f3879ad0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 19 Oct 2022 16:11:23 -0700 Subject: [PATCH 0643/2301] [CIR][Lifetime] Use ownedby generation and add more consistency checks --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 14 ++++++++------ clang/test/CIR/Transforms/lifetime-check-owner.cpp | 8 +++++++- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 3fe9b443216c..1f07634e3619 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -160,7 +160,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { return val.getPointer(); } - void dump(llvm::raw_ostream &OS = llvm::errs()); + void dump(llvm::raw_ostream &OS = llvm::errs(), int ownedGen = 0); static State getInvalid() { return {Invalid}; } static State getNullPtr() { return {NullPtr}; } @@ -1119,8 +1119,6 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // o’s first non-const use pset(o) becomes {o__2'}, on o’s second non-const // use pset(o) becomes {o__3'}, and so on. incOwner(ownerAddr); - // markPsetInvalid(addr, InvalidStyle::NonConstUseOfOwner, - // callOp.getLoc()); return; } @@ -1207,7 +1205,7 @@ void LifetimeCheckPass::LexicalScopeContext::dumpLocalValues() { llvm::errs() << "}\n"; } -void LifetimeCheckPass::State::dump(llvm::raw_ostream &OS) { +void LifetimeCheckPass::State::dump(llvm::raw_ostream &OS, int ownedGen) { switch (val.getInt()) { case Invalid: OS << "invalid"; @@ -1222,7 +1220,8 @@ void LifetimeCheckPass::State::dump(llvm::raw_ostream &OS) { OS << getVarNameFromValue(val.getPointer()); break; case OwnedBy: - OS << getVarNameFromValue(val.getPointer()) << "'"; + ownedGen++; // Start from 1. + OS << getVarNameFromValue(val.getPointer()) << "__" << ownedGen << "'"; break; default: llvm_unreachable("Not handled"); @@ -1233,7 +1232,10 @@ void LifetimeCheckPass::printPset(PSetType &pset, llvm::raw_ostream &OS) { OS << "{ "; auto size = pset.size(); for (auto s : pset) { - s.dump(OS); + int ownerGen = 0; + if (s.isOwnedBy()) + ownerGen = owners[s.getData()]; + s.dump(OS, ownerGen); size--; if (size > 0) OS << ", "; diff --git a/clang/test/CIR/Transforms/lifetime-check-owner.cpp b/clang/test/CIR/Transforms/lifetime-check-owner.cpp index cdfee6e2016d..0f1022df4dfb 100644 --- a/clang/test/CIR/Transforms/lifetime-check-owner.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-owner.cpp @@ -22,7 +22,7 @@ void yolo() { { MyIntOwner o(1); p = o; - *p = 3; // expected-remark {{pset => { o' }}} + *p = 3; // expected-remark {{pset => { o__1' }}} } // expected-note {{pointee 'o' invalidated at end of scope}} *p = 4; // expected-warning {{use of invalid pointer 'p'}} // expected-remark@-1 {{pset => { invalid }}} @@ -33,7 +33,13 @@ void yolo2() { MyIntOwner o(1); p = o; (void)o.read(); + (void)p.read(); // expected-remark {{pset => { o__1' }}} o.changeInt(42); // expected-note {{invalidated by non-const use of owner type}} (void)p.read(); // expected-warning {{use of invalid pointer 'p'}} // expected-remark@-1 {{pset => { invalid }}} + p = o; + (void)p.read(); // expected-remark {{pset => { o__2' }}} + o.changeInt(33); // expected-note {{invalidated by non-const use of owner type}} + (void)p.read(); // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} } From 7b49410d9be697212cba381d9b3b4e4b99d02278 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 19 Oct 2022 16:18:55 -0700 Subject: [PATCH 0644/2301] [CIR][Lifetime] Add one more test for mulitple pointers and one owner --- clang/test/CIR/Transforms/lifetime-check-owner.cpp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/clang/test/CIR/Transforms/lifetime-check-owner.cpp b/clang/test/CIR/Transforms/lifetime-check-owner.cpp index 0f1022df4dfb..98d9d8ec3029 100644 --- a/clang/test/CIR/Transforms/lifetime-check-owner.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-owner.cpp @@ -43,3 +43,17 @@ void yolo2() { (void)p.read(); // expected-warning {{use of invalid pointer 'p'}} // expected-remark@-1 {{pset => { invalid }}} } + +void yolo3() { + MyIntPointer p, q; + MyIntOwner o(1); + p = o; + q = o; + (void)q.read(); // expected-remark {{pset => { o__1' }}} + (void)p.read(); // expected-remark {{pset => { o__1' }}} + o.changeInt(42); // expected-note {{invalidated by non-const use of owner type}} + (void)p.read(); // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} + (void)q.read(); // expected-warning {{use of invalid pointer 'q'}} + // expected-remark@-1 {{pset => { invalid }}} +} \ No newline at end of file From 33c5f8d00dde27fcd4cca776359f3041623c113f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 19 Oct 2022 17:45:38 -0700 Subject: [PATCH 0645/2301] [CIR][Lifetime] Add support for checkCopyAssignment and fix bug in state comparison --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 58 +++++++++++++------ .../CIR/Transforms/lifetime-check-owner.cpp | 12 ++++ 2 files changed, 51 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 1f07634e3619..d13b4b6e417e 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -44,6 +44,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor); void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); + void checkCopyAssignment(CallOp callOp, const clang::CXXMethodDecl *m); + void checkNonConstUseOfOwner(CallOp callOp); void checkOperatorStar(CallOp callOp); // Tracks current module. @@ -140,13 +142,13 @@ struct LifetimeCheckPass : public LifetimeCheckBase { bool operator<(const State &RHS) const { // FIXME: note that this makes the ordering non-deterministic, do // we really care? - if (val.getInt() == LocalValue && RHS.val.getInt() == LocalValue) + if (hasValue() && RHS.hasValue()) return val.getPointer().getAsOpaquePointer() < RHS.val.getPointer().getAsOpaquePointer(); return val.getInt() < RHS.val.getInt(); } bool operator==(const State &RHS) const { - if (val.getInt() == LocalValue && RHS.val.getInt() == LocalValue) + if (hasValue() && RHS.hasValue()) return val.getPointer() == RHS.val.getPointer(); return val.getInt() == RHS.val.getInt(); } @@ -968,6 +970,7 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, auto src = callOp.getOperand(1); // Currently only handle move assignments between pointer categories. + // TODO: add Owner category if (!(ptrs.count(dst) && ptrs.count(src))) return; @@ -982,6 +985,20 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, getPmap()[src].insert(State::getInvalid()); } +void LifetimeCheckPass::checkCopyAssignment(CallOp callOp, + const clang::CXXMethodDecl *m) { + // MyIntOwner::operator=(MyIntOwner&)(%dst, %src) + auto dst = callOp.getOperand(0); + auto src = callOp.getOperand(1); + + // Currently only handle copy assignments between owner categories. + // TODO: add Ptr category + if (!(owners.count(dst) && owners.count(src))) + return; + + checkNonConstUseOfOwner(callOp); +} + // User defined ctors that initialize from owner types is one // way of tracking owned pointers. // @@ -1083,6 +1100,23 @@ bool LifetimeCheckPass::isNonConstUseOfOwner(CallOp callOp, return false; } +void LifetimeCheckPass::checkNonConstUseOfOwner(CallOp callOp) { + auto ownerAddr = callOp.getOperand(0); + // 2.4.2 - On every non-const use of a local Owner o: + // + // - For each entry e in pset(s): Remove e from pset(s), and if no other + // Owner’s pset contains only e, then KILL(e). + kill(State::getOwnedBy(ownerAddr), InvalidStyle::NonConstUseOfOwner, + callOp.getLoc()); + + // - Set pset(o) = {o__N'}, where N is one higher than the highest + // previously used suffix. For example, initially pset(o) is {o__1'}, on + // o’s first non-const use pset(o) becomes {o__2'}, on o’s second non-const + // use pset(o) becomes {o__3'}, and so on. + incOwner(ownerAddr); + return; +} + void LifetimeCheckPass::checkCall(CallOp callOp) { if (callOp.getNumOperands() == 0) return; @@ -1096,7 +1130,7 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { if (methodDecl->isMoveAssignmentOperator()) return checkMoveAssignment(callOp, methodDecl); if (methodDecl->isCopyAssignmentOperator()) - llvm_unreachable("NYI"); + return checkCopyAssignment(callOp, methodDecl); if (isOperatorStar(methodDecl)) return checkOperatorStar(callOp); if (sinkUnsupportedOperator(methodDecl)) @@ -1105,22 +1139,8 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // For any other methods... // Non-const member call to a Owner invalidates any of its users. - if (isNonConstUseOfOwner(callOp, methodDecl)) { - auto ownerAddr = callOp.getOperand(0); - // 2.4.2 - On every non-const use of a local Owner o: - // - // - For each entry e in pset(s): Remove e from pset(s), and if no other - // Owner’s pset contains only e, then KILL(e). - kill(State::getOwnedBy(ownerAddr), InvalidStyle::NonConstUseOfOwner, - callOp.getLoc()); - - // - Set pset(o) = {o__N'}, where N is one higher than the highest - // previously used suffix. For example, initially pset(o) is {o__1'}, on - // o’s first non-const use pset(o) becomes {o__2'}, on o’s second non-const - // use pset(o) becomes {o__3'}, and so on. - incOwner(ownerAddr); - return; - } + if (isNonConstUseOfOwner(callOp, methodDecl)) + return checkNonConstUseOfOwner(callOp); // Take a pset(Ptr) = { Ownr' } where Own got invalidated, this will become // invalid access to Ptr if any of its methods are used. diff --git a/clang/test/CIR/Transforms/lifetime-check-owner.cpp b/clang/test/CIR/Transforms/lifetime-check-owner.cpp index 98d9d8ec3029..23643c821884 100644 --- a/clang/test/CIR/Transforms/lifetime-check-owner.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-owner.cpp @@ -56,4 +56,16 @@ void yolo3() { // expected-remark@-1 {{pset => { invalid }}} (void)q.read(); // expected-warning {{use of invalid pointer 'q'}} // expected-remark@-1 {{pset => { invalid }}} +} + +void yolo4() { + MyIntOwner o0(1); + MyIntOwner o1(2); + MyIntPointer p{o0}, q{o1}; + p.read(); // expected-remark {{pset => { o0__1' }}} + q.read(); // expected-remark {{pset => { o1__1' }}} + o0 = o1; // expected-note {{invalidated by non-const use of owner type}} + p.read(); // expected-warning {{use of invalid pointer 'p'}} + // expected-remark@-1 {{pset => { invalid }}} + q.read(); // expected-remark {{pset => { o1__1' }}} } \ No newline at end of file From 58191bb79f9ffdc1a138c235f86f8b606372829b Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Mon, 17 Oct 2022 23:52:47 -0700 Subject: [PATCH 0646/2301] [CIR] Fix parsing of CIR dialect's struct type The parsing function wasn't considering the commas separating the types inside the angle brackets, that are used by printing. Added a test to round trip a !cir.struct type to cover this code path. Related to llvm/clangir#9. --- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 7 +++++-- clang/test/CIR/IR/struct.cir | 12 ++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/IR/struct.cir diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index aeb43bfba5d4..1f19ff7579a8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -77,9 +77,12 @@ Type StructType::parse(mlir::AsmParser &parser) { if (parser.parseString(&typeName)) return Type(); llvm::SmallVector members; - Type nextMember; - while (mlir::succeeded(parser.parseType(nextMember))) + while (mlir::succeeded(parser.parseOptionalComma())) { + Type nextMember; + if (parser.parseType(nextMember)) + return Type(); members.push_back(nextMember); + } if (parser.parseGreater()) return Type(); return get(parser.getContext(), members, typeName); diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir new file mode 100644 index 000000000000..49aab5938869 --- /dev/null +++ b/clang/test/CIR/IR/struct.cir @@ -0,0 +1,12 @@ +// RUN: cir-tool %s | cir-tool | FileCheck %s +// XFAIL: * + +module { + cir.func @structs() { + %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] + cir.return + } +} + +// CHECK: cir.func @structs() { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] From d5a7ffae029c0e84d19f1924b68a71237b2b00a8 Mon Sep 17 00:00:00 2001 From: Caio Oliveira Date: Tue, 18 Oct 2022 00:05:04 -0700 Subject: [PATCH 0647/2301] [CIR] Fix invalid alias name for dialect's struct types For those types, the mangled name started with a number which is not a valid start character for alias types in MLIR. Prefix these aliases with "ty_". Relates to llvm/clangir#9. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 2 +- clang/test/CIR/CodeGen/String.cpp | 30 ++++++------ clang/test/CIR/CodeGen/assign-operator.cpp | 48 +++++++++---------- clang/test/CIR/CodeGen/ctor-alias.cpp | 10 ++-- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 12 ++--- clang/test/CIR/CodeGen/ctor.cpp | 24 +++++----- clang/test/CIR/CodeGen/lambda.cpp | 4 +- clang/test/CIR/CodeGen/lvalue-refs.cpp | 8 ++-- clang/test/CIR/CodeGen/struct.c | 9 ++-- clang/test/CIR/CodeGen/struct.cpp | 38 +++++++-------- 11 files changed, 95 insertions(+), 94 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e0a5bca25f75..c3ff7afdd3dd 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1124,9 +1124,9 @@ def StructElementAddr : CIR_Op<"struct_element_addr"> { Example: ```mlir - !22struct2EBar22 = type !cir.struct<"struct.Bar", i32, i8> + !ty_22struct2EBar22 = type !cir.struct<"struct.Bar", i32, i8> ... - %0 = cir.alloca !22struct2EBar22, cir.ptr + %0 = cir.alloca !ty_22struct2EBar22, cir.ptr ... %1 = cir.struct_element_addr %0, "Bar.a" %2 = cir.load %1 : cir.ptr , int diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 115ba4f94b13..fb396ec975c2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -41,7 +41,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { AliasResult getAlias(Type type, raw_ostream &os) const final { if (auto structType = type.dyn_cast()) { - os << structType.getTypeName(); + os << "ty_" << structType.getTypeName(); return AliasResult::OverridableAlias; } diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 8ef8166ef441..275e713e3806 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -18,20 +18,20 @@ void test() { } // CHECK: cir.func linkonce_odr @_ZN6StringC2Ev -// CHECK-NEXT: %0 = cir.alloca !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 // CHECK-NEXT: %2 = "cir.struct_element_addr"(%0) <{member_name = "storage"}> // CHECK-NEXT: %3 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %4 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr +// CHECK-NEXT: %4 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr // CHECK-NEXT: %5 = cir.cst(0 : i32) : i32 // CHECK-NEXT: %6 = cir.cast(integral, %5 : i32), i64 // CHECK-NEXT: cir.store %6, %4 : i64, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2Ei -// CHECK-NEXT: %0 = cir.alloca !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["size", init] // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: cir.store %arg1, %1 @@ -39,7 +39,7 @@ void test() { // CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "storage"}> // CHECK-NEXT: %4 = cir.cst(#cir.null : !cir.ptr) // CHECK-NEXT: cir.store %4, %3 -// CHECK-NEXT: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr +// CHECK-NEXT: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: %7 = cir.cast(integral, %6 : i32), i64 // CHECK-NEXT: cir.store %7, %5 : i64, cir.ptr @@ -47,27 +47,27 @@ void test() { // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "storage"}> : (!cir.ptr>) -> !cir.ptr> +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "storage"}> : (!cir.ptr>) -> !cir.ptr> // CHECK-NEXT: %4 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.return // CHECK: cir.func linkonce_odr @_ZN6StringC1EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: %3 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return // CHECK: cir.func @_Z4testv() { -// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () -// CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, i32) -> () -// CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, i32) -> () +// CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 8b788416b426..f129d78a8cc8 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -12,13 +12,13 @@ struct String { // StringView::StringView(String const&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewC2ERK6String - // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} - // CHECK: cir.store %arg0, %0 : !cir.ptr - // CHECK: cir.store %arg1, %1 : !cir.ptr - // CHECK: %2 = cir.load %0 : cir.ptr > + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} + // CHECK: cir.store %arg0, %0 : !cir.ptr + // CHECK: cir.store %arg1, %1 : !cir.ptr + // CHECK: %2 = cir.load %0 : cir.ptr > // CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "size"}> - // CHECK: %4 = cir.load %1 : cir.ptr > + // CHECK: %4 = cir.load %1 : cir.ptr > // CHECK: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> // CHECK: %6 = cir.load %5 : cir.ptr , i64 // CHECK: cir.store %6, %3 : i64, cir.ptr @@ -26,25 +26,25 @@ struct String { // CHECK: } // DISABLE: cir.func linkonce_odr @_ZN10StringViewC2ERK6String - // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // StringView::operator=(StringView&&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewaSEOS_ - // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["", init] {alignment = 8 : i64} - // CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} - // CHECK: cir.store %arg0, %0 : !cir.ptr - // CHECK: cir.store %arg1, %1 : !cir.ptr - // CHECK: %3 = cir.load deref %0 : cir.ptr > - // CHECK: %4 = cir.load %1 : cir.ptr > + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["", init] {alignment = 8 : i64} + // CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} + // CHECK: cir.store %arg0, %0 : !cir.ptr + // CHECK: cir.store %arg1, %1 : !cir.ptr + // CHECK: %3 = cir.load deref %0 : cir.ptr > + // CHECK: %4 = cir.load %1 : cir.ptr > // CHECK: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> // CHECK: %6 = cir.load %5 : cir.ptr , i64 // CHECK: %7 = "cir.struct_element_addr"(%0) <{member_name = "size"}> // CHECK: cir.store %6, %7 : i64, cir.ptr - // CHECK: cir.store %3, %2 : !cir.ptr - // CHECK: %8 = cir.load %2 : cir.ptr > - // CHECK: cir.return %8 : !cir.ptr + // CHECK: cir.store %3, %2 : !cir.ptr + // CHECK: %8 = cir.load %2 : cir.ptr > + // CHECK: cir.return %8 : !cir.ptr // CHECK: } // DISABLE: cir.func @_ZN10StringViewaSEOS_ @@ -68,16 +68,16 @@ int main() { // CHECK: cir.func @main() -> i32 { // CHECK: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !_22struct2EStringView22, cir.ptr , ["sv"] {alignment = 8 : i64} -// CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () +// CHECK: %1 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["sv"] {alignment = 8 : i64} +// CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () // CHECK: cir.scope { -// CHECK: %3 = cir.alloca !_22struct2EString22, cir.ptr , ["s"] {alignment = 8 : i64} -// CHECK: %4 = cir.alloca !_22struct2EStringView22, cir.ptr , ["ref.tmp"] {alignment = 8 : i64} +// CHECK: %3 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s"] {alignment = 8 : i64} +// CHECK: %4 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["ref.tmp"] {alignment = 8 : i64} // CHECK: %5 = cir.get_global @".str" : cir.ptr > // CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_ZN6StringC2EPKc(%3, %6) : (!cir.ptr, !cir.ptr) -> () -// CHECK: cir.call @_ZN10StringViewC2ERK6String(%4, %3) : (!cir.ptr, !cir.ptr) -> () -// CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %4) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: cir.call @_ZN6StringC2EPKc(%3, %6) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN10StringViewC2ERK6String(%4, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %4) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } // CHECK: %2 = cir.load %0 : cir.ptr , i32 // CHECK: cir.return %2 : i32 diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp index 555bfd3032e4..a70da7f5fecb 100644 --- a/clang/test/CIR/CodeGen/ctor-alias.cpp +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -9,18 +9,18 @@ void t() { } // CHECK: cir.func linkonce_odr @_ZN11DummyStringC2EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NOT: cir.fun @_ZN11DummyStringC1EPKc // CHECK: cir.func @_Z1tv -// CHECK-NEXT: %0 = cir.alloca !_22struct2EDummyString22, cir.ptr , ["s4"] {alignment = 1 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EDummyString22, cir.ptr , ["s4"] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index b6d4888468be..dd7980366ca7 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -6,8 +6,8 @@ struct String { long size; String(const String &s) : size{s.size} {} // CHECK: cir.func linkonce_odr @_ZN6StringC2ERKS_ -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 // CHECK: cir.store %arg1, %1 // CHECK: %2 = cir.load %0 @@ -28,10 +28,10 @@ void foo() { // FIXME: s1 shouldn't be uninitialized. // cir.func @_Z3foov() { - // %0 = cir.alloca !_22struct2EString22, cir.ptr , ["s"] {alignment = 8 : i64} - // %1 = cir.alloca !_22struct2EString22, cir.ptr , ["s1"] {alignment = 8 : i64} - // cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () - // cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () + // %0 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s"] {alignment = 8 : i64} + // %1 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s1"] {alignment = 8 : i64} + // cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () + // cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () // cir.return // } } diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index 8948842014de..672af732430e 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -11,22 +11,22 @@ void baz() { Struk s; } -// CHECK: !_22struct2EStruk22 = !cir.struct<"struct.Struk", i32> +// CHECK: !ty_22struct2EStruk22 = !cir.struct<"struct.Struk", i32> -// CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return -// CHECK: cir.func linkonce_odr @_ZN5StrukC1Ev(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () +// CHECK: cir.func linkonce_odr @_ZN5StrukC1Ev(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () // CHECK-NEXT: cir.return // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !_22struct2EStruk22, cir.ptr , ["s"] {alignment = 4 : i64} -// CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () +// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EStruk22, cir.ptr , ["s"] {alignment = 4 : i64} +// CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index d1ec3e4fc8a3..99a57c0afcf4 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -5,7 +5,7 @@ void fn() { auto a = [](){}; } -// CHECK: !_22class2Eanon22 = !cir.struct<"class.anon", i8> +// CHECK: !ty_22class2Eanon22 = !cir.struct<"class.anon", i8> // CHECK-NEXT: module // CHECK-NEXT: cir.func @_Z2fnv() -// CHECK-NEXT: %0 = cir.alloca !_22class2Eanon22, cir.ptr , ["a"] +// CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon22, cir.ptr , ["a"] diff --git a/clang/test/CIR/CodeGen/lvalue-refs.cpp b/clang/test/CIR/CodeGen/lvalue-refs.cpp index 89e0f6038afc..d4afc507d343 100644 --- a/clang/test/CIR/CodeGen/lvalue-refs.cpp +++ b/clang/test/CIR/CodeGen/lvalue-refs.cpp @@ -6,8 +6,8 @@ struct String { void split(String &S) {} -// CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["S", init] +// CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["S", init] void foo() { String s; @@ -15,5 +15,5 @@ void foo() { } // CHECK: cir.func @_Z3foov() { -// CHECK: %0 = cir.alloca !_22struct2EString22, cir.ptr , ["s"] -// CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () \ No newline at end of file +// CHECK: %0 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s"] +// CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 5a7c2753da95..c9eb9be8d130 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * struct Bar { int a; @@ -17,12 +18,12 @@ void baz() { struct Foo f; } -// CHECK: !_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> -// CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !_22struct2EBar22> +// CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> +// CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> // CHECK-NEXT: module { // CHECK-NEXT: cir.func @baz() { -// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 3da9b459866e..44c4bd9e1ea7 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -24,32 +24,32 @@ void baz() { Foo f; } -// CHECK: !_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> -// CHECK-NEXT: !_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> +// CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> +// CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> -// CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : i32, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : i32, cir.ptr -// CHECK-NEXT: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: cir.store %4, %2 : i32, cir.ptr // CHECK-NEXT: %5 = cir.load %2 : cir.ptr , i32 @@ -57,14 +57,14 @@ void baz() { // CHECK-NEXT: } // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["result", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca !_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} -// CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () +// CHECK-NEXT: %2 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} +// CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () // CHECK-NEXT: %3 = cir.cst(4 : i32) : i32 -// CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, i32) -> () +// CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, i32) -> () // CHECK-NEXT: %4 = cir.cst(4 : i32) : i32 -// CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, i32) -> i32 +// CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, i32) -> i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } From ba68a03f1c2d2e06b47bb9e0224174ffd03cdd23 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 19 Oct 2022 21:56:53 -0400 Subject: [PATCH 0648/2301] [CIR][NFC] Formatting CIRDialect.cpp --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 2b3a1f06eb8a..a4f6e74b3f37 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -627,7 +627,8 @@ class ScalarExprEmitter : public StmtVisitor { buildCompoundAssign(const CompoundAssignOperator *E, mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &)); - // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM codegen. + // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM + // codegen. QualType getPromotionType(QualType Ty) { if (auto *CT = Ty->getAs()) { llvm_unreachable("NYI"); From 0a939daf25b06e557fc5ea7a6fa94a09426b1ce2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 19 Oct 2022 21:56:25 -0400 Subject: [PATCH 0649/2301] [CIR] Add support for asserting against constrained FP --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 24fafe874377..4cacdbc9e484 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -9,6 +9,7 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H #define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/FPEnv.h" #include "mlir/IR/Builders.h" @@ -19,12 +20,29 @@ namespace cir { class CIRGenFunction; class CIRGenBuilderTy : public mlir::OpBuilder { + bool IsFPConstrained = false; fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict; llvm::RoundingMode DefaultConstrainedRounding = llvm::RoundingMode::Dynamic; public: CIRGenBuilderTy(mlir::MLIRContext &C) : mlir::OpBuilder(&C) {} + /// Enable/Disable use of constrained floating point math. When enabled the + /// CreateF() calls instead create constrained floating point intrinsic + /// calls. Fast math flags are unaffected by this setting. + void setIsFPConstrained(bool IsCon) { + if (IsCon) + llvm_unreachable("Constrained FP NYI"); + IsFPConstrained = IsCon; + } + + /// Query for the use of constrained floating point math + bool getIsFPConstrained() { + if (IsFPConstrained) + llvm_unreachable("Constrained FP NYI"); + return IsFPConstrained; + } + /// Set the exception handling to be used with constrained floating point void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) { #ifndef NDEBUG From c39f63ada51b6af8743702ccd9701b8aa46cd556 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 19 Oct 2022 21:57:33 -0400 Subject: [PATCH 0650/2301] [CIR] Change the Builder's type on ScalarExprEmitter and CIRGenFunction These were subclassed earlier and thus these should changed accordingly. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index a4f6e74b3f37..fde6fabc8edc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -27,11 +27,11 @@ namespace { class ScalarExprEmitter : public StmtVisitor { CIRGenFunction &CGF; - mlir::OpBuilder &Builder; + CIRGenBuilderTy &Builder; bool IgnoreResultAssign; public: - ScalarExprEmitter(CIRGenFunction &cgf, mlir::OpBuilder &builder, + ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder, bool ira = false) : CGF(cgf), Builder(builder), IgnoreResultAssign(ira) {} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 797f3249b77f..91dc4fd85dbb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -360,7 +360,7 @@ class CIRGenFunction { clang::ASTContext &getContext() const; - mlir::OpBuilder &getBuilder() { return builder; } + CIRGenBuilderTy &getBuilder() { return builder; } CIRGenModule &getCIRGenModule() { return CGM; } From d07dce75b87e687c5ed31bc5ffd25052895b5aea Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 19 Oct 2022 21:58:46 -0400 Subject: [PATCH 0651/2301] [CIR] Add a new CastKid for floating conversions --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 ++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index c3ff7afdd3dd..bda6fbb36058 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -45,12 +45,13 @@ def CK_IntegralToBoolean : I32EnumAttrCase<"int_to_bool", 1>; def CK_ArrayToPointerDecay : I32EnumAttrCase<"array_to_ptrdecay", 2>; def CK_IntegralCast : I32EnumAttrCase<"integral", 3>; def CK_BitCast : I32EnumAttrCase<"bitcast", 4>; +def CK_FloatingCast : I32EnumAttrCase<"floating", 5>; def CastKind : I32EnumAttr< "CastKind", "cast kind", [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast, - CK_BitCast]> { + CK_BitCast, CK_FloatingCast]> { let cppNamespace = "::mlir::cir"; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index fb396ec975c2..0bc7cbff24f2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -246,6 +246,12 @@ LogicalResult CastOp::verify() { return emitOpError() << "requires !cir.ptr type for source and result"; return success(); } + case cir::CastKind::floating: { + if (!srcType.dyn_cast() || + !resType.dyn_cast()) + return emitOpError() << "requries floating for source and result"; + return success(); + } } llvm_unreachable("Unknown CastOp kind?"); From b7838724e65a9c61f1c033ed41c4aaef785402da Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 19 Oct 2022 22:01:53 -0400 Subject: [PATCH 0652/2301] [CIR] Add support for emitting floating point extensions Implement a handful of functions reponsible for emitting floating point extensions. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 ++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 14 +++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 4cacdbc9e484..431108c1dfcd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -72,6 +72,14 @@ class CIRGenBuilderTy : public mlir::OpBuilder { llvm::RoundingMode getDefaultConstrainedRounding() { return DefaultConstrainedRounding; } + + mlir::Value CreateFPExt(mlir::Value V, mlir::Type DestType) { + if (getIsFPConstrained()) + llvm_unreachable("constrainedfp NYI"); + + return create(V.getLoc(), DestType, + mlir::cir::CastKind::floating, V); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index fde6fabc8edc..0cc1941e8d74 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -94,7 +94,10 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitFloatingLiteral(const FloatingLiteral *E) { - llvm_unreachable("NYI"); + mlir::Type Ty = CGF.getCIRType(E->getType()); + return Builder.create( + CGF.getLoc(E->getExprLoc()), Ty, + Builder.getFloatAttr(Ty, E->getValue())); } mlir::Value VisitCharacterLiteral(const CharacterLiteral *E) { llvm_unreachable("NYI"); @@ -1178,10 +1181,11 @@ mlir::Value ScalarExprEmitter::buildScalarCast( llvm_unreachable("NYI"); } - // if (DstElementTy.getTypeID() < SrcElementTy.getTypeID()) - // llvm_unreachable("NYI"); - - llvm_unreachable("NYI"); + auto FloatDstTy = DstElementTy.cast(); + auto FloatSrcTy = SrcElementTy.cast(); + if (FloatDstTy.getWidth() < FloatSrcTy.getWidth()) + llvm_unreachable("truncation NYI"); + return Builder.CreateFPExt(Src, DstTy); } LValue From 240445adcd358d79c9174430a3544c6a3e65b849 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 5 Oct 2022 00:47:40 -0400 Subject: [PATCH 0653/2301] [CIR] Add a simple test case for local double literal This already worked prior, but we didn't have a test for it. So just add one here. --- clang/test/CIR/CodeGen/lalg.c | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 clang/test/CIR/CodeGen/lalg.c diff --git a/clang/test/CIR/CodeGen/lalg.c b/clang/test/CIR/CodeGen/lalg.c new file mode 100644 index 000000000000..789a13136750 --- /dev/null +++ b/clang/test/CIR/CodeGen/lalg.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o - | FileCheck %s + +double dot() { + double result = 0.0; + return result; +} + +// CHECK: %1 = cir.alloca f64, cir.ptr , ["result", init] +// CHECK-NEXT: %2 = cir.cst(0.000000e+00 : f64) : f64 +// CHECK-NEXT: cir.store %2, %1 : f64, cir.ptr From 92e126bd967553931dd1fb1dfb3ab1035ba430d5 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 19 Oct 2022 22:22:31 -0400 Subject: [PATCH 0654/2301] [CIR] Add a test for casting a float literal to a double --- clang/test/CIR/CodeGen/lalg.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/clang/test/CIR/CodeGen/lalg.c b/clang/test/CIR/CodeGen/lalg.c index 789a13136750..c5cf626d3441 100644 --- a/clang/test/CIR/CodeGen/lalg.c +++ b/clang/test/CIR/CodeGen/lalg.c @@ -1,10 +1,15 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o - | FileCheck %s double dot() { - double result = 0.0; - return result; + double x = 0.0; + double y = 0.0f; + return x; } -// CHECK: %1 = cir.alloca f64, cir.ptr , ["result", init] -// CHECK-NEXT: %2 = cir.cst(0.000000e+00 : f64) : f64 -// CHECK-NEXT: cir.store %2, %1 : f64, cir.ptr +// CHECK: %1 = cir.alloca f64, cir.ptr , ["x", init] +// CHECK-NEXT: %2 = cir.alloca f64, cir.ptr , ["y", init] +// CHECK-NEXT: %3 = cir.cst(0.000000e+00 : f64) : f64 +// CHECK-NEXT: cir.store %3, %1 : f64, cir.ptr +// CHECK-NEXT: %4 = cir.cst(0.000000e+00 : f32) : f32 +// CHECK-NEXT: %5 = cir.cast(floating, %4 : f32), f64 +// CHECK-NEXT: cir.store %5, %2 : f64, cir.ptr From ca7efce7bd657f8b75b64e8720ba79d300a175cf Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 19 Oct 2022 22:39:57 -0400 Subject: [PATCH 0655/2301] [CIR] Add a simple double multiplication expression and test for it --- clang/test/CIR/CodeGen/lalg.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/clang/test/CIR/CodeGen/lalg.c b/clang/test/CIR/CodeGen/lalg.c index c5cf626d3441..6d723cc9e732 100644 --- a/clang/test/CIR/CodeGen/lalg.c +++ b/clang/test/CIR/CodeGen/lalg.c @@ -3,13 +3,18 @@ double dot() { double x = 0.0; double y = 0.0f; - return x; + double result = x * y; + return result; } // CHECK: %1 = cir.alloca f64, cir.ptr , ["x", init] // CHECK-NEXT: %2 = cir.alloca f64, cir.ptr , ["y", init] -// CHECK-NEXT: %3 = cir.cst(0.000000e+00 : f64) : f64 -// CHECK-NEXT: cir.store %3, %1 : f64, cir.ptr -// CHECK-NEXT: %4 = cir.cst(0.000000e+00 : f32) : f32 -// CHECK-NEXT: %5 = cir.cast(floating, %4 : f32), f64 -// CHECK-NEXT: cir.store %5, %2 : f64, cir.ptr +// CHECK-NEXT: %3 = cir.alloca f64, cir.ptr , ["result", init] +// CHECK-NEXT: %4 = cir.cst(0.000000e+00 : f64) : f64 +// CHECK-NEXT: cir.store %4, %1 : f64, cir.ptr +// CHECK-NEXT: %5 = cir.cst(0.000000e+00 : f32) : f32 +// CHECK-NEXT: %6 = cir.cast(floating, %5 : f32), f64 +// CHECK-NEXT: cir.store %6, %2 : f64, cir.ptr +// CHECK-NEXT: %7 = cir.load %1 : cir.ptr , f64 +// CHECK-NEXT: %8 = cir.load %2 : cir.ptr , f64 +// CHECK-NEXT: %9 = cir.binop(mul, %7, %8) : f64 From 05c5303dc40ef7e8b3dbb69cc465681b9603e06d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 20 Oct 2022 12:29:39 -0700 Subject: [PATCH 0656/2301] [CIR][Lifetime] Update call to use the right interface for invalidation --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index d13b4b6e417e..4f24c1625c82 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -977,12 +977,11 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, // Note that the current pattern here usually comes from a xvalue in src // where all the initialization is done, and this move assignment is // where we finally materialize it back to the original pointer category. - // TODO: should CIR ops retain xvalue information somehow? getPmap()[dst] = getPmap()[src]; - // TODO: should this be null? or should we swap dst/src pset state? - // For now just consider moved-from state as invalid. - getPmap()[src].clear(); - getPmap()[src].insert(State::getInvalid()); + + // 2.4.2 - It is an error to use a moved-from object. + // To that intent we mark src's pset with invalid. + markPsetInvalid(src, InvalidStyle::MovedFrom, callOp.getLoc()); } void LifetimeCheckPass::checkCopyAssignment(CallOp callOp, From cb0d0b2caa0020c1f456e41b3cbfc5b984e326c5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 20 Oct 2022 13:12:47 -0700 Subject: [PATCH 0657/2301] [CIR][Lifetime] Add FIXME for other free functions handling --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 4f24c1625c82..67202a63ae56 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1121,6 +1121,11 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { return; const auto *methodDecl = getMethod(theModule, callOp.getCallee()); + + // FIXME: + // Handle free functions and other methods that use non-const + // Owners parameters, those should also invalidate the necessary + // pointers. if (!methodDecl) return; From f145f64f418e078013663bfb808fa372aac6b649 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 20 Oct 2022 15:26:30 -0700 Subject: [PATCH 0658/2301] [CIR][Lifetime] Handle move assignment for owner types --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 33 ++++++++++++------- .../CIR/Transforms/lifetime-check-string.cpp | 33 +++++++++++++++++++ 2 files changed, 55 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/Transforms/lifetime-check-string.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 67202a63ae56..8be4a2224429 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -965,23 +965,34 @@ const clang::CXXMethodDecl *getMethod(ModuleOp mod, StringRef name) { void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m) { - // MyIntPointer::operator=(MyIntPointer&&)(%dst, %src) + // MyPointer::operator=(MyPointer&&)(%dst, %src) + // or + // MyOwner::operator=(MyOwner&&)(%dst, %src) auto dst = callOp.getOperand(0); auto src = callOp.getOperand(1); - // Currently only handle move assignments between pointer categories. - // TODO: add Owner category - if (!(ptrs.count(dst) && ptrs.count(src))) + // Move assignments between pointer categories. + if (ptrs.count(dst) && ptrs.count(src)) { + // Note that the current pattern here usually comes from a xvalue in src + // where all the initialization is done, and this move assignment is + // where we finally materialize it back to the original pointer category. + getPmap()[dst] = getPmap()[src]; + + // 2.4.2 - It is an error to use a moved-from object. + // To that intent we mark src's pset with invalid. + markPsetInvalid(src, InvalidStyle::MovedFrom, callOp.getLoc()); return; + } - // Note that the current pattern here usually comes from a xvalue in src - // where all the initialization is done, and this move assignment is - // where we finally materialize it back to the original pointer category. - getPmap()[dst] = getPmap()[src]; + // Copy assignments between pointer categories. + if (owners.count(dst) && owners.count(src)) { + // Handle as a non const use of owner, invalidating pointers. + checkNonConstUseOfOwner(callOp); - // 2.4.2 - It is an error to use a moved-from object. - // To that intent we mark src's pset with invalid. - markPsetInvalid(src, InvalidStyle::MovedFrom, callOp.getLoc()); + // 2.4.2 - It is an error to use a moved-from object. + // To that intent we mark src's pset with invalid. + markPsetInvalid(src, InvalidStyle::MovedFrom, callOp.getLoc()); + } } void LifetimeCheckPass::checkCopyAssignment(CallOp callOp, diff --git a/clang/test/CIR/Transforms/lifetime-check-string.cpp b/clang/test/CIR/Transforms/lifetime-check-string.cpp new file mode 100644 index 000000000000..87ee47da8d1c --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-string.cpp @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +int strlen(char const *); + +struct [[gsl::Owner(char *)]] String { + long size; + long capacity; + const char *storage; + + String() : size{0}, capacity{0} {} + String(char const *s) : size{strlen(s)}, capacity{size}, storage{s} {} +}; + +struct [[gsl::Pointer(int)]] StringView { + long size; + const char *storage; + + StringView(const String &s) : size{s.size}, storage{s.storage} {} + StringView() : size{0}, storage{nullptr} {} + int getSize() const; +}; + +void lifetime_example() { + StringView sv; + String name = "abcdefghijklmnop"; + sv = name; + (void)sv.getSize(); // expected-remark {{pset => { name__1' }}} + name = "frobozz"; // expected-note {{invalidated by non-const use of owner type}} + (void)sv.getSize(); // expected-warning {{use of invalid pointer 'sv'}} + // expected-remark@-1 {{pset => { invalid }}} + sv = name; + (void)sv.getSize(); // expected-remark {{pset => { name__2' }}} +} \ No newline at end of file From 4baa00070698cf28f26de73a29db57d6c31590b5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 20 Oct 2022 15:48:52 -0700 Subject: [PATCH 0659/2301] [CIR][Lifetime] Handle copy assignment for pointer types --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 13 ++++++++----- .../CIR/Transforms/lifetime-check-string.cpp | 18 +++++++++++++++++- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 8be4a2224429..2738bd473951 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1001,12 +1001,15 @@ void LifetimeCheckPass::checkCopyAssignment(CallOp callOp, auto dst = callOp.getOperand(0); auto src = callOp.getOperand(1); - // Currently only handle copy assignments between owner categories. - // TODO: add Ptr category - if (!(owners.count(dst) && owners.count(src))) - return; + // Copy assignment between owner categories. + if (owners.count(dst) && owners.count(src)) + return checkNonConstUseOfOwner(callOp); - checkNonConstUseOfOwner(callOp); + // Copy assignment between pointer categories. + if (ptrs.count(dst) && ptrs.count(src)) { + getPmap()[dst] = getPmap()[src]; + return; + } } // User defined ctors that initialize from owner types is one diff --git a/clang/test/CIR/Transforms/lifetime-check-string.cpp b/clang/test/CIR/Transforms/lifetime-check-string.cpp index 87ee47da8d1c..76b42fa24078 100644 --- a/clang/test/CIR/Transforms/lifetime-check-string.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-string.cpp @@ -20,7 +20,7 @@ struct [[gsl::Pointer(int)]] StringView { int getSize() const; }; -void lifetime_example() { +void sv0() { StringView sv; String name = "abcdefghijklmnop"; sv = name; @@ -30,4 +30,20 @@ void lifetime_example() { // expected-remark@-1 {{pset => { invalid }}} sv = name; (void)sv.getSize(); // expected-remark {{pset => { name__2' }}} +} + +void sv1() { + StringView sv, sv_other; + String name = "abcdefghijklmnop"; + sv = name; + sv_other = sv; + (void)sv.getSize(); // expected-remark {{pset => { name__1' }}} + (void)sv_other.getSize(); // expected-remark {{pset => { name__1' }}} + name = "frobozz"; // expected-note {{invalidated by non-const use of owner type}} + (void)sv.getSize(); // expected-warning {{use of invalid pointer 'sv'}} + // expected-remark@-1 {{pset => { invalid }}} + (void)sv_other.getSize(); // expected-warning {{use of invalid pointer 'sv_other'}} + // expected-remark@-1 {{pset => { invalid }}} + sv = name; + (void)sv.getSize(); // expected-remark {{pset => { name__2' }}} } \ No newline at end of file From 2a98a506adb254a47ae9eb3f9554c32df10e3105 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 20 Oct 2022 16:59:36 -0700 Subject: [PATCH 0660/2301] [CIR][Lifetime] Generalize the handling of operators --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 48 ++++++++++--------- .../CIR/Transforms/lifetime-check-string.cpp | 19 +++++++- 2 files changed, 42 insertions(+), 25 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 2738bd473951..90e6ae4dc9d3 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -46,7 +46,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); void checkCopyAssignment(CallOp callOp, const clang::CXXMethodDecl *m); void checkNonConstUseOfOwner(CallOp callOp); - void checkOperatorStar(CallOp callOp); + void checkOperators(CallOp callOp, const clang::CXXMethodDecl *m); // Tracks current module. ModuleOp theModule; @@ -1081,26 +1081,30 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, } } -static bool isOperatorStar(const clang::CXXMethodDecl *m) { - if (!m->isOverloadedOperator()) - return false; - return m->getOverloadedOperator() == clang::OverloadedOperatorKind::OO_Star; -} - -static bool sinkUnsupportedOperator(const clang::CXXMethodDecl *m) { - if (!m->isOverloadedOperator()) - return false; - if (!isOperatorStar(m)) - llvm_unreachable("NYI"); - return false; -} - -void LifetimeCheckPass::checkOperatorStar(CallOp callOp) { +void LifetimeCheckPass::checkOperators(CallOp callOp, + const clang::CXXMethodDecl *m) { auto addr = callOp.getOperand(0); - if (!ptrs.count(addr)) - return; + if (owners.count(addr)) { + // const access to the owner is fine. + if (m->isConst()) + return; + // TODO: this is a place where we can hook in some idiom recocgnition + // so we don't need to use actual source code annotation to make assumptions + // on methods we understand and know to behave nicely. + // + // In P1179, section 2.5.7.12, the use of [[gsl::lifetime_const]] is + // suggested, but it's not part of clang (will it ever?) + return checkNonConstUseOfOwner(callOp); + } + + if (ptrs.count(addr)) { + // The assumption is that method calls on pointer types should trigger + // deref checking. + checkPointerDeref(addr, callOp.getLoc()); + } - checkPointerDeref(addr, callOp.getLoc()); + // FIXME: we also need to look at operators from non owner or pointer + // types that could be using Owner/Pointer types as parameters. } bool LifetimeCheckPass::isNonConstUseOfOwner(CallOp callOp, @@ -1149,10 +1153,8 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { return checkMoveAssignment(callOp, methodDecl); if (methodDecl->isCopyAssignmentOperator()) return checkCopyAssignment(callOp, methodDecl); - if (isOperatorStar(methodDecl)) - return checkOperatorStar(callOp); - if (sinkUnsupportedOperator(methodDecl)) - return; + if (methodDecl->isOverloadedOperator()) + return checkOperators(callOp, methodDecl); // For any other methods... diff --git a/clang/test/CIR/Transforms/lifetime-check-string.cpp b/clang/test/CIR/Transforms/lifetime-check-string.cpp index 76b42fa24078..07182ce6c6e1 100644 --- a/clang/test/CIR/Transforms/lifetime-check-string.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-string.cpp @@ -6,7 +6,7 @@ struct [[gsl::Owner(char *)]] String { long size; long capacity; const char *storage; - + char operator[](int); String() : size{0}, capacity{0} {} String(char const *s) : size{strlen(s)}, capacity{size}, storage{s} {} }; @@ -14,7 +14,7 @@ struct [[gsl::Owner(char *)]] String { struct [[gsl::Pointer(int)]] StringView { long size; const char *storage; - + char operator[](int); StringView(const String &s) : size{s.size}, storage{s.storage} {} StringView() : size{0}, storage{nullptr} {} int getSize() const; @@ -46,4 +46,19 @@ void sv1() { // expected-remark@-1 {{pset => { invalid }}} sv = name; (void)sv.getSize(); // expected-remark {{pset => { name__2' }}} +} + +void sv2() { + StringView sv; + String name = "abcdefghijklmnop"; + sv = name; + char read0 = sv[0]; // expected-remark {{pset => { name__1' }}} + name = "frobozz"; // expected-note {{invalidated by non-const use of owner type}} + char read1 = sv[0]; // expected-warning {{use of invalid pointer 'sv'}} + // expected-remark@-1 {{pset => { invalid }}} + sv = name; + char read2 = sv[0]; // expected-remark {{pset => { name__2' }}} + char read3 = name[1]; // expected-note {{invalidated by non-const use of owner type}} + char read4 = sv[1]; // expected-warning {{use of invalid pointer 'sv'}} + // expected-remark@-1 {{pset => { invalid }}} } \ No newline at end of file From 72eae0a7a6391e96f16418da3181ba3b1f7b1ed7 Mon Sep 17 00:00:00 2001 From: YingChi Long Date: Thu, 20 Oct 2022 02:11:59 +0800 Subject: [PATCH 0661/2301] [CIR] fix circular include dependency NFC --- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index 84ef7292f423..8f377cafef73 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -9,8 +9,6 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENRECORDLAYOUT_H #define LLVM_CLANG_LIB_CIR_CIRGENRECORDLAYOUT_H -#include "CIRGenTypes.h" - #include "clang/AST/Decl.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" From a8e81cb9be2b6ead7a23fd923d231056957702e7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 21 Oct 2022 13:55:58 -0700 Subject: [PATCH 0662/2301] [CIR][Lifetime] Be more strict with checking owner/pointer types and add argument handling - It's obvious now what and where method/function are handled, filling more gaps for invalidation. - Conservative approach on argument handling for now, still need to fully implement 2.5 from p1179. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 80 +++++++++++++++---- .../CIR/Transforms/lifetime-check-string.cpp | 23 ++++++ 2 files changed, 87 insertions(+), 16 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 90e6ae4dc9d3..02d46315cf34 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -45,8 +45,11 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor); void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); void checkCopyAssignment(CallOp callOp, const clang::CXXMethodDecl *m); - void checkNonConstUseOfOwner(CallOp callOp); + void checkNonConstUseOfOwner(mlir::Value ownerAddr, mlir::Location loc); void checkOperators(CallOp callOp, const clang::CXXMethodDecl *m); + void checkOtherMethodsAndFunctions(CallOp callOp, + const clang::CXXMethodDecl *m); + void checkForOwnerAndPointerArguments(CallOp callOp, unsigned firstArgIdx); // Tracks current module. ModuleOp theModule; @@ -55,6 +58,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { bool isCtorInitPointerFromOwner(CallOp callOp, const clang::CXXConstructorDecl *ctor); bool isNonConstUseOfOwner(CallOp callOp, const clang::CXXMethodDecl *m); + bool isOwnerOrPointerClassMethod(mlir::Value firstParam, + const clang::CXXMethodDecl *m); // Diagnostic helpers. void emitInvalidHistory(mlir::InFlightDiagnostic &D, mlir::Value histKey); @@ -987,7 +992,7 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, // Copy assignments between pointer categories. if (owners.count(dst) && owners.count(src)) { // Handle as a non const use of owner, invalidating pointers. - checkNonConstUseOfOwner(callOp); + checkNonConstUseOfOwner(dst, callOp.getLoc()); // 2.4.2 - It is an error to use a moved-from object. // To that intent we mark src's pset with invalid. @@ -1003,7 +1008,7 @@ void LifetimeCheckPass::checkCopyAssignment(CallOp callOp, // Copy assignment between owner categories. if (owners.count(dst) && owners.count(src)) - return checkNonConstUseOfOwner(callOp); + return checkNonConstUseOfOwner(dst, callOp.getLoc()); // Copy assignment between pointer categories. if (ptrs.count(dst) && ptrs.count(src)) { @@ -1094,7 +1099,7 @@ void LifetimeCheckPass::checkOperators(CallOp callOp, // // In P1179, section 2.5.7.12, the use of [[gsl::lifetime_const]] is // suggested, but it's not part of clang (will it ever?) - return checkNonConstUseOfOwner(callOp); + return checkNonConstUseOfOwner(addr, callOp.getLoc()); } if (ptrs.count(addr)) { @@ -1117,14 +1122,13 @@ bool LifetimeCheckPass::isNonConstUseOfOwner(CallOp callOp, return false; } -void LifetimeCheckPass::checkNonConstUseOfOwner(CallOp callOp) { - auto ownerAddr = callOp.getOperand(0); +void LifetimeCheckPass::checkNonConstUseOfOwner(mlir::Value ownerAddr, + mlir::Location loc) { // 2.4.2 - On every non-const use of a local Owner o: // // - For each entry e in pset(s): Remove e from pset(s), and if no other // Owner’s pset contains only e, then KILL(e). - kill(State::getOwnedBy(ownerAddr), InvalidStyle::NonConstUseOfOwner, - callOp.getLoc()); + kill(State::getOwnedBy(ownerAddr), InvalidStyle::NonConstUseOfOwner, loc); // - Set pset(o) = {o__N'}, where N is one higher than the highest // previously used suffix. For example, initially pset(o) is {o__1'}, on @@ -1134,19 +1138,63 @@ void LifetimeCheckPass::checkNonConstUseOfOwner(CallOp callOp) { return; } +void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, + unsigned firstArgIdx) { + auto numOperands = callOp.getNumOperands(); + if (firstArgIdx >= numOperands) + return; + + llvm::SmallSetVector ownersToInvalidate, ptrsToDeref; + for (unsigned i = firstArgIdx, e = numOperands; i != e; ++i) { + auto arg = callOp.getOperand(i); + // FIXME: apply p1179 rules as described in 2.5. Very conservative for now: + // + // - Owners: always invalidate. + // - Pointers: always check for deref. + // + // FIXME: even before 2.5 we should only invalidate non-const param types. + if (owners.count(arg)) + ownersToInvalidate.insert(arg); + if (ptrs.count(arg)) + ptrsToDeref.insert(arg); + } + + // FIXME: CIR should track source info on the passed args, so we can get + // accurate location for why the invalidation happens. + for (auto o : ownersToInvalidate) + checkNonConstUseOfOwner(o, callOp.getLoc()); + for (auto p : ptrsToDeref) + checkPointerDeref(p, callOp.getLoc()); +} + +void LifetimeCheckPass::checkOtherMethodsAndFunctions( + CallOp callOp, const clang::CXXMethodDecl *m) { + unsigned firstArgIdx = 0; + if (m) // Skip 'this' pointer + firstArgIdx++; + checkForOwnerAndPointerArguments(callOp, firstArgIdx); +} + +bool LifetimeCheckPass::isOwnerOrPointerClassMethod( + mlir::Value firstParam, const clang::CXXMethodDecl *m) { + // For the sake of analysis, these behave like regular functions + if (!m || m->isStatic()) + return false; + if (owners.count(firstParam) || ptrs.count(firstParam)) + return true; + return false; +} + void LifetimeCheckPass::checkCall(CallOp callOp) { if (callOp.getNumOperands() == 0) return; const auto *methodDecl = getMethod(theModule, callOp.getCallee()); + if (!isOwnerOrPointerClassMethod(callOp.getOperand(0), methodDecl)) + return checkOtherMethodsAndFunctions(callOp, methodDecl); - // FIXME: - // Handle free functions and other methods that use non-const - // Owners parameters, those should also invalidate the necessary - // pointers. - if (!methodDecl) - return; - + // From this point on only owner and pointer class methods handling, + // starting from special methods. if (const auto *ctor = dyn_cast(methodDecl)) return checkCtor(callOp, ctor); if (methodDecl->isMoveAssignmentOperator()) @@ -1160,7 +1208,7 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // Non-const member call to a Owner invalidates any of its users. if (isNonConstUseOfOwner(callOp, methodDecl)) - return checkNonConstUseOfOwner(callOp); + return checkNonConstUseOfOwner(callOp.getOperand(0), callOp.getLoc()); // Take a pset(Ptr) = { Ownr' } where Own got invalidated, this will become // invalid access to Ptr if any of its methods are used. diff --git a/clang/test/CIR/Transforms/lifetime-check-string.cpp b/clang/test/CIR/Transforms/lifetime-check-string.cpp index 07182ce6c6e1..6455f0b8e96b 100644 --- a/clang/test/CIR/Transforms/lifetime-check-string.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-string.cpp @@ -61,4 +61,27 @@ void sv2() { char read3 = name[1]; // expected-note {{invalidated by non-const use of owner type}} char read4 = sv[1]; // expected-warning {{use of invalid pointer 'sv'}} // expected-remark@-1 {{pset => { invalid }}} +} + +class Stream { + public: + Stream& operator<<(char); + Stream& operator<<(const StringView &); + // FIXME: conservative for now, but do not invalidate const Owners? + Stream& operator<<(const String &); +}; + +void sv3() { + Stream cout; + StringView sv; + String name = "abcdefghijklmnop"; + sv = name; + cout << sv; // expected-remark {{pset => { name__1' }}} + name = "frobozz"; // expected-note {{invalidated by non-const use of owner type}} + cout << sv[2]; // expected-warning {{use of invalid pointer 'sv'}} + sv = name; // expected-remark@-1 {{pset => { invalid }}} + cout << sv; // expected-remark {{pset => { name__2' }}} + cout << name; // expected-note {{invalidated by non-const use of owner type}} + cout << sv; // expected-warning {{use of invalid pointer 'sv'}} + // expected-remark@-1 {{pset => { invalid }}} } \ No newline at end of file From 868fa4dd740602c9c4bb737a5cb447553706b99a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 24 Oct 2022 16:09:13 -0700 Subject: [PATCH 0663/2301] [CIR][CIRGen] Start the handling of Decl::Namespace --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 19 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 3 +++ 2 files changed, 22 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index eb92e44b5f9c..06c43b3f7da0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1017,6 +1017,20 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, castStringLiteralToDefaultAddressSpace(*this, GV.getSymNameAttr())); } +void CIRGenModule::buildDeclContext(const DeclContext *DC) { + for (auto *I : DC->decls()) { + // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope + // are themselves considered "top-level", so EmitTopLevelDecl on an + // ObjCImplDecl does not recursively visit them. We need to do that in + // case they're nested inside another construct (LinkageSpecDecl / + // ExportDecl) that does stop them from being considered "top-level". + if (auto *OID = dyn_cast(I)) + llvm_unreachable("NYI"); + + buildTopLevelDecl(I); + } +} + // Emit code for a single top level declaration. void CIRGenModule::buildTopLevelDecl(Decl *decl) { // Ignore dependent declarations @@ -1045,6 +1059,11 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { // EmitGlobal(HD); break; + // C++ Decls + case Decl::Namespace: + buildDeclContext(cast(decl)); + break; + case Decl::CXXMethod: case Decl::Function: buildGlobal(cast(decl)); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 456433776895..98b862651dd5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -321,6 +321,9 @@ class CIRGenModule { GetAddrOfGlobal(clang::GlobalDecl GD, ForDefinition_t IsForDefinition = NotForDefinition); + // C++ related functions. + void buildDeclContext(const DeclContext *DC); + llvm::StringRef getMangledName(clang::GlobalDecl GD); // Make sure that this type is translated. From bcc1b634b21654fa160ed0eb46c84f8750afae3c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 24 Oct 2022 16:27:42 -0700 Subject: [PATCH 0664/2301] [CIR][CIRGen] Handle Decl::ClassTemplateSpecialization This unlocks coro promise types (see added test) --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 14 +++++++---- clang/test/CIR/CodeGen/coro-task.cpp | 35 ++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/coro-task.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 06c43b3f7da0..e82886e8c703 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1059,16 +1059,20 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { // EmitGlobal(HD); break; - // C++ Decls - case Decl::Namespace: - buildDeclContext(cast(decl)); - break; - case Decl::CXXMethod: case Decl::Function: buildGlobal(cast(decl)); assert(!codeGenOpts.CoverageMapping && "Coverage Mapping NYI"); break; + // C++ Decls + case Decl::Namespace: + buildDeclContext(cast(decl)); + break; + case Decl::ClassTemplateSpecialization: { + // const auto *Spec = cast(decl); + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + } + [[fallthrough]]; case Decl::CXXRecord: { CXXRecordDecl *crd = cast(decl); // TODO: Handle debug info as CodeGenModule.cpp does diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp new file mode 100644 index 000000000000..fd57b5c94c16 --- /dev/null +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +namespace std { + +template +struct coroutine_traits { using promise_type = typename Ret::promise_type; }; + +template +struct coroutine_handle { + static coroutine_handle from_address(void *) noexcept; +}; +template <> +struct coroutine_handle { + template + coroutine_handle(coroutine_handle) noexcept; + static coroutine_handle from_address(void *); +}; + +struct suspend_always { + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} +}; + +struct suspend_never { + bool await_ready() noexcept { return true; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} +}; + +} // namespace std + +// CHECK: module { +// CHECK-NEXT: } \ No newline at end of file From 5006d1ef5d68e5ad4234aa97b2a1ee449ecae762 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 24 Oct 2022 16:49:35 -0700 Subject: [PATCH 0665/2301] [CIR][CIRGen] Handle more toplevel decls that have no codegen (or depend on debug emission) --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 31 ++++++++++-- clang/test/CIR/CodeGen/coro-task.cpp | 65 ++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index e82886e8c703..427cd867958c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1081,14 +1081,37 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { buildTopLevelDecl(childDecl); break; } + // No code generation needed. + case Decl::UsingShadow: + case Decl::ClassTemplate: + case Decl::VarTemplate: + case Decl::Concept: + case Decl::VarTemplatePartialSpecialization: + case Decl::FunctionTemplate: + case Decl::TypeAliasTemplate: + case Decl::Block: + case Decl::Empty: + case Decl::Binding: + break; + case Decl::Using: // using X; [C++] + case Decl::UsingEnum: // using enum X; [C++] + case Decl::NamespaceAlias: + case Decl::UsingDirective: // using namespace X; [C++] + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + break; case Decl::CXXConstructor: getCXXABI().buildCXXConstructors(cast(decl)); break; + + case Decl::StaticAssert: + // Nothing to do. + break; + + case Decl::Typedef: + case Decl::TypeAlias: // using foo = bar; [C++11] case Decl::Record: - // There's nothing to do here, we emit everything pertaining to `Record`s - // lazily. - // TODO: handle debug info here? See clang's - // CodeGenModule::EmitTopLevelDecl + case Decl::Enum: + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); break; } } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index fd57b5c94c16..fee8d7538d2e 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -31,5 +31,70 @@ struct suspend_never { } // namespace std +namespace folly { +namespace coro { + +using std::suspend_always; +using std::suspend_never; +using std::coroutine_handle; + +using SemiFuture = int; + +template +struct Task { + struct promise_type { + Task get_return_object() noexcept; + suspend_always initial_suspend() noexcept; + suspend_always final_suspend() noexcept; + void return_value(T); + void unhandled_exception(); + auto yield_value(Task) noexcept { return final_suspend(); } + }; + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + T await_resume(); +}; + +template<> +struct Task { + struct promise_type { + Task get_return_object() noexcept; + suspend_always initial_suspend() noexcept; + suspend_always final_suspend() noexcept; + void return_void() noexcept; + void unhandled_exception() noexcept; + auto yield_value(Task) noexcept { return final_suspend(); } + }; + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} + SemiFuture semi(); +}; + +struct blocking_wait_fn { + template + T operator()(Task&& awaitable) const { + return T(); + } +}; + +inline constexpr blocking_wait_fn blocking_wait{}; +static constexpr blocking_wait_fn const& blockingWait = blocking_wait; + +template +Task collectAllRange(Task* awaitable); + +template +Task collectAll(SemiAwaitables&&... awaitables); + +struct co_invoke_fn { + template + Task operator()(F&& f, A&&... a) const { + return Task(); + } +}; + +}} // namespace folly::coro + // CHECK: module { // CHECK-NEXT: } \ No newline at end of file From 1c29955cd9df9ac8d01d7f378fb4fe45dea38f16 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 25 Oct 2022 11:54:28 -0700 Subject: [PATCH 0666/2301] [CIR] Add #cir.zero to use with global initialization --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 14 ++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 +++++++ clang/test/CIR/IR/invalid.cir | 6 ++++++ 3 files changed, 27 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 66532763cbb2..ed31519c05d1 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -40,6 +40,20 @@ def NullAttr : CIR_Attr<"Null", "null", [TypedAttrInterface]> { let assemblyFormat = [{}]; } +//===----------------------------------------------------------------------===// +// ZeroAttr +//===----------------------------------------------------------------------===// + +def ZeroAttr : CIR_Attr<"Zero", "zero", [TypedAttrInterface]> { + let summary = "Attribute to represent zero initialization"; + let description = [{ + The ZeroAttr is used to indicate zero initialization on structs. + }]; + + let parameters = (ins AttributeSelfTypeParameter<"">:$type); + let assemblyFormat = [{}]; +} + //===----------------------------------------------------------------------===// // CstArrayAttr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 0bc7cbff24f2..a895c462987b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -138,6 +138,13 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return op->emitOpError("nullptr expects pointer type"); } + if (attrType.isa()) { + // FIXME: should also support arrays / cst_arrays. + if (opType.isa<::mlir::cir::StructType>()) + return success(); + return op->emitOpError("zero expects struct type"); + } + if (attrType.isa()) { if (!opType.isa()) return op->emitOpError("result type (") diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index f57544423e72..c6b34517e0c1 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -221,3 +221,9 @@ cir.func @unary1() { %3 = cir.unary(dec, %2) : i32, i32 // // expected-error {{'cir.unary' op requires result to be used by a memory store to the same address as the input memory load}} cir.return } + +// ----- + +module { + cir.global external @v = #cir.zero : i32 // expected-error {{zero expects struct type}} +} \ No newline at end of file From 360e8f5e5e82f9be10c4c28c443745b9c5584087 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 25 Oct 2022 11:54:55 -0700 Subject: [PATCH 0667/2301] [CIR][CIRGen] For now use zero init for default/trivial global iniit --- clang/lib/CIR/CodeGen/CIRGenExprCst.cpp | 8 +++++++- clang/test/CIR/CodeGen/coro-task.cpp | 6 +++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp index fbfbc0731416..13c162e10439 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp @@ -547,8 +547,14 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { if (const CXXConstructExpr *E = dyn_cast_or_null(D.getInit())) { const CXXConstructorDecl *CD = E->getConstructor(); + // FIXME: we should probably model this more closely to C++ than + // just emitting a global with zero init (mimic what we do for trivial + // assignments and whatnots). Since this is for globals shouldn't + // be a problem for the near future. if (CD->isTrivial() && CD->isDefaultConstructor()) - assert(0 && "not implemented"); + return mlir::cir::ZeroAttr::get( + CGM.getBuilder().getContext(), + CGM.getTypes().ConvertType(D.getType())); } } InConstantContext = D.hasConstantInitialization(); diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index fee8d7538d2e..3737f18ef99b 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * namespace std { @@ -94,7 +95,10 @@ struct co_invoke_fn { } }; +co_invoke_fn co_invoke; + }} // namespace folly::coro // CHECK: module { -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !cir.struct<"struct.folly::coro::co_invoke_fn", i8 +// CHECK-NEXT: } From 31218335221178adb9603adabe1676f8b224f2e3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 25 Oct 2022 13:27:50 -0700 Subject: [PATCH 0668/2301] [CIR][CIRGen] Start adding pieces for coroutines codegen: save params for coro body --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 11 +++++++---- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +++ clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h | 1 + 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index f1df333cb6c0..c068bc2c9e74 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -420,14 +420,17 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, Stmt *Body = FD->getBody(); if (Body) { - // Coroutines always emit lifetime markers + // LLVM codegen: Coroutines always emit lifetime markers + // Hide this under request for lifetime emission so that we can write + // tests when the time comes, but CIR should be intrinsically scope + // accurate, so no need to tie coroutines to such markers. if (isa(Body)) - llvm_unreachable("Coroutines NYI"); + assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); // Initialize helper which will detect jumps which can cause invalid // lifetime markers. if (ShouldEmitLifetimeMarkers) - llvm_unreachable("Lifetime markers NYI"); + assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); } // Create a scope in the symbol table to hold variable declarations. @@ -451,7 +454,7 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // Save parameters for coroutine function. if (Body && isa_and_nonnull(Body)) - llvm_unreachable("Coroutines NYI"); + llvm::append_range(FnArgs, FD->parameters()); // Generate the body of the function. // TODO: PGO.assignRegionCounters diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 91dc4fd85dbb..6c3c5ebfa775 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -350,6 +350,9 @@ class CIRGenFunction { clang::QualType FnRetTy; mlir::cir::FuncOp CurFn = nullptr; + /// Save Parameter Decl for coroutine. + llvm::SmallVector FnArgs; + /// CXXStructorImplicitParamDecl - When generating code for a constructor or /// destructor, this will hold the implicit argument (e.g. VTT). clang::ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr; diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 04025fefdea4..94cf56d44e97 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -50,6 +50,7 @@ struct UnimplementedFeature { static bool tryEmitAsConstant() { return false; } static bool incrementProfileCounter() { return false; } static bool requiresReturnValueCheck() { return false; } + static bool shouldEmitLifetimeMarkers() { return false; } }; } // namespace cir From cde519bb928eee04f44f671046951f985a4d39b6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Oct 2022 16:02:21 -0700 Subject: [PATCH 0669/2301] [CIR][CIRGen] Skeleton for buildCoroutineBody --- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 24 +++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 4 +++- clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + 4 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp new file mode 100644 index 000000000000..68dddd4d4377 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -0,0 +1,24 @@ +//===----- CGCoroutine.cpp - Emit CIR Code for C++ coroutines -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with C++ code generation of coroutines. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenFunction.h" +#include "clang/AST/StmtCXX.h" +#include "clang/AST/StmtVisitor.h" +#include "llvm/ADT/ScopeExit.h" + +using namespace clang; +using namespace cir; + +mlir::LogicalResult +CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { + assert(0 && "not implemented"); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 6c3c5ebfa775..dd741daf28a1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -614,6 +614,7 @@ class CIRGenFunction { bool ignoreResult = false); mlir::LogicalResult buildFunctionBody(const clang::Stmt *Body); + mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S); // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 85f805704d50..8a5559383fa0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -139,12 +139,14 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, return mlir::failure(); break; + case Stmt::CoroutineBodyStmtClass: + return buildCoroutineBody(cast(*S)); + case Stmt::IndirectGotoStmtClass: case Stmt::ReturnStmtClass: // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. case Stmt::GCCAsmStmtClass: case Stmt::MSAsmStmtClass: - case Stmt::CoroutineBodyStmtClass: case Stmt::CoreturnStmtClass: case Stmt::CapturedStmtClass: case Stmt::ObjCAtTryStmtClass: diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 509d9660dbc3..436b5da9388c 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -15,6 +15,7 @@ add_clang_library(clangCIR CIRGenCall.cpp CIRGenClass.cpp CIRGenCleanup.cpp + CIRGenCoroutine.cpp CIRGenDecl.cpp CIRGenDeclCXX.cpp CIRGenExpr.cpp From 5b17ffc6ccad8c5ae641f85994cee62c5013ec65 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Nov 2022 12:12:56 -0300 Subject: [PATCH 0670/2301] [CIR][CIRGen][Coroutines] More codegen similar to LLVM for now, ignoring intrinsics and other specifics - Not yet functional, more building steps to come. - Note that there's commented code around, which should be gradually removed once we set on a simple but concrete testcase/example. --- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 249 +++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 13 ++ 2 files changed, 261 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 68dddd4d4377..6ea575c7a3df 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -18,7 +18,254 @@ using namespace clang; using namespace cir; +struct cir::CGCoroData { + // What is the current await expression kind and how many + // await/yield expressions were encountered so far. + // These are used to generate pretty labels for await expressions in LLVM IR. + // AwaitKind CurrentAwaitKind = AwaitKind::Init; + // unsigned AwaitNum = 0; + // unsigned YieldNum = 0; + + // How many co_return statements are in the coroutine. Used to decide whether + // we need to add co_return; equivalent at the end of the user authored body. + unsigned CoreturnCount = 0; + + // A branch to this block is emitted when coroutine needs to suspend. + // llvm::BasicBlock *SuspendBB = nullptr; + + // The promise type's 'unhandled_exception' handler, if it defines one. + Stmt *ExceptionHandler = nullptr; + + // A temporary i1 alloca that stores whether 'await_resume' threw an + // exception. If it did, 'true' is stored in this variable, and the coroutine + // body must be skipped. If the promise type does not define an exception + // handler, this is null. + // llvm::Value *ResumeEHVar = nullptr; + + // Stores the jump destination just before the coroutine memory is freed. + // This is the destination that every suspend point jumps to for the cleanup + // branch. + // CodeGenFunction::JumpDest CleanupJD; + + // Stores the jump destination just before the final suspend. The co_return + // statements jumps to this point after calling return_xxx promise member. + // CodeGenFunction::JumpDest FinalJD; + + // Stores the llvm.coro.id emitted in the function so that we can supply it + // as the first argument to coro.begin, coro.alloc and coro.free intrinsics. + // Note: llvm.coro.id returns a token that cannot be directly expressed in a + // builtin. + // llvm::CallInst *CoroId = nullptr; + + // Stores the llvm.coro.begin emitted in the function so that we can replace + // all coro.frame intrinsics with direct SSA value of coro.begin that returns + // the address of the coroutine frame of the current coroutine. + // llvm::CallInst *CoroBegin = nullptr; + + // Stores the last emitted coro.free for the deallocate expressions, we use it + // to wrap dealloc code with if(auto mem = coro.free) dealloc(mem). + // llvm::CallInst *LastCoroFree = nullptr; + + // If coro.id came from the builtin, remember the expression to give better + // diagnostic. If CoroIdExpr is nullptr, the coro.id was created by + // EmitCoroutineBody. + CallExpr const *CoroIdExpr = nullptr; +}; + +// Defining these here allows to keep CGCoroData private to this file. +CIRGenFunction::CGCoroInfo::CGCoroInfo() {} +CIRGenFunction::CGCoroInfo::~CGCoroInfo() {} + +static void createCoroData(CIRGenFunction &CGF, + CIRGenFunction::CGCoroInfo &CurCoro) { + if (CurCoro.Data) { + // if (CurCoro.Data->CoroIdExpr) + // CGF.CGM.Error(CoroIdExpr->getBeginLoc(), + // "only one __builtin_coro_id can be used in a function"); + // else if (CoroIdExpr) + // CGF.CGM.Error(CoroIdExpr->getBeginLoc(), + // "__builtin_coro_id shall not be used in a C++ + // coroutine"); + // else + llvm_unreachable("EmitCoroutineBodyStatement called twice?"); + + return; + } + + CurCoro.Data = std::unique_ptr(new CGCoroData); + // CurCoro.Data->CoroId = CoroId; + // CurCoro.Data->CoroIdExpr = CoroIdExpr; +} + +namespace { +// FIXME: both GetParamRef and ParamReferenceReplacerRAII are good template +// candidates to be shared among LLVM / CIR codegen. + +// Hunts for the parameter reference in the parameter copy/move declaration. +struct GetParamRef : public StmtVisitor { +public: + DeclRefExpr *Expr = nullptr; + GetParamRef() {} + void VisitDeclRefExpr(DeclRefExpr *E) { + assert(Expr == nullptr && "multilple declref in param move"); + Expr = E; + } + void VisitStmt(Stmt *S) { + for (auto *C : S->children()) { + if (C) + Visit(C); + } + } +}; + +// This class replaces references to parameters to their copies by changing +// the addresses in CGF.LocalDeclMap and restoring back the original values in +// its destructor. +struct ParamReferenceReplacerRAII { + CIRGenFunction::DeclMapTy SavedLocals; + CIRGenFunction::DeclMapTy &LocalDeclMap; + + ParamReferenceReplacerRAII(CIRGenFunction::DeclMapTy &LocalDeclMap) + : LocalDeclMap(LocalDeclMap) {} + + void addCopy(DeclStmt const *PM) { + // Figure out what param it refers to. + + assert(PM->isSingleDecl()); + VarDecl const *VD = static_cast(PM->getSingleDecl()); + Expr const *InitExpr = VD->getInit(); + GetParamRef Visitor; + Visitor.Visit(const_cast(InitExpr)); + assert(Visitor.Expr); + DeclRefExpr *DREOrig = Visitor.Expr; + auto *PD = DREOrig->getDecl(); + + auto it = LocalDeclMap.find(PD); + assert(it != LocalDeclMap.end() && "parameter is not found"); + SavedLocals.insert({PD, it->second}); + + auto copyIt = LocalDeclMap.find(VD); + assert(copyIt != LocalDeclMap.end() && "parameter copy is not found"); + it->second = copyIt->getSecond(); + } + + ~ParamReferenceReplacerRAII() { + for (auto &&SavedLocal : SavedLocals) { + LocalDeclMap.insert({SavedLocal.first, SavedLocal.second}); + } + } +}; +} // namespace + +static mlir::LogicalResult buildBodyAndFallthrough(CIRGenFunction &CGF, + const CoroutineBodyStmt &S, + Stmt *Body) { + if (CGF.buildStmt(Body, /*useCurrentScope=*/true).failed()) + return mlir::failure(); + // From LLVM codegen: + // const bool CanFallthrough = CGF.Builder.GetInsertBlock(); + if (S.getFallthroughHandler()) { + llvm_unreachable("NYI"); + // if (Stmt *OnFallthrough = S.getFallthroughHandler()) + // CGF.buildStmt(OnFallthrough, /*useCurrentScope=*/true); + } + return mlir::success(); +} + mlir::LogicalResult CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { - assert(0 && "not implemented"); + // This is very different from LLVM codegen as the current intent is to + // not expand too much of it here and leave it to dialect codegen. + // In the LLVM world, this is where we create calls to coro.id, + // coro.alloc and coro.begin. + createCoroData(*this, CurCoro); + + // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided. + if (auto *RetOnAllocFailure = S.getReturnStmtOnAllocFailure()) + llvm_unreachable("NYI"); + + { + // FIXME: create a new scope to copy out the params? + // LLVM create scope cleanups here, but might be due to the use + // of many basic blocks? + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + ParamReferenceReplacerRAII ParamReplacer(LocalDeclMap); + + // Create mapping between parameters and copy-params for coroutine + // function. + llvm::ArrayRef ParamMoves = S.getParamMoves(); + assert((ParamMoves.size() == 0 || (ParamMoves.size() == FnArgs.size())) && + "ParamMoves and FnArgs should be the same size for coroutine " + "function"); + // For zipping the arg map into debug info. + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + + // Create parameter copies. We do it before creating a promise, since an + // evolution of coroutine TS may allow promise constructor to observe + // parameter copies. + for (auto *PM : S.getParamMoves()) { + if (buildStmt(PM, /*useCurrentScope=*/true).failed()) + return mlir::failure(); + ParamReplacer.addCopy(cast(PM)); + } + + if (buildStmt(S.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + + // Address promiseAddr = GetAddrOfLocalVar(S.getPromiseDecl()); + // auto *PromiseAddrVoidPtr = + // new llvm::BitCastInst(promiseAddr.getPointer(), VoidPtrTy, "", + // CoroId); + // // Update CoroId to refer to the promise. We could not do it earlier + // // because promise local variable was not emitted yet. + // CoroId->setArgOperand(1, PromiseAddrVoidPtr); + + // ReturnValue should be valid as long as the coroutine's return type + // is not void. The assertion could help us to reduce the check later. + assert(ReturnValue.isValid() == (bool)S.getReturnStmt()); + // Now we have the promise, initialize the GRO. + // We need to emit `get_return_object` first. According to: + // [dcl.fct.def.coroutine]p7 + // The call to get_return_­object is sequenced before the call to + // initial_suspend and is invoked at most once. + // + // So we couldn't emit return value when we emit return statment, + // otherwise the call to get_return_object wouldn't be in front + // of initial_suspend. + if (ReturnValue.isValid()) { + buildAnyExprToMem(S.getReturnValue(), ReturnValue, + S.getReturnValue()->getType().getQualifiers(), + /*IsInit*/ true); + } + + // EHStack.pushCleanup(EHCleanup); + + // CurCoro.Data->CurrentAwaitKind = AwaitKind::Init; + // CurCoro.Data->ExceptionHandler = S.getExceptionHandler(); + if (buildStmt(S.getInitSuspendStmt(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + // CurCoro.Data->FinalJD = getJumpDestInCurrentScope(FinalBB); + + // CurCoro.Data->CurrentAwaitKind = AwaitKind::Normal; + + if (S.getExceptionHandler()) { + llvm_unreachable("NYI"); + } else { + if (buildBodyAndFallthrough(*this, S, S.getBody()).failed()) + return mlir::failure(); + } + + // See if we need to generate final suspend. + // const bool CanFallthrough = Builder.GetInsertBlock(); + // FIXME: LLVM tracks fallthrough by checking the insertion + // point is valid, we can probably do better. + const bool CanFallthrough = false; + const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0; + if (CanFallthrough || HasCoreturns) { + // CurCoro.Data->CurrentAwaitKind = AwaitKind::Final; + if (buildStmt(S.getFinalSuspendStmt(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + } + } + return mlir::success(); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index dd741daf28a1..60422f7b61e9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -48,6 +48,7 @@ namespace cir { // FIXME: for now we are reusing this from lib/Clang/CodeGenFunction.h, which // isn't available in the include dir. Same for getEvaluationKind below. enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; +struct CGCoroData; class CIRGenFunction { public: @@ -316,6 +317,18 @@ class CIRGenFunction { TCK_ConstructorCall, }; + // Holds coroutine data if the current function is a coroutine. We use a + // wrapper to manage its lifetime, so that we don't have to define CGCoroData + // in this header. + struct CGCoroInfo { + std::unique_ptr Data; + CGCoroInfo(); + ~CGCoroInfo(); + }; + CGCoroInfo CurCoro; + + bool isCoroutine() const { return CurCoro.Data != nullptr; } + /// CurGD - The GlobalDecl for the current function being compiled. clang::GlobalDecl CurGD; From 54eb347633d3f6bbbd12c97b4e6aef8f983cef64 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Nov 2022 12:13:17 -0300 Subject: [PATCH 0671/2301] [CIR][CIRGen] Add ConstantEmitter::tryEmitAbstractForInitializer Do not use it just yet. --- clang/lib/CIR/CodeGen/CIRGenCstEmitter.h | 4 ++++ clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenExprCst.cpp | 12 ++++++++++-- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h index 6b31f6c7c155..1c70088d73b0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h @@ -94,6 +94,10 @@ class ConstantEmitter { static mlir::Attribute emitForMemory(CIRGenModule &CGM, mlir::Attribute C, clang::QualType T); + /// Try to emit the initializer of the given declaration as an abstract + /// constant. + mlir::Attribute tryEmitAbstractForInitializer(const VarDecl &D); + // These are private helper routines of the constant emitter that // can't actually be private because things are split out into helper // functions and classes. diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 9eb64c2cbc8f..3824c6ba42a4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "clang/AST/Decl.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp index 13c162e10439..7cb92fce33c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp @@ -537,6 +537,13 @@ static QualType getNonMemoryType(CIRGenModule &CGM, QualType type) { return type; } +mlir::Attribute +ConstantEmitter::tryEmitAbstractForInitializer(const VarDecl &D) { + auto state = pushAbstract(); + auto C = tryEmitPrivateForVarInit(D); + return validateAndPopAbstract(C, state); +} + mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { // Make a quick check if variable can be default NULL initialized // and avoid going through rest of code which may do, for c++11, @@ -761,13 +768,14 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, } case APValue::LValue: return ConstantLValueEmitter(*this, Value, DestType).tryEmit(); + case APValue::Struct: + case APValue::Union: + assert(0 && "not implemented"); case APValue::FixedPoint: case APValue::ComplexInt: case APValue::ComplexFloat: case APValue::Vector: case APValue::AddrLabelDiff: - case APValue::Struct: - case APValue::Union: case APValue::MemberPointer: assert(0 && "not implemented"); } From e17ee0f9141d1d5114867fd69705a4b1b84ba196 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Nov 2022 12:13:34 -0300 Subject: [PATCH 0672/2301] [CIR][CIRGen] Add initial handling code for aggregate constant initialization --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 27 +- clang/lib/CIR/CodeGen/CIRGenExprCst.cpp | 558 +++++++++++++++++- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 3 files changed, 580 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 3824c6ba42a4..ba639a4c807f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -125,6 +125,10 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { } const Address Loc = emission.Addr; + // Check whether this is a byref variable that's potentially + // captured and moved by its own initializer. If so, we'll need to + // emit the initializer first, then copy into the variable. + assert(!UnimplementedFeature::capturedByInit() && "NYI"); // Note: constexpr already initializes everything correctly. LangOptions::TrivialAutoVarInitKind trivialAutoVarInit = @@ -145,14 +149,29 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { if (isTrivialInitializer(Init)) return initializeWhatIsTechnicallyUninitialized(Loc); + mlir::Attribute constant; if (emission.IsConstantAggregate || D.mightBeUsableInConstantExpressions(getContext())) { - assert(0 && "not implemented"); + constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D); + llvm_unreachable("NYI"); + if (constant && !constant.isa() && + (trivialAutoVarInit != + LangOptions::TrivialAutoVarInitKind::Uninitialized)) { + llvm_unreachable("NYI"); + } + } + + if (!constant) { + initializeWhatIsTechnicallyUninitialized(Loc); + LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); + return buildExprAsInit(Init, &D, lv); + } + + if (!emission.IsConstantAggregate) { + llvm_unreachable("NYI"); } - initializeWhatIsTechnicallyUninitialized(Loc); - LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); - return buildExprAsInit(Init, &D, lv); + llvm_unreachable("NYI"); } void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp index 7cb92fce33c9..9702380235bb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp @@ -28,10 +28,564 @@ using namespace clang; using namespace cir; //===----------------------------------------------------------------------===// -// ConstExprEmitter +// ConstantAggregateBuilder //===----------------------------------------------------------------------===// namespace { +class ConstExprEmitter; + +struct ConstantAggregateBuilderUtils { + CIRGenModule &CGM; + + ConstantAggregateBuilderUtils(CIRGenModule &CGM) : CGM(CGM) {} + + CharUnits getAlignment(const mlir::Attribute C) const { + llvm_unreachable("NYI"); + // return CharUnits::fromQuantity( + // CGM.getDataLayout().getABITypeAlignment(C->getType())); + } + + CharUnits getSize(mlir::Type Ty) const { + llvm_unreachable("NYI"); + // return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(Ty)); + } + + CharUnits getSize(const mlir::Attribute C) const { + llvm_unreachable("NYI"); + // return getSize(C.getType()); + } + + mlir::Attribute getPadding(CharUnits PadSize) const { + llvm_unreachable("NYI"); + } + + mlir::Attribute getZeroes(CharUnits ZeroSize) const { + llvm_unreachable("NYI"); + } +}; + +/// Incremental builder for an llvm::Constant* holding a struct or array +/// constant. +class ConstantAggregateBuilder : private ConstantAggregateBuilderUtils { + /// The elements of the constant. These two arrays must have the same size; + /// Offsets[i] describes the offset of Elems[i] within the constant. The + /// elements are kept in increasing offset order, and we ensure that there + /// is no overlap: Offsets[i+1] >= Offsets[i] + getSize(Elemes[i]). + /// + /// This may contain explicit padding elements (in order to create a + /// natural layout), but need not. Gaps between elements are implicitly + /// considered to be filled with undef. + llvm::SmallVector Elems; + llvm::SmallVector Offsets; + + /// The size of the constant (the maximum end offset of any added element). + /// May be larger than the end of Elems.back() if we split the last element + /// and removed some trailing undefs. + CharUnits Size = CharUnits::Zero(); + + /// This is true only if laying out Elems in order as the elements of a + /// non-packed LLVM struct will give the correct layout. + bool NaturalLayout = true; + + bool split(size_t Index, CharUnits Hint); + std::optional splitAt(CharUnits Pos); + + static mlir::Attribute + buildFrom(CIRGenModule &CGM, ArrayRef Elems, + ArrayRef Offsets, CharUnits StartOffset, CharUnits Size, + bool NaturalLayout, mlir::Type DesiredTy, bool AllowOversized); + +public: + ConstantAggregateBuilder(CIRGenModule &CGM) + : ConstantAggregateBuilderUtils(CGM) {} + + /// Update or overwrite the value starting at \p Offset with \c C. + /// + /// \param AllowOverwrite If \c true, this constant might overwrite (part of) + /// a constant that has already been added. This flag is only used to + /// detect bugs. + bool add(mlir::Attribute C, CharUnits Offset, bool AllowOverwrite); + + /// Update or overwrite the bits starting at \p OffsetInBits with \p Bits. + bool addBits(llvm::APInt Bits, uint64_t OffsetInBits, bool AllowOverwrite); + + /// Attempt to condense the value starting at \p Offset to a constant of type + /// \p DesiredTy. + void condense(CharUnits Offset, mlir::Type DesiredTy); + + /// Produce a constant representing the entire accumulated value, ideally of + /// the specified type. If \p AllowOversized, the constant might be larger + /// than implied by \p DesiredTy (eg, if there is a flexible array member). + /// Otherwise, the constant will be of exactly the same size as \p DesiredTy + /// even if we can't represent it as that type. + mlir::Attribute build(mlir::Type DesiredTy, bool AllowOversized) const { + return buildFrom(CGM, Elems, Offsets, CharUnits::Zero(), Size, + NaturalLayout, DesiredTy, AllowOversized); + } +}; + +template > +static void replace(Container &C, size_t BeginOff, size_t EndOff, Range Vals) { + assert(BeginOff <= EndOff && "invalid replacement range"); + llvm::replace(C, C.begin() + BeginOff, C.begin() + EndOff, Vals); +} + +bool ConstantAggregateBuilder::add(mlir::Attribute C, CharUnits Offset, + bool AllowOverwrite) { + // Common case: appending to a layout. + if (Offset >= Size) { + CharUnits Align = getAlignment(C); + CharUnits AlignedSize = Size.alignTo(Align); + if (AlignedSize > Offset || Offset.alignTo(Align) != Offset) + NaturalLayout = false; + else if (AlignedSize < Offset) { + Elems.push_back(getPadding(Offset - Size)); + Offsets.push_back(Size); + } + Elems.push_back(C); + Offsets.push_back(Offset); + Size = Offset + getSize(C); + return true; + } + + // Uncommon case: constant overlaps what we've already created. + std::optional FirstElemToReplace = splitAt(Offset); + if (!FirstElemToReplace) + return false; + + CharUnits CSize = getSize(C); + std::optional LastElemToReplace = splitAt(Offset + CSize); + if (!LastElemToReplace) + return false; + + assert((FirstElemToReplace == LastElemToReplace || AllowOverwrite) && + "unexpectedly overwriting field"); + + replace(Elems, *FirstElemToReplace, *LastElemToReplace, {C}); + replace(Offsets, *FirstElemToReplace, *LastElemToReplace, {Offset}); + Size = std::max(Size, Offset + CSize); + NaturalLayout = false; + return true; +} + +bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits, + bool AllowOverwrite) { + llvm_unreachable("NYI"); +} + +/// Returns a position within Elems and Offsets such that all elements +/// before the returned index end before Pos and all elements at or after +/// the returned index begin at or after Pos. Splits elements as necessary +/// to ensure this. Returns None if we find something we can't split. +std::optional ConstantAggregateBuilder::splitAt(CharUnits Pos) { + if (Pos >= Size) + return Offsets.size(); + + while (true) { + auto FirstAfterPos = llvm::upper_bound(Offsets, Pos); + if (FirstAfterPos == Offsets.begin()) + return 0; + + // If we already have an element starting at Pos, we're done. + size_t LastAtOrBeforePosIndex = FirstAfterPos - Offsets.begin() - 1; + if (Offsets[LastAtOrBeforePosIndex] == Pos) + return LastAtOrBeforePosIndex; + + // We found an element starting before Pos. Check for overlap. + if (Offsets[LastAtOrBeforePosIndex] + + getSize(Elems[LastAtOrBeforePosIndex]) <= + Pos) + return LastAtOrBeforePosIndex + 1; + + // Try to decompose it into smaller constants. + if (!split(LastAtOrBeforePosIndex, Pos)) + return std::nullopt; + } +} + +/// Split the constant at index Index, if possible. Return true if we did. +/// Hint indicates the location at which we'd like to split, but may be +/// ignored. +bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) { + llvm_unreachable("NYI"); +} + +mlir::Attribute ConstantAggregateBuilder::buildFrom( + CIRGenModule &CGM, ArrayRef Elems, + ArrayRef Offsets, CharUnits StartOffset, CharUnits Size, + bool NaturalLayout, mlir::Type DesiredTy, bool AllowOversized) { + ConstantAggregateBuilderUtils Utils(CGM); + + if (Elems.empty()) + return {}; + + llvm_unreachable("NYI"); +} + +void ConstantAggregateBuilder::condense(CharUnits Offset, + mlir::Type DesiredTy) { + CharUnits Size = getSize(DesiredTy); + + std::optional FirstElemToReplace = splitAt(Offset); + if (!FirstElemToReplace) + return; + size_t First = *FirstElemToReplace; + + std::optional LastElemToReplace = splitAt(Offset + Size); + if (!LastElemToReplace) + return; + size_t Last = *LastElemToReplace; + + size_t Length = Last - First; + if (Length == 0) + return; + + if (Length == 1 && Offsets[First] == Offset && + getSize(Elems[First]) == Size) { + // Re-wrap single element structs if necessary. Otherwise, leave any single + // element constant of the right size alone even if it has the wrong type. + llvm_unreachable("NYI"); + } + + mlir::Attribute Replacement = buildFrom( + CGM, ArrayRef(Elems).slice(First, Length), + ArrayRef(Offsets).slice(First, Length), Offset, getSize(DesiredTy), + /*known to have natural layout=*/false, DesiredTy, false); + replace(Elems, First, Last, {Replacement}); + replace(Offsets, First, Last, {Offset}); +} + +//===----------------------------------------------------------------------===// +// ConstStructBuilder +//===----------------------------------------------------------------------===// + +class ConstStructBuilder { + CIRGenModule &CGM; + ConstantEmitter &Emitter; + ConstantAggregateBuilder &Builder; + CharUnits StartOffset; + +public: + static mlir::Attribute BuildStruct(ConstantEmitter &Emitter, + InitListExpr *ILE, QualType StructTy); + static mlir::Attribute BuildStruct(ConstantEmitter &Emitter, + const APValue &Value, QualType ValTy); + static bool UpdateStruct(ConstantEmitter &Emitter, + ConstantAggregateBuilder &Const, CharUnits Offset, + InitListExpr *Updater); + +private: + ConstStructBuilder(ConstantEmitter &Emitter, + ConstantAggregateBuilder &Builder, CharUnits StartOffset) + : CGM(Emitter.CGM), Emitter(Emitter), Builder(Builder), + StartOffset(StartOffset) {} + + bool AppendField(const FieldDecl *Field, uint64_t FieldOffset, + mlir::Attribute InitExpr, bool AllowOverwrite = false); + + bool AppendBytes(CharUnits FieldOffsetInChars, mlir::Attribute InitCst, + bool AllowOverwrite = false); + + bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, + mlir::IntegerAttr InitExpr, bool AllowOverwrite = false); + + bool Build(InitListExpr *ILE, bool AllowOverwrite); + bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase, + const CXXRecordDecl *VTableClass, CharUnits BaseOffset); + mlir::Attribute Finalize(QualType Ty); +}; + +bool ConstStructBuilder::AppendField(const FieldDecl *Field, + uint64_t FieldOffset, + mlir::Attribute InitCst, + bool AllowOverwrite) { + const ASTContext &Context = CGM.getASTContext(); + + CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset); + + return AppendBytes(FieldOffsetInChars, InitCst, AllowOverwrite); +} + +bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars, + mlir::Attribute InitCst, + bool AllowOverwrite) { + return Builder.add(InitCst, StartOffset + FieldOffsetInChars, AllowOverwrite); +} + +bool ConstStructBuilder::AppendBitField(const FieldDecl *Field, + uint64_t FieldOffset, + mlir::IntegerAttr CI, + bool AllowOverwrite) { + llvm_unreachable("NYI"); +} + +static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter, + ConstantAggregateBuilder &Const, + CharUnits Offset, QualType Type, + InitListExpr *Updater) { + if (Type->isRecordType()) + return ConstStructBuilder::UpdateStruct(Emitter, Const, Offset, Updater); + + auto CAT = Emitter.CGM.getASTContext().getAsConstantArrayType(Type); + if (!CAT) + return false; + QualType ElemType = CAT->getElementType(); + CharUnits ElemSize = Emitter.CGM.getASTContext().getTypeSizeInChars(ElemType); + mlir::Type ElemTy = Emitter.CGM.getTypes().convertTypeForMem(ElemType); + + mlir::Attribute FillC = nullptr; + if (Expr *Filler = Updater->getArrayFiller()) { + if (!isa(Filler)) { + llvm_unreachable("NYI"); + } + } + + unsigned NumElementsToUpdate = + FillC ? CAT->getSize().getZExtValue() : Updater->getNumInits(); + for (unsigned I = 0; I != NumElementsToUpdate; ++I, Offset += ElemSize) { + Expr *Init = nullptr; + if (I < Updater->getNumInits()) + Init = Updater->getInit(I); + + if (!Init && FillC) { + if (!Const.add(FillC, Offset, true)) + return false; + } else if (!Init || isa(Init)) { + continue; + } else if (InitListExpr *ChildILE = dyn_cast(Init)) { + if (!EmitDesignatedInitUpdater(Emitter, Const, Offset, ElemType, + ChildILE)) + return false; + // Attempt to reduce the array element to a single constant if necessary. + Const.condense(Offset, ElemTy); + } else { + mlir::Attribute Val = Emitter.tryEmitPrivateForMemory(Init, ElemType); + if (!Const.add(Val, Offset, true)) + return false; + } + } + + return true; +} + +bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) { + RecordDecl *RD = ILE->getType()->castAs()->getDecl(); + const ASTRecordLayout &Layout = CGM.getASTContext().getASTRecordLayout(RD); + + unsigned FieldNo = -1; + unsigned ElementNo = 0; + + // Bail out if we have base classes. We could support these, but they only + // arise in C++1z where we will have already constant folded most interesting + // cases. FIXME: There are still a few more cases we can handle this way. + if (auto *CXXRD = dyn_cast(RD)) + if (CXXRD->getNumBases()) + return false; + + for (FieldDecl *Field : RD->fields()) { + ++FieldNo; + + // If this is a union, skip all the fields that aren't being initialized. + if (RD->isUnion() && + !declaresSameEntity(ILE->getInitializedFieldInUnion(), Field)) + continue; + + // Don't emit anonymous bitfields. + if (Field->isUnnamedBitField()) + continue; + + // Get the initializer. A struct can include fields without initializers, + // we just use explicit null values for them. + Expr *Init = nullptr; + if (ElementNo < ILE->getNumInits()) + Init = ILE->getInit(ElementNo++); + if (Init && isa(Init)) + continue; + + // Zero-sized fields are not emitted, but their initializers may still + // prevent emission of this struct as a constant. + if (Field->isZeroSize(CGM.getASTContext())) { + if (Init->HasSideEffects(CGM.getASTContext())) + return false; + continue; + } + + // When emitting a DesignatedInitUpdateExpr, a nested InitListExpr + // represents additional overwriting of our current constant value, and not + // a new constant to emit independently. + if (AllowOverwrite && + (Field->getType()->isArrayType() || Field->getType()->isRecordType())) { + if (auto *SubILE = dyn_cast(Init)) { + CharUnits Offset = CGM.getASTContext().toCharUnitsFromBits( + Layout.getFieldOffset(FieldNo)); + if (!EmitDesignatedInitUpdater(Emitter, Builder, StartOffset + Offset, + Field->getType(), SubILE)) + return false; + // If we split apart the field's value, try to collapse it down to a + // single value now. + llvm_unreachable("NYI"); + continue; + } + } + + mlir::Attribute EltInit; + if (Init) + Emitter.tryEmitPrivateForMemory(Init, Field->getType()); + else + llvm_unreachable("NYI"); + + if (!EltInit) + return false; + + if (!Field->isBitField()) { + // Handle non-bitfield members. + if (!AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit, + AllowOverwrite)) + return false; + // After emitting a non-empty field with [[no_unique_address]], we may + // need to overwrite its tail padding. + if (Field->hasAttr()) + AllowOverwrite = true; + } else { + llvm_unreachable("NYI"); + } + } + + return true; +} + +namespace { +struct BaseInfo { + BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index) + : Decl(Decl), Offset(Offset), Index(Index) {} + + const CXXRecordDecl *Decl; + CharUnits Offset; + unsigned Index; + + bool operator<(const BaseInfo &O) const { return Offset < O.Offset; } +}; +} // namespace + +bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD, + bool IsPrimaryBase, + const CXXRecordDecl *VTableClass, + CharUnits Offset) { + const ASTRecordLayout &Layout = CGM.getASTContext().getASTRecordLayout(RD); + + if (const CXXRecordDecl *CD = dyn_cast(RD)) { + // Add a vtable pointer, if we need one and it hasn't already been added. + if (Layout.hasOwnVFPtr()) + llvm_unreachable("NYI"); + + // Accumulate and sort bases, in order to visit them in address order, which + // may not be the same as declaration order. + SmallVector Bases; + Bases.reserve(CD->getNumBases()); + unsigned BaseNo = 0; + for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(), + BaseEnd = CD->bases_end(); + Base != BaseEnd; ++Base, ++BaseNo) { + assert(!Base->isVirtual() && "should not have virtual bases here"); + const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl(); + CharUnits BaseOffset = Layout.getBaseClassOffset(BD); + Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo)); + } + llvm::stable_sort(Bases); + + for (unsigned I = 0, N = Bases.size(); I != N; ++I) { + BaseInfo &Base = Bases[I]; + + bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl; + Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase, + VTableClass, Offset + Base.Offset); + } + } + + unsigned FieldNo = 0; + uint64_t OffsetBits = CGM.getASTContext().toBits(Offset); + + bool AllowOverwrite = false; + for (RecordDecl::field_iterator Field = RD->field_begin(), + FieldEnd = RD->field_end(); + Field != FieldEnd; ++Field, ++FieldNo) { + // If this is a union, skip all the fields that aren't being initialized. + if (RD->isUnion() && !declaresSameEntity(Val.getUnionField(), *Field)) + continue; + + // Don't emit anonymous bitfields or zero-sized fields. + if (Field->isUnnamedBitField() || Field->isZeroSize(CGM.getASTContext())) + continue; + + // Emit the value of the initializer. + const APValue &FieldValue = + RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo); + mlir::Attribute EltInit = + Emitter.tryEmitPrivateForMemory(FieldValue, Field->getType()); + if (!EltInit) + return false; + + if (!Field->isBitField()) { + // Handle non-bitfield members. + if (!AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, + EltInit, AllowOverwrite)) + return false; + // After emitting a non-empty field with [[no_unique_address]], we may + // need to overwrite its tail padding. + if (Field->hasAttr()) + AllowOverwrite = true; + } else { + llvm_unreachable("NYI"); + } + } + + return true; +} + +mlir::Attribute ConstStructBuilder::Finalize(QualType Type) { + Type = Type.getNonReferenceType(); + RecordDecl *RD = Type->castAs()->getDecl(); + mlir::Type ValTy = CGM.getTypes().ConvertType(Type); + return Builder.build(ValTy, RD->hasFlexibleArrayMember()); +} + +mlir::Attribute ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter, + InitListExpr *ILE, + QualType ValTy) { + ConstantAggregateBuilder Const(Emitter.CGM); + ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero()); + + if (!Builder.Build(ILE, /*AllowOverwrite*/ false)) + return nullptr; + + return Builder.Finalize(ValTy); +} + +mlir::Attribute ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter, + const APValue &Val, + QualType ValTy) { + ConstantAggregateBuilder Const(Emitter.CGM); + ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero()); + + const RecordDecl *RD = ValTy->castAs()->getDecl(); + const CXXRecordDecl *CD = dyn_cast(RD); + if (!Builder.Build(Val, RD, false, CD, CharUnits::Zero())) + return nullptr; + + return Builder.Finalize(ValTy); +} + +bool ConstStructBuilder::UpdateStruct(ConstantEmitter &Emitter, + ConstantAggregateBuilder &Const, + CharUnits Offset, InitListExpr *Updater) { + return ConstStructBuilder(Emitter, Const, Offset) + .Build(Updater, /*AllowOverwrite*/ true); +} + +//===----------------------------------------------------------------------===// +// ConstExprEmitter +//===----------------------------------------------------------------------===// // This class only needs to handle arrays, structs and unions. // @@ -770,7 +1324,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, return ConstantLValueEmitter(*this, Value, DestType).tryEmit(); case APValue::Struct: case APValue::Union: - assert(0 && "not implemented"); + return ConstStructBuilder::BuildStruct(*this, Value, DestType); case APValue::FixedPoint: case APValue::ComplexInt: case APValue::ComplexFloat: diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 94cf56d44e97..b36ee0204304 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -46,6 +46,7 @@ struct UnimplementedFeature { // Debug info static bool generateDebugInfo() { return false; } + static bool capturedByInit() { return false; } static bool getASTAllocaAddressSpace() { return false; } static bool tryEmitAsConstant() { return false; } static bool incrementProfileCounter() { return false; } From 756c8d815a00a0506d89c08332a8a58069c800d0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Nov 2022 12:13:46 -0300 Subject: [PATCH 0673/2301] [CIR][CIRGen] Prevent more ctor to memcpy xform, keep the call around --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 6 +++++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 9 ++++++--- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 7 ++++++- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index ba639a4c807f..90a74fdfee00 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -152,8 +152,12 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { mlir::Attribute constant; if (emission.IsConstantAggregate || D.mightBeUsableInConstantExpressions(getContext())) { + // FIXME: Differently from LLVM we try not to emit / lower too much + // here for CIR since we are interesting in seeing the ctor in some + // analysis later on. So CIR's implementation of ConstantEmitter will + // frequently return an empty Attribute, to signal we want to codegen + // some trivial ctor calls and whatnots. constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D); - llvm_unreachable("NYI"); if (constant && !constant.isa() && (trivialAutoVarInit != LangOptions::TrivialAutoVarInitKind::Uninitialized)) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index b2677f2fa16d..a4896eb1e8ee 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -260,9 +260,12 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, assert(!E->requiresZeroInitialization() && "zero initialization NYI"); - // If this is a call to a trivial default constructor, do nothing. - if (CD->isTrivial() && CD->isDefaultConstructor()) - assert(!CD->isTrivial() && "trivial constructors NYI"); + // If this is a call to a trivial default constructor: + // In LLVM: do nothing. + // In CIR: emit as a regular call, other later passes should lower the + // ctor call into trivial initialization. + // if (CD->isTrivial() && CD->isDefaultConstructor()) + // return; // Elide the constructor if we're constructing from a temporary if (getLangOpts().ElideConstructors && E->isElidable()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index c068bc2c9e74..8a55e9fc61f0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -591,7 +591,12 @@ void CIRGenFunction::buildCXXConstructorCall( buildTypeCheck(CIRGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), getContext().getRecordType(ClassDecl), CharUnits::Zero()); - assert(!D->isTrivial() && "Trivial ctor decl NYI"); + // If this is a call to a trivial default constructor: + // In LLVM: do nothing. + // In CIR: emit as a regular call, other later passes should lower the + // ctor call into trivial initialization. + // if (CD->isTrivial() && CD->isDefaultConstructor()) + // return; assert(!isMemcpyEquivalentSpecialMember(D) && "NYI"); From 63ce8d7fd8af4fa514a622b2b3d91852b46a32f8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Nov 2022 12:14:12 -0300 Subject: [PATCH 0674/2301] [CIR][CIRGen] Update ReturnValueSlot with extra info on usage, volatility and destruction --- clang/lib/CIR/CodeGen/CIRGenCall.h | 30 +++++++++++++++---------- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 1 + 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 00fd52ad626f..1ea32e5e5cc5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -184,27 +184,33 @@ class CallArgList : public llvm::SmallVector { } }; -/// FunctionArgList - Type for representing both the decl and type of parameters -/// to a function. The decl must be either a ParmVarDecl or ImplicitParamDecl. +/// Type for representing both the decl and type of parameters to a function. +/// The decl must be either a ParmVarDecl or ImplicitParamDecl. class FunctionArgList : public llvm::SmallVector {}; -/// ReturnValueSlot - Contains the address where the return value of a function -/// can be stored, and whether the address is volatile or not. +/// Contains the address where the return value of a function can be stored, and +/// whether the address is volatile or not. class ReturnValueSlot { Address Addr = Address::invalid(); // Return value slot flags - // unsigned IsVolatile : 1; - // unsigned IsUnused : 1; - // unsigned IsExternallyDestructed : 1; + unsigned IsVolatile : 1; + unsigned IsUnused : 1; + unsigned IsExternallyDestructed : 1; public: - // : ReturnValueSlot() - // IsVolatile(false), - // IsUnused(false), - // IsExternallyDestructed(false) - {} + : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {} + ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false, + bool IsExternallyDestructed = false) + : Addr(Addr), IsVolatile(IsVolatile), IsUnused(IsUnused), + IsExternallyDestructed(IsExternallyDestructed) {} + + bool isNull() const { return !Addr.isValid(); } + bool isVolatile() const { return IsVolatile; } + Address getValue() const { return Addr; } + bool isUnused() const { return IsUnused; } + bool isExternallyDestructed() const { return IsExternallyDestructed; } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 6c34d1e776e1..24703b03b163 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenCall.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "CIRGenTypes.h" From dfcf3791f742084ccac27a5634846467f3b8efa2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Nov 2022 12:14:16 -0300 Subject: [PATCH 0675/2301] [CIR][CIRGen][NFC] Teach RValue about aggregates and add more fields to AggValueSlot --- clang/lib/CIR/CodeGen/CIRGenValue.h | 114 +++++++++++++++++++--------- 1 file changed, 78 insertions(+), 36 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 499b55e238c6..230aadaf191a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -28,7 +28,7 @@ namespace cir { /// This trivial value class is used to represent the result of an -/// expression that is evaluated. It can be one of three things: either a +/// expression that is evaluated. It can be one of three things: either a /// simple MLIR SSA value, a pair of SSA values for complex numbers, or the /// address of an aggregate value in memory. class RValue { @@ -41,7 +41,9 @@ class RValue { // Stores first value and flavor. llvm::PointerIntPair V1; // Stores second value and volatility. - llvm::PointerIntPair V2; + llvm::PointerIntPair, 1, bool> V2; + // Stores element type for aggregate values. + mlir::Type ElementType; public: bool isScalar() const { return V1.getInt() == Scalar; } @@ -50,24 +52,30 @@ class RValue { bool isVolatileQualified() const { return V2.getInt(); } - /// getScalarVal() - Return the Value* of this scalar value. + /// Return the mlir::Value of this scalar value. mlir::Value getScalarVal() const { assert(isScalar() && "Not a scalar!"); return V1.getPointer(); } - /// getComplexVal - Return the real/imag components of this complex value. - /// + /// Return the real/imag components of this complex value. std::pair getComplexVal() const { assert(0 && "not implemented"); return {}; } - /// getAggregateAddr() - Return the Value* of the address of the - /// aggregate. + /// Return the mlir::Value of the address of the aggregate. Address getAggregateAddress() const { - assert(0 && "not implemented"); - return Address::invalid(); + assert(isAggregate() && "Not an aggregate!"); + auto align = reinterpret_cast(V2.getPointer().get()) >> + AggAlignShift; + return Address(V1.getPointer(), ElementType, + clang::CharUnits::fromQuantity(align)); + } + + mlir::Value getAggregatePointer() const { + assert(isAggregate() && "Not an aggregate!"); + return V1.getPointer(); } static RValue getIgnored() { @@ -90,12 +98,19 @@ class RValue { assert(0 && "not implemented"); return RValue{}; } - // FIXME: Aggregate rvalues need to retain information about whether they - // are volatile or not. Remove default to find all places that probably - // get this wrong. + // FIXME: Aggregate rvalues need to retain information about whether they are + // volatile or not. Remove default to find all places that probably get this + // wrong. static RValue getAggregate(Address addr, bool isVolatile = false) { - assert(0 && "not implemented"); - return RValue{}; + RValue ER; + ER.V1.setPointer(addr.getPointer()); + ER.V1.setInt(Aggregate); + ER.ElementType = addr.getElementType(); + + auto align = static_cast(addr.getAlignment().getQuantity()); + ER.V2.setPointer(reinterpret_cast(align << AggAlignShift)); + ER.V2.setInt(isVolatile); + return ER; } }; @@ -271,22 +286,40 @@ class AggValueSlot { // Qualifiers clang::Qualifiers Quals; - /// ZeroedFlag - This is set to true if the memory in the slot is known to be - /// zero before the assignment into it. This means that zero field don't need - /// to be set. + /// This is set to true if some external code is responsible for setting up a + /// destructor for the slot. Otherwise the code which constructs it should + /// push the appropriate cleanup. + bool DestructedFlag : 1; + + /// This is set to true if writing to the memory in the slot might require + /// calling an appropriate Objective-C GC barrier. The exact interaction here + /// is unnecessarily mysterious. + bool ObjCGCFlag : 1; + + /// This is set to true if the memory in the slot is known to be zero before + /// the assignment into it. This means that zero fields don't need to be set. bool ZeroedFlag : 1; - /// This is set to true if the tail padding of this slot might overlap another - /// object that may have already been initialized (and whose value must be - /// preserved by this initialization). If so, we may only store up to the - /// dsize of the type. Otherwise we can widen stores to the size of the type. + /// This is set to true if the slot might be aliased and it's not undefined + /// behavior to access it through such an alias. Note that it's always + /// undefined behavior to access a C++ object that's under construction + /// through an alias derived from outside the construction process. + /// + /// This flag controls whether calls that produce the aggregate + /// value may be evaluated directly into the slot, or whether they + /// must be evaluated into an unaliased temporary and then memcpy'ed + /// over. Since it's invalid in general to memcpy a non-POD C++ + /// object, it's important that this flag never be set when + /// evaluating an expression which constructs such an object. + bool AliasedFlag : 1; + + /// This is set to true if the tail padding of this slot might overlap + /// another object that may have already been initialized (and whose + /// value must be preserved by this initialization). If so, we may only + /// store up to the dsize of the type. Otherwise we can widen stores to + /// the size of the type. bool OverlapFlag : 1; - /// DestructedFlags - This is set to true if some external code is responsible - /// for setting up a destructor for the slot. Otherwise the code which - /// constructs it shoudl push the appropriate cleanup. - // bool DestructedFlag : 1; - /// If is set to true, sanitizer checks are already generated for this address /// or not required. For instance, if this address represents an object /// created in 'new' expression, sanitizer checks for memory is made as a part @@ -294,19 +327,13 @@ class AggValueSlot { /// them. bool SanitizerCheckedFlag : 1; - // TODO: Add the rest of these things - AggValueSlot(Address Addr, clang::Qualifiers Quals, bool DestructedFlag, bool ObjCGCFlag, bool ZeroedFlag, bool AliasedFlag, bool OverlapFlag, bool SanitizerCheckedFlag) - : Addr(Addr), Quals(Quals) - // ,DestructedFlag(DestructedFlag) - // ,ObjCGCFlag(ObjCGCFlag) - // ,ZeroedFlag(ZeroedFlag) - // ,AliasedFlag(AliasedFlag) - // ,OverlapFlag(OverlapFlag) - // ,SanitizerCheckedFlag(SanitizerCheckedFlag) - {} + : Addr(Addr), Quals(Quals), DestructedFlag(DestructedFlag), + ObjCGCFlag(ObjCGCFlag), ZeroedFlag(ZeroedFlag), + AliasedFlag(AliasedFlag), OverlapFlag(OverlapFlag), + SanitizerCheckedFlag(SanitizerCheckedFlag) {} public: enum IsAliased_t { IsNotAliased, IsAliased }; @@ -351,6 +378,13 @@ class AggValueSlot { isAliased, mayOverlap, isZeroed, isChecked); } + IsDestructed_t isExternallyDestructed() const { + return IsDestructed_t(DestructedFlag); + } + void setExternallyDestructed(bool destructed = true) { + DestructedFlag = destructed; + } + clang::Qualifiers getQualifiers() const { return Quals; } bool isVolatile() const { return Quals.hasVolatile(); } @@ -359,12 +393,20 @@ class AggValueSlot { bool isIgnored() const { return !Addr.isValid(); } + mlir::Value getPointer() const { return Addr.getPointer(); } + Overlap_t mayOverlap() const { return Overlap_t(OverlapFlag); } bool isSanitizerChecked() const { return SanitizerCheckedFlag; } IsZeroed_t isZeroed() const { return IsZeroed_t(ZeroedFlag); } + NeedsGCBarriers_t requiresGCollection() const { + return NeedsGCBarriers_t(ObjCGCFlag); + } + + IsAliased_t isPotentiallyAliased() const { return IsAliased_t(AliasedFlag); } + /// Get the preferred size to use when storing a value to this slot. This /// is the type size unless that might overlap another object, in which /// case it's the dsize. From 17c1749e514f201235fb4972d18c32a0d01debce Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Nov 2022 12:14:19 -0300 Subject: [PATCH 0676/2301] [CIR][CIRGen] Add partial support for passing in aggregates arguments --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 30 +++++++++++ clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 69 +++++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 + 3 files changed, 96 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 3ce685deb2f0..b62988495270 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -256,6 +256,20 @@ CIRGenCallee CIRGenCallee::prepareConcreteCallee(CIRGenFunction &CGF) const { return *this; } +void CIRGenFunction::buildAggregateStore(mlir::Value Val, Address Dest, + bool DestIsVolatile) { + // In LLVM codegen: + // Function to store a first-class aggregate into memory. We prefer to + // store the elements rather than the aggregate to be more friendly to + // fast-isel. + // In CIR codegen: + // Emit the most simple cir.store possible (e.g. a store for a whole + // struct), which can later be broken down in other CIR levels (or prior + // to dialect codegen). + (void)DestIsVolatile; + builder.create(*currSrcLoc, Val, Dest.getPointer()); +} + RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, @@ -440,6 +454,22 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, mlir::Type RetCIRTy = convertType(RetTy); if (RetAI.getCoerceToType() == RetCIRTy && RetAI.getDirectOffset() == 0) { switch (getEvaluationKind(RetTy)) { + case TEK_Aggregate: { + Address DestPtr = ReturnValue.getValue(); + bool DestIsVolatile = ReturnValue.isVolatile(); + + if (!DestPtr.isValid()) { + DestPtr = CreateMemTemp(RetTy, callLoc, "agg.tmp"); + DestIsVolatile = false; + } + + auto Results = theCall.getResults(); + assert(Results.size() <= 1 && "multiple returns NYI"); + + SourceLocRAIIObject Loc{*this, callLoc}; + buildAggregateStore(Results[0], DestPtr, DestIsVolatile); + return RValue::getAggregate(DestPtr); + } case TEK_Scalar: { // If the argument doesn't match, perform a bitcast to coerce it. This // can happen due to trivial type mismatches. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 24703b03b163..b968b5fe401b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -26,7 +26,16 @@ namespace { class AggExprEmitter : public StmtVisitor { CIRGenFunction &CGF; AggValueSlot Dest; - // bool IsResultUnused; + bool IsResultUnused; + + // Calls `Fn` with a valid return value slot, potentially creating a temporary + // to do so. If a temporary is created, an appropriate copy into `Dest` will + // be emitted, as will lifetime markers. + // + // The given function should take a ReturnValueSlot, and return an RValue that + // points to said slot. + void withReturnValueSlot(const Expr *E, + llvm::function_ref Fn); AggValueSlot EnsureSlot(QualType T) { assert(!Dest.isIgnored() && "ignored slots NYI"); @@ -35,9 +44,7 @@ class AggExprEmitter : public StmtVisitor { public: AggExprEmitter(CIRGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused) - : CGF{cgf}, Dest(Dest) - // ,IsResultUnused(IsResultUnused) - {} + : CGF{cgf}, Dest(Dest), IsResultUnused(IsResultUnused) {} //===--------------------------------------------------------------------===// // Visitor Methods @@ -79,7 +86,7 @@ class AggExprEmitter : public StmtVisitor { // Operators. void VisitCastExpr(CastExpr *E); - void VisitCallExpr(const CallExpr *E) { llvm_unreachable("NYI"); } + void VisitCallExpr(const CallExpr *E); void VisitStmtExpr(const StmtExpr *E) { llvm_unreachable("NYI"); } void VisitBinaryOperator(const BinaryOperator *E) { llvm_unreachable("NYI"); } void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *E) { @@ -277,6 +284,58 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { } } +void AggExprEmitter::VisitCallExpr(const CallExpr *E) { + if (E->getCallReturnType(CGF.getContext())->isReferenceType()) { + llvm_unreachable("NYI"); + } + + withReturnValueSlot( + E, [&](ReturnValueSlot Slot) { return CGF.buildCallExpr(E, Slot); }); +} + +void AggExprEmitter::withReturnValueSlot( + const Expr *E, llvm::function_ref EmitCall) { + QualType RetTy = E->getType(); + bool RequiresDestruction = + !Dest.isExternallyDestructed() && + RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct; + + // If it makes no observable difference, save a memcpy + temporary. + // + // We need to always provide our own temporary if destruction is required. + // Otherwise, EmitCall will emit its own, notice that it's "unused", and end + // its lifetime before we have the chance to emit a proper destructor call. + bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() || + (RequiresDestruction && !Dest.getAddress().isValid()); + + Address RetAddr = Address::invalid(); + assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); + + if (!UseTemp) { + RetAddr = Dest.getAddress(); + } else { + llvm_unreachable("NYI"); + } + + RValue Src = + EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused, + Dest.isExternallyDestructed())); + + if (!UseTemp) + return; + + assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer()); + llvm_unreachable("NYI"); + // TODO(cir): EmitFinalDestCopy(E->getType(), Src); + + if (!RequiresDestruction) { + // If there's no dtor to run, the copy was the last use of our temporary. + // Since we're not guaranteed to be in an ExprWithCleanups, clean up + // eagerly. + llvm_unreachable("NYI"); + } +} + //===----------------------------------------------------------------------===// // Helpers and dispatcher //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 60422f7b61e9..06299e4242ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -587,6 +587,8 @@ class CIRGenFunction { } void buildImplicitAssignmentOperatorBody(FunctionArgList &Args); + void buildAggregateStore(mlir::Value Val, Address Dest, bool DestIsVolatile); + void buildCallArgs( CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range ArgRange, From e3c884cbfed0b7796b546996b97be31931e83ed0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Nov 2022 12:14:22 -0300 Subject: [PATCH 0677/2301] [CIR][CIRGen] Teach ScalarExprEmitter to codegen a simple version of ExprWithCleanups --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 0cc1941e8d74..a97d03aae031 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -418,9 +418,7 @@ class ScalarExprEmitter : public StmtVisitor { return t->getOpResult(0); } - mlir::Value VisitExprWithCleanups(ExprWithCleanups *E) { - llvm_unreachable("NYI"); - } + mlir::Value VisitExprWithCleanups(ExprWithCleanups *E); mlir::Value VisitCXXNewExpr(const CXXNewExpr *E) { return CGF.buildCXXNewExpr(E); } @@ -1315,3 +1313,13 @@ mlir::Value ScalarExprEmitter::buildCompoundAssign( // Otherwise, reload the value. return buildLoadOfLValue(LHS, E->getExprLoc()); } + +mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { + // TODO(cir): CodeGenFunction::RunCleanupsScope Scope(CGF); + mlir::Value V = Visit(E->getSubExpr()); + + // Defend against dominance problems caused by jumps out of expression + // evaluation through the shared cleanup block. + // TODO(cir): Scope.ForceCleanup({&V}); + return V; +} From b80d60e661196819576aa7d86606af272ff268ba Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 4 Nov 2022 20:45:57 -0400 Subject: [PATCH 0678/2301] [CIR][Conversion] Lower CIRFuncOp with an OpConversionPattern The OpConversionPattern has machinery to assit in lowering the nested Regions within Ops. Use this instead to lower `cir::FuncOp` and it's nested ops instead of sequential lowerings. --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 67 ++++++++-------------- clang/test/CIR/CIRToLLVM/array.cir | 2 +- clang/test/CIR/CIRToLLVM/unary-inc-dec.cir | 2 +- clang/test/CIR/CodeGen/cmp.cpp | 2 +- 4 files changed, 26 insertions(+), 47 deletions(-) diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index bec0dd05a2a3..40b20d4b0d3f 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -188,39 +188,26 @@ class CIRConstantLowering } }; -class CIRFuncLowering : public mlir::OpRewritePattern { +class CIRFuncLowering : public mlir::OpConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::FuncOp op, - mlir::PatternRewriter &rewriter) const override { + matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto fn = rewriter.replaceOpWithNewOp( + op, op.getName(), op.getFunctionType()); + auto &srcRegion = op.getBody(); + auto &dstRegion = fn.getBody(); - auto fnType = op.getFunctionType(); mlir::TypeConverter::SignatureConversion signatureConversion( - fnType.getNumInputs()); + op.front().getNumArguments()); - for (const auto &argType : enumerate(fnType.getInputs())) { - auto convertedType = argType.value(); - if (!convertedType) - return mlir::failure(); - signatureConversion.addInputs(argType.index(), convertedType); - } + rewriter.inlineRegionBefore(srcRegion, dstRegion, fn.end()); + if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, + &signatureConversion))) + return mlir::failure(); - mlir::Type resultType; - if (fnType.getNumResults() == 1) { - resultType = fnType.getResult(0); - if (!resultType) - return mlir::failure(); - } - - auto fn = rewriter.create( - op.getLoc(), op.getName(), - rewriter.getFunctionType(signatureConversion.getConvertedTypes(), - resultType ? mlir::TypeRange(resultType) - : mlir::TypeRange())); - - rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); return mlir::LogicalResult::success(); } }; @@ -577,27 +564,19 @@ void ConvertCIRToFuncPass::runOnOperation() { // a subsequent conversion. // Convert cir.func to builtin.func - mlir::ConversionTarget fnTarget(getContext()); - fnTarget.addLegalOp(); - fnTarget.addIllegalOp(); + mlir::ConversionTarget target(getContext()); + target.addLegalOp(); + target.addLegalDialect(); + target.addIllegalOp(); - mlir::RewritePatternSet fnPatterns(&getContext()); - fnPatterns.add(fnPatterns.getContext()); + mlir::RewritePatternSet patterns(&getContext()); + mlir::TypeConverter converter; + patterns.add(converter, patterns.getContext()); + patterns.add(patterns.getContext()); auto module = getOperation(); - if (failed(applyPartialConversion(module, fnTarget, std::move(fnPatterns)))) - signalPassFailure(); - - // Convert cir.return -> func.return, cir.call -> func.call - mlir::ConversionTarget retTarget(getContext()); - retTarget - .addLegalOp(); - retTarget.addIllegalOp(); - - mlir::RewritePatternSet retPatterns(&getContext()); - retPatterns.add(retPatterns.getContext()); - - if (failed(applyPartialConversion(module, retTarget, std::move(retPatterns)))) + if (failed(applyPartialConversion(module, target, std::move(patterns)))) signalPassFailure(); } diff --git a/clang/test/CIR/CIRToLLVM/array.cir b/clang/test/CIR/CIRToLLVM/array.cir index 5c4dffae96a2..23e5e1867f05 100644 --- a/clang/test/CIR/CIRToLLVM/array.cir +++ b/clang/test/CIR/CIRToLLVM/array.cir @@ -16,7 +16,7 @@ module { // MLIR-NEXT: } // MLIR-NEXT: } -// LLVM: = alloca i32, i64 ptrtoint (ptr getelementptr (i32, ptr null, i64 10) to i64) +// LLVM: = alloca i32, i64 10, align 16 // LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } undef, ptr %1, 0 // LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } %2, ptr %1, 1 // LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } %3, i64 0, 2 diff --git a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir index 6c3f7917c7f9..32ff7f2fd0f2 100644 --- a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir +++ b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir @@ -9,7 +9,7 @@ module { %2 = cir.cst(2 : i32) : i32 cir.store %2, %0 : i32, cir.ptr cir.store %2, %1 : i32, cir.ptr - + %3 = cir.load %0 : cir.ptr , i32 %4 = cir.unary(inc, %3) : i32, i32 cir.store %4, %0 : i32, cir.ptr diff --git a/clang/test/CIR/CodeGen/cmp.cpp b/clang/test/CIR/CodeGen/cmp.cpp index f0c3c78f89a3..1eb398fa3c6e 100644 --- a/clang/test/CIR/CodeGen/cmp.cpp +++ b/clang/test/CIR/CodeGen/cmp.cpp @@ -15,4 +15,4 @@ void c0(int a, int b) { // CHECK: = cir.cmp(le, %9, %10) : i32, !cir.bool // CHECK: = cir.cmp(ge, %12, %13) : i32, !cir.bool // CHECK: = cir.cmp(ne, %15, %16) : i32, !cir.bool -// CHECK: = cir.cmp(eq, %18, %19) : i32, !cir.bool \ No newline at end of file +// CHECK: = cir.cmp(eq, %18, %19) : i32, !cir.bool From 3fa5041301bda85bf55a185be61deab23cd3333f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 5 Nov 2022 01:04:32 -0400 Subject: [PATCH 0679/2301] [CIR][NFC] Remove default case on clang::Stmt switch Going through clangir with Werror to remove some previously ignored warnings. Simply remove this here. --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 8a5559383fa0..56bd7b5cdeb9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -65,8 +65,9 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, assert(0 && "not implemented"); switch (S->getStmtClass()) { - default: - llvm_unreachable("unknown statement class"); + case Stmt::OpenACCComputeConstructClass: + case Stmt::OMPScopeDirectiveClass: + case Stmt::OMPErrorDirectiveClass: case Stmt::NoStmtClass: case Stmt::CXXCatchStmtClass: case Stmt::SEHExceptStmtClass: From bcd2a9191ae9ccd408b7ce11f5815601628b8c07 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 5 Nov 2022 01:05:56 -0400 Subject: [PATCH 0680/2301] [CIR][Lowering] Change CIRReturnLowering to a OpConversionPattern This is to use the adaptor to account for changed argument types during other conversions. --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 40b20d4b0d3f..22f317ebc71d 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -78,15 +78,16 @@ struct ConvertCIRToFuncPass virtual StringRef getArgument() const override { return "cir-to-func"; } }; -class CIRReturnLowering : public mlir::OpRewritePattern { +class CIRReturnLowering + : public mlir::OpConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ReturnOp op, - mlir::PatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, op->getResultTypes(), - op->getOperands()); + matchAndRewrite(mlir::cir::ReturnOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, + adaptor.getOperands()); return mlir::LogicalResult::success(); } }; From d7e89ee32c67b99027f0de07acd3844c51adc465 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 5 Nov 2022 01:07:30 -0400 Subject: [PATCH 0681/2301] [CIR][Lowering] Setup a TypeConverter cir.ptr and IntegerType This is very simple at the moment and is probably where we'll want to consider some ABI details at some point. But at the moment just map, e.g., i32 to i32 and cir.ptr to MemRefTypes --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 22f317ebc71d..5d1dc2834661 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -514,6 +514,17 @@ void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { CIRCmpOpLowering, CIRBrOpLowering>(patterns.getContext()); } +mlir::TypeConverter prepareTypeConverter() { + mlir::TypeConverter converter; + converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { + return mlir::MemRefType::get({-1}, type.getPointee()); + }); + converter.addConversion( + [&](mlir::IntegerType type) -> mlir::Type { return type; }); + + return converter; +} + void ConvertCIRToLLVMPass::runOnOperation() { mlir::LLVMConversionTarget target(getContext()); target.addLegalOp(); @@ -572,9 +583,9 @@ void ConvertCIRToFuncPass::runOnOperation() { mlir::cir::CallOp>(); mlir::RewritePatternSet patterns(&getContext()); - mlir::TypeConverter converter; - patterns.add(converter, patterns.getContext()); + auto converter = prepareTypeConverter(); patterns.add(patterns.getContext()); + patterns.add(converter, patterns.getContext()); auto module = getOperation(); if (failed(applyPartialConversion(module, target, std::move(patterns)))) From 04a331e6cd968e6345aac390ba1c2b9a2494aec4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 5 Nov 2022 01:10:25 -0400 Subject: [PATCH 0682/2301] [CIR][Lowering] Support lowering allocas with cir.ptr types --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 5d1dc2834661..67160dc4cb80 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -126,6 +126,10 @@ class CIRAllocaLowering : public mlir::OpRewritePattern { mlir::MemRefType::get(arraytype.getSize(), arraytype.getEltType()); } else if (type.isa() || type.isa()) { memreftype = mlir::MemRefType::get({}, op.getAllocaType()); + } else if (type.isa()) { + auto ptrType = type.cast(); + auto innerMemref = mlir::MemRefType::get({-1}, ptrType.getPointee()); + memreftype = mlir::MemRefType::get({}, innerMemref); } else { llvm_unreachable("type to be allocated not supported yet"); } From 4e710cd21e801df27b8afc17a8ee569a7450a6f7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 5 Nov 2022 01:11:48 -0400 Subject: [PATCH 0683/2301] [CIR][Lowering] Implement better support for lowering cir.func signatures This previously only worked with base mlir::Types and failed with cir types. So integrate the full machinery for lowering function types and signatures. --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 31 +++++++++++++++---- clang/test/CIR/CIRToLLVM/array.cir | 1 - clang/test/CIR/CIRToLLVM/binop-fp.cir | 1 - clang/test/CIR/CIRToLLVM/binop-int.cir | 1 - clang/test/CIR/CIRToLLVM/bool.cir | 1 - clang/test/CIR/CIRToLLVM/cmp.cir | 1 - clang/test/CIR/CIRToLLVM/memref.cir | 1 - clang/test/CIR/CIRToLLVM/unary-inc-dec.cir | 1 - clang/test/CIR/CIRToLLVM/unary-plus-minus.cir | 1 - 9 files changed, 25 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 67160dc4cb80..570f392028f0 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -200,19 +200,38 @@ class CIRFuncLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto fn = rewriter.replaceOpWithNewOp( - op, op.getName(), op.getFunctionType()); - auto &srcRegion = op.getBody(); - auto &dstRegion = fn.getBody(); + auto fnType = op.getFunctionType(); mlir::TypeConverter::SignatureConversion signatureConversion( - op.front().getNumArguments()); + fnType.getNumInputs()); - rewriter.inlineRegionBefore(srcRegion, dstRegion, fn.end()); + for (const auto &argType : enumerate(fnType.getInputs())) { + auto convertedType = typeConverter->convertType(argType.value()); + if (!convertedType) + return mlir::failure(); + signatureConversion.addInputs(argType.index(), convertedType); + } + + mlir::Type resultType; + if (fnType.getNumResults() == 1) { + resultType = getTypeConverter()->convertType(fnType.getResult(0)); + if (!resultType) + return mlir::failure(); + } + + auto fn = rewriter.create( + op.getLoc(), op.getName(), + rewriter.getFunctionType(signatureConversion.getConvertedTypes(), + resultType ? mlir::TypeRange(resultType) + : mlir::TypeRange())); + + rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, &signatureConversion))) return mlir::failure(); + rewriter.eraseOp(op); + return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/CIRToLLVM/array.cir b/clang/test/CIR/CIRToLLVM/array.cir index 23e5e1867f05..8a467f059c52 100644 --- a/clang/test/CIR/CIRToLLVM/array.cir +++ b/clang/test/CIR/CIRToLLVM/array.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/binop-fp.cir b/clang/test/CIR/CIRToLLVM/binop-fp.cir index 30e56b04b090..b7fba41b9710 100644 --- a/clang/test/CIR/CIRToLLVM/binop-fp.cir +++ b/clang/test/CIR/CIRToLLVM/binop-fp.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/binop-int.cir b/clang/test/CIR/CIRToLLVM/binop-int.cir index d5b26e443d20..088f9b8cc7eb 100644 --- a/clang/test/CIR/CIRToLLVM/binop-int.cir +++ b/clang/test/CIR/CIRToLLVM/binop-int.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/bool.cir b/clang/test/CIR/CIRToLLVM/bool.cir index 10300d027ef7..4d16110e3cba 100644 --- a/clang/test/CIR/CIRToLLVM/bool.cir +++ b/clang/test/CIR/CIRToLLVM/bool.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/cmp.cir b/clang/test/CIR/CIRToLLVM/cmp.cir index f7d821ad2467..f0c9d791b751 100644 --- a/clang/test/CIR/CIRToLLVM/cmp.cir +++ b/clang/test/CIR/CIRToLLVM/cmp.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/memref.cir b/clang/test/CIR/CIRToLLVM/memref.cir index bdf7409c5929..6461094c8b03 100644 --- a/clang/test/CIR/CIRToLLVM/memref.cir +++ b/clang/test/CIR/CIRToLLVM/memref.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() -> i32 { diff --git a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir index 32ff7f2fd0f2..f67358c21ace 100644 --- a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir +++ b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir b/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir index 7277df90d2c5..37a1d159ed1e 100644 --- a/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir +++ b/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { From 4a186eca9d08602f85a4c1f8f1bc410748b43466 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 2 Nov 2022 14:03:20 -0300 Subject: [PATCH 0684/2301] [CIR][CIRGen][NFC] Add OpaqueValueMappingData and RAII functionality This is going to be used for building coroutines suspend logic soon. --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 140 +++++++++++++++++- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 2 files changed, 139 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 06299e4242ca..bcc7a7f290c8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -192,9 +192,7 @@ class CIRGenFunction { return &*RetBlocks.back(); } - // --- // Scope entry block tracking - // --- mlir::Block *getEntryBlock() { return EntryBlock; } mlir::Location BeginLoc, EndLoc; @@ -227,6 +225,144 @@ class CIRGenFunction { LexicalScopeContext *currLexScope = nullptr; + // --------------------- + // Opaque value handling + // --------------------- + + /// Keeps track of the current set of opaque value expressions. + llvm::DenseMap OpaqueLValues; + llvm::DenseMap OpaqueRValues; + +public: + /// A non-RAII class containing all the information about a bound + /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for + /// this which makes individual mappings very simple; using this + /// class directly is useful when you have a variable number of + /// opaque values or don't want the RAII functionality for some + /// reason. + class OpaqueValueMappingData { + const OpaqueValueExpr *OpaqueValue; + bool BoundLValue; + + OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue) + : OpaqueValue(ov), BoundLValue(boundLValue) {} + + public: + OpaqueValueMappingData() : OpaqueValue(nullptr) {} + + static bool shouldBindAsLValue(const Expr *expr) { + // gl-values should be bound as l-values for obvious reasons. + // Records should be bound as l-values because IR generation + // always keeps them in memory. Expressions of function type + // act exactly like l-values but are formally required to be + // r-values in C. + return expr->isGLValue() || expr->getType()->isFunctionType() || + hasAggregateEvaluationKind(expr->getType()); + } + + static OpaqueValueMappingData + bind(CIRGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e) { + if (shouldBindAsLValue(ov)) + return bind(CGF, ov, CGF.buildLValue(e)); + return bind(CGF, ov, CGF.buildAnyExpr(e)); + } + + static OpaqueValueMappingData + bind(CIRGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv) { + assert(shouldBindAsLValue(ov)); + CGF.OpaqueLValues.insert(std::make_pair(ov, lv)); + return OpaqueValueMappingData(ov, true); + } + + static OpaqueValueMappingData + bind(CIRGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv) { + assert(!shouldBindAsLValue(ov)); + CGF.OpaqueRValues.insert(std::make_pair(ov, rv)); + + OpaqueValueMappingData data(ov, false); + + // Work around an extremely aggressive peephole optimization in + // EmitScalarConversion which assumes that all other uses of a + // value are extant. + assert(!UnimplementedFeature::peepholeProtection() && "NYI"); + return data; + } + + bool isValid() const { return OpaqueValue != nullptr; } + void clear() { OpaqueValue = nullptr; } + + void unbind(CIRGenFunction &CGF) { + assert(OpaqueValue && "no data to unbind!"); + + if (BoundLValue) { + CGF.OpaqueLValues.erase(OpaqueValue); + } else { + CGF.OpaqueRValues.erase(OpaqueValue); + assert(!UnimplementedFeature::peepholeProtection() && "NYI"); + } + } + }; + + /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr. + class OpaqueValueMapping { + CIRGenFunction &CGF; + OpaqueValueMappingData Data; + + public: + static bool shouldBindAsLValue(const Expr *expr) { + return OpaqueValueMappingData::shouldBindAsLValue(expr); + } + + /// Build the opaque value mapping for the given conditional + /// operator if it's the GNU ?: extension. This is a common + /// enough pattern that the convenience operator is really + /// helpful. + /// + OpaqueValueMapping(CIRGenFunction &CGF, + const AbstractConditionalOperator *op) + : CGF(CGF) { + if (isa(op)) + // Leave Data empty. + return; + + const BinaryConditionalOperator *e = cast(op); + Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(), + e->getCommon()); + } + + /// Build the opaque value mapping for an OpaqueValueExpr whose source + /// expression is set to the expression the OVE represents. + OpaqueValueMapping(CIRGenFunction &CGF, const OpaqueValueExpr *OV) + : CGF(CGF) { + if (OV) { + assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used " + "for OVE with no source expression"); + Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr()); + } + } + + OpaqueValueMapping(CIRGenFunction &CGF, const OpaqueValueExpr *opaqueValue, + LValue lvalue) + : CGF(CGF), + Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {} + + OpaqueValueMapping(CIRGenFunction &CGF, const OpaqueValueExpr *opaqueValue, + RValue rvalue) + : CGF(CGF), + Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {} + + void pop() { + Data.unbind(CGF); + Data.clear(); + } + + ~OpaqueValueMapping() { + if (Data.isValid()) + Data.unbind(CGF); + } + }; + +private: /// Declare a variable in the current scope, return success if the variable /// wasn't declared yet. mlir::LogicalResult declare(const clang::Decl *var, clang::QualType ty, diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index b36ee0204304..a34e0687c1b5 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -52,6 +52,7 @@ struct UnimplementedFeature { static bool incrementProfileCounter() { return false; } static bool requiresReturnValueCheck() { return false; } static bool shouldEmitLifetimeMarkers() { return false; } + static bool peepholeProtection() { return false; } }; } // namespace cir From 63481571197ad1788634efba354edf1410cac963 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 15 Nov 2022 12:51:11 -0800 Subject: [PATCH 0685/2301] [CIR] Add cir.await operation Populate new operations and add CIRDialect.cpp building bits. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 80 ++++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 46 +++++++++++ 2 files changed, 126 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index bda6fbb36058..8bee629b1f33 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1314,4 +1314,84 @@ def CallOp : CIR_Op<"call", let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// AwaitOp +//===----------------------------------------------------------------------===// + +def AwaitOp : CIR_Op<"await", + [DeclareOpInterfaceMethods, + RecursivelySpeculatable, NoRegionArguments]> { + let summary = "Wraps C++ co_await implicit logic"; + let description = [{ + The under the hood effect of using C++ `co_await expr` roughly + translates to: + + ```c++ + // co_await expr; + + auto &&x = CommonExpr(); + if (!x.await_ready()) { + ... + x.await_suspend(...); + ... + } + x.await_resume(); + ``` + + `cir.await` represents this logic by using 3 regions: + - ready: covers veto power from x.await_ready() + - suspend: wraps actual x.await_suspend() logic + - resume: handles x.await_resume() + + Breaking this up in regions allow individual scrutiny of conditions + which might lead to folding some of them out. Lowerings coming out + of CIR, e.g. LLVM, should use the `suspend` region to track more + lower level codegen (e.g. intrinsic emission for saving/suspending). + + From the C++ snippet we get: + + ```mlir + cir.scope { + ... // auto &&x = CommonExpr(); + cir.await(ready : { + ... // x.await_ready() + }, suspend : { + ... // x.await_suspend() + }, resume : { + ... // x.await_resume() + }) + } + ``` + + Note that resulution of the common expression is assumed to happen + as part of the enclosing await scope. + }]; + + let regions = (region SizedRegion<1>:$ready, + SizedRegion<1>:$suspend, + SizedRegion<1>:$resume); + let assemblyFormat = [{ + `(` + `ready` `:` $ready `,` + `suspend` `:` $suspend `,` + `resume` `:` $resume `,` + `)` + attr-dict + }]; + + let skipDefaultBuilders = 1; + let builders = [ + OpBuilder<(ins + CArg<"function_ref", + "nullptr">:$readyBuilder, + CArg<"function_ref", + "nullptr">:$suspendBuilder, + CArg<"function_ref", + "nullptr">:$resumeBuilder + )> + ]; + + let hasVerifier = 1; +} + #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a895c462987b..51c8c89a19c9 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1355,6 +1355,52 @@ LogicalResult UnaryOp::verify() { llvm_unreachable("Unknown UnaryOp kind?"); } +//===----------------------------------------------------------------------===// +// AwaitOp +//===----------------------------------------------------------------------===// + +void AwaitOp::build(OpBuilder &builder, OperationState &result, + function_ref readyBuilder, + function_ref suspendBuilder, + function_ref resumeBuilder) { + OpBuilder::InsertionGuard guard(builder); + + Region *readyRegion = result.addRegion(); + builder.createBlock(readyRegion); + readyBuilder(builder, result.location); + + Region *suspendRegion = result.addRegion(); + builder.createBlock(suspendRegion); + suspendBuilder(builder, result.location); + + Region *resumeRegion = result.addRegion(); + builder.createBlock(resumeRegion); + resumeBuilder(builder, result.location); +} + +/// Given the region at `index`, or the parent operation if `index` is None, +/// return the successor regions. These are the regions that may be selected +/// during the flow of control. `operands` is a set of optional attributes +/// that correspond to a constant value for each operand, or null if that +/// operand is not a constant. +void AwaitOp::getSuccessorRegions(mlir::RegionBranchPoint point, + SmallVectorImpl ®ions) { + // If any index all the underlying regions branch back to the parent + // operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor()); + return; + } + + // FIXME: we want to look at cond region for getting more accurate results + // if the other regions will get a chance to execute. + regions.push_back(RegionSuccessor(&this->getReady())); + regions.push_back(RegionSuccessor(&this->getSuspend())); + regions.push_back(RegionSuccessor(&this->getResume())); +} + +LogicalResult AwaitOp::verify() { return success(); } + //===----------------------------------------------------------------------===// // CIR defined traits //===----------------------------------------------------------------------===// From ab4d15e487aaa4b2ca4dcaef7cc9ea6368dc222f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 16 Nov 2022 11:23:25 -0800 Subject: [PATCH 0686/2301] [CIR][NFC] Add one more builder to cir::YieldOp --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8bee629b1f33..6cb52508ede7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -481,6 +481,9 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, mlir::cir::YieldOpKindAttr kattr = mlir::cir::YieldOpKindAttr::get( $_builder.getContext(), kind); $_state.addAttribute(getKindAttrName($_state.name), kattr); + }]>, + OpBuilder<(ins "ValueRange":$results), [{ + $_state.addOperands(results); }]> ]; From 4bf4ce85f1372f303e3651f32c2c8d696f6a93a1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 16 Nov 2022 11:30:12 -0800 Subject: [PATCH 0687/2301] [CIR][CIRGen] Handle OpaqueValueExprClass as part of buildLValue --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 35 ++++++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 12 ++++++++- 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 8d9535e178e2..4f2b9f05cd3d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1363,6 +1363,39 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( return makeAddrLValue(Object, M->getType(), AlignmentSource::Decl); } +LValue CIRGenFunction::buildOpaqueValueLValue(const OpaqueValueExpr *e) { + assert(OpaqueValueMappingData::shouldBindAsLValue(e)); + return getOrCreateOpaqueLValueMapping(e); +} + +LValue +CIRGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) { + assert(OpaqueValueMapping::shouldBindAsLValue(e)); + + llvm::DenseMap::iterator it = + OpaqueLValues.find(e); + + if (it != OpaqueLValues.end()) + return it->second; + + assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted"); + return buildLValue(e->getSourceExpr()); +} + +RValue +CIRGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) { + assert(!OpaqueValueMapping::shouldBindAsLValue(e)); + + llvm::DenseMap::iterator it = + OpaqueRValues.find(e); + + if (it != OpaqueRValues.end()) + return it->second; + + assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted"); + return buildAnyExpr(e->getSourceExpr()); +} + /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. @@ -1428,6 +1461,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { case Expr::CXXStaticCastExprClass: case Expr::ImplicitCastExprClass: return buildCastLValue(cast(E)); + case Expr::OpaqueValueExprClass: + return buildOpaqueValueLValue(cast(E)); case Expr::MaterializeTemporaryExprClass: return buildMaterializeTemporaryExpr(cast(E)); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index bcc7a7f290c8..3100d537f8ac 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -815,6 +815,8 @@ class CIRGenFunction { mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); mlir::LogicalResult buildContinueStmt(const clang::ContinueStmt &S); + LValue buildOpaqueValueLValue(const OpaqueValueExpr *e); + /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. @@ -1099,7 +1101,7 @@ class CIRGenFunction { // TODO: this can also be abstrated into common AST helpers bool hasBooleanRepresentation(clang::QualType Ty); - /// GetAddrOfLocalVar - Return the address of a local variable. + /// Return the address of a local variable. Address GetAddrOfLocalVar(const clang::VarDecl *VD) { auto it = LocalDeclMap.find(VD); assert(it != LocalDeclMap.end() && @@ -1107,6 +1109,14 @@ class CIRGenFunction { return it->second; } + /// Given an opaque value expression, return its LValue mapping if it exists, + /// otherwise create one. + LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e); + + /// Given an opaque value expression, return its RValue mapping if it exists, + /// otherwise create one. + RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e); + /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts. static bool isWrappedCXXThis(const clang::Expr *E); From f055b88db75cd4ee443a26e108bb7dc026fc3973 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 16 Nov 2022 12:26:21 -0800 Subject: [PATCH 0688/2301] [CIR][CIRGen] Enhance the handling of function call arg build up to account for aggregate temporaries --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 27 ++++++++++----- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 9 +++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 11 ++++++ clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h | 5 +++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 39 +++++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenValue.h | 8 +++++ 6 files changed, 80 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index b62988495270..3d870e297f8a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -70,7 +70,7 @@ CIRGenFunctionInfo *CIRGenFunctionInfo::create( namespace { -/// Encapsulates information about hte way function arguments from +/// Encapsulates information about the way function arguments from /// CIRGenFunctionInfo should be passed to actual CIR function. class ClangToCIRArgMapping { static const unsigned InvalidIndex = ~0U; @@ -150,10 +150,15 @@ void ClangToCIRArgMapping::construct(const ASTContext &Context, assert(false && "NYI"); case ABIArgInfo::Extend: case ABIArgInfo::Direct: { - assert(!AI.getCoerceToType().dyn_cast() && "NYI"); + auto STy = AI.getCoerceToType().dyn_cast(); // FIXME: handle sseregparm someday... - // FIXME: handle structs - CIRArgs.NumberOfArgs = 1; + if (AI.isDirect() && AI.getCanBeFlattened() && STy) { + // TODO(cir): we might not want to break it this early, revisit this + // once we have a better ABI lowering story. + CIRArgs.NumberOfArgs = STy.getMembers().size(); + } else { + CIRArgs.NumberOfArgs = 1; + } break; } } @@ -531,7 +536,10 @@ void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. // However, we still have to push an EH-only cleanup in case we unwind before // we make it to the call. - assert(!type->isRecordType() && "Record type args NYI"); + if (type->isRecordType() && + type->castAs()->getDecl()->isParamDestroyedInCallee()) { + llvm_unreachable("NYI"); + } if (HasAggregateEvalKind && isa(E) && cast(E)->getCastKind() == CK_LValueToRValue) { @@ -541,12 +549,15 @@ void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, args.add(buildAnyExprToTemp(E), type); } -/// buildAnyExprToTemp - Similar to buildAnyExpr(), however, the result will -/// always be accessible even if no aggregate location is provided. +/// Similar to buildAnyExpr(), however, the result will always be accessible +/// even if no aggregate location is provided. RValue CIRGenFunction::buildAnyExprToTemp(const Expr *E) { AggValueSlot AggSlot = AggValueSlot::ignored(); - assert(!hasAggregateEvaluationKind(E->getType()) && "aggregate args NYI"); + if (hasAggregateEvaluationKind(E->getType())) + AggSlot = + CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), "agg.tmp"); + return buildAnyExpr(E, AggSlot); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 4f2b9f05cd3d..6dbd0c6bd42f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -530,8 +530,13 @@ RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot, return RValue::get(buildScalarExpr(E)); case TEK_Complex: assert(0 && "not implemented"); - case TEK_Aggregate: - assert(0 && "not implemented"); + case TEK_Aggregate: { + if (!ignoreResult && aggSlot.isIgnored()) + aggSlot = + CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), "agg-temp"); + buildAggExpr(E, aggSlot); + return aggSlot.asRValue(); + } } llvm_unreachable("bad evaluation kind"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 3100d537f8ac..af737c18ea08 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1232,6 +1232,17 @@ class CIRGenFunction { Address CreateMemTempWithoutCast(QualType T, CharUnits Align, mlir::Location Loc, const Twine &Name = "tmp"); + + /// Create a temporary memory object for the given + /// aggregate type. + AggValueSlot CreateAggTemp(QualType T, mlir::Location Loc, + const Twine &Name = "tmp", + Address *Alloca = nullptr) { + return AggValueSlot::forAddr( + CreateMemTemp(T, Loc, Name, Alloca), T.getQualifiers(), + AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h index 50ffea4e94f1..e640584558be 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h @@ -202,6 +202,11 @@ class ABIArgInfo { CanBeFlattened = Flatten; } + bool getCanBeFlattened() const { + assert(isDirect() && "Invalid kind!"); + return CanBeFlattened; + } + mlir::Type getPaddingType() const { return (canHavePaddingType() ? PaddingType : nullptr); } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index e7b188a049ee..aa8fc3ce3c11 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -195,20 +195,37 @@ mlir::Type CIRGenTypes::ConvertFunctionTypeInternal(QualType QFT) { return ResultType; } -/// isFuncParamTypeConvertible - Return true if the specified type in a function -/// parameter or result position can be converted to a CIR type at this point. -/// This boils down to being whether it is complete, as well as whether we've -/// temporarily deferred expanding the type because we're in a recursive -/// context. +/// Return true if the specified type in a function parameter or result position +/// can be converted to a CIR type at this point. This boils down to being +/// whether it is complete, as well as whether we've temporarily deferred +/// expanding the type because we're in a recursive context. bool CIRGenTypes::isFuncParamTypeConvertible(clang::QualType Ty) { // Some ABIs cannot have their member pointers represented in LLVM IR unless // certain circumstances have been reached. assert(!Ty->getAs() && "NYI"); // If this isn't a tagged type, we can convert it! - auto *TT = Ty->getAs(); - assert(!TT && "Only non-TagTypes implemented atm."); - return true; + const TagType *TT = Ty->getAs(); + if (!TT) + return true; + + // Incomplete types cannot be converted. + if (TT->isIncompleteType()) + return false; + + // If this is an enum, then it is always safe to convert. + const RecordType *RT = dyn_cast(TT); + if (!RT) + return true; + + // Otherwise, we have to be careful. If it is a struct that we're in the + // process of expanding, then we can't convert the function type. That's ok + // though because we must be in a pointer context under the struct, so we can + // just convert it to a dummy type. + // + // We decide this by checking whether ConvertRecordDeclType returns us an + // opaque type for a struct that we know is defined. + return isSafeToConvert(RT->getDecl(), *this); } /// Code to verify a given function type is complete, i.e. the return type and @@ -269,10 +286,14 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::SveCount: llvm_unreachable("NYI"); case BuiltinType::Void: + // TODO(cir): how should we model this? + ResultType = ::mlir::IntegerType::get(Builder.getContext(), 8); + break; + case BuiltinType::ObjCId: case BuiltinType::ObjCClass: case BuiltinType::ObjCSel: - // FIXME: if we emit like LLVM we probably wanna use i8. + // TODO(cir): probably same as BuiltinType::Void assert(0 && "not implemented"); break; diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 230aadaf191a..a460b055e49d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -407,6 +407,14 @@ class AggValueSlot { IsAliased_t isPotentiallyAliased() const { return IsAliased_t(AliasedFlag); } + RValue asRValue() const { + if (isIgnored()) { + return RValue::getIgnored(); + } else { + return RValue::getAggregate(getAddress(), isVolatile()); + } + } + /// Get the preferred size to use when storing a value to this slot. This /// is the type size unless that might overlap another object, in which /// case it's the dsize. From 2774d6e43cd210f2347302e8dbef50a6eda6c450 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 16 Nov 2022 15:38:01 -0800 Subject: [PATCH 0689/2301] [CIR][CIRGen] Skeleton for built-in codegen support: we soon need to handle __builtin_coro_* stuff --- clang/lib/CIR/CodeGen/CIRGenCall.h | 8 ++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 45 ++++++++++++++++++- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 3 files changed, 53 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 1ea32e5e5cc5..eb03ad4e989b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -106,6 +106,14 @@ class CIRGenCallee { return KindOrFunctionPointer == SpecialKind::Builtin; } + static CIRGenCallee forBuiltin(unsigned builtinID, + const clang::FunctionDecl *builtinDecl) { + CIRGenCallee result(SpecialKind::Builtin); + result.BuiltinInfo.Decl = builtinDecl; + result.BuiltinInfo.ID = builtinID; + return result; + } + bool isPsuedoDestructor() const { return KindOrFunctionPointer == SpecialKind::PsuedoDestructor; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 6dbd0c6bd42f..586e1febfe27 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -17,6 +17,7 @@ #include "UnimplementedFeatureGuarding.h" #include "clang/AST/GlobalDecl.h" +#include "clang/Basic/Builtins.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -149,10 +150,52 @@ LValue CIRGenFunction::buildLValueForFieldInitialization( llvm_unreachable("NYI"); } +// Detect the unusual situation where an inline version is shadowed by a +// non-inline version. In that case we should pick the external one +// everywhere. That's GCC behavior too. +static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) { + for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl()) + if (!PD->isInlineBuiltinDeclaration()) + return false; + return true; +} + static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); - assert(!FD->getBuiltinID() && "Builtins NYI"); + if (auto builtinID = FD->getBuiltinID()) { + std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str(); + std::string NoBuiltins = "no-builtins"; + + auto *A = FD->getAttr(); + StringRef Ident = A ? A->getLabel() : FD->getName(); + std::string FDInlineName = (Ident + ".inline").str(); + + auto &CGF = *CGM.getCurrCIRGenFun(); + bool IsPredefinedLibFunction = + CGM.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID); + bool HasAttributeNoBuiltin = false; + assert(!UnimplementedFeature::attributeNoBuiltin() && "NYI"); + // bool HasAttributeNoBuiltin = + // CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) || + // CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins); + + // When directing calling an inline builtin, call it through it's mangled + // name to make it clear it's not the actual builtin. + if (CGF.CurFn.getName() != FDInlineName && + onlyHasInlineBuiltinDeclaration(FD)) { + assert(0 && "NYI"); + } + + // Replaceable builtins provide their own implementation of a builtin. If we + // are in an inline builtin implementation, avoid trivial infinite + // recursion. Honor __attribute__((no_builtin("foo"))) or + // __attribute__((no_builtin)) on the current function unless foo is + // not a predefined library function which means we must generate the + // builtin no matter what. + else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin) + return CIRGenCallee::forBuiltin(builtinID, FD); + } auto CalleePtr = buildFunctionDeclPointer(CGM, GD); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index a34e0687c1b5..e2a881a1f4cf 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -53,6 +53,7 @@ struct UnimplementedFeature { static bool requiresReturnValueCheck() { return false; } static bool shouldEmitLifetimeMarkers() { return false; } static bool peepholeProtection() { return false; } + static bool attributeNoBuiltin() { return false; } }; } // namespace cir From 2d19b4482bb445f1413d97e9defb2a94c78fef61 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 16 Nov 2022 17:23:41 -0800 Subject: [PATCH 0690/2301] [CIR][CIRGen] Initial bridging support for emitting compiler builtins Start with only a few, long term we should probably by default emit all of them as calls and mark the cir.func with 'builtin'. This doesn't work just yet, still need some massaging on emitting calls, which should come next. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 195 ++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenCall.h | 9 + clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 6 + clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 9 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 10 ++ clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + 6 files changed, 229 insertions(+), 1 deletion(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp new file mode 100644 index 000000000000..95ce4ba0a28e --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -0,0 +1,195 @@ +//===---- CIRGenBuiltin.cpp - Emit CIR for builtins -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Builtin calls as CIR or a function call to be +// later resolved. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" +#include "CIRGenCall.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "UnimplementedFeatureGuarding.h" + +// TODO(cir): we shouldn't need this but we currently reuse intrinsic IDs for +// convenience. +#include "llvm/IR/Intrinsics.h" + +#include "clang/AST/GlobalDecl.h" +#include "clang/Basic/Builtins.h" + +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Value.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +using namespace cir; +using namespace clang; +using namespace mlir::cir; +using namespace llvm; + +RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, + const CallExpr *E, + ReturnValueSlot ReturnValue, + bool &emitAsCall) { + const FunctionDecl *FD = GD.getDecl()->getAsFunction(); + + // This is used as fallback mechanism for re-emitting selected built-ins as + // regular function calls. + emitAsCall = false; + + // See if we can constant fold this builtin. If so, don't emit it at all. + // TODO: Extend this handling to all builtin calls that we can constant-fold. + Expr::EvalResult Result; + if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getASTContext()) && + !Result.hasSideEffects()) { + llvm_unreachable("NYI"); + } + + // If current long-double semantics is IEEE 128-bit, replace math builtins + // of long-double with f128 equivalent. + // TODO: This mutation should also be applied to other targets other than PPC, + // after backend supports IEEE 128-bit style libcalls. + if (getTarget().getTriple().isPPC64() && + &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad()) + llvm_unreachable("NYI"); + + // If the builtin has been declared explicitly with an assembler label, + // disable the specialized emitting below. Ideally we should communicate the + // rename in IR, or at least avoid generating the intrinsic calls that are + // likely to get lowered to the renamed library functions. + const unsigned BuiltinIDIfNoAsmLabel = + FD->hasAttr() ? 0 : BuiltinID; + + // There are LLVM math intrinsics/instructions corresponding to math library + // functions except the LLVM op will never set errno while the math library + // might. Also, math builtins have the same semantics as their math library + // twins. Thus, we can transform math library and builtin calls to their + // LLVM counterparts if the call is marked 'const' (known to never set errno). + // In case FP exceptions are enabled, the experimental versions of the + // intrinsics model those. + bool ConstWithoutErrnoAndExceptions = + getContext().BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID); + bool ConstWithoutExceptions = + getContext().BuiltinInfo.isConstWithoutExceptions(BuiltinID); + if (FD->hasAttr() || + ((ConstWithoutErrnoAndExceptions || ConstWithoutExceptions) && + (!ConstWithoutErrnoAndExceptions || (!getLangOpts().MathErrno)))) { + llvm_unreachable("NYI"); + } + + switch (BuiltinIDIfNoAsmLabel) { + default: + llvm_unreachable("NYI"); + break; + + case Builtin::BI__builtin_coro_id: + case Builtin::BI__builtin_coro_promise: + case Builtin::BI__builtin_coro_resume: + case Builtin::BI__builtin_coro_noop: + case Builtin::BI__builtin_coro_destroy: + case Builtin::BI__builtin_coro_done: + case Builtin::BI__builtin_coro_alloc: + case Builtin::BI__builtin_coro_begin: + case Builtin::BI__builtin_coro_end: + case Builtin::BI__builtin_coro_suspend: + case Builtin::BI__builtin_coro_align: + llvm_unreachable("NYI"); + + case Builtin::BI__builtin_coro_frame: + case Builtin::BI__builtin_coro_free: + case Builtin::BI__builtin_coro_size: { + // Inform the caller we rather be emitted as regular function calls + emitAsCall = true; + return RValue::getIgnored(); + } + } + + // If this is an alias for a lib function (e.g. __builtin_sin), emit + // the call using the normal call path, but using the unmangled + // version of the function name. + if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) + llvm_unreachable("NYI"); + + // If this is a predefined lib function (e.g. malloc), emit the call + // using exactly the normal call path. + if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) + llvm_unreachable("NYI"); + + // Check that a call to a target specific builtin has the correct target + // features. + // This is down here to avoid non-target specific builtins, however, if + // generic builtins start to require generic target features then we + // can move this up to the beginning of the function. + // checkTargetFeatures(E, FD); + + if (unsigned VectorWidth = + getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID)) + llvm_unreachable("NYI"); + + // See if we have a target specific intrinsic. + auto Name = getContext().BuiltinInfo.getName(BuiltinID).str(); + Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic; + StringRef Prefix = + llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()); + if (!Prefix.empty()) { + IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name); + // NOTE we don't need to perform a compatibility flag check here since the + // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the + // MS builtins via ALL_MS_LANGUAGES and are filtered earlier. + if (IntrinsicID == Intrinsic::not_intrinsic) + IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name); + } + + if (IntrinsicID != Intrinsic::not_intrinsic) { + llvm_unreachable("NYI"); + } + + // Some target-specific builtins can have aggregate return values, e.g. + // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force + // ReturnValue to be non-null, so that the target-specific emission code can + // always just emit into it. + TypeEvaluationKind EvalKind = getEvaluationKind(E->getType()); + if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) { + llvm_unreachable("NYI"); + } + + // Now see if we can emit a target-specific builtin. + if (auto v = buildTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); + // ErrorUnsupported(E, "builtin function"); + + // Unknown builtin, for now just dump it out and return undef. + return GetUndefRValue(E->getType()); +} + +static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, + unsigned BuiltinID, + const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch) { + llvm_unreachable("NYI"); + return {}; +} + +mlir::Value +CIRGenFunction::buildTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue) { + if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) { + assert(getContext().getAuxTargetInfo() && "Missing aux target info"); + return buildTargetArchBuiltinExpr( + this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E, + ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch()); + } + + return buildTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue, + getTarget().getTriple().getArch()); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index eb03ad4e989b..278f9b6821ea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -106,6 +106,15 @@ class CIRGenCallee { return KindOrFunctionPointer == SpecialKind::Builtin; } + const clang::FunctionDecl *getBuiltinDecl() const { + assert(isBuiltin()); + return BuiltinInfo.Decl; + } + unsigned getBuiltinID() const { + assert(isBuiltin()); + return BuiltinInfo.ID; + } + static CIRGenCallee forBuiltin(unsigned builtinID, const clang::FunctionDecl *builtinDecl) { CIRGenCallee result(SpecialKind::Builtin); diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 6ea575c7a3df..bb6d68fad5cf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -157,6 +157,12 @@ struct ParamReferenceReplacerRAII { }; } // namespace +// Emit coroutine intrinsic and patch up arguments of the token type. +RValue CIRGenFunction::buildCoroutineIntrinsic(const CallExpr *E, + unsigned int IID) { + llvm_unreachable("NYI"); +} + static mlir::LogicalResult buildBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S, Stmt *Body) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 586e1febfe27..8efe608a14eb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -599,7 +599,14 @@ RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, CIRGenCallee callee = buildCallee(E->getCallee()); - assert(!callee.isBuiltin() && "builtins NYI"); + bool emitBuiltinAsCall = false; + if (callee.isBuiltin()) { + auto rv = buildBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), + E, ReturnValue, emitBuiltinAsCall); + if (!emitBuiltinAsCall) + return rv; + } + assert(!callee.isPsuedoDestructor() && "NYI"); return buildCall(E->getCallee()->getType(), callee, E, ReturnValue); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index af737c18ea08..40de73962f52 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -582,6 +582,8 @@ class CIRGenFunction { CIRGenTypes &getTypes() const { return CGM.getTypes(); } + const TargetInfo &getTarget() const { return CGM.getTarget(); } + /// Helpers to convert Clang's SourceLocation to a MLIR Location. mlir::Location getLoc(clang::SourceLocation SLoc); @@ -767,6 +769,8 @@ class CIRGenFunction { mlir::LogicalResult buildFunctionBody(const clang::Stmt *Body); mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S); + RValue buildCoroutineIntrinsic(const CallExpr *E, unsigned int IID); + // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. mlir::LogicalResult buildStmt(const clang::Stmt *S, bool useCurrentScope); @@ -932,6 +936,12 @@ class CIRGenFunction { LValue buildCompoundAssignmentLValue(const clang::CompoundAssignOperator *E); LValue buildUnaryOpLValue(const clang::UnaryOperator *E); LValue buildStringLiteralLValue(const StringLiteral *E); + RValue buildBuiltinExpr(const clang::GlobalDecl GD, unsigned BuiltinID, + const clang::CallExpr *E, ReturnValueSlot ReturnValue, + bool &emitAsCall); + mlir::Value buildTargetBuiltinExpr(unsigned BuiltinID, + const clang::CallExpr *E, + ReturnValueSlot ReturnValue); /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 436b5da9388c..92fe8fc1deac 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -10,6 +10,7 @@ include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR + CIRGenBuiltin.cpp CIRGenCXX.cpp CIRGenCXXABI.cpp CIRGenCall.cpp From d1f2cfd45061cd4224f38bef68862ba17bb1793e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 17 Nov 2022 11:37:28 -0800 Subject: [PATCH 0691/2301] [CIR][CIRGen] Emit some __builtin_coro as CIR function calls --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 19 ++++++++++--------- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 16 +++++++++------- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 10 +++------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 ++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- 5 files changed, 25 insertions(+), 26 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 95ce4ba0a28e..d7b32155fc5e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -35,14 +35,9 @@ using namespace llvm; RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, - ReturnValueSlot ReturnValue, - bool &emitAsCall) { + ReturnValueSlot ReturnValue) { const FunctionDecl *FD = GD.getDecl()->getAsFunction(); - // This is used as fallback mechanism for re-emitting selected built-ins as - // regular function calls. - emitAsCall = false; - // See if we can constant fold this builtin. If so, don't emit it at all. // TODO: Extend this handling to all builtin calls that we can constant-fold. Expr::EvalResult Result; @@ -104,9 +99,15 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_coro_frame: case Builtin::BI__builtin_coro_free: case Builtin::BI__builtin_coro_size: { - // Inform the caller we rather be emitted as regular function calls - emitAsCall = true; - return RValue::getIgnored(); + GlobalDecl gd{FD}; + mlir::Type ty = CGM.getTypes().GetFunctionType( + CGM.getTypes().arrangeGlobalDeclaration(GD)); + const auto *ND = cast(GD.getDecl()); + auto fnOp = + CGM.GetOrCreateCIRFunction(ND->getName(), ty, gd, /*ForVTable=*/false, + /*DontDefer=*/false); + return buildCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), + E, ReturnValue); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 3d870e297f8a..3ddb5210354e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -292,16 +292,18 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); + // This is not always tied to a FunctionDecl (e.g. builtins that are xformed + // into calls to other functions) const FunctionDecl *FD = dyn_cast_or_null(TargetDecl); - assert(FD && "Only functiondecl supported so far"); + // We can only guarantee that a function is called from the correct // context/function based on the appropriate target attributes, so only check // in hte case where we have both always_inline and target since otherwise we // could be making a conditional call after a check for the proper cpu // features (and it won't cause code generation issues due to function based // code generation). - assert(!TargetDecl->hasAttr() && "NYI"); - assert(!TargetDecl->hasAttr() && "NYI"); + assert((!TargetDecl || !TargetDecl->hasAttr()) && "NYI"); + assert((!TargetDecl || !TargetDecl->hasAttr()) && "NYI"); // Some architectures (such as x86-64) have the ABI changed based on // attribute-target/features. Give them a chance to diagnose. @@ -396,7 +398,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: Update the largest vector width if any arguments have vector types. // TODO: Compute the calling convention and attributes. - assert(!FD->hasAttr() && "NYI"); + assert((!FD || !FD->hasAttr()) && "NYI"); // TODO: InNoMergeAttributedStmt // assert(!CurCodeDecl->hasAttr() && @@ -409,7 +411,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: UnusedReturnSizePtr - assert(!FD->hasAttr() && "NYI"); + assert((!FD || !FD->hasAttr()) && "NYI"); // TODO: alignment attributes @@ -434,11 +436,11 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, assert(!CGM.getLangOpts().ObjCAutoRefCount && "Not supported"); - assert(!TargetDecl->hasAttr() && "NYI"); + assert((!TargetDecl || !TargetDecl->hasAttr()) && "NYI"); assert(!getDebugInfo() && "No debug info yet"); - assert(!TargetDecl->hasAttr() && "NYI"); + assert((!TargetDecl || !TargetDecl->hasAttr()) && "NYI"); // 4. Finish the call. diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 8efe608a14eb..0eebe94df2b0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -599,13 +599,9 @@ RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, CIRGenCallee callee = buildCallee(E->getCallee()); - bool emitBuiltinAsCall = false; - if (callee.isBuiltin()) { - auto rv = buildBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), - E, ReturnValue, emitBuiltinAsCall); - if (!emitBuiltinAsCall) - return rv; - } + if (callee.isBuiltin()) + return buildBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), E, + ReturnValue); assert(!callee.isPsuedoDestructor() && "NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 40de73962f52..69d4cef7361a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -937,8 +937,8 @@ class CIRGenFunction { LValue buildUnaryOpLValue(const clang::UnaryOperator *E); LValue buildStringLiteralLValue(const StringLiteral *E); RValue buildBuiltinExpr(const clang::GlobalDecl GD, unsigned BuiltinID, - const clang::CallExpr *E, ReturnValueSlot ReturnValue, - bool &emitAsCall); + const clang::CallExpr *E, + ReturnValueSlot ReturnValue); mlir::Value buildTargetBuiltinExpr(unsigned BuiltinID, const clang::CallExpr *E, ReturnValueSlot ReturnValue); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 98b862651dd5..337712e52e08 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -390,7 +390,6 @@ class CIRGenModule { void addReplacement(StringRef Name, mlir::Operation *Op); -private: // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR mlir::cir::FuncOp @@ -404,6 +403,7 @@ class CIRGenModule { mlir::FunctionType Ty, const clang::FunctionDecl *FD); +private: // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; llvm::StringMap Manglings; From e0d6adbeaa4e85f702434b659bbe2bd377e11511 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 29 Nov 2022 18:36:44 -0800 Subject: [PATCH 0692/2301] [CIR][CIRGen] Fix AwaitOp::build to account for insertion points sanity --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 29 +++++++++++++++---------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 51c8c89a19c9..912bddfb5501 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1363,19 +1363,26 @@ void AwaitOp::build(OpBuilder &builder, OperationState &result, function_ref readyBuilder, function_ref suspendBuilder, function_ref resumeBuilder) { - OpBuilder::InsertionGuard guard(builder); - - Region *readyRegion = result.addRegion(); - builder.createBlock(readyRegion); - readyBuilder(builder, result.location); + { + OpBuilder::InsertionGuard guard(builder); + Region *readyRegion = result.addRegion(); + builder.createBlock(readyRegion); + readyBuilder(builder, result.location); + } - Region *suspendRegion = result.addRegion(); - builder.createBlock(suspendRegion); - suspendBuilder(builder, result.location); + { + OpBuilder::InsertionGuard guard(builder); + Region *suspendRegion = result.addRegion(); + builder.createBlock(suspendRegion); + suspendBuilder(builder, result.location); + } - Region *resumeRegion = result.addRegion(); - builder.createBlock(resumeRegion); - resumeBuilder(builder, result.location); + { + OpBuilder::InsertionGuard guard(builder); + Region *resumeRegion = result.addRegion(); + builder.createBlock(resumeRegion); + resumeBuilder(builder, result.location); + } } /// Given the region at `index`, or the parent operation if `index` is None, From 03142dbe47db37a36f97ab36a83ad4164c460420 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 30 Nov 2022 14:39:09 -0800 Subject: [PATCH 0693/2301] [CIR][CIRGen][NFC] Add more asserts, initialize some data and add missing API --- clang/lib/CIR/CodeGen/Address.h | 6 ++++-- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 15 ++++++++++----- clang/lib/CIR/CodeGen/CIRGenCall.h | 5 +++++ 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 5a9de3098327..5490503f2271 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -35,10 +35,12 @@ class Address { Address(mlir::Value pointer, mlir::Type elementType, clang::CharUnits alignment) : Pointer(pointer), ElementType(elementType), Alignment(alignment) { + auto ptrTy = pointer.getType().dyn_cast(); + assert(ptrTy && "Expected cir.ptr type"); + assert(pointer != nullptr && "Pointer cannot be null"); assert(elementType != nullptr && "Pointer cannot be null"); - assert(pointer.getType().cast().getPointee() == - ElementType && + assert(ptrTy.getPointee() == ElementType && "Incorrect pointer element type"); assert(!alignment.isZero() && "Alignment cannot be zero"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 3ddb5210354e..9bd425ec00ef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -80,11 +80,11 @@ class ClangToCIRArgMapping { /// Arguments of CIR function corresponding to single Clang argument. struct CIRArgs { - unsigned PaddingArgIndex; + unsigned PaddingArgIndex = 0; // Argument is expanded to CIR arguments at positions // [FirstArgIndex, FirstArgIndex + NumberOfArgs). - unsigned FirstArgIndex; - unsigned NumberOfArgs; + unsigned FirstArgIndex = 0; + unsigned NumberOfArgs = 0; CIRArgs() : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), @@ -156,6 +156,8 @@ void ClangToCIRArgMapping::construct(const ASTContext &Context, // TODO(cir): we might not want to break it this early, revisit this // once we have a better ABI lowering story. CIRArgs.NumberOfArgs = STy.getMembers().size(); + assert(CIRArgs.NumberOfArgs == 1 && + "Initial CIR codegen is not the place to split arguments"); } else { CIRArgs.NumberOfArgs = 1; } @@ -281,6 +283,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const CallArgList &CallArgs, mlir::cir::CallOp *callOrInvoke, bool IsMustTail, SourceLocation Loc) { + auto builder = CGM.getBuilder(); // FIXME: We no longer need the types from CallArgs; lift up and simplify assert(Callee.isOrdinary() || Callee.isVirtual()); @@ -331,6 +334,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // When passing arguments using temporary allocas, we need to add the // appropriate lifetime markers. This vector keeps track of all the lifetime // markers that need to be ended right after the call. + assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); // Translate all of the arguments as necessary to match the CIR lowering. assert(CallInfo.arg_size() == CallArgs.size() && @@ -417,8 +421,9 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // Emit the actual call op. auto callLoc = CGM.getLoc(Loc); - auto theCall = CGM.getBuilder().create(callLoc, CalleePtr, - CIRCallArgs); + assert(builder.getInsertionBlock() && "expected valid basic block"); + auto theCall = + builder.create(callLoc, CalleePtr, CIRCallArgs); if (callOrInvoke) callOrInvoke = &theCall; diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 278f9b6821ea..25154afb92c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -172,6 +172,11 @@ struct CallArg { } bool hasLValue() const { return HasLV; } + LValue getKnownLValue() const { + assert(HasLV && !IsUsed); + return LV; + } + RValue getKnownRValue() const { assert(!HasLV && !IsUsed); return RV; From 5ecd9c2c89135a8fb695080cdbcf9c04b6b66463 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 30 Nov 2022 15:41:37 -0800 Subject: [PATCH 0694/2301] [CIR][CIRGen][NFC] Augment builder with helper for element bitcast --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 34 ++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 431108c1dfcd..d8e5e059dd81 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -9,6 +9,8 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H #define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H +#include "Address.h" + #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/FPEnv.h" @@ -27,6 +29,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { public: CIRGenBuilderTy(mlir::MLIRContext &C) : mlir::OpBuilder(&C) {} + // + // Floating point specific helpers + // ------------------------------- + // + /// Enable/Disable use of constrained floating point math. When enabled the /// CreateF() calls instead create constrained floating point intrinsic /// calls. Fast math flags are unaffected by this setting. @@ -73,12 +80,33 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return DefaultConstrainedRounding; } - mlir::Value CreateFPExt(mlir::Value V, mlir::Type DestType) { + // + // Operation creation helpers + // -------------------------- + // + + mlir::Value createFPExt(mlir::Value v, mlir::Type destType) { if (getIsFPConstrained()) llvm_unreachable("constrainedfp NYI"); - return create(V.getLoc(), DestType, - mlir::cir::CastKind::floating, V); + return create(v.getLoc(), destType, + mlir::cir::CastKind::floating, v); + } + + cir::Address createElementBitCast(mlir::Location loc, cir::Address addr, + mlir::Type destType) { + if (destType == addr.getElementType()) + return addr; + + auto newPtrType = mlir::cir::PointerType::get(getContext(), destType); + auto cast = create( + loc, newPtrType, mlir::cir::CastKind::bitcast, addr.getPointer()); + return Address(cast, addr.getElementType(), addr.getAlignment()); + } + + mlir::Value createLoad(mlir::Location loc, Address addr) { + return create(loc, addr.getElementType(), + addr.getPointer()); } }; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index a97d03aae031..b0dec564b908 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1183,7 +1183,7 @@ mlir::Value ScalarExprEmitter::buildScalarCast( auto FloatSrcTy = SrcElementTy.cast(); if (FloatDstTy.getWidth() < FloatSrcTy.getWidth()) llvm_unreachable("truncation NYI"); - return Builder.CreateFPExt(Src, DstTy); + return Builder.createFPExt(Src, DstTy); } LValue From 6d34129642e498307055121c8f3bdb682085486d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 30 Nov 2022 16:42:49 -0800 Subject: [PATCH 0695/2301] [CIR][CIRGen][NFC] Handle coerced types, necessary for handling coroutine promisses Test will be added together with coawait testing soon. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 61 +++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 9bd425ec00ef..535516b02511 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -277,6 +277,14 @@ void CIRGenFunction::buildAggregateStore(mlir::Value Val, Address Dest, builder.create(*currSrcLoc, Val, Dest.getPointer()); } +static Address emitAddressAtOffset(CIRGenFunction &CGF, Address addr, + const ABIArgInfo &info) { + if (unsigned offset = info.getDirectOffset()) { + llvm_unreachable("NYI"); + } + return addr; +} + RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, @@ -379,7 +387,58 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, CIRCallArgs[FirstCIRArg] = V; break; } - assert(false && "this code path shouldn't be hit yet"); + + // FIXME: Avoid the conversion through memory if possible. + Address Src = Address::invalid(); + if (!I->isAggregate()) { + llvm_unreachable("NYI"); + } else { + Src = I->hasLValue() ? I->getKnownLValue().getAddress() + : I->getKnownRValue().getAggregateAddress(); + } + + // If the value is offset in memory, apply the offset now. + Src = emitAddressAtOffset(*this, Src, ArgInfo); + + // Fast-isel and the optimizer generally like scalar values better than + // FCAs, so we flatten them if this is safe to do for this argument. + auto STy = dyn_cast(ArgInfo.getCoerceToType()); + if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { + auto SrcTy = Src.getElementType(); + // FIXME(cir): get proper location for each argument. + auto argLoc = CGM.getLoc(Loc); + + // If the source type is smaller than the destination type of the + // coerce-to logic, copy the source value into a temp alloca the size + // of the destination type to allow loading all of it. The bits past + // the source value are left undef. + // FIXME(cir): add data layout info and compare sizes instead of + // matching the types. + // + // uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); + // uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); + // if (SrcSize < DstSize) { + if (SrcTy != STy) + llvm_unreachable("NYI"); + else { + // FIXME(cir): this currently only runs when the types are different, + // but should be when alloc sizes are different, fix this as soon as + // datalayout gets introduced. + Src = builder.createElementBitCast(argLoc, Src, STy); + } + + assert(NumCIRArgs == STy.getMembers().size()); + // In LLVMGen: Still only pass the struct without any gaps but mark it + // as such somehow. In CIRGen: Emit a load from the "whole" struct, + // which shall be broken later by some lowering step into multiple + // loads. + assert(STy.getMembers().size() == 1 && "dont break up arguments here!"); + CIRCallArgs[FirstCIRArg] = builder.createLoad(argLoc, Src); + } else { + llvm_unreachable("NYI"); + } + + break; } default: assert(false && "Only Direct support so far"); From c36f42a930c8688f4246d8ce0afc127104972985 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 30 Nov 2022 16:53:03 -0800 Subject: [PATCH 0696/2301] [CIR][CIRGen] Add emission of cir.await, effectively implementig the core of CoawaitExpr handling Tests: still need coreturn to be implemented before we can add tests for coawait --- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 238 ++++++++++++------ clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 + .../CodeGen/UnimplementedFeatureGuarding.h | 3 + 5 files changed, 170 insertions(+), 82 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index bb6d68fad5cf..0d205f671a6a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -18,58 +18,21 @@ using namespace clang; using namespace cir; +namespace { +enum class AwaitKind { Init, Normal, Yield, Final }; +} // namespace struct cir::CGCoroData { // What is the current await expression kind and how many // await/yield expressions were encountered so far. // These are used to generate pretty labels for await expressions in LLVM IR. - // AwaitKind CurrentAwaitKind = AwaitKind::Init; - // unsigned AwaitNum = 0; - // unsigned YieldNum = 0; + AwaitKind CurrentAwaitKind = AwaitKind::Init; // How many co_return statements are in the coroutine. Used to decide whether // we need to add co_return; equivalent at the end of the user authored body. unsigned CoreturnCount = 0; - // A branch to this block is emitted when coroutine needs to suspend. - // llvm::BasicBlock *SuspendBB = nullptr; - // The promise type's 'unhandled_exception' handler, if it defines one. Stmt *ExceptionHandler = nullptr; - - // A temporary i1 alloca that stores whether 'await_resume' threw an - // exception. If it did, 'true' is stored in this variable, and the coroutine - // body must be skipped. If the promise type does not define an exception - // handler, this is null. - // llvm::Value *ResumeEHVar = nullptr; - - // Stores the jump destination just before the coroutine memory is freed. - // This is the destination that every suspend point jumps to for the cleanup - // branch. - // CodeGenFunction::JumpDest CleanupJD; - - // Stores the jump destination just before the final suspend. The co_return - // statements jumps to this point after calling return_xxx promise member. - // CodeGenFunction::JumpDest FinalJD; - - // Stores the llvm.coro.id emitted in the function so that we can supply it - // as the first argument to coro.begin, coro.alloc and coro.free intrinsics. - // Note: llvm.coro.id returns a token that cannot be directly expressed in a - // builtin. - // llvm::CallInst *CoroId = nullptr; - - // Stores the llvm.coro.begin emitted in the function so that we can replace - // all coro.frame intrinsics with direct SSA value of coro.begin that returns - // the address of the coroutine frame of the current coroutine. - // llvm::CallInst *CoroBegin = nullptr; - - // Stores the last emitted coro.free for the deallocate expressions, we use it - // to wrap dealloc code with if(auto mem = coro.free) dealloc(mem). - // llvm::CallInst *LastCoroFree = nullptr; - - // If coro.id came from the builtin, remember the expression to give better - // diagnostic. If CoroIdExpr is nullptr, the coro.id was created by - // EmitCoroutineBody. - CallExpr const *CoroIdExpr = nullptr; }; // Defining these here allows to keep CGCoroData private to this file. @@ -79,22 +42,12 @@ CIRGenFunction::CGCoroInfo::~CGCoroInfo() {} static void createCoroData(CIRGenFunction &CGF, CIRGenFunction::CGCoroInfo &CurCoro) { if (CurCoro.Data) { - // if (CurCoro.Data->CoroIdExpr) - // CGF.CGM.Error(CoroIdExpr->getBeginLoc(), - // "only one __builtin_coro_id can be used in a function"); - // else if (CoroIdExpr) - // CGF.CGM.Error(CoroIdExpr->getBeginLoc(), - // "__builtin_coro_id shall not be used in a C++ - // coroutine"); - // else llvm_unreachable("EmitCoroutineBodyStatement called twice?"); return; } CurCoro.Data = std::unique_ptr(new CGCoroData); - // CurCoro.Data->CoroId = CoroId; - // CurCoro.Data->CoroIdExpr = CoroIdExpr; } namespace { @@ -168,13 +121,13 @@ static mlir::LogicalResult buildBodyAndFallthrough(CIRGenFunction &CGF, Stmt *Body) { if (CGF.buildStmt(Body, /*useCurrentScope=*/true).failed()) return mlir::failure(); - // From LLVM codegen: - // const bool CanFallthrough = CGF.Builder.GetInsertBlock(); - if (S.getFallthroughHandler()) { - llvm_unreachable("NYI"); - // if (Stmt *OnFallthrough = S.getFallthroughHandler()) - // CGF.buildStmt(OnFallthrough, /*useCurrentScope=*/true); - } + // LLVM codegen checks if a insert basic block is available in order + // to decide whether to getFallthroughHandler, sounds like it should + // be an assert, not clear. For CIRGen solely rely on getFallthroughHandler. + if (Stmt *OnFallthrough = S.getFallthroughHandler()) + if (CGF.buildStmt(OnFallthrough, /*useCurrentScope=*/true).failed()) + return mlir::failure(); + return mlir::success(); } @@ -191,7 +144,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { llvm_unreachable("NYI"); { - // FIXME: create a new scope to copy out the params? + // FIXME(cir): create a new scope to copy out the params? // LLVM create scope cleanups here, but might be due to the use // of many basic blocks? assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); @@ -218,13 +171,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { if (buildStmt(S.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - // Address promiseAddr = GetAddrOfLocalVar(S.getPromiseDecl()); - // auto *PromiseAddrVoidPtr = - // new llvm::BitCastInst(promiseAddr.getPointer(), VoidPtrTy, "", - // CoroId); - // // Update CoroId to refer to the promise. We could not do it earlier - // // because promise local variable was not emitted yet. - // CoroId->setArgOperand(1, PromiseAddrVoidPtr); + // FIXME(cir): handle promiseAddr and coro id related stuff? // ReturnValue should be valid as long as the coroutine's return type // is not void. The assertion could help us to reduce the check later. @@ -244,22 +191,18 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { /*IsInit*/ true); } - // EHStack.pushCleanup(EHCleanup); - - // CurCoro.Data->CurrentAwaitKind = AwaitKind::Init; - // CurCoro.Data->ExceptionHandler = S.getExceptionHandler(); + // FIXME(cir): EHStack.pushCleanup(EHCleanup); + CurCoro.Data->CurrentAwaitKind = AwaitKind::Init; if (buildStmt(S.getInitSuspendStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - // CurCoro.Data->FinalJD = getJumpDestInCurrentScope(FinalBB); - // CurCoro.Data->CurrentAwaitKind = AwaitKind::Normal; + CurCoro.Data->CurrentAwaitKind = AwaitKind::Normal; - if (S.getExceptionHandler()) { - llvm_unreachable("NYI"); - } else { - if (buildBodyAndFallthrough(*this, S, S.getBody()).failed()) - return mlir::failure(); - } + // FIXME(cir): wrap buildBodyAndFallthrough with try/catch bits. + if (S.getExceptionHandler()) + assert(!UnimplementedFeature::unhandledException() && "NYI"); + if (buildBodyAndFallthrough(*this, S, S.getBody()).failed()) + return mlir::failure(); // See if we need to generate final suspend. // const bool CanFallthrough = Builder.GetInsertBlock(); @@ -268,10 +211,145 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { const bool CanFallthrough = false; const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0; if (CanFallthrough || HasCoreturns) { - // CurCoro.Data->CurrentAwaitKind = AwaitKind::Final; + CurCoro.Data->CurrentAwaitKind = AwaitKind::Final; if (buildStmt(S.getFinalSuspendStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); } } return mlir::success(); -} \ No newline at end of file +} + +static bool memberCallExpressionCanThrow(const Expr *E) { + if (const auto *CE = dyn_cast(E)) + if (const auto *Proto = + CE->getMethodDecl()->getType()->getAs()) + if (isNoexceptExceptionSpec(Proto->getExceptionSpecType()) && + Proto->canThrow() == CT_Cannot) + return false; + return true; +} + +// Given a suspend expression which roughly looks like: +// +// auto && x = CommonExpr(); +// if (!x.await_ready()) { +// x.await_suspend(...); (*) +// } +// x.await_resume(); +// +// where the result of the entire expression is the result of x.await_resume() +// +// (*) If x.await_suspend return type is bool, it allows to veto a suspend: +// if (x.await_suspend(...)) +// llvm_coro_suspend(); +// +// This is more higher level than LLVM codegen, for that one see llvm's +// docs/Coroutines.rst for more details. +namespace { +struct LValueOrRValue { + LValue LV; + RValue RV; +}; +} // namespace +static LValueOrRValue buildSuspendExpression( + CIRGenFunction &CGF, CGCoroData &Coro, CoroutineSuspendExpr const &S, + AwaitKind Kind, AggValueSlot aggSlot, bool ignoreResult, bool forLValue) { + auto *E = S.getCommonExpr(); + + auto awaitBuild = mlir::success(); + LValueOrRValue awaitRes; + + auto Binder = + CIRGenFunction::OpaqueValueMappingData::bind(CGF, S.getOpaqueValue(), E); + auto UnbindOnExit = llvm::make_scope_exit([&] { Binder.unbind(CGF); }); + auto &builder = CGF.getBuilder(); + + LLVM_ATTRIBUTE_UNUSED auto awaitOp = builder.create( + CGF.getLoc(S.getSourceRange()), + /*readyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto *cond = S.getReadyExpr(); + cond = cond->IgnoreParens(); + mlir::Value condV = CGF.evaluateExprAsBool(cond); + + if (!condV) { + awaitBuild = mlir::failure(); + return; + } + + // If expression is ready, no need to suspend. + builder.create(loc, condV); + }, + /*suspendBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // A invalid suspendRet indicates "void returning await_suspend" + auto suspendRet = CGF.buildScalarExpr(S.getSuspendExpr()); + + // Veto suspension if requested by bool returning await_suspend. + if (suspendRet) { + // From LLVM codegen: + // if (SuspendRet != nullptr && SuspendRet->getType()->isIntegerTy(1)) + llvm_unreachable("NYI"); + } + + auto alwaysSuspend = b.create( + loc, mlir::cir::BoolType::get(b.getContext()), b.getBoolAttr(true)); + builder.create(loc, + mlir::ValueRange{alwaysSuspend}); + }, + /*resumeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // Exception handling requires additional IR. If the 'await_resume' + // function is marked as 'noexcept', we avoid generating this additional + // IR. + CXXTryStmt *TryStmt = nullptr; + if (Coro.ExceptionHandler && Kind == AwaitKind::Init && + memberCallExpressionCanThrow(S.getResumeExpr())) { + llvm_unreachable("NYI"); + } + + // FIXME(cir): the alloca for the resume expr should be placed in the + // enclosing cir.scope instead. + if (forLValue) + awaitRes.LV = CGF.buildLValue(S.getResumeExpr()); + else + awaitRes.RV = + CGF.buildAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult); + + if (TryStmt) { + llvm_unreachable("NYI"); + } + }); + + assert(awaitBuild.succeeded() && "Should know how to codegen"); + return awaitRes; +} + +RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, + AggValueSlot aggSlot, + bool ignoreResult) { + RValue rval; + auto scopeLoc = getLoc(E.getSourceRange()); + builder.create( + scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // FIXME(cir): abstract all this massive location handling elsewhere. + SmallVector locs; + if (loc.isa()) { + locs.push_back(loc); + locs.push_back(loc); + } else if (loc.isa()) { + auto fusedLoc = loc.cast(); + locs.push_back(fusedLoc.getLocations()[0]); + locs.push_back(fusedLoc.getLocations()[1]); + } + LexicalScopeContext lexScope{locs[0], locs[1], + builder.getInsertionBlock()}; + LexicalScopeGuard lexScopeGuard{*this, &lexScope}; + rval = buildSuspendExpression(*this, *CurCoro.Data, E, + CurCoro.Data->CurrentAwaitKind, aggSlot, + ignoreResult, /*forLValue*/ false) + .RV; + }); + return rval; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index b968b5fe401b..d42e29b6e899 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -62,7 +62,9 @@ class AggExprEmitter : public StmtVisitor { void VisitGenericSelectionExpr(GenericSelectionExpr *GE) { llvm_unreachable("NYI"); } - void VisitCoawaitExpr(CoawaitExpr *E) { llvm_unreachable("NYI"); } + void VisitCoawaitExpr(CoawaitExpr *E) { + CGF.buildCoawaitExpr(*E, Dest, IsResultUnused); + } void VisitCoyieldExpr(CoyieldExpr *E) { llvm_unreachable("NYI"); } void VisitUnaryCoawait(UnaryOperator *E) { llvm_unreachable("NYI"); } void VisitUnaryExtension(UnaryOperator *E) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index b0dec564b908..5bd5badc83af 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -76,7 +76,9 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *GE) { llvm_unreachable("NYI"); } - mlir::Value VisitCoawaitExpr(CoawaitExpr *S) { llvm_unreachable("NYI"); } + mlir::Value VisitCoawaitExpr(CoawaitExpr *S) { + return CGF.buildCoawaitExpr(*S).getScalarVal(); + } mlir::Value VisitCoyieldExpr(CoyieldExpr *S) { llvm_unreachable("NYI"); } mlir::Value VisitUnaryCoawait(const UnaryOperator *E) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 69d4cef7361a..128c4fcf3b60 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -769,6 +769,9 @@ class CIRGenFunction { mlir::LogicalResult buildFunctionBody(const clang::Stmt *Body); mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S); + RValue buildCoawaitExpr(const CoawaitExpr &E, + AggValueSlot aggSlot = AggValueSlot::ignored(), + bool ignoreResult = false); RValue buildCoroutineIntrinsic(const CallExpr *E, unsigned int IID); // Build CIR for a statement. useCurrentScope should be true if no diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index e2a881a1f4cf..640e01afa2bd 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -46,6 +46,9 @@ struct UnimplementedFeature { // Debug info static bool generateDebugInfo() { return false; } + // Coroutines + static bool unhandledException() { return false; } + static bool capturedByInit() { return false; } static bool getASTAllocaAddressSpace() { return false; } static bool tryEmitAsConstant() { return false; } From 629f6f448b239ac80460c9121a51afcbcafdcbe1 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 6 Nov 2022 20:05:15 -0500 Subject: [PATCH 0697/2301] [CIR][Lowering] Add CIRToMLIR pass Add a pass that lowers out of CIR and into MLIR. We've been operating up until now via lowering individual instructions at the time, but that seems pointless to lower without a concrete target. So just restructure to lower monolithically here via a whole CIR->MLIR pass. Also, name pending. `MLIR` is pretty ambiguous, but there isn't a proper term for dialects within mlir. --- clang/include/clang/CIR/Passes.h | 3 ++ clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 65 +++++++++++++++++++++++++++ clang/tools/cir-tool/cir-tool.cpp | 4 ++ 3 files changed, 72 insertions(+) diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index ba4d79e88ad5..46741917ccaa 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -29,6 +29,9 @@ std::unique_ptr createConvertCIRToLLVMPass(); /// Create a pass that only lowers a subset of `CIR` memref-like operations to /// MemRef specific versions. std::unique_ptr createConvertCIRToMemRefPass(); + +/// Create a pass that fully lowers CIR to the MLIR in-tree dialects. +std::unique_ptr createConvertCIRToMLIRPass(); } // end namespace cir #endif // CLANG_CIR_PASSES_H diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 570f392028f0..496c6ffa8dfb 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -92,6 +92,19 @@ class CIRReturnLowering } }; +struct ConvertCIRToMLIRPass + : public mlir::PassWrapper> { + void getDependentDialects(mlir::DialectRegistry ®istry) const override { + registry.insert(); + } + void runOnOperation() final; + + virtual StringRef getArgument() const override { return "cir-to-mlir"; } +}; + class CIRCallLowering : public mlir::OpRewritePattern { public: using OpRewritePattern::OpRewritePattern; @@ -537,6 +550,15 @@ void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { CIRCmpOpLowering, CIRBrOpLowering>(patterns.getContext()); } +void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, + mlir::TypeConverter &converter) { + patterns.add(patterns.getContext()); + patterns.add(converter, patterns.getContext()); +} + mlir::TypeConverter prepareTypeConverter() { mlir::TypeConverter converter; converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { @@ -548,6 +570,26 @@ mlir::TypeConverter prepareTypeConverter() { return converter; } +void ConvertCIRToMLIRPass::runOnOperation() { + auto module = getOperation(); + + auto converter = prepareTypeConverter(); + + mlir::RewritePatternSet patterns(&getContext()); + + populateCIRToMLIRConversionPatterns(patterns, converter); + + mlir::ConversionTarget target(getContext()); + target.addLegalOp(); + target.addLegalDialect(); + target.addIllegalDialect(); + + if (failed(applyPartialConversion(module, target, std::move(patterns)))) + signalPassFailure(); +} + void ConvertCIRToLLVMPass::runOnOperation() { mlir::LLVMConversionTarget target(getContext()); target.addLegalOp(); @@ -657,4 +699,27 @@ std::unique_ptr createConvertCIRToFuncPass() { return std::make_unique(); } +std::unique_ptr createConvertCIRToMLIRPass() { + return std::make_unique(); +} + +mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, + std::unique_ptr mlirCtx, + LLVMContext &llvmCtx) { + mlir::PassManager pm(mlirCtx.get()); + + pm.addPass(createConvertCIRToMLIRPass()); + + auto result = !mlir::failed(pm.run(theModule)); + if (!result) + report_fatal_error( + "The pass manager failed to lower CIR to LLVMIR dialect!"); + + // Now that we ran all the lowering passes, verify the final output. + if (theModule.verify().failed()) + report_fatal_error("Verification of the final LLVMIR dialect failed!"); + + return theModule; +} + } // namespace cir diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index 2e23c06d648d..30937a18ba3c 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -43,6 +43,10 @@ int main(int argc, char **argv) { return mlir::createMergeCleanupsPass(); }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return cir::createConvertCIRToMLIRPass(); + }); + mlir::registerTransformsPasses(); return failed(MlirOptMain( From 80dd8dda97c2cf7c2aa53130b37bf3690831f492 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 6 Nov 2022 20:07:10 -0500 Subject: [PATCH 0698/2301] [CIR][Lowering] Add a test for lowering via cir-to-mlir --- clang/test/CIR/CIRToLLVM/dot.cir | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 clang/test/CIR/CIRToLLVM/dot.cir diff --git a/clang/test/CIR/CIRToLLVM/dot.cir b/clang/test/CIR/CIRToLLVM/dot.cir new file mode 100644 index 000000000000..1c4efc11b832 --- /dev/null +++ b/clang/test/CIR/CIRToLLVM/dot.cir @@ -0,0 +1,29 @@ +// RUN: cir-tool %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s +// XFAIL: * + +module { + cir.func @dot(%arg0: !cir.ptr) -> i32 { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", init] {alignment = 8 : i64} + %1 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} + %2 = cir.alloca !cir.ptr, cir.ptr >, ["y", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, cir.ptr > + %3 = cir.load %0 : cir.ptr >, !cir.ptr + cir.store %3, %2 : !cir.ptr, cir.ptr > + %4 = cir.cst(0 : i32) : i32 + %5 = cir.load %1 : cir.ptr , i32 + cir.return %5 : i32 + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @dot(%arg0: memref) -> i32 { +// CHECK-NEXT: %alloca = memref.alloca() {alignment = 8 : i64} : memref> +// CHECK-NEXT: %alloca_0 = memref.alloca() {alignment = 4 : i64} : memref +// CHECK-NEXT: %alloca_1 = memref.alloca() {alignment = 8 : i64} : memref> +// CHECK-NEXT: memref.store %arg0, %alloca[] : memref> +// CHECK-NEXT: %0 = memref.load %alloca[] : memref> +// CHECK-NEXT: memref.store %0, %alloca_1[] : memref> +// CHECK-NEXT: %c0_i32 = arith.constant 0 : i32 +// CHECK-NEXT: %1 = memref.load %alloca_0[] : memref +// CHECK-NEXT: return %1 : i32 From 0b7def046d40dbe82e27ce815921df4d7ba9b5de Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 6 Nov 2022 21:31:59 -0500 Subject: [PATCH 0699/2301] [CIR] Support lowering to assembly via clang This is just a matter of hooking up a few points in the ExecuteCompilerInvocation and CIRGenAction. --- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 13 +++++++++++-- .../lib/FrontendTool/ExecuteCompilerInvocation.cpp | 10 ++++++++-- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 26580eeb094f..e48335a89961 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -248,9 +248,18 @@ class CIRGenConsumer : public clang::ASTConsumer { nullptr, std::move(outputStream)); break; } - case CIRGenAction::OutputType::EmitAssembly: - assert(false && "Not yet implemented"); + case CIRGenAction::OutputType::EmitAssembly: { + llvm::LLVMContext llvmCtx; + auto llvmModule = + lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + + llvmModule->setTargetTriple(targetOptions.Triple); + emitBackendOutput(compilerInstance, codeGenOptions, + C.getTargetInfo().getDataLayoutString(), + llvmModule.get(), BackendAction::Backend_EmitAssembly, + nullptr, std::move(outputStream)); break; + } case CIRGenAction::OutputType::None: break; } diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 2ce55d1d41be..9248b67d9bbb 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -51,7 +51,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) { auto Act = CI.getFrontendOpts().ProgramAction; auto EmitsCIR = Act == EmitCIR || Act == EmitCIROnly; - auto IsImplementedCIROutput = EmitsCIR || Act == EmitLLVM || Act == EmitObj; + auto IsImplementedCIROutput = + EmitsCIR || Act == EmitLLVM || Act == EmitObj || Act == EmitAssembly; if (UseCIR && !IsImplementedCIROutput) llvm::report_fatal_error("-fenable currently only works with " @@ -69,7 +70,12 @@ CreateFrontendBaseAction(CompilerInstance &CI) { return std::make_unique(); case DumpRawTokens: return std::make_unique(); case DumpTokens: return std::make_unique(); - case EmitAssembly: return std::make_unique(); + case EmitAssembly: +#if CLANG_ENABLE_CIR + if (UseCIR) + return std::make_unique<::cir::EmitAssemblyAction>(); +#endif + return std::make_unique(); case EmitBC: return std::make_unique(); #if CLANG_ENABLE_CIR case EmitCIR: return std::make_unique<::cir::EmitCIRAction>(); From edfef78b0a0f8f001d4ad55b1f104977cac01c71 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 6 Nov 2022 21:34:38 -0500 Subject: [PATCH 0700/2301] [CIR] Support lowering to MLIR via CIRGen This adds support to the CIRGenAction to be able to lower from CIR to the in-tree MLIR dialects and emit that from clang's frontend. --- clang/include/clang/CIR/LowerToLLVM.h | 3 +++ .../clang/CIRFrontendAction/CIRGenAction.h | 16 ++++++++++++++- clang/include/clang/Driver/Options.td | 9 ++++++--- clang/include/clang/Driver/Types.def | 1 + .../include/clang/Frontend/FrontendOptions.h | 3 +++ clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 5 ++--- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 20 +++++++++++++++++-- clang/lib/Frontend/CompilerInvocation.cpp | 2 ++ .../ExecuteCompilerInvocation.cpp | 10 ++++++---- 9 files changed, 56 insertions(+), 13 deletions(-) diff --git a/clang/include/clang/CIR/LowerToLLVM.h b/clang/include/clang/CIR/LowerToLLVM.h index 139e3fc93aec..0365ea3d6a9c 100644 --- a/clang/include/clang/CIR/LowerToLLVM.h +++ b/clang/include/clang/CIR/LowerToLLVM.h @@ -33,6 +33,9 @@ std::unique_ptr lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, std::unique_ptr mlirCtx, llvm::LLVMContext &llvmCtx); + +mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, + mlir::MLIRContext *mlirCtx); } // namespace cir #endif // CLANG_CIR_LOWERTOLLVM_H_ diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h index 4ac4ed1b5fcb..d61c90573ade 100644 --- a/clang/include/clang/CIRFrontendAction/CIRGenAction.h +++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h @@ -29,7 +29,14 @@ class CIRGenerator; class CIRGenAction : public clang::ASTFrontendAction { public: - enum class OutputType { EmitAssembly, EmitCIR, EmitLLVM, EmitObj, None }; + enum class OutputType { + EmitAssembly, + EmitCIR, + EmitLLVM, + EmitMLIR, + EmitObj, + None + }; private: friend class CIRGenConsumer; @@ -77,6 +84,13 @@ class EmitCIROnlyAction : public CIRGenAction { EmitCIROnlyAction(mlir::MLIRContext *mlirCtx = nullptr); }; +class EmitMLIRAction : public CIRGenAction { + virtual void anchor(); + +public: + EmitMLIRAction(mlir::MLIRContext *mlirCtx = nullptr); +}; + class EmitLLVMAction : public CIRGenAction { virtual void anchor(); diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 214bc5a0e604..7f5d93535709 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3038,10 +3038,13 @@ defm clangir : BoolFOption<"clangir", PosFlag, NegFlag LLVM pipeline to compile">, BothFlags<[], [ClangOption, CC1Option], "">>; -def emit_cir : Flag<["-"], "emit-cir">, Visibility<[ClangOption, CC1Option]>, - Group, HelpText<"Build ASTs and then lower to ClangIR">; +def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, + Group, + HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; def emit_cir_only : Flag<["-"], "emit-cir-only">, HelpText<"Build ASTs and convert to CIR, discarding output">; +def emit_mlir : Flag<["-"], "emit-mlir">, Visibility<[CC1Option]>, Group, + HelpText<"Build ASTs and then lower through ClangIR to MLIR, emit the .milr file">; defm cir_warnings : BoolFOption<"cir-warnings", LangOpts<"CIRWarnings">, DefaultFalse, PosFlag, @@ -7079,7 +7082,7 @@ defm analyzed_objects_for_unparse : OptOutFC1FFlag<"analyzed-objects-for-unparse def emit_fir : Flag<["-"], "emit-fir">, Group, HelpText<"Build the parse tree, then lower it to FIR">; -def emit_mlir : Flag<["-"], "emit-mlir">, Alias; +// def emit_mlir : Flag<["-"], "emit-mlir">, Alias; def emit_hlfir : Flag<["-"], "emit-hlfir">, Group, HelpText<"Build the parse tree, then lower it to HLFIR">; diff --git a/clang/include/clang/Driver/Types.def b/clang/include/clang/Driver/Types.def index 16dc446b1750..b461473786fd 100644 --- a/clang/include/clang/Driver/Types.def +++ b/clang/include/clang/Driver/Types.def @@ -100,6 +100,7 @@ TYPE("lto-ir", LTO_IR, INVALID, "s", phases TYPE("lto-bc", LTO_BC, INVALID, "o", phases::Compile, phases::Backend, phases::Assemble, phases::Link) TYPE("cir", CIR, INVALID, "cir", phases::Compile, phases::Backend, phases::Assemble, phases::Link) +TYPE("mlir", MLIR, INVALID, "mlir", phases::Compile, phases::Backend, phases::Assemble, phases::Link) // Misc. TYPE("ast", AST, INVALID, "ast", phases::Compile, phases::Backend, phases::Assemble, phases::Link) diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index bbee41db39f3..6fc8ab145309 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -71,6 +71,9 @@ enum ActionKind { /// Generate CIR, bud don't emit anything. EmitCIROnly, + /// Emit a .mlir file + EmitMLIR, + /// Emit a .ll file. EmitLLVM, diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 496c6ffa8dfb..3376c33f81a8 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -704,9 +704,8 @@ std::unique_ptr createConvertCIRToMLIRPass() { } mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, - std::unique_ptr mlirCtx, - LLVMContext &llvmCtx) { - mlir::PassManager pm(mlirCtx.get()); + mlir::MLIRContext *mlirCtx) { + mlir::PassManager pm(mlirCtx); pm.addPass(createConvertCIRToMLIRPass()); diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index e48335a89961..8a56bafde40f 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -26,6 +26,7 @@ #include "clang/CIR/CIRToCIRPasses.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/LowerToLLVM.h" +#include "clang/CIR/Passes.h" #include "clang/CodeGen/BackendUtil.h" #include "clang/CodeGen/ModuleBuilder.h" #include "clang/Driver/DriverDiagnostic.h" @@ -226,22 +227,31 @@ class CIRGenConsumer : public clang::ASTConsumer { mlirMod->print(*outputStream, flags); } break; + case CIRGenAction::OutputType::EmitMLIR: { + auto loweredMlirModule = lowerFromCIRToMLIR(mlirMod, mlirCtx.get()); + assert(outputStream && "Why are we here without an output stream?"); + // FIXME: we cannot roundtrip prettyForm=true right now. + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(/*prettyForm=*/false); + loweredMlirModule->print(*outputStream, flags); + break; + } case CIRGenAction::OutputType::EmitLLVM: { llvm::LLVMContext llvmCtx; auto llvmModule = lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + + llvmModule->setTargetTriple(targetOptions.Triple); if (outputStream) llvmModule->print(*outputStream, nullptr); break; } case CIRGenAction::OutputType::EmitObj: { - // TODO: Don't duplicate this from above llvm::LLVMContext llvmCtx; auto llvmModule = lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); llvmModule->setTargetTriple(targetOptions.Triple); - emitBackendOutput(compilerInstance, codeGenOptions, C.getTargetInfo().getDataLayoutString(), llvmModule.get(), BackendAction::Backend_EmitObj, @@ -317,6 +327,8 @@ getOutputStream(CompilerInstance &ci, StringRef inFile, return ci.createDefaultOutputFile(false, inFile, "s"); case CIRGenAction::OutputType::EmitCIR: return ci.createDefaultOutputFile(false, inFile, "cir"); + case CIRGenAction::OutputType::EmitMLIR: + return ci.createDefaultOutputFile(false, inFile, "mlir"); case CIRGenAction::OutputType::EmitLLVM: return ci.createDefaultOutputFile(false, inFile, "llvm"); case CIRGenAction::OutputType::EmitObj: @@ -411,6 +423,10 @@ void EmitCIROnlyAction::anchor() {} EmitCIROnlyAction::EmitCIROnlyAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::None, _MLIRContext) {} +void EmitMLIRAction::anchor() {} +EmitMLIRAction::EmitMLIRAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitMLIR, _MLIRContext) {} + void EmitLLVMAction::anchor() {} EmitLLVMAction::EmitLLVMAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitLLVM, _MLIRContext) {} diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 0b757062ad76..3e6cea3c5843 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -2729,6 +2729,7 @@ static const auto &getFrontendActionTable() { {frontend::EmitBC, OPT_emit_llvm_bc}, {frontend::EmitCIR, OPT_emit_cir}, {frontend::EmitCIROnly, OPT_emit_cir_only}, + {frontend::EmitMLIR, OPT_emit_mlir}, {frontend::EmitHTML, OPT_emit_html}, {frontend::EmitLLVM, OPT_emit_llvm}, {frontend::EmitLLVMOnly, OPT_emit_llvm_only}, @@ -4623,6 +4624,7 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) { case frontend::EmitBC: case frontend::EmitCIR: case frontend::EmitCIROnly: + case frontend::EmitMLIR: case frontend::EmitHTML: case frontend::EmitLLVM: case frontend::EmitLLVMOnly: diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 9248b67d9bbb..ff7ffd985385 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -51,12 +51,13 @@ CreateFrontendBaseAction(CompilerInstance &CI) { auto Act = CI.getFrontendOpts().ProgramAction; auto EmitsCIR = Act == EmitCIR || Act == EmitCIROnly; - auto IsImplementedCIROutput = - EmitsCIR || Act == EmitLLVM || Act == EmitObj || Act == EmitAssembly; + auto IsImplementedCIROutput = EmitsCIR || Act == EmitLLVM || + Act == EmitMLIR || Act == EmitAssembly || + Act == EmitObj; if (UseCIR && !IsImplementedCIROutput) - llvm::report_fatal_error("-fenable currently only works with " - "-emit-cir, -emit-cir-only and -emit-llvm"); + llvm::report_fatal_error("-fclangir currently only works with -emit-cir, " + "-emit-cir-only, -emit-mlir, -emit-llvm and -S"); if (!UseCIR && EmitsCIR) llvm::report_fatal_error( "-emit-cir and -emit-cir-only only valid when using -fclangir"); @@ -80,6 +81,7 @@ CreateFrontendBaseAction(CompilerInstance &CI) { #if CLANG_ENABLE_CIR case EmitCIR: return std::make_unique<::cir::EmitCIRAction>(); case EmitCIROnly: return std::make_unique<::cir::EmitCIROnlyAction>(); + case EmitMLIR: return std::make_unique<::cir::EmitMLIRAction>(); #else case EmitCIR: case EmitCIROnly: From 5d407b43a66578b0c1176988f1f606a0f89270e2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 6 Nov 2022 21:35:30 -0500 Subject: [PATCH 0701/2301] [CIR] Have `lowerFromCIRToLLVMIR` use the new `CIRToMLIR` pass This used the separate passes prior. Switch to our new format that uses the new monolithic one. --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 3376c33f81a8..9a89b087f372 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -46,8 +46,7 @@ struct ConvertCIRToLLVMPass : public mlir::PassWrapper> { void getDependentDialects(mlir::DialectRegistry ®istry) const override { - registry.insert(); + registry.insert(); } void runOnOperation() final; @@ -663,8 +662,7 @@ lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx) { mlir::PassManager pm(mlirCtx.get()); - pm.addPass(createConvertCIRToFuncPass()); - pm.addPass(createConvertCIRToMemRefPass()); + pm.addPass(createConvertCIRToMLIRPass()); pm.addPass(createConvertCIRToLLVMPass()); auto result = !mlir::failed(pm.run(theModule)); From 055ef64b3e1fea9b4c1ecb8c4fb04163e549dffe Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 6 Nov 2022 22:13:09 -0500 Subject: [PATCH 0702/2301] [CIR] Send LLVM output from CIRGen through clang's EmitBackendOutput This gets us a bunch of logic, most obvious is correct data layout signature for the LLVM module. This also fixes some issues with the symbol table that was being emitted to object files that broke linking on arm64 darwin. --- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 8a56bafde40f..2ff0026db0f0 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -242,8 +242,11 @@ class CIRGenConsumer : public clang::ASTConsumer { lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); llvmModule->setTargetTriple(targetOptions.Triple); - if (outputStream) - llvmModule->print(*outputStream, nullptr); + + emitBackendOutput(compilerInstance, codeGenOptions, + C.getTargetInfo().getDataLayoutString(), + llvmModule.get(), BackendAction::Backend_EmitLL, + nullptr, std::move(outputStream)); break; } case CIRGenAction::OutputType::EmitObj: { From e76840f702691d3999e5e86bfd870a0daab38dbe Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 6 Nov 2022 22:19:34 -0500 Subject: [PATCH 0703/2301] [CIR] Add a test to ensure `-emit-mlir` works --- clang/test/CIR/cc1.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/clang/test/CIR/cc1.c b/clang/test/CIR/cc1.c index 7314cc4263e7..9e1edce4a38c 100644 --- a/clang/test/CIR/cc1.c +++ b/clang/test/CIR/cc1.c @@ -1,3 +1,5 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-obj %s -o %t.o @@ -6,8 +8,13 @@ void foo() {} +// MLIR: func.func @foo() { +// MLIR-NEXT: return +// MLIR-NEXT: } + // LLVM: define void @foo() // LLVM-NEXT: ret void, // LLVM-NEXT: } // OBJ: 0: c3 retq + From 3aa386d08e43cb1114eb166441d82123f1e380a8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 6 Nov 2022 22:26:08 -0500 Subject: [PATCH 0704/2301] [CIR] Add a test to ensure `-S` works with -fclangir-enable --- clang/test/CIR/cc1.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clang/test/CIR/cc1.c b/clang/test/CIR/cc1.c index 9e1edce4a38c..ec489a027250 100644 --- a/clang/test/CIR/cc1.c +++ b/clang/test/CIR/cc1.c @@ -2,6 +2,8 @@ // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S %s -o %t.s +// RUN: FileCheck --input-file=%t.s %s -check-prefix=ASM // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-obj %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ // XFAIL: * @@ -16,5 +18,11 @@ void foo() {} // LLVM-NEXT: ret void, // LLVM-NEXT: } +// ASM: .globl foo +// ASM-NEXT: .p2align +// ASM-NEXT: .type foo,@function +// ASM-NEXT: foo: +// ASM: retq + // OBJ: 0: c3 retq From 6f88c16d592444c304b16029df9933735eef50df Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 5 Dec 2022 21:10:04 -0500 Subject: [PATCH 0705/2301] [CIR][Lowering][NFC] Rename CreateCIRToLLVMPass to CreateMLIRToLLVMPass This doesn't actually lower CIR at all anymore, so rename it accordingly. --- clang/include/clang/CIR/Passes.h | 2 +- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 14 +++++++------- clang/test/CIR/CIRToLLVM/array.cir | 2 +- clang/test/CIR/CIRToLLVM/binop-fp.cir | 2 +- clang/test/CIR/CIRToLLVM/binop-int.cir | 2 +- clang/test/CIR/CIRToLLVM/bool.cir | 2 +- clang/test/CIR/CIRToLLVM/cmp.cir | 2 +- clang/test/CIR/CIRToLLVM/goto.cir | 2 +- clang/test/CIR/CIRToLLVM/memref.cir | 2 +- clang/test/CIR/CIRToLLVM/unary-inc-dec.cir | 2 +- clang/test/CIR/CIRToLLVM/unary-plus-minus.cir | 2 +- clang/test/CIR/cirtool.cir | 2 +- clang/tools/cir-tool/cir-tool.cpp | 2 +- 13 files changed, 19 insertions(+), 19 deletions(-) diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index 46741917ccaa..aa6be536313c 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -24,7 +24,7 @@ std::unique_ptr createConvertCIRToFuncPass(); /// Create a pass for lowering from `CIR` operations well as `Affine` and `Std`, /// to the LLVM dialect for codegen. We'll want to separate this eventually into /// different phases instead of doing it all at once. -std::unique_ptr createConvertCIRToLLVMPass(); +std::unique_ptr createConvertMLIRToLLVMPass(); /// Create a pass that only lowers a subset of `CIR` memref-like operations to /// MemRef specific versions. diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 9a89b087f372..c69815e88db6 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -42,15 +42,15 @@ using namespace llvm; namespace cir { -struct ConvertCIRToLLVMPass - : public mlir::PassWrapper> { void getDependentDialects(mlir::DialectRegistry ®istry) const override { registry.insert(); } void runOnOperation() final; - virtual StringRef getArgument() const override { return "cir-to-llvm"; } + virtual StringRef getArgument() const override { return "cir-mlir-to-llvm"; } }; struct ConvertCIRToMemRefPass @@ -589,7 +589,7 @@ void ConvertCIRToMLIRPass::runOnOperation() { signalPassFailure(); } -void ConvertCIRToLLVMPass::runOnOperation() { +void ConvertMLIRToLLVMPass::runOnOperation() { mlir::LLVMConversionTarget target(getContext()); target.addLegalOp(); @@ -663,7 +663,7 @@ lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, mlir::PassManager pm(mlirCtx.get()); pm.addPass(createConvertCIRToMLIRPass()); - pm.addPass(createConvertCIRToLLVMPass()); + pm.addPass(createConvertMLIRToLLVMPass()); auto result = !mlir::failed(pm.run(theModule)); if (!result) @@ -685,8 +685,8 @@ lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, return llvmModule; } -std::unique_ptr createConvertCIRToLLVMPass() { - return std::make_unique(); +std::unique_ptr createConvertMLIRToLLVMPass() { + return std::make_unique(); } std::unique_ptr createConvertCIRToMemRefPass() { diff --git a/clang/test/CIR/CIRToLLVM/array.cir b/clang/test/CIR/CIRToLLVM/array.cir index 8a467f059c52..88735607eb78 100644 --- a/clang/test/CIR/CIRToLLVM/array.cir +++ b/clang/test/CIR/CIRToLLVM/array.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/binop-fp.cir b/clang/test/CIR/CIRToLLVM/binop-fp.cir index b7fba41b9710..c0f84ecf027c 100644 --- a/clang/test/CIR/CIRToLLVM/binop-fp.cir +++ b/clang/test/CIR/CIRToLLVM/binop-fp.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/binop-int.cir b/clang/test/CIR/CIRToLLVM/binop-int.cir index 088f9b8cc7eb..9c51cca36307 100644 --- a/clang/test/CIR/CIRToLLVM/binop-int.cir +++ b/clang/test/CIR/CIRToLLVM/binop-int.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/bool.cir b/clang/test/CIR/CIRToLLVM/bool.cir index 4d16110e3cba..654d23af75a7 100644 --- a/clang/test/CIR/CIRToLLVM/bool.cir +++ b/clang/test/CIR/CIRToLLVM/bool.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/cmp.cir b/clang/test/CIR/CIRToLLVM/cmp.cir index f0c9d791b751..d0b24805d493 100644 --- a/clang/test/CIR/CIRToLLVM/cmp.cir +++ b/clang/test/CIR/CIRToLLVM/cmp.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/goto.cir b/clang/test/CIR/CIRToLLVM/goto.cir index 696c2a9bbd17..0b992f653b3b 100644 --- a/clang/test/CIR/CIRToLLVM/goto.cir +++ b/clang/test/CIR/CIRToLLVM/goto.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -canonicalize -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -canonicalize -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -canonicalize -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM // XFAIL: * module { diff --git a/clang/test/CIR/CIRToLLVM/memref.cir b/clang/test/CIR/CIRToLLVM/memref.cir index 6461094c8b03..bec530a01699 100644 --- a/clang/test/CIR/CIRToLLVM/memref.cir +++ b/clang/test/CIR/CIRToLLVM/memref.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() -> i32 { diff --git a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir index f67358c21ace..f259a8a4d906 100644 --- a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir +++ b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir b/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir index 37a1d159ed1e..bbeda296ba5f 100644 --- a/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir +++ b/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/cirtool.cir b/clang/test/CIR/cirtool.cir index 3f23b38dd5a5..f58ab5c3cc4c 100644 --- a/clang/test/CIR/cirtool.cir +++ b/clang/test/CIR/cirtool.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-to-llvm -o %t.mlir +// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: mlir-translate -mlir-to-llvmir %t.mlir -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index 30937a18ba3c..fbe2332b53c0 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -34,7 +34,7 @@ int main(int argc, char **argv) { return cir::createConvertCIRToFuncPass(); }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { - return cir::createConvertCIRToLLVMPass(); + return cir::createConvertMLIRToLLVMPass(); }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertCIRToMemRefPass(); From 9e1078607ff5a61906907e8c2022a0dbb2ba3c65 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 5 Dec 2022 22:03:49 -0500 Subject: [PATCH 0706/2301] [CIR][CIRToMLIR] Add type conversion for floats --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index c69815e88db6..bc89e5cc1665 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -565,6 +565,8 @@ mlir::TypeConverter prepareTypeConverter() { }); converter.addConversion( [&](mlir::IntegerType type) -> mlir::Type { return type; }); + converter.addConversion( + [&](mlir::FloatType type) -> mlir::Type { return type; }); return converter; } From 2a90a5cef0d0f58dfe00d1368e32e675f9d38991 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 5 Dec 2022 22:04:35 -0500 Subject: [PATCH 0707/2301] [CIR][CIRToMLIR] Convert CIRLoadLowering to an OpConversionPattern Pretty straightforward change following suit of the other CIR->memref instances. This allows the type to have a typeconverter. --- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index bc89e5cc1665..7de0b920e0f9 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -151,16 +151,14 @@ class CIRAllocaLowering : public mlir::OpRewritePattern { } }; -class CIRLoadLowering : public mlir::ConversionPattern { +class CIRLoadLowering : public mlir::OpConversionPattern { public: - CIRLoadLowering(mlir::MLIRContext *ctx) - : mlir::ConversionPattern(mlir::cir::LoadOp::getOperationName(), 1, ctx) { - } + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::Operation *op, ArrayRef operands, + matchAndRewrite(mlir::cir::LoadOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, operands[0]); + rewriter.replaceOpWithNewOp(op, adaptor.getAddr()); return mlir::LogicalResult::success(); } }; From 350ab670ca56dae5523bf02b401d8daeeacccccc Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 5 Dec 2022 22:46:19 -0500 Subject: [PATCH 0708/2301] [CIR][Lowering] Remove CIRTo{Func,Memref} separate passes We aren't going to end up using these localized lowerings, so remove them here. --- clang/include/clang/CIR/Passes.h | 7 -- clang/lib/CIR/CodeGen/LowerToLLVM.cpp | 79 ------------------- clang/test/CIR/CIRToLLVM/array.cir | 4 +- clang/test/CIR/CIRToLLVM/binop-fp.cir | 4 +- clang/test/CIR/CIRToLLVM/binop-int.cir | 4 +- clang/test/CIR/CIRToLLVM/bool.cir | 4 +- clang/test/CIR/CIRToLLVM/cmp.cir | 4 +- clang/test/CIR/CIRToLLVM/goto.cir | 4 +- clang/test/CIR/CIRToLLVM/memref.cir | 4 +- clang/test/CIR/CIRToLLVM/unary-inc-dec.cir | 4 +- clang/test/CIR/CIRToLLVM/unary-plus-minus.cir | 4 +- clang/test/CIR/cirtool.cir | 2 +- clang/tools/cir-tool/cir-tool.cpp | 6 -- 13 files changed, 19 insertions(+), 111 deletions(-) diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index aa6be536313c..57a77e8f87b8 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -18,18 +18,11 @@ #include namespace cir { -/// Create a pass for lowering from `cir.func` to `func.func`. -std::unique_ptr createConvertCIRToFuncPass(); - /// Create a pass for lowering from `CIR` operations well as `Affine` and `Std`, /// to the LLVM dialect for codegen. We'll want to separate this eventually into /// different phases instead of doing it all at once. std::unique_ptr createConvertMLIRToLLVMPass(); -/// Create a pass that only lowers a subset of `CIR` memref-like operations to -/// MemRef specific versions. -std::unique_ptr createConvertCIRToMemRefPass(); - /// Create a pass that fully lowers CIR to the MLIR in-tree dialects. std::unique_ptr createConvertCIRToMLIRPass(); } // end namespace cir diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp index 7de0b920e0f9..e97e411bc121 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/CodeGen/LowerToLLVM.cpp @@ -53,30 +53,6 @@ struct ConvertMLIRToLLVMPass virtual StringRef getArgument() const override { return "cir-mlir-to-llvm"; } }; -struct ConvertCIRToMemRefPass - : public mlir::PassWrapper> { - void getDependentDialects(mlir::DialectRegistry ®istry) const override { - registry.insert(); - } - void runOnOperation() final; - - virtual StringRef getArgument() const override { return "cir-to-memref"; } -}; - -struct ConvertCIRToFuncPass - : public mlir::PassWrapper> { - void getDependentDialects(mlir::DialectRegistry ®istry) const override { - registry.insert(); - } - void runOnOperation() final; - - virtual StringRef getArgument() const override { return "cir-to-func"; } -}; - class CIRReturnLowering : public mlir::OpConversionPattern { public: @@ -609,53 +585,6 @@ void ConvertMLIRToLLVMPass::runOnOperation() { signalPassFailure(); } -void ConvertCIRToMemRefPass::runOnOperation() { - mlir::ConversionTarget target(getContext()); - - // TODO: Should this be a wholesale conversion? It's a bit ambiguous on - // whether we should have micro-conversions that do the minimal amount of work - // or macro conversions that entiirely remove a dialect. - target.addLegalOp(); - target.addLegalDialect(); - target - .addIllegalOp(); - - mlir::RewritePatternSet patterns(&getContext()); - populateCIRToMemRefConversionPatterns(patterns); - // populateAffineToStdConversionPatterns(patterns); - // populateLoopToStdConversionPatterns(patterns); - - auto module = getOperation(); - if (failed(applyPartialConversion(module, target, std::move(patterns)))) - signalPassFailure(); -} - -void ConvertCIRToFuncPass::runOnOperation() { - // End goal here is to legalize to builtin.func, func.return, func.call. - // Given that children node are ignored, handle both return and call in - // a subsequent conversion. - - // Convert cir.func to builtin.func - mlir::ConversionTarget target(getContext()); - target.addLegalOp(); - target.addLegalDialect(); - target.addIllegalOp(); - - mlir::RewritePatternSet patterns(&getContext()); - auto converter = prepareTypeConverter(); - patterns.add(patterns.getContext()); - patterns.add(converter, patterns.getContext()); - - auto module = getOperation(); - if (failed(applyPartialConversion(module, target, std::move(patterns)))) - signalPassFailure(); -} - std::unique_ptr lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, std::unique_ptr mlirCtx, @@ -689,14 +618,6 @@ std::unique_ptr createConvertMLIRToLLVMPass() { return std::make_unique(); } -std::unique_ptr createConvertCIRToMemRefPass() { - return std::make_unique(); -} - -std::unique_ptr createConvertCIRToFuncPass() { - return std::make_unique(); -} - std::unique_ptr createConvertCIRToMLIRPass() { return std::make_unique(); } diff --git a/clang/test/CIR/CIRToLLVM/array.cir b/clang/test/CIR/CIRToLLVM/array.cir index 88735607eb78..dfbf6846d77c 100644 --- a/clang/test/CIR/CIRToLLVM/array.cir +++ b/clang/test/CIR/CIRToLLVM/array.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-mlir -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/binop-fp.cir b/clang/test/CIR/CIRToLLVM/binop-fp.cir index c0f84ecf027c..a1e3b5f5d183 100644 --- a/clang/test/CIR/CIRToLLVM/binop-fp.cir +++ b/clang/test/CIR/CIRToLLVM/binop-fp.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/binop-int.cir b/clang/test/CIR/CIRToLLVM/binop-int.cir index 9c51cca36307..58ad1be56115 100644 --- a/clang/test/CIR/CIRToLLVM/binop-int.cir +++ b/clang/test/CIR/CIRToLLVM/binop-int.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/bool.cir b/clang/test/CIR/CIRToLLVM/bool.cir index 654d23af75a7..0879e2cbfa99 100644 --- a/clang/test/CIR/CIRToLLVM/bool.cir +++ b/clang/test/CIR/CIRToLLVM/bool.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/cmp.cir b/clang/test/CIR/CIRToLLVM/cmp.cir index d0b24805d493..bda86d3d9047 100644 --- a/clang/test/CIR/CIRToLLVM/cmp.cir +++ b/clang/test/CIR/CIRToLLVM/cmp.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/goto.cir b/clang/test/CIR/CIRToLLVM/goto.cir index 0b992f653b3b..c05adc1505e1 100644 --- a/clang/test/CIR/CIRToLLVM/goto.cir +++ b/clang/test/CIR/CIRToLLVM/goto.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -canonicalize -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -canonicalize -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -canonicalize -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -canonicalize -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM // XFAIL: * module { diff --git a/clang/test/CIR/CIRToLLVM/memref.cir b/clang/test/CIR/CIRToLLVM/memref.cir index bec530a01699..19b65f5a79b6 100644 --- a/clang/test/CIR/CIRToLLVM/memref.cir +++ b/clang/test/CIR/CIRToLLVM/memref.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() -> i32 { diff --git a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir index f259a8a4d906..0edb8ee4e58a 100644 --- a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir +++ b/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir b/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir index bbeda296ba5f..d0da3bbc12f6 100644 --- a/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir +++ b/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-func -cir-to-memref -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/cirtool.cir b/clang/test/CIR/cirtool.cir index f58ab5c3cc4c..eabd45ba88c3 100644 --- a/clang/test/CIR/cirtool.cir +++ b/clang/test/CIR/cirtool.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-func -cir-to-memref -cir-mlir-to-llvm -o %t.mlir +// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: mlir-translate -mlir-to-llvmir %t.mlir -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index fbe2332b53c0..9d2090806305 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -30,15 +30,9 @@ int main(int argc, char **argv) { mlir::cir::CIRDialect, mlir::memref::MemRefDialect, mlir::LLVM::LLVMDialect>(); - ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { - return cir::createConvertCIRToFuncPass(); - }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertMLIRToLLVMPass(); }); - ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { - return cir::createConvertCIRToMemRefPass(); - }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return mlir::createMergeCleanupsPass(); }); From 3f1abbaeafd626fb8cdc67e794b258884ecbf3ee Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 5 Dec 2022 23:02:03 -0500 Subject: [PATCH 0709/2301] [CIR][Lowering][NFC] Split some lowering code to separate directories --- clang/lib/CIR/CMakeLists.txt | 1 + clang/lib/CIR/CodeGen/CMakeLists.txt | 1 - clang/lib/CIR/FrontendAction/CMakeLists.txt | 1 + clang/lib/CIR/Lowering/CMakeLists.txt | 2 + .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 0 .../CIR/Lowering/ThroughMLIR/CMakeLists.txt | 34 ++++++++ .../ThroughMLIR/LowerCIRToMLIR.cpp} | 45 +---------- .../Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp | 79 +++++++++++++++++++ clang/tools/cir-tool/CMakeLists.txt | 1 + 9 files changed, 120 insertions(+), 44 deletions(-) create mode 100644 clang/lib/CIR/Lowering/CMakeLists.txt create mode 100644 clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt create mode 100644 clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt rename clang/lib/CIR/{CodeGen/LowerToLLVM.cpp => Lowering/ThroughMLIR/LowerCIRToMLIR.cpp} (92%) create mode 100644 clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index abdbe92614d7..79c980ec020c 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -1,3 +1,4 @@ add_subdirectory(Dialect) add_subdirectory(CodeGen) add_subdirectory(FrontendAction) +add_subdirectory(Lowering) diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 92fe8fc1deac..99bcee685ee7 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -33,7 +33,6 @@ add_clang_library(clangCIR CIRGenerator.cpp CIRPasses.cpp CIRRecordLayoutBuilder.cpp - LowerToLLVM.cpp TargetInfo.cpp DEPENDS diff --git a/clang/lib/CIR/FrontendAction/CMakeLists.txt b/clang/lib/CIR/FrontendAction/CMakeLists.txt index 558787eb3a86..7c873ef3a98c 100644 --- a/clang/lib/CIR/FrontendAction/CMakeLists.txt +++ b/clang/lib/CIR/FrontendAction/CMakeLists.txt @@ -21,6 +21,7 @@ add_clang_library(clangCIRFrontendAction clangLex clangFrontend clangCIR + clangCIRLoweringThroughMLIR ${dialect_libs} MLIRCIR MLIRAnalysis diff --git a/clang/lib/CIR/Lowering/CMakeLists.txt b/clang/lib/CIR/Lowering/CMakeLists.txt new file mode 100644 index 000000000000..f720e597ecb0 --- /dev/null +++ b/clang/lib/CIR/Lowering/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(DirectToLLVM) +add_subdirectory(ThroughMLIR) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt new file mode 100644 index 000000000000..3d0d513338fd --- /dev/null +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -0,0 +1,34 @@ +set(LLVM_LINK_COMPONENTS + Core + Support + ) + +include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) +include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_clang_library(clangCIRLoweringThroughMLIR + LowerCIRToMLIR.cpp + LowerMLIRToLLVM.cpp + + DEPENDS + MLIRCIROpsIncGen + + LINK_LIBS + clangAST + clangBasic + clangCodeGen + clangLex + clangFrontend + clangCIR + ${dialect_libs} + MLIRCIR + MLIRAnalysis + MLIRIR + MLIRParser + MLIRSideEffectInterfaces + MLIRTransforms + MLIRSupport + MLIRMemRefDialect + ) diff --git a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp similarity index 92% rename from clang/lib/CIR/CodeGen/LowerToLLVM.cpp rename to clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index e97e411bc121..eefe9be8b216 100644 --- a/clang/lib/CIR/CodeGen/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -1,4 +1,4 @@ -//====- LowerToLLVM.cpp - Lowering from CIR to LLVM -----------------------===// +//====- LowerCIRToMLIR.cpp - Lowering from CIR to MLIR --------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements full lowering of CIR operations to LLVMIR. +// This file implements lowering of CIR operations to MLIR. // //===----------------------------------------------------------------------===// @@ -42,17 +42,6 @@ using namespace llvm; namespace cir { -struct ConvertMLIRToLLVMPass - : public mlir::PassWrapper> { - void getDependentDialects(mlir::DialectRegistry ®istry) const override { - registry.insert(); - } - void runOnOperation() final; - - virtual StringRef getArgument() const override { return "cir-mlir-to-llvm"; } -}; - class CIRReturnLowering : public mlir::OpConversionPattern { public: @@ -517,12 +506,6 @@ class CIRBrOpLowering : public mlir::OpRewritePattern { } }; -void populateCIRToMemRefConversionPatterns(mlir::RewritePatternSet &patterns) { - patterns.add(patterns.getContext()); -} - void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(); - - mlir::LLVMTypeConverter typeConverter(&getContext()); - - mlir::RewritePatternSet patterns(&getContext()); - populateAffineToStdConversionPatterns(patterns); - mlir::arith::populateArithToLLVMConversionPatterns(typeConverter, patterns); - populateSCFToControlFlowConversionPatterns(patterns); - mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, - patterns); - populateFinalizeMemRefToLLVMConversionPatterns(typeConverter, patterns); - populateFuncToLLVMConversionPatterns(typeConverter, patterns); - - auto module = getOperation(); - if (failed(applyFullConversion(module, target, std::move(patterns)))) - signalPassFailure(); -} - std::unique_ptr lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, std::unique_ptr mlirCtx, @@ -614,10 +577,6 @@ lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, return llvmModule; } -std::unique_ptr createConvertMLIRToLLVMPass() { - return std::make_unique(); -} - std::unique_ptr createConvertCIRToMLIRPass() { return std::make_unique(); } diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp new file mode 100644 index 000000000000..930ce1c12f68 --- /dev/null +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp @@ -0,0 +1,79 @@ +//====- LowerMLIRToCIR.cpp - Lowering from MLIR to CIR --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements lowering of CIR-lowered MLIR operations to LLVMIR. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Conversion/AffineToStandard/AffineToStandard.h" +#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h" +#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" +#include "mlir/Conversion/LLVMCommon/ConversionTarget.h" +#include "mlir/Conversion/LLVMCommon/TypeConverter.h" +#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h" +#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" +#include "mlir/Dialect/Affine/IR/AffineOps.h" +#include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/SCF/IR/SCF.h" +#include "mlir/Dialect/SCF/Transforms/Passes.h" +#include "mlir/IR/BuiltinDialect.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Export.h" +#include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Passes.h" +#include "llvm/ADT/Sequence.h" + +using namespace cir; +using namespace llvm; + +namespace cir { +struct ConvertMLIRToLLVMPass + : public mlir::PassWrapper> { + void getDependentDialects(mlir::DialectRegistry ®istry) const override { + registry.insert(); + } + void runOnOperation() final; + + virtual StringRef getArgument() const override { return "cir-mlir-to-llvm"; } +}; + +void ConvertMLIRToLLVMPass::runOnOperation() { + mlir::LLVMConversionTarget target(getContext()); + target.addLegalOp(); + + mlir::LLVMTypeConverter typeConverter(&getContext()); + + mlir::RewritePatternSet patterns(&getContext()); + populateAffineToStdConversionPatterns(patterns); + mlir::arith::populateArithToLLVMConversionPatterns(typeConverter, patterns); + populateSCFToControlFlowConversionPatterns(patterns); + mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, + patterns); + populateFinalizeMemRefToLLVMConversionPatterns(typeConverter, patterns); + populateFuncToLLVMConversionPatterns(typeConverter, patterns); + + auto module = getOperation(); + if (failed(applyFullConversion(module, target, std::move(patterns)))) + signalPassFailure(); +} + +std::unique_ptr createConvertMLIRToLLVMPass() { + return std::make_unique(); +} + +} // namespace cir diff --git a/clang/tools/cir-tool/CMakeLists.txt b/clang/tools/cir-tool/CMakeLists.txt index 42a734b35e92..4ce9b7784d7d 100644 --- a/clang/tools/cir-tool/CMakeLists.txt +++ b/clang/tools/cir-tool/CMakeLists.txt @@ -8,6 +8,7 @@ set(LIBS ${dialect_libs} ${conversion_libs} clangCIR + clangCIRLoweringThroughMLIR MLIRAnalysis MLIRCIR MLIRCIRTransforms From 804074be4cdfa56e9a6441eb976e76f240d624ef Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 5 Dec 2022 23:20:17 -0500 Subject: [PATCH 0710/2301] [CIR][Lowering] Add a stubbed out version of DirectToLLVM --- .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 33 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 590 ++++++++++++++++++ 2 files changed, 623 insertions(+) create mode 100644 clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index e69de29bb2d1..832c99622394 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -0,0 +1,33 @@ +set(LLVM_LINK_COMPONENTS + Core + Support + ) + +include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) +include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_clang_library(clangCIRLoweringDirectToLLVM + LowerToLLVM.cpp + + DEPENDS + MLIRCIROpsIncGen + + LINK_LIBS + clangAST + clangBasic + clangCodeGen + clangLex + clangFrontend + clangCIR + ${dialect_libs} + MLIRCIR + MLIRAnalysis + MLIRIR + MLIRParser + MLIRSideEffectInterfaces + MLIRTransforms + MLIRSupport + MLIRMemRefDialect + ) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp new file mode 100644 index 000000000000..be3a323777c0 --- /dev/null +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -0,0 +1,590 @@ +//====- LowerToLLVM.cpp - Lowering from CIR to LLVMIR ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements lowering of CIR operations to LLVMIR. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Conversion/AffineToStandard/AffineToStandard.h" +#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h" +#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" +#include "mlir/Conversion/LLVMCommon/ConversionTarget.h" +#include "mlir/Conversion/LLVMCommon/TypeConverter.h" +#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h" +#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" +#include "mlir/Dialect/Affine/IR/AffineOps.h" +#include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/SCF/IR/SCF.h" +#include "mlir/Dialect/SCF/Transforms/Passes.h" +#include "mlir/IR/BlockAndValueMapping.h" +#include "mlir/IR/BuiltinDialect.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Export.h" +#include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Passes.h" +#include "llvm/ADT/Sequence.h" + +using namespace cir; +using namespace llvm; + +namespace cir { + +// class CIRReturnLowering +// : public mlir::OpConversionPattern { +// public: +// using OpConversionPattern::OpConversionPattern; + +// mlir::LogicalResult +// matchAndRewrite(mlir::cir::ReturnOp op, OpAdaptor adaptor, +// mlir::ConversionPatternRewriter &rewriter) const override { +// rewriter.replaceOpWithNewOp(op, +// adaptor.getOperands()); +// return mlir::LogicalResult::success(); +// } +// }; + +struct ConvertCIRToLLVMPass + : public mlir::PassWrapper> { + void getDependentDialects(mlir::DialectRegistry ®istry) const override { + registry.insert(); + } + void runOnOperation() final; + + virtual StringRef getArgument() const override { return "cir-to-llvm"; } +}; + +// class CIRCallLowering : public mlir::OpRewritePattern { +// public: +// using OpRewritePattern::OpRewritePattern; + +// mlir::LogicalResult +// matchAndRewrite(mlir::cir::CallOp op, +// mlir::PatternRewriter &rewriter) const override { +// rewriter.replaceOpWithNewOp( +// op, mlir::SymbolRefAttr::get(op), op.getResultTypes(), +// op.getArgOperands()); +// return mlir::LogicalResult::success(); +// } +// }; + +// class CIRAllocaLowering : public mlir::OpRewritePattern +// { public: +// using OpRewritePattern::OpRewritePattern; + +// mlir::LogicalResult +// matchAndRewrite(mlir::cir::AllocaOp op, +// mlir::PatternRewriter &rewriter) const override { +// auto type = op.getAllocaType(); +// mlir::MemRefType memreftype; + +// if (type.isa()) { +// auto integerType = +// mlir::IntegerType::get(getContext(), 8, +// mlir::IntegerType::Signless); +// memreftype = mlir::MemRefType::get({}, integerType); +// } else if (type.isa()) { +// mlir::cir::ArrayType arraytype = type.dyn_cast(); +// memreftype = +// mlir::MemRefType::get(arraytype.getSize(), arraytype.getEltType()); +// } else if (type.isa() || type.isa()) +// { +// memreftype = mlir::MemRefType::get({}, op.getAllocaType()); +// } else if (type.isa()) { +// auto ptrType = type.cast(); +// auto innerMemref = mlir::MemRefType::get({-1}, ptrType.getPointee()); +// memreftype = mlir::MemRefType::get({}, innerMemref); +// } else { +// llvm_unreachable("type to be allocated not supported yet"); +// } +// rewriter.replaceOpWithNewOp(op, memreftype, +// op.getAlignmentAttr()); +// return mlir::LogicalResult::success(); +// } +// }; + +// class CIRLoadLowering : public mlir::OpConversionPattern { +// public: +// using OpConversionPattern::OpConversionPattern; + +// mlir::LogicalResult +// matchAndRewrite(mlir::cir::LoadOp op, OpAdaptor adaptor, +// mlir::ConversionPatternRewriter &rewriter) const override { +// rewriter.replaceOpWithNewOp(op, adaptor.getAddr()); +// return mlir::LogicalResult::success(); +// } +// }; + +// class CIRStoreLowering : public mlir::ConversionPattern { +// public: +// CIRStoreLowering(mlir::MLIRContext *ctx) +// : mlir::ConversionPattern(mlir::cir::StoreOp::getOperationName(), 1, +// ctx) {} + +// mlir::LogicalResult +// matchAndRewrite(mlir::Operation *op, ArrayRef operands, +// mlir::ConversionPatternRewriter &rewriter) const override { +// rewriter.replaceOpWithNewOp(op, operands[0], +// operands[1]); +// return mlir::LogicalResult::success(); +// } +// }; + +// class CIRConstantLowering +// : public mlir::OpRewritePattern { +// public: +// using OpRewritePattern::OpRewritePattern; + +// mlir::LogicalResult +// matchAndRewrite(mlir::cir::ConstantOp op, +// mlir::PatternRewriter &rewriter) const override { +// if (op.getType().isa()) { +// mlir::Type type = +// mlir::IntegerType::get(getContext(), 8, +// mlir::IntegerType::Signless); +// mlir::Attribute IntegerAttr; +// if (op.getValue() == mlir::BoolAttr::get(getContext(), true)) +// IntegerAttr = mlir::IntegerAttr::get(type, 1); +// else +// IntegerAttr = mlir::IntegerAttr::get(type, 0); +// rewriter.replaceOpWithNewOp(op, type, +// IntegerAttr); +// } else +// rewriter.replaceOpWithNewOp(op, op.getType(), +// op.getValue()); +// return mlir::LogicalResult::success(); +// } +// }; + +// class CIRFuncLowering : public mlir::OpConversionPattern { +// public: +// using OpConversionPattern::OpConversionPattern; + +// mlir::LogicalResult +// matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, +// mlir::ConversionPatternRewriter &rewriter) const override { + +// auto fnType = op.getFunctionType(); +// mlir::TypeConverter::SignatureConversion signatureConversion( +// fnType.getNumInputs()); + +// for (const auto &argType : enumerate(fnType.getInputs())) { +// auto convertedType = typeConverter->convertType(argType.value()); +// if (!convertedType) +// return mlir::failure(); +// signatureConversion.addInputs(argType.index(), convertedType); +// } + +// mlir::Type resultType; +// if (fnType.getNumResults() == 1) { +// resultType = getTypeConverter()->convertType(fnType.getResult(0)); +// if (!resultType) +// return mlir::failure(); +// } + +// auto fn = rewriter.create( +// op.getLoc(), op.getName(), +// rewriter.getFunctionType(signatureConversion.getConvertedTypes(), +// resultType ? mlir::TypeRange(resultType) +// : mlir::TypeRange())); + +// rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); +// if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, +// &signatureConversion))) +// return mlir::failure(); + +// rewriter.eraseOp(op); + +// return mlir::LogicalResult::success(); +// } +// }; + +// class CIRUnaryOpLowering : public mlir::OpRewritePattern +// { public: +// using OpRewritePattern::OpRewritePattern; + +// mlir::LogicalResult +// matchAndRewrite(mlir::cir::UnaryOp op, +// mlir::PatternRewriter &rewriter) const override { +// mlir::Type type = op.getInput().getType(); +// assert(type.isa() && "operand type not supported +// yet"); + +// switch (op.getKind()) { +// case mlir::cir::UnaryOpKind::Inc: { +// auto One = rewriter.create( +// op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); +// rewriter.replaceOpWithNewOp(op, op.getType(), +// op.getInput(), One); +// break; +// } +// case mlir::cir::UnaryOpKind::Dec: { +// auto One = rewriter.create( +// op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); +// rewriter.replaceOpWithNewOp(op, op.getType(), +// op.getInput(), One); +// break; +// } +// case mlir::cir::UnaryOpKind::Plus: { +// rewriter.replaceOp(op, op.getInput()); +// break; +// } +// case mlir::cir::UnaryOpKind::Minus: { +// auto Zero = rewriter.create( +// op.getLoc(), type, mlir::IntegerAttr::get(type, 0)); +// rewriter.replaceOpWithNewOp(op, op.getType(), +// Zero, +// op.getInput()); +// break; +// } +// } + +// return mlir::LogicalResult::success(); +// } +// }; + +// class CIRBinOpLowering : public mlir::OpRewritePattern { +// public: +// using OpRewritePattern::OpRewritePattern; + +// mlir::LogicalResult +// matchAndRewrite(mlir::cir::BinOp op, +// mlir::PatternRewriter &rewriter) const override { +// assert((op.getLhs().getType() == op.getRhs().getType()) && +// "inconsistent operands' types not supported yet"); +// mlir::Type type = op.getRhs().getType(); +// assert((type.isa() || type.isa()) && +// "operand type not supported yet"); + +// switch (op.getKind()) { +// case mlir::cir::BinOpKind::Add: +// if (type.isa()) +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// else +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// break; +// case mlir::cir::BinOpKind::Sub: +// if (type.isa()) +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// else +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// break; +// case mlir::cir::BinOpKind::Mul: +// if (type.isa()) +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// else +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// break; +// case mlir::cir::BinOpKind::Div: +// if (type.isa()) { +// if (type.isSignlessInteger()) +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// else +// llvm_unreachable("integer type not supported in CIR yet"); +// } else +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// break; +// case mlir::cir::BinOpKind::Rem: +// if (type.isa()) { +// if (type.isSignlessInteger()) +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// else +// llvm_unreachable("integer type not supported in CIR yet"); +// } else +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// break; +// case mlir::cir::BinOpKind::And: +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// break; +// case mlir::cir::BinOpKind::Or: +// rewriter.replaceOpWithNewOp(op, op.getType(), +// op.getLhs(), +// op.getRhs()); +// break; +// case mlir::cir::BinOpKind::Xor: +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// break; +// case mlir::cir::BinOpKind::Shl: +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// break; +// case mlir::cir::BinOpKind::Shr: +// if (type.isSignlessInteger()) +// rewriter.replaceOpWithNewOp( +// op, op.getType(), op.getLhs(), op.getRhs()); +// else +// llvm_unreachable("integer type not supported in CIR yet"); +// break; +// } + +// return mlir::LogicalResult::success(); +// } +// }; + +// class CIRCmpOpLowering : public mlir::OpRewritePattern { +// public: +// using OpRewritePattern::OpRewritePattern; + +// mlir::LogicalResult +// matchAndRewrite(mlir::cir::CmpOp op, +// mlir::PatternRewriter &rewriter) const override { +// auto type = op.getLhs().getType(); +// auto integerType = +// mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); + +// switch (op.getKind()) { +// case mlir::cir::CmpOpKind::gt: { +// if (type.isa()) { +// mlir::arith::CmpIPredicate cmpIType; +// if (!type.isSignlessInteger()) +// llvm_unreachable("integer type not supported in CIR yet"); +// cmpIType = mlir::arith::CmpIPredicate::ugt; +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), +// op.getLhs(), op.getRhs()); +// } else if (type.isa()) { +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpFPredicateAttr::get( +// getContext(), mlir::arith::CmpFPredicate::UGT), +// op.getLhs(), op.getRhs()); +// } else { +// llvm_unreachable("Unknown Operand Type"); +// } +// break; +// } +// case mlir::cir::CmpOpKind::ge: { +// if (type.isa()) { +// mlir::arith::CmpIPredicate cmpIType; +// if (!type.isSignlessInteger()) +// llvm_unreachable("integer type not supported in CIR yet"); +// cmpIType = mlir::arith::CmpIPredicate::uge; +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), +// op.getLhs(), op.getRhs()); +// } else if (type.isa()) { +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpFPredicateAttr::get( +// getContext(), mlir::arith::CmpFPredicate::UGE), +// op.getLhs(), op.getRhs()); +// } else { +// llvm_unreachable("Unknown Operand Type"); +// } +// break; +// } +// case mlir::cir::CmpOpKind::lt: { +// if (type.isa()) { +// mlir::arith::CmpIPredicate cmpIType; +// if (!type.isSignlessInteger()) +// llvm_unreachable("integer type not supported in CIR yet"); +// cmpIType = mlir::arith::CmpIPredicate::ult; +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), +// op.getLhs(), op.getRhs()); +// } else if (type.isa()) { +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpFPredicateAttr::get( +// getContext(), mlir::arith::CmpFPredicate::ULT), +// op.getLhs(), op.getRhs()); +// } else { +// llvm_unreachable("Unknown Operand Type"); +// } +// break; +// } +// case mlir::cir::CmpOpKind::le: { +// if (type.isa()) { +// mlir::arith::CmpIPredicate cmpIType; +// if (!type.isSignlessInteger()) +// llvm_unreachable("integer type not supported in CIR yet"); +// cmpIType = mlir::arith::CmpIPredicate::ule; +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), +// op.getLhs(), op.getRhs()); +// } else if (type.isa()) { +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpFPredicateAttr::get( +// getContext(), mlir::arith::CmpFPredicate::ULE), +// op.getLhs(), op.getRhs()); +// } else { +// llvm_unreachable("Unknown Operand Type"); +// } +// break; +// } +// case mlir::cir::CmpOpKind::eq: { +// if (type.isa()) { +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpIPredicateAttr::get(getContext(), +// mlir::arith::CmpIPredicate::eq), +// op.getLhs(), op.getRhs()); +// } else if (type.isa()) { +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpFPredicateAttr::get( +// getContext(), mlir::arith::CmpFPredicate::UEQ), +// op.getLhs(), op.getRhs()); +// } else { +// llvm_unreachable("Unknown Operand Type"); +// } +// break; +// } +// case mlir::cir::CmpOpKind::ne: { +// if (type.isa()) { +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpIPredicateAttr::get(getContext(), +// mlir::arith::CmpIPredicate::ne), +// op.getLhs(), op.getRhs()); +// } else if (type.isa()) { +// rewriter.replaceOpWithNewOp( +// op, integerType, +// mlir::arith::CmpFPredicateAttr::get( +// getContext(), mlir::arith::CmpFPredicate::UNE), +// op.getLhs(), op.getRhs()); +// } else { +// llvm_unreachable("Unknown Operand Type"); +// } +// break; +// } +// } + +// return mlir::LogicalResult::success(); +// } +// }; + +// class CIRBrOpLowering : public mlir::OpRewritePattern { +// public: +// using OpRewritePattern::OpRewritePattern; + +// mlir::LogicalResult +// matchAndRewrite(mlir::cir::BrOp op, +// mlir::PatternRewriter &rewriter) const override { +// rewriter.replaceOpWithNewOp(op, op.getDest()); +// return mlir::LogicalResult::success(); +// } +// }; + +void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, + mlir::TypeConverter &converter) { + // patterns.add(patterns.getContext()); + // patterns.add(converter, patterns.getContext()); +} + +mlir::TypeConverter prepareTypeConverter() { + mlir::TypeConverter converter; + converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { + return mlir::MemRefType::get({-1}, type.getPointee()); + }); + converter.addConversion( + [&](mlir::IntegerType type) -> mlir::Type { return type; }); + converter.addConversion( + [&](mlir::FloatType type) -> mlir::Type { return type; }); + + return converter; +} + +void ConvertCIRToLLVMPass::runOnOperation() { + auto module = getOperation(); + + auto converter = prepareTypeConverter(); + + mlir::RewritePatternSet patterns(&getContext()); + + populateCIRToLLVMConversionPatterns(patterns, converter); + + mlir::ConversionTarget target(getContext()); + target.addLegalOp(); + target.addLegalDialect(); + target.addIllegalDialect(); + + if (failed(applyPartialConversion(module, target, std::move(patterns)))) + signalPassFailure(); +} + +std::unique_ptr +lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, + std::unique_ptr mlirCtx, + LLVMContext &llvmCtx) { + mlir::PassManager pm(mlirCtx.get()); + + pm.addPass(createConvertCIRToLLVMPass()); + + auto result = !mlir::failed(pm.run(theModule)); + if (!result) + report_fatal_error( + "The pass manager failed to lower CIR to LLVMIR dialect!"); + + // Now that we ran all the lowering passes, verify the final output. + if (theModule.verify().failed()) + report_fatal_error("Verification of the final LLVMIR dialect failed!"); + + mlir::registerLLVMDialectTranslation(*mlirCtx); + + LLVMContext llvmContext; + auto llvmModule = mlir::translateModuleToLLVMIR(theModule, llvmCtx); + + if (!llvmModule) + report_fatal_error("Lowering from LLVMIR dialect to llvm IR failed!"); + + return llvmModule; +} + +std::unique_ptr createConvertCIRToLLVMPass() { + return std::make_unique(); +} + +mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, + mlir::MLIRContext *mlirCtx) { + mlir::PassManager pm(mlirCtx); + + pm.addPass(createConvertCIRToLLVMPass()); + + auto result = !mlir::failed(pm.run(theModule)); + if (!result) + report_fatal_error( + "The pass manager failed to lower CIR to LLVMIR dialect!"); + + // Now that we ran all the lowering passes, verify the final output. + if (theModule.verify().failed()) + report_fatal_error("Verification of the final LLVMIR dialect failed!"); + + return theModule; +} + +} // namespace cir From fde57c9359e5c48d6aa028ed9f3eb0f980386c81 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 6 Dec 2022 00:10:39 -0500 Subject: [PATCH 0711/2301] [CIR][Lowering] Add a flag to opt into direct lowering This diff adds a flag that allows the user to opt into direct lowering from CIR to LLVMIR and much of the infrastructure necessary to make that possible. --- clang/include/clang/CIR/LowerToLLVM.h | 11 ++++-- clang/include/clang/CIR/Passes.h | 5 ++- clang/include/clang/Driver/Options.td | 4 +++ .../include/clang/Frontend/FrontendOptions.h | 10 ++++-- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 19 +++++++--- clang/lib/CIR/FrontendAction/CMakeLists.txt | 1 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 36 +++++-------------- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 8 +++-- .../ExecuteCompilerInvocation.cpp | 4 +++ 9 files changed, 54 insertions(+), 44 deletions(-) diff --git a/clang/include/clang/CIR/LowerToLLVM.h b/clang/include/clang/CIR/LowerToLLVM.h index 0365ea3d6a9c..79ffb0ba1a10 100644 --- a/clang/include/clang/CIR/LowerToLLVM.h +++ b/clang/include/clang/CIR/LowerToLLVM.h @@ -30,9 +30,14 @@ namespace cir { // Lower directly from pristine CIR to LLVMIR. std::unique_ptr -lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, - std::unique_ptr mlirCtx, - llvm::LLVMContext &llvmCtx); +lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, + std::unique_ptr mlirCtx, + llvm::LLVMContext &llvmCtx); + +std::unique_ptr +lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, + std::unique_ptr mlirCtx, + llvm::LLVMContext &llvmCtx); mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx); diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index 57a77e8f87b8..e7f96c4593c8 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -18,9 +18,8 @@ #include namespace cir { -/// Create a pass for lowering from `CIR` operations well as `Affine` and `Std`, -/// to the LLVM dialect for codegen. We'll want to separate this eventually into -/// different phases instead of doing it all at once. +/// Create a pass for lowering from MLIR builtin dialects such as `Affine` and +/// `Std`, to the LLVM dialect for codegen. std::unique_ptr createConvertMLIRToLLVMPass(); /// Create a pass that fully lowers CIR to the MLIR in-tree dialects. diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 7f5d93535709..0b20723a2b56 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3084,6 +3084,10 @@ def fclangir_lifetime_check : Flag<["-"], "fclangir-lifetime-check">, Visibility<[ClangOption, CC1Option]>, Group, Alias, AliasArgs<["history=invalid,null"]>, HelpText<"Run lifetime checker">; +def fclangir_direct_lowering : Flag<["-"], "fclangir-direct-lowering">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Lower directly from ClangIR to LLVM">, + MarshallingInfoFlag>; def flto : Flag<["-"], "flto">, Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 6fc8ab145309..beb1696368aa 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -421,6 +421,9 @@ class FrontendOptions { LLVM_PREFERRED_TYPE(bool) unsigned UseClangIRPipeline : 1; + /// Lower directly from ClangIR to LLVM + unsigned ClangIRDirectLowering : 1; + /// Disable Clang IR specific (CIR) passes unsigned ClangIRDisablePasses : 1; @@ -625,9 +628,10 @@ class FrontendOptions { EmitSymbolGraph(false), EmitExtensionSymbolGraphs(false), EmitSymbolGraphSymbolLabelsForTesting(false), EmitPrettySymbolGraphs(false), GenReducedBMI(false), - UseClangIRPipeline(false), ClangIRDisablePasses(false), - ClangIRDisableCIRVerifier(false), ClangIRDisableEmitCXXDefault(false), - ClangIRLifetimeCheck(false), TimeTraceGranularity(500), + UseClangIRPipeline(false), ClangIRDirectLowering(false), + ClangIRDisablePasses(false), ClangIRDisableCIRVerifier(false), + ClangIRDisableEmitCXXDefault(false), ClangIRLifetimeCheck(false), + TimeTraceGranularity(500), TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 2ff0026db0f0..652495854551 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -72,6 +72,16 @@ static std::string sanitizePassOptions(llvm::StringRef o) { } namespace cir { + +static std::unique_ptr lowerFromCIRToLLVMIR( + const clang::FrontendOptions &feOptions, mlir::ModuleOp mlirMod, + std::unique_ptr mlirCtx, llvm::LLVMContext &llvmCtx) { + if (feOptions.ClangIRDirectLowering) + return lowerDirectlyFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + else + return lowerFromCIRToMLIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); +} + class CIRGenConsumer : public clang::ASTConsumer { virtual void anchor(); @@ -239,7 +249,7 @@ class CIRGenConsumer : public clang::ASTConsumer { case CIRGenAction::OutputType::EmitLLVM: { llvm::LLVMContext llvmCtx; auto llvmModule = - lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx); llvmModule->setTargetTriple(targetOptions.Triple); @@ -252,7 +262,7 @@ class CIRGenConsumer : public clang::ASTConsumer { case CIRGenAction::OutputType::EmitObj: { llvm::LLVMContext llvmCtx; auto llvmModule = - lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx); llvmModule->setTargetTriple(targetOptions.Triple); emitBackendOutput(compilerInstance, codeGenOptions, @@ -264,7 +274,7 @@ class CIRGenConsumer : public clang::ASTConsumer { case CIRGenAction::OutputType::EmitAssembly: { llvm::LLVMContext llvmCtx; auto llvmModule = - lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx); llvmModule->setTargetTriple(targetOptions.Triple); emitBackendOutput(compilerInstance, codeGenOptions, @@ -408,7 +418,8 @@ void CIRGenAction::ExecuteAction() { llvm::LLVMContext llvmCtx; auto llvmModule = lowerFromCIRToLLVMIR( - *mlirModule, std::unique_ptr(mlirContext), llvmCtx); + ci.getFrontendOpts(), *mlirModule, + std::unique_ptr(mlirContext), llvmCtx); if (outstream) llvmModule->print(*outstream, nullptr); diff --git a/clang/lib/CIR/FrontendAction/CMakeLists.txt b/clang/lib/CIR/FrontendAction/CMakeLists.txt index 7c873ef3a98c..39e9b5e2e7d7 100644 --- a/clang/lib/CIR/FrontendAction/CMakeLists.txt +++ b/clang/lib/CIR/FrontendAction/CMakeLists.txt @@ -21,6 +21,7 @@ add_clang_library(clangCIRFrontendAction clangLex clangFrontend clangCIR + clangCIRLoweringDirectToLLVM clangCIRLoweringThroughMLIR ${dialect_libs} MLIRCIR diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index be3a323777c0..cbfde4ab66d9 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -27,7 +27,6 @@ #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" -#include "mlir/IR/BlockAndValueMapping.h" #include "mlir/IR/BuiltinDialect.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" @@ -537,13 +536,17 @@ void ConvertCIRToLLVMPass::runOnOperation() { signalPassFailure(); } +std::unique_ptr createConvertDirectCIRToLLVMPass() { + return std::make_unique(); +} + std::unique_ptr -lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, - std::unique_ptr mlirCtx, - LLVMContext &llvmCtx) { +lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, + std::unique_ptr mlirCtx, + LLVMContext &llvmCtx) { mlir::PassManager pm(mlirCtx.get()); - pm.addPass(createConvertCIRToLLVMPass()); + pm.addPass(createConvertDirectCIRToLLVMPass()); auto result = !mlir::failed(pm.run(theModule)); if (!result) @@ -564,27 +567,4 @@ lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, return llvmModule; } - -std::unique_ptr createConvertCIRToLLVMPass() { - return std::make_unique(); -} - -mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, - mlir::MLIRContext *mlirCtx) { - mlir::PassManager pm(mlirCtx); - - pm.addPass(createConvertCIRToLLVMPass()); - - auto result = !mlir::failed(pm.run(theModule)); - if (!result) - report_fatal_error( - "The pass manager failed to lower CIR to LLVMIR dialect!"); - - // Now that we ran all the lowering passes, verify the final output. - if (theModule.verify().failed()) - report_fatal_error("Verification of the final LLVMIR dialect failed!"); - - return theModule; -} - } // namespace cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index eefe9be8b216..aa0fdc5f6917 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -515,6 +515,7 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add(converter, patterns.getContext()); } +namespace { mlir::TypeConverter prepareTypeConverter() { mlir::TypeConverter converter; converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { @@ -527,6 +528,7 @@ mlir::TypeConverter prepareTypeConverter() { return converter; } +} // namespace void ConvertCIRToMLIRPass::runOnOperation() { auto module = getOperation(); @@ -549,9 +551,9 @@ void ConvertCIRToMLIRPass::runOnOperation() { } std::unique_ptr -lowerFromCIRToLLVMIR(mlir::ModuleOp theModule, - std::unique_ptr mlirCtx, - LLVMContext &llvmCtx) { +lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, + std::unique_ptr mlirCtx, + LLVMContext &llvmCtx) { mlir::PassManager pm(mlirCtx.get()); pm.addPass(createConvertCIRToMLIRPass()); diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index ff7ffd985385..fb2b94b63877 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -62,6 +62,10 @@ CreateFrontendBaseAction(CompilerInstance &CI) { llvm::report_fatal_error( "-emit-cir and -emit-cir-only only valid when using -fclangir"); + if (CI.getFrontendOpts().ClangIRDirectLowering && Act == EmitMLIR) + llvm::report_fatal_error( + "ClangIR direct lowering is incompatible with -emit-mlir"); + switch (CI.getFrontendOpts().ProgramAction) { case ASTDeclList: return std::make_unique(); case ASTDump: return std::make_unique(); From de6615fd988ad6737af35ef3864ec9f07aae909e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 7 Dec 2022 17:13:20 -0500 Subject: [PATCH 0712/2301] [CIR][NFC] Move CIRToLLVM tests to the ThroughMLIR subdir We are working on lowering directly to LLVM at this point and it makes sense to move much of our old tests to a different dir to prevent colision. --- clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/array.cir | 0 clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/binop-fp.cir | 0 clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/binop-int.cir | 0 clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/bool.cir | 0 clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/cmp.cir | 0 clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/dot.cir | 0 clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/goto.cir | 0 clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/memref.cir | 0 .../CIR/{CIRToLLVM => Lowering/ThroughMLIR}/unary-inc-dec.cir | 0 .../CIR/{CIRToLLVM => Lowering/ThroughMLIR}/unary-plus-minus.cir | 0 10 files changed, 0 insertions(+), 0 deletions(-) rename clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/array.cir (100%) rename clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/binop-fp.cir (100%) rename clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/binop-int.cir (100%) rename clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/bool.cir (100%) rename clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/cmp.cir (100%) rename clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/dot.cir (100%) rename clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/goto.cir (100%) rename clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/memref.cir (100%) rename clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/unary-inc-dec.cir (100%) rename clang/test/CIR/{CIRToLLVM => Lowering/ThroughMLIR}/unary-plus-minus.cir (100%) diff --git a/clang/test/CIR/CIRToLLVM/array.cir b/clang/test/CIR/Lowering/ThroughMLIR/array.cir similarity index 100% rename from clang/test/CIR/CIRToLLVM/array.cir rename to clang/test/CIR/Lowering/ThroughMLIR/array.cir diff --git a/clang/test/CIR/CIRToLLVM/binop-fp.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir similarity index 100% rename from clang/test/CIR/CIRToLLVM/binop-fp.cir rename to clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir diff --git a/clang/test/CIR/CIRToLLVM/binop-int.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-int.cir similarity index 100% rename from clang/test/CIR/CIRToLLVM/binop-int.cir rename to clang/test/CIR/Lowering/ThroughMLIR/binop-int.cir diff --git a/clang/test/CIR/CIRToLLVM/bool.cir b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir similarity index 100% rename from clang/test/CIR/CIRToLLVM/bool.cir rename to clang/test/CIR/Lowering/ThroughMLIR/bool.cir diff --git a/clang/test/CIR/CIRToLLVM/cmp.cir b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir similarity index 100% rename from clang/test/CIR/CIRToLLVM/cmp.cir rename to clang/test/CIR/Lowering/ThroughMLIR/cmp.cir diff --git a/clang/test/CIR/CIRToLLVM/dot.cir b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir similarity index 100% rename from clang/test/CIR/CIRToLLVM/dot.cir rename to clang/test/CIR/Lowering/ThroughMLIR/dot.cir diff --git a/clang/test/CIR/CIRToLLVM/goto.cir b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir similarity index 100% rename from clang/test/CIR/CIRToLLVM/goto.cir rename to clang/test/CIR/Lowering/ThroughMLIR/goto.cir diff --git a/clang/test/CIR/CIRToLLVM/memref.cir b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir similarity index 100% rename from clang/test/CIR/CIRToLLVM/memref.cir rename to clang/test/CIR/Lowering/ThroughMLIR/memref.cir diff --git a/clang/test/CIR/CIRToLLVM/unary-inc-dec.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir similarity index 100% rename from clang/test/CIR/CIRToLLVM/unary-inc-dec.cir rename to clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir diff --git a/clang/test/CIR/CIRToLLVM/unary-plus-minus.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir similarity index 100% rename from clang/test/CIR/CIRToLLVM/unary-plus-minus.cir rename to clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir From 980b5eec6768b6f6dfeac7ed2ffdfb33007936d8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 7 Dec 2022 19:20:08 -0500 Subject: [PATCH 0713/2301] [CIR][Lowering] Make prepareTypeConverter static This was duplicate defined without static declarations here. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index cbfde4ab66d9..8f78b06dba75 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -505,7 +505,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, // patterns.add(converter, patterns.getContext()); } -mlir::TypeConverter prepareTypeConverter() { +static mlir::TypeConverter prepareTypeConverter() { mlir::TypeConverter converter; converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { return mlir::MemRefType::get({-1}, type.getPointee()); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index aa0fdc5f6917..c9e5cb4a00db 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -515,8 +515,7 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add(converter, patterns.getContext()); } -namespace { -mlir::TypeConverter prepareTypeConverter() { +static mlir::TypeConverter prepareTypeConverter() { mlir::TypeConverter converter; converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { return mlir::MemRefType::get({-1}, type.getPointee()); @@ -528,7 +527,6 @@ mlir::TypeConverter prepareTypeConverter() { return converter; } -} // namespace void ConvertCIRToMLIRPass::runOnOperation() { auto module = getOperation(); From 348605d583a0b058d049c53900b7548bda61b3aa Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 7 Dec 2022 19:56:52 -0500 Subject: [PATCH 0714/2301] [CIR][cir-tool] Add the direct CIRToLLVM pass --- clang/include/clang/CIR/Passes.h | 3 +++ clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 ++-- clang/tools/cir-tool/CMakeLists.txt | 1 + clang/tools/cir-tool/cir-tool.cpp | 4 ++++ 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index e7f96c4593c8..be5ecb8f5209 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -24,6 +24,9 @@ std::unique_ptr createConvertMLIRToLLVMPass(); /// Create a pass that fully lowers CIR to the MLIR in-tree dialects. std::unique_ptr createConvertCIRToMLIRPass(); + +/// Create a pass that fully lowers CIR to the LLVMIR dialect. +std::unique_ptr createConvertCIRToLLVMPass(); } // end namespace cir #endif // CLANG_CIR_PASSES_H diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8f78b06dba75..de8fd4c3eaa0 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -536,7 +536,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { signalPassFailure(); } -std::unique_ptr createConvertDirectCIRToLLVMPass() { +std::unique_ptr createConvertCIRToLLVMPass() { return std::make_unique(); } @@ -546,7 +546,7 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx) { mlir::PassManager pm(mlirCtx.get()); - pm.addPass(createConvertDirectCIRToLLVMPass()); + pm.addPass(createConvertCIRToLLVMPass()); auto result = !mlir::failed(pm.run(theModule)); if (!result) diff --git a/clang/tools/cir-tool/CMakeLists.txt b/clang/tools/cir-tool/CMakeLists.txt index 4ce9b7784d7d..db22c216c173 100644 --- a/clang/tools/cir-tool/CMakeLists.txt +++ b/clang/tools/cir-tool/CMakeLists.txt @@ -9,6 +9,7 @@ set(LIBS ${conversion_libs} clangCIR clangCIRLoweringThroughMLIR + clangCIRLoweringDirectToLLVM MLIRAnalysis MLIRCIR MLIRCIRTransforms diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index 9d2090806305..ea9f7522dce4 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -41,6 +41,10 @@ int main(int argc, char **argv) { return cir::createConvertCIRToMLIRPass(); }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return cir::createConvertCIRToLLVMPass(); + }); + mlir::registerTransformsPasses(); return failed(MlirOptMain( From 228c8fd034cc3733c5c3c1dcb1fbdd47f0fcee1e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 7 Dec 2022 20:11:29 -0500 Subject: [PATCH 0715/2301] [CIR][Lowering] Support lowering cir.{func,return} directly This just reuses the same code from the MLIR path. I don't see any meaningful differences that we're reunning into with the func dialect for now. But we probably will want to also remove that eventually if, for example, we need extra attributes and what not. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 129 +++++++++--------- clang/test/CIR/Lowering/bool.cir | 14 ++ 2 files changed, 81 insertions(+), 62 deletions(-) create mode 100644 clang/test/CIR/Lowering/bool.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index de8fd4c3eaa0..9bfd5ab8a32b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -42,25 +42,26 @@ using namespace llvm; namespace cir { -// class CIRReturnLowering -// : public mlir::OpConversionPattern { -// public: -// using OpConversionPattern::OpConversionPattern; - -// mlir::LogicalResult -// matchAndRewrite(mlir::cir::ReturnOp op, OpAdaptor adaptor, -// mlir::ConversionPatternRewriter &rewriter) const override { -// rewriter.replaceOpWithNewOp(op, -// adaptor.getOperands()); -// return mlir::LogicalResult::success(); -// } -// }; +class CIRReturnLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ReturnOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, + adaptor.getOperands()); + return mlir::LogicalResult::success(); + } +}; struct ConvertCIRToLLVMPass : public mlir::PassWrapper> { void getDependentDialects(mlir::DialectRegistry ®istry) const override { - registry.insert(); + registry.insert(); } void runOnOperation() final; @@ -169,48 +170,48 @@ struct ConvertCIRToLLVMPass // } // }; -// class CIRFuncLowering : public mlir::OpConversionPattern { -// public: -// using OpConversionPattern::OpConversionPattern; - -// mlir::LogicalResult -// matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, -// mlir::ConversionPatternRewriter &rewriter) const override { - -// auto fnType = op.getFunctionType(); -// mlir::TypeConverter::SignatureConversion signatureConversion( -// fnType.getNumInputs()); - -// for (const auto &argType : enumerate(fnType.getInputs())) { -// auto convertedType = typeConverter->convertType(argType.value()); -// if (!convertedType) -// return mlir::failure(); -// signatureConversion.addInputs(argType.index(), convertedType); -// } - -// mlir::Type resultType; -// if (fnType.getNumResults() == 1) { -// resultType = getTypeConverter()->convertType(fnType.getResult(0)); -// if (!resultType) -// return mlir::failure(); -// } - -// auto fn = rewriter.create( -// op.getLoc(), op.getName(), -// rewriter.getFunctionType(signatureConversion.getConvertedTypes(), -// resultType ? mlir::TypeRange(resultType) -// : mlir::TypeRange())); - -// rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); -// if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, -// &signatureConversion))) -// return mlir::failure(); - -// rewriter.eraseOp(op); - -// return mlir::LogicalResult::success(); -// } -// }; +class CIRFuncLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + + auto fnType = op.getFunctionType(); + mlir::TypeConverter::SignatureConversion signatureConversion( + fnType.getNumInputs()); + + for (const auto &argType : enumerate(fnType.getInputs())) { + auto convertedType = typeConverter->convertType(argType.value()); + if (!convertedType) + return mlir::failure(); + signatureConversion.addInputs(argType.index(), convertedType); + } + + mlir::Type resultType; + if (fnType.getNumResults() == 1) { + resultType = getTypeConverter()->convertType(fnType.getResult(0)); + if (!resultType) + return mlir::failure(); + } + + auto fn = rewriter.create( + op.getLoc(), op.getName(), + rewriter.getFunctionType(signatureConversion.getConvertedTypes(), + resultType ? mlir::TypeRange(resultType) + : mlir::TypeRange())); + + rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); + if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, + &signatureConversion))) + return mlir::failure(); + + rewriter.eraseOp(op); + + return mlir::LogicalResult::success(); + } +}; // class CIRUnaryOpLowering : public mlir::OpRewritePattern // { public: @@ -498,11 +499,12 @@ struct ConvertCIRToLLVMPass void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - // patterns.add(patterns.getContext()); - // patterns.add(converter, patterns.getContext()); + patterns.add(patterns.getContext()); + patterns.add(converter, patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { @@ -525,12 +527,15 @@ void ConvertCIRToLLVMPass::runOnOperation() { mlir::RewritePatternSet patterns(&getContext()); + mlir::LLVMTypeConverter llvmConverter(&getContext()); + populateCIRToLLVMConversionPatterns(patterns, converter); + mlir::populateFuncToLLVMConversionPatterns(llvmConverter, patterns); mlir::ConversionTarget target(getContext()); target.addLegalOp(); target.addLegalDialect(); - target.addIllegalDialect(); + target.addIllegalDialect(); if (failed(applyPartialConversion(module, target, std::move(patterns)))) signalPassFailure(); diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir new file mode 100644 index 000000000000..313495bfe271 --- /dev/null +++ b/clang/test/CIR/Lowering/bool.cir @@ -0,0 +1,14 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + cir.return + } +} + +// MLIR: llvm.func @foo() { +// MLIR-NEXT: llvm.return + +// LLVM: define void @foo() +// LLVM-NEXT: ret void From 7856b859ac15d9bfb6f87f07748ae70768222189 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 7 Dec 2022 23:14:45 -0500 Subject: [PATCH 0716/2301] [CIR][Lowering] Implement direct lowering to llvm's alloca This is pretty straightforward but includes a few general purpose extras. First, we switch our custom TypeConverter to the LLVMTypeConverter. We use an OpConversionPattern instead of an OpRewritePattern to facilitate usage of the TypeConverter. And add a test! --- clang/include/clang/CIR/LowerToLLVM.h | 12 ++- clang/include/clang/CIR/Passes.h | 2 + clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 3 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 91 +++++++++---------- clang/test/CIR/Lowering/bool.cir | 8 +- clang/tools/cir-tool/cir-tool.cpp | 2 +- 6 files changed, 60 insertions(+), 58 deletions(-) diff --git a/clang/include/clang/CIR/LowerToLLVM.h b/clang/include/clang/CIR/LowerToLLVM.h index 79ffb0ba1a10..9494b37fd75b 100644 --- a/clang/include/clang/CIR/LowerToLLVM.h +++ b/clang/include/clang/CIR/LowerToLLVM.h @@ -28,17 +28,19 @@ class ModuleOp; namespace cir { +namespace direct { +std::unique_ptr +lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, + std::unique_ptr mlirCtx, + llvm::LLVMContext &llvmCtx); +} + // Lower directly from pristine CIR to LLVMIR. std::unique_ptr lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, std::unique_ptr mlirCtx, llvm::LLVMContext &llvmCtx); -std::unique_ptr -lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, - std::unique_ptr mlirCtx, - llvm::LLVMContext &llvmCtx); - mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx); } // namespace cir diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index be5ecb8f5209..293af0412e6d 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -25,8 +25,10 @@ std::unique_ptr createConvertMLIRToLLVMPass(); /// Create a pass that fully lowers CIR to the MLIR in-tree dialects. std::unique_ptr createConvertCIRToMLIRPass(); +namespace direct { /// Create a pass that fully lowers CIR to the LLVMIR dialect. std::unique_ptr createConvertCIRToLLVMPass(); +} // namespace direct } // end namespace cir #endif // CLANG_CIR_PASSES_H diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 652495854551..d7dcfed09655 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -77,7 +77,8 @@ static std::unique_ptr lowerFromCIRToLLVMIR( const clang::FrontendOptions &feOptions, mlir::ModuleOp mlirMod, std::unique_ptr mlirCtx, llvm::LLVMContext &llvmCtx) { if (feOptions.ClangIRDirectLowering) - return lowerDirectlyFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); + return direct::lowerDirectlyFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), + llvmCtx); else return lowerFromCIRToMLIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 9bfd5ab8a32b..6c7a33ab33ff 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -17,14 +17,12 @@ #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" #include "mlir/Conversion/LLVMCommon/ConversionTarget.h" #include "mlir/Conversion/LLVMCommon/TypeConverter.h" -#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h" #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" -#include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" #include "mlir/IR/BuiltinDialect.h" @@ -41,6 +39,7 @@ using namespace cir; using namespace llvm; namespace cir { +namespace direct { class CIRReturnLowering : public mlir::OpConversionPattern { @@ -82,40 +81,27 @@ struct ConvertCIRToLLVMPass // } // }; -// class CIRAllocaLowering : public mlir::OpRewritePattern -// { public: -// using OpRewritePattern::OpRewritePattern; +class CIRAllocaLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; -// mlir::LogicalResult -// matchAndRewrite(mlir::cir::AllocaOp op, -// mlir::PatternRewriter &rewriter) const override { -// auto type = op.getAllocaType(); -// mlir::MemRefType memreftype; + mlir::LogicalResult + matchAndRewrite(mlir::cir::AllocaOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto elementTy = getTypeConverter()->convertType(op.getAllocaType()); -// if (type.isa()) { -// auto integerType = -// mlir::IntegerType::get(getContext(), 8, -// mlir::IntegerType::Signless); -// memreftype = mlir::MemRefType::get({}, integerType); -// } else if (type.isa()) { -// mlir::cir::ArrayType arraytype = type.dyn_cast(); -// memreftype = -// mlir::MemRefType::get(arraytype.getSize(), arraytype.getEltType()); -// } else if (type.isa() || type.isa()) -// { -// memreftype = mlir::MemRefType::get({}, op.getAllocaType()); -// } else if (type.isa()) { -// auto ptrType = type.cast(); -// auto innerMemref = mlir::MemRefType::get({-1}, ptrType.getPointee()); -// memreftype = mlir::MemRefType::get({}, innerMemref); -// } else { -// llvm_unreachable("type to be allocated not supported yet"); -// } -// rewriter.replaceOpWithNewOp(op, memreftype, -// op.getAlignmentAttr()); -// return mlir::LogicalResult::success(); -// } -// }; + mlir::Value one = rewriter.create( + op.getLoc(), typeConverter->convertType(rewriter.getIndexType()), + rewriter.getIntegerAttr(rewriter.getIndexType(), 1)); + + auto resultTy = mlir::LLVM::LLVMPointerType::get(getContext()); + + rewriter.replaceOpWithNewOp( + op, resultTy, elementTy, one, op.getAlignmentAttr().getInt()); + return mlir::LogicalResult::success(); + } +}; // class CIRLoadLowering : public mlir::OpConversionPattern { // public: @@ -499,38 +485,42 @@ class CIRFuncLowering : public mlir::OpConversionPattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(patterns.getContext()); - patterns.add(converter, patterns.getContext()); + patterns.add(converter, + patterns.getContext()); } -static mlir::TypeConverter prepareTypeConverter() { - mlir::TypeConverter converter; - converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { - return mlir::MemRefType::get({-1}, type.getPointee()); +static void prepareTypeConverter(mlir::LLVMTypeConverter &converter, + mlir::MLIRContext *ctx) { + // converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { + // return mlir::MemRefType::get({-1}, type.getPointee()); + // }); + // converter.addConversion( + // [&](mlir::IntegerType type) -> mlir::Type { return type; }); + // converter.addConversion( + // [&](mlir::FloatType type) -> mlir::Type { return type; }); + // converter.addConversion( + // [&](mlir::IndexType type) -> mlir::Type { return type; }); + converter.addConversion([&](mlir::cir::BoolType type) -> mlir::Type { + return mlir::IntegerType::get(type.getContext(), 8, + mlir::IntegerType::Signless); }); - converter.addConversion( - [&](mlir::IntegerType type) -> mlir::Type { return type; }); - converter.addConversion( - [&](mlir::FloatType type) -> mlir::Type { return type; }); - - return converter; } void ConvertCIRToLLVMPass::runOnOperation() { auto module = getOperation(); - auto converter = prepareTypeConverter(); + mlir::LLVMTypeConverter converter(&getContext()); + prepareTypeConverter(converter, &getContext()); mlir::RewritePatternSet patterns(&getContext()); - mlir::LLVMTypeConverter llvmConverter(&getContext()); - populateCIRToLLVMConversionPatterns(patterns, converter); - mlir::populateFuncToLLVMConversionPatterns(llvmConverter, patterns); + mlir::populateFuncToLLVMConversionPatterns(converter, patterns); mlir::ConversionTarget target(getContext()); target.addLegalOp(); @@ -572,4 +562,5 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, return llvmModule; } +} // namespace direct } // namespace cir diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 313495bfe271..7ddfd9beb1bb 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -3,12 +3,18 @@ module { cir.func @foo() { + %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} + // %1 = cir.cst(true) : !cir.bool + // cir.store %1, %0 : !cir.bool, cir.ptr cir.return } } -// MLIR: llvm.func @foo() { +// MLIR: llvm.func @foo() { +// MLIR-NEXT: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: = llvm.alloca %0 x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.return // LLVM: define void @foo() +// LLVM-NEXT: %1 = alloca i8, i64 1, align 1 // LLVM-NEXT: ret void diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index ea9f7522dce4..94f3917de7a3 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -42,7 +42,7 @@ int main(int argc, char **argv) { }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { - return cir::createConvertCIRToLLVMPass(); + return cir::direct::createConvertCIRToLLVMPass(); }); mlir::registerTransformsPasses(); From 8b1e8db945d10c0164da9cb98619d2103bb18533 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 7 Dec 2022 17:33:42 -0800 Subject: [PATCH 0717/2301] [CIR][CIRGen] Handle codegen for co_return stmt --- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 29 +++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 3 ++- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 0d205f671a6a..15a6cb2734f0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -353,3 +353,32 @@ RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, }); return rval; } + +mlir::LogicalResult CIRGenFunction::buildCoreturnStmt(CoreturnStmt const &S) { + ++CurCoro.Data->CoreturnCount; + const Expr *RV = S.getOperand(); + if (RV && RV->getType()->isVoidType() && !isa(RV)) { + // Make sure to evaluate the non initlist expression of a co_return + // with a void expression for side effects. + // FIXME(cir): add scope + // RunCleanupsScope cleanupScope(*this); + buildIgnoredExpr(RV); + } + if (buildStmt(S.getPromiseCall(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + // FIXME: do the proper things like ReturnStmt does + // EmitBranchThroughCleanup(CurCoro.Data->FinalJD); + + // Create a new return block (if not existent) and add a branch to + // it. The actual return instruction is only inserted during current + // scope cleanup handling. + auto loc = getLoc(S.getSourceRange()); + auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); + builder.create(loc, retBlock); + + // Insert the new block to continue codegen after branch to ret block. + builder.createBlock(builder.getBlock()->getParent()); + + // TODO(cir): LLVM codegen for a cleanup on cleanupScope here. + return mlir::success(); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 128c4fcf3b60..cbbacf1224f4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -768,6 +768,7 @@ class CIRGenFunction { mlir::LogicalResult buildFunctionBody(const clang::Stmt *Body); mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S); + mlir::LogicalResult buildCoreturnStmt(const CoreturnStmt &S); RValue buildCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 56bd7b5cdeb9..3b2ab5edfcc2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -142,13 +142,14 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::CoroutineBodyStmtClass: return buildCoroutineBody(cast(*S)); + case Stmt::CoreturnStmtClass: + return buildCoreturnStmt(cast(*S)); case Stmt::IndirectGotoStmtClass: case Stmt::ReturnStmtClass: // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. case Stmt::GCCAsmStmtClass: case Stmt::MSAsmStmtClass: - case Stmt::CoreturnStmtClass: case Stmt::CapturedStmtClass: case Stmt::ObjCAtTryStmtClass: case Stmt::ObjCAtThrowStmtClass: From c897d135418ab16205e8c6912eeef5bf2ff6108e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 7 Dec 2022 17:44:13 -0800 Subject: [PATCH 0718/2301] [CIR] AwaitOp also uses cir.yield --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 ++-- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 6cb52508ede7..b1936b973940 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -429,11 +429,11 @@ def YieldOpKind : I32EnumAttr< def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", - "LoopOp"]>]> { + "LoopOp", "AwaitOp"]>]> { let summary = "Terminate CIR regions"; let description = [{ The `cir.yield` operation terminates regions on different CIR operations: - `cir.if`, `cir.scope`, `cir.switch` and `cir.loop`. + `cir.if`, `cir.scope`, `cir.switch`, `cir.loop` and `cir.await`. Might yield an SSA value and the semantics of how the values are yielded is defined by the parent operation. Note: there are currently no uses of diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 15a6cb2734f0..438f9fff6171 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -319,6 +319,8 @@ static LValueOrRValue buildSuspendExpression( if (TryStmt) { llvm_unreachable("NYI"); } + + builder.create(loc); }); assert(awaitBuild.succeeded() && "Should know how to codegen"); From 303a0648f6555c38855b10258d91a23cb5d47cff Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 7 Dec 2022 17:44:52 -0800 Subject: [PATCH 0719/2301] [CIR][CIRGen] Derive location from template instantiaion when generating code --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 8a55e9fc61f0..3f4a95989ef6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -415,7 +415,8 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // If this is a function specialization then use the pattern body as the // location for the function. if (const auto *SpecDecl = FD->getTemplateInstantiationPattern()) - llvm_unreachable("NYI"); + if (SpecDecl->hasBody(SpecDecl)) + Loc = SpecDecl->getLocation(); Stmt *Body = FD->getBody(); From 8c37ed5eca1659d63a12442819af765d509f8f18 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 8 Dec 2022 22:00:26 -0500 Subject: [PATCH 0720/2301] [CIR][Lowering] Lower cir.constant to llvm.mlir.constant This reuses the mlir lowering but factors out the bool specific behavior to the typeConverter. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 42 +++++++------------ clang/test/CIR/Lowering/bool.cir | 7 ++-- 2 files changed, 19 insertions(+), 30 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6c7a33ab33ff..ed746c00a6c0 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -130,31 +130,19 @@ class CIRAllocaLowering // } // }; -// class CIRConstantLowering -// : public mlir::OpRewritePattern { -// public: -// using OpRewritePattern::OpRewritePattern; +class CIRConstantLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; -// mlir::LogicalResult -// matchAndRewrite(mlir::cir::ConstantOp op, -// mlir::PatternRewriter &rewriter) const override { -// if (op.getType().isa()) { -// mlir::Type type = -// mlir::IntegerType::get(getContext(), 8, -// mlir::IntegerType::Signless); -// mlir::Attribute IntegerAttr; -// if (op.getValue() == mlir::BoolAttr::get(getContext(), true)) -// IntegerAttr = mlir::IntegerAttr::get(type, 1); -// else -// IntegerAttr = mlir::IntegerAttr::get(type, 0); -// rewriter.replaceOpWithNewOp(op, type, -// IntegerAttr); -// } else -// rewriter.replaceOpWithNewOp(op, op.getType(), -// op.getValue()); -// return mlir::LogicalResult::success(); -// } -// }; + mlir::LogicalResult + matchAndRewrite(mlir::cir::ConstantOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, getTypeConverter()->convertType(op.getType()), op.getValue()); + return mlir::LogicalResult::success(); + } +}; class CIRFuncLowering : public mlir::OpConversionPattern { public: @@ -486,12 +474,12 @@ class CIRFuncLowering : public mlir::OpConversionPattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns.add(converter, - patterns.getContext()); + patterns.add( + converter, patterns.getContext()); } static void prepareTypeConverter(mlir::LLVMTypeConverter &converter, diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 7ddfd9beb1bb..33d95c1ca520 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -3,16 +3,17 @@ module { cir.func @foo() { + %1 = cir.cst(true) : !cir.bool %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} - // %1 = cir.cst(true) : !cir.bool // cir.store %1, %0 : !cir.bool, cir.ptr cir.return } } // MLIR: llvm.func @foo() { -// MLIR-NEXT: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64 -// MLIR-NEXT: = llvm.alloca %0 x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr +// MLIR-DAG: = llvm.mlir.constant(true) : i8 +// MLIR-DAG: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64 +// MLIR-DAG: = llvm.alloca [[Value]] x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.return // LLVM: define void @foo() From 06d53a1c599d7841870f15d148ca0a6ec3ce2f52 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 8 Dec 2022 23:28:20 -0500 Subject: [PATCH 0721/2301] [CIR][Lowering] Lower cir.store to llvm.store This also adds elementary support for typeconverter for cir.ptr. Curiously, llvm.mlir.constant(true) is lowering to -1. Though that is "true" in C/C++, it's still weird. Just going to leave it for now but we probably should figure out how to make this a correct --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 41 +++++++++---------- clang/test/CIR/Lowering/bool.cir | 5 ++- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ed746c00a6c0..03792dc1bf40 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -115,20 +115,18 @@ class CIRAllocaLowering // } // }; -// class CIRStoreLowering : public mlir::ConversionPattern { -// public: -// CIRStoreLowering(mlir::MLIRContext *ctx) -// : mlir::ConversionPattern(mlir::cir::StoreOp::getOperationName(), 1, -// ctx) {} +class CIRStoreLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; -// mlir::LogicalResult -// matchAndRewrite(mlir::Operation *op, ArrayRef operands, -// mlir::ConversionPatternRewriter &rewriter) const override { -// rewriter.replaceOpWithNewOp(op, operands[0], -// operands[1]); -// return mlir::LogicalResult::success(); -// } -// }; + mlir::LogicalResult + matchAndRewrite(mlir::cir::StoreOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getValue(), + adaptor.getAddr()); + return mlir::LogicalResult::success(); + } +}; class CIRConstantLowering : public mlir::OpConversionPattern { @@ -473,20 +471,19 @@ class CIRFuncLowering : public mlir::OpConversionPattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(patterns.getContext()); - patterns.add( - converter, patterns.getContext()); + patterns.add(converter, patterns.getContext()); } -static void prepareTypeConverter(mlir::LLVMTypeConverter &converter, - mlir::MLIRContext *ctx) { - // converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { - // return mlir::MemRefType::get({-1}, type.getPointee()); - // }); +static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { + converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { + return mlir::LLVM::LLVMPointerType::get(type.getContext()); + }); // converter.addConversion( // [&](mlir::IntegerType type) -> mlir::Type { return type; }); // converter.addConversion( @@ -503,7 +500,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { auto module = getOperation(); mlir::LLVMTypeConverter converter(&getContext()); - prepareTypeConverter(converter, &getContext()); + prepareTypeConverter(converter); mlir::RewritePatternSet patterns(&getContext()); diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 33d95c1ca520..b4ef3c01df7f 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -1,11 +1,12 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// XFAIL: * module { cir.func @foo() { %1 = cir.cst(true) : !cir.bool %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} - // cir.store %1, %0 : !cir.bool, cir.ptr + cir.store %1, %0 : !cir.bool, cir.ptr cir.return } } @@ -14,8 +15,10 @@ module { // MLIR-DAG: = llvm.mlir.constant(true) : i8 // MLIR-DAG: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64 // MLIR-DAG: = llvm.alloca [[Value]] x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr +// MLIR-DAG: llvm.store %0, %2 : !llvm.ptr // MLIR-NEXT: llvm.return // LLVM: define void @foo() // LLVM-NEXT: %1 = alloca i8, i64 1, align 1 +// LLVM-NEXT: store i8 -1, ptr %1, align 1 // LLVM-NEXT: ret void From e8d957dd2ab9ae38ec677fd34cf3918e9ca9a44d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 8 Dec 2022 22:48:22 -0800 Subject: [PATCH 0722/2301] [CIR][CIRGen] Generate proper if conditions for await ready, fix cir.yield usage --- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 22 ++++++++++++++++------ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 10 ++++------ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 23 +++++++++++++++++++---- clang/test/CIR/IR/invalid.cir | 2 +- clang/test/CIR/Lowering/bool.cir | 3 +-- 5 files changed, 41 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 438f9fff6171..33293fdb8fa4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -272,13 +272,24 @@ static LValueOrRValue buildSuspendExpression( cond = cond->IgnoreParens(); mlir::Value condV = CGF.evaluateExprAsBool(cond); + builder.create( + loc, condV, /*withElseRegion=*/false, + /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // If expression is ready, no need to suspend, + // `YieldOpKind::Break` tells control flow to return to parent, no + // more regions to be executed. + builder.create(loc, + mlir::cir::YieldOpKind::Break); + }); + if (!condV) { awaitBuild = mlir::failure(); return; } - // If expression is ready, no need to suspend. - builder.create(loc, condV); + // Signals the parent that execution flows to next region. + builder.create(loc); }, /*suspendBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -292,10 +303,8 @@ static LValueOrRValue buildSuspendExpression( llvm_unreachable("NYI"); } - auto alwaysSuspend = b.create( - loc, mlir::cir::BoolType::get(b.getContext()), b.getBoolAttr(true)); - builder.create(loc, - mlir::ValueRange{alwaysSuspend}); + // Signals the parent that execution flows to next region. + builder.create(loc); }, /*resumeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -320,6 +329,7 @@ static LValueOrRValue buildSuspendExpression( llvm_unreachable("NYI"); } + // Returns control back to parent. builder.create(loc); }); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 0eebe94df2b0..1912117ebe5c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1550,12 +1550,10 @@ bool CIRGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) { llvm_unreachable("NYI"); } -/// Emit an if on a boolean condition to the specified blocks. -/// FIXME: Based on the condition, this might try to simplify the codegen of -/// the conditional based on the branch. TrueCount should be the number of -/// times we expect the condition to evaluate to true based on PGO data. We -/// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr -/// for extra ideas). +/// Emit an `if` on a boolean condition, filling `then` and `else` into +/// appropriated regions. +/// TODO(cir): PGO data +/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas). mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, mlir::Location loc, const Stmt *thenS, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 912bddfb5501..3006eb29fbc5 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -567,12 +567,28 @@ LogicalResult ScopeOp::verify() { return success(); } //===----------------------------------------------------------------------===// mlir::LogicalResult YieldOp::verify() { - auto isDominatedByLoopOrSwitch = [](Operation *parentOp) { + auto canDominateYieldBreak = [&](Operation *parentOp) { + mlir::Region *lastAwaitRegion = nullptr; while (!llvm::isa(parentOp)) { + auto awaitOp = dyn_cast(parentOp); + if (awaitOp) { + if (lastAwaitRegion && lastAwaitRegion == &awaitOp.getResume()) { + emitOpError() + << "break can only be used in 'ready' and 'suspend' regions"; + return false; + } + return true; + } + if (llvm::isa(parentOp)) return true; + + lastAwaitRegion = parentOp->getParentRegion(); parentOp = parentOp->getParentOp(); } + + emitOpError() + << "shall be dominated by 'cir.loop', 'cir.switch' or 'cir.await'"; return false; }; @@ -586,9 +602,8 @@ mlir::LogicalResult YieldOp::verify() { }; if (isBreak()) { - if (!isDominatedByLoopOrSwitch(getOperation()->getParentOp())) - return emitOpError() - << "shall be dominated by 'cir.loop' or 'cir.switch'"; + if (!canDominateYieldBreak(getOperation()->getParentOp())) + return mlir::failure(); return mlir::success(); } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index c6b34517e0c1..b10dfd3fe724 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -53,7 +53,7 @@ cir.func @yieldfallthrough() { cir.func @yieldbreak() { %0 = cir.cst(true) : !cir.bool cir.if %0 { - cir.yield break // expected-error {{shall be dominated by 'cir.loop' or 'cir.switch'}} + cir.yield break // expected-error {{shall be dominated by 'cir.loop', 'cir.switch' or 'cir.await'}} } cir.return } diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index b4ef3c01df7f..4d9b6b50f6f6 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { @@ -15,7 +14,7 @@ module { // MLIR-DAG: = llvm.mlir.constant(true) : i8 // MLIR-DAG: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64 // MLIR-DAG: = llvm.alloca [[Value]] x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr -// MLIR-DAG: llvm.store %0, %2 : !llvm.ptr +// MLIR-DAG: llvm.store %0, %2 : i8, !llvm.ptr // MLIR-NEXT: llvm.return // LLVM: define void @foo() From 7f0085f8bb7fc4e4ab8ae30d0895c7a4b327e061 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 9 Dec 2022 20:54:48 -0500 Subject: [PATCH 0723/2301] [CIR][Lowering] Support lowering cir.store directly to llvm.store This was trivial as the ops and arguments map simply. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 30 +++++++++++-------- clang/test/CIR/Lowering/loadstorealloca.cir | 27 +++++++++++++++++ 2 files changed, 44 insertions(+), 13 deletions(-) create mode 100644 clang/test/CIR/Lowering/loadstorealloca.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 03792dc1bf40..b5134f656311 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -103,17 +103,20 @@ class CIRAllocaLowering } }; -// class CIRLoadLowering : public mlir::OpConversionPattern { -// public: -// using OpConversionPattern::OpConversionPattern; +class CIRLoadLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; -// mlir::LogicalResult -// matchAndRewrite(mlir::cir::LoadOp op, OpAdaptor adaptor, -// mlir::ConversionPatternRewriter &rewriter) const override { -// rewriter.replaceOpWithNewOp(op, adaptor.getAddr()); -// return mlir::LogicalResult::success(); -// } -// }; + mlir::LogicalResult + matchAndRewrite(mlir::cir::LoadOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + const auto llvmTy = + getTypeConverter()->convertType(op.getResult().getType()); + rewriter.replaceOpWithNewOp(op, llvmTy, + adaptor.getAddr()); + return mlir::LogicalResult::success(); + } +}; class CIRStoreLowering : public mlir::OpConversionPattern { public: @@ -471,13 +474,14 @@ class CIRFuncLowering : public mlir::OpConversionPattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(patterns.getContext()); - patterns.add(converter, patterns.getContext()); + patterns.add(converter, + patterns.getContext()); } static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir new file mode 100644 index 000000000000..fbc43465a35c --- /dev/null +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -0,0 +1,27 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() -> i32 { + %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.cst(1 : i32) : i32 + cir.store %1, %0 : i32, cir.ptr + %2 = cir.load %0 : cir.ptr , i32 + cir.return %2 : i32 + } +} + +// MLIR: module { +// MLIR-NEXT: func @foo() -> i32 { +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: llvm.store %2, %1 : i32, !llvm.ptr +// MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr -> i32 +// MLIR-NEXT: return %3 : i32 + +// LLVM: define i32 @foo() +// LLVM-NEXT: %1 = alloca i32, i64 1, align 4 +// LLVM-NEXT: store i32 1, ptr %1, align 4 +// LLVM-NEXT: %2 = load i32, ptr %1, align 4 +// LLVM-NEXT: ret i32 %2 From 710e655f0a5382f8bfe745bf3f05bd9baccbe826 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 9 Dec 2022 21:20:57 -0500 Subject: [PATCH 0724/2301] [CIR][Lowering] Implement lowering of icmp "ugt" --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 286 +++++++++--------- clang/test/CIR/Lowering/cmp.cir | 20 ++ 2 files changed, 163 insertions(+), 143 deletions(-) create mode 100644 clang/test/CIR/Lowering/cmp.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b5134f656311..c3c86fd365ad 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -11,7 +11,6 @@ //===----------------------------------------------------------------------===// #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" -#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h" #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" @@ -19,7 +18,6 @@ #include "mlir/Conversion/LLVMCommon/TypeConverter.h" #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" -#include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" @@ -322,143 +320,146 @@ class CIRFuncLowering : public mlir::OpConversionPattern { // } // }; -// class CIRCmpOpLowering : public mlir::OpRewritePattern { -// public: -// using OpRewritePattern::OpRewritePattern; - -// mlir::LogicalResult -// matchAndRewrite(mlir::cir::CmpOp op, -// mlir::PatternRewriter &rewriter) const override { -// auto type = op.getLhs().getType(); -// auto integerType = -// mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); +class CIRCmpOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; -// switch (op.getKind()) { -// case mlir::cir::CmpOpKind::gt: { -// if (type.isa()) { -// mlir::arith::CmpIPredicate cmpIType; -// if (!type.isSignlessInteger()) -// llvm_unreachable("integer type not supported in CIR yet"); -// cmpIType = mlir::arith::CmpIPredicate::ugt; -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), -// op.getLhs(), op.getRhs()); -// } else if (type.isa()) { -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpFPredicateAttr::get( -// getContext(), mlir::arith::CmpFPredicate::UGT), -// op.getLhs(), op.getRhs()); -// } else { -// llvm_unreachable("Unknown Operand Type"); -// } -// break; -// } -// case mlir::cir::CmpOpKind::ge: { -// if (type.isa()) { -// mlir::arith::CmpIPredicate cmpIType; -// if (!type.isSignlessInteger()) -// llvm_unreachable("integer type not supported in CIR yet"); -// cmpIType = mlir::arith::CmpIPredicate::uge; -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), -// op.getLhs(), op.getRhs()); -// } else if (type.isa()) { -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpFPredicateAttr::get( -// getContext(), mlir::arith::CmpFPredicate::UGE), -// op.getLhs(), op.getRhs()); -// } else { -// llvm_unreachable("Unknown Operand Type"); -// } -// break; -// } -// case mlir::cir::CmpOpKind::lt: { -// if (type.isa()) { -// mlir::arith::CmpIPredicate cmpIType; -// if (!type.isSignlessInteger()) -// llvm_unreachable("integer type not supported in CIR yet"); -// cmpIType = mlir::arith::CmpIPredicate::ult; -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), -// op.getLhs(), op.getRhs()); -// } else if (type.isa()) { -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpFPredicateAttr::get( -// getContext(), mlir::arith::CmpFPredicate::ULT), -// op.getLhs(), op.getRhs()); -// } else { -// llvm_unreachable("Unknown Operand Type"); -// } -// break; -// } -// case mlir::cir::CmpOpKind::le: { -// if (type.isa()) { -// mlir::arith::CmpIPredicate cmpIType; -// if (!type.isSignlessInteger()) -// llvm_unreachable("integer type not supported in CIR yet"); -// cmpIType = mlir::arith::CmpIPredicate::ule; -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), -// op.getLhs(), op.getRhs()); -// } else if (type.isa()) { -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpFPredicateAttr::get( -// getContext(), mlir::arith::CmpFPredicate::ULE), -// op.getLhs(), op.getRhs()); -// } else { -// llvm_unreachable("Unknown Operand Type"); -// } -// break; -// } -// case mlir::cir::CmpOpKind::eq: { -// if (type.isa()) { -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpIPredicateAttr::get(getContext(), -// mlir::arith::CmpIPredicate::eq), -// op.getLhs(), op.getRhs()); -// } else if (type.isa()) { -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpFPredicateAttr::get( -// getContext(), mlir::arith::CmpFPredicate::UEQ), -// op.getLhs(), op.getRhs()); -// } else { -// llvm_unreachable("Unknown Operand Type"); -// } -// break; -// } -// case mlir::cir::CmpOpKind::ne: { -// if (type.isa()) { -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpIPredicateAttr::get(getContext(), -// mlir::arith::CmpIPredicate::ne), -// op.getLhs(), op.getRhs()); -// } else if (type.isa()) { -// rewriter.replaceOpWithNewOp( -// op, integerType, -// mlir::arith::CmpFPredicateAttr::get( -// getContext(), mlir::arith::CmpFPredicate::UNE), -// op.getLhs(), op.getRhs()); -// } else { -// llvm_unreachable("Unknown Operand Type"); -// } -// break; -// } -// } + mlir::LogicalResult + matchAndRewrite(mlir::cir::CmpOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto type = op.getLhs().getType(); + auto integerType = + mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); + + switch (op.getKind()) { + case mlir::cir::CmpOpKind::gt: { + if (type.isa()) { + mlir::LLVM::ICmpPredicate cmpIType; + if (!type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::LLVM::ICmpPredicate::ugt; + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), + op.getLhs(), op.getRhs()); + // } else if (type.isa()) { + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpFPredicateAttr::get( + // getContext(), mlir::arith::CmpFPredicate::UGT), + // op.getLhs(), op.getRhs()); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + default: + llvm_unreachable("NYI"); + + // case mlir::cir::CmpOpKind::ge: { + // if (type.isa()) { + // mlir::arith::CmpIPredicate cmpIType; + // if (!type.isSignlessInteger()) + // llvm_unreachable("integer type not supported in CIR yet"); + // cmpIType = mlir::arith::CmpIPredicate::uge; + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + // op.getLhs(), op.getRhs()); + // } else if (type.isa()) { + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpFPredicateAttr::get( + // getContext(), mlir::arith::CmpFPredicate::UGE), + // op.getLhs(), op.getRhs()); + // } else { + // llvm_unreachable("Unknown Operand Type"); + // } + // break; + // } + // case mlir::cir::CmpOpKind::lt: { + // if (type.isa()) { + // mlir::arith::CmpIPredicate cmpIType; + // if (!type.isSignlessInteger()) + // llvm_unreachable("integer type not supported in CIR yet"); + // cmpIType = mlir::arith::CmpIPredicate::ult; + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + // op.getLhs(), op.getRhs()); + // } else if (type.isa()) { + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpFPredicateAttr::get( + // getContext(), mlir::arith::CmpFPredicate::ULT), + // op.getLhs(), op.getRhs()); + // } else { + // llvm_unreachable("Unknown Operand Type"); + // } + // break; + // } + // case mlir::cir::CmpOpKind::le: { + // if (type.isa()) { + // mlir::arith::CmpIPredicate cmpIType; + // if (!type.isSignlessInteger()) + // llvm_unreachable("integer type not supported in CIR yet"); + // cmpIType = mlir::arith::CmpIPredicate::ule; + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), + // op.getLhs(), op.getRhs()); + // } else if (type.isa()) { + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpFPredicateAttr::get( + // getContext(), mlir::arith::CmpFPredicate::ULE), + // op.getLhs(), op.getRhs()); + // } else { + // llvm_unreachable("Unknown Operand Type"); + // } + // break; + // } + // case mlir::cir::CmpOpKind::eq: { + // if (type.isa()) { + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpIPredicateAttr::get(getContext(), + // mlir::arith::CmpIPredicate::eq), + // op.getLhs(), op.getRhs()); + // } else if (type.isa()) { + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpFPredicateAttr::get( + // getContext(), mlir::arith::CmpFPredicate::UEQ), + // op.getLhs(), op.getRhs()); + // } else { + // llvm_unreachable("Unknown Operand Type"); + // } + // break; + // } + // case mlir::cir::CmpOpKind::ne: { + // if (type.isa()) { + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpIPredicateAttr::get(getContext(), + // mlir::arith::CmpIPredicate::ne), + // op.getLhs(), op.getRhs()); + // } else if (type.isa()) { + // rewriter.replaceOpWithNewOp( + // op, integerType, + // mlir::arith::CmpFPredicateAttr::get( + // getContext(), mlir::arith::CmpFPredicate::UNE), + // op.getLhs(), op.getRhs()); + // } else { + // llvm_unreachable("Unknown Operand Type"); + // } + // break; + // } + } -// return mlir::LogicalResult::success(); -// } -// }; + return mlir::LogicalResult::success(); + } +}; // class CIRBrOpLowering : public mlir::OpRewritePattern { // public: @@ -474,14 +475,13 @@ class CIRFuncLowering : public mlir::OpConversionPattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(patterns.getContext()); - patterns.add(converter, - patterns.getContext()); + patterns.add( + converter, patterns.getContext()); } static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { diff --git a/clang/test/CIR/Lowering/cmp.cir b/clang/test/CIR/Lowering/cmp.cir new file mode 100644 index 000000000000..2e71fc0cb207 --- /dev/null +++ b/clang/test/CIR/Lowering/cmp.cir @@ -0,0 +1,20 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + %0 = cir.alloca i32, cir.ptr , ["a"] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b"] {alignment = 4 : i64} + %2 = cir.alloca f32, cir.ptr , ["c"] {alignment = 4 : i64} + %3 = cir.alloca f32, cir.ptr , ["d"] {alignment = 4 : i64} + %4 = cir.alloca !cir.bool, cir.ptr , ["e"] {alignment = 1 : i64} + %5 = cir.load %0 : cir.ptr , i32 + %6 = cir.load %1 : cir.ptr , i32 + %7 = cir.cmp(gt, %5, %6) : i32, !cir.bool + cir.return + } +} + +// MLIR: = llvm.icmp "ugt" + +// LLVM: icmp ugt i32 From 351db26742b300994aa790a72fde349f6aa073e8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 9 Dec 2022 21:39:29 -0500 Subject: [PATCH 0725/2301] [CIR][Lowering] Implement the rest of cmp lowering The only new interesting change here from the mlir implementation is the addition of the (currently defaulted) FastmathFlagsAttr. We'll have to support that when we go to remove the asserts from cirgen. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 220 +++++++++--------- clang/test/CIR/Lowering/cmp.cir | 55 +++++ 2 files changed, 167 insertions(+), 108 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index c3c86fd365ad..17c01e38b121 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -342,119 +342,123 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { op, integerType, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), op.getLhs(), op.getRhs()); - // } else if (type.isa()) { - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpFPredicateAttr::get( - // getContext(), mlir::arith::CmpFPredicate::UGT), - // op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::FCmpPredicateAttr::get(getContext(), + mlir::LLVM::FCmpPredicate::ugt), + op.getLhs(), op.getRhs(), + // TODO(CIR): These fastmath flags need to not be defaulted. + mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::ge: { + if (type.isa()) { + mlir::LLVM::ICmpPredicate cmpIType; + if (!type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::LLVM::ICmpPredicate::uge; + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::FCmpPredicateAttr::get(getContext(), + mlir::LLVM::FCmpPredicate::uge), + op.getLhs(), op.getRhs(), + mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::lt: { + if (type.isa()) { + mlir::LLVM::ICmpPredicate cmpIType; + if (!type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::LLVM::ICmpPredicate::ult; + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::FCmpPredicateAttr::get(getContext(), + mlir::LLVM::FCmpPredicate::ult), + op.getLhs(), op.getRhs(), + mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::le: { + if (type.isa()) { + mlir::LLVM::ICmpPredicate cmpIType; + if (!type.isSignlessInteger()) + llvm_unreachable("integer type not supported in CIR yet"); + cmpIType = mlir::LLVM::ICmpPredicate::ule; + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::FCmpPredicateAttr::get(getContext(), + mlir::LLVM::FCmpPredicate::ule), + op.getLhs(), op.getRhs(), + mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::eq: { + if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::ICmpPredicateAttr::get(getContext(), + mlir::LLVM::ICmpPredicate::eq), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::FCmpPredicateAttr::get(getContext(), + mlir::LLVM::FCmpPredicate::ueq), + op.getLhs(), op.getRhs(), + mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + } else { + llvm_unreachable("Unknown Operand Type"); + } + break; + } + case mlir::cir::CmpOpKind::ne: { + if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::ICmpPredicateAttr::get(getContext(), + mlir::LLVM::ICmpPredicate::ne), + op.getLhs(), op.getRhs()); + } else if (type.isa()) { + rewriter.replaceOpWithNewOp( + op, integerType, + mlir::LLVM::FCmpPredicateAttr::get(getContext(), + mlir::LLVM::FCmpPredicate::une), + op.getLhs(), op.getRhs(), + mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); } else { llvm_unreachable("Unknown Operand Type"); } break; } - default: - llvm_unreachable("NYI"); - - // case mlir::cir::CmpOpKind::ge: { - // if (type.isa()) { - // mlir::arith::CmpIPredicate cmpIType; - // if (!type.isSignlessInteger()) - // llvm_unreachable("integer type not supported in CIR yet"); - // cmpIType = mlir::arith::CmpIPredicate::uge; - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - // op.getLhs(), op.getRhs()); - // } else if (type.isa()) { - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpFPredicateAttr::get( - // getContext(), mlir::arith::CmpFPredicate::UGE), - // op.getLhs(), op.getRhs()); - // } else { - // llvm_unreachable("Unknown Operand Type"); - // } - // break; - // } - // case mlir::cir::CmpOpKind::lt: { - // if (type.isa()) { - // mlir::arith::CmpIPredicate cmpIType; - // if (!type.isSignlessInteger()) - // llvm_unreachable("integer type not supported in CIR yet"); - // cmpIType = mlir::arith::CmpIPredicate::ult; - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - // op.getLhs(), op.getRhs()); - // } else if (type.isa()) { - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpFPredicateAttr::get( - // getContext(), mlir::arith::CmpFPredicate::ULT), - // op.getLhs(), op.getRhs()); - // } else { - // llvm_unreachable("Unknown Operand Type"); - // } - // break; - // } - // case mlir::cir::CmpOpKind::le: { - // if (type.isa()) { - // mlir::arith::CmpIPredicate cmpIType; - // if (!type.isSignlessInteger()) - // llvm_unreachable("integer type not supported in CIR yet"); - // cmpIType = mlir::arith::CmpIPredicate::ule; - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - // op.getLhs(), op.getRhs()); - // } else if (type.isa()) { - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpFPredicateAttr::get( - // getContext(), mlir::arith::CmpFPredicate::ULE), - // op.getLhs(), op.getRhs()); - // } else { - // llvm_unreachable("Unknown Operand Type"); - // } - // break; - // } - // case mlir::cir::CmpOpKind::eq: { - // if (type.isa()) { - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpIPredicateAttr::get(getContext(), - // mlir::arith::CmpIPredicate::eq), - // op.getLhs(), op.getRhs()); - // } else if (type.isa()) { - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpFPredicateAttr::get( - // getContext(), mlir::arith::CmpFPredicate::UEQ), - // op.getLhs(), op.getRhs()); - // } else { - // llvm_unreachable("Unknown Operand Type"); - // } - // break; - // } - // case mlir::cir::CmpOpKind::ne: { - // if (type.isa()) { - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpIPredicateAttr::get(getContext(), - // mlir::arith::CmpIPredicate::ne), - // op.getLhs(), op.getRhs()); - // } else if (type.isa()) { - // rewriter.replaceOpWithNewOp( - // op, integerType, - // mlir::arith::CmpFPredicateAttr::get( - // getContext(), mlir::arith::CmpFPredicate::UNE), - // op.getLhs(), op.getRhs()); - // } else { - // llvm_unreachable("Unknown Operand Type"); - // } - // break; - // } } return mlir::LogicalResult::success(); diff --git a/clang/test/CIR/Lowering/cmp.cir b/clang/test/CIR/Lowering/cmp.cir index 2e71fc0cb207..f6ad3bec44d1 100644 --- a/clang/test/CIR/Lowering/cmp.cir +++ b/clang/test/CIR/Lowering/cmp.cir @@ -11,10 +11,65 @@ module { %5 = cir.load %0 : cir.ptr , i32 %6 = cir.load %1 : cir.ptr , i32 %7 = cir.cmp(gt, %5, %6) : i32, !cir.bool + %8 = cir.load %0 : cir.ptr , i32 + %9 = cir.load %1 : cir.ptr , i32 + %10 = cir.cmp(eq, %8, %9) : i32, !cir.bool + %11 = cir.load %0 : cir.ptr , i32 + %12 = cir.load %1 : cir.ptr , i32 + %13 = cir.cmp(lt, %11, %12) : i32, !cir.bool + %14 = cir.load %0 : cir.ptr , i32 + %15 = cir.load %1 : cir.ptr , i32 + %16 = cir.cmp(ge, %14, %15) : i32, !cir.bool + %17 = cir.load %0 : cir.ptr , i32 + %18 = cir.load %1 : cir.ptr , i32 + %19 = cir.cmp(ne, %17, %18) : i32, !cir.bool + %20 = cir.load %0 : cir.ptr , i32 + %21 = cir.load %1 : cir.ptr , i32 + %22 = cir.cmp(le, %20, %21) : i32, !cir.bool + %23 = cir.load %2 : cir.ptr , f32 + %24 = cir.load %3 : cir.ptr , f32 + %25 = cir.cmp(gt, %23, %24) : f32, !cir.bool + %26 = cir.load %2 : cir.ptr , f32 + %27 = cir.load %3 : cir.ptr , f32 + %28 = cir.cmp(eq, %26, %27) : f32, !cir.bool + %29 = cir.load %2 : cir.ptr , f32 + %30 = cir.load %3 : cir.ptr , f32 + %31 = cir.cmp(lt, %29, %30) : f32, !cir.bool + %32 = cir.load %2 : cir.ptr , f32 + %33 = cir.load %3 : cir.ptr , f32 + %34 = cir.cmp(ge, %32, %33) : f32, !cir.bool + %35 = cir.load %2 : cir.ptr , f32 + %36 = cir.load %3 : cir.ptr , f32 + %37 = cir.cmp(ne, %35, %36) : f32, !cir.bool + %38 = cir.load %2 : cir.ptr , f32 + %39 = cir.load %3 : cir.ptr , f32 + %40 = cir.cmp(le, %38, %39) : f32, !cir.bool cir.return } } // MLIR: = llvm.icmp "ugt" +// MLIR: = llvm.icmp "eq" +// MLIR: = llvm.icmp "ult" +// MLIR: = llvm.icmp "uge" +// MLIR: = llvm.icmp "ne" +// MLIR: = llvm.icmp "ule" +// MLIR: = llvm.fcmp "ugt" +// MLIR: = llvm.fcmp "ueq" +// MLIR: = llvm.fcmp "ult" +// MLIR: = llvm.fcmp "uge" +// MLIR: = llvm.fcmp "une" +// MLIR: = llvm.fcmp "ule" // LLVM: icmp ugt i32 +// LLVM: icmp eq i32 +// LLVM: icmp ult i32 +// LLVM: icmp uge i32 +// LLVM: icmp ne i32 +// LLVM: icmp ule i32 +// LLVM: fcmp ugt float +// LLVM: fcmp ueq float +// LLVM: fcmp ult float +// LLVM: fcmp uge float +// LLVM: fcmp une float +// LLVM: fcmp ule float From ae04343a40ca1c43fd87fb6970c12234d54fca94 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 9 Dec 2022 21:52:57 -0500 Subject: [PATCH 0726/2301] [CIR][Lowering] Support lowering cir.array to llvm.array --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 10 ++++------ clang/test/CIR/Lowering/array.cir | 20 +++++++++++++++++++ 2 files changed, 24 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/Lowering/array.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 17c01e38b121..5c68b8a3d890 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -492,12 +492,10 @@ static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { return mlir::LLVM::LLVMPointerType::get(type.getContext()); }); - // converter.addConversion( - // [&](mlir::IntegerType type) -> mlir::Type { return type; }); - // converter.addConversion( - // [&](mlir::FloatType type) -> mlir::Type { return type; }); - // converter.addConversion( - // [&](mlir::IndexType type) -> mlir::Type { return type; }); + converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { + auto ty = converter.convertType(type.getEltType()); + return mlir::LLVM::LLVMArrayType::get(ty, type.getSize()); + }); converter.addConversion([&](mlir::cir::BoolType type) -> mlir::Type { return mlir::IntegerType::get(type.getContext(), 8, mlir::IntegerType::Signless); diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir new file mode 100644 index 000000000000..582b6d83b148 --- /dev/null +++ b/clang/test/CIR/Lowering/array.cir @@ -0,0 +1,20 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: func @foo() { +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.array<10 x i32> {alignment = 16 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: %1 = alloca [10 x i32], i64 1, align 16 +// LLVM-NEXT: ret void From a61fc2ee3714013ab2a8bc29f11b6ce1beabf393 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 9 Dec 2022 23:15:57 -0500 Subject: [PATCH 0727/2301] [CIR][Lowering] Support lowering of BinOp --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 179 +++++++++--------- clang/test/CIR/Lowering/binop-fp.cir | 68 +++++++ 2 files changed, 157 insertions(+), 90 deletions(-) create mode 100644 clang/test/CIR/Lowering/binop-fp.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5c68b8a3d890..ecd49ef216b8 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -230,95 +230,94 @@ class CIRFuncLowering : public mlir::OpConversionPattern { // } // }; -// class CIRBinOpLowering : public mlir::OpRewritePattern { -// public: -// using OpRewritePattern::OpRewritePattern; +class CIRBinOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; -// mlir::LogicalResult -// matchAndRewrite(mlir::cir::BinOp op, -// mlir::PatternRewriter &rewriter) const override { -// assert((op.getLhs().getType() == op.getRhs().getType()) && -// "inconsistent operands' types not supported yet"); -// mlir::Type type = op.getRhs().getType(); -// assert((type.isa() || type.isa()) && -// "operand type not supported yet"); + mlir::LogicalResult + matchAndRewrite(mlir::cir::BinOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert((op.getLhs().getType() == op.getRhs().getType()) && + "inconsistent operands' types not supported yet"); + mlir::Type type = op.getRhs().getType(); + assert((type.isa() || type.isa()) && + "operand type not supported yet"); -// switch (op.getKind()) { -// case mlir::cir::BinOpKind::Add: -// if (type.isa()) -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// else -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// break; -// case mlir::cir::BinOpKind::Sub: -// if (type.isa()) -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// else -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// break; -// case mlir::cir::BinOpKind::Mul: -// if (type.isa()) -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// else -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// break; -// case mlir::cir::BinOpKind::Div: -// if (type.isa()) { -// if (type.isSignlessInteger()) -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// else -// llvm_unreachable("integer type not supported in CIR yet"); -// } else -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// break; -// case mlir::cir::BinOpKind::Rem: -// if (type.isa()) { -// if (type.isSignlessInteger()) -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// else -// llvm_unreachable("integer type not supported in CIR yet"); -// } else -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// break; -// case mlir::cir::BinOpKind::And: -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// break; -// case mlir::cir::BinOpKind::Or: -// rewriter.replaceOpWithNewOp(op, op.getType(), -// op.getLhs(), -// op.getRhs()); -// break; -// case mlir::cir::BinOpKind::Xor: -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// break; -// case mlir::cir::BinOpKind::Shl: -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// break; -// case mlir::cir::BinOpKind::Shr: -// if (type.isSignlessInteger()) -// rewriter.replaceOpWithNewOp( -// op, op.getType(), op.getLhs(), op.getRhs()); -// else -// llvm_unreachable("integer type not supported in CIR yet"); -// break; -// } + switch (op.getKind()) { + case mlir::cir::BinOpKind::Add: + if (type.isa()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Sub: + if (type.isa()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Mul: + if (type.isa()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Div: + if (type.isa()) { + if (type.isSignlessInteger()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + llvm_unreachable("integer type not supported in CIR yet"); + } else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Rem: + if (type.isa()) { + if (type.isSignlessInteger()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + llvm_unreachable("integer type not supported in CIR yet"); + } else + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::And: + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Or: + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Xor: + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Shl: + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getLhs(), op.getRhs()); + break; + case mlir::cir::BinOpKind::Shr: + if (type.isSignlessInteger()) + rewriter.replaceOpWithNewOp( + op, op.getType(), op.getLhs(), op.getRhs()); + else + llvm_unreachable("integer type not supported in CIR yet"); + break; + } -// return mlir::LogicalResult::success(); -// } -// }; + return mlir::LogicalResult::success(); + } +}; class CIRCmpOpLowering : public mlir::OpConversionPattern { public: @@ -479,13 +478,13 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(patterns.getContext()); - patterns.add( - converter, patterns.getContext()); + patterns.add(converter, patterns.getContext()); } static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { diff --git a/clang/test/CIR/Lowering/binop-fp.cir b/clang/test/CIR/Lowering/binop-fp.cir new file mode 100644 index 000000000000..144095118b9e --- /dev/null +++ b/clang/test/CIR/Lowering/binop-fp.cir @@ -0,0 +1,68 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + %0 = cir.alloca f32, cir.ptr , ["c"] {alignment = 4 : i64} + %1 = cir.alloca f32, cir.ptr , ["d"] {alignment = 4 : i64} + %2 = cir.alloca f32, cir.ptr , ["y", init] {alignment = 4 : i64} + %3 = cir.alloca f64, cir.ptr , ["e"] {alignment = 8 : i64} + %4 = cir.alloca f64, cir.ptr , ["f"] {alignment = 8 : i64} + %5 = cir.alloca f64, cir.ptr , ["g", init] {alignment = 8 : i64} + %6 = cir.load %0 : cir.ptr , f32 + %7 = cir.load %1 : cir.ptr , f32 + %8 = cir.binop(mul, %6, %7) : f32 + cir.store %8, %2 : f32, cir.ptr + %9 = cir.load %2 : cir.ptr , f32 + %10 = cir.load %1 : cir.ptr , f32 + %11 = cir.binop(div, %9, %10) : f32 + cir.store %11, %2 : f32, cir.ptr + %12 = cir.load %2 : cir.ptr , f32 + %13 = cir.load %1 : cir.ptr , f32 + %14 = cir.binop(add, %12, %13) : f32 + cir.store %14, %2 : f32, cir.ptr + %15 = cir.load %2 : cir.ptr , f32 + %16 = cir.load %1 : cir.ptr , f32 + %17 = cir.binop(sub, %15, %16) : f32 + cir.store %17, %2 : f32, cir.ptr + %18 = cir.load %3 : cir.ptr , f64 + %19 = cir.load %4 : cir.ptr , f64 + %20 = cir.binop(add, %18, %19) : f64 + cir.store %20, %5 : f64, cir.ptr + %21 = cir.load %3 : cir.ptr , f64 + %22 = cir.load %4 : cir.ptr , f64 + %23 = cir.binop(sub, %21, %22) : f64 + cir.store %23, %5 : f64, cir.ptr + %24 = cir.load %3 : cir.ptr , f64 + %25 = cir.load %4 : cir.ptr , f64 + %26 = cir.binop(mul, %24, %25) : f64 + cir.store %26, %5 : f64, cir.ptr + %27 = cir.load %3 : cir.ptr , f64 + %28 = cir.load %4 : cir.ptr , f64 + %29 = cir.binop(div, %27, %28) : f64 + cir.store %29, %5 : f64, cir.ptr + cir.return + } +} + +// MLIR: = llvm.alloca {{.*}} f32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR: = llvm.alloca {{.*}} f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR: = llvm.fmul {{.*}} : f32 +// MLIR: = llvm.fdiv +// MLIR: = llvm.fadd +// MLIR: = llvm.fsub +// MLIR: = llvm.fadd {{.*}} : f64 +// MLIR: = llvm.fsub +// MLIR: = llvm.fmul +// MLIR: = llvm.fdiv + +// LLVM: = alloca float, i64 +// LLVM: = alloca double, i64 +// LLVM: = fmul float +// LLVM: = fdiv float +// LLVM: = fadd float +// LLVM: = fsub float +// LLVM: = fadd double +// LLVM: = fsub double +// LLVM: = fmul double +// LLVM: = fdiv double From 230e264227297f9152afd882e045d117e5e3d09f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 12 Dec 2022 19:23:08 -0500 Subject: [PATCH 0728/2301] [CIR][Lowering] Support direct lowering of cir.br --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 24 ++++++------- clang/test/CIR/Lowering/goto.cir | 35 +++++++++++++++++++ 2 files changed, 47 insertions(+), 12 deletions(-) create mode 100644 clang/test/CIR/Lowering/goto.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ecd49ef216b8..d447cf965728 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -464,22 +464,22 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { } }; -// class CIRBrOpLowering : public mlir::OpRewritePattern { -// public: -// using OpRewritePattern::OpRewritePattern; +class CIRBrOpLowering : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; -// mlir::LogicalResult -// matchAndRewrite(mlir::cir::BrOp op, -// mlir::PatternRewriter &rewriter) const override { -// rewriter.replaceOpWithNewOp(op, op.getDest()); -// return mlir::LogicalResult::success(); -// } -// }; + mlir::LogicalResult + matchAndRewrite(mlir::cir::BrOp op, + mlir::PatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, op.getDestOperands(), + op.getDest()); + return mlir::LogicalResult::success(); + } +}; void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(patterns.getContext()); patterns.add, ["b", init] {alignment = 4 : i64} + %1 = cir.cst(1 : i32) : i32 + cir.store %1, %0 : i32, cir.ptr + cir.br ^bb2 + ^bb1: // no predecessors + %2 = cir.load %0 : cir.ptr , i32 + %3 = cir.cst(1 : i32) : i32 + %4 = cir.binop(add, %2, %3) : i32 + cir.store %4, %0 : i32, cir.ptr + cir.br ^bb2 + ^bb2: // 2 preds: ^bb0, ^bb1 + %5 = cir.load %0 : cir.ptr , i32 + %6 = cir.cst(2 : i32) : i32 + %7 = cir.binop(add, %5, %6) : i32 + cir.store %7, %0 : i32, cir.ptr + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @foo +// MLIR: llvm.br ^bb1 +// MLIR: ^bb1: +// MLIR: return + +// LLVM: br label %[[Value:[0-9]+]], +// LLVM-EMPTY: +// LLVM-NEXT: [[Value]]: ; preds = +// LLVM: ret void From b2c65da5962892b42348efc8fe4ec9c922cae43e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 12 Dec 2022 19:23:33 -0500 Subject: [PATCH 0729/2301] [CIR] Add cir-lsp-server This is necessary thanks to CIR no longer living in MLIR and CIRDialect depending on MLIR. So just use the minimal shim that the mlir-lsp-server provides to define our own. --- clang/tools/CMakeLists.txt | 1 + clang/tools/cir-lsp-server/CMakeLists.txt | 35 +++++++++++++++++++ clang/tools/cir-lsp-server/cir-lsp-server.cpp | 20 +++++++++++ 3 files changed, 56 insertions(+) create mode 100644 clang/tools/cir-lsp-server/CMakeLists.txt create mode 100644 clang/tools/cir-lsp-server/cir-lsp-server.cpp diff --git a/clang/tools/CMakeLists.txt b/clang/tools/CMakeLists.txt index b9d2561dfdcb..8f6ed6041e3a 100644 --- a/clang/tools/CMakeLists.txt +++ b/clang/tools/CMakeLists.txt @@ -5,6 +5,7 @@ add_clang_subdirectory(driver) add_clang_subdirectory(apinotes-test) if(CLANG_ENABLE_CIR) add_clang_subdirectory(cir-tool) + add_clang_subdirectory(cir-lsp-server) endif() add_clang_subdirectory(clang-diff) add_clang_subdirectory(clang-format) diff --git a/clang/tools/cir-lsp-server/CMakeLists.txt b/clang/tools/cir-lsp-server/CMakeLists.txt new file mode 100644 index 000000000000..5154a08e7d47 --- /dev/null +++ b/clang/tools/cir-lsp-server/CMakeLists.txt @@ -0,0 +1,35 @@ +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) + +include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) +include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) + +set(LIBS + ${dialect_libs} + ${conversion_libs} + ${test_libs} + clangCIR + clangCIRLoweringThroughMLIR + clangCIRLoweringDirectToLLVM + MLIRCIR + MLIRAffineAnalysis + MLIRAnalysis + MLIRDialect + MLIRLspServerLib + MLIRParser + MLIRPass + MLIRTransforms + MLIRTransformUtils + MLIRSupport + MLIRIR + ) + +add_mlir_tool(cir-lsp-server + cir-lsp-server.cpp + + DEPENDS + ${LIBS} +) + +target_link_libraries(cir-lsp-server PRIVATE ${LIBS}) +llvm_update_compile_flags(cir-lsp-server) diff --git a/clang/tools/cir-lsp-server/cir-lsp-server.cpp b/clang/tools/cir-lsp-server/cir-lsp-server.cpp new file mode 100644 index 000000000000..bd823c13a42e --- /dev/null +++ b/clang/tools/cir-lsp-server/cir-lsp-server.cpp @@ -0,0 +1,20 @@ +//===- cir-lsp-server.cpp - CIR Language Server ---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/IR/Dialect.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/InitAllDialects.h" +#include "mlir/Tools/mlir-lsp-server/MlirLspServerMain.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +int main(int argc, char **argv) { + mlir::DialectRegistry registry; + mlir::registerAllDialects(registry); + registry.insert(); + return failed(mlir::MlirLspServerMain(argc, argv, registry)); +} From af5410eae3e2f37faaad4de1525081489dc65cb4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 12 Dec 2022 19:50:48 -0500 Subject: [PATCH 0730/2301] [CIR][Lowering] Add test for direct lowering for binop-int.cir --- clang/test/CIR/Lowering/binop-int.cir | 75 +++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 clang/test/CIR/Lowering/binop-int.cir diff --git a/clang/test/CIR/Lowering/binop-int.cir b/clang/test/CIR/Lowering/binop-int.cir new file mode 100644 index 000000000000..9493228ec770 --- /dev/null +++ b/clang/test/CIR/Lowering/binop-int.cir @@ -0,0 +1,75 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} + %3 = cir.cst(2 : i32) : i32 cir.store %3, %0 : i32, cir.ptr + %4 = cir.cst(1 : i32) : i32 cir.store %4, %1 : i32, cir.ptr + %5 = cir.load %0 : cir.ptr , i32 + %6 = cir.load %1 : cir.ptr , i32 + %7 = cir.binop(mul, %5, %6) : i32 + cir.store %7, %2 : i32, cir.ptr + %8 = cir.load %2 : cir.ptr , i32 + %9 = cir.load %1 : cir.ptr , i32 + %10 = cir.binop(div, %8, %9) : i32 + cir.store %10, %2 : i32, cir.ptr + %11 = cir.load %2 : cir.ptr , i32 + %12 = cir.load %1 : cir.ptr , i32 + %13 = cir.binop(rem, %11, %12) : i32 + cir.store %13, %2 : i32, cir.ptr + %14 = cir.load %2 : cir.ptr , i32 + %15 = cir.load %1 : cir.ptr , i32 + %16 = cir.binop(add, %14, %15) : i32 + cir.store %16, %2 : i32, cir.ptr + %17 = cir.load %2 : cir.ptr , i32 + %18 = cir.load %1 : cir.ptr , i32 + %19 = cir.binop(sub, %17, %18) : i32 + cir.store %19, %2 : i32, cir.ptr + %20 = cir.load %2 : cir.ptr , i32 + %21 = cir.load %1 : cir.ptr , i32 + %22 = cir.binop(shr, %20, %21) : i32 + cir.store %22, %2 : i32, cir.ptr + %23 = cir.load %2 : cir.ptr , i32 + %24 = cir.load %1 : cir.ptr , i32 + %25 = cir.binop(shl, %23, %24) : i32 + cir.store %25, %2 : i32, cir.ptr + %26 = cir.load %2 : cir.ptr , i32 + %27 = cir.load %1 : cir.ptr , i32 + %28 = cir.binop(and, %26, %27) : i32 + cir.store %28, %2 : i32, cir.ptr + %29 = cir.load %2 : cir.ptr , i32 + %30 = cir.load %1 : cir.ptr , i32 + %31 = cir.binop(xor, %29, %30) : i32 + cir.store %31, %2 : i32, cir.ptr + %32 = cir.load %2 : cir.ptr , i32 + %33 = cir.load %1 : cir.ptr , i32 + %34 = cir.binop(or, %32, %33) : i32 + cir.store %34, %2 : i32, cir.ptr + cir.return + } +} + +// MLIR: = llvm.mul +// MLIR: = llvm.sdiv +// MLIR: = llvm.srem +// MLIR: = llvm.add +// MLIR: = llvm.sub +// MLIR: = llvm.ashr +// MLIR: = llvm.shl +// MLIR: = llvm.and +// MLIR: = llvm.xor +// MLIR: = llvm.or + +// LLVM: = mul i32 +// LLVM: = sdiv i32 +// LLVM: = srem i32 +// LLVM: = add i32 +// LLVM: = sub i32 +// LLVM: = ashr i32 +// LLVM: = shl i32 +// LLVM: = and i32 +// LLVM: = xor i32 +// LLVM: = or i32 From d6daacc25fa0b7488203703c16df07124898f25e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 12 Dec 2022 20:02:26 -0500 Subject: [PATCH 0731/2301] [CIR][Lowering] Support lowering unaryops Only curiosity here is that llvm lowers to nsw and MLIR doesn't seem to support it. We'll have to add this support to their backend I guess. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 91 +++++++++---------- clang/test/CIR/Lowering/unary-inc-dec.cir | 29 ++++++ 2 files changed, 74 insertions(+), 46 deletions(-) create mode 100644 clang/test/CIR/Lowering/unary-inc-dec.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d447cf965728..9343882c5a13 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -186,49 +186,48 @@ class CIRFuncLowering : public mlir::OpConversionPattern { } }; -// class CIRUnaryOpLowering : public mlir::OpRewritePattern -// { public: -// using OpRewritePattern::OpRewritePattern; +class CIRUnaryOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; -// mlir::LogicalResult -// matchAndRewrite(mlir::cir::UnaryOp op, -// mlir::PatternRewriter &rewriter) const override { -// mlir::Type type = op.getInput().getType(); -// assert(type.isa() && "operand type not supported -// yet"); - -// switch (op.getKind()) { -// case mlir::cir::UnaryOpKind::Inc: { -// auto One = rewriter.create( -// op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); -// rewriter.replaceOpWithNewOp(op, op.getType(), -// op.getInput(), One); -// break; -// } -// case mlir::cir::UnaryOpKind::Dec: { -// auto One = rewriter.create( -// op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); -// rewriter.replaceOpWithNewOp(op, op.getType(), -// op.getInput(), One); -// break; -// } -// case mlir::cir::UnaryOpKind::Plus: { -// rewriter.replaceOp(op, op.getInput()); -// break; -// } -// case mlir::cir::UnaryOpKind::Minus: { -// auto Zero = rewriter.create( -// op.getLoc(), type, mlir::IntegerAttr::get(type, 0)); -// rewriter.replaceOpWithNewOp(op, op.getType(), -// Zero, -// op.getInput()); -// break; -// } -// } + mlir::LogicalResult + matchAndRewrite(mlir::cir::UnaryOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::Type type = op.getInput().getType(); + assert(type.isa() && "operand type not supported yet"); -// return mlir::LogicalResult::success(); -// } -// }; + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Inc: { + auto One = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getInput(), One); + break; + } + case mlir::cir::UnaryOpKind::Dec: { + auto One = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); + rewriter.replaceOpWithNewOp(op, op.getType(), + op.getInput(), One); + break; + } + case mlir::cir::UnaryOpKind::Plus: { + rewriter.replaceOp(op, op.getInput()); + break; + } + case mlir::cir::UnaryOpKind::Minus: { + auto Zero = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, 0)); + rewriter.replaceOpWithNewOp(op, op.getType(), Zero, + op.getInput()); + break; + } + } + + return mlir::LogicalResult::success(); + } +}; class CIRBinOpLowering : public mlir::OpConversionPattern { public: @@ -479,12 +478,12 @@ class CIRBrOpLowering : public mlir::OpRewritePattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(patterns.getContext()); - patterns.add(converter, patterns.getContext()); + patterns.add(converter, + patterns.getContext()); } static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir new file mode 100644 index 000000000000..559ba71d7587 --- /dev/null +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -0,0 +1,29 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.cst(2 : i32) : i32 + cir.store %2, %0 : i32, cir.ptr + cir.store %2, %1 : i32, cir.ptr + + %3 = cir.load %0 : cir.ptr , i32 + %4 = cir.unary(inc, %3) : i32, i32 + cir.store %4, %0 : i32, cir.ptr + + %5 = cir.load %1 : cir.ptr , i32 + %6 = cir.unary(dec, %5) : i32, i32 + cir.store %6, %1 : i32, cir.ptr + cir.return + } +} + +// MLIR: = llvm.mlir.constant(1 : i32) +// MLIR: = llvm.add +// MLIR: = llvm.mlir.constant(1 : i32) +// MLIR: = llvm.sub + +// LLVM: = add i32 %[[#]], 1 +// LLVM: = sub i32 %[[#]], 1 From e78684c3aba4c3ae7f6bcd3e62f3fa9e42eb5d96 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 12 Dec 2022 20:07:04 -0500 Subject: [PATCH 0732/2301] [CIR][Lowering] Add test for lowering unary plus/sub --- clang/test/CIR/Lowering/unary-plus-minus.cir | 29 ++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 clang/test/CIR/Lowering/unary-plus-minus.cir diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir new file mode 100644 index 000000000000..ea150950be21 --- /dev/null +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -0,0 +1,29 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.cst(2 : i32) : i32 + cir.store %2, %0 : i32, cir.ptr + cir.store %2, %1 : i32, cir.ptr + + %3 = cir.load %0 : cir.ptr , i32 + %4 = cir.unary(plus, %3) : i32, i32 + cir.store %4, %0 : i32, cir.ptr + + %5 = cir.load %1 : cir.ptr , i32 + %6 = cir.unary(minus, %5) : i32, i32 + cir.store %6, %1 : i32, cir.ptr + cir.return + } +} + +// MLIR: %[[#INPUT_PLUS:]] = llvm.load +// MLIR: llvm.store %[[#INPUT_PLUS]] +// MLIR: %[[#INPUT_MINUS:]] = llvm.load +// MLIR: %[[ZERO:[a-z0-9_]+]] = llvm.mlir.constant(0 : i32) +// MLIR: llvm.sub %[[ZERO]], %[[#INPUT_MINUS]] + +// LLVM: = sub i32 0, %[[#]] From dfbf113b9909ce4ecff33def085f89bfcd27f6fe Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 12 Dec 2022 20:23:35 -0500 Subject: [PATCH 0733/2301] [CIR][Lowering] Support lowering cir.call op directly --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 36 +++++++++---------- clang/test/CIR/Lowering/call.cir | 28 +++++++++++++++ 2 files changed, 45 insertions(+), 19 deletions(-) create mode 100644 clang/test/CIR/Lowering/call.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 9343882c5a13..7badab99d124 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -65,19 +65,18 @@ struct ConvertCIRToLLVMPass virtual StringRef getArgument() const override { return "cir-to-llvm"; } }; -// class CIRCallLowering : public mlir::OpRewritePattern { -// public: -// using OpRewritePattern::OpRewritePattern; - -// mlir::LogicalResult -// matchAndRewrite(mlir::cir::CallOp op, -// mlir::PatternRewriter &rewriter) const override { -// rewriter.replaceOpWithNewOp( -// op, mlir::SymbolRefAttr::get(op), op.getResultTypes(), -// op.getArgOperands()); -// return mlir::LogicalResult::success(); -// } -// }; +class CIRCallLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CallOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, op.getResultTypes(), op.getCalleeAttr(), op.getArgOperands()); + return mlir::LogicalResult::success(); + } +}; class CIRAllocaLowering : public mlir::OpConversionPattern { @@ -478,12 +477,11 @@ class CIRBrOpLowering : public mlir::OpRewritePattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(patterns.getContext()); - patterns.add(converter, - patterns.getContext()); + patterns.add(patterns.getContext()); + patterns.add( + converter, patterns.getContext()); } static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { diff --git a/clang/test/CIR/Lowering/call.cir b/clang/test/CIR/Lowering/call.cir new file mode 100644 index 000000000000..1de50ed9ff23 --- /dev/null +++ b/clang/test/CIR/Lowering/call.cir @@ -0,0 +1,28 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @a() { + cir.return + } + cir.func @d() { + cir.call @a() : () -> () + cir.return + } +} + +// MLIR: llvm.func @a() { +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: llvm.func @d() { +// MLIR-NEXT: llvm.call @a() : () -> () +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } + +// LLVM: define void @a() +// LLVM-NEXT: ret void +// LLVM-NEXT: } +// LLVM: define void @d() +// LLVM-NEXT: call void @a() +// LLVM-NEXT: ret void +// LLVM-NEXT: } From 260b76e93ddd7ef75ed112823218856572af429a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 9 Dec 2022 15:28:45 -0800 Subject: [PATCH 0734/2301] [CIR][CIRGen] Improve naming for temporary allocas --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 6 +++--- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 15 +++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 20 +++++++++++++------- clang/test/CIR/CodeGen/assign-operator.cpp | 2 +- 5 files changed, 34 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 535516b02511..a0bd46e2cbfb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -530,7 +530,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, bool DestIsVolatile = ReturnValue.isVolatile(); if (!DestPtr.isValid()) { - DestPtr = CreateMemTemp(RetTy, callLoc, "agg.tmp"); + DestPtr = CreateMemTemp(RetTy, callLoc, getCounterAggTmpAsString()); DestIsVolatile = false; } @@ -621,8 +621,8 @@ RValue CIRGenFunction::buildAnyExprToTemp(const Expr *E) { AggValueSlot AggSlot = AggValueSlot::ignored(); if (hasAggregateEvaluationKind(E->getType())) - AggSlot = - CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), "agg.tmp"); + AggSlot = CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), + getCounterAggTmpAsString()); return buildAnyExpr(E, AggSlot); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 1912117ebe5c..a5a9a2361695 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1307,8 +1307,8 @@ static Address createReferenceTemporary(CIRGenFunction &CGF, (Ty->isArrayType() || Ty->isRecordType()) && CGF.CGM.isTypeConstant(Ty, true)) assert(0 && "NYI"); - return CGF.CreateMemTemp(Ty, CGF.getLoc(M->getSourceRange()), "ref.tmp", - Alloca); + return CGF.CreateMemTemp(Ty, CGF.getLoc(M->getSourceRange()), + CGF.getCounterRefTmpAsString(), Alloca); } case SD_Thread: case SD_Static: diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 3f4a95989ef6..5a5f4bec7cc7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1059,6 +1059,21 @@ clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, return ResTy; } +static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) { + SmallString<256> Buffer; + llvm::raw_svector_ostream Out(Buffer); + Out << name << cnt; + return std::string(Out.str()); +} + +std::string CIRGenFunction::getCounterAggTmpAsString() { + return getVersionedTmpName("agg.tmp", CounterAggTmp++); +} + +std::string CIRGenFunction::getCounterRefTmpAsString() { + return getVersionedTmpName("ref.tmp", CounterRefTmp++); +} + CIRGenFunction::CIRGenFPOptionsRAII::CIRGenFPOptionsRAII(CIRGenFunction &CGF, const clang::Expr *E) : CGF(CGF) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index cbbacf1224f4..9c16f7911298 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -465,10 +465,10 @@ class CIRGenFunction { bool isCoroutine() const { return CurCoro.Data != nullptr; } - /// CurGD - The GlobalDecl for the current function being compiled. + /// The GlobalDecl for the current function being compiled. clang::GlobalDecl CurGD; - /// ReturnValue - The temporary alloca to hold the return value. This is + /// The temporary alloca to hold the return value. This is /// invalid iff the function has no return value. Address ReturnValue = Address::invalid(); @@ -479,7 +479,7 @@ class CIRGenFunction { std::optional FnRetCIRTy; std::optional FnRetAlloca; - /// CXXThisDecl - When generating code for a C++ member function, this will + /// When generating code for a C++ member function, this will /// hold the implicit 'this' declaration. clang::ImplicitParamDecl *CXXABIThisDecl = nullptr; mlir::Operation *CXXABIThisValue = nullptr; @@ -491,9 +491,9 @@ class CIRGenFunction { /// expression. Address CXXDefaultInitExprThis = Address::invalid(); - // CurFuncDecl - Holds the Decl for the current outermost non-closure context + // Holds the Decl for the current outermost non-closure context const clang::Decl *CurFuncDecl = nullptr; - /// CurCodeDecl - This is the inner-most code context, which includes blocks. + /// This is the inner-most code context, which includes blocks. const clang::Decl *CurCodeDecl; const CIRGenFunctionInfo *CurFnInfo; clang::QualType FnRetTy; @@ -546,11 +546,11 @@ class CIRGenFunction { bool ShouldEmitLifetimeMarkers; using DeclMapTy = llvm::DenseMap; - /// LocalDeclMap - This keeps track of the CIR allocas or globals for local C + /// This keeps track of the CIR allocas or globals for local C /// delcs. DeclMapTy LocalDeclMap; - /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid + /// Whether llvm.stacksave has been called. Used to avoid /// calling llvm.stacksave for multiple VLAs in the same scope. /// TODO: Translate to MLIR bool DidCallStackSave = false; @@ -566,6 +566,12 @@ class CIRGenFunction { /// should emit cleanups. bool CurFuncIsThunk = false; + /// Hold counters for incrementally naming temporaries + unsigned CounterRefTmp = 0; + unsigned CounterAggTmp = 0; + std::string getCounterRefTmpAsString(); + std::string getCounterAggTmpAsString(); + /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index f129d78a8cc8..cb102c193b88 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -72,7 +72,7 @@ int main() { // CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () // CHECK: cir.scope { // CHECK: %3 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s"] {alignment = 8 : i64} -// CHECK: %4 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["ref.tmp"] {alignment = 8 : i64} +// CHECK: %4 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} // CHECK: %5 = cir.get_global @".str" : cir.ptr > // CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr // CHECK: cir.call @_ZN6StringC2EPKc(%3, %6) : (!cir.ptr, !cir.ptr) -> () From d3e8b1b9981dac9fa8daced8a3a6f7b652f554eb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 12 Dec 2022 20:31:59 -0800 Subject: [PATCH 0735/2301] [CIR][CIRGen] Fix simple aggregate initialization and add tests --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 26 ++++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 17 +++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 12 +++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + clang/lib/CIR/CodeGen/CIRGenModule.cpp | 9 ++++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 5 +++++ clang/test/CIR/CodeGen/agg-init.cpp | 18 ++++++++++++++++ 8 files changed, 87 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/agg-init.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index a5a9a2361695..aa6556a57f86 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -575,8 +575,8 @@ RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot, assert(0 && "not implemented"); case TEK_Aggregate: { if (!ignoreResult && aggSlot.isIgnored()) - aggSlot = - CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), "agg-temp"); + aggSlot = CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), + getCounterAggTmpAsString()); buildAggExpr(E, aggSlot); return aggSlot.asRValue(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index d42e29b6e899..db51c006ba12 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -111,7 +111,7 @@ class AggExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } void VisitChooseExpr(const ChooseExpr *E) { llvm_unreachable("NYI"); } - void VisitInitListExpr(InitListExpr *E) { llvm_unreachable("NYI"); } + void VisitInitListExpr(InitListExpr *E); void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, llvm::Value *outerBegin = nullptr) { llvm_unreachable("NYI"); @@ -209,7 +209,7 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { void AggExprEmitter::VisitCastExpr(CastExpr *E) { if (const auto *ECE = dyn_cast(E)) - assert(0 && "NYI"); + CGF.CGM.buildExplicitCastExprType(ECE, &CGF); switch (E->getCastKind()) { case CK_NoOp: @@ -338,6 +338,28 @@ void AggExprEmitter::withReturnValueSlot( } } +void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { + // If the initializer list is empty ({}), and there are + // no explicitly initialized elements. + if (E->getNumInits() == 0) + return; + + // TODO(cir): use something like CGF.ErrorUnsupported + if (E->hadArrayRangeDesignator()) + llvm_unreachable("GNU array range designator extension"); + + if (E->isTransparent()) + return Visit(E->getInit(0)); + + // Handle initialization of an array. + if (E->getType()->isArrayType()) { + llvm_unreachable("NYI"); + } + + assert(E->getType()->isRecordType() && "Only support structs/unions here!"); + llvm_unreachable("NYI"); +} + //===----------------------------------------------------------------------===// // Helpers and dispatcher //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index a4896eb1e8ee..26abb55c45cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -258,7 +258,22 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, assert(!Dest.isIgnored() && "Must have a destination!"); const auto *CD = E->getConstructor(); - assert(!E->requiresZeroInitialization() && "zero initialization NYI"); + // If we require zero initialization before (or instead of) calling the + // constructor, as can be the case with a non-user-provided default + // constructor, emit the zero initialization now, unless destination is + // already zeroed. + if (E->requiresZeroInitialization() && !Dest.isZeroed()) { + switch (E->getConstructionKind()) { + case CXXConstructionKind::Delegating: + case CXXConstructionKind::Complete: + buildNullInitialization(Dest.getAddress(), E->getType()); + break; + case CXXConstructionKind::VirtualBase: + case CXXConstructionKind::NonVirtualBase: + llvm_unreachable("NYI"); + break; + } + } // If this is a call to a trivial default constructor: // In LLVM: do nothing. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 5a5f4bec7cc7..b51233e88ee3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1074,6 +1074,18 @@ std::string CIRGenFunction::getCounterRefTmpAsString() { return getVersionedTmpName("ref.tmp", CounterRefTmp++); } +void CIRGenFunction::buildNullInitialization(Address DestPtr, QualType Ty) { + // Ignore empty classes in C++. + if (getLangOpts().CPlusPlus) { + if (const RecordType *RT = Ty->getAs()) { + if (cast(RT->getDecl())->isEmpty()) + return; + } + } + + llvm_unreachable("NYI"); +} + CIRGenFunction::CIRGenFPOptionsRAII::CIRGenFPOptionsRAII(CIRGenFunction &CGF, const clang::Expr *E) : CGF(CGF) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9c16f7911298..0760d0df5186 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -658,6 +658,7 @@ class CIRGenFunction { RValue buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue); + void buildNullInitialization(Address DestPtr, QualType Ty); mlir::Value buildCXXNewExpr(const CXXNewExpr *E); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 427cd867958c..3fc69ded00ff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1947,3 +1947,12 @@ void CIRGenModule::applyReplacements() { } } } + +void CIRGenModule::buildExplicitCastExprType(const ExplicitCastExpr *E, + CIRGenFunction *CGF) { + // Bind VLAs in the cast type. + if (CGF && E->getType()->isVariablyModifiedType()) + llvm_unreachable("NYI"); + + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 337712e52e08..c48074fdf3e8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -403,6 +403,11 @@ class CIRGenModule { mlir::FunctionType Ty, const clang::FunctionDecl *FD); + /// Emit type info if type of an expression is a variably modified + /// type. Also emit proper debug info for cast types. + void buildExplicitCastExprType(const ExplicitCastExpr *E, + CIRGenFunction *CGF = nullptr); + private: // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp new file mode 100644 index 000000000000..674f5689cf28 --- /dev/null +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Zero { + void yolo(); +}; + +void f() { + Zero z0 = Zero(); + // {} no element init. + Zero z1 = Zero{}; +} + +// CHECK: cir.func @_Z1fv() { +// CHECK: %0 = cir.alloca !ty_22struct2EZero22, cir.ptr , ["z0"] +// CHECK: %1 = cir.alloca !ty_22struct2EZero22, cir.ptr , ["z1"] +// CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.return From fa0198c26423fde1ee84ef39527e9c679cd4177c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 12 Dec 2022 22:16:02 -0800 Subject: [PATCH 0736/2301] [CIR][CIRGen] Properly tag initialization in some missing places --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 12 +++++++++++- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 7 ------- clang/test/CIR/CodeGen/agg-init.cpp | 2 +- clang/test/CIR/CodeGen/assign-operator.cpp | 4 ++-- clang/test/CIR/CodeGen/ctor-alias.cpp | 2 +- clang/test/CIR/CodeGen/ctor.cpp | 2 +- 6 files changed, 16 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 90a74fdfee00..bd16d5aff509 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -168,7 +168,17 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { if (!constant) { initializeWhatIsTechnicallyUninitialized(Loc); LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); - return buildExprAsInit(Init, &D, lv); + buildExprAsInit(Init, &D, lv); + // In case lv has uses it means we indeed initialized something + // out of it while trying to build the expression, mark it as such. + auto addr = lv.getAddress().getPointer(); + assert(addr && "Should have an address"); + auto allocaOp = dyn_cast_or_null(addr.getDefiningOp()); + assert(allocaOp && "Address should come straight out of the alloca"); + + if (!allocaOp.use_empty()) + allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext())); + return; } if (!emission.IsConstantAggregate) { diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 02d46315cf34..51296d36d774 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -803,13 +803,6 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { default: llvm_unreachable("NYI"); } - - // If other styles of initialization gets added, required to add support - // here. - auto varDecl = allocaOp.getAst(); - assert(!varDecl || - (!allocaOp.getInit() || !varDecl->getAstDecl()->isDirectInit()) && - "not implemented"); } void LifetimeCheckPass::checkStore(StoreOp storeOp) { diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 674f5689cf28..c21d181f8c9b 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -12,7 +12,7 @@ void f() { } // CHECK: cir.func @_Z1fv() { -// CHECK: %0 = cir.alloca !ty_22struct2EZero22, cir.ptr , ["z0"] +// CHECK: %0 = cir.alloca !ty_22struct2EZero22, cir.ptr , ["z0", init] // CHECK: %1 = cir.alloca !ty_22struct2EZero22, cir.ptr , ["z1"] // CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index cb102c193b88..702ec45eb795 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -68,10 +68,10 @@ int main() { // CHECK: cir.func @main() -> i32 { // CHECK: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["sv"] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["sv", init] {alignment = 8 : i64} // CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () // CHECK: cir.scope { -// CHECK: %3 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s"] {alignment = 8 : i64} +// CHECK: %3 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s", init] {alignment = 8 : i64} // CHECK: %4 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} // CHECK: %5 = cir.get_global @".str" : cir.ptr > // CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp index a70da7f5fecb..89549cceeded 100644 --- a/clang/test/CIR/CodeGen/ctor-alias.cpp +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -19,7 +19,7 @@ void t() { // CHECK-NOT: cir.fun @_ZN11DummyStringC1EPKc // CHECK: cir.func @_Z1tv -// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EDummyString22, cir.ptr , ["s4"] {alignment = 1 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EDummyString22, cir.ptr , ["s4", init] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index 672af732430e..68b4a5136378 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -27,6 +27,6 @@ void baz() { // CHECK-NEXT: cir.return // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EStruk22, cir.ptr , ["s"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EStruk22, cir.ptr , ["s", init] {alignment = 4 : i64} // CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () // CHECK-NEXT: cir.return From 7f8e8e55177c2a3134d78e05bf94550caa3684bf Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 14 Dec 2022 00:49:52 -0500 Subject: [PATCH 0737/2301] [CIR][CodeGen] Add some unreachables to unary inc/dec gen Also clean up some differences between codegen and cirgen for future readability sake. --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 2 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 91 +++++++++++-------- 2 files changed, 56 insertions(+), 37 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index be0340b3b827..d0f6e1dafa4d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -27,7 +27,7 @@ namespace clang { class FunctionDecl; class VarDecl; class RecordDecl; -} +} // namespace clang namespace mlir { namespace cir { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 5bd5badc83af..58aba4103621 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -45,6 +45,8 @@ class ScalarExprEmitter : public StmtVisitor { return I; } + LValue buildLValue(const Expr *E) { return CGF.buildLValue(E); } + //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// @@ -221,29 +223,35 @@ class ScalarExprEmitter : public StmtVisitor { // Unary Operators. mlir::Value VisitUnaryPostDec(const UnaryOperator *E) { - return buildScalarPrePostIncDec(E); + LValue LV = buildLValue(E->getSubExpr()); + return buildScalarPrePostIncDec(E, LV, false, false); } mlir::Value VisitUnaryPostInc(const UnaryOperator *E) { - return buildScalarPrePostIncDec(E); + LValue LV = buildLValue(E->getSubExpr()); + return buildScalarPrePostIncDec(E, LV, true, false); } mlir::Value VisitUnaryPreDec(const UnaryOperator *E) { - return buildScalarPrePostIncDec(E); + LValue LV = buildLValue(E->getSubExpr()); + return buildScalarPrePostIncDec(E, LV, false, true); } mlir::Value VisitUnaryPreInc(const UnaryOperator *E) { - return buildScalarPrePostIncDec(E); + LValue LV = buildLValue(E->getSubExpr()); + return buildScalarPrePostIncDec(E, LV, true, true); } - mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E) { + mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV, + bool isInc, bool isPre) { + assert(!CGF.getLangOpts().OpenMP && "Not implemented"); QualType type = E->getSubExpr()->getType(); - auto LV = CGF.buildLValue(E->getSubExpr()); - mlir::Value Value; - mlir::Value Input; + bool atomicPHI = false; + mlir::Value value; + mlir::Value input; if (const AtomicType *atomicTy = type->getAs()) { - assert(0 && "no atomics inc/dec yet"); + llvm_unreachable("no atomics inc/dec yet"); } else { - Value = buildLoadOfLValue(LV, E->getExprLoc()); - Input = Value; + value = buildLoadOfLValue(LV, E->getExprLoc()); + input = value; } // NOTE: When possible, more frequent cases are handled first. @@ -255,76 +263,87 @@ class ScalarExprEmitter : public StmtVisitor { // -> bool = ((int)bool + 1 != 0) // An interesting aspect of this is that increment is always true. // Decrement does not have this property. - if (E->isIncrementOp() && type->isBooleanType()) { - assert(0 && "inc simplification for booleans not implemented yet"); + if (isInc && type->isBooleanType()) { + llvm_unreachable("inc simplification for booleans not implemented yet"); // NOTE: We likely want the code below, but loading/store booleans need to // work first. See CIRGenFunction::buildFromMemory(). - Value = Builder.create(CGF.getLoc(E->getExprLoc()), + value = Builder.create(CGF.getLoc(E->getExprLoc()), CGF.getCIRType(type), Builder.getBoolAttr(true)); } else if (type->isIntegerType()) { + // QualType promotedType; bool canPerformLossyDemotionCheck = false; if (CGF.getContext().isPromotableIntegerType(type)) { canPerformLossyDemotionCheck = true; - assert(0 && "no promotable integer inc/dec yet"); + llvm_unreachable("no promotable integer inc/dec yet"); } - if (CGF.SanOpts.hasOneOf( SanitizerKind::ImplicitIntegerArithmeticValueChange) && canPerformLossyDemotionCheck) { - assert(0 && - "perform lossy demotion case for inc/dec not implemented yet"); + llvm_unreachable( + "perform lossy demotion case for inc/dec not implemented yet"); } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) { - Value = buildIncDecConsiderOverflowBehavior(E, Value); + value = buildIncDecConsiderOverflowBehavior(E, value, isInc); } else if (E->canOverflow() && type->isUnsignedIntegerType() && CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) { - assert(0 && - "unsigned integer overflow sanitized inc/dec not implemented"); + llvm_unreachable( + "unsigned integer overflow sanitized inc/dec not implemented"); } else { auto Kind = E->isIncrementOp() ? mlir::cir::UnaryOpKind::Inc : mlir::cir::UnaryOpKind::Dec; - Value = buildUnaryOp(E, Kind, Input); + // NOTE(CIR): clang calls CreateAdd but folds this to a unary op + value = buildUnaryOp(E, Kind, input); } } else if (const PointerType *ptr = type->getAs()) { - assert(0 && "no pointer inc/dec yet"); + llvm_unreachable("no pointer inc/dec yet"); } else if (type->isVectorType()) { - assert(0 && "no vector inc/dec yet"); + llvm_unreachable("no vector inc/dec yet"); } else if (type->isRealFloatingType()) { - assert(0 && "no float inc/dec yet"); + llvm_unreachable("no float inc/dec yet"); } else if (type->isFixedPointType()) { - assert(0 && "no fixed point inc/dec yet"); + llvm_unreachable("no fixed point inc/dec yet"); } else { assert(type->castAs()); - assert(0 && "no objc pointer type inc/dec yet"); + llvm_unreachable("no objc pointer type inc/dec yet"); + } + + if (atomicPHI) { + llvm_unreachable("NYI"); } CIRGenFunction::SourceLocRAIIObject sourceloc{ CGF, CGF.getLoc(E->getSourceRange())}; + // Store the updated result through the lvalue if (LV.isBitField()) - assert(0 && "no bitfield inc/dec yet"); + llvm_unreachable("no bitfield inc/dec yet"); else - CGF.buildStoreThroughLValue(RValue::get(Value), LV); + CGF.buildStoreThroughLValue(RValue::get(value), LV); - return E->isPrefix() ? Value : Input; + // If this is a postinc, return the value read from memory, otherwise use + // the updated value. + return isPre ? value : input; } mlir::Value buildIncDecConsiderOverflowBehavior(const UnaryOperator *E, - mlir::Value V) { + mlir::Value InVal, + bool IsInc) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: { auto Kind = E->isIncrementOp() ? mlir::cir::UnaryOpKind::Inc : mlir::cir::UnaryOpKind::Dec; - return buildUnaryOp(E, Kind, V); - break; + return buildUnaryOp(E, Kind, InVal); } case LangOptions::SOB_Undefined: - assert(0 && - "inc/dec overflow behavior SOB_Undefined not implemented yet"); + // if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) + // return Builder.CreateNSWAdd(InVal, Amount, Name); + llvm_unreachable( + "inc/dec overflow behavior SOB_Undefined not implemented yet"); break; case LangOptions::SOB_Trapping: - assert(0 && "inc/dec overflow behavior SOB_Trapping not implemented yet"); + llvm_unreachable( + "inc/dec overflow behavior SOB_Trapping not implemented yet"); break; } llvm_unreachable("Unknown SignedOverflowBehaviorTy"); From a69d3c9058a7fa833f351cdd61e5902c45139eb4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 14 Dec 2022 00:52:15 -0500 Subject: [PATCH 0738/2301] [CIR][NFC] Mark a few variables [[maybe_unused]] to shush warnings --- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 33293fdb8fa4..81ae157d6953 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -264,7 +264,7 @@ static LValueOrRValue buildSuspendExpression( auto UnbindOnExit = llvm::make_scope_exit([&] { Binder.unbind(CGF); }); auto &builder = CGF.getBuilder(); - LLVM_ATTRIBUTE_UNUSED auto awaitOp = builder.create( + [[maybe_unused]] auto awaitOp = builder.create( CGF.getLoc(S.getSourceRange()), /*readyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index db51c006ba12..bb12afdc0b6d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -351,6 +351,11 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { if (E->isTransparent()) return Visit(E->getInit(0)); + AggValueSlot Dest = EnsureSlot(E->getType()); + + [[maybe_unused]] LValue DestLV = + CGF.makeAddrLValue(Dest.getAddress(), E->getType()); + // Handle initialization of an array. if (E->getType()->isArrayType()) { llvm_unreachable("NYI"); From a63482dede686ec10f24a3e826d51e49798973a5 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 14 Dec 2022 02:43:56 -0500 Subject: [PATCH 0739/2301] [CIR] Add a SignedOverflowBehavior attribute This is set at a global level in clang and thus reasonably corresponds to a module attribute. We will delay usage of this property until lowering. --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 2 + .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 9 ++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 12 +++ .../clang/CIR/Dialect/IR/CIROpsEnums.h | 1 - clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 98 +++++++++++++------ 5 files changed, 89 insertions(+), 33 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index d0f6e1dafa4d..4f4b0232689d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -19,6 +19,8 @@ #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" + //===----------------------------------------------------------------------===// // CIR Dialect Attrs //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index ed31519c05d1..92d41ad8d9bd 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -14,6 +14,7 @@ #define MLIR_CIR_DIALECT_CIR_ATTRS include "mlir/IR/BuiltinAttributeInterfaces.td" +include "mlir/IR/EnumAttr.td" include "clang/CIR/Dialect/IR/CIRDialect.td" //===----------------------------------------------------------------------===// @@ -91,6 +92,14 @@ def CstArrayAttr : CIR_Attr<"CstArray", "cst_array", [TypedAttrInterface]> { let genVerifyDecl = 1; } +def SignedOverflowBehaviorAttr : AttrDef { + let mnemonic = "signed_overflow_behavior"; + let parameters = (ins + "sob::SignedOverflowBehavior":$behavior + ); + let hasCustomAssemblyFormat = 1; + } + //===----------------------------------------------------------------------===// // AST Wrappers //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b1936b973940..7b2d07685a17 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1014,6 +1014,18 @@ def GlobalLinkageKind : I32EnumAttr< let cppNamespace = "::mlir::cir"; } +def SOB_Undefined : I32EnumAttrCase<"undefined", 1>; +def SOB_Defined : I32EnumAttrCase<"defined", 2>; +def SOB_Trapping : I32EnumAttrCase<"trapping", 3>; + +def SignedOverflowBehaviorEnum : I32EnumAttr< + "SignedOverflowBehavior", + "the behavior for signed overflow", + [SOB_Undefined, SOB_Defined, SOB_Trapping]> { + let cppNamespace = "::mlir::cir::sob"; +} + + def GlobalOp : CIR_Op<"global", [Symbol]> { let summary = "Declares or defines a global variable"; let description = [{ diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h index 7adfee6b482b..889cde696e91 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h +++ b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h @@ -15,7 +15,6 @@ #define MLIR_DIALECT_CIR_CIROPSENUMS_H_ #include "mlir/IR/BuiltinAttributes.h" - #include "clang/CIR/Dialect/IR/CIROpsEnums.h.inc" namespace mlir { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 3006eb29fbc5..32ffff9d8f63 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -69,7 +69,7 @@ void cir::CIRDialect::initialize() { // Parses one of the keywords provided in the list `keywords` and returns the // position of the parsed keyword in the list. If none of the keywords from the // list is parsed, returns -1. -static int parseOptionalKeywordAlternative(OpAsmParser &parser, +static int parseOptionalKeywordAlternative(AsmParser &parser, ArrayRef keywords) { for (auto en : llvm::enumerate(keywords)) { if (succeeded(parser.parseOptionalKeyword(en.value()))) @@ -86,8 +86,16 @@ template struct EnumTraits {}; static StringRef stringify(Ty value) { return stringify##Ty(value); } \ static unsigned getMaxEnumVal() { return getMaxEnumValFor##Ty(); } \ } +#define REGISTER_ENUM_TYPE_WITH_NS(NS, Ty) \ + template <> struct EnumTraits { \ + static StringRef stringify(NS::Ty value) { \ + return NS::stringify##Ty(value); \ + } \ + static unsigned getMaxEnumVal() { return NS::getMaxEnumValFor##Ty(); } \ + } REGISTER_ENUM_TYPE(GlobalLinkageKind); +REGISTER_ENUM_TYPE_WITH_NS(sob, SignedOverflowBehavior); } // namespace /// Parse an enum from the keyword, or default to the provided default value. @@ -95,9 +103,7 @@ REGISTER_ENUM_TYPE(GlobalLinkageKind); /// second template argument. /// TODO: teach other places in this file to use this function. template -static RetTy parseOptionalCIRKeyword(OpAsmParser &parser, - OperationState &result, - EnumTy defaultValue) { +static RetTy parseOptionalCIRKeyword(AsmParser &parser, EnumTy defaultValue) { SmallVector names; for (unsigned i = 0, e = EnumTraits::getMaxEnumVal(); i <= e; ++i) names.push_back(EnumTraits::stringify(static_cast(i))); @@ -567,30 +573,31 @@ LogicalResult ScopeOp::verify() { return success(); } //===----------------------------------------------------------------------===// mlir::LogicalResult YieldOp::verify() { - auto canDominateYieldBreak = [&](Operation *parentOp) { - mlir::Region *lastAwaitRegion = nullptr; - while (!llvm::isa(parentOp)) { - auto awaitOp = dyn_cast(parentOp); - if (awaitOp) { - if (lastAwaitRegion && lastAwaitRegion == &awaitOp.getResume()) { - emitOpError() - << "break can only be used in 'ready' and 'suspend' regions"; - return false; + auto canDominateYieldBreak = + [&](Operation *parentOp) { + mlir::Region *lastAwaitRegion = nullptr; + while (!llvm::isa(parentOp)) { + auto awaitOp = dyn_cast(parentOp); + if (awaitOp) { + if (lastAwaitRegion && lastAwaitRegion == &awaitOp.getResume()) { + emitOpError() + << "break can only be used in 'ready' and 'suspend' regions"; + return false; + } + return true; + } + + if (llvm::isa(parentOp)) + return true; + + lastAwaitRegion = parentOp->getParentRegion(); + parentOp = parentOp->getParentOp(); } - return true; - } - - if (llvm::isa(parentOp)) - return true; - - lastAwaitRegion = parentOp->getParentRegion(); - parentOp = parentOp->getParentOp(); - } - emitOpError() - << "shall be dominated by 'cir.loop', 'cir.switch' or 'cir.await'"; - return false; - }; + emitOpError() + << "shall be dominated by 'cir.loop', 'cir.switch' or 'cir.await'"; + return false; + }; auto isDominatedByLoop = [](Operation *parentOp) { while (!llvm::isa(parentOp)) { @@ -1150,12 +1157,11 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { // Default to external linkage if no keyword is provided. - state.addAttribute( - getLinkageAttrNameString(), - GlobalLinkageKindAttr::get( - parser.getContext(), - parseOptionalCIRKeyword( - parser, state, GlobalLinkageKind::ExternalLinkage))); + state.addAttribute(getLinkageAttrNameString(), + GlobalLinkageKindAttr::get( + parser.getContext(), + parseOptionalCIRKeyword( + parser, GlobalLinkageKind::ExternalLinkage))); StringAttr nameAttr; SmallVector arguments; @@ -1552,6 +1558,34 @@ void CstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << ">"; } +::mlir::Attribute SignedOverflowBehaviorAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { + if (parser.parseLess()) + return {}; + auto behavior = parseOptionalCIRKeyword( + parser, mlir::cir::sob::SignedOverflowBehavior::undefined); + if (parser.parseGreater()) + return {}; + + return SignedOverflowBehaviorAttr::get(parser.getContext(), behavior); +} + +void SignedOverflowBehaviorAttr::print(::mlir::AsmPrinter &printer) const { + printer << "<"; + switch (getBehavior()) { + case sob::SignedOverflowBehavior::undefined: + printer << "undefined"; + break; + case sob::SignedOverflowBehavior::defined: + printer << "defined"; + break; + case sob::SignedOverflowBehavior::trapping: + printer << "trapping"; + break; + } + printer << ">"; +} + ::mlir::Attribute ASTFunctionDeclAttr::parse(::mlir::AsmParser &parser, ::mlir::Type type) { // We cannot really parse anything AST related at this point From d38e595f1eceffca9b9d7e427c72efce7e97ba5c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 14 Dec 2022 02:44:57 -0500 Subject: [PATCH 0740/2301] [CIR][CodeGen] Add the SignedOverflowBehavior attr to the module --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 19 +++++++++++++++++-- clang/test/CIR/CodeGen/basic.c | 2 +- clang/test/CIR/CodeGen/call.c | 4 ++-- clang/test/CIR/CodeGen/coro-task.cpp | 2 +- clang/test/CIR/CodeGen/globals.cpp | 2 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 2 +- clang/test/CIR/CodeGen/struct.c | 2 +- clang/test/CIR/IR/invalid.cir | 2 +- .../test/CIR/Lowering/ThroughMLIR/memref.cir | 2 +- clang/test/CIR/Lowering/loadstorealloca.cir | 2 +- clang/test/CIR/cirtool.cir | 2 +- clang/test/CIR/driver.c | 2 +- 12 files changed, 29 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 3fc69ded00ff..2e1e741ef557 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -94,7 +94,22 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, codeGenOpts(CGO), theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), - genTypes{*this} {} + genTypes{*this} { + mlir::cir::sob::SignedOverflowBehavior sob; + switch (langOpts.getSignedOverflowBehavior()) { + case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Defined: + sob = sob::SignedOverflowBehavior::defined; + break; + case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Undefined: + sob = sob::SignedOverflowBehavior::undefined; + break; + case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Trapping: + sob = sob::SignedOverflowBehavior::trapping; + break; + } + theModule->setAttr("cir.sob", + mlir::cir::SignedOverflowBehaviorAttr::get(&context, sob)); +} CIRGenModule::~CIRGenModule() {} @@ -1955,4 +1970,4 @@ void CIRGenModule::buildExplicitCastExprType(const ExplicitCastExpr *E, llvm_unreachable("NYI"); assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); -} \ No newline at end of file +} diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index ff9fd81d45c5..f321f9a926ac 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -9,7 +9,7 @@ int foo(int i) { return i; } -// CHECK: module { +// CHECK: module attributes {cir.sob = #cir.signed_overflow_behavior} { // CHECK-NEXT: cir.func @foo(%arg0: i32 loc({{.*}})) -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 91c57e64ce16..fb418202500f 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -15,7 +15,7 @@ void d(void) { b(0, 1); } -// CHECK: module { +// CHECK: module {{.*}} { // CHECK: cir.func @a() { // CHECK: cir.return // CHECK: } @@ -53,7 +53,7 @@ void d(void) { // CHECK: cir.return // CHECK: } // -// CXX: module { +// CXX: module {{.*}} { // CXX-NEXT: cir.func @_Z1av() { // CXX-NEXT: cir.return // CXX-NEXT: } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 3737f18ef99b..f2fd4d520fca 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -99,6 +99,6 @@ co_invoke_fn co_invoke; }} // namespace folly::coro -// CHECK: module { +// CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !cir.struct<"struct.folly::coro::co_invoke_fn", i8 // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 6b6f9118ab9a..6b65d9516379 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -23,7 +23,7 @@ void use_global_string() { unsigned char c = s2[0]; } -// CHECK: module { +// CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @a = 3 : i32 // CHECK-NEXT: cir.global external @c = 2 : i64 // CHECK-NEXT: cir.global external @y = 3.400000e+00 : f32 diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index fb3b1ff933b1..560752ea398b 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -13,7 +13,7 @@ int s0(int a, int b) { // CHECK: #loc2 = loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]) // CHECK: #loc3 = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) -// CHECK: module { +// CHECK: module {{.*}} { // CHECK: cir.func @_Z2s0ii(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { // CHECK: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc2) // CHECK: %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc3) diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index c9eb9be8d130..6a46c7ffcffa 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -20,7 +20,7 @@ void baz() { // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> // CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> -// CHECK-NEXT: module { +// CHECK-NEXT: module {{.*}} { // CHECK-NEXT: cir.func @baz() { // CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index b10dfd3fe724..ceec9146b6c4 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -226,4 +226,4 @@ cir.func @unary1() { module { cir.global external @v = #cir.zero : i32 // expected-error {{zero expects struct type}} -} \ No newline at end of file +} diff --git a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir index 19b65f5a79b6..4b6b95afd361 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir @@ -11,7 +11,7 @@ module { } } -// MLIR: module { +// MLIR: module { // MLIR-NEXT: func @foo() -> i32 { // MLIR-NEXT: [[alloca:%[a-z0-9]+]] = memref.alloca() {alignment = 4 : i64} : memref // MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index fbc43465a35c..cef3bc98ca87 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -11,7 +11,7 @@ module { } } -// MLIR: module { +// MLIR: module { // MLIR-NEXT: func @foo() -> i32 { // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr diff --git a/clang/test/CIR/cirtool.cir b/clang/test/CIR/cirtool.cir index eabd45ba88c3..45832a192689 100644 --- a/clang/test/CIR/cirtool.cir +++ b/clang/test/CIR/cirtool.cir @@ -14,7 +14,7 @@ module { // LLVM-NEXT: ret void, // LLVM-NEXT: } -// MLIR: module { +// MLIR: module { // MLIR-NEXT: llvm.func @foo() { // MLIR-NEXT: llvm.return // MLIR-NEXT: } diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index 85d6fe31f7e6..8e8a9ccd6f85 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -12,7 +12,7 @@ void foo() {} -// CIR: module { +// CIR: module {{.*}} { // CIR-NEXT: cir.func @foo() { // CIR-NEXT: cir.return // CIR-NEXT: } From d077df823fbb8b5c15cdb33d6b02c428e584dd57 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 13 Dec 2022 22:41:36 -0800 Subject: [PATCH 0741/2301] [CIR][CIRGen] Start adding meaningful coroutines tests Support for coroutines is not complete but we got the minimum support so we can start to write tests and incrementally cover more functionality. Here we start by creating allocas for both the task and the promise, with a first call down to get_return_object in other to get the task. --- clang/test/CIR/CodeGen/coro-task.cpp | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index f2fd4d520fca..d534fa9942c4 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -99,6 +99,26 @@ co_invoke_fn co_invoke; }} // namespace folly::coro +// CHECK: ![[VoidTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task", i8> +// CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct<"struct.folly::coro::Task::promise_type", i8> + // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !cir.struct<"struct.folly::coro::co_invoke_fn", i8 -// CHECK-NEXT: } + +using VoidTask = folly::coro::Task; + +VoidTask silly_task() { + co_await std::suspend_always(); +} + +// CHECK: cir.func @_Z10silly_taskv() -> ![[VoidTask]] { + +// Allocate promise and call get_return_object() to retrieve the task. +// Note there's no ctor call for the promisse given its a direct aggregate. + +// CHECK: %[[#VoidTaskAddr:]] = cir.alloca ![[VoidTask]], {{.*}}, ["__retval"] +// CHECK: %[[#VoidPromisseAddr:]] = cir.alloca ![[VoidPromisse]], {{.*}}, ["__promise"] +// CHECK: %2 = cir.call @_ZN5folly4coro4TaskIvE12promise_type17get_return_objectEv(%[[#VoidPromisseAddr]]) : {{.*}} -> ![[VoidTask]] +// CHECK: cir.store %2, %[[#VoidTaskAddr]] : ![[VoidTask]] + +// CHECK: } \ No newline at end of file From 470fa58a89bf6e80f670a13f6cb9ab28697e7260 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 14 Dec 2022 15:48:07 -0800 Subject: [PATCH 0742/2301] [CIR] Add 'builtin' attribute for cir.func This is going to be used to tag function calls that are builtins, like __builtin_coro_* and friends. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 ++++++ clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7b2d07685a17..b7b17989a09a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1202,6 +1202,7 @@ def FuncOp : CIR_Op<"func", [ let arguments = (ins SymbolNameAttr:$sym_name, TypeAttrOf:$function_type, + UnitAttr:$builtin, DefaultValuedAttr:$linkage, OptionalAttr:$sym_visibility, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 32ffff9d8f63..09e9bccedf81 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1156,6 +1156,9 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, } ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { + if (::mlir::succeeded(parser.parseOptionalKeyword("builtin"))) + state.addAttribute("builtin", parser.getBuilder().getUnitAttr()); + // Default to external linkage if no keyword is provided. state.addAttribute(getLinkageAttrNameString(), GlobalLinkageKindAttr::get( @@ -1217,6 +1220,9 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { void cir::FuncOp::print(OpAsmPrinter &p) { p << ' '; + if (getBuiltin()) + p << "builtin "; + if (getLinkage() != GlobalLinkageKind::ExternalLinkage) p << stringifyGlobalLinkageKind(getLinkage()) << ' '; diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 51296d36d774..ef8e8b078509 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -956,7 +956,7 @@ const clang::CXXMethodDecl *getMethod(ModuleOp mod, StringRef name) { auto *global = mlir::SymbolTable::lookupSymbolIn(mod, name); assert(global && "expected to find symbol"); auto method = dyn_cast(global); - if (!method) + if (!method || method.getBuiltin()) return nullptr; return dyn_cast(method.getAstAttr().getAstDecl()); } From 787a0efa89cb1b0d2176c204672016840dbcce09 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 14 Dec 2022 15:50:06 -0800 Subject: [PATCH 0743/2301] [CIR][CIRGen][NFC] Update comments --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 18 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenTypes.h | 4 ++-- clang/lib/CIR/CodeGen/TargetInfo.h | 1 + 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index d8e5e059dd81..88c862a59d1d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -80,6 +80,24 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return DefaultConstrainedRounding; } + // + // Type helpers + // ------------ + // + + // Fetch the type representing a pointer to an 8-bit integer value. + mlir::cir::PointerType getInt8PtrTy(unsigned AddrSpace = 0) { + return mlir::cir::PointerType::get(getContext(), + mlir::IntegerType::get(getContext(), 8)); + } + + /// Get a constant 32-bit value. + mlir::cir::ConstantOp getInt32(uint32_t C, mlir::Location loc) { + auto int32Ty = mlir::IntegerType::get(getContext(), 32); + return create(loc, int32Ty, + mlir::IntegerAttr::get(int32Ty, C)); + } + // // Operation creation helpers // -------------------------- diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 43a71c307928..edd60b38970b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -158,14 +158,14 @@ class CIRGenTypes { const CIRGenRecordLayout &getCIRGenRecordLayout(const clang::RecordDecl *RD); - /// convertTypeForMem - Convert type T into an mlir::Type. This differs from + /// Convert type T into an mlir::Type. This differs from /// convertType in that it is used to convert to the memory representation /// for a type. For example, the scalar representation for _Bool is i1, but /// the memory representation is usually i8 or i32, depending on the target. // TODO: convert this comment to account for MLIR's equivalence mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); - /// GetFunctionType - Get the LLVM function type for \arg Info. + /// Get the CIR function type for \arg Info. mlir::FunctionType GetFunctionType(const CIRGenFunctionInfo &Info); mlir::FunctionType GetFunctionType(clang::GlobalDecl GD); diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index b4e47d5f9b20..a06f59052302 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -22,6 +22,7 @@ namespace cir { /// This class organizes various target-specific codegeneration issues, like /// target-specific attributes, builtins and so on. +/// Equivalent to LLVM's TargetCodeGenInfo. class TargetCIRGenInfo { std::unique_ptr Info = nullptr; From 9c1fb2f9d43a446b6d80588d6797c94cd3b04fec Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 14 Dec 2022 15:51:05 -0800 Subject: [PATCH 0744/2301] [CIR][CIRGen] Emit coro_id builtin call and update function creation to tag as bultin Add testcase for this and previous commit --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 30 +++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 ++- clang/test/CIR/CodeGen/coro-task.cpp | 13 ++++++++-- 5 files changed, 48 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index d7b32155fc5e..c23d24fb8e1f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -106,6 +106,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, auto fnOp = CGM.GetOrCreateCIRFunction(ND->getName(), ty, gd, /*ForVTable=*/false, /*DontDefer=*/false); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); return buildCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), E, ReturnValue); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 81ae157d6953..14e3ede08c09 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -131,12 +131,42 @@ static mlir::LogicalResult buildBodyAndFallthrough(CIRGenFunction &CGF, return mlir::success(); } +mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc) { + auto int8PtrTy = builder.getInt8PtrTy(); + auto int32Ty = mlir::IntegerType::get(builder.getContext(), 32); + auto nullPtrCst = builder.create( + loc, int8PtrTy, + mlir::cir::NullAttr::get(builder.getContext(), int8PtrTy)); + + auto &TI = CGM.getASTContext().getTargetInfo(); + unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth(); + + mlir::Operation *builtin = CGM.getGlobalValue(builtinCoroId); + mlir::TypeRange argTypes{int32Ty, int8PtrTy, int8PtrTy, int8PtrTy}; + mlir::TypeRange resTypes{int32Ty}; + + mlir::cir::FuncOp fnOp; + if (!builtin) { + fnOp = CGM.createCIRFunction(loc, builtinCoroId, + builder.getFunctionType(argTypes, resTypes), + /*FD=*/nullptr); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + } else + fnOp = cast(builtin); + + mlir::ValueRange inputArgs{builder.getInt32(NewAlign, loc), nullPtrCst, + nullPtrCst, nullPtrCst}; + return builder.create(loc, fnOp, inputArgs); +} + mlir::LogicalResult CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { // This is very different from LLVM codegen as the current intent is to // not expand too much of it here and leave it to dialect codegen. // In the LLVM world, this is where we create calls to coro.id, // coro.alloc and coro.begin. + [[maybe_unused]] auto coroId = + buildCoroIDBuiltinCall(getLoc(S.getBeginLoc())); createCoroData(*this, CurCoro); // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 0760d0df5186..fd6c191a64f8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -777,6 +777,9 @@ class CIRGenFunction { mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S); mlir::LogicalResult buildCoreturnStmt(const CoreturnStmt &S); + static constexpr const char *builtinCoroId = "__builtin_coro_id"; + mlir::cir::CallOp buildCoroIDBuiltinCall(mlir::Location loc); + RValue buildCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 2e1e741ef557..66c100f3d47f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1522,7 +1522,9 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, builder.setInsertionPoint(curCGF->CurFn.getOperation()); f = builder.create(loc, name, Ty); - f.setAstAttr(mlir::cir::ASTFunctionDeclAttr::get(builder.getContext(), FD)); + if (FD) + f.setAstAttr( + mlir::cir::ASTFunctionDeclAttr::get(builder.getContext(), FD)); assert(f.isDeclaration() && "expected empty body"); diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index d534fa9942c4..0bb3b8511b97 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -105,12 +105,16 @@ co_invoke_fn co_invoke; // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !cir.struct<"struct.folly::coro::co_invoke_fn", i8 +// CHECK: cir.func builtin @__builtin_coro_id(i32, !cir.ptr, !cir.ptr, !cir.ptr) -> i32 attributes {builtin, sym_visibility = "private"} + using VoidTask = folly::coro::Task; VoidTask silly_task() { co_await std::suspend_always(); } +// CHECK: cir.func builtin @__builtin_coro_frame() -> !cir.ptr attributes {builtin, sym_visibility = "private"} + // CHECK: cir.func @_Z10silly_taskv() -> ![[VoidTask]] { // Allocate promise and call get_return_object() to retrieve the task. @@ -118,7 +122,12 @@ VoidTask silly_task() { // CHECK: %[[#VoidTaskAddr:]] = cir.alloca ![[VoidTask]], {{.*}}, ["__retval"] // CHECK: %[[#VoidPromisseAddr:]] = cir.alloca ![[VoidPromisse]], {{.*}}, ["__promise"] -// CHECK: %2 = cir.call @_ZN5folly4coro4TaskIvE12promise_type17get_return_objectEv(%[[#VoidPromisseAddr]]) : {{.*}} -> ![[VoidTask]] -// CHECK: cir.store %2, %[[#VoidTaskAddr]] : ![[VoidTask]] + +// CHECK: %[[#NullPtr:]] = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %[[#Align:]] = cir.cst(16 : i32) : i32 +// CHECK: %[[#CoroId:]] = cir.call @__builtin_coro_id(%[[#Align]], %[[#NullPtr]], %[[#NullPtr]], %[[#NullPtr]]) + +// CHECK: %[[#RetObj:]] = cir.call @_ZN5folly4coro4TaskIvE12promise_type17get_return_objectEv(%[[#VoidPromisseAddr]]) : {{.*}} -> ![[VoidTask]] +// CHECK: cir.store %[[#RetObj]], %[[#VoidTaskAddr]] : ![[VoidTask]] // CHECK: } \ No newline at end of file From c3d32622c6b555b305d5f0ade06ac112f63a2973 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 14 Dec 2022 18:30:13 -0800 Subject: [PATCH 0745/2301] [CIR][CIRGen][Coroutines] Add support for calling operator new to allocate memory for coroutine --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 ++ clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 75 ++++++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 15 ++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 5 ++ clang/test/CIR/CodeGen/coro-task.cpp | 15 ++++- 6 files changed, 94 insertions(+), 22 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 88c862a59d1d..c92ff354cdd0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -98,6 +98,10 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::IntegerAttr::get(int32Ty, C)); } + mlir::cir::BoolType getBoolTy() { + return ::mlir::cir::BoolType::get(getContext()); + } + // // Operation creation helpers // -------------------------- diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 14e3ede08c09..f680ecf3ca59 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -27,6 +27,10 @@ struct cir::CGCoroData { // These are used to generate pretty labels for await expressions in LLVM IR. AwaitKind CurrentAwaitKind = AwaitKind::Init; + // Stores the __builtin_coro_id emitted in the function so that we can supply + // it as the first argument to other builtins. + mlir::cir::CallOp CoroId = nullptr; + // How many co_return statements are in the coroutine. Used to decide whether // we need to add co_return; equivalent at the end of the user authored body. unsigned CoreturnCount = 0; @@ -40,7 +44,8 @@ CIRGenFunction::CGCoroInfo::CGCoroInfo() {} CIRGenFunction::CGCoroInfo::~CGCoroInfo() {} static void createCoroData(CIRGenFunction &CGF, - CIRGenFunction::CGCoroInfo &CurCoro) { + CIRGenFunction::CGCoroInfo &CurCoro, + mlir::cir::CallOp CoroId) { if (CurCoro.Data) { llvm_unreachable("EmitCoroutineBodyStatement called twice?"); @@ -48,6 +53,7 @@ static void createCoroData(CIRGenFunction &CGF, } CurCoro.Data = std::unique_ptr(new CGCoroData); + CurCoro.Data->CoroId = CoroId; } namespace { @@ -141,33 +147,68 @@ mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc) { auto &TI = CGM.getASTContext().getTargetInfo(); unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth(); - mlir::Operation *builtin = CGM.getGlobalValue(builtinCoroId); - mlir::TypeRange argTypes{int32Ty, int8PtrTy, int8PtrTy, int8PtrTy}; - mlir::TypeRange resTypes{int32Ty}; + mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroId); + + mlir::cir::FuncOp fnOp; + if (!builtin) { + fnOp = CGM.createCIRFunction( + loc, CGM.builtinCoroId, + builder.getFunctionType( + mlir::TypeRange{int32Ty, int8PtrTy, int8PtrTy, int8PtrTy}, + mlir::TypeRange{int32Ty}), + /*FD=*/nullptr); + assert(fnOp && "should always succeed"); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + } else + fnOp = cast(builtin); + + return builder.create( + loc, fnOp, + mlir::ValueRange{builder.getInt32(NewAlign, loc), nullPtrCst, nullPtrCst, + nullPtrCst}); +} + +mlir::cir::CallOp +CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { + auto boolTy = builder.getBoolTy(); + auto int32Ty = mlir::IntegerType::get(builder.getContext(), 32); + + mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroAlloc); mlir::cir::FuncOp fnOp; if (!builtin) { - fnOp = CGM.createCIRFunction(loc, builtinCoroId, - builder.getFunctionType(argTypes, resTypes), - /*FD=*/nullptr); + fnOp = + CGM.createCIRFunction(loc, CGM.builtinCoroAlloc, + builder.getFunctionType(mlir::TypeRange{int32Ty}, + mlir::TypeRange{boolTy}), + /*FD=*/nullptr); + assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); } else fnOp = cast(builtin); - mlir::ValueRange inputArgs{builder.getInt32(NewAlign, loc), nullPtrCst, - nullPtrCst, nullPtrCst}; - return builder.create(loc, fnOp, inputArgs); + return builder.create( + loc, fnOp, mlir::ValueRange{CurCoro.Data->CoroId.getResult(0)}); } mlir::LogicalResult CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { - // This is very different from LLVM codegen as the current intent is to - // not expand too much of it here and leave it to dialect codegen. - // In the LLVM world, this is where we create calls to coro.id, - // coro.alloc and coro.begin. - [[maybe_unused]] auto coroId = - buildCoroIDBuiltinCall(getLoc(S.getBeginLoc())); - createCoroData(*this, CurCoro); + auto openCurlyLoc = getLoc(S.getBeginLoc()); + auto coroId = buildCoroIDBuiltinCall(openCurlyLoc); + createCoroData(*this, CurCoro, coroId); + + // Backend is allowed to elide memory allocations, to help it, emit + // auto mem = coro.alloc() ? 0 : ... allocation code ...; + auto coroAlloc = buildCoroAllocBuiltinCall(openCurlyLoc); + + mlir::Value allocVal; + builder.create(openCurlyLoc, coroAlloc.getResult(0), + /*withElseRegion=*/false, + /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + allocVal = buildScalarExpr(S.getAllocate()); + builder.create(loc); + }); // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided. if (auto *RetOnAllocFailure = S.getReturnStmtOnAllocFailure()) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index fd6c191a64f8..3263fc16e06a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -777,8 +777,8 @@ class CIRGenFunction { mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S); mlir::LogicalResult buildCoreturnStmt(const CoreturnStmt &S); - static constexpr const char *builtinCoroId = "__builtin_coro_id"; mlir::cir::CallOp buildCoroIDBuiltinCall(mlir::Location loc); + mlir::cir::CallOp buildCoroAllocBuiltinCall(mlir::Location loc); RValue buildCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 66c100f3d47f..d7eb57d237cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1548,6 +1548,17 @@ bool isDefaultedMethod(const clang::FunctionDecl *FD) { return false; } +mlir::Location CIRGenModule::getLocForFunction(const clang::FunctionDecl *FD) { + assert(FD && "Not sure which location to use yet"); + bool invalidLoc = (FD->getSourceRange().getBegin().isInvalid() || + FD->getSourceRange().getEnd().isInvalid()); + if (!invalidLoc) + return getLoc(FD->getSourceRange()); + + // Use the module location + return theModule->getLoc(); +} + /// If the specified mangled name is not in the module, /// create and return a CIR Function with the specified type. If there is /// something in the module with the specified name, return it potentially @@ -1630,10 +1641,10 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( auto *FD = llvm::cast(D); assert(FD && "Only FunctionDecl supported so far."); - auto fnLoc = getLoc(FD->getSourceRange()); + // TODO: CodeGen includeds the linkage (ExternalLinkage) and only passes the // mangledname if Entry is nullptr - auto F = createCIRFunction(fnLoc, MangledName, FTy, FD); + auto F = createCIRFunction(getLocForFunction(FD), MangledName, FTy, FD); if (Entry) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index c48074fdf3e8..cb396e80540e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -390,6 +390,8 @@ class CIRGenModule { void addReplacement(StringRef Name, mlir::Operation *Op); + mlir::Location getLocForFunction(const clang::FunctionDecl *FD); + // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR mlir::cir::FuncOp @@ -408,6 +410,9 @@ class CIRGenModule { void buildExplicitCastExprType(const ExplicitCastExpr *E, CIRGenFunction *CGF = nullptr); + static constexpr const char *builtinCoroId = "__builtin_coro_id"; + static constexpr const char *builtinCoroAlloc = "__builtin_coro_alloc"; + private: // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 0bb3b8511b97..fb4176b831f9 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -117,16 +117,27 @@ VoidTask silly_task() { // CHECK: cir.func @_Z10silly_taskv() -> ![[VoidTask]] { -// Allocate promise and call get_return_object() to retrieve the task. -// Note there's no ctor call for the promisse given its a direct aggregate. +// Allocate promise. // CHECK: %[[#VoidTaskAddr:]] = cir.alloca ![[VoidTask]], {{.*}}, ["__retval"] // CHECK: %[[#VoidPromisseAddr:]] = cir.alloca ![[VoidPromisse]], {{.*}}, ["__promise"] +// Get coroutine id with __builtin_coro_id. + // CHECK: %[[#NullPtr:]] = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK: %[[#Align:]] = cir.cst(16 : i32) : i32 // CHECK: %[[#CoroId:]] = cir.call @__builtin_coro_id(%[[#Align]], %[[#NullPtr]], %[[#NullPtr]], %[[#NullPtr]]) +// Maybe perform allocation calling operator new. + +// CHECK: %[[#ShouldAlloc:]] = cir.call @__builtin_coro_alloc(%[[#CoroId]]) : (i32) -> !cir.bool +// CHECK: cir.if %[[#ShouldAlloc]] { +// CHECK: %[[#CoroSize:]] = cir.call @__builtin_coro_size() : () -> i64 +// CHECK: %[[#CoroFrameAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (i64) -> !cir.ptr +// CHECK: } + +// Call promise.get_return_object() to retrieve the task object. + // CHECK: %[[#RetObj:]] = cir.call @_ZN5folly4coro4TaskIvE12promise_type17get_return_objectEv(%[[#VoidPromisseAddr]]) : {{.*}} -> ![[VoidTask]] // CHECK: cir.store %[[#RetObj]], %[[#VoidTaskAddr]] : ![[VoidTask]] From 31148a470ff5d65c8a5314a9b890889bce4ca61d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 16 Dec 2022 21:10:01 -0500 Subject: [PATCH 0746/2301] [CIR][Rebase] Fix nested struct references in tests This evidently changed while rebasing. --- clang/test/CIR/CodeGen/struct.c | 3 +-- clang/test/CIR/CodeGen/struct.cpp | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 6a46c7ffcffa..6864a482fc81 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * struct Bar { int a; @@ -19,7 +18,7 @@ void baz() { } // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> -// CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> +// CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !ty_22struct2EBar22> // CHECK-NEXT: module {{.*}} { // CHECK-NEXT: cir.func @baz() { // CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 44c4bd9e1ea7..925cbefcb98e 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -25,7 +25,7 @@ void baz() { } // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> -// CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !cir.struct<"struct.Bar", i32, i8>> +// CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !ty_22struct2EBar22> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} From 283858a278dc7832edf5a25a5db7334681957f99 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 16 Dec 2022 21:10:52 -0500 Subject: [PATCH 0747/2301] [CIR][Rebase] Fix some sourcelocation numberings due to new emissions while rebasing --- clang/test/CIR/CodeGen/array.cpp | 3 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 82 ++++++++++------------- 2 files changed, 38 insertions(+), 47 deletions(-) diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 5c41b207aad2..07b5b0524e71 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -Wno-return-stack-address -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * void a0() { int a[10]; @@ -41,7 +40,7 @@ void local_stringlit() { const char *s = "whatnow"; } -// CHECK: cir.global "private" constant internal @".str" = #cir.cst_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} loc(#loc17) +// CHECK: cir.global "private" constant internal @".str" = #cir.cst_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.func @_Z15local_stringlitv() { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 560752ea398b..021d8a75cfa7 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -11,54 +11,46 @@ int s0(int a, int b) { return x; } -// CHECK: #loc2 = loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]) -// CHECK: #loc3 = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) +// CHECK: #loc21 = loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]) +// CHECK: #loc22 = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) // CHECK: module {{.*}} { // CHECK: cir.func @_Z2s0ii(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc2) -// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc3) -// CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc4) -// CHECK: %3 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} loc(#loc5) -// CHECK: cir.store %arg0, %0 : i32, cir.ptr loc(#loc6) -// CHECK: cir.store %arg1, %1 : i32, cir.ptr loc(#loc6) -// CHECK: %4 = cir.load %0 : cir.ptr , i32 loc(#loc7) +// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) +// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) +// CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) +// CHECK: %3 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} loc(#loc23) +// CHECK: cir.store %arg0, %0 : i32, cir.ptr loc(#loc9) +// CHECK: cir.store %arg1, %1 : i32, cir.ptr loc(#loc9) +// CHECK: %4 = cir.load %0 : cir.ptr , i32 loc(#loc10) // CHECK: %5 = cir.load %1 : cir.ptr , i32 loc(#loc8) -// CHECK: %6 = cir.binop(add, %4, %5) : i32 loc(#loc9) -// CHECK: cir.store %6, %3 : i32, cir.ptr loc(#loc5) +// CHECK: %6 = cir.binop(add, %4, %5) : i32 loc(#loc24) +// CHECK: cir.store %6, %3 : i32, cir.ptr loc(#loc23) // CHECK: cir.scope { -// CHECK: %9 = cir.load %3 : cir.ptr , i32 loc(#loc11) -// CHECK: %10 = cir.cst(0 : i32) : i32 loc(#loc12) -// CHECK: %11 = cir.cmp(gt, %9, %10) : i32, !cir.bool loc(#loc13) +// CHECK: %9 = cir.load %3 : cir.ptr , i32 loc(#loc13) +// CHECK: %10 = cir.cst(0 : i32) : i32 loc(#loc14) +// CHECK: %11 = cir.cmp(gt, %9, %10) : i32, !cir.bool loc(#loc26) // CHECK: cir.if %11 { -// CHECK: %12 = cir.cst(0 : i32) : i32 loc(#loc15) -// CHECK: cir.store %12, %3 : i32, cir.ptr loc(#loc16) +// CHECK: %12 = cir.cst(0 : i32) : i32 loc(#loc16) +// CHECK: cir.store %12, %3 : i32, cir.ptr loc(#loc28) // CHECK: } else { -// CHECK: %12 = cir.cst(1 : i32) : i32 loc(#loc17) -// CHECK: cir.store %12, %3 : i32, cir.ptr loc(#loc18) -// CHECK: } loc(#loc14) -// CHECK: } loc(#loc10) -// CHECK: %7 = cir.load %3 : cir.ptr , i32 loc(#loc19) -// CHECK: cir.store %7, %2 : i32, cir.ptr loc(#loc20) -// CHECK: %8 = cir.load %2 : cir.ptr , i32 loc(#loc20) -// CHECK: cir.return %8 : i32 loc(#loc20) -// CHECK: } loc(#loc1) -// CHECK: } loc(#loc0) -// CHECK: #loc0 = loc(unknown) -// CHECK: #loc1 = loc(fused["{{.*}}sourcelocation.cpp":4:1, "{{.*}}sourcelocation.cpp":11:1]) -// CHECK: #loc4 = loc("{{.*}}sourcelocation.cpp":11:1) -// CHECK: #loc5 = loc(fused["{{.*}}sourcelocation.cpp":5:3, "{{.*}}sourcelocation.cpp":5:15]) -// CHECK: #loc6 = loc("{{.*}}sourcelocation.cpp":4:22) -// CHECK: #loc7 = loc("{{.*}}sourcelocation.cpp":5:11) -// CHECK: #loc8 = loc("{{.*}}sourcelocation.cpp":5:15) -// CHECK: #loc9 = loc(fused["{{.*}}sourcelocation.cpp":5:11, "{{.*}}sourcelocation.cpp":5:15]) -// CHECK: #loc10 = loc(fused["{{.*}}sourcelocation.cpp":6:3, "{{.*}}sourcelocation.cpp":9:9]) -// CHECK: #loc11 = loc("{{.*}}sourcelocation.cpp":6:7) -// CHECK: #loc12 = loc("{{.*}}sourcelocation.cpp":6:11) -// CHECK: #loc13 = loc(fused["{{.*}}sourcelocation.cpp":6:7, "{{.*}}sourcelocation.cpp":6:11]) -// CHECK: #loc14 = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9, "{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) -// CHECK: #loc15 = loc("{{.*}}sourcelocation.cpp":7:9) -// CHECK: #loc16 = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9]) -// CHECK: #loc17 = loc("{{.*}}sourcelocation.cpp":9:9) -// CHECK: #loc18 = loc(fused["{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) -// CHECK: #loc19 = loc("{{.*}}sourcelocation.cpp":10:10) -// CHECK: #loc20 = loc(fused["{{.*}}sourcelocation.cpp":10:3, "{{.*}}sourcelocation.cpp":10:10]) +// CHECK: %12 = cir.cst(1 : i32) : i32 loc(#loc12) +// CHECK: cir.store %12, %3 : i32, cir.ptr loc(#loc29) +// CHECK: } loc(#loc27) +// CHECK: } loc(#loc25) +// CHECK: %7 = cir.load %3 : cir.ptr , i32 loc(#loc18) +// CHECK: cir.store %7, %2 : i32, cir.ptr loc(#loc30) +// CHECK: %8 = cir.load %2 : cir.ptr , i32 loc(#loc30) +// CHECK: cir.return %8 : i32 loc(#loc30) +// CHECK: } loc(#loc20) +// CHECK: } loc(#loc) +// CHECK: #loc = loc(unknown) +// CHECK: #loc9 = loc("{{.*}}sourcelocation.cpp":4:22) +// CHECK: #loc20 = loc(fused["{{.*}}sourcelocation.cpp":4:1, "{{.*}}sourcelocation.cpp":11:1]) +// CHECK: #loc23 = loc(fused["{{.*}}sourcelocation.cpp":5:3, "{{.*}}sourcelocation.cpp":5:15]) +// CHECK: #loc24 = loc(fused["{{.*}}sourcelocation.cpp":5:11, "{{.*}}sourcelocation.cpp":5:15]) +// CHECK: #loc25 = loc(fused["{{.*}}sourcelocation.cpp":6:3, "{{.*}}sourcelocation.cpp":9:9]) +// CHECK: #loc26 = loc(fused["{{.*}}sourcelocation.cpp":6:7, "{{.*}}sourcelocation.cpp":6:11]) +// CHECK: #loc27 = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9, "{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) +// CHECK: #loc28 = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9]) +// CHECK: #loc29 = loc(fused["{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) +// CHECK: #loc30 = loc(fused["{{.*}}sourcelocation.cpp":10:3, "{{.*}}sourcelocation.cpp":10:10]) From 0e3cabe097e6bb50276395dbe2d2baa4b29ca293 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 16 Dec 2022 21:29:22 -0500 Subject: [PATCH 0748/2301] [CIR][Rebase] Fix some tests that had extra commas after rebasing --- clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 2 +- clang/test/CIR/Lowering/ThroughMLIR/goto.cir | 3 +-- clang/test/CIR/Lowering/goto.cir | 3 +-- clang/test/CIR/cc1.c | 2 +- clang/test/CIR/cc1.cir | 2 +- clang/test/CIR/cirtool.cir | 3 +-- clang/test/CIR/driver.c | 2 +- 7 files changed, 7 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index c9e5cb4a00db..11213a6f3e7e 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -62,7 +62,7 @@ struct ConvertCIRToMLIRPass void getDependentDialects(mlir::DialectRegistry ®istry) const override { registry.insert(); + mlir::arith::ArithDialect, mlir::cf::ControlFlowDialect>(); } void runOnOperation() final; diff --git a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir index c05adc1505e1..df1ebbf02ee2 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -canonicalize -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -canonicalize -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { @@ -29,7 +28,7 @@ module { // MLIR: ^bb1: // MLIR: return -// LLVM: br label %[[Value:[0-9]+]], +// LLVM: br label %[[Value:[0-9]+]] // LLVM-EMPTY: // LLVM-NEXT: [[Value]]: ; preds = // LLVM: ret void diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 25c1625e80e9..25bd686394d5 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { @@ -29,7 +28,7 @@ module { // MLIR: ^bb1: // MLIR: return -// LLVM: br label %[[Value:[0-9]+]], +// LLVM: br label %[[Value:[0-9]+]] // LLVM-EMPTY: // LLVM-NEXT: [[Value]]: ; preds = // LLVM: ret void diff --git a/clang/test/CIR/cc1.c b/clang/test/CIR/cc1.c index ec489a027250..6a4cb8e3026b 100644 --- a/clang/test/CIR/cc1.c +++ b/clang/test/CIR/cc1.c @@ -15,7 +15,7 @@ void foo() {} // MLIR-NEXT: } // LLVM: define void @foo() -// LLVM-NEXT: ret void, +// LLVM-NEXT: ret void // LLVM-NEXT: } // ASM: .globl foo diff --git a/clang/test/CIR/cc1.cir b/clang/test/CIR/cc1.cir index 2037dec2482a..0442a8359855 100644 --- a/clang/test/CIR/cc1.cir +++ b/clang/test/CIR/cc1.cir @@ -9,5 +9,5 @@ module { } // LLVM: define void @foo() -// LLVM-NEXT: ret void, +// LLVM-NEXT: ret void // LLVM-NEXT: } diff --git a/clang/test/CIR/cirtool.cir b/clang/test/CIR/cirtool.cir index 45832a192689..986e9dddd24e 100644 --- a/clang/test/CIR/cirtool.cir +++ b/clang/test/CIR/cirtool.cir @@ -2,7 +2,6 @@ // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: mlir-translate -mlir-to-llvmir %t.mlir -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// XFAIL: * module { cir.func @foo() { @@ -11,7 +10,7 @@ module { } // LLVM: define void @foo() -// LLVM-NEXT: ret void, +// LLVM-NEXT: ret void // LLVM-NEXT: } // MLIR: module { diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index 8e8a9ccd6f85..804292494484 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -19,7 +19,7 @@ void foo() {} // CIR-NEXT: } // LLVM: define void @foo() -// LLVM-NEXT: ret void, +// LLVM-NEXT: ret void // LLVM-NEXT: } // OBJ: 0: c3 retq From 7ce7a286c01b9208eedad56e7792d8dcf3a30562 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 17 Dec 2022 02:47:38 -0500 Subject: [PATCH 0749/2301] [CIR][Rebase] add debuginfo flag -- this should be one of our first commits This was added during a rebase. --- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 4 ++-- clang/test/CIR/CodeGen/basic.c | 1 - clang/test/CIR/CodeGen/call.c | 1 - clang/test/CIR/CodeGen/types.c | 1 - 4 files changed, 2 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index d7dcfed09655..d8f3ffe62a04 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -234,7 +234,7 @@ class CIRGenConsumer : public clang::ASTConsumer { // FIXME: we cannot roundtrip prettyForm=true right now. mlir::OpPrintingFlags flags; - flags.enableDebugInfo(/*prettyForm=*/false); + flags.enableDebugInfo(/*enable=*/true, /*prettyForm=*/false); mlirMod->print(*outputStream, flags); } break; @@ -243,7 +243,7 @@ class CIRGenConsumer : public clang::ASTConsumer { assert(outputStream && "Why are we here without an output stream?"); // FIXME: we cannot roundtrip prettyForm=true right now. mlir::OpPrintingFlags flags; - flags.enableDebugInfo(/*prettyForm=*/false); + flags.enableDebugInfo(/*enable=*/true, /*prettyForm=*/false); loweredMlirModule->print(*outputStream, flags); break; } diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index f321f9a926ac..9652a4255117 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * int foo(int i); diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index fb418202500f..d9093099693f 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s // RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=CXX -// XFAIL: * void a(void) {} int b(int a, int b) { diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index c5a535b7f038..7ff4a22e8d89 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -2,7 +2,6 @@ // RUN: FileCheck --input-file=%t.cir %s // RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cpp.cir // RUN: FileCheck --input-file=%t.cpp.cir --check-prefix=CHECK-CPP %s -// XFAIL: * int t0(int i) { return i; } unsigned int t1(unsigned int i) { return i; } From 299cefdfa1808377d0606d44a2b3cb3e2108a075 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 17 Dec 2022 02:50:16 -0500 Subject: [PATCH 0750/2301] [CIR] Account for two changes to loc ordering while rebasing --- clang/test/CIR/CodeGen/lambda.cpp | 2 +- clang/test/CIR/CodeGen/struct.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 99a57c0afcf4..1b4ad460571e 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -6,6 +6,6 @@ void fn() { } // CHECK: !ty_22class2Eanon22 = !cir.struct<"class.anon", i8> -// CHECK-NEXT: module +// CHECK-DAG: module // CHECK-NEXT: cir.func @_Z2fnv() // CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon22, cir.ptr , ["a"] diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 6864a482fc81..6747b4713ea5 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -19,7 +19,7 @@ void baz() { // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> // CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !ty_22struct2EBar22> -// CHECK-NEXT: module {{.*}} { +// CHECK-DAG: module {{.*}} { // CHECK-NEXT: cir.func @baz() { // CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} From b835ccba6d12b812aea9722f1e9bf29a350bcd27 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 17 Dec 2022 04:52:17 -0500 Subject: [PATCH 0751/2301] [CIR][Rebase] Account for MLIR enabling typedefs more liberally This occurred while rebasing. Fix it here. --- clang/test/CIR/CodeGen/coro-task.cpp | 5 ++--- clang/test/CIR/IR/struct.cir | 7 ++++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index fb4176b831f9..b9d68e285604 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * namespace std { @@ -103,7 +102,7 @@ co_invoke_fn co_invoke; // CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct<"struct.folly::coro::Task::promise_type", i8> // CHECK: module {{.*}} { -// CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !cir.struct<"struct.folly::coro::co_invoke_fn", i8 +// CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22struct2Efolly3A3Acoro3A3Aco_invoke_fn22 // CHECK: cir.func builtin @__builtin_coro_id(i32, !cir.ptr, !cir.ptr, !cir.ptr) -> i32 attributes {builtin, sym_visibility = "private"} @@ -141,4 +140,4 @@ VoidTask silly_task() { // CHECK: %[[#RetObj:]] = cir.call @_ZN5folly4coro4TaskIvE12promise_type17get_return_objectEv(%[[#VoidPromisseAddr]]) : {{.*}} -> ![[VoidTask]] // CHECK: cir.store %[[#RetObj]], %[[#VoidTaskAddr]] : ![[VoidTask]] -// CHECK: } \ No newline at end of file +// CHECK: } diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index 49aab5938869..bc3cae4083cd 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -1,5 +1,4 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s -// XFAIL: * module { cir.func @structs() { @@ -8,5 +7,7 @@ module { } } -// CHECK: cir.func @structs() { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] +// CHECK: !ty_22S22 = !cir.struct<"S", i8, i16, i32> +// CHECK-NEXT: module { +// CHECK-NEXT: cir.func @structs() { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] From fe481ae2fba3137c86b9cded563c5e090a4e64ff Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 17 Dec 2022 04:52:57 -0500 Subject: [PATCH 0752/2301] [CIR][Rebase] Account for changes to location emission * Fusions now are references to top level locs instead of inline fusions * More seem to be emitted than before * They are inserting them at different points than before --- clang/test/CIR/CodeGen/basic.cpp | 4 ++- clang/test/CIR/CodeGen/binassign.cpp | 4 ++- clang/test/CIR/CodeGen/sourcelocation.cpp | 44 ++++++++++++++++------- clang/test/CIR/CodeGen/struct.cpp | 3 +- 4 files changed, 38 insertions(+), 17 deletions(-) diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index dac085e9ef95..4b7709f598da 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -133,4 +133,6 @@ void if1(int a, bool b, bool c) { // CHECK: } // CHECK: } -// CHECK: #[[locScope]] = loc(fused["{{.*}}basic.cpp":27:3, "{{.*}}basic.cpp":31:3]) +// CHECK-DAG: #[[locScope]] = loc(fused[#[[locScopeA:loc[0-9]+]], #[[locScopeB:loc[0-9]+]]]) +// CHECK-DAG: #[[locScopeA]] = loc("{{.*}}basic.cpp":27:3) +// CHECK-DAG: #[[locScopeB]] = loc("{{.*}}basic.cpp":31:3) diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp index bd53b12bd04f..b765ce2de856 100644 --- a/clang/test/CIR/CodeGen/binassign.cpp +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -50,4 +50,6 @@ int foo(int a, int b) { // CHECK: = cir.binop(or, // CHECK: cir.store {{.*}}[[Value]] -// CHECK: [[SourceLocation]] = loc(fused["{{.*}}binassign.cpp":8:3, "{{.*}}binassign.cpp":8:8]) +// CHECK: [[SourceLocationB:#loc[0-9]+]] = loc("{{.*}}binassign.cpp":8:8) +// CHECK: [[SourceLocationA:#loc[0-9]+]] = loc("{{.*}}binassign.cpp":8:3) +// CHECK: [[SourceLocation]] = loc(fused[[[SourceLocationA]], [[SourceLocationB]]]) diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 021d8a75cfa7..bcb7f0163e01 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -11,10 +11,14 @@ int s0(int a, int b) { return x; } -// CHECK: #loc21 = loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]) -// CHECK: #loc22 = loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19]) -// CHECK: module {{.*}} { -// CHECK: cir.func @_Z2s0ii(%arg0: i32 loc(fused["{{.*}}sourcelocation.cpp":4:8, "{{.*}}sourcelocation.cpp":4:12]), %arg1: i32 loc(fused["{{.*}}sourcelocation.cpp":4:15, "{{.*}}sourcelocation.cpp":4:19])) -> i32 { +// CHECK: #loc3 = loc("{{.*}}sourcelocation.cpp":4:8) +// CHECK: #loc4 = loc("{{.*}}sourcelocation.cpp":4:12) +// CHECK: #loc5 = loc("{{.*}}sourcelocation.cpp":4:15) +// CHECK: #loc6 = loc("{{.*}}sourcelocation.cpp":4:19) +// CHECK: #loc21 = loc(fused[#loc3, #loc4]) +// CHECK: #loc22 = loc(fused[#loc5, #loc6]) +// CHECK: module attributes {cir.sob = #cir.signed_overflow_behavior} { +// CHECK: cir.func @_Z2s0ii(%arg0: i32 loc(fused[#loc3, #loc4]), %arg1: i32 loc(fused[#loc5, #loc6])) -> i32 { // CHECK: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) // CHECK: %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) // CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) @@ -44,13 +48,27 @@ int s0(int a, int b) { // CHECK: } loc(#loc20) // CHECK: } loc(#loc) // CHECK: #loc = loc(unknown) +// CHECK: #loc1 = loc("{{.*}}sourcelocation.cpp":4:1) +// CHECK: #loc2 = loc("{{.*}}sourcelocation.cpp":11:1) +// CHECK: #loc7 = loc("{{.*}}sourcelocation.cpp":5:3) +// CHECK: #loc8 = loc("{{.*}}sourcelocation.cpp":5:15) // CHECK: #loc9 = loc("{{.*}}sourcelocation.cpp":4:22) -// CHECK: #loc20 = loc(fused["{{.*}}sourcelocation.cpp":4:1, "{{.*}}sourcelocation.cpp":11:1]) -// CHECK: #loc23 = loc(fused["{{.*}}sourcelocation.cpp":5:3, "{{.*}}sourcelocation.cpp":5:15]) -// CHECK: #loc24 = loc(fused["{{.*}}sourcelocation.cpp":5:11, "{{.*}}sourcelocation.cpp":5:15]) -// CHECK: #loc25 = loc(fused["{{.*}}sourcelocation.cpp":6:3, "{{.*}}sourcelocation.cpp":9:9]) -// CHECK: #loc26 = loc(fused["{{.*}}sourcelocation.cpp":6:7, "{{.*}}sourcelocation.cpp":6:11]) -// CHECK: #loc27 = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9, "{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) -// CHECK: #loc28 = loc(fused["{{.*}}sourcelocation.cpp":7:5, "{{.*}}sourcelocation.cpp":7:9]) -// CHECK: #loc29 = loc(fused["{{.*}}sourcelocation.cpp":9:5, "{{.*}}sourcelocation.cpp":9:9]) -// CHECK: #loc30 = loc(fused["{{.*}}sourcelocation.cpp":10:3, "{{.*}}sourcelocation.cpp":10:10]) +// CHECK: #loc10 = loc("{{.*}}sourcelocation.cpp":5:11) +// CHECK: #loc11 = loc("{{.*}}sourcelocation.cpp":6:3) +// CHECK: #loc12 = loc("{{.*}}sourcelocation.cpp":9:9) +// CHECK: #loc13 = loc("{{.*}}sourcelocation.cpp":6:7) +// CHECK: #loc14 = loc("{{.*}}sourcelocation.cpp":6:11) +// CHECK: #loc15 = loc("{{.*}}sourcelocation.cpp":7:5) +// CHECK: #loc16 = loc("{{.*}}sourcelocation.cpp":7:9) +// CHECK: #loc17 = loc("{{.*}}sourcelocation.cpp":9:5) +// CHECK: #loc18 = loc("{{.*}}sourcelocation.cpp":10:10) +// CHECK: #loc19 = loc("{{.*}}sourcelocation.cpp":10:3) +// CHECK: #loc20 = loc(fused[#loc1, #loc2]) +// CHECK: #loc23 = loc(fused[#loc7, #loc8]) +// CHECK: #loc24 = loc(fused[#loc10, #loc8]) +// CHECK: #loc25 = loc(fused[#loc11, #loc12]) +// CHECK: #loc26 = loc(fused[#loc13, #loc14]) +// CHECK: #loc27 = loc(fused[#loc15, #loc16, #loc17, #loc12]) +// CHECK: #loc28 = loc(fused[#loc15, #loc16]) +// CHECK: #loc29 = loc(fused[#loc17, #loc12]) +// CHECK: #loc30 = loc(fused[#loc19, #loc18]) diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 925cbefcb98e..147560327850 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * struct Bar { int a; @@ -25,7 +24,7 @@ void baz() { } // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> -// CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !ty_22struct2EBar22> +// CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !ty_22struct2EBar22> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} From 3e149c44d3957cec28a21d8c70918b259956219c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 17 Dec 2022 22:18:43 -0500 Subject: [PATCH 0753/2301] [CIR][NFC] clang-format all CIR code --- clang/lib/CIR/CodeGen/Address.h | 1 + clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenTypes.h | 1 + clang/lib/CIR/CodeGen/CIRGenValue.h | 4 +++- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 1 + clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 8 ++++---- 6 files changed, 11 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 5490503f2271..57d96aad7f66 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -20,6 +20,7 @@ #include "llvm/IR/Constants.h" #include "mlir/IR/Value.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" namespace cir { diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index aa8fc3ce3c11..0948e0575a72 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -7,6 +7,7 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index edd60b38970b..86b2f610959d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -25,6 +25,7 @@ #include "llvm/ADT/SmallPtrSet.h" #include "mlir/IR/MLIRContext.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index a460b055e49d..ab490e06d863 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -24,6 +24,7 @@ #include "llvm/ADT/PointerIntPair.h" #include "mlir/IR/Value.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" namespace cir { @@ -418,7 +419,8 @@ class AggValueSlot { /// Get the preferred size to use when storing a value to this slot. This /// is the type size unless that might overlap another object, in which /// case it's the dsize. - clang::CharUnits getPreferredSize(clang::ASTContext &Ctx, clang::QualType Type) { + clang::CharUnits getPreferredSize(clang::ASTContext &Ctx, + clang::QualType Type) { return mayOverlap() ? Ctx.getTypeInfoDataSizeInChars(Type).Width : Ctx.getTypeSizeInChars(Type); } diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index c95c1b2f2b45..09e89df9a16d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -15,6 +15,7 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/MLIRContext.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 9b8974a99eca..6b5e19c7c82f 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -120,11 +120,11 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl, bool isPacked) : cirGenTypes{cirGenTypes}, astContext{cirGenTypes.getContext()}, - recordDecl{recordDecl}, cxxRecordDecl{llvm::dyn_cast( - recordDecl)}, + recordDecl{recordDecl}, + cxxRecordDecl{llvm::dyn_cast(recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, - IsZeroInitializable(true), - IsZeroInitializableAsBase(true), isPacked{isPacked} {} + IsZeroInitializable(true), IsZeroInitializableAsBase(true), + isPacked{isPacked} {} void CIRRecordLowering::lower(bool nonVirtualBaseType) { if (recordDecl->isUnion()) { From 35e1194528094f0f9d21d3faa4ba0642af4bb976 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 16 Dec 2022 12:48:28 -0800 Subject: [PATCH 0754/2301] [CIR][CIRGen][Coroutines] Implemented __builtin_coro_begin and tie that up with frame allocation --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 16 +++++- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 62 +++++++++++++++++++---- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 1 + clang/test/CIR/CodeGen/coro-task.cpp | 13 ++++- 6 files changed, 85 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index c92ff354cdd0..4e5df58111cc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -102,6 +102,19 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return ::mlir::cir::BoolType::get(getContext()); } + // Creates constant pointer for type ty. + mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { + assert(ty.isa() && "expected cir.ptr"); + return create( + loc, ty, mlir::cir::NullAttr::get(getContext(), ty)); + } + + mlir::Value getBitcast(mlir::Location loc, mlir::Value src, + mlir::Type newTy) { + return create(loc, newTy, mlir::cir::CastKind::bitcast, + src); + } + // // Operation creation helpers // -------------------------- @@ -121,8 +134,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return addr; auto newPtrType = mlir::cir::PointerType::get(getContext(), destType); - auto cast = create( - loc, newPtrType, mlir::cir::CastKind::bitcast, addr.getPointer()); + auto cast = getBitcast(loc, addr.getPointer(), newPtrType); return Address(cast, addr.getElementType(), addr.getAlignment()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index f680ecf3ca59..cb799b2f252a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -31,6 +31,9 @@ struct cir::CGCoroData { // it as the first argument to other builtins. mlir::cir::CallOp CoroId = nullptr; + // Stores the result of __builtin_coro_begin call. + mlir::Value CoroBegin = nullptr; + // How many co_return statements are in the coroutine. Used to decide whether // we need to add co_return; equivalent at the end of the user authored body. unsigned CoreturnCount = 0; @@ -137,12 +140,10 @@ static mlir::LogicalResult buildBodyAndFallthrough(CIRGenFunction &CGF, return mlir::success(); } -mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc) { +mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, + mlir::Value nullPtr) { auto int8PtrTy = builder.getInt8PtrTy(); auto int32Ty = mlir::IntegerType::get(builder.getContext(), 32); - auto nullPtrCst = builder.create( - loc, int8PtrTy, - mlir::cir::NullAttr::get(builder.getContext(), int8PtrTy)); auto &TI = CGM.getASTContext().getTargetInfo(); unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth(); @@ -164,8 +165,8 @@ mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc) { return builder.create( loc, fnOp, - mlir::ValueRange{builder.getInt32(NewAlign, loc), nullPtrCst, nullPtrCst, - nullPtrCst}); + mlir::ValueRange{builder.getInt32(NewAlign, loc), nullPtr, nullPtr, + nullPtr}); } mlir::cir::CallOp @@ -191,25 +192,68 @@ CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { loc, fnOp, mlir::ValueRange{CurCoro.Data->CoroId.getResult(0)}); } +mlir::cir::CallOp +CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, + mlir::Value coroframeAddr) { + auto int8PtrTy = builder.getInt8PtrTy(); + auto int32Ty = mlir::IntegerType::get(builder.getContext(), 32); + mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroBegin); + + mlir::cir::FuncOp fnOp; + if (!builtin) { + fnOp = CGM.createCIRFunction( + loc, CGM.builtinCoroBegin, + builder.getFunctionType(mlir::TypeRange{int32Ty, int8PtrTy}, + mlir::TypeRange{int8PtrTy}), + /*FD=*/nullptr); + assert(fnOp && "should always succeed"); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + } else + fnOp = cast(builtin); + + return builder.create( + loc, fnOp, + mlir::ValueRange{CurCoro.Data->CoroId.getResult(0), coroframeAddr}); +} + mlir::LogicalResult CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { auto openCurlyLoc = getLoc(S.getBeginLoc()); - auto coroId = buildCoroIDBuiltinCall(openCurlyLoc); + auto nullPtrCst = builder.getNullPtr(builder.getInt8PtrTy(), openCurlyLoc); + + auto coroId = buildCoroIDBuiltinCall(openCurlyLoc, nullPtrCst); createCoroData(*this, CurCoro, coroId); // Backend is allowed to elide memory allocations, to help it, emit // auto mem = coro.alloc() ? 0 : ... allocation code ...; auto coroAlloc = buildCoroAllocBuiltinCall(openCurlyLoc); - mlir::Value allocVal; + // Initialize address of coroutine frame to null + auto astVoidPtrTy = CGM.getASTContext().VoidPtrTy; + auto allocaTy = getTypes().convertTypeForMem(astVoidPtrTy); + Address coroFrame = + CreateTempAlloca(allocaTy, getContext().getTypeAlignInChars(astVoidPtrTy), + openCurlyLoc, "__coro_frame_addr", + /*ArraySize=*/nullptr); + + auto storeAddr = coroFrame.getPointer(); + builder.create(openCurlyLoc, nullPtrCst, storeAddr); builder.create(openCurlyLoc, coroAlloc.getResult(0), /*withElseRegion=*/false, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - allocVal = buildScalarExpr(S.getAllocate()); + builder.create( + loc, buildScalarExpr(S.getAllocate()), + storeAddr); builder.create(loc); }); + CurCoro.Data->CoroBegin = + buildCoroBeginBuiltinCall( + openCurlyLoc, + builder.create(openCurlyLoc, allocaTy, storeAddr)) + .getResult(0); + // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided. if (auto *RetOnAllocFailure = S.getReturnStmtOnAllocFailure()) llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index aa6556a57f86..328c7cf788e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1777,7 +1777,8 @@ Address CIRGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, return Result; } -/// This creates a alloca and inserts it into the entry block. +/// This creates a alloca and inserts it into the entry block of the +/// current region. Address CIRGenFunction::CreateTempAllocaWithoutCast(mlir::Type Ty, CharUnits Align, mlir::Location Loc, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 3263fc16e06a..ab761212c31e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -777,8 +777,11 @@ class CIRGenFunction { mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S); mlir::LogicalResult buildCoreturnStmt(const CoreturnStmt &S); - mlir::cir::CallOp buildCoroIDBuiltinCall(mlir::Location loc); + mlir::cir::CallOp buildCoroIDBuiltinCall(mlir::Location loc, + mlir::Value nullPtr); mlir::cir::CallOp buildCoroAllocBuiltinCall(mlir::Location loc); + mlir::cir::CallOp buildCoroBeginBuiltinCall(mlir::Location loc, + mlir::Value coroframeAddr); RValue buildCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index cb396e80540e..e22e02d532ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -412,6 +412,7 @@ class CIRGenModule { static constexpr const char *builtinCoroId = "__builtin_coro_id"; static constexpr const char *builtinCoroAlloc = "__builtin_coro_alloc"; + static constexpr const char *builtinCoroBegin = "__builtin_coro_begin"; private: // An ordered map of canonical GlobalDecls to their mangled names. diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index b9d68e285604..18bc95282d79 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -105,6 +105,9 @@ co_invoke_fn co_invoke; // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22struct2Efolly3A3Acoro3A3Aco_invoke_fn22 // CHECK: cir.func builtin @__builtin_coro_id(i32, !cir.ptr, !cir.ptr, !cir.ptr) -> i32 attributes {builtin, sym_visibility = "private"} +// CHECK: cir.func builtin @__builtin_coro_alloc(i32) -> !cir.bool attributes {builtin, sym_visibility = "private"} +// CHECK: cir.func builtin @__builtin_coro_size() -> i64 attributes {builtin, sym_visibility = "private"} +// CHECK: cir.func builtin @__builtin_coro_begin(i32, !cir.ptr) -> !cir.ptr attributes {builtin, sym_visibility = "private"} using VoidTask = folly::coro::Task; @@ -119,6 +122,7 @@ VoidTask silly_task() { // Allocate promise. // CHECK: %[[#VoidTaskAddr:]] = cir.alloca ![[VoidTask]], {{.*}}, ["__retval"] +// CHECK: %[[#SavedFrameAddr:]] = cir.alloca !cir.ptr, cir.ptr >, ["__coro_frame_addr"] {alignment = 8 : i64} // CHECK: %[[#VoidPromisseAddr:]] = cir.alloca ![[VoidPromisse]], {{.*}}, ["__promise"] // Get coroutine id with __builtin_coro_id. @@ -127,13 +131,18 @@ VoidTask silly_task() { // CHECK: %[[#Align:]] = cir.cst(16 : i32) : i32 // CHECK: %[[#CoroId:]] = cir.call @__builtin_coro_id(%[[#Align]], %[[#NullPtr]], %[[#NullPtr]], %[[#NullPtr]]) -// Maybe perform allocation calling operator new. +// Perform allocation calling operator 'new' depending on __builtin_coro_alloc and +// call __builtin_coro_begin for the final coroutine frame address. // CHECK: %[[#ShouldAlloc:]] = cir.call @__builtin_coro_alloc(%[[#CoroId]]) : (i32) -> !cir.bool +// CHECK: cir.store %[[#NullPtr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > // CHECK: cir.if %[[#ShouldAlloc]] { // CHECK: %[[#CoroSize:]] = cir.call @__builtin_coro_size() : () -> i64 -// CHECK: %[[#CoroFrameAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (i64) -> !cir.ptr +// CHECK: %[[#AllocAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (i64) -> !cir.ptr +// CHECK: cir.store %[[#AllocAddr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > // CHECK: } +// CHECK: %[[#Load0:]] = cir.load %[[#SavedFrameAddr]] : cir.ptr >, !cir.ptr +// CHECK: %[[#CoroFrameAddr:]] = cir.call @__builtin_coro_begin(%[[#CoroId]], %[[#Load0]]) // Call promise.get_return_object() to retrieve the task object. From a437dd44158e454b215bfd89e5658ab6966ce1ea Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 19 Dec 2022 20:36:12 -0300 Subject: [PATCH 0755/2301] [CIR][CIRGen][Coroutines] Instead of calling __builtin_coro_frame, get the coro frame from __builtin_coro_begin Update coroutines test with CIR lowering for initial_suspend coawait. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 7 +++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + clang/test/CIR/CodeGen/coro-task.cpp | 62 ++++++++++++++++++++++- 4 files changed, 71 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index c23d24fb8e1f..ae80c5f84b5e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -96,7 +96,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_coro_align: llvm_unreachable("NYI"); - case Builtin::BI__builtin_coro_frame: + case Builtin::BI__builtin_coro_frame: { + return buildCoroutineFrame(); + } case Builtin::BI__builtin_coro_free: case Builtin::BI__builtin_coro_size: { GlobalDecl gd{FD}; diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index cb799b2f252a..cadef17a5cb3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -125,6 +125,13 @@ RValue CIRGenFunction::buildCoroutineIntrinsic(const CallExpr *E, llvm_unreachable("NYI"); } +RValue CIRGenFunction::buildCoroutineFrame() { + if (CurCoro.Data && CurCoro.Data->CoroBegin) { + return RValue::get(CurCoro.Data->CoroBegin); + } + llvm_unreachable("NYI"); +} + static mlir::LogicalResult buildBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S, Stmt *Body) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index ab761212c31e..9fe4bbb1f820 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -787,6 +787,7 @@ class CIRGenFunction { AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); RValue buildCoroutineIntrinsic(const CallExpr *E, unsigned int IID); + RValue buildCoroutineFrame(); // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 18bc95282d79..f578a08b1bb8 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -100,6 +100,9 @@ co_invoke_fn co_invoke; // CHECK: ![[VoidTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task", i8> // CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct<"struct.folly::coro::Task::promise_type", i8> +// CHECK: ![[CoroHandleVoid:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", i8> +// CHECK: ![[CoroHandlePromise:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", i8> +// CHECK: ![[SuspendAlways:ty_.*]] = !cir.struct<"struct.std::suspend_always", i8> // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22struct2Efolly3A3Acoro3A3Aco_invoke_fn22 @@ -115,8 +118,6 @@ VoidTask silly_task() { co_await std::suspend_always(); } -// CHECK: cir.func builtin @__builtin_coro_frame() -> !cir.ptr attributes {builtin, sym_visibility = "private"} - // CHECK: cir.func @_Z10silly_taskv() -> ![[VoidTask]] { // Allocate promise. @@ -149,4 +150,61 @@ VoidTask silly_task() { // CHECK: %[[#RetObj:]] = cir.call @_ZN5folly4coro4TaskIvE12promise_type17get_return_objectEv(%[[#VoidPromisseAddr]]) : {{.*}} -> ![[VoidTask]] // CHECK: cir.store %[[#RetObj]], %[[#VoidTaskAddr]] : ![[VoidTask]] +// Start a new scope for the actual codegen for co_await, create temporary allocas for +// holding coroutine handle and the suspend_always struct. + +// CHECK: cir.scope { +// CHECK: %[[#SuspendAlwaysAddr:]] = cir.alloca ![[SuspendAlways]], {{.*}} ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: %[[#CoroHandleVoidAddr:]] = cir.alloca ![[CoroHandleVoid]], {{.*}} ["agg.tmp0"] {alignment = 1 : i64} +// CHECK: %[[#CoroHandlePromiseAddr:]] = cir.alloca ![[CoroHandlePromise]], {{.*}} ["agg.tmp1"] {alignment = 1 : i64} + +// Effectively execute `coawait promise_type::initial_suspend()` by calling initial_suspend() and getting +// the suspend_always struct to use for cir.await. Note that we return by-value since we defer ABI lowering +// to later passes, same is done elsewhere. + +// CHECK: %15 = cir.call @_ZN5folly4coro4TaskIvE12promise_type15initial_suspendEv(%2) +// CHECK: cir.store %15, %[[#SuspendAlwaysAddr]] : !ty_22struct2Estd3A3Asuspend_always22, cir.ptr + +// +// Here we start mapping co_await to cir.await. +// + +// First regions `ready` has a special cir.yield code to veto suspension. + +// CHECK: cir.await(ready : { +// CHECK: %16 = cir.call @_ZNSt14suspend_always11await_readyEv(%12) : (!cir.ptr) -> !cir.bool +// CHECK: cir.if %16 { +// CHECK: cir.yield break +// CHECK: } +// CHECK: cir.yield + +// Second region `suspend` contains the actual suspend logic. +// +// - Start by getting the coroutine handle using from_address(). +// - Implicit convert coroutine handle from task specific promisse +// specialization to a void one. +// - Call suspend_always::await_suspend() passing the handle. +// +// FIXME: add missing builtin calls. +// FIXME: add veto support for non-void await_suspends. + +// CHECK: }, suspend : { +// CHECK: %16 = cir.call @_ZNSt16coroutine_handleIN5folly4coro4TaskIvE12promise_typeEE12from_addressEPv(%[[#CoroFrameAddr]]) +// CHECK: cir.store %16, %[[#CoroHandlePromiseAddr]] : ![[CoroHandlePromise]] +// CHECK: %[[#CoroHandlePromiseReload:]] = cir.load %[[#CoroHandlePromiseAddr]] +// CHECK: cir.call @_ZNSt16coroutine_handleIvEC1IN5folly4coro4TaskIvE12promise_typeEEES_IT_E(%[[#CoroHandleVoidAddr]], %[[#CoroHandlePromiseReload]]) +// CHECK: %[[#CoroHandleVoidReload:]] = cir.load %[[#CoroHandleVoidAddr]] : cir.ptr , ![[CoroHandleVoid]] +// CHECK: cir.call @_ZNSt14suspend_always13await_suspendESt16coroutine_handleIvE(%[[#SuspendAlwaysAddr]], %[[#CoroHandleVoidReload]]) +// CHECK: cir.yield + +// Third region `resume` handles coroutine resuming logic. +// +// FIXME: add missing builtin calls. + +// CHECK: }, resume : { +// CHECK: cir.call @_ZNSt14suspend_always12await_resumeEv(%[[#SuspendAlwaysAddr]]) +// CHECK: cir.yield +// CHECK: },) +// CHECK: } + // CHECK: } From dfaa2d6949b489acf5f3adbc9560764160064c87 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 19 Dec 2022 20:21:36 -0500 Subject: [PATCH 0756/2301] [CIR][Lowering] Implement a first pass at cir.scope lowering This implementation mirrors memref::allocascopeop. They don't bother concerning themselves with whether or not there exists nested scopes, and I don't think it'll make a big difference for us. However, they do require a single block in the region which we do not. So this implemetation might need more work as we add more test cases. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 57 ++++++++++++++++++- clang/test/CIR/Lowering/scope.cir | 37 ++++++++++++ 2 files changed, 92 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/Lowering/scope.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7badab99d124..e4703651fd0d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -39,6 +39,59 @@ using namespace llvm; namespace cir { namespace direct { +class CIRScopeOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ScopeOp scopeOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::OpBuilder::InsertionGuard guard(rewriter); + auto loc = scopeOp.getLoc(); + + // Split the current block before the ScopeOp to create the inlining point. + auto *currentBlock = rewriter.getInsertionBlock(); + auto *remainingOpsBlock = + rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); + mlir::Block *continueBlock; + if (scopeOp.getNumResults() == 0) + continueBlock = remainingOpsBlock; + else + llvm_unreachable("NYI"); + + // Inline body region. + auto *beforeBody = &scopeOp.getRegion().front(); + auto *afterBody = &scopeOp.getRegion().back(); + rewriter.inlineRegionBefore(scopeOp.getRegion(), continueBlock); + + // Save stack and then branch into the body of the region. + rewriter.setInsertionPointToEnd(currentBlock); + // TODO(CIR): stackSaveOp + // auto stackSaveOp = rewriter.create( + // loc, mlir::LLVM::LLVMPointerType::get( + // mlir::IntegerType::get(scopeOp.getContext(), 8))); + rewriter.create(loc, mlir::ValueRange(), beforeBody); + + // Replace the scopeop return with a branch that jumps out of the body. + // Stack restore before leaving the body region. + rewriter.setInsertionPointToEnd(afterBody); + auto yieldOp = cast(afterBody->getTerminator()); + auto branchOp = rewriter.replaceOpWithNewOp( + yieldOp, yieldOp.getArgs(), continueBlock); + + // // Insert stack restore before jumping out of the body of the region. + rewriter.setInsertionPoint(branchOp); + // TODO(CIR): stackrestore? + // rewriter.create(loc, stackSaveOp); + + // Replace the op with values return from the body region. + rewriter.replaceOp(scopeOp, continueBlock->getArguments()); + + return mlir::success(); + } +}; + class CIRReturnLowering : public mlir::OpConversionPattern { public: @@ -480,8 +533,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add(patterns.getContext()); patterns.add( - converter, patterns.getContext()); + CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, + CIRScopeOpLowering>(converter, patterns.getContext()); } static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir new file mode 100644 index 000000000000..c816ca95c750 --- /dev/null +++ b/clang/test/CIR/Lowering/scope.cir @@ -0,0 +1,37 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + cir.scope { + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.cst(4 : i32) : i32 + cir.store %1, %0 : i32, cir.ptr + } + cir.return + } +} + +// MLIR: llvm.func @foo() { +// MLIR-NEXT: llvm.br ^bb1 +// MLIR-NEXT: ^bb1: +// MLIR-DAG: [[v1:%[0-9]]] = llvm.mlir.constant(4 : i32) : i32 +// MLIR-DAG: [[v2:%[0-9]]] = llvm.mlir.constant(1 : index) : i64 +// MLIR-DAG: [[v3:%[0-9]]] = llvm.alloca [[v2]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: llvm.store [[v1]], [[v3]] : i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb2 +// MLIR-NEXT: ^bb2: +// MLIR-NEXT: llvm.return + + +// LLVM: define void @foo() { +// LLVM-NEXT: br label %1 +// LLVM-EMPTY: +// LLVM-NEXT: 1: +// LLVM-NEXT: %2 = alloca i32, i64 1, align 4 +// LLVM-NEXT: store i32 4, ptr %2, align 4 +// LLVM-NEXT: br label %3 +// LLVM-EMPTY: +// LLVM-NEXT: 3: +// LLVM-NEXT: ret void +// LLVM-NEXT: } From a01881695dcb53b1f0b3682f5bfc8d0fc05cf4eb Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 14 Dec 2022 23:32:02 -0500 Subject: [PATCH 0757/2301] [CIR][CodeGen] Support lowering SOB_{Defined,Trapping} inc/dec This is super verbose, but I'm leaving around the old style code here to make it clear that this behavior is implemented during lowering. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 58aba4103621..08cfa054a564 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -329,24 +329,26 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value buildIncDecConsiderOverflowBehavior(const UnaryOperator *E, mlir::Value InVal, bool IsInc) { + // NOTE(CIR): The SignedOverflowBehavior is attached to the global ModuleOp + // and the nsw behavior is handled during lowering. + auto Kind = E->isIncrementOp() ? mlir::cir::UnaryOpKind::Inc + : mlir::cir::UnaryOpKind::Dec; switch (CGF.getLangOpts().getSignedOverflowBehavior()) { - case LangOptions::SOB_Defined: { - auto Kind = E->isIncrementOp() ? mlir::cir::UnaryOpKind::Inc - : mlir::cir::UnaryOpKind::Dec; + case LangOptions::SOB_Defined: return buildUnaryOp(E, Kind, InVal); - } case LangOptions::SOB_Undefined: - // if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) - // return Builder.CreateNSWAdd(InVal, Amount, Name); + if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return buildUnaryOp(E, Kind, InVal); llvm_unreachable( "inc/dec overflow behavior SOB_Undefined not implemented yet"); break; case LangOptions::SOB_Trapping: + if (!E->canOverflow()) + return buildUnaryOp(E, Kind, InVal); llvm_unreachable( "inc/dec overflow behavior SOB_Trapping not implemented yet"); break; } - llvm_unreachable("Unknown SignedOverflowBehaviorTy"); } mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { From 82012af9ce8fd998bf245cee5b420c1b38c66cab Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 19 Dec 2022 23:41:48 -0500 Subject: [PATCH 0758/2301] [CIR][CodeGen] Add some extra guards to ifstmt lowering We missed some cases here on the if statement's conditions. So explicitly guard against them here. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 32 ++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 32 ++++++++++++---------- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 4 +++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 ++ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 6 ++-- 5 files changed, 56 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 328c7cf788e0..415b5c96437a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -522,9 +522,10 @@ mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *E) { QualType BoolTy = getContext().BoolTy; SourceLocation Loc = E->getExprLoc(); // TODO: CGFPOptionsRAII for FP stuff. - assert(!E->getType()->isAnyComplexType() && - "complex to scalar not implemented"); - return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc); + if (!E->getType()->isAnyComplexType()) + return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc); + + llvm_unreachable("complex to scalar not implemented"); } LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { @@ -1561,9 +1562,34 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, // TODO(CIR): scoped ApplyDebugLocation DL(*this, Cond); // TODO(CIR): __builtin_unpredictable and profile counts? cond = cond->IgnoreParens(); + + // if (const BinaryOperator *CondBOp = dyn_cast(cond)) { + // llvm_unreachable("binaryoperator ifstmt NYI"); + // } + + if (const UnaryOperator *CondUOp = dyn_cast(cond)) { + llvm_unreachable("unaryoperator ifstmt NYI"); + } + + if (const ConditionalOperator *CondOp = dyn_cast(cond)) { + llvm_unreachable("conditionaloperator ifstmt NYI"); + } + + if (const CXXThrowExpr *Throw = dyn_cast(cond)) { + llvm_unreachable("throw expr ifstmt nyi"); + } + + // Emit the code with the fully general case. mlir::Value condV = evaluateExprAsBool(cond); mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); + auto *Call = dyn_cast(cond->IgnoreImpCasts()); + if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { + llvm_unreachable("NYI"); + } + + // TODO(CIR): emitCondLikelihoodViaExpectIntrinsic + builder.create( loc, condV, elseS, /*thenBuilder=*/ diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 08cfa054a564..f17a9d8e803c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -45,6 +45,7 @@ class ScalarExprEmitter : public StmtVisitor { return I; } + mlir::Type ConvertType(QualType T) { return CGF.ConvertType(T); } LValue buildLValue(const Expr *E) { return CGF.buildLValue(E); } //===--------------------------------------------------------------------===// @@ -783,13 +784,11 @@ class ScalarExprEmitter : public StmtVisitor { if (auto *MPT = llvm::dyn_cast(SrcType)) assert(0 && "not implemented"); - assert((SrcType->isIntegerType() || - Src.getType().isa<::mlir::cir::PointerType>()) && - "Unknown scalar type to convert"); + if (SrcType->isIntegerType()) + return buildIntToBoolConversion(Src, loc); - assert(Src.getType().isa() && - "pointer source not implemented"); - return buildIntToBoolConversion(Src, loc); + assert(Src.getType().isa<::mlir::cir::PointerType>()); + llvm_unreachable("pointer source not implemented"); } /// Emit a conversion from the specified type to the specified destination @@ -801,9 +800,9 @@ class ScalarExprEmitter : public StmtVisitor { SourceLocation Loc, ScalarConversionOpts Opts = ScalarConversionOpts()) { if (SrcType->isFixedPointType()) { - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); } else if (DstType->isFixedPointType()) { - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); } SrcType = CGF.getContext().getCanonicalType(SrcType); @@ -813,6 +812,7 @@ class ScalarExprEmitter : public StmtVisitor { if (DstType->isVoidType()) return nullptr; + mlir::Type SrcTy = Src.getType(); // Handle conversions to bool first, they are special: comparisons against @@ -820,32 +820,32 @@ class ScalarExprEmitter : public StmtVisitor { if (DstType->isBooleanType()) return buildConversionToBool(Src, SrcType, CGF.getLoc(Loc)); - mlir::Type DstTy = CGF.getCIRType(DstType); + mlir::Type DstTy = ConvertType(DstType); // Cast from half through float if half isn't a native type. if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); } // TODO(cir): LLVM codegen ignore conversions like int -> uint, // is there anything to be done for CIR here? if (SrcTy == DstTy) { if (Opts.EmitImplicitIntegerSignChangeChecks) - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); return Src; } // Handle pointer conversions next: pointers can only be converted to/from // other pointers and integers. if (DstTy.isa<::mlir::cir::PointerType>()) { - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); } if (SrcTy.isa<::mlir::cir::PointerType>()) { // Must be a ptr to int cast. assert(DstTy.isa() && "not ptr->int?"); - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); } // A scalar can be splatted to an extended vector of the same element type @@ -856,11 +856,13 @@ class ScalarExprEmitter : public StmtVisitor { SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"); - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); } if (SrcType->isMatrixType() && DstType->isMatrixType()) - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); + + // TODO(CIR): Support VectorTypes // Finally, we have the arithmetic types: real int/float. mlir::Value Res = nullptr; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index b51233e88ee3..c1e3971030bd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -41,6 +41,10 @@ clang::ASTContext &CIRGenFunction::getContext() const { return CGM.getASTContext(); } +mlir::Type CIRGenFunction::ConvertType(QualType T) { + return CGM.getTypes().ConvertType(T); +} + TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { type = type.getCanonicalType(); while (true) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9fe4bbb1f820..85cb11ceccd0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -572,6 +572,8 @@ class CIRGenFunction { std::string getCounterRefTmpAsString(); std::string getCounterAggTmpAsString(); + mlir::Type ConvertType(clang::QualType T); + /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 3b2ab5edfcc2..fbb0bdfcbabb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -351,7 +351,9 @@ static mlir::Location getIfLocs(CIRGenFunction &CGF, const clang::Stmt *thenS, mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { // The else branch of a consteval if statement is always the only branch // that can be runtime evaluated. - assert(!S.isConsteval() && "not implemented"); + if (S.isConsteval()) { + llvm_unreachable("consteval nyi"); + } mlir::LogicalResult res = mlir::success(); // C99 6.8.4.1: The first substatement is executed if the expression @@ -370,7 +372,7 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { bool CondConstant; if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, S.isConstexpr())) { - assert(0 && "not implemented"); + llvm_unreachable("ConstantFoldsToSimpleInteger NYI"); } // TODO: PGO and likelihood. From 099846208fdf405ffb433778a708f342ad415ab0 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 20 Dec 2022 02:19:34 -0500 Subject: [PATCH 0759/2301] [CIR][Lowering] Implement lowering of int_to_bool casts --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 29 ++++++++++++++++++- clang/test/CIR/Lowering/cast.cir | 23 +++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/cast.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e4703651fd0d..ba2d6eb0513b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -39,6 +39,32 @@ using namespace llvm; namespace cir { namespace direct { +class CIRCastOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CastOp castOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto src = castOp.getSrc(); + switch (castOp.getKind()) { + case mlir::cir::CastKind::int_to_bool: { + auto zero = rewriter.create( + src.getLoc(), src.getType(), + mlir::IntegerAttr::get(src.getType(), 0)); + rewriter.replaceOpWithNewOp( + castOp, castOp.getSrc().getType(), mlir::cir::CmpOpKind::ne, src, + zero); + break; + } + default: + llvm_unreachable("NYI"); + } + + return mlir::success(); + } +}; + class CIRScopeOpLowering : public mlir::OpConversionPattern { public: @@ -534,7 +560,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add(converter, patterns.getContext()); + CIRScopeOpLowering, CIRCastOpLowering>(converter, + patterns.getContext()); } static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir new file mode 100644 index 000000000000..d2b2b632a35f --- /dev/null +++ b/clang/test/CIR/Lowering/cast.cir @@ -0,0 +1,23 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo(%arg0: i32) -> i32 { + %4 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool + cir.return %arg0 : i32 + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @foo(%arg0: i32) -> i32 { +// MLIR-NEXT: [[v0:%[0-9]]] = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: [[v1:%[0-9]]] = llvm.icmp "ne" %arg0, %0 : i32 +// MLIR-NEXT: llvm.return %arg0 : i32 +// MLIR-NEXT: } +// MLIR-NEXT:} + + +// LLVM: define i32 @foo(i32 %0) { +// LLVM-NEXT: %2 = icmp ne i32 %0, 0 +// LLVM-NEXT: ret i32 %0 +// LLVM-NEXT: } From ae95598f7ff757aabeea08b67dfcbb9a925dde9b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 22 Dec 2022 18:07:00 -0300 Subject: [PATCH 0760/2301] [CIR][CIRGen][Coroutines] Fix handling of final suspend and return blocks This finally allows us to check the full VoidTask testing. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 27 ++++++++---- clang/test/CIR/CodeGen/coro-task.cpp | 44 ++++++++++++++++---- 3 files changed, 56 insertions(+), 17 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b7b17989a09a..d420e4958218 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1362,7 +1362,7 @@ def AwaitOp : CIR_Op<"await", Breaking this up in regions allow individual scrutiny of conditions which might lead to folding some of them out. Lowerings coming out of CIR, e.g. LLVM, should use the `suspend` region to track more - lower level codegen (e.g. intrinsic emission for saving/suspending). + lower level codegen (e.g. intrinsic emission for coro.save/coro.suspend). From the C++ snippet we get: diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index cadef17a5cb3..85ed564d5aaf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -34,6 +34,11 @@ struct cir::CGCoroData { // Stores the result of __builtin_coro_begin call. mlir::Value CoroBegin = nullptr; + // Stores the insertion point for final suspend, this happens after the + // promise call (return_xxx promise member) but before a cir.br to the return + // block. + mlir::Operation *FinalSuspendInsPoint; + // How many co_return statements are in the coroutine. Used to decide whether // we need to add co_return; equivalent at the end of the user authored body. unsigned CoreturnCount = 0; @@ -334,8 +339,13 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0; if (CanFallthrough || HasCoreturns) { CurCoro.Data->CurrentAwaitKind = AwaitKind::Final; - if (buildStmt(S.getFinalSuspendStmt(), /*useCurrentScope=*/true).failed()) - return mlir::failure(); + { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPoint(CurCoro.Data->FinalSuspendInsPoint); + if (buildStmt(S.getFinalSuspendStmt(), /*useCurrentScope=*/true) + .failed()) + return mlir::failure(); + } } } return mlir::success(); @@ -415,6 +425,10 @@ static LValueOrRValue buildSuspendExpression( }, /*suspendBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { + // Note that differently from LLVM codegen we do not emit coro.save + // and coro.suspend here, that should be done as part of lowering this + // to LLVM dialect (or some other MLIR dialect) + // A invalid suspendRet indicates "void returning await_suspend" auto suspendRet = CGF.buildScalarExpr(S.getSuspendExpr()); @@ -500,17 +514,16 @@ mlir::LogicalResult CIRGenFunction::buildCoreturnStmt(CoreturnStmt const &S) { } if (buildStmt(S.getPromiseCall(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - // FIXME: do the proper things like ReturnStmt does - // EmitBranchThroughCleanup(CurCoro.Data->FinalJD); - // Create a new return block (if not existent) and add a branch to // it. The actual return instruction is only inserted during current // scope cleanup handling. auto loc = getLoc(S.getSourceRange()); auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); - builder.create(loc, retBlock); + CurCoro.Data->FinalSuspendInsPoint = + builder.create(loc, retBlock); - // Insert the new block to continue codegen after branch to ret block. + // Insert the new block to continue codegen after branch to ret block, + // this will likely be an empty block. builder.createBlock(builder.getBlock()->getParent()); // TODO(cir): LLVM codegen for a cleanup on cleanupScope here. diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index f578a08b1bb8..d813d85a0b1a 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -162,8 +162,8 @@ VoidTask silly_task() { // the suspend_always struct to use for cir.await. Note that we return by-value since we defer ABI lowering // to later passes, same is done elsewhere. -// CHECK: %15 = cir.call @_ZN5folly4coro4TaskIvE12promise_type15initial_suspendEv(%2) -// CHECK: cir.store %15, %[[#SuspendAlwaysAddr]] : !ty_22struct2Estd3A3Asuspend_always22, cir.ptr +// CHECK: %[[#Tmp0:]] = cir.call @_ZN5folly4coro4TaskIvE12promise_type15initial_suspendEv(%[[#VoidPromisseAddr]]) +// CHECK: cir.store %[[#Tmp0]], %[[#SuspendAlwaysAddr]] // // Here we start mapping co_await to cir.await. @@ -172,8 +172,8 @@ VoidTask silly_task() { // First regions `ready` has a special cir.yield code to veto suspension. // CHECK: cir.await(ready : { -// CHECK: %16 = cir.call @_ZNSt14suspend_always11await_readyEv(%12) : (!cir.ptr) -> !cir.bool -// CHECK: cir.if %16 { +// CHECK: %[[#ReadyVeto:]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[#SuspendAlwaysAddr]]) +// CHECK: cir.if %[[#ReadyVeto]] { // CHECK: cir.yield break // CHECK: } // CHECK: cir.yield @@ -185,12 +185,11 @@ VoidTask silly_task() { // specialization to a void one. // - Call suspend_always::await_suspend() passing the handle. // -// FIXME: add missing builtin calls. // FIXME: add veto support for non-void await_suspends. // CHECK: }, suspend : { -// CHECK: %16 = cir.call @_ZNSt16coroutine_handleIN5folly4coro4TaskIvE12promise_typeEE12from_addressEPv(%[[#CoroFrameAddr]]) -// CHECK: cir.store %16, %[[#CoroHandlePromiseAddr]] : ![[CoroHandlePromise]] +// CHECK: %[[#FromAddrRes:]] = cir.call @_ZNSt16coroutine_handleIN5folly4coro4TaskIvE12promise_typeEE12from_addressEPv(%[[#CoroFrameAddr]]) +// CHECK: cir.store %[[#FromAddrRes]], %[[#CoroHandlePromiseAddr]] : ![[CoroHandlePromise]] // CHECK: %[[#CoroHandlePromiseReload:]] = cir.load %[[#CoroHandlePromiseAddr]] // CHECK: cir.call @_ZNSt16coroutine_handleIvEC1IN5folly4coro4TaskIvE12promise_typeEEES_IT_E(%[[#CoroHandleVoidAddr]], %[[#CoroHandlePromiseReload]]) // CHECK: %[[#CoroHandleVoidReload:]] = cir.load %[[#CoroHandleVoidAddr]] : cir.ptr , ![[CoroHandleVoid]] @@ -198,8 +197,6 @@ VoidTask silly_task() { // CHECK: cir.yield // Third region `resume` handles coroutine resuming logic. -// -// FIXME: add missing builtin calls. // CHECK: }, resume : { // CHECK: cir.call @_ZNSt14suspend_always12await_resumeEv(%[[#SuspendAlwaysAddr]]) @@ -207,4 +204,33 @@ VoidTask silly_task() { // CHECK: },) // CHECK: } +// Since we already tested cir.await guts above, the remaining checks for: +// - The actual user written co_await +// - The promise call +// - The final suspend co_await +// - Return + +// The actual user written co_await +// CHECK: cir.scope { +// CHECK: cir.await(ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: },) +// CHECK: } + +// The promise call +// CHECK: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv(%[[#VoidPromisseAddr]]) + +// The final suspend co_await +// CHECK: cir.scope { +// CHECK: cir.await(ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: },) // CHECK: } + +// Return +// FIXME: add missing builtin calls +// CHECK: %[[#Tmp1:]] = cir.load %[[#VoidTaskAddr]] +// CHECK-NEXT: cir.return %[[#Tmp1]] +// CHECK-NEXT: } From 31e5cc8df3882f47f570ea580fd4c7e5bf4b2dbf Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 22 Dec 2022 16:07:36 -0800 Subject: [PATCH 0761/2301] [CIR] Remove the setting of MLIR_TABLEGEN_EXE This is a cache var, we don't need to set it here. It also breaks support for cross compilation as the target_file isn't the right one in that case. --- clang/include/clang/CIR/Dialect/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/CMakeLists.txt b/clang/include/clang/CIR/Dialect/CMakeLists.txt index f4c99a2b9a8f..383bf5231f57 100644 --- a/clang/include/clang/CIR/Dialect/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/CMakeLists.txt @@ -1,7 +1,6 @@ set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --src-root set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include) -set(MLIR_TABLEGEN_EXE $) include_directories(SYSTEM ${MLIR_INCLUDE_DIR}) include_directories(SYSTEM ${MLIR_TABLEGEN_OUTPUT_DIR}) From cd0a6661335ad2efca0ddfa63aa2c30aa83b200b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Dec 2022 13:14:31 -0300 Subject: [PATCH 0762/2301] [CIR][CIRGen] Add a coroutine tag to mark such functions --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 1 + clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 5 +++++ clang/test/CIR/CodeGen/coro-task.cpp | 2 +- 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d420e4958218..061f3c59abed 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1203,6 +1203,7 @@ def FuncOp : CIR_Op<"func", [ let arguments = (ins SymbolNameAttr:$sym_name, TypeAttrOf:$function_type, UnitAttr:$builtin, + UnitAttr:$coroutine, DefaultValuedAttr:$linkage, OptionalAttr:$sym_visibility, diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 85ed564d5aaf..19eb503c7915 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -233,6 +233,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { auto openCurlyLoc = getLoc(S.getBeginLoc()); auto nullPtrCst = builder.getNullPtr(builder.getInt8PtrTy(), openCurlyLoc); + CurFn.setCoroutineAttr(mlir::UnitAttr::get(builder.getContext())); auto coroId = buildCoroIDBuiltinCall(openCurlyLoc, nullPtrCst); createCoroData(*this, CurCoro, coroId); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 09e9bccedf81..6da59be9d0a4 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1158,6 +1158,8 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { if (::mlir::succeeded(parser.parseOptionalKeyword("builtin"))) state.addAttribute("builtin", parser.getBuilder().getUnitAttr()); + if (::mlir::succeeded(parser.parseOptionalKeyword("coroutine"))) + state.addAttribute("coroutine", parser.getBuilder().getUnitAttr()); // Default to external linkage if no keyword is provided. state.addAttribute(getLinkageAttrNameString(), @@ -1223,6 +1225,9 @@ void cir::FuncOp::print(OpAsmPrinter &p) { if (getBuiltin()) p << "builtin "; + if (getCoroutine()) + p << "coroutine "; + if (getLinkage() != GlobalLinkageKind::ExternalLinkage) p << stringifyGlobalLinkageKind(getLinkage()) << ' '; diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index d813d85a0b1a..58cfe1321324 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -118,7 +118,7 @@ VoidTask silly_task() { co_await std::suspend_always(); } -// CHECK: cir.func @_Z10silly_taskv() -> ![[VoidTask]] { +// CHECK: cir.func coroutine @_Z10silly_taskv() -> ![[VoidTask]] {{.*}} { // Allocate promise. From bdea51812cfa4f2864dfe5504de4e3d6cb33ccec Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Dec 2022 16:20:20 -0300 Subject: [PATCH 0763/2301] [CIR][Coroutines] Update cir.func docs and add more verifier pieces --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 23 ++++++++++++++++++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 +++++++++++++- clang/test/CIR/IR/invalid.cir | 6 +++++ 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 061f3c59abed..fdc93d4fd552 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1187,16 +1187,35 @@ def FuncOp : CIR_Op<"func", [ The function linkage information is specified by `linkage`, as defined by `GlobalLinkageKind` attribute. + A compiler builtin function must be marked as `builtin` for further + processing when lowering from CIR. + + The `coroutine` keyword is used to mark coroutine function, which requires + at least one `cir.await` instruction to be used in its body. + Example: ```mlir // External function definitions. - func @abort() + cir.func @abort() // A function with internal linkage. - func internal @count(%x: i64) -> (i64) + cir.func internal @count(%x: i64) -> (i64) return %x : i64 } + + // Linkage information + cir.func linkonce_odr @some_method(...) + + // Builtin function + cir.func builtin @__builtin_coro_end(!cir.ptr, !cir.bool) -> !cir.bool + + // Coroutine + cir.func coroutine @_Z10silly_taskv() -> !CoroTask { + ... + cir.await(...) + ... + } ``` }]; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 6da59be9d0a4..9614c62fcc0a 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1262,9 +1262,10 @@ LogicalResult cir::FuncOp::verifyType() { return success(); } -// Verifies linkage types, similar to LLVM: +// Verifies linkage types // - functions don't have 'common' linkage // - external functions have 'external' or 'extern_weak' linkage +// - coroutine body must use at least one cir.await operation. LogicalResult cir::FuncOp::verify() { if (getLinkage() == cir::GlobalLinkageKind::CommonLinkage) return emitOpError() << "functions cannot have '" @@ -1284,6 +1285,19 @@ LogicalResult cir::FuncOp::verify() { << "' linkage"; return success(); } + + if (!isDeclaration() && getCoroutine()) { + bool foundAwait = false; + this->walk([&](Operation *op) { + if (auto await = dyn_cast(op)) { + foundAwait = true; + return; + } + }); + if (!foundAwait) + return emitOpError() + << "coroutine body must use at least one cir.await op"; + } return success(); } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index ceec9146b6c4..84130d4e3172 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -227,3 +227,9 @@ cir.func @unary1() { module { cir.global external @v = #cir.zero : i32 // expected-error {{zero expects struct type}} } + +// ----- + +cir.func coroutine @bad_task() { // expected-error {{coroutine body must use at least one cir.await op}} + cir.return +} \ No newline at end of file From 451c279701960208862891cfbc18c5f2093a1012 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Dec 2022 16:21:15 -0300 Subject: [PATCH 0764/2301] [CIR][CIRGen] Emit __builtin_coro_end before returning out of coroutines --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 9 ++++++- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 32 ++++++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 5 ++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 ++ clang/lib/CIR/CodeGen/CIRGenModule.h | 1 + clang/test/CIR/CodeGen/coro-task.cpp | 8 ++++-- 6 files changed, 48 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 4e5df58111cc..9b19cd830461 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -91,13 +91,20 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::IntegerType::get(getContext(), 8)); } - /// Get a constant 32-bit value. + // Get a constant 32-bit value. mlir::cir::ConstantOp getInt32(uint32_t C, mlir::Location loc) { auto int32Ty = mlir::IntegerType::get(getContext(), 32); return create(loc, int32Ty, mlir::IntegerAttr::get(int32Ty, C)); } + // Get a bool + mlir::Value getBool(bool state, mlir::Location loc) { + return create( + loc, getBoolTy(), mlir::BoolAttr::get(getContext(), state)); + } + + // Get the bool type mlir::cir::BoolType getBoolTy() { return ::mlir::cir::BoolType::get(getContext()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 19eb503c7915..4bcbe272ceef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -228,6 +228,28 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, mlir::ValueRange{CurCoro.Data->CoroId.getResult(0), coroframeAddr}); } +mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, + mlir::Value nullPtr) { + auto int8PtrTy = builder.getInt8PtrTy(); + auto boolTy = builder.getBoolTy(); + mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroEnd); + + mlir::cir::FuncOp fnOp; + if (!builtin) { + fnOp = CGM.createCIRFunction( + loc, CGM.builtinCoroEnd, + builder.getFunctionType(mlir::TypeRange{int8PtrTy, boolTy}, + mlir::TypeRange{boolTy}), + /*FD=*/nullptr); + assert(fnOp && "should always succeed"); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + } else + fnOp = cast(builtin); + + return builder.create( + loc, fnOp, mlir::ValueRange{nullPtr, builder.getBool(false, loc)}); +} + mlir::LogicalResult CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { auto openCurlyLoc = getLoc(S.getBeginLoc()); @@ -299,8 +321,6 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { if (buildStmt(S.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - // FIXME(cir): handle promiseAddr and coro id related stuff? - // ReturnValue should be valid as long as the coroutine's return type // is not void. The assertion could help us to reduce the check later. assert(ReturnValue.isValid() == (bool)S.getReturnStmt()); @@ -332,10 +352,10 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { if (buildBodyAndFallthrough(*this, S, S.getBody()).failed()) return mlir::failure(); - // See if we need to generate final suspend. - // const bool CanFallthrough = Builder.GetInsertBlock(); - // FIXME: LLVM tracks fallthrough by checking the insertion - // point is valid, we can probably do better. + // FIXME(cir): LLVM checks CanFallthrough by looking into the availability + // of the insert block, do we need this? Likely not since fallthroughs + // usually get an implicit AST node for a CoreturnStmt. + // From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock(); const bool CanFallthrough = false; const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0; if (CanFallthrough || HasCoreturns) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index c1e3971030bd..76e4e4b2b366 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -277,6 +277,11 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { auto *localScope = CGF.currLexScope; auto buildReturn = [&](mlir::Location loc) { + // If we are on a coroutine, add the coro_end builtin call. + if (CGF.CurFn.getCoroutine()) + CGF.buildCoroEndBuiltinCall( + loc, builder.getNullPtr(builder.getInt8PtrTy(), loc)); + if (CGF.FnRetCIRTy.has_value()) { // If there's anything to return, load it first. auto val = builder.create(loc, *CGF.FnRetCIRTy, *CGF.FnRetAlloca); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 85cb11ceccd0..5932d6ba76a2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -784,6 +784,8 @@ class CIRGenFunction { mlir::cir::CallOp buildCoroAllocBuiltinCall(mlir::Location loc); mlir::cir::CallOp buildCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr); + mlir::cir::CallOp buildCoroEndBuiltinCall(mlir::Location loc, + mlir::Value nullPtr); RValue buildCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index e22e02d532ca..d0bf15aac548 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -413,6 +413,7 @@ class CIRGenModule { static constexpr const char *builtinCoroId = "__builtin_coro_id"; static constexpr const char *builtinCoroAlloc = "__builtin_coro_alloc"; static constexpr const char *builtinCoroBegin = "__builtin_coro_begin"; + static constexpr const char *builtinCoroEnd = "__builtin_coro_end"; private: // An ordered map of canonical GlobalDecls to their mangled names. diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 58cfe1321324..8b7d8395327d 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -229,8 +229,12 @@ VoidTask silly_task() { // CHECK: },) // CHECK: } -// Return -// FIXME: add missing builtin calls +// Call builtin coro end and return + +// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.cst(#cir.null : !cir.ptr) +// CHECK-NEXT: %[[#CoroEndArg1:]] = cir.cst(false) : !cir.bool +// CHECK-NEXT: = cir.call @__builtin_coro_end(%[[#CoroEndArg0]], %[[#CoroEndArg1]]) + // CHECK: %[[#Tmp1:]] = cir.load %[[#VoidTaskAddr]] // CHECK-NEXT: cir.return %[[#Tmp1]] // CHECK-NEXT: } From 8f1e707146b8340075e88ad0403950ffdb40adb0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Dec 2022 17:31:29 -0300 Subject: [PATCH 0765/2301] [CIR][NFC] Avoid hardcoded attribute names when building operations --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 ++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 18 +++++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index fdc93d4fd552..1287fa2f5ada 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -64,6 +64,8 @@ def CastOp : CIR_Op<"cast", [Pure]> { - `int_to_bool` - `array_to_ptrdecay` - `integral` + - `bitcast` + - `floating` This is effectively a subset of the rules from `llvm-project/clang/include/clang/AST/OperationKinds.def`; but note that some diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9614c62fcc0a..fc7820a9c000 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -931,7 +931,7 @@ void LoopOp::build(OpBuilder &builder, OperationState &result, OpBuilder::InsertionGuard guard(builder); ::mlir::cir::LoopOpKindAttr kindAttr = cir::LoopOpKindAttr::get(builder.getContext(), kind); - result.addAttribute("kind", kindAttr); + result.addAttribute(getKindAttrName(result.name), kindAttr); Region *condRegion = result.addRegion(); builder.createBlock(condRegion); @@ -1095,11 +1095,12 @@ void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, odsState.addAttribute(getSymTypeAttrName(odsState.name), ::mlir::TypeAttr::get(sym_type)); if (isConstant) - odsState.addAttribute("constant", odsBuilder.getUnitAttr()); + odsState.addAttribute(getConstantAttrName(odsState.name), + odsBuilder.getUnitAttr()); ::mlir::cir::GlobalLinkageKindAttr linkageAttr = cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage); - odsState.addAttribute("linkage", linkageAttr); + odsState.addAttribute(getLinkageAttrName(odsState.name), linkageAttr); } //===----------------------------------------------------------------------===// @@ -1156,10 +1157,13 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, } ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { - if (::mlir::succeeded(parser.parseOptionalKeyword("builtin"))) - state.addAttribute("builtin", parser.getBuilder().getUnitAttr()); - if (::mlir::succeeded(parser.parseOptionalKeyword("coroutine"))) - state.addAttribute("coroutine", parser.getBuilder().getUnitAttr()); + auto builtinNameAttr = getBuiltinAttrName(state.name); + auto coroutineNameAttr = getCoroutineAttrName(state.name); + if (::mlir::succeeded(parser.parseOptionalKeyword(builtinNameAttr.strref()))) + state.addAttribute(builtinNameAttr, parser.getBuilder().getUnitAttr()); + if (::mlir::succeeded( + parser.parseOptionalKeyword(coroutineNameAttr.strref()))) + state.addAttribute(coroutineNameAttr, parser.getBuilder().getUnitAttr()); // Default to external linkage if no keyword is provided. state.addAttribute(getLinkageAttrNameString(), From 7a88f4b9f93d13dc02819ba8c312eb5da0d09a13 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Dec 2022 17:32:17 -0300 Subject: [PATCH 0766/2301] [CIR][CIRGen][Coroutines] Add {init, final, user} kinds for cir.await add support codegen for them --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 22 +++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 23 ++++++++++---------- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 3 +++ clang/test/CIR/CodeGen/coro-task.cpp | 6 ++--- 4 files changed, 37 insertions(+), 17 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1287fa2f5ada..fc0759e137fe 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1356,6 +1356,17 @@ def CallOp : CIR_Op<"call", // AwaitOp //===----------------------------------------------------------------------===// +def AK_Initial : I32EnumAttrCase<"init", 1>; +def AK_User : I32EnumAttrCase<"user", 2>; +def AK_Final : I32EnumAttrCase<"final", 3>; + +def AwaitKind : I32EnumAttr< + "AwaitKind", + "await kind", + [AK_Initial, AK_User, AK_Final]> { + let cppNamespace = "::mlir::cir"; +} + def AwaitOp : CIR_Op<"await", [DeclareOpInterfaceMethods, RecursivelySpeculatable, NoRegionArguments]> { @@ -1386,12 +1397,17 @@ def AwaitOp : CIR_Op<"await", of CIR, e.g. LLVM, should use the `suspend` region to track more lower level codegen (e.g. intrinsic emission for coro.save/coro.suspend). + There are also 3 flavors of `cir.await` available: + - `init`: compiler generated initial suspend via implicit `co_await`. + - `user`: also known as normal, representing user written co_await's. + - `final`: compiler generated final suspend via implicit `co_await`. + From the C++ snippet we get: ```mlir cir.scope { ... // auto &&x = CommonExpr(); - cir.await(ready : { + cir.await(user, ready : { ... // x.await_ready() }, suspend : { ... // x.await_suspend() @@ -1405,11 +1421,12 @@ def AwaitOp : CIR_Op<"await", as part of the enclosing await scope. }]; + let arguments = (ins AwaitKind:$kind); let regions = (region SizedRegion<1>:$ready, SizedRegion<1>:$suspend, SizedRegion<1>:$resume); let assemblyFormat = [{ - `(` + `(` $kind `,` `ready` `:` $ready `,` `suspend` `:` $suspend `,` `resume` `:` $resume `,` @@ -1420,6 +1437,7 @@ def AwaitOp : CIR_Op<"await", let skipDefaultBuilders = 1; let builders = [ OpBuilder<(ins + "mlir::cir::AwaitKind":$kind, CArg<"function_ref", "nullptr">:$readyBuilder, CArg<"function_ref", diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 4bcbe272ceef..be2c3e838add 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -18,14 +18,11 @@ using namespace clang; using namespace cir; -namespace { -enum class AwaitKind { Init, Normal, Yield, Final }; -} // namespace struct cir::CGCoroData { // What is the current await expression kind and how many // await/yield expressions were encountered so far. // These are used to generate pretty labels for await expressions in LLVM IR. - AwaitKind CurrentAwaitKind = AwaitKind::Init; + mlir::cir::AwaitKind CurrentAwaitKind = mlir::cir::AwaitKind::init; // Stores the __builtin_coro_id emitted in the function so that we can supply // it as the first argument to other builtins. @@ -340,11 +337,11 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { } // FIXME(cir): EHStack.pushCleanup(EHCleanup); - CurCoro.Data->CurrentAwaitKind = AwaitKind::Init; + CurCoro.Data->CurrentAwaitKind = mlir::cir::AwaitKind::init; if (buildStmt(S.getInitSuspendStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - CurCoro.Data->CurrentAwaitKind = AwaitKind::Normal; + CurCoro.Data->CurrentAwaitKind = mlir::cir::AwaitKind::user; // FIXME(cir): wrap buildBodyAndFallthrough with try/catch bits. if (S.getExceptionHandler()) @@ -359,7 +356,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { const bool CanFallthrough = false; const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0; if (CanFallthrough || HasCoreturns) { - CurCoro.Data->CurrentAwaitKind = AwaitKind::Final; + CurCoro.Data->CurrentAwaitKind = mlir::cir::AwaitKind::final; { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPoint(CurCoro.Data->FinalSuspendInsPoint); @@ -404,9 +401,11 @@ struct LValueOrRValue { RValue RV; }; } // namespace -static LValueOrRValue buildSuspendExpression( - CIRGenFunction &CGF, CGCoroData &Coro, CoroutineSuspendExpr const &S, - AwaitKind Kind, AggValueSlot aggSlot, bool ignoreResult, bool forLValue) { +static LValueOrRValue +buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, + CoroutineSuspendExpr const &S, mlir::cir::AwaitKind Kind, + AggValueSlot aggSlot, bool ignoreResult, + bool forLValue) { auto *E = S.getCommonExpr(); auto awaitBuild = mlir::success(); @@ -418,7 +417,7 @@ static LValueOrRValue buildSuspendExpression( auto &builder = CGF.getBuilder(); [[maybe_unused]] auto awaitOp = builder.create( - CGF.getLoc(S.getSourceRange()), + CGF.getLoc(S.getSourceRange()), Kind, /*readyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { auto *cond = S.getReadyExpr(); @@ -469,7 +468,7 @@ static LValueOrRValue buildSuspendExpression( // function is marked as 'noexcept', we avoid generating this additional // IR. CXXTryStmt *TryStmt = nullptr; - if (Coro.ExceptionHandler && Kind == AwaitKind::Init && + if (Coro.ExceptionHandler && Kind == mlir::cir::AwaitKind::init && memberCallExpressionCanThrow(S.getResumeExpr())) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index fc7820a9c000..92afb9de7606 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1410,9 +1410,12 @@ LogicalResult UnaryOp::verify() { //===----------------------------------------------------------------------===// void AwaitOp::build(OpBuilder &builder, OperationState &result, + mlir::cir::AwaitKind kind, function_ref readyBuilder, function_ref suspendBuilder, function_ref resumeBuilder) { + result.addAttribute(getKindAttrName(result.name), + cir::AwaitKindAttr::get(builder.getContext(), kind)); { OpBuilder::InsertionGuard guard(builder); Region *readyRegion = result.addRegion(); diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 8b7d8395327d..99822da99f54 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -171,7 +171,7 @@ VoidTask silly_task() { // First regions `ready` has a special cir.yield code to veto suspension. -// CHECK: cir.await(ready : { +// CHECK: cir.await(init, ready : { // CHECK: %[[#ReadyVeto:]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[#SuspendAlwaysAddr]]) // CHECK: cir.if %[[#ReadyVeto]] { // CHECK: cir.yield break @@ -212,7 +212,7 @@ VoidTask silly_task() { // The actual user written co_await // CHECK: cir.scope { -// CHECK: cir.await(ready : { +// CHECK: cir.await(user, ready : { // CHECK: }, suspend : { // CHECK: }, resume : { // CHECK: },) @@ -223,7 +223,7 @@ VoidTask silly_task() { // The final suspend co_await // CHECK: cir.scope { -// CHECK: cir.await(ready : { +// CHECK: cir.await(final, ready : { // CHECK: }, suspend : { // CHECK: }, resume : { // CHECK: },) From 12145dc8956ed319f5315f9cde7523ca04ed7188 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Dec 2022 18:58:36 -0300 Subject: [PATCH 0767/2301] [CIR][CIRGen][Coroutines] Add a specific yield kind for skipping coroutine suspension Previous used `cir.yield break` was a hack to keep moving while adding `cir.await` support. Add a proper kind to reflect the suspension semantics. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 20 ++++++- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 8 +-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 61 ++++++++++++-------- clang/test/CIR/CodeGen/coro-task.cpp | 2 +- clang/test/CIR/IR/invalid.cir | 32 +++++++++- 5 files changed, 91 insertions(+), 32 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index fc0759e137fe..2331228e45b8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -421,11 +421,12 @@ def IfOp : CIR_Op<"if", def YieldOpKind_BK : I32EnumAttrCase<"Break", 1, "break">; def YieldOpKind_FT : I32EnumAttrCase<"Fallthrough", 2, "fallthrough">; def YieldOpKind_CE : I32EnumAttrCase<"Continue", 3, "continue">; +def YieldOpKind_NS : I32EnumAttrCase<"NoSuspend", 4, "nosuspend">; def YieldOpKind : I32EnumAttr< "YieldOpKind", "yield kind", - [YieldOpKind_BK, YieldOpKind_FT, YieldOpKind_CE]> { + [YieldOpKind_BK, YieldOpKind_FT, YieldOpKind_CE, YieldOpKind_NS]> { let cppNamespace = "::mlir::cir"; } @@ -449,12 +450,14 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, Only available inside `cir.switch` regions. - `continue`: only allowed under `cir.loop`, continue execution to the next loop step. + - `nosuspend`: specific to the `ready` region inside `cir.await` op, it makes + control-flow to be transfered back to the parent, preventing suspension. As a general rule, `cir.yield` must be explicitly used whenever a region has more than one block and no terminator, or within `cir.switch` regions not `cir.return` terminated. - Example: + Examples: ```mlir cir.if %4 { ... @@ -472,6 +475,16 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ... cir.yield continue } + + cir.await(init, ready : { + // Call std::suspend_always::await_ready + %18 = cir.call @_ZNSt14suspend_always11await_readyEv(...) + cir.if %18 { + // yields back to the parent. + cir.yield nosuspend + } + cir.yield // control-flow to the next region for suspension. + }, ...) ``` }]; @@ -507,6 +520,9 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, bool isContinue() { return !isPlain() && *getKind() == YieldOpKind::Continue; } + bool isNoSuspend() { + return !isPlain() && *getKind() == YieldOpKind::NoSuspend; + } }]; let hasVerifier = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index be2c3e838add..81247b808b55 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -429,10 +429,10 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { // If expression is ready, no need to suspend, - // `YieldOpKind::Break` tells control flow to return to parent, no - // more regions to be executed. - builder.create(loc, - mlir::cir::YieldOpKind::Break); + // `YieldOpKind::NoSuspend` tells control flow to return to + // parent, no more regions to be executed. + builder.create( + loc, mlir::cir::YieldOpKind::NoSuspend); }); if (!condV) { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 92afb9de7606..49580212cf31 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -573,31 +573,37 @@ LogicalResult ScopeOp::verify() { return success(); } //===----------------------------------------------------------------------===// mlir::LogicalResult YieldOp::verify() { - auto canDominateYieldBreak = - [&](Operation *parentOp) { - mlir::Region *lastAwaitRegion = nullptr; - while (!llvm::isa(parentOp)) { - auto awaitOp = dyn_cast(parentOp); - if (awaitOp) { - if (lastAwaitRegion && lastAwaitRegion == &awaitOp.getResume()) { - emitOpError() - << "break can only be used in 'ready' and 'suspend' regions"; - return false; - } - return true; - } - - if (llvm::isa(parentOp)) - return true; - - lastAwaitRegion = parentOp->getParentRegion(); - parentOp = parentOp->getParentOp(); + auto isDominatedByLoopOrSwitch = [&](Operation *parentOp) { + while (!llvm::isa(parentOp)) { + if (llvm::isa(parentOp)) + return true; + parentOp = parentOp->getParentOp(); + } + + emitOpError() << "shall be dominated by 'cir.loop' or 'cir.switch'"; + return false; + }; + + auto isDominatedByProperAwaitRegion = [&](Operation *parentOp, + mlir::Region *currRegion) { + while (!llvm::isa(parentOp)) { + auto awaitOp = dyn_cast(parentOp); + if (awaitOp) { + if (currRegion && currRegion == &awaitOp.getResume()) { + emitOpError() << "kind 'nosuspend' can only be used in 'ready' and " + "'suspend' regions"; + return false; } + return true; + } - emitOpError() - << "shall be dominated by 'cir.loop', 'cir.switch' or 'cir.await'"; - return false; - }; + currRegion = parentOp->getParentRegion(); + parentOp = parentOp->getParentOp(); + } + + emitOpError() << "shall be dominated by 'cir.await'"; + return false; + }; auto isDominatedByLoop = [](Operation *parentOp) { while (!llvm::isa(parentOp)) { @@ -608,8 +614,15 @@ mlir::LogicalResult YieldOp::verify() { return false; }; + if (isNoSuspend()) { + if (!isDominatedByProperAwaitRegion(getOperation()->getParentOp(), + getOperation()->getParentRegion())) + return mlir::failure(); + return mlir::success(); + } + if (isBreak()) { - if (!canDominateYieldBreak(getOperation()->getParentOp())) + if (!isDominatedByLoopOrSwitch(getOperation()->getParentOp())) return mlir::failure(); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 99822da99f54..6afd6996e421 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -174,7 +174,7 @@ VoidTask silly_task() { // CHECK: cir.await(init, ready : { // CHECK: %[[#ReadyVeto:]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[#SuspendAlwaysAddr]]) // CHECK: cir.if %[[#ReadyVeto]] { -// CHECK: cir.yield break +// CHECK: cir.yield nosuspend // CHECK: } // CHECK: cir.yield diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 84130d4e3172..b8c24090ebd1 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -53,7 +53,7 @@ cir.func @yieldfallthrough() { cir.func @yieldbreak() { %0 = cir.cst(true) : !cir.bool cir.if %0 { - cir.yield break // expected-error {{shall be dominated by 'cir.loop', 'cir.switch' or 'cir.await'}} + cir.yield break // expected-error {{shall be dominated by 'cir.loop' or 'cir.switch'}} } cir.return } @@ -232,4 +232,34 @@ module { cir.func coroutine @bad_task() { // expected-error {{coroutine body must use at least one cir.await op}} cir.return +} + +// ----- + +cir.func coroutine @bad_yield() { + cir.scope { + cir.await(user, ready : { + cir.yield + }, suspend : { + cir.yield + }, resume : { + cir.yield nosuspend // expected-error {{kind 'nosuspend' can only be used in 'ready' and 'suspend' regions}} + },) + } + cir.return +} + +// ----- + +cir.func coroutine @good_yield() { + cir.scope { + cir.await(user, ready : { + cir.yield nosuspend + }, suspend : { + cir.yield nosuspend + }, resume : { + cir.yield + },) + } + cir.return } \ No newline at end of file From 8b1d2d1d11835cbe99cc6ed2e4c429b606172961 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Dec 2022 14:50:09 -0300 Subject: [PATCH 0768/2301] [CIR][CIRGen] Fix typo on VarDeclContext Found by inspection, somehow it was working with the wrong oldval --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 5932d6ba76a2..00ab9c406635 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -383,7 +383,7 @@ class CIRGenFunction { public: VarDeclContext(CIRGenFunction &p, const VarDecl *Value) : P(p) { - if (P.currSrcLoc) + if (P.currVarDecl) OldVal = P.currVarDecl; P.currVarDecl = Value; } From 17b1dfe2bdb81f947a4529417e4b4b267abe2686 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Dec 2022 14:51:11 -0300 Subject: [PATCH 0769/2301] [CIR][CIRGen] Improve constant emitter for reference types --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 6 +++++- clang/lib/CIR/CodeGen/CIRGenExprCst.cpp | 9 +++++++++ clang/test/CIR/CodeGen/coro-task.cpp | 15 +++++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index bd16d5aff509..76186376f76b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -255,13 +255,17 @@ void CIRGenFunction::buildScalarInit(const Expr *init, const ValueDecl *D, void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit) { + SourceLocRAIIObject Loc{*this, getLoc(init->getSourceRange())}; if (capturedByInit) llvm_unreachable("NYI"); QualType type = D->getType(); if (type->isReferenceType()) { - assert(0 && "not implemented"); + RValue rvalue = buildReferenceBindingToExpr(init); + if (capturedByInit) + llvm_unreachable("NYI"); + buildStoreThroughLValue(rvalue, lvalue); return; } switch (CIRGenFunction::getEvaluationKind(type)) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp index 9702380235bb..8d05d27a6efe 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp @@ -1128,6 +1128,15 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { return tryEmitPrivateForMemory(*value, destType); } + // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a + // reference is a constant expression, and the reference binds to a temporary, + // then constant initialization is performed. ConstExprEmitter will + // incorrectly emit a prvalue constant in this case, and the calling code + // interprets that as the (pointer) value of the reference, rather than the + // desired value of the referee. + if (destType->isReferenceType()) + return {}; + assert(0 && "not implemented"); return {}; } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 6afd6996e421..d9dc6a284e83 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -29,6 +29,11 @@ struct suspend_never { void await_resume() noexcept {} }; +struct string { + int size() const; + string(); + string(char const *s); +}; } // namespace std namespace folly { @@ -102,6 +107,7 @@ co_invoke_fn co_invoke; // CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct<"struct.folly::coro::Task::promise_type", i8> // CHECK: ![[CoroHandleVoid:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", i8> // CHECK: ![[CoroHandlePromise:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", i8> +// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string", i8 // CHECK: ![[SuspendAlways:ty_.*]] = !cir.struct<"struct.std::suspend_always", i8> // CHECK: module {{.*}} { @@ -238,3 +244,12 @@ VoidTask silly_task() { // CHECK: %[[#Tmp1:]] = cir.load %[[#VoidTaskAddr]] // CHECK-NEXT: cir.return %[[#Tmp1]] // CHECK-NEXT: } + +folly::coro::Task byRef(const std::string& s) { + co_return s.size(); +} + +// FIXME: this could be less redundant than two allocas + reloads +// CHECK: cir.func coroutine @_Z5byRefRKSt6string(%arg0: !cir.ptr +// CHECK: %[[#AllocaParam:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] +// CHECK: %[[#AllocaFnUse:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] \ No newline at end of file From e9ebfd71fba5fc086381033a66e86ba9670411d9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Dec 2022 16:44:23 -0300 Subject: [PATCH 0770/2301] [CIR][CIRGen] Add boilerplate for bunch of currently unhandled builtins --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 266 +++++++++++++++++++++++- 1 file changed, 265 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index ae80c5f84b5e..0c9fc35e59d1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -75,7 +75,258 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, if (FD->hasAttr() || ((ConstWithoutErrnoAndExceptions || ConstWithoutExceptions) && (!ConstWithoutErrnoAndExceptions || (!getLangOpts().MathErrno)))) { - llvm_unreachable("NYI"); + switch (BuiltinIDIfNoAsmLabel) { + case Builtin::BIceil: + case Builtin::BIceilf: + case Builtin::BIceill: + case Builtin::BI__builtin_ceil: + case Builtin::BI__builtin_ceilf: + case Builtin::BI__builtin_ceilf16: + case Builtin::BI__builtin_ceill: + case Builtin::BI__builtin_ceilf128: + llvm_unreachable("NYI"); + + case Builtin::BIcopysign: + case Builtin::BIcopysignf: + case Builtin::BIcopysignl: + case Builtin::BI__builtin_copysign: + case Builtin::BI__builtin_copysignf: + case Builtin::BI__builtin_copysignf16: + case Builtin::BI__builtin_copysignl: + case Builtin::BI__builtin_copysignf128: + llvm_unreachable("NYI"); + + case Builtin::BIcos: + case Builtin::BIcosf: + case Builtin::BIcosl: + case Builtin::BI__builtin_cos: + case Builtin::BI__builtin_cosf: + case Builtin::BI__builtin_cosf16: + case Builtin::BI__builtin_cosl: + case Builtin::BI__builtin_cosf128: + llvm_unreachable("NYI"); + + case Builtin::BIexp: + case Builtin::BIexpf: + case Builtin::BIexpl: + case Builtin::BI__builtin_exp: + case Builtin::BI__builtin_expf: + case Builtin::BI__builtin_expf16: + case Builtin::BI__builtin_expl: + case Builtin::BI__builtin_expf128: + llvm_unreachable("NYI"); + + case Builtin::BIexp2: + case Builtin::BIexp2f: + case Builtin::BIexp2l: + case Builtin::BI__builtin_exp2: + case Builtin::BI__builtin_exp2f: + case Builtin::BI__builtin_exp2f16: + case Builtin::BI__builtin_exp2l: + case Builtin::BI__builtin_exp2f128: + llvm_unreachable("NYI"); + + case Builtin::BIfabs: + case Builtin::BIfabsf: + case Builtin::BIfabsl: + case Builtin::BI__builtin_fabs: + case Builtin::BI__builtin_fabsf: + case Builtin::BI__builtin_fabsf16: + case Builtin::BI__builtin_fabsl: + case Builtin::BI__builtin_fabsf128: + llvm_unreachable("NYI"); + + case Builtin::BIfloor: + case Builtin::BIfloorf: + case Builtin::BIfloorl: + case Builtin::BI__builtin_floor: + case Builtin::BI__builtin_floorf: + case Builtin::BI__builtin_floorf16: + case Builtin::BI__builtin_floorl: + case Builtin::BI__builtin_floorf128: + llvm_unreachable("NYI"); + + case Builtin::BIfma: + case Builtin::BIfmaf: + case Builtin::BIfmal: + case Builtin::BI__builtin_fma: + case Builtin::BI__builtin_fmaf: + case Builtin::BI__builtin_fmaf16: + case Builtin::BI__builtin_fmal: + case Builtin::BI__builtin_fmaf128: + llvm_unreachable("NYI"); + + case Builtin::BIfmax: + case Builtin::BIfmaxf: + case Builtin::BIfmaxl: + case Builtin::BI__builtin_fmax: + case Builtin::BI__builtin_fmaxf: + case Builtin::BI__builtin_fmaxf16: + case Builtin::BI__builtin_fmaxl: + case Builtin::BI__builtin_fmaxf128: + llvm_unreachable("NYI"); + + case Builtin::BIfmin: + case Builtin::BIfminf: + case Builtin::BIfminl: + case Builtin::BI__builtin_fmin: + case Builtin::BI__builtin_fminf: + case Builtin::BI__builtin_fminf16: + case Builtin::BI__builtin_fminl: + case Builtin::BI__builtin_fminf128: + llvm_unreachable("NYI"); + + // fmod() is a special-case. It maps to the frem instruction rather than an + // LLVM intrinsic. + case Builtin::BIfmod: + case Builtin::BIfmodf: + case Builtin::BIfmodl: + case Builtin::BI__builtin_fmod: + case Builtin::BI__builtin_fmodf: + case Builtin::BI__builtin_fmodf16: + case Builtin::BI__builtin_fmodl: + case Builtin::BI__builtin_fmodf128: { + llvm_unreachable("NYI"); + } + + case Builtin::BIlog: + case Builtin::BIlogf: + case Builtin::BIlogl: + case Builtin::BI__builtin_log: + case Builtin::BI__builtin_logf: + case Builtin::BI__builtin_logf16: + case Builtin::BI__builtin_logl: + case Builtin::BI__builtin_logf128: + llvm_unreachable("NYI"); + + case Builtin::BIlog10: + case Builtin::BIlog10f: + case Builtin::BIlog10l: + case Builtin::BI__builtin_log10: + case Builtin::BI__builtin_log10f: + case Builtin::BI__builtin_log10f16: + case Builtin::BI__builtin_log10l: + case Builtin::BI__builtin_log10f128: + llvm_unreachable("NYI"); + + case Builtin::BIlog2: + case Builtin::BIlog2f: + case Builtin::BIlog2l: + case Builtin::BI__builtin_log2: + case Builtin::BI__builtin_log2f: + case Builtin::BI__builtin_log2f16: + case Builtin::BI__builtin_log2l: + case Builtin::BI__builtin_log2f128: + llvm_unreachable("NYI"); + + case Builtin::BInearbyint: + case Builtin::BInearbyintf: + case Builtin::BInearbyintl: + case Builtin::BI__builtin_nearbyint: + case Builtin::BI__builtin_nearbyintf: + case Builtin::BI__builtin_nearbyintl: + case Builtin::BI__builtin_nearbyintf128: + llvm_unreachable("NYI"); + + case Builtin::BIpow: + case Builtin::BIpowf: + case Builtin::BIpowl: + case Builtin::BI__builtin_pow: + case Builtin::BI__builtin_powf: + case Builtin::BI__builtin_powf16: + case Builtin::BI__builtin_powl: + case Builtin::BI__builtin_powf128: + llvm_unreachable("NYI"); + + case Builtin::BIrint: + case Builtin::BIrintf: + case Builtin::BIrintl: + case Builtin::BI__builtin_rint: + case Builtin::BI__builtin_rintf: + case Builtin::BI__builtin_rintf16: + case Builtin::BI__builtin_rintl: + case Builtin::BI__builtin_rintf128: + llvm_unreachable("NYI"); + + case Builtin::BIround: + case Builtin::BIroundf: + case Builtin::BIroundl: + case Builtin::BI__builtin_round: + case Builtin::BI__builtin_roundf: + case Builtin::BI__builtin_roundf16: + case Builtin::BI__builtin_roundl: + case Builtin::BI__builtin_roundf128: + llvm_unreachable("NYI"); + + case Builtin::BIsin: + case Builtin::BIsinf: + case Builtin::BIsinl: + case Builtin::BI__builtin_sin: + case Builtin::BI__builtin_sinf: + case Builtin::BI__builtin_sinf16: + case Builtin::BI__builtin_sinl: + case Builtin::BI__builtin_sinf128: + llvm_unreachable("NYI"); + + case Builtin::BIsqrt: + case Builtin::BIsqrtf: + case Builtin::BIsqrtl: + case Builtin::BI__builtin_sqrt: + case Builtin::BI__builtin_sqrtf: + case Builtin::BI__builtin_sqrtf16: + case Builtin::BI__builtin_sqrtl: + case Builtin::BI__builtin_sqrtf128: + llvm_unreachable("NYI"); + + case Builtin::BItrunc: + case Builtin::BItruncf: + case Builtin::BItruncl: + case Builtin::BI__builtin_trunc: + case Builtin::BI__builtin_truncf: + case Builtin::BI__builtin_truncf16: + case Builtin::BI__builtin_truncl: + case Builtin::BI__builtin_truncf128: + llvm_unreachable("NYI"); + + case Builtin::BIlround: + case Builtin::BIlroundf: + case Builtin::BIlroundl: + case Builtin::BI__builtin_lround: + case Builtin::BI__builtin_lroundf: + case Builtin::BI__builtin_lroundl: + case Builtin::BI__builtin_lroundf128: + llvm_unreachable("NYI"); + + case Builtin::BIllround: + case Builtin::BIllroundf: + case Builtin::BIllroundl: + case Builtin::BI__builtin_llround: + case Builtin::BI__builtin_llroundf: + case Builtin::BI__builtin_llroundl: + case Builtin::BI__builtin_llroundf128: + llvm_unreachable("NYI"); + + case Builtin::BIlrint: + case Builtin::BIlrintf: + case Builtin::BIlrintl: + case Builtin::BI__builtin_lrint: + case Builtin::BI__builtin_lrintf: + case Builtin::BI__builtin_lrintl: + case Builtin::BI__builtin_lrintf128: + llvm_unreachable("NYI"); + + case Builtin::BIllrint: + case Builtin::BIllrintf: + case Builtin::BIllrintl: + case Builtin::BI__builtin_llrint: + case Builtin::BI__builtin_llrintf: + case Builtin::BI__builtin_llrintl: + case Builtin::BI__builtin_llrintf128: + llvm_unreachable("NYI"); + + default: + break; + } } switch (BuiltinIDIfNoAsmLabel) { @@ -83,6 +334,19 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("NYI"); break; + // C++ std:: builtins. + case Builtin::BImove: + case Builtin::BImove_if_noexcept: + case Builtin::BIforward: + case Builtin::BIas_const: + llvm_unreachable("NYI"); + case Builtin::BI__GetExceptionInfo: { + llvm_unreachable("NYI"); + } + + case Builtin::BI__fastfail: + llvm_unreachable("NYI"); + case Builtin::BI__builtin_coro_id: case Builtin::BI__builtin_coro_promise: case Builtin::BI__builtin_coro_resume: From c1a4fef66a7ec1796385564aea9c3a34b7386a42 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Dec 2022 21:05:37 -0300 Subject: [PATCH 0771/2301] [CIR][CIRGen] Add initial support for std::move There's likely some more higher level modeling needed, but this is a start for unblocking some coroutine testing. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 16 ++++--- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 59 ++++++++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenTypes.h | 8 +++- 4 files changed, 70 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 0c9fc35e59d1..e3ddbe75df81 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -339,7 +339,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BImove_if_noexcept: case Builtin::BIforward: case Builtin::BIas_const: - llvm_unreachable("NYI"); + return RValue::get(buildLValue(E->getArg(0)).getPointer()); case Builtin::BI__GetExceptionInfo: { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 415b5c96437a..54f4a70eab10 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -222,12 +222,13 @@ bool CIRGenFunction::hasBooleanRepresentation(QualType Ty) { CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { E = E->IgnoreParens(); + // Look through function-to-pointer decay. if (const auto *ICE = dyn_cast(E)) { - assert(ICE && "Only ICE supported so far!"); - assert(ICE->getCastKind() == CK_FunctionToPointerDecay && - "No other casts supported yet"); - - return buildCallee(ICE->getSubExpr()); + if (ICE->getCastKind() == CK_FunctionToPointerDecay || + ICE->getCastKind() == CK_BuiltinFnToFnPtr) { + return buildCallee(ICE->getSubExpr()); + } + // Resolve direct calls. } else if (const auto *DRE = dyn_cast(E)) { const auto *FD = dyn_cast(DRE->getDecl()); assert(FD && @@ -1470,9 +1471,10 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { assert(!Ty->isAnyComplexType() && "complex types not implemented"); return buildCompoundAssignmentLValue(cast(E)); } - case Expr::UserDefinedLiteralClass: - assert(0 && "should fallback below, remove assert when testcase available"); + case Expr::CallExprClass: + case Expr::CXXMemberCallExprClass: case Expr::CXXOperatorCallExprClass: + case Expr::UserDefinedLiteralClass: return buildCallExprLValue(cast(E)); case Expr::ExprWithCleanupsClass: { const auto *cleanups = cast(E); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 0948e0575a72..71228d194bab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -69,18 +69,67 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, return std::string(typeName); } -// isSafeToConvert - Return true if it is safe to convert the specified record -// decl to CIR and lay it out, false if doing so would cause us to get into a -// recursive compilation mess. +/// Return true if the specified type is already completely laid out. +bool CIRGenTypes::isRecordLayoutComplete(const Type *Ty) const { + llvm::DenseMap::const_iterator I = + recordDeclTypes.find(Ty); + return I != recordDeclTypes.end(); // && !I->second->isOpaque(); +} + +/// Return true if it is safe to convert the specified record decl to IR and lay +/// it out, false if doing so would cause us to get into a recursive compilation +/// mess. +static bool +isSafeToConvert(const RecordDecl *RD, CIRGenTypes &CGT, + llvm::SmallPtrSet &AlreadyChecked) { + // If we have already checked this type (maybe the same type is used by-value + // multiple times in multiple structure fields, don't check again. + if (!AlreadyChecked.insert(RD).second) + return true; + + const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr(); + + // If this type is already laid out, converting it is a noop. + if (CGT.isRecordLayoutComplete(Key)) + return true; + + // If this type is currently being laid out, we can't recursively compile it. + if (CGT.isRecordBeingLaidOut(Key)) + return false; + + // If this type would require laying out bases that are currently being laid + // out, don't do it. This includes virtual base classes which get laid out + // when a class is translated, even though they aren't embedded by-value into + // the class. + if (const CXXRecordDecl *CRD = dyn_cast(RD)) { + for (const auto &I : CRD->bases()) + if (!isSafeToConvert(I.getType()->castAs()->getDecl(), CGT, + AlreadyChecked)) + return false; + } + + // If this type would require laying out members that are currently being laid + // out, don't do it. + for ([[maybe_unused]] const auto *I : RD->fields()) + llvm_unreachable("NYI"); + + // If there are no problems, lets do it. + return true; +} + +// Return true if it is safe to convert the specified record decl to CIR and lay +// it out, false if doing so would cause us to get into a recursive compilation +// mess. static bool isSafeToConvert(const RecordDecl *RD, CIRGenTypes &CGT) { // If no structs are being laid out, we can certainly do this one. if (CGT.noRecordsBeingLaidOut()) return true; - llvm_unreachable("NYI"); + llvm::SmallPtrSet AlreadyChecked; + return isSafeToConvert(RD, CGT, AlreadyChecked); } -/// convertRecordDeclType - Lay out a tagged decl type like struct or union. +/// Lay out a tagged decl type like struct or union. mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // TagDecl's are not necessarily unique, instead use the (clang) type // connected to the decl. diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 86b2f610959d..3e3771b98c34 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -138,6 +138,12 @@ class CIRGenTypes { clang::ASTContext &getContext() const { return Context; } mlir::MLIRContext &getMLIRContext() const; + bool isRecordLayoutComplete(const clang::Type *Ty) const; + bool noRecordsBeingLaidOut() const { return RecordsBeingLaidOut.empty(); } + bool isRecordBeingLaidOut(const clang::Type *Ty) const { + return RecordsBeingLaidOut.count(Ty); + } + const ABIInfo &getABIInfo() const { return TheABIInfo; } CIRGenCXXABI &getCXXABI() const { return TheCXXABI; } @@ -240,8 +246,6 @@ class CIRGenTypes { clang::FunctionType::ExtInfo info, llvm::ArrayRef paramInfos, RequiredArgs args); - - bool noRecordsBeingLaidOut() const { return RecordsBeingLaidOut.empty(); } }; } // namespace cir From 8201c302f3eccaefe172563fb360feef8b483581 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Dec 2022 21:12:55 -0300 Subject: [PATCH 0772/2301] [CIR][CIRGen] Add missing testcase for previous commit --- clang/test/CIR/CodeGen/move.cpp | 37 +++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 clang/test/CIR/CodeGen/move.cpp diff --git a/clang/test/CIR/CodeGen/move.cpp b/clang/test/CIR/CodeGen/move.cpp new file mode 100644 index 000000000000..b82e1c35b921 --- /dev/null +++ b/clang/test/CIR/CodeGen/move.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +namespace std { + +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; + +template +typename remove_reference::type &&move(T &&t) noexcept; + +struct string { + string(); +}; + +} // std namespace + +// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string", i8> + +std::string getstr(); +void emplace(std::string &&s); + +void t() { + emplace(std::move(getstr())); +} + +// FIXME: we should explicitly model std::move here since it will +// be useful at least for the lifetime checker. + +// CHECK: cir.func @_Z1tv() { +// CHECK: %[[#Addr:]] = cir.alloca ![[StdString]], {{.*}} ["ref.tmp0"] +// CHECK: %[[#RValStr:]] = cir.call @_Z6getstrv() : () -> ![[StdString]] +// CHECK: cir.store %[[#RValStr]], %[[#Addr]] +// CHECK: cir.call @_Z7emplaceOSt6string(%[[#Addr]]) +// CHECK: cir.return +// CHECK: } \ No newline at end of file From 6d75b5cf81a843ab1f34e71ec768262e022cd34e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Dec 2022 22:10:00 -0300 Subject: [PATCH 0773/2301] [CIR][CIRGen] Implement basic support for using incomplete struct types Still missing custom printing and parsing support. --- clang/include/clang/CIR/Dialect/IR/CIRTypes.td | 6 ++++++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 17 ++++++++++++----- .../lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 2 ++ clang/test/CIR/CodeGen/struct.cpp | 6 +++++- 4 files changed, 25 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index ee24a155d163..96ee382e5aab 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -93,7 +93,13 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { let hasCustomAssemblyFormat = 1; let extraClassDeclaration = [{ + private: + // Track forward declaration or incomplete struct types. + bool hasBody = false; + public: void dropAst(); + bool isOpaque() const { return !hasBody; } + void setBody() { hasBody = true; } }]; let extraClassDefinition = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 71228d194bab..e425339f507f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -39,6 +39,7 @@ CIRGenTypes::~CIRGenTypes() { delete &*I++; } +// This is CIR's version of CodeGenTypes::addRecordTypeName std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, StringRef suffix) { llvm::SmallString<256> typeName; @@ -73,7 +74,7 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, bool CIRGenTypes::isRecordLayoutComplete(const Type *Ty) const { llvm::DenseMap::const_iterator I = recordDeclTypes.find(Ty); - return I != recordDeclTypes.end(); // && !I->second->isOpaque(); + return I != recordDeclTypes.end() && !I->second.isOpaque(); } /// Return true if it is safe to convert the specified record decl to IR and lay @@ -137,15 +138,21 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { mlir::cir::StructType &entry = recordDeclTypes[key]; - RD = RD->getDefinition(); + // Handle forward decl / incomplete types. + if (!entry) { + auto name = getRecordTypeName(RD, ""); + auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); + entry = mlir::cir::StructType::get( + &getMLIRContext(), {}, identifier, + mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), RD)); + } // TODO(CIR): clang checks here whether the type is known to be opaque. This // is equivalent to a forward decl. So far we don't need to support // opaque/forward-declared record decls. If/when we do we might need to have // temporary cir::StructType with no members as stand-ins. - if (!RD || !RD->isCompleteDefinition()) - llvm_unreachable("NYI"); - if (entry) + RD = RD->getDefinition(); + if (!RD || !RD->isCompleteDefinition() || !entry.isOpaque()) return entry; // If converting this type would cause us to infinitely loop, don't do it! diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 6b5e19c7c82f..bff4e8dce3bd 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -224,9 +224,11 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, } } + // TODO(cir): add base class info Ty = mlir::cir::StructType::get( &getMLIRContext(), builder.fieldTypes, identifier, mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); + Ty.setBody(); auto RL = std::make_unique( Ty, BaseTy, (bool)builder.IsZeroInitializable, diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 147560327850..f613721a2904 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -23,7 +23,11 @@ void baz() { Foo f; } +struct incomplete; +void yoyo(incomplete *i) {} + // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> +// CHECK: !ty_22struct2Eincomplete22 = !cir.struct<"struct.incomplete" // CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !ty_22struct2EBar22> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr @@ -66,4 +70,4 @@ void baz() { // CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, i32) -> i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.return -// CHECK-NEXT: } +// CHECK-NEXT: } \ No newline at end of file From ce0e9f879e4e286582420468c49b07e7df30af76 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 28 Dec 2022 18:38:24 -0500 Subject: [PATCH 0774/2301] [CIR][Lowering] Use adaptors more consistently We should be using adaptors in all places where any of the operands might have changed via the rewriting framework. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 71 +++++++++---------- 1 file changed, 35 insertions(+), 36 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ba2d6eb0513b..0711fd124082 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -46,15 +46,14 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::CastOp castOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto src = castOp.getSrc(); + auto src = adaptor.getSrc(); switch (castOp.getKind()) { case mlir::cir::CastKind::int_to_bool: { auto zero = rewriter.create( src.getLoc(), src.getType(), mlir::IntegerAttr::get(src.getType(), 0)); rewriter.replaceOpWithNewOp( - castOp, castOp.getSrc().getType(), mlir::cir::CmpOpKind::ne, src, - zero); + castOp, src.getType(), mlir::cir::CmpOpKind::ne, src, zero); break; } default: @@ -401,13 +400,13 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CmpOp op, OpAdaptor adaptor, + matchAndRewrite(mlir::cir::CmpOp cmpOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto type = op.getLhs().getType(); + auto type = adaptor.getLhs().getType(); auto integerType = mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); - switch (op.getKind()) { + switch (adaptor.getKind()) { case mlir::cir::CmpOpKind::gt: { if (type.isa()) { mlir::LLVM::ICmpPredicate cmpIType; @@ -415,17 +414,17 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::ugt; rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ugt), - op.getLhs(), op.getRhs(), + adaptor.getLhs(), adaptor.getRhs(), // TODO(CIR): These fastmath flags need to not be defaulted. - mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); } else { llvm_unreachable("Unknown Operand Type"); } @@ -438,16 +437,16 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::uge; rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::uge), - op.getLhs(), op.getRhs(), - mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + adaptor.getLhs(), adaptor.getRhs(), + mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); } else { llvm_unreachable("Unknown Operand Type"); } @@ -460,16 +459,16 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::ult; rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ult), - op.getLhs(), op.getRhs(), - mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + adaptor.getLhs(), adaptor.getRhs(), + mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); } else { llvm_unreachable("Unknown Operand Type"); } @@ -482,16 +481,16 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::ule; rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ule), - op.getLhs(), op.getRhs(), - mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + adaptor.getLhs(), adaptor.getRhs(), + mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); } else { llvm_unreachable("Unknown Operand Type"); } @@ -500,17 +499,17 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { case mlir::cir::CmpOpKind::eq: { if (type.isa()) { rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::ICmpPredicateAttr::get(getContext(), mlir::LLVM::ICmpPredicate::eq), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ueq), - op.getLhs(), op.getRhs(), - mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + adaptor.getLhs(), adaptor.getRhs(), + mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); } else { llvm_unreachable("Unknown Operand Type"); } @@ -519,17 +518,17 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { case mlir::cir::CmpOpKind::ne: { if (type.isa()) { rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::ICmpPredicateAttr::get(getContext(), mlir::LLVM::ICmpPredicate::ne), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - op, integerType, + cmpOp, integerType, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::une), - op.getLhs(), op.getRhs(), - mlir::LLVM::FastmathFlagsAttr::get(op.getContext(), {})); + adaptor.getLhs(), adaptor.getRhs(), + mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); } else { llvm_unreachable("Unknown Operand Type"); } From 7edc2b3e2ff8064bbb0e38bddcfb8f19225fa621 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 28 Dec 2022 20:44:34 -0500 Subject: [PATCH 0775/2301] [CIR][Lowering] Have CmpOp lower and then zext to an i8 CIR's cmp op returns a cir.bool. This makes semantic sense the way we use it as a c++ operation (e.g. casting `if (4)` via cir.cast). So we need to maintain it's type for it's users and just rely on simplification to remove extra zext/truncs. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 33 +++++++++++-------- clang/test/CIR/Lowering/cast.cir | 2 ++ 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0711fd124082..8593c1354f2e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -403,8 +403,10 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { matchAndRewrite(mlir::cir::CmpOp cmpOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto type = adaptor.getLhs().getType(); - auto integerType = + auto i1Type = mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); + auto i8Type = + mlir::IntegerType::get(getContext(), 8, mlir::IntegerType::Signless); switch (adaptor.getKind()) { case mlir::cir::CmpOpKind::gt: { @@ -414,12 +416,12 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::ugt; rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ugt), adaptor.getLhs(), adaptor.getRhs(), @@ -437,12 +439,12 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::uge; rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::uge), adaptor.getLhs(), adaptor.getRhs(), @@ -459,12 +461,12 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::ult; rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ult), adaptor.getLhs(), adaptor.getRhs(), @@ -481,12 +483,12 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::ule; rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ule), adaptor.getLhs(), adaptor.getRhs(), @@ -499,13 +501,13 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { case mlir::cir::CmpOpKind::eq: { if (type.isa()) { rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), mlir::LLVM::ICmpPredicate::eq), adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ueq), adaptor.getLhs(), adaptor.getRhs(), @@ -517,14 +519,17 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { } case mlir::cir::CmpOpKind::ne: { if (type.isa()) { - rewriter.replaceOpWithNewOp( - cmpOp, integerType, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), mlir::LLVM::ICmpPredicate::ne), adaptor.getLhs(), adaptor.getRhs()); + + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else if (type.isa()) { rewriter.replaceOpWithNewOp( - cmpOp, integerType, + cmpOp, i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::une), adaptor.getLhs(), adaptor.getRhs(), diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index d2b2b632a35f..629620a92579 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -12,6 +12,7 @@ module { // MLIR-NEXT: llvm.func @foo(%arg0: i32) -> i32 { // MLIR-NEXT: [[v0:%[0-9]]] = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: [[v1:%[0-9]]] = llvm.icmp "ne" %arg0, %0 : i32 +// MLIR-NEXT: [[v2:%[0-9]]] = llvm.zext %1 : i1 to i8 // MLIR-NEXT: llvm.return %arg0 : i32 // MLIR-NEXT: } // MLIR-NEXT:} @@ -19,5 +20,6 @@ module { // LLVM: define i32 @foo(i32 %0) { // LLVM-NEXT: %2 = icmp ne i32 %0, 0 +// LLVM-NEXT: %3 = zext i1 %2 to i8 // LLVM-NEXT: ret i32 %0 // LLVM-NEXT: } From ab39f32b9e9f26a9189105c130905648fa0639ed Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 28 Dec 2022 19:37:46 -0500 Subject: [PATCH 0776/2301] [CIR][Lowering] Support lowering cir::BrCondOp --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 30 +++++++++++++--- clang/test/CIR/Lowering/branch.cir | 36 +++++++++++++++++++ 2 files changed, 61 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/Lowering/branch.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8593c1354f2e..122c79568cc6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -39,6 +39,26 @@ using namespace llvm; namespace cir { namespace direct { +class CIRBrCondOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BrCondOp brOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto condition = adaptor.getCond(); + auto i1Condition = rewriter.create( + brOp.getLoc(), rewriter.getI1Type(), condition); + rewriter.replaceOpWithNewOp( + brOp, i1Condition.getResult(), brOp.getDestTrue(), + adaptor.getDestOperandsTrue(), brOp.getDestFalse(), + adaptor.getDestOperandsFalse()); + + return mlir::success(); + } +}; + class CIRCastOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -561,11 +581,11 @@ class CIRBrOpLowering : public mlir::OpRewritePattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns.add(converter, - patterns.getContext()); + patterns.add( + converter, patterns.getContext()); } static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { diff --git a/clang/test/CIR/Lowering/branch.cir b/clang/test/CIR/Lowering/branch.cir new file mode 100644 index 000000000000..cbc66f16f494 --- /dev/null +++ b/clang/test/CIR/Lowering/branch.cir @@ -0,0 +1,36 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +cir.func @foo(%arg0: !cir.bool) -> i32 { + cir.brcond %arg0 ^bb1, ^bb2 + ^bb1: + %0 = cir.cst(1: i32) : i32 + cir.return %0 : i32 + ^bb2: + %1 = cir.cst(0: i32) : i32 + cir.return %1 : i32 +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @foo(%arg0: i8) -> i32 { +// MLIR-NEXT: %0 = llvm.trunc %arg0 : i8 to i1 +// MLIR-NEXT: llvm.cond_br %0, ^bb1, ^bb2 +// MLIR-NEXT: ^bb1: // pred: ^bb0 +// MLIR-NEXT: %1 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: llvm.return %1 : i32 +// MLIR-NEXT: ^bb2: // pred: ^bb0 +// MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: llvm.return %2 : i32 +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: define i32 @foo(i8 %0) { +// LLVM-NEXT: %2 = trunc i8 %0 to i1 +// LLVM-NEXT: br i1 %2, label %3, label %4 +// LLVM-EMPTY: +// LLVM-NEXT: 3: ; preds = %1 +// LLVM-NEXT: ret i32 1 +// LLVM-EMPTY: +// LLVM-NEXT: 4: ; preds = %1 +// LLVM-NEXT: ret i32 0 +// LLVM-NEXT: } From db7c72b05e550a5d3bece6555d820bcaf92e75ed Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 28 Dec 2022 18:40:08 -0500 Subject: [PATCH 0777/2301] [CIR][Lowering] Support lowering cir::IfOp This is incomplete still. e.g. it only supports ifops with else statements and only supports SCF. I'll go about this in a TDD manner and update the functionality as we have tests to motivate it. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 83 ++++++++++++++++++- clang/test/CIR/Lowering/if.cir | 50 +++++++++++ 2 files changed, 131 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/Lowering/if.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 122c79568cc6..41a851377cec 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -16,6 +16,7 @@ #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" #include "mlir/Conversion/LLVMCommon/ConversionTarget.h" #include "mlir/Conversion/LLVMCommon/TypeConverter.h" +#include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h" #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" @@ -84,6 +85,84 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } }; +class CIRIfLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::IfOp ifOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto &thenRegion = ifOp.getThenRegion(); + auto &elseRegion = ifOp.getElseRegion(); + + (void)thenRegion; + (void)elseRegion; + + mlir::OpBuilder::InsertionGuard guard(rewriter); + + [[maybe_unused]] auto loc = ifOp.getLoc(); + + auto *currentBlock = rewriter.getInsertionBlock(); + auto *remainingOpsBlock = + rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); + mlir::Block *continueBlock; + if (ifOp->getResults().size() == 0) + continueBlock = remainingOpsBlock; + else + llvm_unreachable("NYI"); + + // Inline then region + [[maybe_unused]] auto *thenBeforeBody = &ifOp.getThenRegion().front(); + [[maybe_unused]] auto *thenAfterBody = &ifOp.getThenRegion().back(); + rewriter.inlineRegionBefore(ifOp.getThenRegion(), continueBlock); + + rewriter.setInsertionPointToEnd(thenAfterBody); + if (auto thenYieldOp = + dyn_cast(thenAfterBody->getTerminator())) { + [[maybe_unused]] auto thenBranchOp = + rewriter.replaceOpWithNewOp( + thenYieldOp, thenYieldOp.getArgs(), continueBlock); + } else if (auto thenReturnOp = dyn_cast( + thenAfterBody->getTerminator())) { + ; + } else { + llvm_unreachable("what are we terminating with?"); + } + + rewriter.setInsertionPointToEnd(continueBlock); + + // Inline then region + [[maybe_unused]] auto *elseBeforeBody = &ifOp.getElseRegion().front(); + [[maybe_unused]] auto *elseAfterBody = &ifOp.getElseRegion().back(); + rewriter.inlineRegionBefore(ifOp.getElseRegion(), thenAfterBody); + + rewriter.setInsertionPointToEnd(currentBlock); + auto trunc = rewriter.create(loc, rewriter.getI1Type(), + adaptor.getCondition()); + rewriter.create(loc, trunc.getRes(), thenBeforeBody, + elseBeforeBody); + + rewriter.setInsertionPointToEnd(elseAfterBody); + if (auto elseYieldOp = + dyn_cast(elseAfterBody->getTerminator())) { + [[maybe_unused]] auto elseBranchOp = + rewriter.replaceOpWithNewOp( + elseYieldOp, elseYieldOp.getArgs(), continueBlock); + } else if (auto elseReturnOp = dyn_cast( + elseAfterBody->getTerminator())) { + ; + } else { + llvm_unreachable("what are we terminating with?"); + } + + rewriter.setInsertionPoint(elseAfterBody->getTerminator()); + rewriter.replaceOp(ifOp, continueBlock->getArguments()); + + return mlir::success(); + } +}; + + class CIRScopeOpLowering : public mlir::OpConversionPattern { public: @@ -584,8 +663,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add( - converter, patterns.getContext()); + CIRFuncLowering, CIRScopeOpLowering, CIRCastOpLowering, + CIRIfLowering>(converter, patterns.getContext()); } static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir new file mode 100644 index 000000000000..2d21ce0bbb0a --- /dev/null +++ b/clang/test/CIR/Lowering/if.cir @@ -0,0 +1,50 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo(%arg0: i32) -> i32 { + %4 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool + cir.if %4 { + %5 = cir.cst(1 : i32) : i32 + cir.return %5 : i32 + } else { + %5 = cir.cst(0 : i32) : i32 + cir.return %5 : i32 + } + cir.return %arg0 : i32 + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @foo(%arg0: i32) -> i32 { +// MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: %1 = llvm.icmp "ne" %arg0, %0 : i32 +// MLIR-NEXT: %2 = llvm.zext %1 : i1 to i8 +// MLIR-NEXT: %3 = llvm.trunc %2 : i8 to i1 +// MLIR-NEXT: llvm.cond_br %3, ^bb2, ^bb1 +// MLIR-NEXT: ^bb1: // pred: ^bb0 +// MLIR-NEXT: %4 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: llvm.return %4 : i32 +// MLIR-NEXT: ^bb2: // pred: ^bb0 +// MLIR-NEXT: %5 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: llvm.return %5 : i32 +// MLIR-NEXT: ^bb3: // no predecessors +// MLIR-NEXT: llvm.return %arg0 : i32 +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: define i32 @foo(i32 %0) { +// LLVM-NEXT: %2 = icmp ne i32 %0, 0 +// LLVM-NEXT: %3 = zext i1 %2 to i8 +// LLVM-NEXT: %4 = trunc i8 %3 to i1 +// LLVM-NEXT: br i1 %4, label %6, label %5 +// LLVM-EMPTY: +// LLVM-NEXT: 5: +// LLVM-NEXT: ret i32 0 +// LLVM-EMPTY: +// LLVM-NEXT: 6: +// LLVM-NEXT: ret i32 1 +// LLVM-EMPTY: +// LLVM-NEXT: 7: +// LLVM-NEXT: ret i32 %0 +// LLVM-NEXT: } From b52f4bbeb97d980a9ecc2db31b760ceca0c1dae2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 28 Dec 2022 21:20:17 -0500 Subject: [PATCH 0778/2301] [CIR][Lowering] Support zexting all supported cmpops I only did this previously for ne. Extend this to the rest. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 67 ++++++++++++------- 1 file changed, 44 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 41a851377cec..94cc2c1ab4c0 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -162,7 +162,6 @@ class CIRIfLowering : public mlir::OpConversionPattern { } }; - class CIRScopeOpLowering : public mlir::OpConversionPattern { public: @@ -514,18 +513,22 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::ugt; - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ugt), adaptor.getLhs(), adaptor.getRhs(), // TODO(CIR): These fastmath flags need to not be defaulted. mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); } @@ -537,17 +540,21 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::uge; - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::uge), adaptor.getLhs(), adaptor.getRhs(), mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); } @@ -559,17 +566,21 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::ult; - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ult), adaptor.getLhs(), adaptor.getRhs(), mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); } @@ -581,17 +592,21 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::LLVM::ICmpPredicate::ule; - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ule), adaptor.getLhs(), adaptor.getRhs(), mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); } @@ -599,18 +614,22 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { } case mlir::cir::CmpOpKind::eq: { if (type.isa()) { - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), mlir::LLVM::ICmpPredicate::eq), adaptor.getLhs(), adaptor.getRhs()); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::ueq), adaptor.getLhs(), adaptor.getRhs(), mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); } @@ -627,12 +646,14 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { rewriter.replaceOpWithNewOp(cmpOp, i8Type, cmp.getRes()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - cmpOp, i1Type, + auto cmp = rewriter.create( + cmpOp.getLoc(), i1Type, mlir::LLVM::FCmpPredicateAttr::get(getContext(), mlir::LLVM::FCmpPredicate::une), adaptor.getLhs(), adaptor.getRhs(), mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); + rewriter.replaceOpWithNewOp(cmpOp, i8Type, + cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); } From 7e19fe496352d6222ec10e9b0dfcce7a122179e3 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 28 Dec 2022 22:02:37 -0500 Subject: [PATCH 0779/2301] [CIR][NFC] Move a function to an anonymous namespace clangd insists --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 94cc2c1ab4c0..7a33c0dfdfe6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -688,7 +688,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRIfLowering>(converter, patterns.getContext()); } -static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { +namespace { +void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { return mlir::LLVM::LLVMPointerType::get(type.getContext()); }); @@ -701,6 +702,7 @@ static void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { mlir::IntegerType::Signless); }); } +} // namespace void ConvertCIRToLLVMPass::runOnOperation() { auto module = getOperation(); From 895b0c266ee2e068ecb77851e6e78bff1c9248d1 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 28 Dec 2022 22:03:39 -0500 Subject: [PATCH 0780/2301] [CIR][NFC] Add some commenteed out code to make incremental work faster I keep typing and removing this for diffs. Just keep it around to make lowering easier to live with. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7a33c0dfdfe6..d2724c035ae2 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -716,7 +716,22 @@ void ConvertCIRToLLVMPass::runOnOperation() { mlir::populateFuncToLLVMConversionPatterns(converter, patterns); mlir::ConversionTarget target(getContext()); - target.addLegalOp(); + using namespace mlir::cir; + target.addLegalOp(); target.addLegalDialect(); target.addIllegalDialect(); From d50228eeeff5138212b598968be45abc95bad91e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 29 Dec 2022 14:23:00 -0300 Subject: [PATCH 0781/2301] [CIR] Improve printing and parsing for incomplete structs --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 12 ++++------ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 10 +++----- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 4 ++-- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 23 ++++++++++++++++--- clang/test/CIR/CodeGen/struct.cpp | 2 +- clang/test/CIR/IR/struct.cir | 3 +++ 6 files changed, 34 insertions(+), 20 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 96ee382e5aab..4e2f5650efb8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -78,28 +78,26 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { let parameters = (ins ArrayRefParameter<"mlir::Type", "members">:$members, "mlir::StringAttr":$typeName, + "bool":$body, "std::optional<::mlir::cir::ASTRecordDeclAttr>":$ast ); let builders = [ TypeBuilder<(ins - "ArrayRef":$members, "StringRef":$typeName + "ArrayRef":$members, "StringRef":$typeName, + "bool":$body ), [{ auto id = mlir::StringAttr::get(context, typeName); - return StructType::get(context, members, id, std::nullopt); + auto sTy = StructType::get(context, members, id, body, std::nullopt); + return sTy; }]> ]; let hasCustomAssemblyFormat = 1; let extraClassDeclaration = [{ - private: - // Track forward declaration or incomplete struct types. - bool hasBody = false; public: void dropAst(); - bool isOpaque() const { return !hasBody; } - void setBody() { hasBody = true; } }]; let extraClassDefinition = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index e425339f507f..fce5bc4e30da 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -74,7 +74,7 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, bool CIRGenTypes::isRecordLayoutComplete(const Type *Ty) const { llvm::DenseMap::const_iterator I = recordDeclTypes.find(Ty); - return I != recordDeclTypes.end() && !I->second.isOpaque(); + return I != recordDeclTypes.end() && I->second.getBody(); } /// Return true if it is safe to convert the specified record decl to IR and lay @@ -143,16 +143,12 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { auto name = getRecordTypeName(RD, ""); auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); entry = mlir::cir::StructType::get( - &getMLIRContext(), {}, identifier, + &getMLIRContext(), {}, identifier, /*body=*/false, mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), RD)); } - // TODO(CIR): clang checks here whether the type is known to be opaque. This - // is equivalent to a forward decl. So far we don't need to support - // opaque/forward-declared record decls. If/when we do we might need to have - // temporary cir::StructType with no members as stand-ins. RD = RD->getDefinition(); - if (!RD || !RD->isCompleteDefinition() || !entry.isOpaque()) + if (!RD || !RD->isCompleteDefinition() || entry.getBody()) return entry; // If converting this type would cause us to infinitely loop, don't do it! diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index bff4e8dce3bd..7c2e309890d8 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -216,6 +216,7 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, mlir::StringAttr::get(&getMLIRContext(), name + ".base"); BaseTy = mlir::cir::StructType::get( &getMLIRContext(), baseBuilder.fieldTypes, baseIdentifier, + /*body=*/true, mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work // on both of them with the same index. @@ -227,8 +228,7 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, // TODO(cir): add base class info Ty = mlir::cir::StructType::get( &getMLIRContext(), builder.fieldTypes, identifier, - mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); - Ty.setBody(); + /*body=*/true, mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); auto RL = std::make_unique( Ty, BaseTy, (bool)builder.IsZeroInitializable, diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 1f19ff7579a8..c511e9b16e46 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -76,21 +76,38 @@ Type StructType::parse(mlir::AsmParser &parser) { std::string typeName; if (parser.parseString(&typeName)) return Type(); + llvm::SmallVector members; + bool parsedBody = false; + while (mlir::succeeded(parser.parseOptionalComma())) { + if (mlir::succeeded(parser.parseOptionalKeyword("incomplete"))) + break; + // FIXME: add parsing for ast node. + parsedBody = true; Type nextMember; if (parser.parseType(nextMember)) return Type(); members.push_back(nextMember); } + if (parser.parseGreater()) return Type(); - return get(parser.getContext(), members, typeName); + auto sTy = get(parser.getContext(), members, typeName, parsedBody); + return sTy; } void StructType::print(mlir::AsmPrinter &printer) const { - printer << '<' << getTypeName() << ", "; - llvm::interleaveComma(getMembers(), printer); + printer << '<' << getTypeName(); + if (!getBody()) { + printer << ", incomplete"; + } else { + auto members = getMembers(); + if (!members.empty()) { + printer << ", "; + llvm::interleaveComma(getMembers(), printer); + } + } if (getAst()) { printer << ", "; printer.printAttributeWithoutType(*getAst()); diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index f613721a2904..a7ad34e0c868 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -27,7 +27,7 @@ struct incomplete; void yoyo(incomplete *i) {} // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> -// CHECK: !ty_22struct2Eincomplete22 = !cir.struct<"struct.incomplete" +// CHECK: !ty_22struct2Eincomplete22 = !cir.struct<"struct.incomplete", incomplete // CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !ty_22struct2EBar22> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index bc3cae4083cd..eab85379ba2d 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -3,11 +3,14 @@ module { cir.func @structs() { %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] + %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["i", init] cir.return } } // CHECK: !ty_22S22 = !cir.struct<"S", i8, i16, i32> +// CHECK: !ty_22i22 = !cir.struct<"i", incomplete> // CHECK-NEXT: module { // CHECK-NEXT: cir.func @structs() { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["i", init] From d036689b5ab600a9b5268f35de5b911c78585c5c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 29 Dec 2022 15:42:25 -0300 Subject: [PATCH 0782/2301] [CIR][CIRGen][NFC] Fix bunch of different build time warnings --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index fbb0bdfcbabb..6addc746babc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -65,8 +65,9 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, assert(0 && "not implemented"); switch (S->getStmtClass()) { - case Stmt::OpenACCComputeConstructClass: case Stmt::OMPScopeDirectiveClass: + llvm_unreachable("NYI"); + case Stmt::OpenACCComputeConstructClass: case Stmt::OMPErrorDirectiveClass: case Stmt::NoStmtClass: case Stmt::CXXCatchStmtClass: From 2f063213cac4aff183f194bd978b274f4fe7bdc5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 30 Dec 2022 15:40:10 -0300 Subject: [PATCH 0783/2301] [CIR][CIRGen] Boilerplate for handling different conditions for --- clang/lib/CIR/CodeGen/CIRGenExprCst.cpp | 22 +++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 12 +++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 5 ++++ clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 4 +++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 32 ++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenTypes.h | 9 +++++- 6 files changed, 82 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp index 8d05d27a6efe..3ac0a142dfce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp @@ -1344,3 +1344,25 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, } llvm_unreachable("Unknown APValue kind"); } + +mlir::Value CIRGenModule::buildNullConstant(QualType T) { + if (T->getAs()) + llvm_unreachable("NYI"); + + if (getTypes().isZeroInitializable(T)) + llvm_unreachable("NYI"); + + if (const ConstantArrayType *CAT = + getASTContext().getAsConstantArrayType(T)) { + llvm_unreachable("NYI"); + } + + if (const RecordType *RT = T->getAs()) + llvm_unreachable("NYI"); + + assert(T->isMemberDataPointerType() && + "Should only see pointers to data members here!"); + + llvm_unreachable("NYI"); + return {}; +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index f17a9d8e803c..f62794a8337f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -48,6 +48,9 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Type ConvertType(QualType T) { return CGF.ConvertType(T); } LValue buildLValue(const Expr *E) { return CGF.buildLValue(E); } + /// Emit a value that corresponds to null for the given type. + mlir::Value buildNullValue(QualType Ty); + //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// @@ -117,7 +120,10 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) { - llvm_unreachable("NYI"); + if (E->getType()->isVoidType()) + return nullptr; + + return buildNullValue(E->getType()); } mlir::Value VisitGNUNullExpr(const GNUNullExpr *E) { llvm_unreachable("NYI"); @@ -1315,6 +1321,10 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( return LHSLV; } +mlir::Value ScalarExprEmitter::buildNullValue(QualType Ty) { + return CGF.buildFromMemory(CGF.CGM.buildNullConstant(Ty), Ty); +} + mlir::Value ScalarExprEmitter::buildCompoundAssign( const CompoundAssignOperator *E, mlir::Value (ScalarExprEmitter::*Func)(const BinOpInfo &)) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index d0bf15aac548..8c588ac9093f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -324,6 +324,11 @@ class CIRGenModule { // C++ related functions. void buildDeclContext(const DeclContext *DC); + /// Return the result of value-initializing the given type, i.e. a null + /// expression of the given type. This is usually, but not always, an LLVM + /// null constant. + mlir::Value buildNullConstant(QualType T); + llvm::StringRef getMangledName(clang::GlobalDecl GD); // Make sure that this type is translated. diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index 8f377cafef73..9619e8fee8a5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -74,6 +74,10 @@ class CIRGenRecordLayout { assert(FieldInfo.count(FD) && "Invalid field for record!"); return FieldInfo.lookup(FD); } + + /// Check whether this struct can be C++ zero-initialized with a + /// zeroinitializer. + bool isZeroInitializable() const { return IsZeroInitializable; } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index fce5bc4e30da..9b8cf1474d73 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -755,3 +755,35 @@ CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *RD) { "Unable to find record layout information for type"); return *I->second; } + +bool CIRGenTypes::isZeroInitializable(QualType T) { + if (T->getAs()) + return Context.getTargetNullPointerValue(T) == 0; + + if (const auto *AT = Context.getAsArrayType(T)) { + if (isa(AT)) + return true; + if (const auto *CAT = dyn_cast(AT)) + if (Context.getConstantArrayElementCount(CAT) == 0) + return true; + T = Context.getBaseElementType(T); + } + + // Records are non-zero-initializable if they contain any + // non-zero-initializable subobjects. + if (const RecordType *RT = T->getAs()) { + const RecordDecl *RD = RT->getDecl(); + return isZeroInitializable(RD); + } + + // We have to ask the ABI about member pointers. + if (const MemberPointerType *MPT = T->getAs()) + llvm_unreachable("NYI"); + + // Everything else is okay. + return true; +} + +bool CIRGenTypes::isZeroInitializable(const RecordDecl *RD) { + return getCIRGenRecordLayout(RD).isZeroInitializable(); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 3e3771b98c34..f5e058ccf787 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -144,10 +144,17 @@ class CIRGenTypes { return RecordsBeingLaidOut.count(Ty); } + /// Return whether a type can be zero-initialized (in the C++ sense) with an + /// LLVM zeroinitializer. + bool isZeroInitializable(clang::QualType T); + /// Return whether a record type can be zero-initialized (in the C++ sense) + /// with an LLVM zeroinitializer. + bool isZeroInitializable(const clang::RecordDecl *RD); + const ABIInfo &getABIInfo() const { return TheABIInfo; } CIRGenCXXABI &getCXXABI() const { return TheCXXABI; } - /// ConvertType - Convert type T into a mlir::Type. + /// Convert type T into a mlir::Type. mlir::Type ConvertType(clang::QualType T); mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl); From 4db590ed936df730603ebce5fdfbcdb963cbd110 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 30 Dec 2022 15:57:54 -0300 Subject: [PATCH 0784/2301] [CIR][CIRGen] Support the addr of globals for regular functions --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 7 ++++++ clang/lib/CIR/CodeGen/CIRGenExprCst.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 8 +++---- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 7 ++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- clang/test/CIR/CodeGen/globals.cpp | 26 ++++++++++++++++++++-- 6 files changed, 45 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 9b19cd830461..8717f34dbc33 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -116,6 +116,13 @@ class CIRGenBuilderTy : public mlir::OpBuilder { loc, ty, mlir::cir::NullAttr::get(getContext(), ty)); } + // Creates null value for type ty. + mlir::cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { + assert(ty.isa() && "NYI"); + return create(loc, ty, + mlir::IntegerAttr::get(ty, 0)); + } + mlir::Value getBitcast(mlir::Location loc, mlir::Value src, mlir::Type newTy) { return create(loc, newTy, mlir::cir::CastKind::bitcast, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp index 3ac0a142dfce..19c0fb9f673e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp @@ -1345,12 +1345,12 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, llvm_unreachable("Unknown APValue kind"); } -mlir::Value CIRGenModule::buildNullConstant(QualType T) { +mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) { if (T->getAs()) llvm_unreachable("NYI"); if (getTypes().isZeroInitializable(T)) - llvm_unreachable("NYI"); + return builder.getNullValue(getTypes().convertTypeForMem(T), loc); if (const ConstantArrayType *CAT = getASTContext().getAsConstantArrayType(T)) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index f62794a8337f..8949be241b6c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -49,7 +49,7 @@ class ScalarExprEmitter : public StmtVisitor { LValue buildLValue(const Expr *E) { return CGF.buildLValue(E); } /// Emit a value that corresponds to null for the given type. - mlir::Value buildNullValue(QualType Ty); + mlir::Value buildNullValue(QualType Ty, mlir::Location loc); //===--------------------------------------------------------------------===// // Visitor Methods @@ -123,7 +123,7 @@ class ScalarExprEmitter : public StmtVisitor { if (E->getType()->isVoidType()) return nullptr; - return buildNullValue(E->getType()); + return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange())); } mlir::Value VisitGNUNullExpr(const GNUNullExpr *E) { llvm_unreachable("NYI"); @@ -1321,8 +1321,8 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( return LHSLV; } -mlir::Value ScalarExprEmitter::buildNullValue(QualType Ty) { - return CGF.buildFromMemory(CGF.CGM.buildNullConstant(Ty), Ty); +mlir::Value ScalarExprEmitter::buildNullValue(QualType Ty, mlir::Location loc) { + return CGF.buildFromMemory(CGF.CGM.buildNullConstant(Ty, loc), Ty); } mlir::Value ScalarExprEmitter::buildCompoundAssign( diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index d7eb57d237cd..cbe8029cf6eb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1845,6 +1845,13 @@ CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { IsForDefinition); } + if (isa(D)) { + const CIRGenFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); + auto Ty = getTypes().GetFunctionType(FI); + return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false, + IsForDefinition); + } + llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 8c588ac9093f..a88c05b61701 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -327,7 +327,7 @@ class CIRGenModule { /// Return the result of value-initializing the given type, i.e. a null /// expression of the given type. This is usually, but not always, an LLVM /// null constant. - mlir::Value buildNullConstant(QualType T); + mlir::Value buildNullConstant(QualType T, mlir::Location loc); llvm::StringRef getMangledName(clang::GlobalDecl GD); diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 6b65d9516379..8b09c5d66671 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -23,6 +23,13 @@ void use_global_string() { unsigned char c = s2[0]; } +template +T func() { + return T(); +} + +int use_func() { return func(); } + // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @a = 3 : i32 // CHECK-NEXT: cir.global external @c = 2 : i64 @@ -40,13 +47,13 @@ void use_global_string() { // CHECK-NEXT: cir.global external @s2 = @".str": !cir.ptr -// CHECK: cir.func @_Z10use_globalv() { +// CHECK: cir.func @_Z10use_globalv() { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["li", init] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.get_global @a : cir.ptr // CHECK-NEXT: %2 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr -// CHECK: cir.func @_Z17use_global_stringv() { +// CHECK: cir.func @_Z17use_global_stringv() { // CHECK-NEXT: %0 = cir.alloca i8, cir.ptr , ["c", init] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @s2 : cir.ptr > // CHECK-NEXT: %2 = cir.load %1 : cir.ptr >, !cir.ptr @@ -54,3 +61,18 @@ void use_global_string() { // CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr // CHECK-NEXT: %5 = cir.load %4 : cir.ptr , i8 // CHECK-NEXT: cir.store %5, %0 : i8, cir.ptr + +// CHECK: cir.func linkonce_odr @_Z4funcIiET_v() -> i32 { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.return %2 : i32 +// CHECK-NEXT: } +// CHECK-NEXT: cir.func @_Z8use_funcv() -> i32 { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.call @_Z4funcIiET_v() : () -> i32 +// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.return %2 : i32 +// CHECK-NEXT: } \ No newline at end of file From 0f633f6a8afc753c63921fcff4c84f2eea9b6fbc Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 30 Dec 2022 18:06:31 -0300 Subject: [PATCH 0785/2301] [CIR][CIRGen][Coroutines] Fix bug in OnFallthrough coro body stmt Only emit it when it makes sense, add testcase. --- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 41 ++++++++++------ clang/lib/CIR/CodeGen/CIRGenFunction.h | 14 ++++++ clang/test/CIR/CodeGen/coro-task.cpp | 60 +++++++++++++++++++---- 3 files changed, 92 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 81247b808b55..d3829d74ddaa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -134,17 +134,24 @@ RValue CIRGenFunction::buildCoroutineFrame() { llvm_unreachable("NYI"); } -static mlir::LogicalResult buildBodyAndFallthrough(CIRGenFunction &CGF, - const CoroutineBodyStmt &S, - Stmt *Body) { +static mlir::LogicalResult buildBodyAndFallthrough( + CIRGenFunction &CGF, const CoroutineBodyStmt &S, Stmt *Body, + const CIRGenFunction::LexicalScopeContext *currLexScope) { if (CGF.buildStmt(Body, /*useCurrentScope=*/true).failed()) return mlir::failure(); - // LLVM codegen checks if a insert basic block is available in order - // to decide whether to getFallthroughHandler, sounds like it should - // be an assert, not clear. For CIRGen solely rely on getFallthroughHandler. - if (Stmt *OnFallthrough = S.getFallthroughHandler()) - if (CGF.buildStmt(OnFallthrough, /*useCurrentScope=*/true).failed()) - return mlir::failure(); + // Note that LLVM checks CanFallthrough by looking into the availability + // of the insert block which is kinda brittle and unintuitive, seems to be + // related with how landing pads are handled. + // + // CIRGen handles this by checking pre-existing co_returns in the current + // scope instead. Are we missing anything? + // + // From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock(); + const bool CanFallthrough = !currLexScope->hasCoreturn(); + if (CanFallthrough) + if (Stmt *OnFallthrough = S.getFallthroughHandler()) + if (CGF.buildStmt(OnFallthrough, /*useCurrentScope=*/true).failed()) + return mlir::failure(); return mlir::success(); } @@ -346,14 +353,18 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { // FIXME(cir): wrap buildBodyAndFallthrough with try/catch bits. if (S.getExceptionHandler()) assert(!UnimplementedFeature::unhandledException() && "NYI"); - if (buildBodyAndFallthrough(*this, S, S.getBody()).failed()) + if (buildBodyAndFallthrough(*this, S, S.getBody(), currLexScope).failed()) return mlir::failure(); - // FIXME(cir): LLVM checks CanFallthrough by looking into the availability - // of the insert block, do we need this? Likely not since fallthroughs - // usually get an implicit AST node for a CoreturnStmt. + // Note that LLVM checks CanFallthrough by looking into the availability + // of the insert block which is kinda brittle and unintuitive, seems to be + // related with how landing pads are handled. + // + // CIRGen handles this by checking pre-existing co_returns in the current + // scope instead. Are we missing anything? + // // From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock(); - const bool CanFallthrough = false; + const bool CanFallthrough = currLexScope->hasCoreturn(); const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0; if (CanFallthrough || HasCoreturns) { CurCoro.Data->CurrentAwaitKind = mlir::cir::AwaitKind::final; @@ -524,6 +535,8 @@ RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, mlir::LogicalResult CIRGenFunction::buildCoreturnStmt(CoreturnStmt const &S) { ++CurCoro.Data->CoreturnCount; + currLexScope->setCoreturn(); + const Expr *RV = S.getOperand(); if (RV && RV->getType()->isVoidType() && !isa(RV)) { // Make sure to evaluate the non initlist expression of a co_return diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 00ab9c406635..8f432f5777a6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -86,6 +86,7 @@ class CIRGenFunction { /// related with initialization and destruction of objects. /// ------- +public: // Represents a cir.scope, cir.if, and then/else regions. I.e. lexical // scopes that require cleanups. struct LexicalScopeContext { @@ -99,6 +100,12 @@ class CIRGenFunction { // from switches. mlir::Block *EntryBlock; + // On a coroutine body, the OnFallthrough sub stmt holds the handler + // (CoreturnStmt) for control flow falling off the body. Keep track + // of emitted co_return in this scope and allow OnFallthrough to be + // skipeed. + bool HasCoreturn = false; + // FIXME: perhaps we can use some info encoded in operations. enum Kind { Regular, // cir.if, cir.scope, if_regions @@ -112,6 +119,12 @@ class CIRGenFunction { : EntryBlock(eb), BeginLoc(b), EndLoc(e) {} ~LexicalScopeContext() = default; + // --- + // Coroutine tracking + // --- + bool hasCoreturn() const { return HasCoreturn; } + void setCoreturn() { HasCoreturn = true; } + // --- // Kind // --- @@ -198,6 +211,7 @@ class CIRGenFunction { mlir::Location BeginLoc, EndLoc; }; +private: class LexicalScopeGuard { CIRGenFunction &CGF; LexicalScopeContext *OldVal = nullptr; diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index d9dc6a284e83..ef0b0709abb8 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -3,6 +3,13 @@ namespace std { +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; + +template +typename remove_reference::type &&move(T &&t) noexcept; + template struct coroutine_traits { using promise_type = typename Ret::promise_type; }; @@ -34,6 +41,16 @@ struct string { string(); string(char const *s); }; + +template +struct optional { + optional(); + optional(const T&); + T &operator*() &; + T &&operator*() &&; + T &value() &; + T &&value() &&; +}; } // namespace std namespace folly { @@ -76,15 +93,21 @@ struct Task { SemiFuture semi(); }; -struct blocking_wait_fn { - template - T operator()(Task&& awaitable) const { - return T(); - } -}; +// FIXME: add CIRGen support here. +// struct blocking_wait_fn { +// template +// T operator()(Task&& awaitable) const { +// return T(); +// } +// }; + +// inline constexpr blocking_wait_fn blocking_wait{}; +// static constexpr blocking_wait_fn const& blockingWait = blocking_wait; -inline constexpr blocking_wait_fn blocking_wait{}; -static constexpr blocking_wait_fn const& blockingWait = blocking_wait; +template +T blockingWait(Task&& awaitable) { + return T(); +} template Task collectAllRange(Task* awaitable); @@ -252,4 +275,23 @@ folly::coro::Task byRef(const std::string& s) { // FIXME: this could be less redundant than two allocas + reloads // CHECK: cir.func coroutine @_Z5byRefRKSt6string(%arg0: !cir.ptr // CHECK: %[[#AllocaParam:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] -// CHECK: %[[#AllocaFnUse:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] \ No newline at end of file +// CHECK: %[[#AllocaFnUse:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] + +folly::coro::Task silly_coro() { + std::optional> task; + { + std::string s = "yolo"; + task = byRef(s); + } + folly::coro::blockingWait(std::move(task.value())); + co_return; +} + +// Make sure we properly handle OnFallthrough coro body sub stmt and +// check there are not multiple co_returns emitted. + +// CHECK: cir.func coroutine @_Z10silly_corov() +// CHECK: cir.await(init, ready : { +// CHECK: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv +// CHECK-NOT: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv +// CHECK: cir.await(final, ready : { \ No newline at end of file From 716c116782fecc3b833b17a7dcec245c55bb86a8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 30 Dec 2022 23:46:20 -0300 Subject: [PATCH 0786/2301] [CIR][CIRGen][NFC] Factor out some alloca insertion logic to the builder Also add an extra helper for buildAlloca --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 12 ++++++++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 32 +++++++------------------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +++ 3 files changed, 23 insertions(+), 24 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 8717f34dbc33..8e439db0c426 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -129,6 +129,18 @@ class CIRGenBuilderTy : public mlir::OpBuilder { src); } + OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) { + auto lastAlloca = + std::find_if(block->rbegin(), block->rend(), [](mlir::Operation &op) { + return mlir::isa(&op); + }); + + if (lastAlloca != block->rend()) + return OpBuilder::InsertPoint(block, + ++mlir::Block::iterator(&*lastAlloca)); + return OpBuilder::InsertPoint(block, block->begin()); + }; + // // Operation creation helpers // -------------------------- diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 54f4a70eab10..bce4b896b3b5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1629,37 +1629,21 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, mlir::Location loc, CharUnits alignment) { - auto getAllocaInsertPositionOp = - [&](mlir::Block **insertBlock) -> mlir::Operation * { - auto *parentBlock = currLexScope->getEntryBlock(); - - auto lastAlloca = std::find_if( - parentBlock->rbegin(), parentBlock->rend(), - [](mlir::Operation &op) { return isa(&op); }); - - *insertBlock = parentBlock; - if (lastAlloca == parentBlock->rend()) - return nullptr; - return &*lastAlloca; - }; + return buildAlloca( + name, ty, loc, alignment, + builder.getBestAllocaInsertPoint(currLexScope->getEntryBlock())); +} +mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, + mlir::Location loc, CharUnits alignment, + mlir::OpBuilder::InsertPoint ip) { auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), ty); auto alignIntAttr = CGM.getSize(alignment); mlir::Value addr; { mlir::OpBuilder::InsertionGuard guard(builder); - mlir::Block *insertBlock = nullptr; - mlir::Operation *insertOp = getAllocaInsertPositionOp(&insertBlock); - - if (insertOp) - builder.setInsertionPointAfter(insertOp); - else { - assert(insertBlock && "expected valid insertion block"); - // No previous alloca found, place this one in the beginning - // of the block. - builder.setInsertionPointToStart(insertBlock); - } + builder.restoreInsertionPoint(ip); addr = builder.create(loc, /*addr type*/ localVarPtrTy, /*var type*/ ty, name, alignIntAttr); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 8f432f5777a6..e8668e8d02f2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -386,6 +386,9 @@ class CIRGenFunction { mlir::Location loc, clang::CharUnits alignment); mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment); + mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, + mlir::Location loc, clang::CharUnits alignment, + mlir::OpBuilder::InsertPoint ip); void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, clang::CharUnits alignment); From fac2a9b5249072c8b2ca68f86683488be5abe8db Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Sat, 31 Dec 2022 08:50:32 -0300 Subject: [PATCH 0787/2301] [CIR][CIRGen][Coroutines] Add support for non-void promise calls LLVM codegen uses a plain rvalue result for coreturn'ing scalars. CIRGen uses store/load because await_resume is called within cir.await's resume region but the result is consumed in the outside scope, so we need to dominate all uses. When lowering to LLVM we can opt out this load/store. --- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 44 +++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 ++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +++ clang/lib/CIR/CodeGen/CIRGenValue.h | 1 + clang/test/CIR/CodeGen/coro-task.cpp | 26 +++++++++++++- 5 files changed, 75 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index d3829d74ddaa..57408145a8b3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -416,7 +416,8 @@ static LValueOrRValue buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, CoroutineSuspendExpr const &S, mlir::cir::AwaitKind Kind, AggValueSlot aggSlot, bool ignoreResult, - bool forLValue) { + mlir::Block *scopeParentBlock, + mlir::Value &tmpResumeRValAddr, bool forLValue) { auto *E = S.getCommonExpr(); auto awaitBuild = mlir::success(); @@ -488,9 +489,21 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, // enclosing cir.scope instead. if (forLValue) awaitRes.LV = CGF.buildLValue(S.getResumeExpr()); - else + else { awaitRes.RV = CGF.buildAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult); + if (!awaitRes.RV.isIgnored()) { + // Create the alloca in the block before the scope wrapping + // cir.await. + tmpResumeRValAddr = CGF.buildAlloca( + "__coawait_resume_rval", awaitRes.RV.getScalarVal().getType(), + loc, CharUnits::One(), + builder.getBestAllocaInsertPoint(scopeParentBlock)); + // Store the rvalue so we can reload it before the promise call. + builder.create(loc, awaitRes.RV.getScalarVal(), + tmpResumeRValAddr); + } + } if (TryStmt) { llvm_unreachable("NYI"); @@ -509,6 +522,16 @@ RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, bool ignoreResult) { RValue rval; auto scopeLoc = getLoc(E.getSourceRange()); + + // Since we model suspend / resume as an inner region, we must store + // resume scalar results in a tmp alloca, and load it after we build the + // suspend expression. An alternative way to do this would be to make + // every region return a value when promise.return_value() is used, but + // it's a bit awkward given that resume is the only region that actually + // returns a value. + mlir::Block *currEntryBlock = currLexScope->getEntryBlock(); + [[maybe_unused]] mlir::Value tmpResumeRValAddr; + builder.create( scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -527,9 +550,24 @@ RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, LexicalScopeGuard lexScopeGuard{*this, &lexScope}; rval = buildSuspendExpression(*this, *CurCoro.Data, E, CurCoro.Data->CurrentAwaitKind, aggSlot, - ignoreResult, /*forLValue*/ false) + ignoreResult, currEntryBlock, + tmpResumeRValAddr, /*forLValue*/ false) .RV; }); + + if (ignoreResult || rval.isIgnored()) + return rval; + + if (rval.isScalar()) { + rval = RValue::get(builder.create( + scopeLoc, rval.getScalarVal().getType(), tmpResumeRValAddr)); + } else if (rval.isAggregate()) { + // This is probably already handled via AggSlot, remove this assertion + // once we have a testcase and prove all pieces work. + llvm_unreachable("NYI"); + } else { // complex + llvm_unreachable("NYI"); + } return rval; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index bce4b896b3b5..3ac982716170 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1284,7 +1284,9 @@ void CIRGenFunction::buildAnyExprToMem(const Expr *E, Address Location, } case TEK_Scalar: { - assert(0 && "NYI"); + RValue RV = RValue::get(buildScalarExpr(E)); + LValue LV = makeAddrLValue(Location, E->getType()); + buildStoreThroughLValue(RV, LV); return; } } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index e8668e8d02f2..0ea39509d0f2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -382,6 +382,9 @@ class CIRGenFunction { mlir::LogicalResult declare(const clang::Decl *var, clang::QualType ty, mlir::Location loc, clang::CharUnits alignment, mlir::Value &addr, bool isParam = false); + +public: + // FIXME(cir): move this to CIRGenBuider.h mlir::Value buildAlloca(llvm::StringRef name, clang::QualType ty, mlir::Location loc, clang::CharUnits alignment); mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, @@ -389,6 +392,8 @@ class CIRGenFunction { mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, mlir::OpBuilder::InsertPoint ip); + +private: void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, clang::CharUnits alignment); diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index ab490e06d863..a1246c44c1da 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -50,6 +50,7 @@ class RValue { bool isScalar() const { return V1.getInt() == Scalar; } bool isComplex() const { return V1.getInt() == Complex; } bool isAggregate() const { return V1.getInt() == Aggregate; } + bool isIgnored() const { return isScalar() && !getScalarVal(); } bool isVolatileQualified() const { return V2.getInt(); } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index ef0b0709abb8..92df3035bc97 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -294,4 +294,28 @@ folly::coro::Task silly_coro() { // CHECK: cir.await(init, ready : { // CHECK: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv // CHECK-NOT: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv -// CHECK: cir.await(final, ready : { \ No newline at end of file +// CHECK: cir.await(final, ready : { + +folly::coro::Task go(int const& val) { + co_return val; +} +folly::coro::Task go1() { + auto task = go(1); + co_return co_await task; +} + +// CHECK: cir.func coroutine @_Z3go1v() +// CHECK: %[[#CoReturnValAddr:]] = cir.alloca i32, cir.ptr , ["__coawait_resume_rval"] {alignment = 1 : i64} +// CHECK: cir.await(init, ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: },) +// CHECK: } +// CHECK: cir.await(user, ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: %[[#ResumeVal:]] = cir.call @_ZN5folly4coro4TaskIiE12await_resumeEv(%3) +// CHECK: cir.store %[[#ResumeVal]], %[[#CoReturnValAddr]] : i32, cir.ptr +// CHECK: },) +// CHECK: %[[#V:]] = cir.load %[[#CoReturnValAddr]] : cir.ptr , i32 +// CHECK: cir.call @_ZN5folly4coro4TaskIiE12promise_type12return_valueEi({{.*}}, %[[#V]]) \ No newline at end of file From fcd28a896b6b966bb6e00ee9a187c2b9d18a2c34 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Sat, 31 Dec 2022 10:25:18 -0300 Subject: [PATCH 0788/2301] [CIR] Improve ScopeOp docs and builders --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 29 ++++++++++++++------ clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 12 ++++---- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 9 ++++++ 4 files changed, 36 insertions(+), 16 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 2331228e45b8..93a0e1807c2b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -532,19 +532,29 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, // ScopeOp //===----------------------------------------------------------------------===// -def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods, - RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { +def ScopeOp : CIR_Op<"scope", [ + DeclareOpInterfaceMethods, + RecursivelySpeculatable, AutomaticAllocationScope, + NoRegionArguments]> { let summary = "Represents a C/C++ scope"; let description = [{ `cir.scope` contains one region and defines a strict "scope" for all new values produced within its blocks. - Its region can contain an arbitrary number of blocks but usually defaults - to one. The `cir.yield` is a required terminator and can be optionally omitted. + The region can contain an arbitrary number of blocks but usually defaults + to one and can optionally return a value (useful for representing values + coming out of C++ full-expressions) via `cir.yield`: + + + ```mlir + %rvalue = cir.scope { + ... + cir.yield %value + } + ``` - A resulting value can also be specificed, though not currently used - together - with `cir.yield` should be helpful to represent lifetime extension out of short - lived scopes in the future. + If `cir.scope` yields no value, the `cir.yield` can be left out, and + will be inserted implicitly. }]; let results = (outs Variadic:$results); @@ -552,11 +562,12 @@ def ScopeOp : CIR_Op<"scope", [DeclareOpInterfaceMethods":$scopeBuilder)> + "function_ref":$scopeBuilder)>, + OpBuilder<(ins "function_ref":$scopeBuilder)> ]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 57408145a8b3..6192b4c4dfc7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -533,7 +533,7 @@ RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, [[maybe_unused]] mlir::Value tmpResumeRValAddr; builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { // FIXME(cir): abstract all this massive location handling elsewhere. SmallVector locs; diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 6addc746babc..3eaed149fc56 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -39,7 +39,7 @@ mlir::LogicalResult CIRGenFunction::buildCompoundStmt(const CompoundStmt &S) { SymTableScopeTy varScope(symbolTable); auto scopeLoc = getLoc(S.getSourceRange()); builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { auto fusedLoc = loc.cast(); auto locBegin = fusedLoc.getLocations()[0]; @@ -386,7 +386,7 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { // The if scope contains the full source range for IfStmt. auto scopeLoc = getLoc(S.getSourceRange()); builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { auto fusedLoc = loc.cast(); auto scopeLocBegin = fusedLoc.getLocations()[0]; @@ -678,7 +678,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { auto res = mlir::success(); auto scopeLoc = getLoc(S.getSourceRange()); builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { auto fusedLoc = loc.cast(); auto scopeLocBegin = fusedLoc.getLocations()[0]; @@ -730,7 +730,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { auto res = mlir::success(); auto scopeLoc = getLoc(S.getSourceRange()); builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { auto fusedLoc = loc.cast(); auto scopeLocBegin = fusedLoc.getLocations()[0]; @@ -787,7 +787,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { auto res = mlir::success(); auto scopeLoc = getLoc(S.getSourceRange()); builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { auto fusedLoc = loc.cast(); auto scopeLocBegin = fusedLoc.getLocations()[0]; @@ -889,7 +889,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { // The switch scope contains the full source range for SwitchStmt. auto scopeLoc = getLoc(S.getSourceRange()); builder.create( - scopeLoc, mlir::TypeRange(), /*scopeBuilder=*/ + scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { auto fusedLoc = loc.cast(); auto scopeLocBegin = fusedLoc.getLocations()[0]; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 49580212cf31..09e4b3593933 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -566,6 +566,15 @@ void ScopeOp::build(OpBuilder &builder, OperationState &result, scopeBuilder(builder, result.location); } +void ScopeOp::build(OpBuilder &builder, OperationState &result, + function_ref scopeBuilder) { + assert(scopeBuilder && "the builder callback for 'then' must be present"); + OpBuilder::InsertionGuard guard(builder); + Region *scopeRegion = result.addRegion(); + builder.createBlock(scopeRegion); + scopeBuilder(builder, result.location); +} + LogicalResult ScopeOp::verify() { return success(); } //===----------------------------------------------------------------------===// From 9640db073c6ad94ac0fdde07e6e472f73e8209ba Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Sat, 31 Dec 2022 14:24:28 -0300 Subject: [PATCH 0789/2301] [CIR] Add support for scope returning values This is prep work for modeling full-expressions as short-lived scopes with their own allocation / cleanups. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 ++++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 +++++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 93a0e1807c2b..4a1ccd7c2e69 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -565,8 +565,10 @@ def ScopeOp : CIR_Op<"scope", [ let skipDefaultBuilders = 1; let builders = [ - OpBuilder<(ins "TypeRange":$resultTypes, - "function_ref":$scopeBuilder)>, + // Scopes for yielding values. + OpBuilder<(ins + "function_ref":$scopeBuilder)>, + // Scopes without yielding values. OpBuilder<(ins "function_ref":$scopeBuilder)> ]; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 09e4b3593933..54148b27c17b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -417,7 +417,7 @@ bool shouldPrintTerm(mlir::Region &r) { if (isa(entryBlock->back())) return true; YieldOp y = dyn_cast(entryBlock->back()); - if (y && !y.isPlain()) + if (y && (!y.isPlain() || !y.getArgs().empty())) return true; return false; } @@ -546,7 +546,7 @@ void ScopeOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // The only region always branch back to the parent operation. if (!point.isParent()) { - regions.push_back(RegionSuccessor()); + regions.push_back(RegionSuccessor(getResults())); return; } @@ -554,16 +554,18 @@ void ScopeOp::getSuccessorRegions(mlir::RegionBranchPoint point, regions.push_back(RegionSuccessor(&getScopeRegion())); } -void ScopeOp::build(OpBuilder &builder, OperationState &result, - TypeRange resultTypes, - function_ref scopeBuilder) { +void ScopeOp::build( + OpBuilder &builder, OperationState &result, + function_ref scopeBuilder) { assert(scopeBuilder && "the builder callback for 'then' must be present"); - result.addTypes(resultTypes); OpBuilder::InsertionGuard guard(builder); Region *scopeRegion = result.addRegion(); builder.createBlock(scopeRegion); - scopeBuilder(builder, result.location); + + mlir::Type yieldTy; + scopeBuilder(builder, yieldTy, result.location); + result.addTypes(TypeRange{yieldTy}); } void ScopeOp::build(OpBuilder &builder, OperationState &result, From 20e5ae8a02600272d5a68628fb82ec47d1ce9c36 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Sat, 31 Dec 2022 14:29:18 -0300 Subject: [PATCH 0790/2301] [CIR][CIRGen] Model full-expressions with short-lived scopes While here narrow the scope of resume temporary, add more testcases. --- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 29 ++++++---------------- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 28 ++++++++++++++++++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 ++- clang/test/CIR/CodeGen/coro-task.cpp | 7 ++++-- clang/test/CIR/CodeGen/fullexpr.cpp | 20 +++++++++++++++ 5 files changed, 60 insertions(+), 28 deletions(-) create mode 100644 clang/test/CIR/CodeGen/fullexpr.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 6192b4c4dfc7..f11b20596c10 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -532,28 +532,13 @@ RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, mlir::Block *currEntryBlock = currLexScope->getEntryBlock(); [[maybe_unused]] mlir::Value tmpResumeRValAddr; - builder.create( - scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - // FIXME(cir): abstract all this massive location handling elsewhere. - SmallVector locs; - if (loc.isa()) { - locs.push_back(loc); - locs.push_back(loc); - } else if (loc.isa()) { - auto fusedLoc = loc.cast(); - locs.push_back(fusedLoc.getLocations()[0]); - locs.push_back(fusedLoc.getLocations()[1]); - } - LexicalScopeContext lexScope{locs[0], locs[1], - builder.getInsertionBlock()}; - LexicalScopeGuard lexScopeGuard{*this, &lexScope}; - rval = buildSuspendExpression(*this, *CurCoro.Data, E, - CurCoro.Data->CurrentAwaitKind, aggSlot, - ignoreResult, currEntryBlock, - tmpResumeRValAddr, /*forLValue*/ false) - .RV; - }); + // No need to explicitly wrap this into a scope since the AST already uses a + // ExprWithCleanups, which will wrap this into a cir.scope anyways. + rval = buildSuspendExpression(*this, *CurCoro.Data, E, + CurCoro.Data->CurrentAwaitKind, aggSlot, + ignoreResult, currEntryBlock, tmpResumeRValAddr, + /*forLValue*/ false) + .RV; if (ignoreResult || rval.isIgnored()) return rval; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 8949be241b6c..4e8cd2f68c20 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1350,11 +1350,33 @@ mlir::Value ScalarExprEmitter::buildCompoundAssign( } mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { - // TODO(cir): CodeGenFunction::RunCleanupsScope Scope(CGF); - mlir::Value V = Visit(E->getSubExpr()); + auto scopeLoc = CGF.getLoc(E->getSourceRange()); + auto &builder = CGF.builder; + + auto scope = builder.create( + scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) { + SmallVector locs; + if (loc.isa()) { + locs.push_back(loc); + locs.push_back(loc); + } else if (loc.isa()) { + auto fusedLoc = loc.cast(); + locs.push_back(fusedLoc.getLocations()[0]); + locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{ + locs[0], locs[1], builder.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexScopeGuard{CGF, &lexScope}; + auto scopeYieldVal = Visit(E->getSubExpr()); + if (scopeYieldVal) { + builder.create(loc, scopeYieldVal); + yieldTy = scopeYieldVal.getType(); + } + }); // Defend against dominance problems caused by jumps out of expression // evaluation through the shared cleanup block. // TODO(cir): Scope.ForceCleanup({&V}); - return V; + return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 54148b27c17b..74f3da9d6d82 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -565,7 +565,9 @@ void ScopeOp::build( mlir::Type yieldTy; scopeBuilder(builder, yieldTy, result.location); - result.addTypes(TypeRange{yieldTy}); + + if (yieldTy) + result.addTypes(TypeRange{yieldTy}); } void ScopeOp::build(OpBuilder &builder, OperationState &result, diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 92df3035bc97..a86dc03ae466 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -201,7 +201,10 @@ VoidTask silly_task() { // First regions `ready` has a special cir.yield code to veto suspension. // CHECK: cir.await(init, ready : { -// CHECK: %[[#ReadyVeto:]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[#SuspendAlwaysAddr]]) +// CHECK: %[[#ReadyVeto:]] = cir.scope { +// CHECK: %[[#TmpCallRes:]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[#SuspendAlwaysAddr]]) +// CHECK: cir.yield %[[#TmpCallRes]] : !cir.bool +// CHECK: } // CHECK: cir.if %[[#ReadyVeto]] { // CHECK: cir.yield nosuspend // CHECK: } @@ -305,12 +308,12 @@ folly::coro::Task go1() { } // CHECK: cir.func coroutine @_Z3go1v() -// CHECK: %[[#CoReturnValAddr:]] = cir.alloca i32, cir.ptr , ["__coawait_resume_rval"] {alignment = 1 : i64} // CHECK: cir.await(init, ready : { // CHECK: }, suspend : { // CHECK: }, resume : { // CHECK: },) // CHECK: } +// CHECK: %[[#CoReturnValAddr:]] = cir.alloca i32, cir.ptr , ["__coawait_resume_rval"] {alignment = 1 : i64} // CHECK: cir.await(user, ready : { // CHECK: }, suspend : { // CHECK: }, resume : { diff --git a/clang/test/CIR/CodeGen/fullexpr.cpp b/clang/test/CIR/CodeGen/fullexpr.cpp new file mode 100644 index 000000000000..52d5556b5416 --- /dev/null +++ b/clang/test/CIR/CodeGen/fullexpr.cpp @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int go(int const& val); + +int go1() { + auto x = go(1); + return x; +} + +// CHECK: cir.func @_Z3go1v() -> i32 { +// CHECK: %[[#XAddr:]] = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: %[[#RVal:]] = cir.scope { +// CHECK-NEXT: %[[#TmpAddr:]] = cir.alloca i32, cir.ptr , ["ref.tmp0", init] {alignment = 4 : i64} +// CHECK-NEXT: %[[#One:]] = cir.cst(1 : i32) : i32 +// CHECK-NEXT: cir.store %[[#One]], %[[#TmpAddr]] : i32, cir.ptr +// CHECK-NEXT: %[[#RValTmp:]] = cir.call @_Z2goRKi(%[[#TmpAddr]]) : (!cir.ptr) -> i32 +// CHECK-NEXT: cir.yield %[[#RValTmp]] : i32 +// CHECK-NEXT: } +// CHECK-NEXT: cir.store %[[#RVal]], %[[#XAddr]] : i32, cir.ptr \ No newline at end of file From 6a0e8de3c99baa824ead9eadd658f583fe3f36fa Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 11 Jan 2023 19:07:51 -0500 Subject: [PATCH 0791/2301] [CIR][Lowering] Fix the result type of castop lowering For int_to_bool we always convert to cir.bool and then rely on type conversion to get us to an i8 and some truncs/zexts to handle LLVM treating a C++ bool as an i1 at times and an i8 at others. I mistakenly used the srcType here. A test for this is in loopop lowering as follows. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d2724c035ae2..323f7be32884 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -74,7 +74,8 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { src.getLoc(), src.getType(), mlir::IntegerAttr::get(src.getType(), 0)); rewriter.replaceOpWithNewOp( - castOp, src.getType(), mlir::cir::CmpOpKind::ne, src, zero); + castOp, mlir::cir::BoolType::get(getContext()), + mlir::cir::CmpOpKind::ne, src, zero); break; } default: @@ -503,8 +504,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { auto type = adaptor.getLhs().getType(); auto i1Type = mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); - auto i8Type = - mlir::IntegerType::get(getContext(), 8, mlir::IntegerType::Signless); + auto destType = getTypeConverter()->convertType(cmpOp.getType()); switch (adaptor.getKind()) { case mlir::cir::CmpOpKind::gt: { @@ -517,7 +517,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { cmpOp.getLoc(), i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else if (type.isa()) { auto cmp = rewriter.create( @@ -527,7 +527,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { adaptor.getLhs(), adaptor.getRhs(), // TODO(CIR): These fastmath flags need to not be defaulted. mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); @@ -544,7 +544,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { cmpOp.getLoc(), i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else if (type.isa()) { auto cmp = rewriter.create( @@ -553,7 +553,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::LLVM::FCmpPredicate::uge), adaptor.getLhs(), adaptor.getRhs(), mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); @@ -570,7 +570,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { cmpOp.getLoc(), i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else if (type.isa()) { auto cmp = rewriter.create( @@ -579,7 +579,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::LLVM::FCmpPredicate::ult), adaptor.getLhs(), adaptor.getRhs(), mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); @@ -596,7 +596,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { cmpOp.getLoc(), i1Type, mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else if (type.isa()) { auto cmp = rewriter.create( @@ -605,7 +605,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::LLVM::FCmpPredicate::ule), adaptor.getLhs(), adaptor.getRhs(), mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); @@ -619,7 +619,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::LLVM::ICmpPredicateAttr::get(getContext(), mlir::LLVM::ICmpPredicate::eq), adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else if (type.isa()) { auto cmp = rewriter.create( @@ -628,7 +628,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::LLVM::FCmpPredicate::ueq), adaptor.getLhs(), adaptor.getRhs(), mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); @@ -643,7 +643,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::LLVM::ICmpPredicate::ne), adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else if (type.isa()) { auto cmp = rewriter.create( @@ -652,7 +652,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::LLVM::FCmpPredicate::une), adaptor.getLhs(), adaptor.getRhs(), mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, i8Type, + rewriter.replaceOpWithNewOp(cmpOp, destType, cmp.getRes()); } else { llvm_unreachable("Unknown Operand Type"); From 77d737820058067bceb08691488669cdb8a3a6ad Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 11 Jan 2023 12:41:45 -0500 Subject: [PATCH 0792/2301] [CIR][Lowering] Lower cir.loop ops This is a continuation of the logic from the lowering of scope and if ops. Loop just has more regions to thread together. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 142 ++++++++++++++---- clang/test/CIR/Lowering/for.cir | 96 ++++++++++++ 2 files changed, 207 insertions(+), 31 deletions(-) create mode 100644 clang/test/CIR/Lowering/for.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 323f7be32884..58478577c581 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -31,6 +31,7 @@ #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/DialectConversion.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/Sequence.h" @@ -40,6 +41,99 @@ using namespace llvm; namespace cir { namespace direct { +class CIRLoopOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + if (loopOp.getKind() != mlir::cir::LoopOpKind::For) + llvm_unreachable("NYI"); + + auto loc = loopOp.getLoc(); + + auto *currentBlock = rewriter.getInsertionBlock(); + auto *remainingOpsBlock = + rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); + mlir::Block *continueBlock; + if (loopOp->getResults().size() == 0) + continueBlock = remainingOpsBlock; + else + llvm_unreachable("NYI"); + + auto &condRegion = loopOp.getCond(); + auto &condFrontBlock = condRegion.front(); + + auto &stepRegion = loopOp.getStep(); + auto &stepFrontBlock = stepRegion.front(); + auto &stepBackBlock = stepRegion.back(); + + auto &bodyRegion = loopOp.getBody(); + auto &bodyFrontBlock = bodyRegion.front(); + auto &bodyBackBlock = bodyRegion.back(); + + bool rewroteContinue = false; + bool rewroteBreak = false; + + for (auto &bb : condRegion) { + if (rewroteContinue && rewroteBreak) + break; + + if (auto yieldOp = dyn_cast(bb.getTerminator())) { + rewriter.setInsertionPointToEnd(yieldOp->getBlock()); + if (yieldOp.getKind().has_value()) { + switch (yieldOp.getKind().value()) { + case mlir::cir::YieldOpKind::Break: + case mlir::cir::YieldOpKind::Fallthrough: + case mlir::cir::YieldOpKind::NoSuspend: + llvm_unreachable("None of these should be present"); + case mlir::cir::YieldOpKind::Continue:; + rewriter.replaceOpWithNewOp( + yieldOp, yieldOp.getArgs(), &stepFrontBlock); + rewroteContinue = true; + } + } else { + rewriter.replaceOpWithNewOp( + yieldOp, yieldOp.getArgs(), continueBlock); + rewroteBreak = true; + } + } + } + + rewriter.inlineRegionBefore(condRegion, continueBlock); + + rewriter.inlineRegionBefore(stepRegion, continueBlock); + + if (auto stepYieldOp = + dyn_cast(stepBackBlock.getTerminator())) { + rewriter.setInsertionPointToEnd(stepYieldOp->getBlock()); + rewriter.replaceOpWithNewOp( + stepYieldOp, stepYieldOp.getArgs(), &bodyFrontBlock); + } else { + llvm_unreachable("What are we terminating with?"); + } + + rewriter.inlineRegionBefore(bodyRegion, continueBlock); + + if (auto bodyYieldOp = + dyn_cast(bodyBackBlock.getTerminator())) { + rewriter.setInsertionPointToEnd(bodyYieldOp->getBlock()); + rewriter.replaceOpWithNewOp( + bodyYieldOp, bodyYieldOp.getArgs(), &condFrontBlock); + } else { + llvm_unreachable("What are we terminating with?"); + } + + rewriter.setInsertionPointToEnd(currentBlock); + rewriter.create(loc, mlir::ValueRange(), &condFrontBlock); + + rewriter.replaceOp(loopOp, continueBlock->getArguments()); + + return mlir::success(); + } +}; + class CIRBrCondOpLowering : public mlir::OpConversionPattern { public: @@ -93,15 +187,9 @@ class CIRIfLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::IfOp ifOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto &thenRegion = ifOp.getThenRegion(); - auto &elseRegion = ifOp.getElseRegion(); - - (void)thenRegion; - (void)elseRegion; - mlir::OpBuilder::InsertionGuard guard(rewriter); - [[maybe_unused]] auto loc = ifOp.getLoc(); + auto loc = ifOp.getLoc(); auto *currentBlock = rewriter.getInsertionBlock(); auto *remainingOpsBlock = @@ -113,28 +201,24 @@ class CIRIfLowering : public mlir::OpConversionPattern { llvm_unreachable("NYI"); // Inline then region - [[maybe_unused]] auto *thenBeforeBody = &ifOp.getThenRegion().front(); - [[maybe_unused]] auto *thenAfterBody = &ifOp.getThenRegion().back(); + auto *thenBeforeBody = &ifOp.getThenRegion().front(); + auto *thenAfterBody = &ifOp.getThenRegion().back(); rewriter.inlineRegionBefore(ifOp.getThenRegion(), continueBlock); rewriter.setInsertionPointToEnd(thenAfterBody); if (auto thenYieldOp = dyn_cast(thenAfterBody->getTerminator())) { - [[maybe_unused]] auto thenBranchOp = - rewriter.replaceOpWithNewOp( - thenYieldOp, thenYieldOp.getArgs(), continueBlock); - } else if (auto thenReturnOp = dyn_cast( - thenAfterBody->getTerminator())) { - ; - } else { + rewriter.replaceOpWithNewOp( + thenYieldOp, thenYieldOp.getArgs(), continueBlock); + } else if (!dyn_cast(thenAfterBody->getTerminator())) { llvm_unreachable("what are we terminating with?"); } rewriter.setInsertionPointToEnd(continueBlock); // Inline then region - [[maybe_unused]] auto *elseBeforeBody = &ifOp.getElseRegion().front(); - [[maybe_unused]] auto *elseAfterBody = &ifOp.getElseRegion().back(); + auto *elseBeforeBody = &ifOp.getElseRegion().front(); + auto *elseAfterBody = &ifOp.getElseRegion().back(); rewriter.inlineRegionBefore(ifOp.getElseRegion(), thenAfterBody); rewriter.setInsertionPointToEnd(currentBlock); @@ -146,17 +230,12 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(elseAfterBody); if (auto elseYieldOp = dyn_cast(elseAfterBody->getTerminator())) { - [[maybe_unused]] auto elseBranchOp = - rewriter.replaceOpWithNewOp( - elseYieldOp, elseYieldOp.getArgs(), continueBlock); - } else if (auto elseReturnOp = dyn_cast( - elseAfterBody->getTerminator())) { - ; - } else { + rewriter.replaceOpWithNewOp( + elseYieldOp, elseYieldOp.getArgs(), continueBlock); + } else if (!dyn_cast(elseAfterBody->getTerminator())) { llvm_unreachable("what are we terminating with?"); } - rewriter.setInsertionPoint(elseAfterBody->getTerminator()); rewriter.replaceOp(ifOp, continueBlock->getArguments()); return mlir::success(); @@ -681,11 +760,12 @@ class CIRBrOpLowering : public mlir::OpRewritePattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns.add(converter, patterns.getContext()); + patterns.add(converter, + patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/for.cir b/clang/test/CIR/Lowering/for.cir new file mode 100644 index 000000000000..bc023ed896b7 --- /dev/null +++ b/clang/test/CIR/Lowering/for.cir @@ -0,0 +1,96 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.cst(0 : i32) : i32 + cir.store %1, %0 : i32, cir.ptr + cir.loop for(cond : { + %2 = cir.load %0 : cir.ptr , i32 + %3 = cir.cst(10 : i32) : i32 + %4 = cir.cmp(lt, %2, %3) : i32, i32 + %5 = cir.cast(int_to_bool, %4 : i32), !cir.bool + cir.brcond %5 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + %2 = cir.load %0 : cir.ptr , i32 + %3 = cir.unary(inc, %2) : i32, i32 + cir.store %3, %0 : i32, cir.ptr + cir.yield + }) { + cir.yield + } + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @foo() { +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: llvm.store %2, %1 : i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb1 +// MLIR-NEXT: ^bb1: // 2 preds: ^bb0, ^bb5 +// MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %4 = llvm.mlir.constant(10 : i32) : i32 +// MLIR-NEXT: %5 = llvm.icmp "ult" %3, %4 : i32 +// MLIR-NEXT: %6 = llvm.zext %5 : i1 to i32 +// MLIR-NEXT: %7 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: %8 = llvm.icmp "ne" %6, %7 : i32 +// MLIR-NEXT: %9 = llvm.zext %8 : i1 to i8 +// MLIR-NEXT: %10 = llvm.trunc %9 : i8 to i1 +// MLIR-NEXT: llvm.cond_br %10, ^bb2, ^bb3 +// MLIR-NEXT: ^bb2: // pred: ^bb1 +// MLIR-NEXT: llvm.br ^bb4 +// MLIR-NEXT: ^bb3: // pred: ^bb1 +// MLIR-NEXT: llvm.br ^bb6 +// MLIR-NEXT: ^bb4: // pred: ^bb2 +// MLIR-NEXT: %11 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %12 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: %13 = llvm.add %11, %12 : i32 +// MLIR-NEXT: llvm.store %13, %1 : i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb5 +// MLIR-NEXT: ^bb5: // pred: ^bb4 +// MLIR-NEXT: llvm.br ^bb1 +// MLIR-NEXT: ^bb6: // pred: ^bb3 +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: define void @foo() { +// LLVM-NEXT: %1 = alloca i32, i64 1, align 4 +// LLVM-NEXT: store i32 0, ptr %1, align 4 +// LLVM-NEXT: br label %2 +// LLVM-EMPTY: +// LLVM-NEXT: 2: +// LLVM-NEXT: %3 = load i32, ptr %1, align 4 +// LLVM-NEXT: %4 = icmp ult i32 %3, 10 +// LLVM-NEXT: %5 = zext i1 %4 to i32 +// LLVM-NEXT: %6 = icmp ne i32 %5, 0 +// LLVM-NEXT: %7 = zext i1 %6 to i8 +// LLVM-NEXT: %8 = trunc i8 %7 to i1 +// LLVM-NEXT: br i1 %8, label %9, label %10 +// LLVM-EMPTY: +// LLVM-NEXT: 9: +// LLVM-NEXT: br label %11 +// LLVM-EMPTY: +// LLVM-NEXT: 10: +// LLVM-NEXT: br label %15 +// LLVM-EMPTY: +// LLVM-NEXT: 11: +// LLVM-NEXT: %12 = load i32, ptr %1, align 4 +// LLVM-NEXT: %13 = add i32 %12, 1 +// LLVM-NEXT: store i32 %13, ptr %1, align 4 +// LLVM-NEXT: br label %14 +// LLVM-EMPTY: +// LLVM-NEXT: 14: +// LLVM-NEXT: br label %2 +// LLVM-EMPTY: +// LLVM-NEXT: 15: +// LLVM-NEXT: ret void +// LLVM-NEXT: } From 07f69a2a2b6ae468be5c3e53960c543aa1516661 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 11 Jan 2023 19:10:19 -0500 Subject: [PATCH 0793/2301] [CIR][NFC] Add clang-format off/on comments for some code --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 58478577c581..b4252f3d3d04 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -797,6 +797,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { mlir::ConversionTarget target(getContext()); using namespace mlir::cir; + // clang-format off target.addLegalOp(); + // clang-format on target.addLegalDialect(); target.addIllegalDialect(); From 2673451992334dec2a49387d5082e7670a4d01f1 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 11 Jan 2023 19:48:07 -0500 Subject: [PATCH 0794/2301] [CIR][Lowering] Support lowering cir.ptrstride This is trivial at the moment and is just a direct mapping to GEP --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 7 ++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 29 ++++++++++++--- clang/test/CIR/Lowering/ptrstride.cir | 36 +++++++++++++++++++ 3 files changed, 67 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/Lowering/ptrstride.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 4a1ccd7c2e69..03253cce7999 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -119,6 +119,13 @@ def PtrStrideOp : CIR_Op<"ptr_stride", `,` type($result) attr-dict }]; + let extraClassDeclaration = [{ + // Get type pointed by the base pointer. + mlir::Type getElementTy() { + return getBase().getType().cast().getPointee(); + } + }]; + // SameFirstOperandAndResultType already checks all we need. let hasVerifier = 0; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b4252f3d3d04..f9e0ad790c3d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -41,6 +41,25 @@ using namespace llvm; namespace cir { namespace direct { +class CIRPtrStrideOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::PtrStrideOp ptrStrideOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto *tc = getTypeConverter(); + const auto resultTy = tc->convertType(ptrStrideOp.getType()); + const auto elementTy = tc->convertType(ptrStrideOp.getElementTy()); + rewriter.replaceOpWithNewOp(ptrStrideOp, resultTy, + elementTy, adaptor.getBase(), + adaptor.getStride()); + + return mlir::success(); + } +}; + class CIRLoopOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -761,11 +780,11 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); patterns.add(converter, - patterns.getContext()); + CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, + CIRBinOpLowering, CIRLoadLowering, CIRConstantLowering, + CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, + CIRScopeOpLowering, CIRCastOpLowering, CIRIfLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir new file mode 100644 index 000000000000..a151ae645b32 --- /dev/null +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -0,0 +1,36 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @f(%arg0: !cir.ptr) { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, cir.ptr > + %1 = cir.load %0 : cir.ptr >, !cir.ptr + %2 = cir.cst(1 : i32) : i32 + %3 = cir.ptr_stride(%1 : !cir.ptr, %2 : i32), !cir.ptr + %4 = cir.load %3 : cir.ptr , i32 + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @f(%arg0: !llvm.ptr) { +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: llvm.store %arg0, %1 : !llvm.ptr, !llvm.ptr +// MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %3 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: %4 = llvm.getelementptr %2[%3] : (!llvm.ptr, i32) -> !llvm.ptr +// MLIR-NEXT: %5 = llvm.load %4 : !llvm.ptr +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: define void @f(ptr %0) { +// LLVM-NEXT: %2 = alloca ptr, i64 1, align 8 +// LLVM-NEXT: store ptr %0, ptr %2, align 8 +// LLVM-NEXT: %3 = load ptr, ptr %2, align 8 +// LLVM-NEXT: %4 = getelementptr i32, ptr %3, i32 1 +// LLVM-NEXT: %5 = load i32, ptr %4, align 4 +// LLVM-NEXT: ret void +// LLVM-NEXT: } From 7aec2cf11d062abda340e0a725d6fb8e3d4dff92 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 11 Jan 2023 20:23:20 -0500 Subject: [PATCH 0795/2301] [CIR][CIRGen] Fleshout some more members and getters in LValue --- clang/lib/CIR/CodeGen/CIRGenValue.h | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index a1246c44c1da..ca915ccdf7a3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -168,6 +168,10 @@ class LValue { clang::QualType Type; clang::Qualifiers Quals; + // LValue is non-gc'able for any reason, including being a parameter or local + // variable. + bool NonGC : 1; + // This flag shows if a nontemporal load/stores should be used when accessing // this lvalue. bool Nontemporal : 1; @@ -198,6 +202,7 @@ class LValue { // TODO: ObjC flags // Initialize Objective-C flags. + this->NonGC = false; this->Nontemporal = false; } @@ -216,14 +221,25 @@ class LValue { bool isGlobalReg() const { return LVType == GlobalReg; } bool isMatrixElt() const { return LVType == MatrixElt; } + bool isVolatileQualified() const { return Quals.hasVolatile(); } + unsigned getVRQualifiers() const { return Quals.getCVRQualifiers() & ~clang::Qualifiers::Const; } - bool isVolatile() const { return Quals.hasVolatile(); } + bool isNonGC() const { return NonGC; } bool isNontemporal() const { return Nontemporal; } + bool isObjCWeak() const { + return Quals.getObjCGCAttr() == clang::Qualifiers::Weak; + } + bool isObjCStrong() const { + return Quals.getObjCGCAttr() == clang::Qualifiers::Strong; + } + + bool isVolatile() const { return Quals.hasVolatile(); } + clang::QualType getType() const { return Type; } mlir::Value getPointer() const { return V; } From 9a67a277f949b81eee9fba5e046257e89be2590b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 11 Jan 2023 20:24:58 -0500 Subject: [PATCH 0796/2301] [CIR][CIRGen] Support simple storing of variables Surprised we've missed this up to now. Simple implementation that just required fleshing out some stubs. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 8 +++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 57 ++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 ++ clang/test/CIR/CodeGen/store.c | 17 +++++++ 4 files changed, 84 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/store.c diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 76186376f76b..ec88ff92b401 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -244,6 +244,14 @@ void CIRGenFunction::buildVarDecl(const VarDecl &D) { return buildAutoVarDecl(D); } +void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS, + SourceLocation Loc) { + if (!SanOpts.has(SanitizerKind::NullabilityAssign)) + return; + + llvm_unreachable("NYI"); +} + void CIRGenFunction::buildScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue) { // TODO: this is where a lot of ObjC lifetime stuff would be done. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 4e8cd2f68c20..c918858eb519 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -47,6 +47,9 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Type ConvertType(QualType T) { return CGF.ConvertType(T); } LValue buildLValue(const Expr *E) { return CGF.buildLValue(E); } + LValue buildCheckedLValue(const Expr *E, CIRGenFunction::TypeCheckKind TCK) { + return CGF.buildCheckedLValue(E, TCK); + } /// Emit a value that corresponds to null for the given type. mlir::Value buildNullValue(QualType Ty, mlir::Location loc); @@ -493,9 +496,7 @@ class ScalarExprEmitter : public StmtVisitor { VISITCOMP(NE) #undef VISITCOMP - mlir::Value VisitBinAssign(const BinaryOperator *E) { - llvm_unreachable("NYI"); - } + mlir::Value VisitBinAssign(const BinaryOperator *E); mlir::Value VisitBinLAnd(const BinaryOperator *E) { llvm_unreachable("NYI"); } mlir::Value VisitBinLOr(const BinaryOperator *E) { llvm_unreachable("NYI"); } mlir::Value VisitBinComma(const BinaryOperator *E) { @@ -1380,3 +1381,53 @@ mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { // TODO(cir): Scope.ForceCleanup({&V}); return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr; } + +mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { + bool Ignore = TestAndClearIgnoreResultAssign(); + + mlir::Value RHS; + LValue LHS; + + switch (E->getLHS()->getType().getObjCLifetime()) { + case Qualifiers::OCL_Strong: + llvm_unreachable("NYI"); + case Qualifiers::OCL_Autoreleasing: + llvm_unreachable("NYI"); + case Qualifiers::OCL_ExplicitNone: + llvm_unreachable("NYI"); + case Qualifiers::OCL_Weak: + llvm_unreachable("NYI"); + case Qualifiers::OCL_None: + // __block variables need to have the rhs evaluated first, plus this should + // improve codegen just a little. + RHS = Visit(E->getRHS()); + LHS = buildCheckedLValue(E->getLHS(), CIRGenFunction::TCK_Store); + + // Store the value into the LHS. Bit-fields are handled specially because + // the result is altered by the store, i.e., [C99 6.5.16p1] + // 'An assignment expression has the value of the left operand after the + // assignment...'. + if (LHS.isBitField()) { + llvm_unreachable("NYI"); + } else { + CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); + CGF.currSrcLoc = CGF.getLoc(E->getBeginLoc()); + CGF.buildStoreThroughLValue(RValue::get(RHS), LHS); + } + } + + // If the result is clearly ignored, return now. + if (Ignore) + return nullptr; + + // The result of an assignment in C is the assigned r-value. + if (!CGF.getLangOpts().CPlusPlus) + return RHS; + + // If the lvalue is non-volatile, return the computed value of the assignment. + if (!LHS.isVolatileQualified()) + llvm_unreachable("NYI"); + + // Otherwise, reload the value. + return buildLoadOfLValue(LHS, E->getExprLoc()); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 0ea39509d0f2..b50e0f856965 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -972,6 +972,11 @@ class CIRGenFunction { clang::LabelDecl *L, mlir::Location Loc); + /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is + /// nonnull, if 1\p LHS is marked _Nonnull. + void buildNullabilityCheck(LValue LHS, mlir::Value RHS, + clang::SourceLocation Loc); + void buildScalarInit(const clang::Expr *init, const clang::ValueDecl *D, LValue lvalue); diff --git a/clang/test/CIR/CodeGen/store.c b/clang/test/CIR/CodeGen/store.c new file mode 100644 index 000000000000..816cd6e4d97a --- /dev/null +++ b/clang/test/CIR/CodeGen/store.c @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo() { + int a = 0; + a = 1; +} + +// CHECK: cir.func @foo() { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr +// CHECK-NEXT: %2 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + From bf4b5d4ee18ecb3dfaa7f7439907f0010d152a2e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 11 Jan 2023 20:25:25 -0500 Subject: [PATCH 0797/2301] [CIR][CodeGen] Explicitly fail on some ObjC stuff in buildStoreThroughLValue --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 3ac982716170..651cc6b09f39 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -302,7 +302,20 @@ RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { assert(Dst.isSimple() && "only implemented simple"); - // TODO: ObjC lifetime. + + // There's special magic for assigning into an ARC-qualified l-value. + if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) { + llvm_unreachable("NYI"); + } + + if (Dst.isObjCWeak() && !Dst.isNonGC()) { + llvm_unreachable("NYI"); + } + + if (Dst.isObjCStrong() && !Dst.isNonGC()) { + llvm_unreachable("NYI"); + } + assert(Src.isScalar() && "Can't emit an agg store with this method"); buildStoreOfScalar(Src.getScalarVal(), Dst); } From 0284d07d800ba32aaff9f0b5012bb69b435634c7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 11 Jan 2023 20:31:01 -0500 Subject: [PATCH 0798/2301] [CIR][Lowering] Add the dot.cir test! --- clang/test/CIR/Lowering/dot.cir | 188 ++++++++++++++++++++++++++++++++ 1 file changed, 188 insertions(+) create mode 100644 clang/test/CIR/Lowering/dot.cir diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir new file mode 100644 index 000000000000..d769cb95da8b --- /dev/null +++ b/clang/test/CIR/Lowering/dot.cir @@ -0,0 +1,188 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @dot(%arg0: !cir.ptr, %arg1: !cir.ptr, %arg2: i32) -> f64 { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr, cir.ptr >, ["b", init] {alignment = 8 : i64} + %2 = cir.alloca i32, cir.ptr , ["size", init] {alignment = 4 : i64} + %3 = cir.alloca f64, cir.ptr , ["__retval"] {alignment = 8 : i64} + %4 = cir.alloca f64, cir.ptr , ["q", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, cir.ptr > + cir.store %arg1, %1 : !cir.ptr, cir.ptr > + cir.store %arg2, %2 : i32, cir.ptr + %5 = cir.cst(0.000000e+00 : f64) : f64 + cir.store %5, %4 : f64, cir.ptr + cir.scope { + %8 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} + %9 = cir.cst(0 : i32) : i32 + cir.store %9, %8 : i32, cir.ptr + cir.loop for(cond : { + %10 = cir.load %8 : cir.ptr , i32 + %11 = cir.load %2 : cir.ptr , i32 + %12 = cir.cmp(lt, %10, %11) : i32, i32 + %13 = cir.cast(int_to_bool, %12 : i32), !cir.bool + cir.brcond %13 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + %10 = cir.load %8 : cir.ptr , i32 + %11 = cir.unary(inc, %10) : i32, i32 + cir.store %11, %8 : i32, cir.ptr + cir.yield + }) { + %10 = cir.load %0 : cir.ptr >, !cir.ptr + %11 = cir.load %8 : cir.ptr , i32 + %12 = cir.ptr_stride(%10 : !cir.ptr, %11 : i32), !cir.ptr + %13 = cir.load %12 : cir.ptr , f64 + %14 = cir.load %1 : cir.ptr >, !cir.ptr + %15 = cir.load %8 : cir.ptr , i32 + %16 = cir.ptr_stride(%14 : !cir.ptr, %15 : i32), !cir.ptr + %17 = cir.load %16 : cir.ptr , f64 + %18 = cir.binop(mul, %13, %17) : f64 + %19 = cir.load %4 : cir.ptr , f64 + %20 = cir.binop(add, %19, %18) : f64 + cir.store %20, %4 : f64, cir.ptr + cir.yield + } + } + %6 = cir.load %4 : cir.ptr , f64 + cir.store %6, %3 : f64, cir.ptr + %7 = cir.load %3 : cir.ptr , f64 + cir.return %7 : f64 + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @dot(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: i32) -> f64 { +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %3 = llvm.alloca %2 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %4 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %5 = llvm.alloca %4 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %6 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %7 = llvm.alloca %6 x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %8 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %9 = llvm.alloca %8 x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: llvm.store %arg0, %1 : !llvm.ptr +// MLIR-NEXT: llvm.store %arg1, %3 : !llvm.ptr +// MLIR-NEXT: llvm.store %arg2, %5 : i32, !llvm.ptr +// MLIR-NEXT: %10 = llvm.mlir.constant(0.000000e+00 : f64) : f64 +// MLIR-NEXT: llvm.store %10, %9 : f64, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb1 +// MLIR-NEXT: ^bb1: // pred: ^bb0 +// MLIR-NEXT: %11 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %12 = llvm.alloca %11 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %13 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: llvm.store %13, %12 : i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb2 +// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb6 +// MLIR-NEXT: %14 = llvm.load %12 : !llvm.ptr +// MLIR-NEXT: %15 = llvm.load %5 : !llvm.ptr +// MLIR-NEXT: %16 = llvm.icmp "ult" %14, %15 : i32 +// MLIR-NEXT: %17 = llvm.zext %16 : i1 to i32 +// MLIR-NEXT: %18 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: %19 = llvm.icmp "ne" %17, %18 : i32 +// MLIR-NEXT: %20 = llvm.zext %19 : i1 to i8 +// MLIR-NEXT: %21 = llvm.trunc %20 : i8 to i1 +// MLIR-NEXT: llvm.cond_br %21, ^bb3, ^bb4 +// MLIR-NEXT: ^bb3: // pred: ^bb2 +// MLIR-NEXT: llvm.br ^bb5 +// MLIR-NEXT: ^bb4: // pred: ^bb2 +// MLIR-NEXT: llvm.br ^bb7 +// MLIR-NEXT: ^bb5: // pred: ^bb3 +// MLIR-NEXT: %22 = llvm.load %12 : !llvm.ptr +// MLIR-NEXT: %23 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: %24 = llvm.add %22, %23 : i32 +// MLIR-NEXT: llvm.store %24, %12 : i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb6 +// MLIR-NEXT: ^bb6: // pred: ^bb5 +// MLIR-NEXT: %25 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %26 = llvm.load %12 : !llvm.ptr +// MLIR-NEXT: %27 = llvm.getelementptr %25[%26] : (!llvm.ptr, i32) -> !llvm.ptr +// MLIR-NEXT: %28 = llvm.load %27 : !llvm.ptr +// MLIR-NEXT: %29 = llvm.load %3 : !llvm.ptr +// MLIR-NEXT: %30 = llvm.load %12 : !llvm.ptr +// MLIR-NEXT: %31 = llvm.getelementptr %29[%30] : (!llvm.ptr, i32) -> !llvm.ptr +// MLIR-NEXT: %32 = llvm.load %31 : !llvm.ptr +// MLIR-NEXT: %33 = llvm.fmul %28, %32 : f64 +// MLIR-NEXT: %34 = llvm.load %9 : !llvm.ptr +// MLIR-NEXT: %35 = llvm.fadd %34, %33 : f64 +// MLIR-NEXT: llvm.store %35, %9 : f64, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb2 +// MLIR-NEXT: ^bb7: // pred: ^bb4 +// MLIR-NEXT: llvm.br ^bb8 +// MLIR-NEXT: ^bb8: // pred: ^bb7 +// MLIR-NEXT: %36 = llvm.load %9 : !llvm.ptr +// MLIR-NEXT: llvm.store %36, %7 : f64, !llvm.ptr +// MLIR-NEXT: %37 = llvm.load %7 : !llvm.ptr +// MLIR-NEXT: llvm.return %37 : f64 +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: define double @dot(ptr %0, ptr %1, i32 %2) { +// LLVM-NEXT: %4 = alloca ptr, i64 1, align 8 +// LLVM-NEXT: %5 = alloca ptr, i64 1, align 8 +// LLVM-NEXT: %6 = alloca i32, i64 1, align 4 +// LLVM-NEXT: %7 = alloca double, i64 1, align 8 +// LLVM-NEXT: %8 = alloca double, i64 1, align 8 +// LLVM-NEXT: store ptr %0, ptr %4, align 8 +// LLVM-NEXT: store ptr %1, ptr %5, align 8 +// LLVM-NEXT: store i32 %2, ptr %6, align 4 +// LLVM-NEXT: store double 0.000000e+00, ptr %8, align 8 +// LLVM-NEXT: br label %9 +// LLVM-EMPTY: +// LLVM-NEXT: 9: ; preds = %3 +// LLVM-NEXT: %10 = alloca i32, i64 1, align 4 +// LLVM-NEXT: store i32 0, ptr %10, align 4 +// LLVM-NEXT: br label %11 +// LLVM-EMPTY: +// LLVM-NEXT: 11: ; preds = %24, %9 +// LLVM-NEXT: %12 = load i32, ptr %10, align 4 +// LLVM-NEXT: %13 = load i32, ptr %6, align 4 +// LLVM-NEXT: %14 = icmp ult i32 %12, %13 +// LLVM-NEXT: %15 = zext i1 %14 to i32 +// LLVM-NEXT: %16 = icmp ne i32 %15, 0 +// LLVM-NEXT: %17 = zext i1 %16 to i8 +// LLVM-NEXT: %18 = trunc i8 %17 to i1 +// LLVM-NEXT: br i1 %18, label %19, label %20 +// LLVM-EMPTY: +// LLVM-NEXT: 19: ; preds = %11 +// LLVM-NEXT: br label %21 +// LLVM-EMPTY: +// LLVM-NEXT: 20: ; preds = %11 +// LLVM-NEXT: br label %36 +// LLVM-EMPTY: +// LLVM-NEXT: 21: ; preds = %19 +// LLVM-NEXT: %22 = load i32, ptr %10, align 4 +// LLVM-NEXT: %23 = add i32 %22, 1 +// LLVM-NEXT: store i32 %23, ptr %10, align 4 +// LLVM-NEXT: br label %24 +// LLVM-EMPTY: +// LLVM-NEXT: 24: ; preds = %21 +// LLVM-NEXT: %25 = load ptr, ptr %4, align 8 +// LLVM-NEXT: %26 = load i32, ptr %10, align 4 +// LLVM-NEXT: %27 = getelementptr double, ptr %25, i32 %26 +// LLVM-NEXT: %28 = load double, ptr %27, align 8 +// LLVM-NEXT: %29 = load ptr, ptr %5, align 8 +// LLVM-NEXT: %30 = load i32, ptr %10, align 4 +// LLVM-NEXT: %31 = getelementptr double, ptr %29, i32 %30 +// LLVM-NEXT: %32 = load double, ptr %31, align 8 +// LLVM-NEXT: %33 = fmul double %28, %32 +// LLVM-NEXT: %34 = load double, ptr %8, align 8 +// LLVM-NEXT: %35 = fadd double %34, %33 +// LLVM-NEXT: store double %35, ptr %8, align 8 +// LLVM-NEXT: br label %11 +// LLVM-EMPTY: +// LLVM-NEXT: 36: ; preds = %20 +// LLVM-NEXT: br label %37 +// LLVM-EMPTY: +// LLVM-NEXT: 37: ; preds = %36 +// LLVM-NEXT: %38 = load double, ptr %8, align 8 +// LLVM-NEXT: store double %38, ptr %7, align 8 +// LLVM-NEXT: %39 = load double, ptr %7, align 8 +// LLVM-NEXT: ret double %39 +// LLVM-NEXT: } From c1da1d9aa61513d6fbe420fef475a7c8d823115c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 11 Jan 2023 20:44:52 -0500 Subject: [PATCH 0799/2301] [CIR][Lowering] Remove the cir.sob attribute during lowering We don't actually handle this yet. But it's not valid post-CIR. So always remove it. There's probably a better mechanism for removing attrs but I didn't see anything. So just directly do it here. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f9e0ad790c3d..50a9cbbdec81 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -836,6 +836,8 @@ void ConvertCIRToLLVMPass::runOnOperation() { target.addLegalDialect(); target.addIllegalDialect(); + getOperation()->removeAttr("cir.sob"); + if (failed(applyPartialConversion(module, target, std::move(patterns)))) signalPassFailure(); } From 312bc3660e836571382edd570631507adf25f8e7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 24 Jan 2023 15:29:59 -0300 Subject: [PATCH 0800/2301] [CIR][CIRGen] Handle ExprWithCleanups in face of lvalues --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 42 ++++++++++++++++------ clang/test/CIR/CodeGen/assign-operator.cpp | 16 +++++---- 2 files changed, 40 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 651cc6b09f39..b63a0022c9a2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1493,17 +1493,37 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return buildCallExprLValue(cast(E)); case Expr::ExprWithCleanupsClass: { const auto *cleanups = cast(E); - // RunCleanupsScope Scope(*this); - LValue LV = buildLValue(cleanups->getSubExpr()); - if (LV.isSimple()) { - // Defend against branches out of gnu statement expressions surrounded by - // cleanups. - Address Addr = LV.getAddress(); - auto V = Addr.getPointer(); - // Scope.ForceCleanup({&V}); - return LValue::makeAddr(Addr.withPointer(V), LV.getType(), getContext(), - LV.getBaseInfo() /*TODO(cir):TBAA*/); - } + LValue LV; + + auto scopeLoc = getLoc(E->getSourceRange()); + [[maybe_unused]] auto scope = builder.create( + scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + SmallVector locs; + if (loc.isa()) { + locs.push_back(loc); + locs.push_back(loc); + } else if (loc.isa()) { + auto fusedLoc = loc.cast(); + locs.push_back(fusedLoc.getLocations()[0]); + locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{ + locs[0], locs[1], builder.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexScopeGuard{*this, &lexScope}; + + LV = buildLValue(cleanups->getSubExpr()); + if (LV.isSimple()) { + // Defend against branches out of gnu statement expressions + // surrounded by cleanups. + Address Addr = LV.getAddress(); + auto V = Addr.getPointer(); + LV = LValue::makeAddr(Addr.withPointer(V), LV.getType(), + getContext(), + LV.getBaseInfo() /*TODO(cir):TBAA*/); + } + }); + // FIXME: Is it possible to create an ExprWithCleanups that produces a // bitfield lvalue or some other non-simple lvalue? return LV; diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 702ec45eb795..97864241db34 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -72,13 +72,15 @@ int main() { // CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () // CHECK: cir.scope { // CHECK: %3 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s", init] {alignment = 8 : i64} -// CHECK: %4 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: %5 = cir.get_global @".str" : cir.ptr > -// CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_ZN6StringC2EPKc(%3, %6) : (!cir.ptr, !cir.ptr) -> () -// CHECK: cir.call @_ZN10StringViewC2ERK6String(%4, %3) : (!cir.ptr, !cir.ptr) -> () -// CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %4) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: %4 = cir.get_global @".str" : cir.ptr > +// CHECK: %5 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_ZN6StringC2EPKc(%3, %5) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.scope { +// CHECK: %6 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: cir.call @_ZN10StringViewC2ERK6String(%6, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %6) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: } // CHECK: } // CHECK: %2 = cir.load %0 : cir.ptr , i32 // CHECK: cir.return %2 : i32 -// CHECK: } +// CHECK: } From 71ed001dbd820c162cbc20bbe1a023263f9cf9af Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 24 Jan 2023 16:03:58 -0300 Subject: [PATCH 0801/2301] [CIR][CIRGen] Handle ExprWithCleanups in face of AggExprEmitter This finally unblocks codegen support for the lifetime checker to use scopes to track simple lifetime issues with coroutines (to be written). While here also add a comment that no extra work is necessary for constant emitter handling ExprWithCleanups. --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 21 ++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprCst.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 ++ clang/test/CIR/CodeGen/coro-task.cpp | 17 ++++++++++++++--- 4 files changed, 37 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index bb12afdc0b6d..095739af3181 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -176,7 +176,26 @@ void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { if (UnimplementedFeature::cleanups()) llvm_unreachable("NYI"); - Visit(E->getSubExpr()); + + auto &builder = CGF.getBuilder(); + auto scopeLoc = CGF.getLoc(E->getSourceRange()); + [[maybe_unused]] auto scope = builder.create( + scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + SmallVector locs; + if (loc.isa()) { + locs.push_back(loc); + locs.push_back(loc); + } else if (loc.isa()) { + auto fusedLoc = loc.cast(); + locs.push_back(fusedLoc.getLocations()[0]); + locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{ + locs[0], locs[1], builder.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexScopeGuard{CGF, &lexScope}; + Visit(E->getSubExpr()); + }); } void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp index 19c0fb9f673e..8e71e4eef10f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp @@ -745,6 +745,7 @@ class ConstExprEmitter } mlir::Attribute VisitExprWithCleanups(ExprWithCleanups *E, QualType T) { + // Since this about constant emission no need to wrap this under a scope. return Visit(E->getSubExpr(), T); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index b50e0f856965..a56f34584cad 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -41,6 +41,7 @@ class CallOp; namespace { class ScalarExprEmitter; +class AggExprEmitter; } namespace cir { @@ -56,6 +57,7 @@ class CIRGenFunction { private: friend class ::ScalarExprEmitter; + friend class ::AggExprEmitter; /// The builder is a helper class to create IR inside a function. The /// builder is stateful, in particular it keeps an "insertion point": this diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index a86dc03ae466..0d7ce3eac0d6 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -127,6 +127,7 @@ co_invoke_fn co_invoke; }} // namespace folly::coro // CHECK: ![[VoidTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task", i8> +// CHECK: ![[IntTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task", i8> // CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct<"struct.folly::coro::Task::promise_type", i8> // CHECK: ![[CoroHandleVoid:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", i8> // CHECK: ![[CoroHandlePromise:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", i8> @@ -299,20 +300,30 @@ folly::coro::Task silly_coro() { // CHECK-NOT: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv // CHECK: cir.await(final, ready : { -folly::coro::Task go(int const& val) { - co_return val; -} +folly::coro::Task go(int const& val); folly::coro::Task go1() { auto task = go(1); co_return co_await task; } // CHECK: cir.func coroutine @_Z3go1v() +// CHECK: %[[#IntTaskAddr:]] = cir.alloca ![[IntTask]], cir.ptr , ["task", init] + // CHECK: cir.await(init, ready : { // CHECK: }, suspend : { // CHECK: }, resume : { // CHECK: },) // CHECK: } + +// The call to go(1) has its own scope due to full-expression rules. +// CHECK: cir.scope { +// CHECK: %[[#OneAddr:]] = cir.alloca i32, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} +// CHECK: %[[#One:]] = cir.cst(1 : i32) : i32 +// CHECK: cir.store %[[#One]], %[[#OneAddr]] : i32, cir.ptr +// CHECK: %[[#IntTaskTmp:]] = cir.call @_Z2goRKi(%[[#OneAddr]]) : (!cir.ptr) -> ![[IntTask]] +// CHECK: cir.store %[[#IntTaskTmp]], %[[#IntTaskAddr]] : ![[IntTask]], cir.ptr +// CHECK: } + // CHECK: %[[#CoReturnValAddr:]] = cir.alloca i32, cir.ptr , ["__coawait_resume_rval"] {alignment = 1 : i64} // CHECK: cir.await(user, ready : { // CHECK: }, suspend : { From 162c592bae405bd6722cd392d3bdfdfdaaa3db90 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 26 Jan 2023 17:22:49 -0800 Subject: [PATCH 0802/2301] [CIR][LifetimeChecker] Start tracking necessary idioms for coroutines based diagnostics - Add tracking for local coroutine tasks - Change localValues to be based on a set instead of SmallVector - Track initialization of coroutine through cir.store of the tmp task. - Augment pset(task) with local alloca's used in the coro construction, so these can invalidate the set when go out of scope. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 80 ++++++++++++++++--- 1 file changed, 70 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index ef8e8b078509..c10a44f41f01 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -280,7 +280,14 @@ struct LifetimeCheckPass : public LifetimeCheckBase { ~LexicalScopeContext() = default; // Track all local values added in this scope - llvm::SmallVector localValues; + SmallPtrSet localValues; + + // Track the result of temporaries with coroutine call results, + // they are used to initialize a task. + // + // Value must come directly out of a cir.call to a cir.func which + // is a coroutine. + SmallPtrSet localTempTasks; LLVM_DUMP_METHOD void dumpLocalValues(); }; @@ -792,12 +799,12 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { // 2.4.2 - When a local Owner x is declared, add (x, {x__1'}) to pmap. addOwner(addr); getPmap()[addr].insert(State::getOwnedBy(addr)); - currScope->localValues.push_back(addr); + currScope->localValues.insert(addr); break; case TypeCategory::Value: { // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. getPmap()[addr].insert(State::getLocalValue(addr)); - currScope->localValues.push_back(addr); + currScope->localValues.insert(addr); return; } default: @@ -808,10 +815,48 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { void LifetimeCheckPass::checkStore(StoreOp storeOp) { auto addr = storeOp.getAddr(); - // We only care about stores that change local pointers, local values - // are not interesting here (just yet). - if (!ptrs.count(addr)) + // The bulk of the check is done on top of store to pointer categories, + // which usually represent the most common case. + // + // We handle some special local values, like coroutine tasks, which could + // be holding references to things with dangling lifetime. + if (!ptrs.count(addr)) { + if (currScope->localTempTasks.count(storeOp.getValue())) { + // Given: + // auto task = [init task]; + // Extend pset(task) such that: + // pset(task) = pset(task) U {any local values used to init task} + auto taskTmp = storeOp.getValue(); + // FIXME: check it's initialization 'init' attr. + auto taskAddr = storeOp.getAddr(); + + // Take the following coroutine creation pattern: + // + // %task = cir.alloca ... + // cir.scope { + // %arg0 = cir.alloca ... + // ... + // %tmp_task = cir.call @corotine_call(%arg0, %arg1, ...) + // cir.store %tmp_task, %task + // ... + // } + // + // Bind values that are coming from alloca's (like %arg0 above) to the + // pset of %task - this effectively leads to some invalidation of %task + // when %arg0 finishes its lifetime at the end of the enclosing cir.scope. + if (auto call = dyn_cast(taskTmp.getDefiningOp())) { + for (auto arg : call.getOperands()) { + auto alloca = dyn_cast(arg.getDefiningOp()); + if (alloca && currScope->localValues.count(alloca)) + getPmap()[taskAddr].insert(State::getLocalValue(alloca)); + } + return; + } + llvm_unreachable("expecting calls"); + } + // Only handle ptrs from here on. return; + } auto getArrayFromSubscript = [&](PtrStrideOp strideOp) -> mlir::Value { auto castOp = dyn_cast(strideOp.getBase().getDefiningOp()); @@ -952,10 +997,14 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, emitPsetRemark(); } -const clang::CXXMethodDecl *getMethod(ModuleOp mod, StringRef name) { - auto *global = mlir::SymbolTable::lookupSymbolIn(mod, name); - assert(global && "expected to find symbol"); - auto method = dyn_cast(global); +static FuncOp getCalleeFromSymbol(ModuleOp mod, StringRef name) { + auto global = mlir::SymbolTable::lookupSymbolIn(mod, name); + assert(global && "expected to find symbol for function"); + return dyn_cast(global); +} + +static const clang::CXXMethodDecl *getMethod(ModuleOp mod, StringRef name) { + auto method = getCalleeFromSymbol(mod, name); if (!method || method.getBuiltin()) return nullptr; return dyn_cast(method.getAstAttr().getAstDecl()); @@ -1182,6 +1231,17 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { if (callOp.getNumOperands() == 0) return; + // Identify calls to coroutines, and start tracking which local resources + // might scape into one. + // + // Calls to coroutines return the coroutine task, keep track of it. + auto callee = getCalleeFromSymbol(theModule, callOp.getCallee()); + if (callee && callee.getCoroutine()) { + assert(callOp->getNumResults() > 0 && + "expected coroutine initialization or resume"); + currScope->localTempTasks.insert(callOp->getResult(0)); + } + const auto *methodDecl = getMethod(theModule, callOp.getCallee()); if (!isOwnerOrPointerClassMethod(callOp.getOperand(0), methodDecl)) return checkOtherMethodsAndFunctions(callOp, methodDecl); From cc27ae08d5afc8ea1780aa355147ffe03039a6f3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 26 Jan 2023 19:02:39 -0800 Subject: [PATCH 0803/2301] [CIR][LifetimeChecker] Add duck typing based on promise_type to recognize coroutine tasks --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 52 ++++++++++++++++--- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index c10a44f41f01..ca86bf5964b9 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -11,6 +11,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclTemplate.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" @@ -263,6 +264,16 @@ struct LifetimeCheckPass : public LifetimeCheckBase { LLVM_DUMP_METHOD void dumpPmap(PMapType &pmap); LLVM_DUMP_METHOD void dumpCurrentPmap(); + /// + /// Coroutine tasks (promise_type) + /// ---------------------------------------------- + + // Track types we already know to be a coroutine task (promise_type) + llvm::DenseMap IsTaskTyCache; + // Is the type associated with taskVal a coroutine task? Uses IsTaskTyCache + // or compute it from associated AST node. + bool isTaskType(mlir::Value taskVal); + /// /// Scope, context and guards /// ------------------------- @@ -1227,18 +1238,45 @@ bool LifetimeCheckPass::isOwnerOrPointerClassMethod( return false; } +bool LifetimeCheckPass::isTaskType(mlir::Value taskVal) { + auto ty = taskVal.getType(); + if (IsTaskTyCache.count(ty)) + return IsTaskTyCache[ty]; + + IsTaskTyCache[ty] = false; + auto taskTy = taskVal.getType().dyn_cast(); + if (!taskTy) + return false; + auto recordDecl = taskTy.getAst()->getAstDecl(); + auto *spec = dyn_cast(recordDecl); + if (!spec) + return false; + + for (auto *sub : spec->decls()) { + auto *subRec = dyn_cast(sub); + if (subRec && subRec->getDeclName().isIdentifier() && + subRec->getName() == "promise_type") { + IsTaskTyCache[ty] = true; + break; + } + } + + return IsTaskTyCache[ty]; +} + void LifetimeCheckPass::checkCall(CallOp callOp) { if (callOp.getNumOperands() == 0) return; - // Identify calls to coroutines, and start tracking which local resources - // might scape into one. + // Identify calls to coroutines and track returning temporary task types. // - // Calls to coroutines return the coroutine task, keep track of it. - auto callee = getCalleeFromSymbol(theModule, callOp.getCallee()); - if (callee && callee.getCoroutine()) { - assert(callOp->getNumResults() > 0 && - "expected coroutine initialization or resume"); + // Note that we can't reliably know if a function is a coroutine only as + // part of declaration + auto calleeFuncOp = getCalleeFromSymbol(theModule, callOp.getCallee()); + if (calleeFuncOp && + (calleeFuncOp.getCoroutine() || + (calleeFuncOp.isDeclaration() && callOp->getNumResults() > 0 && + isTaskType(callOp->getResult(0))))) { currScope->localTempTasks.insert(callOp->getResult(0)); } From 5a743546dccb2b0d4a6b4823ef1bba8c3e9bd888 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 27 Jan 2023 09:04:30 -0800 Subject: [PATCH 0804/2301] [CIR][LifetimeChecker] Handle cir.await regions and pmaps --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index ca86bf5964b9..acefe38478e4 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -40,6 +40,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkStore(StoreOp op); void checkLoad(LoadOp op); void checkCall(CallOp callOp); + void checkAwait(AwaitOp awaitOp); void checkPointerDeref(mlir::Value addr, mlir::Location loc); @@ -607,6 +608,24 @@ void LifetimeCheckPass::checkLoop(LoopOp loopOp) { joinPmaps(pmapOps); } +void LifetimeCheckPass::checkAwait(AwaitOp awaitOp) { + // Pretty conservative: assume all regions execute + // sequencially. + // + // FIXME: use branch interface here and only tackle + // the necessary regions. + SmallVector pmapOps; + + for (auto r : awaitOp.getRegions()) { + PMapType regionPmap = getPmap(); + PmapGuard pmapGuard{*this, ®ionPmap}; + checkRegion(*r); + pmapOps.push_back(regionPmap); + } + + joinPmaps(pmapOps); +} + void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { // 2.4.7. A switch(cond) is treated as if it were an equivalent series of // non-nested if statements with single evaluation of cond; for example: @@ -1334,6 +1353,7 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return; } + // FIXME: we can do better than sequence of dyn_casts. if (isa(op)) return checkFunc(op); if (auto ifOp = dyn_cast(op)) @@ -1350,6 +1370,8 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return checkLoad(loadOp); if (auto callOp = dyn_cast(op)) return checkCall(callOp); + if (auto awaitOp = dyn_cast(op)) + return checkAwait(awaitOp); } void LifetimeCheckPass::runOnOperation() { From 858d31b2f320905f98d4cdb7e433a8aba080e15e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 27 Jan 2023 09:12:15 -0800 Subject: [PATCH 0805/2301] [CIR][Lifetime] Move task tracking logic to checkCoroTaskStore --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 73 ++++++++++--------- 1 file changed, 39 insertions(+), 34 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index acefe38478e4..2ab8f0f56631 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -43,6 +43,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkAwait(AwaitOp awaitOp); void checkPointerDeref(mlir::Value addr, mlir::Location loc); + void checkCoroTaskStore(StoreOp storeOp); void checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor); void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); @@ -842,6 +843,40 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { } } +void LifetimeCheckPass::checkCoroTaskStore(StoreOp storeOp) { + // Given: + // auto task = [init task]; + // Extend pset(task) such that: + // pset(task) = pset(task) U {any local values used to init task} + auto taskTmp = storeOp.getValue(); + // FIXME: check it's initialization 'init' attr. + auto taskAddr = storeOp.getAddr(); + + // Take the following coroutine creation pattern: + // + // %task = cir.alloca ... + // cir.scope { + // %arg0 = cir.alloca ... + // ... + // %tmp_task = cir.call @corotine_call(%arg0, %arg1, ...) + // cir.store %tmp_task, %task + // ... + // } + // + // Bind values that are coming from alloca's (like %arg0 above) to the + // pset of %task - this effectively leads to some invalidation of %task + // when %arg0 finishes its lifetime at the end of the enclosing cir.scope. + if (auto call = dyn_cast(taskTmp.getDefiningOp())) { + for (auto arg : call.getOperands()) { + auto alloca = dyn_cast(arg.getDefiningOp()); + if (alloca && currScope->localValues.count(alloca)) + getPmap()[taskAddr].insert(State::getLocalValue(alloca)); + } + return; + } + llvm_unreachable("expecting cir.call defining op"); +} + void LifetimeCheckPass::checkStore(StoreOp storeOp) { auto addr = storeOp.getAddr(); @@ -851,43 +886,13 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { // We handle some special local values, like coroutine tasks, which could // be holding references to things with dangling lifetime. if (!ptrs.count(addr)) { - if (currScope->localTempTasks.count(storeOp.getValue())) { - // Given: - // auto task = [init task]; - // Extend pset(task) such that: - // pset(task) = pset(task) U {any local values used to init task} - auto taskTmp = storeOp.getValue(); - // FIXME: check it's initialization 'init' attr. - auto taskAddr = storeOp.getAddr(); - - // Take the following coroutine creation pattern: - // - // %task = cir.alloca ... - // cir.scope { - // %arg0 = cir.alloca ... - // ... - // %tmp_task = cir.call @corotine_call(%arg0, %arg1, ...) - // cir.store %tmp_task, %task - // ... - // } - // - // Bind values that are coming from alloca's (like %arg0 above) to the - // pset of %task - this effectively leads to some invalidation of %task - // when %arg0 finishes its lifetime at the end of the enclosing cir.scope. - if (auto call = dyn_cast(taskTmp.getDefiningOp())) { - for (auto arg : call.getOperands()) { - auto alloca = dyn_cast(arg.getDefiningOp()); - if (alloca && currScope->localValues.count(alloca)) - getPmap()[taskAddr].insert(State::getLocalValue(alloca)); - } - return; - } - llvm_unreachable("expecting calls"); - } - // Only handle ptrs from here on. + if (currScope->localTempTasks.count(storeOp.getValue())) + checkCoroTaskStore(storeOp); return; } + // Only handle ptrs from here on. + auto getArrayFromSubscript = [&](PtrStrideOp strideOp) -> mlir::Value { auto castOp = dyn_cast(strideOp.getBase().getDefiningOp()); if (!castOp) From b648ef5cd1f2e0cce255ed3a961911af433475d6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 27 Jan 2023 10:01:11 -0800 Subject: [PATCH 0806/2301] [CIR][LifetimeCheck] Track potential tainted tasks and check for deref --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 24 +++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 2ab8f0f56631..70301b70b7c6 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -275,6 +275,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Is the type associated with taskVal a coroutine task? Uses IsTaskTyCache // or compute it from associated AST node. bool isTaskType(mlir::Value taskVal); + // Addresses of coroutine Tasks found in the current function. + SmallPtrSet tasks; /// /// Scope, context and guards @@ -440,6 +442,7 @@ void LifetimeCheckPass::kill(const State &s, InvalidStyle invalidStyle, if (invalidStyle == InvalidStyle::EndOfScope) { owners.erase(v); ptrs.erase(v); + tasks.erase(v); getPmap().erase(v); } } @@ -867,11 +870,19 @@ void LifetimeCheckPass::checkCoroTaskStore(StoreOp storeOp) { // pset of %task - this effectively leads to some invalidation of %task // when %arg0 finishes its lifetime at the end of the enclosing cir.scope. if (auto call = dyn_cast(taskTmp.getDefiningOp())) { + bool potentialTaintedTask = false; for (auto arg : call.getOperands()) { auto alloca = dyn_cast(arg.getDefiningOp()); - if (alloca && currScope->localValues.count(alloca)) + if (alloca && currScope->localValues.count(alloca)) { getPmap()[taskAddr].insert(State::getLocalValue(alloca)); + potentialTaintedTask = true; + } } + + // Task are only interesting when there are local addresses leaking + // via the coroutine creation, only track those. + if (potentialTaintedTask) + tasks.insert(taskAddr); return; } llvm_unreachable("expecting cir.call defining op"); @@ -1228,12 +1239,16 @@ void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, // // - Owners: always invalidate. // - Pointers: always check for deref. + // - Coroutine tasks: check the task for deref when calling methods of + // the task, but also when the passing the task around to other functions. // // FIXME: even before 2.5 we should only invalidate non-const param types. if (owners.count(arg)) ownersToInvalidate.insert(arg); if (ptrs.count(arg)) ptrsToDeref.insert(arg); + if (tasks.count(arg)) + ptrsToDeref.insert(arg); } // FIXME: CIR should track source info on the passed args, so we can get @@ -1247,7 +1262,12 @@ void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, void LifetimeCheckPass::checkOtherMethodsAndFunctions( CallOp callOp, const clang::CXXMethodDecl *m) { unsigned firstArgIdx = 0; - if (m) // Skip 'this' pointer + + // Looks at a method 'this' pointer: + // - If a method call to a class we consider interesting, like a method + // call on a coroutine task (promise_type). + // - Skip the 'this' for any other method. + if (m && !tasks.count(callOp.getOperand(firstArgIdx))) firstArgIdx++; checkForOwnerAndPointerArguments(callOp, firstArgIdx); } From 6f059eedcd076caa7868e063fd0d66db96c84f47 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 27 Jan 2023 11:49:12 -0800 Subject: [PATCH 0807/2301] [CIR][LifetimeChecker] Detect out of scope use of locals for coroutine creation - Cache diagnostics while looking at the same source expression. - Write more coroutine specific check messages. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 46 ++++++++++++++++--- clang/test/CIR/Transforms/Inputs/folly-coro.h | 44 ++++++++++++++++++ clang/test/CIR/Transforms/Inputs/std.h | 29 ++++++++++++ .../Transforms/lifetime-check-coro-task.cpp | 11 +++++ 4 files changed, 123 insertions(+), 7 deletions(-) create mode 100644 clang/test/CIR/Transforms/Inputs/folly-coro.h create mode 100644 clang/test/CIR/Transforms/Inputs/std.h create mode 100644 clang/test/CIR/Transforms/lifetime-check-coro-task.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 70301b70b7c6..e885eaf9c26d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -18,10 +18,20 @@ #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SmallSet.h" +#include + using namespace mlir; using namespace cir; namespace { + +struct LocOrdering { + bool operator()(mlir::Location L1, mlir::Location L2) const { + return std::less()(L1.getAsOpaquePointer(), + L2.getAsOpaquePointer()); + } +}; + struct LifetimeCheckPass : public LifetimeCheckBase { LifetimeCheckPass() = default; void runOnOperation() override; @@ -65,7 +75,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { const clang::CXXMethodDecl *m); // Diagnostic helpers. - void emitInvalidHistory(mlir::InFlightDiagnostic &D, mlir::Value histKey); + void emitInvalidHistory(mlir::InFlightDiagnostic &D, mlir::Value histKey, + mlir::Location warningLoc); /// /// Pass options handling @@ -277,6 +288,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { bool isTaskType(mlir::Value taskVal); // Addresses of coroutine Tasks found in the current function. SmallPtrSet tasks; + // Since coawait encapsulates several calls to a promise, do not emit + // the same warning multiple times, e.g. under the same coawait. + llvm::SmallSet emittedDanglingTasks; /// /// Scope, context and guards @@ -972,7 +986,8 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { } void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, - mlir::Value histKey) { + mlir::Value histKey, + mlir::Location warningLoc) { assert(invalidHist.count(histKey) && "expected invalid hist"); auto &hist = invalidHist[histKey]; unsigned limit = opts.histLimit; @@ -987,9 +1002,16 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, break; } case InvalidStyle::EndOfScope: { - StringRef outOfScopeVarName = getVarNameFromValue(*info.val); - D.attachNote(info.loc) << "pointee '" << outOfScopeVarName - << "' invalidated at end of scope"; + if (!tasks.count(histKey)) { + StringRef outOfScopeVarName = getVarNameFromValue(*info.val); + D.attachNote(info.loc) << "pointee '" << outOfScopeVarName + << "' invalidated at end of scope"; + } else { + D.attachNote((*info.val).getLoc()) << "coroutine bound to resource " + << "with expired lifetime"; + D.attachNote(info.loc) << "at the end of scope or full-expression"; + emittedDanglingTasks.insert(warningLoc); + } break; } case InvalidStyle::NonConstUseOfOwner: { @@ -1014,6 +1036,12 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, emitRemark(loc) << "pset => " << Out.str(); }; + // Do not emit more than one diagonistic for the same task deref location. + // Since cowait hides a bunch of logic and calls to the promise type, just + // have one per suspend expr. + if (tasks.count(addr) && emittedDanglingTasks.count(loc)) + return; + bool psetRemarkEmitted = false; if (opts.emitRemarkPsetAlways()) { emitPsetRemark(); @@ -1028,10 +1056,14 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, // diagnose it. StringRef varName = getVarNameFromValue(addr); auto D = emitWarning(loc); - D << "use of invalid pointer '" << varName << "'"; + + if (tasks.count(addr)) { + D << "use of coroutine '" << varName << "' with dangling reference"; + } else + D << "use of invalid pointer '" << varName << "'"; if (hasInvalid && opts.emitHistoryInvalid()) - emitInvalidHistory(D, addr); + emitInvalidHistory(D, addr, loc); if (hasNullptr && opts.emitHistoryNull()) { assert(pmapNullHist.count(addr) && "expected nullptr hist"); diff --git a/clang/test/CIR/Transforms/Inputs/folly-coro.h b/clang/test/CIR/Transforms/Inputs/folly-coro.h new file mode 100644 index 000000000000..21e4b337eb22 --- /dev/null +++ b/clang/test/CIR/Transforms/Inputs/folly-coro.h @@ -0,0 +1,44 @@ +#include "std.h" + +namespace folly { +namespace coro { + +using std::suspend_always; +using std::suspend_never; +using std::coroutine_handle; + +using SemiFuture = int; + +template +struct Task { + struct promise_type { + Task get_return_object() noexcept; + suspend_always initial_suspend() noexcept; + suspend_always final_suspend() noexcept; + void return_value(T); + void unhandled_exception(); + auto yield_value(Task) noexcept { return final_suspend(); } + }; + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + T await_resume(); +}; + +template<> +struct Task { + struct promise_type { + Task get_return_object() noexcept; + suspend_always initial_suspend() noexcept; + suspend_always final_suspend() noexcept; + void return_void() noexcept; + void unhandled_exception() noexcept; + auto yield_value(Task) noexcept { return final_suspend(); } + }; + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} + SemiFuture semi(); +}; + +} // coro +} // folly \ No newline at end of file diff --git a/clang/test/CIR/Transforms/Inputs/std.h b/clang/test/CIR/Transforms/Inputs/std.h new file mode 100644 index 000000000000..1bc2b8504784 --- /dev/null +++ b/clang/test/CIR/Transforms/Inputs/std.h @@ -0,0 +1,29 @@ +namespace std { + +template +struct coroutine_traits { using promise_type = typename Ret::promise_type; }; + +template +struct coroutine_handle { + static coroutine_handle from_address(void *) noexcept; +}; +template <> +struct coroutine_handle { + template + coroutine_handle(coroutine_handle) noexcept; + static coroutine_handle from_address(void *); +}; + +struct suspend_always { + bool await_ready() noexcept { return false; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} +}; + +struct suspend_never { + bool await_ready() noexcept { return true; } + void await_suspend(coroutine_handle<>) noexcept {} + void await_resume() noexcept {} +}; + +} // namespace std \ No newline at end of file diff --git a/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp b/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp new file mode 100644 index 000000000000..2be677c54cc5 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -I%S/Inputs -fclangir -fclangir-lifetime-check="history=all;remarks=all;history_limit=1" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +#include "folly-coro.h" + +folly::coro::Task go(int const& val); +folly::coro::Task go1() { + auto task = go(1); // expected-note {{coroutine bound to resource with expired lifetime}} + // expected-note@-1 {{at the end of scope or full-expression}} + co_return co_await task; // expected-remark {{pset => { task, invalid }}} + // expected-warning@-1 {{use of coroutine 'task' with dangling reference}} +} \ No newline at end of file From 68cf13b95ac384e2ddf4be234c4db7876ff901f9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 27 Jan 2023 17:16:30 -0800 Subject: [PATCH 0808/2301] [CIR][LifetimeCheck] Split options parse into more pieces --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index e885eaf9c26d..d2c34d4760fd 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -97,22 +97,27 @@ struct LifetimeCheckPass : public LifetimeCheckBase { unsigned val = None; unsigned histLimit = 1; - void parseOptions(LifetimeCheckPass &pass) { - for (auto &remark : pass.remarksList) { + void parseOptions(ArrayRef remarks, ArrayRef hist, + unsigned hist_limit) { + for (auto &remark : remarks) { val |= StringSwitch(remark) .Case("pset-invalid", RemarkPsetInvalid) .Case("pset-always", RemarkPsetAlways) .Case("all", RemarkAll) .Default(None); } - for (auto &h : pass.historyList) { + for (auto &h : hist) { val |= StringSwitch(h) .Case("invalid", HistoryInvalid) .Case("null", HistoryNull) .Case("all", HistoryAll) .Default(None); } - histLimit = pass.historyLimit; + histLimit = hist_limit; + } + + void parseOptions(LifetimeCheckPass &pass) { + parseOptions(pass.remarksList, pass.historyList, pass.historyLimit); } bool emitRemarkAll() { return val & RemarkAll; } From f1e30686c07d605e1af7846953bcb56f1d4796ff Mon Sep 17 00:00:00 2001 From: Jingsong Shang Date: Wed, 29 Jun 2022 16:27:31 -0700 Subject: [PATCH 0809/2301] [CIR][cir-tidy] Add cir-tidy tool cir-tidy is a command line tool that uses the same interface as clang-tidy but analysis are based on ClangIR instead of ASTMatchers or Clang CFG. This tool was brought together by Jingsong Shang. - Make cir-tidy tool build CIR out of clangAST and allow lifetime checker runs on generated CIR. - Add python script to check cir-tidy result. - The current implementation runs lifetime checker with no preset options in default. More options will be supported through command line interface. The cir-tidy tool accepts options in two ways, either config string using CLI or config file. One example of running cir-tidy + cir-lifetime-check looks like: ``` ./cir-tidy source.cpp -checks='-*, cir-lifetime-check' -config="{CheckOptions: [{key: 'cir-lifetime-check.RemarksList', value: 'None'}, {key: 'cir-lifetime-check.HistoryList', value: 'invalid;null'}]}" -- ``` Using ninja, cir-tidy is accessible over these instructions: - Enable clang-tools build `-DLLVM_ENABLE_PROJECTS="...;clang-tools-extra"` - To build: `ninja cir-tidy` - To run tests: `ninja check-clang-extra-cir-tidy` --- clang-tools-extra/clang-tidy/CMakeLists.txt | 1 + .../clang-tidy/cir-tidy/CIRASTConsumer.cpp | 83 +++ .../clang-tidy/cir-tidy/CIRASTConsumer.h | 24 + .../clang-tidy/cir-tidy/CIRTidy.cpp | 112 ++++ .../clang-tidy/cir-tidy/CIRTidy.h | 59 +++ .../clang-tidy/cir-tidy/CMakeLists.txt | 55 ++ .../clang-tidy/cir-tidy/tool/CIRTidyMain.cpp | 483 ++++++++++++++++++ .../clang-tidy/cir-tidy/tool/CIRTidyMain.h | 23 + .../cir-tidy/tool/CIRTidyToolMain.cpp | 21 + .../clang-tidy/cir-tidy/tool/CMakeLists.txt | 49 ++ .../test/cir-tidy/check_cir_tidy.py | 177 +++++++ .../test/cir-tidy/lifetime-basic.cpp | 16 + clang-tools-extra/test/lit.cfg.py | 5 + clang/include/clang/CIR/Dialect/Passes.h | 4 + .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 28 +- 15 files changed, 1138 insertions(+), 2 deletions(-) create mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp create mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h create mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp create mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h create mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CMakeLists.txt create mode 100644 clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp create mode 100644 clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.h create mode 100644 clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyToolMain.cpp create mode 100644 clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt create mode 100644 clang-tools-extra/test/cir-tidy/check_cir_tidy.py create mode 100644 clang-tools-extra/test/cir-tidy/lifetime-basic.cpp diff --git a/clang-tools-extra/clang-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/CMakeLists.txt index 93117cf1d637..c401dd414e70 100644 --- a/clang-tools-extra/clang-tidy/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/CMakeLists.txt @@ -111,6 +111,7 @@ set(ALL_CLANG_TIDY_CHECKS ${ALL_CLANG_TIDY_CHECKS} PARENT_SCOPE) add_subdirectory(plugin) add_subdirectory(tool) add_subdirectory(utils) +add_subdirectory(cir-tidy) if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY) install(DIRECTORY . diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp new file mode 100644 index 000000000000..2f022995fdbe --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp @@ -0,0 +1,83 @@ +#include "CIRASTConsumer.h" + +#include "clang/CIR/Dialect/Passes.h" +#include "mlir/IR/BuiltinOps.h" +#include "../utils/OptionsUtils.h" + +#include "mlir/IR/MLIRContext.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include + +using namespace clang; +using namespace clang::tidy; + +namespace { +const std::string lifeTimeCheck = "cir-lifetime-check"; +} // namespace + +namespace cir { +namespace tidy { + +CIRASTConsumer::CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, + clang::tidy::ClangTidyContext &Context) + : Context(Context) { + Gen = + std::make_unique(CI.getDiagnostics(), CI.getCodeGenOpts()); +} + +bool CIRASTConsumer::HandleTopLevelDecl(DeclGroupRef D) { + PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), + AstContext->getSourceManager(), + "CIR generation of declaration"); + Gen->HandleTopLevelDecl(D); + return true; +} + +void CIRASTConsumer::Initialize(ASTContext &Context) { + AstContext = &Context; + Gen->Initialize(Context); +} + +void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { + Gen->HandleTranslationUnit(C); + Gen->verifyModule(); + + mlir::ModuleOp mlirMod = Gen->getModule(); + std::unique_ptr mlirCtx = Gen->takeContext(); + + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(/*prettyForm=*/false); + + SourceManager &SourceMgr = C.getSourceManager(); + FileID MainFileID = SourceMgr.getMainFileID(); + + llvm::MemoryBufferRef MainFileBuf = SourceMgr.getBufferOrFake(MainFileID); + std::unique_ptr FileBuf = + llvm::MemoryBuffer::getMemBuffer(MainFileBuf); + + llvm::SourceMgr sourceMgr; + sourceMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); + + mlir::SourceMgrDiagnosticHandler sourceMgrHandler(sourceMgr, mlirCtx.get()); + mlir::PassManager pm(mlirCtx.get()); + pm.addPass(mlir::createMergeCleanupsPass()); + + clang::tidy::ClangTidyOptions Opts = Context.getOptions(); + static constexpr const char *remarkOptName = "cir-lifetime-check.RemarksList"; + static constexpr const char *histOptName = "cir-lifetime-check.HistoryList"; + auto remarks = + utils::options::parseStringList(Opts.CheckOptions[remarkOptName].Value); + auto hist = + utils::options::parseStringList(Opts.CheckOptions[histOptName].Value); + + if (Context.isCheckEnabled(lifeTimeCheck)) + pm.addPass(mlir::createLifetimeCheckPass(remarks, hist, 1, &C)); + + bool Result = !mlir::failed(pm.run(mlirMod)); + if (!Result) + llvm::report_fatal_error( + "The pass manager failed to run pass on the module!"); +} +} // namespace tidy +} // namespace cir diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h new file mode 100644 index 000000000000..eb758b09135a --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h @@ -0,0 +1,24 @@ +#include "../ClangTidyDiagnosticConsumer.h" +#include "clang/AST/ASTContext.h" +#include "clang/CIR/CIRGenerator.h" +#include "clang/Frontend/CompilerInstance.h" + +using namespace clang; + +namespace cir { +namespace tidy { +class CIRASTConsumer : public ASTConsumer { +public: + CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, + clang::tidy::ClangTidyContext &Context); + +private: + void Initialize(ASTContext &Context) override; + void HandleTranslationUnit(ASTContext &C) override; + bool HandleTopLevelDecl(DeclGroupRef D) override; + std::unique_ptr Gen; + ASTContext *AstContext{nullptr}; + clang::tidy::ClangTidyContext &Context; +}; +} // namespace tidy +} // namespace cir diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp new file mode 100644 index 000000000000..49a8b7162ca5 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp @@ -0,0 +1,112 @@ +//===--- clang-tidy/cir-tidy/CIRTidy.cpp - CIR tidy tool ------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file This file implements a cir-tidy tool. +/// +/// This tool uses the Clang Tooling infrastructure, see +/// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html +/// for details on setting it up with LLVM source tree. +/// +//===----------------------------------------------------------------------===// + +#include "CIRTidy.h" +#include "CIRASTConsumer.h" +#include "ClangTidyModuleRegistry.h" +#include "ClangTidyProfiling.h" +#include "clang-tidy-config.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Lex/PreprocessorOptions.h" +#include "clang/Tooling/Refactoring.h" + +using namespace clang::tooling; + +namespace cir { +namespace tidy { + +CIRTidyASTConsumerFactory::CIRTidyASTConsumerFactory( + ClangTidyContext &Context, + IntrusiveRefCntPtr OverlayFS) + : Context(Context), OverlayFS(std::move(OverlayFS)) {} + +std::unique_ptr +CIRTidyASTConsumerFactory::createASTConsumer(clang::CompilerInstance &Compiler, + StringRef File) { + return std::make_unique(Compiler, File, Context); +} + +std::vector CIRTidyASTConsumerFactory::getCheckNames() { + std::vector CheckNames; + for (const auto &CIRCheckName : this->CIRChecks) { + if (Context.isCheckEnabled(CIRCheckName)) + CheckNames.emplace_back(CIRCheckName); + } + + llvm::sort(CheckNames); + return CheckNames; +} + +std::vector +runCIRTidy(ClangTidyContext &Context, const CompilationDatabase &Compilations, + ArrayRef InputFiles, + llvm::IntrusiveRefCntPtr BaseFS, + bool ApplyAnyFix, bool EnableCheckProfile, + llvm::StringRef StoreCheckProfile) { + ClangTool Tool(Compilations, InputFiles, + std::make_shared(), BaseFS); + + Context.setEnableProfiling(EnableCheckProfile); + Context.setProfileStoragePrefix(StoreCheckProfile); + + ClangTidyDiagnosticConsumer DiagConsumer(Context, nullptr, true, ApplyAnyFix); + DiagnosticsEngine DE(new DiagnosticIDs(), new DiagnosticOptions(), + &DiagConsumer, /*ShouldOwnClient=*/false); + Context.setDiagnosticsEngine(&DE); + Tool.setDiagnosticConsumer(&DiagConsumer); + + class ActionFactory : public FrontendActionFactory { + public: + ActionFactory(ClangTidyContext &Context, + IntrusiveRefCntPtr BaseFS) + : ConsumerFactory(Context, std::move(BaseFS)) {} + std::unique_ptr create() override { + return std::make_unique(&ConsumerFactory); + } + + bool runInvocation(std::shared_ptr Invocation, + FileManager *Files, + std::shared_ptr PCHContainerOps, + DiagnosticConsumer *DiagConsumer) override { + // Explicitly ask to define __clang_analyzer__ macro. + Invocation->getPreprocessorOpts().SetUpStaticAnalyzer = true; + return FrontendActionFactory::runInvocation( + Invocation, Files, PCHContainerOps, DiagConsumer); + } + + private: + class Action : public ASTFrontendAction { + public: + Action(CIRTidyASTConsumerFactory *Factory) : Factory(Factory) {} + std::unique_ptr CreateASTConsumer(CompilerInstance &Compiler, + StringRef File) override { + return Factory->createASTConsumer(Compiler, File); + } + + private: + CIRTidyASTConsumerFactory *Factory; + }; + + CIRTidyASTConsumerFactory ConsumerFactory; + }; + + ActionFactory Factory(Context, std::move(BaseFS)); + Tool.run(&Factory); + return DiagConsumer.take(); +} + +} // namespace tidy +} // namespace cir diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h new file mode 100644 index 000000000000..03fefb4ed774 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h @@ -0,0 +1,59 @@ +//===--- CIRTidy.h - cir-tidy -------------------------------*- C++ -*-----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIRTIDY_H +#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIRTIDY_H + +#include "ClangTidyDiagnosticConsumer.h" +#include "ClangTidyModule.h" +#include "clang/AST/ASTConsumer.h" +#include + +namespace clang { +class CompilerInstance; +namespace tooling { +class CompilationDatabase; +} +} // namespace clang + +using namespace clang; +using namespace clang::tidy; + +namespace cir { +namespace tidy { + +class CIRTidyASTConsumerFactory { +public: + CIRTidyASTConsumerFactory( + ClangTidyContext &Context, + IntrusiveRefCntPtr OverlayFS = nullptr); + + std::unique_ptr + createASTConsumer(clang::CompilerInstance &Compiler, StringRef File); + + /// Get the list of enabled checks. + std::vector getCheckNames(); + +private: + ClangTidyContext &Context; + IntrusiveRefCntPtr OverlayFS; + const std::vector CIRChecks = {"cir-lifetime-check"}; +}; + +std::vector +runCIRTidy(clang::tidy::ClangTidyContext &Context, + const tooling::CompilationDatabase &Compilations, + ArrayRef InputFiles, + llvm::IntrusiveRefCntPtr BaseFS, + bool ApplyAnyFix, bool EnableCheckProfile = false, + llvm::StringRef StoreCheckProfile = StringRef()); + +} // end namespace tidy +} // end namespace cir + +#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIRTIDY_H diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/cir-tidy/CMakeLists.txt new file mode 100644 index 000000000000..fb58718bf769 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/CMakeLists.txt @@ -0,0 +1,55 @@ +set(LLVM_LINK_COMPONENTS + FrontendOpenMP + Support + ) + +include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/.. ) +include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) +include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_subdirectory(tool) + +add_clang_library(CIRTidy + CIRTidy.cpp + CIRASTConsumer.cpp + + DEPENDS + omp_gen + + LINK_LIBS + clangASTMatchers + clangCIR + clangFrontend + clangSerialization + clangTidy + clangTidyUtils + ${dialect_libs} + MLIRCIR + MLIRCIRTransforms + MLIRAffineToStandard + MLIRAnalysis + MLIRIR + MLIRLLVMCommonConversion + MLIRLLVMDialect + MLIRLLVMToLLVMIRTranslation + MLIRMemRefDialect + MLIRMemRefToLLVM + MLIRParser + MLIRPass + MLIRSideEffectInterfaces + MLIRSCFToControlFlow + MLIRFuncToLLVM + MLIRSupport + MLIRMemRefDialect + MLIRTargetLLVMIRExport + MLIRTransforms +) + +clang_target_link_libraries(CIRTidy + PRIVATE + clangBasic + clangTooling + clangToolingCore + ) diff --git a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp new file mode 100644 index 000000000000..6df74d943a67 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp @@ -0,0 +1,483 @@ +//===--- tools/extra/clang-tidy/cir/CIRTidyMain.cpp - cir tidy tool -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file This file implements a cir-tidy tool. +/// +/// This tool uses the Clang Tooling infrastructure, see +/// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html +/// for details on setting it up with LLVM source tree. +/// +//===----------------------------------------------------------------------===// + +#include "CIRTidyMain.h" +#include "../ClangTidy.h" +#include "../ClangTidyForceLinker.h" +#include "../GlobList.h" +#include "CIRTidy.h" +#include "clang/Tooling/CommonOptionsParser.h" +#include "llvm/Support/InitLLVM.h" +#include "llvm/Support/Process.h" +#include "llvm/Support/TargetSelect.h" +#include "llvm/Support/WithColor.h" + +using namespace clang::tooling; +using namespace clang::tidy; +using namespace llvm; + +static cl::OptionCategory CIRTidyCategory("cir-tidy options"); + +static cl::extrahelp CommonHelp(CommonOptionsParser::HelpMessage); +static cl::extrahelp CIRTidyHelp(R"( +Configuration files: + cir-tidy attempts to read configuration for each source file from a + .clang-tidy file located in the closest parent directory of the source + file. If InheritParentConfig is true in a config file, the configuration file + in the parent directory (if any exists) will be taken and current config file + will be applied on top of the parent one. If any configuration options have + a corresponding command-line option, command-line option takes precedence. + The effective configuration can be inspected using -dump-config: + + $ cir-tidy -dump-config + --- + Checks: '-*,some-check' + WarningsAsErrors: '' + HeaderFilterRegex: '' + FormatStyle: none + InheritParentConfig: true + User: user + CheckOptions: + - key: some-check.SomeOption + value: 'some value' + ... + +)"); + +const char DefaultChecks[] = // Enable these checks by default: + "clang-diagnostic-*," // * compiler diagnostics + "clang-analyzer-*"; // * Static Analyzer checks + +static cl::opt + Checks("checks", cl::desc(R"(Comma-separated list of globs with optional '-' +prefix. Globs are processed in order of +appearance in the list. Globs without '-' +prefix add checks with matching names to the +set, globs with the '-' prefix remove checks +with matching names from the set of enabled +checks. This option's value is appended to the +value of the 'Checks' option in .clang-tidy +file, if any. +)"), + cl::init(""), cl::cat(CIRTidyCategory)); + +static cl::opt + WarningsAsErrors("warnings-as-errors", + cl::desc(R"(Upgrades warnings to errors. Same format as +'-checks'. +This option's value is appended to the value of +the 'WarningsAsErrors' option in .clang-tidy +file, if any. +)"), + cl::init(""), cl::cat(CIRTidyCategory)); + +static cl::opt + HeaderFilter("header-filter", + cl::desc(R"(Regular expression matching the names of the +headers to output diagnostics from. Diagnostics +from the main file of each translation unit are +always displayed. +Can be used together with -line-filter. +This option overrides the 'HeaderFilterRegex' +option in .clang-tidy file, if any. +)"), + cl::init(""), cl::cat(CIRTidyCategory)); + +static cl::opt + SystemHeaders("system-headers", + cl::desc("Display the errors from system headers."), + cl::init(false), cl::cat(CIRTidyCategory)); +static cl::opt + LineFilter("line-filter", + cl::desc(R"(List of files with line ranges to filter the +warnings. Can be used together with +-header-filter. The format of the list is a +JSON array of objects: + [ + {"name":"file1.cpp","lines":[[1,3],[5,7]]}, + {"name":"file2.h"} + ] +)"), + cl::init(""), cl::cat(CIRTidyCategory)); + +static cl::opt Fix("fix", + cl::desc(R"(Apply suggested fixes. Without -fix-errors +cir-tidy will bail out if any compilation +errors were found. +)"), + cl::init(false), cl::cat(CIRTidyCategory)); + +static cl::opt + FixErrors("fix-errors", + cl::desc(R"(Apply suggested fixes even if compilation +errors were found. If compiler errors have +attached fix-its, cir-tidy will apply them as +well. +)"), + cl::init(false), cl::cat(CIRTidyCategory)); + +static cl::opt + FixNotes("fix-notes", + cl::desc(R"(If a warning has no fix, but a single fix can +be found through an associated diagnostic note, +apply the fix. +Specifying this flag will implicitly enable the +'--fix' flag. +)"), + cl::init(false), cl::cat(CIRTidyCategory)); + +static cl::opt + FormatStyle("format-style", + cl::desc(R"(Style for formatting code around applied fixes: + - 'none' (default) turns off formatting + - 'file' (literally 'file', not a placeholder) + uses .clang-format file in the closest parent + directory + - '{ }' specifies options inline, e.g. + -format-style='{BasedOnStyle: llvm, IndentWidth: 8}' + - 'llvm', 'google', 'webkit', 'mozilla' +See clang-format documentation for the up-to-date +information about formatting styles and options. +This option overrides the 'FormatStyle` option in +.clang-tidy file, if any. +)"), + cl::init("none"), cl::cat(CIRTidyCategory)); + +static cl::opt + ListChecks("list-checks", + cl::desc(R"(List all enabled checks and exit. Use with +-checks=* to list all available checks. +)"), + cl::init(false), cl::cat(CIRTidyCategory)); + +static cl::opt + ExplainConfig("explain-config", + cl::desc(R"(For each enabled check explains, where it is +enabled, i.e. in cir-tidy binary, command +line or a specific configuration file. +)"), + cl::init(false), cl::cat(CIRTidyCategory)); + +static cl::opt + Config("config", cl::desc(R"(Specifies a configuration in YAML/JSON format: + -config="{Checks: '*', + CheckOptions: [{key: x, + value: y}]}" +When the value is empty, cir-tidy will +attempt to find a file named .clang-tidy for +each source file in its parent directories. +)"), + cl::init(""), cl::cat(CIRTidyCategory)); + +static cl::opt ConfigFile( + "config-file", + cl::desc(R"(Specify the path of .clang-tidy or custom config file: + e.g. --config-file=/some/path/myTidyConfigFile +This option internally works exactly the same way as + --config option after reading specified config file. +Use either --config-file or --config, not both. +)"), + cl::init(""), cl::cat(CIRTidyCategory)); + +static cl::opt + DumpConfig("dump-config", + cl::desc(R"(Dumps configuration in the YAML format to +stdout. This option can be used along with a +file name (and '--' if the file is outside of a +project with configured compilation database). +The configuration used for this file will be +printed. +Use along with -checks=* to include +configuration of all checks. +)"), + cl::init(false), cl::cat(CIRTidyCategory)); + +static cl::opt + EnableCheckProfile("enable-check-profile", + cl::desc(R"(Enable per-check timing profiles, and print a +report to stderr. +)"), + cl::init(false), cl::cat(CIRTidyCategory)); + +static cl::opt + StoreCheckProfile("store-check-profile", + cl::desc(R"(By default reports are printed in tabulated +format to stderr. When this option is passed, +these per-TU profiles are instead stored as JSON. +)"), + cl::value_desc("prefix"), cl::cat(CIRTidyCategory)); + +/// This option allows enabling the experimental alpha checkers from the static +/// analyzer. This option is set to false and not visible in help, because it is +/// highly not recommended for users. +static cl::opt + AllowEnablingAnalyzerAlphaCheckers("allow-enabling-analyzer-alpha-checkers", + cl::init(false), cl::Hidden, + cl::cat(CIRTidyCategory)); + +static cl::opt + ExportFixes("export-fixes", + cl::desc(R"(YAML file to store suggested fixes in. The +stored fixes can be applied to the input source +code with cir-apply-replacements. +)"), + cl::value_desc("filename"), cl::cat(CIRTidyCategory)); + +static cl::opt + Quiet("quiet", cl::desc(R"(Run cir-tidy in quiet mode. This suppresses +printing statistics about ignored warnings and +warnings treated as errors if the respective +options are specified. +)"), + cl::init(false), cl::cat(CIRTidyCategory)); + +static cl::opt + VfsOverlay("vfsoverlay", + cl::desc(R"(Overlay the virtual filesystem described by file +over the real file system. +)"), + cl::value_desc("filename"), cl::cat(CIRTidyCategory)); + +static cl::opt + UseColor("use-color", + cl::desc(R"(Use colors in diagnostics. If not set, colors +will be used if the terminal connected to +standard output supports colors. +This option overrides the 'UseColor' option in +.clang-tidy file, if any. +)"), + cl::init(false), cl::cat(CIRTidyCategory)); +namespace cir { +namespace tidy { + +std::vector getCIRCheckNames(const ClangTidyOptions &Options) { + clang::tidy::ClangTidyContext Context( + std::make_unique(ClangTidyGlobalOptions(), + Options)); + CIRTidyASTConsumerFactory Factory(Context); + return Factory.getCheckNames(); +} + +static std::unique_ptr +createOptionsProvider(llvm::IntrusiveRefCntPtr FS) { + ClangTidyGlobalOptions GlobalOptions; + if (std::error_code Err = parseLineFilter(LineFilter, GlobalOptions)) { + llvm::errs() << "Invalid LineFilter: " << Err.message() << "\n\nUsage:\n"; + llvm::cl::PrintHelpMessage(/*Hidden=*/false, /*Categorized=*/true); + return nullptr; + } + + ClangTidyOptions DefaultOptions; + DefaultOptions.Checks = DefaultChecks; + DefaultOptions.WarningsAsErrors = ""; + DefaultOptions.HeaderFilterRegex = HeaderFilter; + DefaultOptions.SystemHeaders = SystemHeaders; + DefaultOptions.FormatStyle = FormatStyle; + DefaultOptions.User = llvm::sys::Process::GetEnv("USER"); + // USERNAME is used on Windows. + if (!DefaultOptions.User) + DefaultOptions.User = llvm::sys::Process::GetEnv("USERNAME"); + + ClangTidyOptions OverrideOptions; + if (Checks.getNumOccurrences() > 0) + OverrideOptions.Checks = Checks; + if (WarningsAsErrors.getNumOccurrences() > 0) + OverrideOptions.WarningsAsErrors = WarningsAsErrors; + if (HeaderFilter.getNumOccurrences() > 0) + OverrideOptions.HeaderFilterRegex = HeaderFilter; + if (SystemHeaders.getNumOccurrences() > 0) + OverrideOptions.SystemHeaders = SystemHeaders; + if (FormatStyle.getNumOccurrences() > 0) + OverrideOptions.FormatStyle = FormatStyle; + if (UseColor.getNumOccurrences() > 0) + OverrideOptions.UseColor = UseColor; + + auto LoadConfig = + [&](StringRef Configuration, + StringRef Source) -> std::unique_ptr { + llvm::ErrorOr ParsedConfig = + parseConfiguration(MemoryBufferRef(Configuration, Source)); + if (ParsedConfig) + return std::make_unique( + std::move(GlobalOptions), + ClangTidyOptions::getDefaults().merge(DefaultOptions, 0), + std::move(*ParsedConfig), std::move(OverrideOptions), std::move(FS)); + llvm::errs() << "Error: invalid configuration specified.\n" + << ParsedConfig.getError().message() << "\n"; + return nullptr; + }; + + if (ConfigFile.getNumOccurrences() > 0) { + if (Config.getNumOccurrences() > 0) { + llvm::errs() << "Error: --config-file and --config are " + "mutually exclusive. Specify only one.\n"; + return nullptr; + } + + llvm::ErrorOr> Text = + llvm::MemoryBuffer::getFile(ConfigFile); + if (std::error_code EC = Text.getError()) { + llvm::errs() << "Error: can't read config-file '" << ConfigFile + << "': " << EC.message() << "\n"; + return nullptr; + } + + return LoadConfig((*Text)->getBuffer(), ConfigFile); + } + + if (Config.getNumOccurrences() > 0) + return LoadConfig(Config, ""); + + return std::make_unique( + std::move(GlobalOptions), std::move(DefaultOptions), + std::move(OverrideOptions), std::move(FS)); +} + +llvm::IntrusiveRefCntPtr +getVfsFromFile(const std::string &OverlayFile, + llvm::IntrusiveRefCntPtr BaseFS) { + llvm::ErrorOr> Buffer = + BaseFS->getBufferForFile(OverlayFile); + if (!Buffer) { + llvm::errs() << "Can't load virtual filesystem overlay file '" + << OverlayFile << "': " << Buffer.getError().message() + << ".\n"; + return nullptr; + } + + IntrusiveRefCntPtr FS = vfs::getVFSFromYAML( + std::move(Buffer.get()), /*DiagHandler*/ nullptr, OverlayFile); + if (!FS) { + llvm::errs() << "Error: invalid virtual filesystem overlay file '" + << OverlayFile << "'.\n"; + return nullptr; + } + return FS; +} + +int CIRTidyMain(int argc, const char **argv) { + llvm::InitLLVM X(argc, argv); + llvm::Expected OptionsParser = + CommonOptionsParser::create(argc, argv, CIRTidyCategory, cl::ZeroOrMore); + if (!OptionsParser) { + llvm::WithColor::error() << llvm::toString(OptionsParser.takeError()); + return 1; + } + + llvm::IntrusiveRefCntPtr BaseFS( + new vfs::OverlayFileSystem(vfs::getRealFileSystem())); + + if (!VfsOverlay.empty()) { + IntrusiveRefCntPtr VfsFromFile = + getVfsFromFile(VfsOverlay, BaseFS); + if (!VfsFromFile) + return 1; + BaseFS->pushOverlay(std::move(VfsFromFile)); + } + + auto OwningOptionsProvider = createOptionsProvider(BaseFS); + auto *OptionsProvider = OwningOptionsProvider.get(); + if (!OptionsProvider) + return 1; + + auto MakeAbsolute = [](const std::string &Input) -> SmallString<256> { + if (Input.empty()) + return {}; + SmallString<256> AbsolutePath(Input); + if (std::error_code EC = llvm::sys::fs::make_absolute(AbsolutePath)) { + llvm::errs() << "Can't make absolute path from " << Input << ": " + << EC.message() << "\n"; + } + return AbsolutePath; + }; + + SmallString<256> ProfilePrefix = MakeAbsolute(StoreCheckProfile); + + StringRef FileName("dummy"); + auto PathList = OptionsParser->getSourcePathList(); + if (!PathList.empty()) { + FileName = PathList.front(); + } + + SmallString<256> FilePath = MakeAbsolute(std::string(FileName)); + + ClangTidyOptions EffectiveOptions = OptionsProvider->getOptions(FilePath); + std::vector EnabledChecks = getCIRCheckNames(EffectiveOptions); + + if (ExplainConfig) { + // FIXME: Show other ClangTidyOptions' fields, like ExtraArg. + std::vector + RawOptions = OptionsProvider->getRawOptions(FilePath); + for (const std::string &Check : EnabledChecks) { + for (auto It = RawOptions.rbegin(); It != RawOptions.rend(); ++It) { + if (It->first.Checks && GlobList(*It->first.Checks).contains(Check)) { + llvm::outs() << "'" << Check << "' is enabled in the " << It->second + << ".\n"; + break; + } + } + } + return 0; + } + + if (ListChecks) { + if (EnabledChecks.empty()) { + llvm::errs() << "No checks enabled.\n"; + return 1; + } + llvm::outs() << "Enabled checks:"; + for (const auto &CheckName : EnabledChecks) + llvm::outs() << "\n " << CheckName; + llvm::outs() << "\n\n"; + return 0; + } + + if (DumpConfig) { + EffectiveOptions.CheckOptions = + getCheckOptions(EffectiveOptions, AllowEnablingAnalyzerAlphaCheckers); + llvm::outs() << configurationAsText(ClangTidyOptions::getDefaults().merge( + EffectiveOptions, 0)) + << "\n"; + return 0; + } + + if (EnabledChecks.empty()) { + llvm::errs() << "Error: no checks enabled.\n"; + llvm::cl::PrintHelpMessage(/*Hidden=*/false, /*Categorized=*/true); + return 1; + } + + if (PathList.empty()) { + llvm::errs() << "Error: no input files specified.\n"; + llvm::cl::PrintHelpMessage(/*Hidden=*/false, /*Categorized=*/true); + return 1; + } + + llvm::InitializeAllTargetInfos(); + llvm::InitializeAllTargetMCs(); + llvm::InitializeAllAsmParsers(); + + ClangTidyContext Context(std::move(OwningOptionsProvider), + AllowEnablingAnalyzerAlphaCheckers); + std::vector Errors = + runCIRTidy(Context, OptionsParser->getCompilations(), PathList, BaseFS, + FixNotes, EnableCheckProfile, ProfilePrefix); + + return 0; +} + +} // namespace tidy +} // namespace cir diff --git a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.h b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.h new file mode 100644 index 000000000000..08d25544dbf3 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.h @@ -0,0 +1,23 @@ +//===--- tools/extra/clang-tidy/cir/CIRTidyMain.h - cir tidy tool ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file This file declares the main function for the cir-tidy tool. +/// +/// This tool uses the Clang Tooling infrastructure, see +/// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html +/// for details on setting it up with LLVM source tree. +/// +//===----------------------------------------------------------------------===// + +namespace cir { +namespace tidy { + +int CIRTidyMain(int argc, const char **argv); + +} // namespace tidy +} // namespace cir diff --git a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyToolMain.cpp b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyToolMain.cpp new file mode 100644 index 000000000000..b5213510e822 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyToolMain.cpp @@ -0,0 +1,21 @@ +//===--- tools/extra/clang-tidy/cir/CIRTidyToolMain.cpp - cir tidy tool ---===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file This file contains cir-tidy tool entry point main function. +/// +/// This tool uses the Clang Tooling infrastructure, see +/// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html +/// for details on setting it up with LLVM source tree. +/// +//===----------------------------------------------------------------------===// + +#include "CIRTidyMain.h" + +int main(int argc, const char **argv) { + return cir::tidy::CIRTidyMain(argc, argv); +} diff --git a/clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt b/clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt new file mode 100644 index 000000000000..d271d927cc39 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt @@ -0,0 +1,49 @@ +include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/.. ) + +set(LLVM_LINK_COMPONENTS + AllTargetsAsmParsers + AllTargetsDescs + AllTargetsInfos + FrontendOpenMP + support + ) + +# Needed by LLVM's CMake checks because this file defines multiple targets. +set(LLVM_OPTIONAL_SOURCES CIRTidyMain.cpp CIRTidyToolMain.cpp) + +add_clang_library(CIRTidyMain + CIRTidyMain.cpp + + LINK_LIBS + CIRTidy + clangTidy + MLIRIR + ${ALL_CLANG_TIDY_CHECKS} + + DEPENDS + omp_gen + ) + +clang_target_link_libraries(CIRTidyMain + PRIVATE + clangBasic + clangTooling + clangToolingCore + ) + +add_clang_tool(cir-tidy + CIRTidyToolMain.cpp + ) +add_dependencies(cir-tidy + clang-resource-headers + ) + +target_link_libraries(cir-tidy + PRIVATE + CIRTidyMain + CIRTidy + ) + +install(TARGETS cir-tidy + DESTINATION bin + ) diff --git a/clang-tools-extra/test/cir-tidy/check_cir_tidy.py b/clang-tools-extra/test/cir-tidy/check_cir_tidy.py new file mode 100644 index 000000000000..03e4862427e6 --- /dev/null +++ b/clang-tools-extra/test/cir-tidy/check_cir_tidy.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python +# +#===- check_cir_tidy.py - CIRTidy Test Helper ------------*- python -*--=======# +# +# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +#===------------------------------------------------------------------------===# + +r""" +CIRTIDY Test Helper +===================== + +This script runs cir-tidy and check outputed messages. + +Usage: + check_cir_tidy.py -- \ + [optional cir-tidy arguments] + +Example: + // RUN: %check_cir_tidy %s cir-lifetime-check %t -- +""" + +import argparse +import re +import subprocess +import sys + + +def write_file(file_name, text): + with open(file_name, 'w', encoding='utf-8') as f: + f.write(text) + f.truncate() + + +def run_test_once(args, extra_args): + input_file_name = args.input_file_name + check_name = args.check_name + temp_file_name = args.temp_file_name + temp_file_name = temp_file_name + ".cpp" + + cir_tidy_extra_args = extra_args + cir_extra_args = [] + if '--' in extra_args: + i = cir_tidy_extra_args.index('--') + cir_extra_args = cir_tidy_extra_args[i + 1:] + cir_tidy_extra_args = cir_tidy_extra_args[:i] + + # If the test does not specify a config style, force an empty one; otherwise + # autodetection logic can discover a ".clang-tidy" file that is not related to + # the test. + if not any( + [arg.startswith('-config=') for arg in cir_tidy_extra_args]): + cir_tidy_extra_args.append('-config={}') + + with open(input_file_name, 'r', encoding='utf-8') as input_file: + input_text = input_file.read() + + check_fixes_prefixes = [] + check_messages_prefixes = [] + check_notes_prefixes = [] + + has_check_fixes = False + has_check_messages = False + has_check_notes = False + + check_fixes_prefix = 'CHECK-FIXES' + check_messages_prefix = 'CHECK-MESSAGES' + check_notes_prefix = 'CHECK-NOTES' + + has_check_fix = check_fixes_prefix in input_text + has_check_message = check_messages_prefix in input_text + has_check_note = check_notes_prefix in input_text + + if not has_check_fix and not has_check_message and not has_check_note: + sys.exit('%s, %s or %s not found in the input' % + (check_fixes_prefix, check_messages_prefix, check_notes_prefix)) + + has_check_fixes = has_check_fixes or has_check_fix + has_check_messages = has_check_messages or has_check_message + has_check_notes = has_check_notes or has_check_note + + if has_check_fix: + check_fixes_prefixes.append(check_fixes_prefix) + if has_check_message: + check_messages_prefixes.append(check_messages_prefix) + if has_check_note: + check_notes_prefixes.append(check_notes_prefix) + + assert has_check_fixes or has_check_messages or has_check_notes + # Remove the contents of the CHECK lines to avoid CHECKs matching on + # themselves. We need to keep the comments to preserve line numbers while + # avoiding empty lines which could potentially trigger formatting-related + # checks. + cleaned_test = re.sub('// *CHECK-[A-Z0-9\-]*:[^\r\n]*', '//', input_text) + + write_file(temp_file_name, cleaned_test) + + original_file_name = temp_file_name + ".orig" + write_file(original_file_name, cleaned_test) + + args = ['cir-tidy', temp_file_name, '--checks=-*,' + check_name] + \ + cir_tidy_extra_args + ['--'] + cir_extra_args + print('Running ' + repr(args) + '...') + try: + cir_tidy_output = \ + subprocess.check_output(args, stderr=subprocess.STDOUT).decode() + except subprocess.CalledProcessError as e: + print('cir-tidy failed:\n' + e.output.decode()) + raise + + print('------------------------ cir-tidy output -------------------------') + print(cir_tidy_output.encode()) + print('\n------------------------------------------------------------------') + + try: + diff_output = subprocess.check_output( + ['diff', '-u', original_file_name, temp_file_name], + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + diff_output = e.output + + print('------------------------------ Fixes -----------------------------\n' + + diff_output.decode(errors='ignore') + + '\n------------------------------------------------------------------') + + if has_check_fixes: + try: + subprocess.check_output( + ['FileCheck', '-input-file=' + temp_file_name, input_file_name, + '-check-prefixes=' + ','.join(check_fixes_prefixes), + '-strict-whitespace'], + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + print('FileCheck failed:\n' + e.output.decode()) + raise + + if has_check_messages: + messages_file = temp_file_name + '.msg' + write_file(messages_file, cir_tidy_output) + try: + subprocess.check_output( + ['FileCheck', '-input-file=' + messages_file, input_file_name, + '-check-prefixes=' + ','.join(check_messages_prefixes), + '-implicit-check-not={{warning|error}}:'], + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + print('FileCheck failed:\n' + e.output.decode()) + raise + + if has_check_notes: + notes_file = temp_file_name + '.notes' + write_file(notes_file, cir_tidy_output) + try: + subprocess.check_output( + ['FileCheck', '-input-file=' + notes_file, input_file_name, + '-check-prefixes=' + ','.join(check_notes_prefixes), + '-implicit-check-not={{error}}:'], + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + print('FileCheck failed:\n' + e.output.decode()) + raise + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('input_file_name') + parser.add_argument('check_name') + parser.add_argument('temp_file_name') + + args, extra_args = parser.parse_known_args() + run_test_once(args, extra_args) + + +if __name__ == '__main__': + main() diff --git a/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp b/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp new file mode 100644 index 000000000000..9e50cf2a06d3 --- /dev/null +++ b/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp @@ -0,0 +1,16 @@ +// RUN: %check_cir_tidy %s cir-lifetime-check %t \ +// RUN: -config='{CheckOptions: \ +// RUN: [{key: cir-lifetime-check.RemarksList, value: "None"}, \ +// RUN: {key: cir-lifetime-check.HistoryList, value: "invalid;null"}]}' \ +// RUN: -- + +int *p0() { + int *p = nullptr; + { + int x = 0; + p = &x; + *p = 42; + } // CHECK-NOTES: note: pointee 'x' invalidated at end of scope + *p = 42; // CHECK-MESSAGES: warning: use of invalid pointer 'p' + return p; +} diff --git a/clang-tools-extra/test/lit.cfg.py b/clang-tools-extra/test/lit.cfg.py index 9f64fd3d2ffa..2e3937337ed3 100644 --- a/clang-tools-extra/test/lit.cfg.py +++ b/clang-tools-extra/test/lit.cfg.py @@ -54,6 +54,11 @@ config.substitutions.append( ("%check_clang_tidy", "%s %s" % (python_exec, check_clang_tidy)) ) +check_cir_tidy = os.path.join( + config.test_source_root, "cir-tidy", "check_cir_tidy.py") +config.substitutions.append( + ('%check_cir_tidy', + '%s %s' % (python_exec, check_cir_tidy)) ) clang_tidy_diff = os.path.join( config.test_source_root, "..", "clang-tidy", "tool", "clang-tidy-diff.py" ) diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index ade41fd1db18..903681c0d1ba 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -22,6 +22,10 @@ namespace mlir { std::unique_ptr createLifetimeCheckPass(); std::unique_ptr createLifetimeCheckPass(clang::ASTContext *astCtx); +std::unique_ptr createLifetimeCheckPass(ArrayRef remark, + ArrayRef hist, + unsigned hist_limit, + clang::ASTContext *astCtx); std::unique_ptr createMergeCleanupsPass(); std::unique_ptr createDropASTPass(); diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index d2c34d4760fd..1d083b99804d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -96,9 +96,13 @@ struct LifetimeCheckPass : public LifetimeCheckBase { }; unsigned val = None; unsigned histLimit = 1; + bool isOptionsParsed = false; - void parseOptions(ArrayRef remarks, ArrayRef hist, + void parseOptions(ArrayRef remarks, ArrayRef hist, unsigned hist_limit) { + if (isOptionsParsed) + return; + for (auto &remark : remarks) { val |= StringSwitch(remark) .Case("pset-invalid", RemarkPsetInvalid) @@ -114,10 +118,20 @@ struct LifetimeCheckPass : public LifetimeCheckBase { .Default(None); } histLimit = hist_limit; + isOptionsParsed = true; } void parseOptions(LifetimeCheckPass &pass) { - parseOptions(pass.remarksList, pass.historyList, pass.historyLimit); + SmallVector remarks; + SmallVector hists; + + for (auto &r : pass.remarksList) + remarks.push_back(r); + + for (auto &h : pass.historyList) + hists.push_back(h); + + parseOptions(remarks, hists, pass.historyLimit); } bool emitRemarkAll() { return val & RemarkAll; } @@ -1455,6 +1469,16 @@ std::unique_ptr mlir::createLifetimeCheckPass(clang::ASTContext *astCtx) { return std::move(lifetime); } +std::unique_ptr mlir::createLifetimeCheckPass(ArrayRef remark, + ArrayRef hist, + unsigned hist_limit, + clang::ASTContext *astCtx) { + auto lifetime = std::make_unique(); + lifetime->setASTContext(astCtx); + lifetime->opts.parseOptions(remark, hist, hist_limit); + return std::move(lifetime); +} + //===----------------------------------------------------------------------===// // Dump & print helpers //===----------------------------------------------------------------------===// From c3bcdda45008ae112112b52c8ee9168c120ec22c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 31 Jan 2023 12:32:47 -0800 Subject: [PATCH 0810/2301] [CIR][CIRTidy] Honor export-fixes options, and update check_cir_tidy.py to print cir-tidy invocation --- .../clang-tidy/cir-tidy/CIRTidy.cpp | 18 ++++++++++++++++++ .../clang-tidy/cir-tidy/tool/CIRTidyMain.cpp | 10 ++++++++++ .../test/cir-tidy/check_cir_tidy.py | 18 ++++++++++++++++-- 3 files changed, 44 insertions(+), 2 deletions(-) diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp index 49a8b7162ca5..eaa5a1f463e8 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp @@ -21,9 +21,13 @@ #include "clang-tidy-config.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Lex/PreprocessorOptions.h" +#include "clang/Tooling/DiagnosticsYaml.h" #include "clang/Tooling/Refactoring.h" +#include "clang/Tooling/ReplacementsYaml.h" +#include "clang/Tooling/Tooling.h" using namespace clang::tooling; +using namespace llvm; namespace cir { namespace tidy { @@ -50,6 +54,20 @@ std::vector CIRTidyASTConsumerFactory::getCheckNames() { return CheckNames; } +void exportReplacements(const llvm::StringRef MainFilePath, + const std::vector &Errors, + raw_ostream &OS) { + TranslationUnitDiagnostics TUD; + TUD.MainSourceFile = std::string(MainFilePath); + for (const auto &Error : Errors) { + tooling::Diagnostic Diag = Error; + TUD.Diagnostics.insert(TUD.Diagnostics.end(), Diag); + } + + yaml::Output YAML(OS); + YAML << TUD; +} + std::vector runCIRTidy(ClangTidyContext &Context, const CompilationDatabase &Compilations, ArrayRef InputFiles, diff --git a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp index 6df74d943a67..14d9298e09cc 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp +++ b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp @@ -476,6 +476,16 @@ int CIRTidyMain(int argc, const char **argv) { runCIRTidy(Context, OptionsParser->getCompilations(), PathList, BaseFS, FixNotes, EnableCheckProfile, ProfilePrefix); + if (!ExportFixes.empty() && !Errors.empty()) { + std::error_code EC; + llvm::raw_fd_ostream OS(ExportFixes, EC, llvm::sys::fs::OF_None); + if (EC) { + llvm::errs() << "Error opening output file: " << EC.message() << '\n'; + return 1; + } + exportReplacements(FilePath.str(), Errors, OS); + } + return 0; } diff --git a/clang-tools-extra/test/cir-tidy/check_cir_tidy.py b/clang-tools-extra/test/cir-tidy/check_cir_tidy.py index 03e4862427e6..5f042718efda 100644 --- a/clang-tools-extra/test/cir-tidy/check_cir_tidy.py +++ b/clang-tools-extra/test/cir-tidy/check_cir_tidy.py @@ -26,7 +26,7 @@ import re import subprocess import sys - +import shutil def write_file(file_name, text): with open(file_name, 'w', encoding='utf-8') as f: @@ -102,7 +102,21 @@ def run_test_once(args, extra_args): args = ['cir-tidy', temp_file_name, '--checks=-*,' + check_name] + \ cir_tidy_extra_args + ['--'] + cir_extra_args - print('Running ' + repr(args) + '...') + + arg_print_list = [] + for arg_print in cir_tidy_extra_args: + if (arg_print.startswith("-config=")): + conf = arg_print.replace("-config=", "-config='") + conf += "'" + arg_print_list.append(conf) + continue + arg_print_list.append(arg_print) + + cir_tidy_bin = shutil.which('cir-tidy') + args_for_print = [cir_tidy_bin, temp_file_name, "--checks='-*," + check_name + "'"] + \ + arg_print_list + ['--'] + cir_extra_args + print('Running: ' + " ".join(args_for_print)) + try: cir_tidy_output = \ subprocess.check_output(args, stderr=subprocess.STDOUT).decode() From f4952afe260372298bac29755203b79dec4888af Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 1 Feb 2023 12:18:53 -0800 Subject: [PATCH 0811/2301] [CIR][CIRTidy] Translate between MLIR diagnostics and ClangTidyError - Create a new diagnostic handler that at the same time allows printing diagnostics but also forwarding to ClangTidyErrors related machinery. - Unique references to lifetime check name. - Add support for warnings, notes and remarks. - Add testing for the YAML outputs, which are automatically derived from ClangTidyErrors. - Use `ClangTidyCheck::OptionsView` to handle pass options. --- .../clang-tidy/cir-tidy/CIRASTConsumer.cpp | 115 +++++++++++++++--- .../clang-tidy/cir-tidy/CIRChecks.h | 21 ++++ .../clang-tidy/cir-tidy/CIRTidy.cpp | 13 ++ .../clang-tidy/cir-tidy/CIRTidy.h | 5 +- .../test/cir-tidy/lifetime-basic.cpp | 28 ++++- 5 files changed, 159 insertions(+), 23 deletions(-) create mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CIRChecks.h diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp index 2f022995fdbe..c785bb939eb0 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp @@ -1,21 +1,26 @@ +//===--- clang-tidy/cir-tidy/CIRASTConsumer.cpp ---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + #include "CIRASTConsumer.h" +#include "CIRChecks.h" -#include "clang/CIR/Dialect/Passes.h" -#include "mlir/IR/BuiltinOps.h" #include "../utils/OptionsUtils.h" - +#include "ClangTidyCheck.h" +#include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" +#include "clang/CIR/Dialect/Passes.h" #include using namespace clang; using namespace clang::tidy; -namespace { -const std::string lifeTimeCheck = "cir-lifetime-check"; -} // namespace - namespace cir { namespace tidy { @@ -49,30 +54,102 @@ void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { mlir::OpPrintingFlags flags; flags.enableDebugInfo(/*prettyForm=*/false); - SourceManager &SourceMgr = C.getSourceManager(); - FileID MainFileID = SourceMgr.getMainFileID(); + clang::SourceManager &clangSrcMgr = C.getSourceManager(); + FileID MainFileID = clangSrcMgr.getMainFileID(); - llvm::MemoryBufferRef MainFileBuf = SourceMgr.getBufferOrFake(MainFileID); + llvm::MemoryBufferRef MainFileBuf = clangSrcMgr.getBufferOrFake(MainFileID); std::unique_ptr FileBuf = llvm::MemoryBuffer::getMemBuffer(MainFileBuf); - llvm::SourceMgr sourceMgr; - sourceMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); + llvm::SourceMgr llvmSrcMgr; + llvmSrcMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); + + class CIRTidyDiagnosticHandler : public mlir::SourceMgrDiagnosticHandler { + clang::tidy::ClangTidyContext &tidyCtx; + clang::SourceManager &clangSrcMgr; + + clang::SourceLocation getClangSrcLoc(mlir::Location loc) { + clang::SourceLocation clangLoc; + FileManager &fileMgr = clangSrcMgr.getFileManager(); + + auto fileLoc = loc.dyn_cast(); + if (!fileLoc) + return clangLoc; + // The column and line may be zero to represent unknown column and/or + // unknown line/column information. + if (fileLoc.getLine() == 0 || fileLoc.getColumn() == 0) + return clangLoc; + if (auto FE = fileMgr.getFile(fileLoc.getFilename())) { + return clangSrcMgr.translateFileLineCol(*FE, fileLoc.getLine(), + fileLoc.getColumn()); + } + return clangLoc; + } + + clang::DiagnosticIDs::Level + translateToClangDiagLevel(const mlir::DiagnosticSeverity &sev) { + switch (sev) { + case mlir::DiagnosticSeverity::Note: + return clang::DiagnosticIDs::Level::Note; + case mlir::DiagnosticSeverity::Warning: + return clang::DiagnosticIDs::Level::Warning; + case mlir::DiagnosticSeverity::Error: + return clang::DiagnosticIDs::Level::Error; + case mlir::DiagnosticSeverity::Remark: + return clang::DiagnosticIDs::Level::Remark; + } + llvm_unreachable("should not get here!"); + } + + public: + void emitClangTidyDiagnostic(mlir::Diagnostic &diag) { + tidyCtx.diag(cir::checks::LifetimeCheckName, + getClangSrcLoc(diag.getLocation()), diag.str(), + translateToClangDiagLevel(diag.getSeverity())); + for (const auto ¬e : diag.getNotes()) { + tidyCtx.diag(cir::checks::LifetimeCheckName, + getClangSrcLoc(note.getLocation()), note.str(), + translateToClangDiagLevel(note.getSeverity())); + } + } + + CIRTidyDiagnosticHandler(llvm::SourceMgr &mgr, mlir::MLIRContext *ctx, + clang::tidy::ClangTidyContext &tidyContext, + clang::SourceManager &clangMgr, + ShouldShowLocFn &&shouldShowLocFn = {}) + : SourceMgrDiagnosticHandler(mgr, ctx, llvm::errs(), + std::move(shouldShowLocFn)), + tidyCtx(tidyContext), clangSrcMgr(clangMgr) { + setHandler([this](mlir::Diagnostic &diag) { + // Emit diagnostic to llvm::errs() but also populate Clang + emitClangTidyDiagnostic(diag); + emitDiagnostic(diag); + }); + } + ~CIRTidyDiagnosticHandler() = default; + }; + + // Use a custom diagnostic handler that can allow both regular printing to + // stderr but also populates clang-tidy context with diagnostics (and allow + // for instance, diagnostics to be later converted to YAML). + CIRTidyDiagnosticHandler sourceMgrHandler(llvmSrcMgr, mlirCtx.get(), Context, + clangSrcMgr); - mlir::SourceMgrDiagnosticHandler sourceMgrHandler(sourceMgr, mlirCtx.get()); mlir::PassManager pm(mlirCtx.get()); pm.addPass(mlir::createMergeCleanupsPass()); clang::tidy::ClangTidyOptions Opts = Context.getOptions(); - static constexpr const char *remarkOptName = "cir-lifetime-check.RemarksList"; - static constexpr const char *histOptName = "cir-lifetime-check.HistoryList"; + ClangTidyCheck::OptionsView OptsView(cir::checks::LifetimeCheckName, + Opts.CheckOptions, &Context); + auto remarks = - utils::options::parseStringList(Opts.CheckOptions[remarkOptName].Value); + utils::options::parseStringList(OptsView.get("RemarksList", "")); auto hist = - utils::options::parseStringList(Opts.CheckOptions[histOptName].Value); + utils::options::parseStringList(OptsView.get("HistoryList", "all")); + auto hLimit = OptsView.get("HistLimit", 1U); - if (Context.isCheckEnabled(lifeTimeCheck)) - pm.addPass(mlir::createLifetimeCheckPass(remarks, hist, 1, &C)); + if (Context.isCheckEnabled(cir::checks::LifetimeCheckName)) + pm.addPass(mlir::createLifetimeCheckPass(remarks, hist, hLimit, &C)); bool Result = !mlir::failed(pm.run(mlirMod)); if (!Result) diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRChecks.h b/clang-tools-extra/clang-tidy/cir-tidy/CIRChecks.h new file mode 100644 index 000000000000..7dccbf879b4b --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRChecks.h @@ -0,0 +1,21 @@ +//===--- CIRChecks.h - cir-tidy -----------------------------*- C++ -*-----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_TOOLS_EXTRA_CIRTIDY_CHECKS_H +#define LLVM_CLANG_TOOLS_EXTRA_CIRTIDY_CHECKS_H + +// FIXME: split LifetimeCheck.cpp into headers and expose the class in a way +// we can directly query the pass name and unique the source of truth. + +namespace cir { +namespace checks { +constexpr const char *LifetimeCheckName = "cir-lifetime-check"; +} +} // namespace cir + +#endif \ No newline at end of file diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp index eaa5a1f463e8..0468f9198ce8 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp @@ -40,6 +40,19 @@ CIRTidyASTConsumerFactory::CIRTidyASTConsumerFactory( std::unique_ptr CIRTidyASTConsumerFactory::createASTConsumer(clang::CompilerInstance &Compiler, StringRef File) { + // FIXME(clang-tidy): Move this to a separate method, so that + // CreateASTConsumer doesn't modify Compiler. + SourceManager *SM = &Compiler.getSourceManager(); + Context.setSourceManager(SM); + Context.setCurrentFile(File); + Context.setASTContext(&Compiler.getASTContext()); + + auto WorkingDir = Compiler.getSourceManager() + .getFileManager() + .getVirtualFileSystem() + .getCurrentWorkingDirectory(); + if (WorkingDir) + Context.setCurrentBuildDirectory(WorkingDir.get()); return std::make_unique(Compiler, File, Context); } diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h index 03fefb4ed774..91073d106328 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h @@ -9,9 +9,11 @@ #ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIRTIDY_H #define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIRTIDY_H +#include "CIRChecks.h" #include "ClangTidyDiagnosticConsumer.h" #include "ClangTidyModule.h" #include "clang/AST/ASTConsumer.h" +#include "clang/CIR/Dialect/Passes.h" #include namespace clang { @@ -42,7 +44,8 @@ class CIRTidyASTConsumerFactory { private: ClangTidyContext &Context; IntrusiveRefCntPtr OverlayFS; - const std::vector CIRChecks = {"cir-lifetime-check"}; + const std::vector CIRChecks = { + cir::checks::LifetimeCheckName}; }; std::vector diff --git a/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp b/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp index 9e50cf2a06d3..f2088c59a27c 100644 --- a/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp +++ b/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp @@ -1,16 +1,38 @@ // RUN: %check_cir_tidy %s cir-lifetime-check %t \ +// RUN: --export-fixes=%t.yaml \ // RUN: -config='{CheckOptions: \ -// RUN: [{key: cir-lifetime-check.RemarksList, value: "None"}, \ +// RUN: [{key: cir-lifetime-check.RemarksList, value: "all"}, \ +// RUN: {key: cir-lifetime-check.HistLimit, value: "1"}, \ // RUN: {key: cir-lifetime-check.HistoryList, value: "invalid;null"}]}' \ // RUN: -- +// RUN: FileCheck -input-file=%t.yaml -check-prefix=CHECK-YAML %s int *p0() { int *p = nullptr; { int x = 0; p = &x; - *p = 42; + *p = 42; // CHECK-MESSAGES: remark: pset => { x } } // CHECK-NOTES: note: pointee 'x' invalidated at end of scope - *p = 42; // CHECK-MESSAGES: warning: use of invalid pointer 'p' + *p = 42; // CHECK-MESSAGES: remark: pset => { invalid } + // CHECK-MESSAGES: :[[@LINE-1]]:4: warning: use of invalid pointer 'p' return p; } + +// CHECK-YAML: DiagnosticMessage: +// CHECK-YAML: Message: 'pset => { x }' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Level: Remark + +// CHECK-YAML: DiagnosticMessage: +// CHECK-YAML: Message: 'pset => { invalid }' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Level: Remark + +// CHECK-YAML: DiagnosticMessage: +// CHECK-YAML: Message: 'use of invalid pointer ''p''' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Notes: +// CHECK-YAML: - Message: 'pointee ''x'' invalidated at end of scope' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Level: Warning \ No newline at end of file From 842bbd8c0ddbfd4fee3b9174929d7ad82e69e178 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 1 Feb 2023 16:01:46 -0800 Subject: [PATCH 0812/2301] [CIR][CIRTidy] Fix cmake and test config to check for CLANG_ENABLE_CIR --- clang-tools-extra/clang-tidy/CMakeLists.txt | 5 ++++- clang-tools-extra/test/CMakeLists.txt | 7 +++++++ clang-tools-extra/test/cir-tidy/lit.local.cfg | 2 ++ clang-tools-extra/test/lit.site.cfg.py.in | 1 + 4 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 clang-tools-extra/test/cir-tidy/lit.local.cfg diff --git a/clang-tools-extra/clang-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/CMakeLists.txt index c401dd414e70..cab701f0f08d 100644 --- a/clang-tools-extra/clang-tidy/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/CMakeLists.txt @@ -111,7 +111,10 @@ set(ALL_CLANG_TIDY_CHECKS ${ALL_CLANG_TIDY_CHECKS} PARENT_SCOPE) add_subdirectory(plugin) add_subdirectory(tool) add_subdirectory(utils) -add_subdirectory(cir-tidy) + +if(CLANG_ENABLE_CIR) + add_subdirectory(cir-tidy) +endif() if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY) install(DIRECTORY . diff --git a/clang-tools-extra/test/CMakeLists.txt b/clang-tools-extra/test/CMakeLists.txt index 7e4d99d8cfc1..2b51500fa2cf 100644 --- a/clang-tools-extra/test/CMakeLists.txt +++ b/clang-tools-extra/test/CMakeLists.txt @@ -10,6 +10,7 @@ set(CLANG_TOOLS_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/..") llvm_canonicalize_cmake_booleans( CLANG_TIDY_ENABLE_STATIC_ANALYZER CLANG_PLUGIN_SUPPORT + CLANG_ENABLE_CIR LLVM_INSTALL_TOOLCHAIN_ONLY ) @@ -52,6 +53,12 @@ set(CLANG_TOOLS_TEST_DEPS clang-tidy ) +if(CLANG_ENABLE_CIR) + list(APPEND CLANG_TOOLS_TEST_DEPS + cir-tidy + ) +endif() + # Add lit test dependencies. set(LLVM_UTILS_DEPS FileCheck count not diff --git a/clang-tools-extra/test/cir-tidy/lit.local.cfg b/clang-tools-extra/test/cir-tidy/lit.local.cfg new file mode 100644 index 000000000000..e479c3e74cb6 --- /dev/null +++ b/clang-tools-extra/test/cir-tidy/lit.local.cfg @@ -0,0 +1,2 @@ +if not config.clang_enable_cir: + config.unsupported = True \ No newline at end of file diff --git a/clang-tools-extra/test/lit.site.cfg.py.in b/clang-tools-extra/test/lit.site.cfg.py.in index e6503a4c097c..fb3b1f675a20 100644 --- a/clang-tools-extra/test/lit.site.cfg.py.in +++ b/clang-tools-extra/test/lit.site.cfg.py.in @@ -11,6 +11,7 @@ config.target_triple = "@LLVM_TARGET_TRIPLE@" config.host_triple = "@LLVM_HOST_TRIPLE@" config.clang_tidy_staticanalyzer = @CLANG_TIDY_ENABLE_STATIC_ANALYZER@ config.has_plugins = @CLANG_PLUGIN_SUPPORT@ +config.clang_enable_cir = @CLANG_ENABLE_CIR@ # Support substitution of the tools and libs dirs with user parameters. This is # used when we can't determine the tool dir at configuration time. config.llvm_tools_dir = lit_config.substitute("@LLVM_TOOLS_DIR@") From 16418a74fba57582eff6c41918cfc01cd08e05da Mon Sep 17 00:00:00 2001 From: Ivan Murashko Date: Tue, 28 Feb 2023 03:00:20 -0800 Subject: [PATCH 0813/2301] [CIR][CIRGen][NFC] Fix build on gcc host compiler + stdlibc++ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Summary: The following compilation command is broken: ``` ninja clang ``` It will produce the some compilation and link errors if you try to build it on Linux Compilation error: ``` <...>/clangir/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp:362: undefined reference to `cir::ConstantEmitter::tryEmitPrivateForMemory(clang::Expr const*, clang::QualType)' lib/libclangCIR.a(CIRGenExprCst.cpp.o): In function `(anonymous namespace)::ConstStructBuilder::Build(clang::InitListExpr*, bool)': <...>/clangir/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp:433: undefined reference to `cir::ConstantEmitter::tryEmitPrivateForMemory(clang::Expr const*, clang::QualType)' ``` Link error: ``` .../clangir/clang/lib/CIR/CodeGen/CIRGenTypes.cpp: In member function ‘std::__cxx11::string cir::CIRGenTypes::getRecordTypeName(const clang::RecordDecl*, llvm::StringRef)’: .../clangir/clang/lib/CIR/CodeGen/CIRGenTypes.cpp:56:35: error: ‘clang::DeclaratorDecl’ is not a base of ‘const clang::RecordDecl’ recordDecl->DeclaratorDecl::printName(outStream); ``` Test Plan: ``` ninja clang ``` --- clang/lib/CIR/CodeGen/CIRGenExprCst.cpp | 13 ++++++++++++- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 4 ++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp index 8e71e4eef10f..48d04fbc6234 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp @@ -1156,6 +1156,12 @@ mlir::Attribute ConstantEmitter::tryEmitAbstractForMemory(const APValue &value, return (C ? emitForMemory(C, destType) : nullptr); } +mlir::TypedAttr ConstantEmitter::tryEmitPrivateForMemory(const Expr *E, + QualType destType) { + assert(0 && "not implemented"); + return nullptr; +} + mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, QualType destType) { auto nonMemoryDestType = getNonMemoryType(CGM, destType); @@ -1249,6 +1255,11 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, return {}; } +mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *E, QualType T) { + assert(0 && "not implemented"); + return nullptr; +} + mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, QualType DestType) { switch (Value.getKind()) { @@ -1366,4 +1377,4 @@ mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) { llvm_unreachable("NYI"); return {}; -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 9b8cf1474d73..4b430071be34 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -54,7 +54,7 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, if (recordDecl->getDeclContext()) recordDecl->printQualifiedName(outStream, policy); else - recordDecl->DeclaratorDecl::printName(outStream); + recordDecl->printName(outStream, policy); } else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl()) { if (typedefNameDecl->getDeclContext()) typedefNameDecl->printQualifiedName(outStream, policy); @@ -786,4 +786,4 @@ bool CIRGenTypes::isZeroInitializable(QualType T) { bool CIRGenTypes::isZeroInitializable(const RecordDecl *RD) { return getCIRGenRecordLayout(RD).isZeroInitializable(); -} \ No newline at end of file +} From 37462bd0780d2bf9db9a9f17b42d819ddbcae87d Mon Sep 17 00:00:00 2001 From: redbopo Date: Wed, 8 Feb 2023 00:45:30 +0800 Subject: [PATCH 0814/2301] [CIR][CodeGen][Lower] Support CIR Unary Not Operation. Add Unary Not Operation on CIRGen and Lowerings. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 7 ++++-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 7 +++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 3 +-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 9 ++++++++ .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 7 ++++++ clang/test/CIR/CodeGen/unary.cpp | 12 ++++++++++ clang/test/CIR/Lowering/unary-not.cir | 22 +++++++++++++++++++ 7 files changed, 62 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/Lowering/unary-not.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 03253cce7999..00511cf7b1a2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -588,6 +588,7 @@ def UnaryOpKind_Inc : I32EnumAttrCase<"Inc", 1, "inc">; def UnaryOpKind_Dec : I32EnumAttrCase<"Dec", 2, "dec">; def UnaryOpKind_Plus : I32EnumAttrCase<"Plus", 3, "plus">; def UnaryOpKind_Minus : I32EnumAttrCase<"Minus", 4, "minus">; +def UnaryOpKind_Not : I32EnumAttrCase<"Not", 5, "not">; def UnaryOpKind : I32EnumAttr< "UnaryOpKind", @@ -595,7 +596,9 @@ def UnaryOpKind : I32EnumAttr< [UnaryOpKind_Inc, UnaryOpKind_Dec, UnaryOpKind_Plus, - UnaryOpKind_Minus]> { + UnaryOpKind_Minus, + UnaryOpKind_Not, + ]> { let cppNamespace = "::mlir::cir"; } @@ -604,7 +607,7 @@ def UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> { let summary = "Unary operations"; let description = [{ `cir.unary` performs the unary operation according to - the specified opcode kind: [inc, dec, plus, minus]. + the specified opcode kind: [inc, dec, plus, minus, not]. Note for inc and dec: the operation corresponds only to the addition/subtraction, its input is expect to come from a load diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index c918858eb519..c76c16d52ee6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -410,7 +410,12 @@ class ScalarExprEmitter : public StmtVisitor { return Visit(E->getSubExpr()); } - mlir::Value VisitUnaryNot(const UnaryOperator *E) { llvm_unreachable("NYI"); } + mlir::Value VisitUnaryNot(const UnaryOperator *E) { + TestAndClearIgnoreResultAssign(); + mlir::Value op = Visit(E->getSubExpr()); + return buildUnaryOp(E, mlir::cir::UnaryOpKind::Not, op); + } + mlir::Value VisitUnaryLNot(const UnaryOperator *E) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 74f3da9d6d82..45ee0984b381 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1421,9 +1421,8 @@ LogicalResult UnaryOp::verify() { "to the same address as the input memory load"; } case cir::UnaryOpKind::Plus: - // Nothing to verify. - return success(); case cir::UnaryOpKind::Minus: + case cir::UnaryOpKind::Not: // Nothing to verify. return success(); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 50a9cbbdec81..60d754da283c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -25,6 +25,7 @@ #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" #include "mlir/IR/BuiltinDialect.h" +#include "mlir/IR/IRMapping.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" @@ -32,6 +33,7 @@ #include "mlir/Transforms/DialectConversion.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/Sequence.h" @@ -497,6 +499,13 @@ class CIRUnaryOpLowering op.getInput()); break; } + case mlir::cir::UnaryOpKind::Not: { + auto MinusOne = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, -1)); + rewriter.replaceOpWithNewOp(op, op.getType(), MinusOne, + op.getInput()); + break; + } } return mlir::LogicalResult::success(); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 11213a6f3e7e..08072a2f2a65 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -248,6 +248,13 @@ class CIRUnaryOpLowering : public mlir::OpRewritePattern { op.getInput()); break; } + case mlir::cir::UnaryOpKind::Not: { + auto MinusOne = rewriter.create( + op.getLoc(), type, mlir::IntegerAttr::get(type, -1)); + rewriter.replaceOpWithNewOp(op, op.getType(), + MinusOne, op.getInput()); + break; + } } return mlir::LogicalResult::success(); diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index fc4d888d74ad..2cbb06b408eb 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -24,3 +24,15 @@ unsigned um0() { // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#OUTPUT:]] = cir.unary(minus, %[[#INPUT]]) // CHECK: cir.store %[[#OUTPUT]], %[[#RET]] + +unsigned un0() { + unsigned a = 1; + return ~a; // a ^ -1 , not +} + +// CHECK: cir.func @_Z3un0v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#OUTPUT:]] = cir.unary(not, %[[#INPUT]]) +// CHECK: cir.store %[[#OUTPUT]], %[[#RET]] diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir new file mode 100644 index 000000000000..6ca263907ec2 --- /dev/null +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -0,0 +1,22 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() -> i32 { + %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} + %2 = cir.cst(1 : i32) : i32 + cir.store %2, %1 : i32, cir.ptr + %3 = cir.load %1 : cir.ptr , i32 + %4 = cir.unary(not, %3) : i32, i32 + cir.store %4, %0 : i32, cir.ptr + %5 = cir.load %0 : cir.ptr , i32 + cir.return %5 : i32 + } +} + +// MLIR: = llvm.load +// MLIR: = llvm.mlir.constant(-1 : i32) +// MLIR: = llvm.xor + +// LLVM: = xor i32 -1, %[[#]] From 470abfe260b1efb170bdb20bc88fef0ff9e87994 Mon Sep 17 00:00:00 2001 From: redbopo Date: Mon, 27 Feb 2023 00:56:00 +0800 Subject: [PATCH 0815/2301] [CIR][LowerToMLIR] Fix lowering signless integer type on DIV, REM, SHR ops - Fix lowering Operations DIV, REM, SHR on signless integer type. - Rename binop-int.cir to binop-unsigned-int.cir. Since currently the integer type not supported in CIR. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 +++--- .../lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 6 +++--- .../{binop-int.cir => binop-unsigned-int.cir} | 12 ++++++------ .../{binop-int.cir => binop-unsigned-int.cir} | 12 ++++++------ 4 files changed, 18 insertions(+), 18 deletions(-) rename clang/test/CIR/Lowering/ThroughMLIR/{binop-int.cir => binop-unsigned-int.cir} (95%) rename clang/test/CIR/Lowering/{binop-int.cir => binop-unsigned-int.cir} (95%) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 60d754da283c..b716c0740a8f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -553,7 +553,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { case mlir::cir::BinOpKind::Div: if (type.isa()) { if (type.isSignlessInteger()) - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); else llvm_unreachable("integer type not supported in CIR yet"); @@ -564,7 +564,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { case mlir::cir::BinOpKind::Rem: if (type.isa()) { if (type.isSignlessInteger()) - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); else llvm_unreachable("integer type not supported in CIR yet"); @@ -590,7 +590,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { break; case mlir::cir::BinOpKind::Shr: if (type.isSignlessInteger()) - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); else llvm_unreachable("integer type not supported in CIR yet"); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 08072a2f2a65..dae3c6735683 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -302,7 +302,7 @@ class CIRBinOpLowering : public mlir::OpRewritePattern { case mlir::cir::BinOpKind::Div: if (type.isa()) { if (type.isSignlessInteger()) - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); else llvm_unreachable("integer type not supported in CIR yet"); @@ -313,7 +313,7 @@ class CIRBinOpLowering : public mlir::OpRewritePattern { case mlir::cir::BinOpKind::Rem: if (type.isa()) { if (type.isSignlessInteger()) - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); else llvm_unreachable("integer type not supported in CIR yet"); @@ -339,7 +339,7 @@ class CIRBinOpLowering : public mlir::OpRewritePattern { break; case mlir::cir::BinOpKind::Shr: if (type.isSignlessInteger()) - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); else llvm_unreachable("integer type not supported in CIR yet"); diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-int.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir similarity index 95% rename from clang/test/CIR/Lowering/ThroughMLIR/binop-int.cir rename to clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir index 58ad1be56115..2534baca6fbe 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-int.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir @@ -53,22 +53,22 @@ module { } // MLIR: = arith.muli -// MLIR: = arith.divsi -// MLIR: = arith.remsi +// MLIR: = arith.divui +// MLIR: = arith.remui // MLIR: = arith.addi // MLIR: = arith.subi -// MLIR: = arith.shrsi +// MLIR: = arith.shrui // MLIR: = arith.shli // MLIR: = arith.andi // MLIR: = arith.xori // MLIR: = arith.ori // LLVM: = mul i32 -// LLVM: = sdiv i32 -// LLVM: = srem i32 +// LLVM: = udiv i32 +// LLVM: = urem i32 // LLVM: = add i32 // LLVM: = sub i32 -// LLVM: = ashr i32 +// LLVM: = lshr i32 // LLVM: = shl i32 // LLVM: = and i32 // LLVM: = xor i32 diff --git a/clang/test/CIR/Lowering/binop-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir similarity index 95% rename from clang/test/CIR/Lowering/binop-int.cir rename to clang/test/CIR/Lowering/binop-unsigned-int.cir index 9493228ec770..85163e281c76 100644 --- a/clang/test/CIR/Lowering/binop-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -53,22 +53,22 @@ module { } // MLIR: = llvm.mul -// MLIR: = llvm.sdiv -// MLIR: = llvm.srem +// MLIR: = llvm.udiv +// MLIR: = llvm.urem // MLIR: = llvm.add // MLIR: = llvm.sub -// MLIR: = llvm.ashr +// MLIR: = llvm.lshr // MLIR: = llvm.shl // MLIR: = llvm.and // MLIR: = llvm.xor // MLIR: = llvm.or // LLVM: = mul i32 -// LLVM: = sdiv i32 -// LLVM: = srem i32 +// LLVM: = udiv i32 +// LLVM: = urem i32 // LLVM: = add i32 // LLVM: = sub i32 -// LLVM: = ashr i32 +// LLVM: = lshr i32 // LLVM: = shl i32 // LLVM: = and i32 // LLVM: = xor i32 From ce93d81dcf50ec8a73795da58c8769bd5178b0fc Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 28 Feb 2023 11:54:26 -0800 Subject: [PATCH 0816/2301] [CIR][CIRGen] Unlock basic coroutine lambdas --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 18 ++++++++++++++---- clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 ++++ clang/test/CIR/CodeGen/coro-task.cpp | 12 +++++++++++- 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 76e4e4b2b366..a7ca13a66d12 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -943,7 +943,18 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, const auto *MD = cast(D); if (MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call) { - llvm_unreachable("NYI"); + // We're in a lambda; figure out the captures. + MD->getParent()->getCaptureFields(LambdaCaptureFields, + LambdaThisCaptureField); + if (LambdaThisCaptureField) { + llvm_unreachable("NYI"); + } + for (auto *FD : MD->getParent()->fields()) { + if (FD->hasCapturedVLAType()) { + llvm_unreachable("NYI"); + } + } + } else { // Not in a lambda; just use 'this' from the method. // FIXME: Should we generate a new load for each use of 'this'? The fast @@ -963,10 +974,9 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // a null 'this' pointer. if (isLambdaCallOperator(MD) && MD->getParent()->getLambdaCaptureDefault() == LCD_None) - llvm_unreachable("NYI"); - ; + SkippedChecks.set(SanitizerKind::Null, true); - // TODO(CIR): buildTypeCheck + assert(!UnimplementedFeature::buildTypeCheck() && "NYI"); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index a56f34584cad..a3853d9a468f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -503,6 +503,10 @@ class CIRGenFunction { std::optional FnRetCIRTy; std::optional FnRetAlloca; + llvm::DenseMap + LambdaCaptureFields; + clang::FieldDecl *LambdaThisCaptureField = nullptr; + /// When generating code for a C++ member function, this will /// hold the implicit 'this' declaration. clang::ImplicitParamDecl *CXXABIThisDecl = nullptr; diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 0d7ce3eac0d6..7390ec87d2f4 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -332,4 +332,14 @@ folly::coro::Task go1() { // CHECK: cir.store %[[#ResumeVal]], %[[#CoReturnValAddr]] : i32, cir.ptr // CHECK: },) // CHECK: %[[#V:]] = cir.load %[[#CoReturnValAddr]] : cir.ptr , i32 -// CHECK: cir.call @_ZN5folly4coro4TaskIiE12promise_type12return_valueEi({{.*}}, %[[#V]]) \ No newline at end of file +// CHECK: cir.call @_ZN5folly4coro4TaskIiE12promise_type12return_valueEi({{.*}}, %[[#V]]) + +folly::coro::Task go1_lambda() { + auto task = []() -> folly::coro::Task { + co_return 1; + }(); + co_return co_await task; +} + +// CHECK: cir.func coroutine internal @_ZZ10go1_lambdavENK3$_0clEv +// CHECK: cir.func coroutine @_Z10go1_lambdav() \ No newline at end of file From d497d5968850b34e781659cfb5a11e26a2880298 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 1 Mar 2023 15:38:44 -0800 Subject: [PATCH 0817/2301] [CIR] Add lambda marker to function --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 +++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 5 ++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 ++++++ clang/test/CIR/CodeGen/coro-task.cpp | 2 +- clang/test/CIR/CodeGen/lambda.cpp | 7 ++++++- 5 files changed, 22 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 00511cf7b1a2..4aa239f6a65a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1234,6 +1234,10 @@ def FuncOp : CIR_Op<"func", [ The `coroutine` keyword is used to mark coroutine function, which requires at least one `cir.await` instruction to be used in its body. + The `lambda` translates to a C++ `operator()` that implements a lambda, this + allow callsites to make certain assumptions about the real function nature + when writing analysis. The verifier should, but do act on this keyword yet. + Example: ```mlir @@ -1264,6 +1268,7 @@ def FuncOp : CIR_Op<"func", [ TypeAttrOf:$function_type, UnitAttr:$builtin, UnitAttr:$coroutine, + UnitAttr:$lambda, DefaultValuedAttr:$linkage, OptionalAttr:$sym_visibility, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index a7ca13a66d12..9790979dc378 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -943,7 +943,10 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, const auto *MD = cast(D); if (MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call) { - // We're in a lambda; figure out the captures. + // We're in a lambda. + CurFn.setLambdaAttr(mlir::UnitAttr::get(builder.getContext())); + + // Figure out the captures. MD->getParent()->getCaptureFields(LambdaCaptureFields, LambdaThisCaptureField); if (LambdaThisCaptureField) { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 45ee0984b381..94f378d45088 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1185,11 +1185,14 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { auto builtinNameAttr = getBuiltinAttrName(state.name); auto coroutineNameAttr = getCoroutineAttrName(state.name); + auto lambdaNameAttr = getLambdaAttrName(state.name); if (::mlir::succeeded(parser.parseOptionalKeyword(builtinNameAttr.strref()))) state.addAttribute(builtinNameAttr, parser.getBuilder().getUnitAttr()); if (::mlir::succeeded( parser.parseOptionalKeyword(coroutineNameAttr.strref()))) state.addAttribute(coroutineNameAttr, parser.getBuilder().getUnitAttr()); + if (::mlir::succeeded(parser.parseOptionalKeyword(lambdaNameAttr.strref()))) + state.addAttribute(lambdaNameAttr, parser.getBuilder().getUnitAttr()); // Default to external linkage if no keyword is provided. state.addAttribute(getLinkageAttrNameString(), @@ -1258,6 +1261,9 @@ void cir::FuncOp::print(OpAsmPrinter &p) { if (getCoroutine()) p << "coroutine "; + if (getLambda()) + p << "lambda "; + if (getLinkage() != GlobalLinkageKind::ExternalLinkage) p << stringifyGlobalLinkageKind(getLinkage()) << ' '; diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 7390ec87d2f4..87914b0805e4 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -341,5 +341,5 @@ folly::coro::Task go1_lambda() { co_return co_await task; } -// CHECK: cir.func coroutine internal @_ZZ10go1_lambdavENK3$_0clEv +// CHECK: cir.func coroutine lambda internal @_ZZ10go1_lambdavENK3$_0clEv // CHECK: cir.func coroutine @_Z10go1_lambdav() \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 1b4ad460571e..397ff6a58409 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -3,9 +3,14 @@ void fn() { auto a = [](){}; + a(); } // CHECK: !ty_22class2Eanon22 = !cir.struct<"class.anon", i8> // CHECK-DAG: module -// CHECK-NEXT: cir.func @_Z2fnv() + +// CHECK: cir.func lambda internal @_ZZ2fnvENK3$_0clEv + +// CHECK: cir.func @_Z2fnv() // CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon22, cir.ptr , ["a"] +// CHECK: cir.call @_ZZ2fnvENK3$_0clEv From 78b5957bf9bae9d0fb2b13c177523da41d3a6c25 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 2 Mar 2023 12:50:52 -0800 Subject: [PATCH 0818/2301] [CIR][CIRGen] Add building blocks for lambda captures --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 16 +++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 15 ++- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 102 +++++++++++++++++- .../CodeGen/UnimplementedFeatureGuarding.h | 5 +- 4 files changed, 131 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 8e439db0c426..3ee77bd150b2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -10,6 +10,7 @@ #define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H #include "Address.h" +#include "UnimplementedFeatureGuarding.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/FPEnv.h" @@ -129,6 +130,21 @@ class CIRGenBuilderTy : public mlir::OpBuilder { src); } + mlir::cir::PointerType getPointerTo(mlir::Type ty, + unsigned addressSpace = 0) { + assert(!UnimplementedFeature::addressSpace() && "NYI"); + return mlir::cir::PointerType::get(getContext(), ty); + } + + /// Cast the element type of the given address to a different type, + /// preserving information like the alignment. + Address getElementBitCast(mlir::Location loc, Address Addr, mlir::Type Ty) { + assert(!UnimplementedFeature::addressSpace() && "NYI"); + auto ptrTy = getPointerTo(Ty); + return Address(getBitcast(loc, Addr.getPointer(), ptrTy), Ty, + Addr.getAlignment()); + } + OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) { auto lastAlloca = std::find_if(block->rbegin(), block->rend(), [](mlir::Operation &op) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index b63a0022c9a2..01596fb69eae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -147,7 +147,20 @@ LValue CIRGenFunction::buildLValueForFieldInitialization( if (!FieldType->isReferenceType()) return buildLValueForField(Base, Field); - llvm_unreachable("NYI"); + Address V = buildAddrOfFieldStorage(*this, Base.getAddress(), Field); + + // Make sure that the address is pointing to the right type. + auto memTy = getTypes().convertTypeForMem(FieldType); + V = builder.getElementBitCast(getLoc(Field->getSourceRange()), V, memTy); + + // TODO: Generate TBAA information that describes this access as a structure + // member access and not just an access to an object of the field's type. This + // should be similar to what we do in EmitLValueForField(). + LValueBaseInfo BaseInfo = Base.getBaseInfo(); + AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); + LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource)); + assert(!UnimplementedFeature::tbaa() && "NYI"); + return makeAddrLValue(V, FieldType, FieldBaseInfo); } // Detect the unusual situation where an inline version is shadowed by a diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 095739af3181..c69b7e676135 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -147,9 +147,8 @@ class AggExprEmitter : public StmtVisitor { void VisitVAArgExpr(VAArgExpr *E) { llvm_unreachable("NYI"); } - void EmitInitializationToLValue(Expr *E, LValue Address) { - llvm_unreachable("NYI"); - } + void EmitInitializationToLValue(Expr *E, LValue LV); + void EmitNullInitializationToLValue(LValue Address) { llvm_unreachable("NYI"); } @@ -163,6 +162,89 @@ class AggExprEmitter : public StmtVisitor { // Visitor Methods //===----------------------------------------------------------------------===// +/// If emitting this value will obviously just cause a store of +/// zero to memory, return true. This can return false if uncertain, so it just +/// handles simple cases. +static bool isSimpleZero(const Expr *E, CIRGenFunction &CGF) { + E = E->IgnoreParens(); + while (auto *CE = dyn_cast(E)) { + llvm_unreachable("NYI"); + // if (!castPreservesZero(CE)) + // break; + // E = CE->getSubExpr()->IgnoreParens(); + } + + // 0 + if (const IntegerLiteral *IL = dyn_cast(E)) + return IL->getValue() == 0; + // +0.0 + if (const FloatingLiteral *FL = dyn_cast(E)) + return FL->getValue().isPosZero(); + // int() + if ((isa(E) || isa(E)) && + CGF.getTypes().isZeroInitializable(E->getType())) + return true; + // (int*)0 - Null pointer expressions. + if (const CastExpr *ICE = dyn_cast(E)) { + llvm_unreachable("NYI"); + // return ICE->getCastKind() == CK_NullToPointer && + // CGF.getTypes().isPointerZeroInitializable(E->getType()) && + // !E->HasSideEffects(CGF.getContext()); + } + // '\0' + if (const CharacterLiteral *CL = dyn_cast(E)) + return CL->getValue() == 0; + + // Otherwise, hard case: conservatively return false. + return false; +} + +void AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) { + QualType type = LV.getType(); + // FIXME: Ignore result? + // FIXME: Are initializers affected by volatile? + if (Dest.isZeroed() && isSimpleZero(E, CGF)) { + // TODO(cir): LLVM codegen just returns here, do we want to + // do anything different when we hit this code path? + llvm_unreachable("NYI"); + // Storing "i32 0" to a zero'd memory location is a noop. + return; + } else if (isa(E) || isa(E)) { + return EmitNullInitializationToLValue(LV); + } else if (isa(E)) { + // Do nothing. + return; + } else if (type->isReferenceType()) { + llvm_unreachable("NYI"); + // RValue RV = CGF.EmitReferenceBindingToExpr(E); + // return CGF.EmitStoreThroughLValue(RV, LV); + } + + switch (CGF.getEvaluationKind(type)) { + case TEK_Complex: + llvm_unreachable("NYI"); + return; + case TEK_Aggregate: + llvm_unreachable("NYI"); + // CGF.EmitAggExpr(E, AggValueSlot::forLValue( + // LV, CGF, AggValueSlot::IsDestructed, + // AggValueSlot::DoesNotNeedGCBarriers, + // AggValueSlot::IsNotAliased, + // AggValueSlot::MayOverlap, Dest.isZeroed())); + return; + case TEK_Scalar: + if (LV.isSimple()) { + llvm_unreachable("NYI"); + // CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false); + } else { + llvm_unreachable("NYI"); + // CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV); + } + return; + } + llvm_unreachable("bad evaluation kind"); +} + void AggExprEmitter::VisitMaterializeTemporaryExpr( MaterializeTemporaryExpr *E) { Visit(E->getSubExpr()); @@ -213,7 +295,19 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(), e = E->capture_init_end(); i != e; ++i, ++CurField) { - llvm_unreachable("NYI"); + // Emit initialization + LValue LV = CGF.buildLValueForFieldInitialization(SlotLV, *CurField); + if (CurField->hasCapturedVLAType()) { + llvm_unreachable("NYI"); + } + + EmitInitializationToLValue(*i, LV); + + // Push a destructor if necessary. + if (QualType::DestructionKind DtorKind = + CurField->getType().isDestructedType()) { + llvm_unreachable("NYI"); + } } // Deactivate all the partial cleanups in reverse order, which generally means diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 640e01afa2bd..f461c93a110c 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -26,8 +26,10 @@ struct UnimplementedFeature { // corresponding to `llvm::VectorType` static bool cirVectorType() { return false; } - // CIR still unware of address space + // Address space related + static bool addressSpace() { return false; } static bool addressSpaceInGlobalVar() { return false; } + static bool getASTAllocaAddressSpace() { return false; } // Unhandled global/linkage information. static bool unnamedAddr() { return false; } @@ -50,7 +52,6 @@ struct UnimplementedFeature { static bool unhandledException() { return false; } static bool capturedByInit() { return false; } - static bool getASTAllocaAddressSpace() { return false; } static bool tryEmitAsConstant() { return false; } static bool incrementProfileCounter() { return false; } static bool requiresReturnValueCheck() { return false; } From 98a6f27d42bfeb7cc6a0afdbfd16b7bb41a624a0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 3 Mar 2023 13:09:38 -0800 Subject: [PATCH 0819/2301] [CIR][CIRGen] Fix use of CXXThis We were previously using the alloca address instead of the loaded `this` pointer, which is wrong. Directly using CXXThisValue was also misleading, since it wasn't passing the right base address to struct_element_addr --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 4 ++-- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 10 +++------- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 ++--- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 6 +----- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 13 +++++------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 16 +++++++-------- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 4 ++-- clang/test/CIR/CodeGen/String.cpp | 10 +++++----- clang/test/CIR/CodeGen/assign-operator.cpp | 20 +++++++++++++++---- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 4 ++-- clang/test/CIR/CodeGen/unary-deref.cpp | 3 ++- 11 files changed, 47 insertions(+), 48 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 5be888078891..b4ce5c983cc4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -122,7 +122,7 @@ class CIRGenCXXABI { void buildThisParam(CIRGenFunction &CGF, FunctionArgList &Params); /// Loads the incoming C++ this pointer as it was passed by the caller. - mlir::Operation *loadIncomingCXXThis(CIRGenFunction &CGF); + mlir::Value loadIncomingCXXThis(CIRGenFunction &CGF); /// Determine whether there's something special about the rules of the ABI /// tell us that 'this' is a complete object within the given function. @@ -181,7 +181,7 @@ class CIRGenCXXABI { virtual ~CIRGenCXXABI(); - void setCXXABIThisValue(CIRGenFunction &CGF, mlir::Operation *ThisPtr); + void setCXXABIThisValue(CIRGenFunction &CGF, mlir::Value ThisPtr); // Determine if references to thread_local global variables can be made // directly or require access through a thread wrapper function. diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 2782ce631803..b13126532578 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -224,7 +224,7 @@ static void buildMemberInitializer(CIRGenFunction &CGF, FieldDecl *Field = MemberInit->getAnyMember(); QualType FieldType = Field->getType(); - mlir::Operation *ThisPtr = CGF.LoadCXXThis(); + auto ThisPtr = CGF.LoadCXXThis(); QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); LValue LHS; @@ -477,7 +477,7 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, llvm_unreachable("NYI"); } - mlir::Operation *const OldThis = CXXThisValue; + auto const OldThis = CXXThisValue; for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) { if (!ConstructVBases) continue; @@ -580,11 +580,7 @@ Address CIRGenFunction::LoadCXXThisAddress() { CXXThisAlignment = CGM.getClassPointerAlignment(RD); } - // TODO(cir): consider how to do this if we ever have multiple returns - auto *t = LoadCXXThis(); - assert(t->getNumResults() == 1); - auto Result = t->getOpResult(0); - return Address(Result, CXXThisAlignment); + return Address(LoadCXXThis(), CXXThisAlignment); } void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 01596fb69eae..519291076b84 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -57,7 +57,7 @@ static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, auto fieldPtr = mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fieldType); auto sea = CGF.getBuilder().create( - loc, fieldPtr, CGF.CXXThisValue->getOperand(0), field->getName()); + loc, fieldPtr, Base.getPointer(), field->getName()); // TODO: We could get the alignment from the CIRGenRecordLayout, but given the // member name based lookup of the member here we probably shouldn't be. We'll @@ -1287,8 +1287,7 @@ LValue CIRGenFunction::buildCallExprLValue(const CallExpr *E) { "Can't have a scalar return unless the return type is a " "reference type!"); - return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal().getDefiningOp(), - E->getType()); + return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType()); } /// Evaluate an expression into a given memory location. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index c76c16d52ee6..608fb2cb2fbf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -450,11 +450,7 @@ class ScalarExprEmitter : public StmtVisitor { return Visit(DIE->getExpr()); } - mlir::Value VisitCXXThisExpr(CXXThisExpr *TE) { - auto *t = CGF.LoadCXXThis(); - assert(t->getNumResults() == 1); - return t->getOpResult(0); - } + mlir::Value VisitCXXThisExpr(CXXThisExpr *TE) { return CGF.LoadCXXThis(); } mlir::Value VisitExprWithCleanups(ExprWithCleanups *E); mlir::Value VisitCXXNewExpr(const CXXNewExpr *E) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 9790979dc378..daa70808c474 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -532,13 +532,10 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, return Fn; } -mlir::Operation *CIRGenFunction::createLoad(const VarDecl *VD, - const char *Name) { +mlir::Value CIRGenFunction::createLoad(const VarDecl *VD, const char *Name) { auto addr = GetAddrOfLocalVar(VD); - auto ret = builder.create(getLoc(VD->getLocation()), - addr.getElementType(), addr.getPointer()); - - return ret; + return builder.create(getLoc(VD->getLocation()), + addr.getElementType(), addr.getPointer()); } static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) { @@ -691,14 +688,14 @@ void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { /// Given a value of type T* that may not be to a complete object, construct /// an l-vlaue withi the natural pointee alignment of T. -LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Operation *Op, +LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Value V, QualType T) { // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps // assert on the result type first. LValueBaseInfo BaseInfo; CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, /* for PointeeType= */ true); - return makeAddrLValue(Address(Op->getResult(0), Align), T, BaseInfo); + return makeAddrLValue(Address(V, Align), T, BaseInfo); } // Map the LangOption for exception behavior into the corresponding enum in diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index a3853d9a468f..a674eea9e53c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -510,8 +510,8 @@ class CIRGenFunction { /// When generating code for a C++ member function, this will /// hold the implicit 'this' declaration. clang::ImplicitParamDecl *CXXABIThisDecl = nullptr; - mlir::Operation *CXXABIThisValue = nullptr; - mlir::Operation *CXXThisValue = nullptr; + mlir::Value CXXABIThisValue = nullptr; + mlir::Value CXXThisValue = nullptr; clang::CharUnits CXXABIThisAlignment; clang::CharUnits CXXThisAlignment; @@ -692,7 +692,7 @@ class CIRGenFunction { mlir::Value buildCXXNewExpr(const CXXNewExpr *E); - mlir::Operation *createLoad(const clang::VarDecl *VD, const char *Name); + mlir::Value createLoad(const clang::VarDecl *VD, const char *Name); // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. @@ -1089,8 +1089,7 @@ class CIRGenFunction { : CGF{CGF}, OldCXXThisValue(CGF.CXXThisValue), OldCXXThisAlignment(CGF.CXXThisAlignment), SourceLocScope(E, CGF.CurSourceLocExprScope) { - CGF.CXXThisValue = - CGF.CXXDefaultInitExprThis.getPointer().getDefiningOp(); + CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer(); CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment(); } ~CXXDefaultInitExprScope() { @@ -1100,7 +1099,7 @@ class CIRGenFunction { public: CIRGenFunction &CGF; - mlir::Operation *OldCXXThisValue; + mlir::Value OldCXXThisValue; clang::CharUnits OldCXXThisAlignment; SourceLocExprScopeGuard SourceLocScope; }; @@ -1110,13 +1109,12 @@ class CIRGenFunction { : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {} }; - LValue MakeNaturalAlignPointeeAddrLValue(mlir::Operation *Op, - clang::QualType T); + LValue MakeNaturalAlignPointeeAddrLValue(mlir::Value V, clang::QualType T); /// Load the value for 'this'. This function is only valid while generating /// code for an C++ member function. /// FIXME(cir): this should return a mlir::Value! - mlir::Operation *LoadCXXThis() { + mlir::Value LoadCXXThis() { assert(CXXThisValue && "no 'this' value for this function"); return CXXThisValue; } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 00dd44801437..34c2d3388668 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -346,12 +346,12 @@ void CIRGenItaniumCXXABI::addImplicitStructorParams(CIRGenFunction &CGF, } } -mlir::Operation *CIRGenCXXABI::loadIncomingCXXThis(CIRGenFunction &CGF) { +mlir::Value CIRGenCXXABI::loadIncomingCXXThis(CIRGenFunction &CGF) { return CGF.createLoad(getThisDecl(CGF), "this"); } void CIRGenCXXABI::setCXXABIThisValue(CIRGenFunction &CGF, - mlir::Operation *ThisPtr) { + mlir::Value ThisPtr) { /// Initialize the 'this' slot. assert(getThisDecl(CGF) && "no 'this' variable for function"); CGF.CXXABIThisValue = ThisPtr; diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 275e713e3806..43b994bbc4c5 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -21,10 +21,10 @@ void test() { // CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 -// CHECK-NEXT: %2 = "cir.struct_element_addr"(%0) <{member_name = "storage"}> +// CHECK-NEXT: %2 = "cir.struct_element_addr"(%1) <{member_name = "storage"}> // CHECK-NEXT: %3 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %4 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr +// CHECK-NEXT: %4 = "cir.struct_element_addr"(%1) <{member_name = "size"}> : (!cir.ptr) -> !cir.ptr // CHECK-NEXT: %5 = cir.cst(0 : i32) : i32 // CHECK-NEXT: %6 = cir.cast(integral, %5 : i32), i64 // CHECK-NEXT: cir.store %6, %4 : i64, cir.ptr @@ -36,10 +36,10 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: cir.store %arg1, %1 // CHECK-NEXT: %2 = cir.load %0 -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "storage"}> +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_name = "storage"}> // CHECK-NEXT: %4 = cir.cst(#cir.null : !cir.ptr) // CHECK-NEXT: cir.store %4, %3 -// CHECK-NEXT: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> : (!cir.ptr>) -> !cir.ptr +// CHECK-NEXT: %5 = "cir.struct_element_addr"(%2) <{member_name = "size"}> : (!cir.ptr) -> !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: %7 = cir.cast(integral, %6 : i32), i64 // CHECK-NEXT: cir.store %7, %5 : i64, cir.ptr @@ -52,7 +52,7 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "storage"}> : (!cir.ptr>) -> !cir.ptr> +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_name = "storage"}> : (!cir.ptr) -> !cir.ptr> // CHECK-NEXT: %4 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 97864241db34..542325bfbf6d 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -17,9 +17,21 @@ struct String { // CHECK: cir.store %arg0, %0 : !cir.ptr // CHECK: cir.store %arg1, %1 : !cir.ptr // CHECK: %2 = cir.load %0 : cir.ptr > - // CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "size"}> + + // Get address of `this->size` + + // CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "size"}> + + // Get address of `s` + // CHECK: %4 = cir.load %1 : cir.ptr > - // CHECK: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> + + // Get the address of s.size + + // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_name = "size"}> + + // Load value from s.size and store in this->size + // CHECK: %6 = cir.load %5 : cir.ptr , i64 // CHECK: cir.store %6, %3 : i64, cir.ptr // CHECK: cir.return @@ -38,9 +50,9 @@ struct String { // CHECK: cir.store %arg1, %1 : !cir.ptr // CHECK: %3 = cir.load deref %0 : cir.ptr > // CHECK: %4 = cir.load %1 : cir.ptr > - // CHECK: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> + // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_name = "size"}> // CHECK: %6 = cir.load %5 : cir.ptr , i64 - // CHECK: %7 = "cir.struct_element_addr"(%0) <{member_name = "size"}> + // CHECK: %7 = "cir.struct_element_addr"(%3) <{member_name = "size"}> // CHECK: cir.store %6, %7 : i64, cir.ptr // CHECK: cir.store %3, %2 : !cir.ptr // CHECK: %8 = cir.load %2 : cir.ptr > diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index dd7980366ca7..f17a28f22b1f 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -11,9 +11,9 @@ struct String { // CHECK: cir.store %arg0, %0 // CHECK: cir.store %arg1, %1 // CHECK: %2 = cir.load %0 -// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "size"}> +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "size"}> // CHECK: %4 = cir.load %1 -// CHECK: %5 = "cir.struct_element_addr"(%0) <{member_name = "size"}> +// CHECK: %5 = "cir.struct_element_addr"(%4) <{member_name = "size"}> // CHECK: %6 = cir.load %5 : cir.ptr , i64 // CHECK: cir.store %6, %3 : i64, cir.ptr // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/unary-deref.cpp b/clang/test/CIR/CodeGen/unary-deref.cpp index 9e41f5225fac..8de7659756a2 100644 --- a/clang/test/CIR/CodeGen/unary-deref.cpp +++ b/clang/test/CIR/CodeGen/unary-deref.cpp @@ -11,6 +11,7 @@ void foo() { } // CHECK: cir.func linkonce_odr @_ZNK12MyIntPointer4readEv -// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "ptr"}> +// CHECK: %2 = cir.load %0 +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "ptr"}> // CHECK: %4 = cir.load deref %3 : cir.ptr > // CHECK: %5 = cir.load %4 From c03bab287589b0d6c9cd7f28d7aefaa1eec9932f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 3 Mar 2023 18:28:41 -0800 Subject: [PATCH 0820/2301] [CIR][CIRGen] Add support for by-reference lambda captures --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 + clang/lib/CIR/CodeGen/CIRGenClass.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 53 +++++++++++++++---- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 32 +++++++---- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 8 +++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 6 +++ .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/lambda.cpp | 21 ++++++++ 9 files changed, 109 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 3ee77bd150b2..359d771a63f5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -126,6 +126,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::Value getBitcast(mlir::Location loc, mlir::Value src, mlir::Type newTy) { + if (newTy == src.getType()) + return src; return create(loc, newTy, mlir::cir::CastKind::bitcast, src); } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index b13126532578..1546030d75f0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -206,7 +206,7 @@ static void buildLValueForAnyFieldInitialization(CIRGenFunction &CGF, if (MemberInit->isIndirectMemberInitializer()) { llvm_unreachable("NYI"); } else { - LHS = CGF.buildLValueForFieldInitialization(LHS, Field); + LHS = CGF.buildLValueForFieldInitialization(LHS, Field, Field->getName()); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 519291076b84..b785ef9d0c9c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -47,7 +47,8 @@ static Address buildPreserveStructAccess(CIRGenFunction &CGF, LValue base, /// Get the address of a zero-sized field within a record. The resulting address /// doesn't necessarily have the right type. static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, - const FieldDecl *field) { + const FieldDecl *field, + llvm::StringRef fieldName) { if (field->isZeroSize(CGF.getContext())) llvm_unreachable("NYI"); @@ -56,8 +57,11 @@ static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, auto fieldType = CGF.convertType(field->getType()); auto fieldPtr = mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fieldType); + // For most cases fieldName is the same as field->getName() but for lambdas, + // which do not currently carry the name, so it can be passed down from the + // CaptureStmt. auto sea = CGF.getBuilder().create( - loc, fieldPtr, Base.getPointer(), field->getName()); + loc, fieldPtr, Base.getPointer(), fieldName); // TODO: We could get the alignment from the CIRGenRecordLayout, but given the // member name based lookup of the member here we probably shouldn't be. We'll @@ -105,16 +109,28 @@ LValue CIRGenFunction::buildLValueForField(LValue base, llvm_unreachable("NYI"); } else { if (!IsInPreservedAIRegion && - (!getDebugInfo() || !rec->hasAttr())) - addr = buildAddrOfFieldStorage(*this, addr, field); - else + (!getDebugInfo() || !rec->hasAttr())) { + llvm::StringRef fieldName = field->getName(); + if (CGM.LambdaFieldToName.count(field)) + fieldName = CGM.LambdaFieldToName[field]; + addr = buildAddrOfFieldStorage(*this, addr, field, fieldName); + } else // Remember the original struct field index addr = buildPreserveStructAccess(*this, base, addr, field); } // If this is a reference field, load the reference right now. if (FieldType->isReferenceType()) { - llvm_unreachable("NYI"); + assert(!UnimplementedFeature::tbaa()); + LValue RefLVal = makeAddrLValue(addr, FieldType, FieldBaseInfo); + if (RecordCVR & Qualifiers::Volatile) + RefLVal.getQuals().addVolatile(); + addr = buildLoadOfReference(RefLVal, getLoc(field->getSourceRange()), + &FieldBaseInfo); + + // Qualifiers on the struct don't apply to the referencee. + RecordCVR = 0; + FieldType = FieldType->getPointeeType(); } // Make sure that the address is pointing to the right type. This is critical @@ -141,13 +157,14 @@ LValue CIRGenFunction::buildLValueForField(LValue base, } LValue CIRGenFunction::buildLValueForFieldInitialization( - LValue Base, const clang::FieldDecl *Field) { + LValue Base, const clang::FieldDecl *Field, llvm::StringRef FieldName) { QualType FieldType = Field->getType(); if (!FieldType->isReferenceType()) return buildLValueForField(Base, Field); - Address V = buildAddrOfFieldStorage(*this, Base.getAddress(), Field); + Address V = + buildAddrOfFieldStorage(*this, Base.getAddress(), Field, FieldName); // Make sure that the address is pointing to the right type. auto memTy = getTypes().convertTypeForMem(FieldType); @@ -369,6 +386,13 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, return LV; } +static LValue buildCapturedFieldLValue(CIRGenFunction &CGF, const FieldDecl *FD, + mlir::Value ThisValue) { + QualType TagType = CGF.getContext().getTagDeclType(FD->getParent()); + LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType); + return CGF.buildLValueForField(LV, FD); +} + LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { const NamedDecl *ND = E->getDecl(); QualType T = E->getType(); @@ -380,7 +404,18 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { // Global Named registers access via intrinsics only assert(VD->getStorageClass() != SC_Register && "not implemented"); assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); - assert(!E->refersToEnclosingVariableOrCapture() && "not implemented"); + + // Check for captured variables. + if (E->refersToEnclosingVariableOrCapture()) { + VD = VD->getCanonicalDecl(); + if (auto *FD = LambdaCaptureFields.lookup(VD)) + return buildCapturedFieldLValue(*this, FD, CXXABIThisValue); + assert(!UnimplementedFeature::CGCapturedStmtInfo() && "NYI"); + llvm_unreachable("NYI"); + // LLVM codegen: + // Address addr = GetAddrOfBlockDecl(VD); + // return MakeAddrLValue(addr, T, AlignmentSource::Decl); + } } // FIXME(CIR): We should be able to assert this for FunctionDecls as well! diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index c69b7e676135..1397da21b3a6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -215,9 +215,8 @@ void AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) { // Do nothing. return; } else if (type->isReferenceType()) { - llvm_unreachable("NYI"); - // RValue RV = CGF.EmitReferenceBindingToExpr(E); - // return CGF.EmitStoreThroughLValue(RV, LV); + RValue RV = CGF.buildReferenceBindingToExpr(E); + return CGF.buildStoreThroughLValue(RV, LV); } switch (CGF.getEvaluationKind(type)) { @@ -291,23 +290,38 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { llvm_unreachable("NYI"); mlir::Operation *CleanupDominator = nullptr; - CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin(); - for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(), - e = E->capture_init_end(); - i != e; ++i, ++CurField) { + auto CurField = E->getLambdaClass()->field_begin(); + auto captureInfo = E->capture_begin(); + for (auto &captureInit : E->capture_inits()) { + // Pick a name for the field. + llvm::StringRef fieldName = CurField->getName(); + const LambdaCapture &capture = *captureInfo; + if (capture.capturesVariable()) { + assert(!CurField->isBitField() && "lambdas don't have bitfield members!"); + ValueDecl *v = capture.getCapturedVar(); + fieldName = v->getName(); + CGF.getCIRGenModule().LambdaFieldToName[*CurField] = fieldName; + } else { + llvm_unreachable("NYI"); + } + // Emit initialization - LValue LV = CGF.buildLValueForFieldInitialization(SlotLV, *CurField); + LValue LV = + CGF.buildLValueForFieldInitialization(SlotLV, *CurField, fieldName); if (CurField->hasCapturedVLAType()) { llvm_unreachable("NYI"); } - EmitInitializationToLValue(*i, LV); + EmitInitializationToLValue(captureInit, LV); // Push a destructor if necessary. if (QualType::DestructionKind DtorKind = CurField->getType().isDestructedType()) { llvm_unreachable("NYI"); } + + CurField++; + captureInfo++; } // Deactivate all the partial cleanups in reverse order, which generally means diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index daa70808c474..e151bfa58080 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -698,6 +698,14 @@ LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Value V, return makeAddrLValue(Address(V, Align), T, BaseInfo); } +LValue CIRGenFunction::MakeNaturalAlignAddrLValue(mlir::Value V, QualType T) { + LValueBaseInfo BaseInfo; + assert(!UnimplementedFeature::tbaa()); + CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo); + Address Addr(V, getTypes().convertTypeForMem(T), Alignment); + return LValue::makeAddr(Addr, T, getContext(), BaseInfo); +} + // Map the LangOption for exception behavior into the corresponding enum in // the IR. cir::fp::ExceptionBehavior diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index a674eea9e53c..ea1e683a8156 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1110,6 +1110,7 @@ class CIRGenFunction { }; LValue MakeNaturalAlignPointeeAddrLValue(mlir::Value V, clang::QualType T); + LValue MakeNaturalAlignAddrLValue(mlir::Value V, QualType T); /// Load the value for 'this'. This function is only valid while generating /// code for an C++ member function. @@ -1152,7 +1153,8 @@ class CIRGenFunction { /// will return the address of the reference and not the address of the value /// stored in the reference. LValue buildLValueForFieldInitialization(LValue Base, - const clang::FieldDecl *Field); + const clang::FieldDecl *Field, + llvm::StringRef FieldName); void buildInitializerForField(clang::FieldDecl *Field, LValue LHS, clang::Expr *Init); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index a88c05b61701..dd1032288b93 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -149,6 +149,12 @@ class CIRGenModule { /// that we don't re-emit the initializer. llvm::DenseMap DelayedCXXInitPosition; + /// Keep track of a map between lambda fields and names, this needs to be per + /// module since lambdas might get generated later as part of defered work, + /// and since the pointers are supposed to be uniqued, should be fine. Revisit + /// this if it ends up taking too much memory. + llvm::DenseMap LambdaFieldToName; + /// If the declaration has internal linkage but is inside an /// extern "C" linkage specification, prepare to emit an alias for it /// to the expected name. diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index f461c93a110c..be6b6761d7f8 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -58,6 +58,7 @@ struct UnimplementedFeature { static bool shouldEmitLifetimeMarkers() { return false; } static bool peepholeProtection() { return false; } static bool attributeNoBuiltin() { return false; } + static bool CGCapturedStmtInfo() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 397ff6a58409..8a0fed6323ed 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -14,3 +14,24 @@ void fn() { // CHECK: cir.func @_Z2fnv() // CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon22, cir.ptr , ["a"] // CHECK: cir.call @_ZZ2fnvENK3$_0clEv + +void l0() { + int i; + auto a = [&](){ i = i + 1; }; + a(); +} + +// CHECK: cir.func lambda internal @_ZZ2l0vENK3$_0clEv( + +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %3 = cir.load %2 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.load %3 : cir.ptr , i32 +// CHECK: %5 = cir.cst(1 : i32) : i32 +// CHECK: %6 = cir.binop(add, %4, %5) : i32 +// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr +// CHECK: cir.store %6, %8 : i32, cir.ptr + +// CHECK: cir.func @_Z2l0v() { From 577c796206fc7c612bc6ae21b0ed56274e4c330b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 6 Mar 2023 12:13:29 -0800 Subject: [PATCH 0821/2301] [CIR][CIRGen] Add aggregate return support for ReturmStmt --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 3 ++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 20 ++++++++++++++------ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 10 ++++++++-- 4 files changed, 25 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 1397da21b3a6..4f7897c45cf6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -280,6 +280,7 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { } void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { + CIRGenFunction::SourceLocRAIIObject loc{CGF, CGF.getLoc(E->getSourceRange())}; AggValueSlot Slot = EnsureSlot(E->getType()); LLVM_ATTRIBUTE_UNUSED LValue SlotLV = CGF.makeAddrLValue(Slot.getAddress(), E->getType()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 608fb2cb2fbf..8571f269f36e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1412,7 +1412,8 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { llvm_unreachable("NYI"); } else { CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); - CGF.currSrcLoc = CGF.getLoc(E->getBeginLoc()); + CIRGenFunction::SourceLocRAIIObject loc{CGF, + CGF.getLoc(E->getSourceRange())}; CGF.buildStoreThroughLValue(RValue::get(RHS), LHS); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index ea1e683a8156..8874caed109d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -789,16 +789,16 @@ class CIRGenFunction { LValue buildCallExprLValue(const CallExpr *E); - /// buildAnyExprToTemp - Similarly to buildAnyExpr(), however, the result will - /// always be accessible even if no aggregate location is provided. + /// Similarly to buildAnyExpr(), however, the result will always be accessible + /// even if no aggregate location is provided. RValue buildAnyExprToTemp(const clang::Expr *E); CIRGenCallee buildCallee(const clang::Expr *E); - /// buildAnyExpr - Emit code to compute the specified expression which can - /// have any type. The result is returned as an RValue struct. If this is an - /// aggregate expression, the aggloc/agglocvolatile arguments indicate where - /// the result should be returned. + /// Emit code to compute the specified expression which can have any type. The + /// result is returned as an RValue struct. If this is an aggregate + /// expression, the aggloc/agglocvolatile arguments indicate where the result + /// should be returned. RValue buildAnyExpr(const clang::Expr *E, AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); @@ -847,6 +847,14 @@ class CIRGenFunction { mlir::LogicalResult buildDeclStmt(const clang::DeclStmt &S); + /// Determine whether a return value slot may overlap some other object. + AggValueSlot::Overlap_t getOverlapForReturnValue() { + // FIXME: Assuming no overlap here breaks guaranteed copy elision for base + // class subobjects. These cases may need to be revisited depending on the + // resolution of the relevant core issue. + return AggValueSlot::DoesNotOverlap; + } + /// Get an appropriate 'undef' rvalue for the given type. /// TODO: What's the equivalent for MLIR? Currently we're only using this for /// void types so it just returns RValue::get(nullptr) but it'll need diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 3eaed149fc56..7fd6013cb1ea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -451,9 +451,15 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { builder.create(loc, V, *FnRetAlloca); break; case TEK_Complex: + llvm_unreachable("NYI"); + break; case TEK_Aggregate: - llvm::errs() << "ReturnStmt EvaluationKind not implemented\n"; - return mlir::failure(); + buildAggExpr(RV, + AggValueSlot::forAddr( + ReturnValue, Qualifiers(), AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, getOverlapForReturnValue())); + break; } } From 24b0bde669625346ae682285c6d38df219afcf8a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 6 Mar 2023 13:20:00 -0800 Subject: [PATCH 0822/2301] [CIR][Lifetime] Detect returned lambdas with reference to captured local vars In this first version, we give a warning if a returned lambda captures any local variable. Since we don't codegen unused captured local variables, it's safe to assume the capture is used inside the lambda. --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 +- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 166 ++++++++++++++---- clang/test/CIR/CodeGen/lambda.cpp | 20 ++- .../CIR/Transforms/lifetime-check-lambda.cpp | 9 + 4 files changed, 164 insertions(+), 33 deletions(-) create mode 100644 clang/test/CIR/Transforms/lifetime-check-lambda.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 8874caed109d..a45e02ca7ff7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -422,6 +422,7 @@ class CIRGenFunction { /// Source Location tracking /// ------- +public: /// Use to track source locations across nested visitor traversals. /// Always use a `SourceLocRAIIObject` to change currSrcLoc. std::optional currSrcLoc; @@ -442,7 +443,6 @@ class CIRGenFunction { ~SourceLocRAIIObject() { restore(); } }; -public: using SymTableScopeTy = llvm::ScopedHashTableScope; diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 1d083b99804d..60c0e4f9843a 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -51,9 +51,14 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkLoad(LoadOp op); void checkCall(CallOp callOp); void checkAwait(AwaitOp awaitOp); + void checkReturn(ReturnOp retOp); - void checkPointerDeref(mlir::Value addr, mlir::Location loc); + // FIXME: classify tasks and lambdas prior to check ptr deref + // and pass down an enum. + void checkPointerDeref(mlir::Value addr, mlir::Location loc, + bool forRetLambda = false); void checkCoroTaskStore(StoreOp storeOp); + void checkLambdaCaptureStore(StoreOp storeOp); void checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor); void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); @@ -76,7 +81,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Diagnostic helpers. void emitInvalidHistory(mlir::InFlightDiagnostic &D, mlir::Value histKey, - mlir::Location warningLoc); + mlir::Location warningLoc, bool forRetLambda); /// /// Pass options handling @@ -251,6 +256,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // FIXME: this should be a ScopedHashTable for consistency. using PMapType = llvm::DenseMap; + // FIXME: we probably don't need to track it at this level, perhaps + // just tracking at the scope level should be enough? PMapType *currPmap = nullptr; PMapType &getPmap() { return *currPmap; } void markPsetInvalid(mlir::Value ptr, InvalidStyle invalidStyle, @@ -298,7 +305,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { /// /// Coroutine tasks (promise_type) - /// ---------------------------------------------- + /// ------------------------------ // Track types we already know to be a coroutine task (promise_type) llvm::DenseMap IsTaskTyCache; @@ -311,6 +318,17 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // the same warning multiple times, e.g. under the same coawait. llvm::SmallSet emittedDanglingTasks; + /// + /// Lambdas + /// ------- + + // Track types we already know to be a lambda + llvm::DenseMap IsLambdaTyCache; + // Check if a given cir type is a struct containing a lambda + bool isLambdaType(mlir::Type ty); + // Get the lambda struct from a member access to it. + mlir::Value getLambdaFromMemberAccess(mlir::Value addr); + /// /// Scope, context and guards /// ------------------------- @@ -337,6 +355,10 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // is a coroutine. SmallPtrSet localTempTasks; + // Track seen lambdas that escape out of the current scope + // (e.g. lambdas returned out of functions). + DenseMap localRetLambdas; + LLVM_DUMP_METHOD void dumpLocalValues(); }; @@ -415,14 +437,22 @@ static Location getEndLocForHist(Operation *Op) { return getEndLoc(Op->getLoc()); } -static Location getEndLocForHist(Region *R) { - auto ifOp = dyn_cast(R->getParentOp()); +static Location getEndLocIf(IfOp ifOp, Region *R) { assert(ifOp && "what other regions create their own scope?"); if (&ifOp.getThenRegion() == R) return getEndLoc(ifOp.getLoc()); return getEndLoc(ifOp.getLoc(), /*idx=*/3); } +static Location getEndLocForHist(Region *R) { + auto parentOp = R->getParentOp(); + if (isa(parentOp)) + return getEndLocIf(cast(parentOp), R); + if (isa(parentOp)) + return getEndLoc(parentOp->getLoc()); + llvm_unreachable("what other regions create their own scope?"); +} + static Location getEndLocForHist(LifetimeCheckPass::LexicalScopeContext &lsc) { assert(!lsc.parent.isNull() && "shouldn't be null"); if (lsc.parent.is()) @@ -476,20 +506,19 @@ void LifetimeCheckPass::kill(const State &s, InvalidStyle invalidStyle, owners.erase(v); ptrs.erase(v); tasks.erase(v); - getPmap().erase(v); } } void LifetimeCheckPass::LexicalScopeGuard::cleanup() { auto *localScope = Pass.currScope; - // If we are cleaning up at the function level, nothing - // to do here cause we are past all possible deference points - if (localScope->Depth == 0) - return; - for (auto pointee : localScope->localValues) Pass.kill(State::getLocalValue(pointee), InvalidStyle::EndOfScope, getEndLocForHist(*localScope)); + + // Catch interesting dangling references out of returns. + for (auto l : localScope->localRetLambdas) + Pass.checkPointerDeref(l.first, l.second, + /*forRetLambda=*/true); } void LifetimeCheckPass::checkBlock(Block &block) { @@ -521,18 +550,15 @@ void LifetimeCheckPass::checkFunc(Operation *op) { pmapNullHist.clear(); invalidHist.clear(); - // Add a new scope. Note that as part of the scope cleanup process - // we apply section 2.3 KILL(x) functionality, turning relevant - // references invalid. - LexicalScopeContext lexScope{op}; - LexicalScopeGuard scopeGuard{*this, &lexScope}; - // Create a new pmap for this function. PMapType localPmap{}; PmapGuard pmapGuard{*this, &localPmap}; + // Add a new scope. Note that as part of the scope cleanup process + // we apply section 2.3 KILL(x) functionality, turning relevant + // references invalid. for (Region ®ion : op->getRegions()) - checkRegion(region); + checkRegionWithScope(region); // FIXME: store the pmap result for this function, we // could do some interesting IPA stuff using this info. @@ -663,6 +689,33 @@ void LifetimeCheckPass::checkAwait(AwaitOp awaitOp) { joinPmaps(pmapOps); } +void LifetimeCheckPass::checkReturn(ReturnOp retOp) { + // Upon return invalidate all local values. Since some return + // values might depend on other local address, check for the + // dangling aspects for this. + if (retOp.getNumOperands() == 0) + return; + + auto retTy = retOp.getOperand(0).getType(); + // FIXME: this can be extended to cover more leaking/dandling + // semantics out of functions. + if (!isLambdaType(retTy)) + return; + + // The return value is loaded from the return slot before + // returning. + auto loadOp = dyn_cast(retOp.getOperand(0).getDefiningOp()); + assert(loadOp && "expected cir.load"); + if (!isa(loadOp.getAddr().getDefiningOp())) + return; + + // Keep track of interesting lambda. + assert(!currScope->localRetLambdas.count(loadOp.getAddr()) && + "lambda already returned?"); + currScope->localRetLambdas.insert( + std::make_pair(loadOp.getAddr(), loadOp.getLoc())); +} + void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { // 2.4.7. A switch(cond) is treated as if it were an equivalent series of // non-nested if statements with single evaluation of cond; for example: @@ -921,17 +974,45 @@ void LifetimeCheckPass::checkCoroTaskStore(StoreOp storeOp) { llvm_unreachable("expecting cir.call defining op"); } +mlir::Value LifetimeCheckPass::getLambdaFromMemberAccess(mlir::Value addr) { + auto op = addr.getDefiningOp(); + // FIXME: we likely want to consider more indirections here... + if (!isa(op)) + return nullptr; + auto allocaOp = + dyn_cast(op->getOperand(0).getDefiningOp()); + if (!allocaOp || !isLambdaType(allocaOp.getAllocaType())) + return nullptr; + return allocaOp; +} + +void LifetimeCheckPass::checkLambdaCaptureStore(StoreOp storeOp) { + auto localByRefAddr = storeOp.getValue(); + auto lambdaCaptureAddr = storeOp.getAddr(); + + if (!isa_and_nonnull(localByRefAddr.getDefiningOp())) + return; + auto lambdaAddr = getLambdaFromMemberAccess(lambdaCaptureAddr); + if (!lambdaAddr) + return; + + if (currScope->localValues.count(localByRefAddr)) + getPmap()[lambdaAddr].insert(State::getLocalValue(localByRefAddr)); +} + void LifetimeCheckPass::checkStore(StoreOp storeOp) { auto addr = storeOp.getAddr(); // The bulk of the check is done on top of store to pointer categories, // which usually represent the most common case. // - // We handle some special local values, like coroutine tasks, which could - // be holding references to things with dangling lifetime. + // We handle some special local values, like coroutine tasks and lambdas, + // which could be holding references to things with dangling lifetime. if (!ptrs.count(addr)) { if (currScope->localTempTasks.count(storeOp.getValue())) checkCoroTaskStore(storeOp); + else + checkLambdaCaptureStore(storeOp); return; } @@ -1006,7 +1087,8 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, mlir::Value histKey, - mlir::Location warningLoc) { + mlir::Location warningLoc, + bool forRetLambda) { assert(invalidHist.count(histKey) && "expected invalid hist"); auto &hist = invalidHist[histKey]; unsigned limit = opts.histLimit; @@ -1021,15 +1103,18 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, break; } case InvalidStyle::EndOfScope: { - if (!tasks.count(histKey)) { - StringRef outOfScopeVarName = getVarNameFromValue(*info.val); - D.attachNote(info.loc) << "pointee '" << outOfScopeVarName - << "' invalidated at end of scope"; - } else { + if (tasks.count(histKey)) { D.attachNote((*info.val).getLoc()) << "coroutine bound to resource " << "with expired lifetime"; D.attachNote(info.loc) << "at the end of scope or full-expression"; emittedDanglingTasks.insert(warningLoc); + } else if (forRetLambda) { + D.attachNote(info.val->getLoc()) + << "declared here but invalid after function end"; + } else { + StringRef outOfScopeVarName = getVarNameFromValue(*info.val); + D.attachNote(info.loc) << "pointee '" << outOfScopeVarName + << "' invalidated at end of scope"; } break; } @@ -1043,8 +1128,8 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, } } -void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, - mlir::Location loc) { +void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, + bool forRetLambda) { bool hasInvalid = getPmap()[addr].count(State::getInvalid()); bool hasNullptr = getPmap()[addr].count(State::getNullPtr()); @@ -1076,13 +1161,15 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, StringRef varName = getVarNameFromValue(addr); auto D = emitWarning(loc); - if (tasks.count(addr)) { + if (tasks.count(addr)) D << "use of coroutine '" << varName << "' with dangling reference"; - } else + else if (forRetLambda) + D << "returned lambda captures local variable"; + else D << "use of invalid pointer '" << varName << "'"; if (hasInvalid && opts.emitHistoryInvalid()) - emitInvalidHistory(D, addr, loc); + emitInvalidHistory(D, addr, loc, forRetLambda); if (hasNullptr && opts.emitHistoryNull()) { assert(pmapNullHist.count(addr) && "expected nullptr hist"); @@ -1333,6 +1420,21 @@ bool LifetimeCheckPass::isOwnerOrPointerClassMethod( return false; } +bool LifetimeCheckPass::isLambdaType(mlir::Type ty) { + if (IsLambdaTyCache.count(ty)) + return IsLambdaTyCache[ty]; + + IsLambdaTyCache[ty] = false; + auto taskTy = ty.dyn_cast(); + if (!taskTy) + return false; + auto recordDecl = taskTy.getAst()->getAstDecl(); + if (recordDecl->isLambda()) + IsLambdaTyCache[ty] = true; + + return IsLambdaTyCache[ty]; +} + bool LifetimeCheckPass::isTaskType(mlir::Value taskVal) { auto ty = taskVal.getType(); if (IsTaskTyCache.count(ty)) @@ -1448,6 +1550,8 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return checkCall(callOp); if (auto awaitOp = dyn_cast(op)) return checkAwait(awaitOp); + if (auto returnOp = dyn_cast(op)) + return checkReturn(returnOp); } void LifetimeCheckPass::runOnOperation() { diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 8a0fed6323ed..a0056d423c6a 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-return-stack-address -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s void fn() { @@ -35,3 +35,21 @@ void l0() { // CHECK: cir.store %6, %8 : i32, cir.ptr // CHECK: cir.func @_Z2l0v() { + +auto g() { + int i = 12; + return [&] { + i += 100; + return i; + }; +} + +// CHECK: cir.func @_Z1gv() -> !ty_22class2Eanon222 { +// CHECK: %0 = cir.alloca !ty_22class2Eanon222, cir.ptr , ["__retval"] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK: %2 = cir.cst(12 : i32) : i32 +// CHECK: cir.store %2, %1 : i32, cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > +// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon222 +// CHECK: cir.return %4 : !ty_22class2Eanon222 diff --git a/clang/test/CIR/Transforms/lifetime-check-lambda.cpp b/clang/test/CIR/Transforms/lifetime-check-lambda.cpp new file mode 100644 index 000000000000..b1a4a53bead4 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-lambda.cpp @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -I%S/Inputs -Wno-return-stack-address -fclangir -fclangir-lifetime-check="history=all;history_limit=1" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +auto g() { + int i = 12; // expected-note {{declared here but invalid after function end}} + return [&] { // expected-warning {{returned lambda captures local variable}} + i += 100; + return i; + }; +} \ No newline at end of file From dafd74c74641ce41f042b61bb2d13c57d7d4c4e5 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 7 Mar 2023 16:21:16 -0500 Subject: [PATCH 0823/2301] [CIR][NFC] Change cir.cst -> cir.const --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 4 +- clang/include/clang/CIR/Dialect/IR/CIROps.td | 14 +++--- ...{CIRGenExprCst.cpp => CIRGenExprConst.cpp} | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 12 ++--- clang/lib/CIR/CodeGen/CMakeLists.txt | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 38 ++++++++-------- .../CIR/Dialect/Transforms/MergeCleanups.cpp | 4 +- clang/test/CIR/CodeGen/String.cpp | 8 ++-- clang/test/CIR/CodeGen/array.cpp | 8 ++-- clang/test/CIR/CodeGen/basic.c | 4 +- clang/test/CIR/CodeGen/basic.cpp | 28 ++++++------ clang/test/CIR/CodeGen/call.c | 8 ++-- clang/test/CIR/CodeGen/coro-task.cpp | 10 ++--- clang/test/CIR/CodeGen/fullexpr.cpp | 2 +- clang/test/CIR/CodeGen/globals.cpp | 12 ++--- clang/test/CIR/CodeGen/goto.cpp | 4 +- clang/test/CIR/CodeGen/lalg.c | 4 +- clang/test/CIR/CodeGen/lambda.cpp | 4 +- clang/test/CIR/CodeGen/loop-scope.cpp | 4 +- clang/test/CIR/CodeGen/loop.cpp | 28 ++++++------ clang/test/CIR/CodeGen/sourcelocation.cpp | 6 +-- clang/test/CIR/CodeGen/store.c | 4 +- clang/test/CIR/CodeGen/struct.cpp | 4 +- clang/test/CIR/CodeGen/switch.cpp | 12 ++--- clang/test/CIR/IR/branch.cir | 4 +- clang/test/CIR/IR/cast.cir | 2 +- clang/test/CIR/IR/cir-ops.cir | 14 +++--- clang/test/CIR/IR/global.cir | 12 ++--- clang/test/CIR/IR/invalid.cir | 44 +++++++++---------- clang/test/CIR/IR/loop.cir | 44 +++++++++---------- clang/test/CIR/IR/ptr_stride.cir | 4 +- clang/test/CIR/IR/switch.cir | 2 +- .../ThroughMLIR/binop-unsigned-int.cir | 4 +- clang/test/CIR/Lowering/ThroughMLIR/bool.cir | 2 +- clang/test/CIR/Lowering/ThroughMLIR/dot.cir | 2 +- clang/test/CIR/Lowering/ThroughMLIR/goto.cir | 6 +-- .../test/CIR/Lowering/ThroughMLIR/memref.cir | 2 +- .../Lowering/ThroughMLIR/unary-inc-dec.cir | 2 +- .../Lowering/ThroughMLIR/unary-plus-minus.cir | 2 +- .../test/CIR/Lowering/binop-unsigned-int.cir | 4 +- clang/test/CIR/Lowering/bool.cir | 2 +- clang/test/CIR/Lowering/branch.cir | 4 +- clang/test/CIR/Lowering/dot.cir | 4 +- clang/test/CIR/Lowering/for.cir | 4 +- clang/test/CIR/Lowering/goto.cir | 6 +-- clang/test/CIR/Lowering/if.cir | 4 +- clang/test/CIR/Lowering/loadstorealloca.cir | 2 +- clang/test/CIR/Lowering/ptrstride.cir | 2 +- clang/test/CIR/Lowering/scope.cir | 2 +- clang/test/CIR/Lowering/unary-inc-dec.cir | 2 +- clang/test/CIR/Lowering/unary-not.cir | 2 +- clang/test/CIR/Lowering/unary-plus-minus.cir | 2 +- clang/test/CIR/Transforms/merge-cleanups.cir | 22 +++++----- 53 files changed, 218 insertions(+), 216 deletions(-) rename clang/lib/CIR/CodeGen/{CIRGenExprCst.cpp => CIRGenExprConst.cpp} (99%) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 92d41ad8d9bd..4e756f609620 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -56,10 +56,10 @@ def ZeroAttr : CIR_Attr<"Zero", "zero", [TypedAttrInterface]> { } //===----------------------------------------------------------------------===// -// CstArrayAttr +// ConstArrayAttr //===----------------------------------------------------------------------===// -def CstArrayAttr : CIR_Attr<"CstArray", "cst_array", [TypedAttrInterface]> { +def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> { let summary = "A constant array from ArrayAttr or StringRefAttr"; let description = [{ An CIR array attribute is an array of literals of the specified attr types. diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 4aa239f6a65a..6c60300a2477 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -106,7 +106,7 @@ def PtrStrideOp : CIR_Op<"ptr_stride", a stride. Currently only used for array subscripts. ```mlir - %3 = cir.cst(0 : i32) : i32 + %3 = cir.const(0 : i32) : i32 %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr ``` }]; @@ -134,20 +134,20 @@ def PtrStrideOp : CIR_Op<"ptr_stride", // ConstantOp //===----------------------------------------------------------------------===// -def ConstantOp : CIR_Op<"cst", +def ConstantOp : CIR_Op<"const", [ConstantLike, Pure]> { // FIXME: Use SameOperandsAndResultType or similar and prevent eye bleeding // type repetition in the assembly form. let summary = "Defines a CIR constant"; let description = [{ - The `cir.cst` operation turns a literal into an SSA value. The data is + The `cir.const` operation turns a literal into an SSA value. The data is attached to the operation as an attribute. ```mlir - %0 = cir.cst(42 : i32) : i32 - %1 = cir.cst(4.2 : f32) : f32 - %2 = cir.cst(nullptr : !cir.ptr) : !cir.ptr + %0 = cir.const(42 : i32) : i32 + %1 = cir.const(4.2 : f32) : f32 + %2 = cir.const(nullptr : !cir.ptr) : !cir.ptr ``` }]; @@ -961,7 +961,7 @@ def LoopOp : CIR_Op<"loop", cir.yield }) { %3 = cir.load %1 : cir.ptr , i32 - %4 = cir.cst(1 : i32) : i32 + %4 = cir.const(1 : i32) : i32 %5 = cir.binop(add, %3, %4) : i32 cir.store %5, %1 : i32, cir.ptr cir.yield diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp similarity index 99% rename from clang/lib/CIR/CodeGen/CIRGenExprCst.cpp rename to clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 48d04fbc6234..25b078b6ce83 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1244,7 +1244,7 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, for (auto const &Element : Elements) Eles.push_back(Element); - return mlir::cir::CstArrayAttr::get( + return mlir::cir::ConstArrayAttr::get( mlir::cir::ArrayType::get(CGM.getBuilder().getContext(), CommonElementType, ArrayBound), mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Eles)); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index cbe8029cf6eb..7988fd2ada46 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -91,10 +91,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, const clang::CodeGenOptions &CGO, DiagnosticsEngine &Diags) : builder(context), astCtx(astctx), langOpts(astctx.getLangOpts()), - codeGenOpts(CGO), - theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), - target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), - genTypes{*this} { + codeGenOpts(CGO), theModule{mlir::ModuleOp::create( + builder.getUnknownLoc())}, + Diags(Diags), target(astCtx.getTargetInfo()), + ABI(createCXXABI(*this)), genTypes{*this} { mlir::cir::sob::SignedOverflowBehavior sob; switch (langOpts.getSignedOverflowBehavior()) { case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Defined: @@ -906,9 +906,9 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { auto eltTy = getTypes().ConvertType(CAT->getElementType()); auto TheType = mlir::cir::ArrayType::get(builder.getContext(), eltTy, finalSize); - auto cstArray = mlir::cir::CstArrayAttr::get( + auto constArray = mlir::cir::ConstArrayAttr::get( TheType, mlir::StringAttr::get(Str, TheType)); - return cstArray; + return constArray; } assert(0 && "not implemented"); diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 99bcee685ee7..8924a0311d25 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -20,7 +20,7 @@ add_clang_library(clangCIR CIRGenDecl.cpp CIRGenDeclCXX.cpp CIRGenExpr.cpp - CIRGenExprCst.cpp + CIRGenExprConst.cpp CIRGenExprAgg.cpp CIRGenExprCXX.cpp CIRGenExprScalar.cpp diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 94f378d45088..de71936061c8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -145,7 +145,7 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, } if (attrType.isa()) { - // FIXME: should also support arrays / cst_arrays. + // FIXME: should also support arrays / const_arrays. if (opType.isa<::mlir::cir::StructType>()) return success(); return op->emitOpError("zero expects struct type"); @@ -168,8 +168,8 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return success(); } - if (attrType.isa()) { - // CstArrayAttr is already verified to bing with cir.array type. + if (attrType.isa()) { + // ConstArrayAttr is already verified to bing with cir.array type. return success(); } @@ -202,8 +202,8 @@ static ParseResult parseConstantValue(OpAsmParser &parser, return success(); } -// FIXME: create a CIRCstAttr and hide this away for both global -// initialization and cir.cst operation. +// FIXME: create a CIRConstAttr and hide this away for both global +// initialization and cir.const operation. static void printConstant(OpAsmPrinter &p, Attribute value) { p.printAttribute(value); } @@ -1054,7 +1054,7 @@ parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, // Parse constant with initializer, examples: // cir.global @y = 3.400000e+00 : f32 - // cir.global @rgb = #cir.cst_array<[...] : !cir.array> + // cir.global @rgb = #cir.const_array<[...] : !cir.array> if (parseConstantValue(parser, initialValueAttr).failed()) return failure(); @@ -1515,7 +1515,7 @@ mlir::OpTrait::impl::verifySameFirstOperandAndResultType(Operation *op) { // CIR attributes //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::CstArrayAttr::verify( +LogicalResult mlir::cir::ConstArrayAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::Type type, Attribute attr) { @@ -1559,8 +1559,8 @@ LogicalResult mlir::cir::CstArrayAttr::verify( return eltTypeCheck; } -::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, - ::mlir::Type type) { +::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { ::mlir::FailureOr<::mlir::Type> resultTy; ::mlir::FailureOr resultVal; ::llvm::SMLoc loc = parser.getCurrentLocation(); @@ -1572,9 +1572,10 @@ ::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, // Parse variable 'value' resultVal = ::mlir::FieldParser::parse(parser); if (failed(resultVal)) { - parser.emitError(parser.getCurrentLocation(), - "failed to parse CstArrayAttr parameter 'value' which is " - "to be a `Attribute`"); + parser.emitError( + parser.getCurrentLocation(), + "failed to parse ConstArrayAttr parameter 'value' which is " + "to be a `Attribute`"); return {}; } @@ -1587,9 +1588,10 @@ ::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, // Parse variable 'type' resultTy = ::mlir::FieldParser<::mlir::Type>::parse(parser); if (failed(resultTy)) { - parser.emitError(parser.getCurrentLocation(), - "failed to parse CstArrayAttr parameter 'type' which is " - "to be a `::mlir::Type`"); + parser.emitError( + parser.getCurrentLocation(), + "failed to parse ConstArrayAttr parameter 'type' which is " + "to be a `::mlir::Type`"); return {}; } } else { @@ -1606,11 +1608,11 @@ ::mlir::Attribute CstArrayAttr::parse(::mlir::AsmParser &parser, // Parse literal '>' if (parser.parseGreater()) return {}; - return parser.getChecked(loc, parser.getContext(), - resultTy.value(), resultVal.value()); + return parser.getChecked(loc, parser.getContext(), + resultTy.value(), resultVal.value()); } -void CstArrayAttr::print(::mlir::AsmPrinter &printer) const { +void ConstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << "<"; printer.printStrippedAttrOrType(getValue()); if (getValue().isa()) { diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index 60958e3327c8..ce6b506100f7 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -42,7 +42,7 @@ struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { // Rewrite something like this: // // cir.if %2 { - // %3 = cir.cst(3 : i32) : i32 + // %3 = cir.const(3 : i32) : i32 // cir.br ^bb1 // ^bb1: // pred: ^bb0 // cir.return %3 : i32 @@ -51,7 +51,7 @@ struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { // to this: // // cir.if %2 { - // %3 = cir.cst(3 : i32) : i32 + // %3 = cir.const(3 : i32) : i32 // cir.return %3 : i32 // } // diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 43b994bbc4c5..4861ef87fb24 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -22,10 +22,10 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 // CHECK-NEXT: %2 = "cir.struct_element_addr"(%1) <{member_name = "storage"}> -// CHECK-NEXT: %3 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %3 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > // CHECK-NEXT: %4 = "cir.struct_element_addr"(%1) <{member_name = "size"}> : (!cir.ptr) -> !cir.ptr -// CHECK-NEXT: %5 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(0 : i32) : i32 // CHECK-NEXT: %6 = cir.cast(integral, %5 : i32), i64 // CHECK-NEXT: cir.store %6, %4 : i64, cir.ptr // CHECK-NEXT: cir.return @@ -37,7 +37,7 @@ void test() { // CHECK-NEXT: cir.store %arg1, %1 // CHECK-NEXT: %2 = cir.load %0 // CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_name = "storage"}> -// CHECK-NEXT: %4 = cir.cst(#cir.null : !cir.ptr) +// CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) // CHECK-NEXT: cir.store %4, %3 // CHECK-NEXT: %5 = "cir.struct_element_addr"(%2) <{member_name = "size"}> : (!cir.ptr) -> !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 @@ -53,7 +53,7 @@ void test() { // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_name = "storage"}> : (!cir.ptr) -> !cir.ptr> -// CHECK-NEXT: %4 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 07b5b0524e71..5406855dedb8 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -15,8 +15,8 @@ void a1() { // CHECK: cir.func @_Z2a1v() { // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} -// CHECK-NEXT: %1 = cir.cst(1 : i32) : i32 -// CHECK-NEXT: %2 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %1 = cir.const(1 : i32) : i32 +// CHECK-NEXT: %2 = cir.const(0 : i32) : i32 // CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : i32), !cir.ptr // CHECK-NEXT: cir.store %1, %4 : i32, cir.ptr @@ -29,7 +29,7 @@ int *a2() { // CHECK: cir.func @_Z2a2v() -> !cir.ptr { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} -// CHECK-NEXT: %2 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %2 = cir.const(0 : i32) : i32 // CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : i32), !cir.ptr // CHECK-NEXT: cir.store %4, %0 : !cir.ptr, cir.ptr > @@ -40,7 +40,7 @@ void local_stringlit() { const char *s = "whatnow"; } -// CHECK: cir.global "private" constant internal @".str" = #cir.cst_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.func @_Z15local_stringlitv() { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 9652a4255117..835befc685a3 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -23,7 +23,7 @@ int f2() { return 3; } // CHECK: cir.func @f2() -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: %1 = cir.const(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %2 : i32 @@ -36,7 +36,7 @@ int f3() { // CHECK: cir.func @f3() -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: %2 = cir.const(3 : i32) : i32 // CHECK-NEXT: cir.store %2, %1 : i32, cir.ptr // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: cir.store %3, %0 : i32, cir.ptr diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 4b7709f598da..5b3ae186cc7e 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -9,7 +9,7 @@ int *p0() { // CHECK: cir.func @_Z2p0v() -> !cir.ptr { // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] -// CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > int *p1() { @@ -20,7 +20,7 @@ int *p1() { // CHECK: cir.func @_Z2p1v() -> !cir.ptr { // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p"] -// CHECK: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > int *p2() { @@ -37,18 +37,18 @@ int *p2() { // CHECK: cir.func @_Z2p2v() -> !cir.ptr { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] {alignment = 8 : i64} -// CHECK-NEXT: %2 = cir.cst(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %2, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.scope { // CHECK-NEXT: %7 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} -// CHECK-NEXT: %8 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %8 = cir.const(0 : i32) : i32 // CHECK-NEXT: cir.store %8, %7 : i32, cir.ptr // CHECK-NEXT: cir.store %7, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %9 = cir.cst(42 : i32) : i32 +// CHECK-NEXT: %9 = cir.const(42 : i32) : i32 // CHECK-NEXT: %10 = cir.load deref %1 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.store %9, %10 : i32, cir.ptr // CHECK-NEXT: } loc(#[[locScope:loc[0-9]+]]) -// CHECK-NEXT: %3 = cir.cst(42 : i32) : i32 +// CHECK-NEXT: %3 = cir.const(42 : i32) : i32 // CHECK-NEXT: %4 = cir.load deref %1 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.store %3, %4 : i32, cir.ptr // CHECK-NEXT: %5 = cir.load %1 : cir.ptr >, !cir.ptr @@ -59,8 +59,8 @@ int *p2() { void b0() { bool x = true, y = false; } // CHECK: cir.func @_Z2b0v() { -// CHECK: %2 = cir.cst(true) : !cir.bool -// CHECK: %3 = cir.cst(false) : !cir.bool +// CHECK: %2 = cir.const(true) : !cir.bool +// CHECK: %3 = cir.const(false) : !cir.bool void b1(int a) { bool b = a; } @@ -83,10 +83,10 @@ void if0(int a) { // CHECK: %3 = cir.load %0 : cir.ptr , i32 // CHECK: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool // CHECK-NEXT: cir.if %4 { -// CHECK-NEXT: %5 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(3 : i32) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: } else { -// CHECK-NEXT: %5 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(4 : i32) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: } // CHECK: } @@ -111,12 +111,12 @@ void if1(int a, bool b, bool c) { // CHECK: %5 = cir.load %0 : cir.ptr , i32 // CHECK: %6 = cir.cast(int_to_bool, %5 : i32), !cir.bool // CHECK: cir.if %6 { -// CHECK: %7 = cir.cst(3 : i32) : i32 +// CHECK: %7 = cir.const(3 : i32) : i32 // CHECK: cir.store %7, %3 : i32, cir.ptr // CHECK: cir.scope { // CHECK: %8 = cir.load %1 : cir.ptr , !cir.bool // CHECK-NEXT: cir.if %8 { -// CHECK-NEXT: %9 = cir.cst(8 : i32) : i32 +// CHECK-NEXT: %9 = cir.const(8 : i32) : i32 // CHECK-NEXT: cir.store %9, %3 : i32, cir.ptr // CHECK-NEXT: } // CHECK: } @@ -124,11 +124,11 @@ void if1(int a, bool b, bool c) { // CHECK: cir.scope { // CHECK: %8 = cir.load %2 : cir.ptr , !cir.bool // CHECK-NEXT: cir.if %8 { -// CHECK-NEXT: %9 = cir.cst(14 : i32) : i32 +// CHECK-NEXT: %9 = cir.const(14 : i32) : i32 // CHECK-NEXT: cir.store %9, %3 : i32, cir.ptr // CHECK-NEXT: } // CHECK: } -// CHECK: %7 = cir.cst(4 : i32) : i32 +// CHECK: %7 = cir.const(4 : i32) : i32 // CHECK: cir.store %7, %3 : i32, cir.ptr // CHECK: } // CHECK: } diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index d9093099693f..ddd09b7ff189 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -46,8 +46,8 @@ void d(void) { // CHECK: } // CHECK: cir.func @d() { // CHECK: call @a() : () -> () -// CHECK: %0 = cir.cst(0 : i32) : i32 -// CHECK: %1 = cir.cst(1 : i32) : i32 +// CHECK: %0 = cir.const(0 : i32) : i32 +// CHECK: %1 = cir.const(1 : i32) : i32 // CHECK: call @b(%0, %1) : (i32, i32) -> i32 // CHECK: cir.return // CHECK: } @@ -84,8 +84,8 @@ void d(void) { // CXX-NEXT: } // CXX-NEXT: cir.func @_Z1dv() { // CXX-NEXT: call @_Z1av() : () -> () -// CXX-NEXT: %0 = cir.cst(0 : i32) : i32 -// CXX-NEXT: %1 = cir.cst(1 : i32) : i32 +// CXX-NEXT: %0 = cir.const(0 : i32) : i32 +// CXX-NEXT: %1 = cir.const(1 : i32) : i32 // CXX-NEXT: call @_Z1bii(%0, %1) : (i32, i32) -> i32 // CXX-NEXT: cir.return // CXX-NEXT: } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 87914b0805e4..785788385c6a 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -158,8 +158,8 @@ VoidTask silly_task() { // Get coroutine id with __builtin_coro_id. -// CHECK: %[[#NullPtr:]] = cir.cst(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: %[[#Align:]] = cir.cst(16 : i32) : i32 +// CHECK: %[[#NullPtr:]] = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %[[#Align:]] = cir.const(16 : i32) : i32 // CHECK: %[[#CoroId:]] = cir.call @__builtin_coro_id(%[[#Align]], %[[#NullPtr]], %[[#NullPtr]], %[[#NullPtr]]) // Perform allocation calling operator 'new' depending on __builtin_coro_alloc and @@ -264,8 +264,8 @@ VoidTask silly_task() { // Call builtin coro end and return -// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.cst(#cir.null : !cir.ptr) -// CHECK-NEXT: %[[#CoroEndArg1:]] = cir.cst(false) : !cir.bool +// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const(#cir.null : !cir.ptr) +// CHECK-NEXT: %[[#CoroEndArg1:]] = cir.const(false) : !cir.bool // CHECK-NEXT: = cir.call @__builtin_coro_end(%[[#CoroEndArg0]], %[[#CoroEndArg1]]) // CHECK: %[[#Tmp1:]] = cir.load %[[#VoidTaskAddr]] @@ -318,7 +318,7 @@ folly::coro::Task go1() { // The call to go(1) has its own scope due to full-expression rules. // CHECK: cir.scope { // CHECK: %[[#OneAddr:]] = cir.alloca i32, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} -// CHECK: %[[#One:]] = cir.cst(1 : i32) : i32 +// CHECK: %[[#One:]] = cir.const(1 : i32) : i32 // CHECK: cir.store %[[#One]], %[[#OneAddr]] : i32, cir.ptr // CHECK: %[[#IntTaskTmp:]] = cir.call @_Z2goRKi(%[[#OneAddr]]) : (!cir.ptr) -> ![[IntTask]] // CHECK: cir.store %[[#IntTaskTmp]], %[[#IntTaskAddr]] : ![[IntTask]], cir.ptr diff --git a/clang/test/CIR/CodeGen/fullexpr.cpp b/clang/test/CIR/CodeGen/fullexpr.cpp index 52d5556b5416..e1cc2a34d038 100644 --- a/clang/test/CIR/CodeGen/fullexpr.cpp +++ b/clang/test/CIR/CodeGen/fullexpr.cpp @@ -12,7 +12,7 @@ int go1() { // CHECK: %[[#XAddr:]] = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} // CHECK: %[[#RVal:]] = cir.scope { // CHECK-NEXT: %[[#TmpAddr:]] = cir.alloca i32, cir.ptr , ["ref.tmp0", init] {alignment = 4 : i64} -// CHECK-NEXT: %[[#One:]] = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %[[#One:]] = cir.const(1 : i32) : i32 // CHECK-NEXT: cir.store %[[#One]], %[[#TmpAddr]] : i32, cir.ptr // CHECK-NEXT: %[[#RValTmp:]] = cir.call @_Z2goRKi(%[[#TmpAddr]]) : (!cir.ptr) -> i32 // CHECK-NEXT: cir.yield %[[#RValTmp]] : i32 diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 8b09c5d66671..3b053403ba1c 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -36,13 +36,13 @@ int use_func() { return func(); } // CHECK-NEXT: cir.global external @y = 3.400000e+00 : f32 // CHECK-NEXT: cir.global external @w = 4.300000e+00 : f64 // CHECK-NEXT: cir.global external @x = 51 : i8 -// CHECK-NEXT: cir.global external @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> -// CHECK-NEXT: cir.global external @alpha = #cir.cst_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8] : !cir.array> +// CHECK-NEXT: cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> +// CHECK-NEXT: cir.global external @alpha = #cir.const_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8] : !cir.array> -// CHECK-NEXT: cir.global "private" constant internal @".str" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global external @s = @".str": !cir.ptr -// CHECK-NEXT: cir.global "private" constant internal @".str1" = #cir.cst_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global external @s1 = @".str1": !cir.ptr // CHECK-NEXT: cir.global external @s2 = @".str": !cir.ptr @@ -57,14 +57,14 @@ int use_func() { return func(); } // CHECK-NEXT: %0 = cir.alloca i8, cir.ptr , ["c", init] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @s2 : cir.ptr > // CHECK-NEXT: %2 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %3 = cir.const(0 : i32) : i32 // CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr // CHECK-NEXT: %5 = cir.load %4 : cir.ptr , i8 // CHECK-NEXT: cir.store %5, %0 : i8, cir.ptr // CHECK: cir.func linkonce_odr @_Z4funcIiET_v() -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %1 = cir.const(0 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %2 : i32 diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index db79783f628e..76493d75714e 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -18,13 +18,13 @@ void g0(int a) { // CHECK-NEXT cir.br ^bb2 // CHECK-NEXT ^bb1: // no predecessors // CHECK-NEXT %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT %4 = cir.const(1 : i32) : i32 // CHECK-NEXT %5 = cir.binop(add, %3, %4) : i32 // CHECK-NEXT cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT cir.br ^bb2 // CHECK-NEXT ^bb2: // 2 preds: ^bb0, ^bb1 // CHECK-NEXT %6 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT %7 = cir.cst(2 : i32) : i32 +// CHECK-NEXT %7 = cir.const(2 : i32) : i32 // CHECK-NEXT %8 = cir.binop(add, %6, %7) : i32 // CHECK-NEXT cir.store %8, %1 : i32, cir.ptr // CHECK-NEXT cir.return diff --git a/clang/test/CIR/CodeGen/lalg.c b/clang/test/CIR/CodeGen/lalg.c index 6d723cc9e732..1725ecaf5505 100644 --- a/clang/test/CIR/CodeGen/lalg.c +++ b/clang/test/CIR/CodeGen/lalg.c @@ -10,9 +10,9 @@ double dot() { // CHECK: %1 = cir.alloca f64, cir.ptr , ["x", init] // CHECK-NEXT: %2 = cir.alloca f64, cir.ptr , ["y", init] // CHECK-NEXT: %3 = cir.alloca f64, cir.ptr , ["result", init] -// CHECK-NEXT: %4 = cir.cst(0.000000e+00 : f64) : f64 +// CHECK-NEXT: %4 = cir.const(0.000000e+00 : f64) : f64 // CHECK-NEXT: cir.store %4, %1 : f64, cir.ptr -// CHECK-NEXT: %5 = cir.cst(0.000000e+00 : f32) : f32 +// CHECK-NEXT: %5 = cir.const(0.000000e+00 : f32) : f32 // CHECK-NEXT: %6 = cir.cast(floating, %5 : f32), f64 // CHECK-NEXT: cir.store %6, %2 : f64, cir.ptr // CHECK-NEXT: %7 = cir.load %1 : cir.ptr , f64 diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index a0056d423c6a..7436ccf919b2 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -28,7 +28,7 @@ void l0() { // CHECK: %2 = "cir.struct_element_addr"(%1) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %3 = cir.load %2 : cir.ptr >, !cir.ptr // CHECK: %4 = cir.load %3 : cir.ptr , i32 -// CHECK: %5 = cir.cst(1 : i32) : i32 +// CHECK: %5 = cir.const(1 : i32) : i32 // CHECK: %6 = cir.binop(add, %4, %5) : i32 // CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr @@ -47,7 +47,7 @@ auto g() { // CHECK: cir.func @_Z1gv() -> !ty_22class2Eanon222 { // CHECK: %0 = cir.alloca !ty_22class2Eanon222, cir.ptr , ["__retval"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} -// CHECK: %2 = cir.cst(12 : i32) : i32 +// CHECK: %2 = cir.const(12 : i32) : i32 // CHECK: cir.store %2, %1 : i32, cir.ptr // CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index 3255f319ed0b..3001cd26fc85 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -13,14 +13,14 @@ void l0() { // CPPSCOPE-NEXT: cir.scope { // CPPSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} // CPPSCOPE-NEXT: %1 = cir.alloca i32, cir.ptr , ["j", init] {alignment = 4 : i64} -// CPPSCOPE-NEXT: %2 = cir.cst(0 : i32) : i32 +// CPPSCOPE-NEXT: %2 = cir.const(0 : i32) : i32 // CPPSCOPE-NEXT: cir.store %2, %0 : i32, cir.ptr // CPPSCOPE-NEXT: cir.loop for(cond : { // CSCOPE: cir.func @l0() { // CSCOPE-NEXT: cir.scope { // CSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} -// CSCOPE-NEXT: %1 = cir.cst(0 : i32) : i32 +// CSCOPE-NEXT: %1 = cir.const(0 : i32) : i32 // CSCOPE-NEXT: cir.store %1, %0 : i32, cir.ptr // CSCOPE-NEXT: cir.loop for(cond : { diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 9f80af591a1c..cec5a2b9db4d 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -25,7 +25,7 @@ void l1() { // CHECK: cir.func @_Z2l1v // CHECK: cir.loop for(cond : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool // CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: @@ -34,13 +34,13 @@ void l1() { // CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %2 : i32, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: cir.yield @@ -72,7 +72,7 @@ void l2(bool cond) { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.yield @@ -85,7 +85,7 @@ void l2(bool cond) { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.yield @@ -93,7 +93,7 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %3 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %3 = cir.const(1 : i32) : i32 // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool // CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: @@ -104,7 +104,7 @@ void l2(bool cond) { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.yield @@ -137,7 +137,7 @@ void l3(bool cond) { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.yield @@ -150,7 +150,7 @@ void l3(bool cond) { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.yield @@ -158,7 +158,7 @@ void l3(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: %3 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %3 = cir.const(1 : i32) : i32 // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool // CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: @@ -169,7 +169,7 @@ void l3(bool cond) { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.yield @@ -193,12 +193,12 @@ void l4() { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: cir.scope { // CHECK-NEXT: %10 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %11 = cir.cst(10 : i32) : i32 +// CHECK-NEXT: %11 = cir.const(10 : i32) : i32 // CHECK-NEXT: %12 = cir.cmp(lt, %10, %11) : i32, !cir.bool // CHECK-NEXT: cir.if %12 { // CHECK-NEXT: cir.yield continue @@ -213,7 +213,7 @@ void l5() { // CHECK: cir.func @_Z2l5v() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: %0 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %0 = cir.const(0 : i32) : i32 // CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : i32), !cir.bool // CHECK-NEXT: cir.brcond %1 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index bcb7f0163e01..a9e332eff28a 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -31,13 +31,13 @@ int s0(int a, int b) { // CHECK: cir.store %6, %3 : i32, cir.ptr loc(#loc23) // CHECK: cir.scope { // CHECK: %9 = cir.load %3 : cir.ptr , i32 loc(#loc13) -// CHECK: %10 = cir.cst(0 : i32) : i32 loc(#loc14) +// CHECK: %10 = cir.const(0 : i32) : i32 loc(#loc14) // CHECK: %11 = cir.cmp(gt, %9, %10) : i32, !cir.bool loc(#loc26) // CHECK: cir.if %11 { -// CHECK: %12 = cir.cst(0 : i32) : i32 loc(#loc16) +// CHECK: %12 = cir.const(0 : i32) : i32 loc(#loc16) // CHECK: cir.store %12, %3 : i32, cir.ptr loc(#loc28) // CHECK: } else { -// CHECK: %12 = cir.cst(1 : i32) : i32 loc(#loc12) +// CHECK: %12 = cir.const(1 : i32) : i32 loc(#loc12) // CHECK: cir.store %12, %3 : i32, cir.ptr loc(#loc29) // CHECK: } loc(#loc27) // CHECK: } loc(#loc25) diff --git a/clang/test/CIR/CodeGen/store.c b/clang/test/CIR/CodeGen/store.c index 816cd6e4d97a..20563f1fd7da 100644 --- a/clang/test/CIR/CodeGen/store.c +++ b/clang/test/CIR/CodeGen/store.c @@ -8,9 +8,9 @@ void foo() { // CHECK: cir.func @foo() { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %1 = cir.const(0 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr -// CHECK-NEXT: %2 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %2 = cir.const(1 : i32) : i32 // CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index a7ad34e0c868..96c52f1ba476 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -64,9 +64,9 @@ void yoyo(incomplete *i) {} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["result", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () -// CHECK-NEXT: %3 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: %3 = cir.const(4 : i32) : i32 // CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, i32) -> () -// CHECK-NEXT: %4 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: %4 = cir.const(4 : i32) : i32 // CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, i32) -> i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index a7b5e40c66bb..af55e0a513eb 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -20,7 +20,7 @@ void sw1(int a) { // CHECK: cir.switch (%3 : i32) [ // CHECK-NEXT: case (equal, 0 : i32) { // CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %1 : i32, cir.ptr // CHECK-NEXT: cir.yield break @@ -32,10 +32,10 @@ void sw1(int a) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %4 = cir.alloca i32, cir.ptr , ["yolo", init] // CHECK-NEXT: %5 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %6 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %6 = cir.const(1 : i32) : i32 // CHECK-NEXT: %7 = cir.binop(add, %5, %6) : i32 // CHECK-NEXT: cir.store %7, %1 : i32, cir.ptr -// CHECK-NEXT: %8 = cir.cst(100 : i32) : i32 +// CHECK-NEXT: %8 = cir.const(100 : i32) : i32 // CHECK-NEXT: cir.store %8, %4 : i32, cir.ptr // CHECK-NEXT: cir.yield break // CHECK-NEXT: } @@ -58,7 +58,7 @@ void sw2(int a) { // CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["fomo", init] // CHECK: cir.switch (%4 : i32) [ // CHECK-NEXT: case (equal, 3 : i32) { -// CHECK-NEXT: %5 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(0 : i32) : i32 // CHECK-NEXT: cir.store %5, %2 : i32, cir.ptr void sw3(int a) { @@ -92,7 +92,7 @@ int sw4(int a) { // CHECK: cir.switch (%4 : i32) [ // CHECK-NEXT: case (equal, 42 : i32) { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %5 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(3 : i32) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: cir.return %6 : i32 @@ -100,7 +100,7 @@ int sw4(int a) { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, // CHECK-NEXT: case (default) { -// CHECK-NEXT: %5 = cir.cst(2 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(2 : i32) : i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 // CHECK-NEXT: cir.return %6 : i32 diff --git a/clang/test/CIR/IR/branch.cir b/clang/test/CIR/IR/branch.cir index 805f630e420b..b12f4a16db03 100644 --- a/clang/test/CIR/IR/branch.cir +++ b/clang/test/CIR/IR/branch.cir @@ -4,7 +4,7 @@ cir.func @b0() { cir.scope { cir.loop while(cond : { - %0 = cir.cst(true) : !cir.bool + %0 = cir.const(true) : !cir.bool cir.brcond %0 ^bb1, ^bb2 ^bb1: cir.yield continue @@ -24,7 +24,7 @@ cir.func @b0() { // CHECK: cir.func @b0 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.cst(true) : !cir.bool +// CHECK-NEXT: %0 = cir.const(true) : !cir.bool // CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: cir.yield continue diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index 02ae51620528..edbb7a9bad5a 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -6,7 +6,7 @@ module { %a = cir.cast (int_to_bool, %arg0 : i32), !cir.bool %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr - %4 = cir.cst(0 : i32) : i32 + %4 = cir.const(0 : i32) : i32 cir.return } diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 551edf7c4eec..de66b684ed37 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -11,7 +11,7 @@ module { cir.func @f3() -> i32 { %0 = cir.alloca i32, cir.ptr , ["x", init] - %1 = cir.cst(3 : i32) : i32 + %1 = cir.const(3 : i32) : i32 cir.store %1, %0 : i32, cir.ptr %2 = cir.load %0 : cir.ptr , i32 cir.return %2 : i32 @@ -21,15 +21,15 @@ module { %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} cir.store %arg0, %1 : i32, cir.ptr - %2 = cir.cst(0 : i32) : i32 + %2 = cir.const(0 : i32) : i32 cir.store %2, %0 : i32, cir.ptr %3 = cir.load %1 : cir.ptr , i32 %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool cir.if %4 { - %6 = cir.cst(3 : i32) : i32 + %6 = cir.const(3 : i32) : i32 cir.store %6, %0 : i32, cir.ptr } else { - %6 = cir.cst(4 : i32) : i32 + %6 = cir.const(4 : i32) : i32 cir.store %6, %0 : i32, cir.ptr } %5 = cir.load %0 : cir.ptr , i32 @@ -56,7 +56,7 @@ module { // CHECK-NEXT: cir.func @f3() -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", init] -// CHECK-NEXT: %1 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: %1 = cir.const(3 : i32) : i32 // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %2 : i32 @@ -65,10 +65,10 @@ module { // CHECK: @if0(%arg0: i32) -> i32 { // CHECK: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool // CHECK-NEXT: cir.if %4 { -// CHECK-NEXT: %6 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: %6 = cir.const(3 : i32) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: } else { -// CHECK-NEXT: %6 = cir.cst(4 : i32) : i32 +// CHECK-NEXT: %6 = cir.const(4 : i32) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 910a78851672..63b90b8ad27a 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -2,11 +2,11 @@ module { cir.global external @a = 3 : i32 - cir.global external @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> - cir.global external @b = #cir.cst_array<"example\00" : !cir.array> + cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> + cir.global external @b = #cir.const_array<"example\00" : !cir.array> cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} cir.global "private" internal @c : i32 - cir.global "private" constant internal @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global external @s = @".str2": !cir.ptr cir.func @use_global() { %0 = cir.get_global @a : cir.ptr @@ -15,11 +15,11 @@ module { } // CHECK: cir.global external @a = 3 : i32 -// CHECK: cir.global external @rgb = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> -// CHECK: cir.global external @b = #cir.cst_array<"example\00" : !cir.array> +// CHECK: cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> +// CHECK: cir.global external @b = #cir.const_array<"example\00" : !cir.array> // CHECK: cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} // CHECK: cir.global "private" internal @c : i32 -// CHECK: cir.global "private" constant internal @".str2" = #cir.cst_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.global external @s = @".str2": !cir.ptr // CHECK: cir.func @use_global() diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index b8c24090ebd1..d945ffab0b6f 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1,27 +1,27 @@ // Test attempts to build bogus CIR // RUN: cir-tool %s -verify-diagnostics -split-input-file -// expected-error@+2 {{'cir.cst' op nullptr expects pointer type}} +// expected-error@+2 {{'cir.const' op nullptr expects pointer type}} cir.func @p0() { - %1 = cir.cst(#cir.null : !cir.ptr) : i32 + %1 = cir.const(#cir.null : !cir.ptr) : i32 cir.return } // ----- -// expected-error@+2 {{'cir.cst' op result type ('i32') must be '!cir.bool' for 'true'}} +// expected-error@+2 {{'cir.const' op result type ('i32') must be '!cir.bool' for 'true'}} cir.func @b0() { - %1 = cir.cst(true) : i32 + %1 = cir.const(true) : i32 cir.return } // ----- cir.func @if0() { - %0 = cir.cst(true) : !cir.bool + %0 = cir.const(true) : !cir.bool // expected-error@+1 {{'cir.if' op region control flow edge from Region #0 to parent results: source has 1 operands, but target successor needs 0}} cir.if %0 { - %6 = cir.cst(3 : i32) : i32 + %6 = cir.const(3 : i32) : i32 cir.yield %6 : i32 } cir.return @@ -30,7 +30,7 @@ cir.func @if0() { // ----- cir.func @yield0() { - %0 = cir.cst(true) : !cir.bool + %0 = cir.const(true) : !cir.bool cir.if %0 { // expected-error {{custom op 'cir.if' expected at least one block with cir.yield or cir.return}} cir.br ^a ^a: @@ -41,7 +41,7 @@ cir.func @yield0() { // ----- cir.func @yieldfallthrough() { - %0 = cir.cst(true) : !cir.bool + %0 = cir.const(true) : !cir.bool cir.if %0 { cir.yield fallthrough // expected-error {{'cir.yield' op fallthrough only expected within 'cir.switch'}} } @@ -51,7 +51,7 @@ cir.func @yieldfallthrough() { // ----- cir.func @yieldbreak() { - %0 = cir.cst(true) : !cir.bool + %0 = cir.const(true) : !cir.bool cir.if %0 { cir.yield break // expected-error {{shall be dominated by 'cir.loop' or 'cir.switch'}} } @@ -61,7 +61,7 @@ cir.func @yieldbreak() { // ----- cir.func @yieldcontinue() { - %0 = cir.cst(true) : !cir.bool + %0 = cir.const(true) : !cir.bool cir.if %0 { cir.yield continue // expected-error {{shall be dominated by 'cir.loop'}} } @@ -71,10 +71,10 @@ cir.func @yieldcontinue() { // ----- cir.func @s0() { - %1 = cir.cst(2 : i32) : i32 + %1 = cir.const(2 : i32) : i32 cir.switch (%1 : i32) [ case (equal, 5) { - %2 = cir.cst(3 : i32) : i32 + %2 = cir.const(3 : i32) : i32 } ] // expected-error {{blocks are expected to be explicitly terminated}} cir.return @@ -83,7 +83,7 @@ cir.func @s0() { // ----- cir.func @s1() { - %1 = cir.cst(2 : i32) : i32 + %1 = cir.const(2 : i32) : i32 cir.switch (%1 : i32) [ case (equal, 5) { } @@ -94,7 +94,7 @@ cir.func @s1() { // ----- cir.func @badstride(%x: !cir.ptr) { - %idx = cir.cst(2 : i32) : i32 + %idx = cir.const(2 : i32) : i32 %4 = cir.ptr_stride(%x : !cir.ptr, %idx : i32), !cir.ptr // expected-error {{requires the same type for first operand and result}} cir.return } @@ -140,7 +140,7 @@ cir.func @cast4(%p: !cir.ptr) { cir.func @b0() { cir.scope { cir.loop while(cond : { // expected-error {{cond region must be terminated with 'cir.yield' or 'cir.yield continue'}} - %0 = cir.cst(true) : !cir.bool + %0 = cir.const(true) : !cir.bool cir.brcond %0 ^bb1, ^bb2 ^bb1: cir.yield break @@ -160,31 +160,31 @@ cir.func @b0() { // ----- module { - cir.global external @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array element should match array element type}} + cir.global external @a = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array element should match array element type}} } // expected-error {{expected constant attribute to match type}} // ----- module { - cir.global external @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array size should match type size}} + cir.global external @a = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array size should match type size}} } // expected-error {{expected constant attribute to match type}} // ----- module { - cir.global external @b = #cir.cst_array<"example\00" : !cir.array> // expected-error {{constant array element for string literals expects i8 array element type}} + cir.global external @b = #cir.const_array<"example\00" : !cir.array> // expected-error {{constant array element for string literals expects i8 array element type}} } // expected-error {{expected constant attribute to match type}} // ----- module { - cir.global "private" constant external @".str2" = #cir.cst_array<"example\00"> {alignment = 1 : i64} // expected-error {{expected type declaration for string literal}} + cir.global "private" constant external @".str2" = #cir.const_array<"example\00"> {alignment = 1 : i64} // expected-error {{expected type declaration for string literal}} } // expected-error@-1 {{expected constant attribute to match type}} // ----- module { - cir.global @a = #cir.cst_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{expected string or keyword containing one of the following enum values for attribute 'linkage' [external, available_externally, linkonce, linkonce_odr, weak, weak_odr, internal, private, extern_weak, common]}} + cir.global @a = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{expected string or keyword containing one of the following enum values for attribute 'linkage' [external, available_externally, linkonce, linkonce_odr, weak, weak_odr, internal, private, extern_weak, common]}} } // ----- @@ -203,7 +203,7 @@ module { cir.func @unary0() { %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.cst(2 : i32) : i32 + %1 = cir.const(2 : i32) : i32 %3 = cir.unary(inc, %1) : i32, i32 // expected-error {{'cir.unary' op requires input to be defined by a memory load}} cir.store %3, %0 : i32, cir.ptr @@ -214,7 +214,7 @@ cir.func @unary0() { cir.func @unary1() { %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.cst(2 : i32) : i32 + %1 = cir.const(2 : i32) : i32 cir.store %1, %0 : i32, cir.ptr %2 = cir.load %0 : cir.ptr , i32 diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 77f6d444a2a1..951d8e6f2fc4 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -2,15 +2,15 @@ cir.func @l0() { %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.cst(0 : i32) : i32 + %1 = cir.const(0 : i32) : i32 cir.store %1, %0 : i32, cir.ptr cir.scope { %2 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.cst(0 : i32) : i32 + %3 = cir.const(0 : i32) : i32 cir.store %3, %2 : i32, cir.ptr cir.loop for(cond : { %4 = cir.load %2 : cir.ptr , i32 - %5 = cir.cst(10 : i32) : i32 + %5 = cir.const(10 : i32) : i32 %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool cir.brcond %6 ^bb1, ^bb2 ^bb1: @@ -19,16 +19,16 @@ cir.func @l0() { cir.yield }, step : { %4 = cir.load %2 : cir.ptr , i32 - %5 = cir.cst(1 : i32) : i32 + %5 = cir.const(1 : i32) : i32 %6 = cir.binop(add, %4, %5) : i32 cir.store %6, %2 : i32, cir.ptr cir.yield }) { %4 = cir.load %0 : cir.ptr , i32 - %5 = cir.cst(1 : i32) : i32 + %5 = cir.const(1 : i32) : i32 %6 = cir.binop(add, %4, %5) : i32 cir.store %6, %0 : i32, cir.ptr - %7 = cir.cst(true) : !cir.bool + %7 = cir.const(true) : !cir.bool cir.if %7 { cir.yield break } @@ -37,11 +37,11 @@ cir.func @l0() { } cir.scope { %2 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.cst(0 : i32) : i32 + %3 = cir.const(0 : i32) : i32 cir.store %3, %2 : i32, cir.ptr cir.loop while(cond : { %4 = cir.load %2 : cir.ptr , i32 - %5 = cir.cst(10 : i32) : i32 + %5 = cir.const(10 : i32) : i32 %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool cir.brcond %6 ^bb1, ^bb2 ^bb1: @@ -52,10 +52,10 @@ cir.func @l0() { cir.yield }) { %4 = cir.load %0 : cir.ptr , i32 - %5 = cir.cst(1 : i32) : i32 + %5 = cir.const(1 : i32) : i32 %6 = cir.binop(add, %4, %5) : i32 cir.store %6, %0 : i32, cir.ptr - %7 = cir.cst(true) : !cir.bool + %7 = cir.const(true) : !cir.bool cir.if %7 { cir.yield continue } @@ -65,11 +65,11 @@ cir.func @l0() { cir.scope { %2 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.cst(0 : i32) : i32 + %3 = cir.const(0 : i32) : i32 cir.store %3, %2 : i32, cir.ptr cir.loop dowhile(cond : { %4 = cir.load %2 : cir.ptr , i32 - %5 = cir.cst(10 : i32) : i32 + %5 = cir.const(10 : i32) : i32 %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool cir.brcond %6 ^bb1, ^bb2 ^bb1: @@ -80,7 +80,7 @@ cir.func @l0() { cir.yield }) { %4 = cir.load %0 : cir.ptr , i32 - %5 = cir.cst(1 : i32) : i32 + %5 = cir.const(1 : i32) : i32 %6 = cir.binop(add, %4, %5) : i32 cir.store %6, %0 : i32, cir.ptr cir.yield @@ -92,7 +92,7 @@ cir.func @l0() { // CHECK: cir.func @l0 // CHECK: cir.loop for(cond : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool // CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: @@ -101,16 +101,16 @@ cir.func @l0() { // CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %2 : i32, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr -// CHECK-NEXT: %7 = cir.cst(true) : !cir.bool +// CHECK-NEXT: %7 = cir.const(true) : !cir.bool // CHECK-NEXT: cir.if %7 { // CHECK-NEXT: cir.yield break // CHECK-NEXT: } @@ -119,7 +119,7 @@ cir.func @l0() { // CHECK: cir.loop while(cond : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool // CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: @@ -130,10 +130,10 @@ cir.func @l0() { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr -// CHECK-NEXT: %7 = cir.cst(true) : !cir.bool +// CHECK-NEXT: %7 = cir.const(true) : !cir.bool // CHECK-NEXT: cir.if %7 { // CHECK-NEXT: cir.yield continue // CHECK-NEXT: } @@ -142,7 +142,7 @@ cir.func @l0() { // CHECK: cir.loop dowhile(cond : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(10 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(10 : i32) : i32 // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool // CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: @@ -153,7 +153,7 @@ cir.func @l0() { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr // CHECK-NEXT: cir.yield diff --git a/clang/test/CIR/IR/ptr_stride.cir b/clang/test/CIR/IR/ptr_stride.cir index 84d0baa4ee2d..a9e7a4ab29b0 100644 --- a/clang/test/CIR/IR/ptr_stride.cir +++ b/clang/test/CIR/IR/ptr_stride.cir @@ -5,7 +5,7 @@ module { %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr - %3 = cir.cst(0 : i32) : i32 + %3 = cir.const(0 : i32) : i32 %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr cir.return } @@ -15,7 +15,7 @@ module { // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] // CHECK-NEXT: %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %3 = cir.cst(0 : i32) : i32 +// CHECK-NEXT: %3 = cir.const(0 : i32) : i32 // CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index a2c985991115..0f2c9acd881d 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -1,7 +1,7 @@ // RUN: cir-tool %s | FileCheck %s cir.func @s0() { - %1 = cir.cst(2 : i32) : i32 + %1 = cir.const(2 : i32) : i32 cir.switch (%1 : i32) [ case (default) { cir.return diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir index 2534baca6fbe..86cde3d35256 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir @@ -6,8 +6,8 @@ module { %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} %2 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %3 = cir.cst(2 : i32) : i32 cir.store %3, %0 : i32, cir.ptr - %4 = cir.cst(1 : i32) : i32 cir.store %4, %1 : i32, cir.ptr + %3 = cir.const(2 : i32) : i32 cir.store %3, %0 : i32, cir.ptr + %4 = cir.const(1 : i32) : i32 cir.store %4, %1 : i32, cir.ptr %5 = cir.load %0 : cir.ptr , i32 %6 = cir.load %1 : cir.ptr , i32 %7 = cir.binop(mul, %5, %6) : i32 diff --git a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir index 0879e2cbfa99..8a95f54118c4 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir @@ -4,7 +4,7 @@ module { cir.func @foo() { %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} - %1 = cir.cst(true) : !cir.bool + %1 = cir.const(true) : !cir.bool cir.store %1, %0 : !cir.bool, cir.ptr cir.return } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir index 1c4efc11b832..291487fab4c3 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir @@ -10,7 +10,7 @@ module { cir.store %arg0, %0 : !cir.ptr, cir.ptr > %3 = cir.load %0 : cir.ptr >, !cir.ptr cir.store %3, %2 : !cir.ptr, cir.ptr > - %4 = cir.cst(0 : i32) : i32 + %4 = cir.const(0 : i32) : i32 %5 = cir.load %1 : cir.ptr , i32 cir.return %5 : i32 } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir index df1ebbf02ee2..d7e5c432a333 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir @@ -4,18 +4,18 @@ module { cir.func @foo() { %0 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %1 = cir.cst(1 : i32) : i32 + %1 = cir.const(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr cir.br ^bb2 ^bb1: // no predecessors %2 = cir.load %0 : cir.ptr , i32 - %3 = cir.cst(1 : i32) : i32 + %3 = cir.const(1 : i32) : i32 %4 = cir.binop(add, %2, %3) : i32 cir.store %4, %0 : i32, cir.ptr cir.br ^bb2 ^bb2: // 2 preds: ^bb0, ^bb1 %5 = cir.load %0 : cir.ptr , i32 - %6 = cir.cst(2 : i32) : i32 + %6 = cir.const(2 : i32) : i32 %7 = cir.binop(add, %5, %6) : i32 cir.store %7, %0 : i32, cir.ptr cir.return diff --git a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir index 4b6b95afd361..cacc0f50f528 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir @@ -4,7 +4,7 @@ module { cir.func @foo() -> i32 { %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.cst(1 : i32) : i32 + %1 = cir.const(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr %2 = cir.load %0 : cir.ptr , i32 cir.return %2 : i32 diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir index 0edb8ee4e58a..5c195a69c57e 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir @@ -5,7 +5,7 @@ module { cir.func @foo() { %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.cst(2 : i32) : i32 + %2 = cir.const(2 : i32) : i32 cir.store %2, %0 : i32, cir.ptr cir.store %2, %1 : i32, cir.ptr diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir index d0da3bbc12f6..01b3f9c04236 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir @@ -5,7 +5,7 @@ module { cir.func @foo() { %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.cst(2 : i32) : i32 + %2 = cir.const(2 : i32) : i32 cir.store %2, %0 : i32, cir.ptr cir.store %2, %1 : i32, cir.ptr diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir index 85163e281c76..6347b4f07b4f 100644 --- a/clang/test/CIR/Lowering/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -6,8 +6,8 @@ module { %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} %2 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %3 = cir.cst(2 : i32) : i32 cir.store %3, %0 : i32, cir.ptr - %4 = cir.cst(1 : i32) : i32 cir.store %4, %1 : i32, cir.ptr + %3 = cir.const(2 : i32) : i32 cir.store %3, %0 : i32, cir.ptr + %4 = cir.const(1 : i32) : i32 cir.store %4, %1 : i32, cir.ptr %5 = cir.load %0 : cir.ptr , i32 %6 = cir.load %1 : cir.ptr , i32 %7 = cir.binop(mul, %5, %6) : i32 diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 4d9b6b50f6f6..9067e75bbf9c 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -3,7 +3,7 @@ module { cir.func @foo() { - %1 = cir.cst(true) : !cir.bool + %1 = cir.const(true) : !cir.bool %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} cir.store %1, %0 : !cir.bool, cir.ptr cir.return diff --git a/clang/test/CIR/Lowering/branch.cir b/clang/test/CIR/Lowering/branch.cir index cbc66f16f494..1e50806dc355 100644 --- a/clang/test/CIR/Lowering/branch.cir +++ b/clang/test/CIR/Lowering/branch.cir @@ -4,10 +4,10 @@ cir.func @foo(%arg0: !cir.bool) -> i32 { cir.brcond %arg0 ^bb1, ^bb2 ^bb1: - %0 = cir.cst(1: i32) : i32 + %0 = cir.const(1: i32) : i32 cir.return %0 : i32 ^bb2: - %1 = cir.cst(0: i32) : i32 + %1 = cir.const(0: i32) : i32 cir.return %1 : i32 } diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index d769cb95da8b..71f488cd9535 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -11,11 +11,11 @@ module { cir.store %arg0, %0 : !cir.ptr, cir.ptr > cir.store %arg1, %1 : !cir.ptr, cir.ptr > cir.store %arg2, %2 : i32, cir.ptr - %5 = cir.cst(0.000000e+00 : f64) : f64 + %5 = cir.const(0.000000e+00 : f64) : f64 cir.store %5, %4 : f64, cir.ptr cir.scope { %8 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} - %9 = cir.cst(0 : i32) : i32 + %9 = cir.const(0 : i32) : i32 cir.store %9, %8 : i32, cir.ptr cir.loop for(cond : { %10 = cir.load %8 : cir.ptr , i32 diff --git a/clang/test/CIR/Lowering/for.cir b/clang/test/CIR/Lowering/for.cir index bc023ed896b7..40d36b8398dd 100644 --- a/clang/test/CIR/Lowering/for.cir +++ b/clang/test/CIR/Lowering/for.cir @@ -4,11 +4,11 @@ module { cir.func @foo() { %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} - %1 = cir.cst(0 : i32) : i32 + %1 = cir.const(0 : i32) : i32 cir.store %1, %0 : i32, cir.ptr cir.loop for(cond : { %2 = cir.load %0 : cir.ptr , i32 - %3 = cir.cst(10 : i32) : i32 + %3 = cir.const(10 : i32) : i32 %4 = cir.cmp(lt, %2, %3) : i32, i32 %5 = cir.cast(int_to_bool, %4 : i32), !cir.bool cir.brcond %5 ^bb1, ^bb2 diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 25bd686394d5..2a8057a92144 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -4,18 +4,18 @@ module { cir.func @foo() { %0 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %1 = cir.cst(1 : i32) : i32 + %1 = cir.const(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr cir.br ^bb2 ^bb1: // no predecessors %2 = cir.load %0 : cir.ptr , i32 - %3 = cir.cst(1 : i32) : i32 + %3 = cir.const(1 : i32) : i32 %4 = cir.binop(add, %2, %3) : i32 cir.store %4, %0 : i32, cir.ptr cir.br ^bb2 ^bb2: // 2 preds: ^bb0, ^bb1 %5 = cir.load %0 : cir.ptr , i32 - %6 = cir.cst(2 : i32) : i32 + %6 = cir.const(2 : i32) : i32 %7 = cir.binop(add, %5, %6) : i32 cir.store %7, %0 : i32, cir.ptr cir.return diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index 2d21ce0bbb0a..0a57a03254cc 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -5,10 +5,10 @@ module { cir.func @foo(%arg0: i32) -> i32 { %4 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool cir.if %4 { - %5 = cir.cst(1 : i32) : i32 + %5 = cir.const(1 : i32) : i32 cir.return %5 : i32 } else { - %5 = cir.cst(0 : i32) : i32 + %5 = cir.const(0 : i32) : i32 cir.return %5 : i32 } cir.return %arg0 : i32 diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index cef3bc98ca87..d53d11a7938a 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -4,7 +4,7 @@ module { cir.func @foo() -> i32 { %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.cst(1 : i32) : i32 + %1 = cir.const(1 : i32) : i32 cir.store %1, %0 : i32, cir.ptr %2 = cir.load %0 : cir.ptr , i32 cir.return %2 : i32 diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index a151ae645b32..39501250caea 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -6,7 +6,7 @@ module { %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} cir.store %arg0, %0 : !cir.ptr, cir.ptr > %1 = cir.load %0 : cir.ptr >, !cir.ptr - %2 = cir.cst(1 : i32) : i32 + %2 = cir.const(1 : i32) : i32 %3 = cir.ptr_stride(%1 : !cir.ptr, %2 : i32), !cir.ptr %4 = cir.load %3 : cir.ptr , i32 cir.return diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index c816ca95c750..82d0be699d1e 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -5,7 +5,7 @@ module { cir.func @foo() { cir.scope { %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.cst(4 : i32) : i32 + %1 = cir.const(4 : i32) : i32 cir.store %1, %0 : i32, cir.ptr } cir.return diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir index 559ba71d7587..829c51192ddb 100644 --- a/clang/test/CIR/Lowering/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -5,7 +5,7 @@ module { cir.func @foo() { %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.cst(2 : i32) : i32 + %2 = cir.const(2 : i32) : i32 cir.store %2, %0 : i32, cir.ptr cir.store %2, %1 : i32, cir.ptr diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index 6ca263907ec2..bdf77e6e4be3 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -5,7 +5,7 @@ module { cir.func @foo() -> i32 { %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %2 = cir.cst(1 : i32) : i32 + %2 = cir.const(1 : i32) : i32 cir.store %2, %1 : i32, cir.ptr %3 = cir.load %1 : cir.ptr , i32 %4 = cir.unary(not, %3) : i32, i32 diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index ea150950be21..81569b6d14e9 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -5,7 +5,7 @@ module { cir.func @foo() { %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.cst(2 : i32) : i32 + %2 = cir.const(2 : i32) : i32 cir.store %2, %0 : i32, cir.ptr cir.store %2, %1 : i32, cir.ptr diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 49aca6a3768a..e4dd5197ef2e 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -9,13 +9,13 @@ module { cir.store %arg1, %1 : i32, cir.ptr cir.scope { %2 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %3 = cir.cst(1 : i32) : i32 + %3 = cir.const(1 : i32) : i32 cir.store %3, %2 : i32, cir.ptr %4 = cir.load %0 : cir.ptr , i32 cir.switch (%4 : i32) [ case (equal, 0 : i32) { %5 = cir.load %2 : cir.ptr , i32 - %6 = cir.cst(1 : i32) : i32 + %6 = cir.const(1 : i32) : i32 %7 = cir.binop(add, %5, %6) : i32 cir.store %7, %2 : i32, cir.ptr cir.br ^bb1 @@ -26,7 +26,7 @@ module { cir.scope { cir.scope { %5 = cir.load %1 : cir.ptr , i32 - %6 = cir.cst(3 : i32) : i32 + %6 = cir.const(3 : i32) : i32 %7 = cir.cmp(eq, %5, %6) : i32, !cir.bool cir.if %7 { cir.br ^bb1 @@ -42,10 +42,10 @@ module { cir.scope { %5 = cir.alloca i32, cir.ptr , ["yolo", init] {alignment = 4 : i64} %6 = cir.load %2 : cir.ptr , i32 - %7 = cir.cst(1 : i32) : i32 + %7 = cir.const(1 : i32) : i32 %8 = cir.binop(add, %6, %7) : i32 cir.store %8, %2 : i32, cir.ptr - %9 = cir.cst(100 : i32) : i32 + %9 = cir.const(100 : i32) : i32 cir.store %9, %5 : i32, cir.ptr cir.br ^bb1 ^bb1: // pred: ^bb0 @@ -61,7 +61,7 @@ module { cir.func @l0() { cir.scope { cir.loop while(cond : { - %0 = cir.cst(true) : !cir.bool + %0 = cir.const(true) : !cir.bool cir.brcond %0 ^bb1, ^bb2 ^bb1: cir.yield continue @@ -81,7 +81,7 @@ module { cir.func @l1() { cir.scope { cir.loop while(cond : { - %0 = cir.cst(false) : !cir.bool + %0 = cir.const(false) : !cir.bool cir.brcond %0 ^bb1, ^bb2 ^bb1: cir.yield continue @@ -102,7 +102,7 @@ module { // CHECK: cir.switch (%4 : i32) [ // CHECK-NEXT: case (equal, 0 : i32) { // CHECK-NEXT: %5 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %6 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %6 = cir.const(1 : i32) : i32 // CHECK-NEXT: %7 = cir.binop(add, %5, %6) : i32 // CHECK-NEXT: cir.store %7, %2 : i32, cir.ptr // CHECK-NEXT: cir.return @@ -111,7 +111,7 @@ module { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %6 = cir.cst(3 : i32) : i32 +// CHECK-NEXT: %6 = cir.const(3 : i32) : i32 // CHECK-NEXT: %7 = cir.cmp(eq, %5, %6) : i32, !cir.bool // CHECK-NEXT: cir.if %7 { // CHECK-NEXT: cir.return @@ -125,10 +125,10 @@ module { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.alloca i32, cir.ptr , ["yolo", init] {alignment = 4 : i64} // CHECK-NEXT: %6 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %7 = cir.cst(1 : i32) : i32 +// CHECK-NEXT: %7 = cir.const(1 : i32) : i32 // CHECK-NEXT: %8 = cir.binop(add, %6, %7) : i32 // CHECK-NEXT: cir.store %8, %2 : i32, cir.ptr -// CHECK-NEXT: %9 = cir.cst(100 : i32) : i32 +// CHECK-NEXT: %9 = cir.const(100 : i32) : i32 // CHECK-NEXT: cir.store %9, %5 : i32, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } From 329f2778daf44b6e335eeb8e0e6307ab438b4291 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 9 Mar 2023 21:39:29 -0500 Subject: [PATCH 0824/2301] [CIR][CodeGen] Add DLTIDialect to CIR modules Use the layout string from clang's TargetInfo and feed it to the DLTI Dialect. It helpfully includes translation from llvm::DataLayout to MLIR attributes as demonstrated in the added test. It should be straightforward from here to query layout information from the module. --- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 17 +++++++++++- clang/lib/CIR/CodeGen/CMakeLists.txt | 7 ++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 5 ++-- clang/lib/Frontend/CompilerInvocation.cpp | 2 ++ clang/test/CIR/CodeGen/basic.c | 2 +- clang/test/CIR/CodeGen/dlti.c | 27 +++++++++++++++++++ clang/test/CIR/CodeGen/sourcelocation.cpp | 2 +- 7 files changed, 54 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/dlti.c diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 09e89df9a16d..807e2430d3b7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -12,9 +12,12 @@ #include "CIRGenModule.h" +#include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/MLIRContext.h" +#include "mlir/Target/LLVMIR/Import.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/AST/ASTContext.h" @@ -35,18 +38,30 @@ CIRGenerator::~CIRGenerator() { assert(DeferredInlineMemberFuncDefs.empty() || Diags.hasErrorOccurred()); } +static void setMLIRDataLayout(mlir::ModuleOp &mod, const llvm::DataLayout &dl) { + auto *context = mod.getContext(); + mod->setAttr(mlir::LLVM::LLVMDialect::getDataLayoutAttrName(), + mlir::StringAttr::get(context, dl.getStringRepresentation())); + mlir::DataLayoutSpecInterface dlSpec = mlir::translateDataLayout(dl, context); + mod->setAttr(mlir::DLTIDialect::kDataLayoutAttrName, dlSpec); +} + void CIRGenerator::Initialize(ASTContext &astCtx) { using namespace llvm; this->astCtx = &astCtx; mlirCtx = std::make_unique(); + mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); - mlirCtx->getOrLoadDialect(); + mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); CGM = std::make_unique(*mlirCtx.get(), astCtx, codeGenOpts, Diags); + auto mod = CGM->getModule(); + auto layout = llvm::DataLayout(astCtx.getTargetInfo().getDataLayoutString()); + setMLIRDataLayout(mod, layout); } bool CIRGenerator::verifyModule() { return CGM->verifyModule(); } diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 8924a0311d25..b93f9ecf9910 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -49,6 +49,8 @@ add_clang_library(clangCIR MLIRCIRTransforms MLIRAffineToStandard MLIRAnalysis + MLIRDLTIDialect + MLIRFuncToLLVM MLIRIR MLIRLLVMCommonConversion MLIRLLVMDialect @@ -57,11 +59,10 @@ add_clang_library(clangCIR MLIRMemRefToLLVM MLIRParser MLIRPass - MLIRSideEffectInterfaces MLIRSCFToControlFlow - MLIRFuncToLLVM + MLIRSideEffectInterfaces MLIRSupport - MLIRMemRefDialect + MLIRTargetLLVMIRImport MLIRTargetLLVMIRExport MLIRTransforms ) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b716c0740a8f..2d0e256bcf4c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -20,6 +20,7 @@ #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" +#include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/SCF/IR/SCF.h" @@ -334,8 +335,8 @@ struct ConvertCIRToLLVMPass : public mlir::PassWrapper> { void getDependentDialects(mlir::DialectRegistry ®istry) const override { - registry.insert(); + registry.insert(); } void runOnOperation() final; diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 3e6cea3c5843..2779de1eef3c 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3090,6 +3090,8 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Opts.ProgramAction != frontend::GenerateModule && Opts.IsSystemModule) Diags.Report(diag::err_drv_argument_only_allowed_with) << "-fsystem-module" << "-emit-module"; + if (Args.hasArg(OPT_fclangir) || Args.hasArg(OPT_emit_cir)) + Opts.UseClangIRPipeline = true; if (Args.hasArg(OPT_clangir_disable_passes)) Opts.ClangIRDisablePasses = true; diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 835befc685a3..659c3f3e9e80 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -8,7 +8,7 @@ int foo(int i) { return i; } -// CHECK: module attributes {cir.sob = #cir.signed_overflow_behavior} { +// CHECK: module attributes { // CHECK-NEXT: cir.func @foo(%arg0: i32 loc({{.*}})) -> i32 { // CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/dlti.c b/clang/test/CIR/CodeGen/dlti.c new file mode 100644 index 000000000000..fefcdf92f3f6 --- /dev/null +++ b/clang/test/CIR/CodeGen/dlti.c @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +void foo() {} + +// CHECK: module attributes { +// CHECK-DAG: cir.sob = #cir.signed_overflow_behavior, +// CHECK-DAG: dlti.dl_spec = +// CHECK-DAG: #dlti.dl_spec< +// CHECK-DAG: i16 = dense<16> : vector<2xi64>, +// CHECK-DAG: i32 = dense<32> : vector<2xi64>, +// CHECK-DAG: i8 = dense<8> : vector<2xi64>, +// CHECK-DAG: i1 = dense<8> : vector<2xi64>, +// CHECK-DAG: !llvm.ptr = dense<64> : vector<4xi64>, +// CHECK-DAG: f80 = dense<128> : vector<2xi64>, +// CHECK-DAG: i128 = dense<128> : vector<2xi64>, +// CHECK-DAG: !llvm.ptr<272> = dense<64> : vector<4xi64>, +// CHECK-DAG: i64 = dense<64> : vector<2xi64>, +// CHECK-DAG: !llvm.ptr<270> = dense<32> : vector<4xi64>, +// CHECK-DAG: !llvm.ptr<271> = dense<32> : vector<4xi64>, +// CHECK-DAG: f128 = dense<128> : vector<2xi64>, +// CHECK-DAG: f16 = dense<16> : vector<2xi64>, +// CHECK-DAG: f64 = dense<64> : vector<2xi64>, +// CHECK-DAG: "dlti.stack_alignment" = 128 : i64 +// CHECK-DAG: "dlti.endianness" = "little" +// >, +// CHECK-DAG: llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" + diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index a9e332eff28a..19cb68667f79 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -17,7 +17,7 @@ int s0(int a, int b) { // CHECK: #loc6 = loc("{{.*}}sourcelocation.cpp":4:19) // CHECK: #loc21 = loc(fused[#loc3, #loc4]) // CHECK: #loc22 = loc(fused[#loc5, #loc6]) -// CHECK: module attributes {cir.sob = #cir.signed_overflow_behavior} { +// CHECK: module attributes {cir.sob = #cir.signed_overflow_behavior // CHECK: cir.func @_Z2s0ii(%arg0: i32 loc(fused[#loc3, #loc4]), %arg1: i32 loc(fused[#loc5, #loc6])) -> i32 { // CHECK: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) // CHECK: %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) From e03858278960091b1ec6e04a037836df1b475847 Mon Sep 17 00:00:00 2001 From: redbopo Date: Sun, 12 Mar 2023 17:40:03 +0800 Subject: [PATCH 0825/2301] [NFC] Remove unused CIR CMakeLists.txt file. - remove the unused CIR CMakeLists.txt in mlir/include/* --- mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt diff --git a/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt deleted file mode 100644 index e476b8a77b9c..000000000000 --- a/mlir/include/mlir/Dialect/CIR/IR/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -add_mlir_dialect(CIROps cir) -add_mlir_doc(CIRDialect CIRDialect Dialects/ -gen-dialect-doc) -add_mlir_doc(CIROps CIROps Dialects/ -gen-op-doc) -add_mlir_doc(CIRAttrs CIRAttrs Dialects/ -gen-attrdef-doc) -add_mlir_doc(CIRTypes CIRTypes Dialects/ -gen-typedef-doc) - -set(LLVM_TARGET_DEFINITIONS CIROps.td) -mlir_tablegen(CIROpsEnums.h.inc -gen-enum-decls) -mlir_tablegen(CIROpsEnums.cpp.inc -gen-enum-defs) -mlir_tablegen(CIROpsAttributes.h.inc -gen-attrdef-decls) -mlir_tablegen(CIROpsAttributes.cpp.inc -gen-attrdef-defs) -mlir_tablegen(CIROpsStructs.h.inc -gen-attrdef-decls) -mlir_tablegen(CIROpsStructs.cpp.inc -gen-attrdef-defs) -add_public_tablegen_target(MLIRCIREnumsGen) From a51c90091f0057120c5dbfd1f2bce992909fc147 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 16 Mar 2023 22:34:21 -0700 Subject: [PATCH 0826/2301] [CIR][CIRGen][NFC] Improve buildAutoVarAlloca skeleton - Turn assert into checks. - Use temporaries for declarations. - Add asserts for more unimplemented features. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 119 +++++++++++------- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 19 +++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 10 ++ .../CodeGen/UnimplementedFeatureGuarding.h | 3 + 5 files changed, 108 insertions(+), 53 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index ec88ff92b401..f42915afda16 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -23,67 +23,90 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { QualType Ty = D.getType(); // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private && // getLangOpts().OpenCL)) + assert(!UnimplementedFeature::openCL()); + assert(!UnimplementedFeature::openMP()); assert(Ty.getAddressSpace() == LangAS::Default); - - assert(!D.isEscapingByref() && "not implemented"); assert(!Ty->isVariablyModifiedType() && "not implemented"); assert(!getContext() .getLangOpts() .OpenMP && // !CGF.getLangOpts().OpenMPIRBuilder "not implemented"); - bool NRVO = - getContext().getLangOpts().ElideConstructors && D.isNRVOVariable(); - assert(!NRVO && "not implemented"); - assert(Ty->isConstantSizeType() && "not implemented"); assert(!D.hasAttr() && "not implemented"); + bool NRVO = + getContext().getLangOpts().ElideConstructors && D.isNRVOVariable(); AutoVarEmission emission(D); + bool isEscapingByRef = D.isEscapingByref(); + emission.IsEscapingByRef = isEscapingByRef; + CharUnits alignment = getContext().getDeclAlign(&D); - // TODO: debug info - // TODO: use CXXABI - - // If this value is an array or struct with a statically determinable - // constant initializer, there are optimizations we can do. - // - // TODO: We should constant-evaluate the initializer of any variable, - // as long as it is initialized by a constant expression. Currently, - // isConstantInitializer produces wrong answers for structs with - // reference or bitfield members, and a few other cases, and checking - // for POD-ness protects us from some of these. - if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) && - (D.isConstexpr() || - ((Ty.isPODType(getContext()) || - getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) && - D.getInit()->isConstantInitializer(getContext(), false)))) { - - // If the variable's a const type, and it's neither an NRVO - // candidate nor a __block variable and has no mutable members, - // emit it as a global instead. - // Exception is if a variable is located in non-constant address space - // in OpenCL. - // TODO: deal with CGM.getCodeGenOpts().MergeAllConstants - // TODO: perhaps we don't need this at all at CIR since this can - // be done as part of lowering down to LLVM. - if ((!getContext().getLangOpts().OpenCL || - Ty.getAddressSpace() == LangAS::opencl_constant) && - (!NRVO && !D.isEscapingByref() && CGM.isTypeConstant(Ty, true))) - assert(0 && "not implemented"); - - // Otherwise, tell the initialization code that we're in this case. - emission.IsConstantAggregate = true; - } + assert(!UnimplementedFeature::generateDebugInfo()); + assert(!UnimplementedFeature::cxxABI()); - // TODO: track source location range... - mlir::Value addr; - if (failed(declare(&D, Ty, getLoc(D.getSourceRange()), alignment, addr))) { - CGM.emitError("Cannot declare variable"); - return emission; - } + Address address = Address::invalid(); + Address allocaAddr = Address::invalid(); + Address openMPLocalAddr = Address::invalid(); + if (getLangOpts().OpenMP && openMPLocalAddr.isValid()) { + llvm_unreachable("NYI"); + } else if (Ty->isConstantSizeType()) { + // If this value is an array or struct with a statically determinable + // constant initializer, there are optimizations we can do. + // + // TODO: We should constant-evaluate the initializer of any variable, + // as long as it is initialized by a constant expression. Currently, + // isConstantInitializer produces wrong answers for structs with + // reference or bitfield members, and a few other cases, and checking + // for POD-ness protects us from some of these. + if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) && + (D.isConstexpr() || + ((Ty.isPODType(getContext()) || + getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) && + D.getInit()->isConstantInitializer(getContext(), false)))) { + + // If the variable's a const type, and it's neither an NRVO + // candidate nor a __block variable and has no mutable members, + // emit it as a global instead. + // Exception is if a variable is located in non-constant address space + // in OpenCL. + // TODO: deal with CGM.getCodeGenOpts().MergeAllConstants + // TODO: perhaps we don't need this at all at CIR since this can + // be done as part of lowering down to LLVM. + if ((!getContext().getLangOpts().OpenCL || + Ty.getAddressSpace() == LangAS::opencl_constant) && + (!NRVO && !D.isEscapingByref() && CGM.isTypeConstant(Ty, true))) + assert(0 && "not implemented"); + + // Otherwise, tell the initialization code that we're in this case. + emission.IsConstantAggregate = true; + } - // TODO: what about emitting lifetime markers for MSVC catch parameters? - // TODO: something like @llvm.lifetime.start/end here? revisit this later. - emission.Addr = Address{addr, alignment}; + if (NRVO) + llvm_unreachable("NYI"); + else { + if (isEscapingByRef) + llvm_unreachable("NYI"); + + mlir::Type allocaTy = getTypes().convertTypeForMem(Ty); + CharUnits allocaAlignment = alignment; + // Create the temp alloca and declare variable using it. + mlir::Value addrVal; + address = CreateTempAlloca(allocaTy, allocaAlignment, + getLoc(D.getSourceRange()), D.getName(), + /*ArraySize=*/nullptr, &allocaAddr); + if (failed(declare(address, &D, Ty, getLoc(D.getSourceRange()), alignment, + addrVal))) { + CGM.emitError("Cannot declare variable"); + return emission; + } + // TODO: what about emitting lifetime markers for MSVC catch parameters? + // TODO: something like @llvm.lifetime.start/end here? revisit this later. + assert(!UnimplementedFeature::shouldEmitLifetimeMarkers()); + } + } else { // not openmp nor constant sized type + llvm_unreachable("NYI"); + } + emission.Addr = address; setAddrOfLocalVar(&D, emission.Addr); return emission; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index b785ef9d0c9c..e43bbc35503b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1883,8 +1883,8 @@ Address CIRGenFunction::CreateTempAllocaWithoutCast(mlir::Type Ty, return Address(Alloca, Ty, Align); } -/// CreateTempAlloca - This creates a alloca and inserts it into the entry -/// block. The alloca is casted to default address space if necessary. +/// This creates a alloca and inserts it into the entry block. The alloca is +/// casted to default address space if necessary. Address CIRGenFunction::CreateTempAlloca(mlir::Type Ty, CharUnits Align, mlir::Location Loc, const Twine &Name, mlir::Value ArraySize, @@ -1901,9 +1901,9 @@ Address CIRGenFunction::CreateTempAlloca(mlir::Type Ty, CharUnits Align, return Address(V, Ty, Align); } -/// CreateTempAlloca - This creates an alloca and inserts it into the entry -/// block if \p ArraySize is nullptr, otherwise inserts it at the current -/// insertion point of the builder. +/// This creates an alloca and inserts it into the entry block if \p ArraySize +/// is nullptr, otherwise inserts it at the current insertion point of the +/// builder. mlir::cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, const Twine &Name, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index e151bfa58080..78e44242d70d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -269,6 +269,25 @@ mlir::LogicalResult CIRGenFunction::declare(const Decl *var, QualType ty, return mlir::success(); } +mlir::LogicalResult CIRGenFunction::declare(Address addr, const Decl *var, + QualType ty, mlir::Location loc, + CharUnits alignment, + mlir::Value &addrVal, + bool isParam) { + const auto *namedVar = dyn_cast_or_null(var); + assert(namedVar && "Needs a named decl"); + assert(!symbolTable.count(var) && "not supposed to be available just yet"); + + addrVal = addr.getPointer(); + if (isParam) { + auto allocaOp = cast(addrVal.getDefiningOp()); + allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext())); + } + + symbolTable.insert(var, addrVal); + return mlir::success(); +} + /// All scope related cleanup needed: /// - Patching up unsolved goto's. /// - Build all cleanup code and insert yield/returns. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index a45e02ca7ff7..0b27ec3cc779 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -385,6 +385,12 @@ class CIRGenFunction { mlir::Location loc, clang::CharUnits alignment, mlir::Value &addr, bool isParam = false); + /// Declare a variable in the current scope but take an Address as input. + mlir::LogicalResult declare(Address addr, const clang::Decl *var, + clang::QualType ty, mlir::Location loc, + clang::CharUnits alignment, mlir::Value &addrVal, + bool isParam = false); + public: // FIXME(cir): move this to CIRGenBuider.h mlir::Value buildAlloca(llvm::StringRef name, clang::QualType ty, @@ -946,6 +952,10 @@ class CIRGenFunction { /// initializer. bool IsConstantAggregate; + /// True if the variable is a __block variable that is captured by an + /// escaping block. + bool IsEscapingByRef = false; + struct Invalid {}; AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {} diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index be6b6761d7f8..e666e212a774 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -59,6 +59,9 @@ struct UnimplementedFeature { static bool peepholeProtection() { return false; } static bool attributeNoBuiltin() { return false; } static bool CGCapturedStmtInfo() { return false; } + static bool cxxABI() { return false; } + static bool openCL() { return false; } + static bool openMP() { return false; } }; } // namespace cir From 88cc9c4587b32398183170942ec9f1f31eeebdc8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 6 Mar 2023 22:16:20 -0800 Subject: [PATCH 0827/2301] [CIR][CIRGen] Implement some NRVO for aggregates and improve lambda returning Note how this allows the lifetime checker to catch a dangling lambda currently not detected by -Wreturn-stack-address --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 32 +++++++++++++++---- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 14 ++++---- clang/lib/CIR/CodeGen/CIRGenFunction.h | 6 +++- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 10 +++++- clang/test/CIR/CodeGen/lambda.cpp | 20 ++++++++++++ .../CIR/Transforms/lifetime-check-lambda.cpp | 11 +++++++ 6 files changed, 77 insertions(+), 16 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index f42915afda16..989aad77327a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -80,9 +80,29 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { emission.IsConstantAggregate = true; } - if (NRVO) - llvm_unreachable("NYI"); - else { + // A normal fixed sized variable becomes an alloca in the entry block, + // unless: + // - it's an NRVO variable. + // - we are compiling OpenMP and it's an OpenMP local variable. + if (NRVO) { + // The named return value optimization: allocate this variable in the + // return slot, so that we can elide the copy when returning this + // variable (C++0x [class.copy]p34). + address = ReturnValue; + allocaAddr = ReturnValue; + + if (const RecordType *RecordTy = Ty->getAs()) { + const auto *RD = RecordTy->getDecl(); + const auto *CXXRD = dyn_cast(RD); + if ((CXXRD && !CXXRD->hasTrivialDestructor()) || + RD->isNonTrivialToPrimitiveDestroy()) { + // In LLVM: Create a flag that is used to indicate when the NRVO was + // applied to this variable. Set it to zero to indicate that NRVO was + // not applied. + llvm_unreachable("NYI"); + } + } + } else { if (isEscapingByRef) llvm_unreachable("NYI"); @@ -275,11 +295,11 @@ void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS, llvm_unreachable("NYI"); } -void CIRGenFunction::buildScalarInit(const Expr *init, const ValueDecl *D, +void CIRGenFunction::buildScalarInit(const Expr *init, mlir::Location loc, LValue lvalue) { // TODO: this is where a lot of ObjC lifetime stuff would be done. mlir::Value value = buildScalarExpr(init); - SourceLocRAIIObject Loc{*this, getLoc(D->getSourceRange())}; + SourceLocRAIIObject Loc{*this, loc}; buildStoreThroughLValue(RValue::get(value), lvalue); return; } @@ -301,7 +321,7 @@ void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, } switch (CIRGenFunction::getEvaluationKind(type)) { case TEK_Scalar: - buildScalarInit(init, D, lvalue); + buildScalarInit(init, getLoc(D->getSourceRange()), lvalue); return; case TEK_Complex: { assert(0 && "not implemented"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 4f7897c45cf6..5085082555a8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -224,17 +224,15 @@ void AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) { llvm_unreachable("NYI"); return; case TEK_Aggregate: - llvm_unreachable("NYI"); - // CGF.EmitAggExpr(E, AggValueSlot::forLValue( - // LV, CGF, AggValueSlot::IsDestructed, - // AggValueSlot::DoesNotNeedGCBarriers, - // AggValueSlot::IsNotAliased, - // AggValueSlot::MayOverlap, Dest.isZeroed())); + CGF.buildAggExpr( + E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, + AggValueSlot::MayOverlap, Dest.isZeroed())); return; case TEK_Scalar: if (LV.isSimple()) { - llvm_unreachable("NYI"); - // CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false); + CGF.buildScalarInit(E, CGF.getLoc(E->getSourceRange()), LV); } else { llvm_unreachable("NYI"); // CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 0b27ec3cc779..8654f37d1c82 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -502,6 +502,10 @@ class CIRGenFunction { /// invalid iff the function has no return value. Address ReturnValue = Address::invalid(); + /// A mapping from NRVO variables to the flags used to indicate + /// when the NRVO has been applied to this variable. + llvm::DenseMap NRVOFlags; + /// Counts of the number return expressions in the function. unsigned NumReturnExprs = 0; @@ -1001,7 +1005,7 @@ class CIRGenFunction { void buildNullabilityCheck(LValue LHS, mlir::Value RHS, clang::SourceLocation Loc); - void buildScalarInit(const clang::Expr *init, const clang::ValueDecl *D, + void buildScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue); LValue buildDeclRefLValue(const clang::DeclRefExpr *E); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 7fd6013cb1ea..046db6a5aee1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -428,7 +428,15 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { if (getContext().getLangOpts().ElideConstructors && S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) { - assert(0 && "not implemented"); + assert(!UnimplementedFeature::openMP()); + // Apply the named return value optimization for this return statement, + // which means doing nothing: the appropriate result has already been + // constructed into the NRVO variable. + + // If there is an NRVO flag for this variable, set it to 1 into indicate + // that the cleanup code should not destroy the variable. + if (auto NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) + llvm_unreachable("NYI"); } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) { // Make sure not to return anything, but evaluate the expression // for side effects. diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 7436ccf919b2..870cdbe0edc8 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -53,3 +53,23 @@ auto g() { // CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > // CHECK: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon222 // CHECK: cir.return %4 : !ty_22class2Eanon222 + +auto g2() { + int i = 12; + auto lam = [&] { + i += 100; + return i; + }; + return lam; +} + +// Should be same as above because of NRVO +// CHECK: cir.func @_Z2g2v() -> !ty_22class2Eanon223 { +// CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon223, cir.ptr , ["__retval", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.const(12 : i32) : i32 +// CHECK-NEXT: cir.store %2, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK-NEXT: cir.store %1, %3 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon223 +// CHECK-NEXT: cir.return %4 : !ty_22class2Eanon223 diff --git a/clang/test/CIR/Transforms/lifetime-check-lambda.cpp b/clang/test/CIR/Transforms/lifetime-check-lambda.cpp index b1a4a53bead4..f9733a1ff76d 100644 --- a/clang/test/CIR/Transforms/lifetime-check-lambda.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-lambda.cpp @@ -1,9 +1,20 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -I%S/Inputs -Wno-return-stack-address -fclangir -fclangir-lifetime-check="history=all;history_limit=1" -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// This can be diagnosed by clang with -Wreturn-stack-address auto g() { int i = 12; // expected-note {{declared here but invalid after function end}} return [&] { // expected-warning {{returned lambda captures local variable}} i += 100; return i; }; +} + +// This cannot be diagnosed by -Wreturn-stack-address +auto g2() { + int i = 12; // expected-note {{declared here but invalid after function end}} + auto lam = [&] { + i += 100; + return i; + }; + return lam; // expected-warning {{returned lambda captures local variable}} } \ No newline at end of file From a1ddc7b5eb5597ffce8485d9cfec7e8d385c943c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 17 Mar 2023 14:02:38 -0700 Subject: [PATCH 0828/2301] [CIR][CIRGen] Add ExprWithCleanups support in buildReturnStmt --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 117 +++++++++++++++++---------- clang/test/CIR/CodeGen/lambda.cpp | 17 ++++ 2 files changed, 90 insertions(+), 44 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 046db6a5aee1..b2ea98e7168a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -423,52 +423,81 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { // TODO(cir): LLVM codegen uses a RunCleanupsScope cleanupScope here, we // should model this in face of dtors. - if (const auto *EWC = dyn_cast_or_null(RV)) - assert(0 && "not implemented"); + bool createNewScope = false; + if (const auto *EWC = dyn_cast_or_null(RV)) { + RV = EWC->getSubExpr(); + createNewScope = true; + } - if (getContext().getLangOpts().ElideConstructors && S.getNRVOCandidate() && - S.getNRVOCandidate()->isNRVOVariable()) { - assert(!UnimplementedFeature::openMP()); - // Apply the named return value optimization for this return statement, - // which means doing nothing: the appropriate result has already been - // constructed into the NRVO variable. - - // If there is an NRVO flag for this variable, set it to 1 into indicate - // that the cleanup code should not destroy the variable. - if (auto NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) - llvm_unreachable("NYI"); - } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) { - // Make sure not to return anything, but evaluate the expression - // for side effects. - if (RV) { - assert(0 && "not implemented"); - } - } else if (!RV) { - // Do nothing (return value is left uninitialized) - } else if (FnRetTy->isReferenceType()) { - // If this function returns a reference, take the address of the expression - // rather than the value. - RValue Result = buildReferenceBindingToExpr(RV); - builder.create(loc, Result.getScalarVal(), - ReturnValue.getPointer()); - } else { - mlir::Value V = nullptr; - switch (CIRGenFunction::getEvaluationKind(RV->getType())) { - case TEK_Scalar: - V = buildScalarExpr(RV); - builder.create(loc, V, *FnRetAlloca); - break; - case TEK_Complex: - llvm_unreachable("NYI"); - break; - case TEK_Aggregate: - buildAggExpr(RV, - AggValueSlot::forAddr( - ReturnValue, Qualifiers(), AggValueSlot::IsDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, getOverlapForReturnValue())); - break; + auto handleReturnVal = [&]() { + if (getContext().getLangOpts().ElideConstructors && S.getNRVOCandidate() && + S.getNRVOCandidate()->isNRVOVariable()) { + assert(!UnimplementedFeature::openMP()); + // Apply the named return value optimization for this return statement, + // which means doing nothing: the appropriate result has already been + // constructed into the NRVO variable. + + // If there is an NRVO flag for this variable, set it to 1 into indicate + // that the cleanup code should not destroy the variable. + if (auto NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) + llvm_unreachable("NYI"); + } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) { + // Make sure not to return anything, but evaluate the expression + // for side effects. + if (RV) { + assert(0 && "not implemented"); + } + } else if (!RV) { + // Do nothing (return value is left uninitialized) + } else if (FnRetTy->isReferenceType()) { + // If this function returns a reference, take the address of the + // expression rather than the value. + RValue Result = buildReferenceBindingToExpr(RV); + builder.create(loc, Result.getScalarVal(), + ReturnValue.getPointer()); + } else { + mlir::Value V = nullptr; + switch (CIRGenFunction::getEvaluationKind(RV->getType())) { + case TEK_Scalar: + V = buildScalarExpr(RV); + builder.create(loc, V, *FnRetAlloca); + break; + case TEK_Complex: + llvm_unreachable("NYI"); + break; + case TEK_Aggregate: + buildAggExpr( + RV, AggValueSlot::forAddr( + ReturnValue, Qualifiers(), AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, getOverlapForReturnValue())); + break; + } } + }; + + if (!createNewScope) + handleReturnVal(); + else { + mlir::Location scopeLoc = + getLoc(RV ? RV->getSourceRange() : S.getSourceRange()); + builder.create( + scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + SmallVector locs; + if (loc.isa()) { + locs.push_back(loc); + locs.push_back(loc); + } else if (loc.isa()) { + auto fusedLoc = loc.cast(); + locs.push_back(fusedLoc.getLocations()[0]); + locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{ + locs[0], locs[1], builder.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexScopeGuard{*this, &lexScope}; + handleReturnVal(); + }); } // Create a new return block (if not existent) and add a branch to diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 870cdbe0edc8..66e5eaed0e50 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -73,3 +73,20 @@ auto g2() { // CHECK-NEXT: cir.store %1, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon223 // CHECK-NEXT: cir.return %4 : !ty_22class2Eanon223 + +int f() { + return g2()(); +} + +// CHECK: cir.func @_Z1fv() -> i32 { +// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !ty_22class2Eanon223, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_22class2Eanon223 +// CHECK-NEXT: cir.store %3, %2 : !ty_22class2Eanon223, cir.ptr +// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> i32 +// CHECK-NEXT: cir.store %4, %0 : i32, cir.ptr +// CHECK-NEXT: } +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 +// CHECK-NEXT: cir.return %1 : i32 +// CHECK-NEXT: } From d7b5cc263437e6836bc096792bea62f8eae140bc Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 17 Mar 2023 14:39:21 -0700 Subject: [PATCH 0829/2301] [CIR][Lifetime] Distinguish between enclosing lambda and function, add more complex lambda check --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 19 +++++++++++++------ .../CIR/Transforms/lifetime-check-lambda.cpp | 19 +++++++++++++++++-- 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 60c0e4f9843a..61c243099fa8 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -37,7 +37,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void runOnOperation() override; void checkOperation(Operation *op); - void checkFunc(Operation *op); + void checkFunc(cir::FuncOp fnOp); void checkBlock(Block &block); void checkRegionWithScope(Region ®ion); @@ -71,6 +71,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Tracks current module. ModuleOp theModule; + // Track current function under analysis + std::optional currFunc; // Common helpers. bool isCtorInitPointerFromOwner(CallOp callOp, @@ -542,7 +544,8 @@ void LifetimeCheckPass::checkRegionWithScope(Region ®ion) { checkBlock(block); } -void LifetimeCheckPass::checkFunc(Operation *op) { +void LifetimeCheckPass::checkFunc(cir::FuncOp fnOp) { + currFunc = fnOp; // FIXME: perhaps this should be a function pass, but for now make // sure we reset the state before looking at other functions. if (currPmap) @@ -557,11 +560,12 @@ void LifetimeCheckPass::checkFunc(Operation *op) { // Add a new scope. Note that as part of the scope cleanup process // we apply section 2.3 KILL(x) functionality, turning relevant // references invalid. - for (Region ®ion : op->getRegions()) + for (Region ®ion : fnOp->getRegions()) checkRegionWithScope(region); // FIXME: store the pmap result for this function, we // could do some interesting IPA stuff using this info. + currFunc.reset(); } // The join operation between pmap as described in section 2.3. @@ -1109,8 +1113,11 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, D.attachNote(info.loc) << "at the end of scope or full-expression"; emittedDanglingTasks.insert(warningLoc); } else if (forRetLambda) { + assert(currFunc && "expected function"); + StringRef parent = currFunc->getLambda() ? "lambda" : "function"; D.attachNote(info.val->getLoc()) - << "declared here but invalid after function end"; + << "declared here but invalid after enclosing " << parent + << " ends"; } else { StringRef outOfScopeVarName = getVarNameFromValue(*info.val); D.attachNote(info.loc) << "pointee '" << outOfScopeVarName @@ -1532,8 +1539,8 @@ void LifetimeCheckPass::checkOperation(Operation *op) { } // FIXME: we can do better than sequence of dyn_casts. - if (isa(op)) - return checkFunc(op); + if (auto fnOp = dyn_cast(op)) + return checkFunc(fnOp); if (auto ifOp = dyn_cast(op)) return checkIf(ifOp); if (auto switchOp = dyn_cast(op)) diff --git a/clang/test/CIR/Transforms/lifetime-check-lambda.cpp b/clang/test/CIR/Transforms/lifetime-check-lambda.cpp index f9733a1ff76d..617e18edf499 100644 --- a/clang/test/CIR/Transforms/lifetime-check-lambda.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-lambda.cpp @@ -1,8 +1,11 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -I%S/Inputs -Wno-return-stack-address -fclangir -fclangir-lifetime-check="history=all;history_limit=1" -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// Check also implements: +// EXP61-CPP. A lambda object must not outlive any of its reference captured objects + // This can be diagnosed by clang with -Wreturn-stack-address auto g() { - int i = 12; // expected-note {{declared here but invalid after function end}} + int i = 12; // expected-note {{declared here but invalid after enclosing function ends}} return [&] { // expected-warning {{returned lambda captures local variable}} i += 100; return i; @@ -11,10 +14,22 @@ auto g() { // This cannot be diagnosed by -Wreturn-stack-address auto g2() { - int i = 12; // expected-note {{declared here but invalid after function end}} + int i = 12; // expected-note {{declared here but invalid after enclosing function ends}} auto lam = [&] { i += 100; return i; }; return lam; // expected-warning {{returned lambda captures local variable}} +} + +auto g3(int val) { + auto outer = [val] { + int i = val; // expected-note {{declared here but invalid after enclosing lambda ends}} + auto inner = [&] { + i += 30; + return i; + }; + return inner; // expected-warning {{returned lambda captures local variable}} + }; + return outer(); } \ No newline at end of file From 712bd2f4f66c505bff2867f2e3cd7e11060bd6f6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 17 Mar 2023 17:28:24 -0700 Subject: [PATCH 0830/2301] [CIR][Lifetime] Check for coroutines + lambda combination --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 9 +++++++-- .../Transforms/lifetime-check-coro-task.cpp | 18 +++++++++++++++++- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 61c243099fa8..61c5fa08579b 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1108,8 +1108,13 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, } case InvalidStyle::EndOfScope: { if (tasks.count(histKey)) { - D.attachNote((*info.val).getLoc()) << "coroutine bound to resource " - << "with expired lifetime"; + StringRef resource = "resource"; + if (auto allocaOp = dyn_cast(info.val->getDefiningOp())) { + if (isLambdaType(allocaOp.getAllocaType())) + resource = "lambda"; + } + D.attachNote((*info.val).getLoc()) + << "coroutine bound to " << resource << " with expired lifetime"; D.attachNote(info.loc) << "at the end of scope or full-expression"; emittedDanglingTasks.insert(warningLoc); } else if (forRetLambda) { diff --git a/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp b/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp index 2be677c54cc5..56f97ff0d74e 100644 --- a/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp @@ -8,4 +8,20 @@ folly::coro::Task go1() { // expected-note@-1 {{at the end of scope or full-expression}} co_return co_await task; // expected-remark {{pset => { task, invalid }}} // expected-warning@-1 {{use of coroutine 'task' with dangling reference}} -} \ No newline at end of file +} + +folly::coro::Task go1_lambda() { + auto task = [i = 3]() -> folly::coro::Task { // expected-note {{coroutine bound to lambda with expired lifetime}} + co_return i; + }(); // expected-note {{at the end of scope or full-expression}} + co_return co_await task; // expected-remark {{pset => { task, invalid }}} + // expected-warning@-1 {{use of coroutine 'task' with dangling reference}} +} + +folly::coro::Task go2_lambda() { + auto task = []() -> folly::coro::Task { // expected-note {{coroutine bound to lambda with expired lifetime}} + co_return 3; + }(); // expected-note {{at the end of scope or full-expression}} + co_return co_await task; // expected-remark {{pset => { task, invalid }}} + // expected-warning@-1 {{use of coroutine 'task' with dangling reference}} +} From 2c17247ae6d760082a53b939407716b3f22aa9f7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 20 Mar 2023 15:53:31 -0700 Subject: [PATCH 0831/2301] [CIR][NFC] Add custom assembly handlers for CallOp, and trim of unused builders --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 20 +------- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 53 ++++++++++++++++++++ 2 files changed, 55 insertions(+), 18 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 6c60300a2477..35e5be934930 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1366,21 +1366,6 @@ def CallOp : CIR_Op<"call", $_state.addOperands(operands); $_state.addAttribute("callee", SymbolRefAttr::get(callee)); $_state.addTypes(callee.getFunctionType().getResults()); - }]>, - OpBuilder<(ins "SymbolRefAttr":$callee, "TypeRange":$results, - CArg<"ValueRange", "{}">:$operands), [{ - $_state.addOperands(operands); - $_state.addAttribute("callee", callee); - $_state.addTypes(results); - }]>, - OpBuilder<(ins "StringAttr":$callee, "TypeRange":$results, - CArg<"ValueRange", "{}">:$operands), [{ - build($_builder, $_state, SymbolRefAttr::get(callee), results, operands); - }]>, - OpBuilder<(ins "StringRef":$callee, "TypeRange":$results, - CArg<"ValueRange", "{}">:$operands), [{ - build($_builder, $_state, StringAttr::get($_builder.getContext(), callee), - results, operands); }]>]; let extraClassDeclaration = [{ @@ -1390,9 +1375,8 @@ def CallOp : CIR_Op<"call", operand_iterator arg_operand_end() { return operand_end(); } }]; - let assemblyFormat = [{ - $callee `(` $operands `)` attr-dict `:` functional-type($operands, results) - }]; + let hasCustomAssemblyFormat = 1; + let skipDefaultBuilders = 1; let hasVerifier = 0; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index de71936061c8..176c61721ac9 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1404,6 +1404,59 @@ FunctionType CallOp::getCalleeType() { return FunctionType::get(getContext(), getOperandTypes(), getResultTypes()); } +::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { + mlir::FlatSymbolRefAttr calleeAttr; + llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> ops; + llvm::SMLoc opsLoc; + (void)opsLoc; + llvm::ArrayRef<::mlir::Type> operandsTypes; + llvm::ArrayRef<::mlir::Type> allResultTypes; + + if (parser.parseCustomAttributeWithFallback( + calleeAttr, parser.getBuilder().getType<::mlir::NoneType>(), "callee", + result.attributes)) { + return ::mlir::failure(); + } + if (parser.parseLParen()) + return ::mlir::failure(); + + opsLoc = parser.getCurrentLocation(); + if (parser.parseOperandList(ops)) + return ::mlir::failure(); + if (parser.parseRParen()) + return ::mlir::failure(); + if (parser.parseOptionalAttrDict(result.attributes)) + return ::mlir::failure(); + if (parser.parseColon()) + return ::mlir::failure(); + + ::mlir::FunctionType opsFnTy; + if (parser.parseType(opsFnTy)) + return ::mlir::failure(); + operandsTypes = opsFnTy.getInputs(); + allResultTypes = opsFnTy.getResults(); + result.addTypes(allResultTypes); + if (parser.resolveOperands(ops, operandsTypes, opsLoc, result.operands)) + return ::mlir::failure(); + return ::mlir::success(); +} + +void CallOp::print(::mlir::OpAsmPrinter &state) { + state << ' '; + state.printAttributeWithoutType(getCalleeAttr()); + state << "("; + state << getOperands(); + state << ")"; + llvm::SmallVector<::llvm::StringRef, 2> elidedAttrs; + elidedAttrs.push_back("callee"); + state.printOptionalAttrDict((*this)->getAttrs(), elidedAttrs); + state << ' ' << ":"; + state << ' '; + state.printFunctionalType(getOperands().getTypes(), + getOperation()->getResultTypes()); +} + //===----------------------------------------------------------------------===// // UnaryOp //===----------------------------------------------------------------------===// From eb343d10d11586c06db989887cd68ccd03a49bcf Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 20 Mar 2023 16:20:57 -0700 Subject: [PATCH 0832/2301] [CIR][NFC] Make symbol refs on CallOp optional --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 +++++++++++----- .../lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 9 +++++++-- 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 35e5be934930..8059d649f6dd 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1358,7 +1358,7 @@ def CallOp : CIR_Op<"call", ``` }]; - let arguments = (ins FlatSymbolRefAttr:$callee, Variadic:$operands); + let arguments = (ins OptionalAttr:$callee, Variadic:$operands); let results = (outs Variadic); let builders = [ diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 176c61721ac9..e29075b32961 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1413,11 +1413,11 @@ ::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, llvm::ArrayRef<::mlir::Type> operandsTypes; llvm::ArrayRef<::mlir::Type> allResultTypes; - if (parser.parseCustomAttributeWithFallback( - calleeAttr, parser.getBuilder().getType<::mlir::NoneType>(), "callee", - result.attributes)) { + if (!parser.parseOptionalAttribute(calleeAttr, "callee", result.attributes) + .has_value()) { return ::mlir::failure(); } + if (parser.parseLParen()) return ::mlir::failure(); @@ -1444,9 +1444,15 @@ ::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, void CallOp::print(::mlir::OpAsmPrinter &state) { state << ' '; - state.printAttributeWithoutType(getCalleeAttr()); + auto ops = getOperands(); + + if (getCallee()) { // Direct calls + state.printAttributeWithoutType(getCalleeAttr()); + } else { + llvm_unreachable("NYI"); + } state << "("; - state << getOperands(); + state << ops; state << ")"; llvm::SmallVector<::llvm::StringRef, 2> elidedAttrs; elidedAttrs.push_back("callee"); diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 61c5fa08579b..f265ff8f8894 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1481,7 +1481,12 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // // Note that we can't reliably know if a function is a coroutine only as // part of declaration - auto calleeFuncOp = getCalleeFromSymbol(theModule, callOp.getCallee()); + + // Indirect calls are not yet supported. + assert(callOp.getCallee() && "NYI"); + + auto fnName = *callOp.getCallee(); + auto calleeFuncOp = getCalleeFromSymbol(theModule, fnName); if (calleeFuncOp && (calleeFuncOp.getCoroutine() || (calleeFuncOp.isDeclaration() && callOp->getNumResults() > 0 && @@ -1489,7 +1494,7 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { currScope->localTempTasks.insert(callOp->getResult(0)); } - const auto *methodDecl = getMethod(theModule, callOp.getCallee()); + const auto *methodDecl = getMethod(theModule, fnName); if (!isOwnerOrPointerClassMethod(callOp.getOperand(0), methodDecl)) return checkOtherMethodsAndFunctions(callOp, methodDecl); From fda34044b8561946919ff86cb2737026b67c1829 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 20 Mar 2023 16:33:18 -0700 Subject: [PATCH 0833/2301] [CIR] Support indirect calls in CallOp - Add print/parsing/builders - Testcases coming as part of codegen support --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 7 +++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 12 +++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8059d649f6dd..c0f9fea3a91f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1366,6 +1366,13 @@ def CallOp : CIR_Op<"call", $_state.addOperands(operands); $_state.addAttribute("callee", SymbolRefAttr::get(callee)); $_state.addTypes(callee.getFunctionType().getResults()); + }]>, + OpBuilder<(ins "Value":$ind_target, + "FunctionType":$fn_type, + CArg<"ValueRange", "{}">:$operands), [{ + $_state.addOperands(ValueRange{ind_target}); + $_state.addOperands(operands); + $_state.addTypes(fn_type.getResults()); }]>]; let extraClassDeclaration = [{ diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index e29075b32961..9c68cc5591a2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1413,9 +1413,14 @@ ::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, llvm::ArrayRef<::mlir::Type> operandsTypes; llvm::ArrayRef<::mlir::Type> allResultTypes; + // If we cannot parse a string callee, it means this is an indirect call. if (!parser.parseOptionalAttribute(calleeAttr, "callee", result.attributes) .has_value()) { - return ::mlir::failure(); + OpAsmParser::UnresolvedOperand indirectVal; + mlir::Type indirectValTy; + if (parser.parseOperand(indirectVal) || + parser.resolveOperand(indirectVal, indirectValTy, result.operands)) + return failure(); } if (parser.parseLParen()) @@ -1448,8 +1453,9 @@ void CallOp::print(::mlir::OpAsmPrinter &state) { if (getCallee()) { // Direct calls state.printAttributeWithoutType(getCalleeAttr()); - } else { - llvm_unreachable("NYI"); + } else { // Indirect calls + state << ops.front(); + ops.drop_front(); } state << "("; state << ops; From 154e16aba6ad4adf9208698fa77c95d483bcee7e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 20 Mar 2023 14:48:14 -0700 Subject: [PATCH 0834/2301] [CIR][CIRGen][NFC] Make CIRGenCallee use mlir::Operation instead of cir::FuncOp --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 6 ++++-- clang/lib/CIR/CodeGen/CIRGenCall.h | 13 ++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index a0bd46e2cbfb..2e89eb25dda5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -481,8 +481,10 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // Emit the actual call op. auto callLoc = CGM.getLoc(Loc); assert(builder.getInsertionBlock() && "expected valid basic block"); - auto theCall = - builder.create(callLoc, CalleePtr, CIRCallArgs); + + auto fnOp = dyn_cast(CalleePtr); + assert(fnOp && "only direct call supported"); + auto theCall = builder.create(callLoc, fnOp, CIRCallArgs); if (callOrInvoke) callOrInvoke = &theCall; diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 25154afb92c5..42dda5882f7f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -86,9 +86,9 @@ class CIRGenCallee { // Construct a callee. Call this constructor directly when this isn't a direct // call. CIRGenCallee(const CIRGenCalleeInfo &abstractInfo, - mlir::cir::FuncOp functionPtr) - : KindOrFunctionPointer(SpecialKind( - reinterpret_cast(functionPtr.getAsOpaquePointer()))) { + mlir::Operation *functionPtr) + : KindOrFunctionPointer( + SpecialKind(reinterpret_cast(functionPtr))) { AbstractInfo = abstractInfo; assert(functionPtr && "configuring callee without function pointer"); // TODO: codegen asserts functionPtr is a pointer @@ -97,7 +97,7 @@ class CIRGenCallee { } static CIRGenCallee - forDirect(mlir::cir::FuncOp functionPtr, + forDirect(mlir::Operation *functionPtr, const CIRGenCalleeInfo &abstractInfo = CIRGenCalleeInfo()) { return CIRGenCallee(abstractInfo, functionPtr); } @@ -135,10 +135,9 @@ class CIRGenCallee { /// callee CIRGenCallee prepareConcreteCallee(CIRGenFunction &CGF) const; - mlir::cir::FuncOp getFunctionPointer() const { + mlir::Operation *getFunctionPointer() const { assert(isOrdinary()); - return mlir::cir::FuncOp::getFromOpaquePointer( - reinterpret_cast(KindOrFunctionPointer)); + return reinterpret_cast(KindOrFunctionPointer); } CIRGenCalleeInfo getAbstractInfo() const { From a287298ec17a5901aee7dab2c537c18de11b98bb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 22 Mar 2023 14:59:14 -0700 Subject: [PATCH 0835/2301] [CIR][CIRGen][NFC] Add createStore helper --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 359d771a63f5..a378b6bcf01d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -186,6 +186,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return create(loc, addr.getElementType(), addr.getPointer()); } + + mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, + Address dst) { + return create(loc, val, dst.getPointer()); + } }; } // namespace cir From da394b6991390fdb5fe696200fbf7a68045f2129 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 22 Mar 2023 15:01:27 -0700 Subject: [PATCH 0836/2301] [CIR][CIRGen][NFC] Make buildBranchThroughCleanup a bit concise --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 16 +++++----------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 10 +++++++--- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 6 +++--- .../CIR/CodeGen/UnimplementedFeatureGuarding.h | 1 + 4 files changed, 16 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 3ebaafac066d..c026a70eaa4f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -26,21 +26,15 @@ using namespace mlir::cir; /// or with the labeled blocked if already solved. /// /// Track on scope basis, goto's we need to fix later. -mlir::LogicalResult -CIRGenFunction::buildBranchThroughCleanup(JumpDest &Dest, LabelDecl *L, - mlir::Location Loc) { +mlir::cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc, + JumpDest Dest) { // Remove this once we go for making sure unreachable code is // well modeled (or not). assert(builder.getInsertionBlock() && "not yet implemented"); + assert(!UnimplementedFeature::ehStack()); // Insert a branch: to the cleanup block (unsolved) or to the already // materialized label. Keep track of unsolved goto's. - mlir::Block *DstBlock = Dest.getBlock(); - auto G = builder.create( - Loc, Dest.isValid() ? DstBlock - : currLexScope->getOrCreateCleanupBlock(builder)); - if (!Dest.isValid()) - currLexScope->PendingGotos.push_back(std::make_pair(G, L)); - - return mlir::success(); + return builder.create(Loc, Dest.isValid() ? Dest.getBlock() + : ReturnBlock().getBlock()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 8654f37d1c82..2bcd89a79f91 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -498,6 +498,12 @@ class CIRGenFunction { /// The GlobalDecl for the current function being compiled. clang::GlobalDecl CurGD; + /// Unified return block. + /// Not that for LLVM codegen this is a memeber variable instead. + JumpDest ReturnBlock() { + return JumpDest(currLexScope->getOrCreateCleanupBlock(builder)); + } + /// The temporary alloca to hold the return value. This is /// invalid iff the function has no return value. Address ReturnValue = Address::invalid(); @@ -996,9 +1002,7 @@ class CIRGenFunction { /// is 'Ty'. void buildStoreThroughLValue(RValue Src, LValue Dst); - mlir::LogicalResult buildBranchThroughCleanup(JumpDest &Dest, - clang::LabelDecl *L, - mlir::Location Loc); + mlir::cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is /// nonnull, if 1\p LHS is marked _Nonnull. diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index b2ea98e7168a..ef40332ae715 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -525,9 +525,9 @@ mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { // Build a cir.br to the target label. auto &JD = LabelMap[S.getLabel()]; - if (buildBranchThroughCleanup(JD, S.getLabel(), getLoc(S.getSourceRange())) - .failed()) - return mlir::failure(); + auto brOp = buildBranchThroughCleanup(getLoc(S.getSourceRange()), JD); + if (!JD.isValid()) + currLexScope->PendingGotos.push_back(std::make_pair(brOp, S.getLabel())); // Insert the new block to continue codegen after goto. builder.createBlock(builder.getBlock()->getParent()); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index e666e212a774..054c7365950b 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -62,6 +62,7 @@ struct UnimplementedFeature { static bool cxxABI() { return false; } static bool openCL() { return false; } static bool openMP() { return false; } + static bool ehStack() { return false; } }; } // namespace cir From bbb8099574a609b8889c5619da25e6cb93559afa Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 20 Mar 2023 14:09:09 -0700 Subject: [PATCH 0837/2301] [CIR][CIRGen] Add codegen support for indirect calls - Load address of functions - Support for function to pointer decay - Add support for building reference call arguments. - Tricky lambda handling when static invokers are involved. - More functionality on buildCall - Fix verifiers to account for indirect calls. - Add support for more casts: UserDefinedConversion and UserConversion --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 31 ++++--- clang/lib/CIR/CodeGen/CIRGenCall.h | 5 ++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 84 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 48 ++++++++++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 7 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 18 +++- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 14 ++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 31 ++++--- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 5 +- clang/test/CIR/CodeGen/coro-task.cpp | 42 +++++++++- clang/test/CIR/CodeGen/lambda.cpp | 44 ++++++++++ 13 files changed, 304 insertions(+), 34 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 2e89eb25dda5..af1f2006dedb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -290,7 +290,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, mlir::cir::CallOp *callOrInvoke, - bool IsMustTail, SourceLocation Loc) { + bool IsMustTail, mlir::Location loc) { auto builder = CGM.getBuilder(); // FIXME: We no longer need the types from CallArgs; lift up and simplify @@ -301,6 +301,8 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, QualType RetTy = CallInfo.getReturnType(); const auto &RetAI = CallInfo.getReturnInfo(); + mlir::FunctionType CIRFuncTy = getTypes().GetFunctionType(CallInfo); + const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); // This is not always tied to a FunctionDecl (e.g. builtins that are xformed @@ -406,7 +408,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { auto SrcTy = Src.getElementType(); // FIXME(cir): get proper location for each argument. - auto argLoc = CGM.getLoc(Loc); + auto argLoc = loc; // If the source type is smaller than the destination type of the // coerce-to logic, copy the source value into a temp alloca the size @@ -479,12 +481,19 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: alignment attributes // Emit the actual call op. - auto callLoc = CGM.getLoc(Loc); + auto callLoc = loc; assert(builder.getInsertionBlock() && "expected valid basic block"); - auto fnOp = dyn_cast(CalleePtr); - assert(fnOp && "only direct call supported"); - auto theCall = builder.create(callLoc, fnOp, CIRCallArgs); + mlir::cir::CallOp theCall; + if (auto fnOp = dyn_cast(CalleePtr)) { + assert(fnOp && "only direct call supported"); + theCall = builder.create(callLoc, fnOp, CIRCallArgs); + } else if (auto loadOp = dyn_cast(CalleePtr)) { + theCall = builder.create(callLoc, loadOp->getResult(0), + CIRFuncTy, CIRCallArgs); + } else { + llvm_unreachable("expected call variant to be handled"); + } if (callOrInvoke) callOrInvoke = &theCall; @@ -684,7 +693,7 @@ void CIRGenFunction::buildCallArgs( CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; unsigned InitialArgSize = Args.size(); assert(!isa(*Arg) && "NYI"); - assert(!isa(AC.getDecl()) && "NYI"); + assert(!isa_and_nonnull(AC.getDecl()) && "NYI"); buildCallArg(Args, *Arg, ArgTypes[Idx]); // In particular, we depend on it being the last arg in Args, and the @@ -898,11 +907,11 @@ void CIRGenFunction::buildDelegateCallArg(CallArgList &args, // GetAddrOfLocalVar returns a pointer-to-pointer for references, but the // argument needs to be the original pointer. if (type->isReferenceType()) { - - llvm_unreachable("NYI"); + args.add( + RValue::get(builder.createLoad(getLoc(param->getSourceRange()), local)), + type); } else if (getLangOpts().ObjCAutoRefCount) { llvm_unreachable("NYI"); - // For the most part, we just need to load the alloca, except that aggregate // r-values are actually pointers to temporaries. } else { @@ -1035,7 +1044,7 @@ CIRGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD); } - llvm_unreachable("NYI"); + return arrangeFreeFunctionType(prototype); } /// Arrange the argument and result information for a call to an unknown C++ diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 42dda5882f7f..77ceccd67360 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -35,6 +35,11 @@ class CIRGenCalleeInfo { public: explicit CIRGenCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl() {} + CIRGenCalleeInfo(const clang::FunctionProtoType *calleeProtoTy, + clang::GlobalDecl calleeDecl) + : CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {} + CIRGenCalleeInfo(const clang::FunctionProtoType *calleeProtoTy) + : CalleeProtoTy(calleeProtoTy) {} CIRGenCalleeInfo(clang::GlobalDecl calleeDecl) : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {} diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 1546030d75f0..ddb3486c199a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -652,7 +652,7 @@ void CIRGenFunction::buildImplicitAssignmentOperatorBody( const CompoundStmt *RootCS = cast(RootS); // LexicalScope Scope(*this, RootCS->getSourceRange()); - // FIXME: add all of the below under a new scope. + // FIXME(cir): add all of the below under a new scope. assert(!UnimplementedFeature::incrementProfileCounter()); AssignmentMemcpyizer AM(*this, AssignOp, Args); @@ -660,3 +660,85 @@ void CIRGenFunction::buildImplicitAssignmentOperatorBody( AM.emitAssignment(I); AM.finish(); } + +void CIRGenFunction::buildForwardingCallToLambda( + const CXXMethodDecl *callOperator, CallArgList &callArgs) { + // Get the address of the call operator. + const auto &calleeFnInfo = + CGM.getTypes().arrangeCXXMethodDeclaration(callOperator); + auto calleePtr = CGM.GetAddrOfFunction( + GlobalDecl(callOperator), CGM.getTypes().GetFunctionType(calleeFnInfo)); + + // Prepare the return slot. + const FunctionProtoType *FPT = + callOperator->getType()->castAs(); + QualType resultType = FPT->getReturnType(); + ReturnValueSlot returnSlot; + if (!resultType->isVoidType() && + calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect && + !hasScalarEvaluationKind(calleeFnInfo.getReturnType())) { + llvm_unreachable("NYI"); + } + + // We don't need to separately arrange the call arguments because + // the call can't be variadic anyway --- it's impossible to forward + // variadic arguments. + + // Now emit our call. + auto callee = CIRGenCallee::forDirect(calleePtr, GlobalDecl(callOperator)); + RValue RV = buildCall(calleeFnInfo, callee, returnSlot, callArgs); + + // If necessary, copy the returned value into the slot. + if (!resultType->isVoidType() && returnSlot.isNull()) { + if (getLangOpts().ObjCAutoRefCount && resultType->isObjCRetainableType()) + llvm_unreachable("NYI"); + buildReturnOfRValue(*currSrcLoc, RV, resultType); + } else { + llvm_unreachable("NYI"); + } +} + +void CIRGenFunction::buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { + const CXXRecordDecl *Lambda = MD->getParent(); + + // Start building arguments for forwarding call + CallArgList CallArgs; + + QualType LambdaType = getContext().getRecordType(Lambda); + QualType ThisType = getContext().getPointerType(LambdaType); + Address ThisPtr = + CreateMemTemp(LambdaType, getLoc(MD->getSourceRange()), "unused.capture"); + CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType); + + // Add the rest of the parameters. + for (auto *Param : MD->parameters()) + buildDelegateCallArg(CallArgs, Param, Param->getBeginLoc()); + + const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator(); + // For a generic lambda, find the corresponding call operator specialization + // to which the call to the static-invoker shall be forwarded. + if (Lambda->isGenericLambda()) { + assert(MD->isFunctionTemplateSpecialization()); + const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs(); + FunctionTemplateDecl *CallOpTemplate = + CallOp->getDescribedFunctionTemplate(); + void *InsertPos = nullptr; + FunctionDecl *CorrespondingCallOpSpecialization = + CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos); + assert(CorrespondingCallOpSpecialization); + CallOp = cast(CorrespondingCallOpSpecialization); + } + buildForwardingCallToLambda(CallOp, CallArgs); +} + +void CIRGenFunction::buildLambdaStaticInvokeBody(const CXXMethodDecl *MD) { + if (MD->isVariadic()) { + // Codgen for LLVM doesn't emit code for this as well, it says: + // FIXME: Making this work correctly is nasty because it requires either + // cloning the body of the call operator or making the call operator + // forward. + llvm_unreachable("NYI"); + } + + buildLambdaDelegatingInvokeBody(MD); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index e43bbc35503b..b25bbdc8e4c2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -270,6 +270,27 @@ CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { assert(!dyn_cast(E) && "NYI"); assert(!dyn_cast(E) && "NYI"); + // Otherwise, we have an indirect reference. + mlir::Value calleePtr; + QualType functionType; + if (auto ptrType = E->getType()->getAs()) { + calleePtr = buildScalarExpr(E); + functionType = ptrType->getPointeeType(); + } else { + functionType = E->getType(); + calleePtr = buildLValue(E).getPointer(); + } + assert(functionType->isFunctionType()); + + GlobalDecl GD; + if (const auto *VD = + dyn_cast_or_null(E->getReferencedDeclOfCallee())) + GD = GlobalDecl(VD); + + CIRGenCalleeInfo calleeInfo(functionType->getAs(), GD); + CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp()); + return callee; + assert(false && "Nothing else supported yet!"); } @@ -393,6 +414,21 @@ static LValue buildCapturedFieldLValue(CIRGenFunction &CGF, const FieldDecl *FD, return CGF.buildLValueForField(LV, FD); } +static LValue buildFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E, + GlobalDecl GD) { + const FunctionDecl *FD = cast(GD.getDecl()); + auto funcOp = buildFunctionDeclPointer(CGF.CGM, GD); + auto loc = CGF.getLoc(E->getSourceRange()); + CharUnits align = CGF.getContext().getDeclAlign(FD); + + auto fnTy = funcOp.getFunctionType(); + auto ptrTy = mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fnTy); + auto addr = CGF.getBuilder().create( + loc, ptrTy, funcOp.getSymName()); + return CGF.makeAddrLValue(Address(addr, fnTy, align), E->getType(), + AlignmentSource::Decl); +} + LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { const NamedDecl *ND = E->getDecl(); QualType T = E->getType(); @@ -491,6 +527,16 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { return LV; } + if (const auto *FD = dyn_cast(ND)) { + LValue LV = buildFunctionDeclLValue(*this, E, FD); + + // Emit debuginfo for the function declaration if the target wants to. + if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) + assert(!UnimplementedFeature::generateDebugInfo()); + + return LV; + } + llvm_unreachable("Unhandled DeclRefExpr?"); } @@ -750,7 +796,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, assert(!MustTailCall && "Must tail NYI"); mlir::cir::CallOp callOP = nullptr; RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, &callOP, - E == MustTailCall, E->getExprLoc()); + E == MustTailCall, getLoc(E->getExprLoc())); assert(!getDebugInfo() && "Debug Info NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 26abb55c45cd..a47c5e637258 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -84,9 +84,10 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorCall( *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs); auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall( Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize); + assert((CE || currSrcLoc) && "expected source location"); + mlir::Location loc = CE ? getLoc(CE->getExprLoc()) : *currSrcLoc; return buildCall(FnInfo, Callee, ReturnValue, Args, nullptr, - CE && CE == MustTailCall, - CE ? CE->getExprLoc() : SourceLocation()); + CE && CE == MustTailCall, loc); } RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 8571f269f36e..ab1cf79a98cf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -961,7 +961,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_NonAtomicToAtomic: llvm_unreachable("NYI"); case CK_UserDefinedConversion: - llvm_unreachable("NYI"); + return Visit(const_cast(E)); case CK_NoOp: { auto V = Visit(const_cast(E)); if (V) { @@ -983,7 +983,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_ArrayToPointerDecay: return CGF.buildArrayToPointerDecay(E).getPointer(); case CK_FunctionToPointerDecay: - llvm_unreachable("NYI"); + return buildLValue(E).getPointer(); case CK_NullToPointer: { // FIXME: use MustVisitNullValue(E) and evaluate expr. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 78e44242d70d..051025df5ab5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -496,7 +496,10 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, llvm_unreachable("NYI"); else if (isa(FD) && cast(FD)->isLambdaStaticInvoker()) { - llvm_unreachable("NYI"); + // The lambda static invoker function is special, because it forwards or + // clones the body of the function call operator (but is actually static). + SourceLocRAIIObject Loc{*this, FnBeginLoc}; + buildLambdaStaticInvokeBody(cast(FD)); } else if (FD->isDefaulted() && isa(FD) && (cast(FD)->isCopyAssignmentOperator() || cast(FD)->isMoveAssignmentOperator())) { @@ -641,7 +644,7 @@ void CIRGenFunction::buildCXXConstructorCall( Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); mlir::cir::CallOp C; - buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, Loc); + buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, getLoc(Loc)); assert(CGM.getCodeGenOpts().OptimizationLevel == 0 || ClassDecl->isDynamicClass() || Type == Ctor_Base || diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 2bcd89a79f91..b7b304b635e9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -523,6 +523,11 @@ class CIRGenFunction { LambdaCaptureFields; clang::FieldDecl *LambdaThisCaptureField = nullptr; + void buildForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator, + CallArgList &CallArgs); + void buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD); + void buildLambdaStaticInvokeBody(const CXXMethodDecl *MD); + /// When generating code for a C++ member function, this will /// hold the implicit 'this' declaration. clang::ImplicitParamDecl *CXXABIThisDecl = nullptr; @@ -650,6 +655,8 @@ class CIRGenFunction { // as soon as we add a DebugInfo type to this class. std::nullptr_t *getDebugInfo() { return nullptr; } + void buildReturnOfRValue(mlir::Location loc, RValue RV, QualType Ty); + /// Set the address of a local variable. void setAddrOfLocalVar(const clang::VarDecl *VD, Address Addr) { assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!"); @@ -792,7 +799,16 @@ class CIRGenFunction { RValue buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, mlir::cir::CallOp *callOrInvoke, - bool IsMustTail, clang::SourceLocation Loc); + bool IsMustTail, mlir::Location loc); + RValue buildCall(const CIRGenFunctionInfo &CallInfo, + const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, + const CallArgList &Args, + mlir::cir::CallOp *callOrInvoke = nullptr, + bool IsMustTail = false) { + assert(currSrcLoc && "source location must have been set"); + return buildCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke, + IsMustTail, *currSrcLoc); + } RValue buildCall(clang::QualType FnType, const CIRGenCallee &Callee, const clang::CallExpr *E, ReturnValueSlot returnValue, mlir::Value Chain = nullptr); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index ef40332ae715..192aadca3ecf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -986,3 +986,17 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { terminateCaseRegion(r, swop.getLoc()); return mlir::success(); } + +void CIRGenFunction::buildReturnOfRValue(mlir::Location loc, RValue RV, + QualType Ty) { + if (RV.isScalar()) { + builder.createStore(loc, RV.getScalarVal(), ReturnValue); + } else if (RV.isAggregate()) { + LValue Dest = makeAddrLValue(ReturnValue, Ty); + LValue Src = makeAddrLValue(RV.getAggregateAddress(), Ty); + buildAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue()); + } else { + llvm_unreachable("NYI"); + } + buildBranchThroughCleanup(loc, ReturnBlock()); +} \ No newline at end of file diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9c68cc5591a2..38bce249845c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1136,18 +1136,26 @@ void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, LogicalResult GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // Verify that the result type underlying pointer type matches the type of the - // referenced cir.global op. - auto global = - symbolTable.lookupNearestSymbolFrom(*this, getNameAttr()); - if (!global) + // referenced cir.global or cir.func op. + auto op = symbolTable.lookupNearestSymbolFrom(*this, getNameAttr()); + if (!(isa(op) || isa(op))) return emitOpError("'") - << getName() << "' does not reference a valid cir.global"; + << getName() + << "' does not reference a valid cir.global or cir.func"; + + mlir::Type symTy; + if (auto g = dyn_cast(op)) + symTy = g.getSymType(); + else if (auto f = dyn_cast(op)) + symTy = f.getFunctionType(); + else + llvm_unreachable("shall not get here"); auto resultType = getAddr().getType().dyn_cast(); - if (!resultType || global.getSymType() != resultType.getPointee()) + if (!resultType || symTy != resultType.getPointee()) return emitOpError("result type pointee type '") - << resultType.getPointee() << "' does not match type " - << global.getSymType() << " of the global @" << getName(); + << resultType.getPointee() << "' does not match type " << symTy + << " of the global @" << getName(); return success(); } @@ -1365,10 +1373,11 @@ void cir::CallOp::setCalleeFromCallable(::mlir::CallInterfaceCallable callee) { LogicalResult cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { - // Check that the callee attribute was specified. + // Callee attribute only need on indirect calls. auto fnAttr = (*this)->getAttrOfType("callee"); if (!fnAttr) - return emitOpError("requires a 'callee' symbol reference attribute"); + return success(); + FuncOp fn = symbolTable.lookupNearestSymbolFrom(*this, fnAttr); if (!fn) @@ -1455,7 +1464,7 @@ void CallOp::print(::mlir::OpAsmPrinter &state) { state.printAttributeWithoutType(getCalleeAttr()); } else { // Indirect calls state << ops.front(); - ops.drop_front(); + ops = ops.drop_front(); } state << "("; state << ops; diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index f265ff8f8894..8f607f187599 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1482,8 +1482,9 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // Note that we can't reliably know if a function is a coroutine only as // part of declaration - // Indirect calls are not yet supported. - assert(callOp.getCallee() && "NYI"); + // FIXME: Indirect calls are not yet supported. + if (!callOp.getCallee()) + return; auto fnName = *callOp.getCallee(); auto calleeFuncOp = getCalleeFromSymbol(theModule, fnName); diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 785788385c6a..c9f587e49536 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -342,4 +342,44 @@ folly::coro::Task go1_lambda() { } // CHECK: cir.func coroutine lambda internal @_ZZ10go1_lambdavENK3$_0clEv -// CHECK: cir.func coroutine @_Z10go1_lambdav() \ No newline at end of file +// CHECK: cir.func coroutine @_Z10go1_lambdav() + +folly::coro::Task go4() { + auto* fn = +[](int const& i) -> folly::coro::Task { co_return i; }; + auto task = fn(3); + co_return co_await std::move(task); +} + +// CHECK: cir.func coroutine @_Z3go4v() + +// CHECK: cir.await(init, ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: },) +// CHECK: } + +// CHECK: %12 = cir.scope { +// CHECK: %17 = cir.alloca !ty_22class2Eanon221, cir.ptr , ["ref.tmp1"] {alignment = 1 : i64} + +// Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` +// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> +// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> +// CHECK: cir.yield %19 : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> +// CHECK: } +// CHECK: cir.store %12, %3 : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, cir.ptr ) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>> +// CHECK: cir.scope { +// CHECK: %17 = cir.alloca i32, cir.ptr , ["ref.tmp2", init] {alignment = 4 : i64} +// CHECK: %18 = cir.load %3 : cir.ptr ) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>>, !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> +// CHECK: %19 = cir.const(3 : i32) : i32 +// CHECK: cir.store %19, %17 : i32, cir.ptr + +// Call invoker, which calls operator() indirectly. +// CHECK: %20 = cir.call %18(%17) : (!cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, !cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221 +// CHECK: cir.store %20, %4 : !ty_22struct2Efolly3A3Acoro3A3ATask221, cir.ptr +// CHECK: } + +// CHECK: cir.await(user, ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: },) +// CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 66e5eaed0e50..f9eea7e3962c 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -90,3 +90,47 @@ int f() { // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %1 : i32 // CHECK-NEXT: } + +int g3() { + auto* fn = +[](int const& i) -> int { return i; }; + auto task = fn(3); + return task; +} + +// lambda operator() +// CHECK: cir.func lambda internal @_ZZ2g3vENK3$_0clERKi + +// lambda __invoke() +// CHECK: cir.func internal @_ZZ2g3vEN3$_08__invokeERKi + +// lambda operator int (*)(int const&)() +// CHECK: cir.func internal @_ZZ2g3vENK3$_0cvPFiRKiEEv + +// CHECK: cir.func @_Z2g3v() -> i32 { +// CHECK: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !cir.ptr<(!cir.ptr) -> i32>, cir.ptr ) -> i32>>, ["fn", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca i32, cir.ptr , ["task", init] {alignment = 4 : i64} + +// 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. +// CHECK: %3 = cir.scope { +// CHECK: %7 = cir.alloca !ty_22class2Eanon224, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr<(!cir.ptr) -> i32> +// CHECK: %9 = cir.unary(plus, %8) : !cir.ptr<(!cir.ptr) -> i32>, !cir.ptr<(!cir.ptr) -> i32> +// CHECK: cir.yield %9 : !cir.ptr<(!cir.ptr) -> i32> +// CHECK: } + +// 2. Load ptr to `__invoke()`. +// CHECK: cir.store %3, %1 : !cir.ptr<(!cir.ptr) -> i32>, cir.ptr ) -> i32>> +// CHECK: %4 = cir.scope { +// CHECK: %7 = cir.alloca i32, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} +// CHECK: %8 = cir.load %1 : cir.ptr ) -> i32>>, !cir.ptr<(!cir.ptr) -> i32> +// CHECK: %9 = cir.const(3 : i32) : i32 +// CHECK: cir.store %9, %7 : i32, cir.ptr + +// 3. Call `__invoke()`, which effectively executes `operator()`. +// CHECK: %10 = cir.call %8(%7) : (!cir.ptr<(!cir.ptr) -> i32>, !cir.ptr) -> i32 +// CHECK: cir.yield %10 : i32 +// CHECK: } + +// CHECK: } + From 124eeab8f601fd502881d69475adffb8ce8c69e5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 22 Mar 2023 21:18:51 -0700 Subject: [PATCH 0838/2301] [CIR][Lifetime][NFC] Factor out some of coroutine detection --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 29 ++++++++++++------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 8f607f187599..5271ab64da87 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -59,6 +59,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { bool forRetLambda = false); void checkCoroTaskStore(StoreOp storeOp); void checkLambdaCaptureStore(StoreOp storeOp); + void trackCallToCoroutine(CallOp callOp); void checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor); void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); @@ -1473,6 +1474,21 @@ bool LifetimeCheckPass::isTaskType(mlir::Value taskVal) { return IsTaskTyCache[ty]; } +void LifetimeCheckPass::trackCallToCoroutine(CallOp callOp) { + if (auto fnName = callOp.getCallee()) { + auto calleeFuncOp = getCalleeFromSymbol(theModule, *fnName); + if (calleeFuncOp && + (calleeFuncOp.getCoroutine() || + (calleeFuncOp.isDeclaration() && callOp->getNumResults() > 0 && + isTaskType(callOp->getResult(0))))) { + currScope->localTempTasks.insert(callOp->getResult(0)); + } + return; + } + // TODO: Handle indirect calls to coroutines, for instance when + // lambda coroutines are involved with invokers. +} + void LifetimeCheckPass::checkCall(CallOp callOp) { if (callOp.getNumOperands() == 0) return; @@ -1481,21 +1497,14 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // // Note that we can't reliably know if a function is a coroutine only as // part of declaration + trackCallToCoroutine(callOp); - // FIXME: Indirect calls are not yet supported. + // FIXME: General indirect calls not yet supported. if (!callOp.getCallee()) return; auto fnName = *callOp.getCallee(); - auto calleeFuncOp = getCalleeFromSymbol(theModule, fnName); - if (calleeFuncOp && - (calleeFuncOp.getCoroutine() || - (calleeFuncOp.isDeclaration() && callOp->getNumResults() > 0 && - isTaskType(callOp->getResult(0))))) { - currScope->localTempTasks.insert(callOp->getResult(0)); - } - - const auto *methodDecl = getMethod(theModule, fnName); + auto methodDecl = getMethod(theModule, fnName); if (!isOwnerOrPointerClassMethod(callOp.getOperand(0), methodDecl)) return checkOtherMethodsAndFunctions(callOp, methodDecl); From 64122cd61a35b563f5dd258fcfe20166229c4c1a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 22 Mar 2023 21:33:20 -0700 Subject: [PATCH 0839/2301] [CIR] Fix operand handling for indirect calls --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index c0f9fea3a91f..5b9c793a8e18 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1378,6 +1378,11 @@ def CallOp : CIR_Op<"call", let extraClassDeclaration = [{ FunctionType getCalleeType(); + mlir::Value getIndirectCallee() { + assert(!getCallee() && "only works for indirect call"); + return *arg_operand_begin(); + } + operand_iterator arg_operand_begin() { return operand_begin(); } operand_iterator arg_operand_end() { return operand_end(); } }]; From 0c2ccb2c22dfe2aa3158e11a07210b1e91fc65e8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 22 Mar 2023 21:48:35 -0700 Subject: [PATCH 0840/2301] [CIR][Lifetime] Handle lambda coroutines with invokers --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 10 ++++++++-- clang/test/CIR/Transforms/lifetime-check-coro-task.cpp | 8 ++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 5271ab64da87..f7a57c1aa5c4 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -962,7 +962,7 @@ void LifetimeCheckPass::checkCoroTaskStore(StoreOp storeOp) { // when %arg0 finishes its lifetime at the end of the enclosing cir.scope. if (auto call = dyn_cast(taskTmp.getDefiningOp())) { bool potentialTaintedTask = false; - for (auto arg : call.getOperands()) { + for (auto arg : call.getArgOperands()) { auto alloca = dyn_cast(arg.getDefiningOp()); if (alloca && currScope->localValues.count(alloca)) { getPmap()[taskAddr].insert(State::getLocalValue(alloca)); @@ -1485,8 +1485,14 @@ void LifetimeCheckPass::trackCallToCoroutine(CallOp callOp) { } return; } - // TODO: Handle indirect calls to coroutines, for instance when + // Handle indirect calls to coroutines, for instance when // lambda coroutines are involved with invokers. + if (callOp->getNumResults() > 0 && isTaskType(callOp->getResult(0))) { + // FIXME: get more guarantees to prevent false positives (perhaps + // apply some tracking analysis before this pass and check for lambda + // idioms). + currScope->localTempTasks.insert(callOp->getResult(0)); + } } void LifetimeCheckPass::checkCall(CallOp callOp) { diff --git a/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp b/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp index 56f97ff0d74e..cf101b790491 100644 --- a/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-coro-task.cpp @@ -25,3 +25,11 @@ folly::coro::Task go2_lambda() { co_return co_await task; // expected-remark {{pset => { task, invalid }}} // expected-warning@-1 {{use of coroutine 'task' with dangling reference}} } + +folly::coro::Task go3_lambda() { + auto* fn = +[](int const& i) -> folly::coro::Task { co_return i; }; + auto task = fn(3); // expected-note {{coroutine bound to resource with expired lifetime}} + // expected-note@-1 {{at the end of scope or full-expression}} + co_return co_await task; // expected-remark {{pset => { task, invalid }}} + // expected-warning@-1 {{use of coroutine 'task' with dangling reference}} +} \ No newline at end of file From 28408b0f18c3eb29a52882088d0e7090de84f2e8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 27 Mar 2023 20:19:13 -0700 Subject: [PATCH 0841/2301] [CIR][CIRGen] Fix silly enum crash --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 13 ++++++++++++- clang/test/CIR/CodeGen/basic.cpp | 5 +++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 4b430071be34..3651f43025f4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -716,7 +716,18 @@ void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { // from the cache. This allows function types and other things that may be // derived from the enum to be recomputed. if (const auto *ED = dyn_cast(TD)) { - llvm_unreachable("NYI"); + // Only flush the cache if we've actually already converted this type. + if (TypeCache.count(ED->getTypeForDecl())) { + // Okay, we formed some types based on this. We speculated that the enum + // would be lowered to i32, so we only need to flush the cache if this + // didn't happen. + if (!ConvertType(ED->getIntegerType()).isInteger(32)) + TypeCache.clear(); + } + // If necessary, provide the full definition of a type only used with a + // declaration so far. + assert(!UnimplementedFeature::generateDebugInfo()); + return; } // If we completed a RecordDecl that we previously used and converted to an diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 5b3ae186cc7e..c1eb2a2629b9 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -136,3 +136,8 @@ void if1(int a, bool b, bool c) { // CHECK-DAG: #[[locScope]] = loc(fused[#[[locScopeA:loc[0-9]+]], #[[locScopeB:loc[0-9]+]]]) // CHECK-DAG: #[[locScopeA]] = loc("{{.*}}basic.cpp":27:3) // CHECK-DAG: #[[locScopeB]] = loc("{{.*}}basic.cpp":31:3) + +enum { + um = 0, + dois = 1, +}; // Do not crash! \ No newline at end of file From 6e5458b7c9a6882ab386f8aa8eb40ae5a8a04ecc Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 28 Mar 2023 12:15:51 -0700 Subject: [PATCH 0842/2301] [CIR][CIRGen] Add one more listener, this time for HandleCXXStaticMemberVarInstantiation This handles Sema enqueuing codegen for delayed static member var instantiation, no testcase just yet since the actual emission isn't implemented. --- clang/include/clang/CIR/CIRGenerator.h | 1 + clang/lib/CIR/CodeGen/CIRGenModule.cpp | 15 +++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 3 +++ clang/lib/CIR/CodeGen/CIRGenerator.cpp | 7 +++++++ clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 +- 5 files changed, 27 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 5d71311f5e61..39e067233233 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -83,6 +83,7 @@ class CIRGenerator : public clang::ASTConsumer { void HandleInlineFunctionDefinition(clang::FunctionDecl *D) override; void HandleTagDeclDefinition(clang::TagDecl *D) override; void HandleTagDeclRequiredDefinition(const clang::TagDecl *D) override; + void HandleCXXStaticMemberVarInstantiation(clang::VarDecl *D) override; mlir::ModuleOp getModule(); std::unique_ptr takeContext() { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 7988fd2ada46..472898d123a9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1991,3 +1991,18 @@ void CIRGenModule::buildExplicitCastExprType(const ExplicitCastExpr *E, assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); } + +void CIRGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { + auto DK = VD->isThisDeclarationADefinition(); + if (DK == VarDecl::Definition && VD->hasAttr()) + return; + + TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind(); + // If we have a definition, this might be a deferred decl. If the + // instantiation is explicit, make sure we emit it at the end. + if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition) { + llvm_unreachable("NYI"); + } + + buildTopLevelDecl(VD); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index dd1032288b93..640efd0eb1f8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -161,6 +161,9 @@ class CIRGenModule { template void maybeHandleStaticInExternC(const SomeDecl *D, mlir::cir::GlobalOp GV); + /// Tell the consumer that this variable has been instantiated. + void HandleCXXStaticMemberVarInstantiation(VarDecl *VD); + llvm::DenseMap Globals; mlir::Operation *getGlobalValue(StringRef Ref); mlir::Value getGlobalValue(const clang::Decl *D); diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 807e2430d3b7..419a51ba8dd3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -173,3 +173,10 @@ void CIRGenerator::HandleTagDeclRequiredDefinition(const TagDecl *D) { if (CGM->getModuleDebugInfo()) llvm_unreachable("NYI"); } + +void CIRGenerator::HandleCXXStaticMemberVarInstantiation(VarDecl *D) { + if (Diags.hasErrorOccurred()) + return; + + CGM->HandleCXXStaticMemberVarInstantiation(D); +} diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index d8f3ffe62a04..a2809d25b583 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -140,7 +140,7 @@ class CIRGenConsumer : public clang::ASTConsumer { } void HandleCXXStaticMemberVarInstantiation(clang::VarDecl *VD) override { - llvm_unreachable("NYI"); + gen->HandleCXXStaticMemberVarInstantiation(VD); } void HandleInlineFunctionDefinition(FunctionDecl *D) override { From 2182cef96ebf04ccd73c3fce10a48ad3dbe8f205 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 28 Mar 2023 14:00:15 -0700 Subject: [PATCH 0843/2301] [CIR][CIRGen] Handle toplevel codegen for LinkageSpecDecl --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 23 ++++++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenModule.h | 1 + clang/test/CIR/CodeGen/basic.cpp | 22 +++++++++++++++++----- 3 files changed, 36 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 472898d123a9..fa4494d85c02 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -91,10 +91,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, const clang::CodeGenOptions &CGO, DiagnosticsEngine &Diags) : builder(context), astCtx(astctx), langOpts(astctx.getLangOpts()), - codeGenOpts(CGO), theModule{mlir::ModuleOp::create( - builder.getUnknownLoc())}, - Diags(Diags), target(astCtx.getTargetInfo()), - ABI(createCXXABI(*this)), genTypes{*this} { + codeGenOpts(CGO), + theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), + target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), + genTypes{*this} { mlir::cir::sob::SignedOverflowBehavior sob; switch (langOpts.getSignedOverflowBehavior()) { case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Defined: @@ -1046,6 +1046,15 @@ void CIRGenModule::buildDeclContext(const DeclContext *DC) { } } +void CIRGenModule::buildLinkageSpec(const LinkageSpecDecl *LSD) { + if (LSD->getLanguage() != LinkageSpecLanguageIDs::C && + LSD->getLanguage() != LinkageSpecLanguageIDs::CXX) { + llvm_unreachable("unsupported linkage spec"); + return; + } + buildDeclContext(LSD); +} + // Emit code for a single top level declaration. void CIRGenModule::buildTopLevelDecl(Decl *decl) { // Ignore dependent declarations @@ -1122,6 +1131,10 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { // Nothing to do. break; + case Decl::LinkageSpec: + buildLinkageSpec(cast(decl)); + break; + case Decl::Typedef: case Decl::TypeAlias: // using foo = bar; [C++11] case Decl::Record: @@ -2005,4 +2018,4 @@ void CIRGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { } buildTopLevelDecl(VD); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 640efd0eb1f8..794c58b64562 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -289,6 +289,7 @@ class CIRGenModule { ForDefinition_t IsForDefinition = NotForDefinition); void buildTopLevelDecl(clang::Decl *decl); + void buildLinkageSpec(const LinkageSpecDecl *D); /// Emit code for a single global function or var decl. Forward declarations /// are emitted lazily. diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index c1eb2a2629b9..7a471d776a25 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -133,11 +133,23 @@ void if1(int a, bool b, bool c) { // CHECK: } // CHECK: } -// CHECK-DAG: #[[locScope]] = loc(fused[#[[locScopeA:loc[0-9]+]], #[[locScopeB:loc[0-9]+]]]) -// CHECK-DAG: #[[locScopeA]] = loc("{{.*}}basic.cpp":27:3) -// CHECK-DAG: #[[locScopeB]] = loc("{{.*}}basic.cpp":31:3) - enum { um = 0, dois = 1, -}; // Do not crash! \ No newline at end of file +}; // Do not crash! + +extern "C" { +struct regs { + unsigned long sp; + unsigned long pc; +}; + +// Check it's not mangled. +// CHECK: cir.func @use_regs() + +void use_regs() { regs r; } +} + +// CHECK-DAG: #[[locScope]] = loc(fused[#[[locScopeA:loc[0-9]+]], #[[locScopeB:loc[0-9]+]]]) +// CHECK-DAG: #[[locScopeA]] = loc("{{.*}}basic.cpp":27:3) +// CHECK-DAG: #[[locScopeB]] = loc("{{.*}}basic.cpp":31:3) \ No newline at end of file From 5f99d0c77bb53b55e30bcee96e328ff0a248b6aa Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 28 Mar 2023 14:15:30 -0700 Subject: [PATCH 0844/2301] [CIR][CIRGen] Handle CXXConversion just like we handle methods and functions --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 1 + clang/test/CIR/CodeGen/operators.cpp | 14 ++++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 clang/test/CIR/CodeGen/operators.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index fa4494d85c02..e71a35da7924 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1083,6 +1083,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { // EmitGlobal(HD); break; + case Decl::CXXConversion: case Decl::CXXMethod: case Decl::Function: buildGlobal(cast(decl)); diff --git a/clang/test/CIR/CodeGen/operators.cpp b/clang/test/CIR/CodeGen/operators.cpp new file mode 100644 index 000000000000..1d900188f1ce --- /dev/null +++ b/clang/test/CIR/CodeGen/operators.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +class __attribute__((__visibility__("default"))) exception_ptr +{ + void* __ptr_; +public: + explicit operator bool() const noexcept {return __ptr_ != nullptr;} +}; + +// TODO: for now only check that this doesn't crash, in the future check operator +// bool codegen. + +// CHECK: module \ No newline at end of file From 9a6fb8ffb594a9dd2a359b3e8827b9356fc755ef Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 28 Mar 2023 14:24:15 -0700 Subject: [PATCH 0845/2301] [CIR][CIRGen] Handle top level decl for CXXDestructor --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 2 ++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 17 ++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 3 +++ clang/test/CIR/CodeGen/dtors.cpp | 23 +++++++++++++++++++ 4 files changed, 45 insertions(+) create mode 100644 clang/test/CIR/CodeGen/dtors.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index b4ce5c983cc4..07ea91575a2c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -132,6 +132,8 @@ class CIRGenCXXABI { /// Emit constructor variants required by this ABI. virtual void buildCXXConstructors(const clang::CXXConstructorDecl *D) = 0; + /// Emit dtor variants required by this ABI. + virtual void buildCXXDestructors(const clang::CXXDestructorDecl *D) = 0; /// Specify how one should pass an argument of a record type. enum class RecordArgABI { diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 34c2d3388668..c3aff116564a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -98,6 +98,7 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { FunctionArgList &Params) override; void buildCXXConstructors(const clang::CXXConstructorDecl *D) override; + void buildCXXDestructors(const clang::CXXDestructorDecl *D) override; void buildCXXStructor(clang::GlobalDecl GD) override; @@ -398,3 +399,19 @@ void CIRGenItaniumCXXABI::buildCXXConstructors(const CXXConstructorDecl *D) { CGM.buildGlobal(GlobalDecl(D, Ctor_Complete)); } } + +void CIRGenItaniumCXXABI::buildCXXDestructors(const CXXDestructorDecl *D) { + // The destructor used for destructing this as a base class; ignores + // virtual bases. + CGM.buildGlobal(GlobalDecl(D, Dtor_Base)); + + // The destructor used for destructing this as a most-derived class; + // call the base destructor and then destructs any virtual bases. + CGM.buildGlobal(GlobalDecl(D, Dtor_Complete)); + + // The destructor in a virtual table is always a 'deleting' + // destructor, which calls the complete destructor and then uses the + // appropriate operator delete. + if (D->isVirtual()) + CGM.buildGlobal(GlobalDecl(D, Dtor_Deleting)); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index e71a35da7924..2bb4deccb82a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1127,6 +1127,9 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { case Decl::CXXConstructor: getCXXABI().buildCXXConstructors(cast(decl)); break; + case Decl::CXXDestructor: + getCXXABI().buildCXXDestructors(cast(decl)); + break; case Decl::StaticAssert: // Nothing to do. diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp new file mode 100644 index 000000000000..4aff0ab08b4f --- /dev/null +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +class __attribute__((__visibility__("default"))) exception +{ +public: + __attribute__((__visibility__("hidden"))) __attribute__((__exclude_from_explicit_instantiation__)) exception() noexcept {} + __attribute__((__visibility__("hidden"))) __attribute__((__exclude_from_explicit_instantiation__)) exception(const exception&) noexcept = default; + + virtual ~exception() noexcept; + virtual const char* what() const noexcept; +}; + +class __attribute__((__visibility__("default"))) bad_function_call + : public exception +{ +public: + virtual ~bad_function_call() noexcept {} +}; + +// TODO: for now only check that this doesn't crash, more support soon. + +// CHECK: module \ No newline at end of file From 1ba6405df9075218afaf549512ff5e601c0a7f72 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 29 Mar 2023 11:18:45 -0700 Subject: [PATCH 0846/2301] [CIR][CIRGen][NFC] Use CIRGenBuilderTy instead of OpBuilder and add more helpers --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 15 +++++++++++++++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 6 ++++++ clang/lib/CIR/CodeGen/CIRGenTypes.h | 7 +++++-- clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 3 +++ 5 files changed, 30 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index a378b6bcf01d..c334993199c1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -81,6 +81,21 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return DefaultConstrainedRounding; } + // + // Attribute helpers + // ----------------- + // + + mlir::TypedAttr getZeroAttr(mlir::Type t) { + return mlir::cir::ZeroAttr::get(getContext(), t); + } + + mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { + // TODO: dispatch creation for primitive types. + assert(ty.isa() && "NYI for other types"); + return create(loc, ty, getZeroAttr(ty)); + } + // // Type helpers // ------------ diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index af1f2006dedb..198f4ab27462 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -11,6 +11,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenBuilder.h" #include "CIRGenCXXABI.h" #include "CIRGenFunction.h" #include "CIRGenFunctionInfo.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index 9619e8fee8a5..5bc097835524 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -78,6 +78,12 @@ class CIRGenRecordLayout { /// Check whether this struct can be C++ zero-initialized with a /// zeroinitializer. bool isZeroInitializable() const { return IsZeroInitializable; } + + /// Return the "base subobject" LLVM type associated with + /// this record. + mlir::cir::StructType getBaseSubobjectCIRType() const { + return BaseSubobjectType; + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index f5e058ccf787..4ab86af12559 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -61,7 +61,6 @@ class GlobalDecl; namespace mlir { class Type; -class OpBuilder; namespace cir { class StructType; } // namespace cir @@ -72,12 +71,13 @@ class CallArgList; class CIRGenCXXABI; class CIRGenModule; class CIRGenFunctionInfo; +class CIRGenBuilderTy; /// This class organizes the cross-module state that is used while lowering /// AST types to CIR types. class CIRGenTypes { clang::ASTContext &Context; - mlir::OpBuilder &Builder; + cir::CIRGenBuilderTy &Builder; CIRGenModule &CGM; const clang::TargetInfo &Target; CIRGenCXXABI &TheCXXABI; @@ -117,6 +117,9 @@ class CIRGenTypes { CIRGenTypes(CIRGenModule &cgm); ~CIRGenTypes(); + cir::CIRGenBuilderTy &getBuilder() const { return Builder; } + CIRGenModule &getModule() const { return CGM; } + /// isFuncTypeConvertible - Utility to check whether a function type can be /// converted to a CIR type (i.e. doesn't depend on an incomplete tag type). bool isFuncTypeConvertible(const clang::FunctionType *FT); diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 7c2e309890d8..a217cde42133 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -1,5 +1,8 @@ +#include "CIRGenBuilder.h" +#include "CIRGenModule.h" #include "CIRGenTypes.h" + #include "mlir/IR/BuiltinTypes.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" From e5f8c9ebfd28269ba4e3dd1be6b205be30f92ac3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 28 Mar 2023 16:33:19 -0700 Subject: [PATCH 0847/2301] [CIR][CIRGen] Pave the way for accounting vtables in CIR record layouts This adds lots of logic and extra boilerplate to build our way into supporting vtables, testcase will land when we finish the first testable chunk. - Added more cases for building null initialization. - Change CIRRecordLowering to start adding members for VFPtr and Base kinds. --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 6 + clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 43 ++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 5 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 180 +++++++++++++++--- 6 files changed, 206 insertions(+), 34 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 5085082555a8..62910cfa0d5c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -42,6 +42,12 @@ class AggExprEmitter : public StmtVisitor { return Dest; } + void EnsureDest(mlir::Location loc, QualType T) { + if (!Dest.isIgnored()) + return; + Dest = CGF.CreateAggTemp(T, loc, "agg.tmp.ensured"); + } + public: AggExprEmitter(CIRGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused) : CGF{cgf}, Dest(Dest), IsResultUnused(IsResultUnused) {} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index a47c5e637258..550d1959418a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -267,7 +267,8 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, switch (E->getConstructionKind()) { case CXXConstructionKind::Delegating: case CXXConstructionKind::Complete: - buildNullInitialization(Dest.getAddress(), E->getType()); + buildNullInitialization(getLoc(E->getSourceRange()), Dest.getAddress(), + E->getType()); break; case CXXConstructionKind::VirtualBase: case CXXConstructionKind::NonVirtualBase: diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 051025df5ab5..e35ace5753bd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1123,7 +1123,8 @@ std::string CIRGenFunction::getCounterRefTmpAsString() { return getVersionedTmpName("ref.tmp", CounterRefTmp++); } -void CIRGenFunction::buildNullInitialization(Address DestPtr, QualType Ty) { +void CIRGenFunction::buildNullInitialization(mlir::Location loc, + Address DestPtr, QualType Ty) { // Ignore empty classes in C++. if (getLangOpts().CPlusPlus) { if (const RecordType *RT = Ty->getAs()) { @@ -1132,7 +1133,45 @@ void CIRGenFunction::buildNullInitialization(Address DestPtr, QualType Ty) { } } - llvm_unreachable("NYI"); + // Cast the dest ptr to the appropriate i8 pointer type. + // FIXME: add a CodeGenTypeCache thing for CIR. + auto intTy = DestPtr.getElementType().dyn_cast(); + if (intTy && intTy.getWidth() == 8) { + llvm_unreachable("NYI"); + } + + // Get size and alignment info for this aggregate. + CharUnits size = getContext().getTypeSizeInChars(Ty); + [[maybe_unused]] mlir::Attribute SizeVal{}; + [[maybe_unused]] const VariableArrayType *vla = nullptr; + + // Don't bother emitting a zero-byte memset. + if (size.isZero()) { + // But note that getTypeInfo returns 0 for a VLA. + if (const VariableArrayType *vlaType = dyn_cast_or_null( + getContext().getAsArrayType(Ty))) { + llvm_unreachable("NYI"); + } else { + return; + } + } else { + SizeVal = CGM.getSize(size); + } + + // If the type contains a pointer to data member we can't memset it to zero. + // Instead, create a null constant and copy it to the destination. + // TODO: there are other patterns besides zero that we can usefully memset, + // like -1, which happens to be the pattern used by member-pointers. + if (!CGM.getTypes().isZeroInitializable(Ty)) { + llvm_unreachable("NYI"); + } + + // In LLVM Codegen: otherwise, just memset the whole thing to zero using + // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the + // respective address. + // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false); + builder.createStore(loc, builder.getZero(loc, getTypes().ConvertType(Ty)), + DestPtr); } CIRGenFunction::CIRGenFPOptionsRAII::CIRGenFPOptionsRAII(CIRGenFunction &CGF, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index b7b304b635e9..840e9cf040af 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -711,7 +711,8 @@ class CIRGenFunction { RValue buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue); - void buildNullInitialization(Address DestPtr, QualType Ty); + void buildNullInitialization(mlir::Location loc, Address DestPtr, + QualType Ty); mlir::Value buildCXXNewExpr(const CXXNewExpr *E); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 3651f43025f4..f1c207d50c09 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -164,8 +164,9 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // Force conversion of non-virtual base classes recursively. if (const auto *cxxRecordDecl = dyn_cast(RD)) { for (const auto &I : cxxRecordDecl->bases()) { - (void)I; - llvm_unreachable("NYI"); + if (I.isVirtual()) + continue; + convertRecordDeclType(I.getType()->castAs()->getDecl()); } } diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index a217cde42133..d853d8352903 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -20,8 +20,8 @@ using namespace clang; namespace { /// The CIRRecordLowering is responsible for lowering an ASTRecordLayout to a -/// mlir::Type. Some of the lowering is straightforward, some is not. Here we -/// detail some of the complexities and weirdnesses here. +/// mlir::Type. Some of the lowering is straightforward, some is not. TODO: Here +/// we detail some of the complexities and weirdnesses? struct CIRRecordLowering final { // MemberInfo is a helper structure that contains information about a record @@ -31,10 +31,16 @@ struct CIRRecordLowering final { CharUnits offset; enum class InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } kind; mlir::Type data; - const FieldDecl *fieldDecl; + union { + const FieldDecl *fieldDecl; + const CXXRecordDecl *cxxRecordDecl; + }; MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data, const FieldDecl *fieldDecl = nullptr) : offset{offset}, kind{kind}, data{data}, fieldDecl{fieldDecl} {}; + MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data, + const CXXRecordDecl *RD) + : offset{offset}, kind{kind}, data{data}, cxxRecordDecl{RD} {} // MemberInfos are sorted so we define a < operator. bool operator<(const MemberInfo &other) const { return offset < other.offset; @@ -44,11 +50,31 @@ struct CIRRecordLowering final { CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl, bool isPacked); // Short helper routines. - void lower(bool nonVirtualBaseType); - void accumulateFields(); + void computeVolatileBitfields(); + void accumulateBases(); + void accumulateVPtrs(); void accumulateVBases(); + void accumulateFields(); + + mlir::Type getVFPtrType(); + + // Helper function to check if we are targeting AAPCS. + bool isAAPCS() const { + return astContext.getTargetInfo().getABI().starts_with("aapcs"); + } + + // The Itanium base layout rule allows virtual bases to overlap + // other bases, which complicates layout in specific ways. + // + // Note specifically that the ms_struct attribute doesn't change this. + bool isOverlappingVBaseABI() { + return !astContext.getTargetInfo().getCXXABI().isMicrosoft(); + } + // Recursively searches all of the bases to find out if a vbase is + // not the primary vbase of some base class. + bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query); CharUnits bitsToCharUnits(uint64_t bitOffset) { return astContext.toCharUnitsFromBits(bitOffset); @@ -70,6 +96,11 @@ struct CIRRecordLowering final { type); } + // Gets the llvm Basesubobject type from a CXXRecordDecl. + mlir::Type getStorageType(const CXXRecordDecl *RD) { + return cirGenTypes.getCIRGenRecordLayout(RD).getBaseSubobjectCIRType(); + } + mlir::Type getStorageType(const FieldDecl *fieldDecl) { auto type = cirGenTypes.convertTypeForMem(fieldDecl->getType()); assert(!fieldDecl->isBitField() && "bit fields NYI"); @@ -97,6 +128,7 @@ struct CIRRecordLowering final { } CIRGenTypes &cirGenTypes; + CIRGenBuilderTy &builder; const ASTContext &astContext; const RecordDecl *recordDecl; const CXXRecordDecl *cxxRecordDecl; @@ -122,8 +154,8 @@ struct CIRRecordLowering final { CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl, bool isPacked) - : cirGenTypes{cirGenTypes}, astContext{cirGenTypes.getContext()}, - recordDecl{recordDecl}, + : cirGenTypes{cirGenTypes}, builder{cirGenTypes.getBuilder()}, + astContext{cirGenTypes.getContext()}, recordDecl{recordDecl}, cxxRecordDecl{llvm::dyn_cast(recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, IsZeroInitializable(true), IsZeroInitializableAsBase(true), @@ -136,21 +168,23 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) { CharUnits Size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize() : astRecordLayout.getSize(); - + if (recordDecl->isUnion()) { + llvm_unreachable("NYI"); + // lowerUnion(); + // computeVolatileBitfields(); + return; + } accumulateFields(); // RD implies C++ if (cxxRecordDecl) { - assert(!astRecordLayout.hasOwnVFPtr() && "accumulateVPtrs() NYI"); - assert(cxxRecordDecl->bases().begin() == cxxRecordDecl->bases().end() && - "Inheritance NYI"); - + accumulateVPtrs(); + accumulateBases(); if (members.empty()) { appendPaddingBytes(Size); - // TODO: computeVolatileBitFields(); + computeVolatileBitfields(); return; } - if (!nonVirtualBaseType) accumulateVBases(); } @@ -161,29 +195,120 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) { // TODO: implement padding // TODO: support zeroInit fillOutputFields(); - // TODO: implement volatile bit fields + computeVolatileBitfields(); +} + +bool CIRRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl, + const CXXRecordDecl *Query) { + const ASTRecordLayout &DeclLayout = astContext.getASTRecordLayout(Decl); + if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query) + return false; + for (const auto &Base : Decl->bases()) + if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query)) + return false; + return true; +} + +/// The AAPCS that defines that, when possible, bit-fields should +/// be accessed using containers of the declared type width: +/// When a volatile bit-field is read, and its container does not overlap with +/// any non-bit-field member or any zero length bit-field member, its container +/// must be read exactly once using the access width appropriate to the type of +/// the container. When a volatile bit-field is written, and its container does +/// not overlap with any non-bit-field member or any zero-length bit-field +/// member, its container must be read exactly once and written exactly once +/// using the access width appropriate to the type of the container. The two +/// accesses are not atomic. +/// +/// Enforcing the width restriction can be disabled using +/// -fno-aapcs-bitfield-width. +void CIRRecordLowering::computeVolatileBitfields() { + if (!isAAPCS() || + !cirGenTypes.getModule().getCodeGenOpts().AAPCSBitfieldWidth) + return; + + for ([[maybe_unused]] auto &I : bitFields) { + llvm_unreachable("NYI"); + } +} + +void CIRRecordLowering::accumulateBases() { + // If we've got a primary virtual base, we need to add it with the bases. + if (astRecordLayout.isPrimaryBaseVirtual()) { + llvm_unreachable("NYI"); + } + + // Accumulate the non-virtual bases. + for ([[maybe_unused]] const auto &Base : cxxRecordDecl->bases()) { + if (Base.isVirtual()) + continue; + // Bases can be zero-sized even if not technically empty if they + // contain only a trailing array member. + const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); + if (!BaseDecl->isEmpty() && + !astContext.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero()) { + members.push_back(MemberInfo(astRecordLayout.getBaseClassOffset(BaseDecl), + MemberInfo::InfoKind::Base, + getStorageType(BaseDecl), BaseDecl)); + } + } } void CIRRecordLowering::accumulateVBases() { - if (astRecordLayout.hasOwnVFPtr()) + CharUnits ScissorOffset = astRecordLayout.getNonVirtualSize(); + // In the itanium ABI, it's possible to place a vbase at a dsize that is + // smaller than the nvsize. Here we check to see if such a base is placed + // before the nvsize and set the scissor offset to that, instead of the + // nvsize. + if (isOverlappingVBaseABI()) + for (const auto &Base : cxxRecordDecl->vbases()) { + const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); + if (BaseDecl->isEmpty()) + continue; + llvm_unreachable("NYI"); + } + members.push_back(MemberInfo(ScissorOffset, MemberInfo::InfoKind::Scissor, + mlir::Type{}, cxxRecordDecl)); + for (const auto &Base : cxxRecordDecl->vbases()) { + const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); + if (BaseDecl->isEmpty()) + continue; llvm_unreachable("NYI"); + } +} + +void CIRRecordLowering::accumulateVPtrs() { + if (astRecordLayout.hasOwnVFPtr()) + members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::VFPtr, + getVFPtrType())); if (astRecordLayout.hasOwnVBPtr()) llvm_unreachable("NYI"); } +mlir::Type CIRRecordLowering::getVFPtrType() { + // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special + // type so it's a bit more clear and C++ idiomatic. + auto intTy = mlir::IntegerType::get(builder.getContext(), 32); + auto fnTy = mlir::FunctionType::get(builder.getContext(), {}, {intTy}); + return builder.getPointerTo(builder.getPointerTo(fnTy)); +} + void CIRRecordLowering::fillOutputFields() { for (auto &member : members) { - assert(member.data && "member.data should be valid"); - fieldTypes.push_back(member.data); - assert(member.kind == MemberInfo::InfoKind::Field && - "Bit fields and inheritance are not NYI"); - assert(member.fieldDecl && "member.fieldDecl should be valid"); - fields[member.fieldDecl->getCanonicalDecl()] = fieldTypes.size() - 1; - - // A field without storage must be a bitfield. - assert(member.data && "Bitfields NYI"); - assert(member.kind != MemberInfo::InfoKind::Base && "Base classes NYI"); - assert(member.kind != MemberInfo::InfoKind::VBase && "Base classes NYI"); + if (member.data) + fieldTypes.push_back(member.data); + if (member.kind == MemberInfo::InfoKind::Field) { + if (member.fieldDecl) + fields[member.fieldDecl->getCanonicalDecl()] = fieldTypes.size() - 1; + // A field without storage must be a bitfield. + if (!member.data) + llvm_unreachable("NYI"); + } else if (member.kind == MemberInfo::InfoKind::Base) { + nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1; + } else if (member.kind == MemberInfo::InfoKind::VBase) { + llvm_unreachable("NYI"); + // virtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1; + } } } @@ -252,6 +377,5 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, } // TODO: implement verification - return RL; } From 8c47d9099b0e14ad7766aa85b855076ed4134a54 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 29 Mar 2023 13:59:02 -0700 Subject: [PATCH 0848/2301] [CIR][CIRGen][NFC] Add skeleton for supporting EHStack based dtor logic Start work to handle VisitCXXBindTemporaryExpr during agg emission context. To get it done we'll require many pieces, this starts adding some pieces, which end up touching EHCleanups - Add buildCXXTemporary logic. - Add chain of forced dtor insertion using pushDestroy/destroyCXXObject/pushFullExprCleanup/pushCleanup - Add more template tricks for pushFullExprCleanup and ConditionalEvaluation. - Add EHScopeStack.h, very similar to LLVM's codegen one (this could probably be factored out at some point). --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 11 + clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 22 ++ clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 99 ++++++ clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 20 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 74 ++++ clang/lib/CIR/CodeGen/EHScopeStack.h | 426 ++++++++++++++++++++++++ 6 files changed, 649 insertions(+), 3 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/EHScopeStack.h diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index ddb3486c199a..88b431ace1ce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -742,3 +742,14 @@ void CIRGenFunction::buildLambdaStaticInvokeBody(const CXXMethodDecl *MD) { buildLambdaDelegatingInvokeBody(MD); } + +void CIRGenFunction::destroyCXXObject(CIRGenFunction &CGF, Address addr, + QualType type) { + const RecordType *rtype = type->castAs(); + const CXXRecordDecl *record = cast(rtype->getDecl()); + const CXXDestructorDecl *dtor = record->getDestructor(); + assert(!dtor->isTrivial()); + llvm_unreachable("NYI"); + // CGF.buildCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false, + // /*Delegating=*/false, addr, type); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index c026a70eaa4f..791ddb5a92bc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -22,6 +22,10 @@ using namespace cir; using namespace clang; using namespace mlir::cir; +//===----------------------------------------------------------------------===// +// CIRGenFunction cleanup related +//===----------------------------------------------------------------------===// + /// Build a unconditional branch to the lexical scope cleanup block /// or with the labeled blocked if already solved. /// @@ -38,3 +42,21 @@ mlir::cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc, return builder.create(Loc, Dest.isValid() ? Dest.getBlock() : ReturnBlock().getBlock()); } + +/// Emits all the code to cause the given temporary to be cleaned up. +void CIRGenFunction::buildCXXTemporary(const CXXTemporary *Temporary, + QualType TempType, Address Ptr) { + pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject, + /*useEHCleanup*/ true); +} + +//===----------------------------------------------------------------------===// +// EHScopeStack +//===----------------------------------------------------------------------===// + +void EHScopeStack::Cleanup::anchor() {} + +void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) { + llvm_unreachable("NYI"); + return nullptr; +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 989aad77327a..d9280c2b81ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -12,6 +12,7 @@ #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" +#include "EHScopeStack.h" #include "clang/AST/Decl.h" @@ -474,3 +475,101 @@ void CIRGenFunction::buildDecl(const Decl &D) { } } } + +namespace { +struct DestroyObject final : EHScopeStack::Cleanup { + DestroyObject(Address addr, QualType type, + CIRGenFunction::Destroyer *destroyer, bool useEHCleanupForArray) + : addr(addr), type(type), destroyer(destroyer), + useEHCleanupForArray(useEHCleanupForArray) {} + + Address addr; + QualType type; + CIRGenFunction::Destroyer *destroyer; + bool useEHCleanupForArray; + + void Emit(CIRGenFunction &CGF, Flags flags) override { + // Don't use an EH cleanup recursively from an EH cleanup. + [[maybe_unused]] bool useEHCleanupForArray = + flags.isForNormalCleanup() && this->useEHCleanupForArray; + + llvm_unreachable("NYI"); + // CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray); + } +}; + +template struct DestroyNRVOVariable : EHScopeStack::Cleanup { + DestroyNRVOVariable(Address addr, QualType type, mlir::Value NRVOFlag) + : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {} + + mlir::Value NRVOFlag; + Address Loc; + QualType Ty; + + void Emit(CIRGenFunction &CGF, Flags flags) override { + llvm_unreachable("NYI"); + } + + virtual ~DestroyNRVOVariable() = default; +}; + +struct DestroyNRVOVariableCXX final + : DestroyNRVOVariable { + DestroyNRVOVariableCXX(Address addr, QualType type, + const CXXDestructorDecl *Dtor, mlir::Value NRVOFlag) + : DestroyNRVOVariable(addr, type, NRVOFlag), + Dtor(Dtor) {} + + const CXXDestructorDecl *Dtor; + + void emitDestructorCall(CIRGenFunction &CGF) { llvm_unreachable("NYI"); } +}; + +struct DestroyNRVOVariableC final : DestroyNRVOVariable { + DestroyNRVOVariableC(Address addr, mlir::Value NRVOFlag, QualType Ty) + : DestroyNRVOVariable(addr, Ty, NRVOFlag) {} + + void emitDestructorCall(CIRGenFunction &CGF) { llvm_unreachable("NYI"); } +}; + +struct CallStackRestore final : EHScopeStack::Cleanup { + Address Stack; + CallStackRestore(Address Stack) : Stack(Stack) {} + bool isRedundantBeforeReturn() override { return true; } + void Emit(CIRGenFunction &CGF, Flags flags) override { + llvm_unreachable("NYI"); + } +}; + +struct ExtendGCLifetime final : EHScopeStack::Cleanup { + const VarDecl &Var; + ExtendGCLifetime(const VarDecl *var) : Var(*var) {} + + void Emit(CIRGenFunction &CGF, Flags flags) override { + llvm_unreachable("NYI"); + } +}; + +struct CallCleanupFunction final : EHScopeStack::Cleanup { + // FIXME: mlir::Value used as placeholder, check options before implementing + // Emit below. + mlir::Value CleanupFn; + const CIRGenFunctionInfo &FnInfo; + const VarDecl &Var; + + CallCleanupFunction(mlir::Value CleanupFn, const CIRGenFunctionInfo *Info, + const VarDecl *Var) + : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {} + + void Emit(CIRGenFunction &CGF, Flags flags) override { + llvm_unreachable("NYI"); + } +}; +} // end anonymous namespace + +void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr, + QualType type, Destroyer *destroyer, + bool useEHCleanupForArray) { + pushFullExprCleanup(cleanupKind, addr, type, destroyer, + useEHCleanupForArray); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 62910cfa0d5c..3271a7036039 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -130,9 +130,7 @@ class AggExprEmitter : public StmtVisitor { void VisitXCXDefaultInitExpr(CXXDefaultInitExpr *E) { llvm_unreachable("NYI"); } - void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { - llvm_unreachable("NYI"); - } + void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); void VisitCXXConstructExpr(const CXXConstructExpr *E); void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E) { llvm_unreachable("NYI"); @@ -497,6 +495,22 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { llvm_unreachable("NYI"); } +void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { + // Ensure that we have a slot, but if we already do, remember + // whether it was externally destructed. + bool wasExternallyDestructed = Dest.isExternallyDestructed(); + EnsureDest(CGF.getLoc(E->getSourceRange()), E->getType()); + + // We're going to push a destructor if there isn't already one. + Dest.setExternallyDestructed(); + + Visit(E->getSubExpr()); + + // Push that destructor we promised. + if (!wasExternallyDestructed) + CGF.buildCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress()); +} + //===----------------------------------------------------------------------===// // Helpers and dispatcher //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 840e9cf040af..c71a66f84216 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -17,6 +17,7 @@ #include "CIRGenCall.h" #include "CIRGenModule.h" #include "CIRGenValue.h" +#include "EHScopeStack.h" #include "clang/AST/BaseSubobject.h" #include "clang/AST/CurrentSourceLocExprScope.h" @@ -508,6 +509,9 @@ class CIRGenFunction { /// invalid iff the function has no return value. Address ReturnValue = Address::invalid(); + /// Tracks function scope overall cleanup handling. + EHScopeStack EHStack; + /// A mapping from NRVO variables to the flags used to indicate /// when the NRVO has been applied to this variable. llvm::DenseMap NRVOFlags; @@ -714,6 +718,8 @@ class CIRGenFunction { void buildNullInitialization(mlir::Location loc, Address DestPtr, QualType Ty); + void buildCXXTemporary(const CXXTemporary *Temporary, QualType TempType, + Address Ptr); mlir::Value buildCXXNewExpr(const CXXNewExpr *E); mlir::Value createLoad(const clang::VarDecl *VD, const char *Name); @@ -1282,6 +1288,74 @@ class CIRGenFunction { AggValueSlot::Overlap_t MayOverlap, bool isVolatile = false); + /// + /// Cleanups + /// -------- + typedef void Destroyer(CIRGenFunction &CGF, Address addr, QualType ty); + + static Destroyer destroyCXXObject; + + void pushDestroy(CleanupKind kind, Address addr, QualType type, + Destroyer *destroyer, bool useEHCleanupForArray); + + /// An object to manage conditionally-evaluated expressions. + class ConditionalEvaluation { + // llvm::BasicBlock *StartBB; + + public: + ConditionalEvaluation(CIRGenFunction &CGF) + /*: StartBB(CGF.Builder.GetInsertBlock())*/ {} + + void begin(CIRGenFunction &CGF) { + assert(CGF.OutermostConditional != this); + if (!CGF.OutermostConditional) + CGF.OutermostConditional = this; + } + + void end(CIRGenFunction &CGF) { + assert(CGF.OutermostConditional != nullptr); + if (CGF.OutermostConditional == this) + CGF.OutermostConditional = nullptr; + } + + /// Returns a block which will be executed prior to each + /// evaluation of the conditional code. + // llvm::BasicBlock *getStartingBlock() const { return StartBB; } + }; + + // Return true if we're currently emitting one branch or the other of a + // conditional expression. + bool isInConditionalBranch() const { return OutermostConditional != nullptr; } + + void setBeforeOutermostConditional(mlir::Value value, Address addr) { + assert(isInConditionalBranch()); + llvm_unreachable("NYI"); + } + + // Points to the outermost active conditional control. This is used so that + // we know if a temporary should be destroyed conditionally. + ConditionalEvaluation *OutermostConditional = nullptr; + + /// Push a cleanup to be run at the end of the current full-expression. Safe + /// against the possibility that we're currently inside a + /// conditionally-evaluated expression. + template + void pushFullExprCleanup(CleanupKind kind, As... A) { + // If we're not in a conditional branch, or if none of the + // arguments requires saving, then use the unconditional cleanup. + if (!isInConditionalBranch()) + return EHStack.pushCleanup(kind, A...); + + llvm_unreachable("NYI"); + // Stash values in a tuple so we can guarantee the order of saves. + // typedef std::tuple::saved_type...> + // SavedTuple; SavedTuple Saved{saveValueInCond(A)...}; + + // typedef EHScopeStack::ConditionalCleanup CleanupType; + // EHStack.pushCleanupTuple(kind, Saved); + // initFullExprCleanup(); + } + /// CIR build helpers /// ----------------- diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h new file mode 100644 index 000000000000..8711cd3c232e --- /dev/null +++ b/clang/lib/CIR/CodeGen/EHScopeStack.h @@ -0,0 +1,426 @@ +//===-- EHScopeStack.h - Stack for cleanup CIR generation -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// These classes should be the minimum interface required for other parts of +// CodeGen to emit cleanups. The implementation is in CIRGenCleanup.cpp and +// other implemenentation details that are not widely needed are in +// CIRGenCleanup.h. +// +// TODO(cir): this header should be shared between LLVM and CIR codegen. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIRGEN_EHSCOPESTACK_H +#define LLVM_CLANG_LIB_CIRGEN_EHSCOPESTACK_H + +#include "mlir/IR/Value.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" + +namespace cir { + +class CIRGenFunction; + +/// A branch fixup. These are required when emitting a goto to a +/// label which hasn't been emitted yet. The goto is optimistically +/// emitted as a branch to the basic block for the label, and (if it +/// occurs in a scope with non-trivial cleanups) a fixup is added to +/// the innermost cleanup. When a (normal) cleanup is popped, any +/// unresolved fixups in that scope are threaded through the cleanup. +struct BranchFixup { + // /// The block containing the terminator which needs to be modified + // /// into a switch if this fixup is resolved into the current scope. + // /// If null, LatestBranch points directly to the destination. + // llvm::BasicBlock *OptimisticBranchBlock; + + // /// The ultimate destination of the branch. + // /// + // /// This can be set to null to indicate that this fixup was + // /// successfully resolved. + // llvm::BasicBlock *Destination; + + // /// The destination index value. + // unsigned DestinationIndex; + + // /// The initial branch of the fixup. + // llvm::BranchInst *InitialBranch; +}; + +template struct InvariantValue { + typedef T type; + typedef T saved_type; + static bool needsSaving(type value) { return false; } + static saved_type save(CIRGenFunction &CGF, type value) { return value; } + static type restore(CIRGenFunction &CGF, saved_type value) { return value; } +}; + +/// A metaprogramming class for ensuring that a value will dominate an +/// arbitrary position in a function. +template struct DominatingValue : InvariantValue {}; + +template ::value || + std::is_base_of::value) && + !std::is_base_of::value && + !std::is_base_of::value> +struct DominatingPointer; +template struct DominatingPointer : InvariantValue {}; +// template struct DominatingPointer at end of file + +template struct DominatingValue : DominatingPointer {}; + +enum CleanupKind : unsigned { + /// Denotes a cleanup that should run when a scope is exited using exceptional + /// control flow (a throw statement leading to stack unwinding, ). + EHCleanup = 0x1, + + /// Denotes a cleanup that should run when a scope is exited using normal + /// control flow (falling off the end of the scope, return, goto, ...). + NormalCleanup = 0x2, + + NormalAndEHCleanup = EHCleanup | NormalCleanup, + + LifetimeMarker = 0x8, + NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup, +}; + +/// A stack of scopes which respond to exceptions, including cleanups +/// and catch blocks. +class EHScopeStack { +public: + /* Should switch to alignof(uint64_t) instead of 8, when EHCleanupScope can */ + enum { ScopeStackAlignment = 8 }; + + /// A saved depth on the scope stack. This is necessary because + /// pushing scopes onto the stack invalidates iterators. + class stable_iterator { + friend class EHScopeStack; + + /// Offset from StartOfData to EndOfBuffer. + ptrdiff_t Size; + + stable_iterator(ptrdiff_t Size) : Size(Size) {} + + public: + static stable_iterator invalid() { return stable_iterator(-1); } + stable_iterator() : Size(-1) {} + + bool isValid() const { return Size >= 0; } + + /// Returns true if this scope encloses I. + /// Returns false if I is invalid. + /// This scope must be valid. + bool encloses(stable_iterator I) const { return Size <= I.Size; } + + /// Returns true if this scope strictly encloses I: that is, + /// if it encloses I and is not I. + /// Returns false is I is invalid. + /// This scope must be valid. + bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; } + + friend bool operator==(stable_iterator A, stable_iterator B) { + return A.Size == B.Size; + } + friend bool operator!=(stable_iterator A, stable_iterator B) { + return A.Size != B.Size; + } + }; + + /// Information for lazily generating a cleanup. Subclasses must be + /// POD-like: cleanups will not be destructed, and they will be + /// allocated on the cleanup stack and freely copied and moved + /// around. + /// + /// Cleanup implementations should generally be declared in an + /// anonymous namespace. + class Cleanup { + // Anchor the construction vtable. + virtual void anchor(); + + protected: + ~Cleanup() = default; + + public: + Cleanup(const Cleanup &) = default; + Cleanup(Cleanup &&) {} + Cleanup() = default; + + virtual bool isRedundantBeforeReturn() { return false; } + + /// Generation flags. + class Flags { + enum { + F_IsForEH = 0x1, + F_IsNormalCleanupKind = 0x2, + F_IsEHCleanupKind = 0x4, + F_HasExitSwitch = 0x8, + }; + unsigned flags; + + public: + Flags() : flags(0) {} + + /// isForEH - true if the current emission is for an EH cleanup. + bool isForEHCleanup() const { return flags & F_IsForEH; } + bool isForNormalCleanup() const { return !isForEHCleanup(); } + void setIsForEHCleanup() { flags |= F_IsForEH; } + + bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; } + void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; } + + /// isEHCleanupKind - true if the cleanup was pushed as an EH + /// cleanup. + bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; } + void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; } + + bool hasExitSwitch() const { return flags & F_HasExitSwitch; } + void setHasExitSwitch() { flags |= F_HasExitSwitch; } + }; + + /// Emit the cleanup. For normal cleanups, this is run in the + /// same EH context as when the cleanup was pushed, i.e. the + /// immediately-enclosing context of the cleanup scope. For + /// EH cleanups, this is run in a terminate context. + /// + // \param flags cleanup kind. + virtual void Emit(CIRGenFunction &CGF, Flags flags) = 0; + }; + + /// ConditionalCleanup stores the saved form of its parameters, + /// then restores them and performs the cleanup. + template + class ConditionalCleanup final : public Cleanup { + typedef std::tuple::saved_type...> SavedTuple; + SavedTuple Saved; + + template + T restore(CIRGenFunction &CGF, std::index_sequence) { + // It's important that the restores are emitted in order. The braced init + // list guarantees that. + return T{DominatingValue::restore(CGF, std::get(Saved))...}; + } + + void Emit(CIRGenFunction &CGF, Flags flags) override { + restore(CGF, std::index_sequence_for()).Emit(CGF, flags); + } + + public: + ConditionalCleanup(typename DominatingValue::saved_type... A) + : Saved(A...) {} + + ConditionalCleanup(SavedTuple Tuple) : Saved(std::move(Tuple)) {} + }; + +private: + // The implementation for this class is in CGException.h and + // CGException.cpp; the definition is here because it's used as a + // member of CIRGenFunction. + + /// The start of the scope-stack buffer, i.e. the allocated pointer + /// for the buffer. All of these pointers are either simultaneously + /// null or simultaneously valid. + char *StartOfBuffer; + + /// The end of the buffer. + char *EndOfBuffer; + + /// The first valid entry in the buffer. + char *StartOfData; + + /// The innermost normal cleanup on the stack. + stable_iterator InnermostNormalCleanup; + + /// The innermost EH scope on the stack. + stable_iterator InnermostEHScope; + + /// The CGF this Stack belong to + CIRGenFunction *CGF; + + /// The current set of branch fixups. A branch fixup is a jump to + /// an as-yet unemitted label, i.e. a label for which we don't yet + /// know the EH stack depth. Whenever we pop a cleanup, we have + /// to thread all the current branch fixups through it. + /// + /// Fixups are recorded as the Use of the respective branch or + /// switch statement. The use points to the final destination. + /// When popping out of a cleanup, these uses are threaded through + /// the cleanup and adjusted to point to the new cleanup. + /// + /// Note that branches are allowed to jump into protected scopes + /// in certain situations; e.g. the following code is legal: + /// struct A { ~A(); }; // trivial ctor, non-trivial dtor + /// goto foo; + /// A a; + /// foo: + /// bar(); + llvm::SmallVector BranchFixups; + + char *allocate(size_t Size); + void deallocate(size_t Size); + + void *pushCleanup(CleanupKind K, size_t DataSize); + +public: + EHScopeStack() + : StartOfBuffer(nullptr), EndOfBuffer(nullptr), StartOfData(nullptr), + InnermostNormalCleanup(stable_end()), InnermostEHScope(stable_end()), + CGF(nullptr) {} + ~EHScopeStack() { delete[] StartOfBuffer; } + + /// Push a lazily-created cleanup on the stack. + template void pushCleanup(CleanupKind Kind, As... A) { + static_assert(alignof(T) <= ScopeStackAlignment, + "Cleanup's alignment is too large."); + void *Buffer = pushCleanup(Kind, sizeof(T)); + Cleanup *Obj = new (Buffer) T(A...); + (void) Obj; + } + + /// Push a lazily-created cleanup on the stack. Tuple version. + template + void pushCleanupTuple(CleanupKind Kind, std::tuple A) { + static_assert(alignof(T) <= ScopeStackAlignment, + "Cleanup's alignment is too large."); + void *Buffer = pushCleanup(Kind, sizeof(T)); + Cleanup *Obj = new (Buffer) T(std::move(A)); + (void) Obj; + } + + // Feel free to add more variants of the following: + + /// Push a cleanup with non-constant storage requirements on the + /// stack. The cleanup type must provide an additional static method: + /// static size_t getExtraSize(size_t); + /// The argument to this method will be the value N, which will also + /// be passed as the first argument to the constructor. + /// + /// The data stored in the extra storage must obey the same + /// restrictions as normal cleanup member data. + /// + /// The pointer returned from this method is valid until the cleanup + /// stack is modified. + template + T *pushCleanupWithExtra(CleanupKind Kind, size_t N, As... A) { + static_assert(alignof(T) <= ScopeStackAlignment, + "Cleanup's alignment is too large."); + void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N)); + return new (Buffer) T(N, A...); + } + + void pushCopyOfCleanup(CleanupKind Kind, const void *Cleanup, size_t Size) { + void *Buffer = pushCleanup(Kind, Size); + std::memcpy(Buffer, Cleanup, Size); + } + + void setCGF(CIRGenFunction *inCGF) { CGF = inCGF; } + + /// Pops a cleanup scope off the stack. This is private to CGCleanup.cpp. + void popCleanup(); + + /// Push a set of catch handlers on the stack. The catch is + /// uninitialized and will need to have the given number of handlers + /// set on it. + class EHCatchScope *pushCatch(unsigned NumHandlers); + + /// Pops a catch scope off the stack. This is private to CGException.cpp. + void popCatch(); + + /// Push an exceptions filter on the stack. + class EHFilterScope *pushFilter(unsigned NumFilters); + + /// Pops an exceptions filter off the stack. + void popFilter(); + + /// Push a terminate handler on the stack. + void pushTerminate(); + + /// Pops a terminate handler off the stack. + void popTerminate(); + + // Returns true iff the current scope is either empty or contains only + // lifetime markers, i.e. no real cleanup code + bool containsOnlyLifetimeMarkers(stable_iterator Old) const; + + /// Determines whether the exception-scopes stack is empty. + bool empty() const { return StartOfData == EndOfBuffer; } + + bool requiresLandingPad() const; + + /// Determines whether there are any normal cleanups on the stack. + bool hasNormalCleanups() const { + return InnermostNormalCleanup != stable_end(); + } + + /// Returns the innermost normal cleanup on the stack, or + /// stable_end() if there are no normal cleanups. + stable_iterator getInnermostNormalCleanup() const { + return InnermostNormalCleanup; + } + stable_iterator getInnermostActiveNormalCleanup() const; + + stable_iterator getInnermostEHScope() const { + return InnermostEHScope; + } + + + /// An unstable reference to a scope-stack depth. Invalidated by + /// pushes but not pops. + class iterator; + + /// Returns an iterator pointing to the innermost EH scope. + iterator begin() const; + + /// Returns an iterator pointing to the outermost EH scope. + iterator end() const; + + /// Create a stable reference to the top of the EH stack. The + /// returned reference is valid until that scope is popped off the + /// stack. + stable_iterator stable_begin() const { + return stable_iterator(EndOfBuffer - StartOfData); + } + + /// Create a stable reference to the bottom of the EH stack. + static stable_iterator stable_end() { + return stable_iterator(0); + } + + /// Translates an iterator into a stable_iterator. + stable_iterator stabilize(iterator it) const; + + /// Turn a stable reference to a scope depth into a unstable pointer + /// to the EH stack. + iterator find(stable_iterator save) const; + + /// Add a branch fixup to the current cleanup scope. + BranchFixup &addBranchFixup() { + assert(hasNormalCleanups() && "adding fixup in scope without cleanups"); + BranchFixups.push_back(BranchFixup()); + return BranchFixups.back(); + } + + unsigned getNumBranchFixups() const { return BranchFixups.size(); } + BranchFixup &getBranchFixup(unsigned I) { + assert(I < getNumBranchFixups()); + return BranchFixups[I]; + } + + /// Pops lazily-removed fixups from the end of the list. This + /// should only be called by procedures which have just popped a + /// cleanup or resolved one or more fixups. + void popNullFixups(); + + /// Clears the branch-fixups list. This should only be called by + /// ResolveAllBranchFixups. + void clearFixups() { BranchFixups.clear(); } +}; + +} // namespace cir + +#endif From 95446bcea95e724730469c88c23d8b9982050519 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 30 Mar 2023 14:10:27 -0700 Subject: [PATCH 0849/2301] [CIR][CIRGen] Introduce CIRGenCleanup.h based on CGCleanup.h This first naive version substitutes `llvm::` stuff for moral equivalents from `mlir::`, which we expect to change as we move forward with better understanding of how exception related scopes should work in CIR. --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenCleanup.h | 618 ++++++++++++++++++++++++ 2 files changed, 619 insertions(+) create mode 100644 clang/lib/CIR/CodeGen/CIRGenCleanup.h diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 791ddb5a92bc..30fd7fa54e9d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -16,6 +16,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenCleanup.h" #include "CIRGenFunction.h" using namespace cir; diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h new file mode 100644 index 000000000000..f72038808626 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -0,0 +1,618 @@ +//===-- CIRGenCleanup.h - Classes for cleanups CIR generation ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// These classes support the generation of CIR for cleanups, initially based +// on LLVM IR cleanup handling, but ought to change as CIR evolves. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CGCLEANUP_H +#define LLVM_CLANG_LIB_CIR_CODEGEN_CGCLEANUP_H + +#include "EHScopeStack.h" + +#include "Address.h" +#include "mlir/IR/Value.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" + +namespace clang { +class FunctionDecl; +} + +namespace cir { +class CIRGenModule; +class CIRGenFunction; + +/// The MS C++ ABI needs a pointer to RTTI data plus some flags to describe the +/// type of a catch handler, so we use this wrapper. +struct CatchTypeInfo { + mlir::Value RTTI; + unsigned Flags; +}; + +/// A protected scope for zero-cost EH handling. +class EHScope { + mlir::Block *CachedLandingPad; + mlir::Block *CachedEHDispatchBlock; + + EHScopeStack::stable_iterator EnclosingEHScope; + + class CommonBitFields { + friend class EHScope; + unsigned Kind : 3; + }; + enum { NumCommonBits = 3 }; + +protected: + class CatchBitFields { + friend class EHCatchScope; + unsigned : NumCommonBits; + + unsigned NumHandlers : 32 - NumCommonBits; + }; + + class CleanupBitFields { + friend class EHCleanupScope; + unsigned : NumCommonBits; + + /// Whether this cleanup needs to be run along normal edges. + unsigned IsNormalCleanup : 1; + + /// Whether this cleanup needs to be run along exception edges. + unsigned IsEHCleanup : 1; + + /// Whether this cleanup is currently active. + unsigned IsActive : 1; + + /// Whether this cleanup is a lifetime marker + unsigned IsLifetimeMarker : 1; + + /// Whether the normal cleanup should test the activation flag. + unsigned TestFlagInNormalCleanup : 1; + + /// Whether the EH cleanup should test the activation flag. + unsigned TestFlagInEHCleanup : 1; + + /// The amount of extra storage needed by the Cleanup. + /// Always a multiple of the scope-stack alignment. + unsigned CleanupSize : 12; + }; + + class FilterBitFields { + friend class EHFilterScope; + unsigned : NumCommonBits; + + unsigned NumFilters : 32 - NumCommonBits; + }; + + union { + CommonBitFields CommonBits; + CatchBitFields CatchBits; + CleanupBitFields CleanupBits; + FilterBitFields FilterBits; + }; + +public: + enum Kind { Cleanup, Catch, Terminate, Filter }; + + EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope) + : CachedLandingPad(nullptr), CachedEHDispatchBlock(nullptr), + EnclosingEHScope(enclosingEHScope) { + CommonBits.Kind = kind; + } + + Kind getKind() const { return static_cast(CommonBits.Kind); } + + mlir::Block *getCachedLandingPad() const { return CachedLandingPad; } + + void setCachedLandingPad(mlir::Block *block) { CachedLandingPad = block; } + + mlir::Block *getCachedEHDispatchBlock() const { + return CachedEHDispatchBlock; + } + + void setCachedEHDispatchBlock(mlir::Block *block) { + CachedEHDispatchBlock = block; + } + + bool hasEHBranches() const { + if (mlir::Block *block = getCachedEHDispatchBlock()) + return !block->use_empty(); + return false; + } + + EHScopeStack::stable_iterator getEnclosingEHScope() const { + return EnclosingEHScope; + } +}; + +/// A scope which attempts to handle some, possibly all, types of +/// exceptions. +/// +/// Objective C \@finally blocks are represented using a cleanup scope +/// after the catch scope. +class EHCatchScope : public EHScope { + // In effect, we have a flexible array member + // Handler Handlers[0]; + // But that's only standard in C99, not C++, so we have to do + // annoying pointer arithmetic instead. + +public: + struct Handler { + /// A type info value, or null (C++ null, not an LLVM null pointer) + /// for a catch-all. + CatchTypeInfo Type; + + /// The catch handler for this type. + mlir::Block *Block; + + bool isCatchAll() const { return Type.RTTI == nullptr; } + }; + +private: + friend class EHScopeStack; + + Handler *getHandlers() { return reinterpret_cast(this + 1); } + + const Handler *getHandlers() const { + return reinterpret_cast(this + 1); + } + +public: + static size_t getSizeForNumHandlers(unsigned N) { + return sizeof(EHCatchScope) + N * sizeof(Handler); + } + + EHCatchScope(unsigned numHandlers, + EHScopeStack::stable_iterator enclosingEHScope) + : EHScope(Catch, enclosingEHScope) { + CatchBits.NumHandlers = numHandlers; + assert(CatchBits.NumHandlers == numHandlers && "NumHandlers overflow?"); + } + + unsigned getNumHandlers() const { return CatchBits.NumHandlers; } + + void setCatchAllHandler(unsigned I, mlir::Block *Block) { + setHandler(I, CatchTypeInfo{nullptr, 0}, Block); + } + + void setHandler(unsigned I, mlir::Value Type, mlir::Block *Block) { + assert(I < getNumHandlers()); + getHandlers()[I].Type = CatchTypeInfo{Type, 0}; + getHandlers()[I].Block = Block; + } + + void setHandler(unsigned I, CatchTypeInfo Type, mlir::Block *Block) { + assert(I < getNumHandlers()); + getHandlers()[I].Type = Type; + getHandlers()[I].Block = Block; + } + + const Handler &getHandler(unsigned I) const { + assert(I < getNumHandlers()); + return getHandlers()[I]; + } + + // Clear all handler blocks. + // FIXME: it's better to always call clearHandlerBlocks in DTOR and have a + // 'takeHandler' or some such function which removes ownership from the + // EHCatchScope object if the handlers should live longer than EHCatchScope. + void clearHandlerBlocks() { + for (unsigned I = 0, N = getNumHandlers(); I != N; ++I) + delete getHandler(I).Block; + } + + typedef const Handler *iterator; + iterator begin() const { return getHandlers(); } + iterator end() const { return getHandlers() + getNumHandlers(); } + + static bool classof(const EHScope *Scope) { + return Scope->getKind() == Catch; + } +}; + +/// A cleanup scope which generates the cleanup blocks lazily. +class alignas(8) EHCleanupScope : public EHScope { + /// The nearest normal cleanup scope enclosing this one. + EHScopeStack::stable_iterator EnclosingNormal; + + /// The nearest EH scope enclosing this one. + EHScopeStack::stable_iterator EnclosingEH; + + /// The dual entry/exit block along the normal edge. This is lazily + /// created if needed before the cleanup is popped. + mlir::Block *NormalBlock; + + /// An optional i1 variable indicating whether this cleanup has been + /// activated yet. + Address ActiveFlag; + + /// Extra information required for cleanups that have resolved + /// branches through them. This has to be allocated on the side + /// because everything on the cleanup stack has be trivially + /// movable. + struct ExtInfo { + /// The destinations of normal branch-afters and branch-throughs. + llvm::SmallPtrSet Branches; + + /// Normal branch-afters. + llvm::SmallVector, 4> BranchAfters; + }; + mutable struct ExtInfo *ExtInfo; + + /// The number of fixups required by enclosing scopes (not including + /// this one). If this is the top cleanup scope, all the fixups + /// from this index onwards belong to this scope. + unsigned FixupDepth; + + struct ExtInfo &getExtInfo() { + if (!ExtInfo) + ExtInfo = new struct ExtInfo(); + return *ExtInfo; + } + + const struct ExtInfo &getExtInfo() const { + if (!ExtInfo) + ExtInfo = new struct ExtInfo(); + return *ExtInfo; + } + +public: + /// Gets the size required for a lazy cleanup scope with the given + /// cleanup-data requirements. + static size_t getSizeForCleanupSize(size_t Size) { + return sizeof(EHCleanupScope) + Size; + } + + size_t getAllocatedSize() const { + return sizeof(EHCleanupScope) + CleanupBits.CleanupSize; + } + + EHCleanupScope(bool isNormal, bool isEH, unsigned cleanupSize, + unsigned fixupDepth, + EHScopeStack::stable_iterator enclosingNormal, + EHScopeStack::stable_iterator enclosingEH) + : EHScope(EHScope::Cleanup, enclosingEH), + EnclosingNormal(enclosingNormal), NormalBlock(nullptr), + ActiveFlag(Address::invalid()), ExtInfo(nullptr), + FixupDepth(fixupDepth) { + CleanupBits.IsNormalCleanup = isNormal; + CleanupBits.IsEHCleanup = isEH; + CleanupBits.IsActive = true; + CleanupBits.IsLifetimeMarker = false; + CleanupBits.TestFlagInNormalCleanup = false; + CleanupBits.TestFlagInEHCleanup = false; + CleanupBits.CleanupSize = cleanupSize; + + assert(CleanupBits.CleanupSize == cleanupSize && "cleanup size overflow"); + } + + void Destroy() { delete ExtInfo; } + // Objects of EHCleanupScope are not destructed. Use Destroy(). + ~EHCleanupScope() = delete; + + bool isNormalCleanup() const { return CleanupBits.IsNormalCleanup; } + mlir::Block *getNormalBlock() const { return NormalBlock; } + void setNormalBlock(mlir::Block *BB) { NormalBlock = BB; } + + bool isEHCleanup() const { return CleanupBits.IsEHCleanup; } + + bool isActive() const { return CleanupBits.IsActive; } + void setActive(bool A) { CleanupBits.IsActive = A; } + + bool isLifetimeMarker() const { return CleanupBits.IsLifetimeMarker; } + void setLifetimeMarker() { CleanupBits.IsLifetimeMarker = true; } + + bool hasActiveFlag() const { return ActiveFlag.isValid(); } + Address getActiveFlag() const { return ActiveFlag; } + void setActiveFlag(Address Var) { + assert(Var.getAlignment().isOne()); + ActiveFlag = Var; + } + + void setTestFlagInNormalCleanup() { + CleanupBits.TestFlagInNormalCleanup = true; + } + bool shouldTestFlagInNormalCleanup() const { + return CleanupBits.TestFlagInNormalCleanup; + } + + void setTestFlagInEHCleanup() { CleanupBits.TestFlagInEHCleanup = true; } + bool shouldTestFlagInEHCleanup() const { + return CleanupBits.TestFlagInEHCleanup; + } + + unsigned getFixupDepth() const { return FixupDepth; } + EHScopeStack::stable_iterator getEnclosingNormalCleanup() const { + return EnclosingNormal; + } + + size_t getCleanupSize() const { return CleanupBits.CleanupSize; } + void *getCleanupBuffer() { return this + 1; } + + EHScopeStack::Cleanup *getCleanup() { + return reinterpret_cast(getCleanupBuffer()); + } + + /// True if this cleanup scope has any branch-afters or branch-throughs. + bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); } + + /// Add a branch-after to this cleanup scope. A branch-after is a + /// branch from a point protected by this (normal) cleanup to a + /// point in the normal cleanup scope immediately containing it. + /// For example, + /// for (;;) { A a; break; } + /// contains a branch-after. + /// + /// Branch-afters each have their own destination out of the + /// cleanup, guaranteed distinct from anything else threaded through + /// it. Therefore branch-afters usually force a switch after the + /// cleanup. + void addBranchAfter(mlir::Value Index, mlir::Block *Block) { + struct ExtInfo &ExtInfo = getExtInfo(); + if (ExtInfo.Branches.insert(Block).second) + ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index)); + } + + /// Return the number of unique branch-afters on this scope. + unsigned getNumBranchAfters() const { + return ExtInfo ? ExtInfo->BranchAfters.size() : 0; + } + + mlir::Block *getBranchAfterBlock(unsigned I) const { + assert(I < getNumBranchAfters()); + return ExtInfo->BranchAfters[I].first; + } + + mlir::Value getBranchAfterIndex(unsigned I) const { + assert(I < getNumBranchAfters()); + return ExtInfo->BranchAfters[I].second; + } + + /// Add a branch-through to this cleanup scope. A branch-through is + /// a branch from a scope protected by this (normal) cleanup to an + /// enclosing scope other than the immediately-enclosing normal + /// cleanup scope. + /// + /// In the following example, the branch through B's scope is a + /// branch-through, while the branch through A's scope is a + /// branch-after: + /// for (;;) { A a; B b; break; } + /// + /// All branch-throughs have a common destination out of the + /// cleanup, one possibly shared with the fall-through. Therefore + /// branch-throughs usually don't force a switch after the cleanup. + /// + /// \return true if the branch-through was new to this scope + bool addBranchThrough(mlir::Block *Block) { + return getExtInfo().Branches.insert(Block).second; + } + + /// Determines if this cleanup scope has any branch throughs. + bool hasBranchThroughs() const { + if (!ExtInfo) + return false; + return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size()); + } + + static bool classof(const EHScope *Scope) { + return (Scope->getKind() == Cleanup); + } +}; +// NOTE: there's a bunch of different data classes tacked on after an +// EHCleanupScope. It is asserted (in EHScopeStack::pushCleanup*) that +// they don't require greater alignment than ScopeStackAlignment. So, +// EHCleanupScope ought to have alignment equal to that -- not more +// (would be misaligned by the stack allocator), and not less (would +// break the appended classes). +static_assert(alignof(EHCleanupScope) == EHScopeStack::ScopeStackAlignment, + "EHCleanupScope expected alignment"); + +/// An exceptions scope which filters exceptions thrown through it. +/// Only exceptions matching the filter types will be permitted to be +/// thrown. +/// +/// This is used to implement C++ exception specifications. +class EHFilterScope : public EHScope { + // Essentially ends in a flexible array member: + // mlir::Value FilterTypes[0]; + + mlir::Value *getFilters() { + return reinterpret_cast(this + 1); + } + + mlir::Value const *getFilters() const { + return reinterpret_cast(this + 1); + } + +public: + EHFilterScope(unsigned numFilters) + : EHScope(Filter, EHScopeStack::stable_end()) { + FilterBits.NumFilters = numFilters; + assert(FilterBits.NumFilters == numFilters && "NumFilters overflow"); + } + + static size_t getSizeForNumFilters(unsigned numFilters) { + return sizeof(EHFilterScope) + numFilters * sizeof(mlir::Value); + } + + unsigned getNumFilters() const { return FilterBits.NumFilters; } + + void setFilter(unsigned i, mlir::Value filterValue) { + assert(i < getNumFilters()); + getFilters()[i] = filterValue; + } + + mlir::Value getFilter(unsigned i) const { + assert(i < getNumFilters()); + return getFilters()[i]; + } + + static bool classof(const EHScope *scope) { + return scope->getKind() == Filter; + } +}; + +/// An exceptions scope which calls std::terminate if any exception +/// reaches it. +class EHTerminateScope : public EHScope { +public: + EHTerminateScope(EHScopeStack::stable_iterator enclosingEHScope) + : EHScope(Terminate, enclosingEHScope) {} + static size_t getSize() { return sizeof(EHTerminateScope); } + + static bool classof(const EHScope *scope) { + return scope->getKind() == Terminate; + } +}; + +/// A non-stable pointer into the scope stack. +class EHScopeStack::iterator { + char *Ptr; + + friend class EHScopeStack; + explicit iterator(char *Ptr) : Ptr(Ptr) {} + +public: + iterator() : Ptr(nullptr) {} + + EHScope *get() const { return reinterpret_cast(Ptr); } + + EHScope *operator->() const { return get(); } + EHScope &operator*() const { return *get(); } + + iterator &operator++() { + size_t Size; + switch (get()->getKind()) { + case EHScope::Catch: + Size = EHCatchScope::getSizeForNumHandlers( + static_cast(get())->getNumHandlers()); + break; + + case EHScope::Filter: + Size = EHFilterScope::getSizeForNumFilters( + static_cast(get())->getNumFilters()); + break; + + case EHScope::Cleanup: + Size = static_cast(get())->getAllocatedSize(); + break; + + case EHScope::Terminate: + Size = EHTerminateScope::getSize(); + break; + } + Ptr += llvm::alignTo(Size, ScopeStackAlignment); + return *this; + } + + iterator next() { + iterator copy = *this; + ++copy; + return copy; + } + + iterator operator++(int) { + iterator copy = *this; + operator++(); + return copy; + } + + bool encloses(iterator other) const { return Ptr >= other.Ptr; } + bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; } + + bool operator==(iterator other) const { return Ptr == other.Ptr; } + bool operator!=(iterator other) const { return Ptr != other.Ptr; } +}; + +inline EHScopeStack::iterator EHScopeStack::begin() const { + return iterator(StartOfData); +} + +inline EHScopeStack::iterator EHScopeStack::end() const { + return iterator(EndOfBuffer); +} + +inline void EHScopeStack::popCatch() { + assert(!empty() && "popping exception stack when not empty"); + + EHCatchScope &scope = llvm::cast(*begin()); + InnermostEHScope = scope.getEnclosingEHScope(); + deallocate(EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers())); +} + +inline void EHScopeStack::popTerminate() { + assert(!empty() && "popping exception stack when not empty"); + + EHTerminateScope &scope = llvm::cast(*begin()); + InnermostEHScope = scope.getEnclosingEHScope(); + deallocate(EHTerminateScope::getSize()); +} + +inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const { + assert(sp.isValid() && "finding invalid savepoint"); + assert(sp.Size <= stable_begin().Size && "finding savepoint after pop"); + return iterator(EndOfBuffer - sp.Size); +} + +inline EHScopeStack::stable_iterator +EHScopeStack::stabilize(iterator ir) const { + assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer); + return stable_iterator(EndOfBuffer - ir.Ptr); +} + +/// The exceptions personality for a function. +struct EHPersonality { + const char *PersonalityFn; + + // If this is non-null, this personality requires a non-standard + // function for rethrowing an exception after a catchall cleanup. + // This function must have prototype void(void*). + const char *CatchallRethrowFn; + + static const EHPersonality &get(CIRGenModule &CGM, + const clang::FunctionDecl *FD); + static const EHPersonality &get(CIRGenFunction &CGF); + + static const EHPersonality GNU_C; + static const EHPersonality GNU_C_SJLJ; + static const EHPersonality GNU_C_SEH; + static const EHPersonality GNU_ObjC; + static const EHPersonality GNU_ObjC_SJLJ; + static const EHPersonality GNU_ObjC_SEH; + static const EHPersonality GNUstep_ObjC; + static const EHPersonality GNU_ObjCXX; + static const EHPersonality NeXT_ObjC; + static const EHPersonality GNU_CPlusPlus; + static const EHPersonality GNU_CPlusPlus_SJLJ; + static const EHPersonality GNU_CPlusPlus_SEH; + static const EHPersonality MSVC_except_handler; + static const EHPersonality MSVC_C_specific_handler; + static const EHPersonality MSVC_CxxFrameHandler3; + static const EHPersonality GNU_Wasm_CPlusPlus; + static const EHPersonality XL_CPlusPlus; + + /// Does this personality use landingpads or the family of pad instructions + /// designed to form funclets? + bool usesFuncletPads() const { + return isMSVCPersonality() || isWasmPersonality(); + } + + bool isMSVCPersonality() const { + return this == &MSVC_except_handler || this == &MSVC_C_specific_handler || + this == &MSVC_CxxFrameHandler3; + } + + bool isWasmPersonality() const { return this == &GNU_Wasm_CPlusPlus; } + + bool isMSVCXXPersonality() const { return this == &MSVC_CxxFrameHandler3; } +}; +} // namespace cir + +#endif From aa51455d51576c456170534b5a49f3850eb13b66 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 30 Mar 2023 14:27:05 -0700 Subject: [PATCH 0850/2301] [CIR][CIRGen][EHScope] Add allocate and populate pushCleanup This is more boilerplate and make us move forward with building some interesting dtor examples, more is needed until we land the first testcase though. --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 64 +++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenCleanup.h | 3 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- 3 files changed, 64 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 30fd7fa54e9d..dcaf39b7833a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -57,7 +57,67 @@ void CIRGenFunction::buildCXXTemporary(const CXXTemporary *Temporary, void EHScopeStack::Cleanup::anchor() {} +/// Push an entry of the given size onto this protected-scope stack. +char *EHScopeStack::allocate(size_t Size) { + Size = llvm::alignTo(Size, ScopeStackAlignment); + if (!StartOfBuffer) { + unsigned Capacity = 1024; + while (Capacity < Size) + Capacity *= 2; + StartOfBuffer = new char[Capacity]; + StartOfData = EndOfBuffer = StartOfBuffer + Capacity; + } else if (static_cast(StartOfData - StartOfBuffer) < Size) { + unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer; + unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer); + + unsigned NewCapacity = CurrentCapacity; + do { + NewCapacity *= 2; + } while (NewCapacity < UsedCapacity + Size); + + char *NewStartOfBuffer = new char[NewCapacity]; + char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity; + char *NewStartOfData = NewEndOfBuffer - UsedCapacity; + memcpy(NewStartOfData, StartOfData, UsedCapacity); + delete[] StartOfBuffer; + StartOfBuffer = NewStartOfBuffer; + EndOfBuffer = NewEndOfBuffer; + StartOfData = NewStartOfData; + } + + assert(StartOfBuffer + Size <= StartOfData); + StartOfData -= Size; + return StartOfData; +} + void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) { - llvm_unreachable("NYI"); - return nullptr; + char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size)); + bool IsNormalCleanup = Kind & NormalCleanup; + bool IsEHCleanup = Kind & EHCleanup; + bool IsLifetimeMarker = Kind & LifetimeMarker; + + // Per C++ [except.terminate], it is implementation-defined whether none, + // some, or all cleanups are called before std::terminate. Thus, when + // terminate is the current EH scope, we may skip adding any EH cleanup + // scopes. + if (InnermostEHScope != stable_end() && + find(InnermostEHScope)->getKind() == EHScope::Terminate) + IsEHCleanup = false; + + EHCleanupScope *Scope = new (Buffer) + EHCleanupScope(IsNormalCleanup, IsEHCleanup, Size, BranchFixups.size(), + InnermostNormalCleanup, InnermostEHScope); + if (IsNormalCleanup) + InnermostNormalCleanup = stable_begin(); + if (IsEHCleanup) + InnermostEHScope = stable_begin(); + if (IsLifetimeMarker) + llvm_unreachable("NYI"); + + // With Windows -EHa, Invoke llvm.seh.scope.begin() for EHCleanup + if (CGF->getLangOpts().EHAsynch && IsEHCleanup && !IsLifetimeMarker && + CGF->getTarget().getCXXABI().isMicrosoft()) + llvm_unreachable("NYI"); + + return Scope->getCleanupBuffer(); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h index f72038808626..b9f7ac2eeaae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -14,9 +14,8 @@ #ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CGCLEANUP_H #define LLVM_CLANG_LIB_CIR_CODEGEN_CGCLEANUP_H -#include "EHScopeStack.h" - #include "Address.h" +#include "EHScopeStack.h" #include "mlir/IR/Value.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index e35ace5753bd..b65bae9c6b88 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -32,7 +32,7 @@ CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, CurFPFeatures(CGM.getLangOpts()), ShouldEmitLifetimeMarkers(false) { if (!suppressNewContext) CGM.getCXXABI().getMangleContext().startNewFunction(); - // TODO(CIR): EHStack.setCGF(this); + EHStack.setCGF(this); // TODO(CIR): SetFastMathFlags(CurFPFeatures); } From 0a9df4c856ae0ec491a1a5aed8d093858f338311 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 30 Mar 2023 16:49:27 -0700 Subject: [PATCH 0851/2301] [CIR][CIRGen][NFC] Add more logic for building ctor and base initialization This is a building block, still working towards introducing a testcase. - Fix overly conservative assertion in Address.h - Add logic to build ctor and base initialization - Add some helpers while here. --- clang/lib/CIR/CodeGen/Address.h | 10 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 126 +++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 22 +++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 16 +++ 4 files changed, 163 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 57d96aad7f66..f075cda8b957 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -36,13 +36,11 @@ class Address { Address(mlir::Value pointer, mlir::Type elementType, clang::CharUnits alignment) : Pointer(pointer), ElementType(elementType), Alignment(alignment) { - auto ptrTy = pointer.getType().dyn_cast(); - assert(ptrTy && "Expected cir.ptr type"); + assert(pointer.getType().isa() && + "Expected cir.ptr type"); - assert(pointer != nullptr && "Pointer cannot be null"); - assert(elementType != nullptr && "Pointer cannot be null"); - assert(ptrTy.getPointee() == ElementType && - "Incorrect pointer element type"); + assert(pointer && "Pointer cannot be null"); + assert(elementType && "Element type cannot be null"); assert(!alignment.isZero() && "Alignment cannot be zero"); } Address(mlir::Value pointer, clang::CharUnits alignment) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 88b431ace1ce..ced741f3398f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -14,6 +14,7 @@ #include "CIRGenFunction.h" #include "UnimplementedFeatureGuarding.h" +#include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/RecordLayout.h" #include "clang/Basic/TargetBuiltins.h" @@ -444,8 +445,122 @@ class AssignmentMemcpyizer : public FieldMemcpyizer { }; } // namespace -/// buildCtorPrologue - This routine generates necessary code to initialize base -/// classes and non-static data members belonging to this constructor. +static bool isInitializerOfDynamicClass(const CXXCtorInitializer *BaseInit) { + const Type *BaseType = BaseInit->getBaseClass(); + const auto *BaseClassDecl = + cast(BaseType->castAs()->getDecl()); + return BaseClassDecl->isDynamicClass(); +} + +namespace { +/// Call the destructor for a direct base class. +struct CallBaseDtor final : EHScopeStack::Cleanup { + const CXXRecordDecl *BaseClass; + bool BaseIsVirtual; + CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual) + : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {} + + void Emit(CIRGenFunction &CGF, Flags flags) override { + llvm_unreachable("NYI"); + } +}; + +/// A visitor which checks whether an initializer uses 'this' in a +/// way which requires the vtable to be properly set. +struct DynamicThisUseChecker + : ConstEvaluatedExprVisitor { + typedef ConstEvaluatedExprVisitor super; + + bool UsesThis; + + DynamicThisUseChecker(const ASTContext &C) : super(C), UsesThis(false) {} + + // Black-list all explicit and implicit references to 'this'. + // + // Do we need to worry about external references to 'this' derived + // from arbitrary code? If so, then anything which runs arbitrary + // external code might potentially access the vtable. + void VisitCXXThisExpr(const CXXThisExpr *E) { UsesThis = true; } +}; +} // end anonymous namespace + +static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) { + DynamicThisUseChecker Checker(C); + Checker.Visit(Init); + return Checker.UsesThis; +} + +/// Gets the address of a direct base class within a complete object. +/// This should only be used for (1) non-virtual bases or (2) virtual bases +/// when the type is known to be complete (e.g. in complete destructors). +/// +/// The object pointed to by 'This' is assumed to be non-null. +Address CIRGenFunction::getAddressOfDirectBaseInCompleteClass( + mlir::Location loc, Address This, const CXXRecordDecl *Derived, + const CXXRecordDecl *Base, bool BaseIsVirtual) { + // 'this' must be a pointer (in some address space) to Derived. + assert(This.getElementType() == ConvertType(Derived)); + + // Compute the offset of the virtual base. + CharUnits Offset; + const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived); + if (BaseIsVirtual) + Offset = Layout.getVBaseClassOffset(Base); + else + Offset = Layout.getBaseClassOffset(Base); + + // Shift and cast down to the base type. + // TODO: for complete types, this should be possible with a GEP. + Address V = This; + if (!Offset.isZero()) { + // TODO(cir): probably create a new operation to account for + // down casting when the offset isn't zero. + llvm_unreachable("NYI"); + } + V = builder.createElementBitCast(loc, V, ConvertType(Base)); + return V; +} + +static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, + const CXXRecordDecl *ClassDecl, + CXXCtorInitializer *BaseInit) { + assert(BaseInit->isBaseInitializer() && "Must have base initializer!"); + + Address ThisPtr = CGF.LoadCXXThisAddress(); + + const Type *BaseType = BaseInit->getBaseClass(); + const auto *BaseClassDecl = + cast(BaseType->castAs()->getDecl()); + + bool isBaseVirtual = BaseInit->isBaseVirtual(); + + // If the initializer for the base (other than the constructor + // itself) accesses 'this' in any way, we need to initialize the + // vtables. + if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit())) + CGF.initializeVTablePointers(ClassDecl); + + // We can pretend to be a complete class because it only matters for + // virtual bases, and we only do virtual bases for complete ctors. + Address V = CGF.getAddressOfDirectBaseInCompleteClass( + loc, ThisPtr, ClassDecl, BaseClassDecl, isBaseVirtual); + AggValueSlot AggSlot = AggValueSlot::forAddr( + V, Qualifiers(), AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, + CGF.getOverlapForBaseInit(ClassDecl, BaseClassDecl, isBaseVirtual)); + + CGF.buildAggExpr(BaseInit->getInit(), AggSlot); + + if (CGF.CGM.getLangOpts().Exceptions && + !BaseClassDecl->hasTrivialDestructor()) { + llvm_unreachable("NYI"); + CGF.EHStack.pushCleanup(EHCleanup, BaseClassDecl, + isBaseVirtual); + } +} + +/// This routine generates necessary code to initialize base classes and +/// non-static data members belonging to this constructor. void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType CtorType, FunctionArgList &Args) { @@ -492,10 +607,11 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, for (; B != E && (*B)->isBaseInitializer(); B++) { assert(!(*B)->isBaseVirtual()); - if (CGM.getCodeGenOpts().StrictVTablePointers) + if (CGM.getCodeGenOpts().StrictVTablePointers && + CGM.getCodeGenOpts().OptimizationLevel > 0 && + isInitializerOfDynamicClass(*B)) llvm_unreachable("NYI"); - - llvm_unreachable("NYI"); + buildBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); } CXXThisValue = OldThis; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 3271a7036039..fa2277fb3d54 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -17,6 +17,7 @@ #include "CIRGenValue.h" #include "UnimplementedFeatureGuarding.h" +#include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" using namespace cir; @@ -542,6 +543,27 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, llvm_unreachable("NYI"); } +AggValueSlot::Overlap_t CIRGenFunction::getOverlapForBaseInit( + const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) { + // If the most-derived object is a field declared with [[no_unique_address]], + // the tail padding of any virtual base could be reused for other subobjects + // of that field's class. + if (IsVirtual) + return AggValueSlot::MayOverlap; + + // If the base class is laid out entirely within the nvsize of the derived + // class, its tail padding cannot yet be initialized, so we can issue + // stores at the full width of the base class. + const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); + if (Layout.getBaseClassOffset(BaseRD) + + getContext().getASTRecordLayout(BaseRD).getSize() <= + Layout.getNonVirtualSize()) + return AggValueSlot::DoesNotOverlap; + + // The tail padding may contain values we need to preserve. + return AggValueSlot::MayOverlap; +} + void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { assert(E && CIRGenFunction::hasAggregateEvaluationKind(E->getType()) && "Invalid aggregate expression to emit"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index c71a66f84216..14a08dd177dd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -626,6 +626,9 @@ class CIRGenFunction { std::string getCounterAggTmpAsString(); mlir::Type ConvertType(clang::QualType T); + mlir::Type ConvertType(const TypeDecl *T) { + return ConvertType(getContext().getTypeDeclType(T)); + } /// Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(clang::QualType T); @@ -894,6 +897,12 @@ class CIRGenFunction { return AggValueSlot::DoesNotOverlap; } + /// Determine whether a base class initialization may overlap some other + /// object. + AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, + const CXXRecordDecl *BaseRD, + bool IsVirtual); + /// Get an appropriate 'undef' rvalue for the given type. /// TODO: What's the equivalent for MLIR? Currently we're only using this for /// void types so it just returns RValue::get(nullptr) but it'll need @@ -1170,6 +1179,13 @@ class CIRGenFunction { } Address LoadCXXThisAddress(); + /// Convert the given pointer to a complete class to the given direct base. + Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, + Address Value, + const CXXRecordDecl *Derived, + const CXXRecordDecl *Base, + bool BaseIsVirtual); + /// Emit code for the start of a function. /// \param Loc The location to be associated with the function. /// \param StartLoc The location of the function body. From 4be3b730165c5ac158c6b1871bca5ffa9d73acf7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 3 Apr 2023 14:06:58 -0700 Subject: [PATCH 0852/2301] [CIR][CIRGen] Build ctors codegen for CXXConstructExpr::CK_NonVirtualBase Testcase coming up when all the pieces are here. --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 4 +--- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 5 ++++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index ced741f3398f..19bb59afa3fb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -552,11 +552,9 @@ static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, CGF.buildAggExpr(BaseInit->getInit(), AggSlot); if (CGF.CGM.getLangOpts().Exceptions && - !BaseClassDecl->hasTrivialDestructor()) { - llvm_unreachable("NYI"); + !BaseClassDecl->hasTrivialDestructor()) CGF.EHStack.pushCleanup(EHCleanup, BaseClassDecl, isBaseVirtual); - } } /// This routine generates necessary code to initialize base classes and diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 550d1959418a..8a0b44116a32 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -311,9 +311,12 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, Type = Ctor_Complete; break; case CXXConstructionKind::Delegating: + llvm_unreachable("NYI"); case CXXConstructionKind::VirtualBase: + llvm_unreachable("NYI"); case CXXConstructionKind::NonVirtualBase: - assert(false && "Delegating, Virtualbae and NonVirtualBase ctorkind NYI"); + Type = Ctor_Base; + break; } buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); From d0cf02c37aba912f477a6696424865cf817cf481 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 3 Apr 2023 16:16:53 -0700 Subject: [PATCH 0853/2301] [CIR][CIRGen][NFC] Cleanup CIRGenBuilder a bit --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 86 ++++++++++--------- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 3 files changed, 48 insertions(+), 41 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index c334993199c1..d1c66bd4b88e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -100,68 +100,64 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Type helpers // ------------ // + mlir::Type getInt8Ty() { return mlir::IntegerType::get(getContext(), 8); } + mlir::Type getInt32Ty() { return mlir::IntegerType::get(getContext(), 32); } + mlir::cir::BoolType getBoolTy() { + return ::mlir::cir::BoolType::get(getContext()); + } + mlir::Type getVirtualFnPtrType([[maybe_unused]] bool isVarArg = false) { + // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special + // type so it's a bit more clear and C++ idiomatic. + auto fnTy = mlir::FunctionType::get(getContext(), {}, {getInt32Ty()}); + assert(!UnimplementedFeature::isVarArg()); + return getPointerTo(getPointerTo(fnTy)); + } - // Fetch the type representing a pointer to an 8-bit integer value. + // Fetch the type representing a pointer to integer values. mlir::cir::PointerType getInt8PtrTy(unsigned AddrSpace = 0) { - return mlir::cir::PointerType::get(getContext(), - mlir::IntegerType::get(getContext(), 8)); + return mlir::cir::PointerType::get(getContext(), getInt8Ty()); + } + mlir::cir::PointerType getInt32PtrTy(unsigned AddrSpace = 0) { + return mlir::cir::PointerType::get(getContext(), getInt32Ty()); + } + mlir::cir::PointerType getPointerTo(mlir::Type ty, + unsigned addressSpace = 0) { + assert(!UnimplementedFeature::addressSpace() && "NYI"); + return mlir::cir::PointerType::get(getContext(), ty); } - // Get a constant 32-bit value. + // + // Constant creation helpers + // ------------------------- + // mlir::cir::ConstantOp getInt32(uint32_t C, mlir::Location loc) { - auto int32Ty = mlir::IntegerType::get(getContext(), 32); + auto int32Ty = getInt32Ty(); return create(loc, int32Ty, mlir::IntegerAttr::get(int32Ty, C)); } - - // Get a bool mlir::Value getBool(bool state, mlir::Location loc) { return create( loc, getBoolTy(), mlir::BoolAttr::get(getContext(), state)); } - // Get the bool type - mlir::cir::BoolType getBoolTy() { - return ::mlir::cir::BoolType::get(getContext()); - } - - // Creates constant pointer for type ty. + // Creates constant nullptr for pointer type ty. mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { assert(ty.isa() && "expected cir.ptr"); return create( loc, ty, mlir::cir::NullAttr::get(getContext(), ty)); } - // Creates null value for type ty. + // Creates constant null value for integral type ty. mlir::cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { assert(ty.isa() && "NYI"); return create(loc, ty, mlir::IntegerAttr::get(ty, 0)); } - mlir::Value getBitcast(mlir::Location loc, mlir::Value src, - mlir::Type newTy) { - if (newTy == src.getType()) - return src; - return create(loc, newTy, mlir::cir::CastKind::bitcast, - src); - } - - mlir::cir::PointerType getPointerTo(mlir::Type ty, - unsigned addressSpace = 0) { - assert(!UnimplementedFeature::addressSpace() && "NYI"); - return mlir::cir::PointerType::get(getContext(), ty); - } - - /// Cast the element type of the given address to a different type, - /// preserving information like the alignment. - Address getElementBitCast(mlir::Location loc, Address Addr, mlir::Type Ty) { - assert(!UnimplementedFeature::addressSpace() && "NYI"); - auto ptrTy = getPointerTo(Ty); - return Address(getBitcast(loc, Addr.getPointer(), ptrTy), Ty, - Addr.getAlignment()); - } - + // + // Block handling helpers + // ---------------------- + // OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) { auto lastAlloca = std::find_if(block->rbegin(), block->rend(), [](mlir::Operation &op) { @@ -187,14 +183,24 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::CastKind::floating, v); } + /// Cast the element type of the given address to a different type, + /// preserving information like the alignment. cir::Address createElementBitCast(mlir::Location loc, cir::Address addr, mlir::Type destType) { if (destType == addr.getElementType()) return addr; - auto newPtrType = mlir::cir::PointerType::get(getContext(), destType); - auto cast = getBitcast(loc, addr.getPointer(), newPtrType); - return Address(cast, addr.getElementType(), addr.getAlignment()); + auto ptrTy = getPointerTo(destType); + return Address(createBitcast(loc, addr.getPointer(), ptrTy), destType, + addr.getAlignment()); + } + + mlir::Value createBitcast(mlir::Location loc, mlir::Value src, + mlir::Type newTy) { + if (newTy == src.getType()) + return src; + return create(loc, newTy, mlir::cir::CastKind::bitcast, + src); } mlir::Value createLoad(mlir::Location loc, Address addr) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index b25bbdc8e4c2..f38e467b00e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -168,7 +168,7 @@ LValue CIRGenFunction::buildLValueForFieldInitialization( // Make sure that the address is pointing to the right type. auto memTy = getTypes().convertTypeForMem(FieldType); - V = builder.getElementBitCast(getLoc(Field->getSourceRange()), V, memTy); + V = builder.createElementBitCast(getLoc(Field->getSourceRange()), V, memTy); // TODO: Generate TBAA information that describes this access as a structure // member access and not just an access to an object of the field's type. This diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 054c7365950b..802ca9e765d1 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -63,6 +63,7 @@ struct UnimplementedFeature { static bool openCL() { return false; } static bool openMP() { return false; } static bool ehStack() { return false; } + static bool isVarArg() { return false; } }; } // namespace cir From 448650cbf67238e57eeb8f389a883a1a5e963c17 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 4 Apr 2023 14:00:08 -0700 Subject: [PATCH 0854/2301] [CIR] Add cir.vtable.address_point operation and verifySymbolUses impl --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 40 ++++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 23 +++++++++++ 2 files changed, 63 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 5b9c793a8e18..dc0595dfc830 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1119,6 +1119,9 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { bool isDeclaration() { return !getInitialValue(); } + bool hasAvailableExternallyLinkage() { + return mlir::cir::isAvailableExternallyLinkage(getLinkage()); + } }]; let skipDefaultBuilders = 1; @@ -1168,6 +1171,43 @@ def GetGlobalOp : CIR_Op<"get_global", let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// VTableAddrPointOp +//===----------------------------------------------------------------------===// + +def VTableAddrPointOp : CIR_Op<"vtable.address_point", + [Pure, DeclareOpInterfaceMethods]> { + let summary = "Get the vtable (global variable) address point"; + let description = [{ + The `vtable.address_point` operation retrieves the address point of a + C++ virtual table (vtable). + + Example: + + ```mlir + %x = cir.vtable.address_point(@vtable, 2, 3) : !cir.ptr> + ``` + }]; + + let arguments = (ins FlatSymbolRefAttr:$name, + I32Attr:$vtableIndex, + I32Attr:$addrPointIndex); + let results = (outs Res:$addr); + + // FIXME: we should not be printing `cir.ptr` below, that should come + // from the pointer type directly. + let assemblyFormat = [{ + `(` $name `,` + $vtableIndex `,` + $addrPointIndex `,` + `)` + `:` `cir.ptr` type($addr) attr-dict + }]; + + // `VTableAddrPointOp` is fully verified by its traits. + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // StructElementAddr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 38bce249845c..40ab4a063b2e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1159,6 +1159,29 @@ GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return success(); } +//===----------------------------------------------------------------------===// +// VTableAddrPointOp +//===----------------------------------------------------------------------===// + +LogicalResult +VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + // Verify that the result type underlying pointer type matches the type of the + // referenced cir.global or cir.func op. + auto op = dyn_cast_or_null( + symbolTable.lookupNearestSymbolFrom(*this, getNameAttr())); + if (!isa(op)) + return emitOpError("'") + << getName() << "' does not reference a valid cir.global"; + + mlir::Type symTy = op.getSymType(); + auto resultType = getAddr().getType().dyn_cast(); + if (!resultType || symTy != resultType.getPointee()) + return emitOpError("result type pointee type '") + << resultType.getPointee() << "' does not match type " << symTy + << " of the global @" << getName(); + return success(); +} + //===----------------------------------------------------------------------===// // FuncOp //===----------------------------------------------------------------------===// From f8c1c2bcf2167911029a0b6e35f95e627fb7e5dc Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 3 Apr 2023 14:47:48 -0700 Subject: [PATCH 0855/2301] [CIR][CIRGen] Initialize and Get vtable address point in structors - Initialize vtable pointer. - Add a itanium based mechanism for retrieving the address of a vtable. - Add CIRGenVTables.h and CIRGenVTables.cpp. - Make CIRGenModule hold vtable information. - Synthetize a vtable type. - Add logic for createOrReplaceCXXRuntimeVariable. - Fix calls to setGVProperties everywhere, add unimplemented helpers. Testcase yet to come - more code needs to land first. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 1 + clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 22 +++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 95 +++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 104 +++++++++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 76 +++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 32 ++++ clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 61 ++++++ clang/lib/CIR/CodeGen/CIRGenVTables.h | 178 ++++++++++++++++++ .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 4 +- clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + .../CodeGen/UnimplementedFeatureGuarding.h | 3 + 12 files changed, 561 insertions(+), 20 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenVTables.cpp create mode 100644 clang/lib/CIR/CodeGen/CIRGenVTables.h diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index d1c66bd4b88e..e672db429e15 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -102,6 +102,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // mlir::Type getInt8Ty() { return mlir::IntegerType::get(getContext(), 8); } mlir::Type getInt32Ty() { return mlir::IntegerType::get(getContext(), 32); } + mlir::Type getInt64Ty() { return mlir::IntegerType::get(getContext(), 64); } mlir::cir::BoolType getBoolTy() { return ::mlir::cir::BoolType::get(getContext()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 07ea91575a2c..5839297265ce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -135,6 +135,28 @@ class CIRGenCXXABI { /// Emit dtor variants required by this ABI. virtual void buildCXXDestructors(const clang::CXXDestructorDecl *D) = 0; + /// Get the address of the vtable for the given record decl which should be + /// used for the vptr at the given offset in RD. + virtual mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, + CharUnits VPtrOffset) = 0; + + /// Checks if ABI requires extra virtual offset for vtable field. + virtual bool + isVirtualOffsetNeededForVTableField(CIRGenFunction &CGF, + CIRGenFunction::VPtr Vptr) = 0; + + /// Get the address point of the vtable for the given base subobject. + virtual mlir::Value + getVTableAddressPoint(BaseSubobject Base, + const CXXRecordDecl *VTableClass) = 0; + + /// Get the address point of the vtable for the given base subobject while + /// building a constructor or a destructor. + virtual mlir::Value + getVTableAddressPointInStructor(CIRGenFunction &CGF, const CXXRecordDecl *RD, + BaseSubobject Base, + const CXXRecordDecl *NearestVBase) = 0; + /// Specify how one should pass an argument of a record type. enum class RecordArgABI { /// Pass it using the normal C aggregate rules for the ABI, potentially diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 19bb59afa3fb..67f3861acb45 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -538,7 +538,7 @@ static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, // itself) accesses 'this' in any way, we need to initialize the // vtables. if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit())) - CGF.initializeVTablePointers(ClassDecl); + CGF.initializeVTablePointers(loc, ClassDecl); // We can pretend to be a complete class because it only matters for // virtual bases, and we only do virtual bases for complete ctors. @@ -614,7 +614,7 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, CXXThisValue = OldThis; - initializeVTablePointers(ClassDecl); + initializeVTablePointers(getLoc(CD->getBeginLoc()), ClassDecl); // And finally, initialize class members. FieldConstructionScope FCS(*this, LoadCXXThisAddress()); @@ -629,17 +629,70 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, CM.finish(); } -void CIRGenFunction::initializeVTablePointers(const CXXRecordDecl *RD) { +static Address ApplyNonVirtualAndVirtualOffset( + CIRGenFunction &CGF, Address addr, CharUnits nonVirtualOffset, + mlir::Value virtualOffset, const CXXRecordDecl *derivedClass, + const CXXRecordDecl *nearestVBase) { + llvm_unreachable("NYI"); + return Address::invalid(); +} + +void CIRGenFunction::initializeVTablePointer(mlir::Location loc, + const VPtr &Vptr) { + // Compute the address point. + auto VTableAddressPoint = CGM.getCXXABI().getVTableAddressPointInStructor( + *this, Vptr.VTableClass, Vptr.Base, Vptr.NearestVBase); + + if (!VTableAddressPoint) + return; + + // Compute where to store the address point. + mlir::Value VirtualOffset{}; + CharUnits NonVirtualOffset = CharUnits::Zero(); + + if (CGM.getCXXABI().isVirtualOffsetNeededForVTableField(*this, Vptr)) { + llvm_unreachable("NYI"); + } else { + // We can just use the base offset in the complete class. + NonVirtualOffset = Vptr.Base.getBaseOffset(); + } + + // Apply the offsets. + Address VTableField = LoadCXXThisAddress(); + if (!NonVirtualOffset.isZero() || VirtualOffset) { + VTableField = ApplyNonVirtualAndVirtualOffset( + *this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass, + Vptr.NearestVBase); + } + + // Finally, store the address point. Use the same CIR types as the field. + + // unsigned GlobalsAS = CGM.getDataLayout().getDefaultGlobalsAddressSpace(); + // unsigned ProgAS = CGM.getDataLayout().getProgramAddressSpace(); + assert(!UnimplementedFeature::addressSpace()); + auto VTablePtrTy = builder.getVirtualFnPtrType(/*isVarArg=*/true); + + // // vtable field is derived from `this` pointer, therefore they should be in + // // the same addr space. Note that this might not be LLVM address space 0. + VTableField = builder.createElementBitCast(loc, VTableField, VTablePtrTy); + VTableAddressPoint = + builder.createBitcast(loc, VTableAddressPoint, VTablePtrTy); + builder.createStore(loc, VTableAddressPoint, VTableField); + + // TODO(cir): handle anything TBAA related? + assert(!UnimplementedFeature::tbaa()); +} + +void CIRGenFunction::initializeVTablePointers(mlir::Location loc, + const CXXRecordDecl *RD) { // Ignore classes without a vtable. if (!RD->isDynamicClass()) return; // Initialize the vtable pointers for this class and all of its bases. if (CGM.getCXXABI().doStructorsInitializeVPtrs(RD)) - for (const auto &Vptr : getVTablePointers(RD)) { - llvm_unreachable("NYI"); - (void)Vptr; - } + for (const auto &Vptr : getVTablePointers(RD)) + initializeVTablePointer(loc, Vptr); if (RD->getNumVBases()) llvm_unreachable("NYI"); @@ -676,8 +729,32 @@ void CIRGenFunction::getVTablePointers(BaseSubobject Base, // Traverse bases. for (const auto &I : RD->bases()) { - (void)I; - llvm_unreachable("NYI"); + auto *BaseDecl = + cast(I.getType()->castAs()->getDecl()); + + // Ignore classes without a vtable. + if (!BaseDecl->isDynamicClass()) + continue; + + CharUnits BaseOffset; + CharUnits BaseOffsetFromNearestVBase; + bool BaseDeclIsNonVirtualPrimaryBase; + + if (I.isVirtual()) { + llvm_unreachable("NYI"); + } else { + const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); + + BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl); + BaseOffsetFromNearestVBase = + OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl); + BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl; + } + + getVTablePointers( + BaseSubobject(BaseDecl, BaseOffset), + I.isVirtual() ? BaseDecl : NearestVBase, BaseOffsetFromNearestVBase, + BaseDeclIsNonVirtualPrimaryBase, VTableClass, VBases, Vptrs); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 14a08dd177dd..fbe94402623c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1210,7 +1210,9 @@ class CIRGenFunction { return LValue::makeAddr(Addr, T, getContext(), LValueBaseInfo(Source)); } - void initializeVTablePointers(const clang::CXXRecordDecl *RD); + void initializeVTablePointers(mlir::Location loc, + const clang::CXXRecordDecl *RD); + void initializeVTablePointer(mlir::Location loc, const VPtr &Vptr); LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index c3aff116564a..ac2ffc57cccf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -21,6 +21,8 @@ #include "CIRGenFunctionInfo.h" #include "clang/AST/GlobalDecl.h" +#include "clang/AST/Mangle.h" +#include "clang/AST/VTableBuilder.h" #include "clang/Basic/Linkage.h" #include "clang/Basic/TargetInfo.h" @@ -29,11 +31,18 @@ using namespace clang; namespace { class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { + /// All the vtables which have been defined. + llvm::DenseMap VTables; + protected: bool UseARMMethodPtrABI; bool UseARMGuardVarABI; bool Use32BitVTableOffsetABI; + ItaniumMangleContext &getMangleContext() { + return cast(cir::CIRGenCXXABI::getMangleContext()); + } + public: CIRGenItaniumCXXABI(CIRGenModule &CGM, bool UseARMMethodPtrABI = false, bool UseARMGuardVarABI = false) @@ -99,9 +108,18 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { void buildCXXConstructors(const clang::CXXConstructorDecl *D) override; void buildCXXDestructors(const clang::CXXDestructorDecl *D) override; - void buildCXXStructor(clang::GlobalDecl GD) override; + mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, + CharUnits VPtrOffset) override; + mlir::Value getVTableAddressPoint(BaseSubobject Base, + const CXXRecordDecl *VTableClass) override; + bool isVirtualOffsetNeededForVTableField(CIRGenFunction &CGF, + CIRGenFunction::VPtr Vptr) override; + mlir::Value getVTableAddressPointInStructor( + CIRGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, + const CXXRecordDecl *NearestVBase) override; + /// TODO(cir): seems like could be shared between LLVM IR and CIR codegen. bool mayNeedDestruction(const VarDecl *VD) const { if (VD->needsDestruction(getContext())) @@ -415,3 +433,87 @@ void CIRGenItaniumCXXABI::buildCXXDestructors(const CXXDestructorDecl *D) { if (D->isVirtual()) CGM.buildGlobal(GlobalDecl(D, Dtor_Deleting)); } + +mlir::cir::GlobalOp +CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, + CharUnits VPtrOffset) { + assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets"); + auto vtable = VTables[RD]; + if (vtable) + return vtable; + + // Queue up this vtable for possible deferred emission. + CGM.addDeferredVTable(RD); + + SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + getMangleContext().mangleCXXVTable(RD, Out); + + const VTableLayout &VTLayout = + CGM.getItaniumVTableContext().getVTableLayout(RD); + auto VTableType = CGM.getVTables().getVTableType(VTLayout); + + // Use pointer alignment for the vtable. Otherwise we would align them based + // on the size of the initializer which doesn't make sense as only single + // values are read. + unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout() + ? 32 + : CGM.getTarget().getPointerAlign(LangAS::Default); + + vtable = CGM.createOrReplaceCXXRuntimeVariable( + CGM.getLoc(RD->getSourceRange()), Name, VTableType, + mlir::cir::GlobalLinkageKind::ExternalLinkage, + getContext().toCharUnitsFromBits(PAlign)); + // For LLVM codegen we also do + // VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); + + // In MS C++ if you have a class with virtual functions in which you are using + // selective member import/export, then all virtual functions must be exported + // unless they are inline, otherwise a link error will result. To match this + // behavior, for such classes, we dllimport the vtable if it is defined + // externally and all the non-inline virtual methods are marked dllimport, and + // we dllexport the vtable if it is defined in this TU and all the non-inline + // virtual methods are marked dllexport. + if (CGM.getTarget().hasPS4DLLImportExport()) + llvm_unreachable("NYI"); + + CGM.setGVProperties(vtable, RD); + return vtable; +} + +mlir::Value +CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base, + const CXXRecordDecl *VTableClass) { + auto vtable = getAddrOfVTable(VTableClass, CharUnits()); + + // Find the appropriate vtable within the vtable group, and the address point + // within that vtable. + VTableLayout::AddressPointLocation AddressPoint = + CGM.getItaniumVTableContext() + .getVTableLayout(VTableClass) + .getAddressPoint(Base); + + auto &builder = CGM.getBuilder(); + auto ptrTy = builder.getPointerTo(vtable.getSymType()); + return builder.create( + CGM.getLoc(VTableClass->getSourceRange()), ptrTy, vtable.getSymName(), + AddressPoint.VTableIndex, AddressPoint.AddressPointIndex); +} + +mlir::Value CIRGenItaniumCXXABI::getVTableAddressPointInStructor( + CIRGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, + const CXXRecordDecl *NearestVBase) { + + if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) && + NeedsVTTParameter(CGF.CurGD)) { + llvm_unreachable("NYI"); + } + return getVTableAddressPoint(Base, VTableClass); +} + +bool CIRGenItaniumCXXABI::isVirtualOffsetNeededForVTableField( + CIRGenFunction &CGF, CIRGenFunction::VPtr Vptr) { + if (Vptr.NearestVBase == nullptr) + return false; + return NeedsVTTParameter(CGF.CurGD); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 2bb4deccb82a..c5549bf01458 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -93,8 +93,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, : builder(context), astCtx(astctx), langOpts(astctx.getLangOpts()), codeGenOpts(CGO), theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), - target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), - genTypes{*this} { + target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, + VTables{*this} { mlir::cir::sob::SignedOverflowBehavior sob; switch (langOpts.getSignedOverflowBehavior()) { case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Defined: @@ -350,7 +350,7 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, return; setFunctionLinkage(GD, Fn); - // TODO(cir): setGVProperties + setGVProperties(Op, D); // TODO(cir): MaubeHandleStaticInExternC // TODO(cir): maybeSetTrivialComdat // TODO(cir): setLLVMFunctionFEnvAttributes @@ -533,8 +533,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, assert(0 && "not implemented"); } - // TODO(cir): - // setGVProperties(GV, D); + setGVProperties(GV, D); // If required by the ABI, treat declarations of static data members with // inline initializers as definitions. @@ -1506,8 +1505,26 @@ StringRef CIRGenModule::getMangledName(GlobalDecl GD) { return MangledDeclNames[CanonicalGD] = Result.first->first(); } +void CIRGenModule::setGlobalVisibility(mlir::Operation *GV, + const NamedDecl *D) const { + assert(!UnimplementedFeature::setGlobalVisibility()); +} + void CIRGenModule::setDSOLocal(mlir::Operation *Op) const { - // TODO: Op->setDSOLocal + assert(!UnimplementedFeature::setDSOLocal()); +} + +void CIRGenModule::setGVProperties(mlir::Operation *Op, + const NamedDecl *D) const { + assert(!UnimplementedFeature::setDLLImportDLLExport()); + setGVPropertiesAux(Op, D); +} + +void CIRGenModule::setGVPropertiesAux(mlir::Operation *Op, + const NamedDecl *D) const { + setGlobalVisibility(Op, D); + setDSOLocal(Op); + assert(!UnimplementedFeature::setPartition()); } bool CIRGenModule::lookupRepresentativeDecl(StringRef MangledName, @@ -2023,3 +2040,50 @@ void CIRGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { buildTopLevelDecl(VD); } + +mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( + mlir::Location loc, StringRef Name, mlir::Type Ty, + mlir::cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment) { + mlir::cir::GlobalOp OldGV{}; + auto GV = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(getModule(), Name)); + + if (GV) { + // Check if the variable has the right type. + if (GV.getSymType() == Ty) + return GV; + + // Because C++ name mangling, the only way we can end up with an already + // existing global with the same name is if it has been declared extern + // "C". + assert(GV.isDeclaration() && "Declaration has wrong type!"); + OldGV = GV; + } + + // // Create a new variable. + GV = createGlobalOp(*this, loc, Name, Ty); + + // Set up extra information and add to the module + + GV.setLinkageAttr( + mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), Linkage)); + mlir::SymbolTable::setSymbolVisibility( + GV, CIRGenModule::getMLIRVisibilityFromCIRLinkage(Linkage)); + + if (OldGV) { + // Replace occurrences of the old variable if needed. + GV.setName(OldGV.getName()); + if (!OldGV->use_empty()) { + llvm_unreachable("NYI"); + } + OldGV->erase(); + } + + assert(!UnimplementedFeature::setComdat()); + if (supportsCOMDAT() && mlir::cir::isWeakForLinker(Linkage) && + !GV.hasAvailableExternallyLinkage()) + assert(!UnimplementedFeature::setComdat()); + + GV.setAlignmentAttr(getSize(Alignment)); + return GV; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 794c58b64562..56c19c5a47e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -15,6 +15,7 @@ #include "CIRGenBuilder.h" #include "CIRGenTypes.h" +#include "CIRGenVTables.h" #include "CIRGenValue.h" #include "UnimplementedFeatureGuarding.h" @@ -95,6 +96,9 @@ class CIRGenModule { /// Per-module type mapping from clang AST to CIR. CIRGenTypes genTypes; + /// Holds information about C++ vtables. + CIRGenVTables VTables; + /// Per-function codegen information. Updated everytime buildCIR is called /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; @@ -189,6 +193,14 @@ class CIRGenModule { getAddrOfGlobalVar(const VarDecl *D, std::optional Ty = {}, ForDefinition_t IsForDefinition = NotForDefinition); + /// Will return a global variable of the given type. If a variable with a + /// different type already exists then a new variable with the right type + /// will be created and all uses of the old variable will be replaced with a + /// bitcast to the new variable. + mlir::cir::GlobalOp createOrReplaceCXXRuntimeVariable( + mlir::Location loc, StringRef Name, mlir::Type Ty, + mlir::cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment); + llvm::DenseMap ConstantStringMap; /// Return a constant array for the given string. @@ -262,6 +274,16 @@ class CIRGenModule { /// A queue of (optional) vtables to consider emitting. std::vector DeferredVTables; + mlir::Type getVTableComponentType(); + CIRGenVTables &getVTables() { return VTables; } + + ItaniumVTableContext &getItaniumVTableContext() { + return VTables.getItaniumVTableContext(); + } + const ItaniumVTableContext &getItaniumVTableContext() const { + return VTables.getItaniumVTableContext(); + } + /// This contains all the decls which have definitions but which are deferred /// for emission and therefore should only be output if they are actually /// used. If a decl is in this, then it is known to have not been referenced @@ -297,7 +319,13 @@ class CIRGenModule { mlir::Type getCIRType(const clang::QualType &type); + /// Set the visibility for the given global. + void setGlobalVisibility(mlir::Operation *Op, const NamedDecl *D) const; void setDSOLocal(mlir::Operation *Op) const; + /// Set visibility, dllimport/dllexport and dso_local. + /// This must be called after dllimport/dllexport is set. + void setGVProperties(mlir::Operation *Op, const NamedDecl *D) const; + void setGVPropertiesAux(mlir::Operation *Op, const NamedDecl *D) const; /// Determine whether the definition must be emitted; if this returns \c /// false, the definition can be emitted lazily if it's used. @@ -350,6 +378,10 @@ class CIRGenModule { void buildGlobalVarDefinition(const clang::VarDecl *D, bool IsTentative = false); + void addDeferredVTable(const CXXRecordDecl *RD) { + DeferredVTables.push_back(RD); + } + /// Stored a deferred empty coverage mapping for an unused and thus /// uninstrumented top level declaration. void AddDeferredUnusedCoverageMapping(clang::Decl *D); diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp new file mode 100644 index 000000000000..fad1ee399416 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -0,0 +1,61 @@ +//===--- CIRGenVTables.cpp - Emit CIR Code for C++ vtables ----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with C++ code generation of virtual tables. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "clang/AST/Attr.h" +#include "clang/AST/CXXInheritance.h" +#include "clang/AST/RecordLayout.h" +#include "clang/Basic/CodeGenOptions.h" +#include "clang/CodeGen/CGFunctionInfo.h" +#include "clang/CodeGen/ConstantInitBuilder.h" +#include "llvm/Support/Format.h" +#include "llvm/Transforms/Utils/Cloning.h" +#include +#include + +using namespace clang; +using namespace cir; + +CIRGenVTables::CIRGenVTables(CIRGenModule &CGM) + : CGM(CGM), VTContext(CGM.getASTContext().getVTableContext()) {} + +static bool UseRelativeLayout(const CIRGenModule &CGM) { + return CGM.getTarget().getCXXABI().isItaniumFamily() && + CGM.getItaniumVTableContext().isRelativeLayout(); +} + +mlir::Type CIRGenModule::getVTableComponentType() { + mlir::Type ptrTy = builder.getInt8PtrTy(); + if (UseRelativeLayout(*this)) + ptrTy = builder.getInt32PtrTy(); + return ptrTy; +} + +mlir::Type CIRGenVTables::getVTableComponentType() { + return CGM.getVTableComponentType(); +} + +mlir::Type CIRGenVTables::getVTableType(const VTableLayout &layout) { + SmallVector tys; + auto ctx = CGM.getBuilder().getContext(); + auto componentType = getVTableComponentType(); + for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) + tys.push_back( + mlir::cir::ArrayType::get(ctx, componentType, layout.getVTableSize(i))); + + // FIXME(cir): should VTableLayout be encoded like we do for some + // AST nodes? + return mlir::cir::StructType::get(ctx, tys, "vtable", + /*body=*/true); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h new file mode 100644 index 000000000000..0cb527a2b4f5 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -0,0 +1,178 @@ +//===--- CIRGenVTables.h - Emit LLVM Code for C++ vtables -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with C++ code generation of virtual tables. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H +#define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H + +#include "clang/AST/BaseSubobject.h" +#include "clang/AST/CharUnits.h" +#include "clang/AST/GlobalDecl.h" +#include "clang/AST/VTableBuilder.h" +#include "clang/Basic/ABI.h" +#include "llvm/ADT/DenseMap.h" + +namespace clang { +class CXXRecordDecl; +} + +namespace cir { +class CIRGenModule; +// class ConstantArrayBuilder; +// class ConstantStructBuilder; + +class CIRGenVTables { + CIRGenModule &CGM; + + clang::VTableContextBase *VTContext; + + /// VTableAddressPointsMapTy - Address points for a single vtable. + typedef clang::VTableLayout::AddressPointsMapTy VTableAddressPointsMapTy; + + typedef std::pair + BaseSubobjectPairTy; + typedef llvm::DenseMap SubVTTIndiciesMapTy; + + /// SubVTTIndicies - Contains indices into the various sub-VTTs. + SubVTTIndiciesMapTy SubVTTIndicies; + + typedef llvm::DenseMap + SecondaryVirtualPointerIndicesMapTy; + + /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer + /// indices. + SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices; + + // /// Cache for the pure virtual member call function. + // llvm::Constant *PureVirtualFn = nullptr; + + // /// Cache for the deleted virtual member call function. + // llvm::Constant *DeletedVirtualFn = nullptr; + + // /// Get the address of a thunk and emit it if necessary. + // llvm::Constant *maybeEmitThunk(GlobalDecl GD, + // const ThunkInfo &ThunkAdjustments, + // bool ForVTable); + + // void addVTableComponent(ConstantArrayBuilder &builder, + // const VTableLayout &layout, unsigned + // componentIndex, llvm::Constant *rtti, unsigned + // &nextVTableThunkIndex, unsigned + // vtableAddressPoint, bool vtableHasLocalLinkage); + + // /// Add a 32-bit offset to a component relative to the vtable when using + // the + // /// relative vtables ABI. The array builder points to the start of the + // vtable. void addRelativeComponent(ConstantArrayBuilder &builder, + // llvm::Constant *component, + // unsigned vtableAddressPoint, + // bool vtableHasLocalLinkage, + // bool isCompleteDtor) const; + + // /// Create a dso_local stub that will be used for a relative reference in + // the + // /// relative vtable layout. This stub will just be a tail call to the + // original + // /// function and propagate any function attributes from the original. If + // the + // /// original function is already dso_local, the original is returned + // instead + // /// and a stub is not created. + // llvm::Function * + // getOrCreateRelativeStub(llvm::Function *func, + // llvm::GlobalValue::LinkageTypes stubLinkage, + // bool isCompleteDtor) const; + + bool useRelativeLayout() const; + + mlir::Type getVTableComponentType(); + +public: + /// Add vtable components for the given vtable layout to the given + /// global initializer. + // void createVTableInitializer(ConstantStructBuilder &builder, + // const VTableLayout &layout, llvm::Constant + // *rtti, bool vtableHasLocalLinkage); + + CIRGenVTables(CIRGenModule &CGM); + + clang::ItaniumVTableContext &getItaniumVTableContext() { + return *llvm::cast(VTContext); + } + + const clang::ItaniumVTableContext &getItaniumVTableContext() const { + return *llvm::cast(VTContext); + } + + // MicrosoftVTableContext &getMicrosoftVTableContext() { + // return *cast(VTContext); + // } + + // /// getSubVTTIndex - Return the index of the sub-VTT for the base class + // of the + // /// given record decl. + // uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base); + + // /// getSecondaryVirtualPointerIndex - Return the index in the VTT where + // the + // /// virtual pointer for the given subobject is located. + // uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, + // BaseSubobject Base); + + // /// GenerateConstructionVTable - Generate a construction vtable for the + // given + // /// base subobject. + // llvm::GlobalVariable * + // GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject + // &Base, + // bool BaseIsVirtual, + // llvm::GlobalVariable::LinkageTypes Linkage, + // VTableAddressPointsMapTy &AddressPoints); + + // /// GetAddrOfVTT - Get the address of the VTT for the given record decl. + // llvm::GlobalVariable *GetAddrOfVTT(const CXXRecordDecl *RD); + + // /// EmitVTTDefinition - Emit the definition of the given vtable. + // void EmitVTTDefinition(llvm::GlobalVariable *VTT, + // llvm::GlobalVariable::LinkageTypes Linkage, + // const CXXRecordDecl *RD); + + // /// EmitThunks - Emit the associated thunks for the given global decl. + // void EmitThunks(GlobalDecl GD); + + // /// GenerateClassData - Generate all the class data required to be + // /// generated upon definition of a KeyFunction. This includes the + // /// vtable, the RTTI data structure (if RTTI is enabled) and the VTT + // /// (if the class has virtual bases). + // void GenerateClassData(const CXXRecordDecl *RD); + + // bool isVTableExternal(const CXXRecordDecl *RD); + + /// Returns the type of a vtable with the given layout. Normally a struct of + /// arrays of pointers, with one struct element for each vtable in the vtable + /// group. + mlir::Type getVTableType(const clang::VTableLayout &layout); + + // /// Generate a public facing alias for the vtable and make the vtable + // either + // /// hidden or private. The alias will have the original linkage and + // visibility + // /// of the vtable. This is used for cases under the relative vtables ABI + // /// when a vtable may not be dso_local. + // void GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable, + // llvm::StringRef AliasNameRef); + + // /// Specify a global should not be instrumented with hwasan. + // void RemoveHwasanMetadata(llvm::GlobalValue *GV) const; +}; + +} // end namespace cir +#endif diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index d853d8352903..1870d955b722 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -288,9 +288,7 @@ void CIRRecordLowering::accumulateVPtrs() { mlir::Type CIRRecordLowering::getVFPtrType() { // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special // type so it's a bit more clear and C++ idiomatic. - auto intTy = mlir::IntegerType::get(builder.getContext(), 32); - auto fnTy = mlir::FunctionType::get(builder.getContext(), {}, {intTy}); - return builder.getPointerTo(builder.getPointerTo(fnTy)); + return builder.getVirtualFnPtrType(); } void CIRRecordLowering::fillOutputFields() { diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index b93f9ecf9910..f0b08fb463e1 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -30,6 +30,7 @@ add_clang_library(clangCIR CIRGenStmt.cpp CIRGenTBAA.cpp CIRGenTypes.cpp + CIRGenVTables.cpp CIRGenerator.cpp CIRPasses.cpp CIRRecordLayoutBuilder.cpp diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 802ca9e765d1..69e50c813e99 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -37,6 +37,9 @@ struct UnimplementedFeature { static bool setDSOLocal() { return false; } static bool threadLocal() { return false; } static bool setDLLStorageClass() { return false; } + static bool setDLLImportDLLExport() { return false; } + static bool setPartition() { return false; } + static bool setGlobalVisibility() { return false; } // Sanitizers static bool reportGlobalToASan() { return false; } From dc5ed3409cd3d6a84e4d15d0915d9bcc3112dac0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Apr 2023 16:52:06 -0700 Subject: [PATCH 0856/2301] [CIR][CIRGen] Handle TU level buildDeferredVTables() --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 5 ++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 6 ++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 13 ++- clang/lib/CIR/CodeGen/CIRGenModule.h | 4 + clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 87 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenVTables.h | 2 +- 6 files changed, 115 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 5839297265ce..8ac9e7d36e6f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -145,6 +145,11 @@ class CIRGenCXXABI { isVirtualOffsetNeededForVTableField(CIRGenFunction &CGF, CIRGenFunction::VPtr Vptr) = 0; + /// Determine whether it's possible to emit a vtable for \p RD, even + /// though we do not know that the vtable has been marked as used by semantic + /// analysis. + virtual bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const = 0; + /// Get the address point of the vtable for the given base subobject. virtual mlir::Value getVTableAddressPoint(BaseSubobject Base, diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index ac2ffc57cccf..065d6144dac3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -110,6 +110,7 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { void buildCXXDestructors(const clang::CXXDestructorDecl *D) override; void buildCXXStructor(clang::GlobalDecl GD) override; + bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) override; mlir::Value getVTableAddressPoint(BaseSubobject Base, @@ -517,3 +518,8 @@ bool CIRGenItaniumCXXABI::isVirtualOffsetNeededForVTableField( return false; return NeedsVTTParameter(CGF.CurGD); } + +bool CIRGenItaniumCXXABI::canSpeculativelyEmitVTable( + [[maybe_unused]] const CXXRecordDecl *RD) const { + llvm_unreachable("NYI"); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c5549bf01458..8ef4f7591bfb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1819,7 +1819,12 @@ void CIRGenModule::buildDeferred() { // static function, iterate until no changes are made. if (!DeferredVTables.empty()) { - llvm_unreachable("NYI"); + buildDeferredVTables(); + + // Emitting a vtable doesn't directly cause more vtables to + // become deferred, although it can cause functions to be + // emitted that then need those vtables. + assert(DeferredVTables.empty()); } // Emit CUDA/HIP static device variables referenced by host code only. Note we @@ -2087,3 +2092,9 @@ mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( GV.setAlignmentAttr(getSize(Alignment)); return GV; } + +bool CIRGenModule::shouldOpportunisticallyEmitVTables() { + if (codeGenOpts.OptimizationLevel != 0) + llvm_unreachable("NYI"); + return codeGenOpts.OptimizationLevel > 0; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 56c19c5a47e3..d3dbf7895706 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -201,6 +201,10 @@ class CIRGenModule { mlir::Location loc, StringRef Name, mlir::Type Ty, mlir::cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment); + /// Emit any vtables which we deferred and still have a use for. + void buildDeferredVTables(); + bool shouldOpportunisticallyEmitVTables(); + llvm::DenseMap ConstantStringMap; /// Return a constant array for the given string. diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index fad1ee399416..05d9b5a5f4d5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -58,4 +58,91 @@ mlir::Type CIRGenVTables::getVTableType(const VTableLayout &layout) { // AST nodes? return mlir::cir::StructType::get(ctx, tys, "vtable", /*body=*/true); +} + +/// At this point in the translation unit, does it appear that can we +/// rely on the vtable being defined elsewhere in the program? +/// +/// The response is really only definitive when called at the end of +/// the translation unit. +/// +/// The only semantic restriction here is that the object file should +/// not contain a vtable definition when that vtable is defined +/// strongly elsewhere. Otherwise, we'd just like to avoid emitting +/// vtables when unnecessary. +/// TODO(cir): this should be merged into common AST helper for codegen. +bool CIRGenVTables::isVTableExternal(const CXXRecordDecl *RD) { + assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable."); + + // We always synthesize vtables if they are needed in the MS ABI. MSVC doesn't + // emit them even if there is an explicit template instantiation. + if (CGM.getTarget().getCXXABI().isMicrosoft()) + return false; + + // If we have an explicit instantiation declaration (and not a + // definition), the vtable is defined elsewhere. + TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind(); + if (TSK == TSK_ExplicitInstantiationDeclaration) + return true; + + // Otherwise, if the class is an instantiated template, the + // vtable must be defined here. + if (TSK == TSK_ImplicitInstantiation || + TSK == TSK_ExplicitInstantiationDefinition) + return false; + + // Otherwise, if the class doesn't have a key function (possibly + // anymore), the vtable must be defined here. + const CXXMethodDecl *keyFunction = + CGM.getASTContext().getCurrentKeyFunction(RD); + if (!keyFunction) + return false; + + // Otherwise, if we don't have a definition of the key function, the + // vtable must be defined somewhere else. + return !keyFunction->hasBody(); +} + +static bool shouldEmitAvailableExternallyVTable(const CIRGenModule &CGM, + const CXXRecordDecl *RD) { + assert(CGM.getCodeGenOpts().OptimizationLevel == 0 && "NYI"); + return CGM.getCodeGenOpts().OptimizationLevel > 0 && + CGM.getCXXABI().canSpeculativelyEmitVTable(RD); +} + +/// Given that we're currently at the end of the translation unit, and +/// we've emitted a reference to the vtable for this class, should +/// we define that vtable? +static bool shouldEmitVTableAtEndOfTranslationUnit(CIRGenModule &CGM, + const CXXRecordDecl *RD) { + // If vtable is internal then it has to be done. + if (!CGM.getVTables().isVTableExternal(RD)) + return true; + + // If it's external then maybe we will need it as available_externally. + return shouldEmitAvailableExternallyVTable(CGM, RD); +} + +/// Given that at some point we emitted a reference to one or more +/// vtables, and that we are now at the end of the translation unit, +/// decide whether we should emit them. +void CIRGenModule::buildDeferredVTables() { +#ifndef NDEBUG + // Remember the size of DeferredVTables, because we're going to assume + // that this entire operation doesn't modify it. + size_t savedSize = DeferredVTables.size(); +#endif + + for (const CXXRecordDecl *RD : DeferredVTables) + if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD)) { + llvm_unreachable("NYI"); + // VTables.GenerateClassData(RD); + } else if (shouldOpportunisticallyEmitVTables()) { + llvm_unreachable("NYI"); + // OpportunisticVTables.push_back(RD); + } + + assert(savedSize == DeferredVTables.size() && + "deferred extra vtables during vtable emission?"); + DeferredVTables.clear(); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index 0cb527a2b4f5..a578f567bed1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -154,7 +154,7 @@ class CIRGenVTables { // /// (if the class has virtual bases). // void GenerateClassData(const CXXRecordDecl *RD); - // bool isVTableExternal(const CXXRecordDecl *RD); + bool isVTableExternal(const clang::CXXRecordDecl *RD); /// Returns the type of a vtable with the given layout. Normally a struct of /// arrays of pointers, with one struct element for each vtable in the vtable From 55f5fbf2c7f5c9842693d49b02fade0b677d0b24 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Apr 2023 21:20:39 -0700 Subject: [PATCH 0857/2301] [CIR][CIRGen] Start populating by delayed vtable emission, compute linkage type - handle initializeVTablePointers. - implement defered mechanism for vtable emission. - also emit typeinfo/RTTI stuff. - add rules for adding proper linkage type via getVTableLinkage. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 1 + clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 4 + clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 13 ++- clang/lib/CIR/CodeGen/CIRGenModule.h | 4 + clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 107 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenVTables.h | 9 +- 6 files changed, 128 insertions(+), 10 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index dc0595dfc830..5ad1f5ac424c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1119,6 +1119,7 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { bool isDeclaration() { return !getInitialValue(); } + bool hasInitializer() { return !isDeclaration(); } bool hasAvailableExternallyLinkage() { return mlir::cir::isAvailableExternallyLinkage(getLinkage()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 8ac9e7d36e6f..852c65ea45ce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -150,6 +150,10 @@ class CIRGenCXXABI { /// analysis. virtual bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const = 0; + /// Emits the VTable definitions required for the given record type. + virtual void emitVTableDefinitions(CIRGenVTables &CGVT, + const CXXRecordDecl *RD) = 0; + /// Get the address point of the vtable for the given base subobject. virtual mlir::Value getVTableAddressPoint(BaseSubobject Base, diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 065d6144dac3..c35b1af5509a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -120,6 +120,8 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { mlir::Value getVTableAddressPointInStructor( CIRGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, const CXXRecordDecl *NearestVBase) override; + void emitVTableDefinitions(CIRGenVTables &CGVT, + const CXXRecordDecl *RD) override; /// TODO(cir): seems like could be shared between LLVM IR and CIR codegen. bool mayNeedDestruction(const VarDecl *VD) const { @@ -522,4 +524,13 @@ bool CIRGenItaniumCXXABI::isVirtualOffsetNeededForVTableField( bool CIRGenItaniumCXXABI::canSpeculativelyEmitVTable( [[maybe_unused]] const CXXRecordDecl *RD) const { llvm_unreachable("NYI"); -} \ No newline at end of file +} + +void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, + const CXXRecordDecl *RD) { + auto VTable = getAddrOfVTable(RD, CharUnits()); + if (VTable.hasInitializer()) + return; + + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index d3dbf7895706..c3cee04c0f0b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -205,6 +205,10 @@ class CIRGenModule { void buildDeferredVTables(); bool shouldOpportunisticallyEmitVTables(); + /// Return the appropriate linkage for the vtable, VTT, and type information + /// of the given class. + mlir::cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *RD); + llvm::DenseMap ConstantStringMap; /// Return a constant array for the given string. diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 05d9b5a5f4d5..8cf1f8e4a470 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -135,14 +135,113 @@ void CIRGenModule::buildDeferredVTables() { for (const CXXRecordDecl *RD : DeferredVTables) if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD)) { - llvm_unreachable("NYI"); - // VTables.GenerateClassData(RD); + VTables.GenerateClassData(RD); } else if (shouldOpportunisticallyEmitVTables()) { llvm_unreachable("NYI"); - // OpportunisticVTables.push_back(RD); } assert(savedSize == DeferredVTables.size() && "deferred extra vtables during vtable emission?"); DeferredVTables.clear(); -} \ No newline at end of file +} + +void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) { + assert(!UnimplementedFeature::generateDebugInfo()); + + if (RD->getNumVBases()) + llvm_unreachable("NYI"); + + CGM.getCXXABI().emitVTableDefinitions(*this, RD); + llvm_unreachable("NYI"); +} + +/// Compute the required linkage of the vtable for the given class. +/// +/// Note that we only call this at the end of the translation unit. +mlir::cir::GlobalLinkageKind +CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { + if (!RD->isExternallyVisible()) + return mlir::cir::GlobalLinkageKind::InternalLinkage; + + // We're at the end of the translation unit, so the current key + // function is fully correct. + const CXXMethodDecl *keyFunction = astCtx.getCurrentKeyFunction(RD); + if (keyFunction && !RD->hasAttr()) { + // If this class has a key function, use that to determine the + // linkage of the vtable. + const FunctionDecl *def = nullptr; + if (keyFunction->hasBody(def)) + keyFunction = cast(def); + + switch (keyFunction->getTemplateSpecializationKind()) { + case TSK_Undeclared: + case TSK_ExplicitSpecialization: + assert( + (def || codeGenOpts.OptimizationLevel > 0 || + codeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo) && + "Shouldn't query vtable linkage without key function, " + "optimizations, or debug info"); + if (!def && codeGenOpts.OptimizationLevel > 0) + return mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; + + if (keyFunction->isInlined()) + return !astCtx.getLangOpts().AppleKext + ? mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage + : mlir::cir::GlobalLinkageKind::InternalLinkage; + + return mlir::cir::GlobalLinkageKind::ExternalLinkage; + + case TSK_ImplicitInstantiation: + return !astCtx.getLangOpts().AppleKext + ? mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage + : mlir::cir::GlobalLinkageKind::InternalLinkage; + + case TSK_ExplicitInstantiationDefinition: + return !astCtx.getLangOpts().AppleKext + ? mlir::cir::GlobalLinkageKind::WeakODRLinkage + : mlir::cir::GlobalLinkageKind::InternalLinkage; + + case TSK_ExplicitInstantiationDeclaration: + llvm_unreachable("Should not have been asked to emit this"); + } + } + + // -fapple-kext mode does not support weak linkage, so we must use + // internal linkage. + if (astCtx.getLangOpts().AppleKext) + return mlir::cir::GlobalLinkageKind::InternalLinkage; + + auto DiscardableODRLinkage = mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage; + auto NonDiscardableODRLinkage = mlir::cir::GlobalLinkageKind::WeakODRLinkage; + if (RD->hasAttr()) { + // Cannot discard exported vtables. + DiscardableODRLinkage = NonDiscardableODRLinkage; + } else if (RD->hasAttr()) { + // Imported vtables are available externally. + DiscardableODRLinkage = + mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; + NonDiscardableODRLinkage = + mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; + } + + switch (RD->getTemplateSpecializationKind()) { + case TSK_Undeclared: + case TSK_ExplicitSpecialization: + case TSK_ImplicitInstantiation: + return DiscardableODRLinkage; + + case TSK_ExplicitInstantiationDeclaration: + // Explicit instantiations in MSVC do not provide vtables, so we must emit + // our own. + if (getTarget().getCXXABI().isMicrosoft()) + return DiscardableODRLinkage; + return shouldEmitAvailableExternallyVTable(*this, RD) + ? mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage + : mlir::cir::GlobalLinkageKind::ExternalLinkage; + + case TSK_ExplicitInstantiationDefinition: + return NonDiscardableODRLinkage; + } + + llvm_unreachable("Invalid TemplateSpecializationKind!"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index a578f567bed1..b0ef3b28d1e5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -148,11 +148,10 @@ class CIRGenVTables { // /// EmitThunks - Emit the associated thunks for the given global decl. // void EmitThunks(GlobalDecl GD); - // /// GenerateClassData - Generate all the class data required to be - // /// generated upon definition of a KeyFunction. This includes the - // /// vtable, the RTTI data structure (if RTTI is enabled) and the VTT - // /// (if the class has virtual bases). - // void GenerateClassData(const CXXRecordDecl *RD); + /// Generate all the class data required to be generated upon definition of a + /// KeyFunction. This includes the vtable, the RTTI data structure (if RTTI + /// is enabled) and the VTT (if the class has virtual bases). + void GenerateClassData(const clang::CXXRecordDecl *RD); bool isVTableExternal(const clang::CXXRecordDecl *RD); From 0446524736ddbfae02632861592ae1cab5ef2fe4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Apr 2023 22:33:33 -0700 Subject: [PATCH 0858/2301] [CIR][CIRGen] Lots of preparation for RTTI and typeinfo construction - Initial impl for getAddrOfRTTIDescriptor. - Bits for RTTI uniqueness. - Introduce CIRGenItaniumRTTIBuilder for more vtable info. - Bunch of helpers - Handle linkage and visibility - Add initial code for BuildTypeInfo --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 1 + clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 550 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 17 + clang/lib/CIR/CodeGen/CIRGenModule.h | 16 + .../CodeGen/UnimplementedFeatureGuarding.h | 2 + 5 files changed, 584 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 852c65ea45ce..7dbd070f9235 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -153,6 +153,7 @@ class CIRGenCXXABI { /// Emits the VTable definitions required for the given record type. virtual void emitVTableDefinitions(CIRGenVTables &CGVT, const CXXRecordDecl *RD) = 0; + virtual mlir::Value getAddrOfRTTIDescriptor(QualType Ty) = 0; /// Get the address point of the vtable for the given base subobject. virtual mlir::Value diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index c35b1af5509a..0e872d34c0eb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -122,6 +122,7 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { const CXXRecordDecl *NearestVBase) override; void emitVTableDefinitions(CIRGenVTables &CGVT, const CXXRecordDecl *RD) override; + mlir::Value getAddrOfRTTIDescriptor(QualType Ty) override; /// TODO(cir): seems like could be shared between LLVM IR and CIR codegen. bool mayNeedDestruction(const VarDecl *VD) const { @@ -186,6 +187,36 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { return true; } + + /**************************** RTTI Uniqueness ******************************/ +protected: + /// Returns true if the ABI requires RTTI type_info objects to be unique + /// across a program. + virtual bool shouldRTTIBeUnique() const { return true; } + +public: + /// What sort of unique-RTTI behavior should we use? + enum RTTIUniquenessKind { + /// We are guaranteeing, or need to guarantee, that the RTTI string + /// is unique. + RUK_Unique, + + /// We are not guaranteeing uniqueness for the RTTI string, so we + /// can demote to hidden visibility but must use string comparisons. + RUK_NonUniqueHidden, + + /// We are not guaranteeing uniqueness for the RTTI string, so we + /// have to use string comparisons, but we also have to emit it with + /// non-hidden visibility. + RUK_NonUniqueVisible + }; + + /// Return the required visibility status for the given type and linkage in + /// the current ABI. + RTTIUniquenessKind + classifyRTTIUniqueness(QualType CanTy, + mlir::cir::GlobalLinkageKind Linkage) const; + friend class CIRGenItaniumRTTIBuilder; }; } // namespace @@ -467,8 +498,8 @@ CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, CGM.getLoc(RD->getSourceRange()), Name, VTableType, mlir::cir::GlobalLinkageKind::ExternalLinkage, getContext().toCharUnitsFromBits(PAlign)); - // For LLVM codegen we also do - // VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); + // LLVM codegen handles unnamedAddr + assert(!UnimplementedFeature::unnamedAddr()); // In MS C++ if you have a class with virtual functions in which you are using // selective member import/export, then all virtual functions must be exported @@ -526,6 +557,442 @@ bool CIRGenItaniumCXXABI::canSpeculativelyEmitVTable( llvm_unreachable("NYI"); } +namespace { +class CIRGenItaniumRTTIBuilder { + CIRGenModule &CGM; // Per-module state. + const CIRGenItaniumCXXABI &CXXABI; // Per-module state. + + // /// The fields of the RTTI descriptor currently being built. + // SmallVector Fields; + + // /// Returns the mangled type name of the given type. + // llvm::GlobalVariable * + // GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage); + + // /// Returns the constant for the RTTI + // /// descriptor of the given type. + // llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty); + + // /// Build the vtable pointer for the given type. + // void BuildVTablePointer(const Type *Ty); + + // /// Build an abi::__si_class_type_info, used for + // single + // /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b. + // void BuildSIClassTypeInfo(const CXXRecordDecl *RD); + + // /// Build an abi::__vmi_class_type_info, used for + // /// classes with bases that do not satisfy the abi::__si_class_type_info + // /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c. + // void BuildVMIClassTypeInfo(const CXXRecordDecl *RD); + + // /// Build an abi::__pointer_type_info struct, used + // /// for pointer types. + // void BuildPointerTypeInfo(QualType PointeeTy); + + // /// Build the appropriate kind of + // /// type_info for an object type. + // void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty); + + // /// Build an + // abi::__pointer_to_member_type_info + // /// struct, used for member pointer types. + // void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty); + +public: + CIRGenItaniumRTTIBuilder(const CIRGenItaniumCXXABI &ABI, CIRGenModule &_CGM) + : CGM(_CGM), CXXABI(ABI) {} + + // Pointer type info flags. + enum { + /// PTI_Const - Type has const qualifier. + PTI_Const = 0x1, + + /// PTI_Volatile - Type has volatile qualifier. + PTI_Volatile = 0x2, + + /// PTI_Restrict - Type has restrict qualifier. + PTI_Restrict = 0x4, + + /// PTI_Incomplete - Type is incomplete. + PTI_Incomplete = 0x8, + + /// PTI_ContainingClassIncomplete - Containing class is incomplete. + /// (in pointer to member). + PTI_ContainingClassIncomplete = 0x10, + + /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS). + // PTI_TransactionSafe = 0x20, + + /// PTI_Noexcept - Pointee is noexcept function (C++1z). + PTI_Noexcept = 0x40, + }; + + // VMI type info flags. + enum { + /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance. + VMI_NonDiamondRepeat = 0x1, + + /// VMI_DiamondShaped - Class is diamond shaped. + VMI_DiamondShaped = 0x2 + }; + + // Base class type info flags. + enum { + /// BCTI_Virtual - Base class is virtual. + BCTI_Virtual = 0x1, + + /// BCTI_Public - Base class is public. + BCTI_Public = 0x2 + }; + + /// Build the RTTI type info struct for the given type, or + /// link to an existing RTTI descriptor if one already exists. + mlir::Value BuildTypeInfo(QualType Ty); + + /// Build the RTTI type info struct for the given type. + mlir::Value BuildTypeInfo(QualType Ty, mlir::cir::GlobalLinkageKind Linkage, + mlir::SymbolTable::Visibility Visibility); +}; +} // namespace + +/// Given a builtin type, returns whether the type +/// info for that type is defined in the standard library. +/// TODO(cir): this can unified with LLVM codegen +static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) { + // Itanium C++ ABI 2.9.2: + // Basic type information (e.g. for "int", "bool", etc.) will be kept in + // the run-time support library. Specifically, the run-time support + // library should contain type_info objects for the types X, X* and + // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char, + // unsigned char, signed char, short, unsigned short, int, unsigned int, + // long, unsigned long, long long, unsigned long long, float, double, + // long double, char16_t, char32_t, and the IEEE 754r decimal and + // half-precision floating point types. + // + // GCC also emits RTTI for __int128. + // FIXME: We do not emit RTTI information for decimal types here. + + // Types added here must also be added to EmitFundamentalRTTIDescriptors. + switch (Ty->getKind()) { + case BuiltinType::WasmExternRef: + llvm_unreachable("NYI"); + case BuiltinType::Void: + case BuiltinType::NullPtr: + case BuiltinType::Bool: + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + case BuiltinType::Char_U: + case BuiltinType::Char_S: + case BuiltinType::UChar: + case BuiltinType::SChar: + case BuiltinType::Short: + case BuiltinType::UShort: + case BuiltinType::Int: + case BuiltinType::UInt: + case BuiltinType::Long: + case BuiltinType::ULong: + case BuiltinType::LongLong: + case BuiltinType::ULongLong: + case BuiltinType::Half: + case BuiltinType::Float: + case BuiltinType::Double: + case BuiltinType::LongDouble: + case BuiltinType::Float16: + case BuiltinType::Float128: + case BuiltinType::Ibm128: + case BuiltinType::Char8: + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::Int128: + case BuiltinType::UInt128: + return true; + +#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + case BuiltinType::Id: +#include "clang/Basic/OpenCLImageTypes.def" +#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) case BuiltinType::Id: +#include "clang/Basic/OpenCLExtensionTypes.def" + case BuiltinType::OCLSampler: + case BuiltinType::OCLEvent: + case BuiltinType::OCLClkEvent: + case BuiltinType::OCLQueue: + case BuiltinType::OCLReserveID: +#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/AArch64SVEACLETypes.def" +#define PPC_VECTOR_TYPE(Name, Id, Size) case BuiltinType::Id: +#include "clang/Basic/PPCTypes.def" +#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/RISCVVTypes.def" + case BuiltinType::ShortAccum: + case BuiltinType::Accum: + case BuiltinType::LongAccum: + case BuiltinType::UShortAccum: + case BuiltinType::UAccum: + case BuiltinType::ULongAccum: + case BuiltinType::ShortFract: + case BuiltinType::Fract: + case BuiltinType::LongFract: + case BuiltinType::UShortFract: + case BuiltinType::UFract: + case BuiltinType::ULongFract: + case BuiltinType::SatShortAccum: + case BuiltinType::SatAccum: + case BuiltinType::SatLongAccum: + case BuiltinType::SatUShortAccum: + case BuiltinType::SatUAccum: + case BuiltinType::SatULongAccum: + case BuiltinType::SatShortFract: + case BuiltinType::SatFract: + case BuiltinType::SatLongFract: + case BuiltinType::SatUShortFract: + case BuiltinType::SatUFract: + case BuiltinType::SatULongFract: + case BuiltinType::BFloat16: + return false; + + case BuiltinType::Dependent: +#define BUILTIN_TYPE(Id, SingletonId) +#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id: +#include "clang/AST/BuiltinTypes.def" + llvm_unreachable("asking for RRTI for a placeholder type!"); + + case BuiltinType::ObjCId: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: + llvm_unreachable("FIXME: Objective-C types are unsupported!"); + } + + llvm_unreachable("Invalid BuiltinType Kind!"); +} + +static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) { + QualType PointeeTy = PointerTy->getPointeeType(); + const BuiltinType *BuiltinTy = dyn_cast(PointeeTy); + if (!BuiltinTy) + return false; + + // Check the qualifiers. + Qualifiers Quals = PointeeTy.getQualifiers(); + Quals.removeConst(); + + if (!Quals.empty()) + return false; + + return TypeInfoIsInStandardLibrary(BuiltinTy); +} + +/// Returns whether the type +/// information for the given type exists in the standard library. +/// TODO(cir): this can unified with LLVM codegen +static bool IsStandardLibraryRTTIDescriptor(QualType Ty) { + // Type info for builtin types is defined in the standard library. + if (const BuiltinType *BuiltinTy = dyn_cast(Ty)) + return TypeInfoIsInStandardLibrary(BuiltinTy); + + // Type info for some pointer types to builtin types is defined in the + // standard library. + if (const PointerType *PointerTy = dyn_cast(Ty)) + return TypeInfoIsInStandardLibrary(PointerTy); + + return false; +} + +/// Returns whether the type information for +/// the given type exists somewhere else, and that we should not emit the type +/// information in this translation unit. Assumes that it is not a +/// standard-library type. +/// TODO(cir): this can unified with LLVM codegen +static bool ShouldUseExternalRTTIDescriptor(CIRGenModule &CGM, QualType Ty) { + ASTContext &Context = CGM.getASTContext(); + + // If RTTI is disabled, assume it might be disabled in the + // translation unit that defines any potential key function, too. + if (!Context.getLangOpts().RTTI) + return false; + + if (const RecordType *RecordTy = dyn_cast(Ty)) { + const CXXRecordDecl *RD = cast(RecordTy->getDecl()); + if (!RD->hasDefinition()) + return false; + + if (!RD->isDynamicClass()) + return false; + + // FIXME: this may need to be reconsidered if the key function + // changes. + // N.B. We must always emit the RTTI data ourselves if there exists a key + // function. + bool IsDLLImport = RD->hasAttr(); + + // Don't import the RTTI but emit it locally. + if (CGM.getTriple().isWindowsGNUEnvironment()) + return false; + + if (CGM.getVTables().isVTableExternal(RD)) { + if (CGM.getTarget().hasPS4DLLImportExport()) + return true; + + return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment() + ? false + : true; + } + if (IsDLLImport) + return true; + } + + return false; +} + +/// Returns whether the given record type is incomplete. +/// TODO(cir): this can unified with LLVM codegen +static bool IsIncompleteClassType(const RecordType *RecordTy) { + return !RecordTy->getDecl()->isCompleteDefinition(); +} + +/// Returns whether the given type contains an +/// incomplete class type. This is true if +/// +/// * The given type is an incomplete class type. +/// * The given type is a pointer type whose pointee type contains an +/// incomplete class type. +/// * The given type is a member pointer type whose class is an incomplete +/// class type. +/// * The given type is a member pointer type whoise pointee type contains an +/// incomplete class type. +/// is an indirect or direct pointer to an incomplete class type. +/// TODO(cir): this can unified with LLVM codegen +static bool ContainsIncompleteClassType(QualType Ty) { + if (const RecordType *RecordTy = dyn_cast(Ty)) { + if (IsIncompleteClassType(RecordTy)) + return true; + } + + if (const PointerType *PointerTy = dyn_cast(Ty)) + return ContainsIncompleteClassType(PointerTy->getPointeeType()); + + if (const MemberPointerType *MemberPointerTy = + dyn_cast(Ty)) { + // Check if the class type is incomplete. + const RecordType *ClassType = cast(MemberPointerTy->getClass()); + if (IsIncompleteClassType(ClassType)) + return true; + + return ContainsIncompleteClassType(MemberPointerTy->getPointeeType()); + } + + return false; +} + +/// Return the linkage that the type info and type info name constants +/// should have for the given type. +static mlir::cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &CGM, + QualType Ty) { + // Itanium C++ ABI 2.9.5p7: + // In addition, it and all of the intermediate abi::__pointer_type_info + // structs in the chain down to the abi::__class_type_info for the + // incomplete class type must be prevented from resolving to the + // corresponding type_info structs for the complete class type, possibly + // by making them local static objects. Finally, a dummy class RTTI is + // generated for the incomplete type that will not resolve to the final + // complete class RTTI (because the latter need not exist), possibly by + // making it a local static object. + if (ContainsIncompleteClassType(Ty)) + return mlir::cir::GlobalLinkageKind::InternalLinkage; + + switch (Ty->getLinkage()) { + case Linkage::None: + case Linkage::Internal: + case Linkage::UniqueExternal: + return mlir::cir::GlobalLinkageKind::InternalLinkage; + + case Linkage::VisibleNone: + case Linkage::Module: + case Linkage::External: + // RTTI is not enabled, which means that this type info struct is going + // to be used for exception handling. Give it linkonce_odr linkage. + if (!CGM.getLangOpts().RTTI) + return mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage; + + if (const RecordType *Record = dyn_cast(Ty)) { + const CXXRecordDecl *RD = cast(Record->getDecl()); + if (RD->hasAttr()) + return mlir::cir::GlobalLinkageKind::WeakODRLinkage; + if (CGM.getTriple().isWindowsItaniumEnvironment()) + if (RD->hasAttr() && + ShouldUseExternalRTTIDescriptor(CGM, Ty)) + return mlir::cir::GlobalLinkageKind::ExternalLinkage; + // MinGW always uses LinkOnceODRLinkage for type info. + if (RD->isDynamicClass() && !CGM.getASTContext() + .getTargetInfo() + .getTriple() + .isWindowsGNUEnvironment()) + return CGM.getVTableLinkage(RD); + } + + return mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage; + case Linkage::Invalid: + llvm_unreachable("Invalid linkage!"); + } + + llvm_unreachable("Invalid linkage!"); +} + +mlir::Value CIRGenItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) { + // We want to operate on the canonical type. + Ty = Ty.getCanonicalType(); + + // Check if we've already emitted an RTTI descriptor for this type. + SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); + + auto OldGV = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), Name)); + + if (OldGV && !OldGV.isDeclaration()) { + assert(!OldGV.hasAvailableExternallyLinkage() && + "available_externally typeinfos not yet implemented"); + llvm_unreachable("NYI"); + } + + // Check if there is already an external RTTI descriptor for this type. + if (IsStandardLibraryRTTIDescriptor(Ty) || + ShouldUseExternalRTTIDescriptor(CGM, Ty)) + llvm_unreachable("NYI"); + + // Emit the standard library with external linkage. + auto Linkage = getTypeInfoLinkage(CGM, Ty); + + // Give the type_info object and name the formal visibility of the + // type itself. + assert(!UnimplementedFeature::hiddenVisibility()); + assert(!UnimplementedFeature::protectedVisibility()); + mlir::SymbolTable::Visibility symVisibility; + if (mlir::cir::isLocalLinkage(Linkage)) + // If the linkage is local, only default visibility makes sense. + symVisibility = mlir::SymbolTable::Visibility::Public; + else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) == + CIRGenItaniumCXXABI::RUK_NonUniqueHidden) + llvm_unreachable("NYI"); + else + symVisibility = CIRGenModule::getCIRVisibility(Ty->getVisibility()); + + assert(!UnimplementedFeature::setDLLStorageClass()); + return BuildTypeInfo(Ty, Linkage, symVisibility); +} + +mlir::Value CIRGenItaniumRTTIBuilder::BuildTypeInfo( + QualType Ty, mlir::cir::GlobalLinkageKind Linkage, + mlir::SymbolTable::Visibility Visibility) { + assert(!UnimplementedFeature::setDLLStorageClass()); + llvm_unreachable("NYI"); +} + +mlir::Value CIRGenItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) { + return CIRGenItaniumRTTIBuilder(*this, CGM).BuildTypeInfo(Ty); +} + void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, const CXXRecordDecl *RD) { auto VTable = getAddrOfVTable(RD, CharUnits()); @@ -533,4 +1000,83 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, return; llvm_unreachable("NYI"); + + // // Create and set the initializer. + // ConstantInitBuilder builder(CGM); + // auto components = builder.beginStruct(); + // CGVT.createVTableInitializer(components, VTLayout, RTTI, + // mlir::cir::GlobalLinkageKind::isLocalLinkage(Linkage)); + // components.finishAndSetAsInitializer(VTable); + + // // Set the correct linkage. + // VTable->setLinkage(Linkage); + + // if (CGM.supportsCOMDAT() && VTable->isWeakForLinker()) + // VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName())); + + // // Set the right visibility. + // CGM.setGVProperties(VTable, RD); + + // // If this is the magic class __cxxabiv1::__fundamental_type_info, + // // we will emit the typeinfo for the fundamental types. This is the + // // same behaviour as GCC. + // const DeclContext *DC = RD->getDeclContext(); + // if (RD->getIdentifier() && + // RD->getIdentifier()->isStr("__fundamental_type_info") && + // isa(DC) && cast(DC)->getIdentifier() && + // cast(DC)->getIdentifier()->isStr("__cxxabiv1") && + // DC->getParent()->isTranslationUnit()) + // EmitFundamentalRTTIDescriptors(RD); + + // // Always emit type metadata on non-available_externally definitions, and + // on + // // available_externally definitions if we are performing whole program + // // devirtualization. For WPD we need the type metadata on all vtable + // // definitions to ensure we associate derived classes with base classes + // // defined in headers but with a strong definition only in a shared + // library. if (!VTable->isDeclarationForLinker() || + // CGM.getCodeGenOpts().WholeProgramVTables) { + // CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout); + // // For available_externally definitions, add the vtable to + // // @llvm.compiler.used so that it isn't deleted before whole program + // // analysis. + // if (VTable->isDeclarationForLinker()) { + // assert(CGM.getCodeGenOpts().WholeProgramVTables); + // CGM.addCompilerUsedGlobal(VTable); + // } + // } + + // if (VTContext.isRelativeLayout()) { + // CGVT.RemoveHwasanMetadata(VTable); + // if (!VTable->isDSOLocal()) + // CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName()); + // } +} + +/// What sort of uniqueness rules should we use for the RTTI for the +/// given type? +CIRGenItaniumCXXABI::RTTIUniquenessKind +CIRGenItaniumCXXABI::classifyRTTIUniqueness( + QualType CanTy, mlir::cir::GlobalLinkageKind Linkage) const { + if (shouldRTTIBeUnique()) + return RUK_Unique; + + // It's only necessary for linkonce_odr or weak_odr linkage. + if (Linkage != mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage && + Linkage != mlir::cir::GlobalLinkageKind::WeakODRLinkage) + return RUK_Unique; + + // It's only necessary with default visibility. + if (CanTy->getVisibility() != DefaultVisibility) + return RUK_Unique; + + // If we're not required to publish this symbol, hide it. + if (Linkage == mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage) + return RUK_NonUniqueHidden; + + // If we're required to publish this symbol, as we might be under an + // explicit instantiation, leave it with default visibility but + // enable string-comparisons. + assert(Linkage == mlir::cir::GlobalLinkageKind::WeakODRLinkage); + return RUK_NonUniqueVisible; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 8ef4f7591bfb..e4b678ee3737 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2098,3 +2098,20 @@ bool CIRGenModule::shouldOpportunisticallyEmitVTables() { llvm_unreachable("NYI"); return codeGenOpts.OptimizationLevel > 0; } + +mlir::Value CIRGenModule::getAddrOfRTTIDescriptor(QualType Ty, bool ForEH) { + // Return a bogus pointer if RTTI is disabled, unless it's for EH. + // FIXME: should we even be calling this method if RTTI is disabled + // and it's not for EH? + if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice || + (getLangOpts().OpenMP && getLangOpts().OpenMP && getTriple().isNVPTX())) { + llvm_unreachable("NYI"); + } + + if (ForEH && Ty->isObjCObjectPointerType() && + getLangOpts().ObjCRuntime.isGNUFamily()) { + llvm_unreachable("NYI"); + } + + return getCXXABI().getAddrOfRTTIDescriptor(Ty); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index c3cee04c0f0b..4dc6d2ea3092 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -209,6 +209,22 @@ class CIRGenModule { /// of the given class. mlir::cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *RD); + /// Get the address of the RTTI descriptor for the given type. + mlir::Value getAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false); + + /// TODO(cir): add CIR visibility bits. + static mlir::SymbolTable::Visibility getCIRVisibility(Visibility V) { + switch (V) { + case DefaultVisibility: + return mlir::SymbolTable::Visibility::Public; + case HiddenVisibility: + return mlir::SymbolTable::Visibility::Private; + case ProtectedVisibility: + llvm_unreachable("NYI"); + } + llvm_unreachable("unknown visibility!"); + } + llvm::DenseMap ConstantStringMap; /// Return a constant array for the given string. diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 69e50c813e99..d4d6e6c51d80 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -40,6 +40,8 @@ struct UnimplementedFeature { static bool setDLLImportDLLExport() { return false; } static bool setPartition() { return false; } static bool setGlobalVisibility() { return false; } + static bool hiddenVisibility() { return false; } + static bool protectedVisibility() { return false; } // Sanitizers static bool reportGlobalToASan() { return false; } From 7930c02efd80c0502864b75d3d5a1bd345915b77 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 10 Apr 2023 14:12:49 -0700 Subject: [PATCH 0859/2301] [CIR][NFC] Cleanup SignedOverflowBehaviorAttr --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 4e756f609620..3a70d5c9cc82 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -92,13 +92,17 @@ def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> let genVerifyDecl = 1; } +//===----------------------------------------------------------------------===// +// SignedOverflowBehaviorAttr +//===----------------------------------------------------------------------===// + def SignedOverflowBehaviorAttr : AttrDef { - let mnemonic = "signed_overflow_behavior"; - let parameters = (ins - "sob::SignedOverflowBehavior":$behavior - ); - let hasCustomAssemblyFormat = 1; - } + let mnemonic = "signed_overflow_behavior"; + let parameters = (ins + "sob::SignedOverflowBehavior":$behavior + ); + let hasCustomAssemblyFormat = 1; +} //===----------------------------------------------------------------------===// // AST Wrappers From 88be3b19eb6032dfcd684f976a4d8cbc9cb5b583 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 10 Apr 2023 16:19:54 -0700 Subject: [PATCH 0860/2301] [CIR] Add #cir.global_view attribute --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 60 +++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 ++- clang/test/CIR/IR/global.cir | 3 + 3 files changed, 68 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 3a70d5c9cc82..05a248fa85dc 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -104,6 +104,66 @@ def SignedOverflowBehaviorAttr : AttrDef let hasCustomAssemblyFormat = 1; } +//===----------------------------------------------------------------------===// +// GlobalViewAttr +//===----------------------------------------------------------------------===// + +def GlobalViewAttr : CIR_Attr<"GlobalView", "global_view", [TypedAttrInterface]> { + let summary = "Provides constant access to a global address"; + let description = [{ + Get constant address of global `symbol` and optionally apply offsets to + access existing subelements. It provides a way to access globals from other + global and always produces a pointer. + + The type of the input symbol can be different from `#cir.global_view` + output type, since a given view of the global might require a static + cast for initializing other globals. + + A list of indices can be optionally passed and each element indexes + the underlying type one level deep. For `symbol` types like `!cir.array` + and `!cir.struct`, it leads to the constant address of sub-elements, while + for `!cir.ptr` base an offset can be applied. + + Example: + + ``` + cir.global external @s = @".str2": !cir.ptr + cir.global external @x = #cir.global_view<@s> : !cir.ptr + + cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> + cir.global external @elt_ptr = #cir.global_view<@rgb, [1]> : !cir.ptr + cir.global external @table_of_ptrs = #cir.const_array<[#cir.global_view<@rgb, [1]> : !cir.ptr] : !cir.array x 1>> + ``` + }]; + + // `$type` is the `self` type of the attribute (i.e. the type of the + // Attribute itself). + // + // `symbol` is the actual attribute StringAttr for the global symbol this + // refers to. + let parameters = (ins AttributeSelfTypeParameter<"">:$type, + "FlatSymbolRefAttr":$symbol, + OptionalParameter<"ArrayAttr">:$indices); + + let builders = [ + AttrBuilderWithInferredContext<(ins "Type":$type, + "FlatSymbolRefAttr":$symbol, + CArg<"ArrayAttr", "{}">:$indices), [{ + return $_get(type.getContext(), type, symbol, indices); + }]> + ]; + + // Enable verifier. + // let genVerifyDecl = 1; + + let assemblyFormat = [{ + `<` + $symbol + (`,` $indices^)? + `>` + }]; +} + //===----------------------------------------------------------------------===// // AST Wrappers //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 40ab4a063b2e..d56b41342b89 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -179,9 +179,12 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return op->emitOpError("symbolref expects pointer type"); } + if (attrType.isa()) + return success(); + assert(attrType.isa() && "What else could we be looking at here?"); - return op->emitOpError("cannot have value of type ") - << attrType.cast().getType(); + return op->emitOpError("global with type ") + << attrType.cast().getType() << " not supported"; } LogicalResult ConstantOp::verify() { diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 63b90b8ad27a..698af7002b47 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -12,6 +12,9 @@ module { %0 = cir.get_global @a : cir.ptr cir.return } + cir.global external @table = #cir.global_view<@s> : !cir.ptr + cir.global external @elt_ptr = #cir.global_view<@rgb, [1]> : !cir.ptr + cir.global external @table_of_ptrs = #cir.const_array<[#cir.global_view<@rgb, [1]> : !cir.ptr] : !cir.array x 1>> } // CHECK: cir.global external @a = 3 : i32 From 4939e671d71a347ebb9a17baa1c286373de59d5c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 11 Apr 2023 00:03:56 -0700 Subject: [PATCH 0861/2301] [CIR] Add #cir.typeinfo to represent RTTI stuff --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 63 +++++++++++++++---- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 38 ++++++++++- clang/test/CIR/IR/global.cir | 13 ++++ clang/test/CIR/IR/invalid.cir | 15 ++++- 4 files changed, 115 insertions(+), 14 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 05a248fa85dc..54ff9e4510d3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -119,10 +119,11 @@ def GlobalViewAttr : CIR_Attr<"GlobalView", "global_view", [TypedAttrInterface]> output type, since a given view of the global might require a static cast for initializing other globals. - A list of indices can be optionally passed and each element indexes - the underlying type one level deep. For `symbol` types like `!cir.array` + A list of indices can be optionally passed and each element subsequently + indexes underlying types. For `symbol` types like `!cir.array` and `!cir.struct`, it leads to the constant address of sub-elements, while - for `!cir.ptr` base an offset can be applied. + for `!cir.ptr`, an offset is applied. The first index is relative to the + original symbol type, not the produced one. Example: @@ -136,11 +137,6 @@ def GlobalViewAttr : CIR_Attr<"GlobalView", "global_view", [TypedAttrInterface]> ``` }]; - // `$type` is the `self` type of the attribute (i.e. the type of the - // Attribute itself). - // - // `symbol` is the actual attribute StringAttr for the global symbol this - // refers to. let parameters = (ins AttributeSelfTypeParameter<"">:$type, "FlatSymbolRefAttr":$symbol, OptionalParameter<"ArrayAttr">:$indices); @@ -153,17 +149,60 @@ def GlobalViewAttr : CIR_Attr<"GlobalView", "global_view", [TypedAttrInterface]> }]> ]; - // Enable verifier. // let genVerifyDecl = 1; - let assemblyFormat = [{ `<` - $symbol - (`,` $indices^)? + $symbol + (`,` $indices^)? `>` }]; } +//===----------------------------------------------------------------------===// +// TypeInfoAttr +//===----------------------------------------------------------------------===// + +def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> { + let summary = "Represents a typeinfo used for RTTI"; + let description = [{ + The typeinfo data for a given class is stored into an ArrayAttr. The + layout is determined by the C++ ABI used (clang only implements + itanium on CIRGen). + + The verifier enforces that the output type is always a `!cir.struct`, + and that the ArrayAttr element types match the equivalent member type + for the resulting struct. + + Example: + + ``` + cir.global "private" constant external @type_info_A : !cir.ptr + cir.global external @type_info_B = #cir.typeinfo< + [#cir.global_view<@type_info_A> : !cir.ptr] : !cir.struct<"", !cir.ptr> + > + ``` + }]; + + // FIXME: move from ArrayAttr to a ConstStructAttr once it lands? + let parameters = (ins AttributeSelfTypeParameter<"">:$type, + "ArrayAttr":$info); + + let builders = [ + AttrBuilderWithInferredContext<(ins "Type":$type, + "ArrayAttr":$info), [{ + return $_get(type.getContext(), type, info); + }]> + ]; + + // Checks struct element types should match the array for every equivalent + // element type. + let genVerifyDecl = 1; + + let assemblyFormat = [{ + `<` $info `:` $type `>` + }]; +} + //===----------------------------------------------------------------------===// // AST Wrappers //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d56b41342b89..a05a813716d8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -181,6 +181,8 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, if (attrType.isa()) return success(); + if (attrType.isa()) + return success(); assert(attrType.isa() && "What else could we be looking at here?"); return op->emitOpError("global with type ") @@ -1103,7 +1105,10 @@ LogicalResult GlobalOp::verify() { break; case GlobalLinkageKind::ExternalLinkage: case GlobalLinkageKind::ExternalWeakLinkage: - if (isPrivate()) + // FIXME: mlir's concept of visibility gets tricky with LLVM ones, + // for instance, symbol declarations cannot be "public", so we + // have to mark them "private" to workaround the symbol verifier. + if (isPrivate() && !isDeclaration()) return emitError() << "private visibility not allowed with '" << stringifyGlobalLinkageKind(getLinkage()) << "' linkage"; @@ -1814,6 +1819,37 @@ LogicalResult ASTRecordDeclAttr::verify( return success(); } +LogicalResult TypeInfoAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::mlir::Type type, ArrayAttr info) { + auto sTy = type.dyn_cast_or_null(); + if (!sTy) { + emitError() << "expected !cir.struct type"; + return failure(); + } + + if (sTy.getMembers().size() != info.size()) { + emitError() << "number of typeinfo elements must match result type"; + return failure(); + } + + unsigned attrIdx = 0; + for (auto &member : sTy.getMembers()) { + auto gview = info[attrIdx].dyn_cast_or_null(); + if (!gview) { + emitError() << "expected GlobalViewAttr attribute"; + return failure(); + } + if (member != gview.getType()) { + emitError() << "typeinfo element must match result element type"; + return failure(); + } + attrIdx++; + } + + return success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 698af7002b47..d6a8933f842e 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -15,6 +15,19 @@ module { cir.global external @table = #cir.global_view<@s> : !cir.ptr cir.global external @elt_ptr = #cir.global_view<@rgb, [1]> : !cir.ptr cir.global external @table_of_ptrs = #cir.const_array<[#cir.global_view<@rgb, [1]> : !cir.ptr] : !cir.array x 1>> + + // Note MLIR requires "private" for global declarations, should get + // rid of this somehow in favor of clarity? + cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr + cir.global "private" constant external @type_info_A : !cir.ptr + cir.global constant external @type_info_name_B = #cir.const_array<"1B\00" : !cir.array> + + cir.global external @type_info_B = #cir.typeinfo< + [#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, + #cir.global_view<@type_info_name_B> : !cir.ptr, + #cir.global_view<@type_info_A> : !cir.ptr] + : !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> + > } // CHECK: cir.global external @a = 3 : i32 diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index d945ffab0b6f..9497f1107486 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -262,4 +262,17 @@ cir.func coroutine @good_yield() { },) } cir.return -} \ No newline at end of file +} + +// ----- + +module { + // Note MLIR requires "private" for global declarations, should get + // rid of this somehow in favor of clarity? + cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr + + cir.global external @type_info_B = #cir.typeinfo< // expected-error {{typeinfo element must match result element type}} + [#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr] + : !cir.struct<"", !cir.ptr> + > +} // expected-error {{'cir.global' expected constant attribute to match type}} \ No newline at end of file From 5ad797452ae04539846099b419fd9fabcb9734dd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 11 Apr 2023 14:57:03 -0700 Subject: [PATCH 0862/2301] [CIR][CIRGen] Build and emit RTTI typeinfo typeinfo table can now be built, next step is to wrap up vtable construction, which ultimately will allow testcases to land. - Add GetAddrOfTypeName. - Finish getAddrOfRTTIDescriptor and add GetAddrOfExternalRTTIDescriptor - Add a getOrInsertGlobal for RTTI purposes. - Finish BuildTypeInfo. - Finish BuildVTablePointer. - Add a helpers for ConstArrayAttr and String creation. - Start a constant initializer builder for structs (to support vtables) --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 1 + clang/lib/CIR/CodeGen/CIRGenBuilder.h | 34 +- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 3 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 9 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 502 ++++++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 73 ++- clang/lib/CIR/CodeGen/CIRGenModule.h | 24 +- clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 312 ++++++++++ clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 578 ++++++++++++++++++ clang/lib/CIR/CodeGen/ConstantInitFuture.h | 102 ++++ 11 files changed, 1582 insertions(+), 57 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp create mode 100644 clang/lib/CIR/CodeGen/ConstantInitBuilder.h create mode 100644 clang/lib/CIR/CodeGen/ConstantInitFuture.h diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 4e2f5650efb8..616fcdab07af 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -98,6 +98,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { let extraClassDeclaration = [{ public: void dropAst(); + size_t getNumElements() const { return getMembers().size(); } }]; let extraClassDefinition = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index e672db429e15..d404150b5822 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -85,15 +85,32 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Attribute helpers // ----------------- // - mlir::TypedAttr getZeroAttr(mlir::Type t) { return mlir::cir::ZeroAttr::get(getContext(), t); } - mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { - // TODO: dispatch creation for primitive types. - assert(ty.isa() && "NYI for other types"); - return create(loc, ty, getZeroAttr(ty)); + mlir::cir::ConstArrayAttr getString(llvm::StringRef str, mlir::Type eltTy, + unsigned size = 0) { + unsigned finalSize = size ? size : str.size(); + auto arrayTy = mlir::cir::ArrayType::get(getContext(), eltTy, finalSize); + return getConstArray(mlir::StringAttr::get(str, arrayTy), arrayTy); + } + + mlir::cir::ConstArrayAttr getConstArray(mlir::Attribute attrs, + mlir::cir::ArrayType arrayTy) { + return mlir::cir::ConstArrayAttr::get(arrayTy, attrs); + } + + mlir::cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) { + llvm::SmallVector members; + for (auto &f : fieldsAttr) { + auto gva = f.dyn_cast(); + assert(gva && "expected #cir.global_view attribute for element"); + members.push_back(gva.getType()); + } + auto structType = mlir::cir::StructType::get(getContext(), members, "", + /*body=*/true); + return mlir::cir::TypeInfoAttr::get(structType, fieldsAttr); } // @@ -155,6 +172,12 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::IntegerAttr::get(ty, 0)); } + mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { + // TODO: dispatch creation for primitive types. + assert(ty.isa() && "NYI for other types"); + return create(loc, ty, getZeroAttr(ty)); + } + // // Block handling helpers // ---------------------- @@ -175,7 +198,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Operation creation helpers // -------------------------- // - mlir::Value createFPExt(mlir::Value v, mlir::Type destType) { if (getIsFPConstrained()) llvm_unreachable("constrainedfp NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 7dbd070f9235..47315015f6be 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -153,7 +153,8 @@ class CIRGenCXXABI { /// Emits the VTable definitions required for the given record type. virtual void emitVTableDefinitions(CIRGenVTables &CGVT, const CXXRecordDecl *RD) = 0; - virtual mlir::Value getAddrOfRTTIDescriptor(QualType Ty) = 0; + virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, + QualType Ty) = 0; /// Get the address point of the vtable for the given base subobject. virtual mlir::Value diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 25b078b6ce83..69b933473693 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1199,6 +1199,7 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, mlir::Type CommonElementType, unsigned ArrayBound, SmallVectorImpl &Elements, mlir::TypedAttr Filler) { + auto &builder = CGM.getBuilder(); auto isNullValue = [&](mlir::Attribute f) { // TODO(cir): introduce char type in CIR and check for that instead. auto intVal = f.dyn_cast_or_null(); @@ -1244,10 +1245,10 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, for (auto const &Element : Elements) Eles.push_back(Element); - return mlir::cir::ConstArrayAttr::get( - mlir::cir::ArrayType::get(CGM.getBuilder().getContext(), - CommonElementType, ArrayBound), - mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Eles)); + return builder.getConstArray( + mlir::ArrayAttr::get(builder.getContext(), Eles), + mlir::cir::ArrayType::get(builder.getContext(), CommonElementType, + ArrayBound)); } // We have mixed types. Use a packed struct. diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 0e872d34c0eb..2cc2f0edb74b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -19,6 +19,7 @@ #include "CIRGenCXXABI.h" #include "CIRGenFunctionInfo.h" +#include "ConstantInitBuilder.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Mangle.h" @@ -122,7 +123,8 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { const CXXRecordDecl *NearestVBase) override; void emitVTableDefinitions(CIRGenVTables &CGVT, const CXXRecordDecl *RD) override; - mlir::Value getAddrOfRTTIDescriptor(QualType Ty) override; + mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, + QualType Ty) override; /// TODO(cir): seems like could be shared between LLVM IR and CIR codegen. bool mayNeedDestruction(const VarDecl *VD) const { @@ -562,24 +564,24 @@ class CIRGenItaniumRTTIBuilder { CIRGenModule &CGM; // Per-module state. const CIRGenItaniumCXXABI &CXXABI; // Per-module state. - // /// The fields of the RTTI descriptor currently being built. - // SmallVector Fields; + /// The fields of the RTTI descriptor currently being built. + SmallVector Fields; - // /// Returns the mangled type name of the given type. - // llvm::GlobalVariable * - // GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage); + // Returns the mangled type name of the given type. + mlir::cir::GlobalOp GetAddrOfTypeName(mlir::Location loc, QualType Ty, + mlir::cir::GlobalLinkageKind Linkage); // /// Returns the constant for the RTTI // /// descriptor of the given type. - // llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty); + mlir::Attribute GetAddrOfExternalRTTIDescriptor(mlir::Location loc, + QualType Ty); - // /// Build the vtable pointer for the given type. - // void BuildVTablePointer(const Type *Ty); + /// Build the vtable pointer for the given type. + void BuildVTablePointer(mlir::Location loc, const Type *Ty); - // /// Build an abi::__si_class_type_info, used for - // single - // /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b. - // void BuildSIClassTypeInfo(const CXXRecordDecl *RD); + /// Build an abi::__si_class_type_info, used for single inheritance, according + /// to the Itanium C++ ABI, 2.9.5p6b. + void BuildSIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *RD); // /// Build an abi::__vmi_class_type_info, used for // /// classes with bases that do not satisfy the abi::__si_class_type_info @@ -648,11 +650,12 @@ class CIRGenItaniumRTTIBuilder { /// Build the RTTI type info struct for the given type, or /// link to an existing RTTI descriptor if one already exists. - mlir::Value BuildTypeInfo(QualType Ty); + mlir::Attribute BuildTypeInfo(mlir::Location loc, QualType Ty); /// Build the RTTI type info struct for the given type. - mlir::Value BuildTypeInfo(QualType Ty, mlir::cir::GlobalLinkageKind Linkage, - mlir::SymbolTable::Visibility Visibility); + mlir::Attribute BuildTypeInfo(mlir::Location loc, QualType Ty, + mlir::cir::GlobalLinkageKind Linkage, + mlir::SymbolTable::Visibility Visibility); }; } // namespace @@ -884,6 +887,36 @@ static bool ContainsIncompleteClassType(QualType Ty) { return false; } +// Return whether the given record decl has a "single, +// public, non-virtual base at offset zero (i.e. the derived class is dynamic +// iff the base is)", according to Itanium C++ ABI, 2.95p6b. +// TODO(cir): this can unified with LLVM codegen +static bool CanUseSingleInheritance(const CXXRecordDecl *RD) { + // Check the number of bases. + if (RD->getNumBases() != 1) + return false; + + // Get the base. + CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(); + + // Check that the base is not virtual. + if (Base->isVirtual()) + return false; + + // Check that the base is public. + if (Base->getAccessSpecifier() != AS_public) + return false; + + // Check that the class is dynamic iff the base is. + auto *BaseDecl = + cast(Base->getType()->castAs()->getDecl()); + if (!BaseDecl->isEmpty() && + BaseDecl->isDynamicClass() != RD->isDynamicClass()) + return false; + + return true; +} + /// Return the linkage that the type info and type info name constants /// should have for the given type. static mlir::cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &CGM, @@ -938,7 +971,8 @@ static mlir::cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &CGM, llvm_unreachable("Invalid linkage!"); } -mlir::Value CIRGenItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) { +mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(mlir::Location loc, + QualType Ty) { // We want to operate on the canonical type. Ty = Ty.getCanonicalType(); @@ -959,7 +993,7 @@ mlir::Value CIRGenItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) { // Check if there is already an external RTTI descriptor for this type. if (IsStandardLibraryRTTIDescriptor(Ty) || ShouldUseExternalRTTIDescriptor(CGM, Ty)) - llvm_unreachable("NYI"); + return GetAddrOfExternalRTTIDescriptor(loc, Ty); // Emit the standard library with external linkage. auto Linkage = getTypeInfoLinkage(CGM, Ty); @@ -979,18 +1013,424 @@ mlir::Value CIRGenItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) { symVisibility = CIRGenModule::getCIRVisibility(Ty->getVisibility()); assert(!UnimplementedFeature::setDLLStorageClass()); - return BuildTypeInfo(Ty, Linkage, symVisibility); + return BuildTypeInfo(loc, Ty, Linkage, symVisibility); +} + +void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, + const Type *Ty) { + auto &builder = CGM.getBuilder(); + + // abi::__class_type_info. + static const char *const ClassTypeInfo = + "_ZTVN10__cxxabiv117__class_type_infoE"; + // abi::__si_class_type_info. + static const char *const SIClassTypeInfo = + "_ZTVN10__cxxabiv120__si_class_type_infoE"; + // abi::__vmi_class_type_info. + static const char *const VMIClassTypeInfo = + "_ZTVN10__cxxabiv121__vmi_class_type_infoE"; + + const char *VTableName = nullptr; + + switch (Ty->getTypeClass()) { + case Type::ArrayParameter: + llvm_unreachable("NYI"); +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical and dependent types shouldn't get here"); + + case Type::LValueReference: + case Type::RValueReference: + llvm_unreachable("References shouldn't get here"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("Undeduced type shouldn't get here"); + + case Type::Pipe: + llvm_unreachable("Pipe types shouldn't get here"); + + case Type::Builtin: + case Type::BitInt: + // GCC treats vector and complex types as fundamental types. + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::Complex: + case Type::Atomic: + // FIXME: GCC treats block pointers as fundamental types?! + case Type::BlockPointer: + // abi::__fundamental_type_info. + VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE"; + break; + + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + // abi::__array_type_info. + VTableName = "_ZTVN10__cxxabiv117__array_type_infoE"; + break; + + case Type::FunctionNoProto: + case Type::FunctionProto: + // abi::__function_type_info. + VTableName = "_ZTVN10__cxxabiv120__function_type_infoE"; + break; + + case Type::Enum: + // abi::__enum_type_info. + VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE"; + break; + + case Type::Record: { + const CXXRecordDecl *RD = + cast(cast(Ty)->getDecl()); + + if (!RD->hasDefinition() || !RD->getNumBases()) { + VTableName = ClassTypeInfo; + } else if (CanUseSingleInheritance(RD)) { + VTableName = SIClassTypeInfo; + } else { + VTableName = VMIClassTypeInfo; + } + + break; + } + + case Type::ObjCObject: + // Ignore protocol qualifiers. + Ty = cast(Ty)->getBaseType().getTypePtr(); + + // Handle id and Class. + if (isa(Ty)) { + VTableName = ClassTypeInfo; + break; + } + + assert(isa(Ty)); + [[fallthrough]]; + + case Type::ObjCInterface: + if (cast(Ty)->getDecl()->getSuperClass()) { + VTableName = SIClassTypeInfo; + } else { + VTableName = ClassTypeInfo; + } + break; + + case Type::ObjCObjectPointer: + case Type::Pointer: + // abi::__pointer_type_info. + VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE"; + break; + + case Type::MemberPointer: + // abi::__pointer_to_member_type_info. + VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE"; + break; + } + + mlir::cir::GlobalOp VTable{}; + + // Check if the alias exists. If it doesn't, then get or create the global. + if (CGM.getItaniumVTableContext().isRelativeLayout()) + llvm_unreachable("NYI"); + if (!VTable) { + VTable = + CGM.getOrInsertGlobal(loc, VTableName, CGM.getBuilder().getInt8PtrTy()); + } + + assert(!UnimplementedFeature::setDSOLocal()); + auto PtrDiffTy = + CGM.getTypes().ConvertType(CGM.getASTContext().getPointerDiffType()); + + // The vtable address point is 2. + mlir::Attribute field{}; + if (CGM.getItaniumVTableContext().isRelativeLayout()) { + llvm_unreachable("NYI"); + } else { + SmallVector offsets{ + mlir::IntegerAttr::get(PtrDiffTy, 2)}; + field = mlir::cir::GlobalViewAttr::get( + builder.getInt8PtrTy(), + mlir::FlatSymbolRefAttr::get(VTable.getSymNameAttr()), + mlir::ArrayAttr::get(builder.getContext(), offsets)); + } + + assert(field && "expected attribute"); + Fields.push_back(field); +} + +mlir::cir::GlobalOp CIRGenItaniumRTTIBuilder::GetAddrOfTypeName( + mlir::Location loc, QualType Ty, mlir::cir::GlobalLinkageKind Linkage) { + auto &builder = CGM.getBuilder(); + SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out); + + // We know that the mangled name of the type starts at index 4 of the + // mangled name of the typename, so we can just index into it in order to + // get the mangled name of the type. + auto Init = builder.getString( + Name.substr(4), CGM.getTypes().ConvertType(CGM.getASTContext().CharTy)); + auto Align = + CGM.getASTContext().getTypeAlignInChars(CGM.getASTContext().CharTy); + + auto GV = CGM.createOrReplaceCXXRuntimeVariable(loc, Name, Init.getType(), + Linkage, Align); + + GV.setInitialValueAttr(Init); + return GV; +} + +/// Build an abi::__si_class_type_info, used for single inheritance, according +/// to the Itanium C++ ABI, 2.95p6b. +void CIRGenItaniumRTTIBuilder::BuildSIClassTypeInfo(mlir::Location loc, + const CXXRecordDecl *RD) { + // Itanium C++ ABI 2.9.5p6b: + // It adds to abi::__class_type_info a single member pointing to the + // type_info structure for the base type, + auto BaseTypeInfo = CIRGenItaniumRTTIBuilder(CXXABI, CGM) + .BuildTypeInfo(loc, RD->bases_begin()->getType()); + Fields.push_back(BaseTypeInfo); +} + +mlir::Attribute +CIRGenItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(mlir::Location loc, + QualType Ty) { + // Mangle the RTTI name. + SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); + auto &builder = CGM.getBuilder(); + + // Look for an existing global. + auto GV = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), Name)); + + if (!GV) { + // Create a new global variable. + // From LLVM codegen => Note for the future: If we would ever like to do + // deferred emission of RTTI, check if emitting vtables opportunistically + // need any adjustment. + GV = CIRGenModule::createGlobalOp(CGM, loc, Name, builder.getInt8PtrTy(), + /*isConstant=*/true); + const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); + CGM.setGVProperties(GV, RD); + + // Import the typeinfo symbol when all non-inline virtual methods are + // imported. + if (CGM.getTarget().hasPS4DLLImportExport()) + llvm_unreachable("NYI"); + } + + return mlir::cir::GlobalViewAttr::get( + builder.getInt8PtrTy(), + mlir::FlatSymbolRefAttr::get(GV.getSymNameAttr())); } -mlir::Value CIRGenItaniumRTTIBuilder::BuildTypeInfo( - QualType Ty, mlir::cir::GlobalLinkageKind Linkage, +mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( + mlir::Location loc, QualType Ty, mlir::cir::GlobalLinkageKind Linkage, mlir::SymbolTable::Visibility Visibility) { + auto &builder = CGM.getBuilder(); assert(!UnimplementedFeature::setDLLStorageClass()); - llvm_unreachable("NYI"); + + // Add the vtable pointer. + BuildVTablePointer(loc, cast(Ty)); + + // And the name. + auto TypeName = GetAddrOfTypeName(loc, Ty, Linkage); + mlir::Attribute TypeNameField; + + // If we're supposed to demote the visibility, be sure to set a flag + // to use a string comparison for type_info comparisons. + CIRGenItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness = + CXXABI.classifyRTTIUniqueness(Ty, Linkage); + if (RTTIUniqueness != CIRGenItaniumCXXABI::RUK_Unique) { + // The flag is the sign bit, which on ARM64 is defined to be clear + // for global pointers. This is very ARM64-specific. + llvm_unreachable("NYI"); + } else { + TypeNameField = mlir::cir::GlobalViewAttr::get( + builder.getInt8PtrTy(), + mlir::FlatSymbolRefAttr::get(TypeName.getSymNameAttr())); + } + Fields.push_back(TypeNameField); + + switch (Ty->getTypeClass()) { + case Type::ArrayParameter: + llvm_unreachable("NYI"); +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical and dependent types shouldn't get here"); + + // GCC treats vector types as fundamental types. + case Type::Builtin: + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::Complex: + case Type::BlockPointer: + // Itanium C++ ABI 2.9.5p4: + // abi::__fundamental_type_info adds no data members to std::type_info. + break; + + case Type::LValueReference: + case Type::RValueReference: + llvm_unreachable("References shouldn't get here"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("Undeduced type shouldn't get here"); + + case Type::Pipe: + break; + + case Type::BitInt: + break; + + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + // Itanium C++ ABI 2.9.5p5: + // abi::__array_type_info adds no data members to std::type_info. + break; + + case Type::FunctionNoProto: + case Type::FunctionProto: + // Itanium C++ ABI 2.9.5p5: + // abi::__function_type_info adds no data members to std::type_info. + break; + + case Type::Enum: + // Itanium C++ ABI 2.9.5p5: + // abi::__enum_type_info adds no data members to std::type_info. + break; + + case Type::Record: { + const CXXRecordDecl *RD = + cast(cast(Ty)->getDecl()); + if (!RD->hasDefinition() || !RD->getNumBases()) { + // We don't need to emit any fields. + break; + } + + if (CanUseSingleInheritance(RD)) { + BuildSIClassTypeInfo(loc, RD); + } else { + llvm_unreachable("NYI"); + // BuildVMIClassTypeInfo(RD); + } + + break; + } + + case Type::ObjCObject: + case Type::ObjCInterface: + llvm_unreachable("NYI"); + break; + + case Type::ObjCObjectPointer: + llvm_unreachable("NYI"); + break; + + case Type::Pointer: + llvm_unreachable("NYI"); + break; + + case Type::MemberPointer: + llvm_unreachable("NYI"); + break; + + case Type::Atomic: + // No fields, at least for the moment. + break; + } + + assert(!UnimplementedFeature::setDLLImportDLLExport()); + auto init = builder.getTypeInfo(builder.getArrayAttr(Fields)); + + SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); + + // Create new global and search for an existing global. + auto OldGV = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), Name)); + mlir::cir::GlobalOp GV = + CIRGenModule::createGlobalOp(CGM, loc, Name, init.getType(), + /*isConstant=*/true); + + // Export the typeinfo in the same circumstances as the vtable is + // exported. + if (CGM.getTarget().hasPS4DLLImportExport()) + llvm_unreachable("NYI"); + + // If there's already an old global variable, replace it with the new one. + if (OldGV) { + // Replace occurrences of the old variable if needed. + GV.setName(OldGV.getName()); + if (!OldGV->use_empty()) { + // TODO: replaceAllUsesWith + llvm_unreachable("NYI"); + } + OldGV->erase(); + } + + if (CGM.supportsCOMDAT() && mlir::cir::isWeakForLinker(GV.getLinkage())) { + assert(!UnimplementedFeature::setComdat()); + llvm_unreachable("NYI"); + } + + CharUnits Align = CGM.getASTContext().toCharUnitsFromBits( + CGM.getTarget().getPointerAlign(LangAS::Default)); + GV.setAlignmentAttr(CGM.getSize(Align)); + + // The Itanium ABI specifies that type_info objects must be globally + // unique, with one exception: if the type is an incomplete class + // type or a (possibly indirect) pointer to one. That exception + // affects the general case of comparing type_info objects produced + // by the typeid operator, which is why the comparison operators on + // std::type_info generally use the type_info name pointers instead + // of the object addresses. However, the language's built-in uses + // of RTTI generally require class types to be complete, even when + // manipulating pointers to those class types. This allows the + // implementation of dynamic_cast to rely on address equality tests, + // which is much faster. + // + // All of this is to say that it's important that both the type_info + // object and the type_info name be uniqued when weakly emitted. + + // TODO(cir): setup other bits for TypeName + assert(!UnimplementedFeature::setDLLStorageClass()); + assert(!UnimplementedFeature::setPartition()); + assert(!UnimplementedFeature::setDSOLocal()); + mlir::SymbolTable::setSymbolVisibility( + TypeName, CIRGenModule::getMLIRVisibilityFromCIRLinkage(Linkage)); + + // TODO(cir): setup other bits for GV + assert(!UnimplementedFeature::setDLLStorageClass()); + assert(!UnimplementedFeature::setPartition()); + assert(!UnimplementedFeature::setDSOLocal()); + mlir::SymbolTable::setSymbolVisibility( + GV, CIRGenModule::getMLIRVisibilityFromCIRLinkage(Linkage)); + + return mlir::cir::GlobalViewAttr::get( + builder.getInt8PtrTy(), + mlir::FlatSymbolRefAttr::get(GV.getSymNameAttr())); } -mlir::Value CIRGenItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) { - return CIRGenItaniumRTTIBuilder(*this, CGM).BuildTypeInfo(Ty); +mlir::Attribute CIRGenItaniumCXXABI::getAddrOfRTTIDescriptor(mlir::Location loc, + QualType Ty) { + return CIRGenItaniumRTTIBuilder(*this, CGM).BuildTypeInfo(loc, Ty); } void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, @@ -999,11 +1439,17 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, if (VTable.hasInitializer()) return; - llvm_unreachable("NYI"); + ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext(); + [[maybe_unused]] const VTableLayout &VTLayout = VTContext.getVTableLayout(RD); + [[maybe_unused]] auto Linkage = CGM.getVTableLinkage(RD); + [[maybe_unused]] auto RTTI = CGM.getAddrOfRTTIDescriptor( + CGM.getLoc(RD->getBeginLoc()), CGM.getASTContext().getTagDeclType(RD)); + + // Create and set the initializer. + ConstantInitBuilder builder(CGM); + [[maybe_unused]] auto components = builder.beginStruct(); - // // Create and set the initializer. - // ConstantInitBuilder builder(CGM); - // auto components = builder.beginStruct(); + llvm_unreachable("NYI"); // CGVT.createVTableInitializer(components, VTLayout, RTTI, // mlir::cir::GlobalLinkageKind::isLocalLinkage(Linkage)); // components.finishAndSetAsInitializer(VTable); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index e4b678ee3737..8f0b6b136689 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -383,9 +383,10 @@ mlir::Value CIRGenModule::getGlobalValue(const Decl *D) { return CurCGF->symbolTable.lookup(D); } -static mlir::cir::GlobalOp createGlobalOp(CIRGenModule &CGM, mlir::Location loc, - StringRef name, mlir::Type t, - bool isCst = false) { +mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM, + mlir::Location loc, + StringRef name, mlir::Type t, + bool isCst) { mlir::cir::GlobalOp g; auto &builder = CGM.getBuilder(); { @@ -497,8 +498,8 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // mlir::SymbolTable::Visibility::Public is the default, no need to explicitly // mark it as such. - auto GV = createGlobalOp(*this, loc, MangledName, Ty, - /*isConstant=*/false); + auto GV = CIRGenModule::createGlobalOp(*this, loc, MangledName, Ty, + /*isConstant=*/false); // If we already created a global with the same mangled name (but different // type) before, take its name and remove it from its parent. @@ -903,11 +904,7 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { Str.resize(finalSize); auto eltTy = getTypes().ConvertType(CAT->getElementType()); - auto TheType = - mlir::cir::ArrayType::get(builder.getContext(), eltTy, finalSize); - auto constArray = mlir::cir::ConstArrayAttr::get( - TheType, mlir::StringAttr::get(Str, TheType)); - return constArray; + return builder.getString(Str, eltTy, finalSize); } assert(0 && "not implemented"); @@ -938,8 +935,8 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, // Create a global variable for this string // FIXME(cir): check for insertion point in module level. - auto GV = createGlobalOp(CGM, loc, GlobalName, C.getType(), - !CGM.getLangOpts().WritableStrings); + auto GV = CIRGenModule::createGlobalOp(CGM, loc, GlobalName, C.getType(), + !CGM.getLangOpts().WritableStrings); // Set up extra information and add to the module GV.setAlignmentAttr(CGM.getSize(Alignment)); @@ -2065,11 +2062,10 @@ mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( OldGV = GV; } - // // Create a new variable. - GV = createGlobalOp(*this, loc, Name, Ty); + // Create a new variable. + GV = CIRGenModule::createGlobalOp(*this, loc, Name, Ty); // Set up extra information and add to the module - GV.setLinkageAttr( mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), Linkage)); mlir::SymbolTable::setSymbolVisibility( @@ -2099,7 +2095,8 @@ bool CIRGenModule::shouldOpportunisticallyEmitVTables() { return codeGenOpts.OptimizationLevel > 0; } -mlir::Value CIRGenModule::getAddrOfRTTIDescriptor(QualType Ty, bool ForEH) { +mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc, + QualType Ty, bool ForEH) { // Return a bogus pointer if RTTI is disabled, unless it's for EH. // FIXME: should we even be calling this method if RTTI is disabled // and it's not for EH? @@ -2113,5 +2110,47 @@ mlir::Value CIRGenModule::getAddrOfRTTIDescriptor(QualType Ty, bool ForEH) { llvm_unreachable("NYI"); } - return getCXXABI().getAddrOfRTTIDescriptor(Ty); + return getCXXABI().getAddrOfRTTIDescriptor(loc, Ty); +} + +/// TODO(cir): once we have cir.module, add this as a convenience method there. +/// +/// Look up the specified global in the module symbol table. +/// 1. If it does not exist, add a declaration of the global and return it. +/// 2. Else, the global exists but has the wrong type: return the function +/// with a constantexpr cast to the right type. +/// 3. Finally, if the existing global is the correct declaration, return the +/// existing global. +mlir::cir::GlobalOp CIRGenModule::getOrInsertGlobal( + mlir::Location loc, StringRef Name, mlir::Type Ty, + llvm::function_ref CreateGlobalCallback) { + // See if we have a definition for the specified global already. + auto GV = dyn_cast_or_null(getGlobalValue(Name)); + if (!GV) { + GV = CreateGlobalCallback(); + } + assert(GV && "The CreateGlobalCallback is expected to create a global"); + + // If the variable exists but has the wrong type, return a bitcast to the + // right type. + auto GVTy = GV.getSymType(); + assert(!UnimplementedFeature::addressSpace()); + auto PTy = builder.getPointerTo(Ty); + + if (GVTy != PTy) + llvm_unreachable("NYI"); + + // Otherwise, we just found the existing function or a prototype. + return GV; } + +// Overload to construct a global variable using its constructor's defaults. +mlir::cir::GlobalOp CIRGenModule::getOrInsertGlobal(mlir::Location loc, + StringRef Name, + mlir::Type Ty) { + return getOrInsertGlobal(loc, Name, Ty, [&] { + return CIRGenModule::createGlobalOp(*this, loc, Name, + builder.getPointerTo(Ty)); + }); +} + diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 4dc6d2ea3092..5736aa2c8c78 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -183,6 +183,27 @@ class CIRGenModule { std::optional Ty, ForDefinition_t IsForDefinition); + /// TODO(cir): once we have cir.module, add this as a convenience method + /// there instead of here. + /// + /// Look up the specified global in the module symbol table. + /// 1. If it does not exist, add a declaration of the global and return it. + /// 2. Else, the global exists but has the wrong type: return the function + /// with a constantexpr cast to the right type. + /// 3. Finally, if the existing global is the correct declaration, return + /// the existing global. + mlir::cir::GlobalOp getOrInsertGlobal( + mlir::Location loc, StringRef Name, mlir::Type Ty, + llvm::function_ref CreateGlobalCallback); + + // Overload to construct a global variable using its constructor's defaults. + mlir::cir::GlobalOp getOrInsertGlobal(mlir::Location loc, StringRef Name, + mlir::Type Ty); + + static mlir::cir::GlobalOp createGlobalOp(CIRGenModule &CGM, + mlir::Location loc, StringRef name, + mlir::Type t, bool isCst = false); + /// Return the mlir::Value for the address of the given global variable. /// If Ty is non-null and if the global doesn't exist, then it will be created /// with the specified type instead of whatever the normal requested type @@ -210,7 +231,8 @@ class CIRGenModule { mlir::cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *RD); /// Get the address of the RTTI descriptor for the given type. - mlir::Value getAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false); + mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty, + bool ForEH = false); /// TODO(cir): add CIR visibility bits. static mlir::SymbolTable::Visibility getCIRVisibility(Visibility V) { diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index f0b08fb463e1..bf9c7ef92db5 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -34,6 +34,7 @@ add_clang_library(clangCIR CIRGenerator.cpp CIRPasses.cpp CIRRecordLayoutBuilder.cpp + ConstantInitBuilder.cpp TargetInfo.cpp DEPENDS diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp new file mode 100644 index 000000000000..3158980051c4 --- /dev/null +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -0,0 +1,312 @@ +//===--- ConstantInitBuilder.cpp - Global initializer builder -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines out-of-line routines for building initializers for +// global variables, in particular the kind of globals that are implicitly +// introduced by various language ABIs. +// +//===----------------------------------------------------------------------===// + +#include "ConstantInitBuilder.h" +#include "CIRGenModule.h" + +using namespace clang; +using namespace cir; + +ConstantInitBuilderBase::ConstantInitBuilderBase(CIRGenModule &CGM) + : CGM(CGM), builder(CGM.getBuilder()) {} + +mlir::Type ConstantInitFuture::getType() const { + assert(Data && "dereferencing null future"); + if (Data.is()) { + auto attr = Data.get().dyn_cast(); + assert(attr && "expected typed attribute"); + return attr.getType(); + } else { + llvm_unreachable("Only sypport typed attributes here"); + } +} + +void ConstantInitFuture::abandon() { + assert(Data && "abandoning null future"); + if (auto builder = Data.dyn_cast()) { + builder->abandon(0); + } + Data = nullptr; +} + +void ConstantInitFuture::installInGlobal(mlir::cir::GlobalOp GV) { + assert(Data && "installing null future"); + if (Data.is()) { + GV.setInitialValueAttr(Data.get()); + } else { + llvm_unreachable("NYI"); + // auto &builder = *Data.get(); + // assert(builder.Buffer.size() == 1); + // builder.setGlobalInitializer(GV, builder.Buffer[0]); + // builder.Buffer.clear(); + // Data = nullptr; + } +} + +ConstantInitFuture +ConstantInitBuilderBase::createFuture(mlir::Attribute initializer) { + assert(Buffer.empty() && "buffer not current empty"); + Buffer.push_back(initializer); + return ConstantInitFuture(this); +} + +// Only used in this file. +inline ConstantInitFuture::ConstantInitFuture(ConstantInitBuilderBase *builder) + : Data(builder) { + assert(!builder->Frozen); + assert(builder->Buffer.size() == 1); + assert(builder->Buffer[0] != nullptr); +} + +mlir::cir::GlobalOp ConstantInitBuilderBase::createGlobal( + mlir::Attribute initializer, const llvm::Twine &name, CharUnits alignment, + bool constant, mlir::cir::GlobalLinkageKind linkage, + unsigned addressSpace) { + llvm_unreachable("NYI"); + // auto GV = + // new llvm::GlobalVariable(CGM.getModule(), initializer->getType(), + // constant, linkage, initializer, name, + // /*insert before*/ nullptr, + // llvm::GlobalValue::NotThreadLocal, + // addressSpace); + // GV->setAlignment(alignment.getAsAlign()); + // resolveSelfReferences(GV); + // return GV; +} + +void ConstantInitBuilderBase::setGlobalInitializer( + mlir::cir::GlobalOp GV, mlir::Attribute initializer) { + // GV->setInitializer(initializer); + + // if (!SelfReferences.empty()) + // resolveSelfReferences(GV); + llvm_unreachable("NYI"); +} + +void ConstantInitBuilderBase::resolveSelfReferences(mlir::cir::GlobalOp GV) { + llvm_unreachable("NYI"); + // for (auto &entry : SelfReferences) { + // mlir::Attribute resolvedReference = + // llvm::ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV, + // entry.Indices); + // auto dummy = entry.Dummy; + // dummy->replaceAllUsesWith(resolvedReference); + // dummy->eraseFromParent(); + // } + // SelfReferences.clear(); +} + +void ConstantInitBuilderBase::abandon(size_t newEnd) { + llvm_unreachable("NYI"); + // // Remove all the entries we've added. + // Buffer.erase(Buffer.begin() + newEnd, Buffer.end()); + + // // If we're abandoning all the way to the beginning, destroy + // // all the self-references, because we might not get another + // // opportunity. + // if (newEnd == 0) { + // for (auto &entry : SelfReferences) { + // auto dummy = entry.Dummy; + // dummy->replaceAllUsesWith(llvm::PoisonValue::get(dummy->getType())); + // dummy->eraseFromParent(); + // } + // SelfReferences.clear(); + // } +} + +void ConstantAggregateBuilderBase::addSize(CharUnits size) { + add(Builder.CGM.getSize(size)); +} + +mlir::Attribute +ConstantAggregateBuilderBase::getRelativeOffset(mlir::IntegerType offsetType, + mlir::Attribute target) { + return getRelativeOffsetToPosition(offsetType, target, + Builder.Buffer.size() - Begin); +} + +mlir::Attribute ConstantAggregateBuilderBase::getRelativeOffsetToPosition( + mlir::IntegerType offsetType, mlir::Attribute target, size_t position) { + llvm_unreachable("NYI"); + // // Compute the address of the relative-address slot. + // auto base = getAddrOfPosition(offsetType, position); + + // // Subtract. + // base = llvm::ConstantExpr::getPtrToInt(base, Builder.CGM.IntPtrTy); + // target = llvm::ConstantExpr::getPtrToInt(target, Builder.CGM.IntPtrTy); + // mlir::Attribute offset = llvm::ConstantExpr::getSub(target, base); + + // // Truncate to the relative-address type if necessary. + // if (Builder.CGM.IntPtrTy != offsetType) { + // offset = llvm::ConstantExpr::getTrunc(offset, offsetType); + // } + + // return offset; +} + +mlir::Attribute +ConstantAggregateBuilderBase::getAddrOfPosition(mlir::Type type, + size_t position) { + llvm_unreachable("NYI"); + // // Make a global variable. We will replace this with a GEP to this + // // position after installing the initializer. + // auto dummy = new llvm::GlobalVariable(Builder.CGM.getModule(), type, true, + // llvm::GlobalVariable::PrivateLinkage, + // nullptr, ""); + // Builder.SelfReferences.emplace_back(dummy); + // auto &entry = Builder.SelfReferences.back(); + // (void)getGEPIndicesTo(entry.Indices, position + Begin); + // return dummy; +} + +mlir::Attribute +ConstantAggregateBuilderBase::getAddrOfCurrentPosition(mlir::Type type) { + llvm_unreachable("NYI"); + // // Make a global variable. We will replace this with a GEP to this + // // position after installing the initializer. + // auto dummy = new llvm::GlobalVariable(Builder.CGM.getModule(), type, true, + // llvm::GlobalVariable::PrivateLinkage, + // nullptr, ""); + // Builder.SelfReferences.emplace_back(dummy); + // auto &entry = Builder.SelfReferences.back(); + // (void)getGEPIndicesToCurrentPosition(entry.Indices); + // return dummy; +} + +void ConstantAggregateBuilderBase::getGEPIndicesTo( + llvm::SmallVectorImpl &indices, size_t position) const { + llvm_unreachable("NYI"); + // // Recurse on the parent builder if present. + // if (Parent) { + // Parent->getGEPIndicesTo(indices, Begin); + + // // Otherwise, add an index to drill into the first level of pointer. + // } else { + // assert(indices.empty()); + // indices.push_back(llvm::ConstantInt::get(Builder.CGM.Int32Ty, 0)); + // } + + // assert(position >= Begin); + // // We have to use i32 here because struct GEPs demand i32 indices. + // // It's rather unlikely to matter in practice. + // indices.push_back( + // llvm::ConstantInt::get(Builder.CGM.Int32Ty, position - Begin)); +} + +ConstantAggregateBuilderBase::PlaceholderPosition +ConstantAggregateBuilderBase::addPlaceholderWithSize(mlir::Type type) { + llvm_unreachable("NYI"); + // // Bring the offset up to the last field. + // CharUnits offset = getNextOffsetFromGlobal(); + + // // Create the placeholder. + // auto position = addPlaceholder(); + + // // Advance the offset past that field. + // auto &layout = Builder.CGM.getDataLayout(); + // if (!Packed) + // offset = + // offset.alignTo(CharUnits::fromQuantity(layout.getABITypeAlign(type))); + // offset += CharUnits::fromQuantity(layout.getTypeStoreSize(type)); + + // CachedOffsetEnd = Builder.Buffer.size(); + // CachedOffsetFromGlobal = offset; + + // return position; +} + +CharUnits +ConstantAggregateBuilderBase::getOffsetFromGlobalTo(size_t end) const { + size_t cacheEnd = CachedOffsetEnd; + assert(cacheEnd <= end); + + // Fast path: if the cache is valid, just use it. + if (cacheEnd == end) { + return CachedOffsetFromGlobal; + } + + // If the cached range ends before the index at which the current + // aggregate starts, recurse for the parent. + CharUnits offset; + if (cacheEnd < Begin) { + assert(cacheEnd == 0); + assert(Parent && "Begin != 0 for root builder"); + cacheEnd = Begin; + offset = Parent->getOffsetFromGlobalTo(Begin); + } else { + offset = CachedOffsetFromGlobal; + } + + // Perform simple layout on the elements in cacheEnd..getType(); + // if (!Packed) + // offset = offset.alignTo( + // CharUnits::fromQuantity(layout.getABITypeAlign(elementType))); + // offset += + // CharUnits::fromQuantity(layout.getTypeStoreSize(elementType)); + // } while (++cacheEnd != end); + } + + // Cache and return. + CachedOffsetEnd = cacheEnd; + CachedOffsetFromGlobal = offset; + return offset; +} + +mlir::Attribute ConstantAggregateBuilderBase::finishArray(mlir::Type eltTy) { + llvm_unreachable("NYI"); + // markFinished(); + + // auto &buffer = getBuffer(); + // assert((Begin < buffer.size() || (Begin == buffer.size() && eltTy)) && + // "didn't add any array elements without element type"); + // auto elts = llvm::ArrayRef(buffer).slice(Begin); + // if (!eltTy) + // eltTy = elts[0]->getType(); + // auto type = llvm::ArrayType::get(eltTy, elts.size()); + // auto constant = llvm::ConstantArray::get(type, elts); + // buffer.erase(buffer.begin() + Begin, buffer.end()); + // return constant; +} + +mlir::Attribute +ConstantAggregateBuilderBase::finishStruct(mlir::cir::StructType ty) { + llvm_unreachable("NYI"); + // markFinished(); + + // auto &buffer = getBuffer(); + // auto elts = llvm::ArrayRef(buffer).slice(Begin); + + // if (ty == nullptr && elts.empty()) + // ty = mlir::cir::StructType::get(Builder.CGM.getLLVMContext(), {}, + // Packed); + + // mlir::Attribute constant; + // if (ty) { + // assert(ty->isPacked() == Packed); + // constant = llvm::ConstantStruct::get(ty, elts); + // } else { + // constant = llvm::ConstantStruct::getAnon(elts, Packed); + // } + + // buffer.erase(buffer.begin() + Begin, buffer.end()); + // return constant; +} diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h new file mode 100644 index 000000000000..96cedbe1d66a --- /dev/null +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -0,0 +1,578 @@ +//===- ConstantInitBuilder.h - Builder for CIR attributes -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This class provides a convenient interface for building complex +// global initializers of the sort that are frequently required for +// language ABIs. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_CIR_CODEGEN_CONSTANTINITBUILDER_H +#define LLVM_CLANG_CIR_CODEGEN_CONSTANTINITBUILDER_H + +#include "clang/AST/CharUnits.h" +#include "llvm/ADT/APInt.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/SmallVector.h" + +#include "CIRGenBuilder.h" +#include "ConstantInitFuture.h" + +#include + +using namespace clang; + +namespace cir { + +class CIRGenModule; + +/// A convenience builder class for complex constant initializers, +/// especially for anonymous global structures used by various language +/// runtimes. +/// +/// The basic usage pattern is expected to be something like: +/// ConstantInitBuilder builder(CGM); +/// auto toplevel = builder.beginStruct(); +/// toplevel.addInt(CGM.SizeTy, widgets.size()); +/// auto widgetArray = builder.beginArray(); +/// for (auto &widget : widgets) { +/// auto widgetDesc = widgetArray.beginStruct(); +/// widgetDesc.addInt(CGM.SizeTy, widget.getPower()); +/// widgetDesc.add(CGM.GetAddrOfConstantString(widget.getName())); +/// widgetDesc.add(CGM.GetAddrOfGlobal(widget.getInitializerDecl())); +/// widgetDesc.finishAndAddTo(widgetArray); +/// } +/// widgetArray.finishAndAddTo(toplevel); +/// auto global = toplevel.finishAndCreateGlobal("WIDGET_LIST", Align, +/// /*constant*/ true); +class ConstantInitBuilderBase { + struct SelfReference { + mlir::cir::GlobalOp Dummy; + llvm::SmallVector Indices; + + SelfReference(mlir::cir::GlobalOp dummy) : Dummy(dummy) {} + }; + CIRGenModule &CGM; + CIRGenBuilderTy &builder; + llvm::SmallVector Buffer; + std::vector SelfReferences; + bool Frozen = false; + + friend class ConstantInitFuture; + friend class ConstantAggregateBuilderBase; + template friend class ConstantAggregateBuilderTemplateBase; + +protected: + explicit ConstantInitBuilderBase(CIRGenModule &CGM); + + ~ConstantInitBuilderBase() { + assert(Buffer.empty() && "didn't claim all values out of buffer"); + assert(SelfReferences.empty() && "didn't apply all self-references"); + } + +private: + mlir::cir::GlobalOp + createGlobal(mlir::Attribute initializer, const llvm::Twine &name, + CharUnits alignment, bool constant = false, + mlir::cir::GlobalLinkageKind linkage = + mlir::cir::GlobalLinkageKind::InternalLinkage, + unsigned addressSpace = 0); + + ConstantInitFuture createFuture(mlir::Attribute initializer); + + void setGlobalInitializer(mlir::cir::GlobalOp GV, + mlir::Attribute initializer); + + void resolveSelfReferences(mlir::cir::GlobalOp GV); + + void abandon(size_t newEnd); +}; + +/// A concrete base class for struct and array aggregate +/// initializer builders. +class ConstantAggregateBuilderBase { +protected: + ConstantInitBuilderBase &Builder; + ConstantAggregateBuilderBase *Parent; + size_t Begin; + mutable size_t CachedOffsetEnd = 0; + bool Finished = false; + bool Frozen = false; + bool Packed = false; + mutable CharUnits CachedOffsetFromGlobal; + + llvm::SmallVectorImpl &getBuffer() { return Builder.Buffer; } + + const llvm::SmallVectorImpl &getBuffer() const { + return Builder.Buffer; + } + + ConstantAggregateBuilderBase(ConstantInitBuilderBase &builder, + ConstantAggregateBuilderBase *parent) + : Builder(builder), Parent(parent), Begin(builder.Buffer.size()) { + if (parent) { + assert(!parent->Frozen && "parent already has child builder active"); + parent->Frozen = true; + } else { + assert(!builder.Frozen && "builder already has child builder active"); + builder.Frozen = true; + } + } + + ~ConstantAggregateBuilderBase() { + assert(Finished && "didn't finish aggregate builder"); + } + + void markFinished() { + assert(!Frozen && "child builder still active"); + assert(!Finished && "builder already finished"); + Finished = true; + if (Parent) { + assert(Parent->Frozen && "parent not frozen while child builder active"); + Parent->Frozen = false; + } else { + assert(Builder.Frozen && "builder not frozen while child builder active"); + Builder.Frozen = false; + } + } + +public: + // Not copyable. + ConstantAggregateBuilderBase(const ConstantAggregateBuilderBase &) = delete; + ConstantAggregateBuilderBase & + operator=(const ConstantAggregateBuilderBase &) = delete; + + // Movable, mostly to allow returning. But we have to write this out + // properly to satisfy the assert in the destructor. + ConstantAggregateBuilderBase(ConstantAggregateBuilderBase &&other) + : Builder(other.Builder), Parent(other.Parent), Begin(other.Begin), + CachedOffsetEnd(other.CachedOffsetEnd), Finished(other.Finished), + Frozen(other.Frozen), Packed(other.Packed), + CachedOffsetFromGlobal(other.CachedOffsetFromGlobal) { + other.Finished = true; + } + ConstantAggregateBuilderBase & + operator=(ConstantAggregateBuilderBase &&other) = delete; + + /// Return the number of elements that have been added to + /// this struct or array. + size_t size() const { + assert(!this->Finished && "cannot query after finishing builder"); + assert(!this->Frozen && "cannot query while sub-builder is active"); + assert(this->Begin <= this->getBuffer().size()); + return this->getBuffer().size() - this->Begin; + } + + /// Return true if no elements have yet been added to this struct or array. + bool empty() const { return size() == 0; } + + /// Abandon this builder completely. + void abandon() { + markFinished(); + Builder.abandon(Begin); + } + + /// Add a new value to this initializer. + void add(mlir::Attribute value) { + assert(value && "adding null value to constant initializer"); + assert(!Finished && "cannot add more values after finishing builder"); + assert(!Frozen && "cannot add values while subbuilder is active"); + Builder.Buffer.push_back(value); + } + + /// Add an integer value of type size_t. + void addSize(CharUnits size); + + /// Add an integer value of a specific type. + void addInt(mlir::IntegerType intTy, uint64_t value, bool isSigned = false) { + add(mlir::IntegerAttr::get(intTy, + llvm::APInt{intTy.getWidth(), value, isSigned})); + } + + /// Add a null pointer of a specific type. + void addNullPointer(mlir::cir::PointerType ptrTy) { + add(mlir::cir::NullAttr::get(ptrTy.getContext(), ptrTy)); + } + + /// Add a bitcast of a value to a specific type. + void addBitCast(mlir::Attribute value, mlir::Type type) { + llvm_unreachable("NYI"); + // add(llvm::ConstantExpr::getBitCast(value, type)); + } + + /// Add a bunch of new values to this initializer. + void addAll(llvm::ArrayRef values) { + assert(!Finished && "cannot add more values after finishing builder"); + assert(!Frozen && "cannot add values while subbuilder is active"); + Builder.Buffer.append(values.begin(), values.end()); + } + + /// Add a relative offset to the given target address, i.e. the + /// static difference between the target address and the address + /// of the relative offset. The target must be known to be defined + /// in the current linkage unit. The offset will have the given + /// integer type, which must be no wider than intptr_t. Some + /// targets may not fully support this operation. + void addRelativeOffset(mlir::IntegerType type, mlir::Attribute target) { + llvm_unreachable("NYI"); + // add(getRelativeOffset(type, target)); + } + + /// Same as addRelativeOffset(), but instead relative to an element in this + /// aggregate, identified by its index. + void addRelativeOffsetToPosition(mlir::IntegerType type, + mlir::Attribute target, size_t position) { + llvm_unreachable("NYI"); + // add(getRelativeOffsetToPosition(type, target, position)); + } + + /// Add a relative offset to the target address, plus a small + /// constant offset. This is primarily useful when the relative + /// offset is known to be a multiple of (say) four and therefore + /// the tag can be used to express an extra two bits of information. + void addTaggedRelativeOffset(mlir::IntegerType type, mlir::Attribute address, + unsigned tag) { + llvm_unreachable("NYI"); + // mlir::Attribute offset = + // getRelativeOffset(type, address); if + // (tag) { + // offset = + // llvm::ConstantExpr::getAdd(offset, + // llvm::ConstantInt::get(type, tag)); + // } + // add(offset); + } + + /// Return the offset from the start of the initializer to the + /// next position, assuming no padding is required prior to it. + /// + /// This operation will not succeed if any unsized placeholders are + /// currently in place in the initializer. + CharUnits getNextOffsetFromGlobal() const { + assert(!Finished && "cannot add more values after finishing builder"); + assert(!Frozen && "cannot add values while subbuilder is active"); + return getOffsetFromGlobalTo(Builder.Buffer.size()); + } + + /// An opaque class to hold the abstract position of a placeholder. + class PlaceholderPosition { + size_t Index; + friend class ConstantAggregateBuilderBase; + PlaceholderPosition(size_t index) : Index(index) {} + }; + + /// Add a placeholder value to the structure. The returned position + /// can be used to set the value later; it will not be invalidated by + /// any intermediate operations except (1) filling the same position or + /// (2) finishing the entire builder. + /// + /// This is useful for emitting certain kinds of structure which + /// contain some sort of summary field, generally a count, before any + /// of the data. By emitting a placeholder first, the structure can + /// be emitted eagerly. + PlaceholderPosition addPlaceholder() { + assert(!Finished && "cannot add more values after finishing builder"); + assert(!Frozen && "cannot add values while subbuilder is active"); + Builder.Buffer.push_back(nullptr); + return Builder.Buffer.size() - 1; + } + + /// Add a placeholder, giving the expected type that will be filled in. + PlaceholderPosition addPlaceholderWithSize(mlir::Type expectedType); + + /// Fill a previously-added placeholder. + void fillPlaceholderWithInt(PlaceholderPosition position, + mlir::IntegerType type, uint64_t value, + bool isSigned = false) { + llvm_unreachable("NYI"); + // fillPlaceholder(position, llvm::ConstantInt::get(type, value, isSigned)); + } + + /// Fill a previously-added placeholder. + void fillPlaceholder(PlaceholderPosition position, mlir::Attribute value) { + assert(!Finished && "cannot change values after finishing builder"); + assert(!Frozen && "cannot add values while subbuilder is active"); + mlir::Attribute &slot = Builder.Buffer[position.Index]; + assert(slot == nullptr && "placeholder already filled"); + slot = value; + } + + /// Produce an address which will eventually point to the next + /// position to be filled. This is computed with an indexed + /// getelementptr rather than by computing offsets. + /// + /// The returned pointer will have type T*, where T is the given type. This + /// type can differ from the type of the actual element. + mlir::Attribute getAddrOfCurrentPosition(mlir::Type type); + + /// Produce an address which points to a position in the aggregate being + /// constructed. This is computed with an indexed getelementptr rather than by + /// computing offsets. + /// + /// The returned pointer will have type T*, where T is the given type. This + /// type can differ from the type of the actual element. + mlir::Attribute getAddrOfPosition(mlir::Type type, size_t position); + + llvm::ArrayRef getGEPIndicesToCurrentPosition( + llvm::SmallVectorImpl &indices) { + getGEPIndicesTo(indices, Builder.Buffer.size()); + return indices; + } + +protected: + mlir::Attribute finishArray(mlir::Type eltTy); + mlir::Attribute finishStruct(mlir::cir::StructType structTy); + +private: + void getGEPIndicesTo(llvm::SmallVectorImpl &indices, + size_t position) const; + + mlir::Attribute getRelativeOffset(mlir::IntegerType offsetType, + mlir::Attribute target); + + mlir::Attribute getRelativeOffsetToPosition(mlir::IntegerType offsetType, + mlir::Attribute target, + size_t position); + + CharUnits getOffsetFromGlobalTo(size_t index) const; +}; + +template +class ConstantAggregateBuilderTemplateBase + : public Traits::AggregateBuilderBase { + using super = typename Traits::AggregateBuilderBase; + +public: + using InitBuilder = typename Traits::InitBuilder; + using ArrayBuilder = typename Traits::ArrayBuilder; + using StructBuilder = typename Traits::StructBuilder; + using AggregateBuilderBase = typename Traits::AggregateBuilderBase; + +protected: + ConstantAggregateBuilderTemplateBase(InitBuilder &builder, + AggregateBuilderBase *parent) + : super(builder, parent) {} + + Impl &asImpl() { return *static_cast(this); } + +public: + ArrayBuilder beginArray(mlir::Type eltTy = nullptr) { + return ArrayBuilder(static_cast(this->Builder), this, eltTy); + } + + StructBuilder beginStruct(mlir::cir::StructType ty = nullptr) { + return StructBuilder(static_cast(this->Builder), this, ty); + } + + /// Given that this builder was created by beginning an array or struct + /// component on the given parent builder, finish the array/struct + /// component and add it to the parent. + /// + /// It is an intentional choice that the parent is passed in explicitly + /// despite it being redundant with information already kept in the + /// builder. This aids in readability by making it easier to find the + /// places that add components to a builder, as well as "bookending" + /// the sub-builder more explicitly. + void finishAndAddTo(AggregateBuilderBase &parent) { + assert(this->Parent == &parent && "adding to non-parent builder"); + parent.add(asImpl().finishImpl()); + } + + /// Given that this builder was created by beginning an array or struct + /// directly on a ConstantInitBuilder, finish the array/struct and + /// create a global variable with it as the initializer. + template + mlir::cir::GlobalOp finishAndCreateGlobal(As &&...args) { + assert(!this->Parent && "finishing non-root builder"); + return this->Builder.createGlobal(asImpl().finishImpl(), + std::forward(args)...); + } + + /// Given that this builder was created by beginning an array or struct + /// directly on a ConstantInitBuilder, finish the array/struct and + /// set it as the initializer of the given global variable. + void finishAndSetAsInitializer(mlir::cir::GlobalOp global) { + assert(!this->Parent && "finishing non-root builder"); + return this->Builder.setGlobalInitializer(global, asImpl().finishImpl()); + } + + /// Given that this builder was created by beginning an array or struct + /// directly on a ConstantInitBuilder, finish the array/struct and + /// return a future which can be used to install the initializer in + /// a global later. + /// + /// This is useful for allowing a finished initializer to passed to + /// an API which will build the global. However, the "future" preserves + /// a dependency on the original builder; it is an error to pass it aside. + ConstantInitFuture finishAndCreateFuture() { + assert(!this->Parent && "finishing non-root builder"); + return this->Builder.createFuture(asImpl().finishImpl()); + } +}; + +template +class ConstantArrayBuilderTemplateBase + : public ConstantAggregateBuilderTemplateBase { + using super = + ConstantAggregateBuilderTemplateBase; + +public: + using InitBuilder = typename Traits::InitBuilder; + using AggregateBuilderBase = typename Traits::AggregateBuilderBase; + +private: + mlir::Type EltTy; + + template friend class ConstantAggregateBuilderTemplateBase; + +protected: + ConstantArrayBuilderTemplateBase(InitBuilder &builder, + AggregateBuilderBase *parent, + mlir::Type eltTy) + : super(builder, parent), EltTy(eltTy) {} + +private: + /// Form an array constant from the values that have been added to this + /// builder. + mlir::Attribute finishImpl() { + return AggregateBuilderBase::finishArray(EltTy); + } +}; + +/// A template class designed to allow other frontends to +/// easily customize the builder classes used by ConstantInitBuilder, +/// and thus to extend the API to work with the abstractions they +/// prefer. This would probably not be necessary if C++ just +/// supported extension methods. +template +class ConstantStructBuilderTemplateBase + : public ConstantAggregateBuilderTemplateBase< + typename Traits::StructBuilder, Traits> { + using super = + ConstantAggregateBuilderTemplateBase; + +public: + using InitBuilder = typename Traits::InitBuilder; + using AggregateBuilderBase = typename Traits::AggregateBuilderBase; + +private: + mlir::cir::StructType StructTy; + + template friend class ConstantAggregateBuilderTemplateBase; + +protected: + ConstantStructBuilderTemplateBase(InitBuilder &builder, + AggregateBuilderBase *parent, + mlir::cir::StructType structTy) + : super(builder, parent), StructTy(structTy) { + if (structTy) { + llvm_unreachable("NYI"); + // this->Packed = structTy->isPacked(); + } + } + +public: + void setPacked(bool packed) { this->Packed = packed; } + + /// Use the given type for the struct if its element count is correct. + /// Don't add more elements after calling this. + void suggestType(mlir::cir::StructType structTy) { + if (this->size() == structTy.getNumElements()) { + StructTy = structTy; + } + } + +private: + /// Form an array constant from the values that have been added to this + /// builder. + mlir::Attribute finishImpl() { + return AggregateBuilderBase::finishStruct(StructTy); + } +}; + +/// A template class designed to allow other frontends to +/// easily customize the builder classes used by ConstantInitBuilder, +/// and thus to extend the API to work with the abstractions they +/// prefer. This would probably not be necessary if C++ just +/// supported extension methods. +template +class ConstantInitBuilderTemplateBase : public ConstantInitBuilderBase { +protected: + ConstantInitBuilderTemplateBase(CIRGenModule &CGM) + : ConstantInitBuilderBase(CGM) {} + +public: + using InitBuilder = typename Traits::InitBuilder; + using ArrayBuilder = typename Traits::ArrayBuilder; + using StructBuilder = typename Traits::StructBuilder; + + ArrayBuilder beginArray(mlir::Type eltTy = nullptr) { + return ArrayBuilder(static_cast(*this), nullptr, eltTy); + } + + StructBuilder beginStruct(mlir::cir::StructType structTy = nullptr) { + return StructBuilder(static_cast(*this), nullptr, structTy); + } +}; + +class ConstantInitBuilder; +class ConstantStructBuilder; +class ConstantArrayBuilder; + +struct ConstantInitBuilderTraits { + using InitBuilder = ConstantInitBuilder; + using AggregateBuilderBase = ConstantAggregateBuilderBase; + using ArrayBuilder = ConstantArrayBuilder; + using StructBuilder = ConstantStructBuilder; +}; + +/// The standard implementation of ConstantInitBuilder used in Clang. +class ConstantInitBuilder + : public ConstantInitBuilderTemplateBase { +public: + explicit ConstantInitBuilder(CIRGenModule &CGM) + : ConstantInitBuilderTemplateBase(CGM) {} +}; + +/// A helper class of ConstantInitBuilder, used for building constant +/// array initializers. +class ConstantArrayBuilder + : public ConstantArrayBuilderTemplateBase { + template friend class ConstantInitBuilderTemplateBase; + + // The use of explicit qualification is a GCC workaround. + template + friend class cir::ConstantAggregateBuilderTemplateBase; + + ConstantArrayBuilder(ConstantInitBuilder &builder, + ConstantAggregateBuilderBase *parent, mlir::Type eltTy) + : ConstantArrayBuilderTemplateBase(builder, parent, eltTy) {} +}; + +/// A helper class of ConstantInitBuilder, used for building constant +/// struct initializers. +class ConstantStructBuilder + : public ConstantStructBuilderTemplateBase { + template friend class ConstantInitBuilderTemplateBase; + + // The use of explicit qualification is a GCC workaround. + template + friend class cir::ConstantAggregateBuilderTemplateBase; + + ConstantStructBuilder(ConstantInitBuilder &builder, + ConstantAggregateBuilderBase *parent, + mlir::cir::StructType structTy) + : ConstantStructBuilderTemplateBase(builder, parent, structTy) {} +}; + +} // end namespace cir + +#endif diff --git a/clang/lib/CIR/CodeGen/ConstantInitFuture.h b/clang/lib/CIR/CodeGen/ConstantInitFuture.h new file mode 100644 index 000000000000..97631d5da88c --- /dev/null +++ b/clang/lib/CIR/CodeGen/ConstantInitFuture.h @@ -0,0 +1,102 @@ +//===- ConstantInitFuture.h - "Future" constant initializers ----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This class defines the ConstantInitFuture class. This is split out +// from ConstantInitBuilder.h in order to allow APIs to work with it +// without having to include that entire header. This is particularly +// important because it is often useful to be able to default-construct +// a future in, say, a default argument. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_CIR_CODEGEN_CONSTANTINITFUTURE_H +#define LLVM_CLANG_CIR_CODEGEN_CONSTANTINITFUTURE_H + +#include "mlir/IR/Attributes.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "llvm/ADT/PointerUnion.h" + +// Forward-declare ConstantInitBuilderBase and give it a +// PointerLikeTypeTraits specialization so that we can safely use it +// in a PointerUnion below. +namespace cir { +class ConstantInitBuilderBase; +} // namespace cir + +namespace llvm { +template <> struct PointerLikeTypeTraits<::cir::ConstantInitBuilderBase *> { + using T = ::cir::ConstantInitBuilderBase *; + + static inline void *getAsVoidPointer(T p) { return p; } + static inline T getFromVoidPointer(void *p) { return static_cast(p); } + static constexpr int NumLowBitsAvailable = 2; +}; +} // namespace llvm + +namespace cir { + +/// A "future" for a completed constant initializer, which can be passed +/// around independently of any sub-builders (but not the original parent). +class ConstantInitFuture { + using PairTy = llvm::PointerUnion; + + PairTy Data; + + friend class ConstantInitBuilderBase; + explicit ConstantInitFuture(ConstantInitBuilderBase *builder); + +public: + ConstantInitFuture() {} + + /// A future can be explicitly created from a fixed initializer. + explicit ConstantInitFuture(mlir::Attribute initializer) : Data(initializer) { + assert(initializer && "creating null future"); + } + + /// Is this future non-null? + explicit operator bool() const { return bool(Data); } + + /// Return the type of the initializer. + mlir::Type getType() const; + + /// Abandon this initializer. + void abandon(); + + /// Install the initializer into a global variable. This cannot + /// be called multiple times. + void installInGlobal(mlir::cir::GlobalOp global); + + void *getOpaqueValue() const { return Data.getOpaqueValue(); } + static ConstantInitFuture getFromOpaqueValue(void *value) { + ConstantInitFuture result; + result.Data = PairTy::getFromOpaqueValue(value); + return result; + } + static constexpr int NumLowBitsAvailable = + llvm::PointerLikeTypeTraits::NumLowBitsAvailable; +}; + +} // namespace cir + +namespace llvm { + +template <> struct PointerLikeTypeTraits<::cir::ConstantInitFuture> { + using T = ::cir::ConstantInitFuture; + + static inline void *getAsVoidPointer(T future) { + return future.getOpaqueValue(); + } + static inline T getFromVoidPointer(void *p) { + return T::getFromOpaqueValue(p); + } + static constexpr int NumLowBitsAvailable = T::NumLowBitsAvailable; +}; + +} // end namespace llvm + +#endif From 5bfc7af8dcf593ba37d98ceea95869a51a8b8192 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 12 Apr 2023 20:47:37 -0700 Subject: [PATCH 0863/2301] [CIR][CIRGen] Add vtable initializar and component - More skeleton for vtable building. --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 168 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenVTables.h | 17 +- 3 files changed, 179 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 2cc2f0edb74b..340080553383 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1450,8 +1450,8 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, [[maybe_unused]] auto components = builder.beginStruct(); llvm_unreachable("NYI"); - // CGVT.createVTableInitializer(components, VTLayout, RTTI, - // mlir::cir::GlobalLinkageKind::isLocalLinkage(Linkage)); + CGVT.createVTableInitializer(components, VTLayout, RTTI, + mlir::cir::isLocalLinkage(Linkage)); // components.finishAndSetAsInitializer(VTable); // // Set the correct linkage. diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 8cf1f8e4a470..a095ee589088 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -35,6 +35,8 @@ static bool UseRelativeLayout(const CIRGenModule &CGM) { CGM.getItaniumVTableContext().isRelativeLayout(); } +bool CIRGenVTables::useRelativeLayout() const { return UseRelativeLayout(CGM); } + mlir::Type CIRGenModule::getVTableComponentType() { mlir::Type ptrTy = builder.getInt8PtrTy(); if (UseRelativeLayout(*this)) @@ -155,6 +157,172 @@ void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) { llvm_unreachable("NYI"); } +static void AddPointerLayoutOffset(const CIRGenModule &CGM, + ConstantArrayBuilder &builder, + CharUnits offset) { + llvm_unreachable("NYI"); + // builder.add(llvm::ConstantExpr::getIntToPtr( + // llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()), + // CGM.Int8PtrTy)); +} + +static void AddRelativeLayoutOffset(const CIRGenModule &CGM, + ConstantArrayBuilder &builder, + CharUnits offset) { + llvm_unreachable("NYI"); + // builder.add(llvm::ConstantInt::get(CGM.Int32Ty, offset.getQuantity())); +} + +void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, + const VTableLayout &layout, + unsigned componentIndex, + mlir::Attribute rtti, + unsigned &nextVTableThunkIndex, + unsigned vtableAddressPoint, + bool vtableHasLocalLinkage) { + auto &component = layout.vtable_components()[componentIndex]; + + auto addOffsetConstant = + useRelativeLayout() ? AddRelativeLayoutOffset : AddPointerLayoutOffset; + + switch (component.getKind()) { + case VTableComponent::CK_VCallOffset: + return addOffsetConstant(CGM, builder, component.getVCallOffset()); + + case VTableComponent::CK_VBaseOffset: + return addOffsetConstant(CGM, builder, component.getVBaseOffset()); + + case VTableComponent::CK_OffsetToTop: + return addOffsetConstant(CGM, builder, component.getOffsetToTop()); + + case VTableComponent::CK_RTTI: + if (useRelativeLayout()) { + llvm_unreachable("NYI"); + // return addRelativeComponent(builder, rtti, vtableAddressPoint, + // vtableHasLocalLinkage, + // /*isCompleteDtor=*/false); + } else { + llvm_unreachable("NYI"); + // return builder.add(llvm::ConstantExpr::getBitCast(rtti, + // CGM.Int8PtrTy)); + } + + case VTableComponent::CK_FunctionPointer: + case VTableComponent::CK_CompleteDtorPointer: + case VTableComponent::CK_DeletingDtorPointer: { + GlobalDecl GD = component.getGlobalDecl(); + + if (CGM.getLangOpts().CUDA) { + llvm_unreachable("NYI"); + } + + [[maybe_unused]] auto getSpecialVirtualFn = + [&](StringRef name) -> mlir::Attribute { + // FIXME(PR43094): When merging comdat groups, lld can select a local + // symbol as the signature symbol even though it cannot be accessed + // outside that symbol's TU. The relative vtables ABI would make + // __cxa_pure_virtual and __cxa_deleted_virtual local symbols, and + // depending on link order, the comdat groups could resolve to the one + // with the local symbol. As a temporary solution, fill these components + // with zero. We shouldn't be calling these in the first place anyway. + if (useRelativeLayout()) + llvm_unreachable("NYI"); + + // For NVPTX devices in OpenMP emit special functon as null pointers, + // otherwise linking ends up with unresolved references. + if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMP && + CGM.getTriple().isNVPTX()) + llvm_unreachable("NYI"); + + llvm_unreachable("NYI"); + // llvm::FunctionType *fnTy = + // llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); + // llvm::Constant *fn = cast( + // CGM.CreateRuntimeFunction(fnTy, name).getCallee()); + // if (auto f = dyn_cast(fn)) + // f->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); + // return llvm::ConstantExpr::getBitCast(fn, CGM.Int8PtrTy); + }; + + // mlir::Attribute fnPtr; + // Pure virtual member functions. + if (cast(GD.getDecl())->isPureVirtual()) { + llvm_unreachable("NYI"); + // if (!PureVirtualFn) + // PureVirtualFn = + // getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName()); + // fnPtr = PureVirtualFn; + + // Deleted virtual member functions. + } else if (cast(GD.getDecl())->isDeleted()) { + llvm_unreachable("NYI"); + // if (!DeletedVirtualFn) + // DeletedVirtualFn = + // getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName()); + // fnPtr = DeletedVirtualFn; + + // Thunks. + } else if (nextVTableThunkIndex < layout.vtable_thunks().size() && + layout.vtable_thunks()[nextVTableThunkIndex].first == + componentIndex) { + llvm_unreachable("NYI"); + // auto &thunkInfo = layout.vtable_thunks()[nextVTableThunkIndex].second; + + // nextVTableThunkIndex++; + // fnPtr = maybeEmitThunk(GD, thunkInfo, /*ForVTable=*/true); + + // Otherwise we can use the method definition directly. + } else { + llvm_unreachable("NYI"); + // llvm::Type *fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD); + // fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true); + } + + if (useRelativeLayout()) { + llvm_unreachable("NYI"); + } else { + llvm_unreachable("NYI"); + // return builder.add(llvm::ConstantExpr::getBitCast(fnPtr, + // CGM.Int8PtrTy)); + } + } + + case VTableComponent::CK_UnusedFunctionPointer: + if (useRelativeLayout()) + llvm_unreachable("NYI"); + else { + llvm_unreachable("NYI"); + // return builder.addNullPointer(CGM.Int8PtrTy); + } + } + + llvm_unreachable("Unexpected vtable component kind"); +} + +void CIRGenVTables::createVTableInitializer(ConstantStructBuilder &builder, + const VTableLayout &layout, + mlir::Attribute rtti, + bool vtableHasLocalLinkage) { + auto componentType = getVTableComponentType(); + + const auto &addressPoints = layout.getAddressPointIndices(); + unsigned nextVTableThunkIndex = 0; + for (unsigned vtableIndex = 0, endIndex = layout.getNumVTables(); + vtableIndex != endIndex; ++vtableIndex) { + auto vtableElem = builder.beginArray(componentType); + + size_t vtableStart = layout.getVTableOffset(vtableIndex); + size_t vtableEnd = vtableStart + layout.getVTableSize(vtableIndex); + for (size_t componentIndex = vtableStart; componentIndex < vtableEnd; + ++componentIndex) { + addVTableComponent(vtableElem, layout, componentIndex, rtti, + nextVTableThunkIndex, addressPoints[vtableIndex], + vtableHasLocalLinkage); + } + vtableElem.finishAndAddTo(builder); + } +} + /// Compute the required linkage of the vtable for the given class. /// /// Note that we only call this at the end of the translation unit. diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index b0ef3b28d1e5..e9673d194072 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H #define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H +#include "ConstantInitBuilder.h" #include "clang/AST/BaseSubobject.h" #include "clang/AST/CharUnits.h" #include "clang/AST/GlobalDecl.h" @@ -62,11 +63,11 @@ class CIRGenVTables { // const ThunkInfo &ThunkAdjustments, // bool ForVTable); - // void addVTableComponent(ConstantArrayBuilder &builder, - // const VTableLayout &layout, unsigned - // componentIndex, llvm::Constant *rtti, unsigned - // &nextVTableThunkIndex, unsigned - // vtableAddressPoint, bool vtableHasLocalLinkage); + void addVTableComponent(ConstantArrayBuilder &builder, + const VTableLayout &layout, unsigned componentIndex, + mlir::Attribute rtti, unsigned &nextVTableThunkIndex, + unsigned vtableAddressPoint, + bool vtableHasLocalLinkage); // /// Add a 32-bit offset to a component relative to the vtable when using // the @@ -98,9 +99,9 @@ class CIRGenVTables { public: /// Add vtable components for the given vtable layout to the given /// global initializer. - // void createVTableInitializer(ConstantStructBuilder &builder, - // const VTableLayout &layout, llvm::Constant - // *rtti, bool vtableHasLocalLinkage); + void createVTableInitializer(ConstantStructBuilder &builder, + const VTableLayout &layout, mlir::Attribute rtti, + bool vtableHasLocalLinkage); CIRGenVTables(CIRGenModule &CGM); From 441eba30384d408b6c860e4ee421826d59eda510 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 12 Apr 2023 21:27:44 -0700 Subject: [PATCH 0864/2301] [CIR][CIRGen] Fill in first two vtable components for simple A -> B inheritance Building up vtable, needs to be completed before testcase. --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 3 ++- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 14 ++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 340080553383..99394aad8786 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1449,11 +1449,12 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, ConstantInitBuilder builder(CGM); [[maybe_unused]] auto components = builder.beginStruct(); - llvm_unreachable("NYI"); CGVT.createVTableInitializer(components, VTLayout, RTTI, mlir::cir::isLocalLinkage(Linkage)); // components.finishAndSetAsInitializer(VTable); + llvm_unreachable("NYI"); + // // Set the correct linkage. // VTable->setLinkage(Linkage); diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index a095ee589088..43b96f645f5a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -157,16 +157,18 @@ void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) { llvm_unreachable("NYI"); } -static void AddPointerLayoutOffset(const CIRGenModule &CGM, +static void AddPointerLayoutOffset(CIRGenModule &CGM, ConstantArrayBuilder &builder, CharUnits offset) { - llvm_unreachable("NYI"); + assert(offset.getQuantity() == 0 && "NYI"); + builder.add(mlir::cir::NullAttr::get(CGM.getBuilder().getContext(), + CGM.getBuilder().getInt8PtrTy())); // builder.add(llvm::ConstantExpr::getIntToPtr( // llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()), // CGM.Int8PtrTy)); } -static void AddRelativeLayoutOffset(const CIRGenModule &CGM, +static void AddRelativeLayoutOffset(CIRGenModule &CGM, ConstantArrayBuilder &builder, CharUnits offset) { llvm_unreachable("NYI"); @@ -202,9 +204,9 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, // vtableHasLocalLinkage, // /*isCompleteDtor=*/false); } else { - llvm_unreachable("NYI"); - // return builder.add(llvm::ConstantExpr::getBitCast(rtti, - // CGM.Int8PtrTy)); + assert(rtti.isa() && + "expected GlobalViewAttr"); + return builder.add(rtti); } case VTableComponent::CK_FunctionPointer: From 3c09b7f15e3f5a36a88e2cbae289ac7c67a33507 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 13 Apr 2023 15:44:08 -0700 Subject: [PATCH 0865/2301] [CIR][CIRGen] Fill in more gaps in order to complete vtable - Fix GetAddrOfFunction. - Add array and struct emission support for aggregates as part of ConstantInitBuilder. - Fill in more addVTableComponent. --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 6 ++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 12 +++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 17 ++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 20 +++-- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 7 +- clang/lib/CIR/CodeGen/CIRGenTypes.h | 9 ++- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 13 ++-- clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 78 ++++++++++++------- 8 files changed, 110 insertions(+), 52 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 47315015f6be..a0124cc0d0b8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -156,6 +156,12 @@ class CIRGenCXXABI { virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty) = 0; + /// Returns true if the given destructor type should be emitted as a linkonce + /// delegating thunk, regardless of whether the dtor is defined in this TU or + /// not. + virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, + CXXDtorType DT) const = 0; + /// Get the address point of the vtable for the given base subobject. virtual mlir::Value getVTableAddressPoint(BaseSubobject Base, diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 198f4ab27462..d95fdba01ed7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -259,6 +259,18 @@ mlir::FunctionType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { resultType ? resultType : mlir::TypeRange()); } +mlir::FunctionType CIRGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { + const CXXMethodDecl *MD = cast(GD.getDecl()); + const FunctionProtoType *FPT = MD->getType()->getAs(); + + if (!isFuncTypeConvertible(FPT)) { + llvm_unreachable("NYI"); + // return llvm::StructType::get(getLLVMContext()); + } + + return GetFunctionType(GD); +} + CIRGenCallee CIRGenCallee::prepareConcreteCallee(CIRGenFunction &CGF) const { assert(!isVirtual() && "Virtual NYI"); return *this; diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 99394aad8786..3c1daa6027ab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -125,6 +125,13 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { const CXXRecordDecl *RD) override; mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty) override; + bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, + CXXDtorType DT) const override { + // Itanium does not emit any destructor variant as an inline thunk. + // Delegating may occur as an optimization, but all variants are either + // emitted with external linkage or as linkonce if they are inline and used. + return false; + } /// TODO(cir): seems like could be shared between LLVM IR and CIR codegen. bool mayNeedDestruction(const VarDecl *VD) const { @@ -1440,18 +1447,18 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, return; ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext(); - [[maybe_unused]] const VTableLayout &VTLayout = VTContext.getVTableLayout(RD); - [[maybe_unused]] auto Linkage = CGM.getVTableLinkage(RD); - [[maybe_unused]] auto RTTI = CGM.getAddrOfRTTIDescriptor( + const VTableLayout &VTLayout = VTContext.getVTableLayout(RD); + auto Linkage = CGM.getVTableLinkage(RD); + auto RTTI = CGM.getAddrOfRTTIDescriptor( CGM.getLoc(RD->getBeginLoc()), CGM.getASTContext().getTagDeclType(RD)); // Create and set the initializer. ConstantInitBuilder builder(CGM); - [[maybe_unused]] auto components = builder.beginStruct(); + auto components = builder.beginStruct(); CGVT.createVTableInitializer(components, VTLayout, RTTI, mlir::cir::isLocalLinkage(Linkage)); - // components.finishAndSetAsInitializer(VTable); + components.finishAndSetAsInitializer(VTable); llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 8f0b6b136689..576a61a9e7ac 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1402,8 +1402,6 @@ mlir::cir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, bool ForVTable, bool DontDefer, ForDefinition_t IsForDefinition) { - assert(!ForVTable && "NYI"); - assert(!cast(GD.getDecl())->isConsteval() && "consteval function should never be emitted"); @@ -1412,7 +1410,15 @@ CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, Ty = getTypes().ConvertType(FD->getType()); } - assert(!dyn_cast(GD.getDecl()) && "NYI"); + // Devirtualized destructor calls may come through here instead of via + // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead + // of the complete destructor when necessary. + if (const auto *DD = dyn_cast(GD.getDecl())) { + if (getTarget().getCXXABI().isMicrosoft() && + GD.getDtorType() == Dtor_Complete && + DD->getParent()->getNumVBases() == 0) + llvm_unreachable("NYI"); + } StringRef MangledName = getMangledName(GD); auto F = GetOrCreateCIRFunction(MangledName, Ty, GD, ForVTable, DontDefer, @@ -1600,7 +1606,6 @@ mlir::Location CIRGenModule::getLocForFunction(const clang::FunctionDecl *FD) { mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( StringRef MangledName, mlir::Type Ty, GlobalDecl GD, bool ForVTable, bool DontDefer, bool IsThunk, ForDefinition_t IsForDefinition) { - assert(!ForVTable && "NYI"); assert(!IsThunk && "NYI"); const auto *D = GD.getDecl(); @@ -1696,8 +1701,11 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // All MSVC dtors other than the base dtor are linkonce_odr and delegate to // each other bottoming out wiht the base dtor. Therefore we emit non-base // dtors on usage, even if there is no dtor definition in the TU. - if (D && isa(D)) - llvm_unreachable("NYI"); + if (isa_and_nonnull(D) && + getCXXABI().useThunkForDtorVariant(cast(D), + GD.getDtorType())) { + llvm_unreachable("NYI"); // addDeferredDeclToEmit(GD); + } // This is the first use or definition of a mangled name. If there is a // deferred decl with this name, remember that we need to emit it at the end diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index f1c207d50c09..d44f348ce32d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -701,11 +701,12 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( const CIRGenFunctionInfo &CIRGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { assert(!dyn_cast(GD.getDecl()) && - "This is reported as a FIXME in codegen"); + "This is reported as a FIXME in LLVM codegen"); const auto *FD = cast(GD.getDecl()); - assert(!isa(GD.getDecl()) && - !isa(GD.getDecl()) && "NYI"); + if (isa(GD.getDecl()) || + isa(GD.getDecl())) + return arrangeCXXStructorDeclaration(GD); return arrangeFunctionDeclaration(FD); } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 4ab86af12559..f19e2b2e5ae4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -120,8 +120,8 @@ class CIRGenTypes { cir::CIRGenBuilderTy &getBuilder() const { return Builder; } CIRGenModule &getModule() const { return CGM; } - /// isFuncTypeConvertible - Utility to check whether a function type can be - /// converted to a CIR type (i.e. doesn't depend on an incomplete tag type). + /// Utility to check whether a function type can be converted to a CIR type + /// (i.e. doesn't depend on an incomplete tag type). bool isFuncTypeConvertible(const clang::FunctionType *FT); bool isFuncParamTypeConvertible(clang::QualType Ty); @@ -187,6 +187,11 @@ class CIRGenTypes { mlir::FunctionType GetFunctionType(clang::GlobalDecl GD); + /// Get the LLVM function type for use in a vtable, given a CXXMethodDecl. If + /// the method to has an incomplete return type, and/or incomplete argument + /// types, this will return the opaque type. + mlir::FunctionType GetFunctionTypeForVTable(clang::GlobalDecl GD); + // The arrangement methods are split into three families: // - those meant to drive the signature and prologue/epilogue // of a function declaration or definition, diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 43b96f645f5a..b4ab988f8f0e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -246,7 +246,7 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, // return llvm::ConstantExpr::getBitCast(fn, CGM.Int8PtrTy); }; - // mlir::Attribute fnPtr; + mlir::cir::FuncOp fnPtr; // Pure virtual member functions. if (cast(GD.getDecl())->isPureVirtual()) { llvm_unreachable("NYI"); @@ -275,17 +275,16 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, // Otherwise we can use the method definition directly. } else { - llvm_unreachable("NYI"); - // llvm::Type *fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD); - // fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true); + auto fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD); + fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true); } if (useRelativeLayout()) { llvm_unreachable("NYI"); } else { - llvm_unreachable("NYI"); - // return builder.add(llvm::ConstantExpr::getBitCast(fnPtr, - // CGM.Int8PtrTy)); + return builder.add(mlir::cir::GlobalViewAttr::get( + CGM.getBuilder().getInt8PtrTy(), + mlir::FlatSymbolRefAttr::get(fnPtr.getSymNameAttr()))); } } diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp index 3158980051c4..6fd55a89fb99 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -271,42 +271,62 @@ ConstantAggregateBuilderBase::getOffsetFromGlobalTo(size_t end) const { return offset; } +// FIXME(cir): ideally we should use CIRGenBuilder for both static function +// bellow by threading ConstantAggregateBuilderBase through +// ConstantAggregateBuilderBase. +static mlir::cir::ConstArrayAttr getConstArray(mlir::Attribute attrs, + mlir::cir::ArrayType arrayTy) { + return mlir::cir::ConstArrayAttr::get(arrayTy, attrs); +} + mlir::Attribute ConstantAggregateBuilderBase::finishArray(mlir::Type eltTy) { - llvm_unreachable("NYI"); - // markFinished(); - - // auto &buffer = getBuffer(); - // assert((Begin < buffer.size() || (Begin == buffer.size() && eltTy)) && - // "didn't add any array elements without element type"); - // auto elts = llvm::ArrayRef(buffer).slice(Begin); - // if (!eltTy) - // eltTy = elts[0]->getType(); - // auto type = llvm::ArrayType::get(eltTy, elts.size()); - // auto constant = llvm::ConstantArray::get(type, elts); - // buffer.erase(buffer.begin() + Begin, buffer.end()); - // return constant; + markFinished(); + + auto &buffer = getBuffer(); + assert((Begin < buffer.size() || (Begin == buffer.size() && eltTy)) && + "didn't add any array elements without element type"); + auto elts = llvm::ArrayRef(buffer).slice(Begin); + if (!eltTy) { + llvm_unreachable("NYI"); + // Uncomment this once we get a testcase. + // auto tAttr = elts[0].dyn_cast(); + // assert(tAttr && "expected typed attribute"); + // eltTy = tAttr.getType(); + } + + auto constant = getConstArray( + mlir::ArrayAttr::get(eltTy.getContext(), elts), + mlir::cir::ArrayType::get(eltTy.getContext(), eltTy, elts.size())); + buffer.erase(buffer.begin() + Begin, buffer.end()); + return constant; } mlir::Attribute ConstantAggregateBuilderBase::finishStruct(mlir::cir::StructType ty) { - llvm_unreachable("NYI"); - // markFinished(); + markFinished(); - // auto &buffer = getBuffer(); - // auto elts = llvm::ArrayRef(buffer).slice(Begin); + auto &buffer = getBuffer(); + auto elts = llvm::ArrayRef(buffer).slice(Begin); - // if (ty == nullptr && elts.empty()) - // ty = mlir::cir::StructType::get(Builder.CGM.getLLVMContext(), {}, - // Packed); + if (ty == nullptr && elts.empty()) { + llvm_unreachable("NYI"); + // ty = mlir::cir::StructType::get(Builder.CGM.getLLVMContext(), {}, + // Packed); + } - // mlir::Attribute constant; - // if (ty) { - // assert(ty->isPacked() == Packed); - // constant = llvm::ConstantStruct::get(ty, elts); - // } else { - // constant = llvm::ConstantStruct::getAnon(elts, Packed); - // } + mlir::Attribute constant; + if (ty) { + llvm_unreachable("NYI"); + // assert(ty->isPacked() == Packed); + // constant = llvm::ConstantStruct::get(ty, elts); + } else { + assert(!Packed && "NYI"); + // constant = llvm::ConstantStruct::getAnon(elts, Packed); + // getAnonStruct(mlir::ArrayAttr::get(ty.getContext(), elts)) + llvm_unreachable("NYI"); + } - // buffer.erase(buffer.begin() + Begin, buffer.end()); - // return constant; + llvm_unreachable("NYI"); + buffer.erase(buffer.begin() + Begin, buffer.end()); + return constant; } From 1bd1fddc8358a0eaa566cd435d6de4f3264301e3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 13 Apr 2023 22:57:47 -0700 Subject: [PATCH 0866/2301] [CIR] Add #cir.const_struct attribute to handle global constant structs --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 1 + .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 33 +++++++++++++++ clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 42 +++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 3 ++ clang/test/CIR/IR/global.cir | 1 + 5 files changed, 80 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index 4f4b0232689d..94599cadcd39 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -34,6 +34,7 @@ class RecordDecl; namespace mlir { namespace cir { class ArrayType; +class StructType; } // namespace cir } // namespace mlir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 54ff9e4510d3..96bb353b1825 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -92,6 +92,39 @@ def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> let genVerifyDecl = 1; } +//===----------------------------------------------------------------------===// +// ConstStructAttr +//===----------------------------------------------------------------------===// + +def ConstStructAttr : CIR_Attr<"ConstStruct", "const_struct", + [TypedAttrInterface]> { + let summary = "Represents a constant struct"; + let description = [{ + Effectively supports "struct-like" constants. It's must be built from + an `mlir::ArrayAttr `instance where each elements is a typed attribute + (`mlir::TypedAttribute`). + }]; + + let parameters = (ins AttributeSelfTypeParameter<"">:$type, + "ArrayAttr":$members); + + let builders = [ + AttrBuilderWithInferredContext<(ins "mlir::cir::StructType":$type, + "ArrayAttr":$members), [{ + return $_get(type.getContext(), type, members); + }]> + ]; + + let assemblyFormat = [{ + `<` + custom($type, $members) + `>` + }]; + + // let hasCustomAssemblyFormat = 1; + // let genVerifyDecl = 1; +} + //===----------------------------------------------------------------------===// // SignedOverflowBehaviorAttr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 1621bf38e87e..e0c88eb2fd21 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -19,6 +19,7 @@ #include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectImplementation.h" +#include "mlir/IR/OpImplementation.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/TypeSwitch.h" @@ -26,6 +27,12 @@ // ClangIR holds back AST references when available. #include "clang/AST/Decl.h" +static void printConstStructMembers(mlir::AsmPrinter &p, mlir::Type type, + mlir::ArrayAttr members); +static mlir::ParseResult parseConstStructMembers(::mlir::AsmParser &parser, + mlir::Type &type, + mlir::ArrayAttr &members); + #define GET_ATTRDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" @@ -54,6 +61,41 @@ void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const { llvm_unreachable("unexpected CIR type kind"); } +static void printConstStructMembers(mlir::AsmPrinter &p, mlir::Type type, + mlir::ArrayAttr members) { + p << members; +} + +static ParseResult parseConstStructMembers(::mlir::AsmParser &parser, + mlir::Type &type, + mlir::ArrayAttr &members) { + SmallVector elts; + SmallVector tys; + if (parser + .parseCommaSeparatedList( + AsmParser::Delimiter::Braces, + [&]() { + Attribute attr; + if (parser.parseAttribute(attr).succeeded()) { + elts.push_back(attr); + if (auto tyAttr = attr.dyn_cast()) { + tys.push_back(tyAttr.getType()); + return success(); + } + parser.emitError(parser.getCurrentLocation(), + "expected a typed attribute"); + } + return failure(); + }) + .failed()) + return failure(); + + auto *ctx = parser.getContext(); + members = mlir::ArrayAttr::get(ctx, elts); + type = mlir::cir::StructType::get(ctx, tys, "", /*body=*/true); + return success(); +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a05a813716d8..a20cb3de244b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -183,6 +183,8 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return success(); if (attrType.isa()) return success(); + if (attrType.isa()) + return success(); assert(attrType.isa() && "What else could we be looking at here?"); return op->emitOpError("global with type ") @@ -1618,6 +1620,7 @@ mlir::OpTrait::impl::verifySameFirstOperandAndResultType(Operation *op) { //===----------------------------------------------------------------------===// // CIR attributes +// FIXME: move all of these to CIRAttrs.cpp //===----------------------------------------------------------------------===// LogicalResult mlir::cir::ConstArrayAttr::verify( diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index d6a8933f842e..8fed3d5bca4c 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -4,6 +4,7 @@ module { cir.global external @a = 3 : i32 cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> cir.global external @b = #cir.const_array<"example\00" : !cir.array> + cir.global external @rgb2 = #cir.const_struct<{0 : i8, 5 : i64, #cir.null : !cir.ptr}> : !cir.struct<"", i8, i64, !cir.ptr> cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} cir.global "private" internal @c : i32 cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} From 52b6a4fcf70e9dbb8da1c9d30d50bcaa11e9487d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 14 Apr 2023 00:23:43 -0700 Subject: [PATCH 0867/2301] [CIR][CIRGen] One more piece on emitVTableDefinitions There's one remaining bug on addDeferredVTable size increasing, which we should address before completing this. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 ++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 87 +++++++++---------- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 8 ++ clang/lib/CIR/CodeGen/CIRGenModule.h | 5 ++ clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 6 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 28 ++++-- clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 25 +++--- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 8 files changed, 97 insertions(+), 69 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 5ad1f5ac424c..268b6d968999 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1123,6 +1123,12 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { bool hasAvailableExternallyLinkage() { return mlir::cir::isAvailableExternallyLinkage(getLinkage()); } + bool isDeclarationForLinker() { + if (hasAvailableExternallyLinkage()) + return true; + + return isDeclaration(); + } }]; let skipDefaultBuilders = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 3c1daa6027ab..3b4f9731feb4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1460,51 +1460,50 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, mlir::cir::isLocalLinkage(Linkage)); components.finishAndSetAsInitializer(VTable); - llvm_unreachable("NYI"); + // Set the correct linkage. + VTable.setLinkage(Linkage); + + if (CGM.supportsCOMDAT() && mlir::cir::isWeakForLinker(Linkage)) { + assert(!UnimplementedFeature::setComdat()); + } + + // Set the right visibility. + CGM.setGVProperties(VTable, RD); + + // If this is the magic class __cxxabiv1::__fundamental_type_info, + // we will emit the typeinfo for the fundamental types. This is the + // same behaviour as GCC. + const DeclContext *DC = RD->getDeclContext(); + if (RD->getIdentifier() && + RD->getIdentifier()->isStr("__fundamental_type_info") && + isa(DC) && cast(DC)->getIdentifier() && + cast(DC)->getIdentifier()->isStr("__cxxabiv1") && + DC->getParent()->isTranslationUnit()) { + llvm_unreachable("NYI"); + // EmitFundamentalRTTIDescriptors(RD); + } + + // Always emit type metadata on non-available_externally definitions, and on + // available_externally definitions if we are performing whole program + // devirtualization. For WPD we need the type metadata on all vtable + // definitions to ensure we associate derived classes with base classes + // defined in headers but with a strong definition only in a shared + // library. + if (!VTable.isDeclarationForLinker() || + CGM.getCodeGenOpts().WholeProgramVTables) { + CGM.buildVTableTypeMetadata(RD, VTable, VTLayout); + // For available_externally definitions, add the vtable to + // @llvm.compiler.used so that it isn't deleted before whole program + // analysis. + if (VTable.isDeclarationForLinker()) { + llvm_unreachable("NYI"); + assert(CGM.getCodeGenOpts().WholeProgramVTables); + assert(!UnimplementedFeature::addCompilerUsedGlobal()); + } + } - // // Set the correct linkage. - // VTable->setLinkage(Linkage); - - // if (CGM.supportsCOMDAT() && VTable->isWeakForLinker()) - // VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName())); - - // // Set the right visibility. - // CGM.setGVProperties(VTable, RD); - - // // If this is the magic class __cxxabiv1::__fundamental_type_info, - // // we will emit the typeinfo for the fundamental types. This is the - // // same behaviour as GCC. - // const DeclContext *DC = RD->getDeclContext(); - // if (RD->getIdentifier() && - // RD->getIdentifier()->isStr("__fundamental_type_info") && - // isa(DC) && cast(DC)->getIdentifier() && - // cast(DC)->getIdentifier()->isStr("__cxxabiv1") && - // DC->getParent()->isTranslationUnit()) - // EmitFundamentalRTTIDescriptors(RD); - - // // Always emit type metadata on non-available_externally definitions, and - // on - // // available_externally definitions if we are performing whole program - // // devirtualization. For WPD we need the type metadata on all vtable - // // definitions to ensure we associate derived classes with base classes - // // defined in headers but with a strong definition only in a shared - // library. if (!VTable->isDeclarationForLinker() || - // CGM.getCodeGenOpts().WholeProgramVTables) { - // CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout); - // // For available_externally definitions, add the vtable to - // // @llvm.compiler.used so that it isn't deleted before whole program - // // analysis. - // if (VTable->isDeclarationForLinker()) { - // assert(CGM.getCodeGenOpts().WholeProgramVTables); - // CGM.addCompilerUsedGlobal(VTable); - // } - // } - - // if (VTContext.isRelativeLayout()) { - // CGVT.RemoveHwasanMetadata(VTable); - // if (!VTable->isDSOLocal()) - // CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName()); - // } + if (VTContext.isRelativeLayout()) + llvm_unreachable("NYI"); } /// What sort of uniqueness rules should we use for the RTTI for the diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 576a61a9e7ac..27af052dbc30 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2103,6 +2103,14 @@ bool CIRGenModule::shouldOpportunisticallyEmitVTables() { return codeGenOpts.OptimizationLevel > 0; } +void CIRGenModule::buildVTableTypeMetadata(const CXXRecordDecl *RD, + mlir::cir::GlobalOp VTable, + const VTableLayout &VTLayout) { + if (!getCodeGenOpts().LTOUnit) + return; + llvm_unreachable("NYI"); +} + mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty, bool ForEH) { // Return a bogus pointer if RTTI is disabled, unless it's for EH. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 5736aa2c8c78..4f7737e578ec 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -230,6 +230,11 @@ class CIRGenModule { /// of the given class. mlir::cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *RD); + /// Emit type metadata for the given vtable using the given layout. + void buildVTableTypeMetadata(const CXXRecordDecl *RD, + mlir::cir::GlobalOp VTable, + const VTableLayout &VTLayout); + /// Get the address of the RTTI descriptor for the given type. mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty, bool ForEH = false); diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index b4ab988f8f0e..0da30c71ec16 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -154,7 +154,6 @@ void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) { llvm_unreachable("NYI"); CGM.getCXXABI().emitVTableDefinitions(*this, RD); - llvm_unreachable("NYI"); } static void AddPointerLayoutOffset(CIRGenModule &CGM, @@ -163,9 +162,6 @@ static void AddPointerLayoutOffset(CIRGenModule &CGM, assert(offset.getQuantity() == 0 && "NYI"); builder.add(mlir::cir::NullAttr::get(CGM.getBuilder().getContext(), CGM.getBuilder().getInt8PtrTy())); - // builder.add(llvm::ConstantExpr::getIntToPtr( - // llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()), - // CGM.Int8PtrTy)); } static void AddRelativeLayoutOffset(CIRGenModule &CGM, @@ -320,7 +316,7 @@ void CIRGenVTables::createVTableInitializer(ConstantStructBuilder &builder, nextVTableThunkIndex, addressPoints[vtableIndex], vtableHasLocalLinkage); } - vtableElem.finishAndAddTo(builder); + vtableElem.finishAndAddTo(rtti.getContext(), builder); } } diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp index 6fd55a89fb99..e28f5292e31f 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -87,11 +87,10 @@ mlir::cir::GlobalOp ConstantInitBuilderBase::createGlobal( void ConstantInitBuilderBase::setGlobalInitializer( mlir::cir::GlobalOp GV, mlir::Attribute initializer) { - // GV->setInitializer(initializer); + GV.setInitialValueAttr(initializer); - // if (!SelfReferences.empty()) - // resolveSelfReferences(GV); - llvm_unreachable("NYI"); + if (!SelfReferences.empty()) + resolveSelfReferences(GV); } void ConstantInitBuilderBase::resolveSelfReferences(mlir::cir::GlobalOp GV) { @@ -278,6 +277,19 @@ static mlir::cir::ConstArrayAttr getConstArray(mlir::Attribute attrs, mlir::cir::ArrayType arrayTy) { return mlir::cir::ConstArrayAttr::get(arrayTy, attrs); } +static mlir::Attribute getAnonConstStruct(mlir::ArrayAttr arrayAttr, + bool packed = false) { + assert(!packed && "NYI"); + llvm::SmallVector members; + for (auto &f : arrayAttr) { + auto ta = f.dyn_cast(); + assert(ta && "expected typed attribute member"); + members.push_back(ta.getType()); + } + auto sTy = mlir::cir::StructType::get(arrayAttr.getContext(), members, "", + /*body=*/true); + return mlir::cir::ConstStructAttr::get(sTy, arrayAttr); +} mlir::Attribute ConstantAggregateBuilderBase::finishArray(mlir::Type eltTy) { markFinished(); @@ -302,7 +314,8 @@ mlir::Attribute ConstantAggregateBuilderBase::finishArray(mlir::Type eltTy) { } mlir::Attribute -ConstantAggregateBuilderBase::finishStruct(mlir::cir::StructType ty) { +ConstantAggregateBuilderBase::finishStruct(mlir::MLIRContext *ctx, + mlir::cir::StructType ty) { markFinished(); auto &buffer = getBuffer(); @@ -321,12 +334,9 @@ ConstantAggregateBuilderBase::finishStruct(mlir::cir::StructType ty) { // constant = llvm::ConstantStruct::get(ty, elts); } else { assert(!Packed && "NYI"); - // constant = llvm::ConstantStruct::getAnon(elts, Packed); - // getAnonStruct(mlir::ArrayAttr::get(ty.getContext(), elts)) - llvm_unreachable("NYI"); + constant = getAnonConstStruct(mlir::ArrayAttr::get(ctx, elts), Packed); } - llvm_unreachable("NYI"); buffer.erase(buffer.begin() + Begin, buffer.end()); return constant; } diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h index 96cedbe1d66a..2cff288bd8f4 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -326,7 +326,8 @@ class ConstantAggregateBuilderBase { protected: mlir::Attribute finishArray(mlir::Type eltTy); - mlir::Attribute finishStruct(mlir::cir::StructType structTy); + mlir::Attribute finishStruct(mlir::MLIRContext *ctx, + mlir::cir::StructType structTy); private: void getGEPIndicesTo(llvm::SmallVectorImpl &indices, @@ -378,18 +379,19 @@ class ConstantAggregateBuilderTemplateBase /// builder. This aids in readability by making it easier to find the /// places that add components to a builder, as well as "bookending" /// the sub-builder more explicitly. - void finishAndAddTo(AggregateBuilderBase &parent) { + void finishAndAddTo(mlir::MLIRContext *ctx, AggregateBuilderBase &parent) { assert(this->Parent == &parent && "adding to non-parent builder"); - parent.add(asImpl().finishImpl()); + parent.add(asImpl().finishImpl(ctx)); } /// Given that this builder was created by beginning an array or struct /// directly on a ConstantInitBuilder, finish the array/struct and /// create a global variable with it as the initializer. template - mlir::cir::GlobalOp finishAndCreateGlobal(As &&...args) { + mlir::cir::GlobalOp finishAndCreateGlobal(mlir::MLIRContext *ctx, + As &&...args) { assert(!this->Parent && "finishing non-root builder"); - return this->Builder.createGlobal(asImpl().finishImpl(), + return this->Builder.createGlobal(asImpl().finishImpl(ctx), std::forward(args)...); } @@ -398,7 +400,8 @@ class ConstantAggregateBuilderTemplateBase /// set it as the initializer of the given global variable. void finishAndSetAsInitializer(mlir::cir::GlobalOp global) { assert(!this->Parent && "finishing non-root builder"); - return this->Builder.setGlobalInitializer(global, asImpl().finishImpl()); + return this->Builder.setGlobalInitializer( + global, asImpl().finishImpl(global.getContext())); } /// Given that this builder was created by beginning an array or struct @@ -409,9 +412,9 @@ class ConstantAggregateBuilderTemplateBase /// This is useful for allowing a finished initializer to passed to /// an API which will build the global. However, the "future" preserves /// a dependency on the original builder; it is an error to pass it aside. - ConstantInitFuture finishAndCreateFuture() { + ConstantInitFuture finishAndCreateFuture(mlir::MLIRContext *ctx) { assert(!this->Parent && "finishing non-root builder"); - return this->Builder.createFuture(asImpl().finishImpl()); + return this->Builder.createFuture(asImpl().finishImpl(ctx)); } }; @@ -441,7 +444,7 @@ class ConstantArrayBuilderTemplateBase private: /// Form an array constant from the values that have been added to this /// builder. - mlir::Attribute finishImpl() { + mlir::Attribute finishImpl([[maybe_unused]] mlir::MLIRContext *ctx) { return AggregateBuilderBase::finishArray(EltTy); } }; @@ -493,8 +496,8 @@ class ConstantStructBuilderTemplateBase private: /// Form an array constant from the values that have been added to this /// builder. - mlir::Attribute finishImpl() { - return AggregateBuilderBase::finishStruct(StructTy); + mlir::Attribute finishImpl(mlir::MLIRContext *ctx) { + return AggregateBuilderBase::finishStruct(ctx, StructTy); } }; diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index d4d6e6c51d80..953ed576d951 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -42,6 +42,7 @@ struct UnimplementedFeature { static bool setGlobalVisibility() { return false; } static bool hiddenVisibility() { return false; } static bool protectedVisibility() { return false; } + static bool addCompilerUsedGlobal() { return false; } // Sanitizers static bool reportGlobalToASan() { return false; } From da7850cd4164b2c178d10925906fff0df0168319 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Apr 2023 13:06:55 -0700 Subject: [PATCH 0868/2301] [CIR][CIRGen] Fix silly bug for properly accessing/populating cached vtable --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 3b4f9731feb4..2844a0a224f9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -481,7 +481,7 @@ mlir::cir::GlobalOp CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) { assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets"); - auto vtable = VTables[RD]; + mlir::cir::GlobalOp &vtable = VTables[RD]; if (vtable) return vtable; From a78fb6c5e5666edb6d677a710fa142405f5ecd34 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Apr 2023 14:10:30 -0700 Subject: [PATCH 0869/2301] [CIR][CIRGen] Dtors: teach getAddrAndTypeOfCXXStructor about it and compute linkage based on CXXABI --- clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 7 +++++++ clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 4 ++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 +++++++++-- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp index 5ab994227951..0b8500eb12b4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -65,3 +65,10 @@ void CIRGenCXXABI::buildThisParam(CIRGenFunction &CGF, llvm_unreachable("NYI"); } } + +mlir::cir::GlobalLinkageKind CIRGenCXXABI::getCXXDestructorLinkage( + GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const { + // Delegate back to CGM by default. + return CGM.getCIRLinkageForDeclarator(Dtor, Linkage, + /*IsConstantVariable=*/false); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index a0124cc0d0b8..712bcc02129d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -162,6 +162,10 @@ class CIRGenCXXABI { virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, CXXDtorType DT) const = 0; + virtual mlir::cir::GlobalLinkageKind + getCXXDestructorLinkage(GVALinkage Linkage, const CXXDestructorDecl *Dtor, + CXXDtorType DT) const; + /// Get the address point of the vtable for the given base subobject. virtual mlir::Value getVTableAddressPoint(BaseSubobject Base, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 27af052dbc30..53a4c6292d84 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1349,7 +1349,7 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { GVALinkage Linkage = astCtx.GetGVALinkageForFunction(D); if (const auto *Dtor = dyn_cast(D)) - assert(0 && "NYI"); + return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType()); if (isa(D) && cast(D)->isInheritingConstructor() && @@ -1383,7 +1383,14 @@ CIRGenModule::getAddrAndTypeOfCXXStructor(GlobalDecl GD, ForDefinition_t IsForDefinition) { auto *MD = cast(GD.getDecl()); - assert(!isa(MD) && "Destructors NYI"); + if (isa(MD)) { + // Always alias equivalent complete destructors to base destructors in the + // MS ABI. + if (getTarget().getCXXABI().isMicrosoft() && + GD.getDtorType() == Dtor_Complete && + MD->getParent()->getNumVBases() == 0) + llvm_unreachable("NYI"); + } if (!FnType) { if (!FnInfo) From 6fc1b8d5071388a7a447a7a8a730e5986422fa71 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Apr 2023 14:30:00 -0700 Subject: [PATCH 0870/2301] [CIR][CIRGen] VTables: add skeleton for handling thunks --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 18 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenVTables.h | 4 ++-- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 53a4c6292d84..f27bbc5f1756 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -872,7 +872,7 @@ void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { buildGlobalFunctionDefinition(GD, Op); if (Method->isVirtual()) - llvm_unreachable("NYI"); + getVTables().buildThunks(GD); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 0da30c71ec16..d18de0ef8241 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -410,3 +410,21 @@ CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { llvm_unreachable("Invalid TemplateSpecializationKind!"); } + +void CIRGenVTables::buildThunks(GlobalDecl GD) { + const CXXMethodDecl *MD = + cast(GD.getDecl())->getCanonicalDecl(); + + // We don't need to generate thunks for the base destructor. + if (isa(MD) && GD.getDtorType() == Dtor_Base) + return; + + const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector = + VTContext->getThunkInfo(GD); + + if (!ThunkInfoVector) + return; + + for ([[maybe_unused]] const ThunkInfo &Thunk : *ThunkInfoVector) + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index e9673d194072..754490674445 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -146,8 +146,8 @@ class CIRGenVTables { // llvm::GlobalVariable::LinkageTypes Linkage, // const CXXRecordDecl *RD); - // /// EmitThunks - Emit the associated thunks for the given global decl. - // void EmitThunks(GlobalDecl GD); + /// Emit the associated thunks for the given global decl. + void buildThunks(GlobalDecl GD); /// Generate all the class data required to be generated upon definition of a /// KeyFunction. This includes the vtable, the RTTI data structure (if RTTI From 378f1a724a2fafcb12656f4dd797ae1f4ed1cc2b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Apr 2023 14:30:25 -0700 Subject: [PATCH 0871/2301] [CIR][CIRGen] Dtors: layout dtor body, hook it up with codegen and add RunCleanupScope skeleton --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 121 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 97 ++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 106 ++++++++++++++- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 2 +- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 6 files changed, 326 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 67f3861acb45..c88401cbb636 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -943,4 +943,125 @@ void CIRGenFunction::destroyCXXObject(CIRGenFunction &CGF, Address addr, llvm_unreachable("NYI"); // CGF.buildCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false, // /*Delegating=*/false, addr, type); +} + +/// Emits the body of the current destructor. +void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { + const CXXDestructorDecl *Dtor = cast(CurGD.getDecl()); + CXXDtorType DtorType = CurGD.getDtorType(); + + // For an abstract class, non-base destructors are never used (and can't + // be emitted in general, because vbase dtors may not have been validated + // by Sema), but the Itanium ABI doesn't make them optional and Clang may + // in fact emit references to them from other compilations, so emit them + // as functions containing a trap instruction. + if (DtorType != Dtor_Base && Dtor->getParent()->isAbstract()) { + llvm_unreachable("NYI"); + } + + Stmt *Body = Dtor->getBody(); + if (Body) + assert(!UnimplementedFeature::incrementProfileCounter()); + + // The call to operator delete in a deleting destructor happens + // outside of the function-try-block, which means it's always + // possible to delegate the destructor body to the complete + // destructor. Do so. + if (DtorType == Dtor_Deleting) { + RunCleanupsScope DtorEpilogue(*this); + llvm_unreachable("NYI"); + // EnterDtorCleanups(Dtor, Dtor_Deleting); + // if (HaveInsertPoint()) { + // QualType ThisTy = Dtor->getThisObjectType(); + // EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, + // /*Delegating=*/false, LoadCXXThisAddress(), + // ThisTy); + // } + // return; + } + + // If the body is a function-try-block, enter the try before + // anything else. + bool isTryBody = (Body && isa(Body)); + if (isTryBody) { + llvm_unreachable("NYI"); + // EnterCXXTryStmt(*cast(Body), true); + } + assert(!UnimplementedFeature::emitAsanPrologueOrEpilogue()); + + // Enter the epilogue cleanups. + llvm_unreachable("NYI"); + // RunCleanupsScope DtorEpilogue(*this); + + // If this is the complete variant, just invoke the base variant; + // the epilogue will destruct the virtual bases. But we can't do + // this optimization if the body is a function-try-block, because + // we'd introduce *two* handler blocks. In the Microsoft ABI, we + // always delegate because we might not have a definition in this TU. + switch (DtorType) { + case Dtor_Comdat: + llvm_unreachable("not expecting a COMDAT"); + case Dtor_Deleting: + llvm_unreachable("already handled deleting case"); + + case Dtor_Complete: + llvm_unreachable("NYI"); + // assert((Body || getTarget().getCXXABI().isMicrosoft()) && + // "can't emit a dtor without a body for non-Microsoft ABIs"); + + // // Enter the cleanup scopes for virtual bases. + // EnterDtorCleanups(Dtor, Dtor_Complete); + + // if (!isTryBody) { + // QualType ThisTy = Dtor->getThisObjectType(); + // EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, + // /*Delegating=*/false, LoadCXXThisAddress(), + // ThisTy); + // break; + // } + + // Fallthrough: act like we're in the base variant. + [[fallthrough]]; + + case Dtor_Base: + llvm_unreachable("NYI"); + assert(Body); + + // // Enter the cleanup scopes for fields and non-virtual bases. + // EnterDtorCleanups(Dtor, Dtor_Base); + + // // Initialize the vtable pointers before entering the body. + // if (!CanSkipVTablePointerInitialization(*this, Dtor)) { + // // Insert the llvm.launder.invariant.group intrinsic before + // initializing + // // the vptrs to cancel any previous assumptions we might have made. + // if (CGM.getCodeGenOpts().StrictVTablePointers && + // CGM.getCodeGenOpts().OptimizationLevel > 0) + // CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis()); + // InitializeVTablePointers(Dtor->getParent()); + // } + + // if (isTryBody) + // EmitStmt(cast(Body)->getTryBlock()); + // else if (Body) + // EmitStmt(Body); + // else { + // assert(Dtor->isImplicit() && "bodyless dtor not implicit"); + // // nothing to do besides what's in the epilogue + // } + // // -fapple-kext must inline any call to this dtor into + // // the caller's body. + // if (getLangOpts().AppleKext) + // CurFn->addFnAttr(llvm::Attribute::AlwaysInline); + + // break; + } + + // Jump out through the epilogue cleanups. + llvm_unreachable("NYI"); + // DtorEpilogue.ForceCleanup(); + + // Exit the try if applicable. + if (isTryBody) + llvm_unreachable("NYI"); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index dcaf39b7833a..28ae5fb7756a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -51,6 +51,103 @@ void CIRGenFunction::buildCXXTemporary(const CXXTemporary *Temporary, /*useEHCleanup*/ true); } +void CIRGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) { + // Set that as the active flag in the cleanup. + EHCleanupScope &cleanup = cast(*EHStack.begin()); + assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?"); + cleanup.setActiveFlag(ActiveFlag); + + if (cleanup.isNormalCleanup()) + cleanup.setTestFlagInNormalCleanup(); + if (cleanup.isEHCleanup()) + cleanup.setTestFlagInEHCleanup(); +} + +/// Pops a cleanup block. If the block includes a normal cleanup, the +/// current insertion point is threaded through the cleanup, as are +/// any branch fixups on the cleanup. +void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { + assert(!EHStack.empty() && "cleanup stack is empty!"); + assert(isa(*EHStack.begin()) && "top not a cleanup!"); + [[maybe_unused]] EHCleanupScope &Scope = + cast(*EHStack.begin()); + assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups()); + + // Remember activation information. + [[maybe_unused]] bool IsActive = Scope.isActive(); + [[maybe_unused]] Address NormalActiveFlag = + Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() + : Address::invalid(); + [[maybe_unused]] Address EHActiveFlag = Scope.shouldTestFlagInEHCleanup() + ? Scope.getActiveFlag() + : Address::invalid(); + llvm_unreachable("NYI"); +} + +/// Pops cleanup blocks until the given savepoint is reached. +void CIRGenFunction::PopCleanupBlocks( + EHScopeStack::stable_iterator Old, + std::initializer_list ValuesToReload) { + assert(Old.isValid()); + + bool HadBranches = false; + while (EHStack.stable_begin() != Old) { + EHCleanupScope &Scope = cast(*EHStack.begin()); + HadBranches |= Scope.hasBranches(); + + // As long as Old strictly encloses the scope's enclosing normal + // cleanup, we're going to emit another normal cleanup which + // fallthrough can propagate through. + bool FallThroughIsBranchThrough = + Old.strictlyEncloses(Scope.getEnclosingNormalCleanup()); + + PopCleanupBlock(FallThroughIsBranchThrough); + } + + // If we didn't have any branches, the insertion point before cleanups must + // dominate the current insertion point and we don't need to reload any + // values. + if (!HadBranches) + return; + + llvm_unreachable("NYI"); +} + +/// Pops cleanup blocks until the given savepoint is reached, then add the +/// cleanups from the given savepoint in the lifetime-extended cleanups stack. +void CIRGenFunction::PopCleanupBlocks( + EHScopeStack::stable_iterator Old, size_t OldLifetimeExtendedSize, + std::initializer_list ValuesToReload) { + PopCleanupBlocks(Old, ValuesToReload); + + // Move our deferred cleanups onto the EH stack. + for (size_t I = OldLifetimeExtendedSize, + E = LifetimeExtendedCleanupStack.size(); + I != E; + /**/) { + // Alignment should be guaranteed by the vptrs in the individual cleanups. + assert((I % alignof(LifetimeExtendedCleanupHeader) == 0) && + "misaligned cleanup stack entry"); + + LifetimeExtendedCleanupHeader &Header = + reinterpret_cast( + LifetimeExtendedCleanupStack[I]); + I += sizeof(Header); + + EHStack.pushCopyOfCleanup( + Header.getKind(), &LifetimeExtendedCleanupStack[I], Header.getSize()); + I += Header.getSize(); + + if (Header.isConditional()) { + Address ActiveFlag = + reinterpret_cast
(LifetimeExtendedCleanupStack[I]); + initFullExprCleanupWithFlag(ActiveFlag); + I += sizeof(ActiveFlag); + } + } + LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize); +} + //===----------------------------------------------------------------------===// // EHScopeStack //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index b65bae9c6b88..82e1680cb5a6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -488,7 +488,7 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // Generate the body of the function. // TODO: PGO.assignRegionCounters if (isa(FD)) - llvm_unreachable("NYI"); + buildDestructorBody(Args); else if (isa(FD)) buildConstructorBody(Args); else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice && diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index fbe94402623c..9fef87017374 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -511,6 +511,7 @@ class CIRGenFunction { /// Tracks function scope overall cleanup handling. EHScopeStack EHStack; + llvm::SmallVector LifetimeExtendedCleanupStack; /// A mapping from NRVO variables to the flags used to indicate /// when the NRVO has been applied to this variable. @@ -1089,8 +1090,8 @@ class CIRGenFunction { void buildCtorPrologue(const clang::CXXConstructorDecl *CD, clang::CXXCtorType Type, FunctionArgList &Args); - void buildConstructorBody(FunctionArgList &Args); + void buildDestructorBody(FunctionArgList &Args); static bool IsConstructorDelegationValid(const clang::CXXConstructorDecl *Ctor); @@ -1309,6 +1310,38 @@ class CIRGenFunction { /// /// Cleanups /// -------- + + /// Header for data within LifetimeExtendedCleanupStack. + struct LifetimeExtendedCleanupHeader { + /// The size of the following cleanup object. + unsigned Size; + /// The kind of cleanup to push: a value from the CleanupKind enumeration. + unsigned Kind : 31; + /// Whether this is a conditional cleanup. + unsigned IsConditional : 1; + + size_t getSize() const { return Size; } + CleanupKind getKind() const { return (CleanupKind)Kind; } + bool isConditional() const { return IsConditional; } + }; + + /// Takes the old cleanup stack size and emits the cleanup blocks + /// that have been added. + void + PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, + std::initializer_list ValuesToReload = {}); + + /// Takes the old cleanup stack size and emits the cleanup blocks + /// that have been added, then adds all lifetime-extended cleanups from + /// the given position to the stack. + void + PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, + size_t OldLifetimeExtendedStackSize, + std::initializer_list ValuesToReload = {}); + + /// Will pop the cleanup entry on the stack and process all branch fixups. + void PopCleanupBlock(bool FallThroughIsBranchThrough = false); + typedef void Destroyer(CIRGenFunction &CGF, Address addr, QualType ty); static Destroyer destroyCXXObject; @@ -1374,6 +1407,77 @@ class CIRGenFunction { // initFullExprCleanup(); } + /// Set up the last cleanup that was pushed as a conditional + /// full-expression cleanup. + void initFullExprCleanup() { + initFullExprCleanupWithFlag(createCleanupActiveFlag()); + } + + void initFullExprCleanupWithFlag(Address ActiveFlag); + Address createCleanupActiveFlag(); + + /// Enters a new scope for capturing cleanups, all of which + /// will be executed once the scope is exited. + class RunCleanupsScope { + EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth; + size_t LifetimeExtendedCleanupStackSize; + bool OldDidCallStackSave; + + protected: + bool PerformCleanup; + + private: + RunCleanupsScope(const RunCleanupsScope &) = delete; + void operator=(const RunCleanupsScope &) = delete; + + protected: + CIRGenFunction &CGF; + + public: + /// Enter a new cleanup scope. + explicit RunCleanupsScope(CIRGenFunction &CGF) + : PerformCleanup(true), CGF(CGF) { + CleanupStackDepth = CGF.EHStack.stable_begin(); + LifetimeExtendedCleanupStackSize = + CGF.LifetimeExtendedCleanupStack.size(); + OldDidCallStackSave = CGF.DidCallStackSave; + CGF.DidCallStackSave = false; + OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth; + CGF.CurrentCleanupScopeDepth = CleanupStackDepth; + } + + /// Exit this cleanup scope, emitting any accumulated cleanups. + ~RunCleanupsScope() { + if (PerformCleanup) + ForceCleanup(); + } + + /// Determine whether this scope requires any cleanups. + bool requiresCleanups() const { + return CGF.EHStack.stable_begin() != CleanupStackDepth; + } + + /// Force the emission of cleanups now, instead of waiting + /// until this object is destroyed. + /// \param ValuesToReload - A list of values that need to be available at + /// the insertion point after cleanup emission. If cleanup emission created + /// a shared cleanup block, these value pointers will be rewritten. + /// Otherwise, they not will be modified. + void + ForceCleanup(std::initializer_list ValuesToReload = {}) { + assert(PerformCleanup && "Already forced cleanup"); + CGF.DidCallStackSave = OldDidCallStackSave; + CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize, + ValuesToReload); + PerformCleanup = false; + CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth; + } + }; + + // Cleanup stack depth of the RunCleanupsScope that was pushed most recently. + EHScopeStack::stable_iterator CurrentCleanupScopeDepth = + EHScopeStack::stable_end(); + /// CIR build helpers /// ----------------- diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 2844a0a224f9..be89fe14ba0b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -373,7 +373,7 @@ void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { // of the destructor is trivial. if (DD && GD.getDtorType() == Dtor_Base && CIRGenType != StructorCIRGen::COMDAT) - llvm_unreachable("NYI"); + return; // FIXME: The deleting destructor is equivalent to the selected operator // delete if: diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 953ed576d951..abd2a79885dc 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -46,6 +46,7 @@ struct UnimplementedFeature { // Sanitizers static bool reportGlobalToASan() { return false; } + static bool emitAsanPrologueOrEpilogue() { return false; } static bool emitCheckedInBoundsGEP() { return false; } // ObjC From ab9f1f4141093c3dfe8883e2bd948bafc5dba71c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Apr 2023 16:19:25 -0700 Subject: [PATCH 0872/2301] [CIR][CIRGen] Dtors: use EnterDtorCleanups to emit deleting dtor --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 55 ++++++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 14 +++++-- 2 files changed, 65 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index c88401cbb636..578f1b1c4b7d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1064,4 +1064,59 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { // Exit the try if applicable. if (isTryBody) llvm_unreachable("NYI"); +} + +namespace { +[[maybe_unused]] mlir::Value +LoadThisForDtorDelete(CIRGenFunction &CGF, const CXXDestructorDecl *DD) { + if (Expr *ThisArg = DD->getOperatorDeleteThisArg()) + return CGF.buildScalarExpr(ThisArg); + return CGF.LoadCXXThis(); +} + +/// Call the operator delete associated with the current destructor. +struct CallDtorDelete final : EHScopeStack::Cleanup { + CallDtorDelete() {} + + void Emit(CIRGenFunction &CGF, Flags flags) override { + [[maybe_unused]] const CXXDestructorDecl *Dtor = + cast(CGF.CurCodeDecl); + [[maybe_unused]] const CXXRecordDecl *ClassDecl = Dtor->getParent(); + llvm_unreachable("NYI"); + // CGF.EmitDeleteCall(Dtor->getOperatorDelete(), + // LoadThisForDtorDelete(CGF, Dtor), + // CGF.getContext().getTagDeclType(ClassDecl)); + } +}; +} // namespace + +/// Emit all code that comes at the end of class's destructor. This is to call +/// destructors on members and base classes in reverse order of their +/// construction. +/// +/// For a deleting destructor, this also handles the case where a destroying +/// operator delete completely overrides the definition. +void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, + CXXDtorType DtorType) { + assert((!DD->isTrivial() || DD->hasAttr()) && + "Should not emit dtor epilogue for non-exported trivial dtor!"); + + // The deleting-destructor phase just needs to call the appropriate + // operator delete that Sema picked up. + if (DtorType == Dtor_Deleting) { + assert(DD->getOperatorDelete() && + "operator delete missing - EnterDtorCleanups"); + if (CXXStructorImplicitParamValue) { + llvm_unreachable("NYI"); + } else { + if (DD->getOperatorDelete()->isDestroyingOperatorDelete()) { + llvm_unreachable("NYI"); + } else { + EHStack.pushCleanup(NormalAndEHCleanup); + } + } + return; + } + + llvm_unreachable("NYI"); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9fef87017374..ad9fdbb7b66d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -541,6 +541,11 @@ class CIRGenFunction { clang::CharUnits CXXABIThisAlignment; clang::CharUnits CXXThisAlignment; + /// When generating code for a constructor or destructor, this will hold the + /// implicit argument (e.g. VTT). + ImplicitParamDecl *CXXStructorImplicitParamDecl{}; + mlir::Value CXXStructorImplicitParamValue{}; + /// The value of 'this' to sue when evaluating CXXDefaultInitExprs within this /// expression. Address CXXDefaultInitExprThis = Address::invalid(); @@ -556,10 +561,6 @@ class CIRGenFunction { /// Save Parameter Decl for coroutine. llvm::SmallVector FnArgs; - /// CXXStructorImplicitParamDecl - When generating code for a constructor or - /// destructor, this will hold the implicit argument (e.g. VTT). - clang::ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr; - // The CallExpr within the current statement that the musttail attribute // applies to. nullptr if there is no 'musttail' on the current statement. const clang::CallExpr *MustTailCall = nullptr; @@ -1093,6 +1094,11 @@ class CIRGenFunction { void buildConstructorBody(FunctionArgList &Args); void buildDestructorBody(FunctionArgList &Args); + /// Enter the cleanups necessary to complete the given phase of destruction + /// for a destructor. The end result should call destructors on members and + /// base classes in reverse order of their construction. + void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type); + static bool IsConstructorDelegationValid(const clang::CXXConstructorDecl *Ctor); From eeda96653728a715f267382c22ceb614c0ea0f14 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Apr 2023 16:36:51 -0700 Subject: [PATCH 0873/2301] [CIR][CIRGen] Dtors: hook build{CXX}DestructorCall's for regular dtor emission --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 7 +++++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 31 ++++++++++++------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 12 +++++++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 10 ++++++ 4 files changed, 49 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 712bcc02129d..809b701331af 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -135,6 +135,13 @@ class CIRGenCXXABI { /// Emit dtor variants required by this ABI. virtual void buildCXXDestructors(const clang::CXXDestructorDecl *D) = 0; + /// Emit the destructor call. + virtual void buildDestructorCall(CIRGenFunction &CGF, + const CXXDestructorDecl *DD, + CXXDtorType Type, bool ForVirtualBase, + bool Delegating, Address This, + QualType ThisTy) = 0; + /// Get the address of the vtable for the given record decl which should be /// used for the vptr at the given offset in RD. virtual mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 578f1b1c4b7d..4b73bb0530cb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -969,15 +969,14 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { // destructor. Do so. if (DtorType == Dtor_Deleting) { RunCleanupsScope DtorEpilogue(*this); - llvm_unreachable("NYI"); - // EnterDtorCleanups(Dtor, Dtor_Deleting); - // if (HaveInsertPoint()) { - // QualType ThisTy = Dtor->getThisObjectType(); - // EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, - // /*Delegating=*/false, LoadCXXThisAddress(), - // ThisTy); - // } - // return; + EnterDtorCleanups(Dtor, Dtor_Deleting); + if (HaveInsertPoint()) { + QualType ThisTy = Dtor->getFunctionObjectParameterType(); + buildCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, + /*Delegating=*/false, LoadCXXThisAddress(), + ThisTy); + } + return; } // If the body is a function-try-block, enter the try before @@ -987,7 +986,8 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { llvm_unreachable("NYI"); // EnterCXXTryStmt(*cast(Body), true); } - assert(!UnimplementedFeature::emitAsanPrologueOrEpilogue()); + if (UnimplementedFeature::emitAsanPrologueOrEpilogue()) + llvm_unreachable("NYI"); // Enter the epilogue cleanups. llvm_unreachable("NYI"); @@ -1119,4 +1119,13 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, } llvm_unreachable("NYI"); -} \ No newline at end of file +} + +void CIRGenFunction::buildCXXDestructorCall(const CXXDestructorDecl *DD, + CXXDtorType Type, + bool ForVirtualBase, + bool Delegating, Address This, + QualType ThisTy) { + CGM.getCXXABI().buildDestructorCall(*this, DD, Type, ForVirtualBase, + Delegating, This, ThisTy); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index ad9fdbb7b66d..c70b7380c92c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -672,6 +672,15 @@ class CIRGenFunction { LocalDeclMap.insert({VD, Addr}); } + /// True if an insertion point is defined. If not, this indicates that the + /// current code being emitted is unreachable. + /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism + /// since we don't yet force null insertion point to designate behavior (like + /// LLVM's codegen does) and we probably shouldn't. + bool HaveInsertPoint() const { + return builder.getInsertionBlock() != nullptr; + } + /// Whether any type-checking sanitizers are enabled. If \c false, calls to /// buildTypeCheck can be skipped. bool sanitizePerformTypeCheck() const; @@ -1093,6 +1102,9 @@ class CIRGenFunction { clang::CXXCtorType Type, FunctionArgList &Args); void buildConstructorBody(FunctionArgList &Args); void buildDestructorBody(FunctionArgList &Args); + void buildCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, + bool ForVirtualBase, bool Delegating, + Address This, QualType ThisTy); /// Enter the cleanups necessary to complete the given phase of destruction /// for a destructor. The end result should call destructors on members and diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index be89fe14ba0b..ae482f3abfe4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -110,6 +110,10 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { void buildCXXConstructors(const clang::CXXConstructorDecl *D) override; void buildCXXDestructors(const clang::CXXDestructorDecl *D) override; void buildCXXStructor(clang::GlobalDecl GD) override; + void buildDestructorCall(CIRGenFunction &CGF, const CXXDestructorDecl *DD, + CXXDtorType Type, bool ForVirtualBase, + bool Delegating, Address This, + QualType ThisTy) override; bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, @@ -1533,3 +1537,9 @@ CIRGenItaniumCXXABI::classifyRTTIUniqueness( assert(Linkage == mlir::cir::GlobalLinkageKind::WeakODRLinkage); return RUK_NonUniqueVisible; } + +void CIRGenItaniumCXXABI::buildDestructorCall( + CIRGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, + bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy) { + llvm_unreachable("NYI"); +} From 381f0c375a1eab128903988892713ad9f74c3613 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Apr 2023 16:45:08 -0700 Subject: [PATCH 0874/2301] [CIR][CIRGen] Dtors: implement itanium specific buildDestructor logic --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 8 ++++++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 25 +++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 27 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 11 ++++++++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 26 +++++++++++++++++- 5 files changed, 96 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 809b701331af..7a44349a1661 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -130,6 +130,14 @@ class CIRGenCXXABI { /// taken care of by the caller. virtual bool isThisCompleteObject(clang::GlobalDecl GD) const = 0; + /// Get the implicit (second) parameter that comes after the "this" pointer, + /// or nullptr if there is isn't one. + virtual mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &CGF, + const CXXDestructorDecl *DD, + CXXDtorType Type, + bool ForVirtualBase, + bool Delegating) = 0; + /// Emit constructor variants required by this ABI. virtual void buildCXXConstructors(const clang::CXXConstructorDecl *D) = 0; /// Emit dtor variants required by this ABI. diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 4b73bb0530cb..bba773068a81 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1129,3 +1129,28 @@ void CIRGenFunction::buildCXXDestructorCall(const CXXDestructorDecl *DD, CGM.getCXXABI().buildDestructorCall(*this, DD, Type, ForVirtualBase, Delegating, This, ThisTy); } + +mlir::Value CIRGenFunction::GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, + bool Delegating) { + if (!CGM.getCXXABI().NeedsVTTParameter(GD)) { + // This constructor/destructor does not need a VTT parameter. + return nullptr; + } + + const CXXRecordDecl *RD = cast(CurCodeDecl)->getParent(); + const CXXRecordDecl *Base = cast(GD.getDecl())->getParent(); + + if (Delegating) { + llvm_unreachable("NYI"); + } else if (RD == Base) { + llvm_unreachable("NYI"); + } else { + llvm_unreachable("NYI"); + } + + if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { + llvm_unreachable("NYI"); + } else { + llvm_unreachable("NYI"); + } +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 8a0b44116a32..9efe3e709d41 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -325,3 +325,30 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { assert(0 && "not implemented"); } + +RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor, + const CIRGenCallee &Callee, + mlir::Value This, QualType ThisTy, + mlir::Value ImplicitParam, + QualType ImplicitParamTy, + const CallExpr *CE) { + const CXXMethodDecl *DtorDecl = cast(Dtor.getDecl()); + + assert(!ThisTy.isNull()); + assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() && + "Pointer/Object mixup"); + + LangAS SrcAS = ThisTy.getAddressSpace(); + LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace(); + if (SrcAS != DstAS) { + llvm_unreachable("NYI"); + } + + CallArgList Args; + commonBuildCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam, + ImplicitParamTy, CE, Args, nullptr); + assert((CE || currSrcLoc) && "expected source location"); + return buildCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee, + ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall, + CE ? getLoc(CE->getExprLoc()) : *currSrcLoc); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index c70b7380c92c..dfd2aa0faba1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1105,6 +1105,10 @@ class CIRGenFunction { void buildCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy); + RValue buildCXXDestructorCall(GlobalDecl Dtor, const CIRGenCallee &Callee, + mlir::Value This, QualType ThisTy, + mlir::Value ImplicitParam, + QualType ImplicitParamTy, const CallExpr *E); /// Enter the cleanups necessary to complete the given phase of destruction /// for a destructor. The end result should call destructors on members and @@ -1133,6 +1137,13 @@ class CIRGenFunction { const clang::CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs); + /// Return the VTT parameter that should be passed to a base + /// constructor/destructor with virtual bases. + /// FIXME: VTTs are Itanium ABI-specific, so the definition should move + /// to CIRGenItaniumCXXABI.cpp together with all the references to VTT. + mlir::Value GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, + bool Delegating); + /// Source location information about the default argument or member /// initializer expression we're evaluating, if any. clang::CurrentSourceLocExprScope CurSourceLocExprScope; diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index ae482f3abfe4..d605dfd0fab8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -107,6 +107,11 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { void addImplicitStructorParams(CIRGenFunction &CGF, QualType &ResTy, FunctionArgList &Params) override; + mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &CGF, + const CXXDestructorDecl *DD, + CXXDtorType Type, + bool ForVirtualBase, + bool Delegating) override; void buildCXXConstructors(const clang::CXXConstructorDecl *D) override; void buildCXXDestructors(const clang::CXXDestructorDecl *D) override; void buildCXXStructor(clang::GlobalDecl GD) override; @@ -1541,5 +1546,24 @@ CIRGenItaniumCXXABI::classifyRTTIUniqueness( void CIRGenItaniumCXXABI::buildDestructorCall( CIRGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy) { - llvm_unreachable("NYI"); + GlobalDecl GD(DD, Type); + auto VTT = + getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating); + QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy); + CIRGenCallee Callee; + if (getContext().getLangOpts().AppleKext && Type != Dtor_Base && + DD->isVirtual()) + llvm_unreachable("NYI"); + else + Callee = CIRGenCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD); + + CGF.buildCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy, + nullptr); +} + +mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam( + CIRGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, + bool ForVirtualBase, bool Delegating) { + GlobalDecl GD(DD, Type); + return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating); } From b6c9765d4540b9586db52daf581cc1aedd1cc4a6 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sun, 9 Apr 2023 10:31:51 -0300 Subject: [PATCH 0875/2301] [CIR][Bugfix] Register missing DLTI dialect in cir-tool's registry When lowering from CIR to the LLVM dialect, DLTI module attributes generated errors due to them being from an unregistered dialect. This commit fixes these errors. --- clang/tools/cir-tool/cir-tool.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index 94f3917de7a3..ef01f6a81707 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -13,6 +13,7 @@ //===----------------------------------------------------------------------===// #include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" @@ -28,7 +29,7 @@ int main(int argc, char **argv) { mlir::DialectRegistry registry; registry.insert(); + mlir::LLVM::LLVMDialect, mlir::DLTIDialect>(); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertMLIRToLLVMPass(); From 5959e23b2abc0bfe58c550b2daa014af63dc278f Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sun, 9 Apr 2023 17:05:41 -0300 Subject: [PATCH 0876/2301] [CIR][CodeGen] Emit CIR operations for CharacterLiteral AST nodes --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 6 +++++- clang/test/CIR/CodeGen/literals.c | 9 +++++++++ clang/test/CIR/CodeGen/literals.cpp | 8 ++++++++ 3 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/literals.c create mode 100644 clang/test/CIR/CodeGen/literals.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index ab1cf79a98cf..6cf85b36d912 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -111,7 +111,11 @@ class ScalarExprEmitter : public StmtVisitor { Builder.getFloatAttr(Ty, E->getValue())); } mlir::Value VisitCharacterLiteral(const CharacterLiteral *E) { - llvm_unreachable("NYI"); + mlir::Type Ty = CGF.getCIRType(E->getType()); + auto newOp = Builder.create( + CGF.getLoc(E->getExprLoc()), Ty, + Builder.getIntegerAttr(Ty, E->getValue())); + return newOp; } mlir::Value VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) { llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/literals.c b/clang/test/CIR/CodeGen/literals.c new file mode 100644 index 000000000000..91d1b310c4c4 --- /dev/null +++ b/clang/test/CIR/CodeGen/literals.c @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -fclangir -emit-cir %s -o - | FileCheck %s + +int literals(void) { + char a = 'a'; // char literals are int in C + // CHECK: %[[RES:[0-9]+]] = cir.const(97 : i32) : i32 + // CHECK: %{{[0-9]+}} = cir.cast(integral, %[[RES]] : i32), i8 + + return 0; +} diff --git a/clang/test/CIR/CodeGen/literals.cpp b/clang/test/CIR/CodeGen/literals.cpp new file mode 100644 index 000000000000..1aaf6a56cb58 --- /dev/null +++ b/clang/test/CIR/CodeGen/literals.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -fclangir -emit-cir %s -o - | FileCheck %s + +int literals() { + char a = 'a'; // char literals have char type in C++ + // CHECK: %{{[0-9]+}} = cir.const(97 : i8) : i8 + + return 0; +} From ca199e73c15928cc49c6faee0139501c52c8f47b Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sun, 9 Apr 2023 20:49:38 -0300 Subject: [PATCH 0877/2301] [CIR][Codegen][NFC] Group signed/unsigned Clang builtin types cases --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 60 ++++++++++++++------------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index d44f348ce32d..e2884ddd72a4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -355,47 +355,51 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { ResultType = ::mlir::cir::BoolType::get(Builder.getContext()); break; + // Signed types. + case BuiltinType::Accum: case BuiltinType::Char_S: - case BuiltinType::Char_U: - case BuiltinType::SChar: - case BuiltinType::UChar: - case BuiltinType::Short: - case BuiltinType::UShort: + case BuiltinType::Fract: case BuiltinType::Int: - case BuiltinType::UInt: case BuiltinType::Long: - case BuiltinType::ULong: + case BuiltinType::LongAccum: + case BuiltinType::LongFract: case BuiltinType::LongLong: - case BuiltinType::ULongLong: + case BuiltinType::SChar: + case BuiltinType::Short: + case BuiltinType::ShortAccum: + case BuiltinType::ShortFract: case BuiltinType::WChar_S: - case BuiltinType::WChar_U: - case BuiltinType::Char8: + // Saturated signed types. + case BuiltinType::SatAccum: + case BuiltinType::SatFract: + case BuiltinType::SatLongAccum: + case BuiltinType::SatLongFract: + case BuiltinType::SatShortAccum: + case BuiltinType::SatShortFract: + // Unsigned types. case BuiltinType::Char16: case BuiltinType::Char32: - case BuiltinType::ShortAccum: - case BuiltinType::Accum: - case BuiltinType::LongAccum: - case BuiltinType::UShortAccum: + case BuiltinType::Char8: + case BuiltinType::Char_U: case BuiltinType::UAccum: - case BuiltinType::ULongAccum: - case BuiltinType::ShortFract: - case BuiltinType::Fract: - case BuiltinType::LongFract: - case BuiltinType::UShortFract: + case BuiltinType::UChar: case BuiltinType::UFract: + case BuiltinType::UInt: + case BuiltinType::ULong: + case BuiltinType::ULongAccum: case BuiltinType::ULongFract: - case BuiltinType::SatShortAccum: - case BuiltinType::SatAccum: - case BuiltinType::SatLongAccum: - case BuiltinType::SatUShortAccum: + case BuiltinType::ULongLong: + case BuiltinType::UShort: + case BuiltinType::UShortAccum: + case BuiltinType::UShortFract: + case BuiltinType::WChar_U: + // Saturated unsigned types. case BuiltinType::SatUAccum: - case BuiltinType::SatULongAccum: - case BuiltinType::SatShortFract: - case BuiltinType::SatFract: - case BuiltinType::SatLongFract: - case BuiltinType::SatUShortFract: case BuiltinType::SatUFract: + case BuiltinType::SatULongAccum: case BuiltinType::SatULongFract: + case BuiltinType::SatUShortAccum: + case BuiltinType::SatUShortFract: // FIXME: break this in s/u and also pass signed param. ResultType = Builder.getIntegerType(static_cast(Context.getTypeSize(T))); From c8001c6ee696e59efba2d48fea2ae1be021d15ed Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sun, 9 Apr 2023 20:52:16 -0300 Subject: [PATCH 0878/2301] [CIR][CodeGen] Add missing code gen cast tests --- clang/test/CIR/CodeGen/cast.cpp | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index eff79450d025..d3fc7ff7eb85 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -14,3 +14,26 @@ unsigned char cxxstaticcast_0(unsigned int x) { // CHECK: %4 = cir.load %1 : cir.ptr , i8 // CHECK: cir.return %4 : i8 // CHECK: } + + +int cStyleCasts_0(unsigned x1, int x2) { +// CHECK: cir.func @_{{.*}}cStyleCasts_0{{.*}} + + char a = (char)x1; // truncate + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : i32), i8 + + short b = (short)x2; // truncate with sign + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : i32), i16 + + long long c = (long long)x1; // zero extend + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : i32), i64 + + long long d = (long long)x2; // sign extend + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : i32), i64 + + int arr[3]; + int* e = (int*)arr; // explicit pointer decay + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %{{[0-9]+}} : !cir.ptr>), !cir.ptr + + return 0; +} From b252c26ebe17313c64c4a0b6abc1deef585a333a Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 10 Apr 2023 06:36:04 -0300 Subject: [PATCH 0879/2301] [CIR][Lowering] Lower CIR integral casts --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 24 +++++++++++ clang/test/CIR/Lowering/cast.cir | 40 +++++++++++++++++-- 2 files changed, 60 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 2d0e256bcf4c..302a4632ca66 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -194,6 +194,30 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { mlir::cir::CmpOpKind::ne, src, zero); break; } + case mlir::cir::CastKind::integral: { + auto oldSourceType = + castOp->getOperands().front().getType().cast(); + auto sourceValue = adaptor.getOperands().front(); + auto sourceType = sourceValue.getType().cast(); + auto targetType = getTypeConverter() + ->convertType(castOp.getResult().getType()) + .cast(); + + // Target integer is smaller: truncate source value. + if (targetType.getWidth() < sourceType.getWidth()) { + rewriter.replaceOpWithNewOp(castOp, targetType, + sourceValue); + } else { + // FIXME: CIR codegen does not distiguishes singned/unsinged types. + if (oldSourceType.isUnsigned()) + rewriter.replaceOpWithNewOp(castOp, targetType, + sourceValue); + else + rewriter.replaceOpWithNewOp(castOp, targetType, + sourceValue); + } + break; + } default: llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 629620a92579..6e14d723dec1 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -6,16 +6,13 @@ module { %4 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool cir.return %arg0 : i32 } -} -// MLIR: module { -// MLIR-NEXT: llvm.func @foo(%arg0: i32) -> i32 { +// MLIR: llvm.func @foo(%arg0: i32) -> i32 { // MLIR-NEXT: [[v0:%[0-9]]] = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: [[v1:%[0-9]]] = llvm.icmp "ne" %arg0, %0 : i32 // MLIR-NEXT: [[v2:%[0-9]]] = llvm.zext %1 : i1 to i8 // MLIR-NEXT: llvm.return %arg0 : i32 // MLIR-NEXT: } -// MLIR-NEXT:} // LLVM: define i32 @foo(i32 %0) { @@ -23,3 +20,38 @@ module { // LLVM-NEXT: %3 = zext i1 %2 to i8 // LLVM-NEXT: ret i32 %0 // LLVM-NEXT: } + + cir.func @cStyleCasts(%arg0: i32, %arg1: i32) -> i32 { + // MLIR: llvm.func @cStyleCasts(%arg0: i32, %arg1: i32) -> i32 { + %0 = cir.alloca i32, cir.ptr , ["x1", init] {alignment = 4 : i64} + %1 = cir.alloca i32, cir.ptr , ["x2", init] {alignment = 4 : i64} + %2 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} + %3 = cir.alloca i8, cir.ptr , ["a", init] {alignment = 1 : i64} + %4 = cir.alloca i16, cir.ptr , ["b", init] {alignment = 2 : i64} + %5 = cir.alloca i64, cir.ptr , ["c", init] {alignment = 8 : i64} + %6 = cir.alloca i64, cir.ptr , ["d", init] {alignment = 8 : i64} + cir.store %arg0, %0 : i32, cir.ptr + cir.store %arg1, %1 : i32, cir.ptr + %7 = cir.load %0 : cir.ptr , i32 + %8 = cir.cast(integral, %7 : i32), i8 + // MLIR: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8 + cir.store %8, %3 : i8, cir.ptr + %9 = cir.load %1 : cir.ptr , i32 + %10 = cir.cast(integral, %9 : i32), i16 + // MLIR: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i16 + cir.store %10, %4 : i16, cir.ptr + %11 = cir.load %0 : cir.ptr , i32 + %12 = cir.cast(integral, %11 : i32), i64 + // FIXME: this should be a zext, but we don't distinguish signed/unsigned + // MLIR: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 + cir.store %12, %5 : i64, cir.ptr + %13 = cir.load %1 : cir.ptr , i32 + %14 = cir.cast(integral, %13 : i32), i64 + // MLIR: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 + cir.store %14, %6 : i64, cir.ptr + %15 = cir.const(0 : i32) : i32 + cir.store %15, %2 : i32, cir.ptr + %16 = cir.load %2 : cir.ptr , i32 + cir.return %16 : i32 + } +} From 58210bb2c1351daf80fcd0e6122748ca02e0b084 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 10 Apr 2023 07:12:12 -0300 Subject: [PATCH 0880/2301] [CIR][Lowering] Lower CIR pointer decay casts --- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 15 +++++++++++++++ clang/test/CIR/Lowering/cast.cir | 5 +++++ 2 files changed, 20 insertions(+) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 302a4632ca66..b39b94a044da 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -180,11 +180,26 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; + inline mlir::Type convertTy(mlir::Type ty) const { + return getTypeConverter()->convertType(ty); + } + mlir::LogicalResult matchAndRewrite(mlir::cir::CastOp castOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto src = adaptor.getSrc(); switch (castOp.getKind()) { + case mlir::cir::CastKind::array_to_ptrdecay: { + const auto ptrTy = castOp.getType().cast(); + auto sourceValue = adaptor.getOperands().front(); + auto targetType = + getTypeConverter()->convertType(castOp->getResult(0).getType()); + auto elementTy = convertTy(ptrTy.getPointee()); + auto offset = llvm::SmallVector{0}; + rewriter.replaceOpWithNewOp( + castOp, targetType, elementTy, sourceValue, offset); + break; + } case mlir::cir::CastKind::int_to_bool: { auto zero = rewriter.create( src.getLoc(), src.getType(), diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 6e14d723dec1..30f54fd3cd2e 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -30,6 +30,8 @@ module { %4 = cir.alloca i16, cir.ptr , ["b", init] {alignment = 2 : i64} %5 = cir.alloca i64, cir.ptr , ["c", init] {alignment = 8 : i64} %6 = cir.alloca i64, cir.ptr , ["d", init] {alignment = 8 : i64} + %17 = cir.alloca !cir.array, cir.ptr >, ["arr"] {alignment = 4 : i64} + %18 = cir.alloca !cir.ptr, cir.ptr >, ["e", init] {alignment = 8 : i64} cir.store %arg0, %0 : i32, cir.ptr cir.store %arg1, %1 : i32, cir.ptr %7 = cir.load %0 : cir.ptr , i32 @@ -49,6 +51,9 @@ module { %14 = cir.cast(integral, %13 : i32), i64 // MLIR: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 cir.store %14, %6 : i64, cir.ptr + %19 = cir.cast(array_to_ptrdecay, %17 : !cir.ptr>), !cir.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr + cir.store %19, %18 : !cir.ptr, cir.ptr > %15 = cir.const(0 : i32) : i32 cir.store %15, %2 : i32, cir.ptr %16 = cir.load %2 : cir.ptr , i32 From c6fd73e3d7a96de1ee04ccc6b9f49b5db6290199 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 11 Apr 2023 06:43:25 -0300 Subject: [PATCH 0881/2301] [CIR][Lowering][Bugfix] Use llvm types when creating a LLVM::CallOp --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11 +++++++++-- clang/test/CIR/Lowering/call.cir | 13 ++++++++++++- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b39b94a044da..da6babc2f284 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -37,6 +37,7 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/Sequence.h" +#include "llvm/ADT/SmallVector.h" using namespace cir; using namespace llvm; @@ -389,9 +390,15 @@ class CIRCallLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::CallOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { + llvm::SmallVector llvmResults; + auto cirResults = op.getResultTypes(); + + if (getTypeConverter()->convertTypes(cirResults, llvmResults).failed()) + return mlir::failure(); + rewriter.replaceOpWithNewOp( - op, op.getResultTypes(), op.getCalleeAttr(), op.getArgOperands()); - return mlir::LogicalResult::success(); + op, llvmResults, op.getCalleeAttr(), adaptor.getOperands()); + return mlir::success(); } }; diff --git a/clang/test/CIR/Lowering/call.cir b/clang/test/CIR/Lowering/call.cir index 1de50ed9ff23..01a97ec8c24b 100644 --- a/clang/test/CIR/Lowering/call.cir +++ b/clang/test/CIR/Lowering/call.cir @@ -9,7 +9,6 @@ module { cir.call @a() : () -> () cir.return } -} // MLIR: llvm.func @a() { // MLIR-NEXT: llvm.return @@ -26,3 +25,15 @@ module { // LLVM-NEXT: call void @a() // LLVM-NEXT: ret void // LLVM-NEXT: } + + // check operands and results type lowering + cir.func @callee(!cir.ptr) -> !cir.ptr attributes {sym_visibility = "private"} + // MLIR: llvm.func @callee(!llvm.ptr) -> !llvm.ptr + cir.func @caller(%arg0: !cir.ptr) -> !cir.ptr { + // MLIR: llvm.func @caller(%arg0: !llvm.ptr) -> !llvm.ptr + %0 = cir.call @callee(%arg0) : (!cir.ptr) -> !cir.ptr + // MLIR: %{{[0-9]+}} = llvm.call @callee(%arg0) : (!llvm.ptr) -> !llvm.ptr + cir.return %0 : !cir.ptr + } + +} // end module From 8210a782fe53061a22fa03eed12301a5924c214f Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 11 Apr 2023 09:04:13 -0300 Subject: [PATCH 0882/2301] [CIR][Lowering] Lower CIR global operations - Map between CIR and LLVM linkage types - Convert CIR's constant arrays to tensors - Increment code gen tests for globals - Add lowering tests for globals --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 169 +++++++++++++++++- clang/test/CIR/CodeGen/globals.cpp | 33 +++- clang/test/CIR/Lowering/globals.cir | 108 +++++++++++ 3 files changed, 308 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/Lowering/globals.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index da6babc2f284..183478ce0496 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -25,6 +25,8 @@ #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinDialect.h" #include "mlir/IR/IRMapping.h" #include "mlir/Pass/Pass.h" @@ -38,6 +40,8 @@ #include "clang/CIR/Passes.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/Support/Casting.h" +#include using namespace cir; using namespace llvm; @@ -509,6 +513,168 @@ class CIRFuncLowering : public mlir::OpConversionPattern { } }; +template +mlir::DenseElementsAttr +convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr) { + auto type = attr.getType().cast().getEltType(); + auto values = llvm::SmallVector{}; + for (auto element : attr.getValue().cast()) + values.push_back(element.cast().getInt()); + return mlir::DenseElementsAttr::get( + mlir::RankedTensorType::get({(int64_t)values.size()}, type), + llvm::ArrayRef(values)); +} + +std::optional +lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr) { + + // Ensure ConstArrayAttr has a type. + auto typedConstArr = constArr.dyn_cast(); + assert(typedConstArr && "cir::ConstArrayAttr is not a mlir::TypedAttr"); + + // Ensure ConstArrayAttr type is a ArrayType. + auto cirArrayType = typedConstArr.getType().dyn_cast(); + assert(cirArrayType && "cir::ConstArrayAttr is not a cir::ArrayType"); + + // Is a ConstArrayAttr with an cir::ArrayType: fetch element type. + auto type = cirArrayType.getEltType(); + + if (type.isInteger(8)) + return convertToDenseElementsAttr(constArr); + if (type.isInteger(16)) + return convertToDenseElementsAttr(constArr); + if (type.isInteger(32)) + return convertToDenseElementsAttr(constArr); + if (type.isInteger(64)) + return convertToDenseElementsAttr(constArr); + + return std::nullopt; +} + +mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { + using CIR = mlir::cir::GlobalLinkageKind; + using LLVM = mlir::LLVM::Linkage; + + switch (linkage) { + case CIR::AvailableExternallyLinkage: + return LLVM::AvailableExternally; + case CIR::CommonLinkage: + return LLVM::Common; + case CIR::ExternalLinkage: + return LLVM::External; + case CIR::ExternalWeakLinkage: + return LLVM::ExternWeak; + case CIR::InternalLinkage: + return LLVM::Internal; + case CIR::LinkOnceAnyLinkage: + return LLVM::Linkonce; + case CIR::LinkOnceODRLinkage: + return LLVM::LinkonceODR; + case CIR::PrivateLinkage: + return LLVM::Private; + case CIR::WeakAnyLinkage: + return LLVM::Weak; + case CIR::WeakODRLinkage: + return LLVM::WeakODR; + }; +} + +class CIRGetGlobalOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::GetGlobalOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto type = getTypeConverter()->convertType(op.getType()); + auto symbol = op.getName(); + rewriter.replaceOpWithNewOp(op, type, symbol); + return mlir::success(); + } +}; + +class CIRGlobalOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::GlobalOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + + // Fetch required values to create LLVM op. + auto type = getTypeConverter()->convertType(op.getSymType()); + auto isConst = op.getConstant(); + auto linkage = convertLinkage(op.getLinkage()); + auto symbol = op.getSymName(); + auto init = op.getInitialValue(); + + // Check for missing funcionalities. + if (!init.has_value()) { + op.emitError() << "uninitialized globals are not yet supported."; + return mlir::failure(); + } + + // Initializer is a constant array: convert it to a compatible llvm init. + if (auto constArr = init.value().dyn_cast()) { + if (auto attr = constArr.getValue().dyn_cast()) { + init = rewriter.getStringAttr(attr.getValue()); + } else if (auto attr = constArr.getValue().dyn_cast()) { + if (!(init = lowerConstArrayAttr(constArr))) { + op.emitError() + << "unsupported lowering for #cir.const_array with element type " + << type; + return mlir::failure(); + } + } else { + op.emitError() + << "unsupported lowering for #cir.const_array with value " + << constArr.getValue(); + return mlir::failure(); + } + } else if (llvm::isa(init.value())) { + // Nothing to do since LLVM already supports these types as initializers. + } + // Initializer is a global: load global value in initializer block. + else if (auto attr = init.value().dyn_cast()) { + auto newGlobalOp = rewriter.replaceOpWithNewOp( + op, type, isConst, linkage, symbol, mlir::Attribute()); + mlir::OpBuilder::InsertionGuard guard(rewriter); + + // Create initializer block. + auto *newBlock = new mlir::Block(); + newGlobalOp.getRegion().push_back(newBlock); + + // Fetch global used as initializer. + auto sourceSymbol = + dyn_cast(mlir::SymbolTable::lookupSymbolIn( + op->getParentOfType(), attr.getValue())); + + // Load and return the initializer value. + rewriter.setInsertionPointToEnd(newBlock); + auto addressOfOp = rewriter.create( + op->getLoc(), mlir::LLVM::LLVMPointerType::get(getContext()), + sourceSymbol.getSymName()); + llvm::SmallVector offset{0}; + auto gepOp = rewriter.create( + op->getLoc(), type, sourceSymbol.getType(), addressOfOp.getResult(), + offset); + rewriter.create(op->getLoc(), gepOp.getResult()); + + return mlir::success(); + } else { + op.emitError() << "usupported initializer '" << init.value() << "'"; + return mlir::failure(); + } + + // Rewrite op. + rewriter.replaceOpWithNewOp( + op, type, isConst, linkage, symbol, init.value()); + return mlir::success(); + } +}; + class CIRUnaryOpLowering : public mlir::OpConversionPattern { public: @@ -839,7 +1005,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, - CIRScopeOpLowering, CIRCastOpLowering, CIRIfLowering>( + CIRScopeOpLowering, CIRCastOpLowering, CIRIfLowering, + CIRGlobalOpLowering, CIRGetGlobalOpLowering>( converter, patterns.getContext()); } diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 3b053403ba1c..bfa90a61d750 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -75,4 +75,35 @@ int use_func() { return func(); } // CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 // CHECK-NEXT: cir.return %2 : i32 -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } + + +char string[] = "whatnow"; +// CHECK: cir.global external @string = #cir.const_array<[119 : i8, 104 : i8, 97 : i8, 116 : i8, 110 : i8, 111 : i8, 119 : i8, 0 : i8] : !cir.array> : !cir.array +unsigned uint[] = {255}; +// CHECK: cir.global external @uint = #cir.const_array<[255 : i32] : !cir.array> : !cir.array +short sshort[] = {11111, 22222}; +// CHECK: cir.global external @sshort = #cir.const_array<[11111 : i16, 22222 : i16] : !cir.array> : !cir.array +int sint[] = {123, 456, 789}; +// CHECK: cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32] : !cir.array> : !cir.array +long long ll[] = {999999999, 0, 0, 0}; +// CHECK: cir.global external @ll = #cir.const_array<[999999999, 0, 0, 0] : !cir.array> : !cir.array + +void get_globals() { + // CHECK: cir.func @_Z11get_globalsv() + char *s = string; + // CHECK: %[[RES:[0-9]+]] = cir.get_global @string : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + unsigned *u = uint; + // CHECK: %[[RES:[0-9]+]] = cir.get_global @uint : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + short *ss = sshort; + // CHECK: %[[RES:[0-9]+]] = cir.get_global @sshort : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + int *si = sint; + // CHECK: %[[RES:[0-9]+]] = cir.get_global @sint : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + long long *l = ll; + // CHECK: %[[RES:[0-9]+]] = cir.get_global @ll : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr +} diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir new file mode 100644 index 000000000000..c0c5b45d295f --- /dev/null +++ b/clang/test/CIR/Lowering/globals.cir @@ -0,0 +1,108 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +module { + cir.global external @a = 3 : i32 + cir.global external @c = 2 : i64 + cir.global external @y = 3.400000e+00 : f32 + cir.global external @w = 4.300000e+00 : f64 + cir.global external @x = 51 : i8 + cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> : !cir.array + cir.global external @alpha = #cir.const_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8] : !cir.array> : !cir.array + cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s = @".str": !cir.ptr + // MLIR: llvm.mlir.global internal constant @".str"("example\00") {addr_space = 0 : i32} + // MLIR: llvm.mlir.global external @s() {addr_space = 0 : i32} : !llvm.ptr { + // MLIR: %0 = llvm.mlir.addressof @".str" : !llvm.ptr + // MLIR: %1 = llvm.getelementptr %0[0] : (!llvm.ptr) -> !llvm.ptr + // MLIR: llvm.return %1 : !llvm.ptr + // MLIR: } + // LLVM: @.str = internal constant [8 x i8] c"example\00" + // LLVM: @s = global ptr @.str + cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s1 = @".str1": !cir.ptr + cir.global external @s2 = @".str": !cir.ptr + cir.func @_Z10use_globalv() { + %0 = cir.alloca i32, cir.ptr , ["li", init] {alignment = 4 : i64} + %1 = cir.get_global @a : cir.ptr + %2 = cir.load %1 : cir.ptr , i32 + cir.store %2, %0 : i32, cir.ptr + cir.return + } + cir.func @_Z17use_global_stringv() { + %0 = cir.alloca i8, cir.ptr , ["c", init] {alignment = 1 : i64} + %1 = cir.get_global @s2 : cir.ptr > + %2 = cir.load %1 : cir.ptr >, !cir.ptr + %3 = cir.const(0 : i32) : i32 + %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr + %5 = cir.load %4 : cir.ptr , i8 + cir.store %5, %0 : i8, cir.ptr + cir.return + } + cir.func linkonce_odr @_Z4funcIiET_v() -> i32 { + %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} + %1 = cir.const(0 : i32) : i32 + cir.store %1, %0 : i32, cir.ptr + %2 = cir.load %0 : cir.ptr , i32 + cir.return %2 : i32 + } + cir.func @_Z8use_funcv() -> i32 { + %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} + %1 = cir.call @_Z4funcIiET_v() : () -> i32 + cir.store %1, %0 : i32, cir.ptr + %2 = cir.load %0 : cir.ptr , i32 + cir.return %2 : i32 + } + cir.global external @string = #cir.const_array<[119 : i8, 104 : i8, 97 : i8, 116 : i8, 110 : i8, 111 : i8, 119 : i8, 0 : i8] : !cir.array> : !cir.array + // MLIR: llvm.mlir.global external @string(dense<[119, 104, 97, 116, 110, 111, 119, 0]> : tensor<8xi8>) {addr_space = 0 : i32} : !llvm.array<8 x i8> + // LLVM: @string = global [8 x i8] c"whatnow\00" + cir.global external @uint = #cir.const_array<[255 : i32] : !cir.array> : !cir.array + // MLIR: llvm.mlir.global external @uint(dense<255> : tensor<1xi32>) {addr_space = 0 : i32} : !llvm.array<1 x i32> + // LLVM: @uint = global [1 x i32] [i32 255] + cir.global external @sshort = #cir.const_array<[11111 : i16, 22222 : i16] : !cir.array> : !cir.array + // MLIR: llvm.mlir.global external @sshort(dense<[11111, 22222]> : tensor<2xi16>) {addr_space = 0 : i32} : !llvm.array<2 x i16> + // LLVM: @sshort = global [2 x i16] [i16 11111, i16 22222] + cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32] : !cir.array> : !cir.array + // MLIR: llvm.mlir.global external @sint(dense<[123, 456, 789]> : tensor<3xi32>) {addr_space = 0 : i32} : !llvm.array<3 x i32> + // LLVM: @sint = global [3 x i32] [i32 123, i32 456, i32 789] + cir.global external @ll = #cir.const_array<[999999999, 0, 0, 0] : !cir.array> : !cir.array + // MLIR: llvm.mlir.global external @ll(dense<[999999999, 0, 0, 0]> : tensor<4xi64>) {addr_space = 0 : i32} : !llvm.array<4 x i64> + // LLVM: @ll = global [4 x i64] [i64 999999999, i64 0, i64 0, i64 0] + cir.func @_Z11get_globalsv() { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr, cir.ptr >, ["u", init] {alignment = 8 : i64} + %2 = cir.alloca !cir.ptr, cir.ptr >, ["ss", init] {alignment = 8 : i64} + %3 = cir.alloca !cir.ptr, cir.ptr >, ["si", init] {alignment = 8 : i64} + %4 = cir.alloca !cir.ptr, cir.ptr >, ["l", init] {alignment = 8 : i64} + %5 = cir.get_global @string : cir.ptr > + %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr + // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @string : !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr + // LLVM: store ptr @string, ptr %{{[0-9]+}} + cir.store %6, %0 : !cir.ptr, cir.ptr > + %7 = cir.get_global @uint : cir.ptr > + %8 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr + // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @uint : !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr + // LLVM: store ptr @uint, ptr %{{[0-9]+}} + cir.store %8, %1 : !cir.ptr, cir.ptr > + %9 = cir.get_global @sshort : cir.ptr > + %10 = cir.cast(array_to_ptrdecay, %9 : !cir.ptr>), !cir.ptr + // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sshort : !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr + // LLVM: store ptr @sshort, ptr %{{[0-9]+}} + cir.store %10, %2 : !cir.ptr, cir.ptr > + %11 = cir.get_global @sint : cir.ptr > + %12 = cir.cast(array_to_ptrdecay, %11 : !cir.ptr>), !cir.ptr + // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sint : !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr + // LLVM: store ptr @sint, ptr %{{[0-9]+}} + cir.store %12, %3 : !cir.ptr, cir.ptr > + %13 = cir.get_global @ll : cir.ptr > + %14 = cir.cast(array_to_ptrdecay, %13 : !cir.ptr>), !cir.ptr + // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @ll : !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr + // LLVM: store ptr @ll, ptr %{{[0-9]+}} + cir.store %14, %4 : !cir.ptr, cir.ptr > + cir.return + } +} From 9ec08d409df62b8d3fa063929db67aed083bf680 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 11 Apr 2023 09:18:14 -0300 Subject: [PATCH 0883/2301] [CIR][Bugfix] Mark Builtin dialect as illegal when lowering to LLVM --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 183478ce0496..15b64db7e27c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1057,7 +1057,8 @@ void ConvertCIRToLLVMPass::runOnOperation() { >(); // clang-format on target.addLegalDialect(); - target.addIllegalDialect(); + target.addIllegalDialect(); getOperation()->removeAttr("cir.sob"); From 9019db0bc2d9738765782b4261bf3dff2cfc8c00 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 11 Apr 2023 14:57:16 -0300 Subject: [PATCH 0884/2301] [CIR][Bugfix] Fix clangir-direct-lowering flag being ignored --- clang/lib/Driver/ToolChains/Clang.cpp | 3 +++ clang/lib/Frontend/CompilerInvocation.cpp | 3 +++ clang/test/CIR/Executables/hello.c | 11 +++++++++++ 3 files changed, 17 insertions(+) create mode 100644 clang/test/CIR/Executables/hello.c diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 17967893aac4..eabb3374500b 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5244,6 +5244,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_fclangir) || Args.hasArg(options::OPT_emit_cir)) CmdArgs.push_back("-fclangir"); + if (Args.hasArg(options::OPT_fclangir_direct_lowering)) + CmdArgs.push_back("-fclangir-direct-lowering"); + if (Args.hasArg(options::OPT_clangir_disable_passes)) CmdArgs.push_back("-clangir-disable-passes"); diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 2779de1eef3c..c85817b29089 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3093,6 +3093,9 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Args.hasArg(OPT_fclangir) || Args.hasArg(OPT_emit_cir)) Opts.UseClangIRPipeline = true; + if (Args.hasArg(OPT_fclangir_direct_lowering)) + Opts.ClangIRDirectLowering = true; + if (Args.hasArg(OPT_clangir_disable_passes)) Opts.ClangIRDisablePasses = true; diff --git a/clang/test/CIR/Executables/hello.c b/clang/test/CIR/Executables/hello.c new file mode 100644 index 000000000000..42e9e0802fc6 --- /dev/null +++ b/clang/test/CIR/Executables/hello.c @@ -0,0 +1,11 @@ +// RUN: %clang -fclangir -fclangir-direct-lowering -o %t %s +// RUN: %t | FileCheck %s +// XFAIL: * + +int printf(const char *format); + +int main (void) { + printf ("Hello, world!\n"); + // CHECK: Hello, world! + return 0; +} From f9669d33ded13e7a8687450a3eeb365018e23a3a Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 11 Apr 2023 15:01:34 -0300 Subject: [PATCH 0885/2301] [CIR][Bugfix] Fix clangir-enable flag error message --- clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index fb2b94b63877..3ec147ce8447 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -56,8 +56,9 @@ CreateFrontendBaseAction(CompilerInstance &CI) { Act == EmitObj; if (UseCIR && !IsImplementedCIROutput) - llvm::report_fatal_error("-fclangir currently only works with -emit-cir, " - "-emit-cir-only, -emit-mlir, -emit-llvm and -S"); + llvm::report_fatal_error( + "-fclangir currently only works with -emit-cir, -emit-cir-only, " + "-emit-mlir, -emit-llvm, -emit-obj, and -S"); if (!UseCIR && EmitsCIR) llvm::report_fatal_error( "-emit-cir and -emit-cir-only only valid when using -fclangir"); From b35a78bdbf08ee5fdc5b4e54ba2da24cc3a0cd5c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Apr 2023 20:24:57 -0700 Subject: [PATCH 0886/2301] [CIR][CIRGen] Dtors: Populate PopCleanupBlock with more skeleton --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 164 +++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 5 +- 2 files changed, 166 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 28ae5fb7756a..ee3c119ee086 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -63,6 +63,21 @@ void CIRGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) { cleanup.setTestFlagInEHCleanup(); } +/// We don't need a normal entry block for the given cleanup. +/// Optimistic fixup branches can cause these blocks to come into +/// existence anyway; if so, destroy it. +/// +/// The validity of this transformation is very much specific to the +/// exact ways in which we form branches to cleanup entries. +static void destroyOptimisticNormalEntry(CIRGenFunction &CGF, + EHCleanupScope &scope) { + auto *entry = scope.getNormalBlock(); + if (!entry) + return; + + llvm_unreachable("NYI"); +} + /// Pops a cleanup block. If the block includes a normal cleanup, the /// current insertion point is threaded through the cleanup, as are /// any branch fixups on the cleanup. @@ -81,6 +96,113 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { [[maybe_unused]] Address EHActiveFlag = Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : Address::invalid(); + + // Check whether we need an EH cleanup. This is only true if we've + // generated a lazy EH cleanup block. + auto *EHEntry = Scope.getCachedEHDispatchBlock(); + assert(Scope.hasEHBranches() == (EHEntry != nullptr)); + bool RequiresEHCleanup = (EHEntry != nullptr); + + // Check the three conditions which might require a normal cleanup: + + // - whether there are branch fix-ups through this cleanup + unsigned FixupDepth = Scope.getFixupDepth(); + bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth; + + // - whether there are branch-throughs or branch-afters + bool HasExistingBranches = Scope.hasBranches(); + + // - whether there's a fallthrough + auto *FallthroughSource = builder.getInsertionBlock(); + bool HasFallthrough = (FallthroughSource != nullptr && IsActive); + + // Branch-through fall-throughs leave the insertion point set to the + // end of the last cleanup, which points to the current scope. The + // rest of CIR gen doesn't need to worry about this; it only happens + // during the execution of PopCleanupBlocks(). + bool HasTerminator = + !FallthroughSource->empty() && + FallthroughSource->back().mightHaveTrait(); + bool HasPrebranchedFallthrough = (FallthroughSource && HasTerminator && + FallthroughSource->getTerminator()); + + // If this is a normal cleanup, then having a prebranched + // fallthrough implies that the fallthrough source unconditionally + // jumps here. + assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough || + (Scope.getNormalBlock() && + FallthroughSource->getTerminator()->getSuccessor(0) == + Scope.getNormalBlock())); + + bool RequiresNormalCleanup = false; + if (Scope.isNormalCleanup() && + (HasFixups || HasExistingBranches || HasFallthrough)) { + RequiresNormalCleanup = true; + } + + // If we have a prebranched fallthrough into an inactive normal + // cleanup, rewrite it so that it leads to the appropriate place. + if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) { + llvm_unreachable("NYI"); + } + + // If we don't need the cleanup at all, we're done. + if (!RequiresNormalCleanup && !RequiresEHCleanup) { + llvm_unreachable("NYI"); + } + + // Copy the cleanup emission data out. This uses either a stack + // array or malloc'd memory, depending on the size, which is + // behavior that SmallVector would provide, if we could use it + // here. Unfortunately, if you ask for a SmallVector, the + // alignment isn't sufficient. + auto *CleanupSource = reinterpret_cast(Scope.getCleanupBuffer()); + alignas(EHScopeStack::ScopeStackAlignment) char + CleanupBufferStack[8 * sizeof(void *)]; + std::unique_ptr CleanupBufferHeap; + size_t CleanupSize = Scope.getCleanupSize(); + + if (CleanupSize <= sizeof(CleanupBufferStack)) { + memcpy(CleanupBufferStack, CleanupSource, CleanupSize); + } else { + CleanupBufferHeap.reset(new char[CleanupSize]); + memcpy(CleanupBufferHeap.get(), CleanupSource, CleanupSize); + } + + EHScopeStack::Cleanup::Flags cleanupFlags; + if (Scope.isNormalCleanup()) + cleanupFlags.setIsNormalCleanupKind(); + if (Scope.isEHCleanup()) + cleanupFlags.setIsEHCleanupKind(); + + // Under -EHa, invoke seh.scope.end() to mark scope end before dtor + bool IsEHa = getLangOpts().EHAsynch && !Scope.isLifetimeMarker(); + // const EHPersonality &Personality = EHPersonality::get(*this); + if (!RequiresNormalCleanup) { + llvm_unreachable("NYI"); + } else { + // If we have a fallthrough and no other need for the cleanup, + // emit it directly. + if (HasFallthrough && !HasPrebranchedFallthrough && !HasFixups && + !HasExistingBranches) { + + // mark SEH scope end for fall-through flow + if (IsEHa) { + llvm_unreachable("NYI"); + } + + destroyOptimisticNormalEntry(*this, Scope); + EHStack.popCleanup(); + // CONTINUE HERE... + // EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); + + // Otherwise, the best approach is to thread everything through + // the cleanup block and then try to clean up after ourselves. + } else { + llvm_unreachable("NYI"); + } + } + llvm_unreachable("NYI"); } @@ -217,4 +339,44 @@ void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) { llvm_unreachable("NYI"); return Scope->getCleanupBuffer(); -} \ No newline at end of file +} + +void EHScopeStack::popCleanup() { + assert(!empty() && "popping exception stack when not empty"); + + assert(isa(*begin())); + EHCleanupScope &Cleanup = cast(*begin()); + InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup(); + InnermostEHScope = Cleanup.getEnclosingEHScope(); + deallocate(Cleanup.getAllocatedSize()); + + // Destroy the cleanup. + Cleanup.Destroy(); + + // Check whether we can shrink the branch-fixups stack. + if (!BranchFixups.empty()) { + // If we no longer have any normal cleanups, all the fixups are + // complete. + if (!hasNormalCleanups()) + BranchFixups.clear(); + + // Otherwise we can still trim out unnecessary nulls. + else + popNullFixups(); + } +} + +void EHScopeStack::deallocate(size_t Size) { + StartOfData += llvm::alignTo(Size, ScopeStackAlignment); +} + +/// Remove any 'null' fixups on the stack. However, we can't pop more +/// fixups than the fixup depth on the innermost normal cleanup, or +/// else fixups that we try to add to that cleanup will end up in the +/// wrong place. We *could* try to shrink fixup depths, but that's +/// actually a lot of work for little benefit. +void EHScopeStack::popNullFixups() { + // We expect this to only be called when there's still an innermost + // normal cleanup; otherwise there really shouldn't be any fixups. + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 9efe3e709d41..a043e120cff5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -347,8 +347,9 @@ RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor, CallArgList Args; commonBuildCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam, ImplicitParamTy, CE, Args, nullptr); - assert((CE || currSrcLoc) && "expected source location"); + assert((CE || Dtor.getDecl()) && "expected source location provider"); return buildCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee, ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall, - CE ? getLoc(CE->getExprLoc()) : *currSrcLoc); + CE ? getLoc(CE->getExprLoc()) + : getLoc(Dtor.getDecl()->getSourceRange())); } From 5e6f53da70632cb4321d9bb6338a6a58699431f5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 18 Apr 2023 10:44:48 -0700 Subject: [PATCH 0887/2301] [CIR][CIRGen] Dtors: emit calls to deleting ctor - Finish CallDtorDelete - Add EmitCLenaup - build calls to delete (for deleting ctor) Testcase still up to come. --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 12 +- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 32 ++++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 112 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 + .../CodeGen/UnimplementedFeatureGuarding.h | 5 +- 6 files changed, 155 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index bba773068a81..714a4b5aa81c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1079,13 +1079,11 @@ struct CallDtorDelete final : EHScopeStack::Cleanup { CallDtorDelete() {} void Emit(CIRGenFunction &CGF, Flags flags) override { - [[maybe_unused]] const CXXDestructorDecl *Dtor = - cast(CGF.CurCodeDecl); - [[maybe_unused]] const CXXRecordDecl *ClassDecl = Dtor->getParent(); - llvm_unreachable("NYI"); - // CGF.EmitDeleteCall(Dtor->getOperatorDelete(), - // LoadThisForDtorDelete(CGF, Dtor), - // CGF.getContext().getTagDeclType(ClassDecl)); + const CXXDestructorDecl *Dtor = cast(CGF.CurCodeDecl); + const CXXRecordDecl *ClassDecl = Dtor->getParent(); + CGF.buildDeleteCall(Dtor->getOperatorDelete(), + LoadThisForDtorDelete(CGF, Dtor), + CGF.getContext().getTagDeclType(ClassDecl)); } }; } // namespace diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index ee3c119ee086..0b95afa69532 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -78,6 +78,25 @@ static void destroyOptimisticNormalEntry(CIRGenFunction &CGF, llvm_unreachable("NYI"); } +static void buildCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, + EHScopeStack::Cleanup::Flags flags, + Address ActiveFlag) { + // If there's an active flag, load it and skip the cleanup if it's + // false. + if (ActiveFlag.isValid()) { + llvm_unreachable("NYI"); + } + + // Ask the cleanup to emit itself. + Fn->Emit(CGF, flags); + assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?"); + + // Emit the continuation block if there was an active flag. + if (ActiveFlag.isValid()) { + llvm_unreachable("NYI"); + } +} + /// Pops a cleanup block. If the block includes a normal cleanup, the /// current insertion point is threaded through the cleanup, as are /// any branch fixups on the cleanup. @@ -161,12 +180,15 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { CleanupBufferStack[8 * sizeof(void *)]; std::unique_ptr CleanupBufferHeap; size_t CleanupSize = Scope.getCleanupSize(); + EHScopeStack::Cleanup *Fn; if (CleanupSize <= sizeof(CleanupBufferStack)) { memcpy(CleanupBufferStack, CleanupSource, CleanupSize); + Fn = reinterpret_cast(CleanupBufferStack); } else { CleanupBufferHeap.reset(new char[CleanupSize]); memcpy(CleanupBufferHeap.get(), CleanupSource, CleanupSize); + Fn = reinterpret_cast(CleanupBufferHeap.get()); } EHScopeStack::Cleanup::Flags cleanupFlags; @@ -193,8 +215,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { destroyOptimisticNormalEntry(*this, Scope); EHStack.popCleanup(); - // CONTINUE HERE... - // EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); + buildCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); // Otherwise, the best approach is to thread everything through // the cleanup block and then try to clean up after ourselves. @@ -203,7 +224,12 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { } } - llvm_unreachable("NYI"); + assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0); + + // Emit the EH cleanup if required. + if (RequiresEHCleanup) { + llvm_unreachable("NYI"); + } } /// Pops cleanup blocks until the given savepoint is reached. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index a043e120cff5..57988ae6eb23 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -353,3 +353,115 @@ RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor, CE ? getLoc(CE->getExprLoc()) : getLoc(Dtor.getDecl()->getSourceRange())); } + +namespace { +/// The parameters to pass to a usual operator delete. +struct UsualDeleteParams { + bool DestroyingDelete = false; + bool Size = false; + bool Alignment = false; +}; +} // namespace + +// FIXME(cir): this should be shared with LLVM codegen +static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) { + UsualDeleteParams Params; + + const FunctionProtoType *FPT = FD->getType()->castAs(); + auto AI = FPT->param_type_begin(), AE = FPT->param_type_end(); + + // The first argument is always a void*. + ++AI; + + // The next parameter may be a std::destroying_delete_t. + if (FD->isDestroyingOperatorDelete()) { + Params.DestroyingDelete = true; + assert(AI != AE); + ++AI; + } + + // Figure out what other parameters we should be implicitly passing. + if (AI != AE && (*AI)->isIntegerType()) { + Params.Size = true; + ++AI; + } + + if (AI != AE && (*AI)->isAlignValT()) { + Params.Alignment = true; + ++AI; + } + + assert(AI == AE && "unexpected usual deallocation function parameter"); + return Params; +} + +/// Emit a call to an operator new or operator delete function, as implicitly +/// created by new-expressions and delete-expressions. +static RValue buildNewDeleteCall(CIRGenFunction &CGF, + const FunctionDecl *CalleeDecl, + const FunctionProtoType *CalleeType, + const CallArgList &Args) { + mlir::cir::CallOp CallOrInvoke{}; + auto CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl); + CIRGenCallee Callee = + CIRGenCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl)); + RValue RV = CGF.buildCall(CGF.CGM.getTypes().arrangeFreeFunctionCall( + Args, CalleeType, /*ChainCall=*/false), + Callee, ReturnValueSlot(), Args, &CallOrInvoke); + + /// C++1y [expr.new]p10: + /// [In a new-expression,] an implementation is allowed to omit a call + /// to a replaceable global allocation function. + /// + /// We model such elidable calls with the 'builtin' attribute. + assert(!UnimplementedFeature::attributeBuiltin()); + return RV; +} + +void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, + mlir::Value Ptr, QualType DeleteTy, + mlir::Value NumElements, + CharUnits CookieSize) { + assert((!NumElements && CookieSize.isZero()) || + DeleteFD->getOverloadedOperator() == OO_Array_Delete); + + const auto *DeleteFTy = DeleteFD->getType()->castAs(); + CallArgList DeleteArgs; + + auto Params = getUsualDeleteParams(DeleteFD); + auto ParamTypeIt = DeleteFTy->param_type_begin(); + + // Pass the pointer itself. + QualType ArgTy = *ParamTypeIt++; + mlir::Value DeletePtr = + builder.createBitcast(Ptr.getLoc(), Ptr, ConvertType(ArgTy)); + DeleteArgs.add(RValue::get(DeletePtr), ArgTy); + + // Pass the std::destroying_delete tag if present. + mlir::Value DestroyingDeleteTag{}; + if (Params.DestroyingDelete) { + llvm_unreachable("NYI"); + } + + // Pass the size if the delete function has a size_t parameter. + if (Params.Size) { + llvm_unreachable("NYI"); + } + + // Pass the alignment if the delete function has an align_val_t parameter. + if (Params.Alignment) { + llvm_unreachable("NYI"); + } + + assert(ParamTypeIt == DeleteFTy->param_type_end() && + "unknown parameter to usual delete function"); + + // Emit the call to delete. + buildNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs); + + // If call argument lowering didn't use the destroying_delete_t alloca, + // remove it again. + if (DestroyingDeleteTag && DestroyingDeleteTag.use_empty()) { + llvm_unreachable("NYI"); // DestroyingDeleteTag->eraseFromParent(); + } +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 82e1680cb5a6..a57ee2379618 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -468,6 +468,7 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, { auto FnBeginLoc = getLoc(FD->getBody()->getEndLoc()); auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); + SourceLocRAIIObject fnLoc{*this, getLoc(Loc)}; assert(Fn.isDeclaration() && "Function already has body?"); mlir::Block *EntryBB = Fn.addEntryBlock(); @@ -498,7 +499,6 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, cast(FD)->isLambdaStaticInvoker()) { // The lambda static invoker function is special, because it forwards or // clones the body of the function call operator (but is actually static). - SourceLocRAIIObject Loc{*this, FnBeginLoc}; buildLambdaStaticInvokeBody(cast(FD)); } else if (FD->isDefaulted() && isa(FD) && (cast(FD)->isCopyAssignmentOperator() || diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index dfd2aa0faba1..35f958d79e48 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -736,6 +736,10 @@ class CIRGenFunction { Address Ptr); mlir::Value buildCXXNewExpr(const CXXNewExpr *E); + void buildDeleteCall(const FunctionDecl *DeleteFD, mlir::Value Ptr, + QualType DeleteTy, mlir::Value NumElements = nullptr, + CharUnits CookieSize = CharUnits()); + mlir::Value createLoad(const clang::VarDecl *VD, const char *Name); // Wrapper for function prototype sources. Wraps either a FunctionProtoType or diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index abd2a79885dc..da527ee86159 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -55,6 +55,10 @@ struct UnimplementedFeature { // Debug info static bool generateDebugInfo() { return false; } + // LLVM Attributes + static bool attributeBuiltin() { return false; } + static bool attributeNoBuiltin() { return false; } + // Coroutines static bool unhandledException() { return false; } @@ -64,7 +68,6 @@ struct UnimplementedFeature { static bool requiresReturnValueCheck() { return false; } static bool shouldEmitLifetimeMarkers() { return false; } static bool peepholeProtection() { return false; } - static bool attributeNoBuiltin() { return false; } static bool CGCapturedStmtInfo() { return false; } static bool cxxABI() { return false; } static bool openCL() { return false; } From e0fcb81f5a98ffec2b8f8c5964c68623c8f02ca3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 19 Apr 2023 20:24:08 -0700 Subject: [PATCH 0888/2301] [CIR] Improve parsing and naming for #cir..ast nodes --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 6 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 18 ++-------- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 34 ++++++++++++++++--- clang/test/CIR/IR/struct.cir | 4 +++ 4 files changed, 39 insertions(+), 23 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 96bb353b1825..4c4342289efd 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -261,8 +261,8 @@ class ASTDecl traits = []> let genVerifyDecl = 1; } -def ASTFunctionDeclAttr : ASTDecl<"FunctionDecl", "function.decl">; -def ASTVarDeclAttr : ASTDecl<"VarDecl", "var.decl">; -def ASTRecordDeclAttr : ASTDecl<"RecordDecl", "record.decl">; +def ASTFunctionDeclAttr : ASTDecl<"FunctionDecl", "fndecl">; +def ASTVarDeclAttr : ASTDecl<"VarDecl", "vardecl">; +def ASTRecordDeclAttr : ASTDecl<"RecordDecl", "recdecl">; #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a20cb3de244b..d068c1e1bef2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1763,7 +1763,7 @@ ::mlir::Attribute ASTFunctionDeclAttr::parse(::mlir::AsmParser &parser, ::mlir::Type type) { // We cannot really parse anything AST related at this point // since we have no serialization/JSON story. - return mlir::Attribute(); + return ASTFunctionDeclAttr::get(parser.getContext(), nullptr); } void ASTFunctionDeclAttr::print(::mlir::AsmPrinter &printer) const { @@ -1773,10 +1773,6 @@ void ASTFunctionDeclAttr::print(::mlir::AsmPrinter &printer) const { LogicalResult ASTFunctionDeclAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, const ::clang::FunctionDecl *decl) { - if (!decl) { - emitError() << "expected non-null AST declaration"; - return failure(); - } return success(); } @@ -1784,7 +1780,7 @@ ::mlir::Attribute ASTVarDeclAttr::parse(::mlir::AsmParser &parser, ::mlir::Type type) { // We cannot really parse anything AST related at this point // since we have no serialization/JSON story. - return mlir::Attribute(); + return ASTVarDeclAttr::get(parser.getContext(), nullptr); } void ASTVarDeclAttr::print(::mlir::AsmPrinter &printer) const { @@ -1794,10 +1790,6 @@ void ASTVarDeclAttr::print(::mlir::AsmPrinter &printer) const { LogicalResult ASTVarDeclAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, const ::clang::VarDecl *decl) { - if (!decl) { - emitError() << "expected non-null AST declaration"; - return failure(); - } return success(); } @@ -1805,7 +1797,7 @@ ::mlir::Attribute ASTRecordDeclAttr::parse(::mlir::AsmParser &parser, ::mlir::Type type) { // We cannot really parse anything AST related at this point // since we have no serialization/JSON story. - return mlir::Attribute(); + return ASTRecordDeclAttr::get(parser.getContext(), nullptr); } void ASTRecordDeclAttr::print(::mlir::AsmPrinter &printer) const { @@ -1815,10 +1807,6 @@ void ASTRecordDeclAttr::print(::mlir::AsmPrinter &printer) const { LogicalResult ASTRecordDeclAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, const ::clang::RecordDecl *decl) { - if (!decl) { - emitError() << "expected non-null AST declaration"; - return failure(); - } return success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index c511e9b16e46..fec1f8741ad6 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -80,15 +80,39 @@ Type StructType::parse(mlir::AsmParser &parser) { llvm::SmallVector members; bool parsedBody = false; + auto parseASTAttribute = [&](Attribute &attr) { + auto optAttr = parser.parseOptionalAttribute(attr); + if (optAttr.has_value()) { + if (failed(*optAttr)) + return false; + if (attr.isa() || attr.isa() || + attr.isa()) + return true; + parser.emitError(parser.getCurrentLocation(), + "Unknown cir.struct attribute"); + return false; + } + return false; + }; + while (mlir::succeeded(parser.parseOptionalComma())) { if (mlir::succeeded(parser.parseOptionalKeyword("incomplete"))) - break; - // FIXME: add parsing for ast node. + continue; + parsedBody = true; Type nextMember; - if (parser.parseType(nextMember)) - return Type(); - members.push_back(nextMember); + auto optTy = parser.parseOptionalType(nextMember); + if (optTy.has_value()) { + if (failed(*optTy)) + return Type(); + members.push_back(nextMember); + continue; + } + + // Maybe it's an AST attribute: always last member, break. + Attribute astAttr; + if (parseASTAttribute(astAttr)) + break; } if (parser.parseGreater()) diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index eab85379ba2d..25fb6214751d 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -1,5 +1,9 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s +!ty_2222 = !cir.struct<"", !cir.array x 5>> +!ty_22221 = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> +!ty_22class2EA22 = !cir.struct<"class.A", incomplete, #cir.recdecl.ast> + module { cir.func @structs() { %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] From 1c42424c3faae151c3d0e3823d2c2a75e0b16013 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 20 Apr 2023 15:04:15 -0700 Subject: [PATCH 0889/2301] [CIR][CIRGen] Globals: always recompute visibility when adding initializers --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 9 +++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 35 ++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 ++ clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 4 +-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 +++- 5 files changed, 41 insertions(+), 16 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index d605dfd0fab8..936dcc7a9775 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1198,8 +1198,7 @@ mlir::cir::GlobalOp CIRGenItaniumRTTIBuilder::GetAddrOfTypeName( auto GV = CGM.createOrReplaceCXXRuntimeVariable(loc, Name, Init.getType(), Linkage, Align); - - GV.setInitialValueAttr(Init); + CIRGenModule::setInitializer(GV, Init); return GV; } @@ -1430,14 +1429,14 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( assert(!UnimplementedFeature::setPartition()); assert(!UnimplementedFeature::setDSOLocal()); mlir::SymbolTable::setSymbolVisibility( - TypeName, CIRGenModule::getMLIRVisibilityFromCIRLinkage(Linkage)); + TypeName, CIRGenModule::getMLIRVisibility(TypeName)); // TODO(cir): setup other bits for GV assert(!UnimplementedFeature::setDLLStorageClass()); assert(!UnimplementedFeature::setPartition()); assert(!UnimplementedFeature::setDSOLocal()); - mlir::SymbolTable::setSymbolVisibility( - GV, CIRGenModule::getMLIRVisibilityFromCIRLinkage(Linkage)); + mlir::SymbolTable::setSymbolVisibility(GV, + CIRGenModule::getMLIRVisibility(GV)); return mlir::cir::GlobalViewAttr::get( builder.getInt8PtrTy(), diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index f27bbc5f1756..767ef5f9027d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -403,6 +403,11 @@ mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM, g = builder.create(loc, name, t, isCst); if (!curCGF) CGM.getModule().push_back(g); + + // Default to private until we can judge based on the initializer, + // since MLIR doesn't allow public declarations. + mlir::SymbolTable::setSymbolVisibility( + g, mlir::SymbolTable::Visibility::Private); } return g; } @@ -439,8 +444,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage; Entry.setLinkageAttr( mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), LT)); - mlir::SymbolTable::setSymbolVisibility( - Entry, getMLIRVisibilityFromCIRLinkage(LT)); + mlir::SymbolTable::setSymbolVisibility(Entry, getMLIRVisibility(Entry)); } } @@ -776,7 +780,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, } // Set initializer and finalize emission - GV.setInitialValueAttr(Init); + CIRGenModule::setInitializer(GV, Init); if (emitter) emitter->finalize(GV); @@ -942,9 +946,7 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, GV.setAlignmentAttr(CGM.getSize(Alignment)); GV.setLinkageAttr( mlir::cir::GlobalLinkageKindAttr::get(CGM.getBuilder().getContext(), LT)); - mlir::SymbolTable::setSymbolVisibility( - GV, CIRGenModule::getMLIRVisibilityFromCIRLinkage(LT)); - GV.setInitialValueAttr(C); + CIRGenModule::setInitializer(GV, C); // TODO(cir) assert(!cir::UnimplementedFeature::threadLocal() && "NYI"); @@ -1247,6 +1249,23 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context, return false; } +void CIRGenModule::setInitializer(mlir::cir::GlobalOp &global, + mlir::Attribute value) { + // Recompute visibility when updating initializer. + global.setInitialValueAttr(value); + mlir::SymbolTable::setSymbolVisibility( + global, CIRGenModule::getMLIRVisibility(global)); +} + +mlir::SymbolTable::Visibility +CIRGenModule::getMLIRVisibility(mlir::cir::GlobalOp op) { + // MLIR doesn't accept public symbols declarations (only + // definitions). + if (op.isDeclaration()) + return mlir::SymbolTable::Visibility::Private; + return getMLIRVisibilityFromCIRLinkage(op.getLinkage()); +} + mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibilityFromCIRLinkage( mlir::cir::GlobalLinkageKind GLK) { switch (GLK) { @@ -2083,8 +2102,8 @@ mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( // Set up extra information and add to the module GV.setLinkageAttr( mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), Linkage)); - mlir::SymbolTable::setSymbolVisibility( - GV, CIRGenModule::getMLIRVisibilityFromCIRLinkage(Linkage)); + mlir::SymbolTable::setSymbolVisibility(GV, + CIRGenModule::getMLIRVisibility(GV)); if (OldGV) { // Replace occurrences of the old variable if needed. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 4f7737e578ec..f89f95218ce2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -472,8 +472,11 @@ class CIRGenModule { /// Visibility and Linkage /// ------- + static void setInitializer(mlir::cir::GlobalOp &op, mlir::Attribute value); static mlir::SymbolTable::Visibility getMLIRVisibilityFromCIRLinkage(mlir::cir::GlobalLinkageKind GLK); + static mlir::SymbolTable::Visibility + getMLIRVisibility(mlir::cir::GlobalOp op); mlir::cir::GlobalLinkageKind getFunctionLinkage(GlobalDecl GD); mlir::cir::GlobalLinkageKind getCIRLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage, diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp index e28f5292e31f..19b9d5708a37 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -43,7 +43,7 @@ void ConstantInitFuture::abandon() { void ConstantInitFuture::installInGlobal(mlir::cir::GlobalOp GV) { assert(Data && "installing null future"); if (Data.is()) { - GV.setInitialValueAttr(Data.get()); + CIRGenModule::setInitializer(GV, Data.get()); } else { llvm_unreachable("NYI"); // auto &builder = *Data.get(); @@ -87,7 +87,7 @@ mlir::cir::GlobalOp ConstantInitBuilderBase::createGlobal( void ConstantInitBuilderBase::setGlobalInitializer( mlir::cir::GlobalOp GV, mlir::Attribute initializer) { - GV.setInitialValueAttr(initializer); + CIRGenModule::setInitializer(GV, initializer); if (!SelfReferences.empty()) resolveSelfReferences(GV); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d068c1e1bef2..6a41206074a6 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1107,6 +1107,8 @@ LogicalResult GlobalOp::verify() { break; case GlobalLinkageKind::ExternalLinkage: case GlobalLinkageKind::ExternalWeakLinkage: + case GlobalLinkageKind::LinkOnceODRLinkage: + case GlobalLinkageKind::LinkOnceAnyLinkage: // FIXME: mlir's concept of visibility gets tricky with LLVM ones, // for instance, symbol declarations cannot be "public", so we // have to mark them "private" to workaround the symbol verifier. @@ -1116,7 +1118,9 @@ LogicalResult GlobalOp::verify() { << "' linkage"; break; default: - assert(0 && "not implemented"); + emitError() << stringifyGlobalLinkageKind(getLinkage()) + << ": verifier not implemented\n"; + return failure(); } // TODO: verify visibility for declarations? From 4d881cc077a5b11d5835bb3bc2ef6e6c550c4130 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 20 Apr 2023 16:34:39 -0700 Subject: [PATCH 0890/2301] [CIR][CIRGen] Fix computeRecordLayout to properly propagate complete type --- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 16 ++++++++++------ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypes.h | 2 +- .../lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 18 ++++++++++++------ 4 files changed, 24 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index 5bc097835524..687ac46668f6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -68,6 +68,16 @@ class CIRGenRecordLayout { IsZeroInitializable(IsZeroInitializable), IsZeroInitializableAsBase(IsZeroInitializableAsBase) {} + /// Return the "complete object" LLVM type associated with + /// this record. + mlir::cir::StructType getCIRType() const { return CompleteObjectType; } + + /// Return the "base subobject" LLVM type associated with + /// this record. + mlir::cir::StructType getBaseSubobjectCIRType() const { + return BaseSubobjectType; + } + /// Return cir::StructType element number that corresponds to the field FD. unsigned getCIRFieldNo(const clang::FieldDecl *FD) const { FD = FD->getCanonicalDecl(); @@ -78,12 +88,6 @@ class CIRGenRecordLayout { /// Check whether this struct can be C++ zero-initialized with a /// zeroinitializer. bool isZeroInitializable() const { return IsZeroInitializable; } - - /// Return the "base subobject" LLVM type associated with - /// this record. - mlir::cir::StructType getBaseSubobjectCIRType() const { - return BaseSubobjectType; - } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index e2884ddd72a4..e80792e6726c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -171,7 +171,7 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { } // Layout fields. - std::unique_ptr Layout = computeRecordLayout(RD, entry); + std::unique_ptr Layout = computeRecordLayout(RD, &entry); CIRGenRecordLayouts[key] = std::move(Layout); // We're done laying out this struct. diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index f19e2b2e5ae4..b4a183ddb42b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -163,7 +163,7 @@ class CIRGenTypes { mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl); std::unique_ptr - computeRecordLayout(const clang::RecordDecl *D, mlir::cir::StructType &Ty); + computeRecordLayout(const clang::RecordDecl *D, mlir::cir::StructType *Ty); std::string getRecordTypeName(const clang::RecordDecl *, llvm::StringRef suffix); diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 1870d955b722..a566b6f314f5 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -322,7 +322,7 @@ void CIRRecordLowering::accumulateFields() { std::unique_ptr CIRGenTypes::computeRecordLayout(const RecordDecl *D, - mlir::cir::StructType &Ty) { + mlir::cir::StructType *Ty) { CIRRecordLowering builder(*this, D, /*packed=*/false); builder.lower(/*nonVirtualBaseType=*/false); @@ -331,7 +331,7 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); // If we're in C++, compute the base subobject type. - mlir::cir::StructType BaseTy = nullptr; + mlir::cir::StructType *BaseTy = nullptr; if (llvm::isa(D) && !D->isUnion() && !D->hasAttr()) { BaseTy = Ty; @@ -340,10 +340,12 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, CIRRecordLowering baseBuilder(*this, D, /*Packed=*/builder.isPacked); auto baseIdentifier = mlir::StringAttr::get(&getMLIRContext(), name + ".base"); - BaseTy = mlir::cir::StructType::get( + *BaseTy = mlir::cir::StructType::get( &getMLIRContext(), baseBuilder.fieldTypes, baseIdentifier, /*body=*/true, mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); + // TODO(cir): add something like addRecordTypeName + // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work // on both of them with the same index. assert(builder.isPacked == baseBuilder.isPacked && @@ -351,13 +353,17 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, } } - // TODO(cir): add base class info - Ty = mlir::cir::StructType::get( + // Fill in the struct *after* computing the base type. Filling in the body + // signifies that the type is no longer opaque and record layout is complete, + // but we may need to recursively layout D while laying D out as a base type. + *Ty = mlir::cir::StructType::get( &getMLIRContext(), builder.fieldTypes, identifier, /*body=*/true, mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); auto RL = std::make_unique( - Ty, BaseTy, (bool)builder.IsZeroInitializable, + Ty ? *Ty : mlir::cir::StructType{}, + BaseTy ? *BaseTy : mlir::cir::StructType{}, + (bool)builder.IsZeroInitializable, (bool)builder.IsZeroInitializableAsBase); RL->NonVirtualBases.swap(builder.nonVirtualBases); From dd3c75f5c75a63c77ed2adf4f72f0535681eb014 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 20 Apr 2023 17:34:49 -0700 Subject: [PATCH 0891/2301] [CIR][CIRGen] Vtables and dtors: add testcases This covers all the logic implemented for dtors and vtables in the previous commits. --- clang/test/CIR/CodeGen/dtors.cpp | 46 +++++++++--- clang/test/CIR/CodeGen/vtable-rtti.cpp | 96 ++++++++++++++++++++++++++ 2 files changed, 131 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/CodeGen/vtable-rtti.cpp diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index 4aff0ab08b4f..6b5e5a90b2c7 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -1,23 +1,47 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * -class __attribute__((__visibility__("default"))) exception +class A { public: - __attribute__((__visibility__("hidden"))) __attribute__((__exclude_from_explicit_instantiation__)) exception() noexcept {} - __attribute__((__visibility__("hidden"))) __attribute__((__exclude_from_explicit_instantiation__)) exception(const exception&) noexcept = default; + A() noexcept {} + A(const A&) noexcept = default; - virtual ~exception() noexcept; - virtual const char* what() const noexcept; + virtual ~A() noexcept; + virtual const char* quack() const noexcept; }; -class __attribute__((__visibility__("default"))) bad_function_call - : public exception +class B : public A { public: - virtual ~bad_function_call() noexcept {} + virtual ~B() noexcept {} }; -// TODO: for now only check that this doesn't crash, more support soon. +// Class A +// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr i32>>, #cir.recdecl.ast> -// CHECK: module \ No newline at end of file +// Class B +// CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> + +// @B::~B() #1 declaration +// CHECK: cir.func @_ZN1BD2Ev(!cir.ptr) attributes {sym_visibility = "private"} + +// operator delete(void*) declaration +// CHECK: cir.func @_ZdlPv(!cir.ptr) attributes {sym_visibility = "private"} + +// B dtor => @B::~B() #2 +// Calls dtor #1 +// Calls operator delete +// +// CHECK: cir.func linkonce_odr @_ZN1BD0Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.call @_ZN1BD2Ev(%1) : (!cir.ptr) -> () +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: cir.call @_ZdlPv(%2) : (!cir.ptr) -> () +// CHECK: cir.return +// CHECK: } + +void foo() { B(); } diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp new file mode 100644 index 000000000000..9c04ba248865 --- /dev/null +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -0,0 +1,96 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +class A +{ +public: + A() noexcept {} + A(const A&) noexcept = default; + + virtual ~A() noexcept; + virtual const char* quack() const noexcept; +}; + +class B : public A +{ +public: + virtual ~B() noexcept {} +}; + +// Aliased for typeinfo and vtable. +// CHECK: ![[AnonVTableType:ty_.*]] = !cir.struct<"", !cir.array x 5>> +// CHECK: ![[AnonTypeInfo:ty_.*]] = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> + +// Class A +// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr i32>>, #cir.recdecl.ast> + +// vtable for A type +// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct<"vtable", !cir.array x 5>> + +// Class B +// CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> + +// B ctor => @B::B() +// Calls @A::A() and initialize __vptr with address of B's vtable. +// +// CHECK: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: cir.call @_ZN1AC2Ev(%2) : (!cir.ptr) -> () +// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, 0, 2,) : cir.ptr +// CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr i32>>> +// CHECK: %5 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr i32>> +// CHECK: cir.store %5, %4 : !cir.ptr i32>>, cir.ptr i32>>> +// CHECK: cir.return +// CHECK: } + +// foo - zero initialize object B and call ctor (@B::B()) +// +// CHECK: cir.func @_Z3foov() { +// CHECK: %0 = cir.alloca ![[ClassB]], cir.ptr , ["agg.tmp0"] {alignment = 8 : i64} +// CHECK: cir.scope { +// CHECK: %1 = cir.const(#cir.zero : ![[ClassB]]) : ![[ClassB]] +// CHECK: cir.store %1, %0 : ![[ClassB]], cir.ptr +// CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () +// CHECK: } +// CHECK: cir.return +// CHECK: } + +// Vtable definition for A +// cir.global "private" external @_ZTV1A : ![[VTableTypeA]] {alignment = 8 : i64} + +// A ctor => @A::A() +// Calls @A::A() and initialize __vptr with address of A's vtable +// +// CHECK: cir.func linkonce_odr @_ZN1AC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, 0, 2,) : cir.ptr +// CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr i32>>> +// CHECK: %4 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr i32>> +// CHECK: cir.store %4, %3 : !cir.ptr i32>>, cir.ptr i32>>> +// CHECK: cir.return +// CHECK: } + +// vtable for B +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.const_struct<[#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr] : !cir.array x 5>> : !cir.array x 5>]> : ![[AnonVTableType]] {alignment = 8 : i64} + +// vtable for __cxxabiv1::__si_class_type_info +// CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> + +// typeinfo name for B +// CHECK: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array {alignment = 1 : i64} + +// typeinfo for A +// CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr + +// typeinfo for B +// CHECK: cir.global "private" constant external @_ZTI1B : ![[AnonTypeInfo]] {alignment = 8 : i64} + +// Checks for dtors in dtors.cpp + +void foo() { B(); } From 722e10903f061d4453d92027b2c584ef0a1f1b4a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 20 Apr 2023 18:33:41 -0700 Subject: [PATCH 0892/2301] [CIR][CIRGen] Vtables: actually emit initializer for typeinfo --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 3 +-- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 936dcc7a9775..348e7328cdcb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1435,8 +1435,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( assert(!UnimplementedFeature::setDLLStorageClass()); assert(!UnimplementedFeature::setPartition()); assert(!UnimplementedFeature::setDSOLocal()); - mlir::SymbolTable::setSymbolVisibility(GV, - CIRGenModule::getMLIRVisibility(GV)); + CIRGenModule::setInitializer(GV, init); return mlir::cir::GlobalViewAttr::get( builder.getInt8PtrTy(), diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 9c04ba248865..bdc2068b419f 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -89,7 +89,7 @@ class B : public A // CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr // typeinfo for B -// CHECK: cir.global "private" constant external @_ZTI1B : ![[AnonTypeInfo]] {alignment = 8 : i64} +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<[#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr] : ![[AnonTypeInfo]]> : ![[AnonTypeInfo]] {alignment = 8 : i64} // Checks for dtors in dtors.cpp From 895478d7f318989f78966cbc68b51254b35a1e9f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 25 Apr 2023 18:06:49 -0700 Subject: [PATCH 0893/2301] [CIR][NFC] Tide VTableAddrPointOp up a bit --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 17 +++++++++++------ clang/test/CIR/CodeGen/vtable-rtti.cpp | 4 ++-- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 268b6d968999..624dfc52b662 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1186,8 +1186,13 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", [Pure, DeclareOpInterfaceMethods]> { let summary = "Get the vtable (global variable) address point"; let description = [{ - The `vtable.address_point` operation retrieves the address point of a - C++ virtual table (vtable). + The `vtable.address_point` operation retrieves the "effective" address + (address point) of a C++ virtual table. An object internal `__vptr` + gets initializated on top of the value returned by this operation. + + `vtable_index` provides the appropriate vtable within the vtable group + (as specified by Itanium ABI), and `addr_point_index` the actual address + point within that vtable. Example: @@ -1197,16 +1202,16 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", }]; let arguments = (ins FlatSymbolRefAttr:$name, - I32Attr:$vtableIndex, - I32Attr:$addrPointIndex); + I32Attr:$vtable_index, + I32Attr:$address_point_index); let results = (outs Res:$addr); // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. let assemblyFormat = [{ `(` $name `,` - $vtableIndex `,` - $addrPointIndex `,` + `vtable_index` `=` $vtable_index `,` + `address_point_index` `=` $address_point_index `)` `:` `cir.ptr` type($addr) attr-dict }]; diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index bdc2068b419f..36852d43c449 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -40,7 +40,7 @@ class B : public A // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr // CHECK: cir.call @_ZN1AC2Ev(%2) : (!cir.ptr) -> () -// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, 0, 2,) : cir.ptr +// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr // CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr i32>>> // CHECK: %5 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr i32>> // CHECK: cir.store %5, %4 : !cir.ptr i32>>, cir.ptr i32>>> @@ -69,7 +69,7 @@ class B : public A // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, 0, 2,) : cir.ptr +// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : cir.ptr // CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr i32>>> // CHECK: %4 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr i32>> // CHECK: cir.store %4, %3 : !cir.ptr i32>>, cir.ptr i32>>> From 7d6f75ab6eb4d62816438a14e442ef370e810ef4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 26 Apr 2023 11:19:39 -0400 Subject: [PATCH 0894/2301] [CIR][tests] Add explicit x86_64-unknown-linux-gnu targets to a few tests Our internal CI cross comiles x86_64-apple-darwin toolchains and then attempts to run these tests. So the default triple is the Darwin one and thus this test fails. We should also just teach CIRGen how to target x86_64 apple but that's a decent bit more work than just explicit usage of the triple. --- clang/test/CIR/CodeGen/literals.c | 2 +- clang/test/CIR/CodeGen/literals.cpp | 2 +- clang/test/CIR/Executables/hello.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/test/CIR/CodeGen/literals.c b/clang/test/CIR/CodeGen/literals.c index 91d1b310c4c4..7b0a69dc5886 100644 --- a/clang/test/CIR/CodeGen/literals.c +++ b/clang/test/CIR/CodeGen/literals.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s int literals(void) { char a = 'a'; // char literals are int in C diff --git a/clang/test/CIR/CodeGen/literals.cpp b/clang/test/CIR/CodeGen/literals.cpp index 1aaf6a56cb58..35cf7f8144bb 100644 --- a/clang/test/CIR/CodeGen/literals.cpp +++ b/clang/test/CIR/CodeGen/literals.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s int literals() { char a = 'a'; // char literals have char type in C++ diff --git a/clang/test/CIR/Executables/hello.c b/clang/test/CIR/Executables/hello.c index 42e9e0802fc6..f81080b35733 100644 --- a/clang/test/CIR/Executables/hello.c +++ b/clang/test/CIR/Executables/hello.c @@ -1,7 +1,7 @@ -// RUN: %clang -fclangir -fclangir-direct-lowering -o %t %s +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fclangir-direct-lowering -o %t %s // RUN: %t | FileCheck %s -// XFAIL: * - +// REQUIRES: system-linux +// REQUIRES: target-linux int printf(const char *format); int main (void) { From 6b6b47b8ee4d0327f653e410479e36ff4b96aca6 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 26 Apr 2023 15:08:36 -0300 Subject: [PATCH 0895/2301] [CIR][Codegen][NFC] Move buildArrayConstant to CIRGenExprConst namespace The buildArrayConstant method is only used when generating code for some constant expression in the AST. To ensure encapsulation, this patch moves the method to the CIRGenExprConst anonymous namespace, ensuring any (and only) expression constant visitors have access to it. ghstack-source-id: b49a91a1ca5844989110e080fc80a1797273e638 Pull Request resolved: https://github.com/llvm/clangir/pull/63 --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 132 ++++++++++++---------- 1 file changed, 70 insertions(+), 62 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 69b933473693..20ee4e2cef8f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -14,6 +14,8 @@ #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/BuiltinAttributeInterfaces.h" #include "clang/AST/APValue.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" @@ -34,6 +36,12 @@ using namespace cir; namespace { class ConstExprEmitter; +static mlir::Attribute +buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, + mlir::Type CommonElementType, unsigned ArrayBound, + SmallVectorImpl &Elements, + mlir::TypedAttr Filler); + struct ConstantAggregateBuilderUtils { CIRGenModule &CGM; @@ -833,6 +841,68 @@ class ConstExprEmitter mlir::Type ConvertType(QualType T) { return CGM.getTypes().ConvertType(T); } }; +static mlir::Attribute +buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, + mlir::Type CommonElementType, unsigned ArrayBound, + SmallVectorImpl &Elements, + mlir::TypedAttr Filler) { + auto &builder = CGM.getBuilder(); + auto isNullValue = [&](mlir::Attribute f) { + // TODO(cir): introduce char type in CIR and check for that instead. + auto intVal = f.dyn_cast_or_null(); + assert(intVal && "not implemented"); + if (intVal.getInt() == 0) + return true; + return false; + }; + + // Figure out how long the initial prefix of non-zero elements is. + unsigned NonzeroLength = ArrayBound; + if (Elements.size() < NonzeroLength && isNullValue(Filler)) + NonzeroLength = Elements.size(); + if (NonzeroLength == Elements.size()) { + while (NonzeroLength > 0 && isNullValue(Elements[NonzeroLength - 1])) + --NonzeroLength; + } + + if (NonzeroLength == 0) + assert(0 && "NYE"); + + // Add a zeroinitializer array filler if we have lots of trailing zeroes. + unsigned TrailingZeroes = ArrayBound - NonzeroLength; + if (TrailingZeroes >= 8) { + assert(0 && "NYE"); + assert(Elements.size() >= NonzeroLength && + "missing initializer for non-zero element"); + + // TODO(cir): If all the elements had the same type up to the trailing + // zeroes, emit a struct of two arrays (the nonzero data and the + // zeroinitializer). Use DesiredType to get the element type. + } else if (Elements.size() != ArrayBound) { + // Otherwise pad to the right size with the filler if necessary. + Elements.resize(ArrayBound, Filler); + if (Filler.getType() != CommonElementType) + CommonElementType = {}; + } + + // If all elements have the same type, just emit an array constant. + if (CommonElementType) { + SmallVector Eles; + Eles.reserve(Elements.size()); + for (auto const &Element : Elements) + Eles.push_back(Element); + + return builder.getConstArray( + mlir::ArrayAttr::get(builder.getContext(), Eles), + mlir::cir::ArrayType::get(builder.getContext(), CommonElementType, + ArrayBound)); + } + + // We have mixed types. Use a packed struct. + assert(0 && "NYE"); + return {}; +} + } // end anonymous namespace. //===----------------------------------------------------------------------===// @@ -1194,68 +1264,6 @@ mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, return C; } -static mlir::Attribute -buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, - mlir::Type CommonElementType, unsigned ArrayBound, - SmallVectorImpl &Elements, - mlir::TypedAttr Filler) { - auto &builder = CGM.getBuilder(); - auto isNullValue = [&](mlir::Attribute f) { - // TODO(cir): introduce char type in CIR and check for that instead. - auto intVal = f.dyn_cast_or_null(); - assert(intVal && "not implemented"); - if (intVal.getInt() == 0) - return true; - return false; - }; - - // Figure out how long the initial prefix of non-zero elements is. - unsigned NonzeroLength = ArrayBound; - if (Elements.size() < NonzeroLength && isNullValue(Filler)) - NonzeroLength = Elements.size(); - if (NonzeroLength == Elements.size()) { - while (NonzeroLength > 0 && isNullValue(Elements[NonzeroLength - 1])) - --NonzeroLength; - } - - if (NonzeroLength == 0) - assert(0 && "NYE"); - - // Add a zeroinitializer array filler if we have lots of trailing zeroes. - unsigned TrailingZeroes = ArrayBound - NonzeroLength; - if (TrailingZeroes >= 8) { - assert(0 && "NYE"); - assert(Elements.size() >= NonzeroLength && - "missing initializer for non-zero element"); - - // TODO(cir): If all the elements had the same type up to the trailing - // zeroes, emit a struct of two arrays (the nonzero data and the - // zeroinitializer). Use DesiredType to get the element type. - } else if (Elements.size() != ArrayBound) { - // Otherwise pad to the right size with the filler if necessary. - Elements.resize(ArrayBound, Filler); - if (Filler.getType() != CommonElementType) - CommonElementType = {}; - } - - // If all elements have the same type, just emit an array constant. - if (CommonElementType) { - SmallVector Eles; - Eles.reserve(Elements.size()); - for (auto const &Element : Elements) - Eles.push_back(Element); - - return builder.getConstArray( - mlir::ArrayAttr::get(builder.getContext(), Eles), - mlir::cir::ArrayType::get(builder.getContext(), CommonElementType, - ArrayBound)); - } - - // We have mixed types. Use a packed struct. - assert(0 && "NYE"); - return {}; -} - mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *E, QualType T) { assert(0 && "not implemented"); return nullptr; From 56a38ed15af72c8fd19bb42fc655553b7e4d4e7b Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 26 Apr 2023 15:08:37 -0300 Subject: [PATCH 0896/2301] [CIR][Codegen] Emit string literals When emitting variable initializers, if the initializer's evaluation fails, CodeGen will fall back to regular visitors for the emission. String literals are then emitted as a constant array of bytes. This fixes issues with C constant expression initializers that fail to be evaluated. ghstack-source-id: bdb7a49c8d679b35fb365ed942013ccea162f1f6 Pull Request resolved: https://github.com/llvm/clangir/pull/64 --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 16 ++++++++++++---- clang/test/CIR/CodeGen/globals.c | 10 ++++++++++ 2 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/globals.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 20ee4e2cef8f..7604ce2fb894 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -824,8 +824,7 @@ class ConstExprEmitter mlir::Attribute VisitStringLiteral(StringLiteral *E, QualType T) { // This is a string literal initializing an array in an initializer. - assert(0 && "not implemented"); - return {}; + return CGM.getConstantArrayFromStringLiteral(E); } mlir::Attribute VisitObjCEncodeExpr(ObjCEncodeExpr *E, QualType T) { @@ -1208,8 +1207,17 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { if (destType->isReferenceType()) return {}; - assert(0 && "not implemented"); - return {}; + // Evaluation failed and not a reference type: ensure initializer exists. + const Expr *E = D.getInit(); + assert(E && "No initializer to emit"); + + // Initializer exists: emit it "manually" through visitors. + auto nonMemoryDestType = getNonMemoryType(CGM, destType); + auto C = + ConstExprEmitter(*this).Visit(const_cast(E), nonMemoryDestType); + + // Return either the initializer attribute or a null attribute on failure. + return (C ? emitForMemory(C, destType) : nullptr); } mlir::Attribute ConstantEmitter::tryEmitAbstract(const APValue &value, diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c new file mode 100644 index 000000000000..c6344fa1adce --- /dev/null +++ b/clang/test/CIR/CodeGen/globals.c @@ -0,0 +1,10 @@ +// There seems to be some differences in how constant expressions are evaluated +// in C vs C++. This causees the code gen for C initialized globals to be a +// bit different from the C++ version. This test ensures that these differences +// are accounted for. + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s +// XFAIL: * + +char string[] = "whatnow"; +// CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array From 6dd7a1696454ddc72f781a90e771504344004447 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 26 Apr 2023 15:08:37 -0300 Subject: [PATCH 0897/2301] [CIR][CodeGen] Emit array initialization Implement InitListExpr visitor to emit a cir.const_array attribute. As a dependency, also implemented methods tryEmitPrivate and tryEmitForMemory, to both handle emission failures and emissions of elements not in memory. ghstack-source-id: fedf0c3d0af1a897f9191e6ea4bb29937a782bc6 Pull Request resolved: https://github.com/llvm/clangir/pull/65 --- clang/lib/CIR/CodeGen/CIRGenCstEmitter.h | 4 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 74 ++++++++++++++++++++--- clang/test/CIR/CodeGen/globals.c | 2 + 3 files changed, 70 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h index 1c70088d73b0..3b20964241de 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h @@ -103,8 +103,8 @@ class ConstantEmitter { // functions and classes. mlir::Attribute tryEmitPrivateForVarInit(const VarDecl &D); - mlir::TypedAttr tryEmitPrivate(const Expr *E, QualType T); - mlir::TypedAttr tryEmitPrivateForMemory(const Expr *E, QualType T); + mlir::Attribute tryEmitPrivate(const Expr *E, QualType T); + mlir::Attribute tryEmitPrivateForMemory(const Expr *E, QualType T); mlir::Attribute tryEmitPrivate(const APValue &value, QualType T); mlir::Attribute tryEmitPrivateForMemory(const APValue &value, QualType T); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 7604ce2fb894..cce94c1b735c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -25,6 +25,8 @@ #include "clang/Basic/Builtins.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Sequence.h" +#include "llvm/Support/ErrorHandling.h" +#include using namespace clang; using namespace cir; @@ -763,8 +765,45 @@ class ConstExprEmitter } mlir::Attribute EmitArrayInitialization(InitListExpr *ILE, QualType T) { - assert(0 && "not implemented"); - return {}; + auto *CAT = CGM.getASTContext().getAsConstantArrayType(ILE->getType()); + assert(CAT && "can't emit array init for non-constant-bound array"); + unsigned NumInitElements = ILE->getNumInits(); // init list size + unsigned NumElements = CAT->getSize().getZExtValue(); // array size + unsigned NumInitableElts = std::min(NumInitElements, NumElements); + + QualType EltTy = CAT->getElementType(); + SmallVector Elts; + Elts.reserve(NumElements); + + // Emit array filler, if there is one. + if (Expr *filler = ILE->getArrayFiller()) { + llvm_unreachable("NYI"); + } + + // Emit initializer elements as MLIR attributes and check for common type. + mlir::Type CommonElementType; + for (unsigned i = 0; i != NumInitableElts; ++i) { + Expr *Init = ILE->getInit(i); + auto C = Emitter.tryEmitPrivateForMemory(Init, EltTy); + if (!C) + return {}; + + assert(C.isa() && "This should always be a TypedAttr."); + auto CTyped = C.cast(); + + if (i == 0) + CommonElementType = CTyped.getType(); + else if (CTyped.getType() != CommonElementType) + CommonElementType = nullptr; + auto typedC = llvm::dyn_cast(C); + if (!typedC) + llvm_unreachable("this should always be typed"); + Elts.push_back(typedC); + } + + auto desiredType = CGM.getTypes().ConvertType(T); + return buildArrayConstant(CGM, desiredType, CommonElementType, NumElements, + Elts, mlir::TypedAttr()); } mlir::Attribute EmitRecordInitialization(InitListExpr *ILE, QualType T) { @@ -1234,10 +1273,11 @@ mlir::Attribute ConstantEmitter::tryEmitAbstractForMemory(const APValue &value, return (C ? emitForMemory(C, destType) : nullptr); } -mlir::TypedAttr ConstantEmitter::tryEmitPrivateForMemory(const Expr *E, +mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const Expr *E, QualType destType) { - assert(0 && "not implemented"); - return nullptr; + auto nonMemoryDestType = getNonMemoryType(CGM, destType); + auto C = tryEmitPrivate(E, nonMemoryDestType); + return (C ? emitForMemory(C, destType) : nullptr); } mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, @@ -1272,9 +1312,27 @@ mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, return C; } -mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *E, QualType T) { - assert(0 && "not implemented"); - return nullptr; +mlir::Attribute ConstantEmitter::tryEmitPrivate(const Expr *E, QualType T) { + assert(!T->isVoidType() && "can't emit a void constant"); + Expr::EvalResult Result; + bool Success; + + // TODO: Implement the missing functionalities below. + assert(!T->isReferenceType() && "NYI"); + + // NOTE: Not all constant expressions can be emited by the ConstExprEmitter. + // So we have to fold/evaluate the expression in some cases. + // + // Try folding constant expression into an RValue. + Success = E->EvaluateAsRValue(Result, CGM.getASTContext(), InConstantContext); + + mlir::Attribute C; + if (Success && !Result.HasSideEffects) + C = tryEmitPrivate(Result.Val, T); + else + C = ConstExprEmitter(*this).Visit(const_cast(E), T); + + return C; } mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index c6344fa1adce..37f544e30b6f 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -8,3 +8,5 @@ char string[] = "whatnow"; // CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array +int sint[] = {123, 456, 789}; +// CHECK: cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32] : !cir.array> : !cir.array From 71cd8fac26843c355ae2dfdb0d6cd392cf49bf40 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 26 Apr 2023 15:08:37 -0300 Subject: [PATCH 0898/2301] [CIR][CodeGen] Handle init list size mismatch Initializer lists can have more or fewer elements than the destination the array can hold, and the compiler will deal with it. This patch handles handle these scenarios. ghstack-source-id: 2d283018460c47e16393c4ffee99d7c43093ce2b Pull Request resolved: https://github.com/llvm/clangir/pull/66 --- clang/lib/CIR/CodeGen/CIRGenCstEmitter.h | 3 +++ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 27 ++++++++++++++++++++--- clang/test/CIR/CodeGen/globals.c | 4 ++++ 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h index 3b20964241de..e283568993cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h @@ -109,6 +109,9 @@ class ConstantEmitter { mlir::Attribute tryEmitPrivate(const APValue &value, QualType T); mlir::Attribute tryEmitPrivateForMemory(const APValue &value, QualType T); + mlir::Attribute tryEmitAbstract(const Expr *E, QualType destType); + mlir::Attribute tryEmitAbstractForMemory(const Expr *E, QualType destType); + mlir::Attribute tryEmitAbstract(const APValue &value, QualType destType); mlir::Attribute tryEmitAbstractForMemory(const APValue &value, QualType destType); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index cce94c1b735c..d44cb1a0a5a2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -776,8 +776,12 @@ class ConstExprEmitter Elts.reserve(NumElements); // Emit array filler, if there is one. - if (Expr *filler = ILE->getArrayFiller()) { - llvm_unreachable("NYI"); + mlir::Attribute Filler; + if (ILE->hasArrayFiller()) { + auto *aux = ILE->getArrayFiller(); + Filler = Emitter.tryEmitAbstractForMemory(aux, CAT->getElementType()); + if (!Filler) + return {}; } // Emit initializer elements as MLIR attributes and check for common type. @@ -802,8 +806,11 @@ class ConstExprEmitter } auto desiredType = CGM.getTypes().ConvertType(T); + auto typedFiller = llvm::dyn_cast_or_null(Filler); + if (Filler && !typedFiller) + llvm_unreachable("We shouldn't be receiving untyped attrs here"); return buildArrayConstant(CGM, desiredType, CommonElementType, NumElements, - Elts, mlir::TypedAttr()); + Elts, typedFiller); } mlir::Attribute EmitRecordInitialization(InitListExpr *ILE, QualType T) { @@ -1259,6 +1266,13 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { return (C ? emitForMemory(C, destType) : nullptr); } +mlir::Attribute ConstantEmitter::tryEmitAbstract(const Expr *E, + QualType destType) { + auto state = pushAbstract(); + auto C = tryEmitPrivate(E, destType); + return validateAndPopAbstract(C, state); +} + mlir::Attribute ConstantEmitter::tryEmitAbstract(const APValue &value, QualType destType) { auto state = pushAbstract(); @@ -1266,6 +1280,13 @@ mlir::Attribute ConstantEmitter::tryEmitAbstract(const APValue &value, return validateAndPopAbstract(C, state); } +mlir::Attribute ConstantEmitter::tryEmitAbstractForMemory(const Expr *E, + QualType destType) { + auto nonMemoryDestType = getNonMemoryType(CGM, destType); + auto C = tryEmitAbstract(E, nonMemoryDestType); + return (C ? emitForMemory(C, destType) : nullptr); +} + mlir::Attribute ConstantEmitter::tryEmitAbstractForMemory(const APValue &value, QualType destType) { auto nonMemoryDestType = getNonMemoryType(CGM, destType); diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 37f544e30b6f..3f06c40ce09e 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -10,3 +10,7 @@ char string[] = "whatnow"; // CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array int sint[] = {123, 456, 789}; // CHECK: cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32] : !cir.array> : !cir.array +int filler_sint[4] = {1, 2}; // Ensure missing elements are zero-initialized. +// CHECK: cir.global external @filler_sint = #cir.const_array<[1 : i32, 2 : i32, 0 : i32, 0 : i32] : !cir.array> : !cir.array +int excess_sint[2] = {1, 2, 3, 4}; // Ensure excess elements are ignored. +// CHECK: cir.global external @excess_sint = #cir.const_array<[1 : i32, 2 : i32] : !cir.array> : !cir.array From c812cd7d293305c02623b3d37774d5fb25299bc9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 25 Apr 2023 18:18:46 -0700 Subject: [PATCH 0899/2301] [CIR][CIRGen] vtable struct should be anonymous --- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 3 +-- clang/test/CIR/CodeGen/vtable-rtti.cpp | 15 +++++++-------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index d18de0ef8241..f3231b660f33 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -58,8 +58,7 @@ mlir::Type CIRGenVTables::getVTableType(const VTableLayout &layout) { // FIXME(cir): should VTableLayout be encoded like we do for some // AST nodes? - return mlir::cir::StructType::get(ctx, tys, "vtable", - /*body=*/true); + return mlir::cir::StructType::get(ctx, tys, "", /*body=*/true); } /// At this point in the translation unit, does it appear that can we diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 36852d43c449..78488128b206 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -18,16 +18,15 @@ class B : public A virtual ~B() noexcept {} }; -// Aliased for typeinfo and vtable. -// CHECK: ![[AnonVTableType:ty_.*]] = !cir.struct<"", !cir.array x 5>> -// CHECK: ![[AnonTypeInfo:ty_.*]] = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> +// vtable for A type +// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct<"", !cir.array x 5>> + +// Type info B. +// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> // Class A // CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr i32>>, #cir.recdecl.ast> -// vtable for A type -// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct<"vtable", !cir.array x 5>> - // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> @@ -77,7 +76,7 @@ class B : public A // CHECK: } // vtable for B -// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.const_struct<[#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr] : !cir.array x 5>> : !cir.array x 5>]> : ![[AnonVTableType]] {alignment = 8 : i64} +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.const_struct<[#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr] : !cir.array x 5>> : !cir.array x 5>]> : ![[VTableTypeA]] {alignment = 8 : i64} // vtable for __cxxabiv1::__si_class_type_info // CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> @@ -89,7 +88,7 @@ class B : public A // CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr // typeinfo for B -// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<[#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr] : ![[AnonTypeInfo]]> : ![[AnonTypeInfo]] {alignment = 8 : i64} +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<[#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr] : ![[TypeInfoB]]> : ![[TypeInfoB]] {alignment = 8 : i64} // Checks for dtors in dtors.cpp From fc1114d9a683bf3083e8ccd846b683a40b695d24 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Apr 2023 12:33:22 -0700 Subject: [PATCH 0900/2301] [CIR][CIRGen] Improve VTableAddrPointOp and fold usual bitcast as part of its functionality - Add verifier for returning type. - Improve docs. - Be more accurante: an address point is the final location that matters, it should already be bitcasted, no need to do that during CIRGen. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 26 ++++++++++--------- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 17 ++++-------- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 8 +++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 18 ++++++++----- clang/test/CIR/CodeGen/vtable-rtti.cpp | 10 +++---- 5 files changed, 40 insertions(+), 39 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 624dfc52b662..b6c7eabb0bd4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1186,19 +1186,22 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", [Pure, DeclareOpInterfaceMethods]> { let summary = "Get the vtable (global variable) address point"; let description = [{ - The `vtable.address_point` operation retrieves the "effective" address - (address point) of a C++ virtual table. An object internal `__vptr` - gets initializated on top of the value returned by this operation. + The `vtable.address_point` operation retrieves the "effective" address + (address point) of a C++ virtual table. An object internal `__vptr` + gets initializated on top of the value returned by this operation. - `vtable_index` provides the appropriate vtable within the vtable group - (as specified by Itanium ABI), and `addr_point_index` the actual address - point within that vtable. + `vtable_index` provides the appropriate vtable within the vtable group + (as specified by Itanium ABI), and `addr_point_index` the actual address + point within that vtable. - Example: + The return type is always a `!cir.ptr i32>>`. - ```mlir - %x = cir.vtable.address_point(@vtable, 2, 3) : !cir.ptr> - ``` + Example: + ```mlir + cir.global linkonce_odr @_ZTV1B = ... + ... + %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr i32>> + ``` }]; let arguments = (ins FlatSymbolRefAttr:$name, @@ -1216,8 +1219,7 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", `:` `cir.ptr` type($addr) attr-dict }]; - // `VTableAddrPointOp` is fully verified by its traits. - let hasVerifier = 0; + let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 714a4b5aa81c..c8605197bbb8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -666,20 +666,13 @@ void CIRGenFunction::initializeVTablePointer(mlir::Location loc, } // Finally, store the address point. Use the same CIR types as the field. - - // unsigned GlobalsAS = CGM.getDataLayout().getDefaultGlobalsAddressSpace(); - // unsigned ProgAS = CGM.getDataLayout().getProgramAddressSpace(); + // + // vtable field is derived from `this` pointer, therefore they should be in + // the same addr space. assert(!UnimplementedFeature::addressSpace()); - auto VTablePtrTy = builder.getVirtualFnPtrType(/*isVarArg=*/true); - - // // vtable field is derived from `this` pointer, therefore they should be in - // // the same addr space. Note that this might not be LLVM address space 0. - VTableField = builder.createElementBitCast(loc, VTableField, VTablePtrTy); - VTableAddressPoint = - builder.createBitcast(loc, VTableAddressPoint, VTablePtrTy); + VTableField = builder.createElementBitCast(loc, VTableField, + VTableAddressPoint.getType()); builder.createStore(loc, VTableAddressPoint, VTableField); - - // TODO(cir): handle anything TBAA related? assert(!UnimplementedFeature::tbaa()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 348e7328cdcb..646ff0e98fff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -546,10 +546,12 @@ CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base, .getAddressPoint(Base); auto &builder = CGM.getBuilder(); - auto ptrTy = builder.getPointerTo(vtable.getSymType()); + auto vtablePtrTy = builder.getVirtualFnPtrType(/*isVarArg=*/true); + return builder.create( - CGM.getLoc(VTableClass->getSourceRange()), ptrTy, vtable.getSymName(), - AddressPoint.VTableIndex, AddressPoint.AddressPointIndex); + CGM.getLoc(VTableClass->getSourceRange()), vtablePtrTy, + vtable.getSymName(), AddressPoint.VTableIndex, + AddressPoint.AddressPointIndex); } mlir::Value CIRGenItaniumCXXABI::getVTableAddressPointInStructor( diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 6a41206074a6..626bf5bbf327 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1186,13 +1186,19 @@ VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { if (!isa(op)) return emitOpError("'") << getName() << "' does not reference a valid cir.global"; + return success(); +} - mlir::Type symTy = op.getSymType(); - auto resultType = getAddr().getType().dyn_cast(); - if (!resultType || symTy != resultType.getPointee()) - return emitOpError("result type pointee type '") - << resultType.getPointee() << "' does not match type " << symTy - << " of the global @" << getName(); +LogicalResult cir::VTableAddrPointOp::verify() { + auto resultType = getAddr().getType(); + auto fnTy = mlir::FunctionType::get( + getContext(), {}, {mlir::IntegerType::get(getContext(), 32)}); + auto resTy = mlir::cir::PointerType::get( + getContext(), mlir::cir::PointerType::get(getContext(), fnTy)); + + if (resultType != resTy) + return emitOpError("result type must be '") + << resTy << "', but provided result type is '" << resultType << "'"; return success(); } diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 78488128b206..1fe86da5aee9 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -39,10 +39,9 @@ class B : public A // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr // CHECK: cir.call @_ZN1AC2Ev(%2) : (!cir.ptr) -> () -// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr +// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr i32>> // CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr i32>>> -// CHECK: %5 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr i32>> -// CHECK: cir.store %5, %4 : !cir.ptr i32>>, cir.ptr i32>>> +// CHECK: cir.store %3, %4 : !cir.ptr i32>>, cir.ptr i32>>> // CHECK: cir.return // CHECK: } @@ -68,10 +67,9 @@ class B : public A // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : cir.ptr +// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : cir.ptr i32>> // CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr i32>>> -// CHECK: %4 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr i32>> -// CHECK: cir.store %4, %3 : !cir.ptr i32>>, cir.ptr i32>>> +// CHECK: cir.store %2, %3 : !cir.ptr i32>>, cir.ptr i32>>> // CHECK: cir.return // CHECK: } From 01e58535446a5e5f3317e79e9161fd405d77ffea Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Apr 2023 15:25:15 -0700 Subject: [PATCH 0901/2301] [CIR][CIRGen] Add #cir.vtable to represent vtable related data --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 41 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 2 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 11 ++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 17 +++++--- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- 5 files changed, 63 insertions(+), 10 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 4c4342289efd..78f3a862dc17 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -236,6 +236,47 @@ def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> { }]; } +//===----------------------------------------------------------------------===// +// VTableAttr +//===----------------------------------------------------------------------===// + +def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> { + let summary = "Represents a C++ vtable"; + let description = [{ + Wraps a #cir.const_struct containing vtable data. + + Example: + ``` + cir.global linkonce_odr @_ZTV1B = #cir.vtable<< + [#cir.const_array<[#cir.null : !cir.ptr, + #cir.global_view<@_ZTI1B> : !cir.ptr, + #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, + #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, + #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr] + : !cir.array x 5>> + : !cir.array x 5>]>> + : !cir.struct<"", !cir.array x 5>> + ``` + }]; + + // `info` must be a const struct with one element, containing an array of + // vtable information. + let parameters = (ins AttributeSelfTypeParameter<"">:$type, + "ConstStructAttr":$vtable_data); + + let builders = [ + AttrBuilderWithInferredContext<(ins "Type":$type, + "ConstStructAttr":$vtable_data), [{ + return $_get(type.getContext(), type, vtable_data); + }]> + ]; + + // let genVerifyDecl = 1; + let assemblyFormat = [{ + `<` $vtable_data `>` + }]; +} + //===----------------------------------------------------------------------===// // AST Wrappers //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 646ff0e98fff..8c18f768c36c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1467,7 +1467,7 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, CGVT.createVTableInitializer(components, VTLayout, RTTI, mlir::cir::isLocalLinkage(Linkage)); - components.finishAndSetAsInitializer(VTable); + components.finishAndSetAsInitializer(VTable, /*forVtable=*/true); // Set the correct linkage. VTable.setLinkage(Linkage); diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h index 2cff288bd8f4..6147c22d0a54 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -398,10 +398,17 @@ class ConstantAggregateBuilderTemplateBase /// Given that this builder was created by beginning an array or struct /// directly on a ConstantInitBuilder, finish the array/struct and /// set it as the initializer of the given global variable. - void finishAndSetAsInitializer(mlir::cir::GlobalOp global) { + void finishAndSetAsInitializer(mlir::cir::GlobalOp global, + bool forVTable = false) { assert(!this->Parent && "finishing non-root builder"); + mlir::Attribute init = asImpl().finishImpl(global.getContext()); + auto initCSA = init.dyn_cast(); + assert(initCSA && + "expected #cir.const_struct attribute to represent vtable data"); return this->Builder.setGlobalInitializer( - global, asImpl().finishImpl(global.getContext())); + global, forVTable + ? mlir::cir::VTableAttr::get(initCSA.getType(), initCSA) + : init); } /// Given that this builder was created by beginning an array or struct diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 626bf5bbf327..7f10f6a6d0ad 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -179,11 +179,10 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return op->emitOpError("symbolref expects pointer type"); } - if (attrType.isa()) - return success(); - if (attrType.isa()) - return success(); - if (attrType.isa()) + if (attrType.isa() || + attrType.isa() || + attrType.isa() || + attrType.isa()) return success(); assert(attrType.isa() && "What else could we be looking at here?"); @@ -1183,9 +1182,15 @@ VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // referenced cir.global or cir.func op. auto op = dyn_cast_or_null( symbolTable.lookupNearestSymbolFrom(*this, getNameAttr())); - if (!isa(op)) + if (!op) return emitOpError("'") << getName() << "' does not reference a valid cir.global"; + auto init = op.getInitialValue(); + if (!init) + return success(); + if (!isa(*init)) + return emitOpError("Expected #cir.vtable in initializer for global '") + << getName() << "'"; return success(); } diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 1fe86da5aee9..b620c4eadd3a 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -74,7 +74,7 @@ class B : public A // CHECK: } // vtable for B -// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.const_struct<[#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr] : !cir.array x 5>> : !cir.array x 5>]> : ![[VTableTypeA]] {alignment = 8 : i64} +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<<[#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr] : !cir.array x 5>> : !cir.array x 5>]>> : ![[VTableTypeA]] // vtable for __cxxabiv1::__si_class_type_info // CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> From b19675f5925160f1ce7552701515b6886bca50b6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Apr 2023 15:46:58 -0700 Subject: [PATCH 0902/2301] [CIR][NFC] Make #cir.const_array more terse --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 13 ++----------- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 5 ----- clang/test/CIR/CodeGen/globals.c | 6 +++--- clang/test/CIR/CodeGen/globals.cpp | 14 +++++++------- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/IR/global.cir | 2 +- 6 files changed, 14 insertions(+), 28 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 78f3a862dc17..d9ac49b1e53b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -65,14 +65,6 @@ def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> An CIR array attribute is an array of literals of the specified attr types. }]; - // `$type` is the `self` type of the attribute (i.e. the type of the - // Attribute itself). - // - // `arrayAttr` is the actual attribute array with elements for this constant - // array, there's yet no need to own these elements. - // - // TODO: create a trait for ArrayAttrOrStringAttr value instead of relying - // on verifier. let parameters = (ins AttributeSelfTypeParameter<"">:$type, "Attribute":$value); @@ -252,9 +244,8 @@ def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> { #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, - #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr] - : !cir.array x 5>> - : !cir.array x 5>]>> + #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> + : !cir.array x 5>]>> : !cir.struct<"", !cir.array x 5>> ``` }]; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7f10f6a6d0ad..6de7d07cfa5b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1738,11 +1738,6 @@ ::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser, void ConstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << "<"; printer.printStrippedAttrOrType(getValue()); - if (getValue().isa()) { - printer << ' ' << ":"; - printer << ' '; - printer.printStrippedAttrOrType(getType()); - } printer << ">"; } diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 3f06c40ce09e..185aac9e086f 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -9,8 +9,8 @@ char string[] = "whatnow"; // CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array int sint[] = {123, 456, 789}; -// CHECK: cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32] : !cir.array> : !cir.array +// CHECK: cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32]> : !cir.array int filler_sint[4] = {1, 2}; // Ensure missing elements are zero-initialized. -// CHECK: cir.global external @filler_sint = #cir.const_array<[1 : i32, 2 : i32, 0 : i32, 0 : i32] : !cir.array> : !cir.array +// CHECK: cir.global external @filler_sint = #cir.const_array<[1 : i32, 2 : i32, 0 : i32, 0 : i32]> : !cir.array int excess_sint[2] = {1, 2, 3, 4}; // Ensure excess elements are ignored. -// CHECK: cir.global external @excess_sint = #cir.const_array<[1 : i32, 2 : i32] : !cir.array> : !cir.array +// CHECK: cir.global external @excess_sint = #cir.const_array<[1 : i32, 2 : i32]> : !cir.array diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index bfa90a61d750..205c1ef56491 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -36,8 +36,8 @@ int use_func() { return func(); } // CHECK-NEXT: cir.global external @y = 3.400000e+00 : f32 // CHECK-NEXT: cir.global external @w = 4.300000e+00 : f64 // CHECK-NEXT: cir.global external @x = 51 : i8 -// CHECK-NEXT: cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> -// CHECK-NEXT: cir.global external @alpha = #cir.const_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8] : !cir.array> +// CHECK-NEXT: cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array +// CHECK-NEXT: cir.global external @alpha = #cir.const_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8]> : !cir.array // CHECK-NEXT: cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global external @s = @".str": !cir.ptr @@ -79,15 +79,15 @@ int use_func() { return func(); } char string[] = "whatnow"; -// CHECK: cir.global external @string = #cir.const_array<[119 : i8, 104 : i8, 97 : i8, 116 : i8, 110 : i8, 111 : i8, 119 : i8, 0 : i8] : !cir.array> : !cir.array +// CHECK: cir.global external @string = #cir.const_array<[119 : i8, 104 : i8, 97 : i8, 116 : i8, 110 : i8, 111 : i8, 119 : i8, 0 : i8]> : !cir.array unsigned uint[] = {255}; -// CHECK: cir.global external @uint = #cir.const_array<[255 : i32] : !cir.array> : !cir.array +// CHECK: cir.global external @uint = #cir.const_array<[255 : i32]> : !cir.array short sshort[] = {11111, 22222}; -// CHECK: cir.global external @sshort = #cir.const_array<[11111 : i16, 22222 : i16] : !cir.array> : !cir.array +// CHECK: cir.global external @sshort = #cir.const_array<[11111 : i16, 22222 : i16]> : !cir.array int sint[] = {123, 456, 789}; -// CHECK: cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32] : !cir.array> : !cir.array +// CHECK: cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32]> : !cir.array long long ll[] = {999999999, 0, 0, 0}; -// CHECK: cir.global external @ll = #cir.const_array<[999999999, 0, 0, 0] : !cir.array> : !cir.array +// CHECK: cir.global external @ll = #cir.const_array<[999999999, 0, 0, 0]> : !cir.array void get_globals() { // CHECK: cir.func @_Z11get_globalsv() diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index b620c4eadd3a..0f4012d4e391 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -74,7 +74,7 @@ class B : public A // CHECK: } // vtable for B -// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<<[#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr] : !cir.array x 5>> : !cir.array x 5>]>> : ![[VTableTypeA]] +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<<[#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>]>> : ![[VTableTypeA]] // vtable for __cxxabiv1::__si_class_type_info // CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 8fed3d5bca4c..101b6013189b 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -32,7 +32,7 @@ module { } // CHECK: cir.global external @a = 3 : i32 -// CHECK: cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> +// CHECK: cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array // CHECK: cir.global external @b = #cir.const_array<"example\00" : !cir.array> // CHECK: cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} // CHECK: cir.global "private" internal @c : i32 From 4a56f3e37dbf03485fe9f37e4de88c62263dd15a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Apr 2023 16:34:17 -0700 Subject: [PATCH 0903/2301] [CIR][CIRGen][NFC] Make #cir.typeinfo use an underlying #cir.const_struct --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 12 +++---- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 23 +++++++++----- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 31 +++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 29 ++++------------- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/IR/global.cir | 7 +++-- clang/test/CIR/IR/invalid.cir | 3 +- 7 files changed, 64 insertions(+), 43 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index d9ac49b1e53b..046715fa0208 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -113,8 +113,7 @@ def ConstStructAttr : CIR_Attr<"ConstStruct", "const_struct", `>` }]; - // let hasCustomAssemblyFormat = 1; - // let genVerifyDecl = 1; + let genVerifyDecl = 1; } //===----------------------------------------------------------------------===// @@ -208,14 +207,13 @@ def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> { ``` }]; - // FIXME: move from ArrayAttr to a ConstStructAttr once it lands? let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "ArrayAttr":$info); + "ConstStructAttr":$typeinfo_data); let builders = [ AttrBuilderWithInferredContext<(ins "Type":$type, - "ArrayAttr":$info), [{ - return $_get(type.getContext(), type, info); + "ConstStructAttr":$typeinfo_data), [{ + return $_get(type.getContext(), type, typeinfo_data); }]> ]; @@ -224,7 +222,7 @@ def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> { let genVerifyDecl = 1; let assemblyFormat = [{ - `<` $info `:` $type `>` + `<` $typeinfo_data `>` }]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index d404150b5822..d609f57615d2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -101,16 +101,23 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return mlir::cir::ConstArrayAttr::get(arrayTy, attrs); } - mlir::cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) { + mlir::cir::ConstStructAttr getAnonConstStruct(mlir::ArrayAttr arrayAttr, + bool packed = false) { + assert(!packed && "NYI"); llvm::SmallVector members; - for (auto &f : fieldsAttr) { - auto gva = f.dyn_cast(); - assert(gva && "expected #cir.global_view attribute for element"); - members.push_back(gva.getType()); + for (auto &f : arrayAttr) { + auto ta = f.dyn_cast(); + assert(ta && "expected typed attribute member"); + members.push_back(ta.getType()); } - auto structType = mlir::cir::StructType::get(getContext(), members, "", - /*body=*/true); - return mlir::cir::TypeInfoAttr::get(structType, fieldsAttr); + auto sTy = mlir::cir::StructType::get(arrayAttr.getContext(), members, "", + /*body=*/true); + return mlir::cir::ConstStructAttr::get(sTy, arrayAttr); + } + + mlir::cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) { + auto anonStruct = getAnonConstStruct(fieldsAttr); + return mlir::cir::TypeInfoAttr::get(anonStruct.getType(), anonStruct); } // diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index e0c88eb2fd21..7eebc063fa44 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -96,6 +96,37 @@ static ParseResult parseConstStructMembers(::mlir::AsmParser &parser, return success(); } +LogicalResult ConstStructAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + mlir::Type type, ArrayAttr members) { + auto sTy = type.dyn_cast_or_null(); + if (!sTy) { + emitError() << "expected !cir.struct type"; + return failure(); + } + + if (sTy.getMembers().size() != members.size()) { + emitError() << "number of elements must match"; + return failure(); + } + + unsigned attrIdx = 0; + for (auto &member : sTy.getMembers()) { + auto m = members[attrIdx].dyn_cast_or_null(); + if (!m) { + emitError() << "expected mlir::TypedAttr attribute"; + return failure(); + } + if (member != m.getType()) { + emitError() << "input element type must match result element type"; + return failure(); + } + attrIdx++; + } + + return success(); +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 6de7d07cfa5b..2b859b69e9f0 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1822,32 +1822,15 @@ LogicalResult ASTRecordDeclAttr::verify( LogicalResult TypeInfoAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ::mlir::Type type, ArrayAttr info) { - auto sTy = type.dyn_cast_or_null(); - if (!sTy) { - emitError() << "expected !cir.struct type"; - return failure(); - } - - if (sTy.getMembers().size() != info.size()) { - emitError() << "number of typeinfo elements must match result type"; + ::mlir::Type type, ConstStructAttr info) { + for (auto &member : info.getMembers()) { + auto gview = member.dyn_cast_or_null(); + if (gview) + continue; + emitError() << "expected GlobalViewAttr attribute"; return failure(); } - unsigned attrIdx = 0; - for (auto &member : sTy.getMembers()) { - auto gview = info[attrIdx].dyn_cast_or_null(); - if (!gview) { - emitError() << "expected GlobalViewAttr attribute"; - return failure(); - } - if (member != gview.getType()) { - emitError() << "typeinfo element must match result element type"; - return failure(); - } - attrIdx++; - } - return success(); } diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 0f4012d4e391..77579d42f382 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -86,7 +86,7 @@ class B : public A // CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr // typeinfo for B -// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<[#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr] : ![[TypeInfoB]]> : ![[TypeInfoB]] {alignment = 8 : i64} +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<<[#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr]>> : ![[TypeInfoB]] // Checks for dtors in dtors.cpp diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 101b6013189b..0858fb16cdc4 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -1,4 +1,5 @@ // RUN: cir-tool %s | FileCheck %s +// XFAIL: * module { cir.global external @a = 3 : i32 @@ -23,11 +24,11 @@ module { cir.global "private" constant external @type_info_A : !cir.ptr cir.global constant external @type_info_name_B = #cir.const_array<"1B\00" : !cir.array> - cir.global external @type_info_B = #cir.typeinfo< + cir.global external @type_info_B = #cir.typeinfo<< [#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@type_info_name_B> : !cir.ptr, - #cir.global_view<@type_info_A> : !cir.ptr] - : !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> + #cir.global_view<@type_info_A> : !cir.ptr]>> + : !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr > } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 9497f1107486..a33c4e3d3abf 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1,5 +1,6 @@ // Test attempts to build bogus CIR // RUN: cir-tool %s -verify-diagnostics -split-input-file +// XFAIL: * // expected-error@+2 {{'cir.const' op nullptr expects pointer type}} cir.func @p0() { @@ -275,4 +276,4 @@ module { [#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr] : !cir.struct<"", !cir.ptr> > -} // expected-error {{'cir.global' expected constant attribute to match type}} \ No newline at end of file +} // expected-error {{'cir.global' expected constant attribute to match type}} From c0cea0a94bbcc647f0225dc6a633ac40dd980e2b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 27 Apr 2023 12:06:12 -0700 Subject: [PATCH 0904/2301] [CIR] Improve verifiers, parsing and printing for several CIR attrs --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 41 ++++++++------ clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 14 ++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 54 +++++++++++++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 10 ++-- clang/test/CIR/CodeGen/vtable-rtti.cpp | 4 +- clang/test/CIR/IR/global.cir | 9 ++-- clang/test/CIR/IR/invalid.cir | 8 ++- 7 files changed, 102 insertions(+), 38 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 046715fa0208..aa8b5f03a2d8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -66,14 +66,14 @@ def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> }]; let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "Attribute":$value); + "Attribute":$elts); // Define a custom builder for the type; that removes the need to pass // in an MLIRContext instance, as it can be infered from the `type`. let builders = [ AttrBuilderWithInferredContext<(ins "mlir::cir::ArrayType":$type, - "Attribute":$value), [{ - return $_get(type.getContext(), type, value); + "Attribute":$elts), [{ + return $_get(type.getContext(), type, elts); }]> ]; @@ -95,6 +95,13 @@ def ConstStructAttr : CIR_Attr<"ConstStruct", "const_struct", Effectively supports "struct-like" constants. It's must be built from an `mlir::ArrayAttr `instance where each elements is a typed attribute (`mlir::TypedAttribute`). + + Example: + ``` + cir.global external @rgb2 = #cir.const_struct<{0 : i8, + 5 : i64, #cir.null : !cir.ptr + }> : !cir.struct<"", i8, i64, !cir.ptr> + ``` }]; let parameters = (ins AttributeSelfTypeParameter<"">:$type, @@ -200,10 +207,11 @@ def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> { Example: ``` - cir.global "private" constant external @type_info_A : !cir.ptr - cir.global external @type_info_B = #cir.typeinfo< - [#cir.global_view<@type_info_A> : !cir.ptr] : !cir.struct<"", !cir.ptr> - > + cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr + + cir.global external @type_info_B = #cir.typeinfo<< + {#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr} + >> : !cir.struct<"", !cir.ptr> ``` }]; @@ -220,7 +228,6 @@ def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> { // Checks struct element types should match the array for every equivalent // element type. let genVerifyDecl = 1; - let assemblyFormat = [{ `<` $typeinfo_data `>` }]; @@ -238,17 +245,17 @@ def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> { Example: ``` cir.global linkonce_odr @_ZTV1B = #cir.vtable<< - [#cir.const_array<[#cir.null : !cir.ptr, - #cir.global_view<@_ZTI1B> : !cir.ptr, - #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, - #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, - #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> - : !cir.array x 5>]>> - : !cir.struct<"", !cir.array x 5>> + {#cir.const_array<[#cir.null : !cir.ptr, + #cir.global_view<@_ZTI1B> : !cir.ptr, + #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, + #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, + #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> + : !cir.array x 5>}>> + : !cir.struct<"", !cir.array x 5>> ``` }]; - // `info` must be a const struct with one element, containing an array of + // `vtable_data` is const struct with one element, containing an array of // vtable information. let parameters = (ins AttributeSelfTypeParameter<"">:$type, "ConstStructAttr":$vtable_data); @@ -260,7 +267,7 @@ def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> { }]> ]; - // let genVerifyDecl = 1; + let genVerifyDecl = 1; let assemblyFormat = [{ `<` $vtable_data `>` }]; diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 7eebc063fa44..4d29e8618713 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -63,7 +63,15 @@ void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const { static void printConstStructMembers(mlir::AsmPrinter &p, mlir::Type type, mlir::ArrayAttr members) { - p << members; + p << "{"; + unsigned i = 0, e = members.size(); + while (i < e) { + p << members[i]; + if (e > 0 && i < e - 1) + p << ","; + i++; + } + p << "}"; } static ParseResult parseConstStructMembers(::mlir::AsmParser &parser, @@ -118,7 +126,9 @@ LogicalResult ConstStructAttr::verify( return failure(); } if (member != m.getType()) { - emitError() << "input element type must match result element type"; + emitError() << "element at index " << attrIdx << " has type " + << m.getType() << " but return type for this element is " + << member; return failure(); } attrIdx++; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 2b859b69e9f0..ee476eb26743 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1737,7 +1737,7 @@ ::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser, void ConstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << "<"; - printer.printStrippedAttrOrType(getValue()); + printer.printStrippedAttrOrType(getElts()); printer << ">"; } @@ -1822,8 +1822,14 @@ LogicalResult ASTRecordDeclAttr::verify( LogicalResult TypeInfoAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ::mlir::Type type, ConstStructAttr info) { - for (auto &member : info.getMembers()) { + ::mlir::Type type, ConstStructAttr typeinfoData) { + + if (mlir::cir::ConstStructAttr::verify(emitError, type, + typeinfoData.getMembers()) + .failed()) + return failure(); + + for (auto &member : typeinfoData.getMembers()) { auto gview = member.dyn_cast_or_null(); if (gview) continue; @@ -1834,6 +1840,48 @@ LogicalResult TypeInfoAttr::verify( return success(); } +LogicalResult +VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::mlir::Type type, ConstStructAttr vtableData) { + auto sTy = type.dyn_cast_or_null(); + if (!sTy) { + emitError() << "expected !cir.struct type result"; + return failure(); + } + if (sTy.getMembers().size() != 1 || vtableData.getMembers().size() != 1) { + emitError() << "expected struct type with only one subtype"; + return failure(); + } + + auto arrayTy = sTy.getMembers()[0].dyn_cast(); + auto constArrayAttr = + vtableData.getMembers()[0].dyn_cast(); + if (!arrayTy || !constArrayAttr) { + emitError() << "expected struct type with one array element"; + return failure(); + } + + if (mlir::cir::ConstStructAttr::verify(emitError, type, + vtableData.getMembers()) + .failed()) + return failure(); + + LogicalResult eltTypeCheck = success(); + if (auto arrayElts = constArrayAttr.getElts().dyn_cast()) { + arrayElts.walkImmediateSubElements( + [&](Attribute attr) { + if (attr.isa() || attr.isa()) + return; + emitError() << "expected GlobalViewAttr attribute"; + eltTypeCheck = failure(); + }, + [&](Type type) {}); + return eltTypeCheck; + } + + return success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 15b64db7e27c..3ffe0f51613b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -518,7 +518,9 @@ mlir::DenseElementsAttr convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr) { auto type = attr.getType().cast().getEltType(); auto values = llvm::SmallVector{}; - for (auto element : attr.getValue().cast()) + auto arrayAttr = attr.getElts().dyn_cast(); + assert(arrayAttr && "expected array here"); + for (auto element : arrayAttr) values.push_back(element.cast().getInt()); return mlir::DenseElementsAttr::get( mlir::RankedTensorType::get({(int64_t)values.size()}, type), @@ -618,9 +620,9 @@ class CIRGlobalOpLowering // Initializer is a constant array: convert it to a compatible llvm init. if (auto constArr = init.value().dyn_cast()) { - if (auto attr = constArr.getValue().dyn_cast()) { + if (auto attr = constArr.getElts().dyn_cast()) { init = rewriter.getStringAttr(attr.getValue()); - } else if (auto attr = constArr.getValue().dyn_cast()) { + } else if (auto attr = constArr.getElts().dyn_cast()) { if (!(init = lowerConstArrayAttr(constArr))) { op.emitError() << "unsupported lowering for #cir.const_array with element type " @@ -630,7 +632,7 @@ class CIRGlobalOpLowering } else { op.emitError() << "unsupported lowering for #cir.const_array with value " - << constArr.getValue(); + << constArr.getElts(); return mlir::failure(); } } else if (llvm::isa(init.value())) { diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 77579d42f382..7016d1037614 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -74,7 +74,7 @@ class B : public A // CHECK: } // vtable for B -// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<<[#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>]>> : ![[VTableTypeA]] +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<<{#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}>> : ![[VTableTypeA]] // vtable for __cxxabiv1::__si_class_type_info // CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> @@ -86,7 +86,7 @@ class B : public A // CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr // typeinfo for B -// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<<[#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr]>> : ![[TypeInfoB]] +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr,#cir.global_view<@_ZTS1B> : !cir.ptr,#cir.global_view<@_ZTI1A> : !cir.ptr}>> : ![[TypeInfoB]] // Checks for dtors in dtors.cpp diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 0858fb16cdc4..1a1d126a18a7 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -1,5 +1,4 @@ // RUN: cir-tool %s | FileCheck %s -// XFAIL: * module { cir.global external @a = 3 : i32 @@ -24,10 +23,10 @@ module { cir.global "private" constant external @type_info_A : !cir.ptr cir.global constant external @type_info_name_B = #cir.const_array<"1B\00" : !cir.array> - cir.global external @type_info_B = #cir.typeinfo<< - [#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, - #cir.global_view<@type_info_name_B> : !cir.ptr, - #cir.global_view<@type_info_A> : !cir.ptr]>> + cir.global external @type_info_B = #cir.typeinfo<<{ + #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, + #cir.global_view<@type_info_name_B> : !cir.ptr, + #cir.global_view<@type_info_A> : !cir.ptr}>> : !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr > } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index a33c4e3d3abf..f5f3c1667162 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1,6 +1,5 @@ // Test attempts to build bogus CIR // RUN: cir-tool %s -verify-diagnostics -split-input-file -// XFAIL: * // expected-error@+2 {{'cir.const' op nullptr expects pointer type}} cir.func @p0() { @@ -272,8 +271,7 @@ module { // rid of this somehow in favor of clarity? cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr - cir.global external @type_info_B = #cir.typeinfo< // expected-error {{typeinfo element must match result element type}} - [#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr] - : !cir.struct<"", !cir.ptr> - > + cir.global external @type_info_B = #cir.typeinfo<<{ // expected-error {{element at index 0 has type '!cir.ptr' but return type for this element is '!cir.ptr'}} + #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}>> + : !cir.struct<"", !cir.ptr> } // expected-error {{'cir.global' expected constant attribute to match type}} From 088e7fec074ea3fd4cc8c8bb08f899ba716221e6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 27 Apr 2023 17:28:14 -0700 Subject: [PATCH 0905/2301] [CIR][CIRGen][NFC] Add a CIRGenTypeCache and use wherever possible --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 15 +-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 9 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 30 +++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 +- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 128 +++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 6 +- 7 files changed, 177 insertions(+), 17 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenTypeCache.h diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index d609f57615d2..5dfe1ec6e7a8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -10,6 +10,7 @@ #define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H #include "Address.h" +#include "CIRGenTypeCache.h" #include "UnimplementedFeatureGuarding.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" @@ -23,12 +24,14 @@ namespace cir { class CIRGenFunction; class CIRGenBuilderTy : public mlir::OpBuilder { + const CIRGenTypeCache &typeCache; bool IsFPConstrained = false; fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict; llvm::RoundingMode DefaultConstrainedRounding = llvm::RoundingMode::Dynamic; public: - CIRGenBuilderTy(mlir::MLIRContext &C) : mlir::OpBuilder(&C) {} + CIRGenBuilderTy(mlir::MLIRContext &C, const CIRGenTypeCache &tc) + : mlir::OpBuilder(&C), typeCache(tc) {} // // Floating point specific helpers @@ -124,9 +127,9 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Type helpers // ------------ // - mlir::Type getInt8Ty() { return mlir::IntegerType::get(getContext(), 8); } - mlir::Type getInt32Ty() { return mlir::IntegerType::get(getContext(), 32); } - mlir::Type getInt64Ty() { return mlir::IntegerType::get(getContext(), 64); } + mlir::Type getInt8Ty() { return typeCache.Int8Ty; } + mlir::Type getInt32Ty() { return typeCache.Int32Ty; } + mlir::Type getInt64Ty() { return typeCache.Int64Ty; } mlir::cir::BoolType getBoolTy() { return ::mlir::cir::BoolType::get(getContext()); } @@ -140,10 +143,10 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Fetch the type representing a pointer to integer values. mlir::cir::PointerType getInt8PtrTy(unsigned AddrSpace = 0) { - return mlir::cir::PointerType::get(getContext(), getInt8Ty()); + return typeCache.Int8PtrTy; } mlir::cir::PointerType getInt32PtrTy(unsigned AddrSpace = 0) { - return mlir::cir::PointerType::get(getContext(), getInt32Ty()); + return mlir::cir::PointerType::get(getContext(), typeCache.Int32Ty); } mlir::cir::PointerType getPointerTo(mlir::Type ty, unsigned addressSpace = 0) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index a57ee2379618..b9b5560c9d22 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -28,8 +28,9 @@ using namespace mlir::cir; CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, bool suppressNewContext) - : CGM{CGM}, builder(builder), SanOpts(CGM.getLangOpts().Sanitize), - CurFPFeatures(CGM.getLangOpts()), ShouldEmitLifetimeMarkers(false) { + : CIRGenTypeCache(CGM), CGM{CGM}, builder(builder), + SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()), + ShouldEmitLifetimeMarkers(false) { if (!suppressNewContext) CGM.getCXXABI().getMangleContext().startNewFunction(); EHStack.setCGF(this); @@ -1134,9 +1135,7 @@ void CIRGenFunction::buildNullInitialization(mlir::Location loc, } // Cast the dest ptr to the appropriate i8 pointer type. - // FIXME: add a CodeGenTypeCache thing for CIR. - auto intTy = DestPtr.getElementType().dyn_cast(); - if (intTy && intTy.getWidth() == 8) { + if (DestPtr.getElementType() == Int8Ty) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 35f958d79e48..7f4322580d01 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -16,6 +16,7 @@ #include "CIRGenBuilder.h" #include "CIRGenCall.h" #include "CIRGenModule.h" +#include "CIRGenTypeCache.h" #include "CIRGenValue.h" #include "EHScopeStack.h" @@ -52,7 +53,7 @@ namespace cir { enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; struct CGCoroData; -class CIRGenFunction { +class CIRGenFunction : public CIRGenTypeCache { public: CIRGenModule &CGM; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 767ef5f9027d..4ef3e177e91d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -90,11 +90,39 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, const clang::CodeGenOptions &CGO, DiagnosticsEngine &Diags) - : builder(context), astCtx(astctx), langOpts(astctx.getLangOpts()), + : builder(context, *this), astCtx(astctx), langOpts(astctx.getLangOpts()), codeGenOpts(CGO), theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, VTables{*this} { + + // Initialize the type cache. + VoidTy = ::mlir::IntegerType::get(builder.getContext(), 8); + Int8Ty = ::mlir::IntegerType::get(builder.getContext(), 8); + Int16Ty = ::mlir::IntegerType::get(builder.getContext(), 16); + Int32Ty = ::mlir::IntegerType::get(builder.getContext(), 32); + Int64Ty = ::mlir::IntegerType::get(builder.getContext(), 64); + // TODO: HalfTy + // TODO: BFloatTy + FloatTy = builder.getF32Type(); + DoubleTy = builder.getF64Type(); + // TODO: PointerWidthInBits + // TODO: PointerAlignInBytes + // TODO: SizeSizeInBytes + // TODO: IntAlignInBytes + CharTy = ::mlir::IntegerType::get(builder.getContext(), + astCtx.getTargetInfo().getCharWidth()); + IntTy = ::mlir::IntegerType::get(builder.getContext(), + astCtx.getTargetInfo().getIntWidth()); + IntPtrTy = ::mlir::IntegerType::get( + builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth()); + Int8PtrTy = builder.getPointerTo(Int8Ty); + Int8PtrPtrTy = builder.getPointerTo(Int8PtrTy); + // TODO: AllocaInt8PtrTy + // TODO: GlobalsInt8PtrTy + // TODO: ConstGlobalsPtrTy + // TODO: ASTAllocaAddressSpace + mlir::cir::sob::SignedOverflowBehavior sob; switch (langOpts.getSignedOverflowBehavior()) { case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Defined: diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index f89f95218ce2..33767d44688c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -14,6 +14,7 @@ #define LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H #include "CIRGenBuilder.h" +#include "CIRGenTypeCache.h" #include "CIRGenTypes.h" #include "CIRGenVTables.h" #include "CIRGenValue.h" @@ -53,7 +54,7 @@ enum ForDefinition_t : bool { NotForDefinition = false, ForDefinition = true }; /// This will emit operations that are specific to C(++)/ObjC(++) language, /// preserving the semantics of the language and (hopefully) allow to perform /// accurate analysis and transformation based on these high level semantics. -class CIRGenModule { +class CIRGenModule : public CIRGenTypeCache { CIRGenModule(CIRGenModule &) = delete; CIRGenModule &operator=(CIRGenModule &) = delete; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h new file mode 100644 index 000000000000..7c8e6e3914b5 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -0,0 +1,128 @@ +//===--- CIRGenTypeCache.h - Commonly used LLVM types and info -*- C++ --*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This structure provides a set of common types useful during CIR emission. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CODEGENTYPECACHE_H +#define LLVM_CLANG_LIB_CIR_CODEGENTYPECACHE_H + +#include "mlir/IR/Types.h" +#include "clang/AST/CharUnits.h" +#include "clang/Basic/AddressSpaces.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" + +namespace cir { + +/// This structure provides a set of types that are commonly used +/// during IR emission. It's initialized once in CodeGenModule's +/// constructor and then copied around into new CIRGenFunction's. +struct CIRGenTypeCache { + CIRGenTypeCache() {} + + /// void + mlir::Type VoidTy; + + /// i8, i16, i32, and i64 + mlir::Type Int8Ty, Int16Ty, Int32Ty, Int64Ty; + /// half, bfloat, float, double + // mlir::Type HalfTy, BFloatTy; + mlir::Type FloatTy, DoubleTy; + + /// int + mlir::Type IntTy; + + /// char + mlir::Type CharTy; + + /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size. + union { + mlir::Type IntPtrTy; + mlir::Type SizeTy; + mlir::Type PtrDiffTy; + }; + + /// void* in address space 0 + union { + mlir::cir::PointerType VoidPtrTy; + mlir::cir::PointerType Int8PtrTy; + }; + + /// void** in address space 0 + union { + mlir::cir::PointerType VoidPtrPtrTy; + mlir::cir::PointerType Int8PtrPtrTy; + }; + + /// void* in alloca address space + // union { + // mlir::cir::PointerType AllocaVoidPtrTy; + // mlir::cir::PointerType AllocaInt8PtrTy; + // }; + + /// void* in default globals address space + // union { + // mlir::cir::PointerType GlobalsVoidPtrTy; + // mlir::cir::PointerType GlobalsInt8PtrTy; + // }; + + /// void* in the address space for constant globals + // mlir::cir::PointerType ConstGlobalsPtrTy; + + /// The size and alignment of the builtin C type 'int'. This comes + /// up enough in various ABI lowering tasks to be worth pre-computing. + // union { + // unsigned char IntSizeInBytes; + // unsigned char IntAlignInBytes; + // }; + // clang::CharUnits getIntSize() const { + // return clang::CharUnits::fromQuantity(IntSizeInBytes); + // } + // clang::CharUnits getIntAlign() const { + // return clang::CharUnits::fromQuantity(IntAlignInBytes); + // } + + /// The width of a pointer into the generic address space. + // unsigned char PointerWidthInBits; + + /// The size and alignment of a pointer into the generic address space. + // union { + // unsigned char PointerAlignInBytes; + // unsigned char PointerSizeInBytes; + // }; + + /// The size and alignment of size_t. + // union { + // unsigned char SizeSizeInBytes; // sizeof(size_t) + // unsigned char SizeAlignInBytes; + // }; + + // clang::LangAS ASTAllocaAddressSpace; + + // clang::CharUnits getSizeSize() const { + // return clang::CharUnits::fromQuantity(SizeSizeInBytes); + // } + // clang::CharUnits getSizeAlign() const { + // return clang::CharUnits::fromQuantity(SizeAlignInBytes); + // } + // clang::CharUnits getPointerSize() const { + // return clang::CharUnits::fromQuantity(PointerSizeInBytes); + // } + // clang::CharUnits getPointerAlign() const { + // return clang::CharUnits::fromQuantity(PointerAlignInBytes); + // } + + // clang::LangAS getASTAllocaAddressSpace() const { + // return ASTAllocaAddressSpace; + // } +}; + +} // namespace cir + +#endif diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index e80792e6726c..ef2ded2f9f16 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -341,7 +341,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { llvm_unreachable("NYI"); case BuiltinType::Void: // TODO(cir): how should we model this? - ResultType = ::mlir::IntegerType::get(Builder.getContext(), 8); + ResultType = CGM.VoidTy; break; case BuiltinType::ObjCId: @@ -416,10 +416,10 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { ResultType = Builder.getBF16Type(); break; case BuiltinType::Float: - ResultType = Builder.getF32Type(); + ResultType = CGM.FloatTy; break; case BuiltinType::Double: - ResultType = Builder.getF64Type(); + ResultType = CGM.DoubleTy; break; case BuiltinType::LongDouble: case BuiltinType::Float128: From 184faf4d9c61176e8c194cd012dfe108617c5240 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 27 Apr 2023 15:40:44 -0700 Subject: [PATCH 0906/2301] [CIR][CIRGen] Plumbing for DerivedToBase cast and more calls to 'new' operator - Add initial logic to compute the offset for base class as part of upcast. - Build call to new operator - Boilterplate for handling delete operator when new fails. No testcase just yet. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 10 + clang/lib/CIR/CodeGen/CIRGenCall.h | 5 + clang/lib/CIR/CodeGen/CIRGenClass.cpp | 77 +++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 12 + clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 352 +++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 7 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 21 ++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 46 ++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 27 ++ clang/lib/CIR/CodeGen/CIRGenModule.h | 5 + 10 files changed, 525 insertions(+), 37 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index d95fdba01ed7..cf8dec1b17f8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1103,3 +1103,13 @@ CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { return arrangeFreeFunctionType(FTy.castAs()); } + +RValue CallArg::getRValue(CIRGenFunction &CGF, mlir::Location loc) const { + if (!HasLV) + return RV; + LValue Copy = CGF.makeAddrLValue(CGF.CreateMemTemp(Ty, loc), Ty); + CGF.buildAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, + LV.isVolatile()); + IsUsed = true; + return RValue::getAggregate(Copy.getAddress()); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 77ceccd67360..5808a30aa79b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -174,6 +174,11 @@ struct CallArg { : RV(rv), HasLV(false), IsUsed(false), Ty(ty) { (void)IsUsed; } + + /// \returns an independent RValue. If the CallArg contains an LValue, + /// a temporary copy is returned. + RValue getRValue(CIRGenFunction &CGF, mlir::Location loc) const; + bool hasLValue() const { return HasLV; } LValue getKnownLValue() const { diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index c8605197bbb8..fdff7b75e5eb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1145,3 +1145,80 @@ mlir::Value CIRGenFunction::GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, llvm_unreachable("NYI"); } } + +Address +CIRGenFunction::getAddressOfBaseClass(Address Value, + const CXXRecordDecl *Derived, + CastExpr::path_const_iterator PathBegin, + CastExpr::path_const_iterator PathEnd, + bool NullCheckValue, SourceLocation Loc) { + assert(PathBegin != PathEnd && "Base path should not be empty!"); + + CastExpr::path_const_iterator Start = PathBegin; + const CXXRecordDecl *VBase = nullptr; + + // Sema has done some convenient canonicalization here: if the + // access path involved any virtual steps, the conversion path will + // *start* with a step down to the correct virtual base subobject, + // and hence will not require any further steps. + if ((*Start)->isVirtual()) { + llvm_unreachable("NYI"); + } + + // Compute the static offset of the ultimate destination within its + // allocating subobject (the virtual base, if there is one, or else + // the "complete" object that we see). + CharUnits NonVirtualOffset = CGM.computeNonVirtualBaseClassOffset( + VBase ? VBase : Derived, Start, PathEnd); + + // If there's a virtual step, we can sometimes "devirtualize" it. + // For now, that's limited to when the derived type is final. + // TODO: "devirtualize" this for accesses to known-complete objects. + if (VBase && Derived->hasAttr()) { + llvm_unreachable("NYI"); + } + + // Get the base pointer type. + auto BaseValueTy = convertType((PathEnd[-1])->getType()); + assert(!UnimplementedFeature::addressSpace()); + // auto BasePtrTy = builder.getPointerTo(BaseValueTy); + // QualType DerivedTy = getContext().getRecordType(Derived); + // CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived); + + // If the static offset is zero and we don't have a virtual step, + // just do a bitcast; null checks are unnecessary. + if (NonVirtualOffset.isZero() && !VBase) { + llvm_unreachable("NYI"); + } + + // Skip over the offset (and the vtable load) if we're supposed to + // null-check the pointer. + if (NullCheckValue) { + llvm_unreachable("NYI"); + } + + if (sanitizePerformTypeCheck()) { + llvm_unreachable("NYI"); + } + + // Compute the virtual offset. + mlir::Value VirtualOffset{}; + if (VBase) { + llvm_unreachable("NYI"); + } + + // Apply both offsets. + Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset, + VirtualOffset, Derived, VBase); + // Cast to the destination type. + Value = builder.createElementBitCast(Value.getPointer().getLoc(), Value, + BaseValueTy); + + // Build a phi if we needed a null check. + if (NullCheckValue) { + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); + return Value; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index f38e467b00e0..30cdb84193ed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -598,6 +598,18 @@ Address CIRGenFunction::buildPointerWithAlignment(const Expr *E, // Nothing to do here... case CK_LValueToRValue: break; + + case CK_DerivedToBase: { + // TODO: Support accesses to members of base classes in TBAA. For now, we + // conservatively pretend that the complete object is of the base class + // type. + assert(!UnimplementedFeature::tbaa()); + Address Addr = buildPointerWithAlignment(CE->getSubExpr(), BaseInfo); + auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); + return getAddressOfBaseClass( + Addr, Derived, CE->path_begin(), CE->path_end(), + shouldNullCheckClassCastValue(CE), CE->getExprLoc()); + } } } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 57988ae6eb23..6cc9ea976e23 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -29,6 +29,11 @@ struct MemberCallInfo { }; } // namespace +static RValue buildNewDeleteCall(CIRGenFunction &CGF, + const FunctionDecl *CalleeDecl, + const FunctionProtoType *CalleeType, + const CallArgList &Args); + static MemberCallInfo commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD, mlir::Value This, mlir::Value ImplicitParam, @@ -322,38 +327,6 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); } -mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { - assert(0 && "not implemented"); -} - -RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor, - const CIRGenCallee &Callee, - mlir::Value This, QualType ThisTy, - mlir::Value ImplicitParam, - QualType ImplicitParamTy, - const CallExpr *CE) { - const CXXMethodDecl *DtorDecl = cast(Dtor.getDecl()); - - assert(!ThisTy.isNull()); - assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() && - "Pointer/Object mixup"); - - LangAS SrcAS = ThisTy.getAddressSpace(); - LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace(); - if (SrcAS != DstAS) { - llvm_unreachable("NYI"); - } - - CallArgList Args; - commonBuildCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam, - ImplicitParamTy, CE, Args, nullptr); - assert((CE || Dtor.getDecl()) && "expected source location provider"); - return buildCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee, - ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall, - CE ? getLoc(CE->getExprLoc()) - : getLoc(Dtor.getDecl()->getSourceRange())); -} - namespace { /// The parameters to pass to a usual operator delete. struct UsualDeleteParams { @@ -395,6 +368,319 @@ static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) { return Params; } +static mlir::Value buildCXXNewAllocSize(CIRGenFunction &CGF, + const CXXNewExpr *e, + unsigned minElements, + mlir::Value &numElements, + mlir::Value &sizeWithoutCookie) { + QualType type = e->getAllocatedType(); + + if (!e->isArray()) { + CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type); + sizeWithoutCookie = CGF.getBuilder().create( + CGF.getLoc(e->getSourceRange()), CGF.SizeTy, + mlir::IntegerAttr::get(CGF.SizeTy, typeSize.getQuantity())); + return sizeWithoutCookie; + } + + llvm_unreachable("NYI"); +} + +namespace { +/// A cleanup to call the given 'operator delete' function upon abnormal +/// exit from a new expression. Templated on a traits type that deals with +/// ensuring that the arguments dominate the cleanup if necessary. +template +class CallDeleteDuringNew final : public EHScopeStack::Cleanup { + /// Type used to hold llvm::Value*s. + typedef typename Traits::ValueTy ValueTy; + /// Type used to hold RValues. + typedef typename Traits::RValueTy RValueTy; + struct PlacementArg { + RValueTy ArgValue; + QualType ArgType; + }; + + unsigned NumPlacementArgs : 31; + unsigned PassAlignmentToPlacementDelete : 1; + const FunctionDecl *OperatorDelete; + ValueTy Ptr; + ValueTy AllocSize; + CharUnits AllocAlign; + + PlacementArg *getPlacementArgs() { + return reinterpret_cast(this + 1); + } + +public: + static size_t getExtraSize(size_t NumPlacementArgs) { + return NumPlacementArgs * sizeof(PlacementArg); + } + + CallDeleteDuringNew(size_t NumPlacementArgs, + const FunctionDecl *OperatorDelete, ValueTy Ptr, + ValueTy AllocSize, bool PassAlignmentToPlacementDelete, + CharUnits AllocAlign) + : NumPlacementArgs(NumPlacementArgs), + PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete), + OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize), + AllocAlign(AllocAlign) {} + + void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) { + assert(I < NumPlacementArgs && "index out of range"); + getPlacementArgs()[I] = {Arg, Type}; + } + + void Emit(CIRGenFunction &CGF, Flags flags) override { + const auto *FPT = OperatorDelete->getType()->castAs(); + CallArgList DeleteArgs; + + // The first argument is always a void* (or C* for a destroying operator + // delete for class type C). + DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0)); + + // Figure out what other parameters we should be implicitly passing. + UsualDeleteParams Params; + if (NumPlacementArgs) { + // A placement deallocation function is implicitly passed an alignment + // if the placement allocation function was, but is never passed a size. + Params.Alignment = PassAlignmentToPlacementDelete; + } else { + // For a non-placement new-expression, 'operator delete' can take a + // size and/or an alignment if it has the right parameters. + Params = getUsualDeleteParams(OperatorDelete); + } + + assert(!Params.DestroyingDelete && + "should not call destroying delete in a new-expression"); + + // The second argument can be a std::size_t (for non-placement delete). + if (Params.Size) + DeleteArgs.add(Traits::get(CGF, AllocSize), + CGF.getContext().getSizeType()); + + // The next (second or third) argument can be a std::align_val_t, which + // is an enum whose underlying type is std::size_t. + // FIXME: Use the right type as the parameter type. Note that in a call + // to operator delete(size_t, ...), we may not have it available. + if (Params.Alignment) { + llvm_unreachable("NYI"); + } + + // Pass the rest of the arguments, which must match exactly. + for (unsigned I = 0; I != NumPlacementArgs; ++I) { + auto Arg = getPlacementArgs()[I]; + DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType); + } + + // Call 'operator delete'. + buildNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs); + } +}; +} // namespace + +/// Enter a cleanup to call 'operator delete' if the initializer in a +/// new-expression throws. +static void EnterNewDeleteCleanup(CIRGenFunction &CGF, const CXXNewExpr *E, + Address NewPtr, mlir::Value AllocSize, + CharUnits AllocAlign, + const CallArgList &NewArgs) { + unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1; + + // If we're not inside a conditional branch, then the cleanup will + // dominate and we can do the easier (and more efficient) thing. + if (!CGF.isInConditionalBranch()) { + struct DirectCleanupTraits { + typedef mlir::Value ValueTy; + typedef RValue RValueTy; + static RValue get(CIRGenFunction &, ValueTy V) { return RValue::get(V); } + static RValue get(CIRGenFunction &, RValueTy V) { return V; } + }; + + typedef CallDeleteDuringNew DirectCleanup; + + DirectCleanup *Cleanup = CGF.EHStack.pushCleanupWithExtra( + EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(), + NewPtr.getPointer(), AllocSize, E->passAlignment(), AllocAlign); + for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) { + auto &Arg = NewArgs[I + NumNonPlacementArgs]; + Cleanup->setPlacementArg( + I, Arg.getRValue(CGF, CGF.getLoc(E->getSourceRange())), Arg.Ty); + } + + return; + } + + // Otherwise, we need to save all this stuff. + DominatingValue::saved_type SavedNewPtr = + DominatingValue::save(CGF, RValue::get(NewPtr.getPointer())); + DominatingValue::saved_type SavedAllocSize = + DominatingValue::save(CGF, RValue::get(AllocSize)); + + struct ConditionalCleanupTraits { + typedef DominatingValue::saved_type ValueTy; + typedef DominatingValue::saved_type RValueTy; + static RValue get(CIRGenFunction &CGF, ValueTy V) { return V.restore(CGF); } + }; + typedef CallDeleteDuringNew ConditionalCleanup; + + ConditionalCleanup *Cleanup = + CGF.EHStack.pushCleanupWithExtra( + EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(), + SavedNewPtr, SavedAllocSize, E->passAlignment(), AllocAlign); + for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) { + auto &Arg = NewArgs[I + NumNonPlacementArgs]; + Cleanup->setPlacementArg( + I, + DominatingValue::save( + CGF, Arg.getRValue(CGF, CGF.getLoc(E->getSourceRange()))), + Arg.Ty); + } + + CGF.initFullExprCleanup(); +} + +mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { + // The element type being allocated. + QualType allocType = getContext().getBaseElementType(E->getAllocatedType()); + + // 1. Build a call to the allocation function. + FunctionDecl *allocator = E->getOperatorNew(); + + // If there is a brace-initializer, cannot allocate fewer elements than inits. + unsigned minElements = 0; + if (E->isArray() && E->hasInitializer()) { + const InitListExpr *ILE = dyn_cast(E->getInitializer()); + if (ILE && ILE->isStringLiteralInit()) + minElements = + cast(ILE->getType()->getAsArrayTypeUnsafe()) + ->getSize() + .getZExtValue(); + else if (ILE) + minElements = ILE->getNumInits(); + } + + mlir::Value numElements = nullptr; + mlir::Value allocSizeWithoutCookie = nullptr; + mlir::Value allocSize = buildCXXNewAllocSize( + *this, E, minElements, numElements, allocSizeWithoutCookie); + CharUnits allocAlign = getContext().getTypeAlignInChars(allocType); + + // Emit the allocation call. + Address allocation = Address::invalid(); + CallArgList allocatorArgs; + if (allocator->isReservedGlobalPlacementOperator()) { + // In LLVM codegen: If the allocator is a global placement operator, just + // "inline" it directly. + llvm_unreachable("NYI"); + } else { + const FunctionProtoType *allocatorType = + allocator->getType()->castAs(); + unsigned ParamsToSkip = 0; + + // The allocation size is the first argument. + QualType sizeType = getContext().getSizeType(); + allocatorArgs.add(RValue::get(allocSize), sizeType); + ++ParamsToSkip; + + if (allocSize != allocSizeWithoutCookie) { + llvm_unreachable("NYI"); + } + + // The allocation alignment may be passed as the second argument. + if (E->passAlignment()) { + llvm_unreachable("NYI"); + } + + // FIXME: Why do we not pass a CalleeDecl here? + buildCallArgs(allocatorArgs, allocatorType, E->placement_arguments(), + /*AC*/ + AbstractCallee(), + /*ParamsToSkip*/ + ParamsToSkip); + RValue RV = + buildNewDeleteCall(*this, allocator, allocatorType, allocatorArgs); + + // Set !heapallocsite metadata on the call to operator new. + assert(!UnimplementedFeature::generateDebugInfo()); + + // If this was a call to a global replaceable allocation function that does + // not take an alignment argument, the allocator is known to produce storage + // that's suitably aligned for any object that fits, up to a known + // threshold. Otherwise assume it's suitably aligned for the allocated type. + CharUnits allocationAlign = allocAlign; + if (!E->passAlignment() && + allocator->isReplaceableGlobalAllocationFunction()) { + auto &Target = CGM.getASTContext().getTargetInfo(); + unsigned AllocatorAlign = llvm::bit_floor(std::min( + Target.getNewAlign(), getContext().getTypeSize(allocType))); + allocationAlign = std::max( + allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign)); + } + + allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign); + } + + // Emit a null check on the allocation result if the allocation + // function is allowed to return null (because it has a non-throwing + // exception spec or is the reserved placement new) and we have an + // interesting initializer will be running sanitizers on the initialization. + bool nullCheck = E->shouldNullCheckAllocation() && + (!allocType.isPODType(getContext()) || E->hasInitializer() || + sanitizePerformTypeCheck()); + + // The null-check means that the initializer is conditionally + // evaluated. + ConditionalEvaluation conditional(*this); + + if (nullCheck) { + llvm_unreachable("NYI"); + } + + // If there's an operator delete, enter a cleanup to call it if an + // exception is thrown. + EHScopeStack::stable_iterator operatorDeleteCleanup; + // llvm::Instruction *cleanupDominator = nullptr; + if (E->getOperatorDelete() && + !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) { + llvm_unreachable("NYI"); + EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign, + allocatorArgs); + operatorDeleteCleanup = EHStack.stable_begin(); + llvm_unreachable("NYI"); + // cleanupDominator = Builder.CreateUnreachable(); + } + llvm_unreachable("NYI"); +} + +RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor, + const CIRGenCallee &Callee, + mlir::Value This, QualType ThisTy, + mlir::Value ImplicitParam, + QualType ImplicitParamTy, + const CallExpr *CE) { + const CXXMethodDecl *DtorDecl = cast(Dtor.getDecl()); + + assert(!ThisTy.isNull()); + assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() && + "Pointer/Object mixup"); + + LangAS SrcAS = ThisTy.getAddressSpace(); + LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace(); + if (SrcAS != DstAS) { + llvm_unreachable("NYI"); + } + + CallArgList Args; + commonBuildCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam, + ImplicitParamTy, CE, Args, nullptr); + assert((CE || Dtor.getDecl()) && "expected source location provider"); + return buildCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee, + ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall, + CE ? getLoc(CE->getExprLoc()) + : getLoc(Dtor.getDecl()->getSourceRange())); +} + /// Emit a call to an operator new or operator delete function, as implicitly /// created by new-expressions and delete-expressions. static RValue buildNewDeleteCall(CIRGenFunction &CGF, @@ -464,4 +750,4 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, if (DestroyingDeleteTag && DestroyingDeleteTag.use_empty()) { llvm_unreachable("NYI"); // DestroyingDeleteTag->eraseFromParent(); } -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 6cf85b36d912..0547ef6cf933 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -980,8 +980,11 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } case CK_BaseToDerived: llvm_unreachable("NYI"); - case CK_DerivedToBase: - llvm_unreachable("NYI"); + case CK_DerivedToBase: { + // The EmitPointerWithAlignment path does this fine; just discard + // the alignment. + return CGF.buildPointerWithAlignment(CE).getPointer(); + } case CK_Dynamic: llvm_unreachable("NYI"); case CK_ArrayToPointerDecay: diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index b9b5560c9d22..085931c90892 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1204,3 +1204,24 @@ CIRGenFunction::CIRGenFPOptionsRAII::~CIRGenFPOptionsRAII() { CGF.builder.setDefaultConstrainedExcept(OldExcept); CGF.builder.setDefaultConstrainedRounding(OldRounding); } + +// TODO(cir): should be shared with LLVM codegen. +bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *CE) { + const Expr *E = CE->getSubExpr(); + + if (CE->getCastKind() == CK_UncheckedDerivedToBase) + return false; + + if (isa(E->IgnoreParens())) { + // We always assume that 'this' is never null. + return false; + } + + if (const ImplicitCastExpr *ICE = dyn_cast(CE)) { + // And that glvalue casts are never null. + if (ICE->isGLValue()) + return false; + } + + return true; +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 7f4322580d01..906de4b26c84 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -48,7 +48,7 @@ class AggExprEmitter; namespace cir { -// FIXME: for now we are reusing this from lib/Clang/CodeGenFunction.h, which +// FIXME: for now we are reusing this from lib/Clang/CIRGenFunction.h, which // isn't available in the include dir. Same for getEvaluationKind below. enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; struct CGCoroData; @@ -732,6 +732,7 @@ class CIRGenFunction : public CIRGenTypeCache { ReturnValueSlot ReturnValue); void buildNullInitialization(mlir::Location loc, Address DestPtr, QualType Ty); + bool shouldNullCheckClassCastValue(const CastExpr *CE); void buildCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr); @@ -1075,7 +1076,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. Address buildPointerWithAlignment(const clang::Expr *E, - LValueBaseInfo *BaseInfo); + LValueBaseInfo *BaseInfo = nullptr); /// Emit an expression as an initializer for an object (variable, field, etc.) /// at the given location. The expression is not necessarily the normal @@ -1221,6 +1222,11 @@ class CIRGenFunction : public CIRGenTypeCache { const CXXRecordDecl *Base, bool BaseIsVirtual); + Address getAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, + CastExpr::path_const_iterator PathBegin, + CastExpr::path_const_iterator PathEnd, + bool NullCheckValue, SourceLocation Loc); + /// Emit code for the start of a function. /// \param Loc The location to be associated with the function. /// \param StartLoc The location of the function body. @@ -1583,6 +1589,42 @@ class CIRGenFunction : public CIRGenTypeCache { } }; +/// A specialization of DominatingValue for RValue. +template <> struct DominatingValue { + typedef RValue type; + class saved_type { + enum Kind { + ScalarLiteral, + ScalarAddress, + AggregateLiteral, + AggregateAddress, + ComplexAddress + }; + + llvm::Value *Value; + llvm::Type *ElementType; + unsigned K : 3; + unsigned Align : 29; + saved_type(llvm::Value *v, llvm::Type *e, Kind k, unsigned a = 0) + : Value(v), ElementType(e), K(k), Align(a) {} + + public: + static bool needsSaving(RValue value); + static saved_type save(CIRGenFunction &CGF, RValue value); + RValue restore(CIRGenFunction &CGF); + + // implementations in CGCleanup.cpp + }; + + static bool needsSaving(type value) { return saved_type::needsSaving(value); } + static saved_type save(CIRGenFunction &CGF, type value) { + return saved_type::save(CGF, value); + } + static type restore(CIRGenFunction &CGF, saved_type value) { + return value.restore(CGF); + } +}; + } // namespace cir #endif // LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 4ef3e177e91d..93f2240171c2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2224,3 +2224,30 @@ mlir::cir::GlobalOp CIRGenModule::getOrInsertGlobal(mlir::Location loc, }); } +// TODO(cir): this can be shared with LLVM codegen. +CharUnits CIRGenModule::computeNonVirtualBaseClassOffset( + const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start, + CastExpr::path_const_iterator End) { + CharUnits Offset = CharUnits::Zero(); + + const ASTContext &Context = getASTContext(); + const CXXRecordDecl *RD = DerivedClass; + + for (CastExpr::path_const_iterator I = Start; I != End; ++I) { + const CXXBaseSpecifier *Base = *I; + assert(!Base->isVirtual() && "Should not see virtual bases here!"); + + // Get the layout. + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + const auto *BaseDecl = + cast(Base->getType()->castAs()->getDecl()); + + // Add the offset. + Offset += Layout.getBaseClassOffset(BaseDecl); + + RD = BaseDecl; + } + + return Offset; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 33767d44688c..07235d442cf6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -215,6 +215,11 @@ class CIRGenModule : public CIRGenTypeCache { getAddrOfGlobalVar(const VarDecl *D, std::optional Ty = {}, ForDefinition_t IsForDefinition = NotForDefinition); + CharUnits + computeNonVirtualBaseClassOffset(const CXXRecordDecl *DerivedClass, + CastExpr::path_const_iterator Start, + CastExpr::path_const_iterator End); + /// Will return a global variable of the given type. If a variable with a /// different type already exists then a new variable with the right type /// will be created and all uses of the old variable will be replaced with a From e098aec50bbf496ad3d5be153ace3ba447498710 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 28 Apr 2023 17:23:00 -0700 Subject: [PATCH 0907/2301] [CIR] Enhance isSafeToConvert logic for fields --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 34 ++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index ef2ded2f9f16..706e2f297975 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -39,7 +39,7 @@ CIRGenTypes::~CIRGenTypes() { delete &*I++; } -// This is CIR's version of CodeGenTypes::addRecordTypeName +// This is CIR's version of CIRGenTypes::addRecordTypeName std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, StringRef suffix) { llvm::SmallString<256> typeName; @@ -77,6 +77,10 @@ bool CIRGenTypes::isRecordLayoutComplete(const Type *Ty) const { return I != recordDeclTypes.end() && I->second.getBody(); } +static bool +isSafeToConvert(QualType T, CIRGenTypes &CGT, + llvm::SmallPtrSet &AlreadyChecked); + /// Return true if it is safe to convert the specified record decl to IR and lay /// it out, false if doing so would cause us to get into a recursive compilation /// mess. @@ -111,13 +115,37 @@ isSafeToConvert(const RecordDecl *RD, CIRGenTypes &CGT, // If this type would require laying out members that are currently being laid // out, don't do it. - for ([[maybe_unused]] const auto *I : RD->fields()) - llvm_unreachable("NYI"); + for (const auto *I : RD->fields()) + if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked)) + return false; // If there are no problems, lets do it. return true; } +/// Return true if it is safe to convert this field type, which requires the +/// structure elements contained by-value to all be recursively safe to convert. +static bool +isSafeToConvert(QualType T, CIRGenTypes &CGT, + llvm::SmallPtrSet &AlreadyChecked) { + // Strip off atomic type sugar. + if (const auto *AT = T->getAs()) + T = AT->getValueType(); + + // If this is a record, check it. + if (const auto *RT = T->getAs()) + return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked); + + // If this is an array, check the elements, which are embedded inline. + if (const auto *AT = CGT.getContext().getAsArrayType(T)) + return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked); + + // Otherwise, there is no concern about transforming this. We only care about + // things that are contained by-value in a structure that can have another + // structure as a member. + return true; +} + // Return true if it is safe to convert the specified record decl to CIR and lay // it out, false if doing so would cause us to get into a recursive compilation // mess. From 03344f404eedf0c37e88c55f763b09fe9b399f3d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 28 Apr 2023 19:05:22 -0700 Subject: [PATCH 0908/2301] [CIR][CIRGen] Handle initialization for enum members on structs --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 17 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 180 ++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 8 +- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/agg-init.cpp | 27 +++ 6 files changed, 212 insertions(+), 24 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 30cdb84193ed..087f54133827 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -300,14 +300,14 @@ mlir::Value CIRGenFunction::buildToMemory(mlir::Value Value, QualType Ty) { } void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue) { - // TODO: constant matrix type, volatile, non temporal, TBAA + // TODO: constant matrix type, volatile, no init, non temporal, TBAA buildStoreOfScalar(value, lvalue.getAddress(), false, lvalue.getType(), - lvalue.getBaseInfo(), false); + lvalue.getBaseInfo(), false, false); } void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, QualType Ty, - LValueBaseInfo BaseInfo, + LValueBaseInfo BaseInfo, bool isInit, bool isNontemporal) { // TODO(CIR): this has fallen out of date with codegen @@ -339,6 +339,17 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, llvm_unreachable("NYI"); } +void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, + bool isInit) { + if (lvalue.getType()->isConstantMatrixType()) { + llvm_unreachable("NYI"); + } + + buildStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), lvalue.getBaseInfo(), isInit, + lvalue.isNontemporal()); +} + /// Given an expression that represents a value lvalue, this /// method emits the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index fa2277fb3d54..1913f8eae145 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -119,6 +119,9 @@ class AggExprEmitter : public StmtVisitor { } void VisitChooseExpr(const ChooseExpr *E) { llvm_unreachable("NYI"); } void VisitInitListExpr(InitListExpr *E); + void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef Args, + FieldDecl *InitializedFieldInUnion, + Expr *ArrayFiller); void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, llvm::Value *outerBegin = nullptr) { llvm_unreachable("NYI"); @@ -152,12 +155,9 @@ class AggExprEmitter : public StmtVisitor { void VisitVAArgExpr(VAArgExpr *E) { llvm_unreachable("NYI"); } - void EmitInitializationToLValue(Expr *E, LValue LV); + void buildInitializationToLValue(Expr *E, LValue LV); - void EmitNullInitializationToLValue(LValue Address) { - llvm_unreachable("NYI"); - } - // case Expr::ChoseExprClass: + void buildNullInitializationToLValue(mlir::Location loc, LValue Address); void VisitCXXThrowExpr(const CXXThrowExpr *E) { llvm_unreachable("NYI"); } void VisitAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); } }; @@ -204,7 +204,35 @@ static bool isSimpleZero(const Expr *E, CIRGenFunction &CGF) { return false; } -void AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) { +void AggExprEmitter::buildNullInitializationToLValue(mlir::Location loc, + LValue lv) { + QualType type = lv.getType(); + + // If the destination slot is already zeroed out before the aggregate is + // copied into it, we don't have to emit any zeros here. + if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type)) + return; + + if (CGF.hasScalarEvaluationKind(type)) { + // For non-aggregates, we can store the appropriate null constant. + auto null = CGF.CGM.buildNullConstant(type, loc); + // Note that the following is not equivalent to + // EmitStoreThroughBitfieldLValue for ARC types. + if (lv.isBitField()) { + llvm_unreachable("NYI"); + } else { + assert(lv.isSimple()); + CGF.buildStoreOfScalar(null, lv, /* isInitialization */ true); + } + } else { + // There's a potential optimization opportunity in combining + // memsets; that would be easy for arrays, but relatively + // difficult for structures with the current code. + CGF.buildNullInitialization(loc, lv.getAddress(), lv.getType()); + } +} + +void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) { QualType type = LV.getType(); // FIXME: Ignore result? // FIXME: Are initializers affected by volatile? @@ -215,7 +243,9 @@ void AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) { // Storing "i32 0" to a zero'd memory location is a noop. return; } else if (isa(E) || isa(E)) { - return EmitNullInitializationToLValue(LV); + auto loc = E->getSourceRange().isValid() ? CGF.getLoc(E->getSourceRange()) + : *CGF.currSrcLoc; + return buildNullInitializationToLValue(loc, LV); } else if (isa(E)) { // Do nothing. return; @@ -316,7 +346,7 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { llvm_unreachable("NYI"); } - EmitInitializationToLValue(captureInit, LV); + buildInitializationToLValue(captureInit, LV); // Push a destructor if necessary. if (QualType::DestructionKind DtorKind = @@ -470,11 +500,6 @@ void AggExprEmitter::withReturnValueSlot( } void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { - // If the initializer list is empty ({}), and there are - // no explicitly initialized elements. - if (E->getNumInits() == 0) - return; - // TODO(cir): use something like CGF.ErrorUnsupported if (E->hadArrayRangeDesignator()) llvm_unreachable("GNU array range designator extension"); @@ -482,18 +507,135 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { if (E->isTransparent()) return Visit(E->getInit(0)); - AggValueSlot Dest = EnsureSlot(E->getType()); + VisitCXXParenListOrInitListExpr( + E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller()); +} - [[maybe_unused]] LValue DestLV = - CGF.makeAddrLValue(Dest.getAddress(), E->getType()); +void AggExprEmitter::VisitCXXParenListOrInitListExpr( + Expr *ExprToVisit, ArrayRef InitExprs, + FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) { +#if 0 + // FIXME: Assess perf here? Figure out what cases are worth optimizing here + // (Length of globals? Chunks of zeroed-out space?). + // + // If we can, prefer a copy from a global; this is a lot less code for long + // globals, and it's easier for the current optimizers to analyze. + if (llvm::Constant *C = + CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) { + llvm::GlobalVariable* GV = + new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true, + llvm::GlobalValue::InternalLinkage, C, ""); + EmitFinalDestCopy(ExprToVisit->getType(), + CGF.MakeAddrLValue(GV, ExprToVisit->getType())); + return; + } +#endif + + AggValueSlot Dest = EnsureSlot(ExprToVisit->getType()); + + LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), ExprToVisit->getType()); // Handle initialization of an array. - if (E->getType()->isArrayType()) { + if (ExprToVisit->getType()->isArrayType()) { llvm_unreachable("NYI"); } - assert(E->getType()->isRecordType() && "Only support structs/unions here!"); - llvm_unreachable("NYI"); + assert(ExprToVisit->getType()->isRecordType() && + "Only support structs/unions here!"); + + // Do struct initialization; this code just sets each individual member + // to the approprate value. This makes bitfield support automatic; + // the disadvantage is that the generated code is more difficult for + // the optimizer, especially with bitfields. + unsigned NumInitElements = InitExprs.size(); + RecordDecl *record = ExprToVisit->getType()->castAs()->getDecl(); + + // We'll need to enter cleanup scopes in case any of the element + // initializers throws an exception. + SmallVector cleanups; + // FIXME(cir): placeholder + mlir::Operation *cleanupDominator = nullptr; + [[maybe_unused]] auto addCleanup = + [&](const EHScopeStack::stable_iterator &cleanup) { + llvm_unreachable("NYI"); + }; + + unsigned curInitIndex = 0; + + // Emit initialization of base classes. + if (auto *CXXRD = dyn_cast(record)) { + assert(NumInitElements >= CXXRD->getNumBases() && + "missing initializer for base class"); + for ([[maybe_unused]] auto &Base : CXXRD->bases()) { + llvm_unreachable("NYI"); + } + } + + // Prepare a 'this' for CXXDefaultInitExprs. + CIRGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress()); + + if (record->isUnion()) { + llvm_unreachable("NYI"); + } + + // Here we iterate over the fields; this makes it simpler to both + // default-initialize fields and skip over unnamed fields. + for (const auto *field : record->fields()) { + // We're done once we hit the flexible array member. + if (field->getType()->isIncompleteArrayType()) + break; + + // Always skip anonymous bitfields. + if (field->isUnnamedBitField()) + continue; + + // We're done if we reach the end of the explicit initializers, we + // have a zeroed object, and the rest of the fields are + // zero-initializable. + if (curInitIndex == NumInitElements && Dest.isZeroed() && + CGF.getTypes().isZeroInitializable(ExprToVisit->getType())) + break; + + LValue LV = + CGF.buildLValueForFieldInitialization(DestLV, field, field->getName()); + // We never generate write-barries for initialized fields. + assert(!UnimplementedFeature::setNonGC()); + + if (curInitIndex < NumInitElements) { + // Store the initializer into the field. + CIRGenFunction::SourceLocRAIIObject loc{ + CGF, CGF.getLoc(record->getSourceRange())}; + buildInitializationToLValue(InitExprs[curInitIndex++], LV); + } else { + // We're out of initializers; default-initialize to null + buildNullInitializationToLValue(CGF.getLoc(ExprToVisit->getSourceRange()), + LV); + } + + // Push a destructor if necessary. + // FIXME: if we have an array of structures, all explicitly + // initialized, we can end up pushing a linear number of cleanups. + [[maybe_unused]] bool pushedCleanup = false; + if (QualType::DestructionKind dtorKind = + field->getType().isDestructedType()) { + llvm_unreachable("NYI"); + } + + // From LLVM codegen, maybe not useful for CIR: + // If the GEP didn't get used because of a dead zero init or something + // else, clean it up for -O0 builds and general tidiness. + } + + // Deactivate all the partial cleanups in reverse order, which + // generally means popping them. + assert((cleanupDominator || cleanups.empty()) && + "Missing cleanupDominator before deactivating cleanup blocks"); + for (unsigned i = cleanups.size(); i != 0; --i) + llvm_unreachable("NYI"); + + // Destroy the placeholder if we made one. + if (cleanupDominator) + llvm_unreachable("NYI"); } void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 906de4b26c84..418d13ca0d27 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1042,7 +1042,8 @@ class CIRGenFunction : public CIRGenTypeCache { void buildStoreOfScalar(mlir::Value value, LValue lvalue); void buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, clang::QualType Ty, LValueBaseInfo BaseInfo, - bool isNontemporal); + bool isInit = false, bool isNontemporal = false); + void buildStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit); mlir::Value buildToMemory(mlir::Value Value, clang::QualType Ty); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 706e2f297975..b473b1ae2139 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -644,7 +644,13 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } case Type::Enum: { - assert(0 && "not implemented"); + const EnumDecl *ED = cast(Ty)->getDecl(); + if (ED->isCompleteDefinition() || ED->isFixed()) + return ConvertType(ED->getIntegerType()); + // Return a placeholder 'i32' type. This can be changed later when the + // type is defined (see UpdateCompletedType), but is likely to be the + // "right" answer. + ResultType = CGM.Int32Ty; break; } diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index da527ee86159..5c8afbafa6e6 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -74,6 +74,7 @@ struct UnimplementedFeature { static bool openMP() { return false; } static bool ehStack() { return false; } static bool isVarArg() { return false; } + static bool setNonGC() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index c21d181f8c9b..7ae0ce221719 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -1,6 +1,9 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// CHECK !ty_22struct2EZero22 = !cir.struct<"struct.Zero", i8> +// CHECK !ty_22struct2Eyep_22 = !cir.struct<"struct.yep_", i32, i32> + struct Zero { void yolo(); }; @@ -16,3 +19,27 @@ void f() { // CHECK: %1 = cir.alloca !ty_22struct2EZero22, cir.ptr , ["z1"] // CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () // CHECK: cir.return + +typedef enum xxy_ { + xxy_Low = 0, + xxy_High = 0x3f800000, + xxy_EnumSize = 0x7fffffff +} xxy; + +typedef struct yep_ { + unsigned int Status; + xxy HC; +} yop; + +void use() { yop{}; } + +// CHECK: cir.func @_Z3usev() { +// CHECK: %0 = cir.alloca !ty_22struct2Eyep_22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} +// CHECK: %1 = "cir.struct_element_addr"(%0) <{member_name = "Status"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %2 = cir.const(0 : i32) : i32 +// CHECK: cir.store %2, %1 : i32, cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "HC"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %4 = cir.const(0 : i32) : i32 +// CHECK: cir.store %4, %3 : i32, cir.ptr +// CHECK: cir.return +// CHECK: } From ea7f1619bedc89e9033b6328c3a1df13934cabdb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 28 Apr 2023 19:34:08 -0700 Subject: [PATCH 0909/2301] [CIR][CIRGen] Add more plumbing for struct construction --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index b473b1ae2139..dc2abfc5320d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -181,7 +181,8 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // If converting this type would cause us to infinitely loop, don't do it! if (!isSafeToConvert(RD, *this)) { - llvm_unreachable("NYI"); + DeferredRecords.push_back(RD); + return entry; } // Okay, this is a definition of a type. Compile the implementation now. @@ -600,10 +601,11 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { const ConstantArrayType *A = cast(Ty); auto EltTy = convertTypeForMem(A->getElementType()); + // FIXME(cir): add a `isSized` method to CIRGenBuilder. auto isSized = [&](mlir::Type ty) { if (ty.isIntOrFloat() || ty.isa()) + mlir::cir::ArrayType, mlir::cir::BoolType>()) return true; assert(0 && "not implemented"); return false; From d7e8dbb384f55c64e4badbfe080a2acd0c8ead8e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 28 Apr 2023 19:59:14 -0700 Subject: [PATCH 0910/2301] [CIR][CIRGen] Types: allow creation of atomics --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 10 +++++++++- clang/test/CIR/CodeGen/atomic.cpp | 10 ++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/atomic.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index dc2abfc5320d..224eb8896ca3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -667,7 +667,15 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } case Type::Atomic: { - assert(0 && "not implemented"); + QualType valueType = cast(Ty)->getValueType(); + ResultType = convertTypeForMem(valueType); + + // Pad out to the inflated size if necessary. + uint64_t valueSize = Context.getTypeSize(valueType); + uint64_t atomicSize = Context.getTypeSize(Ty); + if (valueSize != atomicSize) { + llvm_unreachable("NYI"); + } break; } case Type::Pipe: { diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp new file mode 100644 index 000000000000..fcc989ab8124 --- /dev/null +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef struct _a { + _Atomic(int) d; +} at; + +void m() { at y; } + +// CHECK: !ty_22struct2E_a22 = !cir.struct<"struct._a", i32> \ No newline at end of file From 451a7a563aa567aba76845c1add5ae226f0d1fc7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 29 Apr 2023 00:28:50 -0400 Subject: [PATCH 0911/2301] [CIR][Dialect] Add the datalayout interface to cir.ArrayType and PointerType Add the interface and the boilerplate methods to propagate the elements. This is the same behavior as we see from llvm's datalayout types. --- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 1 + .../include/clang/CIR/Dialect/IR/CIRTypes.td | 13 +++---- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 36 +++++++++++++++++++ clang/lib/CIR/Dialect/IR/CMakeLists.txt | 3 +- 4 files changed, 46 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 26423651077a..87aea83b744e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -15,6 +15,7 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Types.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" //===----------------------------------------------------------------------===// // CIR Dialect Types diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 616fcdab07af..27faea246fa5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -14,14 +14,15 @@ #define MLIR_CIR_DIALECT_CIR_TYPES include "clang/CIR/Dialect/IR/CIRDialect.td" +include "mlir/Interfaces/DataLayoutInterfaces.td" include "mlir/IR/AttrTypeBase.td" //===----------------------------------------------------------------------===// // CIR Types //===----------------------------------------------------------------------===// -class CIR_Type : TypeDef { +class CIR_Type traits = []> : + TypeDef { let mnemonic = typeMnemonic; } @@ -29,8 +30,8 @@ class CIR_Type : TypeDef { +def CIR_PointerType : CIR_Type<"Pointer", "ptr", + [DeclareTypeInterfaceMethods]> { let summary = "CIR pointer type"; let description = [{ @@ -112,8 +113,8 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { // ArrayType //===----------------------------------------------------------------------===// -def CIR_ArrayType : - CIR_Type<"Array", "array"> { +def CIR_ArrayType : CIR_Type<"Array", "array", + [DeclareTypeInterfaceMethods]> { let summary = "CIR array type"; let description = [{ diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index fec1f8741ad6..ab5d60ee7bd8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -64,6 +64,24 @@ void PointerType::print(mlir::AsmPrinter &printer) const { printer << '>'; } +llvm::TypeSize +PointerType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + llvm_unreachable("NYI"); +} + +uint64_t +PointerType::getABIAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + llvm_unreachable("NYI"); +} + +uint64_t PointerType::getPreferredAlignment( + const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + llvm_unreachable("NYI"); +} + Type BoolType::parse(mlir::AsmParser &parser) { return get(parser.getContext()); } @@ -164,6 +182,24 @@ void ArrayType::print(mlir::AsmPrinter &printer) const { printer << '>'; } +llvm::TypeSize +ArrayType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return dataLayout.getTypeSizeInBits(getEltType()); +} + +uint64_t +ArrayType::getABIAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return dataLayout.getTypeABIAlignment(getEltType()); +} + +uint64_t +ArrayType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return dataLayout.getTypePreferredAlignment(getEltType()); +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 62ccb7fe364c..703718d3d2c7 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -12,8 +12,9 @@ add_clang_library(MLIRCIR LINK_LIBS PUBLIC MLIRIR + MLIRDataLayoutInterfaces MLIRFuncDialect - MLIRLLVMDialect MLIRLoopLikeInterface + MLIRLLVMDialect MLIRSideEffectInterfaces ) From 4741ed1070ae6293823d4f9c0bf576e6ea8e94e9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 1 May 2023 10:11:21 -0700 Subject: [PATCH 0912/2301] [CIR][NFC] Fix linking problems saved_type::restore --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 418d13ca0d27..383b1570ae11 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1612,7 +1612,7 @@ template <> struct DominatingValue { public: static bool needsSaving(RValue value); static saved_type save(CIRGenFunction &CGF, RValue value); - RValue restore(CIRGenFunction &CGF); + RValue restore(CIRGenFunction &CGF) { llvm_unreachable("NYI"); } // implementations in CGCleanup.cpp }; From eb3144eb9135136e126c37f1a141891f06f41b2a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 1 May 2023 11:15:14 -0700 Subject: [PATCH 0913/2301] [CIR] Structs: add logic to compute datalyout size and alignment Add internal size, align and padded members to StructType and make the computation lazily. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 15 ++- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 5 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 115 +++++++++++++++--- 4 files changed, 114 insertions(+), 23 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 27faea246fa5..54d2e9749119 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -68,7 +68,8 @@ def CIR_BoolType : // //===----------------------------------------------------------------------===// -def CIR_StructType : CIR_Type<"Struct", "struct"> { +def CIR_StructType : CIR_Type<"Struct", "struct", + [DeclareTypeInterfaceMethods]> { let summary = "CIR struct type"; let description = [{ @@ -80,6 +81,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { ArrayRefParameter<"mlir::Type", "members">:$members, "mlir::StringAttr":$typeName, "bool":$body, + "bool":$packed, "std::optional<::mlir::cir::ASTRecordDeclAttr>":$ast ); @@ -89,7 +91,8 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { "bool":$body ), [{ auto id = mlir::StringAttr::get(context, typeName); - auto sTy = StructType::get(context, members, id, body, std::nullopt); + auto sTy = StructType::get(context, members, id, body, + /*packed=*/false, std::nullopt); return sTy; }]> ]; @@ -97,9 +100,17 @@ def CIR_StructType : CIR_Type<"Struct", "struct"> { let hasCustomAssemblyFormat = 1; let extraClassDeclaration = [{ + private: + // All these support lazily computation and storage + // for the struct size and alignment. + mutable std::optional size, align; + mutable std::optional padded = false; + void computeSizeAndAlignment(const ::mlir::DataLayout &dataLayout) const; public: void dropAst(); size_t getNumElements() const { return getMembers().size(); } + bool isOpaque() const { return !getBody(); } + bool isPadded(const ::mlir::DataLayout &dataLayout) const; }]; let extraClassDefinition = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 224eb8896ca3..06c183938891 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -171,7 +171,7 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { auto name = getRecordTypeName(RD, ""); auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); entry = mlir::cir::StructType::get( - &getMLIRContext(), {}, identifier, /*body=*/false, + &getMLIRContext(), {}, identifier, /*body=*/false, /**packed=*/false, mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), RD)); } diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index a566b6f314f5..971edac0ad6e 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -342,7 +342,7 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, mlir::StringAttr::get(&getMLIRContext(), name + ".base"); *BaseTy = mlir::cir::StructType::get( &getMLIRContext(), baseBuilder.fieldTypes, baseIdentifier, - /*body=*/true, + /*body=*/true, /**packed=*/false, mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); // TODO(cir): add something like addRecordTypeName @@ -358,7 +358,8 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, // but we may need to recursively layout D while laying D out as a base type. *Ty = mlir::cir::StructType::get( &getMLIRContext(), builder.fieldTypes, identifier, - /*body=*/true, mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); + /*body=*/true, /**packed=*/false, + mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); auto RL = std::make_unique( Ty ? *Ty : mlir::cir::StructType{}, diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index ab5d60ee7bd8..f7ab440d9628 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -64,24 +64,6 @@ void PointerType::print(mlir::AsmPrinter &printer) const { printer << '>'; } -llvm::TypeSize -PointerType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - llvm_unreachable("NYI"); -} - -uint64_t -PointerType::getABIAlignment(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - llvm_unreachable("NYI"); -} - -uint64_t PointerType::getPreferredAlignment( - const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { - llvm_unreachable("NYI"); -} - Type BoolType::parse(mlir::AsmParser &parser) { return get(parser.getContext()); } @@ -182,6 +164,28 @@ void ArrayType::print(mlir::AsmPrinter &printer) const { printer << '>'; } +//===----------------------------------------------------------------------===// +// Data Layout information for types +//===----------------------------------------------------------------------===// + +llvm::TypeSize +PointerType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + llvm_unreachable("NYI"); +} + +uint64_t +PointerType::getABIAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + llvm_unreachable("NYI"); +} + +uint64_t PointerType::getPreferredAlignment( + const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + llvm_unreachable("NYI"); +} + llvm::TypeSize ArrayType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { @@ -200,6 +204,81 @@ ArrayType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, return dataLayout.getTypePreferredAlignment(getEltType()); } +llvm::TypeSize +StructType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + if (!size) + computeSizeAndAlignment(dataLayout); + return llvm::TypeSize::getFixed(*size * 8); +} + +uint64_t +StructType::getABIAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + if (!align) + computeSizeAndAlignment(dataLayout); + return *align; +} + +uint64_t +StructType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + llvm_unreachable("NYI"); +} + +bool StructType::isPadded(const ::mlir::DataLayout &dataLayout) const { + if (!padded) + computeSizeAndAlignment(dataLayout); + return *padded; +} + +void StructType::computeSizeAndAlignment( + const ::mlir::DataLayout &dataLayout) const { + assert(!isOpaque() && "Cannot get layout of opaque structs"); + // Do not recompute. + if (size || align || padded) + return; + + unsigned structSize = 0; + llvm::Align structAlignment{1}; + [[maybe_unused]] bool isPadded = false; + unsigned numElements = getNumElements(); + auto members = getMembers(); + + // Loop over each of the elements, placing them in memory. + for (unsigned i = 0, e = numElements; i != e; ++i) { + auto ty = members[i]; + const llvm::Align tyAlign = + llvm::Align(getPacked() ? 1 : dataLayout.getTypeABIAlignment(ty)); + + // Add padding if necessary to align the data element properly. + if (!llvm::isAligned(tyAlign, structSize)) { + isPadded = true; + structSize = llvm::alignTo(structSize, tyAlign); + } + + // Keep track of maximum alignment constraint. + structAlignment = std::max(tyAlign, structAlignment); + + // FIXME: track struct size up to each element. + // getMemberOffsets()[i] = structSize; + + // Consume space for this data item + structSize += dataLayout.getTypeSize(ty); + } + + // Add padding to the end of the struct so that it could be put in an array + // and all array elements would be aligned correctly. + if (!llvm::isAligned(structAlignment, structSize)) { + isPadded = true; + structSize = llvm::alignTo(structSize, structAlignment); + } + + size = structSize; + align = structAlignment.value(); + padded = isPadded; +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// From faaf64cc312d6bbd90017c30cb2e40d2d5f796de Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 28 Apr 2023 20:17:12 -0700 Subject: [PATCH 0914/2301] [CIR][CIRGen] Unions: layout and initial lowering - Teach CIRRecordLowering to use mlir::DataLayout. - Adds logic for lowerUnion(). - Add testcase. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 4 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 85 +++++++++++++++++-- clang/test/CIR/CodeGen/union.cpp | 20 +++++ 3 files changed, 102 insertions(+), 7 deletions(-) create mode 100644 clang/test/CIR/CodeGen/union.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 54d2e9749119..d1346aa6431a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -103,8 +103,8 @@ def CIR_StructType : CIR_Type<"Struct", "struct", private: // All these support lazily computation and storage // for the struct size and alignment. - mutable std::optional size, align; - mutable std::optional padded = false; + mutable std::optional size{}, align{}; + mutable std::optional padded{}; void computeSizeAndAlignment(const ::mlir::DataLayout &dataLayout) const; public: void dropAst(); diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 971edac0ad6e..400e148d8593 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -51,6 +51,7 @@ struct CIRRecordLowering final { bool isPacked); // Short helper routines. void lower(bool nonVirtualBaseType); + void lowerUnion(); void computeVolatileBitfields(); void accumulateBases(); @@ -82,6 +83,19 @@ struct CIRRecordLowering final { void calculateZeroInit(); + CharUnits getSize(mlir::Type Ty) { + return CharUnits::fromQuantity(layout.getTypeSize(Ty)); + } + CharUnits getAlignment(mlir::Type Ty) { + return CharUnits::fromQuantity(layout.getTypeABIAlignment(Ty)); + } + bool isZeroInitializable(const FieldDecl *FD) { + return cirGenTypes.isZeroInitializable(FD->getType()); + } + bool isZeroInitializable(const RecordDecl *RD) { + return cirGenTypes.isZeroInitializable(RD); + } + mlir::Type getCharType() { return mlir::IntegerType::get(&cirGenTypes.getMLIRContext(), astContext.getCharWidth()); @@ -92,8 +106,8 @@ struct CIRRecordLowering final { mlir::Type type = getCharType(); return numberOfChars == CharUnits::One() ? type - : mlir::RankedTensorType::get({0, numberOfChars.getQuantity()}, - type); + : mlir::cir::ArrayType::get(type.getContext(), type, + numberOfChars.getQuantity()); } // Gets the llvm Basesubobject type from a CXXRecordDecl. @@ -141,6 +155,7 @@ struct CIRRecordLowering final { llvm::DenseMap bitFields; llvm::DenseMap nonVirtualBases; llvm::DenseMap virtualBases; + mlir::DataLayout layout; bool IsZeroInitializable : 1; bool IsZeroInitializableAsBase : 1; bool isPacked : 1; @@ -158,12 +173,14 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, astContext{cirGenTypes.getContext()}, recordDecl{recordDecl}, cxxRecordDecl{llvm::dyn_cast(recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, - IsZeroInitializable(true), IsZeroInitializableAsBase(true), - isPacked{isPacked} {} + layout{cirGenTypes.getModule().getModule()}, IsZeroInitializable(true), + IsZeroInitializableAsBase(true), isPacked{isPacked} {} void CIRRecordLowering::lower(bool nonVirtualBaseType) { if (recordDecl->isUnion()) { - llvm_unreachable("NYI"); + lowerUnion(); + computeVolatileBitfields(); + return; } CharUnits Size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize() @@ -198,6 +215,64 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) { computeVolatileBitfields(); } +void CIRRecordLowering::lowerUnion() { + CharUnits LayoutSize = astRecordLayout.getSize(); + mlir::Type StorageType = nullptr; + bool SeenNamedMember = false; + // Iterate through the fields setting bitFieldInfo and the Fields array. Also + // locate the "most appropriate" storage type. The heuristic for finding the + // storage type isn't necessary, the first (non-0-length-bitfield) field's + // type would work fine and be simpler but would be different than what we've + // been doing and cause lit tests to change. + for (const auto *Field : recordDecl->fields()) { + if (Field->isBitField()) { + if (Field->isZeroLengthBitField()) + continue; + llvm_unreachable("NYI"); + } + fields[Field->getCanonicalDecl()] = 0; + auto FieldType = getStorageType(Field); + // Compute zero-initializable status. + // This union might not be zero initialized: it may contain a pointer to + // data member which might have some exotic initialization sequence. + // If this is the case, then we aught not to try and come up with a "better" + // type, it might not be very easy to come up with a Constant which + // correctly initializes it. + if (!SeenNamedMember) { + SeenNamedMember = Field->getIdentifier(); + if (!SeenNamedMember) + if (const auto *FieldRD = Field->getType()->getAsRecordDecl()) + SeenNamedMember = FieldRD->findFirstNamedDataMember(); + if (SeenNamedMember && !isZeroInitializable(Field)) { + IsZeroInitializable = IsZeroInitializableAsBase = false; + StorageType = FieldType; + } + } + // Because our union isn't zero initializable, we won't be getting a better + // storage type. + if (!IsZeroInitializable) + continue; + + // Conditionally update our storage type if we've got a new "better" one. + if (!StorageType || getAlignment(FieldType) > getAlignment(StorageType) || + (getAlignment(FieldType) == getAlignment(StorageType) && + getSize(FieldType) > getSize(StorageType))) + StorageType = FieldType; + } + // If we have no storage type just pad to the appropriate size and return. + if (!StorageType) + return appendPaddingBytes(LayoutSize); + // If our storage size was bigger than our required size (can happen in the + // case of packed bitfields on Itanium) then just use an I8 array. + if (LayoutSize < getSize(StorageType)) + StorageType = getByteArrayType(LayoutSize); + fieldTypes.push_back(StorageType); + appendPaddingBytes(LayoutSize - getSize(StorageType)); + // Set packed if we need it. + if (LayoutSize % getAlignment(StorageType)) + isPacked = true; +} + bool CIRRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query) { const ASTRecordLayout &DeclLayout = astContext.getASTRecordLayout(Decl); diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp new file mode 100644 index 000000000000..6f15199a56fe --- /dev/null +++ b/clang/test/CIR/CodeGen/union.cpp @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef struct { int x; } yolo; +typedef union { yolo y; struct { int lifecnt; }; } yolm; +typedef union { yolo y; struct { int lifecnt; int genpad; }; } yolm2; + +void m() { + yolm q; + yolm2 q2; +} + +// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", i32, i32, #cir.recdecl.ast> +// CHECK: !ty_22struct2Eyolo22 = !cir.struct<"struct.yolo", i32, #cir.recdecl.ast> +// CHECK: !ty_22union2Eyolm22 = !cir.struct<"union.yolm", !ty_22struct2Eyolo22> +// CHECK: !ty_22union2Eyolm222 = !cir.struct<"union.yolm2", !ty_22struct2Eanon22> + +// CHECK: cir.func @_Z1mv() { +// CHECK: cir.alloca !ty_22union2Eyolm22, cir.ptr , ["q"] {alignment = 4 : i64} +// CHECK: cir.alloca !ty_22union2Eyolm222, cir.ptr , ["q2"] {alignment = 4 : i64} \ No newline at end of file From 0e27f6c042040c1087925fe0d068446fc8a39406 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 3 May 2023 01:14:30 -0400 Subject: [PATCH 0915/2301] [CIR][Rebase] Fixes for upstream changes --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 93f2240171c2..816fd74580e5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -91,10 +91,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, const clang::CodeGenOptions &CGO, DiagnosticsEngine &Diags) : builder(context, *this), astCtx(astctx), langOpts(astctx.getLangOpts()), - codeGenOpts(CGO), - theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), - target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, - VTables{*this} { + codeGenOpts(CGO), theModule{mlir::ModuleOp::create( + builder.getUnknownLoc())}, + Diags(Diags), target(astCtx.getTargetInfo()), + ABI(createCXXABI(*this)), genTypes{*this}, VTables{*this} { // Initialize the type cache. VoidTy = ::mlir::IntegerType::get(builder.getContext(), 8); From 4646d73214e2da6e5e9bd028653d5c4a7298f926 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 3 May 2023 01:14:57 -0400 Subject: [PATCH 0916/2301] [CIR][Rebase] Correct wrong destructor reference in vtable incu/main has this wrong evidently? clang normally emits the _ZN1BD2Ev as I've changed it to. TBD. --- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 7016d1037614..9c4a4cf61619 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -74,7 +74,7 @@ class B : public A // CHECK: } // vtable for B -// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<<{#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD1Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}>> : ![[VTableTypeA]] +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<<{#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}>> : ![[VTableTypeA]] // vtable for __cxxabiv1::__si_class_type_info // CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> From 3a5e00ad7b8962ddb61f527caa5b06a6f4972f62 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 3 May 2023 12:39:07 -0400 Subject: [PATCH 0917/2301] [CIR][Rebase] Fix changes from upstream --- clang/include/clang/CIR/CIRGenerator.h | 5 ++++ clang/lib/CIR/CodeGen/CIRGenerator.cpp | 4 ++- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 26 +++++++++---------- 3 files changed, 21 insertions(+), 14 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 39e067233233..64c0e4a82202 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -18,7 +18,9 @@ #include "clang/AST/Decl.h" #include "clang/Basic/CodeGenOptions.h" +#include "llvm/ADT/IntrusiveRefCntPtr.h" #include "llvm/Support/ToolOutputFile.h" +#include "llvm/Support/VirtualFileSystem.h" #include @@ -42,6 +44,8 @@ class CIRGenerator : public clang::ASTConsumer { virtual void anchor(); clang::DiagnosticsEngine &Diags; clang::ASTContext *astCtx; + llvm::IntrusiveRefCntPtr + fs; // Only used for debug info. const clang::CodeGenOptions codeGenOpts; // Intentionally copied in. @@ -73,6 +77,7 @@ class CIRGenerator : public clang::ASTConsumer { public: CIRGenerator(clang::DiagnosticsEngine &diags, + llvm::IntrusiveRefCntPtr FS, const clang::CodeGenOptions &CGO); ~CIRGenerator(); void Initialize(clang::ASTContext &Context) override; diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 419a51ba8dd3..9eb3398a840f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -31,8 +31,10 @@ using namespace clang; void CIRGenerator::anchor() {} CIRGenerator::CIRGenerator(clang::DiagnosticsEngine &diags, + llvm::IntrusiveRefCntPtr vfs, const CodeGenOptions &CGO) - : Diags(diags), codeGenOpts{CGO}, HandlingTopLevelDecls(0) {} + : Diags(diags), fs(std::move(vfs)), codeGenOpts{CGO}, + HandlingTopLevelDecls(0) {} CIRGenerator::~CIRGenerator() { // There should normally not be any leftover inline method definitions. assert(DeferredInlineMemberFuncDefs.empty() || Diags.hasErrorOccurred()); diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index a2809d25b583..406c225f7504 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -100,12 +100,14 @@ class CIRGenConsumer : public clang::ASTConsumer { std::unique_ptr outputStream; ASTContext *astContext{nullptr}; + IntrusiveRefCntPtr FS; std::unique_ptr gen; public: CIRGenConsumer(CIRGenAction::OutputType action, CompilerInstance &compilerInstance, DiagnosticsEngine &diagnosticsEngine, + IntrusiveRefCntPtr VFS, const HeaderSearchOptions &headerSearchOptions, CodeGenOptions &codeGenOptions, const TargetOptions &targetOptions, @@ -117,11 +119,9 @@ class CIRGenConsumer : public clang::ASTConsumer { headerSearchOptions(headerSearchOptions), codeGenOptions(codeGenOptions), targetOptions(targetOptions), langOptions(langOptions), feOptions(feOptions), - - outputStream(std::move(os)), - - gen(std::make_unique(diagnosticsEngine, codeGenOptions)) { - } + outputStream(std::move(os)), FS(VFS), + gen(std::make_unique(diagnosticsEngine, std::move(VFS), + codeGenOptions)) {} void Initialize(ASTContext &ctx) override { assert(!astContext && "initialized multiple times"); @@ -256,8 +256,8 @@ class CIRGenConsumer : public clang::ASTConsumer { emitBackendOutput(compilerInstance, codeGenOptions, C.getTargetInfo().getDataLayoutString(), - llvmModule.get(), BackendAction::Backend_EmitLL, - nullptr, std::move(outputStream)); + llvmModule.get(), BackendAction::Backend_EmitLL, FS, + std::move(outputStream)); break; } case CIRGenAction::OutputType::EmitObj: { @@ -268,8 +268,8 @@ class CIRGenConsumer : public clang::ASTConsumer { llvmModule->setTargetTriple(targetOptions.Triple); emitBackendOutput(compilerInstance, codeGenOptions, C.getTargetInfo().getDataLayoutString(), - llvmModule.get(), BackendAction::Backend_EmitObj, - nullptr, std::move(outputStream)); + llvmModule.get(), BackendAction::Backend_EmitObj, FS, + std::move(outputStream)); break; } case CIRGenAction::OutputType::EmitAssembly: { @@ -281,7 +281,7 @@ class CIRGenConsumer : public clang::ASTConsumer { emitBackendOutput(compilerInstance, codeGenOptions, C.getTargetInfo().getDataLayoutString(), llvmModule.get(), BackendAction::Backend_EmitAssembly, - nullptr, std::move(outputStream)); + FS, std::move(outputStream)); break; } case CIRGenAction::OutputType::None: @@ -361,9 +361,9 @@ CIRGenAction::CreateASTConsumer(CompilerInstance &ci, StringRef inputFile) { out = getOutputStream(ci, inputFile, action); auto Result = std::make_unique( - action, ci, ci.getDiagnostics(), ci.getHeaderSearchOpts(), - ci.getCodeGenOpts(), ci.getTargetOpts(), ci.getLangOpts(), - ci.getFrontendOpts(), std::move(out)); + action, ci, ci.getDiagnostics(), &ci.getVirtualFileSystem(), + ci.getHeaderSearchOpts(), ci.getCodeGenOpts(), ci.getTargetOpts(), + ci.getLangOpts(), ci.getFrontendOpts(), std::move(out)); cgConsumer = Result.get(); // Enable generating macro debug info only when debug info is not disabled and From 7aa258dda37aa943a1de278d268b41993d8e5bb5 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 3 May 2023 16:25:18 -0400 Subject: [PATCH 0918/2301] [CIR][Rebase] Fixes for upstream changes --- clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt | 1 + clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 4 +++- clang/test/CIR/cc1.c | 1 - clang/test/CIR/driver.c | 9 ++++----- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index 3d0d513338fd..dcb7b9e4adb8 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -25,6 +25,7 @@ add_clang_library(clangCIRLoweringThroughMLIR ${dialect_libs} MLIRCIR MLIRAnalysis + MLIRBuiltinToLLVMIRTranslation MLIRIR MLIRParser MLIRSideEffectInterfaces diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index dae3c6735683..c2afe563a1a6 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -30,6 +30,7 @@ #include "mlir/IR/BuiltinDialect.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" +#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/DialectConversion.h" @@ -252,7 +253,7 @@ class CIRUnaryOpLowering : public mlir::OpRewritePattern { auto MinusOne = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, -1)); rewriter.replaceOpWithNewOp(op, op.getType(), - MinusOne, op.getInput()); + MinusOne, op.getInput()); break; } } @@ -573,6 +574,7 @@ lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, if (theModule.verify().failed()) report_fatal_error("Verification of the final LLVMIR dialect failed!"); + mlir::registerBuiltinDialectTranslation(*mlirCtx); mlir::registerLLVMDialectTranslation(*mlirCtx); LLVMContext llvmContext; diff --git a/clang/test/CIR/cc1.c b/clang/test/CIR/cc1.c index 6a4cb8e3026b..8b968b0b1c49 100644 --- a/clang/test/CIR/cc1.c +++ b/clang/test/CIR/cc1.c @@ -6,7 +6,6 @@ // RUN: FileCheck --input-file=%t.s %s -check-prefix=ASM // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-obj %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ -// XFAIL: * void foo() {} diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index 804292494484..65435dddd300 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -1,14 +1,13 @@ -// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -S -emit-cir %s -o %t.cir +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -S -Xclang -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ -// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-passes -S -emit-cir %s -o %t.cir -// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-verifier -S -emit-cir %s -o %t.cir -// RUN: %clang -target arm64-apple-macosx12.0.0 -fclangir -S -emit-cir %s -o %t.cir +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-passes -S -Xclang -emit-cir %s -o %t.cir +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-verifier -S -Xclang -emit-cir %s -o %t.cir +// RUN: %clang -target arm64-apple-macosx12.0.0 -fclangir -S -Xclang -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// XFAIL: * void foo() {} From 3905cb8ff2216781252239fbebfb185238a18c56 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 3 May 2023 19:13:40 -0400 Subject: [PATCH 0919/2301] [CIR][Rebase] Fix for upstream changes --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 6 ++-- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 5 ++-- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 29 +++++++++++++------ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 8 ++--- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 1 - 5 files changed, 29 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index cf8dec1b17f8..772206622598 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -148,7 +148,7 @@ void ClangToCIRArgMapping::construct(const ASTContext &Context, switch (AI.getKind()) { default: - assert(false && "NYI"); + llvm_unreachable("NYI"); case ABIArgInfo::Extend: case ABIArgInfo::Direct: { auto STy = AI.getCoerceToType().dyn_cast(); @@ -239,7 +239,7 @@ mlir::FunctionType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { switch (ArgInfo.getKind()) { default: - assert(false && "NYI"); + llvm_unreachable("NYI"); case ABIArgInfo::Extend: case ABIArgInfo::Direct: { mlir::Type argType = ArgInfo.getCoerceToType(); @@ -1112,4 +1112,4 @@ RValue CallArg::getRValue(CIRGenFunction &CGF, mlir::Location loc) const { LV.isVolatile()); IsUsed = true; return RValue::getAggregate(Copy.getAddress()); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index d9280c2b81ca..3bccda6c5e6d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -225,9 +225,8 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { return; } - if (!emission.IsConstantAggregate) { + if (!emission.IsConstantAggregate) llvm_unreachable("NYI"); - } llvm_unreachable("NYI"); } @@ -572,4 +571,4 @@ void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr, bool useEHCleanupForArray) { pushFullExprCleanup(cleanupKind, addr, type, destroyer, useEHCleanupForArray); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index d44cb1a0a5a2..095fe4efce5e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -656,11 +656,11 @@ class ConstExprEmitter case CK_HLSLArrayRValue: case CK_HLSLVectorTruncation: case CK_ToUnion: { - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); } case CK_AddressSpaceConversion: { - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); } case CK_LValueToRValue: @@ -682,7 +682,7 @@ class ConstExprEmitter case CK_ReinterpretMemberPointer: case CK_DerivedToBaseMemberPointer: case CK_BaseToDerivedMemberPointer: { - assert(0 && "not implemented"); + llvm_unreachable("not implemented"); } // These will never be supported. @@ -1010,14 +1010,14 @@ class ConstantLValueEmitter bool hasNonZeroOffset() const { return !Value.getLValueOffset().isZero(); } /// Return the value offset. - mlir::Attribute getOffset() { assert(0 && "NYI"); } + mlir::Attribute getOffset() { llvm_unreachable("NYI"); } /// Apply the value offset to the given constant. mlir::Attribute applyOffset(mlir::Attribute C) { if (!hasNonZeroOffset()) return C; // TODO(cir): use ptr_stride, or something... - assert(0 && "NYI"); + llvm_unreachable("NYI"); } }; @@ -1060,10 +1060,10 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { if (destTy.isa()) { if (value.is()) return value.get(); - assert(0 && "NYI"); + llvm_unreachable("NYI"); } - assert(0 && "NYI"); + llvm_unreachable("NYI"); } /// Try to emit an absolute l-value, such as a null pointer or an integer @@ -1298,7 +1298,15 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const Expr *E, QualType destType) { auto nonMemoryDestType = getNonMemoryType(CGM, destType); auto C = tryEmitPrivate(E, nonMemoryDestType); - return (C ? emitForMemory(C, destType) : nullptr); + if (C) { + auto attr = emitForMemory(C, destType); + auto typedAttr = llvm::dyn_cast(attr); + if (!typedAttr) + llvm_unreachable("this should always be typed"); + return typedAttr; + } else { + return nullptr; + } } mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, @@ -1353,7 +1361,10 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const Expr *E, QualType T) { else C = ConstExprEmitter(*this).Visit(const_cast(E), T); - return C; + auto typedC = llvm::dyn_cast(C); + if (!typedC) + llvm_unreachable("this should always be typed"); + return typedC; } mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 816fd74580e5..93f2240171c2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -91,10 +91,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, const clang::CodeGenOptions &CGO, DiagnosticsEngine &Diags) : builder(context, *this), astCtx(astctx), langOpts(astctx.getLangOpts()), - codeGenOpts(CGO), theModule{mlir::ModuleOp::create( - builder.getUnknownLoc())}, - Diags(Diags), target(astCtx.getTargetInfo()), - ABI(createCXXABI(*this)), genTypes{*this}, VTables{*this} { + codeGenOpts(CGO), + theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), + target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, + VTables{*this} { // Initialize the type cache. VoidTy = ::mlir::IntegerType::get(builder.getContext(), 8); diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 406c225f7504..742a188ba379 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -34,7 +34,6 @@ #include "clang/Frontend/FrontendDiagnostic.h" #include "clang/Lex/Preprocessor.h" #include "llvm/Bitcode/BitcodeReader.h" -#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/DiagnosticPrinter.h" From 91a07155135d82aed0c7a57be13bb424d3dd4a45 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 4 May 2023 17:16:43 -0400 Subject: [PATCH 0920/2301] [CIR][Rebase] Fix some things that came up during rebase --- clang/lib/CIR/CodeGen/Address.h | 1 - clang/lib/CIR/CodeGen/CIRGenCall.cpp | 2 -- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 --- clang/lib/CIR/CodeGen/CIRGenTypes.h | 1 - clang/lib/CIR/CodeGen/CIRGenValue.h | 1 - clang/lib/CIR/CodeGen/CIRGenerator.cpp | 1 - clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 1 - 8 files changed, 1 insertion(+), 11 deletions(-) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index f075cda8b957..dcf308f1d3f7 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -20,7 +20,6 @@ #include "llvm/IR/Constants.h" #include "mlir/IR/Value.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" namespace cir { diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 772206622598..c18a5c1d1226 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -391,8 +391,6 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // We might have to widen integers, but we should never truncate. assert(ArgInfo.getCoerceToType() == V.getType() && "widening NYI"); - mlir::FunctionType CIRFuncTy = getTypes().GetFunctionType(CallInfo); - // If the argument doesn't match, perform a bitcast to coerce it. This // can happen due to trivial type mismatches. if (FirstCIRArg < CIRFuncTy.getNumInputs() && diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 93f2240171c2..3121a3f887b5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -29,7 +29,6 @@ #include "mlir/IR/Verifier.h" #include "clang/AST/ASTConsumer.h" -#include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclGroup.h" #include "clang/AST/DeclObjC.h" @@ -62,6 +61,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/raw_ostream.h" + #include #include diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 07235d442cf6..d05fbcdede71 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -36,9 +36,6 @@ #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/Value.h" -#include "clang/CIR/Dialect/IR/CIRAttrs.h" -#include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" using namespace clang; namespace cir { diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index b4a183ddb42b..bf1a50580878 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -25,7 +25,6 @@ #include "llvm/ADT/SmallPtrSet.h" #include "mlir/IR/MLIRContext.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" #include diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index ca915ccdf7a3..68aa451ae3d4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -24,7 +24,6 @@ #include "llvm/ADT/PointerIntPair.h" #include "mlir/IR/Value.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" namespace cir { diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 9eb3398a840f..2867f1fdc1b0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -18,7 +18,6 @@ #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Target/LLVMIR/Import.h" -#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index c2afe563a1a6..9c79a6dc96c6 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -223,7 +223,6 @@ class CIRUnaryOpLowering : public mlir::OpRewritePattern { assert(type.isa() && "operand type not supported yet"); switch (op.getKind()) { - llvm_unreachable("NYI"); case mlir::cir::UnaryOpKind::Inc: { auto One = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); From fd9c13e039649998f3d6602b8e9aa5da6ff54b58 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 4 May 2023 17:17:31 -0400 Subject: [PATCH 0921/2301] [CIR][Rebase] Fix a typed vs untyped attr issue --- clang/lib/CIR/CodeGen/CIRGenCstEmitter.h | 4 ++-- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 27 +++++------------------ clang/test/CIR/CodeGen/basic.cpp | 1 - clang/test/CIR/CodeGen/binassign.cpp | 1 - clang/test/CIR/CodeGen/globals.cpp | 1 - clang/test/CIR/CodeGen/sourcelocation.cpp | 1 - 6 files changed, 8 insertions(+), 27 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h index e283568993cd..e1b4de6395e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h @@ -103,8 +103,8 @@ class ConstantEmitter { // functions and classes. mlir::Attribute tryEmitPrivateForVarInit(const VarDecl &D); - mlir::Attribute tryEmitPrivate(const Expr *E, QualType T); - mlir::Attribute tryEmitPrivateForMemory(const Expr *E, QualType T); + mlir::TypedAttr tryEmitPrivate(const Expr *E, QualType T); + mlir::TypedAttr tryEmitPrivateForMemory(const Expr *E, QualType T); mlir::Attribute tryEmitPrivate(const APValue &value, QualType T); mlir::Attribute tryEmitPrivateForMemory(const APValue &value, QualType T); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 095fe4efce5e..fec8daee4ecb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -791,18 +791,11 @@ class ConstExprEmitter auto C = Emitter.tryEmitPrivateForMemory(Init, EltTy); if (!C) return {}; - - assert(C.isa() && "This should always be a TypedAttr."); - auto CTyped = C.cast(); - if (i == 0) - CommonElementType = CTyped.getType(); - else if (CTyped.getType() != CommonElementType) + CommonElementType = C.getType(); + else if (C.getType() != CommonElementType) CommonElementType = nullptr; - auto typedC = llvm::dyn_cast(C); - if (!typedC) - llvm_unreachable("this should always be typed"); - Elts.push_back(typedC); + Elts.push_back(std::move(C)); } auto desiredType = CGM.getTypes().ConvertType(T); @@ -1294,7 +1287,7 @@ mlir::Attribute ConstantEmitter::tryEmitAbstractForMemory(const APValue &value, return (C ? emitForMemory(C, destType) : nullptr); } -mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const Expr *E, +mlir::TypedAttr ConstantEmitter::tryEmitPrivateForMemory(const Expr *E, QualType destType) { auto nonMemoryDestType = getNonMemoryType(CGM, destType); auto C = tryEmitPrivate(E, nonMemoryDestType); @@ -1313,15 +1306,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const APValue &value, QualType destType) { auto nonMemoryDestType = getNonMemoryType(CGM, destType); auto C = tryEmitPrivate(value, nonMemoryDestType); - if (C) { - auto attr = emitForMemory(C, destType); - auto typedAttr = llvm::dyn_cast(attr); - if (!typedAttr) - llvm_unreachable("this should always be typed"); - return typedAttr; - } - - return nullptr; + return (C ? emitForMemory(C, destType) : nullptr); } mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, @@ -1341,7 +1326,7 @@ mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, return C; } -mlir::Attribute ConstantEmitter::tryEmitPrivate(const Expr *E, QualType T) { +mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *E, QualType T) { assert(!T->isVoidType() && "can't emit a void constant"); Expr::EvalResult Result; bool Success; diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 7a471d776a25..a5a77f13396e 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * int *p0() { int *p = nullptr; diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp index b765ce2de856..5e0ad30363ed 100644 --- a/clang/test/CIR/CodeGen/binassign.cpp +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * int foo(int a, int b) { int x = a * b; diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 205c1ef56491..63c4f75a24c0 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * int a = 3; const int b = 4; // unless used wont be generated diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 19cb68667f79..0b1f1b059b68 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * int s0(int a, int b) { int x = a + b; From db12a71712c3a4902485c215a61a76c6d86eaddc Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 2 May 2023 11:36:44 -0700 Subject: [PATCH 0922/2301] [CIR][CIRGen] Bitfields: accumulate them while building structs This doesn't implement accessing bitfields just yet, but only record emission containing those. --- clang/lib/CIR/CodeGen/CIRDataLayout.h | 32 +++ clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 105 +++++++- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 228 ++++++++++++++++-- clang/test/CIR/CodeGen/bitfields.cpp | 18 ++ 4 files changed, 365 insertions(+), 18 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRDataLayout.h create mode 100644 clang/test/CIR/CodeGen/bitfields.cpp diff --git a/clang/lib/CIR/CodeGen/CIRDataLayout.h b/clang/lib/CIR/CodeGen/CIRDataLayout.h new file mode 100644 index 000000000000..814737fc6ed2 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRDataLayout.h @@ -0,0 +1,32 @@ +//===--- CIRDataLayout.h - CIR Data Layout Information ----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// Provides a LLVM-like API wrapper to DLTI and MLIR layout queries. This makes +// it easier to port some of LLVM codegen layout logic to CIR. +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CIRDATALAYOUT_H +#define LLVM_CLANG_LIB_CIR_CIRDATALAYOUT_H + +#include "mlir/Dialect/DLTI/DLTI.h" +#include "mlir/IR/BuiltinOps.h" + +namespace cir { + +class CIRDataLayout { + bool bigEndian = false; + +public: + mlir::DataLayout layout; + + CIRDataLayout(mlir::ModuleOp modOp); + bool isBigEndian() { return bigEndian; } +}; + +} // namespace cir + +#endif \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index 687ac46668f6..b1ded0017d59 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -12,10 +12,109 @@ #include "clang/AST/Decl.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/raw_ostream.h" + namespace cir { -/// CIRGenRecordLayout - This class handles struct and union layout info while -/// lowering AST types to CIR types. +/// Structure with information about how a bitfield should be accessed. This is +/// very similar to what LLVM codegen does, once CIR evolves it's possible we +/// can use a more higher level representation. +/// TODO(cir): the comment below is extracted from LLVM, build a CIR version of +/// this. +/// +/// Often we layout a sequence of bitfields as a contiguous sequence of bits. +/// When the AST record layout does this, we represent it in the LLVM IR's type +/// as either a sequence of i8 members or a byte array to reserve the number of +/// bytes touched without forcing any particular alignment beyond the basic +/// character alignment. +/// +/// Then accessing a particular bitfield involves converting this byte array +/// into a single integer of that size (i24 or i40 -- may not be power-of-two +/// size), loading it, and shifting and masking to extract the particular +/// subsequence of bits which make up that particular bitfield. This structure +/// encodes the information used to construct the extraction code sequences. +/// The CIRGenRecordLayout also has a field index which encodes which +/// byte-sequence this bitfield falls within. Let's assume the following C +/// struct: +/// +/// struct S { +/// char a, b, c; +/// unsigned bits : 3; +/// unsigned more_bits : 4; +/// unsigned still_more_bits : 7; +/// }; +/// +/// This will end up as the following LLVM type. The first array is the +/// bitfield, and the second is the padding out to a 4-byte alignment. +/// +/// %t = type { i8, i8, i8, i8, i8, [3 x i8] } +/// +/// When generating code to access more_bits, we'll generate something +/// essentially like this: +/// +/// define i32 @foo(%t* %base) { +/// %0 = gep %t* %base, i32 0, i32 3 +/// %2 = load i8* %1 +/// %3 = lshr i8 %2, 3 +/// %4 = and i8 %3, 15 +/// %5 = zext i8 %4 to i32 +/// ret i32 %i +/// } +/// +struct CIRGenBitFieldInfo { + /// The offset within a contiguous run of bitfields that are represented as + /// a single "field" within the LLVM struct type. This offset is in bits. + unsigned Offset : 16; + + /// The total size of the bit-field, in bits. + unsigned Size : 15; + + /// Whether the bit-field is signed. + unsigned IsSigned : 1; + + /// The storage size in bits which should be used when accessing this + /// bitfield. + unsigned StorageSize; + + /// The offset of the bitfield storage from the start of the struct. + clang::CharUnits StorageOffset; + + /// The offset within a contiguous run of bitfields that are represented as a + /// single "field" within the LLVM struct type, taking into account the AAPCS + /// rules for volatile bitfields. This offset is in bits. + unsigned VolatileOffset : 16; + + /// The storage size in bits which should be used when accessing this + /// bitfield. + unsigned VolatileStorageSize; + + /// The offset of the bitfield storage from the start of the struct. + clang::CharUnits VolatileStorageOffset; + + CIRGenBitFieldInfo() + : Offset(), Size(), IsSigned(), StorageSize(), VolatileOffset(), + VolatileStorageSize() {} + + CIRGenBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned, + unsigned StorageSize, clang::CharUnits StorageOffset) + : Offset(Offset), Size(Size), IsSigned(IsSigned), + StorageSize(StorageSize), StorageOffset(StorageOffset) {} + + void print(llvm::raw_ostream &OS) const; + void dump() const; + + /// Given a bit-field decl, build an appropriate helper object for + /// accessing that field (which is expected to have the given offset and + /// size). + static CIRGenBitFieldInfo MakeInfo(class CIRGenTypes &Types, + const clang::FieldDecl *FD, + uint64_t Offset, uint64_t Size, + uint64_t StorageSize, + clang::CharUnits StorageOffset); +}; + +/// This class handles struct and union layout info while lowering AST types +/// to CIR types. /// /// These layout objects are only created on demand as CIR generation requires. class CIRGenRecordLayout { @@ -40,7 +139,7 @@ class CIRGenRecordLayout { /// Map from (bit-field) struct field to the corresponding CIR struct type /// field no. This info is populated by record builder. /// TODO(CIR): value is an int for now, fix when we support bitfields - llvm::DenseMap BitFields; + llvm::DenseMap BitFields; // FIXME: Maybe we could use CXXBaseSpecifier as the key and use a single map // for both virtual and non-virtual bases. diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 400e148d8593..30ac1b30895d 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -1,4 +1,5 @@ +#include "CIRDataLayout.h" #include "CIRGenBuilder.h" #include "CIRGenModule.h" #include "CIRGenTypes.h" @@ -15,8 +16,9 @@ #include -using namespace cir; +using namespace llvm; using namespace clang; +using namespace cir; namespace { /// The CIRRecordLowering is responsible for lowering an ASTRecordLayout to a @@ -49,7 +51,19 @@ struct CIRRecordLowering final { // The constructor. CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl, bool isPacked); - // Short helper routines. + + /// ---------------------- + /// Short helper routines. + + /// Constructs a MemberInfo instance from an offset and mlir::Type. + MemberInfo StorageInfo(CharUnits Offset, mlir::Type Data) { + return MemberInfo(Offset, MemberInfo::InfoKind::Field, Data); + } + + // Layout routines. + void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, + mlir::Type StorageType); + void lower(bool nonVirtualBaseType); void lowerUnion(); @@ -58,6 +72,8 @@ struct CIRRecordLowering final { void accumulateVPtrs(); void accumulateVBases(); void accumulateFields(); + void accumulateBitFields(RecordDecl::field_iterator Field, + RecordDecl::field_iterator FieldEnd); mlir::Type getVFPtrType(); @@ -66,6 +82,16 @@ struct CIRRecordLowering final { return astContext.getTargetInfo().getABI().starts_with("aapcs"); } + /// The Microsoft bitfield layout rule allocates discrete storage + /// units of the field's formal type and only combines adjacent + /// fields of the same formal type. We want to emit a layout with + /// these discrete storage units instead of combining them into a + /// continuous run. + bool isDiscreteBitFieldABI() { + return astContext.getTargetInfo().getCXXABI().isMicrosoft() || + recordDecl->isMsStruct(astContext); + } + // The Itanium base layout rule allows virtual bases to overlap // other bases, which complicates layout in specific ways. // @@ -84,10 +110,13 @@ struct CIRRecordLowering final { void calculateZeroInit(); CharUnits getSize(mlir::Type Ty) { - return CharUnits::fromQuantity(layout.getTypeSize(Ty)); + return CharUnits::fromQuantity(dataLayout.layout.getTypeSize(Ty)); + } + CharUnits getSizeInBits(mlir::Type Ty) { + return CharUnits::fromQuantity(dataLayout.layout.getTypeSizeInBits(Ty)); } CharUnits getAlignment(mlir::Type Ty) { - return CharUnits::fromQuantity(layout.getTypeABIAlignment(Ty)); + return CharUnits::fromQuantity(dataLayout.layout.getTypeABIAlignment(Ty)); } bool isZeroInitializable(const FieldDecl *FD) { return cirGenTypes.isZeroInitializable(FD->getType()); @@ -101,6 +130,12 @@ struct CIRRecordLowering final { astContext.getCharWidth()); } + /// Wraps mlir::IntegerType with some implicit arguments. + mlir::Type getIntNType(uint64_t NumBits) { + unsigned AlignedBits = llvm::alignTo(NumBits, astContext.getCharWidth()); + return mlir::IntegerType::get(&cirGenTypes.getMLIRContext(), AlignedBits); + } + mlir::Type getByteArrayType(CharUnits numberOfChars) { assert(!numberOfChars.isZero() && "Empty byte arrays aren't allowed."); mlir::Type type = getCharType(); @@ -152,10 +187,10 @@ struct CIRRecordLowering final { // Output fields, consumed by CIRGenTypes::computeRecordLayout llvm::SmallVector fieldTypes; llvm::DenseMap fields; - llvm::DenseMap bitFields; + llvm::DenseMap bitFields; llvm::DenseMap nonVirtualBases; llvm::DenseMap virtualBases; - mlir::DataLayout layout; + CIRDataLayout dataLayout; bool IsZeroInitializable : 1; bool IsZeroInitializableAsBase : 1; bool isPacked : 1; @@ -173,8 +208,34 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, astContext{cirGenTypes.getContext()}, recordDecl{recordDecl}, cxxRecordDecl{llvm::dyn_cast(recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, - layout{cirGenTypes.getModule().getModule()}, IsZeroInitializable(true), - IsZeroInitializableAsBase(true), isPacked{isPacked} {} + dataLayout{cirGenTypes.getModule().getModule()}, + IsZeroInitializable(true), IsZeroInitializableAsBase(true), + isPacked{isPacked} {} + +void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, + CharUnits StartOffset, + mlir::Type StorageType) { + CIRGenBitFieldInfo &Info = bitFields[FD->getCanonicalDecl()]; + Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType(); + Info.Offset = + (unsigned)(getFieldBitOffset(FD) - astContext.toBits(StartOffset)); + Info.Size = FD->getBitWidthValue(); + Info.StorageSize = getSizeInBits(StorageType).getQuantity(); + Info.StorageOffset = StartOffset; + + if (Info.Size > Info.StorageSize) + Info.Size = Info.StorageSize; + // Reverse the bit offsets for big endian machines. Because we represent + // a bitfield as a single large integer load, we can imagine the bits + // counting from the most-significant-bit instead of the + // least-significant-bit. + if (dataLayout.isBigEndian()) + Info.Offset = Info.StorageSize - (Info.Offset + Info.Size); + + Info.VolatileStorageSize = 0; + Info.VolatileOffset = 0; + Info.VolatileStorageOffset = CharUnits::Zero(); +} void CIRRecordLowering::lower(bool nonVirtualBaseType) { if (recordDecl->isUnion()) { @@ -375,7 +436,7 @@ void CIRRecordLowering::fillOutputFields() { fields[member.fieldDecl->getCanonicalDecl()] = fieldTypes.size() - 1; // A field without storage must be a bitfield. if (!member.data) - llvm_unreachable("NYI"); + setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back()); } else if (member.kind == MemberInfo::InfoKind::Base) { nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1; } else if (member.kind == MemberInfo::InfoKind::VBase) { @@ -385,13 +446,116 @@ void CIRRecordLowering::fillOutputFields() { } } +void CIRRecordLowering::accumulateBitFields( + RecordDecl::field_iterator Field, RecordDecl::field_iterator FieldEnd) { + // Run stores the first element of the current run of bitfields. FieldEnd is + // used as a special value to note that we don't have a current run. A + // bitfield run is a contiguous collection of bitfields that can be stored in + // the same storage block. Zero-sized bitfields and bitfields that would + // cross an alignment boundary break a run and start a new one. + RecordDecl::field_iterator Run = FieldEnd; + // Tail is the offset of the first bit off the end of the current run. It's + // used to determine if the ASTRecordLayout is treating these two bitfields as + // contiguous. StartBitOffset is offset of the beginning of the Run. + uint64_t StartBitOffset, Tail = 0; + if (isDiscreteBitFieldABI()) { + llvm_unreachable("NYI"); + } + + // Check if OffsetInRecord (the size in bits of the current run) is better + // as a single field run. When OffsetInRecord has legal integer width, and + // its bitfield offset is naturally aligned, it is better to make the + // bitfield a separate storage component so as it can be accessed directly + // with lower cost. + auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord, + uint64_t StartBitOffset) { + if (!cirGenTypes.getModule().getCodeGenOpts().FineGrainedBitfieldAccesses) + return false; + llvm_unreachable("NYI"); + // if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) || + // !DataLayout.fitsInLegalInteger(OffsetInRecord)) + // return false; + // Make sure StartBitOffset is naturally aligned if it is treated as an + // IType integer. + // if (StartBitOffset % + // astContext.toBits(getAlignment(getIntNType(OffsetInRecord))) != + // 0) + // return false; + return true; + }; + + // The start field is better as a single field run. + bool StartFieldAsSingleRun = false; + for (;;) { + // Check to see if we need to start a new run. + if (Run == FieldEnd) { + // If we're out of fields, return. + if (Field == FieldEnd) + break; + // Any non-zero-length bitfield can start a new run. + if (!Field->isZeroLengthBitField()) { + Run = Field; + StartBitOffset = getFieldBitOffset(*Field); + Tail = StartBitOffset + Field->getBitWidthValue(); + StartFieldAsSingleRun = + IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset); + } + ++Field; + continue; + } + + // If the start field of a new run is better as a single run, or if current + // field (or consecutive fields) is better as a single run, or if current + // field has zero width bitfield and either UseZeroLengthBitfieldAlignment + // or UseBitFieldTypeAlignment is set to true, or if the offset of current + // field is inconsistent with the offset of previous field plus its offset, + // skip the block below and go ahead to emit the storage. Otherwise, try to + // add bitfields to the run. + if (!StartFieldAsSingleRun && Field != FieldEnd && + !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) && + (!Field->isZeroLengthBitField() || + (!astContext.getTargetInfo().useZeroLengthBitfieldAlignment() && + !astContext.getTargetInfo().useBitFieldTypeAlignment())) && + Tail == getFieldBitOffset(*Field)) { + Tail += Field->getBitWidthValue(); + ++Field; + continue; + } + + // We've hit a break-point in the run and need to emit a storage field. + auto Type = getIntNType(Tail - StartBitOffset); + // Add the storage member to the record and set the bitfield info for all of + // the bitfields in the run. Bitfields get the offset of their storage but + // come afterward and remain there after a stable sort. + members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type)); + for (; Run != Field; ++Run) + members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset), + MemberInfo::InfoKind::Field, nullptr, *Run)); + Run = FieldEnd; + StartFieldAsSingleRun = false; + } +} + void CIRRecordLowering::accumulateFields() { - for (auto *field : recordDecl->fields()) { - assert(!field->isBitField() && "bit fields NYI"); - assert(!field->isZeroSize(astContext) && "zero size members NYI"); - members.push_back(MemberInfo{bitsToCharUnits(getFieldBitOffset(field)), - MemberInfo::InfoKind::Field, - getStorageType(field), field}); + for (RecordDecl::field_iterator field = recordDecl->field_begin(), + fieldEnd = recordDecl->field_end(); + field != fieldEnd;) { + if (field->isBitField()) { + RecordDecl::field_iterator start = field; + // Iterate to gather the list of bitfields. + for (++field; field != fieldEnd && field->isBitField(); ++field) + ; + accumulateBitFields(start, field); + } else if (!field->isZeroSize(astContext)) { + members.push_back(MemberInfo{bitsToCharUnits(getFieldBitOffset(*field)), + MemberInfo::InfoKind::Field, + getStorageType(*field), *field}); + ++field; + } else { + // TODO(cir): do we want to do anything special about zero size + // members? + ++field; + } } } @@ -459,3 +623,37 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, // TODO: implement verification return RL; } + +CIRGenBitFieldInfo CIRGenBitFieldInfo::MakeInfo(CIRGenTypes &Types, + const FieldDecl *FD, + uint64_t Offset, uint64_t Size, + uint64_t StorageSize, + CharUnits StorageOffset) { + llvm_unreachable("NYI"); +} + +CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { + auto dlSpec = modOp->getAttr(mlir::DLTIDialect::kDataLayoutAttrName) + .dyn_cast(); + assert(dlSpec && "expected dl_spec in the module"); + auto entries = dlSpec.getEntries(); + + for (auto entry : entries) { + auto entryKey = entry.getKey(); + auto strKey = entryKey.dyn_cast(); + if (!strKey) + continue; + auto entryName = strKey.strref(); + if (entryName == mlir::DLTIDialect::kDataLayoutEndiannessKey) { + auto value = entry.getValue().dyn_cast(); + assert(value && "expected string attribute"); + auto endian = value.getValue(); + if (endian == mlir::DLTIDialect::kDataLayoutEndiannessBig) + bigEndian = true; + else if (endian == mlir::DLTIDialect::kDataLayoutEndiannessLittle) + bigEndian = false; + else + llvm_unreachable("unknown endianess"); + } + } +} diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp new file mode 100644 index 000000000000..7a99ba1266a2 --- /dev/null +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct __long { + struct __attribute__((__packed__)) { + unsigned __is_long_ : 1; + unsigned __cap_ : sizeof(unsigned) * 8 - 1; + }; + unsigned __size_; + unsigned *__data_; +}; + +void m() { + __long l; +} + +// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", i32, #cir.recdecl.ast> +// CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long", !ty_22struct2Eanon22, i32, !cir.ptr> \ No newline at end of file From 3e8865c7583ed64040fba61e23e434342adf9479 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 3 May 2023 20:32:11 -0700 Subject: [PATCH 0923/2301] [CIR][CIRGen] Work around isAAPCS/AAPCSBitfieldWidth for now --- clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 5 ++++- clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 30ac1b30895d..665e9d6601d6 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -82,6 +82,9 @@ struct CIRRecordLowering final { return astContext.getTargetInfo().getABI().starts_with("aapcs"); } + /// Helper function to check if the target machine is BigEndian. + bool isBE() const { return astContext.getTargetInfo().isBigEndian(); } + /// The Microsoft bitfield layout rule allocates discrete storage /// units of the field's formal type and only combines adjacent /// fields of the same formal type. We want to emit a layout with @@ -364,7 +367,7 @@ void CIRRecordLowering::computeVolatileBitfields() { return; for ([[maybe_unused]] auto &I : bitFields) { - llvm_unreachable("NYI"); + assert(!UnimplementedFeature::armComputeVolatileBitfields()); } } diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 5c8afbafa6e6..214a1b56fc16 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -75,6 +75,7 @@ struct UnimplementedFeature { static bool ehStack() { return false; } static bool isVarArg() { return false; } static bool setNonGC() { return false; } + static bool armComputeVolatileBitfields() { return false; } }; } // namespace cir From ae3b1ced556aec921d29139646201aa2eec64ae3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 4 May 2023 15:50:07 -0700 Subject: [PATCH 0924/2301] [CIR][CIRGen][NFC] Datalayout: add few more convenient methods --- clang/lib/CIR/CodeGen/CIRDataLayout.h | 9 +++++++++ clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 3 +++ 2 files changed, 12 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRDataLayout.h b/clang/lib/CIR/CodeGen/CIRDataLayout.h index 814737fc6ed2..fade07da6d93 100644 --- a/clang/lib/CIR/CodeGen/CIRDataLayout.h +++ b/clang/lib/CIR/CodeGen/CIRDataLayout.h @@ -25,6 +25,15 @@ class CIRDataLayout { CIRDataLayout(mlir::ModuleOp modOp); bool isBigEndian() { return bigEndian; } + + // `useABI` is `true` if not using prefered alignment. + unsigned getAlignment(mlir::Type ty, bool useABI) const { + return useABI ? layout.getTypeABIAlignment(ty) + : layout.getTypePreferredAlignment(ty); + } + unsigned getABITypeAlign(mlir::Type ty) const { + return getAlignment(ty, true); + } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index f7ab440d9628..4686bf32f1c8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -239,6 +239,7 @@ void StructType::computeSizeAndAlignment( if (size || align || padded) return; + // This is a similar algorithm to LLVM's StructLayout. unsigned structSize = 0; llvm::Align structAlignment{1}; [[maybe_unused]] bool isPadded = false; @@ -248,6 +249,8 @@ void StructType::computeSizeAndAlignment( // Loop over each of the elements, placing them in memory. for (unsigned i = 0, e = numElements; i != e; ++i) { auto ty = members[i]; + + // This matches LLVM since it uses the ABI instead of preferred alignment. const llvm::Align tyAlign = llvm::Align(getPacked() ? 1 : dataLayout.getTypeABIAlignment(ty)); From 60ff0cf5dd1c2e668564bde3b2035bf73216f2d8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 4 May 2023 16:17:11 -0700 Subject: [PATCH 0925/2301] [CIR][CIRGen] Unions: add pointer alignment information to unblock layout computation --- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 9 ++++++--- clang/test/CIR/CodeGen/union.cpp | 6 +++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 4686bf32f1c8..0633862422a2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -171,19 +171,22 @@ void ArrayType::print(mlir::AsmPrinter &printer) const { llvm::TypeSize PointerType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { - llvm_unreachable("NYI"); + // FIXME: improve this in face of address spaces + return llvm::TypeSize::getFixed(64); } uint64_t PointerType::getABIAlignment(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { - llvm_unreachable("NYI"); + // FIXME: improve this in face of address spaces + return 8; } uint64_t PointerType::getPreferredAlignment( const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { - llvm_unreachable("NYI"); + // FIXME: improve this in face of address spaces + return 8; } llvm::TypeSize diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index 6f15199a56fe..65586bfd461f 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -3,18 +3,18 @@ typedef struct { int x; } yolo; typedef union { yolo y; struct { int lifecnt; }; } yolm; -typedef union { yolo y; struct { int lifecnt; int genpad; }; } yolm2; +typedef union { yolo y; struct { int *lifecnt; int genpad; }; } yolm2; void m() { yolm q; yolm2 q2; } -// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", i32, i32, #cir.recdecl.ast> +// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", !cir.ptr, i32, #cir.recdecl.ast> // CHECK: !ty_22struct2Eyolo22 = !cir.struct<"struct.yolo", i32, #cir.recdecl.ast> // CHECK: !ty_22union2Eyolm22 = !cir.struct<"union.yolm", !ty_22struct2Eyolo22> // CHECK: !ty_22union2Eyolm222 = !cir.struct<"union.yolm2", !ty_22struct2Eanon22> // CHECK: cir.func @_Z1mv() { // CHECK: cir.alloca !ty_22union2Eyolm22, cir.ptr , ["q"] {alignment = 4 : i64} -// CHECK: cir.alloca !ty_22union2Eyolm222, cir.ptr , ["q2"] {alignment = 4 : i64} \ No newline at end of file +// CHECK: cir.alloca !ty_22union2Eyolm222, cir.ptr , ["q2"] {alignment = 8 : i64} \ No newline at end of file From f7b638b832901f935a32cd03c9edc0842054b66d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 4 May 2023 17:13:32 -0700 Subject: [PATCH 0926/2301] [CIR][CIRGen] Braced init: handle partial initialization of class fields - DeclRefExpr: try to emit constants before loading from lvalue - Add testcase. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 9 ++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 8 +- clang/lib/CIR/CodeGen/CIRGenCstEmitter.h | 8 + clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 176 ++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 128 +++++++++++++-- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 30 +++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 5 + clang/lib/CIR/CodeGen/CIRGenFunction.h | 41 +++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 22 +++ clang/lib/CIR/CodeGen/CIRGenModule.h | 9 ++ clang/lib/CIR/CodeGen/CIRGenValue.h | 1 + clang/test/CIR/CodeGen/struct.cpp | 39 ++++- 13 files changed, 463 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 5dfe1ec6e7a8..68d768d96576 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -163,6 +163,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return create(loc, int32Ty, mlir::IntegerAttr::get(int32Ty, C)); } + mlir::cir::ConstantOp getInt64(uint32_t C, mlir::Location loc) { + auto int64Ty = getInt64Ty(); + return create(loc, int64Ty, + mlir::IntegerAttr::get(int64Ty, C)); + } mlir::Value getBool(bool state, mlir::Location loc) { return create( loc, getBoolTy(), mlir::BoolAttr::get(getContext(), state)); @@ -188,6 +193,10 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return create(loc, ty, getZeroAttr(ty)); } + mlir::cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr) { + return create(loc, attr.getType(), attr); + } + // // Block handling helpers // ---------------------- diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index fdff7b75e5eb..0ac5bdc3339d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -782,7 +782,13 @@ void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, llvm_unreachable("NYI"); break; case TEK_Aggregate: { - llvm_unreachable("NYI"); + AggValueSlot Slot = AggValueSlot::forLValue( + LHS, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, getOverlapForFieldInit(Field), + AggValueSlot::IsNotZeroed, + // Checks are made by the code that calls constructor. + AggValueSlot::IsSanitizerChecked); + buildAggExpr(Init, Slot); break; } } diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h index e1b4de6395e3..5c9e545f227f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h @@ -98,6 +98,14 @@ class ConstantEmitter { /// constant. mlir::Attribute tryEmitAbstractForInitializer(const VarDecl &D); + /// Emit the result of the given expression as an abstract constant, + /// asserting that it succeeded. This is only safe to do when the + /// expression is known to be a constant expression with either a fairly + /// simple type or a known simple form. + mlir::Attribute emitAbstract(const Expr *E, QualType T); + mlir::Attribute emitAbstract(SourceLocation loc, const APValue &value, + QualType T); + // These are private helper routines of the constant emitter that // can't actually be private because things are split out into helper // functions and classes. diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 087f54133827..c71f0e14ecfa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -12,6 +12,7 @@ #include "CIRGenCXXABI.h" #include "CIRGenCall.h" +#include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "UnimplementedFeatureGuarding.h" @@ -548,7 +549,22 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { return LV; } - llvm_unreachable("Unhandled DeclRefExpr?"); + // FIXME: While we're emitting a binding from an enclosing scope, all other + // DeclRefExprs we see should be implicitly treated as if they also refer to + // an enclosing scope. + if (const auto *BD = dyn_cast(ND)) { + llvm_unreachable("NYI"); + } + + // We can form DeclRefExprs naming GUID declarations when reconstituting + // non-type template parameters into expressions. + if (const auto *GD = dyn_cast(ND)) + llvm_unreachable("NYI"); + + if (const auto *TPO = dyn_cast(ND)) + llvm_unreachable("NYI"); + + llvm_unreachable("Unhandled DeclRefExpr"); } LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { @@ -1982,3 +1998,161 @@ mlir::cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty, return cast( buildAlloca(Name.str(), Ty, Loc, CharUnits()).getDefiningOp()); } + +/// Given an object of the given canonical type, can we safely copy a +/// value out of it based on its initializer? +static bool isConstantEmittableObjectType(QualType type) { + assert(type.isCanonical()); + assert(!type->isReferenceType()); + + // Must be const-qualified but non-volatile. + Qualifiers qs = type.getLocalQualifiers(); + if (!qs.hasConst() || qs.hasVolatile()) + return false; + + // Otherwise, all object types satisfy this except C++ classes with + // mutable subobjects or non-trivial copy/destroy behavior. + if (const auto *RT = dyn_cast(type)) + if (const auto *RD = dyn_cast(RT->getDecl())) + if (RD->hasMutableFields() || !RD->isTrivial()) + return false; + + return true; +} + +/// Can we constant-emit a load of a reference to a variable of the +/// given type? This is different from predicates like +/// Decl::mightBeUsableInConstantExpressions because we do want it to apply +/// in situations that don't necessarily satisfy the language's rules +/// for this (e.g. C++'s ODR-use rules). For example, we want to able +/// to do this with const float variables even if those variables +/// aren't marked 'constexpr'. +enum ConstantEmissionKind { + CEK_None, + CEK_AsReferenceOnly, + CEK_AsValueOrReference, + CEK_AsValueOnly +}; +static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) { + type = type.getCanonicalType(); + if (const auto *ref = dyn_cast(type)) { + if (isConstantEmittableObjectType(ref->getPointeeType())) + return CEK_AsValueOrReference; + return CEK_AsReferenceOnly; + } + if (isConstantEmittableObjectType(type)) + return CEK_AsValueOnly; + return CEK_None; +} + +/// Try to emit a reference to the given value without producing it as +/// an l-value. This is just an optimization, but it avoids us needing +/// to emit global copies of variables if they're named without triggering +/// a formal use in a context where we can't emit a direct reference to them, +/// for instance if a block or lambda or a member of a local class uses a +/// const int variable or constexpr variable from an enclosing function. +CIRGenFunction::ConstantEmission +CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) { + ValueDecl *value = refExpr->getDecl(); + + // The value needs to be an enum constant or a constant variable. + ConstantEmissionKind CEK; + if (isa(value)) { + CEK = CEK_None; + } else if (auto *var = dyn_cast(value)) { + CEK = checkVarTypeForConstantEmission(var->getType()); + } else if (isa(value)) { + CEK = CEK_AsValueOnly; + } else { + CEK = CEK_None; + } + if (CEK == CEK_None) + return ConstantEmission(); + + Expr::EvalResult result; + bool resultIsReference; + QualType resultType; + + // It's best to evaluate all the way as an r-value if that's permitted. + if (CEK != CEK_AsReferenceOnly && + refExpr->EvaluateAsRValue(result, getContext())) { + resultIsReference = false; + resultType = refExpr->getType(); + + // Otherwise, try to evaluate as an l-value. + } else if (CEK != CEK_AsValueOnly && + refExpr->EvaluateAsLValue(result, getContext())) { + resultIsReference = true; + resultType = value->getType(); + + // Failure. + } else { + return ConstantEmission(); + } + + // In any case, if the initializer has side-effects, abandon ship. + if (result.HasSideEffects) + return ConstantEmission(); + + // In CUDA/HIP device compilation, a lambda may capture a reference variable + // referencing a global host variable by copy. In this case the lambda should + // make a copy of the value of the global host variable. The DRE of the + // captured reference variable cannot be emitted as load from the host + // global variable as compile time constant, since the host variable is not + // accessible on device. The DRE of the captured reference variable has to be + // loaded from captures. + if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() && + refExpr->refersToEnclosingVariableOrCapture()) { + auto *MD = dyn_cast_or_null(CurCodeDecl); + if (MD && MD->getParent()->isLambda() && + MD->getOverloadedOperator() == OO_Call) { + const APValue::LValueBase &base = result.Val.getLValueBase(); + if (const ValueDecl *D = base.dyn_cast()) { + if (const VarDecl *VD = dyn_cast(D)) { + if (!VD->hasAttr()) { + return ConstantEmission(); + } + } + } + } + } + + // Emit as a constant. + // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires + // somewhat heavy refactoring...) + auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(), + result.Val, resultType); + mlir::TypedAttr cstToEmit = C.dyn_cast_or_null(); + assert(cstToEmit && "expect a typed attribute"); + + // Make sure we emit a debug reference to the global variable. + // This should probably fire even for + if (isa(value)) { + if (!getContext().DeclMustBeEmitted(cast(value))) + buildDeclRefExprDbgValue(refExpr, result.Val); + } else { + assert(isa(value)); + buildDeclRefExprDbgValue(refExpr, result.Val); + } + + // If we emitted a reference constant, we need to dereference that. + if (resultIsReference) + return ConstantEmission::forReference(cstToEmit); + + return ConstantEmission::forValue(cstToEmit); +} + +CIRGenFunction::ConstantEmission +CIRGenFunction::tryEmitAsConstant(const MemberExpr *ME) { + llvm_unreachable("NYI"); +} + +mlir::Value CIRGenFunction::buildScalarConstant( + const CIRGenFunction::ConstantEmission &Constant, Expr *E) { + assert(Constant && "not a constant"); + if (Constant.isReference()) + return buildLoadOfLValue(Constant.getReferenceLValue(*this, E), + E->getExprLoc()) + .getScalarVal(); + return builder.getConstant(getLoc(E->getSourceRange()), Constant.getValue()); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 1913f8eae145..ee3e89c2ca70 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -64,7 +64,11 @@ class AggExprEmitter : public StmtVisitor { StmtVisitor::Visit(E); } - void VisitStmt(Stmt *S) { llvm_unreachable("NYI"); } + void VisitStmt(Stmt *S) { + llvm::errs() << "Missing visitor for AggExprEmitter Stmt: " + << S->getStmtClassName() << "\n"; + llvm_unreachable("NYI"); + } void VisitParenExpr(ParenExpr *PE) { llvm_unreachable("NYI"); } void VisitGenericSelectionExpr(GenericSelectionExpr *GE) { llvm_unreachable("NYI"); @@ -131,8 +135,9 @@ class AggExprEmitter : public StmtVisitor { } void VisitNoInitExpr(NoInitExpr *E) { llvm_unreachable("NYI"); } void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { llvm_unreachable("NYI"); } - void VisitXCXDefaultInitExpr(CXXDefaultInitExpr *E) { - llvm_unreachable("NYI"); + void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) { + CIRGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE); + Visit(DIE->getExpr()); } void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); void VisitCXXConstructExpr(const CXXConstructExpr *E); @@ -237,12 +242,14 @@ void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) { // FIXME: Ignore result? // FIXME: Are initializers affected by volatile? if (Dest.isZeroed() && isSimpleZero(E, CGF)) { - // TODO(cir): LLVM codegen just returns here, do we want to - // do anything different when we hit this code path? - llvm_unreachable("NYI"); - // Storing "i32 0" to a zero'd memory location is a noop. + // TODO(cir): LLVM codegen considers 'storing "i32 0" to a zero'd memory + // location is a noop'. Consider emitting the store to zero in CIR, as to + // model the actual user behavior, we can have a pass to optimize this out + // later. return; - } else if (isa(E) || isa(E)) { + } + + if (isa(E) || isa(E)) { auto loc = E->getSourceRange().isValid() ? CGF.getLoc(E->getSourceRange()) : *CGF.currSrcLoc; return buildNullInitializationToLValue(loc, LV); @@ -658,8 +665,71 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { // Helpers and dispatcher //===----------------------------------------------------------------------===// -/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of -/// zeros in it, emit a memset and avoid storing the individual zeros. +/// Get an approximate count of the number of non-zero bytes that will be stored +/// when outputting the initializer for the specified initializer expression. +/// FIXME(cir): this can be shared with LLVM codegen. +static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CIRGenFunction &CGF) { + if (auto *MTE = dyn_cast(E)) + E = MTE->getSubExpr(); + E = E->IgnoreParenNoopCasts(CGF.getContext()); + + // 0 and 0.0 won't require any non-zero stores! + if (isSimpleZero(E, CGF)) + return CharUnits::Zero(); + + // If this is an initlist expr, sum up the size of sizes of the (present) + // elements. If this is something weird, assume the whole thing is non-zero. + const InitListExpr *ILE = dyn_cast(E); + while (ILE && ILE->isTransparent()) + ILE = dyn_cast(ILE->getInit(0)); + if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType())) + return CGF.getContext().getTypeSizeInChars(E->getType()); + + // InitListExprs for structs have to be handled carefully. If there are + // reference members, we need to consider the size of the reference, not the + // referencee. InitListExprs for unions and arrays can't have references. + if (const RecordType *RT = E->getType()->getAs()) { + if (!RT->isUnionType()) { + RecordDecl *SD = RT->getDecl(); + CharUnits NumNonZeroBytes = CharUnits::Zero(); + + unsigned ILEElement = 0; + if (auto *CXXRD = dyn_cast(SD)) + while (ILEElement != CXXRD->getNumBases()) + NumNonZeroBytes += + GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF); + for (const auto *Field : SD->fields()) { + // We're done once we hit the flexible array member or run out of + // InitListExpr elements. + if (Field->getType()->isIncompleteArrayType() || + ILEElement == ILE->getNumInits()) + break; + if (Field->isUnnamedBitField()) + continue; + + const Expr *E = ILE->getInit(ILEElement++); + + // Reference values are always non-null and have the width of a pointer. + if (Field->getType()->isReferenceType()) + NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits( + CGF.getTarget().getPointerWidth(LangAS::Default)); + else + NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF); + } + + return NumNonZeroBytes; + } + } + + // FIXME: This overestimates the number of non-zero bytes for bit-fields. + CharUnits NumNonZeroBytes = CharUnits::Zero(); + for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) + NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF); + return NumNonZeroBytes; +} + +/// If the initializer is large and has a lot of zeros in it, emit a memset and +/// avoid storing the individual zeros. static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, CIRGenFunction &CGF) { // If the slot is arleady known to be zeroed, nothing to do. Don't mess with @@ -682,7 +752,24 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, if (Size <= CharUnits::fromQuantity(16)) return; - llvm_unreachable("NYI"); + // Check to see if over 3/4 of the initializer are known to be zero. If so, + // we prefer to emit memset + individual stores for the rest. + CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF); + if (NumNonZeroBytes * 4 > Size) + return; + + // Okay, it seems like a good idea to use an initial memset, emit the call. + auto &builder = CGF.getBuilder(); + auto loc = CGF.getLoc(E->getSourceRange()); + Address slotAddr = Slot.getAddress(); + auto zero = builder.getZero(loc, slotAddr.getElementType()); + + builder.createStore(loc, zero, slotAddr); + // Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty); + // CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false); + + // Tell the AggExprEmitter that the slot is known zero. + Slot.setZeroed(); } AggValueSlot::Overlap_t CIRGenFunction::getOverlapForBaseInit( @@ -814,3 +901,22 @@ void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty, assert(0 && "NYI"); } } + +AggValueSlot::Overlap_t +CIRGenFunction::getOverlapForFieldInit(const FieldDecl *FD) { + if (!FD->hasAttr() || !FD->getType()->isRecordType()) + return AggValueSlot::DoesNotOverlap; + + // If the field lies entirely within the enclosing class's nvsize, its tail + // padding cannot overlap any already-initialized object. (The only subobjects + // with greater addresses that might already be initialized are vbases.) + const RecordDecl *ClassRD = FD->getParent(); + const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD); + if (Layout.getFieldOffset(FD->getFieldIndex()) + + getContext().getTypeSize(FD->getType()) <= + (uint64_t)getContext().toBits(Layout.getNonVirtualSize())) + return AggValueSlot::DoesNotOverlap; + + // The tail padding may contain values we need to preserve. + return AggValueSlot::MayOverlap; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index fec8daee4ecb..908e730d6789 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1450,8 +1450,9 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, } mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) { - if (T->getAs()) - llvm_unreachable("NYI"); + if (T->getAs()) { + return builder.getNullPtr(getTypes().convertTypeForMem(T), loc); + } if (getTypes().isZeroInitializable(T)) return builder.getNullValue(getTypes().convertTypeForMem(T), loc); @@ -1470,3 +1471,28 @@ mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) { llvm_unreachable("NYI"); return {}; } + +mlir::Attribute ConstantEmitter::emitAbstract(const Expr *E, + QualType destType) { + auto state = pushAbstract(); + auto C = tryEmitPrivate(E, destType).cast(); + C = validateAndPopAbstract(C, state); + if (!C) { + llvm_unreachable("NYI"); + } + return C; +} + +mlir::Attribute ConstantEmitter::emitAbstract(SourceLocation loc, + const APValue &value, + QualType destType) { + auto state = pushAbstract(); + auto C = tryEmitPrivate(value, destType); + C = validateAndPopAbstract(C, state); + if (!C) { + CGM.Error(loc, + "internal error: could not emit constant value \"abstractly\""); + llvm_unreachable("NYI"); + } + return C; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 0547ef6cf933..2bb461caf9e7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -171,8 +171,9 @@ class ScalarExprEmitter : public StmtVisitor { // l-values mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { - // FIXME: we could try to emit this as constant first, see - // CGF.tryEmitAsConstant(E) + if (CIRGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) { + return CGF.buildScalarConstant(Constant, E); + } return buildLoadOfLValue(E); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 085931c90892..058f7d4217a9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1224,4 +1224,9 @@ bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *CE) { } return true; +} + +void CIRGenFunction::buildDeclRefExprDbgValue(const DeclRefExpr *E, + const APValue &Init) { + assert(!UnimplementedFeature::generateDebugInfo()); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 383b1570ae11..72dd507a4922 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -979,9 +979,48 @@ class CIRGenFunction : public CIRGenTypeCache { const clang::Stmt *thenS, const clang::Stmt *elseS); + class ConstantEmission { + // Cannot use mlir::TypedAttr directly here because of bit availability. + llvm::PointerIntPair ValueAndIsReference; + ConstantEmission(mlir::TypedAttr C, bool isReference) + : ValueAndIsReference(C, isReference) {} + + public: + ConstantEmission() {} + static ConstantEmission forReference(mlir::TypedAttr C) { + return ConstantEmission(C, true); + } + static ConstantEmission forValue(mlir::TypedAttr C) { + return ConstantEmission(C, false); + } + + explicit operator bool() const { + return ValueAndIsReference.getOpaqueValue() != nullptr; + } + + bool isReference() const { return ValueAndIsReference.getInt(); } + LValue getReferenceLValue(CIRGenFunction &CGF, Expr *refExpr) const { + assert(isReference()); + // create(loc, ty, getZeroAttr(ty)); + // CGF.getBuilder().const + // return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(), + // refExpr->getType()); + llvm_unreachable("NYI"); + } + + mlir::TypedAttr getValue() const { + assert(!isReference()); + return ValueAndIsReference.getPointer().cast(); + } + }; + + ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr); + ConstantEmission tryEmitAsConstant(const MemberExpr *ME); + /// Emit the computation of the specified expression of scalar type, /// ignoring the result. mlir::Value buildScalarExpr(const clang::Expr *E); + mlir::Value buildScalarConstant(const ConstantEmission &Constant, Expr *E); mlir::Type getCIRType(const clang::QualType &type); @@ -1046,6 +1085,7 @@ class CIRGenFunction : public CIRGenTypeCache { void buildStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit); mlir::Value buildToMemory(mlir::Value Value, clang::QualType Ty); + void buildDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init); /// Store the specified rvalue into the specified /// lvalue, where both are guaranteed to the have the same type, and that type @@ -1256,6 +1296,7 @@ class CIRGenFunction : public CIRGenTypeCache { const clang::CXXRecordDecl *RD); void initializeVTablePointer(mlir::Location loc, const VPtr &Vptr); + AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD); LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); /// Like buildLValueForField, excpet that if the Field is a reference, this diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 3121a3f887b5..a87da9d57ec0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2251,3 +2251,25 @@ CharUnits CIRGenModule::computeNonVirtualBaseClassOffset( return Offset; } + +void CIRGenModule::Error(SourceLocation loc, StringRef message) { + unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0"); + getDiags().Report(astCtx.getFullLoc(loc), diagID) << message; +} + +/// Print out an error that codegen doesn't support the specified stmt yet. +void CIRGenModule::ErrorUnsupported(const Stmt *S, const char *Type) { + unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, + "cannot compile this %0 yet"); + std::string Msg = Type; + getDiags().Report(astCtx.getFullLoc(S->getBeginLoc()), DiagID) + << Msg << S->getSourceRange(); +} + +/// Print out an error that codegen doesn't support the specified decl yet. +void CIRGenModule::ErrorUnsupported(const Decl *D, const char *Type) { + unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, + "cannot compile this %0 yet"); + std::string Msg = Type; + getDiags().Report(astCtx.getFullLoc(D->getLocation()), DiagID) << Msg; +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index d05fbcdede71..916a83cfd4ed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -519,6 +519,15 @@ class CIRGenModule : public CIRGenTypeCache { static constexpr const char *builtinCoroBegin = "__builtin_coro_begin"; static constexpr const char *builtinCoroEnd = "__builtin_coro_end"; + /// Emit a general error that something can't be done. + void Error(SourceLocation loc, StringRef error); + + /// Print out an error that codegen doesn't support the specified stmt yet. + void ErrorUnsupported(const Stmt *S, const char *Type); + + /// Print out an error that codegen doesn't support the specified decl yet. + void ErrorUnsupported(const Decl *D, const char *Type); + private: // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 68aa451ae3d4..966156559c09 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -417,6 +417,7 @@ class AggValueSlot { bool isSanitizerChecked() const { return SanitizerCheckedFlag; } IsZeroed_t isZeroed() const { return IsZeroed_t(ZeroedFlag); } + void setZeroed(bool V = true) { ZeroedFlag = V; } NeedsGCBarriers_t requiresGCollection() const { return NeedsGCBarriers_t(ObjCGCFlag); diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 96c52f1ba476..cab6c63a52c8 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -27,7 +27,9 @@ struct incomplete; void yoyo(incomplete *i) {} // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> +// CHECK: !ty_22struct2EMandalore22 = !cir.struct<"struct.Mandalore", i32, !cir.ptr, i32, #cir.recdecl.ast> // CHECK: !ty_22struct2Eincomplete22 = !cir.struct<"struct.incomplete", incomplete +// CHECK: !ty_22class2EAdv22 = !cir.struct<"class.Adv", !ty_22struct2EMandalore22> // CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !ty_22struct2EBar22> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr @@ -70,4 +72,39 @@ void yoyo(incomplete *i) {} // CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, i32) -> i32 // CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr // CHECK-NEXT: cir.return -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } + +typedef enum Ways { + ThisIsTheWay = 1000024001, +} Ways; + +typedef struct Mandalore { + Ways w; + const void* n; + int d; +} Mandalore; + +class Adv { + Mandalore x{ThisIsTheWay}; +public: + Adv() {} +}; + +void m() { Adv C; } + +// CHECK: cir.func linkonce_odr @_ZN3AdvC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_name = "x"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "w"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %4 = cir.const(1000024001 : i32) : i32 +// CHECK: cir.store %4, %3 : i32, cir.ptr +// CHECK: %5 = "cir.struct_element_addr"(%2) <{member_name = "n"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %6 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > +// CHECK: %7 = "cir.struct_element_addr"(%2) <{member_name = "d"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %8 = cir.const(0 : i32) : i32 +// CHECK: cir.store %8, %7 : i32, cir.ptr +// CHECK: cir.return +// CHECK: } From 7af07b0ba43c646ecd01877295c72ccb49ed098e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 8 May 2023 20:56:32 -0700 Subject: [PATCH 0927/2301] [CIR] TernaryOp: introduce CIR operation Also add implementation for interfaces and add testcase --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 78 ++++++++++++++++++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 46 ++++++++++++ clang/test/CIR/IR/ternary.cir | 29 ++++++++ 3 files changed, 147 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/IR/ternary.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b6c7eabb0bd4..06cea1748815 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -421,6 +421,59 @@ def IfOp : CIR_Op<"if", ]; } +//===----------------------------------------------------------------------===// +// TernaryOp +//===----------------------------------------------------------------------===// + +def TernaryOp : CIR_Op<"ternary", + [DeclareOpInterfaceMethods, + RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { + let summary = "The `cond ? a : b` C/C++ ternary operation"; + let description = [{ + The `cir.ternary` operation represents C/C++ ternary, much like a `select` + operation. First argument is a `cir.bool` condition to evaluate, followed + by two regions to execute (true or false). This is different from `cir.if` + since each region is one block sized and the `cir.yield` closing the block + scope should have one argument. + + Example: + + ```mlir + // x = cond ? a : b; + + %x = cir.ternary (%cond, true_region { + ... + cir.yield %a : i32 + }, false_region { + ... + cir.yield %b : i32 + }) -> i32 + ``` + }]; + let arguments = (ins CIR_BoolType:$cond); + let regions = (region SizedRegion<1>:$trueRegion, + SizedRegion<1>:$falseRegion); + let results = (outs AnyType:$result); + + let skipDefaultBuilders = 1; + let builders = [ + OpBuilder<(ins "Value":$cond, + "function_ref":$trueBuilder, + "function_ref":$falseBuilder) + > + ]; + + // All constraints already verified elsewhere. + let hasVerifier = 0; + + let assemblyFormat = [{ + `(` $cond `,` + `true_region` $trueRegion `,` + `false_region` $falseRegion + `)` `:` type($result) attr-dict + }]; +} + //===----------------------------------------------------------------------===// // YieldOp //===----------------------------------------------------------------------===// @@ -438,17 +491,15 @@ def YieldOpKind : I32EnumAttr< } def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, - ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", - "LoopOp", "AwaitOp"]>]> { + ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "LoopOp", "AwaitOp", + "TernaryOp"]>]> { let summary = "Terminate CIR regions"; let description = [{ The `cir.yield` operation terminates regions on different CIR operations: - `cir.if`, `cir.scope`, `cir.switch`, `cir.loop` and `cir.await`. + `cir.if`, `cir.scope`, `cir.switch`, `cir.loop`, `cir.await` and `cir.ternary`. Might yield an SSA value and the semantics of how the values are yielded is - defined by the parent operation. Note: there are currently no uses of - `cir.yield` with operands - should be helpful to represent lifetime - extension out of short lived scopes in the future. + defined by the parent operation. Optionally, `cir.yield` can be annotated with extra kind specifiers: - `break`: breaking out of the innermost `cir.switch` / `cir.loop` semantics, @@ -492,6 +543,21 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, } cir.yield // control-flow to the next region for suspension. }, ...) + + cir.scope { + ... + cir.yield + } + + %x = cir.scope { + ... + cir.yield %val + } + + %y = cir.ternary { + ... + cir.yield %val : i32 + } : i32 ``` }]; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index ee476eb26743..476b61f08551 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -587,6 +587,52 @@ void ScopeOp::build(OpBuilder &builder, OperationState &result, LogicalResult ScopeOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// TernaryOp +//===----------------------------------------------------------------------===// + +/// Given the region at `index`, or the parent operation if `index` is None, +/// return the successor regions. These are the regions that may be selected +/// during the flow of control. `operands` is a set of optional attributes that +/// correspond to a constant value for each operand, or null if that operand is +/// not a constant. +void TernaryOp::getSuccessorRegions(mlir::RegionBranchPoint point, + SmallVectorImpl ®ions) { + // The `true` and the `false` region branch back to the parent operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor(this->getODSResults(0))); + return; + } + + // Try optimize if we have more information + // if (auto condAttr = operands.front().dyn_cast_or_null()) { + // assert(0 && "not implemented"); + // } + + // If the condition isn't constant, both regions may be executed. + regions.push_back(RegionSuccessor(&getTrueRegion())); + regions.push_back(RegionSuccessor(&getFalseRegion())); + return; +} + +void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, + function_ref trueBuilder, + function_ref falseBuilder) { + result.addOperands(cond); + OpBuilder::InsertionGuard guard(builder); + + Region *trueRegion = result.addRegion(); + auto *block = builder.createBlock(trueRegion); + trueBuilder(builder, result.location); + Region *falseRegion = result.addRegion(); + builder.createBlock(falseRegion); + falseBuilder(builder, result.location); + + auto yield = dyn_cast(block->getTerminator()); + assert(yield && "expected cir.yield terminator"); + result.addTypes(TypeRange{yield.getOperand(0).getType()}); +} + //===----------------------------------------------------------------------===// // YieldOp //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/ternary.cir b/clang/test/CIR/IR/ternary.cir new file mode 100644 index 000000000000..013f4dddda01 --- /dev/null +++ b/clang/test/CIR/IR/ternary.cir @@ -0,0 +1,29 @@ +// RUN: cir-tool %s | cir-tool | FileCheck %s + +module { + cir.func @blue(%arg0: !cir.bool) -> i32 { + %0 = cir.ternary(%arg0, true_region { + %a = cir.const(0 : i32) : i32 + cir.yield %a : i32 + }, false_region { + %b = cir.const(1 : i32) : i32 + cir.yield %b : i32 + }) : i32 + cir.return %0 : i32 + } +} + +// CHECK: module { + +// CHECK: cir.func @blue(%arg0: !cir.bool) -> i32 { +// CHECK: %0 = cir.ternary(%arg0, true_region { +// CHECK: %1 = cir.const(0 : i32) : i32 +// CHECK: cir.yield %1 : i32 +// CHECK: }, false_region { +// CHECK: %1 = cir.const(1 : i32) : i32 +// CHECK: cir.yield %1 : i32 +// CHECK: }) : i32 +// CHECK: cir.return %0 : i32 +// CHECK: } + +// CHECK: } From fc688c83a1f33befa6c33920614313704bb174d4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 8 May 2023 16:46:38 -0700 Subject: [PATCH 0928/2301] [CIR][CIRGen] ternary operator: skeleton for scalar types codegen --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 83 ++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 17 ++--- 2 files changed, 86 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 2bb461caf9e7..81b750b2ecc0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -77,7 +77,7 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitConstantExpr(ConstantExpr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitParenExpr(ParenExpr *PE) { llvm_unreachable("NYI"); } + mlir::Value VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); } mlir::Value VisitSubstnonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) { llvm_unreachable("NYI"); @@ -521,9 +521,7 @@ class ScalarExprEmitter : public StmtVisitor { // Other Operators. mlir::Value VisitBlockExpr(const BlockExpr *E) { llvm_unreachable("NYI"); } mlir::Value - VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { - llvm_unreachable("NYI"); - } + VisitAbstractConditionalOperator(const AbstractConditionalOperator *E); mlir::Value VisitChooseExpr(ChooseExpr *E) { llvm_unreachable("NYI"); } mlir::Value VisitVAArgExpr(VAArgExpr *E) { llvm_unreachable("NYI"); } mlir::Value VisitObjCStringLiteral(const ObjCStringLiteral *E) { @@ -1441,3 +1439,80 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // Otherwise, reload the value. return buildLoadOfLValue(LHS, E->getExprLoc()); } + +/// Return true if the specified expression is cheap enough and side-effect-free +/// enough to evaluate unconditionally instead of conditionally. This is used +/// to convert control flow into selects in some cases. +/// TODO(cir): can be shared with LLVM codegen. +static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E, + CIRGenFunction &CGF) { + // Anything that is an integer or floating point constant is fine. + return E->IgnoreParens()->isEvaluatable(CGF.getContext()); + + // Even non-volatile automatic variables can't be evaluated unconditionally. + // Referencing a thread_local may cause non-trivial initialization work to + // occur. If we're inside a lambda and one of the variables is from the scope + // outside the lambda, that function may have returned already. Reading its + // locals is a bad idea. Also, these reads may introduce races there didn't + // exist in the source-level program. +} + +mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( + const AbstractConditionalOperator *E) { + TestAndClearIgnoreResultAssign(); + + // Bind the common expression if necessary. + CIRGenFunction::OpaqueValueMapping binding(CGF, E); + + Expr *condExpr = E->getCond(); + Expr *lhsExpr = E->getTrueExpr(); + Expr *rhsExpr = E->getFalseExpr(); + + // If the condition constant folds and can be elided, try to avoid emitting + // the condition and the dead arm. + bool CondExprBool; + if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { + Expr *live = lhsExpr, *dead = rhsExpr; + if (!CondExprBool) + std::swap(live, dead); + + // If the dead side doesn't have labels we need, just emit the Live part. + if (!CGF.ContainsLabel(dead)) { + if (CondExprBool) + assert(!UnimplementedFeature::incrementProfileCounter()); + auto Result = Visit(live); + + // If the live part is a throw expression, it acts like it has a void + // type, so evaluating it returns a null Value. However, a conditional + // with non-void type must return a non-null Value. + if (!Result && !E->getType()->isVoidType()) { + llvm_unreachable("NYI"); + } + + return Result; + } + } + + // OpenCL: If the condition is a vector, we can treat this condition like + // the select function. + if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) || + condExpr->getType()->isExtVectorType()) { + llvm_unreachable("NYI"); + } + + if (condExpr->getType()->isVectorType() || + condExpr->getType()->isSveVLSBuiltinType()) { + llvm_unreachable("NYI"); + } + + // If this is a really simple expression (like x ? 4 : 5), emit this as a + // select instead of as control flow. We can only do this if it is cheap and + // safe to evaluate the LHS and RHS unconditionally. + if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) && + isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) { + llvm_unreachable("NYI"); + } + + [[maybe_unused]] CIRGenFunction::ConditionalEvaluation eval(CGF); + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 72dd507a4922..f26f2ed37f88 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -951,23 +951,20 @@ class CIRGenFunction : public CIRGenTypeCache { void buildDecl(const clang::Decl &D); - /// If the specified expression does not fold - /// to a constant, or if it does but contains a label, return false. If it - /// constant folds return true and set the boolean result in Result. + /// If the specified expression does not fold to a constant, or if it does but + /// contains a label, return false. If it constant folds return true and set + /// the boolean result in Result. bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond, bool &ResultBool, - bool AllowLabels); + bool AllowLabels = false); + bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond, + llvm::APSInt &ResultInt, + bool AllowLabels = false); /// Return true if the statement contains a label in it. If /// this statement is not executed normally, it not containing a label means /// that we can just remove the code. bool ContainsLabel(const clang::Stmt *S, bool IgnoreCaseStmts = false); - /// If the specified expression does not fold - /// to a constant, or if it does but contains a label, return false. If it - /// constant folds return true and set the folded value. - bool ConstantFoldsToSimpleInteger(const clang::Expr *Cond, - llvm::APSInt &ResultInt, bool AllowLabels); - /// Emit an if on a boolean condition to the specified blocks. /// FIXME: Based on the condition, this might try to simplify the codegen of /// the conditional based on the branch. TrueCount should be the number of From 5732facb5cd8312840ad6f744f0dfb1312b67308 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 10 May 2023 11:50:38 -0700 Subject: [PATCH 0929/2301] [CIR][CIRGen] Ternary: codegen cheap to evaluate lhs/rhs --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 26 +++++++++++++++++++- clang/test/CIR/CodeGen/ternary.cpp | 25 +++++++++++++++++++ clang/test/CIR/IR/ternary.cir | 8 +++--- 4 files changed, 56 insertions(+), 7 deletions(-) create mode 100644 clang/test/CIR/CodeGen/ternary.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 06cea1748815..3defa364fb4d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -468,8 +468,8 @@ def TernaryOp : CIR_Op<"ternary", let assemblyFormat = [{ `(` $cond `,` - `true_region` $trueRegion `,` - `false_region` $falseRegion + `true` $trueRegion `,` + `false` $falseRegion `)` `:` type($result) attr-dict }]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 81b750b2ecc0..a8c6d2430008 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1459,6 +1459,8 @@ static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E, mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( const AbstractConditionalOperator *E) { + auto &builder = CGF.getBuilder(); + auto loc = CGF.getLoc(E->getSourceRange()); TestAndClearIgnoreResultAssign(); // Bind the common expression if necessary. @@ -1510,7 +1512,29 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( // safe to evaluate the LHS and RHS unconditionally. if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) && isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) { - llvm_unreachable("NYI"); + bool lhsIsVoid = false; + auto condV = CGF.evaluateExprAsBool(condExpr); + assert(!UnimplementedFeature::incrementProfileCounter()); + + return builder.create( + loc, condV, /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto lhs = Visit(lhsExpr); + if (!lhs) { + lhs = builder.getNullValue(CGF.VoidTy, loc); + lhsIsVoid = true; + } + builder.create(loc, lhs); + }, + /*elseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto rhs = Visit(rhsExpr); + if (lhsIsVoid) { + assert(!rhs && "lhs and rhs types must match"); + rhs = builder.getNullValue(CGF.VoidTy, loc); + } + builder.create(loc, rhs); + }); } [[maybe_unused]] CIRGenFunction::ConditionalEvaluation eval(CGF); diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp new file mode 100644 index 000000000000..1772d22f9cde --- /dev/null +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int x(int y) { + return y > 0 ? 3 : 5; +} + +// CHECK: cir.func @_Z1xi +// CHECK: %0 = cir.alloca i32, cir.ptr , ["y", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : i32, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , i32 +// CHECK: %3 = cir.const(0 : i32) : i32 +// CHECK: %4 = cir.cmp(gt, %2, %3) : i32, !cir.bool +// CHECK: %5 = cir.ternary(%4, true { +// CHECK: %7 = cir.const(3 : i32) : i32 +// CHECK: cir.yield %7 : i32 +// CHECK: }, false { +// CHECK: %7 = cir.const(5 : i32) : i32 +// CHECK: cir.yield %7 : i32 +// CHECK: }) : i32 +// CHECK: cir.store %5, %1 : i32, cir.ptr +// CHECK: %6 = cir.load %1 : cir.ptr , i32 +// CHECK: cir.return %6 : i32 +// CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/IR/ternary.cir b/clang/test/CIR/IR/ternary.cir index 013f4dddda01..eff292de9813 100644 --- a/clang/test/CIR/IR/ternary.cir +++ b/clang/test/CIR/IR/ternary.cir @@ -2,10 +2,10 @@ module { cir.func @blue(%arg0: !cir.bool) -> i32 { - %0 = cir.ternary(%arg0, true_region { + %0 = cir.ternary(%arg0, true { %a = cir.const(0 : i32) : i32 cir.yield %a : i32 - }, false_region { + }, false { %b = cir.const(1 : i32) : i32 cir.yield %b : i32 }) : i32 @@ -16,10 +16,10 @@ module { // CHECK: module { // CHECK: cir.func @blue(%arg0: !cir.bool) -> i32 { -// CHECK: %0 = cir.ternary(%arg0, true_region { +// CHECK: %0 = cir.ternary(%arg0, true { // CHECK: %1 = cir.const(0 : i32) : i32 // CHECK: cir.yield %1 : i32 -// CHECK: }, false_region { +// CHECK: }, false { // CHECK: %1 = cir.const(1 : i32) : i32 // CHECK: cir.yield %1 : i32 // CHECK: }) : i32 From fc1cb5e777929e5019eaf394e1dcca47c995919a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 10 May 2023 15:10:44 -0700 Subject: [PATCH 0930/2301] [CIR][CIRGen] Ternary: handle more complex cases --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 + clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 67 ++++++++++--------- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 78 +++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 9 ++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 15 ++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 +- clang/test/CIR/CodeGen/ternary.cpp | 31 +++++++++ 7 files changed, 167 insertions(+), 39 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 68d768d96576..028fbc5aa069 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -182,6 +182,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Creates constant null value for integral type ty. mlir::cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { + if (ty.isa()) + return getNullPtr(ty, loc); assert(ty.isa() && "NYI"); return create(loc, ty, mlir::IntegerAttr::get(ty, 0)); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index c71f0e14ecfa..594027e05713 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1722,43 +1722,13 @@ bool CIRGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) { /// Emit an `if` on a boolean condition, filling `then` and `else` into /// appropriated regions. -/// TODO(cir): PGO data -/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas). mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, mlir::Location loc, const Stmt *thenS, const Stmt *elseS) { - // TODO(CIR): scoped ApplyDebugLocation DL(*this, Cond); - // TODO(CIR): __builtin_unpredictable and profile counts? - cond = cond->IgnoreParens(); - - // if (const BinaryOperator *CondBOp = dyn_cast(cond)) { - // llvm_unreachable("binaryoperator ifstmt NYI"); - // } - - if (const UnaryOperator *CondUOp = dyn_cast(cond)) { - llvm_unreachable("unaryoperator ifstmt NYI"); - } - - if (const ConditionalOperator *CondOp = dyn_cast(cond)) { - llvm_unreachable("conditionaloperator ifstmt NYI"); - } - - if (const CXXThrowExpr *Throw = dyn_cast(cond)) { - llvm_unreachable("throw expr ifstmt nyi"); - } - // Emit the code with the fully general case. - mlir::Value condV = evaluateExprAsBool(cond); + mlir::Value condV = buildOpOnBoolExpr(cond, loc, thenS, elseS); mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); - - auto *Call = dyn_cast(cond->IgnoreImpCasts()); - if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { - llvm_unreachable("NYI"); - } - - // TODO(CIR): emitCondLikelihoodViaExpectIntrinsic - builder.create( loc, condV, elseS, /*thenBuilder=*/ @@ -1793,6 +1763,41 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, resElse.succeeded()); } +/// TODO(cir): PGO data +/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas). +mlir::Value CIRGenFunction::buildOpOnBoolExpr(const Expr *cond, + mlir::Location loc, + const Stmt *thenS, + const Stmt *elseS) { + // TODO(CIR): scoped ApplyDebugLocation DL(*this, Cond); + // TODO(CIR): __builtin_unpredictable and profile counts? + cond = cond->IgnoreParens(); + + // if (const BinaryOperator *CondBOp = dyn_cast(cond)) { + // llvm_unreachable("binaryoperator ifstmt NYI"); + // } + + if (const UnaryOperator *CondUOp = dyn_cast(cond)) { + llvm_unreachable("unaryoperator ifstmt NYI"); + } + + if (const ConditionalOperator *CondOp = dyn_cast(cond)) { + llvm_unreachable("conditionaloperator ifstmt NYI"); + } + + if (const CXXThrowExpr *Throw = dyn_cast(cond)) { + llvm_unreachable("throw expr ifstmt nyi"); + } + + auto *Call = dyn_cast(cond->IgnoreImpCasts()); + if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { + llvm_unreachable("NYI"); + } + + // Emit the code with the fully general case. + return evaluateExprAsBool(cond); +} + mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, mlir::Location loc, CharUnits alignment) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index a8c6d2430008..c24543d24f30 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1537,6 +1537,80 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( }); } - [[maybe_unused]] CIRGenFunction::ConditionalEvaluation eval(CGF); - llvm_unreachable("NYI"); + mlir::Value condV = CGF.buildOpOnBoolExpr(condExpr, loc, lhsExpr, rhsExpr); + CIRGenFunction::ConditionalEvaluation eval(CGF); + SmallVector insertPoints{}; + mlir::Type yieldTy{}; + auto patchVoidOrThrowSites = [&]() { + if (insertPoints.empty()) + return; + // If both arms are void, so be it. + if (!yieldTy) + yieldTy = CGF.VoidTy; + for (auto &toInsert : insertPoints) { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.restoreInsertionPoint(toInsert); + mlir::Value op0 = builder.getNullValue(yieldTy, loc); + builder.create(loc, op0); + } + }; + + return builder.create( + loc, condV, /*trueBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // FIXME: abstract all this massive location handling elsewhere. + SmallVector locs; + if (loc.isa()) { + locs.push_back(loc); + locs.push_back(loc); + } else if (loc.isa()) { + auto fusedLoc = loc.cast(); + locs.push_back(fusedLoc.getLocations()[0]); + locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{locs[0], locs[1], + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto lhs = Visit(lhsExpr); + eval.end(CGF); + + if (lhs) { + yieldTy = lhs.getType(); + b.create(loc, lhs); + return; + } + // If LHS or RHS is a throw or void expression we need to patch arms + // as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + }, + /*falseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto locBegin = fusedLoc.getLocations()[0]; + auto locEnd = fusedLoc.getLocations()[1]; + CIRGenFunction::LexicalScopeContext lexScope{locBegin, locEnd, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto rhs = Visit(rhsExpr); + eval.end(CGF); + + if (rhs) { + yieldTy = rhs.getType(); + b.create(loc, rhs); + } else { + // If LHS or RHS is a throw or void expression we need to patch arms + // as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + } + + patchVoidOrThrowSites(); + }); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 058f7d4217a9..05d8db6f288c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -344,9 +344,12 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPointToEnd(InsPt); // TODO: insert actual scope cleanup (dtors and etc) - if (localScope->Depth != 0) // end of any local scope != function - builder.create(localScope->EndLoc); - else + if (localScope->Depth != 0) { // end of any local scope != function + // Ternary ops have to deal with matching arms for yielding types + // and do return a value, it must do its own cir.yield insertion. + if (!localScope->isTernary()) + builder.create(localScope->EndLoc); + } else (void)buildReturn(localScope->EndLoc); }; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index f26f2ed37f88..cc73a88553c3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -113,6 +113,7 @@ class CIRGenFunction : public CIRGenTypeCache { // FIXME: perhaps we can use some info encoded in operations. enum Kind { Regular, // cir.if, cir.scope, if_regions + Ternary, // cir.ternary Switch // cir.switch } ScopeKind = Regular; @@ -120,7 +121,9 @@ class CIRGenFunction : public CIRGenTypeCache { unsigned Depth = 0; bool HasReturn = false; LexicalScopeContext(mlir::Location b, mlir::Location e, mlir::Block *eb) - : EntryBlock(eb), BeginLoc(b), EndLoc(e) {} + : EntryBlock(eb), BeginLoc(b), EndLoc(e) { + assert(EntryBlock && "expected valid block"); + } ~LexicalScopeContext() = default; // --- @@ -134,7 +137,10 @@ class CIRGenFunction : public CIRGenTypeCache { // --- bool isRegular() { return ScopeKind == Kind::Regular; } bool isSwitch() { return ScopeKind == Kind::Switch; } + bool isTernary() { return ScopeKind == Kind::Ternary; } + void setAsSwitch() { ScopeKind = Kind::Switch; } + void setAsTernary() { ScopeKind = Kind::Ternary; } // --- // Goto handling @@ -975,6 +981,13 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS); + mlir::Value buildTernaryOnBoolExpr(const clang::Expr *cond, + mlir::Location loc, + const clang::Stmt *thenS, + const clang::Stmt *elseS); + mlir::Value buildOpOnBoolExpr(const clang::Expr *cond, mlir::Location loc, + const clang::Stmt *thenS, + const clang::Stmt *elseS); class ConstantEmission { // Cannot use mlir::TypedAttr directly here because of bit availability. diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 476b61f08551..fd0e0cd686cf 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -620,7 +620,6 @@ void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, function_ref falseBuilder) { result.addOperands(cond); OpBuilder::InsertionGuard guard(builder); - Region *trueRegion = result.addRegion(); auto *block = builder.createBlock(trueRegion); trueBuilder(builder, result.location); @@ -629,7 +628,8 @@ void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, falseBuilder(builder, result.location); auto yield = dyn_cast(block->getTerminator()); - assert(yield && "expected cir.yield terminator"); + assert((yield && yield.getNumOperands() == 1) && + "expected cir.yield terminator with one operand"); result.addTypes(TypeRange{yield.getOperand(0).getType()}); } diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp index 1772d22f9cde..ab01652e975d 100644 --- a/clang/test/CIR/CodeGen/ternary.cpp +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -22,4 +22,35 @@ int x(int y) { // CHECK: cir.store %5, %1 : i32, cir.ptr // CHECK: %6 = cir.load %1 : cir.ptr , i32 // CHECK: cir.return %6 : i32 +// CHECK: } + +typedef enum { + API_A, + API_EnumSize = 0x7fffffff +} APIType; + +void oba(const char *); + +void m(APIType api) { + ((api == API_A) ? (static_cast(0)) : oba("yo.cpp")); +} + +// CHECK: cir.func @_Z1m7APIType +// CHECK: %0 = cir.alloca i32, cir.ptr , ["api", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : i32, cir.ptr +// CHECK: %1 = cir.load %0 : cir.ptr , i32 +// CHECK: %2 = cir.const(0 : i32) : i32 +// CHECK: %3 = cir.cmp(eq, %1, %2) : i32, !cir.bool +// CHECK: %4 = cir.ternary(%3, true { +// CHECK: %5 = cir.const(0 : i32) : i32 +// CHECK: %6 = cir.const(0 : i8) : i8 +// CHECK: cir.yield %6 : i8 +// CHECK: }, false { +// CHECK: %5 = cir.get_global @".str" : cir.ptr > +// CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_Z3obaPKc(%6) : (!cir.ptr) -> () +// CHECK: %7 = cir.const(0 : i8) : i8 +// CHECK: cir.yield %7 : i8 +// CHECK: }) : i8 +// CHECK: cir.return // CHECK: } \ No newline at end of file From 046293302b6de17be3f6e5f91cae3bf7ee142a98 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 10 May 2023 18:06:15 -0700 Subject: [PATCH 0931/2301] [CIR][CIRGen] Predefined: handle __PRETTY_FUNCTION__ and friends --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 23 +++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 ++ clang/test/CIR/CodeGen/predefined.cpp | 22 ++++++++++++++++++++++ 3 files changed, 45 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/predefined.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 594027e05713..96dccc94c5aa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -21,6 +21,8 @@ #include "clang/Basic/Builtins.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "llvm/ADT/StringExtras.h" + #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Value.h" @@ -1668,7 +1670,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return buildStringLiteralLValue(cast(E)); case Expr::MemberExprClass: return buildMemberExpr(cast(E)); - + case Expr::PredefinedExprClass: + return buildPredefinedLValue(cast(E)); case Expr::CStyleCastExprClass: case Expr::CXXFunctionalCastExprClass: case Expr::CXXDynamicCastExprClass: @@ -2160,4 +2163,20 @@ mlir::Value CIRGenFunction::buildScalarConstant( E->getExprLoc()) .getScalarVal(); return builder.getConstant(getLoc(E->getSourceRange()), Constant.getValue()); -} \ No newline at end of file +} + +LValue CIRGenFunction::buildPredefinedLValue(const PredefinedExpr *E) { + auto SL = E->getFunctionName(); + assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); + StringRef FnName = CurFn.getName(); + if (FnName.starts_with("\01")) + FnName = FnName.substr(1); + StringRef NameItems[] = {PredefinedExpr::getIdentKindName(E->getIdentKind()), + FnName}; + std::string GVName = llvm::join(NameItems, NameItems + 2, "."); + if (auto *BD = dyn_cast_or_null(CurCodeDecl)) { + llvm_unreachable("NYI"); + } + + return buildStringLiteralLValue(SL); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index cc73a88553c3..891f4773f0be 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -540,6 +540,8 @@ class CIRGenFunction : public CIRGenTypeCache { void buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD); void buildLambdaStaticInvokeBody(const CXXMethodDecl *MD); + LValue buildPredefinedLValue(const PredefinedExpr *E); + /// When generating code for a C++ member function, this will /// hold the implicit 'this' declaration. clang::ImplicitParamDecl *CXXABIThisDecl = nullptr; diff --git a/clang/test/CIR/CodeGen/predefined.cpp b/clang/test/CIR/CodeGen/predefined.cpp new file mode 100644 index 000000000000..60948dc9dc00 --- /dev/null +++ b/clang/test/CIR/CodeGen/predefined.cpp @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +extern "C" { + void __assert2(const char* __file, int __line, const char* __function, const char* __msg) __attribute__((__noreturn__)); +} + +void m() { + __assert2("yo.cpp", 79, __PRETTY_FUNCTION__, "doom"); +} + +// CHECK: cir.func @_Z1mv() { +// CHECK: %0 = cir.get_global @".str" : cir.ptr > +// CHECK: %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK: %2 = cir.const(79 : i32) : i32 +// CHECK: %3 = cir.get_global @".str1" : cir.ptr > +// CHECK: %4 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr +// CHECK: %5 = cir.get_global @".str2" : cir.ptr > +// CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @__assert2(%1, %2, %4, %6) : (!cir.ptr, i32, !cir.ptr, !cir.ptr) -> () +// CHECK: cir.return +// CHECK: } \ No newline at end of file From b863e192675a8baf099f8129bddc7738681639ed Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 10 May 2023 18:26:55 -0700 Subject: [PATCH 0932/2301] [CIR][CIRGen][NFC] Dtor/Ctor: add skeleton for alias emission --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 8c18f768c36c..a7f99a88fe50 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -348,6 +348,19 @@ static StructorCIRGen getCIRGenToUse(CIRGenModule &CGM, return StructorCIRGen::Alias; } +static void emitConstructorDestructorAlias(CIRGenModule &CGM, + GlobalDecl AliasDecl, + GlobalDecl TargetDecl) { + [[maybe_unused]] auto Linkage = CGM.getFunctionLinkage(AliasDecl); + + StringRef MangledName = CGM.getMangledName(AliasDecl); + auto Entry = + dyn_cast_or_null(CGM.getGlobalValue(MangledName)); + if (Entry && !Entry.isDeclaration()) + return; + llvm_unreachable("NYI"); +} + void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { auto *MD = cast(GD.getDecl()); auto *CD = dyn_cast(MD); @@ -365,7 +378,8 @@ void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { if (CIRGenType == StructorCIRGen::Alias || CIRGenType == StructorCIRGen::COMDAT) { - llvm_unreachable("NYI"); + emitConstructorDestructorAlias(CGM, GD, BaseDecl); + return; } if (CIRGenType == StructorCIRGen::RAUW) { From 10c29cb54b0b033abb0554e5f794692fea2c95e4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 11 May 2023 15:37:17 -0700 Subject: [PATCH 0933/2301] [CIR] FuncOp: introduce aliasing capabilities between global functions --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 7 ++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 52 ++++++++++++++++++++ clang/test/CIR/IR/func.cir | 11 +++++ clang/test/CIR/IR/invalid.cir | 12 +++++ 4 files changed, 78 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/IR/func.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 3defa364fb4d..7068ec300562 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1394,6 +1394,7 @@ def FuncOp : CIR_Op<"func", [ OptionalAttr:$sym_visibility, OptionalAttr:$arg_attrs, OptionalAttr:$res_attrs, + OptionalAttr:$aliasee, OptionalAttr:$ast); let regions = (region AnyRegion:$body); let skipDefaultBuilders = 1; @@ -1409,9 +1410,7 @@ def FuncOp : CIR_Op<"func", [ /// Returns the region on the current operation that is callable. This may /// return null in the case of an external callable object, e.g. an external /// function. - ::mlir::Region *getCallableRegion() { - return isExternal() ? nullptr : &getBody(); - } + ::mlir::Region *getCallableRegion(); /// Returns the results types that the callable region produces when /// executed. @@ -1447,7 +1446,7 @@ def FuncOp : CIR_Op<"func", [ // SymbolOpInterface Methods //===------------------------------------------------------------------===// - bool isDeclaration() { return isExternal(); } + bool isDeclaration(); }]; let hasCustomAssemblyFormat = 1; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index fd0e0cd686cf..9d7f72e2c3db 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1340,12 +1340,28 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { builder, state, arguments, resultAttrs, getArgAttrsAttrName(state.name), getResAttrsAttrName(state.name)); + bool hasAlias = false; + auto aliaseeNameAttr = getAliaseeAttrName(state.name); + if (::mlir::succeeded(parser.parseOptionalKeyword("alias"))) { + if (parser.parseLParen().failed()) + return failure(); + StringAttr aliaseeAttr; + if (parser.parseOptionalSymbolName(aliaseeAttr).failed()) + return failure(); + state.addAttribute(aliaseeNameAttr, FlatSymbolRefAttr::get(aliaseeAttr)); + if (parser.parseRParen().failed()) + return failure(); + hasAlias = true; + } + // Parse the optional function body. auto *body = state.addRegion(); llvm::SMLoc loc = parser.getCurrentLocation(); OptionalParseResult parseResult = parser.parseOptionalRegion( *body, arguments, /*enableNameShadowing=*/false); if (parseResult.has_value()) { + if (hasAlias) + parser.emitError(loc, "function alias shall not have a body"); if (failed(*parseResult)) return failure(); // Function body was parsed, make sure its not empty. @@ -1355,6 +1371,32 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { return success(); } +bool cir::FuncOp::isDeclaration() { + auto aliasee = getAliasee(); + if (!aliasee) + return isExternal(); + + auto *modOp = getOperation()->getParentOp(); + auto targetFn = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(modOp, *aliasee)); + assert(targetFn && "expected aliasee to exist"); + return targetFn.isDeclaration(); +} + +::mlir::Region *cir::FuncOp::getCallableRegion() { + auto aliasee = getAliasee(); + if (!aliasee) + return isExternal() ? nullptr : &getBody(); + + // Note that we forward the region from the original aliasee + // function. + auto *modOp = getOperation()->getParentOp(); + auto targetFn = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(modOp, *aliasee)); + assert(targetFn && "expected aliasee to exist"); + return targetFn.getCallableRegion(); +} + void cir::FuncOp::print(OpAsmPrinter &p) { p << ' '; if (getBuiltin()) @@ -1378,6 +1420,9 @@ void cir::FuncOp::print(OpAsmPrinter &p) { function_interface_impl::printFunctionAttributes( p, *this, {getFunctionTypeAttrName(), getLinkageAttrName()}); + if (auto aliaseeName = getAliasee()) + p.printSymbolName(*aliaseeName); + // Print the body if this is not an external function. Region &body = getOperation()->getRegion(0); if (!body.empty()) { @@ -1436,6 +1481,13 @@ LogicalResult cir::FuncOp::verify() { return emitOpError() << "coroutine body must use at least one cir.await op"; } + + // Function alias should have an empty body. + if (auto fn = getAliasee()) { + if (fn && !getBody().empty()) + return emitOpError() << "a function alias '" << *fn + << "' must have empty body"; + } return success(); } diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir new file mode 100644 index 000000000000..f8d97ac3eb1c --- /dev/null +++ b/clang/test/CIR/IR/func.cir @@ -0,0 +1,11 @@ +// RUN: cir-tool %s | FileCheck %s + +module { + cir.func @l0() { + cir.return + } + + cir.func @l1() alias(@l0) +} + +// CHECK: cir.func @l0() \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index f5f3c1667162..f1ec7236467c 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -275,3 +275,15 @@ module { #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}>> : !cir.struct<"", !cir.ptr> } // expected-error {{'cir.global' expected constant attribute to match type}} + +// ----- + +module { + cir.func @l0() { + cir.return + } + + cir.func @l1() alias(@l0) { // expected-error {{function alias shall not have a body}} + cir.return + } +} \ No newline at end of file From 8840528d2f815717821379a5de19cc89c2fb5618 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 11 May 2023 15:38:13 -0700 Subject: [PATCH 0934/2301] [CIR][CIRGen] Dtor/Ctor: implement alias emission --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 33 +++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 +++ clang/lib/CIR/CodeGen/CIRGenModule.h | 6 ++++ .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 22 +++++++++++-- clang/test/CIR/CodeGen/assign-operator.cpp | 9 +++-- clang/test/CIR/CodeGen/coro-task.cpp | 10 +++--- clang/test/CIR/CodeGen/ctor-alias.cpp | 14 ++++++++ clang/test/CIR/CodeGen/cxx-default-arg.cpp | 2 +- clang/test/CIR/CodeGen/dtors.cpp | 4 +-- clang/test/CIR/CodeGen/lambda.cpp | 10 +++--- 11 files changed, 95 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index a7f99a88fe50..45d9a35ffc51 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -351,14 +351,43 @@ static StructorCIRGen getCIRGenToUse(CIRGenModule &CGM, static void emitConstructorDestructorAlias(CIRGenModule &CGM, GlobalDecl AliasDecl, GlobalDecl TargetDecl) { - [[maybe_unused]] auto Linkage = CGM.getFunctionLinkage(AliasDecl); + auto Linkage = CGM.getFunctionLinkage(AliasDecl); + // Does this function alias already exists? StringRef MangledName = CGM.getMangledName(AliasDecl); auto Entry = dyn_cast_or_null(CGM.getGlobalValue(MangledName)); if (Entry && !Entry.isDeclaration()) return; - llvm_unreachable("NYI"); + + // Retrieve aliasee info. + auto Aliasee = + dyn_cast_or_null(CGM.GetAddrOfGlobal(TargetDecl)); + assert(Aliasee && "expected cir.func"); + auto *AliasFD = dyn_cast(AliasDecl.getDecl()); + assert(AliasFD && "expected FunctionDecl"); + + // Populate actual alias. + auto Alias = + CGM.createCIRFunction(CGM.getLoc(AliasDecl.getDecl()->getSourceRange()), + MangledName, Aliasee.getFunctionType(), AliasFD); + Alias.setAliasee(Aliasee.getName()); + Alias.setLinkage(Linkage); + mlir::SymbolTable::setSymbolVisibility( + Alias, CGM.getMLIRVisibilityFromCIRLinkage(Linkage)); + + // Alias constructors and destructors are always unnamed_addr. + assert(!UnimplementedFeature::unnamedAddr()); + + // Switch any previous uses to the alias. + if (Entry) { + llvm_unreachable("NYI"); + } else { + // Name already set by createCIRFunction + } + + // Finally, set up the alias with its proper name and attributes. + CGM.setCommonAttributes(AliasDecl, Alias); } void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index a87da9d57ec0..fb947919cfea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -440,6 +440,10 @@ mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM, return g; } +void CIRGenModule::setCommonAttributes(GlobalDecl GD, mlir::Operation *GV) { + assert(!UnimplementedFeature::setCommonAttributes()); +} + /// If the specified mangled name is not in the module, /// create and return an mlir GlobalOp with the specified type (TODO(cir): /// address space). diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 916a83cfd4ed..25d35deddff2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -274,6 +274,12 @@ class CIRGenModule : public CIRGenTypeCache { /// in AST is always in default address space. LangAS getGlobalConstantAddressSpace() const; + /// Set attributes which are common to any form of a global definition (alias, + /// Objective-C method, function, global variable). + /// + /// NOTE: This should only be called for definitions. + void setCommonAttributes(GlobalDecl GD, mlir::Operation *GV); + // TODO: this obviously overlaps with const TargetCIRGenInfo &getTargetCIRGenInfo(); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 214a1b56fc16..81a84a416c69 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -76,6 +76,7 @@ struct UnimplementedFeature { static bool isVarArg() { return false; } static bool setNonGC() { return false; } static bool armComputeVolatileBitfields() { return false; } + static bool setCommonAttributes() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9d7f72e2c3db..c0c5d2118d23 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1288,6 +1288,7 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { auto builtinNameAttr = getBuiltinAttrName(state.name); auto coroutineNameAttr = getCoroutineAttrName(state.name); auto lambdaNameAttr = getLambdaAttrName(state.name); + auto visNameAttr = getSymVisibilityAttrName(state.name); if (::mlir::succeeded(parser.parseOptionalKeyword(builtinNameAttr.strref()))) state.addAttribute(builtinNameAttr, parser.getBuilder().getUnitAttr()); if (::mlir::succeeded( @@ -1303,6 +1304,13 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { parseOptionalCIRKeyword( parser, GlobalLinkageKind::ExternalLinkage))); + ::llvm::StringRef visAttrStr; + if (parser.parseOptionalKeyword(&visAttrStr, {"private", "public", "nested"}) + .succeeded()) { + state.addAttribute(visNameAttr, + parser.getBuilder().getStringAttr(visAttrStr)); + } + StringAttr nameAttr; SmallVector arguments; SmallVector argAttrs; @@ -1399,6 +1407,7 @@ ::mlir::Region *cir::FuncOp::getCallableRegion() { void cir::FuncOp::print(OpAsmPrinter &p) { p << ' '; + if (getBuiltin()) p << "builtin "; @@ -1411,6 +1420,10 @@ void cir::FuncOp::print(OpAsmPrinter &p) { if (getLinkage() != GlobalLinkageKind::ExternalLinkage) p << stringifyGlobalLinkageKind(getLinkage()) << ' '; + auto vis = getVisibility(); + if (vis != mlir::SymbolTable::Visibility::Public) + p << vis << " "; + // Print function name, signature, and control. p.printSymbolName(getSymName()); auto fnType = getFunctionType(); @@ -1418,10 +1431,15 @@ void cir::FuncOp::print(OpAsmPrinter &p) { /*isVariadic=*/false, fnType.getResults()); function_interface_impl::printFunctionAttributes( - p, *this, {getFunctionTypeAttrName(), getLinkageAttrName()}); + p, *this, + {getSymVisibilityAttrName(), getAliaseeAttrName(), + getFunctionTypeAttrName(), getLinkageAttrName(), getBuiltinAttrName()}); - if (auto aliaseeName = getAliasee()) + if (auto aliaseeName = getAliasee()) { + p << " alias("; p.printSymbolName(*aliaseeName); + p << ")"; + } // Print the body if this is not an external function. Region &body = getOperation()->getRegion(0); diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 542325bfbf6d..798ac3052574 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -1,5 +1,8 @@ -// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -clangir-disable-emit-cxx-default %s -o - | FileCheck %s --check-prefix=DISABLE +// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -clangir-disable-emit-cxx-default %s -o %t-disable.cir +// RUN: FileCheck --input-file=%t-disable.cir %s --check-prefix=DISABLE int strlen(char const *); @@ -59,7 +62,7 @@ struct String { // CHECK: cir.return %8 : !cir.ptr // CHECK: } - // DISABLE: cir.func @_ZN10StringViewaSEOS_ + // DISABLE: cir.func private @_ZN10StringViewaSEOS_ // DISABLE-NEXT: cir.func @main() }; diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index c9f587e49536..7b9b910c24e0 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -137,10 +137,10 @@ co_invoke_fn co_invoke; // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22struct2Efolly3A3Acoro3A3Aco_invoke_fn22 -// CHECK: cir.func builtin @__builtin_coro_id(i32, !cir.ptr, !cir.ptr, !cir.ptr) -> i32 attributes {builtin, sym_visibility = "private"} -// CHECK: cir.func builtin @__builtin_coro_alloc(i32) -> !cir.bool attributes {builtin, sym_visibility = "private"} -// CHECK: cir.func builtin @__builtin_coro_size() -> i64 attributes {builtin, sym_visibility = "private"} -// CHECK: cir.func builtin @__builtin_coro_begin(i32, !cir.ptr) -> !cir.ptr attributes {builtin, sym_visibility = "private"} +// CHECK: cir.func builtin private @__builtin_coro_id(i32, !cir.ptr, !cir.ptr, !cir.ptr) -> i32 +// CHECK: cir.func builtin private @__builtin_coro_alloc(i32) -> !cir.bool +// CHECK: cir.func builtin private @__builtin_coro_size() -> i64 +// CHECK: cir.func builtin private @__builtin_coro_begin(i32, !cir.ptr) -> !cir.ptr using VoidTask = folly::coro::Task; @@ -341,7 +341,7 @@ folly::coro::Task go1_lambda() { co_return co_await task; } -// CHECK: cir.func coroutine lambda internal @_ZZ10go1_lambdavENK3$_0clEv +// CHECK: cir.func coroutine lambda internal private @_ZZ10go1_lambdavENK3$_0clEv // CHECK: cir.func coroutine @_Z10go1_lambdav() folly::coro::Task go4() { diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp index 89549cceeded..18ad86bcd8a1 100644 --- a/clang/test/CIR/CodeGen/ctor-alias.cpp +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -24,3 +24,17 @@ void t() { // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return + +struct B { + B(); +}; +B::B() { +} + +// CHECK: cir.func @_ZN1BC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.return +// CHECK: } +// CHECK: cir.func @_ZN1BC1Ev(!cir.ptr) alias(@_ZN1BC2Ev) \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/cxx-default-arg.cpp b/clang/test/CIR/CodeGen/cxx-default-arg.cpp index f637119a8475..c5665337608b 100644 --- a/clang/test/CIR/CodeGen/cxx-default-arg.cpp +++ b/clang/test/CIR/CodeGen/cxx-default-arg.cpp @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// CHECK: cir.func @_ZN12MyIntPointerC1EPi +// CHECK: cir.func private @_ZN12MyIntPointerC1EPi struct MyIntPointer { MyIntPointer(int *p = nullptr); diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index 6b5e5a90b2c7..3100e135949a 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -25,10 +25,10 @@ class B : public A // CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> // @B::~B() #1 declaration -// CHECK: cir.func @_ZN1BD2Ev(!cir.ptr) attributes {sym_visibility = "private"} +// CHECK: cir.func private @_ZN1BD2Ev(!cir.ptr) // operator delete(void*) declaration -// CHECK: cir.func @_ZdlPv(!cir.ptr) attributes {sym_visibility = "private"} +// CHECK: cir.func private @_ZdlPv(!cir.ptr) // B dtor => @B::~B() #2 // Calls dtor #1 diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index f9eea7e3962c..f77d80d3b95f 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -9,7 +9,7 @@ void fn() { // CHECK: !ty_22class2Eanon22 = !cir.struct<"class.anon", i8> // CHECK-DAG: module -// CHECK: cir.func lambda internal @_ZZ2fnvENK3$_0clEv +// CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv // CHECK: cir.func @_Z2fnv() // CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon22, cir.ptr , ["a"] @@ -21,7 +21,7 @@ void l0() { a(); } -// CHECK: cir.func lambda internal @_ZZ2l0vENK3$_0clEv( +// CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv( // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr @@ -98,13 +98,13 @@ int g3() { } // lambda operator() -// CHECK: cir.func lambda internal @_ZZ2g3vENK3$_0clERKi +// CHECK: cir.func lambda internal private @_ZZ2g3vENK3$_0clERKi // lambda __invoke() -// CHECK: cir.func internal @_ZZ2g3vEN3$_08__invokeERKi +// CHECK: cir.func internal private @_ZZ2g3vEN3$_08__invokeERKi // lambda operator int (*)(int const&)() -// CHECK: cir.func internal @_ZZ2g3vENK3$_0cvPFiRKiEEv +// CHECK: cir.func internal private @_ZZ2g3vENK3$_0cvPFiRKiEEv // CHECK: cir.func @_Z2g3v() -> i32 { // CHECK: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} From a1510fffecb1e2ea86c82c8f8a505acca7b76ecb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 May 2023 15:48:34 -0700 Subject: [PATCH 0935/2301] [CIR][CIRGen] cir.bool: add hardcoded datalayout for now --- clang/include/clang/CIR/Dialect/IR/CIRTypes.td | 3 ++- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 18 ++++++++++++++++++ clang/test/CIR/CodeGen/union.cpp | 6 +++++- 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index d1346aa6431a..a02af7636875 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -51,7 +51,8 @@ def CIR_PointerType : CIR_Type<"Pointer", "ptr", // //===----------------------------------------------------------------------===// def CIR_BoolType : - CIR_Type<"Bool", "bool"> { + CIR_Type<"Bool", "bool", + [DeclareTypeInterfaceMethods]> { let summary = "CIR bool type"; let description = [{ diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 0633862422a2..357cdd229bfa 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -168,6 +168,24 @@ void ArrayType::print(mlir::AsmPrinter &printer) const { // Data Layout information for types //===----------------------------------------------------------------------===// +llvm::TypeSize +BoolType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return llvm::TypeSize::getFixed(8); +} + +uint64_t +BoolType::getABIAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return 1; +} + +uint64_t +BoolType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return 1; +} + llvm::TypeSize PointerType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index 65586bfd461f..da47b290e3b1 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -4,17 +4,21 @@ typedef struct { int x; } yolo; typedef union { yolo y; struct { int lifecnt; }; } yolm; typedef union { yolo y; struct { int *lifecnt; int genpad; }; } yolm2; +typedef union { yolo y; struct { bool life; int genpad; }; } yolm3; void m() { yolm q; yolm2 q2; + yolm3 q3; } // CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", !cir.ptr, i32, #cir.recdecl.ast> +// CHECK: !ty_22struct2Eanon221 = !cir.struct<"struct.anon", !cir.bool, i32, #cir.recdecl.ast> // CHECK: !ty_22struct2Eyolo22 = !cir.struct<"struct.yolo", i32, #cir.recdecl.ast> // CHECK: !ty_22union2Eyolm22 = !cir.struct<"union.yolm", !ty_22struct2Eyolo22> // CHECK: !ty_22union2Eyolm222 = !cir.struct<"union.yolm2", !ty_22struct2Eanon22> // CHECK: cir.func @_Z1mv() { // CHECK: cir.alloca !ty_22union2Eyolm22, cir.ptr , ["q"] {alignment = 4 : i64} -// CHECK: cir.alloca !ty_22union2Eyolm222, cir.ptr , ["q2"] {alignment = 8 : i64} \ No newline at end of file +// CHECK: cir.alloca !ty_22union2Eyolm222, cir.ptr , ["q2"] {alignment = 8 : i64} +// CHECK: cir.alloca !ty_22union2Eyolm322, cir.ptr , ["q3"] {alignment = 4 : i64} loc(#loc12) \ No newline at end of file From 47b0ffd296b79dbdbbc315060cc60d3bebc98730 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 May 2023 16:04:32 -0700 Subject: [PATCH 0936/2301] [CIR][CIRGen] Locations: get a bit more flexible when possible --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 36 +++++++++++++++++------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 05d8db6f288c..b0f96f901de7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -110,19 +110,35 @@ mlir::Type CIRGenFunction::convertType(QualType T) { } mlir::Location CIRGenFunction::getLoc(SourceLocation SLoc) { - const SourceManager &SM = getContext().getSourceManager(); - PresumedLoc PLoc = SM.getPresumedLoc(SLoc); - StringRef Filename = PLoc.getFilename(); - return mlir::FileLineColLoc::get(builder.getStringAttr(Filename), - PLoc.getLine(), PLoc.getColumn()); + // Some AST nodes might contain invalid source locations (e.g. + // CXXDefaultArgExpr), workaround that to still get something out. + if (SLoc.isValid()) { + const SourceManager &SM = getContext().getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(SLoc); + StringRef Filename = PLoc.getFilename(); + return mlir::FileLineColLoc::get(builder.getStringAttr(Filename), + PLoc.getLine(), PLoc.getColumn()); + } else { + // Do our best... + assert(currSrcLoc && "expected to inherit some source location"); + return *currSrcLoc; + } } mlir::Location CIRGenFunction::getLoc(SourceRange SLoc) { - mlir::Location B = getLoc(SLoc.getBegin()); - mlir::Location E = getLoc(SLoc.getEnd()); - SmallVector locs = {B, E}; - mlir::Attribute metadata; - return mlir::FusedLoc::get(locs, metadata, builder.getContext()); + // Some AST nodes might contain invalid source locations (e.g. + // CXXDefaultArgExpr), workaround that to still get something out. + if (SLoc.isValid()) { + mlir::Location B = getLoc(SLoc.getBegin()); + mlir::Location E = getLoc(SLoc.getEnd()); + SmallVector locs = {B, E}; + mlir::Attribute metadata; + return mlir::FusedLoc::get(locs, metadata, builder.getContext()); + } else { + // Do our best... + assert(currSrcLoc && "expected to inherit some source location"); + return *currSrcLoc; + } } mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) { From 8f31722f95bd5477a0fd9e7d790b1f04a0f15d80 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 May 2023 16:08:46 -0700 Subject: [PATCH 0937/2301] [CIR][CIRGen] Locations: get a bit more flexible when possible (part 2) --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index fb947919cfea..616333bbe075 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1813,6 +1813,7 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( } mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { + assert(SLoc.isValid() && "expected valid source location"); const SourceManager &SM = astCtx.getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(SLoc); StringRef Filename = PLoc.getFilename(); @@ -1821,6 +1822,7 @@ mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { } mlir::Location CIRGenModule::getLoc(SourceRange SLoc) { + assert(SLoc.isValid() && "expected valid source location"); mlir::Location B = getLoc(SLoc.getBegin()); mlir::Location E = getLoc(SLoc.getEnd()); SmallVector locs = {B, E}; From 9a88bce162391cdc74df83da7cf605c4ca65c69a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 May 2023 16:58:06 -0700 Subject: [PATCH 0938/2301] [CIR] Change linkage name from private to cir_private Prevents clash with MLIR's symbol visibility "private". --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 +++-- clang/test/CIR/IR/invalid.cir | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7068ec300562..4ff83924ac39 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1098,9 +1098,10 @@ def Global_WeakODRLinkage : // Rename collisions when linking (static functions). def Global_InternalLinkage : I32EnumAttrCase<"InternalLinkage", 7, "internal">; -// Like Internal, but omit from symbol table. +// Like Internal, but omit from symbol table, prefix it with +// "cir_" to prevent clash with MLIR's symbol "private". def Global_PrivateLinkage : - I32EnumAttrCase<"PrivateLinkage", 8, "private">; + I32EnumAttrCase<"PrivateLinkage", 8, "cir_private">; // ExternalWeak linkage description. def Global_ExternalWeakLinkage : I32EnumAttrCase<"ExternalWeakLinkage", 9, "extern_weak">; diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index f1ec7236467c..de3c626671c9 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -184,7 +184,7 @@ module { // ----- module { - cir.global @a = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{expected string or keyword containing one of the following enum values for attribute 'linkage' [external, available_externally, linkonce, linkonce_odr, weak, weak_odr, internal, private, extern_weak, common]}} + cir.global @a = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{expected string or keyword containing one of the following enum values for attribute 'linkage' [external, available_externally, linkonce, linkonce_odr, weak, weak_odr, internal, cir_private, extern_weak, common]}} } // ----- From f964a406d9b1f6b6f08b32b3346614eb47e51dfe Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 May 2023 16:59:02 -0700 Subject: [PATCH 0939/2301] [CIR][CIRGen] Default arguments: codegen for aggregates --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 5 ++++- clang/test/CIR/CodeGen/struct.cpp | 24 +++++++++++++++++++++++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index ee3e89c2ca70..4dcd43d7780f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -134,7 +134,10 @@ class AggExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } void VisitNoInitExpr(NoInitExpr *E) { llvm_unreachable("NYI"); } - void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { llvm_unreachable("NYI"); } + void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { + CIRGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE); + Visit(DAE->getExpr()); + } void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) { CIRGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE); Visit(DIE->getExpr()); diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index cab6c63a52c8..533c097783bf 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s struct Bar { @@ -108,3 +108,25 @@ void m() { Adv C; } // CHECK: cir.store %8, %7 : i32, cir.ptr // CHECK: cir.return // CHECK: } + +struct A { + int a; +}; + +A get_default() { return A{2}; } + +struct S { + S(A a = get_default()); +}; + +void h() { S s; } + +// CHECK: cir.func @_Z1hv() { +// CHECK: %0 = cir.alloca !ty_22struct2ES22, cir.ptr , ["s", init] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !ty_22struct2EA22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} +// CHECK: %2 = cir.call @_Z11get_defaultv() : () -> !ty_22struct2EA22 +// CHECK: cir.store %2, %1 : !ty_22struct2EA22, cir.ptr +// CHECK: %3 = cir.load %1 : cir.ptr , !ty_22struct2EA22 +// CHECK: cir.call @_ZN1SC1E1A(%0, %3) : (!cir.ptr, !ty_22struct2EA22) -> () +// CHECK: cir.return +// CHECK: } From c50daef1a48d74ebef49f23545362196340800f6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 May 2023 18:02:06 -0700 Subject: [PATCH 0940/2301] [CIR][CIRGen][NFC] Dtor: emit more complex dtors based on types/NRVO flags This only adds most of the skeleton but still asserts at the end, follow up with add remaining bits and testcase. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 73 +++++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 23 ++++++-- 2 files changed, 89 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 3bccda6c5e6d..7e7d0ba6e4d4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -240,9 +240,8 @@ void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) { const VarDecl &D = *emission.Variable; // Check the type for a cleanup. - // TODO: something like emitAutoVarTypeCleanup if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext())) - assert(0 && "not implemented"); + buildAutoVarTypeCleanup(emission, dtorKind); // In GC mode, honor objc_precise_lifetime. if (getContext().getLangOpts().getGC() != LangOptions::NonGC && @@ -572,3 +571,73 @@ void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr, pushFullExprCleanup(cleanupKind, addr, type, destroyer, useEHCleanupForArray); } + +CIRGenFunction::Destroyer * +CIRGenFunction::getDestroyer(QualType::DestructionKind kind) { + switch (kind) { + case QualType::DK_none: + llvm_unreachable("no destroyer for trivial dtor"); + case QualType::DK_cxx_destructor: + return destroyCXXObject; + case QualType::DK_objc_strong_lifetime: + case QualType::DK_objc_weak_lifetime: + case QualType::DK_nontrivial_c_struct: + llvm_unreachable("NYI"); + } + llvm_unreachable("Unknown DestructionKind"); +} + +/// Enter a destroy cleanup for the given local variable. +void CIRGenFunction::buildAutoVarTypeCleanup( + const CIRGenFunction::AutoVarEmission &emission, + QualType::DestructionKind dtorKind) { + assert(dtorKind != QualType::DK_none); + + // Note that for __block variables, we want to destroy the + // original stack object, not the possibly forwarded object. + Address addr = emission.getObjectAddress(*this); + + const VarDecl *var = emission.Variable; + QualType type = var->getType(); + + CleanupKind cleanupKind = NormalAndEHCleanup; + CIRGenFunction::Destroyer *destroyer = nullptr; + + switch (dtorKind) { + case QualType::DK_none: + llvm_unreachable("no cleanup for trivially-destructible variable"); + + case QualType::DK_cxx_destructor: + // If there's an NRVO flag on the emission, we need a different + // cleanup. + if (emission.NRVOFlag) { + assert(!type->isArrayType()); + CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor(); + EHStack.pushCleanup(cleanupKind, addr, type, dtor, + emission.NRVOFlag); + return; + } + break; + + case QualType::DK_objc_strong_lifetime: + llvm_unreachable("NYI"); + break; + + case QualType::DK_objc_weak_lifetime: + break; + + case QualType::DK_nontrivial_c_struct: + llvm_unreachable("NYI"); + } + + // If we haven't chosen a more specific destroyer, use the default. + if (!destroyer) + destroyer = getDestroyer(dtorKind); + + // Use an EH cleanup in array destructors iff the destructor itself + // is being pushed as an EH cleanup. + bool useEHCleanup = (cleanupKind & EHCleanup); + EHStack.pushCleanup(cleanupKind, addr, type, destroyer, + useEHCleanup); + llvm_unreachable("NYI"); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 891f4773f0be..ffb0dfb0f5b9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1049,7 +1049,6 @@ class CIRGenFunction : public CIRGenTypeCache { clang::QualType buildFunctionArgList(clang::GlobalDecl GD, FunctionArgList &Args); - struct AutoVarEmission { const clang::VarDecl *Variable; /// The address of the alloca for languages with explicit address space @@ -1060,24 +1059,35 @@ class CIRGenFunction : public CIRGenTypeCache { /// True if the variable is of aggregate type and has a constant /// initializer. - bool IsConstantAggregate; + bool IsConstantAggregate = false; /// True if the variable is a __block variable that is captured by an /// escaping block. bool IsEscapingByRef = false; + mlir::Value NRVOFlag{}; + struct Invalid {}; AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {} AutoVarEmission(const clang::VarDecl &variable) - : Variable(&variable), Addr(Address::invalid()), - IsConstantAggregate(false) {} + : Variable(&variable), Addr(Address::invalid()) {} static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } /// Returns the raw, allocated address, which is not necessarily /// the address of the object itself. It is casted to default /// address space for address space agnostic languages. Address getAllocatedAddress() const { return Addr; } + + /// Returns the address of the object within this declaration. + /// Note that this does not chase the forwarding pointer for + /// __block decls. + Address getObjectAddress(CIRGenFunction &CGF) const { + if (!IsEscapingByRef) + return Addr; + + llvm_unreachable("NYI"); + } }; LValue buildMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); @@ -1087,8 +1097,9 @@ class CIRGenFunction : public CIRGenTypeCache { AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D); void buildAutoVarInit(const AutoVarEmission &emission); - void buildAutoVarCleanups(const AutoVarEmission &emission); + void buildAutoVarTypeCleanup(const AutoVarEmission &emission, + clang::QualType::DestructionKind dtorKind); void buildStoreOfScalar(mlir::Value value, LValue lvalue); void buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, @@ -1443,6 +1454,8 @@ class CIRGenFunction : public CIRGenTypeCache { void pushDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray); + Destroyer *getDestroyer(QualType::DestructionKind kind); + /// An object to manage conditionally-evaluated expressions. class ConditionalEvaluation { // llvm::BasicBlock *StartBB; From bbfc24374e67c90266d5349aef865dd0cd152c42 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 15 May 2023 12:21:27 -0700 Subject: [PATCH 0941/2301] [CIR][CIRGen] Dtor: land dtors based on types/NRVO flags --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 1 - clang/test/CIR/CodeGen/dtors.cpp | 28 ++++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 7e7d0ba6e4d4..cdcc1efea166 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -639,5 +639,4 @@ void CIRGenFunction::buildAutoVarTypeCleanup( bool useEHCleanup = (cleanupKind & EHCleanup); EHStack.pushCleanup(cleanupKind, addr, type, destroyer, useEHCleanup); - llvm_unreachable("NYI"); } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index 3100e135949a..f8d2aaffe3fc 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -2,6 +2,25 @@ // RUN: FileCheck --input-file=%t.cir %s // XFAIL: * + +enum class EFMode { Always, Verbose }; + +class PSEvent { + public: + PSEvent( + EFMode m, + const char* n); + ~PSEvent(); + + private: + const char* n; + EFMode m; +}; + +void blue() { + PSEvent p(EFMode::Verbose, __FUNCTION__); +} + class A { public: @@ -24,6 +43,15 @@ class B : public A // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> +// CHECK: cir.func @_Z4bluev() { +// CHECK: %0 = cir.alloca !ty_22class2EPSEvent22, cir.ptr , ["p", init] {alignment = 8 : i64} +// CHECK: %1 = cir.const(1 : i32) : i32 +// CHECK: %2 = cir.get_global @".str" : cir.ptr > +// CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_ZN7PSEventC1E6EFModePKc(%0, %1, %3) : (!cir.ptr, i32, !cir.ptr) -> () +// CHECK: cir.return +// CHECK: } + // @B::~B() #1 declaration // CHECK: cir.func private @_ZN1BD2Ev(!cir.ptr) From 410240bdf8635109421e0460f1b9aa5859548303 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 15 May 2023 15:19:46 -0700 Subject: [PATCH 0942/2301] [CIR][CIRGen] Introduce cir.base_class_addr and use it for simple derived to base casts In the future cir.base_class_addr is going to receive more arguments as to fully map tricks needed when finding base class. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 34 ++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 12 ++++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 8 ++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 10 ++-- clang/lib/CIR/CodeGen/CIRGenValue.h | 6 +- clang/test/CIR/CodeGen/derived-to-base.cpp | 58 ++++++++++++++++++++ 7 files changed, 120 insertions(+), 13 deletions(-) create mode 100644 clang/test/CIR/CodeGen/derived-to-base.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 4ff83924ac39..a1c1aa2614e2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1321,6 +1321,40 @@ def StructElementAddr : CIR_Op<"struct_element_addr"> { // FIXME: add verifier. } +//===----------------------------------------------------------------------===// +// BaseClassAddr +//===----------------------------------------------------------------------===// + +def BaseClassAddrOp : CIR_Op<"base_class_addr"> { + let summary = "Get the base class address for a class/struct"; + let description = [{ + The `cir.base_class_addr` operaration gets the address of a particular + base class given a derived class pointer. + + Example: + ```mlir + TBD + ``` + }]; + + let arguments = (ins + Arg:$derived_addr); + + let results = (outs Res:$base_addr); + + // FIXME: we should not be printing `cir.ptr` below, that should come + // from the pointer type directly. + let assemblyFormat = [{ + `(` + $derived_addr `:` `cir.ptr` type($derived_addr) + `)` `->` `cir.ptr` type($base_addr) attr-dict + }]; + + // FIXME: add verifier. + // Check whether both src/dst pointee's are compatible. + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // FuncOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 028fbc5aa069..ef48543e7714 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -227,6 +227,18 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::CastKind::floating, v); } + cir::Address createBaseClassAddr(mlir::Location loc, cir::Address addr, + mlir::Type destType) { + if (destType == addr.getElementType()) + return addr; + + auto ptrTy = getPointerTo(destType); + auto baseAddr = + create(loc, ptrTy, addr.getPointer()); + + return Address(baseAddr, ptrTy, addr.getAlignment()); + } + /// Cast the element type of the given address to a different type, /// preserving information like the alignment. cir::Address createElementBitCast(mlir::Location loc, cir::Address addr, diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 0ac5bdc3339d..72b2f971b443 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1194,7 +1194,13 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, // If the static offset is zero and we don't have a virtual step, // just do a bitcast; null checks are unnecessary. if (NonVirtualOffset.isZero() && !VBase) { - llvm_unreachable("NYI"); + if (sanitizePerformTypeCheck()) { + llvm_unreachable("NYI"); + } + return builder.createBaseClassAddr(getLoc(Loc), Value, BaseValueTy); + // return builder.createElementBitCast(Value, BaseValueTy); + // return builder.create(getLoc(Loc), + // BaseValueTy, Value); } // Skip over the offset (and the vtable load) if we're supposed to diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 96dccc94c5aa..54020305ec35 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -622,12 +622,15 @@ Address CIRGenFunction::buildPointerWithAlignment(const Expr *E, assert(0 && "not implemented"); switch (CE->getCastKind()) { - default: + default: { + llvm::errs() << CE->getCastKindName() << "\n"; assert(0 && "not implemented"); + } // Nothing to do here... case CK_LValueToRValue: break; + case CK_UncheckedDerivedToBase: case CK_DerivedToBase: { // TODO: Support accesses to members of base classes in TBAA. For now, we // conservatively pretend that the complete object is of the base class diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index c24543d24f30..fd4d9024bc03 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -718,16 +718,14 @@ class ScalarExprEmitter : public StmtVisitor { if (BOInfo.isFixedPointOp()) { assert(0 && "not implemented"); } else { - // TODO: when we add proper basic types to CIR we - // probably won't need to handle + // FIXME(cir): handle another if above for CIR equivalent on // LHSTy->hasSignedIntegerRepresentation() // Unsigned integers and pointers. - if (LHS.getType().isa() || + if (CGF.CGM.getCodeGenOpts().StrictVTablePointers && + LHS.getType().isa() && RHS.getType().isa()) { - // TODO: Handle StrictVTablePointers and - // mayBeDynamicClass/invariant group. - assert(0 && "not implemented"); + llvm_unreachable("NYI"); } mlir::cir::CmpOpKind Kind; diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 966156559c09..9d4fad6cdc95 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -181,11 +181,7 @@ class LValue { assert((!Alignment.isZero() || Type->isIncompleteType()) && "initializing l-value with zero alignment!"); if (isGlobalReg()) - assert(ElementType == nullptr && "Glboal reg does not store elem type"); - else - assert(V.getType().cast().getPointee() == - ElementType && - "Pointer element type mismatch"); + assert(ElementType == nullptr && "Global reg does not store elem type"); this->Type = Type; this->Quals = Quals; diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp new file mode 100644 index 000000000000..0f5f93b654d6 --- /dev/null +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -0,0 +1,58 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +class C1 { + public: + virtual ~C1(); + C1(int i); + class Layer { + public: + Layer(int d); + virtual ~Layer() {} + }; +}; + +class C2 : public C1 { + public: + C2( + void* p, + int i + ); + + ~C2() override; + + class Layer : public C1::Layer { + public: + Layer(int d, const C2* C1); + virtual ~Layer(); + + protected: + const C2* m_C1; + }; +}; + +class C3 : public C2 { + struct Layer : public C2::Layer { + public: + Layer(int d, const C2* C1); + void Initialize(); + }; +}; + +void C3::Layer::Initialize() { + if (m_C1 == nullptr) { + return; + } +} + +// CHECK: !ty_22class2EC23A3ALayer22 = !cir.struct<"class.C2::Layer", !ty_22class2EC13A3ALayer22, !cir.ptr +// CHECK: !ty_22struct2EC33A3ALayer22 = !cir.struct<"struct.C3::Layer", !ty_22class2EC23A3ALayer22 + +// CHECK: cir.func @_ZN2C35Layer10InitializeEv + +// CHECK: cir.scope { +// CHECK: %2 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "m_C1"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %4 = cir.load %3 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool From 4315afe3183d35ecd2bd36b473d166649083c0f7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 15 May 2023 16:03:39 -0700 Subject: [PATCH 0943/2301] [CIR][CIRGen][NFC] Clean up some commented code leftover --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 72b2f971b443..a0175275c58a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1198,9 +1198,6 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, llvm_unreachable("NYI"); } return builder.createBaseClassAddr(getLoc(Loc), Value, BaseValueTy); - // return builder.createElementBitCast(Value, BaseValueTy); - // return builder.create(getLoc(Loc), - // BaseValueTy, Value); } // Skip over the offset (and the vtable load) if we're supposed to From 840d17e1e4102b6848810fa570bf7addad50c276 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 15 May 2023 16:09:12 -0700 Subject: [PATCH 0944/2301] [CIR][CIRGen][NFC] Cleanup buildOpOnBoolExpr and make it more flexible --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 11 +++++++---- clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h | 1 + 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 54020305ec35..d21aa10ca379 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1784,20 +1784,23 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(const Expr *cond, // } if (const UnaryOperator *CondUOp = dyn_cast(cond)) { - llvm_unreachable("unaryoperator ifstmt NYI"); + llvm_unreachable("NYI"); } if (const ConditionalOperator *CondOp = dyn_cast(cond)) { - llvm_unreachable("conditionaloperator ifstmt NYI"); + llvm_unreachable("NYI"); } if (const CXXThrowExpr *Throw = dyn_cast(cond)) { - llvm_unreachable("throw expr ifstmt nyi"); + llvm_unreachable("NYI"); } + // If the branch has a condition wrapped by __builtin_unpredictable, + // create metadata that specifies that the branch is unpredictable. + // Don't bother if not optimizing because that metadata would not be used. auto *Call = dyn_cast(cond->IgnoreImpCasts()); if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { - llvm_unreachable("NYI"); + assert(!UnimplementedFeature::insertBuiltinUnpredictable()); } // Emit the code with the fully general case. diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 81a84a416c69..8a65e6f3c477 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -77,6 +77,7 @@ struct UnimplementedFeature { static bool setNonGC() { return false; } static bool armComputeVolatileBitfields() { return false; } static bool setCommonAttributes() { return false; } + static bool insertBuiltinUnpredictable() { return false; } }; } // namespace cir From 73bf1d3f730f141614ce4f7ee41d114bba3ce2db Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 15 May 2023 16:33:07 -0700 Subject: [PATCH 0945/2301] [CIR][CIRGen][NFC] More skeleton for unimplemented lvalue pieces --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 39 +++++++++++++++++-- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index d21aa10ca379..6a3cae433e8d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -73,6 +73,25 @@ static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, return addr; } +static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { + const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl(); + if (!RD) + return false; + + if (RD->isDynamicClass()) + return true; + + for (const auto &Base : RD->bases()) + if (hasAnyVptr(Base.getType(), Context)) + return true; + + for (const FieldDecl *Field : RD->fields()) + if (hasAnyVptr(Field->getType(), Context)) + return true; + + return false; +} + LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { LValueBaseInfo BaseInfo = base.getBaseInfo(); @@ -89,10 +108,9 @@ LValue CIRGenFunction::buildLValueForField(LValue base, LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource)); if (UnimplementedFeature::tbaa() || rec->hasAttr() || FieldType->isVectorType()) { - // TODO(CIR): TBAAAccessInfo FieldTBAAInfo - llvm_unreachable("NYI"); + assert(!UnimplementedFeature::tbaa() && "NYI"); } else if (rec->isUnion()) { - llvm_unreachable("NYI"); + assert(!UnimplementedFeature::tbaa() && "NYI"); } else { // If no base type been assigned for the base access, then try to generate // one for this base lvalue. @@ -109,7 +127,20 @@ LValue CIRGenFunction::buildLValueForField(LValue base, unsigned RecordCVR = base.getVRQualifiers(); if (rec->isUnion()) { - llvm_unreachable("NYI"); + // For unions, there is no pointer adjustment. + if (CGM.getCodeGenOpts().StrictVTablePointers && + hasAnyVptr(FieldType, getContext())) + // Because unions can easily skip invariant.barriers, we need to add + // a barrier every time CXXRecord field with vptr is referenced. + assert(!UnimplementedFeature::createLaunderInvariantGroup()); + + if (IsInPreservedAIRegion || + (getDebugInfo() && rec->hasAttr())) { + assert(!UnimplementedFeature::generateDebugInfo()); + } + + if (FieldType->isReferenceType()) + llvm_unreachable("NYI"); } else { if (!IsInPreservedAIRegion && (!getDebugInfo() || !rec->hasAttr())) { diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 8a65e6f3c477..804bf38a8a20 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -78,6 +78,7 @@ struct UnimplementedFeature { static bool armComputeVolatileBitfields() { return false; } static bool setCommonAttributes() { return false; } static bool insertBuiltinUnpredictable() { return false; } + static bool createLaunderInvariantGroup() { return false; } }; } // namespace cir From 17c27143fc9c9fa133f7e03c6942573a1d9606c5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 16 May 2023 12:01:37 -0700 Subject: [PATCH 0946/2301] [CIR][CIRGen] Structs: add partial initialization using constant aggregates --- clang/lib/CIR/CodeGen/CIRDataLayout.h | 26 +++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 21 ++-- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 33 +++++- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 101 ++++++++++++++---- .../CodeGen/UnimplementedFeatureGuarding.h | 7 ++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 +- clang/test/CIR/CodeGen/agg-init.cpp | 25 +++++ 7 files changed, 186 insertions(+), 33 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRDataLayout.h b/clang/lib/CIR/CodeGen/CIRDataLayout.h index fade07da6d93..b4c1c83995b6 100644 --- a/clang/lib/CIR/CodeGen/CIRDataLayout.h +++ b/clang/lib/CIR/CodeGen/CIRDataLayout.h @@ -34,6 +34,32 @@ class CIRDataLayout { unsigned getABITypeAlign(mlir::Type ty) const { return getAlignment(ty, true); } + + /// Returns the maximum number of bytes that may be overwritten by + /// storing the specified type. + /// + /// If Ty is a scalable vector type, the scalable property will be set and + /// the runtime size will be a positive integer multiple of the base size. + /// + /// For example, returns 5 for i36 and 10 for x86_fp80. + unsigned getTypeStoreSize(mlir::Type Ty) const { + // FIXME: this is a bit inaccurate, see DataLayout::getTypeStoreSize for + // more information. + return llvm::divideCeil(layout.getTypeSizeInBits(Ty), 8); + } + + /// Returns the offset in bytes between successive objects of the + /// specified type, including alignment padding. + /// + /// If Ty is a scalable vector type, the scalable property will be set and + /// the runtime size will be a positive integer multiple of the base size. + /// + /// This is the amount that alloca reserves for this type. For example, + /// returns 12 or 16 for x86_fp80, depending on alignment. + unsigned getTypeAllocSize(mlir::Type Ty) const { + // Round up to the next alignment boundary. + return llvm::alignTo(getTypeStoreSize(Ty), layout.getTypeABIAlignment(Ty)); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index ef48543e7714..2c2e70720603 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -92,6 +92,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return mlir::cir::ZeroAttr::get(getContext(), t); } + mlir::TypedAttr getNullPtrAttr(mlir::Type t) { + assert(t.isa() && "expected cir.ptr"); + return mlir::cir::NullAttr::get(getContext(), t); + } + mlir::cir::ConstArrayAttr getString(llvm::StringRef str, mlir::Type eltTy, unsigned size = 0) { unsigned finalSize = size ? size : str.size(); @@ -105,7 +110,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { } mlir::cir::ConstStructAttr getAnonConstStruct(mlir::ArrayAttr arrayAttr, - bool packed = false) { + bool packed = false, + mlir::Type ty = {}) { assert(!packed && "NYI"); llvm::SmallVector members; for (auto &f : arrayAttr) { @@ -113,8 +119,13 @@ class CIRGenBuilderTy : public mlir::OpBuilder { assert(ta && "expected typed attribute member"); members.push_back(ta.getType()); } - auto sTy = mlir::cir::StructType::get(arrayAttr.getContext(), members, "", - /*body=*/true); + auto *ctx = arrayAttr.getContext(); + if (!ty) + ty = mlir::cir::StructType::get(ctx, members, mlir::StringAttr::get(ctx), + /*body=*/true, packed, + /*ast=*/std::nullopt); + auto sTy = ty.dyn_cast(); + assert(sTy && "expected struct type"); return mlir::cir::ConstStructAttr::get(sTy, arrayAttr); } @@ -175,9 +186,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Creates constant nullptr for pointer type ty. mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { - assert(ty.isa() && "expected cir.ptr"); - return create( - loc, ty, mlir::cir::NullAttr::get(getContext(), ty)); + return create(loc, ty, getNullPtrAttr(ty)); } // Creates constant null value for integral type ty. diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index cdcc1efea166..c7737156cd6f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRDataLayout.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "EHScopeStack.h" @@ -146,6 +147,32 @@ bool CIRGenFunction::isTrivialInitializer(const Expr *Init) { return false; } + +static void emitStoresForConstant(CIRGenModule &CGM, const VarDecl &D, + Address addr, bool isVolatile, + CIRGenBuilderTy &builder, + mlir::TypedAttr constant, bool IsAutoInit) { + auto Ty = constant.getType(); + cir::CIRDataLayout layout{CGM.getModule()}; + uint64_t ConstantSize = layout.getTypeAllocSize(Ty); + if (!ConstantSize) + return; + assert(!UnimplementedFeature::addAutoInitAnnotation()); + assert(!UnimplementedFeature::cirVectorType()); + assert(!UnimplementedFeature::shouldUseBZeroPlusStoresToInitialize()); + assert(!UnimplementedFeature::shouldUseMemSetToInitialize()); + assert(!UnimplementedFeature::shouldSplitConstantStore()); + assert(!UnimplementedFeature::shouldCreateMemCpyFromGlobal()); + // In CIR we want to emit a store for the whole thing, later lowering + // prepare to LLVM should unwrap this into the best policy (see asserts + // above). + // + // FIXME(cir): This is closer to memcpy behavior but less optimal, instead of + // copy from a global, we just create a cir.const out of it. + auto loc = CGM.getLoc(D.getSourceRange()); + builder.createStore(loc, builder.getConstant(loc, constant), addr); +} + void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { assert(emission.Variable && "emission was not valid!"); @@ -228,7 +255,11 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { if (!emission.IsConstantAggregate) llvm_unreachable("NYI"); - llvm_unreachable("NYI"); + // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. + auto typedConstant = constant.dyn_cast(); + assert(typedConstant && "expected typed attribute"); + emitStoresForConstant(CGM, D, Loc, type.isVolatileQualified(), builder, + typedConstant, /*IsAutoInit=*/false); } void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 908e730d6789..e8553e27357b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "Address.h" +#include "CIRDataLayout.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" @@ -46,23 +47,22 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, struct ConstantAggregateBuilderUtils { CIRGenModule &CGM; + CIRDataLayout dataLayout; - ConstantAggregateBuilderUtils(CIRGenModule &CGM) : CGM(CGM) {} + ConstantAggregateBuilderUtils(CIRGenModule &CGM) + : CGM(CGM), dataLayout{CGM.getModule()} {} - CharUnits getAlignment(const mlir::Attribute C) const { - llvm_unreachable("NYI"); - // return CharUnits::fromQuantity( - // CGM.getDataLayout().getABITypeAlignment(C->getType())); + CharUnits getAlignment(const mlir::TypedAttr C) const { + return CharUnits::fromQuantity( + dataLayout.getAlignment(C.getType(), /*useABI=*/true)); } CharUnits getSize(mlir::Type Ty) const { - llvm_unreachable("NYI"); - // return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(Ty)); + return CharUnits::fromQuantity(dataLayout.getTypeAllocSize(Ty)); } - CharUnits getSize(const mlir::Attribute C) const { - llvm_unreachable("NYI"); - // return getSize(C.getType()); + CharUnits getSize(const mlir::TypedAttr C) const { + return getSize(C.getType()); } mlir::Attribute getPadding(CharUnits PadSize) const { @@ -74,7 +74,7 @@ struct ConstantAggregateBuilderUtils { } }; -/// Incremental builder for an llvm::Constant* holding a struct or array +/// Incremental builder for an mlir::TypedAttr holding a struct or array /// constant. class ConstantAggregateBuilder : private ConstantAggregateBuilderUtils { /// The elements of the constant. These two arrays must have the same size; @@ -141,8 +141,11 @@ static void replace(Container &C, size_t BeginOff, size_t EndOff, Range Vals) { llvm::replace(C, C.begin() + BeginOff, C.begin() + EndOff, Vals); } -bool ConstantAggregateBuilder::add(mlir::Attribute C, CharUnits Offset, +bool ConstantAggregateBuilder::add(mlir::Attribute A, CharUnits Offset, bool AllowOverwrite) { + // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. + mlir::TypedAttr C = A.dyn_cast(); + assert(C && "expected typed attribute"); // Common case: appending to a layout. if (Offset >= Size) { CharUnits Align = getAlignment(C); @@ -203,9 +206,11 @@ std::optional ConstantAggregateBuilder::splitAt(CharUnits Pos) { return LastAtOrBeforePosIndex; // We found an element starting before Pos. Check for overlap. - if (Offsets[LastAtOrBeforePosIndex] + - getSize(Elems[LastAtOrBeforePosIndex]) <= - Pos) + // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. + mlir::TypedAttr C = + Elems[LastAtOrBeforePosIndex].dyn_cast(); + assert(C && "expected typed attribute"); + if (Offsets[LastAtOrBeforePosIndex] + getSize(C) <= Pos) return LastAtOrBeforePosIndex + 1; // Try to decompose it into smaller constants. @@ -230,7 +235,53 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( if (Elems.empty()) return {}; - llvm_unreachable("NYI"); + // If we want an array type, see if all the elements are the same type and + // appropriately spaced. + if (auto aty = DesiredTy.dyn_cast()) { + llvm_unreachable("NYI"); + } + + // The size of the constant we plan to generate. This is usually just the size + // of the initialized type, but in AllowOversized mode (i.e. flexible array + // init), it can be larger. + CharUnits DesiredSize = Utils.getSize(DesiredTy); + if (Size > DesiredSize) { + assert(AllowOversized && "Elems are oversized"); + DesiredSize = Size; + } + + // The natural alignment of an unpacked CIR struct with the given elements. + CharUnits Align = CharUnits::One(); + for (auto e : Elems) { + // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. + auto C = e.dyn_cast(); + assert(C && "expected typed attribute"); + Align = std::max(Align, Utils.getAlignment(C)); + } + + // The natural size of an unpacked LLVM struct with the given elements. + CharUnits AlignedSize = Size.alignTo(Align); + + bool Packed = false; + ArrayRef UnpackedElems = Elems; + llvm::SmallVector UnpackedElemStorage; + if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) { + llvm_unreachable("NYI"); + } + + // If we don't have a natural layout, insert padding as necessary. + // As we go, double-check to see if we can actually just emit Elems + // as a non-packed struct and do so opportunistically if possible. + llvm::SmallVector PackedElems; + if (!NaturalLayout) { + llvm_unreachable("NYI"); + } + + auto &builder = CGM.getBuilder(); + return builder.getAnonConstStruct( + mlir::ArrayAttr::get(builder.getContext(), + Packed ? PackedElems : UnpackedElems), + Packed, DesiredTy); } void ConstantAggregateBuilder::condense(CharUnits Offset, @@ -251,8 +302,10 @@ void ConstantAggregateBuilder::condense(CharUnits Offset, if (Length == 0) return; - if (Length == 1 && Offsets[First] == Offset && - getSize(Elems[First]) == Size) { + // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. + mlir::TypedAttr C = Elems[First].dyn_cast(); + assert(C && "expected typed attribute"); + if (Length == 1 && Offsets[First] == Offset && getSize(C) == Size) { // Re-wrap single element structs if necessary. Otherwise, leave any single // element constant of the right size alone even if it has the wrong type. llvm_unreachable("NYI"); @@ -1032,7 +1085,7 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { // If there's no base at all, this is a null or absolute pointer, // possibly cast back to an integer type. if (!base) { - assert(0 && "NYI"); + return tryEmitAbsolute(destTy); } // Otherwise, try to emit the base. @@ -1062,8 +1115,14 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { /// Try to emit an absolute l-value, such as a null pointer or an integer /// bitcast to pointer type. mlir::Attribute ConstantLValueEmitter::tryEmitAbsolute(mlir::Type destTy) { - assert(0 && "NYI"); - return {}; + // If we're producing a pointer, this is easy. + auto destPtrTy = destTy.dyn_cast(); + assert(destPtrTy && "expected !cir.ptr type"); + if (Value.isNullPointer()) { + // FIXME: integer offsets from non-zero null pointers. + return CGM.getBuilder().getNullPtrAttr(destPtrTy); + } + llvm_unreachable("NYI"); } ConstantLValue diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 804bf38a8a20..a1c3e2ab11ed 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -62,6 +62,12 @@ struct UnimplementedFeature { // Coroutines static bool unhandledException() { return false; } + // Clang early struct optimizations + static bool shouldUseBZeroPlusStoresToInitialize() { return false; } + static bool shouldUseMemSetToInitialize() { return false; } + static bool shouldSplitConstantStore() { return false; } + static bool shouldCreateMemCpyFromGlobal() { return false; } + static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } static bool incrementProfileCounter() { return false; } @@ -79,6 +85,7 @@ struct UnimplementedFeature { static bool setCommonAttributes() { return false; } static bool insertBuiltinUnpredictable() { return false; } static bool createLaunderInvariantGroup() { return false; } + static bool addAutoInitAnnotation() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index c0c5d2118d23..34cce70c9b19 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -168,11 +168,6 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return success(); } - if (attrType.isa()) { - // ConstArrayAttr is already verified to bing with cir.array type. - return success(); - } - if (attrType.isa()) { if (opType.isa<::mlir::cir::PointerType>()) return success(); @@ -181,6 +176,7 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, if (attrType.isa() || attrType.isa() || + attrType.isa() || attrType.isa() || attrType.isa()) return success(); diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 7ae0ce221719..50aa4de9c756 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -43,3 +43,28 @@ void use() { yop{}; } // CHECK: cir.store %4, %3 : i32, cir.ptr // CHECK: cir.return // CHECK: } + +typedef unsigned long long Flags; + +typedef enum XType { + A = 0, + Y = 1000066001, + X = 1000070000 +} XType; + +typedef struct Yo { + XType type; + const void* __attribute__((__may_alias__)) next; + Flags createFlags; +} Yo; + +void yo() { + Yo ext = {X}; +} + +// CHECK: cir.func @_Z2yov() { +// CHECK: %0 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext"] {alignment = 8 : i64} +// CHECK: %1 = cir.const(#cir.const_struct<{1000070000 : i32,#cir.null : !cir.ptr,0 : i64}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 +// CHECK: cir.store %1, %0 : !ty_22struct2EYo22, cir.ptr +// CHECK: cir.return +// CHECK: } From 276adaa9d4bcdbd7e99e086a1992ab2daaf8c5f6 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 15 May 2023 11:09:30 -0300 Subject: [PATCH 0947/2301] [CIR][BugFix] Allow implicit value type in const_array parser Fixes parsing for const_arrays with values (ArrayAttribute) with omitted types. In this case, it infers the value's type from the const_array type. --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 24 ++++++++++++------------ clang/test/CIR/Lowering/globals.cir | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 34cce70c9b19..b215ddd69d95 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1816,18 +1816,18 @@ ::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser, // ArrayAttrrs have per-element type, not the type of the array... if (resultVal->dyn_cast()) { - // Parse literal ':' - if (parser.parseColon()) - return {}; - - // Parse variable 'type' - resultTy = ::mlir::FieldParser<::mlir::Type>::parse(parser); - if (failed(resultTy)) { - parser.emitError( - parser.getCurrentLocation(), - "failed to parse ConstArrayAttr parameter 'type' which is " - "to be a `::mlir::Type`"); - return {}; + // Array has implicit type: infer from const array type. + if (parser.parseOptionalColon().failed()) { + resultTy = type; + } else { // Array has explicit type: parse it. + resultTy = ::mlir::FieldParser<::mlir::Type>::parse(parser); + if (failed(resultTy)) { + parser.emitError( + parser.getCurrentLocation(), + "failed to parse ConstArrayAttr parameter 'type' which is " + "to be a `::mlir::Type`"); + return {}; + } } } else { assert(resultVal->isa() && "IDK"); diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index c0c5b45d295f..b7881bbc254e 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -6,7 +6,7 @@ module { cir.global external @y = 3.400000e+00 : f32 cir.global external @w = 4.300000e+00 : f64 cir.global external @x = 51 : i8 - cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> : !cir.array + cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array // Implicit array type cir.global external @alpha = #cir.const_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8] : !cir.array> : !cir.array cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global external @s = @".str": !cir.ptr From 7576d8a2f94f2358cd90eeba93d501a7eee1a208 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 May 2023 11:51:48 -0700 Subject: [PATCH 0948/2301] [CIR][CIRGen] Aggregates: more partial initialization on face of casts --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 106 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 42 ++++++- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/agg-init.cpp | 19 +++- 4 files changed, 154 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 4dcd43d7780f..d7746a676bc8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -175,16 +175,114 @@ class AggExprEmitter : public StmtVisitor { // Visitor Methods //===----------------------------------------------------------------------===// +/// Determine whether the given cast kind is known to always convert values +/// with all zero bits in their value representation to values with all zero +/// bits in their value representation. +/// TODO(cir): this can be shared with LLVM codegen. +static bool castPreservesZero(const CastExpr *CE) { + switch (CE->getCastKind()) { + case CK_HLSLVectorTruncation: + case CK_HLSLArrayRValue: + llvm_unreachable("NYI"); + // No-ops. + case CK_NoOp: + case CK_UserDefinedConversion: + case CK_ConstructorConversion: + case CK_BitCast: + case CK_ToUnion: + case CK_ToVoid: + // Conversions between (possibly-complex) integral, (possibly-complex) + // floating-point, and bool. + case CK_BooleanToSignedIntegral: + case CK_FloatingCast: + case CK_FloatingComplexCast: + case CK_FloatingComplexToBoolean: + case CK_FloatingComplexToIntegralComplex: + case CK_FloatingComplexToReal: + case CK_FloatingRealToComplex: + case CK_FloatingToBoolean: + case CK_FloatingToIntegral: + case CK_IntegralCast: + case CK_IntegralComplexCast: + case CK_IntegralComplexToBoolean: + case CK_IntegralComplexToFloatingComplex: + case CK_IntegralComplexToReal: + case CK_IntegralRealToComplex: + case CK_IntegralToBoolean: + case CK_IntegralToFloating: + // Reinterpreting integers as pointers and vice versa. + case CK_IntegralToPointer: + case CK_PointerToIntegral: + // Language extensions. + case CK_VectorSplat: + case CK_MatrixCast: + case CK_NonAtomicToAtomic: + case CK_AtomicToNonAtomic: + return true; + + case CK_BaseToDerivedMemberPointer: + case CK_DerivedToBaseMemberPointer: + case CK_MemberPointerToBoolean: + case CK_NullToMemberPointer: + case CK_ReinterpretMemberPointer: + // FIXME: ABI-dependent. + return false; + + case CK_AnyPointerToBlockPointerCast: + case CK_BlockPointerToObjCPointerCast: + case CK_CPointerToObjCPointerCast: + case CK_ObjCObjectLValueCast: + case CK_IntToOCLSampler: + case CK_ZeroToOCLOpaqueType: + // FIXME: Check these. + return false; + + case CK_FixedPointCast: + case CK_FixedPointToBoolean: + case CK_FixedPointToFloating: + case CK_FixedPointToIntegral: + case CK_FloatingToFixedPoint: + case CK_IntegralToFixedPoint: + // FIXME: Do all fixed-point types represent zero as all 0 bits? + return false; + + case CK_AddressSpaceConversion: + case CK_BaseToDerived: + case CK_DerivedToBase: + case CK_Dynamic: + case CK_NullToPointer: + case CK_PointerToBoolean: + // FIXME: Preserves zeroes only if zero pointers and null pointers have the + // same representation in all involved address spaces. + return false; + + case CK_ARCConsumeObject: + case CK_ARCExtendBlockObject: + case CK_ARCProduceObject: + case CK_ARCReclaimReturnedObject: + case CK_CopyAndAutoreleaseBlockObject: + case CK_ArrayToPointerDecay: + case CK_FunctionToPointerDecay: + case CK_BuiltinFnToFnPtr: + case CK_Dependent: + case CK_LValueBitCast: + case CK_LValueToRValue: + case CK_LValueToRValueBitCast: + case CK_UncheckedDerivedToBase: + return false; + } + llvm_unreachable("Unhandled clang::CastKind enum"); +} + /// If emitting this value will obviously just cause a store of /// zero to memory, return true. This can return false if uncertain, so it just /// handles simple cases. static bool isSimpleZero(const Expr *E, CIRGenFunction &CGF) { E = E->IgnoreParens(); while (auto *CE = dyn_cast(E)) { - llvm_unreachable("NYI"); - // if (!castPreservesZero(CE)) - // break; - // E = CE->getSubExpr()->IgnoreParens(); + if (!castPreservesZero(CE)) + break; + E = CE->getSubExpr()->IgnoreParens(); } // 0 diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index fd4d9024bc03..1780cf3f5610 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -948,13 +948,45 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_LValueToRValueBitCast: llvm_unreachable("NYI"); case CK_CPointerToObjCPointerCast: - llvm_unreachable("NYI"); case CK_BlockPointerToObjCPointerCast: - llvm_unreachable("NYI"); case CK_AnyPointerToBlockPointerCast: - llvm_unreachable("NYI"); - case CK_BitCast: - llvm_unreachable("NYI"); + case CK_BitCast: { + auto Src = Visit(const_cast(E)); + mlir::Type DstTy = CGF.convertType(DestTy); + + assert(!UnimplementedFeature::cirVectorType()); + assert(!UnimplementedFeature::addressSpace()); + if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { + llvm_unreachable("NYI"); + } + + if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) { + llvm_unreachable("NYI"); + } + + // Update heapallocsite metadata when there is an explicit pointer cast. + assert(!UnimplementedFeature::addHeapAllocSiteMetadata()); + + // If Src is a fixed vector and Dst is a scalable vector, and both have the + // same element type, use the llvm.vector.insert intrinsic to perform the + // bitcast. + assert(!UnimplementedFeature::cirVectorType()); + + // If Src is a scalable vector and Dst is a fixed vector, and both have the + // same element type, use the llvm.vector.extract intrinsic to perform the + // bitcast. + assert(!UnimplementedFeature::cirVectorType()); + + // Perform VLAT <-> VLST bitcast through memory. + // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics + // require the element types of the vectors to be the same, we + // need to keep this around for bitcasts between VLAT <-> VLST where + // the element types of the vectors are not the same, until we figure + // out a better way of doing these casts. + assert(!UnimplementedFeature::cirVectorType()); + return CGF.getBuilder().createBitcast(CGF.getLoc(E->getSourceRange()), Src, + DstTy); + } case CK_AddressSpaceConversion: llvm_unreachable("NYI"); case CK_AtomicToNonAtomic: diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index a1c3e2ab11ed..4a630267a17b 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -86,6 +86,7 @@ struct UnimplementedFeature { static bool insertBuiltinUnpredictable() { return false; } static bool createLaunderInvariantGroup() { return false; } static bool addAutoInitAnnotation() { return false; } + static bool addHeapAllocSiteMetadata() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 50aa4de9c756..06877e05cfd5 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -60,11 +60,20 @@ typedef struct Yo { void yo() { Yo ext = {X}; + Yo ext2 = {Y, &ext}; } // CHECK: cir.func @_Z2yov() { -// CHECK: %0 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext"] {alignment = 8 : i64} -// CHECK: %1 = cir.const(#cir.const_struct<{1000070000 : i32,#cir.null : !cir.ptr,0 : i64}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 -// CHECK: cir.store %1, %0 : !ty_22struct2EYo22, cir.ptr -// CHECK: cir.return -// CHECK: } +// CHECK: %0 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext"] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} +// CHECK: %2 = cir.const(#cir.const_struct<{1000070000 : i32,#cir.null : !cir.ptr,0 : i64}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 +// CHECK: cir.store %2, %0 : !ty_22struct2EYo22, cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%1) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %4 = cir.const(1000066001 : i32) : i32 +// CHECK: cir.store %4, %3 : i32, cir.ptr +// CHECK: %5 = "cir.struct_element_addr"(%1) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > +// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "createFlags"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %8 = cir.const(0 : i64) : i64 +// CHECK: cir.store %8, %7 : i64, cir.ptr From 63573246b57985c37d23ceae1a90d6563cc88f30 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 May 2023 14:51:57 -0700 Subject: [PATCH 0949/2301] [CIR][CIRGen] Members: build simple missing DeclRefExpr's --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 ++--- clang/test/CIR/CodeGen/derived-to-base.cpp | 13 +++++++++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 6a3cae433e8d..f9590726c0e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1389,9 +1389,8 @@ bool CIRGenFunction::isWrappedCXXThis(const Expr *Obj) { LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) { - assert(0 && "enable upon testcase that validates this path"); - // buildIgnoredExpr(E->getBase()); - // return buildDeclRefLValue(DRE); + buildIgnoredExpr(E->getBase()); + return buildDeclRefLValue(DRE); } Expr *BaseExpr = E->getBase(); diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 0f5f93b654d6..c226b96b837a 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -5,6 +5,17 @@ class C1 { public: virtual ~C1(); C1(int i); + + struct IE { + bool supported = false; + unsigned version = 0; + }; + + struct IEs { + IE chain; + }; + + static IEs availableIEs; class Layer { public: Layer(int d); @@ -43,6 +54,8 @@ void C3::Layer::Initialize() { if (m_C1 == nullptr) { return; } + if (m_C1->availableIEs.chain.supported) { + } } // CHECK: !ty_22class2EC23A3ALayer22 = !cir.struct<"class.C2::Layer", !ty_22class2EC13A3ALayer22, !cir.ptr From 94291651effeb511a300979cafd67cd6a50e672d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 May 2023 15:21:04 -0700 Subject: [PATCH 0950/2301] [CIR][CIRGen] Function Prototypes: materialize deferred records when necessary --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 5 +++-- clang/test/CIR/CodeGen/struct.cpp | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 06c183938891..75a3aa0ee948 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -272,8 +272,9 @@ mlir::Type CIRGenTypes::ConvertFunctionTypeInternal(QualType QFT) { assert(!SkippedLayout && "Shouldn't have skipped anything yet"); - assert(RecordsBeingLaidOut.empty() && "Deferral NYI"); - assert(DeferredRecords.empty() && "Deferral NYI"); + if (RecordsBeingLaidOut.empty()) + while (!DeferredRecords.empty()) + convertRecordDeclType(DeferredRecords.pop_back_val()); return ResultType; } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 533c097783bf..7d15e9d5c3b8 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -130,3 +130,18 @@ void h() { S s; } // CHECK: cir.call @_ZN1SC1E1A(%0, %3) : (!cir.ptr, !ty_22struct2EA22) -> () // CHECK: cir.return // CHECK: } + +typedef enum enumy { + A = 1 +} enumy; + +typedef enumy (*fnPtr)(int instance, const char* name, void* function); + +struct Entry { + fnPtr procAddr = nullptr; +}; + +void ppp() { Entry x; } + +// CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr +// CHECK: = "cir.struct_element_addr"(%1) <{member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr) -> i32>> From 5647f97ce1297bd013273347366f7534191fae88 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 May 2023 17:06:45 -0700 Subject: [PATCH 0951/2301] [CIR][CIRGen][NFC] CXXForRangeStmt: add skeleton --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 6 +++++- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 13 +++++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index ffb0dfb0f5b9..57890e96a2d6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -890,7 +890,8 @@ class CIRGenFunction : public CIRGenTypeCache { // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. - mlir::LogicalResult buildStmt(const clang::Stmt *S, bool useCurrentScope); + mlir::LogicalResult buildStmt(const clang::Stmt *S, bool useCurrentScope, + ArrayRef Attrs = std::nullopt); mlir::LogicalResult buildSimpleStmt(const clang::Stmt *S, bool useCurrentScope); @@ -898,6 +899,9 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildForStmt(const clang::ForStmt &S); mlir::LogicalResult buildWhileStmt(const clang::WhileStmt &S); mlir::LogicalResult buildDoStmt(const clang::DoStmt &S); + mlir::LogicalResult + buildCXXForRangeStmt(const CXXForRangeStmt &S, + ArrayRef Attrs = std::nullopt); mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); mlir::LogicalResult buildCompoundStmt(const clang::CompoundStmt &S); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 192aadca3ecf..032688e44d54 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -56,7 +56,8 @@ mlir::LogicalResult CIRGenFunction::buildCompoundStmt(const CompoundStmt &S) { // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, - bool useCurrentScope) { + bool useCurrentScope, + ArrayRef Attrs) { if (mlir::succeeded(buildSimpleStmt(S, useCurrentScope))) return mlir::success(); @@ -146,6 +147,9 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::CoreturnStmtClass: return buildCoreturnStmt(cast(*S)); + case Stmt::CXXForRangeStmtClass: + return buildCXXForRangeStmt(cast(*S), Attrs); + case Stmt::IndirectGotoStmtClass: case Stmt::ReturnStmtClass: // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. @@ -158,7 +162,6 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::ObjCForCollectionStmtClass: case Stmt::ObjCAutoreleasePoolStmtClass: case Stmt::CXXTryStmtClass: - case Stmt::CXXForRangeStmtClass: case Stmt::SEHTryStmtClass: case Stmt::OMPMetaDirectiveClass: case Stmt::OMPCanonicalLoopClass: @@ -663,6 +666,12 @@ static mlir::LogicalResult buildLoopCondYield(mlir::OpBuilder &builder, return mlir::success(); } +mlir::LogicalResult +CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, + ArrayRef Attrs) { + llvm_unreachable("NYI"); +} + mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { mlir::cir::LoopOp loopOp; From d8594efd370b7fa93ff3ac596ec53f17452eb057 Mon Sep 17 00:00:00 2001 From: Jeremy Kun Date: Tue, 16 May 2023 21:25:24 -0700 Subject: [PATCH 0952/2301] [CIR][CIRGen] Implement unary inc/dec in CIRGenExpr --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 12 ++++++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 7 ++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 ++ clang/test/CIR/CodeGen/unary.cpp | 38 ++++++++++++++++++++++ 5 files changed, 60 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index f9590726c0e3..f77ad69742c8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -743,7 +743,17 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { } case UO_PreInc: case UO_PreDec: { - assert(0 && "not implemented"); + bool isInc = E->isIncrementOp(); + bool isPre = E->isPrefix(); + LValue LV = buildLValue(E->getSubExpr()); + + if (E->getType()->isAnyComplexType()) { + assert(0 && "not implemented"); + } else { + buildScalarPrePostIncDec(E, LV, isInc, isPre); + } + + return LV; } } } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 1780cf3f5610..bc7caef64329 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1644,3 +1644,10 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( patchVoidOrThrowSites(); }); } + +mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, + LValue LV, bool isInc, + bool isPre) { + return ScalarExprEmitter(*this, builder) + .buildScalarPrePostIncDec(E, LV, isInc, isPre); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index b0f96f901de7..1917dd6ad853 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1248,4 +1248,4 @@ bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *CE) { void CIRGenFunction::buildDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init) { assert(!UnimplementedFeature::generateDebugInfo()); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 57890e96a2d6..127787d26c92 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -752,6 +752,9 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value createLoad(const clang::VarDecl *VD, const char *Name); + mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV, + bool isInc, bool isPre); + // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. struct PrototypeWrapper { diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index 2cbb06b408eb..26eef43d4820 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -36,3 +36,41 @@ unsigned un0() { // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#OUTPUT:]] = cir.unary(not, %[[#INPUT]]) // CHECK: cir.store %[[#OUTPUT]], %[[#RET]] + +unsigned inc0() { + unsigned a = 1; + ++a; + return a; +} + +// CHECK: cir.func @_Z4inc0v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(1 : i32) : i32 +// CHECK: cir.store %[[#ATMP]], %[[#A]] : i32 +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#INPUT]]) +// CHECK: cir.store %[[#INCREMENTED]], %[[#A]] +// CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] +// CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] +// CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] +// CHECK: cir.return %[[#OUTPUT]] : i32 + +unsigned dec0() { + unsigned a = 1; + --a; + return a; +} + +// CHECK: cir.func @_Z4dec0v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(1 : i32) : i32 +// CHECK: cir.store %[[#ATMP]], %[[#A]] : i32 +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#INCREMENTED:]] = cir.unary(dec, %[[#INPUT]]) +// CHECK: cir.store %[[#INCREMENTED]], %[[#A]] +// CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] +// CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] +// CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] +// CHECK: cir.return %[[#OUTPUT]] : i32 From 40646512bdbca32c98c1231edde5afa32149fd61 Mon Sep 17 00:00:00 2001 From: Jeremy Kun Date: Wed, 17 May 2023 23:00:36 -0700 Subject: [PATCH 0953/2301] [CIR][CIRGen] Add more tests for post-inc/dec --- clang/test/CIR/CodeGen/unary.cpp | 62 ++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index 26eef43d4820..405d40fd5298 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -74,3 +74,65 @@ unsigned dec0() { // CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] // CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] // CHECK: cir.return %[[#OUTPUT]] : i32 + + +unsigned inc1() { + unsigned a = 1; + a++; + return a; +} + +// CHECK: cir.func @_Z4inc1v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(1 : i32) : i32 +// CHECK: cir.store %[[#ATMP]], %[[#A]] : i32 +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#INPUT]]) +// CHECK: cir.store %[[#INCREMENTED]], %[[#A]] +// CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] +// CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] +// CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] +// CHECK: cir.return %[[#OUTPUT]] : i32 + +unsigned dec1() { + unsigned a = 1; + a--; + return a; +} + +// CHECK: cir.func @_Z4dec1v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(1 : i32) : i32 +// CHECK: cir.store %[[#ATMP]], %[[#A]] : i32 +// CHECK: %[[#INPUT:]] = cir.load %[[#A]] +// CHECK: %[[#INCREMENTED:]] = cir.unary(dec, %[[#INPUT]]) +// CHECK: cir.store %[[#INCREMENTED]], %[[#A]] +// CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] +// CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] +// CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] +// CHECK: cir.return %[[#OUTPUT]] : i32 + +// Ensure the increment is performed after the assignment to b. +unsigned inc2() { + unsigned a = 1; + unsigned b = a++; + return b; +} + +// CHECK: cir.func @_Z4inc2v() -> i32 { +// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: %[[#B:]] = cir.alloca i32, cir.ptr , ["b", init] +// CHECK: %[[#ATMP:]] = cir.const(1 : i32) : i32 +// CHECK: cir.store %[[#ATMP]], %[[#A]] : i32 +// CHECK: %[[#ATOB:]] = cir.load %[[#A]] +// CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#ATOB]]) +// CHECK: cir.store %[[#INCREMENTED]], %[[#A]] +// CHECK: cir.store %[[#ATOB]], %[[#B]] +// CHECK: %[[#B_TO_OUTPUT:]] = cir.load %[[#B]] +// CHECK: cir.store %[[#B_TO_OUTPUT]], %[[#RET]] +// CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] +// CHECK: cir.return %[[#OUTPUT]] : i32 + From c1556e9d234d3736b5825dd0e3c78e625541010b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 May 2023 18:36:47 -0700 Subject: [PATCH 0954/2301] [CIR][CIRGen] PreInc/Dec: support pointers --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 28 +++++++++++++++++--- clang/test/CIR/CodeGen/unary.cpp | 17 +++++++++++- 3 files changed, 43 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a1c1aa2614e2..78bea7e683d7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -102,8 +102,8 @@ def PtrStrideOp : CIR_Op<"ptr_stride", [Pure, SameFirstOperandAndResultType]> { let summary = "Pointer access with stride"; let description = [{ - Given a base pointer as operand, provides a new pointer after applying - a stride. Currently only used for array subscripts. + Given a base pointer as first operand, provides a new pointer after applying + a stride (second operand). ```mlir %3 = cir.const(0 : i32) : i32 diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index bc7caef64329..c8028e42d149 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -258,9 +258,10 @@ class ScalarExprEmitter : public StmtVisitor { assert(!CGF.getLangOpts().OpenMP && "Not implemented"); QualType type = E->getSubExpr()->getType(); + int amount = (isInc ? 1 : -1); bool atomicPHI = false; - mlir::Value value; - mlir::Value input; + mlir::Value value{}; + mlir::Value input{}; if (const AtomicType *atomicTy = type->getAs()) { llvm_unreachable("no atomics inc/dec yet"); @@ -310,8 +311,29 @@ class ScalarExprEmitter : public StmtVisitor { // NOTE(CIR): clang calls CreateAdd but folds this to a unary op value = buildUnaryOp(E, Kind, input); } + // Next most common: pointer increment. } else if (const PointerType *ptr = type->getAs()) { - llvm_unreachable("no pointer inc/dec yet"); + QualType type = ptr->getPointeeType(); + if (const VariableArrayType *vla = + CGF.getContext().getAsVariableArrayType(type)) { + // VLA types don't have constant size. + llvm_unreachable("NYI"); + } else if (type->isFunctionType()) { + // Arithmetic on function pointers (!) is just +-1. + llvm_unreachable("NYI"); + } else { + // For everything else, we can just do a simple increment. + auto loc = CGF.getLoc(E->getSourceRange()); + auto &builder = CGF.getBuilder(); + auto amt = builder.getInt32(amount, loc); + if (CGF.getLangOpts().isSignedOverflowDefined()) { + llvm_unreachable("NYI"); + } else { + value = builder.create(loc, value.getType(), + value, amt); + assert(!UnimplementedFeature::emitCheckedInBoundsGEP()); + } + } } else if (type->isVectorType()) { llvm_unreachable("no vector inc/dec yet"); } else if (type->isRealFloatingType()) { diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index 405d40fd5298..cdc118b55d90 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -75,7 +75,6 @@ unsigned dec0() { // CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] // CHECK: cir.return %[[#OUTPUT]] : i32 - unsigned inc1() { unsigned a = 1; a++; @@ -136,3 +135,19 @@ unsigned inc2() { // CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] // CHECK: cir.return %[[#OUTPUT]] : i32 +int *inc_p(int *i) { + --i; + ++i; + return i; +} + +// CHECK: cir.func @_Z5inc_pPi(%arg0: !cir.ptr + +// CHECK: %[[#i_addr:]] = cir.alloca !cir.ptr, cir.ptr >, ["i", init] {alignment = 8 : i64} +// CHECK: %[[#i_dec:]] = cir.load %[[#i_addr]] : cir.ptr >, !cir.ptr +// CHECK: %[[#dec_const:]] = cir.const(-1 : i32) : i32 +// CHECK: = cir.ptr_stride(%[[#i_dec]] : !cir.ptr, %[[#dec_const]] : i32), !cir.ptr + +// CHECK: %[[#i_inc:]] = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %[[#inc_const:]] = cir.const(1 : i32) : i32 +// CHECK: = cir.ptr_stride(%[[#i_inc]] : !cir.ptr, %[[#inc_const]] : i32), !cir.ptr \ No newline at end of file From a5275abbf93f096378e504e02807d0e60f3cc8f6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 May 2023 17:49:45 -0700 Subject: [PATCH 0955/2301] [CIR][CIRGen] CXXForRangeStmt: implement range-based for loop --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 + clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 114 +- .../CodeGen/UnimplementedFeatureGuarding.h | 4 + clang/test/CIR/CodeGen/rangefor.cpp | 75 + clang/test/CIR/Inputs/std-cxx.h | 1261 +++++++++++++++++ 5 files changed, 1452 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/rangefor.cpp create mode 100644 clang/test/CIR/Inputs/std-cxx.h diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 127787d26c92..9875abf44ff8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -891,6 +891,9 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildCoroutineIntrinsic(const CallExpr *E, unsigned int IID); RValue buildCoroutineFrame(); + /// Build a debug stoppoint if we are emitting debug info. + void buildStopPoint(const Stmt *S); + // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. mlir::LogicalResult buildStmt(const clang::Stmt *S, bool useCurrentScope, diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 032688e44d54..c6fe563ae283 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -53,6 +53,10 @@ mlir::LogicalResult CIRGenFunction::buildCompoundStmt(const CompoundStmt &S) { return res; } +void CIRGenFunction::buildStopPoint(const Stmt *S) { + assert(!UnimplementedFeature::generateDebugInfo()); +} + // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, @@ -668,8 +672,84 @@ static mlir::LogicalResult buildLoopCondYield(mlir::OpBuilder &builder, mlir::LogicalResult CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, - ArrayRef Attrs) { - llvm_unreachable("NYI"); + ArrayRef ForAttrs) { + mlir::cir::LoopOp loopOp; + + // TODO(cir): pass in array of attributes. + auto forStmtBuilder = [&]() -> mlir::LogicalResult { + auto loopRes = mlir::success(); + // Evaluate the first pieces before the loop. + if (S.getInit()) + if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + if (buildStmt(S.getRangeStmt(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + if (buildStmt(S.getBeginStmt(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + if (buildStmt(S.getEndStmt(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + + assert(!UnimplementedFeature::loopInfoStack()); + // From LLVM: if there are any cleanups between here and the loop-exit + // scope, create a block to stage a loop exit along. + // We probably already do the right thing because of ScopeOp, but make + // sure we handle all cases. + assert(!UnimplementedFeature::requiresCleanups()); + + loopOp = builder.create( + getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::For, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + assert(!UnimplementedFeature::createProfileWeightsForLoop()); + assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); + mlir::Value condVal = evaluateExprAsBool(S.getCond()); + if (buildLoopCondYield(b, loc, condVal).failed()) + loopRes = mlir::failure(); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // https://en.cppreference.com/w/cpp/language/for + // In C++ the scope of the init-statement and the scope of + // statement are one and the same. + bool useCurrentScope = true; + if (buildStmt(S.getLoopVarStmt(), useCurrentScope).failed()) + loopRes = mlir::failure(); + if (buildStmt(S.getBody(), useCurrentScope).failed()) + loopRes = mlir::failure(); + buildStopPoint(&S); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + if (S.getInc()) + if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) + loopRes = mlir::failure(); + builder.create(loc); + }); + return loopRes; + }; + + auto res = mlir::success(); + auto scopeLoc = getLoc(S.getSourceRange()); + builder.create( + scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto scopeLocBegin = fusedLoc.getLocations()[0]; + auto scopeLocEnd = fusedLoc.getLocations()[1]; + // Create a cleanup scope for the condition variable cleanups. + // Logical equivalent from LLVM codegn for + // LexicalScope ConditionScope(*this, S.getSourceRange())... + LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, + builder.getInsertionBlock()}; + LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + res = forStmtBuilder(); + }); + + if (res.failed()) + return res; + + terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); + return mlir::success(); } mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { @@ -682,12 +762,19 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { if (S.getInit()) if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); + assert(!UnimplementedFeature::loopInfoStack()); + // From LLVM: if there are any cleanups between here and the loop-exit + // scope, create a block to stage a loop exit along. + // We probably already do the right thing because of ScopeOp, but make + // sure we handle all cases. + assert(!UnimplementedFeature::requiresCleanups()); loopOp = builder.create( getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::For, /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - // TODO: branch weigths, likelyhood, profile counter, etc. + assert(!UnimplementedFeature::createProfileWeightsForLoop()); + assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal; if (S.getCond()) { // If the for statement has a condition scope, @@ -716,6 +803,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { CGM.getASTContext().getLangOpts().CPlusPlus ? true : false; if (buildStmt(S.getBody(), useCurrentScope).failed()) loopRes = mlir::failure(); + buildStopPoint(&S); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -754,12 +842,19 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { // TODO: pass in array of attributes. auto doStmtBuilder = [&]() -> mlir::LogicalResult { auto loopRes = mlir::success(); + assert(!UnimplementedFeature::loopInfoStack()); + // From LLVM: if there are any cleanups between here and the loop-exit + // scope, create a block to stage a loop exit along. + // We probably already do the right thing because of ScopeOp, but make + // sure we handle all cases. + assert(!UnimplementedFeature::requiresCleanups()); loopOp = builder.create( getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::DoWhile, /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - // TODO: branch weigths, likelyhood, profile counter, etc. + assert(!UnimplementedFeature::createProfileWeightsForLoop()); + assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); // C99 6.8.5p2/p4: The first substatement is executed if the // expression compares unequal to 0. The condition must be a // scalar type. @@ -771,6 +866,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { [&](mlir::OpBuilder &b, mlir::Location loc) { if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); + buildStopPoint(&S); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -806,12 +902,19 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // TODO: pass in array of attributes. auto whileStmtBuilder = [&]() -> mlir::LogicalResult { auto loopRes = mlir::success(); + assert(!UnimplementedFeature::loopInfoStack()); + // From LLVM: if there are any cleanups between here and the loop-exit + // scope, create a block to stage a loop exit along. + // We probably already do the right thing because of ScopeOp, but make + // sure we handle all cases. + assert(!UnimplementedFeature::requiresCleanups()); loopOp = builder.create( getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::While, /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - // TODO: branch weigths, likelyhood, profile counter, etc. + assert(!UnimplementedFeature::createProfileWeightsForLoop()); + assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal; // If the for statement has a condition scope, // emit the local variable declaration. @@ -828,6 +931,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { [&](mlir::OpBuilder &b, mlir::Location loc) { if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); + buildStopPoint(&S); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 4a630267a17b..e461b8ad383d 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -71,6 +71,8 @@ struct UnimplementedFeature { static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } static bool incrementProfileCounter() { return false; } + static bool createProfileWeightsForLoop() { return false; } + static bool emitCondLikelihoodViaExpectIntrinsic() { return false; } static bool requiresReturnValueCheck() { return false; } static bool shouldEmitLifetimeMarkers() { return false; } static bool peepholeProtection() { return false; } @@ -87,6 +89,8 @@ struct UnimplementedFeature { static bool createLaunderInvariantGroup() { return false; } static bool addAutoInitAnnotation() { return false; } static bool addHeapAllocSiteMetadata() { return false; } + static bool loopInfoStack() { return false; } + static bool requiresCleanups() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp new file mode 100644 index 000000000000..d9c0cb7a9e14 --- /dev/null +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -0,0 +1,75 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +typedef enum enumy { + Unknown = 0, + Some = 1000024002, +} enumy; + +typedef struct triple { + enumy type; + void* __attribute__((__may_alias__)) next; + unsigned image; +} triple; + +void init(unsigned numImages) { + std::vector images(numImages); + for (auto& image : images) { + image = {Some}; + } +} + +// CHECK: !ty_22struct2Etriple22 = !cir.struct<"struct.triple", i32, !cir.ptr, i32> +// CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector", !cir.ptr, !cir.ptr, !cir.ptr> +// CHECK: !ty_22struct2E__vector_iterator22 = !cir.struct<"struct.__vector_iterator", !cir.ptr> + +// CHECK: cir.func @_Z4initj(%arg0: i32 +// CHECK: %0 = cir.alloca i32, cir.ptr , ["numImages", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !ty_22class2Estd3A3Avector22, cir.ptr , ["images", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : i32, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , i32 +// CHECK: %3 = cir.cast(integral, %2 : i32), i64 +// CHECK: cir.call @_ZNSt6vectorI6tripleEC1Em(%1, %3) : (!cir.ptr, i64) -> () +// CHECK: cir.scope { +// CHECK: %4 = cir.alloca !cir.ptr, cir.ptr >, ["__range1", init] {alignment = 8 : i64} +// CHECK: %5 = cir.alloca !ty_22struct2E__vector_iterator22, cir.ptr , ["__begin1", init] {alignment = 8 : i64} +// CHECK: %6 = cir.alloca !ty_22struct2E__vector_iterator22, cir.ptr , ["__end1", init] {alignment = 8 : i64} +// CHECK: %7 = cir.alloca !cir.ptr, cir.ptr >, ["image", init] {alignment = 8 : i64} +// CHECK: cir.store %1, %4 : !cir.ptr, cir.ptr > +// CHECK: %8 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %9 = cir.call @_ZNSt6vectorI6tripleE5beginEv(%8) : (!cir.ptr) -> !ty_22struct2E__vector_iterator22 +// CHECK: cir.store %9, %5 : !ty_22struct2E__vector_iterator22, cir.ptr +// CHECK: %10 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %11 = cir.call @_ZNSt6vectorI6tripleE3endEv(%10) : (!cir.ptr) -> !ty_22struct2E__vector_iterator22 +// CHECK: cir.store %11, %6 : !ty_22struct2E__vector_iterator22, cir.ptr +// CHECK: cir.loop for(cond : { +// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool +// CHECK: cir.brcond %12 ^bb1, ^bb2 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: cir.yield continue +// CHECK: ^bb2: // pred: ^bb0 +// CHECK: cir.yield +// CHECK: }, step : { +// CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr +// CHECK: cir.yield +// CHECK: }) { +// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EdeEv(%5) : (!cir.ptr) -> !cir.ptr +// CHECK: cir.store %12, %7 : !cir.ptr, cir.ptr > +// CHECK: cir.scope { +// CHECK: %13 = cir.alloca !ty_22struct2Etriple22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %14 = cir.const(#cir.zero : !ty_22struct2Etriple22) : !ty_22struct2Etriple22 +// CHECK: cir.store %14, %13 : !ty_22struct2Etriple22, cir.ptr +// CHECK: %15 = "cir.struct_element_addr"(%13) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %16 = cir.const(1000024002 : i32) : i32 +// CHECK: cir.store %16, %15 : i32, cir.ptr +// CHECK: %17 = "cir.struct_element_addr"(%13) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %18 = "cir.struct_element_addr"(%13) <{member_name = "image"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %19 = cir.load %7 : cir.ptr >, !cir.ptr +// CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: } +// CHECK: cir.yield +// CHECK: } +// CHECK: } +// CHECK: cir.return diff --git a/clang/test/CIR/Inputs/std-cxx.h b/clang/test/CIR/Inputs/std-cxx.h new file mode 100644 index 000000000000..ca37aa0cd798 --- /dev/null +++ b/clang/test/CIR/Inputs/std-cxx.h @@ -0,0 +1,1261 @@ +// This header provides reduced versions of common standard library containers +// and whatnots. It's a copy from +// clang/test/Analysis/Inputs/system-header-simulator-cxx.h with some additions +// for ClangIR use cases found along the way. + +// Like the compiler, the static analyzer treats some functions differently if +// they come from a system header -- for example, it is assumed that system +// functions do not arbitrarily free() their parameters, and that some bugs +// found in system headers cannot be fixed by the user and should be +// suppressed. +#pragma clang system_header + +typedef unsigned char uint8_t; + +typedef __typeof__(sizeof(int)) size_t; +typedef __typeof__((char*)0-(char*)0) ptrdiff_t; +void *memmove(void *s1, const void *s2, size_t n); + +namespace std { + typedef size_t size_type; +#if __cplusplus >= 201103L + using nullptr_t = decltype(nullptr); +#endif +} + +namespace std { + struct input_iterator_tag { }; + struct output_iterator_tag { }; + struct forward_iterator_tag : public input_iterator_tag { }; + struct bidirectional_iterator_tag : public forward_iterator_tag { }; + struct random_access_iterator_tag : public bidirectional_iterator_tag { }; + + template struct iterator_traits { + typedef typename Iterator::difference_type difference_type; + typedef typename Iterator::value_type value_type; + typedef typename Iterator::pointer pointer; + typedef typename Iterator::reference reference; + typedef typename Iterator::iterator_category iterator_category; + }; +} + +template struct __vector_iterator { + typedef __vector_iterator iterator; + typedef __vector_iterator const_iterator; + + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef Ptr pointer; + typedef Ref reference; + typedef std::random_access_iterator_tag iterator_category; + + __vector_iterator(const Ptr p = 0) : ptr(p) {} + __vector_iterator(const iterator &rhs): ptr(rhs.base()) {} + __vector_iterator& operator++() { ++ ptr; return *this; } + __vector_iterator operator++(int) { + auto tmp = *this; + ++ ptr; + return tmp; + } + __vector_iterator operator--() { -- ptr; return *this; } + __vector_iterator operator--(int) { + auto tmp = *this; -- ptr; + return tmp; + } + __vector_iterator operator+(difference_type n) { + return ptr + n; + } + friend __vector_iterator operator+( + difference_type n, + const __vector_iterator &iter) { + return n + iter.ptr; + } + __vector_iterator operator-(difference_type n) { + return ptr - n; + } + __vector_iterator operator+=(difference_type n) { + return ptr += n; + } + __vector_iterator operator-=(difference_type n) { + return ptr -= n; + } + + template + difference_type operator-(const __vector_iterator &rhs); + + Ref operator*() const { return *ptr; } + Ptr operator->() const { return ptr; } + + Ref operator[](difference_type n) { + return *(ptr+n); + } + + bool operator==(const iterator &rhs) const { return ptr == rhs.ptr; } + bool operator==(const const_iterator &rhs) const { return ptr == rhs.ptr; } + + bool operator!=(const iterator &rhs) const { return ptr != rhs.ptr; } + bool operator!=(const const_iterator &rhs) const { return ptr != rhs.ptr; } + + const Ptr& base() const { return ptr; } + +private: + Ptr ptr; +}; + +template struct __deque_iterator { + typedef __deque_iterator iterator; + typedef __deque_iterator const_iterator; + + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef Ptr pointer; + typedef Ref reference; + typedef std::random_access_iterator_tag iterator_category; + + __deque_iterator(const Ptr p = 0) : ptr(p) {} + __deque_iterator(const iterator &rhs): ptr(rhs.base()) {} + __deque_iterator& operator++() { ++ ptr; return *this; } + __deque_iterator operator++(int) { + auto tmp = *this; + ++ ptr; + return tmp; + } + __deque_iterator operator--() { -- ptr; return *this; } + __deque_iterator operator--(int) { + auto tmp = *this; -- ptr; + return tmp; + } + __deque_iterator operator+(difference_type n) { + return ptr + n; + } + friend __deque_iterator operator+( + difference_type n, + const __deque_iterator &iter) { + return n + iter.ptr; + } + __deque_iterator operator-(difference_type n) { + return ptr - n; + } + __deque_iterator operator+=(difference_type n) { + return ptr += n; + } + __deque_iterator operator-=(difference_type n) { + return ptr -= n; + } + + Ref operator*() const { return *ptr; } + Ptr operator->() const { return ptr; } + + Ref operator[](difference_type n) { + return *(ptr+n); + } + + bool operator==(const iterator &rhs) const { return ptr == rhs.ptr; } + bool operator==(const const_iterator &rhs) const { return ptr == rhs.ptr; } + + bool operator!=(const iterator &rhs) const { return ptr != rhs.ptr; } + bool operator!=(const const_iterator &rhs) const { return ptr != rhs.ptr; } + + const Ptr& base() const { return ptr; } + +private: + Ptr ptr; +}; + +template struct __list_iterator { + typedef __list_iterator iterator; + typedef __list_iterator const_iterator; + + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef Ptr pointer; + typedef Ref reference; + typedef std::bidirectional_iterator_tag iterator_category; + + __list_iterator(T* it = 0) : item(it) {} + __list_iterator(const iterator &rhs): item(rhs.item) {} + __list_iterator& operator++() { item = item->next; return *this; } + __list_iterator operator++(int) { + auto tmp = *this; + item = item->next; + return tmp; + } + __list_iterator operator--() { item = item->prev; return *this; } + __list_iterator operator--(int) { + auto tmp = *this; + item = item->prev; + return tmp; + } + + Ref operator*() const { return item->data; } + Ptr operator->() const { return &item->data; } + + bool operator==(const iterator &rhs) const { return item == rhs->item; } + bool operator==(const const_iterator &rhs) const { return item == rhs->item; } + + bool operator!=(const iterator &rhs) const { return item != rhs->item; } + bool operator!=(const const_iterator &rhs) const { return item != rhs->item; } + + const T* &base() const { return item; } + + template + friend struct __list_iterator; + +private: + T* item; +}; + +template struct __fwdl_iterator { + typedef __fwdl_iterator iterator; + typedef __fwdl_iterator const_iterator; + + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef Ptr pointer; + typedef Ref reference; + typedef std::forward_iterator_tag iterator_category; + + __fwdl_iterator(T* it = 0) : item(it) {} + __fwdl_iterator(const iterator &rhs): item(rhs.item) {} + __fwdl_iterator& operator++() { item = item->next; return *this; } + __fwdl_iterator operator++(int) { + auto tmp = *this; + item = item->next; + return tmp; + } + Ref operator*() const { return item->data; } + Ptr operator->() const { return &item->data; } + + bool operator==(const iterator &rhs) const { return item == rhs->item; } + bool operator==(const const_iterator &rhs) const { return item == rhs->item; } + + bool operator!=(const iterator &rhs) const { return item != rhs->item; } + bool operator!=(const const_iterator &rhs) const { return item != rhs->item; } + + const T* &base() const { return item; } + + template + friend struct __fwdl_iterator; + +private: + T* item; +}; + +namespace std { + template + struct pair { + T1 first; + T2 second; + + pair() : first(), second() {} + pair(const T1 &a, const T2 &b) : first(a), second(b) {} + + template + pair(const pair &other) : first(other.first), + second(other.second) {} + }; + + typedef __typeof__(sizeof(int)) size_t; + + template class initializer_list; + + template< class T > struct remove_reference {typedef T type;}; + template< class T > struct remove_reference {typedef T type;}; + template< class T > struct remove_reference {typedef T type;}; + + template + typename remove_reference::type&& move(T&& a) { + typedef typename remove_reference::type&& RvalRef; + return static_cast(a); + } + + template + void swap(T &a, T &b) { + T c(std::move(a)); + a = std::move(b); + b = std::move(c); + } + + template + class vector { + T *_start; + T *_finish; + T *_end_of_storage; + + public: + typedef T value_type; + typedef size_t size_type; + typedef __vector_iterator iterator; + typedef __vector_iterator const_iterator; + + vector() : _start(0), _finish(0), _end_of_storage(0) {} + template + vector(InputIterator first, InputIterator last); + vector(const vector &other); + vector(vector &&other); + explicit vector(size_type count); + ~vector(); + + size_t size() const { + return size_t(_finish - _start); + } + + vector& operator=(const vector &other); + vector& operator=(vector &&other); + vector& operator=(std::initializer_list ilist); + + void assign(size_type count, const T &value); + template + void assign(InputIterator first, InputIterator last); + void assign(std::initializer_list ilist); + + void clear(); + + void push_back(const T &value); + void push_back(T &&value); + template + void emplace_back(Args&&... args); + void pop_back(); + + iterator insert(const_iterator position, const value_type &val); + iterator insert(const_iterator position, size_type n, + const value_type &val); + template + iterator insert(const_iterator position, InputIterator first, + InputIterator last); + iterator insert(const_iterator position, value_type &&val); + iterator insert(const_iterator position, initializer_list il); + + template + iterator emplace(const_iterator position, Args&&... args); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + + T &operator[](size_t n) { + return _start[n]; + } + + const T &operator[](size_t n) const { + return _start[n]; + } + + iterator begin() { return iterator(_start); } + const_iterator begin() const { return const_iterator(_start); } + const_iterator cbegin() const { return const_iterator(_start); } + iterator end() { return iterator(_finish); } + const_iterator end() const { return const_iterator(_finish); } + const_iterator cend() const { return const_iterator(_finish); } + T& front() { return *begin(); } + const T& front() const { return *begin(); } + T& back() { return *(end() - 1); } + const T& back() const { return *(end() - 1); } + }; + + template + class list { + struct __item { + T data; + __item *prev, *next; + } *_start, *_finish; + + public: + typedef T value_type; + typedef size_t size_type; + typedef __list_iterator<__item, T *, T &> iterator; + typedef __list_iterator<__item, const T *, const T &> const_iterator; + + list() : _start(0), _finish(0) {} + template + list(InputIterator first, InputIterator last); + list(const list &other); + list(list &&other); + ~list(); + + list& operator=(const list &other); + list& operator=(list &&other); + list& operator=(std::initializer_list ilist); + + void assign(size_type count, const T &value); + template + void assign(InputIterator first, InputIterator last); + void assign(std::initializer_list ilist); + + void clear(); + + void push_back(const T &value); + void push_back(T &&value); + template + void emplace_back(Args&&... args); + void pop_back(); + + void push_front(const T &value); + void push_front(T &&value); + template + void emplace_front(Args&&... args); + void pop_front(); + + iterator insert(const_iterator position, const value_type &val); + iterator insert(const_iterator position, size_type n, + const value_type &val); + template + iterator insert(const_iterator position, InputIterator first, + InputIterator last); + iterator insert(const_iterator position, value_type &&val); + iterator insert(const_iterator position, initializer_list il); + + template + iterator emplace(const_iterator position, Args&&... args); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + + iterator begin() { return iterator(_start); } + const_iterator begin() const { return const_iterator(_start); } + const_iterator cbegin() const { return const_iterator(_start); } + iterator end() { return iterator(_finish); } + const_iterator end() const { return const_iterator(_finish); } + const_iterator cend() const { return const_iterator(_finish); } + + T& front() { return *begin(); } + const T& front() const { return *begin(); } + T& back() { return *--end(); } + const T& back() const { return *--end(); } + }; + + template + class deque { + T *_start; + T *_finish; + T *_end_of_storage; + + public: + typedef T value_type; + typedef size_t size_type; + typedef __deque_iterator iterator; + typedef __deque_iterator const_iterator; + + deque() : _start(0), _finish(0), _end_of_storage(0) {} + template + deque(InputIterator first, InputIterator last); + deque(const deque &other); + deque(deque &&other); + ~deque(); + + size_t size() const { + return size_t(_finish - _start); + } + + deque& operator=(const deque &other); + deque& operator=(deque &&other); + deque& operator=(std::initializer_list ilist); + + void assign(size_type count, const T &value); + template + void assign(InputIterator first, InputIterator last); + void assign(std::initializer_list ilist); + + void clear(); + + void push_back(const T &value); + void push_back(T &&value); + template + void emplace_back(Args&&... args); + void pop_back(); + + void push_front(const T &value); + void push_front(T &&value); + template + void emplace_front(Args&&... args); + void pop_front(); + + iterator insert(const_iterator position, const value_type &val); + iterator insert(const_iterator position, size_type n, + const value_type &val); + template + iterator insert(const_iterator position, InputIterator first, + InputIterator last); + iterator insert(const_iterator position, value_type &&val); + iterator insert(const_iterator position, initializer_list il); + + template + iterator emplace(const_iterator position, Args&&... args); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + + T &operator[](size_t n) { + return _start[n]; + } + + const T &operator[](size_t n) const { + return _start[n]; + } + + iterator begin() { return iterator(_start); } + const_iterator begin() const { return const_iterator(_start); } + const_iterator cbegin() const { return const_iterator(_start); } + iterator end() { return iterator(_finish); } + const_iterator end() const { return const_iterator(_finish); } + const_iterator cend() const { return const_iterator(_finish); } + T& front() { return *begin(); } + const T& front() const { return *begin(); } + T& back() { return *(end() - 1); } + const T& back() const { return *(end() - 1); } + }; + + template + class forward_list { + struct __item { + T data; + __item *next; + } *_start; + + public: + typedef T value_type; + typedef size_t size_type; + typedef __fwdl_iterator<__item, T *, T &> iterator; + typedef __fwdl_iterator<__item, const T *, const T &> const_iterator; + + forward_list() : _start(0) {} + template + forward_list(InputIterator first, InputIterator last); + forward_list(const forward_list &other); + forward_list(forward_list &&other); + ~forward_list(); + + forward_list& operator=(const forward_list &other); + forward_list& operator=(forward_list &&other); + forward_list& operator=(std::initializer_list ilist); + + void assign(size_type count, const T &value); + template + void assign(InputIterator first, InputIterator last); + void assign(std::initializer_list ilist); + + void clear(); + + void push_front(const T &value); + void push_front(T &&value); + template + void emplace_front(Args&&... args); + void pop_front(); + + iterator insert_after(const_iterator position, const value_type &val); + iterator insert_after(const_iterator position, value_type &&val); + iterator insert_after(const_iterator position, size_type n, + const value_type &val); + template + iterator insert_after(const_iterator position, InputIterator first, + InputIterator last); + iterator insert_after(const_iterator position, + initializer_list il); + + template + iterator emplace_after(const_iterator position, Args&&... args); + + iterator erase_after(const_iterator position); + iterator erase_after(const_iterator first, const_iterator last); + + iterator begin() { return iterator(_start); } + const_iterator begin() const { return const_iterator(_start); } + const_iterator cbegin() const { return const_iterator(_start); } + iterator end() { return iterator(); } + const_iterator end() const { return const_iterator(); } + const_iterator cend() const { return const_iterator(); } + + T& front() { return *begin(); } + const T& front() const { return *begin(); } + }; + + template + class basic_string { + class Allocator {}; + + public: + basic_string() : basic_string(Allocator()) {} + explicit basic_string(const Allocator &alloc); + basic_string(size_type count, CharT ch, + const Allocator &alloc = Allocator()); + basic_string(const basic_string &other, + size_type pos, + const Allocator &alloc = Allocator()); + basic_string(const basic_string &other, + size_type pos, size_type count, + const Allocator &alloc = Allocator()); + basic_string(const CharT *s, size_type count, + const Allocator &alloc = Allocator()); + basic_string(const CharT *s, + const Allocator &alloc = Allocator()); + template + basic_string(InputIt first, InputIt last, + const Allocator &alloc = Allocator()); + basic_string(const basic_string &other); + basic_string(const basic_string &other, + const Allocator &alloc); + basic_string(basic_string &&other); + basic_string(basic_string &&other, + const Allocator &alloc); + basic_string(std::initializer_list ilist, + const Allocator &alloc = Allocator()); + template + basic_string(const T &t, size_type pos, size_type n, + const Allocator &alloc = Allocator()); + // basic_string(std::nullptr_t) = delete; + + ~basic_string(); + void clear(); + + basic_string &operator=(const basic_string &str); + basic_string &operator+=(const basic_string &str); + + const CharT *c_str() const; + const CharT *data() const; + CharT *data(); + + const char *begin() const; + const char *end() const; + + basic_string &append(size_type count, CharT ch); + basic_string &assign(size_type count, CharT ch); + basic_string &erase(size_type index, size_type count); + basic_string &insert(size_type index, size_type count, CharT ch); + basic_string &replace(size_type pos, size_type count, const basic_string &str); + void pop_back(); + void push_back(CharT ch); + void reserve(size_type new_cap); + void resize(size_type count); + void shrink_to_fit(); + void swap(basic_string &other); + }; + + typedef basic_string string; + typedef basic_string wstring; +#if __cplusplus >= 201103L + typedef basic_string u16string; + typedef basic_string u32string; +#endif + + class exception { + public: + exception() throw(); + virtual ~exception() throw(); + virtual const char *what() const throw() { + return 0; + } + }; + + class bad_alloc : public exception { + public: + bad_alloc() throw(); + bad_alloc(const bad_alloc&) throw(); + bad_alloc& operator=(const bad_alloc&) throw(); + virtual const char* what() const throw() { + return 0; + } + }; + + struct nothrow_t {}; + extern const nothrow_t nothrow; + + enum class align_val_t : size_t {}; + + // libc++'s implementation + template + class initializer_list + { + const _E* __begin_; + size_t __size_; + + initializer_list(const _E* __b, size_t __s) + : __begin_(__b), + __size_(__s) + {} + + public: + typedef _E value_type; + typedef const _E& reference; + typedef const _E& const_reference; + typedef size_t size_type; + + typedef const _E* iterator; + typedef const _E* const_iterator; + + initializer_list() : __begin_(0), __size_(0) {} + + size_t size() const {return __size_;} + const _E* begin() const {return __begin_;} + const _E* end() const {return __begin_ + __size_;} + }; + + template struct enable_if {}; + template struct enable_if {typedef _Tp type;}; + + template + struct integral_constant + { + static const _Tp value = __v; + typedef _Tp value_type; + typedef integral_constant type; + + operator value_type() const {return value;} + + value_type operator ()() const {return value;} + }; + + template + const _Tp integral_constant<_Tp, __v>::value; + + template + struct is_trivially_assignable + : integral_constant + { + }; + + typedef integral_constant true_type; + typedef integral_constant false_type; + + template struct is_const : public false_type {}; + template struct is_const<_Tp const> : public true_type {}; + + template struct is_reference : public false_type {}; + template struct is_reference<_Tp&> : public true_type {}; + + template struct is_same : public false_type {}; + template struct is_same<_Tp, _Tp> : public true_type {}; + + template ::value || is_reference<_Tp>::value > + struct __add_const {typedef _Tp type;}; + + template + struct __add_const<_Tp, false> {typedef const _Tp type;}; + + template struct add_const {typedef typename __add_const<_Tp>::type type;}; + + template struct remove_const {typedef _Tp type;}; + template struct remove_const {typedef _Tp type;}; + + template struct add_lvalue_reference {typedef _Tp& type;}; + + template struct is_trivially_copy_assignable + : public is_trivially_assignable::type, + typename add_lvalue_reference::type>::type> {}; + + template + OutputIter __copy(InputIter II, InputIter IE, OutputIter OI) { + while (II != IE) + *OI++ = *II++; + + return OI; + } + + template + inline + typename enable_if + < + is_same::type, _Up>::value && + is_trivially_copy_assignable<_Up>::value, + _Up* + >::type __copy(_Tp* __first, _Tp* __last, _Up* __result) { + size_t __n = __last - __first; + + if (__n > 0) + memmove(__result, __first, __n * sizeof(_Up)); + + return __result + __n; + } + + template + OutputIter copy(InputIter II, InputIter IE, OutputIter OI) { + return __copy(II, IE, OI); + } + + template + inline + _OutputIterator + __copy_backward(_BidirectionalIterator __first, _BidirectionalIterator __last, + _OutputIterator __result) + { + while (__first != __last) + *--__result = *--__last; + return __result; + } + + template + inline + typename enable_if + < + is_same::type, _Up>::value && + is_trivially_copy_assignable<_Up>::value, + _Up* + >::type __copy_backward(_Tp* __first, _Tp* __last, _Up* __result) { + size_t __n = __last - __first; + + if (__n > 0) + { + __result -= __n; + memmove(__result, __first, __n * sizeof(_Up)); + } + return __result; + } + + template + OutputIter copy_backward(InputIter II, InputIter IE, OutputIter OI) { + return __copy_backward(II, IE, OI); + } +} + +template +void __advance(BidirectionalIterator& it, Distance n, + std::bidirectional_iterator_tag) +#if !defined(STD_ADVANCE_INLINE_LEVEL) || STD_ADVANCE_INLINE_LEVEL > 2 +{ + if (n >= 0) while(n-- > 0) ++it; else while (n++<0) --it; +} +#else + ; +#endif + +template +void __advance(RandomAccessIterator& it, Distance n, + std::random_access_iterator_tag) +#if !defined(STD_ADVANCE_INLINE_LEVEL) || STD_ADVANCE_INLINE_LEVEL > 2 +{ + it += n; +} +#else + ; +#endif + +namespace std { + +template +void advance(InputIterator& it, Distance n) +#if !defined(STD_ADVANCE_INLINE_LEVEL) || STD_ADVANCE_INLINE_LEVEL > 1 +{ + __advance(it, n, typename InputIterator::iterator_category()); +} +#else + ; +#endif + +template +BidirectionalIterator +prev(BidirectionalIterator it, + typename iterator_traits::difference_type n = + 1) +#if !defined(STD_ADVANCE_INLINE_LEVEL) || STD_ADVANCE_INLINE_LEVEL > 0 +{ + advance(it, -n); + return it; +} +#else + ; +#endif + +template +ForwardIterator +next(ForwardIterator it, + typename iterator_traits::difference_type n = + 1) +#if !defined(STD_ADVANCE_INLINE_LEVEL) || STD_ADVANCE_INLINE_LEVEL > 0 +{ + advance(it, n); + return it; +} +#else + ; +#endif + + template + InputIt find(InputIt first, InputIt last, const T& value); + + template + ForwardIt find(ExecutionPolicy&& policy, ForwardIt first, ForwardIt last, + const T& value); + + template + InputIt find_if (InputIt first, InputIt last, UnaryPredicate p); + + template + ForwardIt find_if (ExecutionPolicy&& policy, ForwardIt first, ForwardIt last, + UnaryPredicate p); + + template + InputIt find_if_not (InputIt first, InputIt last, UnaryPredicate q); + + template + ForwardIt find_if_not (ExecutionPolicy&& policy, ForwardIt first, + ForwardIt last, UnaryPredicate q); + + template + InputIt find_first_of(InputIt first, InputIt last, + ForwardIt s_first, ForwardIt s_last); + + template + ForwardIt1 find_first_of (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last); + + template + InputIt find_first_of (InputIt first, InputIt last, + ForwardIt s_first, ForwardIt s_last, + BinaryPredicate p ); + + template + ForwardIt1 find_first_of (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last, + BinaryPredicate p ); + + template + InputIt find_end(InputIt first, InputIt last, + ForwardIt s_first, ForwardIt s_last); + + template + ForwardIt1 find_end (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last); + + template + InputIt find_end (InputIt first, InputIt last, + ForwardIt s_first, ForwardIt s_last, + BinaryPredicate p ); + + template + ForwardIt1 find_end (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last, + BinaryPredicate p ); + + template + ForwardIt lower_bound (ForwardIt first, ForwardIt last, const T& value); + + template + ForwardIt lower_bound (ForwardIt first, ForwardIt last, const T& value, + Compare comp); + + template + ForwardIt upper_bound (ForwardIt first, ForwardIt last, const T& value); + + template + ForwardIt upper_bound (ForwardIt first, ForwardIt last, const T& value, + Compare comp); + + template + ForwardIt1 search (ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last); + + template + ForwardIt1 search (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last); + + template + ForwardIt1 search (ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last, BinaryPredicate p); + + template + ForwardIt1 search (ExecutionPolicy&& policy, + ForwardIt1 first, ForwardIt1 last, + ForwardIt2 s_first, ForwardIt2 s_last, BinaryPredicate p); + + template + ForwardIt search (ForwardIt first, ForwardIt last, const Searcher& searcher); + + template + ForwardIt search_n (ForwardIt first, ForwardIt last, Size count, + const T& value); + + template + ForwardIt search_n (ExecutionPolicy&& policy, ForwardIt first, ForwardIt last, + Size count, const T& value); + + template + ForwardIt search_n (ForwardIt first, ForwardIt last, Size count, + const T& value, BinaryPredicate p); + + template + ForwardIt search_n (ExecutionPolicy&& policy, ForwardIt first, ForwardIt last, + Size count, const T& value, BinaryPredicate p); + + template + OutputIterator copy(InputIterator first, InputIterator last, + OutputIterator result); + +} + +#if __cplusplus >= 201103L +namespace std { +template // TODO: Implement the stub for deleter. +class unique_ptr { +public: + unique_ptr() noexcept {} + unique_ptr(T *) noexcept {} + unique_ptr(const unique_ptr &) noexcept = delete; + unique_ptr(unique_ptr &&) noexcept; + + T *get() const noexcept; + T *release() noexcept; + void reset(T *p = nullptr) noexcept; + void swap(unique_ptr &p) noexcept; + + typename std::add_lvalue_reference::type operator*() const; + T *operator->() const noexcept; + operator bool() const noexcept; + unique_ptr &operator=(unique_ptr &&p) noexcept; + unique_ptr &operator=(nullptr_t) noexcept; +}; + +// TODO :: Once the deleter parameter is added update with additional template parameter. +template +void swap(unique_ptr &x, unique_ptr &y) noexcept { + x.swap(y); +} + +template +bool operator==(const unique_ptr &x, const unique_ptr &y); + +template +bool operator!=(const unique_ptr &x, const unique_ptr &y); + +template +bool operator<(const unique_ptr &x, const unique_ptr &y); + +template +bool operator>(const unique_ptr &x, const unique_ptr &y); + +template +bool operator<=(const unique_ptr &x, const unique_ptr &y); + +template +bool operator>=(const unique_ptr &x, const unique_ptr &y); + +template +bool operator==(const unique_ptr &x, nullptr_t y); + +template +bool operator!=(const unique_ptr &x, nullptr_t y); + +template +bool operator<(const unique_ptr &x, nullptr_t y); + +template +bool operator>(const unique_ptr &x, nullptr_t y); + +template +bool operator<=(const unique_ptr &x, nullptr_t y); + +template +bool operator>=(const unique_ptr &x, nullptr_t y); + +template +bool operator==(nullptr_t x, const unique_ptr &y); + +template +bool operator!=(nullptr_t x, const unique_ptr &y); + +template +bool operator>(nullptr_t x, const unique_ptr &y); + +template +bool operator<(nullptr_t x, const unique_ptr &y); + +template +bool operator>=(nullptr_t x, const unique_ptr &y); + +template +bool operator<=(nullptr_t x, const unique_ptr &y); + +template +unique_ptr make_unique(Args &&...args); + +#if __cplusplus >= 202002L + +template +unique_ptr make_unique_for_overwrite(); + +#endif + +} // namespace std +#endif + +namespace std { +template +class basic_ostream; + +using ostream = basic_ostream; + +extern std::ostream cout; + +ostream &operator<<(ostream &, const string &); + +#if __cplusplus >= 202002L +template +ostream &operator<<(ostream &, const std::unique_ptr &); +#endif +} // namespace std + +#ifdef TEST_INLINABLE_ALLOCATORS +namespace std { + void *malloc(size_t); + void free(void *); +} +void* operator new(std::size_t size, const std::nothrow_t&) throw() { return std::malloc(size); } +void* operator new[](std::size_t size, const std::nothrow_t&) throw() { return std::malloc(size); } +void operator delete(void* ptr, const std::nothrow_t&) throw() { std::free(ptr); } +void operator delete[](void* ptr, const std::nothrow_t&) throw() { std::free(ptr); } +#else +// C++20 standard draft 17.6.1, from "Header synopsis", but with throw() +// instead of noexcept: + +void *operator new(std::size_t size); +void *operator new(std::size_t size, std::align_val_t alignment); +void *operator new(std::size_t size, const std::nothrow_t &) throw(); +void *operator new(std::size_t size, std::align_val_t alignment, + const std::nothrow_t &) throw(); +void operator delete(void *ptr) throw(); +void operator delete(void *ptr, std::size_t size) throw(); +void operator delete(void *ptr, std::align_val_t alignment) throw(); +void operator delete(void *ptr, std::size_t size, std::align_val_t alignment) throw(); +void operator delete(void *ptr, const std::nothrow_t &)throw(); +void operator delete(void *ptr, std::align_val_t alignment, + const std::nothrow_t &)throw(); +void *operator new[](std::size_t size); +void *operator new[](std::size_t size, std::align_val_t alignment); +void *operator new[](std::size_t size, const std::nothrow_t &) throw(); +void *operator new[](std::size_t size, std::align_val_t alignment, + const std::nothrow_t &) throw(); +void operator delete[](void *ptr) throw(); +void operator delete[](void *ptr, std::size_t size) throw(); +void operator delete[](void *ptr, std::align_val_t alignment) throw(); +void operator delete[](void *ptr, std::size_t size, std::align_val_t alignment) throw(); +void operator delete[](void *ptr, const std::nothrow_t &) throw(); +void operator delete[](void *ptr, std::align_val_t alignment, + const std::nothrow_t &) throw(); +#endif + +void* operator new (std::size_t size, void* ptr) throw() { return ptr; }; +void* operator new[] (std::size_t size, void* ptr) throw() { return ptr; }; +void operator delete (void* ptr, void*) throw() {}; +void operator delete[] (void* ptr, void*) throw() {}; + +namespace __cxxabiv1 { +extern "C" { +extern char *__cxa_demangle(const char *mangled_name, + char *output_buffer, + size_t *length, + int *status); +}} +namespace abi = __cxxabiv1; + +namespace std { + template + bool is_sorted(ForwardIt first, ForwardIt last); + + template + void nth_element(RandomIt first, RandomIt nth, RandomIt last); + + template + void partial_sort(RandomIt first, RandomIt middle, RandomIt last); + + template + void sort (RandomIt first, RandomIt last); + + template + void stable_sort(RandomIt first, RandomIt last); + + template + BidirIt partition(BidirIt first, BidirIt last, UnaryPredicate p); + + template + BidirIt stable_partition(BidirIt first, BidirIt last, UnaryPredicate p); +} + +namespace std { + +template< class T = void > +struct less; + +template< class T > +struct allocator; + +template< class Key > +struct hash; + +template< + class Key, + class Compare = std::less, + class Alloc = std::allocator +> class set { + public: + set(initializer_list __list) {} + + class iterator { + public: + iterator(Key *key): ptr(key) {} + iterator& operator++() { ++ptr; return *this; } + bool operator!=(const iterator &other) const { return ptr != other.ptr; } + const Key &operator*() const { return *ptr; } + private: + Key *ptr; + }; + + public: + Key *val; + iterator begin() const { return iterator(val); } + iterator end() const { return iterator(val + 1); } +}; + +template< + class Key, + class Hash = std::hash, + class Compare = std::less, + class Alloc = std::allocator +> class unordered_set { + public: + unordered_set(initializer_list __list) {} + + class iterator { + public: + iterator(Key *key): ptr(key) {} + iterator& operator++() { ++ptr; return *this; } + bool operator!=(const iterator &other) const { return ptr != other.ptr; } + const Key &operator*() const { return *ptr; } + private: + Key *ptr; + }; + + public: + Key *val; + iterator begin() const { return iterator(val); } + iterator end() const { return iterator(val + 1); } +}; + +namespace execution { +class sequenced_policy {}; +} + +template struct equal_to {}; + +template > +class default_searcher { +public: + default_searcher (ForwardIt pat_first, + ForwardIt pat_last, + BinaryPredicate pred = BinaryPredicate()); + template + std::pair + operator()( ForwardIt2 first, ForwardIt2 last ) const; +}; + +template class packaged_task; +template class packaged_task { + // TODO: Add some actual implementation. +}; + +} // namespace std From 20ee6ced4c769117d9d4552df72776465cb74527 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 18 May 2023 14:15:18 -0700 Subject: [PATCH 0956/2301] [CIR][CIRGen][NFC] Populate evaluation order classification (not yet used) --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index f77ad69742c8..b4079ff4013b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -839,7 +839,24 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, // destruction order is not necessarily reverse construction order. // FIXME: Revisit this based on C++ committee response to unimplementability. EvaluationOrder Order = EvaluationOrder::Default; - assert(!dyn_cast(E) && "Operators NYI"); + if (auto *OCE = dyn_cast(E)) { + if (OCE->isAssignmentOp()) + Order = EvaluationOrder::ForceRightToLeft; + else { + switch (OCE->getOperator()) { + case OO_LessLess: + case OO_GreaterGreater: + case OO_AmpAmp: + case OO_PipePipe: + case OO_Comma: + case OO_ArrowStar: + Order = EvaluationOrder::ForceLeftToRight; + break; + default: + break; + } + } + } buildCallArgs(Args, dyn_cast(FnType), E->arguments(), E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); From eba07c063140d84eb26eb930a5f51ab126aeb820 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 19 May 2023 13:59:29 -0700 Subject: [PATCH 0957/2301] [CIR][CIRGen] BoolAttr: introduce CIR specific one and teach CIRGen to use it --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 1 + .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 19 ++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 +++-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 8 ++--- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 5 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 11 ++++++- .../CIR/Dialect/Transforms/MergeCleanups.cpp | 4 +-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11 ++++++- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 4 ++- clang/test/CIR/CodeGen/basic.cpp | 4 +-- clang/test/CIR/CodeGen/coro-task.cpp | 2 +- clang/test/CIR/IR/branch.cir | 6 ++-- clang/test/CIR/IR/invalid.cir | 30 ++++++++++++++----- clang/test/CIR/IR/loop.cir | 10 ++++--- clang/test/CIR/Lowering/ThroughMLIR/bool.cir | 4 ++- clang/test/CIR/Lowering/bool.cir | 5 +++- clang/test/CIR/Transforms/merge-cleanups.cir | 6 ++-- 17 files changed, 104 insertions(+), 34 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index 94599cadcd39..3ab044a9f59c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -35,6 +35,7 @@ namespace mlir { namespace cir { class ArrayType; class StructType; +class BoolType; } // namespace cir } // namespace mlir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index aa8b5f03a2d8..7d663c3fc1ae 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -41,6 +41,25 @@ def NullAttr : CIR_Attr<"Null", "null", [TypedAttrInterface]> { let assemblyFormat = [{}]; } +//===----------------------------------------------------------------------===// +// BoolAttr +//===----------------------------------------------------------------------===// + +def CIR_BoolAttr : CIR_Attr<"Bool", "bool", [TypedAttrInterface]> { + let summary = "Represent true/false for !cir.bool types"; + let description = [{ + The BoolAttr represents a 'true' or 'false' value. + }]; + + let parameters = (ins AttributeSelfTypeParameter< + "", "mlir::cir::BoolType">:$type, + "bool":$value); + + let assemblyFormat = [{ + `<` $value `>` + }]; +} + //===----------------------------------------------------------------------===// // ZeroAttr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 2c2e70720603..737c3c9140c7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -92,6 +92,10 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return mlir::cir::ZeroAttr::get(getContext(), t); } + mlir::cir::BoolAttr getCIRBoolAttr(bool state) { + return mlir::cir::BoolAttr::get(getContext(), getBoolTy(), state); + } + mlir::TypedAttr getNullPtrAttr(mlir::Type t) { assert(t.isa() && "expected cir.ptr"); return mlir::cir::NullAttr::get(getContext(), t); @@ -180,8 +184,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::IntegerAttr::get(int64Ty, C)); } mlir::Value getBool(bool state, mlir::Location loc) { - return create( - loc, getBoolTy(), mlir::BoolAttr::get(getContext(), state)); + return create(loc, getBoolTy(), + getCIRBoolAttr(state)); } // Creates constant nullptr for pointer type ty. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index c8028e42d149..ca216711bb33 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -123,7 +123,7 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { mlir::Type Ty = CGF.getCIRType(E->getType()); return Builder.create( - CGF.getLoc(E->getExprLoc()), Ty, Builder.getBoolAttr(E->getValue())); + CGF.getLoc(E->getExprLoc()), Ty, Builder.getCIRBoolAttr(E->getValue())); } mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) { @@ -284,9 +284,9 @@ class ScalarExprEmitter : public StmtVisitor { // NOTE: We likely want the code below, but loading/store booleans need to // work first. See CIRGenFunction::buildFromMemory(). - value = Builder.create(CGF.getLoc(E->getExprLoc()), - CGF.getCIRType(type), - Builder.getBoolAttr(true)); + value = Builder.create( + CGF.getLoc(E->getExprLoc()), CGF.getCIRType(type), + Builder.getCIRBoolAttr(true)); } else if (type->isIntegerType()) { // QualType promotedType; bool canPerformLossyDemotionCheck = false; diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index c6fe563ae283..411133063b00 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -786,9 +786,10 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { // scalar type. condVal = evaluateExprAsBool(S.getCond()); } else { + auto boolTy = mlir::cir::BoolType::get(b.getContext()); condVal = b.create( - loc, mlir::cir::BoolType::get(b.getContext()), - b.getBoolAttr(true)); + loc, boolTy, + mlir::cir::BoolAttr::get(b.getContext(), boolTy, true)); } if (buildLoopCondYield(b, loc, condVal).failed()) loopRes = mlir::failure(); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index b215ddd69d95..d610db9dfa72 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -47,6 +47,15 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { return AliasResult::NoAlias; } + + AliasResult getAlias(Attribute attr, raw_ostream &os) const final { + if (auto boolAttr = attr.dyn_cast()) { + os << (boolAttr.getValue() ? "true" : "false"); + return AliasResult::FinalAlias; + } + + return AliasResult::NoAlias; + } }; } // namespace @@ -151,7 +160,7 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return op->emitOpError("zero expects struct type"); } - if (attrType.isa()) { + if (attrType.isa()) { if (!opType.isa()) return op->emitOpError("result type (") << opType << ") must be '!cir.bool' for '" << attrType << "'"; diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index ce6b506100f7..71e4aa0c1761 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -106,14 +106,14 @@ struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { // TODO: leverage SCCP to get improved results. auto cstOp = dyn_cast(brCondOp.getCond().getDefiningOp()); - if (!cstOp || !cstOp.getValue().isa() || + if (!cstOp || !cstOp.getValue().isa() || !trivialYield(brCondOp.getDestTrue()) || !trivialYield(brCondOp.getDestFalse())) return failure(); // If the condition is constant, no need to use brcond, just yield // properly, "yield" for false and "yield continue" for true. - auto boolAttr = cstOp.getValue().cast(); + auto boolAttr = cstOp.getValue().cast(); auto *falseBlock = brCondOp.getDestFalse(); auto *trueBlock = brCondOp.getDestTrue(); auto *currBlock = brCondOp.getOperation()->getBlock(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3ffe0f51613b..d5362a9d2ac1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -464,8 +464,17 @@ class CIRConstantLowering mlir::LogicalResult matchAndRewrite(mlir::cir::ConstantOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { + mlir::Attribute attr = op.getValue(); + if (op.getType().isa()) { + if (op.getValue() == + mlir::cir::BoolAttr::get( + getContext(), ::mlir::cir::BoolType::get(getContext()), true)) + attr = mlir::BoolAttr::get(getContext(), true); + else + attr = mlir::BoolAttr::get(getContext(), false); + } rewriter.replaceOpWithNewOp( - op, getTypeConverter()->convertType(op.getType()), op.getValue()); + op, getTypeConverter()->convertType(op.getType()), attr); return mlir::LogicalResult::success(); } }; diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 9c79a6dc96c6..6f620ae5a304 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -156,7 +156,9 @@ class CIRConstantLowering mlir::Type type = mlir::IntegerType::get(getContext(), 8, mlir::IntegerType::Signless); mlir::TypedAttr IntegerAttr; - if (op.getValue() == mlir::BoolAttr::get(getContext(), true)) + if (op.getValue() == + mlir::cir::BoolAttr::get( + getContext(), ::mlir::cir::BoolType::get(getContext()), true)) IntegerAttr = mlir::IntegerAttr::get(type, 1); else IntegerAttr = mlir::IntegerAttr::get(type, 0); diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index a5a77f13396e..e840f32f864b 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -58,8 +58,8 @@ int *p2() { void b0() { bool x = true, y = false; } // CHECK: cir.func @_Z2b0v() { -// CHECK: %2 = cir.const(true) : !cir.bool -// CHECK: %3 = cir.const(false) : !cir.bool +// CHECK: %2 = cir.const(#true) : !cir.bool +// CHECK: %3 = cir.const(#false) : !cir.bool void b1(int a) { bool b = a; } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 7b9b910c24e0..49e7f3fc331d 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -265,7 +265,7 @@ VoidTask silly_task() { // Call builtin coro end and return // CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const(#cir.null : !cir.ptr) -// CHECK-NEXT: %[[#CoroEndArg1:]] = cir.const(false) : !cir.bool +// CHECK-NEXT: %[[#CoroEndArg1:]] = cir.const(#false) : !cir.bool // CHECK-NEXT: = cir.call @__builtin_coro_end(%[[#CoroEndArg0]], %[[#CoroEndArg1]]) // CHECK: %[[#Tmp1:]] = cir.load %[[#VoidTaskAddr]] diff --git a/clang/test/CIR/IR/branch.cir b/clang/test/CIR/IR/branch.cir index b12f4a16db03..6d2f0565a44a 100644 --- a/clang/test/CIR/IR/branch.cir +++ b/clang/test/CIR/IR/branch.cir @@ -1,10 +1,12 @@ // RUN: cir-tool %s | FileCheck %s +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool cir.func @b0() { cir.scope { cir.loop while(cond : { - %0 = cir.const(true) : !cir.bool + %0 = cir.const(#true) : !cir.bool cir.brcond %0 ^bb1, ^bb2 ^bb1: cir.yield continue @@ -24,7 +26,7 @@ cir.func @b0() { // CHECK: cir.func @b0 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.const(true) : !cir.bool +// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: cir.yield continue diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index de3c626671c9..719a0c5b0ab7 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -9,16 +9,20 @@ cir.func @p0() { // ----- -// expected-error@+2 {{'cir.const' op result type ('i32') must be '!cir.bool' for 'true'}} +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +// expected-error@+2 {{op result type ('i32') must be '!cir.bool' for '#cir.bool : !cir.bool'}} cir.func @b0() { - %1 = cir.const(true) : i32 + %1 = cir.const(#true) : i32 cir.return } // ----- +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool cir.func @if0() { - %0 = cir.const(true) : !cir.bool + %0 = cir.const(#true) : !cir.bool // expected-error@+1 {{'cir.if' op region control flow edge from Region #0 to parent results: source has 1 operands, but target successor needs 0}} cir.if %0 { %6 = cir.const(3 : i32) : i32 @@ -29,8 +33,10 @@ cir.func @if0() { // ----- +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool cir.func @yield0() { - %0 = cir.const(true) : !cir.bool + %0 = cir.const(#true) : !cir.bool cir.if %0 { // expected-error {{custom op 'cir.if' expected at least one block with cir.yield or cir.return}} cir.br ^a ^a: @@ -40,8 +46,10 @@ cir.func @yield0() { // ----- +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool cir.func @yieldfallthrough() { - %0 = cir.const(true) : !cir.bool + %0 = cir.const(#true) : !cir.bool cir.if %0 { cir.yield fallthrough // expected-error {{'cir.yield' op fallthrough only expected within 'cir.switch'}} } @@ -50,8 +58,10 @@ cir.func @yieldfallthrough() { // ----- +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool cir.func @yieldbreak() { - %0 = cir.const(true) : !cir.bool + %0 = cir.const(#true) : !cir.bool cir.if %0 { cir.yield break // expected-error {{shall be dominated by 'cir.loop' or 'cir.switch'}} } @@ -60,8 +70,10 @@ cir.func @yieldbreak() { // ----- +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool cir.func @yieldcontinue() { - %0 = cir.const(true) : !cir.bool + %0 = cir.const(#true) : !cir.bool cir.if %0 { cir.yield continue // expected-error {{shall be dominated by 'cir.loop'}} } @@ -137,10 +149,12 @@ cir.func @cast4(%p: !cir.ptr) { // ----- +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool cir.func @b0() { cir.scope { cir.loop while(cond : { // expected-error {{cond region must be terminated with 'cir.yield' or 'cir.yield continue'}} - %0 = cir.const(true) : !cir.bool + %0 = cir.const(#true) : !cir.bool cir.brcond %0 ^bb1, ^bb2 ^bb1: cir.yield break diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 951d8e6f2fc4..97386ec60276 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -1,4 +1,6 @@ // RUN: cir-tool %s | FileCheck %s +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool cir.func @l0() { %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} @@ -28,7 +30,7 @@ cir.func @l0() { %5 = cir.const(1 : i32) : i32 %6 = cir.binop(add, %4, %5) : i32 cir.store %6, %0 : i32, cir.ptr - %7 = cir.const(true) : !cir.bool + %7 = cir.const(#true) : !cir.bool cir.if %7 { cir.yield break } @@ -55,7 +57,7 @@ cir.func @l0() { %5 = cir.const(1 : i32) : i32 %6 = cir.binop(add, %4, %5) : i32 cir.store %6, %0 : i32, cir.ptr - %7 = cir.const(true) : !cir.bool + %7 = cir.const(#true) : !cir.bool cir.if %7 { cir.yield continue } @@ -110,7 +112,7 @@ cir.func @l0() { // CHECK-NEXT: %5 = cir.const(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr -// CHECK-NEXT: %7 = cir.const(true) : !cir.bool +// CHECK-NEXT: %7 = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.if %7 { // CHECK-NEXT: cir.yield break // CHECK-NEXT: } @@ -133,7 +135,7 @@ cir.func @l0() { // CHECK-NEXT: %5 = cir.const(1 : i32) : i32 // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 // CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr -// CHECK-NEXT: %7 = cir.const(true) : !cir.bool +// CHECK-NEXT: %7 = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.if %7 { // CHECK-NEXT: cir.yield continue // CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir index 8a95f54118c4..954619cb5367 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir @@ -1,10 +1,12 @@ // RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool module { cir.func @foo() { %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} - %1 = cir.const(true) : !cir.bool + %1 = cir.const(#true) : !cir.bool cir.store %1, %0 : !cir.bool, cir.ptr cir.return } diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 9067e75bbf9c..ab58f792b6e5 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -1,9 +1,12 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool + module { cir.func @foo() { - %1 = cir.const(true) : !cir.bool + %1 = cir.const(#true) : !cir.bool %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} cir.store %1, %0 : !cir.bool, cir.ptr cir.return diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index e4dd5197ef2e..2a860af8c091 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -1,6 +1,8 @@ // RUN: cir-tool %s -cir-merge-cleanups -o %t.out.cir // RUN: FileCheck --input-file=%t.out.cir %s +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool module { cir.func @sw1(%arg0: i32, %arg1: i32) { %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} @@ -61,7 +63,7 @@ module { cir.func @l0() { cir.scope { cir.loop while(cond : { - %0 = cir.const(true) : !cir.bool + %0 = cir.const(#true) : !cir.bool cir.brcond %0 ^bb1, ^bb2 ^bb1: cir.yield continue @@ -81,7 +83,7 @@ module { cir.func @l1() { cir.scope { cir.loop while(cond : { - %0 = cir.const(false) : !cir.bool + %0 = cir.const(#false) : !cir.bool cir.brcond %0 ^bb1, ^bb2 ^bb1: cir.yield continue From 74181858167c96aeedac482f8b5c990d963d5ef6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 19 May 2023 14:00:16 -0700 Subject: [PATCH 0958/2301] [CIR][CIRGen] buildAutoVarInit: add bool support --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 13 ++++++++++--- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 11 ++++++++--- clang/lib/CIR/CodeGen/CIRGenValue.h | 1 + clang/test/CIR/CodeGen/basic.cpp | 13 +++++++++++++ 4 files changed, 32 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index c7737156cd6f..f624919cc8f0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -252,12 +252,19 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { return; } - if (!emission.IsConstantAggregate) - llvm_unreachable("NYI"); - // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. auto typedConstant = constant.dyn_cast(); assert(typedConstant && "expected typed attribute"); + if (!emission.IsConstantAggregate) { + // For simple scalar/complex initialization, store the value directly. + LValue lv = makeAddrLValue(Loc, type); + assert(Init && "expected initializer"); + auto initLoc = getLoc(Init->getSourceRange()); + lv.setNonGC(true); + return buildStoreThroughLValue( + RValue::get(builder.getConstant(initLoc, typedConstant)), lv); + } + emitStoresForConstant(CGM, D, Loc, type.isVolatileQualified(), builder, typedConstant, /*IsAutoInit=*/false); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index e8553e27357b..2bfc764800de 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1379,7 +1379,8 @@ mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, // Zero-extend bool. auto typed = C.dyn_cast(); if (typed && typed.getType().isa()) { - assert(0 && "not implemented"); + // Already taken care given that bool values coming from + // integers only carry true/false. } return C; @@ -1413,6 +1414,7 @@ mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *E, QualType T) { mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, QualType DestType) { + auto &builder = CGM.getBuilder(); switch (Value.getKind()) { case APValue::None: case APValue::Indeterminate: @@ -1421,7 +1423,10 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, assert(0 && "not implemented"); case APValue::Int: { mlir::Type ty = CGM.getCIRType(DestType); - return CGM.getBuilder().getIntegerAttr(ty, Value.getInt()); + if (ty.isa()) + return builder.getCIRBoolAttr(Value.getInt().getZExtValue()); + assert(ty.isa() && "expected integral type"); + return builder.getIntegerAttr(ty, Value.getInt()); } case APValue::Float: { const llvm::APFloat &Init = Value.getFloat(); @@ -1431,7 +1436,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, assert(0 && "not implemented"); else { mlir::Type ty = CGM.getCIRType(DestType); - return CGM.getBuilder().getFloatAttr(ty, Init); + return builder.getFloatAttr(ty, Init); } } case APValue::Array: { diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 9d4fad6cdc95..ea8541c031cb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -223,6 +223,7 @@ class LValue { } bool isNonGC() const { return NonGC; } + void setNonGC(bool Value) { NonGC = Value; } bool isNontemporal() const { return Nontemporal; } diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index e840f32f864b..685af4ddbabe 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -149,6 +149,19 @@ struct regs { void use_regs() { regs r; } } +void x() { + const bool b0 = true; + const bool b1 = false; +} + +// CHECK: cir.func @_Z1xv() { +// CHECK: %0 = cir.alloca !cir.bool, cir.ptr , ["b0", init] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !cir.bool, cir.ptr , ["b1", init] {alignment = 1 : i64} +// CHECK: %2 = cir.const(#true) : !cir.bool +// CHECK: cir.store %2, %0 : !cir.bool, cir.ptr +// CHECK: %3 = cir.const(#false) : !cir.bool +// CHECK: cir.store %3, %1 : !cir.bool, cir.ptr + // CHECK-DAG: #[[locScope]] = loc(fused[#[[locScopeA:loc[0-9]+]], #[[locScopeB:loc[0-9]+]]]) // CHECK-DAG: #[[locScopeA]] = loc("{{.*}}basic.cpp":27:3) // CHECK-DAG: #[[locScopeB]] = loc("{{.*}}basic.cpp":31:3) \ No newline at end of file From a07afcd8da10a53e14d01bee760c3450a0eccb17 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 19 May 2023 15:19:08 -0700 Subject: [PATCH 0959/2301] [CIR][CIRGen][NFC] Add more unimplemented bits and fix warning --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 12 +++++++----- clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h | 1 + 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 411133063b00..2fd616356932 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -374,16 +374,18 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { if (S.getConditionVariable()) buildDecl(*S.getConditionVariable()); - // If the condition constant folds and can be elided, try to avoid - // emitting the condition and the dead arm of the if/else. - // FIXME: should this be done as part of a constant folder pass instead? + // During LLVM codegen, if the condition constant folds and can be elided, + // it tries to avoid emitting the condition and the dead arm of the if/else. + // TODO(cir): we skip this in CIRGen, but should implement this as part of + // SSCP or a specific CIR pass. bool CondConstant; if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, S.isConstexpr())) { - llvm_unreachable("ConstantFoldsToSimpleInteger NYI"); + assert(!UnimplementedFeature::constantFoldsToSimpleInteger()); } - // TODO: PGO and likelihood. + assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); + assert(!UnimplementedFeature::incrementProfileCounter()); auto ifLoc = getIfLocs(*this, S.getThen(), S.getElse()); return buildIfOnBoolExpr(S.getCond(), ifLoc, S.getThen(), S.getElse()); }; diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index e461b8ad383d..7f8b7fd450fb 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -91,6 +91,7 @@ struct UnimplementedFeature { static bool addHeapAllocSiteMetadata() { return false; } static bool loopInfoStack() { return false; } static bool requiresCleanups() { return false; } + static bool constantFoldsToSimpleInteger() { return false; } }; } // namespace cir From 4b6fc75d2206f99d22055ffc1684ad31a6a5d9a8 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sat, 13 May 2023 16:59:54 -0300 Subject: [PATCH 0960/2301] [CIR] Implement cir.int type and attribute Replaces the usage of the building integer types by a dialect-specific integer with arbitrary size and signedness. Since building integer attributes requires builtin integer types, a cir.int attribute was also created. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 30 +++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 27 +++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 34 ++- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 14 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 20 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 15 +- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 4 + clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 11 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 71 ++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 20 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 87 ++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 205 +++++++++--------- clang/test/CIR/CodeGen/String.cpp | 46 ++-- clang/test/CIR/CodeGen/agg-init.cpp | 26 +-- clang/test/CIR/CodeGen/array.cpp | 42 ++-- clang/test/CIR/CodeGen/assign-operator.cpp | 22 +- clang/test/CIR/CodeGen/atomic.cpp | 2 +- clang/test/CIR/CodeGen/basic.c | 48 ++-- clang/test/CIR/CodeGen/basic.cpp | 96 ++++---- clang/test/CIR/CodeGen/binassign.cpp | 2 +- clang/test/CIR/CodeGen/binop.cpp | 20 +- clang/test/CIR/CodeGen/bitfields.cpp | 2 +- clang/test/CIR/CodeGen/call.c | 56 ++--- clang/test/CIR/CodeGen/cast.cpp | 26 +-- clang/test/CIR/CodeGen/cmp.cpp | 12 +- clang/test/CIR/CodeGen/comma.cpp | 16 +- clang/test/CIR/CodeGen/coro-task.cpp | 38 ++-- clang/test/CIR/CodeGen/ctor-alias.cpp | 10 +- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 4 +- clang/test/CIR/CodeGen/ctor.cpp | 2 +- clang/test/CIR/CodeGen/dtors.cpp | 8 +- clang/test/CIR/CodeGen/fullexpr.cpp | 16 +- clang/test/CIR/CodeGen/globals.c | 8 +- clang/test/CIR/CodeGen/globals.cpp | 100 ++++----- clang/test/CIR/CodeGen/goto.cpp | 42 ++-- clang/test/CIR/CodeGen/inc-dec.cpp | 24 +- clang/test/CIR/CodeGen/lambda.cpp | 105 ++++----- clang/test/CIR/CodeGen/literals.c | 4 +- clang/test/CIR/CodeGen/literals.cpp | 2 +- clang/test/CIR/CodeGen/loop-scope.cpp | 16 +- clang/test/CIR/CodeGen/loop.cpp | 96 ++++---- clang/test/CIR/CodeGen/predefined.cpp | 16 +- clang/test/CIR/CodeGen/rangefor.cpp | 22 +- clang/test/CIR/CodeGen/return.cpp | 14 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 44 ++-- clang/test/CIR/CodeGen/store.c | 10 +- clang/test/CIR/CodeGen/struct.c | 4 +- clang/test/CIR/CodeGen/struct.cpp | 52 ++--- clang/test/CIR/CodeGen/switch.cpp | 80 +++---- clang/test/CIR/CodeGen/ternary.cpp | 68 +++--- clang/test/CIR/CodeGen/types.c | 24 +- clang/test/CIR/CodeGen/unary-deref.cpp | 2 +- clang/test/CIR/CodeGen/unary.cpp | 119 +++++----- clang/test/CIR/CodeGen/union.cpp | 9 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 4 +- clang/test/CIR/IR/cast.cir | 23 +- clang/test/CIR/IR/cir-ops.cir | 94 ++++---- clang/test/CIR/IR/global.cir | 58 ++--- clang/test/CIR/IR/int.cir | 39 ++++ clang/test/CIR/IR/invalid.cir | 56 ++++- clang/test/CIR/IR/ptr_stride.cir | 25 ++- clang/test/CIR/IR/switch.cir | 17 +- .../test/CIR/Lowering/binop-unsigned-int.cir | 91 ++++---- clang/test/CIR/Lowering/branch.cir | 11 +- clang/test/CIR/Lowering/cast.cir | 75 ++++--- clang/test/CIR/Lowering/dot.cir | 35 +-- clang/test/CIR/Lowering/for.cir | 21 +- clang/test/CIR/Lowering/globals.cir | 125 ++++++----- clang/test/CIR/Lowering/goto.cir | 23 +- clang/test/CIR/Lowering/if.cir | 15 +- clang/test/CIR/Lowering/loadstorealloca.cir | 13 +- clang/test/CIR/Lowering/ptrstride.cir | 16 +- clang/test/CIR/Lowering/scope.cir | 7 +- clang/test/CIR/Lowering/unary-inc-dec.cir | 24 +- clang/test/CIR/Lowering/unary-not.cir | 22 +- clang/test/CIR/Lowering/unary-plus-minus.cir | 24 +- clang/test/CIR/Transforms/merge-cleanups.cir | 91 ++++---- 80 files changed, 1606 insertions(+), 1206 deletions(-) create mode 100644 clang/test/CIR/IR/int.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 7d663c3fc1ae..df9d16d751f5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -142,6 +142,36 @@ def ConstStructAttr : CIR_Attr<"ConstStruct", "const_struct", let genVerifyDecl = 1; } +//===----------------------------------------------------------------------===// +// IntegerAttr +//===----------------------------------------------------------------------===// + +def IntAttr : CIR_Attr<"Int", "int", [TypedAttrInterface]> { + let summary = "An Attribute containing a integer value"; + let description = [{ + An integer attribute is a literal attribute that represents an integral + value of the specified integer type. + }]; + let parameters = (ins AttributeSelfTypeParameter<"">:$type, "APInt":$value); + let builders = [ + AttrBuilderWithInferredContext<(ins "Type":$type, + "const APInt &":$value), [{ + return $_get(type.getContext(), type, value); + }]>, + AttrBuilderWithInferredContext<(ins "Type":$type, "int64_t":$value), [{ + IntType intType = type.cast(); + mlir::APInt apValue(intType.getWidth(), value, intType.isSigned()); + return $_get(intType.getContext(), intType, apValue); + }]>, + ]; + let extraClassDeclaration = [{ + int64_t getSInt() const { return getValue().getSExtValue(); } + uint64_t getUInt() const { return getValue().getZExtValue(); } + }]; + let genVerifyDecl = 1; + let hasCustomAssemblyFormat = 1; +} + //===----------------------------------------------------------------------===// // SignedOverflowBehaviorAttr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 78bea7e683d7..a3873ac76db2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -111,11 +111,11 @@ def PtrStrideOp : CIR_Op<"ptr_stride", ``` }]; - let arguments = (ins AnyType:$base, AnyInteger:$stride); + let arguments = (ins AnyType:$base, CIR_IntType:$stride); let results = (outs AnyType:$result); let assemblyFormat = [{ - `(` $base `:` type($base) `,` $stride `:` type($stride) `)` + `(` $base `:` type($base) `,` $stride `:` qualified(type($stride)) `)` `,` type($result) attr-dict }]; @@ -875,7 +875,7 @@ def SwitchOp : CIR_Op<"switch", ``` }]; - let arguments = (ins AnyInteger:$condition, + let arguments = (ins CIR_IntType:$condition, OptionalAttr:$cases); let regions = (region VariadicRegion:$regions); diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index a02af7636875..03f45838638f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -26,6 +26,33 @@ class CIR_Type traits = []> : let mnemonic = typeMnemonic; } +//===----------------------------------------------------------------------===// +// IntType +//===----------------------------------------------------------------------===// + +def CIR_IntType : CIR_Type<"Int", "int", + [DeclareTypeInterfaceMethods]> { + let summary = "Integer type with arbitrary precision up to a fixed limit"; + let description = [{ + CIR type that represents C/C++ primitive integer types. + Said types are: `char`, `short`, `int`, `long`, `long long`, and their \ + unsigned variations. + }]; + let parameters = (ins "unsigned":$width, "bool":$isSigned); + let hasCustomAssemblyFormat = 1; + let extraClassDeclaration = [{ + /// Return true if this is a signed integer type. + bool isSigned() const { return getIsSigned(); } + /// Return true if this is an unsigned integer type. + bool isUnsigned() const { return !getIsSigned(); } + /// Return type alias. + std::string getAlias() const { + return (isSigned() ? 's' : 'u') + std::to_string(getWidth()) + 'i'; + }; + }]; + let genVerifyDecl = 1; +} + //===----------------------------------------------------------------------===// // PointerType //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 737c3c9140c7..7651129e7c33 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -13,10 +13,15 @@ #include "CIRGenTypeCache.h" #include "UnimplementedFeatureGuarding.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/IR/FPEnv.h" +#include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinTypes.h" #include "llvm/ADT/FloatingPointMode.h" namespace cir { @@ -145,6 +150,17 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::Type getInt8Ty() { return typeCache.Int8Ty; } mlir::Type getInt32Ty() { return typeCache.Int32Ty; } mlir::Type getInt64Ty() { return typeCache.Int64Ty; } + + mlir::Type getSInt8Ty() { return typeCache.SInt8Ty; } + mlir::Type getSInt16Ty() { return typeCache.SInt16Ty; } + mlir::Type getSInt32Ty() { return typeCache.SInt32Ty; } + mlir::Type getSInt64Ty() { return typeCache.SInt64Ty; } + + mlir::Type getUInt8Ty() { return typeCache.UInt8Ty; } + mlir::Type getUInt16Ty() { return typeCache.UInt16Ty; } + mlir::Type getUInt32Ty() { return typeCache.UInt32Ty; } + mlir::Type getUInt64Ty() { return typeCache.UInt64Ty; } + mlir::cir::BoolType getBoolTy() { return ::mlir::cir::BoolType::get(getContext()); } @@ -173,6 +189,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Constant creation helpers // ------------------------- // + mlir::cir::ConstantOp getSInt32(int32_t C, mlir::Location loc) { + auto SInt32Ty = getSInt32Ty(); + return create(loc, SInt32Ty, + mlir::cir::IntAttr::get(SInt32Ty, C)); + } mlir::cir::ConstantOp getInt32(uint32_t C, mlir::Location loc) { auto int32Ty = getInt32Ty(); return create(loc, int32Ty, @@ -197,9 +218,16 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { if (ty.isa()) return getNullPtr(ty, loc); - assert(ty.isa() && "NYI"); - return create(loc, ty, - mlir::IntegerAttr::get(ty, 0)); + + mlir::TypedAttr attr; + if (ty.isa()) + attr = mlir::IntegerAttr::get(ty, 0); + else if (ty.isa()) + attr = mlir::cir::IntAttr::get(ty, 0); + else + llvm_unreachable("NYI"); + + return create(loc, ty, attr); } mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 2bfc764800de..146c2f3f07b9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -24,6 +24,8 @@ #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" #include "clang/Basic/Builtins.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Sequence.h" #include "llvm/Support/ErrorHandling.h" @@ -940,9 +942,9 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, auto &builder = CGM.getBuilder(); auto isNullValue = [&](mlir::Attribute f) { // TODO(cir): introduce char type in CIR and check for that instead. - auto intVal = f.dyn_cast_or_null(); + auto intVal = f.dyn_cast_or_null(); assert(intVal && "not implemented"); - if (intVal.getInt() == 0) + if (intVal.getValue() == 0) return true; return false; }; @@ -1425,8 +1427,8 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, mlir::Type ty = CGM.getCIRType(DestType); if (ty.isa()) return builder.getCIRBoolAttr(Value.getInt().getZExtValue()); - assert(ty.isa() && "expected integral type"); - return builder.getIntegerAttr(ty, Value.getInt()); + assert(ty.isa() && "expected integral type"); + return CGM.getBuilder().getAttr(ty, Value.getInt()); } case APValue::Float: { const llvm::APFloat &Init = Value.getFloat(); @@ -1445,9 +1447,9 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, unsigned NumInitElts = Value.getArrayInitializedElts(); auto isNullValue = [&](mlir::Attribute f) { // TODO(cir): introduce char type in CIR and check for that instead. - auto intVal = f.dyn_cast_or_null(); + auto intVal = f.dyn_cast_or_null(); assert(intVal && "not implemented"); - if (intVal.getInt() == 0) + if (intVal.getValue() == 0) return true; return false; }; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index ca216711bb33..40e578117b94 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -15,8 +15,10 @@ #include "UnimplementedFeatureGuarding.h" #include "clang/AST/StmtVisitor.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include #include "mlir/IR/Value.h" @@ -98,7 +100,7 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Type Ty = CGF.getCIRType(E->getType()); return Builder.create( CGF.getLoc(E->getExprLoc()), Ty, - Builder.getIntegerAttr(Ty, E->getValue())); + Builder.getAttr(Ty, E->getValue())); } mlir::Value VisitFixedPointLiteral(const FixedPointLiteral *E) { @@ -112,10 +114,9 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitCharacterLiteral(const CharacterLiteral *E) { mlir::Type Ty = CGF.getCIRType(E->getType()); - auto newOp = Builder.create( - CGF.getLoc(E->getExprLoc()), Ty, - Builder.getIntegerAttr(Ty, E->getValue())); - return newOp; + auto loc = CGF.getLoc(E->getExprLoc()); + auto init = mlir::cir::IntAttr::get(Ty, E->getValue()); + return Builder.create(loc, Ty, init); } mlir::Value VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) { llvm_unreachable("NYI"); @@ -325,7 +326,7 @@ class ScalarExprEmitter : public StmtVisitor { // For everything else, we can just do a simple increment. auto loc = CGF.getLoc(E->getSourceRange()); auto &builder = CGF.getBuilder(); - auto amt = builder.getInt32(amount, loc); + auto amt = builder.getSInt32(amount, loc); if (CGF.getLangOpts().isSignedOverflowDefined()) { llvm_unreachable("NYI"); } else { @@ -1266,6 +1267,13 @@ mlir::Value ScalarExprEmitter::buildScalarCast( llvm_unreachable("NYI"); } + if (SrcElementTy.isa()) { + if (DstElementTy.isa()) + return Builder.create( + Src.getLoc(), DstTy, mlir::cir::CastKind::integral, Src); + llvm_unreachable("NYI"); + } + if (DstElementTy.isa()) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 45d9a35ffc51..46656cf414b3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1215,7 +1215,7 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, llvm_unreachable("NYI"); } else { SmallVector offsets{ - mlir::IntegerAttr::get(PtrDiffTy, 2)}; + mlir::cir::IntAttr::get(PtrDiffTy, 2)}; field = mlir::cir::GlobalViewAttr::get( builder.getInt8PtrTy(), mlir::FlatSymbolRefAttr::get(VTable.getSymNameAttr()), diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 616333bbe075..c091b8f952c4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -96,12 +96,25 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, VTables{*this} { - // Initialize the type cache. + // Initialize signless integers types cache. VoidTy = ::mlir::IntegerType::get(builder.getContext(), 8); Int8Ty = ::mlir::IntegerType::get(builder.getContext(), 8); Int16Ty = ::mlir::IntegerType::get(builder.getContext(), 16); Int32Ty = ::mlir::IntegerType::get(builder.getContext(), 32); Int64Ty = ::mlir::IntegerType::get(builder.getContext(), 64); + + // Initialize CIR signed integer types cache. + SInt8Ty = ::mlir::cir::IntType::get(builder.getContext(), 8, true); + SInt16Ty = ::mlir::cir::IntType::get(builder.getContext(), 16, true); + SInt32Ty = ::mlir::cir::IntType::get(builder.getContext(), 32, true); + SInt64Ty = ::mlir::cir::IntType::get(builder.getContext(), 64, true); + + // Initialize CIR unsigned integer types cache. + UInt8Ty = ::mlir::cir::IntType::get(builder.getContext(), 8, false); + UInt16Ty = ::mlir::cir::IntType::get(builder.getContext(), 16, false); + UInt32Ty = ::mlir::cir::IntType::get(builder.getContext(), 32, false); + UInt64Ty = ::mlir::cir::IntType::get(builder.getContext(), 64, false); + // TODO: HalfTy // TODO: BFloatTy FloatTy = builder.getF32Type(); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 2fd616356932..f6d80b7e8395 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -607,7 +607,7 @@ mlir::LogicalResult CIRGenFunction::buildCaseStmt(const CaseStmt &S, // Fold cascading cases whenever possible to simplify codegen a bit. while (true) { auto intVal = caseStmt->getLHS()->EvaluateKnownConstInt(getContext()); - caseEltValueListAttr.push_back(mlir::IntegerAttr::get(condType, intVal)); + caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(condType, intVal)); if (isa(caseStmt->getSubStmt())) caseStmt = dyn_cast_or_null(caseStmt->getSubStmt()); else diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index 7c8e6e3914b5..ab10a464b928 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -31,6 +31,10 @@ struct CIRGenTypeCache { /// i8, i16, i32, and i64 mlir::Type Int8Ty, Int16Ty, Int32Ty, Int64Ty; + // char, int, short, long + mlir::Type SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty; + // usigned char, unsigned, unsigned short, unsigned long + mlir::Type UInt8Ty, UInt16Ty, UInt32Ty, UInt64Ty; /// half, bfloat, float, double // mlir::Type HalfTy, BFloatTy; mlir::Type FloatTy, DoubleTy; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 75a3aa0ee948..69ee557ce2fb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -406,6 +406,10 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::SatLongFract: case BuiltinType::SatShortAccum: case BuiltinType::SatShortFract: + ResultType = + mlir::cir::IntType::get(Builder.getContext(), Context.getTypeSize(T), + /*isSigned=*/true); + break; // Unsigned types. case BuiltinType::Char16: case BuiltinType::Char32: @@ -430,9 +434,9 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::SatULongFract: case BuiltinType::SatUShortAccum: case BuiltinType::SatUShortFract: - // FIXME: break this in s/u and also pass signed param. ResultType = - Builder.getIntegerType(static_cast(Context.getTypeSize(T))); + mlir::cir::IntType::get(Builder.getContext(), Context.getTypeSize(T), + /*isSigned=*/false); break; case BuiltinType::Float16: @@ -606,7 +610,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { auto isSized = [&](mlir::Type ty) { if (ty.isIntOrFloat() || ty.isa()) + mlir::cir::ArrayType, mlir::cir::BoolType, + mlir::cir::IntType>()) return true; assert(0 && "not implemented"); return false; diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 4d29e8618713..bb0cde993908 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -13,6 +13,7 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" @@ -137,6 +138,76 @@ LogicalResult ConstStructAttr::verify( return success(); } +//===----------------------------------------------------------------------===// +// IntAttr definitions +//===----------------------------------------------------------------------===// + +Attribute IntAttr::parse(AsmParser &parser, Type odsType) { + mlir::APInt APValue; + + if (!odsType.isa()) + return {}; + auto type = odsType.cast(); + + // Consume the '<' symbol. + if (parser.parseLess()) + return {}; + + // Fetch arbitrary precision integer value. + if (type.isSigned()) { + int64_t value; + if (parser.parseInteger(value)) + parser.emitError(parser.getCurrentLocation(), "expected integer value"); + APValue = mlir::APInt(type.getWidth(), value, type.isSigned(), + /*implicitTrunc=*/true); + if (APValue.getSExtValue() != value) + parser.emitError(parser.getCurrentLocation(), + "integer value too large for the given type"); + } else { + uint64_t value; + if (parser.parseInteger(value)) + parser.emitError(parser.getCurrentLocation(), "expected integer value"); + APValue = mlir::APInt(type.getWidth(), value, type.isSigned(), + /*implicitTrunc=*/true); + if (APValue.getZExtValue() != value) + parser.emitError(parser.getCurrentLocation(), + "integer value too large for the given type"); + } + + // Consume the '>' symbol. + if (parser.parseGreater()) + return {}; + + return IntAttr::get(type, APValue); +} + +void IntAttr::print(AsmPrinter &printer) const { + auto type = getType().cast(); + printer << '<'; + if (type.isSigned()) + printer << getSInt(); + else + printer << getUInt(); + printer << '>'; +} + +LogicalResult IntAttr::verify(function_ref emitError, + Type type, APInt value) { + if (!type.isa()) { + emitError() << "expected 'simple.int' type"; + return failure(); + } + + auto intType = type.cast(); + if (value.getBitWidth() != intType.getWidth()) { + emitError() << "type and value bitwidth mismatch: " << intType.getWidth() + << " != " << value.getBitWidth(); + return failure(); + } + + return success(); +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d610db9dfa72..a6daca808f4d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -44,6 +44,10 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << "ty_" << structType.getTypeName(); return AliasResult::OverridableAlias; } + if (auto intType = type.dyn_cast()) { + os << intType.getAlias(); + return AliasResult::OverridableAlias; + } return AliasResult::NoAlias; } @@ -189,6 +193,8 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, attrType.isa() || attrType.isa()) return success(); + if (attrType.isa()) + return success(); assert(attrType.isa() && "What else could we be looking at here?"); return op->emitOpError("global with type ") @@ -238,14 +244,14 @@ LogicalResult CastOp::verify() { case cir::CastKind::int_to_bool: { if (!resType.isa()) return emitOpError() << "requires !cir.bool type for result"; - if (!(srcType.isInteger(32) || srcType.isInteger(64))) + if (!srcType.isa()) return emitOpError() << "requires integral type for result"; return success(); } case cir::CastKind::integral: { - if (!resType.isa()) + if (!resType.isa()) return emitOpError() << "requires !IntegerType for result"; - if (!srcType.isa()) + if (!srcType.isa()) return emitOpError() << "requires !IntegerType for source"; return success(); } @@ -750,7 +756,7 @@ parseSwitchOp(OpAsmParser &parser, ::mlir::ArrayAttr &casesAttr, mlir::OpAsmParser::UnresolvedOperand &cond, mlir::Type &condType) { - ::mlir::IntegerType intCondType; + mlir::cir::IntType intCondType; SmallVector cases; auto parseAndCheckRegion = [&]() -> ParseResult { @@ -828,7 +834,7 @@ parseSwitchOp(OpAsmParser &parser, int64_t val = 0; if (parser.parseInteger(val).failed()) return ::mlir::failure(); - caseEltValueListAttr.push_back(mlir::IntegerAttr::get(intCondType, val)); + caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(intCondType, val)); break; } case cir::CaseOpKind::Anyof: { @@ -841,7 +847,7 @@ parseSwitchOp(OpAsmParser &parser, if (parser.parseInteger(val).failed()) return ::mlir::failure(); caseEltValueListAttr.push_back( - mlir::IntegerAttr::get(intCondType, val)); + mlir::cir::IntAttr::get(intCondType, val)); return ::mlir::success(); })) return mlir::failure(); @@ -1768,7 +1774,7 @@ LogicalResult mlir::cir::ConstArrayAttr::verify( if (auto strAttr = attr.dyn_cast()) { mlir::cir::ArrayType at = type.cast(); - auto intTy = at.getEltType().dyn_cast(); + auto intTy = at.getEltType().dyn_cast(); // TODO: add CIR type for char. if (!intTy || intTy.getWidth() != 8) { diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 357cdd229bfa..9580bf0be5a8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -16,9 +16,11 @@ #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectImplementation.h" +#include "mlir/Support/LogicalResult.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/TypeSwitch.h" +#include "llvm/Support/ErrorHandling.h" #define GET_TYPEDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsTypes.cpp.inc" @@ -303,6 +305,91 @@ void StructType::computeSizeAndAlignment( padded = isPadded; } +//===----------------------------------------------------------------------===// +// IntType Definitions +//===----------------------------------------------------------------------===// + +Type IntType::parse(mlir::AsmParser &parser) { + auto *context = parser.getBuilder().getContext(); + auto loc = parser.getCurrentLocation(); + bool isSigned; + unsigned width; + + if (parser.parseLess()) + return {}; + + // Fetch integer sign. + llvm::StringRef sign; + if (parser.parseKeyword(&sign)) + return {}; + if (sign.equals_insensitive("s")) + isSigned = true; + else if (sign.equals_insensitive("u")) + isSigned = false; + else { + parser.emitError(loc, "expected 's' or 'u'"); + return {}; + } + + if (parser.parseComma()) + return {}; + + // Fetch integer size. + if (parser.parseInteger(width)) + return {}; + if (width % 8 != 0) { + parser.emitError(loc, "expected integer width to be a multiple of 8"); + return {}; + } + if (width < 8 || width > 64) { + parser.emitError(loc, "expected integer width to be from 8 up to 64"); + return {}; + } + + if (parser.parseGreater()) + return {}; + + return IntType::get(context, width, isSigned); +} + +void IntType::print(mlir::AsmPrinter &printer) const { + auto sign = isSigned() ? 's' : 'u'; + printer << '<' << sign << ", " << getWidth() << '>'; +} + +llvm::TypeSize +IntType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return llvm::TypeSize::getFixed(getWidth()); +} + +uint64_t IntType::getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return (uint64_t)(getWidth() / 8); +} + +uint64_t +IntType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return (uint64_t)(getWidth() / 8); +} + +mlir::LogicalResult +IntType::verify(llvm::function_ref emitError, + unsigned width, bool isSigned) { + + if (width < 8 || width > 64) { + emitError() << "IntType only supports widths from 8 up to 64"; + return mlir::failure(); + } + if (width % 8 != 0) { + emitError() << "IntType width is not a multiple of 8"; + return mlir::failure(); + } + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d5362a9d2ac1..87443865b00f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -28,16 +28,19 @@ #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinDialect.h" +#include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/IRMapping.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Passes.h" +#include "llvm/ADT/APInt.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Casting.h" @@ -207,34 +210,31 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } case mlir::cir::CastKind::int_to_bool: { auto zero = rewriter.create( - src.getLoc(), src.getType(), - mlir::IntegerAttr::get(src.getType(), 0)); + src.getLoc(), castOp.getSrc().getType(), + mlir::cir::IntAttr::get(castOp.getSrc().getType(), 0)); rewriter.replaceOpWithNewOp( castOp, mlir::cir::BoolType::get(getContext()), mlir::cir::CmpOpKind::ne, src, zero); break; } case mlir::cir::CastKind::integral: { - auto oldSourceType = - castOp->getOperands().front().getType().cast(); - auto sourceValue = adaptor.getOperands().front(); - auto sourceType = sourceValue.getType().cast(); - auto targetType = getTypeConverter() - ->convertType(castOp.getResult().getType()) - .cast(); + auto dstType = castOp.getResult().getType().cast(); + auto srcType = castOp.getSrc().getType().dyn_cast(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = + getTypeConverter()->convertType(dstType).cast(); // Target integer is smaller: truncate source value. - if (targetType.getWidth() < sourceType.getWidth()) { - rewriter.replaceOpWithNewOp(castOp, targetType, - sourceValue); + if (dstType.getWidth() < srcType.getWidth()) { + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); } else { - // FIXME: CIR codegen does not distiguishes singned/unsinged types. - if (oldSourceType.isUnsigned()) - rewriter.replaceOpWithNewOp(castOp, targetType, - sourceValue); + if (srcType.isUnsigned()) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); else - rewriter.replaceOpWithNewOp(castOp, targetType, - sourceValue); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); } break; } @@ -465,6 +465,7 @@ class CIRConstantLowering matchAndRewrite(mlir::cir::ConstantOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Attribute attr = op.getValue(); + if (op.getType().isa()) { if (op.getValue() == mlir::cir::BoolAttr::get( @@ -472,10 +473,19 @@ class CIRConstantLowering attr = mlir::BoolAttr::get(getContext(), true); else attr = mlir::BoolAttr::get(getContext(), false); - } + } else if (op.getType().isa()) { + attr = rewriter.getIntegerAttr( + typeConverter->convertType(op.getType()), + op.getValue().cast().getValue()); + } else if (op.getType().isa()) { + attr = op.getValue(); + } else + return op.emitError("unsupported constant type"); + rewriter.replaceOpWithNewOp( op, getTypeConverter()->convertType(op.getType()), attr); - return mlir::LogicalResult::success(); + + return mlir::success(); } }; @@ -522,22 +532,21 @@ class CIRFuncLowering : public mlir::OpConversionPattern { } }; -template mlir::DenseElementsAttr -convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr) { - auto type = attr.getType().cast().getEltType(); - auto values = llvm::SmallVector{}; +convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, mlir::Type type) { + auto values = llvm::SmallVector{}; auto arrayAttr = attr.getElts().dyn_cast(); assert(arrayAttr && "expected array here"); for (auto element : arrayAttr) - values.push_back(element.cast().getInt()); + values.push_back(element.cast().getValue()); return mlir::DenseElementsAttr::get( mlir::RankedTensorType::get({(int64_t)values.size()}, type), llvm::ArrayRef(values)); } std::optional -lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr) { +lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, + const mlir::TypeConverter *converter) { // Ensure ConstArrayAttr has a type. auto typedConstArr = constArr.dyn_cast(); @@ -550,14 +559,8 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr) { // Is a ConstArrayAttr with an cir::ArrayType: fetch element type. auto type = cirArrayType.getEltType(); - if (type.isInteger(8)) - return convertToDenseElementsAttr(constArr); - if (type.isInteger(16)) - return convertToDenseElementsAttr(constArr); - if (type.isInteger(32)) - return convertToDenseElementsAttr(constArr); - if (type.isInteger(64)) - return convertToDenseElementsAttr(constArr); + if (type.isa()) + return convertToDenseElementsAttr(constArr, converter->convertType(type)); return std::nullopt; } @@ -615,7 +618,7 @@ class CIRGlobalOpLowering mlir::ConversionPatternRewriter &rewriter) const override { // Fetch required values to create LLVM op. - auto type = getTypeConverter()->convertType(op.getSymType()); + auto llvmType = getTypeConverter()->convertType(op.getSymType()); auto isConst = op.getConstant(); auto linkage = convertLinkage(op.getLinkage()); auto symbol = op.getSymName(); @@ -632,10 +635,10 @@ class CIRGlobalOpLowering if (auto attr = constArr.getElts().dyn_cast()) { init = rewriter.getStringAttr(attr.getValue()); } else if (auto attr = constArr.getElts().dyn_cast()) { - if (!(init = lowerConstArrayAttr(constArr))) { + if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { op.emitError() << "unsupported lowering for #cir.const_array with element type " - << type; + << op.getSymType(); return mlir::failure(); } } else { @@ -644,13 +647,17 @@ class CIRGlobalOpLowering << constArr.getElts(); return mlir::failure(); } - } else if (llvm::isa(init.value())) { + } else if (llvm::isa(init.value())) { // Nothing to do since LLVM already supports these types as initializers. } + // Initializer is a constant integer: convert to MLIR builtin constant. + else if (auto intAttr = init.value().dyn_cast()) { + init = rewriter.getIntegerAttr(llvmType, intAttr.getValue()); + } // Initializer is a global: load global value in initializer block. else if (auto attr = init.value().dyn_cast()) { auto newGlobalOp = rewriter.replaceOpWithNewOp( - op, type, isConst, linkage, symbol, mlir::Attribute()); + op, llvmType, isConst, linkage, symbol, mlir::Attribute()); mlir::OpBuilder::InsertionGuard guard(rewriter); // Create initializer block. @@ -669,8 +676,8 @@ class CIRGlobalOpLowering sourceSymbol.getSymName()); llvm::SmallVector offset{0}; auto gepOp = rewriter.create( - op->getLoc(), type, sourceSymbol.getType(), addressOfOp.getResult(), - offset); + op->getLoc(), llvmType, sourceSymbol.getType(), + addressOfOp.getResult(), offset); rewriter.create(op->getLoc(), gepOp.getResult()); return mlir::success(); @@ -681,7 +688,7 @@ class CIRGlobalOpLowering // Rewrite op. rewriter.replaceOpWithNewOp( - op, type, isConst, linkage, symbol, init.value()); + op, llvmType, isConst, linkage, symbol, init.value()); return mlir::success(); } }; @@ -695,39 +702,42 @@ class CIRUnaryOpLowering matchAndRewrite(mlir::cir::UnaryOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Type type = op.getInput().getType(); - assert(type.isa() && "operand type not supported yet"); + assert(type.isa() && "operand type not supported yet"); + + auto llvmInType = adaptor.getInput().getType(); + auto llvmType = getTypeConverter()->convertType(op.getType()); switch (op.getKind()) { case mlir::cir::UnaryOpKind::Inc: { auto One = rewriter.create( - op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getInput(), One); + op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 1)); + rewriter.replaceOpWithNewOp(op, llvmType, + adaptor.getInput(), One); break; } case mlir::cir::UnaryOpKind::Dec: { auto One = rewriter.create( - op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getInput(), One); + op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 1)); + rewriter.replaceOpWithNewOp(op, llvmType, + adaptor.getInput(), One); break; } case mlir::cir::UnaryOpKind::Plus: { - rewriter.replaceOp(op, op.getInput()); + rewriter.replaceOp(op, adaptor.getInput()); break; } case mlir::cir::UnaryOpKind::Minus: { auto Zero = rewriter.create( - op.getLoc(), type, mlir::IntegerAttr::get(type, 0)); - rewriter.replaceOpWithNewOp(op, op.getType(), Zero, - op.getInput()); + op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 0)); + rewriter.replaceOpWithNewOp(op, llvmType, Zero, + adaptor.getInput()); break; } case mlir::cir::UnaryOpKind::Not: { auto MinusOne = rewriter.create( - op.getLoc(), type, mlir::IntegerAttr::get(type, -1)); - rewriter.replaceOpWithNewOp(op, op.getType(), MinusOne, - op.getInput()); + op.getLoc(), llvmType, mlir::IntegerAttr::get(llvmType, -1)); + rewriter.replaceOpWithNewOp(op, llvmType, MinusOne, + adaptor.getInput()); break; } } @@ -746,79 +756,70 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { assert((op.getLhs().getType() == op.getRhs().getType()) && "inconsistent operands' types not supported yet"); mlir::Type type = op.getRhs().getType(); - assert((type.isa() || type.isa()) && + assert((type.isa()) && "operand type not supported yet"); + auto llvmTy = getTypeConverter()->convertType(op.getType()); + auto rhs = adaptor.getRhs(); + auto lhs = adaptor.getLhs(); + switch (op.getKind()) { case mlir::cir::BinOpKind::Add: - if (type.isa()) - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + if (type.isa()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Sub: - if (type.isa()) - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + if (type.isa()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Mul: - if (type.isa()) - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + if (type.isa()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Div: - if (type.isa()) { - if (type.isSignlessInteger()) - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + if (auto ty = type.dyn_cast()) { + if (ty.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else - llvm_unreachable("integer type not supported in CIR yet"); + llvm_unreachable("signed integer division binop lowering NYI"); } else - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Rem: - if (type.isa()) { - if (type.isSignlessInteger()) - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + if (auto ty = type.dyn_cast()) { + if (ty.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else - llvm_unreachable("integer type not supported in CIR yet"); + llvm_unreachable("signed integer remainder binop lowering NYI"); } else - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::And: - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getLhs(), op.getRhs()); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Or: - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getLhs(), op.getRhs()); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Xor: - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getLhs(), op.getRhs()); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Shl: - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getLhs(), op.getRhs()); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Shr: - if (type.isSignlessInteger()) - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); - else - llvm_unreachable("integer type not supported in CIR yet"); - break; + if (auto ty = type.dyn_cast()) { + if (ty.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + else + llvm_unreachable("signed integer shift binop lowering NYI"); + break; + } } return mlir::LogicalResult::success(); @@ -1034,6 +1035,10 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { return mlir::IntegerType::get(type.getContext(), 8, mlir::IntegerType::Signless); }); + converter.addConversion([&](mlir::cir::IntType type) -> mlir::Type { + // LLVM doesn't work with signed types, so we drop the CIR signs here. + return mlir::IntegerType::get(type.getContext(), type.getWidth()); + }); } } // namespace diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 4861ef87fb24..b9a024496abe 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -22,52 +22,52 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 // CHECK-NEXT: %2 = "cir.struct_element_addr"(%1) <{member_name = "storage"}> -// CHECK-NEXT: %3 = cir.const(#cir.null : !cir.ptr) : !cir.ptr -// CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %4 = "cir.struct_element_addr"(%1) <{member_name = "size"}> : (!cir.ptr) -> !cir.ptr -// CHECK-NEXT: %5 = cir.const(0 : i32) : i32 -// CHECK-NEXT: %6 = cir.cast(integral, %5 : i32), i64 -// CHECK-NEXT: cir.store %6, %4 : i64, cir.ptr +// CHECK-NEXT: %3 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %4 = "cir.struct_element_addr"(%1) <{member_name = "size"}> : (!cir.ptr) -> !cir.ptr +// CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.cast(integral, %5 : !s32i), !s64i +// CHECK-NEXT: cir.store %6, %4 : !s64i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2Ei // CHECK-NEXT: %0 = cir.alloca !cir.ptr -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["size", init] +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["size", init] // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: cir.store %arg1, %1 // CHECK-NEXT: %2 = cir.load %0 // CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_name = "storage"}> -// CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) +// CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) // CHECK-NEXT: cir.store %4, %3 -// CHECK-NEXT: %5 = "cir.struct_element_addr"(%2) <{member_name = "size"}> : (!cir.ptr) -> !cir.ptr -// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %7 = cir.cast(integral, %6 : i32), i64 -// CHECK-NEXT: cir.store %7, %5 : i64, cir.ptr +// CHECK-NEXT: %5 = "cir.struct_element_addr"(%2) <{member_name = "size"}> : (!cir.ptr) -> !cir.ptr +// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %7 = cir.cast(integral, %6 : !s32i), !s64i +// CHECK-NEXT: cir.store %7, %5 : !s64i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2EPKc // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_name = "storage"}> : (!cir.ptr) -> !cir.ptr> -// CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) : !cir.ptr -// CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_name = "storage"}> : (!cir.ptr) -> !cir.ptr> +// CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.return // CHECK: cir.func linkonce_odr @_ZN6StringC1EPKc // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return // CHECK: cir.func @_Z4testv() { // CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () -// CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, i32) -> () -// CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, !s32i) -> () +// CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 06877e05cfd5..d0118408e31b 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -35,12 +35,12 @@ void use() { yop{}; } // CHECK: cir.func @_Z3usev() { // CHECK: %0 = cir.alloca !ty_22struct2Eyep_22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} -// CHECK: %1 = "cir.struct_element_addr"(%0) <{member_name = "Status"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %2 = cir.const(0 : i32) : i32 -// CHECK: cir.store %2, %1 : i32, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "HC"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %4 = cir.const(0 : i32) : i32 -// CHECK: cir.store %4, %3 : i32, cir.ptr +// CHECK: %1 = "cir.struct_element_addr"(%0) <{member_name = "Status"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %2 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: cir.store %2, %1 : !u32i, cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "HC"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %4 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: cir.store %4, %3 : !u32i, cir.ptr // CHECK: cir.return // CHECK: } @@ -66,14 +66,14 @@ void yo() { // CHECK: cir.func @_Z2yov() { // CHECK: %0 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} -// CHECK: %2 = cir.const(#cir.const_struct<{1000070000 : i32,#cir.null : !cir.ptr,0 : i64}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 +// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i,#cir.null : !cir.ptr,#cir.int<0> : !u64i}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 // CHECK: cir.store %2, %0 : !ty_22struct2EYo22, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%1) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %4 = cir.const(1000066001 : i32) : i32 -// CHECK: cir.store %4, %3 : i32, cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%1) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i +// CHECK: cir.store %4, %3 : !u32i, cir.ptr // CHECK: %5 = "cir.struct_element_addr"(%1) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > -// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "createFlags"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %8 = cir.const(0 : i64) : i64 -// CHECK: cir.store %8, %7 : i64, cir.ptr +// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "createFlags"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %8 = cir.const(#cir.int<0> : !u64i) : !u64i +// CHECK: cir.store %8, %7 : !u64i, cir.ptr diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 5406855dedb8..13267434d422 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -6,7 +6,7 @@ void a0() { } // CHECK: cir.func @_Z2a0v() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} void a1() { int a[10]; @@ -14,35 +14,35 @@ void a1() { } // CHECK: cir.func @_Z2a1v() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} -// CHECK-NEXT: %1 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %2 = cir.const(0 : i32) : i32 -// CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : i32), !cir.ptr -// CHECK-NEXT: cir.store %1, %4 : i32, cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : !s32i), !cir.ptr +// CHECK-NEXT: cir.store %1, %4 : !s32i, cir.ptr int *a2() { int a[4]; return &a[0]; } -// CHECK: cir.func @_Z2a2v() -> !cir.ptr { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} -// CHECK-NEXT: %2 = cir.const(0 : i32) : i32 -// CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : i32), !cir.ptr -// CHECK-NEXT: cir.store %4, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %5 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.return %5 : !cir.ptr +// CHECK: cir.func @_Z2a2v() -> !cir.ptr { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : !s32i), !cir.ptr +// CHECK-NEXT: cir.store %4, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %5 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return %5 : !cir.ptr void local_stringlit() { const char *s = "whatnow"; } -// CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.func @_Z15local_stringlitv() { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > -// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store %2, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: cir.store %2, %0 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 798ac3052574..2a8e5bc17ebb 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -35,8 +35,8 @@ struct String { // Load value from s.size and store in this->size - // CHECK: %6 = cir.load %5 : cir.ptr , i64 - // CHECK: cir.store %6, %3 : i64, cir.ptr + // CHECK: %6 = cir.load %5 : cir.ptr , !s64i + // CHECK: cir.store %6, %3 : !s64i, cir.ptr // CHECK: cir.return // CHECK: } @@ -54,9 +54,9 @@ struct String { // CHECK: %3 = cir.load deref %0 : cir.ptr > // CHECK: %4 = cir.load %1 : cir.ptr > // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_name = "size"}> - // CHECK: %6 = cir.load %5 : cir.ptr , i64 + // CHECK: %6 = cir.load %5 : cir.ptr , !s64i // CHECK: %7 = "cir.struct_element_addr"(%3) <{member_name = "size"}> - // CHECK: cir.store %6, %7 : i64, cir.ptr + // CHECK: cir.store %6, %7 : !s64i, cir.ptr // CHECK: cir.store %3, %2 : !cir.ptr // CHECK: %8 = cir.load %2 : cir.ptr > // CHECK: cir.return %8 : !cir.ptr @@ -81,21 +81,21 @@ int main() { } } -// CHECK: cir.func @main() -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.func @main() -> !s32i { +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK: %1 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["sv", init] {alignment = 8 : i64} // CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () // CHECK: cir.scope { // CHECK: %3 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s", init] {alignment = 8 : i64} -// CHECK: %4 = cir.get_global @".str" : cir.ptr > -// CHECK: %5 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_ZN6StringC2EPKc(%3, %5) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %4 = cir.get_global @".str" : cir.ptr > +// CHECK: %5 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_ZN6StringC2EPKc(%3, %5) : (!cir.ptr, !cir.ptr) -> () // CHECK: cir.scope { // CHECK: %6 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} // CHECK: cir.call @_ZN10StringViewC2ERK6String(%6, %3) : (!cir.ptr, !cir.ptr) -> () // CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %6) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } // CHECK: } -// CHECK: %2 = cir.load %0 : cir.ptr , i32 -// CHECK: cir.return %2 : i32 +// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: cir.return %2 : !s32i // CHECK: } diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index fcc989ab8124..9021cf076bdd 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -7,4 +7,4 @@ typedef struct _a { void m() { at y; } -// CHECK: !ty_22struct2E_a22 = !cir.struct<"struct._a", i32> \ No newline at end of file +// CHECK: !ty_22struct2E_a22 = !cir.struct<"struct._a", !s32i> \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 659c3f3e9e80..3f810109f20a 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -9,36 +9,36 @@ int foo(int i) { } // CHECK: module attributes { -// CHECK-NEXT: cir.func @foo(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.store %3, %1 : i32, cir.ptr -// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: cir.return %4 : i32 +// CHECK-NEXT: cir.func @foo(%arg0: !s32i loc({{.*}})) -> !s32i { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %3, %1 : !s32i, cir.ptr +// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %4 : !s32i int f2() { return 3; } -// CHECK: cir.func @f2() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.const(3 : i32) : i32 -// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.return %2 : i32 +// CHECK: cir.func @f2() -> !s32i { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %2 : !s32i int f3() { int i = 3; return i; } -// CHECK: cir.func @f3() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.const(3 : i32) : i32 -// CHECK-NEXT: cir.store %2, %1 : i32, cir.ptr -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: cir.store %3, %0 : i32, cir.ptr -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.return %4 : i32 +// CHECK: cir.func @f3() -> !s32i { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: cir.store %2, %1 : !s32i, cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %3, %0 : !s32i, cir.ptr +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %4 : !s32i diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 685af4ddbabe..2e29c1b94c22 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -6,10 +6,10 @@ int *p0() { return p; } -// CHECK: cir.func @_Z2p0v() -> !cir.ptr { -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] -// CHECK: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > +// CHECK: cir.func @_Z2p0v() -> !cir.ptr { +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] +// CHECK: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > int *p1() { int *p; @@ -17,10 +17,10 @@ int *p1() { return p; } -// CHECK: cir.func @_Z2p1v() -> !cir.ptr { -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p"] -// CHECK: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > +// CHECK: cir.func @_Z2p1v() -> !cir.ptr { +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p"] +// CHECK: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > int *p2() { int *p = nullptr; @@ -33,27 +33,27 @@ int *p2() { return p; } -// CHECK: cir.func @_Z2p2v() -> !cir.ptr { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] {alignment = 8 : i64} -// CHECK-NEXT: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr -// CHECK-NEXT: cir.store %2, %1 : !cir.ptr, cir.ptr > +// CHECK: cir.func @_Z2p2v() -> !cir.ptr { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] {alignment = 8 : i64} +// CHECK-NEXT: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.store %2, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %7 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} -// CHECK-NEXT: %8 = cir.const(0 : i32) : i32 -// CHECK-NEXT: cir.store %8, %7 : i32, cir.ptr -// CHECK-NEXT: cir.store %7, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %9 = cir.const(42 : i32) : i32 -// CHECK-NEXT: %10 = cir.load deref %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.store %9, %10 : i32, cir.ptr +// CHECK-NEXT: %7 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: %8 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: cir.store %8, %7 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %7, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %9 = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK-NEXT: %10 = cir.load deref %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %9, %10 : !s32i, cir.ptr // CHECK-NEXT: } loc(#[[locScope:loc[0-9]+]]) -// CHECK-NEXT: %3 = cir.const(42 : i32) : i32 -// CHECK-NEXT: %4 = cir.load deref %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.store %3, %4 : i32, cir.ptr -// CHECK-NEXT: %5 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.store %5, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %6 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.return %6 : !cir.ptr +// CHECK-NEXT: %3 = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.load deref %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %3, %4 : !s32i, cir.ptr +// CHECK-NEXT: %5 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %5, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %6 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.return %6 : !cir.ptr void b0() { bool x = true, y = false; } @@ -63,9 +63,9 @@ void b0() { bool x = true, y = false; } void b1(int a) { bool b = a; } -// CHECK: cir.func @_Z2b1i(%arg0: i32 loc({{.*}})) { -// CHECK: %2 = cir.load %0 : cir.ptr , i32 -// CHECK: %3 = cir.cast(int_to_bool, %2 : i32), !cir.bool +// CHECK: cir.func @_Z2b1i(%arg0: !s32i loc({{.*}})) { +// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool // CHECK: cir.store %3, %1 : !cir.bool, cir.ptr void if0(int a) { @@ -77,16 +77,16 @@ void if0(int a) { } } -// CHECK: cir.func @_Z3if0i(%arg0: i32 loc({{.*}})) +// CHECK: cir.func @_Z3if0i(%arg0: !s32i loc({{.*}})) // CHECK: cir.scope { -// CHECK: %3 = cir.load %0 : cir.ptr , i32 -// CHECK: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool +// CHECK: %3 = cir.load %0 : cir.ptr , !s32i +// CHECK: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.if %4 { -// CHECK-NEXT: %5 = cir.const(3 : i32) : i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT: } else { -// CHECK-NEXT: %5 = cir.const(4 : i32) : i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: %5 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT: } // CHECK: } @@ -105,30 +105,30 @@ void if1(int a, bool b, bool c) { } } -// CHECK: cir.func @_Z3if1ibb(%arg0: i32 loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) +// CHECK: cir.func @_Z3if1ibb(%arg0: !s32i loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) // CHECK: cir.scope { -// CHECK: %5 = cir.load %0 : cir.ptr , i32 -// CHECK: %6 = cir.cast(int_to_bool, %5 : i32), !cir.bool +// CHECK: %5 = cir.load %0 : cir.ptr , !s32i +// CHECK: %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool // CHECK: cir.if %6 { -// CHECK: %7 = cir.const(3 : i32) : i32 -// CHECK: cir.store %7, %3 : i32, cir.ptr +// CHECK: %7 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: cir.store %7, %3 : !s32i, cir.ptr // CHECK: cir.scope { // CHECK: %8 = cir.load %1 : cir.ptr , !cir.bool // CHECK-NEXT: cir.if %8 { -// CHECK-NEXT: %9 = cir.const(8 : i32) : i32 -// CHECK-NEXT: cir.store %9, %3 : i32, cir.ptr +// CHECK-NEXT: %9 = cir.const(#cir.int<8> : !s32i) : !s32i +// CHECK-NEXT: cir.store %9, %3 : !s32i, cir.ptr // CHECK-NEXT: } // CHECK: } // CHECK: } else { // CHECK: cir.scope { // CHECK: %8 = cir.load %2 : cir.ptr , !cir.bool // CHECK-NEXT: cir.if %8 { -// CHECK-NEXT: %9 = cir.const(14 : i32) : i32 -// CHECK-NEXT: cir.store %9, %3 : i32, cir.ptr +// CHECK-NEXT: %9 = cir.const(#cir.int<14> : !s32i) : !s32i +// CHECK-NEXT: cir.store %9, %3 : !s32i, cir.ptr // CHECK-NEXT: } // CHECK: } -// CHECK: %7 = cir.const(4 : i32) : i32 -// CHECK: cir.store %7, %3 : i32, cir.ptr +// CHECK: %7 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK: cir.store %7, %3 : !s32i, cir.ptr // CHECK: } // CHECK: } diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp index 5e0ad30363ed..c615289cbb0f 100644 --- a/clang/test/CIR/CodeGen/binassign.cpp +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -16,7 +16,7 @@ int foo(int a, int b) { return x; } -// CHECK: [[Value:%[0-9]+]] = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: [[Value:%[0-9]+]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} // CHECK: = cir.binop(mul, // CHECK: = cir.load {{.*}}[[Value]] // CHECK: = cir.binop(mul, diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index c6ce278060d6..5ea326e149b3 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -14,13 +14,13 @@ void b0(int a, int b) { x = x | b; } -// CHECK: = cir.binop(mul, %3, %4) : i32 -// CHECK: = cir.binop(div, %6, %7) : i32 -// CHECK: = cir.binop(rem, %9, %10) : i32 -// CHECK: = cir.binop(add, %12, %13) : i32 -// CHECK: = cir.binop(sub, %15, %16) : i32 -// CHECK: = cir.binop(shr, %18, %19) : i32 -// CHECK: = cir.binop(shl, %21, %22) : i32 -// CHECK: = cir.binop(and, %24, %25) : i32 -// CHECK: = cir.binop(xor, %27, %28) : i32 -// CHECK: = cir.binop(or, %30, %31) : i32 \ No newline at end of file +// CHECK: = cir.binop(mul, %3, %4) : !s32i +// CHECK: = cir.binop(div, %6, %7) : !s32i +// CHECK: = cir.binop(rem, %9, %10) : !s32i +// CHECK: = cir.binop(add, %12, %13) : !s32i +// CHECK: = cir.binop(sub, %15, %16) : !s32i +// CHECK: = cir.binop(shr, %18, %19) : !s32i +// CHECK: = cir.binop(shl, %21, %22) : !s32i +// CHECK: = cir.binop(and, %24, %25) : !s32i +// CHECK: = cir.binop(xor, %27, %28) : !s32i +// CHECK: = cir.binop(or, %30, %31) : !s32i \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 7a99ba1266a2..b2c1dfd89bca 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -15,4 +15,4 @@ void m() { } // CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", i32, #cir.recdecl.ast> -// CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long", !ty_22struct2Eanon22, i32, !cir.ptr> \ No newline at end of file +// CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long", !ty_22struct2Eanon22, !u32i, !cir.ptr> \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index ddd09b7ff189..dce64286c7e5 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -18,17 +18,17 @@ void d(void) { // CHECK: cir.func @a() { // CHECK: cir.return // CHECK: } -// CHECK: cir.func @b(%arg0: i32 {{.*}}, %arg1: i32 {{.*}}) -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", init] -// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", init] -// CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: cir.store %arg0, %0 : i32, cir.ptr -// CHECK: cir.store %arg1, %1 : i32, cir.ptr -// CHECK: %3 = cir.load %0 : cir.ptr , i32 -// CHECK: %4 = cir.load %1 : cir.ptr , i32 -// CHECK: %5 = cir.binop(add, %3, %4) : i32 -// CHECK: cir.store %5, %2 : i32, cir.ptr -// CHECK: %6 = cir.load %2 : cir.ptr , i32 +// CHECK: cir.func @b(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) -> !s32i { +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["b", init] +// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK: cir.store %arg1, %1 : !s32i, cir.ptr +// CHECK: %3 = cir.load %0 : cir.ptr , !s32i +// CHECK: %4 = cir.load %1 : cir.ptr , !s32i +// CHECK: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK: cir.store %5, %2 : !s32i, cir.ptr +// CHECK: %6 = cir.load %2 : cir.ptr , !s32i // CHECK: cir.return %6 // CHECK: } // CHECK: cir.func @c(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { @@ -46,9 +46,9 @@ void d(void) { // CHECK: } // CHECK: cir.func @d() { // CHECK: call @a() : () -> () -// CHECK: %0 = cir.const(0 : i32) : i32 -// CHECK: %1 = cir.const(1 : i32) : i32 -// CHECK: call @b(%0, %1) : (i32, i32) -> i32 +// CHECK: %0 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: call @b(%0, %1) : (!s32i, !s32i) -> !s32i // CHECK: cir.return // CHECK: } // @@ -56,17 +56,17 @@ void d(void) { // CXX-NEXT: cir.func @_Z1av() { // CXX-NEXT: cir.return // CXX-NEXT: } -// CXX-NEXT: cir.func @_Z1bii(%arg0: i32 {{.*}}, %arg1: i32 {{.*}}) -> i32 { -// CXX-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", init] -// CXX-NEXT: %1 = cir.alloca i32, cir.ptr , ["b", init] -// CXX-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval"] -// CXX-NEXT: cir.store %arg0, %0 : i32, cir.ptr -// CXX-NEXT: cir.store %arg1, %1 : i32, cir.ptr -// CXX-NEXT: %3 = cir.load %0 : cir.ptr , i32 -// CXX-NEXT: %4 = cir.load %1 : cir.ptr , i32 -// CXX-NEXT: %5 = cir.binop(add, %3, %4) : i32 -// CXX-NEXT: cir.store %5, %2 : i32, cir.ptr -// CXX-NEXT: %6 = cir.load %2 : cir.ptr , i32 +// CXX-NEXT: cir.func @_Z1bii(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) -> !s32i { +// CXX-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] +// CXX-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["b", init] +// CXX-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CXX-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr +// CXX-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr +// CXX-NEXT: %3 = cir.load %0 : cir.ptr , !s32i +// CXX-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CXX-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CXX-NEXT: cir.store %5, %2 : !s32i, cir.ptr +// CXX-NEXT: %6 = cir.load %2 : cir.ptr , !s32i // CXX-NEXT: cir.return %6 // CXX-NEXT: } // CXX-NEXT: cir.func @_Z1cdd(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { @@ -84,8 +84,8 @@ void d(void) { // CXX-NEXT: } // CXX-NEXT: cir.func @_Z1dv() { // CXX-NEXT: call @_Z1av() : () -> () -// CXX-NEXT: %0 = cir.const(0 : i32) : i32 -// CXX-NEXT: %1 = cir.const(1 : i32) : i32 -// CXX-NEXT: call @_Z1bii(%0, %1) : (i32, i32) -> i32 +// CXX-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i +// CXX-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CXX-NEXT: call @_Z1bii(%0, %1) : (!s32i, !s32i) -> !s32i // CXX-NEXT: cir.return // CXX-NEXT: } diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index d3fc7ff7eb85..604a60359dde 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -5,14 +5,14 @@ unsigned char cxxstaticcast_0(unsigned int x) { } // CHECK: cir.func @_Z15cxxstaticcast_0j -// CHECK: %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca i8, cir.ptr , ["__retval"] {alignment = 1 : i64} -// CHECK: cir.store %arg0, %0 : i32, cir.ptr -// CHECK: %2 = cir.load %0 : cir.ptr , i32 -// CHECK: %3 = cir.cast(integral, %2 : i32), i8 -// CHECK: cir.store %3, %1 : i8, cir.ptr -// CHECK: %4 = cir.load %1 : cir.ptr , i8 -// CHECK: cir.return %4 : i8 +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !u8i, cir.ptr , ["__retval"] {alignment = 1 : i64} +// CHECK: cir.store %arg0, %0 : !u32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !u32i +// CHECK: %3 = cir.cast(integral, %2 : !u32i), !u8i +// CHECK: cir.store %3, %1 : !u8i, cir.ptr +// CHECK: %4 = cir.load %1 : cir.ptr , !u8i +// CHECK: cir.return %4 : !u8i // CHECK: } @@ -20,20 +20,20 @@ int cStyleCasts_0(unsigned x1, int x2) { // CHECK: cir.func @_{{.*}}cStyleCasts_0{{.*}} char a = (char)x1; // truncate - // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : i32), i8 + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s8i short b = (short)x2; // truncate with sign - // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : i32), i16 + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !s16i long long c = (long long)x1; // zero extend - // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : i32), i64 + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s64i long long d = (long long)x2; // sign extend - // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : i32), i64 + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !s64i int arr[3]; int* e = (int*)arr; // explicit pointer decay - // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %{{[0-9]+}} : !cir.ptr>), !cir.ptr + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %{{[0-9]+}} : !cir.ptr>), !cir.ptr return 0; } diff --git a/clang/test/CIR/CodeGen/cmp.cpp b/clang/test/CIR/CodeGen/cmp.cpp index 1eb398fa3c6e..3bca55e78d13 100644 --- a/clang/test/CIR/CodeGen/cmp.cpp +++ b/clang/test/CIR/CodeGen/cmp.cpp @@ -10,9 +10,9 @@ void c0(int a, int b) { x = a == b; } -// CHECK: = cir.cmp(gt, %3, %4) : i32, !cir.bool -// CHECK: = cir.cmp(lt, %6, %7) : i32, !cir.bool -// CHECK: = cir.cmp(le, %9, %10) : i32, !cir.bool -// CHECK: = cir.cmp(ge, %12, %13) : i32, !cir.bool -// CHECK: = cir.cmp(ne, %15, %16) : i32, !cir.bool -// CHECK: = cir.cmp(eq, %18, %19) : i32, !cir.bool +// CHECK: = cir.cmp(gt, %3, %4) : !s32i, !cir.bool +// CHECK: = cir.cmp(lt, %6, %7) : !s32i, !cir.bool +// CHECK: = cir.cmp(le, %9, %10) : !s32i, !cir.bool +// CHECK: = cir.cmp(ge, %12, %13) : !s32i, !cir.bool +// CHECK: = cir.cmp(ne, %15, %16) : !s32i, !cir.bool +// CHECK: = cir.cmp(eq, %18, %19) : !s32i, !cir.bool diff --git a/clang/test/CIR/CodeGen/comma.cpp b/clang/test/CIR/CodeGen/comma.cpp index 679f28a8236e..87c9dcce50b3 100644 --- a/clang/test/CIR/CodeGen/comma.cpp +++ b/clang/test/CIR/CodeGen/comma.cpp @@ -7,11 +7,11 @@ int c0() { return b + 1, a; } -// CHECK: cir.func @_Z2c0v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] -// CHECK: %[[#B:]] = cir.alloca i32, cir.ptr , ["b", init] -// CHECK: %[[#LOADED_B:]] = cir.load %[[#B]] : cir.ptr , i32 -// CHECK: %[[#]] = cir.binop(add, %[[#LOADED_B]], %[[#]]) : i32 -// CHECK: %[[#LOADED_A:]] = cir.load %[[#A]] : cir.ptr , i32 -// CHECK: cir.store %[[#LOADED_A]], %[[#RET]] : i32, cir.ptr +// CHECK: cir.func @_Z2c0v() -> !s32i { +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#B:]] = cir.alloca !s32i, cir.ptr , ["b", init] +// CHECK: %[[#LOADED_B:]] = cir.load %[[#B]] : cir.ptr , !s32i +// CHECK: %[[#]] = cir.binop(add, %[[#LOADED_B]], %[[#]]) : !s32i +// CHECK: %[[#LOADED_A:]] = cir.load %[[#A]] : cir.ptr , !s32i +// CHECK: cir.store %[[#LOADED_A]], %[[#RET]] : !s32i, cir.ptr diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 49e7f3fc331d..f4f45de69850 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -139,7 +139,7 @@ co_invoke_fn co_invoke; // CHECK: cir.func builtin private @__builtin_coro_id(i32, !cir.ptr, !cir.ptr, !cir.ptr) -> i32 // CHECK: cir.func builtin private @__builtin_coro_alloc(i32) -> !cir.bool -// CHECK: cir.func builtin private @__builtin_coro_size() -> i64 +// CHECK: cir.func builtin private @__builtin_coro_size() -> !u64i // CHECK: cir.func builtin private @__builtin_coro_begin(i32, !cir.ptr) -> !cir.ptr using VoidTask = folly::coro::Task; @@ -168,8 +168,8 @@ VoidTask silly_task() { // CHECK: %[[#ShouldAlloc:]] = cir.call @__builtin_coro_alloc(%[[#CoroId]]) : (i32) -> !cir.bool // CHECK: cir.store %[[#NullPtr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > // CHECK: cir.if %[[#ShouldAlloc]] { -// CHECK: %[[#CoroSize:]] = cir.call @__builtin_coro_size() : () -> i64 -// CHECK: %[[#AllocAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (i64) -> !cir.ptr +// CHECK: %[[#CoroSize:]] = cir.call @__builtin_coro_size() : () -> !u64i +// CHECK: %[[#AllocAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (!u64i) -> !cir.ptr // CHECK: cir.store %[[#AllocAddr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > // CHECK: } // CHECK: %[[#Load0:]] = cir.load %[[#SavedFrameAddr]] : cir.ptr >, !cir.ptr @@ -317,21 +317,21 @@ folly::coro::Task go1() { // The call to go(1) has its own scope due to full-expression rules. // CHECK: cir.scope { -// CHECK: %[[#OneAddr:]] = cir.alloca i32, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} -// CHECK: %[[#One:]] = cir.const(1 : i32) : i32 -// CHECK: cir.store %[[#One]], %[[#OneAddr]] : i32, cir.ptr -// CHECK: %[[#IntTaskTmp:]] = cir.call @_Z2goRKi(%[[#OneAddr]]) : (!cir.ptr) -> ![[IntTask]] +// CHECK: %[[#OneAddr:]] = cir.alloca !s32i, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} +// CHECK: %[[#One:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#One]], %[[#OneAddr]] : !s32i, cir.ptr +// CHECK: %[[#IntTaskTmp:]] = cir.call @_Z2goRKi(%[[#OneAddr]]) : (!cir.ptr) -> ![[IntTask]] // CHECK: cir.store %[[#IntTaskTmp]], %[[#IntTaskAddr]] : ![[IntTask]], cir.ptr // CHECK: } -// CHECK: %[[#CoReturnValAddr:]] = cir.alloca i32, cir.ptr , ["__coawait_resume_rval"] {alignment = 1 : i64} +// CHECK: %[[#CoReturnValAddr:]] = cir.alloca !s32i, cir.ptr , ["__coawait_resume_rval"] {alignment = 1 : i64} // CHECK: cir.await(user, ready : { // CHECK: }, suspend : { // CHECK: }, resume : { // CHECK: %[[#ResumeVal:]] = cir.call @_ZN5folly4coro4TaskIiE12await_resumeEv(%3) -// CHECK: cir.store %[[#ResumeVal]], %[[#CoReturnValAddr]] : i32, cir.ptr +// CHECK: cir.store %[[#ResumeVal]], %[[#CoReturnValAddr]] : !s32i, cir.ptr // CHECK: },) -// CHECK: %[[#V:]] = cir.load %[[#CoReturnValAddr]] : cir.ptr , i32 +// CHECK: %[[#V:]] = cir.load %[[#CoReturnValAddr]] : cir.ptr , !s32i // CHECK: cir.call @_ZN5folly4coro4TaskIiE12promise_type12return_valueEi({{.*}}, %[[#V]]) folly::coro::Task go1_lambda() { @@ -362,19 +362,19 @@ folly::coro::Task go4() { // CHECK: %17 = cir.alloca !ty_22class2Eanon221, cir.ptr , ["ref.tmp1"] {alignment = 1 : i64} // Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` -// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> -// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> -// CHECK: cir.yield %19 : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> +// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> +// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> +// CHECK: cir.yield %19 : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> // CHECK: } -// CHECK: cir.store %12, %3 : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, cir.ptr ) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>> +// CHECK: cir.store %12, %3 : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, cir.ptr ) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>> // CHECK: cir.scope { -// CHECK: %17 = cir.alloca i32, cir.ptr , ["ref.tmp2", init] {alignment = 4 : i64} -// CHECK: %18 = cir.load %3 : cir.ptr ) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>>, !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> -// CHECK: %19 = cir.const(3 : i32) : i32 -// CHECK: cir.store %19, %17 : i32, cir.ptr +// CHECK: %17 = cir.alloca !s32i, cir.ptr , ["ref.tmp2", init] {alignment = 4 : i64} +// CHECK: %18 = cir.load %3 : cir.ptr ) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>>, !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> +// CHECK: %19 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: cir.store %19, %17 : !s32i, cir.ptr // Call invoker, which calls operator() indirectly. -// CHECK: %20 = cir.call %18(%17) : (!cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, !cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221 +// CHECK: %20 = cir.call %18(%17) : (!cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, !cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221 // CHECK: cir.store %20, %4 : !ty_22struct2Efolly3A3Acoro3A3ATask221, cir.ptr // CHECK: } diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp index 18ad86bcd8a1..30173fe84024 100644 --- a/clang/test/CIR/CodeGen/ctor-alias.cpp +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -10,9 +10,9 @@ void t() { // CHECK: cir.func linkonce_odr @_ZN11DummyStringC2EPKc // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return @@ -20,9 +20,9 @@ void t() { // CHECK: cir.func @_Z1tv // CHECK-NEXT: %0 = cir.alloca !ty_22struct2EDummyString22, cir.ptr , ["s4", init] {alignment = 1 : i64} -// CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > -// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return struct B { diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index f17a28f22b1f..7b685169810a 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -14,8 +14,8 @@ struct String { // CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "size"}> // CHECK: %4 = cir.load %1 // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_name = "size"}> -// CHECK: %6 = cir.load %5 : cir.ptr , i64 -// CHECK: cir.store %6, %3 : i64, cir.ptr +// CHECK: %6 = cir.load %5 : cir.ptr , !s64i +// CHECK: cir.store %6, %3 : !s64i, cir.ptr // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index 68b4a5136378..c81075dfa880 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -11,7 +11,7 @@ void baz() { Struk s; } -// CHECK: !ty_22struct2EStruk22 = !cir.struct<"struct.Struk", i32> +// CHECK: !ty_22struct2EStruk22 = !cir.struct<"struct.Struk", !s32i> // CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index f8d2aaffe3fc..cfe68b0d3de8 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -45,10 +45,10 @@ class B : public A // CHECK: cir.func @_Z4bluev() { // CHECK: %0 = cir.alloca !ty_22class2EPSEvent22, cir.ptr , ["p", init] {alignment = 8 : i64} -// CHECK: %1 = cir.const(1 : i32) : i32 -// CHECK: %2 = cir.get_global @".str" : cir.ptr > -// CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_ZN7PSEventC1E6EFModePKc(%0, %1, %3) : (!cir.ptr, i32, !cir.ptr) -> () +// CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %2 = cir.get_global @".str" : cir.ptr > +// CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_ZN7PSEventC1E6EFModePKc(%0, %1, %3) : (!cir.ptr, !s32i, !cir.ptr) -> () // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/fullexpr.cpp b/clang/test/CIR/CodeGen/fullexpr.cpp index e1cc2a34d038..8d2d64a594d5 100644 --- a/clang/test/CIR/CodeGen/fullexpr.cpp +++ b/clang/test/CIR/CodeGen/fullexpr.cpp @@ -8,13 +8,13 @@ int go1() { return x; } -// CHECK: cir.func @_Z3go1v() -> i32 { -// CHECK: %[[#XAddr:]] = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: cir.func @_Z3go1v() -> !s32i { +// CHECK: %[[#XAddr:]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} // CHECK: %[[#RVal:]] = cir.scope { -// CHECK-NEXT: %[[#TmpAddr:]] = cir.alloca i32, cir.ptr , ["ref.tmp0", init] {alignment = 4 : i64} -// CHECK-NEXT: %[[#One:]] = cir.const(1 : i32) : i32 -// CHECK-NEXT: cir.store %[[#One]], %[[#TmpAddr]] : i32, cir.ptr -// CHECK-NEXT: %[[#RValTmp:]] = cir.call @_Z2goRKi(%[[#TmpAddr]]) : (!cir.ptr) -> i32 -// CHECK-NEXT: cir.yield %[[#RValTmp]] : i32 +// CHECK-NEXT: %[[#TmpAddr:]] = cir.alloca !s32i, cir.ptr , ["ref.tmp0", init] {alignment = 4 : i64} +// CHECK-NEXT: %[[#One:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: cir.store %[[#One]], %[[#TmpAddr]] : !s32i, cir.ptr +// CHECK-NEXT: %[[#RValTmp:]] = cir.call @_Z2goRKi(%[[#TmpAddr]]) : (!cir.ptr) -> !s32i +// CHECK-NEXT: cir.yield %[[#RValTmp]] : !s32i // CHECK-NEXT: } -// CHECK-NEXT: cir.store %[[#RVal]], %[[#XAddr]] : i32, cir.ptr \ No newline at end of file +// CHECK-NEXT: cir.store %[[#RVal]], %[[#XAddr]] : !s32i, cir.ptr \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 185aac9e086f..1ab3e4c25c29 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -7,10 +7,10 @@ // XFAIL: * char string[] = "whatnow"; -// CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array +// CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array int sint[] = {123, 456, 789}; -// CHECK: cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32]> : !cir.array +// CHECK: cir.global external @sint = #cir.const_array<[#cir.int<123> : !s32i, #cir.int<456> : !s32i, #cir.int<789> : !s32i]> : !cir.array int filler_sint[4] = {1, 2}; // Ensure missing elements are zero-initialized. -// CHECK: cir.global external @filler_sint = #cir.const_array<[1 : i32, 2 : i32, 0 : i32, 0 : i32]> : !cir.array +// CHECK: cir.global external @filler_sint = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i]> : !cir.array int excess_sint[2] = {1, 2, 3, 4}; // Ensure excess elements are ignored. -// CHECK: cir.global external @excess_sint = #cir.const_array<[1 : i32, 2 : i32]> : !cir.array +// CHECK: cir.global external @excess_sint = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 63c4f75a24c0..c216df4556b7 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -30,79 +30,81 @@ T func() { int use_func() { return func(); } // CHECK: module {{.*}} { -// CHECK-NEXT: cir.global external @a = 3 : i32 -// CHECK-NEXT: cir.global external @c = 2 : i64 +// CHECK-NEXT: cir.global external @a = #cir.int<3> : !s32i +// CHECK-NEXT: cir.global external @c = #cir.int<2> : !u64i // CHECK-NEXT: cir.global external @y = 3.400000e+00 : f32 // CHECK-NEXT: cir.global external @w = 4.300000e+00 : f64 -// CHECK-NEXT: cir.global external @x = 51 : i8 -// CHECK-NEXT: cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array -// CHECK-NEXT: cir.global external @alpha = #cir.const_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8]> : !cir.array +// CHECK-NEXT: cir.global external @x = #cir.int<51> : !s8i +// CHECK-NEXT: cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array +// CHECK-NEXT: cir.global external @alpha = #cir.const_array<[#cir.int<97> : !s8i, #cir.int<98> : !s8i, #cir.int<99> : !s8i, #cir.int<0> : !s8i]> : !cir.array -// CHECK-NEXT: cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK-NEXT: cir.global external @s = @".str": !cir.ptr +// CHECK-NEXT: cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global external @s = @".str": !cir.ptr -// CHECK-NEXT: cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK-NEXT: cir.global external @s1 = @".str1": !cir.ptr +// CHECK-NEXT: cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global external @s1 = @".str1": !cir.ptr -// CHECK-NEXT: cir.global external @s2 = @".str": !cir.ptr +// CHECK-NEXT: cir.global external @s2 = @".str": !cir.ptr // CHECK: cir.func @_Z10use_globalv() { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["li", init] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.get_global @a : cir.ptr -// CHECK-NEXT: %2 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["li", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.get_global @a : cir.ptr +// CHECK-NEXT: %2 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %2, %0 : !s32i, cir.ptr // CHECK: cir.func @_Z17use_global_stringv() { -// CHECK-NEXT: %0 = cir.alloca i8, cir.ptr , ["c", init] {alignment = 1 : i64} -// CHECK-NEXT: %1 = cir.get_global @s2 : cir.ptr > -// CHECK-NEXT: %2 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %3 = cir.const(0 : i32) : i32 -// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr -// CHECK-NEXT: %5 = cir.load %4 : cir.ptr , i8 -// CHECK-NEXT: cir.store %5, %0 : i8, cir.ptr - -// CHECK: cir.func linkonce_odr @_Z4funcIiET_v() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.const(0 : i32) : i32 -// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.return %2 : i32 +// CHECK-NEXT: %0 = cir.alloca !u8i, cir.ptr , ["c", init] {alignment = 1 : i64} +// CHECK-NEXT: %1 = cir.get_global @s2 : cir.ptr > +// CHECK-NEXT: %2 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr +// CHECK-NEXT: %5 = cir.load %4 : cir.ptr , !s8i +// CHECK-NEXT: %6 = cir.cast(integral, %5 : !s8i), !u8i +// CHECK-NEXT: cir.store %6, %0 : !u8i, cir.ptr +// CHECK-NEXT: cir.return + +// CHECK: cir.func linkonce_odr @_Z4funcIiET_v() -> !s32i { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %2 : !s32i // CHECK-NEXT: } -// CHECK-NEXT: cir.func @_Z8use_funcv() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.call @_Z4funcIiET_v() : () -> i32 -// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.return %2 : i32 +// CHECK-NEXT: cir.func @_Z8use_funcv() -> !s32i { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.call @_Z4funcIiET_v() : () -> !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %2 : !s32i // CHECK-NEXT: } char string[] = "whatnow"; -// CHECK: cir.global external @string = #cir.const_array<[119 : i8, 104 : i8, 97 : i8, 116 : i8, 110 : i8, 111 : i8, 119 : i8, 0 : i8]> : !cir.array +// CHECK: cir.global external @string = #cir.const_array<[#cir.int<119> : !s8i, #cir.int<104> : !s8i, #cir.int<97> : !s8i, #cir.int<116> : !s8i, #cir.int<110> : !s8i, #cir.int<111> : !s8i, #cir.int<119> : !s8i, #cir.int<0> : !s8i]> : !cir.array unsigned uint[] = {255}; -// CHECK: cir.global external @uint = #cir.const_array<[255 : i32]> : !cir.array +// CHECK: cir.global external @uint = #cir.const_array<[#cir.int<255> : !u32i]> : !cir.array short sshort[] = {11111, 22222}; -// CHECK: cir.global external @sshort = #cir.const_array<[11111 : i16, 22222 : i16]> : !cir.array +// CHECK: cir.global external @sshort = #cir.const_array<[#cir.int<11111> : !s16i, #cir.int<22222> : !s16i]> : !cir.array int sint[] = {123, 456, 789}; -// CHECK: cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32]> : !cir.array +// CHECK: cir.global external @sint = #cir.const_array<[#cir.int<123> : !s32i, #cir.int<456> : !s32i, #cir.int<789> : !s32i]> : !cir.array long long ll[] = {999999999, 0, 0, 0}; -// CHECK: cir.global external @ll = #cir.const_array<[999999999, 0, 0, 0]> : !cir.array +// CHECK: cir.global external @ll = #cir.const_array<[#cir.int<999999999> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i]> : !cir.array void get_globals() { // CHECK: cir.func @_Z11get_globalsv() char *s = string; - // CHECK: %[[RES:[0-9]+]] = cir.get_global @string : cir.ptr > - // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + // CHECK: %[[RES:[0-9]+]] = cir.get_global @string : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr unsigned *u = uint; - // CHECK: %[[RES:[0-9]+]] = cir.get_global @uint : cir.ptr > - // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + // CHECK: %[[RES:[0-9]+]] = cir.get_global @uint : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr short *ss = sshort; - // CHECK: %[[RES:[0-9]+]] = cir.get_global @sshort : cir.ptr > - // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + // CHECK: %[[RES:[0-9]+]] = cir.get_global @sshort : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr int *si = sint; - // CHECK: %[[RES:[0-9]+]] = cir.get_global @sint : cir.ptr > - // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + // CHECK: %[[RES:[0-9]+]] = cir.get_global @sint : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr long long *l = ll; - // CHECK: %[[RES:[0-9]+]] = cir.get_global @ll : cir.ptr > - // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr + // CHECK: %[[RES:[0-9]+]] = cir.get_global @ll : cir.ptr > + // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr } diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 76493d75714e..f4d87938eb43 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -10,23 +10,23 @@ void g0(int a) { } // CHECK: cir.func @_Z2g0i -// CHECK-NEXT %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} -// CHECK-NEXT cir.store %arg0, %0 : i32, cir.ptr -// CHECK-NEXT %2 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT cir.store %2, %1 : i32, cir.ptr +// CHECK-NEXT %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} +// CHECK-NEXT cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK-NEXT %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT cir.store %2, %1 : !s32i, cir.ptr // CHECK-NEXT cir.br ^bb2 // CHECK-NEXT ^bb1: // no predecessors -// CHECK-NEXT %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT %4 = cir.const(1 : i32) : i32 -// CHECK-NEXT %5 = cir.binop(add, %3, %4) : i32 -// CHECK-NEXT cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT %4 = cir.const(1 : !s32i) : !s32i +// CHECK-NEXT %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT cir.br ^bb2 // CHECK-NEXT ^bb2: // 2 preds: ^bb0, ^bb1 -// CHECK-NEXT %6 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT %7 = cir.const(2 : i32) : i32 -// CHECK-NEXT %8 = cir.binop(add, %6, %7) : i32 -// CHECK-NEXT cir.store %8, %1 : i32, cir.ptr +// CHECK-NEXT %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT %7 = cir.const(2 : !s32i) : !s32i +// CHECK-NEXT %8 = cir.binop(add, %6, %7) : !s32i +// CHECK-NEXT cir.store %8, %1 : !s32i, cir.ptr // CHECK-NEXT cir.return void g1(int a) { @@ -37,11 +37,11 @@ void g1(int a) { } // Make sure alloca for "y" shows up in the entry block -// CHECK: cir.func @_Z2g1i(%arg0: i32 -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["y", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr +// CHECK: cir.func @_Z2g1i(%arg0: !s32i +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr int g2() { int b = 1; @@ -55,12 +55,12 @@ int g2() { // Make sure (1) we don't get dangling unused cleanup blocks // (2) generated returns consider the function type -// CHECK: cir.func @_Z2g2v() -> i32 { +// CHECK: cir.func @_Z2g2v() -> !s32i { // CHECK: cir.br ^bb2 // CHECK-NEXT: ^bb1: // no predecessors // CHECK: ^bb2: // 2 preds: ^bb0, ^bb1 -// CHECK: [[R:%[0-9]+]] = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: [[R]] : i32 +// CHECK: [[R:%[0-9]+]] = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: [[R]] : !s32i // CHECK-NEXT: } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/inc-dec.cpp b/clang/test/CIR/CodeGen/inc-dec.cpp index c342a0b15400..a1b89e1d6e84 100644 --- a/clang/test/CIR/CodeGen/inc-dec.cpp +++ b/clang/test/CIR/CodeGen/inc-dec.cpp @@ -6,9 +6,9 @@ unsigned id0() { return ++a; } -// CHECK: cir.func @_Z3id0v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: cir.func @_Z3id0v() -> !u32i { +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(inc, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] @@ -20,9 +20,9 @@ unsigned id1() { return --a; } -// CHECK: cir.func @_Z3id1v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: cir.func @_Z3id1v() -> !u32i { +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(dec, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] @@ -33,9 +33,9 @@ unsigned id2() { return a++; } -// CHECK: cir.func @_Z3id2v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: cir.func @_Z3id2v() -> !u32i { +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(inc, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] @@ -46,9 +46,9 @@ unsigned id3() { return a--; } -// CHECK: cir.func @_Z3id3v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: cir.func @_Z3id3v() -> !u32i { +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(dec, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index f77d80d3b95f..aac767879995 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -23,16 +23,17 @@ void l0() { // CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv( -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %3 = cir.load %2 : cir.ptr >, !cir.ptr -// CHECK: %4 = cir.load %3 : cir.ptr , i32 -// CHECK: %5 = cir.const(1 : i32) : i32 -// CHECK: %6 = cir.binop(add, %4, %5) : i32 -// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr -// CHECK: cir.store %6, %8 : i32, cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %3 = cir.load %2 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.load %3 : cir.ptr , !s32i +// CHECK: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr +// CHECK: cir.store %6, %8 : !s32i, cir.ptr // CHECK: cir.func @_Z2l0v() { @@ -44,15 +45,15 @@ auto g() { }; } -// CHECK: cir.func @_Z1gv() -> !ty_22class2Eanon222 { -// CHECK: %0 = cir.alloca !ty_22class2Eanon222, cir.ptr , ["__retval"] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} -// CHECK: %2 = cir.const(12 : i32) : i32 -// CHECK: cir.store %2, %1 : i32, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > -// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon222 -// CHECK: cir.return %4 : !ty_22class2Eanon222 +// CHECK: cir.func @_Z1gv() -> !ty_22class2Eanon223 { +// CHECK: %0 = cir.alloca !ty_22class2Eanon223, cir.ptr , ["__retval"] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK: %2 = cir.const(#cir.int<12> : !s32i) : !s32i +// CHECK: cir.store %2, %1 : !s32i, cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > +// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon223 +// CHECK: cir.return %4 : !ty_22class2Eanon223 auto g2() { int i = 12; @@ -64,31 +65,31 @@ auto g2() { } // Should be same as above because of NRVO -// CHECK: cir.func @_Z2g2v() -> !ty_22class2Eanon223 { -// CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon223, cir.ptr , ["__retval", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.const(12 : i32) : i32 -// CHECK-NEXT: cir.store %2, %1 : i32, cir.ptr -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> -// CHECK-NEXT: cir.store %1, %3 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon223 -// CHECK-NEXT: cir.return %4 : !ty_22class2Eanon223 +// CHECK: cir.func @_Z2g2v() -> !ty_22class2Eanon224 { +// CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon224, cir.ptr , ["__retval", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.const(#cir.int<12> : !s32i) : !s32i +// CHECK-NEXT: cir.store %2, %1 : !s32i, cir.ptr +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK-NEXT: cir.store %1, %3 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon224 +// CHECK-NEXT: cir.return %4 : !ty_22class2Eanon224 int f() { return g2()(); } -// CHECK: cir.func @_Z1fv() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.func @_Z1fv() -> !s32i { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !ty_22class2Eanon223, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} -// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_22class2Eanon223 -// CHECK-NEXT: cir.store %3, %2 : !ty_22class2Eanon223, cir.ptr -// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> i32 -// CHECK-NEXT: cir.store %4, %0 : i32, cir.ptr +// CHECK-NEXT: %2 = cir.alloca !ty_22class2Eanon224, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_22class2Eanon224 +// CHECK-NEXT: cir.store %3, %2 : !ty_22class2Eanon224, cir.ptr +// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i +// CHECK-NEXT: cir.store %4, %0 : !s32i, cir.ptr // CHECK-NEXT: } -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.return %1 : i32 +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %1 : !s32i // CHECK-NEXT: } int g3() { @@ -106,30 +107,30 @@ int g3() { // lambda operator int (*)(int const&)() // CHECK: cir.func internal private @_ZZ2g3vENK3$_0cvPFiRKiEEv -// CHECK: cir.func @_Z2g3v() -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !cir.ptr<(!cir.ptr) -> i32>, cir.ptr ) -> i32>>, ["fn", init] {alignment = 8 : i64} -// CHECK: %2 = cir.alloca i32, cir.ptr , ["task", init] {alignment = 4 : i64} +// CHECK: cir.func @_Z2g3v() -> !s32i { +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !cir.ptr<(!cir.ptr) -> !s32i>, cir.ptr ) -> !s32i>>, ["fn", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["task", init] {alignment = 4 : i64} // 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. // CHECK: %3 = cir.scope { -// CHECK: %7 = cir.alloca !ty_22class2Eanon224, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} -// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr<(!cir.ptr) -> i32> -// CHECK: %9 = cir.unary(plus, %8) : !cir.ptr<(!cir.ptr) -> i32>, !cir.ptr<(!cir.ptr) -> i32> -// CHECK: cir.yield %9 : !cir.ptr<(!cir.ptr) -> i32> +// CHECK: %7 = cir.alloca !ty_22class2Eanon221, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr<(!cir.ptr) -> !s32i> +// CHECK: %9 = cir.unary(plus, %8) : !cir.ptr<(!cir.ptr) -> !s32i>, !cir.ptr<(!cir.ptr) -> !s32i> +// CHECK: cir.yield %9 : !cir.ptr<(!cir.ptr) -> !s32i> // CHECK: } // 2. Load ptr to `__invoke()`. -// CHECK: cir.store %3, %1 : !cir.ptr<(!cir.ptr) -> i32>, cir.ptr ) -> i32>> +// CHECK: cir.store %3, %1 : !cir.ptr<(!cir.ptr) -> !s32i>, cir.ptr ) -> !s32i>> // CHECK: %4 = cir.scope { -// CHECK: %7 = cir.alloca i32, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} -// CHECK: %8 = cir.load %1 : cir.ptr ) -> i32>>, !cir.ptr<(!cir.ptr) -> i32> -// CHECK: %9 = cir.const(3 : i32) : i32 -// CHECK: cir.store %9, %7 : i32, cir.ptr +// CHECK: %7 = cir.alloca !s32i, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} +// CHECK: %8 = cir.load %1 : cir.ptr ) -> !s32i>>, !cir.ptr<(!cir.ptr) -> !s32i> +// CHECK: %9 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: cir.store %9, %7 : !s32i, cir.ptr // 3. Call `__invoke()`, which effectively executes `operator()`. -// CHECK: %10 = cir.call %8(%7) : (!cir.ptr<(!cir.ptr) -> i32>, !cir.ptr) -> i32 -// CHECK: cir.yield %10 : i32 +// CHECK: %10 = cir.call %8(%7) : (!cir.ptr<(!cir.ptr) -> !s32i>, !cir.ptr) -> !s32i +// CHECK: cir.yield %10 : !s32i // CHECK: } // CHECK: } diff --git a/clang/test/CIR/CodeGen/literals.c b/clang/test/CIR/CodeGen/literals.c index 7b0a69dc5886..47665212c287 100644 --- a/clang/test/CIR/CodeGen/literals.c +++ b/clang/test/CIR/CodeGen/literals.c @@ -2,8 +2,8 @@ int literals(void) { char a = 'a'; // char literals are int in C - // CHECK: %[[RES:[0-9]+]] = cir.const(97 : i32) : i32 - // CHECK: %{{[0-9]+}} = cir.cast(integral, %[[RES]] : i32), i8 + // CHECK: %[[RES:[0-9]+]] = cir.const(#cir.int<97> : !s32i) : !s32i + // CHECK: %{{[0-9]+}} = cir.cast(integral, %[[RES]] : !s32i), !s8i return 0; } diff --git a/clang/test/CIR/CodeGen/literals.cpp b/clang/test/CIR/CodeGen/literals.cpp index 35cf7f8144bb..537ebc8557e1 100644 --- a/clang/test/CIR/CodeGen/literals.cpp +++ b/clang/test/CIR/CodeGen/literals.cpp @@ -2,7 +2,7 @@ int literals() { char a = 'a'; // char literals have char type in C++ - // CHECK: %{{[0-9]+}} = cir.const(97 : i8) : i8 + // CHECK: %{{[0-9]+}} = cir.const(#cir.int<97> : !s8i) : !s8i return 0; } diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index 3001cd26fc85..b7f5b49da857 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -11,19 +11,19 @@ void l0() { // CPPSCOPE: cir.func @_Z2l0v() { // CPPSCOPE-NEXT: cir.scope { -// CPPSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} -// CPPSCOPE-NEXT: %1 = cir.alloca i32, cir.ptr , ["j", init] {alignment = 4 : i64} -// CPPSCOPE-NEXT: %2 = cir.const(0 : i32) : i32 -// CPPSCOPE-NEXT: cir.store %2, %0 : i32, cir.ptr +// CPPSCOPE-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CPPSCOPE-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} +// CPPSCOPE-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i +// CPPSCOPE-NEXT: cir.store %2, %0 : !s32i, cir.ptr // CPPSCOPE-NEXT: cir.loop for(cond : { // CSCOPE: cir.func @l0() { // CSCOPE-NEXT: cir.scope { -// CSCOPE-NEXT: %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} -// CSCOPE-NEXT: %1 = cir.const(0 : i32) : i32 -// CSCOPE-NEXT: cir.store %1, %0 : i32, cir.ptr +// CSCOPE-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CSCOPE-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CSCOPE-NEXT: cir.store %1, %0 : !s32i, cir.ptr // CSCOPE-NEXT: cir.loop for(cond : { // CSCOPE: }) { // CSCOPE-NEXT: cir.scope { -// CSCOPE-NEXT: %2 = cir.alloca i32, cir.ptr , ["j", init] {alignment = 4 : i64} +// CSCOPE-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index cec5a2b9db4d..7f9ee4f8c845 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -24,25 +24,25 @@ void l1() { // CHECK: cir.func @_Z2l1v // CHECK: cir.loop for(cond : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(10 : i32) : i32 -// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool // CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: cir.yield continue // CHECK-NEXT: ^bb2: // CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 -// CHECK-NEXT: cir.store %6, %2 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: cir.store %6, %2 : !s32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } @@ -71,10 +71,10 @@ void l2(bool cond) { // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } @@ -84,17 +84,17 @@ void l2(bool cond) { // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %3 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool +// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: cir.yield continue @@ -103,10 +103,10 @@ void l2(bool cond) { // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } @@ -136,10 +136,10 @@ void l3(bool cond) { // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } @@ -149,17 +149,17 @@ void l3(bool cond) { // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: %3 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool +// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: cir.yield continue @@ -168,10 +168,10 @@ void l3(bool cond) { // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %4 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } @@ -192,14 +192,14 @@ void l4() { // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %10 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %11 = cir.const(10 : i32) : i32 -// CHECK-NEXT: %12 = cir.cmp(lt, %10, %11) : i32, !cir.bool +// CHECK-NEXT: %10 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %11 = cir.const(#cir.int<10> : !s32i) : !s32i +// CHECK-NEXT: %12 = cir.cmp(lt, %10, %11) : !s32i, !cir.bool // CHECK-NEXT: cir.if %12 { // CHECK-NEXT: cir.yield continue // CHECK-NEXT: } @@ -213,8 +213,8 @@ void l5() { // CHECK: cir.func @_Z2l5v() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: %0 = cir.const(0 : i32) : i32 -// CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : i32), !cir.bool +// CHECK-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool // CHECK-NEXT: cir.brcond %1 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: cir.yield continue diff --git a/clang/test/CIR/CodeGen/predefined.cpp b/clang/test/CIR/CodeGen/predefined.cpp index 60948dc9dc00..224504aac61a 100644 --- a/clang/test/CIR/CodeGen/predefined.cpp +++ b/clang/test/CIR/CodeGen/predefined.cpp @@ -10,13 +10,13 @@ void m() { } // CHECK: cir.func @_Z1mv() { -// CHECK: %0 = cir.get_global @".str" : cir.ptr > -// CHECK: %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr -// CHECK: %2 = cir.const(79 : i32) : i32 -// CHECK: %3 = cir.get_global @".str1" : cir.ptr > -// CHECK: %4 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr -// CHECK: %5 = cir.get_global @".str2" : cir.ptr > -// CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @__assert2(%1, %2, %4, %6) : (!cir.ptr, i32, !cir.ptr, !cir.ptr) -> () +// CHECK: %0 = cir.get_global @".str" : cir.ptr > +// CHECK: %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK: %2 = cir.const(#cir.int<79> : !s32i) : !s32i +// CHECK: %3 = cir.get_global @".str1" : cir.ptr > +// CHECK: %4 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr +// CHECK: %5 = cir.get_global @".str2" : cir.ptr > +// CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @__assert2(%1, %2, %4, %6) : (!cir.ptr, !s32i, !cir.ptr, !cir.ptr) -> () // CHECK: cir.return // CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index d9c0cb7a9e14..8f2503b3ddae 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -21,17 +21,17 @@ void init(unsigned numImages) { } } -// CHECK: !ty_22struct2Etriple22 = !cir.struct<"struct.triple", i32, !cir.ptr, i32> +// CHECK: !ty_22struct2Etriple22 = !cir.struct<"struct.triple", !u32i, !cir.ptr, !u32i> // CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector", !cir.ptr, !cir.ptr, !cir.ptr> // CHECK: !ty_22struct2E__vector_iterator22 = !cir.struct<"struct.__vector_iterator", !cir.ptr> -// CHECK: cir.func @_Z4initj(%arg0: i32 -// CHECK: %0 = cir.alloca i32, cir.ptr , ["numImages", init] {alignment = 4 : i64} +// CHECK: cir.func @_Z4initj(%arg0: !u32i +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["numImages", init] {alignment = 4 : i64} // CHECK: %1 = cir.alloca !ty_22class2Estd3A3Avector22, cir.ptr , ["images", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : i32, cir.ptr -// CHECK: %2 = cir.load %0 : cir.ptr , i32 -// CHECK: %3 = cir.cast(integral, %2 : i32), i64 -// CHECK: cir.call @_ZNSt6vectorI6tripleEC1Em(%1, %3) : (!cir.ptr, i64) -> () +// CHECK: cir.store %arg0, %0 : !u32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !u32i +// CHECK: %3 = cir.cast(integral, %2 : !u32i), !u64i +// CHECK: cir.call @_ZNSt6vectorI6tripleEC1Em(%1, %3) : (!cir.ptr, !u64i) -> () // CHECK: cir.scope { // CHECK: %4 = cir.alloca !cir.ptr, cir.ptr >, ["__range1", init] {alignment = 8 : i64} // CHECK: %5 = cir.alloca !ty_22struct2E__vector_iterator22, cir.ptr , ["__begin1", init] {alignment = 8 : i64} @@ -61,11 +61,11 @@ void init(unsigned numImages) { // CHECK: %13 = cir.alloca !ty_22struct2Etriple22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} // CHECK: %14 = cir.const(#cir.zero : !ty_22struct2Etriple22) : !ty_22struct2Etriple22 // CHECK: cir.store %14, %13 : !ty_22struct2Etriple22, cir.ptr -// CHECK: %15 = "cir.struct_element_addr"(%13) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %16 = cir.const(1000024002 : i32) : i32 -// CHECK: cir.store %16, %15 : i32, cir.ptr +// CHECK: %15 = "cir.struct_element_addr"(%13) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %16 = cir.const(#cir.int<1000024002> : !u32i) : !u32i +// CHECK: cir.store %16, %15 : !u32i, cir.ptr // CHECK: %17 = "cir.struct_element_addr"(%13) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %18 = "cir.struct_element_addr"(%13) <{member_name = "image"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %18 = "cir.struct_element_addr"(%13) <{member_name = "image"}> : (!cir.ptr) -> !cir.ptr // CHECK: %19 = cir.load %7 : cir.ptr >, !cir.ptr // CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } diff --git a/clang/test/CIR/CodeGen/return.cpp b/clang/test/CIR/CodeGen/return.cpp index d56d3c272f7d..ee7eef915c38 100644 --- a/clang/test/CIR/CodeGen/return.cpp +++ b/clang/test/CIR/CodeGen/return.cpp @@ -5,10 +5,10 @@ int &ret0(int &x) { } // CHECK: cir.func @_Z4ret0Ri -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > -// CHECK: %3 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK: cir.return %3 : !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > +// CHECK: %3 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: cir.return %3 : !cir.ptr diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 0b1f1b059b68..d83105d3c859 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -17,33 +17,33 @@ int s0(int a, int b) { // CHECK: #loc21 = loc(fused[#loc3, #loc4]) // CHECK: #loc22 = loc(fused[#loc5, #loc6]) // CHECK: module attributes {cir.sob = #cir.signed_overflow_behavior -// CHECK: cir.func @_Z2s0ii(%arg0: i32 loc(fused[#loc3, #loc4]), %arg1: i32 loc(fused[#loc5, #loc6])) -> i32 { -// CHECK: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) -// CHECK: %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) -// CHECK: %2 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) -// CHECK: %3 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} loc(#loc23) -// CHECK: cir.store %arg0, %0 : i32, cir.ptr loc(#loc9) -// CHECK: cir.store %arg1, %1 : i32, cir.ptr loc(#loc9) -// CHECK: %4 = cir.load %0 : cir.ptr , i32 loc(#loc10) -// CHECK: %5 = cir.load %1 : cir.ptr , i32 loc(#loc8) -// CHECK: %6 = cir.binop(add, %4, %5) : i32 loc(#loc24) -// CHECK: cir.store %6, %3 : i32, cir.ptr loc(#loc23) +// CHECK: cir.func @_Z2s0ii(%arg0: !s32i loc(fused[#loc3, #loc4]), %arg1: !s32i loc(fused[#loc5, #loc6])) -> !s32i { +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) +// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) +// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) +// CHECK: %3 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} loc(#loc23) +// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr loc(#loc9) +// CHECK: cir.store %arg1, %1 : !s32i, cir.ptr loc(#loc9) +// CHECK: %4 = cir.load %0 : cir.ptr , !s32i loc(#loc10) +// CHECK: %5 = cir.load %1 : cir.ptr , !s32i loc(#loc8) +// CHECK: %6 = cir.binop(add, %4, %5) : !s32i loc(#loc24) +// CHECK: cir.store %6, %3 : !s32i, cir.ptr loc(#loc23) // CHECK: cir.scope { -// CHECK: %9 = cir.load %3 : cir.ptr , i32 loc(#loc13) -// CHECK: %10 = cir.const(0 : i32) : i32 loc(#loc14) -// CHECK: %11 = cir.cmp(gt, %9, %10) : i32, !cir.bool loc(#loc26) +// CHECK: %9 = cir.load %3 : cir.ptr , !s32i loc(#loc13) +// CHECK: %10 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc14) +// CHECK: %11 = cir.cmp(gt, %9, %10) : !s32i, !cir.bool loc(#loc26) // CHECK: cir.if %11 { -// CHECK: %12 = cir.const(0 : i32) : i32 loc(#loc16) -// CHECK: cir.store %12, %3 : i32, cir.ptr loc(#loc28) +// CHECK: %12 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc16) +// CHECK: cir.store %12, %3 : !s32i, cir.ptr loc(#loc28) // CHECK: } else { -// CHECK: %12 = cir.const(1 : i32) : i32 loc(#loc12) -// CHECK: cir.store %12, %3 : i32, cir.ptr loc(#loc29) +// CHECK: %12 = cir.const(#cir.int<1> : !s32i) : !s32i loc(#loc12) +// CHECK: cir.store %12, %3 : !s32i, cir.ptr loc(#loc29) // CHECK: } loc(#loc27) // CHECK: } loc(#loc25) -// CHECK: %7 = cir.load %3 : cir.ptr , i32 loc(#loc18) -// CHECK: cir.store %7, %2 : i32, cir.ptr loc(#loc30) -// CHECK: %8 = cir.load %2 : cir.ptr , i32 loc(#loc30) -// CHECK: cir.return %8 : i32 loc(#loc30) +// CHECK: %7 = cir.load %3 : cir.ptr , !s32i loc(#loc18) +// CHECK: cir.store %7, %2 : !s32i, cir.ptr loc(#loc30) +// CHECK: %8 = cir.load %2 : cir.ptr , !s32i loc(#loc30) +// CHECK: cir.return %8 : !s32i loc(#loc30) // CHECK: } loc(#loc20) // CHECK: } loc(#loc) // CHECK: #loc = loc(unknown) diff --git a/clang/test/CIR/CodeGen/store.c b/clang/test/CIR/CodeGen/store.c index 20563f1fd7da..7d8207fbee10 100644 --- a/clang/test/CIR/CodeGen/store.c +++ b/clang/test/CIR/CodeGen/store.c @@ -7,11 +7,11 @@ void foo() { } // CHECK: cir.func @foo() { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.const(0 : i32) : i32 -// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr -// CHECK-NEXT: %2 = cir.const(1 : i32) : i32 -// CHECK-NEXT: cir.store %2, %0 : i32, cir.ptr +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: cir.store %2, %0 : !s32i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 6747b4713ea5..30bb4171759f 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -17,8 +17,8 @@ void baz() { struct Foo f; } -// CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> -// CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !ty_22struct2EBar22> +// CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i> +// CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", !s32i, !s8i, !ty_22struct2EBar22> // CHECK-DAG: module {{.*}} { // CHECK-NEXT: cir.func @baz() { // CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 7d15e9d5c3b8..d932f15f5235 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -26,11 +26,11 @@ void baz() { struct incomplete; void yoyo(incomplete *i) {} -// CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", i32, i8> -// CHECK: !ty_22struct2EMandalore22 = !cir.struct<"struct.Mandalore", i32, !cir.ptr, i32, #cir.recdecl.ast> // CHECK: !ty_22struct2Eincomplete22 = !cir.struct<"struct.incomplete", incomplete +// CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i> +// CHECK: !ty_22struct2EMandalore22 = !cir.struct<"struct.Mandalore", !u32i, !cir.ptr, !s32i, #cir.recdecl.ast> // CHECK: !ty_22class2EAdv22 = !cir.struct<"class.Adv", !ty_22struct2EMandalore22> -// CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", i32, i8, !ty_22struct2EBar22> +// CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", !s32i, !s8i, !ty_22struct2EBar22> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} @@ -39,38 +39,38 @@ void yoyo(incomplete *i) {} // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 +// CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.store %arg1, %1 : i32, cir.ptr +// CHECK-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: i32 +// CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.store %arg1, %1 : i32, cir.ptr +// CHECK-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr // CHECK-NEXT: %3 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: cir.store %4, %2 : i32, cir.ptr -// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , i32 +// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr +// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i // CHECK-NEXT: cir.return %5 // CHECK-NEXT: } // CHECK: cir.func @_Z3bazv() // CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["result", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["result", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () -// CHECK-NEXT: %3 = cir.const(4 : i32) : i32 -// CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, i32) -> () -// CHECK-NEXT: %4 = cir.const(4 : i32) : i32 -// CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, i32) -> i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr +// CHECK-NEXT: %3 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, !s32i) -> () +// CHECK-NEXT: %4 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, !s32i) -> !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } @@ -97,15 +97,15 @@ void m() { Adv C; } // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %2 = "cir.struct_element_addr"(%1) <{member_name = "x"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "w"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %4 = cir.const(1000024001 : i32) : i32 -// CHECK: cir.store %4, %3 : i32, cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "w"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %4 = cir.const(#cir.int<1000024001> : !u32i) : !u32i +// CHECK: cir.store %4, %3 : !u32i, cir.ptr // CHECK: %5 = "cir.struct_element_addr"(%2) <{member_name = "n"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %6 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > -// CHECK: %7 = "cir.struct_element_addr"(%2) <{member_name = "d"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %8 = cir.const(0 : i32) : i32 -// CHECK: cir.store %8, %7 : i32, cir.ptr +// CHECK: %7 = "cir.struct_element_addr"(%2) <{member_name = "d"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %8 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.store %8, %7 : !s32i, cir.ptr // CHECK: cir.return // CHECK: } @@ -144,4 +144,4 @@ struct Entry { void ppp() { Entry x; } // CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr -// CHECK: = "cir.struct_element_addr"(%1) <{member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr) -> i32>> +// CHECK: = "cir.struct_element_addr"(%1) <{member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr) -> !u32i>> diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index af55e0a513eb..26faec258615 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -17,26 +17,26 @@ void sw1(int a) { } // CHECK: cir.func @_Z3sw1i -// CHECK: cir.switch (%3 : i32) [ -// CHECK-NEXT: case (equal, 0 : i32) { -// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 -// CHECK-NEXT: cir.store %6, %1 : i32, cir.ptr +// CHECK: cir.switch (%3 : !s32i) [ +// CHECK-NEXT: case (equal, #cir.int<0> : !s32i) { +// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: cir.store %6, %1 : !s32i, cir.ptr // CHECK-NEXT: cir.yield break // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 1 : i32) { +// CHECK-NEXT: case (equal, #cir.int<1> : !s32i) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 2 : i32) { +// CHECK-NEXT: case (equal, #cir.int<2> : !s32i) { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %4 = cir.alloca i32, cir.ptr , ["yolo", init] -// CHECK-NEXT: %5 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %6 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %7 = cir.binop(add, %5, %6) : i32 -// CHECK-NEXT: cir.store %7, %1 : i32, cir.ptr -// CHECK-NEXT: %8 = cir.const(100 : i32) : i32 -// CHECK-NEXT: cir.store %8, %4 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.alloca !s32i, cir.ptr , ["yolo", init] +// CHECK-NEXT: %5 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %6 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i +// CHECK-NEXT: cir.store %7, %1 : !s32i, cir.ptr +// CHECK-NEXT: %8 = cir.const(#cir.int<100> : !s32i) : !s32i +// CHECK-NEXT: cir.store %8, %4 : !s32i, cir.ptr // CHECK-NEXT: cir.yield break // CHECK-NEXT: } // CHECK-NEXT: cir.yield fallthrough @@ -54,12 +54,12 @@ void sw2(int a) { // CHECK: cir.func @_Z3sw2i // CHECK: cir.scope { -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["yolo", init] -// CHECK-NEXT: %2 = cir.alloca i32, cir.ptr , ["fomo", init] -// CHECK: cir.switch (%4 : i32) [ -// CHECK-NEXT: case (equal, 3 : i32) { -// CHECK-NEXT: %5 = cir.const(0 : i32) : i32 -// CHECK-NEXT: cir.store %5, %2 : i32, cir.ptr +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["yolo", init] +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["fomo", init] +// CHECK: cir.switch (%4 : !s32i) [ +// CHECK-NEXT: case (equal, #cir.int<3> : !s32i) { +// CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: cir.store %5, %2 : !s32i, cir.ptr void sw3(int a) { switch (a) { @@ -70,8 +70,8 @@ void sw3(int a) { // CHECK: cir.func @_Z3sw3i // CHECK: cir.scope { -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.switch (%1 : i32) [ +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.switch (%1 : !s32i) [ // CHECK-NEXT: case (default) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: } @@ -89,21 +89,21 @@ int sw4(int a) { } // CHECK: cir.func @_Z3sw4i -// CHECK: cir.switch (%4 : i32) [ -// CHECK-NEXT: case (equal, 42 : i32) { +// CHECK: cir.switch (%4 : !s32i) [ +// CHECK-NEXT: case (equal, #cir.int<42> : !s32i) { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %5 = cir.const(3 : i32) : i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr -// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: cir.return %6 : i32 +// CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %6 : !s32i // CHECK-NEXT: } // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, // CHECK-NEXT: case (default) { -// CHECK-NEXT: %5 = cir.const(2 : i32) : i32 -// CHECK-NEXT: cir.store %5, %1 : i32, cir.ptr -// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: cir.return %6 : i32 +// CHECK-NEXT: %5 = cir.const(#cir.int<2> : !s32i) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %6 : !s32i // CHECK-NEXT: } // CHECK-NEXT: ] @@ -114,8 +114,8 @@ void sw5(int a) { } // CHECK: cir.func @_Z3sw5i -// CHECK: cir.switch (%1 : i32) [ -// CHECK-NEXT: case (equal, 1 : i32) { +// CHECK: cir.switch (%1 : !s32i) [ +// CHECK-NEXT: case (equal, #cir.int<1> : !s32i) { // CHECK-NEXT: cir.yield fallthrough void sw6(int a) { @@ -132,11 +132,11 @@ void sw6(int a) { } // CHECK: cir.func @_Z3sw6i -// CHECK: cir.switch (%1 : i32) [ -// CHECK-NEXT: case (anyof, [0, 1, 2] : i32) { +// CHECK: cir.switch (%1 : !s32i) [ +// CHECK-NEXT: case (anyof, [#cir.int<0>, #cir.int<1>, #cir.int<2>] : !s32i) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: }, -// CHECK-NEXT: case (anyof, [3, 4, 5] : i32) { +// CHECK-NEXT: case (anyof, [#cir.int<3>, #cir.int<4>, #cir.int<5>] : !s32i) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: } @@ -154,9 +154,9 @@ void sw7(int a) { } // CHECK: cir.func @_Z3sw7i -// CHECK: case (anyof, [0, 1, 2] : i32) { +// CHECK: case (anyof, [#cir.int<0>, #cir.int<1>, #cir.int<2>] : !s32i) { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, -// CHECK-NEXT: case (anyof, [3, 4, 5] : i32) { +// CHECK-NEXT: case (anyof, [#cir.int<3>, #cir.int<4>, #cir.int<5>] : !s32i) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp index ab01652e975d..16d273477266 100644 --- a/clang/test/CIR/CodeGen/ternary.cpp +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -6,22 +6,22 @@ int x(int y) { } // CHECK: cir.func @_Z1xi -// CHECK: %0 = cir.alloca i32, cir.ptr , ["y", init] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: cir.store %arg0, %0 : i32, cir.ptr -// CHECK: %2 = cir.load %0 : cir.ptr , i32 -// CHECK: %3 = cir.const(0 : i32) : i32 -// CHECK: %4 = cir.cmp(gt, %2, %3) : i32, !cir.bool +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool // CHECK: %5 = cir.ternary(%4, true { -// CHECK: %7 = cir.const(3 : i32) : i32 -// CHECK: cir.yield %7 : i32 +// CHECK: %7 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: cir.yield %7 : !s32i // CHECK: }, false { -// CHECK: %7 = cir.const(5 : i32) : i32 -// CHECK: cir.yield %7 : i32 -// CHECK: }) : i32 -// CHECK: cir.store %5, %1 : i32, cir.ptr -// CHECK: %6 = cir.load %1 : cir.ptr , i32 -// CHECK: cir.return %6 : i32 +// CHECK: %7 = cir.const(#cir.int<5> : !s32i) : !s32i +// CHECK: cir.yield %7 : !s32i +// CHECK: }) : !s32i +// CHECK: cir.store %5, %1 : !s32i, cir.ptr +// CHECK: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK: cir.return %6 : !s32i // CHECK: } typedef enum { @@ -35,22 +35,24 @@ void m(APIType api) { ((api == API_A) ? (static_cast(0)) : oba("yo.cpp")); } -// CHECK: cir.func @_Z1m7APIType -// CHECK: %0 = cir.alloca i32, cir.ptr , ["api", init] {alignment = 4 : i64} -// CHECK: cir.store %arg0, %0 : i32, cir.ptr -// CHECK: %1 = cir.load %0 : cir.ptr , i32 -// CHECK: %2 = cir.const(0 : i32) : i32 -// CHECK: %3 = cir.cmp(eq, %1, %2) : i32, !cir.bool -// CHECK: %4 = cir.ternary(%3, true { -// CHECK: %5 = cir.const(0 : i32) : i32 -// CHECK: %6 = cir.const(0 : i8) : i8 -// CHECK: cir.yield %6 : i8 -// CHECK: }, false { -// CHECK: %5 = cir.get_global @".str" : cir.ptr > -// CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_Z3obaPKc(%6) : (!cir.ptr) -> () -// CHECK: %7 = cir.const(0 : i8) : i8 -// CHECK: cir.yield %7 : i8 -// CHECK: }) : i8 -// CHECK: cir.return -// CHECK: } \ No newline at end of file +// CHECK: cir.func @_Z1m7APIType +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["api", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !u32i, cir.ptr +// CHECK: %1 = cir.load %0 : cir.ptr , !u32i +// CHECK: %2 = cir.cast(integral, %1 : !u32i), !s32i +// CHECK: %3 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: %4 = cir.cast(integral, %3 : !u32i), !s32i +// CHECK: %5 = cir.cmp(eq, %2, %4) : !s32i, !cir.bool +// CHECK: %6 = cir.ternary(%5, true { +// CHECK: %7 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %8 = cir.const(0 : i8) : i8 +// CHECK: cir.yield %8 : i8 +// CHECK: }, false { +// CHECK: %7 = cir.get_global @".str" : cir.ptr > +// CHECK: %8 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_Z3obaPKc(%8) : (!cir.ptr) -> () +// CHECK: %9 = cir.const(0 : i8) : i8 +// CHECK: cir.yield %9 : i8 +// CHECK: }) : i8 +// CHECK: cir.return +// CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index 7ff4a22e8d89..29e2240e2dc0 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -21,22 +21,22 @@ void t8() {} bool t9(bool b) { return b; } #endif -// CHECK: cir.func @t0(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK: cir.func @t1(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK: cir.func @t2(%arg0: i8 loc({{.*}})) -> i8 { -// CHECK: cir.func @t3(%arg0: i8 loc({{.*}})) -> i8 { -// CHECK: cir.func @t4(%arg0: i16 loc({{.*}})) -> i16 { -// CHECK: cir.func @t5(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK: cir.func @t0(%arg0: !s32i loc({{.*}})) -> !s32i { +// CHECK: cir.func @t1(%arg0: !u32i loc({{.*}})) -> !u32i { +// CHECK: cir.func @t2(%arg0: !s8i loc({{.*}})) -> !s8i { +// CHECK: cir.func @t3(%arg0: !u8i loc({{.*}})) -> !u8i { +// CHECK: cir.func @t4(%arg0: !s16i loc({{.*}})) -> !s16i { +// CHECK: cir.func @t5(%arg0: !u16i loc({{.*}})) -> !u16i { // CHECK: cir.func @t6(%arg0: f32 loc({{.*}})) -> f32 { // CHECK: cir.func @t7(%arg0: f64 loc({{.*}})) -> f64 { // CHECK: cir.func @t8() { -// CHECK-CPP: cir.func @_Z2t0i(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK-CPP: cir.func @_Z2t1j(%arg0: i32 loc({{.*}})) -> i32 { -// CHECK-CPP: cir.func @_Z2t2c(%arg0: i8 loc({{.*}})) -> i8 { -// CHECK-CPP: cir.func @_Z2t3h(%arg0: i8 loc({{.*}})) -> i8 { -// CHECK-CPP: cir.func @_Z2t4s(%arg0: i16 loc({{.*}})) -> i16 { -// CHECK-CPP: cir.func @_Z2t5t(%arg0: i16 loc({{.*}})) -> i16 { +// CHECK-CPP: cir.func @_Z2t0i(%arg0: !s32i loc({{.*}})) -> !s32i { +// CHECK-CPP: cir.func @_Z2t1j(%arg0: !u32i loc({{.*}})) -> !u32i { +// CHECK-CPP: cir.func @_Z2t2c(%arg0: !s8i loc({{.*}})) -> !s8i { +// CHECK-CPP: cir.func @_Z2t3h(%arg0: !u8i loc({{.*}})) -> !u8i { +// CHECK-CPP: cir.func @_Z2t4s(%arg0: !s16i loc({{.*}})) -> !s16i { +// CHECK-CPP: cir.func @_Z2t5t(%arg0: !u16i loc({{.*}})) -> !u16i { // CHECK-CPP: cir.func @_Z2t6f(%arg0: f32 loc({{.*}})) -> f32 { // CHECK-CPP: cir.func @_Z2t7d(%arg0: f64 loc({{.*}})) -> f64 { // CHECK-CPP: cir.func @_Z2t8v() { diff --git a/clang/test/CIR/CodeGen/unary-deref.cpp b/clang/test/CIR/CodeGen/unary-deref.cpp index 8de7659756a2..e45f884d01bc 100644 --- a/clang/test/CIR/CodeGen/unary-deref.cpp +++ b/clang/test/CIR/CodeGen/unary-deref.cpp @@ -13,5 +13,5 @@ void foo() { // CHECK: cir.func linkonce_odr @_ZNK12MyIntPointer4readEv // CHECK: %2 = cir.load %0 // CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "ptr"}> -// CHECK: %4 = cir.load deref %3 : cir.ptr > +// CHECK: %4 = cir.load deref %3 : cir.ptr > // CHECK: %5 = cir.load %4 diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index cdc118b55d90..e7141a7d8617 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -6,9 +6,9 @@ unsigned up0() { return +a; } -// CHECK: cir.func @_Z3up0v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: cir.func @_Z3up0v() -> !u32i { +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#OUTPUT:]] = cir.unary(plus, %[[#INPUT]]) // CHECK: cir.store %[[#OUTPUT]], %[[#RET]] @@ -18,9 +18,9 @@ unsigned um0() { return -a; } -// CHECK: cir.func @_Z3um0v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: cir.func @_Z3um0v() -> !u32i { +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#OUTPUT:]] = cir.unary(minus, %[[#INPUT]]) // CHECK: cir.store %[[#OUTPUT]], %[[#RET]] @@ -30,102 +30,103 @@ unsigned un0() { return ~a; // a ^ -1 , not } -// CHECK: cir.func @_Z3un0v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] +// CHECK: cir.func @_Z3un0v() -> !u32i { +// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#OUTPUT:]] = cir.unary(not, %[[#INPUT]]) // CHECK: cir.store %[[#OUTPUT]], %[[#RET]] -unsigned inc0() { - unsigned a = 1; +int inc0() { + int a = 1; ++a; return a; } -// CHECK: cir.func @_Z4inc0v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] -// CHECK: %[[#ATMP:]] = cir.const(1 : i32) : i32 -// CHECK: cir.store %[[#ATMP]], %[[#A]] : i32 +// CHECK: cir.func @_Z4inc0v() -> !s32i { +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#INPUT]]) // CHECK: cir.store %[[#INCREMENTED]], %[[#A]] // CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] // CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] // CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] -// CHECK: cir.return %[[#OUTPUT]] : i32 +// CHECK: cir.return %[[#OUTPUT]] : !s32i -unsigned dec0() { - unsigned a = 1; +int dec0() { + int a = 1; --a; return a; } -// CHECK: cir.func @_Z4dec0v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] -// CHECK: %[[#ATMP:]] = cir.const(1 : i32) : i32 -// CHECK: cir.store %[[#ATMP]], %[[#A]] : i32 +// CHECK: cir.func @_Z4dec0v() -> !s32i { +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#INCREMENTED:]] = cir.unary(dec, %[[#INPUT]]) // CHECK: cir.store %[[#INCREMENTED]], %[[#A]] // CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] // CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] // CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] -// CHECK: cir.return %[[#OUTPUT]] : i32 +// CHECK: cir.return %[[#OUTPUT]] : !s32i -unsigned inc1() { - unsigned a = 1; + +int inc1() { + int a = 1; a++; return a; } -// CHECK: cir.func @_Z4inc1v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] -// CHECK: %[[#ATMP:]] = cir.const(1 : i32) : i32 -// CHECK: cir.store %[[#ATMP]], %[[#A]] : i32 +// CHECK: cir.func @_Z4inc1v() -> !s32i { +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#INPUT]]) // CHECK: cir.store %[[#INCREMENTED]], %[[#A]] // CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] // CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] // CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] -// CHECK: cir.return %[[#OUTPUT]] : i32 +// CHECK: cir.return %[[#OUTPUT]] : !s32i -unsigned dec1() { - unsigned a = 1; +int dec1() { + int a = 1; a--; return a; } -// CHECK: cir.func @_Z4dec1v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] -// CHECK: %[[#ATMP:]] = cir.const(1 : i32) : i32 -// CHECK: cir.store %[[#ATMP]], %[[#A]] : i32 +// CHECK: cir.func @_Z4dec1v() -> !s32i { +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#INCREMENTED:]] = cir.unary(dec, %[[#INPUT]]) // CHECK: cir.store %[[#INCREMENTED]], %[[#A]] // CHECK: %[[#A_TO_OUTPUT:]] = cir.load %[[#A]] // CHECK: cir.store %[[#A_TO_OUTPUT]], %[[#RET]] // CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] -// CHECK: cir.return %[[#OUTPUT]] : i32 +// CHECK: cir.return %[[#OUTPUT]] : !s32i // Ensure the increment is performed after the assignment to b. -unsigned inc2() { - unsigned a = 1; - unsigned b = a++; +int inc2() { + int a = 1; + int b = a++; return b; } -// CHECK: cir.func @_Z4inc2v() -> i32 { -// CHECK: %[[#RET:]] = cir.alloca i32, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca i32, cir.ptr , ["a", init] -// CHECK: %[[#B:]] = cir.alloca i32, cir.ptr , ["b", init] -// CHECK: %[[#ATMP:]] = cir.const(1 : i32) : i32 -// CHECK: cir.store %[[#ATMP]], %[[#A]] : i32 +// CHECK: cir.func @_Z4inc2v() -> !s32i { +// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#B:]] = cir.alloca !s32i, cir.ptr , ["b", init] +// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#ATOB:]] = cir.load %[[#A]] // CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#ATOB]]) // CHECK: cir.store %[[#INCREMENTED]], %[[#A]] @@ -133,7 +134,7 @@ unsigned inc2() { // CHECK: %[[#B_TO_OUTPUT:]] = cir.load %[[#B]] // CHECK: cir.store %[[#B_TO_OUTPUT]], %[[#RET]] // CHECK: %[[#OUTPUT:]] = cir.load %[[#RET]] -// CHECK: cir.return %[[#OUTPUT]] : i32 +// CHECK: cir.return %[[#OUTPUT]] : !s32i int *inc_p(int *i) { --i; @@ -141,13 +142,13 @@ int *inc_p(int *i) { return i; } -// CHECK: cir.func @_Z5inc_pPi(%arg0: !cir.ptr +// CHECK: cir.func @_Z5inc_pPi(%arg0: !cir.ptr -// CHECK: %[[#i_addr:]] = cir.alloca !cir.ptr, cir.ptr >, ["i", init] {alignment = 8 : i64} -// CHECK: %[[#i_dec:]] = cir.load %[[#i_addr]] : cir.ptr >, !cir.ptr -// CHECK: %[[#dec_const:]] = cir.const(-1 : i32) : i32 -// CHECK: = cir.ptr_stride(%[[#i_dec]] : !cir.ptr, %[[#dec_const]] : i32), !cir.ptr +// CHECK: %[[#i_addr:]] = cir.alloca !cir.ptr, cir.ptr >, ["i", init] {alignment = 8 : i64} +// CHECK: %[[#i_dec:]] = cir.load %[[#i_addr]] : cir.ptr >, !cir.ptr +// CHECK: %[[#dec_const:]] = cir.const(#cir.int<-1> : !s32i) : !s32i +// CHECK: = cir.ptr_stride(%[[#i_dec]] : !cir.ptr, %[[#dec_const]] : !s32i), !cir.ptr -// CHECK: %[[#i_inc:]] = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %[[#inc_const:]] = cir.const(1 : i32) : i32 -// CHECK: = cir.ptr_stride(%[[#i_inc]] : !cir.ptr, %[[#inc_const]] : i32), !cir.ptr \ No newline at end of file +// CHECK: %[[#i_inc:]] = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %[[#inc_const:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: = cir.ptr_stride(%[[#i_inc]] : !cir.ptr, %[[#inc_const]] : !s32i), !cir.ptr diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index da47b290e3b1..e4bd31cd38b4 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -12,11 +12,12 @@ void m() { yolm3 q3; } -// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", !cir.ptr, i32, #cir.recdecl.ast> -// CHECK: !ty_22struct2Eanon221 = !cir.struct<"struct.anon", !cir.bool, i32, #cir.recdecl.ast> -// CHECK: !ty_22struct2Eyolo22 = !cir.struct<"struct.yolo", i32, #cir.recdecl.ast> +// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", !cir.bool, !s32i, #cir.recdecl.ast> +// CHECK: !ty_22struct2Eyolo22 = !cir.struct<"struct.yolo", !s32i, #cir.recdecl.ast> +// CHECK: !ty_22struct2Eanon221 = !cir.struct<"struct.anon", !cir.ptr, !s32i, #cir.recdecl.ast> + // CHECK: !ty_22union2Eyolm22 = !cir.struct<"union.yolm", !ty_22struct2Eyolo22> -// CHECK: !ty_22union2Eyolm222 = !cir.struct<"union.yolm2", !ty_22struct2Eanon22> +// CHECK: !ty_22union2Eyolm222 = !cir.struct<"union.yolm2", !ty_22struct2Eanon221> // CHECK: cir.func @_Z1mv() { // CHECK: cir.alloca !ty_22union2Eyolm22, cir.ptr , ["q"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 9c4a4cf61619..8523843f656a 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -80,13 +80,13 @@ class B : public A // CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> // typeinfo name for B -// CHECK: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array {alignment = 1 : i64} // typeinfo for A // CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr // typeinfo for B -// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr,#cir.global_view<@_ZTS1B> : !cir.ptr,#cir.global_view<@_ZTI1A> : !cir.ptr}>> : ![[TypeInfoB]] +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr,#cir.global_view<@_ZTS1B> : !cir.ptr,#cir.global_view<@_ZTI1A> : !cir.ptr}>> : ![[TypeInfoB]] // Checks for dtors in dtors.cpp diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index edbb7a9bad5a..a740ec4c503f 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -1,23 +1,24 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s +!s32i = !cir.int module { - cir.func @yolo(%arg0 : i32) { - %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] - %a = cir.cast (int_to_bool, %arg0 : i32), !cir.bool + cir.func @yolo(%arg0 : !s32i) { + %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] + %a = cir.cast (int_to_bool, %arg0 : !s32i), !cir.bool - %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr - %4 = cir.const(0 : i32) : i32 + %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr + %4 = cir.const(#cir.int<0> : !s32i) : !s32i cir.return } - cir.func @bitcast(%p: !cir.ptr) { - %2 = cir.cast(bitcast, %p : !cir.ptr), !cir.ptr + cir.func @bitcast(%p: !cir.ptr) { + %2 = cir.cast(bitcast, %p : !cir.ptr), !cir.ptr cir.return } } -// CHECK: cir.func @yolo(%arg0: i32) -// CHECK: %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool -// CHECK: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK: cir.func @yolo(%arg0: !s32i) +// CHECK: %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // CHECK: cir.func @bitcast -// CHECK: %0 = cir.cast(bitcast, %arg0 : !cir.ptr), !cir.ptr +// CHECK: %0 = cir.cast(bitcast, %arg0 : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index de66b684ed37..a4dfa12d1425 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -1,45 +1,47 @@ // Test the CIR operations can parse and print correctly (roundtrip) // RUN: cir-tool %s | cir-tool | FileCheck %s +!s32i = !cir.int + module { - cir.func @foo(%arg0: i32) -> i32 { - %0 = cir.alloca i32, cir.ptr , ["x", init] - cir.store %arg0, %0 : i32, cir.ptr - %1 = cir.load %0 : cir.ptr , i32 - cir.return %1 : i32 + cir.func @foo(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] + cir.store %arg0, %0 : !s32i, cir.ptr + %1 = cir.load %0 : cir.ptr , !s32i + cir.return %1 : !s32i } - cir.func @f3() -> i32 { - %0 = cir.alloca i32, cir.ptr , ["x", init] - %1 = cir.const(3 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr - %2 = cir.load %0 : cir.ptr , i32 - cir.return %2 : i32 + cir.func @f3() -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] + %1 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + cir.return %2 : !s32i } - cir.func @if0(%arg0: i32) -> i32 { - %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - cir.store %arg0, %1 : i32, cir.ptr - %2 = cir.const(0 : i32) : i32 - cir.store %2, %0 : i32, cir.ptr - %3 = cir.load %1 : cir.ptr , i32 - %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool + cir.func @if0(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + cir.store %arg0, %1 : !s32i, cir.ptr + %2 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %2, %0 : !s32i, cir.ptr + %3 = cir.load %1 : cir.ptr , !s32i + %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool cir.if %4 { - %6 = cir.const(3 : i32) : i32 - cir.store %6, %0 : i32, cir.ptr + %6 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.store %6, %0 : !s32i, cir.ptr } else { - %6 = cir.const(4 : i32) : i32 - cir.store %6, %0 : i32, cir.ptr + %6 = cir.const(#cir.int<4> : !s32i) : !s32i + cir.store %6, %0 : !s32i, cir.ptr } - %5 = cir.load %0 : cir.ptr , i32 - cir.return %5 : i32 + %5 = cir.load %0 : cir.ptr , !s32i + cir.return %5 : !s32i } cir.func @s0() { - %0 = cir.alloca i32, cir.ptr , ["x"] {alignment = 4 : i64} + %0 = cir.alloca !s32i, cir.ptr , ["x"] {alignment = 4 : i64} cir.scope { - %1 = cir.alloca i32, cir.ptr , ["y"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["y"] {alignment = 4 : i64} } cir.return } @@ -47,35 +49,35 @@ module { // CHECK: module { -// CHECK-NEXT: cir.func @foo(%arg0: i32) -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", init] -// CHECK-NEXT: cir.store %arg0, %0 : i32, cir.ptr -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.return %1 : i32 +// CHECK-NEXT: cir.func @foo(%arg0: !s32i) -> !s32i { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["x", init] +// CHECK-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %1 : !s32i // CHECK-NEXT: } -// CHECK-NEXT: cir.func @f3() -> i32 { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x", init] -// CHECK-NEXT: %1 = cir.const(3 : i32) : i32 -// CHECK-NEXT: cir.store %1, %0 : i32, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: cir.return %2 : i32 +// CHECK-NEXT: cir.func @f3() -> !s32i { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["x", init] +// CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.return %2 : !s32i // CHECK-NEXT: } -// CHECK: @if0(%arg0: i32) -> i32 { -// CHECK: %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool +// CHECK: @if0(%arg0: !s32i) -> !s32i { +// CHECK: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.if %4 { -// CHECK-NEXT: %6 = cir.const(3 : i32) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: %6 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr // CHECK-NEXT: } else { -// CHECK-NEXT: %6 = cir.const(4 : i32) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: %6 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr // CHECK-NEXT: } // CHECK: cir.func @s0() { -// CHECK-NEXT: %0 = cir.alloca i32, cir.ptr , ["x"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["x"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %1 = cir.alloca i32, cir.ptr , ["y"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["y"] {alignment = 4 : i64} // CHECK-NEXT: } // CHECK: } diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 1a1d126a18a7..347b340a5ea0 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -1,43 +1,45 @@ // RUN: cir-tool %s | FileCheck %s - +!s8i = !cir.int +!s32i = !cir.int +!s64i = !cir.int module { - cir.global external @a = 3 : i32 - cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> - cir.global external @b = #cir.const_array<"example\00" : !cir.array> - cir.global external @rgb2 = #cir.const_struct<{0 : i8, 5 : i64, #cir.null : !cir.ptr}> : !cir.struct<"", i8, i64, !cir.ptr> - cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} - cir.global "private" internal @c : i32 - cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} - cir.global external @s = @".str2": !cir.ptr + cir.global external @a = #cir.int<3> : !s32i + cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i] : !cir.array> + cir.global external @b = #cir.const_array<"example\00" : !cir.array> + cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.null : !cir.ptr}> : !cir.struct<"", !s8i, i64, !cir.ptr> + cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} + cir.global "private" internal @c : !s32i + cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s = @".str2": !cir.ptr cir.func @use_global() { - %0 = cir.get_global @a : cir.ptr + %0 = cir.get_global @a : cir.ptr cir.return } - cir.global external @table = #cir.global_view<@s> : !cir.ptr - cir.global external @elt_ptr = #cir.global_view<@rgb, [1]> : !cir.ptr - cir.global external @table_of_ptrs = #cir.const_array<[#cir.global_view<@rgb, [1]> : !cir.ptr] : !cir.array x 1>> + cir.global external @table = #cir.global_view<@s> : !cir.ptr + cir.global external @elt_ptr = #cir.global_view<@rgb, [1]> : !cir.ptr + cir.global external @table_of_ptrs = #cir.const_array<[#cir.global_view<@rgb, [1]> : !cir.ptr] : !cir.array x 1>> // Note MLIR requires "private" for global declarations, should get // rid of this somehow in favor of clarity? - cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr - cir.global "private" constant external @type_info_A : !cir.ptr - cir.global constant external @type_info_name_B = #cir.const_array<"1B\00" : !cir.array> + cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr + cir.global "private" constant external @type_info_A : !cir.ptr + cir.global constant external @type_info_name_B = #cir.const_array<"1B\00" : !cir.array> cir.global external @type_info_B = #cir.typeinfo<<{ - #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, - #cir.global_view<@type_info_name_B> : !cir.ptr, - #cir.global_view<@type_info_A> : !cir.ptr}>> - : !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr + #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, + #cir.global_view<@type_info_name_B> : !cir.ptr, + #cir.global_view<@type_info_A> : !cir.ptr}>> + : !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr > } -// CHECK: cir.global external @a = 3 : i32 -// CHECK: cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array -// CHECK: cir.global external @b = #cir.const_array<"example\00" : !cir.array> -// CHECK: cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} -// CHECK: cir.global "private" internal @c : i32 -// CHECK: cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK: cir.global external @s = @".str2": !cir.ptr +// CHECK: cir.global external @a = #cir.int<3> : !s32i +// CHECK: cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i]> : !cir.array +// CHECK: cir.global external @b = #cir.const_array<"example\00" : !cir.array> +// CHECK: cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" internal @c : !s32i +// CHECK: cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global external @s = @".str2": !cir.ptr // CHECK: cir.func @use_global() -// CHECK-NEXT: %0 = cir.get_global @a : cir.ptr +// CHECK-NEXT: %0 = cir.get_global @a : cir.ptr diff --git a/clang/test/CIR/IR/int.cir b/clang/test/CIR/IR/int.cir new file mode 100644 index 000000000000..79d28427f922 --- /dev/null +++ b/clang/test/CIR/IR/int.cir @@ -0,0 +1,39 @@ +// module { +// cir.global external @a = #cir.int<255> : !cir.int +// } + +// RUN: cir-tool %s | FileCheck %s +!s8i = !cir.int +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int + +!u8i = !cir.int +!u16i = !cir.int +!u32i = !cir.int +!u64i = !cir.int + +cir.func @validIntTypesAndAttributes() -> () { + + %1 = cir.const(#cir.int<-128> : !cir.int) : !s8i + %2 = cir.const(#cir.int<127> : !cir.int) : !s8i + %3 = cir.const(#cir.int<255> : !cir.int) : !u8i + + %4 = cir.const(#cir.int<-32768> : !cir.int) : !s16i + %5 = cir.const(#cir.int<32767> : !cir.int) : !s16i + %6 = cir.const(#cir.int<65535> : !cir.int) : !u16i + + %7 = cir.const(#cir.int<-2147483648> : !cir.int) : !s32i + %8 = cir.const(#cir.int<2147483647> : !cir.int) : !s32i + %9 = cir.const(#cir.int<4294967295> : !cir.int) : !u32i + + // FIXME: MLIR is emitting a "too large" error for this one. Not sure why. + // %10 = cir.const(#cir.int<-9223372036854775808> : !cir.int) : !s64i + %11 = cir.const(#cir.int<9223372036854775807> : !cir.int) : !s64i + %12 = cir.const(#cir.int<18446744073709551615> : !cir.int) : !u64i + + cir.return +} + +// No need to check stuff. If it parses, it's fine. +// CHECK: cir.func @validIntTypesAndAttributes() diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 719a0c5b0ab7..af1f2ead29f4 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -82,11 +82,12 @@ cir.func @yieldcontinue() { // ----- +!s32i = !cir.int cir.func @s0() { - %1 = cir.const(2 : i32) : i32 - cir.switch (%1 : i32) [ + %1 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.switch (%1 : !s32i) [ case (equal, 5) { - %2 = cir.const(3 : i32) : i32 + %2 = cir.const(#cir.int<3> : !s32i) : !s32i } ] // expected-error {{blocks are expected to be explicitly terminated}} cir.return @@ -94,9 +95,10 @@ cir.func @s0() { // ----- +!s32i = !cir.int cir.func @s1() { - %1 = cir.const(2 : i32) : i32 - cir.switch (%1 : i32) [ + %1 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.switch (%1 : !s32i) [ case (equal, 5) { } ] // expected-error {{case region shall not be empty}} @@ -105,9 +107,9 @@ cir.func @s1() { // ----- -cir.func @badstride(%x: !cir.ptr) { - %idx = cir.const(2 : i32) : i32 - %4 = cir.ptr_stride(%x : !cir.ptr, %idx : i32), !cir.ptr // expected-error {{requires the same type for first operand and result}} +cir.func @badstride(%x: !cir.ptr>) { + %idx = cir.const(#cir.int<2> : !cir.int) : !cir.int + %4 = cir.ptr_stride(%x : !cir.ptr>, %idx : !cir.int), !cir.ptr // expected-error {{requires the same type for first operand and result}} cir.return } @@ -300,4 +302,40 @@ module { cir.func @l1() alias(@l0) { // expected-error {{function alias shall not have a body}} cir.return } -} \ No newline at end of file +} + +// ----- + +module { + // expected-error@below {{expected 's' or 'u'}} + cir.func @l0(%arg0: !cir.int) -> () { + cir.return + } +} + +// // ----- + +module { + // expected-error@below {{expected integer width to be from 8 up to 64}} + cir.func @l0(%arg0: !cir.int) -> () { + cir.return + } +} + +// ----- + +module { + // expected-error@below {{expected integer width to be a multiple of 8}} + cir.func @l0(%arg0: !cir.int) -> () { + cir.return + } +} + +// ----- + +module { + // expected-error@below {{integer value too large for the given type}} + cir.global external @a = #cir.int<256> : !cir.int + // expected-error@below {{integer value too large for the given type}} + cir.global external @b = #cir.int<-129> : !cir.int +} diff --git a/clang/test/CIR/IR/ptr_stride.cir b/clang/test/CIR/IR/ptr_stride.cir index a9e7a4ab29b0..738983f15633 100644 --- a/clang/test/CIR/IR/ptr_stride.cir +++ b/clang/test/CIR/IR/ptr_stride.cir @@ -1,21 +1,22 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s +!s32i = !cir.int module { - cir.func @arraysubscript(%arg0: i32) { - %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] - %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool - %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr - %3 = cir.const(0 : i32) : i32 - %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr + cir.func @arraysubscript(%arg0: !s32i) { + %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] + %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr cir.return } } -// CHECK: cir.func @arraysubscript(%arg0: i32) { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] -// CHECK-NEXT: %1 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool -// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %3 = cir.const(0 : i32) : i32 -// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr +// CHECK: cir.func @arraysubscript(%arg0: !s32i) { +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] +// CHECK-NEXT: %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index 0f2c9acd881d..56edfbbd9b60 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -1,35 +1,36 @@ // RUN: cir-tool %s | FileCheck %s +!s32i = !cir.int cir.func @s0() { - %1 = cir.const(2 : i32) : i32 - cir.switch (%1 : i32) [ + %1 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.switch (%1 : !s32i) [ case (default) { cir.return }, case (equal, 3) { cir.yield fallthrough }, - case (anyof, [6, 7, 8] : i32) { + case (anyof, [6, 7, 8] : !s32i) { cir.yield break }, - case (equal, 5 : i32) { + case (equal, 5 : !s32i) { cir.yield } ] cir.return } -// CHECK: cir.switch (%0 : i32) [ +// CHECK: cir.switch (%0 : !s32i) [ // CHECK-NEXT: case (default) { // CHECK-NEXT: cir.return // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 3 : i32) { +// CHECK-NEXT: case (equal, #cir.int<3> : !s32i) { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, -// CHECK-NEXT: case (anyof, [6, 7, 8] : i32) { +// CHECK-NEXT: case (anyof, [#cir.int<6>, #cir.int<7>, #cir.int<8>] : !s32i) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 5 : i32) { +// CHECK-NEXT: case (equal, #cir.int<5> : !s32i) { // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: ] diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir index 6347b4f07b4f..04564d9ec0dc 100644 --- a/clang/test/CIR/Lowering/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -1,53 +1,54 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %3 = cir.const(2 : i32) : i32 cir.store %3, %0 : i32, cir.ptr - %4 = cir.const(1 : i32) : i32 cir.store %4, %1 : i32, cir.ptr - %5 = cir.load %0 : cir.ptr , i32 - %6 = cir.load %1 : cir.ptr , i32 - %7 = cir.binop(mul, %5, %6) : i32 - cir.store %7, %2 : i32, cir.ptr - %8 = cir.load %2 : cir.ptr , i32 - %9 = cir.load %1 : cir.ptr , i32 - %10 = cir.binop(div, %8, %9) : i32 - cir.store %10, %2 : i32, cir.ptr - %11 = cir.load %2 : cir.ptr , i32 - %12 = cir.load %1 : cir.ptr , i32 - %13 = cir.binop(rem, %11, %12) : i32 - cir.store %13, %2 : i32, cir.ptr - %14 = cir.load %2 : cir.ptr , i32 - %15 = cir.load %1 : cir.ptr , i32 - %16 = cir.binop(add, %14, %15) : i32 - cir.store %16, %2 : i32, cir.ptr - %17 = cir.load %2 : cir.ptr , i32 - %18 = cir.load %1 : cir.ptr , i32 - %19 = cir.binop(sub, %17, %18) : i32 - cir.store %19, %2 : i32, cir.ptr - %20 = cir.load %2 : cir.ptr , i32 - %21 = cir.load %1 : cir.ptr , i32 - %22 = cir.binop(shr, %20, %21) : i32 - cir.store %22, %2 : i32, cir.ptr - %23 = cir.load %2 : cir.ptr , i32 - %24 = cir.load %1 : cir.ptr , i32 - %25 = cir.binop(shl, %23, %24) : i32 - cir.store %25, %2 : i32, cir.ptr - %26 = cir.load %2 : cir.ptr , i32 - %27 = cir.load %1 : cir.ptr , i32 - %28 = cir.binop(and, %26, %27) : i32 - cir.store %28, %2 : i32, cir.ptr - %29 = cir.load %2 : cir.ptr , i32 - %30 = cir.load %1 : cir.ptr , i32 - %31 = cir.binop(xor, %29, %30) : i32 - cir.store %31, %2 : i32, cir.ptr - %32 = cir.load %2 : cir.ptr , i32 - %33 = cir.load %1 : cir.ptr , i32 - %34 = cir.binop(or, %32, %33) : i32 - cir.store %34, %2 : i32, cir.ptr + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<2> : !u32i) : !u32i cir.store %3, %0 : !u32i, cir.ptr + %4 = cir.const(#cir.int<1> : !u32i) : !u32i cir.store %4, %1 : !u32i, cir.ptr + %5 = cir.load %0 : cir.ptr , !u32i + %6 = cir.load %1 : cir.ptr , !u32i + %7 = cir.binop(mul, %5, %6) : !u32i + cir.store %7, %2 : !u32i, cir.ptr + %8 = cir.load %2 : cir.ptr , !u32i + %9 = cir.load %1 : cir.ptr , !u32i + %10 = cir.binop(div, %8, %9) : !u32i + cir.store %10, %2 : !u32i, cir.ptr + %11 = cir.load %2 : cir.ptr , !u32i + %12 = cir.load %1 : cir.ptr , !u32i + %13 = cir.binop(rem, %11, %12) : !u32i + cir.store %13, %2 : !u32i, cir.ptr + %14 = cir.load %2 : cir.ptr , !u32i + %15 = cir.load %1 : cir.ptr , !u32i + %16 = cir.binop(add, %14, %15) : !u32i + cir.store %16, %2 : !u32i, cir.ptr + %17 = cir.load %2 : cir.ptr , !u32i + %18 = cir.load %1 : cir.ptr , !u32i + %19 = cir.binop(sub, %17, %18) : !u32i + cir.store %19, %2 : !u32i, cir.ptr + %20 = cir.load %2 : cir.ptr , !u32i + %21 = cir.load %1 : cir.ptr , !u32i + %22 = cir.binop(shr, %20, %21) : !u32i + cir.store %22, %2 : !u32i, cir.ptr + %23 = cir.load %2 : cir.ptr , !u32i + %24 = cir.load %1 : cir.ptr , !u32i + %25 = cir.binop(shl, %23, %24) : !u32i + cir.store %25, %2 : !u32i, cir.ptr + %26 = cir.load %2 : cir.ptr , !u32i + %27 = cir.load %1 : cir.ptr , !u32i + %28 = cir.binop(and, %26, %27) : !u32i + cir.store %28, %2 : !u32i, cir.ptr + %29 = cir.load %2 : cir.ptr , !u32i + %30 = cir.load %1 : cir.ptr , !u32i + %31 = cir.binop(xor, %29, %30) : !u32i + cir.store %31, %2 : !u32i, cir.ptr + %32 = cir.load %2 : cir.ptr , !u32i + %33 = cir.load %1 : cir.ptr , !u32i + %34 = cir.binop(or, %32, %33) : !u32i + cir.store %34, %2 : !u32i, cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/branch.cir b/clang/test/CIR/Lowering/branch.cir index 1e50806dc355..135fd79ed6a1 100644 --- a/clang/test/CIR/Lowering/branch.cir +++ b/clang/test/CIR/Lowering/branch.cir @@ -1,14 +1,15 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -cir.func @foo(%arg0: !cir.bool) -> i32 { +!s32i = !cir.int +cir.func @foo(%arg0: !cir.bool) -> !s32i { cir.brcond %arg0 ^bb1, ^bb2 ^bb1: - %0 = cir.const(1: i32) : i32 - cir.return %0 : i32 + %0 = cir.const(#cir.int<1>: !s32i) : !s32i + cir.return %0 : !s32i ^bb2: - %1 = cir.const(0: i32) : i32 - cir.return %1 : i32 + %1 = cir.const(#cir.int<0>: !s32i) : !s32i + cir.return %1 : !s32i } // MLIR: module { diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 30f54fd3cd2e..71cb8593610f 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -1,10 +1,16 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!s8i = !cir.int +!u32i = !cir.int +!u8i = !cir.int module { - cir.func @foo(%arg0: i32) -> i32 { - %4 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool - cir.return %arg0 : i32 + cir.func @foo(%arg0: !s32i) -> !s32i { + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + cir.return %arg0 : !s32i } // MLIR: llvm.func @foo(%arg0: i32) -> i32 { @@ -21,42 +27,41 @@ module { // LLVM-NEXT: ret i32 %0 // LLVM-NEXT: } - cir.func @cStyleCasts(%arg0: i32, %arg1: i32) -> i32 { + cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i) -> !s32i { // MLIR: llvm.func @cStyleCasts(%arg0: i32, %arg1: i32) -> i32 { - %0 = cir.alloca i32, cir.ptr , ["x1", init] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["x2", init] {alignment = 4 : i64} - %2 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} - %3 = cir.alloca i8, cir.ptr , ["a", init] {alignment = 1 : i64} - %4 = cir.alloca i16, cir.ptr , ["b", init] {alignment = 2 : i64} - %5 = cir.alloca i64, cir.ptr , ["c", init] {alignment = 8 : i64} - %6 = cir.alloca i64, cir.ptr , ["d", init] {alignment = 8 : i64} - %17 = cir.alloca !cir.array, cir.ptr >, ["arr"] {alignment = 4 : i64} - %18 = cir.alloca !cir.ptr, cir.ptr >, ["e", init] {alignment = 8 : i64} - cir.store %arg0, %0 : i32, cir.ptr - cir.store %arg1, %1 : i32, cir.ptr - %7 = cir.load %0 : cir.ptr , i32 - %8 = cir.cast(integral, %7 : i32), i8 + %0 = cir.alloca !u32i, cir.ptr , ["x1", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["x2", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %3 = cir.alloca !s8i, cir.ptr , ["a", init] {alignment = 1 : i64} + %4 = cir.alloca !s16i, cir.ptr , ["b", init] {alignment = 2 : i64} + %5 = cir.alloca !s64i, cir.ptr , ["c", init] {alignment = 8 : i64} + %6 = cir.alloca !s64i, cir.ptr , ["d", init] {alignment = 8 : i64} + %7 = cir.alloca !cir.array, cir.ptr >, ["arr"] {alignment = 4 : i64} + %8 = cir.alloca !cir.ptr, cir.ptr >, ["e", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !u32i, cir.ptr + cir.store %arg1, %1 : !s32i, cir.ptr + %9 = cir.load %0 : cir.ptr , !u32i + %10 = cir.cast(integral, %9 : !u32i), !s8i // MLIR: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8 - cir.store %8, %3 : i8, cir.ptr - %9 = cir.load %1 : cir.ptr , i32 - %10 = cir.cast(integral, %9 : i32), i16 + cir.store %10, %3 : !s8i, cir.ptr + %11 = cir.load %1 : cir.ptr , !s32i + %12 = cir.cast(integral, %11 : !s32i), !s16i // MLIR: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i16 - cir.store %10, %4 : i16, cir.ptr - %11 = cir.load %0 : cir.ptr , i32 - %12 = cir.cast(integral, %11 : i32), i64 - // FIXME: this should be a zext, but we don't distinguish signed/unsigned + cir.store %12, %4 : !s16i, cir.ptr + %13 = cir.load %0 : cir.ptr , !u32i + %14 = cir.cast(integral, %13 : !u32i), !s64i + // MLIR: %{{[0-9]+}} = llvm.zext %{{[0-9]+}} : i32 to i64 + cir.store %14, %5 : !s64i, cir.ptr + %15 = cir.load %1 : cir.ptr , !s32i + %16 = cir.cast(integral, %15 : !s32i), !s64i // MLIR: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 - cir.store %12, %5 : i64, cir.ptr - %13 = cir.load %1 : cir.ptr , i32 - %14 = cir.cast(integral, %13 : i32), i64 - // MLIR: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 - cir.store %14, %6 : i64, cir.ptr - %19 = cir.cast(array_to_ptrdecay, %17 : !cir.ptr>), !cir.ptr + cir.store %16, %6 : !s64i, cir.ptr + %17 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr - cir.store %19, %18 : !cir.ptr, cir.ptr > - %15 = cir.const(0 : i32) : i32 - cir.store %15, %2 : i32, cir.ptr - %16 = cir.load %2 : cir.ptr , i32 - cir.return %16 : i32 + cir.store %17, %8 : !cir.ptr, cir.ptr > + %18 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %18, %2 : !s32i, cir.ptr + %19 = cir.load %2 : cir.ptr , !s32i + cir.return %19 : !s32i } } diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 71f488cd9535..2260d009efa9 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -1,45 +1,46 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int module { - cir.func @dot(%arg0: !cir.ptr, %arg1: !cir.ptr, %arg2: i32) -> f64 { + cir.func @dot(%arg0: !cir.ptr, %arg1: !cir.ptr, %arg2: !s32i) -> f64 { %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} %1 = cir.alloca !cir.ptr, cir.ptr >, ["b", init] {alignment = 8 : i64} - %2 = cir.alloca i32, cir.ptr , ["size", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, cir.ptr , ["size", init] {alignment = 4 : i64} %3 = cir.alloca f64, cir.ptr , ["__retval"] {alignment = 8 : i64} %4 = cir.alloca f64, cir.ptr , ["q", init] {alignment = 8 : i64} cir.store %arg0, %0 : !cir.ptr, cir.ptr > cir.store %arg1, %1 : !cir.ptr, cir.ptr > - cir.store %arg2, %2 : i32, cir.ptr + cir.store %arg2, %2 : !s32i, cir.ptr %5 = cir.const(0.000000e+00 : f64) : f64 cir.store %5, %4 : f64, cir.ptr cir.scope { - %8 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} - %9 = cir.const(0 : i32) : i32 - cir.store %9, %8 : i32, cir.ptr + %8 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %9 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %9, %8 : !s32i, cir.ptr cir.loop for(cond : { - %10 = cir.load %8 : cir.ptr , i32 - %11 = cir.load %2 : cir.ptr , i32 - %12 = cir.cmp(lt, %10, %11) : i32, i32 - %13 = cir.cast(int_to_bool, %12 : i32), !cir.bool + %10 = cir.load %8 : cir.ptr , !s32i + %11 = cir.load %2 : cir.ptr , !s32i + %12 = cir.cmp(lt, %10, %11) : !s32i, !s32i + %13 = cir.cast(int_to_bool, %12 : !s32i), !cir.bool cir.brcond %13 ^bb1, ^bb2 ^bb1: // pred: ^bb0 cir.yield continue ^bb2: // pred: ^bb0 cir.yield }, step : { - %10 = cir.load %8 : cir.ptr , i32 - %11 = cir.unary(inc, %10) : i32, i32 - cir.store %11, %8 : i32, cir.ptr + %10 = cir.load %8 : cir.ptr , !s32i + %11 = cir.unary(inc, %10) : !s32i, !s32i + cir.store %11, %8 : !s32i, cir.ptr cir.yield }) { %10 = cir.load %0 : cir.ptr >, !cir.ptr - %11 = cir.load %8 : cir.ptr , i32 - %12 = cir.ptr_stride(%10 : !cir.ptr, %11 : i32), !cir.ptr + %11 = cir.load %8 : cir.ptr , !s32i + %12 = cir.ptr_stride(%10 : !cir.ptr, %11 : !s32i), !cir.ptr %13 = cir.load %12 : cir.ptr , f64 %14 = cir.load %1 : cir.ptr >, !cir.ptr - %15 = cir.load %8 : cir.ptr , i32 - %16 = cir.ptr_stride(%14 : !cir.ptr, %15 : i32), !cir.ptr + %15 = cir.load %8 : cir.ptr , !s32i + %16 = cir.ptr_stride(%14 : !cir.ptr, %15 : !s32i), !cir.ptr %17 = cir.load %16 : cir.ptr , f64 %18 = cir.binop(mul, %13, %17) : f64 %19 = cir.load %4 : cir.ptr , f64 diff --git a/clang/test/CIR/Lowering/for.cir b/clang/test/CIR/Lowering/for.cir index 40d36b8398dd..efec3d58de9f 100644 --- a/clang/test/CIR/Lowering/for.cir +++ b/clang/test/CIR/Lowering/for.cir @@ -1,25 +1,26 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} - %1 = cir.const(0 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr cir.loop for(cond : { - %2 = cir.load %0 : cir.ptr , i32 - %3 = cir.const(10 : i32) : i32 - %4 = cir.cmp(lt, %2, %3) : i32, i32 - %5 = cir.cast(int_to_bool, %4 : i32), !cir.bool + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.brcond %5 ^bb1, ^bb2 ^bb1: // pred: ^bb0 cir.yield continue ^bb2: // pred: ^bb0 cir.yield }, step : { - %2 = cir.load %0 : cir.ptr , i32 - %3 = cir.unary(inc, %2) : i32, i32 - cir.store %3, %0 : i32, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr cir.yield }) { cir.yield diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index b7881bbc254e..f1d5e9bc5631 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -1,15 +1,23 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// XFAIL: * +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!s8i = !cir.int +!u32i = !cir.int +!u64i = !cir.int +!u8i = !cir.int module { - cir.global external @a = 3 : i32 - cir.global external @c = 2 : i64 + cir.global external @a = #cir.int<3> : !s32i + cir.global external @c = #cir.int<2> : !u64i cir.global external @y = 3.400000e+00 : f32 cir.global external @w = 4.300000e+00 : f64 - cir.global external @x = 51 : i8 - cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8]> : !cir.array // Implicit array type - cir.global external @alpha = #cir.const_array<[97 : i8, 98 : i8, 99 : i8, 0 : i8] : !cir.array> : !cir.array - cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} - cir.global external @s = @".str": !cir.ptr + cir.global external @x = #cir.int<51> : !s8i + cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array + cir.global external @alpha = #cir.const_array<[#cir.int<97> : !s8i, #cir.int<98> : !s8i, #cir.int<99> : !s8i, #cir.int<0> : !s8i]> : !cir.array + cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s = @".str": !cir.ptr // MLIR: llvm.mlir.global internal constant @".str"("example\00") {addr_space = 0 : i32} // MLIR: llvm.mlir.global external @s() {addr_space = 0 : i32} : !llvm.ptr { // MLIR: %0 = llvm.mlir.addressof @".str" : !llvm.ptr @@ -18,91 +26,92 @@ module { // MLIR: } // LLVM: @.str = internal constant [8 x i8] c"example\00" // LLVM: @s = global ptr @.str - cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} - cir.global external @s1 = @".str1": !cir.ptr - cir.global external @s2 = @".str": !cir.ptr + cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s1 = @".str1": !cir.ptr + cir.global external @s2 = @".str": !cir.ptr cir.func @_Z10use_globalv() { - %0 = cir.alloca i32, cir.ptr , ["li", init] {alignment = 4 : i64} - %1 = cir.get_global @a : cir.ptr - %2 = cir.load %1 : cir.ptr , i32 - cir.store %2, %0 : i32, cir.ptr + %0 = cir.alloca !s32i, cir.ptr , ["li", init] {alignment = 4 : i64} + %1 = cir.get_global @a : cir.ptr + %2 = cir.load %1 : cir.ptr , !s32i + cir.store %2, %0 : !s32i, cir.ptr cir.return } cir.func @_Z17use_global_stringv() { - %0 = cir.alloca i8, cir.ptr , ["c", init] {alignment = 1 : i64} - %1 = cir.get_global @s2 : cir.ptr > - %2 = cir.load %1 : cir.ptr >, !cir.ptr - %3 = cir.const(0 : i32) : i32 - %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr - %5 = cir.load %4 : cir.ptr , i8 - cir.store %5, %0 : i8, cir.ptr + %0 = cir.alloca !u8i, cir.ptr , ["c", init] {alignment = 1 : i64} + %1 = cir.get_global @s2 : cir.ptr > + %2 = cir.load %1 : cir.ptr >, !cir.ptr + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr + %5 = cir.load %4 : cir.ptr , !s8i + %6 = cir.cast(integral, %5 : !s8i), !u8i + cir.store %6, %0 : !u8i, cir.ptr cir.return } - cir.func linkonce_odr @_Z4funcIiET_v() -> i32 { - %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} - %1 = cir.const(0 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr - %2 = cir.load %0 : cir.ptr , i32 - cir.return %2 : i32 + cir.func linkonce_odr @_Z4funcIiET_v() -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + cir.return %2 : !s32i } - cir.func @_Z8use_funcv() -> i32 { - %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} - %1 = cir.call @_Z4funcIiET_v() : () -> i32 - cir.store %1, %0 : i32, cir.ptr - %2 = cir.load %0 : cir.ptr , i32 - cir.return %2 : i32 + cir.func @_Z8use_funcv() -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %1 = cir.call @_Z4funcIiET_v() : () -> !s32i + cir.store %1, %0 : !s32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + cir.return %2 : !s32i } - cir.global external @string = #cir.const_array<[119 : i8, 104 : i8, 97 : i8, 116 : i8, 110 : i8, 111 : i8, 119 : i8, 0 : i8] : !cir.array> : !cir.array + cir.global external @string = #cir.const_array<[#cir.int<119> : !s8i, #cir.int<104> : !s8i, #cir.int<97> : !s8i, #cir.int<116> : !s8i, #cir.int<110> : !s8i, #cir.int<111> : !s8i, #cir.int<119> : !s8i, #cir.int<0> : !s8i]> : !cir.array // MLIR: llvm.mlir.global external @string(dense<[119, 104, 97, 116, 110, 111, 119, 0]> : tensor<8xi8>) {addr_space = 0 : i32} : !llvm.array<8 x i8> // LLVM: @string = global [8 x i8] c"whatnow\00" - cir.global external @uint = #cir.const_array<[255 : i32] : !cir.array> : !cir.array + cir.global external @uint = #cir.const_array<[#cir.int<255> : !u32i]> : !cir.array // MLIR: llvm.mlir.global external @uint(dense<255> : tensor<1xi32>) {addr_space = 0 : i32} : !llvm.array<1 x i32> // LLVM: @uint = global [1 x i32] [i32 255] - cir.global external @sshort = #cir.const_array<[11111 : i16, 22222 : i16] : !cir.array> : !cir.array + cir.global external @sshort = #cir.const_array<[#cir.int<11111> : !s16i, #cir.int<22222> : !s16i]> : !cir.array // MLIR: llvm.mlir.global external @sshort(dense<[11111, 22222]> : tensor<2xi16>) {addr_space = 0 : i32} : !llvm.array<2 x i16> // LLVM: @sshort = global [2 x i16] [i16 11111, i16 22222] - cir.global external @sint = #cir.const_array<[123 : i32, 456 : i32, 789 : i32] : !cir.array> : !cir.array + cir.global external @sint = #cir.const_array<[#cir.int<123> : !s32i, #cir.int<456> : !s32i, #cir.int<789> : !s32i]> : !cir.array // MLIR: llvm.mlir.global external @sint(dense<[123, 456, 789]> : tensor<3xi32>) {addr_space = 0 : i32} : !llvm.array<3 x i32> // LLVM: @sint = global [3 x i32] [i32 123, i32 456, i32 789] - cir.global external @ll = #cir.const_array<[999999999, 0, 0, 0] : !cir.array> : !cir.array + cir.global external @ll = #cir.const_array<[#cir.int<999999999> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i]> : !cir.array // MLIR: llvm.mlir.global external @ll(dense<[999999999, 0, 0, 0]> : tensor<4xi64>) {addr_space = 0 : i32} : !llvm.array<4 x i64> // LLVM: @ll = global [4 x i64] [i64 999999999, i64 0, i64 0, i64 0] cir.func @_Z11get_globalsv() { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} - %1 = cir.alloca !cir.ptr, cir.ptr >, ["u", init] {alignment = 8 : i64} - %2 = cir.alloca !cir.ptr, cir.ptr >, ["ss", init] {alignment = 8 : i64} - %3 = cir.alloca !cir.ptr, cir.ptr >, ["si", init] {alignment = 8 : i64} - %4 = cir.alloca !cir.ptr, cir.ptr >, ["l", init] {alignment = 8 : i64} - %5 = cir.get_global @string : cir.ptr > - %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr + %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr, cir.ptr >, ["u", init] {alignment = 8 : i64} + %2 = cir.alloca !cir.ptr, cir.ptr >, ["ss", init] {alignment = 8 : i64} + %3 = cir.alloca !cir.ptr, cir.ptr >, ["si", init] {alignment = 8 : i64} + %4 = cir.alloca !cir.ptr, cir.ptr >, ["l", init] {alignment = 8 : i64} + %5 = cir.get_global @string : cir.ptr > + %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @string : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr // LLVM: store ptr @string, ptr %{{[0-9]+}} - cir.store %6, %0 : !cir.ptr, cir.ptr > - %7 = cir.get_global @uint : cir.ptr > - %8 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr + cir.store %6, %0 : !cir.ptr, cir.ptr > + %7 = cir.get_global @uint : cir.ptr > + %8 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @uint : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr // LLVM: store ptr @uint, ptr %{{[0-9]+}} - cir.store %8, %1 : !cir.ptr, cir.ptr > - %9 = cir.get_global @sshort : cir.ptr > - %10 = cir.cast(array_to_ptrdecay, %9 : !cir.ptr>), !cir.ptr + cir.store %8, %1 : !cir.ptr, cir.ptr > + %9 = cir.get_global @sshort : cir.ptr > + %10 = cir.cast(array_to_ptrdecay, %9 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sshort : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr // LLVM: store ptr @sshort, ptr %{{[0-9]+}} - cir.store %10, %2 : !cir.ptr, cir.ptr > - %11 = cir.get_global @sint : cir.ptr > - %12 = cir.cast(array_to_ptrdecay, %11 : !cir.ptr>), !cir.ptr + cir.store %10, %2 : !cir.ptr, cir.ptr > + %11 = cir.get_global @sint : cir.ptr > + %12 = cir.cast(array_to_ptrdecay, %11 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sint : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr // LLVM: store ptr @sint, ptr %{{[0-9]+}} - cir.store %12, %3 : !cir.ptr, cir.ptr > - %13 = cir.get_global @ll : cir.ptr > - %14 = cir.cast(array_to_ptrdecay, %13 : !cir.ptr>), !cir.ptr + cir.store %12, %3 : !cir.ptr, cir.ptr > + %13 = cir.get_global @ll : cir.ptr > + %14 = cir.cast(array_to_ptrdecay, %13 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @ll : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr // LLVM: store ptr @ll, ptr %{{[0-9]+}} - cir.store %14, %4 : !cir.ptr, cir.ptr > + cir.store %14, %4 : !cir.ptr, cir.ptr > cir.return } } diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 2a8057a92144..8ccaca8ca0b5 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -1,23 +1,24 @@ // RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %1 = cir.const(1 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr + %0 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr cir.br ^bb2 ^bb1: // no predecessors - %2 = cir.load %0 : cir.ptr , i32 - %3 = cir.const(1 : i32) : i32 - %4 = cir.binop(add, %2, %3) : i32 - cir.store %4, %0 : i32, cir.ptr + %2 = cir.load %0 : cir.ptr , !u32i + %3 = cir.const(#cir.int<1> : !u32i) : !u32i + %4 = cir.binop(add, %2, %3) : !u32i + cir.store %4, %0 : !u32i, cir.ptr cir.br ^bb2 ^bb2: // 2 preds: ^bb0, ^bb1 - %5 = cir.load %0 : cir.ptr , i32 - %6 = cir.const(2 : i32) : i32 - %7 = cir.binop(add, %5, %6) : i32 - cir.store %7, %0 : i32, cir.ptr + %5 = cir.load %0 : cir.ptr , !u32i + %6 = cir.const(#cir.int<2> : !u32i) : !u32i + %7 = cir.binop(add, %5, %6) : !u32i + cir.store %7, %0 : !u32i, cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index 0a57a03254cc..c7ed945d0892 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -1,17 +1,18 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int module { - cir.func @foo(%arg0: i32) -> i32 { - %4 = cir.cast(int_to_bool, %arg0 : i32), !cir.bool + cir.func @foo(%arg0: !s32i) -> !s32i { + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool cir.if %4 { - %5 = cir.const(1 : i32) : i32 - cir.return %5 : i32 + %5 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.return %5 : !s32i } else { - %5 = cir.const(0 : i32) : i32 - cir.return %5 : i32 + %5 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.return %5 : !s32i } - cir.return %arg0 : i32 + cir.return %arg0 : !s32i } } diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index d53d11a7938a..adf19ac9f266 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -1,13 +1,14 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int module { - cir.func @foo() -> i32 { - %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.const(1 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr - %2 = cir.load %0 : cir.ptr , i32 - cir.return %2 : i32 + cir.func @foo() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !u32i + cir.return %2 : !u32i } } diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index 39501250caea..7010302ac88d 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -1,14 +1,14 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM - +!s32i = !cir.int module { - cir.func @f(%arg0: !cir.ptr) { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} - cir.store %arg0, %0 : !cir.ptr, cir.ptr > - %1 = cir.load %0 : cir.ptr >, !cir.ptr - %2 = cir.const(1 : i32) : i32 - %3 = cir.ptr_stride(%1 : !cir.ptr, %2 : i32), !cir.ptr - %4 = cir.load %3 : cir.ptr , i32 + cir.func @f(%arg0: !cir.ptr) { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, cir.ptr > + %1 = cir.load %0 : cir.ptr >, !cir.ptr + %2 = cir.const(#cir.int<1> : !s32i) : !s32i + %3 = cir.ptr_stride(%1 : !cir.ptr, %2 : !s32i), !cir.ptr + %4 = cir.load %3 : cir.ptr , !s32i cir.return } } diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index 82d0be699d1e..726176688b79 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -1,12 +1,13 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int module { cir.func @foo() { cir.scope { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.const(4 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<4> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr } cir.return } diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir index 829c51192ddb..398ec3214bcb 100644 --- a/clang/test/CIR/Lowering/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -1,21 +1,21 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM - +!s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.const(2 : i32) : i32 - cir.store %2, %0 : i32, cir.ptr - cir.store %2, %1 : i32, cir.ptr + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.store %2, %1 : !s32i, cir.ptr - %3 = cir.load %0 : cir.ptr , i32 - %4 = cir.unary(inc, %3) : i32, i32 - cir.store %4, %0 : i32, cir.ptr + %3 = cir.load %0 : cir.ptr , !s32i + %4 = cir.unary(inc, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, cir.ptr - %5 = cir.load %1 : cir.ptr , i32 - %6 = cir.unary(dec, %5) : i32, i32 - cir.store %6, %1 : i32, cir.ptr + %5 = cir.load %1 : cir.ptr , !s32i + %6 = cir.unary(dec, %5) : !s32i, !s32i + cir.store %6, %1 : !s32i, cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index bdf77e6e4be3..8651ab9523b3 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -1,17 +1,17 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM - +!s32i = !cir.int module { - cir.func @foo() -> i32 { - %0 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %2 = cir.const(1 : i32) : i32 - cir.store %2, %1 : i32, cir.ptr - %3 = cir.load %1 : cir.ptr , i32 - %4 = cir.unary(not, %3) : i32, i32 - cir.store %4, %0 : i32, cir.ptr - %5 = cir.load %0 : cir.ptr , i32 - cir.return %5 : i32 + cir.func @foo() -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %2 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %2, %1 : !s32i, cir.ptr + %3 = cir.load %1 : cir.ptr , !s32i + %4 = cir.unary(not, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, cir.ptr + %5 = cir.load %0 : cir.ptr , !s32i + cir.return %5 : !s32i } } diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index 81569b6d14e9..c4a4c0eab932 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -1,21 +1,21 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM - +!s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.const(2 : i32) : i32 - cir.store %2, %0 : i32, cir.ptr - cir.store %2, %1 : i32, cir.ptr + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.store %2, %1 : !s32i, cir.ptr - %3 = cir.load %0 : cir.ptr , i32 - %4 = cir.unary(plus, %3) : i32, i32 - cir.store %4, %0 : i32, cir.ptr + %3 = cir.load %0 : cir.ptr , !s32i + %4 = cir.unary(plus, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, cir.ptr - %5 = cir.load %1 : cir.ptr , i32 - %6 = cir.unary(minus, %5) : i32, i32 - cir.store %6, %1 : i32, cir.ptr + %5 = cir.load %1 : cir.ptr , !s32i + %6 = cir.unary(minus, %5) : !s32i, !s32i + cir.store %6, %1 : !s32i, cir.ptr cir.return } } diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 2a860af8c091..f3d056ed837a 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -3,33 +3,34 @@ #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool +!s32i = !cir.int module { - cir.func @sw1(%arg0: i32, %arg1: i32) { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["c", init] {alignment = 4 : i64} - cir.store %arg0, %0 : i32, cir.ptr - cir.store %arg1, %1 : i32, cir.ptr + cir.func @sw1(%arg0: !s32i, %arg1: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["c", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.store %arg1, %1 : !s32i, cir.ptr cir.scope { - %2 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %3 = cir.const(1 : i32) : i32 - cir.store %3, %2 : i32, cir.ptr - %4 = cir.load %0 : cir.ptr , i32 - cir.switch (%4 : i32) [ - case (equal, 0 : i32) { - %5 = cir.load %2 : cir.ptr , i32 - %6 = cir.const(1 : i32) : i32 - %7 = cir.binop(add, %5, %6) : i32 - cir.store %7, %2 : i32, cir.ptr + %2 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + %4 = cir.load %0 : cir.ptr , !s32i + cir.switch (%4 : !s32i) [ + case (equal, 0 : !s32i) { + %5 = cir.load %2 : cir.ptr , !s32i + %6 = cir.const(#cir.int<1> : !s32i) : !s32i + %7 = cir.binop(add, %5, %6) : !s32i + cir.store %7, %2 : !s32i, cir.ptr cir.br ^bb1 ^bb1: // pred: ^bb0 cir.return }, - case (equal, 1 : i32) { + case (equal, 1 : !s32i) { cir.scope { cir.scope { - %5 = cir.load %1 : cir.ptr , i32 - %6 = cir.const(3 : i32) : i32 - %7 = cir.cmp(eq, %5, %6) : i32, !cir.bool + %5 = cir.load %1 : cir.ptr , !s32i + %6 = cir.const(#cir.int<3> : !s32i) : !s32i + %7 = cir.cmp(eq, %5, %6) : !s32i, !cir.bool cir.if %7 { cir.br ^bb1 ^bb1: // pred: ^bb0 @@ -40,15 +41,15 @@ module { } cir.yield fallthrough }, - case (equal, 2 : i32) { + case (equal, 2 : !s32i) { cir.scope { - %5 = cir.alloca i32, cir.ptr , ["yolo", init] {alignment = 4 : i64} - %6 = cir.load %2 : cir.ptr , i32 - %7 = cir.const(1 : i32) : i32 - %8 = cir.binop(add, %6, %7) : i32 - cir.store %8, %2 : i32, cir.ptr - %9 = cir.const(100 : i32) : i32 - cir.store %9, %5 : i32, cir.ptr + %5 = cir.alloca !s32i, cir.ptr , ["yolo", init] {alignment = 4 : i64} + %6 = cir.load %2 : cir.ptr , !s32i + %7 = cir.const(#cir.int<1> : !s32i) : !s32i + %8 = cir.binop(add, %6, %7) : !s32i + cir.store %8, %2 : !s32i, cir.ptr + %9 = cir.const(#cir.int<100> : !s32i) : !s32i + cir.store %9, %5 : !s32i, cir.ptr cir.br ^bb1 ^bb1: // pred: ^bb0 cir.return @@ -101,20 +102,20 @@ module { } } -// CHECK: cir.switch (%4 : i32) [ -// CHECK-NEXT: case (equal, 0 : i32) { -// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %6 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %7 = cir.binop(add, %5, %6) : i32 -// CHECK-NEXT: cir.store %7, %2 : i32, cir.ptr +// CHECK: cir.switch (%4 : !s32i) [ +// CHECK-NEXT: case (equal, #cir.int<0> : !s32i) { +// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %6 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i +// CHECK-NEXT: cir.store %7, %2 : !s32i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 1 : i32) { +// CHECK-NEXT: case (equal, #cir.int<1> : !s32i) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %5 = cir.load %1 : cir.ptr , i32 -// CHECK-NEXT: %6 = cir.const(3 : i32) : i32 -// CHECK-NEXT: %7 = cir.cmp(eq, %5, %6) : i32, !cir.bool +// CHECK-NEXT: %5 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %6 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: %7 = cir.cmp(eq, %5, %6) : !s32i, !cir.bool // CHECK-NEXT: cir.if %7 { // CHECK-NEXT: cir.return // CHECK-NEXT: } @@ -123,15 +124,15 @@ module { // CHECK-NEXT: } // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 2 : i32) { +// CHECK-NEXT: case (equal, #cir.int<2> : !s32i) { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %5 = cir.alloca i32, cir.ptr , ["yolo", init] {alignment = 4 : i64} -// CHECK-NEXT: %6 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %7 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %8 = cir.binop(add, %6, %7) : i32 -// CHECK-NEXT: cir.store %8, %2 : i32, cir.ptr -// CHECK-NEXT: %9 = cir.const(100 : i32) : i32 -// CHECK-NEXT: cir.store %9, %5 : i32, cir.ptr +// CHECK-NEXT: %5 = cir.alloca !s32i, cir.ptr , ["yolo", init] {alignment = 4 : i64} +// CHECK-NEXT: %6 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %7 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %8 = cir.binop(add, %6, %7) : !s32i +// CHECK-NEXT: cir.store %8, %2 : !s32i, cir.ptr +// CHECK-NEXT: %9 = cir.const(#cir.int<100> : !s32i) : !s32i +// CHECK-NEXT: cir.store %9, %5 : !s32i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: cir.yield fallthrough From bc3edef6f0cc2fa665d6dedef46f8dd3be239675 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 19 May 2023 16:08:24 -0700 Subject: [PATCH 0961/2301] [CIR][CIRGen] NRVO: implement NRVO flag bits to decide on dtors --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 14 ++++++++- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 18 +++++++++--- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 39 +++++++++++++++++--------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 17 +++++++++-- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 2 +- clang/test/CIR/CodeGen/nrvo.cpp | 31 ++++++++++++++++++++ 6 files changed, 99 insertions(+), 22 deletions(-) create mode 100644 clang/test/CIR/CodeGen/nrvo.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 7651129e7c33..8fce6985d580 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -204,10 +204,16 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return create(loc, int64Ty, mlir::IntegerAttr::get(int64Ty, C)); } - mlir::Value getBool(bool state, mlir::Location loc) { + mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), getCIRBoolAttr(state)); } + mlir::cir::ConstantOp getFalse(mlir::Location loc) { + return getBool(false, loc); + } + mlir::cir::ConstantOp getTrue(mlir::Location loc) { + return getBool(true, loc); + } // Creates constant nullptr for pointer type ty. mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { @@ -309,6 +315,12 @@ class CIRGenBuilderTy : public mlir::OpBuilder { Address dst) { return create(loc, val, dst.getPointer()); } + + mlir::cir::StoreOp createFlagStore(mlir::Location loc, bool val, + mlir::Value dst) { + auto flag = getBool(val, loc); + return create(loc, flag, dst); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index f624919cc8f0..0a368bbf91cf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -35,6 +35,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { "not implemented"); assert(!D.hasAttr() && "not implemented"); + auto loc = getLoc(D.getSourceRange()); bool NRVO = getContext().getLangOpts().ElideConstructors && D.isNRVOVariable(); AutoVarEmission emission(D); @@ -100,8 +101,18 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { RD->isNonTrivialToPrimitiveDestroy()) { // In LLVM: Create a flag that is used to indicate when the NRVO was // applied to this variable. Set it to zero to indicate that NRVO was - // not applied. - llvm_unreachable("NYI"); + // not applied. For now, use the same approach for CIRGen until we can + // be sure it's worth doing something more aggressive. + auto falseNVRO = builder.getFalse(loc); + Address NRVOFlag = CreateTempAlloca( + falseNVRO.getType(), CharUnits::One(), loc, "nrvo", + /*ArraySize=*/nullptr, &allocaAddr); + assert(builder.getInsertionBlock()); + builder.createStore(loc, falseNVRO, NRVOFlag); + + // Record the NRVO flag for this variable. + NRVOFlags[&D] = NRVOFlag.getPointer(); + emission.NRVOFlag = NRVOFlag.getPointer(); } } } else { @@ -112,8 +123,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { CharUnits allocaAlignment = alignment; // Create the temp alloca and declare variable using it. mlir::Value addrVal; - address = CreateTempAlloca(allocaTy, allocaAlignment, - getLoc(D.getSourceRange()), D.getName(), + address = CreateTempAlloca(allocaTy, allocaAlignment, loc, D.getName(), /*ArraySize=*/nullptr, &allocaAddr); if (failed(declare(address, &D, Ty, getLoc(D.getSourceRange()), alignment, addrVal))) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index b4079ff4013b..ea370d90f39a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1865,11 +1865,13 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(const Expr *cond, } mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, - mlir::Location loc, - CharUnits alignment) { - return buildAlloca( - name, ty, loc, alignment, - builder.getBestAllocaInsertPoint(currLexScope->getEntryBlock())); + mlir::Location loc, CharUnits alignment, + bool insertIntoFnEntryBlock) { + mlir::Block *entryBlock = insertIntoFnEntryBlock + ? &CurFn.getRegion().front() + : currLexScope->getEntryBlock(); + return buildAlloca(name, ty, loc, alignment, + builder.getBestAllocaInsertPoint(entryBlock)); } mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, @@ -1894,9 +1896,10 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, } mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty, - mlir::Location loc, - CharUnits alignment) { - return buildAlloca(name, getCIRType(ty), loc, alignment); + mlir::Location loc, CharUnits alignment, + bool insertIntoFnEntryBlock) { + return buildAlloca(name, getCIRType(ty), loc, alignment, + insertIntoFnEntryBlock); } mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, @@ -2060,14 +2063,24 @@ Address CIRGenFunction::CreateTempAlloca(mlir::Type Ty, CharUnits Align, /// This creates an alloca and inserts it into the entry block if \p ArraySize /// is nullptr, otherwise inserts it at the current insertion point of the /// builder. -mlir::cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty, - mlir::Location Loc, - const Twine &Name, - mlir::Value ArraySize) { +mlir::cir::AllocaOp +CIRGenFunction::CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, + const Twine &Name, mlir::Value ArraySize, + bool insertIntoFnEntryBlock) { if (ArraySize) assert(0 && "NYI"); return cast( - buildAlloca(Name.str(), Ty, Loc, CharUnits()).getDefiningOp()); + buildAlloca(Name.str(), Ty, Loc, CharUnits(), insertIntoFnEntryBlock) + .getDefiningOp()); +} + +/// Just like CreateTempAlloca above, but place the alloca into the function +/// entry basic block instead. +mlir::cir::AllocaOp CIRGenFunction::CreateTempAllocaInFnEntryBlock( + mlir::Type Ty, mlir::Location Loc, const Twine &Name, + mlir::Value ArraySize) { + return CreateTempAlloca(Ty, Loc, Name, ArraySize, + /*insertIntoFnEntryBlock=*/true); } /// Given an object of the given canonical type, can we safely copy a diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9875abf44ff8..6f8262c64754 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -402,9 +402,11 @@ class CIRGenFunction : public CIRGenTypeCache { public: // FIXME(cir): move this to CIRGenBuider.h mlir::Value buildAlloca(llvm::StringRef name, clang::QualType ty, - mlir::Location loc, clang::CharUnits alignment); + mlir::Location loc, clang::CharUnits alignment, + bool insertIntoFnEntryBlock = false); mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, - mlir::Location loc, clang::CharUnits alignment); + mlir::Location loc, clang::CharUnits alignment, + bool insertIntoFnEntryBlock = false); mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, mlir::OpBuilder::InsertPoint ip); @@ -679,6 +681,10 @@ class CIRGenFunction : public CIRGenTypeCache { void setAddrOfLocalVar(const clang::VarDecl *VD, Address Addr) { assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!"); LocalDeclMap.insert({VD, Addr}); + // Add to the symbol table if not there already. + if (symbolTable.count(VD)) + return; + symbolTable.insert(VD, Addr.getPointer()); } /// True if an insertion point is defined. If not, this indicates that the @@ -1628,7 +1634,12 @@ class CIRGenFunction : public CIRGenTypeCache { /// more efficient if the caller knows that the address will not be exposed. mlir::cir::AllocaOp CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, const Twine &Name = "tmp", - mlir::Value ArraySize = nullptr); + mlir::Value ArraySize = nullptr, + bool insertIntoFnEntryBlock = false); + mlir::cir::AllocaOp + CreateTempAllocaInFnEntryBlock(mlir::Type Ty, mlir::Location Loc, + const Twine &Name = "tmp", + mlir::Value ArraySize = nullptr); Address CreateTempAlloca(mlir::Type Ty, CharUnits align, mlir::Location Loc, const Twine &Name = "tmp", mlir::Value ArraySize = nullptr, diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index f6d80b7e8395..72f3908c2857 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -449,7 +449,7 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { // If there is an NRVO flag for this variable, set it to 1 into indicate // that the cleanup code should not destroy the variable. if (auto NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) - llvm_unreachable("NYI"); + getBuilder().createFlagStore(loc, true, NRVOFlag); } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) { // Make sure not to return anything, but evaluate the expression // for side effects. diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp new file mode 100644 index 000000000000..d96a15f140ce --- /dev/null +++ b/clang/test/CIR/CodeGen/nrvo.cpp @@ -0,0 +1,31 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +std::vector test_nrvo() { + std::vector result; + result.push_back("Words bend our thinking to infinite paths of self-delusion"); + return result; +} + +// CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector", !cir.ptr>, !cir.ptr>, !cir.ptr>> + +// CHECK: cir.func @_Z9test_nrvov() -> !ty_22class2Estd3A3Avector22 { +// CHECK: %0 = cir.alloca !ty_22class2Estd3A3Avector22, cir.ptr , ["__retval", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.bool, cir.ptr , ["nrvo"] {alignment = 1 : i64} +// CHECK: %2 = cir.const(#false) : !cir.bool +// CHECK: cir.store %2, %1 : !cir.bool, cir.ptr +// CHECK: cir.call @_ZNSt6vectorIPKcEC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.scope { +// CHECK: %5 = cir.alloca !cir.ptr, cir.ptr >, ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %6 = cir.get_global @".str" : cir.ptr > +// CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr +// CHECK: cir.store %7, %5 : !cir.ptr, cir.ptr > +// CHECK: cir.call @_ZNSt6vectorIPKcE9push_backEOS1_(%0, %5) : (!cir.ptr, !cir.ptr>) -> () +// CHECK: } +// CHECK: %3 = cir.const(#true) : !cir.bool +// CHECK: cir.store %3, %1 : !cir.bool, cir.ptr +// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22class2Estd3A3Avector22 +// CHECK: cir.return %4 : !ty_22class2Estd3A3Avector22 +// CHECK: } \ No newline at end of file From 16699b14410df09c3acc304aceabf1866248fcd9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 19 May 2023 19:04:04 -0700 Subject: [PATCH 0962/2301] [CIR][CIRGen][NFC] Virtual calls: setup 'this' for member calls Effective NFC since it asserts right after, testcase coming later, together with the final virtual call. --- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 6cc9ea976e23..cb12057fd7e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -140,7 +140,10 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( LValue This; if (IsArrow) { - llvm_unreachable("NYI"); + LValueBaseInfo BaseInfo; + assert(!UnimplementedFeature::tbaa()); + Address ThisValue = buildPointerWithAlignment(Base, &BaseInfo); + This = makeAddrLValue(ThisValue, Base->getType(), BaseInfo); } else { This = buildLValue(Base); } From eb55b598b2c4e2c80a460040fc1b2452082dbb32 Mon Sep 17 00:00:00 2001 From: Keyi Zhang Date: Mon, 15 May 2023 22:20:38 -0700 Subject: [PATCH 0963/2301] [CIR][CodeGen] Add logical binop --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 7 + clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 219 ++++++++++++++++++++- clang/test/CIR/CodeGen/binop.cpp | 78 +++++++- 3 files changed, 301 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 8fce6985d580..1e42a5ea5090 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -321,6 +321,13 @@ class CIRGenBuilderTy : public mlir::OpBuilder { auto flag = getBool(val, loc); return create(loc, flag, dst); } + + mlir::Value createZExtOrBitCast(mlir::Location loc, mlir::Value src, + mlir::Type newTy) { + if (src.getType() == newTy) + return src; + llvm_unreachable("NYI"); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 40e578117b94..be4ecc38203a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -526,8 +526,8 @@ class ScalarExprEmitter : public StmtVisitor { #undef VISITCOMP mlir::Value VisitBinAssign(const BinaryOperator *E); - mlir::Value VisitBinLAnd(const BinaryOperator *E) { llvm_unreachable("NYI"); } - mlir::Value VisitBinLOr(const BinaryOperator *E) { llvm_unreachable("NYI"); } + mlir::Value VisitBinLAnd(const BinaryOperator *B); + mlir::Value VisitBinLOr(const BinaryOperator *B); mlir::Value VisitBinComma(const BinaryOperator *E) { CGF.buildIgnoredExpr(E->getLHS()); // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen. @@ -1681,3 +1681,218 @@ mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, return ScalarExprEmitter(*this, builder) .buildScalarPrePostIncDec(E, LV, isInc, isPre); } + +mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { + if (E->getType()->isVectorType()) { + llvm_unreachable("NYI"); + } + + bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr(); + mlir::Type ResTy = ConvertType(E->getType()); + mlir::Location Loc = CGF.getLoc(E->getExprLoc()); + + // If we have 0 && RHS, see if we can elide RHS, if so, just return 0. + // If we have 1 && X, just emit X without inserting the control flow. + bool LHSCondVal; + if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { + if (LHSCondVal) { // If we have 1 && X, just emit X. + + mlir::Value RHSCond = CGF.evaluateExprAsBool(E->getRHS()); + + if (InstrumentRegions) { + llvm_unreachable("NYI"); + } + // ZExt result to int or bool. + return Builder.createZExtOrBitCast(RHSCond.getLoc(), RHSCond, ResTy); + } + // 0 && RHS: If it is safe, just elide the RHS, and return 0/false. + if (!CGF.ContainsLabel(E->getRHS())) + return Builder.getBool(false, Loc); + } + + CIRGenFunction::ConditionalEvaluation eval(CGF); + + mlir::Value LHSCondV = CGF.evaluateExprAsBool(E->getLHS()); + auto ResOp = Builder.create( + Loc, LHSCondV, /*trueBuilder=*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + CIRGenFunction::LexicalScopeContext LexScope{Loc, Loc, + B.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &LexScope}; + CGF.currLexScope->setAsTernary(); + mlir::Value RHSCondV = CGF.evaluateExprAsBool(E->getRHS()); + auto res = B.create( + Loc, RHSCondV, /*trueBuilder*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + SmallVector Locs; + if (Loc.isa()) { + Locs.push_back(Loc); + Locs.push_back(Loc); + } else if (Loc.isa()) { + auto fusedLoc = Loc.cast(); + Locs.push_back(fusedLoc.getLocations()[0]); + Locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{ + Locs[0], Locs[1], B.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + auto res = B.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), + true)); + B.create(Loc, res.getRes()); + }, + /*falseBuilder*/ + [&](mlir::OpBuilder &b, mlir::Location Loc) { + SmallVector Locs; + if (Loc.isa()) { + Locs.push_back(Loc); + Locs.push_back(Loc); + } else if (Loc.isa()) { + auto fusedLoc = Loc.cast(); + Locs.push_back(fusedLoc.getLocations()[0]); + Locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{ + Locs[0], Locs[1], b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + auto res = b.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), + false)); + b.create(Loc, res.getRes()); + }); + B.create(Loc, res.getResult()); + }, + /*falseBuilder*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + SmallVector Locs; + if (Loc.isa()) { + Locs.push_back(Loc); + Locs.push_back(Loc); + } else if (Loc.isa()) { + auto fusedLoc = Loc.cast(); + Locs.push_back(fusedLoc.getLocations()[0]); + Locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{Loc, Loc, + B.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + auto res = B.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), false)); + B.create(Loc, res.getRes()); + }); + return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp, ResTy); +} + +mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { + if (E->getType()->isVectorType()) { + llvm_unreachable("NYI"); + } + + bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr(); + mlir::Type ResTy = ConvertType(E->getType()); + mlir::Location Loc = CGF.getLoc(E->getExprLoc()); + + // If we have 1 || RHS, see if we can elide RHS, if so, just return 1. + // If we have 0 || X, just emit X without inserting the control flow. + bool LHSCondVal; + if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { + if (!LHSCondVal) { // If we have 0 || X, just emit X. + + mlir::Value RHSCond = CGF.evaluateExprAsBool(E->getRHS()); + + if (InstrumentRegions) { + llvm_unreachable("NYI"); + } + // ZExt result to int or bool. + return Builder.createZExtOrBitCast(RHSCond.getLoc(), RHSCond, ResTy); + } + // 1 || RHS: If it is safe, just elide the RHS, and return 1/true. + if (!CGF.ContainsLabel(E->getRHS())) + return Builder.getBool(true, Loc); + } + + CIRGenFunction::ConditionalEvaluation eval(CGF); + + mlir::Value LHSCondV = CGF.evaluateExprAsBool(E->getLHS()); + auto ResOp = Builder.create( + Loc, LHSCondV, /*trueBuilder=*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + SmallVector Locs; + if (Loc.isa()) { + Locs.push_back(Loc); + Locs.push_back(Loc); + } else if (Loc.isa()) { + auto fusedLoc = Loc.cast(); + Locs.push_back(fusedLoc.getLocations()[0]); + Locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{Loc, Loc, + B.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + auto res = B.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), true)); + B.create(Loc, res.getRes()); + }, + /*falseBuilder*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + CIRGenFunction::LexicalScopeContext LexScope{Loc, Loc, + B.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &LexScope}; + CGF.currLexScope->setAsTernary(); + mlir::Value RHSCondV = CGF.evaluateExprAsBool(E->getRHS()); + auto res = B.create( + Loc, RHSCondV, /*trueBuilder*/ + [&](mlir::OpBuilder &B, mlir::Location Loc) { + SmallVector Locs; + if (Loc.isa()) { + Locs.push_back(Loc); + Locs.push_back(Loc); + } else if (Loc.isa()) { + auto fusedLoc = Loc.cast(); + Locs.push_back(fusedLoc.getLocations()[0]); + Locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{ + Loc, Loc, B.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + auto res = B.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), + true)); + B.create(Loc, res.getRes()); + }, + /*falseBuilder*/ + [&](mlir::OpBuilder &b, mlir::Location Loc) { + SmallVector Locs; + if (Loc.isa()) { + Locs.push_back(Loc); + Locs.push_back(Loc); + } else if (Loc.isa()) { + auto fusedLoc = Loc.cast(); + Locs.push_back(fusedLoc.getLocations()[0]); + Locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{ + Loc, Loc, B.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + auto res = b.create( + Loc, Builder.getBoolTy(), + Builder.getAttr(Builder.getBoolTy(), + false)); + b.create(Loc, res.getRes()); + }); + B.create(Loc, res.getResult()); + }); + + return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp, ResTy); +} diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index 5ea326e149b3..0760a1052ee8 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -23,4 +23,80 @@ void b0(int a, int b) { // CHECK: = cir.binop(shl, %21, %22) : !s32i // CHECK: = cir.binop(and, %24, %25) : !s32i // CHECK: = cir.binop(xor, %27, %28) : !s32i -// CHECK: = cir.binop(or, %30, %31) : !s32i \ No newline at end of file +// CHECK: = cir.binop(or, %30, %31) : !s32i + +void b1(bool a, bool b) { + bool x = a && b; + x = x || b; +} + +// CHECK: cir.ternary(%3, true +// CHECK-NEXT: %7 = cir.load %1 +// CHECK-NEXT: cir.ternary(%7, true +// CHECK-NEXT: cir.const(#true) +// CHECK-NEXT: cir.yield +// CHECK-NEXT: false { +// CHECK-NEXT: cir.const(#false) +// CHECK-NEXT: cir.yield +// CHECK: cir.yield +// CHECK-NEXT: false { +// CHECK-NEXT: cir.const(#false) +// CHECK-NEXT: cir.yield + +// CHECK: cir.ternary(%5, true +// CHECK-NEXT: cir.const(#true) +// CHECK-NEXT: cir.yield +// CHECK-NEXT: false { +// CHECK-NEXT: %7 = cir.load %1 +// CHECK-NEXT: cir.ternary(%7, true +// CHECK-NEXT: cir.const(#true) +// CHECK-NEXT: cir.yield +// CHECK-NEXT: false { +// CHECK-NEXT: cir.const(#false) +// CHECK-NEXT: cir.yield + +void b2(bool a) { + bool x = 0 && a; + x = 1 && a; + x = 0 || a; + x = 1 || a; +} + +// CHECK: %0 = cir.alloca {{.*}} ["a", init] +// CHECK: %1 = cir.alloca {{.*}} ["x", init] +// CHECK: %2 = cir.const(#false) +// CHECK-NEXT: cir.store %2, %1 +// CHECK-NEXT: %3 = cir.load %0 +// CHECK-NEXT: cir.store %3, %1 +// CHECK-NEXT: %4 = cir.load %0 +// CHECK-NEXT: cir.store %4, %1 +// CHECK-NEXT: %5 = cir.const(#true) +// CHECK-NEXT: cir.store %5, %1 + +void b3(int a, int b, int c, int d) { + bool x = (a == b) && (c == d); + x = (a == b) || (c == d); +} + +// CHECK: %0 = cir.alloca {{.*}} ["a", init] +// CHECK-NEXT: %1 = cir.alloca {{.*}} ["b", init] +// CHECK-NEXT: %2 = cir.alloca {{.*}} ["c", init] +// CHECK-NEXT: %3 = cir.alloca {{.*}} ["d", init] +// CHECK-NEXT: %4 = cir.alloca {{.*}} ["x", init] +// CHECK: %5 = cir.load %0 +// CHECK-NEXT: %6 = cir.load %1 +// CHECK-NEXT: %7 = cir.cmp(eq, %5, %6) +// CHECK-NEXT: cir.ternary(%7, true +// CHECK-NEXT: %13 = cir.load %2 +// CHECK-NEXT: %14 = cir.load %3 +// CHECK-NEXT: %15 = cir.cmp(eq, %13, %14) +// CHECK-NEXT: cir.ternary(%15, true +// CHECK: %9 = cir.load %0 +// CHECK-NEXT: %10 = cir.load %1 +// CHECK-NEXT: %11 = cir.cmp(eq, %9, %10) +// CHECK-NEXT: %12 = cir.ternary(%11, true { +// CHECK: }, false { +// CHECK-NEXT: %13 = cir.load %2 +// CHECK-NEXT: %14 = cir.load %3 +// CHECK-NEXT: %15 = cir.cmp(eq, %13, %14) +// CHECK-NEXT: %16 = cir.ternary(%15, true From c899ffe5149a3c7afa4d4ca22fe16aa5e5e9a855 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 22 May 2023 19:52:18 -0700 Subject: [PATCH 0964/2301] [CIR][CIRGen] base class calls: adjust 'this' argument for virtual methods --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 10 +++++++++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 5 ++++- clang/lib/CIR/CodeGen/CIRGenValue.h | 8 +++++++ clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 +- clang/test/CIR/CodeGen/derived-to-base.cpp | 22 +++++++++++++++++++ 5 files changed, 45 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 7a44349a1661..e1a7d8f9bfea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -118,6 +118,16 @@ class CIRGenCXXABI { return CGF.CXXStructorImplicitParamDecl; } + /// Perform ABI-specific "this" argument adjustment required prior to + /// a call of a virtual function. + /// The "VirtualCall" argument is true iff the call itself is virtual. + virtual Address adjustThisArgumentForVirtualFunctionCall(CIRGenFunction &CGF, + GlobalDecl GD, + Address This, + bool VirtualCall) { + return This; + } + /// Build a parameter variable suitable for 'this'. void buildThisParam(CIRGenFunction &CGF, FunctionArgList &Params); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index cb12057fd7e3..16c02ca03269 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -243,7 +243,10 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( } if (MD->isVirtual()) { - llvm_unreachable("NYI"); + Address NewThisAddr = + CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall( + *this, CalleeDecl, This.getAddress(), useVirtualCall); + This.setAddress(NewThisAddr); } return buildCXXMemberOrOperatorCall( diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index ea8541c031cb..f84c20c4b136 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -248,6 +248,14 @@ class LValue { return Address(getPointer(), ElementType, getAlignment()); } + void setAddress(Address address) { + assert(isSimple()); + V = address.getPointer(); + ElementType = address.getElementType(); + Alignment = address.getAlignment().getQuantity(); + // TODO(cir): IsKnownNonNull = address.isKnownNonNull(); + } + LValueBaseInfo getBaseInfo() const { return BaseInfo; } void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; } diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 742a188ba379..083c9e469d7a 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -311,7 +311,7 @@ class CIRGenConsumer : public clang::ASTConsumer { llvm_unreachable("NYI"); } - void HandleVTable(CXXRecordDecl *RD) override { llvm_unreachable("NYI"); } + void HandleVTable(CXXRecordDecl *RD) override { gen->HandleVTable(RD); } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index c226b96b837a..feb168387795 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -1,6 +1,10 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +typedef enum { + RequestFailed = -2004, +} enumy; + class C1 { public: virtual ~C1(); @@ -21,6 +25,8 @@ class C1 { Layer(int d); virtual ~Layer() {} }; + + virtual enumy Initialize() = 0; }; class C2 : public C1 { @@ -40,6 +46,8 @@ class C2 : public C1 { protected: const C2* m_C1; }; + + virtual enumy Initialize() override; }; class C3 : public C2 { @@ -48,6 +56,8 @@ class C3 : public C2 { Layer(int d, const C2* C1); void Initialize(); }; + + virtual enumy Initialize() override; }; void C3::Layer::Initialize() { @@ -69,3 +79,15 @@ void C3::Layer::Initialize() { // CHECK: %4 = cir.load %3 : cir.ptr >, !cir.ptr // CHECK: %5 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool + +enumy C3::Initialize() { + return C2::Initialize(); +} + +// CHECK: cir.func @_ZN2C310InitializeEv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %3 = cir.base_class_addr(%2 : cir.ptr ) -> cir.ptr +// CHECK: %4 = cir.call @_ZN2C210InitializeEv(%3) : (!cir.ptr) -> !s32i From 4192bcd44fdc2e76498e0f71567c60da45195b0c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 22 May 2023 23:11:28 -0700 Subject: [PATCH 0965/2301] [CIR][CIRGen] Casts: add ptr to boolean and scalar codegen for it Still missing a LLVM lowering counter part. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 +++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 13 ++++++++++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 +++++++ clang/test/CIR/CodeGen/cast.cpp | 14 +++++++++++++- 5 files changed, 38 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a3873ac76db2..7c61d4cbb209 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -46,12 +46,13 @@ def CK_ArrayToPointerDecay : I32EnumAttrCase<"array_to_ptrdecay", 2>; def CK_IntegralCast : I32EnumAttrCase<"integral", 3>; def CK_BitCast : I32EnumAttrCase<"bitcast", 4>; def CK_FloatingCast : I32EnumAttrCase<"floating", 5>; +def CK_PtrToBoolean : I32EnumAttrCase<"ptr_to_bool", 6>; def CastKind : I32EnumAttr< "CastKind", "cast kind", [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast, - CK_BitCast, CK_FloatingCast]> { + CK_BitCast, CK_FloatingCast, CK_PtrToBoolean]> { let cppNamespace = "::mlir::cir"; } @@ -62,6 +63,7 @@ def CastOp : CIR_Op<"cast", [Pure]> { Apply C/C++ usual conversions rules between values. Currently supported kinds: - `int_to_bool` + - `ptr_to_bool` - `array_to_ptrdecay` - `integral` - `bitcast` diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 1e42a5ea5090..2536145be2ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -274,6 +274,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::CastKind::floating, v); } + mlir::Value createPtrToBoolCast(mlir::Value v) { + return create(v.getLoc(), getBoolTy(), + mlir::cir::CastKind::ptr_to_bool, v); + } + cir::Address createBaseClassAddr(mlir::Location loc, cir::Address addr, mlir::Type destType) { if (destType == addr.getElementType()) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index be4ecc38203a..14ac23e0896d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -514,6 +514,13 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } + /// Perform a pointer to boolean conversion. + mlir::Value buildPointerToBoolConversion(mlir::Value V, QualType QT) { + // An extra pass should make this into a `cir.cmp V, nullptr` before + // lowering to LLVM. + return CGF.getBuilder().createPtrToBoolCast(V); + } + // Comparisons. #define VISITCOMP(CODE) \ mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); } @@ -804,8 +811,8 @@ class ScalarExprEmitter : public StmtVisitor { loc, boolTy, mlir::cir::CastKind::int_to_bool, srcVal); } - /// EmitConversionToBool - Convert the specified expression value to a - /// boolean (i1) truth value. This is equivalent to "Val != 0". + /// Convert the specified expression value to a boolean (!cir.bool) truth + /// value. This is equivalent to "Val != 0". mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType, mlir::Location loc) { assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); @@ -1140,7 +1147,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } case CK_PointerToBoolean: - llvm_unreachable("NYI"); + return buildPointerToBoolConversion(Visit(E), E->getType()); case CK_FloatingToBoolean: llvm_unreachable("NYI"); case CK_MemberPointerToBoolean: diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a6daca808f4d..533a1f316c65 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -248,6 +248,13 @@ LogicalResult CastOp::verify() { return emitOpError() << "requires integral type for result"; return success(); } + case cir::CastKind::ptr_to_bool: { + if (!resType.isa()) + return emitOpError() << "requires !cir.bool type for result"; + if (!srcType.isa()) + return emitOpError() << "requires pointer type for result"; + return success(); + } case cir::CastKind::integral: { if (!resType.isa()) return emitOpError() << "requires !IntegerType for result"; diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 604a60359dde..2df21e731828 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s unsigned char cxxstaticcast_0(unsigned int x) { return static_cast(x); @@ -37,3 +38,14 @@ int cStyleCasts_0(unsigned x1, int x2) { return 0; } + +bool cptr(void *d) { + bool x = d; + return x; +} + +// CHECK: cir.func @_Z4cptrPv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} + +// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.cast(ptr_to_bool, %3 : !cir.ptr), !cir.bool \ No newline at end of file From 85768242cca5de79d06dbdf61a26dbacb7e1c3c7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 23 May 2023 15:34:24 -0700 Subject: [PATCH 0966/2301] [CIR][CIRGen] Bool expr evaluation: improve cir.unary(not) support --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 ++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 13 +++++++--- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 24 ++++++++++++++++--- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/cast.cpp | 16 ++++++++++++- 5 files changed, 52 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 2536145be2ca..72bf7380e0ff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -327,6 +327,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return create(loc, flag, dst); } + mlir::Value createNot(mlir::Value value) { + return create(value.getLoc(), value.getType(), + mlir::cir::UnaryOpKind::Not, value); + } + mlir::Value createZExtOrBitCast(mlir::Location loc, mlir::Value src, mlir::Type newTy) { if (src.getType() == newTy) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index ea370d90f39a..1d85df54a497 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -697,14 +697,14 @@ Address CIRGenFunction::buildPointerWithAlignment(const Expr *E, /// Perform the usual unary conversions on the specified /// expression and compare the result against zero, returning an Int1Ty value. mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *E) { - // TODO: PGO + // TODO(cir): PGO if (const MemberPointerType *MPT = E->getType()->getAs()) { assert(0 && "not implemented"); } QualType BoolTy = getContext().BoolTy; SourceLocation Loc = E->getExprLoc(); - // TODO: CGFPOptionsRAII for FP stuff. + // TODO(cir): CGFPOptionsRAII for FP stuff. if (!E->getType()->isAnyComplexType()) return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc); @@ -1841,7 +1841,14 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(const Expr *cond, // } if (const UnaryOperator *CondUOp = dyn_cast(cond)) { - llvm_unreachable("NYI"); + // In LLVM the condition is reversed here for efficient codegen. + // This should be done in CIR prior to LLVM lowering, if we do now + // we can make CIR based diagnostics misleading. + // cir.ternary(!x, t, f) -> cir.ternary(x, f, t) + // if (CondUOp->getOpcode() == UO_LNot) { + // buildOpOnBoolExpr(CondUOp->getSubExpr(), loc, elseS, thenS); + // } + assert(!UnimplementedFeature::shouldReverseUnaryCondOnBoolExpr()); } if (const ConditionalOperator *CondOp = dyn_cast(cond)) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 14ac23e0896d..2665299d3190 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -444,9 +444,7 @@ class ScalarExprEmitter : public StmtVisitor { return buildUnaryOp(E, mlir::cir::UnaryOpKind::Not, op); } - mlir::Value VisitUnaryLNot(const UnaryOperator *E) { - llvm_unreachable("NYI"); - } + mlir::Value VisitUnaryLNot(const UnaryOperator *E); mlir::Value VisitUnaryReal(const UnaryOperator *E) { llvm_unreachable("NYI"); } @@ -1240,6 +1238,26 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { return Visit(E->getInit(0)); } +mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { + // Perform vector logical not on comparison with zero vector. + if (E->getType()->isVectorType() && + E->getType()->castAs()->getVectorKind() == + VectorKind::Generic) { + llvm_unreachable("NYI"); + } + + // Compare operand to zero. + mlir::Value boolVal = CGF.evaluateExprAsBool(E->getSubExpr()); + + // Invert value. + boolVal = Builder.createNot(boolVal); + + // ZExt result to the expr type. + auto dstTy = ConvertType(E->getType()); + assert(boolVal.getType() == dstTy && "NYI"); + return boolVal; +} + mlir::Value ScalarExprEmitter::buildScalarCast( mlir::Value Src, QualType SrcType, QualType DstType, mlir::Type SrcTy, mlir::Type DstTy, ScalarConversionOpts Opts) { diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 7f8b7fd450fb..4bf53ca7203a 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -67,6 +67,7 @@ struct UnimplementedFeature { static bool shouldUseMemSetToInitialize() { return false; } static bool shouldSplitConstantStore() { return false; } static bool shouldCreateMemCpyFromGlobal() { return false; } + static bool shouldReverseUnaryCondOnBoolExpr() { return false; } static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 2df21e731828..e3b1e4e86cae 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -48,4 +48,18 @@ bool cptr(void *d) { // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} // CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %4 = cir.cast(ptr_to_bool, %3 : !cir.ptr), !cir.bool \ No newline at end of file +// CHECK: %4 = cir.cast(ptr_to_bool, %3 : !cir.ptr), !cir.bool + +void call_cptr(void *d) { + if (!cptr(d)) { + } +} + +// CHECK: cir.func @_Z9call_cptrPv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} + +// CHECK: cir.scope { +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.call @_Z4cptrPv(%1) : (!cir.ptr) -> !cir.bool +// CHECK: %3 = cir.unary(not, %2) : !cir.bool, !cir.bool +// CHECK: cir.if %3 { \ No newline at end of file From 760d0be175dcebfd210f47454ce5b5fa6774d1ff Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 23 May 2023 15:56:35 -0700 Subject: [PATCH 0967/2301] [CIR][CIRGen] Binary assignments: support some common check patterns --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/test/CIR/CodeGen/binassign.cpp | 23 +++++++++++++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 2665299d3190..f75643267843 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1519,7 +1519,7 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // If the lvalue is non-volatile, return the computed value of the assignment. if (!LHS.isVolatileQualified()) - llvm_unreachable("NYI"); + return RHS; // Otherwise, reload the value. return buildLoadOfLValue(LHS, E->getExprLoc()); diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp index c615289cbb0f..65b5cb583b41 100644 --- a/clang/test/CIR/CodeGen/binassign.cpp +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s int foo(int a, int b) { @@ -49,6 +49,27 @@ int foo(int a, int b) { // CHECK: = cir.binop(or, // CHECK: cir.store {{.*}}[[Value]] +typedef enum { + A = 3, +} enumy; + +enumy getty(); + +void exec() { + enumy r; + if ((r = getty()) < 0) {} +} + +// CHECK: cir.func @_Z4execv() { +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["r"] {alignment = 4 : i64} +// CHECK: cir.scope { +// CHECK: %1 = cir.call @_Z5gettyv() : () -> !u32i +// CHECK: cir.store %1, %0 : !u32i, cir.ptr +// CHECK: %2 = cir.cast(integral, %1 : !u32i), !s32i +// CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool +// CHECK: cir.if %4 { + // CHECK: [[SourceLocationB:#loc[0-9]+]] = loc("{{.*}}binassign.cpp":8:8) // CHECK: [[SourceLocationA:#loc[0-9]+]] = loc("{{.*}}binassign.cpp":8:3) // CHECK: [[SourceLocation]] = loc(fused[[[SourceLocationA]], [[SourceLocationB]]]) From 71fa39bc1b32d528c0fe64604c0a90503c4df3e0 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 24 May 2023 00:12:39 -0400 Subject: [PATCH 0968/2301] [CIR][Rebase][CIRGen] Fix usage of OwningOpRef that deletes twice The OwningOpRef used here will attempt to delete this module during it's dtor at the end of it's function but the module has already been deleted by the context in `lowerFromCIRToLLVMIR`. --- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 +- clang/lib/CIRFrontendAction/CIRGenAction.cpp | 341 ------------------ 2 files changed, 1 insertion(+), 342 deletions(-) delete mode 100644 clang/lib/CIRFrontendAction/CIRGenAction.cpp diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 083c9e469d7a..5e1b16c151c6 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -418,7 +418,7 @@ void CIRGenAction::ExecuteAction() { llvm::LLVMContext llvmCtx; auto llvmModule = lowerFromCIRToLLVMIR( - ci.getFrontendOpts(), *mlirModule, + ci.getFrontendOpts(), mlirModule.release(), std::unique_ptr(mlirContext), llvmCtx); if (outstream) diff --git a/clang/lib/CIRFrontendAction/CIRGenAction.cpp b/clang/lib/CIRFrontendAction/CIRGenAction.cpp deleted file mode 100644 index 0f36a3cef2ec..000000000000 --- a/clang/lib/CIRFrontendAction/CIRGenAction.cpp +++ /dev/null @@ -1,341 +0,0 @@ -//===--- CIRGenAction.cpp - LLVM Code generation Frontend Action ---------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "clang/CIRFrontendAction/CIRGenAction.h" -#include "mlir/Dialect/CIR/IR/CIRDialect.h" -#include "mlir/Dialect/Func/IR/FuncOps.h" -#include "mlir/Dialect/MemRef/IR/MemRef.h" -#include "mlir/IR/BuiltinOps.h" -#include "mlir/IR/MLIRContext.h" -#include "mlir/IR/OperationSupport.h" -#include "mlir/Parser/Parser.h" -#include "clang/AST/ASTConsumer.h" -#include "clang/AST/ASTContext.h" -#include "clang/AST/DeclCXX.h" -#include "clang/AST/DeclGroup.h" -#include "clang/Basic/DiagnosticFrontend.h" -#include "clang/Basic/FileManager.h" -#include "clang/Basic/LangStandard.h" -#include "clang/Basic/SourceManager.h" -#include "clang/Basic/TargetInfo.h" -#include "clang/CIR/CIRGenerator.h" -#include "clang/CIR/CIRToCIRPasses.h" -#include "clang/CIR/LowerToLLVM.h" -#include "clang/CodeGen/BackendUtil.h" -#include "clang/CodeGen/ModuleBuilder.h" -#include "clang/Driver/DriverDiagnostic.h" -#include "clang/Frontend/CompilerInstance.h" -#include "clang/Frontend/FrontendDiagnostic.h" -#include "clang/Lex/Preprocessor.h" -#include "llvm/Bitcode/BitcodeReader.h" -#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" -#include "llvm/IR/DebugInfo.h" -#include "llvm/IR/DiagnosticInfo.h" -#include "llvm/IR/DiagnosticPrinter.h" -#include "llvm/IR/GlobalValue.h" -#include "llvm/IR/LLVMContext.h" -#include "llvm/IR/LLVMRemarkStreamer.h" -#include "llvm/IR/Module.h" -#include "llvm/IRReader/IRReader.h" -#include "llvm/LTO/LTOBackend.h" -#include "llvm/Linker/Linker.h" -#include "llvm/Pass.h" -#include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/SourceMgr.h" -#include "llvm/Support/TimeProfiler.h" -#include "llvm/Support/Timer.h" -#include "llvm/Support/ToolOutputFile.h" -#include "llvm/Support/YAMLTraits.h" -#include "llvm/Transforms/IPO/Internalize.h" - -#include - -using namespace cir; -using namespace clang; - -namespace cir { -class CIRGenConsumer : public clang::ASTConsumer { - - virtual void anchor(); - - CIRGenAction::OutputType action; - - CompilerInstance &compilerInstance; - DiagnosticsEngine &diagnosticsEngine; - const HeaderSearchOptions &headerSearchOptions; - CodeGenOptions &codeGenOptions; - const TargetOptions &targetOptions; - const LangOptions &langOptions; - const FrontendOptions &feOptions; - - std::unique_ptr outputStream; - - ASTContext *astContext{nullptr}; - std::unique_ptr gen; - -public: - CIRGenConsumer(CIRGenAction::OutputType action, - CompilerInstance &compilerInstance, - DiagnosticsEngine &diagnosticsEngine, - const HeaderSearchOptions &headerSearchOptions, - CodeGenOptions &codeGenOptions, - const TargetOptions &targetOptions, - const LangOptions &langOptions, - const FrontendOptions &feOptions, - std::unique_ptr os) - : action(action), compilerInstance(compilerInstance), - diagnosticsEngine(diagnosticsEngine), - headerSearchOptions(headerSearchOptions), - codeGenOptions(codeGenOptions), targetOptions(targetOptions), - langOptions(langOptions), feOptions(feOptions), - - outputStream(std::move(os)), - - gen(std::make_unique(diagnosticsEngine, codeGenOptions)) { - } - - void Initialize(ASTContext &ctx) override { - assert(!astContext && "initialized multiple times"); - - astContext = &ctx; - - gen->Initialize(ctx); - } - - bool HandleTopLevelDecl(DeclGroupRef D) override { - PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), - astContext->getSourceManager(), - "LLVM IR generation of declaration"); - gen->HandleTopLevelDecl(D); - return true; - } - - void HandleCXXStaticMemberVarInstantiation(clang::VarDecl *VD) override { - llvm_unreachable("NYI"); - } - - void HandleInlineFunctionDefinition(FunctionDecl *D) override { - gen->HandleInlineFunctionDefinition(D); - } - - void HandleInterestingDecl(DeclGroupRef D) override { - llvm_unreachable("NYI"); - } - - void HandleTranslationUnit(ASTContext &C) override { - // Note that this method is called after `HandleTopLevelDecl` has already - // ran all over the top level decls. Here clang mostly wraps defered and - // global codegen, followed by running CIR passes. - - gen->HandleTranslationUnit(C); - if (!feOptions.DisableCIRVerifier) - if (!gen->verifyModule()) { - llvm::report_fatal_error( - "CIR codegen: module verification error before running CIR passes"); - return; - } - - auto mlirMod = gen->getModule(); - auto mlirCtx = gen->takeContext(); - - switch (action) { - case CIRGenAction::OutputType::EmitCIR: - if (outputStream && mlirMod) { - if (!feOptions.DisableCIRPasses) { - runCIRToCIRPasses(mlirMod, mlirCtx.get(), - !feOptions.DisableCIRVerifier); - } - mlir::OpPrintingFlags flags; - // FIXME: we cannot roundtrip prettyForm=true right now. - flags.enableDebugInfo(/*prettyForm=*/false); - mlirMod->print(*outputStream, flags); - } - break; - case CIRGenAction::OutputType::EmitLLVM: { - llvm::LLVMContext llvmCtx; - auto llvmModule = - lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); - if (outputStream) - llvmModule->print(*outputStream, nullptr); - break; - } - case CIRGenAction::OutputType::EmitObj: { - // TODO: Don't duplicate this from above - llvm::LLVMContext llvmCtx; - auto llvmModule = - lowerFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); - - llvmModule->setTargetTriple(targetOptions.Triple); - - emitBackendOutput(compilerInstance, codeGenOptions, - C.getTargetInfo().getDataLayoutString(), - llvmModule.get(), BackendAction::Backend_EmitObj, - nullptr, std::move(outputStream)); - break; - } - case CIRGenAction::OutputType::EmitAssembly: - assert(false && "Not yet implemented"); - break; - case CIRGenAction::OutputType::None: - break; - } - } - - void HandleTagDeclDefinition(TagDecl *D) override { - PrettyStackTraceDecl CrashInfo(D, SourceLocation(), - astContext->getSourceManager(), - "CIR generation of declaration"); - gen->HandleTagDeclDefinition(D); - } - - void HandleTagDeclRequiredDefinition(const TagDecl *D) override { - gen->HandleTagDeclRequiredDefinition(D); - } - - void CompleteTentativeDefinition(VarDecl *D) override { - llvm_unreachable("NYI"); - } - - void CompleteExternalDeclaration(DeclaratorDecl *D) override { - llvm_unreachable("NYI"); - } - - void AssignInheritanceModel(CXXRecordDecl *RD) override { - llvm_unreachable("NYI"); - } - - void HandleVTable(CXXRecordDecl *RD) override { llvm_unreachable("NYI"); } -}; -} // namespace cir - -void CIRGenConsumer::anchor() {} - -CIRGenAction::CIRGenAction(OutputType act, mlir::MLIRContext *_MLIRContext) - : mlirContext(_MLIRContext ? _MLIRContext : new mlir::MLIRContext), - action(act) {} - -CIRGenAction::~CIRGenAction() { mlirModule.reset(); } - -void CIRGenAction::EndSourceFileAction() { - // If the consumer creation failed, do nothing. - if (!getCompilerInstance().hasASTConsumer()) - return; - - // TODO: pass the module around - // module = cgConsumer->takeModule(); -} - -static std::unique_ptr -getOutputStream(CompilerInstance &ci, StringRef inFile, - CIRGenAction::OutputType action) { - switch (action) { - case CIRGenAction::OutputType::EmitAssembly: - return ci.createDefaultOutputFile(false, inFile, "s"); - case CIRGenAction::OutputType::EmitCIR: - return ci.createDefaultOutputFile(false, inFile, "cir"); - case CIRGenAction::OutputType::EmitLLVM: - return ci.createDefaultOutputFile(false, inFile, "llvm"); - case CIRGenAction::OutputType::EmitObj: - return ci.createDefaultOutputFile(true, inFile, "o"); - case CIRGenAction::OutputType::None: - return nullptr; - } - - llvm_unreachable("Invalid action!"); -} - -std::unique_ptr -CIRGenAction::CreateASTConsumer(CompilerInstance &ci, StringRef inputFile) { - auto out = ci.takeOutputStream(); - if (!out) - out = getOutputStream(ci, inputFile, action); - - auto Result = std::make_unique( - action, ci, ci.getDiagnostics(), ci.getHeaderSearchOpts(), - ci.getCodeGenOpts(), ci.getTargetOpts(), ci.getLangOpts(), - ci.getFrontendOpts(), std::move(out)); - cgConsumer = Result.get(); - - // Enable generating macro debug info only when debug info is not disabled and - // also macrod ebug info is enabled - if (ci.getCodeGenOpts().getDebugInfo() != llvm::codegenoptions::NoDebugInfo && - ci.getCodeGenOpts().MacroDebugInfo) { - llvm_unreachable("NYI"); - } - - return std::move(Result); -} - -mlir::OwningOpRef -CIRGenAction::loadModule(llvm::MemoryBufferRef mbRef) { - auto module = - mlir::parseSourceString(mbRef.getBuffer(), mlirContext); - assert(module && "Failed to parse ClangIR module"); - return module; -} - -void CIRGenAction::ExecuteAction() { - if (getCurrentFileKind().getLanguage() != Language::CIR) { - this->ASTFrontendAction::ExecuteAction(); - return; - } - - // If this is a CIR file we have to treat it specially. - // TODO: This could be done more logically. This is just modeled at the moment - // mimicing CodeGenAction but this is clearly suboptimal. - auto &ci = getCompilerInstance(); - std::unique_ptr outstream = - getOutputStream(ci, getCurrentFile(), action); - if (action != OutputType::None && !outstream) - return; - - auto &sourceManager = ci.getSourceManager(); - auto fileID = sourceManager.getMainFileID(); - auto mainFile = sourceManager.getBufferOrNone(fileID); - - if (!mainFile) - return; - - mlirContext->getOrLoadDialect(); - mlirContext->getOrLoadDialect(); - mlirContext->getOrLoadDialect(); - - // TODO: unwrap this -- this exists because including the `OwningModuleRef` in - // CIRGenAction's header would require linking the Frontend against MLIR. - // Let's avoid that for now. - auto mlirModule = loadModule(*mainFile); - if (!mlirModule) - return; - - llvm::LLVMContext llvmCtx; - auto llvmModule = lowerFromCIRToLLVMIR( - *mlirModule, std::unique_ptr(mlirContext), llvmCtx); - - if (outstream) - llvmModule->print(*outstream, nullptr); -} - -void EmitAssemblyAction::anchor() {} -EmitAssemblyAction::EmitAssemblyAction(mlir::MLIRContext *_MLIRContext) - : CIRGenAction(OutputType::EmitAssembly, _MLIRContext) {} - -void EmitCIRAction::anchor() {} -EmitCIRAction::EmitCIRAction(mlir::MLIRContext *_MLIRContext) - : CIRGenAction(OutputType::EmitCIR, _MLIRContext) {} - -void EmitCIROnlyAction::anchor() {} -EmitCIROnlyAction::EmitCIROnlyAction(mlir::MLIRContext *_MLIRContext) - : CIRGenAction(OutputType::None, _MLIRContext) {} - -void EmitLLVMAction::anchor() {} -EmitLLVMAction::EmitLLVMAction(mlir::MLIRContext *_MLIRContext) - : CIRGenAction(OutputType::EmitLLVM, _MLIRContext) {} - -void EmitObjAction::anchor() {} -EmitObjAction::EmitObjAction(mlir::MLIRContext *_MLIRContext) - : CIRGenAction(OutputType::EmitObj, _MLIRContext) {} From b4f181b3ead7621054ee92add905052f31f8184b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 23 May 2023 16:30:37 -0700 Subject: [PATCH 0969/2301] [CIR][cir-tidy] Fix missing argument for the new VFS argument to CIRGenerator --- clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp index c785bb939eb0..0f4107035177 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp @@ -27,8 +27,8 @@ namespace tidy { CIRASTConsumer::CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, clang::tidy::ClangTidyContext &Context) : Context(Context) { - Gen = - std::make_unique(CI.getDiagnostics(), CI.getCodeGenOpts()); + Gen = std::make_unique(CI.getDiagnostics(), nullptr, + CI.getCodeGenOpts()); } bool CIRASTConsumer::HandleTopLevelDecl(DeclGroupRef D) { From 4af5a278a8dd1ddd973ff7d64bbb7c90a7185e6b Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 24 May 2023 23:41:34 -0400 Subject: [PATCH 0970/2301] [CIR][CodeGen] Support floats when building array constants Support array initializers with floating point values. The last point of failure here was to support the null check here. --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 17 ++++++++++++----- clang/test/CIR/CodeGen/array-init.c | 10 ++++++++++ 2 files changed, 22 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/array-init.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 146c2f3f07b9..c53429a72990 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -942,11 +942,18 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, auto &builder = CGM.getBuilder(); auto isNullValue = [&](mlir::Attribute f) { // TODO(cir): introduce char type in CIR and check for that instead. - auto intVal = f.dyn_cast_or_null(); - assert(intVal && "not implemented"); - if (intVal.getValue() == 0) - return true; - return false; + if (const auto intVal = f.dyn_cast_or_null()) + return intVal.getValue() == 0; + + if (const auto fpVal = f.dyn_cast_or_null()) { + bool ignored; + llvm::APFloat FV(+0.0); + FV.convert(fpVal.getValue().getSemantics(), + llvm::APFloat::rmNearestTiesToEven, &ignored); + return FV.bitwiseIsEqual(fpVal.getValue()); + } + + llvm_unreachable("NYI"); }; // Figure out how long the initial prefix of non-zero elements is. diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c new file mode 100644 index 000000000000..82ccc68a88a9 --- /dev/null +++ b/clang/test/CIR/CodeGen/array-init.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +void foo() { + double bar[] = {9,8,7}; +} + +// CHECK: %0 = cir.alloca !cir.array, cir.ptr >, ["bar"] {alignment = 16 : i64} +// CHECK-NEXT: %1 = cir.const(#cir.const_array<[9.000000e+00, 8.000000e+00, 7.000000e+00]> : !cir.array) : !cir.array +// CHECK-NEXT: cir.store %1, %0 : !cir.array, cir.ptr > + From 71561a4b50a9c88eb9c270a0fc25752cf536280e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 25 May 2023 01:01:40 -0400 Subject: [PATCH 0971/2301] [CIR][CodeGen] Support float_to_int casts Add a new type of CastKind for float_to_int and a corresponding verifier. Then plumb through support in a few places to support lowering. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 ++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 32 +++++++++++++++++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 +++++ clang/test/CIR/CodeGen/cast.cpp | 8 +++-- 4 files changed, 44 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7c61d4cbb209..3fcbd6b53e6a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -47,12 +47,13 @@ def CK_IntegralCast : I32EnumAttrCase<"integral", 3>; def CK_BitCast : I32EnumAttrCase<"bitcast", 4>; def CK_FloatingCast : I32EnumAttrCase<"floating", 5>; def CK_PtrToBoolean : I32EnumAttrCase<"ptr_to_bool", 6>; +def CK_FloatToIntegral : I32EnumAttrCase<"float_to_int", 7>; def CastKind : I32EnumAttr< "CastKind", "cast kind", [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast, - CK_BitCast, CK_FloatingCast, CK_PtrToBoolean]> { + CK_BitCast, CK_FloatingCast, CK_PtrToBoolean, CK_FloatToIntegral]> { let cppNamespace = "::mlir::cir"; } @@ -68,6 +69,7 @@ def CastOp : CIR_Op<"cast", [Pure]> { - `integral` - `bitcast` - `floating` + - `float_to_int` This is effectively a subset of the rules from `llvm-project/clang/include/clang/AST/OperationKinds.def`; but note that some diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index f75643267843..0839d88bcd7b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -836,6 +836,13 @@ class ScalarExprEmitter : public StmtVisitor { buildScalarConversion(mlir::Value Src, QualType SrcType, QualType DstType, SourceLocation Loc, ScalarConversionOpts Opts = ScalarConversionOpts()) { + // All conversions involving fixed point types should be handled by the + // buildFixedPoint family functions. This is done to prevent bloating up + // this function more, and although fixed point numbers are represented by + // integers, we do not want to follow any logic that assumes they should be + // treated as integers. + // TODO(leonardchan): When necessary, add another if statement checking for + // conversions to fixed point types from other types. if (SrcType->isFixedPointType()) { llvm_unreachable("not implemented"); } else if (DstType->isFixedPointType()) { @@ -1131,8 +1138,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_FloatingCast: case CK_FixedPointToFloating: case CK_FloatingToFixedPoint: { - if (Kind != CK_FloatingCast) - llvm_unreachable("Only FloatingCast supported so far."); + if (!(Kind == CK_FloatingCast || Kind == CK_FloatingToIntegral)) + llvm_unreachable("Only FloatingCast and Integral supported so far."); CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, CE); return buildScalarConversion(Visit(E), E->getType(), DestTy, CE->getExprLoc()); @@ -1266,7 +1273,7 @@ mlir::Value ScalarExprEmitter::buildScalarCast( mlir::Type DstElementTy; QualType SrcElementType; QualType DstElementType; - if (SrcType->isMatrixType() || DstType->isMatrixType()) { + if (SrcType->isMatrixType() && DstType->isMatrixType()) { llvm_unreachable("NYI"); } else { assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && @@ -1296,13 +1303,30 @@ mlir::Value ScalarExprEmitter::buildScalarCast( if (DstElementTy.isa()) return Builder.create( Src.getLoc(), DstTy, mlir::cir::CastKind::integral, Src); - llvm_unreachable("NYI"); + return Builder.create( + Src.getLoc(), DstTy, mlir::cir::CastKind::floating, Src); } + // Leaving mlir::IntegerType around incase any old user lingers if (DstElementTy.isa()) { llvm_unreachable("NYI"); } + if (DstElementTy.isa()) { + assert(SrcElementTy.isa() && "Unknown real conversion"); + + // If we can't recognize overflow as undefined behavior, assume that + // overflow saturates. This protects against normal optimizations if we are + // compiling with non-standard FP semantics. + if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) + llvm_unreachable("NYI"); + + if (Builder.getIsFPConstrained()) + llvm_unreachable("NYI"); + return Builder.create( + Src.getLoc(), DstTy, mlir::cir::CastKind::float_to_int, Src); + } + auto FloatDstTy = DstElementTy.cast(); auto FloatSrcTy = SrcElementTy.cast(); if (FloatDstTy.getWidth() < FloatSrcTy.getWidth()) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 533a1f316c65..66e316d36d17 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -289,6 +289,13 @@ LogicalResult CastOp::verify() { return emitOpError() << "requries floating for source and result"; return success(); } + case cir::CastKind::float_to_int: { + if (!srcType.dyn_cast()) + return emitOpError() << "requires floating for source"; + if (!resType.dyn_cast()) + return emitOpError() << "requires !IntegerType for result"; + return success(); + } } llvm_unreachable("Unknown CastOp kind?"); diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index e3b1e4e86cae..18c7995e713c 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -17,7 +17,7 @@ unsigned char cxxstaticcast_0(unsigned int x) { // CHECK: } -int cStyleCasts_0(unsigned x1, int x2) { +int cStyleCasts_0(unsigned x1, int x2, float x3) { // CHECK: cir.func @_{{.*}}cStyleCasts_0{{.*}} char a = (char)x1; // truncate @@ -36,6 +36,10 @@ int cStyleCasts_0(unsigned x1, int x2) { int* e = (int*)arr; // explicit pointer decay // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %{{[0-9]+}} : !cir.ptr>), !cir.ptr + int f = (int)x3; + // CHECK: %{{[0-9]+}} = cir.cast(float_to_int, %{{[0-9]+}} : f32), !s32i + + return 0; } @@ -62,4 +66,4 @@ void call_cptr(void *d) { // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %2 = cir.call @_Z4cptrPv(%1) : (!cir.ptr) -> !cir.bool // CHECK: %3 = cir.unary(not, %2) : !cir.bool, !cir.bool -// CHECK: cir.if %3 { \ No newline at end of file +// CHECK: cir.if %3 { From 0c59354a710554585566bdff51041c5fd5c646ba Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Thu, 25 May 2023 15:44:50 -0300 Subject: [PATCH 0972/2301] [CIR][NFC] Implement CIR function type Add custom !cir.func type to represent CIR function types. Currently it is only a copy of the builtin FunctionType with an added boolean to track if the function is variadic or not. ghstack-source-id: 7dbcbf7ebeec1942f2ed981aa81ca71179f973f3 Pull Request resolved: https://github.com/llvm/clangir/pull/75 --- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 10 ++++ .../include/clang/CIR/Dialect/IR/CIRTypes.td | 60 ++++++++++++++++++- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 58 ++++++++++++++++++ 3 files changed, 127 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 87aea83b744e..133a30568018 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -17,6 +17,16 @@ #include "mlir/IR/Types.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" +//===----------------------------------------------------------------------===// +// CIR Custom Parser/Printer Signatures +//===----------------------------------------------------------------------===// + +mlir::ParseResult +parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, + bool &isVarArg); +void printFuncTypeArgs(mlir::AsmPrinter &p, + mlir::ArrayRef params, bool isVarArg); + //===----------------------------------------------------------------------===// // CIR Dialect Types //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 03f45838638f..b84bf0bce1b8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -165,11 +165,69 @@ def CIR_ArrayType : CIR_Type<"Array", "array", let hasCustomAssemblyFormat = 1; } +//===----------------------------------------------------------------------===// +// FuncType +//===----------------------------------------------------------------------===// + +def CIR_FuncType : CIR_Type<"Func", "func"> { + let summary = "CIR function type"; + let description = [{ + The `!cir.func` is a function type. It consists of a single return type, a + list of parameter types and can optionally be variadic. + + Example: + + ```mlir + !cir.func + !cir.func + !cir.func + ``` + }]; + + let parameters = (ins ArrayRefParameter<"Type">:$inputs, + ArrayRefParameter<"Type">:$results, "bool":$varArg); + let assemblyFormat = [{ + `<` $results ` ` `(` custom($inputs, $varArg) `>` + }]; + + let skipDefaultBuilders = 1; + + let builders = [ + TypeBuilder<(ins CArg<"TypeRange">:$inputs, CArg<"TypeRange">:$results, + CArg<"bool", "false">:$isVarArg), [{ + return $_get($_ctxt, llvm::to_vector(inputs), llvm::to_vector(results), isVarArg); + }]> + ]; + + let genVerifyDecl = 1; + + let extraClassDeclaration = [{ + /// Returns whether the function is variadic. + bool isVarArg() const { return getVarArg(); } + + /// Returns the `i`th input operand type. Asserts if out of bounds. + Type getInput(unsigned i) const { return getInputs()[i]; } + + /// Returns the number of arguments to the function. + unsigned getNumInputs() const { return getInputs().size(); } + + /// Returns the `i`th result operand type. Asserts if out of bounds. + Type getResult(unsigned i) const { return getResults()[i]; } + + /// Returns the number of results to the function. + unsigned getNumResults() const { return getResults().size(); } + + /// Returns a clone of this function type with the given argument + /// and result types. + FuncType clone(TypeRange inputs, TypeRange results) const; + }]; +} + //===----------------------------------------------------------------------===// // One type to bind them all //===----------------------------------------------------------------------===// def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType, CIR_BoolType, CIR_StructType, - CIR_ArrayType]>; + CIR_ArrayType, CIR_FuncType]>; #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 9580bf0be5a8..93739dbd3ddc 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -390,6 +390,64 @@ IntType::verify(llvm::function_ref emitError, return mlir::success(); } +//===----------------------------------------------------------------------===// +// FuncType Definitions +//===----------------------------------------------------------------------===// + +mlir::LogicalResult +FuncType::verify(llvm::function_ref emitError, + llvm::ArrayRef inputs, + llvm::ArrayRef results, bool varArg) { + if (results.size() > 1) + return emitError() << "functions only supports 0 or 1 results"; + if (varArg && inputs.empty()) + return emitError() << "functions must have at least one non-variadic input"; + return mlir::success(); +} + +FuncType FuncType::clone(TypeRange inputs, TypeRange results) const { + return get(getContext(), results, inputs, isVarArg()); +} + +mlir::ParseResult +parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, + bool &isVarArg) { + isVarArg = false; + // `(` `)` + if (succeeded(p.parseOptionalRParen())) + return mlir::success(); + + // type (`,` type)* (`,` `...`)? + mlir::Type type; + if (p.parseType(type)) + return mlir::failure(); + params.push_back(type); + while (succeeded(p.parseOptionalComma())) { + if (succeeded(p.parseOptionalEllipsis())) { + isVarArg = true; + return p.parseRParen(); + } + if (p.parseType(type)) + return mlir::failure(); + params.push_back(type); + } + + return p.parseRParen(); +} + +void printFuncTypeArgs(mlir::AsmPrinter &p, + mlir::ArrayRef params, + bool isVarArg) { + llvm::interleaveComma(params, p, + [&p](mlir::Type type) { p.printType(type); }); + if (isVarArg) { + if (!params.empty()) + p << ", "; + p << "..."; + } + p << ')'; +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// From 111f4358bd815cf91df1a475525c58cbe8f911e5 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Thu, 25 May 2023 15:44:50 -0300 Subject: [PATCH 0973/2301] [CIR][CIRGen] Update IR to use !cir.func type Updates CallOp, FuncOp, and everything related to the custom !cir.func type. Also, patches lowering of function operations. ghstack-source-id: fd90735a9b604c7acfa8a30fbe50a57e066b0d31 Pull Request resolved: https://github.com/llvm/clangir/pull/76 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 8 ++--- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 3 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 13 +++---- clang/lib/CIR/CodeGen/CIRGenCall.h | 2 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 20 +++++------ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 14 ++++---- clang/lib/CIR/CodeGen/CIRGenModule.h | 8 ++--- clang/lib/CIR/CodeGen/CIRGenTypes.h | 6 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 36 +++++++++++-------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 13 ++++--- clang/test/CIR/CodeGen/coro-task.cpp | 12 +++---- clang/test/CIR/CodeGen/dtors.cpp | 2 +- clang/test/CIR/CodeGen/lambda.cpp | 14 ++++---- clang/test/CIR/CodeGen/struct.cpp | 2 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 14 ++++---- clang/test/CIR/IR/func.cir | 23 +++++++++++- clang/test/CIR/IR/invalid.cir | 32 +++++++++++++++++ 18 files changed, 144 insertions(+), 80 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 3fcbd6b53e6a..85a3250bb809 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1424,7 +1424,7 @@ def FuncOp : CIR_Op<"func", [ }]; let arguments = (ins SymbolNameAttr:$sym_name, - TypeAttrOf:$function_type, + TypeAttrOf:$function_type, UnitAttr:$builtin, UnitAttr:$coroutine, UnitAttr:$lambda, @@ -1439,7 +1439,7 @@ def FuncOp : CIR_Op<"func", [ let skipDefaultBuilders = 1; let builders = [OpBuilder<(ins - "StringRef":$name, "FunctionType":$type, + "StringRef":$name, "FuncType":$type, CArg<"GlobalLinkageKind", "GlobalLinkageKind::ExternalLinkage">:$linkage, CArg<"ArrayRef", "{}">:$attrs, CArg<"ArrayRef", "{}">:$argAttrs) @@ -1526,7 +1526,7 @@ def CallOp : CIR_Op<"call", $_state.addTypes(callee.getFunctionType().getResults()); }]>, OpBuilder<(ins "Value":$ind_target, - "FunctionType":$fn_type, + "FuncType":$fn_type, CArg<"ValueRange", "{}">:$operands), [{ $_state.addOperands(ValueRange{ind_target}); $_state.addOperands(operands); @@ -1534,8 +1534,6 @@ def CallOp : CIR_Op<"call", }]>]; let extraClassDeclaration = [{ - FunctionType getCalleeType(); - mlir::Value getIndirectCallee() { assert(!getCallee() && "only works for indirect call"); return *arg_operand_begin(); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 72bf7380e0ff..69240258866f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -167,7 +167,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::Type getVirtualFnPtrType([[maybe_unused]] bool isVarArg = false) { // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special // type so it's a bit more clear and C++ idiomatic. - auto fnTy = mlir::FunctionType::get(getContext(), {}, {getInt32Ty()}); + auto fnTy = + mlir::cir::FuncType::get(getContext(), {}, {getInt32Ty()}); assert(!UnimplementedFeature::isVarArg()); return getPointerTo(getPointerTo(fnTy)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index c18a5c1d1226..29f2d6b23591 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -190,12 +190,12 @@ static bool hasInAllocaArgs(CIRGenModule &CGM, CallingConv ExplicitCC, return false; } -mlir::FunctionType CIRGenTypes::GetFunctionType(GlobalDecl GD) { +mlir::cir::FuncType CIRGenTypes::GetFunctionType(GlobalDecl GD) { const CIRGenFunctionInfo &FI = arrangeGlobalDeclaration(GD); return GetFunctionType(FI); } -mlir::FunctionType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { +mlir::cir::FuncType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { bool Inserted = FunctionsBeingProcessed.insert(&FI).second; (void)Inserted; assert(Inserted && "Recursively being processed?"); @@ -255,11 +255,12 @@ mlir::FunctionType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { (void)Erased; assert(Erased && "Not in set?"); - return Builder.getFunctionType(ArgTypes, - resultType ? resultType : mlir::TypeRange()); + return mlir::cir::FuncType::get( + &getMLIRContext(), ArgTypes, + (resultType ? resultType : mlir::TypeRange{})); } -mlir::FunctionType CIRGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { +mlir::cir::FuncType CIRGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { const CXXMethodDecl *MD = cast(GD.getDecl()); const FunctionProtoType *FPT = MD->getType()->getAs(); @@ -314,7 +315,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, QualType RetTy = CallInfo.getReturnType(); const auto &RetAI = CallInfo.getReturnInfo(); - mlir::FunctionType CIRFuncTy = getTypes().GetFunctionType(CallInfo); + mlir::cir::FuncType CIRFuncTy = getTypes().GetFunctionType(CallInfo); const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 5808a30aa79b..538eb26811f2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -71,7 +71,7 @@ class CIRGenCallee { const clang::CallExpr *CE; clang::GlobalDecl MD; Address Addr; - mlir::FunctionType FTy; + mlir::cir::FuncType FTy; }; SpecialKind KindOrFunctionPointer; diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index f11b20596c10..75f6b3fd2de7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -170,7 +170,7 @@ mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroId, - builder.getFunctionType( + builder.getType( mlir::TypeRange{int32Ty, int8PtrTy, int8PtrTy, int8PtrTy}, mlir::TypeRange{int32Ty}), /*FD=*/nullptr); @@ -194,11 +194,11 @@ CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { mlir::cir::FuncOp fnOp; if (!builtin) { - fnOp = - CGM.createCIRFunction(loc, CGM.builtinCoroAlloc, - builder.getFunctionType(mlir::TypeRange{int32Ty}, - mlir::TypeRange{boolTy}), - /*FD=*/nullptr); + fnOp = CGM.createCIRFunction( + loc, CGM.builtinCoroAlloc, + builder.getType(mlir::TypeRange{int32Ty}, + mlir::TypeRange{boolTy}), + /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); } else @@ -219,8 +219,8 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroBegin, - builder.getFunctionType(mlir::TypeRange{int32Ty, int8PtrTy}, - mlir::TypeRange{int8PtrTy}), + builder.getType( + mlir::TypeRange{int32Ty, int8PtrTy}, mlir::TypeRange{int8PtrTy}), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); @@ -242,8 +242,8 @@ mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroEnd, - builder.getFunctionType(mlir::TypeRange{int8PtrTy, boolTy}, - mlir::TypeRange{boolTy}), + builder.getType(mlir::TypeRange{int8PtrTy, boolTy}, + mlir::TypeRange{boolTy}), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 16c02ca03269..8aae2b998a27 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -185,7 +185,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( else FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl); - mlir::FunctionType Ty = CGM.getTypes().GetFunctionType(*FInfo); + auto Ty = CGM.getTypes().GetFunctionType(*FInfo); // C++11 [class.mfct.non-static]p2: // If a non-static member function of a class X is called for an object that diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c091b8f952c4..271c14e8a816 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -376,7 +376,7 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, // Compute the function info and CIR type. const CIRGenFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); - mlir::FunctionType Ty = getTypes().GetFunctionType(FI); + auto Ty = getTypes().GetFunctionType(FI); // Get or create the prototype for the function. // if (!V || (V.getValueType() != Ty)) @@ -1439,10 +1439,10 @@ bool CIRGenModule::verifyModule() { return mlir::verify(theModule).succeeded(); } -std::pair +std::pair CIRGenModule::getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CIRGenFunctionInfo *FnInfo, - mlir::FunctionType FnType, + mlir::cir::FuncType FnType, bool Dontdefer, ForDefinition_t IsForDefinition) { auto *MD = cast(GD.getDecl()); @@ -1612,7 +1612,7 @@ bool CIRGenModule::lookupRepresentativeDecl(StringRef MangledName, mlir::cir::FuncOp CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, - mlir::FunctionType Ty, + mlir::cir::FuncType Ty, const clang::FunctionDecl *FD) { // At the point we need to create the function, the insertion point // could be anywhere (e.g. callsite). Do not rely on whatever it might @@ -1737,9 +1737,9 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // set attributes. bool IsIncompleteFunction = false; - mlir::FunctionType FTy; - if (Ty.isa()) { - FTy = Ty.cast(); + mlir::cir::FuncType FTy; + if (Ty.isa()) { + FTy = Ty.cast(); } else { assert(false && "NYI"); // FTy = mlir::FunctionType::get(VoidTy, false); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 25d35deddff2..0ca53b2329b6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -323,7 +323,7 @@ class CIRGenModule : public CIRGenTypeCache { mlir::cir::FuncOp getAddrOfCXXStructor( clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, - mlir::FunctionType FnType = nullptr, bool DontDefer = false, + mlir::cir::FuncType FnType = nullptr, bool DontDefer = false, ForDefinition_t IsForDefinition = NotForDefinition) { return getAddrAndTypeOfCXXStructor(GD, FnInfo, FnType, DontDefer, @@ -365,9 +365,9 @@ class CIRGenModule : public CIRGenTypeCache { DefaultMethodsToEmit.emplace_back(GD); } - std::pair getAddrAndTypeOfCXXStructor( + std::pair getAddrAndTypeOfCXXStructor( clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, - mlir::FunctionType FnType = nullptr, bool Dontdefer = false, + mlir::cir::FuncType FnType = nullptr, bool Dontdefer = false, ForDefinition_t IsForDefinition = NotForDefinition); void buildTopLevelDecl(clang::Decl *decl); @@ -512,7 +512,7 @@ class CIRGenModule : public CIRGenTypeCache { // Effectively create the CIR instruction, properly handling insertion // points. mlir::cir::FuncOp createCIRFunction(mlir::Location loc, StringRef name, - mlir::FunctionType Ty, + mlir::cir::FuncType Ty, const clang::FunctionDecl *FD); /// Emit type info if type of an expression is a variably modified diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index bf1a50580878..1b93643928c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -182,14 +182,14 @@ class CIRGenTypes { mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); /// Get the CIR function type for \arg Info. - mlir::FunctionType GetFunctionType(const CIRGenFunctionInfo &Info); + mlir::cir::FuncType GetFunctionType(const CIRGenFunctionInfo &Info); - mlir::FunctionType GetFunctionType(clang::GlobalDecl GD); + mlir::cir::FuncType GetFunctionType(clang::GlobalDecl GD); /// Get the LLVM function type for use in a vtable, given a CXXMethodDecl. If /// the method to has an incomplete return type, and/or incomplete argument /// types, this will return the opaque type. - mlir::FunctionType GetFunctionTypeForVTable(clang::GlobalDecl GD); + mlir::cir::FuncType GetFunctionTypeForVTable(clang::GlobalDecl GD); // The arrangement methods are split into three families: // - those meant to drive the signature and prologue/epilogue diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 66e316d36d17..8944991a0857 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -17,12 +17,17 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/IR/Builders.h" +#include "mlir/IR/Diagnostics.h" #include "mlir/IR/DialectImplementation.h" +#include "mlir/IR/Location.h" #include "mlir/IR/OpDefinition.h" #include "mlir/IR/OpImplementation.h" +#include "mlir/IR/StorageUniquerSupport.h" #include "mlir/IR/TypeUtilities.h" #include "mlir/Interfaces/FunctionImplementation.h" #include "mlir/Interfaces/InferTypeOpInterface.h" +#include "mlir/Support/LLVM.h" +#include "mlir/Support/LogicalResult.h" using namespace mlir; using namespace mlir::cir; @@ -1267,7 +1272,7 @@ VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { LogicalResult cir::VTableAddrPointOp::verify() { auto resultType = getAddr().getType(); - auto fnTy = mlir::FunctionType::get( + auto fnTy = mlir::cir::FuncType::get( getContext(), {}, {mlir::IntegerType::get(getContext(), 32)}); auto resTy = mlir::cir::PointerType::get( getContext(), mlir::cir::PointerType::get(getContext(), fnTy)); @@ -1287,7 +1292,7 @@ LogicalResult cir::VTableAddrPointOp::verify() { static StringRef getLinkageAttrNameString() { return "linkage"; } void cir::FuncOp::build(OpBuilder &builder, OperationState &result, - StringRef name, FunctionType type, + StringRef name, cir::FuncType type, GlobalLinkageKind linkage, ArrayRef attrs, ArrayRef argAttrs) { @@ -1310,6 +1315,8 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, } ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { + llvm::SMLoc loc = parser.getCurrentLocation(); + auto builtinNameAttr = getBuiltinAttrName(state.name); auto coroutineNameAttr = getCoroutineAttrName(state.name); auto lambdaNameAttr = getLambdaAttrName(state.name); @@ -1352,14 +1359,19 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { // Parse the function signature. bool isVariadic = false; if (function_interface_impl::parseFunctionSignature( - parser, /*allowVariadic=*/false, arguments, isVariadic, resultTypes, + parser, /*allowVariadic=*/true, arguments, isVariadic, resultTypes, resultAttrs)) return failure(); for (auto &arg : arguments) argTypes.push_back(arg.type); - auto fnType = builder.getFunctionType(argTypes, resultTypes); + // Build the function type. + auto fnType = mlir::cir::FuncType::getChecked( + parser.getEncodedSourceLoc(loc), parser.getContext(), + mlir::TypeRange(argTypes), mlir::TypeRange(resultTypes), isVariadic); + if (!fnType) + return failure(); state.addAttribute(getFunctionTypeAttrName(state.name), TypeAttr::get(fnType)); @@ -1389,7 +1401,6 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { // Parse the optional function body. auto *body = state.addRegion(); - llvm::SMLoc loc = parser.getCurrentLocation(); OptionalParseResult parseResult = parser.parseOptionalRegion( *body, arguments, /*enableNameShadowing=*/false); if (parseResult.has_value()) { @@ -1452,9 +1463,8 @@ void cir::FuncOp::print(OpAsmPrinter &p) { // Print function name, signature, and control. p.printSymbolName(getSymName()); auto fnType = getFunctionType(); - function_interface_impl::printFunctionSignature(p, *this, fnType.getInputs(), - /*isVariadic=*/false, - fnType.getResults()); + function_interface_impl::printFunctionSignature( + p, *this, fnType.getInputs(), fnType.isVarArg(), fnType.getResults()); function_interface_impl::printFunctionAttributes( p, *this, {getSymVisibilityAttrName(), getAliaseeAttrName(), @@ -1480,7 +1490,7 @@ void cir::FuncOp::print(OpAsmPrinter &p) { // getNumArguments hook not failing. LogicalResult cir::FuncOp::verifyType() { auto type = getFunctionType(); - if (!type.isa()) + if (!type.isa()) return emitOpError("requires '" + getFunctionTypeAttrName().str() + "' attribute of function type"); if (getFunctionType().getNumResults() > 1) @@ -1575,8 +1585,10 @@ cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // Verify that the operand and result types match the callee. auto fnType = fn.getFunctionType(); - if (fnType.getNumInputs() != getNumOperands()) + if (!fnType.isVarArg() && getNumOperands() != fnType.getNumInputs()) return emitOpError("incorrect number of operands for callee"); + if (fnType.isVarArg() && getNumOperands() < fnType.getNumInputs()) + return emitOpError("too few operands for callee"); for (unsigned i = 0, e = fnType.getNumInputs(); i != e; ++i) if (getOperand(i).getType() != fnType.getInput(i)) @@ -1598,10 +1610,6 @@ cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return success(); } -FunctionType CallOp::getCalleeType() { - return FunctionType::get(getContext(), getOperandTypes(), getResultTypes()); -} - ::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result) { mlir::FlatSymbolRefAttr calleeAttr; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 87443865b00f..d634a878965c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -23,6 +23,7 @@ #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" #include "mlir/IR/Attributes.h" @@ -515,11 +516,13 @@ class CIRFuncLowering : public mlir::OpConversionPattern { return mlir::failure(); } - auto fn = rewriter.create( - op.getLoc(), op.getName(), - rewriter.getFunctionType(signatureConversion.getConvertedTypes(), - resultType ? mlir::TypeRange(resultType) - : mlir::TypeRange())); + // Create the LLVM function operation. + auto llvmFnTy = mlir::LLVM::LLVMFunctionType::get( + resultType ? resultType : mlir::LLVM::LLVMVoidType::get(getContext()), + signatureConversion.getConvertedTypes(), + /*isVarArg=*/fnType.isVarArg()); + auto fn = rewriter.create(op.getLoc(), op.getName(), + llvmFnTy); rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index f4f45de69850..807f511e5e02 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -362,19 +362,19 @@ folly::coro::Task go4() { // CHECK: %17 = cir.alloca !ty_22class2Eanon221, cir.ptr , ["ref.tmp1"] {alignment = 1 : i64} // Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` -// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> -// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> -// CHECK: cir.yield %19 : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> +// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> +// CHECK: cir.yield %19 : !cir.ptr)>> // CHECK: } -// CHECK: cir.store %12, %3 : !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, cir.ptr ) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>> +// CHECK: cir.store %12, %3 : !cir.ptr)>>, cir.ptr )>>> // CHECK: cir.scope { // CHECK: %17 = cir.alloca !s32i, cir.ptr , ["ref.tmp2", init] {alignment = 4 : i64} -// CHECK: %18 = cir.load %3 : cir.ptr ) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>>, !cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221> +// CHECK: %18 = cir.load %3 : cir.ptr )>>>, !cir.ptr)>> // CHECK: %19 = cir.const(#cir.int<3> : !s32i) : !s32i // CHECK: cir.store %19, %17 : !s32i, cir.ptr // Call invoker, which calls operator() indirectly. -// CHECK: %20 = cir.call %18(%17) : (!cir.ptr<(!cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221>, !cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221 +// CHECK: %20 = cir.call %18(%17) : (!cir.ptr)>>, !cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221 // CHECK: cir.store %20, %4 : !ty_22struct2Efolly3A3Acoro3A3ATask221, cir.ptr // CHECK: } diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index cfe68b0d3de8..d7a79cb32dc7 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -38,7 +38,7 @@ class B : public A }; // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr i32>>, #cir.recdecl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr>>, #cir.recdecl.ast> // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index aac767879995..26284e608299 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -109,27 +109,27 @@ int g3() { // CHECK: cir.func @_Z2g3v() -> !s32i { // CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !cir.ptr<(!cir.ptr) -> !s32i>, cir.ptr ) -> !s32i>>, ["fn", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr)>>, cir.ptr )>>>, ["fn", init] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !s32i, cir.ptr , ["task", init] {alignment = 4 : i64} // 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. // CHECK: %3 = cir.scope { // CHECK: %7 = cir.alloca !ty_22class2Eanon221, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} -// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr<(!cir.ptr) -> !s32i> -// CHECK: %9 = cir.unary(plus, %8) : !cir.ptr<(!cir.ptr) -> !s32i>, !cir.ptr<(!cir.ptr) -> !s32i> -// CHECK: cir.yield %9 : !cir.ptr<(!cir.ptr) -> !s32i> +// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %9 = cir.unary(plus, %8) : !cir.ptr)>>, !cir.ptr)>> +// CHECK: cir.yield %9 : !cir.ptr)>> // CHECK: } // 2. Load ptr to `__invoke()`. -// CHECK: cir.store %3, %1 : !cir.ptr<(!cir.ptr) -> !s32i>, cir.ptr ) -> !s32i>> +// CHECK: cir.store %3, %1 : !cir.ptr)>>, cir.ptr )>>> // CHECK: %4 = cir.scope { // CHECK: %7 = cir.alloca !s32i, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} -// CHECK: %8 = cir.load %1 : cir.ptr ) -> !s32i>>, !cir.ptr<(!cir.ptr) -> !s32i> +// CHECK: %8 = cir.load %1 : cir.ptr )>>>, !cir.ptr)>> // CHECK: %9 = cir.const(#cir.int<3> : !s32i) : !s32i // CHECK: cir.store %9, %7 : !s32i, cir.ptr // 3. Call `__invoke()`, which effectively executes `operator()`. -// CHECK: %10 = cir.call %8(%7) : (!cir.ptr<(!cir.ptr) -> !s32i>, !cir.ptr) -> !s32i +// CHECK: %10 = cir.call %8(%7) : (!cir.ptr)>>, !cir.ptr) -> !s32i // CHECK: cir.yield %10 : !s32i // CHECK: } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index d932f15f5235..e3e57c1710cf 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -144,4 +144,4 @@ struct Entry { void ppp() { Entry x; } // CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr -// CHECK: = "cir.struct_element_addr"(%1) <{member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr) -> !u32i>> +// CHECK: = "cir.struct_element_addr"(%1) <{member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr)>>> diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 8523843f656a..48e294c8af2b 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -25,7 +25,7 @@ class B : public A // CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr i32>>, #cir.recdecl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr>>, #cir.recdecl.ast> // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> @@ -39,9 +39,9 @@ class B : public A // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr // CHECK: cir.call @_ZN1AC2Ev(%2) : (!cir.ptr) -> () -// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr i32>> -// CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr i32>>> -// CHECK: cir.store %3, %4 : !cir.ptr i32>>, cir.ptr i32>>> +// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr >> +// CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> +// CHECK: cir.store %3, %4 : !cir.ptr>>, cir.ptr >>> // CHECK: cir.return // CHECK: } @@ -67,9 +67,9 @@ class B : public A // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : cir.ptr i32>> -// CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr i32>>> -// CHECK: cir.store %2, %3 : !cir.ptr i32>>, cir.ptr i32>>> +// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : cir.ptr >> +// CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> +// CHECK: cir.store %2, %3 : !cir.ptr>>, cir.ptr >>> // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir index f8d97ac3eb1c..dae0bf291ff7 100644 --- a/clang/test/CIR/IR/func.cir +++ b/clang/test/CIR/IR/func.cir @@ -1,11 +1,32 @@ // RUN: cir-tool %s | FileCheck %s - +!s32i = !cir.int +!u8i = !cir.int module { cir.func @l0() { cir.return } cir.func @l1() alias(@l0) + + cir.func private @variadic(!s32i, ...) -> !s32i + + // Should accept call with only the required parameters. + cir.func @variadic_call_1(%0: !s32i) -> !s32i { + %9 = cir.call @variadic(%0) : (!s32i) -> !s32i + cir.return %9 : !s32i + } + + // Should accept calls with variadic parameters. + cir.func @variadic_call_2(%0: !s32i, %1: !s32i, %2: !u8i) -> !s32i { + %9 = cir.call @variadic(%0, %1, %2) : (!s32i, !s32i, !u8i) -> !s32i + cir.return %9 : !s32i + } + + // Should parse custom assembly format. + cir.func @parse_func_type() -> () { + %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["fn", init] {alignment = 8 : i64} + cir.return + } } // CHECK: cir.func @l0() \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index af1f2ead29f4..e829d21fd5a6 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -339,3 +339,35 @@ module { // expected-error@below {{integer value too large for the given type}} cir.global external @b = #cir.int<-129> : !cir.int } + +// ----- + +module { + // expected-error@+1 {{functions must have at least one non-variadic input}} + cir.func @variadic(...) -> !cir.int +} + +// ----- + +module { + // expected-error@+1 {{custom op 'cir.func' variadic arguments must be in the end of the argument list}} + cir.func @variadic(..., !cir.int) -> !cir.int +} + +// ----- + +module { + // expected-error@+1 {{functions only supports 0 or 1 results}} + cir.func @variadic() -> (!cir.int, !cir.int) +} + +// ----- + +module { + cir.func private @variadic(!cir.int, !cir.int, ...) -> !cir.int + cir.func @call_variadic(%0: !cir.int) -> !cir.int { + // expected-error@+1 {{'cir.call' op too few operands for callee}} + %1 = cir.call @variadic(%0) : (!cir.int) -> !cir.int + cir.return %1 : !cir.int + } +} From b38fe434945f36be561eb07598b984eb07b943ec Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Thu, 25 May 2023 16:11:43 -0300 Subject: [PATCH 0974/2301] [CIR][CIRGen] Allow variadic function declarations Remove variadic guards and add missing functionality in CIR CodeGen logic allowing variadic declarations. ghstack-source-id: d74ea4264f7c5f73e314cdc70eaa4c17a4f5dd0d Pull Request resolved: https://github.com/llvm/clangir/pull/77 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 51 +++++++++++++------ clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 ++ clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h | 14 +++-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 2 +- clang/test/CIR/CodeGen/variadics.c | 12 +++++ 6 files changed, 64 insertions(+), 22 deletions(-) create mode 100644 clang/test/CIR/CodeGen/variadics.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 69240258866f..883d99158165 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -164,11 +164,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::BoolType getBoolTy() { return ::mlir::cir::BoolType::get(getContext()); } - mlir::Type getVirtualFnPtrType([[maybe_unused]] bool isVarArg = false) { + mlir::Type getVirtualFnPtrType(bool isVarArg = false) { // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special // type so it's a bit more clear and C++ idiomatic. auto fnTy = - mlir::cir::FuncType::get(getContext(), {}, {getInt32Ty()}); + mlir::cir::FuncType::get(getContext(), {}, {getInt32Ty()}, isVarArg); assert(!UnimplementedFeature::isVarArg()); return getPointerTo(getPointerTo(fnTy)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 29f2d6b23591..e3f9801f5222 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -255,9 +255,9 @@ mlir::cir::FuncType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { (void)Erased; assert(Erased && "Not in set?"); - return mlir::cir::FuncType::get( - &getMLIRContext(), ArgTypes, - (resultType ? resultType : mlir::TypeRange{})); + return mlir::cir::FuncType::get(&getMLIRContext(), ArgTypes, + (resultType ? resultType : mlir::TypeRange{}), + FI.isVariadic()); } mlir::cir::FuncType CIRGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { @@ -638,6 +638,24 @@ void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, args.add(buildAnyExprToTemp(E), type); } +QualType CIRGenFunction::getVarArgType(const Expr *Arg) { + // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC + // implicitly widens null pointer constants that are arguments to varargs + // functions to pointer-sized ints. + if (!getTarget().getTriple().isOSWindows()) + return Arg->getType(); + + if (Arg->getType()->isIntegerType() && + getContext().getTypeSize(Arg->getType()) < + getContext().getTargetInfo().getPointerWidth(LangAS::Default) && + Arg->isNullPointerConstant(getContext(), + Expr::NPC_ValueDependentIsNotNull)) { + return getContext().getIntPtrType(); + } + + return Arg->getType(); +} + /// Similar to buildAnyExpr(), however, the result will always be accessible /// even if no aggregate location is provided. RValue CIRGenFunction::buildAnyExprToTemp(const Expr *E) { @@ -675,17 +693,14 @@ void CIRGenFunction::buildCallArgs( const auto *FPT = Prototype.P.get(); IsVariadic = FPT->isVariadic(); - assert(!IsVariadic && "Variadic functions NYI"); ExplicitCC = FPT->getExtInfo().getCC(); ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, FPT->param_type_end()); } // If we still have any arguments, emit them using the type of the argument. - for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size())) { - assert(!IsVariadic && "Variadic functions NYI"); - ArgTypes.push_back(A->getType()); - }; + for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size())) + ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType()); assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); // We must evaluate arguments from right to left in the MS C++ ABI, because @@ -961,14 +976,18 @@ arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, // In most cases, there are no optional arguments. RequiredArgs required = RequiredArgs::All; - // if we have a variadic prototype, the required arguments are the extra - // prefix plus the arguments in the prototype. - auto *proto = dyn_cast(fnType); - assert(proto && "Only FunctionProtoType supported so far"); - assert(dyn_cast(fnType) && - "Only FunctionProtoType supported so far"); - assert(!proto->isVariadic() && "Variadic NYI"); - assert(!proto->hasExtParameterInfos() && "extparameterinfos NYI"); + // If we have a variadic prototype, the required arguments are the + // extra prefix plus the arguments in the prototype. + if (const FunctionProtoType *proto = dyn_cast(fnType)) { + if (proto->isVariadic()) + required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs); + + assert(!proto->hasExtParameterInfos() && "extparameterinfos NYI"); + } else { + assert(!llvm::isa(fnType) && + "FunctionNoProtoType NYI"); + llvm_unreachable("Unknown function prototype"); + } // FIXME: Kill copy. SmallVector argTypes; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 6f8262c64754..c4f4dc2eda19 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1675,6 +1675,9 @@ class CIRGenFunction : public CIRGenTypeCache { AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap); } + +private: + QualType getVarArgType(const Expr *Arg); }; /// A specialization of DominatingValue for RValue. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h index e640584558be..36425beb9fb5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h @@ -251,8 +251,17 @@ class RequiredArgs { static RequiredArgs forPrototypePlus(const clang::FunctionProtoType *prototype, unsigned additional) { - assert(!prototype->isVariadic() && "NYI"); - return All; + if (!prototype->isVariadic()) + return All; + + if (prototype->hasExtParameterInfos()) + additional += llvm::count_if( + prototype->getExtParameterInfos(), + [](const clang::FunctionProtoType::ExtParameterInfo &ExtInfo) { + return ExtInfo.hasPassObjectSize(); + }); + + return RequiredArgs(prototype->getNumParams() + additional); } static RequiredArgs @@ -453,7 +462,6 @@ class CIRGenFunctionInfo final bool isVariadic() const { return Required.allowsOptionalArgs(); } RequiredArgs getRequiredArgs() const { return Required; } unsigned getNumRequiredArgs() const { - assert(!isVariadic() && "Variadic NYI"); return isVariadic() ? getRequiredArgs().getNumRequiredArgs() : arg_size(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 46656cf414b3..34fbb2b8cac9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -589,7 +589,7 @@ CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base, .getAddressPoint(Base); auto &builder = CGM.getBuilder(); - auto vtablePtrTy = builder.getVirtualFnPtrType(/*isVarArg=*/true); + auto vtablePtrTy = builder.getVirtualFnPtrType(/*isVarArg=*/false); return builder.create( CGM.getLoc(VTableClass->getSourceRange()), vtablePtrTy, diff --git a/clang/test/CIR/CodeGen/variadics.c b/clang/test/CIR/CodeGen/variadics.c new file mode 100644 index 000000000000..16c721090ab2 --- /dev/null +++ b/clang/test/CIR/CodeGen/variadics.c @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int average(int count, ...); +// CHECK: cir.func private @{{.*}}average{{.*}}(!s32i, ...) -> !s32i + +int test(void) { + return average(5, 1, 2, 3, 4, 5); + // CHECK: cir.call @{{.*}}average{{.*}}(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) : (!s32i, !s32i, !s32i, !s32i, !s32i, !s32i) -> !s32i +} From 6ae72c388ce1abee1fe18bc07954037f35c27eb0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 24 May 2023 17:47:43 -0700 Subject: [PATCH 0975/2301] [CIR][CIRGen] Get rid of signeless int types and convert remaining ones Fixes https://github.com/llvm/clangir/issues/81 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 67 ++++++---- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 16 +-- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 14 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 55 ++++---- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 13 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 8 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 18 +-- clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 4 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 14 +- clang/lib/CIR/CodeGen/TargetInfo.cpp | 4 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 8 +- clang/test/CIR/CodeGen/agg-init.cpp | 12 +- clang/test/CIR/CodeGen/bitfields.cpp | 2 +- clang/test/CIR/CodeGen/cast.cpp | 16 +-- clang/test/CIR/CodeGen/coro-task.cpp | 38 +++--- clang/test/CIR/CodeGen/dlti.c | 3 +- clang/test/CIR/CodeGen/dtors.cpp | 8 +- clang/test/CIR/CodeGen/lambda.cpp | 2 +- clang/test/CIR/CodeGen/move.cpp | 2 +- clang/test/CIR/CodeGen/rangefor.cpp | 4 +- clang/test/CIR/CodeGen/struct.cpp | 15 ++- clang/test/CIR/CodeGen/ternary.cpp | 10 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 30 ++--- clang/test/CIR/IR/array.cir | 6 +- clang/test/CIR/IR/invalid.cir | 88 +++++++----- clang/test/CIR/IR/loop.cir | 125 +++++++++--------- clang/test/CIR/IR/struct.cir | 13 +- clang/test/CIR/IR/ternary.cir | 29 ++-- clang/test/CIR/IR/types.cir | 6 +- 33 files changed, 352 insertions(+), 294 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 883d99158165..37e17bb5f254 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -147,10 +147,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Type helpers // ------------ // - mlir::Type getInt8Ty() { return typeCache.Int8Ty; } - mlir::Type getInt32Ty() { return typeCache.Int32Ty; } - mlir::Type getInt64Ty() { return typeCache.Int64Ty; } - mlir::Type getSInt8Ty() { return typeCache.SInt8Ty; } mlir::Type getSInt16Ty() { return typeCache.SInt16Ty; } mlir::Type getSInt32Ty() { return typeCache.SInt32Ty; } @@ -161,6 +157,20 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::Type getUInt32Ty() { return typeCache.UInt32Ty; } mlir::Type getUInt64Ty() { return typeCache.UInt64Ty; } + bool isInt8Ty(mlir::Type i) { + return i == typeCache.UInt8Ty || i == typeCache.SInt8Ty; + } + bool isInt16Ty(mlir::Type i) { + return i == typeCache.UInt16Ty || i == typeCache.SInt16Ty; + } + bool isInt32Ty(mlir::Type i) { + return i == typeCache.UInt32Ty || i == typeCache.SInt32Ty; + } + bool isInt64Ty(mlir::Type i) { + return i == typeCache.UInt64Ty || i == typeCache.SInt64Ty; + } + bool isInt(mlir::Type i) { return i.isa(); } + mlir::cir::BoolType getBoolTy() { return ::mlir::cir::BoolType::get(getContext()); } @@ -168,17 +178,17 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special // type so it's a bit more clear and C++ idiomatic. auto fnTy = - mlir::cir::FuncType::get(getContext(), {}, {getInt32Ty()}, isVarArg); + mlir::cir::FuncType::get(getContext(), {}, {getUInt32Ty()}, isVarArg); assert(!UnimplementedFeature::isVarArg()); return getPointerTo(getPointerTo(fnTy)); } - // Fetch the type representing a pointer to integer values. - mlir::cir::PointerType getInt8PtrTy(unsigned AddrSpace = 0) { - return typeCache.Int8PtrTy; + // Fetch the type representing a pointer to unsigned int values. + mlir::cir::PointerType getUInt8PtrTy(unsigned AddrSpace = 0) { + return typeCache.UInt8PtrTy; } - mlir::cir::PointerType getInt32PtrTy(unsigned AddrSpace = 0) { - return mlir::cir::PointerType::get(getContext(), typeCache.Int32Ty); + mlir::cir::PointerType getUInt32PtrTy(unsigned AddrSpace = 0) { + return mlir::cir::PointerType::get(getContext(), typeCache.UInt32Ty); } mlir::cir::PointerType getPointerTo(mlir::Type ty, unsigned addressSpace = 0) { @@ -190,20 +200,25 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Constant creation helpers // ------------------------- // - mlir::cir::ConstantOp getSInt32(int32_t C, mlir::Location loc) { - auto SInt32Ty = getSInt32Ty(); - return create(loc, SInt32Ty, - mlir::cir::IntAttr::get(SInt32Ty, C)); - } - mlir::cir::ConstantOp getInt32(uint32_t C, mlir::Location loc) { - auto int32Ty = getInt32Ty(); - return create(loc, int32Ty, - mlir::IntegerAttr::get(int32Ty, C)); - } - mlir::cir::ConstantOp getInt64(uint32_t C, mlir::Location loc) { - auto int64Ty = getInt64Ty(); - return create(loc, int64Ty, - mlir::IntegerAttr::get(int64Ty, C)); + mlir::cir::ConstantOp getSInt32(int32_t c, mlir::Location loc) { + auto sInt32Ty = getSInt32Ty(); + return create(loc, sInt32Ty, + mlir::cir::IntAttr::get(sInt32Ty, c)); + } + mlir::cir::ConstantOp getUInt32(uint32_t C, mlir::Location loc) { + auto uInt32Ty = getUInt32Ty(); + return create(loc, uInt32Ty, + mlir::cir::IntAttr::get(uInt32Ty, C)); + } + mlir::cir::ConstantOp getSInt64(uint32_t C, mlir::Location loc) { + auto sInt64Ty = getSInt64Ty(); + return create(loc, sInt64Ty, + mlir::cir::IntAttr::get(sInt64Ty, C)); + } + mlir::cir::ConstantOp getUInt64(uint32_t C, mlir::Location loc) { + auto uInt64Ty = getUInt64Ty(); + return create(loc, uInt64Ty, + mlir::cir::IntAttr::get(uInt64Ty, C)); } mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), @@ -227,9 +242,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return getNullPtr(ty, loc); mlir::TypedAttr attr; - if (ty.isa()) - attr = mlir::IntegerAttr::get(ty, 0); - else if (ty.isa()) + if (ty.isa()) attr = mlir::cir::IntAttr::get(ty, 0); else llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 75f6b3fd2de7..d70e20318b47 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -158,8 +158,8 @@ static mlir::LogicalResult buildBodyAndFallthrough( mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr) { - auto int8PtrTy = builder.getInt8PtrTy(); - auto int32Ty = mlir::IntegerType::get(builder.getContext(), 32); + auto int8PtrTy = builder.getUInt8PtrTy(); + auto int32Ty = builder.getUInt32Ty(); auto &TI = CGM.getASTContext().getTargetInfo(); unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth(); @@ -181,14 +181,14 @@ mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, return builder.create( loc, fnOp, - mlir::ValueRange{builder.getInt32(NewAlign, loc), nullPtr, nullPtr, + mlir::ValueRange{builder.getUInt32(NewAlign, loc), nullPtr, nullPtr, nullPtr}); } mlir::cir::CallOp CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { auto boolTy = builder.getBoolTy(); - auto int32Ty = mlir::IntegerType::get(builder.getContext(), 32); + auto int32Ty = builder.getUInt32Ty(); mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroAlloc); @@ -211,8 +211,8 @@ CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { mlir::cir::CallOp CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr) { - auto int8PtrTy = builder.getInt8PtrTy(); - auto int32Ty = mlir::IntegerType::get(builder.getContext(), 32); + auto int8PtrTy = builder.getUInt8PtrTy(); + auto int32Ty = builder.getUInt32Ty(); mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroBegin); mlir::cir::FuncOp fnOp; @@ -234,7 +234,7 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr) { - auto int8PtrTy = builder.getInt8PtrTy(); + auto int8PtrTy = builder.getUInt8PtrTy(); auto boolTy = builder.getBoolTy(); mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroEnd); @@ -257,7 +257,7 @@ mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, mlir::LogicalResult CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { auto openCurlyLoc = getLoc(S.getBeginLoc()); - auto nullPtrCst = builder.getNullPtr(builder.getInt8PtrTy(), openCurlyLoc); + auto nullPtrCst = builder.getNullPtr(builder.getUInt8PtrTy(), openCurlyLoc); CurFn.setCoroutineAttr(mlir::UnitAttr::get(builder.getContext())); auto coroId = buildCoroIDBuiltinCall(openCurlyLoc, nullPtrCst); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 8aae2b998a27..253c48643171 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -624,7 +624,7 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign)); } - allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign); + allocation = Address(RV.getScalarVal(), UInt8Ty, allocationAlign); } // Emit a null check on the allocation result if the allocation diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 0839d88bcd7b..c367f70c7516 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -888,7 +888,7 @@ class ScalarExprEmitter : public StmtVisitor { if (SrcTy.isa<::mlir::cir::PointerType>()) { // Must be a ptr to int cast. - assert(DstTy.isa() && "not ptr->int?"); + assert(CGF.getBuilder().isInt(DstTy) && "not ptr->int?"); llvm_unreachable("not implemented"); } @@ -1284,13 +1284,13 @@ mlir::Value ScalarExprEmitter::buildScalarCast( DstElementType = DstType; } - if (SrcElementTy.isa()) { + if (CGF.getBuilder().isInt(SrcElementTy)) { bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType(); if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) { llvm_unreachable("NYI"); } - if (DstElementTy.isa()) + if (CGF.getBuilder().isInt(DstElementTy)) return Builder.create( Src.getLoc(), DstTy, mlir::cir::CastKind::integral, Src); if (InputSigned) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 1917dd6ad853..95f0d71798c3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -316,7 +316,7 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { // If we are on a coroutine, add the coro_end builtin call. if (CGF.CurFn.getCoroutine()) CGF.buildCoroEndBuiltinCall( - loc, builder.getNullPtr(builder.getInt8PtrTy(), loc)); + loc, builder.getNullPtr(builder.getUInt8PtrTy(), loc)); if (CGF.FnRetCIRTy.has_value()) { // If there's anything to return, load it first. @@ -1154,7 +1154,7 @@ void CIRGenFunction::buildNullInitialization(mlir::Location loc, } // Cast the dest ptr to the appropriate i8 pointer type. - if (DestPtr.getElementType() == Int8Ty) { + if (builder.isInt8Ty(DestPtr.getElementType())) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 34fbb2b8cac9..4ad54c77f6e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1201,8 +1201,8 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, if (CGM.getItaniumVTableContext().isRelativeLayout()) llvm_unreachable("NYI"); if (!VTable) { - VTable = - CGM.getOrInsertGlobal(loc, VTableName, CGM.getBuilder().getInt8PtrTy()); + VTable = CGM.getOrInsertGlobal(loc, VTableName, + CGM.getBuilder().getUInt8PtrTy()); } assert(!UnimplementedFeature::setDSOLocal()); @@ -1217,7 +1217,7 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, SmallVector offsets{ mlir::cir::IntAttr::get(PtrDiffTy, 2)}; field = mlir::cir::GlobalViewAttr::get( - builder.getInt8PtrTy(), + builder.getUInt8PtrTy(), mlir::FlatSymbolRefAttr::get(VTable.getSymNameAttr()), mlir::ArrayAttr::get(builder.getContext(), offsets)); } @@ -1277,7 +1277,7 @@ CIRGenItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(mlir::Location loc, // From LLVM codegen => Note for the future: If we would ever like to do // deferred emission of RTTI, check if emitting vtables opportunistically // need any adjustment. - GV = CIRGenModule::createGlobalOp(CGM, loc, Name, builder.getInt8PtrTy(), + GV = CIRGenModule::createGlobalOp(CGM, loc, Name, builder.getUInt8PtrTy(), /*isConstant=*/true); const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); CGM.setGVProperties(GV, RD); @@ -1289,7 +1289,7 @@ CIRGenItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(mlir::Location loc, } return mlir::cir::GlobalViewAttr::get( - builder.getInt8PtrTy(), + builder.getUInt8PtrTy(), mlir::FlatSymbolRefAttr::get(GV.getSymNameAttr())); } @@ -1316,7 +1316,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( llvm_unreachable("NYI"); } else { TypeNameField = mlir::cir::GlobalViewAttr::get( - builder.getInt8PtrTy(), + builder.getUInt8PtrTy(), mlir::FlatSymbolRefAttr::get(TypeName.getSymNameAttr())); } Fields.push_back(TypeNameField); @@ -1483,7 +1483,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( CIRGenModule::setInitializer(GV, init); return mlir::cir::GlobalViewAttr::get( - builder.getInt8PtrTy(), + builder.getUInt8PtrTy(), mlir::FlatSymbolRefAttr::get(GV.getSymNameAttr())); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 271c14e8a816..6008896e4c38 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -96,24 +96,27 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, VTables{*this} { - // Initialize signless integers types cache. - VoidTy = ::mlir::IntegerType::get(builder.getContext(), 8); - Int8Ty = ::mlir::IntegerType::get(builder.getContext(), 8); - Int16Ty = ::mlir::IntegerType::get(builder.getContext(), 16); - Int32Ty = ::mlir::IntegerType::get(builder.getContext(), 32); - Int64Ty = ::mlir::IntegerType::get(builder.getContext(), 64); - // Initialize CIR signed integer types cache. - SInt8Ty = ::mlir::cir::IntType::get(builder.getContext(), 8, true); - SInt16Ty = ::mlir::cir::IntType::get(builder.getContext(), 16, true); - SInt32Ty = ::mlir::cir::IntType::get(builder.getContext(), 32, true); - SInt64Ty = ::mlir::cir::IntType::get(builder.getContext(), 64, true); + SInt8Ty = + ::mlir::cir::IntType::get(builder.getContext(), 8, /*isSigned=*/true); + SInt16Ty = + ::mlir::cir::IntType::get(builder.getContext(), 16, /*isSigned=*/true); + SInt32Ty = + ::mlir::cir::IntType::get(builder.getContext(), 32, /*isSigned=*/true); + SInt64Ty = + ::mlir::cir::IntType::get(builder.getContext(), 64, /*isSigned=*/true); // Initialize CIR unsigned integer types cache. - UInt8Ty = ::mlir::cir::IntType::get(builder.getContext(), 8, false); - UInt16Ty = ::mlir::cir::IntType::get(builder.getContext(), 16, false); - UInt32Ty = ::mlir::cir::IntType::get(builder.getContext(), 32, false); - UInt64Ty = ::mlir::cir::IntType::get(builder.getContext(), 64, false); + UInt8Ty = + ::mlir::cir::IntType::get(builder.getContext(), 8, /*isSigned=*/false); + UInt16Ty = + ::mlir::cir::IntType::get(builder.getContext(), 16, /*isSigned=*/false); + UInt32Ty = + ::mlir::cir::IntType::get(builder.getContext(), 32, /*isSigned=*/false); + UInt64Ty = + ::mlir::cir::IntType::get(builder.getContext(), 64, /*isSigned=*/false); + + VoidTy = UInt8Ty; // TODO: HalfTy // TODO: BFloatTy @@ -123,14 +126,17 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // TODO: PointerAlignInBytes // TODO: SizeSizeInBytes // TODO: IntAlignInBytes - CharTy = ::mlir::IntegerType::get(builder.getContext(), - astCtx.getTargetInfo().getCharWidth()); - IntTy = ::mlir::IntegerType::get(builder.getContext(), - astCtx.getTargetInfo().getIntWidth()); - IntPtrTy = ::mlir::IntegerType::get( - builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth()); - Int8PtrTy = builder.getPointerTo(Int8Ty); - Int8PtrPtrTy = builder.getPointerTo(Int8PtrTy); + UCharTy = ::mlir::cir::IntType::get(builder.getContext(), + astCtx.getTargetInfo().getCharWidth(), + /*isSigned=*/false); + UIntTy = ::mlir::cir::IntType::get(builder.getContext(), + astCtx.getTargetInfo().getIntWidth(), + /*isSigned=*/false); + UIntPtrTy = ::mlir::cir::IntType::get( + builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth(), + /*isSigned=*/false); + UInt8PtrTy = builder.getPointerTo(UInt8Ty); + UInt8PtrPtrTy = builder.getPointerTo(UInt8PtrTy); // TODO: AllocaInt8PtrTy // TODO: GlobalsInt8PtrTy // TODO: ConstGlobalsPtrTy @@ -1942,6 +1948,9 @@ void CIRGenModule::buildDefaultMethods() { } mlir::IntegerAttr CIRGenModule::getSize(CharUnits size) { + // Note that mlir::IntegerType is used instead of mlir::cir::IntType here + // because we don't need sign information for this to be useful, so keep + // it simple. return mlir::IntegerAttr::get( mlir::IntegerType::get(builder.getContext(), 64), size.getQuantity()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index ab10a464b928..1777edb17d6a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -28,9 +28,6 @@ struct CIRGenTypeCache { /// void mlir::Type VoidTy; - - /// i8, i16, i32, and i64 - mlir::Type Int8Ty, Int16Ty, Int32Ty, Int64Ty; // char, int, short, long mlir::Type SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty; // usigned char, unsigned, unsigned short, unsigned long @@ -40,14 +37,14 @@ struct CIRGenTypeCache { mlir::Type FloatTy, DoubleTy; /// int - mlir::Type IntTy; + mlir::Type UIntTy; /// char - mlir::Type CharTy; + mlir::Type UCharTy; /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size. union { - mlir::Type IntPtrTy; + mlir::Type UIntPtrTy; mlir::Type SizeTy; mlir::Type PtrDiffTy; }; @@ -55,13 +52,13 @@ struct CIRGenTypeCache { /// void* in address space 0 union { mlir::cir::PointerType VoidPtrTy; - mlir::cir::PointerType Int8PtrTy; + mlir::cir::PointerType UInt8PtrTy; }; /// void** in address space 0 union { mlir::cir::PointerType VoidPtrPtrTy; - mlir::cir::PointerType Int8PtrPtrTy; + mlir::cir::PointerType UInt8PtrPtrTy; }; /// void* in alloca address space diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 69ee557ce2fb..829ff3a47a5a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -465,7 +465,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::NullPtr: // Model std::nullptr_t as i8* - // ResultType = llvm::Type::getInt8PtrTy(getLLVMContext()); + // ResultType = llvm::Type::getUInt8PtrTy(getLLVMContext()); assert(0 && "not implemented"); break; @@ -658,7 +658,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { // Return a placeholder 'i32' type. This can be changed later when the // type is defined (see UpdateCompletedType), but is likely to be the // "right" answer. - ResultType = CGM.Int32Ty; + ResultType = CGM.UInt32Ty; break; } diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index f3231b660f33..056c71a64105 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -38,9 +38,9 @@ static bool UseRelativeLayout(const CIRGenModule &CGM) { bool CIRGenVTables::useRelativeLayout() const { return UseRelativeLayout(CGM); } mlir::Type CIRGenModule::getVTableComponentType() { - mlir::Type ptrTy = builder.getInt8PtrTy(); + mlir::Type ptrTy = builder.getUInt8PtrTy(); if (UseRelativeLayout(*this)) - ptrTy = builder.getInt32PtrTy(); + ptrTy = builder.getUInt32PtrTy(); return ptrTy; } @@ -160,7 +160,7 @@ static void AddPointerLayoutOffset(CIRGenModule &CGM, CharUnits offset) { assert(offset.getQuantity() == 0 && "NYI"); builder.add(mlir::cir::NullAttr::get(CGM.getBuilder().getContext(), - CGM.getBuilder().getInt8PtrTy())); + CGM.getBuilder().getUInt8PtrTy())); } static void AddRelativeLayoutOffset(CIRGenModule &CGM, @@ -278,7 +278,7 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, llvm_unreachable("NYI"); } else { return builder.add(mlir::cir::GlobalViewAttr::get( - CGM.getBuilder().getInt8PtrTy(), + CGM.getBuilder().getUInt8PtrTy(), mlir::FlatSymbolRefAttr::get(fnPtr.getSymNameAttr()))); } } diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 665e9d6601d6..d6b0d840853f 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -129,14 +129,16 @@ struct CIRRecordLowering final { } mlir::Type getCharType() { - return mlir::IntegerType::get(&cirGenTypes.getMLIRContext(), - astContext.getCharWidth()); + return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(), + astContext.getCharWidth(), + /*isSigned=*/false); } - /// Wraps mlir::IntegerType with some implicit arguments. - mlir::Type getIntNType(uint64_t NumBits) { + /// Wraps mlir::cir::IntType with some implicit arguments. + mlir::Type getUIntNType(uint64_t NumBits) { unsigned AlignedBits = llvm::alignTo(NumBits, astContext.getCharWidth()); - return mlir::IntegerType::get(&cirGenTypes.getMLIRContext(), AlignedBits); + return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(), AlignedBits, + /*isSigned=*/false); } mlir::Type getByteArrayType(CharUnits numberOfChars) { @@ -162,7 +164,7 @@ struct CIRRecordLowering final { // if (isDiscreteBitFieldABI()) // return type; - // return getIntNType(std::min(fielddecl->getBitWidthValue(astContext), + // return getUIntNType(std::min(fielddecl->getBitWidthValue(astContext), // static_cast(astContext.toBits(getSize(type))))); llvm_unreachable("getStorageType only supports nonBitFields at this point"); } @@ -481,7 +483,7 @@ void CIRRecordLowering::accumulateBitFields( // Make sure StartBitOffset is naturally aligned if it is treated as an // IType integer. // if (StartBitOffset % - // astContext.toBits(getAlignment(getIntNType(OffsetInRecord))) != + // astContext.toBits(getAlignment(getUIntNType(OffsetInRecord))) != // 0) // return false; return true; @@ -526,7 +528,7 @@ void CIRRecordLowering::accumulateBitFields( } // We've hit a break-point in the run and need to emit a storage field. - auto Type = getIntNType(Tail - StartBitOffset); + auto Type = getUIntNType(Tail - StartBitOffset); // Add the storage member to the record and set the bitfield info for all of // the bitfields in the run. Bitfields get the offset of their storage but // come afterward and remain there after a stable sort. diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp index 19b9d5708a37..aaba1230f6ef 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -129,14 +129,14 @@ void ConstantAggregateBuilderBase::addSize(CharUnits size) { } mlir::Attribute -ConstantAggregateBuilderBase::getRelativeOffset(mlir::IntegerType offsetType, +ConstantAggregateBuilderBase::getRelativeOffset(mlir::cir::IntType offsetType, mlir::Attribute target) { return getRelativeOffsetToPosition(offsetType, target, Builder.Buffer.size() - Begin); } mlir::Attribute ConstantAggregateBuilderBase::getRelativeOffsetToPosition( - mlir::IntegerType offsetType, mlir::Attribute target, size_t position) { + mlir::cir::IntType offsetType, mlir::Attribute target, size_t position) { llvm_unreachable("NYI"); // // Compute the address of the relative-address slot. // auto base = getAddrOfPosition(offsetType, position); diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h index 6147c22d0a54..99c2e8f6601d 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -189,7 +189,7 @@ class ConstantAggregateBuilderBase { void addSize(CharUnits size); /// Add an integer value of a specific type. - void addInt(mlir::IntegerType intTy, uint64_t value, bool isSigned = false) { + void addInt(mlir::cir::IntType intTy, uint64_t value, bool isSigned = false) { add(mlir::IntegerAttr::get(intTy, llvm::APInt{intTy.getWidth(), value, isSigned})); } @@ -218,14 +218,14 @@ class ConstantAggregateBuilderBase { /// in the current linkage unit. The offset will have the given /// integer type, which must be no wider than intptr_t. Some /// targets may not fully support this operation. - void addRelativeOffset(mlir::IntegerType type, mlir::Attribute target) { + void addRelativeOffset(mlir::cir::IntType type, mlir::Attribute target) { llvm_unreachable("NYI"); // add(getRelativeOffset(type, target)); } /// Same as addRelativeOffset(), but instead relative to an element in this /// aggregate, identified by its index. - void addRelativeOffsetToPosition(mlir::IntegerType type, + void addRelativeOffsetToPosition(mlir::cir::IntType type, mlir::Attribute target, size_t position) { llvm_unreachable("NYI"); // add(getRelativeOffsetToPosition(type, target, position)); @@ -235,7 +235,7 @@ class ConstantAggregateBuilderBase { /// constant offset. This is primarily useful when the relative /// offset is known to be a multiple of (say) four and therefore /// the tag can be used to express an extra two bits of information. - void addTaggedRelativeOffset(mlir::IntegerType type, mlir::Attribute address, + void addTaggedRelativeOffset(mlir::cir::IntType type, mlir::Attribute address, unsigned tag) { llvm_unreachable("NYI"); // mlir::Attribute offset = @@ -287,7 +287,7 @@ class ConstantAggregateBuilderBase { /// Fill a previously-added placeholder. void fillPlaceholderWithInt(PlaceholderPosition position, - mlir::IntegerType type, uint64_t value, + mlir::cir::IntType type, uint64_t value, bool isSigned = false) { llvm_unreachable("NYI"); // fillPlaceholder(position, llvm::ConstantInt::get(type, value, isSigned)); @@ -333,10 +333,10 @@ class ConstantAggregateBuilderBase { void getGEPIndicesTo(llvm::SmallVectorImpl &indices, size_t position) const; - mlir::Attribute getRelativeOffset(mlir::IntegerType offsetType, + mlir::Attribute getRelativeOffset(mlir::cir::IntType offsetType, mlir::Attribute target); - mlir::Attribute getRelativeOffsetToPosition(mlir::IntegerType offsetType, + mlir::Attribute getRelativeOffsetToPosition(mlir::cir::IntType offsetType, mlir::Attribute target, size_t position); diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index ec73873437e7..dc5ab92b4121 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -260,7 +260,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, // If we have a sign or zero extended integer, make sure to return Extend so // that the parameter gets the right LLVM IR attributes. - if (Hi == NoClass && ResType.isa()) { + if (Hi == NoClass && ResType.isa()) { assert(!Ty->getAs() && "NYI"); if (Ty->isSignedIntegerOrEnumerationType() && isPromotableIntegerTypeForABI(Ty)) @@ -389,7 +389,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { // If we have a sign or zero extended integer, make sure to return Extend so // that the parameter gets the right LLVM IR attributes. // TODO: extend the above consideration to MLIR - if (Hi == NoClass && ResType.isa()) { + if (Hi == NoClass && ResType.isa()) { // Treat an enum type as its underlying type. if (const auto *EnumTy = RetTy->getAs()) RetTy = EnumTy->getDecl()->getIntegerType(); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 8944991a0857..587ad2635aaa 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1273,7 +1273,9 @@ VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { LogicalResult cir::VTableAddrPointOp::verify() { auto resultType = getAddr().getType(); auto fnTy = mlir::cir::FuncType::get( - getContext(), {}, {mlir::IntegerType::get(getContext(), 32)}); + getContext(), {}, + {mlir::cir::IntType::get(getContext(), 32, /*isSigned=*/false)}); + auto resTy = mlir::cir::PointerType::get( getContext(), mlir::cir::PointerType::get(getContext(), fnTy)); @@ -1800,8 +1802,8 @@ LogicalResult mlir::cir::ConstArrayAttr::verify( // TODO: add CIR type for char. if (!intTy || intTy.getWidth() != 8) { - emitError() << "constant array element for string literals expects i8 " - "array element type"; + emitError() << "constant array element for string literals expects " + "!cir.int element type"; return failure(); } return success(); diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index d0118408e31b..bf58c95fa879 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -1,8 +1,8 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// CHECK !ty_22struct2EZero22 = !cir.struct<"struct.Zero", i8> -// CHECK !ty_22struct2Eyep_22 = !cir.struct<"struct.yep_", i32, i32> +// CHECK: !ty_22struct2EZero22 = !cir.struct<"struct.Zero", !u8i> +// CHECK: !ty_22struct2Eyep_22 = !cir.struct<"struct.yep_", !u32i, !u32i> struct Zero { void yolo(); @@ -66,14 +66,14 @@ void yo() { // CHECK: cir.func @_Z2yov() { // CHECK: %0 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} -// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i,#cir.null : !cir.ptr,#cir.int<0> : !u64i}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 +// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i,#cir.null : !cir.ptr,#cir.int<0> : !u64i}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 // CHECK: cir.store %2, %0 : !ty_22struct2EYo22, cir.ptr // CHECK: %3 = "cir.struct_element_addr"(%1) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr -// CHECK: %5 = "cir.struct_element_addr"(%1) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr -// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > +// CHECK: %5 = "cir.struct_element_addr"(%1) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > // CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "createFlags"}> : (!cir.ptr) -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !u64i) : !u64i // CHECK: cir.store %8, %7 : !u64i, cir.ptr diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index b2c1dfd89bca..452208bc63da 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -14,5 +14,5 @@ void m() { __long l; } -// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", i32, #cir.recdecl.ast> +// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", !u32i, #cir.recdecl.ast> // CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long", !ty_22struct2Eanon22, !u32i, !cir.ptr> \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 18c7995e713c..41c27fec5702 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -48,22 +48,22 @@ bool cptr(void *d) { return x; } -// CHECK: cir.func @_Z4cptrPv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} +// CHECK: cir.func @_Z4cptrPv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} -// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %4 = cir.cast(ptr_to_bool, %3 : !cir.ptr), !cir.bool +// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.cast(ptr_to_bool, %3 : !cir.ptr), !cir.bool void call_cptr(void *d) { if (!cptr(d)) { } } -// CHECK: cir.func @_Z9call_cptrPv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} +// CHECK: cir.func @_Z9call_cptrPv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} // CHECK: cir.scope { -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = cir.call @_Z4cptrPv(%1) : (!cir.ptr) -> !cir.bool +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.call @_Z4cptrPv(%1) : (!cir.ptr) -> !cir.bool // CHECK: %3 = cir.unary(not, %2) : !cir.bool, !cir.bool // CHECK: cir.if %3 { diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 807f511e5e02..ee6eeb9749fd 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -126,21 +126,21 @@ co_invoke_fn co_invoke; }} // namespace folly::coro -// CHECK: ![[VoidTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task", i8> -// CHECK: ![[IntTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task", i8> -// CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct<"struct.folly::coro::Task::promise_type", i8> -// CHECK: ![[CoroHandleVoid:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", i8> -// CHECK: ![[CoroHandlePromise:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", i8> -// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string", i8 -// CHECK: ![[SuspendAlways:ty_.*]] = !cir.struct<"struct.std::suspend_always", i8> +// CHECK: ![[VoidTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task", !u8i> +// CHECK: ![[IntTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task", !u8i> +// CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct<"struct.folly::coro::Task::promise_type", !u8i> +// CHECK: ![[CoroHandleVoid:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", !u8i> +// CHECK: ![[CoroHandlePromise:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", !u8i> +// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string", !u8i +// CHECK: ![[SuspendAlways:ty_.*]] = !cir.struct<"struct.std::suspend_always", !u8i> // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22struct2Efolly3A3Acoro3A3Aco_invoke_fn22 -// CHECK: cir.func builtin private @__builtin_coro_id(i32, !cir.ptr, !cir.ptr, !cir.ptr) -> i32 -// CHECK: cir.func builtin private @__builtin_coro_alloc(i32) -> !cir.bool +// CHECK: cir.func builtin private @__builtin_coro_id(!u32i, !cir.ptr, !cir.ptr, !cir.ptr) -> !u32i +// CHECK: cir.func builtin private @__builtin_coro_alloc(!u32i) -> !cir.bool // CHECK: cir.func builtin private @__builtin_coro_size() -> !u64i -// CHECK: cir.func builtin private @__builtin_coro_begin(i32, !cir.ptr) -> !cir.ptr +// CHECK: cir.func builtin private @__builtin_coro_begin(!u32i, !cir.ptr) -> !cir.ptr using VoidTask = folly::coro::Task; @@ -153,26 +153,26 @@ VoidTask silly_task() { // Allocate promise. // CHECK: %[[#VoidTaskAddr:]] = cir.alloca ![[VoidTask]], {{.*}}, ["__retval"] -// CHECK: %[[#SavedFrameAddr:]] = cir.alloca !cir.ptr, cir.ptr >, ["__coro_frame_addr"] {alignment = 8 : i64} +// CHECK: %[[#SavedFrameAddr:]] = cir.alloca !cir.ptr, cir.ptr >, ["__coro_frame_addr"] {alignment = 8 : i64} // CHECK: %[[#VoidPromisseAddr:]] = cir.alloca ![[VoidPromisse]], {{.*}}, ["__promise"] // Get coroutine id with __builtin_coro_id. -// CHECK: %[[#NullPtr:]] = cir.const(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: %[[#Align:]] = cir.const(16 : i32) : i32 +// CHECK: %[[#NullPtr:]] = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %[[#Align:]] = cir.const(#cir.int<16> : !u32i) : !u32i // CHECK: %[[#CoroId:]] = cir.call @__builtin_coro_id(%[[#Align]], %[[#NullPtr]], %[[#NullPtr]], %[[#NullPtr]]) // Perform allocation calling operator 'new' depending on __builtin_coro_alloc and // call __builtin_coro_begin for the final coroutine frame address. -// CHECK: %[[#ShouldAlloc:]] = cir.call @__builtin_coro_alloc(%[[#CoroId]]) : (i32) -> !cir.bool -// CHECK: cir.store %[[#NullPtr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > +// CHECK: %[[#ShouldAlloc:]] = cir.call @__builtin_coro_alloc(%[[#CoroId]]) : (!u32i) -> !cir.bool +// CHECK: cir.store %[[#NullPtr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > // CHECK: cir.if %[[#ShouldAlloc]] { // CHECK: %[[#CoroSize:]] = cir.call @__builtin_coro_size() : () -> !u64i -// CHECK: %[[#AllocAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (!u64i) -> !cir.ptr -// CHECK: cir.store %[[#AllocAddr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > +// CHECK: %[[#AllocAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (!u64i) -> !cir.ptr +// CHECK: cir.store %[[#AllocAddr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > // CHECK: } -// CHECK: %[[#Load0:]] = cir.load %[[#SavedFrameAddr]] : cir.ptr >, !cir.ptr +// CHECK: %[[#Load0:]] = cir.load %[[#SavedFrameAddr]] : cir.ptr >, !cir.ptr // CHECK: %[[#CoroFrameAddr:]] = cir.call @__builtin_coro_begin(%[[#CoroId]], %[[#Load0]]) // Call promise.get_return_object() to retrieve the task object. @@ -264,7 +264,7 @@ VoidTask silly_task() { // Call builtin coro end and return -// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const(#cir.null : !cir.ptr) +// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const(#cir.null : !cir.ptr) // CHECK-NEXT: %[[#CoroEndArg1:]] = cir.const(#false) : !cir.bool // CHECK-NEXT: = cir.call @__builtin_coro_end(%[[#CoroEndArg0]], %[[#CoroEndArg1]]) diff --git a/clang/test/CIR/CodeGen/dlti.c b/clang/test/CIR/CodeGen/dlti.c index fefcdf92f3f6..f56687f7bcf8 100644 --- a/clang/test/CIR/CodeGen/dlti.c +++ b/clang/test/CIR/CodeGen/dlti.c @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s void foo() {} diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index d7a79cb32dc7..fcac427efa56 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -38,7 +38,7 @@ class B : public A }; // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr>>, #cir.recdecl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr>>, #cir.recdecl.ast> // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> @@ -56,7 +56,7 @@ class B : public A // CHECK: cir.func private @_ZN1BD2Ev(!cir.ptr) // operator delete(void*) declaration -// CHECK: cir.func private @_ZdlPv(!cir.ptr) +// CHECK: cir.func private @_ZdlPv(!cir.ptr) // B dtor => @B::~B() #2 // Calls dtor #1 @@ -67,8 +67,8 @@ class B : public A // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: cir.call @_ZN1BD2Ev(%1) : (!cir.ptr) -> () -// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr -// CHECK: cir.call @_ZdlPv(%2) : (!cir.ptr) -> () +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: cir.call @_ZdlPv(%2) : (!cir.ptr) -> () // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 26284e608299..d91ac4173769 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -6,7 +6,7 @@ void fn() { a(); } -// CHECK: !ty_22class2Eanon22 = !cir.struct<"class.anon", i8> +// CHECK: !ty_22class2Eanon22 = !cir.struct<"class.anon", !u8i> // CHECK-DAG: module // CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv diff --git a/clang/test/CIR/CodeGen/move.cpp b/clang/test/CIR/CodeGen/move.cpp index b82e1c35b921..95895015f98d 100644 --- a/clang/test/CIR/CodeGen/move.cpp +++ b/clang/test/CIR/CodeGen/move.cpp @@ -16,7 +16,7 @@ struct string { } // std namespace -// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string", i8> +// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string", !u8i> std::string getstr(); void emplace(std::string &&s); diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 8f2503b3ddae..a8d29bfdc03a 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -21,7 +21,7 @@ void init(unsigned numImages) { } } -// CHECK: !ty_22struct2Etriple22 = !cir.struct<"struct.triple", !u32i, !cir.ptr, !u32i> +// CHECK: !ty_22struct2Etriple22 = !cir.struct<"struct.triple", !u32i, !cir.ptr, !u32i> // CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector", !cir.ptr, !cir.ptr, !cir.ptr> // CHECK: !ty_22struct2E__vector_iterator22 = !cir.struct<"struct.__vector_iterator", !cir.ptr> @@ -64,7 +64,7 @@ void init(unsigned numImages) { // CHECK: %15 = "cir.struct_element_addr"(%13) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr // CHECK: %16 = cir.const(#cir.int<1000024002> : !u32i) : !u32i // CHECK: cir.store %16, %15 : !u32i, cir.ptr -// CHECK: %17 = "cir.struct_element_addr"(%13) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %17 = "cir.struct_element_addr"(%13) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %18 = "cir.struct_element_addr"(%13) <{member_name = "image"}> : (!cir.ptr) -> !cir.ptr // CHECK: %19 = cir.load %7 : cir.ptr >, !cir.ptr // CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index e3e57c1710cf..1c5c56658677 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -28,9 +28,11 @@ void yoyo(incomplete *i) {} // CHECK: !ty_22struct2Eincomplete22 = !cir.struct<"struct.incomplete", incomplete // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i> -// CHECK: !ty_22struct2EMandalore22 = !cir.struct<"struct.Mandalore", !u32i, !cir.ptr, !s32i, #cir.recdecl.ast> -// CHECK: !ty_22class2EAdv22 = !cir.struct<"class.Adv", !ty_22struct2EMandalore22> + // CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", !s32i, !s8i, !ty_22struct2EBar22> +// CHECK: !ty_22struct2EMandalore22 = !cir.struct<"struct.Mandalore", !u32i, !cir.ptr, !s32i, #cir.recdecl.ast> +// CHECK: !ty_22class2EAdv22 = !cir.struct<"class.Adv", !ty_22struct2EMandalore22> +// CHECK: !ty_22struct2EEntry22 = !cir.struct<"struct.Entry", !cir.ptr, !cir.ptr)>>> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} @@ -100,9 +102,9 @@ void m() { Adv C; } // CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "w"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000024001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr -// CHECK: %5 = "cir.struct_element_addr"(%2) <{member_name = "n"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %6 = cir.const(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > +// CHECK: %5 = "cir.struct_element_addr"(%2) <{member_name = "n"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %6 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > // CHECK: %7 = "cir.struct_element_addr"(%2) <{member_name = "d"}> : (!cir.ptr) -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: cir.store %8, %7 : !s32i, cir.ptr @@ -144,4 +146,5 @@ struct Entry { void ppp() { Entry x; } // CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr -// CHECK: = "cir.struct_element_addr"(%1) <{member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr)>>> + +// CHECK: = "cir.struct_element_addr"(%1) <{member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr)>>> diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp index 16d273477266..d3f48c3237c7 100644 --- a/clang/test/CIR/CodeGen/ternary.cpp +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -45,14 +45,14 @@ void m(APIType api) { // CHECK: %5 = cir.cmp(eq, %2, %4) : !s32i, !cir.bool // CHECK: %6 = cir.ternary(%5, true { // CHECK: %7 = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK: %8 = cir.const(0 : i8) : i8 -// CHECK: cir.yield %8 : i8 +// CHECK: %8 = cir.const(#cir.int<0> : !u8i) : !u8i +// CHECK: cir.yield %8 : !u8i // CHECK: }, false { // CHECK: %7 = cir.get_global @".str" : cir.ptr > // CHECK: %8 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr // CHECK: cir.call @_Z3obaPKc(%8) : (!cir.ptr) -> () -// CHECK: %9 = cir.const(0 : i8) : i8 -// CHECK: cir.yield %9 : i8 -// CHECK: }) : i8 +// CHECK: %9 = cir.const(#cir.int<0> : !u8i) : !u8i +// CHECK: cir.yield %9 : !u8i +// CHECK: }) : !u8i // CHECK: cir.return // CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 48e294c8af2b..88a0497c9e40 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -18,14 +18,14 @@ class B : public A virtual ~B() noexcept {} }; -// vtable for A type -// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct<"", !cir.array x 5>> - // Type info B. -// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> +// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> + +// vtable for A type +// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct<"", !cir.array x 5>> // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr>>, #cir.recdecl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr>>, #cir.recdecl.ast> // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> @@ -39,9 +39,9 @@ class B : public A // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr // CHECK: cir.call @_ZN1AC2Ev(%2) : (!cir.ptr) -> () -// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr >> -// CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> -// CHECK: cir.store %3, %4 : !cir.ptr>>, cir.ptr >>> +// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr >> +// CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> +// CHECK: cir.store %3, %4 : !cir.ptr>>, cir.ptr >>> // CHECK: cir.return // CHECK: } @@ -67,26 +67,26 @@ class B : public A // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : cir.ptr >> -// CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> -// CHECK: cir.store %2, %3 : !cir.ptr>>, cir.ptr >>> +// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : cir.ptr >> +// CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> +// CHECK: cir.store %2, %3 : !cir.ptr>>, cir.ptr >>> // CHECK: cir.return // CHECK: } // vtable for B -// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<<{#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}>> : ![[VTableTypeA]] +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<<{#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}>> : ![[VTableTypeA]] // vtable for __cxxabiv1::__si_class_type_info -// CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> +// CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> // typeinfo name for B // CHECK: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array {alignment = 1 : i64} // typeinfo for A -// CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr +// CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr // typeinfo for B -// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr,#cir.global_view<@_ZTS1B> : !cir.ptr,#cir.global_view<@_ZTI1A> : !cir.ptr}>> : ![[TypeInfoB]] +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr,#cir.global_view<@_ZTS1B> : !cir.ptr,#cir.global_view<@_ZTI1A> : !cir.ptr}>> : ![[TypeInfoB]] // Checks for dtors in dtors.cpp diff --git a/clang/test/CIR/IR/array.cir b/clang/test/CIR/IR/array.cir index f60d9c89acb6..4390d3dabfcd 100644 --- a/clang/test/CIR/IR/array.cir +++ b/clang/test/CIR/IR/array.cir @@ -1,11 +1,13 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s +!u32i = !cir.int + module { cir.func @arrays() { - %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] + %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] cir.return } } // CHECK: cir.func @arrays() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index e829d21fd5a6..787b4673ef9c 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1,9 +1,11 @@ // Test attempts to build bogus CIR // RUN: cir-tool %s -verify-diagnostics -split-input-file +!u32i = !cir.int + // expected-error@+2 {{'cir.const' op nullptr expects pointer type}} cir.func @p0() { - %1 = cir.const(#cir.null : !cir.ptr) : i32 + %1 = cir.const(#cir.null : !cir.ptr) : !u32i cir.return } @@ -11,9 +13,10 @@ cir.func @p0() { #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool -// expected-error@+2 {{op result type ('i32') must be '!cir.bool' for '#cir.bool : !cir.bool'}} +!u32i = !cir.int +// expected-error@+2 {{op result type ('!cir.int') must be '!cir.bool' for '#cir.bool : !cir.bool'}} cir.func @b0() { - %1 = cir.const(#true) : i32 + %1 = cir.const(#true) : !u32i cir.return } @@ -21,12 +24,13 @@ cir.func @b0() { #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool +!u32i = !cir.int cir.func @if0() { %0 = cir.const(#true) : !cir.bool // expected-error@+1 {{'cir.if' op region control flow edge from Region #0 to parent results: source has 1 operands, but target successor needs 0}} cir.if %0 { - %6 = cir.const(3 : i32) : i32 - cir.yield %6 : i32 + %6 = cir.const(#cir.int<3> : !u32i) : !u32i + cir.yield %6 : !u32i } cir.return } @@ -115,8 +119,9 @@ cir.func @badstride(%x: !cir.ptr>) { // ----- -cir.func @cast0(%arg0: i32) { - %1 = cir.cast(int_to_bool, %arg0 : i32), i32 // expected-error {{requires !cir.bool type for result}} +!u32i = !cir.int +cir.func @cast0(%arg0: !u32i) { + %1 = cir.cast(int_to_bool, %arg0 : !u32i), !u32i // expected-error {{requires !cir.bool type for result}} cir.return } @@ -129,23 +134,26 @@ cir.func @cast1(%arg1: f32) { // ----- -cir.func @cast2(%p: !cir.ptr) { - %2 = cir.cast(array_to_ptrdecay, %p : !cir.ptr), !cir.ptr // expected-error {{requires !cir.array pointee}} +!u32i = !cir.int +cir.func @cast2(%p: !cir.ptr) { + %2 = cir.cast(array_to_ptrdecay, %p : !cir.ptr), !cir.ptr // expected-error {{requires !cir.array pointee}} cir.return } // ----- -cir.func @cast3(%p: !cir.ptr) { - %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] - %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // expected-error {{requires same type for array element and pointee result}} +!u32i = !cir.int +cir.func @cast3(%p: !cir.ptr) { + %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] + %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // expected-error {{requires same type for array element and pointee result}} cir.return } // ----- -cir.func @cast4(%p: !cir.ptr) { - %2 = cir.cast(bitcast, %p : !cir.ptr), i32 // expected-error {{requires !cir.ptr type for source and result}} +!u32i = !cir.int +cir.func @cast4(%p: !cir.ptr) { + %2 = cir.cast(bitcast, %p : !cir.ptr), !u32i // expected-error {{requires !cir.ptr type for source and result}} cir.return } @@ -175,20 +183,24 @@ cir.func @b0() { // ----- +!u32i = !cir.int +!u8i = !cir.int module { - cir.global external @a = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array element should match array element type}} + cir.global external @a = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<23> : !u8i, #cir.int<33> : !u8i] : !cir.array> // expected-error {{constant array element should match array element type}} } // expected-error {{expected constant attribute to match type}} // ----- +!u8i = !cir.int module { - cir.global external @a = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{constant array size should match type size}} + cir.global external @a = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<23> : !u8i, #cir.int<33> : !u8i] : !cir.array> // expected-error {{constant array size should match type size}} } // expected-error {{expected constant attribute to match type}} // ----- +!u32i = !cir.int module { - cir.global external @b = #cir.const_array<"example\00" : !cir.array> // expected-error {{constant array element for string literals expects i8 array element type}} + cir.global external @b = #cir.const_array<"example\00" : !cir.array> // expected-error {{constant array element for string literals expects !cir.int element type}} } // expected-error {{expected constant attribute to match type}} // ----- @@ -199,49 +211,55 @@ module { // ----- +!u32i = !cir.int module { - cir.global @a = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> // expected-error {{expected string or keyword containing one of the following enum values for attribute 'linkage' [external, available_externally, linkonce, linkonce_odr, weak, weak_odr, internal, cir_private, extern_weak, common]}} + cir.global @a = #cir.const_array<[0 : !u8i, -23 : !u8i, 33 : !u8i] : !cir.array> // expected-error {{expected string or keyword containing one of the following enum values for attribute 'linkage' [external, available_externally, linkonce, linkonce_odr, weak, weak_odr, internal, cir_private, extern_weak, common]}} } // ----- +!u32i = !cir.int module { - cir.global "private" external @v = 3 : i32 // expected-error {{private visibility not allowed with 'external' linkage}} + cir.global "private" external @v = #cir.int<3> : !u32i // expected-error {{private visibility not allowed with 'external' linkage}} } // ----- +!u32i = !cir.int module { - cir.global "public" internal @v = 3 : i32 // expected-error {{public visibility not allowed with 'internal' linkage}} + cir.global "public" internal @v = #cir.int<3> : !u32i // expected-error {{public visibility not allowed with 'internal' linkage}} } // ----- +!u32i = !cir.int cir.func @unary0() { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.const(2 : i32) : i32 + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<2> : !u32i) : !u32i - %3 = cir.unary(inc, %1) : i32, i32 // expected-error {{'cir.unary' op requires input to be defined by a memory load}} - cir.store %3, %0 : i32, cir.ptr + %3 = cir.unary(inc, %1) : !u32i, !u32i // expected-error {{'cir.unary' op requires input to be defined by a memory load}} + cir.store %3, %0 : !u32i, cir.ptr cir.return } // ----- +!u32i = !cir.int cir.func @unary1() { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.const(2 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<2> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr - %2 = cir.load %0 : cir.ptr , i32 - %3 = cir.unary(dec, %2) : i32, i32 // // expected-error {{'cir.unary' op requires result to be used by a memory store to the same address as the input memory load}} + %2 = cir.load %0 : cir.ptr , !u32i + %3 = cir.unary(dec, %2) : !u32i, !u32i // // expected-error {{'cir.unary' op requires result to be used by a memory store to the same address as the input memory load}} cir.return } // ----- +!u32i = !cir.int module { - cir.global external @v = #cir.zero : i32 // expected-error {{zero expects struct type}} + cir.global external @v = #cir.zero : !u32i // expected-error {{zero expects struct type}} } // ----- @@ -282,14 +300,16 @@ cir.func coroutine @good_yield() { // ----- +!u8i = !cir.int +!u32i = !cir.int module { // Note MLIR requires "private" for global declarations, should get // rid of this somehow in favor of clarity? - cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr + cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr - cir.global external @type_info_B = #cir.typeinfo<<{ // expected-error {{element at index 0 has type '!cir.ptr' but return type for this element is '!cir.ptr'}} - #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}>> - : !cir.struct<"", !cir.ptr> + cir.global external @type_info_B = #cir.typeinfo<<{ // expected-error {{element at index 0 has type '!cir.ptr>' but return type for this element is '!cir.ptr>'}} + #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}>> + : !cir.struct<"", !cir.ptr> } // expected-error {{'cir.global' expected constant attribute to match type}} // ----- diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 97386ec60276..19e97170fff6 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -1,35 +1,36 @@ // RUN: cir-tool %s | FileCheck %s #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool +!u32i = !cir.int cir.func @l0() { - %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.const(0 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr + %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr cir.scope { - %2 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.const(0 : i32) : i32 - cir.store %3, %2 : i32, cir.ptr + %2 = cir.alloca !u32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<0> : !u32i) : !u32i + cir.store %3, %2 : !u32i, cir.ptr cir.loop for(cond : { - %4 = cir.load %2 : cir.ptr , i32 - %5 = cir.const(10 : i32) : i32 - %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool + %4 = cir.load %2 : cir.ptr , !u32i + %5 = cir.const(#cir.int<10> : !u32i) : !u32i + %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool cir.brcond %6 ^bb1, ^bb2 ^bb1: cir.yield continue ^bb2: cir.yield }, step : { - %4 = cir.load %2 : cir.ptr , i32 - %5 = cir.const(1 : i32) : i32 - %6 = cir.binop(add, %4, %5) : i32 - cir.store %6, %2 : i32, cir.ptr + %4 = cir.load %2 : cir.ptr , !u32i + %5 = cir.const(#cir.int<1> : !u32i) : !u32i + %6 = cir.binop(add, %4, %5) : !u32i + cir.store %6, %2 : !u32i, cir.ptr cir.yield }) { - %4 = cir.load %0 : cir.ptr , i32 - %5 = cir.const(1 : i32) : i32 - %6 = cir.binop(add, %4, %5) : i32 - cir.store %6, %0 : i32, cir.ptr + %4 = cir.load %0 : cir.ptr , !u32i + %5 = cir.const(#cir.int<1> : !u32i) : !u32i + %6 = cir.binop(add, %4, %5) : !u32i + cir.store %6, %0 : !u32i, cir.ptr %7 = cir.const(#true) : !cir.bool cir.if %7 { cir.yield break @@ -38,13 +39,13 @@ cir.func @l0() { } } cir.scope { - %2 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.const(0 : i32) : i32 - cir.store %3, %2 : i32, cir.ptr + %2 = cir.alloca !u32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<0> : !u32i) : !u32i + cir.store %3, %2 : !u32i, cir.ptr cir.loop while(cond : { - %4 = cir.load %2 : cir.ptr , i32 - %5 = cir.const(10 : i32) : i32 - %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool + %4 = cir.load %2 : cir.ptr , !u32i + %5 = cir.const(#cir.int<10> : !u32i) : !u32i + %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool cir.brcond %6 ^bb1, ^bb2 ^bb1: cir.yield continue @@ -53,10 +54,10 @@ cir.func @l0() { }, step : { cir.yield }) { - %4 = cir.load %0 : cir.ptr , i32 - %5 = cir.const(1 : i32) : i32 - %6 = cir.binop(add, %4, %5) : i32 - cir.store %6, %0 : i32, cir.ptr + %4 = cir.load %0 : cir.ptr , !u32i + %5 = cir.const(#cir.int<1> : !u32i) : !u32i + %6 = cir.binop(add, %4, %5) : !u32i + cir.store %6, %0 : !u32i, cir.ptr %7 = cir.const(#true) : !cir.bool cir.if %7 { cir.yield continue @@ -66,13 +67,13 @@ cir.func @l0() { } cir.scope { - %2 = cir.alloca i32, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.const(0 : i32) : i32 - cir.store %3, %2 : i32, cir.ptr + %2 = cir.alloca !u32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<0> : !u32i) : !u32i + cir.store %3, %2 : !u32i, cir.ptr cir.loop dowhile(cond : { - %4 = cir.load %2 : cir.ptr , i32 - %5 = cir.const(10 : i32) : i32 - %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool + %4 = cir.load %2 : cir.ptr , !u32i + %5 = cir.const(#cir.int<10> : !u32i) : !u32i + %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool cir.brcond %6 ^bb1, ^bb2 ^bb1: cir.yield continue @@ -81,10 +82,10 @@ cir.func @l0() { }, step : { cir.yield }) { - %4 = cir.load %0 : cir.ptr , i32 - %5 = cir.const(1 : i32) : i32 - %6 = cir.binop(add, %4, %5) : i32 - cir.store %6, %0 : i32, cir.ptr + %4 = cir.load %0 : cir.ptr , !u32i + %5 = cir.const(#cir.int<1> : !u32i) : !u32i + %6 = cir.binop(add, %4, %5) : !u32i + cir.store %6, %0 : !u32i, cir.ptr cir.yield } } @@ -93,25 +94,25 @@ cir.func @l0() { // CHECK: cir.func @l0 // CHECK: cir.loop for(cond : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(10 : i32) : i32 -// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i +// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i +// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool // CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: cir.yield continue // CHECK-NEXT: ^bb2: // CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 -// CHECK-NEXT: cir.store %6, %2 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !u32i +// CHECK-NEXT: cir.store %6, %2 : !u32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !u32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !u32i +// CHECK-NEXT: cir.store %6, %0 : !u32i, cir.ptr // CHECK-NEXT: %7 = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.if %7 { // CHECK-NEXT: cir.yield break @@ -120,9 +121,9 @@ cir.func @l0() { // CHECK-NEXT: } // CHECK: cir.loop while(cond : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(10 : i32) : i32 -// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i +// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i +// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool // CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: cir.yield continue @@ -131,10 +132,10 @@ cir.func @l0() { // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !u32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !u32i +// CHECK-NEXT: cir.store %6, %0 : !u32i, cir.ptr // CHECK-NEXT: %7 = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.if %7 { // CHECK-NEXT: cir.yield continue @@ -143,9 +144,9 @@ cir.func @l0() { // CHECK-NEXT: } // CHECK: cir.loop dowhile(cond : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(10 : i32) : i32 -// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : i32, !cir.bool +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i +// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i +// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool // CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // CHECK-NEXT: cir.yield continue @@ -154,10 +155,10 @@ cir.func @l0() { // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , i32 -// CHECK-NEXT: %5 = cir.const(1 : i32) : i32 -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : i32 -// CHECK-NEXT: cir.store %6, %0 : i32, cir.ptr +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !u32i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !u32i +// CHECK-NEXT: cir.store %6, %0 : !u32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index 25fb6214751d..72740242ab8b 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -1,19 +1,24 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s -!ty_2222 = !cir.struct<"", !cir.array x 5>> -!ty_22221 = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> +!u8i = !cir.int +!u16i = !cir.int +!u32i = !cir.int + +!ty_2222 = !cir.struct<"", !cir.array x 5>> +!ty_22221 = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> !ty_22class2EA22 = !cir.struct<"class.A", incomplete, #cir.recdecl.ast> module { cir.func @structs() { - %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] + %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["i", init] cir.return } } -// CHECK: !ty_22S22 = !cir.struct<"S", i8, i16, i32> // CHECK: !ty_22i22 = !cir.struct<"i", incomplete> +// CHECK: !ty_22S22 = !cir.struct<"S", !u8i, !u16i, !u32i> + // CHECK-NEXT: module { // CHECK-NEXT: cir.func @structs() { // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] diff --git a/clang/test/CIR/IR/ternary.cir b/clang/test/CIR/IR/ternary.cir index eff292de9813..e752321ee57c 100644 --- a/clang/test/CIR/IR/ternary.cir +++ b/clang/test/CIR/IR/ternary.cir @@ -1,29 +1,30 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s +!u32i = !cir.int module { - cir.func @blue(%arg0: !cir.bool) -> i32 { + cir.func @blue(%arg0: !cir.bool) -> !u32i { %0 = cir.ternary(%arg0, true { - %a = cir.const(0 : i32) : i32 - cir.yield %a : i32 + %a = cir.const(#cir.int<0> : !u32i) : !u32i + cir.yield %a : !u32i }, false { - %b = cir.const(1 : i32) : i32 - cir.yield %b : i32 - }) : i32 - cir.return %0 : i32 + %b = cir.const(#cir.int<1> : !u32i) : !u32i + cir.yield %b : !u32i + }) : !u32i + cir.return %0 : !u32i } } // CHECK: module { -// CHECK: cir.func @blue(%arg0: !cir.bool) -> i32 { +// CHECK: cir.func @blue(%arg0: !cir.bool) -> !u32i { // CHECK: %0 = cir.ternary(%arg0, true { -// CHECK: %1 = cir.const(0 : i32) : i32 -// CHECK: cir.yield %1 : i32 +// CHECK: %1 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: cir.yield %1 : !u32i // CHECK: }, false { -// CHECK: %1 = cir.const(1 : i32) : i32 -// CHECK: cir.yield %1 : i32 -// CHECK: }) : i32 -// CHECK: cir.return %0 : i32 +// CHECK: %1 = cir.const(#cir.int<1> : !u32i) : !u32i +// CHECK: cir.yield %1 : !u32i +// CHECK: }) : !u32i +// CHECK: cir.return %0 : !u32i // CHECK: } // CHECK: } diff --git a/clang/test/CIR/IR/types.cir b/clang/test/CIR/IR/types.cir index f60d9c89acb6..4390d3dabfcd 100644 --- a/clang/test/CIR/IR/types.cir +++ b/clang/test/CIR/IR/types.cir @@ -1,11 +1,13 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s +!u32i = !cir.int + module { cir.func @arrays() { - %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] + %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] cir.return } } // CHECK: cir.func @arrays() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] +// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] From 393754b68233da4f17fc2af3de6c75dcf791a1c7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 May 2023 22:38:54 -0700 Subject: [PATCH 0976/2301] [CIR][NFC] Move prototypes close to their only use --- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 10 ---------- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 133a30568018..87aea83b744e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -17,16 +17,6 @@ #include "mlir/IR/Types.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" -//===----------------------------------------------------------------------===// -// CIR Custom Parser/Printer Signatures -//===----------------------------------------------------------------------===// - -mlir::ParseResult -parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, - bool &isVarArg); -void printFuncTypeArgs(mlir::AsmPrinter &p, - mlir::ArrayRef params, bool isVarArg); - //===----------------------------------------------------------------------===// // CIR Dialect Types //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 93739dbd3ddc..061c17fc9ebd 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -22,6 +22,20 @@ #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" +//===----------------------------------------------------------------------===// +// CIR Custom Parser/Printer Signatures +//===----------------------------------------------------------------------===// + +static mlir::ParseResult +parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, + bool &isVarArg); +static void printFuncTypeArgs(mlir::AsmPrinter &p, + mlir::ArrayRef params, bool isVarArg); + +//===----------------------------------------------------------------------===// +// Get autogenerated stuff +//===----------------------------------------------------------------------===// + #define GET_TYPEDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsTypes.cpp.inc" From 42803d9fead4413181200e84635eb89c090fdc79 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Fri, 26 May 2023 11:04:06 -0700 Subject: [PATCH 0977/2301] [CIR] Enable -fclangir-direct-lowering by default Summary: This change turns on direct lowering from CIR to LLVM IR by default. A missing API call to register the builtin LLVM dialtect is added, otherwise an error `cannot be converted to LLVM IR: missing `LLVMTranslationDialectInterface` registration for dialect for op: builtin.module` is hit during llvm IR conversion. A test is also tweaked. --- clang/include/clang/Driver/Options.td | 8 ++++---- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 ++- .../lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 1 - clang/test/CIR/cc1.c | 3 +-- clang/test/CIR/driver.c | 12 ++++++++---- 5 files changed, 15 insertions(+), 12 deletions(-) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 0b20723a2b56..565d1003bf90 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3084,10 +3084,10 @@ def fclangir_lifetime_check : Flag<["-"], "fclangir-lifetime-check">, Visibility<[ClangOption, CC1Option]>, Group, Alias, AliasArgs<["history=invalid,null"]>, HelpText<"Run lifetime checker">; -def fclangir_direct_lowering : Flag<["-"], "fclangir-direct-lowering">, - Visibility<[ClangOption, CC1Option]>, Group, - HelpText<"Lower directly from ClangIR to LLVM">, - MarshallingInfoFlag>; +defm clangir_direct_lowering : BoolFOption<"clangir-direct-lowering", + FrontendOpts<"ClangIRDirectLowering">, DefaultTrue, + PosFlag, + NegFlag>; def flto : Flag<["-"], "flto">, Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d634a878965c..3d660367a04d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -33,6 +33,7 @@ #include "mlir/IR/IRMapping.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" +#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/DialectConversion.h" @@ -1106,9 +1107,9 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, if (theModule.verify().failed()) report_fatal_error("Verification of the final LLVMIR dialect failed!"); + mlir::registerBuiltinDialectTranslation(*mlirCtx); mlir::registerLLVMDialectTranslation(*mlirCtx); - LLVMContext llvmContext; auto llvmModule = mlir::translateModuleToLLVMIR(theModule, llvmCtx); if (!llvmModule) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 6f620ae5a304..0f8483f99345 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -578,7 +578,6 @@ lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, mlir::registerBuiltinDialectTranslation(*mlirCtx); mlir::registerLLVMDialectTranslation(*mlirCtx); - LLVMContext llvmContext; auto llvmModule = mlir::translateModuleToLLVMIR(theModule, llvmCtx); if (!llvmModule) diff --git a/clang/test/CIR/cc1.c b/clang/test/CIR/cc1.c index 8b968b0b1c49..c29c6943d6ff 100644 --- a/clang/test/CIR/cc1.c +++ b/clang/test/CIR/cc1.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-mlir %s -o %t.mlir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM @@ -24,4 +24,3 @@ void foo() {} // ASM: retq // OBJ: 0: c3 retq - diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index 65435dddd300..96a9caacab86 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -1,7 +1,11 @@ -// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -S -Xclang -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o %t.ll -// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fclangir-direct-lowering -S -Xclang -emit-cir %s -o %t1.cir +// RUN: FileCheck --input-file=%t1.cir %s -check-prefix=CIR +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -S -Xclang -emit-cir %s -o %t2.cir +// RUN: FileCheck --input-file=%t2.cir %s -check-prefix=CIR +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fclangir-direct-lowering -S -emit-llvm %s -o %t1.ll +// RUN: FileCheck --input-file=%t1.ll %s -check-prefix=LLVM +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -S -emit-llvm %s -o %t2.ll +// RUN: FileCheck --input-file=%t2.ll %s -check-prefix=LLVM // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-passes -S -Xclang -emit-cir %s -o %t.cir From 9961e3a704615e29e254de267a0caa56afecb510 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 May 2023 23:03:36 -0700 Subject: [PATCH 0978/2301] [CIR][CIRGen] new operator: build more complex expressions --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 45 +++++++++- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 115 ++++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 12 ++- clang/test/CIR/CodeGen/new.cpp | 31 +++++++ clang/test/CIR/Inputs/std-cxx.h | 51 +++++++++++ 6 files changed, 245 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/CodeGen/new.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 0b95afa69532..457379faa42f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -51,6 +51,44 @@ void CIRGenFunction::buildCXXTemporary(const CXXTemporary *Temporary, /*useEHCleanup*/ true); } +Address CIRGenFunction::createCleanupActiveFlag() { llvm_unreachable("NYI"); } + +DominatingValue::saved_type +DominatingValue::saved_type::save(CIRGenFunction &CGF, RValue rv) { + llvm_unreachable("NYI"); +} + +/// Deactive a cleanup that was created in an active state. +void CIRGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C, + mlir::Operation *dominatingIP) { + assert(C != EHStack.stable_end() && "deactivating bottom of stack?"); + EHCleanupScope &Scope = cast(*EHStack.find(C)); + assert(Scope.isActive() && "double deactivation"); + + // If it's the top of the stack, just pop it, but do so only if it belongs + // to the current RunCleanupsScope. + if (C == EHStack.stable_begin() && + CurrentCleanupScopeDepth.strictlyEncloses(C)) { + // Per comment below, checking EHAsynch is not really necessary + // it's there to assure zero-impact w/o EHAsynch option + if (!Scope.isNormalCleanup() && getLangOpts().EHAsynch) { + llvm_unreachable("NYI"); + } else { + // From LLVM: If it's a normal cleanup, we need to pretend that the + // fallthrough is unreachable. + // CIR remarks: LLVM uses an empty insertion point to signal behavior + // change to other codegen paths (triggered by PopCleanupBlock). + // CIRGen doesn't do that yet, but let's mimic just in case. + mlir::OpBuilder::InsertionGuard guard(builder); + builder.clearInsertionPoint(); + PopCleanupBlock(); + } + return; + } + + llvm_unreachable("NYI"); +} + void CIRGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) { // Set that as the active flag in the cleanup. EHCleanupScope &cleanup = cast(*EHStack.begin()); @@ -140,7 +178,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // rest of CIR gen doesn't need to worry about this; it only happens // during the execution of PopCleanupBlocks(). bool HasTerminator = - !FallthroughSource->empty() && + FallthroughSource && !FallthroughSource->empty() && FallthroughSource->back().mightHaveTrait(); bool HasPrebranchedFallthrough = (FallthroughSource && HasTerminator && FallthroughSource->getTerminator()); @@ -167,7 +205,10 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // If we don't need the cleanup at all, we're done. if (!RequiresNormalCleanup && !RequiresEHCleanup) { - llvm_unreachable("NYI"); + destroyOptimisticNormalEntry(*this, Scope); + EHStack.popCleanup(); // safe because there are no fixups + assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups()); + return; } // Copy the cleanup emission data out. This uses either a stack diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 0a368bbf91cf..e11594d8c184 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -343,7 +343,7 @@ void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS, } void CIRGenFunction::buildScalarInit(const Expr *init, mlir::Location loc, - LValue lvalue) { + LValue lvalue, bool capturedByInit) { // TODO: this is where a lot of ObjC lifetime stuff would be done. mlir::Value value = buildScalarExpr(init); SourceLocRAIIObject Loc{*this, loc}; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 253c48643171..7b25122e2b36 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -383,9 +383,9 @@ static mlir::Value buildCXXNewAllocSize(CIRGenFunction &CGF, if (!e->isArray()) { CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type); - sizeWithoutCookie = CGF.getBuilder().create( - CGF.getLoc(e->getSourceRange()), CGF.SizeTy, - mlir::IntegerAttr::get(CGF.SizeTy, typeSize.getQuantity())); + sizeWithoutCookie = CGF.getBuilder().getConstant( + CGF.getLoc(e->getSourceRange()), + mlir::cir::IntAttr::get(CGF.SizeTy, typeSize.getQuantity())); return sizeWithoutCookie; } @@ -546,6 +546,58 @@ static void EnterNewDeleteCleanup(CIRGenFunction &CGF, const CXXNewExpr *E, CGF.initFullExprCleanup(); } +static void StoreAnyExprIntoOneUnit(CIRGenFunction &CGF, const Expr *Init, + QualType AllocType, Address NewPtr, + AggValueSlot::Overlap_t MayOverlap) { + // FIXME: Refactor with buildExprAsInit. + switch (CGF.getEvaluationKind(AllocType)) { + case TEK_Scalar: + CGF.buildScalarInit(Init, nullptr, CGF.makeAddrLValue(NewPtr, AllocType), + false); + return; + case TEK_Complex: + llvm_unreachable("NYI"); + return; + case TEK_Aggregate: { + AggValueSlot Slot = AggValueSlot::forAddr( + NewPtr, AllocType.getQualifiers(), AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, + MayOverlap, AggValueSlot::IsNotZeroed, + AggValueSlot::IsSanitizerChecked); + CGF.buildAggExpr(Init, Slot); + return; + } + } + llvm_unreachable("bad evaluation kind"); +} + +static void buildNewInitializer(CIRGenFunction &CGF, const CXXNewExpr *E, + QualType ElementType, mlir::Type ElementTy, + Address NewPtr, mlir::Value NumElements, + mlir::Value AllocSizeWithoutCookie) { + assert(!UnimplementedFeature::generateDebugInfo()); + if (E->isArray()) { + llvm_unreachable("NYI"); + } else if (const Expr *Init = E->getInitializer()) { + StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr, + AggValueSlot::DoesNotOverlap); + } +} + +static CharUnits CalculateCookiePadding(CIRGenFunction &CGF, + const CXXNewExpr *E) { + if (!E->isArray()) + return CharUnits::Zero(); + + // No cookie is required if the operator new[] being used is the + // reserved placement operator new[]. + if (E->getOperatorNew()->isReservedGlobalPlacementOperator()) + return CharUnits::Zero(); + + llvm_unreachable("NYI"); + // return CGF.CGM.getCXXABI().GetArrayCookieSize(E); +} + mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { // The element type being allocated. QualType allocType = getContext().getBaseElementType(E->getAllocatedType()); @@ -646,17 +698,66 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { // If there's an operator delete, enter a cleanup to call it if an // exception is thrown. EHScopeStack::stable_iterator operatorDeleteCleanup; - // llvm::Instruction *cleanupDominator = nullptr; + [[maybe_unused]] mlir::Operation *cleanupDominator = nullptr; if (E->getOperatorDelete() && !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) { - llvm_unreachable("NYI"); EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign, allocatorArgs); operatorDeleteCleanup = EHStack.stable_begin(); + // FIXME: cleanupDominator = Builder.CreateUnreachable(); + } + + assert((allocSize == allocSizeWithoutCookie) == + CalculateCookiePadding(*this, E).isZero()); + if (allocSize != allocSizeWithoutCookie) { llvm_unreachable("NYI"); - // cleanupDominator = Builder.CreateUnreachable(); } - llvm_unreachable("NYI"); + + mlir::Type elementTy = getTypes().convertTypeForMem(allocType); + Address result = builder.createElementBitCast(getLoc(E->getSourceRange()), + allocation, elementTy); + + // Passing pointer through launder.invariant.group to avoid propagation of + // vptrs information which may be included in previous type. + // To not break LTO with different optimizations levels, we do it regardless + // of optimization level. + if (CGM.getCodeGenOpts().StrictVTablePointers && + allocator->isReservedGlobalPlacementOperator()) + llvm_unreachable("NYI"); + + // Emit sanitizer checks for pointer value now, so that in the case of an + // array it was checked only once and not at each constructor call. We may + // have already checked that the pointer is non-null. + // FIXME: If we have an array cookie and a potentially-throwing allocator, + // we'll null check the wrong pointer here. + SanitizerSet SkippedChecks; + SkippedChecks.set(SanitizerKind::Null, nullCheck); + buildTypeCheck(CIRGenFunction::TCK_ConstructorCall, + E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(), + result.getPointer(), allocType, result.getAlignment(), + SkippedChecks, numElements); + + buildNewInitializer(*this, E, allocType, elementTy, result, numElements, + allocSizeWithoutCookie); + auto resultPtr = result.getPointer(); + if (E->isArray()) { + llvm_unreachable("NYI"); + } + + // Deactivate the 'operator delete' cleanup if we finished + // initialization. + if (operatorDeleteCleanup.isValid()) { + // FIXME: enable cleanupDominator above before implementing this. + DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator); + if (cleanupDominator) + cleanupDominator->erase(); + } + + if (nullCheck) { + llvm_unreachable("NYI"); + } + + return resultPtr; } RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index c4f4dc2eda19..9b71e9cfe501 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1139,7 +1139,7 @@ class CIRGenFunction : public CIRGenTypeCache { clang::SourceLocation Loc); void buildScalarInit(const clang::Expr *init, mlir::Location loc, - LValue lvalue); + LValue lvalue, bool capturedByInit = false); LValue buildDeclRefLValue(const clang::DeclRefExpr *E); LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E); @@ -1463,6 +1463,16 @@ class CIRGenFunction : public CIRGenTypeCache { /// Will pop the cleanup entry on the stack and process all branch fixups. void PopCleanupBlock(bool FallThroughIsBranchThrough = false); + /// Deactivates the given cleanup block. The block cannot be reactivated. Pops + /// it if it's the top of the stack. + /// + /// \param DominatingIP - An instruction which is known to + /// dominate the current IP (if set) and which lies along + /// all paths of execution between the current IP and the + /// the point at which the cleanup comes into scope. + void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, + mlir::Operation *DominatingIP); + typedef void Destroyer(CIRGenFunction &CGF, Address addr, QualType ty); static Destroyer destroyCXXObject; diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp new file mode 100644 index 000000000000..b843e2c81c09 --- /dev/null +++ b/clang/test/CIR/CodeGen/new.cpp @@ -0,0 +1,31 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + + +struct S { + S(int, int); +}; + +void m(int a, int b) { + std::shared_ptr l = std::make_shared(a, b); +} + +// CHECK: cir.func linkonce_odr @_ZSt11make_sharedI1SJRiS1_EESt10shared_ptrIT_EDpOT0_( +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["args", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["args", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !ty_22class2Estd3A3Ashared_ptr22, cir.ptr , ["__retval"] {alignment = 1 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK: cir.scope { +// CHECK: %4 = cir.const(#cir.int<1> : !u64i) : !u64i +// CHECK: %5 = cir.call @_Znwm(%4) : (!u64i) -> !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr +// CHECK: %7 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %8 = cir.load %7 : cir.ptr , !s32i +// CHECK: %9 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %10 = cir.load %9 : cir.ptr , !s32i +// CHECK: cir.call @_ZN1SC1Eii(%6, %8, %10) : (!cir.ptr, !s32i, !s32i) -> () +// CHECK: cir.call @_ZNSt10shared_ptrI1SEC1EPS0_(%2, %6) : (!cir.ptr, !cir.ptr) -> () +// CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/Inputs/std-cxx.h b/clang/test/CIR/Inputs/std-cxx.h index ca37aa0cd798..b86fdf160db4 100644 --- a/clang/test/CIR/Inputs/std-cxx.h +++ b/clang/test/CIR/Inputs/std-cxx.h @@ -1258,4 +1258,55 @@ template class packaged_task { // TODO: Add some actual implementation. }; +#if __has_feature(cxx_decltype) +typedef decltype(nullptr) nullptr_t; + +template +class shared_ptr +{ +public: + constexpr shared_ptr(nullptr_t); + explicit shared_ptr(_Tp* __p); + + shared_ptr(shared_ptr&& __r) { } + + ~shared_ptr(); + + // shared_ptr& operator=(shared_ptr&& __r); + shared_ptr<_Tp>& operator=(const shared_ptr& __r) noexcept + { + return *this; + } + + template + shared_ptr<_Tp>& operator=(const shared_ptr<_Yp>& __r) noexcept + { + return *this; + } + + shared_ptr<_Tp>& operator=(shared_ptr&& __r) noexcept + { + return *this; + } + + template + shared_ptr<_Tp>& operator=(shared_ptr<_Yp>&& __r) + { + return *this; + } +}; + +template +inline +constexpr +shared_ptr<_Tp>::shared_ptr(nullptr_t) { +} + +#endif // __has_feature(cxx_decltype) + +template + shared_ptr make_shared(Args &&...args) { + return shared_ptr(new T(static_cast(args)...)); + } + } // namespace std From 88e23a8d4068f7913213a272bbff617a4d657f90 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 30 May 2023 18:46:04 -0700 Subject: [PATCH 0979/2301] [CIR][CIRGen][NFC] It's already implemented --- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 7b25122e2b36..f90bb2433c4e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -103,8 +103,6 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( // Compute the object pointer. bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier; - assert(!CanUseVirtualCall && "NYI"); - const CXXMethodDecl *DevirtualizedMethod = nullptr; if (CanUseVirtualCall && MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) { From 43964df4991ae19ad3dec7ea677d0c748eb82053 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Wed, 31 May 2023 14:36:22 -0700 Subject: [PATCH 0980/2301] [CIR] Use source path for the CIR module name and propagate it to LLVM IR. Summary: Previously the CIR module doesn't have a name and a fake name "LLVMDialectModule" is used for the resuliting LLVM module. I'm changing it to use the source path, which matches the default behavior. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 9 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 +- clang/test/CIR/CodeGen/basic.c | 2 +- clang/test/CIR/CodeGen/dlti.c | 2 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 130 +++++++++--------- 5 files changed, 81 insertions(+), 66 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 6008896e4c38..c67301243fc2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -156,6 +156,13 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, } theModule->setAttr("cir.sob", mlir::cir::SignedOverflowBehaviorAttr::get(&context, sob)); + // Set the module name to be the name of the main file. + auto MainFileID = astctx.getSourceManager().getMainFileID(); + const FileEntry &MainFile = + *astctx.getSourceManager().getFileEntryForID(MainFileID); + auto Path = MainFile.tryGetRealPathName(); + if (!Path.empty()) + theModule.setSymName(Path); } CIRGenModule::~CIRGenModule() {} @@ -2300,4 +2307,4 @@ void CIRGenModule::ErrorUnsupported(const Decl *D, const char *Type) { "cannot compile this %0 yet"); std::string Msg = Type; getDiags().Report(astCtx.getFullLoc(D->getLocation()), DiagID) << Msg; -} \ No newline at end of file +} diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3d660367a04d..fe87bf58dbfc 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1110,7 +1110,9 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, mlir::registerBuiltinDialectTranslation(*mlirCtx); mlir::registerLLVMDialectTranslation(*mlirCtx); - auto llvmModule = mlir::translateModuleToLLVMIR(theModule, llvmCtx); + auto ModuleName = theModule.getName(); + auto llvmModule = mlir::translateModuleToLLVMIR( + theModule, llvmCtx, ModuleName ? *ModuleName : "CIRToLLVMModule"); if (!llvmModule) report_fatal_error("Lowering from LLVMIR dialect to llvm IR failed!"); diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 3f810109f20a..941563e9cc73 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -8,7 +8,7 @@ int foo(int i) { return i; } -// CHECK: module attributes { +// CHECK: module @"{{.*}}basic.c" attributes { // CHECK-NEXT: cir.func @foo(%arg0: !s32i loc({{.*}})) -> !s32i { // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/dlti.c b/clang/test/CIR/CodeGen/dlti.c index f56687f7bcf8..2267b992f42e 100644 --- a/clang/test/CIR/CodeGen/dlti.c +++ b/clang/test/CIR/CodeGen/dlti.c @@ -3,7 +3,7 @@ void foo() {} -// CHECK: module attributes { +// CHECK: module @"{{.*}}dlti.c" attributes { // CHECK-DAG: cir.sob = #cir.signed_overflow_behavior, // CHECK-DAG: dlti.dl_spec = // CHECK-DAG: #dlti.dl_spec< diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index d83105d3c859..9a9ce21895d9 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM int s0(int a, int b) { int x = a + b; @@ -10,64 +12,68 @@ int s0(int a, int b) { return x; } -// CHECK: #loc3 = loc("{{.*}}sourcelocation.cpp":4:8) -// CHECK: #loc4 = loc("{{.*}}sourcelocation.cpp":4:12) -// CHECK: #loc5 = loc("{{.*}}sourcelocation.cpp":4:15) -// CHECK: #loc6 = loc("{{.*}}sourcelocation.cpp":4:19) -// CHECK: #loc21 = loc(fused[#loc3, #loc4]) -// CHECK: #loc22 = loc(fused[#loc5, #loc6]) -// CHECK: module attributes {cir.sob = #cir.signed_overflow_behavior -// CHECK: cir.func @_Z2s0ii(%arg0: !s32i loc(fused[#loc3, #loc4]), %arg1: !s32i loc(fused[#loc5, #loc6])) -> !s32i { -// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) -// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) -// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) -// CHECK: %3 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} loc(#loc23) -// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr loc(#loc9) -// CHECK: cir.store %arg1, %1 : !s32i, cir.ptr loc(#loc9) -// CHECK: %4 = cir.load %0 : cir.ptr , !s32i loc(#loc10) -// CHECK: %5 = cir.load %1 : cir.ptr , !s32i loc(#loc8) -// CHECK: %6 = cir.binop(add, %4, %5) : !s32i loc(#loc24) -// CHECK: cir.store %6, %3 : !s32i, cir.ptr loc(#loc23) -// CHECK: cir.scope { -// CHECK: %9 = cir.load %3 : cir.ptr , !s32i loc(#loc13) -// CHECK: %10 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc14) -// CHECK: %11 = cir.cmp(gt, %9, %10) : !s32i, !cir.bool loc(#loc26) -// CHECK: cir.if %11 { -// CHECK: %12 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc16) -// CHECK: cir.store %12, %3 : !s32i, cir.ptr loc(#loc28) -// CHECK: } else { -// CHECK: %12 = cir.const(#cir.int<1> : !s32i) : !s32i loc(#loc12) -// CHECK: cir.store %12, %3 : !s32i, cir.ptr loc(#loc29) -// CHECK: } loc(#loc27) -// CHECK: } loc(#loc25) -// CHECK: %7 = cir.load %3 : cir.ptr , !s32i loc(#loc18) -// CHECK: cir.store %7, %2 : !s32i, cir.ptr loc(#loc30) -// CHECK: %8 = cir.load %2 : cir.ptr , !s32i loc(#loc30) -// CHECK: cir.return %8 : !s32i loc(#loc30) -// CHECK: } loc(#loc20) -// CHECK: } loc(#loc) -// CHECK: #loc = loc(unknown) -// CHECK: #loc1 = loc("{{.*}}sourcelocation.cpp":4:1) -// CHECK: #loc2 = loc("{{.*}}sourcelocation.cpp":11:1) -// CHECK: #loc7 = loc("{{.*}}sourcelocation.cpp":5:3) -// CHECK: #loc8 = loc("{{.*}}sourcelocation.cpp":5:15) -// CHECK: #loc9 = loc("{{.*}}sourcelocation.cpp":4:22) -// CHECK: #loc10 = loc("{{.*}}sourcelocation.cpp":5:11) -// CHECK: #loc11 = loc("{{.*}}sourcelocation.cpp":6:3) -// CHECK: #loc12 = loc("{{.*}}sourcelocation.cpp":9:9) -// CHECK: #loc13 = loc("{{.*}}sourcelocation.cpp":6:7) -// CHECK: #loc14 = loc("{{.*}}sourcelocation.cpp":6:11) -// CHECK: #loc15 = loc("{{.*}}sourcelocation.cpp":7:5) -// CHECK: #loc16 = loc("{{.*}}sourcelocation.cpp":7:9) -// CHECK: #loc17 = loc("{{.*}}sourcelocation.cpp":9:5) -// CHECK: #loc18 = loc("{{.*}}sourcelocation.cpp":10:10) -// CHECK: #loc19 = loc("{{.*}}sourcelocation.cpp":10:3) -// CHECK: #loc20 = loc(fused[#loc1, #loc2]) -// CHECK: #loc23 = loc(fused[#loc7, #loc8]) -// CHECK: #loc24 = loc(fused[#loc10, #loc8]) -// CHECK: #loc25 = loc(fused[#loc11, #loc12]) -// CHECK: #loc26 = loc(fused[#loc13, #loc14]) -// CHECK: #loc27 = loc(fused[#loc15, #loc16, #loc17, #loc12]) -// CHECK: #loc28 = loc(fused[#loc15, #loc16]) -// CHECK: #loc29 = loc(fused[#loc17, #loc12]) -// CHECK: #loc30 = loc(fused[#loc19, #loc18]) +// CIR: #loc3 = loc("{{.*}}sourcelocation.cpp":6:8) +// CIR: #loc4 = loc("{{.*}}sourcelocation.cpp":6:12) +// CIR: #loc5 = loc("{{.*}}sourcelocation.cpp":6:15) +// CIR: #loc6 = loc("{{.*}}sourcelocation.cpp":6:19) +// CIR: #loc21 = loc(fused[#loc3, #loc4]) +// CIR: #loc22 = loc(fused[#loc5, #loc6]) +// CIR: module @"{{.*}}sourcelocation.cpp" attributes {cir.sob = #cir.signed_overflow_behavior +// CIR: cir.func @_Z2s0ii(%arg0: !s32i loc(fused[#loc3, #loc4]), %arg1: !s32i loc(fused[#loc5, #loc6])) -> !s32i { +// CIR: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) +// CIR: %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) +// CIR: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) +// CIR: %3 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} loc(#loc23) +// CIR: cir.store %arg0, %0 : !s32i, cir.ptr loc(#loc9) +// CIR: cir.store %arg1, %1 : !s32i, cir.ptr loc(#loc9) +// CIR: %4 = cir.load %0 : cir.ptr , !s32i loc(#loc10) +// CIR: %5 = cir.load %1 : cir.ptr , !s32i loc(#loc8) +// CIR: %6 = cir.binop(add, %4, %5) : !s32i loc(#loc24) +// CIR: cir.store %6, %3 : !s32i, cir.ptr loc(#loc23) +// CIR: cir.scope { +// CIR: %9 = cir.load %3 : cir.ptr , !s32i loc(#loc13) +// CIR: %10 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc14) +// CIR: %11 = cir.cmp(gt, %9, %10) : !s32i, !cir.bool loc(#loc26) +// CIR: cir.if %11 { +// CIR: %12 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc16) +// CIR: cir.store %12, %3 : !s32i, cir.ptr loc(#loc28) +// CIR: } else { +// CIR: %12 = cir.const(#cir.int<1> : !s32i) : !s32i loc(#loc12) +// CIR: cir.store %12, %3 : !s32i, cir.ptr loc(#loc29) +// CIR: } loc(#loc27) +// CIR: } loc(#loc25) +// CIR: %7 = cir.load %3 : cir.ptr , !s32i loc(#loc18) +// CIR: cir.store %7, %2 : !s32i, cir.ptr loc(#loc30) +// CIR: %8 = cir.load %2 : cir.ptr , !s32i loc(#loc30) +// CIR: cir.return %8 : !s32i loc(#loc30) +// CIR: } loc(#loc20) +// CIR: } loc(#loc) +// CIR: #loc = loc(unknown) +// CIR: #loc1 = loc("{{.*}}sourcelocation.cpp":6:1) +// CIR: #loc2 = loc("{{.*}}sourcelocation.cpp":13:1) +// CIR: #loc7 = loc("{{.*}}sourcelocation.cpp":7:3) +// CIR: #loc8 = loc("{{.*}}sourcelocation.cpp":7:15) +// CIR: #loc9 = loc("{{.*}}sourcelocation.cpp":6:22) +// CIR: #loc10 = loc("{{.*}}sourcelocation.cpp":7:11) +// CIR: #loc11 = loc("{{.*}}sourcelocation.cpp":8:3) +// CIR: #loc12 = loc("{{.*}}sourcelocation.cpp":11:9) +// CIR: #loc13 = loc("{{.*}}sourcelocation.cpp":8:7) +// CIR: #loc14 = loc("{{.*}}sourcelocation.cpp":8:11) +// CIR: #loc15 = loc("{{.*}}sourcelocation.cpp":9:5) +// CIR: #loc16 = loc("{{.*}}sourcelocation.cpp":9:9) +// CIR: #loc17 = loc("{{.*}}sourcelocation.cpp":11:5) +// CIR: #loc18 = loc("{{.*}}sourcelocation.cpp":12:10) +// CIR: #loc19 = loc("{{.*}}sourcelocation.cpp":12:3) +// CIR: #loc20 = loc(fused[#loc1, #loc2]) +// CIR: #loc23 = loc(fused[#loc7, #loc8]) +// CIR: #loc24 = loc(fused[#loc10, #loc8]) +// CIR: #loc25 = loc(fused[#loc11, #loc12]) +// CIR: #loc26 = loc(fused[#loc13, #loc14]) +// CIR: #loc27 = loc(fused[#loc15, #loc16, #loc17, #loc12]) +// CIR: #loc28 = loc(fused[#loc15, #loc16]) +// CIR: #loc29 = loc(fused[#loc17, #loc12]) +// CIR: #loc30 = loc(fused[#loc19, #loc18]) + + +// LLVM: ModuleID = '{{.*}}sourcelocation.cpp' +// LLVM: source_filename = "{{.*}}sourcelocation.cpp" From bda5b1c05525acac61f237eab93580792ac0f4b8 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Thu, 1 Jun 2023 13:18:59 -0700 Subject: [PATCH 0981/2301] [CIR] Enabling debug loc to be progatated into LLVM. Summary: As titled. To achieve that I have to update the loc attribute of a LLVMFuncOp from using a fused range loc to a single file/loc, since the MLIR builtin support doesn't seem to allow fused locs when converted to LLVM (https://github.com/llvm/clangir/blob/main/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp#L27) --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 10 ++++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 18 ++++++++++++++++-- clang/test/CIR/CodeGen/sourcelocation.cpp | 13 ++++++++++++- 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c67301243fc2..a626e6898f3b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -156,13 +156,19 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, } theModule->setAttr("cir.sob", mlir::cir::SignedOverflowBehaviorAttr::get(&context, sob)); - // Set the module name to be the name of the main file. + // Set the module name to be the name of the main file. TranslationUnitDecl + // often contains invalid source locations and isn't a reliable source for the + // module location. auto MainFileID = astctx.getSourceManager().getMainFileID(); const FileEntry &MainFile = *astctx.getSourceManager().getFileEntryForID(MainFileID); auto Path = MainFile.tryGetRealPathName(); - if (!Path.empty()) + if (!Path.empty()) { theModule.setSymName(Path); + theModule->setLoc(mlir::FileLineColLoc::get(&context, Path, + /*line=*/0, + /*col=*/0)); + } } CIRGenModule::~CIRGenModule() {} diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index fe87bf58dbfc..259f1b9dcbfa 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -24,6 +24,7 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" +#include "mlir/Dialect/LLVMIR/Transforms/Passes.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" #include "mlir/IR/Attributes.h" @@ -522,8 +523,16 @@ class CIRFuncLowering : public mlir::OpConversionPattern { resultType ? resultType : mlir::LLVM::LLVMVoidType::get(getContext()), signatureConversion.getConvertedTypes(), /*isVarArg=*/fnType.isVarArg()); - auto fn = rewriter.create(op.getLoc(), op.getName(), - llvmFnTy); + // LLVMFuncOp expects a single FileLine Location instead of a fused + // location. + auto Loc = op.getLoc(); + if (Loc.isa()) { + auto FusedLoc = Loc.cast(); + Loc = FusedLoc.getLocations()[0]; + } + assert(Loc.isa() && "expected single location here"); + auto fn = + rewriter.create(Loc, op.getName(), llvmFnTy); rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, @@ -1098,6 +1107,11 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, pm.addPass(createConvertCIRToLLVMPass()); + // This is necessary to have line tables emitted and basic + // debugger working. In the future we will add proper debug information + // emission directly from our frontend. + pm.addPass(mlir::LLVM::createDIScopeForLLVMFuncOpPass()); + auto result = !mlir::failed(pm.run(theModule)); if (!result) report_fatal_error( diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 9a9ce21895d9..c907083c6a87 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -48,7 +48,7 @@ int s0(int a, int b) { // CIR: cir.return %8 : !s32i loc(#loc30) // CIR: } loc(#loc20) // CIR: } loc(#loc) -// CIR: #loc = loc(unknown) +// CIR: #loc = loc("{{.*}}sourcelocation.cpp":0:0) // CIR: #loc1 = loc("{{.*}}sourcelocation.cpp":6:1) // CIR: #loc2 = loc("{{.*}}sourcelocation.cpp":13:1) // CIR: #loc7 = loc("{{.*}}sourcelocation.cpp":7:3) @@ -77,3 +77,14 @@ int s0(int a, int b) { // LLVM: ModuleID = '{{.*}}sourcelocation.cpp' // LLVM: source_filename = "{{.*}}sourcelocation.cpp" +// LLVM: define i32 @_Z2s0ii(i32 %0, i32 %1) !dbg ![[#SP:]] +// LLVM: %3 = alloca i32, i64 1, align 4, !dbg ![[#LOC1:]] + + +// LLVM: !llvm.module.flags = !{!0} +// LLVM: !llvm.dbg.cu = !{!1} +// LLVM: !0 = !{i32 2, !"Debug Info Version", i32 3} +// LLVM: !1 = distinct !DICompileUnit(language: DW_LANG_C, file: !2, producer: "MLIR", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly) +// LLVM: !2 = !DIFile(filename: "sourcelocation.cpp", directory: "{{.*}}clang/test/CIR/CodeGen") +// LLVM: ![[#SP]] = distinct !DISubprogram(name: "_Z2s0ii", linkageName: "_Z2s0ii", scope: !2, file: !2, line: 6, type: !4, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !1) +// LLVM: ![[#LOC1]] = !DILocation(line: 6, scope: ![[#SP]]) From b9960bae340a4d4c4cc910fdc478b02e4e2bd84c Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 6 Jun 2023 19:58:47 -0300 Subject: [PATCH 0982/2301] [CIR][CIRGen] Implement support for printf builtin Adds a function to emit the printf builtin declaration, alongside some updates to generate calls for said builtin. ghstack-source-id: fe5dad92dca9f33f1c089b5aaac6c5a3b39993f6 Pull Request resolved: https://github.com/llvm/clangir/pull/100 --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 18 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 13 +++++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 7 +++++++ clang/test/CIR/Executables/hello.c | 2 +- 4 files changed, 38 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index e3ddbe75df81..df7cc6bce6df 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -27,12 +27,20 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Value.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "llvm/Support/ErrorHandling.h" using namespace cir; using namespace clang; using namespace mlir::cir; using namespace llvm; +static RValue buildLibraryCall(CIRGenFunction &CGF, const FunctionDecl *FD, + const CallExpr *E, + mlir::Operation *calleeValue) { + auto callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(FD)); + return CGF.buildCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); +} + RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue) { @@ -334,6 +342,13 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("NYI"); break; + case Builtin::BIprintf: + if (getTarget().getTriple().isNVPTX() || + getTarget().getTriple().isAMDGCN()) { + llvm_unreachable("NYI"); + } + break; + // C++ std:: builtins. case Builtin::BImove: case Builtin::BImove_if_noexcept: @@ -387,7 +402,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // If this is a predefined lib function (e.g. malloc), emit the call // using exactly the normal call path. if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) - llvm_unreachable("NYI"); + return buildLibraryCall(*this, FD, E, + buildScalarExpr(E->getCallee()).getDefiningOp()); // Check that a call to a target specific builtin has the correct target // features. diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index e3f9801f5222..39190f3fe304 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -19,12 +19,15 @@ #include "clang/AST/DeclCXX.h" #include "clang/AST/GlobalDecl.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/SymbolTable.h" #include "mlir/IR/Types.h" using namespace cir; @@ -503,6 +506,16 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, } else if (auto loadOp = dyn_cast(CalleePtr)) { theCall = builder.create(callLoc, loadOp->getResult(0), CIRFuncTy, CIRCallArgs); + } else if (auto getGlobalOp = dyn_cast(CalleePtr)) { + // FIXME(cir): This peephole optimization to avoids indirect calls for + // builtins. This should be fixed in the builting declaration instead by not + // emitting an unecessary get_global in the first place. + auto *globalOp = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), + getGlobalOp.getName()); + assert(getGlobalOp && "undefined global function"); + auto callee = llvm::dyn_cast(globalOp); + assert(callee && "operation is not a function"); + theCall = builder.create(callLoc, callee, CIRCallArgs); } else { llvm_unreachable("expected call variant to be handled"); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 259f1b9dcbfa..dc2b587b52d3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -614,6 +614,13 @@ class CIRGetGlobalOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::GetGlobalOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { + // FIXME(cir): Premature DCE to avoid lowering stuff we're not using. CIRGen + // should mitigate this and not emit the get_global. + if (op->getUses().empty()) { + rewriter.eraseOp(op); + return mlir::success(); + } + auto type = getTypeConverter()->convertType(op.getType()); auto symbol = op.getName(); rewriter.replaceOpWithNewOp(op, type, symbol); diff --git a/clang/test/CIR/Executables/hello.c b/clang/test/CIR/Executables/hello.c index f81080b35733..ea3415c5df85 100644 --- a/clang/test/CIR/Executables/hello.c +++ b/clang/test/CIR/Executables/hello.c @@ -2,7 +2,7 @@ // RUN: %t | FileCheck %s // REQUIRES: system-linux // REQUIRES: target-linux -int printf(const char *format); +int printf(const char *restrict, ...); int main (void) { printf ("Hello, world!\n"); From a1e45dde9d02427527ebc7a6f69c04e92fe37ba6 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 6 Jun 2023 19:58:47 -0300 Subject: [PATCH 0983/2301] [CIR][Lowering] Lower floating point extension cast ghstack-source-id: 93cc0d72fee8c164536683bef63c4500aacb44a9 Pull Request resolved: https://github.com/llvm/clangir/pull/101 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 17 +++++++++++++++++ clang/test/CIR/CodeGen/cast.cpp | 2 ++ 2 files changed, 19 insertions(+) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index dc2b587b52d3..9e1aae5937cd 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -241,6 +241,23 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } break; } + case mlir::cir::CastKind::floating: { + auto dstTy = castOp.getResult().getType().cast(); + auto srcTy = castOp.getSrc().getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + + if (auto fpSrcTy = srcTy.dyn_cast()) { + if (fpSrcTy.getWidth() > dstTy.getWidth()) + rewriter.replaceOpWithNewOp(castOp, dstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, dstTy, + llvmSrcVal); + return mlir::success(); + } + + return castOp.emitError() << "NYI cast from " << srcTy << " to " << dstTy; + } default: llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 41c27fec5702..35060b58cefd 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -39,6 +39,8 @@ int cStyleCasts_0(unsigned x1, int x2, float x3) { int f = (int)x3; // CHECK: %{{[0-9]+}} = cir.cast(float_to_int, %{{[0-9]+}} : f32), !s32i + double g = (double)x3; // FP extension + // %{{[0-9]+}} = cir.cast(floating, %{{[0-9]+}} : f32), f64 return 0; } From 72545d638a6067ec129f90bd35228083f88d49cb Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 6 Jun 2023 20:21:38 -0300 Subject: [PATCH 0984/2301] [CIR][CIRGen] Implement ptr-to-int and int-to-ptr casts Add required codegen and ABI calls to implement ptr-to-int and int-to-ptr casts in CIR. Also adds lowering for these casts. ghstack-source-id: 16afccc16d1e77284c1cd66f8cc0f2c5b1fa76aa Pull Request resolved: https://github.com/llvm/clangir/pull/102 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 +++- clang/lib/CIR/CodeGen/CIRDataLayout.h | 14 ++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 27 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 23 +++++++++++++--- clang/lib/CIR/CodeGen/CIRGenModule.h | 6 +++++ .../CodeGen/UnimplementedFeatureGuarding.h | 3 +++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 ++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 24 +++++++++++++++++ clang/test/CIR/CodeGen/cast.cpp | 7 ++++- clang/test/CIR/Lowering/cast.cir | 9 +++++++ 10 files changed, 126 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 85a3250bb809..23ef20a1c609 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -48,12 +48,15 @@ def CK_BitCast : I32EnumAttrCase<"bitcast", 4>; def CK_FloatingCast : I32EnumAttrCase<"floating", 5>; def CK_PtrToBoolean : I32EnumAttrCase<"ptr_to_bool", 6>; def CK_FloatToIntegral : I32EnumAttrCase<"float_to_int", 7>; +def CK_IntegralToPointer : I32EnumAttrCase<"int_to_ptr", 8>; +def CK_PointerToIntegral : I32EnumAttrCase<"ptr_to_int", 9>; def CastKind : I32EnumAttr< "CastKind", "cast kind", [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast, - CK_BitCast, CK_FloatingCast, CK_PtrToBoolean, CK_FloatToIntegral]> { + CK_BitCast, CK_FloatingCast, CK_PtrToBoolean, CK_FloatToIntegral, + CK_IntegralToPointer, CK_PointerToIntegral]> { let cppNamespace = "::mlir::cir"; } diff --git a/clang/lib/CIR/CodeGen/CIRDataLayout.h b/clang/lib/CIR/CodeGen/CIRDataLayout.h index b4c1c83995b6..92490b86daf3 100644 --- a/clang/lib/CIR/CodeGen/CIRDataLayout.h +++ b/clang/lib/CIR/CodeGen/CIRDataLayout.h @@ -14,6 +14,7 @@ #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/IR/BuiltinOps.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" namespace cir { @@ -60,6 +61,19 @@ class CIRDataLayout { // Round up to the next alignment boundary. return llvm::alignTo(getTypeStoreSize(Ty), layout.getTypeABIAlignment(Ty)); } + + unsigned getPointerTypeSizeInBits(mlir::Type Ty) const { + assert(Ty.isa() && + "This should only be called with a pointer type"); + return layout.getTypeSizeInBits(Ty); + } + + mlir::Type getIntPtrType(mlir::Type Ty) const { + assert(Ty.isa() && "Expected pointer type"); + auto IntTy = mlir::cir::IntType::get(Ty.getContext(), + getPointerTypeSizeInBits(Ty), false); + return IntTy; + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 37e17bb5f254..c34550da7313 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -15,6 +15,7 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/IR/FPEnv.h" @@ -346,6 +347,32 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::UnaryOpKind::Not, value); } + //===--------------------------------------------------------------------===// + // Cast/Conversion Operators + //===--------------------------------------------------------------------===// + + mlir::Value createCast(mlir::cir::CastKind kind, mlir::Value src, + mlir::Type newTy) { + if (newTy == src.getType()) + return src; + return create(src.getLoc(), newTy, kind, src); + } + + mlir::Value createIntCast(mlir::Value src, mlir::Type newTy) { + return create(src.getLoc(), newTy, + mlir::cir::CastKind::integral, src); + } + + mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy) { + return create(src.getLoc(), newTy, + mlir::cir::CastKind::int_to_ptr, src); + } + + mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy) { + return create(src.getLoc(), newTy, + mlir::cir::CastKind::ptr_to_int, src); + } + mlir::Value createZExtOrBitCast(mlir::Location loc, mlir::Value src, mlir::Type newTy) { if (src.getType() == newTy) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index c367f70c7516..be59b672db8d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1102,10 +1102,25 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!"); return Visit(const_cast(E)); - case CK_IntegralToPointer: - llvm_unreachable("NYI"); - case CK_PointerToIntegral: - llvm_unreachable("NYI"); + case CK_IntegralToPointer: { + auto DestCIRTy = ConvertType(DestTy); + mlir::Value Src = Visit(const_cast(E)); + + // Properly resize by casting to an int of the same size as the pointer. + auto MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestCIRTy); + auto MiddleVal = Builder.createIntCast(Src, MiddleTy); + + if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) + llvm_unreachable("NYI"); + + return Builder.createIntToPtr(MiddleVal, DestCIRTy); + } + case CK_PointerToIntegral: { + assert(!DestTy->isBooleanType() && "bool should use PointerToBool"); + if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) + llvm_unreachable("NYI"); + return Builder.createPtrToInt(Visit(E), ConvertType(DestTy)); + } case CK_ToVoid: { CGF.buildIgnoredExpr(E); return nullptr; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 0ca53b2329b6..f61be4fda3e4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H #define LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H +#include "CIRDataLayout.h" #include "CIRGenBuilder.h" #include "CIRGenTypeCache.h" #include "CIRGenTypes.h" @@ -125,6 +126,11 @@ class CIRGenModule : public CIRGenTypeCache { CIRGenTypes &getTypes() { return genTypes; } const clang::LangOptions &getLangOpts() const { return langOpts; } CIRGenFunction *getCurrCIRGenFun() const { return CurCGF; } + const CIRDataLayout getDataLayout() const { + // FIXME(cir): instead of creating a CIRDataLayout every time, set it as an + // attribute for the CIRModule class. + return {theModule}; + } CIRGenCXXABI &getCXXABI() const { return *ABI; } diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 4bf53ca7203a..72de44fd4bd4 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -31,6 +31,9 @@ struct UnimplementedFeature { static bool addressSpaceInGlobalVar() { return false; } static bool getASTAllocaAddressSpace() { return false; } + // Clang codegen options + static bool strictVTablePointers() { return false; } + // Unhandled global/linkage information. static bool unnamedAddr() { return false; } static bool setComdat() { return false; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 587ad2635aaa..d3178fe71b94 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -301,6 +301,20 @@ LogicalResult CastOp::verify() { return emitOpError() << "requires !IntegerType for result"; return success(); } + case cir::CastKind::int_to_ptr: { + if (!srcType.dyn_cast()) + return emitOpError() << "requires integer for source"; + if (!resType.dyn_cast()) + return emitOpError() << "requires pointer for result"; + return success(); + } + case cir::CastKind::ptr_to_int: { + if (!srcType.dyn_cast()) + return emitOpError() << "requires pointer for source"; + if (!resType.dyn_cast()) + return emitOpError() << "requires integer for result"; + return success(); + } } llvm_unreachable("Unknown CastOp kind?"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 9e1aae5937cd..9911eceede61 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -258,6 +258,22 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return castOp.emitError() << "NYI cast from " << srcTy << " to " << dstTy; } + case mlir::cir::CastKind::int_to_ptr: { + auto dstTy = castOp.getType().cast(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case mlir::cir::CastKind::ptr_to_int: { + auto dstTy = castOp.getType().cast(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } default: llvm_unreachable("NYI"); } @@ -499,6 +515,14 @@ class CIRConstantLowering op.getValue().cast().getValue()); } else if (op.getType().isa()) { attr = op.getValue(); + } else if (op.getType().isa()) { + // Optimize with dedicated LLVM op for null pointers. + if (op.getValue().isa()) { + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType())); + return mlir::success(); + } + attr = op.getValue(); } else return op.emitError("unsupported constant type"); diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 35060b58cefd..9e3fd9f2e1c6 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -17,7 +17,7 @@ unsigned char cxxstaticcast_0(unsigned int x) { // CHECK: } -int cStyleCasts_0(unsigned x1, int x2, float x3) { +int cStyleCasts_0(unsigned x1, int x2, float x3, short x4) { // CHECK: cir.func @_{{.*}}cStyleCasts_0{{.*}} char a = (char)x1; // truncate @@ -42,6 +42,11 @@ int cStyleCasts_0(unsigned x1, int x2, float x3) { double g = (double)x3; // FP extension // %{{[0-9]+}} = cir.cast(floating, %{{[0-9]+}} : f32), f64 + long l = (long)(void*)x4; // Must sign extend before casting to pointer + // CHECK: %[[TMP:[0-9]+]] = cir.cast(integral, %{{[0-9]+}} : !s16i), !u64i + // CHECK: %[[TMP2:[0-9]+]] = cir.cast(int_to_ptr, %[[TMP]] : !u64i), !cir.ptr + // CHECK: %{{[0-9]+}} = cir.cast(ptr_to_int, %[[TMP2]] : !cir.ptr), !s64i + return 0; } diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 71cb8593610f..7a71bde34a9c 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -6,6 +6,7 @@ !s8i = !cir.int !u32i = !cir.int !u8i = !cir.int +!u64i = !cir.int module { cir.func @foo(%arg0: !s32i) -> !s32i { @@ -31,6 +32,7 @@ module { // MLIR: llvm.func @cStyleCasts(%arg0: i32, %arg1: i32) -> i32 { %0 = cir.alloca !u32i, cir.ptr , ["x1", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, cir.ptr , ["x2", init] {alignment = 4 : i64} + %20 = cir.alloca !s16i, cir.ptr , ["x4", init] {alignment = 2 : i64} %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} %3 = cir.alloca !s8i, cir.ptr , ["a", init] {alignment = 1 : i64} %4 = cir.alloca !s16i, cir.ptr , ["b", init] {alignment = 2 : i64} @@ -59,6 +61,13 @@ module { %17 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr cir.store %17, %8 : !cir.ptr, cir.ptr > + %21 = cir.load %20 : cir.ptr , !s16i + %22 = cir.cast(integral, %21 : !s16i), !u64i + // MLIR: %[[TMP:[0-9]+]] = llvm.sext %{{[0-9]+}} : i16 to i64 + %23 = cir.cast(int_to_ptr, %22 : !u64i), !cir.ptr + // MLIR: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr + %24 = cir.cast(ptr_to_int, %23 : !cir.ptr), !s32i + // MLIR: %{{[0-9]+}} = llvm.ptrtoint %[[TMP2]] : !llvm.ptr to i32 %18 = cir.const(#cir.int<0> : !s32i) : !s32i cir.store %18, %2 : !s32i, cir.ptr %19 = cir.load %2 : cir.ptr , !s32i From 79397826c980c7808d3f281440b5da3d081b9d03 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 6 Jun 2023 20:21:38 -0300 Subject: [PATCH 0985/2301] [CIR][Lowering] Patch If without Else lowering Lowering if operations without an else block would crash during. Some steps in the IfOp lowering were updated to only be applied if the else block is not empty. ghstack-source-id: c37f341d87537d64d73b3d7ce72f134b09c39b66 Pull Request resolved: https://github.com/llvm/clangir/pull/104 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 32 ++++++++++++------- clang/test/CIR/Lowering/if.cir | 28 +++++++++++++--- 2 files changed, 44 insertions(+), 16 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 9911eceede61..b57bdc4bafac 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -290,8 +290,8 @@ class CIRIfLowering : public mlir::OpConversionPattern { matchAndRewrite(mlir::cir::IfOp ifOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::OpBuilder::InsertionGuard guard(rewriter); - auto loc = ifOp.getLoc(); + auto emptyElse = ifOp.getElseRegion().empty(); auto *currentBlock = rewriter.getInsertionBlock(); auto *remainingOpsBlock = @@ -318,10 +318,16 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(continueBlock); - // Inline then region - auto *elseBeforeBody = &ifOp.getElseRegion().front(); - auto *elseAfterBody = &ifOp.getElseRegion().back(); - rewriter.inlineRegionBefore(ifOp.getElseRegion(), thenAfterBody); + // Has else region: inline it. + mlir::Block *elseBeforeBody = nullptr; + mlir::Block *elseAfterBody = nullptr; + if (!emptyElse) { + elseBeforeBody = &ifOp.getElseRegion().front(); + elseAfterBody = &ifOp.getElseRegion().back(); + rewriter.inlineRegionBefore(ifOp.getElseRegion(), thenAfterBody); + } else { + elseBeforeBody = elseAfterBody = continueBlock; + } rewriter.setInsertionPointToEnd(currentBlock); auto trunc = rewriter.create(loc, rewriter.getI1Type(), @@ -329,13 +335,15 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.create(loc, trunc.getRes(), thenBeforeBody, elseBeforeBody); - rewriter.setInsertionPointToEnd(elseAfterBody); - if (auto elseYieldOp = - dyn_cast(elseAfterBody->getTerminator())) { - rewriter.replaceOpWithNewOp( - elseYieldOp, elseYieldOp.getArgs(), continueBlock); - } else if (!dyn_cast(elseAfterBody->getTerminator())) { - llvm_unreachable("what are we terminating with?"); + if (!emptyElse) { + rewriter.setInsertionPointToEnd(elseAfterBody); + if (auto elseYieldOp = + dyn_cast(elseAfterBody->getTerminator())) { + rewriter.replaceOpWithNewOp( + elseYieldOp, elseYieldOp.getArgs(), continueBlock); + } else if (!dyn_cast(elseAfterBody->getTerminator())) { + llvm_unreachable("what are we terminating with?"); + } } rewriter.replaceOp(ifOp, continueBlock->getArguments()); diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index c7ed945d0892..f70460347a5e 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -14,10 +14,8 @@ module { } cir.return %arg0 : !s32i } -} -// MLIR: module { -// MLIR-NEXT: llvm.func @foo(%arg0: i32) -> i32 { +// MLIR: llvm.func @foo(%arg0: i32) -> i32 { // MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %1 = llvm.icmp "ne" %arg0, %0 : i32 // MLIR-NEXT: %2 = llvm.zext %1 : i1 to i8 @@ -32,7 +30,6 @@ module { // MLIR-NEXT: ^bb3: // no predecessors // MLIR-NEXT: llvm.return %arg0 : i32 // MLIR-NEXT: } -// MLIR-NEXT: } // LLVM: define i32 @foo(i32 %0) { // LLVM-NEXT: %2 = icmp ne i32 %0, 0 @@ -49,3 +46,26 @@ module { // LLVM-NEXT: 7: // LLVM-NEXT: ret i32 %0 // LLVM-NEXT: } + + cir.func @onlyIf(%arg0: !s32i) -> !s32i { + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + cir.if %4 { + %5 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.return %5 : !s32i + } + cir.return %arg0 : !s32i + } + + // MLIR: llvm.func @onlyIf(%arg0: i32) -> i32 { + // MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: %1 = llvm.icmp "ne" %arg0, %0 : i32 + // MLIR-NEXT: %2 = llvm.zext %1 : i1 to i8 + // MLIR-NEXT: %3 = llvm.trunc %2 : i8 to i1 + // MLIR-NEXT: llvm.cond_br %3, ^bb1, ^bb2 + // MLIR-NEXT: ^bb1: // pred: ^bb0 + // MLIR-NEXT: %4 = llvm.mlir.constant(1 : i32) : i32 + // MLIR-NEXT: llvm.return %4 : i32 + // MLIR-NEXT: ^bb2: // pred: ^bb0 + // MLIR-NEXT: llvm.return %arg0 : i32 + // MLIR-NEXT: } +} From ed622d80edc895c665e1e918dc2d70ed88d24d34 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 6 Jun 2023 20:21:39 -0300 Subject: [PATCH 0986/2301] [CIR][Lowering][NFC] Refactor CmpOp lowering Improves code reusability and readability when lowering CIR's CmpOp to some LLVM comparison op. ghstack-source-id: 4975d9ff70d838069b4ce01f125be6178df18fb2 Pull Request resolved: https://github.com/llvm/clangir/pull/105 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 228 ++++++------------ 1 file changed, 69 insertions(+), 159 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b57bdc4bafac..fdde92d5547c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -22,6 +22,7 @@ #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/Dialect/LLVMIR/Transforms/Passes.h" @@ -34,6 +35,7 @@ #include "mlir/IR/IRMapping.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" +#include "mlir/Support/LogicalResult.h" #include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" @@ -341,7 +343,8 @@ class CIRIfLowering : public mlir::OpConversionPattern { dyn_cast(elseAfterBody->getTerminator())) { rewriter.replaceOpWithNewOp( elseYieldOp, elseYieldOp.getArgs(), continueBlock); - } else if (!dyn_cast(elseAfterBody->getTerminator())) { + } else if (!dyn_cast( + elseAfterBody->getTerminator())) { llvm_unreachable("what are we terminating with?"); } } @@ -899,170 +902,77 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; + mlir::LLVM::ICmpPredicate + convertToICmpPredicate(mlir::cir::CmpOpKind kind) const { + using CIR = mlir::cir::CmpOpKind; + using LLVMICmp = mlir::LLVM::ICmpPredicate; + + switch (kind) { + case CIR::eq: + return LLVMICmp::eq; + case CIR::ne: + return LLVMICmp::ne; + case CIR::lt: + return LLVMICmp::ult; + case CIR::le: + return LLVMICmp::ule; + case CIR::gt: + return LLVMICmp::ugt; + case CIR::ge: + return LLVMICmp::uge; + } + llvm_unreachable("Unknown CmpOpKind"); + } + + mlir::LLVM::FCmpPredicate + convertToFCmpPredicate(mlir::cir::CmpOpKind kind) const { + using CIR = mlir::cir::CmpOpKind; + using LLVMFCmp = mlir::LLVM::FCmpPredicate; + + switch (kind) { + case CIR::eq: + return LLVMFCmp::ueq; + case CIR::ne: + return LLVMFCmp::une; + case CIR::lt: + return LLVMFCmp::ult; + case CIR::le: + return LLVMFCmp::ule; + case CIR::gt: + return LLVMFCmp::ugt; + case CIR::ge: + return LLVMFCmp::uge; + } + llvm_unreachable("Unknown CmpOpKind"); + } + mlir::LogicalResult matchAndRewrite(mlir::cir::CmpOp cmpOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto type = adaptor.getLhs().getType(); - auto i1Type = - mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); - auto destType = getTypeConverter()->convertType(cmpOp.getType()); - - switch (adaptor.getKind()) { - case mlir::cir::CmpOpKind::gt: { - if (type.isa()) { - mlir::LLVM::ICmpPredicate cmpIType; - if (!type.isSignlessInteger()) - llvm_unreachable("integer type not supported in CIR yet"); - cmpIType = mlir::LLVM::ICmpPredicate::ugt; - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), - adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else if (type.isa()) { - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::FCmpPredicateAttr::get(getContext(), - mlir::LLVM::FCmpPredicate::ugt), - adaptor.getLhs(), adaptor.getRhs(), - // TODO(CIR): These fastmath flags need to not be defaulted. - mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } - case mlir::cir::CmpOpKind::ge: { - if (type.isa()) { - mlir::LLVM::ICmpPredicate cmpIType; - if (!type.isSignlessInteger()) - llvm_unreachable("integer type not supported in CIR yet"); - cmpIType = mlir::LLVM::ICmpPredicate::uge; - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), - adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else if (type.isa()) { - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::FCmpPredicateAttr::get(getContext(), - mlir::LLVM::FCmpPredicate::uge), - adaptor.getLhs(), adaptor.getRhs(), - mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } - case mlir::cir::CmpOpKind::lt: { - if (type.isa()) { - mlir::LLVM::ICmpPredicate cmpIType; - if (!type.isSignlessInteger()) - llvm_unreachable("integer type not supported in CIR yet"); - cmpIType = mlir::LLVM::ICmpPredicate::ult; - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), - adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else if (type.isa()) { - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::FCmpPredicateAttr::get(getContext(), - mlir::LLVM::FCmpPredicate::ult), - adaptor.getLhs(), adaptor.getRhs(), - mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } - case mlir::cir::CmpOpKind::le: { - if (type.isa()) { - mlir::LLVM::ICmpPredicate cmpIType; - if (!type.isSignlessInteger()) - llvm_unreachable("integer type not supported in CIR yet"); - cmpIType = mlir::LLVM::ICmpPredicate::ule; - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::ICmpPredicateAttr::get(getContext(), cmpIType), - adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else if (type.isa()) { - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::FCmpPredicateAttr::get(getContext(), - mlir::LLVM::FCmpPredicate::ule), - adaptor.getLhs(), adaptor.getRhs(), - mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } - case mlir::cir::CmpOpKind::eq: { - if (type.isa()) { - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::ICmpPredicateAttr::get(getContext(), - mlir::LLVM::ICmpPredicate::eq), - adaptor.getLhs(), adaptor.getRhs()); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else if (type.isa()) { - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::FCmpPredicateAttr::get(getContext(), - mlir::LLVM::FCmpPredicate::ueq), - adaptor.getLhs(), adaptor.getRhs(), - mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } - case mlir::cir::CmpOpKind::ne: { - if (type.isa()) { - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::ICmpPredicateAttr::get(getContext(), - mlir::LLVM::ICmpPredicate::ne), - adaptor.getLhs(), adaptor.getRhs()); - - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else if (type.isa()) { - auto cmp = rewriter.create( - cmpOp.getLoc(), i1Type, - mlir::LLVM::FCmpPredicateAttr::get(getContext(), - mlir::LLVM::FCmpPredicate::une), - adaptor.getLhs(), adaptor.getRhs(), - mlir::LLVM::FastmathFlagsAttr::get(cmpOp.getContext(), {})); - rewriter.replaceOpWithNewOp(cmpOp, destType, - cmp.getRes()); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } + mlir::Value llResult; + + // Lower to LLVM comparison op. + if (auto intTy = type.dyn_cast()) { + auto kind = convertToICmpPredicate(cmpOp.getKind()); + llResult = rewriter.create( + cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else if (type.isa()) { + auto kind = convertToFCmpPredicate(cmpOp.getKind()); + llResult = rewriter.create( + cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else { + return cmpOp.emitError() << "unsupported type for CmpOp: " << type; } - return mlir::LogicalResult::success(); + // LLVM comparison ops return i1, but cir::CmpOp returns the same type as + // the LHS value. Since this return value can be used later, we need to + // restore the type with the extension below. + auto llResultTy = getTypeConverter()->convertType(cmpOp.getType()); + rewriter.replaceOpWithNewOp(cmpOp, llResultTy, + llResult); + + return mlir::success(); } }; From 832f08af2122f5660a44b3887cb291b2723b9c04 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 6 Jun 2023 20:21:39 -0300 Subject: [PATCH 0987/2301] [CIR][Lowering] Add support for signed comparisons Updates CIR's CmpOp lowering to use CIR's custom cir::IntType, allowing it to handle signed comparisons. ghstack-source-id: e4709315db1a39853fe978ef9771ab727ad9f9d7 Pull Request resolved: https://github.com/llvm/clangir/pull/106 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 20 +++---- clang/test/CIR/Lowering/cmp.cir | 58 +++++++++---------- clang/test/CIR/Lowering/dot.cir | 4 +- clang/test/CIR/Lowering/for.cir | 4 +- 4 files changed, 43 insertions(+), 43 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index fdde92d5547c..c543c65c597a 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -219,7 +219,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { mlir::cir::IntAttr::get(castOp.getSrc().getType(), 0)); rewriter.replaceOpWithNewOp( castOp, mlir::cir::BoolType::get(getContext()), - mlir::cir::CmpOpKind::ne, src, zero); + mlir::cir::CmpOpKind::ne, castOp.getSrc(), zero); break; } case mlir::cir::CastKind::integral: { @@ -902,8 +902,8 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; - mlir::LLVM::ICmpPredicate - convertToICmpPredicate(mlir::cir::CmpOpKind kind) const { + mlir::LLVM::ICmpPredicate convertToICmpPredicate(mlir::cir::CmpOpKind kind, + bool isSigned) const { using CIR = mlir::cir::CmpOpKind; using LLVMICmp = mlir::LLVM::ICmpPredicate; @@ -913,13 +913,13 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { case CIR::ne: return LLVMICmp::ne; case CIR::lt: - return LLVMICmp::ult; + return (isSigned ? LLVMICmp::slt : LLVMICmp::ult); case CIR::le: - return LLVMICmp::ule; + return (isSigned ? LLVMICmp::sle : LLVMICmp::ule); case CIR::gt: - return LLVMICmp::ugt; + return (isSigned ? LLVMICmp::sgt : LLVMICmp::ugt); case CIR::ge: - return LLVMICmp::uge; + return (isSigned ? LLVMICmp::sge : LLVMICmp::uge); } llvm_unreachable("Unknown CmpOpKind"); } @@ -949,12 +949,12 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::CmpOp cmpOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto type = adaptor.getLhs().getType(); + auto type = cmpOp.getLhs().getType(); mlir::Value llResult; // Lower to LLVM comparison op. - if (auto intTy = type.dyn_cast()) { - auto kind = convertToICmpPredicate(cmpOp.getKind()); + if (auto intTy = type.dyn_cast()) { + auto kind = convertToICmpPredicate(cmpOp.getKind(), intTy.isSigned()); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { diff --git a/clang/test/CIR/Lowering/cmp.cir b/clang/test/CIR/Lowering/cmp.cir index f6ad3bec44d1..06a24cf56308 100644 --- a/clang/test/CIR/Lowering/cmp.cir +++ b/clang/test/CIR/Lowering/cmp.cir @@ -1,31 +1,31 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM - +!s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a"] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b"] {alignment = 4 : i64} + %0 = cir.alloca !s32i, cir.ptr , ["a"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b"] {alignment = 4 : i64} %2 = cir.alloca f32, cir.ptr , ["c"] {alignment = 4 : i64} %3 = cir.alloca f32, cir.ptr , ["d"] {alignment = 4 : i64} %4 = cir.alloca !cir.bool, cir.ptr , ["e"] {alignment = 1 : i64} - %5 = cir.load %0 : cir.ptr , i32 - %6 = cir.load %1 : cir.ptr , i32 - %7 = cir.cmp(gt, %5, %6) : i32, !cir.bool - %8 = cir.load %0 : cir.ptr , i32 - %9 = cir.load %1 : cir.ptr , i32 - %10 = cir.cmp(eq, %8, %9) : i32, !cir.bool - %11 = cir.load %0 : cir.ptr , i32 - %12 = cir.load %1 : cir.ptr , i32 - %13 = cir.cmp(lt, %11, %12) : i32, !cir.bool - %14 = cir.load %0 : cir.ptr , i32 - %15 = cir.load %1 : cir.ptr , i32 - %16 = cir.cmp(ge, %14, %15) : i32, !cir.bool - %17 = cir.load %0 : cir.ptr , i32 - %18 = cir.load %1 : cir.ptr , i32 - %19 = cir.cmp(ne, %17, %18) : i32, !cir.bool - %20 = cir.load %0 : cir.ptr , i32 - %21 = cir.load %1 : cir.ptr , i32 - %22 = cir.cmp(le, %20, %21) : i32, !cir.bool + %5 = cir.load %0 : cir.ptr , !s32i + %6 = cir.load %1 : cir.ptr , !s32i + %7 = cir.cmp(gt, %5, %6) : !s32i, !cir.bool + %8 = cir.load %0 : cir.ptr , !s32i + %9 = cir.load %1 : cir.ptr , !s32i + %10 = cir.cmp(eq, %8, %9) : !s32i, !cir.bool + %11 = cir.load %0 : cir.ptr , !s32i + %12 = cir.load %1 : cir.ptr , !s32i + %13 = cir.cmp(lt, %11, %12) : !s32i, !cir.bool + %14 = cir.load %0 : cir.ptr , !s32i + %15 = cir.load %1 : cir.ptr , !s32i + %16 = cir.cmp(ge, %14, %15) : !s32i, !cir.bool + %17 = cir.load %0 : cir.ptr , !s32i + %18 = cir.load %1 : cir.ptr , !s32i + %19 = cir.cmp(ne, %17, %18) : !s32i, !cir.bool + %20 = cir.load %0 : cir.ptr , !s32i + %21 = cir.load %1 : cir.ptr , !s32i + %22 = cir.cmp(le, %20, %21) : !s32i, !cir.bool %23 = cir.load %2 : cir.ptr , f32 %24 = cir.load %3 : cir.ptr , f32 %25 = cir.cmp(gt, %23, %24) : f32, !cir.bool @@ -48,12 +48,12 @@ module { } } -// MLIR: = llvm.icmp "ugt" +// MLIR: = llvm.icmp "sgt" // MLIR: = llvm.icmp "eq" -// MLIR: = llvm.icmp "ult" -// MLIR: = llvm.icmp "uge" +// MLIR: = llvm.icmp "slt" +// MLIR: = llvm.icmp "sge" // MLIR: = llvm.icmp "ne" -// MLIR: = llvm.icmp "ule" +// MLIR: = llvm.icmp "sle" // MLIR: = llvm.fcmp "ugt" // MLIR: = llvm.fcmp "ueq" // MLIR: = llvm.fcmp "ult" @@ -61,12 +61,12 @@ module { // MLIR: = llvm.fcmp "une" // MLIR: = llvm.fcmp "ule" -// LLVM: icmp ugt i32 +// LLVM: icmp sgt i32 // LLVM: icmp eq i32 -// LLVM: icmp ult i32 -// LLVM: icmp uge i32 +// LLVM: icmp slt i32 +// LLVM: icmp sge i32 // LLVM: icmp ne i32 -// LLVM: icmp ule i32 +// LLVM: icmp sle i32 // LLVM: fcmp ugt float // LLVM: fcmp ueq float // LLVM: fcmp ult float diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 2260d009efa9..22407d61e73e 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -83,7 +83,7 @@ module { // MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb6 // MLIR-NEXT: %14 = llvm.load %12 : !llvm.ptr // MLIR-NEXT: %15 = llvm.load %5 : !llvm.ptr -// MLIR-NEXT: %16 = llvm.icmp "ult" %14, %15 : i32 +// MLIR-NEXT: %16 = llvm.icmp "slt" %14, %15 : i32 // MLIR-NEXT: %17 = llvm.zext %16 : i1 to i32 // MLIR-NEXT: %18 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %19 = llvm.icmp "ne" %17, %18 : i32 @@ -144,7 +144,7 @@ module { // LLVM-NEXT: 11: ; preds = %24, %9 // LLVM-NEXT: %12 = load i32, ptr %10, align 4 // LLVM-NEXT: %13 = load i32, ptr %6, align 4 -// LLVM-NEXT: %14 = icmp ult i32 %12, %13 +// LLVM-NEXT: %14 = icmp slt i32 %12, %13 // LLVM-NEXT: %15 = zext i1 %14 to i32 // LLVM-NEXT: %16 = icmp ne i32 %15, 0 // LLVM-NEXT: %17 = zext i1 %16 to i8 diff --git a/clang/test/CIR/Lowering/for.cir b/clang/test/CIR/Lowering/for.cir index efec3d58de9f..659217659789 100644 --- a/clang/test/CIR/Lowering/for.cir +++ b/clang/test/CIR/Lowering/for.cir @@ -39,7 +39,7 @@ module { // MLIR-NEXT: ^bb1: // 2 preds: ^bb0, ^bb5 // MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr // MLIR-NEXT: %4 = llvm.mlir.constant(10 : i32) : i32 -// MLIR-NEXT: %5 = llvm.icmp "ult" %3, %4 : i32 +// MLIR-NEXT: %5 = llvm.icmp "slt" %3, %4 : i32 // MLIR-NEXT: %6 = llvm.zext %5 : i1 to i32 // MLIR-NEXT: %7 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %8 = llvm.icmp "ne" %6, %7 : i32 @@ -70,7 +70,7 @@ module { // LLVM-EMPTY: // LLVM-NEXT: 2: // LLVM-NEXT: %3 = load i32, ptr %1, align 4 -// LLVM-NEXT: %4 = icmp ult i32 %3, 10 +// LLVM-NEXT: %4 = icmp slt i32 %3, 10 // LLVM-NEXT: %5 = zext i1 %4 to i32 // LLVM-NEXT: %6 = icmp ne i32 %5, 0 // LLVM-NEXT: %7 = zext i1 %6 to i8 From 74b12984905d86100322c695c4aeb63c81ffcf77 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 6 Jun 2023 20:46:21 -0300 Subject: [PATCH 0988/2301] [CIR][NFC] Add stdarg builtin CIR Ops Adds custom CIR operations to represent builtin calls from the stdarg header. These include `va_start`, `va_end`, `va_copy`, and `va_arg`. ghstack-source-id: b861b23ba2c414e63ed83635dbc18bb2bf85a451 Pull Request resolved: https://github.com/llvm/clangir/pull/93 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 32 ++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 23ef20a1c609..0b971b7d3e6c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1649,4 +1649,36 @@ def AwaitOp : CIR_Op<"await", let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// Variadic Operations +//===----------------------------------------------------------------------===// + +def VAStartOp : CIR_Op<"va.start">, Arguments<(ins CIR_PointerType:$arg_list)> { + let summary = "Starts a variable argument list"; + let assemblyFormat = "$arg_list attr-dict `:` type(operands)"; + let hasVerifier = 0; +} + +def VAEndOp : CIR_Op<"va.end">, Arguments<(ins CIR_PointerType:$arg_list)> { + let summary = "Ends a variable argument list"; + let assemblyFormat = "$arg_list attr-dict `:` type(operands)"; + let hasVerifier = 0; +} + +def VACopyOp : CIR_Op<"va.copy">, + Arguments<(ins CIR_PointerType:$dst_list, + CIR_PointerType:$src_list)> { + let summary = "Copies a variable argument list"; + let assemblyFormat = "$src_list `to` $dst_list attr-dict `:` type(operands)"; + let hasVerifier = 0; +} + +def VAArgOp : CIR_Op<"va.arg">, + Results<(outs AnyType:$result)>, + Arguments<(ins CIR_PointerType:$arg_list)> { + let summary = "Fetches next variadic element as a given type"; + let assemblyFormat = "$arg_list attr-dict `:` functional-type(operands, $result)"; + let hasVerifier = 0; +} + #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS From d360acc4aa874fe6ff091a69029eb485502479fb Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 6 Jun 2023 20:46:21 -0300 Subject: [PATCH 0989/2301] [CIR][CIRGen] Add codegen for stdarg builtins Implement the necessary codegen to emit the va_start, va_end, va_arg, and va_copy builtins as custom CIR instructions. ghstack-source-id: 4a73b84cb680ad81dc9f3924cd35bd50c70d4a65 Pull Request resolved: https://github.com/llvm/clangir/pull/94 --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 21 +++++++++++++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 12 +++++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 +++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 14 ++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 6 ++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 22 +++++++++++++ .../CodeGen/UnimplementedFeatureGuarding.h | 3 ++ clang/test/CIR/CodeGen/variadics.c | 31 +++++++++++++++++-- 8 files changed, 110 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index df7cc6bce6df..3a764853b578 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -19,6 +19,7 @@ // TODO(cir): we shouldn't need this but we currently reuse intrinsic IDs for // convenience. +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/IR/Intrinsics.h" #include "clang/AST/GlobalDecl.h" @@ -349,6 +350,26 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, } break; + // C stdarg builtins. + case Builtin::BI__builtin_stdarg_start: + case Builtin::BI__builtin_va_start: + case Builtin::BI__va_start: { + auto vaList = buildScalarExpr(E->getArg(0)); + builder.create(vaList.getLoc(), vaList); + return {}; + } + case Builtin::BI__builtin_va_end: { + auto vaList = buildVAListRef(E->getArg(0)).getPointer(); + builder.create(vaList.getLoc(), vaList); + return {}; + } + case Builtin::BI__builtin_va_copy: { + auto dstPtr = buildVAListRef(E->getArg(0)).getPointer(); + auto srcPtr = buildVAListRef(E->getArg(1)).getPointer(); + builder.create(dstPtr.getLoc(), dstPtr, srcPtr); + return {}; + } + // C++ std:: builtins. case Builtin::BImove: case Builtin::BImove_if_noexcept: diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 39190f3fe304..77206c5d73dc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1144,3 +1144,15 @@ RValue CallArg::getRValue(CIRGenFunction &CGF, mlir::Location loc) const { IsUsed = true; return RValue::getAggregate(Copy.getAddress()); } + +/* VarArg handling */ + +// FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. We +// need to decide how to handle va_arg target-specific codegen. +mlir::Value CIRGenFunction::buildVAArg(VAArgExpr *VE, Address &VAListAddr) { + assert(!VE->isMicrosoftABI() && "NYI"); + auto loc = CGM.getLoc(VE->getExprLoc()); + auto type = ConvertType(VE->getType()); + auto vaList = buildVAListRef(VE->getSubExpr()).getPointer(); + return builder.create(loc, type, vaList); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 1d85df54a497..80a54ac84cbd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -661,6 +661,10 @@ Address CIRGenFunction::buildPointerWithAlignment(const Expr *E, case CK_LValueToRValue: break; + // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo. + case CK_ArrayToPointerDecay: + return buildArrayToPointerDecay(CE->getSubExpr()); + case CK_UncheckedDerivedToBase: case CK_DerivedToBase: { // TODO: Support accesses to members of base classes in TBAA. For now, we diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index be59b672db8d..e8cac8e93ec3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -551,7 +551,7 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitAbstractConditionalOperator(const AbstractConditionalOperator *E); mlir::Value VisitChooseExpr(ChooseExpr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitVAArgExpr(VAArgExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitVAArgExpr(VAArgExpr *VE); mlir::Value VisitObjCStringLiteral(const ObjCStringLiteral *E) { llvm_unreachable("NYI"); } @@ -1960,3 +1960,15 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp, ResTy); } + +mlir::Value ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { + QualType Ty = VE->getType(); + + if (Ty->isVariablyModifiedType()) + assert(!UnimplementedFeature::variablyModifiedTypeEmission() && "NYI"); + + Address ArgValue = Address::invalid(); + mlir::Value Val = CGF.buildVAArg(VE, ArgValue); + + return Val; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 95f0d71798c3..142dafaf0fa9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1249,3 +1249,9 @@ void CIRGenFunction::buildDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init) { assert(!UnimplementedFeature::generateDebugInfo()); } + +Address CIRGenFunction::buildVAListRef(const Expr* E) { + if (getContext().getBuiltinVaListType()->isArrayType()) + return buildPointerWithAlignment(E); + return buildLValue(E).getAddress(); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9b71e9cfe501..40345b529097 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -803,6 +803,28 @@ class CIRGenFunction : public CIRGenTypeCache { RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation Loc); + // Build a "reference" to a va_list; this is either the address or the value + // of the expression, depending on how va_list is defined. + Address buildVAListRef(const Expr *E); + + /// Emits a call to an LLVM variable-argument intrinsic, either + /// \c llvm.va_start or \c llvm.va_end. + /// \param ArgValue A reference to the \c va_list as emitted by either + /// \c EmitVAListRef or \c EmitMSVAListRef. + /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise, + /// calls \c llvm.va_end. + mlir::cir::CallOp buildVAStartEnd(mlir::Value ArgValue, bool IsStart); + + /// Generate code to get an argument from the passed in pointer + /// and update it accordingly. + /// \param VE The \c VAArgExpr for which to generate code. + /// \param VAListAddr Receives a reference to the \c va_list as emitted by + /// either \c EmitVAListRef or \c EmitMSVAListRef. + /// \returns SSA value with the argument. + // FIXME: We should be able to get rid of this method and use the va_arg + // instruction in LLVM instead once it works well enough. + mlir::Value buildVAArg(VAArgExpr *VE, Address &VAListAddr); + /// Given an expression that represents a value lvalue, this method emits the /// address of the lvalue, then loads the result as an rvalue, returning the /// rvalue. diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 72de44fd4bd4..2263c12b09bc 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -65,6 +65,9 @@ struct UnimplementedFeature { // Coroutines static bool unhandledException() { return false; } + // Missing Emissions + static bool variablyModifiedTypeEmission() { return false; } + // Clang early struct optimizations static bool shouldUseBZeroPlusStoresToInitialize() { return false; } static bool shouldUseMemSetToInitialize() { return false; } diff --git a/clang/test/CIR/CodeGen/variadics.c b/clang/test/CIR/CodeGen/variadics.c index 16c721090ab2..5d4fd447b201 100644 --- a/clang/test/CIR/CodeGen/variadics.c +++ b/clang/test/CIR/CodeGen/variadics.c @@ -3,8 +3,35 @@ // RUN: %clang_cc1 -x c++ -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -int average(int count, ...); -// CHECK: cir.func private @{{.*}}average{{.*}}(!s32i, ...) -> !s32i +typedef __builtin_va_list va_list; + +#define va_start(ap, param) __builtin_va_start(ap, param) +#define va_end(ap) __builtin_va_end(ap) +#define va_arg(ap, type) __builtin_va_arg(ap, type) +#define va_copy(dst, src) __builtin_va_copy(dst, src) + +// CHECK: [[VALISTTYPE:!.+va_list_.+]] = !cir.struct<"struct.__va_list_tag" + +int average(int count, ...) { +// CHECK: cir.func @{{.*}}average{{.*}}(%arg0: !s32i loc({{.+}}), ...) -> !s32i + va_list args, args_copy; + va_start(args, count); + // CHECK: cir.va.start %{{[0-9]+}} : !cir.ptr<[[VALISTTYPE]]> + + va_copy(args_copy, args); + // CHECK: cir.va.copy %{{[0-9]+}} to %{{[0-9]+}} : !cir.ptr<[[VALISTTYPE]]>, !cir.ptr<[[VALISTTYPE]]> + + int sum = 0; + for(int i = 0; i < count; i++) { + sum += va_arg(args, int); + // CHECK: %{{[0-9]+}} = cir.va.arg %{{[0-9]+}} : (!cir.ptr<[[VALISTTYPE]]>) -> !s32i + } + + va_end(args); + // CHECK: cir.va.end %{{[0-9]+}} : !cir.ptr<[[VALISTTYPE]]> + + return count > 0 ? sum / count : 0; +} int test(void) { return average(5, 1, 2, 3, 4, 5); From 1412327442cf637d677c3a8a9f1b16edbbd08b4d Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 6 Jun 2023 20:46:22 -0300 Subject: [PATCH 0990/2301] [CIR][Lowering] Partially lower variadic builtins Implement lowering steps for va_start, va_end, and va_copy. The va_arg was not implemented because it requires ABI-specific lowering. ghstack-source-id: 1ab2923027143aa28bb7361b884a5c8ee04cfbc9 Pull Request resolved: https://github.com/llvm/clangir/pull/95 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 74 ++++++++++++++++++- clang/test/CIR/Lowering/variadics.cir | 40 ++++++++++ 2 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/variadics.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index c543c65c597a..b24717eeb9d3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -49,6 +49,7 @@ #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" #include using namespace cir; @@ -544,6 +545,66 @@ class CIRConstantLowering } }; +class CIRVAStartLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VAStartOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto i8PtrTy = mlir::LLVM::LLVMPointerType::get(getContext()); + auto vaList = rewriter.create( + op.getLoc(), i8PtrTy, adaptor.getOperands().front()); + rewriter.replaceOpWithNewOp(op, vaList); + return mlir::success(); + } +}; + +class CIRVAEndLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VAEndOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto i8PtrTy = mlir::LLVM::LLVMPointerType::get(getContext()); + auto vaList = rewriter.create( + op.getLoc(), i8PtrTy, adaptor.getOperands().front()); + rewriter.replaceOpWithNewOp(op, vaList); + return mlir::success(); + } +}; + +class CIRVACopyLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VACopyOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto i8PtrTy = mlir::LLVM::LLVMPointerType::get(getContext()); + auto dstList = rewriter.create( + op.getLoc(), i8PtrTy, adaptor.getOperands().front()); + auto srcList = rewriter.create( + op.getLoc(), i8PtrTy, adaptor.getOperands().back()); + rewriter.replaceOpWithNewOp(op, dstList, srcList); + return mlir::success(); + } +}; + +class CIRVAArgLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VAArgOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + return op.emitError("cir.vaarg lowering is NYI"); + } +}; + class CIRFuncLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -997,7 +1058,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRBinOpLowering, CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, CIRScopeOpLowering, CIRCastOpLowering, CIRIfLowering, - CIRGlobalOpLowering, CIRGetGlobalOpLowering>( + CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, + CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering>( converter, patterns.getContext()); } @@ -1018,6 +1080,16 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { // LLVM doesn't work with signed types, so we drop the CIR signs here. return mlir::IntegerType::get(type.getContext(), type.getWidth()); }); + converter.addConversion([&](mlir::cir::StructType type) -> mlir::Type { + llvm::SmallVector llvmMembers; + for (auto ty : type.getMembers()) + llvmMembers.push_back(converter.convertType(ty)); + auto llvmStruct = mlir::LLVM::LLVMStructType::getIdentified( + type.getContext(), type.getTypeName()); + if (llvmStruct.setBody(llvmMembers, /*isPacked=*/type.getPacked()).failed()) + llvm_unreachable("Failed to set body of struct"); + return llvmStruct; + }); } } // namespace diff --git a/clang/test/CIR/Lowering/variadics.cir b/clang/test/CIR/Lowering/variadics.cir new file mode 100644 index 000000000000..db687ba228ca --- /dev/null +++ b/clang/test/CIR/Lowering/variadics.cir @@ -0,0 +1,40 @@ +// RUN: cir-tool %s -cir-to-llvm -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=MLIR + +!s32i = !cir.int +!u32i = !cir.int +!u8i = !cir.int + +!ty_22struct2E__va_list_tag22 = !cir.struct<"struct.__va_list_tag", !u32i, !u32i, !cir.ptr, !cir.ptr, #cir.recdecl.ast> + +module { + cir.func @average(%arg0: !s32i, ...) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["count", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %2 = cir.alloca !cir.array, cir.ptr >, ["args"] {alignment = 16 : i64} + %3 = cir.alloca !cir.array, cir.ptr >, ["args_copy"] {alignment = 16 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.start %4 : !cir.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr + // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr + // MLIR-NEXT: llvm.intr.vastart %{{[0-9]+}} : !llvm.ptr + %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr + %6 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.copy %6 to %5 : !cir.ptr, !cir.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr + // MLIR-NEXT: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr + // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr + // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr + // MLIR-NEXT: llvm.intr.vacopy %13 to %{{[0-9]+}} : !llvm.ptr, !llvm.ptr + %7 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.end %7 : !cir.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr + // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr + // MLIR-NEXT: llvm.intr.vaend %{{[0-9]+}} : !llvm.ptr + %8 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %8, %1 : !s32i, cir.ptr + %9 = cir.load %1 : cir.ptr , !s32i + cir.return %9 : !s32i + } +} From 792846f9d8bea2159b8b7d2df5fc14eb6b5815ad Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Wed, 7 Jun 2023 00:59:29 -0700 Subject: [PATCH 0991/2301] [CIR] Support MLIR command line arguments. Summary: With this change MLIR command line arguments can be passed in with -mmlir, e.g, clang -mmlir -mlir-disable-threadingd clang -mmlir -debug-only=mlircontext --- clang/include/clang/Driver/Options.td | 6 +++--- clang/include/clang/Frontend/FrontendOptions.h | 4 ++++ clang/lib/Driver/ToolChains/Clang.cpp | 5 +++++ clang/lib/FrontendTool/CMakeLists.txt | 2 ++ .../lib/FrontendTool/ExecuteCompilerInvocation.cpp | 14 +++++++++++++- clang/test/CIR/CodeGen/mlirargs.c | 10 ++++++++++ 6 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/mlirargs.c diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 565d1003bf90..3912efc4020c 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -4893,9 +4893,9 @@ def mllvm : Separate<["-"], "mllvm">, def : Joined<["-"], "mllvm=">, Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>, Alias, HelpText<"Alias for -mllvm">, MetaVarName<"">; -def mmlir : Separate<["-"], "mmlir">, - Visibility<[ClangOption, CLOption, FC1Option, FlangOption]>, - HelpText<"Additional arguments to forward to MLIR's option processing">; +def mmlir : Separate<["-"], "mmlir">, Visibility<[ClangOption,CC1Option,FC1Option,FlangOption]>, + HelpText<"Additional arguments to forward to MLIR's option processing">, + MarshallingInfoStringVector>; def ffuchsia_api_level_EQ : Joined<["-"], "ffuchsia-api-level=">, Group, Visibility<[ClangOption, CC1Option]>, HelpText<"Set Fuchsia API level">, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index beb1696368aa..57cd2e863384 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -583,6 +583,10 @@ class FrontendOptions { /// should only be used for debugging and experimental features. std::vector LLVMArgs; + /// A list of arguments to forward to MLIR's option processing; this + /// should only be used for debugging and experimental features. + std::vector MLIRArgs; + /// File name of the file that will provide record layouts /// (in the format produced by -fdump-record-layouts). std::string OverrideRecordLayoutsFile; diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index eabb3374500b..c95946c06a10 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -7811,6 +7811,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, // features enabled through -Xclang -target-feature flags. SanitizeArgs.addArgs(TC, Args, CmdArgs, InputType); + for (const Arg *A : Args.filtered(options::OPT_mmlir)) { + A->claim(); + A->render(Args, CmdArgs); + } + // With -save-temps, we want to save the unoptimized bitcode output from the // CompileJobAction, use -disable-llvm-passes to get pristine IR generated // by the frontend. diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index cb70041b6914..7aeaba7f31b6 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -15,7 +15,9 @@ set(link_libs if(CLANG_ENABLE_CIR) list(APPEND link_libs clangCIRFrontendAction + MLIRIR ) + include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) endif() if(CLANG_ENABLE_ARCMT) diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 3ec147ce8447..cf8fae2a4ca8 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -33,6 +33,7 @@ #include "llvm/Support/ErrorHandling.h" #if CLANG_ENABLE_CIR +#include "mlir/IR/MLIRContext.h" #include "clang/CIRFrontendAction/CIRGenAction.h" #endif @@ -318,7 +319,18 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) { return true; } #endif - +#if CLANG_ENABLE_CIR + if (!Clang->getFrontendOpts().MLIRArgs.empty()) { + mlir::registerMLIRContextCLOptions(); + unsigned NumArgs = Clang->getFrontendOpts().MLIRArgs.size(); + auto Args = std::make_unique(NumArgs + 2); + Args[0] = "clang (MLIR option parsing)"; + for (unsigned i = 0; i != NumArgs; ++i) + Args[i + 1] = Clang->getFrontendOpts().MLIRArgs[i].c_str(); + Args[NumArgs + 1] = nullptr; + llvm::cl::ParseCommandLineOptions(NumArgs + 1, Args.get()); + } +#endif // If there were errors in processing arguments, don't do anything else. if (Clang->getDiagnostics().hasErrorOccurred()) return false; diff --git a/clang/test/CIR/CodeGen/mlirargs.c b/clang/test/CIR/CodeGen/mlirargs.c new file mode 100644 index 000000000000..7719aaf4f388 --- /dev/null +++ b/clang/test/CIR/CodeGen/mlirargs.c @@ -0,0 +1,10 @@ +// Clang returns 1 when wrong arguments are given. +// RUN: not %clang_cc1 -mmlir -mlir-disable-threadingd 2>&1 | FileCheck %s --check-prefix=WRONG +// Test that the driver can pass mlir args to cc1. +// RUN: %clang -### -mmlir -mlir-disable-threading %s 2>&1 | FileCheck %s --check-prefix=CC1 + + +// WRONG: clang (MLIR option parsing): Unknown command line argument '-mlir-disable-threadingd'. Try: 'clang (MLIR option parsing) --help' +// WRONG: clang (MLIR option parsing): Did you mean '--mlir-disable-threading'? + +// CC1: "-mmlir" "-mlir-disable-threading" From 9fee9a3fd91d88f3a60f39a62c6d6aa707862694 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 1 Jun 2023 18:57:04 -0700 Subject: [PATCH 0992/2301] [CIR] VTableAddrPointOp: support indirect point retrieval --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 20 +++++++++++-------- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 4 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 13 ++++++++++-- 3 files changed, 25 insertions(+), 12 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0b971b7d3e6c..92962944dfde 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1278,7 +1278,8 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", ``` }]; - let arguments = (ins FlatSymbolRefAttr:$name, + let arguments = (ins OptionalAttr:$name, + Optional:$sym_addr, I32Attr:$vtable_index, I32Attr:$address_point_index); let results = (outs Res:$addr); @@ -1286,9 +1287,12 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. let assemblyFormat = [{ - `(` $name `,` - `vtable_index` `=` $vtable_index `,` - `address_point_index` `=` $address_point_index + `(` + ($name^)? + ($sym_addr^ `:` type($sym_addr))? + `,` + `vtable_index` `=` $vtable_index `,` + `address_point_index` `=` $address_point_index `)` `:` `cir.ptr` type($addr) attr-dict }]; @@ -1508,14 +1512,14 @@ def CallOp : CIR_Op<"call", call must match the specified function type. The callee is encoded as a symbol reference attribute named "callee". - Since `mlir::func::CallOp` requires defining symbols to be tied with a - `mlir::func::FuncOp`, a custom `cir.call` is needed to interop with - `cir.func`. For now this is basically a simplified `mlir::func::CallOp`. - Example: ```mlir + // Direct call %2 = cir.call @my_add(%0, %1) : (f32, f32) -> f32 + ... + // Indirect call + %20 = cir.call %18(%17) ``` }]; diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 4ad54c77f6e3..8284af16af15 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -593,8 +593,8 @@ CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base, return builder.create( CGM.getLoc(VTableClass->getSourceRange()), vtablePtrTy, - vtable.getSymName(), AddressPoint.VTableIndex, - AddressPoint.AddressPointIndex); + mlir::FlatSymbolRefAttr::get(vtable.getSymNameAttr()), mlir::Value{}, + AddressPoint.VTableIndex, AddressPoint.AddressPointIndex); } mlir::Value CIRGenItaniumCXXABI::getVTableAddressPointInStructor( diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d3178fe71b94..97b71ab1b7a7 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1268,23 +1268,32 @@ GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { LogicalResult VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + // vtable ptr is not coming from a symbol. + if (!getName()) + return success(); + auto name = *getName(); + // Verify that the result type underlying pointer type matches the type of the // referenced cir.global or cir.func op. auto op = dyn_cast_or_null( symbolTable.lookupNearestSymbolFrom(*this, getNameAttr())); if (!op) return emitOpError("'") - << getName() << "' does not reference a valid cir.global"; + << name << "' does not reference a valid cir.global"; auto init = op.getInitialValue(); if (!init) return success(); if (!isa(*init)) return emitOpError("Expected #cir.vtable in initializer for global '") - << getName() << "'"; + << name << "'"; return success(); } LogicalResult cir::VTableAddrPointOp::verify() { + // The operation uses either a symbol or a value to operate, but not both + if (getName() && getSymAddr()) + return emitOpError("should use either a symbol or value, but not both"); + auto resultType = getAddr().getType(); auto fnTy = mlir::cir::FuncType::get( getContext(), {}, From ad89aadc9d41375ff8e8c7bbd67348df5cd17177 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 9 Jun 2023 16:51:49 -0700 Subject: [PATCH 0993/2301] [CIR][CIRGen] More prepare work for vcall - Helpers for creating aligned loads (tho we don't support them yet) - Implement itanium ABI specific getVirtualFunctionPointer - Try prevent some early optimizations in the codegen path. - Get the virtual function pointer in order to build virtual calls. - Add extra ABI info in the type cache. - Add cleanup logic. We current assert in the relevant path we are trying to build, since we still can't build the complete testcase, coming in a sooner commit. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 20 +++++ clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 9 ++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 34 ++++---- clang/lib/CIR/CodeGen/CIRGenCall.h | 32 +++++++- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 82 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 9 ++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 14 +++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 36 ++++++++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 56 +++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 10 +++ clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 20 ++--- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 36 ++++++++ .../CodeGen/UnimplementedFeatureGuarding.h | 8 +- 16 files changed, 338 insertions(+), 38 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index c34550da7313..8d4742f38e36 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -331,6 +331,26 @@ class CIRGenBuilderTy : public mlir::OpBuilder { addr.getPointer()); } + mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, + mlir::Value ptr, + [[maybe_unused]] llvm::MaybeAlign align, + [[maybe_unused]] bool isVolatile) { + assert(!UnimplementedFeature::volatileLoadOrStore()); + assert(!UnimplementedFeature::alignedLoad()); + return create(loc, ty, ptr); + } + + mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, + mlir::Value ptr, llvm::MaybeAlign align) { + return createAlignedLoad(loc, ty, ptr, align, /*isVolatile=*/false); + } + + mlir::Value + createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value addr, + clang::CharUnits align = clang::CharUnits::One()) { + return createAlignedLoad(loc, ty, addr, align.getAsAlign()); + } + mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst) { return create(loc, val, dst.getPointer()); diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index e1a7d8f9bfea..e6d4012a110c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -160,11 +160,20 @@ class CIRGenCXXABI { bool Delegating, Address This, QualType ThisTy) = 0; + virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, + FunctionArgList &Args) const = 0; + /// Get the address of the vtable for the given record decl which should be /// used for the vptr at the given offset in RD. virtual mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) = 0; + /// Build a virtual function pointer in the ABI-specific way. + virtual CIRGenCallee getVirtualFunctionPointer(CIRGenFunction &CGF, + GlobalDecl GD, Address This, + mlir::Type Ty, + SourceLocation Loc) = 0; + /// Checks if ABI requires extra virtual offset for vtable field. virtual bool isVirtualOffsetNeededForVTableField(CIRGenFunction &CGF, diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 77206c5d73dc..75dc761b167d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -154,17 +154,14 @@ void ClangToCIRArgMapping::construct(const ASTContext &Context, llvm_unreachable("NYI"); case ABIArgInfo::Extend: case ABIArgInfo::Direct: { - auto STy = AI.getCoerceToType().dyn_cast(); - // FIXME: handle sseregparm someday... - if (AI.isDirect() && AI.getCanBeFlattened() && STy) { - // TODO(cir): we might not want to break it this early, revisit this - // once we have a better ABI lowering story. - CIRArgs.NumberOfArgs = STy.getMembers().size(); - assert(CIRArgs.NumberOfArgs == 1 && - "Initial CIR codegen is not the place to split arguments"); - } else { - CIRArgs.NumberOfArgs = 1; - } + // Postpone splitting structs into elements since this makes it way + // more complicated for analysis to obtain information on the original + // arguments. + // + // TODO(cir): a LLVM lowering prepare pass should break this down into + // the appropriated pieces. + assert(!UnimplementedFeature::constructABIArgDirectExtend()); + CIRArgs.NumberOfArgs = 1; break; } } @@ -276,7 +273,12 @@ mlir::cir::FuncType CIRGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { } CIRGenCallee CIRGenCallee::prepareConcreteCallee(CIRGenFunction &CGF) const { - assert(!isVirtual() && "Virtual NYI"); + if (isVirtual()) { + const CallExpr *CE = getVirtualCallExpr(); + return CGF.CGM.getCXXABI().getVirtualFunctionPointer( + CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(), + CE ? CE->getBeginLoc() : SourceLocation()); + } return *this; } @@ -444,12 +446,14 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, Src = builder.createElementBitCast(argLoc, Src, STy); } - assert(NumCIRArgs == STy.getMembers().size()); + // assert(NumCIRArgs == STy.getMembers().size()); // In LLVMGen: Still only pass the struct without any gaps but mark it - // as such somehow. In CIRGen: Emit a load from the "whole" struct, + // as such somehow. + // + // In CIRGen: Emit a load from the "whole" struct, // which shall be broken later by some lowering step into multiple // loads. - assert(STy.getMembers().size() == 1 && "dont break up arguments here!"); + assert(NumCIRArgs == 1 && "dont break up arguments here!"); CIRCallArgs[FirstCIRArg] = builder.createLoad(argLoc, Src); } else { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 538eb26811f2..c5d4157eb636 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -146,7 +146,8 @@ class CIRGenCallee { } CIRGenCalleeInfo getAbstractInfo() const { - assert(!isVirtual() && "Virtual NYI"); + if (isVirtual()) + return VirtualInfo.MD; assert(isOrdinary()); return AbstractInfo; } @@ -154,6 +155,35 @@ class CIRGenCallee { bool isVirtual() const { return KindOrFunctionPointer == SpecialKind::Virtual; } + + static CIRGenCallee forVirtual(const clang::CallExpr *CE, + clang::GlobalDecl MD, Address Addr, + mlir::cir::FuncType FTy) { + CIRGenCallee result(SpecialKind::Virtual); + result.VirtualInfo.CE = CE; + result.VirtualInfo.MD = MD; + result.VirtualInfo.Addr = Addr; + result.VirtualInfo.FTy = FTy; + return result; + } + + const clang::CallExpr *getVirtualCallExpr() const { + assert(isVirtual()); + return VirtualInfo.CE; + } + + clang::GlobalDecl getVirtualMethodDecl() const { + assert(isVirtual()); + return VirtualInfo.MD; + } + Address getThisAddress() const { + assert(isVirtual()); + return VirtualInfo.Addr; + } + mlir::cir::FuncType getVirtualFunctionType() const { + assert(isVirtual()); + return VirtualInfo.FTy; + } }; struct CallArg { diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index a0175275c58a..a43850f4a74b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -16,6 +16,7 @@ #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/RecordLayout.h" +#include "clang/Basic/NoSanitizeList.h" #include "clang/Basic/TargetBuiltins.h" using namespace clang; @@ -261,18 +262,36 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { const CXXConstructorDecl *CD, FunctionArgList &Args) { if (CD->isCopyOrMoveConstructor() && CD->isDefaulted()) - llvm_unreachable("NYI"); + return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)]; return nullptr; } // Returns true if a CXXCtorInitializer represents a member initialization - // that can be rolled into a memcpy + // that can be rolled into a memcpy. + // TODO(cir): this could be shared with LLVM codegen. bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const { if (!MemcpyableCtor) return false; llvm_unreachable("NYI"); + FieldDecl *Field = MemberInit->getMember(); + assert(Field && "No field for member init."); + QualType FieldType = Field->getType(); + CXXConstructExpr *CE = dyn_cast(MemberInit->getInit()); + + // Bail out on non-memcpyable, not-trivially-copyable members. + if (!(CE && isMemcpyEquivalentSpecialMember(CE->getConstructor())) && + !(FieldType.isTriviallyCopyableType(CGF.getContext()) || + FieldType->isReferenceType())) + return false; + + // Bail out on volatile fields. + if (!isMemcpyableField(Field)) + return false; + + // Otherwise we're good. + return true; } public: @@ -287,7 +306,8 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { void addMemberInitializer(CXXCtorInitializer *MemberInit) { if (isMemberInitMemcpyable(MemberInit)) { - llvm_unreachable("NYI"); + AggregatedInits.push_back(MemberInit); + addMemcpyableField(MemberInit->getMember()); } else { buildAggregatedInits(); buildMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit, @@ -318,7 +338,14 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { (void)LHS; for (unsigned i = 0; i < AggregatedInits.size(); ++i) { - llvm_unreachable("NYI"); + CXXCtorInitializer *MemberInit = AggregatedInits[i]; + QualType FieldType = MemberInit->getAnyMember()->getType(); + QualType::DestructionKind dtorKind = FieldType.isDestructedType(); + if (!CGF.needsEHCleanup(dtorKind)) + continue; + LValue FieldLHS = LHS; + buildLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS); + CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType); } } @@ -1231,3 +1258,50 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, llvm_unreachable("NYI"); return Value; } + +// TODO(cir): this can be shared with LLVM codegen. +bool CIRGenFunction::shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) { + if (!CGM.getCodeGenOpts().WholeProgramVTables || + !CGM.HasHiddenLTOVisibility(RD)) + return false; + + if (CGM.getCodeGenOpts().VirtualFunctionElimination) + return true; + + if (!SanOpts.has(SanitizerKind::CFIVCall) || + !CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIVCall)) + return false; + + std::string TypeName = RD->getQualifiedNameAsString(); + return !getContext().getNoSanitizeList().containsType(SanitizerKind::CFIVCall, + TypeName); +} + +void CIRGenFunction::buildTypeMetadataCodeForVCall(const CXXRecordDecl *RD, + mlir::Value VTable, + SourceLocation Loc) { + if (SanOpts.has(SanitizerKind::CFIVCall)) { + llvm_unreachable("NYI"); + } else if (CGM.getCodeGenOpts().WholeProgramVTables && + // Don't insert type test assumes if we are forcing public + // visibility. + !CGM.AlwaysHasLTOVisibilityPublic(RD)) { + llvm_unreachable("NYI"); + } +} + +mlir::Value CIRGenFunction::getVTablePtr(SourceLocation Loc, Address This, + mlir::Type VTableTy, + const CXXRecordDecl *RD) { + auto loc = getLoc(Loc); + Address VTablePtrSrc = builder.createElementBitCast(loc, This, VTableTy); + auto VTable = builder.createLoad(loc, VTablePtrSrc); + assert(!UnimplementedFeature::tbaa()); + + if (CGM.getCodeGenOpts().OptimizationLevel > 0 && + CGM.getCodeGenOpts().StrictVTablePointers) { + assert(!UnimplementedFeature::createInvariantGroup()); + } + + return VTable; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index e11594d8c184..d76fd8afda31 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -687,4 +687,13 @@ void CIRGenFunction::buildAutoVarTypeCleanup( bool useEHCleanup = (cleanupKind & EHCleanup); EHStack.pushCleanup(cleanupKind, addr, type, destroyer, useEHCleanup); +} + +/// Push the standard destructor for the given type as an EH-only cleanup. +void CIRGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind, + Address addr, QualType type) { + assert(dtorKind && "cannot push destructor for trivial type"); + assert(needsEHCleanup(dtorKind)); + + pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 80a54ac84cbd..35c48df017e9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -132,7 +132,7 @@ LValue CIRGenFunction::buildLValueForField(LValue base, hasAnyVptr(FieldType, getContext())) // Because unions can easily skip invariant.barriers, we need to add // a barrier every time CXXRecord field with vptr is referenced. - assert(!UnimplementedFeature::createLaunderInvariantGroup()); + assert(!UnimplementedFeature::createInvariantGroup()); if (IsInPreservedAIRegion || (getDebugInfo() && rec->hasAttr())) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index f90bb2433c4e..b3ec99efdb9d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -224,7 +224,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( CIRGenCallee Callee; if (useVirtualCall) { - llvm_unreachable("NYI"); + Callee = CIRGenCallee::forVirtual(CE, MD, This.getAddress(), Ty); } else { if (SanOpts.has(SanitizerKind::CFINVCall)) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 142dafaf0fa9..b61f8ff4ee6b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -613,7 +613,12 @@ void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, Args.add(RValue::get(ThisPtr), D->getThisType()); - assert(!isMemcpyEquivalentSpecialMember(D) && "NYI"); + // In LLVM Codegen: If this is a trivial constructor, just emit what's needed. + // If this is a union copy constructor, we must emit a memcpy, because the AST + // does not model that copy. + if (isMemcpyEquivalentSpecialMember(D)) { + assert(!UnimplementedFeature::isMemcpyEquivalentSpecialMember()); + } const FunctionProtoType *FPT = D->getType()->castAs(); EvaluationOrder Order = E->isListInitialization() @@ -644,10 +649,11 @@ void CIRGenFunction::buildCXXConstructorCall( // In LLVM: do nothing. // In CIR: emit as a regular call, other later passes should lower the // ctor call into trivial initialization. - // if (CD->isTrivial() && CD->isDefaultConstructor()) - // return; + assert(!UnimplementedFeature::isTrivialAndisDefaultConstructor()); - assert(!isMemcpyEquivalentSpecialMember(D) && "NYI"); + if (isMemcpyEquivalentSpecialMember(D)) { + assert(!UnimplementedFeature::isMemcpyEquivalentSpecialMember()); + } bool PassPrototypeArgs = true; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 40345b529097..a6ed9aced881 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1223,6 +1223,27 @@ class CIRGenFunction : public CIRGenTypeCache { /// base classes in reverse order of their construction. void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type); + /// Determines whether an EH cleanup is required to destroy a type + /// with the given destruction kind. + /// TODO(cir): could be shared with Clang LLVM codegen + bool needsEHCleanup(QualType::DestructionKind kind) { + switch (kind) { + case QualType::DK_none: + return false; + case QualType::DK_cxx_destructor: + case QualType::DK_objc_weak_lifetime: + case QualType::DK_nontrivial_c_struct: + return getLangOpts().Exceptions; + case QualType::DK_objc_strong_lifetime: + return getLangOpts().Exceptions && + CGM.getCodeGenOpts().ObjCAutoRefCountExceptions; + } + llvm_unreachable("bad destruction kind"); + } + + void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, + QualType type); + static bool IsConstructorDelegationValid(const clang::CXXConstructorDecl *Ctor); @@ -1244,6 +1265,21 @@ class CIRGenFunction : public CIRGenTypeCache { bool BaseIsNonVirtualPrimaryBase, const clang::CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs); + /// Return the Value of the vtable pointer member pointed to by This. + mlir::Value getVTablePtr(SourceLocation Loc, Address This, + mlir::Type VTableTy, + const CXXRecordDecl *VTableClass); + + /// Returns whether we should perform a type checked load when loading a + /// virtual function for virtual calls to members of RD. This is generally + /// true when both vcall CFI and whole-program-vtables are enabled. + bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD); + + /// If whole-program virtual table optimization is enabled, emit an assumption + /// that VTable is a member of RD's type identifier. Or, if vptr CFI is + /// enabled, emit a check that VTable is a member of RD's type identifier. + void buildTypeMetadataCodeForVCall(const CXXRecordDecl *RD, + mlir::Value VTable, SourceLocation Loc); /// Return the VTT parameter that should be passed to a base /// constructor/destructor with virtual bases. diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 8284af16af15..2f79bc5004da 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -123,6 +123,9 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) override; + CIRGenCallee getVirtualFunctionPointer(CIRGenFunction &CGF, GlobalDecl GD, + Address This, mlir::Type Ty, + SourceLocation Loc) override; mlir::Value getVTableAddressPoint(BaseSubobject Base, const CXXRecordDecl *VTableClass) override; bool isVirtualOffsetNeededForVTableField(CIRGenFunction &CGF, @@ -206,6 +209,12 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { return true; } + size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, + FunctionArgList &Args) const override { + assert(!Args.empty() && "expected the arglist to not be empty!"); + return Args.size() - 1; + } + /**************************** RTTI Uniqueness ******************************/ protected: /// Returns true if the ABI requires RTTI type_info objects to be unique @@ -576,6 +585,53 @@ CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, return vtable; } +CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer( + CIRGenFunction &CGF, GlobalDecl GD, Address This, mlir::Type Ty, + SourceLocation Loc) { + auto loc = CGF.getLoc(Loc); + auto TyPtr = CGF.getBuilder().getPointerTo(Ty); + auto *MethodDecl = cast(GD.getDecl()); + auto VTable = CGF.getVTablePtr( + Loc, This, CGF.getBuilder().getPointerTo(TyPtr), MethodDecl->getParent()); + + uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD); + mlir::Value VFunc{}; + if (CGF.shouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) { + llvm_unreachable("NYI"); + } else { + CGF.buildTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc); + + mlir::Value VFuncLoad; + if (CGM.getItaniumVTableContext().isRelativeLayout()) { + llvm_unreachable("NYI"); + } else { + VTable = CGF.getBuilder().createBitcast( + loc, VTable, CGF.getBuilder().getPointerTo(TyPtr)); + auto VTableSlotPtr = + CGF.getBuilder().create( + loc, TyPtr, ::mlir::FlatSymbolRefAttr{}, VTable, + /*vtable_index=*/0, VTableIndex); + VFuncLoad = CGF.getBuilder().createAlignedLoad(loc, TyPtr, VTableSlotPtr, + CGF.getPointerAlign()); + } + + // Add !invariant.load md to virtual function load to indicate that + // function didn't change inside vtable. + // It's safe to add it without -fstrict-vtable-pointers, but it would not + // help in devirtualization because it will only matter if we will have 2 + // the same virtual function loads from the same vtable load, which won't + // happen without enabled devirtualization with -fstrict-vtable-pointers. + if (CGM.getCodeGenOpts().OptimizationLevel > 0 && + CGM.getCodeGenOpts().StrictVTablePointers) { + llvm_unreachable("NYI"); + } + VFunc = VFuncLoad; + } + + CIRGenCallee Callee(GD, VFunc.getDefiningOp()); + return Callee; +} + mlir::Value CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base, const CXXRecordDecl *VTableClass) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index a626e6898f3b..75f87b3df155 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -123,7 +123,11 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, FloatTy = builder.getF32Type(); DoubleTy = builder.getF64Type(); // TODO: PointerWidthInBits - // TODO: PointerAlignInBytes + PointerAlignInBytes = + astctx + .toCharUnitsFromBits( + astctx.getTargetInfo().getPointerAlign(LangAS::Default)) + .getQuantity(); // TODO: SizeSizeInBytes // TODO: IntAlignInBytes UCharTy = ::mlir::cir::IntType::get(builder.getContext(), diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index f61be4fda3e4..27e8296a70bd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -297,6 +297,16 @@ class CIRGenModule : public CIRGenTypeCache { /// Helper to convert Clang's alignment to CIR alignment mlir::IntegerAttr getSize(CharUnits size); + /// Returns whether the given record has public LTO visibility (regardless of + /// -lto-whole-program-visibility) and therefore may not participate in + /// (single-module) CFI and whole-program vtable optimization. + bool AlwaysHasLTOVisibilityPublic(const CXXRecordDecl *RD); + + /// Returns whether the given record has hidden LTO visibility and therefore + /// may participate in (single-module) CFI and whole-program vtable + /// optimization. + bool HasHiddenLTOVisibility(const CXXRecordDecl *RD); + /// Determine whether an object of this type can be emitted /// as a constant. /// diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index 1777edb17d6a..dce86ec80255 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -93,10 +93,10 @@ struct CIRGenTypeCache { // unsigned char PointerWidthInBits; /// The size and alignment of a pointer into the generic address space. - // union { - // unsigned char PointerAlignInBytes; - // unsigned char PointerSizeInBytes; - // }; + union { + unsigned char PointerAlignInBytes; + unsigned char PointerSizeInBytes; + }; /// The size and alignment of size_t. // union { @@ -112,12 +112,12 @@ struct CIRGenTypeCache { // clang::CharUnits getSizeAlign() const { // return clang::CharUnits::fromQuantity(SizeAlignInBytes); // } - // clang::CharUnits getPointerSize() const { - // return clang::CharUnits::fromQuantity(PointerSizeInBytes); - // } - // clang::CharUnits getPointerAlign() const { - // return clang::CharUnits::fromQuantity(PointerAlignInBytes); - // } + clang::CharUnits getPointerSize() const { + return clang::CharUnits::fromQuantity(PointerSizeInBytes); + } + clang::CharUnits getPointerAlign() const { + return clang::CharUnits::fromQuantity(PointerAlignInBytes); + } // clang::LangAS getASTAllocaAddressSpace() const { // return ASTAllocaAddressSpace; diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 056c71a64105..012ab673facc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -427,3 +427,39 @@ void CIRGenVTables::buildThunks(GlobalDecl GD) { for ([[maybe_unused]] const ThunkInfo &Thunk : *ThunkInfoVector) llvm_unreachable("NYI"); } + +bool CIRGenModule::AlwaysHasLTOVisibilityPublic(const CXXRecordDecl *RD) { + if (RD->hasAttr() || RD->hasAttr() || + RD->hasAttr() || RD->hasAttr()) + return true; + + if (!getCodeGenOpts().LTOVisibilityPublicStd) + return false; + + const DeclContext *DC = RD; + while (true) { + auto *D = cast(DC); + DC = DC->getParent(); + if (isa(DC->getRedeclContext())) { + if (auto *ND = dyn_cast(D)) + if (const IdentifierInfo *II = ND->getIdentifier()) + if (II->isStr("std") || II->isStr("stdext")) + return true; + break; + } + } + + return false; +} + +bool CIRGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) { + LinkageInfo LV = RD->getLinkageAndVisibility(); + if (!isExternallyVisible(LV.getLinkage())) + return true; + + if (!getTriple().isOSBinFormatCOFF() && + LV.getVisibility() != HiddenVisibility) + return false; + + return !AlwaysHasLTOVisibilityPublic(RD); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 2263c12b09bc..c20e35a5744f 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -74,6 +74,10 @@ struct UnimplementedFeature { static bool shouldSplitConstantStore() { return false; } static bool shouldCreateMemCpyFromGlobal() { return false; } static bool shouldReverseUnaryCondOnBoolExpr() { return false; } + static bool fieldMemcpyizerBuildMemcpy() { return false; } + static bool isTrivialAndisDefaultConstructor() { return false; } + static bool isMemcpyEquivalentSpecialMember() { return false; } + static bool constructABIArgDirectExtend() { return false; } static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } @@ -90,15 +94,17 @@ struct UnimplementedFeature { static bool ehStack() { return false; } static bool isVarArg() { return false; } static bool setNonGC() { return false; } + static bool volatileLoadOrStore() { return false; } static bool armComputeVolatileBitfields() { return false; } static bool setCommonAttributes() { return false; } static bool insertBuiltinUnpredictable() { return false; } - static bool createLaunderInvariantGroup() { return false; } + static bool createInvariantGroup() { return false; } static bool addAutoInitAnnotation() { return false; } static bool addHeapAllocSiteMetadata() { return false; } static bool loopInfoStack() { return false; } static bool requiresCleanups() { return false; } static bool constantFoldsToSimpleInteger() { return false; } + static bool alignedLoad() { return false; } }; } // namespace cir From 18c5d92d987a79fe738c963dde2faa688aaeec1e Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 12 Jun 2023 09:03:40 -0300 Subject: [PATCH 0994/2301] [CIR][Lowering] Lower float plus and minus unary ops Lower plus and minus unary operators when applied to float types. ghstack-source-id: 257f032b9689905eb0b9152b54a028d320831808 Pull Request resolved: https://github.com/llvm/clangir/pull/107 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 92 ++++++++++++------- clang/test/CIR/CodeGen/unary.cpp | 12 +++ clang/test/CIR/Lowering/unary-plus-minus.cir | 19 +++- 3 files changed, 88 insertions(+), 35 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b24717eeb9d3..d2d9e9e03769 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -33,8 +33,10 @@ #include "mlir/IR/BuiltinDialect.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/IRMapping.h" +#include "mlir/IR/Value.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" +#include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" @@ -835,47 +837,69 @@ class CIRUnaryOpLowering matchAndRewrite(mlir::cir::UnaryOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Type type = op.getInput().getType(); - assert(type.isa() && "operand type not supported yet"); auto llvmInType = adaptor.getInput().getType(); auto llvmType = getTypeConverter()->convertType(op.getType()); - switch (op.getKind()) { - case mlir::cir::UnaryOpKind::Inc: { - auto One = rewriter.create( - op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 1)); - rewriter.replaceOpWithNewOp(op, llvmType, - adaptor.getInput(), One); - break; - } - case mlir::cir::UnaryOpKind::Dec: { - auto One = rewriter.create( - op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 1)); - rewriter.replaceOpWithNewOp(op, llvmType, - adaptor.getInput(), One); - break; - } - case mlir::cir::UnaryOpKind::Plus: { - rewriter.replaceOp(op, adaptor.getInput()); - break; - } - case mlir::cir::UnaryOpKind::Minus: { - auto Zero = rewriter.create( - op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 0)); - rewriter.replaceOpWithNewOp(op, llvmType, Zero, - adaptor.getInput()); - break; - } - case mlir::cir::UnaryOpKind::Not: { - auto MinusOne = rewriter.create( - op.getLoc(), llvmType, mlir::IntegerAttr::get(llvmType, -1)); - rewriter.replaceOpWithNewOp(op, llvmType, MinusOne, - adaptor.getInput()); - break; + // Integer unary operations. + if (type.isa()) { + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Inc: { + auto One = rewriter.create( + op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 1)); + rewriter.replaceOpWithNewOp(op, llvmType, + adaptor.getInput(), One); + return mlir::success(); + } + case mlir::cir::UnaryOpKind::Dec: { + auto One = rewriter.create( + op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 1)); + rewriter.replaceOpWithNewOp(op, llvmType, + adaptor.getInput(), One); + return mlir::success(); + } + case mlir::cir::UnaryOpKind::Plus: { + rewriter.replaceOp(op, adaptor.getInput()); + return mlir::success(); + } + case mlir::cir::UnaryOpKind::Minus: { + auto Zero = rewriter.create( + op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 0)); + rewriter.replaceOpWithNewOp(op, llvmType, Zero, + adaptor.getInput()); + return mlir::success(); + } + case mlir::cir::UnaryOpKind::Not: { + auto MinusOne = rewriter.create( + op.getLoc(), llvmType, mlir::IntegerAttr::get(llvmType, -1)); + rewriter.replaceOpWithNewOp(op, llvmType, MinusOne, + adaptor.getInput()); + return mlir::success(); + } + } } + + // Floating point unary operations. + if (type.isa()) { + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Plus: + rewriter.replaceOp(op, adaptor.getInput()); + return mlir::success(); + case mlir::cir::UnaryOpKind::Minus: { + auto negOneAttr = mlir::FloatAttr::get(llvmInType, -1.0); + auto negOneConst = rewriter.create( + op.getLoc(), llvmInType, negOneAttr); + rewriter.replaceOpWithNewOp( + op, llvmType, negOneConst, adaptor.getInput()); + return mlir::success(); + } + default: + op.emitError() << "Floating point unary lowering ot implemented"; + return mlir::failure(); + } } - return mlir::LogicalResult::success(); + return op.emitError() << "Unary operation has unsupported type: " << type; } }; diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index e7141a7d8617..ee08e38b15c3 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -152,3 +152,15 @@ int *inc_p(int *i) { // CHECK: %[[#i_inc:]] = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %[[#inc_const:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: = cir.ptr_stride(%[[#i_inc]] : !cir.ptr, %[[#inc_const]] : !s32i), !cir.ptr + +void floats(float f) { +// CHECK: cir.func @{{.+}}floats{{.+}} + +f; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : f32, f32 + -f; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : f32, f32 +} + +void doubles(double d) { +// CHECK: cir.func @{{.+}}doubles{{.+}} + +d; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : f64, f64 + -d; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : f64, f64 +} diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index c4a4c0eab932..48d4f3d62b3c 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -18,7 +18,6 @@ module { cir.store %6, %1 : !s32i, cir.ptr cir.return } -} // MLIR: %[[#INPUT_PLUS:]] = llvm.load // MLIR: llvm.store %[[#INPUT_PLUS]] @@ -27,3 +26,21 @@ module { // MLIR: llvm.sub %[[ZERO]], %[[#INPUT_MINUS]] // LLVM: = sub i32 0, %[[#]] + + + cir.func @floatingPoints(%arg0: f64) { + // MLIR: llvm.func @floatingPoints(%arg0: f64) { + %0 = cir.alloca f64, cir.ptr , ["X", init] {alignment = 8 : i64} + cir.store %arg0, %0 : f64, cir.ptr + %1 = cir.load %0 : cir.ptr , f64 + %2 = cir.unary(plus, %1) : f64, f64 + // MLIR: llvm.store %arg0, %[[#F_PLUS:]] : f64, !llvm.ptr + // MLIR: %{{[0-9]}} = llvm.load %[[#F_PLUS]] : !llvm.ptr + %3 = cir.load %0 : cir.ptr , f64 + %4 = cir.unary(minus, %3) : f64, f64 + // MLIR: %[[#F_MINUS:]] = llvm.load %{{[0-9]}} : !llvm.ptr + // MLIR: %[[#F_NEG_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f64) : f64 + // MLIR: %5 = llvm.fmul %[[#F_NEG_ONE]], %[[#F_MINUS]] : f64 + cir.return + } +} From 0009b0d931d6f820aa2a657d2ffe1a5a30983c64 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 12 Jun 2023 09:03:40 -0300 Subject: [PATCH 0995/2301] [CIR][Lowering] Lower inc/dec float and double types Implement lowering for inc/dec float and double types. Also adds a small bit of codegen that was missing for this lowering to work. ghstack-source-id: eadc06337a512121541af8ede380fdda5d03e008 Pull Request resolved: https://github.com/llvm/clangir/pull/108 --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 13 ++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 16 +++++++++ clang/test/CIR/CodeGen/unary.cpp | 8 +++++ clang/test/CIR/Lowering/unary-inc-dec.cir | 36 ++++++++++++++++++- 4 files changed, 71 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index e8cac8e93ec3..a77bce66980c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -17,9 +17,12 @@ #include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" #include +#include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Value.h" using namespace cir; @@ -338,7 +341,15 @@ class ScalarExprEmitter : public StmtVisitor { } else if (type->isVectorType()) { llvm_unreachable("no vector inc/dec yet"); } else if (type->isRealFloatingType()) { - llvm_unreachable("no float inc/dec yet"); + auto isFloatOrDouble = type->isSpecificBuiltinType(BuiltinType::Float) || + type->isSpecificBuiltinType(BuiltinType::Double); + assert(isFloatOrDouble && "Non-float/double NYI"); + + // Create the inc/dec operation. + auto kind = + (isInc ? mlir::cir::UnaryOpKind::Inc : mlir::cir::UnaryOpKind::Dec); + value = buildUnaryOp(E, kind, input); + } else if (type->isFixedPointType()) { llvm_unreachable("no fixed point inc/dec yet"); } else { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d2d9e9e03769..08dceecf1b12 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -882,6 +882,22 @@ class CIRUnaryOpLowering // Floating point unary operations. if (type.isa()) { switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Inc: { + auto oneAttr = rewriter.getFloatAttr(llvmInType, 1.0); + auto oneConst = rewriter.create( + op.getLoc(), llvmInType, oneAttr); + rewriter.replaceOpWithNewOp(op, llvmType, oneConst, + adaptor.getInput()); + return mlir::success(); + } + case mlir::cir::UnaryOpKind::Dec: { + auto negOneAttr = rewriter.getFloatAttr(llvmInType, -1.0); + auto negOneConst = rewriter.create( + op.getLoc(), llvmInType, negOneAttr); + rewriter.replaceOpWithNewOp( + op, llvmType, negOneConst, adaptor.getInput()); + return mlir::success(); + } case mlir::cir::UnaryOpKind::Plus: rewriter.replaceOp(op, adaptor.getInput()); return mlir::success(); diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index ee08e38b15c3..bc23ca5bb685 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -157,10 +157,18 @@ void floats(float f) { // CHECK: cir.func @{{.+}}floats{{.+}} +f; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : f32, f32 -f; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : f32, f32 + ++f; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : f32, f32 + --f; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f32, f32 + f++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : f32, f32 + f--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f32, f32 } void doubles(double d) { // CHECK: cir.func @{{.+}}doubles{{.+}} +d; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : f64, f64 -d; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : f64, f64 + ++d; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : f64, f64 + --d; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f64, f64 + d++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : f64, f64 + d--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f64, f64 } diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir index 398ec3214bcb..0f484aafee69 100644 --- a/clang/test/CIR/Lowering/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -18,7 +18,6 @@ module { cir.store %6, %1 : !s32i, cir.ptr cir.return } -} // MLIR: = llvm.mlir.constant(1 : i32) // MLIR: = llvm.add @@ -27,3 +26,38 @@ module { // LLVM: = add i32 %[[#]], 1 // LLVM: = sub i32 %[[#]], 1 + + cir.func @floatingPoint(%arg0: f32, %arg1: f64) { + // MLIR: llvm.func @floatingPoint + %0 = cir.alloca f32, cir.ptr , ["f", init] {alignment = 4 : i64} + %1 = cir.alloca f64, cir.ptr , ["d", init] {alignment = 8 : i64} + cir.store %arg0, %0 : f32, cir.ptr + cir.store %arg1, %1 : f64, cir.ptr + + %2 = cir.load %0 : cir.ptr , f32 + %3 = cir.unary(inc, %2) : f32, f32 + cir.store %3, %0 : f32, cir.ptr + // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1.000000e+00 : f32) : f32 + // MLIR: = llvm.fadd %[[#F_ONE]], %{{[0-9]+}} : f32 + + %4 = cir.load %0 : cir.ptr , f32 + %5 = cir.unary(dec, %4) : f32, f32 + cir.store %5, %0 : f32, cir.ptr + // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f32) : f32 + // MLIR: = llvm.fsub %[[#D_ONE]], %{{[0-9]+}} : f32 + + %6 = cir.load %1 : cir.ptr , f64 + %7 = cir.unary(inc, %6) : f64, f64 + cir.store %7, %1 : f64, cir.ptr + // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(1.000000e+00 : f64) : f64 + // MLIR: = llvm.fadd %[[#D_ONE]], %{{[0-9]+}} : f64 + + %8 = cir.load %1 : cir.ptr , f64 + %9 = cir.unary(dec, %8) : f64, f64 + cir.store %9, %1 : f64, cir.ptr + // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f64) : f64 + // MLIR: = llvm.fsub %[[#D_ONE]], %{{[0-9]+}} : f64 + + cir.return + } +} From 82a99779e2e2606be5bf27e198b69ae34611336c Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 12 Jun 2023 09:03:41 -0300 Subject: [PATCH 0996/2301] [CIR][CIRGen][Lowering] Support float unary not op Adds new cir.cast op float-to-bool kind, including its codegen and lowering as well. ghstack-source-id: f5d2416dfc62b73b3cad77a07c6c506f33a7acbd Pull Request resolved: https://github.com/llvm/clangir/pull/109 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 10 ++++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 ++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 33 +++++++++++++++++++ clang/test/CIR/CodeGen/unary.cpp | 8 +++++ clang/test/CIR/Lowering/unary-not.cir | 28 +++++++++++++++- 6 files changed, 85 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 92962944dfde..dd2dc709593a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -50,13 +50,14 @@ def CK_PtrToBoolean : I32EnumAttrCase<"ptr_to_bool", 6>; def CK_FloatToIntegral : I32EnumAttrCase<"float_to_int", 7>; def CK_IntegralToPointer : I32EnumAttrCase<"int_to_ptr", 8>; def CK_PointerToIntegral : I32EnumAttrCase<"ptr_to_int", 9>; +def CK_FloatToBoolean : I32EnumAttrCase<"float_to_bool", 10>; def CastKind : I32EnumAttr< "CastKind", "cast kind", [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast, CK_BitCast, CK_FloatingCast, CK_PtrToBoolean, CK_FloatToIntegral, - CK_IntegralToPointer, CK_PointerToIntegral]> { + CK_IntegralToPointer, CK_PointerToIntegral, CK_FloatToBoolean]> { let cppNamespace = "::mlir::cir"; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index a77bce66980c..08c37b9364ac 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -809,6 +809,12 @@ class ScalarExprEmitter : public StmtVisitor { E->getExprLoc()); } + mlir::Value buildFloatToBoolConversion(mlir::Value src, mlir::Location loc) { + auto boolTy = Builder.getBoolTy(); + return Builder.create( + loc, boolTy, mlir::cir::CastKind::float_to_bool, src); + } + mlir::Value buildIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) { // Because of the type rules of C, we often end up computing a // logical value, then zero extending it to int, then wanting it @@ -827,7 +833,7 @@ class ScalarExprEmitter : public StmtVisitor { assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); if (SrcType->isRealFloatingType()) - assert(0 && "not implemented"); + return buildFloatToBoolConversion(Src, loc); if (auto *MPT = llvm::dyn_cast(SrcType)) assert(0 && "not implemented"); @@ -1180,7 +1186,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_PointerToBoolean: return buildPointerToBoolConversion(Visit(E), E->getType()); case CK_FloatingToBoolean: - llvm_unreachable("NYI"); + return buildFloatToBoolConversion(Visit(E), CGF.getLoc(E->getExprLoc())); case CK_MemberPointerToBoolean: llvm_unreachable("NYI"); case CK_FloatingComplexToReal: diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 97b71ab1b7a7..223b24bb1368 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -315,6 +315,13 @@ LogicalResult CastOp::verify() { return emitOpError() << "requires integer for result"; return success(); } + case cir::CastKind::float_to_bool: { + if (!srcType.isa()) + return emitOpError() << "requires float for source"; + if (!resType.isa()) + return emitOpError() << "requires !cir.bool for result"; + return success(); + } } llvm_unreachable("Unknown CastOp kind?"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 08dceecf1b12..7f60bbe18dd8 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -279,6 +279,24 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } + case mlir::cir::CastKind::float_to_bool: { + auto dstTy = castOp.getType().cast(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + auto kind = mlir::LLVM::FCmpPredicate::une; + + // Check if float is not equal to zero. + auto zeroFloat = rewriter.create( + castOp.getLoc(), llvmSrcVal.getType(), + mlir::FloatAttr::get(llvmSrcVal.getType(), 0.0)); + + // Extend comparison result to either bool (C++) or int (C). + mlir::Value cmpResult = rewriter.create( + castOp.getLoc(), kind, llvmSrcVal, zeroFloat); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + cmpResult); + return mlir::success(); + } default: llvm_unreachable("NYI"); } @@ -915,6 +933,21 @@ class CIRUnaryOpLowering } } + // Boolean unary operations. + if (type.isa()) { + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Not: + rewriter.replaceOpWithNewOp( + op, llvmType, adaptor.getInput(), + rewriter.create( + op.getLoc(), llvmType, mlir::IntegerAttr::get(llvmType, 1))); + return mlir::success(); + default: + op.emitError() << "Unary operator not implemented for bool type"; + return mlir::failure(); + } + } + return op.emitError() << "Unary operation has unsupported type: " << type; } }; diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index bc23ca5bb685..778e6b506e7d 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -161,6 +161,10 @@ void floats(float f) { --f; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f32, f32 f++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : f32, f32 f--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f32, f32 + + !f; + // CHECK: %[[#F_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : f32), !cir.bool + // CHECK: = cir.unary(not, %[[#F_BOOL]]) : !cir.bool, !cir.bool } void doubles(double d) { @@ -171,4 +175,8 @@ void doubles(double d) { --d; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f64, f64 d++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : f64, f64 d--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f64, f64 + + !d; + // CHECK: %[[#D_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : f64), !cir.bool + // CHECK: = cir.unary(not, %[[#D_BOOL]]) : !cir.bool, !cir.bool } diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index 8651ab9523b3..8374bad7bfd2 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -13,10 +13,36 @@ module { %5 = cir.load %0 : cir.ptr , !s32i cir.return %5 : !s32i } -} // MLIR: = llvm.load // MLIR: = llvm.mlir.constant(-1 : i32) // MLIR: = llvm.xor // LLVM: = xor i32 -1, %[[#]] + + + cir.func @floatingPoint(%arg0: f32, %arg1: f64) { + // MLIR: llvm.func @floatingPoint + %0 = cir.alloca f32, cir.ptr , ["f", init] {alignment = 4 : i64} + %1 = cir.alloca f64, cir.ptr , ["d", init] {alignment = 8 : i64} + cir.store %arg0, %0 : f32, cir.ptr + cir.store %arg1, %1 : f64, cir.ptr + %2 = cir.load %0 : cir.ptr , f32 + %3 = cir.cast(float_to_bool, %2 : f32), !cir.bool + // MLIR: %[[#F_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 + // MLIR: %[[#F_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#F_ZERO]] : f32 + // MLIR: %[[#F_ZEXT:]] = llvm.zext %[[#F_BOOL]] : i1 to i8 + %4 = cir.unary(not, %3) : !cir.bool, !cir.bool + // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1 : i8) : i8 + // MLIR: = llvm.xor %[[#F_ZEXT]], %[[#F_ONE]] : i8 + %5 = cir.load %1 : cir.ptr , f64 + %6 = cir.cast(float_to_bool, %5 : f64), !cir.bool + // MLIR: %[[#D_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 + // MLIR: %[[#D_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#D_ZERO]] : f64 + // MLIR: %[[#D_ZEXT:]] = llvm.zext %[[#D_BOOL]] : i1 to i8 + %7 = cir.unary(not, %6) : !cir.bool, !cir.bool + // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(1 : i8) : i8 + // MLIR: = llvm.xor %[[#D_ZEXT]], %[[#D_ONE]] : i8 + cir.return + } +} From 2c726993d725a6021dd4bf93a554d524da6664f3 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 12 Jun 2023 09:03:41 -0300 Subject: [PATCH 0997/2301] [CIR][CIRGen][Lowering] Support C values unary negation In C, value negation implicitly casts to int, in C++, it casts to bool. This patch adds support for the C-specific scenario. Since both C and C++ share code gen paths for unary negation, the C cases are handled by casting the boolean type that is already generated for the C++ cases to an int. The following additions were made: - Added C style unary negation test case. - Updated CIR cast to support bool_to_int conversions. - Lowering bool_to_int conversion as a ZExt operation. ghstack-source-id: b721bd66c9d6dad6b0d03d22de21f29cbf97838b Pull Request resolved: https://github.com/llvm/clangir/pull/110 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 ++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 +++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 8 +++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 8 +++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 +++++ clang/test/CIR/CodeGen/unary.c | 28 +++++++++++++++ clang/test/CIR/Lowering/unary-not.cir | 34 +++++++++++++++++++ 7 files changed, 91 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/unary.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index dd2dc709593a..1e502d38c15f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -51,13 +51,15 @@ def CK_FloatToIntegral : I32EnumAttrCase<"float_to_int", 7>; def CK_IntegralToPointer : I32EnumAttrCase<"int_to_ptr", 8>; def CK_PointerToIntegral : I32EnumAttrCase<"ptr_to_int", 9>; def CK_FloatToBoolean : I32EnumAttrCase<"float_to_bool", 10>; +def CK_BooleanToIntegral : I32EnumAttrCase<"bool_to_int", 11>; def CastKind : I32EnumAttr< "CastKind", "cast kind", [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast, CK_BitCast, CK_FloatingCast, CK_PtrToBoolean, CK_FloatToIntegral, - CK_IntegralToPointer, CK_PointerToIntegral, CK_FloatToBoolean]> { + CK_IntegralToPointer, CK_PointerToIntegral, CK_FloatToBoolean, + CK_BooleanToIntegral]> { let cppNamespace = "::mlir::cir"; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 8d4742f38e36..3555b8cfbb44 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -399,6 +399,10 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return src; llvm_unreachable("NYI"); } + + mlir::Value createBoolToInt(mlir::Value src, mlir::Type newTy) { + return createCast(mlir::cir::CastKind::bool_to_int, src, newTy); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 08c37b9364ac..b7ebc9773db5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1293,8 +1293,12 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { // ZExt result to the expr type. auto dstTy = ConvertType(E->getType()); - assert(boolVal.getType() == dstTy && "NYI"); - return boolVal; + if (dstTy.isa()) + return Builder.createBoolToInt(boolVal, dstTy); + if (dstTy.isa()) + return boolVal; + + llvm_unreachable("destination type for negation unary operator is NYI"); } mlir::Value ScalarExprEmitter::buildScalarCast( diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 223b24bb1368..d63d7f548c87 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -12,6 +12,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -322,6 +323,13 @@ LogicalResult CastOp::verify() { return emitOpError() << "requires !cir.bool for result"; return success(); } + case cir::CastKind::bool_to_int: { + if (!srcType.isa()) + return emitOpError() << "requires !cir.bool for source"; + if (!resType.isa()) + return emitOpError() << "requires !cir.int for result"; + return success(); + } } llvm_unreachable("Unknown CastOp kind?"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7f60bbe18dd8..b4a2cd47fb5f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -297,6 +297,14 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { cmpResult); return mlir::success(); } + case mlir::cir::CastKind::bool_to_int: { + auto dstTy = castOp.getType().cast(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } default: llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/unary.c b/clang/test/CIR/CodeGen/unary.c new file mode 100644 index 000000000000..cecd1cf042ec --- /dev/null +++ b/clang/test/CIR/CodeGen/unary.c @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void valueNegation(int i, short s, long l, float f, double d) { +// CHECK: cir.func @valueNegation( + !i; + // CHECK: %[[#INT:]] = cir.load %{{[0-9]+}} : cir.ptr , !s32i + // CHECK: %[[#INT_TO_BOOL:]] = cir.cast(int_to_bool, %[[#INT]] : !s32i), !cir.bool + // CHECK: = cir.unary(not, %[[#INT_TO_BOOL]]) : !cir.bool, !cir.bool + !s; + // CHECK: %[[#SHORT:]] = cir.load %{{[0-9]+}} : cir.ptr , !s16i + // CHECK: %[[#SHORT_TO_BOOL:]] = cir.cast(int_to_bool, %[[#SHORT]] : !s16i), !cir.bool + // CHECK: = cir.unary(not, %[[#SHORT_TO_BOOL]]) : !cir.bool, !cir.bool + !l; + // CHECK: %[[#LONG:]] = cir.load %{{[0-9]+}} : cir.ptr , !s64i + // CHECK: %[[#LONG_TO_BOOL:]] = cir.cast(int_to_bool, %[[#LONG]] : !s64i), !cir.bool + // CHECK: = cir.unary(not, %[[#LONG_TO_BOOL]]) : !cir.bool, !cir.bool + !f; + // CHECK: %[[#FLOAT:]] = cir.load %{{[0-9]+}} : cir.ptr , f32 + // CHECK: %[[#FLOAT_TO_BOOL:]] = cir.cast(float_to_bool, %[[#FLOAT]] : f32), !cir.bool + // CHECK: %[[#FLOAT_NOT:]] = cir.unary(not, %[[#FLOAT_TO_BOOL]]) : !cir.bool, !cir.bool + // CHECK: = cir.cast(bool_to_int, %[[#FLOAT_NOT]] : !cir.bool), !s32i + !d; + // CHECK: %[[#DOUBLE:]] = cir.load %{{[0-9]+}} : cir.ptr , f64 + // CHECK: %[[#DOUBLE_TO_BOOL:]] = cir.cast(float_to_bool, %[[#DOUBLE]] : f64), !cir.bool + // CHECK: %[[#DOUBLE_NOT:]] = cir.unary(not, %[[#DOUBLE_TO_BOOL]]) : !cir.bool, !cir.bool + // CHECK: = cir.cast(bool_to_int, %[[#DOUBLE_NOT]] : !cir.bool), !s32i +} diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index 8374bad7bfd2..c4265252a0ae 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -45,4 +45,38 @@ module { // MLIR: = llvm.xor %[[#D_ZEXT]], %[[#D_ONE]] : i8 cir.return } + + cir.func @CStyleValueNegation(%arg0: !s32i, %arg1: f32) { + // MLIR: llvm.func @CStyleValueNegation + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %3 = cir.alloca f32, cir.ptr , ["f", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.store %arg1, %3 : f32, cir.ptr + + %5 = cir.load %0 : cir.ptr , !s32i + %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool + %7 = cir.unary(not, %6) : !cir.bool, !cir.bool + %8 = cir.cast(bool_to_int, %7 : !cir.bool), !s32i + // MLIR: %[[#INT:]] = llvm.load %{{.+}} : !llvm.ptr + // MLIR: %[[#IZERO:]] = llvm.mlir.constant(0 : i32) : i32 + // MLIR: %[[#ICMP:]] = llvm.icmp "ne" %[[#INT]], %[[#IZERO]] : i32 + // MLIR: %[[#IEXT:]] = llvm.zext %[[#ICMP]] : i1 to i8 + // MLIR: %[[#IONE:]] = llvm.mlir.constant(1 : i8) : i8 + // MLIR: %[[#IXOR:]] = llvm.xor %[[#IEXT]], %[[#IONE]] : i8 + // MLIR: = llvm.zext %[[#IXOR]] : i8 to i32 + + %17 = cir.load %3 : cir.ptr , f32 + %18 = cir.cast(float_to_bool, %17 : f32), !cir.bool + %19 = cir.unary(not, %18) : !cir.bool, !cir.bool + %20 = cir.cast(bool_to_int, %19 : !cir.bool), !s32i + // MLIR: %[[#FLOAT:]] = llvm.load %{{.+}} : !llvm.ptr + // MLIR: %[[#FZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 + // MLIR: %[[#FCMP:]] = llvm.fcmp "une" %[[#FLOAT]], %[[#FZERO]] : f32 + // MLIR: %[[#FEXT:]] = llvm.zext %[[#FCMP]] : i1 to i8 + // MLIR: %[[#FONE:]] = llvm.mlir.constant(1 : i8) : i8 + // MLIR: %[[#FXOR:]] = llvm.xor %[[#FEXT]], %[[#FONE]] : i8 + // MLIR: = llvm.zext %[[#FXOR]] : i8 to i32 + + cir.return + } } From 1d0bf877bee49d33a7cfdb2d6220fa3ea0d85e2d Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Fri, 9 Jun 2023 09:33:25 -0700 Subject: [PATCH 0998/2301] [CIR] Clean up include_directories for subdirectories. Summary: It looks like we can just set up the MLIR include_directories paths in the outermost dirctory of CIR instead of specifying them in each of the subdirectory. --- clang/lib/CIR/CMakeLists.txt | 3 +++ clang/lib/CIR/CodeGen/CMakeLists.txt | 3 --- clang/lib/CIR/Dialect/CMakeLists.txt | 3 --- clang/lib/CIR/FrontendAction/CMakeLists.txt | 3 --- clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt | 3 --- clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt | 3 --- 6 files changed, 3 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 79c980ec020c..41e07837d21d 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -1,3 +1,6 @@ +include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) +include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) + add_subdirectory(Dialect) add_subdirectory(CodeGen) add_subdirectory(FrontendAction) diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index bf9c7ef92db5..7e57b8f798a5 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -4,9 +4,6 @@ set( Support ) -include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) -include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) - get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR diff --git a/clang/lib/CIR/Dialect/CMakeLists.txt b/clang/lib/CIR/Dialect/CMakeLists.txt index 5690e9b2fe61..9f57627c321f 100644 --- a/clang/lib/CIR/Dialect/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/CMakeLists.txt @@ -1,5 +1,2 @@ -include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) -include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) - add_subdirectory(IR) add_subdirectory(Transforms) diff --git a/clang/lib/CIR/FrontendAction/CMakeLists.txt b/clang/lib/CIR/FrontendAction/CMakeLists.txt index 39e9b5e2e7d7..c223383d24cf 100644 --- a/clang/lib/CIR/FrontendAction/CMakeLists.txt +++ b/clang/lib/CIR/FrontendAction/CMakeLists.txt @@ -3,9 +3,6 @@ set(LLVM_LINK_COMPONENTS Support ) -include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) -include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) - get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIRFrontendAction diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index 832c99622394..d44b35cd38c6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -3,9 +3,6 @@ set(LLVM_LINK_COMPONENTS Support ) -include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) -include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) - get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIRLoweringDirectToLLVM diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index dcb7b9e4adb8..b971bb686270 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -3,9 +3,6 @@ set(LLVM_LINK_COMPONENTS Support ) -include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) -include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) - get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIRLoweringThroughMLIR From 879183957d1240fe17f9fb157ba7c6046afdbcf4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 13 Jun 2023 11:47:34 +0300 Subject: [PATCH 0999/2301] [CIR] cir.call: improve indirect call parsing --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d63d7f548c87..9899b7cfd9c2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1663,10 +1663,10 @@ ::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, if (!parser.parseOptionalAttribute(calleeAttr, "callee", result.attributes) .has_value()) { OpAsmParser::UnresolvedOperand indirectVal; - mlir::Type indirectValTy; - if (parser.parseOperand(indirectVal) || - parser.resolveOperand(indirectVal, indirectValTy, result.operands)) + // Do not resolve right now, since we need to figure out the type + if (parser.parseOperand(indirectVal).failed()) return failure(); + ops.push_back(indirectVal); } if (parser.parseLParen()) From 081197b540059d65c385701e05c725fc7f4b6a43 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 13 Jun 2023 11:48:44 +0300 Subject: [PATCH 1000/2301] [CIR][CIRGen] vcall: add remaining bits and add testcase --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 21 ++----------- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 3 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 +++ clang/test/CIR/CodeGen/derived-to-base.cpp | 31 +++++++++++++++++++ 4 files changed, 39 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index a43850f4a74b..54d3036d1459 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -269,29 +269,12 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { // Returns true if a CXXCtorInitializer represents a member initialization // that can be rolled into a memcpy. - // TODO(cir): this could be shared with LLVM codegen. bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const { if (!MemcpyableCtor) return false; - llvm_unreachable("NYI"); - FieldDecl *Field = MemberInit->getMember(); - assert(Field && "No field for member init."); - QualType FieldType = Field->getType(); - CXXConstructExpr *CE = dyn_cast(MemberInit->getInit()); - - // Bail out on non-memcpyable, not-trivially-copyable members. - if (!(CE && isMemcpyEquivalentSpecialMember(CE->getConstructor())) && - !(FieldType.isTriviallyCopyableType(CGF.getContext()) || - FieldType->isReferenceType())) - return false; - - // Bail out on volatile fields. - if (!isMemcpyableField(Field)) - return false; - - // Otherwise we're good. - return true; + assert(!UnimplementedFeature::fieldMemcpyizerBuildMemcpy()); + return false; } public: diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 2f79bc5004da..0c06ba0fa594 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -609,7 +609,8 @@ CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer( loc, VTable, CGF.getBuilder().getPointerTo(TyPtr)); auto VTableSlotPtr = CGF.getBuilder().create( - loc, TyPtr, ::mlir::FlatSymbolRefAttr{}, VTable, + loc, CGF.getBuilder().getPointerTo(TyPtr), + ::mlir::FlatSymbolRefAttr{}, VTable, /*vtable_index=*/0, VTableIndex); VFuncLoad = CGF.getBuilder().createAlignedLoad(loc, TyPtr, VTableSlotPtr, CGF.getPointerAlign()); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9899b7cfd9c2..df660b172e1f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1309,6 +1309,10 @@ LogicalResult cir::VTableAddrPointOp::verify() { if (getName() && getSymAddr()) return emitOpError("should use either a symbol or value, but not both"); + // If not a symbol, stick with the concrete type used for getSymAddr. + if (getSymAddr()) + return success(); + auto resultType = getAddr().getType(); auto fnTy = mlir::cir::FuncType::get( getContext(), {}, diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index feb168387795..50034ce5f9b9 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -5,6 +5,11 @@ typedef enum { RequestFailed = -2004, } enumy; +typedef struct { + const void* samples; + int cound; +} buffy; + class C1 { public: virtual ~C1(); @@ -26,6 +31,7 @@ class C1 { virtual ~Layer() {} }; + virtual enumy SetStuff(enumy e, buffy b); virtual enumy Initialize() = 0; }; @@ -47,6 +53,7 @@ class C2 : public C1 { const C2* m_C1; }; + virtual enumy SetStuff(enumy e, buffy b) override; virtual enumy Initialize() override; }; @@ -91,3 +98,27 @@ enumy C3::Initialize() { // CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %3 = cir.base_class_addr(%2 : cir.ptr ) -> cir.ptr // CHECK: %4 = cir.call @_ZN2C210InitializeEv(%3) : (!cir.ptr) -> !s32i + +void vcall(C1 &c1) { + buffy b; + enumy e; + c1.SetStuff(e, b); +} + +// CHECK: cir.func @_Z5vcallR2C1(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["c1", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22struct2Ebuffy22, cir.ptr , ["b"] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["e"] {alignment = 4 : i64} +// CHECK: %3 = cir.alloca !ty_22struct2Ebuffy22, cir.ptr , ["agg.tmp0"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %4 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.load %2 : cir.ptr , !s32i +// CHECK: cir.call @_ZN5buffyC2ERKS_(%3, %1) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %6 = cir.load %3 : cir.ptr , !ty_22struct2Ebuffy22 +// CHECK: %7 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr, !s32i, !ty_22struct2Ebuffy22)>>>> +// CHECK: %8 = cir.load %7 : cir.ptr , !s32i, !ty_22struct2Ebuffy22)>>>>, !cir.ptr, !s32i, !ty_22struct2Ebuffy22)>>> +// CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_22struct2Ebuffy22)>>>, vtable_index = 0, address_point_index = 2) : cir.ptr , !s32i, !ty_22struct2Ebuffy22)>>> +// CHECK: %10 = cir.load %9 : cir.ptr , !s32i, !ty_22struct2Ebuffy22)>>>, !cir.ptr, !s32i, !ty_22struct2Ebuffy22)>> +// CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_22struct2Ebuffy22)>>, !cir.ptr, !s32i, !ty_22struct2Ebuffy22) -> !s32i +// CHECK: cir.return +// CHECK: } From 1eff9372615bcd0b1066f4ddb1a44fd8f2a77f52 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 13 Jun 2023 16:53:55 +0300 Subject: [PATCH 1001/2301] [CIR][CIRGen] Variadics: handle AST variations Different targets can generate different AST nodes for va_list. Follow the skeleton of LLVM codegen and generalize the approach. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 21 ++++++++++++++------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 20 +++++++++++--------- clang/test/CIR/CodeGen/variadics.c | 4 +++- 3 files changed, 28 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 3a764853b578..eba2e966a7cf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -353,14 +353,12 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // C stdarg builtins. case Builtin::BI__builtin_stdarg_start: case Builtin::BI__builtin_va_start: - case Builtin::BI__va_start: { - auto vaList = buildScalarExpr(E->getArg(0)); - builder.create(vaList.getLoc(), vaList); - return {}; - } + case Builtin::BI__va_start: case Builtin::BI__builtin_va_end: { - auto vaList = buildVAListRef(E->getArg(0)).getPointer(); - builder.create(vaList.getLoc(), vaList); + buildVAStartEnd(BuiltinID == Builtin::BI__va_start + ? buildScalarExpr(E->getArg(0)) + : buildVAListRef(E->getArg(0)).getPointer(), + BuiltinID != Builtin::BI__builtin_va_end); return {}; } case Builtin::BI__builtin_va_copy: { @@ -498,3 +496,12 @@ CIRGenFunction::buildTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, return buildTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue, getTarget().getTriple().getArch()); } + +void CIRGenFunction::buildVAStartEnd(mlir::Value ArgValue, bool IsStart) { + // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this + // early, defer to LLVM lowering. + if (IsStart) + builder.create(ArgValue.getLoc(), ArgValue); + else + builder.create(ArgValue.getLoc(), ArgValue); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index a6ed9aced881..59a6eda3588e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -807,22 +807,24 @@ class CIRGenFunction : public CIRGenTypeCache { // of the expression, depending on how va_list is defined. Address buildVAListRef(const Expr *E); - /// Emits a call to an LLVM variable-argument intrinsic, either - /// \c llvm.va_start or \c llvm.va_end. + /// Emits a CIR variable-argument operation, either + /// \c cir.va.start or \c cir.va.end. + /// /// \param ArgValue A reference to the \c va_list as emitted by either - /// \c EmitVAListRef or \c EmitMSVAListRef. - /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise, - /// calls \c llvm.va_end. - mlir::cir::CallOp buildVAStartEnd(mlir::Value ArgValue, bool IsStart); + /// \c buildVAListRef or \c buildMSVAListRef. + /// + /// \param IsStart If \c true, emits \c cir.va.start, otherwise \c cir.va.end. + void buildVAStartEnd(mlir::Value ArgValue, bool IsStart); /// Generate code to get an argument from the passed in pointer /// and update it accordingly. + /// /// \param VE The \c VAArgExpr for which to generate code. + /// /// \param VAListAddr Receives a reference to the \c va_list as emitted by - /// either \c EmitVAListRef or \c EmitMSVAListRef. + /// either \c buildVAListRef or \c buildMSVAListRef. + /// /// \returns SSA value with the argument. - // FIXME: We should be able to get rid of this method and use the va_arg - // instruction in LLVM instead once it works well enough. mlir::Value buildVAArg(VAArgExpr *VE, Address &VAListAddr); /// Given an expression that represents a value lvalue, this method emits the diff --git a/clang/test/CIR/CodeGen/variadics.c b/clang/test/CIR/CodeGen/variadics.c index 5d4fd447b201..894621b078d9 100644 --- a/clang/test/CIR/CodeGen/variadics.c +++ b/clang/test/CIR/CodeGen/variadics.c @@ -2,6 +2,8 @@ // RUN: FileCheck --input-file=%t.cir %s // RUN: %clang_cc1 -x c++ -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -std=c++20 -triple aarch64-none-linux-android24 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s typedef __builtin_va_list va_list; @@ -10,7 +12,7 @@ typedef __builtin_va_list va_list; #define va_arg(ap, type) __builtin_va_arg(ap, type) #define va_copy(dst, src) __builtin_va_copy(dst, src) -// CHECK: [[VALISTTYPE:!.+va_list_.+]] = !cir.struct<"struct.__va_list_tag" +// CHECK: [[VALISTTYPE:!.+va_list.*]] = !cir.struct<"struct{{.*}}__va_list int average(int count, ...) { // CHECK: cir.func @{{.*}}average{{.*}}(%arg0: !s32i loc({{.+}}), ...) -> !s32i From 92b8fa8cf44b0b51f029b53e45349462b2fcd8e0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 13 Jun 2023 16:55:31 +0300 Subject: [PATCH 1002/2301] [CIR][CIRGen] Locations: handle invalid functions slocs Found by inspection, some frontend synthetized functions might have invalid slocs, account for them. --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 11 ++++++++--- clang/test/CIR/CodeGen/union.cpp | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index b61f8ff4ee6b..ee527168431d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -486,9 +486,14 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, SymTableScopeTy varScope(symbolTable); { - auto FnBeginLoc = getLoc(FD->getBody()->getEndLoc()); - auto FnEndLoc = getLoc(FD->getBody()->getEndLoc()); - SourceLocRAIIObject fnLoc{*this, getLoc(Loc)}; + // Compiler synthetized functions might have invalid slocs... + auto bSrcLoc = FD->getBody()->getBeginLoc(); + auto eSrcLoc = FD->getBody()->getEndLoc(); + auto unknownLoc = builder.getUnknownLoc(); + + auto FnBeginLoc = bSrcLoc.isValid() ? getLoc(bSrcLoc) : unknownLoc; + auto FnEndLoc = eSrcLoc.isValid() ? getLoc(eSrcLoc) : unknownLoc; + SourceLocRAIIObject fnLoc{*this, Loc.isValid() ? getLoc(Loc) : unknownLoc}; assert(Fn.isDeclaration() && "Function already has body?"); mlir::Block *EntryBB = Fn.addEntryBlock(); diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index e4bd31cd38b4..24ec8c00253b 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -22,4 +22,4 @@ void m() { // CHECK: cir.func @_Z1mv() { // CHECK: cir.alloca !ty_22union2Eyolm22, cir.ptr , ["q"] {alignment = 4 : i64} // CHECK: cir.alloca !ty_22union2Eyolm222, cir.ptr , ["q2"] {alignment = 8 : i64} -// CHECK: cir.alloca !ty_22union2Eyolm322, cir.ptr , ["q3"] {alignment = 4 : i64} loc(#loc12) \ No newline at end of file +// CHECK: cir.alloca !ty_22union2Eyolm322, cir.ptr , ["q3"] {alignment = 4 : i64} \ No newline at end of file From 5790387ae67167de1a1f24c04f0f50e417cdc993 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Wed, 14 Jun 2023 09:23:23 -0700 Subject: [PATCH 1003/2301] [CIR][Lowering] Add CIRDialectLLVMIRTranslationInterface to handle CIR attributes to LLVM conversion. Summary: The interface is currently just a placeholder and doesn't do real work. Suppport for specific atttribure is on the way. --- .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 1 + .../DirectToLLVM/LowerAttrToLLVMIR.cpp | 55 +++++++++++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 + 3 files changed, 59 insertions(+) create mode 100644 clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index d44b35cd38c6..809877e09dc1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -6,6 +6,7 @@ set(LLVM_LINK_COMPONENTS get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIRLoweringDirectToLLVM + LowerAttrToLLVMIR.cpp LowerToLLVM.cpp DEPENDS diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp new file mode 100644 index 000000000000..785abb644f2f --- /dev/null +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp @@ -0,0 +1,55 @@ +//====- LowerAttrToLLVMIR.cpp - Lowering CIR attributes to LLVMIR ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements lowering of CIR attributes to LLVMIR. +// +//===----------------------------------------------------------------------===// + +#include "mlir/IR/DialectRegistry.h" +#include "mlir/Target/LLVMIR/LLVMTranslationInterface.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "llvm/ADT/ArrayRef.h" + +using namespace llvm; + +namespace cir { +namespace direct { + +/// Implementation of the dialect interface that converts CIR attributes to LLVM +/// IR metadata. +class CIRDialectLLVMIRTranslationInterface + : public mlir::LLVMTranslationDialectInterface { +public: + using LLVMTranslationDialectInterface::LLVMTranslationDialectInterface; + + /// Any named attribute in the CIR dialect, i.e, with name started with + /// "cir.", will be handled here. + virtual mlir::LogicalResult amendOperation( + mlir::Operation *op, llvm::ArrayRef instructions, + mlir::NamedAttribute attribute, + mlir::LLVM::ModuleTranslation &moduleTranslation) const override { + // TODO: Implement this + return mlir::success(); + } +}; + +void registerCIRDialectTranslation(mlir::DialectRegistry ®istry) { + registry.insert(); + registry.addExtension( + +[](mlir::MLIRContext *ctx, mlir::cir::CIRDialect *dialect) { + dialect->addInterfaces(); + }); +} + +void registerCIRDialectTranslation(mlir::MLIRContext &context) { + mlir::DialectRegistry registry; + registerCIRDialectTranslation(registry); + context.appendDialectRegistry(registry); +} +} // namespace direct +} // namespace cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b4a2cd47fb5f..50743ee1e2f0 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1218,6 +1218,8 @@ std::unique_ptr createConvertCIRToLLVMPass() { return std::make_unique(); } +extern void registerCIRDialectTranslation(mlir::MLIRContext &context); + std::unique_ptr lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, std::unique_ptr mlirCtx, @@ -1242,6 +1244,7 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, mlir::registerBuiltinDialectTranslation(*mlirCtx); mlir::registerLLVMDialectTranslation(*mlirCtx); + registerCIRDialectTranslation(*mlirCtx); auto ModuleName = theModule.getName(); auto llvmModule = mlir::translateModuleToLLVMIR( From b526723e3ca7a2ce7818a59f807aeed2ded114ec Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Wed, 14 Jun 2023 14:02:56 -0700 Subject: [PATCH 1004/2301] [CIR][Lowering] Set linkage type for LLVM functions. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 65 ++++++++++--------- clang/test/CIR/CodeGen/linkage.c | 19 ++++++ 2 files changed, 52 insertions(+), 32 deletions(-) create mode 100644 clang/test/CIR/CodeGen/linkage.c diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 50743ee1e2f0..5dec81069918 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -60,6 +60,34 @@ using namespace llvm; namespace cir { namespace direct { +mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { + using CIR = mlir::cir::GlobalLinkageKind; + using LLVM = mlir::LLVM::Linkage; + + switch (linkage) { + case CIR::AvailableExternallyLinkage: + return LLVM::AvailableExternally; + case CIR::CommonLinkage: + return LLVM::Common; + case CIR::ExternalLinkage: + return LLVM::External; + case CIR::ExternalWeakLinkage: + return LLVM::ExternWeak; + case CIR::InternalLinkage: + return LLVM::Internal; + case CIR::LinkOnceAnyLinkage: + return LLVM::Linkonce; + case CIR::LinkOnceODRLinkage: + return LLVM::LinkonceODR; + case CIR::PrivateLinkage: + return LLVM::Private; + case CIR::WeakAnyLinkage: + return LLVM::Weak; + case CIR::WeakODRLinkage: + return LLVM::WeakODR; + }; +} + class CIRPtrStrideOpLowering : public mlir::OpConversionPattern { public: @@ -284,12 +312,12 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); auto kind = mlir::LLVM::FCmpPredicate::une; - + // Check if float is not equal to zero. auto zeroFloat = rewriter.create( castOp.getLoc(), llvmSrcVal.getType(), mlir::FloatAttr::get(llvmSrcVal.getType(), 0.0)); - + // Extend comparison result to either bool (C++) or int (C). mlir::Value cmpResult = rewriter.create( castOp.getLoc(), kind, llvmSrcVal, zeroFloat); @@ -672,8 +700,9 @@ class CIRFuncLowering : public mlir::OpConversionPattern { Loc = FusedLoc.getLocations()[0]; } assert(Loc.isa() && "expected single location here"); - auto fn = - rewriter.create(Loc, op.getName(), llvmFnTy); + auto linkage = convertLinkage(op.getLinkage()); + auto fn = rewriter.create(Loc, op.getName(), + llvmFnTy, linkage); rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, @@ -719,34 +748,6 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, return std::nullopt; } -mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { - using CIR = mlir::cir::GlobalLinkageKind; - using LLVM = mlir::LLVM::Linkage; - - switch (linkage) { - case CIR::AvailableExternallyLinkage: - return LLVM::AvailableExternally; - case CIR::CommonLinkage: - return LLVM::Common; - case CIR::ExternalLinkage: - return LLVM::External; - case CIR::ExternalWeakLinkage: - return LLVM::ExternWeak; - case CIR::InternalLinkage: - return LLVM::Internal; - case CIR::LinkOnceAnyLinkage: - return LLVM::Linkonce; - case CIR::LinkOnceODRLinkage: - return LLVM::LinkonceODR; - case CIR::PrivateLinkage: - return LLVM::Private; - case CIR::WeakAnyLinkage: - return LLVM::Weak; - case CIR::WeakODRLinkage: - return LLVM::WeakODR; - }; -} - class CIRGetGlobalOpLowering : public mlir::OpConversionPattern { public: diff --git a/clang/test/CIR/CodeGen/linkage.c b/clang/test/CIR/CodeGen/linkage.c new file mode 100644 index 000000000000..ac5f31fd7564 --- /dev/null +++ b/clang/test/CIR/CodeGen/linkage.c @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + + +static int bar(int i) { + return i; +} + +int foo() { + return bar(5); +} + +// CIR: cir.func internal private @bar( +// CIR: cir.func @foo( + +// LLVM: define internal i32 @bar( +// LLVM: define i32 @foo( From 7239c5936b810202e8a137f7935bc8b16ef9a8df Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Thu, 15 Jun 2023 15:30:02 -0700 Subject: [PATCH 1005/2301] [CIR] Share CIR pass pipeline for all emission modes. Summary: Previously CIR pipeline is only avaliable for EmitCIR mode. I'm moving it out and sharing it with all other emission modes. --- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 68 +++++++++---------- clang/test/CIR/CodeGen/basic.c | 62 ++++++++++------- 2 files changed, 70 insertions(+), 60 deletions(-) diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 5e1b16c151c6..a97ef0d36722 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -190,43 +190,43 @@ class CIRGenConsumer : public clang::ASTConsumer { } }; + if (!feOptions.ClangIRDisablePasses) { + // Handle source manager properly given that lifetime analysis + // might emit warnings and remarks. + auto &clangSourceMgr = C.getSourceManager(); + FileID MainFileID = clangSourceMgr.getMainFileID(); + + std::unique_ptr FileBuf = + llvm::MemoryBuffer::getMemBuffer( + clangSourceMgr.getBufferOrFake(MainFileID)); + + llvm::SourceMgr mlirSourceMgr; + mlirSourceMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); + + if (feOptions.ClangIRVerifyDiags) { + mlir::SourceMgrDiagnosticVerifierHandler sourceMgrHandler( + mlirSourceMgr, mlirCtx.get()); + mlirCtx->printOpOnDiagnostic(false); + setupCIRPipelineAndExecute(); + + // Verify the diagnostic handler to make sure that each of the + // diagnostics matched. + if (sourceMgrHandler.verify().failed()) { + // FIXME: we fail ungracefully, there's probably a better way + // to communicate non-zero return so tests can actually fail. + llvm::sys::RunInterruptHandlers(); + exit(1); + } + } else { + mlir::SourceMgrDiagnosticHandler sourceMgrHandler(mlirSourceMgr, + mlirCtx.get()); + setupCIRPipelineAndExecute(); + } + } + switch (action) { case CIRGenAction::OutputType::EmitCIR: if (outputStream && mlirMod) { - if (!feOptions.ClangIRDisablePasses) { - // Handle source manager properly given that lifetime analysis - // might emit warnings and remarks. - auto &clangSourceMgr = C.getSourceManager(); - FileID MainFileID = clangSourceMgr.getMainFileID(); - - std::unique_ptr FileBuf = - llvm::MemoryBuffer::getMemBuffer( - clangSourceMgr.getBufferOrFake(MainFileID)); - - llvm::SourceMgr mlirSourceMgr; - mlirSourceMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); - - if (feOptions.ClangIRVerifyDiags) { - mlir::SourceMgrDiagnosticVerifierHandler sourceMgrHandler( - mlirSourceMgr, mlirCtx.get()); - mlirCtx->printOpOnDiagnostic(false); - setupCIRPipelineAndExecute(); - - // Verify the diagnostic handler to make sure that each of the - // diagnostics matched. - if (sourceMgrHandler.verify().failed()) { - // FIXME: we fail ungracefully, there's probably a better way - // to communicate non-zero return so tests can actually fail. - llvm::sys::RunInterruptHandlers(); - exit(1); - } - } else { - mlir::SourceMgrDiagnosticHandler sourceMgrHandler(mlirSourceMgr, - mlirCtx.get()); - setupCIRPipelineAndExecute(); - } - } - // Emit remaining defaulted C++ methods if (!feOptions.ClangIRDisableEmitCXXDefault) gen->buildDefaultMethods(); diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 941563e9cc73..2e99d6e9731f 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM int foo(int i); @@ -8,37 +10,45 @@ int foo(int i) { return i; } -// CHECK: module @"{{.*}}basic.c" attributes { -// CHECK-NEXT: cir.func @foo(%arg0: !s32i loc({{.*}})) -> !s32i { -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i -// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !s32i -// CHECK-NEXT: cir.store %3, %1 : !s32i, cir.ptr -// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , !s32i -// CHECK-NEXT: cir.return %4 : !s32i +// CIR: module @"{{.*}}basic.c" attributes { +// CIR-NEXT: cir.func @foo(%arg0: !s32i loc({{.*}})) -> !s32i { +// CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CIR-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CIR-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr +// CIR-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CIR-NEXT: %3 = cir.load %0 : cir.ptr , !s32i +// CIR-NEXT: cir.store %3, %1 : !s32i, cir.ptr +// CIR-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CIR-NEXT: cir.return %4 : !s32i int f2() { return 3; } -// CHECK: cir.func @f2() -> !s32i { -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i -// CHECK-NEXT: cir.return %2 : !s32i +// CIR: cir.func @f2() -> !s32i { +// CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CIR-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i +// CIR-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CIR-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CIR-NEXT: cir.return %2 : !s32i + +// LLVM: define i32 @f2() +// LLVM-NEXT: %1 = alloca i32, i64 1, align 4 +// LLVM-NEXT: store i32 3, ptr %1, align 4 +// LLVM-NEXT: %2 = load i32, ptr %1, align 4 +// LLVM-NEXT: ret i32 %2 + + int f3() { int i = 3; return i; } -// CHECK: cir.func @f3() -> !s32i { -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK-NEXT: cir.store %2, %1 : !s32i, cir.ptr -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i -// CHECK-NEXT: cir.store %3, %0 : !s32i, cir.ptr -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i -// CHECK-NEXT: cir.return %4 : !s32i +// CIR: cir.func @f3() -> !s32i { +// CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CIR-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CIR-NEXT: %2 = cir.const(#cir.int<3> : !s32i) : !s32i +// CIR-NEXT: cir.store %2, %1 : !s32i, cir.ptr +// CIR-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CIR-NEXT: cir.store %3, %0 : !s32i, cir.ptr +// CIR-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CIR-NEXT: cir.return %4 : !s32i From 2bd0097a3343a0b8835c48ab9888a5e7270833f4 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Tue, 20 Jun 2023 09:42:47 -0700 Subject: [PATCH 1006/2301] [CIR] Enable per-pass IR printing Summary: Adding support to print IR after each pass. Example usage: -mmlir --mlir-print-ir-after-all // -----// IR Dump After MergeCleanups (cir-merge-cleanups) //----- // !s32i = !cir.int module @"/home/hoy/src/clangir/clang/test/CIR/CodeGen/mlirargs.c" attributes {cir.sob = #cir.signed_overflow_behavior, dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<4xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry, dense<32> : vector<4xi32>>, #dlti.dl_entry, dense<32> : vector<4xi32>>, #dlti.dl_entry, dense<64> : vector<4xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry<"dlti.stack_alignment", 128 : i32>, #dlti.dl_entry<"dlti.endianness", "little">>, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"} { cir.func @f3() -> !s32i attributes {ast = #cir.fndecl.ast} { %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} %1 = cir.alloca !s32i, cir.ptr , ["i", init] ast #cir.vardecl.ast {alignment = 4 : i64} %2 = cir.const(#cir.int<3> : !s32i) : !s32i cir.store %2, %1 : !s32i, cir.ptr %3 = cir.load %1 : cir.ptr , !s32i cir.store %3, %0 : !s32i, cir.ptr %4 = cir.load %0 : cir.ptr , !s32i cir.return %4 : !s32i } } // -----// IR Dump After DropAST (cir-drop-ast) //----- // !s32i = !cir.int module @"/home/hoy/src/clangir/clang/test/CIR/CodeGen/mlirargs.c" attributes {cir.sob = #cir.signed_overflow_behavior, dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<4xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry, dense<32> : vector<4xi32>>, #dlti.dl_entry, dense<32> : vector<4xi32>>, #dlti.dl_entry, dense<64> : vector<4xi32>>, #dlti.dl_entry : vector<2xi32>>, #dlti.dl_entry<"dlti.stack_alignment", 128 : i32>, #dlti.dl_entry<"dlti.endianness", "little">>, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"} { cir.func @f3() -> !s32i { %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} %2 = cir.const(#cir.int<3> : !s32i) : !s32i cir.store %2, %1 : !s32i, cir.ptr %3 = cir.load %1 : cir.ptr , !s32i cir.store %3, %0 : !s32i, cir.ptr %4 = cir.load %0 : cir.ptr , !s32i cir.return %4 : !s32i } } --- clang/lib/CIR/CodeGen/CIRPasses.cpp | 2 +- clang/lib/FrontendTool/CMakeLists.txt | 2 ++ .../lib/FrontendTool/ExecuteCompilerInvocation.cpp | 2 ++ clang/test/CIR/CodeGen/mlirprint.c | 13 +++++++++++++ 4 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/mlirprint.c diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 31a3e61c8020..df65bfaf3a6b 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -49,7 +49,7 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, // need to run this right before dialect emission. pm.addPass(mlir::createDropASTPass()); pm.enableVerifier(enableVerifier); - + (void)mlir::applyPassManagerCLOptions(pm); return pm.run(theModule); } } // namespace cir diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index 7aeaba7f31b6..e475f59eb7cf 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -16,8 +16,10 @@ if(CLANG_ENABLE_CIR) list(APPEND link_libs clangCIRFrontendAction MLIRIR + MLIRPass ) include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) + include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) endif() if(CLANG_ENABLE_ARCMT) diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index cf8fae2a4ca8..5ae02ca4d28f 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -34,6 +34,7 @@ #if CLANG_ENABLE_CIR #include "mlir/IR/MLIRContext.h" +#include "mlir/Pass/PassManager.h" #include "clang/CIRFrontendAction/CIRGenAction.h" #endif @@ -322,6 +323,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) { #if CLANG_ENABLE_CIR if (!Clang->getFrontendOpts().MLIRArgs.empty()) { mlir::registerMLIRContextCLOptions(); + mlir::registerPassManagerCLOptions(); unsigned NumArgs = Clang->getFrontendOpts().MLIRArgs.size(); auto Args = std::make_unique(NumArgs + 2); Args[0] = "clang (MLIR option parsing)"; diff --git a/clang/test/CIR/CodeGen/mlirprint.c b/clang/test/CIR/CodeGen/mlirprint.c new file mode 100644 index 000000000000..09c4f9187ec3 --- /dev/null +++ b/clang/test/CIR/CodeGen/mlirprint.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s +// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.ll 2>&1 | FileCheck %s + +int foo() { + int i = 3; + return i; +} + + +// CHECK: IR Dump After MergeCleanups (cir-merge-cleanups) +// cir.func @foo() -> !s32i +// CHECK: IR Dump After DropAST (cir-drop-ast) +// cir.func @foo() -> !s32i From 585329a60a2b1ddfda3df2f83249bccde6a3f475 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 23 Jun 2023 18:01:29 -0700 Subject: [PATCH 1007/2301] [CIR][CIRGen][NFC] Add more boilerplate to Handle pass_object_size Still crashes, this adds initial part of the logic, testcase coming on passing test. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 49 ++++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 75dc761b167d..0b6488b9ce3a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -765,8 +765,36 @@ static CanQual GetFormalType(const CXXMethodDecl *MD) { .getAs(); } +/// TODO(cir): this should be shared with LLVM codegen +static void addExtParameterInfosForCall( + llvm::SmallVectorImpl ¶mInfos, + const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) { + assert(proto->hasExtParameterInfos()); + assert(paramInfos.size() <= prefixArgs); + assert(proto->getNumParams() + prefixArgs <= totalArgs); + + paramInfos.reserve(totalArgs); + + // Add default infos for any prefix args that don't already have infos. + paramInfos.resize(prefixArgs); + + // Add infos for the prototype. + for (const auto &ParamInfo : proto->getExtParameterInfos()) { + paramInfos.push_back(ParamInfo); + // pass_object_size params have no parameter info. + if (ParamInfo.hasPassObjectSize()) + paramInfos.emplace_back(); + } + + assert(paramInfos.size() <= totalArgs && + "Did we forget to insert pass_object_size args?"); + // Add default infos for the variadic and/or suffix arguments. + paramInfos.resize(totalArgs); +} + /// Adds the formal parameters in FPT to the given prefix. If any parameter in /// FPT has pass_object_size_attrs, then we'll add parameters for those, too. +/// TODO(cir): this should be shared with LLVM codegen static void appendParameterTypes( const CIRGenTypes &CGT, SmallVectorImpl &prefix, SmallVectorImpl ¶mInfos, @@ -779,7 +807,22 @@ static void appendParameterTypes( return; } - assert(false && "params NYI"); + unsigned PrefixSize = prefix.size(); + // In the vast majority of cases, we'll have precisely FPT->getNumParams() + // parameters; the only thing that can change this is the presence of + // pass_object_size. So, we preallocate for the common case. + prefix.reserve(prefix.size() + FPT->getNumParams()); + + auto ExtInfos = FPT->getExtParameterInfos(); + assert(ExtInfos.size() == FPT->getNumParams()); + for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { + prefix.push_back(FPT->getParamType(I)); + if (ExtInfos[I].hasPassObjectSize()) + prefix.push_back(CGT.getContext().getSizeType()); + } + + addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize, + prefix.size()); } const CIRGenFunctionInfo & @@ -999,7 +1042,9 @@ arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, if (proto->isVariadic()) required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs); - assert(!proto->hasExtParameterInfos() && "extparameterinfos NYI"); + if (proto->hasExtParameterInfos()) + addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, + args.size()); } else { assert(!llvm::isa(fnType) && "FunctionNoProtoType NYI"); From 25c498e1cd85c23b16e0ba75c1e4236100544fda Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 23 Jun 2023 18:04:30 -0700 Subject: [PATCH 1008/2301] [CIR] Add missing test from improvement to indirect call parsing --- clang/test/CIR/IR/call.cir | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 clang/test/CIR/IR/call.cir diff --git a/clang/test/CIR/IR/call.cir b/clang/test/CIR/IR/call.cir new file mode 100644 index 000000000000..857614f33a61 --- /dev/null +++ b/clang/test/CIR/IR/call.cir @@ -0,0 +1,13 @@ +// RUN: cir-tool %s | FileCheck %s + +!s32i = !cir.int +!fnptr = !cir.ptr)>> + +module { + cir.func @ind(%fnptr: !fnptr, %a : !s32i) { + %r = cir.call %fnptr(%a) : (!fnptr, !s32i) -> !s32i + cir.return + } +} + +// CHECK: %0 = cir.call %arg0(%arg1) : (!cir.ptr)>>, !s32i) -> !s32i \ No newline at end of file From 613199291213854135fc56e71f0274cd5576b8bd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 23 Jun 2023 19:24:11 -0700 Subject: [PATCH 1009/2301] [CIR][CIRGen][NFC] Add more constant int generation helpers --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 39 ++++++++++++++++++------- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 4 +-- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 3555b8cfbb44..3438b5cfff70 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -148,15 +148,30 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Type helpers // ------------ // - mlir::Type getSInt8Ty() { return typeCache.SInt8Ty; } - mlir::Type getSInt16Ty() { return typeCache.SInt16Ty; } - mlir::Type getSInt32Ty() { return typeCache.SInt32Ty; } - mlir::Type getSInt64Ty() { return typeCache.SInt64Ty; } + mlir::cir::IntType getUIntNTy(int N) { + switch (N) { + case 8: + return getUInt8Ty(); + case 16: + return getUInt16Ty(); + case 32: + return getUInt32Ty(); + case 64: + return getUInt64Ty(); + default: + llvm_unreachable("Unknown bit-width"); + } + } - mlir::Type getUInt8Ty() { return typeCache.UInt8Ty; } - mlir::Type getUInt16Ty() { return typeCache.UInt16Ty; } - mlir::Type getUInt32Ty() { return typeCache.UInt32Ty; } - mlir::Type getUInt64Ty() { return typeCache.UInt64Ty; } + mlir::cir::IntType getSInt8Ty() { return typeCache.SInt8Ty; } + mlir::cir::IntType getSInt16Ty() { return typeCache.SInt16Ty; } + mlir::cir::IntType getSInt32Ty() { return typeCache.SInt32Ty; } + mlir::cir::IntType getSInt64Ty() { return typeCache.SInt64Ty; } + + mlir::cir::IntType getUInt8Ty() { return typeCache.UInt8Ty; } + mlir::cir::IntType getUInt16Ty() { return typeCache.UInt16Ty; } + mlir::cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; } + mlir::cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; } bool isInt8Ty(mlir::Type i) { return i == typeCache.UInt8Ty || i == typeCache.SInt8Ty; @@ -211,16 +226,20 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return create(loc, uInt32Ty, mlir::cir::IntAttr::get(uInt32Ty, C)); } - mlir::cir::ConstantOp getSInt64(uint32_t C, mlir::Location loc) { + mlir::cir::ConstantOp getSInt64(uint64_t C, mlir::Location loc) { auto sInt64Ty = getSInt64Ty(); return create(loc, sInt64Ty, mlir::cir::IntAttr::get(sInt64Ty, C)); } - mlir::cir::ConstantOp getUInt64(uint32_t C, mlir::Location loc) { + mlir::cir::ConstantOp getUInt64(uint64_t C, mlir::Location loc) { auto uInt64Ty = getUInt64Ty(); return create(loc, uInt64Ty, mlir::cir::IntAttr::get(uInt64Ty, C)); } + mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::cir::IntType t, + uint64_t C) { + return create(loc, t, mlir::cir::IntAttr::get(t, C)); + } mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), getCIRBoolAttr(state)); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index dce86ec80255..eb522604eecd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -29,9 +29,9 @@ struct CIRGenTypeCache { /// void mlir::Type VoidTy; // char, int, short, long - mlir::Type SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty; + mlir::cir::IntType SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty; // usigned char, unsigned, unsigned short, unsigned long - mlir::Type UInt8Ty, UInt16Ty, UInt32Ty, UInt64Ty; + mlir::cir::IntType UInt8Ty, UInt16Ty, UInt32Ty, UInt64Ty; /// half, bfloat, float, double // mlir::Type HalfTy, BFloatTy; mlir::Type FloatTy, DoubleTy; From 0eaebd5da6934119c1b2b548f00577b6e806f5b2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 23 Jun 2023 20:03:30 -0700 Subject: [PATCH 1010/2301] [CIR][CIRGen] Add target sanity checks and cleanup buildCall a bit --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 58 +++++++------ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 81 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 + .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 4 files changed, 117 insertions(+), 26 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 0b6488b9ce3a..dcff58d3d696 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -16,7 +16,9 @@ #include "CIRGenFunction.h" #include "CIRGenFunctionInfo.h" #include "CIRGenTypes.h" +#include "TargetInfo.h" +#include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/GlobalDecl.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" @@ -323,25 +325,27 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, mlir::cir::FuncType CIRFuncTy = getTypes().GetFunctionType(CallInfo); const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); - // This is not always tied to a FunctionDecl (e.g. builtins that are xformed // into calls to other functions) - const FunctionDecl *FD = dyn_cast_or_null(TargetDecl); - - // We can only guarantee that a function is called from the correct - // context/function based on the appropriate target attributes, so only check - // in hte case where we have both always_inline and target since otherwise we - // could be making a conditional call after a check for the proper cpu - // features (and it won't cause code generation issues due to function based - // code generation). - assert((!TargetDecl || !TargetDecl->hasAttr()) && "NYI"); - assert((!TargetDecl || !TargetDecl->hasAttr()) && "NYI"); - - // Some architectures (such as x86-64) have the ABI changed based on - // attribute-target/features. Give them a chance to diagnose. - // TODO: support this eventually, just assume the trivial result for now - // !CGM.getTargetCIRGenInfo().checkFunctionCallABI( - // CGM, Loc, dyn_cast_or_null(CurCodeDecl), FD, CallArgs); + if (const FunctionDecl *FD = dyn_cast_or_null(TargetDecl)) { + // We can only guarantee that a function is called from the correct + // context/function based on the appropriate target attributes, + // so only check in the case where we have both always_inline and target + // since otherwise we could be making a conditional call after a check for + // the proper cpu features (and it won't cause code generation issues due to + // function based code generation). + if (TargetDecl->hasAttr() && + (TargetDecl->hasAttr() || + (CurFuncDecl && CurFuncDecl->hasAttr()))) { + // FIXME(cir): somehow refactor this function to use SourceLocation? + SourceLocation Loc; + checkTargetFeatures(Loc, FD); + } + + // Some architectures (such as x86-64) have the ABI changed based on + // attribute-target/features. Give them a chance to diagnose. + assert(!UnimplementedFeature::checkFunctionCallABI()); + } // TODO: add DNEBUG code @@ -482,20 +486,22 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: Update the largest vector width if any arguments have vector types. // TODO: Compute the calling convention and attributes. - assert((!FD || !FD->hasAttr()) && "NYI"); + if (const FunctionDecl *FD = dyn_cast_or_null(CurFuncDecl)) { + assert(!FD->hasAttr() && "NYI"); - // TODO: InNoMergeAttributedStmt - // assert(!CurCodeDecl->hasAttr() && - // !TargetDecl->hasAttr() && "NYI"); + // TODO: InNoMergeAttributedStmt + // assert(!CurCodeDecl->hasAttr() && + // !TargetDecl->hasAttr() && "NYI"); - // TODO: isSEHTryScope + // TODO: isSEHTryScope - // TODO: currentFunctionUsesSEHTry - // TODO: isCleanupPadScope + // TODO: currentFunctionUsesSEHTry + // TODO: isCleanupPadScope - // TODO: UnusedReturnSizePtr + // TODO: UnusedReturnSizePtr - assert((!FD || !FD->hasAttr()) && "NYI"); + assert(!FD->hasAttr() && "NYI"); + } // TODO: alignment attributes diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index ee527168431d..8fafb377e1dd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -16,9 +16,11 @@ #include "clang/AST/ASTLambda.h" #include "clang/AST/ExprObjC.h" +#include "clang/Basic/Builtins.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/FPEnv.h" +#include "clang/Frontend/FrontendDiagnostic.h" #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -1266,3 +1268,82 @@ Address CIRGenFunction::buildVAListRef(const Expr* E) { return buildPointerWithAlignment(E); return buildLValue(E).getAddress(); } + +// Emits an error if we don't have a valid set of target features for the +// called function. +void CIRGenFunction::checkTargetFeatures(const CallExpr *E, + const FunctionDecl *TargetDecl) { + return checkTargetFeatures(E->getBeginLoc(), TargetDecl); +} + +// Emits an error if we don't have a valid set of target features for the +// called function. +void CIRGenFunction::checkTargetFeatures(SourceLocation Loc, + const FunctionDecl *TargetDecl) { + // Early exit if this is an indirect call. + if (!TargetDecl) + return; + + // Get the current enclosing function if it exists. If it doesn't + // we can't check the target features anyhow. + const FunctionDecl *FD = dyn_cast_or_null(CurCodeDecl); + if (!FD) + return; + + // Grab the required features for the call. For a builtin this is listed in + // the td file with the default cpu, for an always_inline function this is any + // listed cpu and any listed features. + unsigned BuiltinID = TargetDecl->getBuiltinID(); + std::string MissingFeature; + llvm::StringMap CallerFeatureMap; + CGM.getASTContext().getFunctionFeatureMap(CallerFeatureMap, FD); + if (BuiltinID) { + StringRef FeatureList( + getContext().BuiltinInfo.getRequiredFeatures(BuiltinID)); + if (!Builtin::evaluateRequiredTargetFeatures(FeatureList, + CallerFeatureMap)) { + CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature) + << TargetDecl->getDeclName() << FeatureList; + } + } else if (!TargetDecl->isMultiVersion() && + TargetDecl->hasAttr()) { + // Get the required features for the callee. + + const TargetAttr *TD = TargetDecl->getAttr(); + ParsedTargetAttr ParsedAttr = getContext().filterFunctionTargetAttrs(TD); + + SmallVector ReqFeatures; + llvm::StringMap CalleeFeatureMap; + getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl); + + for (const auto &F : ParsedAttr.Features) { + if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1))) + ReqFeatures.push_back(StringRef(F).substr(1)); + } + + for (const auto &F : CalleeFeatureMap) { + // Only positive features are "required". + if (F.getValue()) + ReqFeatures.push_back(F.getKey()); + } + if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) { + if (!CallerFeatureMap.lookup(Feature)) { + MissingFeature = Feature.str(); + return false; + } + return true; + })) + CGM.getDiags().Report(Loc, diag::err_function_needs_feature) + << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature; + } else if (!FD->isMultiVersion() && FD->hasAttr()) { + llvm::StringMap CalleeFeatureMap; + getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl); + + for (const auto &F : CalleeFeatureMap) { + if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) || + !CallerFeatureMap.find(F.getKey())->getValue())) + CGM.getDiags().Report(Loc, diag::err_function_needs_feature) + << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey(); + } + } +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 59a6eda3588e..adfa7738d561 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -861,6 +861,9 @@ class CIRGenFunction : public CIRGenTypeCache { AbstractCallee AC = AbstractCallee(), unsigned ParamsToSkip = 0, EvaluationOrder Order = EvaluationOrder::Default); + void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl); + void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl); + /// Generate a call of the given function, expecting the given /// result type, and using the given argument list which specifies both the /// LLVM arguments and the types they were derived from. diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index c20e35a5744f..46fdde1e17a5 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -105,6 +105,7 @@ struct UnimplementedFeature { static bool requiresCleanups() { return false; } static bool constantFoldsToSimpleInteger() { return false; } static bool alignedLoad() { return false; } + static bool checkFunctionCallABI() { return false; } }; } // namespace cir From 62d4ce11aa7c0b581a05cef88c759187410a3212 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 23 Jun 2023 19:24:41 -0700 Subject: [PATCH 1011/2301] [CIR][CIRGen][NFC] Pave more logic for implicit object size emission Still incomplete and assert on the relevant path. We still need to codegen for `__builtin_object_size`. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 26 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 32 ++++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 6 ++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 20 ++++++++++++--- 4 files changed, 78 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index eba2e966a7cf..7bf02d109889 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -504,4 +504,30 @@ void CIRGenFunction::buildVAStartEnd(mlir::Value ArgValue, bool IsStart) { builder.create(ArgValue.getLoc(), ArgValue); else builder.create(ArgValue.getLoc(), ArgValue); +} + +/// Returns a Value corresponding to the size of the given expression. +/// This Value may be either of the following: +/// +/// - In LLVM: a llvm::Argument (if E is a param with the pass_object_size +/// attribute on it), CIR: TBD +/// - A call to a `cir.object_size`. +/// +/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null +/// and we wouldn't otherwise try to reference a pass_object_size parameter, +/// we'll call `cir.object_size` on EmittedE, rather than emitting E. +mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, + mlir::cir::IntType ResType, + mlir::Value EmittedE, + bool IsDynamic) { + llvm_unreachable("NYI"); +} + +mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize( + const Expr *E, unsigned Type, mlir::cir::IntType ResType, + mlir::Value EmittedE, bool IsDynamic) { + uint64_t ObjectSize; + if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) + return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic); + return builder.getConstInt(getLoc(E->getSourceRange()), ResType, ObjectSize); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index dcff58d3d696..c563180029fb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -736,6 +736,27 @@ void CIRGenFunction::buildCallArgs( "MSABI NYI"); assert(!hasInAllocaArgs(CGM, ExplicitCC, ArgTypes) && "NYI"); + auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, + RValue EmittedArg) { + if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) + return; + auto *PS = AC.getParamDecl(I)->getAttr(); + if (PS == nullptr) + return; + + const auto &Context = getContext(); + auto SizeTy = Context.getSizeType(); + auto T = builder.getUIntNTy(Context.getTypeSize(SizeTy)); + assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?"); + auto V = evaluateOrEmitBuiltinObjectSize( + Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic()); + Args.add(RValue::get(V), SizeTy); + // If we're emitting args in reverse, be sure to do so with + // pass_object_size, as well. + if (!LeftToRight) + std::swap(Args.back(), *(&Args.back() - 1)); + }; + // Evaluate each argument in the appropriate order. size_t CallArgsStart = Args.size(); for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { @@ -753,8 +774,15 @@ void CIRGenFunction::buildCallArgs( (void)InitialArgSize; // Since pointer argument are never emitted as LValue, it is safe to emit // non-null argument check for r-value only. - assert(!SanOpts.has(SanitizerKind::NonnullAttribute) && "Sanitizers NYI"); - assert(!SanOpts.has(SanitizerKind::NullabilityArg) && "Sanitizers NYI"); + if (!Args.back().hasLValue()) { + RValue RVArg = Args.back().getKnownRValue(); + assert(!SanOpts.has(SanitizerKind::NonnullAttribute) && "Sanitizers NYI"); + assert(!SanOpts.has(SanitizerKind::NullabilityArg) && "Sanitizers NYI"); + // @llvm.objectsize should never have side-effects and shouldn't need + // destruction/cleanups, so we can safely "emit" it after its arg, + // regardless of right-to-leftness + MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg); + } } if (!LeftToRight) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 8fafb377e1dd..d8be6cd69cfa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1131,7 +1131,11 @@ clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl GD, if (!Param->hasAttr()) continue; - llvm_unreachable("PassObjectSizeAttr NYI"); + auto *Implicit = ImplicitParamDecl::Create( + getContext(), Param->getDeclContext(), Param->getLocation(), + /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other); + SizeArguments[Param] = Implicit; + Args.push_back(Implicit); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index adfa7738d561..41b975cd7be3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -803,6 +803,12 @@ class CIRGenFunction : public CIRGenTypeCache { RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation Loc); + /// If a ParmVarDecl had the pass_object_size attribute, this + /// will contain a mapping from said ParmVarDecl to its implicit "object_size" + /// parameter. + llvm::SmallDenseMap + SizeArguments; + // Build a "reference" to a va_list; this is either the address or the value // of the expression, depending on how va_list is defined. Address buildVAListRef(const Expr *E); @@ -827,9 +833,17 @@ class CIRGenFunction : public CIRGenTypeCache { /// \returns SSA value with the argument. mlir::Value buildVAArg(VAArgExpr *VE, Address &VAListAddr); - /// Given an expression that represents a value lvalue, this method emits the - /// address of the lvalue, then loads the result as an rvalue, returning the - /// rvalue. + mlir::Value emitBuiltinObjectSize(const Expr *E, unsigned Type, + mlir::cir::IntType ResType, + mlir::Value EmittedE, bool IsDynamic); + mlir::Value evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, + mlir::cir::IntType ResType, + mlir::Value EmittedE, + bool IsDynamic); + + /// Given an expression that represents a value lvalue, this method emits + /// the address of the lvalue, then loads the result as an rvalue, + /// returning the rvalue. RValue buildLoadOfLValue(LValue LV, SourceLocation Loc); mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, clang::SourceLocation Loc, From bdc4a38432d93726fc17261395402a075fc7e23f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 23 Jun 2023 20:35:28 -0700 Subject: [PATCH 1012/2301] [CIR][CIRGen] Add machinery for emitting builtin lib functions --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 77 ++++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 5 ++ clang/test/CIR/CodeGen/libcall.cpp | 20 +++++++ 3 files changed, 100 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/libcall.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 7bf02d109889..22f79e230682 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -340,7 +340,6 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, switch (BuiltinIDIfNoAsmLabel) { default: - llvm_unreachable("NYI"); break; case Builtin::BIprintf: @@ -416,7 +415,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // the call using the normal call path, but using the unmangled // version of the function name. if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) - llvm_unreachable("NYI"); + return buildLibraryCall(*this, FD, E, + CGM.getBuiltinLibFunction(FD, BuiltinID)); // If this is a predefined lib function (e.g. malloc), emit the call // using exactly the normal call path. @@ -530,4 +530,77 @@ mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize( if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic); return builder.getConstInt(getLoc(E->getSourceRange()), ResType, ObjectSize); +} + +/// Given a builtin id for a function like "__builtin_fabsf", return a Function* +/// for "fabsf". +mlir::cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *FD, + unsigned BuiltinID) { + assert(astCtx.BuiltinInfo.isLibFunction(BuiltinID)); + + // Get the name, skip over the __builtin_ prefix (if necessary). + StringRef Name; + GlobalDecl D(FD); + + // TODO: This list should be expanded or refactored after all GCC-compatible + // std libcall builtins are implemented. + static SmallDenseMap F128Builtins{ + {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"}, + {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"}, + {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"}, + {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"}, + {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"}, + {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"}, + {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"}, + {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"}, + {Builtin::BI__builtin_fprintf, "__fprintfieee128"}, + {Builtin::BI__builtin_printf, "__printfieee128"}, + {Builtin::BI__builtin_snprintf, "__snprintfieee128"}, + {Builtin::BI__builtin_sprintf, "__sprintfieee128"}, + {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"}, + {Builtin::BI__builtin_vprintf, "__vprintfieee128"}, + {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"}, + {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"}, + {Builtin::BI__builtin_fscanf, "__fscanfieee128"}, + {Builtin::BI__builtin_scanf, "__scanfieee128"}, + {Builtin::BI__builtin_sscanf, "__sscanfieee128"}, + {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"}, + {Builtin::BI__builtin_vscanf, "__vscanfieee128"}, + {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"}, + {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"}, + }; + + // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit + // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions + // if it is 64-bit 'long double' mode. + static SmallDenseMap AIXLongDouble64Builtins{ + {Builtin::BI__builtin_frexpl, "frexp"}, + {Builtin::BI__builtin_ldexpl, "ldexp"}, + {Builtin::BI__builtin_modfl, "modf"}, + }; + + // If the builtin has been declared explicitly with an assembler label, + // use the mangled name. This differs from the plain label on platforms + // that prefix labels. + if (FD->hasAttr()) + Name = getMangledName(D); + else { + // TODO: This mutation should also be applied to other targets other than + // PPC, after backend supports IEEE 128-bit style libcalls. + if (getTriple().isPPC64() && + &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() && + F128Builtins.find(BuiltinID) != F128Builtins.end()) + Name = F128Builtins[BuiltinID]; + else if (getTriple().isOSAIX() && + &getTarget().getLongDoubleFormat() == + &llvm::APFloat::IEEEdouble() && + AIXLongDouble64Builtins.find(BuiltinID) != + AIXLongDouble64Builtins.end()) + Name = AIXLongDouble64Builtins[BuiltinID]; + else + Name = astCtx.BuiltinInfo.getName(BuiltinID).substr(10); + } + + auto Ty = getTypes().ConvertType(FD->getType()); + return GetOrCreateCIRFunction(Name, Ty, D, /*ForVTable=*/false); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 27e8296a70bd..ae7f12e2ac4e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -541,6 +541,11 @@ class CIRGenModule : public CIRGenTypeCache { static constexpr const char *builtinCoroBegin = "__builtin_coro_begin"; static constexpr const char *builtinCoroEnd = "__builtin_coro_end"; + /// Given a builtin id for a function like "__builtin_fabsf", return a + /// Function* for "fabsf". + mlir::cir::FuncOp getBuiltinLibFunction(const FunctionDecl *FD, + unsigned BuiltinID); + /// Emit a general error that something can't be done. void Error(SourceLocation loc, StringRef error); diff --git a/clang/test/CIR/CodeGen/libcall.cpp b/clang/test/CIR/CodeGen/libcall.cpp new file mode 100644 index 000000000000..4ed256acf843 --- /dev/null +++ b/clang/test/CIR/CodeGen/libcall.cpp @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef __builtin_va_list va_list; + +static __inline__ __attribute__((__always_inline__)) __attribute__((__format__(printf, 3, 0))) +int vsnprintf(char* const __attribute__((pass_object_size(1))) dest, int size, const char* format, va_list ap) + __attribute__((overloadable)) { + return __builtin___vsnprintf_chk(dest, size, 0, 0, format, ap); +} + +void t(const char* fmt, ...) { + va_list args; + __builtin_va_start(args, fmt); + const int size = 512; + char message[size]; + vsnprintf(message, size, fmt, args); +} + +// CHECK: cir.func private @__vsnprintf_chk \ No newline at end of file From 349419414c00863079b099bad3ede8c4ff789c54 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Fri, 23 Jun 2023 16:18:15 -0700 Subject: [PATCH 1013/2301] [CIR][Lowering] Enable IR-printing for LLVM passes. Summary: Enabling IR printing to LLVM-lowering related passes. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 ++ clang/test/CIR/CodeGen/mlirprint.c | 16 ++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5dec81069918..aa35a749b6b1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1234,6 +1234,8 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, // emission directly from our frontend. pm.addPass(mlir::LLVM::createDIScopeForLLVMFuncOpPass()); + (void)mlir::applyPassManagerCLOptions(pm); + auto result = !mlir::failed(pm.run(theModule)); if (!result) report_fatal_error( diff --git a/clang/test/CIR/CodeGen/mlirprint.c b/clang/test/CIR/CodeGen/mlirprint.c index 09c4f9187ec3..65a8351676b1 100644 --- a/clang/test/CIR/CodeGen/mlirprint.c +++ b/clang/test/CIR/CodeGen/mlirprint.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.ll 2>&1 | FileCheck %s +// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -fclangir -emit-llvm -mmlir --mlir-print-ir-after-all -mllvm -print-after-all %s -o %t.ll 2>&1 | FileCheck %s -check-prefix=CIR -check-prefix=LLVM int foo() { int i = 3; @@ -7,7 +7,11 @@ int foo() { } -// CHECK: IR Dump After MergeCleanups (cir-merge-cleanups) -// cir.func @foo() -> !s32i -// CHECK: IR Dump After DropAST (cir-drop-ast) -// cir.func @foo() -> !s32i +// CIR: IR Dump After MergeCleanups (cir-merge-cleanups) +// CIR: cir.func @foo() -> !s32i +// CIR: IR Dump After DropAST (cir-drop-ast) +// CIR: cir.func @foo() -> !s32i +// LLVM: IR Dump After cir::direct::ConvertCIRToLLVMPass (cir-to-llvm) +// LLVM: llvm.func @foo() -> i32 +// LLVM: IR Dump After +// LLVM: define i32 @foo() From c405e8950ec04f5d42a6c6957adbc90fb8c9d86e Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sat, 24 Jun 2023 19:59:18 -0300 Subject: [PATCH 1014/2301] [CIR][CIRGen] Implement CIR void type Implements a custom `!cir.void` type with a `!void` alias to represent absence of value and support for void pointers (`!cir.ptr`). The `VoidPtrTy` and `UInt8PtrTy` type cache variables no longer share a union, as they cannot be considered the same in terms of CIR types (`!cir.ptr` is not equivalent to `!cir.ptr`). Due to this pointer differentiation, improper occurrences of `!cir.int` are replaced with `!cir.void` where applicable. This was mostly done in coroutine-related builtin calls. Return values for `cir.ternary` operations are now optional allowing `!cir.void` return types. Consequentially, `!cir.yield` operations with no operands are allowed within ternary regions. ghstack-source-id: a870c2e4e0a2d1156e89e05c98275f43dfba030f Pull Request resolved: https://github.com/llvm/clangir/pull/117 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 ++-- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 17 ++++++++++++++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 12 +++++++++++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 6 ++++-- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 12 +++++------ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 21 +++++++++++++------ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 ++++- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 8 +++---- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 11 +++++++--- clang/test/CIR/CodeGen/agg-init.cpp | 8 +++---- clang/test/CIR/CodeGen/cast.cpp | 20 +++++++++--------- clang/test/CIR/CodeGen/coro-task.cpp | 18 ++++++++-------- clang/test/CIR/CodeGen/dtors.cpp | 6 +++--- clang/test/CIR/CodeGen/new.cpp | 4 ++-- clang/test/CIR/CodeGen/rangefor.cpp | 4 ++-- clang/test/CIR/CodeGen/struct.cpp | 12 +++++------ clang/test/CIR/CodeGen/ternary.cpp | 20 ++++++++---------- clang/test/CIR/IR/ternary.cir | 4 ++-- 19 files changed, 117 insertions(+), 77 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1e502d38c15f..af7bacca1806 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -463,7 +463,7 @@ def TernaryOp : CIR_Op<"ternary", let arguments = (ins CIR_BoolType:$cond); let regions = (region SizedRegion<1>:$trueRegion, SizedRegion<1>:$falseRegion); - let results = (outs AnyType:$result); + let results = (outs Optional:$result); let skipDefaultBuilders = 1; let builders = [ @@ -480,7 +480,7 @@ def TernaryOp : CIR_Op<"ternary", `(` $cond `,` `true` $trueRegion `,` `false` $falseRegion - `)` `:` type($result) attr-dict + `)` `:` functional-type(operands, results) attr-dict }]; } diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index b84bf0bce1b8..9247033abe3b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -223,11 +223,26 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { }]; } +//===----------------------------------------------------------------------===// +// Void type +//===----------------------------------------------------------------------===// + +def CIR_VoidType : CIR_Type<"Void", "void"> { + let summary = "CIR void type"; + let description = [{ + The `!cir.void` type represents the C/C++ `void` type. + }]; + let extraClassDeclaration = [{ + /// Returns a clone of this type with the given context. + std::string getAlias() const { return "void"; }; + }]; +} + //===----------------------------------------------------------------------===// // One type to bind them all //===----------------------------------------------------------------------===// def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType, CIR_BoolType, CIR_StructType, - CIR_ArrayType, CIR_FuncType]>; + CIR_ArrayType, CIR_FuncType, CIR_VoidType]>; #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 3438b5cfff70..a024aa72f5d8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -163,6 +163,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { } } + mlir::cir::VoidType getVoidTy() { return typeCache.VoidTy; } + mlir::cir::IntType getSInt8Ty() { return typeCache.SInt8Ty; } mlir::cir::IntType getSInt16Ty() { return typeCache.SInt16Ty; } mlir::cir::IntType getSInt32Ty() { return typeCache.SInt32Ty; } @@ -212,6 +214,12 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return mlir::cir::PointerType::get(getContext(), ty); } + mlir::cir::PointerType getVoidPtrTy(unsigned AddrSpace = 0) { + if (AddrSpace) + llvm_unreachable("address space is NYI"); + return typeCache.VoidPtrTy; + } + // // Constant creation helpers // ------------------------- @@ -422,6 +430,10 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::Value createBoolToInt(mlir::Value src, mlir::Type newTy) { return createCast(mlir::cir::CastKind::bool_to_int, src, newTy); } + + mlir::Value createBitcast(mlir::Value src, mlir::Type newTy) { + return createCast(mlir::cir::CastKind::bitcast, src, newTy); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index c563180029fb..f0b5541807a4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -399,13 +399,15 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, "swift NYI"); // We might have to widen integers, but we should never truncate. - assert(ArgInfo.getCoerceToType() == V.getType() && "widening NYI"); + if (ArgInfo.getCoerceToType() != V.getType() && + V.getType().isa()) + llvm_unreachable("NYI"); // If the argument doesn't match, perform a bitcast to coerce it. This // can happen due to trivial type mismatches. if (FirstCIRArg < CIRFuncTy.getNumInputs() && V.getType() != CIRFuncTy.getInput(FirstCIRArg)) - assert(false && "Shouldn't have to bitcast anything yet"); + V = builder.createBitcast(V, CIRFuncTy.getInput(FirstCIRArg)); CIRCallArgs[FirstCIRArg] = V; break; diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index d70e20318b47..e8349c5c87d6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -13,6 +13,7 @@ #include "CIRGenFunction.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtVisitor.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/ScopeExit.h" using namespace clang; @@ -158,7 +159,6 @@ static mlir::LogicalResult buildBodyAndFallthrough( mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr) { - auto int8PtrTy = builder.getUInt8PtrTy(); auto int32Ty = builder.getUInt32Ty(); auto &TI = CGM.getASTContext().getTargetInfo(); @@ -171,7 +171,7 @@ mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroId, builder.getType( - mlir::TypeRange{int32Ty, int8PtrTy, int8PtrTy, int8PtrTy}, + mlir::TypeRange{int32Ty, VoidPtrTy, VoidPtrTy, VoidPtrTy}, mlir::TypeRange{int32Ty}), /*FD=*/nullptr); assert(fnOp && "should always succeed"); @@ -211,7 +211,6 @@ CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { mlir::cir::CallOp CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr) { - auto int8PtrTy = builder.getUInt8PtrTy(); auto int32Ty = builder.getUInt32Ty(); mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroBegin); @@ -220,7 +219,7 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroBegin, builder.getType( - mlir::TypeRange{int32Ty, int8PtrTy}, mlir::TypeRange{int8PtrTy}), + mlir::TypeRange{int32Ty, VoidPtrTy}, mlir::TypeRange{VoidPtrTy}), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); @@ -234,7 +233,6 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr) { - auto int8PtrTy = builder.getUInt8PtrTy(); auto boolTy = builder.getBoolTy(); mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroEnd); @@ -242,7 +240,7 @@ mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroEnd, - builder.getType(mlir::TypeRange{int8PtrTy, boolTy}, + builder.getType(mlir::TypeRange{VoidPtrTy, boolTy}, mlir::TypeRange{boolTy}), /*FD=*/nullptr); assert(fnOp && "should always succeed"); @@ -257,7 +255,7 @@ mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, mlir::LogicalResult CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { auto openCurlyLoc = getLoc(S.getBeginLoc()); - auto nullPtrCst = builder.getNullPtr(builder.getUInt8PtrTy(), openCurlyLoc); + auto nullPtrCst = builder.getNullPtr(VoidPtrTy, openCurlyLoc); CurFn.setCoroutineAttr(mlir::UnitAttr::get(builder.getContext())); auto coroId = buildCoroIDBuiltinCall(openCurlyLoc, nullPtrCst); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index b7ebc9773db5..1b312366d441 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1679,24 +1679,33 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( rhs = builder.getNullValue(CGF.VoidTy, loc); } builder.create(loc, rhs); - }); + }).getResult(); } mlir::Value condV = CGF.buildOpOnBoolExpr(condExpr, loc, lhsExpr, rhsExpr); CIRGenFunction::ConditionalEvaluation eval(CGF); SmallVector insertPoints{}; mlir::Type yieldTy{}; + auto patchVoidOrThrowSites = [&]() { if (insertPoints.empty()) return; // If both arms are void, so be it. if (!yieldTy) yieldTy = CGF.VoidTy; + + // Insert required yields. for (auto &toInsert : insertPoints) { mlir::OpBuilder::InsertionGuard guard(builder); builder.restoreInsertionPoint(toInsert); - mlir::Value op0 = builder.getNullValue(yieldTy, loc); - builder.create(loc, op0); + + // Block does not return: build empty yield. + if (yieldTy.isa()) { + builder.create(loc); + } else { // Block returns: set null yield value. + mlir::Value op0 = builder.getNullValue(yieldTy, loc); + builder.create(loc, op0); + } } }; @@ -1757,7 +1766,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( } patchVoidOrThrowSites(); - }); + }).getResult(); } mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, @@ -1871,7 +1880,7 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { Builder.getAttr(Builder.getBoolTy(), false)); B.create(Loc, res.getRes()); }); - return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp, ResTy); + return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp.getResult(), ResTy); } mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { @@ -1979,7 +1988,7 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { B.create(Loc, res.getResult()); }); - return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp, ResTy); + return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp.getResult(), ResTy); } mlir::Value ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index d8be6cd69cfa..5c4ee7e68d77 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -318,7 +318,7 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { // If we are on a coroutine, add the coro_end builtin call. if (CGF.CurFn.getCoroutine()) CGF.buildCoroEndBuiltinCall( - loc, builder.getNullPtr(builder.getUInt8PtrTy(), loc)); + loc, builder.getNullPtr(builder.getVoidPtrTy(), loc)); if (CGF.FnRetCIRTy.has_value()) { // If there's anything to return, load it first. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 75f87b3df155..3033aae6d6c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -116,7 +116,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, UInt64Ty = ::mlir::cir::IntType::get(builder.getContext(), 64, /*isSigned=*/false); - VoidTy = UInt8Ty; + VoidTy = ::mlir::cir::VoidType::get(builder.getContext()); + + // Initialize CIR pointer types cache. + VoidPtrTy = ::mlir::cir::PointerType::get(builder.getContext(), VoidTy); // TODO: HalfTy // TODO: BFloatTy diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index eb522604eecd..97ab911c9861 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -27,7 +27,7 @@ struct CIRGenTypeCache { CIRGenTypeCache() {} /// void - mlir::Type VoidTy; + mlir::cir::VoidType VoidTy; // char, int, short, long mlir::cir::IntType SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty; // usigned char, unsigned, unsigned short, unsigned long @@ -50,10 +50,8 @@ struct CIRGenTypeCache { }; /// void* in address space 0 - union { - mlir::cir::PointerType VoidPtrTy; - mlir::cir::PointerType UInt8PtrTy; - }; + mlir::cir::PointerType VoidPtrTy; + mlir::cir::PointerType UInt8PtrTy; /// void** in address space 0 union { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index df660b172e1f..6d750f298407 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -54,6 +54,10 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << intType.getAlias(); return AliasResult::OverridableAlias; } + if (auto voidType = type.dyn_cast()) { + os << voidType.getAlias(); + return AliasResult::OverridableAlias; + } return AliasResult::NoAlias; } @@ -687,9 +691,10 @@ void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, falseBuilder(builder, result.location); auto yield = dyn_cast(block->getTerminator()); - assert((yield && yield.getNumOperands() == 1) && - "expected cir.yield terminator with one operand"); - result.addTypes(TypeRange{yield.getOperand(0).getType()}); + assert((yield && yield.getNumOperands() <= 1) && + "expected zero or one result type"); + if (yield.getNumOperands() == 1) + result.addTypes(TypeRange{yield.getOperandTypes().front()}); } //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index bf58c95fa879..1458d8eef2f9 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -66,14 +66,14 @@ void yo() { // CHECK: cir.func @_Z2yov() { // CHECK: %0 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} -// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i,#cir.null : !cir.ptr,#cir.int<0> : !u64i}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 +// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i,#cir.null : !cir.ptr,#cir.int<0> : !u64i}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 // CHECK: cir.store %2, %0 : !ty_22struct2EYo22, cir.ptr // CHECK: %3 = "cir.struct_element_addr"(%1) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr -// CHECK: %5 = "cir.struct_element_addr"(%1) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr -// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > +// CHECK: %5 = "cir.struct_element_addr"(%1) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > // CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "createFlags"}> : (!cir.ptr) -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !u64i) : !u64i // CHECK: cir.store %8, %7 : !u64i, cir.ptr diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 9e3fd9f2e1c6..32d570b94661 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -44,8 +44,8 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4) { long l = (long)(void*)x4; // Must sign extend before casting to pointer // CHECK: %[[TMP:[0-9]+]] = cir.cast(integral, %{{[0-9]+}} : !s16i), !u64i - // CHECK: %[[TMP2:[0-9]+]] = cir.cast(int_to_ptr, %[[TMP]] : !u64i), !cir.ptr - // CHECK: %{{[0-9]+}} = cir.cast(ptr_to_int, %[[TMP2]] : !cir.ptr), !s64i + // CHECK: %[[TMP2:[0-9]+]] = cir.cast(int_to_ptr, %[[TMP]] : !u64i), !cir.ptr + // CHECK: %{{[0-9]+}} = cir.cast(ptr_to_int, %[[TMP2]] : !cir.ptr), !s64i return 0; } @@ -55,22 +55,22 @@ bool cptr(void *d) { return x; } -// CHECK: cir.func @_Z4cptrPv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} +// CHECK: cir.func @_Z4cptrPv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} -// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %4 = cir.cast(ptr_to_bool, %3 : !cir.ptr), !cir.bool +// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.cast(ptr_to_bool, %3 : !cir.ptr), !cir.bool void call_cptr(void *d) { if (!cptr(d)) { } } -// CHECK: cir.func @_Z9call_cptrPv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} +// CHECK: cir.func @_Z9call_cptrPv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} // CHECK: cir.scope { -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = cir.call @_Z4cptrPv(%1) : (!cir.ptr) -> !cir.bool +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.call @_Z4cptrPv(%1) : (!cir.ptr) -> !cir.bool // CHECK: %3 = cir.unary(not, %2) : !cir.bool, !cir.bool // CHECK: cir.if %3 { diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index ee6eeb9749fd..3b2c720a13c4 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -137,10 +137,10 @@ co_invoke_fn co_invoke; // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22struct2Efolly3A3Acoro3A3Aco_invoke_fn22 -// CHECK: cir.func builtin private @__builtin_coro_id(!u32i, !cir.ptr, !cir.ptr, !cir.ptr) -> !u32i +// CHECK: cir.func builtin private @__builtin_coro_id(!u32i, !cir.ptr, !cir.ptr, !cir.ptr) -> !u32i // CHECK: cir.func builtin private @__builtin_coro_alloc(!u32i) -> !cir.bool // CHECK: cir.func builtin private @__builtin_coro_size() -> !u64i -// CHECK: cir.func builtin private @__builtin_coro_begin(!u32i, !cir.ptr) -> !cir.ptr +// CHECK: cir.func builtin private @__builtin_coro_begin(!u32i, !cir.ptr) -> !cir.ptr using VoidTask = folly::coro::Task; @@ -153,12 +153,12 @@ VoidTask silly_task() { // Allocate promise. // CHECK: %[[#VoidTaskAddr:]] = cir.alloca ![[VoidTask]], {{.*}}, ["__retval"] -// CHECK: %[[#SavedFrameAddr:]] = cir.alloca !cir.ptr, cir.ptr >, ["__coro_frame_addr"] {alignment = 8 : i64} +// CHECK: %[[#SavedFrameAddr:]] = cir.alloca !cir.ptr, cir.ptr >, ["__coro_frame_addr"] {alignment = 8 : i64} // CHECK: %[[#VoidPromisseAddr:]] = cir.alloca ![[VoidPromisse]], {{.*}}, ["__promise"] // Get coroutine id with __builtin_coro_id. -// CHECK: %[[#NullPtr:]] = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %[[#NullPtr:]] = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: %[[#Align:]] = cir.const(#cir.int<16> : !u32i) : !u32i // CHECK: %[[#CoroId:]] = cir.call @__builtin_coro_id(%[[#Align]], %[[#NullPtr]], %[[#NullPtr]], %[[#NullPtr]]) @@ -166,13 +166,13 @@ VoidTask silly_task() { // call __builtin_coro_begin for the final coroutine frame address. // CHECK: %[[#ShouldAlloc:]] = cir.call @__builtin_coro_alloc(%[[#CoroId]]) : (!u32i) -> !cir.bool -// CHECK: cir.store %[[#NullPtr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > +// CHECK: cir.store %[[#NullPtr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > // CHECK: cir.if %[[#ShouldAlloc]] { // CHECK: %[[#CoroSize:]] = cir.call @__builtin_coro_size() : () -> !u64i -// CHECK: %[[#AllocAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (!u64i) -> !cir.ptr -// CHECK: cir.store %[[#AllocAddr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > +// CHECK: %[[#AllocAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (!u64i) -> !cir.ptr +// CHECK: cir.store %[[#AllocAddr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > // CHECK: } -// CHECK: %[[#Load0:]] = cir.load %[[#SavedFrameAddr]] : cir.ptr >, !cir.ptr +// CHECK: %[[#Load0:]] = cir.load %[[#SavedFrameAddr]] : cir.ptr >, !cir.ptr // CHECK: %[[#CoroFrameAddr:]] = cir.call @__builtin_coro_begin(%[[#CoroId]], %[[#Load0]]) // Call promise.get_return_object() to retrieve the task object. @@ -264,7 +264,7 @@ VoidTask silly_task() { // Call builtin coro end and return -// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const(#cir.null : !cir.ptr) +// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const(#cir.null : !cir.ptr) // CHECK-NEXT: %[[#CoroEndArg1:]] = cir.const(#false) : !cir.bool // CHECK-NEXT: = cir.call @__builtin_coro_end(%[[#CoroEndArg0]], %[[#CoroEndArg1]]) diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index fcac427efa56..05925296e25a 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -56,7 +56,7 @@ class B : public A // CHECK: cir.func private @_ZN1BD2Ev(!cir.ptr) // operator delete(void*) declaration -// CHECK: cir.func private @_ZdlPv(!cir.ptr) +// CHECK: cir.func private @_ZdlPv(!cir.ptr) // B dtor => @B::~B() #2 // Calls dtor #1 @@ -67,8 +67,8 @@ class B : public A // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: cir.call @_ZN1BD2Ev(%1) : (!cir.ptr) -> () -// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr -// CHECK: cir.call @_ZdlPv(%2) : (!cir.ptr) -> () +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: cir.call @_ZdlPv(%2) : (!cir.ptr) -> () // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index b843e2c81c09..b2a568235675 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -20,8 +20,8 @@ void m(int a, int b) { // CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK: cir.scope { // CHECK: %4 = cir.const(#cir.int<1> : !u64i) : !u64i -// CHECK: %5 = cir.call @_Znwm(%4) : (!u64i) -> !cir.ptr -// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr +// CHECK: %5 = cir.call @_Znwm(%4) : (!u64i) -> !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr // CHECK: %7 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %8 = cir.load %7 : cir.ptr , !s32i // CHECK: %9 = cir.load %1 : cir.ptr >, !cir.ptr diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index a8d29bfdc03a..64386468d061 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -21,7 +21,7 @@ void init(unsigned numImages) { } } -// CHECK: !ty_22struct2Etriple22 = !cir.struct<"struct.triple", !u32i, !cir.ptr, !u32i> +// CHECK: !ty_22struct2Etriple22 = !cir.struct<"struct.triple", !u32i, !cir.ptr, !u32i> // CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector", !cir.ptr, !cir.ptr, !cir.ptr> // CHECK: !ty_22struct2E__vector_iterator22 = !cir.struct<"struct.__vector_iterator", !cir.ptr> @@ -64,7 +64,7 @@ void init(unsigned numImages) { // CHECK: %15 = "cir.struct_element_addr"(%13) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr // CHECK: %16 = cir.const(#cir.int<1000024002> : !u32i) : !u32i // CHECK: cir.store %16, %15 : !u32i, cir.ptr -// CHECK: %17 = "cir.struct_element_addr"(%13) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %17 = "cir.struct_element_addr"(%13) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %18 = "cir.struct_element_addr"(%13) <{member_name = "image"}> : (!cir.ptr) -> !cir.ptr // CHECK: %19 = cir.load %7 : cir.ptr >, !cir.ptr // CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 1c5c56658677..eebd72b2496d 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -30,9 +30,9 @@ void yoyo(incomplete *i) {} // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i> // CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", !s32i, !s8i, !ty_22struct2EBar22> -// CHECK: !ty_22struct2EMandalore22 = !cir.struct<"struct.Mandalore", !u32i, !cir.ptr, !s32i, #cir.recdecl.ast> +// CHECK: !ty_22struct2EMandalore22 = !cir.struct<"struct.Mandalore", !u32i, !cir.ptr, !s32i, #cir.recdecl.ast> // CHECK: !ty_22class2EAdv22 = !cir.struct<"class.Adv", !ty_22struct2EMandalore22> -// CHECK: !ty_22struct2EEntry22 = !cir.struct<"struct.Entry", !cir.ptr, !cir.ptr)>>> +// CHECK: !ty_22struct2EEntry22 = !cir.struct<"struct.Entry", !cir.ptr, !cir.ptr)>>> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} @@ -102,9 +102,9 @@ void m() { Adv C; } // CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "w"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000024001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr -// CHECK: %5 = "cir.struct_element_addr"(%2) <{member_name = "n"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %6 = cir.const(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > +// CHECK: %5 = "cir.struct_element_addr"(%2) <{member_name = "n"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %6 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > // CHECK: %7 = "cir.struct_element_addr"(%2) <{member_name = "d"}> : (!cir.ptr) -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: cir.store %8, %7 : !s32i, cir.ptr @@ -147,4 +147,4 @@ void ppp() { Entry x; } // CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr -// CHECK: = "cir.struct_element_addr"(%1) <{member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr)>>> +// CHECK: = "cir.struct_element_addr"(%1) <{member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr)>>> diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp index d3f48c3237c7..7c62afdeffa1 100644 --- a/clang/test/CIR/CodeGen/ternary.cpp +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -18,7 +18,7 @@ int x(int y) { // CHECK: }, false { // CHECK: %7 = cir.const(#cir.int<5> : !s32i) : !s32i // CHECK: cir.yield %7 : !s32i -// CHECK: }) : !s32i +// CHECK: }) : (!cir.bool) -> !s32i // CHECK: cir.store %5, %1 : !s32i, cir.ptr // CHECK: %6 = cir.load %1 : cir.ptr , !s32i // CHECK: cir.return %6 : !s32i @@ -43,16 +43,14 @@ void m(APIType api) { // CHECK: %3 = cir.const(#cir.int<0> : !u32i) : !u32i // CHECK: %4 = cir.cast(integral, %3 : !u32i), !s32i // CHECK: %5 = cir.cmp(eq, %2, %4) : !s32i, !cir.bool -// CHECK: %6 = cir.ternary(%5, true { -// CHECK: %7 = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK: %8 = cir.const(#cir.int<0> : !u8i) : !u8i -// CHECK: cir.yield %8 : !u8i +// CHECK: cir.ternary(%5, true { +// CHECK: %6 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.yield // CHECK: }, false { -// CHECK: %7 = cir.get_global @".str" : cir.ptr > -// CHECK: %8 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_Z3obaPKc(%8) : (!cir.ptr) -> () -// CHECK: %9 = cir.const(#cir.int<0> : !u8i) : !u8i -// CHECK: cir.yield %9 : !u8i -// CHECK: }) : !u8i +// CHECK: %6 = cir.get_global @".str" : cir.ptr > +// CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr +// CHECK: cir.call @_Z3obaPKc(%7) : (!cir.ptr) -> () +// CHECK: cir.yield +// CHECK: }) : (!cir.bool) -> () // CHECK: cir.return // CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/IR/ternary.cir b/clang/test/CIR/IR/ternary.cir index e752321ee57c..77939474e04b 100644 --- a/clang/test/CIR/IR/ternary.cir +++ b/clang/test/CIR/IR/ternary.cir @@ -9,7 +9,7 @@ module { }, false { %b = cir.const(#cir.int<1> : !u32i) : !u32i cir.yield %b : !u32i - }) : !u32i + }) : (!cir.bool) -> !u32i cir.return %0 : !u32i } } @@ -23,7 +23,7 @@ module { // CHECK: }, false { // CHECK: %1 = cir.const(#cir.int<1> : !u32i) : !u32i // CHECK: cir.yield %1 : !u32i -// CHECK: }) : !u32i +// CHECK: }) : (!cir.bool) -> !u32i // CHECK: cir.return %0 : !u32i // CHECK: } From 14c56d1b281916f184ebc10e5d14502cbc4ea62e Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sat, 24 Jun 2023 19:59:19 -0300 Subject: [PATCH 1015/2301] [CIR][CIRGen][Lowering] Restrict FuncType to exactly one return type Updates FuncType table gen definition to restrict the number of return types to exactly one. Patches FuncOp, CallOp, and codegen builders to work with the new function type definition. Empty results in FuncType builders are replaced by void types. Void function definitions can be parsed in three different ways: - No return type specified: the return type is implicitly void - Return type specified as void: the return type is explicitly void - Return type specified as empty: the return type is implicitly void Some verification checks are removed since they are now implicitly checked. Added lowering for void types in both direct and indirect lowering. ghstack-source-id: 5468006bd0e0ed27be9e9f1a32ee6371a9d9c237 Pull Request resolved: https://github.com/llvm/clangir/pull/118 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 12 ++- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 22 +++--- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 3 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 7 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 15 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 78 ++++++++++--------- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 16 ++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11 ++- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 13 ++-- clang/test/CIR/IR/func.cir | 10 +++ clang/test/CIR/IR/invalid.cir | 33 +++++++- 11 files changed, 135 insertions(+), 85 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index af7bacca1806..27453ed60304 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1464,7 +1464,9 @@ def FuncOp : CIR_Op<"func", [ /// Returns the results types that the callable region produces when /// executed. ArrayRef getCallableResults() { - return getFunctionType().getResults(); + if (::llvm::isa(getFunctionType().getReturnType())) + return {}; + return getFunctionType().getReturnTypes(); } /// Returns the argument attributes for all callable region arguments or @@ -1483,7 +1485,7 @@ def FuncOp : CIR_Op<"func", [ ArrayRef getArgumentTypes() { return getFunctionType().getInputs(); } /// Returns the result types of this function. - ArrayRef getResultTypes() { return getFunctionType().getResults(); } + ArrayRef getResultTypes() { return getFunctionType().getReturnTypes(); } /// Hook for OpTrait::FunctionOpInterfaceTrait, called after verifying that /// the 'type' attribute is present and checks if it holds a function type. @@ -1533,14 +1535,16 @@ def CallOp : CIR_Op<"call", OpBuilder<(ins "FuncOp":$callee, CArg<"ValueRange", "{}">:$operands), [{ $_state.addOperands(operands); $_state.addAttribute("callee", SymbolRefAttr::get(callee)); - $_state.addTypes(callee.getFunctionType().getResults()); + if (!callee.getFunctionType().isVoid()) + $_state.addTypes(callee.getFunctionType().getReturnType()); }]>, OpBuilder<(ins "Value":$ind_target, "FuncType":$fn_type, CArg<"ValueRange", "{}">:$operands), [{ $_state.addOperands(ValueRange{ind_target}); $_state.addOperands(operands); - $_state.addTypes(fn_type.getResults()); + if (!fn_type.isVoid()) + $_state.addTypes(fn_type.getReturnType()); }]>]; let extraClassDeclaration = [{ diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 9247033abe3b..fac880347a39 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -184,18 +184,19 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { ``` }]; - let parameters = (ins ArrayRefParameter<"Type">:$inputs, - ArrayRefParameter<"Type">:$results, "bool":$varArg); + let parameters = (ins ArrayRefParameter<"Type">:$inputs, "Type":$returnType, + "bool":$varArg); let assemblyFormat = [{ - `<` $results ` ` `(` custom($inputs, $varArg) `>` + `<` $returnType ` ` `(` custom($inputs, $varArg) `>` }]; let skipDefaultBuilders = 1; let builders = [ - TypeBuilder<(ins CArg<"TypeRange">:$inputs, CArg<"TypeRange">:$results, - CArg<"bool", "false">:$isVarArg), [{ - return $_get($_ctxt, llvm::to_vector(inputs), llvm::to_vector(results), isVarArg); + TypeBuilderWithInferredContext<(ins + "ArrayRef":$inputs, "Type":$returnType, + CArg<"bool", "false">:$isVarArg), [{ + return $_get(returnType.getContext(), inputs, returnType, isVarArg); }]> ]; @@ -211,11 +212,12 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { /// Returns the number of arguments to the function. unsigned getNumInputs() const { return getInputs().size(); } - /// Returns the `i`th result operand type. Asserts if out of bounds. - Type getResult(unsigned i) const { return getResults()[i]; } + /// Returns the result type of the function as an ArrayRef, enabling better + /// integration with generic MLIR utilities. + ArrayRef getReturnTypes() const; - /// Returns the number of results to the function. - unsigned getNumResults() const { return getResults().size(); } + /// Returns whether the function is returns void. + bool isVoid() const; /// Returns a clone of this function type with the given argument /// and result types. diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index a024aa72f5d8..8d53fe99e223 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -195,8 +195,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::Type getVirtualFnPtrType(bool isVarArg = false) { // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special // type so it's a bit more clear and C++ idiomatic. - auto fnTy = - mlir::cir::FuncType::get(getContext(), {}, {getUInt32Ty()}, isVarArg); + auto fnTy = mlir::cir::FuncType::get({}, getUInt32Ty(), isVarArg); assert(!UnimplementedFeature::isVarArg()); return getPointerTo(getPointerTo(fnTy)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index f0b5541807a4..1732f4e079ae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -23,6 +23,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" #include #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -257,9 +258,9 @@ mlir::cir::FuncType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { (void)Erased; assert(Erased && "Not in set?"); - return mlir::cir::FuncType::get(&getMLIRContext(), ArgTypes, - (resultType ? resultType : mlir::TypeRange{}), - FI.isVariadic()); + return mlir::cir::FuncType::get( + ArgTypes, (resultType ? resultType : Builder.getVoidTy()), + FI.isVariadic()); } mlir::cir::FuncType CIRGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index e8349c5c87d6..72f96900eb28 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -170,9 +170,8 @@ mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroId, - builder.getType( - mlir::TypeRange{int32Ty, VoidPtrTy, VoidPtrTy, VoidPtrTy}, - mlir::TypeRange{int32Ty}), + mlir::cir::FuncType::get({int32Ty, VoidPtrTy, VoidPtrTy, VoidPtrTy}, + int32Ty), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); @@ -196,8 +195,7 @@ CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroAlloc, - builder.getType(mlir::TypeRange{int32Ty}, - mlir::TypeRange{boolTy}), + mlir::cir::FuncType::get({int32Ty}, boolTy), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); @@ -218,8 +216,8 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroBegin, - builder.getType( - mlir::TypeRange{int32Ty, VoidPtrTy}, mlir::TypeRange{VoidPtrTy}), + mlir::cir::FuncType::get({int32Ty, VoidPtrTy}, + VoidPtrTy), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); @@ -240,8 +238,7 @@ mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroEnd, - builder.getType(mlir::TypeRange{VoidPtrTy, boolTy}, - mlir::TypeRange{boolTy}), + mlir::cir::FuncType::get({VoidPtrTy, boolTy}, boolTy), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 6d750f298407..a6e586b5a1b4 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -349,28 +349,16 @@ static mlir::LogicalResult checkReturnAndFunction(ReturnOp op, if (op.getNumOperands() > 1) return op.emitOpError() << "expects at most 1 return operand"; - // The operand number and types must match the function signature. - const auto &results = function.getFunctionType().getResults(); - if (op.getNumOperands() != results.size()) - return op.emitOpError() - << "does not return the same number of values (" - << op.getNumOperands() << ") as the enclosing function (" - << results.size() << ")"; - - // If the operation does not have an input, we are done. - if (!op.hasOperand()) - return mlir::success(); - - auto inputType = *op.operand_type_begin(); - auto resultType = results.front(); - - // Check that the result type of the function matches the operand type. - if (inputType == resultType) - return mlir::success(); + // Ensure returned type matches the function signature. + auto expectedTy = function.getFunctionType().getReturnType(); + auto actualTy = + (op.getNumOperands() == 0 ? mlir::cir::VoidType::get(op.getContext()) + : op.getOperand(0).getType()); + if (actualTy != expectedTy) + return op.emitOpError() << "returns " << actualTy + << " but enclosing function returns " << expectedTy; - return op.emitError() << "type of return operand (" << inputType - << ") doesn't match function result type (" - << resultType << ")"; + return mlir::success(); } mlir::LogicalResult ReturnOp::verify() { @@ -1319,9 +1307,8 @@ LogicalResult cir::VTableAddrPointOp::verify() { return success(); auto resultType = getAddr().getType(); - auto fnTy = mlir::cir::FuncType::get( - getContext(), {}, - {mlir::cir::IntType::get(getContext(), 32, /*isSigned=*/false)}); + auto intTy = mlir::cir::IntType::get(getContext(), 32, /*isSigned=*/false); + auto fnTy = mlir::cir::FuncType::get({}, intTy); auto resTy = mlir::cir::PointerType::get( getContext(), mlir::cir::PointerType::get(getContext(), fnTy)); @@ -1415,10 +1402,17 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { for (auto &arg : arguments) argTypes.push_back(arg.type); + if (resultTypes.size() > 1) + return parser.emitError(loc, "functions only supports zero or one results"); + + // Fetch return type or set it to void if empty/ommited. + mlir::Type returnType = + (resultTypes.empty() ? mlir::cir::VoidType::get(builder.getContext()) + : resultTypes.front()); + // Build the function type. auto fnType = mlir::cir::FuncType::getChecked( - parser.getEncodedSourceLoc(loc), parser.getContext(), - mlir::TypeRange(argTypes), mlir::TypeRange(resultTypes), isVariadic); + parser.getEncodedSourceLoc(loc), argTypes, returnType, isVariadic); if (!fnType) return failure(); state.addAttribute(getFunctionTypeAttrName(state.name), @@ -1512,8 +1506,14 @@ void cir::FuncOp::print(OpAsmPrinter &p) { // Print function name, signature, and control. p.printSymbolName(getSymName()); auto fnType = getFunctionType(); - function_interface_impl::printFunctionSignature( - p, *this, fnType.getInputs(), fnType.isVarArg(), fnType.getResults()); + SmallVector resultTypes; + if (!fnType.isVoid()) + function_interface_impl::printFunctionSignature( + p, *this, fnType.getInputs(), fnType.isVarArg(), + fnType.getReturnTypes()); + else + function_interface_impl::printFunctionSignature( + p, *this, fnType.getInputs(), fnType.isVarArg(), {}); function_interface_impl::printFunctionAttributes( p, *this, {getSymVisibilityAttrName(), getAliaseeAttrName(), @@ -1542,8 +1542,6 @@ LogicalResult cir::FuncOp::verifyType() { if (!type.isa()) return emitOpError("requires '" + getFunctionTypeAttrName().str() + "' attribute of function type"); - if (getFunctionType().getNumResults() > 1) - return emitOpError("cannot have more than one result"); return success(); } @@ -1645,16 +1643,20 @@ cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { << fnType.getInput(i) << ", but provided " << getOperand(i).getType() << " for operand number " << i; - if (fnType.getNumResults() != getNumResults()) + // Void function must not return any results. + if (fnType.isVoid() && getNumResults() != 0) + return emitOpError("callee returns void but call has results"); + + // Non-void function calls must return exactly one result. + if (!fnType.isVoid() && getNumResults() != 1) return emitOpError("incorrect number of results for callee"); - for (unsigned i = 0, e = fnType.getNumResults(); i != e; ++i) - if (getResult(i).getType() != fnType.getResult(i)) { - auto diag = emitOpError("result type mismatch at index ") << i; - diag.attachNote() << " op result types: " << getResultTypes(); - diag.attachNote() << "function result types: " << fnType.getResults(); - return diag; - } + // Parent function and return value types must match. + if (!fnType.isVoid() && getResultTypes().front() != fnType.getReturnType()) { + return emitOpError("result type mismatch: expected ") + << fnType.getReturnType() << ", but provided " + << getResult(0).getType(); + } return success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 061c17fc9ebd..db27a2fa664f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -19,6 +19,7 @@ #include "mlir/Support/LogicalResult.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" @@ -410,17 +411,16 @@ IntType::verify(llvm::function_ref emitError, mlir::LogicalResult FuncType::verify(llvm::function_ref emitError, - llvm::ArrayRef inputs, - llvm::ArrayRef results, bool varArg) { - if (results.size() > 1) - return emitError() << "functions only supports 0 or 1 results"; + llvm::ArrayRef inputs, mlir::Type result, + bool varArg) { if (varArg && inputs.empty()) return emitError() << "functions must have at least one non-variadic input"; return mlir::success(); } FuncType FuncType::clone(TypeRange inputs, TypeRange results) const { - return get(getContext(), results, inputs, isVarArg()); + assert(results.size() == 1 && "expected exactly one result type"); + return get(llvm::to_vector(inputs), results[0], isVarArg()); } mlir::ParseResult @@ -462,6 +462,12 @@ void printFuncTypeArgs(mlir::AsmPrinter &p, p << ')'; } +llvm::ArrayRef FuncType::getReturnTypes() const { + return static_cast(getImpl())->returnType; +} + +bool FuncType::isVoid() const { return getReturnType().isa(); } + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index aa35a749b6b1..fdcb5eedfe79 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -680,12 +680,8 @@ class CIRFuncLowering : public mlir::OpConversionPattern { signatureConversion.addInputs(argType.index(), convertedType); } - mlir::Type resultType; - if (fnType.getNumResults() == 1) { - resultType = getTypeConverter()->convertType(fnType.getResult(0)); - if (!resultType) - return mlir::failure(); - } + mlir::Type resultType = + getTypeConverter()->convertType(fnType.getReturnType()); // Create the LLVM function operation. auto llvmFnTy = mlir::LLVM::LLVMFunctionType::get( @@ -1172,6 +1168,9 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { llvm_unreachable("Failed to set body of struct"); return llvmStruct; }); + converter.addConversion([&](mlir::cir::VoidType type) -> mlir::Type { + return mlir::LLVM::LLVMVoidType::get(type.getContext()); + }); } } // namespace diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 0f8483f99345..ae4dbca56fe6 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -28,6 +28,7 @@ #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" #include "mlir/IR/BuiltinDialect.h" +#include "mlir/IR/BuiltinTypes.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" @@ -35,6 +36,7 @@ #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/DialectConversion.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/Sequence.h" @@ -190,13 +192,8 @@ class CIRFuncLowering : public mlir::OpConversionPattern { signatureConversion.addInputs(argType.index(), convertedType); } - mlir::Type resultType; - if (fnType.getNumResults() == 1) { - resultType = getTypeConverter()->convertType(fnType.getResult(0)); - if (!resultType) - return mlir::failure(); - } - + mlir::Type resultType = + getTypeConverter()->convertType(fnType.getReturnType()); auto fn = rewriter.create( op.getLoc(), op.getName(), rewriter.getFunctionType(signatureConversion.getConvertedTypes(), @@ -533,6 +530,8 @@ static mlir::TypeConverter prepareTypeConverter() { [&](mlir::IntegerType type) -> mlir::Type { return type; }); converter.addConversion( [&](mlir::FloatType type) -> mlir::Type { return type; }); + converter.addConversion( + [&](mlir::cir::VoidType type) -> mlir::Type { return {}; }); return converter; } diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir index dae0bf291ff7..73898f70172e 100644 --- a/clang/test/CIR/IR/func.cir +++ b/clang/test/CIR/IR/func.cir @@ -27,6 +27,16 @@ module { %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["fn", init] {alignment = 8 : i64} cir.return } + + // Should parse void return types. + cir.func @parse_explicit_void_func() -> !cir.void { + cir.return + } + + // Should parse omitted void return type. + cir.func @parse_func_type_with_omitted_void() { + cir.return + } } // CHECK: cir.func @l0() \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 787b4673ef9c..ab6cf2e11674 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -377,7 +377,7 @@ module { // ----- module { - // expected-error@+1 {{functions only supports 0 or 1 results}} + // expected-error@+1 {{functions only supports zero or one results}} cir.func @variadic() -> (!cir.int, !cir.int) } @@ -391,3 +391,34 @@ module { cir.return %1 : !cir.int } } + +// ----- + +module { + cir.func private @test() -> !cir.void + cir.func @invalid_call() { + // expected-error@+1 {{'cir.call' op callee returns void but call has results}} + %1 = cir.call @test() : () -> (!cir.int) + cir.return + } +} + +// ----- + +module { + cir.func private @test() -> !cir.int + cir.func @invalid_call() { + // expected-error@+1 {{'cir.call' op result type mismatch: expected '!cir.int', but provided '!cir.int'}} + %1 = cir.call @test() : () -> (!cir.int) + cir.return + } +} + +// ----- + +module { + cir.func @invalid_return_type(%0 : !cir.int) -> !cir.int { + // expected-error@+1 {{'cir.return' op returns '!cir.int' but enclosing function returns '!cir.int'}} + cir.return %0 : !cir.int + } +} From 162bbe9c7f2d84f0d23ac856b03741c88a46127a Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Thu, 22 Jun 2023 13:12:31 -0300 Subject: [PATCH 1016/2301] [CIR][CIRGen] Support C89 implicit int --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 9 ++++++++- clang/test/CIR/CodeGen/c89-implicit-int.c | 10 ++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/c89-implicit-int.c diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 3033aae6d6c5..d2f32204a8e9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -41,6 +41,7 @@ #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" +#include "clang/AST/Type.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/NoSanitizeList.h" #include "clang/Basic/SourceLocation.h" @@ -673,6 +674,11 @@ mlir::cir::GlobalOp CIRGenModule::buildGlobal(const VarDecl *D, mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, std::optional Ty, ForDefinition_t IsForDefinition) { + assert(D->hasGlobalStorage() && "Not a global variable"); + QualType ASTTy = D->getType(); + if (!Ty) + Ty = getTypes().convertTypeForMem(ASTTy); + auto g = buildGlobal(D, Ty, IsForDefinition); auto ptrTy = mlir::cir::PointerType::get(builder.getContext(), g.getSymType()); @@ -1998,7 +2004,8 @@ CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { IsForDefinition); } - llvm_unreachable("NYI"); + return getAddrOfGlobalVar(cast(D), /*Ty=*/nullptr, IsForDefinition) + .getDefiningOp(); } void CIRGenModule::Release() { diff --git a/clang/test/CIR/CodeGen/c89-implicit-int.c b/clang/test/CIR/CodeGen/c89-implicit-int.c new file mode 100644 index 000000000000..8fe7b285c338 --- /dev/null +++ b/clang/test/CIR/CodeGen/c89-implicit-int.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c89 -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Implicit int return type. +test = 0; +// CHECK: cir.global external @test = #cir.int<0> : !s32i +func (void) { +// CHECK: cir.func @func() -> !s32i + return 0; +} From c88d6d40ecbf12ce367106cdedc7402ef1ae40f6 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sun, 25 Jun 2023 11:03:46 -0300 Subject: [PATCH 1017/2301] [CIR][CIRGen][Lowering] Add integral to FP casts Lowering of these casts is done by mapping them to LLVM's SIToFP and UIToFP casts. Sign interpretation is deferred from CodeGen to the lowering stage. ghstack-source-id: 1975882a1cbcc365b7eec1c03913f500b8c499f7 Pull Request resolved: https://github.com/llvm/clangir/pull/124 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 ++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 11 ++++------- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 +++++++ clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 12 ++++++++++++ clang/test/CIR/CodeGen/cast.cpp | 6 ++++++ clang/test/CIR/Lowering/cast.cir | 4 ++++ 6 files changed, 35 insertions(+), 8 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 27453ed60304..a0736a4793d8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -52,6 +52,7 @@ def CK_IntegralToPointer : I32EnumAttrCase<"int_to_ptr", 8>; def CK_PointerToIntegral : I32EnumAttrCase<"ptr_to_int", 9>; def CK_FloatToBoolean : I32EnumAttrCase<"float_to_bool", 10>; def CK_BooleanToIntegral : I32EnumAttrCase<"bool_to_int", 11>; +def CK_IntegralToFloat : I32EnumAttrCase<"int_to_float", 12>; def CastKind : I32EnumAttr< "CastKind", @@ -59,7 +60,7 @@ def CastKind : I32EnumAttr< [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast, CK_BitCast, CK_FloatingCast, CK_PtrToBoolean, CK_FloatToIntegral, CK_IntegralToPointer, CK_PointerToIntegral, CK_FloatToBoolean, - CK_BooleanToIntegral]> { + CK_BooleanToIntegral, CK_IntegralToFloat]> { let cppNamespace = "::mlir::cir"; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 1b312366d441..0c3b44c0994f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1170,8 +1170,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_FloatingCast: case CK_FixedPointToFloating: case CK_FloatingToFixedPoint: { - if (!(Kind == CK_FloatingCast || Kind == CK_FloatingToIntegral)) - llvm_unreachable("Only FloatingCast and Integral supported so far."); + if (Kind == CK_FixedPointToFloating || Kind == CK_FloatingToFixedPoint) + llvm_unreachable("Fixed point casts are NYI."); CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, CE); return buildScalarConversion(Visit(E), E->getType(), DestTy, CE->getExprLoc()); @@ -1321,7 +1321,6 @@ mlir::Value ScalarExprEmitter::buildScalarCast( } if (CGF.getBuilder().isInt(SrcElementTy)) { - bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType(); if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) { llvm_unreachable("NYI"); } @@ -1329,10 +1328,8 @@ mlir::Value ScalarExprEmitter::buildScalarCast( if (CGF.getBuilder().isInt(DstElementTy)) return Builder.create( Src.getLoc(), DstTy, mlir::cir::CastKind::integral, Src); - if (InputSigned) - llvm_unreachable("NYI"); - - llvm_unreachable("NYI"); + return Builder.create( + Src.getLoc(), DstTy, mlir::cir::CastKind::int_to_float, Src); } if (SrcElementTy.isa()) { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a6e586b5a1b4..54ab59397aac 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -18,6 +18,7 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Diagnostics.h" #include "mlir/IR/DialectImplementation.h" #include "mlir/IR/Location.h" @@ -334,6 +335,12 @@ LogicalResult CastOp::verify() { return emitOpError() << "requires !cir.int for result"; return success(); } + case cir::CastKind::int_to_float: + if (!srcType.isa()) + return emitOpError() << "requires !cir.int for source"; + if (!resType.isa()) + return emitOpError() << "requires !cir.float for result"; + return success(); } llvm_unreachable("Unknown CastOp kind?"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index fdcb5eedfe79..e5b356429e80 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -333,6 +333,18 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } + case mlir::cir::CastKind::int_to_float: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + if (castOp.getSrc().getType().cast().isSigned()) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } default: llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 32d570b94661..98ccb84502ea 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -47,6 +47,12 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4) { // CHECK: %[[TMP2:[0-9]+]] = cir.cast(int_to_ptr, %[[TMP]] : !u64i), !cir.ptr // CHECK: %{{[0-9]+}} = cir.cast(ptr_to_int, %[[TMP2]] : !cir.ptr), !s64i + float sitofp = (float)x2; // Signed integer to floating point + // CHECK: %{{.+}} = cir.cast(int_to_float, %{{[0-9]+}} : !s32i), f32 + + float uitofp = (float)x1; // Unsigned integer to floating point + // CHECK: %{{.+}} = cir.cast(int_to_float, %{{[0-9]+}} : !u32i), f32 + return 0; } diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 7a71bde34a9c..37396e2aa663 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -68,6 +68,10 @@ module { // MLIR: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr %24 = cir.cast(ptr_to_int, %23 : !cir.ptr), !s32i // MLIR: %{{[0-9]+}} = llvm.ptrtoint %[[TMP2]] : !llvm.ptr to i32 + %25 = cir.cast(int_to_float, %arg1 : !s32i), f32 + // MLIR: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32 + %26 = cir.cast(int_to_float, %arg0 : !u32i), f32 + // MLIR: %{{.+}} = llvm.uitofp %{{.+}} : i32 to f32 %18 = cir.const(#cir.int<0> : !s32i) : !s32i cir.store %18, %2 : !s32i, cir.ptr %19 = cir.load %2 : cir.ptr , !s32i From a63b3bf53c6192a07d90d4df0edfaa9033d5a563 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sun, 25 Jun 2023 11:03:47 -0300 Subject: [PATCH 1018/2301] [CIR][CIRGen] Implement tentative definitions Only external tentative definitions were added. A method to easily create initialization attributes for distinct types was added as well. ghstack-source-id: 148f8bc39d6a26f9f30015c292e30cc5c272370e Pull Request resolved: https://github.com/llvm/clangir/pull/125 --- clang/include/clang/CIR/CIRGenerator.h | 1 + clang/lib/CIR/CodeGen/CIRGenBuilder.h | 19 +++++++++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 ++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 33 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 ++ clang/lib/CIR/CodeGen/CIRGenerator.cpp | 7 ++++ .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 +- clang/test/CIR/CodeGen/globals.c | 20 +++++++++++ 9 files changed, 87 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 64c0e4a82202..c0712de63313 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -89,6 +89,7 @@ class CIRGenerator : public clang::ASTConsumer { void HandleTagDeclDefinition(clang::TagDecl *D) override; void HandleTagDeclRequiredDefinition(const clang::TagDecl *D) override; void HandleCXXStaticMemberVarInstantiation(clang::VarDecl *D) override; + void CompleteTentativeDefinition(clang::VarDecl *D) override; mlir::ModuleOp getModule(); std::unique_ptr takeContext() { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 8d53fe99e223..97b707a06607 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -24,6 +24,7 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" #include "llvm/ADT/FloatingPointMode.h" +#include "llvm/Support/ErrorHandling.h" namespace cir { @@ -144,6 +145,24 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return mlir::cir::TypeInfoAttr::get(anonStruct.getType(), anonStruct); } + mlir::TypedAttr getZeroInitAttr(mlir::Type ty) { + if (ty.isa()) + return mlir::cir::IntAttr::get(ty, 0); + if (ty.isa()) + return mlir::FloatAttr::get(ty, 0.0); + if (auto arrTy = ty.dyn_cast()) { + // FIXME(cir): We should have a proper zero initializer CIR instead of + // manually pumping zeros into the array. + assert(!UnimplementedFeature::zeroInitializer()); + auto values = llvm::SmallVector(); + auto zero = getZeroInitAttr(arrTy.getEltType()); + for (unsigned i = 0, e = arrTy.getSize(); i < e; ++i) + values.push_back(zero); + return getConstArray(mlir::ArrayAttr::get(getContext(), values), arrTy); + } + llvm_unreachable("Zero initializer for given type is NYI"); + } + // // Type helpers // ------------ diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 35c48df017e9..d1510db6d8f7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -483,7 +483,10 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { if (const auto *VD = dyn_cast(ND)) { // Global Named registers access via intrinsics only - assert(VD->getStorageClass() != SC_Register && "not implemented"); + if (VD->getStorageClass() == SC_Register && + VD->hasAttr() && !VD->isLocalVarDecl()) + llvm_unreachable("NYI"); + assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); // Check for captured variables. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index d2f32204a8e9..be867f436139 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -773,7 +773,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // exists. A use may still exists, however, so we still may need // to do a RAUW. assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type"); - assert(0 && "not implemented"); + Init = builder.getZeroInitAttr(getCIRType(D->getType())); } else { initializedGlobalDecl = GlobalDecl(D); emitter.emplace(*this); @@ -1611,6 +1611,37 @@ StringRef CIRGenModule::getMangledName(GlobalDecl GD) { return MangledDeclNames[CanonicalGD] = Result.first->first(); } +void CIRGenModule::buildTentativeDefinition(const VarDecl *D) { + assert(!D->getInit() && "Cannot emit definite definitions here!"); + + StringRef MangledName = getMangledName(D); + auto *GV = getGlobalValue(MangledName); + + // TODO(cir): can a tentative definition come from something other than a + // global op? If not, the assertion below is wrong and should be removed. If + // so, getGlobalValue might be better of returining a global value interface + // that alows use to manage different globals value types transparently. + if (GV) + assert(isa(GV) && + "tentative definition can only be built from a cir.global_op"); + + // We already have a definition, not declaration, with the same mangled name. + // Emitting of declaration is not required (and actually overwrites emitted + // definition). + if (GV && !dyn_cast(GV).isDeclaration()) + return; + + // If we have not seen a reference to this variable yet, place it into the + // deferred declarations table to be emitted if needed later. + if (!MustBeEmitted(D) && !GV) { + DeferredDecls[MangledName] = D; + return; + } + + // The tentative definition is the only definition. + buildGlobalVarDefinition(D); +} + void CIRGenModule::setGlobalVisibility(mlir::Operation *GV, const NamedDecl *D) const { assert(!UnimplementedFeature::setGlobalVisibility()); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index ae7f12e2ac4e..c7671a830fc0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -445,6 +445,8 @@ class CIRGenModule : public CIRGenTypeCache { llvm::StringRef getMangledName(clang::GlobalDecl GD); + void buildTentativeDefinition(const VarDecl *D); + // Make sure that this type is translated. void UpdateCompletedType(const clang::TagDecl *TD); diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 2867f1fdc1b0..4fe46c923dda 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -181,3 +181,10 @@ void CIRGenerator::HandleCXXStaticMemberVarInstantiation(VarDecl *D) { CGM->HandleCXXStaticMemberVarInstantiation(D); } + +void CIRGenerator::CompleteTentativeDefinition(VarDecl *D) { + if (Diags.hasErrorOccurred()) + return; + + CGM->buildTentativeDefinition(D); +} diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 46fdde1e17a5..32bfb3650f3a 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -106,6 +106,7 @@ struct UnimplementedFeature { static bool constantFoldsToSimpleInteger() { return false; } static bool alignedLoad() { return false; } static bool checkFunctionCallABI() { return false; } + static bool zeroInitializer() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index a97ef0d36722..5c6bb7f4887f 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -300,7 +300,7 @@ class CIRGenConsumer : public clang::ASTConsumer { } void CompleteTentativeDefinition(VarDecl *D) override { - llvm_unreachable("NYI"); + gen->CompleteTentativeDefinition(D); } void CompleteExternalDeclaration(DeclaratorDecl *D) override { diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 1ab3e4c25c29..39241e2a3ebd 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -14,3 +14,23 @@ int filler_sint[4] = {1, 2}; // Ensure missing elements are zero-initialized. // CHECK: cir.global external @filler_sint = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i]> : !cir.array int excess_sint[2] = {1, 2, 3, 4}; // Ensure excess elements are ignored. // CHECK: cir.global external @excess_sint = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array + +// Tentative definition is just a declaration. +int tentativeB; +int tentativeB = 1; +// CHECK: cir.global external @tentativeB = #cir.int<1> : !s32i + +// Tentative incomplete definition is just a declaration. +int tentativeE[]; +int tentativeE[2] = {1, 2}; +// CHECK: cir.global external @tentativeE = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array + +// TODO: test tentatives with internal linkage. + +// Tentative definition is THE definition. Should be zero-initialized. +int tentativeA; +float tentativeC; +int tentativeD[]; +// CHECK: cir.global external @tentativeA = #cir.int<0> : !s32i +// CHECK: cir.global external @tentativeC = 0.000000e+00 : f32 +// CHECK: cir.global external @tentativeD = #cir.const_array<[#cir.int<0> : !s32i]> : !cir.array From 890b893d5c7ec3705a37625f7132245efdaa2cd3 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sun, 25 Jun 2023 11:03:47 -0300 Subject: [PATCH 1019/2301] [CIR][Lowering] Lower FP to integral casts Essentially lower !cir.cast ops to LLVM's fptosi/fptoui operations. ghstack-source-id: a7f1476bbab9865060d07c4f34f2b778b154a976 Pull Request resolved: https://github.com/llvm/clangir/pull/126 --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 12 ++++++++++++ clang/test/CIR/CodeGen/cast.cpp | 6 ++++++ clang/test/CIR/Lowering/cast.cir | 8 ++++++-- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e5b356429e80..702c738d1a34 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -345,6 +345,18 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } + case mlir::cir::CastKind::float_to_int: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + if (castOp.getResult().getType().cast().isSigned()) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } default: llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 98ccb84502ea..1e72961cc3dd 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -53,6 +53,12 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4) { float uitofp = (float)x1; // Unsigned integer to floating point // CHECK: %{{.+}} = cir.cast(int_to_float, %{{[0-9]+}} : !u32i), f32 + int fptosi = (int)x3; // Floating point to signed integer + // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : f32), !s32i + + unsigned fptoui = (unsigned)x3; // Floating point to unsigned integer + // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : f32), !u32i + return 0; } diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 37396e2aa663..4b7fa47316f5 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -28,8 +28,8 @@ module { // LLVM-NEXT: ret i32 %0 // LLVM-NEXT: } - cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i) -> !s32i { - // MLIR: llvm.func @cStyleCasts(%arg0: i32, %arg1: i32) -> i32 { + cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i, %arg2: f32) -> !s32i { + // MLIR: llvm.func @cStyleCasts %0 = cir.alloca !u32i, cir.ptr , ["x1", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, cir.ptr , ["x2", init] {alignment = 4 : i64} %20 = cir.alloca !s16i, cir.ptr , ["x4", init] {alignment = 2 : i64} @@ -72,6 +72,10 @@ module { // MLIR: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32 %26 = cir.cast(int_to_float, %arg0 : !u32i), f32 // MLIR: %{{.+}} = llvm.uitofp %{{.+}} : i32 to f32 + %27 = cir.cast(float_to_int, %arg2 : f32), !s32i + // MLIR: %{{.+}} = llvm.fptosi %{{.+}} : f32 to i32 + %28 = cir.cast(float_to_int, %arg2 : f32), !u32i + // MLIR: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 %18 = cir.const(#cir.int<0> : !s32i) : !s32i cir.store %18, %2 : !s32i, cir.ptr %19 = cir.load %2 : cir.ptr , !s32i From e5d8dda34944294a6bb4375023499d2b19117d5e Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sun, 25 Jun 2023 11:03:47 -0300 Subject: [PATCH 1020/2301] [CIR][Lowering] Lower float/double const array initializers ghstack-source-id: 308e75c9ce37642bd400da7b8735250fa4004c92 Pull Request resolved: https://github.com/llvm/clangir/pull/127 --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 12 +++++++++--- clang/test/CIR/CodeGen/globals.c | 4 ++++ clang/test/CIR/Lowering/globals.cir | 4 ++++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 702c738d1a34..4eb2d32ee678 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -30,6 +30,7 @@ #include "mlir/Dialect/SCF/Transforms/Passes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" +#include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinDialect.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/IRMapping.h" @@ -735,13 +736,14 @@ class CIRFuncLowering : public mlir::OpConversionPattern { } }; +template mlir::DenseElementsAttr convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, mlir::Type type) { - auto values = llvm::SmallVector{}; + auto values = llvm::SmallVector{}; auto arrayAttr = attr.getElts().dyn_cast(); assert(arrayAttr && "expected array here"); for (auto element : arrayAttr) - values.push_back(element.cast().getValue()); + values.push_back(element.cast().getValue()); return mlir::DenseElementsAttr::get( mlir::RankedTensorType::get({(int64_t)values.size()}, type), llvm::ArrayRef(values)); @@ -763,7 +765,11 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, auto type = cirArrayType.getEltType(); if (type.isa()) - return convertToDenseElementsAttr(constArr, converter->convertType(type)); + return convertToDenseElementsAttr( + constArr, converter->convertType(type)); + if (type.isa()) + return convertToDenseElementsAttr( + constArr, converter->convertType(type)); return std::nullopt; } diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 39241e2a3ebd..311c747d0d98 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -14,6 +14,8 @@ int filler_sint[4] = {1, 2}; // Ensure missing elements are zero-initialized. // CHECK: cir.global external @filler_sint = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i]> : !cir.array int excess_sint[2] = {1, 2, 3, 4}; // Ensure excess elements are ignored. // CHECK: cir.global external @excess_sint = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array +float flt[] = {1.0, 2.0}; +// CHECK: cir.global external @flt = #cir.const_array<[1.000000e+00 : f32, 2.000000e+00 : f32]> : !cir.array // Tentative definition is just a declaration. int tentativeB; @@ -31,6 +33,8 @@ int tentativeE[2] = {1, 2}; int tentativeA; float tentativeC; int tentativeD[]; +float zeroInitFlt[2]; // CHECK: cir.global external @tentativeA = #cir.int<0> : !s32i // CHECK: cir.global external @tentativeC = 0.000000e+00 : f32 // CHECK: cir.global external @tentativeD = #cir.const_array<[#cir.int<0> : !s32i]> : !cir.array +// CHECK: cir.global external @zeroInitFlt = #cir.const_array<[0.000000e+00 : f32, 0.000000e+00 : f32]> : !cir.array diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index f1d5e9bc5631..e5e0f8a15d5a 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -114,4 +114,8 @@ module { cir.store %14, %4 : !cir.ptr, cir.ptr > cir.return } + cir.global external @flt = #cir.const_array<[1.000000e+00 : f32, 2.000000e+00 : f32]> : !cir.array + cir.global external @zeroInitFlt = #cir.const_array<[0.000000e+00 : f32, 0.000000e+00 : f32]> : !cir.array + // MLIR: llvm.mlir.global external @flt(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) {addr_space = 0 : i32} : !llvm.array<2 x f32> + // MLIR: llvm.mlir.global external @zeroInitFlt(dense<0.000000e+00> : tensor<2xf32>) {addr_space = 0 : i32} : !llvm.array<2 x f32> } From 8df788ef6079bbb0399c971ba55d3f93e26fbf64 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 26 Jun 2023 20:20:19 -0300 Subject: [PATCH 1021/2301] [CIR][CIRGen] Add !cir.lang module attribute The `!cir.lang` module attribute is used to represent the source language used to compile the module. Not all languages are supported yet. ghstack-source-id: d6cc07dc8fdaeff611a48f0008febe5d815d3b3a Pull Request resolved: https://github.com/llvm/clangir/pull/120 --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 35 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 23 ++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 3 ++ clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 34 ++++++++++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 1 + clang/test/CIR/CodeGen/basic.c | 2 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 2 +- clang/test/CIR/IR/invalid.cir | 5 +++ clang/test/CIR/IR/module.cir | 12 +++++++ 9 files changed, 115 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/IR/module.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index df9d16d751f5..43117d72da7a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -26,6 +26,41 @@ class CIR_Attr traits = []> let mnemonic = attrMnemonic; } +//===----------------------------------------------------------------------===// +// LangAttr +//===----------------------------------------------------------------------===// + +def C : I32EnumAttrCase<"C", 1, "c">; +def CXX : I32EnumAttrCase<"CXX", 2, "cxx">; + +def SourceLanguage : I32EnumAttr<"SourceLanguage", "Source language", [ + C, CXX +]> { + let cppNamespace = "::mlir::cir"; +} + +def LangAttr : CIR_Attr<"Lang", "lang"> { + let summary = "Module source language"; + let parameters = (ins "SourceLanguageAttr":$lang); + let description = [{ + Represents the source language used to generate the module. + + Example: + ``` + // Module compiled from C. + module attributes {cir.lang = cir.lang} {} + // Module compiled from C++. + module attributes {cir.lang = cir.lang} {} + ``` + }]; + let hasCustomAssemblyFormat = 1; + let extraClassDeclaration = [{ + bool isC() const { return getLang().getValue() == SourceLanguage::C; }; + bool isCXX() const { return getLang().getValue() == SourceLanguage::CXX; }; + }]; + let genVerifyDecl = 0; +} + //===----------------------------------------------------------------------===// // NullAttr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index be867f436139..489776ac93aa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -43,9 +43,12 @@ #include "clang/AST/StmtObjC.h" #include "clang/AST/Type.h" #include "clang/Basic/Diagnostic.h" +#include "clang/Basic/LangStandard.h" #include "clang/Basic/NoSanitizeList.h" #include "clang/Basic/SourceLocation.h" #include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/LowerToLLVM.h" #include "clang/Frontend/FrontendDiagnostic.h" #include "clang/Lex/Preprocessor.h" @@ -164,6 +167,9 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, } theModule->setAttr("cir.sob", mlir::cir::SignedOverflowBehaviorAttr::get(&context, sob)); + auto lang = SourceLanguageAttr::get(&context, getCIRSourceLanguage()); + theModule->setAttr( + "cir.lang", mlir::cir::LangAttr::get(&context, lang)); // Set the module name to be the name of the main file. TranslationUnitDecl // often contains invalid source locations and isn't a reliable source for the // module location. @@ -2359,3 +2365,20 @@ void CIRGenModule::ErrorUnsupported(const Decl *D, const char *Type) { std::string Msg = Type; getDiags().Report(astCtx.getFullLoc(D->getLocation()), DiagID) << Msg; } + +mlir::cir::SourceLanguage CIRGenModule::getCIRSourceLanguage() { + using ClangStd = clang::LangStandard; + using CIRLang = mlir::cir::SourceLanguage; + auto opts = getLangOpts(); + + if (opts.CPlusPlus || opts.CPlusPlus11 || opts.CPlusPlus14 || + opts.CPlusPlus17 || opts.CPlusPlus20 || opts.CPlusPlus23 || + opts.CPlusPlus26) + return CIRLang::CXX; + if (opts.C99 || opts.C11 || opts.C17 || opts.C23 || + opts.LangStd == ClangStd::lang_c89) + return CIRLang::C; + + // TODO(cir): support remaining source languages. + llvm_unreachable("CIR does not yet support the given source language"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index c7671a830fc0..9ea55cea9e7d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -567,6 +567,9 @@ class CIRGenModule : public CIRGenTypeCache { ReplacementsTy Replacements; /// Call replaceAllUsesWith on all pairs in Replacements. void applyReplacements(); + + /// Map source language used to a CIR attribute. + mlir::cir::SourceLanguage getCIRSourceLanguage(); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index bb0cde993908..c0d6f06a9554 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -20,7 +20,9 @@ #include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectImplementation.h" +#include "mlir/IR/Location.h" #include "mlir/IR/OpImplementation.h" +#include "mlir/Support/LLVM.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/TypeSwitch.h" @@ -138,6 +140,38 @@ LogicalResult ConstStructAttr::verify( return success(); } +//===----------------------------------------------------------------------===// +// LangAttr definitions +//===----------------------------------------------------------------------===// + +Attribute LangAttr::parse(AsmParser &parser, Type odsType) { + auto loc = parser.getCurrentLocation(); + if (parser.parseLess()) + return {}; + + // Parse variable 'lang'. + llvm::StringRef lang; + if (parser.parseKeyword(&lang)) + return {}; + + // Check if parsed value is a valid language. + auto langEnum = symbolizeSourceLanguage(lang); + if (!langEnum.has_value()) { + parser.emitError(loc) << "invalid language keyword '" << lang << "'"; + return {}; + } + + if (parser.parseGreater()) + return {}; + + return get(parser.getContext(), + SourceLanguageAttr::get(parser.getContext(), langEnum.value())); +} + +void LangAttr::print(AsmPrinter &printer) const { + printer << "<" << getLang().getValue() << '>'; +} + //===----------------------------------------------------------------------===// // IntAttr definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 4eb2d32ee678..b14be1891701 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1239,6 +1239,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { mlir::func::FuncDialect>(); getOperation()->removeAttr("cir.sob"); + getOperation()->removeAttr("cir.lang"); if (failed(applyPartialConversion(module, target, std::move(patterns)))) signalPassFailure(); diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 2e99d6e9731f..37027bde884d 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -10,7 +10,7 @@ int foo(int i) { return i; } -// CIR: module @"{{.*}}basic.c" attributes { +// CIR: module @"{{.*}}basic.c" attributes {{{.*}}cir.lang = #cir.lang // CIR-NEXT: cir.func @foo(%arg0: !s32i loc({{.*}})) -> !s32i { // CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CIR-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index c907083c6a87..5aef7b7554da 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -18,7 +18,7 @@ int s0(int a, int b) { // CIR: #loc6 = loc("{{.*}}sourcelocation.cpp":6:19) // CIR: #loc21 = loc(fused[#loc3, #loc4]) // CIR: #loc22 = loc(fused[#loc5, #loc6]) -// CIR: module @"{{.*}}sourcelocation.cpp" attributes {cir.sob = #cir.signed_overflow_behavior +// CIR: module @"{{.*}}sourcelocation.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior // CIR: cir.func @_Z2s0ii(%arg0: !s32i loc(fused[#loc3, #loc4]), %arg1: !s32i loc(fused[#loc5, #loc6])) -> !s32i { // CIR: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) // CIR: %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index ab6cf2e11674..a20e1d4510cf 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -422,3 +422,8 @@ module { cir.return %0 : !cir.int } } + +// ----- + +// expected-error@+1 {{invalid language keyword 'dummy'}} +module attributes {cir.lang = #cir.lang} { } diff --git a/clang/test/CIR/IR/module.cir b/clang/test/CIR/IR/module.cir new file mode 100644 index 000000000000..c2fc99332670 --- /dev/null +++ b/clang/test/CIR/IR/module.cir @@ -0,0 +1,12 @@ +// RUN: cir-tool %s -split-input-file -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Should parse and print C source language attribute. +module attributes {cir.lang = #cir.lang} { } +// CHECK: module attributes {cir.lang = #cir.lang} + +// ----- + +// Should parse and print C++ source language attribute. +module attributes {cir.lang = #cir.lang} { } +// CHECK: module attributes {cir.lang = #cir.lang} From 190255b3d6b37008279920ee78cc6a523b598256 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 26 Jun 2023 20:20:20 -0300 Subject: [PATCH 1022/2301] [CIR][CIRGen] Support no-proto declarations with defined behaviour Essentially add the required snippets of code for the codegen of no-proto declarations. The codegen works as follows: - A no-proto declaration is found and defined in CIR as a function that can take any number of arguments. - Calls to the function are generated as direct calls that can take any number of arguments. - If the function definition is found, the calls the no-proto declaration is replaced by its definition and any call is patched to have the correct type. CIR's verifier will catch any type error. - If no definition is found, a type error occurs. Invalid calls and external definitions for no-proto declarations are not yet handled. ghstack-source-id: bb06823126ea3e4ba10fa985fd67375295b02b8f Pull Request resolved: https://github.com/llvm/clangir/pull/122 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 ++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 20 ++++- clang/lib/CIR/CodeGen/CIRGenCall.h | 6 ++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 23 +++--- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 81 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 + clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 13 ++- clang/lib/CIR/CodeGen/CIRGenTypes.h | 3 + .../CodeGen/UnimplementedFeatureGuarding.h | 5 ++ clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 6 ++ clang/test/CIR/CodeGen/no-proto-is-void.cpp | 13 +++ clang/test/CIR/CodeGen/no-prototype.c | 22 +++++ 12 files changed, 177 insertions(+), 23 deletions(-) create mode 100644 clang/test/CIR/CodeGen/no-proto-is-void.cpp create mode 100644 clang/test/CIR/CodeGen/no-prototype.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 97b707a06607..26649f274174 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -219,6 +219,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return getPointerTo(getPointerTo(fnTy)); } + mlir::cir::FuncType getFuncType(llvm::ArrayRef params, + mlir::Type retTy, bool isVarArg = false) { + return mlir::cir::FuncType::get(params, retTy, isVarArg); + } + // Fetch the type representing a pointer to unsigned int values. mlir::cir::PointerType getUInt8PtrTy(unsigned AddrSpace = 0) { return typeCache.UInt8PtrTy; diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 1732f4e079ae..125172d75ff1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -26,6 +26,7 @@ #include "llvm/Support/ErrorHandling.h" #include +#include "UnimplementedFeatureGuarding.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" @@ -945,6 +946,18 @@ CIRGenTypes::arrangeFreeFunctionType(CanQual FTP) { FTP); } +/// Arrange the argument and result information for a value of the given +/// unprototyped freestanding function type. +const CIRGenFunctionInfo & +CIRGenTypes::arrangeFreeFunctionType(CanQual FTNP) { + // When translating an unprototyped function type, always use a + // variadic type. + return arrangeCIRFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), + /*instanceMethod=*/false, + /*chainCall=*/false, std::nullopt, + FTNP->getExtInfo(), {}, RequiredArgs(0)); +} + /// Arrange a call to a C++ method, passing the given arguments. /// /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` @@ -1082,10 +1095,9 @@ arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, if (proto->hasExtParameterInfos()) addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, args.size()); - } else { - assert(!llvm::isa(fnType) && - "FunctionNoProtoType NYI"); - llvm_unreachable("Unknown function prototype"); + } else if (llvm::isa(fnType)) { + assert(!UnimplementedFeature::targetCodeGenInfoIsProtoCallVariadic()); + required = RequiredArgs(args.size()); } // FIXME: Kill copy. diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index c5d4157eb636..3e8393742616 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -184,6 +184,12 @@ class CIRGenCallee { assert(isVirtual()); return VirtualInfo.FTy; } + + void setFunctionPointer(mlir::Operation *functionPtr) { + assert(isOrdinary()); + KindOrFunctionPointer = + SpecialKind(reinterpret_cast(functionPtr)); + } }; struct CallArg { diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index d1510db6d8f7..b9128985595b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenBuilder.h" #include "CIRGenCXXABI.h" #include "CIRGenCall.h" #include "CIRGenCstEmitter.h" @@ -20,6 +21,8 @@ #include "clang/AST/GlobalDecl.h" #include "clang/Basic/Builtins.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/Casting.h" #include "llvm/ADT/StringExtras.h" @@ -36,8 +39,6 @@ static mlir::cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, assert(!FD->hasAttr() && "NYI"); auto V = CGM.GetAddrOfFunction(GD); - assert(FD->hasPrototype() && - "Only prototyped functions are currently callable"); return V; } @@ -889,16 +890,14 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, // // Chain calls use the same code path to add the inviisble chain parameter to // the function type. - assert(!isa(FnType) && "NYI"); - // if (isa(FnType) || Chain) { - // mlir::FunctionType CalleeTy = getTypes().GetFunctionType(FnInfo); - // int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace(); - // CalleeTy = CalleeTy->getPointerTo(AS); - - // llvm::Value *CalleePtr = Callee.getFunctionPointer(); - // CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast"); - // Callee.setFunctionPointer(CalleePtr); - // } + if (isa(FnType) || Chain) { + assert(!UnimplementedFeature::chainCalls()); + assert(!UnimplementedFeature::addressSpace()); + + // Set no-proto function as callee. + auto Fn = llvm::dyn_cast(Callee.getFunctionPointer()); + Callee.setFunctionPointer(Fn); + } assert(!CGM.getLangOpts().HIP && "HIP NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 489776ac93aa..46fd75da7054 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -19,6 +19,7 @@ #include "CIRGenValue.h" #include "TargetInfo.h" +#include "UnimplementedFeatureGuarding.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/Attributes.h" @@ -26,6 +27,8 @@ #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/MLIRContext.h" +#include "mlir/IR/OperationSupport.h" +#include "mlir/IR/SymbolTable.h" #include "mlir/IR/Verifier.h" #include "clang/AST/ASTConsumer.h" @@ -1445,6 +1448,48 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( return mlir::cir::GlobalLinkageKind::ExternalLinkage; } +/// This function is called when we implement a function with no prototype, e.g. +/// "int foo() {}". If there are existing call uses of the old function in the +/// module, this adjusts them to call the new function directly. +/// +/// This is not just a cleanup: the always_inline pass requires direct calls to +/// functions to be able to inline them. If there is a bitcast in the way, it +/// won't inline them. Instcombine normally deletes these calls, but it isn't +/// run at -O0. +void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( + mlir::Operation *Old, mlir::cir::FuncOp NewFn) { + + // If we're redefining a global as a function, don't transform it. + auto OldFn = dyn_cast(Old); + if (!OldFn) + return; + + // TODO(cir): this RAUW ignores the features below. + assert(!UnimplementedFeature::exceptions() && "Call vs Invoke NYI"); + assert(!UnimplementedFeature::parameterAttributes()); + assert(!UnimplementedFeature::operandBundles()); + assert(OldFn->getAttrs().size() > 0 && "Attribute forwarding NYI"); + + // Iterate through all calls of the no-proto function. + auto Calls = OldFn.getSymbolUses(OldFn->getParentOp()); + for (auto Call : Calls.value()) { + mlir::OpBuilder::InsertionGuard guard(builder); + + // Fetch no-proto call to be replaced. + auto noProtoCallOp = dyn_cast(Call.getUser()); + assert(noProtoCallOp && "unexpected use of no-proto function"); + builder.setInsertionPoint(noProtoCallOp); + + // Patch call type with the real function type. + auto realCallOp = builder.create( + noProtoCallOp.getLoc(), NewFn, noProtoCallOp.getOperands()); + + // Replace old no proto call with fixed call. + noProtoCallOp.replaceAllUsesWith(realCallOp); + noProtoCallOp.erase(); + } +} + mlir::cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { const auto *D = cast(GD.getDecl()); @@ -1795,7 +1840,10 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( if (Fn && Fn.getFunctionType() == Ty) { return Fn; } - llvm_unreachable("NYI"); + + if (!IsForDefinition) { + return Fn; + } // TODO: clang checks here if this is a llvm::GlobalAlias... how will we // support this? @@ -1822,8 +1870,33 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // mangledname if Entry is nullptr auto F = createCIRFunction(getLocForFunction(FD), MangledName, FTy, FD); + // If we already created a function with the same mangled name (but different + // type) before, take its name and add it to the list of functions to be + // replaced with F at the end of CodeGen. + // + // This happens if there is a prototype for a function (e.g. "int f()") and + // then a definition of a different type (e.g. "int f(int x)"). if (Entry) { - llvm_unreachable("NYI"); + + // Fetch a generic symbol-defining operation and its uses. + auto SymbolOp = dyn_cast(Entry); + assert(SymbolOp && "Expected a symbol-defining operation"); + + // TODO(cir): When can this symbol be something other than a function? + assert(isa(Entry) && "NYI"); + + // This might be an implementation of a function without a prototype, in + // which case, try to do special replacement of calls which match the new + // prototype. The really key thing here is that we also potentially drop + // arguments from the call site so as to make a direct call, which makes the + // inliner happier and suppresses a number of optimizer warnings (!) about + // dropping arguments. + if (SymbolOp.getSymbolUses(SymbolOp->getParentOp())) { + ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F); + } + + // Obliterate no-proto declaration. + Entry->erase(); } // TODO: This might not be valid, seems the uniqueing system doesn't make @@ -1891,7 +1964,9 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( return F; } - assert(false && "Incompmlete functions NYI"); + // TODO(cir): Might need bitcast to different address space. + assert(!UnimplementedFeature::addressSpace()); + return F; } mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 9ea55cea9e7d..a5066e1284fb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -520,6 +520,9 @@ class CIRGenModule : public CIRGenTypeCache { mlir::Location getLocForFunction(const clang::FunctionDecl *FD); + void ReplaceUsesOfNonProtoTypeWithRealFunction(mlir::Operation *Old, + mlir::cir::FuncOp NewFn); + // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR mlir::cir::FuncOp diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 829ff3a47a5a..2b71f1aed14b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -16,6 +16,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/AST/RecordLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" using namespace clang; using namespace cir; @@ -255,10 +256,14 @@ mlir::Type CIRGenTypes::ConvertFunctionTypeInternal(QualType QFT) { // The function type can be built; call the appropriate routines to build it const CIRGenFunctionInfo *FI; - const auto *FPT = dyn_cast(FT); - assert(FPT && "FunctionNonPrototype NIY"); - FI = &arrangeFreeFunctionType( - CanQual::CreateUnsafe(QualType(FPT, 0))); + if (const auto *FPT = dyn_cast(FT)) { + FI = &arrangeFreeFunctionType( + CanQual::CreateUnsafe(QualType(FPT, 0))); + } else { + const FunctionNoProtoType *FNPT = cast(FT); + FI = &arrangeFreeFunctionType( + CanQual::CreateUnsafe(QualType(FNPT, 0))); + } mlir::Type ResultType = nullptr; // If there is something higher level prodding our CIRGenFunctionInfo, then diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 1b93643928c5..1e54d287ec72 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -249,6 +249,9 @@ class CIRGenTypes { const CIRGenFunctionInfo & arrangeFreeFunctionType(clang::CanQual Ty); + const CIRGenFunctionInfo & + arrangeFreeFunctionType(clang::CanQual FTNP); + /// "Arrange" the CIR information for a call or type with the given /// signature. This is largely an internal method; other clients should use /// one of the above routines, which ultimatley defer to this. diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 32bfb3650f3a..abd5917e7687 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -61,6 +61,7 @@ struct UnimplementedFeature { // LLVM Attributes static bool attributeBuiltin() { return false; } static bool attributeNoBuiltin() { return false; } + static bool parameterAttributes() { return false; } // Coroutines static bool unhandledException() { return false; } @@ -107,6 +108,10 @@ struct UnimplementedFeature { static bool alignedLoad() { return false; } static bool checkFunctionCallABI() { return false; } static bool zeroInitializer() { return false; } + static bool targetCodeGenInfoIsProtoCallVariadic() { return false; } + static bool chainCalls() { return false; } + static bool operandBundles() { return false; } + static bool exceptions() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index db27a2fa664f..43c7817fddbd 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -431,6 +431,12 @@ parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, if (succeeded(p.parseOptionalRParen())) return mlir::success(); + // `(` `...` `)` + if (succeeded(p.parseOptionalEllipsis())) { + isVarArg = true; + return p.parseRParen(); + } + // type (`,` type)* (`,` `...`)? mlir::Type type; if (p.parseType(type)) diff --git a/clang/test/CIR/CodeGen/no-proto-is-void.cpp b/clang/test/CIR/CodeGen/no-proto-is-void.cpp new file mode 100644 index 000000000000..0bf99efc3be0 --- /dev/null +++ b/clang/test/CIR/CodeGen/no-proto-is-void.cpp @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -x c -std=c2x -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Both CXX and C2X don't support no-prototype functions. They default to void. +int noProto(); +// CHECK: cir.func @{{.*}}noProto{{.*}}() -> !s32i { +int test(int x) { + return noProto(); + // CHECK {{.+}} = cir.call @{{.*}}noProto{{.*}}() : () -> !s32i +} +int noProto() { return 0; } diff --git a/clang/test/CIR/CodeGen/no-prototype.c b/clang/test/CIR/CodeGen/no-prototype.c new file mode 100644 index 000000000000..3f8a9b40a85a --- /dev/null +++ b/clang/test/CIR/CodeGen/no-prototype.c @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// No-proto definition followed by a correct call. +int noProto0(x) int x; { return x; } +int test0(int x) { + // CHECK: cir.func @test0 + return noProto0(x); // We know the definition. Should be a direct call. + // CHECK: %{{.+}} = cir.call @noProto0(%{{.+}}) +} + +// Declaration without prototype followed by its definition, then a correct call. +// +// Call to no-proto is made after definition, so a direct call can be used. +int noProto1(); +int noProto1(int x) { return x; } +// CHECK: cir.func @noProto1(%arg0: !s32i {{.+}}) -> !s32i { +int test1(int x) { + // CHECK: cir.func @test1 + return noProto1(x); + // CHECK: %{{.+}} = cir.call @noProto1(%{{[0-9]+}}) : (!s32i) -> !s32i +} From c68e552c1bc6dc2acb1312197dcd3bae1e4ed9f9 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 26 Jun 2023 20:20:20 -0300 Subject: [PATCH 1023/2301] [CIR][CIRGen] Bypass type checking for no-prototype functions Invalid calls to no-prototype functions should still compile even if they have undefined behavior. This patch adds a `no_proto` unite attr to function declarations that bypass argument checking. The attribute is also used to bypass checks for functions that can take any number of arguments. This happens when the no-proto function is externally defined and we can not check its signature. ghstack-source-id: 94828fc35df62ecfc4eb0b9f0a3ad130e1fb1718 Pull Request resolved: https://github.com/llvm/clangir/pull/130 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 ++ .../include/clang/CIR/Dialect/IR/CIRTypes.td | 4 -- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 13 ++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 41 ++++++++----- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 9 --- clang/test/CIR/CodeGen/basic.c | 4 +- clang/test/CIR/CodeGen/linkage.c | 2 +- clang/test/CIR/CodeGen/loop-scope.cpp | 2 +- clang/test/CIR/CodeGen/mlirprint.c | 2 +- clang/test/CIR/CodeGen/no-prototype.c | 57 ++++++++++++++++++- clang/test/CIR/CodeGen/store.c | 2 +- clang/test/CIR/CodeGen/struct.c | 2 +- clang/test/CIR/CodeGen/types.c | 2 +- clang/test/CIR/IR/func.cir | 3 + clang/test/CIR/IR/invalid.cir | 4 +- clang/test/CIR/driver.c | 2 +- clang/test/CIR/hello.c | 2 +- 17 files changed, 113 insertions(+), 43 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a0736a4793d8..94df76feca81 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1408,6 +1408,10 @@ def FuncOp : CIR_Op<"func", [ allow callsites to make certain assumptions about the real function nature when writing analysis. The verifier should, but do act on this keyword yet. + The `no_proto` keyword is used to identify functions that were declared + without a prototype and, consequently, may contain calls with invalid + arguments and undefined behavior. + Example: ```mlir @@ -1439,6 +1443,7 @@ def FuncOp : CIR_Op<"func", [ UnitAttr:$builtin, UnitAttr:$coroutine, UnitAttr:$lambda, + UnitAttr:$no_proto, DefaultValuedAttr:$linkage, OptionalAttr:$sym_visibility, diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index fac880347a39..7d157ad964fa 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -190,8 +190,6 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { `<` $returnType ` ` `(` custom($inputs, $varArg) `>` }]; - let skipDefaultBuilders = 1; - let builders = [ TypeBuilderWithInferredContext<(ins "ArrayRef":$inputs, "Type":$returnType, @@ -200,8 +198,6 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { }]> ]; - let genVerifyDecl = 1; - let extraClassDeclaration = [{ /// Returns whether the function is variadic. bool isVarArg() const { return getVarArg(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 46fd75da7054..8c5d6802428e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -24,6 +24,7 @@ #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/MLIRContext.h" @@ -1468,7 +1469,10 @@ void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( assert(!UnimplementedFeature::exceptions() && "Call vs Invoke NYI"); assert(!UnimplementedFeature::parameterAttributes()); assert(!UnimplementedFeature::operandBundles()); - assert(OldFn->getAttrs().size() > 0 && "Attribute forwarding NYI"); + assert(OldFn->getAttrs().size() > 1 && "Attribute forwarding NYI"); + + // Mark new function as originated from a no-proto declaration. + NewFn.setNoProtoAttr(OldFn.getNoProtoAttr()); // Iterate through all calls of the no-proto function. auto Calls = OldFn.getSymbolUses(OldFn->getParentOp()); @@ -1744,9 +1748,12 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, builder.setInsertionPoint(curCGF->CurFn.getOperation()); f = builder.create(loc, name, Ty); + if (FD) - f.setAstAttr( - mlir::cir::ASTFunctionDeclAttr::get(builder.getContext(), FD)); + f.setAstAttr(builder.getAttr(FD)); + + if (FD && !FD->hasPrototype()) + f.setNoProtoAttr(builder.getUnitAttr()); assert(f.isDeclaration() && "expected empty body"); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 54ab59397aac..b838f17bb5a8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1364,6 +1364,7 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { auto coroutineNameAttr = getCoroutineAttrName(state.name); auto lambdaNameAttr = getLambdaAttrName(state.name); auto visNameAttr = getSymVisibilityAttrName(state.name); + auto noProtoNameAttr = getNoProtoAttrName(state.name); if (::mlir::succeeded(parser.parseOptionalKeyword(builtinNameAttr.strref()))) state.addAttribute(builtinNameAttr, parser.getBuilder().getUnitAttr()); if (::mlir::succeeded( @@ -1371,6 +1372,8 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { state.addAttribute(coroutineNameAttr, parser.getBuilder().getUnitAttr()); if (::mlir::succeeded(parser.parseOptionalKeyword(lambdaNameAttr.strref()))) state.addAttribute(lambdaNameAttr, parser.getBuilder().getUnitAttr()); + if (parser.parseOptionalKeyword(noProtoNameAttr).succeeded()) + state.addAttribute(noProtoNameAttr, parser.getBuilder().getUnitAttr()); // Default to external linkage if no keyword is provided. state.addAttribute(getLinkageAttrNameString(), @@ -1418,8 +1421,7 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { : resultTypes.front()); // Build the function type. - auto fnType = mlir::cir::FuncType::getChecked( - parser.getEncodedSourceLoc(loc), argTypes, returnType, isVariadic); + auto fnType = mlir::cir::FuncType::get(argTypes, returnType, isVariadic); if (!fnType) return failure(); state.addAttribute(getFunctionTypeAttrName(state.name), @@ -1503,6 +1505,9 @@ void cir::FuncOp::print(OpAsmPrinter &p) { if (getLambda()) p << "lambda "; + if (getNoProto()) + p << "no_proto "; + if (getLinkage() != GlobalLinkageKind::ExternalLinkage) p << stringifyGlobalLinkageKind(getLinkage()) << ' '; @@ -1524,7 +1529,8 @@ void cir::FuncOp::print(OpAsmPrinter &p) { function_interface_impl::printFunctionAttributes( p, *this, {getSymVisibilityAttrName(), getAliaseeAttrName(), - getFunctionTypeAttrName(), getLinkageAttrName(), getBuiltinAttrName()}); + getFunctionTypeAttrName(), getLinkageAttrName(), getBuiltinAttrName(), + getNoProtoAttrName()}); if (auto aliaseeName = getAliasee()) { p << " alias("; @@ -1549,6 +1555,9 @@ LogicalResult cir::FuncOp::verifyType() { if (!type.isa()) return emitOpError("requires '" + getFunctionTypeAttrName().str() + "' attribute of function type"); + if (!getNoProto() && type.isVarArg() && type.getNumInputs() == 0) + return emitError() + << "prototyped function must have at least one non-variadic input"; return success(); } @@ -1637,18 +1646,22 @@ cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return emitOpError() << "'" << fnAttr.getValue() << "' does not reference a valid function"; - // Verify that the operand and result types match the callee. + // Verify that the operand and result types match the callee. Note that + // argument-checking is disabled for functions without a prototype. auto fnType = fn.getFunctionType(); - if (!fnType.isVarArg() && getNumOperands() != fnType.getNumInputs()) - return emitOpError("incorrect number of operands for callee"); - if (fnType.isVarArg() && getNumOperands() < fnType.getNumInputs()) - return emitOpError("too few operands for callee"); - - for (unsigned i = 0, e = fnType.getNumInputs(); i != e; ++i) - if (getOperand(i).getType() != fnType.getInput(i)) - return emitOpError("operand type mismatch: expected operand type ") - << fnType.getInput(i) << ", but provided " - << getOperand(i).getType() << " for operand number " << i; + if (!fn.getNoProto()) { + if (!fnType.isVarArg() && getNumOperands() != fnType.getNumInputs()) + return emitOpError("incorrect number of operands for callee"); + + if (fnType.isVarArg() && getNumOperands() < fnType.getNumInputs()) + return emitOpError("too few operands for callee"); + + for (unsigned i = 0, e = fnType.getNumInputs(); i != e; ++i) + if (getOperand(i).getType() != fnType.getInput(i)) + return emitOpError("operand type mismatch: expected operand type ") + << fnType.getInput(i) << ", but provided " + << getOperand(i).getType() << " for operand number " << i; + } // Void function must not return any results. if (fnType.isVoid() && getNumResults() != 0) diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 43c7817fddbd..bbf6b382fc30 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -409,15 +409,6 @@ IntType::verify(llvm::function_ref emitError, // FuncType Definitions //===----------------------------------------------------------------------===// -mlir::LogicalResult -FuncType::verify(llvm::function_ref emitError, - llvm::ArrayRef inputs, mlir::Type result, - bool varArg) { - if (varArg && inputs.empty()) - return emitError() << "functions must have at least one non-variadic input"; - return mlir::success(); -} - FuncType FuncType::clone(TypeRange inputs, TypeRange results) const { assert(results.size() == 1 && "expected exactly one result type"); return get(llvm::to_vector(inputs), results[0], isVarArg()); diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 37027bde884d..b16b25c8dfb3 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -21,7 +21,7 @@ int foo(int i) { // CIR-NEXT: %4 = cir.load %1 : cir.ptr , !s32i // CIR-NEXT: cir.return %4 : !s32i -int f2() { return 3; } +int f2(void) { return 3; } // CIR: cir.func @f2() -> !s32i { // CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} @@ -38,7 +38,7 @@ int f2() { return 3; } -int f3() { +int f3(void) { int i = 3; return i; } diff --git a/clang/test/CIR/CodeGen/linkage.c b/clang/test/CIR/CodeGen/linkage.c index ac5f31fd7564..fc0e95499743 100644 --- a/clang/test/CIR/CodeGen/linkage.c +++ b/clang/test/CIR/CodeGen/linkage.c @@ -8,7 +8,7 @@ static int bar(int i) { return i; } -int foo() { +int foo(void) { return bar(5); } diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index b7f5b49da857..9f7f74ef3efc 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -3,7 +3,7 @@ // RUN: %clang_cc1 -x c -std=c11 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.c.cir // RUN: FileCheck --input-file=%t.c.cir %s --check-prefix=CSCOPE -void l0() { +void l0(void) { for (int i = 0;;) { int j = 0; } diff --git a/clang/test/CIR/CodeGen/mlirprint.c b/clang/test/CIR/CodeGen/mlirprint.c index 65a8351676b1..96ff16e95708 100644 --- a/clang/test/CIR/CodeGen/mlirprint.c +++ b/clang/test/CIR/CodeGen/mlirprint.c @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR // RUN: %clang_cc1 -fclangir -emit-llvm -mmlir --mlir-print-ir-after-all -mllvm -print-after-all %s -o %t.ll 2>&1 | FileCheck %s -check-prefix=CIR -check-prefix=LLVM -int foo() { +int foo(void) { int i = 3; return i; } diff --git a/clang/test/CIR/CodeGen/no-prototype.c b/clang/test/CIR/CodeGen/no-prototype.c index 3f8a9b40a85a..9f9cbdfb1c33 100644 --- a/clang/test/CIR/CodeGen/no-prototype.c +++ b/clang/test/CIR/CodeGen/no-prototype.c @@ -1,8 +1,13 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +//===----------------------------------------------------------------------===// +// DEFINED BEHAVIOUR +//===----------------------------------------------------------------------===// + // No-proto definition followed by a correct call. int noProto0(x) int x; { return x; } +// CHECK: cir.func no_proto @noProto0(%arg0: !s32i {{.+}}) -> !s32i int test0(int x) { // CHECK: cir.func @test0 return noProto0(x); // We know the definition. Should be a direct call. @@ -11,7 +16,9 @@ int test0(int x) { // Declaration without prototype followed by its definition, then a correct call. // -// Call to no-proto is made after definition, so a direct call can be used. +// Prototyped definition overrides no-proto declaration before any call is made, +// only allowing calls with proper arguments. This is the only case where the +// definition is not marked as no-proto. int noProto1(); int noProto1(int x) { return x; } // CHECK: cir.func @noProto1(%arg0: !s32i {{.+}}) -> !s32i { @@ -20,3 +27,51 @@ int test1(int x) { return noProto1(x); // CHECK: %{{.+}} = cir.call @noProto1(%{{[0-9]+}}) : (!s32i) -> !s32i } + +// Declaration without prototype followed by a correct call, then its definition. +// +// Call to no-proto is made before definition, so a variadic call that takes anything +// is created. Later, when the definition is found, no-proto is replaced. +int noProto2(); +int test2(int x) { + return noProto2(x); + // CHECK: %{{.+}} = cir.call @noProto2(%{{[0-9]+}}) : (!s32i) -> !s32i +} +int noProto2(int x) { return x; } +// CHECK: cir.func no_proto @noProto2(%arg0: !s32i {{.+}}) -> !s32i + +// No-proto declaration without definition (any call here is "correct"). +// +// Call to no-proto is made before definition, so a variadic call that takes anything +// is created. Definition is not in the translation unit, so it is left as is. +int noProto3(); +// cir.func private no_proto @noProto3(...) -> !s32i +int test3(int x) { +// CHECK: cir.func @test3 + return noProto3(x); + // CHECK: %{{.+}} = cir.call @noProto3(%{{[0-9]+}}) : (!s32i) -> !s32i +} + + +//===----------------------------------------------------------------------===// +// UNDEFINED BEHAVIOUR +// +// No-proto definitions followed by incorrect calls. +//===----------------------------------------------------------------------===// + +// No-proto definition followed by an incorrect call due to extra args. +int noProto4() { return 0; } +// cir.func private no_proto @noProto4() -> !s32i +int test4(int x) { + return noProto4(x); // Even if we know the definition, this should compile. + // CHECK: %{{.+}} = cir.call @noProto4(%{{.+}}) : (!s32i) -> !s32i +} + +// No-proto definition followed by an incorrect call due to lack of args. +int noProto5(); +int test5(int x) { + return noProto5(); + // CHECK: %{{.+}} = cir.call @noProto5() : () -> !s32i +} +int noProto5(int x) { return x; } +// CHECK: cir.func no_proto @noProto5(%arg0: !s32i {{.+}}) -> !s32i diff --git a/clang/test/CIR/CodeGen/store.c b/clang/test/CIR/CodeGen/store.c index 7d8207fbee10..e190b3cb34eb 100644 --- a/clang/test/CIR/CodeGen/store.c +++ b/clang/test/CIR/CodeGen/store.c @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -void foo() { +void foo(void) { int a = 0; a = 1; } diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 30bb4171759f..24c4b318cd87 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -12,7 +12,7 @@ struct Foo { struct Bar z; }; -void baz() { +void baz(void) { struct Bar b; struct Foo f; } diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index 29e2240e2dc0..4f97daeaee5f 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -15,7 +15,7 @@ unsigned short t5(unsigned short i) { return i; } float t6(float i) { return i; } double t7(double i) { return i; } -void t8() {} +void t8(void) {} #ifdef __cplusplus bool t9(bool b) { return b; } diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir index 73898f70172e..2ab6d54081ff 100644 --- a/clang/test/CIR/IR/func.cir +++ b/clang/test/CIR/IR/func.cir @@ -37,6 +37,9 @@ module { cir.func @parse_func_type_with_omitted_void() { cir.return } + + // Should parse variadic no-proto functions. + cir.func no_proto private @no_proto(...) -> !s32i } // CHECK: cir.func @l0() \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index a20e1d4510cf..636fece63646 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -363,8 +363,8 @@ module { // ----- module { - // expected-error@+1 {{functions must have at least one non-variadic input}} - cir.func @variadic(...) -> !cir.int + // expected-error@+1 {{prototyped function must have at least one non-variadic input}} + cir.func private @variadic(...) -> !cir.int } // ----- diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index 96a9caacab86..61bd42a77cb8 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -13,7 +13,7 @@ // RUN: %clang -target arm64-apple-macosx12.0.0 -fclangir -S -Xclang -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -void foo() {} +void foo(void) {} // CIR: module {{.*}} { // CIR-NEXT: cir.func @foo() { diff --git a/clang/test/CIR/hello.c b/clang/test/CIR/hello.c index 4b07c04994aa..7a806cfae85d 100644 --- a/clang/test/CIR/hello.c +++ b/clang/test/CIR/hello.c @@ -2,4 +2,4 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s void foo() {} -// CHECK: cir.func @foo +// CHECK: cir.func no_proto @foo From c6602f2c1a4e2a4e2b242678934fcc162573cf03 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 26 Jun 2023 20:20:20 -0300 Subject: [PATCH 1024/2301] [CIR][Lowering] Lower casted indirect function calls Lowers bitcasts and function types to LLVM Dialect. These are required to represent indirect casted calls. ghstack-source-id: c274c353894f2c8295d45645519a1d0436b22f3b Pull Request resolved: https://github.com/llvm/clangir/pull/123 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 16 ++++++++++++++++ clang/test/CIR/Lowering/func.cir | 18 ++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 clang/test/CIR/Lowering/func.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b14be1891701..2cdb370adc7c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -358,6 +358,14 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } + case mlir::cir::CastKind::bitcast: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } default: llvm_unreachable("NYI"); } @@ -1188,6 +1196,14 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { // LLVM doesn't work with signed types, so we drop the CIR signs here. return mlir::IntegerType::get(type.getContext(), type.getWidth()); }); + converter.addConversion([&](mlir::cir::FuncType type) -> mlir::Type { + auto result = converter.convertType(type.getReturnType()); + llvm::SmallVector arguments; + if (converter.convertTypes(type.getInputs(), arguments).failed()) + llvm_unreachable("Failed to convert function type parameters"); + auto varArg = type.isVarArg(); + return mlir::LLVM::LLVMFunctionType::get(result, arguments, varArg); + }); converter.addConversion([&](mlir::cir::StructType type) -> mlir::Type { llvm::SmallVector llvmMembers; for (auto ty : type.getMembers()) diff --git a/clang/test/CIR/Lowering/func.cir b/clang/test/CIR/Lowering/func.cir new file mode 100644 index 000000000000..b524729ff697 --- /dev/null +++ b/clang/test/CIR/Lowering/func.cir @@ -0,0 +1,18 @@ +// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck %s -check-prefix=MLIR --input-file=%t.mlir +// XFAIL: * + +!s32i = !cir.int +module { + cir.func no_proto private @noProto3(...) -> !s32i + // MLIR: llvm.func @noProto3(...) -> i32 + cir.func @test3(%arg0: !s32i) { + %3 = cir.get_global @noProto3 : cir.ptr > + // MLIR: %[[#FN_PTR:]] = llvm.mlir.addressof @noProto3 : !llvm.ptr + %4 = cir.cast(bitcast, %3 : !cir.ptr>), !cir.ptr> + // MLIR: %[[#FUNC:]] = llvm.bitcast %[[#FN_PTR]] : !llvm.ptr to !llvm.ptr + %5 = cir.call %4(%arg0) : (!cir.ptr>, !s32i) -> !s32i + // MLIR: %{{.+}} = llvm.call %[[#FUNC]](%{{.+}}) : !llvm.ptr, (i32) -> i32 + cir.return + } +} From 2cbc17b307a88b86a396816a39fbfe660235b265 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 26 Jun 2023 20:00:27 -0700 Subject: [PATCH 1025/2301] [CIR][CIRGen] Initial support for __builtin_object_size --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 43 +++++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 9 ++++++ clang/test/CIR/CodeGen/libcall.cpp | 19 +++++++++-- 3 files changed, 69 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 22f79e230682..b323bb2b82da 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -409,6 +409,22 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return buildCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), E, ReturnValue); } + case Builtin::BI__builtin_dynamic_object_size: { + // Fallthrough below, assert until we have a testcase. + llvm_unreachable("NYI"); + } + case Builtin::BI__builtin_object_size: { + unsigned Type = + E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); + auto ResType = ConvertType(E->getType()).dyn_cast(); + assert(ResType && "not sure what to do?"); + + // We pass this builtin onto the optimizer so that it can figure out the + // object size in more complex cases. + bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size; + return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, + /*EmittedE=*/nullptr, IsDynamic)); + } } // If this is an alias for a lib function (e.g. __builtin_sin), emit @@ -506,6 +522,15 @@ void CIRGenFunction::buildVAStartEnd(mlir::Value ArgValue, bool IsStart) { builder.create(ArgValue.getLoc(), ArgValue); } +/// Checks if using the result of __builtin_object_size(p, @p From) in place of +/// __builtin_object_size(p, @p To) is correct +static bool areBOSTypesCompatible(int From, int To) { + // Note: Our __builtin_object_size implementation currently treats Type=0 and + // Type=2 identically. Encoding this implementation detail here may make + // improving __builtin_object_size difficult in the future, so it's omitted. + return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); +} + /// Returns a Value corresponding to the size of the given expression. /// This Value may be either of the following: /// @@ -520,6 +545,24 @@ mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, mlir::cir::IntType ResType, mlir::Value EmittedE, bool IsDynamic) { + // We need to reference an argument if the pointer is a parameter with the + // pass_object_size attribute. + if (auto *D = dyn_cast(E->IgnoreParenImpCasts())) { + auto *Param = dyn_cast(D->getDecl()); + auto *PS = D->getDecl()->getAttr(); + if (Param != nullptr && PS != nullptr && + areBOSTypesCompatible(PS->getType(), Type)) { + auto Iter = SizeArguments.find(Param); + assert(Iter != SizeArguments.end()); + + const ImplicitParamDecl *D = Iter->second; + auto DIter = LocalDeclMap.find(D); + assert(DIter != LocalDeclMap.end()); + + return buildLoadOfScalar(DIter->second, /*Volatile=*/false, + getContext().getSizeType(), E->getBeginLoc()); + } + } llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 41b975cd7be3..070eb8a44f7b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -849,6 +849,15 @@ class CIRGenFunction : public CIRGenTypeCache { clang::SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal = false); + /// Load a scalar value from an address, taking care to appropriately convert + /// from the memory representation to CIR value representation. + mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, + clang::SourceLocation Loc, + AlignmentSource Source = AlignmentSource::Type, + bool isNontemporal = false) { + return buildLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source), + isNontemporal); + } /// Load a scalar value from an address, taking care to appropriately convert /// form the memory representation to the CIR value representation. The diff --git a/clang/test/CIR/CodeGen/libcall.cpp b/clang/test/CIR/CodeGen/libcall.cpp index 4ed256acf843..5f926aaeb9e1 100644 --- a/clang/test/CIR/CodeGen/libcall.cpp +++ b/clang/test/CIR/CodeGen/libcall.cpp @@ -6,7 +6,7 @@ typedef __builtin_va_list va_list; static __inline__ __attribute__((__always_inline__)) __attribute__((__format__(printf, 3, 0))) int vsnprintf(char* const __attribute__((pass_object_size(1))) dest, int size, const char* format, va_list ap) __attribute__((overloadable)) { - return __builtin___vsnprintf_chk(dest, size, 0, 0, format, ap); + return __builtin___vsnprintf_chk(dest, size, 0, __builtin_object_size(((dest)), (1)), format, ap); } void t(const char* fmt, ...) { @@ -17,4 +17,19 @@ void t(const char* fmt, ...) { vsnprintf(message, size, fmt, args); } -// CHECK: cir.func private @__vsnprintf_chk \ No newline at end of file +// CHECK: cir.func private @__vsnprintf_chk + +// CHECK: cir.func internal private @_ZL9vsnprintfPcU17pass_object_size1iPKcP13__va_list_tag + +// Implicit size parameter in arg %1 +// +// FIXME: tag the param with an attribute to designate the size information. +// +// CHECK: %1 = cir.alloca !u64i, cir.ptr , ["", init] {alignment = 8 : i64} + +// CHECK: cir.store %arg1, %1 : !u64i, cir.ptr + +// CHECK: %10 = cir.load %1 : cir.ptr , !u64i +// CHECK: %11 = cir.load %3 : cir.ptr >, !cir.ptr +// CHECK: %12 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %13 = cir.call @__vsnprintf_chk(%6, %8, %9, %10, %11, %12) From 9496b13e70de4479477ab9af63770c91ecac6e51 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 26 Jun 2023 21:12:55 -0300 Subject: [PATCH 1026/2301] [CIR][CIRGen][Bugfix] Remove Optional wrapper from MLIR Type MLIR Types are inherently wrappers. Checking if a value exists is a matter of checking if the type is null. The Optional wrapper was causing a bug in the `CIRGenModule::buildGlobal` method. If an empty type (`mlir::Type{}`) was passed as `Ty`, the `if (!ty)` clause would evalute to true, because the `!` operator would simply validate that there was a `mlir::Type` in the optional wrapper. Then the code gen would continue and try to emit an invalid type. This patch removes the wrapper, which ensures that the `!Ty` clause now checks if the given `mlir::Type` is an empty type. In that case, it generates a real type from the AST Type: ``` if (!Ty) Ty = getTypes().convertTypeForMem(ASTTy); ``` ghstack-source-id: 345e49cd75ef0c682b4089121bc4f8ce0a29167f Pull Request resolved: https://github.com/llvm/clangir/pull/132 --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 8 +++----- clang/lib/CIR/CodeGen/CIRGenModule.h | 5 ++--- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 8c5d6802428e..e1c0d13ee139 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -662,8 +662,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, return GV; } -mlir::cir::GlobalOp CIRGenModule::buildGlobal(const VarDecl *D, - std::optional Ty, +mlir::cir::GlobalOp CIRGenModule::buildGlobal(const VarDecl *D, mlir::Type Ty, ForDefinition_t IsForDefinition) { assert(D->hasGlobalStorage() && "Not a global variable"); QualType ASTTy = D->getType(); @@ -671,7 +670,7 @@ mlir::cir::GlobalOp CIRGenModule::buildGlobal(const VarDecl *D, Ty = getTypes().convertTypeForMem(ASTTy); StringRef MangledName = getMangledName(D); - return getOrCreateCIRGlobal(MangledName, *Ty, ASTTy.getAddressSpace(), D, + return getOrCreateCIRGlobal(MangledName, Ty, ASTTy.getAddressSpace(), D, IsForDefinition); } @@ -681,8 +680,7 @@ mlir::cir::GlobalOp CIRGenModule::buildGlobal(const VarDecl *D, /// If IsForDefinition is true, it is guaranteed that an actual global with type /// Ty will be returned, not conversion of a variable with the same mangled name /// but some other type. -mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, - std::optional Ty, +mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty, ForDefinition_t IsForDefinition) { assert(D->hasGlobalStorage() && "Not a global variable"); QualType ASTTy = D->getType(); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index a5066e1284fb..2737806d87cc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -183,8 +183,7 @@ class CIRGenModule : public CIRGenTypeCache { const VarDecl *D, ForDefinition_t IsForDefinition = NotForDefinition); - mlir::cir::GlobalOp buildGlobal(const VarDecl *D, - std::optional Ty, + mlir::cir::GlobalOp buildGlobal(const VarDecl *D, mlir::Type Ty, ForDefinition_t IsForDefinition); /// TODO(cir): once we have cir.module, add this as a convenience method @@ -215,7 +214,7 @@ class CIRGenModule : public CIRGenTypeCache { /// global with type Ty will be returned, not conversion of a variable with /// the same mangled name but some other type. mlir::Value - getAddrOfGlobalVar(const VarDecl *D, std::optional Ty = {}, + getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty = {}, ForDefinition_t IsForDefinition = NotForDefinition); CharUnits From 9b27262bdef453c10657eac471aae557fb421e3a Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 26 Jun 2023 21:12:59 -0300 Subject: [PATCH 1027/2301] [CIR][CIRGen] Add support for static global variables Implements the required changes to support static global variables, ensuring they present the proper linkage type in CIR. Also tests lowering of CIR globals with internal linkage. ghstack-source-id: 7e80f614daf87471d4377640be8f5f7b459a138c Pull Request resolved: https://github.com/llvm/clangir/pull/133 --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 68 ++++++++++++++++---------- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 ++ clang/test/CIR/CodeGen/linkage.c | 6 +++ clang/test/CIR/Lowering/globals.cir | 2 + 4 files changed, 53 insertions(+), 26 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index e1c0d13ee139..896c790b159e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -850,10 +850,9 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, if (D->hasAttr()) assert(0 && "not implemented"); - // TODO(cir): - // Set the llvm linkage type as appropriate. - // llvm::GlobalValue::LinkageTypes Linkage = - // getLLVMLinkageVarDefinition(D, GV->isConstant()); + // Set CIR's linkage type as appropriate. + mlir::cir::GlobalLinkageKind Linkage = + getCIRLinkageVarDefinition(D, /*IsConstant=*/false); // TODO(cir): // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on @@ -903,25 +902,21 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, assert(0 && "not implemented"); } - // TODO(cir): set linkage, dll stuff and common linkage - // GV->setLinkage(Linkage); - // if (D->hasAttr()) - // GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); - // else if (D->hasAttr()) - // GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass); - // else - // GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass); - // - // if (Linkage == llvm::GlobalVariable::CommonLinkage) { - // // common vars aren't constant even if declared const. - // GV->setConstant(false); - // // Tentative definition of global variables may be initialized with - // // non-zero null pointers. In this case they should have weak linkage - // // since common linkage must have zero initializer and must not have - // // explicit section therefore cannot have non-zero initial value. - // if (!GV->getInitializer()->isNullValue()) - // GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage); - // } + // Set CIR linkage and DLL storage class. + GV.setLinkage(Linkage); + // FIXME(cir): setLinkage should likely set MLIR's visibility automatically. + GV.setVisibility(getMLIRVisibilityFromCIRLinkage(Linkage)); + // TODO(cir): handle DLL storage classes in CIR? + if (D->hasAttr()) + assert(!UnimplementedFeature::setDLLStorageClass()); + else if (D->hasAttr()) + assert(!UnimplementedFeature::setDLLStorageClass()); + else + assert(!UnimplementedFeature::setDLLStorageClass()); + + if (Linkage == mlir::cir::GlobalLinkageKind::CommonLinkage) { + llvm_unreachable("common linkage is NYI"); + } // TODO(cir): setNonAliasAttributes(D, GV); @@ -1492,6 +1487,13 @@ void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( } } +mlir::cir::GlobalLinkageKind +CIRGenModule::getCIRLinkageVarDefinition(const VarDecl *VD, bool IsConstant) { + assert(!IsConstant && "constant variables NYI"); + GVALinkage Linkage = astCtx.GetGVALinkageForVariable(VD); + return getCIRLinkageForDeclarator(VD, Linkage, IsConstant); +} + mlir::cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { const auto *D = cast(GD.getDecl()); @@ -2012,10 +2014,14 @@ void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { Op = getGlobalValue(getMangledName(D)); } + // In case of different address spaces, we may still get a cast, even with + // IsForDefinition equal to true. Query mangled names table to get + // GlobalValue. + if (!Op) + llvm_unreachable("Address spaces NYI"); + // Make sure getGlobalValue returned non-null. assert(Op); - assert(isa(Op) && - "not implemented, only supports FuncOp for now"); // Check to see if we've already emitted this. This is necessary for a // couple of reasons: first, decls can end up in deferred-decls queue @@ -2023,10 +2029,20 @@ void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { // ways (e.g. by an extern inline function acquiring a strong function // redefinition). Just ignore those cases. // TODO: Not sure what to map this to for MLIR - if (auto Fn = cast(Op)) + if (auto Fn = dyn_cast(Op)) if (!Fn.isDeclaration()) return; + // TODO(cir): create a global value trait that allow us to uniformly handle + // global variables and functions. + if (auto Gv = dyn_cast(Op)) { + auto *result = + mlir::SymbolTable::lookupSymbolIn(getModule(), Gv.getNameAttr()); + if (auto globalOp = dyn_cast(result)) + if (!globalOp.isDeclaration()) + return; + } + // If this is OpenMP, check if it is legal to emit this global normally. if (getLangOpts().OpenMP) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 2737806d87cc..2f51af73a42b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -515,6 +515,9 @@ class CIRGenModule : public CIRGenTypeCache { getMLIRVisibilityFromCIRLinkage(L)); } + mlir::cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *VD, + bool IsConstant); + void addReplacement(StringRef Name, mlir::Operation *Op); mlir::Location getLocForFunction(const clang::FunctionDecl *FD); diff --git a/clang/test/CIR/CodeGen/linkage.c b/clang/test/CIR/CodeGen/linkage.c index fc0e95499743..4ef82a43981d 100644 --- a/clang/test/CIR/CodeGen/linkage.c +++ b/clang/test/CIR/CodeGen/linkage.c @@ -17,3 +17,9 @@ int foo(void) { // LLVM: define internal i32 @bar( // LLVM: define i32 @foo( + +static int var = 0; +// CIR: cir.global "private" internal @var = #cir.int<0> : !s32i +int get_var(void) { + return var; +} diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index e5e0f8a15d5a..df9ffcf100f9 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -118,4 +118,6 @@ module { cir.global external @zeroInitFlt = #cir.const_array<[0.000000e+00 : f32, 0.000000e+00 : f32]> : !cir.array // MLIR: llvm.mlir.global external @flt(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) {addr_space = 0 : i32} : !llvm.array<2 x f32> // MLIR: llvm.mlir.global external @zeroInitFlt(dense<0.000000e+00> : tensor<2xf32>) {addr_space = 0 : i32} : !llvm.array<2 x f32> + cir.global "private" internal @staticVar = #cir.int<0> : !s32i + // MLIR: llvm.mlir.global internal @staticVar(0 : i32) {addr_space = 0 : i32} : i32 } From 8e0942d79afbe39ed8053fcb5b590ceff6537be7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Jun 2023 13:53:59 -0700 Subject: [PATCH 1028/2301] [CIR][CIRGen] Add cir.objsize and implement rest of __builtin_object_size --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 36 ++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 15 ++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 28 ++++++++++++--- clang/test/CIR/CodeGen/libcall.cpp | 32 +++++++++++++++-- clang/test/CIR/IR/cir-ops.cir | 18 ++++++++++ 5 files changed, 122 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 94df76feca81..232f189f1e4d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -102,6 +102,42 @@ def CastOp : CIR_Op<"cast", [Pure]> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// ObjSizeOp +//===----------------------------------------------------------------------===// + +def SizeInfoTypeMin : I32EnumAttrCase<"min", 0>; +def SizeInfoTypeMax : I32EnumAttrCase<"max", 1>; + +def SizeInfoType : I32EnumAttr< + "SizeInfoType", + "size info type", + [SizeInfoTypeMin, SizeInfoTypeMax]> { + let cppNamespace = "::mlir::cir"; +} + +def ObjSizeOp : CIR_Op<"objsize", [Pure]> { + let summary = "Conversion between values of different types"; + let description = [{ + }]; + + let arguments = (ins CIR_PointerType:$ptr, SizeInfoType:$kind, + UnitAttr:$dynamic); + let results = (outs CIR_IntType:$result); + + let assemblyFormat = [{ + `(` + $ptr `:` type($ptr) `,` + $kind + (`,` `dynamic` $dynamic^)? + `)` + `->` type($result) attr-dict + }]; + + // Nothing to verify that isn't already covered by constraints. + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // PtrStrideOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 26649f274174..f7dc0d350d52 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -182,6 +182,21 @@ class CIRGenBuilderTy : public mlir::OpBuilder { } } + mlir::cir::IntType getSIntNTy(int N) { + switch (N) { + case 8: + return getSInt8Ty(); + case 16: + return getSInt16Ty(); + case 32: + return getSInt32Ty(); + case 64: + return getSInt64Ty(); + default: + llvm_unreachable("Unknown bit-width"); + } + } + mlir::cir::VoidType getVoidTy() { return typeCache.VoidTy; } mlir::cir::IntType getSInt8Ty() { return typeCache.SInt8Ty; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index b323bb2b82da..afc125b80876 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -534,13 +534,12 @@ static bool areBOSTypesCompatible(int From, int To) { /// Returns a Value corresponding to the size of the given expression. /// This Value may be either of the following: /// -/// - In LLVM: a llvm::Argument (if E is a param with the pass_object_size -/// attribute on it), CIR: TBD -/// - A call to a `cir.object_size`. +/// - Reference an argument if `pass_object_size` is used. +/// - A call to a `cir.objsize`. /// /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null /// and we wouldn't otherwise try to reference a pass_object_size parameter, -/// we'll call `cir.object_size` on EmittedE, rather than emitting E. +/// we'll call `cir.objsize` on EmittedE, rather than emitting E. mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, mlir::cir::IntType ResType, mlir::Value EmittedE, @@ -563,7 +562,26 @@ mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, getContext().getSizeType(), E->getBeginLoc()); } } - llvm_unreachable("NYI"); + + // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't + // evaluate E for side-effects. In either case, just like original LLVM + // lowering, we shouldn't lower to `cir.objsize`. + if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) + llvm_unreachable("NYI"); + + auto Ptr = EmittedE ? EmittedE : buildScalarExpr(E); + assert(Ptr.getType().isa() && + "Non-pointer passed to __builtin_object_size?"); + + // LLVM intrinsics (which CIR lowers to at some point, only supports 0 + // and 2, account for that right now. + mlir::cir::SizeInfoType sizeInfoTy = ((Type & 2) != 0) + ? mlir::cir::SizeInfoType::min + : mlir::cir::SizeInfoType::max; + // TODO(cir): Heads up for LLVM lowering, For GCC compatibility, + // __builtin_object_size treat NULL as unknown size. + return builder.create( + getLoc(E->getSourceRange()), ResType, Ptr, sizeInfoTy, IsDynamic); } mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize( diff --git a/clang/test/CIR/CodeGen/libcall.cpp b/clang/test/CIR/CodeGen/libcall.cpp index 5f926aaeb9e1..30e910c8f28c 100644 --- a/clang/test/CIR/CodeGen/libcall.cpp +++ b/clang/test/CIR/CodeGen/libcall.cpp @@ -9,16 +9,44 @@ int vsnprintf(char* const __attribute__((pass_object_size(1))) dest, int size, c return __builtin___vsnprintf_chk(dest, size, 0, __builtin_object_size(((dest)), (1)), format, ap); } +typedef long unsigned int size_t; + +size_t __strlen_chk(const char* __s, size_t __n) __attribute__((annotate("introduced_in=" "17"))); +size_t strlen(const char* __s) __attribute__((__pure__)); +static __inline__ __attribute__((__always_inline__)) +size_t strlen(const char* const s __attribute__((pass_object_size(0)))) __attribute__((overloadable)) { + size_t bos = __builtin_object_size(((s)), (0)); + + if (bos == ((size_t) -1)) { + return __builtin_strlen(s); + } + + return __strlen_chk(s, bos); +} + +void log(int, const char *, int); + +void consume_message(const char *m) { + log(3, m, strlen(m)); +} + void t(const char* fmt, ...) { va_list args; __builtin_va_start(args, fmt); const int size = 512; char message[size]; vsnprintf(message, size, fmt, args); + consume_message(message); } -// CHECK: cir.func private @__vsnprintf_chk +// CHECK: cir.func @_Z15consume_messagePKc(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["m", init] {alignment = 8 : i64} +// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.objsize(%3 : , max) -> !u64i +// CHECK: %5 = cir.call @_ZL6strlenPKcU17pass_object_size0(%3, %4) : (!cir.ptr, !u64i) -> !u64i + +// CHECK: cir.func private @__vsnprintf_chk // CHECK: cir.func internal private @_ZL9vsnprintfPcU17pass_object_size1iPKcP13__va_list_tag // Implicit size parameter in arg %1 @@ -32,4 +60,4 @@ void t(const char* fmt, ...) { // CHECK: %10 = cir.load %1 : cir.ptr , !u64i // CHECK: %11 = cir.load %3 : cir.ptr >, !cir.ptr // CHECK: %12 = cir.load %4 : cir.ptr >, !cir.ptr -// CHECK: %13 = cir.call @__vsnprintf_chk(%6, %8, %9, %10, %11, %12) +// CHECK: %13 = cir.call @__vsnprintf_chk(%6, %8, %9, %10, %11, %12) \ No newline at end of file diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index a4dfa12d1425..e073d007ba69 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -2,6 +2,8 @@ // RUN: cir-tool %s | cir-tool | FileCheck %s !s32i = !cir.int +!s8i = !cir.int +!u64i = !cir.int module { cir.func @foo(%arg0: !s32i) -> !s32i { @@ -45,6 +47,14 @@ module { } cir.return } + + cir.func @os() { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["m", init] {alignment = 8 : i64} + %3 = cir.load %0 : cir.ptr >, !cir.ptr + %4 = cir.objsize(%3 : , max) -> !u64i + %5 = cir.objsize(%3 : , min) -> !u64i + cir.return + } } // CHECK: module { @@ -80,4 +90,12 @@ module { // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["y"] {alignment = 4 : i64} // CHECK-NEXT: } +// CHECK: cir.func @os() { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["m", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %2 = cir.objsize(%1 : , max) -> !u64i +// CHECK-NEXT: %3 = cir.objsize(%1 : , min) -> !u64i +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + // CHECK: } From 3a62314ba1f0c1eac2f7be1d8afed06cc1ba2a86 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Jun 2023 16:28:54 -0700 Subject: [PATCH 1029/2301] [CIR][CIRGen][NFC] Refactor buildPointerWithAlignment and add KnownNonNull_t --- clang/lib/CIR/CodeGen/Address.h | 40 ++++++++++++++++++++------ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 20 ++++++++++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 24 +++++++++++++--- 3 files changed, 67 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index dcf308f1d3f7..eb59bdb2f3c9 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -23,18 +23,23 @@ namespace cir { +// Indicates whether a pointer is known not to be null. +enum KnownNonNull_t { NotKnownNonNull, KnownNonNull }; + class Address { - mlir::Value Pointer; + llvm::PointerIntPair PointerAndKnownNonNull; mlir::Type ElementType; clang::CharUnits Alignment; protected: - Address(std::nullptr_t) : Pointer(nullptr), ElementType(nullptr) {} + Address(std::nullptr_t) : ElementType(nullptr) {} public: Address(mlir::Value pointer, mlir::Type elementType, - clang::CharUnits alignment) - : Pointer(pointer), ElementType(elementType), Alignment(alignment) { + clang::CharUnits alignment, + KnownNonNull_t IsKnownNonNull = NotKnownNonNull) + : PointerAndKnownNonNull(pointer, IsKnownNonNull), + ElementType(elementType), Alignment(alignment) { assert(pointer.getType().isa() && "Expected cir.ptr type"); @@ -52,17 +57,21 @@ class Address { } static Address invalid() { return Address(nullptr); } - bool isValid() const { return Pointer != nullptr; } + bool isValid() const { + return PointerAndKnownNonNull.getPointer() != nullptr; + } /// Return address with different pointer, but same element type and /// alignment. - Address withPointer(mlir::Value NewPointer) const { - return Address(NewPointer, getElementType(), getAlignment()); + Address withPointer(mlir::Value NewPointer, + KnownNonNull_t IsKnownNonNull) const { + return Address(NewPointer, getElementType(), getAlignment(), + IsKnownNonNull); } mlir::Value getPointer() const { - // assert(isValid()); - return Pointer; + assert(isValid()); + return PointerAndKnownNonNull.getPointer(); } /// Return the alignment of this pointer. @@ -75,6 +84,19 @@ class Address { assert(isValid()); return ElementType; } + + /// Whether the pointer is known not to be null. + KnownNonNull_t isKnownNonNull() const { + assert(isValid()); + return (KnownNonNull_t)PointerAndKnownNonNull.getInt(); + } + + /// Set the non-null bit. + Address setKnownNonNull() { + assert(isValid()); + PointerAndKnownNonNull.setInt(true); + return *this; + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index b9128985595b..4eefc860efbe 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -644,8 +644,8 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. -Address CIRGenFunction::buildPointerWithAlignment(const Expr *E, - LValueBaseInfo *BaseInfo) { +Address CIRGenFunction::buildPointerWithAlignment( + const Expr *E, LValueBaseInfo *BaseInfo, KnownNonNull_t IsKnownNonNull) { // We allow this with ObjC object pointers because of fragile ABIs. assert(E->getType()->isPointerType() || E->getType()->isObjCObjectPointerType()); @@ -661,6 +661,18 @@ Address CIRGenFunction::buildPointerWithAlignment(const Expr *E, llvm::errs() << CE->getCastKindName() << "\n"; assert(0 && "not implemented"); } + // Non-converting casts (but not C's implicit conversion from void*). + case CK_BitCast: + case CK_NoOp: + case CK_AddressSpaceConversion: + if (auto PtrTy = + CE->getSubExpr()->getType()->getAs()) { + if (PtrTy->getPointeeType()->isVoidType()) + break; + llvm_unreachable("NYI"); + } + break; + // Nothing to do here... case CK_LValueToRValue: break; @@ -1718,8 +1730,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { // surrounded by cleanups. Address Addr = LV.getAddress(); auto V = Addr.getPointer(); - LV = LValue::makeAddr(Addr.withPointer(V), LV.getType(), - getContext(), + LV = LValue::makeAddr(Addr.withPointer(V, NotKnownNonNull), + LV.getType(), getContext(), LV.getBaseInfo() /*TODO(cir):TBAA*/); } }); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 070eb8a44f7b..824917f8c6e1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1203,10 +1203,26 @@ class CIRGenFunction : public CIRGenTypeCache { const clang::CallExpr *E, ReturnValueSlot ReturnValue); - /// Given an expression of pointer type, try to - /// derive a more accurate bound on the alignment of the pointer. - Address buildPointerWithAlignment(const clang::Expr *E, - LValueBaseInfo *BaseInfo = nullptr); + /// Given an expression with a pointer type, emit the value and compute our + /// best estimate of the alignment of the pointee. + /// + /// \param BaseInfo - If non-null, this will be initialized with + /// information about the source of the alignment and the may-alias + /// attribute. Note that this function will conservatively fall back on + /// the type when it doesn't recognize the expression and may-alias will + /// be set to false. + /// + /// One reasonable way to use this information is when there's a language + /// guarantee that the pointer must be aligned to some stricter value, and + /// we're simply trying to ensure that sufficiently obvious uses of under- + /// aligned objects don't get miscompiled; for example, a placement new + /// into the address of a local variable. In such a case, it's quite + /// reasonable to just ignore the returned alignment when it isn't from an + /// explicit source. + Address + buildPointerWithAlignment(const clang::Expr *E, + LValueBaseInfo *BaseInfo = nullptr, + KnownNonNull_t IsKnownNonNull = NotKnownNonNull); /// Emit an expression as an initializer for an object (variable, field, etc.) /// at the given location. The expression is not necessarily the normal From 3253ec26a660dc7927c78d8dbe660f37c2aede81 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Jun 2023 16:45:51 -0700 Subject: [PATCH 1030/2301] [CIR][CIRGen][NFC] More buildPointerWithAlignment refactoring --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 137 ++++++++++++++------------- 1 file changed, 71 insertions(+), 66 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 4eefc860efbe..bd7ad2b72ad4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -93,6 +93,72 @@ static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { return false; } +static Address buildPointerWithAlignment(const Expr *E, + LValueBaseInfo *BaseInfo, + KnownNonNull_t IsKnownNonNull, + CIRGenFunction &CGF) { + // We allow this with ObjC object pointers because of fragile ABIs. + assert(E->getType()->isPointerType() || + E->getType()->isObjCObjectPointerType()); + E = E->IgnoreParens(); + + // Casts: + if (const CastExpr *CE = dyn_cast(E)) { + if (const auto *ECE = dyn_cast(CE)) + assert(0 && "not implemented"); + + switch (CE->getCastKind()) { + default: { + llvm::errs() << CE->getCastKindName() << "\n"; + assert(0 && "not implemented"); + } + // Non-converting casts (but not C's implicit conversion from void*). + case CK_BitCast: + case CK_NoOp: + case CK_AddressSpaceConversion: + if (auto PtrTy = + CE->getSubExpr()->getType()->getAs()) { + if (PtrTy->getPointeeType()->isVoidType()) + break; + llvm_unreachable("NYI"); + } + break; + + // Nothing to do here... + case CK_LValueToRValue: + break; + + // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo. + case CK_ArrayToPointerDecay: + return CGF.buildArrayToPointerDecay(CE->getSubExpr()); + + case CK_UncheckedDerivedToBase: + case CK_DerivedToBase: { + // TODO: Support accesses to members of base classes in TBAA. For now, we + // conservatively pretend that the complete object is of the base class + // type. + assert(!UnimplementedFeature::tbaa()); + Address Addr = CGF.buildPointerWithAlignment(CE->getSubExpr(), BaseInfo); + auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); + return CGF.getAddressOfBaseClass( + Addr, Derived, CE->path_begin(), CE->path_end(), + CGF.shouldNullCheckClassCastValue(CE), CE->getExprLoc()); + } + } + } + + // Unary &. + if (const UnaryOperator *UO = dyn_cast(E)) { + assert(0 && "not implemented"); + } + + // TODO: conditional operators, comma. + // Otherwise, use the alignment of the type. + CharUnits Align = + CGF.CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo); + return Address(CGF.buildScalarExpr(E), Align); +} + LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { LValueBaseInfo BaseInfo = base.getBaseInfo(); @@ -646,72 +712,11 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { /// derive a more accurate bound on the alignment of the pointer. Address CIRGenFunction::buildPointerWithAlignment( const Expr *E, LValueBaseInfo *BaseInfo, KnownNonNull_t IsKnownNonNull) { - // We allow this with ObjC object pointers because of fragile ABIs. - assert(E->getType()->isPointerType() || - E->getType()->isObjCObjectPointerType()); - E = E->IgnoreParens(); - - // Casts: - if (const CastExpr *CE = dyn_cast(E)) { - if (const auto *ECE = dyn_cast(CE)) - assert(0 && "not implemented"); - - switch (CE->getCastKind()) { - default: { - llvm::errs() << CE->getCastKindName() << "\n"; - assert(0 && "not implemented"); - } - // Non-converting casts (but not C's implicit conversion from void*). - case CK_BitCast: - case CK_NoOp: - case CK_AddressSpaceConversion: - if (auto PtrTy = - CE->getSubExpr()->getType()->getAs()) { - if (PtrTy->getPointeeType()->isVoidType()) - break; - llvm_unreachable("NYI"); - } - break; - - // Nothing to do here... - case CK_LValueToRValue: - break; - - // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo. - case CK_ArrayToPointerDecay: - return buildArrayToPointerDecay(CE->getSubExpr()); - - case CK_UncheckedDerivedToBase: - case CK_DerivedToBase: { - // TODO: Support accesses to members of base classes in TBAA. For now, we - // conservatively pretend that the complete object is of the base class - // type. - assert(!UnimplementedFeature::tbaa()); - Address Addr = buildPointerWithAlignment(CE->getSubExpr(), BaseInfo); - auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); - return getAddressOfBaseClass( - Addr, Derived, CE->path_begin(), CE->path_end(), - shouldNullCheckClassCastValue(CE), CE->getExprLoc()); - } - } - } - - // Unary &. - if (const UnaryOperator *UO = dyn_cast(E)) { - assert(0 && "not implemented"); - // if (UO->getOpcode() == UO_AddrOf) { - // LValue LV = buildLValue(UO->getSubExpr()); - // if (BaseInfo) - // *BaseInfo = LV.getBaseInfo(); - // // TODO: TBBA info - // return LV.getAddress(); - // } - } - - // TODO: conditional operators, comma. - // Otherwise, use the alignment of the type. - CharUnits Align = CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo); - return Address(buildScalarExpr(E), Align); + Address Addr = + ::buildPointerWithAlignment(E, BaseInfo, IsKnownNonNull, *this); + if (IsKnownNonNull && !Addr.isKnownNonNull()) + Addr.setKnownNonNull(); + return Addr; } /// Perform the usual unary conversions on the specified From ca45667b723df7d356d17dd7c57b8dcf229d4d91 Mon Sep 17 00:00:00 2001 From: redbopo Date: Fri, 2 Jun 2023 23:18:14 +0800 Subject: [PATCH 1031/2301] [CIR] Support cir.br target block to have operands --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 3 +-- clang/test/CIR/IR/branch.cir | 18 +++++++++++++++++- clang/test/CIR/IR/invalid.cir | 11 +++++++++++ 3 files changed, 29 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index b838f17bb5a8..c24232077adc 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -772,8 +772,7 @@ mlir::LogicalResult YieldOp::verify() { mlir::SuccessorOperands BrOp::getSuccessorOperands(unsigned index) { assert(index == 0 && "invalid successor index"); - // Current block targets do not have operands. - return mlir::SuccessorOperands(MutableOperandRange(getOperation(), 0, 0)); + return mlir::SuccessorOperands(getDestOperandsMutable()); } Block *BrOp::getSuccessorForOperands(ArrayRef) { return getDest(); } diff --git a/clang/test/CIR/IR/branch.cir b/clang/test/CIR/IR/branch.cir index 6d2f0565a44a..57977b1389ff 100644 --- a/clang/test/CIR/IR/branch.cir +++ b/clang/test/CIR/IR/branch.cir @@ -41,4 +41,20 @@ cir.func @b0() { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } + + +!s32i = !cir.int +cir.func @test_br() -> !s32i { + %0 = cir.const(#cir.int<0>: !s32i) : !s32i + cir.br ^bb1(%0 : !s32i) + ^bb1(%x: !s32i): + cir.return %x : !s32i +} + +// CHECK: cir.func @test_br() -> !s32i { +// CHECK-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: cir.br ^bb1(%0 : !s32i) +// CHECK-NEXT: ^bb1(%1: !s32i): // pred: ^bb0 +// CHECK-NEXT: cir.return %1 : !s32i +// CHECK-NEXT: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 636fece63646..71ca31355121 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -394,6 +394,17 @@ module { // ----- +!s32i = !cir.int +cir.func @test_br() -> !s32i { + %0 = cir.const(#cir.int<0>: !s32i) : !s32i + // expected-error@below {{branch has 1 operands for successor #0, but target block has 0}} + cir.br ^bb1(%0 : !s32i) + ^bb1: + cir.return %0 : !s32i +} + +// ----- + module { cir.func private @test() -> !cir.void cir.func @invalid_call() { From e4be70c455fee51b0f9f7a4fb966b0b1e64955ef Mon Sep 17 00:00:00 2001 From: redbopo Date: Thu, 25 May 2023 22:01:21 +0800 Subject: [PATCH 1032/2301] [CIR][Lowering] Support lowering on cir::TernaryOp. - Support cir::TernaryOp lowering -cir-to-llvm. - Mostly mirror the scf.if to cf and cir.if lowerings. - also support cir.br lowering with type converter, like the `cir.br ^bb3(%9 : !s32i)` , !s32i is !cir.int. and this type not supported in LLVM dialect level. - current generate `builtin.unrealized_conversion_cast` Ops in tests, will cause the next rountine mlir-translate failed. Need to consider whether to include ReconcileUnrealizedCasts pass or find some other methods. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 69 ++++++++++++++++--- clang/test/CIR/Lowering/tenary.cir | 52 ++++++++++++++ 2 files changed, 113 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/Lowering/tenary.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 2cdb370adc7c..9f8347d33620 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1075,6 +1075,58 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { } }; +class CIRTernaryOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::TernaryOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto loc = op->getLoc(); + auto *condBlock = rewriter.getInsertionBlock(); + auto opPosition = rewriter.getInsertionPoint(); + auto *remainingOpsBlock = rewriter.splitBlock(condBlock, opPosition); + auto *continueBlock = rewriter.createBlock( + remainingOpsBlock, op->getResultTypes(), + SmallVector(/* result number always 1 */ 1, loc)); + rewriter.create(loc, remainingOpsBlock); + + auto &trueRegion = op.getTrueRegion(); + auto *trueBlock = &trueRegion.front(); + mlir::Operation *trueTerminator = trueRegion.back().getTerminator(); + rewriter.setInsertionPointToEnd(&trueRegion.back()); + auto trueYieldOp = dyn_cast(trueTerminator); + + rewriter.replaceOpWithNewOp( + trueYieldOp, trueYieldOp.getArgs(), continueBlock); + rewriter.inlineRegionBefore(trueRegion, continueBlock); + + auto *falseBlock = continueBlock; + auto &falseRegion = op.getFalseRegion(); + + falseBlock = &falseRegion.front(); + mlir::Operation *falseTerminator = falseRegion.back().getTerminator(); + rewriter.setInsertionPointToEnd(&falseRegion.back()); + auto falseYieldOp = dyn_cast(falseTerminator); + rewriter.replaceOpWithNewOp( + falseYieldOp, falseYieldOp.getArgs(), continueBlock); + rewriter.inlineRegionBefore(falseRegion, continueBlock); + + rewriter.setInsertionPointToEnd(condBlock); + auto condition = adaptor.getCond(); + auto i1Condition = rewriter.create( + op.getLoc(), rewriter.getI1Type(), condition); + rewriter.create(loc, i1Condition.getResult(), + trueBlock, falseBlock); + + rewriter.replaceOp(op, continueBlock->getArguments()); + + // Ok, we're done! + return mlir::success(); + } +}; + class CIRCmpOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -1153,14 +1205,14 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { } }; -class CIRBrOpLowering : public mlir::OpRewritePattern { +class CIRBrOpLowering : public mlir::OpConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BrOp op, - mlir::PatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, op.getDestOperands(), + matchAndRewrite(mlir::cir::BrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getOperands(), op.getDest()); return mlir::LogicalResult::success(); } @@ -1168,15 +1220,16 @@ class CIRBrOpLowering : public mlir::OpRewritePattern { void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(patterns.getContext()); + patterns.add(patterns.getContext()); patterns.add( - converter, patterns.getContext()); + CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, + CIRBrOpLowering, CIRTernaryOpLowering>(converter, + patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/tenary.cir b/clang/test/CIR/Lowering/tenary.cir new file mode 100644 index 000000000000..6452aff25f43 --- /dev/null +++ b/clang/test/CIR/Lowering/tenary.cir @@ -0,0 +1,52 @@ +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR + +!s32i = !cir.int + +module { +cir.func @_Z1xi(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool + %5 = cir.ternary(%4, true { + %7 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.yield %7 : !s32i + }, false { + %7 = cir.const(#cir.int<5> : !s32i) : !s32i + cir.yield %7 : !s32i + }) : (!cir.bool) -> !s32i + cir.store %5, %1 : !s32i, cir.ptr + %6 = cir.load %1 : cir.ptr , !s32i + cir.return %6 : !s32i + } +} + +// MLIR: module { +// MLIR: llvm.func @_Z1xi(%arg0: i32) -> i32 { +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %3 = llvm.alloca %2 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr +// MLIR-NEXT: %4 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %5 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: %6 = llvm.icmp "sgt" %4, %5 : i32 +// MLIR-NEXT: %7 = llvm.zext %6 : i1 to i8 +// MLIR-NEXT: %8 = llvm.trunc %7 : i8 to i1 +// MLIR-NEXT: llvm.cond_br %8, ^bb1, ^bb2 +// MLIR-NEXT: ^bb1: // pred: ^bb0 +// MLIR-NEXT: %9 = llvm.mlir.constant(3 : i32) : i32 +// MLIR-NEXT: llvm.br ^bb3(%9 : i32) +// MLIR-NEXT: ^bb2: // pred: ^bb0 +// MLIR-NEXT: %10 = llvm.mlir.constant(5 : i32) : i32 +// MLIR-NEXT: llvm.br ^bb3(%10 : i32) +// MLIR-NEXT: ^bb3(%11: i32): // 2 preds: ^bb1, ^bb2 +// MLIR-NEXT: llvm.br ^bb4 +// MLIR-NEXT: ^bb4: // pred: ^bb3 +// MLIR-NEXT: llvm.store %11, %3 : i32, !llvm.ptr +// MLIR-NEXT: %12 = llvm.load %3 : !llvm.ptr +// MLIR-NEXT: llvm.return %12 : i32 +// MLIR-NEXT: } +// MLIR-NEXT: } From 53ca97188778e6da5c288d5a2e03d07793dd919b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Jun 2023 21:35:21 -0700 Subject: [PATCH 1033/2301] [CIR][CIRGen][NFC] Move binop builders outside class body --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 111 +++++++++++---------- 1 file changed, 61 insertions(+), 50 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 0c3b44c0994f..d89eebb4f48d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -643,56 +643,16 @@ class ScalarExprEmitter : public StmtVisitor { return Result; } - mlir::Value buildMul(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Mul, - Ops.LHS, Ops.RHS); - } - mlir::Value buildDiv(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Div, - Ops.LHS, Ops.RHS); - } - mlir::Value buildRem(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Rem, - Ops.LHS, Ops.RHS); - } - mlir::Value buildAdd(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Add, - Ops.LHS, Ops.RHS); - } - mlir::Value buildSub(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, - Ops.LHS, Ops.RHS); - } - mlir::Value buildShl(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shl, - Ops.LHS, Ops.RHS); - } - mlir::Value buildShr(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shr, - Ops.LHS, Ops.RHS); - } - mlir::Value buildAnd(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::And, - Ops.LHS, Ops.RHS); - } - mlir::Value buildXor(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Xor, - Ops.LHS, Ops.RHS); - } - mlir::Value buildOr(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, - Ops.LHS, Ops.RHS); - } + mlir::Value buildMul(const BinOpInfo &Ops); + mlir::Value buildDiv(const BinOpInfo &Ops); + mlir::Value buildRem(const BinOpInfo &Ops); + mlir::Value buildAdd(const BinOpInfo &Ops); + mlir::Value buildSub(const BinOpInfo &Ops); + mlir::Value buildShl(const BinOpInfo &Ops); + mlir::Value buildShr(const BinOpInfo &Ops); + mlir::Value buildAnd(const BinOpInfo &Ops); + mlir::Value buildXor(const BinOpInfo &Ops); + mlir::Value buildOr(const BinOpInfo &Ops); LValue buildCompoundAssignLValue( const CompoundAssignOperator *E, @@ -970,6 +930,57 @@ mlir::Value CIRGenFunction::buildScalarExpr(const Expr *E) { return ScalarExprEmitter(*this, builder).Visit(const_cast(E)); } +mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Mul, + Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildDiv(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Div, + Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildRem(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Rem, + Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Add, + Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, + Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shl, + Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shr, + Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildAnd(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::And, + Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildXor(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Xor, + Ops.LHS, Ops.RHS); +} +mlir::Value ScalarExprEmitter::buildOr(const BinOpInfo &Ops) { + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, + Ops.LHS, Ops.RHS); +} + // Emit code for an explicit or implicit cast. Implicit // casts have to handle a more broad range of conversions than explicit // casts, as they handle things like function to ptr-to-function decay From 80fd7c0901e34ccc08c174b799408d18de635875 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Jun 2023 21:54:44 -0700 Subject: [PATCH 1034/2301] [CIR][CIRGen][NFC] Add prep work for pointer sub --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 148 ++++++++++++++---- .../CodeGen/UnimplementedFeatureGuarding.h | 3 +- 2 files changed, 118 insertions(+), 33 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index d89eebb4f48d..947e24b9f91e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -30,6 +30,52 @@ using namespace clang; namespace { +struct BinOpInfo { + mlir::Value LHS; + mlir::Value RHS; + SourceRange Loc; + QualType Ty; // Computation Type. + BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform + FPOptions FPFeatures; + const Expr *E; // Entire expr, for error unsupported. May not be binop. + + /// Check if the binop computes a division or a remainder. + bool isDivremOp() const { + return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign || + Opcode == BO_RemAssign; + } + + /// Check if the binop can result in integer overflow. + bool mayHaveIntegerOverflow() const { + // Without constant input, we can't rule out overflow. + auto LHSCI = dyn_cast(LHS.getDefiningOp()); + auto RHSCI = dyn_cast(RHS.getDefiningOp()); + if (!LHSCI || !RHSCI) + return true; + + llvm::APInt Result; + assert(!UnimplementedFeature::mayHaveIntegerOverflow()); + llvm_unreachable("NYI"); + return false; + } + + /// Check if at least one operand is a fixed point type. In such cases, + /// this operation did not follow usual arithmetic conversion and both + /// operands might not be of the same type. + bool isFixedPointOp() const { + // We cannot simply check the result type since comparison operations + // return an int. + if (const auto *BinOp = llvm::dyn_cast(E)) { + QualType LHSType = BinOp->getLHS()->getType(); + QualType RHSType = BinOp->getRHS()->getType(); + return LHSType->isFixedPointType() || RHSType->isFixedPointType(); + } + if (const auto *UnOp = llvm::dyn_cast(E)) + return UnOp->getSubExpr()->getType()->isFixedPointType(); + return false; + } +}; + class ScalarExprEmitter : public StmtVisitor { CIRGenFunction &CGF; CIRGenBuilderTy &Builder; @@ -599,38 +645,6 @@ class ScalarExprEmitter : public StmtVisitor { QualType DstType, mlir::Type SrcTy, mlir::Type DstTy, ScalarConversionOpts Opts); - struct BinOpInfo { - mlir::Value LHS; - mlir::Value RHS; - SourceRange Loc; - QualType Ty; // Computation Type. - BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform - FPOptions FPFeatures; - const Expr *E; // Entire expr, for error unsupported. May not be binop. - - /// Check if the binop computes a division or a remainder. - bool isDivremOp() const { - return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign || - Opcode == BO_RemAssign; - } - - /// Check if at least one operand is a fixed point type. In such cases, - /// this operation did not follow usual arithmetic conversion and both - /// operands might not be of the same type. - bool isFixedPointOp() const { - // We cannot simply check the result type since comparison operations - // return an int. - if (const auto *BinOp = llvm::dyn_cast(E)) { - QualType LHSType = BinOp->getLHS()->getType(); - QualType RHSType = BinOp->getRHS()->getType(); - return LHSType->isFixedPointType() || RHSType->isFixedPointType(); - } - if (const auto *UnOp = llvm::dyn_cast(E)) - return UnOp->getSubExpr()->getType()->isFixedPointType(); - return false; - } - }; - BinOpInfo buildBinOps(const BinaryOperator *E) { BinOpInfo Result; Result.LHS = Visit(E->getLHS()); @@ -930,6 +944,76 @@ mlir::Value CIRGenFunction::buildScalarExpr(const Expr *E) { return ScalarExprEmitter(*this, builder).Visit(const_cast(E)); } +[[maybe_unused]] static bool MustVisitNullValue(const Expr *E) { + // If a null pointer expression's type is the C++0x nullptr_t, then + // it's not necessarily a simple constant and it must be evaluated + // for its potential side effects. + return E->getType()->isNullPtrType(); +} + +/// If \p E is a widened promoted integer, get its base (unpromoted) type. +static std::optional getUnwidenedIntegerType(const ASTContext &Ctx, + const Expr *E) { + const Expr *Base = E->IgnoreImpCasts(); + if (E == Base) + return std::nullopt; + + QualType BaseTy = Base->getType(); + if (!Ctx.isPromotableIntegerType(BaseTy) || + Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType())) + return std::nullopt; + + return BaseTy; +} + +/// Check if \p E is a widened promoted integer. +[[maybe_unused]] static bool IsWidenedIntegerOp(const ASTContext &Ctx, + const Expr *E) { + return getUnwidenedIntegerType(Ctx, E).has_value(); +} + +/// Check if we can skip the overflow check for \p Op. +[[maybe_unused]] static bool CanElideOverflowCheck(const ASTContext &Ctx, + const BinOpInfo &Op) { + assert((isa(Op.E) || isa(Op.E)) && + "Expected a unary or binary operator"); + + // If the binop has constant inputs and we can prove there is no overflow, + // we can elide the overflow check. + if (!Op.mayHaveIntegerOverflow()) + return true; + + // If a unary op has a widened operand, the op cannot overflow. + if (const auto *UO = dyn_cast(Op.E)) + return !UO->canOverflow(); + + // We usually don't need overflow checks for binops with widened operands. + // Multiplication with promoted unsigned operands is a special case. + const auto *BO = cast(Op.E); + auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS()); + if (!OptionalLHSTy) + return false; + + auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS()); + if (!OptionalRHSTy) + return false; + + QualType LHSTy = *OptionalLHSTy; + QualType RHSTy = *OptionalRHSTy; + + // This is the simple case: binops without unsigned multiplication, and with + // widened operands. No overflow check is needed here. + if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) || + !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType()) + return true; + + // For unsigned multiplication the overflow check can be elided if either one + // of the unpromoted types are less than half the size of the promoted type. + unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType()); + return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize || + (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize; +} + mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) { return Builder.create( CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Mul, diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index abd5917e7687..65ad568724f2 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -69,7 +69,7 @@ struct UnimplementedFeature { // Missing Emissions static bool variablyModifiedTypeEmission() { return false; } - // Clang early struct optimizations + // Clang early optimizations static bool shouldUseBZeroPlusStoresToInitialize() { return false; } static bool shouldUseMemSetToInitialize() { return false; } static bool shouldSplitConstantStore() { return false; } @@ -79,6 +79,7 @@ struct UnimplementedFeature { static bool isTrivialAndisDefaultConstructor() { return false; } static bool isMemcpyEquivalentSpecialMember() { return false; } static bool constructABIArgDirectExtend() { return false; } + static bool mayHaveIntegerOverflow() { return false; } static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } From fcd983d71a08e345ec96c64ab65ef84725414b89 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Jun 2023 22:14:02 -0700 Subject: [PATCH 1035/2301] [CIR][CIRGen][NFC] Enhance buildSub to embrace EmitSub's template --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 55 ++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 947e24b9f91e..4917c4004afb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1035,6 +1035,61 @@ mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { + // The LHS is always a pointer if either side is. + if (!Ops.LHS.getType().isa()) { + if (Ops.Ty->isSignedIntegerOrEnumerationType()) { + switch (CGF.getLangOpts().getSignedOverflowBehavior()) { + case LangOptions::SOB_Defined: { + llvm_unreachable("NYI"); + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); + } + case LangOptions::SOB_Undefined: + if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), + mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); + [[fallthrough]]; + case LangOptions::SOB_Trapping: + if (CanElideOverflowCheck(CGF.getContext(), Ops)) + llvm_unreachable("NYI"); + llvm_unreachable("NYI"); + } + } + + if (Ops.Ty->isConstantMatrixType()) { + llvm_unreachable("NYI"); + } + + if (Ops.Ty->isUnsignedIntegerType() && + CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && + !CanElideOverflowCheck(CGF.getContext(), Ops)) + llvm_unreachable("NYI"); + + assert(!UnimplementedFeature::cirVectorType()); + if (Ops.LHS.getType().isa()) { + llvm_unreachable("NYI"); + } + + if (Ops.isFixedPointOp()) + llvm_unreachable("NYI"); + + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, + Ops.LHS, Ops.RHS); + } + + // If the RHS is not a pointer, then we have normal pointer + // arithmetic. + if (!Ops.RHS.getType().isa()) + llvm_unreachable("NYI"); + + // Otherwise, this is a pointer subtraction. + + // Do the raw subtraction part. + llvm_unreachable("NYI"); + return Builder.create( CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); From 190e2c4b6e78078e5bd59c93c4e174890adfb7ce Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Jun 2023 15:04:35 -0700 Subject: [PATCH 1036/2301] [CIR][CIRGen] Implement pointer subtraction --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 32 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 15 +++++---- .../CodeGen/UnimplementedFeatureGuarding.h | 3 +- clang/test/CIR/CodeGen/ptr_diff.cpp | 13 ++++++++ 4 files changed, 56 insertions(+), 7 deletions(-) create mode 100644 clang/test/CIR/CodeGen/ptr_diff.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 232f189f1e4d..dcdbce981fc5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -138,6 +138,38 @@ def ObjSizeOp : CIR_Op<"objsize", [Pure]> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// PtrDiffOp +//===----------------------------------------------------------------------===// + +def PtrDiffOp : CIR_Op<"ptr_diff", [Pure, SameTypeOperands]> { + + let summary = "Pointer subtraction arithmetic"; + let description = [{ + `cir.ptr_diff` performs a subtraction between two pointer types with the + same element type and produces a `mlir::cir::IntType` result. + + Note that the result considers the pointer size according to the ABI for + the pointee sizes, e.g. the subtraction between two `!cir.ptr` might + yield 1, meaning 8 bytes, whereas for `void` or function type pointees, + yielding 8 means 8 bytes. + + ```mlir + %7 = "cir.ptr_diff"(%0, %1) : !cir.ptr -> !u64i + ``` + }]; + + let results = (outs CIR_IntType:$result); + let arguments = (ins AnyType:$lhs, AnyType:$rhs); + + let assemblyFormat = [{ + `(` $lhs `,` $rhs `)` `:` type($lhs) `->` type($result) attr-dict + }]; + + // Already covered by the traits + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // PtrStrideOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 4917c4004afb..62b6dc1ed38e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1085,14 +1085,17 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { if (!Ops.RHS.getType().isa()) llvm_unreachable("NYI"); - // Otherwise, this is a pointer subtraction. + // Otherwise, this is a pointer subtraction // Do the raw subtraction part. - llvm_unreachable("NYI"); - - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, - Ops.LHS, Ops.RHS); + // + // TODO(cir): note for LLVM lowering out of this; when expanding this into + // LLVM we shall take VLA's, division by element size, etc. + // + // See more in `EmitSub` in CGExprScalar.cpp. + assert(!UnimplementedFeature::llvmLoweringPtrDiffConsidersPointee()); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.PtrDiffTy, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { return Builder.create( diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 65ad568724f2..72c104de8dfd 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -69,7 +69,7 @@ struct UnimplementedFeature { // Missing Emissions static bool variablyModifiedTypeEmission() { return false; } - // Clang early optimizations + // Clang early optimizations or things defered to LLVM lowering. static bool shouldUseBZeroPlusStoresToInitialize() { return false; } static bool shouldUseMemSetToInitialize() { return false; } static bool shouldSplitConstantStore() { return false; } @@ -80,6 +80,7 @@ struct UnimplementedFeature { static bool isMemcpyEquivalentSpecialMember() { return false; } static bool constructABIArgDirectExtend() { return false; } static bool mayHaveIntegerOverflow() { return false; } + static bool llvmLoweringPtrDiffConsidersPointee() { return false; } static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } diff --git a/clang/test/CIR/CodeGen/ptr_diff.cpp b/clang/test/CIR/CodeGen/ptr_diff.cpp new file mode 100644 index 000000000000..924162d3c790 --- /dev/null +++ b/clang/test/CIR/CodeGen/ptr_diff.cpp @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef unsigned long size_type; +size_type size(unsigned long *_start, unsigned long *_finish) { + return static_cast(_finish - _start); +} + +// CHECK: cir.func @_Z4sizePmS_(%arg0: !cir.ptr +// CHECK: %3 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.ptr_diff(%3, %4) : !cir.ptr -> !u64i + \ No newline at end of file From 93c89789a62f16f43682f0c4c1f17874e9a4e7b0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 27 Jun 2023 16:52:35 -0700 Subject: [PATCH 1037/2301] [CIR][CIRGen] Implement NoOp cast kind for buildPointerWithAlignment --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 27 ++++++++++++++++++++- clang/test/CIR/CodeGen/vector.cpp | 35 ++++++++++++++++++++++++++++ clang/test/CIR/Inputs/std-cxx.h | 1 + 3 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/vector.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index bd7ad2b72ad4..12f3da44d987 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -120,7 +120,32 @@ static Address buildPointerWithAlignment(const Expr *E, CE->getSubExpr()->getType()->getAs()) { if (PtrTy->getPointeeType()->isVoidType()) break; - llvm_unreachable("NYI"); + assert(!UnimplementedFeature::tbaa()); + LValueBaseInfo InnerBaseInfo; + Address Addr = CGF.buildPointerWithAlignment( + CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull); + if (BaseInfo) + *BaseInfo = InnerBaseInfo; + + if (isa(CE)) { + llvm_unreachable("NYI"); + } + + if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) && + CE->getCastKind() == CK_BitCast) { + if (auto PT = E->getType()->getAs()) + llvm_unreachable("NYI"); + } + + auto ElemTy = + CGF.getTypes().convertTypeForMem(E->getType()->getPointeeType()); + Addr = CGF.getBuilder().createElementBitCast( + CGF.getLoc(E->getSourceRange()), Addr, ElemTy); + if (CE->getCastKind() == CK_AddressSpaceConversion) { + assert(!UnimplementedFeature::addressSpace()); + llvm_unreachable("NYI"); + } + return Addr; } break; diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp new file mode 100644 index 000000000000..6ffd1509f89b --- /dev/null +++ b/clang/test/CIR/CodeGen/vector.cpp @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +namespace std { + template + void vector::resize(size_type __sz) { + size_type __cs = size(); + if (__cs) {} + } +} // namespace std + +// CHECK: cir.func linkonce_odr @_ZNSt6vectorIyE6resizeEm( +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !u64i, cir.ptr , ["__sz", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !u64i, cir.ptr , ["__cs", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, %1 : !u64i, cir.ptr +// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.call @_ZNKSt6vectorIyE4sizeEv(%3) : (!cir.ptr) -> !u64i +// CHECK: cir.store %4, %2 : !u64i, cir.ptr +// CHECK: cir.scope { +// CHECK: %5 = cir.load %2 : cir.ptr , !u64i +// CHECK: %6 = cir.cast(int_to_bool, %5 : !u64i), !cir.bool +// CHECK: cir.if %6 { +// CHECK: } +// CHECK: } +// CHECK: cir.return + +void m() { + std::vector a; + int i = 43; + a.resize(i); +} \ No newline at end of file diff --git a/clang/test/CIR/Inputs/std-cxx.h b/clang/test/CIR/Inputs/std-cxx.h index b86fdf160db4..b50098ba3026 100644 --- a/clang/test/CIR/Inputs/std-cxx.h +++ b/clang/test/CIR/Inputs/std-cxx.h @@ -299,6 +299,7 @@ namespace std { size_t size() const { return size_t(_finish - _start); } + void resize(size_type __sz); vector& operator=(const vector &other); vector& operator=(vector &&other); From ffe73fdd2255276fb4336980d1ccd7e8839250a0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Jun 2023 15:43:34 -0700 Subject: [PATCH 1038/2301] [CIR][CIRGen] Support building up calls with reference types on return values --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 6 +++--- .../lib/CIR/CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/call.cpp | 14 ++++++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/call.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 62b6dc1ed38e..d711ec5a1a83 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1365,11 +1365,11 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *E) { - assert(!E->getCallReturnType(CGF.getContext())->isReferenceType() && "NYI"); + if (E->getCallReturnType(CGF.getContext())->isReferenceType()) + return buildLoadOfLValue(E); auto V = CGF.buildCallExpr(E).getScalarVal(); - - // TODO: buildLValueAlignmentAssumption + assert(!UnimplementedFeature::buildLValueAlignmentAssumption()); return V; } diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 72c104de8dfd..d9dda2e5b18f 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -68,6 +68,7 @@ struct UnimplementedFeature { // Missing Emissions static bool variablyModifiedTypeEmission() { return false; } + static bool buildLValueAlignmentAssumption() { return false; } // Clang early optimizations or things defered to LLVM lowering. static bool shouldUseBZeroPlusStoresToInitialize() { return false; } diff --git a/clang/test/CIR/CodeGen/call.cpp b/clang/test/CIR/CodeGen/call.cpp new file mode 100644 index 000000000000..d88bfb21bc8e --- /dev/null +++ b/clang/test/CIR/CodeGen/call.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int& p(); +int f() { + return p() - 22; +} + +// CHECK: cir.func @_Z1fv() -> !s32i { +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.call @_Z1pv() : () -> !cir.ptr +// CHECK: %2 = cir.load %1 : cir.ptr , !s32i +// CHECK: %3 = cir.const(#cir.int<22> : !s32i) : !s32i +// CHECK: %4 = cir.binop(sub, %2, %3) : !s32i \ No newline at end of file From 170f137aa7356aad702ffbaf5c6240688bb93db2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Jun 2023 15:48:27 -0700 Subject: [PATCH 1039/2301] [CIR][NFC] Move drivers tests to more appropriated location --- clang/test/CIR/{CodeGen => }/mlirargs.c | 0 clang/test/CIR/{CodeGen => }/mlirprint.c | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename clang/test/CIR/{CodeGen => }/mlirargs.c (100%) rename clang/test/CIR/{CodeGen => }/mlirprint.c (100%) diff --git a/clang/test/CIR/CodeGen/mlirargs.c b/clang/test/CIR/mlirargs.c similarity index 100% rename from clang/test/CIR/CodeGen/mlirargs.c rename to clang/test/CIR/mlirargs.c diff --git a/clang/test/CIR/CodeGen/mlirprint.c b/clang/test/CIR/mlirprint.c similarity index 100% rename from clang/test/CIR/CodeGen/mlirprint.c rename to clang/test/CIR/mlirprint.c From e65f913696276d209d18996ca156497d0793c6c8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 28 Jun 2023 23:34:47 -0400 Subject: [PATCH 1040/2301] [CIR][Lowering] Fix missing tablegen dep --- clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index b971bb686270..99f3f8981384 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -11,6 +11,7 @@ add_clang_library(clangCIRLoweringThroughMLIR DEPENDS MLIRCIROpsIncGen + MLIRCIREnumsGen LINK_LIBS clangAST From 4c86351a6c8dbc3bc64329c1f7f380921f420a69 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 29 Jun 2023 10:45:27 -0700 Subject: [PATCH 1041/2301] [CIR][CIRGen] Support derived-to-base casts --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 18 ++++++++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 39 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 4 +- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/derived-to-base.cpp | 34 ++++++++++++++++ 5 files changed, 92 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 12f3da44d987..24be4f21a741 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1381,7 +1381,23 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { case CK_UncheckedDerivedToBase: case CK_DerivedToBase: { - assert(0 && "NYI"); + const auto *DerivedClassTy = + E->getSubExpr()->getType()->castAs(); + auto *DerivedClassDecl = cast(DerivedClassTy->getDecl()); + + LValue LV = buildLValue(E->getSubExpr()); + Address This = LV.getAddress(); + + // Perform the derived-to-base conversion + Address Base = getAddressOfBaseClass( + This, DerivedClassDecl, E->path_begin(), E->path_end(), + /*NullCheckValue=*/false, E->getExprLoc()); + + // TODO: Support accesses to members of base classes in TBAA. For now, we + // conservatively pretend that the complete object is of the base class + // type. + assert(!UnimplementedFeature::tbaa()); + return makeAddrLValue(Base, E->getType(), LV.getBaseInfo()); } case CK_ToUnion: assert(0 && "NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index b3ec99efdb9d..a9caaa2fcbdb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -95,6 +95,15 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorCall( CE && CE == MustTailCall, loc); } +// TODO(cir): this can be shared with LLVM codegen +static CXXRecordDecl *getCXXRecord(const Expr *E) { + QualType T = E->getType(); + if (const PointerType *PTy = T->getAs()) + T = PTy->getPointeeType(); + const RecordType *Ty = T->castAs(); + return cast(Ty->getDecl()); +} + RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, @@ -106,7 +115,31 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( const CXXMethodDecl *DevirtualizedMethod = nullptr; if (CanUseVirtualCall && MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) { - llvm_unreachable("NYI"); + const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType(); + DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl); + assert(DevirtualizedMethod); + const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent(); + const Expr *Inner = Base->IgnoreParenBaseCasts(); + if (DevirtualizedMethod->getReturnType().getCanonicalType() != + MD->getReturnType().getCanonicalType()) { + // If the return types are not the same, this might be a case where more + // code needs to run to compensate for it. For example, the derived + // method might return a type that inherits form from the return + // type of MD and has a prefix. + // For now we just avoid devirtualizing these covariant cases. + DevirtualizedMethod = nullptr; + } else if (getCXXRecord(Inner) == DevirtualizedClass) { + // If the class of the Inner expression is where the dynamic method + // is defined, build the this pointer from it. + Base = Inner; + } else if (getCXXRecord(Base) != DevirtualizedClass) { + // If the method is defined in a class that is not the best dynamic + // one or the one of the full expression, we would have to build + // a derived-to-base cast to compute the correct this pointer, but + // we don't have support for that yet, so do a virtual call. + assert(!UnimplementedFeature::buildDerivedToBaseCastForDevirt()); + DevirtualizedMethod = nullptr; + } } bool TrivialForCodegen = @@ -233,10 +266,12 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( if (getLangOpts().AppleKext) llvm_unreachable("NYI"); else if (!DevirtualizedMethod) + // TODO(cir): shouldn't this call getAddrOfCXXStructor instead? Callee = CIRGenCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD)); else { - llvm_unreachable("NYI"); + Callee = CIRGenCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), + GlobalDecl(MD)); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 0c06ba0fa594..3db2cc00b53d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1105,7 +1105,9 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(mlir::Location loc, if (OldGV && !OldGV.isDeclaration()) { assert(!OldGV.hasAvailableExternallyLinkage() && "available_externally typeinfos not yet implemented"); - llvm_unreachable("NYI"); + return mlir::cir::GlobalViewAttr::get( + CGM.getBuilder().getUInt8PtrTy(), + mlir::FlatSymbolRefAttr::get(OldGV.getSymNameAttr())); } // Check if there is already an external RTTI descriptor for this type. diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index d9dda2e5b18f..37ab71be53fe 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -69,6 +69,7 @@ struct UnimplementedFeature { // Missing Emissions static bool variablyModifiedTypeEmission() { return false; } static bool buildLValueAlignmentAssumption() { return false; } + static bool buildDerivedToBaseCastForDevirt() { return false; } // Clang early optimizations or things defered to LLVM lowering. static bool shouldUseBZeroPlusStoresToInitialize() { return false; } diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 50034ce5f9b9..4d20194cbd6e 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -122,3 +122,37 @@ void vcall(C1 &c1) { // CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_22struct2Ebuffy22)>>, !cir.ptr, !s32i, !ty_22struct2Ebuffy22) -> !s32i // CHECK: cir.return // CHECK: } + +class A { +public: + int a; + virtual void foo() {a++;} +}; + +class B : public A { +public: + int b; + void foo () { static_cast(*this).foo();} +}; + +// CHECK: cir.func linkonce_odr @_ZN1B3fooEv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load deref %0 : cir.ptr >, !cir.ptr +// CHECK: cir.scope { +// CHECK: %2 = cir.alloca !ty_22class2EA22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %3 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr + +// Call @A::A(A const&) +// CHECK: cir.call @_ZN1AC2ERKS_(%2, %3) : (!cir.ptr, !cir.ptr) -> () + +// Call @A::foo() +// CHECK: cir.call @_ZN1A3fooEv(%2) : (!cir.ptr) -> () +// CHECK: } +// CHECK: cir.return +// CHECK: } + +void t() { + B b; + b.foo(); +} From 79c60a7b3ba75d0df6bb0f367952fa35bfd62a33 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 29 Jun 2023 13:41:08 -0700 Subject: [PATCH 1042/2301] [CIR][CIRGen] Placement new support --- clang/lib/CIR/CodeGen/Address.h | 7 ++++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 ++-- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 22 ++++++++++++++++-- clang/test/CIR/CodeGen/new.cpp | 31 +++++++++++++++++++++++-- 4 files changed, 59 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index eb59bdb2f3c9..cfb79e697d30 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -69,6 +69,13 @@ class Address { IsKnownNonNull); } + /// Return address with different alignment, but same pointer and element + /// type. + Address withAlignment(clang::CharUnits NewAlignment) const { + return Address(getPointer(), getElementType(), NewAlignment, + isKnownNonNull()); + } + mlir::Value getPointer() const { assert(isValid()); return PointerAndKnownNonNull.getPointer(); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 24be4f21a741..25e6badc786f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -105,7 +105,7 @@ static Address buildPointerWithAlignment(const Expr *E, // Casts: if (const CastExpr *CE = dyn_cast(E)) { if (const auto *ECE = dyn_cast(CE)) - assert(0 && "not implemented"); + CGF.CGM.buildExplicitCastExprType(ECE, &CGF); switch (CE->getCastKind()) { default: { @@ -1373,7 +1373,8 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { if (V.isValid()) { auto T = getTypes().convertTypeForMem(E->getType()); if (V.getElementType() != T) - assert(0 && "NYI"); + LV.setAddress( + builder.createElementBitCast(getLoc(E->getSourceRange()), V, T)); } } return LV; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index a9caaa2fcbdb..0629f8f84613 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -661,9 +661,27 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { Address allocation = Address::invalid(); CallArgList allocatorArgs; if (allocator->isReservedGlobalPlacementOperator()) { - // In LLVM codegen: If the allocator is a global placement operator, just + // If the allocator is a global placement operator, just // "inline" it directly. - llvm_unreachable("NYI"); + assert(E->getNumPlacementArgs() == 1); + const Expr *arg = *E->placement_arguments().begin(); + + LValueBaseInfo BaseInfo; + allocation = buildPointerWithAlignment(arg, &BaseInfo); + + // The pointer expression will, in many cases, be an opaque void*. + // In these cases, discard the computed alignment and use the + // formal alignment of the allocated type. + if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl) + allocation = allocation.withAlignment(allocAlign); + + // Set up allocatorArgs for the call to operator delete if it's not + // the reserved global operator. + if (E->getOperatorDelete() && + !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) { + allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType()); + allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType()); + } } else { const FunctionProtoType *allocatorType = allocator->getType()->castAs(); diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index b2a568235675..b7f8fa5f384b 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -3,7 +3,6 @@ #include "std-cxx.h" - struct S { S(int, int); }; @@ -28,4 +27,32 @@ void m(int a, int b) { // CHECK: %10 = cir.load %9 : cir.ptr , !s32i // CHECK: cir.call @_ZN1SC1Eii(%6, %8, %10) : (!cir.ptr, !s32i, !s32i) -> () // CHECK: cir.call @_ZNSt10shared_ptrI1SEC1EPS0_(%2, %6) : (!cir.ptr, !cir.ptr) -> () -// CHECK: } \ No newline at end of file +// CHECK: } + +class B { +public: + void construct(B* __p) { + ::new ((void*)__p) B; + } +}; + +// CHECK: cir.func linkonce_odr @_ZN1B9constructEPS_(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__p", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %3 = cir.const(#cir.int<1> : !u64i) : !u64i +// CHECK: %4 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr + +// cir.call @B::B()(%new_placament_ptr) +// CHECK: cir.call @_ZN1BC1Ev(%6) : (!cir.ptr) -> () +// CHECK: cir.return +// CHECK: } + +void t() { + B b; + b.construct(&b); +} \ No newline at end of file From a2ac697434936e7ba19ac81e42af2532682faf53 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 30 Jun 2023 16:12:57 -0700 Subject: [PATCH 1043/2301] [CIR][CIRGen] More codegen for conditional operator --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 185 +++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 7 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 12 ++ clang/test/CIR/CodeGen/cond.cpp | 32 ++++ 6 files changed, 236 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/CodeGen/cond.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 25e6badc786f..7fb9e6a8deac 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1723,6 +1723,189 @@ CIRGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) { return buildAnyExpr(e->getSourceExpr()); } +namespace { +// Handle the case where the condition is a constant evaluatable simple integer, +// which means we don't have to separately handle the true/false blocks. +std::optional HandleConditionalOperatorLValueSimpleCase( + CIRGenFunction &CGF, const AbstractConditionalOperator *E) { + const Expr *condExpr = E->getCond(); + bool CondExprBool; + if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { + const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr(); + if (!CondExprBool) + std::swap(Live, Dead); + + if (!CGF.ContainsLabel(Dead)) { + // If the true case is live, we need to track its region. + if (CondExprBool) { + assert(!UnimplementedFeature::incrementProfileCounter()); + } + // If a throw expression we emit it and return an undefined lvalue + // because it can't be used. + if (auto *ThrowExpr = dyn_cast(Live->IgnoreParens())) { + llvm_unreachable("NYI"); + } + return CGF.buildLValue(Live); + } + } + return std::nullopt; +} +} // namespace + +/// Emit the operand of a glvalue conditional operator. This is either a glvalue +/// or a (possibly-parenthesized) throw-expression. If this is a throw, no +/// LValue is returned and the current block has been terminated. +static std::optional buildLValueOrThrowExpression(CIRGenFunction &CGF, + const Expr *Operand) { + if (auto *ThrowExpr = dyn_cast(Operand->IgnoreParens())) { + llvm_unreachable("NYI"); + } + + return CGF.buildLValue(Operand); +} + +// Create and generate the 3 blocks for a conditional operator. +// Leaves the 'current block' in the continuation basic block. +template +CIRGenFunction::ConditionalInfo +CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, + const FuncTy &BranchGenFunc) { + ConditionalInfo Info; + auto &CGF = *this; + ConditionalEvaluation eval(CGF); + auto loc = CGF.getLoc(E->getSourceRange()); + auto &builder = CGF.getBuilder(); + auto *trueExpr = E->getTrueExpr(); + auto *falseExpr = E->getFalseExpr(); + + mlir::Value condV = + CGF.buildOpOnBoolExpr(E->getCond(), loc, trueExpr, falseExpr); + SmallVector insertPoints{}; + mlir::Type yieldTy{}; + + auto patchVoidOrThrowSites = [&]() { + if (insertPoints.empty()) + return; + // If both arms are void, so be it. + if (!yieldTy) + yieldTy = CGF.VoidTy; + + // Insert required yields. + for (auto &toInsert : insertPoints) { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.restoreInsertionPoint(toInsert); + + // Block does not return: build empty yield. + if (yieldTy.isa()) { + builder.create(loc); + } else { // Block returns: set null yield value. + mlir::Value op0 = builder.getNullValue(yieldTy, loc); + builder.create(loc, op0); + } + } + }; + + Info.Result = + builder + .create( + loc, condV, /*trueBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // FIXME: abstract all this massive location handling elsewhere. + SmallVector locs; + if (loc.isa()) { + locs.push_back(loc); + locs.push_back(loc); + } else if (loc.isa()) { + auto fusedLoc = loc.cast(); + locs.push_back(fusedLoc.getLocations()[0]); + locs.push_back(fusedLoc.getLocations()[1]); + } + CIRGenFunction::LexicalScopeContext lexScope{ + locs[0], locs[1], b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + Info.LHS = BranchGenFunc(CGF, trueExpr); + auto lhs = Info.LHS->getPointer(); + eval.end(CGF); + + if (lhs) { + yieldTy = lhs.getType(); + b.create(loc, lhs); + return; + } + // If LHS or RHS is a throw or void expression we need to patch + // arms as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + }, + /*falseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto fusedLoc = loc.cast(); + auto locBegin = fusedLoc.getLocations()[0]; + auto locEnd = fusedLoc.getLocations()[1]; + CIRGenFunction::LexicalScopeContext lexScope{ + locBegin, locEnd, b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + Info.RHS = BranchGenFunc(CGF, falseExpr); + auto rhs = Info.RHS->getPointer(); + eval.end(CGF); + + if (rhs) { + yieldTy = rhs.getType(); + b.create(loc, rhs); + } else { + // If LHS or RHS is a throw or void expression we need to + // patch arms as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + } + + patchVoidOrThrowSites(); + }) + .getResult(); + return Info; +} + +LValue CIRGenFunction::buildConditionalOperatorLValue( + const AbstractConditionalOperator *expr) { + if (!expr->isGLValue()) { + llvm_unreachable("NYI"); + } + + OpaqueValueMapping binding(*this, expr); + if (std::optional Res = + HandleConditionalOperatorLValueSimpleCase(*this, expr)) + return *Res; + + ConditionalInfo Info = + buildConditionalBlocks(expr, [](CIRGenFunction &CGF, const Expr *E) { + return buildLValueOrThrowExpression(CGF, E); + }); + + if ((Info.LHS && !Info.LHS->isSimple()) || + (Info.RHS && !Info.RHS->isSimple())) + llvm_unreachable("unsupported conditional operator"); + + if (Info.LHS && Info.RHS) { + Address lhsAddr = Info.LHS->getAddress(); + Address rhsAddr = Info.RHS->getAddress(); + Address result(Info.Result, lhsAddr.getElementType(), + std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment())); + AlignmentSource alignSource = + std::max(Info.LHS->getBaseInfo().getAlignmentSource(), + Info.RHS->getBaseInfo().getAlignmentSource()); + assert(!UnimplementedFeature::tbaa()); + return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource)); + } else { + llvm_unreachable("NYI"); + } +} + /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. @@ -1734,6 +1917,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { << E->getStmtClassName() << "'"; assert(0 && "not implemented"); } + case Expr::ConditionalOperatorClass: + return buildConditionalOperatorLValue(cast(E)); case Expr::ArraySubscriptExprClass: return buildArraySubscriptExpr(cast(E)); case Expr::BinaryOperatorClass: diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 0629f8f84613..56f29105da4f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -585,8 +585,8 @@ static void StoreAnyExprIntoOneUnit(CIRGenFunction &CGF, const Expr *Init, // FIXME: Refactor with buildExprAsInit. switch (CGF.getEvaluationKind(AllocType)) { case TEK_Scalar: - CGF.buildScalarInit(Init, nullptr, CGF.makeAddrLValue(NewPtr, AllocType), - false); + CGF.buildScalarInit(Init, CGF.getLoc(Init->getSourceRange()), + CGF.makeAddrLValue(NewPtr, AllocType), false); return; case TEK_Complex: llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index d711ec5a1a83..a81da44cdb4e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -277,7 +277,7 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { - llvm_unreachable("NYI"); + return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange())); } mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *E) { return VisitCastExpr(E); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 5c4ee7e68d77..8e094041c1f9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -136,11 +136,12 @@ mlir::Location CIRGenFunction::getLoc(SourceRange SLoc) { SmallVector locs = {B, E}; mlir::Attribute metadata; return mlir::FusedLoc::get(locs, metadata, builder.getContext()); - } else { - // Do our best... - assert(currSrcLoc && "expected to inherit some source location"); + } else if (currSrcLoc) { return *currSrcLoc; } + + // We're brave, but time to give up. + return builder.getUnknownLoc(); } mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 824917f8c6e1..cfa43350d561 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1224,6 +1224,9 @@ class CIRGenFunction : public CIRGenTypeCache { LValueBaseInfo *BaseInfo = nullptr, KnownNonNull_t IsKnownNonNull = NotKnownNonNull); + LValue + buildConditionalOperatorLValue(const AbstractConditionalOperator *expr); + /// Emit an expression as an initializer for an object (variable, field, etc.) /// at the given location. The expression is not necessarily the normal /// initializer for the object, and the address is not necessarily @@ -1609,6 +1612,15 @@ class CIRGenFunction : public CIRGenTypeCache { // llvm::BasicBlock *getStartingBlock() const { return StartBB; } }; + struct ConditionalInfo { + std::optional LHS{}, RHS{}; + mlir::Value Result{}; + }; + + template + ConditionalInfo buildConditionalBlocks(const AbstractConditionalOperator *E, + const FuncTy &BranchGenFunc); + // Return true if we're currently emitting one branch or the other of a // conditional expression. bool isInConditionalBranch() const { return OutermostConditional != nullptr; } diff --git a/clang/test/CIR/CodeGen/cond.cpp b/clang/test/CIR/CodeGen/cond.cpp new file mode 100644 index 000000000000..42c11a3301d2 --- /dev/null +++ b/clang/test/CIR/CodeGen/cond.cpp @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct __less { + inline constexpr bool operator()(const unsigned long& __x, const unsigned long& __y) const {return __x < __y;} +}; + +const unsigned long& +min(const unsigned long& __a, const unsigned long& __b) { + return __less()(__b, __a) ? __b : __a; +} + +// CHECK: cir.func @_Z3minRKmS0_(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__a", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__b", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK: cir.scope { +// CHECK: %4 = cir.alloca !ty_22struct2E__less22, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: cir.call @_ZN6__lessC1Ev(%4) : (!cir.ptr) -> () +// CHECK: %5 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %6 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %7 = cir.call @_ZNK6__lessclERKmS1_(%4, %5, %6) : (!cir.ptr, !cir.ptr, !cir.ptr) -> !cir.bool +// CHECK: %8 = cir.ternary(%7, true { +// CHECK: %9 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: cir.yield %9 : !cir.ptr +// CHECK: }, false { +// CHECK: %9 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.yield %9 : !cir.ptr +// CHECK: }) : (!cir.bool) -> !cir.ptr +// CHECK: cir.store %8, %2 : !cir.ptr, cir.ptr > \ No newline at end of file From c58b7d7d5aa0c00660e2f73fd1790d5dc9dc48bd Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 3 Jul 2023 15:14:24 -0300 Subject: [PATCH 1044/2301] [CIR][CIRGen] Pass field index to cir.struct_element_addr A `member_index` index attribute in `cir.struct_element_addr` now holds the index for the member being accessed. A APInt is used as the storage type for the index, alongside a custom builder to abstract the APInt object creation. Before, we only passed the name of the field to cir.struct_element_addr, which was not very useful since it couldn't be used to recover the index of the member being accessed. This index is essential for lowering CIR to LLVM, as LLVM needs to know which element is being accessed in the struct data aggregate. ghstack-source-id: fa68727f9bd65016647d7ae3acbbf1b109fa79ec Pull Request resolved: https://github.com/llvm/clangir/pull/148 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 20 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 3 ++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 15 ++++++++------ clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 8 ++++---- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +++-- clang/test/CIR/CodeGen/String.cpp | 10 +++++----- clang/test/CIR/CodeGen/agg-init.cpp | 10 +++++----- clang/test/CIR/CodeGen/assign-operator.cpp | 8 ++++---- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 4 ++-- clang/test/CIR/CodeGen/derived-to-base.cpp | 2 +- clang/test/CIR/CodeGen/lambda.cpp | 8 ++++---- clang/test/CIR/CodeGen/rangefor.cpp | 6 +++--- clang/test/CIR/CodeGen/struct.cpp | 10 +++++----- clang/test/CIR/CodeGen/unary-deref.cpp | 2 +- 14 files changed, 67 insertions(+), 44 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index dcdbce981fc5..8d94edda23c8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1383,6 +1383,9 @@ def StructElementAddr : CIR_Op<"struct_element_addr"> { The `cir.struct_element_addr` operaration gets the address of a particular named member from the input struct. + It expects a pointer to the base struct as well as the name of the member + and its field index. + Example: ```mlir !ty_22struct2EBar22 = type !cir.struct<"struct.Bar", i32, i8> @@ -1397,10 +1400,25 @@ def StructElementAddr : CIR_Op<"struct_element_addr"> { let arguments = (ins Arg:$struct_addr, - StrAttr:$member_name); + StrAttr:$member_name, + IndexAttr:$member_index); let results = (outs Res:$result); + let builders = [ + OpBuilder<(ins "Type":$type, "Value":$value, "llvm::StringRef":$name, + "unsigned":$index), + [{ + mlir::APInt fieldIdx(64, index); + build($_builder, $_state, type, value, name, fieldIdx); + }]> + ]; + + let extraClassDeclaration = [{ + /// Return the index of the struct member being accessed. + uint64_t getIndex() { return getMemberIndex().getZExtValue(); } + }]; + // FIXME: add verifier. } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 54d3036d1459..8e59bb618e1c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -208,7 +208,8 @@ static void buildLValueForAnyFieldInitialization(CIRGenFunction &CGF, if (MemberInit->isIndirectMemberInitializer()) { llvm_unreachable("NYI"); } else { - LHS = CGF.buildLValueForFieldInitialization(LHS, Field, Field->getName()); + LHS = CGF.buildLValueForFieldInitialization(LHS, Field, Field->getName(), + Field->getFieldIndex()); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 7fb9e6a8deac..7185655ba2c6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -52,7 +52,8 @@ static Address buildPreserveStructAccess(CIRGenFunction &CGF, LValue base, /// doesn't necessarily have the right type. static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, const FieldDecl *field, - llvm::StringRef fieldName) { + llvm::StringRef fieldName, + unsigned fieldIndex) { if (field->isZeroSize(CGF.getContext())) llvm_unreachable("NYI"); @@ -65,7 +66,7 @@ static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, // which do not currently carry the name, so it can be passed down from the // CaptureStmt. auto sea = CGF.getBuilder().create( - loc, fieldPtr, Base.getPointer(), fieldName); + loc, fieldPtr, Base.getPointer(), fieldName, fieldIndex); // TODO: We could get the alignment from the CIRGenRecordLayout, but given the // member name based lookup of the member here we probably shouldn't be. We'll @@ -237,9 +238,10 @@ LValue CIRGenFunction::buildLValueForField(LValue base, if (!IsInPreservedAIRegion && (!getDebugInfo() || !rec->hasAttr())) { llvm::StringRef fieldName = field->getName(); + unsigned fieldIndex = field->getFieldIndex(); if (CGM.LambdaFieldToName.count(field)) fieldName = CGM.LambdaFieldToName[field]; - addr = buildAddrOfFieldStorage(*this, addr, field, fieldName); + addr = buildAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); } else // Remember the original struct field index addr = buildPreserveStructAccess(*this, base, addr, field); @@ -283,14 +285,15 @@ LValue CIRGenFunction::buildLValueForField(LValue base, } LValue CIRGenFunction::buildLValueForFieldInitialization( - LValue Base, const clang::FieldDecl *Field, llvm::StringRef FieldName) { + LValue Base, const clang::FieldDecl *Field, llvm::StringRef FieldName, + unsigned FieldIndex) { QualType FieldType = Field->getType(); if (!FieldType->isReferenceType()) return buildLValueForField(Base, Field); - Address V = - buildAddrOfFieldStorage(*this, Base.getAddress(), Field, FieldName); + Address V = buildAddrOfFieldStorage(*this, Base.getAddress(), Field, + FieldName, FieldIndex); // Make sure that the address is pointing to the right type. auto memTy = getTypes().convertTypeForMem(FieldType); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index d7746a676bc8..509c9899aa59 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -448,8 +448,8 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { } // Emit initialization - LValue LV = - CGF.buildLValueForFieldInitialization(SlotLV, *CurField, fieldName); + LValue LV = CGF.buildLValueForFieldInitialization( + SlotLV, *CurField, fieldName, CurField->getFieldIndex()); if (CurField->hasCapturedVLAType()) { llvm_unreachable("NYI"); } @@ -704,8 +704,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( CGF.getTypes().isZeroInitializable(ExprToVisit->getType())) break; - LValue LV = - CGF.buildLValueForFieldInitialization(DestLV, field, field->getName()); + LValue LV = CGF.buildLValueForFieldInitialization( + DestLV, field, field->getName(), field->getFieldIndex()); // We never generate write-barries for initialized fields. assert(!UnimplementedFeature::setNonGC()); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index cfa43350d561..5f57488b407e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -44,7 +44,7 @@ class CallOp; namespace { class ScalarExprEmitter; class AggExprEmitter; -} +} // namespace namespace cir { @@ -1448,7 +1448,8 @@ class CIRGenFunction : public CIRGenTypeCache { /// stored in the reference. LValue buildLValueForFieldInitialization(LValue Base, const clang::FieldDecl *Field, - llvm::StringRef FieldName); + llvm::StringRef FieldName, + unsigned FieldIndex); void buildInitializerForField(clang::FieldDecl *Field, LValue LHS, clang::Expr *Init); diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index b9a024496abe..f92ad17a6d3b 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -21,10 +21,10 @@ void test() { // CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 -// CHECK-NEXT: %2 = "cir.struct_element_addr"(%1) <{member_name = "storage"}> +// CHECK-NEXT: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "storage"}> // CHECK-NEXT: %3 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %4 = "cir.struct_element_addr"(%1) <{member_name = "size"}> : (!cir.ptr) -> !cir.ptr +// CHECK-NEXT: %4 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "size"}> : (!cir.ptr) -> !cir.ptr // CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.cast(integral, %5 : !s32i), !s64i // CHECK-NEXT: cir.store %6, %4 : !s64i, cir.ptr @@ -36,10 +36,10 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: cir.store %arg1, %1 // CHECK-NEXT: %2 = cir.load %0 -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_name = "storage"}> +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "storage"}> // CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) // CHECK-NEXT: cir.store %4, %3 -// CHECK-NEXT: %5 = "cir.struct_element_addr"(%2) <{member_name = "size"}> : (!cir.ptr) -> !cir.ptr +// CHECK-NEXT: %5 = "cir.struct_element_addr"(%2) <{member_index = 1 : index, member_name = "size"}> : (!cir.ptr) -> !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i // CHECK-NEXT: %7 = cir.cast(integral, %6 : !s32i), !s64i // CHECK-NEXT: cir.store %7, %5 : !s64i, cir.ptr @@ -52,7 +52,7 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_name = "storage"}> : (!cir.ptr) -> !cir.ptr> +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "storage"}> : (!cir.ptr) -> !cir.ptr> // CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 1458d8eef2f9..f69cb2df69ac 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -35,10 +35,10 @@ void use() { yop{}; } // CHECK: cir.func @_Z3usev() { // CHECK: %0 = cir.alloca !ty_22struct2Eyep_22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} -// CHECK: %1 = "cir.struct_element_addr"(%0) <{member_name = "Status"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %1 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "Status"}> : (!cir.ptr) -> !cir.ptr // CHECK: %2 = cir.const(#cir.int<0> : !u32i) : !u32i // CHECK: cir.store %2, %1 : !u32i, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "HC"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_index = 1 : index, member_name = "HC"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<0> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr // CHECK: cir.return @@ -68,12 +68,12 @@ void yo() { // CHECK: %1 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} // CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i,#cir.null : !cir.ptr,#cir.int<0> : !u64i}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 // CHECK: cir.store %2, %0 : !ty_22struct2EYo22, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%1) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "type"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr -// CHECK: %5 = "cir.struct_element_addr"(%1) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %5 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "next"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > -// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "createFlags"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_index = 2 : index, member_name = "createFlags"}> : (!cir.ptr) -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !u64i) : !u64i // CHECK: cir.store %8, %7 : !u64i, cir.ptr diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 2a8e5bc17ebb..25f9c56e4c81 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -23,7 +23,7 @@ struct String { // Get address of `this->size` - // CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "size"}> + // CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "size"}> // Get address of `s` @@ -31,7 +31,7 @@ struct String { // Get the address of s.size - // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_name = "size"}> + // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_index = 0 : index, member_name = "size"}> // Load value from s.size and store in this->size @@ -53,9 +53,9 @@ struct String { // CHECK: cir.store %arg1, %1 : !cir.ptr // CHECK: %3 = cir.load deref %0 : cir.ptr > // CHECK: %4 = cir.load %1 : cir.ptr > - // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_name = "size"}> + // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_index = 0 : index, member_name = "size"}> // CHECK: %6 = cir.load %5 : cir.ptr , !s64i - // CHECK: %7 = "cir.struct_element_addr"(%3) <{member_name = "size"}> + // CHECK: %7 = "cir.struct_element_addr"(%3) <{member_index = 0 : index, member_name = "size"}> // CHECK: cir.store %6, %7 : !s64i, cir.ptr // CHECK: cir.store %3, %2 : !cir.ptr // CHECK: %8 = cir.load %2 : cir.ptr > diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index 7b685169810a..fe79e2b690d3 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -11,9 +11,9 @@ struct String { // CHECK: cir.store %arg0, %0 // CHECK: cir.store %arg1, %1 // CHECK: %2 = cir.load %0 -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "size"}> +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "size"}> // CHECK: %4 = cir.load %1 -// CHECK: %5 = "cir.struct_element_addr"(%4) <{member_name = "size"}> +// CHECK: %5 = "cir.struct_element_addr"(%4) <{member_index = 0 : index, member_name = "size"}> // CHECK: %6 = cir.load %5 : cir.ptr , !s64i // CHECK: cir.store %6, %3 : !s64i, cir.ptr // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 4d20194cbd6e..4fdb269afbf4 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -82,7 +82,7 @@ void C3::Layer::Initialize() { // CHECK: cir.scope { // CHECK: %2 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "m_C1"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "m_C1"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %4 = cir.load %3 : cir.ptr >, !cir.ptr // CHECK: %5 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index d91ac4173769..7126d0557160 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -26,12 +26,12 @@ void l0() { // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %3 = cir.load %2 : cir.ptr >, !cir.ptr // CHECK: %4 = cir.load %3 : cir.ptr , !s32i // CHECK: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %6 = cir.binop(add, %4, %5) : !s32i -// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr // CHECK: cir.store %6, %8 : !s32i, cir.ptr @@ -50,7 +50,7 @@ auto g() { // CHECK: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK: %2 = cir.const(#cir.int<12> : !s32i) : !s32i // CHECK: cir.store %2, %1 : !s32i, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > // CHECK: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon223 // CHECK: cir.return %4 : !ty_22class2Eanon223 @@ -70,7 +70,7 @@ auto g2() { // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.const(#cir.int<12> : !s32i) : !s32i // CHECK-NEXT: cir.store %2, %1 : !s32i, cir.ptr -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK-NEXT: cir.store %1, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon224 // CHECK-NEXT: cir.return %4 : !ty_22class2Eanon224 diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 64386468d061..05d310efc515 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -61,11 +61,11 @@ void init(unsigned numImages) { // CHECK: %13 = cir.alloca !ty_22struct2Etriple22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} // CHECK: %14 = cir.const(#cir.zero : !ty_22struct2Etriple22) : !ty_22struct2Etriple22 // CHECK: cir.store %14, %13 : !ty_22struct2Etriple22, cir.ptr -// CHECK: %15 = "cir.struct_element_addr"(%13) <{member_name = "type"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %15 = "cir.struct_element_addr"(%13) <{member_index = 0 : index, member_name = "type"}> : (!cir.ptr) -> !cir.ptr // CHECK: %16 = cir.const(#cir.int<1000024002> : !u32i) : !u32i // CHECK: cir.store %16, %15 : !u32i, cir.ptr -// CHECK: %17 = "cir.struct_element_addr"(%13) <{member_name = "next"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %18 = "cir.struct_element_addr"(%13) <{member_name = "image"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %17 = "cir.struct_element_addr"(%13) <{member_index = 1 : index, member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %18 = "cir.struct_element_addr"(%13) <{member_index = 2 : index, member_name = "image"}> : (!cir.ptr) -> !cir.ptr // CHECK: %19 = cir.load %7 : cir.ptr >, !cir.ptr // CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index eebd72b2496d..17c9e0509650 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -98,14 +98,14 @@ void m() { Adv C; } // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_name = "x"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "w"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "x"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "w"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000024001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr -// CHECK: %5 = "cir.struct_element_addr"(%2) <{member_name = "n"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %5 = "cir.struct_element_addr"(%2) <{member_index = 1 : index, member_name = "n"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %6 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > -// CHECK: %7 = "cir.struct_element_addr"(%2) <{member_name = "d"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %7 = "cir.struct_element_addr"(%2) <{member_index = 2 : index, member_name = "d"}> : (!cir.ptr) -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: cir.store %8, %7 : !s32i, cir.ptr // CHECK: cir.return @@ -147,4 +147,4 @@ void ppp() { Entry x; } // CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr -// CHECK: = "cir.struct_element_addr"(%1) <{member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr)>>> +// CHECK: = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr)>>> diff --git a/clang/test/CIR/CodeGen/unary-deref.cpp b/clang/test/CIR/CodeGen/unary-deref.cpp index e45f884d01bc..b3f0e7dc1eec 100644 --- a/clang/test/CIR/CodeGen/unary-deref.cpp +++ b/clang/test/CIR/CodeGen/unary-deref.cpp @@ -12,6 +12,6 @@ void foo() { // CHECK: cir.func linkonce_odr @_ZNK12MyIntPointer4readEv // CHECK: %2 = cir.load %0 -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_name = "ptr"}> +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "ptr"}> // CHECK: %4 = cir.load deref %3 : cir.ptr > // CHECK: %5 = cir.load %4 From 8113e0f0254cdd37308a00bc6d6f123fffef20dd Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 3 Jul 2023 15:14:27 -0300 Subject: [PATCH 1045/2301] [CIR][Lowering] Lower basic structs Maps StructElementAddrOp field indexes to LLVM's GEPOp offsets to access struct members. The current implementation does not account for anonymous structs nor nested structs, only simple structs. ghstack-source-id: b931ee744ace61a42e33f9bfa075ecc2f84a5685 Pull Request resolved: https://github.com/llvm/clangir/pull/149 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 39 ++++++++++++++----- clang/test/CIR/Lowering/struct.cir | 18 +++++++++ 2 files changed, 48 insertions(+), 9 deletions(-) create mode 100644 clang/test/CIR/Lowering/struct.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 9f8347d33620..45e02c48eba3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1218,18 +1218,39 @@ class CIRBrOpLowering : public mlir::OpConversionPattern { } }; +class CIRStructElementAddrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern< + mlir::cir::StructElementAddr>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::StructElementAddr op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto llResTy = getTypeConverter()->convertType(op.getType()); + // Since the base address is a pointer to structs, the first offset is + // always zero. The second offset tell us which member it will access. + llvm::SmallVector offset{0, op.getIndex()}; + const auto elementTy = getTypeConverter()->convertType( + op.getStructAddr().getType().getPointee()); + rewriter.replaceOpWithNewOp( + op, llResTy, elementTy, adaptor.getStructAddr(), offset); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns.add(converter, - patterns.getContext()); + patterns.add< + CIRCmpOpLowering, CIRLoopOpLowering, CIRBrCondOpLowering, + CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, + CIRBinOpLowering, CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, + CIRAllocaLowering, CIRFuncLowering, CIRScopeOpLowering, CIRCastOpLowering, + CIRIfLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, + CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, + CIRBrOpLowering, CIRTernaryOpLowering, CIRStructElementAddrOpLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir new file mode 100644 index 000000000000..df30ab27b933 --- /dev/null +++ b/clang/test/CIR/Lowering/struct.cir @@ -0,0 +1,18 @@ +// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +!u8i = !cir.int +!ty_22struct2ES22 = !cir.struct<"struct.S", !u8i, !s32i> +module { + cir.func @test() { + %1 = cir.alloca !ty_22struct2ES22, cir.ptr , ["x"] {alignment = 4 : i64} + // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#STRUCT:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"struct.S", (i8, i32)> + %3 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "c"}> : (!cir.ptr) -> !cir.ptr + // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 0] : (!llvm.ptr) -> !llvm.ptr + %5 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr + // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 1] : (!llvm.ptr) -> !llvm.ptr + cir.return + } +} From 6c1601d20cc75462560403d6ed14aaedac6e4df3 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 3 Jul 2023 17:56:51 -0300 Subject: [PATCH 1046/2301] [CIR][CIRGen][NFC] Update array subscript feature guarding Refactor portions of CIRGenFunction::buildArraySubscriptExpr to be more akin to the original codegen, while also replacing TODO comments and assertions by `llvm_unreachable` statements with proper messages. ghstack-source-id: a99b1fede1d44851e59aa647ca7c94b4417b9974 Pull Request resolved: https://github.com/llvm/clangir/pull/141 --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 59 +++++++++++++++------------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 7185655ba2c6..22802d94e564 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -16,6 +16,7 @@ #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "CIRGenValue.h" #include "UnimplementedFeatureGuarding.h" #include "clang/AST/GlobalDecl.h" @@ -23,6 +24,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/ADT/StringExtras.h" @@ -578,9 +580,9 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { if (const auto *VD = dyn_cast(ND)) { // Global Named registers access via intrinsics only - if (VD->getStorageClass() == SC_Register && - VD->hasAttr() && !VD->isLocalVarDecl()) - llvm_unreachable("NYI"); + if (VD->getStorageClass() == SC_Register && VD->hasAttr() && + !VD->isLocalVarDecl()) + llvm_unreachable("NYI"); assert(E->isNonOdrUse() != NOUR_Constant && "not implemented"); @@ -1173,12 +1175,11 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed) { // The index must always be an integer, which is not an aggregate. Emit it // in lexical order (this complexity is, sadly, required by C++17). - // llvm::Value *IdxPre = - // (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr; - assert(E->getLHS() != E->getIdx() && "not implemented"); + mlir::Value IdxPre = + (E->getLHS() == E->getIdx()) ? buildScalarExpr(E->getIdx()) : nullptr; bool SignedIndices = false; - auto EmitIdxAfterBase = [&](bool Promote) -> mlir::Value { - mlir::Value Idx; + auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> mlir::Value { + mlir::Value Idx = IdxPre; if (E->getLHS() != E->getIdx()) { assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS"); Idx = buildScalarExpr(E->getIdx()); @@ -1188,39 +1189,41 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); SignedIndices |= IdxSigned; - assert(!SanOpts.has(SanitizerKind::ArrayBounds) && "not implemented"); + if (SanOpts.has(SanitizerKind::ArrayBounds)) + llvm_unreachable("array bounds sanitizer is NYI"); - // TODO: Extend or truncate the index type to 32 or 64-bits. - // if (Promote && !Idx.getType().isa<::mlir::cir::PointerType>()) { - // Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); - // } + // Extend or truncate the index type to 32 or 64-bits. + auto ptrTy = Idx.getType().dyn_cast(); + if (Promote && ptrTy && ptrTy.getPointee().isa()) + llvm_unreachable("index type cast is NYI"); return Idx; }; + IdxPre = nullptr; // If the base is a vector type, then we are forming a vector element // with this subscript. if (E->getBase()->getType()->isVectorType() && !isa(E->getBase())) { - assert(0 && "not implemented"); + llvm_unreachable("vector subscript is NYI"); } // All the other cases basically behave like simple offsetting. // Handle the extvector case we ignored above. if (isa(E->getBase())) { - assert(0 && "not implemented"); + llvm_unreachable("extvector subscript is NYI"); } - // TODO: TBAAAccessInfo + assert(!UnimplementedFeature::tbaa() && "TBAA is NYI"); LValueBaseInfo EltBaseInfo; Address Addr = Address::invalid(); if (const VariableArrayType *vla = getContext().getAsVariableArrayType(E->getType())) { - assert(0 && "not implemented"); + llvm_unreachable("variable array subscript is NYI"); } else if (const ObjCObjectType *OIT = E->getType()->getAs()) { - assert(0 && "not implemented"); + llvm_unreachable("ObjC object type subscript is NYI"); } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { // If this is A[i] where A is an array, the frontend will have decayed // the base to be a ArrayToPointerDecay implicit cast. While correct, it is @@ -1231,26 +1234,26 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, LValue ArrayLV; // For simple multidimensional array indexing, set the 'accessed' flag // for better bounds-checking of the base expression. - // if (const auto *ASE = dyn_cast(Array)) - // ArrayLV = buildArraySubscriptExpr(ASE, /*Accessed*/ true); - assert(!llvm::isa(Array) && - "multidimensional array indexing not implemented"); - - ArrayLV = buildLValue(Array); + if (const auto *ASE = dyn_cast(Array)) + assert(!llvm::isa(Array) && "multi-dim access NYI"); + else + ArrayLV = buildLValue(Array); auto Idx = EmitIdxAfterBase(/*Promote=*/true); - QualType arrayType = Array->getType(); // Propagate the alignment from the array itself to the result. + QualType arrayType = Array->getType(); Addr = buildArraySubscriptPtr( *this, CGM.getLoc(Array->getBeginLoc()), CGM.getLoc(Array->getEndLoc()), ArrayLV.getAddress(), {Idx}, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, CGM.getLoc(E->getExprLoc()), &arrayType, E->getBase()); EltBaseInfo = ArrayLV.getBaseInfo(); - // TODO: EltTBAAInfo + // TODO(cir): EltTBAAInfo + assert(!UnimplementedFeature::tbaa() && "TBAA is NYI"); } else { // The base must be a pointer; emit it with an estimate of its alignment. // TODO(cir): EltTBAAInfo + assert(!UnimplementedFeature::tbaa() && "TBAA is NYI"); Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo); auto Idx = EmitIdxAfterBase(/*Promote*/ true); QualType ptrType = E->getBase()->getType(); @@ -1261,9 +1264,11 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, } LValue LV = LValue::makeAddr(Addr, E->getType(), EltBaseInfo); + if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) { - assert(0 && "not implemented"); + llvm_unreachable("ObjC is NYI"); } + return LV; } From 80cf6bea81a8cbc97cdc277751745142584bc7bd Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 3 Jul 2023 17:56:51 -0300 Subject: [PATCH 1047/2301] [CIR][CIRGen] Add basic multi-dim array access ghstack-source-id: fc4f5f6b09272b5258325b7d1d1d947388980a06 Pull Request resolved: https://github.com/llvm/clangir/pull/142 --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/test/CIR/CodeGen/array.cpp | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 22802d94e564..846e1b8fddda 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1235,7 +1235,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // For simple multidimensional array indexing, set the 'accessed' flag // for better bounds-checking of the base expression. if (const auto *ASE = dyn_cast(Array)) - assert(!llvm::isa(Array) && "multi-dim access NYI"); + ArrayLV = buildArraySubscriptExpr(ASE, /*Accessed=*/true); else ArrayLV = buildLValue(Array); auto Idx = EmitIdxAfterBase(/*Promote=*/true); diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 13267434d422..a86e96c5ffa4 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -46,3 +46,18 @@ void local_stringlit() { // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK-NEXT: cir.store %2, %0 : !cir.ptr, cir.ptr > + +int multidim(int i, int j) { + int arr[2][2]; + return arr[i][j]; +} + +// CHECK: %3 = cir.alloca !cir.array x 2>, cir.ptr x 2>> +// Stride first dimension (stride = 2) +// CHECK: %4 = cir.load %{{.+}} : cir.ptr , !s32i +// CHECK: %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr x 2>>), !cir.ptr> +// CHECK: %6 = cir.ptr_stride(%5 : !cir.ptr>, %4 : !s32i), !cir.ptr> +// Stride second dimension (stride = 1) +// CHECK: %7 = cir.load %{{.+}} : cir.ptr , !s32i +// CHECK: %8 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr +// CHECK: %9 = cir.ptr_stride(%8 : !cir.ptr, %7 : !s32i), !cir.ptr From 37ebc15910d14a7727873500a6452ef755dbf75f Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 27 Jun 2023 11:21:05 -0300 Subject: [PATCH 1048/2301] [CIR][Lowering] Lower remaining signed integer binops Adds lowering for signed division, remainder, and r-shift operations. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 +- clang/test/CIR/Lowering/binop-signed-int.cir | 65 +++++++++++++++++++ 2 files changed, 68 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/Lowering/binop-signed-int.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 45e02c48eba3..ae1b568f3d22 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1036,7 +1036,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else - llvm_unreachable("signed integer division binop lowering NYI"); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); } else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; @@ -1045,7 +1045,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else - llvm_unreachable("signed integer remainder binop lowering NYI"); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); } else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; @@ -1066,7 +1066,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else - llvm_unreachable("signed integer shift binop lowering NYI"); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; } } diff --git a/clang/test/CIR/Lowering/binop-signed-int.cir b/clang/test/CIR/Lowering/binop-signed-int.cir new file mode 100644 index 000000000000..8adc8a1e1e3e --- /dev/null +++ b/clang/test/CIR/Lowering/binop-signed-int.cir @@ -0,0 +1,65 @@ +// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +module { + cir.func @foo() { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<2> : !s32i) : !s32i cir.store %3, %0 : !s32i, cir.ptr + %4 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %4, %1 : !s32i, cir.ptr + %5 = cir.load %0 : cir.ptr , !s32i + %6 = cir.load %1 : cir.ptr , !s32i + %7 = cir.binop(mul, %5, %6) : !s32i + // CHECK: = llvm.mul + cir.store %7, %2 : !s32i, cir.ptr + %8 = cir.load %2 : cir.ptr , !s32i + %9 = cir.load %1 : cir.ptr , !s32i + %10 = cir.binop(div, %8, %9) : !s32i + // CHECK: = llvm.sdiv + cir.store %10, %2 : !s32i, cir.ptr + %11 = cir.load %2 : cir.ptr , !s32i + %12 = cir.load %1 : cir.ptr , !s32i + %13 = cir.binop(rem, %11, %12) : !s32i + // CHECK: = llvm.srem + cir.store %13, %2 : !s32i, cir.ptr + %14 = cir.load %2 : cir.ptr , !s32i + %15 = cir.load %1 : cir.ptr , !s32i + %16 = cir.binop(add, %14, %15) : !s32i + // CHECK: = llvm.add + cir.store %16, %2 : !s32i, cir.ptr + %17 = cir.load %2 : cir.ptr , !s32i + %18 = cir.load %1 : cir.ptr , !s32i + %19 = cir.binop(sub, %17, %18) : !s32i + // CHECK: = llvm.sub + cir.store %19, %2 : !s32i, cir.ptr + %20 = cir.load %2 : cir.ptr , !s32i + %21 = cir.load %1 : cir.ptr , !s32i + %22 = cir.binop(shr, %20, %21) : !s32i + // CHECK: = llvm.ashr + cir.store %22, %2 : !s32i, cir.ptr + %23 = cir.load %2 : cir.ptr , !s32i + %24 = cir.load %1 : cir.ptr , !s32i + %25 = cir.binop(shl, %23, %24) : !s32i + // CHECK: = llvm.shl + cir.store %25, %2 : !s32i, cir.ptr + %26 = cir.load %2 : cir.ptr , !s32i + %27 = cir.load %1 : cir.ptr , !s32i + %28 = cir.binop(and, %26, %27) : !s32i + // CHECK: = llvm.and + cir.store %28, %2 : !s32i, cir.ptr + %29 = cir.load %2 : cir.ptr , !s32i + %30 = cir.load %1 : cir.ptr , !s32i + %31 = cir.binop(xor, %29, %30) : !s32i + // CHECK: = llvm.xor + cir.store %31, %2 : !s32i, cir.ptr + %32 = cir.load %2 : cir.ptr , !s32i + %33 = cir.load %1 : cir.ptr , !s32i + %34 = cir.binop(or, %32, %33) : !s32i + // CHECK: = llvm.or + cir.store %34, %2 : !s32i, cir.ptr + cir.return + } +} + From d5df9f6f79c9dd0da714062a96535d166900c2b4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Jul 2023 13:57:25 -0700 Subject: [PATCH 1049/2301] [CIR][CIRGen] VisitUnaryExprOrTypeTraitExpr: implement it for scalars --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 ++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 24 +++++++++++++++++++--- clang/test/CIR/CodeGen/basic.cpp | 17 ++++++++++++++- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index f7dc0d350d52..f4b4c09c9e5d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -23,6 +23,7 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" +#include "llvm/ADT/APSInt.h" #include "llvm/ADT/FloatingPointMode.h" #include "llvm/Support/ErrorHandling.h" @@ -286,6 +287,13 @@ class CIRGenBuilderTy : public mlir::OpBuilder { uint64_t C) { return create(loc, t, mlir::cir::IntAttr::get(t, C)); } + mlir::cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal) { + bool isSigned = intVal.isSigned(); + auto width = intVal.getBitWidth(); + mlir::cir::IntType t = isSigned ? getSIntNTy(width) : getUIntNTy(width); + return getConstInt( + loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); + } mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), getCIRBoolAttr(state)); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index a81da44cdb4e..0adaa6ca95eb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -186,9 +186,7 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitOffsetOfExpr(OffsetOfExpr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E) { - llvm_unreachable("NYI"); - } + mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E); mlir::Value VisitAddrLabelExpr(const AddrLabelExpr *E) { llvm_unreachable("NYI"); } @@ -2152,3 +2150,23 @@ mlir::Value ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { return Val; } + +/// Return the size or alignment of the type of argument of the sizeof +/// expression as an integer. +mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( + const UnaryExprOrTypeTraitExpr *E) { + QualType TypeToSize = E->getTypeOfArgument(); + if (E->getKind() == UETT_SizeOf) { + if (const VariableArrayType *VAT = + CGF.getContext().getAsVariableArrayType(TypeToSize)) { + llvm_unreachable("NYI"); + } + } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) { + llvm_unreachable("NYI"); + } + + // If this isn't sizeof(vla), the result must be constant; use the constant + // folding logic so we don't have to duplicate it here. + return Builder.getConstInt(CGF.getLoc(E->getSourceRange()), + E->EvaluateKnownConstInt(CGF.getContext())); +} diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 2e29c1b94c22..12bc06700a96 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s int *p0() { @@ -162,6 +162,21 @@ void x() { // CHECK: %3 = cir.const(#false) : !cir.bool // CHECK: cir.store %3, %1 : !cir.bool, cir.ptr +typedef unsigned long size_type; +typedef unsigned long _Tp; + +size_type max_size() { + return size_type(~0) / sizeof(_Tp); +} + +// CHECK: cir.func @_Z8max_sizev() +// CHECK: %0 = cir.alloca !u64i, cir.ptr , ["__retval"] {alignment = 8 : i64} +// CHECK: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %2 = cir.unary(not, %1) : !s32i, !s32i +// CHECK: %3 = cir.cast(integral, %2 : !s32i), !u64i +// CHECK: %4 = cir.const(#cir.int<8> : !u64i) : !u64i +// CHECK: %5 = cir.binop(div, %3, %4) : !u64i + // CHECK-DAG: #[[locScope]] = loc(fused[#[[locScopeA:loc[0-9]+]], #[[locScopeB:loc[0-9]+]]]) // CHECK-DAG: #[[locScopeA]] = loc("{{.*}}basic.cpp":27:3) // CHECK-DAG: #[[locScopeB]] = loc("{{.*}}basic.cpp":31:3) \ No newline at end of file From 69769fb93a715a8a8e26727cb9c507cb33be2db1 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 5 Jul 2023 18:12:31 -0300 Subject: [PATCH 1050/2301] [CIR][Lowering] Handle empty scope operations Guards cir.scope lowering against empty scopes avoiding segfaults. Also removes empty cir.scope operations on MergeCleanups pass. ghstack-source-id: c4539552568d451ae0bf220ea6bde9a44fc66b8d Pull Request resolved: https://github.com/llvm/clangir/pull/144 --- clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp | 14 +++++++++++--- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 ++++++ clang/test/CIR/Lowering/scope.cir | 14 +++++++++++++- clang/test/CIR/Transforms/merge-cleanups.cir | 12 +++++++++++- 4 files changed, 41 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index 71e4aa0c1761..7aab40b23aa4 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -15,6 +15,7 @@ #include "mlir/IR/Matchers.h" #include "mlir/IR/PatternMatch.h" +#include "mlir/Support/LogicalResult.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" using namespace mlir; @@ -156,10 +157,17 @@ template <> mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp(PatternRewriter &rewriter, ScopeOp scopeOp) const { - auto regionChanged = mlir::failure(); + // Scope region empty: just remove scope. + if (scopeOp.getRegion().empty()) { + rewriter.eraseOp(scopeOp); + return mlir::success(); + } + + // Scope region non-empty: clean it up. if (checkAndRewriteRegion(scopeOp.getRegion(), rewriter).succeeded()) - regionChanged = mlir::success(); - return regionChanged; + return mlir::success(); + + return mlir::failure(); } template <> diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ae1b568f3d22..3678fc246673 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -456,6 +456,12 @@ class CIRScopeOpLowering mlir::OpBuilder::InsertionGuard guard(rewriter); auto loc = scopeOp.getLoc(); + // Empty scope: just remove it. + if (scopeOp.getRegion().empty()) { + rewriter.eraseOp(scopeOp); + return mlir::success(); + } + // Split the current block before the ScopeOp to create the inlining point. auto *currentBlock = rewriter.getInsertionBlock(); auto *remainingOpsBlock = diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index 726176688b79..993571b5d625 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -11,7 +11,6 @@ module { } cir.return } -} // MLIR: llvm.func @foo() { // MLIR-NEXT: llvm.br ^bb1 @@ -36,3 +35,16 @@ module { // LLVM-NEXT: 3: // LLVM-NEXT: ret void // LLVM-NEXT: } + + + // Should drop empty scopes. + cir.func @empty_scope() { + cir.scope { + } + cir.return + } + // MLIR: llvm.func @empty_scope() { + // MLIR-NEXT: llvm.return + // MLIR-NEXT: } + +} diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index f3d056ed837a..0752215499f3 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -100,7 +100,6 @@ module { } cir.return } -} // CHECK: cir.switch (%4 : !s32i) [ // CHECK-NEXT: case (equal, #cir.int<0> : !s32i) { @@ -164,3 +163,14 @@ module { // CHECK-NEXT: } // CHECK-NEXT: cir.return // CHECK-NEXT: } + + // Should remove empty scopes. + cir.func @removeEmptyScope() { + cir.scope { + } + cir.return + } + // CHECK: cir.func @removeEmptyScope + // CHECK-NEXT: cir.return + +} From 7062676458cb08eee32827e96c5cbe51d5da820e Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 5 Jul 2023 18:12:31 -0300 Subject: [PATCH 1051/2301] [CIR][Lowering] Lower structured while loops Essentially converts a `cir.loop` op of the `while` kind to a CFG. The implementation, however, was only tested with structured loops, so if breaks, continues, or returns are found in the body, it is likely to break. ghstack-source-id: 32d262436d11d70fb6d2be23b63ba37ed01b9c1f Pull Request resolved: https://github.com/llvm/clangir/pull/145 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 77 ++++++++++++++++++- clang/test/CIR/Lowering/{for.cir => loop.cir} | 64 ++++++++++++++- 2 files changed, 138 insertions(+), 3 deletions(-) rename clang/test/CIR/Lowering/{for.cir => loop.cir} (58%) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3678fc246673..59fe144ad6da 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -34,6 +34,7 @@ #include "mlir/IR/BuiltinDialect.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/IRMapping.h" +#include "mlir/IR/Operation.h" #include "mlir/IR/Value.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" @@ -111,12 +112,86 @@ class CIRPtrStrideOpLowering class CIRLoopOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; + using LoopKind = mlir::cir::LoopOpKind; + + mlir::LogicalResult + fetchCondRegionYields(mlir::Region &condRegion, + mlir::cir::YieldOp &yieldToBody, + mlir::cir::YieldOp &yieldToCont) const { + for (auto &bb : condRegion) { + if (auto yieldOp = dyn_cast(bb.getTerminator())) { + if (!yieldOp.getKind().has_value()) + yieldToCont = yieldOp; + else if (yieldOp.getKind() == mlir::cir::YieldOpKind::Continue) + yieldToBody = yieldOp; + else + return mlir::failure(); + } + } + + // Succeed only if both yields are found. + if (!yieldToBody || !yieldToCont) + return mlir::failure(); + return mlir::success(); + } + + mlir::LogicalResult + rewriteWhileLoop(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto *currentBlock = rewriter.getInsertionBlock(); + auto *continueBlock = + rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); + + // Fetch required info from the condition region. + auto &condRegion = loopOp.getCond(); + auto &condFrontBlock = condRegion.front(); + mlir::cir::YieldOp yieldToBody, yieldToCont; + if (fetchCondRegionYields(condRegion, yieldToBody, yieldToCont).failed()) + return loopOp.emitError("failed to fetch yields in cond region"); + + // Fetch required info from the condition region. + auto &bodyRegion = loopOp.getBody(); + auto &bodyFrontBlock = bodyRegion.front(); + auto bodyYield = + dyn_cast(bodyRegion.back().getTerminator()); + assert(bodyYield && "unstructured while loops are NYI"); + + // Move loop op region contents to current CFG. + rewriter.inlineRegionBefore(condRegion, continueBlock); + rewriter.inlineRegionBefore(bodyRegion, continueBlock); + + // Set loop entry point to condition block. + rewriter.setInsertionPointToEnd(currentBlock); + rewriter.create(loopOp.getLoc(), &condFrontBlock); + + // Set loop exit point to continue block. + rewriter.setInsertionPoint(yieldToCont); + rewriter.replaceOpWithNewOp(yieldToCont, continueBlock); + + // Branch from condition to body. + rewriter.setInsertionPoint(yieldToBody); + rewriter.replaceOpWithNewOp(yieldToBody, &bodyFrontBlock); + + // Branch from body to condition. + rewriter.setInsertionPoint(bodyYield); + rewriter.replaceOpWithNewOp(bodyYield, &condFrontBlock); + + // Remove the loop op. + rewriter.eraseOp(loopOp); + return mlir::success(); + } mlir::LogicalResult matchAndRewrite(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - if (loopOp.getKind() != mlir::cir::LoopOpKind::For) + switch (loopOp.getKind()) { + case LoopKind::For: + break; + case LoopKind::While: + return rewriteWhileLoop(loopOp, adaptor, rewriter); + case LoopKind::DoWhile: llvm_unreachable("NYI"); + } auto loc = loopOp.getLoc(); diff --git a/clang/test/CIR/Lowering/for.cir b/clang/test/CIR/Lowering/loop.cir similarity index 58% rename from clang/test/CIR/Lowering/for.cir rename to clang/test/CIR/Lowering/loop.cir index 659217659789..05105ab19462 100644 --- a/clang/test/CIR/Lowering/for.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -27,7 +27,6 @@ module { } cir.return } -} // MLIR: module { // MLIR-NEXT: llvm.func @foo() { @@ -61,7 +60,6 @@ module { // MLIR-NEXT: ^bb6: // pred: ^bb3 // MLIR-NEXT: llvm.return // MLIR-NEXT: } -// MLIR-NEXT: } // LLVM: define void @foo() { // LLVM-NEXT: %1 = alloca i32, i64 1, align 4 @@ -95,3 +93,65 @@ module { // LLVM-NEXT: 15: // LLVM-NEXT: ret void // LLVM-NEXT: } + + // Test while cir.loop operation lowering. + cir.func @testWhile(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.scope { + cir.loop while(cond : { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i + %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool + cir.brcond %4 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + cir.yield + }) { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.unary(inc, %1) : !s32i, !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.yield + } + } + cir.return + } + + // MLIR: llvm.func @testWhile(%arg0: i32) { + // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 + // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr + // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr + // MLIR-NEXT: llvm.br ^bb1 + // MLIR-NEXT: ^bb1: + // MLIR-NEXT: llvm.br ^bb2 + // ============= Condition block ============= + // MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb5 + // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %3 = llvm.mlir.constant(10 : i32) : i32 + // MLIR-NEXT: %4 = llvm.icmp "slt" %2, %3 : i32 + // MLIR-NEXT: %5 = llvm.zext %4 : i1 to i32 + // MLIR-NEXT: %6 = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: %7 = llvm.icmp "ne" %5, %6 : i32 + // MLIR-NEXT: %8 = llvm.zext %7 : i1 to i8 + // MLIR-NEXT: %9 = llvm.trunc %8 : i8 to i1 + // MLIR-NEXT: llvm.cond_br %9, ^bb3, ^bb4 + // MLIR-NEXT: ^bb3: // pred: ^bb2 + // MLIR-NEXT: llvm.br ^bb5 + // MLIR-NEXT: ^bb4: // pred: ^bb2 + // MLIR-NEXT: llvm.br ^bb6 + // ============= Body block ============= + // MLIR-NEXT: ^bb5: // pred: ^bb3 + // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %11 = llvm.mlir.constant(1 : i32) : i32 + // MLIR-NEXT: %12 = llvm.add %10, %11 : i32 + // MLIR-NEXT: llvm.store %12, %1 : i32, !llvm.ptr + // MLIR-NEXT: llvm.br ^bb2 + // ============= Exit block ============= + // MLIR-NEXT: ^bb6: // pred: ^bb4 + // MLIR-NEXT: llvm.br ^bb7 + +} From 4a74683274f97150de772053de7ee1750b99f7d1 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 5 Jul 2023 18:12:31 -0300 Subject: [PATCH 1052/2301] [CIR][Lowering] Lower structured do-while loops Conditionally set the loop entry point depending on whether the loop is of the `do-while` kind or not. ghstack-source-id: 711f4befd051a68a70bd099982b31d7cad3f5a71 Pull Request resolved: https://github.com/llvm/clangir/pull/146 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11 ++-- clang/test/CIR/Lowering/loop.cir | 60 +++++++++++++++++++ 2 files changed, 66 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 59fe144ad6da..13938bc86129 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -137,7 +137,8 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { mlir::LogicalResult rewriteWhileLoop(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const { + mlir::ConversionPatternRewriter &rewriter, + mlir::cir::LoopOpKind kind) const { auto *currentBlock = rewriter.getInsertionBlock(); auto *continueBlock = rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); @@ -160,9 +161,10 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { rewriter.inlineRegionBefore(condRegion, continueBlock); rewriter.inlineRegionBefore(bodyRegion, continueBlock); - // Set loop entry point to condition block. + // Set loop entry point to condition or to body in do-while cases. rewriter.setInsertionPointToEnd(currentBlock); - rewriter.create(loopOp.getLoc(), &condFrontBlock); + auto &entry = (kind != LoopKind::DoWhile ? condFrontBlock : bodyFrontBlock); + rewriter.create(loopOp.getLoc(), &entry); // Set loop exit point to continue block. rewriter.setInsertionPoint(yieldToCont); @@ -188,9 +190,8 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { case LoopKind::For: break; case LoopKind::While: - return rewriteWhileLoop(loopOp, adaptor, rewriter); case LoopKind::DoWhile: - llvm_unreachable("NYI"); + return rewriteWhileLoop(loopOp, adaptor, rewriter, loopOp.getKind()); } auto loc = loopOp.getLoc(); diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index 05105ab19462..ffadc539b323 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -154,4 +154,64 @@ module { // MLIR-NEXT: ^bb6: // pred: ^bb4 // MLIR-NEXT: llvm.br ^bb7 + // Test do-while cir.loop operation lowering. + cir.func @testDoWhile(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.scope { + cir.loop dowhile(cond : { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i + %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool + cir.brcond %4 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + cir.yield + }) { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.unary(inc, %1) : !s32i, !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.yield + } + } + cir.return + } + + // MLIR: llvm.func @testDoWhile(%arg0: i32) { + // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 + // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr + // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr + // MLIR-NEXT: llvm.br ^bb1 + // MLIR-NEXT: ^bb1: + // MLIR-NEXT: llvm.br ^bb5 + // ============= Condition block ============= + // MLIR-NEXT: ^bb2: + // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %3 = llvm.mlir.constant(10 : i32) : i32 + // MLIR-NEXT: %4 = llvm.icmp "slt" %2, %3 : i32 + // MLIR-NEXT: %5 = llvm.zext %4 : i1 to i32 + // MLIR-NEXT: %6 = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: %7 = llvm.icmp "ne" %5, %6 : i32 + // MLIR-NEXT: %8 = llvm.zext %7 : i1 to i8 + // MLIR-NEXT: %9 = llvm.trunc %8 : i8 to i1 + // MLIR-NEXT: llvm.cond_br %9, ^bb3, ^bb4 + // MLIR-NEXT: ^bb3: + // MLIR-NEXT: llvm.br ^bb5 + // MLIR-NEXT: ^bb4: + // MLIR-NEXT: llvm.br ^bb6 + // ============= Body block ============= + // MLIR-NEXT: ^bb5: + // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %11 = llvm.mlir.constant(1 : i32) : i32 + // MLIR-NEXT: %12 = llvm.add %10, %11 : i32 + // MLIR-NEXT: llvm.store %12, %1 : i32, !llvm.ptr + // MLIR-NEXT: llvm.br ^bb2 + // ============= Exit block ============= + // MLIR-NEXT: ^bb6: + // MLIR-NEXT: llvm.br ^bb7 + } From 6791e80f8d7905cbf7772a6541195b1e4ed65229 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 5 Jul 2023 18:12:32 -0300 Subject: [PATCH 1053/2301] [CIR] Add reconcile unrealized casts pass ghstack-source-id: 8e98e125c9dc41b134e3270f5c453884309d90e6 Pull Request resolved: https://github.com/llvm/clangir/pull/147 --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 5 +++++ clang/test/CIR/Lowering/tenary.cir | 8 +++----- clang/tools/cir-tool/cir-tool.cpp | 5 +++++ 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 13938bc86129..9d87586f9737 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1436,6 +1436,11 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, // emission directly from our frontend. pm.addPass(mlir::LLVM::createDIScopeForLLVMFuncOpPass()); + // FIXME(cir): this shouldn't be necessary. It's meant to be a temporary + // workaround until we understand why some unrealized casts are being emmited + // and how to properly avoid them. + pm.addPass(mlir::createReconcileUnrealizedCastsPass()); + (void)mlir::applyPassManagerCLOptions(pm); auto result = !mlir::failed(pm.run(theModule)); diff --git a/clang/test/CIR/Lowering/tenary.cir b/clang/test/CIR/Lowering/tenary.cir index 6452aff25f43..9f5149342f99 100644 --- a/clang/test/CIR/Lowering/tenary.cir +++ b/clang/test/CIR/Lowering/tenary.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-tool %s -cir-to-llvm -reconcile-unrealized-casts -o - | FileCheck %s -check-prefix=MLIR !s32i = !cir.int @@ -23,8 +23,7 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { } } -// MLIR: module { -// MLIR: llvm.func @_Z1xi(%arg0: i32) -> i32 { +// MLIR: llvm.func @_Z1xi(%arg0: i32) -> i32 { // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64 @@ -48,5 +47,4 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: llvm.store %11, %3 : i32, !llvm.ptr // MLIR-NEXT: %12 = llvm.load %3 : !llvm.ptr // MLIR-NEXT: llvm.return %12 : i32 -// MLIR-NEXT: } -// MLIR-NEXT: } +// MLIR-NEXT: } diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-tool/cir-tool.cpp index ef01f6a81707..0b3d5354b34f 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-tool/cir-tool.cpp @@ -12,6 +12,7 @@ // //===----------------------------------------------------------------------===// +#include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -46,6 +47,10 @@ int main(int argc, char **argv) { return cir::direct::createConvertCIRToLLVMPass(); }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return mlir::createReconcileUnrealizedCastsPass(); + }); + mlir::registerTransformsPasses(); return failed(MlirOptMain( From c58a4968eb6e6484171a23fab33ae03b3c6b9523 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 6 Jul 2023 01:34:22 -0400 Subject: [PATCH 1054/2301] [CIR][Rebase] Fix a bunch of llvm::Optional -> std::optional issues --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 4 ++-- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 8e094041c1f9..cc146c2b10ce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1268,7 +1268,7 @@ void CIRGenFunction::buildDeclRefExprDbgValue(const DeclRefExpr *E, assert(!UnimplementedFeature::generateDebugInfo()); } -Address CIRGenFunction::buildVAListRef(const Expr* E) { +Address CIRGenFunction::buildVAListRef(const Expr *E) { if (getContext().getBuiltinVaListType()->isArrayType()) return buildPointerWithAlignment(E); return buildLValue(E).getAddress(); @@ -1351,4 +1351,4 @@ void CIRGenFunction::checkTargetFeatures(SourceLocation Loc, << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey(); } } -} \ No newline at end of file +} diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index f7a57c1aa5c4..11a30ba80b77 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -417,7 +417,6 @@ struct LifetimeCheckPass : public LifetimeCheckBase { /// ----------- std::optional astCtx; - void setASTContext(clang::ASTContext *c) { astCtx = c; } }; } // namespace From e6e3ad90442853253f246704af08292b4e47325f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 6 Jul 2023 01:34:54 -0400 Subject: [PATCH 1055/2301] [CIR][Rebase] XFAIL a few tests globals.cir should be fixed, goto.cir is dead given that we aren't supporting ThroughMLIR --- clang/test/CIR/Lowering/globals.cir | 1 + clang/test/CIR/Lowering/goto.cir | 1 + 2 files changed, 2 insertions(+) diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index df9ffcf100f9..745fa98e13e7 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -1,6 +1,7 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM // XFAIL: * + !s16i = !cir.int !s32i = !cir.int !s64i = !cir.int diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 8ccaca8ca0b5..b6f22409951d 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -1,5 +1,6 @@ // RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + !u32i = !cir.int module { From 29efb8d905706326248526e9818c051f08c98bf2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 6 Jul 2023 14:59:29 -0400 Subject: [PATCH 1056/2301] [CIR][Lowering] Add dep for CIREnumsGen --- clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index 809877e09dc1..d5d01c56d102 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -10,6 +10,7 @@ add_clang_library(clangCIRLoweringDirectToLLVM LowerToLLVM.cpp DEPENDS + MLIRCIREnumsGen MLIRCIROpsIncGen LINK_LIBS From 8976bf9cee163ac1493447ac2b15cc643074e9da Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 6 Jul 2023 10:39:11 -0700 Subject: [PATCH 1057/2301] [CIR] Add MLIRBuiltinLocationAttributesIncGen to deps for FrontendTool --- clang/lib/FrontendTool/CMakeLists.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index e475f59eb7cf..c2f7c0150532 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -12,12 +12,18 @@ set(link_libs clangRewriteFrontend ) +set(deps) + if(CLANG_ENABLE_CIR) list(APPEND link_libs clangCIRFrontendAction MLIRIR MLIRPass ) + list(APPEND deps + MLIRBuiltinLocationAttributesIncGen + ) + include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) endif() @@ -39,6 +45,7 @@ add_clang_library(clangFrontendTool DEPENDS ClangDriverOptions + ${deps} LINK_LIBS ${link_libs} From 7ffaf84e7740b5b3aaadf0ac412e5182723814c6 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 3 Jul 2023 20:54:33 -0300 Subject: [PATCH 1058/2301] [CIR][CodeGen] Support conditional result implicit cast In C, whenever we logically compare two values, we may store the result in any variable type. In this case, the boolean result should be cast to whatever type the variable is. This patch adds support for this. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 13 +++++++++++-- clang/test/CIR/CodeGen/binop.c | 13 +++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/binop.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index f4b4c09c9e5d..674c4c162b66 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -466,11 +466,20 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::CastKind::ptr_to_int, src); } + // TODO(cir): the following function was introduced to keep in sync with LLVM + // codegen. CIR does not have "zext" operations. It should eventually be + // renamed or removed. For now, we just add whatever cast is required here. mlir::Value createZExtOrBitCast(mlir::Location loc, mlir::Value src, mlir::Type newTy) { - if (src.getType() == newTy) + auto srcTy = src.getType(); + + if (srcTy == newTy) return src; - llvm_unreachable("NYI"); + + if (srcTy.isa() && newTy.isa()) + return createBoolToInt(src, newTy); + + llvm_unreachable("unhandled extension cast"); } mlir::Value createBoolToInt(mlir::Value src, mlir::Type newTy) { diff --git a/clang/test/CIR/CodeGen/binop.c b/clang/test/CIR/CodeGen/binop.c new file mode 100644 index 000000000000..bc5093e43ac4 --- /dev/null +++ b/clang/test/CIR/CodeGen/binop.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void conditionalResultIimplicitCast(int a, int b, float f) { + // Should implicit cast back to int. + int x = a && b; + // CHECK: %[[#INT:]] = cir.ternary + // CHECK: %{{.+}} = cir.cast(bool_to_int, %[[#INT]] : !cir.bool), !s32i + float y = f && f; + // CHECK: %[[#BOOL:]] = cir.ternary + // CHECK: %[[#INT:]] = cir.cast(bool_to_int, %[[#BOOL]] : !cir.bool), !s32i + // CHECK: %{{.+}} = cir.cast(int_to_float, %[[#INT]] : !s32i), f32 +} From 7260b81a1785f75ce5060b0908936fcf215a8a79 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 7 Jul 2023 10:43:24 -0300 Subject: [PATCH 1059/2301] [CIR][Lowering] Fix CIR casts lowering Casts from one integer type to another, should not be lowered to LLVM if the integers are of the same size. These may represent sign drops or inclusion in CIR but do not make sense in LLVM. This patch fixes that. ghstack-source-id: 0b028504f8f61683a85fc5fce573175ded3cc725 Pull Request resolved: https://github.com/llvm/clangir/pull/154 --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 +++++- clang/test/CIR/CodeGen/cast.cpp | 9 +++++++++ clang/test/CIR/Lowering/cast.cir | 4 ++++ clang/test/CIR/Lowering/globals.cir | 1 - 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 9d87586f9737..b42d91ce941d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -341,13 +341,17 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { if (dstType.getWidth() < srcType.getWidth()) { rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); - } else { + } + // Target integer is larger: sign extend or zero extend. + else if (dstType.getWidth() > srcType.getWidth()) { if (srcType.isUnsigned()) rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); else rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); + } else { // Target integer is of the same size: do nothing. + rewriter.replaceOp(castOp, llvmSrcVal); } break; } diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 1e72961cc3dd..7ca5f13a4c52 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -32,6 +32,15 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4) { long long d = (long long)x2; // sign extend // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !s64i + unsigned ui = (unsigned)x2; // sign drop + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !u32i + + int si = (int)x1; // sign add + // CHECK: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s32i + + unsigned uu = (unsigned)x1; // should not be generated + // CHECK-NOT: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !u32i + int arr[3]; int* e = (int*)arr; // explicit pointer decay // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %{{[0-9]+}} : !cir.ptr>), !cir.ptr diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 4b7fa47316f5..bff61e541701 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -57,6 +57,10 @@ module { %15 = cir.load %1 : cir.ptr , !s32i %16 = cir.cast(integral, %15 : !s32i), !s64i // MLIR: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 + %30 = cir.cast(integral, %arg1 : !s32i), !u32i + // Should not produce a cast. + %32 = cir.cast(integral, %arg0 : !u32i), !s32i + // Should not produce a cast. cir.store %16, %6 : !s64i, cir.ptr %17 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 745fa98e13e7..c6e2486f1454 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -1,6 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -// XFAIL: * !s16i = !cir.int !s32i = !cir.int From 76fca7a2cc065933ae86a4314b96981c16c841e7 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 7 Jul 2023 10:43:28 -0300 Subject: [PATCH 1060/2301] [CIR] Remove the Executables tests folder We should not have tests that depend on target-specific executables. This patch removes the 'hello.c' executable test and converts it to both a codegen and a lowering test. ghstack-source-id: caa8f3aa01bbc89693ac8724ad7fac1a96114f1a Pull Request resolved: https://github.com/llvm/clangir/pull/155 --- clang/test/CIR/CodeGen/hello.c | 22 +++++++++++++++++++ clang/test/CIR/Executables/hello.c | 11 ---------- clang/test/CIR/Lowering/hello.cir | 35 ++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/CodeGen/hello.c delete mode 100644 clang/test/CIR/Executables/hello.c create mode 100644 clang/test/CIR/Lowering/hello.cir diff --git a/clang/test/CIR/CodeGen/hello.c b/clang/test/CIR/CodeGen/hello.c new file mode 100644 index 000000000000..b55fd228c9e2 --- /dev/null +++ b/clang/test/CIR/CodeGen/hello.c @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +int printf(const char *restrict, ...); + +int main (void) { + printf ("Hello, world!\n"); + return 0; +} + +// CHECK: cir.func private @printf(!cir.ptr, ...) -> !s32i +// CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.func @main() -> !s32i { +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.get_global @printf : cir.ptr , ...)>> +// CHECK: %2 = cir.get_global @".str" : cir.ptr > +// CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr +// CHECK: %4 = cir.call @printf(%3) : (!cir.ptr) -> !s32i +// CHECK: %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.store %5, %0 : !s32i, cir.ptr +// CHECK: %6 = cir.load %0 : cir.ptr , !s32i +// CHECK: cir.return %6 : !s32i +// CHECK: } diff --git a/clang/test/CIR/Executables/hello.c b/clang/test/CIR/Executables/hello.c deleted file mode 100644 index ea3415c5df85..000000000000 --- a/clang/test/CIR/Executables/hello.c +++ /dev/null @@ -1,11 +0,0 @@ -// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fclangir-direct-lowering -o %t %s -// RUN: %t | FileCheck %s -// REQUIRES: system-linux -// REQUIRES: target-linux -int printf(const char *restrict, ...); - -int main (void) { - printf ("Hello, world!\n"); - // CHECK: Hello, world! - return 0; -} diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir new file mode 100644 index 000000000000..8603eefef19d --- /dev/null +++ b/clang/test/CIR/Lowering/hello.cir @@ -0,0 +1,35 @@ +// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s +// XFAIL: * + +!s32i = !cir.int +!s8i = !cir.int +module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior, dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i32>>, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"} { + cir.func private @printf(!cir.ptr, ...) -> !s32i + cir.global "private" constant internal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.func @main() -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %1 = cir.get_global @printf : cir.ptr , ...)>> + %2 = cir.get_global @".str" : cir.ptr > + %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + %4 = cir.call @printf(%3) : (!cir.ptr) -> !s32i + %5 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %5, %0 : !s32i, cir.ptr + %6 = cir.load %0 : cir.ptr , !s32i + cir.return %6 : !s32i + } +} + +// CHECK: llvm.func @printf(!llvm.ptr, ...) -> i32 +// CHECK: llvm.mlir.global internal constant @".str"("Hello, world!\0A\00") {addr_space = 0 : i32} +// CHECK: llvm.func @main() -> i32 { +// CHECK: %0 = llvm.mlir.constant(1 : index) : i64 +// CHECK: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// CHECK: %2 = llvm.mlir.addressof @".str" : !llvm.ptr +// CHECK: %3 = llvm.getelementptr %2[0] : (!llvm.ptr) -> !llvm.ptr +// CHECK: %4 = llvm.call @printf(%3) : (!llvm.ptr) -> i32 +// CHECK: %5 = llvm.mlir.constant(0 : i32) : i32 +// CHECK: llvm.store %5, %1 : i32, !llvm.ptr +// CHECK: %6 = llvm.load %1 : !llvm.ptr +// CHECK: llvm.return %6 : i32 +// CHECK: } From d689312404d31c7dd52780d087d73a8c9e805052 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 7 Jul 2023 10:43:31 -0300 Subject: [PATCH 1061/2301] [CIR][Lowering][Bugfix] Refactor for loop lowering This refactor merges the lowering logic of all the different kinds of loops into a single function. It also removes unnecessary LIT tests that validate LLVM dialect to LLVM IR lowering, as this functionality is not within CIR's scope. Fixes #153 ghstack-source-id: ebaab859057a6d81f1978fd88701c28402712562 Pull Request resolved: https://github.com/llvm/clangir/pull/156 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 110 ++++-------------- clang/test/CIR/Lowering/dot.cir | 100 +++------------- clang/test/CIR/Lowering/loop.cir | 49 ++------ 3 files changed, 50 insertions(+), 209 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b42d91ce941d..dc2d5f037c29 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -135,10 +135,9 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { return mlir::success(); } - mlir::LogicalResult - rewriteWhileLoop(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter, - mlir::cir::LoopOpKind kind) const { + mlir::LogicalResult rewriteLoop(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::cir::LoopOpKind kind) const { auto *currentBlock = rewriter.getInsertionBlock(); auto *continueBlock = rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); @@ -150,16 +149,24 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { if (fetchCondRegionYields(condRegion, yieldToBody, yieldToCont).failed()) return loopOp.emitError("failed to fetch yields in cond region"); - // Fetch required info from the condition region. + // Fetch required info from the body region. auto &bodyRegion = loopOp.getBody(); auto &bodyFrontBlock = bodyRegion.front(); auto bodyYield = dyn_cast(bodyRegion.back().getTerminator()); assert(bodyYield && "unstructured while loops are NYI"); + // Fetch required info from the step region. + auto &stepRegion = loopOp.getStep(); + auto &stepFrontBlock = stepRegion.front(); + auto stepYield = + dyn_cast(stepRegion.back().getTerminator()); + // Move loop op region contents to current CFG. rewriter.inlineRegionBefore(condRegion, continueBlock); rewriter.inlineRegionBefore(bodyRegion, continueBlock); + if (kind == LoopKind::For) // Ignore step if not a for-loop. + rewriter.inlineRegionBefore(stepRegion, continueBlock); // Set loop entry point to condition or to body in do-while cases. rewriter.setInsertionPointToEnd(currentBlock); @@ -174,9 +181,16 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { rewriter.setInsertionPoint(yieldToBody); rewriter.replaceOpWithNewOp(yieldToBody, &bodyFrontBlock); - // Branch from body to condition. + // Branch from body to condition or to step on for-loop cases. rewriter.setInsertionPoint(bodyYield); - rewriter.replaceOpWithNewOp(bodyYield, &condFrontBlock); + auto &bodyExit = (kind == LoopKind::For ? stepFrontBlock : condFrontBlock); + rewriter.replaceOpWithNewOp(bodyYield, &bodyExit); + + // Is a for loop: branch from step to condition. + if (kind == LoopKind::For) { + rewriter.setInsertionPoint(stepYield); + rewriter.replaceOpWithNewOp(stepYield, &condFrontBlock); + } // Remove the loop op. rewriter.eraseOp(loopOp); @@ -188,91 +202,11 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { mlir::ConversionPatternRewriter &rewriter) const override { switch (loopOp.getKind()) { case LoopKind::For: - break; case LoopKind::While: case LoopKind::DoWhile: - return rewriteWhileLoop(loopOp, adaptor, rewriter, loopOp.getKind()); + return rewriteLoop(loopOp, adaptor, rewriter, loopOp.getKind()); } - auto loc = loopOp.getLoc(); - - auto *currentBlock = rewriter.getInsertionBlock(); - auto *remainingOpsBlock = - rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); - mlir::Block *continueBlock; - if (loopOp->getResults().size() == 0) - continueBlock = remainingOpsBlock; - else - llvm_unreachable("NYI"); - - auto &condRegion = loopOp.getCond(); - auto &condFrontBlock = condRegion.front(); - - auto &stepRegion = loopOp.getStep(); - auto &stepFrontBlock = stepRegion.front(); - auto &stepBackBlock = stepRegion.back(); - - auto &bodyRegion = loopOp.getBody(); - auto &bodyFrontBlock = bodyRegion.front(); - auto &bodyBackBlock = bodyRegion.back(); - - bool rewroteContinue = false; - bool rewroteBreak = false; - - for (auto &bb : condRegion) { - if (rewroteContinue && rewroteBreak) - break; - - if (auto yieldOp = dyn_cast(bb.getTerminator())) { - rewriter.setInsertionPointToEnd(yieldOp->getBlock()); - if (yieldOp.getKind().has_value()) { - switch (yieldOp.getKind().value()) { - case mlir::cir::YieldOpKind::Break: - case mlir::cir::YieldOpKind::Fallthrough: - case mlir::cir::YieldOpKind::NoSuspend: - llvm_unreachable("None of these should be present"); - case mlir::cir::YieldOpKind::Continue:; - rewriter.replaceOpWithNewOp( - yieldOp, yieldOp.getArgs(), &stepFrontBlock); - rewroteContinue = true; - } - } else { - rewriter.replaceOpWithNewOp( - yieldOp, yieldOp.getArgs(), continueBlock); - rewroteBreak = true; - } - } - } - - rewriter.inlineRegionBefore(condRegion, continueBlock); - - rewriter.inlineRegionBefore(stepRegion, continueBlock); - - if (auto stepYieldOp = - dyn_cast(stepBackBlock.getTerminator())) { - rewriter.setInsertionPointToEnd(stepYieldOp->getBlock()); - rewriter.replaceOpWithNewOp( - stepYieldOp, stepYieldOp.getArgs(), &bodyFrontBlock); - } else { - llvm_unreachable("What are we terminating with?"); - } - - rewriter.inlineRegionBefore(bodyRegion, continueBlock); - - if (auto bodyYieldOp = - dyn_cast(bodyBackBlock.getTerminator())) { - rewriter.setInsertionPointToEnd(bodyYieldOp->getBlock()); - rewriter.replaceOpWithNewOp( - bodyYieldOp, bodyYieldOp.getArgs(), &condFrontBlock); - } else { - llvm_unreachable("What are we terminating with?"); - } - - rewriter.setInsertionPointToEnd(currentBlock); - rewriter.create(loc, mlir::ValueRange(), &condFrontBlock); - - rewriter.replaceOp(loopOp, continueBlock->getArguments()); - return mlir::success(); } }; diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 22407d61e73e..238dcdc9abde 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR !s32i = !cir.int module { @@ -95,24 +95,24 @@ module { // MLIR-NEXT: ^bb4: // pred: ^bb2 // MLIR-NEXT: llvm.br ^bb7 // MLIR-NEXT: ^bb5: // pred: ^bb3 -// MLIR-NEXT: %22 = llvm.load %12 : !llvm.ptr -// MLIR-NEXT: %23 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: %24 = llvm.add %22, %23 : i32 -// MLIR-NEXT: llvm.store %24, %12 : i32, !llvm.ptr +// MLIR-NEXT: %22 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %23 = llvm.load %12 : !llvm.ptr +// MLIR-NEXT: %24 = llvm.getelementptr %22[%23] : (!llvm.ptr, i32) -> !llvm.ptr +// MLIR-NEXT: %25 = llvm.load %24 : !llvm.ptr +// MLIR-NEXT: %26 = llvm.load %3 : !llvm.ptr +// MLIR-NEXT: %27 = llvm.load %12 : !llvm.ptr +// MLIR-NEXT: %28 = llvm.getelementptr %26[%27] : (!llvm.ptr, i32) -> !llvm.ptr +// MLIR-NEXT: %29 = llvm.load %28 : !llvm.ptr +// MLIR-NEXT: %30 = llvm.fmul %25, %29 : f64 +// MLIR-NEXT: %31 = llvm.load %9 : !llvm.ptr +// MLIR-NEXT: %32 = llvm.fadd %31, %30 : f64 +// MLIR-NEXT: llvm.store %32, %9 : f64, !llvm.ptr // MLIR-NEXT: llvm.br ^bb6 // MLIR-NEXT: ^bb6: // pred: ^bb5 -// MLIR-NEXT: %25 = llvm.load %1 : !llvm.ptr -// MLIR-NEXT: %26 = llvm.load %12 : !llvm.ptr -// MLIR-NEXT: %27 = llvm.getelementptr %25[%26] : (!llvm.ptr, i32) -> !llvm.ptr -// MLIR-NEXT: %28 = llvm.load %27 : !llvm.ptr -// MLIR-NEXT: %29 = llvm.load %3 : !llvm.ptr -// MLIR-NEXT: %30 = llvm.load %12 : !llvm.ptr -// MLIR-NEXT: %31 = llvm.getelementptr %29[%30] : (!llvm.ptr, i32) -> !llvm.ptr -// MLIR-NEXT: %32 = llvm.load %31 : !llvm.ptr -// MLIR-NEXT: %33 = llvm.fmul %28, %32 : f64 -// MLIR-NEXT: %34 = llvm.load %9 : !llvm.ptr -// MLIR-NEXT: %35 = llvm.fadd %34, %33 : f64 -// MLIR-NEXT: llvm.store %35, %9 : f64, !llvm.ptr +// MLIR-NEXT: %33 = llvm.load %12 : !llvm.ptr +// MLIR-NEXT: %34 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: %35 = llvm.add %33, %34 : i32 +// MLIR-NEXT: llvm.store %35, %12 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 // MLIR-NEXT: ^bb7: // pred: ^bb4 // MLIR-NEXT: llvm.br ^bb8 @@ -123,67 +123,3 @@ module { // MLIR-NEXT: llvm.return %37 : f64 // MLIR-NEXT: } // MLIR-NEXT: } - -// LLVM: define double @dot(ptr %0, ptr %1, i32 %2) { -// LLVM-NEXT: %4 = alloca ptr, i64 1, align 8 -// LLVM-NEXT: %5 = alloca ptr, i64 1, align 8 -// LLVM-NEXT: %6 = alloca i32, i64 1, align 4 -// LLVM-NEXT: %7 = alloca double, i64 1, align 8 -// LLVM-NEXT: %8 = alloca double, i64 1, align 8 -// LLVM-NEXT: store ptr %0, ptr %4, align 8 -// LLVM-NEXT: store ptr %1, ptr %5, align 8 -// LLVM-NEXT: store i32 %2, ptr %6, align 4 -// LLVM-NEXT: store double 0.000000e+00, ptr %8, align 8 -// LLVM-NEXT: br label %9 -// LLVM-EMPTY: -// LLVM-NEXT: 9: ; preds = %3 -// LLVM-NEXT: %10 = alloca i32, i64 1, align 4 -// LLVM-NEXT: store i32 0, ptr %10, align 4 -// LLVM-NEXT: br label %11 -// LLVM-EMPTY: -// LLVM-NEXT: 11: ; preds = %24, %9 -// LLVM-NEXT: %12 = load i32, ptr %10, align 4 -// LLVM-NEXT: %13 = load i32, ptr %6, align 4 -// LLVM-NEXT: %14 = icmp slt i32 %12, %13 -// LLVM-NEXT: %15 = zext i1 %14 to i32 -// LLVM-NEXT: %16 = icmp ne i32 %15, 0 -// LLVM-NEXT: %17 = zext i1 %16 to i8 -// LLVM-NEXT: %18 = trunc i8 %17 to i1 -// LLVM-NEXT: br i1 %18, label %19, label %20 -// LLVM-EMPTY: -// LLVM-NEXT: 19: ; preds = %11 -// LLVM-NEXT: br label %21 -// LLVM-EMPTY: -// LLVM-NEXT: 20: ; preds = %11 -// LLVM-NEXT: br label %36 -// LLVM-EMPTY: -// LLVM-NEXT: 21: ; preds = %19 -// LLVM-NEXT: %22 = load i32, ptr %10, align 4 -// LLVM-NEXT: %23 = add i32 %22, 1 -// LLVM-NEXT: store i32 %23, ptr %10, align 4 -// LLVM-NEXT: br label %24 -// LLVM-EMPTY: -// LLVM-NEXT: 24: ; preds = %21 -// LLVM-NEXT: %25 = load ptr, ptr %4, align 8 -// LLVM-NEXT: %26 = load i32, ptr %10, align 4 -// LLVM-NEXT: %27 = getelementptr double, ptr %25, i32 %26 -// LLVM-NEXT: %28 = load double, ptr %27, align 8 -// LLVM-NEXT: %29 = load ptr, ptr %5, align 8 -// LLVM-NEXT: %30 = load i32, ptr %10, align 4 -// LLVM-NEXT: %31 = getelementptr double, ptr %29, i32 %30 -// LLVM-NEXT: %32 = load double, ptr %31, align 8 -// LLVM-NEXT: %33 = fmul double %28, %32 -// LLVM-NEXT: %34 = load double, ptr %8, align 8 -// LLVM-NEXT: %35 = fadd double %34, %33 -// LLVM-NEXT: store double %35, ptr %8, align 8 -// LLVM-NEXT: br label %11 -// LLVM-EMPTY: -// LLVM-NEXT: 36: ; preds = %20 -// LLVM-NEXT: br label %37 -// LLVM-EMPTY: -// LLVM-NEXT: 37: ; preds = %36 -// LLVM-NEXT: %38 = load double, ptr %8, align 8 -// LLVM-NEXT: store double %38, ptr %7, align 8 -// LLVM-NEXT: %39 = load double, ptr %7, align 8 -// LLVM-NEXT: ret double %39 -// LLVM-NEXT: } diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index ffadc539b323..e0a0d9840243 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -1,9 +1,9 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR !s32i = !cir.int module { - cir.func @foo() { + cir.func @testFor() { %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<0> : !s32i) : !s32i cir.store %1, %0 : !s32i, cir.ptr @@ -29,12 +29,13 @@ module { } // MLIR: module { -// MLIR-NEXT: llvm.func @foo() { +// MLIR-NEXT: llvm.func @testFor() { // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: llvm.store %2, %1 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb1 +// ============= Condition block ============= // MLIR-NEXT: ^bb1: // 2 preds: ^bb0, ^bb5 // MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr // MLIR-NEXT: %4 = llvm.mlir.constant(10 : i32) : i32 @@ -49,51 +50,21 @@ module { // MLIR-NEXT: llvm.br ^bb4 // MLIR-NEXT: ^bb3: // pred: ^bb1 // MLIR-NEXT: llvm.br ^bb6 +// ============= Body block ============= // MLIR-NEXT: ^bb4: // pred: ^bb2 +// MLIR-NEXT: llvm.br ^bb5 +// ============= Step block ============= +// MLIR-NEXT: ^bb5: // pred: ^bb4 // MLIR-NEXT: %11 = llvm.load %1 : !llvm.ptr // MLIR-NEXT: %12 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: %13 = llvm.add %11, %12 : i32 // MLIR-NEXT: llvm.store %13, %1 : i32, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb5 -// MLIR-NEXT: ^bb5: // pred: ^bb4 // MLIR-NEXT: llvm.br ^bb1 +// ============= Exit block ============= // MLIR-NEXT: ^bb6: // pred: ^bb3 // MLIR-NEXT: llvm.return // MLIR-NEXT: } -// LLVM: define void @foo() { -// LLVM-NEXT: %1 = alloca i32, i64 1, align 4 -// LLVM-NEXT: store i32 0, ptr %1, align 4 -// LLVM-NEXT: br label %2 -// LLVM-EMPTY: -// LLVM-NEXT: 2: -// LLVM-NEXT: %3 = load i32, ptr %1, align 4 -// LLVM-NEXT: %4 = icmp slt i32 %3, 10 -// LLVM-NEXT: %5 = zext i1 %4 to i32 -// LLVM-NEXT: %6 = icmp ne i32 %5, 0 -// LLVM-NEXT: %7 = zext i1 %6 to i8 -// LLVM-NEXT: %8 = trunc i8 %7 to i1 -// LLVM-NEXT: br i1 %8, label %9, label %10 -// LLVM-EMPTY: -// LLVM-NEXT: 9: -// LLVM-NEXT: br label %11 -// LLVM-EMPTY: -// LLVM-NEXT: 10: -// LLVM-NEXT: br label %15 -// LLVM-EMPTY: -// LLVM-NEXT: 11: -// LLVM-NEXT: %12 = load i32, ptr %1, align 4 -// LLVM-NEXT: %13 = add i32 %12, 1 -// LLVM-NEXT: store i32 %13, ptr %1, align 4 -// LLVM-NEXT: br label %14 -// LLVM-EMPTY: -// LLVM-NEXT: 14: -// LLVM-NEXT: br label %2 -// LLVM-EMPTY: -// LLVM-NEXT: 15: -// LLVM-NEXT: ret void -// LLVM-NEXT: } - // Test while cir.loop operation lowering. cir.func @testWhile(%arg0: !s32i) { %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} From c6def3abfdb632a26ba2040cbf09a0aeb2604567 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 7 Jul 2023 10:43:34 -0300 Subject: [PATCH 1062/2301] [CIR][Lowering][NFC] Inline rewriteLoop method ghstack-source-id: 16b236e1faea9f23e09e83bf26b9167f584de0ba Pull Request resolved: https://github.com/llvm/clangir/pull/157 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 20 ++++--------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index dc2d5f037c29..7037d8fcd29e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -135,9 +135,10 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { return mlir::success(); } - mlir::LogicalResult rewriteLoop(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter, - mlir::cir::LoopOpKind kind) const { + mlir::LogicalResult + matchAndRewrite(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto kind = loopOp.getKind(); auto *currentBlock = rewriter.getInsertionBlock(); auto *continueBlock = rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); @@ -196,19 +197,6 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { rewriter.eraseOp(loopOp); return mlir::success(); } - - mlir::LogicalResult - matchAndRewrite(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - switch (loopOp.getKind()) { - case LoopKind::For: - case LoopKind::While: - case LoopKind::DoWhile: - return rewriteLoop(loopOp, adaptor, rewriter, loopOp.getKind()); - } - - return mlir::success(); - } }; class CIRBrCondOpLowering From 37a8e52cb7d48275ac306a3f9599059cc132b42c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Jul 2023 17:10:59 -0700 Subject: [PATCH 1063/2301] [CIR][CIRGen] Add threshold for recursive buildDeferred --- clang/include/clang/Basic/CodeGenOptions.def | 5 +++++ clang/include/clang/Driver/Options.td | 4 ++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 9 ++++++--- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def index 1ab8c7fb4d3c..499fd493d2f7 100644 --- a/clang/include/clang/Basic/CodeGenOptions.def +++ b/clang/include/clang/Basic/CodeGenOptions.def @@ -465,6 +465,11 @@ CODEGENOPT(CtorDtorReturnThis, 1, 0) /// FIXME: Make DebugOptions its own top-level .def file. #include "DebugOptions.def" +/// ClangIR specific (internal): limits recursion depth for buildDeferred() +/// calls. This helps incremental progress while building large C++ TUs, once +/// CIRGen is mature we should probably remove it. +VALUE_CODEGENOPT(ClangIRBuildDeferredThreshold, 32, 500) + #undef CODEGENOPT #undef ENUM_CODEGENOPT #undef VALUE_CODEGENOPT diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 3912efc4020c..6d260aade53d 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3072,6 +3072,10 @@ def clangir_disable_emit_cxx_default : Flag<["-"], "clangir-disable-emit-cxx-def Visibility<[ClangOption, CC1Option]>, HelpText<"ClangIR: Disable emission of c++ default (compiler implemented) methods.">, MarshallingInfoFlag>; +def fclangir_disable_deferred_EQ : Joined<["-"], "fclangir-build-deferred-threshold=">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"ClangIR (internal): Control the recursion level for calls to buildDeferred (defaults to 500)">, + MarshallingInfoInt, "500u">; def clangir_verify_diagnostics : Flag<["-"], "clangir-verify-diagnostics">, Visibility<[ClangOption, CC1Option]>, HelpText<"ClangIR: Enable diagnostic verification in MLIR, similar to clang's -verify">, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 896c790b159e..f10367d6034d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2052,7 +2052,7 @@ void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { buildGlobalDefinition(D, Op); } -void CIRGenModule::buildDeferred() { +void CIRGenModule::buildDeferred(unsigned recursionLimit) { // Emit deferred declare target declarations if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) llvm_unreachable("NYI"); @@ -2085,6 +2085,8 @@ void CIRGenModule::buildDeferred() { // work, it will not interfere with this. std::vector CurDeclsToEmit; CurDeclsToEmit.swap(DeferredDeclsToEmit); + if (recursionLimit == 0) + return; for (auto &D : CurDeclsToEmit) { buildGlobalDecl(D); @@ -2093,7 +2095,8 @@ void CIRGenModule::buildDeferred() { // This has the advantage that the decls are emitted in a DFS and related // ones are close together, which is convenient for testing. if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) { - buildDeferred(); + recursionLimit--; + buildDeferred(recursionLimit); assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty()); } } @@ -2142,7 +2145,7 @@ CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { } void CIRGenModule::Release() { - buildDeferred(); + buildDeferred(getCodeGenOpts().ClangIRBuildDeferredThreshold); // TODO: buildVTablesOpportunistically(); // TODO: applyGlobalValReplacements(); applyReplacements(); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 2f51af73a42b..3e110beaccc0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -466,7 +466,7 @@ class CIRGenModule : public CIRGenTypeCache { std::nullptr_t getModuleDebugInfo() { return nullptr; } /// Emit any needed decls for which code generation was deferred. - void buildDeferred(); + void buildDeferred(unsigned recursionLimit); /// Helper for `buildDeferred` to apply actual codegen. void buildGlobalDecl(clang::GlobalDecl &D); From 3ed67a2c991341ac6aee17a5d5b92ce0c3f7f242 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Fri, 7 Jul 2023 18:09:15 -0700 Subject: [PATCH 1064/2301] [CIR][CIRGen][Lowering] Introduce, use and lower ExtraFuncAttr and InlineAttr (#134) Setting inline attributes based on user input and command line options. This is optional as functions do not need such an attribute will not get the attribute. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 49 ++++++++++++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 ++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 64 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 3 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 24 ++++++- .../DirectToLLVM/LowerAttrToLLVMIR.cpp | 22 +++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 38 ++++++++++- clang/test/CIR/CodeGen/String.cpp | 2 +- clang/test/CIR/CodeGen/agg-init.cpp | 8 +-- clang/test/CIR/CodeGen/array.cpp | 8 +-- clang/test/CIR/CodeGen/assign-operator.cpp | 2 +- clang/test/CIR/CodeGen/basic.c | 6 +- clang/test/CIR/CodeGen/basic.cpp | 14 ++-- clang/test/CIR/CodeGen/binassign.cpp | 2 +- clang/test/CIR/CodeGen/call.c | 16 ++--- clang/test/CIR/CodeGen/call.cpp | 4 +- clang/test/CIR/CodeGen/comma.cpp | 2 +- clang/test/CIR/CodeGen/dtors.cpp | 2 +- clang/test/CIR/CodeGen/fullexpr.cpp | 4 +- clang/test/CIR/CodeGen/globals.cpp | 8 +-- clang/test/CIR/CodeGen/goto.cpp | 4 +- clang/test/CIR/CodeGen/hello.c | 2 +- clang/test/CIR/CodeGen/inc-dec.cpp | 8 +-- clang/test/CIR/CodeGen/inlineAttr.cpp | 36 +++++++++++ clang/test/CIR/CodeGen/lambda.cpp | 11 ++-- clang/test/CIR/CodeGen/loop-scope.cpp | 4 +- clang/test/CIR/CodeGen/loop.cpp | 4 +- clang/test/CIR/CodeGen/lvalue-refs.cpp | 2 +- clang/test/CIR/CodeGen/move.cpp | 4 +- clang/test/CIR/CodeGen/no-proto-is-void.cpp | 2 +- clang/test/CIR/CodeGen/no-prototype.c | 2 +- clang/test/CIR/CodeGen/nrvo.cpp | 4 +- clang/test/CIR/CodeGen/predefined.cpp | 6 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 4 +- clang/test/CIR/CodeGen/store.c | 3 +- clang/test/CIR/CodeGen/struct.c | 2 +- clang/test/CIR/CodeGen/struct.cpp | 2 +- clang/test/CIR/CodeGen/types.c | 40 ++++++------ clang/test/CIR/CodeGen/unary.cpp | 16 ++--- clang/test/CIR/CodeGen/union.cpp | 4 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/IR/inlineAttr.cir | 11 ++++ clang/test/CIR/Lowering/array.cir | 4 +- clang/test/CIR/Lowering/binop-fp.cir | 2 +- .../test/CIR/Lowering/binop-unsigned-int.cir | 2 +- clang/test/CIR/Lowering/bool.cir | 4 +- clang/test/CIR/Lowering/branch.cir | 4 +- clang/test/CIR/Lowering/call.cir | 6 +- clang/test/CIR/Lowering/cast.cir | 4 +- clang/test/CIR/Lowering/cmp.cir | 2 +- clang/test/CIR/Lowering/dot.cir | 2 +- clang/test/CIR/Lowering/globals.cir | 2 +- clang/test/CIR/Lowering/goto.cir | 2 +- clang/test/CIR/Lowering/hello.cir | 2 +- clang/test/CIR/Lowering/if.cir | 6 +- clang/test/CIR/Lowering/loadstorealloca.cir | 4 +- clang/test/CIR/Lowering/loop.cir | 6 +- clang/test/CIR/Lowering/ptrstride.cir | 4 +- clang/test/CIR/Lowering/scope.cir | 6 +- clang/test/CIR/Lowering/tenary.cir | 2 +- clang/test/CIR/Lowering/unary-inc-dec.cir | 4 +- clang/test/CIR/Lowering/unary-not.cir | 2 +- clang/test/CIR/Lowering/unary-plus-minus.cir | 4 +- clang/test/CIR/driver.c | 2 +- 64 files changed, 389 insertions(+), 144 deletions(-) create mode 100644 clang/test/CIR/CodeGen/inlineAttr.cpp create mode 100644 clang/test/CIR/IR/inlineAttr.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 43117d72da7a..16c6b8ecec57 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -386,4 +386,53 @@ def ASTFunctionDeclAttr : ASTDecl<"FunctionDecl", "fndecl">; def ASTVarDeclAttr : ASTDecl<"VarDecl", "vardecl">; def ASTRecordDeclAttr : ASTDecl<"RecordDecl", "recdecl">; + +//===----------------------------------------------------------------------===// +// ExtraFuncAttr +//===----------------------------------------------------------------------===// + +def ExtraFuncAttr : CIR_Attr<"ExtraFuncAttributes", "extra"> { + let summary = "Represents aggregated attributes for a function"; + let description = [{ + This is a wrapper of dictionary attrbiute that contains extra attributes of + a function. + }]; + + let parameters = (ins "DictionaryAttr":$elements); + + let assemblyFormat = [{ `(` $elements `)` }]; + + // Printing and parsing also available in CIRDialect.cpp +} + + +def NoInline : I32EnumAttrCase<"NoInline", 1, "no">; +def AlwaysInline : I32EnumAttrCase<"AlwaysInline", 2, "always">; +def InlineHint : I32EnumAttrCase<"InlineHint", 3, "hint">; + +def InlineKind : I32EnumAttr<"InlineKind", "inlineKind", [ + NoInline, AlwaysInline, InlineHint +]> { + let cppNamespace = "::mlir::cir"; +} + +def InlineAttr : CIR_Attr<"Inline", "inline"> { + let summary = "Inline attribute"; + let description = [{ + Inline attributes represents user directives. + }]; + + let parameters = (ins "InlineKind":$value); + + let assemblyFormat = [{ + `<` $value `>` + }]; + + let extraClassDeclaration = [{ + bool isNoInline() const { return getValue() == InlineKind::NoInline; }; + bool isAlwaysInline() const { return getValue() == InlineKind::AlwaysInline; }; + bool isInlineHint() const { return getValue() == InlineKind::InlineHint; }; + }]; +} + #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8d94edda23c8..4444808638dd 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1498,6 +1498,11 @@ def FuncOp : CIR_Op<"func", [ without a prototype and, consequently, may contain calls with invalid arguments and undefined behavior. + The `extra_attrs`, which is an aggregate of function-specific attributes is + required and mandatory to describle additional attributes that are not listed + above. Though mandatory, the prining of the attribute can be omitted if it is + empty. + Example: ```mlir @@ -1532,6 +1537,7 @@ def FuncOp : CIR_Op<"func", [ UnitAttr:$no_proto, DefaultValuedAttr:$linkage, + ExtraFuncAttr:$extra_attrs, OptionalAttr:$sym_visibility, OptionalAttr:$arg_attrs, OptionalAttr:$res_attrs, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index f10367d6034d..0aa1f336a121 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1763,6 +1763,9 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, builder.getContext(), mlir::cir::GlobalLinkageKind::ExternalLinkage)); mlir::SymbolTable::setSymbolVisibility( f, mlir::SymbolTable::Visibility::Private); + + setExtraAttributesForFunc(f, FD); + if (!curCGF) theModule.push_back(f); } @@ -1788,6 +1791,67 @@ mlir::Location CIRGenModule::getLocForFunction(const clang::FunctionDecl *FD) { return theModule->getLoc(); } +void CIRGenModule::setExtraAttributesForFunc(FuncOp f, + const clang::FunctionDecl *FD) { + mlir::NamedAttrList attrs; + + if (!FD) { + // If we don't have a declaration to control inlining, the function isn't + // explicitly marked as alwaysinline for semantic reasons, and inlining is + // disabled, mark the function as noinline. + if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { + auto attr = mlir::cir::InlineAttr::get( + builder.getContext(), mlir::cir::InlineKind::AlwaysInline); + attrs.set(attr.getMnemonic(), attr); + } + } else if (FD->hasAttr()) { + // Add noinline if the function isn't always_inline. + auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + mlir::cir::InlineKind::NoInline); + attrs.set(attr.getMnemonic(), attr); + } else if (FD->hasAttr()) { + // (noinline wins over always_inline, and we can't specify both in IR) + auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + mlir::cir::InlineKind::AlwaysInline); + attrs.set(attr.getMnemonic(), attr); + } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { + // If we're not inlining, then force everything that isn't always_inline + // to carry an explicit noinline attribute. + auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + mlir::cir::InlineKind::NoInline); + attrs.set(attr.getMnemonic(), attr); + } else { + // Otherwise, propagate the inline hint attribute and potentially use its + // absence to mark things as noinline. + // Search function and template pattern redeclarations for inline. + auto CheckForInline = [](const FunctionDecl *FD) { + auto CheckRedeclForInline = [](const FunctionDecl *Redecl) { + return Redecl->isInlineSpecified(); + }; + if (any_of(FD->redecls(), CheckRedeclForInline)) + return true; + const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern(); + if (!Pattern) + return false; + return any_of(Pattern->redecls(), CheckRedeclForInline); + }; + if (CheckForInline(FD)) { + auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + mlir::cir::InlineKind::InlineHint); + attrs.set(attr.getMnemonic(), attr); + } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyHintInlining) { + auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + mlir::cir::InlineKind::NoInline); + attrs.set(attr.getMnemonic(), attr); + } + + } + + f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), + attrs.getDictionary(builder.getContext()))); +} + /// If the specified mangled name is not in the module, /// create and return a CIR Function with the specified type. If there is /// something in the module with the specified name, return it potentially diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 3e110beaccc0..097e13ce5cb7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -525,6 +525,9 @@ class CIRGenModule : public CIRGenTypeCache { void ReplaceUsesOfNonProtoTypeWithRealFunction(mlir::Operation *Old, mlir::cir::FuncOp NewFn); + void setExtraAttributesForFunc(mlir::cir::FuncOp f, + const clang::FunctionDecl *FD); + // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR mlir::cir::FuncOp diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index c24232077adc..7044fca4af4d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1450,6 +1450,21 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { hasAlias = true; } + // If extra func attributes are present, parse them. + NamedAttrList extraAttrs; + if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) { + if (parser.parseLParen().failed()) + return failure(); + if (parser.parseOptionalAttrDict(extraAttrs).failed()) + return failure(); + if (parser.parseRParen().failed()) + return failure(); + } + state.addAttribute(getExtraAttrsAttrName(state.name), + mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), + extraAttrs.getDictionary(builder.getContext()))); + // Parse the optional function body. auto *body = state.addRegion(); OptionalParseResult parseResult = parser.parseOptionalRegion( @@ -1529,7 +1544,8 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p, *this, {getSymVisibilityAttrName(), getAliaseeAttrName(), getFunctionTypeAttrName(), getLinkageAttrName(), getBuiltinAttrName(), - getNoProtoAttrName()}); + getNoProtoAttrName(), getExtraAttrsAttrName()}); + if (auto aliaseeName = getAliasee()) { p << " alias("; @@ -1537,6 +1553,12 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p << ")"; } + if (!getExtraAttrs().getElements().empty()) { + p << " extra("; + p.printOptionalAttrDict(getExtraAttrs().getElements().getValue()); + p << " )"; + } + // Print the body if this is not an external function. Region &body = getOperation()->getRegion(0); if (!body.empty()) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp index 785abb644f2f..1416e0f91d85 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp @@ -10,8 +10,11 @@ // //===----------------------------------------------------------------------===// +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/IR/DialectRegistry.h" #include "mlir/Target/LLVMIR/LLVMTranslationInterface.h" +#include "mlir/Target/LLVMIR/ModuleTranslation.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "llvm/ADT/ArrayRef.h" @@ -34,6 +37,25 @@ class CIRDialectLLVMIRTranslationInterface mlir::NamedAttribute attribute, mlir::LLVM::ModuleTranslation &moduleTranslation) const override { // TODO: Implement this + auto func = dyn_cast(op); + if (!func) + return mlir::success(); + llvm::Function *llvmFunc = moduleTranslation.lookupFunction(func.getName()); + if (auto extraAttr = attribute.getValue() + .dyn_cast()) { + for (auto attr : extraAttr.getElements()) { + if (auto inlineAttr = attr.getValue().dyn_cast()) { + if (inlineAttr.isNoInline()) + llvmFunc->addFnAttr(llvm::Attribute::NoInline); + else if (inlineAttr.isAlwaysInline()) + llvmFunc->addFnAttr(llvm::Attribute::AlwaysInline); + else if (inlineAttr.isInlineHint()) + llvmFunc->addFnAttr(llvm::Attribute::InlineHint); + else + llvm_unreachable("Unknown inline kind"); + } + } + } return mlir::success(); } }; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7037d8fcd29e..7aa5ae5ac7d1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -706,6 +706,35 @@ class CIRFuncLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; + /// Returns the name used for the linkage attribute. This *must* correspond to + /// the name of the attribute in ODS. + static StringRef getLinkageAttrNameString() { return "linkage"; } + + /// Only retain those attributes that are not constructed by + /// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out argument + /// attributes. + void + filterFuncAttributes(mlir::cir::FuncOp func, bool filterArgAndResAttrs, + SmallVectorImpl &result) const { + for (auto attr : func->getAttrs()) { + if (attr.getName() == mlir::SymbolTable::getSymbolAttrName() || + attr.getName() == func.getFunctionTypeAttrName() || + attr.getName() == getLinkageAttrNameString() || + (filterArgAndResAttrs && + (attr.getName() == func.getArgAttrsAttrName() || + attr.getName() == func.getResAttrsAttrName()))) + continue; + + // `CIRDialectLLVMIRTranslationInterface` requires "cir." prefix for + // dialect specific attributes, rename them. + if (attr.getName() == func.getExtraAttrsAttrName()) { + std::string cirName = "cir." + func.getExtraAttrsAttrName().str(); + attr.setName(mlir::StringAttr::get(getContext(), cirName)); + } + result.push_back(attr); + } + } + mlir::LogicalResult matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { @@ -737,9 +766,14 @@ class CIRFuncLowering : public mlir::OpConversionPattern { Loc = FusedLoc.getLocations()[0]; } assert(Loc.isa() && "expected single location here"); + auto linkage = convertLinkage(op.getLinkage()); - auto fn = rewriter.create(Loc, op.getName(), - llvmFnTy, linkage); + SmallVector attributes; + filterFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes); + + auto fn = rewriter.create( + Loc, op.getName(), llvmFnTy, linkage, false, mlir::LLVM::CConv::C, + mlir::SymbolRefAttr(), attributes); rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index f92ad17a6d3b..26b7c60d4050 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -67,7 +67,7 @@ void test() { // CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return -// CHECK: cir.func @_Z4testv() { +// CHECK: cir.func @_Z4testv() // CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () // CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, !s32i) -> () // CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index f69cb2df69ac..f3709df97de5 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -5,7 +5,7 @@ // CHECK: !ty_22struct2Eyep_22 = !cir.struct<"struct.yep_", !u32i, !u32i> struct Zero { - void yolo(); + void yolo(); }; void f() { @@ -14,7 +14,7 @@ void f() { Zero z1 = Zero{}; } -// CHECK: cir.func @_Z1fv() { +// CHECK: cir.func @_Z1fv() // CHECK: %0 = cir.alloca !ty_22struct2EZero22, cir.ptr , ["z0", init] // CHECK: %1 = cir.alloca !ty_22struct2EZero22, cir.ptr , ["z1"] // CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () @@ -33,7 +33,7 @@ typedef struct yep_ { void use() { yop{}; } -// CHECK: cir.func @_Z3usev() { +// CHECK: cir.func @_Z3usev() // CHECK: %0 = cir.alloca !ty_22struct2Eyep_22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} // CHECK: %1 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "Status"}> : (!cir.ptr) -> !cir.ptr // CHECK: %2 = cir.const(#cir.int<0> : !u32i) : !u32i @@ -63,7 +63,7 @@ void yo() { Yo ext2 = {Y, &ext}; } -// CHECK: cir.func @_Z2yov() { +// CHECK: cir.func @_Z2yov() // CHECK: %0 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} // CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i,#cir.null : !cir.ptr,#cir.int<0> : !u64i}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index a86e96c5ffa4..f5a0cb459cc5 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -5,7 +5,7 @@ void a0() { int a[10]; } -// CHECK: cir.func @_Z2a0v() { +// CHECK: cir.func @_Z2a0v() // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} void a1() { @@ -13,7 +13,7 @@ void a1() { a[0] = 1; } -// CHECK: cir.func @_Z2a1v() { +// CHECK: cir.func @_Z2a1v() // CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} // CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i @@ -26,7 +26,7 @@ int *a2() { return &a[0]; } -// CHECK: cir.func @_Z2a2v() -> !cir.ptr { +// CHECK: cir.func @_Z2a2v() -> !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} // CHECK-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i @@ -41,7 +41,7 @@ void local_stringlit() { } // CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK: cir.func @_Z15local_stringlitv() { +// CHECK: cir.func @_Z15local_stringlitv() // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 25f9c56e4c81..01ea4900eebb 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -81,7 +81,7 @@ int main() { } } -// CHECK: cir.func @main() -> !s32i { +// CHECK: cir.func @main() -> !s32i // CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK: %1 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["sv", init] {alignment = 8 : i64} // CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index b16b25c8dfb3..c99ecce64090 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -11,7 +11,7 @@ int foo(int i) { } // CIR: module @"{{.*}}basic.c" attributes {{{.*}}cir.lang = #cir.lang -// CIR-NEXT: cir.func @foo(%arg0: !s32i loc({{.*}})) -> !s32i { +// CIR-NEXT: cir.func @foo(%arg0: !s32i loc({{.*}})) -> !s32i // CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CIR-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CIR-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr @@ -23,7 +23,7 @@ int foo(int i) { int f2(void) { return 3; } -// CIR: cir.func @f2() -> !s32i { +// CIR: cir.func @f2() -> !s32i // CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CIR-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i // CIR-NEXT: cir.store %1, %0 : !s32i, cir.ptr @@ -43,7 +43,7 @@ int f3(void) { return i; } -// CIR: cir.func @f3() -> !s32i { +// CIR: cir.func @f3() -> !s32i // CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CIR-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CIR-NEXT: %2 = cir.const(#cir.int<3> : !s32i) : !s32i diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 12bc06700a96..7cb85671cae8 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -6,7 +6,7 @@ int *p0() { return p; } -// CHECK: cir.func @_Z2p0v() -> !cir.ptr { +// CHECK: cir.func @_Z2p0v() -> !cir.ptr // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] // CHECK: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > @@ -17,7 +17,7 @@ int *p1() { return p; } -// CHECK: cir.func @_Z2p1v() -> !cir.ptr { +// CHECK: cir.func @_Z2p1v() -> !cir.ptr // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p"] // CHECK: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > @@ -33,7 +33,7 @@ int *p2() { return p; } -// CHECK: cir.func @_Z2p2v() -> !cir.ptr { +// CHECK: cir.func @_Z2p2v() -> !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] {alignment = 8 : i64} // CHECK-NEXT: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr @@ -57,13 +57,13 @@ int *p2() { void b0() { bool x = true, y = false; } -// CHECK: cir.func @_Z2b0v() { +// CHECK: cir.func @_Z2b0v() // CHECK: %2 = cir.const(#true) : !cir.bool // CHECK: %3 = cir.const(#false) : !cir.bool void b1(int a) { bool b = a; } -// CHECK: cir.func @_Z2b1i(%arg0: !s32i loc({{.*}})) { +// CHECK: cir.func @_Z2b1i(%arg0: !s32i loc({{.*}})) // CHECK: %2 = cir.load %0 : cir.ptr , !s32i // CHECK: %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool // CHECK: cir.store %3, %1 : !cir.bool, cir.ptr @@ -154,7 +154,7 @@ void x() { const bool b1 = false; } -// CHECK: cir.func @_Z1xv() { +// CHECK: cir.func @_Z1xv() // CHECK: %0 = cir.alloca !cir.bool, cir.ptr , ["b0", init] {alignment = 1 : i64} // CHECK: %1 = cir.alloca !cir.bool, cir.ptr , ["b1", init] {alignment = 1 : i64} // CHECK: %2 = cir.const(#true) : !cir.bool @@ -179,4 +179,4 @@ size_type max_size() { // CHECK-DAG: #[[locScope]] = loc(fused[#[[locScopeA:loc[0-9]+]], #[[locScopeB:loc[0-9]+]]]) // CHECK-DAG: #[[locScopeA]] = loc("{{.*}}basic.cpp":27:3) -// CHECK-DAG: #[[locScopeB]] = loc("{{.*}}basic.cpp":31:3) \ No newline at end of file +// CHECK-DAG: #[[locScopeB]] = loc("{{.*}}basic.cpp":31:3) diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp index 65b5cb583b41..65cfca7eac20 100644 --- a/clang/test/CIR/CodeGen/binassign.cpp +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -60,7 +60,7 @@ void exec() { if ((r = getty()) < 0) {} } -// CHECK: cir.func @_Z4execv() { +// CHECK: cir.func @_Z4execv() // CHECK: %0 = cir.alloca !u32i, cir.ptr , ["r"] {alignment = 4 : i64} // CHECK: cir.scope { // CHECK: %1 = cir.call @_Z5gettyv() : () -> !u32i diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index dce64286c7e5..9b353f57aba0 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -15,10 +15,10 @@ void d(void) { } // CHECK: module {{.*}} { -// CHECK: cir.func @a() { +// CHECK: cir.func @a() // CHECK: cir.return // CHECK: } -// CHECK: cir.func @b(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) -> !s32i { +// CHECK: cir.func @b(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) -> !s32i // CHECK: %0 = cir.alloca !s32i, cir.ptr , ["a", init] // CHECK: %1 = cir.alloca !s32i, cir.ptr , ["b", init] // CHECK: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] @@ -31,7 +31,7 @@ void d(void) { // CHECK: %6 = cir.load %2 : cir.ptr , !s32i // CHECK: cir.return %6 // CHECK: } -// CHECK: cir.func @c(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { +// CHECK: cir.func @c(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 // CHECK: %0 = cir.alloca f64, cir.ptr , ["a", init] // CHECK: %1 = cir.alloca f64, cir.ptr , ["b", init] // CHECK: %2 = cir.alloca f64, cir.ptr , ["__retval"] @@ -44,7 +44,7 @@ void d(void) { // CHECK: %6 = cir.load %2 : cir.ptr , f64 // CHECK: cir.return %6 : f64 // CHECK: } -// CHECK: cir.func @d() { +// CHECK: cir.func @d() // CHECK: call @a() : () -> () // CHECK: %0 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i @@ -53,10 +53,10 @@ void d(void) { // CHECK: } // // CXX: module {{.*}} { -// CXX-NEXT: cir.func @_Z1av() { +// CXX-NEXT: cir.func @_Z1av() // CXX-NEXT: cir.return // CXX-NEXT: } -// CXX-NEXT: cir.func @_Z1bii(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) -> !s32i { +// CXX-NEXT: cir.func @_Z1bii(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) -> !s32i // CXX-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] // CXX-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["b", init] // CXX-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] @@ -69,7 +69,7 @@ void d(void) { // CXX-NEXT: %6 = cir.load %2 : cir.ptr , !s32i // CXX-NEXT: cir.return %6 // CXX-NEXT: } -// CXX-NEXT: cir.func @_Z1cdd(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 { +// CXX-NEXT: cir.func @_Z1cdd(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 // CXX-NEXT: %0 = cir.alloca f64, cir.ptr , ["a", init] // CXX-NEXT: %1 = cir.alloca f64, cir.ptr , ["b", init] // CXX-NEXT: %2 = cir.alloca f64, cir.ptr , ["__retval"] @@ -82,7 +82,7 @@ void d(void) { // CXX-NEXT: %6 = cir.load %2 : cir.ptr , f64 // CXX-NEXT: cir.return %6 : f64 // CXX-NEXT: } -// CXX-NEXT: cir.func @_Z1dv() { +// CXX-NEXT: cir.func @_Z1dv() // CXX-NEXT: call @_Z1av() : () -> () // CXX-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i // CXX-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i diff --git a/clang/test/CIR/CodeGen/call.cpp b/clang/test/CIR/CodeGen/call.cpp index d88bfb21bc8e..2fbe34b316ff 100644 --- a/clang/test/CIR/CodeGen/call.cpp +++ b/clang/test/CIR/CodeGen/call.cpp @@ -6,9 +6,9 @@ int f() { return p() - 22; } -// CHECK: cir.func @_Z1fv() -> !s32i { +// CHECK: cir.func @_Z1fv() -> !s32i // CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK: %1 = cir.call @_Z1pv() : () -> !cir.ptr // CHECK: %2 = cir.load %1 : cir.ptr , !s32i // CHECK: %3 = cir.const(#cir.int<22> : !s32i) : !s32i -// CHECK: %4 = cir.binop(sub, %2, %3) : !s32i \ No newline at end of file +// CHECK: %4 = cir.binop(sub, %2, %3) : !s32i diff --git a/clang/test/CIR/CodeGen/comma.cpp b/clang/test/CIR/CodeGen/comma.cpp index 87c9dcce50b3..0e10c8edec3f 100644 --- a/clang/test/CIR/CodeGen/comma.cpp +++ b/clang/test/CIR/CodeGen/comma.cpp @@ -7,7 +7,7 @@ int c0() { return b + 1, a; } -// CHECK: cir.func @_Z2c0v() -> !s32i { +// CHECK: cir.func @_Z2c0v() -> !s32i // CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] // CHECK: %[[#B:]] = cir.alloca !s32i, cir.ptr , ["b", init] diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index 05925296e25a..df60781b5eff 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -43,7 +43,7 @@ class B : public A // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> -// CHECK: cir.func @_Z4bluev() { +// CHECK: cir.func @_Z4bluev() // CHECK: %0 = cir.alloca !ty_22class2EPSEvent22, cir.ptr , ["p", init] {alignment = 8 : i64} // CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %2 = cir.get_global @".str" : cir.ptr > diff --git a/clang/test/CIR/CodeGen/fullexpr.cpp b/clang/test/CIR/CodeGen/fullexpr.cpp index 8d2d64a594d5..bb8f30d9af6d 100644 --- a/clang/test/CIR/CodeGen/fullexpr.cpp +++ b/clang/test/CIR/CodeGen/fullexpr.cpp @@ -8,7 +8,7 @@ int go1() { return x; } -// CHECK: cir.func @_Z3go1v() -> !s32i { +// CHECK: cir.func @_Z3go1v() -> !s32i // CHECK: %[[#XAddr:]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} // CHECK: %[[#RVal:]] = cir.scope { // CHECK-NEXT: %[[#TmpAddr:]] = cir.alloca !s32i, cir.ptr , ["ref.tmp0", init] {alignment = 4 : i64} @@ -17,4 +17,4 @@ int go1() { // CHECK-NEXT: %[[#RValTmp:]] = cir.call @_Z2goRKi(%[[#TmpAddr]]) : (!cir.ptr) -> !s32i // CHECK-NEXT: cir.yield %[[#RValTmp]] : !s32i // CHECK-NEXT: } -// CHECK-NEXT: cir.store %[[#RVal]], %[[#XAddr]] : !s32i, cir.ptr \ No newline at end of file +// CHECK-NEXT: cir.store %[[#RVal]], %[[#XAddr]] : !s32i, cir.ptr diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index c216df4556b7..a51e0e3b31cb 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -46,13 +46,13 @@ int use_func() { return func(); } // CHECK-NEXT: cir.global external @s2 = @".str": !cir.ptr -// CHECK: cir.func @_Z10use_globalv() { +// CHECK: cir.func @_Z10use_globalv() // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["li", init] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.get_global @a : cir.ptr // CHECK-NEXT: %2 = cir.load %1 : cir.ptr , !s32i // CHECK-NEXT: cir.store %2, %0 : !s32i, cir.ptr -// CHECK: cir.func @_Z17use_global_stringv() { +// CHECK: cir.func @_Z17use_global_stringv() // CHECK-NEXT: %0 = cir.alloca !u8i, cir.ptr , ["c", init] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @s2 : cir.ptr > // CHECK-NEXT: %2 = cir.load %1 : cir.ptr >, !cir.ptr @@ -63,14 +63,14 @@ int use_func() { return func(); } // CHECK-NEXT: cir.store %6, %0 : !u8i, cir.ptr // CHECK-NEXT: cir.return -// CHECK: cir.func linkonce_odr @_Z4funcIiET_v() -> !s32i { +// CHECK: cir.func linkonce_odr @_Z4funcIiET_v() -> !s32i // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i // CHECK-NEXT: cir.return %2 : !s32i // CHECK-NEXT: } -// CHECK-NEXT: cir.func @_Z8use_funcv() -> !s32i { +// CHECK-NEXT: cir.func @_Z8use_funcv() -> !s32i // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.call @_Z4funcIiET_v() : () -> !s32i // CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index f4d87938eb43..153bd3d3445d 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -55,7 +55,7 @@ int g2() { // Make sure (1) we don't get dangling unused cleanup blocks // (2) generated returns consider the function type -// CHECK: cir.func @_Z2g2v() -> !s32i { +// CHECK: cir.func @_Z2g2v() -> !s32i // CHECK: cir.br ^bb2 // CHECK-NEXT: ^bb1: // no predecessors @@ -63,4 +63,4 @@ int g2() { // CHECK: [[R:%[0-9]+]] = cir.load %0 : cir.ptr , !s32i // CHECK-NEXT: [[R]] : !s32i -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/hello.c b/clang/test/CIR/CodeGen/hello.c index b55fd228c9e2..3b7155c36ff7 100644 --- a/clang/test/CIR/CodeGen/hello.c +++ b/clang/test/CIR/CodeGen/hello.c @@ -9,7 +9,7 @@ int main (void) { // CHECK: cir.func private @printf(!cir.ptr, ...) -> !s32i // CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK: cir.func @main() -> !s32i { +// CHECK: cir.func @main() -> !s32i // CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK: %1 = cir.get_global @printf : cir.ptr , ...)>> // CHECK: %2 = cir.get_global @".str" : cir.ptr > diff --git a/clang/test/CIR/CodeGen/inc-dec.cpp b/clang/test/CIR/CodeGen/inc-dec.cpp index a1b89e1d6e84..1005299027a1 100644 --- a/clang/test/CIR/CodeGen/inc-dec.cpp +++ b/clang/test/CIR/CodeGen/inc-dec.cpp @@ -6,7 +6,7 @@ unsigned id0() { return ++a; } -// CHECK: cir.func @_Z3id0v() -> !u32i { +// CHECK: cir.func @_Z3id0v() -> !u32i // CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] @@ -20,7 +20,7 @@ unsigned id1() { return --a; } -// CHECK: cir.func @_Z3id1v() -> !u32i { +// CHECK: cir.func @_Z3id1v() -> !u32i // CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] @@ -33,7 +33,7 @@ unsigned id2() { return a++; } -// CHECK: cir.func @_Z3id2v() -> !u32i { +// CHECK: cir.func @_Z3id2v() -> !u32i // CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] @@ -46,7 +46,7 @@ unsigned id3() { return a--; } -// CHECK: cir.func @_Z3id3v() -> !u32i { +// CHECK: cir.func @_Z3id3v() -> !u32i // CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] diff --git a/clang/test/CIR/CodeGen/inlineAttr.cpp b/clang/test/CIR/CodeGen/inlineAttr.cpp new file mode 100644 index 000000000000..1d143e6d2aa3 --- /dev/null +++ b/clang/test/CIR/CodeGen/inlineAttr.cpp @@ -0,0 +1,36 @@ +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + + +inline int s0(int a, int b) { + int x = a + b; + return x; +} + +__attribute__((noinline)) +int s1(int a, int b) { + return s0(a,b); +} + +__attribute__((always_inline)) +int s2(int a, int b) { + return s0(a,b); +} + +int s3(int a, int b) { + int x = a + b; + return x; +} + + +// CIR: cir.func linkonce_odr @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline} ) +// CIR: cir.func @_Z2s1ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline} ) +// CIR: cir.func @_Z2s2ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline} ) +// CIR: cir.func @_Z2s3ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} { + +// LLVM: define i32 @_Z2s1ii(i32 %0, i32 %1) {{.*}} #[[#ATTR1:]] +// LLVM: define i32 @_Z2s2ii(i32 %0, i32 %1) {{.*}} #[[#ATTR2:]] +// LLVM: attributes #[[#ATTR1]] = {{.*}} noinline +// LLVM: attributes #[[#ATTR2]] = {{.*}} alwaysinline diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 7126d0557160..ef3bf6022a33 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -35,7 +35,7 @@ void l0() { // CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr // CHECK: cir.store %6, %8 : !s32i, cir.ptr -// CHECK: cir.func @_Z2l0v() { +// CHECK: cir.func @_Z2l0v() auto g() { int i = 12; @@ -45,7 +45,7 @@ auto g() { }; } -// CHECK: cir.func @_Z1gv() -> !ty_22class2Eanon223 { +// CHECK: cir.func @_Z1gv() -> !ty_22class2Eanon223 // CHECK: %0 = cir.alloca !ty_22class2Eanon223, cir.ptr , ["__retval"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK: %2 = cir.const(#cir.int<12> : !s32i) : !s32i @@ -65,7 +65,7 @@ auto g2() { } // Should be same as above because of NRVO -// CHECK: cir.func @_Z2g2v() -> !ty_22class2Eanon224 { +// CHECK: cir.func @_Z2g2v() -> !ty_22class2Eanon224 // CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon224, cir.ptr , ["__retval", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.const(#cir.int<12> : !s32i) : !s32i @@ -79,7 +79,7 @@ int f() { return g2()(); } -// CHECK: cir.func @_Z1fv() -> !s32i { +// CHECK: cir.func @_Z1fv() -> !s32i // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { // CHECK-NEXT: %2 = cir.alloca !ty_22class2Eanon224, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} @@ -107,7 +107,7 @@ int g3() { // lambda operator int (*)(int const&)() // CHECK: cir.func internal private @_ZZ2g3vENK3$_0cvPFiRKiEEv -// CHECK: cir.func @_Z2g3v() -> !s32i { +// CHECK: cir.func @_Z2g3v() -> !s32i // CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK: %1 = cir.alloca !cir.ptr)>>, cir.ptr )>>>, ["fn", init] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !s32i, cir.ptr , ["task", init] {alignment = 4 : i64} @@ -134,4 +134,3 @@ int g3() { // CHECK: } // CHECK: } - diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index 9f7f74ef3efc..a67d58df76f7 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -9,7 +9,7 @@ void l0(void) { } } -// CPPSCOPE: cir.func @_Z2l0v() { +// CPPSCOPE: cir.func @_Z2l0v() // CPPSCOPE-NEXT: cir.scope { // CPPSCOPE-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CPPSCOPE-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} @@ -17,7 +17,7 @@ void l0(void) { // CPPSCOPE-NEXT: cir.store %2, %0 : !s32i, cir.ptr // CPPSCOPE-NEXT: cir.loop for(cond : { -// CSCOPE: cir.func @l0() { +// CSCOPE: cir.func @l0() // CSCOPE-NEXT: cir.scope { // CSCOPE-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CSCOPE-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 7f9ee4f8c845..90831e31e898 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -210,7 +210,7 @@ void l5() { } while (0); } -// CHECK: cir.func @_Z2l5v() { +// CHECK: cir.func @_Z2l5v() // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i @@ -235,7 +235,7 @@ void l6() { } } -// CHECK: cir.func @_Z2l6v() { +// CHECK: cir.func @_Z2l6v() // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: cir.yield continue diff --git a/clang/test/CIR/CodeGen/lvalue-refs.cpp b/clang/test/CIR/CodeGen/lvalue-refs.cpp index d4afc507d343..66aa6f27b5da 100644 --- a/clang/test/CIR/CodeGen/lvalue-refs.cpp +++ b/clang/test/CIR/CodeGen/lvalue-refs.cpp @@ -14,6 +14,6 @@ void foo() { split(s); } -// CHECK: cir.func @_Z3foov() { +// CHECK: cir.func @_Z3foov() // CHECK: %0 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s"] // CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/move.cpp b/clang/test/CIR/CodeGen/move.cpp index 95895015f98d..0c624c94306a 100644 --- a/clang/test/CIR/CodeGen/move.cpp +++ b/clang/test/CIR/CodeGen/move.cpp @@ -28,10 +28,10 @@ void t() { // FIXME: we should explicitly model std::move here since it will // be useful at least for the lifetime checker. -// CHECK: cir.func @_Z1tv() { +// CHECK: cir.func @_Z1tv() // CHECK: %[[#Addr:]] = cir.alloca ![[StdString]], {{.*}} ["ref.tmp0"] // CHECK: %[[#RValStr:]] = cir.call @_Z6getstrv() : () -> ![[StdString]] // CHECK: cir.store %[[#RValStr]], %[[#Addr]] // CHECK: cir.call @_Z7emplaceOSt6string(%[[#Addr]]) // CHECK: cir.return -// CHECK: } \ No newline at end of file +// CHECK: } diff --git a/clang/test/CIR/CodeGen/no-proto-is-void.cpp b/clang/test/CIR/CodeGen/no-proto-is-void.cpp index 0bf99efc3be0..7ab958f8fd00 100644 --- a/clang/test/CIR/CodeGen/no-proto-is-void.cpp +++ b/clang/test/CIR/CodeGen/no-proto-is-void.cpp @@ -5,7 +5,7 @@ // Both CXX and C2X don't support no-prototype functions. They default to void. int noProto(); -// CHECK: cir.func @{{.*}}noProto{{.*}}() -> !s32i { +// CHECK: cir.func @{{.*}}noProto{{.*}}() -> !s32i int test(int x) { return noProto(); // CHECK {{.+}} = cir.call @{{.*}}noProto{{.*}}() : () -> !s32i diff --git a/clang/test/CIR/CodeGen/no-prototype.c b/clang/test/CIR/CodeGen/no-prototype.c index 9f9cbdfb1c33..d5ed15018454 100644 --- a/clang/test/CIR/CodeGen/no-prototype.c +++ b/clang/test/CIR/CodeGen/no-prototype.c @@ -21,7 +21,7 @@ int test0(int x) { // definition is not marked as no-proto. int noProto1(); int noProto1(int x) { return x; } -// CHECK: cir.func @noProto1(%arg0: !s32i {{.+}}) -> !s32i { +// CHECK: cir.func @noProto1(%arg0: !s32i {{.+}}) -> !s32i int test1(int x) { // CHECK: cir.func @test1 return noProto1(x); diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp index d96a15f140ce..4d8ce62fcb36 100644 --- a/clang/test/CIR/CodeGen/nrvo.cpp +++ b/clang/test/CIR/CodeGen/nrvo.cpp @@ -11,7 +11,7 @@ std::vector test_nrvo() { // CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector", !cir.ptr>, !cir.ptr>, !cir.ptr>> -// CHECK: cir.func @_Z9test_nrvov() -> !ty_22class2Estd3A3Avector22 { +// CHECK: cir.func @_Z9test_nrvov() -> !ty_22class2Estd3A3Avector22 // CHECK: %0 = cir.alloca !ty_22class2Estd3A3Avector22, cir.ptr , ["__retval", init] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !cir.bool, cir.ptr , ["nrvo"] {alignment = 1 : i64} // CHECK: %2 = cir.const(#false) : !cir.bool @@ -28,4 +28,4 @@ std::vector test_nrvo() { // CHECK: cir.store %3, %1 : !cir.bool, cir.ptr // CHECK: %4 = cir.load %0 : cir.ptr , !ty_22class2Estd3A3Avector22 // CHECK: cir.return %4 : !ty_22class2Estd3A3Avector22 -// CHECK: } \ No newline at end of file +// CHECK: } diff --git a/clang/test/CIR/CodeGen/predefined.cpp b/clang/test/CIR/CodeGen/predefined.cpp index 224504aac61a..dc849d915598 100644 --- a/clang/test/CIR/CodeGen/predefined.cpp +++ b/clang/test/CIR/CodeGen/predefined.cpp @@ -5,11 +5,11 @@ extern "C" { void __assert2(const char* __file, int __line, const char* __function, const char* __msg) __attribute__((__noreturn__)); } -void m() { +void m() { __assert2("yo.cpp", 79, __PRETTY_FUNCTION__, "doom"); } -// CHECK: cir.func @_Z1mv() { +// CHECK: cir.func @_Z1mv() // CHECK: %0 = cir.get_global @".str" : cir.ptr > // CHECK: %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // CHECK: %2 = cir.const(#cir.int<79> : !s32i) : !s32i @@ -19,4 +19,4 @@ void m() { // CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr // CHECK: cir.call @__assert2(%1, %2, %4, %6) : (!cir.ptr, !s32i, !cir.ptr, !cir.ptr) -> () // CHECK: cir.return -// CHECK: } \ No newline at end of file +// CHECK: } diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 5aef7b7554da..b9e25c52aba5 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -19,7 +19,7 @@ int s0(int a, int b) { // CIR: #loc21 = loc(fused[#loc3, #loc4]) // CIR: #loc22 = loc(fused[#loc5, #loc6]) // CIR: module @"{{.*}}sourcelocation.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior -// CIR: cir.func @_Z2s0ii(%arg0: !s32i loc(fused[#loc3, #loc4]), %arg1: !s32i loc(fused[#loc5, #loc6])) -> !s32i { +// CIR: cir.func @_Z2s0ii(%arg0: !s32i loc(fused[#loc3, #loc4]), %arg1: !s32i loc(fused[#loc5, #loc6])) -> !s32i // CIR: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) // CIR: %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) // CIR: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) @@ -77,7 +77,7 @@ int s0(int a, int b) { // LLVM: ModuleID = '{{.*}}sourcelocation.cpp' // LLVM: source_filename = "{{.*}}sourcelocation.cpp" -// LLVM: define i32 @_Z2s0ii(i32 %0, i32 %1) !dbg ![[#SP:]] +// LLVM: define i32 @_Z2s0ii(i32 %0, i32 %1) #[[#]] !dbg ![[#SP:]] // LLVM: %3 = alloca i32, i64 1, align 4, !dbg ![[#LOC1:]] diff --git a/clang/test/CIR/CodeGen/store.c b/clang/test/CIR/CodeGen/store.c index e190b3cb34eb..14e8d8a37fdb 100644 --- a/clang/test/CIR/CodeGen/store.c +++ b/clang/test/CIR/CodeGen/store.c @@ -6,7 +6,7 @@ void foo(void) { a = 1; } -// CHECK: cir.func @foo() { +// CHECK: cir.func @foo() // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr @@ -14,4 +14,3 @@ void foo(void) { // CHECK-NEXT: cir.store %2, %0 : !s32i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } - diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 24c4b318cd87..5e25e6ebecc4 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -20,7 +20,7 @@ void baz(void) { // CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i> // CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", !s32i, !s8i, !ty_22struct2EBar22> // CHECK-DAG: module {{.*}} { -// CHECK-NEXT: cir.func @baz() { +// CHECK-NEXT: cir.func @baz() // CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 17c9e0509650..fe9a3f394c9e 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -123,7 +123,7 @@ struct S { void h() { S s; } -// CHECK: cir.func @_Z1hv() { +// CHECK: cir.func @_Z1hv() // CHECK: %0 = cir.alloca !ty_22struct2ES22, cir.ptr , ["s", init] {alignment = 1 : i64} // CHECK: %1 = cir.alloca !ty_22struct2EA22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} // CHECK: %2 = cir.call @_Z11get_defaultv() : () -> !ty_22struct2EA22 diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index 4f97daeaee5f..c6873788e778 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -21,23 +21,23 @@ void t8(void) {} bool t9(bool b) { return b; } #endif -// CHECK: cir.func @t0(%arg0: !s32i loc({{.*}})) -> !s32i { -// CHECK: cir.func @t1(%arg0: !u32i loc({{.*}})) -> !u32i { -// CHECK: cir.func @t2(%arg0: !s8i loc({{.*}})) -> !s8i { -// CHECK: cir.func @t3(%arg0: !u8i loc({{.*}})) -> !u8i { -// CHECK: cir.func @t4(%arg0: !s16i loc({{.*}})) -> !s16i { -// CHECK: cir.func @t5(%arg0: !u16i loc({{.*}})) -> !u16i { -// CHECK: cir.func @t6(%arg0: f32 loc({{.*}})) -> f32 { -// CHECK: cir.func @t7(%arg0: f64 loc({{.*}})) -> f64 { -// CHECK: cir.func @t8() { - -// CHECK-CPP: cir.func @_Z2t0i(%arg0: !s32i loc({{.*}})) -> !s32i { -// CHECK-CPP: cir.func @_Z2t1j(%arg0: !u32i loc({{.*}})) -> !u32i { -// CHECK-CPP: cir.func @_Z2t2c(%arg0: !s8i loc({{.*}})) -> !s8i { -// CHECK-CPP: cir.func @_Z2t3h(%arg0: !u8i loc({{.*}})) -> !u8i { -// CHECK-CPP: cir.func @_Z2t4s(%arg0: !s16i loc({{.*}})) -> !s16i { -// CHECK-CPP: cir.func @_Z2t5t(%arg0: !u16i loc({{.*}})) -> !u16i { -// CHECK-CPP: cir.func @_Z2t6f(%arg0: f32 loc({{.*}})) -> f32 { -// CHECK-CPP: cir.func @_Z2t7d(%arg0: f64 loc({{.*}})) -> f64 { -// CHECK-CPP: cir.func @_Z2t8v() { -// CHECK-CPP: cir.func @_Z2t9b(%arg0: !cir.bool loc({{.*}})) -> !cir.bool { +// CHECK: cir.func @t0(%arg0: !s32i loc({{.*}})) -> !s32i +// CHECK: cir.func @t1(%arg0: !u32i loc({{.*}})) -> !u32i +// CHECK: cir.func @t2(%arg0: !s8i loc({{.*}})) -> !s8i +// CHECK: cir.func @t3(%arg0: !u8i loc({{.*}})) -> !u8i +// CHECK: cir.func @t4(%arg0: !s16i loc({{.*}})) -> !s16i +// CHECK: cir.func @t5(%arg0: !u16i loc({{.*}})) -> !u16i +// CHECK: cir.func @t6(%arg0: f32 loc({{.*}})) -> f32 +// CHECK: cir.func @t7(%arg0: f64 loc({{.*}})) -> f64 +// CHECK: cir.func @t8() + +// CHECK-CPP: cir.func @_Z2t0i(%arg0: !s32i loc({{.*}})) -> !s32i +// CHECK-CPP: cir.func @_Z2t1j(%arg0: !u32i loc({{.*}})) -> !u32i +// CHECK-CPP: cir.func @_Z2t2c(%arg0: !s8i loc({{.*}})) -> !s8i +// CHECK-CPP: cir.func @_Z2t3h(%arg0: !u8i loc({{.*}})) -> !u8i +// CHECK-CPP: cir.func @_Z2t4s(%arg0: !s16i loc({{.*}})) -> !s16i +// CHECK-CPP: cir.func @_Z2t5t(%arg0: !u16i loc({{.*}})) -> !u16i +// CHECK-CPP: cir.func @_Z2t6f(%arg0: f32 loc({{.*}})) -> f32 +// CHECK-CPP: cir.func @_Z2t7d(%arg0: f64 loc({{.*}})) -> f64 +// CHECK-CPP: cir.func @_Z2t8v() +// CHECK-CPP: cir.func @_Z2t9b(%arg0: !cir.bool loc({{.*}})) -> !cir.bool diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index 778e6b506e7d..1fe53cd20681 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -6,7 +6,7 @@ unsigned up0() { return +a; } -// CHECK: cir.func @_Z3up0v() -> !u32i { +// CHECK: cir.func @_Z3up0v() -> !u32i // CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#INPUT:]] = cir.load %[[#A]] @@ -18,7 +18,7 @@ unsigned um0() { return -a; } -// CHECK: cir.func @_Z3um0v() -> !u32i { +// CHECK: cir.func @_Z3um0v() -> !u32i // CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#INPUT:]] = cir.load %[[#A]] @@ -30,7 +30,7 @@ unsigned un0() { return ~a; // a ^ -1 , not } -// CHECK: cir.func @_Z3un0v() -> !u32i { +// CHECK: cir.func @_Z3un0v() -> !u32i // CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] // CHECK: %[[#INPUT:]] = cir.load %[[#A]] @@ -43,7 +43,7 @@ int inc0() { return a; } -// CHECK: cir.func @_Z4inc0v() -> !s32i { +// CHECK: cir.func @_Z4inc0v() -> !s32i // CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] // CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i @@ -62,7 +62,7 @@ int dec0() { return a; } -// CHECK: cir.func @_Z4dec0v() -> !s32i { +// CHECK: cir.func @_Z4dec0v() -> !s32i // CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] // CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i @@ -82,7 +82,7 @@ int inc1() { return a; } -// CHECK: cir.func @_Z4inc1v() -> !s32i { +// CHECK: cir.func @_Z4inc1v() -> !s32i // CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] // CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i @@ -101,7 +101,7 @@ int dec1() { return a; } -// CHECK: cir.func @_Z4dec1v() -> !s32i { +// CHECK: cir.func @_Z4dec1v() -> !s32i // CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] // CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i @@ -121,7 +121,7 @@ int inc2() { return b; } -// CHECK: cir.func @_Z4inc2v() -> !s32i { +// CHECK: cir.func @_Z4inc2v() -> !s32i // CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] // CHECK: %[[#B:]] = cir.alloca !s32i, cir.ptr , ["b", init] diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index 24ec8c00253b..a31f88910845 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -19,7 +19,7 @@ void m() { // CHECK: !ty_22union2Eyolm22 = !cir.struct<"union.yolm", !ty_22struct2Eyolo22> // CHECK: !ty_22union2Eyolm222 = !cir.struct<"union.yolm2", !ty_22struct2Eanon221> -// CHECK: cir.func @_Z1mv() { +// CHECK: cir.func @_Z1mv() // CHECK: cir.alloca !ty_22union2Eyolm22, cir.ptr , ["q"] {alignment = 4 : i64} // CHECK: cir.alloca !ty_22union2Eyolm222, cir.ptr , ["q2"] {alignment = 8 : i64} -// CHECK: cir.alloca !ty_22union2Eyolm322, cir.ptr , ["q3"] {alignment = 4 : i64} \ No newline at end of file +// CHECK: cir.alloca !ty_22union2Eyolm322, cir.ptr , ["q3"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 88a0497c9e40..adcdb3c0807e 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -47,7 +47,7 @@ class B : public A // foo - zero initialize object B and call ctor (@B::B()) // -// CHECK: cir.func @_Z3foov() { +// CHECK: cir.func @_Z3foov() // CHECK: %0 = cir.alloca ![[ClassB]], cir.ptr , ["agg.tmp0"] {alignment = 8 : i64} // CHECK: cir.scope { // CHECK: %1 = cir.const(#cir.zero : ![[ClassB]]) : ![[ClassB]] diff --git a/clang/test/CIR/IR/inlineAttr.cir b/clang/test/CIR/IR/inlineAttr.cir new file mode 100644 index 000000000000..2b2542b0cd45 --- /dev/null +++ b/clang/test/CIR/IR/inlineAttr.cir @@ -0,0 +1,11 @@ +// RUN: cir-tool %s | FileCheck %s -check-prefix=CIR +// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR + +module { + cir.func @l0() extra( {cir.inline = #cir.inline} ) { + cir.return + } +} + +// CIR: cir.func @l0() extra( {cir.inline = #cir.inline} ) { +// MLIR: llvm.func @l0() attributes {cir.extra_attrs = #cir})>} diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir index 582b6d83b148..7ab8a561be63 100644 --- a/clang/test/CIR/Lowering/array.cir +++ b/clang/test/CIR/Lowering/array.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { @@ -9,7 +9,7 @@ module { } // MLIR: module { -// MLIR-NEXT: func @foo() { +// MLIR-NEXT: func @foo() // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.array<10 x i32> {alignment = 16 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.return diff --git a/clang/test/CIR/Lowering/binop-fp.cir b/clang/test/CIR/Lowering/binop-fp.cir index 144095118b9e..70875a71dbf7 100644 --- a/clang/test/CIR/Lowering/binop-fp.cir +++ b/clang/test/CIR/Lowering/binop-fp.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir index 04564d9ec0dc..7fa76dd4c5b6 100644 --- a/clang/test/CIR/Lowering/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !u32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index ab58f792b6e5..2cfcf6a94f3e 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool @@ -13,7 +13,7 @@ module { } } -// MLIR: llvm.func @foo() { +// MLIR: llvm.func @foo() // MLIR-DAG: = llvm.mlir.constant(true) : i8 // MLIR-DAG: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64 // MLIR-DAG: = llvm.alloca [[Value]] x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr diff --git a/clang/test/CIR/Lowering/branch.cir b/clang/test/CIR/Lowering/branch.cir index 135fd79ed6a1..85581507eb2c 100644 --- a/clang/test/CIR/Lowering/branch.cir +++ b/clang/test/CIR/Lowering/branch.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !s32i = !cir.int cir.func @foo(%arg0: !cir.bool) -> !s32i { @@ -13,7 +13,7 @@ cir.func @foo(%arg0: !cir.bool) -> !s32i { } // MLIR: module { -// MLIR-NEXT: llvm.func @foo(%arg0: i8) -> i32 { +// MLIR-NEXT: llvm.func @foo(%arg0: i8) -> i32 // MLIR-NEXT: %0 = llvm.trunc %arg0 : i8 to i1 // MLIR-NEXT: llvm.cond_br %0, ^bb1, ^bb2 // MLIR-NEXT: ^bb1: // pred: ^bb0 diff --git a/clang/test/CIR/Lowering/call.cir b/clang/test/CIR/Lowering/call.cir index 01a97ec8c24b..62089cf3dbb5 100644 --- a/clang/test/CIR/Lowering/call.cir +++ b/clang/test/CIR/Lowering/call.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM module { cir.func @a() { @@ -10,10 +10,10 @@ module { cir.return } -// MLIR: llvm.func @a() { +// MLIR: llvm.func @a() // MLIR-NEXT: llvm.return // MLIR-NEXT: } -// MLIR-NEXT: llvm.func @d() { +// MLIR-NEXT: llvm.func @d() // MLIR-NEXT: llvm.call @a() : () -> () // MLIR-NEXT: llvm.return // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index bff61e541701..3f7baa34969a 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !s16i = !cir.int !s32i = !cir.int !s64i = !cir.int @@ -14,7 +14,7 @@ module { cir.return %arg0 : !s32i } -// MLIR: llvm.func @foo(%arg0: i32) -> i32 { +// MLIR: llvm.func @foo(%arg0: i32) -> i32 // MLIR-NEXT: [[v0:%[0-9]]] = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: [[v1:%[0-9]]] = llvm.icmp "ne" %arg0, %0 : i32 // MLIR-NEXT: [[v2:%[0-9]]] = llvm.zext %1 : i1 to i8 diff --git a/clang/test/CIR/Lowering/cmp.cir b/clang/test/CIR/Lowering/cmp.cir index 06a24cf56308..2dbe7bbd3b55 100644 --- a/clang/test/CIR/Lowering/cmp.cir +++ b/clang/test/CIR/Lowering/cmp.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 238dcdc9abde..a709dd91e10f 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -57,7 +57,7 @@ module { } // MLIR: module { -// MLIR-NEXT: llvm.func @dot(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: i32) -> f64 { +// MLIR-NEXT: llvm.func @dot(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: i32) -> f64 // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64 diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index c6e2486f1454..27a833f34b6d 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !s16i = !cir.int !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index b6f22409951d..2f6b73b7facf 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !u32i = !cir.int diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 8603eefef19d..5dc521469721 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -22,7 +22,7 @@ module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.sign // CHECK: llvm.func @printf(!llvm.ptr, ...) -> i32 // CHECK: llvm.mlir.global internal constant @".str"("Hello, world!\0A\00") {addr_space = 0 : i32} -// CHECK: llvm.func @main() -> i32 { +// CHECK: llvm.func @main() -> i32 // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 // CHECK: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // CHECK: %2 = llvm.mlir.addressof @".str" : !llvm.ptr diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index f70460347a5e..820a0bdebd20 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { @@ -15,7 +15,7 @@ module { cir.return %arg0 : !s32i } -// MLIR: llvm.func @foo(%arg0: i32) -> i32 { +// MLIR: llvm.func @foo(%arg0: i32) -> i32 // MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %1 = llvm.icmp "ne" %arg0, %0 : i32 // MLIR-NEXT: %2 = llvm.zext %1 : i1 to i8 @@ -56,7 +56,7 @@ module { cir.return %arg0 : !s32i } - // MLIR: llvm.func @onlyIf(%arg0: i32) -> i32 { + // MLIR: llvm.func @onlyIf(%arg0: i32) -> i32 // MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %1 = llvm.icmp "ne" %arg0, %0 : i32 // MLIR-NEXT: %2 = llvm.zext %1 : i1 to i8 diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index adf19ac9f266..81bc7bd79668 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !u32i = !cir.int module { @@ -13,7 +13,7 @@ module { } // MLIR: module { -// MLIR-NEXT: func @foo() -> i32 { +// MLIR-NEXT: func @foo() -> i32 // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index e0a0d9840243..b48e84003057 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -29,7 +29,7 @@ module { } // MLIR: module { -// MLIR-NEXT: llvm.func @testFor() { +// MLIR-NEXT: llvm.func @testFor() // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 @@ -92,7 +92,7 @@ module { cir.return } - // MLIR: llvm.func @testWhile(%arg0: i32) { + // MLIR: llvm.func @testWhile(%arg0: i32) // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr @@ -152,7 +152,7 @@ module { cir.return } - // MLIR: llvm.func @testDoWhile(%arg0: i32) { + // MLIR: llvm.func @testDoWhile(%arg0: i32) // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index 7010302ac88d..107dddcb41e7 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @f(%arg0: !cir.ptr) { @@ -14,7 +14,7 @@ module { } // MLIR: module { -// MLIR-NEXT: llvm.func @f(%arg0: !llvm.ptr) { +// MLIR-NEXT: llvm.func @f(%arg0: !llvm.ptr) // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.store %arg0, %1 : !llvm.ptr, !llvm.ptr diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index 993571b5d625..c4eb9a328631 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !u32i = !cir.int module { @@ -12,7 +12,7 @@ module { cir.return } -// MLIR: llvm.func @foo() { +// MLIR: llvm.func @foo() // MLIR-NEXT: llvm.br ^bb1 // MLIR-NEXT: ^bb1: // MLIR-DAG: [[v1:%[0-9]]] = llvm.mlir.constant(4 : i32) : i32 @@ -43,7 +43,7 @@ module { } cir.return } - // MLIR: llvm.func @empty_scope() { + // MLIR: llvm.func @empty_scope() // MLIR-NEXT: llvm.return // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/tenary.cir b/clang/test/CIR/Lowering/tenary.cir index 9f5149342f99..fb97bc4d6502 100644 --- a/clang/test/CIR/Lowering/tenary.cir +++ b/clang/test/CIR/Lowering/tenary.cir @@ -23,7 +23,7 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { } } -// MLIR: llvm.func @_Z1xi(%arg0: i32) -> i32 { +// MLIR: llvm.func @_Z1xi(%arg0: i32) -> i32 // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64 diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir index 0f484aafee69..b879470745ac 100644 --- a/clang/test/CIR/Lowering/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @foo() { @@ -51,7 +51,7 @@ module { cir.store %7, %1 : f64, cir.ptr // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(1.000000e+00 : f64) : f64 // MLIR: = llvm.fadd %[[#D_ONE]], %{{[0-9]+}} : f64 - + %8 = cir.load %1 : cir.ptr , f64 %9 = cir.unary(dec, %8) : f64, f64 cir.store %9, %1 : f64, cir.ptr diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index c4265252a0ae..e1fc2c9ec745 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @foo() -> !s32i { diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index 48d4f3d62b3c..660b0fc6adb6 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -1,5 +1,5 @@ // RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @foo() { @@ -29,7 +29,7 @@ module { cir.func @floatingPoints(%arg0: f64) { - // MLIR: llvm.func @floatingPoints(%arg0: f64) { + // MLIR: llvm.func @floatingPoints(%arg0: f64) %0 = cir.alloca f64, cir.ptr , ["X", init] {alignment = 8 : i64} cir.store %arg0, %0 : f64, cir.ptr %1 = cir.load %0 : cir.ptr , f64 diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index 61bd42a77cb8..d1e0d7614489 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -16,7 +16,7 @@ void foo(void) {} // CIR: module {{.*}} { -// CIR-NEXT: cir.func @foo() { +// CIR-NEXT: cir.func @foo() // CIR-NEXT: cir.return // CIR-NEXT: } // CIR-NEXT: } From f8a36ba6f68b494b638af4dda09bcb8bd639baf9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 7 Jul 2023 18:40:51 -0700 Subject: [PATCH 1065/2301] [CIR][CIRGen] Fix previous added flag and add testcase --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/Frontend/CompilerInvocation.cpp | 4 ++++ clang/test/CIR/CodeGen/build-deferred.cpp | 27 +++++++++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/build-deferred.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 0aa1f336a121..b94c005b983f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2151,6 +2151,7 @@ void CIRGenModule::buildDeferred(unsigned recursionLimit) { CurDeclsToEmit.swap(DeferredDeclsToEmit); if (recursionLimit == 0) return; + recursionLimit--; for (auto &D : CurDeclsToEmit) { buildGlobalDecl(D); @@ -2159,7 +2160,6 @@ void CIRGenModule::buildDeferred(unsigned recursionLimit) { // This has the advantage that the decls are emitted in a DFS and related // ones are close together, which is convenient for testing. if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) { - recursionLimit--; buildDeferred(recursionLimit); assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty()); } diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index c85817b29089..38a8eb4ea1df 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -1650,6 +1650,10 @@ void CompilerInvocationBase::GenerateCodeGenArgs(const CodeGenOptions &Opts, if (Opts.NewStructPathTBAA) GenerateArg(Consumer, OPT_new_struct_path_tbaa); + if (Opts.ClangIRBuildDeferredThreshold) + GenerateArg(Consumer, OPT_fclangir_disable_deferred_EQ, + Twine(Opts.ClangIRBuildDeferredThreshold)); + if (Opts.OptimizeSize == 1) GenerateArg(Consumer, OPT_O, "s"); else if (Opts.OptimizeSize == 2) diff --git a/clang/test/CIR/CodeGen/build-deferred.cpp b/clang/test/CIR/CodeGen/build-deferred.cpp new file mode 100644 index 000000000000..f1f1ef4c907e --- /dev/null +++ b/clang/test/CIR/CodeGen/build-deferred.cpp @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fclangir-build-deferred-threshold=0 %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +class String { + char *storage{nullptr}; + long size; + long capacity; + +public: + String() : size{0} {} + String(int size) : size{size} {} + String(const char *s) {} +}; + +void test() { + String s1{}; + String s2{1}; + String s3{"abcdefghijklmnop"}; +} + +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2Ev +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2Ei +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2EPKc +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC1EPKc + +// CHECK: cir.func @_Z4testv() +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file From 0a790f98dcb65e22e7ca66119c6239c8a414f181 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 7 Jul 2023 19:03:49 -0700 Subject: [PATCH 1066/2301] [CIR][CIRGen] -fclangir-skip-system-headers: internal codegen flag to assist clangir dev --- clang/include/clang/Basic/CodeGenOptions.def | 5 +++++ clang/include/clang/Driver/Options.td | 4 ++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 7 +++++++ clang/lib/Frontend/CompilerInvocation.cpp | 3 +++ .../skip-functions-from-system-headers.cpp | 18 ++++++++++++++++++ clang/test/CIR/Inputs/skip-this-header.h | 12 ++++++++++++ 6 files changed, 49 insertions(+) create mode 100644 clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp create mode 100644 clang/test/CIR/Inputs/skip-this-header.h diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def index 499fd493d2f7..3a8e49b1320b 100644 --- a/clang/include/clang/Basic/CodeGenOptions.def +++ b/clang/include/clang/Basic/CodeGenOptions.def @@ -470,6 +470,11 @@ CODEGENOPT(CtorDtorReturnThis, 1, 0) /// CIRGen is mature we should probably remove it. VALUE_CODEGENOPT(ClangIRBuildDeferredThreshold, 32, 500) +/// ClangIR specific (internal): Only build deferred functions not coming from +/// system headers. This helps incremental progress while building large C++ +/// TUs, once CIRGen is mature we should probably remove it. +CODEGENOPT(ClangIRSkipFunctionsFromSystemHeaders, 1, 0) + #undef CODEGENOPT #undef ENUM_CODEGENOPT #undef VALUE_CODEGENOPT diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 6d260aade53d..463694c5abd7 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3076,6 +3076,10 @@ def fclangir_disable_deferred_EQ : Joined<["-"], "fclangir-build-deferred-thresh Visibility<[ClangOption, CC1Option]>, Group, HelpText<"ClangIR (internal): Control the recursion level for calls to buildDeferred (defaults to 500)">, MarshallingInfoInt, "500u">; +def fclangir_skip_system_headers : Joined<["-"], "fclangir-skip-system-headers">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"ClangIR (internal): buildDeferred skip functions defined in system headers">, + MarshallingInfoFlag>; def clangir_verify_diagnostics : Flag<["-"], "clangir-verify-diagnostics">, Visibility<[ClangOption, CC1Option]>, HelpText<"ClangIR: Enable diagnostic verification in MLIR, similar to clang's -verify">, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index b94c005b983f..a39c4b99498b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2154,6 +2154,13 @@ void CIRGenModule::buildDeferred(unsigned recursionLimit) { recursionLimit--; for (auto &D : CurDeclsToEmit) { + if (getCodeGenOpts().ClangIRSkipFunctionsFromSystemHeaders) { + auto *decl = D.getDecl(); + assert(decl && "expected decl"); + if (astCtx.getSourceManager().isInSystemHeader(decl->getLocation())) + continue; + } + buildGlobalDecl(D); // If we found out that we need to emit more decls, do that recursively. diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 38a8eb4ea1df..c24dc33e5262 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -1654,6 +1654,9 @@ void CompilerInvocationBase::GenerateCodeGenArgs(const CodeGenOptions &Opts, GenerateArg(Consumer, OPT_fclangir_disable_deferred_EQ, Twine(Opts.ClangIRBuildDeferredThreshold)); + if (Opts.ClangIRSkipFunctionsFromSystemHeaders) + GenerateArg(Consumer, OPT_fclangir_skip_system_headers); + if (Opts.OptimizeSize == 1) GenerateArg(Consumer, OPT_O, "s"); else if (Opts.OptimizeSize == 2) diff --git a/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp b/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp new file mode 100644 index 000000000000..bb1bebe33edb --- /dev/null +++ b/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fclangir-skip-system-headers -I%S/../Inputs %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "skip-this-header.h" + +void test() { + String s1{}; + String s2{1}; + String s3{"abcdefghijklmnop"}; +} + +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2Ev +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2Ei +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC2EPKc +// CHECK-NOT: cir.func linkonce_odr @_ZN6StringC1EPKc + +// CHECK: cir.func @_Z4testv() +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file diff --git a/clang/test/CIR/Inputs/skip-this-header.h b/clang/test/CIR/Inputs/skip-this-header.h new file mode 100644 index 000000000000..bf94a9cfeb94 --- /dev/null +++ b/clang/test/CIR/Inputs/skip-this-header.h @@ -0,0 +1,12 @@ +#pragma clang system_header + +class String { + char *storage{nullptr}; + long size; + long capacity; + +public: + String() : size{0} {} + String(int size) : size{size} {} + String(const char *s) {} +}; \ No newline at end of file From d383909d312e7896d6fabdaf8676f4318f240866 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 10 Jul 2023 14:54:30 -0700 Subject: [PATCH 1067/2301] [CIR][CIRGen] Add cir.shift and remove kinds from cir.binop cir.binop has the constraints on operands and results types than cannot support shifts with amount types different from the value getting shift. Adding cir.shift is a closer map. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 45 ++++++++++--- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 11 ++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 66 ++++++++++++------- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 22 +++---- clang/test/CIR/CodeGen/binassign.cpp | 4 +- clang/test/CIR/CodeGen/binop.cpp | 4 +- clang/test/CIR/CodeGen/shift.cpp | 8 +++ .../ThroughMLIR/binop-unsigned-int.cir | 26 ++++---- clang/test/CIR/Lowering/binop-signed-int.cir | 4 +- .../test/CIR/Lowering/binop-unsigned-int.cir | 4 +- 10 files changed, 126 insertions(+), 68 deletions(-) create mode 100644 clang/test/CIR/CodeGen/shift.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 4444808638dd..ca0013cbc783 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -787,8 +787,6 @@ def BinOpKind_Div : I32EnumAttrCase<"Div", 2, "div">; def BinOpKind_Rem : I32EnumAttrCase<"Rem", 3, "rem">; def BinOpKind_Add : I32EnumAttrCase<"Add", 4, "add">; def BinOpKind_Sub : I32EnumAttrCase<"Sub", 5, "sub">; -def BinOpKind_Shl : I32EnumAttrCase<"Shl", 6, "shl">; -def BinOpKind_Shr : I32EnumAttrCase<"Shr", 7, "shr">; def BinOpKind_And : I32EnumAttrCase<"And", 8, "and">; def BinOpKind_Xor : I32EnumAttrCase<"Xor", 9, "xor">; def BinOpKind_Or : I32EnumAttrCase<"Or", 10, "or">; @@ -797,8 +795,8 @@ def BinOpKind : I32EnumAttr< "BinOpKind", "binary operation (arith and logic) kind", [BinOpKind_Mul, BinOpKind_Div, BinOpKind_Rem, - BinOpKind_Add, BinOpKind_Sub, BinOpKind_Shl, - BinOpKind_Shr, BinOpKind_And, BinOpKind_Xor, + BinOpKind_Add, BinOpKind_Sub, + BinOpKind_And, BinOpKind_Xor, BinOpKind_Or]> { let cppNamespace = "::mlir::cir"; } @@ -810,15 +808,15 @@ def BinOp : CIR_Op<"binop", [Pure, let summary = "Binary operations (arith and logic)"; let description = [{ cir.binop performs the binary operation according to - the specified opcode kind: [mul, div, rem, add, sub, shl, - shr, and, xor, or]. + the specified opcode kind: [mul, div, rem, add, sub, + and, xor, or]. It requires two input operands and has one result, all types should be the same. ```mlir - %7 = binop(add, %1, %2) : i32 - %7 = binop(mul, %1, %2) : i8 + %7 = cir.binop(add, %1, %2) : !s32i + %7 = cir.binop(mul, %1, %2) : !u8i ``` }]; @@ -835,6 +833,37 @@ def BinOp : CIR_Op<"binop", [Pure, let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// ShiftOp +//===----------------------------------------------------------------------===// + +def ShiftOp : CIR_Op<"shift", [Pure]> { + let summary = "Shift"; + let description = [{ + Shift `left` or `right`, according to the first operand. Second operand is + the shift target and the third the amount. + + ```mlir + %7 = cir.shift(left, %1 : !u64i, %4 : !s32i) -> !u64i + ``` + }]; + + let results = (outs CIR_IntType:$result); + let arguments = (ins CIR_IntType:$value, CIR_IntType:$amount, + UnitAttr:$isShiftleft); + + let assemblyFormat = [{ + `(` + (`left` $isShiftleft^) : (`right`)? + `,` $value `:` type($value) + `,` $amount `:` type($amount) + `)` `->` type($result) attr-dict + }]; + + // Already covered by the traits + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // CmpOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 0adaa6ca95eb..d64a647a05ec 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1096,14 +1096,13 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { CGF.PtrDiffTy, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shl, - Ops.LHS, Ops.RHS); + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), Ops.LHS, Ops.RHS, + CGF.getBuilder().getUnitAttr()); } mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Shr, - Ops.LHS, Ops.RHS); + return Builder.create( + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildAnd(const BinOpInfo &Ops) { return Builder.create( diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7aa5ae5ac7d1..7dc2a7698638 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1100,17 +1100,36 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { case mlir::cir::BinOpKind::Xor: rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; - case mlir::cir::BinOpKind::Shl: - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - break; - case mlir::cir::BinOpKind::Shr: - if (auto ty = type.dyn_cast()) { - if (ty.isUnsigned()) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - break; - } + } + + return mlir::LogicalResult::success(); + } +}; + +class CIRShiftOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ShiftOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert((op.getValue().getType() == op.getResult().getType()) && + "inconsistent operands' types not supported yet"); + auto ty = op.getValue().getType().dyn_cast(); + assert(ty && "NYI for other than mlir::cir::IntType"); + + auto llvmTy = getTypeConverter()->convertType(op.getType()); + auto val = adaptor.getValue(); + auto amt = adaptor.getAmount(); + + if (op.getIsShiftleft()) + rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); + else { + if (ty.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); + else + rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); } return mlir::LogicalResult::success(); @@ -1284,21 +1303,22 @@ class CIRStructElementAddrOpLowering void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns.add< - CIRCmpOpLowering, CIRLoopOpLowering, CIRBrCondOpLowering, - CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, - CIRBinOpLowering, CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, - CIRAllocaLowering, CIRFuncLowering, CIRScopeOpLowering, CIRCastOpLowering, - CIRIfLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, - CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, - CIRBrOpLowering, CIRTernaryOpLowering, CIRStructElementAddrOpLowering>( - converter, patterns.getContext()); + patterns.add(converter, + patterns.getContext()); } namespace { void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { - return mlir::LLVM::LLVMPointerType::get(type.getContext()); + return mlir::LLVM::LLVMPointerType::get(&converter.getContext()); }); converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { auto ty = converter.convertType(type.getEltType()); @@ -1397,8 +1417,8 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, pm.addPass(mlir::LLVM::createDIScopeForLLVMFuncOpPass()); // FIXME(cir): this shouldn't be necessary. It's meant to be a temporary - // workaround until we understand why some unrealized casts are being emmited - // and how to properly avoid them. + // workaround until we understand why some unrealized casts are being + // emmited and how to properly avoid them. pm.addPass(mlir::createReconcileUnrealizedCastsPass()); (void)mlir::applyPassManagerCLOptions(pm); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index ae4dbca56fe6..8e84b9a11530 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -332,17 +332,17 @@ class CIRBinOpLowering : public mlir::OpRewritePattern { rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); break; - case mlir::cir::BinOpKind::Shl: - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); - break; - case mlir::cir::BinOpKind::Shr: - if (type.isSignlessInteger()) - rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); - else - llvm_unreachable("integer type not supported in CIR yet"); - break; + // case mlir::cir::BinOpKind::Shl: + // rewriter.replaceOpWithNewOp( + // op, op.getType(), op.getLhs(), op.getRhs()); + // break; + // case mlir::cir::BinOpKind::Shr: + // if (type.isSignlessInteger()) + // rewriter.replaceOpWithNewOp( + // op, op.getType(), op.getLhs(), op.getRhs()); + // else + // llvm_unreachable("integer type not supported in CIR yet"); + // break; } return mlir::LogicalResult::success(); diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp index 65cfca7eac20..a0a029ef7e9a 100644 --- a/clang/test/CIR/CodeGen/binassign.cpp +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -34,10 +34,10 @@ int foo(int a, int b) { // CHECK: = cir.binop(sub, // CHECK: cir.store {{.*}}[[Value]] // CHECK: = cir.load {{.*}}[[Value]] -// CHECK: = cir.binop(shr, +// CHECK: = cir.shift( right // CHECK: cir.store {{.*}}[[Value]] // CHECK: = cir.load {{.*}}[[Value]] -// CHECK: = cir.binop(shl, +// CHECK: = cir.shift(left // CHECK: cir.store {{.*}}[[Value]] // CHECK: = cir.load {{.*}}[[Value]] // CHECK: = cir.binop(and, diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index 0760a1052ee8..6436e4582901 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -19,8 +19,8 @@ void b0(int a, int b) { // CHECK: = cir.binop(rem, %9, %10) : !s32i // CHECK: = cir.binop(add, %12, %13) : !s32i // CHECK: = cir.binop(sub, %15, %16) : !s32i -// CHECK: = cir.binop(shr, %18, %19) : !s32i -// CHECK: = cir.binop(shl, %21, %22) : !s32i +// CHECK: = cir.shift( right, %18 : !s32i, %19 : !s32i) -> !s32i +// CHECK: = cir.shift(left, %21 : !s32i, %22 : !s32i) -> !s32i // CHECK: = cir.binop(and, %24, %25) : !s32i // CHECK: = cir.binop(xor, %27, %28) : !s32i // CHECK: = cir.binop(or, %30, %31) : !s32i diff --git a/clang/test/CIR/CodeGen/shift.cpp b/clang/test/CIR/CodeGen/shift.cpp new file mode 100644 index 000000000000..6f6a10d34ab0 --- /dev/null +++ b/clang/test/CIR/CodeGen/shift.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +unsigned long s(int i, unsigned long x) { + return x << i; +} + +// CHECK: cir.shift(left, %3 : !u64i, %4 : !s32i) -> !u64i \ No newline at end of file diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir index 86cde3d35256..0fbcfc8f2e81 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir @@ -28,14 +28,16 @@ module { %18 = cir.load %1 : cir.ptr , i32 %19 = cir.binop(sub, %17, %18) : i32 cir.store %19, %2 : i32, cir.ptr - %20 = cir.load %2 : cir.ptr , i32 - %21 = cir.load %1 : cir.ptr , i32 - %22 = cir.binop(shr, %20, %21) : i32 - cir.store %22, %2 : i32, cir.ptr - %23 = cir.load %2 : cir.ptr , i32 - %24 = cir.load %1 : cir.ptr , i32 - %25 = cir.binop(shl, %23, %24) : i32 - cir.store %25, %2 : i32, cir.ptr + // should move to cir.shift, which only accepts + // CIR types. + // %20 = cir.load %2 : cir.ptr , i32 + // %21 = cir.load %1 : cir.ptr , i32 + // %22 = cir.binop(shr, %20, %21) : i32 + // cir.store %22, %2 : i32, cir.ptr + // %23 = cir.load %2 : cir.ptr , i32 + // %24 = cir.load %1 : cir.ptr , i32 + // %25 = cir.binop(shl, %23, %24) : i32 + // cir.store %25, %2 : i32, cir.ptr %26 = cir.load %2 : cir.ptr , i32 %27 = cir.load %1 : cir.ptr , i32 %28 = cir.binop(and, %26, %27) : i32 @@ -57,8 +59,8 @@ module { // MLIR: = arith.remui // MLIR: = arith.addi // MLIR: = arith.subi -// MLIR: = arith.shrui -// MLIR: = arith.shli +// arith.shrui +// arith.shli // MLIR: = arith.andi // MLIR: = arith.xori // MLIR: = arith.ori @@ -68,8 +70,8 @@ module { // LLVM: = urem i32 // LLVM: = add i32 // LLVM: = sub i32 -// LLVM: = lshr i32 -// LLVM: = shl i32 +// = lshr i32 +// = shl i32 // LLVM: = and i32 // LLVM: = xor i32 // LLVM: = or i32 diff --git a/clang/test/CIR/Lowering/binop-signed-int.cir b/clang/test/CIR/Lowering/binop-signed-int.cir index 8adc8a1e1e3e..157aed2c9505 100644 --- a/clang/test/CIR/Lowering/binop-signed-int.cir +++ b/clang/test/CIR/Lowering/binop-signed-int.cir @@ -36,12 +36,12 @@ module { cir.store %19, %2 : !s32i, cir.ptr %20 = cir.load %2 : cir.ptr , !s32i %21 = cir.load %1 : cir.ptr , !s32i - %22 = cir.binop(shr, %20, %21) : !s32i + %22 = cir.shift(right, %20 : !s32i, %21 : !s32i) -> !s32i // CHECK: = llvm.ashr cir.store %22, %2 : !s32i, cir.ptr %23 = cir.load %2 : cir.ptr , !s32i %24 = cir.load %1 : cir.ptr , !s32i - %25 = cir.binop(shl, %23, %24) : !s32i + %25 = cir.shift(left, %23 : !s32i, %24 : !s32i) -> !s32i // CHECK: = llvm.shl cir.store %25, %2 : !s32i, cir.ptr %26 = cir.load %2 : cir.ptr , !s32i diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir index 7fa76dd4c5b6..5efa81937094 100644 --- a/clang/test/CIR/Lowering/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -31,11 +31,11 @@ module { cir.store %19, %2 : !u32i, cir.ptr %20 = cir.load %2 : cir.ptr , !u32i %21 = cir.load %1 : cir.ptr , !u32i - %22 = cir.binop(shr, %20, %21) : !u32i + %22 = cir.shift(right, %20 : !u32i, %21 : !u32i) -> !u32i cir.store %22, %2 : !u32i, cir.ptr %23 = cir.load %2 : cir.ptr , !u32i %24 = cir.load %1 : cir.ptr , !u32i - %25 = cir.binop(shl, %23, %24) : !u32i + %25 = cir.shift(left, %23 : !u32i, %24 : !u32i) -> !u32i cir.store %25, %2 : !u32i, cir.ptr %26 = cir.load %2 : cir.ptr , !u32i %27 = cir.load %1 : cir.ptr , !u32i From 715e1d6c8da436c282398d438781bdfdf3e77be2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 10 Jul 2023 15:12:18 -0700 Subject: [PATCH 1068/2301] [CIR][CIRGen] Refactor buildShr and buildShl and highlight codegen differences --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 47 +++++++++++++++++++ .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 11 ----- 2 files changed, 47 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index d64a647a05ec..6ab2717437c1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1095,15 +1095,62 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), CGF.PtrDiffTy, Ops.LHS, Ops.RHS); } + mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { + // TODO: This misses out on the sanitizer check below. + if (Ops.isFixedPointOp()) + llvm_unreachable("NYI"); + + // CIR accepts shift between different types, meaning nothing special + // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type: + // promote or truncate the RHS to the same size as the LHS. + + bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && + Ops.Ty->hasSignedIntegerRepresentation() && + !CGF.getLangOpts().isSignedOverflowDefined() && + !CGF.getLangOpts().CPlusPlus20; + bool SanitizeUnsignedBase = + CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) && + Ops.Ty->hasUnsignedIntegerRepresentation(); + bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase; + bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); + + // OpenCL 6.3j: shift values are effectively % word size of LHS. + if (CGF.getLangOpts().OpenCL) + llvm_unreachable("NYI"); + else if ((SanitizeBase || SanitizeExponent) && + Ops.LHS.getType().isa()) { + llvm_unreachable("NYI"); + } + return Builder.create( CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), Ops.LHS, Ops.RHS, CGF.getBuilder().getUnitAttr()); } + mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) { + // TODO: This misses out on the sanitizer check below. + if (Ops.isFixedPointOp()) + llvm_unreachable("NYI"); + + // CIR accepts shift between different types, meaning nothing special + // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type: + // promote or truncate the RHS to the same size as the LHS. + + // OpenCL 6.3j: shift values are effectively % word size of LHS. + if (CGF.getLangOpts().OpenCL) + llvm_unreachable("NYI"); + else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && + Ops.LHS.getType().isa()) { + llvm_unreachable("NYI"); + } + + // Note that we don't need to distinguish unsigned treatment at this + // point since it will be handled later by LLVM lowering. return Builder.create( CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), Ops.LHS, Ops.RHS); } + mlir::Value ScalarExprEmitter::buildAnd(const BinOpInfo &Ops) { return Builder.create( CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::And, diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 8e84b9a11530..d00a0442cc3a 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -332,17 +332,6 @@ class CIRBinOpLowering : public mlir::OpRewritePattern { rewriter.replaceOpWithNewOp( op, op.getType(), op.getLhs(), op.getRhs()); break; - // case mlir::cir::BinOpKind::Shl: - // rewriter.replaceOpWithNewOp( - // op, op.getType(), op.getLhs(), op.getRhs()); - // break; - // case mlir::cir::BinOpKind::Shr: - // if (type.isSignlessInteger()) - // rewriter.replaceOpWithNewOp( - // op, op.getType(), op.getLhs(), op.getRhs()); - // else - // llvm_unreachable("integer type not supported in CIR yet"); - // break; } return mlir::LogicalResult::success(); From 7b0b2c4f5785d21346817373d1735949c707d631 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 12 Jul 2023 10:23:33 -0300 Subject: [PATCH 1069/2301] [CIR][CIRGen] Generate zero initializer for pointers and structs ghstack-source-id: 9d59b38339fc8c8d6ddce8b4bdb3f00f450c678b Pull Request resolved: https://github.com/llvm/clangir/pull/162 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 ++++ clang/test/CIR/CodeGen/pointer.cpp | 6 ++++++ clang/test/CIR/CodeGen/struct.c | 6 ++++-- 3 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/pointer.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 674c4c162b66..2f95b16f6e9f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -161,6 +161,10 @@ class CIRGenBuilderTy : public mlir::OpBuilder { values.push_back(zero); return getConstArray(mlir::ArrayAttr::get(getContext(), values), arrTy); } + if (auto ptrTy = ty.dyn_cast()) + return getNullPtrAttr(ptrTy); + if (auto structTy = ty.dyn_cast()) + return getZeroAttr(structTy); llvm_unreachable("Zero initializer for given type is NYI"); } diff --git a/clang/test/CIR/CodeGen/pointer.cpp b/clang/test/CIR/CodeGen/pointer.cpp new file mode 100644 index 000000000000..2ac11cd42e32 --- /dev/null +++ b/clang/test/CIR/CodeGen/pointer.cpp @@ -0,0 +1,6 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Global pointer should be zero initialized by default. +int *ptr; +// CHECK: cir.global external @ptr = #cir.null : !cir.ptr diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 5e25e6ebecc4..cc3107d02849 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -4,7 +4,7 @@ struct Bar { int a; char b; -}; +} bar; struct Foo { int a; @@ -25,4 +25,6 @@ void baz(void) { // CHECK-NEXT: %1 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK-NEXT: } + +// Check if global structs are zero-initialized. +// CHECK: cir.global external @bar = #cir.zero : !ty_22struct2EBar22 From 9ca11bc7e201a20cf5acdb8b400cd6d64ddfd0cb Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 12 Jul 2023 10:23:34 -0300 Subject: [PATCH 1070/2301] [CIR][Lowering] Lower #cir.zero and #cir.null attributes LLVM's dialect does not have (at the time of writing) a way to represent zero-initializers. And, despite being possible to represent null pointers, it requires it to use a region-based initialization. To avoid this, LLVM operations that use either of these attributes are marked with a cir.zero attribute that will be identified by the LowerAttrToLLVMIR interface and patch the LLVM IR operation to be initialized with a zero-initializer or null. ghstack-source-id: 1872476a03ba9b1a75fe347b50f0552fef2e478b Pull Request resolved: https://github.com/llvm/clangir/pull/163 --- .../DirectToLLVM/LowerAttrToLLVMIR.cpp | 18 +++++++++++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11 +++++++++++ clang/test/CIR/CodeGen/pointer.cpp | 4 ++++ clang/test/CIR/CodeGen/struct.c | 4 ++++ 4 files changed, 36 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp index 1416e0f91d85..c513ee344d3c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp @@ -17,6 +17,8 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/GlobalVariable.h" using namespace llvm; @@ -36,7 +38,18 @@ class CIRDialectLLVMIRTranslationInterface mlir::Operation *op, llvm::ArrayRef instructions, mlir::NamedAttribute attribute, mlir::LLVM::ModuleTranslation &moduleTranslation) const override { - // TODO: Implement this + // Translate CIR's zero attribute to LLVM's zero initializer. + if (isa(attribute.getValue())) { + if (llvm::isa(op)) { + auto *globalVal = llvm::cast( + moduleTranslation.lookupGlobal(op)); + globalVal->setInitializer( + llvm::Constant::getNullValue(globalVal->getValueType())); + } else + return op->emitError("#cir.zero not supported"); + } + + // Translate CIR's extra function attributes to LLVM's function attributes. auto func = dyn_cast(op); if (!func) return mlir::success(); @@ -56,6 +69,9 @@ class CIRDialectLLVMIRTranslationInterface } } } + + // Drop ammended CIR attribute from LLVM op. + op->removeAttr(attribute.getName()); return mlir::success(); } }; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7dc2a7698638..68d3d0282b22 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -918,6 +918,17 @@ class CIRGlobalOpLowering addressOfOp.getResult(), offset); rewriter.create(op->getLoc(), gepOp.getResult()); + return mlir::success(); + } else if (isa(init.value())) { + // TODO(cir): once LLVM's dialect has a proper zeroinitializer attribute + // this should be updated. For now, we tag the LLVM global with a cir.zero + // attribute that is later replaced with a zeroinitializer. Null pointers + // also use this path for simplicity, as we would otherwise require a + // region-based initialization for the global op. + auto llvmGlobalOp = rewriter.replaceOpWithNewOp( + op, llvmType, isConst, linkage, symbol, nullptr); + auto cirZeroAttr = mlir::cir::ZeroAttr::get(getContext(), llvmType); + llvmGlobalOp->setAttr("cir.initial_value", cirZeroAttr); return mlir::success(); } else { op.emitError() << "usupported initializer '" << init.value() << "'"; diff --git a/clang/test/CIR/CodeGen/pointer.cpp b/clang/test/CIR/CodeGen/pointer.cpp index 2ac11cd42e32..06245f06b32b 100644 --- a/clang/test/CIR/CodeGen/pointer.cpp +++ b/clang/test/CIR/CodeGen/pointer.cpp @@ -1,6 +1,10 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// FIXME(cir): Move the test below to lowering and us a separate tool to lower from CIR to LLVM IR. +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s // Global pointer should be zero initialized by default. int *ptr; // CHECK: cir.global external @ptr = #cir.null : !cir.ptr +// LLVM: @ptr = global ptr null diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index cc3107d02849..a6c3d8736c58 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -1,5 +1,8 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// FIXME(cir): Move the test below to lowering and us a separate tool to lower from CIR to LLVM IR. +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s struct Bar { int a; @@ -28,3 +31,4 @@ void baz(void) { // Check if global structs are zero-initialized. // CHECK: cir.global external @bar = #cir.zero : !ty_22struct2EBar22 +// LLVM: @bar = global %struct.Bar zeroinitializer From d523c9a6238b721dbc6efdcbbae77693e83789ce Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 13 Jul 2023 10:28:50 -0300 Subject: [PATCH 1071/2301] [CIR][LifetimeCheck] Detect aggregate categories on top of allocas Incremental work towards tracking exploded structs, no testcase just yet. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 58 ++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 11a30ba80b77..b56e26521480 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -301,6 +301,14 @@ struct LifetimeCheckPass : public LifetimeCheckBase { owners[o]++; } + // Aggregates and exploded fields. + using ExplodedFieldsTy = llvm::SmallSet; + DenseMap aggregates; + void addAggregate(mlir::Value a, SmallVectorImpl &fields) { + assert(!aggregates.count(a) && "already tracked"); + aggregates[a].insert(fields.begin(), fields.end()); + } + // Useful helpers for debugging void printPset(PSetType &pset, llvm::raw_ostream &OS = llvm::errs()); LLVM_DUMP_METHOD void dumpPmap(PMapType &pmap); @@ -852,6 +860,28 @@ static bool isOwnerType(mlir::Type ty) { return isStructAndHasAttr(ty); } +static bool containsPointerElts(mlir::cir::StructType s) { + auto members = s.getMembers(); + return std::any_of(members.begin(), members.end(), [](mlir::Type t) { + return t.isa(); + }); +} + +static bool isAggregateType(AllocaOp allocaOp) { + auto t = allocaOp.getAllocaType().dyn_cast(); + if (!t) + return false; + // FIXME: For now we handle this in a more naive way: any pointer + // element we find is enough to consider this an aggregate. But in + // reality it should be as defined in 2.1: + // + // An Aggregate is a type that is not an Indirection and is a class type with + // public data members none of which are references (& or &&) and no + // user-provided copy or move operations, and no base class that is not also + // an Aggregate. The elements of an Aggregate are its public data members. + return containsPointerElts(t); +} + static bool isPointerType(AllocaOp allocaOp) { // From 2.1: // @@ -909,6 +939,8 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { return TypeCategory::Pointer; if (isOwnerType(allocaOp.getAllocaType())) return TypeCategory::Owner; + if (isAggregateType(allocaOp)) + return TypeCategory::Aggregate; return TypeCategory::Value; }(); @@ -925,11 +957,35 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { getPmap()[addr].insert(State::getOwnedBy(addr)); currScope->localValues.insert(addr); break; + case TypeCategory::Aggregate: { + // 2.1 - Aggregates are types we will “explode” (consider memberwise) at + // local scopes, because the function can operate on the members directly. + + // Explode all pointer members. + SmallVector fields; + auto members = + allocaOp.getAllocaType().cast().getMembers(); + + unsigned fieldIdx = 0; + std::for_each(members.begin(), members.end(), [&](mlir::Type t) { + auto ptrType = t.dyn_cast(); + if (ptrType) + fields.push_back(fieldIdx); + fieldIdx++; + }); + addAggregate(addr, fields); + + // Differently from `TypeCategory::Pointer`, initialization for exploded + // pointer is done lazily, triggered whenever the relevant + // `cir.struct_element_addr` are seen. This also serves optimization + // purposes: only track fields that are actually seen. + break; + } case TypeCategory::Value: { // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. getPmap()[addr].insert(State::getLocalValue(addr)); currScope->localValues.insert(addr); - return; + break; } default: llvm_unreachable("NYI"); From 6557a6cbec3ca1bb7706e37ab51d777101cb9f8c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 13 Jul 2023 13:49:00 -0300 Subject: [PATCH 1072/2301] [CIR][LifetimeCheck][NFC] Factor out type category classification --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 35 +++++++++++-------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index b56e26521480..356826c7f3e1 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -53,6 +53,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkAwait(AwaitOp awaitOp); void checkReturn(ReturnOp retOp); + void classifyTypeCategories(mlir::Value addr, mlir::Type t, + mlir::Location loc); + // FIXME: classify tasks and lambdas prior to check ptr deref // and pass down an enum. void checkPointerDeref(mlir::Value addr, mlir::Location loc, @@ -867,8 +870,8 @@ static bool containsPointerElts(mlir::cir::StructType s) { }); } -static bool isAggregateType(AllocaOp allocaOp) { - auto t = allocaOp.getAllocaType().dyn_cast(); +static bool isAggregateType(mlir::Type agg) { + auto t = agg.dyn_cast(); if (!t) return false; // FIXME: For now we handle this in a more naive way: any pointer @@ -882,7 +885,7 @@ static bool isAggregateType(AllocaOp allocaOp) { return containsPointerElts(t); } -static bool isPointerType(AllocaOp allocaOp) { +static bool isPointerType(mlir::Type t) { // From 2.1: // // A Pointer is not an Owner and provides indirect access to an object it does @@ -914,14 +917,14 @@ static bool isPointerType(AllocaOp allocaOp) { // library headers, the following well- known standard types are treated as-if // annotated as Pointers, in addition to raw pointers and references: ref- // erence_wrapper, and vector::reference. - if (allocaOp.isPointerType()) + if (t.isa()) return true; - return isStructAndHasAttr(allocaOp.getAllocaType()); + return isStructAndHasAttr(t); } -void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { - auto addr = allocaOp.getAddr(); - assert(!getPmap().count(addr) && "only one alloca for any given address"); +void LifetimeCheckPass::classifyTypeCategories(mlir::Value addr, mlir::Type t, + mlir::Location loc) { + assert(!getPmap().count(addr) && "only one map entry for a given address"); getPmap()[addr] = {}; enum TypeCategory { @@ -935,11 +938,11 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { }; auto localStyle = [&]() { - if (isPointerType(allocaOp)) + if (isPointerType(t)) return TypeCategory::Pointer; - if (isOwnerType(allocaOp.getAllocaType())) + if (isOwnerType(t)) return TypeCategory::Owner; - if (isAggregateType(allocaOp)) + if (isAggregateType(t)) return TypeCategory::Aggregate; return TypeCategory::Value; }(); @@ -949,7 +952,7 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { // 2.4.2 - When a non-parameter non-member Pointer p is declared, add // (p, {invalid}) to pmap. ptrs.insert(addr); - markPsetInvalid(addr, InvalidStyle::NotInitialized, allocaOp.getLoc()); + markPsetInvalid(addr, InvalidStyle::NotInitialized, loc); break; case TypeCategory::Owner: // 2.4.2 - When a local Owner x is declared, add (x, {x__1'}) to pmap. @@ -963,8 +966,7 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { // Explode all pointer members. SmallVector fields; - auto members = - allocaOp.getAllocaType().cast().getMembers(); + auto members = t.cast().getMembers(); unsigned fieldIdx = 0; std::for_each(members.begin(), members.end(), [&](mlir::Type t) { @@ -992,6 +994,11 @@ void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { } } +void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { + classifyTypeCategories(allocaOp.getAddr(), allocaOp.getAllocaType(), + allocaOp.getLoc()); +} + void LifetimeCheckPass::checkCoroTaskStore(StoreOp storeOp) { // Given: // auto task = [init task]; From 9e20c2dc0216d7a37484efd1da53a1b224017e36 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Fri, 14 Jul 2023 09:28:17 -0700 Subject: [PATCH 1073/2301] [CIR] Introduce cir-translate to replace mlir-translate (#177) This change introduces `cir-translate` as a replacement of `mlir-translate` to convert CIR directly to LLVM IR. The main benefit of this is to utilize the CIR attribute interface to handle CIR-specific attributes such as `cir.extra`. Other advantages at this time, besides the cir attribute support, could be that we can go directly from CIR to LLVMIR without exposing the intermediate MLIR LLVM dialect form. Previously `cir-tool` emit the LLVM dialect form and `milr-translate` took it from there. Now `cir-translate` can directly take CIR and yield LLVMIR. I'm also renaming `cir-tool` to `cir-opt` which eventually would be just a CIR-to-CIR transformer, but for now I'm keeping the functionality of CIR to LLVM dialect. So, `cir-opt` will do all CIR-to-CIR transforms, just like LLVM `opt` or `mlir-opt`, and `cir-translate` will handle CIR to LLVMIR translation, and LLVMIR-to-LLVMIR transforms, like the LLVM `llc` or the `mlir-translate` --- clang/include/clang/CIR/LowerToLLVM.h | 1 - clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 3 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 +- clang/test/CIR/IR/array.cir | 2 +- clang/test/CIR/IR/branch.cir | 2 +- clang/test/CIR/IR/call.cir | 4 +- clang/test/CIR/IR/cast.cir | 2 +- clang/test/CIR/IR/cir-ops.cir | 2 +- clang/test/CIR/IR/func.cir | 4 +- clang/test/CIR/IR/global.cir | 2 +- clang/test/CIR/IR/inlineAttr.cir | 4 +- clang/test/CIR/IR/int.cir | 2 +- clang/test/CIR/IR/invalid.cir | 2 +- clang/test/CIR/IR/loop.cir | 4 +- clang/test/CIR/IR/module.cir | 2 +- clang/test/CIR/IR/ptr_stride.cir | 2 +- clang/test/CIR/IR/struct.cir | 2 +- clang/test/CIR/IR/switch.cir | 2 +- clang/test/CIR/IR/ternary.cir | 2 +- clang/test/CIR/IR/types.cir | 2 +- clang/test/CIR/Lowering/ThroughMLIR/array.cir | 4 +- .../CIR/Lowering/ThroughMLIR/binop-fp.cir | 4 +- .../ThroughMLIR/binop-unsigned-int.cir | 4 +- clang/test/CIR/Lowering/ThroughMLIR/bool.cir | 4 +- clang/test/CIR/Lowering/ThroughMLIR/cmp.cir | 4 +- clang/test/CIR/Lowering/ThroughMLIR/dot.cir | 2 +- clang/test/CIR/Lowering/ThroughMLIR/goto.cir | 4 +- .../test/CIR/Lowering/ThroughMLIR/memref.cir | 4 +- .../Lowering/ThroughMLIR/unary-inc-dec.cir | 4 +- .../Lowering/ThroughMLIR/unary-plus-minus.cir | 4 +- clang/test/CIR/Lowering/array.cir | 4 +- clang/test/CIR/Lowering/binop-fp.cir | 4 +- clang/test/CIR/Lowering/binop-signed-int.cir | 3 +- .../test/CIR/Lowering/binop-unsigned-int.cir | 4 +- clang/test/CIR/Lowering/bool.cir | 4 +- clang/test/CIR/Lowering/branch.cir | 6 +- clang/test/CIR/Lowering/call.cir | 4 +- clang/test/CIR/Lowering/cast.cir | 6 +- clang/test/CIR/Lowering/cmp.cir | 4 +- clang/test/CIR/Lowering/dot.cir | 2 +- clang/test/CIR/Lowering/func.cir | 2 +- clang/test/CIR/Lowering/globals.cir | 4 +- clang/test/CIR/Lowering/goto.cir | 4 +- clang/test/CIR/Lowering/hello.cir | 2 +- clang/test/CIR/Lowering/if.cir | 6 +- clang/test/CIR/Lowering/loadstorealloca.cir | 4 +- clang/test/CIR/Lowering/loop.cir | 2 +- clang/test/CIR/Lowering/ptrstride.cir | 6 +- clang/test/CIR/Lowering/scope.cir | 6 +- clang/test/CIR/Lowering/struct.cir | 2 +- clang/test/CIR/Lowering/tenary.cir | 2 +- clang/test/CIR/Lowering/unary-inc-dec.cir | 4 +- clang/test/CIR/Lowering/unary-not.cir | 4 +- clang/test/CIR/Lowering/unary-plus-minus.cir | 4 +- clang/test/CIR/Lowering/variadics.cir | 2 +- clang/test/CIR/Transforms/merge-cleanups.cir | 2 +- clang/test/CIR/cirtool.cir | 2 +- clang/test/CMakeLists.txt | 3 +- clang/test/lit.cfg.py | 3 +- clang/tools/CMakeLists.txt | 3 +- .../{cir-tool => cir-opt}/CMakeLists.txt | 8 +-- .../cir-tool.cpp => cir-opt/cir-opt.cpp} | 2 +- clang/tools/cir-translate/CMakeLists.txt | 36 ++++++++++++ clang/tools/cir-translate/cir-translate.cpp | 56 +++++++++++++++++++ llvm/CMakeLists.txt | 2 +- 65 files changed, 196 insertions(+), 104 deletions(-) rename clang/tools/{cir-tool => cir-opt}/CMakeLists.txt (82%) rename clang/tools/{cir-tool/cir-tool.cpp => cir-opt/cir-opt.cpp} (96%) create mode 100644 clang/tools/cir-translate/CMakeLists.txt create mode 100644 clang/tools/cir-translate/cir-translate.cpp diff --git a/clang/include/clang/CIR/LowerToLLVM.h b/clang/include/clang/CIR/LowerToLLVM.h index 9494b37fd75b..e3984bd2ce93 100644 --- a/clang/include/clang/CIR/LowerToLLVM.h +++ b/clang/include/clang/CIR/LowerToLLVM.h @@ -31,7 +31,6 @@ namespace cir { namespace direct { std::unique_ptr lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, - std::unique_ptr mlirCtx, llvm::LLVMContext &llvmCtx); } diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 5c6bb7f4887f..b248b3fb8deb 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -76,8 +76,7 @@ static std::unique_ptr lowerFromCIRToLLVMIR( const clang::FrontendOptions &feOptions, mlir::ModuleOp mlirMod, std::unique_ptr mlirCtx, llvm::LLVMContext &llvmCtx) { if (feOptions.ClangIRDirectLowering) - return direct::lowerDirectlyFromCIRToLLVMIR(mlirMod, std::move(mlirCtx), - llvmCtx); + return direct::lowerDirectlyFromCIRToLLVMIR(mlirMod, llvmCtx); else return lowerFromCIRToMLIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 68d3d0282b22..0e9ed0aed29c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1416,9 +1416,9 @@ extern void registerCIRDialectTranslation(mlir::MLIRContext &context); std::unique_ptr lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, - std::unique_ptr mlirCtx, LLVMContext &llvmCtx) { - mlir::PassManager pm(mlirCtx.get()); + mlir::MLIRContext *mlirCtx = theModule.getContext(); + mlir::PassManager pm(mlirCtx); pm.addPass(createConvertCIRToLLVMPass()); diff --git a/clang/test/CIR/IR/array.cir b/clang/test/CIR/IR/array.cir index 4390d3dabfcd..6653cdbfbe2e 100644 --- a/clang/test/CIR/IR/array.cir +++ b/clang/test/CIR/IR/array.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | cir-tool | FileCheck %s +// RUN: cir-opt %s | cir-opt | FileCheck %s !u32i = !cir.int diff --git a/clang/test/CIR/IR/branch.cir b/clang/test/CIR/IR/branch.cir index 57977b1389ff..6f75d9e25bd3 100644 --- a/clang/test/CIR/IR/branch.cir +++ b/clang/test/CIR/IR/branch.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | FileCheck %s +// RUN: cir-opt %s | FileCheck %s #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool diff --git a/clang/test/CIR/IR/call.cir b/clang/test/CIR/IR/call.cir index 857614f33a61..2ed1fa062868 100644 --- a/clang/test/CIR/IR/call.cir +++ b/clang/test/CIR/IR/call.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | FileCheck %s +// RUN: cir-opt %s | FileCheck %s !s32i = !cir.int !fnptr = !cir.ptr)>> @@ -10,4 +10,4 @@ module { } } -// CHECK: %0 = cir.call %arg0(%arg1) : (!cir.ptr)>>, !s32i) -> !s32i \ No newline at end of file +// CHECK: %0 = cir.call %arg0(%arg1) : (!cir.ptr)>>, !s32i) -> !s32i diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index a740ec4c503f..e8b5989fd8ad 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | cir-tool | FileCheck %s +// RUN: cir-opt %s | cir-opt | FileCheck %s !s32i = !cir.int module { diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index e073d007ba69..97d58223b1db 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -1,6 +1,6 @@ // Test the CIR operations can parse and print correctly (roundtrip) -// RUN: cir-tool %s | cir-tool | FileCheck %s +// RUN: cir-opt %s | cir-opt | FileCheck %s !s32i = !cir.int !s8i = !cir.int !u64i = !cir.int diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir index 2ab6d54081ff..01f6b54877c8 100644 --- a/clang/test/CIR/IR/func.cir +++ b/clang/test/CIR/IR/func.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | FileCheck %s +// RUN: cir-opt %s | FileCheck %s !s32i = !cir.int !u8i = !cir.int module { @@ -42,4 +42,4 @@ module { cir.func no_proto private @no_proto(...) -> !s32i } -// CHECK: cir.func @l0() \ No newline at end of file +// CHECK: cir.func @l0() diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 347b340a5ea0..b5b9af61174a 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | FileCheck %s +// RUN: cir-opt %s | FileCheck %s !s8i = !cir.int !s32i = !cir.int !s64i = !cir.int diff --git a/clang/test/CIR/IR/inlineAttr.cir b/clang/test/CIR/IR/inlineAttr.cir index 2b2542b0cd45..54275afae6db 100644 --- a/clang/test/CIR/IR/inlineAttr.cir +++ b/clang/test/CIR/IR/inlineAttr.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s | FileCheck %s -check-prefix=CIR -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s | FileCheck %s -check-prefix=CIR +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR module { cir.func @l0() extra( {cir.inline = #cir.inline} ) { diff --git a/clang/test/CIR/IR/int.cir b/clang/test/CIR/IR/int.cir index 79d28427f922..233198e4e335 100644 --- a/clang/test/CIR/IR/int.cir +++ b/clang/test/CIR/IR/int.cir @@ -2,7 +2,7 @@ // cir.global external @a = #cir.int<255> : !cir.int // } -// RUN: cir-tool %s | FileCheck %s +// RUN: cir-opt %s | FileCheck %s !s8i = !cir.int !s16i = !cir.int !s32i = !cir.int diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 71ca31355121..17d3afcfe0e9 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1,5 +1,5 @@ // Test attempts to build bogus CIR -// RUN: cir-tool %s -verify-diagnostics -split-input-file +// RUN: cir-opt %s -verify-diagnostics -split-input-file !u32i = !cir.int diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 19e97170fff6..ac9658a304d3 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | FileCheck %s +// RUN: cir-opt %s | FileCheck %s #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool !u32i = !cir.int @@ -212,4 +212,4 @@ cir.func @l2() { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return -// CHECK-NEXT: } \ No newline at end of file +// CHECK-NEXT: } diff --git a/clang/test/CIR/IR/module.cir b/clang/test/CIR/IR/module.cir index c2fc99332670..7ce2c0ba21cb 100644 --- a/clang/test/CIR/IR/module.cir +++ b/clang/test/CIR/IR/module.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -split-input-file -o %t.cir +// RUN: cir-opt %s -split-input-file -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // Should parse and print C source language attribute. diff --git a/clang/test/CIR/IR/ptr_stride.cir b/clang/test/CIR/IR/ptr_stride.cir index 738983f15633..826ed571c3cb 100644 --- a/clang/test/CIR/IR/ptr_stride.cir +++ b/clang/test/CIR/IR/ptr_stride.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | cir-tool | FileCheck %s +// RUN: cir-opt %s | cir-opt | FileCheck %s !s32i = !cir.int module { diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index 72740242ab8b..ae6a8169e4c7 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | cir-tool | FileCheck %s +// RUN: cir-opt %s | cir-opt | FileCheck %s !u8i = !cir.int !u16i = !cir.int diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index 56edfbbd9b60..dfc4d72409d3 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | FileCheck %s +// RUN: cir-opt %s | FileCheck %s !s32i = !cir.int cir.func @s0() { diff --git a/clang/test/CIR/IR/ternary.cir b/clang/test/CIR/IR/ternary.cir index 77939474e04b..127d8ed8f2dc 100644 --- a/clang/test/CIR/IR/ternary.cir +++ b/clang/test/CIR/IR/ternary.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | cir-tool | FileCheck %s +// RUN: cir-opt %s | cir-opt | FileCheck %s !u32i = !cir.int module { diff --git a/clang/test/CIR/IR/types.cir b/clang/test/CIR/IR/types.cir index 4390d3dabfcd..6653cdbfbe2e 100644 --- a/clang/test/CIR/IR/types.cir +++ b/clang/test/CIR/IR/types.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s | cir-tool | FileCheck %s +// RUN: cir-opt %s | cir-opt | FileCheck %s !u32i = !cir.int diff --git a/clang/test/CIR/Lowering/ThroughMLIR/array.cir b/clang/test/CIR/Lowering/ThroughMLIR/array.cir index dfbf6846d77c..fc69ff680f4e 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/array.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/array.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-mlir -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir index a1e3b5f5d183..f6dfda5fa435 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir index 0fbcfc8f2e81..138ada1dd42e 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir index 954619cb5367..2163f063d9e9 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir index bda86d3d9047..5a8816a1ef99 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir index 291487fab4c3..dc6b11636059 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-mlir -o %t.mlir +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s // XFAIL: * diff --git a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir index d7e5c432a333..4f1b9cccb312 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -canonicalize -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -canonicalize -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -canonicalize -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -canonicalize -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir index cacc0f50f528..e957d3ef16cd 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() -> i32 { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir index 5c195a69c57e..57541c194206 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir index 01b3f9c04236..09f16f4d342f 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir index 7ab8a561be63..3a7a9b3f8dfa 100644 --- a/clang/test/CIR/Lowering/array.cir +++ b/clang/test/CIR/Lowering/array.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/binop-fp.cir b/clang/test/CIR/Lowering/binop-fp.cir index 70875a71dbf7..33a9c6f2a20b 100644 --- a/clang/test/CIR/Lowering/binop-fp.cir +++ b/clang/test/CIR/Lowering/binop-fp.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/binop-signed-int.cir b/clang/test/CIR/Lowering/binop-signed-int.cir index 157aed2c9505..855cd8cfbe92 100644 --- a/clang/test/CIR/Lowering/binop-signed-int.cir +++ b/clang/test/CIR/Lowering/binop-signed-int.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s !s32i = !cir.int @@ -62,4 +62,3 @@ module { cir.return } } - diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir index 5efa81937094..29076c52f51f 100644 --- a/clang/test/CIR/Lowering/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !u32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 2cfcf6a94f3e..834a148460ee 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool diff --git a/clang/test/CIR/Lowering/branch.cir b/clang/test/CIR/Lowering/branch.cir index 85581507eb2c..90e143913d50 100644 --- a/clang/test/CIR/Lowering/branch.cir +++ b/clang/test/CIR/Lowering/branch.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !s32i = !cir.int cir.func @foo(%arg0: !cir.bool) -> !s32i { @@ -25,7 +25,7 @@ cir.func @foo(%arg0: !cir.bool) -> !s32i { // MLIR-NEXT: } // MLIR-NEXT: } -// LLVM: define i32 @foo(i8 %0) { +// LLVM: define i32 @foo(i8 %0) // LLVM-NEXT: %2 = trunc i8 %0 to i1 // LLVM-NEXT: br i1 %2, label %3, label %4 // LLVM-EMPTY: diff --git a/clang/test/CIR/Lowering/call.cir b/clang/test/CIR/Lowering/call.cir index 62089cf3dbb5..2c40bb88e523 100644 --- a/clang/test/CIR/Lowering/call.cir +++ b/clang/test/CIR/Lowering/call.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @a() { diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 3f7baa34969a..8ec26b9b2557 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !s16i = !cir.int !s32i = !cir.int !s64i = !cir.int @@ -22,7 +22,7 @@ module { // MLIR-NEXT: } -// LLVM: define i32 @foo(i32 %0) { +// LLVM: define i32 @foo(i32 %0) // LLVM-NEXT: %2 = icmp ne i32 %0, 0 // LLVM-NEXT: %3 = zext i1 %2 to i8 // LLVM-NEXT: ret i32 %0 diff --git a/clang/test/CIR/Lowering/cmp.cir b/clang/test/CIR/Lowering/cmp.cir index 2dbe7bbd3b55..a1da2d8e26a0 100644 --- a/clang/test/CIR/Lowering/cmp.cir +++ b/clang/test/CIR/Lowering/cmp.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index a709dd91e10f..8b3b553492b1 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/func.cir b/clang/test/CIR/Lowering/func.cir index b524729ff697..41cf5c3afdd8 100644 --- a/clang/test/CIR/Lowering/func.cir +++ b/clang/test/CIR/Lowering/func.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck %s -check-prefix=MLIR --input-file=%t.mlir // XFAIL: * diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 27a833f34b6d..325ef58bf4f1 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !s16i = !cir.int !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 2f6b73b7facf..6dc2191c916e 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -canonicalize -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -canonicalize -o - | cir-translate -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !u32i = !cir.int diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 5dc521469721..65674ff96cf7 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s // XFAIL: * diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index 820a0bdebd20..a6dfd8e65900 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { @@ -31,7 +31,7 @@ module { // MLIR-NEXT: llvm.return %arg0 : i32 // MLIR-NEXT: } -// LLVM: define i32 @foo(i32 %0) { +// LLVM: define i32 @foo(i32 %0) // LLVM-NEXT: %2 = icmp ne i32 %0, 0 // LLVM-NEXT: %3 = zext i1 %2 to i8 // LLVM-NEXT: %4 = trunc i8 %3 to i1 diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index 81bc7bd79668..a70d66daef59 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !u32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index b48e84003057..f513185ac0ca 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index 107dddcb41e7..6e1646835002 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @f(%arg0: !cir.ptr) { @@ -26,7 +26,7 @@ module { // MLIR-NEXT: } // MLIR-NEXT: } -// LLVM: define void @f(ptr %0) { +// LLVM: define void @f(ptr %0) // LLVM-NEXT: %2 = alloca ptr, i64 1, align 8 // LLVM-NEXT: store ptr %0, ptr %2, align 8 // LLVM-NEXT: %3 = load ptr, ptr %2, align 8 diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index c4eb9a328631..e384d308281c 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !u32i = !cir.int module { @@ -24,7 +24,7 @@ module { // MLIR-NEXT: llvm.return -// LLVM: define void @foo() { +// LLVM: define void @foo() // LLVM-NEXT: br label %1 // LLVM-EMPTY: // LLVM-NEXT: 1: diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index df30ab27b933..c68e0c91bb97 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/tenary.cir b/clang/test/CIR/Lowering/tenary.cir index fb97bc4d6502..40774b0a84fd 100644 --- a/clang/test/CIR/Lowering/tenary.cir +++ b/clang/test/CIR/Lowering/tenary.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-llvm -reconcile-unrealized-casts -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o - | FileCheck %s -check-prefix=MLIR !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir index b879470745ac..2b4a001dfc7c 100644 --- a/clang/test/CIR/Lowering/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index e1fc2c9ec745..58f3357c9df2 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @foo() -> !s32i { diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index 660b0fc6adb6..791d017da102 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -1,5 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-tool %s -cir-to-llvm -o - | mlir-translate -mlir-to-llvmir -allow-unregistered-dialect | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/variadics.cir b/clang/test/CIR/Lowering/variadics.cir index db687ba228ca..f95ed7638795 100644 --- a/clang/test/CIR/Lowering/variadics.cir +++ b/clang/test/CIR/Lowering/variadics.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-llvm -o %t.cir +// RUN: cir-opt %s -cir-to-llvm -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=MLIR !s32i = !cir.int diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 0752215499f3..89907d59637a 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-merge-cleanups -o %t.out.cir +// RUN: cir-opt %s -cir-merge-cleanups -o %t.out.cir // RUN: FileCheck --input-file=%t.out.cir %s #false = #cir.bool : !cir.bool diff --git a/clang/test/CIR/cirtool.cir b/clang/test/CIR/cirtool.cir index 986e9dddd24e..8351d5be3165 100644 --- a/clang/test/CIR/cirtool.cir +++ b/clang/test/CIR/cirtool.cir @@ -1,4 +1,4 @@ -// RUN: cir-tool %s -cir-to-mlir -cir-mlir-to-llvm -o %t.mlir +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: mlir-translate -mlir-to-llvmir %t.mlir -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt index 3bacf428e27c..ee9646d4568d 100644 --- a/clang/test/CMakeLists.txt +++ b/clang/test/CMakeLists.txt @@ -87,7 +87,8 @@ list(APPEND CLANG_TEST_DEPS if(CLANG_ENABLE_CIR) list(APPEND CLANG_TEST_DEPS - cir-tool + cir-opt + cir-translate ) endif() diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py index ff3b88b941b0..ade1f9ef453b 100644 --- a/clang/test/lit.cfg.py +++ b/clang/test/lit.cfg.py @@ -86,7 +86,8 @@ tools = [ "apinotes-test", "c-index-test", - "cir-tool", + "cir-opt", + "cir-translate", "clang-diff", "clang-format", "clang-repl", diff --git a/clang/tools/CMakeLists.txt b/clang/tools/CMakeLists.txt index 8f6ed6041e3a..d2677e369b6a 100644 --- a/clang/tools/CMakeLists.txt +++ b/clang/tools/CMakeLists.txt @@ -4,7 +4,8 @@ add_clang_subdirectory(diagtool) add_clang_subdirectory(driver) add_clang_subdirectory(apinotes-test) if(CLANG_ENABLE_CIR) - add_clang_subdirectory(cir-tool) + add_clang_subdirectory(cir-opt) + add_clang_subdirectory(cir-translate) add_clang_subdirectory(cir-lsp-server) endif() add_clang_subdirectory(clang-diff) diff --git a/clang/tools/cir-tool/CMakeLists.txt b/clang/tools/cir-opt/CMakeLists.txt similarity index 82% rename from clang/tools/cir-tool/CMakeLists.txt rename to clang/tools/cir-opt/CMakeLists.txt index db22c216c173..741cdfa5950d 100644 --- a/clang/tools/cir-tool/CMakeLists.txt +++ b/clang/tools/cir-opt/CMakeLists.txt @@ -24,12 +24,12 @@ set(LIBS MLIRTransformUtils ) -add_clang_tool(cir-tool - cir-tool.cpp +add_clang_tool(cir-opt + cir-opt.cpp DEPENDS ${LIBS} ) -target_link_libraries(cir-tool PRIVATE ${LIBS}) -llvm_update_compile_flags(cir-tool) +target_link_libraries(cir-opt PRIVATE ${LIBS}) +llvm_update_compile_flags(cir-opt) diff --git a/clang/tools/cir-tool/cir-tool.cpp b/clang/tools/cir-opt/cir-opt.cpp similarity index 96% rename from clang/tools/cir-tool/cir-tool.cpp rename to clang/tools/cir-opt/cir-opt.cpp index 0b3d5354b34f..67de6a1c99be 100644 --- a/clang/tools/cir-tool/cir-tool.cpp +++ b/clang/tools/cir-opt/cir-opt.cpp @@ -1,4 +1,4 @@ -//===- cir-tool.cpp - CIR optimizationa and analysis driver -----*- C++ -*-===// +//===- cir-opt.cpp - CIR optimization and analysis driver -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/clang/tools/cir-translate/CMakeLists.txt b/clang/tools/cir-translate/CMakeLists.txt new file mode 100644 index 000000000000..a5e22b02e505 --- /dev/null +++ b/clang/tools/cir-translate/CMakeLists.txt @@ -0,0 +1,36 @@ +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) +get_property(translation_libs GLOBAL PROPERTY MLIR_TRANSLATION_LIBS) + +include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) +include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) + +set(LIBS + ${dialect_libs} + ${conversion_libs} + ${translation_libs} + clangCIR + clangCIRLoweringDirectToLLVM + MLIRAnalysis + MLIRCIR + MLIRCIRTransforms + MLIRDialect + MLIRIR + MLIROptLib + MLIRParser + MLIRPass + MLIRTransforms + MLIRTransformUtils + MLIRTranslateLib + MLIRSupport +) + +add_clang_tool(cir-translate + cir-translate.cpp + + DEPENDS + ${LIBS} +) + +target_link_libraries(cir-translate PRIVATE ${LIBS}) +llvm_update_compile_flags(cir-translate) diff --git a/clang/tools/cir-translate/cir-translate.cpp b/clang/tools/cir-translate/cir-translate.cpp new file mode 100644 index 000000000000..743f612194f5 --- /dev/null +++ b/clang/tools/cir-translate/cir-translate.cpp @@ -0,0 +1,56 @@ +//===- cir-translate.cpp - CIR Translate Driver ------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Converts CIR directly to LLVM IR, similar to mlir-translate or LLVM llc. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/DLTI/DLTI.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/InitAllTranslations.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Target/LLVMIR/Dialect/All.h" +#include "mlir/Tools/mlir-translate/MlirTranslateMain.h" +#include "mlir/Tools/mlir-translate/Translation.h" +#include "llvm/IR/Module.h" + +namespace cir { +namespace direct { +extern void registerCIRDialectTranslation(mlir::DialectRegistry ®istry); +extern std::unique_ptr +lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, + llvm::LLVMContext &llvmCtx); +} // namespace direct +} + +void registerToLLVMTranslation() { + mlir::TranslateFromMLIRRegistration registration( + "cir-to-llvmir", "Translate CIR to LLVMIR", + [](mlir::Operation *op, mlir::raw_ostream &output) { + llvm::LLVMContext llvmContext; + auto llvmModule = cir::direct::lowerDirectlyFromCIRToLLVMIR( + llvm::dyn_cast(op), llvmContext); + if (!llvmModule) + return mlir::failure(); + llvmModule->print(output, nullptr); + return mlir::success(); + }, + [](mlir::DialectRegistry ®istry) { + registry.insert(); + mlir::registerAllToLLVMIRTranslations(registry); + cir::direct::registerCIRDialectTranslation(registry); + }); +} + +int main(int argc, char **argv) { + registerToLLVMTranslation(); + return failed( + mlir::mlirTranslateMain(argc, argv, "CIR Translation Tool")); +} diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt index 0e40b398fcfb..bbd9a43ce8bd 100644 --- a/llvm/CMakeLists.txt +++ b/llvm/CMakeLists.txt @@ -237,7 +237,7 @@ if (LLVM_ENABLE_PROJECTS_USED OR NOT LLVM_ENABLE_PROJECTS STREQUAL "") string(REGEX REPLACE "-" "_" upper_proj ${upper_proj}) if ("${proj}" IN_LIST LLVM_ENABLE_PROJECTS) message(STATUS "${proj} project is enabled") - # ClangIR is integrated inside clang and also provides the cir-tool, + # ClangIR is integrated inside clang and also provides the cir-opt, # it needs some special handling. if ("${proj}" STREQUAL "cir") set(CLANG_ENABLE_CIR ON) From d4508da7e0b5b7c3b1c449000b76f9755f75df9e Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Fri, 14 Jul 2023 09:42:51 -0700 Subject: [PATCH 1074/2301] [CIR][CIRGen] Support weakref function calls. (#179) --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 7 ++++++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 28 ++++++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 +++ clang/test/CIR/CodeGen/weak.c | 20 ++++++++++++++++++ 4 files changed, 55 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/weak.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 846e1b8fddda..b8b519f334f6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -29,6 +29,7 @@ #include "llvm/ADT/StringExtras.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Operation.h" #include "mlir/IR/Value.h" using namespace cir; @@ -38,7 +39,11 @@ using namespace mlir::cir; static mlir::cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); - assert(!FD->hasAttr() && "NYI"); + + if (FD->hasAttr()) { + mlir::Operation* aliasee = CGM.getWeakRefReference(FD); + return dyn_cast(aliasee); + } auto V = CGM.GetAddrOfFunction(GD); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index a39c4b99498b..590561c96cec 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -52,6 +52,7 @@ #include "clang/Basic/SourceLocation.h" #include "clang/CIR/CIRGenerator.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/LowerToLLVM.h" #include "clang/Frontend/FrontendDiagnostic.h" @@ -335,8 +336,6 @@ bool CIRGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { void CIRGenModule::buildGlobal(GlobalDecl GD) { const auto *Global = cast(GD.getDecl()); - assert(!Global->hasAttr() && "NYI"); - assert(!Global->hasAttr() && "NYI"); assert(!Global->hasAttr() && "NYI"); assert(!Global->hasAttr() && "NYI"); assert(!langOpts.CUDA && "NYI"); @@ -694,6 +693,31 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty, ptrTy, g.getSymName()); } +mlir::Operation* CIRGenModule::getWeakRefReference(const ValueDecl *VD) { + const AliasAttr *AA = VD->getAttr(); + assert(AA && "No alias?"); + + // See if there is already something with the target's name in the module. + mlir::Operation *Entry = getGlobalValue(AA->getAliasee()); + if (Entry) { + assert((isa(Entry) || isa(Entry)) && + "weak ref should be against a global variable or function"); + return Entry; + } + + mlir::Type DeclTy = getTypes().convertTypeForMem(VD->getType()); + if (DeclTy.isa()) { + auto F = GetOrCreateCIRFunction(AA->getAliasee(), DeclTy, + GlobalDecl(cast(VD)), + /*ForVtable=*/false); + F.setLinkage(mlir::cir::GlobalLinkageKind::ExternalWeakLinkage); + WeakRefReferences.insert(F); + return F; + } + + llvm_unreachable("GlobalOp NYI"); +} + /// TODO(cir): looks like part of this code can be part of a common AST /// helper betweem CIR and LLVM codegen. template diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 097e13ce5cb7..1afa4c117d56 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -217,6 +217,9 @@ class CIRGenModule : public CIRGenTypeCache { getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty = {}, ForDefinition_t IsForDefinition = NotForDefinition); + /// Get a reference to the target of VD. + mlir::Operation* getWeakRefReference(const ValueDecl *VD); + CharUnits computeNonVirtualBaseClassOffset(const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start, diff --git a/clang/test/CIR/CodeGen/weak.c b/clang/test/CIR/CodeGen/weak.c new file mode 100644 index 000000000000..02adfeb53de2 --- /dev/null +++ b/clang/test/CIR/CodeGen/weak.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +extern void B (void); +static __typeof(B) A __attribute__ ((__weakref__("B"))); + +void active (void) +{ + A(); +} + +// CIR: cir.func extern_weak private @B() +// CIR: cir.func @active() +// CIR-NEXT: cir.call @B() : () -> () + +// LLVM: declare !dbg !{{.}} extern_weak void @B() +// LLVM: define void @active() +// LLVM-NEXT: call void @B() From f987e085d68607ece3692b0ebb06ab781defb50c Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 14 Jul 2023 18:00:22 -0300 Subject: [PATCH 1075/2301] [CIR][Bugfix] Omit case value type on switch printer The switch op parser expects a simple integer value in the case clause, but the printer was printing the full CIR integer attribute. This patch fixes the printer to print only the integer value. ghstack-source-id: 4b79f8796e15f3d568fa26dca03d82e623f82fbe Pull Request resolved: https://github.com/llvm/clangir/pull/168 --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 9 +++++++-- clang/test/CIR/CodeGen/switch.cpp | 20 ++++++++++---------- clang/test/CIR/IR/switch.cir | 6 +++--- clang/test/CIR/Transforms/merge-cleanups.cir | 6 +++--- 4 files changed, 23 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7044fca4af4d..cbafa9a1ccd1 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -981,13 +981,18 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, switch (kind) { case cir::CaseOpKind::Equal: { p << ", "; - p.printStrippedAttrOrType(attr.getValue()[0]); + auto intAttr = attr.getValue()[0].cast(); + auto intAttrTy = intAttr.getType().cast(); + (intAttrTy.isSigned() ? p << intAttr.getSInt() : p << intAttr.getUInt()); break; } case cir::CaseOpKind::Anyof: { p << ", ["; llvm::interleaveComma(attr.getValue(), p, [&](const Attribute &a) { - p.printAttributeWithoutType(a); + auto intAttr = a.cast(); + auto intAttrTy = intAttr.getType().cast(); + (intAttrTy.isSigned() ? p << intAttr.getSInt() + : p << intAttr.getUInt()); }); p << "] : "; auto typedAttr = attr.getValue()[0].dyn_cast(); diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 26faec258615..79e75ec5708a 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -18,17 +18,17 @@ void sw1(int a) { // CHECK: cir.func @_Z3sw1i // CHECK: cir.switch (%3 : !s32i) [ -// CHECK-NEXT: case (equal, #cir.int<0> : !s32i) { +// CHECK-NEXT: case (equal, 0) { // CHECK-NEXT: %4 = cir.load %1 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i // CHECK-NEXT: cir.store %6, %1 : !s32i, cir.ptr // CHECK-NEXT: cir.yield break // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, #cir.int<1> : !s32i) { +// CHECK-NEXT: case (equal, 1) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, #cir.int<2> : !s32i) { +// CHECK-NEXT: case (equal, 2) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %4 = cir.alloca !s32i, cir.ptr , ["yolo", init] // CHECK-NEXT: %5 = cir.load %1 : cir.ptr , !s32i @@ -57,7 +57,7 @@ void sw2(int a) { // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["yolo", init] // CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["fomo", init] // CHECK: cir.switch (%4 : !s32i) [ -// CHECK-NEXT: case (equal, #cir.int<3> : !s32i) { +// CHECK-NEXT: case (equal, 3) { // CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: cir.store %5, %2 : !s32i, cir.ptr @@ -90,7 +90,7 @@ int sw4(int a) { // CHECK: cir.func @_Z3sw4i // CHECK: cir.switch (%4 : !s32i) [ -// CHECK-NEXT: case (equal, #cir.int<42> : !s32i) { +// CHECK-NEXT: case (equal, 42) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr @@ -115,7 +115,7 @@ void sw5(int a) { // CHECK: cir.func @_Z3sw5i // CHECK: cir.switch (%1 : !s32i) [ -// CHECK-NEXT: case (equal, #cir.int<1> : !s32i) { +// CHECK-NEXT: case (equal, 1) { // CHECK-NEXT: cir.yield fallthrough void sw6(int a) { @@ -133,10 +133,10 @@ void sw6(int a) { // CHECK: cir.func @_Z3sw6i // CHECK: cir.switch (%1 : !s32i) [ -// CHECK-NEXT: case (anyof, [#cir.int<0>, #cir.int<1>, #cir.int<2>] : !s32i) { +// CHECK-NEXT: case (anyof, [0, 1, 2] : !s32i) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: }, -// CHECK-NEXT: case (anyof, [#cir.int<3>, #cir.int<4>, #cir.int<5>] : !s32i) { +// CHECK-NEXT: case (anyof, [3, 4, 5] : !s32i) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: } @@ -154,9 +154,9 @@ void sw7(int a) { } // CHECK: cir.func @_Z3sw7i -// CHECK: case (anyof, [#cir.int<0>, #cir.int<1>, #cir.int<2>] : !s32i) { +// CHECK: case (anyof, [0, 1, 2] : !s32i) { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, -// CHECK-NEXT: case (anyof, [#cir.int<3>, #cir.int<4>, #cir.int<5>] : !s32i) { +// CHECK-NEXT: case (anyof, [3, 4, 5] : !s32i) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index dfc4d72409d3..bcac3e321f31 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -24,13 +24,13 @@ cir.func @s0() { // CHECK-NEXT: case (default) { // CHECK-NEXT: cir.return // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, #cir.int<3> : !s32i) { +// CHECK-NEXT: case (equal, 3) { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, -// CHECK-NEXT: case (anyof, [#cir.int<6>, #cir.int<7>, #cir.int<8>] : !s32i) { +// CHECK-NEXT: case (anyof, [6, 7, 8] : !s32i) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, #cir.int<5> : !s32i) { +// CHECK-NEXT: case (equal, 5) { // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: ] diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 89907d59637a..d535fea8db2d 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -102,14 +102,14 @@ module { } // CHECK: cir.switch (%4 : !s32i) [ -// CHECK-NEXT: case (equal, #cir.int<0> : !s32i) { +// CHECK-NEXT: case (equal, 0) { // CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i // CHECK-NEXT: %6 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i // CHECK-NEXT: cir.store %7, %2 : !s32i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, #cir.int<1> : !s32i) { +// CHECK-NEXT: case (equal, 1) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.load %1 : cir.ptr , !s32i @@ -123,7 +123,7 @@ module { // CHECK-NEXT: } // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, -// CHECK-NEXT: case (equal, #cir.int<2> : !s32i) { +// CHECK-NEXT: case (equal, 2) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.alloca !s32i, cir.ptr , ["yolo", init] {alignment = 4 : i64} // CHECK-NEXT: %6 = cir.load %2 : cir.ptr , !s32i From 9b11dfb9da8769f97724e884d0bb75da38182a0e Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 14 Jul 2023 18:00:22 -0300 Subject: [PATCH 1076/2301] [CIR][Lowering] Lower structured switch operations Adds a CIRSwitchOpLowering pattern to convert structured CIR switch ops to LLVM. Also updates the MergeCleanups pass to drop empty switch ops. ghstack-source-id: b7997a20ad175d5837d86ce9571f9343cfc85d13 Pull Request resolved: https://github.com/llvm/clangir/pull/169 --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 +- .../CIR/Dialect/Transforms/MergeCleanups.cpp | 8 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 113 +++++++++++++++++- clang/test/CIR/Lowering/switch.cir | 108 +++++++++++++++++ clang/test/CIR/Transforms/merge-cleanups.cir | 10 ++ 5 files changed, 242 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/Lowering/switch.cir diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index cbafa9a1ccd1..2e6cbf7fea53 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1049,7 +1049,11 @@ void SwitchOp::getSuccessorRegions(mlir::RegionBranchPoint point, regions.push_back(RegionSuccessor(&r)); } -LogicalResult SwitchOp::verify() { return success(); } +LogicalResult SwitchOp::verify() { + if (getCases().has_value() && getCases()->size() != getNumRegions()) + return emitOpError("number of cases attributes and regions must match"); + return success(); +} void SwitchOp::build( OpBuilder &builder, OperationState &result, Value cond, diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index 7aab40b23aa4..f295361140a9 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -183,6 +183,14 @@ template <> mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( PatternRewriter &rewriter, cir::SwitchOp switchOp) const { auto regionChanged = mlir::failure(); + + // Empty switch statement: just remove it. + if (!switchOp.getCases().has_value() || switchOp.getCases()->empty()) { + rewriter.eraseOp(switchOp); + return mlir::success(); + } + + // Non-empty switch statement: clean it up. for (auto &r : switchOp.getRegions()) { if (checkAndRewriteRegion(r, rewriter).succeeded()) regionChanged = mlir::success(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0e9ed0aed29c..01fcb2eec5eb 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -29,6 +29,7 @@ #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" #include "mlir/IR/Attributes.h" +#include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinDialect.h" @@ -36,6 +37,7 @@ #include "mlir/IR/IRMapping.h" #include "mlir/IR/Operation.h" #include "mlir/IR/Value.h" +#include "mlir/IR/ValueRange.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Support/LLVM.h" @@ -846,6 +848,113 @@ class CIRGetGlobalOpLowering } }; +class CIRSwitchOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + inline void rewriteYieldOp(mlir::ConversionPatternRewriter &rewriter, + mlir::cir::YieldOp yieldOp, + mlir::Block *destination) const { + rewriter.setInsertionPoint(yieldOp); + rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getOperands(), + destination); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::SwitchOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // Empty switch statement: just erase it. + if (!op.getCases().has_value() || op.getCases()->empty()) { + rewriter.eraseOp(op); + return mlir::success(); + } + + // Create exit block. + rewriter.setInsertionPointAfter(op); + auto *exitBlock = + rewriter.splitBlock(rewriter.getBlock(), rewriter.getInsertionPoint()); + + // Allocate required data structures (disconsider default case in vectors). + llvm::SmallVector caseValues; + llvm::SmallVector caseDestinations; + llvm::SmallVector caseOperands; + + // Initialize default case as optional. + mlir::Block *defaultDestination = exitBlock; + mlir::ValueRange defaultOperands = exitBlock->getArguments(); + + // Track fallthrough between cases. + mlir::cir::YieldOp fallthroughYieldOp = nullptr; + + // Digest the case statements values and bodies. + for (size_t i = 0; i < op.getCases()->size(); ++i) { + auto ®ion = op.getRegion(i); + auto caseAttr = op.getCases()->getValue()[i].cast(); + + // Found default case: save destination and operands. + if (caseAttr.getKind().getValue() == mlir::cir::CaseOpKind::Default) { + defaultDestination = ®ion.front(); + defaultOperands = region.getArguments(); + } else { + // AnyOf cases kind can have multiple values, hence the loop below. + for (auto &value : caseAttr.getValue()) { + caseValues.push_back(value.cast().getValue()); + caseOperands.push_back(region.getArguments()); + caseDestinations.push_back(®ion.front()); + } + } + + // Previous case is a fallthrough: branch it to this case. + if (fallthroughYieldOp) { + rewriteYieldOp(rewriter, fallthroughYieldOp, ®ion.front()); + fallthroughYieldOp = nullptr; + } + + // TODO(cir): Handle multi-block case statements. + if (region.getBlocks().size() != 1) + return op->emitError("multi-block case statement is NYI"); + + // Handle switch-case yields. + auto *terminator = region.front().getTerminator(); + if (auto yieldOp = dyn_cast(terminator)) { + // TODO(cir): Ensure every yield instead of dealing with optional + // values. + assert(yieldOp.getKind().has_value() && "switch yield has no kind"); + + switch (yieldOp.getKind().value()) { + // Fallthrough to next case: track it for the next case to handle. + case mlir::cir::YieldOpKind::Fallthrough: + fallthroughYieldOp = yieldOp; + break; + // Break out of switch: branch to exit block. + case mlir::cir::YieldOpKind::Break: + rewriteYieldOp(rewriter, yieldOp, exitBlock); + break; + default: + return op->emitError("invalid yield kind in case statement"); + } + } + + // Extract region contents before erasing the switch op. + rewriter.inlineRegionBefore(region, exitBlock); + } + + // Last case is a fallthrough: branch it to exit. + if (fallthroughYieldOp) { + rewriteYieldOp(rewriter, fallthroughYieldOp, exitBlock); + fallthroughYieldOp = nullptr; + } + + // Set switch op to branch to the newly created blocks. + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp( + op, adaptor.getCondition(), defaultDestination, defaultOperands, + caseValues, caseDestinations, caseOperands); + return mlir::success(); + } +}; + class CIRGlobalOpLowering : public mlir::OpConversionPattern { public: @@ -1322,8 +1431,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRIfLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, - CIRStructElementAddrOpLowering>(converter, - patterns.getContext()); + CIRStructElementAddrOpLowering, CIRSwitchOpLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir new file mode 100644 index 000000000000..1b5c9b387937 --- /dev/null +++ b/clang/test/CIR/Lowering/switch.cir @@ -0,0 +1,108 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s8i = !cir.int +!s32i = !cir.int +!s64i = !cir.int + +module { + cir.func @shouldLowerSwitchWithDefault(%arg0: !s8i) { + cir.switch (%arg0 : !s8i) [ + // CHECK: llvm.switch %arg0 : i8, ^bb[[#DEFAULT:]] [ + // CHECK: 1: ^bb[[#CASE1:]] + // CHECK: ] + case (equal, 1) { + cir.yield break + }, + // CHECK: ^bb[[#CASE1]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + case (default) { + cir.yield break + } + // CHECK: ^bb[[#DEFAULT]]: + // CHECK: llvm.br ^bb[[#EXIT]] + ] + // CHECK: ^bb[[#EXIT]]: + cir.return + } + + + cir.func @shouldLowerSwitchWithoutDefault(%arg0: !s32i) { + cir.switch (%arg0 : !s32i) [ + // Default block is the exit block: + // CHECK: llvm.switch %arg0 : i32, ^bb[[#EXIT:]] [ + // CHECK: 1: ^bb[[#CASE1:]] + // CHECK: ] + case (equal, 1) { + cir.yield break + } + // CHECK: ^bb[[#CASE1]]: + // CHECK: llvm.br ^bb[[#EXIT]] + ] + // CHECK: ^bb[[#EXIT]]: + cir.return + } + + + cir.func @shouldLowerSwitchWithImplicitFallthrough(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + // CHECK: llvm.switch %arg0 : i64, ^bb[[#EXIT:]] [ + // CHECK: 1: ^bb[[#CASE1N2:]], + // CHECK: 2: ^bb[[#CASE1N2]] + // CHECK: ] + case (anyof, [1, 2] : !s64i) { // case 1 and 2 use same region + cir.yield break + } + // CHECK: ^bb[[#CASE1N2]]: + // CHECK: llvm.br ^bb[[#EXIT]] + ] + // CHECK: ^bb[[#EXIT]]: + cir.return + } + + + cir.func @shouldLowerSwitchWithExplicitFallthrough(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + // CHECK: llvm.switch %arg0 : i64, ^bb[[#EXIT:]] [ + // CHECK: 1: ^bb[[#CASE1:]], + // CHECK: 2: ^bb[[#CASE2:]] + // CHECK: ] + case (equal, 1 : !s64i) { // case 1 has its own region + cir.yield fallthrough // fallthrough to case 2 + }, + // CHECK: ^bb[[#CASE1]]: + // CHECK: llvm.br ^bb[[#CASE2]] + case (equal, 2 : !s64i) { + cir.yield break + } + // CHECK: ^bb[[#CASE2]]: + // CHECK: llvm.br ^bb[[#EXIT]] + ] + // CHECK: ^bb[[#EXIT]]: + cir.return + } + + + cir.func @shouldLowerSwitchWithFallthroughToExit(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + // CHECK: llvm.switch %arg0 : i64, ^bb[[#EXIT:]] [ + // CHECK: 1: ^bb[[#CASE1:]] + // CHECK: ] + case (equal, 1 : !s64i) { + cir.yield fallthrough // fallthrough to exit + } + // CHECK: ^bb[[#CASE1]]: + // CHECK: llvm.br ^bb[[#EXIT]] + ] + // CHECK: ^bb[[#EXIT]]: + cir.return + } + + + cir.func @shouldDropEmptySwitch(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + ] + // CHECK-NOT: llvm.switch + cir.return + } +} diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index d535fea8db2d..3b0b21e935fe 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -173,4 +173,14 @@ module { // CHECK: cir.func @removeEmptyScope // CHECK-NEXT: cir.return + // Should remove empty switch-case statements. + cir.func @removeEmptySwitch(%arg0: !s32i) { + // CHECK: cir.func @removeEmptySwitch + cir.switch (%arg0 : !s32i) [ + ] + // CHECK-NOT: cir.switch + cir.return + // CHECK: cir.return + } + } From 0d65f66d47eab26e1ea2bd1eb1320a5b8e4ba040 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 14 Jul 2023 20:03:16 -0300 Subject: [PATCH 1077/2301] [CIR][CIRGen] Implement basic pointer arithmetic Allows the use of pointer arithmetic in CIR by converting it to a pointer stride operation. Differently than LLVM, CIR does not casts the index to the same width as the pointer. ghstack-source-id: 4ae4bb61e6911e5d7fb6d7c0874c91dcae8d3ace Pull Request resolved: https://github.com/llvm/clangir/pull/164 --- clang/lib/CIR/CodeGen/CIRDataLayout.h | 1 + clang/lib/CIR/CodeGen/CIRGenBuilder.h | 14 +++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 109 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 4 + clang/lib/CIR/CodeGen/CIRGenFunction.h | 12 ++ .../CodeGen/UnimplementedFeatureGuarding.h | 4 + clang/test/CIR/CodeGen/pointers.cpp | 30 +++++ 7 files changed, 173 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/pointers.cpp diff --git a/clang/lib/CIR/CodeGen/CIRDataLayout.h b/clang/lib/CIR/CodeGen/CIRDataLayout.h index 92490b86daf3..b1b10ba6b6da 100644 --- a/clang/lib/CIR/CodeGen/CIRDataLayout.h +++ b/clang/lib/CIR/CodeGen/CIRDataLayout.h @@ -12,6 +12,7 @@ #ifndef LLVM_CLANG_LIB_CIR_CIRDATALAYOUT_H #define LLVM_CLANG_LIB_CIR_CIRDATALAYOUT_H +#include "UnimplementedFeatureGuarding.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/IR/BuiltinOps.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 2f95b16f6e9f..5694a6a8c4fc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -23,6 +23,7 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Types.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/FloatingPointMode.h" #include "llvm/Support/ErrorHandling.h" @@ -358,6 +359,19 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Operation creation helpers // -------------------------- // + mlir::Value createNeg(mlir::Value value) { + + if (auto intTy = value.getType().dyn_cast()) { + // Source is a unsigned integer: first cast it to signed. + if (intTy.isUnsigned()) + value = createIntCast(value, getSIntNTy(intTy.getWidth())); + return create(value.getLoc(), value.getType(), + mlir::cir::UnaryOpKind::Minus, value); + } + + llvm_unreachable("negation for the given type is NYI"); + } + mlir::Value createFPExt(mlir::Value v, mlir::Type destType) { if (getIsFPConstrained()) llvm_unreachable("constrainedfp NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 6ab2717437c1..8482c32999ff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRDataLayout.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "UnimplementedFeatureGuarding.h" @@ -1012,6 +1013,88 @@ static std::optional getUnwidenedIntegerType(const ASTContext &Ctx, (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize; } +/// Emit pointer + index arithmetic. +static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, + const BinOpInfo &op, + bool isSubtraction) { + // Must have binary (not unary) expr here. Unary pointer + // increment/decrement doesn't use this path. + const BinaryOperator *expr = cast(op.E); + + mlir::Value pointer = op.LHS; + Expr *pointerOperand = expr->getLHS(); + mlir::Value index = op.RHS; + Expr *indexOperand = expr->getRHS(); + + // In a subtraction, the LHS is always the pointer. + if (!isSubtraction && !pointer.getType().isa()) { + std::swap(pointer, index); + std::swap(pointerOperand, indexOperand); + } + + bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); + + auto &DL = CGF.CGM.getDataLayout(); + + // Some versions of glibc and gcc use idioms (particularly in their malloc + // routines) that add a pointer-sized integer (known to be a pointer value) + // to a null pointer in order to cast the value back to an integer or as + // part of a pointer alignment algorithm. This is undefined behavior, but + // we'd like to be able to compile programs that use it. + // + // Normally, we'd generate a GEP with a null-pointer base here in response + // to that code, but it's also UB to dereference a pointer created that + // way. Instead (as an acknowledged hack to tolerate the idiom) we will + // generate a direct cast of the integer value to a pointer. + // + // The idiom (p = nullptr + N) is not met if any of the following are true: + // + // The operation is subtraction. + // The index is not pointer-sized. + // The pointer type is not byte-sized. + // + if (BinaryOperator::isNullPointerArithmeticExtension( + CGF.getContext(), op.Opcode, expr->getLHS(), expr->getRHS())) + llvm_unreachable("null pointer arithmetic extension is NYI"); + + if (UnimplementedFeature::dataLayoutGetIndexTypeSizeInBits()) { + // TODO(cir): original codegen zero/sign-extends the index to the same width + // as the pointer. Since CIR's pointer stride doesn't care about that, it's + // skiped here. + llvm_unreachable("target-specific pointer width is NYI"); + } + + // If this is subtraction, negate the index. + if (isSubtraction) + index = CGF.getBuilder().createNeg(index); + + if (CGF.SanOpts.has(SanitizerKind::ArrayBounds)) + llvm_unreachable("array bounds sanitizer is NYI"); + + const PointerType *pointerType = + pointerOperand->getType()->getAs(); + if (!pointerType) + llvm_unreachable("ObjC is NYI"); + + QualType elementType = pointerType->getPointeeType(); + if (const VariableArrayType *vla = + CGF.getContext().getAsVariableArrayType(elementType)) + llvm_unreachable("VLA pointer arithmetic is NYI"); + + // Explicitly handle GNU void* and function pointer arithmetic extensions. The + // GNU void* casts amount to no-ops since our void* type is i8*, but this is + // future proof. + if (elementType->isVoidType() || elementType->isFunctionType()) + llvm_unreachable("GNU void* and func ptr arithmetic extensions are NYI"); + + mlir::Type elemTy = CGF.convertTypeForMem(elementType); + if (CGF.getLangOpts().isSignedOverflowDefined()) + llvm_unreachable("ptr arithmetic with signed overflow is NYI"); + + return CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned, + isSubtraction, op.E->getExprLoc()); +} + mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) { return Builder.create( CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Mul, @@ -1027,11 +1110,17 @@ mlir::Value ScalarExprEmitter::buildRem(const BinOpInfo &Ops) { CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); } + mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { + if (Ops.LHS.getType().isa() || + Ops.RHS.getType().isa()) + return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/false); + return Builder.create( CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS); } + mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { // The LHS is always a pointer if either side is. if (!Ops.LHS.getType().isa()) { @@ -1081,7 +1170,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { // If the RHS is not a pointer, then we have normal pointer // arithmetic. if (!Ops.RHS.getType().isa()) - llvm_unreachable("NYI"); + return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/true); // Otherwise, this is a pointer subtraction @@ -2216,3 +2305,21 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( return Builder.getConstInt(CGF.getLoc(E->getSourceRange()), E->EvaluateKnownConstInt(CGF.getContext())); } + +mlir::Value CIRGenFunction::buildCheckedInBoundsGEP( + mlir::Type ElemTy, mlir::Value Ptr, ArrayRef IdxList, + bool SignedIndices, bool IsSubtraction, SourceLocation Loc) { + mlir::Type PtrTy = Ptr.getType(); + assert(IdxList.size() == 1 && "multi-index ptr arithmetic NYI"); + mlir::Value GEPVal = builder.create( + CGM.getLoc(Loc), PtrTy, Ptr, IdxList[0]); + + // If the pointer overflow sanitizer isn't enabled, do nothing. + if (!SanOpts.has(SanitizerKind::PointerOverflow)) + return GEPVal; + + // TODO(cir): the unreachable code below hides a substantial amount of code + // from the original codegen related with pointer overflow sanitizer. + assert(UnimplementedFeature::pointerOverflowSanitizer()); + llvm_unreachable("pointer overflow sanitizer NYI"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index cc146c2b10ce..4070d23ea209 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -107,6 +107,10 @@ TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { } } +mlir::Type CIRGenFunction::convertTypeForMem(QualType T) { + return CGM.getTypes().convertTypeForMem(T); +} + mlir::Type CIRGenFunction::convertType(QualType T) { return CGM.getTypes().ConvertType(T); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 5f57488b407e..6ce29919aa3f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -638,6 +638,8 @@ class CIRGenFunction : public CIRGenTypeCache { std::string getCounterRefTmpAsString(); std::string getCounterAggTmpAsString(); + mlir::Type convertTypeForMem(QualType T); + mlir::Type ConvertType(clang::QualType T); mlir::Type ConvertType(const TypeDecl *T) { return ConvertType(getContext().getTypeDeclType(T)); @@ -1188,6 +1190,16 @@ class CIRGenFunction : public CIRGenTypeCache { void buildNullabilityCheck(LValue LHS, mlir::Value RHS, clang::SourceLocation Loc); + /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to + /// detect undefined behavior when the pointer overflow sanitizer is enabled. + /// \p SignedIndices indicates whether any of the GEP indices are signed. + /// \p IsSubtraction indicates whether the expression used to form the GEP + /// is a subtraction. + mlir::Value buildCheckedInBoundsGEP(mlir::Type ElemTy, mlir::Value Ptr, + ArrayRef IdxList, + bool SignedIndices, bool IsSubtraction, + SourceLocation Loc); + void buildScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit = false); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 37ab71be53fe..c820751f21f6 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -51,6 +51,7 @@ struct UnimplementedFeature { static bool reportGlobalToASan() { return false; } static bool emitAsanPrologueOrEpilogue() { return false; } static bool emitCheckedInBoundsGEP() { return false; } + static bool pointerOverflowSanitizer() { return false; } // ObjC static bool setObjCGCLValueClass() { return false; } @@ -71,6 +72,9 @@ struct UnimplementedFeature { static bool buildLValueAlignmentAssumption() { return false; } static bool buildDerivedToBaseCastForDevirt() { return false; } + // Data layout + static bool dataLayoutGetIndexTypeSizeInBits() { return false; } + // Clang early optimizations or things defered to LLVM lowering. static bool shouldUseBZeroPlusStoresToInitialize() { return false; } static bool shouldUseMemSetToInitialize() { return false; } diff --git a/clang/test/CIR/CodeGen/pointers.cpp b/clang/test/CIR/CodeGen/pointers.cpp new file mode 100644 index 000000000000..5dcb458502c9 --- /dev/null +++ b/clang/test/CIR/CodeGen/pointers.cpp @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Should generate basic pointer arithmetics. +void foo(int *iptr, char *cptr, unsigned ustride) { + iptr + 2; + // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr + cptr + 3; + // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<3> : !s32i) : !s32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr + iptr - 2; + // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#STRIDE]]) : !s32i, !s32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr + cptr - 3; + // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<3> : !s32i) : !s32i + // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#STRIDE]]) : !s32i, !s32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr + iptr + ustride; + // CHECK: %[[#STRIDE:]] = cir.load %{{.+}} : cir.ptr , !u32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#STRIDE]] : !u32i), !cir.ptr + + // Must convert unsigned stride to a signed one. + iptr - ustride; + // CHECK: %[[#STRIDE:]] = cir.load %{{.+}} : cir.ptr , !u32i + // CHECK: %[[#SIGNSTRIDE:]] = cir.cast(integral, %[[#STRIDE]] : !u32i), !s32i + // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#SIGNSTRIDE]]) : !s32i, !s32i + // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr +} From 28a4ae1cd46f3840486aab709e21d9ebe2ec1d69 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 14 Jul 2023 20:03:17 -0300 Subject: [PATCH 1078/2301] [CIR][Lowering] Lower pointer comparisons Also simplifies the cir.cmp op lowering tests. ghstack-source-id: fc4125719fe3bf2b5038f9851b65755fc5f1690f Pull Request resolved: https://github.com/llvm/clangir/pull/165 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 ++ clang/test/CIR/Lowering/cmp.cir | 49 ++++++++----------- 2 files changed, 25 insertions(+), 28 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 01fcb2eec5eb..1998e3b807fa 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1367,6 +1367,10 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { auto kind = convertToICmpPredicate(cmpOp.getKind(), intTy.isSigned()); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else if (auto ptrTy = type.dyn_cast()) { + auto kind = convertToICmpPredicate(cmpOp.getKind(), /* isSigned=*/false); + llResult = rewriter.create( + cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { auto kind = convertToFCmpPredicate(cmpOp.getKind()); llResult = rewriter.create( diff --git a/clang/test/CIR/Lowering/cmp.cir b/clang/test/CIR/Lowering/cmp.cir index a1da2d8e26a0..94df95173a7a 100644 --- a/clang/test/CIR/Lowering/cmp.cir +++ b/clang/test/CIR/Lowering/cmp.cir @@ -1,5 +1,6 @@ -// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + !s32i = !cir.int module { cir.func @foo() { @@ -11,65 +12,57 @@ module { %5 = cir.load %0 : cir.ptr , !s32i %6 = cir.load %1 : cir.ptr , !s32i %7 = cir.cmp(gt, %5, %6) : !s32i, !cir.bool + // CHECK: llvm.icmp "sgt" %8 = cir.load %0 : cir.ptr , !s32i %9 = cir.load %1 : cir.ptr , !s32i %10 = cir.cmp(eq, %8, %9) : !s32i, !cir.bool + // CHECK: llvm.icmp "eq" %11 = cir.load %0 : cir.ptr , !s32i %12 = cir.load %1 : cir.ptr , !s32i %13 = cir.cmp(lt, %11, %12) : !s32i, !cir.bool + // CHECK: llvm.icmp "slt" %14 = cir.load %0 : cir.ptr , !s32i %15 = cir.load %1 : cir.ptr , !s32i %16 = cir.cmp(ge, %14, %15) : !s32i, !cir.bool + // CHECK: llvm.icmp "sge" %17 = cir.load %0 : cir.ptr , !s32i %18 = cir.load %1 : cir.ptr , !s32i %19 = cir.cmp(ne, %17, %18) : !s32i, !cir.bool + // CHECK: llvm.icmp "ne" %20 = cir.load %0 : cir.ptr , !s32i %21 = cir.load %1 : cir.ptr , !s32i %22 = cir.cmp(le, %20, %21) : !s32i, !cir.bool + // CHECK: llvm.icmp "sle" %23 = cir.load %2 : cir.ptr , f32 %24 = cir.load %3 : cir.ptr , f32 %25 = cir.cmp(gt, %23, %24) : f32, !cir.bool + // CHECK: llvm.fcmp "ugt" %26 = cir.load %2 : cir.ptr , f32 %27 = cir.load %3 : cir.ptr , f32 %28 = cir.cmp(eq, %26, %27) : f32, !cir.bool + // CHECK: llvm.fcmp "ueq" %29 = cir.load %2 : cir.ptr , f32 %30 = cir.load %3 : cir.ptr , f32 %31 = cir.cmp(lt, %29, %30) : f32, !cir.bool + // CHECK: llvm.fcmp "ult" %32 = cir.load %2 : cir.ptr , f32 %33 = cir.load %3 : cir.ptr , f32 %34 = cir.cmp(ge, %32, %33) : f32, !cir.bool + // CHECK: llvm.fcmp "uge" %35 = cir.load %2 : cir.ptr , f32 %36 = cir.load %3 : cir.ptr , f32 %37 = cir.cmp(ne, %35, %36) : f32, !cir.bool + // CHECK: llvm.fcmp "une" %38 = cir.load %2 : cir.ptr , f32 %39 = cir.load %3 : cir.ptr , f32 %40 = cir.cmp(le, %38, %39) : f32, !cir.bool + // CHECK: llvm.fcmp "ule" + + // Pointer comparisons. + %41 = cir.cmp(ne, %0, %1) : !cir.ptr, !cir.bool + // CHECK: llvm.icmp "ne" + %42 = cir.cmp(lt, %0, %1) : !cir.ptr, !cir.bool + // CHECK: llvm.icmp "ult" cir.return } } - -// MLIR: = llvm.icmp "sgt" -// MLIR: = llvm.icmp "eq" -// MLIR: = llvm.icmp "slt" -// MLIR: = llvm.icmp "sge" -// MLIR: = llvm.icmp "ne" -// MLIR: = llvm.icmp "sle" -// MLIR: = llvm.fcmp "ugt" -// MLIR: = llvm.fcmp "ueq" -// MLIR: = llvm.fcmp "ult" -// MLIR: = llvm.fcmp "uge" -// MLIR: = llvm.fcmp "une" -// MLIR: = llvm.fcmp "ule" - -// LLVM: icmp sgt i32 -// LLVM: icmp eq i32 -// LLVM: icmp slt i32 -// LLVM: icmp sge i32 -// LLVM: icmp ne i32 -// LLVM: icmp sle i32 -// LLVM: fcmp ugt float -// LLVM: fcmp ueq float -// LLVM: fcmp ult float -// LLVM: fcmp uge float -// LLVM: fcmp une float -// LLVM: fcmp ule float From 289a541b3346b1421839c3f9efc9b3f12347e865 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 14 Jul 2023 20:03:17 -0300 Subject: [PATCH 1079/2301] [CIR][Lowering] Lower CIR void pointers as opaque pointers LLVM's dialect does not support !llvm.ptr types. This patch works around this limitation by lowering CIR void pointers as opaque pointers. This prevents hacks like assuming void pointers to be char pointers. ghstack-source-id: 92632e0675ad43604862bffa60651abfb303782f Pull Request resolved: https://github.com/llvm/clangir/pull/166 --- clang/test/CIR/Lowering/types.cir | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 clang/test/CIR/Lowering/types.cir diff --git a/clang/test/CIR/Lowering/types.cir b/clang/test/CIR/Lowering/types.cir new file mode 100644 index 000000000000..ba52bf55514d --- /dev/null +++ b/clang/test/CIR/Lowering/types.cir @@ -0,0 +1,14 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!void = !cir.void +module { + cir.func @testTypeLowering() { + // Should lower void pointers as opaque pointers. + %0 = cir.const(#cir.null : !cir.ptr) : !cir.ptr + // CHECK: llvm.mlir.zero : !llvm.ptr + %1 = cir.const(#cir.null : !cir.ptr>) : !cir.ptr> + // CHECK: llvm.mlir.zero : !llvm.ptr + cir.return + } +} From 8bb0e130aa30f65c4dc60acecebc8e79edf1c53d Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 14 Jul 2023 20:03:17 -0300 Subject: [PATCH 1080/2301] [CIR][CIRGen][Lowering] Support pointer to bool casts Updates the codegen to generate pointer to bool casts while also adding the required lowering steps for said cast. Also adds an inferred context builder in CIR NullAttr. ghstack-source-id: 70ae6a9968526e4abb3ffc46ba9abbc3c3445777 Pull Request resolved: https://github.com/llvm/clangir/pull/167 --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 6 +++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 6 +-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 49 +++++++++++-------- clang/test/CIR/Lowering/cast.cir | 16 ++++-- 4 files changed, 50 insertions(+), 27 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 16c6b8ecec57..bc7d5f59b2b7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -73,6 +73,12 @@ def NullAttr : CIR_Attr<"Null", "null", [TypedAttrInterface]> { let parameters = (ins AttributeSelfTypeParameter<"">:$type); + let builders = [ + AttrBuilderWithInferredContext<(ins "Type":$type), [{ + return $_get(type.getContext(), type); + }]> + ]; + let assemblyFormat = [{}]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 8482c32999ff..7b477a501884 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -570,8 +570,8 @@ class ScalarExprEmitter : public StmtVisitor { /// Perform a pointer to boolean conversion. mlir::Value buildPointerToBoolConversion(mlir::Value V, QualType QT) { - // An extra pass should make this into a `cir.cmp V, nullptr` before - // lowering to LLVM. + // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM. + // We might want to have a separate pass for these types of conversions. return CGF.getBuilder().createPtrToBoolCast(V); } @@ -815,7 +815,7 @@ class ScalarExprEmitter : public StmtVisitor { return buildIntToBoolConversion(Src, loc); assert(Src.getType().isa<::mlir::cir::PointerType>()); - llvm_unreachable("pointer source not implemented"); + return buildPointerToBoolConversion(Src, SrcType); } /// Emit a conversion from the specified type to the specified destination diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 1998e3b807fa..4e9245e82ef6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -370,8 +370,15 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } - default: - llvm_unreachable("NYI"); + case mlir::cir::CastKind::ptr_to_bool: { + auto null = rewriter.create( + src.getLoc(), castOp.getSrc().getType(), + mlir::cir::NullAttr::get(castOp.getSrc().getType())); + rewriter.replaceOpWithNewOp( + castOp, mlir::cir::BoolType::get(getContext()), + mlir::cir::CmpOpKind::ne, castOp.getSrc(), null); + break; + } } return mlir::success(); @@ -466,7 +473,8 @@ class CIRScopeOpLowering return mlir::success(); } - // Split the current block before the ScopeOp to create the inlining point. + // Split the current block before the ScopeOp to create the inlining + // point. auto *currentBlock = rewriter.getInsertionBlock(); auto *remainingOpsBlock = rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); @@ -708,13 +716,13 @@ class CIRFuncLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; - /// Returns the name used for the linkage attribute. This *must* correspond to - /// the name of the attribute in ODS. + /// Returns the name used for the linkage attribute. This *must* correspond + /// to the name of the attribute in ODS. static StringRef getLinkageAttrNameString() { return "linkage"; } /// Only retain those attributes that are not constructed by - /// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out argument - /// attributes. + /// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out + /// argument attributes. void filterFuncAttributes(mlir::cir::FuncOp func, bool filterArgAndResAttrs, SmallVectorImpl &result) const { @@ -834,8 +842,8 @@ class CIRGetGlobalOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::GetGlobalOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - // FIXME(cir): Premature DCE to avoid lowering stuff we're not using. CIRGen - // should mitigate this and not emit the get_global. + // FIXME(cir): Premature DCE to avoid lowering stuff we're not using. + // CIRGen should mitigate this and not emit the get_global. if (op->getUses().empty()) { rewriter.eraseOp(op); return mlir::success(); @@ -875,7 +883,8 @@ class CIRSwitchOpLowering auto *exitBlock = rewriter.splitBlock(rewriter.getBlock(), rewriter.getInsertionPoint()); - // Allocate required data structures (disconsider default case in vectors). + // Allocate required data structures (disconsider default case in + // vectors). llvm::SmallVector caseValues; llvm::SmallVector caseDestinations; llvm::SmallVector caseOperands; @@ -983,9 +992,9 @@ class CIRGlobalOpLowering init = rewriter.getStringAttr(attr.getValue()); } else if (auto attr = constArr.getElts().dyn_cast()) { if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { - op.emitError() - << "unsupported lowering for #cir.const_array with element type " - << op.getSymType(); + op.emitError() << "unsupported lowering for #cir.const_array with " + "element type " + << op.getSymType(); return mlir::failure(); } } else { @@ -995,7 +1004,8 @@ class CIRGlobalOpLowering return mlir::failure(); } } else if (llvm::isa(init.value())) { - // Nothing to do since LLVM already supports these types as initializers. + // Nothing to do since LLVM already supports these types as + // initializers. } // Initializer is a constant integer: convert to MLIR builtin constant. else if (auto intAttr = init.value().dyn_cast()) { @@ -1030,10 +1040,10 @@ class CIRGlobalOpLowering return mlir::success(); } else if (isa(init.value())) { // TODO(cir): once LLVM's dialect has a proper zeroinitializer attribute - // this should be updated. For now, we tag the LLVM global with a cir.zero - // attribute that is later replaced with a zeroinitializer. Null pointers - // also use this path for simplicity, as we would otherwise require a - // region-based initialization for the global op. + // this should be updated. For now, we tag the LLVM global with a + // cir.zero attribute that is later replaced with a zeroinitializer. + // Null pointers also use this path for simplicity, as we would + // otherwise require a region-based initialization for the global op. auto llvmGlobalOp = rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, nullptr); auto cirZeroAttr = mlir::cir::ZeroAttr::get(getContext(), llvmType); @@ -1528,8 +1538,7 @@ std::unique_ptr createConvertCIRToLLVMPass() { extern void registerCIRDialectTranslation(mlir::MLIRContext &context); std::unique_ptr -lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, - LLVMContext &llvmCtx) { +lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx) { mlir::MLIRContext *mlirCtx = theModule.getContext(); mlir::PassManager pm(mlirCtx); diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 8ec26b9b2557..16010444be6f 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -42,6 +42,8 @@ module { %8 = cir.alloca !cir.ptr, cir.ptr >, ["e", init] {alignment = 8 : i64} cir.store %arg0, %0 : !u32i, cir.ptr cir.store %arg1, %1 : !s32i, cir.ptr + + // Integer casts. %9 = cir.load %0 : cir.ptr , !u32i %10 = cir.cast(integral, %9 : !u32i), !s8i // MLIR: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8 @@ -61,17 +63,22 @@ module { // Should not produce a cast. %32 = cir.cast(integral, %arg0 : !u32i), !s32i // Should not produce a cast. - cir.store %16, %6 : !s64i, cir.ptr - %17 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr - // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr - cir.store %17, %8 : !cir.ptr, cir.ptr > %21 = cir.load %20 : cir.ptr , !s16i %22 = cir.cast(integral, %21 : !s16i), !u64i // MLIR: %[[TMP:[0-9]+]] = llvm.sext %{{[0-9]+}} : i16 to i64 + + // Pointer casts. + cir.store %16, %6 : !s64i, cir.ptr + %17 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr + cir.store %17, %8 : !cir.ptr, cir.ptr > + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr %23 = cir.cast(int_to_ptr, %22 : !u64i), !cir.ptr // MLIR: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr %24 = cir.cast(ptr_to_int, %23 : !cir.ptr), !s32i // MLIR: %{{[0-9]+}} = llvm.ptrtoint %[[TMP2]] : !llvm.ptr to i32 + %29 = cir.cast(ptr_to_bool, %23 : !cir.ptr), !cir.bool + + // Floating point casts. %25 = cir.cast(int_to_float, %arg1 : !s32i), f32 // MLIR: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32 %26 = cir.cast(int_to_float, %arg0 : !u32i), f32 @@ -81,6 +88,7 @@ module { %28 = cir.cast(float_to_int, %arg2 : f32), !u32i // MLIR: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 %18 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %18, %2 : !s32i, cir.ptr %19 = cir.load %2 : cir.ptr , !s32i cir.return %19 : !s32i From 693c1a77bbb003271b1f99e8fc24bc86316673e2 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 14 Jul 2023 20:03:18 -0300 Subject: [PATCH 1081/2301] [CIR][CIRGen] Add codegen for pointer post inc/dec unary ops ghstack-source-id: b6df7e978f5b15e13257bce623bfca7307027a23 Pull Request resolved: https://github.com/llvm/clangir/pull/180 --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 9 ++++++++- clang/test/CIR/CodeGen/unary.cpp | 29 ++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index b8b519f334f6..d2a6ba392d9f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -182,7 +182,14 @@ static Address buildPointerWithAlignment(const Expr *E, // Unary &. if (const UnaryOperator *UO = dyn_cast(E)) { - assert(0 && "not implemented"); + // TODO(cir): maybe we should use cir.unary for pointers here instead. + if (UO->getOpcode() == UO_AddrOf) { + LValue LV = CGF.buildLValue(UO->getSubExpr()); + if (BaseInfo) + *BaseInfo = LV.getBaseInfo(); + assert(UnimplementedFeature::tbaa()); + return LV.getAddress(); + } } // TODO: conditional operators, comma. diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index 1fe53cd20681..b47b20c7d689 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -180,3 +180,32 @@ void doubles(double d) { // CHECK: %[[#D_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : f64), !cir.bool // CHECK: = cir.unary(not, %[[#D_BOOL]]) : !cir.bool, !cir.bool } + +void pointers(int *p) { +// CHECK: cir.func @{{[^ ]+}}pointers + // CHECK: %[[#P:]] = cir.alloca !cir.ptr, cir.ptr > + + +p; + // CHECK: cir.unary(plus, %{{.+}}) : !cir.ptr, !cir.ptr + + ++p; + // CHECK: %[[#INC:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#INC]] : !s32i), !cir.ptr + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + --p; + // CHECK: %[[#DEC:]] = cir.const(#cir.int<-1> : !s32i) : !s32i + // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#DEC]] : !s32i), !cir.ptr + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + p++; + // CHECK: %[[#INC:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#INC]] : !s32i), !cir.ptr + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + p--; + // CHECK: %[[#DEC:]] = cir.const(#cir.int<-1> : !s32i) : !s32i + // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#DEC]] : !s32i), !cir.ptr + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + + !p; + // %[[BOOLPTR:]] = cir.cast(ptr_to_bool, %15 : !cir.ptr), !cir.bool + // cir.unary(not, %[[BOOLPTR]]) : !cir.bool, !cir.bool +} From f3fd9376ef579a9060abc25f59f41ed1fef14e52 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 17 Jul 2023 15:06:43 -0300 Subject: [PATCH 1082/2301] [CIR][NFC] Simplify ArrayType parsing and printing ghstack-source-id: 3ecee068f76d6cf319f774600e052d4cc6d52944 Pull Request resolved: https://github.com/llvm/clangir/pull/170 --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 4 ++- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 25 ------------------- 2 files changed, 3 insertions(+), 26 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 7d157ad964fa..b84c9222be41 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -162,7 +162,9 @@ def CIR_ArrayType : CIR_Type<"Array", "array", let parameters = (ins "mlir::Type":$eltType, "uint64_t":$size); - let hasCustomAssemblyFormat = 1; + let assemblyFormat = [{ + `<` $eltType `x` $size `>` + }]; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index bbf6b382fc30..a0b8a0c20a61 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -156,31 +156,6 @@ void StructType::print(mlir::AsmPrinter &printer) const { printer << '>'; } -Type ArrayType::parse(mlir::AsmParser &parser) { - if (parser.parseLess()) - return Type(); - Type eltType; - if (parser.parseType(eltType)) - return Type(); - if (parser.parseKeyword("x")) - return Type(); - - uint64_t val = 0; - if (parser.parseInteger(val).failed()) - return Type(); - - if (parser.parseGreater()) - return Type(); - return get(parser.getContext(), eltType, val); -} - -void ArrayType::print(mlir::AsmPrinter &printer) const { - printer << '<'; - printer.printType(getEltType()); - printer << " x " << getSize(); - printer << '>'; -} - //===----------------------------------------------------------------------===// // Data Layout information for types //===----------------------------------------------------------------------===// From 117e3a084f016d48796ed868b4bd107b3a59e714 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 17 Jul 2023 15:06:44 -0300 Subject: [PATCH 1083/2301] [CIR][Lowering] Lower non-global constant arrays Lowers `cir.const` operations initialized with constant arrays to LLVM const operations. Also adds supports for `#const.array` with string literals. This implementation differs from Clang: it does not create a global for the constant array, nor uses memcpy to initialize the array. Instead, it stores a constant value directly into the stack where it is used. ghstack-source-id: af42e9a89a27360f58657803ea452d890d2f2324 Pull Request resolved: https://github.com/llvm/clangir/pull/171 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 116 ++++++++++++------ clang/test/CIR/Lowering/const.cir | 17 +++ 2 files changed, 94 insertions(+), 39 deletions(-) create mode 100644 clang/test/CIR/Lowering/const.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 4e9245e82ef6..5538944f6c83 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -33,6 +33,7 @@ #include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinDialect.h" +#include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/IRMapping.h" #include "mlir/IR/Operation.h" @@ -56,6 +57,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" +#include #include using namespace cir; @@ -611,6 +613,61 @@ class CIRStoreLowering : public mlir::OpConversionPattern { } }; +mlir::DenseElementsAttr +convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, + mlir::Type type) { + auto values = llvm::SmallVector{}; + auto stringAttr = attr.getElts().dyn_cast(); + assert(stringAttr && "expected string attribute here"); + for (auto element : stringAttr) + values.push_back({8, (uint64_t)element}); + return mlir::DenseElementsAttr::get( + mlir::RankedTensorType::get({(int64_t)values.size()}, type), + llvm::ArrayRef(values)); +} + +template +mlir::DenseElementsAttr +convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, mlir::Type type) { + auto values = llvm::SmallVector{}; + auto arrayAttr = attr.getElts().dyn_cast(); + assert(arrayAttr && "expected array here"); + for (auto element : arrayAttr) + values.push_back(element.cast().getValue()); + return mlir::DenseElementsAttr::get( + mlir::RankedTensorType::get({(int64_t)values.size()}, type), + llvm::ArrayRef(values)); +} + +std::optional +lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, + const mlir::TypeConverter *converter) { + + // Ensure ConstArrayAttr has a type. + auto typedConstArr = constArr.dyn_cast(); + assert(typedConstArr && "cir::ConstArrayAttr is not a mlir::TypedAttr"); + + // Ensure ConstArrayAttr type is a ArrayType. + auto cirArrayType = typedConstArr.getType().dyn_cast(); + assert(cirArrayType && "cir::ConstArrayAttr is not a cir::ArrayType"); + + // Is a ConstArrayAttr with an cir::ArrayType: fetch element type. + auto type = cirArrayType.getEltType(); + + // Convert array attr to LLVM compatible dense elements attr. + if (constArr.getElts().isa()) + return convertStringAttrToDenseElementsAttr(constArr, + converter->convertType(type)); + if (type.isa()) + return convertToDenseElementsAttr( + constArr, converter->convertType(type)); + if (type.isa()) + return convertToDenseElementsAttr( + constArr, converter->convertType(type)); + + return std::nullopt; +} + class CIRConstantLowering : public mlir::OpConversionPattern { public: @@ -642,8 +699,27 @@ class CIRConstantLowering return mlir::success(); } attr = op.getValue(); + } + // TODO(cir): constant arrays are currently just pushed into the stack using + // the store instruction, instead of being stored as global variables and + // then memcopyied into the stack (as done in Clang). + else if (auto arrTy = op.getType().dyn_cast()) { + // Fetch operation constant array initializer. + auto constArr = op.getValue().dyn_cast(); + if (!constArr) + return op.emitError() << "array does not have a constant initializer"; + + // Lower constant array initializer. + auto denseAttr = lowerConstArrayAttr(constArr, typeConverter); + if (!denseAttr.has_value()) { + op.emitError() + << "unsupported lowering for #cir.const_array with element type " + << arrTy.getEltType(); + return mlir::failure(); + } + attr = denseAttr.value(); } else - return op.emitError("unsupported constant type"); + return op.emitError() << "unsupported constant type " << op.getType(); rewriter.replaceOpWithNewOp( op, getTypeConverter()->convertType(op.getType()), attr); @@ -796,44 +872,6 @@ class CIRFuncLowering : public mlir::OpConversionPattern { } }; -template -mlir::DenseElementsAttr -convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, mlir::Type type) { - auto values = llvm::SmallVector{}; - auto arrayAttr = attr.getElts().dyn_cast(); - assert(arrayAttr && "expected array here"); - for (auto element : arrayAttr) - values.push_back(element.cast().getValue()); - return mlir::DenseElementsAttr::get( - mlir::RankedTensorType::get({(int64_t)values.size()}, type), - llvm::ArrayRef(values)); -} - -std::optional -lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, - const mlir::TypeConverter *converter) { - - // Ensure ConstArrayAttr has a type. - auto typedConstArr = constArr.dyn_cast(); - assert(typedConstArr && "cir::ConstArrayAttr is not a mlir::TypedAttr"); - - // Ensure ConstArrayAttr type is a ArrayType. - auto cirArrayType = typedConstArr.getType().dyn_cast(); - assert(cirArrayType && "cir::ConstArrayAttr is not a cir::ArrayType"); - - // Is a ConstArrayAttr with an cir::ArrayType: fetch element type. - auto type = cirArrayType.getEltType(); - - if (type.isa()) - return convertToDenseElementsAttr( - constArr, converter->convertType(type)); - if (type.isa()) - return convertToDenseElementsAttr( - constArr, converter->convertType(type)); - - return std::nullopt; -} - class CIRGetGlobalOpLowering : public mlir::OpConversionPattern { public: diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir new file mode 100644 index 000000000000..c8f6aba84590 --- /dev/null +++ b/clang/test/CIR/Lowering/const.cir @@ -0,0 +1,17 @@ +// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s +// XFAIL: * + +!s32i = !cir.int +!s8i = !cir.int +module { + cir.func @testConstArrInit() { + %0 = cir.const(#cir.const_array<"string\00" : !cir.array> : !cir.array) : !cir.array + // CHECK: llvm.mlir.constant(dense<[115, 116, 114, 105, 110, 103, 0]> : tensor<7xi8>) : !llvm.array<7 x i8> + %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array) : !cir.array + // CHECK: llvm.mlir.constant(dense<[1, 2]> : tensor<2xi32>) : !llvm.array<2 x i32> + %3 = cir.const(#cir.const_array<[1.000000e+00 : f32, 2.000000e+00 : f32]> : !cir.array) : !cir.array + // CHECK: llvm.mlir.constant(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) : !llvm.array<2 x f32> + cir.return + } +} From 9cb1c9e0b64e1ef164eeb6139f563622528d6aba Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sun, 23 Jul 2023 08:14:47 -0300 Subject: [PATCH 1084/2301] [CIR][Bugfix] Replace cir-tool with cir-opt in const.cir test --- clang/test/CIR/Lowering/const.cir | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index c8f6aba84590..95119c04c30c 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -1,6 +1,5 @@ -// RUN: cir-tool %s -cir-to-llvm -o %t.mlir +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -// XFAIL: * !s32i = !cir.int !s8i = !cir.int From b932b1549667e27ac4422a2779fc51a67f98bcbb Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sun, 23 Jul 2023 10:58:48 -0300 Subject: [PATCH 1085/2301] [CIR][NFC] Separate CIR to LLVM IR translation tests The goal is to keep tests self-contained and focused. Mixing CIR to LLVM IR translation with codegen tests, or with CIR to LLVM Dialect lowering tests, makes it harder to understand what is being tested and requires the same file to go through multiple tools. This patch creates a separate `clang/tests/CIR/Translation` folder dedicated strictly to CIR's LLVM IR translation interface. While `tests/Lowering` should validate the `lowerDirectlyFromCIRToLLVMIR` pass, `tests/Translation` should validate `CIRDialectLLVMIRTranslationInterface`. --- clang/test/CIR/CodeGen/pointer.cpp | 4 ---- clang/test/CIR/CodeGen/struct.c | 4 ---- clang/test/CIR/Translation/zeroinitializer.cir | 12 ++++++++++++ 3 files changed, 12 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/Translation/zeroinitializer.cir diff --git a/clang/test/CIR/CodeGen/pointer.cpp b/clang/test/CIR/CodeGen/pointer.cpp index 06245f06b32b..2ac11cd42e32 100644 --- a/clang/test/CIR/CodeGen/pointer.cpp +++ b/clang/test/CIR/CodeGen/pointer.cpp @@ -1,10 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// FIXME(cir): Move the test below to lowering and us a separate tool to lower from CIR to LLVM IR. -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s // Global pointer should be zero initialized by default. int *ptr; // CHECK: cir.global external @ptr = #cir.null : !cir.ptr -// LLVM: @ptr = global ptr null diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index a6c3d8736c58..cc3107d02849 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -1,8 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// FIXME(cir): Move the test below to lowering and us a separate tool to lower from CIR to LLVM IR. -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s struct Bar { int a; @@ -31,4 +28,3 @@ void baz(void) { // Check if global structs are zero-initialized. // CHECK: cir.global external @bar = #cir.zero : !ty_22struct2EBar22 -// LLVM: @bar = global %struct.Bar zeroinitializer diff --git a/clang/test/CIR/Translation/zeroinitializer.cir b/clang/test/CIR/Translation/zeroinitializer.cir new file mode 100644 index 000000000000..63750fee10cb --- /dev/null +++ b/clang/test/CIR/Translation/zeroinitializer.cir @@ -0,0 +1,12 @@ +// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s + +module { + // Should lower #cir.zero on structs to a zeroinitializer. + llvm.mlir.global external @bar() {addr_space = 0 : i32, cir.initial_value = #cir.zero : !llvm.struct<"struct.S", (i8, i32)>} : !llvm.struct<"struct.S", (i8, i32)> + // CHECK: @bar = global %struct.S zeroinitializer + + // Should lower #cir.null on pointers to a null initializer. + llvm.mlir.global external @ptr() {addr_space = 0 : i32, cir.initial_value = #cir.zero : !llvm.ptr} : !llvm.ptr + // CHECK: @ptr = global ptr null +} From 6c1d814dc249038860f45ae99f06b1bd303ba936 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 14 Jul 2023 00:02:13 -0300 Subject: [PATCH 1086/2301] [CIR][LifetimeCheck] Use cir.struct_element_addr to track exploded fields This adds exploded members into pmap, fix name printing for those, but doesn't touch aggregate initialization just yet. Coming next. Add the testcase, but no checks since this is incremental work towards getting those. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 88 +++++++++++++------ .../CIR/Transforms/lifetime-check-agg.cpp | 33 +++++++ 2 files changed, 92 insertions(+), 29 deletions(-) create mode 100644 clang/test/CIR/Transforms/lifetime-check-agg.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 356826c7f3e1..4c5df2efb49b 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -53,8 +53,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkAwait(AwaitOp awaitOp); void checkReturn(ReturnOp retOp); - void classifyTypeCategories(mlir::Value addr, mlir::Type t, - mlir::Location loc); + void classifyAndInitTypeCategories(mlir::Value addr, mlir::Type t, + mlir::Location loc, unsigned nestLevel); // FIXME: classify tasks and lambdas prior to check ptr deref // and pass down an enum. @@ -305,11 +305,11 @@ struct LifetimeCheckPass : public LifetimeCheckBase { } // Aggregates and exploded fields. - using ExplodedFieldsTy = llvm::SmallSet; + using ExplodedFieldsTy = llvm::SmallVector; DenseMap aggregates; - void addAggregate(mlir::Value a, SmallVectorImpl &fields) { + void addAggregate(mlir::Value a, SmallVectorImpl &fields) { assert(!aggregates.count(a) && "already tracked"); - aggregates[a].insert(fields.begin(), fields.end()); + aggregates[a].swap(fields); } // Useful helpers for debugging @@ -432,9 +432,18 @@ struct LifetimeCheckPass : public LifetimeCheckBase { }; } // namespace -static StringRef getVarNameFromValue(mlir::Value v) { +static std::string getVarNameFromValue(mlir::Value v) { if (auto allocaOp = dyn_cast(v.getDefiningOp())) - return allocaOp.getName(); + return allocaOp.getName().str(); + if (auto getElemOp = dyn_cast(v.getDefiningOp())) { + auto parent = dyn_cast(getElemOp.getStructAddr().getDefiningOp()); + if (parent) { + llvm::SmallString<128> finalName; + llvm::raw_svector_ostream Out(finalName); + Out << parent.getName() << "." << getElemOp.getMemberName(); + return Out.str().str(); + } + } assert(0 && "how did it get here?"); return ""; } @@ -870,10 +879,14 @@ static bool containsPointerElts(mlir::cir::StructType s) { }); } -static bool isAggregateType(mlir::Type agg) { +static bool isAggregateType(LifetimeCheckPass *pass, mlir::Type agg) { auto t = agg.dyn_cast(); if (!t) return false; + // Lambdas have their special handling, and shall not be considered as + // aggregate types. + if (pass->isLambdaType(agg)) + return false; // FIXME: For now we handle this in a more naive way: any pointer // element we find is enough to consider this an aggregate. But in // reality it should be as defined in 2.1: @@ -922,8 +935,10 @@ static bool isPointerType(mlir::Type t) { return isStructAndHasAttr(t); } -void LifetimeCheckPass::classifyTypeCategories(mlir::Value addr, mlir::Type t, - mlir::Location loc) { +void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr, + mlir::Type t, + mlir::Location loc, + unsigned nestLevel) { assert(!getPmap().count(addr) && "only one map entry for a given address"); getPmap()[addr] = {}; @@ -942,7 +957,7 @@ void LifetimeCheckPass::classifyTypeCategories(mlir::Value addr, mlir::Type t, return TypeCategory::Pointer; if (isOwnerType(t)) return TypeCategory::Owner; - if (isAggregateType(t)) + if (isAggregateType(this, t)) return TypeCategory::Aggregate; return TypeCategory::Value; }(); @@ -964,24 +979,39 @@ void LifetimeCheckPass::classifyTypeCategories(mlir::Value addr, mlir::Type t, // 2.1 - Aggregates are types we will “explode” (consider memberwise) at // local scopes, because the function can operate on the members directly. - // Explode all pointer members. - SmallVector fields; + // TODO: only track first level of aggregates subobjects for now, get some + // data before we increase this. + if (nestLevel > 1) + break; + + // Map values for members to it's index in the aggregate. auto members = t.cast().getMembers(); + SmallVector fieldVals; + fieldVals.assign(members.size(), {}); + + // Go through uses of the alloca via `cir.struct_element_addr`, and + // track only the fields that are actually used. + std::for_each(addr.use_begin(), addr.use_end(), [&](mlir::OpOperand &use) { + auto op = dyn_cast(use.getOwner()); + if (!op) + return; + + auto eltAddr = op.getResult(); + auto eltTy = + eltAddr.getType().cast().getPointee(); - unsigned fieldIdx = 0; - std::for_each(members.begin(), members.end(), [&](mlir::Type t) { - auto ptrType = t.dyn_cast(); - if (ptrType) - fields.push_back(fieldIdx); - fieldIdx++; + // Classify exploded types. Keep alloca original location. + classifyAndInitTypeCategories(eltAddr, eltTy, loc, ++nestLevel); + fieldVals[op.getMemberIndex().getZExtValue()] = eltAddr; }); - addAggregate(addr, fields); - // Differently from `TypeCategory::Pointer`, initialization for exploded - // pointer is done lazily, triggered whenever the relevant - // `cir.struct_element_addr` are seen. This also serves optimization - // purposes: only track fields that are actually seen. - break; + // In case this aggregate gets initialized at once, the fields need + // to be mapped to the elements values. + addAggregate(addr, fieldVals); + + // There might be pointers to this aggregate, so also make a value + // for it. + LLVM_FALLTHROUGH; } case TypeCategory::Value: { // 2.4.2 - When a local Value x is declared, add (x, {x}) to pmap. @@ -995,8 +1025,8 @@ void LifetimeCheckPass::classifyTypeCategories(mlir::Value addr, mlir::Type t, } void LifetimeCheckPass::checkAlloca(AllocaOp allocaOp) { - classifyTypeCategories(allocaOp.getAddr(), allocaOp.getAllocaType(), - allocaOp.getLoc()); + classifyAndInitTypeCategories(allocaOp.getAddr(), allocaOp.getAllocaType(), + allocaOp.getLoc(), /*nestLevel=*/0); } void LifetimeCheckPass::checkCoroTaskStore(StoreOp storeOp) { @@ -1187,7 +1217,7 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, << "declared here but invalid after enclosing " << parent << " ends"; } else { - StringRef outOfScopeVarName = getVarNameFromValue(*info.val); + auto outOfScopeVarName = getVarNameFromValue(*info.val); D.attachNote(info.loc) << "pointee '" << outOfScopeVarName << "' invalidated at end of scope"; } @@ -1233,7 +1263,7 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, // Looks like we found a bad path leading to this deference point, // diagnose it. - StringRef varName = getVarNameFromValue(addr); + auto varName = getVarNameFromValue(addr); auto D = emitWarning(loc); if (tasks.count(addr)) diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp new file mode 100644 index 000000000000..506b55d7c7c0 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +typedef enum SType { + INFO_ENUM_0 = 9, + INFO_ENUM_1 = 2020, +} SType; + +typedef struct InfoRaw { + SType type; + const void* __attribute__((__may_alias__)) next; +} InfoRaw; + +typedef unsigned long long FlagsPriv; +typedef struct InfoPriv { + SType type; + void* __attribute__((__may_alias__)) next; + FlagsPriv flags; +} InfoPriv; + +static const FlagsPriv PrivBit = 0x00000001; + +void escape_info(InfoRaw *info); +void exploded_fields(bool cond) { + { + InfoRaw info = {INFO_ENUM_0}; + if (cond) { + InfoPriv privTmp = {INFO_ENUM_1}; + privTmp.flags = PrivBit; + info.next = &privTmp; + } + escape_info(&info); + } +} \ No newline at end of file From 65028a12f5ea320105b0301f66762a36ea550304 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 25 Jul 2023 14:28:23 -0300 Subject: [PATCH 1087/2301] [CIR][CIRGen] Unblock extern global variables codegen path ghstack-source-id: f00fe1297836ce9749ae3c323e81917ff77f656e Pull Request resolved: https://github.com/llvm/clangir/pull/190 --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 8 ++++++-- clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/globals.cpp | 7 +++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 590561c96cec..a07627f1d420 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -628,8 +628,12 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, } // Emit section information for extern variables. - if (D->hasExternalStorage()) - assert(0 && "not implemented"); + if (D->hasExternalStorage()) { + if (const SectionAttr *SA = D->getAttr()) { + assert(!UnimplementedFeature::setGlobalVarSection()); + llvm_unreachable("section info for extern vars is NYI"); + } + } // Handle XCore specific ABI requirements. if (getTriple().getArch() == llvm::Triple::xcore) diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index c820751f21f6..b7230cab788c 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -37,6 +37,7 @@ struct UnimplementedFeature { // Unhandled global/linkage information. static bool unnamedAddr() { return false; } static bool setComdat() { return false; } + static bool setGlobalVarSection() { return false; } static bool setDSOLocal() { return false; } static bool threadLocal() { return false; } static bool setDLLStorageClass() { return false; } diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index a51e0e3b31cb..340e68320129 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -108,3 +108,10 @@ void get_globals() { // CHECK: %[[RES:[0-9]+]] = cir.get_global @ll : cir.ptr > // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr } + +// Should generate extern global variables. +extern int externVar; +int testExternVar(void) { return externVar; } +// CHECK: cir.global "private" external @externVar : !s32i +// CHECK: cir.func @{{.+}}testExternVar +// CHECK: cir.get_global @externVar : cir.ptr From 1d09e3246ee9a0497a60eadf03a22812a30528ae Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 25 Jul 2023 14:28:26 -0300 Subject: [PATCH 1088/2301] [CIR][CIRGen] Build LValue parentheses expressions ghstack-source-id: bfcc5a2912ff652dcdadb92c7cdd70ca868d6e9d Pull Request resolved: https://github.com/llvm/clangir/pull/191 --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 ++ clang/test/CIR/CodeGen/expressions.cpp | 11 +++++++++++ 2 files changed, 13 insertions(+) create mode 100644 clang/test/CIR/CodeGen/expressions.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index d2a6ba392d9f..ea83cd212ce7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1992,6 +1992,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { // bitfield lvalue or some other non-simple lvalue? return LV; } + case Expr::ParenExprClass: + return buildLValue(cast(E)->getSubExpr()); case Expr::DeclRefExprClass: return buildDeclRefLValue(cast(E)); case Expr::UnaryOperatorClass: diff --git a/clang/test/CIR/CodeGen/expressions.cpp b/clang/test/CIR/CodeGen/expressions.cpp new file mode 100644 index 000000000000..fa17f0921fcd --- /dev/null +++ b/clang/test/CIR/CodeGen/expressions.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void test(int a) { +// CHECK: cir.func @{{.+}}test + + // Should generate LValue parenthesis expression. + (a) = 1; + // CHECK: %[[#C:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: cir.store %[[#C]], %{{.+}} : !s32i, cir.ptr +} From 63d1a356421c305b496149e016ff7c1dd0e5b856 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 25 Jul 2023 14:28:30 -0300 Subject: [PATCH 1089/2301] [CIR][CIRGen] Unblock available_externally linkage codegen path ghstack-source-id: 9a6ef915e58cbd51d8314470e9448a51db61af53 Pull Request resolved: https://github.com/llvm/clangir/pull/192 --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 1 + clang/test/CIR/CodeGen/linkage.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index a07627f1d420..123d55fcc56d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1383,6 +1383,7 @@ mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibilityFromCIRLinkage( case mlir::cir::GlobalLinkageKind::ExternalLinkage: case mlir::cir::GlobalLinkageKind::ExternalWeakLinkage: case mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage: + case mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage: return mlir::SymbolTable::Visibility::Public; default: { llvm::errs() << "visibility not implemented for '" diff --git a/clang/test/CIR/CodeGen/linkage.c b/clang/test/CIR/CodeGen/linkage.c index 4ef82a43981d..aff2c6ccafad 100644 --- a/clang/test/CIR/CodeGen/linkage.c +++ b/clang/test/CIR/CodeGen/linkage.c @@ -23,3 +23,8 @@ static int var = 0; int get_var(void) { return var; } + +// Should generate available_externally linkage. +inline int availableExternallyMethod(void) { return 0; } +void callAvailableExternallyMethod(void) { availableExternallyMethod(); } +// CIR: cir.func available_externally @availableExternallyMethod From 4a6d02fb1809096f5ed4681d59b80dfde8c2f3d0 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 10 Jul 2023 08:45:22 -0300 Subject: [PATCH 1090/2301] [CIR][CIRGen] Support floating point subtraction While the original codegen attempts to generate a fmuladd, this patch ignores this optimization as it should be deferred to MLIR. It also updates FP options builder to be closer to the original codegen and improve the tracking of missing features. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 10 ++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 3 ++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 24 ++++++++++++++++++- .../CodeGen/UnimplementedFeatureGuarding.h | 9 +++++++ clang/test/CIR/CodeGen/binop.cpp | 11 +++++++++ 5 files changed, 55 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 5694a6a8c4fc..aa8dbf753b03 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -380,6 +380,16 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::CastKind::floating, v); } + mlir::Value createFSub(mlir::Value lhs, mlir::Value rhs) { + assert(!UnimplementedFeature::metaDataNode()); + if (IsFPConstrained) + llvm_unreachable("Constrained FP NYI"); + + assert(!UnimplementedFeature::foldBinOpFMF()); + return create(lhs.getLoc(), mlir::cir::BinOpKind::Sub, + lhs, rhs); + } + mlir::Value createPtrToBoolCast(mlir::Value v) { return create(v.getLoc(), getBoolTy(), mlir::cir::CastKind::ptr_to_bool, v); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 7b477a501884..53e5af790c5a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1156,7 +1156,8 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { assert(!UnimplementedFeature::cirVectorType()); if (Ops.LHS.getType().isa()) { - llvm_unreachable("NYI"); + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); + return Builder.createFSub(Ops.LHS, Ops.RHS); } if (Ops.isFixedPointOp()) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 4070d23ea209..890cb638ee77 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -13,6 +13,7 @@ #include "CIRGenFunction.h" #include "CIRGenCXXABI.h" #include "CIRGenModule.h" +#include "UnimplementedFeatureGuarding.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/ExprObjC.h" @@ -1237,7 +1238,28 @@ void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper( if (OldFPFeatures == FPFeatures) return; - llvm_unreachable("NYI"); + // TODO(cir): create guard to restore fast math configurations. + assert(!UnimplementedFeature::fastMathGuard()); + + llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode(); + // TODO(cir): override rounding behaviour once FM configs are guarded. + auto NewExceptionBehavior = + ToConstrainedExceptMD(static_cast( + FPFeatures.getExceptionMode())); + // TODO(cir): override exception behaviour once FM configs are guarded. + + // TODO(cir): override FP flags once FM configs are guarded. + assert(!UnimplementedFeature::fastMathFlags()); + + assert((CGF.CurFuncDecl == nullptr || CGF.builder.getIsFPConstrained() || + isa(CGF.CurFuncDecl) || + isa(CGF.CurFuncDecl) || + (NewExceptionBehavior == fp::ebIgnore && + NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) && + "FPConstrained should be enabled on entire function"); + + // TODO(cir): mark CIR function with fast math attributes. + assert(!UnimplementedFeature::fastMathFuncAttributes()); } CIRGenFunction::CIRGenFPOptionsRAII::~CIRGenFPOptionsRAII() { diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index b7230cab788c..accfbc63297f 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -89,6 +89,14 @@ struct UnimplementedFeature { static bool mayHaveIntegerOverflow() { return false; } static bool llvmLoweringPtrDiffConsidersPointee() { return false; } + // Folding methods. + static bool foldBinOpFMF() { return false; } + + // Fast math. + static bool fastMathGuard() { return false; } + static bool fastMathFlags() { return false; } + static bool fastMathFuncAttributes() { return false; } + static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } static bool incrementProfileCounter() { return false; } @@ -121,6 +129,7 @@ struct UnimplementedFeature { static bool chainCalls() { return false; } static bool operandBundles() { return false; } static bool exceptions() { return false; } + static bool metaDataNode() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index 6436e4582901..4384a9c391cb 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -100,3 +100,14 @@ void b3(int a, int b, int c, int d) { // CHECK-NEXT: %14 = cir.load %3 // CHECK-NEXT: %15 = cir.cmp(eq, %13, %14) // CHECK-NEXT: %16 = cir.ternary(%15, true + +void testFloatingPointBinOps(float a, float b) { + a * b; + // CHECK: cir.binop(mul, %{{.+}}, %{{.+}}) : f32 + a / b; + // CHECK: cir.binop(div, %{{.+}}, %{{.+}}) : f32 + a + b; + // CHECK: cir.binop(add, %{{.+}}, %{{.+}}) : f32 + a - b; + // CHECK: cir.binop(sub, %{{.+}}, %{{.+}}) : f32 +} From ced0793d6298de4e3add047d5ecd3367b5449423 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Sat, 1 Jul 2023 06:40:08 -0300 Subject: [PATCH 1091/2301] [CIR] Yield boolean value in cir.loop condition region Before this patch, the loop operation condition block yielded either empty or continue. This was replaced by a yield of a boolean value. This change simplifies both codegen and lowering, while also being semantically closer to the C language. It also refactors loop op codegen tests to validate only the lowering related to the cir.loop operation. Fixes #161 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 32 +-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 61 ++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 39 +--- clang/test/CIR/CodeGen/loop.cpp | 57 ++--- clang/test/CIR/CodeGen/rangefor.cpp | 6 +- clang/test/CIR/IR/branch.cir | 16 +- clang/test/CIR/IR/invalid.cir | 68 +++++- clang/test/CIR/IR/loop.cir | 48 ++-- clang/test/CIR/Lowering/dot.cir | 26 +-- clang/test/CIR/Lowering/loop.cir | 215 ++++++------------ clang/test/CIR/Transforms/merge-cleanups.cir | 18 +- 12 files changed, 235 insertions(+), 357 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index ca0013cbc783..bac028a34f13 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1114,8 +1114,7 @@ def LoopOp : CIR_Op<"loop", let description = [{ `cir.loop` represents C/C++ loop forms. It defines 3 blocks: - `cond`: region can contain multiple blocks, terminated by regular - `cir.yield` when control should yield back to the parent, and - `cir.yield continue` when execution continues to another region. + `cir.yield %x` where `%x` is the boolean value to be evaluated. The region destination depends on the loop form specified. - `step`: region with one block, containing code to compute the loop step, must be terminated with `cir.yield`. @@ -1130,7 +1129,8 @@ def LoopOp : CIR_Op<"loop", // i = i + 1; // } cir.loop while(cond : { - cir.yield continue + %2 = cir.const(#cir.bool) : !cir.bool + cir.yield %2 : !cir.bool }, step : { cir.yield }) { diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 72f3908c2857..c57af3890e09 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -652,26 +652,6 @@ mlir::LogicalResult CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, return res; } -static mlir::LogicalResult buildLoopCondYield(mlir::OpBuilder &builder, - mlir::Location loc, - mlir::Value cond) { - mlir::Block *trueBB = nullptr, *falseBB = nullptr; - { - mlir::OpBuilder::InsertionGuard guard(builder); - trueBB = builder.createBlock(builder.getBlock()->getParent()); - builder.create(loc, YieldOpKind::Continue); - } - { - mlir::OpBuilder::InsertionGuard guard(builder); - falseBB = builder.createBlock(builder.getBlock()->getParent()); - builder.create(loc); - } - - assert((trueBB && falseBB) && "expected both blocks to exist"); - builder.create(loc, cond, trueBB, falseBB); - return mlir::success(); -} - mlir::LogicalResult CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef ForAttrs) { @@ -705,8 +685,7 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, assert(!UnimplementedFeature::createProfileWeightsForLoop()); assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal = evaluateExprAsBool(S.getCond()); - if (buildLoopCondYield(b, loc, condVal).failed()) - loopRes = mlir::failure(); + builder.create(loc, condVal); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -793,8 +772,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { loc, boolTy, mlir::cir::BoolAttr::get(b.getContext(), boolTy, true)); } - if (buildLoopCondYield(b, loc, condVal).failed()) - loopRes = mlir::failure(); + builder.create(loc, condVal); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -862,8 +840,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { // expression compares unequal to 0. The condition must be a // scalar type. mlir::Value condVal = evaluateExprAsBool(S.getCond()); - if (buildLoopCondYield(b, loc, condVal).failed()) - loopRes = mlir::failure(); + builder.create(loc, condVal); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -927,8 +904,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // expression compares unequal to 0. The condition must be a // scalar type. condVal = evaluateExprAsBool(S.getCond()); - if (buildLoopCondYield(b, loc, condVal).failed()) - loopRes = mlir::failure(); + builder.create(loc, condVal); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 2e6cbf7fea53..4dbb9a4d5d32 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -14,6 +14,7 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/ADT/SmallVector.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" @@ -1098,12 +1099,9 @@ void LoopOp::build(OpBuilder &builder, OperationState &result, /// operand is not a constant. void LoopOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { - // If any index all the underlying regions branch back to the parent - // operation. - if (!point.isParent()) { - regions.push_back(RegionSuccessor()); + // If any index, do nothing. + if (!point.isParent()) return; - } // FIXME: we want to look at cond region for getting more accurate results // if the other regions will get a chance to execute. @@ -1115,26 +1113,29 @@ void LoopOp::getSuccessorRegions(mlir::RegionBranchPoint point, llvm::SmallVector LoopOp::getLoopRegions() { return {&getBody()}; } LogicalResult LoopOp::verify() { - // Cond regions should only terminate with plain 'cir.yield' or - // 'cir.yield continue'. - auto terminateError = [&]() { - return emitOpError() << "cond region must be terminated with " - "'cir.yield' or 'cir.yield continue'"; - }; - auto &blocks = getCond().getBlocks(); - for (Block &block : blocks) { - if (block.empty()) - continue; - auto &op = block.back(); - if (isa(op)) - continue; - if (!isa(op)) - terminateError(); - auto y = cast(op); - if (!(y.isPlain() || y.isContinue())) - terminateError(); - } + if (getCond().empty() || getStep().empty() || getBody().empty()) + return emitOpError("regions must not be empty"); + + auto condYield = dyn_cast(getCond().back().getTerminator()); + auto stepYield = dyn_cast(getStep().back().getTerminator()); + + if (!condYield || !stepYield) + return emitOpError( + "cond and step regions must be terminated with 'cir.yield'"); + + if (condYield.getNumOperands() != 1 || + !condYield.getOperand(0).getType().isa()) + return emitOpError("cond region must yield a single boolean value"); + + if (stepYield.getNumOperands() != 0) + return emitOpError("step region should not yield values"); + + // Body may yield or return. + auto *bodyTerminator = getBody().back().getTerminator(); + + if (isa(bodyTerminator) && bodyTerminator->getNumOperands() != 0) + return emitOpError("body region must not yield values"); return success(); } @@ -1261,8 +1262,8 @@ void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, LogicalResult GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { - // Verify that the result type underlying pointer type matches the type of the - // referenced cir.global or cir.func op. + // Verify that the result type underlying pointer type matches the type of + // the referenced cir.global or cir.func op. auto op = symbolTable.lookupNearestSymbolFrom(*this, getNameAttr()); if (!(isa(op) || isa(op))) return emitOpError("'") @@ -1296,8 +1297,8 @@ VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return success(); auto name = *getName(); - // Verify that the result type underlying pointer type matches the type of the - // referenced cir.global or cir.func op. + // Verify that the result type underlying pointer type matches the type of + // the referenced cir.global or cir.func op. auto op = dyn_cast_or_null( symbolTable.lookupNearestSymbolFrom(*this, getNameAttr())); if (!op) @@ -1555,7 +1556,6 @@ void cir::FuncOp::print(OpAsmPrinter &p) { getFunctionTypeAttrName(), getLinkageAttrName(), getBuiltinAttrName(), getNoProtoAttrName(), getExtraAttrsAttrName()}); - if (auto aliaseeName = getAliasee()) { p << " alias("; p.printSymbolName(*aliaseeName); @@ -1785,7 +1785,8 @@ LogicalResult UnaryOp::verify() { case cir::UnaryOpKind::Inc: LLVM_FALLTHROUGH; case cir::UnaryOpKind::Dec: { - // TODO: Consider looking at the memory interface instead of LoadOp/StoreOp. + // TODO: Consider looking at the memory interface instead of + // LoadOp/StoreOp. auto loadOp = getInput().getDefiningOp(); if (!loadOp) return emitOpError() << "requires input to be defined by a memory load"; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5538944f6c83..a143c7631e19 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -118,27 +118,6 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { using mlir::OpConversionPattern::OpConversionPattern; using LoopKind = mlir::cir::LoopOpKind; - mlir::LogicalResult - fetchCondRegionYields(mlir::Region &condRegion, - mlir::cir::YieldOp &yieldToBody, - mlir::cir::YieldOp &yieldToCont) const { - for (auto &bb : condRegion) { - if (auto yieldOp = dyn_cast(bb.getTerminator())) { - if (!yieldOp.getKind().has_value()) - yieldToCont = yieldOp; - else if (yieldOp.getKind() == mlir::cir::YieldOpKind::Continue) - yieldToBody = yieldOp; - else - return mlir::failure(); - } - } - - // Succeed only if both yields are found. - if (!yieldToBody || !yieldToCont) - return mlir::failure(); - return mlir::success(); - } - mlir::LogicalResult matchAndRewrite(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { @@ -150,9 +129,8 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { // Fetch required info from the condition region. auto &condRegion = loopOp.getCond(); auto &condFrontBlock = condRegion.front(); - mlir::cir::YieldOp yieldToBody, yieldToCont; - if (fetchCondRegionYields(condRegion, yieldToBody, yieldToCont).failed()) - return loopOp.emitError("failed to fetch yields in cond region"); + auto condYield = + cast(condRegion.back().getTerminator()); // Fetch required info from the body region. auto &bodyRegion = loopOp.getBody(); @@ -165,7 +143,7 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { auto &stepRegion = loopOp.getStep(); auto &stepFrontBlock = stepRegion.front(); auto stepYield = - dyn_cast(stepRegion.back().getTerminator()); + cast(stepRegion.back().getTerminator()); // Move loop op region contents to current CFG. rewriter.inlineRegionBefore(condRegion, continueBlock); @@ -178,13 +156,10 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { auto &entry = (kind != LoopKind::DoWhile ? condFrontBlock : bodyFrontBlock); rewriter.create(loopOp.getLoc(), &entry); - // Set loop exit point to continue block. - rewriter.setInsertionPoint(yieldToCont); - rewriter.replaceOpWithNewOp(yieldToCont, continueBlock); - - // Branch from condition to body. - rewriter.setInsertionPoint(yieldToBody); - rewriter.replaceOpWithNewOp(yieldToBody, &bodyFrontBlock); + // Branch to body when true and to exit when false. + rewriter.setInsertionPoint(condYield); + rewriter.replaceOpWithNewOp( + condYield, condYield.getOperand(0), &bodyFrontBlock, continueBlock); // Branch from body to condition or to step on for-loop cases. rewriter.setInsertionPoint(bodyYield); diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 90831e31e898..698c32c890c9 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -8,7 +8,8 @@ void l0() { // CHECK: cir.func @_Z2l0v // CHECK: cir.loop for(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.yield %0 // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -27,11 +28,7 @@ void l1() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool -// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield %6 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i @@ -62,12 +59,8 @@ void l2(bool cond) { // CHECK: cir.func @_Z2l2b // CHECK: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.yield %3 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -80,7 +73,8 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %3 = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.yield %3 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -93,13 +87,9 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool -// CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool +// CHECK-NEXT: cir.yield %4 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -128,11 +118,7 @@ void l3(bool cond) { // CHECK: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield %3 // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -145,7 +131,8 @@ void l3(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %3 = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.yield %3 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -160,11 +147,7 @@ void l3(bool cond) { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool -// CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield %4 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -188,7 +171,8 @@ void l4() { // CHECK: cir.func @_Z2l4v // CHECK: cir.loop while(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %4 = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.yield %4 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -215,11 +199,7 @@ void l5() { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool -// CHECK-NEXT: cir.brcond %1 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield %1 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -238,7 +218,8 @@ void l6() { // CHECK: cir.func @_Z2l6v() // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.yield %0 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 05d310efc515..3bf27b417692 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -46,11 +46,7 @@ void init(unsigned numImages) { // CHECK: cir.store %11, %6 : !ty_22struct2E__vector_iterator22, cir.ptr // CHECK: cir.loop for(cond : { // CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool -// CHECK: cir.brcond %12 ^bb1, ^bb2 -// CHECK: ^bb1: // pred: ^bb0 -// CHECK: cir.yield continue -// CHECK: ^bb2: // pred: ^bb0 -// CHECK: cir.yield +// CHECK: cir.yield %12 : !cir.bool // CHECK: }, step : { // CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr // CHECK: cir.yield diff --git a/clang/test/CIR/IR/branch.cir b/clang/test/CIR/IR/branch.cir index 6f75d9e25bd3..bc9c26df7669 100644 --- a/clang/test/CIR/IR/branch.cir +++ b/clang/test/CIR/IR/branch.cir @@ -7,17 +7,13 @@ cir.func @b0() { cir.scope { cir.loop while(cond : { %0 = cir.const(#true) : !cir.bool - cir.brcond %0 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield + cir.yield %0 : !cir.bool }, step : { cir.yield }) { cir.br ^bb1 ^bb1: - cir.return + cir.yield } } cir.return @@ -27,17 +23,13 @@ cir.func @b0() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %0 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield %0 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: cir.br ^bb1 // CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.return +// CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 17d3afcfe0e9..78d93a2f933a 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -163,7 +163,7 @@ cir.func @cast4(%p: !cir.ptr) { #true = #cir.bool : !cir.bool cir.func @b0() { cir.scope { - cir.loop while(cond : { // expected-error {{cond region must be terminated with 'cir.yield' or 'cir.yield continue'}} + cir.loop while(cond : { // expected-error {{cond region must yield a single boolean value}} %0 = cir.const(#true) : !cir.bool cir.brcond %0 ^bb1, ^bb2 ^bb1: @@ -183,6 +183,72 @@ cir.func @b0() { // ----- +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +cir.func @b0() { + cir.loop while(cond : { // expected-error {{cond and step regions must be terminated with 'cir.yield'}} + %0 = cir.const(#true) : !cir.bool + cir.return %0 : !cir.bool + }, step : { + cir.yield + }) { + cir.yield + } + cir.return +} + +// ----- + +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +cir.func @b0() { + cir.loop while(cond : { // expected-error {{cond and step regions must be terminated with 'cir.yield'}} + %0 = cir.const(#true) : !cir.bool + cir.yield %0 : !cir.bool + }, step : { + cir.return + }) { + cir.return + } + cir.return +} + +// ----- + +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +cir.func @b0() { + cir.loop while(cond : { // expected-error {{step region should not yield values}} + %0 = cir.const(#true) : !cir.bool + cir.yield %0 : !cir.bool + }, step : { + %1 = cir.const(#true) : !cir.bool + cir.yield %1 : !cir.bool + }) { + cir.return + } + cir.return +} + +// ----- + +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +cir.func @b0() { + cir.loop while(cond : { // expected-error {{body region must not yield values}} + %0 = cir.const(#true) : !cir.bool + cir.yield %0 : !cir.bool + }, step : { + cir.yield + }) { + %1 = cir.const(#true) : !cir.bool + cir.yield %1 : !cir.bool + } + cir.return +} + +// ----- + !u32i = !cir.int !u8i = !cir.int module { diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index ac9658a304d3..9b1ba9da6a80 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -15,11 +15,7 @@ cir.func @l0() { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<10> : !u32i) : !u32i %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.brcond %6 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield + cir.yield %6 : !cir.bool }, step : { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<1> : !u32i) : !u32i @@ -46,11 +42,7 @@ cir.func @l0() { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<10> : !u32i) : !u32i %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.brcond %6 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield + cir.yield %6 : !cir.bool }, step : { cir.yield }) { @@ -74,11 +66,7 @@ cir.func @l0() { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<10> : !u32i) : !u32i %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.brcond %6 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield + cir.yield %6 : !cir.bool }, step : { cir.yield }) { @@ -97,11 +85,7 @@ cir.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield %6 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i @@ -124,11 +108,7 @@ cir.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield %6 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -147,11 +127,7 @@ cir.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.yield %6 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -165,7 +141,8 @@ cir.func @l0() { cir.func @l1() { cir.scope { cir.loop while(cond : { - cir.yield continue + %0 = cir.const(#true) : !cir.bool + cir.yield %0 : !cir.bool }, step : { cir.yield }) { @@ -178,7 +155,8 @@ cir.func @l1() { // CHECK: cir.func @l1 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.yield %0 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -191,7 +169,8 @@ cir.func @l1() { cir.func @l2() { cir.scope { cir.loop while(cond : { - cir.yield + %0 = cir.const(#true) : !cir.bool + cir.yield %0 : !cir.bool }, step : { cir.yield }) { @@ -204,7 +183,8 @@ cir.func @l2() { // CHECK: cir.func @l2 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield +// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.yield %0 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 8b3b553492b1..780317bd8d2a 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR !s32i = !cir.int @@ -23,11 +23,7 @@ module { %11 = cir.load %2 : cir.ptr , !s32i %12 = cir.cmp(lt, %10, %11) : !s32i, !s32i %13 = cir.cast(int_to_bool, %12 : !s32i), !cir.bool - cir.brcond %13 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.yield %13 : !cir.bool }, step : { %10 = cir.load %8 : cir.ptr , !s32i %11 = cir.unary(inc, %10) : !s32i, !s32i @@ -80,7 +76,7 @@ module { // MLIR-NEXT: %13 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: llvm.store %13, %12 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb6 +// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb4 // MLIR-NEXT: %14 = llvm.load %12 : !llvm.ptr // MLIR-NEXT: %15 = llvm.load %5 : !llvm.ptr // MLIR-NEXT: %16 = llvm.icmp "slt" %14, %15 : i32 @@ -89,12 +85,8 @@ module { // MLIR-NEXT: %19 = llvm.icmp "ne" %17, %18 : i32 // MLIR-NEXT: %20 = llvm.zext %19 : i1 to i8 // MLIR-NEXT: %21 = llvm.trunc %20 : i8 to i1 -// MLIR-NEXT: llvm.cond_br %21, ^bb3, ^bb4 +// MLIR-NEXT: llvm.cond_br %21, ^bb3, ^bb5 // MLIR-NEXT: ^bb3: // pred: ^bb2 -// MLIR-NEXT: llvm.br ^bb5 -// MLIR-NEXT: ^bb4: // pred: ^bb2 -// MLIR-NEXT: llvm.br ^bb7 -// MLIR-NEXT: ^bb5: // pred: ^bb3 // MLIR-NEXT: %22 = llvm.load %1 : !llvm.ptr // MLIR-NEXT: %23 = llvm.load %12 : !llvm.ptr // MLIR-NEXT: %24 = llvm.getelementptr %22[%23] : (!llvm.ptr, i32) -> !llvm.ptr @@ -107,16 +99,16 @@ module { // MLIR-NEXT: %31 = llvm.load %9 : !llvm.ptr // MLIR-NEXT: %32 = llvm.fadd %31, %30 : f64 // MLIR-NEXT: llvm.store %32, %9 : f64, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb6 -// MLIR-NEXT: ^bb6: // pred: ^bb5 +// MLIR-NEXT: llvm.br ^bb4 +// MLIR-NEXT: ^bb4: // pred: ^bb3 // MLIR-NEXT: %33 = llvm.load %12 : !llvm.ptr // MLIR-NEXT: %34 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: %35 = llvm.add %33, %34 : i32 // MLIR-NEXT: llvm.store %35, %12 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb7: // pred: ^bb4 -// MLIR-NEXT: llvm.br ^bb8 -// MLIR-NEXT: ^bb8: // pred: ^bb7 +// MLIR-NEXT: ^bb5: // pred: ^bb2 +// MLIR-NEXT: llvm.br ^bb6 +// MLIR-NEXT: ^bb6: // pred: ^bb5 // MLIR-NEXT: %36 = llvm.load %9 : !llvm.ptr // MLIR-NEXT: llvm.store %36, %7 : f64, !llvm.ptr // MLIR-NEXT: %37 = llvm.load %7 : !llvm.ptr diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index f513185ac0ca..dcef5e304c5c 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -1,5 +1,5 @@ -// RUN: cir-opt %s -cir-to-llvm -o %t.mlir -// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s !s32i = !cir.int module { @@ -12,11 +12,7 @@ module { %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.brcond %5 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.yield %5 : !cir.bool }, step : { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i @@ -28,161 +24,90 @@ module { cir.return } -// MLIR: module { -// MLIR-NEXT: llvm.func @testFor() -// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 -// MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 -// MLIR-NEXT: llvm.store %2, %1 : i32, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb1 -// ============= Condition block ============= -// MLIR-NEXT: ^bb1: // 2 preds: ^bb0, ^bb5 -// MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr -// MLIR-NEXT: %4 = llvm.mlir.constant(10 : i32) : i32 -// MLIR-NEXT: %5 = llvm.icmp "slt" %3, %4 : i32 -// MLIR-NEXT: %6 = llvm.zext %5 : i1 to i32 -// MLIR-NEXT: %7 = llvm.mlir.constant(0 : i32) : i32 -// MLIR-NEXT: %8 = llvm.icmp "ne" %6, %7 : i32 -// MLIR-NEXT: %9 = llvm.zext %8 : i1 to i8 -// MLIR-NEXT: %10 = llvm.trunc %9 : i8 to i1 -// MLIR-NEXT: llvm.cond_br %10, ^bb2, ^bb3 -// MLIR-NEXT: ^bb2: // pred: ^bb1 -// MLIR-NEXT: llvm.br ^bb4 -// MLIR-NEXT: ^bb3: // pred: ^bb1 -// MLIR-NEXT: llvm.br ^bb6 -// ============= Body block ============= -// MLIR-NEXT: ^bb4: // pred: ^bb2 -// MLIR-NEXT: llvm.br ^bb5 -// ============= Step block ============= -// MLIR-NEXT: ^bb5: // pred: ^bb4 -// MLIR-NEXT: %11 = llvm.load %1 : !llvm.ptr -// MLIR-NEXT: %12 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: %13 = llvm.add %11, %12 : i32 -// MLIR-NEXT: llvm.store %13, %1 : i32, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb1 -// ============= Exit block ============= -// MLIR-NEXT: ^bb6: // pred: ^bb3 -// MLIR-NEXT: llvm.return -// MLIR-NEXT: } + // CHECK: llvm.func @testFor() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + // Test while cir.loop operation lowering. cir.func @testWhile(%arg0: !s32i) { %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, cir.ptr - cir.scope { - cir.loop while(cond : { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.const(#cir.int<10> : !s32i) : !s32i - %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i - %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool - cir.brcond %4 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield - }, step : { - cir.yield - }) { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.unary(inc, %1) : !s32i, !s32i - cir.store %2, %0 : !s32i, cir.ptr - cir.yield - } + cir.loop while(cond : { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i + %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool + cir.yield %4 : !cir.bool + }, step : { + cir.yield + }) { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.unary(inc, %1) : !s32i, !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.yield } cir.return } - // MLIR: llvm.func @testWhile(%arg0: i32) - // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 - // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr - // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr - // MLIR-NEXT: llvm.br ^bb1 - // MLIR-NEXT: ^bb1: - // MLIR-NEXT: llvm.br ^bb2 - // ============= Condition block ============= - // MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb5 - // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr - // MLIR-NEXT: %3 = llvm.mlir.constant(10 : i32) : i32 - // MLIR-NEXT: %4 = llvm.icmp "slt" %2, %3 : i32 - // MLIR-NEXT: %5 = llvm.zext %4 : i1 to i32 - // MLIR-NEXT: %6 = llvm.mlir.constant(0 : i32) : i32 - // MLIR-NEXT: %7 = llvm.icmp "ne" %5, %6 : i32 - // MLIR-NEXT: %8 = llvm.zext %7 : i1 to i8 - // MLIR-NEXT: %9 = llvm.trunc %8 : i8 to i1 - // MLIR-NEXT: llvm.cond_br %9, ^bb3, ^bb4 - // MLIR-NEXT: ^bb3: // pred: ^bb2 - // MLIR-NEXT: llvm.br ^bb5 - // MLIR-NEXT: ^bb4: // pred: ^bb2 - // MLIR-NEXT: llvm.br ^bb6 - // ============= Body block ============= - // MLIR-NEXT: ^bb5: // pred: ^bb3 - // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr - // MLIR-NEXT: %11 = llvm.mlir.constant(1 : i32) : i32 - // MLIR-NEXT: %12 = llvm.add %10, %11 : i32 - // MLIR-NEXT: llvm.store %12, %1 : i32, !llvm.ptr - // MLIR-NEXT: llvm.br ^bb2 - // ============= Exit block ============= - // MLIR-NEXT: ^bb6: // pred: ^bb4 - // MLIR-NEXT: llvm.br ^bb7 + // CHECK: llvm.func @testWhile + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#COND]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + // Test do-while cir.loop operation lowering. cir.func @testDoWhile(%arg0: !s32i) { %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, cir.ptr - cir.scope { - cir.loop dowhile(cond : { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.const(#cir.int<10> : !s32i) : !s32i - %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i - %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool - cir.brcond %4 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield - }, step : { - cir.yield - }) { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.unary(inc, %1) : !s32i, !s32i - cir.store %2, %0 : !s32i, cir.ptr - cir.yield - } + cir.loop dowhile(cond : { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i + %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool + cir.yield %4 : !cir.bool + }, step : { + cir.yield + }) { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.unary(inc, %1) : !s32i, !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.yield } cir.return } - // MLIR: llvm.func @testDoWhile(%arg0: i32) - // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 - // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr - // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr - // MLIR-NEXT: llvm.br ^bb1 - // MLIR-NEXT: ^bb1: - // MLIR-NEXT: llvm.br ^bb5 - // ============= Condition block ============= - // MLIR-NEXT: ^bb2: - // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr - // MLIR-NEXT: %3 = llvm.mlir.constant(10 : i32) : i32 - // MLIR-NEXT: %4 = llvm.icmp "slt" %2, %3 : i32 - // MLIR-NEXT: %5 = llvm.zext %4 : i1 to i32 - // MLIR-NEXT: %6 = llvm.mlir.constant(0 : i32) : i32 - // MLIR-NEXT: %7 = llvm.icmp "ne" %5, %6 : i32 - // MLIR-NEXT: %8 = llvm.zext %7 : i1 to i8 - // MLIR-NEXT: %9 = llvm.trunc %8 : i8 to i1 - // MLIR-NEXT: llvm.cond_br %9, ^bb3, ^bb4 - // MLIR-NEXT: ^bb3: - // MLIR-NEXT: llvm.br ^bb5 - // MLIR-NEXT: ^bb4: - // MLIR-NEXT: llvm.br ^bb6 - // ============= Body block ============= - // MLIR-NEXT: ^bb5: - // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr - // MLIR-NEXT: %11 = llvm.mlir.constant(1 : i32) : i32 - // MLIR-NEXT: %12 = llvm.add %10, %11 : i32 - // MLIR-NEXT: llvm.store %12, %1 : i32, !llvm.ptr - // MLIR-NEXT: llvm.br ^bb2 - // ============= Exit block ============= - // MLIR-NEXT: ^bb6: - // MLIR-NEXT: llvm.br ^bb7 + // CHECK: llvm.func @testDoWhile + // [...] + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#COND:]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#COND]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } } diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 3b0b21e935fe..818570930928 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -65,11 +65,7 @@ module { cir.scope { cir.loop while(cond : { %0 = cir.const(#true) : !cir.bool - cir.brcond %0 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield + cir.yield %0 : !cir.bool }, step : { cir.yield }) { @@ -85,11 +81,7 @@ module { cir.scope { cir.loop while(cond : { %0 = cir.const(#false) : !cir.bool - cir.brcond %0 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield + cir.yield %0 : !cir.bool }, step : { cir.yield }) { @@ -141,7 +133,8 @@ module { // CHECK: cir.func @l0 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.yield %0 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -154,7 +147,8 @@ module { // CHECK: cir.func @l1 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield +// CHECK-NEXT: %0 = cir.const(#false) : !cir.bool +// CHECK-NEXT: cir.yield %0 : !cir.bool // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { From c54557b49fc1e2043c3cfae3e9eafb4c3dc7d29c Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 14 Jul 2023 14:03:47 -0300 Subject: [PATCH 1092/2301] [CIR][CIRGen] Generate 80-bit long doubles Uses the builtin types to generate the long double 80-bit variant present in x86/x87 ISA. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 27 +++++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 ++++ clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 5 ++++- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 3 +++ clang/test/CIR/CodeGen/types.c | 3 +++ 5 files changed, 41 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index aa8dbf753b03..2fe8d35589bd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -229,6 +229,33 @@ class CIRGenBuilderTy : public mlir::OpBuilder { } bool isInt(mlir::Type i) { return i.isa(); } + mlir::FloatType getLongDouble80BitsTy() const { + return typeCache.LongDouble80BitsTy; + } + + /// Get the proper floating point type for the given semantics. + mlir::FloatType getFloatTyForFormat(const llvm::fltSemantics &format, + bool useNativeHalf) const { + if (&format == &llvm::APFloat::IEEEhalf()) { + llvm_unreachable("IEEEhalf float format is NYI"); + } + + if (&format == &llvm::APFloat::BFloat()) + llvm_unreachable("BFloat float format is NYI"); + if (&format == &llvm::APFloat::IEEEsingle()) + llvm_unreachable("IEEEsingle float format is NYI"); + if (&format == &llvm::APFloat::IEEEdouble()) + llvm_unreachable("IEEEdouble float format is NYI"); + if (&format == &llvm::APFloat::IEEEquad()) + llvm_unreachable("IEEEquad float format is NYI"); + if (&format == &llvm::APFloat::PPCDoubleDouble()) + llvm_unreachable("PPCDoubleDouble float format is NYI"); + if (&format == &llvm::APFloat::x87DoubleExtended()) + return getLongDouble80BitsTy(); + + llvm_unreachable("Unknown float format!"); + } + mlir::cir::BoolType getBoolTy() { return ::mlir::cir::BoolType::get(getContext()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 123d55fcc56d..32c92bc96929 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -134,6 +134,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // TODO: BFloatTy FloatTy = builder.getF32Type(); DoubleTy = builder.getF64Type(); + // TODO(cir): perhaps we should abstract long double variations into a custom + // cir.long_double type. Said type would also hold the semantics for lowering. + LongDouble80BitsTy = builder.getF80Type(); + // TODO: PointerWidthInBits PointerAlignInBytes = astctx diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index 97ab911c9861..cea3f07922e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_LIB_CIR_CODEGENTYPECACHE_H #define LLVM_CLANG_LIB_CIR_CODEGENTYPECACHE_H +#include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Types.h" #include "clang/AST/CharUnits.h" #include "clang/Basic/AddressSpaces.h" @@ -34,7 +35,9 @@ struct CIRGenTypeCache { mlir::cir::IntType UInt8Ty, UInt16Ty, UInt32Ty, UInt64Ty; /// half, bfloat, float, double // mlir::Type HalfTy, BFloatTy; - mlir::Type FloatTy, DoubleTy; + // TODO(cir): perhaps we should abstract long double variations into a custom + // cir.long_double type. Said type would also hold the semantics for lowering. + mlir::FloatType FloatTy, DoubleTy, LongDouble80BitsTy; /// int mlir::Type UIntTy; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 2b71f1aed14b..badeff3e354a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -461,6 +461,9 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { ResultType = CGM.DoubleTy; break; case BuiltinType::LongDouble: + ResultType = Builder.getFloatTyForFormat(Context.getFloatTypeSemantics(T), + /*useNativeHalf=*/false); + break; case BuiltinType::Float128: case BuiltinType::Ibm128: // FIXME: look at Context.getFloatTypeSemantics(T) and getTypeForFormat diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index c6873788e778..16fe03bc3597 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -14,6 +14,7 @@ unsigned short t5(unsigned short i) { return i; } float t6(float i) { return i; } double t7(double i) { return i; } +long double t10(long double i) { return i; } void t8(void) {} @@ -29,6 +30,7 @@ bool t9(bool b) { return b; } // CHECK: cir.func @t5(%arg0: !u16i loc({{.*}})) -> !u16i // CHECK: cir.func @t6(%arg0: f32 loc({{.*}})) -> f32 // CHECK: cir.func @t7(%arg0: f64 loc({{.*}})) -> f64 +// CHECK: cir.func @t10(%arg0: f80 loc({{.*}})) -> f80 // CHECK: cir.func @t8() // CHECK-CPP: cir.func @_Z2t0i(%arg0: !s32i loc({{.*}})) -> !s32i @@ -39,5 +41,6 @@ bool t9(bool b) { return b; } // CHECK-CPP: cir.func @_Z2t5t(%arg0: !u16i loc({{.*}})) -> !u16i // CHECK-CPP: cir.func @_Z2t6f(%arg0: f32 loc({{.*}})) -> f32 // CHECK-CPP: cir.func @_Z2t7d(%arg0: f64 loc({{.*}})) -> f64 +// CHECK-CPP: cir.func @{{.+}}t10{{.+}}(%arg0: f80 loc({{.*}})) -> f80 // CHECK-CPP: cir.func @_Z2t8v() // CHECK-CPP: cir.func @_Z2t9b(%arg0: !cir.bool loc({{.*}})) -> !cir.bool From 6e919b6a2fd844ff13e86869bececf73ae826572 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 10 Jul 2023 11:06:25 -0300 Subject: [PATCH 1093/2301] [CIR][CIRGen] Unblock lossy demotion checks codegen path Removes unreachable clause to unlock codegen path. Although checks for lossy demotions are implemented, these are not tested. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 32 ++++++++++++++++++++-- clang/test/CIR/CodeGen/unary.cpp | 21 ++++++++++++++ 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 53e5af790c5a..fed890001707 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -77,6 +77,11 @@ struct BinOpInfo { } }; +static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck( + QualType SrcType, QualType DstType) { + return SrcType->isIntegerType() && DstType->isIntegerType(); +} + class ScalarExprEmitter : public StmtVisitor { CIRGenFunction &CGF; CIRGenBuilderTy &Builder; @@ -337,12 +342,35 @@ class ScalarExprEmitter : public StmtVisitor { CGF.getLoc(E->getExprLoc()), CGF.getCIRType(type), Builder.getCIRBoolAttr(true)); } else if (type->isIntegerType()) { - // QualType promotedType; + QualType promotedType; bool canPerformLossyDemotionCheck = false; if (CGF.getContext().isPromotableIntegerType(type)) { + promotedType = CGF.getContext().getPromotedIntegerType(type); + assert(promotedType != type && "Shouldn't promote to the same type."); canPerformLossyDemotionCheck = true; - llvm_unreachable("no promotable integer inc/dec yet"); + canPerformLossyDemotionCheck &= + CGF.getContext().getCanonicalType(type) != + CGF.getContext().getCanonicalType(promotedType); + canPerformLossyDemotionCheck &= + PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck( + type, promotedType); + + // TODO(cir): Currently, we store bitwidths in CIR types only for + // integers. This might also be required for other types. + auto srcCirTy = ConvertType(type).dyn_cast(); + auto promotedCirTy = ConvertType(type).dyn_cast(); + assert(srcCirTy && promotedCirTy && "Expected integer type"); + + assert( + (!canPerformLossyDemotionCheck || + type->isSignedIntegerOrEnumerationType() || + promotedType->isSignedIntegerOrEnumerationType() || + srcCirTy.getWidth() == promotedCirTy.getWidth()) && + "The following check expects that if we do promotion to different " + "underlying canonical type, at least one of the types (either " + "base or promoted) will be signed, or the bitwidths will match."); } + if (CGF.SanOpts.hasOneOf( SanitizerKind::ImplicitIntegerArithmeticValueChange) && canPerformLossyDemotionCheck) { diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index b47b20c7d689..532124d5e1a6 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -209,3 +209,24 @@ void pointers(int *p) { // %[[BOOLPTR:]] = cir.cast(ptr_to_bool, %15 : !cir.ptr), !cir.bool // cir.unary(not, %[[BOOLPTR]]) : !cir.bool, !cir.bool } + +void chars(char c) { +// CHECK: cir.func @{{.+}}chars{{.+}} + + +c; + // CHECK: %[[#PROMO:]] = cir.cast(integral, %{{.+}} : !s8i), !s32i + // CHECK: cir.unary(plus, %[[#PROMO]]) : !s32i, !s32i + -c; + // CHECK: %[[#PROMO:]] = cir.cast(integral, %{{.+}} : !s8i), !s32i + // CHECK: cir.unary(minus, %[[#PROMO]]) : !s32i, !s32i + + // Chars can go through some integer promotion codegen paths even when not promoted. + ++c; // CHECK: cir.unary(inc, %7) : !s8i, !s8i + --c; // CHECK: cir.unary(dec, %9) : !s8i, !s8i + c++; // CHECK: cir.unary(inc, %11) : !s8i, !s8i + c--; // CHECK: cir.unary(dec, %13) : !s8i, !s8i + + !c; + // CHECK: %[[#C_BOOL:]] = cir.cast(int_to_bool, %{{[0-9]+}} : !s8i), !cir.bool + // CHECK: cir.unary(not, %[[#C_BOOL]]) : !cir.bool, !cir.bool +} From 3818b7c041db8c1aa8be89dc21bd3ba5b10e626a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Jul 2023 11:21:51 -0300 Subject: [PATCH 1094/2301] [CIR][LifetimeCheck][NFC] Factor out points-to updates out of checking stores --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 45 ++++++++++--------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 4c5df2efb49b..a700d6687ee7 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -55,6 +55,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void classifyAndInitTypeCategories(mlir::Value addr, mlir::Type t, mlir::Location loc, unsigned nestLevel); + void updatePointsTo(mlir::Value addr, mlir::Value data, mlir::Location loc); // FIXME: classify tasks and lambdas prior to check ptr deref // and pass down an enum. @@ -1097,23 +1098,8 @@ void LifetimeCheckPass::checkLambdaCaptureStore(StoreOp storeOp) { getPmap()[lambdaAddr].insert(State::getLocalValue(localByRefAddr)); } -void LifetimeCheckPass::checkStore(StoreOp storeOp) { - auto addr = storeOp.getAddr(); - - // The bulk of the check is done on top of store to pointer categories, - // which usually represent the most common case. - // - // We handle some special local values, like coroutine tasks and lambdas, - // which could be holding references to things with dangling lifetime. - if (!ptrs.count(addr)) { - if (currScope->localTempTasks.count(storeOp.getValue())) - checkCoroTaskStore(storeOp); - else - checkLambdaCaptureStore(storeOp); - return; - } - - // Only handle ptrs from here on. +void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, + mlir::Location loc) { auto getArrayFromSubscript = [&](PtrStrideOp strideOp) -> mlir::Value { auto castOp = dyn_cast(strideOp.getBase().getDefiningOp()); @@ -1124,8 +1110,7 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { return castOp.getSrc(); }; - auto data = storeOp.getValue(); - auto *defOp = data.getDefiningOp(); + auto defOp = data.getDefiningOp(); // Do not handle block arguments just yet. if (!defOp) @@ -1144,7 +1129,7 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} getPmap()[addr].clear(); getPmap()[addr].insert(State::getNullPtr()); - pmapNullHist[addr] = storeOp.getValue().getLoc(); + pmapNullHist[addr] = loc; return; } @@ -1168,6 +1153,26 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { // From here on, some uninterestring store (for now?) } +void LifetimeCheckPass::checkStore(StoreOp storeOp) { + auto addr = storeOp.getAddr(); + + // The bulk of the check is done on top of store to pointer categories, + // which usually represent the most common case. + // + // We handle some special local values, like coroutine tasks and lambdas, + // which could be holding references to things with dangling lifetime. + if (!ptrs.count(addr)) { + if (currScope->localTempTasks.count(storeOp.getValue())) + checkCoroTaskStore(storeOp); + else + checkLambdaCaptureStore(storeOp); + return; + } + + // Only handle ptrs from here on. + updatePointsTo(addr, storeOp.getValue(), storeOp.getValue().getLoc()); +} + void LifetimeCheckPass::checkLoad(LoadOp loadOp) { auto addr = loadOp.getAddr(); // Only interested in checking deference on top of pointer types. From b76a93fd3727d19cee27d6e16b01ffa493e5f4e1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Jul 2023 12:12:58 -0300 Subject: [PATCH 1095/2301] [CIR][LifetimeCheck] Handle #cir.const_struct for aggregate initialization One more incremental step towards detect dangling exploded fields. No testcase just yet, coming soon as part of completing the feature. This is NFC for all previous existing functionality. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 51 ++++++++++++++++--- 1 file changed, 44 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index a700d6687ee7..f83d1533ff67 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -56,6 +56,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void classifyAndInitTypeCategories(mlir::Value addr, mlir::Type t, mlir::Location loc, unsigned nestLevel); void updatePointsTo(mlir::Value addr, mlir::Value data, mlir::Location loc); + void updatePointsToForConstStruct(mlir::Value addr, + mlir::cir::ConstStructAttr value, + mlir::Location loc); // FIXME: classify tasks and lambdas prior to check ptr deref // and pass down an enum. @@ -281,6 +284,12 @@ struct LifetimeCheckPass : public LifetimeCheckBase { invalidHist[ptr].add(ptr, invalidStyle, loc, extraVal); } + void markPsetNull(mlir::Value addr, mlir::Location loc) { + getPmap()[addr].clear(); + getPmap()[addr].insert(State::getNullPtr()); + pmapNullHist[addr] = loc; + } + void joinPmaps(SmallVectorImpl &pmaps); // Provides p1179's 'KILL' functionality. See implementation for more @@ -1098,6 +1107,24 @@ void LifetimeCheckPass::checkLambdaCaptureStore(StoreOp storeOp) { getPmap()[lambdaAddr].insert(State::getLocalValue(localByRefAddr)); } +void LifetimeCheckPass::updatePointsToForConstStruct( + mlir::Value addr, mlir::cir::ConstStructAttr value, mlir::Location loc) { + assert(aggregates.count(addr) && "expected association with aggregate"); + int memberIdx = 0; + for (auto &attr : value.getMembers()) { + auto ta = attr.dyn_cast(); + assert(ta && "expected typed attribute"); + auto fieldAddr = aggregates[addr][memberIdx]; + // Unseen fields are not tracked. + if (fieldAddr && ta.getType().isa()) { + assert(ta.isa() && + "other than null not implemented"); + markPsetNull(fieldAddr, loc); + } + memberIdx++; + } +} + void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, mlir::Location loc) { @@ -1119,7 +1146,15 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, // 2.4.2 - If the declaration includes an initialization, the // initialization is treated as a separate operation if (auto cstOp = dyn_cast(defOp)) { - assert(cstOp.isNullPtr() && "not implemented"); + // Aggregates can be bulk materialized in CIR, handle proper update of + // individual exploded fields. + if (auto constStruct = + cstOp.getValue().dyn_cast()) { + updatePointsToForConstStruct(addr, constStruct, loc); + return; + } + + assert(cstOp.isNullPtr() && "other than null not implemented"); assert(getPmap().count(addr) && "address should always be valid"); // 2.4.2 - If the initialization is default initialization or zero // initialization, set pset(p) = {null}; for example: @@ -1127,9 +1162,7 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, // int* p; => pset(p) == {invalid} // int* p{}; or string_view p; => pset(p) == {null}. // int *p = nullptr; => pset(p) == {nullptr} => pset(p) == {null} - getPmap()[addr].clear(); - getPmap()[addr].insert(State::getNullPtr()); - pmapNullHist[addr] = loc; + markPsetNull(addr, loc); return; } @@ -1156,6 +1189,12 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, void LifetimeCheckPass::checkStore(StoreOp storeOp) { auto addr = storeOp.getAddr(); + // Decompose store's to aggregates into multiple updates to individual fields. + if (aggregates.count(addr)) { + updatePointsTo(addr, storeOp.getValue(), storeOp.getValue().getLoc()); + return; + } + // The bulk of the check is done on top of store to pointer categories, // which usually represent the most common case. // @@ -1402,9 +1441,7 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, if (!dyn_cast_or_null(addr.getDefiningOp())) return; - getPmap()[addr].clear(); - getPmap()[addr].insert(State::getNullPtr()); - pmapNullHist[addr] = callOp.getLoc(); + markPsetNull(addr, callOp.getLoc()); return; } From 7459e12420e58d3b3b82ce5125e40eedcdc456c7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Jul 2023 12:44:26 -0300 Subject: [PATCH 1096/2301] [CIR][LifetimeCheck][NFC] Use more meaningful data source var name --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index f83d1533ff67..b04915b0b7e9 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1137,15 +1137,15 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, return castOp.getSrc(); }; - auto defOp = data.getDefiningOp(); + auto dataSrcOp = data.getDefiningOp(); // Do not handle block arguments just yet. - if (!defOp) + if (!dataSrcOp) return; // 2.4.2 - If the declaration includes an initialization, the // initialization is treated as a separate operation - if (auto cstOp = dyn_cast(defOp)) { + if (auto cstOp = dyn_cast(dataSrcOp)) { // Aggregates can be bulk materialized in CIR, handle proper update of // individual exploded fields. if (auto constStruct = @@ -1166,14 +1166,14 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, return; } - if (auto allocaOp = dyn_cast(defOp)) { + if (auto allocaOp = dyn_cast(dataSrcOp)) { // p = &x; getPmap()[addr].clear(); getPmap()[addr].insert(State::getLocalValue(data)); return; } - if (auto ptrStrideOp = dyn_cast(defOp)) { + if (auto ptrStrideOp = dyn_cast(dataSrcOp)) { // p = &a[0]; auto array = getArrayFromSubscript(ptrStrideOp); if (array) { From c688497102e3af9c7b26b6b405b3c557f37e8992 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Jul 2023 15:56:19 -0300 Subject: [PATCH 1097/2301] [CIR][LifetimeCheck] Trigger deref checking for aggregate members upon parent usage This concludes basic support for tracking dangling exploded members, there are still some "notes" noise that needs to be fixed, and other minor improvements, coming next. Added testcase to cover all work done in previous commits. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 37 ++++++++++++++++++- .../CIR/Transforms/lifetime-check-agg.cpp | 3 +- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index b04915b0b7e9..ca1654b26e1b 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1125,6 +1125,19 @@ void LifetimeCheckPass::updatePointsToForConstStruct( } } +static mlir::Operation *ignoreBitcasts(mlir::Operation *op) { + while (auto bitcast = dyn_cast(op)) { + if (bitcast.getKind() != CastKind::bitcast) + return op; + auto b = bitcast.getSrc().getDefiningOp(); + // Do not handle block arguments just yet. + if (!b) + return op; + op = b; + } + return op; +} + void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, mlir::Location loc) { @@ -1143,6 +1156,12 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, if (!dataSrcOp) return; + // Ignore chains of bitcasts and update data source. Note that when + // dataSrcOp gets updated, `data` might not be the most updated resource + // to use, so avoid using it directly, and instead get things from newer + // dataSrcOp. + dataSrcOp = ignoreBitcasts(dataSrcOp); + // 2.4.2 - If the declaration includes an initialization, the // initialization is treated as a separate operation if (auto cstOp = dyn_cast(dataSrcOp)) { @@ -1169,7 +1188,7 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, if (auto allocaOp = dyn_cast(dataSrcOp)) { // p = &x; getPmap()[addr].clear(); - getPmap()[addr].insert(State::getLocalValue(data)); + getPmap()[addr].insert(State::getLocalValue(allocaOp.getAddr())); return; } @@ -1183,7 +1202,7 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, return; } - // From here on, some uninterestring store (for now?) + // What should we add next? } void LifetimeCheckPass::checkStore(StoreOp storeOp) { @@ -1526,6 +1545,7 @@ void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, // - Pointers: always check for deref. // - Coroutine tasks: check the task for deref when calling methods of // the task, but also when the passing the task around to other functions. + // - Aggregates: check ptr subelements for deref. // // FIXME: even before 2.5 we should only invalidate non-const param types. if (owners.count(arg)) @@ -1534,6 +1554,19 @@ void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, ptrsToDeref.insert(arg); if (tasks.count(arg)) ptrsToDeref.insert(arg); + if (aggregates.count(arg)) { + int memberIdx = 0; + auto sTy = + arg.getType().cast().getPointee().dyn_cast(); + assert(sTy && "expected struct type"); + for (auto m : sTy.getMembers()) { + auto ptrMemberAddr = aggregates[arg][memberIdx]; + if (m.isa() && ptrMemberAddr) { + ptrsToDeref.insert(ptrMemberAddr); + } + memberIdx++; + } + } } // FIXME: CIR should track source info on the passed args, so we can get diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp index 506b55d7c7c0..a2c8e21dd089 100644 --- a/clang/test/CIR/Transforms/lifetime-check-agg.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// XFAIL: * typedef enum SType { INFO_ENUM_0 = 9, @@ -30,4 +31,4 @@ void exploded_fields(bool cond) { } escape_info(&info); } -} \ No newline at end of file +} From ea420cb51c2e784f1a4f7a70b59c1d8bd9bc5544 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Jul 2023 16:42:15 -0300 Subject: [PATCH 1098/2301] [CIR][LifetimeCheck][NFC] Improve null invalidation note a bit --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 4 +++- clang/test/CIR/Transforms/lifetime-check-agg.cpp | 13 +++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index ca1654b26e1b..957e07ba9c34 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1336,13 +1336,15 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, else D << "use of invalid pointer '" << varName << "'"; + // TODO: add accuracy levels, different combinations of invalid and null + // could have different ratios of false positives. if (hasInvalid && opts.emitHistoryInvalid()) emitInvalidHistory(D, addr, loc, forRetLambda); if (hasNullptr && opts.emitHistoryNull()) { assert(pmapNullHist.count(addr) && "expected nullptr hist"); auto ¬e = pmapNullHist[addr]; - D.attachNote(*note) << "invalidated here"; + D.attachNote(*note) << "'nullptr' invalidated here"; } if (!psetRemarkEmitted && opts.emitRemarkPsetInvalid()) diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp index a2c8e21dd089..63c60182f00e 100644 --- a/clang/test/CIR/Transforms/lifetime-check-agg.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -1,5 +1,4 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir -// XFAIL: * typedef enum SType { INFO_ENUM_0 = 9, @@ -23,12 +22,18 @@ static const FlagsPriv PrivBit = 0x00000001; void escape_info(InfoRaw *info); void exploded_fields(bool cond) { { - InfoRaw info = {INFO_ENUM_0}; + InfoRaw info = {INFO_ENUM_0}; // expected-note {{invalidated here}} if (cond) { InfoPriv privTmp = {INFO_ENUM_1}; privTmp.flags = PrivBit; info.next = &privTmp; - } - escape_info(&info); + } // expected-note {{pointee 'privTmp' invalidated at end of scope}} + + // If the 'if' above is taken, info.next is invalidated at the end of the scope, otherwise + // it's also invalid because it was initialized with 'nullptr'. This could be a noisy + // check if calls like `escape_info` are used to further initialize `info`. + + escape_info(&info); // expected-remark {{pset => { invalid, nullptr }}} + // expected-warning@-1 {{use of invalid pointer 'info.next'}} } } From 94ddb15861d223c84cdb50616b529030da74ecb3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 26 Jul 2023 16:55:57 -0300 Subject: [PATCH 1099/2301] [CIR][LifetimeCheck] Improve notes for passing down invalid pointers on calls --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 19 ++++++++++++++----- .../CIR/Transforms/lifetime-check-agg.cpp | 2 +- .../CIR/Transforms/lifetime-check-string.cpp | 2 +- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 957e07ba9c34..978da170bc02 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -60,10 +60,10 @@ struct LifetimeCheckPass : public LifetimeCheckBase { mlir::cir::ConstStructAttr value, mlir::Location loc); - // FIXME: classify tasks and lambdas prior to check ptr deref + // FIXME: classify tasks, lambdas and call args prior to check ptr deref // and pass down an enum. void checkPointerDeref(mlir::Value addr, mlir::Location loc, - bool forRetLambda = false); + bool forRetLambda = false, bool inCallArg = false); void checkCoroTaskStore(StoreOp storeOp); void checkLambdaCaptureStore(StoreOp storeOp); void trackCallToCoroutine(CallOp callOp); @@ -1297,7 +1297,7 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, } void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, - bool forRetLambda) { + bool forRetLambda, bool inCallArg) { bool hasInvalid = getPmap()[addr].count(State::getInvalid()); bool hasNullptr = getPmap()[addr].count(State::getNullPtr()); @@ -1333,7 +1333,15 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, D << "use of coroutine '" << varName << "' with dangling reference"; else if (forRetLambda) D << "returned lambda captures local variable"; - else + else if (inCallArg) { + bool isAgg = isa_and_nonnull(addr.getDefiningOp()); + D << "passing "; + if (!isAgg) + D << "invalid pointer"; + else + D << "aggregate containing invalid pointer member"; + D << " '" << varName << "'"; + } else D << "use of invalid pointer '" << varName << "'"; // TODO: add accuracy levels, different combinations of invalid and null @@ -1576,7 +1584,8 @@ void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, for (auto o : ownersToInvalidate) checkNonConstUseOfOwner(o, callOp.getLoc()); for (auto p : ptrsToDeref) - checkPointerDeref(p, callOp.getLoc()); + checkPointerDeref(p, callOp.getLoc(), /*forRetLambda=*/false, + /*inCallArg=*/true); } void LifetimeCheckPass::checkOtherMethodsAndFunctions( diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp index 63c60182f00e..75e9ed2a3103 100644 --- a/clang/test/CIR/Transforms/lifetime-check-agg.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -34,6 +34,6 @@ void exploded_fields(bool cond) { // check if calls like `escape_info` are used to further initialize `info`. escape_info(&info); // expected-remark {{pset => { invalid, nullptr }}} - // expected-warning@-1 {{use of invalid pointer 'info.next'}} + // expected-warning@-1 {{passing aggregate containing invalid pointer member 'info.next'}} } } diff --git a/clang/test/CIR/Transforms/lifetime-check-string.cpp b/clang/test/CIR/Transforms/lifetime-check-string.cpp index 6455f0b8e96b..383f3b5da626 100644 --- a/clang/test/CIR/Transforms/lifetime-check-string.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-string.cpp @@ -82,6 +82,6 @@ void sv3() { sv = name; // expected-remark@-1 {{pset => { invalid }}} cout << sv; // expected-remark {{pset => { name__2' }}} cout << name; // expected-note {{invalidated by non-const use of owner type}} - cout << sv; // expected-warning {{use of invalid pointer 'sv'}} + cout << sv; // expected-warning {{passing invalid pointer 'sv'}} // expected-remark@-1 {{pset => { invalid }}} } \ No newline at end of file From eab3ba4fb1bfede4c205ab01ab619456cfdde7aa Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 27 Jul 2023 18:50:38 -0300 Subject: [PATCH 1100/2301] [CIR][LifetimeCheck] Handle zero initialized structs --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 24 +++++++++++++++++++ .../CIR/Transforms/lifetime-check-agg.cpp | 24 +++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 978da170bc02..9556dbab4f7f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -59,6 +59,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void updatePointsToForConstStruct(mlir::Value addr, mlir::cir::ConstStructAttr value, mlir::Location loc); + void updatePointsToForZeroStruct(mlir::Value addr, StructType sTy, + mlir::Location loc); // FIXME: classify tasks, lambdas and call args prior to check ptr deref // and pass down an enum. @@ -1125,6 +1127,21 @@ void LifetimeCheckPass::updatePointsToForConstStruct( } } +void LifetimeCheckPass::updatePointsToForZeroStruct(mlir::Value addr, + StructType sTy, + mlir::Location loc) { + assert(aggregates.count(addr) && "expected association with aggregate"); + int memberIdx = 0; + for (auto &t : sTy.getMembers()) { + auto fieldAddr = aggregates[addr][memberIdx]; + // Unseen fields are not tracked. + if (fieldAddr && t.isa()) { + markPsetNull(fieldAddr, loc); + } + memberIdx++; + } +} + static mlir::Operation *ignoreBitcasts(mlir::Operation *op) { while (auto bitcast = dyn_cast(op)) { if (bitcast.getKind() != CastKind::bitcast) @@ -1173,6 +1190,13 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, return; } + if (auto zero = cstOp.getValue().dyn_cast()) { + if (auto zeroStructTy = zero.getType().dyn_cast()) { + updatePointsToForZeroStruct(addr, zeroStructTy, loc); + return; + } + } + assert(cstOp.isNullPtr() && "other than null not implemented"); assert(getPmap().count(addr) && "address should always be valid"); // 2.4.2 - If the initialization is default initialization or zero diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp index 75e9ed2a3103..96635a6cdd23 100644 --- a/clang/test/CIR/Transforms/lifetime-check-agg.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -8,6 +8,13 @@ typedef enum SType { typedef struct InfoRaw { SType type; const void* __attribute__((__may_alias__)) next; + unsigned int fa; + unsigned f; + unsigned s; + unsigned w; + unsigned h; + unsigned g; + unsigned a; } InfoRaw; typedef unsigned long long FlagsPriv; @@ -37,3 +44,20 @@ void exploded_fields(bool cond) { // expected-warning@-1 {{passing aggregate containing invalid pointer member 'info.next'}} } } + +void exploded_fields1(bool cond, unsigned t) { + { + InfoRaw info = {INFO_ENUM_0, &t}; + if (cond) { + InfoPriv privTmp = {INFO_ENUM_1}; + privTmp.flags = PrivBit; + info.next = &privTmp; + } + + // A warning is not emitted here, lack of context for inferring + // anything about `cond` would make it too noisy given `info.next` + // wasn't null initialized. + + escape_info(&info); // expected-remark {{pset => { t }}} + } +} From f697cb02a42b9422564de6e5d5cfc7ba6f345059 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 28 Jul 2023 12:12:45 -0300 Subject: [PATCH 1101/2301] [CIR][NFC] CallOp: be consistent on all arg iterators for indirect calls --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index bac028a34f13..d09130c9ea57 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1644,6 +1644,12 @@ def CallOp : CIR_Op<"call", call must match the specified function type. The callee is encoded as a symbol reference attribute named "callee". + To walk the operands for this operation, use `getArgOperands()` or a combo + of `arg_operand_begin()` and `arg_operand_begin()`. Using `operand_begin()` + and friends direclty might be misleading given that the indirect call + version encodes the target in the first operation operand. + `` + Example: ```mlir @@ -1680,7 +1686,12 @@ def CallOp : CIR_Op<"call", return *arg_operand_begin(); } - operand_iterator arg_operand_begin() { return operand_begin(); } + operand_iterator arg_operand_begin() { + auto arg_begin = operand_begin(); + if (!getCallee()) + arg_begin++; + return arg_begin; + } operand_iterator arg_operand_end() { return operand_end(); } }]; From 8256d7e2078a1104e452bdea1b5cdbbd2d2930d1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 28 Jul 2023 12:37:22 -0300 Subject: [PATCH 1102/2301] [CIR][NFC] CallOp: more indirect call interface updates --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 23 ++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d09130c9ea57..3f7f14238eaf 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1644,10 +1644,11 @@ def CallOp : CIR_Op<"call", call must match the specified function type. The callee is encoded as a symbol reference attribute named "callee". - To walk the operands for this operation, use `getArgOperands()` or a combo - of `arg_operand_begin()` and `arg_operand_begin()`. Using `operand_begin()` - and friends direclty might be misleading given that the indirect call - version encodes the target in the first operation operand. + To walk the operands for this operation, use `getNumArgOperands()`, + `getArgOperand()`, `getArgOperands()`, `arg_operand_begin()` and + `arg_operand_begin()`. Avoid using `getNumOperands()`, `getOperand()`, + `operand_begin()`, etc, direclty - might be misleading given on indirect + calls the callee is encoded in the first operation operand. `` Example: @@ -1693,6 +1694,20 @@ def CallOp : CIR_Op<"call", return arg_begin; } operand_iterator arg_operand_end() { return operand_end(); } + + /// Return the operand at index 'i', accounts for indirect call. + Value getArgOperand(unsigned i) { + if (!getCallee()) + i++; + return getOperand(i); + } + + /// Return the number of operands, , accounts for indirect call. + unsigned getNumArgOperands() { + if (!getCallee()) + return this->getOperation()->getNumOperands()-1; + return this->getOperation()->getNumOperands(); + } }]; let hasCustomAssemblyFormat = 1; From ede0eb121eaa7d0ca92f4044db3492ce0513a086 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 28 Jul 2023 12:40:11 -0300 Subject: [PATCH 1103/2301] [CIR][LifetimeCheck] Add support for looking into indirect calls --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 50 +++++++++---------- .../CIR/Transforms/lifetime-check-agg.cpp | 10 ++++ 2 files changed, 34 insertions(+), 26 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 9556dbab4f7f..cd487e6d4127 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1389,7 +1389,10 @@ static FuncOp getCalleeFromSymbol(ModuleOp mod, StringRef name) { return dyn_cast(global); } -static const clang::CXXMethodDecl *getMethod(ModuleOp mod, StringRef name) { +static const clang::CXXMethodDecl *getMethod(ModuleOp mod, CallOp callOp) { + if (!callOp.getCallee()) + return nullptr; + StringRef name = *callOp.getCallee(); auto method = getCalleeFromSymbol(mod, name); if (!method || method.getBuiltin()) return nullptr; @@ -1401,8 +1404,8 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, // MyPointer::operator=(MyPointer&&)(%dst, %src) // or // MyOwner::operator=(MyOwner&&)(%dst, %src) - auto dst = callOp.getOperand(0); - auto src = callOp.getOperand(1); + auto dst = callOp.getArgOperand(0); + auto src = callOp.getArgOperand(1); // Move assignments between pointer categories. if (ptrs.count(dst) && ptrs.count(src)) { @@ -1431,8 +1434,8 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, void LifetimeCheckPass::checkCopyAssignment(CallOp callOp, const clang::CXXMethodDecl *m) { // MyIntOwner::operator=(MyIntOwner&)(%dst, %src) - auto dst = callOp.getOperand(0); - auto src = callOp.getOperand(1); + auto dst = callOp.getArgOperand(0); + auto src = callOp.getArgOperand(1); // Copy assignment between owner categories. if (owners.count(dst) && owners.count(src)) @@ -1453,12 +1456,12 @@ void LifetimeCheckPass::checkCopyAssignment(CallOp callOp, // bool LifetimeCheckPass::isCtorInitPointerFromOwner( CallOp callOp, const clang::CXXConstructorDecl *ctor) { - if (callOp.getNumOperands() < 2) + if (callOp.getNumArgOperands() < 2) return false; // FIXME: should we scan all arguments past first to look for an owner? - auto addr = callOp.getOperand(0); - auto owner = callOp.getOperand(1); + auto addr = callOp.getArgOperand(0); + auto owner = callOp.getArgOperand(1); if (ptrs.count(addr) && owners.count(owner)) return true; @@ -1478,7 +1481,7 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, // both results in pset(p) == {null} if (ctor->isDefaultConstructor()) { // First argument passed is always the alloca for the 'this' ptr. - auto addr = callOp.getOperand(0); + auto addr = callOp.getArgOperand(0); // Currently two possible actions: // 1. Skip Owner category initialization. @@ -1504,8 +1507,8 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, } if (isCtorInitPointerFromOwner(callOp, ctor)) { - auto addr = callOp.getOperand(0); - auto owner = callOp.getOperand(1); + auto addr = callOp.getArgOperand(0); + auto owner = callOp.getArgOperand(1); getPmap()[addr].clear(); getPmap()[addr].insert(State::getOwnedBy(owner)); return; @@ -1514,7 +1517,7 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, void LifetimeCheckPass::checkOperators(CallOp callOp, const clang::CXXMethodDecl *m) { - auto addr = callOp.getOperand(0); + auto addr = callOp.getArgOperand(0); if (owners.count(addr)) { // const access to the owner is fine. if (m->isConst()) @@ -1542,7 +1545,7 @@ bool LifetimeCheckPass::isNonConstUseOfOwner(CallOp callOp, const clang::CXXMethodDecl *m) { if (m->isConst()) return false; - auto addr = callOp.getOperand(0); + auto addr = callOp.getArgOperand(0); if (owners.count(addr)) return true; return false; @@ -1566,13 +1569,13 @@ void LifetimeCheckPass::checkNonConstUseOfOwner(mlir::Value ownerAddr, void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, unsigned firstArgIdx) { - auto numOperands = callOp.getNumOperands(); + auto numOperands = callOp.getNumArgOperands(); if (firstArgIdx >= numOperands) return; llvm::SmallSetVector ownersToInvalidate, ptrsToDeref; for (unsigned i = firstArgIdx, e = numOperands; i != e; ++i) { - auto arg = callOp.getOperand(i); + auto arg = callOp.getArgOperand(i); // FIXME: apply p1179 rules as described in 2.5. Very conservative for now: // // - Owners: always invalidate. @@ -1620,7 +1623,7 @@ void LifetimeCheckPass::checkOtherMethodsAndFunctions( // - If a method call to a class we consider interesting, like a method // call on a coroutine task (promise_type). // - Skip the 'this' for any other method. - if (m && !tasks.count(callOp.getOperand(firstArgIdx))) + if (m && !tasks.count(callOp.getArgOperand(firstArgIdx))) firstArgIdx++; checkForOwnerAndPointerArguments(callOp, firstArgIdx); } @@ -1698,7 +1701,7 @@ void LifetimeCheckPass::trackCallToCoroutine(CallOp callOp) { } void LifetimeCheckPass::checkCall(CallOp callOp) { - if (callOp.getNumOperands() == 0) + if (callOp.getNumArgOperands() == 0) return; // Identify calls to coroutines and track returning temporary task types. @@ -1707,13 +1710,8 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // part of declaration trackCallToCoroutine(callOp); - // FIXME: General indirect calls not yet supported. - if (!callOp.getCallee()) - return; - - auto fnName = *callOp.getCallee(); - auto methodDecl = getMethod(theModule, fnName); - if (!isOwnerOrPointerClassMethod(callOp.getOperand(0), methodDecl)) + auto methodDecl = getMethod(theModule, callOp); + if (!isOwnerOrPointerClassMethod(callOp.getArgOperand(0), methodDecl)) return checkOtherMethodsAndFunctions(callOp, methodDecl); // From this point on only owner and pointer class methods handling, @@ -1731,11 +1729,11 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // Non-const member call to a Owner invalidates any of its users. if (isNonConstUseOfOwner(callOp, methodDecl)) - return checkNonConstUseOfOwner(callOp.getOperand(0), callOp.getLoc()); + return checkNonConstUseOfOwner(callOp.getArgOperand(0), callOp.getLoc()); // Take a pset(Ptr) = { Ownr' } where Own got invalidated, this will become // invalid access to Ptr if any of its methods are used. - auto addr = callOp.getOperand(0); + auto addr = callOp.getArgOperand(0); if (ptrs.count(addr)) return checkPointerDeref(addr, callOp.getLoc()); } diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp index 96635a6cdd23..f820daceb5f5 100644 --- a/clang/test/CIR/Transforms/lifetime-check-agg.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -27,6 +27,14 @@ typedef struct InfoPriv { static const FlagsPriv PrivBit = 0x00000001; void escape_info(InfoRaw *info); +typedef SType ( *FnPtr)(unsigned s, const InfoRaw* i); +struct X { + struct entries { + FnPtr wildfn = nullptr; + }; + static entries e; +}; + void exploded_fields(bool cond) { { InfoRaw info = {INFO_ENUM_0}; // expected-note {{invalidated here}} @@ -42,6 +50,8 @@ void exploded_fields(bool cond) { escape_info(&info); // expected-remark {{pset => { invalid, nullptr }}} // expected-warning@-1 {{passing aggregate containing invalid pointer member 'info.next'}} + X::e.wildfn(0, &info); // expected-remark {{pset => { invalid, nullptr }}} + // expected-warning@-1 {{passing aggregate containing invalid pointer member 'info.next'}} } } From 8e0dd68660a045e7ef2f866d71b143445397c3aa Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 26 Jul 2023 16:41:52 -0300 Subject: [PATCH 1104/2301] [CIR][CIRGen][Bugfix] Refactor lexical scope locations Refactors LexicalScopContext to receive a single location and decide how to use it. This allows the context to have one or two locations for distinguishing between the start and end of the scope. Fixes a bug where a mlir::Location was wrongly casted to a mlir::FusedLoc when building the scope surrounding if statements. In the rare case where the if statement originates from a preprocessor macro definition, the location where the macro is expanded is used as the source location for both the start and the end of the range. Once these locations are fused, MLIR automatically detects the duplicated start and end locations and merges them resulting in single a regular location. This patch allows the if statement to have a single location as its "begin" and "end" locations. Note: spelling locations are not tracked due to performance issues. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 55 ++++--------- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 11 +-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 80 +++---------------- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 13 ++- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 53 ++---------- clang/test/CIR/CodeGen/spelling-locations.cpp | 29 +++++++ 7 files changed, 77 insertions(+), 168 deletions(-) create mode 100644 clang/test/CIR/CodeGen/spelling-locations.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index ea83cd212ce7..42c5d46f8513 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1830,18 +1830,8 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, .create( loc, condV, /*trueBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - // FIXME: abstract all this massive location handling elsewhere. - SmallVector locs; - if (loc.isa()) { - locs.push_back(loc); - locs.push_back(loc); - } else if (loc.isa()) { - auto fusedLoc = loc.cast(); - locs.push_back(fusedLoc.getLocations()[0]); - locs.push_back(fusedLoc.getLocations()[1]); - } CIRGenFunction::LexicalScopeContext lexScope{ - locs[0], locs[1], b.getInsertionBlock()}; + loc, b.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; CGF.currLexScope->setAsTernary(); @@ -1862,11 +1852,8 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, }, /*falseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto locBegin = fusedLoc.getLocations()[0]; - auto locEnd = fusedLoc.getLocations()[1]; CIRGenFunction::LexicalScopeContext lexScope{ - locBegin, locEnd, b.getInsertionBlock()}; + loc, b.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; CGF.currLexScope->setAsTernary(); @@ -1963,17 +1950,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { [[maybe_unused]] auto scope = builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - SmallVector locs; - if (loc.isa()) { - locs.push_back(loc); - locs.push_back(loc); - } else if (loc.isa()) { - auto fusedLoc = loc.cast(); - locs.push_back(fusedLoc.getLocations()[0]); - locs.push_back(fusedLoc.getLocations()[1]); - } CIRGenFunction::LexicalScopeContext lexScope{ - locs[0], locs[1], builder.getInsertionBlock()}; + loc, builder.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexScopeGuard{*this, &lexScope}; LV = buildLValue(cleanups->getSubExpr()); @@ -2068,28 +2046,23 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, loc, condV, elseS, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - // FIXME: abstract all this massive location handling elsewhere. - SmallVector locs; - if (loc.isa()) { - locs.push_back(loc); - locs.push_back(loc); - } else if (loc.isa()) { - auto fusedLoc = loc.cast(); - locs.push_back(fusedLoc.getLocations()[0]); - locs.push_back(fusedLoc.getLocations()[1]); + if (const auto fusedLoc = loc.dyn_cast()) { + loc = mlir::FusedLoc::get( + builder.getContext(), + {fusedLoc.getLocations()[0], fusedLoc.getLocations()[1]}); } - LexicalScopeContext lexScope{locs[0], locs[1], - builder.getInsertionBlock()}; + LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; LexicalScopeGuard lexThenGuard{*this, &lexScope}; resThen = buildStmt(thenS, /*useCurrentScope=*/true); }, /*elseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto locBegin = fusedLoc.getLocations()[2]; - auto locEnd = fusedLoc.getLocations()[3]; - LexicalScopeContext lexScope{locBegin, locEnd, - builder.getInsertionBlock()}; + if (const auto fusedLoc = loc.dyn_cast()) { + loc = mlir::FusedLoc::get( + builder.getContext(), + {fusedLoc.getLocations()[2], fusedLoc.getLocations()[3]}); + } + LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; LexicalScopeGuard lexElseGuard{*this, &lexScope}; resElse = buildStmt(elseS, /*useCurrentScope=*/true); }); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 509c9899aa59..d5fedfd21a59 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -404,17 +404,8 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { [[maybe_unused]] auto scope = builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - SmallVector locs; - if (loc.isa()) { - locs.push_back(loc); - locs.push_back(loc); - } else if (loc.isa()) { - auto fusedLoc = loc.cast(); - locs.push_back(fusedLoc.getLocations()[0]); - locs.push_back(fusedLoc.getLocations()[1]); - } CIRGenFunction::LexicalScopeContext lexScope{ - locs[0], locs[1], builder.getInsertionBlock()}; + loc, builder.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexScopeGuard{CGF, &lexScope}; Visit(E->getSubExpr()); }); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index fed890001707..0da1d1cb3fe7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1821,17 +1821,8 @@ mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { auto scope = builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) { - SmallVector locs; - if (loc.isa()) { - locs.push_back(loc); - locs.push_back(loc); - } else if (loc.isa()) { - auto fusedLoc = loc.cast(); - locs.push_back(fusedLoc.getLocations()[0]); - locs.push_back(fusedLoc.getLocations()[1]); - } CIRGenFunction::LexicalScopeContext lexScope{ - locs[0], locs[1], builder.getInsertionBlock()}; + loc, builder.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexScopeGuard{CGF, &lexScope}; auto scopeYieldVal = Visit(E->getSubExpr()); if (scopeYieldVal) { @@ -2024,17 +2015,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( return builder.create( loc, condV, /*trueBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - // FIXME: abstract all this massive location handling elsewhere. - SmallVector locs; - if (loc.isa()) { - locs.push_back(loc); - locs.push_back(loc); - } else if (loc.isa()) { - auto fusedLoc = loc.cast(); - locs.push_back(fusedLoc.getLocations()[0]); - locs.push_back(fusedLoc.getLocations()[1]); - } - CIRGenFunction::LexicalScopeContext lexScope{locs[0], locs[1], + CIRGenFunction::LexicalScopeContext lexScope{loc, b.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; CGF.currLexScope->setAsTernary(); @@ -2055,10 +2036,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( }, /*falseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto locBegin = fusedLoc.getLocations()[0]; - auto locEnd = fusedLoc.getLocations()[1]; - CIRGenFunction::LexicalScopeContext lexScope{locBegin, locEnd, + CIRGenFunction::LexicalScopeContext lexScope{loc, b.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; CGF.currLexScope->setAsTernary(); @@ -2122,7 +2100,7 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { auto ResOp = Builder.create( Loc, LHSCondV, /*trueBuilder=*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { - CIRGenFunction::LexicalScopeContext LexScope{Loc, Loc, + CIRGenFunction::LexicalScopeContext LexScope{Loc, B.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &LexScope}; CGF.currLexScope->setAsTernary(); @@ -2130,17 +2108,8 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { auto res = B.create( Loc, RHSCondV, /*trueBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { - SmallVector Locs; - if (Loc.isa()) { - Locs.push_back(Loc); - Locs.push_back(Loc); - } else if (Loc.isa()) { - auto fusedLoc = Loc.cast(); - Locs.push_back(fusedLoc.getLocations()[0]); - Locs.push_back(fusedLoc.getLocations()[1]); - } CIRGenFunction::LexicalScopeContext lexScope{ - Locs[0], Locs[1], B.getInsertionBlock()}; + Loc, B.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; CGF.currLexScope->setAsTernary(); auto res = B.create( @@ -2151,17 +2120,8 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { }, /*falseBuilder*/ [&](mlir::OpBuilder &b, mlir::Location Loc) { - SmallVector Locs; - if (Loc.isa()) { - Locs.push_back(Loc); - Locs.push_back(Loc); - } else if (Loc.isa()) { - auto fusedLoc = Loc.cast(); - Locs.push_back(fusedLoc.getLocations()[0]); - Locs.push_back(fusedLoc.getLocations()[1]); - } CIRGenFunction::LexicalScopeContext lexScope{ - Locs[0], Locs[1], b.getInsertionBlock()}; + Loc, b.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; CGF.currLexScope->setAsTernary(); auto res = b.create( @@ -2174,16 +2134,7 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { }, /*falseBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { - SmallVector Locs; - if (Loc.isa()) { - Locs.push_back(Loc); - Locs.push_back(Loc); - } else if (Loc.isa()) { - auto fusedLoc = Loc.cast(); - Locs.push_back(fusedLoc.getLocations()[0]); - Locs.push_back(fusedLoc.getLocations()[1]); - } - CIRGenFunction::LexicalScopeContext lexScope{Loc, Loc, + CIRGenFunction::LexicalScopeContext lexScope{Loc, B.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; CGF.currLexScope->setAsTernary(); @@ -2229,16 +2180,7 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { auto ResOp = Builder.create( Loc, LHSCondV, /*trueBuilder=*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { - SmallVector Locs; - if (Loc.isa()) { - Locs.push_back(Loc); - Locs.push_back(Loc); - } else if (Loc.isa()) { - auto fusedLoc = Loc.cast(); - Locs.push_back(fusedLoc.getLocations()[0]); - Locs.push_back(fusedLoc.getLocations()[1]); - } - CIRGenFunction::LexicalScopeContext lexScope{Loc, Loc, + CIRGenFunction::LexicalScopeContext lexScope{Loc, B.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; CGF.currLexScope->setAsTernary(); @@ -2249,7 +2191,7 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { }, /*falseBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { - CIRGenFunction::LexicalScopeContext LexScope{Loc, Loc, + CIRGenFunction::LexicalScopeContext LexScope{Loc, B.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &LexScope}; CGF.currLexScope->setAsTernary(); @@ -2267,7 +2209,7 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { Locs.push_back(fusedLoc.getLocations()[1]); } CIRGenFunction::LexicalScopeContext lexScope{ - Loc, Loc, B.getInsertionBlock()}; + Loc, B.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; CGF.currLexScope->setAsTernary(); auto res = B.create( @@ -2288,7 +2230,7 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { Locs.push_back(fusedLoc.getLocations()[1]); } CIRGenFunction::LexicalScopeContext lexScope{ - Loc, Loc, B.getInsertionBlock()}; + Loc, B.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; CGF.currLexScope->setAsTernary(); auto res = b.create( diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 890cb638ee77..37e09f9d574b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -507,7 +507,9 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, mlir::Block *EntryBB = Fn.addEntryBlock(); builder.setInsertionPointToStart(EntryBB); - LexicalScopeContext lexScope{FnBeginLoc, FnEndLoc, EntryBB}; + const auto fusedLoc = + mlir::FusedLoc::get(builder.getContext(), {FnBeginLoc, FnEndLoc}); + LexicalScopeContext lexScope{fusedLoc, EntryBB}; LexicalScopeGuard scopeGuard{*this, &lexScope}; // Emit the standard function prologue. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 6ce29919aa3f..25c4b52c07a0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -120,10 +120,19 @@ class CIRGenFunction : public CIRGenTypeCache { public: unsigned Depth = 0; bool HasReturn = false; - LexicalScopeContext(mlir::Location b, mlir::Location e, mlir::Block *eb) - : EntryBlock(eb), BeginLoc(b), EndLoc(e) { + + LexicalScopeContext(mlir::Location loc, mlir::Block *eb) + : EntryBlock(eb), BeginLoc(loc), EndLoc(loc) { + // Has multiple locations: overwrite with separate start and end locs. + if (const auto fusedLoc = loc.dyn_cast()) { + assert(fusedLoc.getLocations().size() == 2 && "too many locations"); + BeginLoc = fusedLoc.getLocations()[0]; + EndLoc = fusedLoc.getLocations()[1]; + } + assert(EntryBlock && "expected valid block"); } + ~LexicalScopeContext() = default; // --- diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index c57af3890e09..0f84cd6b3bbf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -41,11 +41,7 @@ mlir::LogicalResult CIRGenFunction::buildCompoundStmt(const CompoundStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto locBegin = fusedLoc.getLocations()[0]; - auto locEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{locBegin, locEnd, - builder.getInsertionBlock()}; + LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; LexicalScopeGuard lexScopeGuard{*this, &lexScope}; res = compoundStmtBuilder(); }); @@ -397,11 +393,7 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; + LexicalScopeContext lexScope{scopeLoc, builder.getInsertionBlock()}; LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; res = ifStmtBuilder(); }); @@ -493,17 +485,8 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - SmallVector locs; - if (loc.isa()) { - locs.push_back(loc); - locs.push_back(loc); - } else if (loc.isa()) { - auto fusedLoc = loc.cast(); - locs.push_back(fusedLoc.getLocations()[0]); - locs.push_back(fusedLoc.getLocations()[1]); - } CIRGenFunction::LexicalScopeContext lexScope{ - locs[0], locs[1], builder.getInsertionBlock()}; + loc, builder.getInsertionBlock()}; CIRGenFunction::LexicalScopeGuard lexScopeGuard{*this, &lexScope}; handleReturnVal(); }); @@ -714,14 +697,10 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; // Create a cleanup scope for the condition variable cleanups. // Logical equivalent from LLVM codegn for // LexicalScope ConditionScope(*this, S.getSourceRange())... - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; + LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; res = forStmtBuilder(); }); @@ -801,11 +780,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; + LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; res = forStmtBuilder(); }); @@ -860,11 +835,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; + LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; res = doStmtBuilder(); }); @@ -924,11 +895,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; + LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; res = whileStmtBuilder(); }); @@ -1026,11 +993,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto fusedLoc = loc.cast(); - auto scopeLocBegin = fusedLoc.getLocations()[0]; - auto scopeLocEnd = fusedLoc.getLocations()[1]; - LexicalScopeContext lexScope{scopeLocBegin, scopeLocEnd, - builder.getInsertionBlock()}; + LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; res = switchStmtBuilder(); }); diff --git a/clang/test/CIR/CodeGen/spelling-locations.cpp b/clang/test/CIR/CodeGen/spelling-locations.cpp new file mode 100644 index 000000000000..e3d4f6586056 --- /dev/null +++ b/clang/test/CIR/CodeGen/spelling-locations.cpp @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +#define multiline_if_macro(c, t) \ +if (c) { \ + return t; \ +} + +int testMacroLocations(void) { + + // Expanded macros will use the location of the expansion site. + multiline_if_macro(1, 3); + // CHECK: cir.scope { + // CHECK: cir.if %{{.+}} { + // CHECK: cir.return %{{.+}} : !s32i loc(#loc[[#LOC:]]) + // CHECK: } loc(#loc[[#LOC]]) + // CHECK: } loc(#loc[[#LOC]]) + + // Regular if statements should use different locations. + if (1) { + return 3; + } + // CHECK: cir.scope { + // CHECK: cir.if %{{.+}} { + // CHECK: cir.return %{{.+}} : !s32i loc(#loc[[#LOC:]]) + // CHECK-NOT: } loc(#loc[[#LOC]]) + // CHECK-NOT: } loc(#loc[[#LOC]]) + + return 0; +} From 511c05c15bb86363b8c24bb4b203426779d605dc Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 28 Jul 2023 15:54:21 -0400 Subject: [PATCH 1105/2301] [CIR][cir-tidy] Add missing link deps --- clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt b/clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt index d271d927cc39..f31eba82228e 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/cir-tidy/tool/CMakeLists.txt @@ -19,6 +19,7 @@ add_clang_library(CIRTidyMain clangTidy MLIRIR ${ALL_CLANG_TIDY_CHECKS} + MLIRIR DEPENDS omp_gen From 9809ec80e83d27d917e1034f1172a845144b25ed Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 28 Jul 2023 16:23:39 -0300 Subject: [PATCH 1106/2301] [CIR][Lowering][Bufix] Cast cir.shift amount to same width as the value The new cir.shift op allows for distinct value and amount types, however LLVM does not. Before this patch, the amount was not cast to the same width as the value, breaking the lowering process. This patch fixes the issue by casting the amount to the same width as the value. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 31 +++++++++++++------ clang/test/CIR/Lowering/shift.cir | 28 +++++++++++++++++ 2 files changed, 50 insertions(+), 9 deletions(-) create mode 100644 clang/test/CIR/Lowering/shift.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index a143c7631e19..34f6ad698207 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1257,25 +1257,38 @@ class CIRShiftOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::ShiftOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - assert((op.getValue().getType() == op.getResult().getType()) && - "inconsistent operands' types not supported yet"); - auto ty = op.getValue().getType().dyn_cast(); - assert(ty && "NYI for other than mlir::cir::IntType"); - + auto cirAmtTy = op.getAmount().getType().dyn_cast(); + auto cirValTy = op.getValue().getType().dyn_cast(); auto llvmTy = getTypeConverter()->convertType(op.getType()); - auto val = adaptor.getValue(); - auto amt = adaptor.getAmount(); + auto loc = op.getLoc(); + mlir::Value amt = adaptor.getAmount(); + mlir::Value val = adaptor.getValue(); + + assert(cirValTy && cirAmtTy && "non-integer shift is NYI"); + assert(cirValTy == op.getType() && "inconsistent operands' types NYI"); + + // Ensure shift amount is the same type as the value. Some undefined + // behavior might occur in the casts below as per [C99 6.5.7.3]. + if (cirAmtTy.getWidth() > cirValTy.getWidth()) { + amt = rewriter.create(loc, llvmTy, amt); + } else if (cirAmtTy.getWidth() < cirValTy.getWidth()) { + if (cirAmtTy.isSigned()) + amt = rewriter.create(loc, llvmTy, amt); + else + amt = rewriter.create(loc, llvmTy, amt); + } + // Lower to the proper LLVM shift operation. if (op.getIsShiftleft()) rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); else { - if (ty.isUnsigned()) + if (cirValTy.isUnsigned()) rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); else rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); } - return mlir::LogicalResult::success(); + return mlir::success(); } }; diff --git a/clang/test/CIR/Lowering/shift.cir b/clang/test/CIR/Lowering/shift.cir new file mode 100644 index 000000000000..78a7f89e13d0 --- /dev/null +++ b/clang/test/CIR/Lowering/shift.cir @@ -0,0 +1,28 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!u16i = !cir.int +module { + cir.func @testShiftWithDifferentValueAndAmountTypes(%arg0: !s16i, %arg1: !s32i, %arg2: !s64i, %arg3: !u16i) { + // CHECK: testShiftWithDifferentValueAndAmountTypes + + // Should allow shift with larger amount type. + %1 = cir.shift(left, %arg1: !s32i, %arg2 : !s64i) -> !s32i + // CHECK: %[[#CAST:]] = llvm.trunc %{{.+}} : i64 to i32 + // CHECK: llvm.shl %{{.+}}, %[[#CAST]] : i32 + + // Should allow shift with signed smaller amount type. + %2 = cir.shift(left, %arg1 : !s32i, %arg0 : !s16i) -> !s32i + // CHECK: %[[#CAST:]] = llvm.sext %{{.+}} : i16 to i32 + // CHECK: llvm.shl %{{.+}}, %[[#CAST]] : i32 + + // Should allow shift with unsigned smaller amount type. + %14 = cir.shift(left, %arg1 : !s32i, %arg3 : !u16i) -> !s32i + // CHECK: %[[#CAST:]] = llvm.zext %{{.+}} : i16 to i32 + // CHECK: llvm.shl %{{.+}}, %[[#CAST]] : i32 + cir.return + } +} From 71c5e070538624ef4b6467b0f61d489149f9bba7 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 31 Jul 2023 09:51:23 -0300 Subject: [PATCH 1107/2301] [CIR][CIRGen][Bugfix] Patch struct builder element initialization When const initializing a struct builder, the element initialization was not returning the expected typed attribute with the constant value, which caused a cast error. This patch fixes this issue. It also adds missing codegen for building basic records initialization. ghstack-source-id: 6a701a63d63d5ee6501a74db4728a12220cddf6e Pull Request resolved: https://github.com/llvm/clangir/pull/184 --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 5 ++--- clang/test/CIR/CodeGen/struct.c | 8 ++++++++ clang/test/CIR/CodeGen/struct.cpp | 4 ++++ 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index c53429a72990..147bdc2ad44f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -496,7 +496,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) { mlir::Attribute EltInit; if (Init) - Emitter.tryEmitPrivateForMemory(Init, Field->getType()); + EltInit = Emitter.tryEmitPrivateForMemory(Init, Field->getType()); else llvm_unreachable("NYI"); @@ -862,8 +862,7 @@ class ConstExprEmitter } mlir::Attribute EmitRecordInitialization(InitListExpr *ILE, QualType T) { - assert(0 && "not implemented"); - return {}; + return ConstStructBuilder::BuildStruct(Emitter, ILE, T); } mlir::Attribute VisitImplicitValueInitExpr(ImplicitValueInitExpr *E, diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index cc3107d02849..0ca48041e29b 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -26,5 +26,13 @@ void baz(void) { // CHECK-NEXT: cir.return // CHECK-NEXT: } +void shouldConstInitStructs(void) { +// CHECK: cir.func @shouldConstInitStructs + struct Foo f = {1, 2, {3, 4}}; + // CHECK: %[[#V0:]] = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} + // CHECK: %[[#V1:]] = cir.const(#cir.const_struct<{#cir.int<1> : !s32i,#cir.int<2> : !s8i,#cir.const_struct<{#cir.int<3> : !s32i,#cir.int<4> : !s8i}> : !ty_22struct2EBar22}> : !ty_22struct2EFoo22) : !ty_22struct2EFoo22 + // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22struct2EFoo22, cir.ptr +} + // Check if global structs are zero-initialized. // CHECK: cir.global external @bar = #cir.zero : !ty_22struct2EBar22 diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index fe9a3f394c9e..1f455303adc2 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -115,6 +115,10 @@ struct A { int a; }; +// Should globally const-initialize struct members. +struct A simpleConstInit = {1}; +// CHECK: cir.global external @simpleConstInit = #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2EA22 + A get_default() { return A{2}; } struct S { From 84cd66b277e43f488e8db3c2782f74011a183552 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 31 Jul 2023 09:51:24 -0300 Subject: [PATCH 1108/2301] [CIR][CIRGen][NFC] Move isNullValue check to CIRGenBuilder The rationale here is that, since `isNullValue` is meant to check if the given constant is what the `getNullValue` method would return for a certain type, it makes sense for `isNullValue` to be encapsulated in the builder along the `getNullValue` method. ghstack-source-id: b3ba353454f221510546a053a54c45985bfb2422 Pull Request resolved: https://github.com/llvm/clangir/pull/185 --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 1 + clang/lib/CIR/CodeGen/CIRGenBuilder.h | 18 +++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 20 +++---------------- 3 files changed, 22 insertions(+), 17 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index bc7d5f59b2b7..bdc6cd276f48 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -208,6 +208,7 @@ def IntAttr : CIR_Attr<"Int", "int", [TypedAttrInterface]> { let extraClassDeclaration = [{ int64_t getSInt() const { return getValue().getSExtValue(); } uint64_t getUInt() const { return getValue().getZExtValue(); } + bool isNullValue() const { return getValue() == 0; } }]; let genVerifyDecl = 1; let hasCustomAssemblyFormat = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 2fe8d35589bd..5bfbca0d49d2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -169,6 +169,24 @@ class CIRGenBuilderTy : public mlir::OpBuilder { llvm_unreachable("Zero initializer for given type is NYI"); } + // TODO(cir): Once we have CIR float types, replace this by something like a + // NullableValueInterface to allow for type-independent queries. + bool isNullValue(mlir::Attribute attr) const { + // TODO(cir): introduce char type in CIR and check for that instead. + if (const auto intVal = attr.dyn_cast()) + return intVal.isNullValue(); + + if (const auto fpVal = attr.dyn_cast()) { + bool ignored; + llvm::APFloat FV(+0.0); + FV.convert(fpVal.getValue().getSemantics(), + llvm::APFloat::rmNearestTiesToEven, &ignored); + return FV.bitwiseIsEqual(fpVal.getValue()); + } + + llvm_unreachable("NYI"); + } + // // Type helpers // ------------ diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 147bdc2ad44f..b6c290f18da9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -939,28 +939,14 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, SmallVectorImpl &Elements, mlir::TypedAttr Filler) { auto &builder = CGM.getBuilder(); - auto isNullValue = [&](mlir::Attribute f) { - // TODO(cir): introduce char type in CIR and check for that instead. - if (const auto intVal = f.dyn_cast_or_null()) - return intVal.getValue() == 0; - - if (const auto fpVal = f.dyn_cast_or_null()) { - bool ignored; - llvm::APFloat FV(+0.0); - FV.convert(fpVal.getValue().getSemantics(), - llvm::APFloat::rmNearestTiesToEven, &ignored); - return FV.bitwiseIsEqual(fpVal.getValue()); - } - - llvm_unreachable("NYI"); - }; // Figure out how long the initial prefix of non-zero elements is. unsigned NonzeroLength = ArrayBound; - if (Elements.size() < NonzeroLength && isNullValue(Filler)) + if (Elements.size() < NonzeroLength && builder.isNullValue(Filler)) NonzeroLength = Elements.size(); if (NonzeroLength == Elements.size()) { - while (NonzeroLength > 0 && isNullValue(Elements[NonzeroLength - 1])) + while (NonzeroLength > 0 && + builder.isNullValue(Elements[NonzeroLength - 1])) --NonzeroLength; } From 60925dc13087dacb12eaed54b26e85017ec270f8 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 31 Jul 2023 09:51:24 -0300 Subject: [PATCH 1109/2301] [CIR][CIRGen] Generate const struct array initialization ghstack-source-id: 8767eb417a044c7344bac855773635a80262d5c0 Pull Request resolved: https://github.com/llvm/clangir/pull/186 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 11 +++++++++++ clang/test/CIR/CodeGen/struct.cpp | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 5bfbca0d49d2..389566759c80 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -184,6 +184,17 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return FV.bitwiseIsEqual(fpVal.getValue()); } + if (const auto structVal = attr.dyn_cast()) { + for (const auto elt : structVal.getMembers()) { + // FIXME(cir): the struct's ID should not be considered a member. + if (elt.isa()) + continue; + if (!isNullValue(elt)) + return false; + } + return true; + } + llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 1f455303adc2..becf9307d5d5 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -119,6 +119,10 @@ struct A { struct A simpleConstInit = {1}; // CHECK: cir.global external @simpleConstInit = #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2EA22 +// Should globally const-initialize arrays with struct members. +struct A arrConstInit[1] = {{1}}; +// CHECK: cir.global external @arrConstInit = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2EA22]> : !cir.array + A get_default() { return A{2}; } struct S { From fa841f694f799b2c419713a8c5f9bb251308864b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 28 Jul 2023 16:59:14 -0300 Subject: [PATCH 1110/2301] [CIR][LifetimeCheck] Improve support to loop iterators and mitigate false positives This fixes a crash and first set of false positives related to loops, one more to come next. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 52 +++++++++++++++---- .../lifetime-check-range-for-vector.cpp | 23 ++++++++ 2 files changed, 64 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index cd487e6d4127..1aee8a0b60fa 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -456,6 +456,14 @@ static std::string getVarNameFromValue(mlir::Value v) { return Out.str().str(); } } + if (auto callOp = dyn_cast(v.getDefiningOp())) { + if (callOp.getCallee()) { + llvm::SmallString<128> finalName; + llvm::raw_svector_ostream Out(finalName); + Out << "call:" << callOp.getCallee()->str(); + return Out.str().str(); + } + } assert(0 && "how did it get here?"); return ""; } @@ -951,7 +959,13 @@ void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr, mlir::Type t, mlir::Location loc, unsigned nestLevel) { - assert(!getPmap().count(addr) && "only one map entry for a given address"); + // The same alloca can be hit more than once when checking for dangling + // pointers out of subsequent loop iterations (e.g. second iteraton using + // pointer invalidated in the first run). Since we copy the pmap out to + // start those subsequent checks, make sure sure we skip existing alloca + // tracking. + if (getPmap().count(addr)) + return; getPmap()[addr] = {}; enum TypeCategory { @@ -1184,17 +1198,20 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, if (auto cstOp = dyn_cast(dataSrcOp)) { // Aggregates can be bulk materialized in CIR, handle proper update of // individual exploded fields. - if (auto constStruct = - cstOp.getValue().dyn_cast()) { - updatePointsToForConstStruct(addr, constStruct, loc); - return; - } - - if (auto zero = cstOp.getValue().dyn_cast()) { - if (auto zeroStructTy = zero.getType().dyn_cast()) { - updatePointsToForZeroStruct(addr, zeroStructTy, loc); + if (aggregates.count(addr)) { + if (auto constStruct = + cstOp.getValue().dyn_cast()) { + updatePointsToForConstStruct(addr, constStruct, loc); return; } + + if (auto zero = cstOp.getValue().dyn_cast()) { + if (auto zeroStructTy = zero.getType().dyn_cast()) { + updatePointsToForZeroStruct(addr, zeroStructTy, loc); + return; + } + } + return; } assert(cstOp.isNullPtr() && "other than null not implemented"); @@ -1226,6 +1243,14 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, return; } + // Initializes ptr types out of known lib calls marked with pointer + // attributes. TODO: find a better way to tag this. + if (auto callOp = dyn_cast(dataSrcOp)) { + // iter = vector::begin() + getPmap()[addr].clear(); + getPmap()[addr].insert(State::getLocalValue(callOp.getResult(0))); + } + // What should we add next? } @@ -1234,7 +1259,12 @@ void LifetimeCheckPass::checkStore(StoreOp storeOp) { // Decompose store's to aggregates into multiple updates to individual fields. if (aggregates.count(addr)) { - updatePointsTo(addr, storeOp.getValue(), storeOp.getValue().getLoc()); + auto data = storeOp.getValue(); + auto dataSrcOp = data.getDefiningOp(); + // Only interested in updating and tracking fields, anything besides + // constants isn't really relevant. + if (dataSrcOp && isa(dataSrcOp)) + updatePointsTo(addr, data, data.getLoc()); return; } diff --git a/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp b/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp new file mode 100644 index 000000000000..9028eaf50252 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all" -fclangir-skip-system-headers -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +#include "std-cxx.h" + +typedef enum SType { + INFO_ENUM_0 = 9, + INFO_ENUM_1 = 2020, +} SType; + +typedef struct InfoRaw { + SType type; + const void* __attribute__((__may_alias__)) next; + unsigned u; +} InfoRaw; + +void swappy(unsigned c) { + std::vector images(c); + for (auto& image : images) { + // FIXME: this warning shall not happen, fix next! + image = {INFO_ENUM_1}; // expected-warning {{passing aggregate containing invalid pointer member 'ref.tmp0.next'}} + // expected-note@-1 {{'nullptr' invalidated here}} + } +} \ No newline at end of file From b90653713dbc5e3d99cbba973041cc8b63f5e7be Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 31 Jul 2023 15:51:43 -0300 Subject: [PATCH 1111/2301] [CIR][LifetimeCheck] Fix false positive from unsupported loads for 'this' pointer When looking at 'this' pointer to identify pointer categories, the checker was missing checking loads that materialize the 'this' pointer. Add that capability and prevent a false positive. While here, normalize all accesses to 'this' pointer through special functions that factor out logic and check for said loads. --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 109 ++++++++++++------ .../lifetime-check-range-for-vector.cpp | 6 +- 2 files changed, 78 insertions(+), 37 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 1aee8a0b60fa..ec9e6773a53e 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -79,6 +79,19 @@ struct LifetimeCheckPass : public LifetimeCheckBase { const clang::CXXMethodDecl *m); void checkForOwnerAndPointerArguments(CallOp callOp, unsigned firstArgIdx); + // TODO: merge both methods below and pass down an enum. + // + // Check if a method's 'this' pointer (first arg) is tracked as + // a pointer category. Assumes the CallOp in question represents a method + // and returns the actual value associated with the tracked 'this' or an + // empty value if none is found. + mlir::Value getThisParamPointerCategory(CallOp callOp); + // Check if a method's 'this' pointer (first arg) is tracked as + // a owner category. Assumes the CallOp in question represents a method + // and returns the actual value associated with the tracked 'this' or an + // empty value if none is found. + mlir::Value getThisParamOwnerCategory(CallOp callOp); + // Tracks current module. ModuleOp theModule; // Track current function under analysis @@ -87,8 +100,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Common helpers. bool isCtorInitPointerFromOwner(CallOp callOp, const clang::CXXConstructorDecl *ctor); - bool isNonConstUseOfOwner(CallOp callOp, const clang::CXXMethodDecl *m); - bool isOwnerOrPointerClassMethod(mlir::Value firstParam, + mlir::Value getNonConstUseOfOwner(CallOp callOp, + const clang::CXXMethodDecl *m); + bool isOwnerOrPointerClassMethod(CallOp callOp, const clang::CXXMethodDecl *m); // Diagnostic helpers. @@ -1429,16 +1443,38 @@ static const clang::CXXMethodDecl *getMethod(ModuleOp mod, CallOp callOp) { return dyn_cast(method.getAstAttr().getAstDecl()); } +mlir::Value LifetimeCheckPass::getThisParamPointerCategory(CallOp callOp) { + auto thisptr = callOp.getArgOperand(0); + if (ptrs.count(thisptr)) + return thisptr; + if (auto loadOp = dyn_cast_or_null(thisptr.getDefiningOp())) { + if (ptrs.count(loadOp.getAddr())) + return loadOp.getAddr(); + } + return {}; +} + +mlir::Value LifetimeCheckPass::getThisParamOwnerCategory(CallOp callOp) { + auto thisptr = callOp.getArgOperand(0); + if (owners.count(thisptr)) + return thisptr; + if (auto loadOp = dyn_cast_or_null(thisptr.getDefiningOp())) { + if (owners.count(loadOp.getAddr())) + return loadOp.getAddr(); + } + return {}; +} + void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m) { // MyPointer::operator=(MyPointer&&)(%dst, %src) // or // MyOwner::operator=(MyOwner&&)(%dst, %src) - auto dst = callOp.getArgOperand(0); + auto dst = getThisParamPointerCategory(callOp); auto src = callOp.getArgOperand(1); // Move assignments between pointer categories. - if (ptrs.count(dst) && ptrs.count(src)) { + if (dst && ptrs.count(src)) { // Note that the current pattern here usually comes from a xvalue in src // where all the initialization is done, and this move assignment is // where we finally materialize it back to the original pointer category. @@ -1450,8 +1486,9 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, return; } - // Copy assignments between pointer categories. - if (owners.count(dst) && owners.count(src)) { + // Copy assignments between owner categories. + dst = getThisParamOwnerCategory(callOp); + if (dst && owners.count(src)) { // Handle as a non const use of owner, invalidating pointers. checkNonConstUseOfOwner(dst, callOp.getLoc()); @@ -1464,15 +1501,16 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, void LifetimeCheckPass::checkCopyAssignment(CallOp callOp, const clang::CXXMethodDecl *m) { // MyIntOwner::operator=(MyIntOwner&)(%dst, %src) - auto dst = callOp.getArgOperand(0); + auto dst = getThisParamOwnerCategory(callOp); auto src = callOp.getArgOperand(1); // Copy assignment between owner categories. - if (owners.count(dst) && owners.count(src)) + if (dst && owners.count(src)) return checkNonConstUseOfOwner(dst, callOp.getLoc()); // Copy assignment between pointer categories. - if (ptrs.count(dst) && ptrs.count(src)) { + dst = getThisParamPointerCategory(callOp); + if (dst && ptrs.count(src)) { getPmap()[dst] = getPmap()[src]; return; } @@ -1490,10 +1528,10 @@ bool LifetimeCheckPass::isCtorInitPointerFromOwner( return false; // FIXME: should we scan all arguments past first to look for an owner? - auto addr = callOp.getArgOperand(0); + auto ptr = getThisParamPointerCategory(callOp); auto owner = callOp.getArgOperand(1); - if (ptrs.count(addr) && owners.count(owner)) + if (ptr && owners.count(owner)) return true; return false; @@ -1511,15 +1549,16 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, // both results in pset(p) == {null} if (ctor->isDefaultConstructor()) { // First argument passed is always the alloca for the 'this' ptr. - auto addr = callOp.getArgOperand(0); // Currently two possible actions: // 1. Skip Owner category initialization. // 2. Initialize Pointer categories. - if (owners.count(addr)) + auto addr = getThisParamOwnerCategory(callOp); + if (addr) return; - if (!ptrs.count(addr)) + addr = getThisParamPointerCategory(callOp); + if (!addr) return; // Not interested in block/function arguments or any indirect @@ -1537,7 +1576,8 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, } if (isCtorInitPointerFromOwner(callOp, ctor)) { - auto addr = callOp.getArgOperand(0); + auto addr = getThisParamPointerCategory(callOp); + assert(addr && "expected pointer category"); auto owner = callOp.getArgOperand(1); getPmap()[addr].clear(); getPmap()[addr].insert(State::getOwnedBy(owner)); @@ -1547,8 +1587,8 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, void LifetimeCheckPass::checkOperators(CallOp callOp, const clang::CXXMethodDecl *m) { - auto addr = callOp.getArgOperand(0); - if (owners.count(addr)) { + auto addr = getThisParamOwnerCategory(callOp); + if (addr) { // const access to the owner is fine. if (m->isConst()) return; @@ -1561,24 +1601,24 @@ void LifetimeCheckPass::checkOperators(CallOp callOp, return checkNonConstUseOfOwner(addr, callOp.getLoc()); } - if (ptrs.count(addr)) { + addr = getThisParamPointerCategory(callOp); + if (addr) { // The assumption is that method calls on pointer types should trigger // deref checking. checkPointerDeref(addr, callOp.getLoc()); + return; } // FIXME: we also need to look at operators from non owner or pointer // types that could be using Owner/Pointer types as parameters. } -bool LifetimeCheckPass::isNonConstUseOfOwner(CallOp callOp, - const clang::CXXMethodDecl *m) { +mlir::Value +LifetimeCheckPass::getNonConstUseOfOwner(CallOp callOp, + const clang::CXXMethodDecl *m) { if (m->isConst()) - return false; - auto addr = callOp.getArgOperand(0); - if (owners.count(addr)) - return true; - return false; + return {}; + return getThisParamOwnerCategory(callOp); } void LifetimeCheckPass::checkNonConstUseOfOwner(mlir::Value ownerAddr, @@ -1659,13 +1699,13 @@ void LifetimeCheckPass::checkOtherMethodsAndFunctions( } bool LifetimeCheckPass::isOwnerOrPointerClassMethod( - mlir::Value firstParam, const clang::CXXMethodDecl *m) { + CallOp callOp, const clang::CXXMethodDecl *m) { // For the sake of analysis, these behave like regular functions if (!m || m->isStatic()) return false; - if (owners.count(firstParam) || ptrs.count(firstParam)) - return true; - return false; + // Check the object for owner/pointer by looking at the 'this' pointer. + return getThisParamPointerCategory(callOp) || + getThisParamOwnerCategory(callOp); } bool LifetimeCheckPass::isLambdaType(mlir::Type ty) { @@ -1741,7 +1781,7 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { trackCallToCoroutine(callOp); auto methodDecl = getMethod(theModule, callOp); - if (!isOwnerOrPointerClassMethod(callOp.getArgOperand(0), methodDecl)) + if (!isOwnerOrPointerClassMethod(callOp, methodDecl)) return checkOtherMethodsAndFunctions(callOp, methodDecl); // From this point on only owner and pointer class methods handling, @@ -1758,13 +1798,14 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // For any other methods... // Non-const member call to a Owner invalidates any of its users. - if (isNonConstUseOfOwner(callOp, methodDecl)) - return checkNonConstUseOfOwner(callOp.getArgOperand(0), callOp.getLoc()); + if (auto owner = getNonConstUseOfOwner(callOp, methodDecl)) { + return checkNonConstUseOfOwner(owner, callOp.getLoc()); + } // Take a pset(Ptr) = { Ownr' } where Own got invalidated, this will become // invalid access to Ptr if any of its methods are used. - auto addr = callOp.getArgOperand(0); - if (ptrs.count(addr)) + auto addr = getThisParamPointerCategory(callOp); + if (addr) return checkPointerDeref(addr, callOp.getLoc()); } diff --git a/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp b/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp index 9028eaf50252..52108c625831 100644 --- a/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp @@ -2,6 +2,8 @@ #include "std-cxx.h" +// expected-no-diagnostics + typedef enum SType { INFO_ENUM_0 = 9, INFO_ENUM_1 = 2020, @@ -16,8 +18,6 @@ typedef struct InfoRaw { void swappy(unsigned c) { std::vector images(c); for (auto& image : images) { - // FIXME: this warning shall not happen, fix next! - image = {INFO_ENUM_1}; // expected-warning {{passing aggregate containing invalid pointer member 'ref.tmp0.next'}} - // expected-note@-1 {{'nullptr' invalidated here}} + image = {INFO_ENUM_1}; } } \ No newline at end of file From a23f7ec6298c0dfce12cd4e05cb063067281df8b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 31 Jul 2023 16:43:53 -0300 Subject: [PATCH 1112/2301] [CIR][LifetimeCheck] Avoid more false positives by ignoring unused references to aggregate fields --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 7 +++++++ .../CIR/Transforms/lifetime-check-range-for-vector.cpp | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index ec9e6773a53e..6dbf0d19bff6 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1037,6 +1037,11 @@ void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr, return; auto eltAddr = op.getResult(); + // If nothing is using this StructElementAddr, don't bother since + // it could lead to even more noisy outcomes. + if (eltAddr.use_empty()) + return; + auto eltTy = eltAddr.getType().cast().getPointee(); @@ -1451,6 +1456,7 @@ mlir::Value LifetimeCheckPass::getThisParamPointerCategory(CallOp callOp) { if (ptrs.count(loadOp.getAddr())) return loadOp.getAddr(); } + // TODO: add a remark to spot 'this' indirections we currently not track. return {}; } @@ -1462,6 +1468,7 @@ mlir::Value LifetimeCheckPass::getThisParamOwnerCategory(CallOp callOp) { if (owners.count(loadOp.getAddr())) return loadOp.getAddr(); } + // TODO: add a remark to spot 'this' indirections we currently not track. return {}; } diff --git a/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp b/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp index 52108c625831..e9c6d62b6f64 100644 --- a/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp @@ -20,4 +20,9 @@ void swappy(unsigned c) { for (auto& image : images) { image = {INFO_ENUM_1}; } + + std::vector images2(c); + for (unsigned i = 0; i < c; i++) { + images2[i] = {INFO_ENUM_1}; + } } \ No newline at end of file From 84cdf1185710574e4a872220df7786b289c46fd4 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Mon, 31 Jul 2023 15:15:44 -0700 Subject: [PATCH 1113/2301] [CIR] Enable MLIR asm printer options (#195) Enabling MLIR asm printing options such as -mlir-print-op-generic --- clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp | 2 ++ clang/test/CIR/mlirargs.c | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 5ae02ca4d28f..76c92fdb5442 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -33,6 +33,7 @@ #include "llvm/Support/ErrorHandling.h" #if CLANG_ENABLE_CIR +#include "mlir/IR/AsmState.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Pass/PassManager.h" #include "clang/CIRFrontendAction/CIRGenAction.h" @@ -324,6 +325,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) { if (!Clang->getFrontendOpts().MLIRArgs.empty()) { mlir::registerMLIRContextCLOptions(); mlir::registerPassManagerCLOptions(); + mlir::registerAsmPrinterCLOptions(); unsigned NumArgs = Clang->getFrontendOpts().MLIRArgs.size(); auto Args = std::make_unique(NumArgs + 2); Args[0] = "clang (MLIR option parsing)"; diff --git a/clang/test/CIR/mlirargs.c b/clang/test/CIR/mlirargs.c index 7719aaf4f388..cfb07197ef18 100644 --- a/clang/test/CIR/mlirargs.c +++ b/clang/test/CIR/mlirargs.c @@ -1,10 +1,12 @@ // Clang returns 1 when wrong arguments are given. -// RUN: not %clang_cc1 -mmlir -mlir-disable-threadingd 2>&1 | FileCheck %s --check-prefix=WRONG +// RUN: not %clang_cc1 -mmlir -mlir-disable-threadingd -mmlir -mlir-print-op-genericd 2>&1 | FileCheck %s --check-prefix=WRONG // Test that the driver can pass mlir args to cc1. // RUN: %clang -### -mmlir -mlir-disable-threading %s 2>&1 | FileCheck %s --check-prefix=CC1 // WRONG: clang (MLIR option parsing): Unknown command line argument '-mlir-disable-threadingd'. Try: 'clang (MLIR option parsing) --help' // WRONG: clang (MLIR option parsing): Did you mean '--mlir-disable-threading'? +// WRONG: clang (MLIR option parsing): Unknown command line argument '-mlir-print-op-genericd'. Try: 'clang (MLIR option parsing) --help' +// WRONG: clang (MLIR option parsing): Did you mean '--mlir-print-op-generic'? // CC1: "-mmlir" "-mlir-disable-threading" From e0f1878a168317f3d9a27e41fd5e4a355ad4b067 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 31 Jul 2023 16:50:16 -0300 Subject: [PATCH 1114/2301] Revert "[CIR] Yield boolean value in cir.loop condition region" The changes made to `getSuccessorRegion` seem to have caused an issue. This reverts commit 5e449c0cd5d3ba4de61357cafc4ea0fb4463dfd5. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 32 ++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 49 ++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 39 +++- clang/test/CIR/CodeGen/loop.cpp | 57 +++-- clang/test/CIR/CodeGen/rangefor.cpp | 6 +- clang/test/CIR/IR/branch.cir | 16 +- clang/test/CIR/IR/invalid.cir | 68 +----- clang/test/CIR/IR/loop.cir | 48 ++-- clang/test/CIR/Lowering/dot.cir | 26 ++- clang/test/CIR/Lowering/loop.cir | 215 ++++++++++++------ clang/test/CIR/Transforms/merge-cleanups.cir | 18 +- 12 files changed, 351 insertions(+), 229 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 3f7f14238eaf..7c0fe4c19518 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1114,7 +1114,8 @@ def LoopOp : CIR_Op<"loop", let description = [{ `cir.loop` represents C/C++ loop forms. It defines 3 blocks: - `cond`: region can contain multiple blocks, terminated by regular - `cir.yield %x` where `%x` is the boolean value to be evaluated. + `cir.yield` when control should yield back to the parent, and + `cir.yield continue` when execution continues to another region. The region destination depends on the loop form specified. - `step`: region with one block, containing code to compute the loop step, must be terminated with `cir.yield`. @@ -1129,8 +1130,7 @@ def LoopOp : CIR_Op<"loop", // i = i + 1; // } cir.loop while(cond : { - %2 = cir.const(#cir.bool) : !cir.bool - cir.yield %2 : !cir.bool + cir.yield continue }, step : { cir.yield }) { diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 0f84cd6b3bbf..ca0c65599543 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -635,6 +635,26 @@ mlir::LogicalResult CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, return res; } +static mlir::LogicalResult buildLoopCondYield(mlir::OpBuilder &builder, + mlir::Location loc, + mlir::Value cond) { + mlir::Block *trueBB = nullptr, *falseBB = nullptr; + { + mlir::OpBuilder::InsertionGuard guard(builder); + trueBB = builder.createBlock(builder.getBlock()->getParent()); + builder.create(loc, YieldOpKind::Continue); + } + { + mlir::OpBuilder::InsertionGuard guard(builder); + falseBB = builder.createBlock(builder.getBlock()->getParent()); + builder.create(loc); + } + + assert((trueBB && falseBB) && "expected both blocks to exist"); + builder.create(loc, cond, trueBB, falseBB); + return mlir::success(); +} + mlir::LogicalResult CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef ForAttrs) { @@ -668,7 +688,8 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, assert(!UnimplementedFeature::createProfileWeightsForLoop()); assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal = evaluateExprAsBool(S.getCond()); - builder.create(loc, condVal); + if (buildLoopCondYield(b, loc, condVal).failed()) + loopRes = mlir::failure(); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -751,7 +772,8 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { loc, boolTy, mlir::cir::BoolAttr::get(b.getContext(), boolTy, true)); } - builder.create(loc, condVal); + if (buildLoopCondYield(b, loc, condVal).failed()) + loopRes = mlir::failure(); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -815,7 +837,8 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { // expression compares unequal to 0. The condition must be a // scalar type. mlir::Value condVal = evaluateExprAsBool(S.getCond()); - builder.create(loc, condVal); + if (buildLoopCondYield(b, loc, condVal).failed()) + loopRes = mlir::failure(); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -875,7 +898,8 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // expression compares unequal to 0. The condition must be a // scalar type. condVal = evaluateExprAsBool(S.getCond()); - builder.create(loc, condVal); + if (buildLoopCondYield(b, loc, condVal).failed()) + loopRes = mlir::failure(); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 4dbb9a4d5d32..d5f024eb5e22 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -14,7 +14,6 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "llvm/ADT/SmallVector.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" @@ -1099,9 +1098,12 @@ void LoopOp::build(OpBuilder &builder, OperationState &result, /// operand is not a constant. void LoopOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { - // If any index, do nothing. - if (!point.isParent()) + // If any index all the underlying regions branch back to the parent + // operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor()); return; + } // FIXME: we want to look at cond region for getting more accurate results // if the other regions will get a chance to execute. @@ -1113,29 +1115,26 @@ void LoopOp::getSuccessorRegions(mlir::RegionBranchPoint point, llvm::SmallVector LoopOp::getLoopRegions() { return {&getBody()}; } LogicalResult LoopOp::verify() { + // Cond regions should only terminate with plain 'cir.yield' or + // 'cir.yield continue'. + auto terminateError = [&]() { + return emitOpError() << "cond region must be terminated with " + "'cir.yield' or 'cir.yield continue'"; + }; - if (getCond().empty() || getStep().empty() || getBody().empty()) - return emitOpError("regions must not be empty"); - - auto condYield = dyn_cast(getCond().back().getTerminator()); - auto stepYield = dyn_cast(getStep().back().getTerminator()); - - if (!condYield || !stepYield) - return emitOpError( - "cond and step regions must be terminated with 'cir.yield'"); - - if (condYield.getNumOperands() != 1 || - !condYield.getOperand(0).getType().isa()) - return emitOpError("cond region must yield a single boolean value"); - - if (stepYield.getNumOperands() != 0) - return emitOpError("step region should not yield values"); - - // Body may yield or return. - auto *bodyTerminator = getBody().back().getTerminator(); - - if (isa(bodyTerminator) && bodyTerminator->getNumOperands() != 0) - return emitOpError("body region must not yield values"); + auto &blocks = getCond().getBlocks(); + for (Block &block : blocks) { + if (block.empty()) + continue; + auto &op = block.back(); + if (isa(op)) + continue; + if (!isa(op)) + terminateError(); + auto y = cast(op); + if (!(y.isPlain() || y.isContinue())) + terminateError(); + } return success(); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 34f6ad698207..6d6ba48ca663 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -118,6 +118,27 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { using mlir::OpConversionPattern::OpConversionPattern; using LoopKind = mlir::cir::LoopOpKind; + mlir::LogicalResult + fetchCondRegionYields(mlir::Region &condRegion, + mlir::cir::YieldOp &yieldToBody, + mlir::cir::YieldOp &yieldToCont) const { + for (auto &bb : condRegion) { + if (auto yieldOp = dyn_cast(bb.getTerminator())) { + if (!yieldOp.getKind().has_value()) + yieldToCont = yieldOp; + else if (yieldOp.getKind() == mlir::cir::YieldOpKind::Continue) + yieldToBody = yieldOp; + else + return mlir::failure(); + } + } + + // Succeed only if both yields are found. + if (!yieldToBody || !yieldToCont) + return mlir::failure(); + return mlir::success(); + } + mlir::LogicalResult matchAndRewrite(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { @@ -129,8 +150,9 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { // Fetch required info from the condition region. auto &condRegion = loopOp.getCond(); auto &condFrontBlock = condRegion.front(); - auto condYield = - cast(condRegion.back().getTerminator()); + mlir::cir::YieldOp yieldToBody, yieldToCont; + if (fetchCondRegionYields(condRegion, yieldToBody, yieldToCont).failed()) + return loopOp.emitError("failed to fetch yields in cond region"); // Fetch required info from the body region. auto &bodyRegion = loopOp.getBody(); @@ -143,7 +165,7 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { auto &stepRegion = loopOp.getStep(); auto &stepFrontBlock = stepRegion.front(); auto stepYield = - cast(stepRegion.back().getTerminator()); + dyn_cast(stepRegion.back().getTerminator()); // Move loop op region contents to current CFG. rewriter.inlineRegionBefore(condRegion, continueBlock); @@ -156,10 +178,13 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { auto &entry = (kind != LoopKind::DoWhile ? condFrontBlock : bodyFrontBlock); rewriter.create(loopOp.getLoc(), &entry); - // Branch to body when true and to exit when false. - rewriter.setInsertionPoint(condYield); - rewriter.replaceOpWithNewOp( - condYield, condYield.getOperand(0), &bodyFrontBlock, continueBlock); + // Set loop exit point to continue block. + rewriter.setInsertionPoint(yieldToCont); + rewriter.replaceOpWithNewOp(yieldToCont, continueBlock); + + // Branch from condition to body. + rewriter.setInsertionPoint(yieldToBody); + rewriter.replaceOpWithNewOp(yieldToBody, &bodyFrontBlock); // Branch from body to condition or to step on for-loop cases. rewriter.setInsertionPoint(bodyYield); diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 698c32c890c9..90831e31e898 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -8,8 +8,7 @@ void l0() { // CHECK: cir.func @_Z2l0v // CHECK: cir.loop for(cond : { -// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.yield %0 +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -28,7 +27,11 @@ void l1() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool -// CHECK-NEXT: cir.yield %6 : !cir.bool +// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i @@ -59,8 +62,12 @@ void l2(bool cond) { // CHECK: cir.func @_Z2l2b // CHECK: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.yield %3 : !cir.bool +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -73,8 +80,7 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %3 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.yield %3 : !cir.bool +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -87,9 +93,13 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool -// CHECK-NEXT: cir.yield %4 : !cir.bool +// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool +// CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -118,7 +128,11 @@ void l3(bool cond) { // CHECK: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.yield %3 +// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -131,8 +145,7 @@ void l3(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: %3 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.yield %3 : !cir.bool +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -147,7 +160,11 @@ void l3(bool cond) { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool -// CHECK-NEXT: cir.yield %4 : !cir.bool +// CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -171,8 +188,7 @@ void l4() { // CHECK: cir.func @_Z2l4v // CHECK: cir.loop while(cond : { -// CHECK-NEXT: %4 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.yield %4 : !cir.bool +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -199,7 +215,11 @@ void l5() { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool -// CHECK-NEXT: cir.yield %1 : !cir.bool +// CHECK-NEXT: cir.brcond %1 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -218,8 +238,7 @@ void l6() { // CHECK: cir.func @_Z2l6v() // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.yield %0 : !cir.bool +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 3bf27b417692..05d310efc515 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -46,7 +46,11 @@ void init(unsigned numImages) { // CHECK: cir.store %11, %6 : !ty_22struct2E__vector_iterator22, cir.ptr // CHECK: cir.loop for(cond : { // CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool -// CHECK: cir.yield %12 : !cir.bool +// CHECK: cir.brcond %12 ^bb1, ^bb2 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: cir.yield continue +// CHECK: ^bb2: // pred: ^bb0 +// CHECK: cir.yield // CHECK: }, step : { // CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr // CHECK: cir.yield diff --git a/clang/test/CIR/IR/branch.cir b/clang/test/CIR/IR/branch.cir index bc9c26df7669..6f75d9e25bd3 100644 --- a/clang/test/CIR/IR/branch.cir +++ b/clang/test/CIR/IR/branch.cir @@ -7,13 +7,17 @@ cir.func @b0() { cir.scope { cir.loop while(cond : { %0 = cir.const(#true) : !cir.bool - cir.yield %0 : !cir.bool + cir.brcond %0 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield }, step : { cir.yield }) { cir.br ^bb1 ^bb1: - cir.yield + cir.return } } cir.return @@ -23,13 +27,17 @@ cir.func @b0() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %0 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.yield %0 : !cir.bool +// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { // CHECK-NEXT: cir.br ^bb1 // CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 78d93a2f933a..17d3afcfe0e9 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -163,7 +163,7 @@ cir.func @cast4(%p: !cir.ptr) { #true = #cir.bool : !cir.bool cir.func @b0() { cir.scope { - cir.loop while(cond : { // expected-error {{cond region must yield a single boolean value}} + cir.loop while(cond : { // expected-error {{cond region must be terminated with 'cir.yield' or 'cir.yield continue'}} %0 = cir.const(#true) : !cir.bool cir.brcond %0 ^bb1, ^bb2 ^bb1: @@ -183,72 +183,6 @@ cir.func @b0() { // ----- -#false = #cir.bool : !cir.bool -#true = #cir.bool : !cir.bool -cir.func @b0() { - cir.loop while(cond : { // expected-error {{cond and step regions must be terminated with 'cir.yield'}} - %0 = cir.const(#true) : !cir.bool - cir.return %0 : !cir.bool - }, step : { - cir.yield - }) { - cir.yield - } - cir.return -} - -// ----- - -#false = #cir.bool : !cir.bool -#true = #cir.bool : !cir.bool -cir.func @b0() { - cir.loop while(cond : { // expected-error {{cond and step regions must be terminated with 'cir.yield'}} - %0 = cir.const(#true) : !cir.bool - cir.yield %0 : !cir.bool - }, step : { - cir.return - }) { - cir.return - } - cir.return -} - -// ----- - -#false = #cir.bool : !cir.bool -#true = #cir.bool : !cir.bool -cir.func @b0() { - cir.loop while(cond : { // expected-error {{step region should not yield values}} - %0 = cir.const(#true) : !cir.bool - cir.yield %0 : !cir.bool - }, step : { - %1 = cir.const(#true) : !cir.bool - cir.yield %1 : !cir.bool - }) { - cir.return - } - cir.return -} - -// ----- - -#false = #cir.bool : !cir.bool -#true = #cir.bool : !cir.bool -cir.func @b0() { - cir.loop while(cond : { // expected-error {{body region must not yield values}} - %0 = cir.const(#true) : !cir.bool - cir.yield %0 : !cir.bool - }, step : { - cir.yield - }) { - %1 = cir.const(#true) : !cir.bool - cir.yield %1 : !cir.bool - } - cir.return -} - -// ----- - !u32i = !cir.int !u8i = !cir.int module { diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 9b1ba9da6a80..ac9658a304d3 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -15,7 +15,11 @@ cir.func @l0() { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<10> : !u32i) : !u32i %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.yield %6 : !cir.bool + cir.brcond %6 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield }, step : { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<1> : !u32i) : !u32i @@ -42,7 +46,11 @@ cir.func @l0() { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<10> : !u32i) : !u32i %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.yield %6 : !cir.bool + cir.brcond %6 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield }, step : { cir.yield }) { @@ -66,7 +74,11 @@ cir.func @l0() { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<10> : !u32i) : !u32i %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.yield %6 : !cir.bool + cir.brcond %6 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield }, step : { cir.yield }) { @@ -85,7 +97,11 @@ cir.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.yield %6 : !cir.bool +// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i @@ -108,7 +124,11 @@ cir.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.yield %6 : !cir.bool +// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -127,7 +147,11 @@ cir.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.yield %6 : !cir.bool +// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: ^bb2: +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -141,8 +165,7 @@ cir.func @l0() { cir.func @l1() { cir.scope { cir.loop while(cond : { - %0 = cir.const(#true) : !cir.bool - cir.yield %0 : !cir.bool + cir.yield continue }, step : { cir.yield }) { @@ -155,8 +178,7 @@ cir.func @l1() { // CHECK: cir.func @l1 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.yield %0 : !cir.bool +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -169,8 +191,7 @@ cir.func @l1() { cir.func @l2() { cir.scope { cir.loop while(cond : { - %0 = cir.const(#true) : !cir.bool - cir.yield %0 : !cir.bool + cir.yield }, step : { cir.yield }) { @@ -183,8 +204,7 @@ cir.func @l2() { // CHECK: cir.func @l2 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.yield %0 : !cir.bool +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 780317bd8d2a..8b3b553492b1 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o %t.mlir +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR !s32i = !cir.int @@ -23,7 +23,11 @@ module { %11 = cir.load %2 : cir.ptr , !s32i %12 = cir.cmp(lt, %10, %11) : !s32i, !s32i %13 = cir.cast(int_to_bool, %12 : !s32i), !cir.bool - cir.yield %13 : !cir.bool + cir.brcond %13 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield }, step : { %10 = cir.load %8 : cir.ptr , !s32i %11 = cir.unary(inc, %10) : !s32i, !s32i @@ -76,7 +80,7 @@ module { // MLIR-NEXT: %13 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: llvm.store %13, %12 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb4 +// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb6 // MLIR-NEXT: %14 = llvm.load %12 : !llvm.ptr // MLIR-NEXT: %15 = llvm.load %5 : !llvm.ptr // MLIR-NEXT: %16 = llvm.icmp "slt" %14, %15 : i32 @@ -85,8 +89,12 @@ module { // MLIR-NEXT: %19 = llvm.icmp "ne" %17, %18 : i32 // MLIR-NEXT: %20 = llvm.zext %19 : i1 to i8 // MLIR-NEXT: %21 = llvm.trunc %20 : i8 to i1 -// MLIR-NEXT: llvm.cond_br %21, ^bb3, ^bb5 +// MLIR-NEXT: llvm.cond_br %21, ^bb3, ^bb4 // MLIR-NEXT: ^bb3: // pred: ^bb2 +// MLIR-NEXT: llvm.br ^bb5 +// MLIR-NEXT: ^bb4: // pred: ^bb2 +// MLIR-NEXT: llvm.br ^bb7 +// MLIR-NEXT: ^bb5: // pred: ^bb3 // MLIR-NEXT: %22 = llvm.load %1 : !llvm.ptr // MLIR-NEXT: %23 = llvm.load %12 : !llvm.ptr // MLIR-NEXT: %24 = llvm.getelementptr %22[%23] : (!llvm.ptr, i32) -> !llvm.ptr @@ -99,16 +107,16 @@ module { // MLIR-NEXT: %31 = llvm.load %9 : !llvm.ptr // MLIR-NEXT: %32 = llvm.fadd %31, %30 : f64 // MLIR-NEXT: llvm.store %32, %9 : f64, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb4 -// MLIR-NEXT: ^bb4: // pred: ^bb3 +// MLIR-NEXT: llvm.br ^bb6 +// MLIR-NEXT: ^bb6: // pred: ^bb5 // MLIR-NEXT: %33 = llvm.load %12 : !llvm.ptr // MLIR-NEXT: %34 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: %35 = llvm.add %33, %34 : i32 // MLIR-NEXT: llvm.store %35, %12 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb5: // pred: ^bb2 -// MLIR-NEXT: llvm.br ^bb6 -// MLIR-NEXT: ^bb6: // pred: ^bb5 +// MLIR-NEXT: ^bb7: // pred: ^bb4 +// MLIR-NEXT: llvm.br ^bb8 +// MLIR-NEXT: ^bb8: // pred: ^bb7 // MLIR-NEXT: %36 = llvm.load %9 : !llvm.ptr // MLIR-NEXT: llvm.store %36, %7 : f64, !llvm.ptr // MLIR-NEXT: %37 = llvm.load %7 : !llvm.ptr diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index dcef5e304c5c..f513185ac0ca 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -1,5 +1,5 @@ -// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o %t.mlir -// RUN: FileCheck --input-file=%t.mlir %s +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR !s32i = !cir.int module { @@ -12,7 +12,11 @@ module { %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.yield %5 : !cir.bool + cir.brcond %5 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield }, step : { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i @@ -24,90 +28,161 @@ module { cir.return } - // CHECK: llvm.func @testFor() - // [...] - // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#COND]]: - // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#BODY]]: - // [...] - // CHECK: llvm.br ^bb[[#STEP:]] - // CHECK: ^bb[[#STEP]]: - // [...] - // CHECK: llvm.br ^bb[[#COND]] - // CHECK: ^bb[[#EXIT]]: - // [...] - // CHECK: } - +// MLIR: module { +// MLIR-NEXT: llvm.func @testFor() +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: llvm.store %2, %1 : i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb1 +// ============= Condition block ============= +// MLIR-NEXT: ^bb1: // 2 preds: ^bb0, ^bb5 +// MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %4 = llvm.mlir.constant(10 : i32) : i32 +// MLIR-NEXT: %5 = llvm.icmp "slt" %3, %4 : i32 +// MLIR-NEXT: %6 = llvm.zext %5 : i1 to i32 +// MLIR-NEXT: %7 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: %8 = llvm.icmp "ne" %6, %7 : i32 +// MLIR-NEXT: %9 = llvm.zext %8 : i1 to i8 +// MLIR-NEXT: %10 = llvm.trunc %9 : i8 to i1 +// MLIR-NEXT: llvm.cond_br %10, ^bb2, ^bb3 +// MLIR-NEXT: ^bb2: // pred: ^bb1 +// MLIR-NEXT: llvm.br ^bb4 +// MLIR-NEXT: ^bb3: // pred: ^bb1 +// MLIR-NEXT: llvm.br ^bb6 +// ============= Body block ============= +// MLIR-NEXT: ^bb4: // pred: ^bb2 +// MLIR-NEXT: llvm.br ^bb5 +// ============= Step block ============= +// MLIR-NEXT: ^bb5: // pred: ^bb4 +// MLIR-NEXT: %11 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %12 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: %13 = llvm.add %11, %12 : i32 +// MLIR-NEXT: llvm.store %13, %1 : i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb1 +// ============= Exit block ============= +// MLIR-NEXT: ^bb6: // pred: ^bb3 +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } // Test while cir.loop operation lowering. cir.func @testWhile(%arg0: !s32i) { %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, cir.ptr - cir.loop while(cond : { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.const(#cir.int<10> : !s32i) : !s32i - %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i - %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool - cir.yield %4 : !cir.bool - }, step : { - cir.yield - }) { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.unary(inc, %1) : !s32i, !s32i - cir.store %2, %0 : !s32i, cir.ptr - cir.yield + cir.scope { + cir.loop while(cond : { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i + %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool + cir.brcond %4 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + cir.yield + }) { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.unary(inc, %1) : !s32i, !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.yield + } } cir.return } - // CHECK: llvm.func @testWhile - // [...] - // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#COND]]: - // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#BODY]]: - // [...] - // CHECK: llvm.br ^bb[[#COND]] - // CHECK: ^bb[[#EXIT]]: - // [...] - // CHECK: } - + // MLIR: llvm.func @testWhile(%arg0: i32) + // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 + // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr + // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr + // MLIR-NEXT: llvm.br ^bb1 + // MLIR-NEXT: ^bb1: + // MLIR-NEXT: llvm.br ^bb2 + // ============= Condition block ============= + // MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb5 + // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %3 = llvm.mlir.constant(10 : i32) : i32 + // MLIR-NEXT: %4 = llvm.icmp "slt" %2, %3 : i32 + // MLIR-NEXT: %5 = llvm.zext %4 : i1 to i32 + // MLIR-NEXT: %6 = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: %7 = llvm.icmp "ne" %5, %6 : i32 + // MLIR-NEXT: %8 = llvm.zext %7 : i1 to i8 + // MLIR-NEXT: %9 = llvm.trunc %8 : i8 to i1 + // MLIR-NEXT: llvm.cond_br %9, ^bb3, ^bb4 + // MLIR-NEXT: ^bb3: // pred: ^bb2 + // MLIR-NEXT: llvm.br ^bb5 + // MLIR-NEXT: ^bb4: // pred: ^bb2 + // MLIR-NEXT: llvm.br ^bb6 + // ============= Body block ============= + // MLIR-NEXT: ^bb5: // pred: ^bb3 + // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %11 = llvm.mlir.constant(1 : i32) : i32 + // MLIR-NEXT: %12 = llvm.add %10, %11 : i32 + // MLIR-NEXT: llvm.store %12, %1 : i32, !llvm.ptr + // MLIR-NEXT: llvm.br ^bb2 + // ============= Exit block ============= + // MLIR-NEXT: ^bb6: // pred: ^bb4 + // MLIR-NEXT: llvm.br ^bb7 // Test do-while cir.loop operation lowering. cir.func @testDoWhile(%arg0: !s32i) { %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, cir.ptr - cir.loop dowhile(cond : { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.const(#cir.int<10> : !s32i) : !s32i - %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i - %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool - cir.yield %4 : !cir.bool - }, step : { - cir.yield - }) { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.unary(inc, %1) : !s32i, !s32i - cir.store %2, %0 : !s32i, cir.ptr - cir.yield + cir.scope { + cir.loop dowhile(cond : { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i + %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool + cir.brcond %4 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + cir.yield + }) { + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.unary(inc, %1) : !s32i, !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.yield + } } cir.return } - // CHECK: llvm.func @testDoWhile - // [...] - // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#COND:]]: - // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#BODY]]: - // [...] - // CHECK: llvm.br ^bb[[#COND]] - // CHECK: ^bb[[#EXIT]]: - // [...] - // CHECK: } + // MLIR: llvm.func @testDoWhile(%arg0: i32) + // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 + // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr + // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr + // MLIR-NEXT: llvm.br ^bb1 + // MLIR-NEXT: ^bb1: + // MLIR-NEXT: llvm.br ^bb5 + // ============= Condition block ============= + // MLIR-NEXT: ^bb2: + // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %3 = llvm.mlir.constant(10 : i32) : i32 + // MLIR-NEXT: %4 = llvm.icmp "slt" %2, %3 : i32 + // MLIR-NEXT: %5 = llvm.zext %4 : i1 to i32 + // MLIR-NEXT: %6 = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: %7 = llvm.icmp "ne" %5, %6 : i32 + // MLIR-NEXT: %8 = llvm.zext %7 : i1 to i8 + // MLIR-NEXT: %9 = llvm.trunc %8 : i8 to i1 + // MLIR-NEXT: llvm.cond_br %9, ^bb3, ^bb4 + // MLIR-NEXT: ^bb3: + // MLIR-NEXT: llvm.br ^bb5 + // MLIR-NEXT: ^bb4: + // MLIR-NEXT: llvm.br ^bb6 + // ============= Body block ============= + // MLIR-NEXT: ^bb5: + // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %11 = llvm.mlir.constant(1 : i32) : i32 + // MLIR-NEXT: %12 = llvm.add %10, %11 : i32 + // MLIR-NEXT: llvm.store %12, %1 : i32, !llvm.ptr + // MLIR-NEXT: llvm.br ^bb2 + // ============= Exit block ============= + // MLIR-NEXT: ^bb6: + // MLIR-NEXT: llvm.br ^bb7 } diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 818570930928..3b0b21e935fe 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -65,7 +65,11 @@ module { cir.scope { cir.loop while(cond : { %0 = cir.const(#true) : !cir.bool - cir.yield %0 : !cir.bool + cir.brcond %0 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield }, step : { cir.yield }) { @@ -81,7 +85,11 @@ module { cir.scope { cir.loop while(cond : { %0 = cir.const(#false) : !cir.bool - cir.yield %0 : !cir.bool + cir.brcond %0 ^bb1, ^bb2 + ^bb1: + cir.yield continue + ^bb2: + cir.yield }, step : { cir.yield }) { @@ -133,8 +141,7 @@ module { // CHECK: cir.func @l0 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.yield %0 : !cir.bool +// CHECK-NEXT: cir.yield continue // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -147,8 +154,7 @@ module { // CHECK: cir.func @l1 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.const(#false) : !cir.bool -// CHECK-NEXT: cir.yield %0 : !cir.bool +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { From 4d0c71d03a07b8f078f4e1d0373487482c4080bb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 31 Jul 2023 20:04:32 -0300 Subject: [PATCH 1115/2301] [CIR][LifetimeCheck][NFC] Add DerefStyle to more clear reason on invalidation --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 34 +++++++++++-------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 6dbf0d19bff6..72882bdf288d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -62,10 +62,14 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void updatePointsToForZeroStruct(mlir::Value addr, StructType sTy, mlir::Location loc); - // FIXME: classify tasks, lambdas and call args prior to check ptr deref - // and pass down an enum. + enum DerefStyle { + Direct, + RetLambda, + CallParam, + IndirectCallParam, + }; void checkPointerDeref(mlir::Value addr, mlir::Location loc, - bool forRetLambda = false, bool inCallArg = false); + DerefStyle derefStyle = DerefStyle::Direct); void checkCoroTaskStore(StoreOp storeOp); void checkLambdaCaptureStore(StoreOp storeOp); void trackCallToCoroutine(CallOp callOp); @@ -107,7 +111,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { // Diagnostic helpers. void emitInvalidHistory(mlir::InFlightDiagnostic &D, mlir::Value histKey, - mlir::Location warningLoc, bool forRetLambda); + mlir::Location warningLoc, + DerefStyle derefStyle = DerefStyle::Direct); /// /// Pass options handling @@ -573,8 +578,7 @@ void LifetimeCheckPass::LexicalScopeGuard::cleanup() { // Catch interesting dangling references out of returns. for (auto l : localScope->localRetLambdas) - Pass.checkPointerDeref(l.first, l.second, - /*forRetLambda=*/true); + Pass.checkPointerDeref(l.first, l.second, DerefStyle::RetLambda); } void LifetimeCheckPass::checkBlock(Block &block) { @@ -1321,7 +1325,7 @@ void LifetimeCheckPass::checkLoad(LoadOp loadOp) { void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, mlir::Value histKey, mlir::Location warningLoc, - bool forRetLambda) { + DerefStyle derefStyle) { assert(invalidHist.count(histKey) && "expected invalid hist"); auto &hist = invalidHist[histKey]; unsigned limit = opts.histLimit; @@ -1346,7 +1350,7 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, << "coroutine bound to " << resource << " with expired lifetime"; D.attachNote(info.loc) << "at the end of scope or full-expression"; emittedDanglingTasks.insert(warningLoc); - } else if (forRetLambda) { + } else if (derefStyle == DerefStyle::RetLambda) { assert(currFunc && "expected function"); StringRef parent = currFunc->getLambda() ? "lambda" : "function"; D.attachNote(info.val->getLoc()) @@ -1370,7 +1374,7 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, } void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, - bool forRetLambda, bool inCallArg) { + DerefStyle derefStyle) { bool hasInvalid = getPmap()[addr].count(State::getInvalid()); bool hasNullptr = getPmap()[addr].count(State::getNullPtr()); @@ -1404,9 +1408,10 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, if (tasks.count(addr)) D << "use of coroutine '" << varName << "' with dangling reference"; - else if (forRetLambda) + else if (derefStyle == DerefStyle::RetLambda) D << "returned lambda captures local variable"; - else if (inCallArg) { + else if (derefStyle == DerefStyle::CallParam || + derefStyle == DerefStyle::IndirectCallParam) { bool isAgg = isa_and_nonnull(addr.getDefiningOp()); D << "passing "; if (!isAgg) @@ -1420,7 +1425,7 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, // TODO: add accuracy levels, different combinations of invalid and null // could have different ratios of false positives. if (hasInvalid && opts.emitHistoryInvalid()) - emitInvalidHistory(D, addr, loc, forRetLambda); + emitInvalidHistory(D, addr, loc, derefStyle); if (hasNullptr && opts.emitHistoryNull()) { assert(pmapNullHist.count(addr) && "expected nullptr hist"); @@ -1688,8 +1693,9 @@ void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, for (auto o : ownersToInvalidate) checkNonConstUseOfOwner(o, callOp.getLoc()); for (auto p : ptrsToDeref) - checkPointerDeref(p, callOp.getLoc(), /*forRetLambda=*/false, - /*inCallArg=*/true); + checkPointerDeref(p, callOp.getLoc(), + callOp.getCallee() ? DerefStyle::CallParam + : DerefStyle::IndirectCallParam); } void LifetimeCheckPass::checkOtherMethodsAndFunctions( From 4cbad6d706458f1ad2d91552a47a40304e0ec7e5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 31 Jul 2023 20:13:16 -0300 Subject: [PATCH 1116/2301] [CIR][LifetimeCheck] Change the accuracy for nulltpr passing to indirect calls --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 12 ++++++++-- .../CIR/Transforms/lifetime-null-passing.cpp | 23 +++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/Transforms/lifetime-null-passing.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 72882bdf288d..795b65de15a6 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1401,8 +1401,16 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, if (!hasInvalid && !hasNullptr) return; - // Looks like we found a bad path leading to this deference point, - // diagnose it. + // TODO: create verbosity/accuracy levels, for now use deref styles directly + // to decide when not to emit a warning. + + // For indirect calls, do not relly on blunt nullptr passing, require some + // invalidation to have happened in a path. + if (derefStyle == DerefStyle::IndirectCallParam && !hasInvalid) + return; + + // Ok, filtered out questionable warnings, take the bad path leading to this + // deference point and diagnose it. auto varName = getVarNameFromValue(addr); auto D = emitWarning(loc); diff --git a/clang/test/CIR/Transforms/lifetime-null-passing.cpp b/clang/test/CIR/Transforms/lifetime-null-passing.cpp new file mode 100644 index 000000000000..e26210b56234 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-null-passing.cpp @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +class _j {}; +typedef _j* jobj; + +typedef enum SType { + INFO_ENUM_0 = 9, + INFO_ENUM_1 = 2020, +} SType; + +typedef SType ( *FnPtr2)(unsigned session, jobj* surface); + +struct X { + struct entries { + FnPtr2 wildfn = nullptr; + }; + static entries e; +}; + +void nullpassing() { + jobj o = nullptr; + X::e.wildfn(0, &o); +} \ No newline at end of file From 988d1089f828bcaa90771315e21882a68c060f8e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 31 Jul 2023 20:49:01 -0300 Subject: [PATCH 1117/2301] [CIR][LifetimeCheck] Fix crash while exploding aggregates, missing cleanup --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 1 + clang/test/CIR/Transforms/lifetime-check-agg.cpp | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 795b65de15a6..207865956bb9 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -567,6 +567,7 @@ void LifetimeCheckPass::kill(const State &s, InvalidStyle invalidStyle, owners.erase(v); ptrs.erase(v); tasks.erase(v); + aggregates.erase(v); } } diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp index f820daceb5f5..fb89c0e6fd8f 100644 --- a/clang/test/CIR/Transforms/lifetime-check-agg.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -35,8 +35,8 @@ struct X { static entries e; }; -void exploded_fields(bool cond) { - { +void exploded_fields(bool cond, int c) { + for (int i = 0; i < c; i++) { InfoRaw info = {INFO_ENUM_0}; // expected-note {{invalidated here}} if (cond) { InfoPriv privTmp = {INFO_ENUM_1}; From 273bee4fa4f985b24ddba5ff4bbfc12c8d4181d7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 31 Jul 2023 20:58:04 -0300 Subject: [PATCH 1118/2301] [CIR][LifetimeCheck] Do not repeat the same diagnostic There is no really good way to test this since `-verify` does not really care how many times the same diagnostics shows up (somehow it already dedups). This improves user experience a tiny bit :) --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 207865956bb9..aadca0fa2819 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -279,6 +279,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { llvm::DenseMap>; PMapNullHistType pmapNullHist; + // Track emitted diagnostics, and do not repeat them. + llvm::SmallSet emittedDiagnostics; + /// /// Pointer Map and Pointer Set /// --------------------------- @@ -359,9 +362,6 @@ struct LifetimeCheckPass : public LifetimeCheckBase { bool isTaskType(mlir::Value taskVal); // Addresses of coroutine Tasks found in the current function. SmallPtrSet tasks; - // Since coawait encapsulates several calls to a promise, do not emit - // the same warning multiple times, e.g. under the same coawait. - llvm::SmallSet emittedDanglingTasks; /// /// Lambdas @@ -1350,7 +1350,6 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, D.attachNote((*info.val).getLoc()) << "coroutine bound to " << resource << " with expired lifetime"; D.attachNote(info.loc) << "at the end of scope or full-expression"; - emittedDanglingTasks.insert(warningLoc); } else if (derefStyle == DerefStyle::RetLambda) { assert(currFunc && "expected function"); StringRef parent = currFunc->getLambda() ? "lambda" : "function"; @@ -1386,10 +1385,8 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, emitRemark(loc) << "pset => " << Out.str(); }; - // Do not emit more than one diagonistic for the same task deref location. - // Since cowait hides a bunch of logic and calls to the promise type, just - // have one per suspend expr. - if (tasks.count(addr) && emittedDanglingTasks.count(loc)) + // Do not emit the same warning twice or more. + if (emittedDiagnostics.count(loc)) return; bool psetRemarkEmitted = false; @@ -1414,6 +1411,7 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, // deference point and diagnose it. auto varName = getVarNameFromValue(addr); auto D = emitWarning(loc); + emittedDiagnostics.insert(loc); if (tasks.count(addr)) D << "use of coroutine '" << varName << "' with dangling reference"; From 13d94b3c6a3f4d36ff785794f1340c784022baa4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Aug 2023 11:25:53 -0300 Subject: [PATCH 1119/2301] [CIR][LifetimeCheck] Add support for initialization for function arguments --- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 27 +++++++++++++++---- .../test/CIR/Transforms/lifetime-fn-args.cpp | 12 +++++++++ 2 files changed, 34 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/Transforms/lifetime-fn-args.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index aadca0fa2819..710e1f360a14 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -464,9 +464,20 @@ struct LifetimeCheckPass : public LifetimeCheckBase { } // namespace static std::string getVarNameFromValue(mlir::Value v) { - if (auto allocaOp = dyn_cast(v.getDefiningOp())) + + auto srcOp = v.getDefiningOp(); + if (!srcOp) { + auto blockArg = cast(v); + assert(blockArg.getOwner()->isEntryBlock() && "random block args NYI"); + llvm::SmallString<128> finalName; + llvm::raw_svector_ostream Out(finalName); + Out << "fn_arg:" << blockArg.getArgNumber(); + return Out.str().str(); + } + + if (auto allocaOp = dyn_cast(srcOp)) return allocaOp.getName().str(); - if (auto getElemOp = dyn_cast(v.getDefiningOp())) { + if (auto getElemOp = dyn_cast(srcOp)) { auto parent = dyn_cast(getElemOp.getStructAddr().getDefiningOp()); if (parent) { llvm::SmallString<128> finalName; @@ -475,7 +486,7 @@ static std::string getVarNameFromValue(mlir::Value v) { return Out.str().str(); } } - if (auto callOp = dyn_cast(v.getDefiningOp())) { + if (auto callOp = dyn_cast(srcOp)) { if (callOp.getCallee()) { llvm::SmallString<128> finalName; llvm::raw_svector_ostream Out(finalName); @@ -1207,9 +1218,15 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, auto dataSrcOp = data.getDefiningOp(); - // Do not handle block arguments just yet. - if (!dataSrcOp) + // Handle function arguments but not all block arguments just yet. + if (!dataSrcOp) { + auto blockArg = cast(data); + if (!blockArg.getOwner()->isEntryBlock()) + return; + getPmap()[addr].clear(); + getPmap()[addr].insert(State::getLocalValue(data)); return; + } // Ignore chains of bitcasts and update data source. Note that when // dataSrcOp gets updated, `data` might not be the most updated resource diff --git a/clang/test/CIR/Transforms/lifetime-fn-args.cpp b/clang/test/CIR/Transforms/lifetime-fn-args.cpp new file mode 100644 index 000000000000..6c1b297f1b32 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-fn-args.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +struct A { + void* ctx; + void setInfo(void** ctxPtr); +}; + +void A::setInfo(void** ctxPtr) { + if (ctxPtr != nullptr) { + *ctxPtr = ctx; // expected-remark {{pset => { fn_arg:1 }}} + } +} \ No newline at end of file From 458228248a674f2bd3e7f4d85e92211b7be663b4 Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Fri, 28 Jul 2023 10:38:14 +0300 Subject: [PATCH 1120/2301] [CIR][Lowering] Fixed LLVM generation for true boolean constants. Before this fix foo(1, 1) returned 255 in int foo(int a, int b) { return a && b; } --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 12 +++++------ clang/test/CIR/Lowering/bool-to-int.cir | 21 +++++++++++++++++++ clang/test/CIR/Lowering/bool.cir | 4 ++-- 3 files changed, 29 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/Lowering/bool-to-int.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6d6ba48ca663..3160f074109e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -679,12 +679,12 @@ class CIRConstantLowering mlir::Attribute attr = op.getValue(); if (op.getType().isa()) { - if (op.getValue() == - mlir::cir::BoolAttr::get( - getContext(), ::mlir::cir::BoolType::get(getContext()), true)) - attr = mlir::BoolAttr::get(getContext(), true); - else - attr = mlir::BoolAttr::get(getContext(), false); + int value = + (op.getValue() == + mlir::cir::BoolAttr::get( + getContext(), ::mlir::cir::BoolType::get(getContext()), true)); + attr = rewriter.getIntegerAttr(typeConverter->convertType(op.getType()), + value); } else if (op.getType().isa()) { attr = rewriter.getIntegerAttr( typeConverter->convertType(op.getType()), diff --git a/clang/test/CIR/Lowering/bool-to-int.cir b/clang/test/CIR/Lowering/bool-to-int.cir new file mode 100644 index 000000000000..d7e2e45686cc --- /dev/null +++ b/clang/test/CIR/Lowering/bool-to-int.cir @@ -0,0 +1,21 @@ +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s + +!s32i = !cir.int +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool + +module { + cir.func @foo(%arg0: !s32i, %arg1: !s32i) -> !s32i { + %1 = cir.const(#true) : !cir.bool + %2 = cir.cast(bool_to_int, %1 : !cir.bool), !s32i + cir.return %2 : !s32i + } + cir.func @bar(%arg0: !s32i, %arg1: !s32i) -> !s32i { + %1 = cir.const(#false) : !cir.bool + %2 = cir.cast(bool_to_int, %1 : !cir.bool), !s32i + cir.return %2 : !s32i + } +} + +// CHECK: ret i32 1 +// CHECK: ret i32 0 diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 834a148460ee..79b406cc1634 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -14,7 +14,7 @@ module { } // MLIR: llvm.func @foo() -// MLIR-DAG: = llvm.mlir.constant(true) : i8 +// MLIR-DAG: = llvm.mlir.constant(1 : i8) : i8 // MLIR-DAG: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64 // MLIR-DAG: = llvm.alloca [[Value]] x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr // MLIR-DAG: llvm.store %0, %2 : i8, !llvm.ptr @@ -22,5 +22,5 @@ module { // LLVM: define void @foo() // LLVM-NEXT: %1 = alloca i8, i64 1, align 1 -// LLVM-NEXT: store i8 -1, ptr %1, align 1 +// LLVM-NEXT: store i8 1, ptr %1, align 1 // LLVM-NEXT: ret void From e2dcbc8231edd9a3007c49f175da27dc39196e5e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 1 Aug 2023 16:10:36 -0300 Subject: [PATCH 1121/2301] [CIR][Lifetime] Look through loads for initializing pointer categories --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 7 +++++++ clang/test/CIR/Transforms/lifetime-this.cpp | 12 ++++++++++++ 2 files changed, 19 insertions(+) create mode 100644 clang/test/CIR/Transforms/lifetime-this.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 710e1f360a14..e2165c4af42f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1292,6 +1292,13 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, getPmap()[addr].insert(State::getLocalValue(callOp.getResult(0))); } + if (auto loadOp = dyn_cast(dataSrcOp)) { + // handle indirections through a load, a common example are temporaries + // copying the 'this' param to a subsequent call. + updatePointsTo(addr, loadOp.getAddr(), loc); + return; + } + // What should we add next? } diff --git a/clang/test/CIR/Transforms/lifetime-this.cpp b/clang/test/CIR/Transforms/lifetime-this.cpp new file mode 100644 index 000000000000..8e18af8a9e16 --- /dev/null +++ b/clang/test/CIR/Transforms/lifetime-this.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -fclangir-skip-system-headers -clangir-verify-diagnostics -emit-cir %s -o %t.cir + +#include "std-cxx.h" + +struct S { + S(int, int, const S* s); + void f(int a, int b); +}; + +void S::f(int a, int b) { + std::shared_ptr l = std::make_shared(a, b, this); // expected-remark {{pset => { this }}} +} \ No newline at end of file From 619eefaa9f6836890c95e44cf7f2f2c8e7a087e5 Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Tue, 1 Aug 2023 15:53:53 +0300 Subject: [PATCH 1122/2301] [CIR][Lowering] Support lowering of cir.ptr_diff operation. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 44 ++++++++++++++++++- clang/test/CIR/Lowering/ptrdiff.cir | 18 ++++++++ 2 files changed, 61 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/ptrdiff.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3160f074109e..4e513983b063 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -39,6 +39,7 @@ #include "mlir/IR/Operation.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Support/LLVM.h" @@ -1485,6 +1486,46 @@ class CIRStructElementAddrOpLowering } }; +class CIRPtrDiffOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + uint64_t getTypeSize(mlir::Type type, mlir::Operation &op) const { + mlir::DataLayout layout(op.getParentOfType()); + return llvm::divideCeil(layout.getTypeSizeInBits(type), 8); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::PtrDiffOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto dstTy = op.getType().cast(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + + auto lhs = rewriter.create(op.getLoc(), llvmDstTy, + adaptor.getLhs()); + auto rhs = rewriter.create(op.getLoc(), llvmDstTy, + adaptor.getRhs()); + + auto diff = + rewriter.create(op.getLoc(), llvmDstTy, lhs, rhs); + + auto ptrTy = op.getLhs().getType().cast(); + auto typeSize = getTypeSize(ptrTy.getPointee(), *op); + auto typeSizeVal = rewriter.create( + op.getLoc(), llvmDstTy, mlir::IntegerAttr::get(llvmDstTy, typeSize)); + + if (dstTy.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmDstTy, diff, + typeSizeVal); + else + rewriter.replaceOpWithNewOp(op, llvmDstTy, diff, + typeSizeVal); + + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -1496,7 +1537,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRIfLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, - CIRStructElementAddrOpLowering, CIRSwitchOpLowering>( + CIRStructElementAddrOpLowering, CIRSwitchOpLowering, + CIRPtrDiffOpLowering>( converter, patterns.getContext()); } diff --git a/clang/test/CIR/Lowering/ptrdiff.cir b/clang/test/CIR/Lowering/ptrdiff.cir new file mode 100644 index 000000000000..ff1248ddad66 --- /dev/null +++ b/clang/test/CIR/Lowering/ptrdiff.cir @@ -0,0 +1,18 @@ +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s + +!s32i = !cir.int +!u64i = !cir.int + +module { + cir.func @foo(%arg0: !cir.ptr, %arg1: !cir.ptr) -> !s32i { + %1 = cir.ptr_diff(%arg0, %arg1) : !cir.ptr -> !u64i + %2 = cir.cast(integral, %1 : !u64i), !s32i + cir.return %2 : !s32i + } +} + +// CHECK: %3 = ptrtoint ptr %0 to i64 +// CHECK-NEXT: %4 = ptrtoint ptr %1 to i64 +// CHECK-NEXT: %5 = sub i64 %3, %4 +// CHECK-NEXT: %6 = udiv i64 %5, 4 +// CHECK-NEXT: %7 = trunc i64 %6 to i32 From ad98b768ddf8131aedb84babd1ea9007ad986a51 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 10 Jul 2023 13:15:34 -0300 Subject: [PATCH 1123/2301] [CIR][CIRGen] Support function basic static variables Whenever a variable declaration is found, it is created as a global variable in the module. In C, these variables must be instantiated with the CIRGenBuilder::createVersionedGlobal to prevent naming conflicts when multiple static variables are declared across the compilation unit. In C++, name mangling is used to prevent naming conflicts. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 + clang/lib/CIR/CodeGen/CIRGenBuilder.h | 35 +++ clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 279 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 34 ++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 6 + clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 18 +- .../CodeGen/UnimplementedFeatureGuarding.h | 3 + clang/test/CIR/CodeGen/static-vars.c | 37 +++ clang/test/CIR/CodeGen/static-vars.cpp | 37 +++ 10 files changed, 437 insertions(+), 19 deletions(-) create mode 100644 clang/test/CIR/CodeGen/static-vars.c create mode 100644 clang/test/CIR/CodeGen/static-vars.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7c0fe4c19518..b206d506da5d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1304,6 +1304,9 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { return isDeclaration(); } + + /// Whether the definition of this global may be replaced at link time. + bool isWeakForLinker() { return cir::isWeakForLinker(getLinkage()); } }]; let skipDefaultBuilders = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 389566759c80..e8b52a4ebb21 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -22,11 +22,14 @@ #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Types.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/FloatingPointMode.h" +#include "llvm/ADT/StringMap.h" #include "llvm/Support/ErrorHandling.h" +#include namespace cir { @@ -38,6 +41,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict; llvm::RoundingMode DefaultConstrainedRounding = llvm::RoundingMode::Dynamic; + llvm::StringMap GlobalsVersioning; + public: CIRGenBuilderTy(mlir::MLIRContext &C, const CIRGenTypeCache &tc) : mlir::OpBuilder(&C), typeCache(tc) {} @@ -463,6 +468,36 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return Address(baseAddr, ptrTy, addr.getAlignment()); } + // FIXME(cir): CIRGenBuilder class should have an attribute with a reference + // to the module so that we don't have search for it or pass it around. + // FIXME(cir): Track a list of globals, or at least the last one inserted, so + // that we can insert globals in the same order they are defined by CIRGen. + + /// Creates a versioned global variable. If the symbol is already taken, an ID + /// will be appended to the symbol. The returned global must always be queried + /// for its name so it can be referenced correctly. + [[nodiscard]] mlir::cir::GlobalOp + createVersionedGlobal(mlir::ModuleOp module, mlir::Location loc, + mlir::StringRef name, mlir::Type type, bool isConst, + mlir::cir::GlobalLinkageKind linkage) { + mlir::OpBuilder::InsertionGuard guard(*this); + setInsertionPointToStart(module.getBody()); + + // Create a unique name if the given name is already taken. + std::string uniqueName; + if (unsigned version = GlobalsVersioning[name.str()]++) + uniqueName = name.str() + "." + std::to_string(version); + else + uniqueName = name.str(); + + return create(loc, uniqueName, type, isConst, linkage); + } + + mlir::Value createGetGlobal(mlir::cir::GlobalOp global) { + return create( + global.getLoc(), getPointerTo(global.getSymType()), global.getName()); + } + /// Cast the element type of the given address to a different type, /// preserving information like the alignment. cir::Address createElementBitCast(mlir::Location loc, cir::Address addr, diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index d76fd8afda31..5e5868b5edf2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -11,11 +11,21 @@ //===----------------------------------------------------------------------===// #include "CIRDataLayout.h" +#include "CIRGenBuilder.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "EHScopeStack.h" +#include "UnimplementedFeatureGuarding.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/BuiltinAttributeInterfaces.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/SymbolTable.h" #include "clang/AST/Decl.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" +#include using namespace cir; using namespace clang; @@ -76,7 +86,8 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { // be done as part of lowering down to LLVM. if ((!getContext().getLangOpts().OpenCL || Ty.getAddressSpace() == LangAS::opencl_constant) && - (!NRVO && !D.isEscapingByref() && CGM.isTypeConstant(Ty, true))) + (!NRVO && !D.isEscapingByref() && + CGM.isTypeConstant(Ty, /*ExcludeCtor=*/true, /*ExcludeDtor=*/false))) assert(0 && "not implemented"); // Otherwise, tell the initialization code that we're in this case. @@ -322,11 +333,22 @@ void CIRGenFunction::buildVarDecl(const VarDecl &D) { // Some function-scope variable does not have static storage but still // needs to be emitted like a static variable, e.g. a function-scope // variable in constant address space in OpenCL. - if (D.getStorageDuration() != SD_Automatic) - assert(0 && "not implemented"); + if (D.getStorageDuration() != SD_Automatic) { + // Static sampler variables translated to function calls. + if (D.getType()->isSamplerT()) + return; + + auto Linkage = CGM.getCIRLinkageVarDefinition(&D, /*IsConstant=*/false); + + // FIXME: We need to force the emission/use of a guard variable for + // some variables even if we can constant-evaluate them because + // we can't guarantee every translation unit will constant-evaluate them. + + return buildStaticVarDecl(D, Linkage); + } if (D.getType().getAddressSpace() == LangAS::opencl_local) - assert(0 && "not implemented"); + llvm_unreachable("OpenCL and address space are NYI"); assert(D.hasLocalStorage()); @@ -334,6 +356,255 @@ void CIRGenFunction::buildVarDecl(const VarDecl &D) { return buildAutoVarDecl(D); } +static std::string getStaticDeclName(CIRGenModule &CGM, const VarDecl &D) { + if (CGM.getLangOpts().CPlusPlus) + return CGM.getMangledName(&D).str(); + + // If this isn't C++, we don't need a mangled name, just a pretty one. + assert(!D.isExternallyVisible() && "name shouldn't matter"); + std::string ContextName; + const DeclContext *DC = D.getDeclContext(); + if (auto *CD = dyn_cast(DC)) + DC = cast(CD->getNonClosureContext()); + if (const auto *FD = dyn_cast(DC)) + ContextName = std::string(CGM.getMangledName(FD)); + else if (const auto *BD = dyn_cast(DC)) + llvm_unreachable("block decl context for static var is NYI"); + else if (const auto *OMD = dyn_cast(DC)) + llvm_unreachable("ObjC decl context for static var is NYI"); + else + llvm_unreachable("Unknown context for static var decl"); + + ContextName += "." + D.getNameAsString(); + return ContextName; +} + +// TODO(cir): LLVM uses a Constant base class. Maybe CIR could leverage an +// interface for all constants? +mlir::cir::GlobalOp +CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, + mlir::cir::GlobalLinkageKind Linkage) { + // In general, we don't always emit static var decls once before we reference + // them. It is possible to reference them before emitting the function that + // contains them, and it is possible to emit the containing function multiple + // times. + if (mlir::cir::GlobalOp ExistingGV = StaticLocalDeclMap[&D]) + return ExistingGV; + + QualType Ty = D.getType(); + assert(Ty->isConstantSizeType() && "VLAs can't be static"); + + // Use the label if the variable is renamed with the asm-label extension. + std::string Name; + if (D.hasAttr()) + llvm_unreachable("asm label is NYI"); + else + Name = getStaticDeclName(*this, D); + + mlir::Type LTy = getTypes().convertTypeForMem(Ty); + assert(!UnimplementedFeature::addressSpace()); + + // OpenCL variables in local address space and CUDA shared + // variables cannot have an initializer. + mlir::Attribute Init = nullptr; + if (Ty.getAddressSpace() == LangAS::opencl_local || + D.hasAttr() || D.hasAttr()) + llvm_unreachable("OpenCL & CUDA are NYI"); + else + Init = builder.getZeroInitAttr(getTypes().ConvertType(Ty)); + + mlir::cir::GlobalOp GV = builder.createVersionedGlobal( + getModule(), getLoc(D.getLocation()), Name, LTy, false, Linkage); + // TODO(cir): infer visibility from linkage in global op builder. + GV.setVisibility(getMLIRVisibilityFromCIRLinkage(Linkage)); + GV.setInitialValueAttr(Init); + GV.setAlignment(getASTContext().getDeclAlign(&D).getAsAlign().value()); + + if (supportsCOMDAT() && GV.isWeakForLinker()) + llvm_unreachable("COMDAT globals are NYI"); + + if (D.getTLSKind()) + llvm_unreachable("TLS mode is NYI"); + + setGVProperties(GV, &D); + + // Make sure the result is of the correct type. + assert(!UnimplementedFeature::addressSpace()); + + // Ensure that the static local gets initialized by making sure the parent + // function gets emitted eventually. + const Decl *DC = cast(D.getDeclContext()); + + // We can't name blocks or captured statements directly, so try to emit their + // parents. + if (isa(DC) || isa(DC)) { + DC = DC->getNonClosureContext(); + // FIXME: Ensure that global blocks get emitted. + if (!DC) + llvm_unreachable("address space is NYI"); + } + + GlobalDecl GD; + if (const auto *CD = dyn_cast(DC)) + llvm_unreachable("C++ constructors static var context is NYI"); + else if (const auto *DD = dyn_cast(DC)) + llvm_unreachable("C++ destructors static var context is NYI"); + else if (const auto *FD = dyn_cast(DC)) + GD = GlobalDecl(FD); + else { + // Don't do anything for Obj-C method decls or global closures. We should + // never defer them. + assert(isa(DC) && "unexpected parent code decl"); + } + if (GD.getDecl() && UnimplementedFeature::openMP()) { + // Disable emission of the parent function for the OpenMP device codegen. + llvm_unreachable("OpenMP is NYI"); + } + + return GV; +} + +/// Add the initializer for 'D' to the global variable that has already been +/// created for it. If the initializer has a different type than GV does, this +/// may free GV and return a different one. Otherwise it just returns GV. +mlir::cir::GlobalOp +CIRGenFunction::addInitializerToStaticVarDecl(const VarDecl &D, + mlir::cir::GlobalOp GV) { + ConstantEmitter emitter(*this); + mlir::TypedAttr Init = + emitter.tryEmitForInitializer(D).dyn_cast(); + assert(Init && "Expected typed attribute"); + + // If constant emission failed, then this should be a C++ static + // initializer. + if (!Init) { + if (!getLangOpts().CPlusPlus) + CGM.ErrorUnsupported(D.getInit(), "constant l-value expression"); + else if (D.hasFlexibleArrayInit(getContext())) + CGM.ErrorUnsupported(D.getInit(), "flexible array initializer"); + else { + // Since we have a static initializer, this global variable can't + // be constant. + GV.setConstant(false); + llvm_unreachable("C++ guarded init it NYI"); + } + return GV; + } + +#ifndef NDEBUG + CharUnits VarSize = CGM.getASTContext().getTypeSizeInChars(D.getType()) + + D.getFlexibleArrayInitChars(getContext()); + CharUnits CstSize = CharUnits::fromQuantity( + CGM.getDataLayout().getTypeAllocSize(Init.getType())); + assert(VarSize == CstSize && "Emitted constant has unexpected size"); +#endif + + // The initializer may differ in type from the global. Rewrite + // the global to match the initializer. (We have to do this + // because some types, like unions, can't be completely represented + // in the LLVM type system.) + if (GV.getSymType() != Init.getType()) { + llvm_unreachable("static decl initializer type mismatch is NYI"); + } + + bool NeedsDtor = + D.needsDestruction(getContext()) == QualType::DK_cxx_destructor; + + GV.setConstant( + CGM.isTypeConstant(D.getType(), /*ExcludeCtor=*/true, !NeedsDtor)); + GV.setInitialValueAttr(Init); + + emitter.finalize(GV); + + if (NeedsDtor) { + // We have a constant initializer, but a nontrivial destructor. We still + // need to perform a guarded "initialization" in order to register the + // destructor. + llvm_unreachable("C++ guarded init is NYI"); + } + + return GV; +} + +void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, + mlir::cir::GlobalLinkageKind Linkage) { + // Check to see if we already have a global variable for this + // declaration. This can happen when double-emitting function + // bodies, e.g. with complete and base constructors. + auto globalOp = CGM.getOrCreateStaticVarDecl(D, Linkage); + // TODO(cir): we should have a way to represent global ops as values without + // having to emit a get global op. Sometimes these emissions are not used. + auto addr = getBuilder().createGetGlobal(globalOp); + CharUnits alignment = getContext().getDeclAlign(&D); + + // Store into LocalDeclMap before generating initializer to handle + // circular references. + mlir::Type elemTy = getTypes().convertTypeForMem(D.getType()); + setAddrOfLocalVar(&D, Address(addr, elemTy, alignment)); + + // We can't have a VLA here, but we can have a pointer to a VLA, + // even though that doesn't really make any sense. + // Make sure to evaluate VLA bounds now so that we have them for later. + if (D.getType()->isVariablyModifiedType()) + llvm_unreachable("VLAs are NYI"); + + // Save the type in case adding the initializer forces a type change. + mlir::Type expectedType = addr.getType(); + + auto var = globalOp; + + // CUDA's local and local static __shared__ variables should not + // have any non-empty initializers. This is ensured by Sema. + // Whatever initializer such variable may have when it gets here is + // a no-op and should not be emitted. + bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice && + D.hasAttr(); + // If this value has an initializer, emit it. + if (D.getInit() && !isCudaSharedVar) + var = addInitializerToStaticVarDecl(D, var); + + var.setAlignment(alignment.getAsAlign().value()); + + if (D.hasAttr()) + llvm_unreachable("Global annotations are NYI"); + + if (auto *SA = D.getAttr()) + llvm_unreachable("CIR global BSS section attribute is NYI"); + if (auto *SA = D.getAttr()) + llvm_unreachable("CIR global Data section attribute is NYI"); + if (auto *SA = D.getAttr()) + llvm_unreachable("CIR global Rodata section attribute is NYI"); + if (auto *SA = D.getAttr()) + llvm_unreachable("CIR global Relro section attribute is NYI"); + + if (const SectionAttr *SA = D.getAttr()) + llvm_unreachable("CIR global object file section attribute is NYI"); + + if (D.hasAttr()) + llvm_unreachable("llvm.used metadata is NYI"); + else if (D.hasAttr()) + llvm_unreachable("llvm.compiler.used metadata is NYI"); + + // We may have to cast the constant because of the initializer + // mismatch above. + // + // FIXME: It is really dangerous to store this in the map; if anyone + // RAUW's the GV uses of this constant will be invalid. + // TODO(cir): its suppose to be possible that the initializer does not match + // the static var type. When this happens, there should be a cast here. + assert(var.getSymType() != expectedType && + "static var init type mismatch is NYI"); + CGM.setStaticLocalDeclAddress(&D, var); + + assert(!UnimplementedFeature::reportGlobalToASan()); + + // Emit global variable debug descriptor for static vars. + auto *DI = getDebugInfo(); + if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) { + llvm_unreachable("Debug info is NYI"); + } +} + void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS, SourceLocation Loc) { if (!SanOpts.has(SanitizerKind::NullabilityAssign)) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 42c5d46f8513..aaedaa6d1a24 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -41,7 +41,7 @@ static mlir::cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, const auto *FD = cast(GD.getDecl()); if (FD->hasAttr()) { - mlir::Operation* aliasee = CGM.getWeakRefReference(FD); + mlir::Operation *aliasee = CGM.getWeakRefReference(FD); return dyn_cast(aliasee); } @@ -637,11 +637,18 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { // Otherwise, it might be static local we haven't emitted yet for some // reason; most likely, because it's in an outer function. else if (VD->isStaticLocal()) { - llvm_unreachable("NYI"); + mlir::cir::GlobalOp var = CGM.getOrCreateStaticVarDecl( + *VD, CGM.getCIRLinkageVarDefinition(VD, /*IsConstant=*/false)); + addr = Address(builder.createGetGlobal(var), convertType(VD->getType()), + getContext().getDeclAlign(VD)); } else { llvm_unreachable("DeclRefExpr for decl not entered in LocalDeclMap?"); } + // Handle threadlocal function locals. + if (VD->getTLSKind() != VarDecl::TLS_None) + llvm_unreachable("thread-local storage is NYI"); + // Check for OpenMP threadprivate variables. if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && VD->hasAttr()) { @@ -661,25 +668,30 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { VD->getType(), AlignmentSource::Decl) : makeAddrLValue(addr, T, AlignmentSource::Decl); - assert(symbolTable.count(VD) && "should be already mapped"); + // Statics are defined as globals, so they are not include in the function's + // symbol table. + assert((VD->isStaticLocal() || symbolTable.count(VD)) && + "non-static locals should be already mapped"); bool isLocalStorage = VD->hasLocalStorage(); bool NonGCable = isLocalStorage && !VD->getType()->isReferenceType() && !isBlockByref; - if (NonGCable) { - // TODO: nongcable + if (NonGCable && UnimplementedFeature::setNonGC()) { + llvm_unreachable("garbage collection is NYI"); } bool isImpreciseLifetime = (isLocalStorage && !VD->hasAttr()); - if (isImpreciseLifetime) - ; // TODO: LV.setARCPreciseLifetime - // TODO: setObjCGCLValueClass(getContext(), E, LV); + if (isImpreciseLifetime && UnimplementedFeature::ARC()) + llvm_unreachable("imprecise lifetime is NYI"); + assert(!UnimplementedFeature::setObjCGCLValueClass()); - mlir::Value V = symbolTable.lookup(VD); - assert(V && "Name lookup must succeed"); + // Statics are defined as globals, so they are not include in the function's + // symbol table. + assert((VD->isStaticLocal() || symbolTable.lookup(VD)) && + "Name lookup must succeed for non-static local variables"); return LV; } @@ -1601,7 +1613,7 @@ static Address createReferenceTemporary(CIRGenFunction &CGF, QualType Ty = Inner->getType(); if (CGF.CGM.getCodeGenOpts().MergeAllConstants && (Ty->isArrayType() || Ty->isRecordType()) && - CGF.CGM.isTypeConstant(Ty, true)) + CGF.CGM.isTypeConstant(Ty, /*ExcludeCtor=*/true, /*ExcludeDtor=*/false)) assert(0 && "NYI"); return CGF.CreateMemTemp(Ty, CGF.getLoc(M->getSourceRange()), CGF.getCounterRefTmpAsString(), Alloca); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 25c4b52c07a0..dd50ee2ede30 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1270,6 +1270,12 @@ class CIRGenFunction : public CIRGenTypeCache { /// inside a function, including static vars etc. void buildVarDecl(const clang::VarDecl &D); + mlir::cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &D, + mlir::cir::GlobalOp GV); + + void buildStaticVarDecl(const VarDecl &D, + mlir::cir::GlobalLinkageKind Linkage); + /// Perform the usual unary conversions on the specified /// expression and compare the result against zero, returning an Int1Ty value. mlir::Value evaluateExprAsBool(const clang::Expr *E); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 32c92bc96929..39e4bd2601fb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -196,7 +196,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, CIRGenModule::~CIRGenModule() {} -bool CIRGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) { +bool CIRGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor, bool ExcludeDtor) { if (!Ty.isConstant(astCtx) && !Ty->isReferenceType()) return false; @@ -204,7 +204,7 @@ bool CIRGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) { if (const CXXRecordDecl *Record = astCtx.getBaseElementType(Ty)->getAsCXXRecordDecl()) return ExcludeCtor && !Record->hasMutableFields() && - Record->hasTrivialDestructor(); + (Record->hasTrivialDestructor() || ExcludeDtor); } return true; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 1afa4c117d56..709de7753e32 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -27,6 +27,7 @@ #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/ScopedHashTable.h" @@ -172,6 +173,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Tell the consumer that this variable has been instantiated. void HandleCXXStaticMemberVarInstantiation(VarDecl *VD); + llvm::DenseMap StaticLocalDeclMap; llvm::DenseMap Globals; mlir::Operation *getGlobalValue(StringRef Ref); mlir::Value getGlobalValue(const clang::Decl *D); @@ -183,6 +185,18 @@ class CIRGenModule : public CIRGenTypeCache { const VarDecl *D, ForDefinition_t IsForDefinition = NotForDefinition); + mlir::cir::GlobalOp getStaticLocalDeclAddress(const VarDecl *D) { + return StaticLocalDeclMap[D]; + } + + void setStaticLocalDeclAddress(const VarDecl *D, mlir::cir::GlobalOp C) { + StaticLocalDeclMap[D] = C; + } + + mlir::cir::GlobalOp + getOrCreateStaticVarDecl(const VarDecl &D, + mlir::cir::GlobalLinkageKind Linkage); + mlir::cir::GlobalOp buildGlobal(const VarDecl *D, mlir::Type Ty, ForDefinition_t IsForDefinition); @@ -318,7 +332,7 @@ class CIRGenModule : public CIRGenTypeCache { /// FIXME: in LLVM codegen path this is part of CGM, which doesn't seem /// like necessary, since (1) it doesn't use CGM at all and (2) is AST type /// query specific. - bool isTypeConstant(clang::QualType Ty, bool ExcludeCtor); + bool isTypeConstant(clang::QualType Ty, bool ExcludeCtor, bool ExcludeDtor); /// FIXME: this could likely be a common helper and not necessarily related /// with codegen. @@ -529,7 +543,7 @@ class CIRGenModule : public CIRGenTypeCache { mlir::cir::FuncOp NewFn); void setExtraAttributesForFunc(mlir::cir::FuncOp f, - const clang::FunctionDecl *FD); + const clang::FunctionDecl *FD); // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index accfbc63297f..8ff7cd665d6b 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -76,6 +76,9 @@ struct UnimplementedFeature { // Data layout static bool dataLayoutGetIndexTypeSizeInBits() { return false; } + // References related stuff + static bool ARC() { return false; } // Automatic reference counting + // Clang early optimizations or things defered to LLVM lowering. static bool shouldUseBZeroPlusStoresToInitialize() { return false; } static bool shouldUseMemSetToInitialize() { return false; } diff --git a/clang/test/CIR/CodeGen/static-vars.c b/clang/test/CIR/CodeGen/static-vars.c new file mode 100644 index 000000000000..55d6997ca5df --- /dev/null +++ b/clang/test/CIR/CodeGen/static-vars.c @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void func1(void) { + // Should lower default-initialized static vars. + static int i; + // CHECK-DAG: cir.global "private" internal @func1.i = #cir.int<0> : !s32i + + // Should lower constant-initialized static vars. + static int j = 1; + // CHECK-DAG: cir.global "private" internal @func1.j = #cir.int<1> : !s32i + + // Should properly shadow static vars in nested scopes. + { + static int j = 2; + // CHECK-DAG: cir.global "private" internal @func1.j.1 = #cir.int<2> : !s32i + } + { + static int j = 3; + // CHECK-DAG: cir.global "private" internal @func1.j.2 = #cir.int<3> : !s32i + } + + // Should lower basic static vars arithmetics. + j++; + // CHECK-DAG: %[[#V2:]] = cir.get_global @func1.j : cir.ptr + // CHECK-DAG: %[[#V3:]] = cir.load %[[#V2]] : cir.ptr , !s32i + // CHECK-DAG: %[[#V4:]] = cir.unary(inc, %[[#V3]]) : !s32i, !s32i + // CHECK-DAG: cir.store %[[#V4]], %[[#V2]] : !s32i, cir.ptr +} + +// Should shadow static vars on different functions. +void func2(void) { + static char i; + // CHECK-DAG: cir.global "private" internal @func2.i = #cir.int<0> : !s8i + static float j; + // CHECK-DAG: cir.global "private" internal @func2.j = 0.000000e+00 : f32 +} diff --git a/clang/test/CIR/CodeGen/static-vars.cpp b/clang/test/CIR/CodeGen/static-vars.cpp new file mode 100644 index 000000000000..7acc8c1b70f3 --- /dev/null +++ b/clang/test/CIR/CodeGen/static-vars.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void func1(void) { + // Should lower default-initialized static vars. + static int i; + // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1i = #cir.int<0> : !s32i + + // Should lower constant-initialized static vars. + static int j = 1; + // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1j = #cir.int<1> : !s32i + + // Should properly shadow static vars in nested scopes. + { + static int j = 2; + // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1j_0 = #cir.int<2> : !s32i + } + { + static int j = 3; + // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1j_1 = #cir.int<3> : !s32i + } + + // Should lower basic static vars arithmetics. + j++; + // CHECK-DAG: %[[#V2:]] = cir.get_global @_ZZ5func1vE1j : cir.ptr + // CHECK-DAG: %[[#V3:]] = cir.load %[[#V2]] : cir.ptr , !s32i + // CHECK-DAG: %[[#V4:]] = cir.unary(inc, %[[#V3]]) : !s32i, !s32i + // CHECK-DAG: cir.store %[[#V4]], %[[#V2]] : !s32i, cir.ptr +} + +// Should shadow static vars on different functions. +void func2(void) { + static char i; + // CHECK-DAG: cir.global "private" internal @_ZZ5func2vE1i = #cir.int<0> : !s8i + static float j; + // CHECK-DAG: cir.global "private" internal @_ZZ5func2vE1j = 0.000000e+00 : f32 +} From fecfadd291df238e89d8bb30b183ff72dca6358f Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 2 Aug 2023 16:59:35 -0300 Subject: [PATCH 1124/2301] [CIR][Lowering][NFC] Cleanup global ops lowering Remove boilerplate code for replacing a CIR global op with a region initialized LLVM global op and mark variables as constant whenever possible. ghstack-source-id: dcf7ff7183426700a780f08cc2e29268b10a50c5 Pull Request resolved: https://github.com/llvm/clangir/pull/199 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 39 +++++++++++-------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 4e513983b063..6783dc899777 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1008,16 +1008,29 @@ class CIRGlobalOpLowering public: using OpConversionPattern::OpConversionPattern; + /// Replace CIR global with a region initialized LLVM global and update + /// insertion point to the end of the initializer block. + inline void setupRegionInitializedLLVMGlobalOp( + mlir::cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const { + const auto llvmType = getTypeConverter()->convertType(op.getSymType()); + auto newGlobalOp = rewriter.replaceOpWithNewOp( + op, llvmType, op.getConstant(), convertLinkage(op.getLinkage()), + op.getSymName(), nullptr); + newGlobalOp.getRegion().push_back(new mlir::Block()); + rewriter.setInsertionPointToEnd(newGlobalOp.getInitializerBlock()); + } + mlir::LogicalResult matchAndRewrite(mlir::cir::GlobalOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Fetch required values to create LLVM op. - auto llvmType = getTypeConverter()->convertType(op.getSymType()); - auto isConst = op.getConstant(); - auto linkage = convertLinkage(op.getLinkage()); - auto symbol = op.getSymName(); - auto init = op.getInitialValue(); + const auto llvmType = getTypeConverter()->convertType(op.getSymType()); + const auto isConst = op.getConstant(); + const auto linkage = convertLinkage(op.getLinkage()); + const auto symbol = op.getSymName(); + const auto loc = op.getLoc(); + std::optional init = op.getInitialValue(); // Check for missing funcionalities. if (!init.has_value()) { @@ -1052,13 +1065,7 @@ class CIRGlobalOpLowering } // Initializer is a global: load global value in initializer block. else if (auto attr = init.value().dyn_cast()) { - auto newGlobalOp = rewriter.replaceOpWithNewOp( - op, llvmType, isConst, linkage, symbol, mlir::Attribute()); - mlir::OpBuilder::InsertionGuard guard(rewriter); - - // Create initializer block. - auto *newBlock = new mlir::Block(); - newGlobalOp.getRegion().push_back(newBlock); + setupRegionInitializedLLVMGlobalOp(op, rewriter); // Fetch global used as initializer. auto sourceSymbol = @@ -1066,16 +1073,14 @@ class CIRGlobalOpLowering op->getParentOfType(), attr.getValue())); // Load and return the initializer value. - rewriter.setInsertionPointToEnd(newBlock); auto addressOfOp = rewriter.create( - op->getLoc(), mlir::LLVM::LLVMPointerType::get(getContext()), + loc, mlir::LLVM::LLVMPointerType::get(getContext()), sourceSymbol.getSymName()); llvm::SmallVector offset{0}; auto gepOp = rewriter.create( - op->getLoc(), llvmType, sourceSymbol.getType(), + loc, llvmType, sourceSymbol.getType(), addressOfOp.getResult(), offset); - rewriter.create(op->getLoc(), gepOp.getResult()); - + rewriter.create(loc, gepOp.getResult()); return mlir::success(); } else if (isa(init.value())) { // TODO(cir): once LLVM's dialect has a proper zeroinitializer attribute From 961e98100afb5d67da1b028b5d1972ce67d4542b Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 2 Aug 2023 16:59:36 -0300 Subject: [PATCH 1125/2301] [CIR][Bugfix] Fix #cir.const_struct parsing At its core, this patch fixes an issue where the ConstStruct parser would infer its type from the list of elements and override its explicit attribute type that had already been parsed. This caused type mismatches since the name of the StructType would be dropped. Since `cir.typeinfo` depended on this broken parsing method to work, it was also fixed to use a `ArrayAttr` instead of a `cir.const_struct` to store its values. To simplify parsing and printing struct member on both `cir.typeinfo` and `cir.const_struct`, the custom `ConstStructMembers` parser/printer was renamed to `StructMembers` and is now used on both attributes. It was also refactored to patch malformed spacing between members and to simplify the code. ghstack-source-id: 1c913c6c00b2826fd0dac3dd18e7cb56bfe2e737 Pull Request resolved: https://github.com/llvm/clangir/pull/200 --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 12 ++-- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 60 +++++++------------ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 8 +-- clang/test/CIR/CodeGen/agg-init.cpp | 2 +- clang/test/CIR/CodeGen/struct.c | 2 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/IR/global.cir | 9 +-- clang/test/CIR/IR/invalid.cir | 4 +- clang/test/CIR/IR/struct.cir | 20 ++++--- 10 files changed, 55 insertions(+), 66 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index bdc6cd276f48..726bcfc3cc2a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -175,9 +175,7 @@ def ConstStructAttr : CIR_Attr<"ConstStruct", "const_struct", ]; let assemblyFormat = [{ - `<` - custom($type, $members) - `>` + `<` custom($members) `>` }]; let genVerifyDecl = 1; @@ -307,12 +305,12 @@ def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> { }]; let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "ConstStructAttr":$typeinfo_data); + "mlir::ArrayAttr":$data); let builders = [ AttrBuilderWithInferredContext<(ins "Type":$type, - "ConstStructAttr":$typeinfo_data), [{ - return $_get(type.getContext(), type, typeinfo_data); + "mlir::ArrayAttr":$data), [{ + return $_get(type.getContext(), type, data); }]> ]; @@ -320,7 +318,7 @@ def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> { // element type. let genVerifyDecl = 1; let assemblyFormat = [{ - `<` $typeinfo_data `>` + `<` custom($data) `>` }]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index e8b52a4ebb21..2865c6243be6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -149,7 +149,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) { auto anonStruct = getAnonConstStruct(fieldsAttr); - return mlir::cir::TypeInfoAttr::get(anonStruct.getType(), anonStruct); + return mlir::cir::TypeInfoAttr::get(anonStruct.getType(), fieldsAttr); } mlir::TypedAttr getZeroInitAttr(mlir::Type ty) { diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index c0d6f06a9554..538653c60988 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -23,6 +23,7 @@ #include "mlir/IR/Location.h" #include "mlir/IR/OpImplementation.h" #include "mlir/Support/LLVM.h" +#include "mlir/Support/LogicalResult.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/TypeSwitch.h" @@ -30,10 +31,9 @@ // ClangIR holds back AST references when available. #include "clang/AST/Decl.h" -static void printConstStructMembers(mlir::AsmPrinter &p, mlir::Type type, +static void printStructMembers(mlir::AsmPrinter &p, mlir::ArrayAttr members); -static mlir::ParseResult parseConstStructMembers(::mlir::AsmParser &parser, - mlir::Type &type, +static mlir::ParseResult parseStructMembers(::mlir::AsmParser &parser, mlir::ArrayAttr &members); #define GET_ATTRDEF_CLASSES @@ -64,47 +64,31 @@ void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const { llvm_unreachable("unexpected CIR type kind"); } -static void printConstStructMembers(mlir::AsmPrinter &p, mlir::Type type, +static void printStructMembers(mlir::AsmPrinter &printer, mlir::ArrayAttr members) { - p << "{"; - unsigned i = 0, e = members.size(); - while (i < e) { - p << members[i]; - if (e > 0 && i < e - 1) - p << ","; - i++; - } - p << "}"; + printer << '{'; + llvm::interleaveComma(members, printer); + printer << '}'; } -static ParseResult parseConstStructMembers(::mlir::AsmParser &parser, - mlir::Type &type, +static ParseResult parseStructMembers(mlir::AsmParser &parser, mlir::ArrayAttr &members) { SmallVector elts; - SmallVector tys; - if (parser - .parseCommaSeparatedList( - AsmParser::Delimiter::Braces, - [&]() { - Attribute attr; - if (parser.parseAttribute(attr).succeeded()) { - elts.push_back(attr); - if (auto tyAttr = attr.dyn_cast()) { - tys.push_back(tyAttr.getType()); - return success(); - } - parser.emitError(parser.getCurrentLocation(), - "expected a typed attribute"); - } - return failure(); - }) - .failed()) - return failure(); - auto *ctx = parser.getContext(); - members = mlir::ArrayAttr::get(ctx, elts); - type = mlir::cir::StructType::get(ctx, tys, "", /*body=*/true); - return success(); + auto delimiter = AsmParser::Delimiter::Braces; + auto result = parser.parseCommaSeparatedList(delimiter, [&]() { + mlir::TypedAttr attr; + if (parser.parseAttribute(attr).failed()) + return mlir::failure(); + elts.push_back(attr); + return mlir::success(); + }); + + if (result.failed()) + return mlir::failure(); + + members = mlir::ArrayAttr::get(parser.getContext(), elts); + return mlir::success(); } LogicalResult ConstStructAttr::verify( diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d5f024eb5e22..d204d1eb3922 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -18,6 +18,7 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Diagnostics.h" #include "mlir/IR/DialectImplementation.h" @@ -2072,14 +2073,13 @@ LogicalResult ASTRecordDeclAttr::verify( LogicalResult TypeInfoAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ::mlir::Type type, ConstStructAttr typeinfoData) { + ::mlir::Type type, ::mlir::ArrayAttr typeinfoData) { - if (mlir::cir::ConstStructAttr::verify(emitError, type, - typeinfoData.getMembers()) + if (mlir::cir::ConstStructAttr::verify(emitError, type, typeinfoData) .failed()) return failure(); - for (auto &member : typeinfoData.getMembers()) { + for (auto &member : typeinfoData) { auto gview = member.dyn_cast_or_null(); if (gview) continue; diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index f3709df97de5..486bced2fc93 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -66,7 +66,7 @@ void yo() { // CHECK: cir.func @_Z2yov() // CHECK: %0 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} -// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i,#cir.null : !cir.ptr,#cir.int<0> : !u64i}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 +// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.null : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 // CHECK: cir.store %2, %0 : !ty_22struct2EYo22, cir.ptr // CHECK: %3 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "type"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 0ca48041e29b..9292402f7bae 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -30,7 +30,7 @@ void shouldConstInitStructs(void) { // CHECK: cir.func @shouldConstInitStructs struct Foo f = {1, 2, {3, 4}}; // CHECK: %[[#V0:]] = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} - // CHECK: %[[#V1:]] = cir.const(#cir.const_struct<{#cir.int<1> : !s32i,#cir.int<2> : !s8i,#cir.const_struct<{#cir.int<3> : !s32i,#cir.int<4> : !s8i}> : !ty_22struct2EBar22}> : !ty_22struct2EFoo22) : !ty_22struct2EFoo22 + // CHECK: %[[#V1:]] = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_22struct2EBar22}> : !ty_22struct2EFoo22) : !ty_22struct2EFoo22 // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22struct2EFoo22, cir.ptr } diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index adcdb3c0807e..5e48e1c1a9d0 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -86,7 +86,7 @@ class B : public A // CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr // typeinfo for B -// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr,#cir.global_view<@_ZTS1B> : !cir.ptr,#cir.global_view<@_ZTI1A> : !cir.ptr}>> : ![[TypeInfoB]] +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr}> : ![[TypeInfoB]] // Checks for dtors in dtors.cpp diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index b5b9af61174a..6eeb940b9e17 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -1,4 +1,5 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s !s8i = !cir.int !s32i = !cir.int !s64i = !cir.int @@ -6,7 +7,7 @@ module { cir.global external @a = #cir.int<3> : !s32i cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i] : !cir.array> cir.global external @b = #cir.const_array<"example\00" : !cir.array> - cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.null : !cir.ptr}> : !cir.struct<"", !s8i, i64, !cir.ptr> + cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.null : !cir.ptr}> : !cir.struct<"", !s8i, !s64i, !cir.ptr> cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} cir.global "private" internal @c : !s32i cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} @@ -25,10 +26,10 @@ module { cir.global "private" constant external @type_info_A : !cir.ptr cir.global constant external @type_info_name_B = #cir.const_array<"1B\00" : !cir.array> - cir.global external @type_info_B = #cir.typeinfo<<{ + cir.global external @type_info_B = #cir.typeinfo<{ #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@type_info_name_B> : !cir.ptr, - #cir.global_view<@type_info_A> : !cir.ptr}>> + #cir.global_view<@type_info_A> : !cir.ptr}> : !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr > } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 17d3afcfe0e9..840149589b0c 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -307,8 +307,8 @@ module { // rid of this somehow in favor of clarity? cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr - cir.global external @type_info_B = #cir.typeinfo<<{ // expected-error {{element at index 0 has type '!cir.ptr>' but return type for this element is '!cir.ptr>'}} - #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}>> + cir.global external @type_info_B = #cir.typeinfo<{ // expected-error {{element at index 0 has type '!cir.ptr>' but return type for this element is '!cir.ptr>'}} + #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}> : !cir.struct<"", !cir.ptr> } // expected-error {{'cir.global' expected constant attribute to match type}} diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index ae6a8169e4c7..e82dc92ce431 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -2,11 +2,15 @@ !u8i = !cir.int !u16i = !cir.int +!s32i = !cir.int !u32i = !cir.int !ty_2222 = !cir.struct<"", !cir.array x 5>> !ty_22221 = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> !ty_22class2EA22 = !cir.struct<"class.A", incomplete, #cir.recdecl.ast> +// CHECK: !ty_22i22 = !cir.struct<"i", incomplete> +// CHECK: !ty_22S22 = !cir.struct<"S", !u8i, !u16i, !u32i> +!ty_22struct2ES22 = !cir.struct<"struct.S", !s32i, !s32i> module { cir.func @structs() { @@ -14,12 +18,14 @@ module { %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["i", init] cir.return } -} -// CHECK: !ty_22i22 = !cir.struct<"i", incomplete> -// CHECK: !ty_22S22 = !cir.struct<"S", !u8i, !u16i, !u32i> +// CHECK: cir.func @structs() { +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["i", init] -// CHECK-NEXT: module { -// CHECK-NEXT: cir.func @structs() { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["i", init] + cir.func @shouldSuccessfullyParseConstStructAttrs() { + %0 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22struct2ES22) : !ty_22struct2ES22 + // CHECK: cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22struct2ES22) : !ty_22struct2ES22 + cir.return + } +} From d51cec0383b58fe5780ef4efa6a16e3bebc54f7b Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 2 Aug 2023 16:59:36 -0300 Subject: [PATCH 1126/2301] [CIR][Lowering] Partially lower global #cir.const_struct initializers Adds support for CIR globals with basic struct initializers. This patch tackes only primitive, pointers, and simple nested structs. Arrays, char pointers, and other more complex types are not supported yet. ghstack-source-id: a077f04ca9ddb2649bcba0e986280ab19eaabe79 Pull Request resolved: https://github.com/llvm/clangir/pull/201 --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 1 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 107 +++++++++++++++++- clang/test/CIR/CodeGen/struct.c | 30 ++++- clang/test/CIR/Lowering/struct.cir | 28 +++++ 4 files changed, 157 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index b6c290f18da9..b4f24b09b9b7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -279,6 +279,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( llvm_unreachable("NYI"); } + // TODO(cir): emit a #cir.zero if all elements are null values. auto &builder = CGM.getBuilder(); return builder.getAnonConstStruct( mlir::ArrayAttr::get(builder.getContext(), diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6783dc899777..62c7065f642c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -54,8 +54,11 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/APInt.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/IR/DebugInfoMetadata.h" +#include "llvm/IR/DerivedTypes.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include @@ -67,6 +70,85 @@ using namespace llvm; namespace cir { namespace direct { +//===----------------------------------------------------------------------===// +// Visitors for Lowering CIR Const Attributes +//===----------------------------------------------------------------------===// + +/// Switches on the type of attribute and calls the appropriate conversion. +inline mlir::Value +lowerCirAttrAsValue(mlir::Attribute attr, mlir::Location loc, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter); + +/// IntAttr visitor. +inline mlir::Value +lowerCirAttrAsValue(mlir::cir::IntAttr intAttr, mlir::Location loc, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + return rewriter.create( + loc, converter->convertType(intAttr.getType()), intAttr.getValue()); +} + +/// NullAttr visitor. +inline mlir::Value +lowerCirAttrAsValue(mlir::cir::NullAttr nullAttr, mlir::Location loc, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + return rewriter.create( + loc, converter->convertType(nullAttr.getType())); +} + +/// FloatAttr visitor. +inline mlir::Value +lowerCirAttrAsValue(mlir::FloatAttr fltAttr, mlir::Location loc, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + return rewriter.create( + loc, converter->convertType(fltAttr.getType()), fltAttr.getValue()); +} + +/// ConstStruct visitor. +mlir::Value lowerCirAttrAsValue(mlir::cir::ConstStructAttr constStruct, + mlir::Location loc, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto llvmTy = converter->convertType(constStruct.getType()); + mlir::Value result = rewriter.create(loc, llvmTy); + + // Iteratively lower each constant element of the struct. + for (auto [idx, elt] : llvm::enumerate(constStruct.getMembers())) { + mlir::Value init = lowerCirAttrAsValue(elt, loc, rewriter, converter); + result = rewriter.create(loc, result, init, idx); + } + + return result; +} + +/// Switches on the type of attribute and calls the appropriate conversion. +inline mlir::Value +lowerCirAttrAsValue(mlir::Attribute attr, mlir::Location loc, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + if (const auto intAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(intAttr, loc, rewriter, converter); + if (const auto fltAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(fltAttr, loc, rewriter, converter); + if (const auto nullAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(nullAttr, loc, rewriter, converter); + if (const auto constStruct = attr.dyn_cast()) + return lowerCirAttrAsValue(constStruct, loc, rewriter, converter); + if (const auto constArr = attr.dyn_cast()) + llvm_unreachable("const array attribute is NYI"); + if (const auto zeroAttr = attr.dyn_cast()) + llvm_unreachable("bool attribute is NYI"); + if (const auto zeroAttr = attr.dyn_cast()) + llvm_unreachable("zero attribute is NYI"); + + llvm_unreachable("unhandled attribute type"); +} + +//===----------------------------------------------------------------------===// + mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { using CIR = mlir::cir::GlobalLinkageKind; using LLVM = mlir::LLVM::Linkage; @@ -1093,6 +1175,13 @@ class CIRGlobalOpLowering auto cirZeroAttr = mlir::cir::ZeroAttr::get(getContext(), llvmType); llvmGlobalOp->setAttr("cir.initial_value", cirZeroAttr); return mlir::success(); + } else if (const auto structAttr = + init.value().dyn_cast()) { + setupRegionInitializedLLVMGlobalOp(op, rewriter); + rewriter.create( + op->getLoc(), lowerCirAttrAsValue(structAttr, op->getLoc(), rewriter, + typeConverter)); + return mlir::success(); } else { op.emitError() << "usupported initializer '" << init.value() << "'"; return mlir::failure(); @@ -1576,10 +1665,20 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { llvm::SmallVector llvmMembers; for (auto ty : type.getMembers()) llvmMembers.push_back(converter.convertType(ty)); - auto llvmStruct = mlir::LLVM::LLVMStructType::getIdentified( - type.getContext(), type.getTypeName()); - if (llvmStruct.setBody(llvmMembers, /*isPacked=*/type.getPacked()).failed()) - llvm_unreachable("Failed to set body of struct"); + + // Struct has a name: lower as an identified struct. + mlir::LLVM::LLVMStructType llvmStruct; + if (type.getTypeName().size() != 0) { + llvmStruct = mlir::LLVM::LLVMStructType::getIdentified( + type.getContext(), type.getTypeName()); + if (llvmStruct.setBody(llvmMembers, /*isPacked=*/type.getPacked()) + .failed()) + llvm_unreachable("Failed to set body of struct"); + } else { // Struct has no name: lower as literal struct. + llvmStruct = mlir::LLVM::LLVMStructType::getLiteral( + type.getContext(), llvmMembers, /*isPacked=*/type.getPacked()); + } + return llvmStruct; }); converter.addConversion([&](mlir::cir::VoidType type) -> mlir::Type { diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 9292402f7bae..4a1db984f239 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -17,10 +17,10 @@ void baz(void) { struct Foo f; } -// CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i> -// CHECK-NEXT: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", !s32i, !s8i, !ty_22struct2EBar22> +// CHECK-DAG: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i> +// CHECK-DAG: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", !s32i, !s8i, !ty_22struct2EBar22> // CHECK-DAG: module {{.*}} { -// CHECK-NEXT: cir.func @baz() + // CHECK: cir.func @baz() // CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.return @@ -34,5 +34,25 @@ void shouldConstInitStructs(void) { // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22struct2EFoo22, cir.ptr } -// Check if global structs are zero-initialized. -// CHECK: cir.global external @bar = #cir.zero : !ty_22struct2EBar22 +// Should zero-initialize uninitialized global structs. +struct S { + int a,b; +} s; +// CHECK-DAG: cir.global external @s = #cir.zero : !ty_22struct2ES22 + +// Should initialize basic global structs. +struct S1 { + int a; + float f; + int *p; +} s1 = {1, .1, 0}; +// CHECK-DAG: cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.null : !cir.ptr}> : !ty_22struct2ES122 + +// Should initialize global nested structs. +struct S2 { + struct S2A { + int a; + } s2a; +} s2 = {{1}}; +// CHECK-DAG: cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES2A22}> : !ty_22struct2ES222 + diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index c68e0c91bb97..d9ec1eb86700 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -4,6 +4,10 @@ !s32i = !cir.int !u8i = !cir.int !ty_22struct2ES22 = !cir.struct<"struct.S", !u8i, !s32i> +!ty_22struct2ES2A22 = !cir.struct<"struct.S2A", !s32i, #cir.recdecl.ast> +!ty_22struct2ES122 = !cir.struct<"struct.S1", !s32i, f32, !cir.ptr, #cir.recdecl.ast> +!ty_22struct2ES222 = !cir.struct<"struct.S2", !ty_22struct2ES2A22, #cir.recdecl.ast> + module { cir.func @test() { %1 = cir.alloca !ty_22struct2ES22, cir.ptr , ["x"] {alignment = 4 : i64} @@ -15,4 +19,28 @@ module { // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 1] : (!llvm.ptr) -> !llvm.ptr cir.return } + + // Should lower basic #cir.const_struct initializer. + cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.null : !cir.ptr}> : !ty_22struct2ES122 + // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"struct.S1", (i32, f32, ptr)> { + // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %2 = llvm.insertvalue %1, %0[0] : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: %3 = llvm.mlir.constant(1.000000e-01 : f32) : f32 + // CHECK: %4 = llvm.insertvalue %3, %2[1] : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: %5 = llvm.mlir.zero : !llvm.ptr + // CHECK: %6 = llvm.insertvalue %5, %4[2] : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: llvm.return %6 : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: } + + // Should lower nested #cir.const_struct initializer. + cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES2A22}> : !ty_22struct2ES222 + // CHECK: llvm.mlir.global external @s2() {addr_space = 0 : i32} : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> { + // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> + // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S2A", (i32)> + // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S2A", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> + // CHECK: llvm.return %4 : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> + // CHECK: } } From 47350403b611117484f35d7fbbd1b001db7315ab Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 2 Aug 2023 16:59:36 -0300 Subject: [PATCH 1127/2301] [CIR][Lowering] Lower constant arrays of structs Lowers the particular case where an array of structs is constant-initialized. ghstack-source-id: f8389d899e07f73485658d86469d7595a4373901 Pull Request resolved: https://github.com/llvm/clangir/pull/202 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 32 ++++++++++++++++++- clang/test/CIR/CodeGen/struct.c | 5 +++ clang/test/CIR/Lowering/struct.cir | 19 +++++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 62c7065f642c..aef28aacd89f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -124,6 +124,27 @@ mlir::Value lowerCirAttrAsValue(mlir::cir::ConstStructAttr constStruct, return result; } +// ArrayAttr visitor. +mlir::Value lowerCirAttrAsValue(mlir::cir::ConstArrayAttr constArr, + mlir::Location loc, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto llvmTy = converter->convertType(constArr.getType()); + mlir::Value result = rewriter.create(loc, llvmTy); + auto arrayAttr = constArr.getElts().cast(); + auto cirArrayType = constArr.getType().cast(); + assert(cirArrayType.getEltType().isa() && + "Types other than ConstArrayAttr are NYI"); + + // Iteratively lower each constant element of the array. + for (auto [idx, elt] : llvm::enumerate(arrayAttr)) { + mlir::Value init = lowerCirAttrAsValue(elt, loc, rewriter, converter); + result = rewriter.create(loc, result, init, idx); + } + + return result; +} + /// Switches on the type of attribute and calls the appropriate conversion. inline mlir::Value lowerCirAttrAsValue(mlir::Attribute attr, mlir::Location loc, @@ -138,7 +159,7 @@ lowerCirAttrAsValue(mlir::Attribute attr, mlir::Location loc, if (const auto constStruct = attr.dyn_cast()) return lowerCirAttrAsValue(constStruct, loc, rewriter, converter); if (const auto constArr = attr.dyn_cast()) - llvm_unreachable("const array attribute is NYI"); + return lowerCirAttrAsValue(constArr, loc, rewriter, converter); if (const auto zeroAttr = attr.dyn_cast()) llvm_unreachable("bool attribute is NYI"); if (const auto zeroAttr = attr.dyn_cast()) @@ -1125,6 +1146,15 @@ class CIRGlobalOpLowering if (auto attr = constArr.getElts().dyn_cast()) { init = rewriter.getStringAttr(attr.getValue()); } else if (auto attr = constArr.getElts().dyn_cast()) { + auto eltTy = + constArr.getType().cast().getEltType(); + if (eltTy.isa()) { + setupRegionInitializedLLVMGlobalOp(op, rewriter); + rewriter.create( + op->getLoc(), lowerCirAttrAsValue(constArr, op->getLoc(), + rewriter, typeConverter)); + return mlir::success(); + } if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { op.emitError() << "unsupported lowering for #cir.const_array with " "element type " diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 4a1db984f239..5a8d97bd4b52 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -56,3 +56,8 @@ struct S2 { } s2 = {{1}}; // CHECK-DAG: cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES2A22}> : !ty_22struct2ES222 +// Should initialize global arrays of structs. +struct S3 { + int a; +} s3[3] = {{1}, {2}, {3}}; +// CHECK-DAG: cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22struct2ES322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22struct2ES322]> : !cir.array diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index d9ec1eb86700..e824426cf8e2 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -7,6 +7,7 @@ !ty_22struct2ES2A22 = !cir.struct<"struct.S2A", !s32i, #cir.recdecl.ast> !ty_22struct2ES122 = !cir.struct<"struct.S1", !s32i, f32, !cir.ptr, #cir.recdecl.ast> !ty_22struct2ES222 = !cir.struct<"struct.S2", !ty_22struct2ES2A22, #cir.recdecl.ast> +!ty_22struct2ES322 = !cir.struct<"struct.S3", !s32i, #cir.recdecl.ast> module { cir.func @test() { @@ -43,4 +44,22 @@ module { // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> // CHECK: llvm.return %4 : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> // CHECK: } + + cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22struct2ES322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22struct2ES322]> : !cir.array + // CHECK: llvm.mlir.global external @s3() {addr_space = 0 : i32} : !llvm.array<3 x struct<"struct.S3", (i32)>> { + // CHECK: %0 = llvm.mlir.undef : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> + // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S3", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: %5 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> + // CHECK: %6 = llvm.mlir.constant(2 : i32) : i32 + // CHECK: %7 = llvm.insertvalue %6, %5[0] : !llvm.struct<"struct.S3", (i32)> + // CHECK: %8 = llvm.insertvalue %7, %4[1] : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: %9 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> + // CHECK: %10 = llvm.mlir.constant(3 : i32) : i32 + // CHECK: %11 = llvm.insertvalue %10, %9[0] : !llvm.struct<"struct.S3", (i32)> + // CHECK: %12 = llvm.insertvalue %11, %8[2] : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: llvm.return %12 : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: } } From 0104ec827bd4a7066b1e140f0bc0f4dd55f69679 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 2 Aug 2023 16:59:37 -0300 Subject: [PATCH 1128/2301] [CIR][Lowering] Partially lower local #cir.const_struct initializers Updates the lowering of cir.const to support #cir.const_struct as a initializer, allowing the initialization of local structs with said attribute. ghstack-source-id: cc6c1378775bd2239c821c357e281bbc8cc3b0a7 Pull Request resolved: https://github.com/llvm/clangir/pull/203 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11 +++++++++++ clang/test/CIR/Lowering/struct.cir | 16 ++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index aef28aacd89f..71c9a6059e6c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -822,6 +822,17 @@ class CIRConstantLowering return mlir::failure(); } attr = denseAttr.value(); + } else if (const auto structAttr = + op.getValue().dyn_cast()) { + // TODO(cir): this diverges from traditional lowering. Normally the + // initializer would be a global constant that is memcopied. Here we just + // define a local constant with llvm.undef that will be stored into the + // stack. + auto initVal = + lowerCirAttrAsValue(structAttr, op.getLoc(), rewriter, typeConverter); + rewriter.replaceAllUsesWith(op, initVal); + rewriter.eraseOp(op); + return mlir::success(); } else return op.emitError() << "unsupported constant type " << op.getType(); diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index e824426cf8e2..38b9e894b8d0 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -21,6 +21,22 @@ module { cir.return } + cir.func @shouldConstInitLocalStructsWithConstStructAttr() { + %0 = cir.alloca !ty_22struct2ES2A22, cir.ptr , ["s"] {alignment = 4 : i64} + %1 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES2A22) : !ty_22struct2ES2A22 + cir.store %1, %0 : !ty_22struct2ES2A22, cir.ptr + cir.return + } + // CHECK: llvm.func @shouldConstInitLocalStructsWithConstStructAttr() + // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 + // CHECK: %1 = llvm.alloca %0 x !llvm.struct<"struct.S2A", (i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + // CHECK: %2 = llvm.mlir.undef : !llvm.struct<"struct.S2A", (i32)> + // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.struct<"struct.S2A", (i32)> + // CHECK: llvm.store %4, %1 : !llvm.struct<"struct.S2A", (i32)>, !llvm.ptr + // CHECK: llvm.return + // CHECK: } + // Should lower basic #cir.const_struct initializer. cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.null : !cir.ptr}> : !ty_22struct2ES122 // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"struct.S1", (i32, f32, ptr)> { From 042dc36c18cd3e7e17ace707b4c6e9eb650c5561 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 3 Aug 2023 15:59:43 -0400 Subject: [PATCH 1129/2301] [CIR][CIRTidy] Add config options for codegen manipulation --- .../clang-tidy/cir-tidy/CIRASTConsumer.cpp | 16 ++++++++++------ .../clang-tidy/cir-tidy/CIRASTConsumer.h | 2 ++ .../test/cir-tidy/lifetime-basic.cpp | 2 ++ 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp index 0f4107035177..d643ff8e41f5 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp @@ -10,7 +10,6 @@ #include "CIRChecks.h" #include "../utils/OptionsUtils.h" -#include "ClangTidyCheck.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Pass/Pass.h" @@ -26,7 +25,16 @@ namespace tidy { CIRASTConsumer::CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, clang::tidy::ClangTidyContext &Context) - : Context(Context) { + : Context(Context), + OptsView(ClangTidyCheck::OptionsView(cir::checks::LifetimeCheckName, + Context.getOptions().CheckOptions, + &Context)) { + // Setup CIR codegen options via config specified information. + CI.getCodeGenOpts().ClangIRBuildDeferredThreshold = + OptsView.get("CodeGenBuildDeferredThreshold", 500U); + CI.getCodeGenOpts().ClangIRSkipFunctionsFromSystemHeaders = + OptsView.get("CodeGenSkipFunctionsFromSystemHeaders", false); + Gen = std::make_unique(CI.getDiagnostics(), nullptr, CI.getCodeGenOpts()); } @@ -138,10 +146,6 @@ void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { mlir::PassManager pm(mlirCtx.get()); pm.addPass(mlir::createMergeCleanupsPass()); - clang::tidy::ClangTidyOptions Opts = Context.getOptions(); - ClangTidyCheck::OptionsView OptsView(cir::checks::LifetimeCheckName, - Opts.CheckOptions, &Context); - auto remarks = utils::options::parseStringList(OptsView.get("RemarksList", "")); auto hist = diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h index eb758b09135a..d95114519986 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h @@ -1,4 +1,5 @@ #include "../ClangTidyDiagnosticConsumer.h" +#include "ClangTidyCheck.h" #include "clang/AST/ASTContext.h" #include "clang/CIR/CIRGenerator.h" #include "clang/Frontend/CompilerInstance.h" @@ -19,6 +20,7 @@ class CIRASTConsumer : public ASTConsumer { std::unique_ptr Gen; ASTContext *AstContext{nullptr}; clang::tidy::ClangTidyContext &Context; + clang::tidy::ClangTidyCheck::OptionsView OptsView; }; } // namespace tidy } // namespace cir diff --git a/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp b/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp index f2088c59a27c..7bf684fbad66 100644 --- a/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp +++ b/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp @@ -3,6 +3,8 @@ // RUN: -config='{CheckOptions: \ // RUN: [{key: cir-lifetime-check.RemarksList, value: "all"}, \ // RUN: {key: cir-lifetime-check.HistLimit, value: "1"}, \ +// RUN: {key: cir-lifetime-check.CodeGenBuildDeferredThreshold, value: "500"}, \ +// RUN: {key: cir-lifetime-check.CodeGenSkipFunctionsFromSystemHeaders, value: "false"}, \ // RUN: {key: cir-lifetime-check.HistoryList, value: "invalid;null"}]}' \ // RUN: -- // RUN: FileCheck -input-file=%t.yaml -check-prefix=CHECK-YAML %s From c874194df30b9b800c1b06e37ae67d3ae9592581 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 7 Aug 2023 11:56:53 -0300 Subject: [PATCH 1130/2301] [CIR][CIRGen] Static initialize global addresses Global addresses are constant, so we can initialize them at compile time using CIR's global_view attribute. This patch adds codegen support for the initialization of variables with constant global addresses. Since a builder method was added for global_view, the patch also updates the codegen of global variables to use it wherever possible. ghstack-source-id: 513365c52ac1ca603a81fcf3ff124b5da39f6f14 Pull Request resolved: https://github.com/llvm/clangir/pull/204 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 17 +++++++ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 45 +++++++++++++++---- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 25 ++++------- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 12 +++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 5 +++ clang/test/CIR/CodeGen/globals.cpp | 5 +++ clang/test/CIR/CodeGen/static-vars.c | 7 +++ 7 files changed, 91 insertions(+), 25 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 2865c6243be6..dfc441f3cabc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -102,6 +102,23 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Attribute helpers // ----------------- // + + /// Get constant address of a global variable as an MLIR attribute. + /// This wrapper infers the attribute type through the global op. + mlir::cir::GlobalViewAttr getGlobalViewAttr(mlir::cir::GlobalOp globalOp, + mlir::ArrayAttr indices = {}) { + auto type = getPointerTo(globalOp.getSymType()); + return getGlobalViewAttr(type, globalOp, indices); + } + + /// Get constant address of a global variable as an MLIR attribute. + mlir::cir::GlobalViewAttr getGlobalViewAttr(mlir::cir::PointerType type, + mlir::cir::GlobalOp globalOp, + mlir::ArrayAttr indices = {}) { + auto symbol = mlir::FlatSymbolRefAttr::get(globalOp.getSymNameAttr()); + return mlir::cir::GlobalViewAttr::get(type, symbol, indices); + } + mlir::TypedAttr getZeroAttr(mlir::Type t) { return mlir::cir::ZeroAttr::get(getContext(), t); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index b4f24b09b9b7..87762476449a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -17,6 +17,7 @@ #include "CIRGenModule.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" +#include "mlir/IR/BuiltinAttributes.h" #include "clang/AST/APValue.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" @@ -999,16 +1000,16 @@ namespace { /// A struct which can be used to peephole certain kinds of finalization /// that normally happen during l-value emission. struct ConstantLValue { - using SymbolTy = mlir::SymbolRefAttr; - llvm::PointerUnion Value; + llvm::PointerUnion Value; bool HasOffsetApplied; /*implicit*/ ConstantLValue(mlir::Value value, bool hasOffsetApplied = false) : Value(value), HasOffsetApplied(hasOffsetApplied) {} - /*implicit*/ ConstantLValue(SymbolTy address) : Value(address) {} + /*implicit*/ ConstantLValue(mlir::SymbolRefAttr address) : Value(address) {} ConstantLValue(std::nullptr_t) : ConstantLValue({}, false) {} + ConstantLValue(mlir::Attribute value) : Value(value) {} }; /// A helper class for emitting constant l-values. @@ -1053,10 +1054,13 @@ class ConstantLValueEmitter /// Return the value offset. mlir::Attribute getOffset() { llvm_unreachable("NYI"); } + // TODO(cir): create a proper interface to absctract CIR constant values. + /// Apply the value offset to the given constant. - mlir::Attribute applyOffset(mlir::Attribute C) { + ConstantLValue applyOffset(ConstantLValue &C) { if (!hasNonZeroOffset()) return C; + // TODO(cir): use ptr_stride, or something... llvm_unreachable("NYI"); } @@ -1092,15 +1096,15 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { return {}; // Apply the offset if necessary and not already done. - if (!result.HasOffsetApplied && !value.is()) { - assert(0 && "NYI"); + if (!result.HasOffsetApplied && !value.is()) { + value = applyOffset(result).Value; } // Convert to the appropriate type; this could be an lvalue for // an integer. FIXME: performAddrSpaceCast if (destTy.isa()) { - if (value.is()) - return value.get(); + if (value.is()) + return value.get(); llvm_unreachable("NYI"); } @@ -1124,7 +1128,30 @@ ConstantLValue ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) { // Handle values. if (const ValueDecl *D = base.dyn_cast()) { - assert(0 && "NYI"); + // The constant always points to the canonical declaration. We want to look + // at properties of the most recent declaration at the point of emission. + D = cast(D->getMostRecentDecl()); + + if (D->hasAttr()) + llvm_unreachable("emit pointer base for weakref is NYI"); + + if (auto *FD = dyn_cast(D)) + llvm_unreachable("emit pointer base for fun decl is NYI"); + + if (auto *VD = dyn_cast(D)) { + // We can never refer to a variable with local storage. + if (!VD->hasLocalStorage()) { + if (VD->isFileVarDecl() || VD->hasExternalStorage()) + return CGM.getAddrOfGlobalVarAttr(VD); + + if (VD->isLocalVarDecl()) { + auto linkage = + CGM.getCIRLinkageVarDefinition(VD, /*IsConstant=*/false); + return CGM.getBuilder().getGlobalViewAttr( + CGM.getOrCreateStaticVarDecl(*VD, linkage)); + } + } + } } // Handle typeid(T). diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 3db2cc00b53d..21451cebb3c8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1105,9 +1105,8 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(mlir::Location loc, if (OldGV && !OldGV.isDeclaration()) { assert(!OldGV.hasAvailableExternallyLinkage() && "available_externally typeinfos not yet implemented"); - return mlir::cir::GlobalViewAttr::get( - CGM.getBuilder().getUInt8PtrTy(), - mlir::FlatSymbolRefAttr::get(OldGV.getSymNameAttr())); + return CGM.getBuilder().getGlobalViewAttr(CGM.getBuilder().getUInt8PtrTy(), + OldGV); } // Check if there is already an external RTTI descriptor for this type. @@ -1275,10 +1274,9 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, } else { SmallVector offsets{ mlir::cir::IntAttr::get(PtrDiffTy, 2)}; - field = mlir::cir::GlobalViewAttr::get( - builder.getUInt8PtrTy(), - mlir::FlatSymbolRefAttr::get(VTable.getSymNameAttr()), - mlir::ArrayAttr::get(builder.getContext(), offsets)); + auto indices = mlir::ArrayAttr::get(builder.getContext(), offsets); + field = CGM.getBuilder().getGlobalViewAttr(CGM.getBuilder().getUInt8PtrTy(), + VTable, indices); } assert(field && "expected attribute"); @@ -1347,9 +1345,7 @@ CIRGenItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(mlir::Location loc, llvm_unreachable("NYI"); } - return mlir::cir::GlobalViewAttr::get( - builder.getUInt8PtrTy(), - mlir::FlatSymbolRefAttr::get(GV.getSymNameAttr())); + return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), GV); } mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( @@ -1374,9 +1370,8 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( // for global pointers. This is very ARM64-specific. llvm_unreachable("NYI"); } else { - TypeNameField = mlir::cir::GlobalViewAttr::get( - builder.getUInt8PtrTy(), - mlir::FlatSymbolRefAttr::get(TypeName.getSymNameAttr())); + TypeNameField = + builder.getGlobalViewAttr(builder.getUInt8PtrTy(), TypeName); } Fields.push_back(TypeNameField); @@ -1541,9 +1536,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( assert(!UnimplementedFeature::setDSOLocal()); CIRGenModule::setInitializer(GV, init); - return mlir::cir::GlobalViewAttr::get( - builder.getUInt8PtrTy(), - mlir::FlatSymbolRefAttr::get(GV.getSymNameAttr())); + return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), GV);; } mlir::Attribute CIRGenItaniumCXXABI::getAddrOfRTTIDescriptor(mlir::Location loc, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 39e4bd2601fb..ae94489e81bb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -701,6 +701,18 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty, ptrTy, g.getSymName()); } +mlir::cir::GlobalViewAttr +CIRGenModule::getAddrOfGlobalVarAttr(const VarDecl *D, mlir::Type Ty, + ForDefinition_t IsForDefinition) { + assert(D->hasGlobalStorage() && "Not a global variable"); + QualType ASTTy = D->getType(); + if (!Ty) + Ty = getTypes().convertTypeForMem(ASTTy); + + auto globalOp = buildGlobal(D, Ty, IsForDefinition); + return builder.getGlobalViewAttr(builder.getPointerTo(Ty), globalOp); +} + mlir::Operation* CIRGenModule::getWeakRefReference(const ValueDecl *VD) { const AliasAttr *AA = VD->getAttr(); assert(AA && "No alias?"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 709de7753e32..96f14b433850 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -231,6 +231,11 @@ class CIRGenModule : public CIRGenTypeCache { getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty = {}, ForDefinition_t IsForDefinition = NotForDefinition); + /// Return the mlir::GlobalViewAttr for the address of the given global. + mlir::cir::GlobalViewAttr + getAddrOfGlobalVarAttr(const VarDecl *D, mlir::Type Ty = {}, + ForDefinition_t IsForDefinition = NotForDefinition); + /// Get a reference to the target of VD. mlir::Operation* getWeakRefReference(const ValueDecl *VD); diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 340e68320129..b14cfe46be46 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -115,3 +115,8 @@ int testExternVar(void) { return externVar; } // CHECK: cir.global "private" external @externVar : !s32i // CHECK: cir.func @{{.+}}testExternVar // CHECK: cir.get_global @externVar : cir.ptr + +// Should constant initialize global with constant address. +int var = 1; +int *constAddr = &var; +// CHECK-DAG: cir.global external @constAddr = #cir.global_view<@var> : !cir.ptr diff --git a/clang/test/CIR/CodeGen/static-vars.c b/clang/test/CIR/CodeGen/static-vars.c index 55d6997ca5df..1144b4837d94 100644 --- a/clang/test/CIR/CodeGen/static-vars.c +++ b/clang/test/CIR/CodeGen/static-vars.c @@ -35,3 +35,10 @@ void func2(void) { static float j; // CHECK-DAG: cir.global "private" internal @func2.j = 0.000000e+00 : f32 } + +// Should const initialize static vars with constant addresses. +void func3(void) { + static int var; + static int *constAddr = &var; + // CHECK-DAG: cir.global "private" internal @func3.constAddr = #cir.global_view<@func3.var> : !cir.ptr +} From fccb06a3f50423c5120930699f948ebd4de58469 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 7 Aug 2023 11:56:53 -0300 Subject: [PATCH 1131/2301] [CIR][Lowering] Lower globals with global_view initializer Adds lowering logic for CIR's global_view attributes with no indexes. This is done by converting the global to a region-initialized LLVM global operation, where the region returns the address of the global used in the gloval_view initializer attribute. ghstack-source-id: a9452eddbd516553273461a8187afcebc211e4d3 Pull Request resolved: https://github.com/llvm/clangir/pull/205 --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 9 +++++++++ clang/test/CIR/Lowering/globals.cir | 11 +++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 71c9a6059e6c..b5551776c402 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1223,6 +1223,15 @@ class CIRGlobalOpLowering op->getLoc(), lowerCirAttrAsValue(structAttr, op->getLoc(), rewriter, typeConverter)); return mlir::success(); + } else if (auto attr = init.value().dyn_cast()) { + setupRegionInitializedLLVMGlobalOp(op, rewriter); + + // Return the address of the global symbol. + auto elementType = typeConverter->convertType(attr.getType()); + auto addrOfOp = rewriter.create( + op->getLoc(), elementType, attr.getSymbol()); + rewriter.create(op->getLoc(), addrOfOp.getResult()); + return mlir::success(); } else { op.emitError() << "usupported initializer '" << init.value() << "'"; return mlir::failure(); diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 325ef58bf4f1..0a77d721ec5f 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -1,5 +1,7 @@ -// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM !s16i = !cir.int !s32i = !cir.int @@ -26,6 +28,11 @@ module { // MLIR: } // LLVM: @.str = internal constant [8 x i8] c"example\00" // LLVM: @s = global ptr @.str + cir.global external @aPtr = #cir.global_view<@a> : !cir.ptr + // MLIR: llvm.mlir.global external @aPtr() {addr_space = 0 : i32} : !llvm.ptr { + // MLIR: %0 = llvm.mlir.addressof @a : !llvm.ptr + // MLIR: llvm.return %0 : !llvm.ptr + // MLIR: } cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global external @s1 = @".str1": !cir.ptr cir.global external @s2 = @".str": !cir.ptr From acf2f42bcf5f852e9a2fa66d824678c75e6c830d Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 7 Aug 2023 12:04:53 -0300 Subject: [PATCH 1132/2301] [CIR][Bugfix] Fix cir.array getTypeSizeInBits method Constant initialization of static local arrays would fail due to a mismatch between the variable and the initializer type size. This patch fixes the data layout interface implementation for the cir.array type. A complete array in C/C++ should have its type size in bits equal to the size of the array times the size of the element type. ghstack-source-id: 56f3f2918b23309210ad026017bafa37ca03b2d4 Pull Request resolved: https://github.com/llvm/clangir/pull/206 --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 10 +++++++--- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 2 +- clang/test/CIR/CodeGen/static-vars.c | 7 +++++++ 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 87762476449a..72448be3ddc2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -910,11 +910,15 @@ class ConstExprEmitter assert(CGM.getASTContext().hasSameUnqualifiedType(Ty, Arg->getType()) && "argument to copy ctor is of wrong type"); - return Visit(Arg, Ty); + // Look through the temporary; it's just converting the value to an lvalue + // to pass it to the constructor. + if (auto *MTE = dyn_cast(Arg)) + return Visit(MTE->getSubExpr(), Ty); + // Don't try to support arbitrary lvalue-to-rvalue conversions for now. + return nullptr; } - assert(0 && "not implemented"); - return {}; + llvm_unreachable("NYI"); } mlir::Attribute VisitStringLiteral(StringLiteral *E, QualType T) { diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index a0b8a0c20a61..30ce9fc2151f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -202,7 +202,7 @@ uint64_t PointerType::getPreferredAlignment( llvm::TypeSize ArrayType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { - return dataLayout.getTypeSizeInBits(getEltType()); + return getSize() * dataLayout.getTypeSizeInBits(getEltType()); } uint64_t diff --git a/clang/test/CIR/CodeGen/static-vars.c b/clang/test/CIR/CodeGen/static-vars.c index 1144b4837d94..26f94bd62e64 100644 --- a/clang/test/CIR/CodeGen/static-vars.c +++ b/clang/test/CIR/CodeGen/static-vars.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * void func1(void) { // Should lower default-initialized static vars. @@ -42,3 +43,9 @@ void func3(void) { static int *constAddr = &var; // CHECK-DAG: cir.global "private" internal @func3.constAddr = #cir.global_view<@func3.var> : !cir.ptr } + +// Should match type size in bytes between var and initializer. +void func4(void) { + static char string[] = "Hello"; + // CHECK-DAG: cir.global "private" internal @func4.string = #cir.const_array<"Hello\00" : !cir.array> : !cir.array +} From d07c9443452468ef835401f5eebeae49c117fb48 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 7 Aug 2023 12:04:53 -0300 Subject: [PATCH 1133/2301] [CIR][CIRGen] Use #cir.zero on zero-initialized global arrays ghstack-source-id: 1f793b2abcb144ab10b1ddbd99f12d1dcc6c8707 Pull Request resolved: https://github.com/llvm/clangir/pull/207 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 12 ++---------- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 5 ++--- clang/test/CIR/CodeGen/array.cpp | 4 ++++ clang/test/CIR/CodeGen/globals.c | 4 ++-- clang/test/CIR/IR/invalid.cir | 2 +- 6 files changed, 12 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index dfc441f3cabc..c128d9e7fc4c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -174,16 +174,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return mlir::cir::IntAttr::get(ty, 0); if (ty.isa()) return mlir::FloatAttr::get(ty, 0.0); - if (auto arrTy = ty.dyn_cast()) { - // FIXME(cir): We should have a proper zero initializer CIR instead of - // manually pumping zeros into the array. - assert(!UnimplementedFeature::zeroInitializer()); - auto values = llvm::SmallVector(); - auto zero = getZeroInitAttr(arrTy.getEltType()); - for (unsigned i = 0, e = arrTy.getSize(); i < e; ++i) - values.push_back(zero); - return getConstArray(mlir::ArrayAttr::get(getContext(), values), arrTy); - } + if (auto arrTy = ty.dyn_cast()) + return getZeroAttr(arrTy); if (auto ptrTy = ty.dyn_cast()) return getNullPtrAttr(ptrTy); if (auto structTy = ty.dyn_cast()) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 72448be3ddc2..4b85f73df2ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -957,7 +957,7 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, } if (NonzeroLength == 0) - assert(0 && "NYE"); + return builder.getZeroInitAttr(DesiredType); // Add a zeroinitializer array filler if we have lots of trailing zeroes. unsigned TrailingZeroes = ArrayBound - NonzeroLength; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d204d1eb3922..a8778b4ab011 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -170,10 +170,9 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, } if (attrType.isa()) { - // FIXME: should also support arrays / const_arrays. - if (opType.isa<::mlir::cir::StructType>()) + if (opType.isa<::mlir::cir::StructType, ::mlir::cir::ArrayType>()) return success(); - return op->emitOpError("zero expects struct type"); + return op->emitOpError("zero expects struct or array type"); } if (attrType.isa()) { diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index f5a0cb459cc5..b89c115afb84 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -61,3 +61,7 @@ int multidim(int i, int j) { // CHECK: %7 = cir.load %{{.+}} : cir.ptr , !s32i // CHECK: %8 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr // CHECK: %9 = cir.ptr_stride(%8 : !cir.ptr, %7 : !s32i), !cir.ptr + +// Should globally zero-initialize null arrays. +int globalNullArr[] = {0, 0}; +// CHECK: cir.global external @globalNullArr = #cir.zero : !cir.array diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 311c747d0d98..08f9563a032d 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -36,5 +36,5 @@ int tentativeD[]; float zeroInitFlt[2]; // CHECK: cir.global external @tentativeA = #cir.int<0> : !s32i // CHECK: cir.global external @tentativeC = 0.000000e+00 : f32 -// CHECK: cir.global external @tentativeD = #cir.const_array<[#cir.int<0> : !s32i]> : !cir.array -// CHECK: cir.global external @zeroInitFlt = #cir.const_array<[0.000000e+00 : f32, 0.000000e+00 : f32]> : !cir.array +// CHECK: cir.global external @tentativeD = #cir.zero : !cir.array +// CHECK: cir.global external @zeroInitFlt = #cir.zero : !cir.array diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 840149589b0c..b81e42ca4507 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -259,7 +259,7 @@ cir.func @unary1() { !u32i = !cir.int module { - cir.global external @v = #cir.zero : !u32i // expected-error {{zero expects struct type}} + cir.global external @v = #cir.zero : !u32i // expected-error {{zero expects struct or array type}} } // ----- From 9adf7f9c3cba1d6057836610546d5d7ebcb86fa3 Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Fri, 4 Aug 2023 12:53:28 +0300 Subject: [PATCH 1134/2301] [CIR][Lowering] Lower global multidimensional array initializers. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 ++++++++ .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 15 +++------------ clang/test/CIR/CodeGen/globals.c | 9 +++++++++ clang/test/CIR/Lowering/globals.cir | 6 ++++++ 4 files changed, 26 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index c128d9e7fc4c..8c701d8a6570 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -209,6 +209,14 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return true; } + if (const auto arrayVal = attr.dyn_cast()) { + for (const auto elt : arrayVal.getElts().cast()) { + if (!isNullValue(elt)) + return false; + } + return true; + } + llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b5551776c402..31e7e0a8356e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -132,9 +132,6 @@ mlir::Value lowerCirAttrAsValue(mlir::cir::ConstArrayAttr constArr, auto llvmTy = converter->convertType(constArr.getType()); mlir::Value result = rewriter.create(loc, llvmTy); auto arrayAttr = constArr.getElts().cast(); - auto cirArrayType = constArr.getType().cast(); - assert(cirArrayType.getEltType().isa() && - "Types other than ConstArrayAttr are NYI"); // Iteratively lower each constant element of the array. for (auto [idx, elt] : llvm::enumerate(arrayAttr)) { @@ -1157,21 +1154,15 @@ class CIRGlobalOpLowering if (auto attr = constArr.getElts().dyn_cast()) { init = rewriter.getStringAttr(attr.getValue()); } else if (auto attr = constArr.getElts().dyn_cast()) { - auto eltTy = - constArr.getType().cast().getEltType(); - if (eltTy.isa()) { + // Failed to use a compact attribute as an initializer: + // initialize elements individually. + if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( op->getLoc(), lowerCirAttrAsValue(constArr, op->getLoc(), rewriter, typeConverter)); return mlir::success(); } - if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { - op.emitError() << "unsupported lowering for #cir.const_array with " - "element type " - << op.getSymType(); - return mlir::failure(); - } } else { op.emitError() << "unsupported lowering for #cir.const_array with value " diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 08f9563a032d..2ab1057b523a 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -27,6 +27,15 @@ int tentativeE[]; int tentativeE[2] = {1, 2}; // CHECK: cir.global external @tentativeE = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array +int twoDim[2][2] = {{1, 2}, {3, 4}}; +// CHECK: cir.global external @twoDim = #cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.array]> : !cir.array x 2> + +struct { + int x; + int y[2][2]; +} nestedTwoDim = {1, {{2, 3}, {4, 5}}}; +// CHECK: cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22struct2Eanon22 + // TODO: test tentatives with internal linkage. // Tentative definition is THE definition. Should be zero-initialized. diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 0a77d721ec5f..2e548dcc8efb 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -10,6 +10,8 @@ !u32i = !cir.int !u64i = !cir.int !u8i = !cir.int +!ty_22struct2EA22 = !cir.struct<"struct.A", !s32i, !cir.array x 2>, #cir.recdecl.ast> + module { cir.global external @a = #cir.int<3> : !s32i cir.global external @c = #cir.int<2> : !u64i @@ -83,6 +85,10 @@ module { cir.global external @ll = #cir.const_array<[#cir.int<999999999> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i]> : !cir.array // MLIR: llvm.mlir.global external @ll(dense<[999999999, 0, 0, 0]> : tensor<4xi64>) {addr_space = 0 : i32} : !llvm.array<4 x i64> // LLVM: @ll = global [4 x i64] [i64 999999999, i64 0, i64 0, i64 0] + cir.global external @twoDim = #cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.array]> : !cir.array x 2> + // LLVM: @twoDim = global [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 1, i32 2], [2 x i32] [i32 3, i32 4{{\]\]}} + cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22struct2EA22 + // LLVM: @nestedTwoDim = global %struct.A { i32 1, [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 2, i32 3], [2 x i32] [i32 4, i32 5{{\]\]}} } cir.func @_Z11get_globalsv() { %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} %1 = cir.alloca !cir.ptr, cir.ptr >, ["u", init] {alignment = 8 : i64} From a9554b5c4ca931e1af712018318c8ba6a64bc7fc Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Mon, 7 Aug 2023 11:02:35 +0300 Subject: [PATCH 1135/2301] [CIR][Lowering] Lower nested arrays of simple types to dense attributes instead of region initializers. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 41 ++++++++++++++----- clang/test/CIR/Lowering/globals.cir | 1 + 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 31e7e0a8356e..a9708f526a9c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -727,17 +727,31 @@ convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, llvm::ArrayRef(values)); } +template +void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, + llvm::SmallVectorImpl &values) { + auto arrayAttr = attr.getElts().cast(); + for (auto eltAttr : arrayAttr) { + if (auto valueAttr = eltAttr.dyn_cast()) { + values.push_back(valueAttr.getValue()); + } else if (auto subArrayAttr = + eltAttr.dyn_cast()) { + convertToDenseElementsAttrImpl(subArrayAttr, values); + } else { + llvm_unreachable("unknown element in ConstArrayAttr"); + } + } +} + template mlir::DenseElementsAttr -convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, mlir::Type type) { +convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, + const llvm::SmallVectorImpl &dims, + mlir::Type type) { auto values = llvm::SmallVector{}; - auto arrayAttr = attr.getElts().dyn_cast(); - assert(arrayAttr && "expected array here"); - for (auto element : arrayAttr) - values.push_back(element.cast().getValue()); - return mlir::DenseElementsAttr::get( - mlir::RankedTensorType::get({(int64_t)values.size()}, type), - llvm::ArrayRef(values)); + convertToDenseElementsAttrImpl(attr, values); + return mlir::DenseElementsAttr::get(mlir::RankedTensorType::get(dims, type), + llvm::ArrayRef(values)); } std::optional @@ -753,7 +767,12 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, assert(cirArrayType && "cir::ConstArrayAttr is not a cir::ArrayType"); // Is a ConstArrayAttr with an cir::ArrayType: fetch element type. - auto type = cirArrayType.getEltType(); + mlir::Type type = cirArrayType; + auto dims = llvm::SmallVector{}; + while (auto arrayType = type.dyn_cast()) { + dims.push_back(arrayType.getSize()); + type = arrayType.getEltType(); + } // Convert array attr to LLVM compatible dense elements attr. if (constArr.getElts().isa()) @@ -761,10 +780,10 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, converter->convertType(type)); if (type.isa()) return convertToDenseElementsAttr( - constArr, converter->convertType(type)); + constArr, dims, converter->convertType(type)); if (type.isa()) return convertToDenseElementsAttr( - constArr, converter->convertType(type)); + constArr, dims, converter->convertType(type)); return std::nullopt; } diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 2e548dcc8efb..c4628f97cc5e 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -86,6 +86,7 @@ module { // MLIR: llvm.mlir.global external @ll(dense<[999999999, 0, 0, 0]> : tensor<4xi64>) {addr_space = 0 : i32} : !llvm.array<4 x i64> // LLVM: @ll = global [4 x i64] [i64 999999999, i64 0, i64 0, i64 0] cir.global external @twoDim = #cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.array]> : !cir.array x 2> + // MLIR: llvm.mlir.global external @twoDim(dense<{{\[\[}}1, 2], [3, 4{{\]\]}}> : tensor<2x2xi32>) {addr_space = 0 : i32} : !llvm.array<2 x array<2 x i32>> // LLVM: @twoDim = global [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 1, i32 2], [2 x i32] [i32 3, i32 4{{\]\]}} cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22struct2EA22 // LLVM: @nestedTwoDim = global %struct.A { i32 1, [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 2, i32 3], [2 x i32] [i32 4, i32 5{{\]\]}} } From 4022e87e357a9415320212c850967c7f4c3d3433 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 7 Aug 2023 17:27:54 -0300 Subject: [PATCH 1136/2301] [CIR][Bugfix] Fix vtableAttr const struct usage PR #200 broke the vtableAttr usage, as it was not properly refactored to use ArrayAttr instead of ConstStructAttr to store its members. --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 6 +++--- clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 6 +++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 11 ++++------- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/IR/vtableAttr.cir | 9 +++++++++ 5 files changed, 20 insertions(+), 14 deletions(-) create mode 100644 clang/test/CIR/IR/vtableAttr.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 726bcfc3cc2a..2293638afbe2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -347,18 +347,18 @@ def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> { // `vtable_data` is const struct with one element, containing an array of // vtable information. let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "ConstStructAttr":$vtable_data); + "ArrayAttr":$vtable_data); let builders = [ AttrBuilderWithInferredContext<(ins "Type":$type, - "ConstStructAttr":$vtable_data), [{ + "ArrayAttr":$vtable_data), [{ return $_get(type.getContext(), type, vtable_data); }]> ]; let genVerifyDecl = 1; let assemblyFormat = [{ - `<` $vtable_data `>` + `<` custom($vtable_data) `>` }]; } diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h index 99c2e8f6601d..b4ff54e835be 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -406,9 +406,9 @@ class ConstantAggregateBuilderTemplateBase assert(initCSA && "expected #cir.const_struct attribute to represent vtable data"); return this->Builder.setGlobalInitializer( - global, forVTable - ? mlir::cir::VTableAttr::get(initCSA.getType(), initCSA) - : init); + global, forVTable ? mlir::cir::VTableAttr::get(initCSA.getType(), + initCSA.getMembers()) + : init); } /// Given that this builder was created by beginning an array or struct diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a8778b4ab011..572299700b1e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2091,28 +2091,25 @@ LogicalResult TypeInfoAttr::verify( LogicalResult VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ::mlir::Type type, ConstStructAttr vtableData) { + ::mlir::Type type, ::mlir::ArrayAttr vtableData) { auto sTy = type.dyn_cast_or_null(); if (!sTy) { emitError() << "expected !cir.struct type result"; return failure(); } - if (sTy.getMembers().size() != 1 || vtableData.getMembers().size() != 1) { + if (sTy.getMembers().size() != 1 || vtableData.size() != 1) { emitError() << "expected struct type with only one subtype"; return failure(); } auto arrayTy = sTy.getMembers()[0].dyn_cast(); - auto constArrayAttr = - vtableData.getMembers()[0].dyn_cast(); + auto constArrayAttr = vtableData[0].dyn_cast(); if (!arrayTy || !constArrayAttr) { emitError() << "expected struct type with one array element"; return failure(); } - if (mlir::cir::ConstStructAttr::verify(emitError, type, - vtableData.getMembers()) - .failed()) + if (mlir::cir::ConstStructAttr::verify(emitError, type, vtableData).failed()) return failure(); LogicalResult eltTypeCheck = success(); diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 5e48e1c1a9d0..6978e561a9f9 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -74,7 +74,7 @@ class B : public A // CHECK: } // vtable for B -// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<<{#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}>> : ![[VTableTypeA]] +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}> : ![[VTableTypeA]] // vtable for __cxxabiv1::__si_class_type_info // CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> diff --git a/clang/test/CIR/IR/vtableAttr.cir b/clang/test/CIR/IR/vtableAttr.cir new file mode 100644 index 000000000000..5c9f414feb98 --- /dev/null +++ b/clang/test/CIR/IR/vtableAttr.cir @@ -0,0 +1,9 @@ +// RUN: cir-opt %s | FileCheck %s + +!u8i = !cir.int +!ty_2222 = !cir.struct<"", !cir.array x 1>> +module { + // Should parse VTable attribute. + cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.null : !cir.ptr]> : !cir.array x 1>}> : !ty_2222 + // CHECK: cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.null : !cir.ptr]> : !cir.array x 1>}> : !ty_2222 +} From 529787e5e2b30b88363cba64e3d226baeb8b158f Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Mon, 7 Aug 2023 15:42:54 -0700 Subject: [PATCH 1137/2301] [CIR][CodeGen] Emit globals with constructor initializer (#197) This change does the CIR generation for globals initialized by a constructor call. It currently only covers C++ to CIR generation. The corresponding LLVM lowering will be in a follow-up commit. A motivating example is ``` class Init { friend class ios_base; public: Init(bool); ~Init(); private: static bool _S_synced_with_stdio; }; static Init ioinit(true); ``` Unlike what the default Clang codegen generates LLVM that detaches the initialization code from the global var definition (like below), we are taking a different approach that keeps them together, which we think will make the later dataflow analysis/transform easier. ``` @_ZL8ioinit = internal global %class.Init zeroinitializer, align 1, !dbg !0 define internal void @cxx_global_var_init() #0 section ".text.startup" !dbg !23 { entry: call void @_ZN4InitC2Ev(ptr noundef nonnull align 1 dereferenceable(1) @_ZL8ioinit), !dbg !27 %0 = call i32 @cxa_atexit(ptr @_ZN4InitD1Ev, ptr @_ZL8ioinit, ptr @dso_handle) #3, !dbg !29 ret void, !dbg !27 } ``` So on CIR, we have something like: ``` cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { %0 = cir.get_global @_ZL8__ioinit : cir.ptr loc(#loc8) %1 = cir.const(#true) : !cir.bool loc(#loc5) cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () loc(#loc6) } ``` The destructor support will also be in a separate change. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 18 +-- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 45 +++++++ clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp | 51 +++++++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 11 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 8 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 10 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 30 ++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 8 ++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 124 ++++++++++++++----- clang/test/CIR/CodeGen/static.cpp | 24 ++++ clang/test/CIR/IR/global.cir | 13 ++ 12 files changed, 291 insertions(+), 55 deletions(-) create mode 100644 clang/test/CIR/CodeGen/static.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b206d506da5d..e61b3d8e8c8b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -571,11 +571,12 @@ def YieldOpKind : I32EnumAttr< def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "LoopOp", "AwaitOp", - "TernaryOp"]>]> { + "TernaryOp", "GlobalOp"]>]> { let summary = "Terminate CIR regions"; let description = [{ The `cir.yield` operation terminates regions on different CIR operations: - `cir.if`, `cir.scope`, `cir.switch`, `cir.loop`, `cir.await` and `cir.ternary`. + `cir.if`, `cir.scope`, `cir.switch`, `cir.loop`, `cir.await`, `cir.ternary` + and `cir.global`. Might yield an SSA value and the semantics of how the values are yielded is defined by the parent operation. @@ -1242,7 +1243,7 @@ def SignedOverflowBehaviorEnum : I32EnumAttr< } -def GlobalOp : CIR_Op<"global", [Symbol]> { +def GlobalOp : CIR_Op<"global", [Symbol, DeclareOpInterfaceMethods, NoRegionArguments]> { let summary = "Declares or defines a global variable"; let description = [{ The `cir.global` operation declares or defines a named global variable. @@ -1280,19 +1281,19 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { OptionalAttr:$initial_value, UnitAttr:$constant, OptionalAttr:$alignment); - + let regions = (region AnyRegion:$ctorRegion); let assemblyFormat = [{ ($sym_visibility^)? (`constant` $constant^)? $linkage $sym_name - custom($sym_type, $initial_value) + custom($sym_type, $initial_value, $ctorRegion) attr-dict }]; let extraClassDeclaration = [{ bool isDeclaration() { - return !getInitialValue(); + return !getInitialValue() && getCtorRegion().empty(); } bool hasInitializer() { return !isDeclaration(); } bool hasAvailableExternallyLinkage() { @@ -1318,8 +1319,9 @@ def GlobalOp : CIR_Op<"global", [Symbol]> { CArg<"bool", "false">:$isConstant, // CIR defaults to external linkage. CArg<"cir::GlobalLinkageKind", - "cir::GlobalLinkageKind::ExternalLinkage">:$linkage - )> + "cir::GlobalLinkageKind::ExternalLinkage">:$linkage, + CArg<"function_ref", + "nullptr">:$ctorBuilder)> ]; let hasVerifier = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 45252ba732f4..68ba9dfaaa64 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -20,6 +20,34 @@ using namespace clang; using namespace cir; +static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, + Address DeclPtr) { + assert((D->hasGlobalStorage() || + (D->hasLocalStorage() && + CGF.getContext().getLangOpts().OpenCLCPlusPlus)) && + "VarDecl must have global or local (in the case of OpenCL) storage!"); + assert(!D->getType()->isReferenceType() && + "Should not call buildDeclInit on a reference!"); + + QualType type = D->getType(); + LValue lv = CGF.makeAddrLValue(DeclPtr, type); + + const Expr *Init = D->getInit(); + switch (CIRGenFunction::getEvaluationKind(type)) { + case TEK_Aggregate: + CGF.buildAggExpr( + Init, AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, + AggValueSlot::DoesNotOverlap)); + return; + case TEK_Scalar: + llvm_unreachable("scalar evaluation NYI"); + case TEK_Complex: + llvm_unreachable("complext evaluation NYI"); + } +} + mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { const auto &FnInfo = getTypes().arrangeCXXStructorDeclaration(GD); auto Fn = getAddrOfCXXStructor(GD, &FnInfo, /*FnType=*/nullptr, @@ -38,3 +66,20 @@ mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { // TODO: SetLLVMFunctionAttributesForDefinition return Fn; } + +void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, + mlir::cir::GlobalOp Addr) { + CIRGenFunction CGF{*this, builder, true}; + CurCGF = &CGF; + CurCGF->CurFn = Addr; + { + mlir::OpBuilder::InsertionGuard guard(builder); + auto block = builder.createBlock(&Addr.getCtorRegion()); + builder.setInsertionPointToStart(block); + Address DeclAddr(getAddrOfGlobalVar(D), getASTContext().getDeclAlign(D)); + buildDeclInit(CGF, D, DeclAddr); + builder.setInsertionPointToEnd(block); + builder.create(Addr->getLoc()); + } + CurCGF = nullptr; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 72f96900eb28..629e186a5f2b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -254,7 +254,9 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { auto openCurlyLoc = getLoc(S.getBeginLoc()); auto nullPtrCst = builder.getNullPtr(VoidPtrTy, openCurlyLoc); - CurFn.setCoroutineAttr(mlir::UnitAttr::get(builder.getContext())); + auto Fn = dyn_cast(CurFn); + assert(Fn && "other callables NYI"); + Fn.setCoroutineAttr(mlir::UnitAttr::get(builder.getContext())); auto coroId = buildCoroIDBuiltinCall(openCurlyLoc, nullPtrCst); createCoroData(*this, CurCoro, coroId); diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp index 594ce748d472..ee3426699541 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenFunction.h" #include "CIRGenModule.h" #include "TargetInfo.h" #include "clang/AST/Attr.h" @@ -28,4 +29,52 @@ void CIRGenModule::buildCXXGlobalInitFunc() { return; assert(0 && "NYE"); -} \ No newline at end of file +} + +void CIRGenModule::buildGlobalVarDeclInit(const VarDecl *D, + mlir::cir::GlobalOp Addr, + bool PerformInit) { + // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__, + // __constant__ and __shared__ variables defined in namespace scope, + // that are of class type, cannot have a non-empty constructor. All + // the checks have been done in Sema by now. Whatever initializers + // are allowed are empty and we just need to ignore them here. + if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit && + (D->hasAttr() || D->hasAttr() || + D->hasAttr())) + return; + + assert(!getLangOpts().OpenMP && "OpenMP global var init not implemented"); + + // Check if we've already initialized this decl. + auto I = DelayedCXXInitPosition.find(D); + if (I != DelayedCXXInitPosition.end() && I->second == ~0U) + return; + + if (PerformInit) { + QualType T = D->getType(); + + // TODO: handle address space + // The address space of a static local variable (DeclPtr) may be different + // from the address space of the "this" argument of the constructor. In that + // case, we need an addrspacecast before calling the constructor. + // + // struct StructWithCtor { + // __device__ StructWithCtor() {...} + // }; + // __device__ void foo() { + // __shared__ StructWithCtor s; + // ... + // } + // + // For example, in the above CUDA code, the static local variable s has a + // "shared" address space qualifier, but the constructor of StructWithCtor + // expects "this" in the "generic" address space. + assert(!UnimplementedFeature::addressSpace()); + + if (!T->isReferenceType()) { + codegenGlobalInitCxxStructor(D, Addr); + return; + } + } +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index aaedaa6d1a24..4f888ecac051 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -355,7 +355,8 @@ static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { // When directing calling an inline builtin, call it through it's mangled // name to make it clear it's not the actual builtin. - if (CGF.CurFn.getName() != FDInlineName && + auto Fn = cast(CGF.CurFn); + if (Fn.getName() != FDInlineName && onlyHasInlineBuiltinDeclaration(FD)) { assert(0 && "NYI"); } @@ -2132,7 +2133,7 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, mlir::Location loc, CharUnits alignment, bool insertIntoFnEntryBlock) { mlir::Block *entryBlock = insertIntoFnEntryBlock - ? &CurFn.getRegion().front() + ? getCurFunctionEntryBlock() : currLexScope->getEntryBlock(); return buildAlloca(name, ty, loc, alignment, builder.getBestAllocaInsertPoint(entryBlock)); @@ -2506,9 +2507,11 @@ mlir::Value CIRGenFunction::buildScalarConstant( } LValue CIRGenFunction::buildPredefinedLValue(const PredefinedExpr *E) { - auto SL = E->getFunctionName(); + const auto *SL = E->getFunctionName(); assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); - StringRef FnName = CurFn.getName(); + auto Fn = dyn_cast(CurFn); + assert(Fn && "other callables NYI"); + StringRef FnName = Fn.getName(); if (FnName.starts_with("\01")) FnName = FnName.substr(1); StringRef NameItems[] = {PredefinedExpr::getIdentKindName(E->getIdentKind()), diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 37e09f9d574b..576725e4af65 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -322,7 +322,9 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { auto buildReturn = [&](mlir::Location loc) { // If we are on a coroutine, add the coro_end builtin call. - if (CGF.CurFn.getCoroutine()) + auto Fn = dyn_cast(CGF.CurFn); + assert(Fn && "other callables NYI"); + if (Fn.getCoroutine()) CGF.buildCoroEndBuiltinCall( loc, builder.getNullPtr(builder.getVoidPtrTy(), loc)); @@ -1012,7 +1014,9 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, const auto *MD = cast(D); if (MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call) { // We're in a lambda. - CurFn.setLambdaAttr(mlir::UnitAttr::get(builder.getContext())); + auto Fn = dyn_cast(CurFn); + assert(Fn && "other callables NYI"); + Fn.setLambdaAttr(mlir::UnitAttr::get(builder.getContext())); // Figure out the captures. MD->getParent()->getCaptureFields(LambdaCaptureFields, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index dd50ee2ede30..2f2e4a7c276a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -576,7 +576,9 @@ class CIRGenFunction : public CIRGenTypeCache { const clang::Decl *CurCodeDecl; const CIRGenFunctionInfo *CurFnInfo; clang::QualType FnRetTy; - mlir::cir::FuncOp CurFn = nullptr; + + /// This is the current function or global initializer that is generated code for. + mlir::Operation *CurFn = nullptr; /// Save Parameter Decl for coroutine. llvm::SmallVector FnArgs; @@ -591,6 +593,12 @@ class CIRGenFunction : public CIRGenTypeCache { CIRGenModule &getCIRGenModule() { return CGM; } + mlir::Block* getCurFunctionEntryBlock() { + auto Fn = dyn_cast(CurFn); + assert(Fn && "other callables NYI"); + return &Fn.getRegion().front(); + } + /// Sanitizers enabled for this function. clang::SanitizerSet SanOpts; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index ae94489e81bb..88055fbd6913 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -479,7 +479,7 @@ mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM, // Be sure to insert global before the current function auto *curCGF = CGM.getCurrCIRGenFun(); if (curCGF) - builder.setInsertionPoint(curCGF->CurFn.getOperation()); + builder.setInsertionPoint(curCGF->CurFn); g = builder.create(loc, name, t, isCst); if (!curCGF) @@ -784,8 +784,14 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // TODO(cir): LLVM's codegen uses a llvm::TrackingVH here. Is that // necessary here for CIR gen? mlir::Attribute Init; - // TODO(cir): bool NeedsGlobalCtor = false; + bool NeedsGlobalCtor = false; + // Whether the definition of the variable is available externally. + // If yes, we shouldn't emit the GloablCtor and GlobalDtor for the variable + // since this is the job for its original source. + bool IsDefinitionAvailableExternally = + astCtx.GetGVALinkageForVariable(D) == GVA_AvailableExternally; bool NeedsGlobalDtor = + !IsDefinitionAvailableExternally && D->needsDestruction(astCtx) == QualType::DK_cxx_destructor; const VarDecl *InitDecl; @@ -831,7 +837,19 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, emitter.emplace(*this); auto Initializer = emitter->tryEmitForInitializer(*InitDecl); if (!Initializer) { - assert(0 && "not implemented"); + QualType T = InitExpr->getType(); + if (D->getType()->isReferenceType()) + T = D->getType(); + + if (getLangOpts().CPlusPlus) { + if (InitDecl->hasFlexibleArrayInit(astCtx)) + ErrorUnsupported(D, "flexible array initializer"); + Init = builder.getZeroInitAttr(getCIRType(T)); + if (!IsDefinitionAvailableExternally) + NeedsGlobalCtor = true; + } else { + ErrorUnsupported(D, "static initializer"); + } } else { Init = Initializer; // We don't need an initializer, so remove the entry for the delayed @@ -973,8 +991,8 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // TODO(cir): // Emit the initializer function if necessary. - // if (NeedsGlobalCtor || NeedsGlobalDtor) - // EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor); + if (NeedsGlobalCtor || NeedsGlobalDtor) + buildGlobalVarDeclInit(D, GV, NeedsGlobalCtor); // TODO(cir): sanitizers (reportGlobalToASan) and global variable debug // information. @@ -1790,7 +1808,7 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, // Be sure to insert a new function before a current one. auto *curCGF = getCurrCIRGenFun(); if (curCGF) - builder.setInsertionPoint(curCGF->CurFn.getOperation()); + builder.setInsertionPoint(curCGF->CurFn); f = builder.create(loc, name, Ty); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 96f14b433850..6dfdecfc5633 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -477,6 +477,10 @@ class CIRGenModule : public CIRGenTypeCache { void buildGlobalVarDefinition(const clang::VarDecl *D, bool IsTentative = false); + /// Emit the function that initializes the specified global + void buildGlobalVarDeclInit(const VarDecl *D, mlir::cir::GlobalOp Addr, + bool PerformInit); + void addDeferredVTable(const CXXRecordDecl *RD) { DeferredVTables.push_back(RD); } @@ -508,6 +512,10 @@ class CIRGenModule : public CIRGenTypeCache { // or if they are alias to each other. mlir::cir::FuncOp codegenCXXStructor(clang::GlobalDecl GD); + // Produce code for this constructor/destructor for global initialzation. + void codegenGlobalInitCxxStructor(const clang::VarDecl *D, + mlir::cir::GlobalOp Addr); + bool lookupRepresentativeDecl(llvm::StringRef MangledName, clang::GlobalDecl &Result) const; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 572299700b1e..3115491bf9ac 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1144,49 +1144,79 @@ LogicalResult LoopOp::verify() { //===----------------------------------------------------------------------===// static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, - TypeAttr type, - Attribute initAttr) { + TypeAttr type, Attribute initAttr, + mlir::Region& ctorRegion) { auto printType = [&]() { p << ": " << type; }; if (!op.isDeclaration()) { p << "= "; - // This also prints the type... - printConstant(p, initAttr); - if (initAttr.isa()) + if (!ctorRegion.empty()) { + p << "ctor "; printType(); + p << " "; + p.printRegion(ctorRegion, + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/false); + } else { + // This also prints the type... + if (initAttr) + printConstant(p, initAttr); + if (initAttr.isa()) + printType(); + } + } else { printType(); } } -static ParseResult -parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, - Attribute &initialValueAttr) { +static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, + TypeAttr &typeAttr, + Attribute &initialValueAttr, + mlir::Region& ctorRegion) { + mlir::Type opTy; if (parser.parseOptionalEqual().failed()) { // Absence of equal means a declaration, so we need to parse the type. // cir.global @a : i32 - Type type; - if (parser.parseColonType(type)) - return failure(); - typeAttr = TypeAttr::get(type); - return success(); - } - - // Parse constant with initializer, examples: - // cir.global @y = 3.400000e+00 : f32 - // cir.global @rgb = #cir.const_array<[...] : !cir.array> - if (parseConstantValue(parser, initialValueAttr).failed()) - return failure(); - - mlir::Type opTy; - if (auto sra = initialValueAttr.dyn_cast()) { if (parser.parseColonType(opTy)) return failure(); - } else { - // Handle StringAttrs - assert(initialValueAttr.isa() && - "Non-typed attrs shouldn't appear here."); - auto typedAttr = initialValueAttr.cast(); - opTy = typedAttr.getType(); + } + else { + // Parse contructor, example: + // cir.global @rgb = ctor : type { ... } + if (!parser.parseOptionalKeyword("ctor")) { + if (parser.parseColonType(opTy)) + return failure(); + auto parseLoc = parser.getCurrentLocation(); + if (parser.parseRegion(ctorRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + if (!ctorRegion.hasOneBlock()) + return parser.emitError(parser.getCurrentLocation(), + "ctor region must have exactly one block"); + if (ctorRegion.back().empty()) + return parser.emitError(parser.getCurrentLocation(), + "ctor region shall not be empty"); + if (checkBlockTerminator(parser, parseLoc, + ctorRegion.back().back().getLoc(), &ctorRegion) + .failed()) + return failure(); + } else { + // Parse constant with initializer, examples: + // cir.global @y = 3.400000e+00 : f32 + // cir.global @rgb = #cir.const_array<[...] : !cir.array> + if (parseConstantValue(parser, initialValueAttr).failed()) + return failure(); + + if (auto sra = initialValueAttr.dyn_cast()) { + if (parser.parseColonType(opTy)) + return failure(); + } else { + // Handle StringAttrs + assert(initialValueAttr.isa() && + "Non-typed attrs shouldn't appear here."); + auto typedAttr = initialValueAttr.cast(); + opTy = typedAttr.getType(); + } + } } typeAttr = TypeAttr::get(opTy); @@ -1239,9 +1269,10 @@ LogicalResult GlobalOp::verify() { return success(); } -void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, - StringRef sym_name, Type sym_type, bool isConstant, - cir::GlobalLinkageKind linkage) { +void GlobalOp::build( + OpBuilder &odsBuilder, OperationState &odsState, StringRef sym_name, + Type sym_type, bool isConstant, cir::GlobalLinkageKind linkage, + function_ref ctorBuilder) { odsState.addAttribute(getSymNameAttrName(odsState.name), odsBuilder.getStringAttr(sym_name)); odsState.addAttribute(getSymTypeAttrName(odsState.name), @@ -1253,6 +1284,35 @@ void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, ::mlir::cir::GlobalLinkageKindAttr linkageAttr = cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage); odsState.addAttribute(getLinkageAttrName(odsState.name), linkageAttr); + + Region *ctorRegion = odsState.addRegion(); + if (ctorBuilder) { + odsBuilder.createBlock(ctorRegion); + ctorBuilder(odsBuilder, odsState.location); + } +} + +/// Given the region at `index`, or the parent operation if `index` is None, +/// return the successor regions. These are the regions that may be selected +/// during the flow of control. `operands` is a set of optional attributes that +/// correspond to a constant value for each operand, or null if that operand is +/// not a constant. +void GlobalOp::getSuccessorRegions(mlir::RegionBranchPoint point, + SmallVectorImpl ®ions) { + // The only region always branch back to the parent operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor()); + return; + } + + // Don't consider the ctor region if it is empty. + Region *ctorRegion = &this->getCtorRegion(); + if (ctorRegion->empty()) + ctorRegion = nullptr; + + // If the condition isn't constant, both regions may be executed. + if (ctorRegion) + regions.push_back(RegionSuccessor(ctorRegion)); } //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp new file mode 100644 index 000000000000..27b89cb6d53a --- /dev/null +++ b/clang/test/CIR/CodeGen/static.cpp @@ -0,0 +1,24 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: cir-opt %t.cir -o - | FileCheck %s -check-prefix=CIR + +class Init { + +public: + Init(bool a) ; + +private: + static bool _S_synced_with_stdio; +}; + + +static Init __ioinit(true); + +// CIR: module {{.*}} { +// CIR-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// CIR-NEXT: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { +// CIR-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// CIR-NEXT: %1 = cir.const(#true) : !cir.bool +// CIR-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// CIR-NEXT: } +// CIR-NEXT: } diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 6eeb940b9e17..d572cb21c29b 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -3,6 +3,7 @@ !s8i = !cir.int !s32i = !cir.int !s64i = !cir.int +!ty_22class2EInit22 = !cir.struct<"class.Init", !s8i, #cir.recdecl.ast> module { cir.global external @a = #cir.int<3> : !s32i cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i] : !cir.array> @@ -32,6 +33,12 @@ module { #cir.global_view<@type_info_A> : !cir.ptr}> : !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr > + cir.func private @_ZN4InitC1Eb(!cir.ptr, !s8i) + cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { + %0 = cir.get_global @_ZL8__ioinit : cir.ptr + %1 = cir.const(#cir.int<3> : !s8i) : !s8i + cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () + } } // CHECK: cir.global external @a = #cir.int<3> : !s32i @@ -44,3 +51,9 @@ module { // CHECK: cir.func @use_global() // CHECK-NEXT: %0 = cir.get_global @a : cir.ptr + +// CHECK: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { +// CHECK-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s8i) : !s8i +// CHECK-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () +// CHECK-NEXT: } From f38c69686cf5f0f0e37b71403c617141564773d5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 8 Aug 2023 16:50:34 -0400 Subject: [PATCH 1138/2301] [CIR][CIRTidy] Handle mlir::FusedLoc when translating to clang::SourceLocation --- .../clang-tidy/cir-tidy/CIRASTConsumer.cpp | 55 ++++++++++++++----- 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp index d643ff8e41f5..4ef0b0d88d73 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp @@ -76,22 +76,47 @@ void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { clang::tidy::ClangTidyContext &tidyCtx; clang::SourceManager &clangSrcMgr; - clang::SourceLocation getClangSrcLoc(mlir::Location loc) { + clang::SourceLocation getClangFromFileLineCol(mlir::FileLineColLoc loc) { clang::SourceLocation clangLoc; FileManager &fileMgr = clangSrcMgr.getFileManager(); - - auto fileLoc = loc.dyn_cast(); - if (!fileLoc) - return clangLoc; + assert(loc && "not a valid mlir::FileLineColLoc"); // The column and line may be zero to represent unknown column and/or // unknown line/column information. - if (fileLoc.getLine() == 0 || fileLoc.getColumn() == 0) + if (loc.getLine() == 0 || loc.getColumn() == 0) { + llvm_unreachable("How should we workaround this?"); return clangLoc; - if (auto FE = fileMgr.getFile(fileLoc.getFilename())) { - return clangSrcMgr.translateFileLineCol(*FE, fileLoc.getLine(), - fileLoc.getColumn()); } - return clangLoc; + if (auto FE = fileMgr.getFile(loc.getFilename())) { + return clangSrcMgr.translateFileLineCol(*FE, loc.getLine(), + loc.getColumn()); + } + llvm_unreachable("location doesn't map to a file?"); + } + + clang::SourceLocation getClangSrcLoc(mlir::Location loc) { + // Direct maps into a clang::SourceLocation. + if (auto fileLoc = loc.dyn_cast()) { + return getClangFromFileLineCol(fileLoc); + } + + // FusedLoc needs to be decomposed but the canonical one + // is the first location, we handle source ranges somewhere + // else. + if (auto fileLoc = loc.dyn_cast()) { + auto locArray = fileLoc.getLocations(); + assert(locArray.size() > 0 && "expected multiple locs"); + return getClangFromFileLineCol( + locArray[0].dyn_cast()); + } + + // Many loc styles are yet to be handled. + if (auto fileLoc = loc.dyn_cast()) { + llvm_unreachable("mlir::UnknownLoc not implemented!"); + } + if (auto fileLoc = loc.dyn_cast()) { + llvm_unreachable("mlir::CallSiteLoc not implemented!"); + } + llvm_unreachable("Unknown location style"); } clang::DiagnosticIDs::Level @@ -111,13 +136,13 @@ void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { public: void emitClangTidyDiagnostic(mlir::Diagnostic &diag) { - tidyCtx.diag(cir::checks::LifetimeCheckName, - getClangSrcLoc(diag.getLocation()), diag.str(), + auto clangBeginLoc = getClangSrcLoc(diag.getLocation()); + tidyCtx.diag(cir::checks::LifetimeCheckName, clangBeginLoc, diag.str(), translateToClangDiagLevel(diag.getSeverity())); for (const auto ¬e : diag.getNotes()) { - tidyCtx.diag(cir::checks::LifetimeCheckName, - getClangSrcLoc(note.getLocation()), note.str(), - translateToClangDiagLevel(note.getSeverity())); + auto clangNoteBeginLoc = getClangSrcLoc(note.getLocation()); + tidyCtx.diag(cir::checks::LifetimeCheckName, clangNoteBeginLoc, + note.str(), translateToClangDiagLevel(note.getSeverity())); } } From 3866b4b58b92bb59284d9780e9b2de8d17a56979 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 9 Aug 2023 22:24:19 +0300 Subject: [PATCH 1139/2301] [CIR][Lowering] Fixed break/continue lowering for loops (#211) This PR fixes lowering for `break/continue` in loops. The idea is to replace `cir.yield break` and `cir.yield continue` with the branch operations to the corresponding blocks. Note, that we need to ignore nesting loops and don't touch `break` in switch operations. Also, `yield` from `if` need to be considered only when it's not the loop `yield` and `continue` in switch is ignored since it's processed in the loops lowering. Fixes #160 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 70 +++- clang/test/CIR/Lowering/loops-with-break.cir | 322 ++++++++++++++++++ .../test/CIR/Lowering/loops-with-continue.cir | 318 +++++++++++++++++ 3 files changed, 702 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/Lowering/loops-with-break.cir create mode 100644 clang/test/CIR/Lowering/loops-with-continue.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index a9708f526a9c..33e707d78909 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -240,6 +240,47 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { return mlir::success(); } + void makeYieldIf(mlir::cir::YieldOpKind kind, mlir::cir::YieldOp &op, + mlir::Block *to, + mlir::ConversionPatternRewriter &rewriter) const { + if (op.getKind() == kind) { + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp(op, op.getArgs(), to); + } + } + + void + lowerNestedBreakContinue(mlir::Region &loopBody, mlir::Block *exitBlock, + mlir::Block *continueBlock, + mlir::ConversionPatternRewriter &rewriter) const { + + auto processBreak = [&](mlir::Operation *op) { + if (isa( + *op)) // don't process breaks in nested loops and switches + return mlir::WalkResult::skip(); + + if (auto yield = dyn_cast(*op)) + makeYieldIf(mlir::cir::YieldOpKind::Break, yield, exitBlock, rewriter); + + return mlir::WalkResult::advance(); + }; + + auto processContinue = [&](mlir::Operation *op) { + if (isa( + *op)) // don't process continues in nested loops + return mlir::WalkResult::skip(); + + if (auto yield = dyn_cast(*op)) + makeYieldIf(mlir::cir::YieldOpKind::Continue, yield, continueBlock, + rewriter); + + return mlir::WalkResult::advance(); + }; + + loopBody.walk(processBreak); + loopBody.walk(processContinue); + } + mlir::LogicalResult matchAndRewrite(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { @@ -267,6 +308,9 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { auto &stepFrontBlock = stepRegion.front(); auto stepYield = dyn_cast(stepRegion.back().getTerminator()); + auto &stepBlock = (kind == LoopKind::For ? stepFrontBlock : condFrontBlock); + + lowerNestedBreakContinue(bodyRegion, continueBlock, &stepBlock, rewriter); // Move loop op region contents to current CFG. rewriter.inlineRegionBefore(condRegion, continueBlock); @@ -289,8 +333,7 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { // Branch from body to condition or to step on for-loop cases. rewriter.setInsertionPoint(bodyYield); - auto &bodyExit = (kind == LoopKind::For ? stepFrontBlock : condFrontBlock); - rewriter.replaceOpWithNewOp(bodyYield, &bodyExit); + rewriter.replaceOpWithNewOp(bodyYield, &stepBlock); // Is a for loop: branch from step to condition. if (kind == LoopKind::For) { @@ -488,6 +531,11 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } }; +static bool isLoopYield(mlir::cir::YieldOp &op) { + return op.getKind() == mlir::cir::YieldOpKind::Break || + op.getKind() == mlir::cir::YieldOpKind::Continue; +} + class CIRIfLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -516,8 +564,10 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(thenAfterBody); if (auto thenYieldOp = dyn_cast(thenAfterBody->getTerminator())) { - rewriter.replaceOpWithNewOp( - thenYieldOp, thenYieldOp.getArgs(), continueBlock); + if (!isLoopYield(thenYieldOp)) // lowering of parent loop yields is + // deferred to loop lowering + rewriter.replaceOpWithNewOp( + thenYieldOp, thenYieldOp.getArgs(), continueBlock); } else if (!dyn_cast(thenAfterBody->getTerminator())) { llvm_unreachable("what are we terminating with?"); } @@ -545,8 +595,10 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(elseAfterBody); if (auto elseYieldOp = dyn_cast(elseAfterBody->getTerminator())) { - rewriter.replaceOpWithNewOp( - elseYieldOp, elseYieldOp.getArgs(), continueBlock); + if (!isLoopYield(elseYieldOp)) // lowering of parent loop yields is + // deferred to loop lowering + rewriter.replaceOpWithNewOp( + elseYieldOp, elseYieldOp.getArgs(), continueBlock); } else if (!dyn_cast( elseAfterBody->getTerminator())) { llvm_unreachable("what are we terminating with?"); @@ -1109,6 +1161,9 @@ class CIRSwitchOpLowering case mlir::cir::YieldOpKind::Break: rewriteYieldOp(rewriter, yieldOp, exitBlock); break; + case mlir::cir::YieldOpKind::Continue: // Continue is handled only in + // loop lowering + break; default: return op->emitError("invalid yield kind in case statement"); } @@ -1692,8 +1747,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, CIRStructElementAddrOpLowering, CIRSwitchOpLowering, - CIRPtrDiffOpLowering>( - converter, patterns.getContext()); + CIRPtrDiffOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir new file mode 100644 index 000000000000..f22865ebcc78 --- /dev/null +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -0,0 +1,322 @@ +// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +module { + cir.func @testFor() { + cir.scope { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.loop for(cond : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.brcond %5 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield + }) { + cir.scope { + cir.scope { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<5> : !s32i) : !s32i + %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.if %5 { + cir.yield break + } + } + } + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testFor() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK0:]], ^bb[[#preEXIT0:]] + // CHECK: ^bb[[#preBREAK0]]: + // CHECK: llvm.br ^bb[[#preBREAK1:]] + // CHECK: ^bb[[#preEXIT0]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#preBREAK1]]: + // CHECK: llvm.br ^bb[[#preBREAK2:]] + // CHECK: ^bb[[#preBREAK2]]: + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preBODY0:]] + // CHECK: ^bb[[#preEXIT1]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + + cir.func @testForNested() { + cir.scope { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.loop for(cond : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.brcond %5 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield + }) { + cir.scope { + cir.scope { + %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + cir.loop for(cond : { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.const(#cir.int<10> : !s32i) : !s32i + %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.brcond %7 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.unary(inc, %4) : !s32i, !s32i + cir.store %5, %2 : !s32i, cir.ptr + cir.yield + }) { + cir.scope { + cir.scope { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.yield break + } + } + } + cir.yield + } + } + } + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testForNested() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED0:]], ^bb[[#preEXIT0:]] + // CHECK: ^bb[[#preNESTED0]]: + // CHECK: llvm.br ^bb[[#preNESTED1:]] + // CHECK: ^bb[[#preEXIT0]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#preNESTED1]]: + // CHECK: llvm.br ^bb[[#preNESTED2:]] + // CHECK: ^bb[[#preNESTED2]]: + // CHECK: llvm.br ^bb[[#NESTED:]] + // CHECK: ^bb[[#NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#COND_NESTED:]] + // CHECK: ^bb[[#COND_NESTED]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK0:]], ^bb[[#preEXIT1:]] + // CHECK: ^bb[[#preBREAK0]]: + // CHECK: llvm.br ^bb[[#preBREAK1:]] + // CHECK: ^bb[[#preEXIT1]]: + // CHECK: llvm.br ^bb[[#EXIT_NESTED:]] + // CHECK: ^bb[[#preBREAK1]]: + // CHECK: llvm.br ^bb[[#preBREAK2:]] + // CHECK: ^bb[[#preBREAK2]]: + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT2:]], ^bb[[#preBODY0:]] + // CHECK: ^bb[[#preEXIT2]]: + // CHECK: llvm.br ^bb[[#EXIT_NESTED:]] + // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY_NESTED:]] + // CHECK: ^bb[[#BODY_NESTED]]: + // CHECK: llvm.br ^bb[[#STEP_NESTED:]] + // CHECK: ^bb[[#STEP_NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#COND_NESTED:]] + // CHECK: ^bb[[#EXIT_NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + + cir.func @testWhile() { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.scope { + cir.loop while(cond : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.brcond %5 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + cir.yield + }) { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.scope { + %4 = cir.load %0 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.yield break + } + } + cir.yield + } + } + cir.return + } + + + // CHECK: llvm.func @testWhile() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBODY:]], ^bb[[#preEXIT0:]] + // CHECK: ^bb[[#preBODY]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#preEXIT0]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preCOND0:]] + // CHECK: ^bb[[#preEXIT1]]: + // CHECK: llvm.br ^bb[[#preEXIT2:]] + // CHECK: ^bb[[#preCOND0]]: + // CHECK: llvm.br ^bb[[#preCOND1:]] + // CHECK: ^bb[[#preCOND1]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#preEXIT2]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + +cir.func @testDoWhile() { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.scope { + cir.loop dowhile(cond : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.brcond %5 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + cir.yield + }) { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.scope { + %4 = cir.load %0 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.yield break + } + } + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testDoWhile() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBODY:]], ^bb[[#preEXIT0:]] + // CHECK: ^bb[[#preBODY]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#preEXIT0]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preCOND0:]] + // CHECK: ^bb[[#preEXIT1]]: + // CHECK: llvm.br ^bb[[#preEXIT2:]] + // CHECK: ^bb[[#preCOND0]]: + // CHECK: llvm.br ^bb[[#preCOND1:]] + // CHECK: ^bb[[#preCOND1]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#preEXIT2]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir new file mode 100644 index 000000000000..c0f2c2658c2c --- /dev/null +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -0,0 +1,318 @@ +// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +module { + cir.func @testFor() { + cir.scope { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.loop for(cond : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.brcond %5 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield + }) { + cir.scope { + cir.scope { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<5> : !s32i) : !s32i + %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.if %5 { + cir.yield continue + } + } + } + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testFor() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE0:]], ^bb[[#preEXIT0:]] + // CHECK: ^bb[[#preCONTINUE0]]: + // CHECK: llvm.br ^bb[[#preCONTINUE1:]] + // CHECK: ^bb[[#preEXIT0]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#preCONTINUE1]]: + // CHECK: llvm.br ^bb[[#preCONTINUE2:]] + // CHECK: ^bb[[#preCONTINUE2]]: + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preSTEP:]], ^bb[[#preBODY0:]] + // CHECK: ^bb[[#preSTEP]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + + + cir.func @testForNested() { + cir.scope { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.loop for(cond : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.brcond %5 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield + }) { + cir.scope { + cir.scope { + %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + cir.loop for(cond : { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.const(#cir.int<10> : !s32i) : !s32i + %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.brcond %7 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.unary(inc, %4) : !s32i, !s32i + cir.store %5, %2 : !s32i, cir.ptr + cir.yield + }) { + cir.scope { + cir.scope { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.yield continue + } + } + } + cir.yield + } + } + } + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testForNested() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED0:]], ^bb[[#preEXIT0:]] + // CHECK: ^bb[[#preNESTED0]]: + // CHECK: llvm.br ^bb[[#preNESTED1:]] + // CHECK: ^bb[[#preEXIT0]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#preNESTED1]]: + // CHECK: llvm.br ^bb[[#preNESTED2:]] + // CHECK: ^bb[[#preNESTED2]]: + // CHECK: llvm.br ^bb[[#NESTED:]] + // CHECK: ^bb[[#NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#COND_NESTED:]] + // CHECK: ^bb[[#COND_NESTED]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE0:]], ^bb[[#preEXIT1:]] + // CHECK: ^bb[[#preCONTINUE0]]: + // CHECK: llvm.br ^bb[[#preCONTINUE1:]] + // CHECK: ^bb[[#preEXIT1]]: + // CHECK: llvm.br ^bb[[#EXIT_NESTED:]] + // CHECK: ^bb[[#preCONTINUE1]]: + // CHECK: llvm.br ^bb[[#preCONTINUE2:]] + // CHECK: ^bb[[#preCONTINUE2]]: + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preSTEP0:]], ^bb[[#preBODY0:]] + // CHECK: ^bb[[#preSTEP0]]: + // CHECK: llvm.br ^bb[[#STEP_NESTED:]] + // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY_NESTED:]] + // CHECK: ^bb[[#BODY_NESTED]]: + // CHECK: llvm.br ^bb[[#STEP_NESTED:]] + // CHECK: ^bb[[#STEP_NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#COND_NESTED:]] + // CHECK: ^bb[[#EXIT_NESTED]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + +cir.func @testWhile() { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.scope { + cir.loop while(cond : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.brcond %5 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + cir.yield + }) { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.scope { + %4 = cir.load %0 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.yield continue + } + } + cir.yield + } + } + cir.return + } + + // CHECK: llvm.func @testWhile() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBODY:]], ^bb[[#preEXIT0:]] + // CHECK: ^bb[[#preBODY]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#preEXIT0]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCOND0:]], ^bb[[#preCOND1:]] + // CHECK: ^bb[[#preCOND0]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#preCOND1]]: + // CHECK: llvm.br ^bb[[#preCOND2:]] + // CHECK: ^bb[[#preCOND2]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + + cir.func @testDoWhile() { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + cir.scope { + cir.loop dowhile(cond : { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.brcond %5 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + cir.yield + }) { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.scope { + %4 = cir.load %0 : cir.ptr , !s32i + %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.if %7 { + cir.yield continue + } + } + cir.yield + } + } + cir.return + } + + + // CHECK: llvm.func @testDoWhile() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBODY:]], ^bb[[#preEXIT0:]] + // CHECK: ^bb[[#preBODY]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#preEXIT0]]: + // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCOND0:]], ^bb[[#preCOND1:]] + // CHECK: ^bb[[#preCOND0]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#preCOND1]]: + // CHECK: llvm.br ^bb[[#preCOND2:]] + // CHECK: ^bb[[#preCOND2]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } + +} \ No newline at end of file From 9622cc48a61ab7fc0d768d06ba1fe0ea953a7765 Mon Sep 17 00:00:00 2001 From: Roman Rusyaev Date: Thu, 3 Aug 2023 22:29:31 +0300 Subject: [PATCH 1140/2301] [CIR][CodeGen] Fix generation of 'if statement' when begin and end locations are the same --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 41 +++++++---- clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 - clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 23 +----- clang/test/CIR/CodeGen/spelling-locations.cpp | 71 +++++++++++++++++++ 4 files changed, 98 insertions(+), 38 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 4f888ecac051..372c77ffa0a8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2049,33 +2049,44 @@ bool CIRGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) { /// Emit an `if` on a boolean condition, filling `then` and `else` into /// appropriated regions. mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, - mlir::Location loc, const Stmt *thenS, const Stmt *elseS) { + auto getStmtLoc = [this](const Stmt &s) { + return mlir::FusedLoc::get(builder.getContext(), + {getLoc(s.getSourceRange().getBegin()), + getLoc(s.getSourceRange().getEnd())}); + }; + + auto thenLoc = getStmtLoc(*thenS); + std::optional elseLoc; + SmallVector ifLocs{thenLoc}; + + if (elseS) { + elseLoc = getStmtLoc(*elseS); + ifLocs.push_back(*elseLoc); + } + + // Attempt to be more accurate as possible with IfOp location, generate + // one fused location that has either 2 or 4 total locations, depending + // on else's availability. + auto loc = mlir::FusedLoc::get(builder.getContext(), ifLocs); + // Emit the code with the fully general case. mlir::Value condV = buildOpOnBoolExpr(cond, loc, thenS, elseS); mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); + builder.create( loc, condV, elseS, /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - if (const auto fusedLoc = loc.dyn_cast()) { - loc = mlir::FusedLoc::get( - builder.getContext(), - {fusedLoc.getLocations()[0], fusedLoc.getLocations()[1]}); - } - LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; + [&](mlir::OpBuilder &, mlir::Location) { + LexicalScopeContext lexScope{thenLoc, builder.getInsertionBlock()}; LexicalScopeGuard lexThenGuard{*this, &lexScope}; resThen = buildStmt(thenS, /*useCurrentScope=*/true); }, /*elseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - if (const auto fusedLoc = loc.dyn_cast()) { - loc = mlir::FusedLoc::get( - builder.getContext(), - {fusedLoc.getLocations()[2], fusedLoc.getLocations()[3]}); - } - LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; + [&](mlir::OpBuilder &, mlir::Location) { + assert(elseLoc && "Invalid location for elseS."); + LexicalScopeContext lexScope{*elseLoc, builder.getInsertionBlock()}; LexicalScopeGuard lexElseGuard{*this, &lexScope}; resElse = buildStmt(elseS, /*useCurrentScope=*/true); }); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 2f2e4a7c276a..1459dff6f121 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1065,7 +1065,6 @@ class CIRGenFunction : public CIRGenTypeCache { /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr /// for extra ideas). mlir::LogicalResult buildIfOnBoolExpr(const clang::Expr *cond, - mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS); mlir::Value buildTernaryOnBoolExpr(const clang::Expr *cond, diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index ca0c65599543..836e074ba4ea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -332,26 +332,6 @@ static void terminateBody(mlir::OpBuilder &builder, mlir::Region &r, b->erase(); } -static mlir::Location getIfLocs(CIRGenFunction &CGF, const clang::Stmt *thenS, - const clang::Stmt *elseS) { - // Attempt to be more accurate as possible with IfOp location, generate - // one fused location that has either 2 or 4 total locations, depending - // on else's availability. - SmallVector ifLocs; - mlir::Attribute metadata; - - clang::SourceRange t = thenS->getSourceRange(); - ifLocs.push_back(CGF.getLoc(t.getBegin())); - ifLocs.push_back(CGF.getLoc(t.getEnd())); - if (elseS) { - clang::SourceRange e = elseS->getSourceRange(); - ifLocs.push_back(CGF.getLoc(e.getBegin())); - ifLocs.push_back(CGF.getLoc(e.getEnd())); - } - - return mlir::FusedLoc::get(ifLocs, metadata, CGF.getBuilder().getContext()); -} - mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { // The else branch of a consteval if statement is always the only branch // that can be runtime evaluated. @@ -382,8 +362,7 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); assert(!UnimplementedFeature::incrementProfileCounter()); - auto ifLoc = getIfLocs(*this, S.getThen(), S.getElse()); - return buildIfOnBoolExpr(S.getCond(), ifLoc, S.getThen(), S.getElse()); + return buildIfOnBoolExpr(S.getCond(), S.getThen(), S.getElse()); }; // TODO: Add a new scoped symbol table. diff --git a/clang/test/CIR/CodeGen/spelling-locations.cpp b/clang/test/CIR/CodeGen/spelling-locations.cpp index e3d4f6586056..66c09c88a029 100644 --- a/clang/test/CIR/CodeGen/spelling-locations.cpp +++ b/clang/test/CIR/CodeGen/spelling-locations.cpp @@ -27,3 +27,74 @@ int testMacroLocations(void) { return 0; } + +void testIfStmtLocations(int f) { + if (f) + ; + else + ; + + if (f) + ++f; + else + ; + + if (f) + ; + else + --f; + + if (f) + ++f; + else + --f; +} + +// CHECK: cir.if %{{.+}} { +// CHECK: } else { +// CHECK: } loc(#loc[[#LOC1:]]) + +// CHECK: cir.if %{{.+}} { +// CHECK: %{{.+}} = cir.load +// CHECK: %{{.+}} = cir.unary(inc +// CHECK: cir.store +// CHECK: } else { +// CHECK: } loc(#loc[[#LOC2:]]) + +// CHECK: cir.if %{{.+}} { +// CHECK: } else { +// CHECK: %{{.+}} = cir.load +// CHECK: %{{.+}} = cir.unary(dec +// CHECK: cir.store +// CHECK: } loc(#loc[[#LOC3:]]) + +// CHECK: cir.if %{{.+}} { +// CHECK: %{{.+}} = cir.load +// CHECK: %{{.+}} = cir.unary(inc +// CHECK: cir.store +// CHECK: } else { +// CHECK: %{{.+}} = cir.load +// CHECK: %{{.+}} = cir.unary(dec +// CHECK: cir.store +// CHECK: } loc(#loc[[#LOC4:]]) + +// CHECK: #loc[[#LOC12:]] = loc({{.+}}:35:5) +// CHECK: #loc[[#LOC11:]] = loc({{.+}}:33:5) + +// CHECK: #loc[[#LOC23:]] = loc({{.+}}:40:5) +// CHECK: #loc[[#LOC21:]] = loc({{.+}}:38:5) +// CHECK: #loc[[#LOC22:]] = loc({{.+}}:38:7) + +// CHECK: #loc[[#LOC33:]] = loc({{.+}}:45:7) +// CHECK: #loc[[#LOC31:]] = loc({{.+}}:43:5) +// CHECK: #loc[[#LOC32:]] = loc({{.+}}:45:5) + +// CHECK: #loc[[#LOC44:]] = loc({{.+}}:50:7) +// CHECK: #loc[[#LOC41:]] = loc({{.+}}:48:5) +// CHECK: #loc[[#LOC42:]] = loc({{.+}}:48:7) +// CHECK: #loc[[#LOC43:]] = loc({{.+}}:50:5) + +// CHECK: #loc[[#LOC1]] = loc(fused[#loc[[#LOC11]], #loc[[#LOC12]]]) +// CHECK: #loc[[#LOC2]] = loc(fused[#loc[[#LOC21]], #loc[[#LOC22]], #loc[[#LOC23]]]) +// CHECK: #loc[[#LOC3]] = loc(fused[#loc[[#LOC31]], #loc[[#LOC32]], #loc[[#LOC33]]]) +// CHECK: #loc[[#LOC4]] = loc(fused[#loc[[#LOC41]], #loc[[#LOC42]], #loc[[#LOC43]], #loc[[#LOC44]]]) From d035d5d13bc6c6d75a46d5a3d02bf5dba55c3e03 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 10 Aug 2023 21:17:18 -0700 Subject: [PATCH 1141/2301] [CIR][CIRGen] Fix heap-use-after-free in recordDeclTypes, found by ASAN --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index badeff3e354a..80a267a9652b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -164,8 +164,7 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // TagDecl's are not necessarily unique, instead use the (clang) type // connected to the decl. const auto *key = Context.getTagDeclType(RD).getTypePtr(); - - mlir::cir::StructType &entry = recordDeclTypes[key]; + mlir::cir::StructType entry = recordDeclTypes[key]; // Handle forward decl / incomplete types. if (!entry) { @@ -174,6 +173,7 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { entry = mlir::cir::StructType::get( &getMLIRContext(), {}, identifier, /*body=*/false, /**packed=*/false, mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), RD)); + recordDeclTypes[key] = entry; } RD = RD->getDefinition(); @@ -202,6 +202,7 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // Layout fields. std::unique_ptr Layout = computeRecordLayout(RD, &entry); + recordDeclTypes[key] = entry; CIRGenRecordLayouts[key] = std::move(Layout); // We're done laying out this struct. From b9288648d37de771f966c55420fbee61e048b0a6 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 8 Aug 2023 06:51:55 -0300 Subject: [PATCH 1142/2301] [CIR][CIRGen] Support for struct call arguments Essentially emits an LValue for the struct and then passes it as a call argument. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 7 +++++-- clang/lib/CIR/CodeGen/CIRGenCall.h | 6 ++++++ clang/test/CIR/CodeGen/struct.c | 7 +++++++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 125172d75ff1..9d5e91316f7e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -654,12 +654,15 @@ void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, // we make it to the call. if (type->isRecordType() && type->castAs()->getDecl()->isParamDestroyedInCallee()) { - llvm_unreachable("NYI"); + llvm_unreachable("Microsoft C++ ABI is NYI"); } if (HasAggregateEvalKind && isa(E) && cast(E)->getCastKind() == CK_LValueToRValue) { - assert(0 && "NYI"); + LValue L = buildLValue(cast(E)->getSubExpr()); + assert(L.isSimple()); + args.addUncopiedAggregate(L, type); + return; } args.add(buildAnyExprToTemp(E), type); diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 3e8393742616..80941919e2e1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -210,6 +210,8 @@ struct CallArg { : RV(rv), HasLV(false), IsUsed(false), Ty(ty) { (void)IsUsed; } + CallArg(LValue lv, clang::QualType ty) + : LV(lv), HasLV(true), IsUsed(false), Ty(ty) {} /// \returns an independent RValue. If the CallArg contains an LValue, /// a temporary copy is returned. @@ -242,6 +244,10 @@ class CallArgList : public llvm::SmallVector { push_back(CallArg(rvalue, type)); } + void addUncopiedAggregate(LValue LV, clang::QualType type) { + push_back(CallArg(LV, type)); + } + /// Add all the arguments from another CallArgList to this one. After doing /// this, the old CallArgList retains its list of arguments, but must not /// be used to emit a call. diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 5a8d97bd4b52..14df39b332aa 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -61,3 +61,10 @@ struct S3 { int a; } s3[3] = {{1}, {2}, {3}}; // CHECK-DAG: cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22struct2ES322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22struct2ES322]> : !cir.array + +void shouldCopyStructAsCallArg(struct S1 s) { +// CHECK-DAG: cir.func @shouldCopyStructAsCallArg + shouldCopyStructAsCallArg(s); + // CHECK-DAG: %[[#LV:]] = cir.load %{{.+}} : cir.ptr , !ty_22struct2ES122 + // CHECK-DAG: cir.call @shouldCopyStructAsCallArg(%[[#LV]]) : (!ty_22struct2ES122) -> () +} From 0fba4517052b0bca65aba0867943487320c3796d Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Mon, 14 Aug 2023 21:03:13 -0700 Subject: [PATCH 1143/2301] [CIR] Enable per-pass IR printing (#234) Enabling IR printing with --mlir-print-ir-after=passName1, passName2. This requires all CIR passes to be registered at startup time. --- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 4 +--- clang/lib/FrontendTool/CMakeLists.txt | 1 + clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp | 2 ++ clang/test/CIR/mlirprint.c | 4 ++++ 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index e2165c4af42f..a0588402727a 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1903,15 +1903,13 @@ void LifetimeCheckPass::checkOperation(Operation *op) { } void LifetimeCheckPass::runOnOperation() { + assert(astCtx && "Missing ASTContext, please construct with the right ctor"); opts.parseOptions(*this); Operation *op = getOperation(); checkOperation(op); } std::unique_ptr mlir::createLifetimeCheckPass() { - // FIXME: MLIR requres a default "constructor", but should never - // be used. - llvm_unreachable("Check requires clang::ASTContext, use the other ctor"); return std::make_unique(); } diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index c2f7c0150532..ceb4d3f91b68 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -17,6 +17,7 @@ set(deps) if(CLANG_ENABLE_CIR) list(APPEND link_libs clangCIRFrontendAction + MLIRCIRTransforms MLIRIR MLIRPass ) diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 76c92fdb5442..93d2eebc9a9a 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -36,6 +36,7 @@ #include "mlir/IR/AsmState.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Pass/PassManager.h" +#include "clang/CIR/Dialect/Passes.h" #include "clang/CIRFrontendAction/CIRGenAction.h" #endif @@ -323,6 +324,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) { #endif #if CLANG_ENABLE_CIR if (!Clang->getFrontendOpts().MLIRArgs.empty()) { + mlir::registerCIRPasses(); mlir::registerMLIRContextCLOptions(); mlir::registerPassManagerCLOptions(); mlir::registerAsmPrinterCLOptions(); diff --git a/clang/test/CIR/mlirprint.c b/clang/test/CIR/mlirprint.c index 96ff16e95708..c1e9aa2777a7 100644 --- a/clang/test/CIR/mlirprint.c +++ b/clang/test/CIR/mlirprint.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR // RUN: %clang_cc1 -fclangir -emit-llvm -mmlir --mlir-print-ir-after-all -mllvm -print-after-all %s -o %t.ll 2>&1 | FileCheck %s -check-prefix=CIR -check-prefix=LLVM +// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-drop-ast %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIRPASS int foo(void) { int i = 3; @@ -15,3 +16,6 @@ int foo(void) { // LLVM: llvm.func @foo() -> i32 // LLVM: IR Dump After // LLVM: define i32 @foo() + +// CIRPASS-NOT: IR Dump After MergeCleanups +// CIRPASS: IR Dump After DropAST From 1297606f6036b25b956e92d41186545943f2c9ae Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Tue, 15 Aug 2023 11:00:35 -0700 Subject: [PATCH 1144/2301] [ClR] Set optnone attribute for functions. (#115) Summary: Setting the Optnone attribute for CIR functions and progating it all the way down to LLVM IR for those not supposed to be optimized. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 12 +++++++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 19 ++++++++++++++ .../DirectToLLVM/LowerAttrToLLVMIR.cpp | 2 ++ clang/test/CIR/CodeGen/optnone.cpp | 25 +++++++++++++++++++ 4 files changed, 58 insertions(+) create mode 100644 clang/test/CIR/CodeGen/optnone.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 2293638afbe2..5c5f87601032 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -26,6 +26,14 @@ class CIR_Attr traits = []> let mnemonic = attrMnemonic; } +class CIRUnitAttr traits = []> + : CIR_Attr { + let returnType = "bool"; + let defaultValue = "false"; + let valueType = NoneType; + let isOptional = 1; +} + //===----------------------------------------------------------------------===// // LangAttr //===----------------------------------------------------------------------===// @@ -440,4 +448,8 @@ def InlineAttr : CIR_Attr<"Inline", "inline"> { }]; } +def OptNoneAttr : CIRUnitAttr<"OptNone", "optnone"> { + let storageType = [{ OptNoneAttr }]; +} + #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 88055fbd6913..07ddff989f91 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1907,7 +1907,26 @@ void CIRGenModule::setExtraAttributesForFunc(FuncOp f, mlir::cir::InlineKind::NoInline); attrs.set(attr.getMnemonic(), attr); } + } + + // Track whether we need to add the optnone attribute, + // starting with the default for this optimization level. + bool ShouldAddOptNone = + !codeGenOpts.DisableO0ImplyOptNone && codeGenOpts.OptimizationLevel == 0; + if (FD) { + ShouldAddOptNone &= !FD->hasAttr(); + ShouldAddOptNone &= !FD->hasAttr(); + ShouldAddOptNone |= FD->hasAttr(); + } + + if (ShouldAddOptNone) { + auto optNoneAttr = mlir::cir::OptNoneAttr::get(builder.getContext()); + attrs.set(optNoneAttr.getMnemonic(), optNoneAttr); + // OptimizeNone implies noinline; we should not be inlining such functions. + auto noInlineAttr = mlir::cir::InlineAttr::get( + builder.getContext(), mlir::cir::InlineKind::NoInline); + attrs.set(noInlineAttr.getMnemonic(), noInlineAttr); } f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp index c513ee344d3c..2ec3d15e3887 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp @@ -66,6 +66,8 @@ class CIRDialectLLVMIRTranslationInterface llvmFunc->addFnAttr(llvm::Attribute::InlineHint); else llvm_unreachable("Unknown inline kind"); + } else if (attr.getValue().dyn_cast()) { + llvmFunc->addFnAttr(llvm::Attribute::OptimizeNone); } } } diff --git a/clang/test/CIR/CodeGen/optnone.cpp b/clang/test/CIR/CodeGen/optnone.cpp new file mode 100644 index 000000000000..08965675ff70 --- /dev/null +++ b/clang/test/CIR/CodeGen/optnone.cpp @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -O0 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR-O0 +// RUN: %clang_cc1 -O0 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM-O0 + +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t2.cir +// RUN: FileCheck --input-file=%t2.cir %s -check-prefix=CIR-O2 +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t2.ll +// RUN: FileCheck --input-file=%t2.ll %s -check-prefix=LLVM-O2 + +int s0(int a, int b) { + int x = a + b; + if (x > 0) + x = 0; + else + x = 1; + return x; +} + +// CIR-O0: cir.func @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline, optnone = #cir.optnone} ) +// CIR-O2-NOT: cir.func @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} optnone + +// LLVM-O0: define i32 @_Z2s0ii(i32 %0, i32 %1) #[[#ATTR:]] +// LLVM-O0: attributes #[[#ATTR]] = { noinline optnone } +// LLVM-O2-NOT: attributes #[[#]] = { noinline optnone } From dc8b481de94df01ce0ff8b30ff5688cab3b8e41c Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Tue, 15 Aug 2023 13:15:54 -0700 Subject: [PATCH 1145/2301] [CIR][Lowering] Add an empty LoweringPrepare pass (#236) This change is a prerequisite of https://github.com/llvm/clangir/pull/235 --- clang/include/clang/CIR/Dialect/Passes.h | 2 + clang/include/clang/CIR/Dialect/Passes.td | 10 ++++ clang/lib/CIR/CodeGen/CIRPasses.cpp | 2 + .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../Dialect/Transforms/LoweringPrepare.cpp | 51 +++++++++++++++++++ clang/test/CIR/mlirprint.c | 2 + 6 files changed, 68 insertions(+) create mode 100644 clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 903681c0d1ba..abf915bf687a 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -28,6 +28,8 @@ std::unique_ptr createLifetimeCheckPass(ArrayRef remark, clang::ASTContext *astCtx); std::unique_ptr createMergeCleanupsPass(); std::unique_ptr createDropASTPass(); +std::unique_ptr createLoweringPreparePass(); +std::unique_ptr createLoweringPreparePass(clang::ASTContext *astCtx); //===----------------------------------------------------------------------===// // Registration diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index ce95aea2ed29..08c95ab92ed7 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -65,4 +65,14 @@ def DropAST : Pass<"cir-drop-ast"> { let dependentDialects = ["cir::CIRDialect"]; } +def LoweringPrepare : Pass<"cir-lowering-prepare"> { + let summary = "Preparation work before lowering to LLVM dialect"; + let description = [{ + This pass does preparation work for LLVM lowering. For example, it may + expand the global variable initialziation in a more ABI-friendly form. + }]; + let constructor = "mlir::createLoweringPreparePass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + #endif // MLIR_DIALECT_CIR_PASSES diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index df65bfaf3a6b..e278f4c22bde 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -45,6 +45,8 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, pm.addPass(std::move(lifetimePass)); } + pm.addPass(mlir::createLoweringPreparePass(&astCtx)); + // FIXME: once CIRCodenAction fixes emission other than CIR we // need to run this right before dialect emission. pm.addPass(mlir::createDropASTPass()); diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 3a9e96715740..27fede4064c7 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -1,5 +1,6 @@ add_clang_library(MLIRCIRTransforms LifetimeCheck.cpp + LoweringPrepare.cpp MergeCleanups.cpp DropAST.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp new file mode 100644 index 000000000000..49c1bff5df4e --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -0,0 +1,51 @@ +//===- LoweringPrepare.cpp - pareparation work for LLVM lowering ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "PassDetail.h" +#include "clang/AST/ASTContext.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" + +using namespace mlir; +using namespace cir; + +namespace { +struct LoweringPreparePass : public LoweringPrepareBase { + LoweringPreparePass() = default; + void runOnOperation() override; + + /// + /// AST related + /// ----------- + + clang::ASTContext *astCtx; + void setASTContext(clang::ASTContext *c) { astCtx = c; } + + /// Tracks current module. + ModuleOp theModule; +}; +} // namespace + + +void LoweringPreparePass::runOnOperation() { + assert(astCtx && "Missing ASTContext, please construct with the right ctor"); + auto* op = getOperation(); + if (isa<::mlir::ModuleOp>(op)) { + theModule = cast<::mlir::ModuleOp>(op); + } +} + +std::unique_ptr mlir::createLoweringPreparePass() { + return std::make_unique(); +} + +std::unique_ptr mlir::createLoweringPreparePass(clang::ASTContext *astCtx) { + auto pass = std::make_unique(); + pass->setASTContext(astCtx); + return std::move(pass); +} diff --git a/clang/test/CIR/mlirprint.c b/clang/test/CIR/mlirprint.c index c1e9aa2777a7..35e5a2ff49ac 100644 --- a/clang/test/CIR/mlirprint.c +++ b/clang/test/CIR/mlirprint.c @@ -10,6 +10,8 @@ int foo(void) { // CIR: IR Dump After MergeCleanups (cir-merge-cleanups) // CIR: cir.func @foo() -> !s32i +// CIR: IR Dump After LoweringPrepare (cir-lowering-prepare) +// CIR: cir.func @foo() -> !s32i // CIR: IR Dump After DropAST (cir-drop-ast) // CIR: cir.func @foo() -> !s32i // LLVM: IR Dump After cir::direct::ConvertCIRToLLVMPass (cir-to-llvm) From 0d0c33ea581d6923c5000cb031c2b18d08b30da8 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Tue, 15 Aug 2023 17:15:20 -0700 Subject: [PATCH 1146/2301] [CIR][Lowering] Add LoweringPrepare pass to pre-lower global initializers (#235) As a follow up to https://github.com/llvm/clangir/pull/197, this change pre-lowers high-level CIR for global initializers. High-level CIR: ``` cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { %0 = cir.get_global @_ZL8__ioinit : cir.ptr loc(#loc8) %1 = cir.const(#true) : !cir.bool loc(#loc5) cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () loc(#loc6) } ``` After pre-lowering: ``` cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22class2EInit22 {ast = #cir.vardecl.ast} cir.func internal private @__cxx_global_var_init() { %0 = cir.get_global @_ZL8__ioinit : cir.ptr %1 = cir.const(#true) : !cir.bool cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () cir.return } cir.func private @_GLOBAL__sub_I_static.cpp() { cir.call @__cxx_global_var_init() : () -> () cir.return } ``` There is still work to be done to fully lower to LLVM. E.g, add `llvm.global_ctors` global to list all module initializers like `_GLOBAL__sub_I_static`. This will be handled in a separate change. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 ++ .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../Dialect/Transforms/LoweringPrepare.cpp | 160 ++++++++++++++++++ clang/test/CIR/CodeGen/static.cpp | 63 +++++-- 6 files changed, 231 insertions(+), 12 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e61b3d8e8c8b..27bea737e561 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1280,7 +1280,9 @@ def GlobalOp : CIR_Op<"global", [Symbol, DeclareOpInterfaceMethods:$initial_value, UnitAttr:$constant, - OptionalAttr:$alignment); + OptionalAttr:$alignment, + OptionalAttr:$ast + ); let regions = (region AnyRegion:$ctorRegion); let assemblyFormat = [{ ($sym_visibility^)? diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 68ba9dfaaa64..3a7642f38a03 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -80,6 +80,7 @@ void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, buildDeclInit(CGF, D, DeclAddr); builder.setInsertionPointToEnd(block); builder.create(Addr->getLoc()); + Addr.setAstAttr(mlir::cir::ASTVarDeclAttr::get(builder.getContext(), D)); } CurCGF = nullptr; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 3115491bf9ac..7c3d32690f3b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1232,6 +1232,20 @@ LogicalResult GlobalOp::verify() { return failure(); } + // Verify that the constructor region, if present, has only one block which is + // not empty. + auto &ctorRegion = getCtorRegion(); + if (!ctorRegion.empty()) { + if (!ctorRegion.hasOneBlock()) { + return emitError() << "ctor region must have exactly one block."; + } + + auto &block = ctorRegion.front(); + if (block.empty()) { + return emitError() << "ctor region shall not be empty."; + } + } + if (std::optional alignAttr = getAlignment()) { uint64_t alignment = alignAttr.value(); if (!llvm::isPowerOf2_64(alignment)) diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 27fede4064c7..880542f6d889 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -9,6 +9,7 @@ add_clang_library(MLIRCIRTransforms LINK_LIBS PUBLIC clangAST + clangBasic MLIRAnalysis MLIRIR diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 49c1bff5df4e..053d91e05f57 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -7,18 +7,55 @@ //===----------------------------------------------------------------------===// #include "PassDetail.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Region.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/Mangle.h" +#include "clang/Basic/Module.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/Twine.h" +#include "llvm/Support/Path.h" using namespace mlir; using namespace cir; +static SmallString<128> getTransformedFileName(ModuleOp theModule) { + SmallString<128> FileName; + + if (theModule.getSymName()) { + FileName = llvm::sys::path::filename(theModule.getSymName()->str()); + } + + if (FileName.empty()) + FileName = ""; + + for (size_t i = 0; i < FileName.size(); ++i) { + // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens + // to be the set of C preprocessing numbers. + if (!clang::isPreprocessingNumberBody(FileName[i])) + FileName[i] = '_'; + } + + return FileName; +} + namespace { struct LoweringPreparePass : public LoweringPrepareBase { LoweringPreparePass() = default; void runOnOperation() override; + void runOnOp(Operation *op); + void lowerGlobalOp(GlobalOp op); + + /// Build the function that initializes the specified global + cir::FuncOp buildCXXGlobalVarDeclInitFunc(GlobalOp op); + + /// Build a module init function that calls all the dynamic initializers. + void buildCXXGlobalInitFunc(); + /// /// AST related /// ----------- @@ -28,9 +65,120 @@ struct LoweringPreparePass : public LoweringPrepareBase { /// Tracks current module. ModuleOp theModule; + + /// Tracks existing dynamic initializers. + llvm::StringMap dynamicInitializerNames; + llvm::SmallVector dynamicInitializers; }; } // namespace +cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { + SmallString<256> fnName; + { + std::unique_ptr MangleCtx( + astCtx->createMangleContext()); + llvm::raw_svector_ostream Out(fnName); + auto varDecl = op.getAst()->getAstDecl(); + MangleCtx->mangleDynamicInitializer(varDecl, Out); + // Name numbering + uint32_t cnt = dynamicInitializerNames[fnName]++; + if (cnt) + fnName += "." + llvm::Twine(cnt).str(); + } + + // Create a variable initialization function. + mlir::OpBuilder builder(&getContext()); + builder.setInsertionPointAfter(op); + auto fnType = mlir::cir::FuncType::get( + {}, mlir::cir::VoidType::get(builder.getContext())); + FuncOp f = builder.create(op.getLoc(), fnName, fnType); + f.setLinkageAttr(mlir::cir::GlobalLinkageKindAttr::get( + builder.getContext(), mlir::cir::GlobalLinkageKind::InternalLinkage)); + mlir::SymbolTable::setSymbolVisibility( + f, mlir::SymbolTable::Visibility::Private); + mlir::NamedAttrList attrs; + f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), attrs.getDictionary(builder.getContext()))); + + // move over the initialzation code of the ctor region. + auto &block = op.getCtorRegion().front(); + mlir::Block *EntryBB = f.addEntryBlock(); + EntryBB->getOperations().splice(EntryBB->begin(), block.getOperations(), + block.begin(), std::prev(block.end())); + + // Replace cir.yield with cir.return + builder.setInsertionPointToEnd(EntryBB); + auto &yieldOp = block.getOperations().back(); + assert(isa(yieldOp)); + builder.create(yieldOp.getLoc()); + return f; +} + +void LoweringPreparePass::lowerGlobalOp(GlobalOp op) { + auto &ctorRegion = op.getCtorRegion(); + if (!ctorRegion.empty()) { + // Build a variable initialization function and move the initialzation code + // in the ctor region over. + auto f = buildCXXGlobalVarDeclInitFunc(op); + + // Clear the ctor region + ctorRegion.getBlocks().clear(); + + // Add a function call to the variable initialization function. + dynamicInitializers.push_back(f); + } +} + +void LoweringPreparePass::buildCXXGlobalInitFunc() { + if (dynamicInitializers.empty()) + return; + + SmallString<256> fnName; + // Include the filename in the symbol name. Including "sub_" matches gcc + // and makes sure these symbols appear lexicographically behind the symbols + // with priority emitted above. Module implementation units behave the same + // way as a non-modular TU with imports. + // TODO: check CXX20ModuleInits + if (astCtx->getCurrentNamedModule() && + !astCtx->getCurrentNamedModule()->isModuleImplementation()) { + llvm::raw_svector_ostream Out(fnName); + std::unique_ptr MangleCtx( + astCtx->createMangleContext()); + cast(*MangleCtx) + .mangleModuleInitializer(astCtx->getCurrentNamedModule(), Out); + } else { + fnName += "_GLOBAL__sub_I_"; + fnName += getTransformedFileName(theModule); + } + + mlir::OpBuilder builder(&getContext()); + builder.setInsertionPointToEnd(&theModule.getBodyRegion().back()); + auto fnType = mlir::cir::FuncType::get( + {}, mlir::cir::VoidType::get(builder.getContext())); + FuncOp f = + builder.create(theModule.getLoc(), fnName, fnType); + f.setLinkageAttr(mlir::cir::GlobalLinkageKindAttr::get( + builder.getContext(), mlir::cir::GlobalLinkageKind::ExternalLinkage)); + mlir::SymbolTable::setSymbolVisibility( + f, mlir::SymbolTable::Visibility::Private); + mlir::NamedAttrList attrs; + f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), attrs.getDictionary(builder.getContext()))); + + builder.setInsertionPointToStart(f.addEntryBlock()); + for (auto &f : dynamicInitializers) { + builder.create(f.getLoc(), f); + } + + builder.create(f.getLoc()); +} + +void LoweringPreparePass::runOnOp(Operation *op) { + if (GlobalOp globalOp = cast(op)) { + lowerGlobalOp(globalOp); + return; + } +} void LoweringPreparePass::runOnOperation() { assert(astCtx && "Missing ASTContext, please construct with the right ctor"); @@ -38,6 +186,18 @@ void LoweringPreparePass::runOnOperation() { if (isa<::mlir::ModuleOp>(op)) { theModule = cast<::mlir::ModuleOp>(op); } + + SmallVector opsToTransform; + op->walk([&](Operation *op) { + if (isa(op)) + opsToTransform.push_back(op); + }); + + for (auto *o : opsToTransform) { + runOnOp(o); + } + + buildCXXGlobalInitFunc(); } std::unique_ptr mlir::createLoweringPreparePass() { diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 27b89cb6d53a..6577d44d9efd 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -1,6 +1,7 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: cir-opt %t.cir -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: cir-opt %t.cir -o - | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM class Init { @@ -13,12 +14,52 @@ class Init { static Init __ioinit(true); +static Init __ioinit2(false); -// CIR: module {{.*}} { -// CIR-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) -// CIR-NEXT: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { -// CIR-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr -// CIR-NEXT: %1 = cir.const(#true) : !cir.bool -// CIR-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () -// CIR-NEXT: } -// CIR-NEXT: } +// BEFORE: module {{.*}} { +// BEFORE-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// BEFORE-NEXT: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { +// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// BEFORE-NEXT: %1 = cir.const(#true) : !cir.bool +// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// BEFORE-NEXT: } {ast = #cir.vardecl.ast} +// BEFORE: cir.global "private" internal @_ZL9__ioinit2 = ctor : !ty_22class2EInit22 { +// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// BEFORE-NEXT: %1 = cir.const(#false) : !cir.bool +// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// BEFORE-NEXT: } {ast = #cir.vardecl.ast} +// BEFORE-NEXT: } + + +// AFTER: module {{.*}} { +// AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22class2EInit22 {ast = #cir.vardecl.ast} +// AFTER-NEXT: cir.func internal private @__cxx_global_var_init() +// AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// AFTER-NEXT: %1 = cir.const(#true) : !cir.bool +// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// AFTER-NEXT: cir.return +// AFTER: cir.global "private" internal @_ZL9__ioinit2 = #cir.zero : !ty_22class2EInit22 {ast = #cir.vardecl.ast} +// AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() +// AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// AFTER-NEXT: %1 = cir.const(#false) : !cir.bool +// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// AFTER-NEXT: cir.return +// AFTER: cir.func private @_GLOBAL__sub_I_static.cpp() +// AFTER-NEXT: cir.call @__cxx_global_var_init() : () -> () +// AFTER-NEXT: cir.call @__cxx_global_var_init.1() : () -> () +// AFTER-NEXT: cir.return + + +// LLVM: @_ZL8__ioinit = internal global %class.Init zeroinitializer +// LLVM: @_ZL9__ioinit2 = internal global %class.Init zeroinitializer +// LLVM: define internal void @__cxx_global_var_init() +// LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL8__ioinit, i8 1) +// LLVM-NEXT: ret void +// LLVM: define internal void @__cxx_global_var_init.1() +// LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL9__ioinit2, i8 0) +// LLVM-NEXT: ret void +// LLVM: define void @_GLOBAL__sub_I_static.cpp() +// LLVM-NEXT: call void @__cxx_global_var_init() +// LLVM-NEXT: call void @__cxx_global_var_init.1() +// LLVM-NEXT: ret void From a6ca5dd33c76c0e708174419b9f381f9ada4971e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 16 Aug 2023 14:49:14 -0700 Subject: [PATCH 1147/2301] [CIR][ClangTidy] Add CIR support in clang-tidy (to deprecate cir-tidy) cir-tidy has been our custom hack for using clang-tidy. There's no reason it needs to be that way - add a CIRModule to clang-tidy and make the lifetime checker its first check. Some of the way this was built was inspired in CLANG_TIDY_ENABLE_STATIC_ANALYZER Once we can fully migrate deps from cir-tidy, we should remove it in favor of this. --- clang-tools-extra/clang-tidy/CMakeLists.txt | 6 + clang-tools-extra/clang-tidy/ClangTidy.cpp | 13 ++ .../clang-tidy/ClangTidyForceLinker.h | 7 + .../clang-tidy/cir/CIRASTConsumer.cpp | 183 ++++++++++++++++++ .../clang-tidy/cir/CIRASTConsumer.h | 28 +++ .../clang-tidy/cir/CIRTidyModule.cpp | 34 ++++ .../clang-tidy/cir/CMakeLists.txt | 62 ++++++ clang-tools-extra/clang-tidy/cir/Lifetime.cpp | 28 +++ clang-tools-extra/clang-tidy/cir/Lifetime.h | 28 +++ .../clang-tidy/clang-tidy-config.h.cmake | 2 + .../checkers/cir/lifetime-basic.cpp | 39 ++++ .../clang-tidy/checkers/cir/lit.local.cfg | 2 + 12 files changed, 432 insertions(+) create mode 100644 clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp create mode 100644 clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h create mode 100644 clang-tools-extra/clang-tidy/cir/CIRTidyModule.cpp create mode 100644 clang-tools-extra/clang-tidy/cir/CMakeLists.txt create mode 100644 clang-tools-extra/clang-tidy/cir/Lifetime.cpp create mode 100644 clang-tools-extra/clang-tidy/cir/Lifetime.h create mode 100644 clang-tools-extra/test/clang-tidy/checkers/cir/lifetime-basic.cpp create mode 100644 clang-tools-extra/test/clang-tidy/checkers/cir/lit.local.cfg diff --git a/clang-tools-extra/clang-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/CMakeLists.txt index cab701f0f08d..d998fc56ce21 100644 --- a/clang-tools-extra/clang-tidy/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/CMakeLists.txt @@ -76,6 +76,9 @@ add_subdirectory(performance) add_subdirectory(portability) add_subdirectory(readability) add_subdirectory(zircon) +if(CLANG_ENABLE_CIR) + add_subdirectory(cir) +endif() set(ALL_CLANG_TIDY_CHECKS clangTidyAndroidModule clangTidyAbseilModule @@ -104,6 +107,9 @@ set(ALL_CLANG_TIDY_CHECKS if(CLANG_TIDY_ENABLE_STATIC_ANALYZER) list(APPEND ALL_CLANG_TIDY_CHECKS clangTidyMPIModule) endif() +if(CLANG_ENABLE_CIR) + list(APPEND ALL_CLANG_TIDY_CHECKS clangTidyCIRModule) +endif() set(ALL_CLANG_TIDY_CHECKS ${ALL_CLANG_TIDY_CHECKS} PARENT_SCOPE) # Other subtargets. These may reference ALL_CLANG_TIDY_CHECKS diff --git a/clang-tools-extra/clang-tidy/ClangTidy.cpp b/clang-tools-extra/clang-tidy/ClangTidy.cpp index 959b11777e88..c9b9f18baa4a 100644 --- a/clang-tools-extra/clang-tidy/ClangTidy.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidy.cpp @@ -48,6 +48,10 @@ #include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h" #endif // CLANG_TIDY_ENABLE_STATIC_ANALYZER +#if CLANG_ENABLE_CIR +#include "cir/CIRASTConsumer.h" +#endif + using namespace clang::ast_matchers; using namespace clang::driver; using namespace clang::tooling; @@ -466,6 +470,15 @@ ClangTidyASTConsumerFactory::createASTConsumer( Consumers.push_back(std::move(AnalysisConsumer)); } #endif // CLANG_TIDY_ENABLE_STATIC_ANALYZER + +#if CLANG_ENABLE_CIR + if (Context.isCheckEnabled(cir::LifetimeCheckName)) { + std::unique_ptr CIRConsumer = + std::make_unique(Compiler, File, Context); + Consumers.push_back(std::move(CIRConsumer)); + } +#endif // CLANG_ENABLE_CIR + return std::make_unique( std::move(Consumers), std::move(Profiling), std::move(Finder), std::move(Checks)); diff --git a/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h b/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h index adde9136ff1d..9926571fe989 100644 --- a/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h +++ b/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h @@ -137,6 +137,13 @@ extern volatile int ZirconModuleAnchorSource; static int LLVM_ATTRIBUTE_UNUSED ZirconModuleAnchorDestination = ZirconModuleAnchorSource; +#if CLANG_ENABLE_CIR +// This anchor is used to force the linker to link the CIRModule. +extern volatile int CIRModuleAnchorSource; +static int LLVM_ATTRIBUTE_UNUSED CIRModuleAnchorDestination = + CIRModuleAnchorSource; +#endif + } // namespace clang::tidy #endif diff --git a/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp b/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp new file mode 100644 index 000000000000..3c85872c7bda --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp @@ -0,0 +1,183 @@ +//===--- clang-tidy/cir-tidy/CIRASTConsumer.cpp ---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "CIRASTConsumer.h" + +#include "../utils/OptionsUtils.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "clang/CIR/Dialect/Passes.h" +#include + +using namespace clang; +using namespace clang::tidy; + +namespace clang::tidy::cir { + +/// CIR AST Consumer +CIRASTConsumer::CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, + clang::tidy::ClangTidyContext &Context) + : Context(Context), + OptsView(ClangTidyCheck::OptionsView( + LifetimeCheckName, Context.getOptions().CheckOptions, &Context)) { + // Setup CIR codegen options via config specified information. + CI.getCodeGenOpts().ClangIRBuildDeferredThreshold = + OptsView.get("CodeGenBuildDeferredThreshold", 500U); + CI.getCodeGenOpts().ClangIRSkipFunctionsFromSystemHeaders = + OptsView.get("CodeGenSkipFunctionsFromSystemHeaders", false); + + Gen = std::make_unique<::cir::CIRGenerator>(CI.getDiagnostics(), nullptr, + CI.getCodeGenOpts()); +} + +bool CIRASTConsumer::HandleTopLevelDecl(DeclGroupRef D) { + PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), + AstContext->getSourceManager(), + "CIR generation of declaration"); + Gen->HandleTopLevelDecl(D); + return true; +} + +void CIRASTConsumer::Initialize(ASTContext &Context) { + AstContext = &Context; + Gen->Initialize(Context); +} + +void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { + Gen->HandleTranslationUnit(C); + Gen->verifyModule(); + + mlir::ModuleOp mlirMod = Gen->getModule(); + std::unique_ptr mlirCtx = Gen->takeContext(); + + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(/*prettyForm=*/false); + + clang::SourceManager &clangSrcMgr = C.getSourceManager(); + FileID MainFileID = clangSrcMgr.getMainFileID(); + + llvm::MemoryBufferRef MainFileBuf = clangSrcMgr.getBufferOrFake(MainFileID); + std::unique_ptr FileBuf = + llvm::MemoryBuffer::getMemBuffer(MainFileBuf); + + llvm::SourceMgr llvmSrcMgr; + llvmSrcMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); + + class CIRTidyDiagnosticHandler : public mlir::SourceMgrDiagnosticHandler { + clang::tidy::ClangTidyContext &tidyCtx; + clang::SourceManager &clangSrcMgr; + + clang::SourceLocation getClangFromFileLineCol(mlir::FileLineColLoc loc) { + clang::SourceLocation clangLoc; + FileManager &fileMgr = clangSrcMgr.getFileManager(); + assert(loc && "not a valid mlir::FileLineColLoc"); + // The column and line may be zero to represent unknown column and/or + // unknown line/column information. + if (loc.getLine() == 0 || loc.getColumn() == 0) { + llvm_unreachable("How should we workaround this?"); + return clangLoc; + } + if (auto FE = fileMgr.getFile(loc.getFilename())) { + return clangSrcMgr.translateFileLineCol(*FE, loc.getLine(), + loc.getColumn()); + } + llvm_unreachable("location doesn't map to a file?"); + } + + clang::SourceLocation getClangSrcLoc(mlir::Location loc) { + // Direct maps into a clang::SourceLocation. + if (auto fileLoc = loc.dyn_cast()) { + return getClangFromFileLineCol(fileLoc); + } + + // FusedLoc needs to be decomposed but the canonical one + // is the first location, we handle source ranges somewhere + // else. + if (auto fileLoc = loc.dyn_cast()) { + auto locArray = fileLoc.getLocations(); + assert(locArray.size() > 0 && "expected multiple locs"); + return getClangFromFileLineCol( + locArray[0].dyn_cast()); + } + + // Many loc styles are yet to be handled. + if (auto fileLoc = loc.dyn_cast()) { + llvm_unreachable("mlir::UnknownLoc not implemented!"); + } + if (auto fileLoc = loc.dyn_cast()) { + llvm_unreachable("mlir::CallSiteLoc not implemented!"); + } + llvm_unreachable("Unknown location style"); + } + + clang::DiagnosticIDs::Level + translateToClangDiagLevel(const mlir::DiagnosticSeverity &sev) { + switch (sev) { + case mlir::DiagnosticSeverity::Note: + return clang::DiagnosticIDs::Level::Note; + case mlir::DiagnosticSeverity::Warning: + return clang::DiagnosticIDs::Level::Warning; + case mlir::DiagnosticSeverity::Error: + return clang::DiagnosticIDs::Level::Error; + case mlir::DiagnosticSeverity::Remark: + return clang::DiagnosticIDs::Level::Remark; + } + llvm_unreachable("should not get here!"); + } + + public: + void emitClangTidyDiagnostic(mlir::Diagnostic &diag) { + auto clangBeginLoc = getClangSrcLoc(diag.getLocation()); + tidyCtx.diag(LifetimeCheckName, clangBeginLoc, diag.str(), + translateToClangDiagLevel(diag.getSeverity())); + for (const auto ¬e : diag.getNotes()) { + auto clangNoteBeginLoc = getClangSrcLoc(note.getLocation()); + tidyCtx.diag(LifetimeCheckName, clangNoteBeginLoc, note.str(), + translateToClangDiagLevel(note.getSeverity())); + } + } + + CIRTidyDiagnosticHandler(llvm::SourceMgr &mgr, mlir::MLIRContext *ctx, + clang::tidy::ClangTidyContext &tidyContext, + clang::SourceManager &clangMgr, + ShouldShowLocFn &&shouldShowLocFn = {}) + : SourceMgrDiagnosticHandler(mgr, ctx, llvm::errs(), + std::move(shouldShowLocFn)), + tidyCtx(tidyContext), clangSrcMgr(clangMgr) { + setHandler( + [this](mlir::Diagnostic &diag) { emitClangTidyDiagnostic(diag); }); + } + ~CIRTidyDiagnosticHandler() = default; + }; + + // Use a custom diagnostic handler that can allow both regular printing to + // stderr but also populates clang-tidy context with diagnostics (and allow + // for instance, diagnostics to be later converted to YAML). + CIRTidyDiagnosticHandler sourceMgrHandler(llvmSrcMgr, mlirCtx.get(), Context, + clangSrcMgr); + + mlir::PassManager pm(mlirCtx.get()); + pm.addPass(mlir::createMergeCleanupsPass()); + + auto remarks = + utils::options::parseStringList(OptsView.get("RemarksList", "")); + auto hist = + utils::options::parseStringList(OptsView.get("HistoryList", "all")); + auto hLimit = OptsView.get("HistLimit", 1U); + + if (Context.isCheckEnabled(LifetimeCheckName)) + pm.addPass(mlir::createLifetimeCheckPass(remarks, hist, hLimit, &C)); + + bool Result = !mlir::failed(pm.run(mlirMod)); + if (!Result) + llvm::report_fatal_error( + "The pass manager failed to run pass on the module!"); +} +} // namespace clang::tidy::cir diff --git a/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h b/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h new file mode 100644 index 000000000000..298e8398c0c9 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h @@ -0,0 +1,28 @@ +#include "../ClangTidyDiagnosticConsumer.h" +#include "ClangTidyCheck.h" +#include "clang/AST/ASTContext.h" +#include "clang/CIR/CIRGenerator.h" +#include "clang/Frontend/CompilerInstance.h" + +using namespace clang; + +namespace clang::tidy::cir { + +constexpr const char *LifetimeCheckName = "cir-lifetime-check"; + +class CIRASTConsumer : public ASTConsumer { +public: + CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, + clang::tidy::ClangTidyContext &Context); + +private: + void Initialize(ASTContext &Context) override; + void HandleTranslationUnit(ASTContext &C) override; + bool HandleTopLevelDecl(DeclGroupRef D) override; + std::unique_ptr<::cir::CIRGenerator> Gen; + ASTContext *AstContext{nullptr}; + clang::tidy::ClangTidyContext &Context; + clang::tidy::ClangTidyCheck::OptionsView OptsView; +}; + +} // namespace clang::tidy::cir diff --git a/clang-tools-extra/clang-tidy/cir/CIRTidyModule.cpp b/clang-tools-extra/clang-tidy/cir/CIRTidyModule.cpp new file mode 100644 index 000000000000..0c54cde3d0f0 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir/CIRTidyModule.cpp @@ -0,0 +1,34 @@ +//===--- CIRTidyModule.cpp - clang-tidy -----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "../ClangTidy.h" +#include "../ClangTidyModule.h" +#include "../ClangTidyModuleRegistry.h" +#include "Lifetime.h" + +namespace clang::tidy { +namespace cir { + +class CIRModule : public ClangTidyModule { +public: + void addCheckFactories(ClangTidyCheckFactories &CheckFactories) override { + CheckFactories.registerCheck("cir-lifetime-check"); + } +}; + +} // namespace cir + +// Register the CIRTidyModule using this statically initialized variable. +static ClangTidyModuleRegistry::Add + X("cir-module", "Adds ClangIR (CIR) based clang-tidy checks."); + +// This anchor is used to force the linker to link in the generated object file +// and thus register the CIRModule. +volatile int CIRModuleAnchorSource = 0; + +} // namespace clang::tidy diff --git a/clang-tools-extra/clang-tidy/cir/CMakeLists.txt b/clang-tools-extra/clang-tidy/cir/CMakeLists.txt new file mode 100644 index 000000000000..f28daceb4a24 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir/CMakeLists.txt @@ -0,0 +1,62 @@ +set(LLVM_LINK_COMPONENTS + FrontendOpenMP + Support + ) + +include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/.. ) +include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) +include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_clang_library(clangTidyCIRModule + Lifetime.cpp + CIRASTConsumer.cpp + CIRTidyModule.cpp + + LINK_LIBS + clangTidy + clangTidyUtils + + DEPENDS + omp_gen + + LINK_LIBS + clangASTMatchers + clangCIR + clangFrontend + clangSerialization + clangTidy + clangTidyUtils + ${dialect_libs} + MLIRCIR + MLIRCIRTransforms + MLIRAffineToStandard + MLIRAnalysis + MLIRIR + MLIRLLVMCommonConversion + MLIRLLVMDialect + MLIRLLVMToLLVMIRTranslation + MLIRMemRefDialect + MLIRMemRefToLLVM + MLIRParser + MLIRPass + MLIRSideEffectInterfaces + MLIRSCFToControlFlow + MLIRFuncToLLVM + MLIRSupport + MLIRMemRefDialect + MLIRTargetLLVMIRExport + MLIRTransforms + ) + +clang_target_link_libraries(clangTidyCIRModule + PRIVATE + clangAnalysis + clangAST + clangASTMatchers + clangBasic + clangLex + clangTooling + clangToolingCore + ) diff --git a/clang-tools-extra/clang-tidy/cir/Lifetime.cpp b/clang-tools-extra/clang-tidy/cir/Lifetime.cpp new file mode 100644 index 000000000000..93aec96271ee --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir/Lifetime.cpp @@ -0,0 +1,28 @@ +//===--- Lifetime.cpp - clang-tidy ----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Lifetime.h" +#include "clang/AST/ASTContext.h" +#include "clang/ASTMatchers/ASTMatchFinder.h" +#include "clang/Tooling/FixIt.h" + +using namespace clang::ast_matchers; + +namespace clang::tidy::cir { + +void Lifetime::registerMatchers(MatchFinder *Finder) { + // Finder->addMatcher(callExpr().bind("CE"), this); + // assert(0 && "BOOM0!"); +} + +void Lifetime::check(const MatchFinder::MatchResult &Result) { + // assert(0 && "BOOM1!"); +} + +void Lifetime::onEndOfTranslationUnit() { assert(0 && "BOOM2!"); } +} // namespace clang::tidy::cir diff --git a/clang-tools-extra/clang-tidy/cir/Lifetime.h b/clang-tools-extra/clang-tidy/cir/Lifetime.h new file mode 100644 index 000000000000..684f1e09f698 --- /dev/null +++ b/clang-tools-extra/clang-tidy/cir/Lifetime.h @@ -0,0 +1,28 @@ +//===--- Lifetime.h - clang-tidy --------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIR_LIFETIME_H +#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIR_LIFETIME_H + +#include "../ClangTidyCheck.h" +#include + +namespace clang::tidy::cir { + +class Lifetime : public ClangTidyCheck { +public: + Lifetime(StringRef Name, ClangTidyContext *Context) + : ClangTidyCheck(Name, Context) {} + void registerMatchers(ast_matchers::MatchFinder *Finder) override; + void check(const ast_matchers::MatchFinder::MatchResult &Result) override; + void onEndOfTranslationUnit() override; +}; + +} // namespace clang::tidy::cir + +#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIR_LIFETIME_H diff --git a/clang-tools-extra/clang-tidy/clang-tidy-config.h.cmake b/clang-tools-extra/clang-tidy/clang-tidy-config.h.cmake index f4d1a4b38004..7397c1a65249 100644 --- a/clang-tools-extra/clang-tidy/clang-tidy-config.h.cmake +++ b/clang-tools-extra/clang-tidy/clang-tidy-config.h.cmake @@ -7,4 +7,6 @@ #cmakedefine01 CLANG_TIDY_ENABLE_STATIC_ANALYZER +#cmakedefine01 CLANG_ENABLE_CIR + #endif diff --git a/clang-tools-extra/test/clang-tidy/checkers/cir/lifetime-basic.cpp b/clang-tools-extra/test/clang-tidy/checkers/cir/lifetime-basic.cpp new file mode 100644 index 000000000000..c65781190663 --- /dev/null +++ b/clang-tools-extra/test/clang-tidy/checkers/cir/lifetime-basic.cpp @@ -0,0 +1,39 @@ +// RUN: %check_clang_tidy %s cir-lifetime-check %t \ +// RUN: --export-fixes=%t.yaml \ +// RUN: -config='{CheckOptions: \ +// RUN: [{key: cir-lifetime-check.RemarksList, value: "all"}, \ +// RUN: {key: cir-lifetime-check.HistLimit, value: "1"}, \ +// RUN: {key: cir-lifetime-check.CodeGenBuildDeferredThreshold, value: "500"}, \ +// RUN: {key: cir-lifetime-check.CodeGenSkipFunctionsFromSystemHeaders, value: "false"}, \ +// RUN: {key: cir-lifetime-check.HistoryList, value: "invalid;null"}]}' \ +// RUN: -- +// RUN: FileCheck -input-file=%t.yaml -check-prefix=CHECK-YAML %s + +int *p0() { + int *p = nullptr; + { + int x = 0; + p = &x; + *p = 42; + } + *p = 42; // CHECK-MESSAGES: :[[@LINE]]:4: warning: use of invalid pointer 'p' + return p; +} + +// CHECK-YAML: DiagnosticMessage: +// CHECK-YAML: Message: 'pset => { x }' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Level: Remark + +// CHECK-YAML: DiagnosticMessage: +// CHECK-YAML: Message: 'pset => { invalid }' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Level: Remark + +// CHECK-YAML: DiagnosticMessage: +// CHECK-YAML: Message: 'use of invalid pointer ''p''' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Notes: +// CHECK-YAML: - Message: 'pointee ''x'' invalidated at end of scope' +// CHECK-YAML: Replacements: [] +// CHECK-YAML: Level: Warning \ No newline at end of file diff --git a/clang-tools-extra/test/clang-tidy/checkers/cir/lit.local.cfg b/clang-tools-extra/test/clang-tidy/checkers/cir/lit.local.cfg new file mode 100644 index 000000000000..e479c3e74cb6 --- /dev/null +++ b/clang-tools-extra/test/clang-tidy/checkers/cir/lit.local.cfg @@ -0,0 +1,2 @@ +if not config.clang_enable_cir: + config.unsupported = True \ No newline at end of file From 149c035ed86ea1169fb3794398f420522efa3b6d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 18 Aug 2023 17:38:42 -0700 Subject: [PATCH 1148/2301] [CIR][CIRTidy] Break CIRASTConsumer dep from clang::tidy::utils --- clang-tools-extra/clang-tidy/ClangTidy.cpp | 18 ++++++++++++++- .../clang-tidy/cir/CIRASTConsumer.cpp | 23 ++++--------------- .../clang-tidy/cir/CIRASTConsumer.h | 9 ++++++-- .../clang-tidy/cir/CMakeLists.txt | 11 +++------ 4 files changed, 32 insertions(+), 29 deletions(-) diff --git a/clang-tools-extra/clang-tidy/ClangTidy.cpp b/clang-tools-extra/clang-tidy/ClangTidy.cpp index c9b9f18baa4a..09bdca4b200f 100644 --- a/clang-tools-extra/clang-tidy/ClangTidy.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidy.cpp @@ -21,6 +21,7 @@ #include "ClangTidyProfiling.h" #include "ExpandModularHeadersPPCallbacks.h" #include "clang-tidy-config.h" +#include "utils/OptionsUtils.h" #include "clang/AST/ASTConsumer.h" #include "clang/ASTMatchers/ASTMatchFinder.h" #include "clang/Format/Format.h" @@ -473,8 +474,23 @@ ClangTidyASTConsumerFactory::createASTConsumer( #if CLANG_ENABLE_CIR if (Context.isCheckEnabled(cir::LifetimeCheckName)) { + auto OV = ClangTidyCheck::OptionsView( + cir::LifetimeCheckName, Context.getOptions().CheckOptions, &Context); + // Setup CIR codegen options via config specified information. + Compiler.getCodeGenOpts().ClangIRBuildDeferredThreshold = + OV.get("CodeGenBuildDeferredThreshold", 500U); + Compiler.getCodeGenOpts().ClangIRSkipFunctionsFromSystemHeaders = + OV.get("CodeGenSkipFunctionsFromSystemHeaders", false); + + cir::CIROpts opts; + opts.RemarksList = + utils::options::parseStringList(OV.get("RemarksList", "")); + opts.HistoryList = + utils::options::parseStringList(OV.get("HistoryList", "all")); + opts.HistLimit = OV.get("HistLimit", 1U); + std::unique_ptr CIRConsumer = - std::make_unique(Compiler, File, Context); + std::make_unique(Compiler, File, Context, opts); Consumers.push_back(std::move(CIRConsumer)); } #endif // CLANG_ENABLE_CIR diff --git a/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp b/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp index 3c85872c7bda..8520be831ec3 100644 --- a/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp +++ b/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp @@ -8,7 +8,6 @@ #include "CIRASTConsumer.h" -#include "../utils/OptionsUtils.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Pass/Pass.h" @@ -23,16 +22,9 @@ namespace clang::tidy::cir { /// CIR AST Consumer CIRASTConsumer::CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, - clang::tidy::ClangTidyContext &Context) - : Context(Context), - OptsView(ClangTidyCheck::OptionsView( - LifetimeCheckName, Context.getOptions().CheckOptions, &Context)) { - // Setup CIR codegen options via config specified information. - CI.getCodeGenOpts().ClangIRBuildDeferredThreshold = - OptsView.get("CodeGenBuildDeferredThreshold", 500U); - CI.getCodeGenOpts().ClangIRSkipFunctionsFromSystemHeaders = - OptsView.get("CodeGenSkipFunctionsFromSystemHeaders", false); - + clang::tidy::ClangTidyContext &Context, + CIROpts &O) + : Context(Context), cirOpts(O) { Gen = std::make_unique<::cir::CIRGenerator>(CI.getDiagnostics(), nullptr, CI.getCodeGenOpts()); } @@ -166,14 +158,9 @@ void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { mlir::PassManager pm(mlirCtx.get()); pm.addPass(mlir::createMergeCleanupsPass()); - auto remarks = - utils::options::parseStringList(OptsView.get("RemarksList", "")); - auto hist = - utils::options::parseStringList(OptsView.get("HistoryList", "all")); - auto hLimit = OptsView.get("HistLimit", 1U); - if (Context.isCheckEnabled(LifetimeCheckName)) - pm.addPass(mlir::createLifetimeCheckPass(remarks, hist, hLimit, &C)); + pm.addPass(mlir::createLifetimeCheckPass( + cirOpts.RemarksList, cirOpts.HistoryList, cirOpts.HistLimit, &C)); bool Result = !mlir::failed(pm.run(mlirMod)); if (!Result) diff --git a/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h b/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h index 298e8398c0c9..8356ed2022d1 100644 --- a/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h +++ b/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h @@ -9,11 +9,16 @@ using namespace clang; namespace clang::tidy::cir { constexpr const char *LifetimeCheckName = "cir-lifetime-check"; +struct CIROpts { + std::vector RemarksList; + std::vector HistoryList; + unsigned HistLimit; +}; class CIRASTConsumer : public ASTConsumer { public: CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, - clang::tidy::ClangTidyContext &Context); + clang::tidy::ClangTidyContext &Context, CIROpts &cirOpts); private: void Initialize(ASTContext &Context) override; @@ -22,7 +27,7 @@ class CIRASTConsumer : public ASTConsumer { std::unique_ptr<::cir::CIRGenerator> Gen; ASTContext *AstContext{nullptr}; clang::tidy::ClangTidyContext &Context; - clang::tidy::ClangTidyCheck::OptionsView OptsView; + CIROpts cirOpts; }; } // namespace clang::tidy::cir diff --git a/clang-tools-extra/clang-tidy/cir/CMakeLists.txt b/clang-tools-extra/clang-tidy/cir/CMakeLists.txt index f28daceb4a24..2c6f7d0dfdeb 100644 --- a/clang-tools-extra/clang-tidy/cir/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/cir/CMakeLists.txt @@ -14,20 +14,12 @@ add_clang_library(clangTidyCIRModule CIRASTConsumer.cpp CIRTidyModule.cpp - LINK_LIBS - clangTidy - clangTidyUtils - - DEPENDS - omp_gen - LINK_LIBS clangASTMatchers clangCIR clangFrontend clangSerialization clangTidy - clangTidyUtils ${dialect_libs} MLIRCIR MLIRCIRTransforms @@ -48,6 +40,9 @@ add_clang_library(clangTidyCIRModule MLIRMemRefDialect MLIRTargetLLVMIRExport MLIRTransforms + + DEPENDS + omp_gen ) clang_target_link_libraries(clangTidyCIRModule From 62f58a6ddaece558f6506f9a03fb2c74eb390618 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 18 Aug 2023 20:15:55 -0700 Subject: [PATCH 1149/2301] [CIR][ClangTidy] Move dependence up and remove CIRASTConsumer files --- clang-tools-extra/clang-tidy/CMakeLists.txt | 31 +++ clang-tools-extra/clang-tidy/ClangTidy.cpp | 187 +++++++++++++++++- .../clang-tidy/cir/CIRASTConsumer.cpp | 170 ---------------- .../clang-tidy/cir/CIRASTConsumer.h | 33 ---- .../clang-tidy/cir/CMakeLists.txt | 1 - 5 files changed, 216 insertions(+), 206 deletions(-) delete mode 100644 clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp delete mode 100644 clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h diff --git a/clang-tools-extra/clang-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/CMakeLists.txt index d998fc56ce21..5bfd445cc891 100644 --- a/clang-tools-extra/clang-tidy/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/CMakeLists.txt @@ -3,6 +3,14 @@ set(LLVM_LINK_COMPONENTS Support ) +if(CLANG_ENABLE_CIR) + include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/.. ) + include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) + include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) + + get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +endif() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/clang-tidy-config.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/clang-tidy-config.h) @@ -19,6 +27,29 @@ add_clang_library(clangTidy STATIC GlobList.cpp NoLintDirectiveHandler.cpp + LINK_LIBS + clangCIR + ${dialect_libs} + MLIRCIR + MLIRCIRTransforms + MLIRAffineToStandard + MLIRAnalysis + MLIRIR + MLIRLLVMCommonConversion + MLIRLLVMDialect + MLIRLLVMToLLVMIRTranslation + MLIRMemRefDialect + MLIRMemRefToLLVM + MLIRParser + MLIRPass + MLIRSideEffectInterfaces + MLIRSCFToControlFlow + MLIRFuncToLLVM + MLIRSupport + MLIRMemRefDialect + MLIRTargetLLVMIRExport + MLIRTransforms + DEPENDS ClangSACheckers omp_gen diff --git a/clang-tools-extra/clang-tidy/ClangTidy.cpp b/clang-tools-extra/clang-tidy/ClangTidy.cpp index 09bdca4b200f..550324ac3d12 100644 --- a/clang-tools-extra/clang-tidy/ClangTidy.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidy.cpp @@ -50,8 +50,15 @@ #endif // CLANG_TIDY_ENABLE_STATIC_ANALYZER #if CLANG_ENABLE_CIR -#include "cir/CIRASTConsumer.h" -#endif +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "clang/AST/ASTContext.h" +#include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/Dialect/Passes.h" +#include +#endif // CLANG_ENABLE_CIR using namespace clang::ast_matchers; using namespace clang::driver; @@ -97,6 +104,182 @@ class AnalyzerDiagnosticConsumer : public ento::PathDiagnosticConsumer { }; #endif // CLANG_TIDY_ENABLE_STATIC_ANALYZER +#if CLANG_ENABLE_CIR +namespace cir { + +constexpr const char *LifetimeCheckName = "cir-lifetime-check"; +struct CIROpts { + std::vector RemarksList; + std::vector HistoryList; + unsigned HistLimit; +}; + +class CIRASTConsumer : public ASTConsumer { +public: + CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, + clang::tidy::ClangTidyContext &Context, CIROpts &cirOpts); + +private: + void Initialize(ASTContext &Context) override; + void HandleTranslationUnit(ASTContext &C) override; + bool HandleTopLevelDecl(DeclGroupRef D) override; + std::unique_ptr<::cir::CIRGenerator> Gen; + ASTContext *AstContext{nullptr}; + clang::tidy::ClangTidyContext &Context; + CIROpts cirOpts; +}; + +/// CIR AST Consumer +CIRASTConsumer::CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, + clang::tidy::ClangTidyContext &Context, + CIROpts &O) + : Context(Context), cirOpts(O) { + Gen = std::make_unique<::cir::CIRGenerator>(CI.getDiagnostics(), nullptr, + CI.getCodeGenOpts()); +} + +bool CIRASTConsumer::HandleTopLevelDecl(DeclGroupRef D) { + PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), + AstContext->getSourceManager(), + "CIR generation of declaration"); + Gen->HandleTopLevelDecl(D); + return true; +} + +void CIRASTConsumer::Initialize(ASTContext &Context) { + AstContext = &Context; + Gen->Initialize(Context); +} + +void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { + Gen->HandleTranslationUnit(C); + Gen->verifyModule(); + + mlir::ModuleOp mlirMod = Gen->getModule(); + std::unique_ptr mlirCtx = Gen->takeContext(); + + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(/*prettyForm=*/false); + + clang::SourceManager &clangSrcMgr = C.getSourceManager(); + FileID MainFileID = clangSrcMgr.getMainFileID(); + + llvm::MemoryBufferRef MainFileBuf = clangSrcMgr.getBufferOrFake(MainFileID); + std::unique_ptr FileBuf = + llvm::MemoryBuffer::getMemBuffer(MainFileBuf); + + llvm::SourceMgr llvmSrcMgr; + llvmSrcMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); + + class CIRTidyDiagnosticHandler : public mlir::SourceMgrDiagnosticHandler { + clang::tidy::ClangTidyContext &tidyCtx; + clang::SourceManager &clangSrcMgr; + + clang::SourceLocation getClangFromFileLineCol(mlir::FileLineColLoc loc) { + clang::SourceLocation clangLoc; + FileManager &fileMgr = clangSrcMgr.getFileManager(); + assert(loc && "not a valid mlir::FileLineColLoc"); + // The column and line may be zero to represent unknown column and/or + // unknown line/column information. + if (loc.getLine() == 0 || loc.getColumn() == 0) { + llvm_unreachable("How should we workaround this?"); + return clangLoc; + } + if (auto FE = fileMgr.getFile(loc.getFilename())) { + return clangSrcMgr.translateFileLineCol(*FE, loc.getLine(), + loc.getColumn()); + } + llvm_unreachable("location doesn't map to a file?"); + } + + clang::SourceLocation getClangSrcLoc(mlir::Location loc) { + // Direct maps into a clang::SourceLocation. + if (auto fileLoc = loc.dyn_cast()) { + return getClangFromFileLineCol(fileLoc); + } + + // FusedLoc needs to be decomposed but the canonical one + // is the first location, we handle source ranges somewhere + // else. + if (auto fileLoc = loc.dyn_cast()) { + auto locArray = fileLoc.getLocations(); + assert(locArray.size() > 0 && "expected multiple locs"); + return getClangFromFileLineCol( + locArray[0].dyn_cast()); + } + + // Many loc styles are yet to be handled. + if (auto fileLoc = loc.dyn_cast()) { + llvm_unreachable("mlir::UnknownLoc not implemented!"); + } + if (auto fileLoc = loc.dyn_cast()) { + llvm_unreachable("mlir::CallSiteLoc not implemented!"); + } + llvm_unreachable("Unknown location style"); + } + + clang::DiagnosticIDs::Level + translateToClangDiagLevel(const mlir::DiagnosticSeverity &sev) { + switch (sev) { + case mlir::DiagnosticSeverity::Note: + return clang::DiagnosticIDs::Level::Note; + case mlir::DiagnosticSeverity::Warning: + return clang::DiagnosticIDs::Level::Warning; + case mlir::DiagnosticSeverity::Error: + return clang::DiagnosticIDs::Level::Error; + case mlir::DiagnosticSeverity::Remark: + return clang::DiagnosticIDs::Level::Remark; + } + llvm_unreachable("should not get here!"); + } + + public: + void emitClangTidyDiagnostic(mlir::Diagnostic &diag) { + auto clangBeginLoc = getClangSrcLoc(diag.getLocation()); + tidyCtx.diag(LifetimeCheckName, clangBeginLoc, diag.str(), + translateToClangDiagLevel(diag.getSeverity())); + for (const auto ¬e : diag.getNotes()) { + auto clangNoteBeginLoc = getClangSrcLoc(note.getLocation()); + tidyCtx.diag(LifetimeCheckName, clangNoteBeginLoc, note.str(), + translateToClangDiagLevel(note.getSeverity())); + } + } + + CIRTidyDiagnosticHandler(llvm::SourceMgr &mgr, mlir::MLIRContext *ctx, + clang::tidy::ClangTidyContext &tidyContext, + clang::SourceManager &clangMgr, + ShouldShowLocFn &&shouldShowLocFn = {}) + : SourceMgrDiagnosticHandler(mgr, ctx, llvm::errs(), + std::move(shouldShowLocFn)), + tidyCtx(tidyContext), clangSrcMgr(clangMgr) { + setHandler( + [this](mlir::Diagnostic &diag) { emitClangTidyDiagnostic(diag); }); + } + ~CIRTidyDiagnosticHandler() = default; + }; + + // Use a custom diagnostic handler that can allow both regular printing to + // stderr but also populates clang-tidy context with diagnostics (and allow + // for instance, diagnostics to be later converted to YAML). + CIRTidyDiagnosticHandler sourceMgrHandler(llvmSrcMgr, mlirCtx.get(), Context, + clangSrcMgr); + + mlir::PassManager pm(mlirCtx.get()); + pm.addPass(mlir::createMergeCleanupsPass()); + + if (Context.isCheckEnabled(LifetimeCheckName)) + pm.addPass(mlir::createLifetimeCheckPass( + cirOpts.RemarksList, cirOpts.HistoryList, cirOpts.HistLimit, &C)); + + bool Result = !mlir::failed(pm.run(mlirMod)); + if (!Result) + llvm::report_fatal_error( + "The pass manager failed to run pass on the module!"); +} +} // namespace cir + +#endif + class ErrorReporter { public: ErrorReporter(ClangTidyContext &Context, FixBehaviour ApplyFixes, diff --git a/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp b/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp deleted file mode 100644 index 8520be831ec3..000000000000 --- a/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.cpp +++ /dev/null @@ -1,170 +0,0 @@ -//===--- clang-tidy/cir-tidy/CIRASTConsumer.cpp ---------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "CIRASTConsumer.h" - -#include "mlir/IR/BuiltinOps.h" -#include "mlir/IR/MLIRContext.h" -#include "mlir/Pass/Pass.h" -#include "mlir/Pass/PassManager.h" -#include "clang/CIR/Dialect/Passes.h" -#include - -using namespace clang; -using namespace clang::tidy; - -namespace clang::tidy::cir { - -/// CIR AST Consumer -CIRASTConsumer::CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, - clang::tidy::ClangTidyContext &Context, - CIROpts &O) - : Context(Context), cirOpts(O) { - Gen = std::make_unique<::cir::CIRGenerator>(CI.getDiagnostics(), nullptr, - CI.getCodeGenOpts()); -} - -bool CIRASTConsumer::HandleTopLevelDecl(DeclGroupRef D) { - PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), - AstContext->getSourceManager(), - "CIR generation of declaration"); - Gen->HandleTopLevelDecl(D); - return true; -} - -void CIRASTConsumer::Initialize(ASTContext &Context) { - AstContext = &Context; - Gen->Initialize(Context); -} - -void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { - Gen->HandleTranslationUnit(C); - Gen->verifyModule(); - - mlir::ModuleOp mlirMod = Gen->getModule(); - std::unique_ptr mlirCtx = Gen->takeContext(); - - mlir::OpPrintingFlags flags; - flags.enableDebugInfo(/*prettyForm=*/false); - - clang::SourceManager &clangSrcMgr = C.getSourceManager(); - FileID MainFileID = clangSrcMgr.getMainFileID(); - - llvm::MemoryBufferRef MainFileBuf = clangSrcMgr.getBufferOrFake(MainFileID); - std::unique_ptr FileBuf = - llvm::MemoryBuffer::getMemBuffer(MainFileBuf); - - llvm::SourceMgr llvmSrcMgr; - llvmSrcMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); - - class CIRTidyDiagnosticHandler : public mlir::SourceMgrDiagnosticHandler { - clang::tidy::ClangTidyContext &tidyCtx; - clang::SourceManager &clangSrcMgr; - - clang::SourceLocation getClangFromFileLineCol(mlir::FileLineColLoc loc) { - clang::SourceLocation clangLoc; - FileManager &fileMgr = clangSrcMgr.getFileManager(); - assert(loc && "not a valid mlir::FileLineColLoc"); - // The column and line may be zero to represent unknown column and/or - // unknown line/column information. - if (loc.getLine() == 0 || loc.getColumn() == 0) { - llvm_unreachable("How should we workaround this?"); - return clangLoc; - } - if (auto FE = fileMgr.getFile(loc.getFilename())) { - return clangSrcMgr.translateFileLineCol(*FE, loc.getLine(), - loc.getColumn()); - } - llvm_unreachable("location doesn't map to a file?"); - } - - clang::SourceLocation getClangSrcLoc(mlir::Location loc) { - // Direct maps into a clang::SourceLocation. - if (auto fileLoc = loc.dyn_cast()) { - return getClangFromFileLineCol(fileLoc); - } - - // FusedLoc needs to be decomposed but the canonical one - // is the first location, we handle source ranges somewhere - // else. - if (auto fileLoc = loc.dyn_cast()) { - auto locArray = fileLoc.getLocations(); - assert(locArray.size() > 0 && "expected multiple locs"); - return getClangFromFileLineCol( - locArray[0].dyn_cast()); - } - - // Many loc styles are yet to be handled. - if (auto fileLoc = loc.dyn_cast()) { - llvm_unreachable("mlir::UnknownLoc not implemented!"); - } - if (auto fileLoc = loc.dyn_cast()) { - llvm_unreachable("mlir::CallSiteLoc not implemented!"); - } - llvm_unreachable("Unknown location style"); - } - - clang::DiagnosticIDs::Level - translateToClangDiagLevel(const mlir::DiagnosticSeverity &sev) { - switch (sev) { - case mlir::DiagnosticSeverity::Note: - return clang::DiagnosticIDs::Level::Note; - case mlir::DiagnosticSeverity::Warning: - return clang::DiagnosticIDs::Level::Warning; - case mlir::DiagnosticSeverity::Error: - return clang::DiagnosticIDs::Level::Error; - case mlir::DiagnosticSeverity::Remark: - return clang::DiagnosticIDs::Level::Remark; - } - llvm_unreachable("should not get here!"); - } - - public: - void emitClangTidyDiagnostic(mlir::Diagnostic &diag) { - auto clangBeginLoc = getClangSrcLoc(diag.getLocation()); - tidyCtx.diag(LifetimeCheckName, clangBeginLoc, diag.str(), - translateToClangDiagLevel(diag.getSeverity())); - for (const auto ¬e : diag.getNotes()) { - auto clangNoteBeginLoc = getClangSrcLoc(note.getLocation()); - tidyCtx.diag(LifetimeCheckName, clangNoteBeginLoc, note.str(), - translateToClangDiagLevel(note.getSeverity())); - } - } - - CIRTidyDiagnosticHandler(llvm::SourceMgr &mgr, mlir::MLIRContext *ctx, - clang::tidy::ClangTidyContext &tidyContext, - clang::SourceManager &clangMgr, - ShouldShowLocFn &&shouldShowLocFn = {}) - : SourceMgrDiagnosticHandler(mgr, ctx, llvm::errs(), - std::move(shouldShowLocFn)), - tidyCtx(tidyContext), clangSrcMgr(clangMgr) { - setHandler( - [this](mlir::Diagnostic &diag) { emitClangTidyDiagnostic(diag); }); - } - ~CIRTidyDiagnosticHandler() = default; - }; - - // Use a custom diagnostic handler that can allow both regular printing to - // stderr but also populates clang-tidy context with diagnostics (and allow - // for instance, diagnostics to be later converted to YAML). - CIRTidyDiagnosticHandler sourceMgrHandler(llvmSrcMgr, mlirCtx.get(), Context, - clangSrcMgr); - - mlir::PassManager pm(mlirCtx.get()); - pm.addPass(mlir::createMergeCleanupsPass()); - - if (Context.isCheckEnabled(LifetimeCheckName)) - pm.addPass(mlir::createLifetimeCheckPass( - cirOpts.RemarksList, cirOpts.HistoryList, cirOpts.HistLimit, &C)); - - bool Result = !mlir::failed(pm.run(mlirMod)); - if (!Result) - llvm::report_fatal_error( - "The pass manager failed to run pass on the module!"); -} -} // namespace clang::tidy::cir diff --git a/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h b/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h deleted file mode 100644 index 8356ed2022d1..000000000000 --- a/clang-tools-extra/clang-tidy/cir/CIRASTConsumer.h +++ /dev/null @@ -1,33 +0,0 @@ -#include "../ClangTidyDiagnosticConsumer.h" -#include "ClangTidyCheck.h" -#include "clang/AST/ASTContext.h" -#include "clang/CIR/CIRGenerator.h" -#include "clang/Frontend/CompilerInstance.h" - -using namespace clang; - -namespace clang::tidy::cir { - -constexpr const char *LifetimeCheckName = "cir-lifetime-check"; -struct CIROpts { - std::vector RemarksList; - std::vector HistoryList; - unsigned HistLimit; -}; - -class CIRASTConsumer : public ASTConsumer { -public: - CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, - clang::tidy::ClangTidyContext &Context, CIROpts &cirOpts); - -private: - void Initialize(ASTContext &Context) override; - void HandleTranslationUnit(ASTContext &C) override; - bool HandleTopLevelDecl(DeclGroupRef D) override; - std::unique_ptr<::cir::CIRGenerator> Gen; - ASTContext *AstContext{nullptr}; - clang::tidy::ClangTidyContext &Context; - CIROpts cirOpts; -}; - -} // namespace clang::tidy::cir diff --git a/clang-tools-extra/clang-tidy/cir/CMakeLists.txt b/clang-tools-extra/clang-tidy/cir/CMakeLists.txt index 2c6f7d0dfdeb..5c40efc09a12 100644 --- a/clang-tools-extra/clang-tidy/cir/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/cir/CMakeLists.txt @@ -11,7 +11,6 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangTidyCIRModule Lifetime.cpp - CIRASTConsumer.cpp CIRTidyModule.cpp LINK_LIBS From c0ed356b4ba2b85da58a9babe5f792366c016242 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 21 Aug 2023 12:13:14 -0700 Subject: [PATCH 1150/2301] [CIR][ClangTidy] Copy parseStringList function from clang::tidy::utils::options This unbreaks Linux builds, which points to cycles while using utils::options from ClangTidy.cpp. We need to find a better way to convey this. --- clang-tools-extra/clang-tidy/ClangTidy.cpp | 29 +++++++++++++++++++--- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/clang-tools-extra/clang-tidy/ClangTidy.cpp b/clang-tools-extra/clang-tidy/ClangTidy.cpp index 550324ac3d12..7788a2d753ee 100644 --- a/clang-tools-extra/clang-tidy/ClangTidy.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidy.cpp @@ -114,6 +114,29 @@ struct CIROpts { unsigned HistLimit; }; +static const char StringsDelimiter[] = ";"; + +// FIXME(cir): this function was extracted from clang::tidy::utils::options +// given that ClangTidy.cpp cannot be linked with ClangTidyUtils. +std::vector parseStringList(StringRef Option) { + Option = Option.trim().trim(StringsDelimiter); + if (Option.empty()) + return {}; + std::vector Result; + Result.reserve(Option.count(StringsDelimiter) + 1); + StringRef Cur; + while (std::tie(Cur, Option) = Option.split(StringsDelimiter), + !Option.empty()) { + Cur = Cur.trim(); + if (!Cur.empty()) + Result.push_back(Cur); + } + Cur = Cur.trim(); + if (!Cur.empty()) + Result.push_back(Cur); + return Result; +} + class CIRASTConsumer : public ASTConsumer { public: CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, @@ -666,10 +689,8 @@ ClangTidyASTConsumerFactory::createASTConsumer( OV.get("CodeGenSkipFunctionsFromSystemHeaders", false); cir::CIROpts opts; - opts.RemarksList = - utils::options::parseStringList(OV.get("RemarksList", "")); - opts.HistoryList = - utils::options::parseStringList(OV.get("HistoryList", "all")); + opts.RemarksList = cir::parseStringList(OV.get("RemarksList", "")); + opts.HistoryList = cir::parseStringList(OV.get("HistoryList", "all")); opts.HistLimit = OV.get("HistLimit", 1U); std::unique_ptr CIRConsumer = From 62cba8697a8086946cf066e2e197f42b88a5b039 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 21 Aug 2023 13:50:05 -0700 Subject: [PATCH 1151/2301] [CIR][ClangTidy] Fix cmake remaining issues --- clang-tools-extra/clang-tidy/CMakeLists.txt | 99 +++++++++++++-------- 1 file changed, 63 insertions(+), 36 deletions(-) diff --git a/clang-tools-extra/clang-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/CMakeLists.txt index 5bfd445cc891..02bec99c65ae 100644 --- a/clang-tools-extra/clang-tidy/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/CMakeLists.txt @@ -16,45 +16,72 @@ configure_file( ${CMAKE_CURRENT_BINARY_DIR}/clang-tidy-config.h) include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) -add_clang_library(clangTidy STATIC - ClangTidy.cpp - ClangTidyCheck.cpp - ClangTidyModule.cpp - ClangTidyDiagnosticConsumer.cpp - ClangTidyOptions.cpp - ClangTidyProfiling.cpp - ExpandModularHeadersPPCallbacks.cpp - GlobList.cpp - NoLintDirectiveHandler.cpp +if(CLANG_ENABLE_CIR) + add_clang_library(clangTidy + ClangTidy.cpp + ClangTidyCheck.cpp + ClangTidyModule.cpp + ClangTidyDiagnosticConsumer.cpp + ClangTidyOptions.cpp + ClangTidyProfiling.cpp + ExpandModularHeadersPPCallbacks.cpp + GlobList.cpp + NoLintDirectiveHandler.cpp + + DEPENDS + MLIRBuiltinLocationAttributesIncGen + MLIRCIROpsIncGen + MLIRCIREnumsGen + MLIRSymbolInterfacesIncGen + ClangSACheckers + omp_gen + ClangDriverOptions - LINK_LIBS - clangCIR - ${dialect_libs} - MLIRCIR - MLIRCIRTransforms - MLIRAffineToStandard - MLIRAnalysis - MLIRIR - MLIRLLVMCommonConversion - MLIRLLVMDialect - MLIRLLVMToLLVMIRTranslation - MLIRMemRefDialect - MLIRMemRefToLLVM - MLIRParser - MLIRPass - MLIRSideEffectInterfaces - MLIRSCFToControlFlow - MLIRFuncToLLVM - MLIRSupport - MLIRMemRefDialect - MLIRTargetLLVMIRExport - MLIRTransforms + LINK_LIBS + clangCIR + ${dialect_libs} + MLIRCIR + MLIRCIRTransforms + MLIRAffineToStandard + MLIRAnalysis + MLIRIR + MLIRLLVMCommonConversion + MLIRLLVMDialect + MLIRLLVMToLLVMIRTranslation + MLIRMemRefDialect + MLIRMemRefToLLVM + MLIRParser + MLIRPass + MLIRSideEffectInterfaces + MLIRSCFToControlFlow + MLIRFuncToLLVM + MLIRSupport + MLIRMemRefDialect + MLIRTargetLLVMIRExport + MLIRTransforms - DEPENDS - ClangSACheckers - omp_gen - ClangDriverOptions + DEPENDS + ClangSACheckers + omp_gen + ClangDriverOptions + ) +else() + add_clang_library(clangTidy + ClangTidy.cpp + ClangTidyCheck.cpp + ClangTidyModule.cpp + ClangTidyDiagnosticConsumer.cpp + ClangTidyOptions.cpp + ClangTidyProfiling.cpp + ExpandModularHeadersPPCallbacks.cpp + GlobList.cpp + NoLintDirectiveHandler.cpp + + DEPENDS + ClangSACheckers + omp_gen ) +endif() clang_target_link_libraries(clangTidy PRIVATE From 2ae2a5ccdd6006a77caf7dcda153843f3f9a40d0 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 22 Aug 2023 00:58:01 +0300 Subject: [PATCH 1152/2301] [CIR][CIRGen][Bugfix] Fixes switch-case sub statements (#232) This PR fixes CIR generation for the `switch-case` cases like the following: ``` case 'a': default: ... ``` or ``` default: case 'a': ... ``` i.e. when the `default` clause is sub-statement of the `case` one and vice versa. --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 26 ++++- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 150 +++++++++++++++---------- clang/test/CIR/CodeGen/switch.cpp | 100 ++++++++++++++++- 3 files changed, 209 insertions(+), 67 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 1459dff6f121..fba2aae533be 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -577,7 +577,8 @@ class CIRGenFunction : public CIRGenTypeCache { const CIRGenFunctionInfo *CurFnInfo; clang::QualType FnRetTy; - /// This is the current function or global initializer that is generated code for. + /// This is the current function or global initializer that is generated code + /// for. mlir::Operation *CurFn = nullptr; /// Save Parameter Decl for coroutine. @@ -593,7 +594,7 @@ class CIRGenFunction : public CIRGenTypeCache { CIRGenModule &getCIRGenModule() { return CGM; } - mlir::Block* getCurFunctionEntryBlock() { + mlir::Block *getCurFunctionEntryBlock() { auto Fn = dyn_cast(CurFn); assert(Fn && "other callables NYI"); return &Fn.getRegion().front(); @@ -1120,13 +1121,26 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Type getCIRType(const clang::QualType &type); + const CaseStmt *foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, + SmallVector &caseAttrs); + + void insertFallthrough(const clang::Stmt &S); + + template + mlir::LogicalResult + buildCaseDefaultCascade(const T *stmt, mlir::Type condType, + SmallVector &caseAttrs, + mlir::OperationState &os); + mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S, mlir::Type condType, - mlir::cir::CaseAttr &caseEntry); + SmallVector &caseAttrs, + mlir::OperationState &op); - mlir::LogicalResult buildDefaultStmt(const clang::DefaultStmt &S, - mlir::Type condType, - mlir::cir::CaseAttr &caseEntry); + mlir::LogicalResult + buildDefaultStmt(const clang::DefaultStmt &S, mlir::Type condType, + SmallVector &caseAttrs, + mlir::OperationState &op); mlir::cir::FuncOp generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 836e074ba4ea..68f38230a42b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -557,63 +557,102 @@ mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildCaseStmt(const CaseStmt &S, - mlir::Type condType, - CaseAttr &caseEntry) { - assert((!S.getRHS() || !S.caseStmtIsGNURange()) && - "case ranges not implemented"); - auto res = mlir::success(); - +const CaseStmt * +CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, + SmallVector &caseAttrs) { const CaseStmt *caseStmt = &S; + const CaseStmt *lastCase = &S; SmallVector caseEltValueListAttr; + // Fold cascading cases whenever possible to simplify codegen a bit. - while (true) { + while (caseStmt) { + lastCase = caseStmt; auto intVal = caseStmt->getLHS()->EvaluateKnownConstInt(getContext()); caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(condType, intVal)); - if (isa(caseStmt->getSubStmt())) - caseStmt = dyn_cast_or_null(caseStmt->getSubStmt()); - else - break; + caseStmt = dyn_cast_or_null(caseStmt->getSubStmt()); } - auto caseValueList = builder.getArrayAttr(caseEltValueListAttr); + auto *ctxt = builder.getContext(); - auto *ctx = builder.getContext(); - caseEntry = mlir::cir::CaseAttr::get( - ctx, caseValueList, - CaseOpKindAttr::get(ctx, caseEltValueListAttr.size() > 1 - ? mlir::cir::CaseOpKind::Anyof - : mlir::cir::CaseOpKind::Equal)); + auto caseAttr = mlir::cir::CaseAttr::get( + ctxt, builder.getArrayAttr(caseEltValueListAttr), + CaseOpKindAttr::get(ctxt, caseEltValueListAttr.size() > 1 + ? mlir::cir::CaseOpKind::Anyof + : mlir::cir::CaseOpKind::Equal)); - { - mlir::OpBuilder::InsertionGuard guardCase(builder); - res = buildStmt( - caseStmt->getSubStmt(), - /*useCurrentScope=*/!isa(caseStmt->getSubStmt())); - } + caseAttrs.push_back(caseAttr); - // TODO: likelihood - return res; + return lastCase; } -mlir::LogicalResult CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, - mlir::Type condType, - CaseAttr &caseEntry) { +void CIRGenFunction::insertFallthrough(const clang::Stmt &S) { + builder.create( + getLoc(S.getBeginLoc()), + mlir::cir::YieldOpKindAttr::get(builder.getContext(), + mlir::cir::YieldOpKind::Fallthrough), + mlir::ValueRange({})); +} + +template +mlir::LogicalResult CIRGenFunction::buildCaseDefaultCascade( + const T *stmt, mlir::Type condType, + SmallVector &caseAttrs, mlir::OperationState &os) { + + assert((isa(stmt)) && + "only case or default stmt go here"); + auto res = mlir::success(); - auto *ctx = builder.getContext(); - caseEntry = mlir::cir::CaseAttr::get( - ctx, builder.getArrayAttr({}), - CaseOpKindAttr::get(ctx, mlir::cir::CaseOpKind::Default)); - { + + // Update scope information with the current region we are + // emitting code for. This is useful to allow return blocks to be + // automatically and properly placed during cleanup. + auto *region = os.addRegion(); + auto *block = builder.createBlock(region); + builder.setInsertionPointToEnd(block); + currLexScope->updateCurrentSwitchCaseRegion(); + + auto *sub = stmt->getSubStmt(); + + if (isa(sub) && isa(stmt)) { + insertFallthrough(*stmt); + res = + buildDefaultStmt(*dyn_cast(sub), condType, caseAttrs, os); + } else if (isa(sub) && isa(stmt)) { + insertFallthrough(*stmt); + res = buildCaseStmt(*dyn_cast(sub), condType, caseAttrs, os); + } else { mlir::OpBuilder::InsertionGuard guardCase(builder); - res = buildStmt(S.getSubStmt(), - /*useCurrentScope=*/!isa(S.getSubStmt())); + res = buildStmt(sub, /*useCurrentScope=*/!isa(sub)); } - // TODO: likelihood return res; } +mlir::LogicalResult +CIRGenFunction::buildCaseStmt(const CaseStmt &S, mlir::Type condType, + SmallVector &caseAttrs, + mlir::OperationState &os) { + assert((!S.getRHS() || !S.caseStmtIsGNURange()) && + "case ranges not implemented"); + + auto *caseStmt = foldCaseStmt(S, condType, caseAttrs); + return buildCaseDefaultCascade(caseStmt, condType, caseAttrs, os); +} + +mlir::LogicalResult +CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, mlir::Type condType, + SmallVector &caseAttrs, + mlir::OperationState &os) { + auto ctxt = builder.getContext(); + + auto defAttr = mlir::cir::CaseAttr::get( + ctxt, builder.getArrayAttr({}), + CaseOpKindAttr::get(ctxt, mlir::cir::CaseOpKind::Default)); + + caseAttrs.push_back(defAttr); + return buildCaseDefaultCascade(&S, condType, caseAttrs, os); +} + static mlir::LogicalResult buildLoopCondYield(mlir::OpBuilder &builder, mlir::Location loc, mlir::Value cond) { @@ -958,29 +997,20 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { } auto *caseStmt = dyn_cast(c); - CaseAttr caseAttr; - { - mlir::OpBuilder::InsertionGuard guardCase(builder); - // Update scope information with the current region we are - // emitting code for. This is useful to allow return blocks to be - // automatically and properly placed during cleanup. - mlir::Region *caseRegion = os.addRegion(); - currLexScope->updateCurrentSwitchCaseRegion(); - - lastCaseBlock = builder.createBlock(caseRegion); - if (caseStmt) - res = buildCaseStmt(*caseStmt, condV.getType(), caseAttr); - else { - auto *defaultStmt = dyn_cast(c); - assert(defaultStmt && "expected default stmt"); - res = buildDefaultStmt(*defaultStmt, condV.getType(), caseAttr); - } - - if (res.failed()) - break; + if (caseStmt) + res = buildCaseStmt(*caseStmt, condV.getType(), caseAttrs, os); + else { + auto *defaultStmt = dyn_cast(c); + assert(defaultStmt && "expected default stmt"); + res = buildDefaultStmt(*defaultStmt, condV.getType(), caseAttrs, + os); } - caseAttrs.push_back(caseAttr); + + lastCaseBlock = builder.getBlock(); + + if (res.failed()) + break; } os.addAttribute("cases", builder.getArrayAttr(caseAttrs)); @@ -1057,4 +1087,4 @@ void CIRGenFunction::buildReturnOfRValue(mlir::Location loc, RValue RV, llvm_unreachable("NYI"); } buildBranchThroughCleanup(loc, ReturnBlock()); -} \ No newline at end of file +} diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 79e75ec5708a..873e197b3f47 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -15,7 +15,6 @@ void sw1(int a) { } } } - // CHECK: cir.func @_Z3sw1i // CHECK: cir.switch (%3 : !s32i) [ // CHECK-NEXT: case (equal, 0) { @@ -160,3 +159,102 @@ void sw7(int a) { // CHECK-NEXT: case (anyof, [3, 4, 5] : !s32i) { // CHECK-NEXT: cir.yield break // CHECK-NEXT: } + +void sw8(int a) { + switch (a) + { + case 3: + break; + case 4: + default: + break; + } +} + +//CHECK: cir.func @_Z3sw8i +//CHECK: case (equal, 3) +//CHECK-NEXT: cir.yield break +//CHECK-NEXT: }, +//CHECK-NEXT: case (equal, 4) { +//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: } +//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.yield break +//CHECK-NEXT: } + +void sw9(int a) { + switch (a) + { + case 3: + break; + default: + case 4: + break; + } +} + +//CHECK: cir.func @_Z3sw9i +//CHECK: case (equal, 3) { +//CHECK-NEXT: cir.yield break +//CHECK-NEXT: } +//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: } +//CHECK: case (equal, 4) +//CHECK-NEXT: cir.yield break +//CHECK-NEXT: } + +void sw10(int a) { + switch (a) + { + case 3: + break; + case 4: + default: + case 5: + break; + } +} + +//CHECK: cir.func @_Z4sw10i +//CHECK: case (equal, 3) +//CHECK-NEXT: cir.yield break +//CHECK-NEXT: }, +//CHECK-NEXT: case (equal, 4) { +//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: } +//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: } +//CHECK-NEXT: case (equal, 5) { +//CHECK-NEXT: cir.yield break +//CHECK-NEXT: } + +void sw11(int a) { + switch (a) + { + case 3: + break; + case 4: + case 5: + default: + case 6: + case 7: + break; + } +} + +//CHECK: cir.func @_Z4sw11i +//CHECK: case (equal, 3) +//CHECK-NEXT: cir.yield break +//CHECK-NEXT: }, +//CHECK-NEXT: case (anyof, [4, 5] : !s32i) { +//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: } +//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: } +//CHECK-NEXT: case (anyof, [6, 7] : !s32i) { +//CHECK-NEXT: cir.yield break +//CHECK-NEXT: } + From 623d4e7b326adcf29babf32a6b734fdc601a2348 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:17:51 -0300 Subject: [PATCH 1153/2301] [CIR][CIRGen] Implicitly zero-initialize global arrays elements Whenever a global array is declared and initialized with fewer elements than its size, the remaining elements are implicitly initialized with zero. For aggregates types, such as structs, the initialization is done through the #cir.zero attribute. ghstack-source-id: b3d172c8092acf904f9c2204621d900d70d0e819 Pull Request resolved: https://github.com/llvm/clangir/pull/216 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 3 +++ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 3 +-- clang/test/CIR/CodeGen/array.c | 9 +++++++++ 3 files changed, 13 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/array.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 8c701d8a6570..593c76625357 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -186,6 +186,9 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // TODO(cir): Once we have CIR float types, replace this by something like a // NullableValueInterface to allow for type-independent queries. bool isNullValue(mlir::Attribute attr) const { + if (attr.isa()) + return true; + // TODO(cir): introduce char type in CIR and check for that instead. if (const auto intVal = attr.dyn_cast()) return intVal.isNullValue(); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 4b85f73df2ca..5815a56b38a6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -869,8 +869,7 @@ class ConstExprEmitter mlir::Attribute VisitImplicitValueInitExpr(ImplicitValueInitExpr *E, QualType T) { - assert(0 && "not implemented"); - return {}; + return CGM.getBuilder().getZeroInitAttr(CGM.getCIRType(T)); } mlir::Attribute VisitInitListExpr(InitListExpr *ILE, QualType T) { diff --git a/clang/test/CIR/CodeGen/array.c b/clang/test/CIR/CodeGen/array.c new file mode 100644 index 000000000000..10ab656b1a07 --- /dev/null +++ b/clang/test/CIR/CodeGen/array.c @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +// Should implicitly zero-initialize global array elements. +struct S { + int i; +} arr[3] = {{1}}; +// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22]> : !cir.array From ed38399eec9b42d7ad6163e6b608641773310aee Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:17:52 -0300 Subject: [PATCH 1154/2301] [CIR][CIRGen] Update C++ codegen to use #cir.zero on structs This small improvement allows for a more compact representation of structs with zero-initialized fields in the C++ codegen. ghstack-source-id: a3fe0dc6c0dd89e22cb38167bb9d9cd7b9f43b8f Pull Request resolved: https://github.com/llvm/clangir/pull/217 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 31 ++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 17 +++---------- clang/test/CIR/CodeGen/array.c | 1 - clang/test/CIR/CodeGen/array.cpp | 6 +++++ 4 files changed, 40 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 593c76625357..f135a5ca5a32 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -144,6 +144,35 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return mlir::cir::ConstArrayAttr::get(arrayTy, attrs); } + mlir::Attribute getConstStructOrZeroAttr(mlir::ArrayAttr arrayAttr, + bool packed = false, + mlir::Type type = {}) { + llvm::SmallVector members; + auto structTy = type.dyn_cast(); + assert(structTy && "expected cir.struct"); + assert(!packed && "unpacked struct is NYI"); + + // Collect members and check if they are all zero. + bool isZero = true; + for (auto &attr : arrayAttr) { + const auto typedAttr = attr.dyn_cast(); + members.push_back(typedAttr.getType()); + isZero &= isNullValue(typedAttr); + } + + // Struct type not specified: create type from members. + if (!structTy) + structTy = getType( + members, mlir::StringAttr::get(getContext()), + /*body=*/true, packed, + /*ast=*/std::nullopt); + + // Return zero or anonymous constant struct. + if (isZero) + return mlir::cir::ZeroAttr::get(getContext(), structTy); + return mlir::cir::ConstStructAttr::get(structTy, arrayAttr); + } + mlir::cir::ConstStructAttr getAnonConstStruct(mlir::ArrayAttr arrayAttr, bool packed = false, mlir::Type ty = {}) { @@ -186,7 +215,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // TODO(cir): Once we have CIR float types, replace this by something like a // NullableValueInterface to allow for type-independent queries. bool isNullValue(mlir::Attribute attr) const { - if (attr.isa()) + if (attr.isa()) return true; // TODO(cir): introduce char type in CIR and check for that instead. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 5815a56b38a6..da1556122bb4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -282,10 +282,9 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( // TODO(cir): emit a #cir.zero if all elements are null values. auto &builder = CGM.getBuilder(); - return builder.getAnonConstStruct( - mlir::ArrayAttr::get(builder.getContext(), - Packed ? PackedElems : UnpackedElems), - Packed, DesiredTy); + auto arrAttr = mlir::ArrayAttr::get(builder.getContext(), + Packed ? PackedElems : UnpackedElems); + return builder.getConstStructOrZeroAttr(arrAttr, Packed, DesiredTy); } void ConstantAggregateBuilder::condense(CharUnits Offset, @@ -1468,14 +1467,6 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, const ArrayType *ArrayTy = CGM.getASTContext().getAsArrayType(DestType); unsigned NumElements = Value.getArraySize(); unsigned NumInitElts = Value.getArrayInitializedElts(); - auto isNullValue = [&](mlir::Attribute f) { - // TODO(cir): introduce char type in CIR and check for that instead. - auto intVal = f.dyn_cast_or_null(); - assert(intVal && "not implemented"); - if (intVal.getValue() == 0) - return true; - return false; - }; // Emit array filler, if there is one. mlir::Attribute Filler; @@ -1488,7 +1479,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, // Emit initializer elements. SmallVector Elts; - if (Filler && isNullValue(Filler)) + if (Filler && builder.isNullValue(Filler)) Elts.reserve(NumInitElts + 1); else Elts.reserve(NumElements); diff --git a/clang/test/CIR/CodeGen/array.c b/clang/test/CIR/CodeGen/array.c index 10ab656b1a07..e24ad24ebc51 100644 --- a/clang/test/CIR/CodeGen/array.c +++ b/clang/test/CIR/CodeGen/array.c @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * // Should implicitly zero-initialize global array elements. struct S { diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index b89c115afb84..de81e92131ca 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -65,3 +65,9 @@ int multidim(int i, int j) { // Should globally zero-initialize null arrays. int globalNullArr[] = {0, 0}; // CHECK: cir.global external @globalNullArr = #cir.zero : !cir.array + +// Should implicitly zero-initialize global array elements. +struct S { + int i; +} arr[3] = {{1}}; +// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22]> : !cir.array From 5bc2c59a9b069eb53ebb714796b1d5ccc68dd95c Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:17:52 -0300 Subject: [PATCH 1155/2301] [CIR][Lowering] Implement cir.llvmir.zeroinit operation Due to the lack of zeroinitializer support in LLVM, some cases are tricky to lower #cir.zero. An example is when an array is only partially initialize with #cir.zero attributes. Since we can't just zeroinitialize the whole array, the current #cir.zero attribute amend does not suffice. To simplify the lowering, this patch introduces a new operation that is solely used to generate zeroinitialize LLVM IR constants. ghstack-source-id: a3fd40ec3ce8970ac4e958076cc17d3fac573696 Pull Request resolved: https://github.com/llvm/clangir/pull/218 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 18 ++++++++++++++++++ .../DirectToLLVM/LowerAttrToLLVMIR.cpp | 14 ++++++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 +++ clang/test/CIR/Translation/zeroinitializer.cir | 9 +++++++++ 4 files changed, 44 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 27bea737e561..7f45f55eb993 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1852,4 +1852,22 @@ def VAArgOp : CIR_Op<"va.arg">, let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// Operations Lowered Directly to LLVM IR +// +// These operations are hacks to get around missing features in LLVM's dialect. +// Use it sparingly and remove it once the features are added. +//===----------------------------------------------------------------------===// + +def ZeroInitConstOp : CIR_Op<"llvmir.zeroinit", [Pure]>, + Results<(outs AnyType:$result)> { + let summary = "Zero initializes a constant value of a given type"; + let description = [{ + This operation circumvents the lack of a zeroinitializer operation in LLVM + Dialect. It can zeroinitialize any LLVM type. + }]; + let assemblyFormat = "attr-dict `:` type($result)"; + let hasVerifier = 0; +} + #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp index 2ec3d15e3887..884cd0eb7d89 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp @@ -76,6 +76,20 @@ class CIRDialectLLVMIRTranslationInterface op->removeAttr(attribute.getName()); return mlir::success(); } + + /// Translates the given operation to LLVM IR using the provided IR builder + /// and saving the state in `moduleTranslation`. + mlir::LogicalResult convertOperation( + mlir::Operation *op, llvm::IRBuilderBase &builder, + mlir::LLVM::ModuleTranslation &moduleTranslation) const final { + + if (auto cirOp = llvm::dyn_cast(op)) + moduleTranslation.mapValue(cirOp.getResult()) = + llvm::Constant::getNullValue( + moduleTranslation.convertType(cirOp.getType())); + + return mlir::success(); + } }; void registerCIRDialectTranslation(mlir::DialectRegistry ®istry) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 33e707d78909..1f3b915d3a9c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1835,6 +1835,9 @@ void ConvertCIRToLLVMPass::runOnOperation() { target.addIllegalDialect(); + // Allow operations that will be lowered directly to LLVM IR. + target.addLegalOp(); + getOperation()->removeAttr("cir.sob"); getOperation()->removeAttr("cir.lang"); diff --git a/clang/test/CIR/Translation/zeroinitializer.cir b/clang/test/CIR/Translation/zeroinitializer.cir index 63750fee10cb..ac70805bd231 100644 --- a/clang/test/CIR/Translation/zeroinitializer.cir +++ b/clang/test/CIR/Translation/zeroinitializer.cir @@ -9,4 +9,13 @@ module { // Should lower #cir.null on pointers to a null initializer. llvm.mlir.global external @ptr() {addr_space = 0 : i32, cir.initial_value = #cir.zero : !llvm.ptr} : !llvm.ptr // CHECK: @ptr = global ptr null + + // Should lower aggregates types with elements initialized with cir.llvmir.zeroinit. + llvm.mlir.global external @arr() {addr_space = 0 : i32} : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> { + %0 = llvm.mlir.undef : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> + %1 = cir.llvmir.zeroinit : !llvm.struct<"struct.S", (i8, i32)> + %2 = llvm.insertvalue %1, %0[0] : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> + llvm.return %2 : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> + } + // CHECK: @arr = global [1 x %struct.S] zeroinitializer } From c2d1be8005ca54938bbc9d3ee726fd66b563199e Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:17:52 -0300 Subject: [PATCH 1156/2301] [CIR][Lowering] Lower #cir.zero nested in initializer attributes Supports lowering for #cir.zero attributes when it appears in aggregate attributes such as #const.array and #cir.struct. ghstack-source-id: d26a506f29b581ed57d838e5281d9abbb2c24820 Pull Request resolved: https://github.com/llvm/clangir/pull/219 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 13 +++++++++++-- clang/test/CIR/Lowering/array.cir | 17 ++++++++++++++++- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 1f3b915d3a9c..59098ecfe159 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -107,6 +107,15 @@ lowerCirAttrAsValue(mlir::FloatAttr fltAttr, mlir::Location loc, loc, converter->convertType(fltAttr.getType()), fltAttr.getValue()); } +/// ZeroAttr visitor. +inline mlir::Value +lowerCirAttrAsValue(mlir::cir::ZeroAttr zeroAttr, mlir::Location loc, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + return rewriter.create( + loc, converter->convertType(zeroAttr.getType())); +} + /// ConstStruct visitor. mlir::Value lowerCirAttrAsValue(mlir::cir::ConstStructAttr constStruct, mlir::Location loc, @@ -157,10 +166,10 @@ lowerCirAttrAsValue(mlir::Attribute attr, mlir::Location loc, return lowerCirAttrAsValue(constStruct, loc, rewriter, converter); if (const auto constArr = attr.dyn_cast()) return lowerCirAttrAsValue(constArr, loc, rewriter, converter); - if (const auto zeroAttr = attr.dyn_cast()) + if (const auto boolAttr = attr.dyn_cast()) llvm_unreachable("bool attribute is NYI"); if (const auto zeroAttr = attr.dyn_cast()) - llvm_unreachable("zero attribute is NYI"); + return lowerCirAttrAsValue(zeroAttr, loc, rewriter, converter); llvm_unreachable("unhandled attribute type"); } diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir index 3a7a9b3f8dfa..652994aa686b 100644 --- a/clang/test/CIR/Lowering/array.cir +++ b/clang/test/CIR/Lowering/array.cir @@ -1,12 +1,14 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int +!ty_22struct2ES22 = !cir.struct<"struct.S", !s32i, #cir.recdecl.ast> + module { cir.func @foo() { %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} cir.return } -} // MLIR: module { // MLIR-NEXT: func @foo() @@ -18,3 +20,16 @@ module { // LLVM: %1 = alloca [10 x i32], i64 1, align 16 // LLVM-NEXT: ret void + + cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22]> : !cir.array + // CHECK: llvm.mlir.global external @arr() {addr_space = 0 : i32} : !llvm.array<2 x struct<"struct.S", (i32)>> { + // CHECK: %0 = llvm.mlir.undef : !llvm.array<2 x struct<"struct.S", (i32)>> + // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S", (i32)> + // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.array<2 x struct<"struct.S", (i32)>> + // CHECK: %5 = cir.llvmir.zeroinit : !llvm.struct<"struct.S", (i32)> + // CHECK: %6 = llvm.insertvalue %5, %4[1] : !llvm.array<2 x struct<"struct.S", (i32)>> + // CHECK: llvm.return %6 : !llvm.array<2 x struct<"struct.S", (i32)>> + // CHECK: } +} From 932cc7fb55a37cafbe1afcd6d39079bc006941b9 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:17:53 -0300 Subject: [PATCH 1157/2301] [CIR][Lowering][NFC] Rename LowerAttrToLLVMIR to LowerToLLVMIR The rationale is that we are lowering more than just attributes now, also to maintain the naming standard of the other lowering files (e.g. LowerToLLVM). ghstack-source-id: c4a3b0ea6b055881a35cf1e85a409e1fa0b8c53d Pull Request resolved: https://github.com/llvm/clangir/pull/220 --- clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt | 2 +- .../{LowerAttrToLLVMIR.cpp => LowerToLLVMIR.cpp} | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) rename clang/lib/CIR/Lowering/DirectToLLVM/{LowerAttrToLLVMIR.cpp => LowerToLLVMIR.cpp} (93%) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index d5d01c56d102..c7f713e85da0 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -6,7 +6,7 @@ set(LLVM_LINK_COMPONENTS get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIRLoweringDirectToLLVM - LowerAttrToLLVMIR.cpp + LowerToLLVMIR.cpp LowerToLLVM.cpp DEPENDS diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp similarity index 93% rename from clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp rename to clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index 884cd0eb7d89..5790bc54da1c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerAttrToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -1,4 +1,4 @@ -//====- LowerAttrToLLVMIR.cpp - Lowering CIR attributes to LLVMIR ---------===// +//====- LoweToLLVMIR.cpp - Lowering CIR attributes to LLVMIR ---------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -6,7 +6,8 @@ // //===----------------------------------------------------------------------===// // -// This file implements lowering of CIR attributes to LLVMIR. +// This file implements lowering of CIR attributes and operations directly to +// LLVMIR. // //===----------------------------------------------------------------------===// @@ -57,7 +58,8 @@ class CIRDialectLLVMIRTranslationInterface if (auto extraAttr = attribute.getValue() .dyn_cast()) { for (auto attr : extraAttr.getElements()) { - if (auto inlineAttr = attr.getValue().dyn_cast()) { + if (auto inlineAttr = + attr.getValue().dyn_cast()) { if (inlineAttr.isNoInline()) llvmFunc->addFnAttr(llvm::Attribute::NoInline); else if (inlineAttr.isAlwaysInline()) From 0a7bb161578e4a00e753e9e6770f217fb35331e1 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:17:53 -0300 Subject: [PATCH 1158/2301] [CIR][Lowering] Deprecate attributes for LLVM zero-initialization This replaces the usage of attributes for zero-initializing global variables, with a more robust zero-initialization op-based method (cir.llvmir.zeroinit). The downside of this approach is that is not as compact or efficient as the attribute-based method, however: - Both are temporary solutions, but it's easier to track and patch the usage of a single op than an attribute in any op. - Attribute-based method is more difficult to lower, requiring more maintenance. - Op-based method may require a region, but it will populate the region with at most a couple of operations. ghstack-source-id: 4f239b84865c1ad51c72efbe3486be18eddeea4c Pull Request resolved: https://github.com/llvm/clangir/pull/221 --- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 14 ++++++-------- .../CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 11 ----------- clang/test/CIR/Lowering/globals.cir | 11 +++++++++++ clang/test/CIR/Translation/zeroinitializer.cir | 14 ++++++++++---- 4 files changed, 27 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 59098ecfe159..6707e0d56428 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1281,14 +1281,12 @@ class CIRGlobalOpLowering return mlir::success(); } else if (isa(init.value())) { // TODO(cir): once LLVM's dialect has a proper zeroinitializer attribute - // this should be updated. For now, we tag the LLVM global with a - // cir.zero attribute that is later replaced with a zeroinitializer. - // Null pointers also use this path for simplicity, as we would - // otherwise require a region-based initialization for the global op. - auto llvmGlobalOp = rewriter.replaceOpWithNewOp( - op, llvmType, isConst, linkage, symbol, nullptr); - auto cirZeroAttr = mlir::cir::ZeroAttr::get(getContext(), llvmType); - llvmGlobalOp->setAttr("cir.initial_value", cirZeroAttr); + // this should be updated. For now, we use a custom op to initialize + // globals to zero. + setupRegionInitializedLLVMGlobalOp(op, rewriter); + auto value = + lowerCirAttrAsValue(init.value(), loc, rewriter, typeConverter); + rewriter.create(loc, value); return mlir::success(); } else if (const auto structAttr = init.value().dyn_cast()) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index 5790bc54da1c..c19831bda087 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -39,17 +39,6 @@ class CIRDialectLLVMIRTranslationInterface mlir::Operation *op, llvm::ArrayRef instructions, mlir::NamedAttribute attribute, mlir::LLVM::ModuleTranslation &moduleTranslation) const override { - // Translate CIR's zero attribute to LLVM's zero initializer. - if (isa(attribute.getValue())) { - if (llvm::isa(op)) { - auto *globalVal = llvm::cast( - moduleTranslation.lookupGlobal(op)); - globalVal->setInitializer( - llvm::Constant::getNullValue(globalVal->getValueType())); - } else - return op->emitError("#cir.zero not supported"); - } - // Translate CIR's extra function attributes to LLVM's function attributes. auto func = dyn_cast(op); if (!func) diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index c4628f97cc5e..43ca8c3fb030 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -11,6 +11,7 @@ !u64i = !cir.int !u8i = !cir.int !ty_22struct2EA22 = !cir.struct<"struct.A", !s32i, !cir.array x 2>, #cir.recdecl.ast> +!ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i, #cir.recdecl.ast> module { cir.global external @a = #cir.int<3> : !s32i @@ -134,4 +135,14 @@ module { // MLIR: llvm.mlir.global external @zeroInitFlt(dense<0.000000e+00> : tensor<2xf32>) {addr_space = 0 : i32} : !llvm.array<2 x f32> cir.global "private" internal @staticVar = #cir.int<0> : !s32i // MLIR: llvm.mlir.global internal @staticVar(0 : i32) {addr_space = 0 : i32} : i32 + cir.global external @nullPtr = #cir.null : !cir.ptr + // MLIR: llvm.mlir.global external @nullPtr() + // MLIR: %0 = llvm.mlir.zero : !llvm.ptr + // MLIR: llvm.return %0 : !llvm.ptr + // MLIR: } + cir.global external @zeroStruct = #cir.zero : !ty_22struct2EBar22 + // MLIR: llvm.mlir.global external @zeroStruct() + // MLIR: %0 = cir.llvmir.zeroinit : !llvm.struct<"struct.Bar", (i32, i8)> + // MLIR: llvm.return %0 : !llvm.struct<"struct.Bar", (i32, i8)> + // MLIR: } } diff --git a/clang/test/CIR/Translation/zeroinitializer.cir b/clang/test/CIR/Translation/zeroinitializer.cir index ac70805bd231..c6b92be604d5 100644 --- a/clang/test/CIR/Translation/zeroinitializer.cir +++ b/clang/test/CIR/Translation/zeroinitializer.cir @@ -2,12 +2,18 @@ // RUN: FileCheck --input-file=%t.ll %s module { - // Should lower #cir.zero on structs to a zeroinitializer. - llvm.mlir.global external @bar() {addr_space = 0 : i32, cir.initial_value = #cir.zero : !llvm.struct<"struct.S", (i8, i32)>} : !llvm.struct<"struct.S", (i8, i32)> + // Should zero-initialize global structs initialized with cir.llvmir.zeroinit. + llvm.mlir.global external @bar() {addr_space = 0 : i32} : !llvm.struct<"struct.S", (i8, i32)> { + %0 = cir.llvmir.zeroinit : !llvm.struct<"struct.S", (i8, i32)> + llvm.return %0 : !llvm.struct<"struct.S", (i8, i32)> + } // CHECK: @bar = global %struct.S zeroinitializer - // Should lower #cir.null on pointers to a null initializer. - llvm.mlir.global external @ptr() {addr_space = 0 : i32, cir.initial_value = #cir.zero : !llvm.ptr} : !llvm.ptr + // Should null-initialize global pointer initialized with cir.llvmir.zeroinit. + llvm.mlir.global external @ptr() {addr_space = 0 : i32} : !llvm.ptr { + %0 = cir.llvmir.zeroinit : !llvm.ptr + llvm.return %0 : !llvm.ptr + } // CHECK: @ptr = global ptr null // Should lower aggregates types with elements initialized with cir.llvmir.zeroinit. From f505427153b8ac0997b7dc9b948b38fa3ef3d226 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:40:37 -0300 Subject: [PATCH 1159/2301] [CIR] Implement cir.copy operation Adds a new `cir.copy` operation that copies the contents of one pointer to another. Similar to a `memcpy`, but the number of bytes to be copied is inferred from the pointee type. Two constraints are enforced: - The source and destination pointers must have the same pointee type. - The source and destination pointers must be different values. ghstack-source-id: 2d5b6bd0aeb71afd29cf522f0da2955de48a3e3c Pull Request resolved: https://github.com/llvm/clangir/pull/213 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 38 ++++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 18 ++++++++++ clang/test/CIR/IR/copy.cir | 9 +++++ clang/test/CIR/IR/invalid.cir | 22 ++++++++++++ 4 files changed, 87 insertions(+) create mode 100644 clang/test/CIR/IR/copy.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7f45f55eb993..0cc65cb456a1 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1820,6 +1820,44 @@ def AwaitOp : CIR_Op<"await", let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// CopyOp +//===----------------------------------------------------------------------===// + +def CopyOp : CIR_Op<"copy", [SameTypeOperands]> { + let arguments = (ins Arg:$dst, + Arg:$src); + let summary = "Copies contents from a CIR pointer to another"; + let description = [{ + Given two CIR pointers, `src` and `dst`, `cir.copy` will copy the memory + pointed by `src` to the memory pointed by `dst`. + + The amount of bytes copied is inferred from the pointee type. Naturally, + the pointee type of both `src` and `dst` must match and must implement + the `DataLayoutTypeInterface`. + + Examples: + + ```mlir + // Copying contents from one struct to another: + cir.copy %0 to %1 : !cir.ptr + ``` + }]; + + let assemblyFormat = "$src `to` $dst attr-dict `:` qualified(type($dst))"; + let hasVerifier = 1; + + let extraClassDeclaration = [{ + /// Returns the pointer type being copied. + mlir::cir::PointerType getType() { return getSrc().getType(); } + + /// Returns the number of bytes to be copied. + unsigned getLength() { + return DataLayout::closest(*this).getTypeSize(getType().getPointee()); + } + }]; +} + //===----------------------------------------------------------------------===// // Variadic Operations //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7c3d32690f3b..e49b7b66076d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -22,11 +22,13 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Diagnostics.h" #include "mlir/IR/DialectImplementation.h" +#include "mlir/IR/DialectInterface.h" #include "mlir/IR/Location.h" #include "mlir/IR/OpDefinition.h" #include "mlir/IR/OpImplementation.h" #include "mlir/IR/StorageUniquerSupport.h" #include "mlir/IR/TypeUtilities.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Interfaces/FunctionImplementation.h" #include "mlir/Interfaces/InferTypeOpInterface.h" #include "mlir/Support/LLVM.h" @@ -2202,6 +2204,22 @@ VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, return success(); } +//===----------------------------------------------------------------------===// +// CopyOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult CopyOp::verify() { + + // A data layout is required for us to know the number of bytes to be copied. + if (!getType().getPointee().hasTrait()) + return emitError() << "missing data layout for pointee type"; + + if (getSrc() == getDst()) + return emitError() << "source and destination are the same"; + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/copy.cir b/clang/test/CIR/IR/copy.cir new file mode 100644 index 000000000000..9a689036985e --- /dev/null +++ b/clang/test/CIR/IR/copy.cir @@ -0,0 +1,9 @@ +// RUN: cir-opt %s + +!s32i = !cir.int +module { + cir.func @shouldParseCopyOp(%arg0 : !cir.ptr, %arg1 : !cir.ptr) { + cir.copy %arg0 to %arg1 : !cir.ptr + cir.return + } +} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index b81e42ca4507..feabf3be18fa 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -438,3 +438,25 @@ module { // expected-error@+1 {{invalid language keyword 'dummy'}} module attributes {cir.lang = #cir.lang} { } + +// ----- + +module { + // Should not copy types with no data layout (unkonwn byte size). + cir.func @invalid_copy(%arg0 : !cir.ptr, %arg1 : !cir.ptr) { + // expected-error@+1 {{missing data layout for pointee type}} + cir.copy %arg0 to %arg1 : !cir.ptr + cir.return + } +} + +// ----- + +module { + // Should not copy to same address. + cir.func @invalid_copy(%arg0 : !cir.ptr>) { + // expected-error@+1 {{source and destination are the same}} + cir.copy %arg0 to %arg0 : !cir.ptr> + cir.return + } +} From 487f523e095412177c0e1e5cbc072645712f17d3 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:40:38 -0300 Subject: [PATCH 1160/2301] [CIR][CIRGen] Add support for basic struct assignment Unblocks codegen path for generating basic (no copy constructors or move operators) struct assignment copy, which involves mainly: - Unblocking LValueToRValue cast expressions - Updating buildAggregateCopy to copy structs on assignment - Visiting DeclRefExpr nodes ghstack-source-id: 719f2bc7365bb68f5af59d1bc38dafde80c6e39f Pull Request resolved: https://github.com/llvm/clangir/pull/214 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 7 + clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 175 ++++++++++++++++-- .../CodeGen/UnimplementedFeatureGuarding.h | 4 + clang/test/CIR/CodeGen/struct.c | 10 + clang/test/CIR/CodeGen/struct.cpp | 9 + 5 files changed, 185 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index f135a5ca5a32..dff8426c5144 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -29,6 +29,7 @@ #include "llvm/ADT/FloatingPointMode.h" #include "llvm/ADT/StringMap.h" #include "llvm/Support/ErrorHandling.h" +#include #include namespace cir { @@ -469,6 +470,12 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Operation creation helpers // -------------------------- // + + /// Create a copy with inferred length. + mlir::cir::CopyOp createCopy(mlir::Value dst, mlir::Value src) { + return create(dst.getLoc(), dst, src); + } + mlir::Value createNeg(mlir::Value value) { if (auto intTy = value.getType().dyn_cast()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index d5fedfd21a59..6b31001144cc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -16,9 +16,15 @@ #include "CIRGenTypes.h" #include "CIRGenValue.h" #include "UnimplementedFeatureGuarding.h" +#include "mlir/IR/Attributes.h" +#include "clang/AST/Decl.h" +#include "clang/AST/OperationKinds.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" using namespace cir; using namespace clang; @@ -53,6 +59,32 @@ class AggExprEmitter : public StmtVisitor { AggExprEmitter(CIRGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused) : CGF{cgf}, Dest(Dest), IsResultUnused(IsResultUnused) {} + //===--------------------------------------------------------------------===// + // Utilities + //===--------------------------------------------------------------------===// + + /// Given an expression with aggregate type that represents a value lvalue, + /// this method emits the address of the lvalue, then loads the result into + /// DestPtr. + void buildAggLoadOfLValue(const Expr *E); + + enum ExprValueKind { EVK_RValue, EVK_NonRValue }; + + /// Perform the final copy to DestPtr, if desired. SrcIsRValue is true if + /// source comes from an RValue. + void buildFinalDestCopy(QualType type, const LValue &src, + ExprValueKind SrcValueKind = EVK_NonRValue); + void buildCopy(QualType type, const AggValueSlot &dest, + const AggValueSlot &src); + + AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) { + if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T)) + llvm_unreachable("garbage collection is NYI"); + return AggValueSlot::DoesNotNeedGCBarriers; + } + + bool TypeRequiresGCollection(QualType T); + //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// @@ -85,7 +117,7 @@ class AggExprEmitter : public StmtVisitor { void VisitConstantExpr(ConstantExpr *E) { llvm_unreachable("NYI"); } // l-values - void VisitDeclRefExpr(DeclRefExpr *E) { llvm_unreachable("NYI"); } + void VisitDeclRefExpr(DeclRefExpr *E) { buildAggLoadOfLValue(E); } void VisitMemberExpr(MemberExpr *E) { llvm_unreachable("NYI"); } void VisitUnaryDeref(UnaryOperator *E) { llvm_unreachable("NYI"); } void VisitStringLiteral(StringLiteral *E) { llvm_unreachable("NYI"); } @@ -93,7 +125,7 @@ class AggExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } void VisitArraySubscriptExpr(ArraySubscriptExpr *E) { - llvm_unreachable("NYI"); + buildAggLoadOfLValue(E); } void VisitPredefinedExpr(const PredefinedExpr *E) { llvm_unreachable("NYI"); } @@ -171,6 +203,88 @@ class AggExprEmitter : public StmtVisitor { }; } // namespace +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// + +/// Given an expression with aggregate type that represents a value lvalue, this +/// method emits the address of the lvalue, then loads the result into DestPtr. +void AggExprEmitter::buildAggLoadOfLValue(const Expr *E) { + LValue LV = CGF.buildLValue(E); + + // If the type of the l-value is atomic, then do an atomic load. + if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV) || + UnimplementedFeature::atomicTypes()) + llvm_unreachable("atomic load is NYI"); + + buildFinalDestCopy(E->getType(), LV); +} + +/// Perform the final copy to DestPtr, if desired. +void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src, + ExprValueKind SrcValueKind) { + // If Dest is ignored, then we're evaluating an aggregate expression + // in a context that doesn't care about the result. Note that loads + // from volatile l-values force the existence of a non-ignored + // destination. + if (Dest.isIgnored()) + return; + + // Copy non-trivial C structs here. + if (Dest.isVolatile() || UnimplementedFeature::volatileTypes()) + llvm_unreachable("volatile is NYI"); + + if (SrcValueKind == EVK_RValue) { + llvm_unreachable("rvalue is NYI"); + } else { + if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) + llvm_unreachable("non-trivial primitive copy is NYI"); + } + + AggValueSlot srcAgg = AggValueSlot::forLValue( + src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased, + AggValueSlot::MayOverlap); + buildCopy(type, Dest, srcAgg); +} + +/// Perform a copy from the source into the destination. +/// +/// \param type - the type of the aggregate being copied; qualifiers are +/// ignored +void AggExprEmitter::buildCopy(QualType type, const AggValueSlot &dest, + const AggValueSlot &src) { + if (dest.requiresGCollection()) + llvm_unreachable("garbage collection is NYI"); + + // If the result of the assignment is used, copy the LHS there also. + // It's volatile if either side is. Use the minimum alignment of + // the two sides. + LValue DestLV = CGF.makeAddrLValue(dest.getAddress(), type); + LValue SrcLV = CGF.makeAddrLValue(src.getAddress(), type); + if (dest.isVolatile() || src.isVolatile() || + UnimplementedFeature::volatileTypes()) + llvm_unreachable("volatile is NYI"); + CGF.buildAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(), false); +} + +/// True if the given aggregate type requires special GC API calls. +bool AggExprEmitter::TypeRequiresGCollection(QualType T) { + // Only record types have members that might require garbage collection. + const RecordType *RecordTy = T->getAs(); + if (!RecordTy) + return false; + + // Don't mess with non-trivial C++ types. + RecordDecl *Record = RecordTy->getDecl(); + if (isa(Record) && + (cast(Record)->hasNonTrivialCopyConstructor() || + !cast(Record)->hasTrivialDestructor())) + return false; + + // Check whether the type has an object member. + return Record->hasObjectMember(); +} + //===----------------------------------------------------------------------===// // Visitor Methods //===----------------------------------------------------------------------===// @@ -472,6 +586,15 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { CGF.CGM.buildExplicitCastExprType(ECE, &CGF); switch (E->getCastKind()) { + case CK_LValueToRValue: + // If we're loading from a volatile type, force the destination + // into existence. + if (E->getSubExpr()->getType().isVolatileQualified() || + UnimplementedFeature::volatileTypes()) { + llvm_unreachable("volatile is NYI"); + } + [[fallthrough]]; + case CK_NoOp: case CK_UserDefinedConversion: case CK_ConstructorConversion: @@ -536,6 +659,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { case CK_FixedPointToBoolean: case CK_FixedPointToIntegral: case CK_IntegralToFixedPoint: + llvm::errs() << "cast '" << E->getCastKindName() + << "' invalid for aggregate types\n"; llvm_unreachable("cast kind invalid for aggregate types"); default: { llvm::errs() << "cast kind not implemented: '" << E->getCastKindName() @@ -904,8 +1029,8 @@ void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty, // this will be touched again soon. assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex"); - // Address DestPtr = Dest.getAddress(); - // Address SrcPtr = Src.getAddress(); + Address DestPtr = Dest.getAddress(); + Address SrcPtr = Src.getAddress(); if (getLangOpts().CPlusPlus) { if (const RecordType *RT = Ty->getAs()) { @@ -924,7 +1049,7 @@ void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty, } if (getLangOpts().CUDAIsDevice) { - assert(0 && "NYI"); + llvm_unreachable("CUDA is NYI"); } // Aggregate assignment turns into llvm.memcpy. This is almost valid per @@ -947,13 +1072,18 @@ void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty, else TypeInfo = getContext().getTypeInfoInChars(Ty); - llvm::Value *SizeVal = nullptr; + mlir::Attribute SizeVal = nullptr; if (TypeInfo.Width.isZero()) { - assert(0 && "NYI"); + // But note that getTypeInfo returns 0 for a VLA. + if (auto *VAT = dyn_cast_or_null( + getContext().getAsArrayType(Ty))) { + llvm_unreachable("VLA is NYI"); + } } if (!SizeVal) { - assert(0 && "NYI"); - // SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()); + // NOTE(cir): CIR types already carry info about their sizes. This is here + // just for codegen parity. + SizeVal = builder.getI64IntegerAttr(TypeInfo.Width.getQuantity()); } // FIXME: If we have a volatile struct, the optimizer can remove what might @@ -969,29 +1099,34 @@ void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty, // we need to use a different call here. We use isVolatile to indicate when // either the source or the destination is volatile. - assert(0 && "NYI"); - // DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty); - // SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty); + // NOTE(cir): original codegen would normally convert DestPtr and SrcPtr to + // i8* since memcpy operates on bytes. We don't need that in CIR because + // cir.copy will operate on any CIR pointer that points to a sized type. // Don't do any of the memmove_collectable tests if GC isn't set. if (CGM.getLangOpts().getGC() == LangOptions::NonGC) { // fall through } else if (const RecordType *RecordTy = Ty->getAs()) { - assert(0 && "NYI"); + RecordDecl *Record = RecordTy->getDecl(); + if (Record->hasObjectMember()) { + llvm_unreachable("ObjC is NYI"); + } } else if (Ty->isArrayType()) { - assert(0 && "NYI"); + QualType BaseType = getContext().getBaseElementType(Ty); + if (const RecordType *RecordTy = BaseType->getAs()) { + if (RecordTy->getDecl()->hasObjectMember()) { + llvm_unreachable("ObjC is NYI"); + } + } } - assert(0 && "NYI"); - // auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile); + builder.createCopy(DestPtr.getPointer(), SrcPtr.getPointer()); // Determine the metadata to describe the position of any padding in this // memcpy, as well as the TBAA tags for the members of the struct, in case // the optimizer wishes to expand it in to scalar memory operations. - assert(!UnimplementedFeature::tbaa()); - if (CGM.getCodeGenOpts().NewStructPathTBAA) { - assert(0 && "NYI"); - } + if (CGM.getCodeGenOpts().NewStructPathTBAA || UnimplementedFeature::tbaa()) + llvm_unreachable("TBAA is NYI"); } AggValueSlot::Overlap_t diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 8ff7cd665d6b..b545d3a4afe6 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -100,6 +100,10 @@ struct UnimplementedFeature { static bool fastMathFlags() { return false; } static bool fastMathFuncAttributes() { return false; } + // Type qualifiers. + static bool atomicTypes() { return false; } + static bool volatileTypes() { return false; } + static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } static bool incrementProfileCounter() { return false; } diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 14df39b332aa..5f269b1835c3 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -68,3 +68,13 @@ void shouldCopyStructAsCallArg(struct S1 s) { // CHECK-DAG: %[[#LV:]] = cir.load %{{.+}} : cir.ptr , !ty_22struct2ES122 // CHECK-DAG: cir.call @shouldCopyStructAsCallArg(%[[#LV]]) : (!ty_22struct2ES122) -> () } + +struct Bar shouldGenerateAndAccessStructArrays(void) { + struct Bar s[1] = {{3, 4}}; + return s[0]; +} +// CHECK-DAG: cir.func @shouldGenerateAndAccessStructArrays +// CHECK-DAG: %[[#STRIDE:]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-DAG: %[[#DARR:]] = cir.cast(array_to_ptrdecay, %{{.+}} : !cir.ptr>), !cir.ptr +// CHECK-DAG: %[[#ELT:]] = cir.ptr_stride(%[[#DARR]] : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr +// CHECK-DAG: cir.copy %[[#ELT]] to %{{.+}} : !cir.ptr diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index becf9307d5d5..166dff0d812d 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -123,6 +123,15 @@ struct A simpleConstInit = {1}; struct A arrConstInit[1] = {{1}}; // CHECK: cir.global external @arrConstInit = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2EA22]> : !cir.array +// Should locally copy struct members. +void shouldLocallyCopyStructAssignments(void) { + struct A a = { 3 }; + // CHECK: %[[#SA:]] = cir.alloca !ty_22struct2EA22, cir.ptr , ["a"] {alignment = 4 : i64} + struct A b = a; + // CHECK: %[[#SB:]] = cir.alloca !ty_22struct2EA22, cir.ptr , ["b", init] {alignment = 4 : i64} + // cir.copy %[[#SA]] to %[[SB]] : !cir.ptr +} + A get_default() { return A{2}; } struct S { From 68378b596cbcd97d5423f4bd8488e6db2317c427 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:40:38 -0300 Subject: [PATCH 1161/2301] [CIR][Lowering] Lower cir.copy operations Lower `cir.copy` operations to non-volatile `llvm.memcpy` intrinsic calls. ghstack-source-id: b58bca4998da8e258a6f626ef97e392976babf62 Pull Request resolved: https://github.com/llvm/clangir/pull/215 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 18 +++++++++++++++++- clang/test/CIR/Lowering/struct.cir | 15 +++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6707e0d56428..beca6beedfab 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -204,6 +204,21 @@ mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { }; } +class CIRCopyOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CopyOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + const mlir::Value length = rewriter.create( + op.getLoc(), rewriter.getI32Type(), op.getLength()); + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), adaptor.getSrc(), length, /*isVolatile=*/false); + return mlir::success(); + } +}; + class CIRPtrStrideOpLowering : public mlir::OpConversionPattern { public: @@ -1754,7 +1769,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, CIRStructElementAddrOpLowering, CIRSwitchOpLowering, - CIRPtrDiffOpLowering>(converter, patterns.getContext()); + CIRPtrDiffOpLowering, CIRCopyOpLowering>(converter, + patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index 38b9e894b8d0..c2efc8f4f6a0 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -3,6 +3,7 @@ !s32i = !cir.int !u8i = !cir.int +!u32i = !cir.int !ty_22struct2ES22 = !cir.struct<"struct.S", !u8i, !s32i> !ty_22struct2ES2A22 = !cir.struct<"struct.S2A", !s32i, #cir.recdecl.ast> !ty_22struct2ES122 = !cir.struct<"struct.S1", !s32i, f32, !cir.ptr, #cir.recdecl.ast> @@ -78,4 +79,18 @@ module { // CHECK: %12 = llvm.insertvalue %11, %8[2] : !llvm.array<3 x struct<"struct.S3", (i32)>> // CHECK: llvm.return %12 : !llvm.array<3 x struct<"struct.S3", (i32)>> // CHECK: } + + cir.func @shouldLowerStructCopies() { + // CHECK: llvm.func @shouldLowerStructCopies() + %1 = cir.alloca !ty_22struct2ES22, cir.ptr , ["a"] {alignment = 4 : i64} + // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#SA:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"struct.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + %2 = cir.alloca !ty_22struct2ES22, cir.ptr , ["b", init] {alignment = 4 : i64} + // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#SB:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"struct.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + cir.copy %1 to %2 : !cir.ptr + // CHECK: %[[#SIZE:]] = llvm.mlir.constant(8 : i32) : i32 + // CHECK: "llvm.intr.memcpy"(%[[#SB]], %[[#SA]], %[[#SIZE]]) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> () + cir.return + } } From 7429ce01d9ca563e32a50888079622ad00b9276a Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:40:38 -0300 Subject: [PATCH 1162/2301] [CIR] Implement cir.libc.memcpy operation The operation is a 1:1 mapping to libc's memcpy. ghstack-source-id: 97f02f4d782b954c49b08054107e257cb46538fd Pull Request resolved: https://github.com/llvm/clangir/pull/237 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 44 ++++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 32 ++++++++++---- clang/test/CIR/IR/invalid.cir | 25 +++++++++++ clang/test/CIR/IR/libc-memcpy.cir | 9 ++++ 4 files changed, 102 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/IR/libc-memcpy.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0cc65cb456a1..a12119e42684 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1858,6 +1858,50 @@ def CopyOp : CIR_Op<"copy", [SameTypeOperands]> { }]; } +//===----------------------------------------------------------------------===// +// MemCpyOp +//===----------------------------------------------------------------------===// + +def MemCpyOp : CIR_Op<"libc.memcpy"> { + let arguments = (ins Arg:$dst, + Arg:$src, + CIR_IntType:$len); + let summary = "Equivalent to libc's `memcpy`"; + let description = [{ + Given two CIR pointers, `src` and `dst`, `cir.libc.memcpy` will copy `len` + bytes from the memory pointed by `src` to the memory pointed by `dst`. + + While `cir.copy` is meant to be used for implicit copies in the code where + the length of the copy is known, `cir.memcpy` copies only from and to void + pointers, requiring the copy length to be passed as an argument. + + Examples: + + ```mlir + // Copying 2 bytes from one array to a struct: + %2 = cir.const(#cir.int<2> : !u32i) : !u32i + cir.libc.memcpy %2 bytes from %arr to %struct : !cir.ptr -> !cir.ptr + ``` + }]; + + let assemblyFormat = [{ + $len `bytes` `from` $src `to` $dst attr-dict + `:` type($len) `` `,` qualified(type($src)) `->` qualified(type($dst)) + }]; + let hasVerifier = 1; + + let extraClassDeclaration = [{ + /// Returns the data source pointer type. + mlir::cir::PointerType getSrcTy() { return getSrc().getType(); } + + /// Returns the data destination pointer type. + mlir::cir::PointerType getDstTy() { return getDst().getType(); } + + /// Returns the byte length type. + mlir::cir::IntType getLenTy() { return getLen().getType(); } + }]; +} + //===----------------------------------------------------------------------===// // Variadic Operations //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index e49b7b66076d..b4a16bc9bf6e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1147,7 +1147,7 @@ LogicalResult LoopOp::verify() { static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, TypeAttr type, Attribute initAttr, - mlir::Region& ctorRegion) { + mlir::Region &ctorRegion) { auto printType = [&]() { p << ": " << type; }; if (!op.isDeclaration()) { p << "= "; @@ -1174,15 +1174,14 @@ static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, Attribute &initialValueAttr, - mlir::Region& ctorRegion) { + mlir::Region &ctorRegion) { mlir::Type opTy; if (parser.parseOptionalEqual().failed()) { // Absence of equal means a declaration, so we need to parse the type. // cir.global @a : i32 if (parser.parseColonType(opTy)) return failure(); - } - else { + } else { // Parse contructor, example: // cir.global @rgb = ctor : type { ... } if (!parser.parseOptionalKeyword("ctor")) { @@ -1285,10 +1284,10 @@ LogicalResult GlobalOp::verify() { return success(); } -void GlobalOp::build( - OpBuilder &odsBuilder, OperationState &odsState, StringRef sym_name, - Type sym_type, bool isConstant, cir::GlobalLinkageKind linkage, - function_ref ctorBuilder) { +void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, + StringRef sym_name, Type sym_type, bool isConstant, + cir::GlobalLinkageKind linkage, + function_ref ctorBuilder) { odsState.addAttribute(getSymNameAttrName(odsState.name), odsBuilder.getStringAttr(sym_name)); odsState.addAttribute(getSymTypeAttrName(odsState.name), @@ -2220,6 +2219,23 @@ LogicalResult CopyOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// MemCpyOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult MemCpyOp::verify() { + auto voidPtr = + cir::PointerType::get(getContext(), cir::VoidType::get(getContext())); + + if (!getLenTy().isUnsigned()) + return emitError() << "memcpy length must be an unsigned integer"; + + if (getSrcTy() != voidPtr || getDstTy() != voidPtr) + return emitError() << "memcpy src and dst must be void pointers"; + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index feabf3be18fa..f798499ad118 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -460,3 +460,28 @@ module { cir.return } } + +// ----- + +!s8i = !cir.int +module { + // Should not memcpy with invalid length type. + cir.func @invalid_memcpy_len(%arg0 : !cir.ptr, %arg1 : !s8i) { + // expected-error@+1 {{memcpy length must be an unsigned integer}} + cir.libc.memcpy %arg1 bytes from %arg0 to %arg0 : !s8i, !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!s8i = !cir.int +!u32i = !cir.int +module { + // Should not memcpy non-void pointers. + cir.func @invalid_memcpy_len(%arg0 : !cir.ptr, %arg1 : !u32i) { + // expected-error@+1 {{memcpy src and dst must be void pointers}} + cir.libc.memcpy %arg1 bytes from %arg0 to %arg0 : !u32i, !cir.ptr -> !cir.ptr + cir.return + } +} diff --git a/clang/test/CIR/IR/libc-memcpy.cir b/clang/test/CIR/IR/libc-memcpy.cir new file mode 100644 index 000000000000..737f56d533e3 --- /dev/null +++ b/clang/test/CIR/IR/libc-memcpy.cir @@ -0,0 +1,9 @@ +// RUN: cir-opt %s + +!u32i = !cir.int +module { + cir.func @shouldParseLibcMemcpyOp(%arg0 : !cir.ptr, %arg1 : !u32i) { + cir.libc.memcpy %arg1 bytes from %arg0 to %arg0 : !u32i, !cir.ptr -> !cir.ptr + cir.return + } +} From 85f4b2b6c197ef6abe1821fab6c1141c757c2a90 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:40:39 -0300 Subject: [PATCH 1163/2301] [CIR][CIRGen] Generate memcpy builtin Update codegen to generate CIR's custom memcpy builtin. ghstack-source-id: 7c01bc56929a3ce1439715f55f7d20fb39b0cad0 Pull Request resolved: https://github.com/llvm/clangir/pull/238 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 6 ++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 19 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 9 +++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +++++ clang/test/CIR/CodeGen/libc.c | 9 +++++++++ 5 files changed, 48 insertions(+) create mode 100644 clang/test/CIR/CodeGen/libc.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index dff8426c5144..d8477acc532e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -24,6 +24,7 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Location.h" #include "mlir/IR/Types.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/FloatingPointMode.h" @@ -476,6 +477,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return create(dst.getLoc(), dst, src); } + mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, + mlir::Value src, mlir::Value len) { + return create(loc, dst, src, len); + } + mlir::Value createNeg(mlir::Value value) { if (auto intTy = value.getType().dyn_cast()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index afc125b80876..bf1c94009a49 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -425,6 +425,25 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, /*EmittedE=*/nullptr, IsDynamic)); } + case Builtin::BImemcpy: + case Builtin::BI__builtin_memcpy: + case Builtin::BImempcpy: + case Builtin::BI__builtin_mempcpy: + Address Dest = buildPointerWithAlignment(E->getArg(0)); + Address Src = buildPointerWithAlignment(E->getArg(1)); + mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); + buildNonNullArgCheck(RValue::get(Dest.getPointer()), + E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), + FD, 0); + buildNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), + E->getArg(1)->getExprLoc(), FD, 1); + builder.createMemCpy(getLoc(E->getSourceRange()), Dest.getPointer(), + Src.getPointer(), SizeVal); + if (BuiltinID == Builtin::BImempcpy || + BuiltinID == Builtin::BI__builtin_mempcpy) + llvm_unreachable("mempcpy is NYI"); + else + return RValue::get(Dest.getPointer()); } // If this is an alias for a lib function (e.g. __builtin_sin), emit diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 9d5e91316f7e..8086635f49dd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1246,6 +1246,15 @@ RValue CallArg::getRValue(CIRGenFunction &CGF, mlir::Location loc) const { return RValue::getAggregate(Copy.getAddress()); } +void CIRGenFunction::buildNonNullArgCheck(RValue RV, QualType ArgType, + SourceLocation ArgLoc, + AbstractCallee AC, unsigned ParmNum) { + if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || + SanOpts.has(SanitizerKind::NullabilityArg))) + return; + llvm_unreachable("non-null arg check is NYI"); +} + /* VarArg handling */ // FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. We diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index fba2aae533be..90119d82091e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -930,6 +930,11 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildCallExpr(const clang::CallExpr *E, ReturnValueSlot ReturnValue = ReturnValueSlot()); + /// Create a check for a function parameter that may potentially be + /// declared as non-null. + void buildNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, + AbstractCallee AC, unsigned ParmNum); + void buildCallArg(CallArgList &args, const clang::Expr *E, clang::QualType ArgType); diff --git a/clang/test/CIR/CodeGen/libc.c b/clang/test/CIR/CodeGen/libc.c new file mode 100644 index 000000000000..16c3adf1a383 --- /dev/null +++ b/clang/test/CIR/CodeGen/libc.c @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Should generate CIR's builtin memcpy op. +void *memcpy(void *, const void *, unsigned long); +void testMemcpy(void *src, const void *dst, unsigned long size) { + memcpy(dst, src, size); + // CHECK: cir.libc.memcpy %{{.+}} bytes from %{{.+}} to %{{.+}} : !u64i, !cir.ptr -> !cir.ptr +} From e9eef9dabf46730fce787f9c365bfaf0e78beddc Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 21 Aug 2023 19:40:39 -0300 Subject: [PATCH 1164/2301] [CIR][Lowering] Lower cir.libc.memcpy operation Converts cir.libc.memcpy operation to llvm.intr.memcpy intrinsic. ghstack-source-id: cbeabd1d526873835bb67f1c1027be57415798c1 Pull Request resolved: https://github.com/llvm/clangir/pull/239 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 19 +++++++++++++++++-- clang/test/CIR/Lowering/libc.cir | 12 ++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/Lowering/libc.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index beca6beedfab..83001105b4be 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -219,6 +219,21 @@ class CIRCopyOpLowering : public mlir::OpConversionPattern { } }; +class CIRMemCpyOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::MemCpyOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLen(), + /*isVolatile=*/false); + return mlir::success(); + } +}; + class CIRPtrStrideOpLowering : public mlir::OpConversionPattern { public: @@ -1769,8 +1784,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, CIRStructElementAddrOpLowering, CIRSwitchOpLowering, - CIRPtrDiffOpLowering, CIRCopyOpLowering>(converter, - patterns.getContext()); + CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/libc.cir b/clang/test/CIR/Lowering/libc.cir new file mode 100644 index 000000000000..74e384d08a74 --- /dev/null +++ b/clang/test/CIR/Lowering/libc.cir @@ -0,0 +1,12 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!void = !cir.void +!u64i = !cir.int +module { + cir.func @shouldLowerLibcMemcpyBuiltin(%arg0: !cir.ptr, %arg1: !cir.ptr, %arg2: !u64i) { + cir.libc.memcpy %arg2 bytes from %arg0 to %arg1 : !u64i, !cir.ptr -> !cir.ptr + // CHECK: "llvm.intr.memcpy"(%{{.+}}, %{{.+}}, %{{.+}}) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i64) -> () + cir.return + } +} From bc1d5f9934e83319c6f4343f2b385ede97a01157 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 21 Aug 2023 19:50:36 -0700 Subject: [PATCH 1165/2301] [CIR][CIRTidy] Add workaround now that clang-tidy is taking place This unbreaks the linux build and make sure cir-tidy can build successfully until we change our infra to rely instead on clang-tidy for the lifetime checker. --- clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp index 0468f9198ce8..e333e562c3cc 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp @@ -141,3 +141,15 @@ runCIRTidy(ClangTidyContext &Context, const CompilationDatabase &Compilations, } // namespace tidy } // namespace cir + +// Now that clang-tidy is integrated with the lifetime checker, CIR changes to +// ClangTidyForceLinker.h are forcing CIRModuleAnchorSource to also be available +// as part of cir-tidy. Since cir-tidy is going to be removed soon, add this so +// that it can still builds in the meantime. +namespace clang::tidy { + +// This anchor is used to force the linker to link in the generated object file +// and thus register the CIRModule. +volatile int CIRModuleAnchorSource = 0; + +} // namespace clang::tidy From 6a948f47f1ecf49269b7a1198ac045a02aa56c9b Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Thu, 20 Jul 2023 14:45:59 -0300 Subject: [PATCH 1166/2301] [CIR][CIRGen][Bugfix] Fix pointer subscript operator access When using subscript access operators on pointers and decayed arrays, an `array_to_ptrdecay` was wrongly applied, generating invalid strides. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 31 ++++++++++++++++------------ clang/test/CIR/CodeGen/array.cpp | 19 +++++++++++++++++ clang/test/CIR/CodeGen/pointers.cpp | 19 +++++++++++++++++ 3 files changed, 56 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 372c77ffa0a8..190ed0fb5d4e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1134,19 +1134,22 @@ static mlir::Value buildArrayAccessOp(mlir::OpBuilder &builder, mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, - mlir::Value idx) { - mlir::Value basePtr = - maybeBuildArrayDecay(builder, arrayLocBegin, arrayPtr, eltTy); + mlir::Value idx, bool shouldDecay) { + mlir::Value basePtr = arrayPtr; + if (shouldDecay) + basePtr = maybeBuildArrayDecay(builder, arrayLocBegin, arrayPtr, eltTy); mlir::Type flatPtrTy = basePtr.getType(); return builder.create(arrayLocEnd, flatPtrTy, basePtr, idx); } -static mlir::Value buildArraySubscriptPtr( - CIRGenFunction &CGF, mlir::Location beginLoc, mlir::Location endLoc, - mlir::Value ptr, mlir::Type eltTy, ArrayRef indices, - bool inbounds, bool signedIndices, const llvm::Twine &name = "arrayidx") { +static mlir::Value +buildArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, + mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, + ArrayRef indices, bool inbounds, + bool signedIndices, bool shouldDecay, + const llvm::Twine &name = "arrayidx") { assert(indices.size() == 1 && "cannot handle multiple indices yet"); auto idx = indices.back(); auto &CGM = CGF.getCIRGenModule(); @@ -1154,14 +1157,14 @@ static mlir::Value buildArraySubscriptPtr( // that would enhance tracking this later in CIR? if (inbounds) assert(!UnimplementedFeature::emitCheckedInBoundsGEP() && "NYI"); - return buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, ptr, eltTy, - idx); + return buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, ptr, eltTy, idx, + shouldDecay); } static Address buildArraySubscriptPtr( CIRGenFunction &CGF, mlir::Location beginLoc, mlir::Location endLoc, Address addr, ArrayRef indices, QualType eltType, - bool inbounds, bool signedIndices, mlir::Location loc, + bool inbounds, bool signedIndices, mlir::Location loc, bool shouldDecay, QualType *arrayType = nullptr, const Expr *Base = nullptr, const llvm::Twine &name = "arrayidx") { // Determine the element size of the statically-sized base. This is @@ -1181,7 +1184,7 @@ static Address buildArraySubscriptPtr( (!CGF.IsInPreservedAIRegion && !isPreserveAIArrayBase(CGF, Base))) { eltPtr = buildArraySubscriptPtr(CGF, beginLoc, endLoc, addr.getPointer(), addr.getElementType(), indices, inbounds, - signedIndices, name); + signedIndices, shouldDecay, name); } else { // assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); // assert(indices.size() == 1 && "cannot handle multiple indices yet"); @@ -1271,7 +1274,8 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, *this, CGM.getLoc(Array->getBeginLoc()), CGM.getLoc(Array->getEndLoc()), ArrayLV.getAddress(), {Idx}, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, - CGM.getLoc(E->getExprLoc()), &arrayType, E->getBase()); + CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/true, &arrayType, + E->getBase()); EltBaseInfo = ArrayLV.getBaseInfo(); // TODO(cir): EltTBAAInfo assert(!UnimplementedFeature::tbaa() && "TBAA is NYI"); @@ -1285,7 +1289,8 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, Addr = buildArraySubscriptPtr( *this, CGM.getLoc(E->getBeginLoc()), CGM.getLoc(E->getEndLoc()), Addr, Idx, E->getType(), !getLangOpts().isSignedOverflowDefined(), - SignedIndices, CGM.getLoc(E->getExprLoc()), &ptrType, E->getBase()); + SignedIndices, CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/false, + &ptrType, E->getBase()); } LValue LV = LValue::makeAddr(Addr, E->getType(), EltBaseInfo); diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index de81e92131ca..fef24a5e95f2 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -71,3 +71,22 @@ struct S { int i; } arr[3] = {{1}}; // CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22]> : !cir.array + +void testPointerDecaySubscriptAccess(int arr[]) { +// CHECK: cir.func @{{.+}}testPointerDecaySubscriptAccess + arr[1]; + // CHECK: %[[#BASE:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr + // CHECK: %[[#DIM1:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: cir.ptr_stride(%[[#BASE]] : !cir.ptr, %[[#DIM1]] : !s32i), !cir.ptr +} + +void testPointerDecayedArrayMultiDimSubscriptAccess(int arr[][3]) { +// CHECK: cir.func @{{.+}}testPointerDecayedArrayMultiDimSubscriptAccess + arr[1][2]; + // CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >>, !cir.ptr> + // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#V3:]] = cir.ptr_stride(%[[#V1]] : !cir.ptr>, %[[#V2]] : !s32i), !cir.ptr> + // CHECK: %[[#V4:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: %[[#V5:]] = cir.cast(array_to_ptrdecay, %[[#V3]] : !cir.ptr>), !cir.ptr + // CHECK: cir.ptr_stride(%[[#V5]] : !cir.ptr, %[[#V4]] : !s32i), !cir.ptr +} diff --git a/clang/test/CIR/CodeGen/pointers.cpp b/clang/test/CIR/CodeGen/pointers.cpp index 5dcb458502c9..8df8a0f6b658 100644 --- a/clang/test/CIR/CodeGen/pointers.cpp +++ b/clang/test/CIR/CodeGen/pointers.cpp @@ -28,3 +28,22 @@ void foo(int *iptr, char *cptr, unsigned ustride) { // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#SIGNSTRIDE]]) : !s32i, !s32i // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr } + +void testPointerSubscriptAccess(int *ptr) { +// CHECK: testPointerSubscriptAccess + ptr[1]; + // CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr + // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: cir.ptr_stride(%[[#V1]] : !cir.ptr, %[[#V2]] : !s32i), !cir.ptr +} + +void testPointerMultiDimSubscriptAccess(int **ptr) { +// CHECK: testPointerMultiDimSubscriptAccess + ptr[1][2]; + // CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >>, !cir.ptr> + // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#V3:]] = cir.ptr_stride(%[[#V1]] : !cir.ptr>, %[[#V2]] : !s32i), !cir.ptr> + // CHECK: %[[#V4:]] = cir.load %[[#V3]] : cir.ptr >, !cir.ptr + // CHECK: %[[#V5:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: cir.ptr_stride(%[[#V4]] : !cir.ptr, %[[#V5]] : !s32i), !cir.ptr +} From 06455473558819fefe1184502a197c4c5992b7b7 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Wed, 23 Aug 2023 15:14:14 -0700 Subject: [PATCH 1167/2301] [CIR][Lowering] Emit llvm.global_ctors list (#240) Creating the `llvm.global_ctors` list to hold all global dynamic initializers. The list has the following format: ``` %0 = type { i32, ptr, ptr } @llvm.global_ctors = appending global [1 x %0] [%0 { i32 65535, ptr @ctor, ptr @data }] ``` The list will be converted to `.init_array` for ELF by LLVM which will be loaded and executed by the C++ runtime. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 21 +++++ .../Dialect/Transforms/LoweringPrepare.cpp | 18 ++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 77 +++++++++++++++++++ clang/test/CIR/CodeGen/static.cpp | 4 +- 4 files changed, 116 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 5c5f87601032..af73fc889be6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -452,4 +452,25 @@ def OptNoneAttr : CIRUnitAttr<"OptNone", "optnone"> { let storageType = [{ OptNoneAttr }]; } +def GlobalCtorAttr : CIR_Attr<"GlobalCtor", "globalCtor"> { + let summary = "Indicates a function is a global constructor."; + let description = [{ + Describing a global constructor with an optional priority. + }]; + let parameters = (ins "StringAttr":$name, + OptionalParameter<"std::optional">:$priority); + let assemblyFormat = [{ + `<` + $name + (`,` $priority^)? + `>` + }]; + let builders = [ + AttrBuilder<(ins "StringRef":$name, + CArg<"std::optional", "{}">:$priority), [{ + return $_get($_ctxt, StringAttr::get($_ctxt, name), priority); + }]> + ]; + let skipDefaultBuilders = 1; +} #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 053d91e05f57..2e1eaa05a8e7 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -8,6 +8,7 @@ #include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Mangle.h" @@ -125,6 +126,8 @@ void LoweringPreparePass::lowerGlobalOp(GlobalOp op) { ctorRegion.getBlocks().clear(); // Add a function call to the variable initialization function. + assert(!op.getAst()->getAstDecl()->getAttr() && + "custom initialization priority NYI"); dynamicInitializers.push_back(f); } } @@ -133,6 +136,17 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { if (dynamicInitializers.empty()) return; + SmallVector attrs; + for (auto &f : dynamicInitializers) { + // TODO: handle globals with a user-specified initialzation priority. + auto ctorAttr = + mlir::cir::GlobalCtorAttr::get(&getContext(), f.getName()); + attrs.push_back(ctorAttr); + } + + theModule->setAttr("cir.globalCtors", + mlir::ArrayAttr::get(&getContext(), attrs)); + SmallString<256> fnName; // Include the filename in the symbol name. Including "sub_" matches gcc // and makes sure these symbols appear lexicographically behind the symbols @@ -161,9 +175,9 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { builder.getContext(), mlir::cir::GlobalLinkageKind::ExternalLinkage)); mlir::SymbolTable::setSymbolVisibility( f, mlir::SymbolTable::Visibility::Private); - mlir::NamedAttrList attrs; + mlir::NamedAttrList extraAttrs; f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), attrs.getDictionary(builder.getContext()))); + builder.getContext(), extraAttrs.getDictionary(builder.getContext()))); builder.setInsertionPointToStart(f.addEntryBlock()); for (auto &f : dynamicInitializers) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 83001105b4be..f9fc04f6d41b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -57,6 +57,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/Support/Casting.h" @@ -1839,6 +1840,79 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { } } // namespace +static void buildCtorList(mlir::ModuleOp module) { + llvm::SmallVector, 2> globalCtors; + for (auto namedAttr : module->getAttrs()) { + if (namedAttr.getName() == "cir.globalCtors") { + for (auto attr : namedAttr.getValue().cast()) { + assert(attr.isa() && + "must be a GlobalCtorAttr"); + if (auto ctorAttr = attr.cast()) { + // default priority is 65536 + int priority = 65536; + if (ctorAttr.getPriority()) + priority = *ctorAttr.getPriority(); + globalCtors.emplace_back(ctorAttr.getName(), priority); + } + } + break; + } + } + + if (globalCtors.empty()) + return; + + mlir::OpBuilder builder(module.getContext()); + builder.setInsertionPointToEnd(&module.getBodyRegion().back()); + + // Create a global array llvm.global_ctors with element type of + // struct { i32, ptr, ptr } + auto CtorPFTy = mlir::LLVM::LLVMPointerType::get(builder.getContext()); + llvm::SmallVector CtorStructFields; + CtorStructFields.push_back(builder.getI32Type()); + CtorStructFields.push_back(CtorPFTy); + CtorStructFields.push_back(CtorPFTy); + + auto CtorStructTy = mlir::LLVM::LLVMStructType::getLiteral( + builder.getContext(), CtorStructFields); + auto CtorStructArrayTy = + mlir::LLVM::LLVMArrayType::get(CtorStructTy, globalCtors.size()); + + auto loc = module.getLoc(); + auto newGlobalOp = builder.create( + loc, CtorStructArrayTy, true, mlir::LLVM::Linkage::Appending, + "llvm.global_ctors", mlir::Attribute()); + + newGlobalOp.getRegion().push_back(new mlir::Block()); + builder.setInsertionPointToEnd(newGlobalOp.getInitializerBlock()); + + mlir::Value result = builder.create( + loc, CtorStructArrayTy); + + for (uint64_t I = 0; I < globalCtors.size(); I++) { + auto fn = globalCtors[I]; + mlir::Value structInit = + builder.create(loc, CtorStructTy); + mlir::Value initPriority = + builder.create(loc, CtorStructFields[0], fn.second); + mlir::Value initFuncAddr = builder.create( + loc, CtorStructFields[1], fn.first); + mlir::Value initAssociate = + builder.create(loc, CtorStructFields[2]); + structInit = builder.create(loc, structInit, + initPriority, 0); + structInit = builder.create(loc, structInit, + initFuncAddr, 1); + // TODO: handle associated data for initializers. + structInit = builder.create(loc, structInit, + initAssociate, 2); + result = + builder.create(loc, result, structInit, I); + } + + builder.create(loc, result); +} + void ConvertCIRToLLVMPass::runOnOperation() { auto module = getOperation(); @@ -1881,6 +1955,9 @@ void ConvertCIRToLLVMPass::runOnOperation() { if (failed(applyPartialConversion(module, target, std::move(patterns)))) signalPassFailure(); + + // Emit the llvm.global_ctors array. + buildCtorList(module); } std::unique_ptr createConvertCIRToLLVMPass() { diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 6577d44d9efd..5fd84f2c295d 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -31,7 +31,7 @@ static Init __ioinit2(false); // BEFORE-NEXT: } -// AFTER: module {{.*}} { +// AFTER: module {{.*}} attributes {{.*}}cir.globalCtors = [#cir.globalCtor<"__cxx_global_var_init">, #cir.globalCtor<"__cxx_global_var_init.1">] // AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) // AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22class2EInit22 {ast = #cir.vardecl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init() @@ -50,9 +50,9 @@ static Init __ioinit2(false); // AFTER-NEXT: cir.call @__cxx_global_var_init.1() : () -> () // AFTER-NEXT: cir.return - // LLVM: @_ZL8__ioinit = internal global %class.Init zeroinitializer // LLVM: @_ZL9__ioinit2 = internal global %class.Init zeroinitializer +// LLVM: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }, { i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init.1, ptr null }] // LLVM: define internal void @__cxx_global_var_init() // LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL8__ioinit, i8 1) // LLVM-NEXT: ret void From f5749ddc06dcd6927af603b1f09784efe2183f8a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 24 Aug 2023 16:26:45 -0700 Subject: [PATCH 1168/2301] [CIR][ClangTidy] Change the approach to running CIR passes We previously were relying on adding a custom ASTConsumer to run the passes and generate diagnostics (e.g. lifetime checker). The problem with this approach is that it doesn't work when trying to use clang-tidy from clangd. Since clangd plays differently with ASTConsumer's, it's not possible to apply the same approach. This is mitigated by setting up a simple matcher for TranslationUnitDecl, and use ASTContext to emit CIR at `check` time. --- clang-tools-extra/clang-tidy/ClangTidy.cpp | 233 +----------------- .../clang-tidy/ClangTidyDiagnosticConsumer.h | 22 ++ .../clang-tidy/ClangTidyForceLinker.h | 2 + .../clang-tidy/cir-tidy/CIRTidy.cpp | 2 + clang-tools-extra/clang-tidy/cir/Lifetime.cpp | 177 ++++++++++++- clang-tools-extra/clang-tidy/cir/Lifetime.h | 13 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 10 + 7 files changed, 221 insertions(+), 238 deletions(-) diff --git a/clang-tools-extra/clang-tidy/ClangTidy.cpp b/clang-tools-extra/clang-tidy/ClangTidy.cpp index 7788a2d753ee..474e6733ba1a 100644 --- a/clang-tools-extra/clang-tidy/ClangTidy.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidy.cpp @@ -20,7 +20,9 @@ #include "ClangTidyModuleRegistry.h" #include "ClangTidyProfiling.h" #include "ExpandModularHeadersPPCallbacks.h" +#ifndef CLANG_TIDY_CONFIG_H #include "clang-tidy-config.h" +#endif #include "utils/OptionsUtils.h" #include "clang/AST/ASTConsumer.h" #include "clang/ASTMatchers/ASTMatchFinder.h" @@ -49,17 +51,6 @@ #include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h" #endif // CLANG_TIDY_ENABLE_STATIC_ANALYZER -#if CLANG_ENABLE_CIR -#include "mlir/IR/BuiltinOps.h" -#include "mlir/IR/MLIRContext.h" -#include "mlir/Pass/Pass.h" -#include "mlir/Pass/PassManager.h" -#include "clang/AST/ASTContext.h" -#include "clang/CIR/CIRGenerator.h" -#include "clang/CIR/Dialect/Passes.h" -#include -#endif // CLANG_ENABLE_CIR - using namespace clang::ast_matchers; using namespace clang::driver; using namespace clang::tooling; @@ -104,205 +95,6 @@ class AnalyzerDiagnosticConsumer : public ento::PathDiagnosticConsumer { }; #endif // CLANG_TIDY_ENABLE_STATIC_ANALYZER -#if CLANG_ENABLE_CIR -namespace cir { - -constexpr const char *LifetimeCheckName = "cir-lifetime-check"; -struct CIROpts { - std::vector RemarksList; - std::vector HistoryList; - unsigned HistLimit; -}; - -static const char StringsDelimiter[] = ";"; - -// FIXME(cir): this function was extracted from clang::tidy::utils::options -// given that ClangTidy.cpp cannot be linked with ClangTidyUtils. -std::vector parseStringList(StringRef Option) { - Option = Option.trim().trim(StringsDelimiter); - if (Option.empty()) - return {}; - std::vector Result; - Result.reserve(Option.count(StringsDelimiter) + 1); - StringRef Cur; - while (std::tie(Cur, Option) = Option.split(StringsDelimiter), - !Option.empty()) { - Cur = Cur.trim(); - if (!Cur.empty()) - Result.push_back(Cur); - } - Cur = Cur.trim(); - if (!Cur.empty()) - Result.push_back(Cur); - return Result; -} - -class CIRASTConsumer : public ASTConsumer { -public: - CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, - clang::tidy::ClangTidyContext &Context, CIROpts &cirOpts); - -private: - void Initialize(ASTContext &Context) override; - void HandleTranslationUnit(ASTContext &C) override; - bool HandleTopLevelDecl(DeclGroupRef D) override; - std::unique_ptr<::cir::CIRGenerator> Gen; - ASTContext *AstContext{nullptr}; - clang::tidy::ClangTidyContext &Context; - CIROpts cirOpts; -}; - -/// CIR AST Consumer -CIRASTConsumer::CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, - clang::tidy::ClangTidyContext &Context, - CIROpts &O) - : Context(Context), cirOpts(O) { - Gen = std::make_unique<::cir::CIRGenerator>(CI.getDiagnostics(), nullptr, - CI.getCodeGenOpts()); -} - -bool CIRASTConsumer::HandleTopLevelDecl(DeclGroupRef D) { - PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), - AstContext->getSourceManager(), - "CIR generation of declaration"); - Gen->HandleTopLevelDecl(D); - return true; -} - -void CIRASTConsumer::Initialize(ASTContext &Context) { - AstContext = &Context; - Gen->Initialize(Context); -} - -void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { - Gen->HandleTranslationUnit(C); - Gen->verifyModule(); - - mlir::ModuleOp mlirMod = Gen->getModule(); - std::unique_ptr mlirCtx = Gen->takeContext(); - - mlir::OpPrintingFlags flags; - flags.enableDebugInfo(/*prettyForm=*/false); - - clang::SourceManager &clangSrcMgr = C.getSourceManager(); - FileID MainFileID = clangSrcMgr.getMainFileID(); - - llvm::MemoryBufferRef MainFileBuf = clangSrcMgr.getBufferOrFake(MainFileID); - std::unique_ptr FileBuf = - llvm::MemoryBuffer::getMemBuffer(MainFileBuf); - - llvm::SourceMgr llvmSrcMgr; - llvmSrcMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); - - class CIRTidyDiagnosticHandler : public mlir::SourceMgrDiagnosticHandler { - clang::tidy::ClangTidyContext &tidyCtx; - clang::SourceManager &clangSrcMgr; - - clang::SourceLocation getClangFromFileLineCol(mlir::FileLineColLoc loc) { - clang::SourceLocation clangLoc; - FileManager &fileMgr = clangSrcMgr.getFileManager(); - assert(loc && "not a valid mlir::FileLineColLoc"); - // The column and line may be zero to represent unknown column and/or - // unknown line/column information. - if (loc.getLine() == 0 || loc.getColumn() == 0) { - llvm_unreachable("How should we workaround this?"); - return clangLoc; - } - if (auto FE = fileMgr.getFile(loc.getFilename())) { - return clangSrcMgr.translateFileLineCol(*FE, loc.getLine(), - loc.getColumn()); - } - llvm_unreachable("location doesn't map to a file?"); - } - - clang::SourceLocation getClangSrcLoc(mlir::Location loc) { - // Direct maps into a clang::SourceLocation. - if (auto fileLoc = loc.dyn_cast()) { - return getClangFromFileLineCol(fileLoc); - } - - // FusedLoc needs to be decomposed but the canonical one - // is the first location, we handle source ranges somewhere - // else. - if (auto fileLoc = loc.dyn_cast()) { - auto locArray = fileLoc.getLocations(); - assert(locArray.size() > 0 && "expected multiple locs"); - return getClangFromFileLineCol( - locArray[0].dyn_cast()); - } - - // Many loc styles are yet to be handled. - if (auto fileLoc = loc.dyn_cast()) { - llvm_unreachable("mlir::UnknownLoc not implemented!"); - } - if (auto fileLoc = loc.dyn_cast()) { - llvm_unreachable("mlir::CallSiteLoc not implemented!"); - } - llvm_unreachable("Unknown location style"); - } - - clang::DiagnosticIDs::Level - translateToClangDiagLevel(const mlir::DiagnosticSeverity &sev) { - switch (sev) { - case mlir::DiagnosticSeverity::Note: - return clang::DiagnosticIDs::Level::Note; - case mlir::DiagnosticSeverity::Warning: - return clang::DiagnosticIDs::Level::Warning; - case mlir::DiagnosticSeverity::Error: - return clang::DiagnosticIDs::Level::Error; - case mlir::DiagnosticSeverity::Remark: - return clang::DiagnosticIDs::Level::Remark; - } - llvm_unreachable("should not get here!"); - } - - public: - void emitClangTidyDiagnostic(mlir::Diagnostic &diag) { - auto clangBeginLoc = getClangSrcLoc(diag.getLocation()); - tidyCtx.diag(LifetimeCheckName, clangBeginLoc, diag.str(), - translateToClangDiagLevel(diag.getSeverity())); - for (const auto ¬e : diag.getNotes()) { - auto clangNoteBeginLoc = getClangSrcLoc(note.getLocation()); - tidyCtx.diag(LifetimeCheckName, clangNoteBeginLoc, note.str(), - translateToClangDiagLevel(note.getSeverity())); - } - } - - CIRTidyDiagnosticHandler(llvm::SourceMgr &mgr, mlir::MLIRContext *ctx, - clang::tidy::ClangTidyContext &tidyContext, - clang::SourceManager &clangMgr, - ShouldShowLocFn &&shouldShowLocFn = {}) - : SourceMgrDiagnosticHandler(mgr, ctx, llvm::errs(), - std::move(shouldShowLocFn)), - tidyCtx(tidyContext), clangSrcMgr(clangMgr) { - setHandler( - [this](mlir::Diagnostic &diag) { emitClangTidyDiagnostic(diag); }); - } - ~CIRTidyDiagnosticHandler() = default; - }; - - // Use a custom diagnostic handler that can allow both regular printing to - // stderr but also populates clang-tidy context with diagnostics (and allow - // for instance, diagnostics to be later converted to YAML). - CIRTidyDiagnosticHandler sourceMgrHandler(llvmSrcMgr, mlirCtx.get(), Context, - clangSrcMgr); - - mlir::PassManager pm(mlirCtx.get()); - pm.addPass(mlir::createMergeCleanupsPass()); - - if (Context.isCheckEnabled(LifetimeCheckName)) - pm.addPass(mlir::createLifetimeCheckPass( - cirOpts.RemarksList, cirOpts.HistoryList, cirOpts.HistLimit, &C)); - - bool Result = !mlir::failed(pm.run(mlirMod)); - if (!Result) - llvm::report_fatal_error( - "The pass manager failed to run pass on the module!"); -} -} // namespace cir - -#endif - class ErrorReporter { public: ErrorReporter(ClangTidyContext &Context, FixBehaviour ApplyFixes, @@ -678,27 +470,6 @@ ClangTidyASTConsumerFactory::createASTConsumer( } #endif // CLANG_TIDY_ENABLE_STATIC_ANALYZER -#if CLANG_ENABLE_CIR - if (Context.isCheckEnabled(cir::LifetimeCheckName)) { - auto OV = ClangTidyCheck::OptionsView( - cir::LifetimeCheckName, Context.getOptions().CheckOptions, &Context); - // Setup CIR codegen options via config specified information. - Compiler.getCodeGenOpts().ClangIRBuildDeferredThreshold = - OV.get("CodeGenBuildDeferredThreshold", 500U); - Compiler.getCodeGenOpts().ClangIRSkipFunctionsFromSystemHeaders = - OV.get("CodeGenSkipFunctionsFromSystemHeaders", false); - - cir::CIROpts opts; - opts.RemarksList = cir::parseStringList(OV.get("RemarksList", "")); - opts.HistoryList = cir::parseStringList(OV.get("HistoryList", "all")); - opts.HistLimit = OV.get("HistLimit", 1U); - - std::unique_ptr CIRConsumer = - std::make_unique(Compiler, File, Context, opts); - Consumers.push_back(std::move(CIRConsumer)); - } -#endif // CLANG_ENABLE_CIR - return std::make_unique( std::move(Consumers), std::move(Profiling), std::move(Finder), std::move(Checks)); diff --git a/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h b/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h index ff42f96a0477..82fb8636c755 100644 --- a/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h +++ b/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.h @@ -20,6 +20,18 @@ #include "llvm/Support/Regex.h" #include +// Workaround unitests not needing to change unittests to require +// "clang-tidy-config.h" being generated. +#if __has_include("clang-tidy-config.h") +#ifndef CLANG_TIDY_CONFIG_H +#include "clang-tidy-config.h" +#endif +#endif + +#if CLANG_ENABLE_CIR +#include "clang/Basic/CodeGenOptions.h" +#endif + namespace clang { class ASTContext; @@ -140,6 +152,12 @@ class ClangTidyContext { /// Gets the language options from the AST context. const LangOptions &getLangOpts() const { return LangOpts; } +#if CLANG_ENABLE_CIR + /// Get and set CodeGenOpts + CodeGenOptions &getCodeGenOpts() { return CodeGenOpts; }; + void setCodeGenOpts(CodeGenOptions &CGO) { CodeGenOpts = CGO; } +#endif + /// Returns the name of the clang-tidy check which produced this /// diagnostic ID. std::string getCheckName(unsigned DiagnosticID) const; @@ -245,6 +263,10 @@ class ClangTidyContext { LangOptions LangOpts; +#if CLANG_ENABLE_CIR + CodeGenOptions CodeGenOpts; +#endif + ClangTidyStats Stats; std::string CurrentBuildDirectory; diff --git a/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h b/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h index 9926571fe989..6d3ffa743460 100644 --- a/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h +++ b/clang-tools-extra/clang-tidy/ClangTidyForceLinker.h @@ -9,7 +9,9 @@ #ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CLANGTIDYFORCELINKER_H #define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CLANGTIDYFORCELINKER_H +#ifndef CLANG_TIDY_CONFIG_H #include "clang-tidy-config.h" +#endif #include "llvm/Support/Compiler.h" namespace clang::tidy { diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp index e333e562c3cc..2a751fab2744 100644 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp +++ b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp @@ -18,7 +18,9 @@ #include "CIRASTConsumer.h" #include "ClangTidyModuleRegistry.h" #include "ClangTidyProfiling.h" +#ifndef CLANG_TIDY_CONFIG_H #include "clang-tidy-config.h" +#endif #include "clang/Frontend/CompilerInstance.h" #include "clang/Lex/PreprocessorOptions.h" #include "clang/Tooling/DiagnosticsYaml.h" diff --git a/clang-tools-extra/clang-tidy/cir/Lifetime.cpp b/clang-tools-extra/clang-tidy/cir/Lifetime.cpp index 93aec96271ee..e74b34825318 100644 --- a/clang-tools-extra/clang-tidy/cir/Lifetime.cpp +++ b/clang-tools-extra/clang-tidy/cir/Lifetime.cpp @@ -7,22 +7,191 @@ //===----------------------------------------------------------------------===// #include "Lifetime.h" +#include "../utils/OptionsUtils.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/DeclGroup.h" #include "clang/ASTMatchers/ASTMatchFinder.h" +#include "clang/CIR/CIRGenerator.h" +#include "clang/CIR/Dialect/Passes.h" +#include "clang/Format/Format.h" +#include "clang/Frontend/ASTConsumers.h" #include "clang/Tooling/FixIt.h" +#include using namespace clang::ast_matchers; +using namespace clang; namespace clang::tidy::cir { +Lifetime::Lifetime(StringRef Name, ClangTidyContext *Context) + : ClangTidyCheck(Name, Context), codeGenOpts(Context->getCodeGenOpts()), + cirOpts{} { + auto OV = OptionsView(Name, Context->getOptions().CheckOptions, Context); + codeGenOpts.ClangIRBuildDeferredThreshold = + OV.get("CodeGenBuildDeferredThreshold", 500U); + codeGenOpts.ClangIRSkipFunctionsFromSystemHeaders = + OV.get("CodeGenSkipFunctionsFromSystemHeaders", false); + + cirOpts.RemarksList = + utils::options::parseStringList(OV.get("RemarksList", "")); + cirOpts.HistoryList = + utils::options::parseStringList(OV.get("HistoryList", "all")); + cirOpts.HistLimit = OV.get("HistLimit", 1U); +} + void Lifetime::registerMatchers(MatchFinder *Finder) { - // Finder->addMatcher(callExpr().bind("CE"), this); - // assert(0 && "BOOM0!"); + Finder->addMatcher(translationUnitDecl(), this); +} + +void Lifetime::setupAndRunClangIRLifetimeChecker(ASTContext &astCtx) { + auto *TU = astCtx.getTranslationUnitDecl(); + // This is the hook used to build clangir and run the lifetime checker + // pass. Perhaps in the future it's possible to come up with a better + // integration story. + + // Create an instance of CIRGenerator and use it to build CIR, followed by + // MLIR module verification. + std::unique_ptr<::cir::CIRGenerator> Gen = + std::make_unique<::cir::CIRGenerator>(astCtx.getDiagnostics(), nullptr, + codeGenOpts); + Gen->Initialize(astCtx); + Gen->HandleTopLevelDecl(DeclGroupRef(TU)); + Gen->HandleTranslationUnit(astCtx); + Gen->verifyModule(); + + mlir::ModuleOp mlirMod = Gen->getModule(); + std::unique_ptr mlirCtx = Gen->takeContext(); + + mlir::OpPrintingFlags flags; + flags.enableDebugInfo(/*prettyForm=*/false); + + clang::SourceManager &clangSrcMgr = astCtx.getSourceManager(); + FileID MainFileID = clangSrcMgr.getMainFileID(); + + // Do some big dance with diagnostics here: hijack clang's diagnostics with + // MLIR one. + llvm::MemoryBufferRef MainFileBuf = clangSrcMgr.getBufferOrFake(MainFileID); + std::unique_ptr FileBuf = + llvm::MemoryBuffer::getMemBuffer(MainFileBuf); + + llvm::SourceMgr llvmSrcMgr; + llvmSrcMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); + + class CIRTidyDiagnosticHandler : public mlir::SourceMgrDiagnosticHandler { + ClangTidyCheck &tidyCheck; + clang::SourceManager &clangSrcMgr; + + clang::SourceLocation getClangFromFileLineCol(mlir::FileLineColLoc loc) { + clang::SourceLocation clangLoc; + FileManager &fileMgr = clangSrcMgr.getFileManager(); + assert(loc && "not a valid mlir::FileLineColLoc"); + // The column and line may be zero to represent unknown column + // and/or unknown line/column information. + if (loc.getLine() == 0 || loc.getColumn() == 0) { + llvm_unreachable("How should we workaround this?"); + return clangLoc; + } + if (auto FE = fileMgr.getFile(loc.getFilename())) { + return clangSrcMgr.translateFileLineCol(*FE, loc.getLine(), + loc.getColumn()); + } + llvm_unreachable("location doesn't map to a file?"); + } + + clang::SourceLocation getClangSrcLoc(mlir::Location loc) { + // Direct maps into a clang::SourceLocation. + if (auto fileLoc = loc.dyn_cast()) { + return getClangFromFileLineCol(fileLoc); + } + + // FusedLoc needs to be decomposed but the canonical one + // is the first location, we handle source ranges somewhere + // else. + if (auto fileLoc = loc.dyn_cast()) { + auto locArray = fileLoc.getLocations(); + assert(locArray.size() > 0 && "expected multiple locs"); + return getClangFromFileLineCol( + locArray[0].dyn_cast()); + } + + // Many loc styles are yet to be handled. + if (auto fileLoc = loc.dyn_cast()) { + llvm_unreachable("mlir::UnknownLoc not implemented!"); + } + if (auto fileLoc = loc.dyn_cast()) { + llvm_unreachable("mlir::CallSiteLoc not implemented!"); + } + llvm_unreachable("Unknown location style"); + } + + clang::DiagnosticIDs::Level + translateToClangDiagLevel(const mlir::DiagnosticSeverity &sev) { + switch (sev) { + case mlir::DiagnosticSeverity::Note: + return clang::DiagnosticIDs::Level::Note; + case mlir::DiagnosticSeverity::Warning: + return clang::DiagnosticIDs::Level::Warning; + case mlir::DiagnosticSeverity::Error: + return clang::DiagnosticIDs::Level::Error; + case mlir::DiagnosticSeverity::Remark: + return clang::DiagnosticIDs::Level::Remark; + } + llvm_unreachable("should not get here!"); + } + + public: + void emitClangTidyDiagnostic(mlir::Diagnostic &diag) { + auto clangBeginLoc = getClangSrcLoc(diag.getLocation()); + tidyCheck.diag(clangBeginLoc, diag.str(), + translateToClangDiagLevel(diag.getSeverity())); + for (const auto ¬e : diag.getNotes()) { + auto clangNoteBeginLoc = getClangSrcLoc(note.getLocation()); + tidyCheck.diag(clangNoteBeginLoc, note.str(), + translateToClangDiagLevel(note.getSeverity())); + } + } + + CIRTidyDiagnosticHandler(llvm::SourceMgr &mgr, mlir::MLIRContext *ctx, + ClangTidyCheck &tidyCheck, + clang::SourceManager &clangMgr, + ShouldShowLocFn &&shouldShowLocFn = {}) + : SourceMgrDiagnosticHandler(mgr, ctx, llvm::errs(), + std::move(shouldShowLocFn)), + tidyCheck(tidyCheck), clangSrcMgr(clangMgr) { + setHandler( + [this](mlir::Diagnostic &diag) { emitClangTidyDiagnostic(diag); }); + } + ~CIRTidyDiagnosticHandler() = default; + }; + + // Use a custom diagnostic handler that can allow both regular printing + // to stderr but also populates clang-tidy context with diagnostics (and + // allow for instance, diagnostics to be later converted to YAML). + CIRTidyDiagnosticHandler sourceMgrHandler(llvmSrcMgr, mlirCtx.get(), *this, + clangSrcMgr); + + mlir::PassManager pm(mlirCtx.get()); + + // Add pre-requisite passes to the pipeline + pm.addPass(mlir::createMergeCleanupsPass()); + + // Insert the lifetime checker. + pm.addPass(mlir::createLifetimeCheckPass( + cirOpts.RemarksList, cirOpts.HistoryList, cirOpts.HistLimit, &astCtx)); + + bool passResult = !mlir::failed(pm.run(mlirMod)); + if (!passResult) + llvm::report_fatal_error( + "The pass manager failed to run pass on the module!"); } void Lifetime::check(const MatchFinder::MatchResult &Result) { - // assert(0 && "BOOM1!"); + setupAndRunClangIRLifetimeChecker(*Result.Context); } -void Lifetime::onEndOfTranslationUnit() { assert(0 && "BOOM2!"); } } // namespace clang::tidy::cir diff --git a/clang-tools-extra/clang-tidy/cir/Lifetime.h b/clang-tools-extra/clang-tidy/cir/Lifetime.h index 684f1e09f698..fb65bbf5be80 100644 --- a/clang-tools-extra/clang-tidy/cir/Lifetime.h +++ b/clang-tools-extra/clang-tidy/cir/Lifetime.h @@ -14,13 +14,20 @@ namespace clang::tidy::cir { +struct CIROpts { + std::vector RemarksList; + std::vector HistoryList; + unsigned HistLimit; +}; class Lifetime : public ClangTidyCheck { public: - Lifetime(StringRef Name, ClangTidyContext *Context) - : ClangTidyCheck(Name, Context) {} + Lifetime(StringRef Name, ClangTidyContext *Context); void registerMatchers(ast_matchers::MatchFinder *Finder) override; void check(const ast_matchers::MatchFinder::MatchResult &Result) override; - void onEndOfTranslationUnit() override; + void setupAndRunClangIRLifetimeChecker(ASTContext &astCtx); + + CodeGenOptions codeGenOpts; + CIROpts cirOpts; }; } // namespace clang::tidy::cir diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 07ddff989f91..22f27dc06826 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1212,6 +1212,16 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { << decl->getDeclKindName() << "' not implemented\n"; assert(false && "Not yet implemented"); + case Decl::TranslationUnit: { + // This path is CIR only - CIRGen handles TUDecls because + // of clang-tidy checks, that operate on TU granularity. + TranslationUnitDecl *TU = cast(decl); + for (DeclContext::decl_iterator D = TU->decls_begin(), + DEnd = TU->decls_end(); + D != DEnd; ++D) + buildTopLevelDecl(*D); + return; + } case Decl::Var: case Decl::Decomposition: case Decl::VarTemplateSpecialization: From 4b639f362be5fef3b63acc21f833b31473206046 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 24 Aug 2023 17:33:14 -0700 Subject: [PATCH 1169/2301] [CIR][ClangTidy] Remove CIR/MLIR dep from main cmakefile --- clang-tools-extra/clang-tidy/CMakeLists.txt | 88 ++++----------------- 1 file changed, 15 insertions(+), 73 deletions(-) diff --git a/clang-tools-extra/clang-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/CMakeLists.txt index 02bec99c65ae..a01ce8f560ac 100644 --- a/clang-tools-extra/clang-tidy/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/CMakeLists.txt @@ -3,85 +3,27 @@ set(LLVM_LINK_COMPONENTS Support ) -if(CLANG_ENABLE_CIR) - include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/.. ) - include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) - include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) - - get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) -endif() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/clang-tidy-config.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/clang-tidy-config.h) include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) -if(CLANG_ENABLE_CIR) - add_clang_library(clangTidy - ClangTidy.cpp - ClangTidyCheck.cpp - ClangTidyModule.cpp - ClangTidyDiagnosticConsumer.cpp - ClangTidyOptions.cpp - ClangTidyProfiling.cpp - ExpandModularHeadersPPCallbacks.cpp - GlobList.cpp - NoLintDirectiveHandler.cpp - - DEPENDS - MLIRBuiltinLocationAttributesIncGen - MLIRCIROpsIncGen - MLIRCIREnumsGen - MLIRSymbolInterfacesIncGen - ClangSACheckers - omp_gen - ClangDriverOptions +add_clang_library(clangTidy + ClangTidy.cpp + ClangTidyCheck.cpp + ClangTidyModule.cpp + ClangTidyDiagnosticConsumer.cpp + ClangTidyOptions.cpp + ClangTidyProfiling.cpp + ExpandModularHeadersPPCallbacks.cpp + GlobList.cpp + NoLintDirectiveHandler.cpp - LINK_LIBS - clangCIR - ${dialect_libs} - MLIRCIR - MLIRCIRTransforms - MLIRAffineToStandard - MLIRAnalysis - MLIRIR - MLIRLLVMCommonConversion - MLIRLLVMDialect - MLIRLLVMToLLVMIRTranslation - MLIRMemRefDialect - MLIRMemRefToLLVM - MLIRParser - MLIRPass - MLIRSideEffectInterfaces - MLIRSCFToControlFlow - MLIRFuncToLLVM - MLIRSupport - MLIRMemRefDialect - MLIRTargetLLVMIRExport - MLIRTransforms - - DEPENDS - ClangSACheckers - omp_gen - ClangDriverOptions - ) -else() - add_clang_library(clangTidy - ClangTidy.cpp - ClangTidyCheck.cpp - ClangTidyModule.cpp - ClangTidyDiagnosticConsumer.cpp - ClangTidyOptions.cpp - ClangTidyProfiling.cpp - ExpandModularHeadersPPCallbacks.cpp - GlobList.cpp - NoLintDirectiveHandler.cpp - - DEPENDS - ClangSACheckers - omp_gen - ) -endif() + DEPENDS + ClangSACheckers + omp_gen + ClangDriverOptions +) clang_target_link_libraries(clangTidy PRIVATE From 2c1bbd44e7258c37cb785735e03eaa815f6ac917 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 24 Aug 2023 20:11:36 -0700 Subject: [PATCH 1170/2301] [CIR][ClangTidy] Add dep to clangTidyUtils --- clang-tools-extra/clang-tidy/cir/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang-tools-extra/clang-tidy/cir/CMakeLists.txt b/clang-tools-extra/clang-tidy/cir/CMakeLists.txt index 5c40efc09a12..0b892f332790 100644 --- a/clang-tools-extra/clang-tidy/cir/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/cir/CMakeLists.txt @@ -19,6 +19,7 @@ add_clang_library(clangTidyCIRModule clangFrontend clangSerialization clangTidy + clangTidyUtils ${dialect_libs} MLIRCIR MLIRCIRTransforms From cfd30e6d67ea9718dc1fdb215170eebfaf1275b1 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 28 Aug 2023 20:45:13 -0300 Subject: [PATCH 1171/2301] [CIR][CIRGen][NFC] Refactor struct type building Updates every struct type creation to use the CIRGenBuilder API. ghstack-source-id: b868f8206efdc61e791d821f4d4c7297865b0457 Pull Request resolved: https://github.com/llvm/clangir/pull/225 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 28 ++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 6 ++-- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 2 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 18 ++++-------- clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 19 ++----------- 5 files changed, 34 insertions(+), 39 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index d8477acc532e..a3556429687f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -27,10 +27,12 @@ #include "mlir/IR/Location.h" #include "mlir/IR/Types.h" #include "llvm/ADT/APSInt.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FloatingPointMode.h" #include "llvm/ADT/StringMap.h" #include "llvm/Support/ErrorHandling.h" #include +#include #include namespace cir { @@ -185,11 +187,10 @@ class CIRGenBuilderTy : public mlir::OpBuilder { assert(ta && "expected typed attribute member"); members.push_back(ta.getType()); } - auto *ctx = arrayAttr.getContext(); + if (!ty) - ty = mlir::cir::StructType::get(ctx, members, mlir::StringAttr::get(ctx), - /*body=*/true, packed, - /*ast=*/std::nullopt); + ty = getAnonStructTy(members, /*body=*/true, packed); + auto sTy = ty.dyn_cast(); assert(sTy && "expected struct type"); return mlir::cir::ConstStructAttr::get(sTy, arrayAttr); @@ -376,6 +377,25 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return typeCache.VoidPtrTy; } + /// Get a CIR anonymous struct type. + mlir::cir::StructType + getAnonStructTy(llvm::ArrayRef members, bool body, + bool packed = false, const clang::RecordDecl *ast = nullptr) { + return getStructTy(members, "", body, packed, ast); + } + + /// Get a CIR named struct type. + mlir::cir::StructType getStructTy(llvm::ArrayRef members, + llvm::StringRef name, bool body, + bool packed, const clang::RecordDecl *ast) { + const auto nameAttr = getStringAttr(name); + std::optional astAttr = std::nullopt; + if (ast) + astAttr = getAttr(ast); + return mlir::cir::StructType::get(getContext(), members, nameAttr, body, + packed, astAttr); + } + // // Constant creation helpers // ------------------------- diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 80a267a9652b..bd81e498dae3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -169,10 +169,8 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // Handle forward decl / incomplete types. if (!entry) { auto name = getRecordTypeName(RD, ""); - auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); - entry = mlir::cir::StructType::get( - &getMLIRContext(), {}, identifier, /*body=*/false, /**packed=*/false, - mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), RD)); + entry = Builder.getStructTy({}, name, /*body=*/false, + /*packed=*/false, RD); recordDeclTypes[key] = entry; } diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 012ab673facc..3ee14ccc014c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -58,7 +58,7 @@ mlir::Type CIRGenVTables::getVTableType(const VTableLayout &layout) { // FIXME(cir): should VTableLayout be encoded like we do for some // AST nodes? - return mlir::cir::StructType::get(ctx, tys, "", /*body=*/true); + return CGM.getBuilder().getAnonStructTy(tys, /*body=*/true); } /// At this point in the translation unit, does it appear that can we diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index d6b0d840853f..c70cf78810cf 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -571,9 +571,6 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, builder.lower(/*nonVirtualBaseType=*/false); - auto name = getRecordTypeName(D, ""); - auto identifier = mlir::StringAttr::get(&getMLIRContext(), name); - // If we're in C++, compute the base subobject type. mlir::cir::StructType *BaseTy = nullptr; if (llvm::isa(D) && !D->isUnion() && @@ -582,12 +579,9 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, if (builder.astRecordLayout.getNonVirtualSize() != builder.astRecordLayout.getSize()) { CIRRecordLowering baseBuilder(*this, D, /*Packed=*/builder.isPacked); - auto baseIdentifier = - mlir::StringAttr::get(&getMLIRContext(), name + ".base"); - *BaseTy = mlir::cir::StructType::get( - &getMLIRContext(), baseBuilder.fieldTypes, baseIdentifier, - /*body=*/true, /**packed=*/false, - mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); + auto baseIdentifier = getRecordTypeName(D, ".base"); + *BaseTy = Builder.getStructTy(baseBuilder.fieldTypes, baseIdentifier, + /*body=*/true, /*packed=*/false, D); // TODO(cir): add something like addRecordTypeName // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work @@ -600,10 +594,8 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, // Fill in the struct *after* computing the base type. Filling in the body // signifies that the type is no longer opaque and record layout is complete, // but we may need to recursively layout D while laying D out as a base type. - *Ty = mlir::cir::StructType::get( - &getMLIRContext(), builder.fieldTypes, identifier, - /*body=*/true, /**packed=*/false, - mlir::cir::ASTRecordDeclAttr::get(&getMLIRContext(), D)); + *Ty = Builder.getStructTy(builder.fieldTypes, getRecordTypeName(D, ""), + /*body=*/true, /*packed=*/false, D); auto RL = std::make_unique( Ty ? *Ty : mlir::cir::StructType{}, diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp index aaba1230f6ef..89852f29e648 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -277,19 +277,6 @@ static mlir::cir::ConstArrayAttr getConstArray(mlir::Attribute attrs, mlir::cir::ArrayType arrayTy) { return mlir::cir::ConstArrayAttr::get(arrayTy, attrs); } -static mlir::Attribute getAnonConstStruct(mlir::ArrayAttr arrayAttr, - bool packed = false) { - assert(!packed && "NYI"); - llvm::SmallVector members; - for (auto &f : arrayAttr) { - auto ta = f.dyn_cast(); - assert(ta && "expected typed attribute member"); - members.push_back(ta.getType()); - } - auto sTy = mlir::cir::StructType::get(arrayAttr.getContext(), members, "", - /*body=*/true); - return mlir::cir::ConstStructAttr::get(sTy, arrayAttr); -} mlir::Attribute ConstantAggregateBuilderBase::finishArray(mlir::Type eltTy) { markFinished(); @@ -323,8 +310,6 @@ ConstantAggregateBuilderBase::finishStruct(mlir::MLIRContext *ctx, if (ty == nullptr && elts.empty()) { llvm_unreachable("NYI"); - // ty = mlir::cir::StructType::get(Builder.CGM.getLLVMContext(), {}, - // Packed); } mlir::Attribute constant; @@ -333,8 +318,8 @@ ConstantAggregateBuilderBase::finishStruct(mlir::MLIRContext *ctx, // assert(ty->isPacked() == Packed); // constant = llvm::ConstantStruct::get(ty, elts); } else { - assert(!Packed && "NYI"); - constant = getAnonConstStruct(mlir::ArrayAttr::get(ctx, elts), Packed); + const auto members = mlir::ArrayAttr::get(ctx, elts); + constant = Builder.CGM.getBuilder().getAnonConstStruct(members, Packed); } buffer.erase(buffer.begin() + Begin, buffer.end()); From 60f5ff2b58a50c7b422e95b731600843e1530cf6 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 28 Aug 2023 20:45:13 -0300 Subject: [PATCH 1172/2301] [CIR] Refactor StructType printing/parsing This change simplifies the StructType printing/parsing methods. It also updates the representations to group members between braces and to remove commas between type attributes (e.g. id, packed, etc.). ghstack-source-id: 094d5064f827efe8963855ab8ffd04f4ad1cfb2b Pull Request resolved: https://github.com/llvm/clangir/pull/226 --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 12 -- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 22 ++-- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 104 +++++++++--------- clang/test/CIR/CodeGen/agg-init.cpp | 4 +- clang/test/CIR/CodeGen/atomic.cpp | 2 +- clang/test/CIR/CodeGen/bitfields.cpp | 4 +- clang/test/CIR/CodeGen/coro-task.cpp | 14 +-- clang/test/CIR/CodeGen/ctor.cpp | 2 +- clang/test/CIR/CodeGen/derived-to-base.cpp | 4 +- clang/test/CIR/CodeGen/dtors.cpp | 4 +- clang/test/CIR/CodeGen/lambda.cpp | 2 +- clang/test/CIR/CodeGen/move.cpp | 2 +- clang/test/CIR/CodeGen/nrvo.cpp | 2 +- clang/test/CIR/CodeGen/rangefor.cpp | 6 +- clang/test/CIR/CodeGen/struct.c | 4 +- clang/test/CIR/CodeGen/struct.cpp | 12 +- clang/test/CIR/CodeGen/union.cpp | 10 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 8 +- clang/test/CIR/IR/global.cir | 7 +- clang/test/CIR/IR/invalid.cir | 2 +- clang/test/CIR/IR/struct.cir | 16 +-- clang/test/CIR/IR/vtableAttr.cir | 2 +- clang/test/CIR/Lowering/array.cir | 2 +- clang/test/CIR/Lowering/globals.cir | 4 +- clang/test/CIR/Lowering/struct.cir | 10 +- clang/test/CIR/Lowering/variadics.cir | 2 +- 26 files changed, 127 insertions(+), 136 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index b84c9222be41..178d944f1df7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -113,18 +113,6 @@ def CIR_StructType : CIR_Type<"Struct", "struct", "std::optional<::mlir::cir::ASTRecordDeclAttr>":$ast ); - let builders = [ - TypeBuilder<(ins - "ArrayRef":$members, "StringRef":$typeName, - "bool":$body - ), [{ - auto id = mlir::StringAttr::get(context, typeName); - auto sTy = StructType::get(context, members, id, body, - /*packed=*/false, std::nullopt); - return sTy; - }]> - ]; - let hasCustomAssemblyFormat = 1; let extraClassDeclaration = [{ diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index b4a16bc9bf6e..6daba84b9c30 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -14,6 +14,7 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" @@ -2096,9 +2097,10 @@ void SignedOverflowBehaviorAttr::print(::mlir::AsmPrinter &printer) const { ::mlir::Attribute ASTFunctionDeclAttr::parse(::mlir::AsmParser &parser, ::mlir::Type type) { - // We cannot really parse anything AST related at this point - // since we have no serialization/JSON story. - return ASTFunctionDeclAttr::get(parser.getContext(), nullptr); + // We cannot really parse anything AST related at this point since we have no + // serialization/JSON story. Even if the attr is parsed, it just holds nullptr + // instead of the AST node. + return get(parser.getContext(), nullptr); } void ASTFunctionDeclAttr::print(::mlir::AsmPrinter &printer) const { @@ -2113,9 +2115,10 @@ LogicalResult ASTFunctionDeclAttr::verify( ::mlir::Attribute ASTVarDeclAttr::parse(::mlir::AsmParser &parser, ::mlir::Type type) { - // We cannot really parse anything AST related at this point - // since we have no serialization/JSON story. - return ASTVarDeclAttr::get(parser.getContext(), nullptr); + // We cannot really parse anything AST related at this point since we have no + // serialization/JSON story. Even if the attr is parsed, it just holds nullptr + // instead of the AST node. + return get(parser.getContext(), nullptr); } void ASTVarDeclAttr::print(::mlir::AsmPrinter &printer) const { @@ -2130,9 +2133,10 @@ LogicalResult ASTVarDeclAttr::verify( ::mlir::Attribute ASTRecordDeclAttr::parse(::mlir::AsmParser &parser, ::mlir::Type type) { - // We cannot really parse anything AST related at this point - // since we have no serialization/JSON story. - return ASTRecordDeclAttr::get(parser.getContext(), nullptr); + // We cannot really parse anything AST related at this point since we have no + // serialization/JSON story. Even if the attr is parsed, it just holds nullptr + // instead of the AST node. + return get(parser.getContext(), nullptr); } void ASTRecordDeclAttr::print(::mlir::AsmPrinter &printer) const { diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 30ce9fc2151f..cff0a3a63a4b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -11,9 +11,11 @@ //===----------------------------------------------------------------------===// #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "mlir/IR/Attributes.h" +#include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectImplementation.h" #include "mlir/Support/LogicalResult.h" @@ -22,6 +24,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" +#include //===----------------------------------------------------------------------===// // CIR Custom Parser/Printer Signatures @@ -87,72 +90,69 @@ Type BoolType::parse(mlir::AsmParser &parser) { void BoolType::print(mlir::AsmPrinter &printer) const {} +//===----------------------------------------------------------------------===// +// StructType Definitions +//===----------------------------------------------------------------------===// + Type StructType::parse(mlir::AsmParser &parser) { + llvm::SmallVector members; + mlir::StringAttr id; + bool body = false; + bool packed = false; + mlir::cir::ASTRecordDeclAttr ast = nullptr; + if (parser.parseLess()) - return Type(); - std::string typeName; - if (parser.parseString(&typeName)) - return Type(); + return {}; - llvm::SmallVector members; - bool parsedBody = false; - - auto parseASTAttribute = [&](Attribute &attr) { - auto optAttr = parser.parseOptionalAttribute(attr); - if (optAttr.has_value()) { - if (failed(*optAttr)) - return false; - if (attr.isa() || attr.isa() || - attr.isa()) - return true; - parser.emitError(parser.getCurrentLocation(), - "Unknown cir.struct attribute"); - return false; - } - return false; - }; - - while (mlir::succeeded(parser.parseOptionalComma())) { - if (mlir::succeeded(parser.parseOptionalKeyword("incomplete"))) - continue; - - parsedBody = true; - Type nextMember; - auto optTy = parser.parseOptionalType(nextMember); - if (optTy.has_value()) { - if (failed(*optTy)) - return Type(); - members.push_back(nextMember); - continue; - } + if (parser.parseAttribute(id)) + return {}; - // Maybe it's an AST attribute: always last member, break. - Attribute astAttr; - if (parseASTAttribute(astAttr)) - break; + if (parser.parseOptionalKeyword("packed").succeeded()) + packed = true; + + if (parser.parseOptionalKeyword("incomplete").failed()) { + body = true; + const auto delim = AsmParser::Delimiter::Braces; + auto result = parser.parseCommaSeparatedList(delim, [&]() -> ParseResult { + mlir::Type ty; + if (parser.parseType(ty)) + return mlir::failure(); + members.push_back(ty); + return mlir::success(); + }); + + if (result.failed()) + return {}; } + parser.parseOptionalAttribute(ast); + if (parser.parseGreater()) - return Type(); - auto sTy = get(parser.getContext(), members, typeName, parsedBody); - return sTy; + return {}; + + return StructType::get(parser.getContext(), members, id, body, packed, + std::nullopt); } void StructType::print(mlir::AsmPrinter &printer) const { - printer << '<' << getTypeName(); + printer << '<' << getTypeName() << " "; + + if (getPacked()) + printer << "packed "; + if (!getBody()) { - printer << ", incomplete"; + printer << "incomplete"; } else { - auto members = getMembers(); - if (!members.empty()) { - printer << ", "; - llvm::interleaveComma(getMembers(), printer); - } + printer << "{"; + llvm::interleaveComma(getMembers(), printer); + printer << "}"; } - if (getAst()) { - printer << ", "; - printer.printAttributeWithoutType(*getAst()); + + if (getAst().has_value()) { + printer << " "; + printer.printAttribute(getAst().value()); } + printer << '>'; } diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 486bced2fc93..eee6c0f07081 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -1,8 +1,8 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// CHECK: !ty_22struct2EZero22 = !cir.struct<"struct.Zero", !u8i> -// CHECK: !ty_22struct2Eyep_22 = !cir.struct<"struct.yep_", !u32i, !u32i> +// CHECK: !ty_22struct2EZero22 = !cir.struct<"struct.Zero" {!u8i}> +// CHECK: !ty_22struct2Eyep_22 = !cir.struct<"struct.yep_" {!u32i, !u32i}> struct Zero { void yolo(); diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 9021cf076bdd..58a6a29d5768 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -7,4 +7,4 @@ typedef struct _a { void m() { at y; } -// CHECK: !ty_22struct2E_a22 = !cir.struct<"struct._a", !s32i> \ No newline at end of file +// CHECK: !ty_22struct2E_a22 = !cir.struct<"struct._a" {!s32i}> \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 452208bc63da..a9a48314742d 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -14,5 +14,5 @@ void m() { __long l; } -// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", !u32i, #cir.recdecl.ast> -// CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long", !ty_22struct2Eanon22, !u32i, !cir.ptr> \ No newline at end of file +// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon" {!u32i} #cir.recdecl.ast> +// CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long" {!ty_22struct2Eanon22, !u32i, !cir.ptr}> \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 3b2c720a13c4..ca496c74dd07 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -126,13 +126,13 @@ co_invoke_fn co_invoke; }} // namespace folly::coro -// CHECK: ![[VoidTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task", !u8i> -// CHECK: ![[IntTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task", !u8i> -// CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct<"struct.folly::coro::Task::promise_type", !u8i> -// CHECK: ![[CoroHandleVoid:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", !u8i> -// CHECK: ![[CoroHandlePromise:ty_.*]] = !cir.struct<"struct.std::coroutine_handle", !u8i> -// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string", !u8i -// CHECK: ![[SuspendAlways:ty_.*]] = !cir.struct<"struct.std::suspend_always", !u8i> +// CHECK: ![[VoidTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task" {!u8i}> +// CHECK: ![[IntTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task" {!u8i}> +// CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct<"struct.folly::coro::Task::promise_type" {!u8i}> +// CHECK: ![[CoroHandleVoid:ty_.*]] = !cir.struct<"struct.std::coroutine_handle" {!u8i}> +// CHECK: ![[CoroHandlePromise:ty_.*]] = !cir.struct<"struct.std::coroutine_handle" {!u8i}> +// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string" {!u8i} +// CHECK: ![[SuspendAlways:ty_.*]] = !cir.struct<"struct.std::suspend_always" {!u8i}> // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22struct2Efolly3A3Acoro3A3Aco_invoke_fn22 diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index c81075dfa880..3e1c58395a14 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -11,7 +11,7 @@ void baz() { Struk s; } -// CHECK: !ty_22struct2EStruk22 = !cir.struct<"struct.Struk", !s32i> +// CHECK: !ty_22struct2EStruk22 = !cir.struct<"struct.Struk" {!s32i}> // CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 4fdb269afbf4..ade5e97a7501 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -75,8 +75,8 @@ void C3::Layer::Initialize() { } } -// CHECK: !ty_22class2EC23A3ALayer22 = !cir.struct<"class.C2::Layer", !ty_22class2EC13A3ALayer22, !cir.ptr -// CHECK: !ty_22struct2EC33A3ALayer22 = !cir.struct<"struct.C3::Layer", !ty_22class2EC23A3ALayer22 +// CHECK: !ty_22class2EC23A3ALayer22 = !cir.struct<"class.C2::Layer" {!ty_22class2EC13A3ALayer22, !cir.ptr +// CHECK: !ty_22struct2EC33A3ALayer22 = !cir.struct<"struct.C3::Layer" {!ty_22class2EC23A3ALayer22 // CHECK: cir.func @_ZN2C35Layer10InitializeEv diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index df60781b5eff..45e5174cd96d 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -38,10 +38,10 @@ class B : public A }; // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr>>, #cir.recdecl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A" {!cir.ptr>>} #cir.recdecl.ast> // Class B -// CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> +// CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B" {![[ClassA]]}> // CHECK: cir.func @_Z4bluev() // CHECK: %0 = cir.alloca !ty_22class2EPSEvent22, cir.ptr , ["p", init] {alignment = 8 : i64} diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index ef3bf6022a33..6118616d2b20 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -6,7 +6,7 @@ void fn() { a(); } -// CHECK: !ty_22class2Eanon22 = !cir.struct<"class.anon", !u8i> +// CHECK: !ty_22class2Eanon22 = !cir.struct<"class.anon" {!u8i}> // CHECK-DAG: module // CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv diff --git a/clang/test/CIR/CodeGen/move.cpp b/clang/test/CIR/CodeGen/move.cpp index 0c624c94306a..c661a0a89f37 100644 --- a/clang/test/CIR/CodeGen/move.cpp +++ b/clang/test/CIR/CodeGen/move.cpp @@ -16,7 +16,7 @@ struct string { } // std namespace -// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string", !u8i> +// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string" {!u8i}> std::string getstr(); void emplace(std::string &&s); diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp index 4d8ce62fcb36..91f803fd110b 100644 --- a/clang/test/CIR/CodeGen/nrvo.cpp +++ b/clang/test/CIR/CodeGen/nrvo.cpp @@ -9,7 +9,7 @@ std::vector test_nrvo() { return result; } -// CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector", !cir.ptr>, !cir.ptr>, !cir.ptr>> +// CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector" {!cir.ptr>, !cir.ptr>, !cir.ptr>}> // CHECK: cir.func @_Z9test_nrvov() -> !ty_22class2Estd3A3Avector22 // CHECK: %0 = cir.alloca !ty_22class2Estd3A3Avector22, cir.ptr , ["__retval", init] {alignment = 8 : i64} diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 05d310efc515..61be75a1b8e6 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -21,9 +21,9 @@ void init(unsigned numImages) { } } -// CHECK: !ty_22struct2Etriple22 = !cir.struct<"struct.triple", !u32i, !cir.ptr, !u32i> -// CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector", !cir.ptr, !cir.ptr, !cir.ptr> -// CHECK: !ty_22struct2E__vector_iterator22 = !cir.struct<"struct.__vector_iterator", !cir.ptr> +// CHECK: !ty_22struct2Etriple22 = !cir.struct<"struct.triple" {!u32i, !cir.ptr, !u32i}> +// CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector" {!cir.ptr, !cir.ptr, !cir.ptr}> +// CHECK: !ty_22struct2E__vector_iterator22 = !cir.struct<"struct.__vector_iterator" {!cir.ptr}> // CHECK: cir.func @_Z4initj(%arg0: !u32i // CHECK: %0 = cir.alloca !u32i, cir.ptr , ["numImages", init] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 5f269b1835c3..67097d25326f 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -17,8 +17,8 @@ void baz(void) { struct Foo f; } -// CHECK-DAG: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i> -// CHECK-DAG: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", !s32i, !s8i, !ty_22struct2EBar22> +// CHECK-DAG: !ty_22struct2EBar22 = !cir.struct<"struct.Bar" {!s32i, !s8i}> +// CHECK-DAG: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo" {!s32i, !s8i, !ty_22struct2EBar22}> // CHECK-DAG: module {{.*}} { // CHECK: cir.func @baz() // CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 166dff0d812d..4efe026e0e5b 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -26,13 +26,13 @@ void baz() { struct incomplete; void yoyo(incomplete *i) {} -// CHECK: !ty_22struct2Eincomplete22 = !cir.struct<"struct.incomplete", incomplete -// CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i> +// CHECK: !ty_22struct2Eincomplete22 = !cir.struct<"struct.incomplete" incomplete +// CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar" {!s32i, !s8i}> -// CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo", !s32i, !s8i, !ty_22struct2EBar22> -// CHECK: !ty_22struct2EMandalore22 = !cir.struct<"struct.Mandalore", !u32i, !cir.ptr, !s32i, #cir.recdecl.ast> -// CHECK: !ty_22class2EAdv22 = !cir.struct<"class.Adv", !ty_22struct2EMandalore22> -// CHECK: !ty_22struct2EEntry22 = !cir.struct<"struct.Entry", !cir.ptr, !cir.ptr)>>> +// CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo" {!s32i, !s8i, !ty_22struct2EBar22}> +// CHECK: !ty_22struct2EMandalore22 = !cir.struct<"struct.Mandalore" {!u32i, !cir.ptr, !s32i} #cir.recdecl.ast> +// CHECK: !ty_22class2EAdv22 = !cir.struct<"class.Adv" {!ty_22struct2EMandalore22}> +// CHECK: !ty_22struct2EEntry22 = !cir.struct<"struct.Entry" {!cir.ptr, !cir.ptr)>>}> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index a31f88910845..6a5a9d5a0fd3 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -12,12 +12,12 @@ void m() { yolm3 q3; } -// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", !cir.bool, !s32i, #cir.recdecl.ast> -// CHECK: !ty_22struct2Eyolo22 = !cir.struct<"struct.yolo", !s32i, #cir.recdecl.ast> -// CHECK: !ty_22struct2Eanon221 = !cir.struct<"struct.anon", !cir.ptr, !s32i, #cir.recdecl.ast> +// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon" {!cir.bool, !s32i} #cir.recdecl.ast> +// CHECK: !ty_22struct2Eyolo22 = !cir.struct<"struct.yolo" {!s32i} #cir.recdecl.ast> +// CHECK: !ty_22struct2Eanon221 = !cir.struct<"struct.anon" {!cir.ptr, !s32i} #cir.recdecl.ast> -// CHECK: !ty_22union2Eyolm22 = !cir.struct<"union.yolm", !ty_22struct2Eyolo22> -// CHECK: !ty_22union2Eyolm222 = !cir.struct<"union.yolm2", !ty_22struct2Eanon221> +// CHECK: !ty_22union2Eyolm22 = !cir.struct<"union.yolm" {!ty_22struct2Eyolo22}> +// CHECK: !ty_22union2Eyolm222 = !cir.struct<"union.yolm2" {!ty_22struct2Eanon221}> // CHECK: cir.func @_Z1mv() // CHECK: cir.alloca !ty_22union2Eyolm22, cir.ptr , ["q"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 6978e561a9f9..5df467f78fb1 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -19,16 +19,16 @@ class B : public A }; // Type info B. -// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> +// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct<"" {!cir.ptr, !cir.ptr, !cir.ptr}> // vtable for A type -// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct<"", !cir.array x 5>> +// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct<"" {!cir.array x 5>}> // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A", !cir.ptr>>, #cir.recdecl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A" {!cir.ptr>>} #cir.recdecl.ast> // Class B -// CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B", ![[ClassA]]> +// CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B" {![[ClassA]]}> // B ctor => @B::B() // Calls @A::A() and initialize __vptr with address of B's vtable. diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index d572cb21c29b..d860f2632d25 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -3,12 +3,12 @@ !s8i = !cir.int !s32i = !cir.int !s64i = !cir.int -!ty_22class2EInit22 = !cir.struct<"class.Init", !s8i, #cir.recdecl.ast> +!ty_22class2EInit22 = !cir.struct<"class.Init" {!s8i} #cir.recdecl.ast> module { cir.global external @a = #cir.int<3> : !s32i cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i] : !cir.array> cir.global external @b = #cir.const_array<"example\00" : !cir.array> - cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.null : !cir.ptr}> : !cir.struct<"", !s8i, !s64i, !cir.ptr> + cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.null : !cir.ptr}> : !cir.struct<"" {!s8i, !s64i, !cir.ptr}> cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} cir.global "private" internal @c : !s32i cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} @@ -31,8 +31,7 @@ module { #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@type_info_name_B> : !cir.ptr, #cir.global_view<@type_info_A> : !cir.ptr}> - : !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr - > + : !cir.struct<"" {!cir.ptr, !cir.ptr, !cir.ptr}> cir.func private @_ZN4InitC1Eb(!cir.ptr, !s8i) cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { %0 = cir.get_global @_ZL8__ioinit : cir.ptr diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index f798499ad118..fc8d80128375 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -309,7 +309,7 @@ module { cir.global external @type_info_B = #cir.typeinfo<{ // expected-error {{element at index 0 has type '!cir.ptr>' but return type for this element is '!cir.ptr>'}} #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}> - : !cir.struct<"", !cir.ptr> + : !cir.struct<"" {!cir.ptr}> } // expected-error {{'cir.global' expected constant attribute to match type}} // ----- diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index e82dc92ce431..4f98102a3bad 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -5,17 +5,17 @@ !s32i = !cir.int !u32i = !cir.int -!ty_2222 = !cir.struct<"", !cir.array x 5>> -!ty_22221 = !cir.struct<"", !cir.ptr, !cir.ptr, !cir.ptr> -!ty_22class2EA22 = !cir.struct<"class.A", incomplete, #cir.recdecl.ast> -// CHECK: !ty_22i22 = !cir.struct<"i", incomplete> -// CHECK: !ty_22S22 = !cir.struct<"S", !u8i, !u16i, !u32i> -!ty_22struct2ES22 = !cir.struct<"struct.S", !s32i, !s32i> +!ty_2222 = !cir.struct<"" {!cir.array x 5>}> +!ty_22221 = !cir.struct<"" {!cir.ptr, !cir.ptr, !cir.ptr}> +!ty_22class2EA22 = !cir.struct<"class.A" incomplete #cir.recdecl.ast> +// CHECK: !ty_22i22 = !cir.struct<"i" incomplete> +// CHECK: !ty_22S22 = !cir.struct<"S" {!u8i, !u16i, !u32i}> +!ty_22struct2ES22 = !cir.struct<"struct.S" {!s32i, !s32i}> module { cir.func @structs() { - %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] - %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["i", init] + %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] + %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["i", init] cir.return } diff --git a/clang/test/CIR/IR/vtableAttr.cir b/clang/test/CIR/IR/vtableAttr.cir index 5c9f414feb98..596644d2cfc7 100644 --- a/clang/test/CIR/IR/vtableAttr.cir +++ b/clang/test/CIR/IR/vtableAttr.cir @@ -1,7 +1,7 @@ // RUN: cir-opt %s | FileCheck %s !u8i = !cir.int -!ty_2222 = !cir.struct<"", !cir.array x 1>> +!ty_2222 = !cir.struct<"" {!cir.array x 1>}> module { // Should parse VTable attribute. cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.null : !cir.ptr]> : !cir.array x 1>}> : !ty_2222 diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir index 652994aa686b..3b85567acd91 100644 --- a/clang/test/CIR/Lowering/array.cir +++ b/clang/test/CIR/Lowering/array.cir @@ -2,7 +2,7 @@ // RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM !s32i = !cir.int -!ty_22struct2ES22 = !cir.struct<"struct.S", !s32i, #cir.recdecl.ast> +!ty_22struct2ES22 = !cir.struct<"struct.S" {!s32i} #cir.recdecl.ast> module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 43ca8c3fb030..a469ea97b43d 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -10,8 +10,8 @@ !u32i = !cir.int !u64i = !cir.int !u8i = !cir.int -!ty_22struct2EA22 = !cir.struct<"struct.A", !s32i, !cir.array x 2>, #cir.recdecl.ast> -!ty_22struct2EBar22 = !cir.struct<"struct.Bar", !s32i, !s8i, #cir.recdecl.ast> +!ty_22struct2EA22 = !cir.struct<"struct.A" {!s32i, !cir.array x 2>} #cir.recdecl.ast> +!ty_22struct2EBar22 = !cir.struct<"struct.Bar" {!s32i, !s8i} #cir.recdecl.ast> module { cir.global external @a = #cir.int<3> : !s32i diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index c2efc8f4f6a0..45649c1fa7f1 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -4,11 +4,11 @@ !s32i = !cir.int !u8i = !cir.int !u32i = !cir.int -!ty_22struct2ES22 = !cir.struct<"struct.S", !u8i, !s32i> -!ty_22struct2ES2A22 = !cir.struct<"struct.S2A", !s32i, #cir.recdecl.ast> -!ty_22struct2ES122 = !cir.struct<"struct.S1", !s32i, f32, !cir.ptr, #cir.recdecl.ast> -!ty_22struct2ES222 = !cir.struct<"struct.S2", !ty_22struct2ES2A22, #cir.recdecl.ast> -!ty_22struct2ES322 = !cir.struct<"struct.S3", !s32i, #cir.recdecl.ast> +!ty_22struct2ES22 = !cir.struct<"struct.S" {!u8i, !s32i}> +!ty_22struct2ES2A22 = !cir.struct<"struct.S2A" {!s32i} #cir.recdecl.ast> +!ty_22struct2ES122 = !cir.struct<"struct.S1" {!s32i, f32, !cir.ptr} #cir.recdecl.ast> +!ty_22struct2ES222 = !cir.struct<"struct.S2" {!ty_22struct2ES2A22} #cir.recdecl.ast> +!ty_22struct2ES322 = !cir.struct<"struct.S3" {!s32i} #cir.recdecl.ast> module { cir.func @test() { diff --git a/clang/test/CIR/Lowering/variadics.cir b/clang/test/CIR/Lowering/variadics.cir index f95ed7638795..465f222edee4 100644 --- a/clang/test/CIR/Lowering/variadics.cir +++ b/clang/test/CIR/Lowering/variadics.cir @@ -5,7 +5,7 @@ !u32i = !cir.int !u8i = !cir.int -!ty_22struct2E__va_list_tag22 = !cir.struct<"struct.__va_list_tag", !u32i, !u32i, !cir.ptr, !cir.ptr, #cir.recdecl.ast> +!ty_22struct2E__va_list_tag22 = !cir.struct<"struct.__va_list_tag" {!u32i, !u32i, !cir.ptr, !cir.ptr} #cir.recdecl.ast> module { cir.func @average(%arg0: !s32i, ...) -> !s32i { From 6a795689cb1da903662251d7eb31ea82f76f9a27 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 28 Aug 2023 20:45:13 -0300 Subject: [PATCH 1173/2301] [CIR] Add record kind enum in StructType Serves as a way to identify the kind of record type (union, class, struct, etc.), which is useful for codegen and lowering. ghstack-source-id: a642fe834f12893d555b527af030d1c2a9c9df46 Pull Request resolved: https://github.com/llvm/clangir/pull/227 --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 28 ++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 28 +++++- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 1 + clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 33 ++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- clang/test/CIR/CodeGen/String.cpp | 30 +++--- clang/test/CIR/CodeGen/agg-init.cpp | 32 +++---- clang/test/CIR/CodeGen/array.c | 2 +- clang/test/CIR/CodeGen/array.cpp | 2 +- clang/test/CIR/CodeGen/assign-operator.cpp | 48 +++++----- clang/test/CIR/CodeGen/atomic.cpp | 2 +- clang/test/CIR/CodeGen/bitfields.cpp | 4 +- clang/test/CIR/CodeGen/build-deferred.cpp | 2 +- clang/test/CIR/CodeGen/cond.cpp | 6 +- clang/test/CIR/CodeGen/coro-task.cpp | 32 +++---- clang/test/CIR/CodeGen/ctor-alias.cpp | 20 ++-- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 12 +-- clang/test/CIR/CodeGen/ctor.cpp | 24 ++--- clang/test/CIR/CodeGen/derived-to-base.cpp | 68 +++++++------- clang/test/CIR/CodeGen/dtors.cpp | 8 +- clang/test/CIR/CodeGen/globals.c | 2 +- clang/test/CIR/CodeGen/lambda.cpp | 46 +++++----- clang/test/CIR/CodeGen/libcall.cpp | 2 +- clang/test/CIR/CodeGen/lvalue-refs.cpp | 8 +- clang/test/CIR/CodeGen/move.cpp | 2 +- clang/test/CIR/CodeGen/new.cpp | 28 +++--- clang/test/CIR/CodeGen/nrvo.cpp | 14 +-- clang/test/CIR/CodeGen/rangefor.cpp | 56 +++++------ .../skip-functions-from-system-headers.cpp | 2 +- clang/test/CIR/CodeGen/static.cpp | 28 +++--- clang/test/CIR/CodeGen/struct.c | 32 +++---- clang/test/CIR/CodeGen/struct.cpp | 92 +++++++++---------- clang/test/CIR/CodeGen/union.cpp | 16 ++-- clang/test/CIR/CodeGen/variadics.c | 2 +- clang/test/CIR/CodeGen/vector.cpp | 8 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 8 +- clang/test/CIR/IR/global.cir | 20 ++-- clang/test/CIR/IR/invalid.cir | 2 +- clang/test/CIR/IR/struct.cir | 23 ++--- clang/test/CIR/IR/vtableAttr.cir | 2 +- clang/test/CIR/Lowering/array.cir | 4 +- clang/test/CIR/Lowering/globals.cir | 8 +- clang/test/CIR/Lowering/struct.cir | 34 +++---- clang/test/CIR/Lowering/variadics.cir | 20 ++-- 45 files changed, 463 insertions(+), 385 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 178d944f1df7..abbee1419613 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -110,12 +110,19 @@ def CIR_StructType : CIR_Type<"Struct", "struct", "mlir::StringAttr":$typeName, "bool":$body, "bool":$packed, + "mlir::cir::StructType::RecordKind":$kind, "std::optional<::mlir::cir::ASTRecordDeclAttr>":$ast ); let hasCustomAssemblyFormat = 1; let extraClassDeclaration = [{ + enum RecordKind : uint32_t { + Class, + Union, + Struct + }; + private: // All these support lazily computation and storage // for the struct size and alignment. @@ -127,6 +134,27 @@ def CIR_StructType : CIR_Type<"Struct", "struct", size_t getNumElements() const { return getMembers().size(); } bool isOpaque() const { return !getBody(); } bool isPadded(const ::mlir::DataLayout &dataLayout) const; + + std::string getPrefixedName() { + const auto name = getTypeName().getValue().str(); + switch (getKind()) { + case RecordKind::Class: + return "class." + name; + case RecordKind::Union: + return "union "+ name; + case RecordKind::Struct: + return "struct." + name; + } + } + + /// Return whether this is a class declaration. + bool isClass() const { return getKind() == RecordKind::Class; } + + /// Return whether this is a union declaration. + bool isUnion() const { return getKind() == RecordKind::Union; } + + /// Return whether this is a struct declaration. + bool isStruct() const { return getKind() == RecordKind::Struct; } }]; let extraClassDefinition = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index a3556429687f..494bf9900eef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -13,6 +13,8 @@ #include "CIRGenTypeCache.h" #include "UnimplementedFeatureGuarding.h" +#include "clang/AST/Decl.h" +#include "clang/AST/Type.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" @@ -168,7 +170,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { if (!structTy) structTy = getType( members, mlir::StringAttr::get(getContext()), - /*body=*/true, packed, + /*body=*/true, packed, mlir::cir::StructType::Struct, /*ast=*/std::nullopt); // Return zero or anonymous constant struct. @@ -384,16 +386,36 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return getStructTy(members, "", body, packed, ast); } + /// Get a CIR record kind from a AST declaration tag. + mlir::cir::StructType::RecordKind + getRecordKind(const clang::TagTypeKind kind) { + switch (kind) { + case clang::TagTypeKind::Struct: + return mlir::cir::StructType::Struct; + case clang::TagTypeKind::Union: + return mlir::cir::StructType::Union; + case clang::TagTypeKind::Class: + return mlir::cir::StructType::Class; + case clang::TagTypeKind::Interface: + llvm_unreachable("interface records are NYI"); + case clang::TagTypeKind::Enum: + llvm_unreachable("enum records are NYI"); + } + } + /// Get a CIR named struct type. mlir::cir::StructType getStructTy(llvm::ArrayRef members, llvm::StringRef name, bool body, bool packed, const clang::RecordDecl *ast) { const auto nameAttr = getStringAttr(name); std::optional astAttr = std::nullopt; - if (ast) + auto kind = mlir::cir::StructType::RecordKind::Struct; + if (ast) { astAttr = getAttr(ast); + kind = getRecordKind(ast->getTagKind()); + } return mlir::cir::StructType::get(getContext(), members, nameAttr, body, - packed, astAttr); + packed, kind, astAttr); } // diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index bd81e498dae3..aaa7ff379668 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -46,8 +46,6 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, llvm::SmallString<256> typeName; llvm::raw_svector_ostream outStream(typeName); - outStream << recordDecl->getKindName() << '.'; - PrintingPolicy policy = recordDecl->getASTContext().getPrintingPolicy(); policy.SuppressInlineNamespace = false; @@ -169,8 +167,7 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // Handle forward decl / incomplete types. if (!entry) { auto name = getRecordTypeName(RD, ""); - entry = Builder.getStructTy({}, name, /*body=*/false, - /*packed=*/false, RD); + entry = Builder.getStructTy({}, name, /*body=*/false, /*packed=*/false, RD); recordDeclTypes[key] = entry; } diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 3ee14ccc014c..425713b0c378 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -17,6 +17,7 @@ #include "clang/AST/CXXInheritance.h" #include "clang/AST/RecordLayout.h" #include "clang/Basic/CodeGenOptions.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "clang/CodeGen/ConstantInitBuilder.h" #include "llvm/Support/Format.h" diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index cff0a3a63a4b..47691a7fc463 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -95,15 +95,30 @@ void BoolType::print(mlir::AsmPrinter &printer) const {} //===----------------------------------------------------------------------===// Type StructType::parse(mlir::AsmParser &parser) { + const auto loc = parser.getCurrentLocation(); llvm::SmallVector members; mlir::StringAttr id; bool body = false; bool packed = false; mlir::cir::ASTRecordDeclAttr ast = nullptr; + RecordKind kind; if (parser.parseLess()) return {}; + // TODO(cir): in the future we should probably separate types for different + // source language declarations such as cir.class, cir.union, and cir.struct + if (parser.parseOptionalKeyword("struct").succeeded()) + kind = RecordKind::Struct; + else if (parser.parseOptionalKeyword("union").succeeded()) + kind = RecordKind::Union; + else if (parser.parseOptionalKeyword("class").succeeded()) + kind = RecordKind::Class; + else { + parser.emitError(loc, "unknown struct type"); + return {}; + } + if (parser.parseAttribute(id)) return {}; @@ -130,12 +145,26 @@ Type StructType::parse(mlir::AsmParser &parser) { if (parser.parseGreater()) return {}; - return StructType::get(parser.getContext(), members, id, body, packed, + return StructType::get(parser.getContext(), members, id, body, packed, kind, std::nullopt); } void StructType::print(mlir::AsmPrinter &printer) const { - printer << '<' << getTypeName() << " "; + printer << '<'; + + switch (getKind()) { + case RecordKind::Struct: + printer << "struct "; + break; + case RecordKind::Union: + printer << "union "; + break; + case RecordKind::Class: + printer << "class "; + break; + } + + printer << getTypeName() << " "; if (getPacked()) printer << "packed "; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f9fc04f6d41b..f3dcc2cde8c1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1823,7 +1823,7 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { mlir::LLVM::LLVMStructType llvmStruct; if (type.getTypeName().size() != 0) { llvmStruct = mlir::LLVM::LLVMStructType::getIdentified( - type.getContext(), type.getTypeName()); + type.getContext(), type.getPrefixedName()); if (llvmStruct.setBody(llvmMembers, /*isPacked=*/type.getPacked()) .failed()) llvm_unreachable("Failed to set body of struct"); diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 26b7c60d4050..78797f3ece6d 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -18,20 +18,20 @@ void test() { } // CHECK: cir.func linkonce_odr @_ZN6StringC2Ev -// CHECK-NEXT: %0 = cir.alloca !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 // CHECK-NEXT: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "storage"}> // CHECK-NEXT: %3 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %4 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "size"}> : (!cir.ptr) -> !cir.ptr +// CHECK-NEXT: %4 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "size"}> : (!cir.ptr) -> !cir.ptr // CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.cast(integral, %5 : !s32i), !s64i // CHECK-NEXT: cir.store %6, %4 : !s64i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2Ei -// CHECK-NEXT: %0 = cir.alloca !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["size", init] // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: cir.store %arg1, %1 @@ -39,7 +39,7 @@ void test() { // CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "storage"}> // CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) // CHECK-NEXT: cir.store %4, %3 -// CHECK-NEXT: %5 = "cir.struct_element_addr"(%2) <{member_index = 1 : index, member_name = "size"}> : (!cir.ptr) -> !cir.ptr +// CHECK-NEXT: %5 = "cir.struct_element_addr"(%2) <{member_index = 1 : index, member_name = "size"}> : (!cir.ptr) -> !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i // CHECK-NEXT: %7 = cir.cast(integral, %6 : !s32i), !s64i // CHECK-NEXT: cir.store %7, %5 : !s64i, cir.ptr @@ -47,27 +47,27 @@ void test() { // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "storage"}> : (!cir.ptr) -> !cir.ptr> +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "storage"}> : (!cir.ptr) -> !cir.ptr> // CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.return // CHECK: cir.func linkonce_odr @_ZN6StringC1EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: %3 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return // CHECK: cir.func @_Z4testv() -// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () -// CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, !s32i) -> () -// CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, !s32i) -> () +// CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index eee6c0f07081..c94c0d19c86e 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -1,8 +1,8 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// CHECK: !ty_22struct2EZero22 = !cir.struct<"struct.Zero" {!u8i}> -// CHECK: !ty_22struct2Eyep_22 = !cir.struct<"struct.yep_" {!u32i, !u32i}> +// CHECK: !ty_22Zero22 = !cir.struct +// CHECK: !ty_22yep_22 = !cir.struct struct Zero { void yolo(); @@ -15,9 +15,9 @@ void f() { } // CHECK: cir.func @_Z1fv() -// CHECK: %0 = cir.alloca !ty_22struct2EZero22, cir.ptr , ["z0", init] -// CHECK: %1 = cir.alloca !ty_22struct2EZero22, cir.ptr , ["z1"] -// CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () +// CHECK: %0 = cir.alloca !ty_22Zero22, cir.ptr , ["z0", init] +// CHECK: %1 = cir.alloca !ty_22Zero22, cir.ptr , ["z1"] +// CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () // CHECK: cir.return typedef enum xxy_ { @@ -34,11 +34,11 @@ typedef struct yep_ { void use() { yop{}; } // CHECK: cir.func @_Z3usev() -// CHECK: %0 = cir.alloca !ty_22struct2Eyep_22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} -// CHECK: %1 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "Status"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %0 = cir.alloca !ty_22yep_22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} +// CHECK: %1 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "Status"}> : (!cir.ptr) -> !cir.ptr // CHECK: %2 = cir.const(#cir.int<0> : !u32i) : !u32i // CHECK: cir.store %2, %1 : !u32i, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_index = 1 : index, member_name = "HC"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_index = 1 : index, member_name = "HC"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<0> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr // CHECK: cir.return @@ -64,16 +64,16 @@ void yo() { } // CHECK: cir.func @_Z2yov() -// CHECK: %0 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext"] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !ty_22struct2EYo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} -// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.null : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22struct2EYo22) : !ty_22struct2EYo22 -// CHECK: cir.store %2, %0 : !ty_22struct2EYo22, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "type"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %0 = cir.alloca !ty_22Yo22, cir.ptr , ["ext"] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22Yo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} +// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.null : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22Yo22) : !ty_22Yo22 +// CHECK: cir.store %2, %0 : !ty_22Yo22, cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "type"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr -// CHECK: %5 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "next"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: %5 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > -// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_index = 2 : index, member_name = "createFlags"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_index = 2 : index, member_name = "createFlags"}> : (!cir.ptr) -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !u64i) : !u64i // CHECK: cir.store %8, %7 : !u64i, cir.ptr diff --git a/clang/test/CIR/CodeGen/array.c b/clang/test/CIR/CodeGen/array.c index e24ad24ebc51..1bf556f65973 100644 --- a/clang/test/CIR/CodeGen/array.c +++ b/clang/test/CIR/CodeGen/array.c @@ -5,4 +5,4 @@ struct S { int i; } arr[3] = {{1}}; -// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22]> : !cir.array +// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index fef24a5e95f2..61bde35e261c 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -70,7 +70,7 @@ int globalNullArr[] = {0, 0}; struct S { int i; } arr[3] = {{1}}; -// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22]> : !cir.array +// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array void testPointerDecaySubscriptAccess(int arr[]) { // CHECK: cir.func @{{.+}}testPointerDecaySubscriptAccess diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 01ea4900eebb..a2f579fc919f 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -15,11 +15,11 @@ struct String { // StringView::StringView(String const&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewC2ERK6String - // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} - // CHECK: cir.store %arg0, %0 : !cir.ptr - // CHECK: cir.store %arg1, %1 : !cir.ptr - // CHECK: %2 = cir.load %0 : cir.ptr > + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} + // CHECK: cir.store %arg0, %0 : !cir.ptr + // CHECK: cir.store %arg1, %1 : !cir.ptr + // CHECK: %2 = cir.load %0 : cir.ptr > // Get address of `this->size` @@ -27,7 +27,7 @@ struct String { // Get address of `s` - // CHECK: %4 = cir.load %1 : cir.ptr > + // CHECK: %4 = cir.load %1 : cir.ptr > // Get the address of s.size @@ -41,25 +41,25 @@ struct String { // CHECK: } // DISABLE: cir.func linkonce_odr @_ZN10StringViewC2ERK6String - // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // StringView::operator=(StringView&&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewaSEOS_ - // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["", init] {alignment = 8 : i64} - // CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} - // CHECK: cir.store %arg0, %0 : !cir.ptr - // CHECK: cir.store %arg1, %1 : !cir.ptr - // CHECK: %3 = cir.load deref %0 : cir.ptr > - // CHECK: %4 = cir.load %1 : cir.ptr > + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["", init] {alignment = 8 : i64} + // CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} + // CHECK: cir.store %arg0, %0 : !cir.ptr + // CHECK: cir.store %arg1, %1 : !cir.ptr + // CHECK: %3 = cir.load deref %0 : cir.ptr > + // CHECK: %4 = cir.load %1 : cir.ptr > // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_index = 0 : index, member_name = "size"}> // CHECK: %6 = cir.load %5 : cir.ptr , !s64i // CHECK: %7 = "cir.struct_element_addr"(%3) <{member_index = 0 : index, member_name = "size"}> // CHECK: cir.store %6, %7 : !s64i, cir.ptr - // CHECK: cir.store %3, %2 : !cir.ptr - // CHECK: %8 = cir.load %2 : cir.ptr > - // CHECK: cir.return %8 : !cir.ptr + // CHECK: cir.store %3, %2 : !cir.ptr + // CHECK: %8 = cir.load %2 : cir.ptr > + // CHECK: cir.return %8 : !cir.ptr // CHECK: } // DISABLE: cir.func private @_ZN10StringViewaSEOS_ @@ -83,17 +83,17 @@ int main() { // CHECK: cir.func @main() -> !s32i // CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["sv", init] {alignment = 8 : i64} -// CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () +// CHECK: %1 = cir.alloca !ty_22StringView22, cir.ptr , ["sv", init] {alignment = 8 : i64} +// CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () // CHECK: cir.scope { -// CHECK: %3 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s", init] {alignment = 8 : i64} +// CHECK: %3 = cir.alloca !ty_22String22, cir.ptr , ["s", init] {alignment = 8 : i64} // CHECK: %4 = cir.get_global @".str" : cir.ptr > // CHECK: %5 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_ZN6StringC2EPKc(%3, %5) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC2EPKc(%3, %5) : (!cir.ptr, !cir.ptr) -> () // CHECK: cir.scope { -// CHECK: %6 = cir.alloca !ty_22struct2EStringView22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: cir.call @_ZN10StringViewC2ERK6String(%6, %3) : (!cir.ptr, !cir.ptr) -> () -// CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %6) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: %6 = cir.alloca !ty_22StringView22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: cir.call @_ZN10StringViewC2ERK6String(%6, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %6) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } // CHECK: } // CHECK: %2 = cir.load %0 : cir.ptr , !s32i diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 58a6a29d5768..cd6852beccd7 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -7,4 +7,4 @@ typedef struct _a { void m() { at y; } -// CHECK: !ty_22struct2E_a22 = !cir.struct<"struct._a" {!s32i}> \ No newline at end of file +// CHECK: !ty_22_a22 = !cir.struct \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index a9a48314742d..48eec3bd093b 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -14,5 +14,5 @@ void m() { __long l; } -// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon" {!u32i} #cir.recdecl.ast> -// CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long" {!ty_22struct2Eanon22, !u32i, !cir.ptr}> \ No newline at end of file +// CHECK: !ty_22anon22 = !cir.struct +// CHECK: !ty_22__long22 = !cir.struct}> diff --git a/clang/test/CIR/CodeGen/build-deferred.cpp b/clang/test/CIR/CodeGen/build-deferred.cpp index f1f1ef4c907e..bf0f2ce30c9e 100644 --- a/clang/test/CIR/CodeGen/build-deferred.cpp +++ b/clang/test/CIR/CodeGen/build-deferred.cpp @@ -24,4 +24,4 @@ void test() { // CHECK-NOT: cir.func linkonce_odr @_ZN6StringC1EPKc // CHECK: cir.func @_Z4testv() -// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/cond.cpp b/clang/test/CIR/CodeGen/cond.cpp index 42c11a3301d2..fdeab5942e7b 100644 --- a/clang/test/CIR/CodeGen/cond.cpp +++ b/clang/test/CIR/CodeGen/cond.cpp @@ -17,11 +17,11 @@ min(const unsigned long& __a, const unsigned long& __b) { // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK: cir.scope { -// CHECK: %4 = cir.alloca !ty_22struct2E__less22, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} -// CHECK: cir.call @_ZN6__lessC1Ev(%4) : (!cir.ptr) -> () +// CHECK: %4 = cir.alloca !ty_22__less22, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: cir.call @_ZN6__lessC1Ev(%4) : (!cir.ptr) -> () // CHECK: %5 = cir.load %1 : cir.ptr >, !cir.ptr // CHECK: %6 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %7 = cir.call @_ZNK6__lessclERKmS1_(%4, %5, %6) : (!cir.ptr, !cir.ptr, !cir.ptr) -> !cir.bool +// CHECK: %7 = cir.call @_ZNK6__lessclERKmS1_(%4, %5, %6) : (!cir.ptr, !cir.ptr, !cir.ptr) -> !cir.bool // CHECK: %8 = cir.ternary(%7, true { // CHECK: %9 = cir.load %1 : cir.ptr >, !cir.ptr // CHECK: cir.yield %9 : !cir.ptr diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index ca496c74dd07..b07373fd02f4 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -126,16 +126,16 @@ co_invoke_fn co_invoke; }} // namespace folly::coro -// CHECK: ![[VoidTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task" {!u8i}> -// CHECK: ![[IntTask:ty_.*]] = !cir.struct<"struct.folly::coro::Task" {!u8i}> -// CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct<"struct.folly::coro::Task::promise_type" {!u8i}> -// CHECK: ![[CoroHandleVoid:ty_.*]] = !cir.struct<"struct.std::coroutine_handle" {!u8i}> -// CHECK: ![[CoroHandlePromise:ty_.*]] = !cir.struct<"struct.std::coroutine_handle" {!u8i}> -// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string" {!u8i} -// CHECK: ![[SuspendAlways:ty_.*]] = !cir.struct<"struct.std::suspend_always" {!u8i}> +// CHECK: ![[VoidTask:ty_.*]] = !cir.struct +// CHECK: ![[IntTask:ty_.*]] = !cir.struct +// CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct::promise_type" {!u8i}> +// CHECK: ![[CoroHandleVoid:ty_.*]] = !cir.struct +// CHECK: ![[CoroHandlePromise:ty_.*]] = !cir.struct +// CHECK: ![[StdString:ty_.*]] = !cir.struct // CHECK: module {{.*}} { -// CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22struct2Efolly3A3Acoro3A3Aco_invoke_fn22 +// CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22folly3A3Acoro3A3Aco_invoke_fn22 // CHECK: cir.func builtin private @__builtin_coro_id(!u32i, !cir.ptr, !cir.ptr, !cir.ptr) -> !u32i // CHECK: cir.func builtin private @__builtin_coro_alloc(!u32i) -> !cir.bool @@ -359,23 +359,23 @@ folly::coro::Task go4() { // CHECK: } // CHECK: %12 = cir.scope { -// CHECK: %17 = cir.alloca !ty_22class2Eanon221, cir.ptr , ["ref.tmp1"] {alignment = 1 : i64} +// CHECK: %17 = cir.alloca !ty_22anon221, cir.ptr , ["ref.tmp1"] {alignment = 1 : i64} // Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` -// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> -// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> -// CHECK: cir.yield %19 : !cir.ptr)>> +// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> +// CHECK: cir.yield %19 : !cir.ptr)>> // CHECK: } -// CHECK: cir.store %12, %3 : !cir.ptr)>>, cir.ptr )>>> +// CHECK: cir.store %12, %3 : !cir.ptr)>>, cir.ptr )>>> // CHECK: cir.scope { // CHECK: %17 = cir.alloca !s32i, cir.ptr , ["ref.tmp2", init] {alignment = 4 : i64} -// CHECK: %18 = cir.load %3 : cir.ptr )>>>, !cir.ptr)>> +// CHECK: %18 = cir.load %3 : cir.ptr )>>>, !cir.ptr)>> // CHECK: %19 = cir.const(#cir.int<3> : !s32i) : !s32i // CHECK: cir.store %19, %17 : !s32i, cir.ptr // Call invoker, which calls operator() indirectly. -// CHECK: %20 = cir.call %18(%17) : (!cir.ptr)>>, !cir.ptr) -> !ty_22struct2Efolly3A3Acoro3A3ATask221 -// CHECK: cir.store %20, %4 : !ty_22struct2Efolly3A3Acoro3A3ATask221, cir.ptr +// CHECK: %20 = cir.call %18(%17) : (!cir.ptr)>>, !cir.ptr) -> !ty_22folly3A3Acoro3A3ATask221 +// CHECK: cir.store %20, %4 : !ty_22folly3A3Acoro3A3ATask221, cir.ptr // CHECK: } // CHECK: cir.await(user, ready : { diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp index 30173fe84024..865b05b267b5 100644 --- a/clang/test/CIR/CodeGen/ctor-alias.cpp +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -9,20 +9,20 @@ void t() { } // CHECK: cir.func linkonce_odr @_ZN11DummyStringC2EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NOT: cir.fun @_ZN11DummyStringC1EPKc // CHECK: cir.func @_Z1tv -// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EDummyString22, cir.ptr , ["s4", init] {alignment = 1 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22DummyString22, cir.ptr , ["s4", init] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return struct B { @@ -31,10 +31,10 @@ struct B { B::B() { } -// CHECK: cir.func @_ZN1BC2Ev(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.func @_ZN1BC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: cir.return // CHECK: } -// CHECK: cir.func @_ZN1BC1Ev(!cir.ptr) alias(@_ZN1BC2Ev) \ No newline at end of file +// CHECK: cir.func @_ZN1BC1Ev(!cir.ptr) alias(@_ZN1BC2Ev) \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index fe79e2b690d3..5fea2c60dcf5 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -6,8 +6,8 @@ struct String { long size; String(const String &s) : size{s.size} {} // CHECK: cir.func linkonce_odr @_ZN6StringC2ERKS_ -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 // CHECK: cir.store %arg1, %1 // CHECK: %2 = cir.load %0 @@ -28,10 +28,10 @@ void foo() { // FIXME: s1 shouldn't be uninitialized. // cir.func @_Z3foov() { - // %0 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s"] {alignment = 8 : i64} - // %1 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s1"] {alignment = 8 : i64} - // cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () - // cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () + // %0 = cir.alloca !ty_22String22, cir.ptr , ["s"] {alignment = 8 : i64} + // %1 = cir.alloca !ty_22String22, cir.ptr , ["s1"] {alignment = 8 : i64} + // cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () + // cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () // cir.return // } } diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index 3e1c58395a14..09f54c37f7b6 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -11,22 +11,22 @@ void baz() { Struk s; } -// CHECK: !ty_22struct2EStruk22 = !cir.struct<"struct.Struk" {!s32i}> +// CHECK: !ty_22Struk22 = !cir.struct -// CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return -// CHECK: cir.func linkonce_odr @_ZN5StrukC1Ev(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () +// CHECK: cir.func linkonce_odr @_ZN5StrukC1Ev(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () // CHECK-NEXT: cir.return // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EStruk22, cir.ptr , ["s", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () +// CHECK-NEXT: %0 = cir.alloca !ty_22Struk22, cir.ptr , ["s", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index ade5e97a7501..fd235935b3ef 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -75,29 +75,29 @@ void C3::Layer::Initialize() { } } -// CHECK: !ty_22class2EC23A3ALayer22 = !cir.struct<"class.C2::Layer" {!ty_22class2EC13A3ALayer22, !cir.ptr -// CHECK: !ty_22struct2EC33A3ALayer22 = !cir.struct<"struct.C3::Layer" {!ty_22class2EC23A3ALayer22 +// CHECK-DAG: !ty_22C23A3ALayer22 = !cir.struct +// CHECK-DAG: !ty_22C33A3ALayer22 = !cir.struct) -> cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "m_C1"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %4 = cir.load %3 : cir.ptr >, !cir.ptr -// CHECK: %5 = cir.const(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool +// CHECK: %2 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "m_C1"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %4 = cir.load %3 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool enumy C3::Initialize() { return C2::Initialize(); } -// CHECK: cir.func @_ZN2C310InitializeEv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.func @_ZN2C310InitializeEv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %3 = cir.base_class_addr(%2 : cir.ptr ) -> cir.ptr -// CHECK: %4 = cir.call @_ZN2C210InitializeEv(%3) : (!cir.ptr) -> !s32i +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %3 = cir.base_class_addr(%2 : cir.ptr ) -> cir.ptr +// CHECK: %4 = cir.call @_ZN2C210InitializeEv(%3) : (!cir.ptr) -> !s32i void vcall(C1 &c1) { buffy b; @@ -105,21 +105,21 @@ void vcall(C1 &c1) { c1.SetStuff(e, b); } -// CHECK: cir.func @_Z5vcallR2C1(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["c1", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !ty_22struct2Ebuffy22, cir.ptr , ["b"] {alignment = 8 : i64} +// CHECK: cir.func @_Z5vcallR2C1(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["c1", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22buffy22, cir.ptr , ["b"] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !s32i, cir.ptr , ["e"] {alignment = 4 : i64} -// CHECK: %3 = cir.alloca !ty_22struct2Ebuffy22, cir.ptr , ["agg.tmp0"] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %4 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %3 = cir.alloca !ty_22buffy22, cir.ptr , ["agg.tmp0"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %4 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %5 = cir.load %2 : cir.ptr , !s32i -// CHECK: cir.call @_ZN5buffyC2ERKS_(%3, %1) : (!cir.ptr, !cir.ptr) -> () -// CHECK: %6 = cir.load %3 : cir.ptr , !ty_22struct2Ebuffy22 -// CHECK: %7 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr, !s32i, !ty_22struct2Ebuffy22)>>>> -// CHECK: %8 = cir.load %7 : cir.ptr , !s32i, !ty_22struct2Ebuffy22)>>>>, !cir.ptr, !s32i, !ty_22struct2Ebuffy22)>>> -// CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_22struct2Ebuffy22)>>>, vtable_index = 0, address_point_index = 2) : cir.ptr , !s32i, !ty_22struct2Ebuffy22)>>> -// CHECK: %10 = cir.load %9 : cir.ptr , !s32i, !ty_22struct2Ebuffy22)>>>, !cir.ptr, !s32i, !ty_22struct2Ebuffy22)>> -// CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_22struct2Ebuffy22)>>, !cir.ptr, !s32i, !ty_22struct2Ebuffy22) -> !s32i +// CHECK: cir.call @_ZN5buffyC2ERKS_(%3, %1) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %6 = cir.load %3 : cir.ptr , !ty_22buffy22 +// CHECK: %7 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr, !s32i, !ty_22buffy22)>>>> +// CHECK: %8 = cir.load %7 : cir.ptr , !s32i, !ty_22buffy22)>>>>, !cir.ptr, !s32i, !ty_22buffy22)>>> +// CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_22buffy22)>>>, vtable_index = 0, address_point_index = 2) : cir.ptr , !s32i, !ty_22buffy22)>>> +// CHECK: %10 = cir.load %9 : cir.ptr , !s32i, !ty_22buffy22)>>>, !cir.ptr, !s32i, !ty_22buffy22)>> +// CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_22buffy22)>>, !cir.ptr, !s32i, !ty_22buffy22) -> !s32i // CHECK: cir.return // CHECK: } @@ -135,19 +135,19 @@ class B : public A { void foo () { static_cast(*this).foo();} }; -// CHECK: cir.func linkonce_odr @_ZN1B3fooEv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load deref %0 : cir.ptr >, !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN1B3fooEv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load deref %0 : cir.ptr >, !cir.ptr // CHECK: cir.scope { -// CHECK: %2 = cir.alloca !ty_22class2EA22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: %3 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr +// CHECK: %2 = cir.alloca !ty_22A22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %3 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr // Call @A::A(A const&) -// CHECK: cir.call @_ZN1AC2ERKS_(%2, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN1AC2ERKS_(%2, %3) : (!cir.ptr, !cir.ptr) -> () // Call @A::foo() -// CHECK: cir.call @_ZN1A3fooEv(%2) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1A3fooEv(%2) : (!cir.ptr) -> () // CHECK: } // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index 45e5174cd96d..c1b3692d06f3 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -38,17 +38,17 @@ class B : public A }; // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A" {!cir.ptr>>} #cir.recdecl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.recdecl.ast> // Class B -// CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B" {![[ClassA]]}> +// CHECK: ![[ClassB:ty_.*]] = !cir.struct // CHECK: cir.func @_Z4bluev() -// CHECK: %0 = cir.alloca !ty_22class2EPSEvent22, cir.ptr , ["p", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !ty_22PSEvent22, cir.ptr , ["p", init] {alignment = 8 : i64} // CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %2 = cir.get_global @".str" : cir.ptr > // CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_ZN7PSEventC1E6EFModePKc(%0, %1, %3) : (!cir.ptr, !s32i, !cir.ptr) -> () +// CHECK: cir.call @_ZN7PSEventC1E6EFModePKc(%0, %1, %3) : (!cir.ptr, !s32i, !cir.ptr) -> () // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 2ab1057b523a..b72b0a09b625 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -34,7 +34,7 @@ struct { int x; int y[2][2]; } nestedTwoDim = {1, {{2, 3}, {4, 5}}}; -// CHECK: cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22struct2Eanon22 +// CHECK: cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22anon22 // TODO: test tentatives with internal linkage. diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 6118616d2b20..c1e633d750a7 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -6,13 +6,13 @@ void fn() { a(); } -// CHECK: !ty_22class2Eanon22 = !cir.struct<"class.anon" {!u8i}> +// CHECK: !ty_22anon22 = !cir.struct // CHECK-DAG: module // CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv // CHECK: cir.func @_Z2fnv() -// CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon22, cir.ptr , ["a"] +// CHECK-NEXT: %0 = cir.alloca !ty_22anon22, cir.ptr , ["a"] // CHECK: cir.call @_ZZ2fnvENK3$_0clEv void l0() { @@ -23,15 +23,15 @@ void l0() { // CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv( -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %3 = cir.load %2 : cir.ptr >, !cir.ptr // CHECK: %4 = cir.load %3 : cir.ptr , !s32i // CHECK: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %6 = cir.binop(add, %4, %5) : !s32i -// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr // CHECK: cir.store %6, %8 : !s32i, cir.ptr @@ -45,15 +45,15 @@ auto g() { }; } -// CHECK: cir.func @_Z1gv() -> !ty_22class2Eanon223 -// CHECK: %0 = cir.alloca !ty_22class2Eanon223, cir.ptr , ["__retval"] {alignment = 8 : i64} +// CHECK: cir.func @_Z1gv() -> !ty_22anon223 +// CHECK: %0 = cir.alloca !ty_22anon223, cir.ptr , ["__retval"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK: %2 = cir.const(#cir.int<12> : !s32i) : !s32i // CHECK: cir.store %2, %1 : !s32i, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > -// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon223 -// CHECK: cir.return %4 : !ty_22class2Eanon223 +// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22anon223 +// CHECK: cir.return %4 : !ty_22anon223 auto g2() { int i = 12; @@ -65,15 +65,15 @@ auto g2() { } // Should be same as above because of NRVO -// CHECK: cir.func @_Z2g2v() -> !ty_22class2Eanon224 -// CHECK-NEXT: %0 = cir.alloca !ty_22class2Eanon224, cir.ptr , ["__retval", init] {alignment = 8 : i64} +// CHECK: cir.func @_Z2g2v() -> !ty_22anon224 +// CHECK-NEXT: %0 = cir.alloca !ty_22anon224, cir.ptr , ["__retval", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.const(#cir.int<12> : !s32i) : !s32i // CHECK-NEXT: cir.store %2, %1 : !s32i, cir.ptr -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> // CHECK-NEXT: cir.store %1, %3 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22class2Eanon224 -// CHECK-NEXT: cir.return %4 : !ty_22class2Eanon224 +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22anon224 +// CHECK-NEXT: cir.return %4 : !ty_22anon224 int f() { return g2()(); @@ -82,10 +82,10 @@ int f() { // CHECK: cir.func @_Z1fv() -> !s32i // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !ty_22class2Eanon224, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} -// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_22class2Eanon224 -// CHECK-NEXT: cir.store %3, %2 : !ty_22class2Eanon224, cir.ptr -// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i +// CHECK-NEXT: %2 = cir.alloca !ty_22anon224, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_22anon224 +// CHECK-NEXT: cir.store %3, %2 : !ty_22anon224, cir.ptr +// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i // CHECK-NEXT: cir.store %4, %0 : !s32i, cir.ptr // CHECK-NEXT: } // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i @@ -114,8 +114,8 @@ int g3() { // 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. // CHECK: %3 = cir.scope { -// CHECK: %7 = cir.alloca !ty_22class2Eanon221, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} -// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %7 = cir.alloca !ty_22anon221, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> // CHECK: %9 = cir.unary(plus, %8) : !cir.ptr)>>, !cir.ptr)>> // CHECK: cir.yield %9 : !cir.ptr)>> // CHECK: } diff --git a/clang/test/CIR/CodeGen/libcall.cpp b/clang/test/CIR/CodeGen/libcall.cpp index 30e910c8f28c..3df45c43e124 100644 --- a/clang/test/CIR/CodeGen/libcall.cpp +++ b/clang/test/CIR/CodeGen/libcall.cpp @@ -59,5 +59,5 @@ void t(const char* fmt, ...) { // CHECK: %10 = cir.load %1 : cir.ptr , !u64i // CHECK: %11 = cir.load %3 : cir.ptr >, !cir.ptr -// CHECK: %12 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %12 = cir.load %4 : cir.ptr >, !cir.ptr // CHECK: %13 = cir.call @__vsnprintf_chk(%6, %8, %9, %10, %11, %12) \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/lvalue-refs.cpp b/clang/test/CIR/CodeGen/lvalue-refs.cpp index 66aa6f27b5da..ad56d820c5e9 100644 --- a/clang/test/CIR/CodeGen/lvalue-refs.cpp +++ b/clang/test/CIR/CodeGen/lvalue-refs.cpp @@ -6,8 +6,8 @@ struct String { void split(String &S) {} -// CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["S", init] +// CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["S", init] void foo() { String s; @@ -15,5 +15,5 @@ void foo() { } // CHECK: cir.func @_Z3foov() -// CHECK: %0 = cir.alloca !ty_22struct2EString22, cir.ptr , ["s"] -// CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () +// CHECK: %0 = cir.alloca !ty_22String22, cir.ptr , ["s"] +// CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/move.cpp b/clang/test/CIR/CodeGen/move.cpp index c661a0a89f37..6e3f317d0615 100644 --- a/clang/test/CIR/CodeGen/move.cpp +++ b/clang/test/CIR/CodeGen/move.cpp @@ -16,7 +16,7 @@ struct string { } // std namespace -// CHECK: ![[StdString:ty_.*]] = !cir.struct<"struct.std::string" {!u8i}> +// CHECK: ![[StdString:ty_.*]] = !cir.struct std::string getstr(); void emplace(std::string &&s); diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index b7f8fa5f384b..bf756b880d6b 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -14,19 +14,19 @@ void m(int a, int b) { // CHECK: cir.func linkonce_odr @_ZSt11make_sharedI1SJRiS1_EESt10shared_ptrIT_EDpOT0_( // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["args", init] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["args", init] {alignment = 8 : i64} -// CHECK: %2 = cir.alloca !ty_22class2Estd3A3Ashared_ptr22, cir.ptr , ["__retval"] {alignment = 1 : i64} +// CHECK: %2 = cir.alloca !ty_22std3A3Ashared_ptr22, cir.ptr , ["__retval"] {alignment = 1 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK: cir.scope { // CHECK: %4 = cir.const(#cir.int<1> : !u64i) : !u64i // CHECK: %5 = cir.call @_Znwm(%4) : (!u64i) -> !cir.ptr -// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr // CHECK: %7 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %8 = cir.load %7 : cir.ptr , !s32i // CHECK: %9 = cir.load %1 : cir.ptr >, !cir.ptr // CHECK: %10 = cir.load %9 : cir.ptr , !s32i -// CHECK: cir.call @_ZN1SC1Eii(%6, %8, %10) : (!cir.ptr, !s32i, !s32i) -> () -// CHECK: cir.call @_ZNSt10shared_ptrI1SEC1EPS0_(%2, %6) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN1SC1Eii(%6, %8, %10) : (!cir.ptr, !s32i, !s32i) -> () +// CHECK: cir.call @_ZNSt10shared_ptrI1SEC1EPS0_(%2, %6) : (!cir.ptr, !cir.ptr) -> () // CHECK: } class B { @@ -36,19 +36,19 @@ class B { } }; -// CHECK: cir.func linkonce_odr @_ZN1B9constructEPS_(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__p", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN1B9constructEPS_(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__p", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK: %3 = cir.const(#cir.int<1> : !u64i) : !u64i -// CHECK: %4 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr -// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr +// CHECK: %4 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr // cir.call @B::B()(%new_placament_ptr) -// CHECK: cir.call @_ZN1BC1Ev(%6) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1BC1Ev(%6) : (!cir.ptr) -> () // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp index 91f803fd110b..0a80c686806a 100644 --- a/clang/test/CIR/CodeGen/nrvo.cpp +++ b/clang/test/CIR/CodeGen/nrvo.cpp @@ -9,23 +9,23 @@ std::vector test_nrvo() { return result; } -// CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector" {!cir.ptr>, !cir.ptr>, !cir.ptr>}> +// CHECK: !ty_22std3A3Avector22 = !cir.struct>, !cir.ptr>, !cir.ptr>}> -// CHECK: cir.func @_Z9test_nrvov() -> !ty_22class2Estd3A3Avector22 -// CHECK: %0 = cir.alloca !ty_22class2Estd3A3Avector22, cir.ptr , ["__retval", init] {alignment = 8 : i64} +// CHECK: cir.func @_Z9test_nrvov() -> !ty_22std3A3Avector22 +// CHECK: %0 = cir.alloca !ty_22std3A3Avector22, cir.ptr , ["__retval", init] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !cir.bool, cir.ptr , ["nrvo"] {alignment = 1 : i64} // CHECK: %2 = cir.const(#false) : !cir.bool // CHECK: cir.store %2, %1 : !cir.bool, cir.ptr -// CHECK: cir.call @_ZNSt6vectorIPKcEC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZNSt6vectorIPKcEC1Ev(%0) : (!cir.ptr) -> () // CHECK: cir.scope { // CHECK: %5 = cir.alloca !cir.ptr, cir.ptr >, ["ref.tmp0"] {alignment = 8 : i64} // CHECK: %6 = cir.get_global @".str" : cir.ptr > // CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr // CHECK: cir.store %7, %5 : !cir.ptr, cir.ptr > -// CHECK: cir.call @_ZNSt6vectorIPKcE9push_backEOS1_(%0, %5) : (!cir.ptr, !cir.ptr>) -> () +// CHECK: cir.call @_ZNSt6vectorIPKcE9push_backEOS1_(%0, %5) : (!cir.ptr, !cir.ptr>) -> () // CHECK: } // CHECK: %3 = cir.const(#true) : !cir.bool // CHECK: cir.store %3, %1 : !cir.bool, cir.ptr -// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22class2Estd3A3Avector22 -// CHECK: cir.return %4 : !ty_22class2Estd3A3Avector22 +// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22std3A3Avector22 +// CHECK: cir.return %4 : !ty_22std3A3Avector22 // CHECK: } diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 61be75a1b8e6..c4ba0472d978 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -21,53 +21,53 @@ void init(unsigned numImages) { } } -// CHECK: !ty_22struct2Etriple22 = !cir.struct<"struct.triple" {!u32i, !cir.ptr, !u32i}> -// CHECK: !ty_22class2Estd3A3Avector22 = !cir.struct<"class.std::vector" {!cir.ptr, !cir.ptr, !cir.ptr}> -// CHECK: !ty_22struct2E__vector_iterator22 = !cir.struct<"struct.__vector_iterator" {!cir.ptr}> +// CHECK-DAG: !ty_22triple22 = !cir.struct, !u32i}> +// CHECK-DAG: !ty_22std3A3Avector22 = !cir.struct, !cir.ptr, !cir.ptr}> +// CHECK-DAG: !ty_22__vector_iterator22 = !cir.struct}> // CHECK: cir.func @_Z4initj(%arg0: !u32i // CHECK: %0 = cir.alloca !u32i, cir.ptr , ["numImages", init] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !ty_22class2Estd3A3Avector22, cir.ptr , ["images", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22std3A3Avector22, cir.ptr , ["images", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !u32i, cir.ptr // CHECK: %2 = cir.load %0 : cir.ptr , !u32i // CHECK: %3 = cir.cast(integral, %2 : !u32i), !u64i -// CHECK: cir.call @_ZNSt6vectorI6tripleEC1Em(%1, %3) : (!cir.ptr, !u64i) -> () +// CHECK: cir.call @_ZNSt6vectorI6tripleEC1Em(%1, %3) : (!cir.ptr, !u64i) -> () // CHECK: cir.scope { -// CHECK: %4 = cir.alloca !cir.ptr, cir.ptr >, ["__range1", init] {alignment = 8 : i64} -// CHECK: %5 = cir.alloca !ty_22struct2E__vector_iterator22, cir.ptr , ["__begin1", init] {alignment = 8 : i64} -// CHECK: %6 = cir.alloca !ty_22struct2E__vector_iterator22, cir.ptr , ["__end1", init] {alignment = 8 : i64} -// CHECK: %7 = cir.alloca !cir.ptr, cir.ptr >, ["image", init] {alignment = 8 : i64} -// CHECK: cir.store %1, %4 : !cir.ptr, cir.ptr > -// CHECK: %8 = cir.load %4 : cir.ptr >, !cir.ptr -// CHECK: %9 = cir.call @_ZNSt6vectorI6tripleE5beginEv(%8) : (!cir.ptr) -> !ty_22struct2E__vector_iterator22 -// CHECK: cir.store %9, %5 : !ty_22struct2E__vector_iterator22, cir.ptr -// CHECK: %10 = cir.load %4 : cir.ptr >, !cir.ptr -// CHECK: %11 = cir.call @_ZNSt6vectorI6tripleE3endEv(%10) : (!cir.ptr) -> !ty_22struct2E__vector_iterator22 -// CHECK: cir.store %11, %6 : !ty_22struct2E__vector_iterator22, cir.ptr +// CHECK: %4 = cir.alloca !cir.ptr, cir.ptr >, ["__range1", init] {alignment = 8 : i64} +// CHECK: %5 = cir.alloca !ty_22__vector_iterator22, cir.ptr , ["__begin1", init] {alignment = 8 : i64} +// CHECK: %6 = cir.alloca !ty_22__vector_iterator22, cir.ptr , ["__end1", init] {alignment = 8 : i64} +// CHECK: %7 = cir.alloca !cir.ptr, cir.ptr >, ["image", init] {alignment = 8 : i64} +// CHECK: cir.store %1, %4 : !cir.ptr, cir.ptr > +// CHECK: %8 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %9 = cir.call @_ZNSt6vectorI6tripleE5beginEv(%8) : (!cir.ptr) -> !ty_22__vector_iterator22 +// CHECK: cir.store %9, %5 : !ty_22__vector_iterator22, cir.ptr +// CHECK: %10 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %11 = cir.call @_ZNSt6vectorI6tripleE3endEv(%10) : (!cir.ptr) -> !ty_22__vector_iterator22 +// CHECK: cir.store %11, %6 : !ty_22__vector_iterator22, cir.ptr // CHECK: cir.loop for(cond : { -// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool +// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool // CHECK: cir.brcond %12 ^bb1, ^bb2 // CHECK: ^bb1: // pred: ^bb0 // CHECK: cir.yield continue // CHECK: ^bb2: // pred: ^bb0 // CHECK: cir.yield // CHECK: }, step : { -// CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr +// CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr // CHECK: cir.yield // CHECK: }) { -// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EdeEv(%5) : (!cir.ptr) -> !cir.ptr -// CHECK: cir.store %12, %7 : !cir.ptr, cir.ptr > +// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EdeEv(%5) : (!cir.ptr) -> !cir.ptr +// CHECK: cir.store %12, %7 : !cir.ptr, cir.ptr > // CHECK: cir.scope { -// CHECK: %13 = cir.alloca !ty_22struct2Etriple22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: %14 = cir.const(#cir.zero : !ty_22struct2Etriple22) : !ty_22struct2Etriple22 -// CHECK: cir.store %14, %13 : !ty_22struct2Etriple22, cir.ptr -// CHECK: %15 = "cir.struct_element_addr"(%13) <{member_index = 0 : index, member_name = "type"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %13 = cir.alloca !ty_22triple22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %14 = cir.const(#cir.zero : !ty_22triple22) : !ty_22triple22 +// CHECK: cir.store %14, %13 : !ty_22triple22, cir.ptr +// CHECK: %15 = "cir.struct_element_addr"(%13) <{member_index = 0 : index, member_name = "type"}> : (!cir.ptr) -> !cir.ptr // CHECK: %16 = cir.const(#cir.int<1000024002> : !u32i) : !u32i // CHECK: cir.store %16, %15 : !u32i, cir.ptr -// CHECK: %17 = "cir.struct_element_addr"(%13) <{member_index = 1 : index, member_name = "next"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %18 = "cir.struct_element_addr"(%13) <{member_index = 2 : index, member_name = "image"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %19 = cir.load %7 : cir.ptr >, !cir.ptr -// CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: %17 = "cir.struct_element_addr"(%13) <{member_index = 1 : index, member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %18 = "cir.struct_element_addr"(%13) <{member_index = 2 : index, member_name = "image"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %19 = cir.load %7 : cir.ptr >, !cir.ptr +// CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } // CHECK: cir.yield // CHECK: } diff --git a/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp b/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp index bb1bebe33edb..96730e748a4c 100644 --- a/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp +++ b/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp @@ -15,4 +15,4 @@ void test() { // CHECK-NOT: cir.func linkonce_odr @_ZN6StringC1EPKc // CHECK: cir.func @_Z4testv() -// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 5fd84f2c295d..fcdb54afefec 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -17,33 +17,33 @@ static Init __ioinit(true); static Init __ioinit2(false); // BEFORE: module {{.*}} { -// BEFORE-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) -// BEFORE-NEXT: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { -// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// BEFORE-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// BEFORE-NEXT: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { +// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr // BEFORE-NEXT: %1 = cir.const(#true) : !cir.bool -// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // BEFORE-NEXT: } {ast = #cir.vardecl.ast} -// BEFORE: cir.global "private" internal @_ZL9__ioinit2 = ctor : !ty_22class2EInit22 { -// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// BEFORE: cir.global "private" internal @_ZL9__ioinit2 = ctor : !ty_22Init22 { +// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr // BEFORE-NEXT: %1 = cir.const(#false) : !cir.bool -// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // BEFORE-NEXT: } {ast = #cir.vardecl.ast} // BEFORE-NEXT: } // AFTER: module {{.*}} attributes {{.*}}cir.globalCtors = [#cir.globalCtor<"__cxx_global_var_init">, #cir.globalCtor<"__cxx_global_var_init.1">] -// AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) -// AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22class2EInit22 {ast = #cir.vardecl.ast} +// AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.vardecl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init() -// AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr // AFTER-NEXT: %1 = cir.const(#true) : !cir.bool -// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // AFTER-NEXT: cir.return -// AFTER: cir.global "private" internal @_ZL9__ioinit2 = #cir.zero : !ty_22class2EInit22 {ast = #cir.vardecl.ast} +// AFTER: cir.global "private" internal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {ast = #cir.vardecl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() -// AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr // AFTER-NEXT: %1 = cir.const(#false) : !cir.bool -// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // AFTER-NEXT: cir.return // AFTER: cir.func private @_GLOBAL__sub_I_static.cpp() // AFTER-NEXT: cir.call @__cxx_global_var_init() : () -> () diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 67097d25326f..b89ba790d22b 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -17,28 +17,28 @@ void baz(void) { struct Foo f; } -// CHECK-DAG: !ty_22struct2EBar22 = !cir.struct<"struct.Bar" {!s32i, !s8i}> -// CHECK-DAG: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo" {!s32i, !s8i, !ty_22struct2EBar22}> +// CHECK-DAG: !ty_22Bar22 = !cir.struct +// CHECK-DAG: !ty_22Foo22 = !cir.struct // CHECK-DAG: module {{.*}} { // CHECK: cir.func @baz() -// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22Bar22, cir.ptr , ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !ty_22Foo22, cir.ptr , ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.return // CHECK-NEXT: } void shouldConstInitStructs(void) { // CHECK: cir.func @shouldConstInitStructs struct Foo f = {1, 2, {3, 4}}; - // CHECK: %[[#V0:]] = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} - // CHECK: %[[#V1:]] = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_22struct2EBar22}> : !ty_22struct2EFoo22) : !ty_22struct2EFoo22 - // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22struct2EFoo22, cir.ptr + // CHECK: %[[#V0:]] = cir.alloca !ty_22Foo22, cir.ptr , ["f"] {alignment = 4 : i64} + // CHECK: %[[#V1:]] = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_22Bar22}> : !ty_22Foo22) : !ty_22Foo22 + // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22Foo22, cir.ptr } // Should zero-initialize uninitialized global structs. struct S { int a,b; } s; -// CHECK-DAG: cir.global external @s = #cir.zero : !ty_22struct2ES22 +// CHECK-DAG: cir.global external @s = #cir.zero : !ty_22S22 // Should initialize basic global structs. struct S1 { @@ -46,7 +46,7 @@ struct S1 { float f; int *p; } s1 = {1, .1, 0}; -// CHECK-DAG: cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.null : !cir.ptr}> : !ty_22struct2ES122 +// CHECK-DAG: cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.null : !cir.ptr}> : !ty_22S122 // Should initialize global nested structs. struct S2 { @@ -54,19 +54,19 @@ struct S2 { int a; } s2a; } s2 = {{1}}; -// CHECK-DAG: cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES2A22}> : !ty_22struct2ES222 +// CHECK-DAG: cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22}> : !ty_22S222 // Should initialize global arrays of structs. struct S3 { int a; } s3[3] = {{1}, {2}, {3}}; -// CHECK-DAG: cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22struct2ES322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22struct2ES322]> : !cir.array +// CHECK-DAG: cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22S322]> : !cir.array void shouldCopyStructAsCallArg(struct S1 s) { // CHECK-DAG: cir.func @shouldCopyStructAsCallArg shouldCopyStructAsCallArg(s); - // CHECK-DAG: %[[#LV:]] = cir.load %{{.+}} : cir.ptr , !ty_22struct2ES122 - // CHECK-DAG: cir.call @shouldCopyStructAsCallArg(%[[#LV]]) : (!ty_22struct2ES122) -> () + // CHECK-DAG: %[[#LV:]] = cir.load %{{.+}} : cir.ptr , !ty_22S122 + // CHECK-DAG: cir.call @shouldCopyStructAsCallArg(%[[#LV]]) : (!ty_22S122) -> () } struct Bar shouldGenerateAndAccessStructArrays(void) { @@ -75,6 +75,6 @@ struct Bar shouldGenerateAndAccessStructArrays(void) { } // CHECK-DAG: cir.func @shouldGenerateAndAccessStructArrays // CHECK-DAG: %[[#STRIDE:]] = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK-DAG: %[[#DARR:]] = cir.cast(array_to_ptrdecay, %{{.+}} : !cir.ptr>), !cir.ptr -// CHECK-DAG: %[[#ELT:]] = cir.ptr_stride(%[[#DARR]] : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr -// CHECK-DAG: cir.copy %[[#ELT]] to %{{.+}} : !cir.ptr +// CHECK-DAG: %[[#DARR:]] = cir.cast(array_to_ptrdecay, %{{.+}} : !cir.ptr>), !cir.ptr +// CHECK-DAG: %[[#ELT:]] = cir.ptr_stride(%[[#DARR]] : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr +// CHECK-DAG: cir.copy %[[#ELT]] to %{{.+}} : !cir.ptr diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 4efe026e0e5b..70df084c7fc6 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -26,37 +26,37 @@ void baz() { struct incomplete; void yoyo(incomplete *i) {} -// CHECK: !ty_22struct2Eincomplete22 = !cir.struct<"struct.incomplete" incomplete -// CHECK: !ty_22struct2EBar22 = !cir.struct<"struct.Bar" {!s32i, !s8i}> - -// CHECK: !ty_22struct2EFoo22 = !cir.struct<"struct.Foo" {!s32i, !s8i, !ty_22struct2EBar22}> -// CHECK: !ty_22struct2EMandalore22 = !cir.struct<"struct.Mandalore" {!u32i, !cir.ptr, !s32i} #cir.recdecl.ast> -// CHECK: !ty_22class2EAdv22 = !cir.struct<"class.Adv" {!ty_22struct2EMandalore22}> -// CHECK: !ty_22struct2EEntry22 = !cir.struct<"struct.Entry" {!cir.ptr, !cir.ptr)>>}> - -// CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-DAG-DAG: !ty_22incomplete22 = !cir.struct + +// CHECK-DAG: !ty_22Foo22 = !cir.struct +// CHECK-DAG: !ty_22Mandalore22 = !cir.struct, !s32i} #cir.recdecl.ast> +// CHECK-DAG: !ty_22Adv22 = !cir.struct +// CHECK-DAG: !ty_22Entry22 = !cir.struct, !cir.ptr)>>}> + +// CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr -// CHECK-NEXT: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %3 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: %4 = cir.load %1 : cir.ptr , !s32i // CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr // CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i @@ -64,14 +64,14 @@ void yoyo(incomplete *i) {} // CHECK-NEXT: } // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !ty_22struct2EBar22, cir.ptr , ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22Bar22, cir.ptr , ["b"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["result", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca !ty_22struct2EFoo22, cir.ptr , ["f"] {alignment = 4 : i64} -// CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () +// CHECK-NEXT: %2 = cir.alloca !ty_22Foo22, cir.ptr , ["f"] {alignment = 4 : i64} +// CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () // CHECK-NEXT: %3 = cir.const(#cir.int<4> : !s32i) : !s32i -// CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, !s32i) -> () +// CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, !s32i) -> () // CHECK-NEXT: %4 = cir.const(#cir.int<4> : !s32i) : !s32i -// CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, !s32i) -> !s32i +// CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, !s32i) -> !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } @@ -94,18 +94,18 @@ class Adv { void m() { Adv C; } -// CHECK: cir.func linkonce_odr @_ZN3AdvC2Ev(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "x"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "w"}> : (!cir.ptr) -> !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN3AdvC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "x"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "w"}> : (!cir.ptr) -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000024001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr -// CHECK: %5 = "cir.struct_element_addr"(%2) <{member_index = 1 : index, member_name = "n"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %5 = "cir.struct_element_addr"(%2) <{member_index = 1 : index, member_name = "n"}> : (!cir.ptr) -> !cir.ptr> // CHECK: %6 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > -// CHECK: %7 = "cir.struct_element_addr"(%2) <{member_index = 2 : index, member_name = "d"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %7 = "cir.struct_element_addr"(%2) <{member_index = 2 : index, member_name = "d"}> : (!cir.ptr) -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: cir.store %8, %7 : !s32i, cir.ptr // CHECK: cir.return @@ -117,19 +117,19 @@ struct A { // Should globally const-initialize struct members. struct A simpleConstInit = {1}; -// CHECK: cir.global external @simpleConstInit = #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2EA22 +// CHECK: cir.global external @simpleConstInit = #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22A22 // Should globally const-initialize arrays with struct members. struct A arrConstInit[1] = {{1}}; -// CHECK: cir.global external @arrConstInit = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2EA22]> : !cir.array +// CHECK: cir.global external @arrConstInit = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22A22]> : !cir.array // Should locally copy struct members. void shouldLocallyCopyStructAssignments(void) { struct A a = { 3 }; - // CHECK: %[[#SA:]] = cir.alloca !ty_22struct2EA22, cir.ptr , ["a"] {alignment = 4 : i64} + // CHECK: %[[#SA:]] = cir.alloca !ty_22A22, cir.ptr , ["a"] {alignment = 4 : i64} struct A b = a; - // CHECK: %[[#SB:]] = cir.alloca !ty_22struct2EA22, cir.ptr , ["b", init] {alignment = 4 : i64} - // cir.copy %[[#SA]] to %[[SB]] : !cir.ptr + // CHECK: %[[#SB:]] = cir.alloca !ty_22A22, cir.ptr , ["b", init] {alignment = 4 : i64} + // cir.copy %[[#SA]] to %[[SB]] : !cir.ptr } A get_default() { return A{2}; } @@ -141,12 +141,12 @@ struct S { void h() { S s; } // CHECK: cir.func @_Z1hv() -// CHECK: %0 = cir.alloca !ty_22struct2ES22, cir.ptr , ["s", init] {alignment = 1 : i64} -// CHECK: %1 = cir.alloca !ty_22struct2EA22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} -// CHECK: %2 = cir.call @_Z11get_defaultv() : () -> !ty_22struct2EA22 -// CHECK: cir.store %2, %1 : !ty_22struct2EA22, cir.ptr -// CHECK: %3 = cir.load %1 : cir.ptr , !ty_22struct2EA22 -// CHECK: cir.call @_ZN1SC1E1A(%0, %3) : (!cir.ptr, !ty_22struct2EA22) -> () +// CHECK: %0 = cir.alloca !ty_22S22, cir.ptr , ["s", init] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !ty_22A22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} +// CHECK: %2 = cir.call @_Z11get_defaultv() : () -> !ty_22A22 +// CHECK: cir.store %2, %1 : !ty_22A22, cir.ptr +// CHECK: %3 = cir.load %1 : cir.ptr , !ty_22A22 +// CHECK: cir.call @_ZN1SC1E1A(%0, %3) : (!cir.ptr, !ty_22A22) -> () // CHECK: cir.return // CHECK: } @@ -162,6 +162,6 @@ struct Entry { void ppp() { Entry x; } -// CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr -// CHECK: = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr)>>> +// CHECK: = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr)>>> diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index 6a5a9d5a0fd3..e77bed5a4f89 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -12,14 +12,14 @@ void m() { yolm3 q3; } -// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon" {!cir.bool, !s32i} #cir.recdecl.ast> -// CHECK: !ty_22struct2Eyolo22 = !cir.struct<"struct.yolo" {!s32i} #cir.recdecl.ast> -// CHECK: !ty_22struct2Eanon221 = !cir.struct<"struct.anon" {!cir.ptr, !s32i} #cir.recdecl.ast> +// CHECK: !ty_22anon22 = !cir.struct +// CHECK: !ty_22yolo22 = !cir.struct +// CHECK: !ty_22anon221 = !cir.struct, !s32i} #cir.recdecl.ast> -// CHECK: !ty_22union2Eyolm22 = !cir.struct<"union.yolm" {!ty_22struct2Eyolo22}> -// CHECK: !ty_22union2Eyolm222 = !cir.struct<"union.yolm2" {!ty_22struct2Eanon221}> +// CHECK: !ty_22yolm22 = !cir.struct +// CHECK: !ty_22yolm222 = !cir.struct // CHECK: cir.func @_Z1mv() -// CHECK: cir.alloca !ty_22union2Eyolm22, cir.ptr , ["q"] {alignment = 4 : i64} -// CHECK: cir.alloca !ty_22union2Eyolm222, cir.ptr , ["q2"] {alignment = 8 : i64} -// CHECK: cir.alloca !ty_22union2Eyolm322, cir.ptr , ["q3"] {alignment = 4 : i64} +// CHECK: cir.alloca !ty_22yolm22, cir.ptr , ["q"] {alignment = 4 : i64} +// CHECK: cir.alloca !ty_22yolm222, cir.ptr , ["q2"] {alignment = 8 : i64} +// CHECK: cir.alloca !ty_22yolm322, cir.ptr , ["q3"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/variadics.c b/clang/test/CIR/CodeGen/variadics.c index 894621b078d9..90ab27cc8ae5 100644 --- a/clang/test/CIR/CodeGen/variadics.c +++ b/clang/test/CIR/CodeGen/variadics.c @@ -12,7 +12,7 @@ typedef __builtin_va_list va_list; #define va_arg(ap, type) __builtin_va_arg(ap, type) #define va_copy(dst, src) __builtin_va_copy(dst, src) -// CHECK: [[VALISTTYPE:!.+va_list.*]] = !cir.struct<"struct{{.*}}__va_list +// CHECK: [[VALISTTYPE:!.+va_list.*]] = !cir.struct !s32i diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp index 6ffd1509f89b..4f9b3495bd87 100644 --- a/clang/test/CIR/CodeGen/vector.cpp +++ b/clang/test/CIR/CodeGen/vector.cpp @@ -12,13 +12,13 @@ namespace std { } // namespace std // CHECK: cir.func linkonce_odr @_ZNSt6vectorIyE6resizeEm( -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !u64i, cir.ptr , ["__sz", init] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !u64i, cir.ptr , ["__cs", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: cir.store %arg1, %1 : !u64i, cir.ptr -// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %4 = cir.call @_ZNKSt6vectorIyE4sizeEv(%3) : (!cir.ptr) -> !u64i +// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.call @_ZNKSt6vectorIyE4sizeEv(%3) : (!cir.ptr) -> !u64i // CHECK: cir.store %4, %2 : !u64i, cir.ptr // CHECK: cir.scope { // CHECK: %5 = cir.load %2 : cir.ptr , !u64i diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 5df467f78fb1..873a354a611a 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -19,16 +19,16 @@ class B : public A }; // Type info B. -// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct<"" {!cir.ptr, !cir.ptr, !cir.ptr}> +// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct, !cir.ptr, !cir.ptr}> // vtable for A type -// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct<"" {!cir.array x 5>}> +// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct x 5>}> // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct<"class.A" {!cir.ptr>>} #cir.recdecl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.recdecl.ast> // Class B -// CHECK: ![[ClassB:ty_.*]] = !cir.struct<"class.B" {![[ClassA]]}> +// CHECK: ![[ClassB:ty_.*]] = !cir.struct // B ctor => @B::B() // Calls @A::A() and initialize __vptr with address of B's vtable. diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index d860f2632d25..378fdd4ba85b 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -3,12 +3,12 @@ !s8i = !cir.int !s32i = !cir.int !s64i = !cir.int -!ty_22class2EInit22 = !cir.struct<"class.Init" {!s8i} #cir.recdecl.ast> +!ty_22Init22 = !cir.struct module { cir.global external @a = #cir.int<3> : !s32i cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i] : !cir.array> cir.global external @b = #cir.const_array<"example\00" : !cir.array> - cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.null : !cir.ptr}> : !cir.struct<"" {!s8i, !s64i, !cir.ptr}> + cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.null : !cir.ptr}> : !cir.struct}> cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} cir.global "private" internal @c : !s32i cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} @@ -31,12 +31,12 @@ module { #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@type_info_name_B> : !cir.ptr, #cir.global_view<@type_info_A> : !cir.ptr}> - : !cir.struct<"" {!cir.ptr, !cir.ptr, !cir.ptr}> - cir.func private @_ZN4InitC1Eb(!cir.ptr, !s8i) - cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { - %0 = cir.get_global @_ZL8__ioinit : cir.ptr + : !cir.struct, !cir.ptr, !cir.ptr}> + cir.func private @_ZN4InitC1Eb(!cir.ptr, !s8i) + cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { + %0 = cir.get_global @_ZL8__ioinit : cir.ptr %1 = cir.const(#cir.int<3> : !s8i) : !s8i - cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () + cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () } } @@ -51,8 +51,8 @@ module { // CHECK: cir.func @use_global() // CHECK-NEXT: %0 = cir.get_global @a : cir.ptr -// CHECK: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22class2EInit22 { -// CHECK-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// CHECK: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { +// CHECK-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr // CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s8i) : !s8i -// CHECK-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () +// CHECK-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index fc8d80128375..5c6e652e1f56 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -309,7 +309,7 @@ module { cir.global external @type_info_B = #cir.typeinfo<{ // expected-error {{element at index 0 has type '!cir.ptr>' but return type for this element is '!cir.ptr>'}} #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}> - : !cir.struct<"" {!cir.ptr}> + : !cir.struct}> } // expected-error {{'cir.global' expected constant attribute to match type}} // ----- diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index 4f98102a3bad..aa0acce60abd 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -1,21 +1,22 @@ -// RUN: cir-opt %s | cir-opt | FileCheck %s +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s !u8i = !cir.int !u16i = !cir.int !s32i = !cir.int !u32i = !cir.int -!ty_2222 = !cir.struct<"" {!cir.array x 5>}> -!ty_22221 = !cir.struct<"" {!cir.ptr, !cir.ptr, !cir.ptr}> -!ty_22class2EA22 = !cir.struct<"class.A" incomplete #cir.recdecl.ast> -// CHECK: !ty_22i22 = !cir.struct<"i" incomplete> -// CHECK: !ty_22S22 = !cir.struct<"S" {!u8i, !u16i, !u32i}> -!ty_22struct2ES22 = !cir.struct<"struct.S" {!s32i, !s32i}> +!ty_2222 = !cir.struct x 5>}> +!ty_22221 = !cir.struct, !cir.ptr, !cir.ptr}> +!ty_22A22 = !cir.struct +!ty_22i22 = !cir.struct +!ty_22S22 = !cir.struct +!ty_22S122 = !cir.struct module { cir.func @structs() { - %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] - %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["i", init] + %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] + %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["i", init] cir.return } @@ -24,8 +25,8 @@ module { // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["i", init] cir.func @shouldSuccessfullyParseConstStructAttrs() { - %0 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22struct2ES22) : !ty_22struct2ES22 - // CHECK: cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22struct2ES22) : !ty_22struct2ES22 + %0 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122) : !ty_22S122 + // CHECK: cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122) : !ty_22S122 cir.return } } diff --git a/clang/test/CIR/IR/vtableAttr.cir b/clang/test/CIR/IR/vtableAttr.cir index 596644d2cfc7..ae175d5fa987 100644 --- a/clang/test/CIR/IR/vtableAttr.cir +++ b/clang/test/CIR/IR/vtableAttr.cir @@ -1,7 +1,7 @@ // RUN: cir-opt %s | FileCheck %s !u8i = !cir.int -!ty_2222 = !cir.struct<"" {!cir.array x 1>}> +!ty_2222 = !cir.struct x 1>}> module { // Should parse VTable attribute. cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.null : !cir.ptr]> : !cir.array x 1>}> : !ty_2222 diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir index 3b85567acd91..1136f8a3beb5 100644 --- a/clang/test/CIR/Lowering/array.cir +++ b/clang/test/CIR/Lowering/array.cir @@ -2,7 +2,7 @@ // RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM !s32i = !cir.int -!ty_22struct2ES22 = !cir.struct<"struct.S" {!s32i} #cir.recdecl.ast> +!ty_22S22 = !cir.struct module { cir.func @foo() { @@ -21,7 +21,7 @@ module { // LLVM: %1 = alloca [10 x i32], i64 1, align 16 // LLVM-NEXT: ret void - cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES22, #cir.zero : !ty_22struct2ES22]> : !cir.array + cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array // CHECK: llvm.mlir.global external @arr() {addr_space = 0 : i32} : !llvm.array<2 x struct<"struct.S", (i32)>> { // CHECK: %0 = llvm.mlir.undef : !llvm.array<2 x struct<"struct.S", (i32)>> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S", (i32)> diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index a469ea97b43d..9833f54c3f62 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -10,8 +10,8 @@ !u32i = !cir.int !u64i = !cir.int !u8i = !cir.int -!ty_22struct2EA22 = !cir.struct<"struct.A" {!s32i, !cir.array x 2>} #cir.recdecl.ast> -!ty_22struct2EBar22 = !cir.struct<"struct.Bar" {!s32i, !s8i} #cir.recdecl.ast> +!ty_22A22 = !cir.struct x 2>} #cir.recdecl.ast> +!ty_22Bar22 = !cir.struct module { cir.global external @a = #cir.int<3> : !s32i @@ -89,7 +89,7 @@ module { cir.global external @twoDim = #cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.array]> : !cir.array x 2> // MLIR: llvm.mlir.global external @twoDim(dense<{{\[\[}}1, 2], [3, 4{{\]\]}}> : tensor<2x2xi32>) {addr_space = 0 : i32} : !llvm.array<2 x array<2 x i32>> // LLVM: @twoDim = global [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 1, i32 2], [2 x i32] [i32 3, i32 4{{\]\]}} - cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22struct2EA22 + cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22A22 // LLVM: @nestedTwoDim = global %struct.A { i32 1, [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 2, i32 3], [2 x i32] [i32 4, i32 5{{\]\]}} } cir.func @_Z11get_globalsv() { %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} @@ -140,7 +140,7 @@ module { // MLIR: %0 = llvm.mlir.zero : !llvm.ptr // MLIR: llvm.return %0 : !llvm.ptr // MLIR: } - cir.global external @zeroStruct = #cir.zero : !ty_22struct2EBar22 + cir.global external @zeroStruct = #cir.zero : !ty_22Bar22 // MLIR: llvm.mlir.global external @zeroStruct() // MLIR: %0 = cir.llvmir.zeroinit : !llvm.struct<"struct.Bar", (i32, i8)> // MLIR: llvm.return %0 : !llvm.struct<"struct.Bar", (i32, i8)> diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index 45649c1fa7f1..a5b0746fbe9c 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -4,28 +4,28 @@ !s32i = !cir.int !u8i = !cir.int !u32i = !cir.int -!ty_22struct2ES22 = !cir.struct<"struct.S" {!u8i, !s32i}> -!ty_22struct2ES2A22 = !cir.struct<"struct.S2A" {!s32i} #cir.recdecl.ast> -!ty_22struct2ES122 = !cir.struct<"struct.S1" {!s32i, f32, !cir.ptr} #cir.recdecl.ast> -!ty_22struct2ES222 = !cir.struct<"struct.S2" {!ty_22struct2ES2A22} #cir.recdecl.ast> -!ty_22struct2ES322 = !cir.struct<"struct.S3" {!s32i} #cir.recdecl.ast> +!ty_22S22 = !cir.struct +!ty_22S2A22 = !cir.struct +!ty_22S122 = !cir.struct} #cir.recdecl.ast> +!ty_22S222 = !cir.struct +!ty_22S322 = !cir.struct module { cir.func @test() { - %1 = cir.alloca !ty_22struct2ES22, cir.ptr , ["x"] {alignment = 4 : i64} + %1 = cir.alloca !ty_22S22, cir.ptr , ["x"] {alignment = 4 : i64} // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#STRUCT:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"struct.S", (i8, i32)> - %3 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "c"}> : (!cir.ptr) -> !cir.ptr + %3 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "c"}> : (!cir.ptr) -> !cir.ptr // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 0] : (!llvm.ptr) -> !llvm.ptr - %5 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr + %5 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 1] : (!llvm.ptr) -> !llvm.ptr cir.return } cir.func @shouldConstInitLocalStructsWithConstStructAttr() { - %0 = cir.alloca !ty_22struct2ES2A22, cir.ptr , ["s"] {alignment = 4 : i64} - %1 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES2A22) : !ty_22struct2ES2A22 - cir.store %1, %0 : !ty_22struct2ES2A22, cir.ptr + %0 = cir.alloca !ty_22S2A22, cir.ptr , ["s"] {alignment = 4 : i64} + %1 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22) : !ty_22S2A22 + cir.store %1, %0 : !ty_22S2A22, cir.ptr cir.return } // CHECK: llvm.func @shouldConstInitLocalStructsWithConstStructAttr() @@ -39,7 +39,7 @@ module { // CHECK: } // Should lower basic #cir.const_struct initializer. - cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.null : !cir.ptr}> : !ty_22struct2ES122 + cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.null : !cir.ptr}> : !ty_22S122 // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"struct.S1", (i32, f32, ptr)> { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 @@ -52,7 +52,7 @@ module { // CHECK: } // Should lower nested #cir.const_struct initializer. - cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES2A22}> : !ty_22struct2ES222 + cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22}> : !ty_22S222 // CHECK: llvm.mlir.global external @s2() {addr_space = 0 : i32} : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S2A", (i32)> @@ -62,7 +62,7 @@ module { // CHECK: llvm.return %4 : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> // CHECK: } - cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22struct2ES322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22struct2ES322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22struct2ES322]> : !cir.array + cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22S322]> : !cir.array // CHECK: llvm.mlir.global external @s3() {addr_space = 0 : i32} : !llvm.array<3 x struct<"struct.S3", (i32)>> { // CHECK: %0 = llvm.mlir.undef : !llvm.array<3 x struct<"struct.S3", (i32)>> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> @@ -82,13 +82,13 @@ module { cir.func @shouldLowerStructCopies() { // CHECK: llvm.func @shouldLowerStructCopies() - %1 = cir.alloca !ty_22struct2ES22, cir.ptr , ["a"] {alignment = 4 : i64} + %1 = cir.alloca !ty_22S22, cir.ptr , ["a"] {alignment = 4 : i64} // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#SA:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"struct.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr - %2 = cir.alloca !ty_22struct2ES22, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !ty_22S22, cir.ptr , ["b", init] {alignment = 4 : i64} // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#SB:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"struct.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr - cir.copy %1 to %2 : !cir.ptr + cir.copy %1 to %2 : !cir.ptr // CHECK: %[[#SIZE:]] = llvm.mlir.constant(8 : i32) : i32 // CHECK: "llvm.intr.memcpy"(%[[#SB]], %[[#SA]], %[[#SIZE]]) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> () cir.return diff --git a/clang/test/CIR/Lowering/variadics.cir b/clang/test/CIR/Lowering/variadics.cir index 465f222edee4..050ae53d610c 100644 --- a/clang/test/CIR/Lowering/variadics.cir +++ b/clang/test/CIR/Lowering/variadics.cir @@ -5,30 +5,30 @@ !u32i = !cir.int !u8i = !cir.int -!ty_22struct2E__va_list_tag22 = !cir.struct<"struct.__va_list_tag" {!u32i, !u32i, !cir.ptr, !cir.ptr} #cir.recdecl.ast> +!ty_22__va_list_tag22 = !cir.struct, !cir.ptr} #cir.recdecl.ast> module { cir.func @average(%arg0: !s32i, ...) -> !s32i { %0 = cir.alloca !s32i, cir.ptr , ["count", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - %2 = cir.alloca !cir.array, cir.ptr >, ["args"] {alignment = 16 : i64} - %3 = cir.alloca !cir.array, cir.ptr >, ["args_copy"] {alignment = 16 : i64} + %2 = cir.alloca !cir.array, cir.ptr >, ["args"] {alignment = 16 : i64} + %3 = cir.alloca !cir.array, cir.ptr >, ["args_copy"] {alignment = 16 : i64} cir.store %arg0, %0 : !s32i, cir.ptr - %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr - cir.va.start %4 : !cir.ptr + %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.start %4 : !cir.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vastart %{{[0-9]+}} : !llvm.ptr - %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr - %6 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr - cir.va.copy %6 to %5 : !cir.ptr, !cir.ptr + %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr + %6 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.copy %6 to %5 : !cir.ptr, !cir.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr // MLIR-NEXT: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vacopy %13 to %{{[0-9]+}} : !llvm.ptr, !llvm.ptr - %7 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr - cir.va.end %7 : !cir.ptr + %7 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.end %7 : !cir.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vaend %{{[0-9]+}} : !llvm.ptr From 7b0d7b06a5e712a7a0dc2d942ac396319b612222 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 28 Aug 2023 20:45:14 -0300 Subject: [PATCH 1174/2301] [CIR] Refactor StructElementAddr into GetMemberOp Improves a few aspects of the old CIR GEP equivalent: * Generalize the name to GetMemberOp, since it can be used for unions, classes, structs, and others. * Add custom assembly format to improve readability. * Add a new CIR dialect operation to represent the operation. * Remove redundancy from arguments names (e.g. "member_index" to just "index") for terseness. * Add verifier to check if: * The index is within bounds. * The type is a record (has members to be accessed). * The result type matches the type of the member. * Use CIRGenBuilder when building GetMemberOps. * Also add some getter wrappers. ghstack-source-id: f28916ea336724caaa21beab34e7e4feafd7c8b1 Pull Request resolved: https://github.com/llvm/clangir/pull/228 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 54 ++++++++++++------- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 7 +++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 7 ++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 22 ++++++++ .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 16 +++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 15 +++--- clang/test/CIR/CodeGen/String.cpp | 10 ++-- clang/test/CIR/CodeGen/agg-init.cpp | 10 ++-- clang/test/CIR/CodeGen/assign-operator.cpp | 8 +-- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 4 +- clang/test/CIR/CodeGen/derived-to-base.cpp | 4 +- clang/test/CIR/CodeGen/lambda.cpp | 8 +-- clang/test/CIR/CodeGen/rangefor.cpp | 6 +-- clang/test/CIR/CodeGen/struct.cpp | 10 ++-- clang/test/CIR/CodeGen/unary-deref.cpp | 4 +- clang/test/CIR/Lowering/struct.cir | 4 +- 16 files changed, 115 insertions(+), 74 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a12119e42684..8296030dce3c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1409,41 +1409,47 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", } //===----------------------------------------------------------------------===// -// StructElementAddr +// GetMemberOp //===----------------------------------------------------------------------===// -// FIXME: rename this among the lines of GetGlobalOp. -def StructElementAddr : CIR_Op<"struct_element_addr"> { +def GetMemberOp : CIR_Op<"get_member"> { let summary = "Get the address of a member of a struct"; let description = [{ - The `cir.struct_element_addr` operaration gets the address of a particular - named member from the input struct. + The `cir.get_member` operation gets the address of a particular named + member from the input record. - It expects a pointer to the base struct as well as the name of the member + It expects a pointer to the base record as well as the name of the member and its field index. Example: ```mlir - !ty_22struct2EBar22 = type !cir.struct<"struct.Bar", i32, i8> - ... - %0 = cir.alloca !ty_22struct2EBar22, cir.ptr - ... - %1 = cir.struct_element_addr %0, "Bar.a" - %2 = cir.load %1 : cir.ptr , int - ... + // Suppose we have a struct with multiple members. + !s32i = !cir.int + !s8i = !cir.int + !struct_ty = !cir.struct<"struct.Bar" {!s32i, !s8i}> + + // Get the address of the member at index 1. + %1 = cir.get_member %0[1] {name = "i"} : (!cir.ptr) -> !cir.ptr ``` }]; let arguments = (ins - Arg:$struct_addr, - StrAttr:$member_name, - IndexAttr:$member_index); + Arg:$addr, + StrAttr:$name, + IndexAttr:$index_attr); let results = (outs Res:$result); + let assemblyFormat = [{ + $addr `[` $index_attr `]` attr-dict + `:` qualified(type($addr)) `->` qualified(type($result)) + }]; + let builders = [ - OpBuilder<(ins "Type":$type, "Value":$value, "llvm::StringRef":$name, - "unsigned":$index), + OpBuilder<(ins "Type":$type, + "Value":$value, + "llvm::StringRef":$name, + "unsigned":$index), [{ mlir::APInt fieldIdx(64, index); build($_builder, $_state, type, value, name, fieldIdx); @@ -1452,10 +1458,18 @@ def StructElementAddr : CIR_Op<"struct_element_addr"> { let extraClassDeclaration = [{ /// Return the index of the struct member being accessed. - uint64_t getIndex() { return getMemberIndex().getZExtValue(); } + uint64_t getIndex() { return getIndexAttr().getZExtValue(); } + + /// Return the record type pointed by the base pointer. + mlir::cir::PointerType getAddrTy() { return getAddr().getType(); } + + /// Return the result type. + mlir::cir::PointerType getResultTy() { + return getResult().getType().cast(); + } }]; - // FIXME: add verifier. + let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 494bf9900eef..b0621f61accf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -602,6 +602,13 @@ class CIRGenBuilderTy : public mlir::OpBuilder { global.getLoc(), getPointerTo(global.getSymType()), global.getName()); } + /// Create a pointer to a record member. + mlir::Value createGetMember(mlir::Location loc, mlir::Type result, + mlir::Value base, llvm::StringRef name, + unsigned index) { + return create(loc, result, base, name, index); + } + /// Cast the element type of the given address to a different type, /// preserving information like the alignment. cir::Address createElementBitCast(mlir::Location loc, cir::Address addr, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 190ed0fb5d4e..47204c4daf70 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -72,13 +72,13 @@ static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, // For most cases fieldName is the same as field->getName() but for lambdas, // which do not currently carry the name, so it can be passed down from the // CaptureStmt. - auto sea = CGF.getBuilder().create( + auto memberAddr = CGF.getBuilder().createGetMember( loc, fieldPtr, Base.getPointer(), fieldName, fieldIndex); // TODO: We could get the alignment from the CIRGenRecordLayout, but given the // member name based lookup of the member here we probably shouldn't be. We'll // have to consider this later. - auto addr = Address(sea->getResult(0), CharUnits::One()); + auto addr = Address(memberAddr, CharUnits::One()); return addr; } @@ -356,8 +356,7 @@ static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { // When directing calling an inline builtin, call it through it's mangled // name to make it clear it's not the actual builtin. auto Fn = cast(CGF.CurFn); - if (Fn.getName() != FDInlineName && - onlyHasInlineBuiltinDeclaration(FD)) { + if (Fn.getName() != FDInlineName && onlyHasInlineBuiltinDeclaration(FD)) { assert(0 && "NYI"); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 6daba84b9c30..c5ac96642d87 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2240,6 +2240,28 @@ LogicalResult MemCpyOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// GetMemberOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult GetMemberOp::verify() { + + const auto recordTy = getAddrTy().getPointee().dyn_cast(); + if (!recordTy) + return emitError() << "expected pointer to a record type"; + + if (recordTy.getMembers().size() <= getIndex()) + return emitError() << "member index out of bounds"; + + // FIXME(cir): Member type check is disabled for classes and incomplete types + // as the codegen for these still need to be patched. + if (!recordTy.isClass() && !recordTy.getBody() && + recordTy.getMembers()[getIndex()] != getResultTy().getPointee()) + return emitError() << "member type mismatch"; + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index a0588402727a..d0f16983bb3a 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -477,12 +477,12 @@ static std::string getVarNameFromValue(mlir::Value v) { if (auto allocaOp = dyn_cast(srcOp)) return allocaOp.getName().str(); - if (auto getElemOp = dyn_cast(srcOp)) { - auto parent = dyn_cast(getElemOp.getStructAddr().getDefiningOp()); + if (auto getElemOp = dyn_cast(srcOp)) { + auto parent = dyn_cast(getElemOp.getAddr().getDefiningOp()); if (parent) { llvm::SmallString<128> finalName; llvm::raw_svector_ostream Out(finalName); - Out << parent.getName() << "." << getElemOp.getMemberName(); + Out << parent.getName() << "." << getElemOp.getName(); return Out.str().str(); } } @@ -1048,12 +1048,12 @@ void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr, // Go through uses of the alloca via `cir.struct_element_addr`, and // track only the fields that are actually used. std::for_each(addr.use_begin(), addr.use_end(), [&](mlir::OpOperand &use) { - auto op = dyn_cast(use.getOwner()); + auto op = dyn_cast(use.getOwner()); if (!op) return; auto eltAddr = op.getResult(); - // If nothing is using this StructElementAddr, don't bother since + // If nothing is using this GetMemberOp, don't bother since // it could lead to even more noisy outcomes. if (eltAddr.use_empty()) return; @@ -1063,7 +1063,7 @@ void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr, // Classify exploded types. Keep alloca original location. classifyAndInitTypeCategories(eltAddr, eltTy, loc, ++nestLevel); - fieldVals[op.getMemberIndex().getZExtValue()] = eltAddr; + fieldVals[op.getIndex()] = eltAddr; }); // In case this aggregate gets initialized at once, the fields need @@ -1135,7 +1135,7 @@ void LifetimeCheckPass::checkCoroTaskStore(StoreOp storeOp) { mlir::Value LifetimeCheckPass::getLambdaFromMemberAccess(mlir::Value addr) { auto op = addr.getDefiningOp(); // FIXME: we likely want to consider more indirections here... - if (!isa(op)) + if (!isa(op)) return nullptr; auto allocaOp = dyn_cast(op->getOperand(0).getDefiningOp()); @@ -1443,7 +1443,7 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, D << "returned lambda captures local variable"; else if (derefStyle == DerefStyle::CallParam || derefStyle == DerefStyle::IndirectCallParam) { - bool isAgg = isa_and_nonnull(addr.getDefiningOp()); + bool isAgg = isa_and_nonnull(addr.getDefiningOp()); D << "passing "; if (!isAgg) D << "invalid pointer"; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f3dcc2cde8c1..011984399c83 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1712,23 +1712,22 @@ class CIRBrOpLowering : public mlir::OpConversionPattern { } }; -class CIRStructElementAddrOpLowering - : public mlir::OpConversionPattern { +class CIRGetMemberOpLowering + : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern< - mlir::cir::StructElementAddr>::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::StructElementAddr op, OpAdaptor adaptor, + matchAndRewrite(mlir::cir::GetMemberOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto llResTy = getTypeConverter()->convertType(op.getType()); // Since the base address is a pointer to structs, the first offset is // always zero. The second offset tell us which member it will access. llvm::SmallVector offset{0, op.getIndex()}; const auto elementTy = getTypeConverter()->convertType( - op.getStructAddr().getType().getPointee()); + op.getAddr().getType().getPointee()); rewriter.replaceOpWithNewOp( - op, llResTy, elementTy, adaptor.getStructAddr(), offset); + op, llResTy, elementTy, adaptor.getAddr(), offset); return mlir::success(); } }; @@ -1784,7 +1783,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRIfLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, - CIRStructElementAddrOpLowering, CIRSwitchOpLowering, + CIRGetMemberOpLowering, CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering>( converter, patterns.getContext()); } diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 78797f3ece6d..e41e0fc72211 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -21,10 +21,10 @@ void test() { // CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 -// CHECK-NEXT: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "storage"}> +// CHECK-NEXT: %2 = cir.get_member %1[0] {name = "storage"} // CHECK-NEXT: %3 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %4 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "size"}> : (!cir.ptr) -> !cir.ptr +// CHECK-NEXT: %4 = cir.get_member %1[1] {name = "size"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.cast(integral, %5 : !s32i), !s64i // CHECK-NEXT: cir.store %6, %4 : !s64i, cir.ptr @@ -36,10 +36,10 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: cir.store %arg1, %1 // CHECK-NEXT: %2 = cir.load %0 -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "storage"}> +// CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} // CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) // CHECK-NEXT: cir.store %4, %3 -// CHECK-NEXT: %5 = "cir.struct_element_addr"(%2) <{member_index = 1 : index, member_name = "size"}> : (!cir.ptr) -> !cir.ptr +// CHECK-NEXT: %5 = cir.get_member %2[1] {name = "size"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i // CHECK-NEXT: %7 = cir.cast(integral, %6 : !s32i), !s64i // CHECK-NEXT: cir.store %7, %5 : !s64i, cir.ptr @@ -52,7 +52,7 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "storage"}> : (!cir.ptr) -> !cir.ptr> +// CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} : !cir.ptr -> !cir.ptr> // CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index c94c0d19c86e..c7286a5cf8f0 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -35,10 +35,10 @@ void use() { yop{}; } // CHECK: cir.func @_Z3usev() // CHECK: %0 = cir.alloca !ty_22yep_22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} -// CHECK: %1 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "Status"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %1 = cir.get_member %0[0] {name = "Status"} : !cir.ptr -> !cir.ptr // CHECK: %2 = cir.const(#cir.int<0> : !u32i) : !u32i // CHECK: cir.store %2, %1 : !u32i, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_index = 1 : index, member_name = "HC"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %3 = cir.get_member %0[1] {name = "HC"} : !cir.ptr -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<0> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr // CHECK: cir.return @@ -68,12 +68,12 @@ void yo() { // CHECK: %1 = cir.alloca !ty_22Yo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} // CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.null : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22Yo22) : !ty_22Yo22 // CHECK: cir.store %2, %0 : !ty_22Yo22, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "type"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %3 = cir.get_member %1[0] {name = "type"} : !cir.ptr -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr -// CHECK: %5 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "next"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %5 = cir.get_member %1[1] {name = "next"} : !cir.ptr -> !cir.ptr> // CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > -// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_index = 2 : index, member_name = "createFlags"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %7 = cir.get_member %1[2] {name = "createFlags"} : !cir.ptr -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !u64i) : !u64i // CHECK: cir.store %8, %7 : !u64i, cir.ptr diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index a2f579fc919f..a7adf5f11502 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -23,7 +23,7 @@ struct String { // Get address of `this->size` - // CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "size"}> + // CHECK: %3 = cir.get_member %2[0] {name = "size"} // Get address of `s` @@ -31,7 +31,7 @@ struct String { // Get the address of s.size - // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_index = 0 : index, member_name = "size"}> + // CHECK: %5 = cir.get_member %4[0] {name = "size"} // Load value from s.size and store in this->size @@ -53,9 +53,9 @@ struct String { // CHECK: cir.store %arg1, %1 : !cir.ptr // CHECK: %3 = cir.load deref %0 : cir.ptr > // CHECK: %4 = cir.load %1 : cir.ptr > - // CHECK: %5 = "cir.struct_element_addr"(%4) <{member_index = 0 : index, member_name = "size"}> + // CHECK: %5 = cir.get_member %4[0] {name = "size"} // CHECK: %6 = cir.load %5 : cir.ptr , !s64i - // CHECK: %7 = "cir.struct_element_addr"(%3) <{member_index = 0 : index, member_name = "size"}> + // CHECK: %7 = cir.get_member %3[0] {name = "size"} // CHECK: cir.store %6, %7 : !s64i, cir.ptr // CHECK: cir.store %3, %2 : !cir.ptr // CHECK: %8 = cir.load %2 : cir.ptr > diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index 5fea2c60dcf5..743e1db42584 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -11,9 +11,9 @@ struct String { // CHECK: cir.store %arg0, %0 // CHECK: cir.store %arg1, %1 // CHECK: %2 = cir.load %0 -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "size"}> +// CHECK: %3 = cir.get_member %2[0] {name = "size"} // CHECK: %4 = cir.load %1 -// CHECK: %5 = "cir.struct_element_addr"(%4) <{member_index = 0 : index, member_name = "size"}> +// CHECK: %5 = cir.get_member %4[0] {name = "size"} // CHECK: %6 = cir.load %5 : cir.ptr , !s64i // CHECK: cir.store %6, %3 : !s64i, cir.ptr // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index fd235935b3ef..7e60edecb01c 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -82,7 +82,7 @@ void C3::Layer::Initialize() { // CHECK: cir.scope { // CHECK: %2 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "m_C1"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %3 = cir.get_member %2[0] {name = "m_C1"} : !cir.ptr -> !cir.ptr> // CHECK: %4 = cir.load %3 : cir.ptr >, !cir.ptr // CHECK: %5 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool @@ -155,4 +155,4 @@ class B : public A { void t() { B b; b.foo(); -} +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index c1e633d750a7..43fc8003d00e 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -26,12 +26,12 @@ void l0() { // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %2 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %3 = cir.load %2 : cir.ptr >, !cir.ptr // CHECK: %4 = cir.load %3 : cir.ptr , !s32i // CHECK: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %6 = cir.binop(add, %4, %5) : !s32i -// CHECK: %7 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr // CHECK: cir.store %6, %8 : !s32i, cir.ptr @@ -50,7 +50,7 @@ auto g() { // CHECK: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK: %2 = cir.const(#cir.int<12> : !s32i) : !s32i // CHECK: cir.store %2, %1 : !s32i, cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > // CHECK: %4 = cir.load %0 : cir.ptr , !ty_22anon223 // CHECK: cir.return %4 : !ty_22anon223 @@ -70,7 +70,7 @@ auto g2() { // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.const(#cir.int<12> : !s32i) : !s32i // CHECK-NEXT: cir.store %2, %1 : !s32i, cir.ptr -// CHECK-NEXT: %3 = "cir.struct_element_addr"(%0) <{member_index = 0 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr> +// CHECK-NEXT: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK-NEXT: cir.store %1, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22anon224 // CHECK-NEXT: cir.return %4 : !ty_22anon224 diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index c4ba0472d978..19dcfa2425d5 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -61,11 +61,11 @@ void init(unsigned numImages) { // CHECK: %13 = cir.alloca !ty_22triple22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} // CHECK: %14 = cir.const(#cir.zero : !ty_22triple22) : !ty_22triple22 // CHECK: cir.store %14, %13 : !ty_22triple22, cir.ptr -// CHECK: %15 = "cir.struct_element_addr"(%13) <{member_index = 0 : index, member_name = "type"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %15 = cir.get_member %13[0] {name = "type"} : !cir.ptr -> !cir.ptr // CHECK: %16 = cir.const(#cir.int<1000024002> : !u32i) : !u32i // CHECK: cir.store %16, %15 : !u32i, cir.ptr -// CHECK: %17 = "cir.struct_element_addr"(%13) <{member_index = 1 : index, member_name = "next"}> : (!cir.ptr) -> !cir.ptr> -// CHECK: %18 = "cir.struct_element_addr"(%13) <{member_index = 2 : index, member_name = "image"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %17 = cir.get_member %13[1] {name = "next"} : !cir.ptr -> !cir.ptr> +// CHECK: %18 = cir.get_member %13[2] {name = "image"} : !cir.ptr -> !cir.ptr // CHECK: %19 = cir.load %7 : cir.ptr >, !cir.ptr // CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 70df084c7fc6..e82d566f4e34 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -98,14 +98,14 @@ void m() { Adv C; } // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "x"}> : (!cir.ptr) -> !cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "w"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %2 = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: %3 = cir.get_member %2[0] {name = "w"} : !cir.ptr -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000024001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr -// CHECK: %5 = "cir.struct_element_addr"(%2) <{member_index = 1 : index, member_name = "n"}> : (!cir.ptr) -> !cir.ptr> +// CHECK: %5 = cir.get_member %2[1] {name = "n"} : !cir.ptr -> !cir.ptr> // CHECK: %6 = cir.const(#cir.null : !cir.ptr) : !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > -// CHECK: %7 = "cir.struct_element_addr"(%2) <{member_index = 2 : index, member_name = "d"}> : (!cir.ptr) -> !cir.ptr +// CHECK: %7 = cir.get_member %2[2] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: cir.store %8, %7 : !s32i, cir.ptr // CHECK: cir.return @@ -164,4 +164,4 @@ void ppp() { Entry x; } // CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr -// CHECK: = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "procAddr"}> : (!cir.ptr) -> !cir.ptr, !cir.ptr)>>> +// CHECK: cir.get_member %1[0] {name = "procAddr"} : !cir.ptr -> !cir.ptr, !cir.ptr)>>> diff --git a/clang/test/CIR/CodeGen/unary-deref.cpp b/clang/test/CIR/CodeGen/unary-deref.cpp index b3f0e7dc1eec..92eb404b1204 100644 --- a/clang/test/CIR/CodeGen/unary-deref.cpp +++ b/clang/test/CIR/CodeGen/unary-deref.cpp @@ -12,6 +12,6 @@ void foo() { // CHECK: cir.func linkonce_odr @_ZNK12MyIntPointer4readEv // CHECK: %2 = cir.load %0 -// CHECK: %3 = "cir.struct_element_addr"(%2) <{member_index = 0 : index, member_name = "ptr"}> +// CHECK: %3 = cir.get_member %2[0] {name = "ptr"} // CHECK: %4 = cir.load deref %3 : cir.ptr > -// CHECK: %5 = cir.load %4 +// CHECK: %5 = cir.load %4 \ No newline at end of file diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index a5b0746fbe9c..a5facb14383d 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -15,9 +15,9 @@ module { %1 = cir.alloca !ty_22S22, cir.ptr , ["x"] {alignment = 4 : i64} // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#STRUCT:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"struct.S", (i8, i32)> - %3 = "cir.struct_element_addr"(%1) <{member_index = 0 : index, member_name = "c"}> : (!cir.ptr) -> !cir.ptr + %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 0] : (!llvm.ptr) -> !llvm.ptr - %5 = "cir.struct_element_addr"(%1) <{member_index = 1 : index, member_name = "i"}> : (!cir.ptr) -> !cir.ptr + %5 = cir.get_member %1[1] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 1] : (!llvm.ptr) -> !llvm.ptr cir.return } From 964be93b8ef2d3c1f714548c6431ea41a4504d84 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 28 Aug 2023 20:45:14 -0300 Subject: [PATCH 1175/2301] [CIR][CIRGen][Bugfix] Update unions to track all members This diverges from the original codegen by tracking all members of a union, instead of just the largest one. This is necessary to support type-checking at the MLIR level when accessing union members. It also preserves more information about the source code, which might be useful. Fixes #224 ghstack-source-id: 8a975426d077a66c49f050741d7362da3c102fed Pull Request resolved: https://github.com/llvm/clangir/pull/229 --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 9 ++- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 10 ++- clang/test/CIR/CodeGen/union.cpp | 66 +++++++++++++++++-- 3 files changed, 74 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 47204c4daf70..5f0032d94c79 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -234,7 +234,14 @@ LValue CIRGenFunction::buildLValueForField(LValue base, unsigned RecordCVR = base.getVRQualifiers(); if (rec->isUnion()) { - // For unions, there is no pointer adjustment. + // NOTE(cir): the element to be loaded/stored need to type-match the + // source/destination, so we emit a GetMemberOp here. + llvm::StringRef fieldName = field->getName(); + unsigned fieldIndex = field->getFieldIndex(); + if (CGM.LambdaFieldToName.count(field)) + fieldName = CGM.LambdaFieldToName[field]; + addr = buildAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); + if (CGM.getCodeGenOpts().StrictVTablePointers && hasAnyVptr(FieldType, getContext())) // Because unions can easily skip invariant.barriers, we need to add diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index c70cf78810cf..fa5ef6f1057a 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -324,16 +324,20 @@ void CIRRecordLowering::lowerUnion() { (getAlignment(FieldType) == getAlignment(StorageType) && getSize(FieldType) > getSize(StorageType))) StorageType = FieldType; + + // NOTE(cir): Track all union member's types, not just the largest one. It + // allows for proper type-checking and retain more info for analisys. + fieldTypes.push_back(FieldType); } // If we have no storage type just pad to the appropriate size and return. if (!StorageType) - return appendPaddingBytes(LayoutSize); + llvm_unreachable("no-storage union NYI"); // If our storage size was bigger than our required size (can happen in the // case of packed bitfields on Itanium) then just use an I8 array. if (LayoutSize < getSize(StorageType)) StorageType = getByteArrayType(LayoutSize); - fieldTypes.push_back(StorageType); - appendPaddingBytes(LayoutSize - getSize(StorageType)); + // NOTE(cir): Defer padding calculations to the lowering process. + // appendPaddingBytes(LayoutSize - getSize(StorageType)); // Set packed if we need it. if (LayoutSize % getAlignment(StorageType)) isPacked = true; diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index e77bed5a4f89..1f53cd60744a 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -6,20 +6,72 @@ typedef union { yolo y; struct { int lifecnt; }; } yolm; typedef union { yolo y; struct { int *lifecnt; int genpad; }; } yolm2; typedef union { yolo y; struct { bool life; int genpad; }; } yolm3; +// CHECK-DAG: !ty_22U23A3ADummy22 = !cir.struct +// CHECK-DAG: !ty_22anon221 = !cir.struct +// CHECK-DAG: !ty_22yolo22 = !cir.struct +// CHECK-DAG: !ty_22anon222 = !cir.struct, !s32i} #cir.recdecl.ast> + +// CHECK-DAG: !ty_22yolm22 = !cir.struct +// CHECK-DAG: !ty_22yolm322 = !cir.struct +// CHECK-DAG: !ty_22yolm222 = !cir.struct + +// Should generate a union type with all members preserved. +union U { + bool b; + short s; + int i; + float f; + double d; +}; +// CHECK-DAG: !ty_22U22 = !cir.struct + +// Should generate unions with complex members. +union U2 { + bool b; + struct Dummy { + short s; + float f; + } s; +} u2; +// CHECK-DAG: !cir.struct + +// Should genereate unions without padding. +union U3 { + short b; + U u; +} u3; +// CHECK-DAG: !ty_22U322 = !cir.struct + void m() { yolm q; yolm2 q2; yolm3 q3; } -// CHECK: !ty_22anon22 = !cir.struct -// CHECK: !ty_22yolo22 = !cir.struct -// CHECK: !ty_22anon221 = !cir.struct, !s32i} #cir.recdecl.ast> - -// CHECK: !ty_22yolm22 = !cir.struct -// CHECK: !ty_22yolm222 = !cir.struct - // CHECK: cir.func @_Z1mv() // CHECK: cir.alloca !ty_22yolm22, cir.ptr , ["q"] {alignment = 4 : i64} // CHECK: cir.alloca !ty_22yolm222, cir.ptr , ["q2"] {alignment = 8 : i64} // CHECK: cir.alloca !ty_22yolm322, cir.ptr , ["q3"] {alignment = 4 : i64} + +void shouldGenerateUnionAccess(union U u) { + u.b = true; + // CHECK: %[[#BASE:]] = cir.get_member %0[0] {name = "b"} : !cir.ptr -> !cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.bool, cir.ptr + u.b; + // CHECK: cir.get_member %0[0] {name = "b"} : !cir.ptr -> !cir.ptr + u.i = 1; + // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !s32i, cir.ptr + u.i; + // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr + u.f = 0.1F; + // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : f32, cir.ptr + u.f; + // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr + u.d = 0.1; + // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : f64, cir.ptr + u.d; + // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr +} From eb02071a327f9045cd2b1bc161915c17741913f1 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 28 Aug 2023 20:45:15 -0300 Subject: [PATCH 1176/2301] [CIR][Lowering] Lower unions Converts a union to a struct containing only its largest element. GetMemberOp for unions is lowered as bitcasts instead of GEPs, since union members share the same address space. ghstack-source-id: 744ac312675b8f3225ccc459fcd09474bcfcfe81 Pull Request resolved: https://github.com/llvm/clangir/pull/230 --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 6 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 34 ++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 74 ++++++++++++++----- clang/test/CIR/Lowering/unions.cir | 42 +++++++++++ 4 files changed, 134 insertions(+), 22 deletions(-) create mode 100644 clang/test/CIR/Lowering/unions.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index abbee1419613..88087f8915ad 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -128,6 +128,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct", // for the struct size and alignment. mutable std::optional size{}, align{}; mutable std::optional padded{}; + mutable mlir::Type largestMember{}; void computeSizeAndAlignment(const ::mlir::DataLayout &dataLayout) const; public: void dropAst(); @@ -141,12 +142,15 @@ def CIR_StructType : CIR_Type<"Struct", "struct", case RecordKind::Class: return "class." + name; case RecordKind::Union: - return "union "+ name; + return "union." + name; case RecordKind::Struct: return "struct." + name; } } + /// Return the member with the largest bit-length. + mlir::Type getLargestMember(const ::mlir::DataLayout &dataLayout) const; + /// Return whether this is a class declaration. bool isClass() const { return getKind() == RecordKind::Class; } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 47691a7fc463..241f570b8719 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -18,6 +18,7 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectImplementation.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Support/LogicalResult.h" #include "llvm/ADT/STLExtras.h" @@ -94,6 +95,15 @@ void BoolType::print(mlir::AsmPrinter &printer) const {} // StructType Definitions //===----------------------------------------------------------------------===// +/// Return the largest member of in the type. +/// +/// Recurses into union members never returning a union as the largest member. +Type StructType::getLargestMember(const ::mlir::DataLayout &dataLayout) const { + if (!largestMember) + computeSizeAndAlignment(dataLayout); + return largestMember; +} + Type StructType::parse(mlir::AsmParser &parser) { const auto loc = parser.getCurrentLocation(); llvm::SmallVector members; @@ -278,7 +288,7 @@ void StructType::computeSizeAndAlignment( const ::mlir::DataLayout &dataLayout) const { assert(!isOpaque() && "Cannot get layout of opaque structs"); // Do not recompute. - if (size || align || padded) + if (size || align || padded || largestMember) return; // This is a similar algorithm to LLVM's StructLayout. @@ -287,11 +297,25 @@ void StructType::computeSizeAndAlignment( [[maybe_unused]] bool isPadded = false; unsigned numElements = getNumElements(); auto members = getMembers(); + unsigned largestMemberSize = 0; // Loop over each of the elements, placing them in memory. for (unsigned i = 0, e = numElements; i != e; ++i) { auto ty = members[i]; + // Found a nested union: recurse into it to fetch its largest member. + auto structMember = ty.dyn_cast(); + if (structMember && structMember.isUnion()) { + auto candidate = structMember.getLargestMember(dataLayout); + if (dataLayout.getTypeSize(candidate) > largestMemberSize) { + largestMember = candidate; + largestMemberSize = dataLayout.getTypeSize(largestMember); + } + } else if (dataLayout.getTypeSize(ty) > largestMemberSize) { + largestMember = ty; + largestMemberSize = dataLayout.getTypeSize(largestMember); + } + // This matches LLVM since it uses the ABI instead of preferred alignment. const llvm::Align tyAlign = llvm::Align(getPacked() ? 1 : dataLayout.getTypeABIAlignment(ty)); @@ -312,6 +336,14 @@ void StructType::computeSizeAndAlignment( structSize += dataLayout.getTypeSize(ty); } + // For unions, the size and aligment is that of the largest element. + if (isUnion()) { + size = largestMemberSize; + align = structAlignment.value(); + padded = false; + return; + } + // Add padding to the end of the struct so that it could be put in an array // and all array elements would be aligned correctly. if (!llvm::isAligned(structAlignment, structSize)) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 011984399c83..ffa3f099d703 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -54,10 +54,12 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/APInt.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" +#include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/Support/Casting.h" @@ -1306,8 +1308,8 @@ class CIRGlobalOpLowering sourceSymbol.getSymName()); llvm::SmallVector offset{0}; auto gepOp = rewriter.create( - loc, llvmType, sourceSymbol.getType(), - addressOfOp.getResult(), offset); + loc, llvmType, sourceSymbol.getType(), addressOfOp.getResult(), + offset); rewriter.create(loc, gepOp.getResult()); return mlir::success(); } else if (isa(init.value())) { @@ -1721,14 +1723,30 @@ class CIRGetMemberOpLowering matchAndRewrite(mlir::cir::GetMemberOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto llResTy = getTypeConverter()->convertType(op.getType()); - // Since the base address is a pointer to structs, the first offset is - // always zero. The second offset tell us which member it will access. - llvm::SmallVector offset{0, op.getIndex()}; - const auto elementTy = getTypeConverter()->convertType( - op.getAddr().getType().getPointee()); - rewriter.replaceOpWithNewOp( - op, llResTy, elementTy, adaptor.getAddr(), offset); - return mlir::success(); + const auto structTy = + op.getAddrTy().getPointee().cast(); + assert(structTy && "expected struct type"); + + switch (structTy.getKind()) { + case mlir::cir::StructType::Struct: { + // Since the base address is a pointer to an aggregate, the first offset + // is always zero. The second offset tell us which member it will access. + llvm::SmallVector offset{0, op.getIndex()}; + const auto elementTy = getTypeConverter()->convertType(structTy); + rewriter.replaceOpWithNewOp(op, llResTy, elementTy, + adaptor.getAddr(), offset); + return mlir::success(); + } + case mlir::cir::StructType::Union: + // Union members share the address space, so we just need a bitcast to + // conform to type-checking. + rewriter.replaceOpWithNewOp(op, llResTy, + adaptor.getAddr()); + return mlir::success(); + default: + return op.emitError() + << "struct kind '" << structTy.getKind() << "' is NYI"; + } } }; @@ -1789,7 +1807,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, } namespace { -void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { +void prepareTypeConverter(mlir::LLVMTypeConverter &converter, + mlir::DataLayout &dataLayout) { converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { return mlir::LLVM::LLVMPointerType::get(&converter.getContext()); }); @@ -1814,9 +1833,24 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter) { return mlir::LLVM::LLVMFunctionType::get(result, arguments, varArg); }); converter.addConversion([&](mlir::cir::StructType type) -> mlir::Type { + // FIXME(cir): create separate unions, struct, and classes types. + // Convert struct members. llvm::SmallVector llvmMembers; - for (auto ty : type.getMembers()) - llvmMembers.push_back(converter.convertType(ty)); + switch (type.getKind()) { + case mlir::cir::StructType::Class: + // TODO(cir): This should be properly validated. + case mlir::cir::StructType::Struct: + for (auto ty : type.getMembers()) + llvmMembers.push_back(converter.convertType(ty)); + break; + // Unions are lowered as only the largest member. + case mlir::cir::StructType::Union: { + auto largestMember = type.getLargestMember(dataLayout); + if (largestMember) + llvmMembers.push_back(converter.convertType(largestMember)); + break; + } + } // Struct has a name: lower as an identified struct. mlir::LLVM::LLVMStructType llvmStruct; @@ -1847,7 +1881,7 @@ static void buildCtorList(mlir::ModuleOp module) { assert(attr.isa() && "must be a GlobalCtorAttr"); if (auto ctorAttr = attr.cast()) { - // default priority is 65536 + // default priority is 65536 int priority = 65536; if (ctorAttr.getPriority()) priority = *ctorAttr.getPriority(); @@ -1885,15 +1919,15 @@ static void buildCtorList(mlir::ModuleOp module) { newGlobalOp.getRegion().push_back(new mlir::Block()); builder.setInsertionPointToEnd(newGlobalOp.getInitializerBlock()); - mlir::Value result = builder.create( - loc, CtorStructArrayTy); + mlir::Value result = + builder.create(loc, CtorStructArrayTy); for (uint64_t I = 0; I < globalCtors.size(); I++) { auto fn = globalCtors[I]; mlir::Value structInit = builder.create(loc, CtorStructTy); - mlir::Value initPriority = - builder.create(loc, CtorStructFields[0], fn.second); + mlir::Value initPriority = builder.create( + loc, CtorStructFields[0], fn.second); mlir::Value initFuncAddr = builder.create( loc, CtorStructFields[1], fn.first); mlir::Value initAssociate = @@ -1914,9 +1948,9 @@ static void buildCtorList(mlir::ModuleOp module) { void ConvertCIRToLLVMPass::runOnOperation() { auto module = getOperation(); - + mlir::DataLayout dataLayout(module); mlir::LLVMTypeConverter converter(&getContext()); - prepareTypeConverter(converter); + prepareTypeConverter(converter, dataLayout); mlir::RewritePatternSet patterns(&getContext()); diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir new file mode 100644 index 000000000000..c5ee736c4a7d --- /dev/null +++ b/clang/test/CIR/Lowering/unions.cir @@ -0,0 +1,42 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s16i = !cir.int +!s32i = !cir.int +#true = #cir.bool : !cir.bool +!ty_22U122 = !cir.struct +!ty_22U222 = !cir.struct +!ty_22U322 = !cir.struct +module { + // Should lower union to struct with only the largest member. + cir.global external @u1 = #cir.zero : !ty_22U122 + // CHECK: llvm.mlir.global external @u1() {addr_space = 0 : i32} : !llvm.struct<"union.U1", (i32)> + + // Should recursively find the largest member if there are nested unions. + cir.global external @u2 = #cir.zero : !ty_22U222 + cir.global external @u3 = #cir.zero : !ty_22U322 + // CHECK: llvm.mlir.global external @u2() {addr_space = 0 : i32} : !llvm.struct<"union.U2", (f64)> + // CHECK: llvm.mlir.global external @u3() {addr_space = 0 : i32} : !llvm.struct<"union.U3", (i32)> + + // CHECK: llvm.func @test + cir.func @test(%arg0: !cir.ptr) { + + // Should store directly to the union's base address. + %5 = cir.const(#true) : !cir.bool + %6 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr + cir.store %5, %6 : !cir.bool, cir.ptr + // CHECK: %[[#VAL:]] = llvm.mlir.constant(1 : i8) : i8 + // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. + // CHECK: %[[#ADDR:]] = llvm.bitcast %{{.+}} : !llvm.ptr to !llvm.ptr + // CHECK: llvm.store %[[#VAL]], %[[#ADDR]] : i8, !llvm.ptr + + // Should load direclty from the union's base address. + %7 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr + %8 = cir.load %7 : cir.ptr , !cir.bool + // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. + // CHECK: %[[#BASE:]] = llvm.bitcast %{{.+}} : !llvm.ptr to !llvm.ptr + // CHECK: %{{.+}} = llvm.load %[[#BASE]] : !llvm.ptr + + cir.return + } +} From 010af127c0ebd0edc9381629ea53c5f4d5c8a41e Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Mon, 28 Aug 2023 21:03:06 -0700 Subject: [PATCH 1177/2301] [CIR][Codegen] Destructor support for global variable initialization (#241) Similar with the previous ctor support, I'm bringing up the dtor support for global var initialization. This change only contains the CIR early codegen work. The upcoming lowering prepare work will be in a separate change. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 10 +-- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 57 ++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 10 +++ clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp | 8 ++- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 62 +++++++++++++++++-- .../Dialect/Transforms/LoweringPrepare.cpp | 8 +++ clang/test/CIR/CodeGen/static.cpp | 10 ++- clang/test/CIR/IR/global.cir | 4 ++ clang/test/CIR/IR/invalid.cir | 22 +++++++ 10 files changed, 180 insertions(+), 14 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8296030dce3c..f9ac95fcef85 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1283,19 +1283,19 @@ def GlobalOp : CIR_Op<"global", [Symbol, DeclareOpInterfaceMethods:$alignment, OptionalAttr:$ast ); - let regions = (region AnyRegion:$ctorRegion); + let regions = (region AnyRegion:$ctorRegion, AnyRegion:$dtorRegion); let assemblyFormat = [{ ($sym_visibility^)? (`constant` $constant^)? $linkage $sym_name - custom($sym_type, $initial_value, $ctorRegion) + custom($sym_type, $initial_value, $ctorRegion, $dtorRegion) attr-dict }]; let extraClassDeclaration = [{ bool isDeclaration() { - return !getInitialValue() && getCtorRegion().empty(); + return !getInitialValue() && getCtorRegion().empty() && getDtorRegion().empty(); } bool hasInitializer() { return !isDeclaration(); } bool hasAvailableExternallyLinkage() { @@ -1323,7 +1323,9 @@ def GlobalOp : CIR_Op<"global", [Symbol, DeclareOpInterfaceMethods:$linkage, CArg<"function_ref", - "nullptr">:$ctorBuilder)> + "nullptr">:$ctorBuilder, + CArg<"function_ref", + "nullptr">:$dtorBuilder)> ]; let hasVerifier = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 3a7642f38a03..d6c33dcd5ce7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -12,10 +12,13 @@ // We might split this into multiple files if it gets too unwieldy +#include "CIRGenCXXABI.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "clang/AST/GlobalDecl.h" +#include "llvm/Support/ErrorHandling.h" +#include using namespace clang; using namespace cir; @@ -48,6 +51,39 @@ static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, } } +static void buildDeclDestory(CIRGenFunction &CGF, const VarDecl *D, + Address DeclPtr) { + // Honor __attribute__((no_destroy)) and bail instead of attempting + // to emit a reference to a possibly nonexistent destructor, which + // in turn can cause a crash. This will result in a global constructor + // that isn't balanced out by a destructor call as intended by the + // attribute. This also checks for -fno-c++-static-destructors and + // bails even if the attribute is not present. + assert(D->needsDestruction(CGF.getContext()) == QualType::DK_cxx_destructor); + + auto &CGM = CGF.CGM; + + // If __cxa_atexit is disabled via a flag, a different helper function is + // generated elsewhere which uses atexit instead, and it takes the destructor + // directly. + auto UsingExternalHelper = CGM.getCodeGenOpts().CXAAtExit; + QualType type = D->getType(); + const CXXRecordDecl *Record = type->getAsCXXRecordDecl(); + bool CanRegisterDestructor = + Record && (!CGM.getCXXABI().HasThisReturn( + GlobalDecl(Record->getDestructor(), Dtor_Complete)) || + CGM.getCXXABI().canCallMismatchedFunctionType()); + if (Record && (CanRegisterDestructor || UsingExternalHelper)) { + assert(!D->getTLSKind() && "TLS NYI"); + CXXDestructorDecl *Dtor = Record->getDestructor(); + CGM.getCXXABI().buildDestructorCall(CGF, Dtor, Dtor_Complete, + /*ForVirtualBase=*/false, + /*Delegating=*/false, DeclPtr, type); + } else { + llvm_unreachable("array destructors not yet supported!"); + } +} + mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { const auto &FnInfo = getTypes().arrangeCXXStructorDeclaration(GD); auto Fn = getAddrOfCXXStructor(GD, &FnInfo, /*FnType=*/nullptr, @@ -68,11 +104,16 @@ mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { } void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, - mlir::cir::GlobalOp Addr) { + mlir::cir::GlobalOp Addr, + bool NeedsCtor, + bool NeedsDtor) { + assert(D && " Expected a global declaration!"); CIRGenFunction CGF{*this, builder, true}; CurCGF = &CGF; CurCGF->CurFn = Addr; - { + Addr.setAstAttr(mlir::cir::ASTVarDeclAttr::get(builder.getContext(), D)); + + if (NeedsCtor) { mlir::OpBuilder::InsertionGuard guard(builder); auto block = builder.createBlock(&Addr.getCtorRegion()); builder.setInsertionPointToStart(block); @@ -80,7 +121,17 @@ void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, buildDeclInit(CGF, D, DeclAddr); builder.setInsertionPointToEnd(block); builder.create(Addr->getLoc()); - Addr.setAstAttr(mlir::cir::ASTVarDeclAttr::get(builder.getContext(), D)); } + + if (NeedsDtor) { + mlir::OpBuilder::InsertionGuard guard(builder); + auto block = builder.createBlock(&Addr.getDtorRegion()); + builder.setInsertionPointToStart(block); + Address DeclAddr(getAddrOfGlobalVar(D), getASTContext().getDeclAlign(D)); + buildDeclDestory(CGF, D, DeclAddr); + builder.setInsertionPointToEnd(block); + builder.create(Addr->getLoc()); + } + CurCGF = nullptr; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index e6d4012a110c..d01a4b4b88e8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -258,6 +258,16 @@ class CIRGenCXXABI { return false; } + /// Returns true if the target allows calling a function through a pointer + /// with a different signature than the actual function (or equivalently, + /// bitcasting a function or function pointer to a different function type). + /// In principle in the most general case this could depend on the target, the + /// calling convention, and the actual types of the arguments and return + /// value. Here it just means whether the signature mismatch could *ever* be + /// allowed; in other words, does the target do strict checking of signatures + /// for all calls. + virtual bool canCallMismatchedFunctionType() const { return true; } + virtual ~CIRGenCXXABI(); void setCXXABIThisValue(CIRGenFunction &CGF, mlir::Value ThisPtr); diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp index ee3426699541..3d8c72dd7f5e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp @@ -73,7 +73,13 @@ void CIRGenModule::buildGlobalVarDeclInit(const VarDecl *D, assert(!UnimplementedFeature::addressSpace()); if (!T->isReferenceType()) { - codegenGlobalInitCxxStructor(D, Addr); + bool NeedsDtor = + D->needsDestruction(getASTContext()) == QualType::DK_cxx_destructor; + assert(!isTypeConstant(D->getType(), true, !NeedsDtor) && + "invaraint-typed initialization NYI"); + + if (PerformInit || NeedsDtor) + codegenGlobalInitCxxStructor(D, Addr, PerformInit, NeedsDtor); return; } } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 6dfdecfc5633..ee95e501be85 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -514,7 +514,8 @@ class CIRGenModule : public CIRGenTypeCache { // Produce code for this constructor/destructor for global initialzation. void codegenGlobalInitCxxStructor(const clang::VarDecl *D, - mlir::cir::GlobalOp Addr); + mlir::cir::GlobalOp Addr, bool NeedsCtor, + bool NeedsDtor); bool lookupRepresentativeDecl(llvm::StringRef MangledName, clang::GlobalDecl &Result) const; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index c5ac96642d87..fa019065804d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1148,7 +1148,8 @@ LogicalResult LoopOp::verify() { static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, TypeAttr type, Attribute initAttr, - mlir::Region &ctorRegion) { + mlir::Region &ctorRegion, + mlir::Region &dtorRegion) { auto printType = [&]() { p << ": " << type; }; if (!op.isDeclaration()) { p << "= "; @@ -1167,6 +1168,12 @@ static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, printType(); } + if (!dtorRegion.empty()) { + p << " dtor "; + p.printRegion(dtorRegion, + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/false); + } } else { printType(); } @@ -1175,7 +1182,8 @@ static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, Attribute &initialValueAttr, - mlir::Region &ctorRegion) { + mlir::Region &ctorRegion, + mlir::Region &dtorRegion) { mlir::Type opTy; if (parser.parseOptionalEqual().failed()) { // Absence of equal means a declaration, so we need to parse the type. @@ -1219,6 +1227,24 @@ static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, opTy = typedAttr.getType(); } } + + // Parse destructor, example: + // dtor { ... } + if (!parser.parseOptionalKeyword("dtor")) { + auto parseLoc = parser.getCurrentLocation(); + if (parser.parseRegion(dtorRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + if (!dtorRegion.hasOneBlock()) + return parser.emitError(parser.getCurrentLocation(), + "dtor region must have exactly one block"); + if (dtorRegion.back().empty()) + return parser.emitError(parser.getCurrentLocation(), + "dtor region shall not be empty"); + if (checkBlockTerminator(parser, parseLoc, + dtorRegion.back().back().getLoc(), &dtorRegion) + .failed()) + return failure(); + } } typeAttr = TypeAttr::get(opTy); @@ -1248,6 +1274,20 @@ LogicalResult GlobalOp::verify() { } } + // Verify that the destructor region, if present, has only one block which is + // not empty. + auto &dtorRegion = getDtorRegion(); + if (!dtorRegion.empty()) { + if (!dtorRegion.hasOneBlock()) { + return emitError() << "dtor region must have exactly one block."; + } + + auto &block = dtorRegion.front(); + if (block.empty()) { + return emitError() << "dtor region shall not be empty."; + } + } + if (std::optional alignAttr = getAlignment()) { uint64_t alignment = alignAttr.value(); if (!llvm::isPowerOf2_64(alignment)) @@ -1288,7 +1328,8 @@ LogicalResult GlobalOp::verify() { void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, StringRef sym_name, Type sym_type, bool isConstant, cir::GlobalLinkageKind linkage, - function_ref ctorBuilder) { + function_ref ctorBuilder, + function_ref dtorBuilder) { odsState.addAttribute(getSymNameAttrName(odsState.name), odsBuilder.getStringAttr(sym_name)); odsState.addAttribute(getSymTypeAttrName(odsState.name), @@ -1306,6 +1347,12 @@ void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, odsBuilder.createBlock(ctorRegion); ctorBuilder(odsBuilder, odsState.location); } + + Region *dtorRegion = odsState.addRegion(); + if (dtorBuilder) { + odsBuilder.createBlock(dtorRegion); + dtorBuilder(odsBuilder, odsState.location); + } } /// Given the region at `index`, or the parent operation if `index` is None, @@ -1315,7 +1362,7 @@ void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, /// not a constant. void GlobalOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { - // The only region always branch back to the parent operation. + // The `ctor` and `dtor` regions always branch back to the parent operation. if (!point.isParent()) { regions.push_back(RegionSuccessor()); return; @@ -1326,9 +1373,16 @@ void GlobalOp::getSuccessorRegions(mlir::RegionBranchPoint point, if (ctorRegion->empty()) ctorRegion = nullptr; + // Don't consider the dtor region if it is empty. + Region *dtorRegion = &this->getCtorRegion(); + if (dtorRegion->empty()) + dtorRegion = nullptr; + // If the condition isn't constant, both regions may be executed. if (ctorRegion) regions.push_back(RegionSuccessor(ctorRegion)); + if (dtorRegion) + regions.push_back(RegionSuccessor(dtorRegion)); } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 2e1eaa05a8e7..c87cf4798a0f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -18,6 +18,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/Twine.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Path.h" using namespace mlir; @@ -130,6 +131,13 @@ void LoweringPreparePass::lowerGlobalOp(GlobalOp op) { "custom initialization priority NYI"); dynamicInitializers.push_back(f); } + + auto &dtorRegion = op.getDtorRegion(); + if (!dtorRegion.empty()) { + // TODO: handle destructor + // Clear the dtor region + dtorRegion.getBlocks().clear(); + } } void LoweringPreparePass::buildCXXGlobalInitFunc() { diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index fcdb54afefec..40bd1715d67d 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -7,7 +7,7 @@ class Init { public: Init(bool a) ; - + ~Init(); private: static bool _S_synced_with_stdio; }; @@ -18,21 +18,29 @@ static Init __ioinit2(false); // BEFORE: module {{.*}} { // BEFORE-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// BEFORE-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) // BEFORE-NEXT: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { // BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr // BEFORE-NEXT: %1 = cir.const(#true) : !cir.bool // BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// BEFORE-NEXT: } dtor { +// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () // BEFORE-NEXT: } {ast = #cir.vardecl.ast} // BEFORE: cir.global "private" internal @_ZL9__ioinit2 = ctor : !ty_22Init22 { // BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr // BEFORE-NEXT: %1 = cir.const(#false) : !cir.bool // BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// BEFORE-NEXT: } dtor { +// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () // BEFORE-NEXT: } {ast = #cir.vardecl.ast} // BEFORE-NEXT: } // AFTER: module {{.*}} attributes {{.*}}cir.globalCtors = [#cir.globalCtor<"__cxx_global_var_init">, #cir.globalCtor<"__cxx_global_var_init.1">] // AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// AFTER-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) // AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.vardecl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init() // AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 378fdd4ba85b..b673483de498 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -33,10 +33,14 @@ module { #cir.global_view<@type_info_A> : !cir.ptr}> : !cir.struct, !cir.ptr, !cir.ptr}> cir.func private @_ZN4InitC1Eb(!cir.ptr, !s8i) + cir.func private @_ZN4InitD1Ev(!cir.ptr) cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { %0 = cir.get_global @_ZL8__ioinit : cir.ptr %1 = cir.const(#cir.int<3> : !s8i) : !s8i cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () + } dtor { + %0 = cir.get_global @_ZL8__ioinit : cir.ptr + cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () } } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 5c6e652e1f56..58dbaf0507c8 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -485,3 +485,25 @@ module { cir.return } } + +// ----- +!s8i = !cir.int +!ty_22Init22 = !cir.struct +module { + cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { + } + // expected-error@+1 {{custom op 'cir.global' ctor region must have exactly one block}} +} + +// ----- +!s8i = !cir.int +#true = #cir.bool : !cir.bool +!ty_22Init22 = !cir.struct +module { + cir.func private @_ZN4InitC1Eb(!cir.ptr) + cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { + %0 = cir.get_global @_ZL8__ioinit : cir.ptr + cir.call @_ZN4InitC1Eb(%0) : (!cir.ptr) -> () + } dtor {} + // expected-error@+1 {{custom op 'cir.global' dtor region must have exactly one block}} +} From f564ab9d48f5bafec1bce7e2bb7553f88b9da001 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 25 Aug 2023 11:21:45 +0300 Subject: [PATCH 1178/2301] [CIR][CIRGen] fixes explicit cast and minor bug in unary & operator --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 19 ++++++++++++++++--- clang/test/CIR/CodeGen/cast.cpp | 9 +++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 5f0032d94c79..63a0f2c27743 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -135,8 +135,21 @@ static Address buildPointerWithAlignment(const Expr *E, if (BaseInfo) *BaseInfo = InnerBaseInfo; - if (isa(CE)) { - llvm_unreachable("NYI"); + if (isa(CE)) { + assert(!UnimplementedFeature::tbaa()); + LValueBaseInfo TargetTypeBaseInfo; + + CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( + E->getType(), &TargetTypeBaseInfo); + + // If the source l-value is opaque, honor the alignment of the + // casted-to type. + if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { + if (BaseInfo) + BaseInfo->mergeForCast(TargetTypeBaseInfo); + Addr = Address(Addr.getPointer(), Addr.getElementType(), Align, + IsKnownNonNull); + } } if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) && @@ -187,7 +200,7 @@ static Address buildPointerWithAlignment(const Expr *E, LValue LV = CGF.buildLValue(UO->getSubExpr()); if (BaseInfo) *BaseInfo = LV.getBaseInfo(); - assert(UnimplementedFeature::tbaa()); + assert(!UnimplementedFeature::tbaa()); return LV.getAddress(); } } diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 7ca5f13a4c52..1ce940629a3c 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -95,3 +95,12 @@ void call_cptr(void *d) { // CHECK: %2 = cir.call @_Z4cptrPv(%1) : (!cir.ptr) -> !cir.bool // CHECK: %3 = cir.unary(not, %2) : !cir.bool, !cir.bool // CHECK: cir.if %3 { + +void lvalue_cast(int x) { + *(int *)&x = 42; +} + +// CHECK: cir.func @_Z11lvalue_cast +// CHECK: %1 = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK: cir.store %1, %0 : !s32i, cir.ptr + From 0a668954bd330d99cc9ab9e5984aeb7c41783c75 Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Thu, 24 Aug 2023 14:34:22 +0300 Subject: [PATCH 1179/2301] [CIR][Lowering] Support lowering of nested string ConstantArrays. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 25 ++++++++++++++++--- clang/test/CIR/CodeGen/globals.c | 9 ++++++- clang/test/CIR/Lowering/globals.cir | 3 +++ 4 files changed, 34 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index b0621f61accf..55bf9e3d4a72 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -247,6 +247,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { } if (const auto arrayVal = attr.dyn_cast()) { + if (arrayVal.getElts().isa()) + return false; for (const auto elt : arrayVal.getElts().cast()) { if (!isNullValue(elt)) return false; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ffa3f099d703..5af8a09566b2 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -143,12 +143,29 @@ mlir::Value lowerCirAttrAsValue(mlir::cir::ConstArrayAttr constArr, const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(constArr.getType()); mlir::Value result = rewriter.create(loc, llvmTy); - auto arrayAttr = constArr.getElts().cast(); // Iteratively lower each constant element of the array. - for (auto [idx, elt] : llvm::enumerate(arrayAttr)) { - mlir::Value init = lowerCirAttrAsValue(elt, loc, rewriter, converter); - result = rewriter.create(loc, result, init, idx); + if (auto arrayAttr = constArr.getElts().dyn_cast()) { + for (auto [idx, elt] : llvm::enumerate(arrayAttr)) { + mlir::Value init = lowerCirAttrAsValue(elt, loc, rewriter, converter); + result = + rewriter.create(loc, result, init, idx); + } + } + // TODO(cir): this diverges from traditional lowering. Normally the string + // would be a global constant that is memcopied. + else if (auto strAttr = constArr.getElts().dyn_cast()) { + auto arrayTy = strAttr.getType().dyn_cast(); + assert(arrayTy && "String attribute must have an array type"); + auto eltTy = arrayTy.getEltType(); + for (auto [idx, elt] : llvm::enumerate(strAttr)) { + auto init = rewriter.create( + loc, converter->convertType(eltTy), elt); + result = + rewriter.create(loc, result, init, idx); + } + } else { + llvm_unreachable("unexpected ConstArrayAttr elements"); } return result; diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index b72b0a09b625..cb5e0d978d8d 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -34,7 +34,14 @@ struct { int x; int y[2][2]; } nestedTwoDim = {1, {{2, 3}, {4, 5}}}; -// CHECK: cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22anon22 +// CHECK: cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> + +struct { + char x[3]; + char y[3]; + char z[3]; +} nestedString = {"1", "", "\0"}; +// CHECK: cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array}> // TODO: test tentatives with internal linkage. diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 9833f54c3f62..7eb3d772ede3 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -12,6 +12,7 @@ !u8i = !cir.int !ty_22A22 = !cir.struct x 2>} #cir.recdecl.ast> !ty_22Bar22 = !cir.struct +!ty_22StringStruct22 = !cir.struct, !cir.array, !cir.array} #cir.recdecl.ast> module { cir.global external @a = #cir.int<3> : !s32i @@ -91,6 +92,8 @@ module { // LLVM: @twoDim = global [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 1, i32 2], [2 x i32] [i32 3, i32 4{{\]\]}} cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22A22 // LLVM: @nestedTwoDim = global %struct.A { i32 1, [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 2, i32 3], [2 x i32] [i32 4, i32 5{{\]\]}} } + cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array}> : !ty_22StringStruct22 + // LLVM: @nestedString = global %struct.StringStruct { [3 x i8] c"1\00\00", [3 x i8] zeroinitializer, [3 x i8] zeroinitializer } cir.func @_Z11get_globalsv() { %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} %1 = cir.alloca !cir.ptr, cir.ptr >, ["u", init] {alignment = 8 : i64} From a3924b9e5f20829b893290c3a972adfe29011458 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Wed, 30 Aug 2023 15:53:40 -0700 Subject: [PATCH 1180/2301] [CIR][Lowering] Global destructor support (#249) This change adds lowering support for global variable definition with destructor. For example: ``` cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { %0 = cir.get_global @_ZL8__ioinit : cir.ptr %1 = cir.const(#true) : !cir.bool cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () } dtor { %0 = cir.get_global @_ZL8__ioinit : cir.ptr cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () } ``` is now lowered to ``` cir.func internal private @__cxx_global_var_init() { %0 = cir.get_global @_ZL8__ioinit : cir.ptr %1 = cir.const(#true) : !cir.bool cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () %2 = cir.get_global @_ZL8__ioinit : cir.ptr %3 = cir.get_global @_ZN4InitD1Ev : cir.ptr )>> %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr %6 = cir.get_global @__dso_handle : cir.ptr cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () cir.return } ``` Note that instead calling the destructor in the global initializer function, a registration with `__cxa_atexit` is done instead so that the destructor will be called right before the program is shut down. --- .../Dialect/Transforms/LoweringPrepare.cpp | 168 ++++++++++++++---- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 5 +- clang/test/CIR/CodeGen/static.cpp | 17 ++ 3 files changed, 154 insertions(+), 36 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index c87cf4798a0f..6ed1846bf277 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -17,6 +17,7 @@ #include "clang/CIR/Dialect/Passes.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Path.h" @@ -44,6 +45,16 @@ static SmallString<128> getTransformedFileName(ModuleOp theModule) { return FileName; } +/// Return the FuncOp called by `callOp`. +static cir::FuncOp getCalledFunction(cir::CallOp callOp) { + SymbolRefAttr sym = + llvm::dyn_cast_if_present(callOp.getCallableForCallee()); + if (!sym) + return nullptr; + return dyn_cast_or_null( + SymbolTable::lookupNearestSymbolFrom(callOp, sym)); +} + namespace { struct LoweringPreparePass : public LoweringPrepareBase { LoweringPreparePass() = default; @@ -58,6 +69,18 @@ struct LoweringPreparePass : public LoweringPrepareBase { /// Build a module init function that calls all the dynamic initializers. void buildCXXGlobalInitFunc(); + cir::FuncOp + buildRuntimeFunction(mlir::OpBuilder &builder, llvm::StringRef name, + mlir::Location loc, mlir::cir::FuncType type, + mlir::cir::GlobalLinkageKind linkage = + mlir::cir::GlobalLinkageKind::ExternalLinkage); + + cir::GlobalOp + buildRuntimeVariable(mlir::OpBuilder &Builder, llvm::StringRef Name, + mlir::Location Loc, mlir::Type type, + mlir::cir::GlobalLinkageKind Linkage = + mlir::cir::GlobalLinkageKind::ExternalLinkage); + /// /// AST related /// ----------- @@ -74,13 +97,48 @@ struct LoweringPreparePass : public LoweringPrepareBase { }; } // namespace +cir::GlobalOp LoweringPreparePass::buildRuntimeVariable( + mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, + mlir::Type type, mlir::cir::GlobalLinkageKind linkage) { + cir::GlobalOp g = + dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( + theModule, StringAttr::get(theModule->getContext(), name))); + if (!g) { + g = builder.create(loc, name, type); + g.setLinkageAttr( + mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage)); + mlir::SymbolTable::setSymbolVisibility( + g, mlir::SymbolTable::Visibility::Private); + } + return g; +} + +cir::FuncOp LoweringPreparePass::buildRuntimeFunction( + mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, + mlir::cir::FuncType type, mlir::cir::GlobalLinkageKind linkage) { + cir::FuncOp f = + dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( + theModule, StringAttr::get(theModule->getContext(), name))); + if (!f) { + f = builder.create(loc, name, type); + f.setLinkageAttr( + mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage)); + mlir::SymbolTable::setSymbolVisibility( + f, mlir::SymbolTable::Visibility::Private); + mlir::NamedAttrList attrs; + f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), attrs.getDictionary(builder.getContext()))); + } + return f; +} + cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { + auto varDecl = op.getAst()->getAstDecl(); SmallString<256> fnName; { std::unique_ptr MangleCtx( astCtx->createMangleContext()); llvm::raw_svector_ostream Out(fnName); - auto varDecl = op.getAst()->getAstDecl(); MangleCtx->mangleDynamicInitializer(varDecl, Out); // Name numbering uint32_t cnt = dynamicInitializerNames[fnName]++; @@ -91,25 +149,78 @@ cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { // Create a variable initialization function. mlir::OpBuilder builder(&getContext()); builder.setInsertionPointAfter(op); - auto fnType = mlir::cir::FuncType::get( - {}, mlir::cir::VoidType::get(builder.getContext())); - FuncOp f = builder.create(op.getLoc(), fnName, fnType); - f.setLinkageAttr(mlir::cir::GlobalLinkageKindAttr::get( - builder.getContext(), mlir::cir::GlobalLinkageKind::InternalLinkage)); - mlir::SymbolTable::setSymbolVisibility( - f, mlir::SymbolTable::Visibility::Private); - mlir::NamedAttrList attrs; - f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), attrs.getDictionary(builder.getContext()))); - - // move over the initialzation code of the ctor region. + auto voidTy = ::mlir::cir::VoidType::get(builder.getContext()); + auto fnType = mlir::cir::FuncType::get({}, voidTy); + FuncOp f = + buildRuntimeFunction(builder, fnName, op.getLoc(), fnType, + mlir::cir::GlobalLinkageKind::InternalLinkage); + + // Move over the initialzation code of the ctor region. auto &block = op.getCtorRegion().front(); - mlir::Block *EntryBB = f.addEntryBlock(); - EntryBB->getOperations().splice(EntryBB->begin(), block.getOperations(), + mlir::Block *entryBB = f.addEntryBlock(); + entryBB->getOperations().splice(entryBB->begin(), block.getOperations(), block.begin(), std::prev(block.end())); + + // Register the destructor call with __cxa_atexit + + assert(varDecl->getTLSKind() == clang::VarDecl::TLS_None && " TLS NYI"); + // Create a variable that binds the atexit to this shared object. + builder.setInsertionPointToStart(&theModule.getBodyRegion().front()); + auto Handle = buildRuntimeVariable(builder, "__dso_handle", op.getLoc(), + builder.getI8Type()); + + // Look for the destructor call in dtorBlock + auto &dtorBlock = op.getDtorRegion().front(); + mlir::cir::CallOp dtorCall; + for (auto op : reverse(dtorBlock.getOps())) { + dtorCall = op; + break; + } + assert(dtorCall && "Expected a dtor call"); + cir::FuncOp dtorFunc = getCalledFunction(dtorCall); + assert(dtorFunc && + isa(dtorFunc.getAst()->getAstDecl()) && + "Expected a dtor call"); + + // Create a runtime helper function: + // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); + auto voidPtrTy = ::mlir::cir::PointerType::get(builder.getContext(), voidTy); + auto voidFnTy = mlir::cir::FuncType::get({voidPtrTy}, voidTy); + auto voidFnPtrTy = + ::mlir::cir::PointerType::get(builder.getContext(), voidFnTy); + auto HandlePtrTy = + mlir::cir::PointerType::get(builder.getContext(), Handle.getSymType()); + auto fnAtExitType = + mlir::cir::FuncType::get({voidFnPtrTy, voidPtrTy, HandlePtrTy}, + mlir::cir::VoidType::get(builder.getContext())); + const char *nameAtExit = "__cxa_atexit"; + FuncOp fnAtExit = + buildRuntimeFunction(builder, nameAtExit, op.getLoc(), fnAtExitType); + + // Replace the dtor call with a call to __cxa_atexit(&dtor, &var, &__dso_handle) + builder.setInsertionPointAfter(dtorCall); + mlir::Value args[3]; + auto dtorPtrTy = mlir::cir::PointerType::get(builder.getContext(), + dtorFunc.getFunctionType()); + // dtorPtrTy + args[0] = builder.create(dtorCall.getLoc(), dtorPtrTy, + dtorFunc.getSymName()); + args[0] = builder.create( + dtorCall.getLoc(), voidFnPtrTy, mlir::cir::CastKind::bitcast, args[0]); + args[1] = builder.create(dtorCall.getLoc(), voidPtrTy, + mlir::cir::CastKind::bitcast, + dtorCall.getArgOperand(0)); + args[2] = builder.create(Handle.getLoc(), HandlePtrTy, + Handle.getSymName()); + builder.create(dtorCall.getLoc(), fnAtExit, args); + dtorCall->erase(); + entryBB->getOperations().splice(entryBB->end(), dtorBlock.getOperations(), + dtorBlock.begin(), + std::prev(dtorBlock.end())); + // Replace cir.yield with cir.return - builder.setInsertionPointToEnd(EntryBB); + builder.setInsertionPointToEnd(entryBB); auto &yieldOp = block.getOperations().back(); assert(isa(yieldOp)); builder.create(yieldOp.getLoc()); @@ -118,26 +229,22 @@ cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { void LoweringPreparePass::lowerGlobalOp(GlobalOp op) { auto &ctorRegion = op.getCtorRegion(); - if (!ctorRegion.empty()) { + auto &dtorRegion = op.getDtorRegion(); + + if (!ctorRegion.empty() || !dtorRegion.empty()) { // Build a variable initialization function and move the initialzation code // in the ctor region over. auto f = buildCXXGlobalVarDeclInitFunc(op); - // Clear the ctor region + // Clear the ctor and dtor region ctorRegion.getBlocks().clear(); + dtorRegion.getBlocks().clear(); // Add a function call to the variable initialization function. assert(!op.getAst()->getAstDecl()->getAttr() && "custom initialization priority NYI"); dynamicInitializers.push_back(f); } - - auto &dtorRegion = op.getDtorRegion(); - if (!dtorRegion.empty()) { - // TODO: handle destructor - // Clear the dtor region - dtorRegion.getBlocks().clear(); - } } void LoweringPreparePass::buildCXXGlobalInitFunc() { @@ -178,15 +285,8 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { auto fnType = mlir::cir::FuncType::get( {}, mlir::cir::VoidType::get(builder.getContext())); FuncOp f = - builder.create(theModule.getLoc(), fnName, fnType); - f.setLinkageAttr(mlir::cir::GlobalLinkageKindAttr::get( - builder.getContext(), mlir::cir::GlobalLinkageKind::ExternalLinkage)); - mlir::SymbolTable::setSymbolVisibility( - f, mlir::SymbolTable::Visibility::Private); - mlir::NamedAttrList extraAttrs; - f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), extraAttrs.getDictionary(builder.getContext()))); - + buildRuntimeFunction(builder, fnName, theModule.getLoc(), fnType, + mlir::cir::GlobalLinkageKind::ExternalLinkage); builder.setInsertionPointToStart(f.addEntryBlock()); for (auto &f : dynamicInitializers) { builder.create(f.getLoc(), f); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5af8a09566b2..e88101c9b2a0 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1278,8 +1278,9 @@ class CIRGlobalOpLowering // Check for missing funcionalities. if (!init.has_value()) { - op.emitError() << "uninitialized globals are not yet supported."; - return mlir::failure(); + rewriter.replaceOpWithNewOp(op, llvmType, isConst, + linkage, symbol, mlir::Attribute()); + return mlir::success(); } // Initializer is a constant array: convert it to a compatible llvm init. diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 40bd1715d67d..786fe85301a8 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -39,6 +39,8 @@ static Init __ioinit2(false); // AFTER: module {{.*}} attributes {{.*}}cir.globalCtors = [#cir.globalCtor<"__cxx_global_var_init">, #cir.globalCtor<"__cxx_global_var_init.1">] +// AFTER-NEXT: cir.global "private" external @__dso_handle : i8 +// AFTER-NEXT: cir.func private @__cxa_atexit(!cir.ptr)>>, !cir.ptr, !cir.ptr) // AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) // AFTER-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) // AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.vardecl.ast} @@ -46,26 +48,41 @@ static Init __ioinit2(false); // AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr // AFTER-NEXT: %1 = cir.const(#true) : !cir.bool // AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// AFTER-NEXT: %2 = cir.get_global @_ZL8__ioinit : cir.ptr +// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : cir.ptr )>> +// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> +// AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr +// AFTER-NEXT: %6 = cir.get_global @__dso_handle : cir.ptr +// AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return // AFTER: cir.global "private" internal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {ast = #cir.vardecl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() // AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr // AFTER-NEXT: %1 = cir.const(#false) : !cir.bool // AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// AFTER-NEXT: %2 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : cir.ptr )>> +// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> +// AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr +// AFTER-NEXT: %6 = cir.get_global @__dso_handle : cir.ptr +// AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return // AFTER: cir.func private @_GLOBAL__sub_I_static.cpp() // AFTER-NEXT: cir.call @__cxx_global_var_init() : () -> () // AFTER-NEXT: cir.call @__cxx_global_var_init.1() : () -> () // AFTER-NEXT: cir.return +// LLVM: @__dso_handle = external global i8 // LLVM: @_ZL8__ioinit = internal global %class.Init zeroinitializer // LLVM: @_ZL9__ioinit2 = internal global %class.Init zeroinitializer // LLVM: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }, { i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init.1, ptr null }] // LLVM: define internal void @__cxx_global_var_init() // LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL8__ioinit, i8 1) +// LLVM-NEXT: call void @__cxa_atexit(ptr @_ZN4InitD1Ev, ptr @_ZL8__ioinit, ptr @__dso_handle) // LLVM-NEXT: ret void // LLVM: define internal void @__cxx_global_var_init.1() // LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL9__ioinit2, i8 0) +// LLVM-NEXT: call void @__cxa_atexit(ptr @_ZN4InitD1Ev, ptr @_ZL9__ioinit2, ptr @__dso_handle) // LLVM-NEXT: ret void // LLVM: define void @_GLOBAL__sub_I_static.cpp() // LLVM-NEXT: call void @__cxx_global_var_init() From a2cbf8e6afa9f80ccb344f9d8e1d2ee8f68492d0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 30 Aug 2023 10:21:30 -0700 Subject: [PATCH 1181/2301] [CIR][CIRGen] Add initial support for throw expression Add two new CIR ops: cir.alloc_exception and cir.throw, they are higher level representations for their __cxa_* counterparts. Add CIRGen for a simple `throw ` example, which requires using the above added operations. Rethrow mechanism (plain `throw`) is still NYI. For LLVM lowering: LoweringPrepare work needs to be done next, breaking these ops back to functions calls. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 93 +++++++++++++++++++ clang/lib/CIR/CodeGen/Address.h | 6 ++ clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 3 + clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 14 +++ clang/lib/CIR/CodeGen/CIRGenException.cpp | 92 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 11 +++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 63 +++++++++++++ clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 23 ++++- clang/test/CIR/CodeGen/throw.cpp | 16 ++++ 11 files changed, 324 insertions(+), 3 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenException.cpp create mode 100644 clang/test/CIR/CodeGen/throw.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index f9ac95fcef85..c906de43876f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1950,6 +1950,99 @@ def VAArgOp : CIR_Op<"va.arg">, let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// AllocException +//===----------------------------------------------------------------------===// + +def AllocException : CIR_Op<"alloc_exception", [ + AllocaTypesMatchWith<"'allocType' matches pointee type of 'addr'", + "addr", "allocType", + "$_self.cast().getPointee()">]> { + let summary = "Defines a scope-local variable"; + let description = [{ + Implements a slightly higher level __cxa_allocate_exception: + + `void *__cxa_allocate_exception(size_t thrown_size);` + + If operation fails, program terminates, not throw. + + Example: + + ```mlir + // if (b == 0) { + // ... + // throw "..."; + cir.if %10 { + %11 = cir.alloc_exception(!cir.ptr) -> > + ... // store exception content into %11 + cir.throw(%11 : !cir.ptr>, ... + ``` + }]; + + let arguments = (ins TypeAttr:$allocType); + let results = (outs Res]>:$addr); + + let assemblyFormat = [{ + `(` $allocType `)` `->` type($addr) attr-dict + }]; + + let hasVerifier = 0; +} + +//===----------------------------------------------------------------------===// +// ThrowOp +//===----------------------------------------------------------------------===// + +def ThrowOp : CIR_Op<"throw", + [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp, LoopOp">, + Terminator]> { + let summary = "(Re)Throws an exception"; + let description = [{ + Very similar to __cxa_throw: + + ``` + void __cxa_throw(void *thrown_exception, std::type_info *tinfo, + void (*dest) (void *)); + ``` + + The absense of arguments for `cir.throw` means it rethrows. + + For the no-rethrow version, it must have at least two operands, the RTTI + information, a pointer to the exception object (likely allocated via + `cir.cxa.allocate_exception`) and finally an optional dtor, which might + run as part of this operation. + + ```mlir + // if (b == 0) + // throw "Division by zero condition!"; + cir.if %10 { + %11 = cir.alloc_exception(!cir.ptr) -> > + ... + cir.store %13, %11 : // Store string addr for "Division by zero condition!" + cir.throw(%11 : !cir.ptr>, @"typeinfo for char const*") + ``` + }]; + + let arguments = (ins Optional:$exception_ptr, + OptionalAttr:$type_info, + OptionalAttr:$dtor); + + let assemblyFormat = [{ + `(` + ($exception_ptr^ `:` type($exception_ptr))? + (`,` $type_info^)? + (`,` $dtor^)? + `)` attr-dict + }]; + + let extraClassDeclaration = [{ + bool rethrows() { return getNumOperands() == 0; } + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // Operations Lowered Directly to LLVM IR // diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index cfb79e697d30..31186f4a8e1f 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -76,6 +76,12 @@ class Address { isKnownNonNull()); } + /// Return address with different element type, but same pointer and + /// alignment. + Address withElementType(mlir::Type ElemTy) const { + return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull()); + } + mlir::Value getPointer() const { assert(isValid()); return PointerAndKnownNonNull.getPointer(); diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index d01a4b4b88e8..5d334b09072b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -279,6 +279,9 @@ class CIRGenCXXABI { /// Emit a single constructor/destructor with the gien type from a C++ /// constructor Decl. virtual void buildCXXStructor(clang::GlobalDecl GD) = 0; + + virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) = 0; + virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) = 0; }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 457379faa42f..37cd154b5d1f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -447,3 +447,17 @@ void EHScopeStack::popNullFixups() { // normal cleanup; otherwise there really shouldn't be any fixups. llvm_unreachable("NYI"); } + +bool EHScopeStack::requiresLandingPad() const { + for (stable_iterator si = getInnermostEHScope(); si != stable_end();) { + // Skip lifetime markers. + if (auto *cleanup = dyn_cast(&*find(si))) + if (cleanup->isLifetimeMarker()) { + si = cleanup->getEnclosingEHScope(); + continue; + } + return true; + } + + return false; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp new file mode 100644 index 000000000000..528dcf269d99 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -0,0 +1,92 @@ +//===--- CIRGenException.cpp - Emit CIR Code for C++ exceptions -*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with C++ exception related code generation. +// +//===----------------------------------------------------------------------===// + +#include "CIRDataLayout.h" +#include "CIRGenCXXABI.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "UnimplementedFeatureGuarding.h" + +#include "clang/AST/StmtVisitor.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" +#include + +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Value.h" + +using namespace cir; +using namespace clang; + +void CIRGenFunction::buildCXXThrowExpr(const CXXThrowExpr *E) { + if (const Expr *SubExpr = E->getSubExpr()) { + QualType ThrowType = SubExpr->getType(); + if (ThrowType->isObjCObjectPointerType()) { + llvm_unreachable("NYI"); + } else { + CGM.getCXXABI().buildThrow(*this, E); + } + } else { + CGM.getCXXABI().buildRethrow(*this, /*isNoReturn=*/true); + } + + // In LLVM codegen the expression emitters expect to leave this + // path by starting a new basic block. We do not need that in CIR. +} + +namespace { +/// A cleanup to free the exception object if its initialization +/// throws. +struct FreeException final : EHScopeStack::Cleanup { + mlir::Value exn; + FreeException(mlir::Value exn) : exn(exn) {} + void Emit(CIRGenFunction &CGF, Flags flags) override { + llvm_unreachable("call to cxa_free or equivalent op NYI"); + } +}; +} // end anonymous namespace + +// Emits an exception expression into the given location. This +// differs from buildAnyExprToMem only in that, if a final copy-ctor +// call is required, an exception within that copy ctor causes +// std::terminate to be invoked. +void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { + // Make sure the exception object is cleaned up if there's an + // exception during initialization. + pushFullExprCleanup(EHCleanup, addr.getPointer()); + EHScopeStack::stable_iterator cleanup = EHStack.stable_begin(); + + // __cxa_allocate_exception returns a void*; we need to cast this + // to the appropriate type for the object. + auto ty = convertTypeForMem(e->getType()); + Address typedAddr = addr.withElementType(ty); + + // From LLVM's codegen: + // FIXME: this isn't quite right! If there's a final unelided call + // to a copy constructor, then according to [except.terminate]p1 we + // must call std::terminate() if that constructor throws, because + // technically that copy occurs after the exception expression is + // evaluated but before the exception is caught. But the best way + // to handle that is to teach EmitAggExpr to do the final copy + // differently if it can't be elided. + buildAnyExprToMem(e, typedAddr, e->getType().getQualifiers(), + /*IsInit*/ true); + + // Deactivate the cleanup block. + auto op = typedAddr.getPointer().getDefiningOp(); + assert(op && + "expected valid Operation *, block arguments are not meaningful here"); + DeactivateCleanupBlock(cleanup, op); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 0da1d1cb3fe7..f07aa6e8295b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -591,7 +591,10 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitCXXThrowExpr(CXXThrowExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitCXXThrowExpr(CXXThrowExpr *E) { + CGF.buildCXXThrowExpr(E); + return nullptr; + } mlir::Value VisitCXXNoexceptExpr(CXXNoexceptExpr *E) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 90119d82091e..4cec0b6d3075 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1514,6 +1514,8 @@ class CIRGenFunction : public CIRGenTypeCache { // TODO: this can also be abstrated into common AST helpers bool hasBooleanRepresentation(clang::QualType Ty); + void buildCXXThrowExpr(const CXXThrowExpr *E); + /// Return the address of a local variable. Address GetAddrOfLocalVar(const clang::VarDecl *VD) { auto it = LocalDeclMap.find(VD); @@ -1556,6 +1558,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// given memory location. void buildAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer); + void buildAnyExprToExn(const Expr *E, Address Addr); LValue buildCheckedLValue(const Expr *E, TypeCheckKind TCK); LValue buildMemberExpr(const MemberExpr *E); @@ -1605,6 +1608,14 @@ class CIRGenFunction : public CIRGenTypeCache { bool isConditional() const { return IsConditional; } }; + // TODO(cir): perhaps return a mlir::BasicBlock* here, for now + // only check if a landing pad is required. + bool getInvokeDest() { + if (!EHStack.requiresLandingPad()) + return false; + return true; + } + /// Takes the old cleanup stack size and emits the cleanup blocks /// that have been added. void diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 21451cebb3c8..3cd0a35cb462 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -119,6 +119,8 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy) override; + virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) override; + virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) override; bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, @@ -1662,3 +1664,64 @@ mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam( GlobalDecl GD(DD, Type); return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating); } + +void CIRGenItaniumCXXABI::buildRethrow(CIRGenFunction &CGF, bool isNoReturn) { + // void __cxa_rethrow(); + llvm_unreachable("NYI"); +} + +void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, + const CXXThrowExpr *E) { + // This differs a bit from LLVM codegen, CIR has native operations for some + // cxa functions, and defers allocation size computation, always pass the dtor + // symbol, etc. CIRGen also does not use getAllocateExceptionFn / getThrowFn. + + // Now allocate the exception object. + auto &builder = CGF.getBuilder(); + QualType clangThrowType = E->getSubExpr()->getType(); + auto throwTy = CGF.ConvertType(clangThrowType); + auto subExprLoc = CGF.getLoc(E->getSubExpr()->getSourceRange()); + // Defer computing allocation size to some later lowering pass. + auto exceptionPtr = + builder + .create( + subExprLoc, builder.getPointerTo(throwTy), throwTy) + .getAddr(); + + // Build expression and store its result into exceptionPtr. + CharUnits exnAlign = CGF.getContext().getExnObjectAlignment(); + CGF.buildAnyExprToExn(E->getSubExpr(), Address(exceptionPtr, exnAlign)); + + // Get the RTTI symbol address. + auto typeInfo = CGM.getAddrOfRTTIDescriptor(subExprLoc, clangThrowType, + /*ForEH=*/true) + .dyn_cast_or_null(); + assert(typeInfo && "expected GlobalViewAttr typeinfo"); + assert(!typeInfo.getIndices() && "expected no indirection"); + + // The address of the destructor. + // + // Note: LLVM codegen already optimizes out the dtor if the + // type is a record with trivial dtor (by passing down a + // null dtor). In CIR, we forward this info and allow for + // LoweringPrepare or some other pass to skip passing the + // trivial function. + // + // TODO(cir): alternatively, dtor could be ignored here and + // the type used to gather the relevant dtor during + // LoweringPrepare. + mlir::FlatSymbolRefAttr dtor{}; + if (const RecordType *recordTy = clangThrowType->getAs()) { + CXXRecordDecl *rec = cast(recordTy->getDecl()); + CXXDestructorDecl *dtorD = rec->getDestructor(); + dtor = mlir::FlatSymbolRefAttr::get( + CGM.getAddrOfCXXStructor(GlobalDecl(dtorD, Dtor_Complete)) + .getSymNameAttr()); + } + + assert(!CGF.getInvokeDest() && "landing pad like logic NYI"); + + // Now throw the exception. + builder.create(CGF.getLoc(E->getSourceRange()), + exceptionPtr, typeInfo.getSymbol(), dtor); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 7e57b8f798a5..7c0474aee006 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -16,6 +16,7 @@ add_clang_library(clangCIR CIRGenCoroutine.cpp CIRGenDecl.cpp CIRGenDeclCXX.cpp + CIRGenException.cpp CIRGenExpr.cpp CIRGenExprConst.cpp CIRGenExprAgg.cpp diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index fa019065804d..29de0e82896a 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -385,6 +385,23 @@ mlir::LogicalResult ReturnOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// ThrowOp +//===----------------------------------------------------------------------===// + +mlir::LogicalResult ThrowOp::verify() { + // For the no-rethrow version, it must have at least the exception pointer. + if (rethrows()) + return success(); + + if (getNumOperands() == 1) + return success(); + + return emitOpError() << "expected zero (rethrow) or at least both " + "exception_ptr and type_info"; + return failure(); +} + //===----------------------------------------------------------------------===// // IfOp //===----------------------------------------------------------------------===// @@ -419,14 +436,14 @@ static LogicalResult checkBlockTerminator(OpAsmParser &parser, if (blocks.empty()) return success(); - // Test that at least one block has a yield/return terminator. We can + // Test that at least one block has a yield/return/throw terminator. We can // probably make this a bit more strict. for (Block &block : blocks) { if (block.empty()) continue; auto &op = block.back(); if (op.hasTrait() && - isa(op)) { + isa(op)) { return success(); } } @@ -483,6 +500,8 @@ bool shouldPrintTerm(mlir::Region &r) { return false; if (isa(entryBlock->back())) return true; + if (isa(entryBlock->back())) + return true; YieldOp y = dyn_cast(entryBlock->back()); if (y && (!y.isPlain() || !y.getArgs().empty())) return true; diff --git a/clang/test/CIR/CodeGen/throw.cpp b/clang/test/CIR/CodeGen/throw.cpp new file mode 100644 index 000000000000..9c390ebb8136 --- /dev/null +++ b/clang/test/CIR/CodeGen/throw.cpp @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +double d(int a, int b) { + if (b == 0) + throw "Division by zero condition!"; + return (a/b); +} + +// CHECK: cir.if %10 { +// CHECK-NEXT: %11 = cir.alloc_exception(!cir.ptr) -> > +// CHECK-NEXT: %12 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %13 = cir.cast(array_to_ptrdecay, %12 : !cir.ptr>), !cir.ptr +// CHECK-NEXT: cir.store %13, %11 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.throw(%11 : !cir.ptr>, @_ZTIPKc) +// CHECK-NEXT: } \ No newline at end of file From 67d35aa87edf61fe17676d84c4b4afaefe26d815 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 31 Aug 2023 16:32:39 -0700 Subject: [PATCH 1182/2301] [CIR][CIRGen] Add missing test for cir.throw --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 ++++--- clang/test/CIR/IR/invalid.cir | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index c906de43876f..c9beaacfd5e6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1987,6 +1987,7 @@ def AllocException : CIR_Op<"alloc_exception", [ `(` $allocType `)` `->` type($addr) attr-dict }]; + // Constraints verified elsewhere. let hasVerifier = 0; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 29de0e82896a..1b11e6c315a0 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -394,11 +394,12 @@ mlir::LogicalResult ThrowOp::verify() { if (rethrows()) return success(); - if (getNumOperands() == 1) + if (getNumOperands() == 1) { + if (!getTypeInfo()) + return emitOpError() << "'type_info' symbol attribute missing"; return success(); + } - return emitOpError() << "expected zero (rethrow) or at least both " - "exception_ptr and type_info"; return failure(); } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 58dbaf0507c8..bb675491d3ac 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -507,3 +507,18 @@ module { } dtor {} // expected-error@+1 {{custom op 'cir.global' dtor region must have exactly one block}} } + +// ----- +!s32i = !cir.int +!u8i = !cir.int +module { + cir.global "private" constant internal @".str" = #cir.const_array<"Division by zero condition!\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global "private" constant external @_ZTIPKc : !cir.ptr + cir.func @_Z8divisionii() { + %11 = cir.alloc_exception(!cir.ptr) -> > + %12 = cir.get_global @".str" : cir.ptr > + %13 = cir.cast(array_to_ptrdecay, %12 : !cir.ptr>), !cir.ptr + cir.store %13, %11 : !cir.ptr, cir.ptr > + cir.throw(%11 : !cir.ptr>) // expected-error {{'type_info' symbol attribute missing}} + } +} \ No newline at end of file From 6a6cabce6af00398c96099b33d011f44bae19ea1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 31 Aug 2023 17:35:20 -0700 Subject: [PATCH 1183/2301] [CIR][CIRGen][NFC] Add more checks for detecting proper vtable linkage Add an assert while we don't handle the available externally. --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 109 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 1 - 2 files changed, 107 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 3cd0a35cb462..f410ec2196de 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -44,6 +44,56 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { return cast(cir::CIRGenCXXABI::getMangleContext()); } + bool isVTableHidden(const CXXRecordDecl *RD) const { + const auto &VtableLayout = + CGM.getItaniumVTableContext().getVTableLayout(RD); + + for (const auto &VtableComponent : VtableLayout.vtable_components()) { + if (VtableComponent.isRTTIKind()) { + const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl(); + if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility) + return true; + } else if (VtableComponent.isUsedFunctionPointerKind()) { + const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); + if (Method->getVisibility() == Visibility::HiddenVisibility && + !Method->isDefined()) + return true; + } + } + return false; + } + + bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const { + const auto &VtableLayout = + CGM.getItaniumVTableContext().getVTableLayout(RD); + + for (const auto &VtableComponent : VtableLayout.vtable_components()) { + // Skip empty slot. + if (!VtableComponent.isUsedFunctionPointerKind()) + continue; + + const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); + if (!Method->getCanonicalDecl()->isInlined()) + continue; + + StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl()); + auto *op = CGM.getGlobalValue(Name); + if (auto globalOp = dyn_cast_or_null(op)) + llvm_unreachable("NYI"); + + if (auto funcOp = dyn_cast_or_null(op)) { + // This checks if virtual inline function has already been emitted. + // Note that it is possible that this inline function would be emitted + // after trying to emit vtable speculatively. Because of this we do + // an extra pass after emitting all deferred vtables to find and emit + // these vtables opportunistically. + if (!funcOp || funcOp.isDeclaration()) + return true; + } + } + return false; + } + public: CIRGenItaniumCXXABI(CIRGenModule &CGM, bool UseARMMethodPtrABI = false, bool UseARMGuardVarABI = false) @@ -132,6 +182,7 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { const CXXRecordDecl *VTableClass) override; bool isVirtualOffsetNeededForVTableField(CIRGenFunction &CGF, CIRGenFunction::VPtr Vptr) override; + bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const; mlir::Value getVTableAddressPointInStructor( CIRGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, const CXXRecordDecl *NearestVBase) override; @@ -674,9 +725,63 @@ bool CIRGenItaniumCXXABI::isVirtualOffsetNeededForVTableField( return NeedsVTTParameter(CGF.CurGD); } +bool CIRGenItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass( + const CXXRecordDecl *RD) const { + // We don't emit available_externally vtables if we are in -fapple-kext mode + // because kext mode does not permit devirtualization. + if (CGM.getLangOpts().AppleKext) + return false; + + // If the vtable is hidden then it is not safe to emit an available_externally + // copy of vtable. + if (isVTableHidden(RD)) + return false; + + if (CGM.getCodeGenOpts().ForceEmitVTables) + return true; + + // If we don't have any not emitted inline virtual function then we are safe + // to emit an available_externally copy of vtable. + // FIXME we can still emit a copy of the vtable if we + // can emit definition of the inline functions. + if (hasAnyUnusedVirtualInlineFunction(RD)) + return false; + + // For a class with virtual bases, we must also be able to speculatively + // emit the VTT, because CodeGen doesn't have separate notions of "can emit + // the vtable" and "can emit the VTT". For a base subobject, this means we + // need to be able to emit non-virtual base vtables. + if (RD->getNumVBases()) { + for (const auto &B : RD->bases()) { + auto *BRD = B.getType()->getAsCXXRecordDecl(); + assert(BRD && "no class for base specifier"); + if (B.isVirtual() || !BRD->isDynamicClass()) + continue; + if (!canSpeculativelyEmitVTableAsBaseClass(BRD)) + return false; + } + } + + return true; +} + bool CIRGenItaniumCXXABI::canSpeculativelyEmitVTable( - [[maybe_unused]] const CXXRecordDecl *RD) const { - llvm_unreachable("NYI"); + const CXXRecordDecl *RD) const { + if (!canSpeculativelyEmitVTableAsBaseClass(RD)) + return false; + + // For a complete-object vtable (or more specifically, for the VTT), we need + // to be able to speculatively emit the vtables of all dynamic virtual bases. + for (const auto &B : RD->vbases()) { + auto *BRD = B.getType()->getAsCXXRecordDecl(); + assert(BRD && "no class for base specifier"); + if (!BRD->isDynamicClass()) + continue; + if (!canSpeculativelyEmitVTableAsBaseClass(BRD)) + return false; + } + + return true; } namespace { diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 425713b0c378..f54e66115202 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -107,7 +107,6 @@ bool CIRGenVTables::isVTableExternal(const CXXRecordDecl *RD) { static bool shouldEmitAvailableExternallyVTable(const CIRGenModule &CGM, const CXXRecordDecl *RD) { - assert(CGM.getCodeGenOpts().OptimizationLevel == 0 && "NYI"); return CGM.getCodeGenOpts().OptimizationLevel > 0 && CGM.getCXXABI().canSpeculativelyEmitVTable(RD); } From f3a5a5f86451567536ce493533fb32750e2cbd4f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 31 Aug 2023 18:30:44 -0700 Subject: [PATCH 1184/2301] [CIR][CIRGen] Add support for nullptr_t --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 8 +++++--- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 12 ++++++++---- clang/test/CIR/CodeGen/types-nullptr.cpp | 9 +++++++++ 4 files changed, 23 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/types-nullptr.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index f07aa6e8295b..3a136df08054 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -589,7 +589,7 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) { - llvm_unreachable("NYI"); + return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange())); } mlir::Value VisitCXXThrowExpr(CXXThrowExpr *E) { CGF.buildCXXThrowExpr(E); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index aaa7ff379668..c288274e15bb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -468,9 +468,11 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; case BuiltinType::NullPtr: - // Model std::nullptr_t as i8* - // ResultType = llvm::Type::getUInt8PtrTy(getLLVMContext()); - assert(0 && "not implemented"); + // Add proper CIR type for it? this looks mostly useful for sema related + // things (like for overloads accepting void), for now, given that + // `sizeof(std::nullptr_t)` is equal to `sizeof(void *)`, model + // std::nullptr_t as !cir.ptr + ResultType = Builder.getVoidPtrTy(); break; case BuiltinType::UInt128: diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index f54e66115202..04fef71973d1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -394,14 +394,18 @@ CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { case TSK_ImplicitInstantiation: return DiscardableODRLinkage; - case TSK_ExplicitInstantiationDeclaration: + case TSK_ExplicitInstantiationDeclaration: { // Explicit instantiations in MSVC do not provide vtables, so we must emit // our own. if (getTarget().getCXXABI().isMicrosoft()) return DiscardableODRLinkage; - return shouldEmitAvailableExternallyVTable(*this, RD) - ? mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage - : mlir::cir::GlobalLinkageKind::ExternalLinkage; + auto r = shouldEmitAvailableExternallyVTable(*this, RD) + ? mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage + : mlir::cir::GlobalLinkageKind::ExternalLinkage; + assert(r == mlir::cir::GlobalLinkageKind::ExternalLinkage && + "available external NYI"); + return r; + } case TSK_ExplicitInstantiationDefinition: return NonDiscardableODRLinkage; diff --git a/clang/test/CIR/CodeGen/types-nullptr.cpp b/clang/test/CIR/CodeGen/types-nullptr.cpp new file mode 100644 index 000000000000..4f7b1df747c5 --- /dev/null +++ b/clang/test/CIR/CodeGen/types-nullptr.cpp @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef decltype(nullptr) nullptr_t; +void f() { nullptr_t t = nullptr; } + +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr > +// CHECK: %1 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > \ No newline at end of file From 4fcb2a7fea4b0ff0b52efde622d86c9b37cbca3c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 1 Sep 2023 14:39:03 -0700 Subject: [PATCH 1185/2301] [CIR][CIRGen][NFC] Try-catch support: skeleton Add lots of necessary boilerplate for personality functions, catch block tracking and building catch scope stack. We enter the try but no catching handling yet. New paths all guarded with asserts, therefore NFC. --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 4 + clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 8 + clang/lib/CIR/CodeGen/CIRGenCleanup.h | 4 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 243 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 7 + clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 9 + clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 4 +- 8 files changed, 277 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 5d334b09072b..4f1d45ac50b7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -15,6 +15,7 @@ #define LLVM_CLANG_LIB_CIR_CIRGENCXXABI_H #include "CIRGenCall.h" +#include "CIRGenCleanup.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" @@ -189,6 +190,9 @@ class CIRGenCXXABI { const CXXRecordDecl *RD) = 0; virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty) = 0; + virtual CatchTypeInfo + getAddrOfCXXCatchHandlerType(mlir::Location loc, QualType Ty, + QualType CatchHandlerType) = 0; /// Returns true if the given destructor type should be emitted as a linkonce /// delegating thunk, regardless of whether the dtor is defined in this TU or diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 37cd154b5d1f..6350e8ddc118 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -461,3 +461,11 @@ bool EHScopeStack::requiresLandingPad() const { return false; } + +EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) { + char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers)); + EHCatchScope *scope = + new (buffer) EHCatchScope(numHandlers, InnermostEHScope); + InnermostEHScope = stable_begin(); + return scope; +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h index b9f7ac2eeaae..fa3b8cde0d25 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -31,7 +31,7 @@ class CIRGenFunction; /// The MS C++ ABI needs a pointer to RTTI data plus some flags to describe the /// type of a catch handler, so we use this wrapper. struct CatchTypeInfo { - mlir::Value RTTI; + mlir::TypedAttr RTTI; unsigned Flags; }; @@ -181,7 +181,7 @@ class EHCatchScope : public EHScope { setHandler(I, CatchTypeInfo{nullptr, 0}, Block); } - void setHandler(unsigned I, mlir::Value Type, mlir::Block *Block) { + void setHandler(unsigned I, mlir::TypedAttr Type, mlir::Block *Block) { assert(I < getNumHandlers()); getHandlers()[I].Type = CatchTypeInfo{Type, 0}; getHandlers()[I].Block = Block; diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 528dcf269d99..3a268af9296e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -12,6 +12,7 @@ #include "CIRDataLayout.h" #include "CIRGenCXXABI.h" +#include "CIRGenCleanup.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "UnimplementedFeatureGuarding.h" @@ -30,6 +31,165 @@ using namespace cir; using namespace clang; +const EHPersonality EHPersonality::GNU_C = {"__gcc_personality_v0", nullptr}; +const EHPersonality EHPersonality::GNU_C_SJLJ = {"__gcc_personality_sj0", + nullptr}; +const EHPersonality EHPersonality::GNU_C_SEH = {"__gcc_personality_seh0", + nullptr}; +const EHPersonality EHPersonality::NeXT_ObjC = {"__objc_personality_v0", + nullptr}; +const EHPersonality EHPersonality::GNU_CPlusPlus = {"__gxx_personality_v0", + nullptr}; +const EHPersonality EHPersonality::GNU_CPlusPlus_SJLJ = { + "__gxx_personality_sj0", nullptr}; +const EHPersonality EHPersonality::GNU_CPlusPlus_SEH = { + "__gxx_personality_seh0", nullptr}; +const EHPersonality EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", + "objc_exception_throw"}; +const EHPersonality EHPersonality::GNU_ObjC_SJLJ = { + "__gnu_objc_personality_sj0", "objc_exception_throw"}; +const EHPersonality EHPersonality::GNU_ObjC_SEH = { + "__gnu_objc_personality_seh0", "objc_exception_throw"}; +const EHPersonality EHPersonality::GNU_ObjCXX = { + "__gnustep_objcxx_personality_v0", nullptr}; +const EHPersonality EHPersonality::GNUstep_ObjC = { + "__gnustep_objc_personality_v0", nullptr}; +const EHPersonality EHPersonality::MSVC_except_handler = {"_except_handler3", + nullptr}; +const EHPersonality EHPersonality::MSVC_C_specific_handler = { + "__C_specific_handler", nullptr}; +const EHPersonality EHPersonality::MSVC_CxxFrameHandler3 = { + "__CxxFrameHandler3", nullptr}; +const EHPersonality EHPersonality::GNU_Wasm_CPlusPlus = { + "__gxx_wasm_personality_v0", nullptr}; +const EHPersonality EHPersonality::XL_CPlusPlus = {"__xlcxx_personality_v1", + nullptr}; + +static const EHPersonality &getCPersonality(const TargetInfo &Target, + const LangOptions &L) { + const llvm::Triple &T = Target.getTriple(); + if (T.isWindowsMSVCEnvironment()) + return EHPersonality::MSVC_CxxFrameHandler3; + if (L.hasSjLjExceptions()) + return EHPersonality::GNU_C_SJLJ; + if (L.hasDWARFExceptions()) + return EHPersonality::GNU_C; + if (L.hasSEHExceptions()) + return EHPersonality::GNU_C_SEH; + return EHPersonality::GNU_C; +} + +static const EHPersonality &getObjCPersonality(const TargetInfo &Target, + const LangOptions &L) { + const llvm::Triple &T = Target.getTriple(); + if (T.isWindowsMSVCEnvironment()) + return EHPersonality::MSVC_CxxFrameHandler3; + + switch (L.ObjCRuntime.getKind()) { + case ObjCRuntime::FragileMacOSX: + return getCPersonality(Target, L); + case ObjCRuntime::MacOSX: + case ObjCRuntime::iOS: + case ObjCRuntime::WatchOS: + return EHPersonality::NeXT_ObjC; + case ObjCRuntime::GNUstep: + if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7)) + return EHPersonality::GNUstep_ObjC; + [[fallthrough]]; + case ObjCRuntime::GCC: + case ObjCRuntime::ObjFW: + if (L.hasSjLjExceptions()) + return EHPersonality::GNU_ObjC_SJLJ; + if (L.hasSEHExceptions()) + return EHPersonality::GNU_ObjC_SEH; + return EHPersonality::GNU_ObjC; + } + llvm_unreachable("bad runtime kind"); +} + +static const EHPersonality &getCXXPersonality(const TargetInfo &Target, + const LangOptions &L) { + const llvm::Triple &T = Target.getTriple(); + if (T.isWindowsMSVCEnvironment()) + return EHPersonality::MSVC_CxxFrameHandler3; + if (T.isOSAIX()) + return EHPersonality::XL_CPlusPlus; + if (L.hasSjLjExceptions()) + return EHPersonality::GNU_CPlusPlus_SJLJ; + if (L.hasDWARFExceptions()) + return EHPersonality::GNU_CPlusPlus; + if (L.hasSEHExceptions()) + return EHPersonality::GNU_CPlusPlus_SEH; + if (L.hasWasmExceptions()) + return EHPersonality::GNU_Wasm_CPlusPlus; + return EHPersonality::GNU_CPlusPlus; +} + +/// Determines the personality function to use when both C++ +/// and Objective-C exceptions are being caught. +static const EHPersonality &getObjCXXPersonality(const TargetInfo &Target, + const LangOptions &L) { + if (Target.getTriple().isWindowsMSVCEnvironment()) + return EHPersonality::MSVC_CxxFrameHandler3; + + switch (L.ObjCRuntime.getKind()) { + // In the fragile ABI, just use C++ exception handling and hope + // they're not doing crazy exception mixing. + case ObjCRuntime::FragileMacOSX: + return getCXXPersonality(Target, L); + + // The ObjC personality defers to the C++ personality for non-ObjC + // handlers. Unlike the C++ case, we use the same personality + // function on targets using (backend-driven) SJLJ EH. + case ObjCRuntime::MacOSX: + case ObjCRuntime::iOS: + case ObjCRuntime::WatchOS: + return getObjCPersonality(Target, L); + + case ObjCRuntime::GNUstep: + return EHPersonality::GNU_ObjCXX; + + // The GCC runtime's personality function inherently doesn't support + // mixed EH. Use the ObjC personality just to avoid returning null. + case ObjCRuntime::GCC: + case ObjCRuntime::ObjFW: + return getObjCPersonality(Target, L); + } + llvm_unreachable("bad runtime kind"); +} + +static const EHPersonality &getSEHPersonalityMSVC(const llvm::Triple &T) { + if (T.getArch() == llvm::Triple::x86) + return EHPersonality::MSVC_except_handler; + return EHPersonality::MSVC_C_specific_handler; +} + +const EHPersonality &EHPersonality::get(CIRGenModule &CGM, + const FunctionDecl *FD) { + const llvm::Triple &T = CGM.getTarget().getTriple(); + const LangOptions &L = CGM.getLangOpts(); + const TargetInfo &Target = CGM.getTarget(); + + // Functions using SEH get an SEH personality. + if (FD && FD->usesSEHTry()) + return getSEHPersonalityMSVC(T); + + if (L.ObjC) + return L.CPlusPlus ? getObjCXXPersonality(Target, L) + : getObjCPersonality(Target, L); + return L.CPlusPlus ? getCXXPersonality(Target, L) + : getCPersonality(Target, L); +} + +const EHPersonality &EHPersonality::get(CIRGenFunction &CGF) { + const auto *FD = CGF.CurCodeDecl; + // For outlined finallys and filters, use the SEH personality in case they + // contain more SEH. This mostly only affects finallys. Filters could + // hypothetically use gnu statement expressions to sneak in nested SEH. + FD = FD ? FD : CGF.CurSEHParent.getDecl(); + return get(CGF.CGM, dyn_cast_or_null(FD)); +} + void CIRGenFunction::buildCXXThrowExpr(const CXXThrowExpr *E) { if (const Expr *SubExpr = E->getSubExpr()) { QualType ThrowType = SubExpr->getType(); @@ -89,4 +249,87 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { assert(op && "expected valid Operation *, block arguments are not meaningful here"); DeactivateCleanupBlock(cleanup, op); +} + +mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { + enterCXXTryStmt(S); + if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + exitCXXTryStmt(S); + return mlir::success(); +} + +/// Emit the structure of the dispatch block for the given catch scope. +/// It is an invariant that the dispatch block already exists. +static void buildCatchDispatchBlock(CIRGenFunction &CGF, + EHCatchScope &catchScope) { + if (EHPersonality::get(CGF).isWasmPersonality()) + llvm_unreachable("NYI"); + if (EHPersonality::get(CGF).usesFuncletPads()) + llvm_unreachable("NYI"); + + auto *dispatchBlock = catchScope.getCachedEHDispatchBlock(); + assert(dispatchBlock); + + // If there's only a single catch-all, getEHDispatchBlock returned + // that catch-all as the dispatch block. + if (catchScope.getNumHandlers() == 1 && + catchScope.getHandler(0).isCatchAll()) { + llvm_unreachable("NYI"); // Remove when adding testcase. + assert(dispatchBlock == catchScope.getHandler(0).Block); + return; + } + + llvm_unreachable("NYI"); +} + +void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { + unsigned NumHandlers = S.getNumHandlers(); + EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers); + for (unsigned I = 0; I != NumHandlers; ++I) { + const CXXCatchStmt *C = S.getHandler(I); + + // FIXME: hook the CIR block for the right catch region here. + mlir::Block *Handler = nullptr; // createBasicBlock("catch"); + if (C->getExceptionDecl()) { + // FIXME: Dropping the reference type on the type into makes it + // impossible to correctly implement catch-by-reference + // semantics for pointers. Unfortunately, this is what all + // existing compilers do, and it's not clear that the standard + // personality routine is capable of doing this right. See C++ DR 388 : + // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388 + Qualifiers CaughtTypeQuals; + QualType CaughtType = CGM.getASTContext().getUnqualifiedArrayType( + C->getCaughtType().getNonReferenceType(), CaughtTypeQuals); + + CatchTypeInfo TypeInfo{nullptr, 0}; + if (CaughtType->isObjCObjectPointerType()) + llvm_unreachable("NYI"); + else + TypeInfo = CGM.getCXXABI().getAddrOfCXXCatchHandlerType( + getLoc(S.getSourceRange()), CaughtType, C->getCaughtType()); + CatchScope->setHandler(I, TypeInfo, Handler); + } else { + // No exception decl indicates '...', a catch-all. + llvm_unreachable("NYI"); + } + } +} + +void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { + unsigned NumHandlers = S.getNumHandlers(); + EHCatchScope &CatchScope = cast(*EHStack.begin()); + assert(CatchScope.getNumHandlers() == NumHandlers); + + // If the catch was not required, bail out now. + if (!CatchScope.hasEHBranches()) { + llvm_unreachable("NYI"); + CatchScope.clearHandlerBlocks(); + EHStack.popCatch(); + return; + } + + // Emit the structure of the EH dispatch for this catch. + buildCatchDispatchBlock(*this, CatchScope); + llvm_unreachable("NYI"); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 576725e4af65..5dcbb05f0849 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -802,7 +802,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, CurCodeDecl = D; const auto *FD = dyn_cast_or_null(D); if (FD && FD->usesSEHTry()) - llvm_unreachable("NYI"); + CurSEHParent = GD; CurFuncDecl = (D ? D->getNonClosureContext() : nullptr); FnRetTy = RetTy; CurFn = Fn; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 4cec0b6d3075..d21990a313cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -991,11 +991,18 @@ class CIRGenFunction : public CIRGenTypeCache { ArrayRef Attrs = std::nullopt); mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); + mlir::LogicalResult buildCXXTryStmt(const clang::CXXTryStmt &S); + void enterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); + void exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); + mlir::LogicalResult buildCompoundStmt(const clang::CompoundStmt &S); mlir::LogicalResult buildCompoundStmtWithoutScope(const clang::CompoundStmt &S); + GlobalDecl CurSEHParent; + bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; } + /// Emit code to compute the specified expression, /// ignoring the result. void buildIgnoredExpr(const clang::Expr *E); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index f410ec2196de..50fb494df371 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -18,6 +18,7 @@ //===----------------------------------------------------------------------===// #include "CIRGenCXXABI.h" +#include "CIRGenCleanup.h" #include "CIRGenFunctionInfo.h" #include "ConstantInitBuilder.h" @@ -171,6 +172,14 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { QualType ThisTy) override; virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) override; virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) override; + CatchTypeInfo + getAddrOfCXXCatchHandlerType(mlir::Location loc, QualType Ty, + QualType CatchHandlerType) override { + auto rtti = + dyn_cast(getAddrOfRTTIDescriptor(loc, Ty)); + assert(rtti && "expected GlobalViewAttr"); + return CatchTypeInfo{rtti, 0}; + } bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 68f38230a42b..c2e676f4d24f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -147,6 +147,9 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::CoreturnStmtClass: return buildCoreturnStmt(cast(*S)); + case Stmt::CXXTryStmtClass: + return buildCXXTryStmt(cast(*S)); + case Stmt::CXXForRangeStmtClass: return buildCXXForRangeStmt(cast(*S), Attrs); @@ -161,7 +164,6 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::ObjCAtSynchronizedStmtClass: case Stmt::ObjCForCollectionStmtClass: case Stmt::ObjCAutoreleasePoolStmtClass: - case Stmt::CXXTryStmtClass: case Stmt::SEHTryStmtClass: case Stmt::OMPMetaDirectiveClass: case Stmt::OMPCanonicalLoopClass: From 81b715542840bae0442a8781311b0455d673acbb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 1 Sep 2023 18:57:09 -0700 Subject: [PATCH 1186/2301] [CIR][CIRGen][NFC] Cleanup buildCall a bit and pave for CannotThrow In order to support exceptions, skeleton for unwind detection. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 58 ++++++++++++------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 11 ++++ .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 3 files changed, 49 insertions(+), 21 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 8086635f49dd..7ef9e0c03e6b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -480,32 +480,55 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // If we're using inalloca, set up that argument. assert(!ArgMemory.isValid() && "inalloca NYI"); + // 2. Prepare the function pointer. + // TODO: simplifyVariadicCallee // 3. Perform the actual call. - // Deactivate any cleanups that we're supposed to do immediately before the - // call. - // TODO: do this + // TODO: Deactivate any cleanups that we're supposed to do immediately before + // the call. + // if (!CallArgs.getCleanupsToDeactivate().empty()) + // deactivateArgCleanupsBeforeCall(*this, CallArgs); // TODO: Update the largest vector width if any arguments have vector types. // TODO: Compute the calling convention and attributes. - if (const FunctionDecl *FD = dyn_cast_or_null(CurFuncDecl)) { - assert(!FD->hasAttr() && "NYI"); - // TODO: InNoMergeAttributedStmt - // assert(!CurCodeDecl->hasAttr() && - // !TargetDecl->hasAttr() && "NYI"); + // TODO: strictfp + // TODO: Add call-site nomerge, noinline, always_inline attribute if exists. - // TODO: isSEHTryScope + // Apply some call-site-specific attributes. + // TODO: work this into building the attribute set. - // TODO: currentFunctionUsesSEHTry - // TODO: isCleanupPadScope + // Apply always_inline to all calls within flatten functions. + // FIXME: should this really take priority over __try, below? + // assert(!CurCodeDecl->hasAttr() && + // !TargetDecl->hasAttr() && "NYI"); - // TODO: UnusedReturnSizePtr + // Disable inlining inside SEH __try blocks. + if (isSEHTryScope()) + llvm_unreachable("NYI"); - assert(!FD->hasAttr() && "NYI"); + // Decide whether to use a call or an invoke. + bool CannotThrow; + if (currentFunctionUsesSEHTry()) { + // SEH cares about asynchronous exceptions, so everything can "throw." + CannotThrow = false; + } else if (isCleanupPadScope() && + EHPersonality::get(*this).isMSVCXXPersonality()) { + // The MSVC++ personality will implicitly terminate the program if an + // exception is thrown during a cleanup outside of a try/catch. + // We don't need to model anything in IR to get this behavior. + CannotThrow = true; + } else { + // FIXME(cir): pass down nounwind attribute + CannotThrow = true; } + (void)CannotThrow; + + // TODO: UnusedReturnSizePtr + if (const FunctionDecl *FD = dyn_cast_or_null(CurFuncDecl)) + assert(!FD->hasAttr() && "NYI"); // TODO: alignment attributes @@ -537,23 +560,16 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, if (callOrInvoke) callOrInvoke = &theCall; - if (const auto *FD = dyn_cast_or_null(CurFuncDecl)) { + if (const auto *FD = dyn_cast_or_null(CurFuncDecl)) assert(!FD->getAttr() && "NYI"); - } // TODO: set attributes on callop - // assert(!theCall.getResults().getType().front().isSignlessInteger() && // "Vector NYI"); - // TODO: LLVM models indirect calls via a null callee, how should we do this? - assert(!CGM.getLangOpts().ObjCAutoRefCount && "Not supported"); - assert((!TargetDecl || !TargetDecl->hasAttr()) && "NYI"); - assert(!getDebugInfo() && "No debug info yet"); - assert((!TargetDecl || !TargetDecl->hasAttr()) && "NYI"); // 4. Finish the call. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index d21990a313cd..91649d0edafc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1003,6 +1003,17 @@ class CIRGenFunction : public CIRGenTypeCache { GlobalDecl CurSEHParent; bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; } + /// Returns true inside SEH __try blocks. + bool isSEHTryScope() const { return UnimplementedFeature::isSEHTryScope(); } + + mlir::Operation *CurrentFuncletPad = nullptr; + + /// Returns true while emitting a cleanuppad. + bool isCleanupPadScope() const { + assert(!CurrentFuncletPad && "NYI"); + return false; + } + /// Emit code to compute the specified expression, /// ignoring the result. void buildIgnoredExpr(const clang::Expr *E); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index b545d3a4afe6..d39bb3c1b48d 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -137,6 +137,7 @@ struct UnimplementedFeature { static bool operandBundles() { return false; } static bool exceptions() { return false; } static bool metaDataNode() { return false; } + static bool isSEHTryScope() { return false; } }; } // namespace cir From 4fc5469f70e324b249c51ba16bf78755f4339208 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 1 Sep 2023 19:54:40 -0700 Subject: [PATCH 1187/2301] [CIR][CIRGen][NFC] Introduce ConstructAttributeList, to populate cir.call attributes soon --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 61 +++++++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 15 +++++++ 2 files changed, 74 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 7ef9e0c03e6b..1da2641f219b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -308,6 +308,56 @@ static Address emitAddressAtOffset(CIRGenFunction &CGF, Address addr, return addr; } +static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, + const FunctionProtoType *FPT) { + if (!FPT) + return; + + if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && + FPT->isNothrow()) + llvm_unreachable("NoUnwind NYI"); +} + +/// Construct the CIR attribute list of a function or call. +/// +/// When adding an attribute, please consider where it should be handled: +/// +/// - getDefaultFunctionAttributes is for attributes that are essentially +/// part of the global target configuration (but perhaps can be +/// overridden on a per-function basis). Adding attributes there +/// will cause them to also be set in frontends that build on Clang's +/// target-configuration logic, as well as for code defined in library +/// modules such as CUDA's libdevice. +/// +/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes +/// and adds declaration-specific, convention-specific, and +/// frontend-specific logic. The last is of particular importance: +/// attributes that restrict how the frontend generates code must be +/// added here rather than getDefaultFunctionAttributes. +/// +void CIRGenModule::ConstructAttributeList( + StringRef Name, const CIRGenFunctionInfo &FI, CIRGenCalleeInfo CalleeInfo, + llvm::SmallSet &Attrs, bool AttrOnCallSite, + bool IsThunk) { + // Implementation Disclaimer + // + // UnimplementedFeature and asserts are used throughout the code to track + // unsupported and things not yet implemented. However, most of the content of + // this function is on detecting attributes, which doesn't not cope with + // existing approaches to track work because its too big. + // + // That said, for the most part, the approach here is very specific compared + // to the rest of CIRGen and attributes and other handling should be done upon + // demand. + + // Collect function CIR attributes from the CC lowering. + // TODO: NoReturn, cmse_nonsecure_call + + // Collect function CIR attributes from the callee prototype if we have one. + AddAttributesFromFunctionProtoType(astCtx, + CalleeInfo.getCalleeFunctionProtoType()); +} + RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, @@ -490,9 +540,16 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // the call. // if (!CallArgs.getCleanupsToDeactivate().empty()) // deactivateArgCleanupsBeforeCall(*this, CallArgs); - // TODO: Update the largest vector width if any arguments have vector types. - // TODO: Compute the calling convention and attributes. + + // Compute the calling convention and attributes. + llvm::SmallSet Attrs; + StringRef FnName; + if (auto calleeFnOp = dyn_cast(CalleePtr)) + FnName = calleeFnOp.getName(); + CGM.ConstructAttributeList(FnName, CallInfo, Callee.getAbstractInfo(), Attrs, + /*AttrOnCallSite=*/true, + /*IsThunk=*/false); // TODO: strictfp // TODO: Add call-site nomerge, noinline, always_inline attribute if exists. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index ee95e501be85..6a0761ed4445 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -15,6 +15,7 @@ #include "CIRDataLayout.h" #include "CIRGenBuilder.h" +#include "CIRGenCall.h" #include "CIRGenTypeCache.h" #include "CIRGenTypes.h" #include "CIRGenVTables.h" @@ -244,6 +245,20 @@ class CIRGenModule : public CIRGenTypeCache { CastExpr::path_const_iterator Start, CastExpr::path_const_iterator End); + /// Get the CIR attributes and calling convention to use for a particular + /// function type. + /// + /// \param Name - The function name. + /// \param Info - The function type information. + /// \param CalleeInfo - The callee information these attributes are being + /// constructed for. If valid, the attributes applied to this decl may + /// contribute to the function attributes and calling convention. + /// \param Attrs [out] - On return, the attribute list to use. + void ConstructAttributeList(StringRef Name, const CIRGenFunctionInfo &Info, + CIRGenCalleeInfo CalleeInfo, + llvm::SmallSet &Attrs, + bool AttrOnCallSite, bool IsThunk); + /// Will return a global variable of the given type. If a variable with a /// different type already exists then a new variable with the right type /// will be created and all uses of the old variable will be replaced with a From 0e1c4199b1e623ec3ff481083df25b71568a3b18 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 1 Sep 2023 21:55:47 -0700 Subject: [PATCH 1188/2301] [CIR][CIRGen] Add skeleton to handle invoke / landing pad like logic --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 90 +++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 8 +- 3 files changed, 101 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 1da2641f219b..8688d8dcfdd2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -579,10 +579,14 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, CannotThrow = true; } else { // FIXME(cir): pass down nounwind attribute - CannotThrow = true; + CannotThrow = false; } (void)CannotThrow; + // In LLVM this contains the basic block, in CIR we solely track for now. + bool InvokeDest = getInvokeDest(); + (void)InvokeDest; + // TODO: UnusedReturnSizePtr if (const FunctionDecl *FD = dyn_cast_or_null(CurFuncDecl)) assert(!FD->hasAttr() && "NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 3a268af9296e..c6a8df981d44 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -332,4 +332,94 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { // Emit the structure of the EH dispatch for this catch. buildCatchDispatchBlock(*this, CatchScope); llvm_unreachable("NYI"); +} + +/// Check whether this is a non-EH scope, i.e. a scope which doesn't +/// affect exception handling. Currently, the only non-EH scopes are +/// normal-only cleanup scopes. +static bool isNonEHScope(const EHScope &S) { + switch (S.getKind()) { + case EHScope::Cleanup: + return !cast(S).isEHCleanup(); + case EHScope::Filter: + case EHScope::Catch: + case EHScope::Terminate: + return false; + } + + llvm_unreachable("Invalid EHScope Kind!"); +} + +mlir::Block *CIRGenFunction::buildLandingPad() { + assert(EHStack.requiresLandingPad()); + assert(!CGM.getLangOpts().IgnoreExceptions && + "LandingPad should not be emitted when -fignore-exceptions are in " + "effect."); + EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope()); + switch (innermostEHScope.getKind()) { + case EHScope::Terminate: + llvm_unreachable("NYI"); + + case EHScope::Catch: + case EHScope::Cleanup: + case EHScope::Filter: + llvm_unreachable("NYI"); + if (auto *lpad = innermostEHScope.getCachedLandingPad()) + return lpad; + } + + llvm_unreachable("NYI"); +} + +mlir::Block *CIRGenFunction::getInvokeDestImpl() { + assert(EHStack.requiresLandingPad()); + assert(!EHStack.empty()); + + // If exceptions are disabled/ignored and SEH is not in use, then there is no + // invoke destination. SEH "works" even if exceptions are off. In practice, + // this means that C++ destructors and other EH cleanups don't run, which is + // consistent with MSVC's behavior, except in the presence of -EHa + const LangOptions &LO = CGM.getLangOpts(); + if (!LO.Exceptions || LO.IgnoreExceptions) { + if (!LO.Borland && !LO.MicrosoftExt) + return nullptr; + if (!currentFunctionUsesSEHTry()) + return nullptr; + } + + // CUDA device code doesn't have exceptions. + if (LO.CUDA && LO.CUDAIsDevice) + return nullptr; + + // Check the innermost scope for a cached landing pad. If this is + // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad. + auto *LP = EHStack.begin()->getCachedLandingPad(); + if (LP) + return LP; + + const EHPersonality &Personality = EHPersonality::get(*this); + + // FIXME(cir): add personality function + // if (!CurFn->hasPersonalityFn()) + // CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality)); + + if (Personality.usesFuncletPads()) { + // We don't need separate landing pads in the funclet model. + llvm_unreachable("NYI"); + } else { + // Build the landing pad for this scope. + LP = buildLandingPad(); + } + + assert(LP); + + // Cache the landing pad on the innermost scope. If this is a + // non-EH scope, cache the landing pad on the enclosing scope, too. + for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) { + ir->setCachedLandingPad(LP); + if (!isNonEHScope(*ir)) + break; + } + + return LP; } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 91649d0edafc..809018e092a4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1626,12 +1626,16 @@ class CIRGenFunction : public CIRGenTypeCache { bool isConditional() const { return IsConditional; } }; - // TODO(cir): perhaps return a mlir::BasicBlock* here, for now + /// Emits landing pad information for the current EH stack. + mlir::Block *buildLandingPad(); + + // TODO(cir): perhaps return a mlir::Block* here, for now // only check if a landing pad is required. + mlir::Block *getInvokeDestImpl(); bool getInvokeDest() { if (!EHStack.requiresLandingPad()) return false; - return true; + return (bool)getInvokeDestImpl(); } /// Takes the old cleanup stack size and emits the cleanup blocks From 9f12b984e470d35bd0fa42465fbe192c5d96d18a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Sep 2023 17:12:14 -0700 Subject: [PATCH 1189/2301] [CIR][NFC] Add sketch for TryOp --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 58 ++++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 ++++++ 2 files changed, 74 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index c9beaacfd5e6..2db5b567acbe 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1836,6 +1836,64 @@ def AwaitOp : CIR_Op<"await", let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// TryOp +//===----------------------------------------------------------------------===// + +def TryOp : CIR_Op<"try", + [DeclareOpInterfaceMethods, + RecursivelySpeculatable, NoRegionArguments]> { + let summary = ""; + let description = [{ + ```mlir + cir.scope { + // Selector and exception control related allocas + // C++ `try {}` local variable declarations + %except_info = cir.try { + %res0, %exh = cir.call @return_something() + %if %exh + cir.yield %exh + + %exh2 = cir.call @return_void() + %if %exh2 + cir.yield %exh + cir.yield #zero : !except_type + } + ... + cir.br ^cleanup + ^cleanup: + // Run dtors + ... + // Catch based %except_info + cir.catch(%except_info, [ + /*catch A*/ {}, + /*catch B*/ {}, + ... + all {} + ]) + cir.yield + } + ``` + + Note that variables declared inside a `try {}` in C++ will + have their allocas places in the surrounding scope. + }]; + + let regions = (region SizedRegion<1>:$body); + // FIXME: should be exception type. + let results = (outs AnyType:$result); + + let assemblyFormat = [{ + `{` + $body + `}` `:` functional-type(operands, results) attr-dict + }]; + + // Everything already covered elswhere. + let hasVerifier = 0; +} + + //===----------------------------------------------------------------------===// // CopyOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 1b11e6c315a0..0ee67d390c35 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -668,6 +668,22 @@ void ScopeOp::build(OpBuilder &builder, OperationState &result, LogicalResult ScopeOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// TryOp +//===----------------------------------------------------------------------===// + +void TryOp::getSuccessorRegions(mlir::RegionBranchPoint point, + SmallVectorImpl ®ions) { + // The only region always branch back to the parent operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor(this->getODSResults(0))); + return; + } + + // If the condition isn't constant, both regions may be executed. + regions.push_back(RegionSuccessor(&getBody())); +} + //===----------------------------------------------------------------------===// // TernaryOp //===----------------------------------------------------------------------===// From 6b482b5718fdbf49b2235e98dda3b09debdc0a44 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Sep 2023 18:47:03 -0700 Subject: [PATCH 1190/2301] [CIR][NFC] Add sketch for CatchOp --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 45 ++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 143 +++++++++++++++++++ 2 files changed, 188 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 2db5b567acbe..5e139cca99d3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1893,6 +1893,51 @@ def TryOp : CIR_Op<"try", let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// CatchOp +//===----------------------------------------------------------------------===// + +def CatchEntryAttr : AttrDef { + let parameters = (ins "mlir::Type":$exception_type, + "Attribute":$exception_type_info); + let mnemonic = "type"; + let assemblyFormat = "`<` struct(params) `>`"; +} + +def CatchArrayAttr : + TypedArrayAttrBase { + let constBuilderCall = ?; +} + +def CatchOp : CIR_Op<"catch", + [SameVariadicOperandSize, + DeclareOpInterfaceMethods, + RecursivelySpeculatable, NoRegionArguments]> { + let summary = "Catch operation"; + let description = [{ + }]; + + let arguments = (ins AnyType:$exception_info, + OptionalAttr:$catchers); + let regions = (region VariadicRegion:$regions); + + // Already verified elsewhere + let hasVerifier = 0; + + let skipDefaultBuilders = 1; + let builders = [ + OpBuilder<(ins + "function_ref" + :$catchBuilder)> + ]; + + let assemblyFormat = [{ + `(` + $exception_info `:` type($exception_info) `,` + custom($regions, $catchers) + `)` attr-dict + }]; +} //===----------------------------------------------------------------------===// // CopyOp diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 0ee67d390c35..9a6eab428358 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1103,6 +1103,149 @@ void SwitchOp::build( switchBuilder(builder, result.location, result); } +//===----------------------------------------------------------------------===// +// CatchOp +//===----------------------------------------------------------------------===// + +ParseResult +parseCatchOp(OpAsmParser &parser, + llvm::SmallVectorImpl> ®ions, + ::mlir::ArrayAttr &catchersAttr) { + SmallVector catchList; + + auto parseAndCheckRegion = [&]() -> ParseResult { + // Parse region attached to catch + regions.emplace_back(new Region); + Region &currRegion = *regions.back().get(); + auto parserLoc = parser.getCurrentLocation(); + if (parser.parseRegion(currRegion, /*arguments=*/{}, /*argTypes=*/{})) { + regions.clear(); + return failure(); + } + + if (currRegion.empty()) { + return parser.emitError(parser.getCurrentLocation(), + "catch region shall not be empty"); + } + + if (checkBlockTerminator(parser, parserLoc, std::nullopt, &currRegion, + /*ensureTerm=*/false) + .failed()) + return failure(); + return success(); + }; + + auto parseCatchEntry = [&]() -> ParseResult { + mlir::Type exceptionType; + mlir::Attribute exceptionTypeInfo; + + // cir.catch(..., [ + // type (!cir.ptr, @type_info_char_star) { + // ... + // }, + // all { + // ... + // } + // ] + ::llvm::StringRef attrStr; + if (!parser.parseOptionalKeyword(&attrStr, {"all"})) { + if (parser.parseKeyword("type").failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected 'type' keyword here"); + + if (parser.parseLParen().failed()) + return parser.emitError(parser.getCurrentLocation(), "expected '('"); + + if (parser.parseType(exceptionType).failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected valid exception type"); + if (parser.parseAttribute(exceptionTypeInfo).failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected valid RTTI info attribute"); + if (parser.parseRParen().failed()) + return parser.emitError(parser.getCurrentLocation(), "expected ')'"); + } + catchList.push_back(mlir::cir::CatchEntryAttr::get( + parser.getContext(), exceptionType, exceptionTypeInfo)); + + return parseAndCheckRegion(); + }; + + if (parser + .parseCommaSeparatedList(OpAsmParser::Delimiter::Square, + parseCatchEntry, " in catch list") + .failed()) + return failure(); + + catchersAttr = parser.getBuilder().getArrayAttr(catchList); + return ::mlir::success(); +} + +void printCatchOp(OpAsmPrinter &p, CatchOp op, + mlir::MutableArrayRef<::mlir::Region> regions, + mlir::ArrayAttr catchList) { + + int currCatchIdx = 0; + p << "["; + llvm::interleaveComma(catchList, p, [&](const Attribute &a) { + p.printNewline(); + p.increaseIndent(); + auto attr = a.cast(); + auto exType = attr.getExceptionType(); + auto exRtti = attr.getExceptionTypeInfo(); + + if (!exType) { + p << "all"; + } else { + p << "type ("; + p.printType(exType); + p << ", "; + p.printAttribute(exRtti); + p << ") "; + } + p.printNewline(); + p.increaseIndent(); + p.printRegion(regions[currCatchIdx], /*printEntryBLockArgs=*/false, + /*printBlockTerminators=*/true); + currCatchIdx++; + p.decreaseIndent(); + p.decreaseIndent(); + }); + p << "]"; +} + +/// Given the region at `index`, or the parent operation if `index` is None, +/// return the successor regions. These are the regions that may be selected +/// during the flow of control. `operands` is a set of optional attributes +/// that correspond to a constant value for each operand, or null if that +/// operand is not a constant. +void CatchOp::getSuccessorRegions(mlir::RegionBranchPoint point, + SmallVectorImpl ®ions) { + // If any index all the underlying regions branch back to the parent + // operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor()); + return; + } + + // FIXME: optimize, ideas include: + // - If we know a target function never throws a specific type, we can + // remove the catch handler. + // - ??? + + // If the condition isn't constant, all regions may be executed. + for (auto &r : this->getRegions()) + regions.push_back(RegionSuccessor(&r)); +} + +void CatchOp::build( + OpBuilder &builder, OperationState &result, + function_ref catchBuilder) { + assert(catchBuilder && "the builder callback for regions must be present"); + OpBuilder::InsertionGuard guardCatch(builder); + catchBuilder(builder, result.location, result); +} + //===----------------------------------------------------------------------===// // LoopOp //===----------------------------------------------------------------------===// From ada64f102a5cf0d4d84128ce7d8a49593558b91e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Sep 2023 19:20:09 -0700 Subject: [PATCH 1191/2301] [CIR][CIRGen] Create catch op and track entry blocks for each clause Populate regions and basic blocks for handling try-catch! No testcases just yet, new path guarded with unreachable, so effectively this is NFC. --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 28 ++++++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 ++- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index c6a8df981d44..d0ce14db0d1f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -252,7 +252,27 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { } mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { - enterCXXTryStmt(S); + auto tryLoc = getLoc(S.getBeginLoc()); + auto numHandlers = S.getNumHandlers(); + + // FIXME(cir): create scope, and add catchOp to the lastest possible position + // inside the cleanup block. + + // Create the skeleton for the catch statements. + auto catchOp = builder.create( + tryLoc, // FIXME(cir): we can do better source location here. + [&](mlir::OpBuilder &b, mlir::Location loc, + mlir::OperationState &result) { + mlir::OpBuilder::InsertionGuard guard(b); + for (int i = 0, e = numHandlers; i != e; ++i) { + auto *r = result.addRegion(); + builder.createBlock(r); + } + }); + + enterCXXTryStmt(S, catchOp); + llvm_unreachable("NYI"); + if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) return mlir::failure(); exitCXXTryStmt(S); @@ -283,14 +303,16 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, llvm_unreachable("NYI"); } -void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { +void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S, + mlir::cir::CatchOp catchOp, + bool IsFnTryBlock) { unsigned NumHandlers = S.getNumHandlers(); EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers); for (unsigned I = 0; I != NumHandlers; ++I) { const CXXCatchStmt *C = S.getHandler(I); // FIXME: hook the CIR block for the right catch region here. - mlir::Block *Handler = nullptr; // createBasicBlock("catch"); + mlir::Block *Handler = &catchOp.getRegion(I).getBlocks().front(); if (C->getExceptionDecl()) { // FIXME: Dropping the reference type on the type into makes it // impossible to correctly implement catch-by-reference diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 809018e092a4..819a99f81ec7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -992,7 +992,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); mlir::LogicalResult buildCXXTryStmt(const clang::CXXTryStmt &S); - void enterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); + void enterCXXTryStmt(const CXXTryStmt &S, mlir::cir::CatchOp catchOp, + bool IsFnTryBlock = false); void exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); mlir::LogicalResult buildCompoundStmt(const clang::CompoundStmt &S); From 1003cfce9be60b666ecf9a224c8da6076552437b Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Thu, 24 Aug 2023 10:38:52 +0300 Subject: [PATCH 1192/2301] [CIR][CIRGen][Lowering] Generate GlobalViewAttr for string literals (#242). --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 3 + clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 102 +++++++--------- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 +-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 114 ++++++++++-------- clang/test/CIR/CodeGen/globals.c | 5 + clang/test/CIR/CodeGen/globals.cpp | 6 +- clang/test/CIR/IR/global.cir | 5 +- clang/test/CIR/Lowering/globals.cir | 11 +- 11 files changed, 135 insertions(+), 133 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 55bf9e3d4a72..8244518fc01c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -223,6 +223,9 @@ class CIRGenBuilderTy : public mlir::OpBuilder { if (attr.isa()) return true; + if (attr.isa()) + return false; + // TODO(cir): introduce char type in CIR and check for that instead. if (const auto intVal = attr.dyn_cast()) return intVal.isNullValue(); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 63a0f2c27743..8fc013d92791 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1322,7 +1322,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, } LValue CIRGenFunction::buildStringLiteralLValue(const StringLiteral *E) { - auto sym = CGM.getAddrOfConstantStringFromLiteral(E); + auto sym = CGM.getAddrOfConstantStringFromLiteral(E).getSymbol(); auto cstGlobal = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), sym); assert(cstGlobal && "Expected global"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index da1556122bb4..7a204227186e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1008,7 +1008,7 @@ struct ConstantLValue { /*implicit*/ ConstantLValue(mlir::Value value, bool hasOffsetApplied = false) : Value(value), HasOffsetApplied(hasOffsetApplied) {} - /*implicit*/ ConstantLValue(mlir::SymbolRefAttr address) : Value(address) {} + /*implicit*/ ConstantLValue(mlir::cir::GlobalViewAttr address) : Value(address) {} ConstantLValue(std::nullptr_t) : ConstantLValue({}, false) {} ConstantLValue(mlir::Attribute value) : Value(value) {} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 22f27dc06826..e468c5e5fc58 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1098,78 +1098,62 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, return GV; } -// In address space agnostic languages, string literals are in default address -// space in AST. However, certain targets (e.g. amdgcn) request them to be -// emitted in constant address space in LLVM IR. To be consistent with other -// parts of AST, string literal global variables in constant address space -// need to be casted to default address space before being put into address -// map and referenced by other part of CodeGen. -// In OpenCL, string literals are in constant address space in AST, therefore -// they should not be casted to default address space. -static mlir::StringAttr -castStringLiteralToDefaultAddressSpace(CIRGenModule &CGM, mlir::StringAttr GV) { - if (!CGM.getLangOpts().OpenCL) { - auto AS = CGM.getGlobalConstantAddressSpace(); - if (AS != LangAS::Default) - assert(0 && "not implemented"); - } - return GV; -} - /// Return a pointer to a constant array for the given string literal. -mlir::SymbolRefAttr +mlir::cir::GlobalViewAttr CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name) { CharUnits Alignment = astCtx.getAlignOfGlobalVarInChars(S->getType(), /*VD=*/nullptr); mlir::Attribute C = getConstantArrayFromStringLiteral(S); - mlir::cir::GlobalOp Entry; - if (!getLangOpts().WritableStrings) { - if (ConstantStringMap.count(C)) { - auto g = ConstantStringMap[C]; - // The bigger alignment always wins. - if (!g.getAlignment() || - uint64_t(Alignment.getQuantity()) > *g.getAlignment()) - g.setAlignmentAttr(getSize(Alignment)); - return mlir::SymbolRefAttr::get( - castStringLiteralToDefaultAddressSpace(*this, g.getSymNameAttr())); - } - } - SmallString<256> StringNameBuffer = Name; - llvm::raw_svector_ostream Out(StringNameBuffer); - if (StringLiteralCnt) - Out << StringLiteralCnt; - Name = Out.str(); - StringLiteralCnt++; + mlir::cir::GlobalOp GV; + if (!getLangOpts().WritableStrings && ConstantStringMap.count(C)) { + GV = ConstantStringMap[C]; + // The bigger alignment always wins. + if (!GV.getAlignment() || + uint64_t(Alignment.getQuantity()) > *GV.getAlignment()) + GV.setAlignmentAttr(getSize(Alignment)); + } else { + SmallString<256> StringNameBuffer = Name; + llvm::raw_svector_ostream Out(StringNameBuffer); + if (StringLiteralCnt) + Out << StringLiteralCnt; + Name = Out.str(); + StringLiteralCnt++; + + SmallString<256> MangledNameBuffer; + StringRef GlobalVariableName; + auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage; + + // Mangle the string literal if that's how the ABI merges duplicate strings. + // Don't do it if they are writable, since we don't want writes in one TU to + // affect strings in another. + if (getCXXABI().getMangleContext().shouldMangleStringLiteral(S) && + !getLangOpts().WritableStrings) { + assert(0 && "not implemented"); + } else { + LT = mlir::cir::GlobalLinkageKind::InternalLinkage; + GlobalVariableName = Name; + } - SmallString<256> MangledNameBuffer; - StringRef GlobalVariableName; - auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage; + auto loc = getLoc(S->getSourceRange()); + auto typedC = llvm::dyn_cast(C); + if (!typedC) + llvm_unreachable("this should never be untyped at this point"); + GV = generateStringLiteral(loc, typedC, LT, *this, GlobalVariableName, + Alignment); + ConstantStringMap[C] = GV; - // Mangle the string literal if that's how the ABI merges duplicate strings. - // Don't do it if they are writable, since we don't want writes in one TU to - // affect strings in another. - if (getCXXABI().getMangleContext().shouldMangleStringLiteral(S) && - !getLangOpts().WritableStrings) { - assert(0 && "not implemented"); - } else { - LT = mlir::cir::GlobalLinkageKind::InternalLinkage; - GlobalVariableName = Name; + assert(!cir::UnimplementedFeature::reportGlobalToASan() && "NYI"); } - auto loc = getLoc(S->getSourceRange()); - auto typedC = llvm::dyn_cast(C); - if (!typedC) - llvm_unreachable("this should never be untyped at this point"); - auto GV = generateStringLiteral(loc, typedC, LT, *this, GlobalVariableName, - Alignment); - ConstantStringMap[C] = GV; + auto ArrayTy = GV.getSymType().dyn_cast(); + assert(ArrayTy && "String literal must be array"); + auto PtrTy = + mlir::cir::PointerType::get(builder.getContext(), ArrayTy.getEltType()); - assert(!cir::UnimplementedFeature::reportGlobalToASan() && "NYI"); - return mlir::SymbolRefAttr::get( - castStringLiteralToDefaultAddressSpace(*this, GV.getSymNameAttr())); + return builder.getGlobalViewAttr(PtrTy, GV); } void CIRGenModule::buildDeclContext(const DeclContext *DC) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 6a0761ed4445..414e91f29649 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -304,7 +304,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Return a global symbol reference to a constant array for the given string /// literal. - mlir::SymbolRefAttr + mlir::cir::GlobalViewAttr getAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name = ".str"); unsigned StringLiteralCnt = 0; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9a6eab428358..3877f6e9e871 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1343,8 +1343,6 @@ static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, // This also prints the type... if (initAttr) printConstant(p, initAttr); - if (initAttr.isa()) - printType(); } if (!dtorRegion.empty()) { @@ -1395,16 +1393,10 @@ static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, if (parseConstantValue(parser, initialValueAttr).failed()) return failure(); - if (auto sra = initialValueAttr.dyn_cast()) { - if (parser.parseColonType(opTy)) - return failure(); - } else { - // Handle StringAttrs - assert(initialValueAttr.isa() && - "Non-typed attrs shouldn't appear here."); - auto typedAttr = initialValueAttr.cast(); - opTy = typedAttr.getType(); - } + assert(initialValueAttr.isa() && + "Non-typed attrs shouldn't appear here."); + auto typedAttr = initialValueAttr.cast(); + opTy = typedAttr.getType(); } // Parse destructor, example: diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e88101c9b2a0..70cd0c58ceb1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -79,57 +79,62 @@ namespace direct { /// Switches on the type of attribute and calls the appropriate conversion. inline mlir::Value -lowerCirAttrAsValue(mlir::Attribute attr, mlir::Location loc, +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter); /// IntAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::cir::IntAttr intAttr, mlir::Location loc, +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::IntAttr intAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(intAttr.getType()), intAttr.getValue()); } /// NullAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::cir::NullAttr nullAttr, mlir::Location loc, +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::NullAttr nullAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(nullAttr.getType())); } /// FloatAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::FloatAttr fltAttr, mlir::Location loc, +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::FloatAttr fltAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(fltAttr.getType()), fltAttr.getValue()); } /// ZeroAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::cir::ZeroAttr zeroAttr, mlir::Location loc, +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ZeroAttr zeroAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(zeroAttr.getType())); } /// ConstStruct visitor. -mlir::Value lowerCirAttrAsValue(mlir::cir::ConstStructAttr constStruct, - mlir::Location loc, +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::ConstStructAttr constStruct, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(constStruct.getType()); + auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); // Iteratively lower each constant element of the struct. for (auto [idx, elt] : llvm::enumerate(constStruct.getMembers())) { - mlir::Value init = lowerCirAttrAsValue(elt, loc, rewriter, converter); + mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); result = rewriter.create(loc, result, init, idx); } @@ -137,17 +142,19 @@ mlir::Value lowerCirAttrAsValue(mlir::cir::ConstStructAttr constStruct, } // ArrayAttr visitor. -mlir::Value lowerCirAttrAsValue(mlir::cir::ConstArrayAttr constArr, - mlir::Location loc, +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::ConstArrayAttr constArr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(constArr.getType()); + auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); // Iteratively lower each constant element of the array. if (auto arrayAttr = constArr.getElts().dyn_cast()) { for (auto [idx, elt] : llvm::enumerate(arrayAttr)) { - mlir::Value init = lowerCirAttrAsValue(elt, loc, rewriter, converter); + mlir::Value init = + lowerCirAttrAsValue(parentOp, elt, rewriter, converter); result = rewriter.create(loc, result, init, idx); } @@ -171,25 +178,56 @@ mlir::Value lowerCirAttrAsValue(mlir::cir::ConstArrayAttr constArr, return result; } +// GlobalViewAttr visitor. +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::GlobalViewAttr globalAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto module = parentOp->getParentOfType(); + auto sourceSymbol = dyn_cast( + mlir::SymbolTable::lookupSymbolIn(module, globalAttr.getSymbol())); + assert(sourceSymbol && "Unlowered GlobalOp"); + auto loc = parentOp->getLoc(); + + auto addressOfOp = rewriter.create( + loc, mlir::LLVM::LLVMPointerType::get(sourceSymbol.getContext()), + sourceSymbol.getSymName()); + + assert(!globalAttr.getIndices() && "TODO"); + + auto ptrTy = globalAttr.getType().dyn_cast(); + assert(ptrTy && "Expecting pointer type in GlobalViewAttr"); + auto llvmEltTy = converter->convertType(ptrTy.getPointee()); + + if (llvmEltTy == sourceSymbol.getType()) + return addressOfOp; + + auto llvmDstTy = converter->convertType(globalAttr.getType()); + return rewriter.create(parentOp->getLoc(), llvmDstTy, + addressOfOp.getResult()); +} + /// Switches on the type of attribute and calls the appropriate conversion. inline mlir::Value -lowerCirAttrAsValue(mlir::Attribute attr, mlir::Location loc, +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { if (const auto intAttr = attr.dyn_cast()) - return lowerCirAttrAsValue(intAttr, loc, rewriter, converter); + return lowerCirAttrAsValue(parentOp, intAttr, rewriter, converter); if (const auto fltAttr = attr.dyn_cast()) - return lowerCirAttrAsValue(fltAttr, loc, rewriter, converter); + return lowerCirAttrAsValue(parentOp, fltAttr, rewriter, converter); if (const auto nullAttr = attr.dyn_cast()) - return lowerCirAttrAsValue(nullAttr, loc, rewriter, converter); + return lowerCirAttrAsValue(parentOp, nullAttr, rewriter, converter); if (const auto constStruct = attr.dyn_cast()) - return lowerCirAttrAsValue(constStruct, loc, rewriter, converter); + return lowerCirAttrAsValue(parentOp, constStruct, rewriter, converter); if (const auto constArr = attr.dyn_cast()) - return lowerCirAttrAsValue(constArr, loc, rewriter, converter); + return lowerCirAttrAsValue(parentOp, constArr, rewriter, converter); if (const auto boolAttr = attr.dyn_cast()) llvm_unreachable("bool attribute is NYI"); if (const auto zeroAttr = attr.dyn_cast()) - return lowerCirAttrAsValue(zeroAttr, loc, rewriter, converter); + return lowerCirAttrAsValue(parentOp, zeroAttr, rewriter, converter); + if (const auto globalAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, globalAttr, rewriter, converter); llvm_unreachable("unhandled attribute type"); } @@ -956,7 +994,7 @@ class CIRConstantLowering // define a local constant with llvm.undef that will be stored into the // stack. auto initVal = - lowerCirAttrAsValue(structAttr, op.getLoc(), rewriter, typeConverter); + lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter); rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); @@ -1293,8 +1331,8 @@ class CIRGlobalOpLowering if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( - op->getLoc(), lowerCirAttrAsValue(constArr, op->getLoc(), - rewriter, typeConverter)); + op->getLoc(), + lowerCirAttrAsValue(op, constArr, rewriter, typeConverter)); return mlir::success(); } } else { @@ -1310,50 +1348,26 @@ class CIRGlobalOpLowering // Initializer is a constant integer: convert to MLIR builtin constant. else if (auto intAttr = init.value().dyn_cast()) { init = rewriter.getIntegerAttr(llvmType, intAttr.getValue()); - } - // Initializer is a global: load global value in initializer block. - else if (auto attr = init.value().dyn_cast()) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - - // Fetch global used as initializer. - auto sourceSymbol = - dyn_cast(mlir::SymbolTable::lookupSymbolIn( - op->getParentOfType(), attr.getValue())); - - // Load and return the initializer value. - auto addressOfOp = rewriter.create( - loc, mlir::LLVM::LLVMPointerType::get(getContext()), - sourceSymbol.getSymName()); - llvm::SmallVector offset{0}; - auto gepOp = rewriter.create( - loc, llvmType, sourceSymbol.getType(), addressOfOp.getResult(), - offset); - rewriter.create(loc, gepOp.getResult()); - return mlir::success(); } else if (isa(init.value())) { // TODO(cir): once LLVM's dialect has a proper zeroinitializer attribute // this should be updated. For now, we use a custom op to initialize // globals to zero. setupRegionInitializedLLVMGlobalOp(op, rewriter); auto value = - lowerCirAttrAsValue(init.value(), loc, rewriter, typeConverter); + lowerCirAttrAsValue(op, init.value(), rewriter, typeConverter); rewriter.create(loc, value); return mlir::success(); } else if (const auto structAttr = init.value().dyn_cast()) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( - op->getLoc(), lowerCirAttrAsValue(structAttr, op->getLoc(), rewriter, - typeConverter)); + op->getLoc(), + lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter)); return mlir::success(); } else if (auto attr = init.value().dyn_cast()) { setupRegionInitializedLLVMGlobalOp(op, rewriter); - - // Return the address of the global symbol. - auto elementType = typeConverter->convertType(attr.getType()); - auto addrOfOp = rewriter.create( - op->getLoc(), elementType, attr.getSymbol()); - rewriter.create(op->getLoc(), addrOfOp.getResult()); + rewriter.create( + loc, lowerCirAttrAsValue(op, attr, rewriter, typeConverter)); return mlir::success(); } else { op.emitError() << "usupported initializer '" << init.value() << "'"; diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index cb5e0d978d8d..f917735593e4 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -43,6 +43,11 @@ struct { } nestedString = {"1", "", "\0"}; // CHECK: cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array}> +struct { + char *name; +} nestedStringPtr = {"1"}; +// CHECK: cir.global external @nestedStringPtr = #cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr}> + // TODO: test tentatives with internal linkage. // Tentative definition is THE definition. Should be zero-initialized. diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index b14cfe46be46..4ffa1ab35c1a 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -39,12 +39,12 @@ int use_func() { return func(); } // CHECK-NEXT: cir.global external @alpha = #cir.const_array<[#cir.int<97> : !s8i, #cir.int<98> : !s8i, #cir.int<99> : !s8i, #cir.int<0> : !s8i]> : !cir.array // CHECK-NEXT: cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK-NEXT: cir.global external @s = @".str": !cir.ptr +// CHECK-NEXT: cir.global external @s = #cir.global_view<@".str"> : !cir.ptr // CHECK-NEXT: cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK-NEXT: cir.global external @s1 = @".str1": !cir.ptr +// CHECK-NEXT: cir.global external @s1 = #cir.global_view<@".str1"> : !cir.ptr -// CHECK-NEXT: cir.global external @s2 = @".str": !cir.ptr +// CHECK-NEXT: cir.global external @s2 = #cir.global_view<@".str"> : !cir.ptr // CHECK: cir.func @_Z10use_globalv() // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["li", init] {alignment = 4 : i64} diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index b673483de498..0f6e3f6efef3 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -12,7 +12,7 @@ module { cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} cir.global "private" internal @c : !s32i cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} - cir.global external @s = @".str2": !cir.ptr + cir.global external @s = #cir.global_view<@".str2"> : !cir.ptr cir.func @use_global() { %0 = cir.get_global @a : cir.ptr cir.return @@ -50,7 +50,8 @@ module { // CHECK: cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} // CHECK: cir.global "private" internal @c : !s32i // CHECK: cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK: cir.global external @s = @".str2": !cir.ptr +// CHECK: cir.global external @s = #cir.global_view<@".str2"> : !cir.ptr + // CHECK: cir.func @use_global() // CHECK-NEXT: %0 = cir.get_global @a : cir.ptr diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 7eb3d772ede3..fd01a6611988 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -13,6 +13,7 @@ !ty_22A22 = !cir.struct x 2>} #cir.recdecl.ast> !ty_22Bar22 = !cir.struct !ty_22StringStruct22 = !cir.struct, !cir.array, !cir.array} #cir.recdecl.ast> +!ty_22StringStructPtr22 = !cir.struct} #cir.recdecl.ast> module { cir.global external @a = #cir.int<3> : !s32i @@ -23,11 +24,11 @@ module { cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array cir.global external @alpha = #cir.const_array<[#cir.int<97> : !s8i, #cir.int<98> : !s8i, #cir.int<99> : !s8i, #cir.int<0> : !s8i]> : !cir.array cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} - cir.global external @s = @".str": !cir.ptr + cir.global external @s = #cir.global_view<@".str"> : !cir.ptr // MLIR: llvm.mlir.global internal constant @".str"("example\00") {addr_space = 0 : i32} // MLIR: llvm.mlir.global external @s() {addr_space = 0 : i32} : !llvm.ptr { // MLIR: %0 = llvm.mlir.addressof @".str" : !llvm.ptr - // MLIR: %1 = llvm.getelementptr %0[0] : (!llvm.ptr) -> !llvm.ptr + // MLIR: %1 = llvm.bitcast %0 : !llvm.ptr to !llvm.ptr // MLIR: llvm.return %1 : !llvm.ptr // MLIR: } // LLVM: @.str = internal constant [8 x i8] c"example\00" @@ -38,8 +39,8 @@ module { // MLIR: llvm.return %0 : !llvm.ptr // MLIR: } cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} - cir.global external @s1 = @".str1": !cir.ptr - cir.global external @s2 = @".str": !cir.ptr + cir.global external @s1 = #cir.global_view<@".str1"> : !cir.ptr + cir.global external @s2 = #cir.global_view<@".str"> : !cir.ptr cir.func @_Z10use_globalv() { %0 = cir.alloca !s32i, cir.ptr , ["li", init] {alignment = 4 : i64} %1 = cir.get_global @a : cir.ptr @@ -94,6 +95,8 @@ module { // LLVM: @nestedTwoDim = global %struct.A { i32 1, [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 2, i32 3], [2 x i32] [i32 4, i32 5{{\]\]}} } cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array}> : !ty_22StringStruct22 // LLVM: @nestedString = global %struct.StringStruct { [3 x i8] c"1\00\00", [3 x i8] zeroinitializer, [3 x i8] zeroinitializer } + cir.global external @nestedStringPtr = #cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr}> : !ty_22StringStructPtr22 + // LLVM: @nestedStringPtr = global %struct.StringStructPtr { ptr @.str } cir.func @_Z11get_globalsv() { %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} %1 = cir.alloca !cir.ptr, cir.ptr >, ["u", init] {alignment = 8 : i64} From 7408f5b99301f976771f68e88a8c26b62fb94fea Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 8 Sep 2023 00:04:01 +0300 Subject: [PATCH 1193/2301] [CIR][CIRGen] Removes hasBooleanRepresentation (#251) This PR removes the method `hasBooleanRepresentation` as was discussed in [PR#233](https://github.com/llvm/clangir/pull/233) Briefly, the `cir.bool` has the same representation in memory and after load. The LLVM IR differs, that's why the check is there, in the origin `CodeGen`. Also, in order to trigger the path and make the implementation to be conform with the original `CodeGen`, there are changes in the `CIRGenExprScalar`: call the common `buildFromLValue` instead of manaul `load` creation. Also, a couple of tests for the bool load/store added --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 6 ++--- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5 +--- clang/test/CIR/CodeGen/bool.c | 31 ++++++++++++++++++++++ 3 files changed, 34 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/bool.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 8fc013d92791..5265760ecbdc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -9,7 +9,6 @@ // This contains code to emit Expr nodes as CIR code. // //===----------------------------------------------------------------------===// - #include "CIRGenBuilder.h" #include "CIRGenCXXABI.h" #include "CIRGenCall.h" @@ -2210,9 +2209,8 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, } mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) { - // Bool has a different representation in memory than in registers. - if (hasBooleanRepresentation(Ty)) { - llvm_unreachable("NYI"); + if (!Ty->isBooleanType() && hasBooleanRepresentation(Ty)) { + llvm_unreachable("NIY"); } return Value; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 3a136df08054..9eec9aad1d46 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -212,11 +212,8 @@ class ScalarExprEmitter : public StmtVisitor { /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { LValue LV = CGF.buildLValue(E); - auto load = Builder.create(CGF.getLoc(E->getExprLoc()), - CGF.getCIRType(E->getType()), - LV.getPointer()); // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); - return load; + return CGF.buildLoadOfLValue(LV, E->getExprLoc()).getScalarVal(); } mlir::Value buildLoadOfLValue(LValue LV, SourceLocation Loc) { diff --git a/clang/test/CIR/CodeGen/bool.c b/clang/test/CIR/CodeGen/bool.c new file mode 100644 index 000000000000..f1e487f35223 --- /dev/null +++ b/clang/test/CIR/CodeGen/bool.c @@ -0,0 +1,31 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include + +typedef struct { + bool x; +} S; + +// CHECK: cir.func @store_bool +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(int_to_bool, [[TMP1]] : !s32i), !cir.bool +// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.get_member [[TMP3]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: cir.store [[TMP2]], [[TMP4]] : !cir.bool, cir.ptr +void store_bool(S *s) { + s->x = false; +} + +// CHECK: cir.func @load_bool +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.bool, cir.ptr , ["x", init] {alignment = 1 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : cir.ptr , !cir.bool +void load_bool(S *s) { + bool x = s->x; +} \ No newline at end of file From 8a724da2cf9e63f366ad5f773f1edbfd6373a6f8 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Tue, 12 Sep 2023 16:23:57 -0700 Subject: [PATCH 1194/2301] [CIR] Introduce ConstPtrAttr for absolute pointer value initialization. (#253) Introducing `cir.ConstPtrAttr` to represent arbitrary absolute pointer value initializations. Also incorporating previous `cir.nullptr` effort into this work. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 43 ++++++++++--------- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 13 +++--- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 7 +-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 7 +-- clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 7 +-- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 37 ++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 +- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 3 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 35 +++++++++------ clang/test/CIR/CodeGen/String.cpp | 6 +-- clang/test/CIR/CodeGen/agg-init.cpp | 2 +- clang/test/CIR/CodeGen/basic.cpp | 6 +-- clang/test/CIR/CodeGen/constptr.c | 8 ++++ clang/test/CIR/CodeGen/coro-task.cpp | 8 ++-- clang/test/CIR/CodeGen/derived-to-base.cpp | 4 +- clang/test/CIR/CodeGen/pointer.cpp | 2 +- clang/test/CIR/CodeGen/struct.c | 2 +- clang/test/CIR/CodeGen/struct.cpp | 2 +- clang/test/CIR/CodeGen/types-nullptr.cpp | 4 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/IR/global.cir | 2 +- clang/test/CIR/IR/invalid.cir | 4 +- clang/test/CIR/IR/vtableAttr.cir | 4 +- clang/test/CIR/Lowering/globals.cir | 2 +- clang/test/CIR/Lowering/struct.cir | 26 +++++------ clang/test/CIR/Lowering/types.cir | 4 +- 28 files changed, 155 insertions(+), 95 deletions(-) create mode 100644 clang/test/CIR/CodeGen/constptr.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index af73fc889be6..0de74011f18a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -69,27 +69,6 @@ def LangAttr : CIR_Attr<"Lang", "lang"> { let genVerifyDecl = 0; } -//===----------------------------------------------------------------------===// -// NullAttr -//===----------------------------------------------------------------------===// - -def NullAttr : CIR_Attr<"Null", "null", [TypedAttrInterface]> { - let summary = "A simple attr to represent nullptr"; - let description = [{ - The NullAttr represents the value of nullptr within cir. - }]; - - let parameters = (ins AttributeSelfTypeParameter<"">:$type); - - let builders = [ - AttrBuilderWithInferredContext<(ins "Type":$type), [{ - return $_get(type.getContext(), type); - }]> - ]; - - let assemblyFormat = [{}]; -} - //===----------------------------------------------------------------------===// // BoolAttr //===----------------------------------------------------------------------===// @@ -220,6 +199,28 @@ def IntAttr : CIR_Attr<"Int", "int", [TypedAttrInterface]> { let hasCustomAssemblyFormat = 1; } +//===----------------------------------------------------------------------===// +// ConstPointerAttr +//===----------------------------------------------------------------------===// + +def ConstPtrAttr : CIR_Attr<"ConstPtr", "ptr", [TypedAttrInterface]> { + let summary = "Holds a constant pointer value"; + let parameters = (ins AttributeSelfTypeParameter<"">:$type, "uint64_t":$value); + let description = [{ + A pointer attribute is a literal attribute that represents an integral + value of a pointer type. + }]; + let builders = [ + AttrBuilderWithInferredContext<(ins "Type":$type, "uint64_t":$value), [{ + return $_get(type.getContext(), type, value); + }]>, + ]; + let extraClassDeclaration = [{ + bool isNullValue() const { return getValue() == 0; } + }]; + let hasCustomAssemblyFormat = 1; +} + //===----------------------------------------------------------------------===// // SignedOverflowBehaviorAttr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 5e139cca99d3..909283bbe692 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -244,7 +244,9 @@ def ConstantOp : CIR_Op<"const", let extraClassDeclaration = [{ bool isNullPtr() { - return getValue().isa(); + if (const auto ptrAttr = getValue().dyn_cast()) + return ptrAttr.isNullValue(); + return false; } }]; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 8244518fc01c..572b85be4c2c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -133,9 +133,9 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return mlir::cir::BoolAttr::get(getContext(), getBoolTy(), state); } - mlir::TypedAttr getNullPtrAttr(mlir::Type t) { + mlir::TypedAttr getConstPtrAttr(mlir::Type t, uint64_t v) { assert(t.isa() && "expected cir.ptr"); - return mlir::cir::NullAttr::get(getContext(), t); + return mlir::cir::ConstPtrAttr::get(getContext(), t, v); } mlir::cir::ConstArrayAttr getString(llvm::StringRef str, mlir::Type eltTy, @@ -211,7 +211,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { if (auto arrTy = ty.dyn_cast()) return getZeroAttr(arrTy); if (auto ptrTy = ty.dyn_cast()) - return getNullPtrAttr(ptrTy); + return getConstPtrAttr(ptrTy, 0); if (auto structTy = ty.dyn_cast()) return getZeroAttr(structTy); llvm_unreachable("Zero initializer for given type is NYI"); @@ -220,8 +220,10 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // TODO(cir): Once we have CIR float types, replace this by something like a // NullableValueInterface to allow for type-independent queries. bool isNullValue(mlir::Attribute attr) const { - if (attr.isa()) + if (attr.isa()) return true; + if (const auto ptrVal = attr.dyn_cast()) + return ptrVal.isNullValue(); if (attr.isa()) return false; @@ -471,7 +473,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { // Creates constant nullptr for pointer type ty. mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { - return create(loc, ty, getNullPtrAttr(ty)); + return create(loc, ty, getConstPtrAttr(ty, 0)); } // Creates constant null value for integral type ty. @@ -727,5 +729,4 @@ class CIRGenBuilderTy : public mlir::OpBuilder { }; } // namespace cir - #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 7a204227186e..a823f3fd660a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1119,11 +1119,8 @@ mlir::Attribute ConstantLValueEmitter::tryEmitAbsolute(mlir::Type destTy) { // If we're producing a pointer, this is easy. auto destPtrTy = destTy.dyn_cast(); assert(destPtrTy && "expected !cir.ptr type"); - if (Value.isNullPointer()) { - // FIXME: integer offsets from non-zero null pointers. - return CGM.getBuilder().getNullPtrAttr(destPtrTy); - } - llvm_unreachable("NYI"); + return CGM.getBuilder().getConstPtrAttr( + destPtrTy, Value.getLValueOffset().getQuantity()); } ConstantLValue diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 9eec9aad1d46..5a48e44f61eb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1395,7 +1395,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { mlir::Type Ty = CGF.getCIRType(DestTy); return Builder.create( CGF.getLoc(E->getExprLoc()), Ty, - mlir::cir::NullAttr::get(Builder.getContext(), Ty)); + mlir::cir::ConstPtrAttr::get(Builder.getContext(), Ty, 0)); } case CK_NullToMemberPointer: diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 04fef71973d1..e49c0454734a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -17,6 +17,7 @@ #include "clang/AST/CXXInheritance.h" #include "clang/AST/RecordLayout.h" #include "clang/Basic/CodeGenOptions.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "clang/CodeGen/ConstantInitBuilder.h" @@ -159,8 +160,8 @@ static void AddPointerLayoutOffset(CIRGenModule &CGM, ConstantArrayBuilder &builder, CharUnits offset) { assert(offset.getQuantity() == 0 && "NYI"); - builder.add(mlir::cir::NullAttr::get(CGM.getBuilder().getContext(), - CGM.getBuilder().getUInt8PtrTy())); + builder.add(mlir::cir::ConstPtrAttr::get( + CGM.getBuilder().getContext(), CGM.getBuilder().getUInt8PtrTy(), 0)); } static void AddRelativeLayoutOffset(CIRGenModule &CGM, @@ -466,4 +467,4 @@ bool CIRGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) { return false; return !AlwaysHasLTOVisibilityPublic(RD); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h index b4ff54e835be..7a32aa591182 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -23,6 +23,7 @@ #include "CIRGenBuilder.h" #include "ConstantInitFuture.h" +#include #include using namespace clang; @@ -194,9 +195,9 @@ class ConstantAggregateBuilderBase { llvm::APInt{intTy.getWidth(), value, isSigned})); } - /// Add a null pointer of a specific type. - void addNullPointer(mlir::cir::PointerType ptrTy) { - add(mlir::cir::NullAttr::get(ptrTy.getContext(), ptrTy)); + /// Add a pointer of a specific type. + void addPointer(mlir::cir::PointerType ptrTy, uint64_t value) { + add(mlir::cir::ConstPtrAttr::get(ptrTy.getContext(), ptrTy, value)); } /// Add a bitcast of a value to a specific type. diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 538653c60988..64c68b5f8ce1 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -156,6 +156,43 @@ void LangAttr::print(AsmPrinter &printer) const { printer << "<" << getLang().getValue() << '>'; } +//===----------------------------------------------------------------------===// +// ConstPtrAttr definitions +//===----------------------------------------------------------------------===// + +Attribute ConstPtrAttr::parse(AsmParser &parser, Type odsType) { + uint64_t value; + + if (!odsType.isa()) + return {}; + + // Consume the '<' symbol. + if (parser.parseLess()) + return {}; + + if (parser.parseKeyword("null").succeeded()) { + value = 0; + } else { + if (parser.parseInteger(value)) + parser.emitError(parser.getCurrentLocation(), "expected integer value"); + } + + // Consume the '>' symbol. + if (parser.parseGreater()) + return {}; + + return ConstPtrAttr::get(odsType, value); +} + +void ConstPtrAttr::print(AsmPrinter &printer) const { + printer << '<'; + if (isNullValue()) + printer << "null"; + else + printer << getValue(); + printer << '>'; +} + //===----------------------------------------------------------------------===// // IntAttr definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 3877f6e9e871..a437d308c4da 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -166,7 +166,7 @@ void AllocaOp::build(::mlir::OpBuilder &odsBuilder, static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, mlir::Attribute attrType) { - if (attrType.isa()) { + if (attrType.isa()) { if (opType.isa<::mlir::cir::PointerType>()) return success(); return op->emitOpError("nullptr expects pointer type"); @@ -2420,7 +2420,7 @@ VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, if (auto arrayElts = constArrayAttr.getElts().dyn_cast()) { arrayElts.walkImmediateSubElements( [&](Attribute attr) { - if (attr.isa() || attr.isa()) + if (attr.isa() || attr.isa()) return; emitError() << "expected GlobalViewAttr attribute"; eltTypeCheck = failure(); diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index d0f16983bb3a..c6313db69d11 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -12,6 +12,7 @@ #include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclTemplate.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" @@ -1168,7 +1169,7 @@ void LifetimeCheckPass::updatePointsToForConstStruct( auto fieldAddr = aggregates[addr][memberIdx]; // Unseen fields are not tracked. if (fieldAddr && ta.getType().isa()) { - assert(ta.isa() && + assert(ta.isa() && "other than null not implemented"); markPsetNull(fieldAddr, loc); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 70cd0c58ceb1..541e0b150adc 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -93,14 +93,21 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::IntAttr intAttr, loc, converter->convertType(intAttr.getType()), intAttr.getValue()); } -/// NullAttr visitor. +/// ConstPtrAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::NullAttr nullAttr, +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ConstPtrAttr ptrAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto loc = parentOp->getLoc(); - return rewriter.create( - loc, converter->convertType(nullAttr.getType())); + if (ptrAttr.isNullValue()) { + return rewriter.create( + loc, converter->convertType(ptrAttr.getType())); + } else { + mlir::Value ptrVal = rewriter.create( + loc, rewriter.getI64Type(), ptrAttr.getValue()); + return rewriter.create( + loc, converter->convertType(ptrAttr.getType()), ptrVal); + } } /// FloatAttr visitor. @@ -216,8 +223,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, return lowerCirAttrAsValue(parentOp, intAttr, rewriter, converter); if (const auto fltAttr = attr.dyn_cast()) return lowerCirAttrAsValue(parentOp, fltAttr, rewriter, converter); - if (const auto nullAttr = attr.dyn_cast()) - return lowerCirAttrAsValue(parentOp, nullAttr, rewriter, converter); + if (const auto ptrAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, ptrAttr, rewriter, converter); if (const auto constStruct = attr.dyn_cast()) return lowerCirAttrAsValue(parentOp, constStruct, rewriter, converter); if (const auto constArr = attr.dyn_cast()) @@ -616,7 +623,8 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { case mlir::cir::CastKind::ptr_to_bool: { auto null = rewriter.create( src.getLoc(), castOp.getSrc().getType(), - mlir::cir::NullAttr::get(castOp.getSrc().getType())); + mlir::cir::ConstPtrAttr::get(getContext(), castOp.getSrc().getType(), + 0)); rewriter.replaceOpWithNewOp( castOp, mlir::cir::BoolType::get(getContext()), mlir::cir::CmpOpKind::ne, castOp.getSrc(), null); @@ -962,10 +970,12 @@ class CIRConstantLowering attr = op.getValue(); } else if (op.getType().isa()) { // Optimize with dedicated LLVM op for null pointers. - if (op.getValue().isa()) { - rewriter.replaceOpWithNewOp( - op, typeConverter->convertType(op.getType())); - return mlir::success(); + if (op.getValue().isa()) { + if (op.getValue().cast().isNullValue()) { + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType())); + return mlir::success(); + } } attr = op.getValue(); } @@ -1348,7 +1358,8 @@ class CIRGlobalOpLowering // Initializer is a constant integer: convert to MLIR builtin constant. else if (auto intAttr = init.value().dyn_cast()) { init = rewriter.getIntegerAttr(llvmType, intAttr.getValue()); - } else if (isa(init.value())) { + } else if (isa( + init.value())) { // TODO(cir): once LLVM's dialect has a proper zeroinitializer attribute // this should be updated. For now, we use a custom op to initialize // globals to zero. diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index e41e0fc72211..576eed964761 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -22,7 +22,7 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 // CHECK-NEXT: %2 = cir.get_member %1[0] {name = "storage"} -// CHECK-NEXT: %3 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %3 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > // CHECK-NEXT: %4 = cir.get_member %1[1] {name = "size"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i @@ -37,7 +37,7 @@ void test() { // CHECK-NEXT: cir.store %arg1, %1 // CHECK-NEXT: %2 = cir.load %0 // CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} -// CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) +// CHECK-NEXT: %4 = cir.const(#cir.ptr : !cir.ptr) // CHECK-NEXT: cir.store %4, %3 // CHECK-NEXT: %5 = cir.get_member %2[1] {name = "size"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i @@ -53,7 +53,7 @@ void test() { // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr // CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} : !cir.ptr -> !cir.ptr> -// CHECK-NEXT: %4 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %4 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index c7286a5cf8f0..14763710a001 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -66,7 +66,7 @@ void yo() { // CHECK: cir.func @_Z2yov() // CHECK: %0 = cir.alloca !ty_22Yo22, cir.ptr , ["ext"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !ty_22Yo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} -// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.null : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22Yo22) : !ty_22Yo22 +// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.ptr : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22Yo22) : !ty_22Yo22 // CHECK: cir.store %2, %0 : !ty_22Yo22, cir.ptr // CHECK: %3 = cir.get_member %1[0] {name = "type"} : !cir.ptr -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 7cb85671cae8..83c423ea917c 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -8,7 +8,7 @@ int *p0() { // CHECK: cir.func @_Z2p0v() -> !cir.ptr // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] -// CHECK: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > int *p1() { @@ -19,7 +19,7 @@ int *p1() { // CHECK: cir.func @_Z2p1v() -> !cir.ptr // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p"] -// CHECK: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > int *p2() { @@ -36,7 +36,7 @@ int *p2() { // CHECK: cir.func @_Z2p2v() -> !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] {alignment = 8 : i64} -// CHECK-NEXT: %2 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK-NEXT: cir.store %2, %1 : !cir.ptr, cir.ptr > // CHECK-NEXT: cir.scope { // CHECK-NEXT: %7 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/constptr.c b/clang/test/CIR/CodeGen/constptr.c new file mode 100644 index 000000000000..b400cb8c444f --- /dev/null +++ b/clang/test/CIR/CodeGen/constptr.c @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +int *p = (int*)0x1234; + + +// CIR: cir.global external @p = #cir.ptr<4660> : !cir.ptr +// LLVM: @p = global ptr inttoptr (i64 4660 to ptr) diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index b07373fd02f4..108d4947da1d 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -158,7 +158,7 @@ VoidTask silly_task() { // Get coroutine id with __builtin_coro_id. -// CHECK: %[[#NullPtr:]] = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %[[#NullPtr:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK: %[[#Align:]] = cir.const(#cir.int<16> : !u32i) : !u32i // CHECK: %[[#CoroId:]] = cir.call @__builtin_coro_id(%[[#Align]], %[[#NullPtr]], %[[#NullPtr]], %[[#NullPtr]]) @@ -264,7 +264,7 @@ VoidTask silly_task() { // Call builtin coro end and return -// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const(#cir.null : !cir.ptr) +// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const(#cir.ptr : !cir.ptr) // CHECK-NEXT: %[[#CoroEndArg1:]] = cir.const(#false) : !cir.bool // CHECK-NEXT: = cir.call @__builtin_coro_end(%[[#CoroEndArg0]], %[[#CoroEndArg1]]) @@ -363,7 +363,7 @@ folly::coro::Task go4() { // Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` // CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> -// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> +// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> // CHECK: cir.yield %19 : !cir.ptr)>> // CHECK: } // CHECK: cir.store %12, %3 : !cir.ptr)>>, cir.ptr )>>> @@ -382,4 +382,4 @@ folly::coro::Task go4() { // CHECK: }, suspend : { // CHECK: }, resume : { // CHECK: },) -// CHECK: } \ No newline at end of file +// CHECK: } diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 7e60edecb01c..65f881afccfe 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -84,7 +84,7 @@ void C3::Layer::Initialize() { // CHECK: %2 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr // CHECK: %3 = cir.get_member %2[0] {name = "m_C1"} : !cir.ptr -> !cir.ptr> // CHECK: %4 = cir.load %3 : cir.ptr >, !cir.ptr -// CHECK: %5 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %5 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool enumy C3::Initialize() { @@ -155,4 +155,4 @@ class B : public A { void t() { B b; b.foo(); -} \ No newline at end of file +} diff --git a/clang/test/CIR/CodeGen/pointer.cpp b/clang/test/CIR/CodeGen/pointer.cpp index 2ac11cd42e32..bdf0e2103192 100644 --- a/clang/test/CIR/CodeGen/pointer.cpp +++ b/clang/test/CIR/CodeGen/pointer.cpp @@ -3,4 +3,4 @@ // Global pointer should be zero initialized by default. int *ptr; -// CHECK: cir.global external @ptr = #cir.null : !cir.ptr +// CHECK: cir.global external @ptr = #cir.ptr : !cir.ptr diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index b89ba790d22b..d53b15954abd 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -46,7 +46,7 @@ struct S1 { float f; int *p; } s1 = {1, .1, 0}; -// CHECK-DAG: cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.null : !cir.ptr}> : !ty_22S122 +// CHECK-DAG: cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.ptr : !cir.ptr}> : !ty_22S122 // Should initialize global nested structs. struct S2 { diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index e82d566f4e34..2181e60fd7cd 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -103,7 +103,7 @@ void m() { Adv C; } // CHECK: %4 = cir.const(#cir.int<1000024001> : !u32i) : !u32i // CHECK: cir.store %4, %3 : !u32i, cir.ptr // CHECK: %5 = cir.get_member %2[1] {name = "n"} : !cir.ptr -> !cir.ptr> -// CHECK: %6 = cir.const(#cir.null : !cir.ptr) : !cir.ptr +// CHECK: %6 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > // CHECK: %7 = cir.get_member %2[2] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !s32i) : !s32i diff --git a/clang/test/CIR/CodeGen/types-nullptr.cpp b/clang/test/CIR/CodeGen/types-nullptr.cpp index 4f7b1df747c5..e84c386417a7 100644 --- a/clang/test/CIR/CodeGen/types-nullptr.cpp +++ b/clang/test/CIR/CodeGen/types-nullptr.cpp @@ -5,5 +5,5 @@ typedef decltype(nullptr) nullptr_t; void f() { nullptr_t t = nullptr; } // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr > -// CHECK: %1 = cir.const(#cir.null : !cir.ptr) : !cir.ptr -// CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > \ No newline at end of file +// CHECK: %1 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 873a354a611a..92ed9e1eea59 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -74,7 +74,7 @@ class B : public A // CHECK: } // vtable for B -// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.null : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}> : ![[VTableTypeA]] +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}> : ![[VTableTypeA]] // vtable for __cxxabiv1::__si_class_type_info // CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 0f6e3f6efef3..4ffda321f221 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -8,7 +8,7 @@ module { cir.global external @a = #cir.int<3> : !s32i cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i] : !cir.array> cir.global external @b = #cir.const_array<"example\00" : !cir.array> - cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.null : !cir.ptr}> : !cir.struct}> + cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.ptr : !cir.ptr}> : !cir.struct}> cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} cir.global "private" internal @c : !s32i cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index bb675491d3ac..0ca97dbbcc27 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -5,7 +5,7 @@ // expected-error@+2 {{'cir.const' op nullptr expects pointer type}} cir.func @p0() { - %1 = cir.const(#cir.null : !cir.ptr) : !u32i + %1 = cir.const(#cir.ptr : !cir.ptr) : !u32i cir.return } @@ -521,4 +521,4 @@ module { cir.store %13, %11 : !cir.ptr, cir.ptr > cir.throw(%11 : !cir.ptr>) // expected-error {{'type_info' symbol attribute missing}} } -} \ No newline at end of file +} diff --git a/clang/test/CIR/IR/vtableAttr.cir b/clang/test/CIR/IR/vtableAttr.cir index ae175d5fa987..a9766e36ffe9 100644 --- a/clang/test/CIR/IR/vtableAttr.cir +++ b/clang/test/CIR/IR/vtableAttr.cir @@ -4,6 +4,6 @@ !ty_2222 = !cir.struct x 1>}> module { // Should parse VTable attribute. - cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.null : !cir.ptr]> : !cir.array x 1>}> : !ty_2222 - // CHECK: cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.null : !cir.ptr]> : !cir.array x 1>}> : !ty_2222 + cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr]> : !cir.array x 1>}> : !ty_2222 + // CHECK: cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr]> : !cir.array x 1>}> : !ty_2222 } diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index fd01a6611988..f51d8a85f968 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -141,7 +141,7 @@ module { // MLIR: llvm.mlir.global external @zeroInitFlt(dense<0.000000e+00> : tensor<2xf32>) {addr_space = 0 : i32} : !llvm.array<2 x f32> cir.global "private" internal @staticVar = #cir.int<0> : !s32i // MLIR: llvm.mlir.global internal @staticVar(0 : i32) {addr_space = 0 : i32} : i32 - cir.global external @nullPtr = #cir.null : !cir.ptr + cir.global external @nullPtr = #cir.ptr : !cir.ptr // MLIR: llvm.mlir.global external @nullPtr() // MLIR: %0 = llvm.mlir.zero : !llvm.ptr // MLIR: llvm.return %0 : !llvm.ptr diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index a5facb14383d..1fea10b3d90b 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -33,21 +33,21 @@ module { // CHECK: %1 = llvm.alloca %0 x !llvm.struct<"struct.S2A", (i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr // CHECK: %2 = llvm.mlir.undef : !llvm.struct<"struct.S2A", (i32)> // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32 - // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.struct<"struct.S2A", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.struct<"struct.S2A", (i32)> // CHECK: llvm.store %4, %1 : !llvm.struct<"struct.S2A", (i32)>, !llvm.ptr // CHECK: llvm.return // CHECK: } // Should lower basic #cir.const_struct initializer. - cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.null : !cir.ptr}> : !ty_22S122 + cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.ptr : !cir.ptr}> : !ty_22S122 // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"struct.S1", (i32, f32, ptr)> { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 - // CHECK: %2 = llvm.insertvalue %1, %0[0] : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: %2 = llvm.insertvalue %1, %0[0] : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: %3 = llvm.mlir.constant(1.000000e-01 : f32) : f32 - // CHECK: %4 = llvm.insertvalue %3, %2[1] : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: %4 = llvm.insertvalue %3, %2[1] : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: %5 = llvm.mlir.zero : !llvm.ptr - // CHECK: %6 = llvm.insertvalue %5, %4[2] : !llvm.struct<"struct.S1", (i32, f32, ptr)> + // CHECK: %6 = llvm.insertvalue %5, %4[2] : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: llvm.return %6 : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: } @@ -57,8 +57,8 @@ module { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S2A", (i32)> // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 - // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S2A", (i32)> - // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S2A", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> // CHECK: llvm.return %4 : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> // CHECK: } @@ -67,16 +67,16 @@ module { // CHECK: %0 = llvm.mlir.undef : !llvm.array<3 x struct<"struct.S3", (i32)>> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 - // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S3", (i32)> - // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S3", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.array<3 x struct<"struct.S3", (i32)>> // CHECK: %5 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> // CHECK: %6 = llvm.mlir.constant(2 : i32) : i32 - // CHECK: %7 = llvm.insertvalue %6, %5[0] : !llvm.struct<"struct.S3", (i32)> - // CHECK: %8 = llvm.insertvalue %7, %4[1] : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: %7 = llvm.insertvalue %6, %5[0] : !llvm.struct<"struct.S3", (i32)> + // CHECK: %8 = llvm.insertvalue %7, %4[1] : !llvm.array<3 x struct<"struct.S3", (i32)>> // CHECK: %9 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> // CHECK: %10 = llvm.mlir.constant(3 : i32) : i32 - // CHECK: %11 = llvm.insertvalue %10, %9[0] : !llvm.struct<"struct.S3", (i32)> - // CHECK: %12 = llvm.insertvalue %11, %8[2] : !llvm.array<3 x struct<"struct.S3", (i32)>> + // CHECK: %11 = llvm.insertvalue %10, %9[0] : !llvm.struct<"struct.S3", (i32)> + // CHECK: %12 = llvm.insertvalue %11, %8[2] : !llvm.array<3 x struct<"struct.S3", (i32)>> // CHECK: llvm.return %12 : !llvm.array<3 x struct<"struct.S3", (i32)>> // CHECK: } diff --git a/clang/test/CIR/Lowering/types.cir b/clang/test/CIR/Lowering/types.cir index ba52bf55514d..12bb892bd4c4 100644 --- a/clang/test/CIR/Lowering/types.cir +++ b/clang/test/CIR/Lowering/types.cir @@ -5,9 +5,9 @@ module { cir.func @testTypeLowering() { // Should lower void pointers as opaque pointers. - %0 = cir.const(#cir.null : !cir.ptr) : !cir.ptr + %0 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK: llvm.mlir.zero : !llvm.ptr - %1 = cir.const(#cir.null : !cir.ptr>) : !cir.ptr> + %1 = cir.const(#cir.ptr : !cir.ptr>) : !cir.ptr> // CHECK: llvm.mlir.zero : !llvm.ptr cir.return } From 74a8c110ec2df28c82c3035e56132ccd7611690e Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Tue, 12 Sep 2023 14:01:51 +0300 Subject: [PATCH 1195/2301] [CIR][Lowering] Support endless loops (gh-161). --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 +++-- clang/test/CIR/Lowering/loop.cir | 32 +++++++++++++++++++ 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 541e0b150adc..4f1394b75f73 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -339,7 +339,7 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { } // Succeed only if both yields are found. - if (!yieldToBody || !yieldToCont) + if (!yieldToBody) return mlir::failure(); return mlir::success(); } @@ -428,8 +428,10 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { rewriter.create(loopOp.getLoc(), &entry); // Set loop exit point to continue block. - rewriter.setInsertionPoint(yieldToCont); - rewriter.replaceOpWithNewOp(yieldToCont, continueBlock); + if (yieldToCont) { + rewriter.setInsertionPoint(yieldToCont); + rewriter.replaceOpWithNewOp(yieldToCont, continueBlock); + } // Branch from condition to body. rewriter.setInsertionPoint(yieldToBody); diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index f513185ac0ca..adea273b6cc4 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -185,4 +185,36 @@ module { // MLIR-NEXT: ^bb6: // MLIR-NEXT: llvm.br ^bb7 + // Test endless cir.loop lowering. + cir.func @testEndless() { + cir.scope { + cir.loop for(cond : { + cir.yield continue + }, step : { + cir.yield + }) { + cir.yield + } + } + cir.return + } + + // MLIR: llvm.func @testEndless() + // MLIR-NEXT: llvm.br ^bb1 + // MLIR-NEXT: ^bb1: + // MLIR-NEXT: llvm.br ^bb2 + // ============= Condition block ============= + // MLIR-NEXT: ^bb2: + // MLIR-NEXT: llvm.br ^bb3 + // ============= Body block ============= + // MLIR-NEXT: ^bb3: + // MLIR-NEXT: llvm.br ^bb4 + // ============= Step block ============= + // MLIR-NEXT: ^bb4: + // MLIR-NEXT: llvm.br ^bb2 + // ============= Exit block ============= + // MLIR-NEXT: ^bb5: + // MLIR-NEXT: llvm.br ^bb6 + // MLIR-NEXT: ^bb6: + // MLIR-NEXT: llvm.return } From d156ee6887af69e0d78affd20ffd1960ea123c0d Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Thu, 14 Sep 2023 01:51:57 +0300 Subject: [PATCH 1196/2301] [CIR] Implement fabs operation. (#254) Following discussion in #237 this adds support for `fabs` builtins which are used extensively in llvm-test-suite. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 25 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 9 +++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 16 +++++++++++- clang/test/CIR/CodeGen/libc.c | 12 +++++++++ clang/test/CIR/IR/libc-fabs.cir | 9 +++++++ clang/test/CIR/Lowering/libc.cir | 6 +++++ 6 files changed, 74 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/IR/libc-fabs.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 909283bbe692..7bf35dc1b35f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2023,6 +2023,31 @@ def MemCpyOp : CIR_Op<"libc.memcpy"> { }]; } +//===----------------------------------------------------------------------===// +// FAbsOp +//===----------------------------------------------------------------------===// + +def FAbsOp : CIR_Op<"fabs", [Pure, SameOperandsAndResultType]> { + let arguments = (ins AnyFloat:$src); + let results = (outs AnyFloat:$result); + let summary = "Returns absolute value for floating-point input."; + let description = [{ + Equivalent to libc's `fabs` and LLVM's intrinsic with the same name. + + Examples: + + ```mlir + %1 = cir.const(1.0 : f64) : f64 + %2 = cir.fabs %1 : f64 + ``` + }]; + + let assemblyFormat = [{ + $src `:` type($src) attr-dict + }]; + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // Variadic Operations //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index bf1c94009a49..825024daab8a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -142,8 +142,13 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fabsf: case Builtin::BI__builtin_fabsf16: case Builtin::BI__builtin_fabsl: - case Builtin::BI__builtin_fabsf128: - llvm_unreachable("NYI"); + case Builtin::BI__builtin_fabsf128: { + mlir::Value Src0 = buildScalarExpr(E->getArg(0)); + auto SrcType = Src0.getType(); + auto Call = + builder.create(Src0.getLoc(), SrcType, Src0); + return RValue::get(Call->getResult(0)); + } case Builtin::BIfloor: case Builtin::BIfloorf: diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 4f1394b75f73..ef51197088fb 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1835,6 +1835,19 @@ class CIRPtrDiffOpLowering } }; +class CIRFAbsOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::FAbsOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getOperands().front()); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -1847,7 +1860,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, - CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering>( + CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, + CIRFAbsOpLowering>( converter, patterns.getContext()); } diff --git a/clang/test/CIR/CodeGen/libc.c b/clang/test/CIR/CodeGen/libc.c index 16c3adf1a383..1b4c7c34b068 100644 --- a/clang/test/CIR/CodeGen/libc.c +++ b/clang/test/CIR/CodeGen/libc.c @@ -7,3 +7,15 @@ void testMemcpy(void *src, const void *dst, unsigned long size) { memcpy(dst, src, size); // CHECK: cir.libc.memcpy %{{.+}} bytes from %{{.+}} to %{{.+}} : !u64i, !cir.ptr -> !cir.ptr } + +double fabs(double); +double testFabs(double x) { + return fabs(x); + // CHECK: cir.fabs %{{.+}} : f64 +} + +float fabsf(float); +float testFabsf(float x) { + return fabsf(x); + // CHECK: cir.fabs %{{.+}} : f32 +} diff --git a/clang/test/CIR/IR/libc-fabs.cir b/clang/test/CIR/IR/libc-fabs.cir new file mode 100644 index 000000000000..cfd5129b6350 --- /dev/null +++ b/clang/test/CIR/IR/libc-fabs.cir @@ -0,0 +1,9 @@ +// RUN: cir-opt %s + +!u32i = !cir.int +module { + cir.func @foo(%arg0: f64) -> f64 { + %0 = cir.fabs %arg0 : f64 + cir.return %0 : f64 + } +} diff --git a/clang/test/CIR/Lowering/libc.cir b/clang/test/CIR/Lowering/libc.cir index 74e384d08a74..70a066854d46 100644 --- a/clang/test/CIR/Lowering/libc.cir +++ b/clang/test/CIR/Lowering/libc.cir @@ -9,4 +9,10 @@ module { // CHECK: "llvm.intr.memcpy"(%{{.+}}, %{{.+}}, %{{.+}}) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i64) -> () cir.return } + + cir.func @shouldLowerLibcFAbsBuiltin(%arg0: f64) -> f64 { + %0 = cir.fabs %arg0 : f64 + // CHECK: %0 = llvm.intr.fabs(%arg0) : (f64) -> f64 + cir.return %0 : f64 + } } From b377f8d05b357b82e2887022563f00c2efc1c25f Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 13 Sep 2023 16:32:50 -0300 Subject: [PATCH 1197/2301] [CIR][IR] Bypass get_member verifier for incomplete types Temporary workaround until we patch the codegen for record types. --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 11 ++++++--- clang/test/CIR/IR/getmember.cir | 31 +++++++++++++++++++++++++ clang/test/CIR/IR/invalid.cir | 26 +++++++++++++++++++++ 3 files changed, 65 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/IR/getmember.cir diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a437d308c4da..8b5f325920f8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2475,12 +2475,17 @@ LogicalResult GetMemberOp::verify() { if (!recordTy) return emitError() << "expected pointer to a record type"; + // FIXME: currently we bypass typechecking of incomplete types due to errors + // in the codegen process. This should be removed once the codegen is fixed. + if (!recordTy.getBody()) + return mlir::success(); + if (recordTy.getMembers().size() <= getIndex()) return emitError() << "member index out of bounds"; - // FIXME(cir): Member type check is disabled for classes and incomplete types - // as the codegen for these still need to be patched. - if (!recordTy.isClass() && !recordTy.getBody() && + // FIXME(cir): member type check is disabled for classes as the codegen for + // these still need to be patched. + if (!recordTy.isClass() && recordTy.getMembers()[getIndex()] != getResultTy().getPointee()) return emitError() << "member type mismatch"; diff --git a/clang/test/CIR/IR/getmember.cir b/clang/test/CIR/IR/getmember.cir new file mode 100644 index 000000000000..932e4a5b29f5 --- /dev/null +++ b/clang/test/CIR/IR/getmember.cir @@ -0,0 +1,31 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +!u16i = !cir.int +!u32i = !cir.int + +!ty_22Class22 = !cir.struct +!ty_22Incomplete22 = !cir.struct +!ty_22Struct22 = !cir.struct + +module { + cir.func @shouldGetStructMember(%arg0 : !cir.ptr) { + // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr + %0 = cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr + cir.return + } + + // FIXME: remove bypass once codegen for CIR records is patched. + cir.func @shouldBypassMemberIndexCheckForIncompleteRecords(%arg0 : !cir.ptr) { + // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr + %0 = cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr + cir.return + } + + // FIXME: remove bypass once codegen for CIR class records is patched. + cir.func @shouldBypassMemberTypeCheckForClassRecords(%arg0 : !cir.ptr) { + // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr> + %0 = cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr> + cir.return + } +} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 0ca97dbbcc27..cd5d709e57a4 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -522,3 +522,29 @@ module { cir.throw(%11 : !cir.ptr>) // expected-error {{'type_info' symbol attribute missing}} } } + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct = !cir.struct +module { + cir.func @memeber_index_out_of_bounds(%arg0 : !cir.ptr) { + // expected-error@+1 {{member index out of bounds}} + %0 = cir.get_member %arg0[2] {name = "test"} : !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct = !cir.struct +module { + cir.func @memeber_type_mismatch(%arg0 : !cir.ptr) { + // expected-error@+1 {{member type mismatch}} + %0 = cir.get_member %arg0[0] {name = "test"} : !cir.ptr -> !cir.ptr + cir.return + } +} From 9aebfa7ef4cc07145470c3d3c6ab3362cb426930 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Wed, 13 Sep 2023 16:46:03 -0700 Subject: [PATCH 1198/2301] [CIR][Codegen] VTT support for virtual class inheritance (#252) This patch brings up the basic support for C++ virtual inheritance. VTT (virtual table table) now can be laid out as expected for simple program with single virtual inheritance. RTTI support is on the way. This patch does not include LLVM lowering support. --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 11 ++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 22 ++-- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 25 +++- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 123 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenVTables.h | 12 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 29 ++++- clang/test/CIR/CodeGen/vbase.cpp | 19 +++ 8 files changed, 214 insertions(+), 31 deletions(-) create mode 100644 clang/test/CIR/CodeGen/vbase.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 4f1d45ac50b7..e1fa33a3f22f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -188,6 +188,11 @@ class CIRGenCXXABI { /// Emits the VTable definitions required for the given record type. virtual void emitVTableDefinitions(CIRGenVTables &CGVT, const CXXRecordDecl *RD) = 0; + + /// Emit any tables needed to implement virtual inheritance. For Itanium, + /// this emits virtual table tables. + virtual void emitVirtualInheritanceTables(const CXXRecordDecl *RD) = 0; + virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty) = 0; virtual CatchTypeInfo @@ -280,6 +285,12 @@ class CIRGenCXXABI { // directly or require access through a thread wrapper function. virtual bool usesThreadWrapperFunction(const VarDecl *VD) const = 0; + /// Emit the code to initialize hidden members required to handle virtual + /// inheritance, if needed by the ABI. + virtual void + initializeHiddenVirtualInheritanceMembers(CIRGenFunction &CGF, + const CXXRecordDecl *RD) {} + /// Emit a single constructor/destructor with the gien type from a C++ /// constructor Decl. virtual void buildCXXStructor(clang::GlobalDecl GD) = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 8e59bb618e1c..9a550a61f794 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -40,9 +40,8 @@ bool CIRGenFunction::IsConstructorDelegationValid( // }; // ...although even this example could in principle be emitted as a delegation // since the address of the parameter doesn't escape. - if (Ctor->getParent()->getNumVBases()) { - llvm_unreachable("NYI"); - } + if (Ctor->getParent()->getNumVBases()) + return false; // We also disable the optimization for variadic functions because it's // impossible to "re-pass" varargs. @@ -236,7 +235,7 @@ static void buildMemberInitializer(CIRGenFunction &CGF, if (CGF.CurGD.getCtorType() == Ctor_Base) LHS = CGF.MakeNaturalAlignPointeeAddrLValue(ThisPtr, RecordTy); else - llvm_unreachable("NYI"); + LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); buildLValueForAnyFieldInitialization(CGF, MemberInit, LHS); @@ -524,9 +523,10 @@ Address CIRGenFunction::getAddressOfDirectBaseInCompleteClass( // TODO: for complete types, this should be possible with a GEP. Address V = This; if (!Offset.isZero()) { - // TODO(cir): probably create a new operation to account for - // down casting when the offset isn't zero. - llvm_unreachable("NYI"); + mlir::Value OffsetVal = builder.getSInt32(Offset.getQuantity(), loc); + mlir::Value VBaseThisPtr = builder.create( + loc, This.getPointer().getType(), This.getPointer(), OffsetVal); + V = Address(VBaseThisPtr, CXXABIThisAlignment); } V = builder.createElementBitCast(loc, V, ConvertType(Base)); return V; @@ -605,7 +605,11 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) { if (!ConstructVBases) continue; - llvm_unreachable("NYI"); + if (CGM.getCodeGenOpts().StrictVTablePointers && + CGM.getCodeGenOpts().OptimizationLevel > 0 && + isInitializerOfDynamicClass(*B)) + llvm_unreachable("NYI"); + buildBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); } if (BaseCtorContinueBB) { @@ -699,7 +703,7 @@ void CIRGenFunction::initializeVTablePointers(mlir::Location loc, initializeVTablePointer(loc, Vptr); if (RD->getNumVBases()) - llvm_unreachable("NYI"); + CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD); } CIRGenFunction::VPtrsVector diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 56f29105da4f..287ed88872a8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -356,8 +356,10 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, break; case CXXConstructionKind::Delegating: llvm_unreachable("NYI"); + break; case CXXConstructionKind::VirtualBase: - llvm_unreachable("NYI"); + ForVirtualBase = true; + [[fallthrough]]; case CXXConstructionKind::NonVirtualBase: Type = Ctor_Base; break; diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 50fb494df371..ceb0b8e5efa8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -197,6 +197,7 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { const CXXRecordDecl *NearestVBase) override; void emitVTableDefinitions(CIRGenVTables &CGVT, const CXXRecordDecl *RD) override; + void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override; mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty) override; bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, @@ -817,10 +818,10 @@ class CIRGenItaniumRTTIBuilder { /// to the Itanium C++ ABI, 2.9.5p6b. void BuildSIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *RD); - // /// Build an abi::__vmi_class_type_info, used for - // /// classes with bases that do not satisfy the abi::__si_class_type_info - // /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c. - // void BuildVMIClassTypeInfo(const CXXRecordDecl *RD); + /// Build an abi::__vmi_class_type_info, used for + /// classes with bases that do not satisfy the abi::__si_class_type_info + /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c. + void BuildVMIClassTypeInfo(const CXXRecordDecl *RD); // /// Build an abi::__pointer_type_info struct, used // /// for pointer types. @@ -1432,6 +1433,10 @@ void CIRGenItaniumRTTIBuilder::BuildSIClassTypeInfo(mlir::Location loc, Fields.push_back(BaseTypeInfo); } +void CIRGenItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) { + // TODO: Implement this function. +} + mlir::Attribute CIRGenItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(mlir::Location loc, QualType Ty) { @@ -1556,8 +1561,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( if (CanUseSingleInheritance(RD)) { BuildSIClassTypeInfo(loc, RD); } else { - llvm_unreachable("NYI"); - // BuildVMIClassTypeInfo(RD); + BuildVMIClassTypeInfo(RD); } break; @@ -1726,6 +1730,13 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, llvm_unreachable("NYI"); } +void CIRGenItaniumCXXABI::emitVirtualInheritanceTables( + const CXXRecordDecl *RD) { + CIRGenVTables &VTables = CGM.getVTables(); + auto VTT = VTables.getAddrOfVTT(RD); + VTables.buildVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD); +} + /// What sort of uniqueness rules should we use for the RTTI for the /// given type? CIRGenItaniumCXXABI::RTTIUniquenessKind @@ -1838,4 +1849,4 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, // Now throw the exception. builder.create(CGF.getLoc(E->getSourceRange()), exceptionPtr, typeInfo.getSymbol(), dtor); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index e49c0454734a..77f3fe11340d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -13,14 +13,17 @@ #include "CIRGenCXXABI.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "mlir/IR/Attributes.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/RecordLayout.h" +#include "clang/AST/VTTBuilder.h" #include "clang/Basic/CodeGenOptions.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "clang/CodeGen/ConstantInitBuilder.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Format.h" #include "llvm/Transforms/Utils/Cloning.h" #include @@ -151,7 +154,7 @@ void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) { assert(!UnimplementedFeature::generateDebugInfo()); if (RD->getNumVBases()) - llvm_unreachable("NYI"); + CGM.getCXXABI().emitVirtualInheritanceTables(RD); CGM.getCXXABI().emitVTableDefinitions(*this, RD); } @@ -159,9 +162,9 @@ void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) { static void AddPointerLayoutOffset(CIRGenModule &CGM, ConstantArrayBuilder &builder, CharUnits offset) { - assert(offset.getQuantity() == 0 && "NYI"); - builder.add(mlir::cir::ConstPtrAttr::get( - CGM.getBuilder().getContext(), CGM.getBuilder().getUInt8PtrTy(), 0)); + builder.add(mlir::cir::ConstPtrAttr::get(CGM.getBuilder().getContext(), + CGM.getBuilder().getUInt8PtrTy(), + offset.getQuantity())); } static void AddRelativeLayoutOffset(CIRGenModule &CGM, @@ -415,6 +418,118 @@ CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { llvm_unreachable("Invalid TemplateSpecializationKind!"); } +mlir::cir::GlobalOp +getAddrOfVTTVTable(CIRGenVTables &CGVT, CIRGenModule &CGM, + const CXXRecordDecl *MostDerivedClass, + const VTTVTable &vtable, + mlir::cir::GlobalLinkageKind linkage, + VTableLayout::AddressPointsMapTy &addressPoints) { + if (vtable.getBase() == MostDerivedClass) { + assert(vtable.getBaseOffset().isZero() && + "Most derived class vtable must have a zero offset!"); + // This is a regular vtable. + return CGM.getCXXABI().getAddrOfVTable(MostDerivedClass, CharUnits()); + } + + llvm_unreachable("generateConstructionVTable NYI"); +} + +mlir::cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *RD) +{ + assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT"); + + SmallString<256> OutName; + llvm::raw_svector_ostream Out(OutName); + cast(CGM.getCXXABI().getMangleContext()) + .mangleCXXVTT(RD, Out); + StringRef Name = OutName.str(); + + // This will also defer the definition of the VTT. + (void)CGM.getCXXABI().getAddrOfVTable(RD, CharUnits()); + + VTTBuilder Builder(CGM.getASTContext(), RD, /*GenerateDefinition=*/false); + + auto ArrayType = mlir::cir::ArrayType::get(CGM.getBuilder().getContext(), + CGM.getBuilder().getUInt8PtrTy(), + Builder.getVTTComponents().size()); + auto Align = + CGM.getDataLayout().getABITypeAlign(CGM.getBuilder().getUInt8PtrTy()); + auto VTT = CGM.createOrReplaceCXXRuntimeVariable( + CGM.getLoc(RD->getSourceRange()), Name, ArrayType, + mlir::cir::GlobalLinkageKind::ExternalLinkage, + CharUnits::fromQuantity(Align)); + CGM.setGVProperties(VTT, RD); + return VTT; +} + +/// Emit the definition of the given vtable. +void CIRGenVTables::buildVTTDefinition(mlir::cir::GlobalOp VTT, + mlir::cir::GlobalLinkageKind Linkage, + const CXXRecordDecl *RD) { + VTTBuilder Builder(CGM.getASTContext(), RD, /*GenerateDefinition=*/true); + + auto ArrayType = mlir::cir::ArrayType::get(CGM.getBuilder().getContext(), + CGM.getBuilder().getUInt8PtrTy(), + Builder.getVTTComponents().size()); + + SmallVector VTables; + SmallVector VTableAddressPoints; + for (const VTTVTable *i = Builder.getVTTVTables().begin(), + *e = Builder.getVTTVTables().end(); + i != e; ++i) { + VTableAddressPoints.push_back(VTableAddressPointsMapTy()); + VTables.push_back(getAddrOfVTTVTable(*this, CGM, RD, *i, Linkage, + VTableAddressPoints.back())); + } + + SmallVector VTTComponents; + for (const VTTComponent *i = Builder.getVTTComponents().begin(), + *e = Builder.getVTTComponents().end(); + i != e; ++i) { + const VTTVTable &VTTVT = Builder.getVTTVTables()[i->VTableIndex]; + mlir::cir::GlobalOp VTable = VTables[i->VTableIndex]; + VTableLayout::AddressPointLocation AddressPoint; + if (VTTVT.getBase() == RD) { + // Just get the address point for the regular vtable. + AddressPoint = + getItaniumVTableContext().getVTableLayout(RD).getAddressPoint( + i->VTableBase); + } else { + AddressPoint = VTableAddressPoints[i->VTableIndex].lookup(i->VTableBase); + assert(AddressPoint.AddressPointIndex != 0 && + "Did not find ctor vtable address point!"); + } + + mlir::Attribute Idxs[3] = { + CGM.getBuilder().getI32IntegerAttr(0), + CGM.getBuilder().getI32IntegerAttr(AddressPoint.VTableIndex), + CGM.getBuilder().getI32IntegerAttr(AddressPoint.AddressPointIndex), + }; + + auto Init = mlir::cir::GlobalViewAttr::get( + CGM.getBuilder().getUInt8PtrTy(), + mlir::FlatSymbolRefAttr::get(VTable.getSymNameAttr()), + mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Idxs)); + + VTTComponents.push_back(Init); + } + + auto Init = CGM.getBuilder().getConstArray( + mlir::ArrayAttr::get(CGM.getBuilder().getContext(), VTTComponents), + ArrayType); + + VTT.setInitialValueAttr(Init); + + // Set the correct linkage. + VTT.setLinkage(Linkage); + mlir::SymbolTable::setSymbolVisibility(VTT, + CIRGenModule::getMLIRVisibility(VTT)); + + if (CGM.supportsCOMDAT() && VTT.isWeakForLinker()) { + assert(!UnimplementedFeature::setComdat()); + } +} + void CIRGenVTables::buildThunks(GlobalDecl GD) { const CXXMethodDecl *MD = cast(GD.getDecl())->getCanonicalDecl(); diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index 754490674445..e92f60394270 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -138,13 +138,13 @@ class CIRGenVTables { // llvm::GlobalVariable::LinkageTypes Linkage, // VTableAddressPointsMapTy &AddressPoints); - // /// GetAddrOfVTT - Get the address of the VTT for the given record decl. - // llvm::GlobalVariable *GetAddrOfVTT(const CXXRecordDecl *RD); + /// Get the address of the VTT for the given record decl. + mlir::cir::GlobalOp getAddrOfVTT(const CXXRecordDecl *RD); - // /// EmitVTTDefinition - Emit the definition of the given vtable. - // void EmitVTTDefinition(llvm::GlobalVariable *VTT, - // llvm::GlobalVariable::LinkageTypes Linkage, - // const CXXRecordDecl *RD); + /// Emit the definition of the given vtable. + void buildVTTDefinition(mlir::cir::GlobalOp VTT, + mlir::cir::GlobalLinkageKind Linkage, + const CXXRecordDecl *RD); /// Emit the associated thunks for the given global decl. void buildThunks(GlobalDecl GD); diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index fa5ef6f1057a..3910315e3b04 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -410,7 +410,13 @@ void CIRRecordLowering::accumulateVBases() { const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); if (BaseDecl->isEmpty()) continue; - llvm_unreachable("NYI"); + // If the vbase is a primary virtual base of some base, then it doesn't + // get its own storage location but instead lives inside of that base. + if (astContext.isNearlyEmpty(BaseDecl) && + !hasOwnStorage(cxxRecordDecl, BaseDecl)) + continue; + ScissorOffset = std::min(ScissorOffset, + astRecordLayout.getVBaseClassOffset(BaseDecl)); } members.push_back(MemberInfo(ScissorOffset, MemberInfo::InfoKind::Scissor, mlir::Type{}, cxxRecordDecl)); @@ -418,7 +424,23 @@ void CIRRecordLowering::accumulateVBases() { const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); if (BaseDecl->isEmpty()) continue; - llvm_unreachable("NYI"); + CharUnits Offset = astRecordLayout.getVBaseClassOffset(BaseDecl); + // If the vbase is a primary virtual base of some base, then it doesn't + // get its own storage location but instead lives inside of that base. + if (isOverlappingVBaseABI() && astContext.isNearlyEmpty(BaseDecl) && + !hasOwnStorage(cxxRecordDecl, BaseDecl)) { + members.push_back( + MemberInfo(Offset, MemberInfo::InfoKind::VBase, nullptr, BaseDecl)); + continue; + } + // If we've got a vtordisp, add it as a storage type. + if (astRecordLayout.getVBaseOffsetsMap() + .find(BaseDecl) + ->second.hasVtorDisp()) + members.push_back( + StorageInfo(Offset - CharUnits::fromQuantity(4), getUIntNType(32))); + members.push_back(MemberInfo(Offset, MemberInfo::InfoKind::VBase, + getStorageType(BaseDecl), BaseDecl)); } } @@ -449,8 +471,7 @@ void CIRRecordLowering::fillOutputFields() { } else if (member.kind == MemberInfo::InfoKind::Base) { nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1; } else if (member.kind == MemberInfo::InfoKind::VBase) { - llvm_unreachable("NYI"); - // virtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1; + virtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1; } } } diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp new file mode 100644 index 000000000000..44623e73f8ae --- /dev/null +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct A { + int a; +}; + +struct B: virtual A { + int b; +}; + +void ppp() { B b; } + + +// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr<12> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr]> : !cir.array x 3>}> +// CHECK: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 1> +// CHECK: cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE +// CHECK: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr}> From 3e87489bf1fe14e0b3fa07d96c85d3a687d9181a Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 14 Sep 2023 20:12:53 +0300 Subject: [PATCH 1199/2301] [CIR][CIRGen] CIR generation for bitfields. Fixes #13 (#233) This PR introduces bitfelds support. This now works: ``` typedef struct { int a1 : 4; int a2 : 28; int a3 : 16; int a4 : 3; int a5 : 17; int a6 : 25; } A; void init(A* a) { a->a1 = 1; a->a2 = 321; a->a3 = 15; a->a4 = -2; a->a5 = -123; a->a6 = 1234; } void print(A* a) { printf("%d %d %d %d %d %d\n", a->a1, a->a2, a->a3, a->a4, a->a5, a->a6 ); } int main() { A a; init(&a); print(&a); return 0; } ``` the output is: `1 321 15 -2 -123 1234` --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 65 ++++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 229 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 138 +++++------ clang/lib/CIR/CodeGen/CIRGenFunction.h | 15 +- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 10 + clang/lib/CIR/CodeGen/CIRGenValue.h | 34 +++ .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 8 +- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/bitfields.c | 83 +++++++ clang/test/CIR/CodeGen/bitfields.cpp | 65 +++++ 10 files changed, 563 insertions(+), 85 deletions(-) create mode 100644 clang/test/CIR/CodeGen/bitfields.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 572b85be4c2c..d7dea8f3ad5e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -460,6 +460,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return getConstInt( loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); } + mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, + const llvm::APInt &val) { + return create(loc, typ, + getAttr(typ, val)); + } mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), getCIRBoolAttr(state)); @@ -677,6 +682,65 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::UnaryOpKind::Not, value); } + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + const llvm::APInt &rhs) { + return create( + lhs.getLoc(), lhs.getType(), kind, lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); + } + + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + mlir::Value rhs) { + return create(lhs.getLoc(), lhs.getType(), kind, lhs, + rhs); + } + + mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, + bool isShiftLeft) { + return create( + lhs.getLoc(), lhs.getType(), lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); + } + + mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) { + auto width = lhs.getType().dyn_cast().getWidth(); + auto shift = llvm::APInt(width, bits); + return createShift(lhs, shift, isShiftLeft); + } + + mlir::Value createShiftLeft(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, true); + } + + mlir::Value createShiftRight(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, false); + } + + mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, + unsigned bits) { + auto val = llvm::APInt::getLowBitsSet(size, bits); + auto typ = mlir::cir::IntType::get(getContext(), size, false); + return getConstAPInt(loc, typ, val); + } + + mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::And, val); + } + + mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); + } + + mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::Or, val); + } + + mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); + } + //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// @@ -727,6 +791,5 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return createCast(mlir::cir::CastKind::bitcast, src, newTy); } }; - } // namespace cir #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 5265760ecbdc..4372fcff35ed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -21,6 +21,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/Basic/Builtins.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -128,6 +129,7 @@ static Address buildPointerWithAlignment(const Expr *E, if (PtrTy->getPointeeType()->isVoidType()) break; assert(!UnimplementedFeature::tbaa()); + LValueBaseInfo InnerBaseInfo; Address Addr = CGF.buildPointerWithAlignment( CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull); @@ -211,13 +213,78 @@ static Address buildPointerWithAlignment(const Expr *E, return Address(CGF.buildScalarExpr(E), Align); } +/// Helper method to check if the underlying ABI is AAPCS +static bool isAAPCS(const TargetInfo &TargetInfo) { + return TargetInfo.getABI().starts_with("aapcs"); +} + +Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field, + unsigned index) { + if (index == 0) + return base.getAddress(); + + auto loc = getLoc(field->getLocation()); + auto fieldType = convertType(field->getType()); + auto fieldPtr = + mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); + auto sea = getBuilder().createGetMember( + loc, fieldPtr, base.getPointer(), field->getName(), index); + + return Address(sea, CharUnits::One()); +} + +static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base, + const CIRGenBitFieldInfo &info, + const FieldDecl *field) { + return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth && + info.VolatileStorageSize != 0 && + field->getType() + .withCVRQualifiers(base.getVRQualifiers()) + .isVolatileQualified(); +} + +LValue CIRGenFunction::buildLValueForBitField(LValue base, + const FieldDecl *field) { + + LValueBaseInfo BaseInfo = base.getBaseInfo(); + const RecordDecl *rec = field->getParent(); + auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent()); + auto &info = layout.getBitFieldInfo(field); + auto useVolatile = useVolatileForBitField(CGM, base, info, field); + unsigned Idx = layout.getCIRFieldNo(field); + + if (useVolatile || + (IsInPreservedAIRegion || + (getDebugInfo() && rec->hasAttr()))) { + llvm_unreachable("NYI"); + } + + Address Addr = getAddrOfField(base, field, Idx); + + const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize; + + // Get the access type. + mlir::Type FieldIntTy = builder.getUIntNTy(SS); + + auto loc = getLoc(field->getLocation()); + if (Addr.getElementType() != FieldIntTy) + Addr = builder.createElementBitCast(loc, Addr, FieldIntTy); + + QualType fieldType = + field->getType().withCVRQualifiers(base.getVRQualifiers()); + + assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields"); + LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); + return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo); +} + LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { + LValueBaseInfo BaseInfo = base.getBaseInfo(); - if (field->isBitField()) { - llvm_unreachable("NYI"); - } + if (field->isBitField()) + return buildLValueForBitField(base, field); // Fields of may-alias structures are may-alais themselves. // FIXME: this hould get propagated down through anonymous structs and unions. @@ -516,12 +583,55 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, /// method emits the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { - assert(LV.isSimple() && "not implemented"); assert(!LV.getType()->isFunctionType()); assert(!(LV.getType()->isConstantMatrixType()) && "not implemented"); - // Everything needs a load. - return RValue::get(buildLoadOfScalar(LV, Loc)); + if (LV.isBitField()) + return buildLoadOfBitfieldLValue(LV, Loc); + + if (LV.isSimple()) + return RValue::get(buildLoadOfScalar(LV, Loc)); + llvm_unreachable("NYI"); +} + +RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, + SourceLocation Loc) { + const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo(); + + // Get the output type. + mlir::Type ResLTy = convertType(LV.getType()); + Address Ptr = LV.getBitFieldAddress(); + mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr); + auto ValWidth = Val.getType().cast().getWidth(); + + bool UseVolatile = LV.isVolatileQualified() && + Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); + const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; + const unsigned StorageSize = + UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; + + if (Info.IsSigned) { + assert(static_cast(Offset + Info.Size) <= StorageSize); + + mlir::Type typ = builder.getSIntNTy(ValWidth); + Val = builder.createIntCast(Val, typ); + + unsigned HighBits = StorageSize - Offset - Info.Size; + if (HighBits) + Val = builder.createShiftLeft(Val, HighBits); + if (Offset + HighBits) + Val = builder.createShiftRight(Val, Offset + HighBits); + } else { + if (Offset) + Val = builder.createShiftRight(Val, Offset); + + if (static_cast(Offset) + Info.Size < StorageSize) + Val = builder.createAnd(Val, + llvm::APInt::getLowBitsSet(ValWidth, Info.Size)); + } + Val = builder.createIntCast(Val, ResLTy); + assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); + return RValue::get(Val); } void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { @@ -544,6 +654,81 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { buildStoreOfScalar(Src.getScalarVal(), Dst); } +void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, + mlir::Value &Result) { + const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo(); + mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType()); + Address Ptr = Dst.getBitFieldAddress(); + + // Get the source value, truncated to the width of the bit-field. + mlir::Value SrcVal = Src.getScalarVal(); + + // Cast the source to the storage type and shift it into place. + SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType()); + auto SrcWidth = SrcVal.getType().cast().getWidth(); + mlir::Value MaskedVal = SrcVal; + + const bool UseVolatile = + CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() && + Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); + const unsigned StorageSize = + UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; + const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; + // See if there are other bits in the bitfield's storage we'll need to load + // and mask together with source before storing. + if (StorageSize != Info.Size) { + assert(StorageSize > Info.Size && "Invalid bitfield size."); + + mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc()); + + // Mask the source value as needed. + if (!hasBooleanRepresentation(Dst.getType())) + SrcVal = builder.createAnd( + SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size)); + + MaskedVal = SrcVal; + if (Offset) + SrcVal = builder.createShiftLeft(SrcVal, Offset); + + // Mask out the original value. + Val = builder.createAnd( + Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size)); + + // Or together the unchanged values and the source value. + SrcVal = builder.createOr(Val, SrcVal); + + } else { + // According to the AACPS: + // When a volatile bit-field is written, and its container does not overlap + // with any non-bit-field member, its container must be read exactly once + // and written exactly once using the access width appropriate to the type + // of the container. The two accesses are not atomic. + llvm_unreachable("volatile bit-field is not implemented for the AACPS"); + } + + // Write the new value back out. + // TODO: constant matrix type, volatile, no init, non temporal, TBAA + buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(), + Dst.getBaseInfo(), false, false); + + // Return the new value of the bit-field. + mlir::Value ResultVal = MaskedVal; + ResultVal = builder.createIntCast(ResultVal, ResLTy); + + // Sign extend the value if needed. + if (Info.IsSigned) { + assert(Info.Size <= StorageSize); + unsigned HighBits = StorageSize - Info.Size; + + if (HighBits) { + ResultVal = builder.createShiftLeft(ResultVal, HighBits); + ResultVal = builder.createShiftRight(ResultVal, HighBits); + } + } + + Result = buildFromMemory(ResultVal, Dst.getType()); +} + static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, const VarDecl *VD) { QualType T = E->getType(); @@ -767,7 +952,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { LValue LV = buildLValue(E->getLHS()); SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; - buildStoreThroughLValue(RV, LV); + if (LV.isBitField()) { + mlir::Value result; + buildStoreThroughBitfieldLValue(RV, LV, result); + } else { + buildStoreThroughLValue(RV, LV); + } + assert(!getContext().getLangOpts().OpenMP && "last priv cond not implemented"); return LV; @@ -2203,6 +2394,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty, mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, SourceLocation Loc) { + return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(), + lvalue.isNontemporal()); +} + +mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, + mlir::Location Loc) { return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), lvalue.getType(), Loc, lvalue.getBaseInfo(), lvalue.isNontemporal()); @@ -2220,6 +2418,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal) { + return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo, + isNontemporal); +} + +mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, + QualType Ty, mlir::Location Loc, + LValueBaseInfo BaseInfo, + bool isNontemporal) { // TODO(CIR): this has fallen out of sync with codegen // Atomic operations have to be done on integral types @@ -2229,15 +2435,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, } mlir::cir::LoadOp Load = builder.create( - getLoc(Loc), Addr.getElementType(), Addr.getPointer()); + Loc, Addr.getElementType(), Addr.getPointer()); if (isNontemporal) { llvm_unreachable("NYI"); } - - // TODO: TBAA - - // TODO: buildScalarRangeCheck + + assert(!UnimplementedFeature::tbaa() && "NYI"); + assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); return buildFromMemory(Load, Ty); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 5a48e44f61eb..f4a76958bcf2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1060,9 +1060,7 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, std::swap(pointerOperand, indexOperand); } - bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); - - auto &DL = CGF.CGM.getDataLayout(); + bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); // Some versions of glibc and gcc use idioms (particularly in their malloc // routines) that add a pointer-sized integer (known to be a pointer value) @@ -1863,7 +1861,7 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHS.isBitField()) { - llvm_unreachable("NYI"); + CGF.buildStoreThroughBitfieldLValue(RValue::get(RHS), LHS, RHS); } else { CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); CIRGenFunction::SourceLocRAIIObject loc{CGF, @@ -1964,25 +1962,27 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( auto condV = CGF.evaluateExprAsBool(condExpr); assert(!UnimplementedFeature::incrementProfileCounter()); - return builder.create( - loc, condV, /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto lhs = Visit(lhsExpr); - if (!lhs) { - lhs = builder.getNullValue(CGF.VoidTy, loc); - lhsIsVoid = true; - } - builder.create(loc, lhs); - }, - /*elseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto rhs = Visit(rhsExpr); - if (lhsIsVoid) { - assert(!rhs && "lhs and rhs types must match"); - rhs = builder.getNullValue(CGF.VoidTy, loc); - } - builder.create(loc, rhs); - }).getResult(); + return builder + .create( + loc, condV, /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto lhs = Visit(lhsExpr); + if (!lhs) { + lhs = builder.getNullValue(CGF.VoidTy, loc); + lhsIsVoid = true; + } + builder.create(loc, lhs); + }, + /*elseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto rhs = Visit(rhsExpr); + if (lhsIsVoid) { + assert(!rhs && "lhs and rhs types must match"); + rhs = builder.getNullValue(CGF.VoidTy, loc); + } + builder.create(loc, rhs); + }) + .getResult(); } mlir::Value condV = CGF.buildOpOnBoolExpr(condExpr, loc, lhsExpr, rhsExpr); @@ -2012,51 +2012,53 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( } }; - return builder.create( - loc, condV, /*trueBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - auto lhs = Visit(lhsExpr); - eval.end(CGF); - - if (lhs) { - yieldTy = lhs.getType(); - b.create(loc, lhs); - return; - } - // If LHS or RHS is a throw or void expression we need to patch arms - // as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - }, - /*falseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - auto rhs = Visit(rhsExpr); - eval.end(CGF); - - if (rhs) { - yieldTy = rhs.getType(); - b.create(loc, rhs); - } else { - // If LHS or RHS is a throw or void expression we need to patch arms - // as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - } - - patchVoidOrThrowSites(); - }).getResult(); + return builder + .create( + loc, condV, /*trueBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScopeContext lexScope{loc, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto lhs = Visit(lhsExpr); + eval.end(CGF); + + if (lhs) { + yieldTy = lhs.getType(); + b.create(loc, lhs); + return; + } + // If LHS or RHS is a throw or void expression we need to patch arms + // as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + }, + /*falseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScopeContext lexScope{loc, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto rhs = Visit(rhsExpr); + eval.end(CGF); + + if (rhs) { + yieldTy = rhs.getType(); + b.create(loc, rhs); + } else { + // If LHS or RHS is a throw or void expression we need to patch + // arms as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + } + + patchVoidOrThrowSites(); + }) + .getResult(); } mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 819a99f81ec7..ea5bf59d92c8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -869,6 +869,12 @@ class CIRGenFunction : public CIRGenTypeCache { clang::SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal = false); + mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, + mlir::Location Loc, LValueBaseInfo BaseInfo, + bool isNontemporal = false); + + RValue buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc); + /// Load a scalar value from an address, taking care to appropriately convert /// from the memory representation to CIR value representation. mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, @@ -883,6 +889,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// form the memory representation to the CIR value representation. The /// l-value must be a simple l-value. mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); + mlir::Value buildLoadOfScalar(LValue lvalue, mlir::Location Loc); Address buildLoadOfReference(LValue RefLVal, mlir::Location Loc, LValueBaseInfo *PointeeBaseInfo = nullptr); @@ -1237,6 +1244,9 @@ class CIRGenFunction : public CIRGenTypeCache { /// is 'Ty'. void buildStoreThroughLValue(RValue Src, LValue Dst); + void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, + mlir::Value &Result); + mlir::cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is @@ -1514,7 +1524,8 @@ class CIRGenFunction : public CIRGenTypeCache { AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD); LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); - + LValue buildLValueForBitField(LValue base, const FieldDecl *field); + /// Like buildLValueForField, excpet that if the Field is a reference, this /// will return the address of the reference and not the address of the value /// stored in the reference. @@ -1543,6 +1554,8 @@ class CIRGenFunction : public CIRGenTypeCache { return it->second; } + Address getAddrOfField(LValue base, const clang::FieldDecl *field, unsigned index); + /// Given an opaque value expression, return its LValue mapping if it exists, /// otherwise create one. LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e); diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index b1ded0017d59..0a686181db61 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -187,6 +187,16 @@ class CIRGenRecordLayout { /// Check whether this struct can be C++ zero-initialized with a /// zeroinitializer. bool isZeroInitializable() const { return IsZeroInitializable; } + + /// Return the BitFieldInfo that corresponds to the field FD. + const CIRGenBitFieldInfo &getBitFieldInfo(const clang::FieldDecl *FD) const { + FD = FD->getCanonicalDecl(); + assert(FD->isBitField() && "Invalid call for non-bit-field decl!"); + llvm::DenseMap::const_iterator + it = BitFields.find(FD); + assert(it != BitFields.end() && "Unable to find bitfield info"); + return it->second; + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index f84c20c4b136..c6edeb4d4fe4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -15,6 +15,7 @@ #define LLVM_CLANG_LIB_CIR_CIRGENVALUE_H #include "Address.h" +#include "CIRGenRecordLayout.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" @@ -207,6 +208,7 @@ class LValue { mlir::Value V; mlir::Type ElementType; LValueBaseInfo BaseInfo; + const CIRGenBitFieldInfo *BitFieldInfo{0}; public: bool isSimple() const { return LVType == Simple; } @@ -298,6 +300,38 @@ class LValue { const clang::Qualifiers &getQuals() const { return Quals; } clang::Qualifiers &getQuals() { return Quals; } + + // bitfield lvalue + Address getBitFieldAddress() const { + return Address(getBitFieldPointer(), ElementType, getAlignment()); + } + + mlir::Value getBitFieldPointer() const { + assert(isBitField()); + return V; + } + + const CIRGenBitFieldInfo &getBitFieldInfo() const { + assert(isBitField()); + return *BitFieldInfo; + } + + /// Create a new object to represent a bit-field access. + /// + /// \param Addr - The base address of the bit-field sequence this + /// bit-field refers to. + /// \param Info - The information describing how to perform the bit-field + /// access. + static LValue MakeBitfield(Address Addr, const CIRGenBitFieldInfo &Info, + clang::QualType type, LValueBaseInfo BaseInfo) { + LValue R; + R.LVType = BitField; + R.V = Addr.getPointer(); + R.ElementType = Addr.getElementType(); + R.BitFieldInfo = &Info; + R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo); + return R; + } }; /// An aggregate value slot. diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 3910315e3b04..c756a9789030 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -136,7 +136,7 @@ struct CIRRecordLowering final { /// Wraps mlir::cir::IntType with some implicit arguments. mlir::Type getUIntNType(uint64_t NumBits) { - unsigned AlignedBits = llvm::alignTo(NumBits, astContext.getCharWidth()); + unsigned AlignedBits = llvm::PowerOf2Ceil(NumBits); return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(), AlignedBits, /*isSigned=*/false); } @@ -214,8 +214,8 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, cxxRecordDecl{llvm::dyn_cast(recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, dataLayout{cirGenTypes.getModule().getModule()}, - IsZeroInitializable(true), IsZeroInitializableAsBase(true), - isPacked{isPacked} {} + IsZeroInitializable(true), + IsZeroInitializableAsBase(true), isPacked{isPacked} {} void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, @@ -499,6 +499,8 @@ void CIRRecordLowering::accumulateBitFields( // with lower cost. auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord, uint64_t StartBitOffset) { + if (OffsetInRecord >= 64) // See IntType::verify + return true; if (!cirGenTypes.getModule().getCodeGenOpts().FineGrainedBitfieldAccesses) return false; llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index d39bb3c1b48d..5a857a2db39f 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -138,6 +138,7 @@ struct UnimplementedFeature { static bool exceptions() { return false; } static bool metaDataNode() { return false; } static bool isSEHTryScope() { return false; } + static bool emitScalarRangeCheck() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c new file mode 100644 index 000000000000..3be014e50ede --- /dev/null +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -0,0 +1,83 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +struct __long { + struct __attribute__((__packed__)) { + unsigned __is_long_ : 1; + unsigned __cap_ : sizeof(unsigned) * 8 - 1; + }; + unsigned __size_; + unsigned *__data_; +}; + +void m() { + struct __long l; +} + +// CHECK: !ty_22anon22 = !cir.struct +// CHECK: !ty_22__long22 = !cir.struct}> + +typedef struct { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; +} S; // 65 bits in total, i.e. more than 64 + +// CHECK: cir.func {{.*@store_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i +// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i +// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i +// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i +// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr +void store_field() { + S s; + s.a = 3; +} + +// CHECK: cir.func {{.*@store_neg_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i +// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u32i, [[TMP9]] : !u32i) -> !u32i +// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i +// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u32i +// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u32i +// CHECK: cir.store [[TMP13]], [[TMP4]] : !u32i, cir.ptr +void store_neg_field() { + S s; + s.d = -1; +} + +// CHECK: cir.func {{.*@load_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u32i), !s32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i +// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s32i, [[TMP7]] : !s32i) -> !s32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i +// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s32i, [[TMP9]] : !s32i) -> !s32i +// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s32i), !s32i +// CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr +// CHECK: [[TMP12:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +int load_field(S* s) { + return s->d; +} diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 48eec3bd093b..27e24f30d582 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * struct __long { struct __attribute__((__packed__)) { @@ -16,3 +17,67 @@ void m() { // CHECK: !ty_22anon22 = !cir.struct // CHECK: !ty_22__long22 = !cir.struct}> + +struct S { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; +}; // 65 bits in total, i.e. more than 64 + +// CHECK: cir.func @_Z11store_field +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i +// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i +// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i +// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i +// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr +void store_field() { + S s; + s.a = 3; +} + +// CHECK: cir.func @_Z15store_neg_field +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i +// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u32i, [[TMP9]] : !u32i) -> !u32i +// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i +// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u32i +// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u32i +// CHECK: cir.store [[TMP13]], [[TMP4]] : !u32i, cir.ptr +void store_neg_field() { + S s; + s.d = -1; +} + +// CHECK: cir.func @_Z10load_field +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u32i), !s32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i +// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s32i, [[TMP7]] : !s32i) -> !s32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i +// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s32i, [[TMP9]] : !s32i) -> !s32i +// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s32i), !s32i +// CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr +// CHECK: [[TMP12:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +int load_field(S& s) { + return s.d; +} From fff2cad3a9babe599e05fadb92a103bd94cdb007 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 15 Sep 2023 13:53:55 -0700 Subject: [PATCH 1200/2301] Revert "[CIR][CIRGen] CIR generation for bitfields. Fixes #13 (#233)" Breaks ninja check-clang-cir This reverts commit 471e568d8c75ea9320e201aecbd608f9633c7a63. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 65 +---- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 229 +----------------- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 138 ++++++----- clang/lib/CIR/CodeGen/CIRGenFunction.h | 15 +- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 10 - clang/lib/CIR/CodeGen/CIRGenValue.h | 34 --- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 8 +- .../CodeGen/UnimplementedFeatureGuarding.h | 1 - clang/test/CIR/CodeGen/bitfields.c | 83 ------- clang/test/CIR/CodeGen/bitfields.cpp | 65 ----- 10 files changed, 85 insertions(+), 563 deletions(-) delete mode 100644 clang/test/CIR/CodeGen/bitfields.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index d7dea8f3ad5e..572b85be4c2c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -460,11 +460,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return getConstInt( loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); } - mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, - const llvm::APInt &val) { - return create(loc, typ, - getAttr(typ, val)); - } mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), getCIRBoolAttr(state)); @@ -682,65 +677,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::UnaryOpKind::Not, value); } - mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, - const llvm::APInt &rhs) { - return create( - lhs.getLoc(), lhs.getType(), kind, lhs, - getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); - } - - mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, - mlir::Value rhs) { - return create(lhs.getLoc(), lhs.getType(), kind, lhs, - rhs); - } - - mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, - bool isShiftLeft) { - return create( - lhs.getLoc(), lhs.getType(), lhs, - getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); - } - - mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) { - auto width = lhs.getType().dyn_cast().getWidth(); - auto shift = llvm::APInt(width, bits); - return createShift(lhs, shift, isShiftLeft); - } - - mlir::Value createShiftLeft(mlir::Value lhs, unsigned bits) { - return createShift(lhs, bits, true); - } - - mlir::Value createShiftRight(mlir::Value lhs, unsigned bits) { - return createShift(lhs, bits, false); - } - - mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, - unsigned bits) { - auto val = llvm::APInt::getLowBitsSet(size, bits); - auto typ = mlir::cir::IntType::get(getContext(), size, false); - return getConstAPInt(loc, typ, val); - } - - mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) { - auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); - return createBinop(lhs, mlir::cir::BinOpKind::And, val); - } - - mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { - return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); - } - - mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) { - auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); - return createBinop(lhs, mlir::cir::BinOpKind::Or, val); - } - - mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { - return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); - } - //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// @@ -791,5 +727,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return createCast(mlir::cir::CastKind::bitcast, src, newTy); } }; + } // namespace cir #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 4372fcff35ed..5265760ecbdc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -21,7 +21,6 @@ #include "clang/AST/GlobalDecl.h" #include "clang/Basic/Builtins.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -129,7 +128,6 @@ static Address buildPointerWithAlignment(const Expr *E, if (PtrTy->getPointeeType()->isVoidType()) break; assert(!UnimplementedFeature::tbaa()); - LValueBaseInfo InnerBaseInfo; Address Addr = CGF.buildPointerWithAlignment( CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull); @@ -213,78 +211,13 @@ static Address buildPointerWithAlignment(const Expr *E, return Address(CGF.buildScalarExpr(E), Align); } -/// Helper method to check if the underlying ABI is AAPCS -static bool isAAPCS(const TargetInfo &TargetInfo) { - return TargetInfo.getABI().starts_with("aapcs"); -} - -Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field, - unsigned index) { - if (index == 0) - return base.getAddress(); - - auto loc = getLoc(field->getLocation()); - auto fieldType = convertType(field->getType()); - auto fieldPtr = - mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); - auto sea = getBuilder().createGetMember( - loc, fieldPtr, base.getPointer(), field->getName(), index); - - return Address(sea, CharUnits::One()); -} - -static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base, - const CIRGenBitFieldInfo &info, - const FieldDecl *field) { - return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth && - info.VolatileStorageSize != 0 && - field->getType() - .withCVRQualifiers(base.getVRQualifiers()) - .isVolatileQualified(); -} - -LValue CIRGenFunction::buildLValueForBitField(LValue base, - const FieldDecl *field) { - - LValueBaseInfo BaseInfo = base.getBaseInfo(); - const RecordDecl *rec = field->getParent(); - auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent()); - auto &info = layout.getBitFieldInfo(field); - auto useVolatile = useVolatileForBitField(CGM, base, info, field); - unsigned Idx = layout.getCIRFieldNo(field); - - if (useVolatile || - (IsInPreservedAIRegion || - (getDebugInfo() && rec->hasAttr()))) { - llvm_unreachable("NYI"); - } - - Address Addr = getAddrOfField(base, field, Idx); - - const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize; - - // Get the access type. - mlir::Type FieldIntTy = builder.getUIntNTy(SS); - - auto loc = getLoc(field->getLocation()); - if (Addr.getElementType() != FieldIntTy) - Addr = builder.createElementBitCast(loc, Addr, FieldIntTy); - - QualType fieldType = - field->getType().withCVRQualifiers(base.getVRQualifiers()); - - assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields"); - LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); - return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo); -} - LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { - LValueBaseInfo BaseInfo = base.getBaseInfo(); - if (field->isBitField()) - return buildLValueForBitField(base, field); + if (field->isBitField()) { + llvm_unreachable("NYI"); + } // Fields of may-alias structures are may-alais themselves. // FIXME: this hould get propagated down through anonymous structs and unions. @@ -583,55 +516,12 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, /// method emits the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { + assert(LV.isSimple() && "not implemented"); assert(!LV.getType()->isFunctionType()); assert(!(LV.getType()->isConstantMatrixType()) && "not implemented"); - if (LV.isBitField()) - return buildLoadOfBitfieldLValue(LV, Loc); - - if (LV.isSimple()) - return RValue::get(buildLoadOfScalar(LV, Loc)); - llvm_unreachable("NYI"); -} - -RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, - SourceLocation Loc) { - const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo(); - - // Get the output type. - mlir::Type ResLTy = convertType(LV.getType()); - Address Ptr = LV.getBitFieldAddress(); - mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr); - auto ValWidth = Val.getType().cast().getWidth(); - - bool UseVolatile = LV.isVolatileQualified() && - Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); - const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; - const unsigned StorageSize = - UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; - - if (Info.IsSigned) { - assert(static_cast(Offset + Info.Size) <= StorageSize); - - mlir::Type typ = builder.getSIntNTy(ValWidth); - Val = builder.createIntCast(Val, typ); - - unsigned HighBits = StorageSize - Offset - Info.Size; - if (HighBits) - Val = builder.createShiftLeft(Val, HighBits); - if (Offset + HighBits) - Val = builder.createShiftRight(Val, Offset + HighBits); - } else { - if (Offset) - Val = builder.createShiftRight(Val, Offset); - - if (static_cast(Offset) + Info.Size < StorageSize) - Val = builder.createAnd(Val, - llvm::APInt::getLowBitsSet(ValWidth, Info.Size)); - } - Val = builder.createIntCast(Val, ResLTy); - assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); - return RValue::get(Val); + // Everything needs a load. + return RValue::get(buildLoadOfScalar(LV, Loc)); } void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { @@ -654,81 +544,6 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { buildStoreOfScalar(Src.getScalarVal(), Dst); } -void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, - mlir::Value &Result) { - const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo(); - mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType()); - Address Ptr = Dst.getBitFieldAddress(); - - // Get the source value, truncated to the width of the bit-field. - mlir::Value SrcVal = Src.getScalarVal(); - - // Cast the source to the storage type and shift it into place. - SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType()); - auto SrcWidth = SrcVal.getType().cast().getWidth(); - mlir::Value MaskedVal = SrcVal; - - const bool UseVolatile = - CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() && - Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); - const unsigned StorageSize = - UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; - const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; - // See if there are other bits in the bitfield's storage we'll need to load - // and mask together with source before storing. - if (StorageSize != Info.Size) { - assert(StorageSize > Info.Size && "Invalid bitfield size."); - - mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc()); - - // Mask the source value as needed. - if (!hasBooleanRepresentation(Dst.getType())) - SrcVal = builder.createAnd( - SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size)); - - MaskedVal = SrcVal; - if (Offset) - SrcVal = builder.createShiftLeft(SrcVal, Offset); - - // Mask out the original value. - Val = builder.createAnd( - Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size)); - - // Or together the unchanged values and the source value. - SrcVal = builder.createOr(Val, SrcVal); - - } else { - // According to the AACPS: - // When a volatile bit-field is written, and its container does not overlap - // with any non-bit-field member, its container must be read exactly once - // and written exactly once using the access width appropriate to the type - // of the container. The two accesses are not atomic. - llvm_unreachable("volatile bit-field is not implemented for the AACPS"); - } - - // Write the new value back out. - // TODO: constant matrix type, volatile, no init, non temporal, TBAA - buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(), - Dst.getBaseInfo(), false, false); - - // Return the new value of the bit-field. - mlir::Value ResultVal = MaskedVal; - ResultVal = builder.createIntCast(ResultVal, ResLTy); - - // Sign extend the value if needed. - if (Info.IsSigned) { - assert(Info.Size <= StorageSize); - unsigned HighBits = StorageSize - Info.Size; - - if (HighBits) { - ResultVal = builder.createShiftLeft(ResultVal, HighBits); - ResultVal = builder.createShiftRight(ResultVal, HighBits); - } - } - - Result = buildFromMemory(ResultVal, Dst.getType()); -} - static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, const VarDecl *VD) { QualType T = E->getType(); @@ -952,13 +767,7 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { LValue LV = buildLValue(E->getLHS()); SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; - if (LV.isBitField()) { - mlir::Value result; - buildStoreThroughBitfieldLValue(RV, LV, result); - } else { - buildStoreThroughLValue(RV, LV); - } - + buildStoreThroughLValue(RV, LV); assert(!getContext().getLangOpts().OpenMP && "last priv cond not implemented"); return LV; @@ -2394,13 +2203,6 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty, mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, SourceLocation Loc) { - return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(), - lvalue.isNontemporal()); -} - -mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, - mlir::Location Loc) { return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), lvalue.getType(), Loc, lvalue.getBaseInfo(), lvalue.isNontemporal()); @@ -2418,14 +2220,6 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal) { - return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo, - isNontemporal); -} - -mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, - QualType Ty, mlir::Location Loc, - LValueBaseInfo BaseInfo, - bool isNontemporal) { // TODO(CIR): this has fallen out of sync with codegen // Atomic operations have to be done on integral types @@ -2435,14 +2229,15 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, } mlir::cir::LoadOp Load = builder.create( - Loc, Addr.getElementType(), Addr.getPointer()); + getLoc(Loc), Addr.getElementType(), Addr.getPointer()); if (isNontemporal) { llvm_unreachable("NYI"); } - - assert(!UnimplementedFeature::tbaa() && "NYI"); - assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); + + // TODO: TBAA + + // TODO: buildScalarRangeCheck return buildFromMemory(Load, Ty); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index f4a76958bcf2..5a48e44f61eb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1060,7 +1060,9 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, std::swap(pointerOperand, indexOperand); } - bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); + bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); + + auto &DL = CGF.CGM.getDataLayout(); // Some versions of glibc and gcc use idioms (particularly in their malloc // routines) that add a pointer-sized integer (known to be a pointer value) @@ -1861,7 +1863,7 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHS.isBitField()) { - CGF.buildStoreThroughBitfieldLValue(RValue::get(RHS), LHS, RHS); + llvm_unreachable("NYI"); } else { CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); CIRGenFunction::SourceLocRAIIObject loc{CGF, @@ -1962,27 +1964,25 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( auto condV = CGF.evaluateExprAsBool(condExpr); assert(!UnimplementedFeature::incrementProfileCounter()); - return builder - .create( - loc, condV, /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto lhs = Visit(lhsExpr); - if (!lhs) { - lhs = builder.getNullValue(CGF.VoidTy, loc); - lhsIsVoid = true; - } - builder.create(loc, lhs); - }, - /*elseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto rhs = Visit(rhsExpr); - if (lhsIsVoid) { - assert(!rhs && "lhs and rhs types must match"); - rhs = builder.getNullValue(CGF.VoidTy, loc); - } - builder.create(loc, rhs); - }) - .getResult(); + return builder.create( + loc, condV, /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto lhs = Visit(lhsExpr); + if (!lhs) { + lhs = builder.getNullValue(CGF.VoidTy, loc); + lhsIsVoid = true; + } + builder.create(loc, lhs); + }, + /*elseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto rhs = Visit(rhsExpr); + if (lhsIsVoid) { + assert(!rhs && "lhs and rhs types must match"); + rhs = builder.getNullValue(CGF.VoidTy, loc); + } + builder.create(loc, rhs); + }).getResult(); } mlir::Value condV = CGF.buildOpOnBoolExpr(condExpr, loc, lhsExpr, rhsExpr); @@ -2012,53 +2012,51 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( } }; - return builder - .create( - loc, condV, /*trueBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - auto lhs = Visit(lhsExpr); - eval.end(CGF); - - if (lhs) { - yieldTy = lhs.getType(); - b.create(loc, lhs); - return; - } - // If LHS or RHS is a throw or void expression we need to patch arms - // as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - }, - /*falseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - auto rhs = Visit(rhsExpr); - eval.end(CGF); - - if (rhs) { - yieldTy = rhs.getType(); - b.create(loc, rhs); - } else { - // If LHS or RHS is a throw or void expression we need to patch - // arms as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - } - - patchVoidOrThrowSites(); - }) - .getResult(); + return builder.create( + loc, condV, /*trueBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScopeContext lexScope{loc, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto lhs = Visit(lhsExpr); + eval.end(CGF); + + if (lhs) { + yieldTy = lhs.getType(); + b.create(loc, lhs); + return; + } + // If LHS or RHS is a throw or void expression we need to patch arms + // as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + }, + /*falseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScopeContext lexScope{loc, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto rhs = Visit(rhsExpr); + eval.end(CGF); + + if (rhs) { + yieldTy = rhs.getType(); + b.create(loc, rhs); + } else { + // If LHS or RHS is a throw or void expression we need to patch arms + // as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + } + + patchVoidOrThrowSites(); + }).getResult(); } mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index ea5bf59d92c8..819a99f81ec7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -869,12 +869,6 @@ class CIRGenFunction : public CIRGenTypeCache { clang::SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal = false); - mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, - mlir::Location Loc, LValueBaseInfo BaseInfo, - bool isNontemporal = false); - - RValue buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc); - /// Load a scalar value from an address, taking care to appropriately convert /// from the memory representation to CIR value representation. mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, @@ -889,7 +883,6 @@ class CIRGenFunction : public CIRGenTypeCache { /// form the memory representation to the CIR value representation. The /// l-value must be a simple l-value. mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); - mlir::Value buildLoadOfScalar(LValue lvalue, mlir::Location Loc); Address buildLoadOfReference(LValue RefLVal, mlir::Location Loc, LValueBaseInfo *PointeeBaseInfo = nullptr); @@ -1244,9 +1237,6 @@ class CIRGenFunction : public CIRGenTypeCache { /// is 'Ty'. void buildStoreThroughLValue(RValue Src, LValue Dst); - void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, - mlir::Value &Result); - mlir::cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is @@ -1524,8 +1514,7 @@ class CIRGenFunction : public CIRGenTypeCache { AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD); LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); - LValue buildLValueForBitField(LValue base, const FieldDecl *field); - + /// Like buildLValueForField, excpet that if the Field is a reference, this /// will return the address of the reference and not the address of the value /// stored in the reference. @@ -1554,8 +1543,6 @@ class CIRGenFunction : public CIRGenTypeCache { return it->second; } - Address getAddrOfField(LValue base, const clang::FieldDecl *field, unsigned index); - /// Given an opaque value expression, return its LValue mapping if it exists, /// otherwise create one. LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e); diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index 0a686181db61..b1ded0017d59 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -187,16 +187,6 @@ class CIRGenRecordLayout { /// Check whether this struct can be C++ zero-initialized with a /// zeroinitializer. bool isZeroInitializable() const { return IsZeroInitializable; } - - /// Return the BitFieldInfo that corresponds to the field FD. - const CIRGenBitFieldInfo &getBitFieldInfo(const clang::FieldDecl *FD) const { - FD = FD->getCanonicalDecl(); - assert(FD->isBitField() && "Invalid call for non-bit-field decl!"); - llvm::DenseMap::const_iterator - it = BitFields.find(FD); - assert(it != BitFields.end() && "Unable to find bitfield info"); - return it->second; - } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index c6edeb4d4fe4..f84c20c4b136 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -15,7 +15,6 @@ #define LLVM_CLANG_LIB_CIR_CIRGENVALUE_H #include "Address.h" -#include "CIRGenRecordLayout.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" @@ -208,7 +207,6 @@ class LValue { mlir::Value V; mlir::Type ElementType; LValueBaseInfo BaseInfo; - const CIRGenBitFieldInfo *BitFieldInfo{0}; public: bool isSimple() const { return LVType == Simple; } @@ -300,38 +298,6 @@ class LValue { const clang::Qualifiers &getQuals() const { return Quals; } clang::Qualifiers &getQuals() { return Quals; } - - // bitfield lvalue - Address getBitFieldAddress() const { - return Address(getBitFieldPointer(), ElementType, getAlignment()); - } - - mlir::Value getBitFieldPointer() const { - assert(isBitField()); - return V; - } - - const CIRGenBitFieldInfo &getBitFieldInfo() const { - assert(isBitField()); - return *BitFieldInfo; - } - - /// Create a new object to represent a bit-field access. - /// - /// \param Addr - The base address of the bit-field sequence this - /// bit-field refers to. - /// \param Info - The information describing how to perform the bit-field - /// access. - static LValue MakeBitfield(Address Addr, const CIRGenBitFieldInfo &Info, - clang::QualType type, LValueBaseInfo BaseInfo) { - LValue R; - R.LVType = BitField; - R.V = Addr.getPointer(); - R.ElementType = Addr.getElementType(); - R.BitFieldInfo = &Info; - R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo); - return R; - } }; /// An aggregate value slot. diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index c756a9789030..3910315e3b04 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -136,7 +136,7 @@ struct CIRRecordLowering final { /// Wraps mlir::cir::IntType with some implicit arguments. mlir::Type getUIntNType(uint64_t NumBits) { - unsigned AlignedBits = llvm::PowerOf2Ceil(NumBits); + unsigned AlignedBits = llvm::alignTo(NumBits, astContext.getCharWidth()); return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(), AlignedBits, /*isSigned=*/false); } @@ -214,8 +214,8 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, cxxRecordDecl{llvm::dyn_cast(recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, dataLayout{cirGenTypes.getModule().getModule()}, - IsZeroInitializable(true), - IsZeroInitializableAsBase(true), isPacked{isPacked} {} + IsZeroInitializable(true), IsZeroInitializableAsBase(true), + isPacked{isPacked} {} void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, @@ -499,8 +499,6 @@ void CIRRecordLowering::accumulateBitFields( // with lower cost. auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord, uint64_t StartBitOffset) { - if (OffsetInRecord >= 64) // See IntType::verify - return true; if (!cirGenTypes.getModule().getCodeGenOpts().FineGrainedBitfieldAccesses) return false; llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 5a857a2db39f..d39bb3c1b48d 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -138,7 +138,6 @@ struct UnimplementedFeature { static bool exceptions() { return false; } static bool metaDataNode() { return false; } static bool isSEHTryScope() { return false; } - static bool emitScalarRangeCheck() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c deleted file mode 100644 index 3be014e50ede..000000000000 --- a/clang/test/CIR/CodeGen/bitfields.c +++ /dev/null @@ -1,83 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * - -struct __long { - struct __attribute__((__packed__)) { - unsigned __is_long_ : 1; - unsigned __cap_ : sizeof(unsigned) * 8 - 1; - }; - unsigned __size_; - unsigned *__data_; -}; - -void m() { - struct __long l; -} - -// CHECK: !ty_22anon22 = !cir.struct -// CHECK: !ty_22__long22 = !cir.struct}> - -typedef struct { - int a : 4; - int b : 27; - int c : 17; - int d : 2; - int e : 15; -} S; // 65 bits in total, i.e. more than 64 - -// CHECK: cir.func {{.*@store_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , -// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i -// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i -// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i -// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i -// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i -// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i -// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr -void store_field() { - S s; - s.a = 3; -} - -// CHECK: cir.func {{.*@store_neg_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , -// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i -// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i -// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i -// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i -// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u32i, [[TMP9]] : !u32i) -> !u32i -// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i -// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u32i -// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u32i -// CHECK: cir.store [[TMP13]], [[TMP4]] : !u32i, cir.ptr -void store_neg_field() { - S s; - s.d = -1; -} - -// CHECK: cir.func {{.*@load_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i -// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u32i), !s32i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i -// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s32i, [[TMP7]] : !s32i) -> !s32i -// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i -// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s32i, [[TMP9]] : !s32i) -> !s32i -// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s32i), !s32i -// CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr -// CHECK: [[TMP12:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i -int load_field(S* s) { - return s->d; -} diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 27e24f30d582..48eec3bd093b 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * struct __long { struct __attribute__((__packed__)) { @@ -17,67 +16,3 @@ void m() { // CHECK: !ty_22anon22 = !cir.struct // CHECK: !ty_22__long22 = !cir.struct}> - -struct S { - int a : 4; - int b : 27; - int c : 17; - int d : 2; - int e : 15; -}; // 65 bits in total, i.e. more than 64 - -// CHECK: cir.func @_Z11store_field -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr -// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i -// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i -// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i -// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i -// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i -// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i -// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr -void store_field() { - S s; - s.a = 3; -} - -// CHECK: cir.func @_Z15store_neg_field -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr -// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i -// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i -// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i -// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i -// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u32i, [[TMP9]] : !u32i) -> !u32i -// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i -// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u32i -// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u32i -// CHECK: cir.store [[TMP13]], [[TMP4]] : !u32i, cir.ptr -void store_neg_field() { - S s; - s.d = -1; -} - -// CHECK: cir.func @_Z10load_field -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i -// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u32i), !s32i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i -// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s32i, [[TMP7]] : !s32i) -> !s32i -// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i -// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s32i, [[TMP9]] : !s32i) -> !s32i -// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s32i), !s32i -// CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr -// CHECK: [[TMP12:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i -int load_field(S& s) { - return s.d; -} From 8c9380161d1c03518c674912f12bc4aa3fa79006 Mon Sep 17 00:00:00 2001 From: Henrich Lauko Date: Mon, 18 Sep 2023 20:14:04 +0200 Subject: [PATCH 1201/2301] [CIR] Make AST attributes accessible via interfaces. (#250) - Introduces `CIR/Interfaces/ASTAttrInterfaces` which model API of clang AST nodes, but allows to plugin custom attribute, making `CIR` dialect AST independent. - Extends hierarchy of `DeclAttr`s to model `Decl` attributes more faithfully. - Notably all `CIRASTAttr`s are now created uniformly using `makeAstDeclAttr` which builds corresponding Attribute based on `clang::Decl`. --- clang/include/clang/CIR/CMakeLists.txt | 6 +- .../include/clang/CIR/Dialect/CMakeLists.txt | 6 - clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 4 + .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 52 ++++- .../include/clang/CIR/Dialect/IR/CIRDialect.h | 2 + clang/include/clang/CIR/Dialect/IR/CIROps.td | 8 +- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 8 +- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 3 +- .../clang/CIR/Interfaces/ASTAttrInterfaces.h | 45 +++++ .../clang/CIR/Interfaces/ASTAttrInterfaces.td | 191 ++++++++++++++++++ .../clang/CIR/Interfaces/CMakeLists.txt | 15 ++ clang/lib/CIR/CMakeLists.txt | 1 + clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 16 +- clang/lib/CIR/CodeGen/CMakeLists.txt | 2 + clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 44 +++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 55 +---- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 4 +- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 95 ++++----- .../Dialect/Transforms/LoweringPrepare.cpp | 30 +-- clang/lib/CIR/FrontendAction/CMakeLists.txt | 4 + .../lib/CIR/Interfaces/ASTAttrInterfaces.cpp | 15 ++ clang/lib/CIR/Interfaces/CMakeLists.txt | 14 ++ .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 4 + .../CIR/Lowering/ThroughMLIR/CMakeLists.txt | 4 + clang/lib/FrontendTool/CMakeLists.txt | 1 + clang/test/CIR/CodeGen/bitfields.cpp | 2 +- clang/test/CIR/CodeGen/dtors.cpp | 2 +- clang/test/CIR/CodeGen/static.cpp | 8 +- clang/test/CIR/CodeGen/struct.cpp | 2 +- clang/test/CIR/CodeGen/union.cpp | 12 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/IR/global.cir | 2 +- clang/test/CIR/IR/invalid.cir | 4 +- clang/test/CIR/IR/struct.cir | 2 +- clang/test/CIR/Lowering/array.cir | 8 +- clang/test/CIR/Lowering/globals.cir | 8 +- clang/test/CIR/Lowering/struct.cir | 8 +- clang/test/CIR/Lowering/unions.cir | 6 +- clang/test/CIR/Lowering/variadics.cir | 2 +- 41 files changed, 505 insertions(+), 197 deletions(-) create mode 100644 clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h create mode 100644 clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td create mode 100644 clang/include/clang/CIR/Interfaces/CMakeLists.txt create mode 100644 clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp create mode 100644 clang/lib/CIR/Interfaces/CMakeLists.txt diff --git a/clang/include/clang/CIR/CMakeLists.txt b/clang/include/clang/CIR/CMakeLists.txt index f8d6f407a03d..2028af5232c2 100644 --- a/clang/include/clang/CIR/CMakeLists.txt +++ b/clang/include/clang/CIR/CMakeLists.txt @@ -1,6 +1,8 @@ +set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --src-root set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include) -include_directories(${MLIR_INCLUDE_DIR}) -include_directories(${MLIR_TABLEGEN_OUTPUT_DIR}) +include_directories(SYSTEM ${MLIR_INCLUDE_DIR}) +include_directories(SYSTEM ${MLIR_TABLEGEN_OUTPUT_DIR}) add_subdirectory(Dialect) +add_subdirectory(Interfaces) diff --git a/clang/include/clang/CIR/Dialect/CMakeLists.txt b/clang/include/clang/CIR/Dialect/CMakeLists.txt index 383bf5231f57..cd837615e82f 100644 --- a/clang/include/clang/CIR/Dialect/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/CMakeLists.txt @@ -1,9 +1,3 @@ -set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --src-root -set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir -set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include) -include_directories(SYSTEM ${MLIR_INCLUDE_DIR}) -include_directories(SYSTEM ${MLIR_TABLEGEN_OUTPUT_DIR}) - add_custom_target(clang-cir-doc) # This replicates part of the add_mlir_doc cmake function from MLIR that cannot diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index 3ab044a9f59c..5961f77629b5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -19,8 +19,12 @@ #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" +#include "llvm/ADT/SmallVector.h" + #include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" + //===----------------------------------------------------------------------===// // CIR Dialect Attrs //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 0de74011f18a..4ec2657bdb0e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -15,8 +15,11 @@ include "mlir/IR/BuiltinAttributeInterfaces.td" include "mlir/IR/EnumAttr.td" + include "clang/CIR/Dialect/IR/CIRDialect.td" +include "clang/CIR/Interfaces/ASTAttrInterfaces.td" + //===----------------------------------------------------------------------===// // CIR Attrs //===----------------------------------------------------------------------===// @@ -394,12 +397,55 @@ class ASTDecl traits = []> // Enable verifier. let genVerifyDecl = 1; + + let extraClassDefinition = [{ + ::mlir::Attribute $cppClass::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { + // We cannot really parse anything AST related at this point + // since we have no serialization/JSON story. + return $cppClass::get(parser.getContext(), nullptr); + } + + void $cppClass::print(::mlir::AsmPrinter &printer) const { + // Nothing to print besides the mnemonics. + } + + LogicalResult $cppClass::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + }] # clang_name # [{ decl) { + return success(); + } + }]; } -def ASTFunctionDeclAttr : ASTDecl<"FunctionDecl", "fndecl">; -def ASTVarDeclAttr : ASTDecl<"VarDecl", "vardecl">; -def ASTRecordDeclAttr : ASTDecl<"RecordDecl", "recdecl">; +def ASTDeclAttr : ASTDecl<"Decl", "decl", [ASTDeclInterface]>; + +def ASTFunctionDeclAttr : ASTDecl<"FunctionDecl", "function.decl", + [ASTFunctionDeclInterface]>; + +def ASTCXXMethodDeclAttr : ASTDecl<"CXXMethodDecl", "cxxmethod.decl", + [ASTCXXMethodDeclInterface]>; + +def ASTCXXConstructorDeclAttr : ASTDecl<"CXXConstructorDecl", + "cxxconstructor.decl", [ASTCXXConstructorDeclInterface]>; + +def ASTCXXConversionDeclAttr : ASTDecl<"CXXConversionDecl", + "cxxconversion.decl", [ASTCXXConversionDeclInterface]>; + +def ASTCXXDestructorDeclAttr : ASTDecl<"CXXDestructorDecl", + "cxxdestructor.decl", [ASTCXXDestructorDeclInterface]>; + +def ASTVarDeclAttr : ASTDecl<"VarDecl", "var.decl", + [ASTVarDeclInterface]>; + +def ASTTypeDeclAttr: ASTDecl<"TypeDecl", "type.decl", + [ASTTypeDeclInterface]>; + +def ASTTagDeclAttr : ASTDecl<"TagDecl", "tag.decl", + [ASTTagDeclInterface]>; +def ASTRecordDeclAttr : ASTDecl<"RecordDecl", "record.decl", + [ASTRecordDeclInterface]>; //===----------------------------------------------------------------------===// // ExtraFuncAttr diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index ef56711fed72..58ff4881f90f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -31,6 +31,8 @@ #include "clang/CIR/Dialect/IR/CIROpsStructs.h.inc" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" + namespace mlir { namespace OpTrait { diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7bf35dc1b35f..09f793403b2b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -18,6 +18,8 @@ include "clang/CIR/Dialect/IR/CIRDialect.td" include "clang/CIR/Dialect/IR/CIRTypes.td" include "clang/CIR/Dialect/IR/CIRAttrs.td" +include "clang/CIR/Interfaces/ASTAttrInterfaces.td" + include "mlir/Interfaces/CallInterfaces.td" include "mlir/Interfaces/ControlFlowInterfaces.td" include "mlir/Interfaces/FunctionInterfaces.td" @@ -300,7 +302,7 @@ def AllocaOp : CIR_Op<"alloca", [ StrAttr:$name, UnitAttr:$init, ConfinedAttr, [IntMinValue<0>]>:$alignment, - OptionalAttr:$ast + OptionalAttr:$ast ); let results = (outs Res:$initial_value, UnitAttr:$constant, OptionalAttr:$alignment, - OptionalAttr:$ast + OptionalAttr:$ast ); let regions = (region AnyRegion:$ctorRegion, AnyRegion:$dtorRegion); let assemblyFormat = [{ @@ -1596,7 +1598,7 @@ def FuncOp : CIR_Op<"func", [ OptionalAttr:$arg_attrs, OptionalAttr:$res_attrs, OptionalAttr:$aliasee, - OptionalAttr:$ast); + OptionalAttr:$ast); let regions = (region AnyRegion:$body); let skipDefaultBuilders = 1; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 87aea83b744e..1286225f04aa 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -17,16 +17,12 @@ #include "mlir/IR/Types.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" + //===----------------------------------------------------------------------===// // CIR Dialect Types //===----------------------------------------------------------------------===// -namespace mlir { -namespace cir { -class ASTRecordDeclAttr; -} // namespace cir -} // namespace mlir - #define GET_TYPEDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsTypes.h.inc" diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 88087f8915ad..ea0738d19245 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -14,6 +14,7 @@ #define MLIR_CIR_DIALECT_CIR_TYPES include "clang/CIR/Dialect/IR/CIRDialect.td" +include "clang/CIR/Interfaces/ASTAttrInterfaces.td" include "mlir/Interfaces/DataLayoutInterfaces.td" include "mlir/IR/AttrTypeBase.td" @@ -111,7 +112,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct", "bool":$body, "bool":$packed, "mlir::cir::StructType::RecordKind":$kind, - "std::optional<::mlir::cir::ASTRecordDeclAttr>":$ast + "std::optional":$ast ); let hasCustomAssemblyFormat = 1; diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h new file mode 100644 index 000000000000..e2f1e16eb511 --- /dev/null +++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h @@ -0,0 +1,45 @@ +//===- ASTAttrInterfaces.h - CIR AST Interfaces -----------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_INTERFACES_CIR_AST_ATTR_INTERFACES_H_ +#define MLIR_INTERFACES_CIR_AST_ATTR_INTERFACES_H_ + +#include "mlir/IR/Attributes.h" + +#include "clang/AST/Attr.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/Mangle.h" + +namespace mlir { +namespace cir { + +mlir::Attribute makeFuncDeclAttr(const clang::Decl *decl, + mlir::MLIRContext *ctx); + +} // namespace cir +} // namespace mlir + +/// Include the generated interface declarations. +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h.inc" + +namespace mlir { +namespace cir { + +template bool hasAttr(ASTDeclInterface decl) { + if constexpr (std::is_same_v) + return decl.hasOwnerAttr(); + if constexpr (std::is_same_v) + return decl.hasPointerAttr(); + if constexpr (std::is_same_v) + return decl.hasInitPriorityAttr(); +} + +} // namespace cir +} // namespace mlir + +#endif // MLIR_INTERFACES_CIR_AST_ATAR_INTERFACES_H_ diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td new file mode 100644 index 000000000000..8aca1d9c8e63 --- /dev/null +++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td @@ -0,0 +1,191 @@ +//===- ASTAttrInterfaces.td - CIR AST Interface Definitions -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_CIR_INTERFACES_AST_ATTR_INTERFACES +#define MLIR_CIR_INTERFACES_AST_ATTR_INTERFACES + +include "mlir/IR/OpBase.td" + +let cppNamespace = "::mlir::cir" in { + def ASTDeclInterface : AttrInterface<"ASTDeclInterface"> { + let methods = [ + InterfaceMethod<"", "bool", "hasOwnerAttr", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->template hasAttr(); + }] + >, + InterfaceMethod<"", "bool", "hasPointerAttr", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->template hasAttr(); + }] + >, + InterfaceMethod<"", "bool", "hasInitPriorityAttr", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->template hasAttr(); + }] + > + ]; + } + + def ASTNamedDeclInterface : AttrInterface<"ASTNamedDeclInterface", + [ASTDeclInterface]> { + let methods = [ + InterfaceMethod<"", "clang::DeclarationName", "getDeclName", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->getDeclName(); + }] + >, + InterfaceMethod<"", "llvm::StringRef", "getName", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->getName(); + }] + > + ]; + } + + def ASTValueDeclInterface : AttrInterface<"ASTValueDeclInterface", + [ASTNamedDeclInterface]>; + + def ASTDeclaratorDeclInterface : AttrInterface<"ASTDeclaratorDeclInterface", + [ASTValueDeclInterface]>; + + def ASTVarDeclInterface : AttrInterface<"ASTVarDeclInterface", + [ASTDeclaratorDeclInterface]> { + let methods = [ + InterfaceMethod<"", "void", "mangleDynamicInitializer", (ins "llvm::raw_ostream&":$Out), [{}], + /*defaultImplementation=*/ [{ + std::unique_ptr MangleCtx( + $_attr.getAstDecl()->getASTContext().createMangleContext()); + MangleCtx->mangleDynamicInitializer($_attr.getAstDecl(), Out); + }] + >, + InterfaceMethod<"", "clang::VarDecl::TLSKind", "getTLSKind", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->getTLSKind(); + }] + > + ]; + } + + def ASTFunctionDeclInterface : AttrInterface<"ASTFunctionDeclInterface", + [ASTDeclaratorDeclInterface]> { + let methods = [ + InterfaceMethod<"", "bool", "isOverloadedOperator", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->isOverloadedOperator(); + }] + >, + InterfaceMethod<"", "bool", "isStatic", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->isStatic(); + }] + > + ]; + } + + def ASTCXXMethodDeclInterface : AttrInterface<"ASTCXXMethodDeclInterface", + [ASTFunctionDeclInterface]> { + let methods = [ + InterfaceMethod<"", "bool", "isCopyAssignmentOperator", (ins), [{}], + /*defaultImplementation=*/ [{ + if (auto decl = dyn_cast($_attr.getAstDecl())) + return decl->isCopyAssignmentOperator(); + return false; + }] + >, + InterfaceMethod<"", "bool", "isMoveAssignmentOperator", (ins), [{}], + /*defaultImplementation=*/ [{ + if (auto decl = dyn_cast($_attr.getAstDecl())) + return decl->isMoveAssignmentOperator(); + return false; + }] + >, + InterfaceMethod<"", "bool", "isConst", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->isConst(); + }] + > + ]; + } + + def ASTCXXConstructorDeclInterface : AttrInterface<"ASTCXXConstructorDeclInterface", + [ASTCXXMethodDeclInterface]> { + let methods = [ + InterfaceMethod<"", "bool", "isDefaultConstructor", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->isDefaultConstructor(); + }] + >, + InterfaceMethod<"", "bool", "isCopyConstructor", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->isCopyConstructor(); + }] + > + ]; + } + + def ASTCXXConversionDeclInterface : AttrInterface<"ASTCXXConversionDeclInterface", + [ASTCXXMethodDeclInterface]>; + + def ASTCXXDestructorDeclInterface : AttrInterface<"ASTCXXDestructorDeclInterface", + [ASTCXXMethodDeclInterface]>; + + def ASTTypeDeclInterface : AttrInterface<"ASTTypeDeclInterface", + [ASTNamedDeclInterface]>; + + def ASTTagDeclInterface : AttrInterface<"ASTTagDeclInterface", + [ASTTypeDeclInterface]> { + let methods = [ + InterfaceMethod<"", "clang::TagTypeKind", "getTagKind", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAstDecl()->getTagKind(); + }] + > + ]; + } + + def ASTRecordDeclInterface : AttrInterface<"ASTRecordDeclInterface", + [ASTTagDeclInterface]> { + let methods = [ + InterfaceMethod<"", "bool", "isLambda", (ins), [{}], + /*defaultImplementation=*/ [{ + if (auto ast = clang::dyn_cast($_attr.getAstDecl())) + return ast->isLambda(); + return false; + }] + >, + InterfaceMethod<"", "bool", "hasPromiseType", (ins), [{}], + /*defaultImplementation=*/ [{ + if (!clang::isa($_attr.getAstDecl())) + return false; + for (const auto *sub : $_attr.getAstDecl()->decls()) { + if (auto subRec = clang::dyn_cast(sub)) { + if (subRec->getDeclName().isIdentifier() && + subRec->getName() == "promise_type") { + return true; + } + } + } + return false; + }] + > + ]; + } + + def AnyASTFunctionDeclAttr : Attr< + CPred<"::mlir::isa<::mlir::cir::ASTFunctionDeclInterface>($_self)">, + "AST Function attribute"> { + let storageType = "::mlir::Attribute"; + let returnType = "::mlir::Attribute"; + let convertFromStorage = "$_self"; + let constBuilderCall = "$0"; + } + +} // namespace mlir::cir + +#endif // MLIR_CIR_INTERFACES_AST_ATTR_INTERFACES diff --git a/clang/include/clang/CIR/Interfaces/CMakeLists.txt b/clang/include/clang/CIR/Interfaces/CMakeLists.txt new file mode 100644 index 000000000000..6925b69a2c97 --- /dev/null +++ b/clang/include/clang/CIR/Interfaces/CMakeLists.txt @@ -0,0 +1,15 @@ +# This replicates part of the add_mlir_interface cmake function from MLIR that +# cannot be used here. This happens because it expects to be run inside MLIR +# directory which is not the case for CIR (and also FIR, both have similar +# workarounds). + +# Declare a dialect in the include directory +function(add_clang_mlir_attr_interface interface) + set(LLVM_TARGET_DEFINITIONS ${interface}.td) + mlir_tablegen(${interface}.h.inc -gen-attr-interface-decls) + mlir_tablegen(${interface}.cpp.inc -gen-attr-interface-defs) + add_public_tablegen_target(MLIRCIR${interface}IncGen) + add_dependencies(mlir-generic-headers MLIRCIR${interface}IncGen) +endfunction() + +add_clang_mlir_attr_interface(ASTAttrInterfaces) diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 41e07837d21d..093420b4fee3 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -5,3 +5,4 @@ add_subdirectory(Dialect) add_subdirectory(CodeGen) add_subdirectory(FrontendAction) add_subdirectory(Lowering) +add_subdirectory(Interfaces) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 5265760ecbdc..a884c9144637 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -134,8 +134,8 @@ static Address buildPointerWithAlignment(const Expr *E, if (BaseInfo) *BaseInfo = InnerBaseInfo; - if (isa(CE)) { - assert(!UnimplementedFeature::tbaa()); + if (isa(CE)) { + assert(!UnimplementedFeature::tbaa()); LValueBaseInfo TargetTypeBaseInfo; CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index e468c5e5fc58..6485ddde51ae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -196,7 +196,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, CIRGenModule::~CIRGenModule() {} -bool CIRGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor, bool ExcludeDtor) { +bool CIRGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor, + bool ExcludeDtor) { if (!Ty.isConstant(astCtx) && !Ty->isReferenceType()) return false; @@ -713,7 +714,7 @@ CIRGenModule::getAddrOfGlobalVarAttr(const VarDecl *D, mlir::Type Ty, return builder.getGlobalViewAttr(builder.getPointerTo(Ty), globalOp); } -mlir::Operation* CIRGenModule::getWeakRefReference(const ValueDecl *VD) { +mlir::Operation *CIRGenModule::getWeakRefReference(const ValueDecl *VD) { const AliasAttr *AA = VD->getAttr(); assert(AA && "No alias?"); @@ -728,8 +729,8 @@ mlir::Operation* CIRGenModule::getWeakRefReference(const ValueDecl *VD) { mlir::Type DeclTy = getTypes().convertTypeForMem(VD->getType()); if (DeclTy.isa()) { auto F = GetOrCreateCIRFunction(AA->getAliasee(), DeclTy, - GlobalDecl(cast(VD)), - /*ForVtable=*/false); + GlobalDecl(cast(VD)), + /*ForVtable=*/false); F.setLinkage(mlir::cir::GlobalLinkageKind::ExternalWeakLinkage); WeakRefReferences.insert(F); return F; @@ -1807,7 +1808,7 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, f = builder.create(loc, name, Ty); if (FD) - f.setAstAttr(builder.getAttr(FD)); + f.setAstAttr(makeFuncDeclAttr(FD, builder.getContext())); if (FD && !FD->hasPrototype()) f.setNoProtoAttr(builder.getUnitAttr()); @@ -1849,7 +1850,7 @@ mlir::Location CIRGenModule::getLocForFunction(const clang::FunctionDecl *FD) { } void CIRGenModule::setExtraAttributesForFunc(FuncOp f, - const clang::FunctionDecl *FD) { + const clang::FunctionDecl *FD) { mlir::NamedAttrList attrs; if (!FD) { @@ -1924,8 +1925,7 @@ void CIRGenModule::setExtraAttributesForFunc(FuncOp f, } f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), - attrs.getDictionary(builder.getContext()))); + builder.getContext(), attrs.getDictionary(builder.getContext()))); } /// If the specified mangled name is not in the module, diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 7c0474aee006..a379ed464316 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -38,6 +38,7 @@ add_clang_library(clangCIR DEPENDS MLIRCIR MLIRCIROpsIncGen + MLIRCIRASTAttrInterfacesIncGen ${dialect_libs} LINK_LIBS @@ -47,6 +48,7 @@ add_clang_library(clangCIR ${dialect_libs} MLIRCIR MLIRCIRTransforms + MLIRCIRASTAttrInterfaces MLIRAffineToStandard MLIRAnalysis MLIRDLTIDialect diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 64c68b5f8ce1..8cbdefa788e5 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -30,11 +30,11 @@ // ClangIR holds back AST references when available. #include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" -static void printStructMembers(mlir::AsmPrinter &p, - mlir::ArrayAttr members); +static void printStructMembers(mlir::AsmPrinter &p, mlir::ArrayAttr members); static mlir::ParseResult parseStructMembers(::mlir::AsmParser &parser, - mlir::ArrayAttr &members); + mlir::ArrayAttr &members); #define GET_ATTRDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" @@ -42,6 +42,40 @@ static mlir::ParseResult parseStructMembers(::mlir::AsmParser &parser, using namespace mlir; using namespace mlir::cir; +//===----------------------------------------------------------------------===// +// CIR AST Attr helpers +//===----------------------------------------------------------------------===// + +namespace mlir { +namespace cir { + +mlir::Attribute makeFuncDeclAttr(const clang::Decl *decl, + mlir::MLIRContext *ctx) { + return llvm::TypeSwitch(decl) + .Case([ctx](const clang::CXXConstructorDecl *ast) { + return ASTCXXConstructorDeclAttr::get(ctx, ast); + }) + .Case([ctx](const clang::CXXConversionDecl *ast) { + return ASTCXXConversionDeclAttr::get(ctx, ast); + }) + .Case([ctx](const clang::CXXDestructorDecl *ast) { + return ASTCXXDestructorDeclAttr::get(ctx, ast); + }) + .Case([ctx](const clang::CXXMethodDecl *ast) { + return ASTCXXMethodDeclAttr::get(ctx, ast); + }) + .Case([ctx](const clang::FunctionDecl *ast) { + return ASTFunctionDeclAttr::get(ctx, ast); + }) + .Default([](auto) { + llvm_unreachable("unexpected Decl kind"); + return mlir::Attribute(); + }); +} + +} // namespace cir +} // namespace mlir + //===----------------------------------------------------------------------===// // General CIR parsing / printing //===----------------------------------------------------------------------===// @@ -65,14 +99,14 @@ void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const { } static void printStructMembers(mlir::AsmPrinter &printer, - mlir::ArrayAttr members) { + mlir::ArrayAttr members) { printer << '{'; llvm::interleaveComma(members, printer); printer << '}'; } static ParseResult parseStructMembers(mlir::AsmParser &parser, - mlir::ArrayAttr &members) { + mlir::ArrayAttr &members) { SmallVector elts; auto delimiter = AsmParser::Delimiter::Braces; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 8b5f325920f8..5763971fa49e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -42,6 +42,7 @@ using namespace mlir::cir; #include "clang/CIR/Dialect/IR/CIROpsStructs.cpp.inc" #include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc" +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" //===----------------------------------------------------------------------===// // CIR Dialect @@ -2320,60 +2321,6 @@ void SignedOverflowBehaviorAttr::print(::mlir::AsmPrinter &printer) const { printer << ">"; } -::mlir::Attribute ASTFunctionDeclAttr::parse(::mlir::AsmParser &parser, - ::mlir::Type type) { - // We cannot really parse anything AST related at this point since we have no - // serialization/JSON story. Even if the attr is parsed, it just holds nullptr - // instead of the AST node. - return get(parser.getContext(), nullptr); -} - -void ASTFunctionDeclAttr::print(::mlir::AsmPrinter &printer) const { - // Nothing to print besides the mnemonics. -} - -LogicalResult ASTFunctionDeclAttr::verify( - ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - const ::clang::FunctionDecl *decl) { - return success(); -} - -::mlir::Attribute ASTVarDeclAttr::parse(::mlir::AsmParser &parser, - ::mlir::Type type) { - // We cannot really parse anything AST related at this point since we have no - // serialization/JSON story. Even if the attr is parsed, it just holds nullptr - // instead of the AST node. - return get(parser.getContext(), nullptr); -} - -void ASTVarDeclAttr::print(::mlir::AsmPrinter &printer) const { - // Nothing to print besides the mnemonics. -} - -LogicalResult ASTVarDeclAttr::verify( - ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - const ::clang::VarDecl *decl) { - return success(); -} - -::mlir::Attribute ASTRecordDeclAttr::parse(::mlir::AsmParser &parser, - ::mlir::Type type) { - // We cannot really parse anything AST related at this point since we have no - // serialization/JSON story. Even if the attr is parsed, it just holds nullptr - // instead of the AST node. - return get(parser.getContext(), nullptr); -} - -void ASTRecordDeclAttr::print(::mlir::AsmPrinter &printer) const { - // Nothing to print besides the mnemonics. -} - -LogicalResult ASTRecordDeclAttr::verify( - ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - const ::clang::RecordDecl *decl) { - return success(); -} - LogicalResult TypeInfoAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::Type type, ::mlir::ArrayAttr typeinfoData) { diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 703718d3d2c7..894a5a278beb 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -9,6 +9,7 @@ add_clang_library(MLIRCIR MLIRCIROpsIncGen MLIRCIREnumsGen MLIRSymbolInterfacesIncGen + MLIRCIRASTAttrInterfacesIncGen LINK_LIBS PUBLIC MLIRIR diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 880542f6d889..82952f42a2d2 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -13,7 +13,9 @@ add_clang_library(MLIRCIRTransforms MLIRAnalysis MLIRIR - MLIRCIR MLIRPass MLIRTransformUtils + + MLIRCIR + MLIRCIRASTAttrInterfaces ) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index c6313db69d11..d3b074fe16ed 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -75,13 +75,13 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkLambdaCaptureStore(StoreOp storeOp); void trackCallToCoroutine(CallOp callOp); - void checkCtor(CallOp callOp, const clang::CXXConstructorDecl *ctor); - void checkMoveAssignment(CallOp callOp, const clang::CXXMethodDecl *m); - void checkCopyAssignment(CallOp callOp, const clang::CXXMethodDecl *m); + void checkCtor(CallOp callOp, ASTCXXConstructorDeclInterface ctor); + void checkMoveAssignment(CallOp callOp, ASTCXXMethodDeclInterface m); + void checkCopyAssignment(CallOp callOp, ASTCXXMethodDeclInterface m); void checkNonConstUseOfOwner(mlir::Value ownerAddr, mlir::Location loc); - void checkOperators(CallOp callOp, const clang::CXXMethodDecl *m); + void checkOperators(CallOp callOp, ASTCXXMethodDeclInterface m); void checkOtherMethodsAndFunctions(CallOp callOp, - const clang::CXXMethodDecl *m); + ASTCXXMethodDeclInterface m); void checkForOwnerAndPointerArguments(CallOp callOp, unsigned firstArgIdx); // TODO: merge both methods below and pass down an enum. @@ -103,12 +103,9 @@ struct LifetimeCheckPass : public LifetimeCheckBase { std::optional currFunc; // Common helpers. - bool isCtorInitPointerFromOwner(CallOp callOp, - const clang::CXXConstructorDecl *ctor); - mlir::Value getNonConstUseOfOwner(CallOp callOp, - const clang::CXXMethodDecl *m); - bool isOwnerOrPointerClassMethod(CallOp callOp, - const clang::CXXMethodDecl *m); + bool isCtorInitPointerFromOwner(CallOp callOp); + mlir::Value getNonConstUseOfOwner(CallOp callOp, ASTCXXMethodDeclInterface m); + bool isOwnerOrPointerClassMethod(CallOp callOp, ASTCXXMethodDeclInterface m); // Diagnostic helpers. void emitInvalidHistory(mlir::InFlightDiagnostic &D, mlir::Value histKey, @@ -890,11 +887,7 @@ void LifetimeCheckPass::checkIf(IfOp ifOp) { template bool isStructAndHasAttr(mlir::Type ty) { if (!ty.isa()) return false; - auto sTy = ty.cast(); - const auto *recordDecl = sTy.getAst()->getAstDecl(); - if (recordDecl->hasAttr()) - return true; - return false; + return hasAttr(*mlir::cast(ty).getAst()); } static bool isOwnerType(mlir::Type ty) { @@ -1475,14 +1468,14 @@ static FuncOp getCalleeFromSymbol(ModuleOp mod, StringRef name) { return dyn_cast(global); } -static const clang::CXXMethodDecl *getMethod(ModuleOp mod, CallOp callOp) { +static const ASTCXXMethodDeclInterface getMethod(ModuleOp mod, CallOp callOp) { if (!callOp.getCallee()) return nullptr; StringRef name = *callOp.getCallee(); auto method = getCalleeFromSymbol(mod, name); if (!method || method.getBuiltin()) return nullptr; - return dyn_cast(method.getAstAttr().getAstDecl()); + return dyn_cast(method.getAstAttr()); } mlir::Value LifetimeCheckPass::getThisParamPointerCategory(CallOp callOp) { @@ -1510,7 +1503,7 @@ mlir::Value LifetimeCheckPass::getThisParamOwnerCategory(CallOp callOp) { } void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, - const clang::CXXMethodDecl *m) { + ASTCXXMethodDeclInterface m) { // MyPointer::operator=(MyPointer&&)(%dst, %src) // or // MyOwner::operator=(MyOwner&&)(%dst, %src) @@ -1543,7 +1536,7 @@ void LifetimeCheckPass::checkMoveAssignment(CallOp callOp, } void LifetimeCheckPass::checkCopyAssignment(CallOp callOp, - const clang::CXXMethodDecl *m) { + ASTCXXMethodDeclInterface m) { // MyIntOwner::operator=(MyIntOwner&)(%dst, %src) auto dst = getThisParamOwnerCategory(callOp); auto src = callOp.getArgOperand(1); @@ -1566,8 +1559,7 @@ void LifetimeCheckPass::checkCopyAssignment(CallOp callOp, // Example: // MyIntPointer::MyIntPointer(MyIntOwner const&)(%5, %4) // -bool LifetimeCheckPass::isCtorInitPointerFromOwner( - CallOp callOp, const clang::CXXConstructorDecl *ctor) { +bool LifetimeCheckPass::isCtorInitPointerFromOwner(CallOp callOp) { if (callOp.getNumArgOperands() < 2) return false; @@ -1582,7 +1574,7 @@ bool LifetimeCheckPass::isCtorInitPointerFromOwner( } void LifetimeCheckPass::checkCtor(CallOp callOp, - const clang::CXXConstructorDecl *ctor) { + ASTCXXConstructorDeclInterface ctor) { // TODO: zero init // 2.4.2 if the initialization is default initialization or zero // initialization, example: @@ -1591,7 +1583,7 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, // string_view p; // // both results in pset(p) == {null} - if (ctor->isDefaultConstructor()) { + if (ctor.isDefaultConstructor()) { // First argument passed is always the alloca for the 'this' ptr. // Currently two possible actions: @@ -1615,11 +1607,11 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, } // User defined copy ctor calls ... - if (ctor->isCopyConstructor()) { + if (ctor.isCopyConstructor()) { llvm_unreachable("NYI"); } - if (isCtorInitPointerFromOwner(callOp, ctor)) { + if (isCtorInitPointerFromOwner(callOp)) { auto addr = getThisParamPointerCategory(callOp); assert(addr && "expected pointer category"); auto owner = callOp.getArgOperand(1); @@ -1630,11 +1622,11 @@ void LifetimeCheckPass::checkCtor(CallOp callOp, } void LifetimeCheckPass::checkOperators(CallOp callOp, - const clang::CXXMethodDecl *m) { + ASTCXXMethodDeclInterface m) { auto addr = getThisParamOwnerCategory(callOp); if (addr) { // const access to the owner is fine. - if (m->isConst()) + if (m.isConst()) return; // TODO: this is a place where we can hook in some idiom recocgnition // so we don't need to use actual source code annotation to make assumptions @@ -1659,8 +1651,8 @@ void LifetimeCheckPass::checkOperators(CallOp callOp, mlir::Value LifetimeCheckPass::getNonConstUseOfOwner(CallOp callOp, - const clang::CXXMethodDecl *m) { - if (m->isConst()) + ASTCXXMethodDeclInterface m) { + if (m.isConst()) return {}; return getThisParamOwnerCategory(callOp); } @@ -1731,7 +1723,7 @@ void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, } void LifetimeCheckPass::checkOtherMethodsAndFunctions( - CallOp callOp, const clang::CXXMethodDecl *m) { + CallOp callOp, ASTCXXMethodDeclInterface m) { unsigned firstArgIdx = 0; // Looks at a method 'this' pointer: @@ -1744,9 +1736,9 @@ void LifetimeCheckPass::checkOtherMethodsAndFunctions( } bool LifetimeCheckPass::isOwnerOrPointerClassMethod( - CallOp callOp, const clang::CXXMethodDecl *m) { + CallOp callOp, ASTCXXMethodDeclInterface m) { // For the sake of analysis, these behave like regular functions - if (!m || m->isStatic()) + if (!m || m.isStatic()) return false; // Check the object for owner/pointer by looking at the 'this' pointer. return getThisParamPointerCategory(callOp) || @@ -1761,8 +1753,7 @@ bool LifetimeCheckPass::isLambdaType(mlir::Type ty) { auto taskTy = ty.dyn_cast(); if (!taskTy) return false; - auto recordDecl = taskTy.getAst()->getAstDecl(); - if (recordDecl->isLambda()) + if (taskTy.getAst()->isLambda()) IsLambdaTyCache[ty] = true; return IsLambdaTyCache[ty]; @@ -1773,25 +1764,15 @@ bool LifetimeCheckPass::isTaskType(mlir::Value taskVal) { if (IsTaskTyCache.count(ty)) return IsTaskTyCache[ty]; - IsTaskTyCache[ty] = false; - auto taskTy = taskVal.getType().dyn_cast(); - if (!taskTy) - return false; - auto recordDecl = taskTy.getAst()->getAstDecl(); - auto *spec = dyn_cast(recordDecl); - if (!spec) - return false; - - for (auto *sub : spec->decls()) { - auto *subRec = dyn_cast(sub); - if (subRec && subRec->getDeclName().isIdentifier() && - subRec->getName() == "promise_type") { - IsTaskTyCache[ty] = true; - break; - } - } + bool result = [&] { + auto taskTy = taskVal.getType().dyn_cast(); + if (!taskTy) + return false; + return taskTy.getAst()->hasPromiseType(); + }(); - return IsTaskTyCache[ty]; + IsTaskTyCache[ty] = result; + return result; } void LifetimeCheckPass::trackCallToCoroutine(CallOp callOp) { @@ -1831,13 +1812,13 @@ void LifetimeCheckPass::checkCall(CallOp callOp) { // From this point on only owner and pointer class methods handling, // starting from special methods. - if (const auto *ctor = dyn_cast(methodDecl)) + if (auto ctor = dyn_cast(methodDecl)) return checkCtor(callOp, ctor); - if (methodDecl->isMoveAssignmentOperator()) + if (methodDecl.isMoveAssignmentOperator()) return checkMoveAssignment(callOp, methodDecl); - if (methodDecl->isCopyAssignmentOperator()) + if (methodDecl.isCopyAssignmentOperator()) return checkCopyAssignment(callOp, methodDecl); - if (methodDecl->isOverloadedOperator()) + if (methodDecl.isOverloadedOperator()) return checkOperators(callOp, methodDecl); // For any other methods... diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 6ed1846bf277..9e0b9ec4a203 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -15,6 +15,7 @@ #include "clang/Basic/Module.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" @@ -133,13 +134,10 @@ cir::FuncOp LoweringPreparePass::buildRuntimeFunction( } cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { - auto varDecl = op.getAst()->getAstDecl(); SmallString<256> fnName; { - std::unique_ptr MangleCtx( - astCtx->createMangleContext()); llvm::raw_svector_ostream Out(fnName); - MangleCtx->mangleDynamicInitializer(varDecl, Out); + op.getAst()->mangleDynamicInitializer(Out); // Name numbering uint32_t cnt = dynamicInitializerNames[fnName]++; if (cnt) @@ -153,7 +151,7 @@ cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { auto fnType = mlir::cir::FuncType::get({}, voidTy); FuncOp f = buildRuntimeFunction(builder, fnName, op.getLoc(), fnType, - mlir::cir::GlobalLinkageKind::InternalLinkage); + mlir::cir::GlobalLinkageKind::InternalLinkage); // Move over the initialzation code of the ctor region. auto &block = op.getCtorRegion().front(); @@ -161,10 +159,10 @@ cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { entryBB->getOperations().splice(entryBB->begin(), block.getOperations(), block.begin(), std::prev(block.end())); - // Register the destructor call with __cxa_atexit - assert(varDecl->getTLSKind() == clang::VarDecl::TLS_None && " TLS NYI"); + assert(op.getAst() && op.getAst()->getTLSKind() == clang::VarDecl::TLS_None && + " TLS NYI"); // Create a variable that binds the atexit to this shared object. builder.setInsertionPointToStart(&theModule.getBodyRegion().front()); auto Handle = buildRuntimeVariable(builder, "__dso_handle", op.getLoc(), @@ -180,7 +178,7 @@ cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { assert(dtorCall && "Expected a dtor call"); cir::FuncOp dtorFunc = getCalledFunction(dtorCall); assert(dtorFunc && - isa(dtorFunc.getAst()->getAstDecl()) && + mlir::isa(*dtorFunc.getAst()) && "Expected a dtor call"); // Create a runtime helper function: @@ -198,7 +196,8 @@ cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { FuncOp fnAtExit = buildRuntimeFunction(builder, nameAtExit, op.getLoc(), fnAtExitType); - // Replace the dtor call with a call to __cxa_atexit(&dtor, &var, &__dso_handle) + // Replace the dtor call with a call to __cxa_atexit(&dtor, &var, + // &__dso_handle) builder.setInsertionPointAfter(dtorCall); mlir::Value args[3]; auto dtorPtrTy = mlir::cir::PointerType::get(builder.getContext(), @@ -241,7 +240,8 @@ void LoweringPreparePass::lowerGlobalOp(GlobalOp op) { dtorRegion.getBlocks().clear(); // Add a function call to the variable initialization function. - assert(!op.getAst()->getAstDecl()->getAttr() && + assert(!hasAttr( + mlir::cast(*op.getAst())) && "custom initialization priority NYI"); dynamicInitializers.push_back(f); } @@ -254,8 +254,7 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { SmallVector attrs; for (auto &f : dynamicInitializers) { // TODO: handle globals with a user-specified initialzation priority. - auto ctorAttr = - mlir::cir::GlobalCtorAttr::get(&getContext(), f.getName()); + auto ctorAttr = mlir::cir::GlobalCtorAttr::get(&getContext(), f.getName()); attrs.push_back(ctorAttr); } @@ -286,7 +285,7 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { {}, mlir::cir::VoidType::get(builder.getContext())); FuncOp f = buildRuntimeFunction(builder, fnName, theModule.getLoc(), fnType, - mlir::cir::GlobalLinkageKind::ExternalLinkage); + mlir::cir::GlobalLinkageKind::ExternalLinkage); builder.setInsertionPointToStart(f.addEntryBlock()); for (auto &f : dynamicInitializers) { builder.create(f.getLoc(), f); @@ -304,7 +303,7 @@ void LoweringPreparePass::runOnOp(Operation *op) { void LoweringPreparePass::runOnOperation() { assert(astCtx && "Missing ASTContext, please construct with the right ctor"); - auto* op = getOperation(); + auto *op = getOperation(); if (isa<::mlir::ModuleOp>(op)) { theModule = cast<::mlir::ModuleOp>(op); } @@ -326,7 +325,8 @@ std::unique_ptr mlir::createLoweringPreparePass() { return std::make_unique(); } -std::unique_ptr mlir::createLoweringPreparePass(clang::ASTContext *astCtx) { +std::unique_ptr +mlir::createLoweringPreparePass(clang::ASTContext *astCtx) { auto pass = std::make_unique(); pass->setASTContext(astCtx); return std::move(pass); diff --git a/clang/lib/CIR/FrontendAction/CMakeLists.txt b/clang/lib/CIR/FrontendAction/CMakeLists.txt index c223383d24cf..7201db6502e6 100644 --- a/clang/lib/CIR/FrontendAction/CMakeLists.txt +++ b/clang/lib/CIR/FrontendAction/CMakeLists.txt @@ -10,6 +10,10 @@ add_clang_library(clangCIRFrontendAction DEPENDS MLIRCIROpsIncGen + MLIRCIRASTAttrInterfacesIncGen + MLIRBuiltinLocationAttributesIncGen + MLIRBuiltinTypeInterfacesIncGen + MLIRFunctionInterfacesIncGen LINK_LIBS clangAST diff --git a/clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp b/clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp new file mode 100644 index 000000000000..a3f525dd65a3 --- /dev/null +++ b/clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp @@ -0,0 +1,15 @@ +//====- ASTAttrInterfaces.cpp - Interface to AST Attributes ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" + +#include "llvm/ADT/SmallVector.h" + +using namespace mlir::cir; + +/// Include the generated type qualifiers interfaces. +#include "clang/CIR/Interfaces/ASTAttrInterfaces.cpp.inc" diff --git a/clang/lib/CIR/Interfaces/CMakeLists.txt b/clang/lib/CIR/Interfaces/CMakeLists.txt new file mode 100644 index 000000000000..3f41389807d7 --- /dev/null +++ b/clang/lib/CIR/Interfaces/CMakeLists.txt @@ -0,0 +1,14 @@ +add_clang_library(MLIRCIRASTAttrInterfaces + ASTAttrInterfaces.cpp + + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Interfaces + + DEPENDS + MLIRCIRASTAttrInterfacesIncGen + + LINK_LIBS + ${dialect_libs} + MLIRIR + MLIRSupport + ) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index c7f713e85da0..b252af37dace 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -12,6 +12,10 @@ add_clang_library(clangCIRLoweringDirectToLLVM DEPENDS MLIRCIREnumsGen MLIRCIROpsIncGen + MLIRCIRASTAttrInterfacesIncGen + MLIRBuiltinLocationAttributesIncGen + MLIRBuiltinTypeInterfacesIncGen + MLIRFunctionInterfacesIncGen LINK_LIBS clangAST diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index 99f3f8981384..716212d6b899 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -12,6 +12,10 @@ add_clang_library(clangCIRLoweringThroughMLIR DEPENDS MLIRCIROpsIncGen MLIRCIREnumsGen + MLIRCIRASTAttrInterfacesIncGen + MLIRBuiltinLocationAttributesIncGen + MLIRBuiltinTypeInterfacesIncGen + MLIRFunctionInterfacesIncGen LINK_LIBS clangAST diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index ceb4d3f91b68..6dae1455010c 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -23,6 +23,7 @@ if(CLANG_ENABLE_CIR) ) list(APPEND deps MLIRBuiltinLocationAttributesIncGen + MLIRBuiltinTypeInterfacesIncGen ) include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 48eec3bd093b..8b9deed0f512 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -14,5 +14,5 @@ void m() { __long l; } -// CHECK: !ty_22anon22 = !cir.struct +// CHECK: !ty_22anon22 = !cir.struct // CHECK: !ty_22__long22 = !cir.struct}> diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index c1b3692d06f3..da41f37f1262 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -38,7 +38,7 @@ class B : public A }; // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.recdecl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.record.decl.ast> // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 786fe85301a8..108b13c6009c 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -26,7 +26,7 @@ static Init __ioinit2(false); // BEFORE-NEXT: } dtor { // BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr // BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () -// BEFORE-NEXT: } {ast = #cir.vardecl.ast} +// BEFORE-NEXT: } {ast = #cir.var.decl.ast} // BEFORE: cir.global "private" internal @_ZL9__ioinit2 = ctor : !ty_22Init22 { // BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr // BEFORE-NEXT: %1 = cir.const(#false) : !cir.bool @@ -34,7 +34,7 @@ static Init __ioinit2(false); // BEFORE-NEXT: } dtor { // BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr // BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () -// BEFORE-NEXT: } {ast = #cir.vardecl.ast} +// BEFORE-NEXT: } {ast = #cir.var.decl.ast} // BEFORE-NEXT: } @@ -43,7 +43,7 @@ static Init __ioinit2(false); // AFTER-NEXT: cir.func private @__cxa_atexit(!cir.ptr)>>, !cir.ptr, !cir.ptr) // AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) // AFTER-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) -// AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.vardecl.ast} +// AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init() // AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr // AFTER-NEXT: %1 = cir.const(#true) : !cir.bool @@ -55,7 +55,7 @@ static Init __ioinit2(false); // AFTER-NEXT: %6 = cir.get_global @__dso_handle : cir.ptr // AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return -// AFTER: cir.global "private" internal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {ast = #cir.vardecl.ast} +// AFTER: cir.global "private" internal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() // AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr // AFTER-NEXT: %1 = cir.const(#false) : !cir.bool diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 2181e60fd7cd..cbd106f77754 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -30,7 +30,7 @@ void yoyo(incomplete *i) {} // CHECK-DAG: !ty_22Bar22 = !cir.struct // CHECK-DAG: !ty_22Foo22 = !cir.struct -// CHECK-DAG: !ty_22Mandalore22 = !cir.struct, !s32i} #cir.recdecl.ast> +// CHECK-DAG: !ty_22Mandalore22 = !cir.struct, !s32i} #cir.record.decl.ast> // CHECK-DAG: !ty_22Adv22 = !cir.struct // CHECK-DAG: !ty_22Entry22 = !cir.struct, !cir.ptr)>>}> diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index 1f53cd60744a..da96f418e001 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -6,10 +6,10 @@ typedef union { yolo y; struct { int lifecnt; }; } yolm; typedef union { yolo y; struct { int *lifecnt; int genpad; }; } yolm2; typedef union { yolo y; struct { bool life; int genpad; }; } yolm3; -// CHECK-DAG: !ty_22U23A3ADummy22 = !cir.struct -// CHECK-DAG: !ty_22anon221 = !cir.struct -// CHECK-DAG: !ty_22yolo22 = !cir.struct -// CHECK-DAG: !ty_22anon222 = !cir.struct, !s32i} #cir.recdecl.ast> +// CHECK-DAG: !ty_22U23A3ADummy22 = !cir.struct +// CHECK-DAG: !ty_22anon221 = !cir.struct +// CHECK-DAG: !ty_22yolo22 = !cir.struct +// CHECK-DAG: !ty_22anon222 = !cir.struct, !s32i} #cir.record.decl.ast> // CHECK-DAG: !ty_22yolm22 = !cir.struct // CHECK-DAG: !ty_22yolm322 = !cir.struct @@ -33,14 +33,14 @@ union U2 { float f; } s; } u2; -// CHECK-DAG: !cir.struct +// CHECK-DAG: !cir.struct // Should genereate unions without padding. union U3 { short b; U u; } u3; -// CHECK-DAG: !ty_22U322 = !cir.struct +// CHECK-DAG: !ty_22U322 = !cir.struct void m() { yolm q; diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 92ed9e1eea59..26d46ec249cf 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -25,7 +25,7 @@ class B : public A // CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct x 5>}> // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.recdecl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.record.decl.ast> // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 4ffda321f221..8ee44c5beeb0 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -3,7 +3,7 @@ !s8i = !cir.int !s32i = !cir.int !s64i = !cir.int -!ty_22Init22 = !cir.struct +!ty_22Init22 = !cir.struct module { cir.global external @a = #cir.int<3> : !s32i cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i] : !cir.array> diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index cd5d709e57a4..ce7eafd6a1e8 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -488,7 +488,7 @@ module { // ----- !s8i = !cir.int -!ty_22Init22 = !cir.struct +!ty_22Init22 = !cir.struct module { cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { } @@ -498,7 +498,7 @@ module { // ----- !s8i = !cir.int #true = #cir.bool : !cir.bool -!ty_22Init22 = !cir.struct +!ty_22Init22 = !cir.struct module { cir.func private @_ZN4InitC1Eb(!cir.ptr) cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index aa0acce60abd..fb25d04533da 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -8,7 +8,7 @@ !ty_2222 = !cir.struct x 5>}> !ty_22221 = !cir.struct, !cir.ptr, !cir.ptr}> -!ty_22A22 = !cir.struct +!ty_22A22 = !cir.struct !ty_22i22 = !cir.struct !ty_22S22 = !cir.struct !ty_22S122 = !cir.struct diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir index 1136f8a3beb5..56f4fd3a6331 100644 --- a/clang/test/CIR/Lowering/array.cir +++ b/clang/test/CIR/Lowering/array.cir @@ -2,7 +2,7 @@ // RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM !s32i = !cir.int -!ty_22S22 = !cir.struct +!ty_22S22 = !cir.struct module { cir.func @foo() { @@ -26,10 +26,10 @@ module { // CHECK: %0 = llvm.mlir.undef : !llvm.array<2 x struct<"struct.S", (i32)>> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S", (i32)> // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 - // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S", (i32)> - // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.array<2 x struct<"struct.S", (i32)>> + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"struct.S", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.array<2 x struct<"struct.S", (i32)>> // CHECK: %5 = cir.llvmir.zeroinit : !llvm.struct<"struct.S", (i32)> - // CHECK: %6 = llvm.insertvalue %5, %4[1] : !llvm.array<2 x struct<"struct.S", (i32)>> + // CHECK: %6 = llvm.insertvalue %5, %4[1] : !llvm.array<2 x struct<"struct.S", (i32)>> // CHECK: llvm.return %6 : !llvm.array<2 x struct<"struct.S", (i32)>> // CHECK: } } diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index f51d8a85f968..e8640db81c7a 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -10,10 +10,10 @@ !u32i = !cir.int !u64i = !cir.int !u8i = !cir.int -!ty_22A22 = !cir.struct x 2>} #cir.recdecl.ast> -!ty_22Bar22 = !cir.struct -!ty_22StringStruct22 = !cir.struct, !cir.array, !cir.array} #cir.recdecl.ast> -!ty_22StringStructPtr22 = !cir.struct} #cir.recdecl.ast> +!ty_22A22 = !cir.struct x 2>} #cir.record.decl.ast> +!ty_22Bar22 = !cir.struct +!ty_22StringStruct22 = !cir.struct, !cir.array, !cir.array} #cir.record.decl.ast> +!ty_22StringStructPtr22 = !cir.struct} #cir.record.decl.ast> module { cir.global external @a = #cir.int<3> : !s32i diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index 1fea10b3d90b..9430d698d9ca 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -5,10 +5,10 @@ !u8i = !cir.int !u32i = !cir.int !ty_22S22 = !cir.struct -!ty_22S2A22 = !cir.struct -!ty_22S122 = !cir.struct} #cir.recdecl.ast> -!ty_22S222 = !cir.struct -!ty_22S322 = !cir.struct +!ty_22S2A22 = !cir.struct +!ty_22S122 = !cir.struct} #cir.record.decl.ast> +!ty_22S222 = !cir.struct +!ty_22S322 = !cir.struct module { cir.func @test() { diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir index c5ee736c4a7d..ea6ed375c201 100644 --- a/clang/test/CIR/Lowering/unions.cir +++ b/clang/test/CIR/Lowering/unions.cir @@ -4,9 +4,9 @@ !s16i = !cir.int !s32i = !cir.int #true = #cir.bool : !cir.bool -!ty_22U122 = !cir.struct -!ty_22U222 = !cir.struct -!ty_22U322 = !cir.struct +!ty_22U122 = !cir.struct +!ty_22U222 = !cir.struct +!ty_22U322 = !cir.struct module { // Should lower union to struct with only the largest member. cir.global external @u1 = #cir.zero : !ty_22U122 diff --git a/clang/test/CIR/Lowering/variadics.cir b/clang/test/CIR/Lowering/variadics.cir index 050ae53d610c..ca7dbcc866a1 100644 --- a/clang/test/CIR/Lowering/variadics.cir +++ b/clang/test/CIR/Lowering/variadics.cir @@ -5,7 +5,7 @@ !u32i = !cir.int !u8i = !cir.int -!ty_22__va_list_tag22 = !cir.struct, !cir.ptr} #cir.recdecl.ast> +!ty_22__va_list_tag22 = !cir.struct, !cir.ptr} #cir.record.decl.ast> module { cir.func @average(%arg0: !s32i, ...) -> !s32i { From 1df4e0742bf511eefd0cf9ec84c96cd45bccd62c Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 19 Sep 2023 03:59:09 +0300 Subject: [PATCH 1202/2301] [CIR][CodeGen][Bugfix] Generate field index with respect to layout (#263) There is a bug in the code generation: the field index for the `GetMemberOp` is taken from the `FieldDecl`, with no respect to the record layout. One of the manifestation of the bug is the wrong index generated for a field in a derived class that does not take into the account the instance of the base class (that has index 0). You can take a look at the example in `test/CIR/CodeGen/derived-to-base.cpp`, i.e. the current test is not the correct one ``` // CHECK-DAG: !ty_22C23A3ALayer22 = !cir.struct // CHECK-DAG: !ty_22C33A3ALayer22 = !cir.struct) -> cir.ptr // CHECK: %3 = cir.get_member %2[0] {name = "m_C1"} : !cir.ptr -> !cir.ptr> ``` As one can see, the result (of ptr type to `!ty_22C222` ) must have the index `1` in the corresponded struct `ty_22C23A3ALayer22`. Basically the same is done in the `clang/CodeGen/CGExpr.cpp`, so we don't invent something new here. Note, this fix doesn't affect anything related to the usage of `buildPreserveStructAccess` where the `field->getFieldIndex()` is used. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 +++- clang/test/CIR/CodeGen/derived-to-base.cpp | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index a884c9144637..40f2b97ab919 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -271,7 +271,9 @@ LValue CIRGenFunction::buildLValueForField(LValue base, if (!IsInPreservedAIRegion && (!getDebugInfo() || !rec->hasAttr())) { llvm::StringRef fieldName = field->getName(); - unsigned fieldIndex = field->getFieldIndex(); + auto& layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent()); + unsigned fieldIndex = layout.getCIRFieldNo(field); + if (CGM.LambdaFieldToName.count(field)) fieldName = CGM.LambdaFieldToName[field]; addr = buildAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 65f881afccfe..03846ca45688 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -82,7 +82,7 @@ void C3::Layer::Initialize() { // CHECK: cir.scope { // CHECK: %2 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr -// CHECK: %3 = cir.get_member %2[0] {name = "m_C1"} : !cir.ptr -> !cir.ptr> +// CHECK: %3 = cir.get_member %2[1] {name = "m_C1"} : !cir.ptr -> !cir.ptr> // CHECK: %4 = cir.load %3 : cir.ptr >, !cir.ptr // CHECK: %5 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool From 567cc39524b12ae768b47eb3789c8464397bd29c Mon Sep 17 00:00:00 2001 From: Keyi Zhang Date: Mon, 18 Sep 2023 22:49:37 -0700 Subject: [PATCH 1203/2301] [CIR][Lowering] Add scf.scope lowering to standard dialects (#262) This PR adds MLIR lowering of `cir.scope`. I also notice that the MLIR unit tests still uses old integer types. I will fix those in a separate PR. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 33 +++++++++++- clang/test/CIR/Lowering/ThroughMLIR/scope.cir | 52 +++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/scope.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index d00a0442cc3a..b2921abd16c0 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -501,12 +501,43 @@ class CIRBrOpLowering : public mlir::OpRewritePattern { } }; +class CIRScopeOpLowering : public mlir::OpRewritePattern { + using mlir::OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ScopeOp scopeOp, + mlir::PatternRewriter &rewriter) const override { + // Empty scope: just remove it. + if (scopeOp.getRegion().empty()) { + rewriter.eraseOp(scopeOp); + return mlir::success(); + } + + for (auto &block : scopeOp.getRegion()) { + rewriter.setInsertionPointToEnd(&block); + auto *terminator = block.getTerminator(); + rewriter.replaceOpWithNewOp( + terminator, terminator->getOperands()); + } + + rewriter.setInsertionPoint(scopeOp); + auto newScopeOp = rewriter.create( + scopeOp.getLoc(), scopeOp.getResultTypes()); + rewriter.inlineRegionBefore(scopeOp.getScopeRegion(), + newScopeOp.getBodyRegion(), + newScopeOp.getBodyRegion().end()); + rewriter.replaceOp(scopeOp, newScopeOp); + + return mlir::LogicalResult::success(); + } +}; + void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); + CIRReturnLowering, CIRScopeOpLowering>(patterns.getContext()); patterns.add(converter, patterns.getContext()); } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/scope.cir b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir new file mode 100644 index 000000000000..310580683cd2 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir @@ -0,0 +1,52 @@ +// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +module { + cir.func @foo() { + cir.scope { + %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.const(4 : i32) : i32 + cir.store %1, %0 : i32, cir.ptr + } + cir.return + } + +// MLIR: func.func @foo() +// MLIR-NEXT: memref.alloca_scope +// MLIR-NEXT: %alloca = memref.alloca() {alignment = 4 : i64} : memref +// MLIR-NEXT: %c4_i32 = arith.constant 4 : i32 +// MLIR-NEXT: memref.store %c4_i32, %alloca[] : memref +// MLIR-NEXT: } +// MLIR-NEXT: return + + +// LLVM: define void @foo() +// LLVM-NEXT: %1 = call ptr @llvm.stacksave.p0() +// LLVM-NEXT: br label %2 +// LLVM-EMPTY: +// LLVM-NEXT: 2: +// LLVM-NEXT: %3 = alloca i32, i64 1, align 4 +// LLVM-NEXT: %4 = insertvalue { ptr, ptr, i64 } undef, ptr %3, 0 +// LLVM-NEXT: %5 = insertvalue { ptr, ptr, i64 } %4, ptr %3, 1 +// LLVM-NEXT: %6 = insertvalue { ptr, ptr, i64 } %5, i64 0, 2 +// LLVM-NEXT: %7 = extractvalue { ptr, ptr, i64 } %6, 1 +// LLVM-NEXT: store i32 4, ptr %7, align 4 +// LLVM-NEXT: call void @llvm.stackrestore.p0(ptr %1) +// LLVM-NEXT: br label %8 +// LLVM-EMPTY: +// LLVM-NEXT: 8: +// LLVM-NEXT: ret void +// LLVM-NEXT: } + + + // Should drop empty scopes. + cir.func @empty_scope() { + cir.scope { + } + cir.return + } + // MLIR: func.func @empty_scope() + // MLIR-NEXT: return + // MLIR-NEXT: } + +} From 723703c6b523862d1174109137149c1261ba9d75 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Sep 2023 16:31:48 -0700 Subject: [PATCH 1204/2301] [CIR][CIRGen][Exception] Workaround internal testcase break Recent work on exceptions broke an internal testcase, free the path back while I work on a holistic solution. --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index d0ce14db0d1f..bc20ec9839fa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -271,8 +271,6 @@ mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { }); enterCXXTryStmt(S, catchOp); - llvm_unreachable("NYI"); - if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) return mlir::failure(); exitCXXTryStmt(S); @@ -385,12 +383,20 @@ mlir::Block *CIRGenFunction::buildLandingPad() { case EHScope::Catch: case EHScope::Cleanup: case EHScope::Filter: - llvm_unreachable("NYI"); if (auto *lpad = innermostEHScope.getCachedLandingPad()) return lpad; } - llvm_unreachable("NYI"); + { + // Save the current CIR generation state. + mlir::OpBuilder::InsertionGuard guard(builder); + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + // FIXME(cir): handle CIR relevant landing pad bits, there's no good + // way to assert here right now and leaving one in break important + // testcases. Work to fill this in is coming soon. + } + + return nullptr; } mlir::Block *CIRGenFunction::getInvokeDestImpl() { @@ -433,7 +439,8 @@ mlir::Block *CIRGenFunction::getInvokeDestImpl() { LP = buildLandingPad(); } - assert(LP); + // FIXME(cir): this breaks important testcases, fix is coming soon. + // assert(LP); // Cache the landing pad on the innermost scope. If this is a // non-EH scope, cache the landing pad on the enclosing scope, too. From 86d7cdeb9ef11704ae1ac044f849be8eea5c13cf Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 20 Sep 2023 12:19:19 +0300 Subject: [PATCH 1205/2301] [CIR][Bugfix] adds clangAST dependency --- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 894a5a278beb..4956786314b9 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -18,4 +18,5 @@ add_clang_library(MLIRCIR MLIRLoopLikeInterface MLIRLLVMDialect MLIRSideEffectInterfaces + clangAST ) From 3763ad665e8a57dc610ba4d4aec7e0039fe147bb Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 21 Sep 2023 20:38:28 -0400 Subject: [PATCH 1206/2301] [CIR] Remove cir-tidy We've integrated this functionality into the main clang-tidy and clangd tools and no longer have a purpose for it. We only kept around to maintain support for internal tooling built upon it. So remove it here. --- clang-tools-extra/clang-tidy/CMakeLists.txt | 4 - .../clang-tidy/cir-tidy/CIRASTConsumer.cpp | 189 ------- .../clang-tidy/cir-tidy/CIRASTConsumer.h | 26 - .../clang-tidy/cir-tidy/CIRChecks.h | 21 - .../clang-tidy/cir-tidy/CIRTidy.cpp | 157 ------ .../clang-tidy/cir-tidy/CIRTidy.h | 62 --- .../clang-tidy/cir-tidy/CMakeLists.txt | 55 -- .../clang-tidy/cir-tidy/tool/CIRTidyMain.cpp | 493 ------------------ .../clang-tidy/cir-tidy/tool/CIRTidyMain.h | 23 - .../cir-tidy/tool/CIRTidyToolMain.cpp | 21 - clang-tools-extra/test/CMakeLists.txt | 6 - .../test/cir-tidy/check_cir_tidy.py | 191 ------- .../test/cir-tidy/lifetime-basic.cpp | 40 -- clang-tools-extra/test/cir-tidy/lit.local.cfg | 2 - clang-tools-extra/test/lit.cfg.py | 5 - 15 files changed, 1295 deletions(-) delete mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp delete mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h delete mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CIRChecks.h delete mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp delete mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h delete mode 100644 clang-tools-extra/clang-tidy/cir-tidy/CMakeLists.txt delete mode 100644 clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp delete mode 100644 clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.h delete mode 100644 clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyToolMain.cpp delete mode 100644 clang-tools-extra/test/cir-tidy/check_cir_tidy.py delete mode 100644 clang-tools-extra/test/cir-tidy/lifetime-basic.cpp delete mode 100644 clang-tools-extra/test/cir-tidy/lit.local.cfg diff --git a/clang-tools-extra/clang-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/CMakeLists.txt index a01ce8f560ac..532e7fa31c8f 100644 --- a/clang-tools-extra/clang-tidy/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/CMakeLists.txt @@ -118,10 +118,6 @@ add_subdirectory(plugin) add_subdirectory(tool) add_subdirectory(utils) -if(CLANG_ENABLE_CIR) - add_subdirectory(cir-tidy) -endif() - if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY) install(DIRECTORY . DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/clang-tidy" diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp deleted file mode 100644 index 4ef0b0d88d73..000000000000 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.cpp +++ /dev/null @@ -1,189 +0,0 @@ -//===--- clang-tidy/cir-tidy/CIRASTConsumer.cpp ---------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "CIRASTConsumer.h" -#include "CIRChecks.h" - -#include "../utils/OptionsUtils.h" -#include "mlir/IR/BuiltinOps.h" -#include "mlir/IR/MLIRContext.h" -#include "mlir/Pass/Pass.h" -#include "mlir/Pass/PassManager.h" -#include "clang/CIR/Dialect/Passes.h" -#include - -using namespace clang; -using namespace clang::tidy; - -namespace cir { -namespace tidy { - -CIRASTConsumer::CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, - clang::tidy::ClangTidyContext &Context) - : Context(Context), - OptsView(ClangTidyCheck::OptionsView(cir::checks::LifetimeCheckName, - Context.getOptions().CheckOptions, - &Context)) { - // Setup CIR codegen options via config specified information. - CI.getCodeGenOpts().ClangIRBuildDeferredThreshold = - OptsView.get("CodeGenBuildDeferredThreshold", 500U); - CI.getCodeGenOpts().ClangIRSkipFunctionsFromSystemHeaders = - OptsView.get("CodeGenSkipFunctionsFromSystemHeaders", false); - - Gen = std::make_unique(CI.getDiagnostics(), nullptr, - CI.getCodeGenOpts()); -} - -bool CIRASTConsumer::HandleTopLevelDecl(DeclGroupRef D) { - PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), - AstContext->getSourceManager(), - "CIR generation of declaration"); - Gen->HandleTopLevelDecl(D); - return true; -} - -void CIRASTConsumer::Initialize(ASTContext &Context) { - AstContext = &Context; - Gen->Initialize(Context); -} - -void CIRASTConsumer::HandleTranslationUnit(ASTContext &C) { - Gen->HandleTranslationUnit(C); - Gen->verifyModule(); - - mlir::ModuleOp mlirMod = Gen->getModule(); - std::unique_ptr mlirCtx = Gen->takeContext(); - - mlir::OpPrintingFlags flags; - flags.enableDebugInfo(/*prettyForm=*/false); - - clang::SourceManager &clangSrcMgr = C.getSourceManager(); - FileID MainFileID = clangSrcMgr.getMainFileID(); - - llvm::MemoryBufferRef MainFileBuf = clangSrcMgr.getBufferOrFake(MainFileID); - std::unique_ptr FileBuf = - llvm::MemoryBuffer::getMemBuffer(MainFileBuf); - - llvm::SourceMgr llvmSrcMgr; - llvmSrcMgr.AddNewSourceBuffer(std::move(FileBuf), llvm::SMLoc()); - - class CIRTidyDiagnosticHandler : public mlir::SourceMgrDiagnosticHandler { - clang::tidy::ClangTidyContext &tidyCtx; - clang::SourceManager &clangSrcMgr; - - clang::SourceLocation getClangFromFileLineCol(mlir::FileLineColLoc loc) { - clang::SourceLocation clangLoc; - FileManager &fileMgr = clangSrcMgr.getFileManager(); - assert(loc && "not a valid mlir::FileLineColLoc"); - // The column and line may be zero to represent unknown column and/or - // unknown line/column information. - if (loc.getLine() == 0 || loc.getColumn() == 0) { - llvm_unreachable("How should we workaround this?"); - return clangLoc; - } - if (auto FE = fileMgr.getFile(loc.getFilename())) { - return clangSrcMgr.translateFileLineCol(*FE, loc.getLine(), - loc.getColumn()); - } - llvm_unreachable("location doesn't map to a file?"); - } - - clang::SourceLocation getClangSrcLoc(mlir::Location loc) { - // Direct maps into a clang::SourceLocation. - if (auto fileLoc = loc.dyn_cast()) { - return getClangFromFileLineCol(fileLoc); - } - - // FusedLoc needs to be decomposed but the canonical one - // is the first location, we handle source ranges somewhere - // else. - if (auto fileLoc = loc.dyn_cast()) { - auto locArray = fileLoc.getLocations(); - assert(locArray.size() > 0 && "expected multiple locs"); - return getClangFromFileLineCol( - locArray[0].dyn_cast()); - } - - // Many loc styles are yet to be handled. - if (auto fileLoc = loc.dyn_cast()) { - llvm_unreachable("mlir::UnknownLoc not implemented!"); - } - if (auto fileLoc = loc.dyn_cast()) { - llvm_unreachable("mlir::CallSiteLoc not implemented!"); - } - llvm_unreachable("Unknown location style"); - } - - clang::DiagnosticIDs::Level - translateToClangDiagLevel(const mlir::DiagnosticSeverity &sev) { - switch (sev) { - case mlir::DiagnosticSeverity::Note: - return clang::DiagnosticIDs::Level::Note; - case mlir::DiagnosticSeverity::Warning: - return clang::DiagnosticIDs::Level::Warning; - case mlir::DiagnosticSeverity::Error: - return clang::DiagnosticIDs::Level::Error; - case mlir::DiagnosticSeverity::Remark: - return clang::DiagnosticIDs::Level::Remark; - } - llvm_unreachable("should not get here!"); - } - - public: - void emitClangTidyDiagnostic(mlir::Diagnostic &diag) { - auto clangBeginLoc = getClangSrcLoc(diag.getLocation()); - tidyCtx.diag(cir::checks::LifetimeCheckName, clangBeginLoc, diag.str(), - translateToClangDiagLevel(diag.getSeverity())); - for (const auto ¬e : diag.getNotes()) { - auto clangNoteBeginLoc = getClangSrcLoc(note.getLocation()); - tidyCtx.diag(cir::checks::LifetimeCheckName, clangNoteBeginLoc, - note.str(), translateToClangDiagLevel(note.getSeverity())); - } - } - - CIRTidyDiagnosticHandler(llvm::SourceMgr &mgr, mlir::MLIRContext *ctx, - clang::tidy::ClangTidyContext &tidyContext, - clang::SourceManager &clangMgr, - ShouldShowLocFn &&shouldShowLocFn = {}) - : SourceMgrDiagnosticHandler(mgr, ctx, llvm::errs(), - std::move(shouldShowLocFn)), - tidyCtx(tidyContext), clangSrcMgr(clangMgr) { - setHandler([this](mlir::Diagnostic &diag) { - // Emit diagnostic to llvm::errs() but also populate Clang - emitClangTidyDiagnostic(diag); - emitDiagnostic(diag); - }); - } - ~CIRTidyDiagnosticHandler() = default; - }; - - // Use a custom diagnostic handler that can allow both regular printing to - // stderr but also populates clang-tidy context with diagnostics (and allow - // for instance, diagnostics to be later converted to YAML). - CIRTidyDiagnosticHandler sourceMgrHandler(llvmSrcMgr, mlirCtx.get(), Context, - clangSrcMgr); - - mlir::PassManager pm(mlirCtx.get()); - pm.addPass(mlir::createMergeCleanupsPass()); - - auto remarks = - utils::options::parseStringList(OptsView.get("RemarksList", "")); - auto hist = - utils::options::parseStringList(OptsView.get("HistoryList", "all")); - auto hLimit = OptsView.get("HistLimit", 1U); - - if (Context.isCheckEnabled(cir::checks::LifetimeCheckName)) - pm.addPass(mlir::createLifetimeCheckPass(remarks, hist, hLimit, &C)); - - bool Result = !mlir::failed(pm.run(mlirMod)); - if (!Result) - llvm::report_fatal_error( - "The pass manager failed to run pass on the module!"); -} -} // namespace tidy -} // namespace cir diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h b/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h deleted file mode 100644 index d95114519986..000000000000 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRASTConsumer.h +++ /dev/null @@ -1,26 +0,0 @@ -#include "../ClangTidyDiagnosticConsumer.h" -#include "ClangTidyCheck.h" -#include "clang/AST/ASTContext.h" -#include "clang/CIR/CIRGenerator.h" -#include "clang/Frontend/CompilerInstance.h" - -using namespace clang; - -namespace cir { -namespace tidy { -class CIRASTConsumer : public ASTConsumer { -public: - CIRASTConsumer(CompilerInstance &CI, StringRef inputFile, - clang::tidy::ClangTidyContext &Context); - -private: - void Initialize(ASTContext &Context) override; - void HandleTranslationUnit(ASTContext &C) override; - bool HandleTopLevelDecl(DeclGroupRef D) override; - std::unique_ptr Gen; - ASTContext *AstContext{nullptr}; - clang::tidy::ClangTidyContext &Context; - clang::tidy::ClangTidyCheck::OptionsView OptsView; -}; -} // namespace tidy -} // namespace cir diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRChecks.h b/clang-tools-extra/clang-tidy/cir-tidy/CIRChecks.h deleted file mode 100644 index 7dccbf879b4b..000000000000 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRChecks.h +++ /dev/null @@ -1,21 +0,0 @@ -//===--- CIRChecks.h - cir-tidy -----------------------------*- C++ -*-----===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_TOOLS_EXTRA_CIRTIDY_CHECKS_H -#define LLVM_CLANG_TOOLS_EXTRA_CIRTIDY_CHECKS_H - -// FIXME: split LifetimeCheck.cpp into headers and expose the class in a way -// we can directly query the pass name and unique the source of truth. - -namespace cir { -namespace checks { -constexpr const char *LifetimeCheckName = "cir-lifetime-check"; -} -} // namespace cir - -#endif \ No newline at end of file diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp deleted file mode 100644 index 2a751fab2744..000000000000 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.cpp +++ /dev/null @@ -1,157 +0,0 @@ -//===--- clang-tidy/cir-tidy/CIRTidy.cpp - CIR tidy tool ------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file This file implements a cir-tidy tool. -/// -/// This tool uses the Clang Tooling infrastructure, see -/// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html -/// for details on setting it up with LLVM source tree. -/// -//===----------------------------------------------------------------------===// - -#include "CIRTidy.h" -#include "CIRASTConsumer.h" -#include "ClangTidyModuleRegistry.h" -#include "ClangTidyProfiling.h" -#ifndef CLANG_TIDY_CONFIG_H -#include "clang-tidy-config.h" -#endif -#include "clang/Frontend/CompilerInstance.h" -#include "clang/Lex/PreprocessorOptions.h" -#include "clang/Tooling/DiagnosticsYaml.h" -#include "clang/Tooling/Refactoring.h" -#include "clang/Tooling/ReplacementsYaml.h" -#include "clang/Tooling/Tooling.h" - -using namespace clang::tooling; -using namespace llvm; - -namespace cir { -namespace tidy { - -CIRTidyASTConsumerFactory::CIRTidyASTConsumerFactory( - ClangTidyContext &Context, - IntrusiveRefCntPtr OverlayFS) - : Context(Context), OverlayFS(std::move(OverlayFS)) {} - -std::unique_ptr -CIRTidyASTConsumerFactory::createASTConsumer(clang::CompilerInstance &Compiler, - StringRef File) { - // FIXME(clang-tidy): Move this to a separate method, so that - // CreateASTConsumer doesn't modify Compiler. - SourceManager *SM = &Compiler.getSourceManager(); - Context.setSourceManager(SM); - Context.setCurrentFile(File); - Context.setASTContext(&Compiler.getASTContext()); - - auto WorkingDir = Compiler.getSourceManager() - .getFileManager() - .getVirtualFileSystem() - .getCurrentWorkingDirectory(); - if (WorkingDir) - Context.setCurrentBuildDirectory(WorkingDir.get()); - return std::make_unique(Compiler, File, Context); -} - -std::vector CIRTidyASTConsumerFactory::getCheckNames() { - std::vector CheckNames; - for (const auto &CIRCheckName : this->CIRChecks) { - if (Context.isCheckEnabled(CIRCheckName)) - CheckNames.emplace_back(CIRCheckName); - } - - llvm::sort(CheckNames); - return CheckNames; -} - -void exportReplacements(const llvm::StringRef MainFilePath, - const std::vector &Errors, - raw_ostream &OS) { - TranslationUnitDiagnostics TUD; - TUD.MainSourceFile = std::string(MainFilePath); - for (const auto &Error : Errors) { - tooling::Diagnostic Diag = Error; - TUD.Diagnostics.insert(TUD.Diagnostics.end(), Diag); - } - - yaml::Output YAML(OS); - YAML << TUD; -} - -std::vector -runCIRTidy(ClangTidyContext &Context, const CompilationDatabase &Compilations, - ArrayRef InputFiles, - llvm::IntrusiveRefCntPtr BaseFS, - bool ApplyAnyFix, bool EnableCheckProfile, - llvm::StringRef StoreCheckProfile) { - ClangTool Tool(Compilations, InputFiles, - std::make_shared(), BaseFS); - - Context.setEnableProfiling(EnableCheckProfile); - Context.setProfileStoragePrefix(StoreCheckProfile); - - ClangTidyDiagnosticConsumer DiagConsumer(Context, nullptr, true, ApplyAnyFix); - DiagnosticsEngine DE(new DiagnosticIDs(), new DiagnosticOptions(), - &DiagConsumer, /*ShouldOwnClient=*/false); - Context.setDiagnosticsEngine(&DE); - Tool.setDiagnosticConsumer(&DiagConsumer); - - class ActionFactory : public FrontendActionFactory { - public: - ActionFactory(ClangTidyContext &Context, - IntrusiveRefCntPtr BaseFS) - : ConsumerFactory(Context, std::move(BaseFS)) {} - std::unique_ptr create() override { - return std::make_unique(&ConsumerFactory); - } - - bool runInvocation(std::shared_ptr Invocation, - FileManager *Files, - std::shared_ptr PCHContainerOps, - DiagnosticConsumer *DiagConsumer) override { - // Explicitly ask to define __clang_analyzer__ macro. - Invocation->getPreprocessorOpts().SetUpStaticAnalyzer = true; - return FrontendActionFactory::runInvocation( - Invocation, Files, PCHContainerOps, DiagConsumer); - } - - private: - class Action : public ASTFrontendAction { - public: - Action(CIRTidyASTConsumerFactory *Factory) : Factory(Factory) {} - std::unique_ptr CreateASTConsumer(CompilerInstance &Compiler, - StringRef File) override { - return Factory->createASTConsumer(Compiler, File); - } - - private: - CIRTidyASTConsumerFactory *Factory; - }; - - CIRTidyASTConsumerFactory ConsumerFactory; - }; - - ActionFactory Factory(Context, std::move(BaseFS)); - Tool.run(&Factory); - return DiagConsumer.take(); -} - -} // namespace tidy -} // namespace cir - -// Now that clang-tidy is integrated with the lifetime checker, CIR changes to -// ClangTidyForceLinker.h are forcing CIRModuleAnchorSource to also be available -// as part of cir-tidy. Since cir-tidy is going to be removed soon, add this so -// that it can still builds in the meantime. -namespace clang::tidy { - -// This anchor is used to force the linker to link in the generated object file -// and thus register the CIRModule. -volatile int CIRModuleAnchorSource = 0; - -} // namespace clang::tidy diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h b/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h deleted file mode 100644 index 91073d106328..000000000000 --- a/clang-tools-extra/clang-tidy/cir-tidy/CIRTidy.h +++ /dev/null @@ -1,62 +0,0 @@ -//===--- CIRTidy.h - cir-tidy -------------------------------*- C++ -*-----===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIRTIDY_H -#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIRTIDY_H - -#include "CIRChecks.h" -#include "ClangTidyDiagnosticConsumer.h" -#include "ClangTidyModule.h" -#include "clang/AST/ASTConsumer.h" -#include "clang/CIR/Dialect/Passes.h" -#include - -namespace clang { -class CompilerInstance; -namespace tooling { -class CompilationDatabase; -} -} // namespace clang - -using namespace clang; -using namespace clang::tidy; - -namespace cir { -namespace tidy { - -class CIRTidyASTConsumerFactory { -public: - CIRTidyASTConsumerFactory( - ClangTidyContext &Context, - IntrusiveRefCntPtr OverlayFS = nullptr); - - std::unique_ptr - createASTConsumer(clang::CompilerInstance &Compiler, StringRef File); - - /// Get the list of enabled checks. - std::vector getCheckNames(); - -private: - ClangTidyContext &Context; - IntrusiveRefCntPtr OverlayFS; - const std::vector CIRChecks = { - cir::checks::LifetimeCheckName}; -}; - -std::vector -runCIRTidy(clang::tidy::ClangTidyContext &Context, - const tooling::CompilationDatabase &Compilations, - ArrayRef InputFiles, - llvm::IntrusiveRefCntPtr BaseFS, - bool ApplyAnyFix, bool EnableCheckProfile = false, - llvm::StringRef StoreCheckProfile = StringRef()); - -} // end namespace tidy -} // end namespace cir - -#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CIRTIDY_H diff --git a/clang-tools-extra/clang-tidy/cir-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/cir-tidy/CMakeLists.txt deleted file mode 100644 index fb58718bf769..000000000000 --- a/clang-tools-extra/clang-tidy/cir-tidy/CMakeLists.txt +++ /dev/null @@ -1,55 +0,0 @@ -set(LLVM_LINK_COMPONENTS - FrontendOpenMP - Support - ) - -include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/.. ) -include_directories( ${LLVM_MAIN_SRC_DIR}/../mlir/include ) -include_directories( ${CMAKE_BINARY_DIR}/tools/mlir/include ) - -get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) - -add_subdirectory(tool) - -add_clang_library(CIRTidy - CIRTidy.cpp - CIRASTConsumer.cpp - - DEPENDS - omp_gen - - LINK_LIBS - clangASTMatchers - clangCIR - clangFrontend - clangSerialization - clangTidy - clangTidyUtils - ${dialect_libs} - MLIRCIR - MLIRCIRTransforms - MLIRAffineToStandard - MLIRAnalysis - MLIRIR - MLIRLLVMCommonConversion - MLIRLLVMDialect - MLIRLLVMToLLVMIRTranslation - MLIRMemRefDialect - MLIRMemRefToLLVM - MLIRParser - MLIRPass - MLIRSideEffectInterfaces - MLIRSCFToControlFlow - MLIRFuncToLLVM - MLIRSupport - MLIRMemRefDialect - MLIRTargetLLVMIRExport - MLIRTransforms -) - -clang_target_link_libraries(CIRTidy - PRIVATE - clangBasic - clangTooling - clangToolingCore - ) diff --git a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp deleted file mode 100644 index 14d9298e09cc..000000000000 --- a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.cpp +++ /dev/null @@ -1,493 +0,0 @@ -//===--- tools/extra/clang-tidy/cir/CIRTidyMain.cpp - cir tidy tool -------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file This file implements a cir-tidy tool. -/// -/// This tool uses the Clang Tooling infrastructure, see -/// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html -/// for details on setting it up with LLVM source tree. -/// -//===----------------------------------------------------------------------===// - -#include "CIRTidyMain.h" -#include "../ClangTidy.h" -#include "../ClangTidyForceLinker.h" -#include "../GlobList.h" -#include "CIRTidy.h" -#include "clang/Tooling/CommonOptionsParser.h" -#include "llvm/Support/InitLLVM.h" -#include "llvm/Support/Process.h" -#include "llvm/Support/TargetSelect.h" -#include "llvm/Support/WithColor.h" - -using namespace clang::tooling; -using namespace clang::tidy; -using namespace llvm; - -static cl::OptionCategory CIRTidyCategory("cir-tidy options"); - -static cl::extrahelp CommonHelp(CommonOptionsParser::HelpMessage); -static cl::extrahelp CIRTidyHelp(R"( -Configuration files: - cir-tidy attempts to read configuration for each source file from a - .clang-tidy file located in the closest parent directory of the source - file. If InheritParentConfig is true in a config file, the configuration file - in the parent directory (if any exists) will be taken and current config file - will be applied on top of the parent one. If any configuration options have - a corresponding command-line option, command-line option takes precedence. - The effective configuration can be inspected using -dump-config: - - $ cir-tidy -dump-config - --- - Checks: '-*,some-check' - WarningsAsErrors: '' - HeaderFilterRegex: '' - FormatStyle: none - InheritParentConfig: true - User: user - CheckOptions: - - key: some-check.SomeOption - value: 'some value' - ... - -)"); - -const char DefaultChecks[] = // Enable these checks by default: - "clang-diagnostic-*," // * compiler diagnostics - "clang-analyzer-*"; // * Static Analyzer checks - -static cl::opt - Checks("checks", cl::desc(R"(Comma-separated list of globs with optional '-' -prefix. Globs are processed in order of -appearance in the list. Globs without '-' -prefix add checks with matching names to the -set, globs with the '-' prefix remove checks -with matching names from the set of enabled -checks. This option's value is appended to the -value of the 'Checks' option in .clang-tidy -file, if any. -)"), - cl::init(""), cl::cat(CIRTidyCategory)); - -static cl::opt - WarningsAsErrors("warnings-as-errors", - cl::desc(R"(Upgrades warnings to errors. Same format as -'-checks'. -This option's value is appended to the value of -the 'WarningsAsErrors' option in .clang-tidy -file, if any. -)"), - cl::init(""), cl::cat(CIRTidyCategory)); - -static cl::opt - HeaderFilter("header-filter", - cl::desc(R"(Regular expression matching the names of the -headers to output diagnostics from. Diagnostics -from the main file of each translation unit are -always displayed. -Can be used together with -line-filter. -This option overrides the 'HeaderFilterRegex' -option in .clang-tidy file, if any. -)"), - cl::init(""), cl::cat(CIRTidyCategory)); - -static cl::opt - SystemHeaders("system-headers", - cl::desc("Display the errors from system headers."), - cl::init(false), cl::cat(CIRTidyCategory)); -static cl::opt - LineFilter("line-filter", - cl::desc(R"(List of files with line ranges to filter the -warnings. Can be used together with --header-filter. The format of the list is a -JSON array of objects: - [ - {"name":"file1.cpp","lines":[[1,3],[5,7]]}, - {"name":"file2.h"} - ] -)"), - cl::init(""), cl::cat(CIRTidyCategory)); - -static cl::opt Fix("fix", - cl::desc(R"(Apply suggested fixes. Without -fix-errors -cir-tidy will bail out if any compilation -errors were found. -)"), - cl::init(false), cl::cat(CIRTidyCategory)); - -static cl::opt - FixErrors("fix-errors", - cl::desc(R"(Apply suggested fixes even if compilation -errors were found. If compiler errors have -attached fix-its, cir-tidy will apply them as -well. -)"), - cl::init(false), cl::cat(CIRTidyCategory)); - -static cl::opt - FixNotes("fix-notes", - cl::desc(R"(If a warning has no fix, but a single fix can -be found through an associated diagnostic note, -apply the fix. -Specifying this flag will implicitly enable the -'--fix' flag. -)"), - cl::init(false), cl::cat(CIRTidyCategory)); - -static cl::opt - FormatStyle("format-style", - cl::desc(R"(Style for formatting code around applied fixes: - - 'none' (default) turns off formatting - - 'file' (literally 'file', not a placeholder) - uses .clang-format file in the closest parent - directory - - '{ }' specifies options inline, e.g. - -format-style='{BasedOnStyle: llvm, IndentWidth: 8}' - - 'llvm', 'google', 'webkit', 'mozilla' -See clang-format documentation for the up-to-date -information about formatting styles and options. -This option overrides the 'FormatStyle` option in -.clang-tidy file, if any. -)"), - cl::init("none"), cl::cat(CIRTidyCategory)); - -static cl::opt - ListChecks("list-checks", - cl::desc(R"(List all enabled checks and exit. Use with --checks=* to list all available checks. -)"), - cl::init(false), cl::cat(CIRTidyCategory)); - -static cl::opt - ExplainConfig("explain-config", - cl::desc(R"(For each enabled check explains, where it is -enabled, i.e. in cir-tidy binary, command -line or a specific configuration file. -)"), - cl::init(false), cl::cat(CIRTidyCategory)); - -static cl::opt - Config("config", cl::desc(R"(Specifies a configuration in YAML/JSON format: - -config="{Checks: '*', - CheckOptions: [{key: x, - value: y}]}" -When the value is empty, cir-tidy will -attempt to find a file named .clang-tidy for -each source file in its parent directories. -)"), - cl::init(""), cl::cat(CIRTidyCategory)); - -static cl::opt ConfigFile( - "config-file", - cl::desc(R"(Specify the path of .clang-tidy or custom config file: - e.g. --config-file=/some/path/myTidyConfigFile -This option internally works exactly the same way as - --config option after reading specified config file. -Use either --config-file or --config, not both. -)"), - cl::init(""), cl::cat(CIRTidyCategory)); - -static cl::opt - DumpConfig("dump-config", - cl::desc(R"(Dumps configuration in the YAML format to -stdout. This option can be used along with a -file name (and '--' if the file is outside of a -project with configured compilation database). -The configuration used for this file will be -printed. -Use along with -checks=* to include -configuration of all checks. -)"), - cl::init(false), cl::cat(CIRTidyCategory)); - -static cl::opt - EnableCheckProfile("enable-check-profile", - cl::desc(R"(Enable per-check timing profiles, and print a -report to stderr. -)"), - cl::init(false), cl::cat(CIRTidyCategory)); - -static cl::opt - StoreCheckProfile("store-check-profile", - cl::desc(R"(By default reports are printed in tabulated -format to stderr. When this option is passed, -these per-TU profiles are instead stored as JSON. -)"), - cl::value_desc("prefix"), cl::cat(CIRTidyCategory)); - -/// This option allows enabling the experimental alpha checkers from the static -/// analyzer. This option is set to false and not visible in help, because it is -/// highly not recommended for users. -static cl::opt - AllowEnablingAnalyzerAlphaCheckers("allow-enabling-analyzer-alpha-checkers", - cl::init(false), cl::Hidden, - cl::cat(CIRTidyCategory)); - -static cl::opt - ExportFixes("export-fixes", - cl::desc(R"(YAML file to store suggested fixes in. The -stored fixes can be applied to the input source -code with cir-apply-replacements. -)"), - cl::value_desc("filename"), cl::cat(CIRTidyCategory)); - -static cl::opt - Quiet("quiet", cl::desc(R"(Run cir-tidy in quiet mode. This suppresses -printing statistics about ignored warnings and -warnings treated as errors if the respective -options are specified. -)"), - cl::init(false), cl::cat(CIRTidyCategory)); - -static cl::opt - VfsOverlay("vfsoverlay", - cl::desc(R"(Overlay the virtual filesystem described by file -over the real file system. -)"), - cl::value_desc("filename"), cl::cat(CIRTidyCategory)); - -static cl::opt - UseColor("use-color", - cl::desc(R"(Use colors in diagnostics. If not set, colors -will be used if the terminal connected to -standard output supports colors. -This option overrides the 'UseColor' option in -.clang-tidy file, if any. -)"), - cl::init(false), cl::cat(CIRTidyCategory)); -namespace cir { -namespace tidy { - -std::vector getCIRCheckNames(const ClangTidyOptions &Options) { - clang::tidy::ClangTidyContext Context( - std::make_unique(ClangTidyGlobalOptions(), - Options)); - CIRTidyASTConsumerFactory Factory(Context); - return Factory.getCheckNames(); -} - -static std::unique_ptr -createOptionsProvider(llvm::IntrusiveRefCntPtr FS) { - ClangTidyGlobalOptions GlobalOptions; - if (std::error_code Err = parseLineFilter(LineFilter, GlobalOptions)) { - llvm::errs() << "Invalid LineFilter: " << Err.message() << "\n\nUsage:\n"; - llvm::cl::PrintHelpMessage(/*Hidden=*/false, /*Categorized=*/true); - return nullptr; - } - - ClangTidyOptions DefaultOptions; - DefaultOptions.Checks = DefaultChecks; - DefaultOptions.WarningsAsErrors = ""; - DefaultOptions.HeaderFilterRegex = HeaderFilter; - DefaultOptions.SystemHeaders = SystemHeaders; - DefaultOptions.FormatStyle = FormatStyle; - DefaultOptions.User = llvm::sys::Process::GetEnv("USER"); - // USERNAME is used on Windows. - if (!DefaultOptions.User) - DefaultOptions.User = llvm::sys::Process::GetEnv("USERNAME"); - - ClangTidyOptions OverrideOptions; - if (Checks.getNumOccurrences() > 0) - OverrideOptions.Checks = Checks; - if (WarningsAsErrors.getNumOccurrences() > 0) - OverrideOptions.WarningsAsErrors = WarningsAsErrors; - if (HeaderFilter.getNumOccurrences() > 0) - OverrideOptions.HeaderFilterRegex = HeaderFilter; - if (SystemHeaders.getNumOccurrences() > 0) - OverrideOptions.SystemHeaders = SystemHeaders; - if (FormatStyle.getNumOccurrences() > 0) - OverrideOptions.FormatStyle = FormatStyle; - if (UseColor.getNumOccurrences() > 0) - OverrideOptions.UseColor = UseColor; - - auto LoadConfig = - [&](StringRef Configuration, - StringRef Source) -> std::unique_ptr { - llvm::ErrorOr ParsedConfig = - parseConfiguration(MemoryBufferRef(Configuration, Source)); - if (ParsedConfig) - return std::make_unique( - std::move(GlobalOptions), - ClangTidyOptions::getDefaults().merge(DefaultOptions, 0), - std::move(*ParsedConfig), std::move(OverrideOptions), std::move(FS)); - llvm::errs() << "Error: invalid configuration specified.\n" - << ParsedConfig.getError().message() << "\n"; - return nullptr; - }; - - if (ConfigFile.getNumOccurrences() > 0) { - if (Config.getNumOccurrences() > 0) { - llvm::errs() << "Error: --config-file and --config are " - "mutually exclusive. Specify only one.\n"; - return nullptr; - } - - llvm::ErrorOr> Text = - llvm::MemoryBuffer::getFile(ConfigFile); - if (std::error_code EC = Text.getError()) { - llvm::errs() << "Error: can't read config-file '" << ConfigFile - << "': " << EC.message() << "\n"; - return nullptr; - } - - return LoadConfig((*Text)->getBuffer(), ConfigFile); - } - - if (Config.getNumOccurrences() > 0) - return LoadConfig(Config, ""); - - return std::make_unique( - std::move(GlobalOptions), std::move(DefaultOptions), - std::move(OverrideOptions), std::move(FS)); -} - -llvm::IntrusiveRefCntPtr -getVfsFromFile(const std::string &OverlayFile, - llvm::IntrusiveRefCntPtr BaseFS) { - llvm::ErrorOr> Buffer = - BaseFS->getBufferForFile(OverlayFile); - if (!Buffer) { - llvm::errs() << "Can't load virtual filesystem overlay file '" - << OverlayFile << "': " << Buffer.getError().message() - << ".\n"; - return nullptr; - } - - IntrusiveRefCntPtr FS = vfs::getVFSFromYAML( - std::move(Buffer.get()), /*DiagHandler*/ nullptr, OverlayFile); - if (!FS) { - llvm::errs() << "Error: invalid virtual filesystem overlay file '" - << OverlayFile << "'.\n"; - return nullptr; - } - return FS; -} - -int CIRTidyMain(int argc, const char **argv) { - llvm::InitLLVM X(argc, argv); - llvm::Expected OptionsParser = - CommonOptionsParser::create(argc, argv, CIRTidyCategory, cl::ZeroOrMore); - if (!OptionsParser) { - llvm::WithColor::error() << llvm::toString(OptionsParser.takeError()); - return 1; - } - - llvm::IntrusiveRefCntPtr BaseFS( - new vfs::OverlayFileSystem(vfs::getRealFileSystem())); - - if (!VfsOverlay.empty()) { - IntrusiveRefCntPtr VfsFromFile = - getVfsFromFile(VfsOverlay, BaseFS); - if (!VfsFromFile) - return 1; - BaseFS->pushOverlay(std::move(VfsFromFile)); - } - - auto OwningOptionsProvider = createOptionsProvider(BaseFS); - auto *OptionsProvider = OwningOptionsProvider.get(); - if (!OptionsProvider) - return 1; - - auto MakeAbsolute = [](const std::string &Input) -> SmallString<256> { - if (Input.empty()) - return {}; - SmallString<256> AbsolutePath(Input); - if (std::error_code EC = llvm::sys::fs::make_absolute(AbsolutePath)) { - llvm::errs() << "Can't make absolute path from " << Input << ": " - << EC.message() << "\n"; - } - return AbsolutePath; - }; - - SmallString<256> ProfilePrefix = MakeAbsolute(StoreCheckProfile); - - StringRef FileName("dummy"); - auto PathList = OptionsParser->getSourcePathList(); - if (!PathList.empty()) { - FileName = PathList.front(); - } - - SmallString<256> FilePath = MakeAbsolute(std::string(FileName)); - - ClangTidyOptions EffectiveOptions = OptionsProvider->getOptions(FilePath); - std::vector EnabledChecks = getCIRCheckNames(EffectiveOptions); - - if (ExplainConfig) { - // FIXME: Show other ClangTidyOptions' fields, like ExtraArg. - std::vector - RawOptions = OptionsProvider->getRawOptions(FilePath); - for (const std::string &Check : EnabledChecks) { - for (auto It = RawOptions.rbegin(); It != RawOptions.rend(); ++It) { - if (It->first.Checks && GlobList(*It->first.Checks).contains(Check)) { - llvm::outs() << "'" << Check << "' is enabled in the " << It->second - << ".\n"; - break; - } - } - } - return 0; - } - - if (ListChecks) { - if (EnabledChecks.empty()) { - llvm::errs() << "No checks enabled.\n"; - return 1; - } - llvm::outs() << "Enabled checks:"; - for (const auto &CheckName : EnabledChecks) - llvm::outs() << "\n " << CheckName; - llvm::outs() << "\n\n"; - return 0; - } - - if (DumpConfig) { - EffectiveOptions.CheckOptions = - getCheckOptions(EffectiveOptions, AllowEnablingAnalyzerAlphaCheckers); - llvm::outs() << configurationAsText(ClangTidyOptions::getDefaults().merge( - EffectiveOptions, 0)) - << "\n"; - return 0; - } - - if (EnabledChecks.empty()) { - llvm::errs() << "Error: no checks enabled.\n"; - llvm::cl::PrintHelpMessage(/*Hidden=*/false, /*Categorized=*/true); - return 1; - } - - if (PathList.empty()) { - llvm::errs() << "Error: no input files specified.\n"; - llvm::cl::PrintHelpMessage(/*Hidden=*/false, /*Categorized=*/true); - return 1; - } - - llvm::InitializeAllTargetInfos(); - llvm::InitializeAllTargetMCs(); - llvm::InitializeAllAsmParsers(); - - ClangTidyContext Context(std::move(OwningOptionsProvider), - AllowEnablingAnalyzerAlphaCheckers); - std::vector Errors = - runCIRTidy(Context, OptionsParser->getCompilations(), PathList, BaseFS, - FixNotes, EnableCheckProfile, ProfilePrefix); - - if (!ExportFixes.empty() && !Errors.empty()) { - std::error_code EC; - llvm::raw_fd_ostream OS(ExportFixes, EC, llvm::sys::fs::OF_None); - if (EC) { - llvm::errs() << "Error opening output file: " << EC.message() << '\n'; - return 1; - } - exportReplacements(FilePath.str(), Errors, OS); - } - - return 0; -} - -} // namespace tidy -} // namespace cir diff --git a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.h b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.h deleted file mode 100644 index 08d25544dbf3..000000000000 --- a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyMain.h +++ /dev/null @@ -1,23 +0,0 @@ -//===--- tools/extra/clang-tidy/cir/CIRTidyMain.h - cir tidy tool ---------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file This file declares the main function for the cir-tidy tool. -/// -/// This tool uses the Clang Tooling infrastructure, see -/// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html -/// for details on setting it up with LLVM source tree. -/// -//===----------------------------------------------------------------------===// - -namespace cir { -namespace tidy { - -int CIRTidyMain(int argc, const char **argv); - -} // namespace tidy -} // namespace cir diff --git a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyToolMain.cpp b/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyToolMain.cpp deleted file mode 100644 index b5213510e822..000000000000 --- a/clang-tools-extra/clang-tidy/cir-tidy/tool/CIRTidyToolMain.cpp +++ /dev/null @@ -1,21 +0,0 @@ -//===--- tools/extra/clang-tidy/cir/CIRTidyToolMain.cpp - cir tidy tool ---===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file This file contains cir-tidy tool entry point main function. -/// -/// This tool uses the Clang Tooling infrastructure, see -/// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html -/// for details on setting it up with LLVM source tree. -/// -//===----------------------------------------------------------------------===// - -#include "CIRTidyMain.h" - -int main(int argc, const char **argv) { - return cir::tidy::CIRTidyMain(argc, argv); -} diff --git a/clang-tools-extra/test/CMakeLists.txt b/clang-tools-extra/test/CMakeLists.txt index 2b51500fa2cf..6ab2288abd7c 100644 --- a/clang-tools-extra/test/CMakeLists.txt +++ b/clang-tools-extra/test/CMakeLists.txt @@ -53,12 +53,6 @@ set(CLANG_TOOLS_TEST_DEPS clang-tidy ) -if(CLANG_ENABLE_CIR) - list(APPEND CLANG_TOOLS_TEST_DEPS - cir-tidy - ) -endif() - # Add lit test dependencies. set(LLVM_UTILS_DEPS FileCheck count not diff --git a/clang-tools-extra/test/cir-tidy/check_cir_tidy.py b/clang-tools-extra/test/cir-tidy/check_cir_tidy.py deleted file mode 100644 index 5f042718efda..000000000000 --- a/clang-tools-extra/test/cir-tidy/check_cir_tidy.py +++ /dev/null @@ -1,191 +0,0 @@ -#!/usr/bin/env python -# -#===- check_cir_tidy.py - CIRTidy Test Helper ------------*- python -*--=======# -# -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -#===------------------------------------------------------------------------===# - -r""" -CIRTIDY Test Helper -===================== - -This script runs cir-tidy and check outputed messages. - -Usage: - check_cir_tidy.py -- \ - [optional cir-tidy arguments] - -Example: - // RUN: %check_cir_tidy %s cir-lifetime-check %t -- -""" - -import argparse -import re -import subprocess -import sys -import shutil - -def write_file(file_name, text): - with open(file_name, 'w', encoding='utf-8') as f: - f.write(text) - f.truncate() - - -def run_test_once(args, extra_args): - input_file_name = args.input_file_name - check_name = args.check_name - temp_file_name = args.temp_file_name - temp_file_name = temp_file_name + ".cpp" - - cir_tidy_extra_args = extra_args - cir_extra_args = [] - if '--' in extra_args: - i = cir_tidy_extra_args.index('--') - cir_extra_args = cir_tidy_extra_args[i + 1:] - cir_tidy_extra_args = cir_tidy_extra_args[:i] - - # If the test does not specify a config style, force an empty one; otherwise - # autodetection logic can discover a ".clang-tidy" file that is not related to - # the test. - if not any( - [arg.startswith('-config=') for arg in cir_tidy_extra_args]): - cir_tidy_extra_args.append('-config={}') - - with open(input_file_name, 'r', encoding='utf-8') as input_file: - input_text = input_file.read() - - check_fixes_prefixes = [] - check_messages_prefixes = [] - check_notes_prefixes = [] - - has_check_fixes = False - has_check_messages = False - has_check_notes = False - - check_fixes_prefix = 'CHECK-FIXES' - check_messages_prefix = 'CHECK-MESSAGES' - check_notes_prefix = 'CHECK-NOTES' - - has_check_fix = check_fixes_prefix in input_text - has_check_message = check_messages_prefix in input_text - has_check_note = check_notes_prefix in input_text - - if not has_check_fix and not has_check_message and not has_check_note: - sys.exit('%s, %s or %s not found in the input' % - (check_fixes_prefix, check_messages_prefix, check_notes_prefix)) - - has_check_fixes = has_check_fixes or has_check_fix - has_check_messages = has_check_messages or has_check_message - has_check_notes = has_check_notes or has_check_note - - if has_check_fix: - check_fixes_prefixes.append(check_fixes_prefix) - if has_check_message: - check_messages_prefixes.append(check_messages_prefix) - if has_check_note: - check_notes_prefixes.append(check_notes_prefix) - - assert has_check_fixes or has_check_messages or has_check_notes - # Remove the contents of the CHECK lines to avoid CHECKs matching on - # themselves. We need to keep the comments to preserve line numbers while - # avoiding empty lines which could potentially trigger formatting-related - # checks. - cleaned_test = re.sub('// *CHECK-[A-Z0-9\-]*:[^\r\n]*', '//', input_text) - - write_file(temp_file_name, cleaned_test) - - original_file_name = temp_file_name + ".orig" - write_file(original_file_name, cleaned_test) - - args = ['cir-tidy', temp_file_name, '--checks=-*,' + check_name] + \ - cir_tidy_extra_args + ['--'] + cir_extra_args - - arg_print_list = [] - for arg_print in cir_tidy_extra_args: - if (arg_print.startswith("-config=")): - conf = arg_print.replace("-config=", "-config='") - conf += "'" - arg_print_list.append(conf) - continue - arg_print_list.append(arg_print) - - cir_tidy_bin = shutil.which('cir-tidy') - args_for_print = [cir_tidy_bin, temp_file_name, "--checks='-*," + check_name + "'"] + \ - arg_print_list + ['--'] + cir_extra_args - print('Running: ' + " ".join(args_for_print)) - - try: - cir_tidy_output = \ - subprocess.check_output(args, stderr=subprocess.STDOUT).decode() - except subprocess.CalledProcessError as e: - print('cir-tidy failed:\n' + e.output.decode()) - raise - - print('------------------------ cir-tidy output -------------------------') - print(cir_tidy_output.encode()) - print('\n------------------------------------------------------------------') - - try: - diff_output = subprocess.check_output( - ['diff', '-u', original_file_name, temp_file_name], - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - diff_output = e.output - - print('------------------------------ Fixes -----------------------------\n' + - diff_output.decode(errors='ignore') + - '\n------------------------------------------------------------------') - - if has_check_fixes: - try: - subprocess.check_output( - ['FileCheck', '-input-file=' + temp_file_name, input_file_name, - '-check-prefixes=' + ','.join(check_fixes_prefixes), - '-strict-whitespace'], - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - print('FileCheck failed:\n' + e.output.decode()) - raise - - if has_check_messages: - messages_file = temp_file_name + '.msg' - write_file(messages_file, cir_tidy_output) - try: - subprocess.check_output( - ['FileCheck', '-input-file=' + messages_file, input_file_name, - '-check-prefixes=' + ','.join(check_messages_prefixes), - '-implicit-check-not={{warning|error}}:'], - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - print('FileCheck failed:\n' + e.output.decode()) - raise - - if has_check_notes: - notes_file = temp_file_name + '.notes' - write_file(notes_file, cir_tidy_output) - try: - subprocess.check_output( - ['FileCheck', '-input-file=' + notes_file, input_file_name, - '-check-prefixes=' + ','.join(check_notes_prefixes), - '-implicit-check-not={{error}}:'], - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - print('FileCheck failed:\n' + e.output.decode()) - raise - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('input_file_name') - parser.add_argument('check_name') - parser.add_argument('temp_file_name') - - args, extra_args = parser.parse_known_args() - run_test_once(args, extra_args) - - -if __name__ == '__main__': - main() diff --git a/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp b/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp deleted file mode 100644 index 7bf684fbad66..000000000000 --- a/clang-tools-extra/test/cir-tidy/lifetime-basic.cpp +++ /dev/null @@ -1,40 +0,0 @@ -// RUN: %check_cir_tidy %s cir-lifetime-check %t \ -// RUN: --export-fixes=%t.yaml \ -// RUN: -config='{CheckOptions: \ -// RUN: [{key: cir-lifetime-check.RemarksList, value: "all"}, \ -// RUN: {key: cir-lifetime-check.HistLimit, value: "1"}, \ -// RUN: {key: cir-lifetime-check.CodeGenBuildDeferredThreshold, value: "500"}, \ -// RUN: {key: cir-lifetime-check.CodeGenSkipFunctionsFromSystemHeaders, value: "false"}, \ -// RUN: {key: cir-lifetime-check.HistoryList, value: "invalid;null"}]}' \ -// RUN: -- -// RUN: FileCheck -input-file=%t.yaml -check-prefix=CHECK-YAML %s - -int *p0() { - int *p = nullptr; - { - int x = 0; - p = &x; - *p = 42; // CHECK-MESSAGES: remark: pset => { x } - } // CHECK-NOTES: note: pointee 'x' invalidated at end of scope - *p = 42; // CHECK-MESSAGES: remark: pset => { invalid } - // CHECK-MESSAGES: :[[@LINE-1]]:4: warning: use of invalid pointer 'p' - return p; -} - -// CHECK-YAML: DiagnosticMessage: -// CHECK-YAML: Message: 'pset => { x }' -// CHECK-YAML: Replacements: [] -// CHECK-YAML: Level: Remark - -// CHECK-YAML: DiagnosticMessage: -// CHECK-YAML: Message: 'pset => { invalid }' -// CHECK-YAML: Replacements: [] -// CHECK-YAML: Level: Remark - -// CHECK-YAML: DiagnosticMessage: -// CHECK-YAML: Message: 'use of invalid pointer ''p''' -// CHECK-YAML: Replacements: [] -// CHECK-YAML: Notes: -// CHECK-YAML: - Message: 'pointee ''x'' invalidated at end of scope' -// CHECK-YAML: Replacements: [] -// CHECK-YAML: Level: Warning \ No newline at end of file diff --git a/clang-tools-extra/test/cir-tidy/lit.local.cfg b/clang-tools-extra/test/cir-tidy/lit.local.cfg deleted file mode 100644 index e479c3e74cb6..000000000000 --- a/clang-tools-extra/test/cir-tidy/lit.local.cfg +++ /dev/null @@ -1,2 +0,0 @@ -if not config.clang_enable_cir: - config.unsupported = True \ No newline at end of file diff --git a/clang-tools-extra/test/lit.cfg.py b/clang-tools-extra/test/lit.cfg.py index 2e3937337ed3..9f64fd3d2ffa 100644 --- a/clang-tools-extra/test/lit.cfg.py +++ b/clang-tools-extra/test/lit.cfg.py @@ -54,11 +54,6 @@ config.substitutions.append( ("%check_clang_tidy", "%s %s" % (python_exec, check_clang_tidy)) ) -check_cir_tidy = os.path.join( - config.test_source_root, "cir-tidy", "check_cir_tidy.py") -config.substitutions.append( - ('%check_cir_tidy', - '%s %s' % (python_exec, check_cir_tidy)) ) clang_tidy_diff = os.path.join( config.test_source_root, "..", "clang-tidy", "tool", "clang-tidy-diff.py" ) From 8a3af8de8d1b9b4ebe410bba6df020059d806d9b Mon Sep 17 00:00:00 2001 From: Keyi Zhang Date: Mon, 25 Sep 2023 23:01:00 -0700 Subject: [PATCH 1207/2301] [CIR][Lowering] use cir.int type in LIT tests (#266) Here is the promised patch that adds proper type conversion to CIR -> MLIR conversion. I tried to keep the changes minimum but the existing implementation doesn't use `TypeConverter`. This should not have any functional changes except for one tiny fix that registers the `cf` dialect, which should allow `goto.mlir` to pass. Happy to break the PR into two if requested. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 316 +++++++++--------- clang/test/CIR/Lowering/ThroughMLIR/array.cir | 11 +- .../ThroughMLIR/binop-unsigned-int.cir | 91 ++--- clang/test/CIR/Lowering/ThroughMLIR/cmp.cir | 41 +-- clang/test/CIR/Lowering/ThroughMLIR/goto.cir | 23 +- .../test/CIR/Lowering/ThroughMLIR/memref.cir | 13 +- clang/test/CIR/Lowering/ThroughMLIR/scope.cir | 7 +- .../Lowering/ThroughMLIR/unary-inc-dec.cir | 23 +- .../Lowering/ThroughMLIR/unary-plus-minus.cir | 23 +- 9 files changed, 278 insertions(+), 270 deletions(-) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index b2921abd16c0..c3b819e16d6e 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -72,47 +72,35 @@ struct ConvertCIRToMLIRPass virtual StringRef getArgument() const override { return "cir-to-mlir"; } }; -class CIRCallLowering : public mlir::OpRewritePattern { +class CIRCallLowering : public mlir::OpConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CallOp op, - mlir::PatternRewriter &rewriter) const override { + matchAndRewrite(mlir::cir::CallOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + SmallVector types; + if (mlir::failed( + getTypeConverter()->convertTypes(op.getResultTypes(), types))) + return mlir::failure(); rewriter.replaceOpWithNewOp( - op, mlir::SymbolRefAttr::get(op), op.getResultTypes(), - op.getArgOperands()); + op, mlir::SymbolRefAttr::get(op), types, adaptor.getOperands()); return mlir::LogicalResult::success(); } }; -class CIRAllocaLowering : public mlir::OpRewritePattern { +class CIRAllocaLowering + : public mlir::OpConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::AllocaOp op, - mlir::PatternRewriter &rewriter) const override { - auto type = op.getAllocaType(); - mlir::MemRefType memreftype; - - if (type.isa()) { - auto integerType = - mlir::IntegerType::get(getContext(), 8, mlir::IntegerType::Signless); - memreftype = mlir::MemRefType::get({}, integerType); - } else if (type.isa()) { - mlir::cir::ArrayType arraytype = type.dyn_cast(); - memreftype = - mlir::MemRefType::get(arraytype.getSize(), arraytype.getEltType()); - } else if (type.isa() || type.isa()) { - memreftype = mlir::MemRefType::get({}, op.getAllocaType()); - } else if (type.isa()) { - auto ptrType = type.cast(); - auto innerMemref = mlir::MemRefType::get({-1}, ptrType.getPointee()); - memreftype = mlir::MemRefType::get({}, innerMemref); - } else { - llvm_unreachable("type to be allocated not supported yet"); - } + matchAndRewrite(mlir::cir::AllocaOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto type = adaptor.getAllocaType(); + auto mlirType = getTypeConverter()->convertType(type); + + auto memreftype = mlir::MemRefType::get({}, mlirType); rewriter.replaceOpWithNewOp(op, memreftype, op.getAlignmentAttr()); return mlir::LogicalResult::success(); @@ -131,44 +119,39 @@ class CIRLoadLowering : public mlir::OpConversionPattern { } }; -class CIRStoreLowering : public mlir::ConversionPattern { +class CIRStoreLowering : public mlir::OpConversionPattern { public: - CIRStoreLowering(mlir::MLIRContext *ctx) - : mlir::ConversionPattern(mlir::cir::StoreOp::getOperationName(), 1, - ctx) {} + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::Operation *op, ArrayRef operands, + matchAndRewrite(mlir::cir::StoreOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, operands[0], - operands[1]); + rewriter.replaceOpWithNewOp(op, adaptor.getValue(), + adaptor.getAddr()); return mlir::LogicalResult::success(); } }; class CIRConstantLowering - : public mlir::OpRewritePattern { + : public mlir::OpConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ConstantOp op, - mlir::PatternRewriter &rewriter) const override { - if (op.getType().isa()) { - mlir::Type type = - mlir::IntegerType::get(getContext(), 8, mlir::IntegerType::Signless); - mlir::TypedAttr IntegerAttr; - if (op.getValue() == - mlir::cir::BoolAttr::get( - getContext(), ::mlir::cir::BoolType::get(getContext()), true)) - IntegerAttr = mlir::IntegerAttr::get(type, 1); - else - IntegerAttr = mlir::IntegerAttr::get(type, 0); - rewriter.replaceOpWithNewOp(op, type, - IntegerAttr); - } else - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getValue()); + matchAndRewrite(mlir::cir::ConstantOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto ty = getTypeConverter()->convertType(op.getType()); + mlir::TypedAttr value; + if (mlir::isa(op.getType())) { + auto boolValue = mlir::cast(op.getValue()); + value = rewriter.getIntegerAttr(ty, boolValue.getValue()); + } else { + auto cirIntAttr = mlir::dyn_cast(op.getValue()); + assert(cirIntAttr && "NYI non cir.int attr"); + value = rewriter.getIntegerAttr( + ty, cast(op.getValue()).getValue()); + } + rewriter.replaceOpWithNewOp(op, ty, value); return mlir::LogicalResult::success(); } }; @@ -211,29 +194,28 @@ class CIRFuncLowering : public mlir::OpConversionPattern { } }; -class CIRUnaryOpLowering : public mlir::OpRewritePattern { +class CIRUnaryOpLowering + : public mlir::OpConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::UnaryOp op, - mlir::PatternRewriter &rewriter) const override { - mlir::Type type = op.getInput().getType(); - assert(type.isa() && "operand type not supported yet"); + matchAndRewrite(mlir::cir::UnaryOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto input = adaptor.getInput(); + auto type = getTypeConverter()->convertType(op.getType()); switch (op.getKind()) { case mlir::cir::UnaryOpKind::Inc: { auto One = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getInput(), One); + rewriter.replaceOpWithNewOp(op, type, input, One); break; } case mlir::cir::UnaryOpKind::Dec: { auto One = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getInput(), One); + rewriter.replaceOpWithNewOp(op, type, input, One); break; } case mlir::cir::UnaryOpKind::Plus: { @@ -243,15 +225,14 @@ class CIRUnaryOpLowering : public mlir::OpRewritePattern { case mlir::cir::UnaryOpKind::Minus: { auto Zero = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, 0)); - rewriter.replaceOpWithNewOp(op, op.getType(), Zero, - op.getInput()); + rewriter.replaceOpWithNewOp(op, type, Zero, input); break; } case mlir::cir::UnaryOpKind::Not: { auto MinusOne = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, -1)); - rewriter.replaceOpWithNewOp(op, op.getType(), - MinusOne, op.getInput()); + rewriter.replaceOpWithNewOp(op, type, MinusOne, + input); break; } } @@ -260,77 +241,78 @@ class CIRUnaryOpLowering : public mlir::OpRewritePattern { } }; -class CIRBinOpLowering : public mlir::OpRewritePattern { +class CIRBinOpLowering : public mlir::OpConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BinOp op, - mlir::PatternRewriter &rewriter) const override { - assert((op.getLhs().getType() == op.getRhs().getType()) && + matchAndRewrite(mlir::cir::BinOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert((adaptor.getLhs().getType() == adaptor.getRhs().getType()) && "inconsistent operands' types not supported yet"); - mlir::Type type = op.getRhs().getType(); - assert((type.isa() || type.isa()) && + mlir::Type mlirType = getTypeConverter()->convertType(op.getType()); + assert((mlirType.isa() || + mlirType.isa()) && "operand type not supported yet"); switch (op.getKind()) { case mlir::cir::BinOpKind::Add: - if (type.isa()) + if (mlirType.isa()) rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Sub: - if (type.isa()) + if (mlirType.isa()) rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Mul: - if (type.isa()) + if (mlirType.isa()) rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Div: - if (type.isa()) { - if (type.isSignlessInteger()) + if (mlirType.isa()) { + if (mlirType.isSignlessInteger()) rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else - llvm_unreachable("integer type not supported in CIR yet"); + llvm_unreachable("integer mlirType not supported in CIR yet"); } else rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Rem: - if (type.isa()) { - if (type.isSignlessInteger()) + if (mlirType.isa()) { + if (mlirType.isSignlessInteger()) rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else - llvm_unreachable("integer type not supported in CIR yet"); + llvm_unreachable("integer mlirType not supported in CIR yet"); } else rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::And: rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Or: - rewriter.replaceOpWithNewOp(op, op.getType(), - op.getLhs(), op.getRhs()); + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Xor: rewriter.replaceOpWithNewOp( - op, op.getType(), op.getLhs(), op.getRhs()); + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; } @@ -338,17 +320,18 @@ class CIRBinOpLowering : public mlir::OpRewritePattern { } }; -class CIRCmpOpLowering : public mlir::OpRewritePattern { +class CIRCmpOpLowering : public mlir::OpConversionPattern { public: - using OpRewritePattern::OpRewritePattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CmpOp op, - mlir::PatternRewriter &rewriter) const override { - auto type = op.getLhs().getType(); + matchAndRewrite(mlir::cir::CmpOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto type = adaptor.getLhs().getType(); auto integerType = mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); + mlir::Value mlirResult; switch (op.getKind()) { case mlir::cir::CmpOpKind::gt: { if (type.isa()) { @@ -356,16 +339,16 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::arith::CmpIPredicate::ugt; - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpFPredicateAttr::get( getContext(), mlir::arith::CmpFPredicate::UGT), - op.getLhs(), op.getRhs(), + adaptor.getLhs(), adaptor.getRhs(), mlir::arith::FastMathFlagsAttr::get( getContext(), mlir::arith::FastMathFlags::none)); } else { @@ -379,16 +362,16 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::arith::CmpIPredicate::uge; - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpFPredicateAttr::get( getContext(), mlir::arith::CmpFPredicate::UGE), - op.getLhs(), op.getRhs(), + adaptor.getLhs(), adaptor.getRhs(), mlir::arith::FastMathFlagsAttr::get( getContext(), mlir::arith::FastMathFlags::none)); } else { @@ -402,19 +385,18 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::arith::CmpIPredicate::ult; - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpFPredicateAttr::get( getContext(), mlir::arith::CmpFPredicate::ULT), - op.getLhs(), op.getRhs(), + adaptor.getLhs(), adaptor.getRhs(), mlir::arith::FastMathFlagsAttr::get( getContext(), mlir::arith::FastMathFlags::none)); - } else { llvm_unreachable("Unknown Operand Type"); } @@ -426,16 +408,16 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { if (!type.isSignlessInteger()) llvm_unreachable("integer type not supported in CIR yet"); cmpIType = mlir::arith::CmpIPredicate::ule; - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpFPredicateAttr::get( getContext(), mlir::arith::CmpFPredicate::ULE), - op.getLhs(), op.getRhs(), + adaptor.getLhs(), adaptor.getRhs(), mlir::arith::FastMathFlagsAttr::get( getContext(), mlir::arith::FastMathFlags::none)); } else { @@ -445,17 +427,17 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { } case mlir::cir::CmpOpKind::eq: { if (type.isa()) { - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpIPredicateAttr::get(getContext(), mlir::arith::CmpIPredicate::eq), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpFPredicateAttr::get( getContext(), mlir::arith::CmpFPredicate::UEQ), - op.getLhs(), op.getRhs(), + adaptor.getLhs(), adaptor.getRhs(), mlir::arith::FastMathFlagsAttr::get( getContext(), mlir::arith::FastMathFlags::none)); } else { @@ -465,17 +447,17 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { } case mlir::cir::CmpOpKind::ne: { if (type.isa()) { - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpIPredicateAttr::get(getContext(), mlir::arith::CmpIPredicate::ne), - op.getLhs(), op.getRhs()); + adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { - rewriter.replaceOpWithNewOp( - op, integerType, + mlirResult = rewriter.create( + op.getLoc(), integerType, mlir::arith::CmpFPredicateAttr::get( getContext(), mlir::arith::CmpFPredicate::UNE), - op.getLhs(), op.getRhs(), + adaptor.getLhs(), adaptor.getRhs(), mlir::arith::FastMathFlagsAttr::get( getContext(), mlir::arith::FastMathFlags::none)); } else { @@ -485,6 +467,13 @@ class CIRCmpOpLowering : public mlir::OpRewritePattern { } } + // MLIR comparison ops return i1, but cir::CmpOp returns the same type as + // the LHS value. Since this return value can be used later, we need to + // restore the type with the extension below. + auto mlirResultTy = getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, mlirResultTy, + mlirResult); + return mlir::LogicalResult::success(); } }; @@ -501,12 +490,13 @@ class CIRBrOpLowering : public mlir::OpRewritePattern { } }; -class CIRScopeOpLowering : public mlir::OpRewritePattern { - using mlir::OpRewritePattern::OpRewritePattern; +class CIRScopeOpLowering + : public mlir::OpConversionPattern { + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ScopeOp scopeOp, - mlir::PatternRewriter &rewriter) const override { + matchAndRewrite(mlir::cir::ScopeOp scopeOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { // Empty scope: just remove it. if (scopeOp.getRegion().empty()) { rewriter.eraseOp(scopeOp); @@ -520,9 +510,14 @@ class CIRScopeOpLowering : public mlir::OpRewritePattern { terminator, terminator->getOperands()); } + SmallVector mlirResultTypes; + if (mlir::failed(getTypeConverter()->convertTypes(scopeOp->getResultTypes(), + mlirResultTypes))) + return mlir::LogicalResult::failure(); + rewriter.setInsertionPoint(scopeOp); auto newScopeOp = rewriter.create( - scopeOp.getLoc(), scopeOp.getResultTypes()); + scopeOp.getLoc(), mlirResultTypes); rewriter.inlineRegionBefore(scopeOp.getScopeRegion(), newScopeOp.getBodyRegion(), newScopeOp.getBodyRegion().end()); @@ -534,17 +529,19 @@ class CIRScopeOpLowering : public mlir::OpRewritePattern { void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(patterns.getContext()); - patterns.add(converter, patterns.getContext()); + patterns.add(patterns.getContext()); + + patterns.add(converter, patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { mlir::TypeConverter converter; converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { - return mlir::MemRefType::get({-1}, type.getPointee()); + auto ty = converter.convertType(type.getPointee()); + return mlir::MemRefType::get({}, ty); }); converter.addConversion( [&](mlir::IntegerType type) -> mlir::Type { return type; }); @@ -552,6 +549,19 @@ static mlir::TypeConverter prepareTypeConverter() { [&](mlir::FloatType type) -> mlir::Type { return type; }); converter.addConversion( [&](mlir::cir::VoidType type) -> mlir::Type { return {}; }); + converter.addConversion([&](mlir::cir::IntType type) -> mlir::Type { + // arith dialect ops doesn't take signed integer -- drop cir sign here + return mlir::IntegerType::get( + type.getContext(), type.getWidth(), + mlir::IntegerType::SignednessSemantics::Signless); + }); + converter.addConversion([&](mlir::cir::BoolType type) -> mlir::Type { + return mlir::IntegerType::get(type.getContext(), 8); + }); + converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { + auto elementType = converter.convertType(type.getEltType()); + return mlir::MemRefType::get(type.getSize(), elementType); + }); return converter; } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/array.cir b/clang/test/CIR/Lowering/ThroughMLIR/array.cir index fc69ff680f4e..40e622928769 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/array.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/array.cir @@ -1,5 +1,4 @@ // RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-opt %s -cir-to-mlir -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { @@ -10,15 +9,7 @@ module { // MLIR: module { // MLIR-NEXT: func @foo() { -// MLIR-NEXT: = memref.alloca() {alignment = 16 : i64} : memref<10xi32> +// MLIR-NEXT: = memref.alloca() {alignment = 16 : i64} : memref> // MLIR-NEXT: return // MLIR-NEXT: } // MLIR-NEXT: } - -// LLVM: = alloca i32, i64 10, align 16 -// LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } undef, ptr %1, 0 -// LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } %2, ptr %1, 1 -// LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } %3, i64 0, 2 -// LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } %4, i64 10, 3, 0 -// LLVM-NEXT: = insertvalue { ptr, ptr, i64, [1 x i64], [1 x i64] } %5, i64 1, 4, 0 -// LLVM-NEXT: ret void diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir index 138ada1dd42e..51c89f564efa 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir @@ -1,55 +1,56 @@ // RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %3 = cir.const(2 : i32) : i32 cir.store %3, %0 : i32, cir.ptr - %4 = cir.const(1 : i32) : i32 cir.store %4, %1 : i32, cir.ptr - %5 = cir.load %0 : cir.ptr , i32 - %6 = cir.load %1 : cir.ptr , i32 - %7 = cir.binop(mul, %5, %6) : i32 - cir.store %7, %2 : i32, cir.ptr - %8 = cir.load %2 : cir.ptr , i32 - %9 = cir.load %1 : cir.ptr , i32 - %10 = cir.binop(div, %8, %9) : i32 - cir.store %10, %2 : i32, cir.ptr - %11 = cir.load %2 : cir.ptr , i32 - %12 = cir.load %1 : cir.ptr , i32 - %13 = cir.binop(rem, %11, %12) : i32 - cir.store %13, %2 : i32, cir.ptr - %14 = cir.load %2 : cir.ptr , i32 - %15 = cir.load %1 : cir.ptr , i32 - %16 = cir.binop(add, %14, %15) : i32 - cir.store %16, %2 : i32, cir.ptr - %17 = cir.load %2 : cir.ptr , i32 - %18 = cir.load %1 : cir.ptr , i32 - %19 = cir.binop(sub, %17, %18) : i32 - cir.store %19, %2 : i32, cir.ptr + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<2> : !u32i) : !u32i cir.store %3, %0 : !u32i, cir.ptr + %4 = cir.const(#cir.int<1> : !u32i) : !u32i cir.store %4, %1 : !u32i, cir.ptr + %5 = cir.load %0 : cir.ptr , !u32i + %6 = cir.load %1 : cir.ptr , !u32i + %7 = cir.binop(mul, %5, %6) : !u32i + cir.store %7, %2 : !u32i, cir.ptr + %8 = cir.load %2 : cir.ptr , !u32i + %9 = cir.load %1 : cir.ptr , !u32i + %10 = cir.binop(div, %8, %9) : !u32i + cir.store %10, %2 : !u32i, cir.ptr + %11 = cir.load %2 : cir.ptr , !u32i + %12 = cir.load %1 : cir.ptr , !u32i + %13 = cir.binop(rem, %11, %12) : !u32i + cir.store %13, %2 : !u32i, cir.ptr + %14 = cir.load %2 : cir.ptr , !u32i + %15 = cir.load %1 : cir.ptr , !u32i + %16 = cir.binop(add, %14, %15) : !u32i + cir.store %16, %2 : !u32i, cir.ptr + %17 = cir.load %2 : cir.ptr , !u32i + %18 = cir.load %1 : cir.ptr , !u32i + %19 = cir.binop(sub, %17, %18) : !u32i + cir.store %19, %2 : !u32i, cir.ptr // should move to cir.shift, which only accepts // CIR types. - // %20 = cir.load %2 : cir.ptr , i32 - // %21 = cir.load %1 : cir.ptr , i32 - // %22 = cir.binop(shr, %20, %21) : i32 - // cir.store %22, %2 : i32, cir.ptr - // %23 = cir.load %2 : cir.ptr , i32 - // %24 = cir.load %1 : cir.ptr , i32 - // %25 = cir.binop(shl, %23, %24) : i32 - // cir.store %25, %2 : i32, cir.ptr - %26 = cir.load %2 : cir.ptr , i32 - %27 = cir.load %1 : cir.ptr , i32 - %28 = cir.binop(and, %26, %27) : i32 - cir.store %28, %2 : i32, cir.ptr - %29 = cir.load %2 : cir.ptr , i32 - %30 = cir.load %1 : cir.ptr , i32 - %31 = cir.binop(xor, %29, %30) : i32 - cir.store %31, %2 : i32, cir.ptr - %32 = cir.load %2 : cir.ptr , i32 - %33 = cir.load %1 : cir.ptr , i32 - %34 = cir.binop(or, %32, %33) : i32 - cir.store %34, %2 : i32, cir.ptr + // %20 = cir.load %2 : cir.ptr , !u32i + // %21 = cir.load %1 : cir.ptr , !u32i + // %22 = cir.binop(shr, %20, %21) : !u32i + // cir.store %22, %2 : !u32i, cir.ptr + // %23 = cir.load %2 : cir.ptr , !u32i + // %24 = cir.load %1 : cir.ptr , !u32i + // %25 = cir.binop(shl, %23, %24) : !u32i + // cir.store %25, %2 : !u32i, cir.ptr + %26 = cir.load %2 : cir.ptr , !u32i + %27 = cir.load %1 : cir.ptr , !u32i + %28 = cir.binop(and, %26, %27) : !u32i + cir.store %28, %2 : !u32i, cir.ptr + %29 = cir.load %2 : cir.ptr , !u32i + %30 = cir.load %1 : cir.ptr , !u32i + %31 = cir.binop(xor, %29, %30) : !u32i + cir.store %31, %2 : !u32i, cir.ptr + %32 = cir.load %2 : cir.ptr , !u32i + %33 = cir.load %1 : cir.ptr , !u32i + %34 = cir.binop(or, %32, %33) : !u32i + cir.store %34, %2 : !u32i, cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir index 5a8816a1ef99..190d8a2256d4 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir @@ -1,31 +1,32 @@ // RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a"] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b"] {alignment = 4 : i64} + %0 = cir.alloca !s32i, cir.ptr , ["a"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b"] {alignment = 4 : i64} %2 = cir.alloca f32, cir.ptr , ["c"] {alignment = 4 : i64} %3 = cir.alloca f32, cir.ptr , ["d"] {alignment = 4 : i64} %4 = cir.alloca !cir.bool, cir.ptr , ["e"] {alignment = 1 : i64} - %5 = cir.load %0 : cir.ptr , i32 - %6 = cir.load %1 : cir.ptr , i32 - %7 = cir.cmp(gt, %5, %6) : i32, !cir.bool - %8 = cir.load %0 : cir.ptr , i32 - %9 = cir.load %1 : cir.ptr , i32 - %10 = cir.cmp(eq, %8, %9) : i32, !cir.bool - %11 = cir.load %0 : cir.ptr , i32 - %12 = cir.load %1 : cir.ptr , i32 - %13 = cir.cmp(lt, %11, %12) : i32, !cir.bool - %14 = cir.load %0 : cir.ptr , i32 - %15 = cir.load %1 : cir.ptr , i32 - %16 = cir.cmp(ge, %14, %15) : i32, !cir.bool - %17 = cir.load %0 : cir.ptr , i32 - %18 = cir.load %1 : cir.ptr , i32 - %19 = cir.cmp(ne, %17, %18) : i32, !cir.bool - %20 = cir.load %0 : cir.ptr , i32 - %21 = cir.load %1 : cir.ptr , i32 - %22 = cir.cmp(le, %20, %21) : i32, !cir.bool + %5 = cir.load %0 : cir.ptr , !s32i + %6 = cir.load %1 : cir.ptr , !s32i + %7 = cir.cmp(gt, %5, %6) : !s32i, !cir.bool + %8 = cir.load %0 : cir.ptr , !s32i + %9 = cir.load %1 : cir.ptr , !s32i + %10 = cir.cmp(eq, %8, %9) : !s32i, !cir.bool + %11 = cir.load %0 : cir.ptr , !s32i + %12 = cir.load %1 : cir.ptr , !s32i + %13 = cir.cmp(lt, %11, %12) : !s32i, !cir.bool + %14 = cir.load %0 : cir.ptr , !s32i + %15 = cir.load %1 : cir.ptr , !s32i + %16 = cir.cmp(ge, %14, %15) : !s32i, !cir.bool + %17 = cir.load %0 : cir.ptr , !s32i + %18 = cir.load %1 : cir.ptr , !s32i + %19 = cir.cmp(ne, %17, %18) : !s32i, !cir.bool + %20 = cir.load %0 : cir.ptr , !s32i + %21 = cir.load %1 : cir.ptr , !s32i + %22 = cir.cmp(le, %20, %21) : !s32i, !cir.bool %23 = cir.load %2 : cir.ptr , f32 %24 = cir.load %3 : cir.ptr , f32 %25 = cir.cmp(gt, %23, %24) : f32, !cir.bool diff --git a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir index 4f1b9cccb312..9cc9cc45b65f 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir @@ -1,23 +1,24 @@ // RUN: cir-opt %s -canonicalize -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-opt %s -canonicalize -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %1 = cir.const(1 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr + %0 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr cir.br ^bb2 ^bb1: // no predecessors - %2 = cir.load %0 : cir.ptr , i32 - %3 = cir.const(1 : i32) : i32 - %4 = cir.binop(add, %2, %3) : i32 - cir.store %4, %0 : i32, cir.ptr + %2 = cir.load %0 : cir.ptr , !u32i + %3 = cir.const(#cir.int<1> : !u32i) : !u32i + %4 = cir.binop(add, %2, %3) : !u32i + cir.store %4, %0 : !u32i, cir.ptr cir.br ^bb2 ^bb2: // 2 preds: ^bb0, ^bb1 - %5 = cir.load %0 : cir.ptr , i32 - %6 = cir.const(2 : i32) : i32 - %7 = cir.binop(add, %5, %6) : i32 - cir.store %7, %0 : i32, cir.ptr + %5 = cir.load %0 : cir.ptr , !u32i + %6 = cir.const(#cir.int<2> : !u32i) : !u32i + %7 = cir.binop(add, %5, %6) : !u32i + cir.store %7, %0 : !u32i, cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir index e957d3ef16cd..ad338992806b 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir @@ -1,13 +1,14 @@ // RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int module { - cir.func @foo() -> i32 { - %0 = cir.alloca i32, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.const(1 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr - %2 = cir.load %0 : cir.ptr , i32 - cir.return %2 : i32 + cir.func @foo() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !u32i + cir.return %2 : !u32i } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/scope.cir b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir index 310580683cd2..6d877351b7c6 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/scope.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir @@ -1,12 +1,13 @@ // RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!u32i = !cir.int module { cir.func @foo() { cir.scope { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.const(4 : i32) : i32 - cir.store %1, %0 : i32, cir.ptr + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<4> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr } cir.return } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir index 57541c194206..45368fb48f40 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir @@ -1,21 +1,22 @@ // RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.const(2 : i32) : i32 - cir.store %2, %0 : i32, cir.ptr - cir.store %2, %1 : i32, cir.ptr + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.store %2, %1 : !s32i, cir.ptr - %3 = cir.load %0 : cir.ptr , i32 - %4 = cir.unary(inc, %3) : i32, i32 - cir.store %4, %0 : i32, cir.ptr + %3 = cir.load %0 : cir.ptr , !s32i + %4 = cir.unary(inc, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, cir.ptr - %5 = cir.load %1 : cir.ptr , i32 - %6 = cir.unary(dec, %5) : i32, i32 - cir.store %6, %1 : i32, cir.ptr + %5 = cir.load %1 : cir.ptr , !s32i + %6 = cir.unary(dec, %5) : !s32i, !s32i + cir.store %6, %1 : !s32i, cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir index 09f16f4d342f..013bc65e95e3 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir @@ -1,21 +1,22 @@ // RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca i32, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca i32, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.const(2 : i32) : i32 - cir.store %2, %0 : i32, cir.ptr - cir.store %2, %1 : i32, cir.ptr + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.const(#cir.int<2> : !s32i) : !s32i + cir.store %2, %0 : !s32i, cir.ptr + cir.store %2, %1 : !s32i, cir.ptr - %3 = cir.load %0 : cir.ptr , i32 - %4 = cir.unary(plus, %3) : i32, i32 - cir.store %4, %0 : i32, cir.ptr + %3 = cir.load %0 : cir.ptr , !s32i + %4 = cir.unary(plus, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, cir.ptr - %5 = cir.load %1 : cir.ptr , i32 - %6 = cir.unary(minus, %5) : i32, i32 - cir.store %6, %1 : i32, cir.ptr + %5 = cir.load %1 : cir.ptr , !s32i + %6 = cir.unary(minus, %5) : !s32i, !s32i + cir.store %6, %1 : !s32i, cir.ptr cir.return } } From 266a6e2d7ab132db5d64d4d0ab7b346c3db82325 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 26 Sep 2023 09:10:21 +0300 Subject: [PATCH 1208/2301] [CIR][CIRGen] Revisiting CIR generation for bitfields. Fixes #13 (#268) This is an updated PR for [PR explained in [PR #261](https://github.com/llvm/clangir/pull/261) which now can be safely closed. First of all, let me introduce how do the bitfields looks like in CIR. For the struct `S` defined as following: ``` typedef struct { int a : 4; int b : 27; int c : 17; int d : 2; int e : 15; unsigned f; } S; ``` the CIR type is `!ty_22S22 = !cir.struct` where all the bitfields are packed in the first three storages. Also, the next bugs was fixed: - type mismatch - index out of bounds - single bitfield of size < 8 The cases covered with tests. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 64 +++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 232 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 138 ++++++----- clang/lib/CIR/CodeGen/CIRGenFunction.h | 16 +- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 10 + clang/lib/CIR/CodeGen/CIRGenValue.h | 34 +++ .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 9 +- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/bitfields.c | 99 ++++++++ clang/test/CIR/CodeGen/bitfields.cpp | 81 ++++++ 10 files changed, 600 insertions(+), 84 deletions(-) create mode 100644 clang/test/CIR/CodeGen/bitfields.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 572b85be4c2c..ac54edaa3eaa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -460,6 +460,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return getConstInt( loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); } + mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, + const llvm::APInt &val) { + return create(loc, typ, + getAttr(typ, val)); + } mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), getCIRBoolAttr(state)); @@ -677,6 +682,65 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::UnaryOpKind::Not, value); } + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + const llvm::APInt &rhs) { + return create( + lhs.getLoc(), lhs.getType(), kind, lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); + } + + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + mlir::Value rhs) { + return create(lhs.getLoc(), lhs.getType(), kind, lhs, + rhs); + } + + mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, + bool isShiftLeft) { + return create( + lhs.getLoc(), lhs.getType(), lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); + } + + mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) { + auto width = lhs.getType().dyn_cast().getWidth(); + auto shift = llvm::APInt(width, bits); + return createShift(lhs, shift, isShiftLeft); + } + + mlir::Value createShiftLeft(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, true); + } + + mlir::Value createShiftRight(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, false); + } + + mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, + unsigned bits) { + auto val = llvm::APInt::getLowBitsSet(size, bits); + auto typ = mlir::cir::IntType::get(getContext(), size, false); + return getConstAPInt(loc, typ, val); + } + + mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::And, val); + } + + mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); + } + + mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::Or, val); + } + + mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); + } + //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 40f2b97ab919..329d5e813d28 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -21,6 +21,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/Basic/Builtins.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -128,6 +129,7 @@ static Address buildPointerWithAlignment(const Expr *E, if (PtrTy->getPointeeType()->isVoidType()) break; assert(!UnimplementedFeature::tbaa()); + LValueBaseInfo InnerBaseInfo; Address Addr = CGF.buildPointerWithAlignment( CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull); @@ -211,13 +213,79 @@ static Address buildPointerWithAlignment(const Expr *E, return Address(CGF.buildScalarExpr(E), Align); } +/// Helper method to check if the underlying ABI is AAPCS +static bool isAAPCS(const TargetInfo &TargetInfo) { + return TargetInfo.getABI().starts_with("aapcs"); +} + +Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base, + const FieldDecl *field, + unsigned index, + unsigned size) { + if (index == 0) + return base.getAddress(); + + auto loc = getLoc(field->getLocation()); + auto fieldType = builder.getUIntNTy(size); + + auto fieldPtr = + mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); + auto sea = getBuilder().createGetMember( + loc, fieldPtr, base.getPointer(), field->getName(), index); + + return Address(sea, CharUnits::One()); +} + +static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base, + const CIRGenBitFieldInfo &info, + const FieldDecl *field) { + return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth && + info.VolatileStorageSize != 0 && + field->getType() + .withCVRQualifiers(base.getVRQualifiers()) + .isVolatileQualified(); +} + +LValue CIRGenFunction::buildLValueForBitField(LValue base, + const FieldDecl *field) { + + LValueBaseInfo BaseInfo = base.getBaseInfo(); + const RecordDecl *rec = field->getParent(); + auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent()); + auto &info = layout.getBitFieldInfo(field); + auto useVolatile = useVolatileForBitField(CGM, base, info, field); + unsigned Idx = layout.getCIRFieldNo(field); + + if (useVolatile || + (IsInPreservedAIRegion || + (getDebugInfo() && rec->hasAttr()))) { + llvm_unreachable("NYI"); + } + + const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize; + Address Addr = getAddrOfBitFieldStorage(base, field, Idx, SS); + + // Get the access type. + mlir::Type FieldIntTy = builder.getUIntNTy(SS); + + auto loc = getLoc(field->getLocation()); + if (Addr.getElementType() != FieldIntTy) + Addr = builder.createElementBitCast(loc, Addr, FieldIntTy); + + QualType fieldType = + field->getType().withCVRQualifiers(base.getVRQualifiers()); + + assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields"); + LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); + return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo); +} + LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { LValueBaseInfo BaseInfo = base.getBaseInfo(); - if (field->isBitField()) { - llvm_unreachable("NYI"); - } + if (field->isBitField()) + return buildLValueForBitField(base, field); // Fields of may-alias structures are may-alais themselves. // FIXME: this hould get propagated down through anonymous structs and unions. @@ -518,12 +586,55 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, /// method emits the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { - assert(LV.isSimple() && "not implemented"); assert(!LV.getType()->isFunctionType()); assert(!(LV.getType()->isConstantMatrixType()) && "not implemented"); - // Everything needs a load. - return RValue::get(buildLoadOfScalar(LV, Loc)); + if (LV.isBitField()) + return buildLoadOfBitfieldLValue(LV, Loc); + + if (LV.isSimple()) + return RValue::get(buildLoadOfScalar(LV, Loc)); + llvm_unreachable("NYI"); +} + +RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, + SourceLocation Loc) { + const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo(); + + // Get the output type. + mlir::Type ResLTy = convertType(LV.getType()); + Address Ptr = LV.getBitFieldAddress(); + mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr); + auto ValWidth = Val.getType().cast().getWidth(); + + bool UseVolatile = LV.isVolatileQualified() && + Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); + const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; + const unsigned StorageSize = + UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; + + if (Info.IsSigned) { + assert(static_cast(Offset + Info.Size) <= StorageSize); + + mlir::Type typ = builder.getSIntNTy(ValWidth); + Val = builder.createIntCast(Val, typ); + + unsigned HighBits = StorageSize - Offset - Info.Size; + if (HighBits) + Val = builder.createShiftLeft(Val, HighBits); + if (Offset + HighBits) + Val = builder.createShiftRight(Val, Offset + HighBits); + } else { + if (Offset) + Val = builder.createShiftRight(Val, Offset); + + if (static_cast(Offset) + Info.Size < StorageSize) + Val = builder.createAnd(Val, + llvm::APInt::getLowBitsSet(ValWidth, Info.Size)); + } + Val = builder.createIntCast(Val, ResLTy); + assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); + return RValue::get(Val); } void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { @@ -546,6 +657,83 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { buildStoreOfScalar(Src.getScalarVal(), Dst); } +void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, + mlir::Value &Result) { + const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo(); + mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType()); + Address Ptr = Dst.getBitFieldAddress(); + + // Get the source value, truncated to the width of the bit-field. + mlir::Value SrcVal = Src.getScalarVal(); + + // Cast the source to the storage type and shift it into place. + SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType()); + auto SrcWidth = SrcVal.getType().cast().getWidth(); + mlir::Value MaskedVal = SrcVal; + + const bool UseVolatile = + CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() && + Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); + const unsigned StorageSize = + UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; + const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; + // See if there are other bits in the bitfield's storage we'll need to load + // and mask together with source before storing. + if (StorageSize != Info.Size) { + assert(StorageSize > Info.Size && "Invalid bitfield size."); + + mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc()); + + // Mask the source value as needed. + if (!hasBooleanRepresentation(Dst.getType())) + SrcVal = builder.createAnd( + SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size)); + + MaskedVal = SrcVal; + if (Offset) + SrcVal = builder.createShiftLeft(SrcVal, Offset); + + // Mask out the original value. + Val = builder.createAnd( + Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size)); + + // Or together the unchanged values and the source value. + SrcVal = builder.createOr(Val, SrcVal); + + } else { + // According to the AACPS: + // When a volatile bit-field is written, and its container does not overlap + // with any non-bit-field member, its container must be read exactly once + // and written exactly once using the access width appropriate to the type + // of the container. The two accesses are not atomic. + if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) && + CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) + llvm_unreachable("volatile bit-field is not implemented for the AACPS"); + } + + // Write the new value back out. + // TODO: constant matrix type, volatile, no init, non temporal, TBAA + buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(), + Dst.getBaseInfo(), false, false); + + // Return the new value of the bit-field. + mlir::Value ResultVal = MaskedVal; + ResultVal = builder.createIntCast(ResultVal, ResLTy); + + // Sign extend the value if needed. + if (Info.IsSigned) { + assert(Info.Size <= StorageSize); + unsigned HighBits = StorageSize - Info.Size; + + if (HighBits) { + ResultVal = builder.createShiftLeft(ResultVal, HighBits); + ResultVal = builder.createShiftRight(ResultVal, HighBits); + } + } + + Result = buildFromMemory(ResultVal, Dst.getType()); +} + static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, const VarDecl *VD) { QualType T = E->getType(); @@ -769,7 +957,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { LValue LV = buildLValue(E->getLHS()); SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; - buildStoreThroughLValue(RV, LV); + if (LV.isBitField()) { + mlir::Value result; + buildStoreThroughBitfieldLValue(RV, LV, result); + } else { + buildStoreThroughLValue(RV, LV); + } + assert(!getContext().getLangOpts().OpenMP && "last priv cond not implemented"); return LV; @@ -2205,6 +2399,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty, mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, SourceLocation Loc) { + return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(), + lvalue.isNontemporal()); +} + +mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, + mlir::Location Loc) { return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), lvalue.getType(), Loc, lvalue.getBaseInfo(), lvalue.isNontemporal()); @@ -2222,6 +2423,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal) { + return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo, + isNontemporal); +} + +mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, + QualType Ty, mlir::Location Loc, + LValueBaseInfo BaseInfo, + bool isNontemporal) { // TODO(CIR): this has fallen out of sync with codegen // Atomic operations have to be done on integral types @@ -2231,15 +2440,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, } mlir::cir::LoadOp Load = builder.create( - getLoc(Loc), Addr.getElementType(), Addr.getPointer()); + Loc, Addr.getElementType(), Addr.getPointer()); if (isNontemporal) { llvm_unreachable("NYI"); } - - // TODO: TBAA - - // TODO: buildScalarRangeCheck + + assert(!UnimplementedFeature::tbaa() && "NYI"); + assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); return buildFromMemory(Load, Ty); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 5a48e44f61eb..f4a76958bcf2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1060,9 +1060,7 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, std::swap(pointerOperand, indexOperand); } - bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); - - auto &DL = CGF.CGM.getDataLayout(); + bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); // Some versions of glibc and gcc use idioms (particularly in their malloc // routines) that add a pointer-sized integer (known to be a pointer value) @@ -1863,7 +1861,7 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHS.isBitField()) { - llvm_unreachable("NYI"); + CGF.buildStoreThroughBitfieldLValue(RValue::get(RHS), LHS, RHS); } else { CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); CIRGenFunction::SourceLocRAIIObject loc{CGF, @@ -1964,25 +1962,27 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( auto condV = CGF.evaluateExprAsBool(condExpr); assert(!UnimplementedFeature::incrementProfileCounter()); - return builder.create( - loc, condV, /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto lhs = Visit(lhsExpr); - if (!lhs) { - lhs = builder.getNullValue(CGF.VoidTy, loc); - lhsIsVoid = true; - } - builder.create(loc, lhs); - }, - /*elseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto rhs = Visit(rhsExpr); - if (lhsIsVoid) { - assert(!rhs && "lhs and rhs types must match"); - rhs = builder.getNullValue(CGF.VoidTy, loc); - } - builder.create(loc, rhs); - }).getResult(); + return builder + .create( + loc, condV, /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto lhs = Visit(lhsExpr); + if (!lhs) { + lhs = builder.getNullValue(CGF.VoidTy, loc); + lhsIsVoid = true; + } + builder.create(loc, lhs); + }, + /*elseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto rhs = Visit(rhsExpr); + if (lhsIsVoid) { + assert(!rhs && "lhs and rhs types must match"); + rhs = builder.getNullValue(CGF.VoidTy, loc); + } + builder.create(loc, rhs); + }) + .getResult(); } mlir::Value condV = CGF.buildOpOnBoolExpr(condExpr, loc, lhsExpr, rhsExpr); @@ -2012,51 +2012,53 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( } }; - return builder.create( - loc, condV, /*trueBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - auto lhs = Visit(lhsExpr); - eval.end(CGF); - - if (lhs) { - yieldTy = lhs.getType(); - b.create(loc, lhs); - return; - } - // If LHS or RHS is a throw or void expression we need to patch arms - // as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - }, - /*falseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - auto rhs = Visit(rhsExpr); - eval.end(CGF); - - if (rhs) { - yieldTy = rhs.getType(); - b.create(loc, rhs); - } else { - // If LHS or RHS is a throw or void expression we need to patch arms - // as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - } - - patchVoidOrThrowSites(); - }).getResult(); + return builder + .create( + loc, condV, /*trueBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScopeContext lexScope{loc, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto lhs = Visit(lhsExpr); + eval.end(CGF); + + if (lhs) { + yieldTy = lhs.getType(); + b.create(loc, lhs); + return; + } + // If LHS or RHS is a throw or void expression we need to patch arms + // as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + }, + /*falseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScopeContext lexScope{loc, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto rhs = Visit(rhsExpr); + eval.end(CGF); + + if (rhs) { + yieldTy = rhs.getType(); + b.create(loc, rhs); + } else { + // If LHS or RHS is a throw or void expression we need to patch + // arms as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + } + + patchVoidOrThrowSites(); + }) + .getResult(); } mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 819a99f81ec7..1890361eaeb1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -869,6 +869,12 @@ class CIRGenFunction : public CIRGenTypeCache { clang::SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal = false); + mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, + mlir::Location Loc, LValueBaseInfo BaseInfo, + bool isNontemporal = false); + + RValue buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc); + /// Load a scalar value from an address, taking care to appropriately convert /// from the memory representation to CIR value representation. mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, @@ -883,6 +889,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// form the memory representation to the CIR value representation. The /// l-value must be a simple l-value. mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); + mlir::Value buildLoadOfScalar(LValue lvalue, mlir::Location Loc); Address buildLoadOfReference(LValue RefLVal, mlir::Location Loc, LValueBaseInfo *PointeeBaseInfo = nullptr); @@ -1237,6 +1244,9 @@ class CIRGenFunction : public CIRGenTypeCache { /// is 'Ty'. void buildStoreThroughLValue(RValue Src, LValue Dst); + void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, + mlir::Value &Result); + mlir::cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is @@ -1514,7 +1524,8 @@ class CIRGenFunction : public CIRGenTypeCache { AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD); LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); - + LValue buildLValueForBitField(LValue base, const FieldDecl *field); + /// Like buildLValueForField, excpet that if the Field is a reference, this /// will return the address of the reference and not the address of the value /// stored in the reference. @@ -1543,6 +1554,9 @@ class CIRGenFunction : public CIRGenTypeCache { return it->second; } + Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, + unsigned index, unsigned size); + /// Given an opaque value expression, return its LValue mapping if it exists, /// otherwise create one. LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e); diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index b1ded0017d59..0a686181db61 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -187,6 +187,16 @@ class CIRGenRecordLayout { /// Check whether this struct can be C++ zero-initialized with a /// zeroinitializer. bool isZeroInitializable() const { return IsZeroInitializable; } + + /// Return the BitFieldInfo that corresponds to the field FD. + const CIRGenBitFieldInfo &getBitFieldInfo(const clang::FieldDecl *FD) const { + FD = FD->getCanonicalDecl(); + assert(FD->isBitField() && "Invalid call for non-bit-field decl!"); + llvm::DenseMap::const_iterator + it = BitFields.find(FD); + assert(it != BitFields.end() && "Unable to find bitfield info"); + return it->second; + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index f84c20c4b136..c6edeb4d4fe4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -15,6 +15,7 @@ #define LLVM_CLANG_LIB_CIR_CIRGENVALUE_H #include "Address.h" +#include "CIRGenRecordLayout.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" @@ -207,6 +208,7 @@ class LValue { mlir::Value V; mlir::Type ElementType; LValueBaseInfo BaseInfo; + const CIRGenBitFieldInfo *BitFieldInfo{0}; public: bool isSimple() const { return LVType == Simple; } @@ -298,6 +300,38 @@ class LValue { const clang::Qualifiers &getQuals() const { return Quals; } clang::Qualifiers &getQuals() { return Quals; } + + // bitfield lvalue + Address getBitFieldAddress() const { + return Address(getBitFieldPointer(), ElementType, getAlignment()); + } + + mlir::Value getBitFieldPointer() const { + assert(isBitField()); + return V; + } + + const CIRGenBitFieldInfo &getBitFieldInfo() const { + assert(isBitField()); + return *BitFieldInfo; + } + + /// Create a new object to represent a bit-field access. + /// + /// \param Addr - The base address of the bit-field sequence this + /// bit-field refers to. + /// \param Info - The information describing how to perform the bit-field + /// access. + static LValue MakeBitfield(Address Addr, const CIRGenBitFieldInfo &Info, + clang::QualType type, LValueBaseInfo BaseInfo) { + LValue R; + R.LVType = BitField; + R.V = Addr.getPointer(); + R.ElementType = Addr.getElementType(); + R.BitFieldInfo = &Info; + R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo); + return R; + } }; /// An aggregate value slot. diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 3910315e3b04..12d2d06bb70e 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -136,7 +136,8 @@ struct CIRRecordLowering final { /// Wraps mlir::cir::IntType with some implicit arguments. mlir::Type getUIntNType(uint64_t NumBits) { - unsigned AlignedBits = llvm::alignTo(NumBits, astContext.getCharWidth()); + unsigned AlignedBits = llvm::PowerOf2Ceil(NumBits); + AlignedBits = std::max(8u, AlignedBits); return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(), AlignedBits, /*isSigned=*/false); } @@ -214,8 +215,8 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, cxxRecordDecl{llvm::dyn_cast(recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, dataLayout{cirGenTypes.getModule().getModule()}, - IsZeroInitializable(true), IsZeroInitializableAsBase(true), - isPacked{isPacked} {} + IsZeroInitializable(true), + IsZeroInitializableAsBase(true), isPacked{isPacked} {} void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, @@ -499,6 +500,8 @@ void CIRRecordLowering::accumulateBitFields( // with lower cost. auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord, uint64_t StartBitOffset) { + if (OffsetInRecord >= 64) // See IntType::verify + return true; if (!cirGenTypes.getModule().getCodeGenOpts().FineGrainedBitfieldAccesses) return false; llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index d39bb3c1b48d..5a857a2db39f 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -138,6 +138,7 @@ struct UnimplementedFeature { static bool exceptions() { return false; } static bool metaDataNode() { return false; } static bool isSEHTryScope() { return false; } + static bool emitScalarRangeCheck() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c new file mode 100644 index 000000000000..919ca317952c --- /dev/null +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -0,0 +1,99 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct __long { + struct __attribute__((__packed__)) { + unsigned __is_long_ : 1; + unsigned __cap_ : sizeof(unsigned) * 8 - 1; + }; + unsigned __size_; + unsigned *__data_; +}; + +void m() { + struct __long l; +} + +typedef struct { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; + unsigned f; // type other than int above, not a bitfield +} S; + +typedef struct { + int a : 3; // one bitfield with size < 8 + unsigned b; +} T; +// CHECK: !ty_22S22 = !cir.struct +// CHECK: !ty_22T22 = !cir.struct +// CHECK: !ty_22anon22 = !cir.struct +// CHECK: !ty_22__long22 = !cir.struct}> + +// CHECK: cir.func {{.*@store_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i +// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i +// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i +// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i +// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr +void store_field() { + S s; + s.a = 3; +} + +// CHECK: cir.func {{.*@store_neg_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i +// CHECK: [[TMP5:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i +// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i +// CHECK: [[TMP7:%.*]] = cir.binop(and, [[TMP4]], [[TMP6]]) : !u32i +// CHECK: [[TMP8:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i +// CHECK: [[TMP9:%.*]] = cir.shift(left, [[TMP7]] : !u32i, [[TMP8]] : !u32i) -> !u32i +// CHECK: [[TMP10:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i +// CHECK: [[TMP11:%.*]] = cir.binop(and, [[TMP5]], [[TMP10]]) : !u32i +// CHECK: [[TMP12:%.*]] = cir.binop(or, [[TMP11]], [[TMP9]]) : !u32i +// CHECK: cir.store [[TMP12]], [[TMP3]] : !u32i, cir.ptr +void store_neg_field() { + S s; + s.d = -1; +} + +// CHECK: cir.func {{.*@load_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP4]] : !u32i), !s32i +// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i +// CHECK: [[TMP7:%.*]] = cir.shift(left, [[TMP5]] : !s32i, [[TMP6]] : !s32i) -> !s32i +// CHECK: [[TMP8:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i +// CHECK: [[TMP9:%.*]] = cir.shift( right, [[TMP7]] : !s32i, [[TMP8]] : !s32i) -> !s32i +// CHECK: [[TMP10:%.*]] = cir.cast(integral, [[TMP9]] : !s32i), !s32i +// CHECK: cir.store [[TMP10]], [[TMP1]] : !s32i, cir.ptr +// CHECK: [[TMP11:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +int load_field(S* s) { + return s->d; +} + +// CHECK: cir.func {{.*@load_non_bitfield}} +// CHECK: cir.get_member {{%.}}[3] {name = "f"} : !cir.ptr -> !cir.ptr +unsigned load_non_bitfield(S *s) { + return s->f; +} + +// just create a usage of T type +// CHECK: cir.func {{.*@load_one_bitfield}} +int load_one_bitfield(T* t) { + return t->a; +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 8b9deed0f512..3ab1652c8fd3 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -14,5 +14,86 @@ void m() { __long l; } +typedef struct { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; + unsigned f; // type other than int above, not a bitfield +} S; + +typedef struct { + int a : 3; // one bitfield with size < 8 + unsigned b; +} T; +// CHECK: !ty_22S22 = !cir.struct +// CHECK: !ty_22T22 = !cir.struct // CHECK: !ty_22anon22 = !cir.struct // CHECK: !ty_22__long22 = !cir.struct}> + +// CHECK: cir.func @_Z11store_field +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i +// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i +// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i +// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i +// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr +void store_field() { + S s; + s.a = 3; +} + +// CHECK: cir.func @_Z15store_neg_field +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i +// CHECK: [[TMP5:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i +// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i +// CHECK: [[TMP7:%.*]] = cir.binop(and, [[TMP4]], [[TMP6]]) : !u32i +// CHECK: [[TMP8:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i +// CHECK: [[TMP9:%.*]] = cir.shift(left, [[TMP7]] : !u32i, [[TMP8]] : !u32i) -> !u32i +// CHECK: [[TMP10:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i +// CHECK: [[TMP11:%.*]] = cir.binop(and, [[TMP5]], [[TMP10]]) : !u32i +// CHECK: [[TMP12:%.*]] = cir.binop(or, [[TMP11]], [[TMP9]]) : !u32i +// CHECK: cir.store [[TMP12]], [[TMP3]] : !u32i, cir.ptr +void store_neg_field() { + S s; + s.d = -1; +} + +// CHECK: cir.func @_Z10load_field +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP4]] : !u32i), !s32i +// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i +// CHECK: [[TMP7:%.*]] = cir.shift(left, [[TMP5]] : !s32i, [[TMP6]] : !s32i) -> !s32i +// CHECK: [[TMP8:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i +// CHECK: [[TMP9:%.*]] = cir.shift( right, [[TMP7]] : !s32i, [[TMP8]] : !s32i) -> !s32i +// CHECK: [[TMP10:%.*]] = cir.cast(integral, [[TMP9]] : !s32i), !s32i +// CHECK: cir.store [[TMP10]], [[TMP1]] : !s32i, cir.ptr +// CHECK: [[TMP11:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +int load_field(S& s) { + return s.d; +} + +// CHECK: cir.func @_Z17load_non_bitfield +// CHECK: cir.get_member {{%.}}[3] {name = "f"} : !cir.ptr -> !cir.ptr +unsigned load_non_bitfield(S& s) { + return s.f; +} + +// just create a usage of T type +// CHECK: cir.func @_Z17load_one_bitfield +int load_one_bitfield(T& t) { + return t.a; +} \ No newline at end of file From b55877c40bab057edb38bb823b02f76d2a3c9648 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 26 Sep 2023 09:17:37 +0300 Subject: [PATCH 1209/2301] [CIR][Codegen][Bugfix] use record layout to generate index for a field (#270) This is a minor fix similar to the one introduced in #263. Basically, all calls to the `buildLValueForFieldInitialization` are even with the origin codegen `emitLValueForFieldInitialization` calls, i.e. the field index is calculated from the record layout, but not from the decl `field->getFieldIndex()`. Added just one test, because looks like we need to implement some `NYI` features first to test another places e.g. in `CIRGenExprAgg.cpp`, though I could miss something. Anyway, given the original codegen doesn't use `getFieldIndex` in these places, we also should not. All the remaining usages of `getFieldIndex` are ok. --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 3 +-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 6 ++++-- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 6 ++---- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +-- clang/test/CIR/CodeGen/derived-to-base.cpp | 13 +++++++++++++ 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 9a550a61f794..b031e93e53c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -207,8 +207,7 @@ static void buildLValueForAnyFieldInitialization(CIRGenFunction &CGF, if (MemberInit->isIndirectMemberInitializer()) { llvm_unreachable("NYI"); } else { - LHS = CGF.buildLValueForFieldInitialization(LHS, Field, Field->getName(), - Field->getFieldIndex()); + LHS = CGF.buildLValueForFieldInitialization(LHS, Field, Field->getName()); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 329d5e813d28..9aa4a847ccb4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -388,13 +388,15 @@ LValue CIRGenFunction::buildLValueForField(LValue base, } LValue CIRGenFunction::buildLValueForFieldInitialization( - LValue Base, const clang::FieldDecl *Field, llvm::StringRef FieldName, - unsigned FieldIndex) { + LValue Base, const clang::FieldDecl *Field, llvm::StringRef FieldName) { QualType FieldType = Field->getType(); if (!FieldType->isReferenceType()) return buildLValueForField(Base, Field); + auto& layout = CGM.getTypes().getCIRGenRecordLayout(Field->getParent()); + unsigned FieldIndex = layout.getCIRFieldNo(Field); + Address V = buildAddrOfFieldStorage(*this, Base.getAddress(), Field, FieldName, FieldIndex); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 6b31001144cc..a67eb88b9976 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -9,7 +9,6 @@ // This contains code to emit Aggregate Expr nodes as CIR code. // //===----------------------------------------------------------------------===// - #include "CIRGenCall.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" @@ -554,7 +553,7 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { // Emit initialization LValue LV = CGF.buildLValueForFieldInitialization( - SlotLV, *CurField, fieldName, CurField->getFieldIndex()); + SlotLV, *CurField, fieldName); if (CurField->hasCapturedVLAType()) { llvm_unreachable("NYI"); } @@ -819,9 +818,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( if (curInitIndex == NumInitElements && Dest.isZeroed() && CGF.getTypes().isZeroInitializable(ExprToVisit->getType())) break; - LValue LV = CGF.buildLValueForFieldInitialization( - DestLV, field, field->getName(), field->getFieldIndex()); + DestLV, field, field->getName()); // We never generate write-barries for initialized fields. assert(!UnimplementedFeature::setNonGC()); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 1890361eaeb1..3428c7f254df 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1531,8 +1531,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// stored in the reference. LValue buildLValueForFieldInitialization(LValue Base, const clang::FieldDecl *Field, - llvm::StringRef FieldName, - unsigned FieldIndex); + llvm::StringRef FieldName); void buildInitializerForField(clang::FieldDecl *Field, LValue LHS, clang::Expr *Init); diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 03846ca45688..7eeb41e17555 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -156,3 +156,16 @@ void t() { B b; b.foo(); } + +struct C : public A { + int& ref; + C(int& x) : ref(x) {} +}; + +// CHECK: cir.func @_Z8test_refv() +// CHECK: cir.get_member %2[1] {name = "ref"} +int test_ref() { + int x = 42; + C c(x); + return c.ref; +} \ No newline at end of file From 99b7223e9c6ab9943a708fced39bd408da24e431 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 26 Sep 2023 03:19:09 -0300 Subject: [PATCH 1210/2301] [CIR][Lowering] Deprecate typed LLVM dialect pointers (#271) Updates the lowering pass to use only opaque pointers. This essentially involves updating the type converter to drop pointee types and explicitly defining the types loaded/stored/GEPed by LLVM operations. The reasons for this are twofold: - LLVM dialect is currently transitioning to deprecate typed pointers, allowing only opaque pointers. The sooner we transition the fewer changes we will have to make. - Opaque pointers greatly simplify lowering self-references, since all self-references in records are wrapped in a pointer. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 34 +++++++++---------- clang/test/CIR/Lowering/dot.cir | 28 +++++++-------- clang/test/CIR/Lowering/globals.cir | 15 +++----- clang/test/CIR/Lowering/hello.cir | 2 +- clang/test/CIR/Lowering/loadstorealloca.cir | 10 ++---- clang/test/CIR/Lowering/loop.cir | 12 +++---- clang/test/CIR/Lowering/scope.cir | 3 +- clang/test/CIR/Lowering/struct.cir | 1 + clang/test/CIR/Lowering/tenary.cir | 7 ++-- clang/test/CIR/Lowering/unary-plus-minus.cir | 12 +++---- clang/test/CIR/Lowering/unions.cir | 6 ++-- 11 files changed, 58 insertions(+), 72 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ef51197088fb..d598f6381978 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -197,7 +197,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, auto loc = parentOp->getLoc(); auto addressOfOp = rewriter.create( - loc, mlir::LLVM::LLVMPointerType::get(sourceSymbol.getContext()), + loc, mlir::LLVM::LLVMPointerType::get(parentOp->getContext()), sourceSymbol.getSymName()); assert(!globalAttr.getIndices() && "TODO"); @@ -313,7 +313,6 @@ class CIRPtrStrideOpLowering rewriter.replaceOpWithNewOp(ptrStrideOp, resultTy, elementTy, adaptor.getBase(), adaptor.getStride()); - return mlir::success(); } }; @@ -485,12 +484,12 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { matchAndRewrite(mlir::cir::CastOp castOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto src = adaptor.getSrc(); + switch (castOp.getKind()) { case mlir::cir::CastKind::array_to_ptrdecay: { const auto ptrTy = castOp.getType().cast(); auto sourceValue = adaptor.getOperands().front(); - auto targetType = - getTypeConverter()->convertType(castOp->getResult(0).getType()); + auto targetType = convertTy(ptrTy); auto elementTy = convertTy(ptrTy.getPointee()); auto offset = llvm::SmallVector{0}; rewriter.replaceOpWithNewOp( @@ -831,17 +830,14 @@ class CIRAllocaLowering mlir::LogicalResult matchAndRewrite(mlir::cir::AllocaOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto elementTy = getTypeConverter()->convertType(op.getAllocaType()); - mlir::Value one = rewriter.create( op.getLoc(), typeConverter->convertType(rewriter.getIndexType()), rewriter.getIntegerAttr(rewriter.getIndexType(), 1)); - + auto elementTy = getTypeConverter()->convertType(op.getAllocaType()); auto resultTy = mlir::LLVM::LLVMPointerType::get(getContext()); - rewriter.replaceOpWithNewOp( op, resultTy, elementTy, one, op.getAlignmentAttr().getInt()); - return mlir::LogicalResult::success(); + return mlir::success(); } }; @@ -1028,9 +1024,9 @@ class CIRVAStartLowering mlir::LogicalResult matchAndRewrite(mlir::cir::VAStartOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto i8PtrTy = mlir::LLVM::LLVMPointerType::get(getContext()); + auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); auto vaList = rewriter.create( - op.getLoc(), i8PtrTy, adaptor.getOperands().front()); + op.getLoc(), opaquePtr, adaptor.getOperands().front()); rewriter.replaceOpWithNewOp(op, vaList); return mlir::success(); } @@ -1043,9 +1039,9 @@ class CIRVAEndLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::VAEndOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto i8PtrTy = mlir::LLVM::LLVMPointerType::get(getContext()); + auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); auto vaList = rewriter.create( - op.getLoc(), i8PtrTy, adaptor.getOperands().front()); + op.getLoc(), opaquePtr, adaptor.getOperands().front()); rewriter.replaceOpWithNewOp(op, vaList); return mlir::success(); } @@ -1059,11 +1055,11 @@ class CIRVACopyLowering mlir::LogicalResult matchAndRewrite(mlir::cir::VACopyOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto i8PtrTy = mlir::LLVM::LLVMPointerType::get(getContext()); + auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); auto dstList = rewriter.create( - op.getLoc(), i8PtrTy, adaptor.getOperands().front()); + op.getLoc(), opaquePtr, adaptor.getOperands().front()); auto srcList = rewriter.create( - op.getLoc(), i8PtrTy, adaptor.getOperands().back()); + op.getLoc(), opaquePtr, adaptor.getOperands().back()); rewriter.replaceOpWithNewOp(op, dstList, srcList); return mlir::success(); } @@ -1777,7 +1773,8 @@ class CIRGetMemberOpLowering // Since the base address is a pointer to an aggregate, the first offset // is always zero. The second offset tell us which member it will access. llvm::SmallVector offset{0, op.getIndex()}; - const auto elementTy = getTypeConverter()->convertType(structTy); + const auto elementTy = + getTypeConverter()->convertType(structTy.getMembers()[op.getIndex()]); rewriter.replaceOpWithNewOp(op, llResTy, elementTy, adaptor.getAddr(), offset); return mlir::success(); @@ -1869,7 +1866,8 @@ namespace { void prepareTypeConverter(mlir::LLVMTypeConverter &converter, mlir::DataLayout &dataLayout) { converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { - return mlir::LLVM::LLVMPointerType::get(&converter.getContext()); + // Drop pointee type since LLVM dialect only allows opaque pointers. + return mlir::LLVM::LLVMPointerType::get(type.getContext()); }); converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { auto ty = converter.convertType(type.getEltType()); diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 8b3b553492b1..2236a0d2784d 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -68,8 +68,8 @@ module { // MLIR-NEXT: %7 = llvm.alloca %6 x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %8 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %9 = llvm.alloca %8 x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: llvm.store %arg0, %1 : !llvm.ptr -// MLIR-NEXT: llvm.store %arg1, %3 : !llvm.ptr +// MLIR-NEXT: llvm.store %arg0, %1 : !llvm.ptr, !llvm.ptr +// MLIR-NEXT: llvm.store %arg1, %3 : !llvm.ptr, !llvm.ptr // MLIR-NEXT: llvm.store %arg2, %5 : i32, !llvm.ptr // MLIR-NEXT: %10 = llvm.mlir.constant(0.000000e+00 : f64) : f64 // MLIR-NEXT: llvm.store %10, %9 : f64, !llvm.ptr @@ -81,8 +81,8 @@ module { // MLIR-NEXT: llvm.store %13, %12 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 // MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb6 -// MLIR-NEXT: %14 = llvm.load %12 : !llvm.ptr -// MLIR-NEXT: %15 = llvm.load %5 : !llvm.ptr +// MLIR-NEXT: %14 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %15 = llvm.load %5 : !llvm.ptr -> i32 // MLIR-NEXT: %16 = llvm.icmp "slt" %14, %15 : i32 // MLIR-NEXT: %17 = llvm.zext %16 : i1 to i32 // MLIR-NEXT: %18 = llvm.mlir.constant(0 : i32) : i32 @@ -95,21 +95,21 @@ module { // MLIR-NEXT: ^bb4: // pred: ^bb2 // MLIR-NEXT: llvm.br ^bb7 // MLIR-NEXT: ^bb5: // pred: ^bb3 -// MLIR-NEXT: %22 = llvm.load %1 : !llvm.ptr -// MLIR-NEXT: %23 = llvm.load %12 : !llvm.ptr +// MLIR-NEXT: %22 = llvm.load %1 : !llvm.ptr -> !llvm.ptr +// MLIR-NEXT: %23 = llvm.load %12 : !llvm.ptr -> i32 // MLIR-NEXT: %24 = llvm.getelementptr %22[%23] : (!llvm.ptr, i32) -> !llvm.ptr -// MLIR-NEXT: %25 = llvm.load %24 : !llvm.ptr -// MLIR-NEXT: %26 = llvm.load %3 : !llvm.ptr -// MLIR-NEXT: %27 = llvm.load %12 : !llvm.ptr +// MLIR-NEXT: %25 = llvm.load %24 : !llvm.ptr -> f64 +// MLIR-NEXT: %26 = llvm.load %3 : !llvm.ptr -> !llvm.ptr +// MLIR-NEXT: %27 = llvm.load %12 : !llvm.ptr -> i32 // MLIR-NEXT: %28 = llvm.getelementptr %26[%27] : (!llvm.ptr, i32) -> !llvm.ptr -// MLIR-NEXT: %29 = llvm.load %28 : !llvm.ptr +// MLIR-NEXT: %29 = llvm.load %28 : !llvm.ptr -> f64 // MLIR-NEXT: %30 = llvm.fmul %25, %29 : f64 -// MLIR-NEXT: %31 = llvm.load %9 : !llvm.ptr +// MLIR-NEXT: %31 = llvm.load %9 : !llvm.ptr -> f64 // MLIR-NEXT: %32 = llvm.fadd %31, %30 : f64 // MLIR-NEXT: llvm.store %32, %9 : f64, !llvm.ptr // MLIR-NEXT: llvm.br ^bb6 // MLIR-NEXT: ^bb6: // pred: ^bb5 -// MLIR-NEXT: %33 = llvm.load %12 : !llvm.ptr +// MLIR-NEXT: %33 = llvm.load %12 : !llvm.ptr -> i32 // MLIR-NEXT: %34 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: %35 = llvm.add %33, %34 : i32 // MLIR-NEXT: llvm.store %35, %12 : i32, !llvm.ptr @@ -117,9 +117,9 @@ module { // MLIR-NEXT: ^bb7: // pred: ^bb4 // MLIR-NEXT: llvm.br ^bb8 // MLIR-NEXT: ^bb8: // pred: ^bb7 -// MLIR-NEXT: %36 = llvm.load %9 : !llvm.ptr +// MLIR-NEXT: %36 = llvm.load %9 : !llvm.ptr -> f64 // MLIR-NEXT: llvm.store %36, %7 : f64, !llvm.ptr -// MLIR-NEXT: %37 = llvm.load %7 : !llvm.ptr +// MLIR-NEXT: %37 = llvm.load %7 : !llvm.ptr -> f64 // MLIR-NEXT: llvm.return %37 : f64 // MLIR-NEXT: } // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index e8640db81c7a..73b690f945b5 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -75,28 +75,26 @@ module { } cir.global external @string = #cir.const_array<[#cir.int<119> : !s8i, #cir.int<104> : !s8i, #cir.int<97> : !s8i, #cir.int<116> : !s8i, #cir.int<110> : !s8i, #cir.int<111> : !s8i, #cir.int<119> : !s8i, #cir.int<0> : !s8i]> : !cir.array // MLIR: llvm.mlir.global external @string(dense<[119, 104, 97, 116, 110, 111, 119, 0]> : tensor<8xi8>) {addr_space = 0 : i32} : !llvm.array<8 x i8> - // LLVM: @string = global [8 x i8] c"whatnow\00" cir.global external @uint = #cir.const_array<[#cir.int<255> : !u32i]> : !cir.array // MLIR: llvm.mlir.global external @uint(dense<255> : tensor<1xi32>) {addr_space = 0 : i32} : !llvm.array<1 x i32> - // LLVM: @uint = global [1 x i32] [i32 255] cir.global external @sshort = #cir.const_array<[#cir.int<11111> : !s16i, #cir.int<22222> : !s16i]> : !cir.array // MLIR: llvm.mlir.global external @sshort(dense<[11111, 22222]> : tensor<2xi16>) {addr_space = 0 : i32} : !llvm.array<2 x i16> - // LLVM: @sshort = global [2 x i16] [i16 11111, i16 22222] cir.global external @sint = #cir.const_array<[#cir.int<123> : !s32i, #cir.int<456> : !s32i, #cir.int<789> : !s32i]> : !cir.array // MLIR: llvm.mlir.global external @sint(dense<[123, 456, 789]> : tensor<3xi32>) {addr_space = 0 : i32} : !llvm.array<3 x i32> - // LLVM: @sint = global [3 x i32] [i32 123, i32 456, i32 789] cir.global external @ll = #cir.const_array<[#cir.int<999999999> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i, #cir.int<0> : !s64i]> : !cir.array // MLIR: llvm.mlir.global external @ll(dense<[999999999, 0, 0, 0]> : tensor<4xi64>) {addr_space = 0 : i32} : !llvm.array<4 x i64> - // LLVM: @ll = global [4 x i64] [i64 999999999, i64 0, i64 0, i64 0] cir.global external @twoDim = #cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.array]> : !cir.array x 2> // MLIR: llvm.mlir.global external @twoDim(dense<{{\[\[}}1, 2], [3, 4{{\]\]}}> : tensor<2x2xi32>) {addr_space = 0 : i32} : !llvm.array<2 x array<2 x i32>> - // LLVM: @twoDim = global [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 1, i32 2], [2 x i32] [i32 3, i32 4{{\]\]}} + + // The following tests check direclty the resulting LLVM IR because the MLIR + // version is two long. Always prefer the MLIR prefix when possible. cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22A22 // LLVM: @nestedTwoDim = global %struct.A { i32 1, [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 2, i32 3], [2 x i32] [i32 4, i32 5{{\]\]}} } cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array}> : !ty_22StringStruct22 // LLVM: @nestedString = global %struct.StringStruct { [3 x i8] c"1\00\00", [3 x i8] zeroinitializer, [3 x i8] zeroinitializer } cir.global external @nestedStringPtr = #cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr}> : !ty_22StringStructPtr22 // LLVM: @nestedStringPtr = global %struct.StringStructPtr { ptr @.str } + cir.func @_Z11get_globalsv() { %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} %1 = cir.alloca !cir.ptr, cir.ptr >, ["u", init] {alignment = 8 : i64} @@ -107,31 +105,26 @@ module { %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @string : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr - // LLVM: store ptr @string, ptr %{{[0-9]+}} cir.store %6, %0 : !cir.ptr, cir.ptr > %7 = cir.get_global @uint : cir.ptr > %8 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @uint : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr - // LLVM: store ptr @uint, ptr %{{[0-9]+}} cir.store %8, %1 : !cir.ptr, cir.ptr > %9 = cir.get_global @sshort : cir.ptr > %10 = cir.cast(array_to_ptrdecay, %9 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sshort : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr - // LLVM: store ptr @sshort, ptr %{{[0-9]+}} cir.store %10, %2 : !cir.ptr, cir.ptr > %11 = cir.get_global @sint : cir.ptr > %12 = cir.cast(array_to_ptrdecay, %11 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sint : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr - // LLVM: store ptr @sint, ptr %{{[0-9]+}} cir.store %12, %3 : !cir.ptr, cir.ptr > %13 = cir.get_global @ll : cir.ptr > %14 = cir.cast(array_to_ptrdecay, %13 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @ll : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr - // LLVM: store ptr @ll, ptr %{{[0-9]+}} cir.store %14, %4 : !cir.ptr, cir.ptr > cir.return } diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 65674ff96cf7..f19d16c7b8a0 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -30,6 +30,6 @@ module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.sign // CHECK: %4 = llvm.call @printf(%3) : (!llvm.ptr) -> i32 // CHECK: %5 = llvm.mlir.constant(0 : i32) : i32 // CHECK: llvm.store %5, %1 : i32, !llvm.ptr -// CHECK: %6 = llvm.load %1 : !llvm.ptr +// CHECK: %6 = llvm.load %1 : !llvm.ptr -> i32 // CHECK: llvm.return %6 : i32 // CHECK: } diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index a70d66daef59..833e2dbb469f 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -1,5 +1,5 @@ -// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR !u32i = !cir.int module { @@ -20,9 +20,3 @@ module { // MLIR-NEXT: llvm.store %2, %1 : i32, !llvm.ptr // MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: return %3 : i32 - -// LLVM: define i32 @foo() -// LLVM-NEXT: %1 = alloca i32, i64 1, align 4 -// LLVM-NEXT: store i32 1, ptr %1, align 4 -// LLVM-NEXT: %2 = load i32, ptr %1, align 4 -// LLVM-NEXT: ret i32 %2 diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index adea273b6cc4..9ac1c672886a 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -37,7 +37,7 @@ module { // MLIR-NEXT: llvm.br ^bb1 // ============= Condition block ============= // MLIR-NEXT: ^bb1: // 2 preds: ^bb0, ^bb5 -// MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: %4 = llvm.mlir.constant(10 : i32) : i32 // MLIR-NEXT: %5 = llvm.icmp "slt" %3, %4 : i32 // MLIR-NEXT: %6 = llvm.zext %5 : i1 to i32 @@ -55,7 +55,7 @@ module { // MLIR-NEXT: llvm.br ^bb5 // ============= Step block ============= // MLIR-NEXT: ^bb5: // pred: ^bb4 -// MLIR-NEXT: %11 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %11 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: %12 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: %13 = llvm.add %11, %12 : i32 // MLIR-NEXT: llvm.store %13, %1 : i32, !llvm.ptr @@ -101,7 +101,7 @@ module { // MLIR-NEXT: llvm.br ^bb2 // ============= Condition block ============= // MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb5 - // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: %3 = llvm.mlir.constant(10 : i32) : i32 // MLIR-NEXT: %4 = llvm.icmp "slt" %2, %3 : i32 // MLIR-NEXT: %5 = llvm.zext %4 : i1 to i32 @@ -116,7 +116,7 @@ module { // MLIR-NEXT: llvm.br ^bb6 // ============= Body block ============= // MLIR-NEXT: ^bb5: // pred: ^bb3 - // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: %11 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: %12 = llvm.add %10, %11 : i32 // MLIR-NEXT: llvm.store %12, %1 : i32, !llvm.ptr @@ -161,7 +161,7 @@ module { // MLIR-NEXT: llvm.br ^bb5 // ============= Condition block ============= // MLIR-NEXT: ^bb2: - // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: %3 = llvm.mlir.constant(10 : i32) : i32 // MLIR-NEXT: %4 = llvm.icmp "slt" %2, %3 : i32 // MLIR-NEXT: %5 = llvm.zext %4 : i1 to i32 @@ -176,7 +176,7 @@ module { // MLIR-NEXT: llvm.br ^bb6 // ============= Body block ============= // MLIR-NEXT: ^bb5: - // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr + // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: %11 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: %12 = llvm.add %10, %11 : i32 // MLIR-NEXT: llvm.store %12, %1 : i32, !llvm.ptr diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index e384d308281c..7ebd46a974f7 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -1,4 +1,5 @@ -// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR // RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !u32i = !cir.int diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index 9430d698d9ca..524bf32714af 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -1,5 +1,6 @@ // RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s +// XFAIL: * !s32i = !cir.int !u8i = !cir.int diff --git a/clang/test/CIR/Lowering/tenary.cir b/clang/test/CIR/Lowering/tenary.cir index 40774b0a84fd..213dcc5b3ade 100644 --- a/clang/test/CIR/Lowering/tenary.cir +++ b/clang/test/CIR/Lowering/tenary.cir @@ -1,4 +1,5 @@ -// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-llvm -reconcile-unrealized-casts -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR !s32i = !cir.int @@ -29,7 +30,7 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %3 = llvm.alloca %2 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr -// MLIR-NEXT: %4 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %4 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: %5 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %6 = llvm.icmp "sgt" %4, %5 : i32 // MLIR-NEXT: %7 = llvm.zext %6 : i1 to i8 @@ -45,6 +46,6 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: llvm.br ^bb4 // MLIR-NEXT: ^bb4: // pred: ^bb3 // MLIR-NEXT: llvm.store %11, %3 : i32, !llvm.ptr -// MLIR-NEXT: %12 = llvm.load %3 : !llvm.ptr +// MLIR-NEXT: %12 = llvm.load %3 : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return %12 : i32 // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index 791d017da102..ffadbc3df3be 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -1,5 +1,6 @@ -// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR + !s32i = !cir.int module { cir.func @foo() { @@ -25,9 +26,6 @@ module { // MLIR: %[[ZERO:[a-z0-9_]+]] = llvm.mlir.constant(0 : i32) // MLIR: llvm.sub %[[ZERO]], %[[#INPUT_MINUS]] -// LLVM: = sub i32 0, %[[#]] - - cir.func @floatingPoints(%arg0: f64) { // MLIR: llvm.func @floatingPoints(%arg0: f64) %0 = cir.alloca f64, cir.ptr , ["X", init] {alignment = 8 : i64} @@ -35,10 +33,10 @@ module { %1 = cir.load %0 : cir.ptr , f64 %2 = cir.unary(plus, %1) : f64, f64 // MLIR: llvm.store %arg0, %[[#F_PLUS:]] : f64, !llvm.ptr - // MLIR: %{{[0-9]}} = llvm.load %[[#F_PLUS]] : !llvm.ptr + // MLIR: %{{[0-9]}} = llvm.load %[[#F_PLUS]] : !llvm.ptr -> f64 %3 = cir.load %0 : cir.ptr , f64 %4 = cir.unary(minus, %3) : f64, f64 - // MLIR: %[[#F_MINUS:]] = llvm.load %{{[0-9]}} : !llvm.ptr + // MLIR: %[[#F_MINUS:]] = llvm.load %{{[0-9]}} : !llvm.ptr -> f64 // MLIR: %[[#F_NEG_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f64) : f64 // MLIR: %5 = llvm.fmul %[[#F_NEG_ONE]], %[[#F_MINUS]] : f64 cir.return diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir index ea6ed375c201..dac1006cd8d2 100644 --- a/clang/test/CIR/Lowering/unions.cir +++ b/clang/test/CIR/Lowering/unions.cir @@ -27,15 +27,15 @@ module { cir.store %5, %6 : !cir.bool, cir.ptr // CHECK: %[[#VAL:]] = llvm.mlir.constant(1 : i8) : i8 // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. - // CHECK: %[[#ADDR:]] = llvm.bitcast %{{.+}} : !llvm.ptr to !llvm.ptr + // CHECK: %[[#ADDR:]] = llvm.bitcast %{{.+}} : !llvm.ptr // CHECK: llvm.store %[[#VAL]], %[[#ADDR]] : i8, !llvm.ptr // Should load direclty from the union's base address. %7 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr %8 = cir.load %7 : cir.ptr , !cir.bool // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. - // CHECK: %[[#BASE:]] = llvm.bitcast %{{.+}} : !llvm.ptr to !llvm.ptr - // CHECK: %{{.+}} = llvm.load %[[#BASE]] : !llvm.ptr + // CHECK: %[[#BASE:]] = llvm.bitcast %{{.+}} : !llvm.ptr + // CHECK: %{{.+}} = llvm.load %[[#BASE]] : !llvm.ptr -> i8 cir.return } From efece592164414226243d88f22ec0f9866c7bea6 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 26 Sep 2023 18:42:45 -0300 Subject: [PATCH 1211/2301] [CIR][Lowering][Bugfix] Fix GetMemberOp lowering (#273) The wrong element type was being passed to LLVM's GEP op, generating an invalid IR. Tests were also updated to properly validate the `llvm.getelementptr` element type. Fixes #272 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 +- clang/test/CIR/Lowering/cast.cir | 54 +++++++------------ clang/test/CIR/Lowering/dot.cir | 4 +- clang/test/CIR/Lowering/globals.cir | 10 ++-- clang/test/CIR/Lowering/hello.cir | 2 +- clang/test/CIR/Lowering/ptrstride.cir | 20 +++---- clang/test/CIR/Lowering/struct.cir | 5 +- clang/test/CIR/Lowering/variadics.cir | 8 +-- 8 files changed, 41 insertions(+), 65 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d598f6381978..84fc520f220c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1773,8 +1773,7 @@ class CIRGetMemberOpLowering // Since the base address is a pointer to an aggregate, the first offset // is always zero. The second offset tell us which member it will access. llvm::SmallVector offset{0, op.getIndex()}; - const auto elementTy = - getTypeConverter()->convertType(structTy.getMembers()[op.getIndex()]); + const auto elementTy = getTypeConverter()->convertType(structTy); rewriter.replaceOpWithNewOp(op, llResTy, elementTy, adaptor.getAddr(), offset); return mlir::success(); diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 16010444be6f..74e176a29f10 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -1,5 +1,6 @@ -// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir + !s16i = !cir.int !s32i = !cir.int !s64i = !cir.int @@ -9,27 +10,8 @@ !u64i = !cir.int module { - cir.func @foo(%arg0: !s32i) -> !s32i { - %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool - cir.return %arg0 : !s32i - } - -// MLIR: llvm.func @foo(%arg0: i32) -> i32 -// MLIR-NEXT: [[v0:%[0-9]]] = llvm.mlir.constant(0 : i32) : i32 -// MLIR-NEXT: [[v1:%[0-9]]] = llvm.icmp "ne" %arg0, %0 : i32 -// MLIR-NEXT: [[v2:%[0-9]]] = llvm.zext %1 : i1 to i8 -// MLIR-NEXT: llvm.return %arg0 : i32 -// MLIR-NEXT: } - - -// LLVM: define i32 @foo(i32 %0) -// LLVM-NEXT: %2 = icmp ne i32 %0, 0 -// LLVM-NEXT: %3 = zext i1 %2 to i8 -// LLVM-NEXT: ret i32 %0 -// LLVM-NEXT: } - cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i, %arg2: f32) -> !s32i { - // MLIR: llvm.func @cStyleCasts + // CHECK: llvm.func @cStyleCasts %0 = cir.alloca !u32i, cir.ptr , ["x1", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, cir.ptr , ["x2", init] {alignment = 4 : i64} %20 = cir.alloca !s16i, cir.ptr , ["x4", init] {alignment = 2 : i64} @@ -46,47 +28,51 @@ module { // Integer casts. %9 = cir.load %0 : cir.ptr , !u32i %10 = cir.cast(integral, %9 : !u32i), !s8i - // MLIR: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8 + // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8 cir.store %10, %3 : !s8i, cir.ptr %11 = cir.load %1 : cir.ptr , !s32i %12 = cir.cast(integral, %11 : !s32i), !s16i - // MLIR: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i16 + // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i16 cir.store %12, %4 : !s16i, cir.ptr %13 = cir.load %0 : cir.ptr , !u32i %14 = cir.cast(integral, %13 : !u32i), !s64i - // MLIR: %{{[0-9]+}} = llvm.zext %{{[0-9]+}} : i32 to i64 + // CHECK: %{{[0-9]+}} = llvm.zext %{{[0-9]+}} : i32 to i64 cir.store %14, %5 : !s64i, cir.ptr %15 = cir.load %1 : cir.ptr , !s32i %16 = cir.cast(integral, %15 : !s32i), !s64i - // MLIR: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 + // CHECK: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 %30 = cir.cast(integral, %arg1 : !s32i), !u32i // Should not produce a cast. %32 = cir.cast(integral, %arg0 : !u32i), !s32i // Should not produce a cast. %21 = cir.load %20 : cir.ptr , !s16i %22 = cir.cast(integral, %21 : !s16i), !u64i - // MLIR: %[[TMP:[0-9]+]] = llvm.sext %{{[0-9]+}} : i16 to i64 + // CHECK: %[[TMP:[0-9]+]] = llvm.sext %{{[0-9]+}} : i16 to i64 + %33 = cir.cast(int_to_bool, %arg1 : !s32i), !cir.bool + // CHECK: %[[#ZERO:]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK: %[[#CMP:]] = llvm.icmp "ne" %arg1, %[[#ZERO]] : i32 + // CHECK: %{{.+}} = llvm.zext %[[#CMP]] : i1 to i8 // Pointer casts. cir.store %16, %6 : !s64i, cir.ptr %17 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr cir.store %17, %8 : !cir.ptr, cir.ptr > - // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr + // CHECK: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, i32 %23 = cir.cast(int_to_ptr, %22 : !u64i), !cir.ptr - // MLIR: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr + // CHECK: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr %24 = cir.cast(ptr_to_int, %23 : !cir.ptr), !s32i - // MLIR: %{{[0-9]+}} = llvm.ptrtoint %[[TMP2]] : !llvm.ptr to i32 + // CHECK: %{{[0-9]+}} = llvm.ptrtoint %[[TMP2]] : !llvm.ptr to i32 %29 = cir.cast(ptr_to_bool, %23 : !cir.ptr), !cir.bool // Floating point casts. %25 = cir.cast(int_to_float, %arg1 : !s32i), f32 - // MLIR: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32 + // CHECK: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32 %26 = cir.cast(int_to_float, %arg0 : !u32i), f32 - // MLIR: %{{.+}} = llvm.uitofp %{{.+}} : i32 to f32 + // CHECK: %{{.+}} = llvm.uitofp %{{.+}} : i32 to f32 %27 = cir.cast(float_to_int, %arg2 : f32), !s32i - // MLIR: %{{.+}} = llvm.fptosi %{{.+}} : f32 to i32 + // CHECK: %{{.+}} = llvm.fptosi %{{.+}} : f32 to i32 %28 = cir.cast(float_to_int, %arg2 : f32), !u32i - // MLIR: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 + // CHECK: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 %18 = cir.const(#cir.int<0> : !s32i) : !s32i cir.store %18, %2 : !s32i, cir.ptr diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 2236a0d2784d..e889dcd05827 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -97,11 +97,11 @@ module { // MLIR-NEXT: ^bb5: // pred: ^bb3 // MLIR-NEXT: %22 = llvm.load %1 : !llvm.ptr -> !llvm.ptr // MLIR-NEXT: %23 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %24 = llvm.getelementptr %22[%23] : (!llvm.ptr, i32) -> !llvm.ptr +// MLIR-NEXT: %24 = llvm.getelementptr %22[%23] : (!llvm.ptr, i32) -> !llvm.ptr, f64 // MLIR-NEXT: %25 = llvm.load %24 : !llvm.ptr -> f64 // MLIR-NEXT: %26 = llvm.load %3 : !llvm.ptr -> !llvm.ptr // MLIR-NEXT: %27 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %28 = llvm.getelementptr %26[%27] : (!llvm.ptr, i32) -> !llvm.ptr +// MLIR-NEXT: %28 = llvm.getelementptr %26[%27] : (!llvm.ptr, i32) -> !llvm.ptr, f64 // MLIR-NEXT: %29 = llvm.load %28 : !llvm.ptr -> f64 // MLIR-NEXT: %30 = llvm.fmul %25, %29 : f64 // MLIR-NEXT: %31 = llvm.load %9 : !llvm.ptr -> f64 diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 73b690f945b5..62034745aa29 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -104,27 +104,27 @@ module { %5 = cir.get_global @string : cir.ptr > %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @string : !llvm.ptr - // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i8 cir.store %6, %0 : !cir.ptr, cir.ptr > %7 = cir.get_global @uint : cir.ptr > %8 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @uint : !llvm.ptr - // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i32 cir.store %8, %1 : !cir.ptr, cir.ptr > %9 = cir.get_global @sshort : cir.ptr > %10 = cir.cast(array_to_ptrdecay, %9 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sshort : !llvm.ptr - // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i16 cir.store %10, %2 : !cir.ptr, cir.ptr > %11 = cir.get_global @sint : cir.ptr > %12 = cir.cast(array_to_ptrdecay, %11 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sint : !llvm.ptr - // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i32 cir.store %12, %3 : !cir.ptr, cir.ptr > %13 = cir.get_global @ll : cir.ptr > %14 = cir.cast(array_to_ptrdecay, %13 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @ll : !llvm.ptr - // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i64 cir.store %14, %4 : !cir.ptr, cir.ptr > cir.return } diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index f19d16c7b8a0..1f912805553f 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -26,7 +26,7 @@ module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.sign // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 // CHECK: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // CHECK: %2 = llvm.mlir.addressof @".str" : !llvm.ptr -// CHECK: %3 = llvm.getelementptr %2[0] : (!llvm.ptr) -> !llvm.ptr +// CHECK: %3 = llvm.getelementptr %2[0] : (!llvm.ptr) -> !llvm.ptr, i8 // CHECK: %4 = llvm.call @printf(%3) : (!llvm.ptr) -> i32 // CHECK: %5 = llvm.mlir.constant(0 : i32) : i32 // CHECK: llvm.store %5, %1 : i32, !llvm.ptr diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index 6e1646835002..9c01fd7fde01 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -1,5 +1,6 @@ -// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR + !s32i = !cir.int module { cir.func @f(%arg0: !cir.ptr) { @@ -18,19 +19,10 @@ module { // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.store %arg0, %1 : !llvm.ptr, !llvm.ptr -// MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr +// MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr -> !llvm.ptr // MLIR-NEXT: %3 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: %4 = llvm.getelementptr %2[%3] : (!llvm.ptr, i32) -> !llvm.ptr -// MLIR-NEXT: %5 = llvm.load %4 : !llvm.ptr +// MLIR-NEXT: %4 = llvm.getelementptr %2[%3] : (!llvm.ptr, i32) -> !llvm.ptr, i32 +// MLIR-NEXT: %5 = llvm.load %4 : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return // MLIR-NEXT: } // MLIR-NEXT: } - -// LLVM: define void @f(ptr %0) -// LLVM-NEXT: %2 = alloca ptr, i64 1, align 8 -// LLVM-NEXT: store ptr %0, ptr %2, align 8 -// LLVM-NEXT: %3 = load ptr, ptr %2, align 8 -// LLVM-NEXT: %4 = getelementptr i32, ptr %3, i32 1 -// LLVM-NEXT: %5 = load i32, ptr %4, align 4 -// LLVM-NEXT: ret void -// LLVM-NEXT: } diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index 524bf32714af..207aa6d47031 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -1,6 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -// XFAIL: * !s32i = !cir.int !u8i = !cir.int @@ -17,9 +16,9 @@ module { // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#STRUCT:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"struct.S", (i8, i32)> %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr - // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 0] : (!llvm.ptr) -> !llvm.ptr + // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.S", (i8, i32)> %5 = cir.get_member %1[1] {name = "i"} : !cir.ptr -> !cir.ptr - // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 1] : (!llvm.ptr) -> !llvm.ptr + // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.S", (i8, i32)> cir.return } diff --git a/clang/test/CIR/Lowering/variadics.cir b/clang/test/CIR/Lowering/variadics.cir index ca7dbcc866a1..8e5cb670fa30 100644 --- a/clang/test/CIR/Lowering/variadics.cir +++ b/clang/test/CIR/Lowering/variadics.cir @@ -16,20 +16,20 @@ module { cir.store %arg0, %0 : !s32i, cir.ptr %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr cir.va.start %4 : !cir.ptr - // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vastart %{{[0-9]+}} : !llvm.ptr %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr %6 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr cir.va.copy %6 to %5 : !cir.ptr, !cir.ptr - // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr - // MLIR-NEXT: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> + // MLIR-NEXT: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vacopy %13 to %{{[0-9]+}} : !llvm.ptr, !llvm.ptr %7 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr cir.va.end %7 : !cir.ptr - // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr + // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vaend %{{[0-9]+}} : !llvm.ptr %8 = cir.const(#cir.int<0> : !s32i) : !s32i From 97cb740c0bc0cf933dcad652d18f2e32b3eb5da2 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 Oct 2023 14:19:07 -0400 Subject: [PATCH 1212/2301] [CIR][Rebasing] Add header for llvm::join --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 9aa4a847ccb4..c8cefefd7e44 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -23,6 +23,8 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" + +#include "llvm/ADT/StringExtras.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" From 59312b2d576df58f46e1e69e26c2ad8906c4a226 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 Oct 2023 14:19:45 -0400 Subject: [PATCH 1213/2301] [CIR][Rebasing] Account for OpenMPIsDevice renaming --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 ++--- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 15 ++++++++------- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index c8cefefd7e44..76f946c2884a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -749,9 +749,8 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, // Check if the variable is marked as declare target with link clause in // device codegen. - if (CGF.getLangOpts().OpenMP) { - assert(0 && "not implemented"); - } + if (CGF.getLangOpts().OpenMP) + llvm_unreachable("not implemented"); auto V = CGF.CGM.getAddrOfGlobalVar(VD); auto RealVarTy = CGF.getTypes().convertTypeForMem(VD->getType()); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 6485ddde51ae..fd8fba099cd0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -100,10 +100,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, const clang::CodeGenOptions &CGO, DiagnosticsEngine &Diags) : builder(context, *this), astCtx(astctx), langOpts(astctx.getLangOpts()), - codeGenOpts(CGO), - theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), - target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, - VTables{*this} { + codeGenOpts(CGO), theModule{mlir::ModuleOp::create( + builder.getUnknownLoc())}, + Diags(Diags), target(astCtx.getTargetInfo()), + ABI(createCXXABI(*this)), genTypes{*this}, VTables{*this} { // Initialize CIR signed integer types cache. SInt8Ty = @@ -780,7 +780,8 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // If this is OpenMP device, check if it is legal to emit this global // normally. QualType ASTTy = D->getType(); - assert(!(getLangOpts().OpenCL || getLangOpts().OpenMP) && "not implemented"); + if (getLangOpts().OpenCL || getLangOpts().OpenMPIsTargetDevice) + llvm_unreachable("not implemented"); // TODO(cir): LLVM's codegen uses a llvm::TrackingVH here. Is that // necessary here for CIR gen? @@ -2509,9 +2510,9 @@ mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc, // FIXME: should we even be calling this method if RTTI is disabled // and it's not for EH? if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice || - (getLangOpts().OpenMP && getLangOpts().OpenMP && getTriple().isNVPTX())) { + (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && + getTriple().isNVPTX())) llvm_unreachable("NYI"); - } if (ForEH && Ty->isObjCObjectPointerType() && getLangOpts().ObjCRuntime.isGNUFamily()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 77f3fe11340d..789edca33048 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -231,7 +231,7 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, // For NVPTX devices in OpenMP emit special functon as null pointers, // otherwise linking ends up with unresolved references. - if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMP && + if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPIsTargetDevice && CGM.getTriple().isNVPTX()) llvm_unreachable("NYI"); From 0f994e90ad41cccd8669fd840e297838c15e7762 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 Oct 2023 18:45:54 -0400 Subject: [PATCH 1214/2301] [CIR][Rebasing] Adapt a few fns for upstream and adjust some tests --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 74 +++++++++++------------ clang/test/CIR/CodeGen/agg-init.cpp | 17 ------ clang/test/CIR/CodeGen/agg-init2.cpp | 21 +++++++ clang/test/CIR/CodeGen/constptr.c | 1 + clang/test/CIR/CodeGen/globals.c | 1 - clang/test/CIR/CodeGen/globals.cpp | 4 +- clang/test/CIR/CodeGen/static-vars.c | 1 - 7 files changed, 58 insertions(+), 61 deletions(-) create mode 100644 clang/test/CIR/CodeGen/agg-init2.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index a823f3fd660a..699ba7468f4f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1309,34 +1309,24 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { } InConstantContext = D.hasConstantInitialization(); + const Expr * E = D.getInit(); + assert(E && "No initializer to emit"); + QualType destType = D.getType(); + if (!destType->isReferenceType()) { + QualType nonMemoryDestType = getNonMemoryType(CGM, destType); + if (auto C = ConstExprEmitter(*this).Visit(const_cast(E), + nonMemoryDestType)) + return emitForMemory(C, destType); + } + // Try to emit the initializer. Note that this can allow some things that // are not allowed by tryEmitPrivateForMemory alone. - if (auto value = D.evaluateValue()) { + if (auto value = D.evaluateValue()) return tryEmitPrivateForMemory(*value, destType); - } - - // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a - // reference is a constant expression, and the reference binds to a temporary, - // then constant initialization is performed. ConstExprEmitter will - // incorrectly emit a prvalue constant in this case, and the calling code - // interprets that as the (pointer) value of the reference, rather than the - // desired value of the referee. - if (destType->isReferenceType()) - return {}; - - // Evaluation failed and not a reference type: ensure initializer exists. - const Expr *E = D.getInit(); - assert(E && "No initializer to emit"); - // Initializer exists: emit it "manually" through visitors. - auto nonMemoryDestType = getNonMemoryType(CGM, destType); - auto C = - ConstExprEmitter(*this).Visit(const_cast(E), nonMemoryDestType); - - // Return either the initializer attribute or a null attribute on failure. - return (C ? emitForMemory(C, destType) : nullptr); + return nullptr; } mlir::Attribute ConstantEmitter::tryEmitAbstract(const Expr *E, @@ -1407,30 +1397,34 @@ mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, return C; } -mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *E, QualType T) { - assert(!T->isVoidType() && "can't emit a void constant"); - Expr::EvalResult Result; - bool Success; +mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *E, + QualType destType) { + assert(!destType->isVoidType() && "can't emit a void constant"); - // TODO: Implement the missing functionalities below. - assert(!T->isReferenceType() && "NYI"); + if (auto C = ConstExprEmitter(*this).Visit(const_cast(E), destType)) { + if (auto TypedC = C.dyn_cast_or_null()) + return TypedC; + llvm_unreachable("this should always be typed"); + } - // NOTE: Not all constant expressions can be emited by the ConstExprEmitter. - // So we have to fold/evaluate the expression in some cases. - // - // Try folding constant expression into an RValue. - Success = E->EvaluateAsRValue(Result, CGM.getASTContext(), InConstantContext); + Expr::EvalResult Result; - mlir::Attribute C; - if (Success && !Result.HasSideEffects) - C = tryEmitPrivate(Result.Val, T); + bool Success; + + if (destType->isReferenceType()) + Success = E->EvaluateAsLValue(Result, CGM.getASTContext()); else - C = ConstExprEmitter(*this).Visit(const_cast(E), T); + Success = + E->EvaluateAsRValue(Result, CGM.getASTContext(), InConstantContext); - auto typedC = llvm::dyn_cast(C); - if (!typedC) + if (Success && !Result.hasSideEffects()) { + auto C = tryEmitPrivate(Result.Val, destType); + if (auto TypedC = C.dyn_cast_or_null()) + return TypedC; llvm_unreachable("this should always be typed"); - return typedC; + } + + return nullptr; } mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 14763710a001..0e72da619cea 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -1,25 +1,8 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// CHECK: !ty_22Zero22 = !cir.struct // CHECK: !ty_22yep_22 = !cir.struct -struct Zero { - void yolo(); -}; - -void f() { - Zero z0 = Zero(); - // {} no element init. - Zero z1 = Zero{}; -} - -// CHECK: cir.func @_Z1fv() -// CHECK: %0 = cir.alloca !ty_22Zero22, cir.ptr , ["z0", init] -// CHECK: %1 = cir.alloca !ty_22Zero22, cir.ptr , ["z1"] -// CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () -// CHECK: cir.return - typedef enum xxy_ { xxy_Low = 0, xxy_High = 0x3f800000, diff --git a/clang/test/CIR/CodeGen/agg-init2.cpp b/clang/test/CIR/CodeGen/agg-init2.cpp new file mode 100644 index 000000000000..683c6c480f21 --- /dev/null +++ b/clang/test/CIR/CodeGen/agg-init2.cpp @@ -0,0 +1,21 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +// CHECK: !ty_22Zero22 = !cir.struct + +struct Zero { + void yolo(); +}; + +void f() { + Zero z0 = Zero(); + // {} no element init. + Zero z1 = Zero{}; +} + +// CHECK: cir.func @_Z1fv() +// CHECK: %0 = cir.alloca !ty_22Zero22, cir.ptr , ["z0", init] +// CHECK: %1 = cir.alloca !ty_22Zero22, cir.ptr , ["z1"] +// CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/constptr.c b/clang/test/CIR/CodeGen/constptr.c index b400cb8c444f..7f01cb854c6b 100644 --- a/clang/test/CIR/CodeGen/constptr.c +++ b/clang/test/CIR/CodeGen/constptr.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM +// XFAIL: * int *p = (int*)0x1234; diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index f917735593e4..5e5428045a3e 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -4,7 +4,6 @@ // are accounted for. // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s -// XFAIL: * char string[] = "whatnow"; // CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 4ffa1ab35c1a..1d2af8c85414 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -36,7 +36,7 @@ int use_func() { return func(); } // CHECK-NEXT: cir.global external @w = 4.300000e+00 : f64 // CHECK-NEXT: cir.global external @x = #cir.int<51> : !s8i // CHECK-NEXT: cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array -// CHECK-NEXT: cir.global external @alpha = #cir.const_array<[#cir.int<97> : !s8i, #cir.int<98> : !s8i, #cir.int<99> : !s8i, #cir.int<0> : !s8i]> : !cir.array +// CHECK-NEXT: cir.global external @alpha = #cir.const_array<"abc\00" : !cir.array> : !cir.array // CHECK-NEXT: cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global external @s = #cir.global_view<@".str"> : !cir.ptr @@ -80,7 +80,7 @@ int use_func() { return func(); } char string[] = "whatnow"; -// CHECK: cir.global external @string = #cir.const_array<[#cir.int<119> : !s8i, #cir.int<104> : !s8i, #cir.int<97> : !s8i, #cir.int<116> : !s8i, #cir.int<110> : !s8i, #cir.int<111> : !s8i, #cir.int<119> : !s8i, #cir.int<0> : !s8i]> : !cir.array +// CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array unsigned uint[] = {255}; // CHECK: cir.global external @uint = #cir.const_array<[#cir.int<255> : !u32i]> : !cir.array short sshort[] = {11111, 22222}; diff --git a/clang/test/CIR/CodeGen/static-vars.c b/clang/test/CIR/CodeGen/static-vars.c index 26f94bd62e64..714ce76f930b 100644 --- a/clang/test/CIR/CodeGen/static-vars.c +++ b/clang/test/CIR/CodeGen/static-vars.c @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * void func1(void) { // Should lower default-initialized static vars. From 6529e3ddcd93588898971c530e0fde575b9a2822 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 6 Oct 2023 18:49:17 -0400 Subject: [PATCH 1215/2301] [CIR] Replace an assert with an unreachable --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 699ba7468f4f..55d4c2baaf93 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -705,7 +705,7 @@ class ConstExprEmitter mlir::Attribute VisitCastExpr(CastExpr *E, QualType destType) { if (const auto *ECE = dyn_cast(E)) - assert(0 && "not implemented"); + llvm_unreachable("NYI"); Expr *subExpr = E->getSubExpr(); switch (E->getCastKind()) { From 2b492d5f2e737d179a7399984cfcf5704e35a80e Mon Sep 17 00:00:00 2001 From: Keyi Zhang Date: Tue, 17 Oct 2023 18:06:50 -0700 Subject: [PATCH 1216/2301] [CIR][Lowering] Add cir.brcond lowering (#278) This PR adds `cir.brcond` lowering, which more or less follows the one in LLVM dialect lowering. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 23 +++++++++++- .../test/CIR/Lowering/ThroughMLIR/branch.cir | 37 +++++++++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/branch.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index c3b819e16d6e..8471230c6eab 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -527,6 +527,26 @@ class CIRScopeOpLowering } }; +struct CIRBrCondOpLowering + : public mlir::OpConversionPattern { + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BrCondOp brOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + + auto condition = adaptor.getCond(); + auto i1Condition = rewriter.create( + brOp.getLoc(), rewriter.getI1Type(), condition); + rewriter.replaceOpWithNewOp( + brOp, i1Condition.getResult(), brOp.getDestTrue(), + adaptor.getDestOperandsTrue(), brOp.getDestFalse(), + adaptor.getDestOperandsFalse()); + + return mlir::success(); + } +}; + void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -534,7 +554,8 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add(converter, patterns.getContext()); + CIRScopeOpLowering, CIRBrCondOpLowering>(converter, + patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/branch.cir b/clang/test/CIR/Lowering/ThroughMLIR/branch.cir new file mode 100644 index 000000000000..83c980838890 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/branch.cir @@ -0,0 +1,37 @@ +// RUN: cir-opt %s -cir-to-mlir | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +cir.func @foo(%arg0: !cir.bool) -> !s32i { + cir.brcond %arg0 ^bb1, ^bb2 + ^bb1: + %0 = cir.const(#cir.int<1>: !s32i) : !s32i + cir.return %0 : !s32i + ^bb2: + %1 = cir.const(#cir.int<0>: !s32i) : !s32i + cir.return %1 : !s32i +} + +// MLIR: module { +// MLIR-NEXT: func.func @foo(%arg0: i8) -> i32 +// MLIR-NEXT: %0 = arith.trunci %arg0 : i8 to i1 +// MLIR-NEXT: cf.cond_br %0, ^bb1, ^bb2 +// MLIR-NEXT: ^bb1: // pred: ^bb0 +// MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 +// MLIR-NEXT: return %c1_i32 : i32 +// MLIR-NEXT: ^bb2: // pred: ^bb0 +// MLIR-NEXT: %c0_i32 = arith.constant 0 : i32 +// MLIR-NEXT: return %c0_i32 : i32 +// MLIR-NEXT: } +// MLIR-NEXT: } + +// LLVM: define i32 @foo(i8 %0) +// LLVM-NEXT: %2 = trunc i8 %0 to i1 +// LLVM-NEXT: br i1 %2, label %3, label %4 +// LLVM-EMPTY: +// LLVM-NEXT: 3: ; preds = %1 +// LLVM-NEXT: ret i32 1 +// LLVM-EMPTY: +// LLVM-NEXT: 4: ; preds = %1 +// LLVM-NEXT: ret i32 0 +// LLVM-NEXT: } From 72467a157536ea2cd40e40a93ab64ab2a6b19601 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 18 Oct 2023 04:20:06 +0300 Subject: [PATCH 1217/2301] [CIR][IR] Relax get_member verifier for incomplete types (#269) This is a suggestion to relax the existing verification even more than we did it in PR #257. Here we also skip verification if a field on the given index is also of incomplete type - and we can not compare it with the result type of the operation. Now the next code fails with type mismatch error: ``` typedef struct Node { struct Node* next; } NodeStru; void foo(NodeStru* a) { a->next = 0; } ``` because the result type is kind of full and the type of field is not (for the reasons discussed in #256). Basically, the problem is in the `GetMemberOp` result type generated as following (via `CIRGenTypes::convertType`) `!cir.ptr>} #cir.record.decl.ast>>` where the field type at index differs from the record type - compare with `!cir.ptr>` We just slightly relax the previous solution in #257 - and the compilation won't fail in the case of recursive types. Well, if there are some other thoughts? --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 18 +++++++++++++++--- clang/test/CIR/CodeGen/struct.c | 13 +++++++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 5763971fa49e..99d7381c577c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2412,6 +2412,14 @@ LogicalResult MemCpyOp::verify() { return mlir::success(); } +static bool isIncompleteType(mlir::Type typ) { + if (auto ptr = typ.dyn_cast()) + return isIncompleteType(ptr.getPointee()); + else if (auto rec = typ.dyn_cast()) + return !rec.getBody(); + return false; +} + //===----------------------------------------------------------------------===// // GetMemberOp Definitions //===----------------------------------------------------------------------===// @@ -2424,7 +2432,7 @@ LogicalResult GetMemberOp::verify() { // FIXME: currently we bypass typechecking of incomplete types due to errors // in the codegen process. This should be removed once the codegen is fixed. - if (!recordTy.getBody()) + if (isIncompleteType(recordTy)) return mlir::success(); if (recordTy.getMembers().size() <= getIndex()) @@ -2432,8 +2440,12 @@ LogicalResult GetMemberOp::verify() { // FIXME(cir): member type check is disabled for classes as the codegen for // these still need to be patched. - if (!recordTy.isClass() && - recordTy.getMembers()[getIndex()] != getResultTy().getPointee()) + // Also we bypass the typechecking for the fields of incomplete types. + bool shouldSkipMemberTypeMismatch = + recordTy.isClass() || isIncompleteType(recordTy.getMembers()[getIndex()]); + + if (!shouldSkipMemberTypeMismatch + && recordTy.getMembers()[getIndex()] != getResultTy().getPointee()) return emitError() << "member type mismatch"; return mlir::success(); diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index d53b15954abd..f2883f280f1f 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -12,11 +12,18 @@ struct Foo { struct Bar z; }; +// Recursive type +typedef struct Node { + struct Node* next; +} NodeStru; + void baz(void) { struct Bar b; struct Foo f; } +// CHECK-DAG: !ty_22Node22 = !cir.struct +// CHECK-DAG: !ty_22Node221 = !cir.struct} #cir.record.decl.ast> // CHECK-DAG: !ty_22Bar22 = !cir.struct // CHECK-DAG: !ty_22Foo22 = !cir.struct // CHECK-DAG: module {{.*}} { @@ -78,3 +85,9 @@ struct Bar shouldGenerateAndAccessStructArrays(void) { // CHECK-DAG: %[[#DARR:]] = cir.cast(array_to_ptrdecay, %{{.+}} : !cir.ptr>), !cir.ptr // CHECK-DAG: %[[#ELT:]] = cir.ptr_stride(%[[#DARR]] : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr // CHECK-DAG: cir.copy %[[#ELT]] to %{{.+}} : !cir.ptr + +// CHECK-DAG: cir.func @useRecursiveType +// CHECK-DAG: cir.get_member {{%.}}[0] {name = "next"} : !cir.ptr -> !cir.ptr> +void useRecursiveType(NodeStru* a) { + a->next = 0; +} From 9d350609d882dbfdbfec80ae0c208f28cb73749e Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 18 Oct 2023 04:23:36 +0300 Subject: [PATCH 1218/2301] [CIR][CodeGen][Bugfix] supports local structs decl (#280) Just a trivial fix that enables declaration of local structs. Basically, there it's a copy-pasta from the original `CodeGen`, without debug info handling. Co-authored-by: Bruno Cardoso Lopes --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 6 ++++-- clang/test/CIR/CodeGen/struct.c | 9 +++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 5e5868b5edf2..f278be7ac107 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -726,10 +726,12 @@ void CIRGenFunction::buildDecl(const Decl &D) { llvm_unreachable("Declaration should not be in declstmts!"); case Decl::Record: // struct/union/class X; case Decl::CXXRecord: // struct/union/class X; [C++] - llvm_unreachable("NYI"); + if (auto *DI = getDebugInfo()) + llvm_unreachable("NYI"); return; case Decl::Enum: // enum X; - llvm_unreachable("NYI"); + if (auto *DI = getDebugInfo()) + llvm_unreachable("NYI"); return; case Decl::Function: // void X(); case Decl::EnumConstant: // enum ? { X = ? } diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index f2883f280f1f..fc0775af77f4 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -86,6 +86,15 @@ struct Bar shouldGenerateAndAccessStructArrays(void) { // CHECK-DAG: %[[#ELT:]] = cir.ptr_stride(%[[#DARR]] : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr // CHECK-DAG: cir.copy %[[#ELT]] to %{{.+}} : !cir.ptr +// CHECK-DAG: cir.func @local_decl +// CHECK-DAG: {{%.}} = cir.alloca !ty_22Local22, cir.ptr , ["a"] +void local_decl(void) { + struct Local { + int i; + }; + struct Local a; +} + // CHECK-DAG: cir.func @useRecursiveType // CHECK-DAG: cir.get_member {{%.}}[0] {name = "next"} : !cir.ptr -> !cir.ptr> void useRecursiveType(NodeStru* a) { From 14d2e513b23a9b2baa6c0ae60481251c444f408f Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 18 Oct 2023 04:28:52 +0300 Subject: [PATCH 1219/2301] [CIR][CodeGen][Bugfix] fixes global vars initialization (#281) This PR handles globals initializations for c++ code for the case when a global is inited from a function call or another global. --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 3 +- .../Dialect/Transforms/LoweringPrepare.cpp | 115 +++++++++--------- clang/test/CIR/CodeGen/globals.cpp | 11 +- 3 files changed, 71 insertions(+), 58 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index d6c33dcd5ce7..8d88746d017e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -45,7 +45,8 @@ static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, AggValueSlot::DoesNotOverlap)); return; case TEK_Scalar: - llvm_unreachable("scalar evaluation NYI"); + CGF.buildScalarInit(Init, CGF.getLoc(D->getLocation()), lv, false); + return; case TEK_Complex: llvm_unreachable("complext evaluation NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 9e0b9ec4a203..8ec0d76226fb 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -160,63 +160,66 @@ cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { block.begin(), std::prev(block.end())); // Register the destructor call with __cxa_atexit - - assert(op.getAst() && op.getAst()->getTLSKind() == clang::VarDecl::TLS_None && - " TLS NYI"); - // Create a variable that binds the atexit to this shared object. - builder.setInsertionPointToStart(&theModule.getBodyRegion().front()); - auto Handle = buildRuntimeVariable(builder, "__dso_handle", op.getLoc(), - builder.getI8Type()); - - // Look for the destructor call in dtorBlock - auto &dtorBlock = op.getDtorRegion().front(); - mlir::cir::CallOp dtorCall; - for (auto op : reverse(dtorBlock.getOps())) { - dtorCall = op; - break; + auto &dtorRegion = op.getDtorRegion(); + if (!dtorRegion.empty()) { + assert(op.getAst() && + op.getAst()->getTLSKind() == clang::VarDecl::TLS_None && " TLS NYI"); + // Create a variable that binds the atexit to this shared object. + builder.setInsertionPointToStart(&theModule.getBodyRegion().front()); + auto Handle = buildRuntimeVariable(builder, "__dso_handle", op.getLoc(), + builder.getI8Type()); + + // Look for the destructor call in dtorBlock + auto &dtorBlock = dtorRegion.front(); + mlir::cir::CallOp dtorCall; + for (auto op : reverse(dtorBlock.getOps())) { + dtorCall = op; + break; + } + assert(dtorCall && "Expected a dtor call"); + cir::FuncOp dtorFunc = getCalledFunction(dtorCall); + assert(dtorFunc && + mlir::isa(*dtorFunc.getAst()) && + "Expected a dtor call"); + + // Create a runtime helper function: + // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); + auto voidPtrTy = + ::mlir::cir::PointerType::get(builder.getContext(), voidTy); + auto voidFnTy = mlir::cir::FuncType::get({voidPtrTy}, voidTy); + auto voidFnPtrTy = + ::mlir::cir::PointerType::get(builder.getContext(), voidFnTy); + auto HandlePtrTy = + mlir::cir::PointerType::get(builder.getContext(), Handle.getSymType()); + auto fnAtExitType = mlir::cir::FuncType::get( + {voidFnPtrTy, voidPtrTy, HandlePtrTy}, + mlir::cir::VoidType::get(builder.getContext())); + const char *nameAtExit = "__cxa_atexit"; + FuncOp fnAtExit = + buildRuntimeFunction(builder, nameAtExit, op.getLoc(), fnAtExitType); + + // Replace the dtor call with a call to __cxa_atexit(&dtor, &var, + // &__dso_handle) + builder.setInsertionPointAfter(dtorCall); + mlir::Value args[3]; + auto dtorPtrTy = mlir::cir::PointerType::get(builder.getContext(), + dtorFunc.getFunctionType()); + // dtorPtrTy + args[0] = builder.create( + dtorCall.getLoc(), dtorPtrTy, dtorFunc.getSymName()); + args[0] = builder.create( + dtorCall.getLoc(), voidFnPtrTy, mlir::cir::CastKind::bitcast, args[0]); + args[1] = builder.create(dtorCall.getLoc(), voidPtrTy, + mlir::cir::CastKind::bitcast, + dtorCall.getArgOperand(0)); + args[2] = builder.create( + Handle.getLoc(), HandlePtrTy, Handle.getSymName()); + builder.create(dtorCall.getLoc(), fnAtExit, args); + dtorCall->erase(); + entryBB->getOperations().splice(entryBB->end(), dtorBlock.getOperations(), + dtorBlock.begin(), + std::prev(dtorBlock.end())); } - assert(dtorCall && "Expected a dtor call"); - cir::FuncOp dtorFunc = getCalledFunction(dtorCall); - assert(dtorFunc && - mlir::isa(*dtorFunc.getAst()) && - "Expected a dtor call"); - - // Create a runtime helper function: - // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); - auto voidPtrTy = ::mlir::cir::PointerType::get(builder.getContext(), voidTy); - auto voidFnTy = mlir::cir::FuncType::get({voidPtrTy}, voidTy); - auto voidFnPtrTy = - ::mlir::cir::PointerType::get(builder.getContext(), voidFnTy); - auto HandlePtrTy = - mlir::cir::PointerType::get(builder.getContext(), Handle.getSymType()); - auto fnAtExitType = - mlir::cir::FuncType::get({voidFnPtrTy, voidPtrTy, HandlePtrTy}, - mlir::cir::VoidType::get(builder.getContext())); - const char *nameAtExit = "__cxa_atexit"; - FuncOp fnAtExit = - buildRuntimeFunction(builder, nameAtExit, op.getLoc(), fnAtExitType); - - // Replace the dtor call with a call to __cxa_atexit(&dtor, &var, - // &__dso_handle) - builder.setInsertionPointAfter(dtorCall); - mlir::Value args[3]; - auto dtorPtrTy = mlir::cir::PointerType::get(builder.getContext(), - dtorFunc.getFunctionType()); - // dtorPtrTy - args[0] = builder.create(dtorCall.getLoc(), dtorPtrTy, - dtorFunc.getSymName()); - args[0] = builder.create( - dtorCall.getLoc(), voidFnPtrTy, mlir::cir::CastKind::bitcast, args[0]); - args[1] = builder.create(dtorCall.getLoc(), voidPtrTy, - mlir::cir::CastKind::bitcast, - dtorCall.getArgOperand(0)); - args[2] = builder.create(Handle.getLoc(), HandlePtrTy, - Handle.getSymName()); - builder.create(dtorCall.getLoc(), fnAtExit, args); - dtorCall->erase(); - entryBB->getOperations().splice(entryBB->end(), dtorBlock.getOperations(), - dtorBlock.begin(), - std::prev(dtorBlock.end())); // Replace cir.yield with cir.return builder.setInsertionPointToEnd(entryBB); diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 1d2af8c85414..4792cb341400 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -5,6 +5,7 @@ int a = 3; const int b = 4; // unless used wont be generated unsigned long int c = 2; +int d = a; float y = 3.4; double w = 4.3; char x = '3'; @@ -32,7 +33,15 @@ int use_func() { return func(); } // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @a = #cir.int<3> : !s32i // CHECK-NEXT: cir.global external @c = #cir.int<2> : !u64i -// CHECK-NEXT: cir.global external @y = 3.400000e+00 : f32 +// CHECK-NEXT: cir.global external @d = #cir.int<0> : !s32i + +// CHECK-NEXT: cir.func internal private @__cxx_global_var_init() +// CHECK-NEXT: [[TMP0:%.*]] = cir.get_global @d : cir.ptr +// CHECK-NEXT: [[TMP1:%.*]] = cir.get_global @a : cir.ptr +// CHECK-NEXT: [[TMP2:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +// CHECK-NEXT: cir.store [[TMP2]], [[TMP0]] : !s32i, cir.ptr + +// CHECK: cir.global external @y = 3.400000e+00 : f32 // CHECK-NEXT: cir.global external @w = 4.300000e+00 : f64 // CHECK-NEXT: cir.global external @x = #cir.int<51> : !s8i // CHECK-NEXT: cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array From 1d50ef9e94b0471a15540ad80e8544bafdac9598 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 17 Oct 2023 18:29:47 -0700 Subject: [PATCH 1220/2301] [CIR][CIRGen] Ensure unique IDs for anonymous records (#274) Traditional Clang's codegen generates IDs for anonymous records (e.g. "struct.anon.1") and ensures that they are unique. This patch does the same for CIRGen, which, until now, would just identify any anonymous record as "anon". This will be required to support mutable structs uniquely identified by their names. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 +++++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 +- clang/test/CIR/CodeGen/bitfields.c | 4 +-- clang/test/CIR/CodeGen/bitfields.cpp | 7 ++-- clang/test/CIR/CodeGen/coro-task.cpp | 4 +-- clang/test/CIR/CodeGen/lambda.cpp | 46 +++++++++++++-------------- clang/test/CIR/CodeGen/union.cpp | 10 +++--- 7 files changed, 45 insertions(+), 36 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index ac54edaa3eaa..6b2844785374 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -32,6 +32,7 @@ #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FloatingPointMode.h" #include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringSet.h" #include "llvm/Support/ErrorHandling.h" #include #include @@ -48,11 +49,18 @@ class CIRGenBuilderTy : public mlir::OpBuilder { llvm::RoundingMode DefaultConstrainedRounding = llvm::RoundingMode::Dynamic; llvm::StringMap GlobalsVersioning; + llvm::StringSet<> anonRecordNames; public: CIRGenBuilderTy(mlir::MLIRContext &C, const CIRGenTypeCache &tc) : mlir::OpBuilder(&C), typeCache(tc) {} + std::string getUniqueAnonRecordName() { + std::string name = "anon." + std::to_string(anonRecordNames.size()); + anonRecordNames.insert(name); + return name; + } + // // Floating point specific helpers // ------------------------------- diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index c288274e15bb..603864c30218 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -60,7 +60,7 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, else typedefNameDecl->printName(outStream); } else { - outStream << "anon"; + outStream << Builder.getUniqueAnonRecordName(); } if (!suffix.empty()) diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index 919ca317952c..ec8d7c66af3a 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -29,8 +29,8 @@ typedef struct { } T; // CHECK: !ty_22S22 = !cir.struct // CHECK: !ty_22T22 = !cir.struct -// CHECK: !ty_22anon22 = !cir.struct -// CHECK: !ty_22__long22 = !cir.struct}> +// CHECK: !ty_22anon2E122 = !cir.struct +// CHECK: !ty_22__long22 = !cir.struct}> // CHECK: cir.func {{.*@store_field}} // CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 3ab1652c8fd3..af086c111204 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -27,10 +27,11 @@ typedef struct { int a : 3; // one bitfield with size < 8 unsigned b; } T; + // CHECK: !ty_22S22 = !cir.struct // CHECK: !ty_22T22 = !cir.struct -// CHECK: !ty_22anon22 = !cir.struct -// CHECK: !ty_22__long22 = !cir.struct}> +// CHECK: !ty_22anon2E122 = !cir.struct +// CHECK: !ty_22__long22 = !cir.struct}> // CHECK: cir.func @_Z11store_field // CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , @@ -96,4 +97,4 @@ unsigned load_non_bitfield(S& s) { // CHECK: cir.func @_Z17load_one_bitfield int load_one_bitfield(T& t) { return t.a; -} \ No newline at end of file +} diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 108d4947da1d..edf1df65de45 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -359,10 +359,10 @@ folly::coro::Task go4() { // CHECK: } // CHECK: %12 = cir.scope { -// CHECK: %17 = cir.alloca !ty_22anon221, cir.ptr , ["ref.tmp1"] {alignment = 1 : i64} +// CHECK: %17 = cir.alloca !ty_22anon2E522, cir.ptr , ["ref.tmp1"] {alignment = 1 : i64} // Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` -// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> // CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> // CHECK: cir.yield %19 : !cir.ptr)>> // CHECK: } diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 43fc8003d00e..d9eca2c0fbc2 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -6,13 +6,13 @@ void fn() { a(); } -// CHECK: !ty_22anon22 = !cir.struct +// CHECK: !ty_22anon2E222 = !cir.struct // CHECK-DAG: module // CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv // CHECK: cir.func @_Z2fnv() -// CHECK-NEXT: %0 = cir.alloca !ty_22anon22, cir.ptr , ["a"] +// CHECK-NEXT: %0 = cir.alloca !ty_22anon2E222, cir.ptr , ["a"] // CHECK: cir.call @_ZZ2fnvENK3$_0clEv void l0() { @@ -23,15 +23,15 @@ void l0() { // CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv( -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %2 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %3 = cir.load %2 : cir.ptr >, !cir.ptr // CHECK: %4 = cir.load %3 : cir.ptr , !s32i // CHECK: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %6 = cir.binop(add, %4, %5) : !s32i -// CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr // CHECK: cir.store %6, %8 : !s32i, cir.ptr @@ -45,15 +45,15 @@ auto g() { }; } -// CHECK: cir.func @_Z1gv() -> !ty_22anon223 -// CHECK: %0 = cir.alloca !ty_22anon223, cir.ptr , ["__retval"] {alignment = 8 : i64} +// CHECK: cir.func @_Z1gv() -> !ty_22anon2E622 +// CHECK: %0 = cir.alloca !ty_22anon2E622, cir.ptr , ["__retval"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK: %2 = cir.const(#cir.int<12> : !s32i) : !s32i // CHECK: cir.store %2, %1 : !s32i, cir.ptr -// CHECK: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > -// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22anon223 -// CHECK: cir.return %4 : !ty_22anon223 +// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22anon2E622 +// CHECK: cir.return %4 : !ty_22anon2E622 auto g2() { int i = 12; @@ -65,15 +65,15 @@ auto g2() { } // Should be same as above because of NRVO -// CHECK: cir.func @_Z2g2v() -> !ty_22anon224 -// CHECK-NEXT: %0 = cir.alloca !ty_22anon224, cir.ptr , ["__retval", init] {alignment = 8 : i64} +// CHECK: cir.func @_Z2g2v() -> !ty_22anon2E822 +// CHECK-NEXT: %0 = cir.alloca !ty_22anon2E822, cir.ptr , ["__retval", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.const(#cir.int<12> : !s32i) : !s32i // CHECK-NEXT: cir.store %2, %1 : !s32i, cir.ptr -// CHECK-NEXT: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK-NEXT: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK-NEXT: cir.store %1, %3 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22anon224 -// CHECK-NEXT: cir.return %4 : !ty_22anon224 +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22anon2E822 +// CHECK-NEXT: cir.return %4 : !ty_22anon2E822 int f() { return g2()(); @@ -82,10 +82,10 @@ int f() { // CHECK: cir.func @_Z1fv() -> !s32i // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !ty_22anon224, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} -// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_22anon224 -// CHECK-NEXT: cir.store %3, %2 : !ty_22anon224, cir.ptr -// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i +// CHECK-NEXT: %2 = cir.alloca !ty_22anon2E822, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_22anon2E822 +// CHECK-NEXT: cir.store %3, %2 : !ty_22anon2E822, cir.ptr +// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i // CHECK-NEXT: cir.store %4, %0 : !s32i, cir.ptr // CHECK-NEXT: } // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i @@ -114,8 +114,8 @@ int g3() { // 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. // CHECK: %3 = cir.scope { -// CHECK: %7 = cir.alloca !ty_22anon221, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} -// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %7 = cir.alloca !ty_22anon2E1122, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> // CHECK: %9 = cir.unary(plus, %8) : !cir.ptr)>>, !cir.ptr)>> // CHECK: cir.yield %9 : !cir.ptr)>> // CHECK: } diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index da96f418e001..cc75106e659f 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -7,13 +7,13 @@ typedef union { yolo y; struct { int *lifecnt; int genpad; }; } yolm2; typedef union { yolo y; struct { bool life; int genpad; }; } yolm3; // CHECK-DAG: !ty_22U23A3ADummy22 = !cir.struct -// CHECK-DAG: !ty_22anon221 = !cir.struct +// CHECK-DAG: !ty_22anon2E522 = !cir.struct // CHECK-DAG: !ty_22yolo22 = !cir.struct -// CHECK-DAG: !ty_22anon222 = !cir.struct, !s32i} #cir.record.decl.ast> +// CHECK-DAG: !ty_22anon2E322 = !cir.struct, !s32i} #cir.record.decl.ast> -// CHECK-DAG: !ty_22yolm22 = !cir.struct -// CHECK-DAG: !ty_22yolm322 = !cir.struct -// CHECK-DAG: !ty_22yolm222 = !cir.struct +// CHECK-DAG: !ty_22yolm22 = !cir.struct +// CHECK-DAG: !ty_22yolm322 = !cir.struct +// CHECK-DAG: !ty_22yolm222 = !cir.struct // Should generate a union type with all members preserved. union U { From c3e9d0d7acec6fa0aa89768cd8978a4a78ac5b67 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 17 Oct 2023 18:30:48 -0700 Subject: [PATCH 1221/2301] [CIR] Rename StructType "typeName" attribute to "name" (#275) Rename `typeName` to just `name`, also use `StringAttr`'s nullability to identify if the record is identified or anonymous. Unnamed structs are also no longer aliased, as they have no unique name to be used in the alias. --- clang/include/clang/CIR/Dialect/IR/CIRTypes.td | 4 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 5 ++++- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 10 +++++----- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- clang/test/CIR/IR/aliases.cir | 10 ++++++++++ 5 files changed, 22 insertions(+), 9 deletions(-) create mode 100644 clang/test/CIR/IR/aliases.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index ea0738d19245..24cc591521ed 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -108,7 +108,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct", let parameters = (ins ArrayRefParameter<"mlir::Type", "members">:$members, - "mlir::StringAttr":$typeName, + "mlir::StringAttr":$name, "bool":$body, "bool":$packed, "mlir::cir::StructType::RecordKind":$kind, @@ -138,7 +138,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct", bool isPadded(const ::mlir::DataLayout &dataLayout) const; std::string getPrefixedName() { - const auto name = getTypeName().getValue().str(); + const auto name = getName().getValue().str(); switch (getKind()) { case RecordKind::Class: return "class." + name; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 99d7381c577c..a518ae7401b2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -53,7 +53,10 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { AliasResult getAlias(Type type, raw_ostream &os) const final { if (auto structType = type.dyn_cast()) { - os << "ty_" << structType.getTypeName(); + // TODO(cir): generate unique alias names for anonymous records. + if (!structType.getName()) + return AliasResult::NoAlias; + os << "ty_" << structType.getName(); return AliasResult::OverridableAlias; } if (auto intType = type.dyn_cast()) { diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 241f570b8719..f8a739399ea4 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -107,7 +107,6 @@ Type StructType::getLargestMember(const ::mlir::DataLayout &dataLayout) const { Type StructType::parse(mlir::AsmParser &parser) { const auto loc = parser.getCurrentLocation(); llvm::SmallVector members; - mlir::StringAttr id; bool body = false; bool packed = false; mlir::cir::ASTRecordDeclAttr ast = nullptr; @@ -129,8 +128,8 @@ Type StructType::parse(mlir::AsmParser &parser) { return {}; } - if (parser.parseAttribute(id)) - return {}; + mlir::StringAttr name; + parser.parseOptionalAttribute(name); if (parser.parseOptionalKeyword("packed").succeeded()) packed = true; @@ -155,7 +154,7 @@ Type StructType::parse(mlir::AsmParser &parser) { if (parser.parseGreater()) return {}; - return StructType::get(parser.getContext(), members, id, body, packed, kind, + return StructType::get(parser.getContext(), members, name, body, packed, kind, std::nullopt); } @@ -174,7 +173,8 @@ void StructType::print(mlir::AsmPrinter &printer) const { break; } - printer << getTypeName() << " "; + if (getName()) + printer << getName() << " "; if (getPacked()) printer << "packed "; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 84fc520f220c..6fa1f8c5c5dc 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1910,7 +1910,7 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, // Struct has a name: lower as an identified struct. mlir::LLVM::LLVMStructType llvmStruct; - if (type.getTypeName().size() != 0) { + if (type.getName().size() != 0) { llvmStruct = mlir::LLVM::LLVMStructType::getIdentified( type.getContext(), type.getPrefixedName()); if (llvmStruct.setBody(llvmMembers, /*isPacked=*/type.getPacked()) diff --git a/clang/test/CIR/IR/aliases.cir b/clang/test/CIR/IR/aliases.cir new file mode 100644 index 000000000000..a22c5dba4bcc --- /dev/null +++ b/clang/test/CIR/IR/aliases.cir @@ -0,0 +1,10 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +!s32i = !cir.int +module { + // CHECK: cir.func @shouldNotUseAliasWithAnonStruct(%arg0: !cir.struct) + cir.func @shouldNotUseAliasWithAnonStruct(%arg0 : !cir.struct) { + cir.return + } +} From 661a669d734dc3a131ac14d4bee420e74fb43aae Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Tue, 17 Oct 2023 21:03:38 -0700 Subject: [PATCH 1222/2301] [CIR][Codegen] RTTI support for virtual class inheritence (#259) This patch adds RTTI support for C++ virtual inheritance. This patch does not include LLVM lowering support. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 3 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 150 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 15 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 5 +- clang/test/CIR/CodeGen/vbase.cpp | 20 ++- 5 files changed, 176 insertions(+), 17 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 4ec2657bdb0e..f01ee98c4c55 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -303,7 +303,8 @@ def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> { The verifier enforces that the output type is always a `!cir.struct`, and that the ArrayAttr element types match the equivalent member type - for the resulting struct. + for the resulting struct, i.e, a GlobalViewAttr for symbol reference or + an IntAttr for flags. Example: diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index ceb0b8e5efa8..df052b83430b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -27,6 +27,7 @@ #include "clang/AST/VTableBuilder.h" #include "clang/Basic/Linkage.h" #include "clang/Basic/TargetInfo.h" +#include "llvm/Support/ErrorHandling.h" using namespace cir; using namespace clang; @@ -821,7 +822,7 @@ class CIRGenItaniumRTTIBuilder { /// Build an abi::__vmi_class_type_info, used for /// classes with bases that do not satisfy the abi::__si_class_type_info /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c. - void BuildVMIClassTypeInfo(const CXXRecordDecl *RD); + void BuildVMIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *RD); // /// Build an abi::__pointer_type_info struct, used // /// for pointer types. @@ -1433,8 +1434,149 @@ void CIRGenItaniumRTTIBuilder::BuildSIClassTypeInfo(mlir::Location loc, Fields.push_back(BaseTypeInfo); } -void CIRGenItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) { - // TODO: Implement this function. +namespace { +/// Contains virtual and non-virtual bases seen when traversing a class +/// hierarchy. +struct SeenBases { + llvm::SmallPtrSet NonVirtualBases; + llvm::SmallPtrSet VirtualBases; +}; +} // namespace + +/// Compute the value of the flags member in abi::__vmi_class_type_info. +/// +static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base, + SeenBases &Bases) { + + unsigned Flags = 0; + + auto *BaseDecl = + cast(Base->getType()->castAs()->getDecl()); + + if (Base->isVirtual()) { + // Mark the virtual base as seen. + if (!Bases.VirtualBases.insert(BaseDecl).second) { + // If this virtual base has been seen before, then the class is diamond + // shaped. + Flags |= CIRGenItaniumRTTIBuilder::VMI_DiamondShaped; + } else { + if (Bases.NonVirtualBases.count(BaseDecl)) + Flags |= CIRGenItaniumRTTIBuilder::VMI_NonDiamondRepeat; + } + } else { + // Mark the non-virtual base as seen. + if (!Bases.NonVirtualBases.insert(BaseDecl).second) { + // If this non-virtual base has been seen before, then the class has non- + // diamond shaped repeated inheritance. + Flags |= CIRGenItaniumRTTIBuilder::VMI_NonDiamondRepeat; + } else { + if (Bases.VirtualBases.count(BaseDecl)) + Flags |= CIRGenItaniumRTTIBuilder::VMI_NonDiamondRepeat; + } + } + + // Walk all bases. + for (const auto &I : BaseDecl->bases()) + Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases); + + return Flags; +} + +static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) { + unsigned Flags = 0; + SeenBases Bases; + + // Walk all bases. + for (const auto &I : RD->bases()) + Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases); + + return Flags; +} + +/// Build an abi::__vmi_class_type_info, used for +/// classes with bases that do not satisfy the abi::__si_class_type_info +/// constraints, according to the Itanium C++ ABI, 2.9.5p5c. +void CIRGenItaniumRTTIBuilder::BuildVMIClassTypeInfo(mlir::Location loc, + const CXXRecordDecl *RD) { + auto UnsignedIntLTy = + CGM.getTypes().ConvertType(CGM.getASTContext().UnsignedIntTy); + // Itanium C++ ABI 2.9.5p6c: + // __flags is a word with flags describing details about the class + // structure, which may be referenced by using the __flags_masks + // enumeration. These flags refer to both direct and indirect bases. + unsigned Flags = ComputeVMIClassTypeInfoFlags(RD); + Fields.push_back(mlir::cir::IntAttr::get(UnsignedIntLTy, Flags)); + + // Itanium C++ ABI 2.9.5p6c: + // __base_count is a word with the number of direct proper base class + // descriptions that follow. + Fields.push_back(mlir::cir::IntAttr::get(UnsignedIntLTy, RD->getNumBases())); + + if (!RD->getNumBases()) + return; + + // Now add the base class descriptions. + + // Itanium C++ ABI 2.9.5p6c: + // __base_info[] is an array of base class descriptions -- one for every + // direct proper base. Each description is of the type: + // + // struct abi::__base_class_type_info { + // public: + // const __class_type_info *__base_type; + // long __offset_flags; + // + // enum __offset_flags_masks { + // __virtual_mask = 0x1, + // __public_mask = 0x2, + // __offset_shift = 8 + // }; + // }; + + // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long + // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on + // LLP64 platforms. + // FIXME: Consider updating libc++abi to match, and extend this logic to all + // LLP64 platforms. + QualType OffsetFlagsTy = CGM.getASTContext().LongTy; + const TargetInfo &TI = CGM.getASTContext().getTargetInfo(); + if (TI.getTriple().isOSCygMing() && + TI.getPointerWidth(LangAS::Default) > TI.getLongWidth()) + OffsetFlagsTy = CGM.getASTContext().LongLongTy; + auto OffsetFlagsLTy = CGM.getTypes().ConvertType(OffsetFlagsTy); + + for (const auto &Base : RD->bases()) { + // The __base_type member points to the RTTI for the base type. + Fields.push_back( + CIRGenItaniumRTTIBuilder(CXXABI, CGM).BuildTypeInfo(loc, Base.getType())); + + auto *BaseDecl = + cast(Base.getType()->castAs()->getDecl()); + + int64_t OffsetFlags = 0; + + // All but the lower 8 bits of __offset_flags are a signed offset. + // For a non-virtual base, this is the offset in the object of the base + // subobject. For a virtual base, this is the offset in the virtual table of + // the virtual base offset for the virtual base referenced (negative). + CharUnits Offset; + if (Base.isVirtual()) + Offset = CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset( + RD, BaseDecl); + else + llvm_unreachable("Multi-inheritence NYI"); + + OffsetFlags = uint64_t(Offset.getQuantity()) << 8; + + // The low-order byte of __offset_flags contains flags, as given by the + // masks from the enumeration __offset_flags_masks. + if (Base.isVirtual()) + OffsetFlags |= BCTI_Virtual; + if (Base.getAccessSpecifier() == AS_public) + OffsetFlags |= BCTI_Public; + + Fields.push_back(mlir::cir::IntAttr::get(OffsetFlagsLTy, OffsetFlags)); + } } mlir::Attribute @@ -1561,7 +1703,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( if (CanUseSingleInheritance(RD)) { BuildSIClassTypeInfo(loc, RD); } else { - BuildVMIClassTypeInfo(RD); + BuildVMIClassTypeInfo(loc, RD); } break; diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 789edca33048..76719186c72f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -501,15 +501,16 @@ void CIRGenVTables::buildVTTDefinition(mlir::cir::GlobalOp VTT, } mlir::Attribute Idxs[3] = { - CGM.getBuilder().getI32IntegerAttr(0), - CGM.getBuilder().getI32IntegerAttr(AddressPoint.VTableIndex), - CGM.getBuilder().getI32IntegerAttr(AddressPoint.AddressPointIndex), + mlir::cir::IntAttr::get(CGM.getBuilder().getSInt32Ty(), 0), + mlir::cir::IntAttr::get(CGM.getBuilder().getSInt32Ty(), + AddressPoint.VTableIndex), + mlir::cir::IntAttr::get(CGM.getBuilder().getSInt32Ty(), + AddressPoint.AddressPointIndex), }; - auto Init = mlir::cir::GlobalViewAttr::get( - CGM.getBuilder().getUInt8PtrTy(), - mlir::FlatSymbolRefAttr::get(VTable.getSymNameAttr()), - mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Idxs)); + auto Indices = mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Idxs); + auto Init = CGM.getBuilder().getGlobalViewAttr( + CGM.getBuilder().getUInt8PtrTy(), VTable, Indices); VTTComponents.push_back(Init); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a518ae7401b2..a5e6f5ec6a8f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2333,10 +2333,9 @@ LogicalResult TypeInfoAttr::verify( return failure(); for (auto &member : typeinfoData) { - auto gview = member.dyn_cast_or_null(); - if (gview) + if (llvm::isa(member)) continue; - emitError() << "expected GlobalViewAttr attribute"; + emitError() << "expected GlobalViewAttr or IntAttr attribute"; return failure(); } diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp index 44623e73f8ae..22ea0d236a7b 100644 --- a/clang/test/CIR/CodeGen/vbase.cpp +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -12,8 +12,24 @@ struct B: virtual A { void ppp() { B b; } +// Vtable definition for B // CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr<12> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr]> : !cir.array x 3>}> -// CHECK: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 1> + +// VTT for B. +// CHECK: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [#cir.int<0> : !s32i, #cir.int<0> : !s32i, #cir.int<3> : !s32i]> : !cir.ptr]> : !cir.array x 1> + // CHECK: cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE + +// Type info name for B // CHECK: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array -// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr}> + +// CHECK: cir.global "private" external @_ZTVN10__cxxabiv117__class_type_infoE : !cir.ptr> + +// Type info name for A +// CHECK: cir.global linkonce_odr @_ZTS1A = #cir.const_array<"1A" : !cir.array> : !cir.array + +// Type info A. +// CHECK: cir.global constant external @_ZTI1A = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1A> : !cir.ptr}> + +// Type info B. +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.int<0> : !u32i, #cir.int<1> : !u32i, #cir.global_view<@_ZTI1A> : !cir.ptr, #cir.int<-6141> : !s64i}> From 3f23a184a1f6221e7e3d1f96ff4d901be3e03a35 Mon Sep 17 00:00:00 2001 From: Hongtao Yu Date: Wed, 18 Oct 2023 22:20:15 -0700 Subject: [PATCH 1223/2301] [CIR][Lowering] Lower vtable and type info (#264) Lowering Vtable and RTTI globals. Also lowering AddressPoint. based on https://github.com/llvm/clangir/pull/259 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 136 +++++++++++++++--- clang/test/CIR/CodeGen/vbase.cpp | 30 ++-- 2 files changed, 141 insertions(+), 25 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6fa1f8c5c5dc..7541a96134aa 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -37,6 +37,7 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/IRMapping.h" #include "mlir/IR/Operation.h" +#include "mlir/IR/Types.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" @@ -148,7 +149,41 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, return result; } -// ArrayAttr visitor. +// VTableAttr visitor. +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::VTableAttr vtableArr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto llvmTy = converter->convertType(vtableArr.getType()); + auto loc = parentOp->getLoc(); + mlir::Value result = rewriter.create(loc, llvmTy); + + for (auto [idx, elt] : llvm::enumerate(vtableArr.getVtableData())) { + mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); + result = rewriter.create(loc, result, init, idx); + } + + return result; +} + +// TypeInfoAttr visitor. +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::TypeInfoAttr typeinfoArr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto llvmTy = converter->convertType(typeinfoArr.getType()); + auto loc = parentOp->getLoc(); + mlir::Value result = rewriter.create(loc, llvmTy); + + for (auto [idx, elt] : llvm::enumerate(typeinfoArr.getData())) { + mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); + result = rewriter.create(loc, result, init, idx); + } + + return result; +} + +// ConstArrayAttr visitor mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ConstArrayAttr constArr, mlir::ConversionPatternRewriter &rewriter, @@ -191,27 +226,47 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto module = parentOp->getParentOfType(); - auto sourceSymbol = dyn_cast( - mlir::SymbolTable::lookupSymbolIn(module, globalAttr.getSymbol())); - assert(sourceSymbol && "Unlowered GlobalOp"); - auto loc = parentOp->getLoc(); - - auto addressOfOp = rewriter.create( - loc, mlir::LLVM::LLVMPointerType::get(parentOp->getContext()), - sourceSymbol.getSymName()); + mlir::Type sourceType; + llvm::StringRef symName; + auto sourceSymbol = + mlir::SymbolTable::lookupSymbolIn(module, globalAttr.getSymbol()); + if (auto llvmSymbol = dyn_cast(sourceSymbol)) { + sourceType = llvmSymbol.getType(); + symName = llvmSymbol.getSymName(); + } else if (auto cirSymbol = dyn_cast(sourceSymbol)) { + sourceType = converter->convertType(cirSymbol.getSymType()); + symName = cirSymbol.getSymName(); + } else { + llvm_unreachable("Unexpected GlobalOp type"); + } - assert(!globalAttr.getIndices() && "TODO"); + auto loc = parentOp->getLoc(); + auto srcPtrType = mlir::LLVM::LLVMPointerType::get(parentOp->getContext()); + mlir::Value addrOp = + rewriter.create(loc, srcPtrType, symName); + + if (globalAttr.getIndices()) { + llvm::SmallVector Indices; + for (auto idx : globalAttr.getIndices()) { + auto intAttr = dyn_cast(idx); + assert(intAttr && "index must be integers"); + Indices.push_back(intAttr.getSInt()); + } + auto eltTy = converter->convertType(sourceType); + addrOp = rewriter.create(loc, srcPtrType, eltTy, addrOp, + Indices, true); + } auto ptrTy = globalAttr.getType().dyn_cast(); assert(ptrTy && "Expecting pointer type in GlobalViewAttr"); auto llvmEltTy = converter->convertType(ptrTy.getPointee()); - if (llvmEltTy == sourceSymbol.getType()) - return addressOfOp; + if (llvmEltTy == sourceType) + return addrOp; auto llvmDstTy = converter->convertType(globalAttr.getType()); return rewriter.create(parentOp->getLoc(), llvmDstTy, - addressOfOp.getResult()); + addrOp); } /// Switches on the type of attribute and calls the appropriate conversion. @@ -235,6 +290,10 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, return lowerCirAttrAsValue(parentOp, zeroAttr, rewriter, converter); if (const auto globalAttr = attr.dyn_cast()) return lowerCirAttrAsValue(parentOp, globalAttr, rewriter, converter); + if (const auto vtableAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, vtableAttr, rewriter, converter); + if (const auto typeinfoAttr = attr.dyn_cast()) + return lowerCirAttrAsValue(parentOp, typeinfoAttr, rewriter, converter); llvm_unreachable("unhandled attribute type"); } @@ -1324,8 +1383,8 @@ class CIRGlobalOpLowering // Check for missing funcionalities. if (!init.has_value()) { - rewriter.replaceOpWithNewOp(op, llvmType, isConst, - linkage, symbol, mlir::Attribute()); + rewriter.replaceOpWithNewOp( + op, llvmType, isConst, linkage, symbol, mlir::Attribute()); return mlir::success(); } @@ -1378,6 +1437,20 @@ class CIRGlobalOpLowering rewriter.create( loc, lowerCirAttrAsValue(op, attr, rewriter, typeConverter)); return mlir::success(); + } else if (const auto vtableAttr = + init.value().dyn_cast()) { + setupRegionInitializedLLVMGlobalOp(op, rewriter); + rewriter.create( + op->getLoc(), + lowerCirAttrAsValue(op, vtableAttr, rewriter, typeConverter)); + return mlir::success(); + } else if (const auto typeinfoAttr = + init.value().dyn_cast()) { + setupRegionInitializedLLVMGlobalOp(op, rewriter); + rewriter.create( + op->getLoc(), + lowerCirAttrAsValue(op, typeinfoAttr, rewriter, typeConverter)); + return mlir::success(); } else { op.emitError() << "usupported initializer '" << init.value() << "'"; return mlir::failure(); @@ -1844,6 +1917,37 @@ class CIRFAbsOpLowering : public mlir::OpConversionPattern { } }; +class CIRVTableAddrPointOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VTableAddrPointOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + const auto *converter = getTypeConverter(); + auto targetType = converter->convertType(op.getType()); + mlir::Value symAddr = op.getSymAddr(); + + mlir::Type eltType; + if (!symAddr) { + auto module = op->getParentOfType(); + auto symbol = dyn_cast( + mlir::SymbolTable::lookupSymbolIn(module, op.getNameAttr())); + symAddr = rewriter.create( + op.getLoc(), mlir::LLVM::LLVMPointerType::get(getContext()), + *op.getName()); + eltType = converter->convertType(symbol.getType()); + } + + auto offsets = llvm::SmallVector{ + 0, op.getVtableIndex(), op.getAddressPointIndex()}; + rewriter.replaceOpWithNewOp(op, targetType, eltType, + symAddr, offsets, true); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -1857,7 +1961,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, - CIRFAbsOpLowering>( + CIRFAbsOpLowering, CIRVTableAddrPointOpLowering>( converter, patterns.getContext()); } diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp index 22ea0d236a7b..3672331a90a8 100644 --- a/clang/test/CIR/CodeGen/vbase.cpp +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM struct A { int a; @@ -13,23 +15,33 @@ void ppp() { B b; } // Vtable definition for B -// CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr<12> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr]> : !cir.array x 3>}> +// CIR: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr<12> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr]> : !cir.array x 3>}> // VTT for B. -// CHECK: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [#cir.int<0> : !s32i, #cir.int<0> : !s32i, #cir.int<3> : !s32i]> : !cir.ptr]> : !cir.array x 1> +// CIR: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [#cir.int<0> : !s32i, #cir.int<0> : !s32i, #cir.int<3> : !s32i]> : !cir.ptr]> : !cir.array x 1> -// CHECK: cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE +// CIR: cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE // Type info name for B -// CHECK: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array +// CIR: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array -// CHECK: cir.global "private" external @_ZTVN10__cxxabiv117__class_type_infoE : !cir.ptr> +// CIR: cir.global "private" external @_ZTVN10__cxxabiv117__class_type_infoE : !cir.ptr> // Type info name for A -// CHECK: cir.global linkonce_odr @_ZTS1A = #cir.const_array<"1A" : !cir.array> : !cir.array +// CIR: cir.global linkonce_odr @_ZTS1A = #cir.const_array<"1A" : !cir.array> : !cir.array // Type info A. -// CHECK: cir.global constant external @_ZTI1A = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1A> : !cir.ptr}> +// CIR: cir.global constant external @_ZTI1A = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1A> : !cir.ptr}> // Type info B. -// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.int<0> : !u32i, #cir.int<1> : !u32i, #cir.global_view<@_ZTI1A> : !cir.ptr, #cir.int<-6141> : !s64i}> +// CIR: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.int<0> : !u32i, #cir.int<1> : !u32i, #cir.global_view<@_ZTI1A> : !cir.ptr, #cir.int<-6141> : !s64i}> + + +// LLVM: @_ZTV1B = linkonce_odr global { [3 x ptr] } { [3 x ptr] [ptr inttoptr (i64 12 to ptr), ptr null, ptr @_ZTI1B] } +// LLVM: @_ZTT1B = linkonce_odr global [1 x ptr] [ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1B, i32 0, i32 0, i32 3)] +// LLVM: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global ptr +// LLVM: @_ZTS1B = linkonce_odr global [2 x i8] c"1B" +// LLVM: @_ZTVN10__cxxabiv117__class_type_infoE = external global ptr +// LLVM: @_ZTS1A = linkonce_odr global [2 x i8] c"1A" +// LLVM: @_ZTI1A = constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 2), ptr @_ZTS1A } +// LLVM: @_ZTI1B = constant { ptr, ptr, i32, i32, ptr, i64 } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i32 2), ptr @_ZTS1B, i32 0, i32 1, ptr @_ZTI1A, i64 -6141 } From 4db3f7d03b4151f216fd9fff661048fdc800ace0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 Oct 2023 22:14:49 -0700 Subject: [PATCH 1224/2301] [CIR][Lowering] Fix opaque pointer issue from recent merge --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7541a96134aa..739d4a481f30 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -241,20 +241,20 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } auto loc = parentOp->getLoc(); - auto srcPtrType = mlir::LLVM::LLVMPointerType::get(parentOp->getContext()); - mlir::Value addrOp = - rewriter.create(loc, srcPtrType, symName); + mlir::Value addrOp = rewriter.create( + loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), symName); if (globalAttr.getIndices()) { - llvm::SmallVector Indices; + llvm::SmallVector indices; for (auto idx : globalAttr.getIndices()) { auto intAttr = dyn_cast(idx); assert(intAttr && "index must be integers"); - Indices.push_back(intAttr.getSInt()); + indices.push_back(intAttr.getSInt()); } + auto resTy = addrOp.getType(); auto eltTy = converter->convertType(sourceType); - addrOp = rewriter.create(loc, srcPtrType, eltTy, addrOp, - Indices, true); + addrOp = rewriter.create(loc, resTy, eltTy, addrOp, + indices, true); } auto ptrTy = globalAttr.getType().dyn_cast(); @@ -1925,7 +1925,7 @@ class CIRVTableAddrPointOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::VTableAddrPointOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - const auto *converter = getTypeConverter(); + auto converter = getTypeConverter(); auto targetType = converter->convertType(op.getType()); mlir::Value symAddr = op.getSymAddr(); @@ -1942,8 +1942,12 @@ class CIRVTableAddrPointOpLowering auto offsets = llvm::SmallVector{ 0, op.getVtableIndex(), op.getAddressPointIndex()}; - rewriter.replaceOpWithNewOp(op, targetType, eltType, - symAddr, offsets, true); + if (eltType) + rewriter.replaceOpWithNewOp(op, targetType, eltType, + symAddr, offsets, true); + else + llvm_unreachable("Shouldn't ever be missing an eltType here"); + return mlir::success(); } }; From cdce7138ebd45447bee3912cb52779dd1d91cdc0 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Thu, 21 Sep 2023 23:10:58 -0300 Subject: [PATCH 1225/2301] [CIR][NFC] Refactor StructType body attribute The `body` attribute is used to identify if a struct type has a body or not. In other words, it separates complete from incomplete structs. For this reason, the `body` attribute was renamed to `incomplete`, matching its keyword in the CIR language. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 5 ++-- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 16 +++++----- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 7 +++-- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 2 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 4 +-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 30 ++++++++----------- 7 files changed, 32 insertions(+), 34 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 24cc591521ed..8f31012cafd4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -109,7 +109,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct", let parameters = (ins ArrayRefParameter<"mlir::Type", "members">:$members, "mlir::StringAttr":$name, - "bool":$body, + "bool":$incomplete, "bool":$packed, "mlir::cir::StructType::RecordKind":$kind, "std::optional":$ast @@ -134,7 +134,8 @@ def CIR_StructType : CIR_Type<"Struct", "struct", public: void dropAst(); size_t getNumElements() const { return getMembers().size(); } - bool isOpaque() const { return !getBody(); } + bool isIncomplete() const { return getIncomplete(); } + bool isComplete() const { return !getIncomplete(); } bool isPadded(const ::mlir::DataLayout &dataLayout) const; std::string getPrefixedName() { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 6b2844785374..91c1768eb745 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -178,8 +178,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { if (!structTy) structTy = getType( members, mlir::StringAttr::get(getContext()), - /*body=*/true, packed, mlir::cir::StructType::Struct, - /*ast=*/std::nullopt); + /*incomplete=*/false, packed, mlir::cir::StructType::Struct, + /*ast=*/nullptr); // Return zero or anonymous constant struct. if (isZero) @@ -199,7 +199,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { } if (!ty) - ty = getAnonStructTy(members, /*body=*/true, packed); + ty = getAnonStructTy(members, /*incomplete=*/false, packed); auto sTy = ty.dyn_cast(); assert(sTy && "expected struct type"); @@ -396,9 +396,9 @@ class CIRGenBuilderTy : public mlir::OpBuilder { /// Get a CIR anonymous struct type. mlir::cir::StructType - getAnonStructTy(llvm::ArrayRef members, bool body, + getAnonStructTy(llvm::ArrayRef members, bool incomplete, bool packed = false, const clang::RecordDecl *ast = nullptr) { - return getStructTy(members, "", body, packed, ast); + return getStructTy(members, "", incomplete, packed, ast); } /// Get a CIR record kind from a AST declaration tag. @@ -420,7 +420,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { /// Get a CIR named struct type. mlir::cir::StructType getStructTy(llvm::ArrayRef members, - llvm::StringRef name, bool body, + llvm::StringRef name, bool incomplete, bool packed, const clang::RecordDecl *ast) { const auto nameAttr = getStringAttr(name); std::optional astAttr = std::nullopt; @@ -429,8 +429,8 @@ class CIRGenBuilderTy : public mlir::OpBuilder { astAttr = getAttr(ast); kind = getRecordKind(ast->getTagKind()); } - return mlir::cir::StructType::get(getContext(), members, nameAttr, body, - packed, kind, astAttr); + return mlir::cir::StructType::get(getContext(), members, nameAttr, + incomplete, packed, kind, astAttr); } // diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 603864c30218..5ea75045e335 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -73,7 +73,7 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, bool CIRGenTypes::isRecordLayoutComplete(const Type *Ty) const { llvm::DenseMap::const_iterator I = recordDeclTypes.find(Ty); - return I != recordDeclTypes.end() && I->second.getBody(); + return I != recordDeclTypes.end() && I->second.isComplete(); } static bool @@ -167,12 +167,13 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // Handle forward decl / incomplete types. if (!entry) { auto name = getRecordTypeName(RD, ""); - entry = Builder.getStructTy({}, name, /*body=*/false, /*packed=*/false, RD); + entry = Builder.getStructTy({}, name, /*incomplete=*/true, /*packed=*/false, + RD); recordDeclTypes[key] = entry; } RD = RD->getDefinition(); - if (!RD || !RD->isCompleteDefinition() || entry.getBody()) + if (!RD || !RD->isCompleteDefinition() || entry.isComplete()) return entry; // If converting this type would cause us to infinitely loop, don't do it! diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 76719186c72f..8e11eda15a5c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -63,7 +63,7 @@ mlir::Type CIRGenVTables::getVTableType(const VTableLayout &layout) { // FIXME(cir): should VTableLayout be encoded like we do for some // AST nodes? - return CGM.getBuilder().getAnonStructTy(tys, /*body=*/true); + return CGM.getBuilder().getAnonStructTy(tys, /*incomplete=*/false); } /// At this point in the translation unit, does it appear that can we diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 12d2d06bb70e..f800ba3bfc5d 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -609,7 +609,7 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, CIRRecordLowering baseBuilder(*this, D, /*Packed=*/builder.isPacked); auto baseIdentifier = getRecordTypeName(D, ".base"); *BaseTy = Builder.getStructTy(baseBuilder.fieldTypes, baseIdentifier, - /*body=*/true, /*packed=*/false, D); + /*incomplete=*/false, /*packed=*/false, D); // TODO(cir): add something like addRecordTypeName // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work @@ -623,7 +623,7 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, // signifies that the type is no longer opaque and record layout is complete, // but we may need to recursively layout D while laying D out as a base type. *Ty = Builder.getStructTy(builder.fieldTypes, getRecordTypeName(D, ""), - /*body=*/true, /*packed=*/false, D); + /*incomplete=*/false, /*packed=*/false, D); auto RL = std::make_unique( Ty ? *Ty : mlir::cir::StructType{}, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a5e6f5ec6a8f..2e6a2f2db798 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2418,7 +2418,7 @@ static bool isIncompleteType(mlir::Type typ) { if (auto ptr = typ.dyn_cast()) return isIncompleteType(ptr.getPointee()); else if (auto rec = typ.dyn_cast()) - return !rec.getBody(); + return rec.isIncomplete(); return false; } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index f8a739399ea4..df8e5b2971e5 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -106,8 +106,6 @@ Type StructType::getLargestMember(const ::mlir::DataLayout &dataLayout) const { Type StructType::parse(mlir::AsmParser &parser) { const auto loc = parser.getCurrentLocation(); - llvm::SmallVector members; - bool body = false; bool packed = false; mlir::cir::ASTRecordDeclAttr ast = nullptr; RecordKind kind; @@ -134,18 +132,16 @@ Type StructType::parse(mlir::AsmParser &parser) { if (parser.parseOptionalKeyword("packed").succeeded()) packed = true; + // Parse record members or lack thereof. + bool incomplete = true; + llvm::SmallVector members; if (parser.parseOptionalKeyword("incomplete").failed()) { - body = true; - const auto delim = AsmParser::Delimiter::Braces; - auto result = parser.parseCommaSeparatedList(delim, [&]() -> ParseResult { - mlir::Type ty; - if (parser.parseType(ty)) - return mlir::failure(); - members.push_back(ty); - return mlir::success(); - }); - - if (result.failed()) + incomplete = false; + const auto delimiter = AsmParser::Delimiter::Braces; + const auto parseElementFn = [&parser, &members]() { + return parser.parseType(members.emplace_back()); + }; + if (parser.parseCommaSeparatedList(delimiter, parseElementFn).failed()) return {}; } @@ -154,8 +150,8 @@ Type StructType::parse(mlir::AsmParser &parser) { if (parser.parseGreater()) return {}; - return StructType::get(parser.getContext(), members, name, body, packed, kind, - std::nullopt); + return StructType::get(parser.getContext(), members, name, incomplete, packed, + kind, std::nullopt); } void StructType::print(mlir::AsmPrinter &printer) const { @@ -179,7 +175,7 @@ void StructType::print(mlir::AsmPrinter &printer) const { if (getPacked()) printer << "packed "; - if (!getBody()) { + if (isIncomplete()) { printer << "incomplete"; } else { printer << "{"; @@ -286,7 +282,7 @@ bool StructType::isPadded(const ::mlir::DataLayout &dataLayout) const { void StructType::computeSizeAndAlignment( const ::mlir::DataLayout &dataLayout) const { - assert(!isOpaque() && "Cannot get layout of opaque structs"); + assert(isComplete() && "Cannot get layout of incomplete structs"); // Do not recompute. if (size || align || padded || largestMember) return; From bb879cb636600ae8c45a1cd52a27cd5333b60e9d Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Thu, 21 Sep 2023 21:24:10 -0300 Subject: [PATCH 1226/2301] [CIR][NFC] Remove std::optional from StructType ast attribute MLIR's attributes are inherently nullable, so there is no need to use std::optional to represent a nullable attribute. This patch removes std::optional from StructType's ast attribute to simplify its usage. --- clang/include/clang/CIR/Dialect/IR/CIRTypes.td | 4 ++-- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 10 ++++++---- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 6 +++--- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 8f31012cafd4..96078c8bbeb5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -112,7 +112,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct", "bool":$incomplete, "bool":$packed, "mlir::cir::StructType::RecordKind":$kind, - "std::optional":$ast + "ASTRecordDeclInterface":$ast ); let hasCustomAssemblyFormat = 1; @@ -165,7 +165,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct", let extraClassDefinition = [{ void $cppClass::dropAst() { - getImpl()->ast = std::nullopt; + getImpl()->ast = nullptr; } }]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 91c1768eb745..1b5e96f18f64 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -423,7 +423,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { llvm::StringRef name, bool incomplete, bool packed, const clang::RecordDecl *ast) { const auto nameAttr = getStringAttr(name); - std::optional astAttr = std::nullopt; + mlir::cir::ASTRecordDeclAttr astAttr = nullptr; auto kind = mlir::cir::StructType::RecordKind::Struct; if (ast) { astAttr = getAttr(ast); diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index df8e5b2971e5..39eb5bb58696 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -107,7 +107,6 @@ Type StructType::getLargestMember(const ::mlir::DataLayout &dataLayout) const { Type StructType::parse(mlir::AsmParser &parser) { const auto loc = parser.getCurrentLocation(); bool packed = false; - mlir::cir::ASTRecordDeclAttr ast = nullptr; RecordKind kind; if (parser.parseLess()) @@ -145,13 +144,16 @@ Type StructType::parse(mlir::AsmParser &parser) { return {}; } + // Parse optional AST attribute. This is just a formality for now, since CIR + // cannot yet read serialized AST. + mlir::cir::ASTRecordDeclAttr ast = nullptr; parser.parseOptionalAttribute(ast); if (parser.parseGreater()) return {}; return StructType::get(parser.getContext(), members, name, incomplete, packed, - kind, std::nullopt); + kind, nullptr); } void StructType::print(mlir::AsmPrinter &printer) const { @@ -183,9 +185,9 @@ void StructType::print(mlir::AsmPrinter &printer) const { printer << "}"; } - if (getAst().has_value()) { + if (getAst()) { printer << " "; - printer.printAttribute(getAst().value()); + printer.printAttribute(getAst()); } printer << '>'; diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index d3b074fe16ed..93abd4f729b4 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -887,7 +887,7 @@ void LifetimeCheckPass::checkIf(IfOp ifOp) { template bool isStructAndHasAttr(mlir::Type ty) { if (!ty.isa()) return false; - return hasAttr(*mlir::cast(ty).getAst()); + return hasAttr(ty.cast().getAst()); } static bool isOwnerType(mlir::Type ty) { @@ -1753,7 +1753,7 @@ bool LifetimeCheckPass::isLambdaType(mlir::Type ty) { auto taskTy = ty.dyn_cast(); if (!taskTy) return false; - if (taskTy.getAst()->isLambda()) + if (taskTy.getAst().isLambda()) IsLambdaTyCache[ty] = true; return IsLambdaTyCache[ty]; @@ -1768,7 +1768,7 @@ bool LifetimeCheckPass::isTaskType(mlir::Value taskVal) { auto taskTy = taskVal.getType().dyn_cast(); if (!taskTy) return false; - return taskTy.getAst()->hasPromiseType(); + return taskTy.getAst().hasPromiseType(); }(); IsTaskTyCache[ty] = result; From 88d84523d4917b014af5fe3e3cef9d384a1e8f31 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 25 Oct 2023 16:45:04 -0700 Subject: [PATCH 1227/2301] [CIR][CIRGen][NFC] Fix redudancy while casting integers and remove wrong and untested CastKind::floating path --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 12 ++---- clang/test/CIR/Lowering/ThroughMLIR/scope.cir | 37 ++++++++----------- 2 files changed, 19 insertions(+), 30 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index f4a76958bcf2..b2c0ae97d02d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1641,16 +1641,10 @@ mlir::Value ScalarExprEmitter::buildScalarCast( if (CGF.getBuilder().isInt(DstElementTy)) return Builder.create( Src.getLoc(), DstTy, mlir::cir::CastKind::integral, Src); - return Builder.create( - Src.getLoc(), DstTy, mlir::cir::CastKind::int_to_float, Src); - } - - if (SrcElementTy.isa()) { - if (DstElementTy.isa()) + if (DstElementTy.isa()) return Builder.create( - Src.getLoc(), DstTy, mlir::cir::CastKind::integral, Src); - return Builder.create( - Src.getLoc(), DstTy, mlir::cir::CastKind::floating, Src); + Src.getLoc(), DstTy, mlir::cir::CastKind::int_to_float, Src); + llvm_unreachable("Unknown type cast"); } // Leaving mlir::IntegerType around incase any old user lingers diff --git a/clang/test/CIR/Lowering/ThroughMLIR/scope.cir b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir index 6d877351b7c6..4ebd7749a72f 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/scope.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir @@ -1,5 +1,7 @@ -// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s -input-file=%t.mlir -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o %t.mlir +// RUN: FileCheck %s -input-file=%t.mlir -check-prefix=LLVM !u32i = !cir.int module { @@ -20,25 +22,16 @@ module { // MLIR-NEXT: } // MLIR-NEXT: return - -// LLVM: define void @foo() -// LLVM-NEXT: %1 = call ptr @llvm.stacksave.p0() -// LLVM-NEXT: br label %2 -// LLVM-EMPTY: -// LLVM-NEXT: 2: -// LLVM-NEXT: %3 = alloca i32, i64 1, align 4 -// LLVM-NEXT: %4 = insertvalue { ptr, ptr, i64 } undef, ptr %3, 0 -// LLVM-NEXT: %5 = insertvalue { ptr, ptr, i64 } %4, ptr %3, 1 -// LLVM-NEXT: %6 = insertvalue { ptr, ptr, i64 } %5, i64 0, 2 -// LLVM-NEXT: %7 = extractvalue { ptr, ptr, i64 } %6, 1 -// LLVM-NEXT: store i32 4, ptr %7, align 4 -// LLVM-NEXT: call void @llvm.stackrestore.p0(ptr %1) -// LLVM-NEXT: br label %8 -// LLVM-EMPTY: -// LLVM-NEXT: 8: -// LLVM-NEXT: ret void -// LLVM-NEXT: } - +// LLVM: llvm.func @foo() { +// LLVM: %0 = llvm.intr.stacksave : !llvm.ptr +// LLVM: llvm.br ^bb1 +// LLVM: ^bb1: +// [...] +// LLVM: llvm.intr.stackrestore %0 : !llvm.ptr +// LLVM: llvm.br ^bb2 +// LLVM: ^bb2: +// LLVM: llvm.return +// LLVM: } // Should drop empty scopes. cir.func @empty_scope() { @@ -50,4 +43,6 @@ module { // MLIR-NEXT: return // MLIR-NEXT: } + // LLVM: llvm.func @empty_scope() + // LLVM: llvm.return } From 8821fa18c870e6dc99d164941426cecca11213bf Mon Sep 17 00:00:00 2001 From: David Olsen Date: Fri, 27 Oct 2023 15:38:13 -0700 Subject: [PATCH 1228/2301] [CIR] Implement bool->int conversion (#292) In the CIR CodeGen function `ScalarExprEmitter::buildScalarCast`, implement conversions from bool to an integral type. This was inadvertently left out in earlier changes. Reorganize the code in the function to be more clear, with better assertion failure messages when encountering an unimplemented construct. This is a partial fix for issue #290 --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 75 ++++++++++++---------- clang/test/CIR/CodeGen/cast.cpp | 5 ++ 2 files changed, 47 insertions(+), 33 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index b2c0ae97d02d..ad111e9e6911 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1633,45 +1633,54 @@ mlir::Value ScalarExprEmitter::buildScalarCast( DstElementType = DstType; } - if (CGF.getBuilder().isInt(SrcElementTy)) { - if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) { - llvm_unreachable("NYI"); + if (SrcElementTy.isa() || + DstElementTy.isa()) + llvm_unreachable("Obsolete code. Don't use mlir::IntegerType with CIR."); + + if (SrcElementType->isBooleanType()) { + if (Opts.TreatBooleanAsSigned) + llvm_unreachable("NYI: signed bool"); + if (CGF.getBuilder().isInt(DstElementTy)) { + return Builder.create( + Src.getLoc(), DstTy, mlir::cir::CastKind::bool_to_int, Src); + } else if (DstTy.isa()) { + llvm_unreachable("NYI: bool->float cast"); + } else { + llvm_unreachable("Unexpected destination type for scalar cast"); } - - if (CGF.getBuilder().isInt(DstElementTy)) + } else if (CGF.getBuilder().isInt(SrcElementTy)) { + if (CGF.getBuilder().isInt(DstElementTy)) { return Builder.create( Src.getLoc(), DstTy, mlir::cir::CastKind::integral, Src); - if (DstElementTy.isa()) + } else if (DstElementTy.isa()) { return Builder.create( Src.getLoc(), DstTy, mlir::cir::CastKind::int_to_float, Src); - llvm_unreachable("Unknown type cast"); - } - - // Leaving mlir::IntegerType around incase any old user lingers - if (DstElementTy.isa()) { - llvm_unreachable("NYI"); - } - - if (DstElementTy.isa()) { - assert(SrcElementTy.isa() && "Unknown real conversion"); - - // If we can't recognize overflow as undefined behavior, assume that - // overflow saturates. This protects against normal optimizations if we are - // compiling with non-standard FP semantics. - if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) - llvm_unreachable("NYI"); - - if (Builder.getIsFPConstrained()) - llvm_unreachable("NYI"); - return Builder.create( - Src.getLoc(), DstTy, mlir::cir::CastKind::float_to_int, Src); + } else { + llvm_unreachable("Unexpected destination type for scalar cast"); + } + } else if (SrcElementTy.isa()) { + if (CGF.getBuilder().isInt(DstElementTy)) { + // If we can't recognize overflow as undefined behavior, assume that + // overflow saturates. This protects against normal optimizations if we + // are compiling with non-standard FP semantics. + if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) + llvm_unreachable("NYI"); + if (Builder.getIsFPConstrained()) + llvm_unreachable("NYI"); + return Builder.create( + Src.getLoc(), DstTy, mlir::cir::CastKind::float_to_int, Src); + } else if (DstElementTy.isa()) { + auto FloatDstTy = DstTy.cast(); + auto FloatSrcTy = SrcTy.cast(); + if (FloatDstTy.getWidth() < FloatSrcTy.getWidth()) + llvm_unreachable("NYI: narrowing floating-point cast"); + return Builder.createFPExt(Src, DstTy); + } else { + llvm_unreachable("Unexpected destination type for scalar cast"); + } + } else { + llvm_unreachable("Unexpected source type for scalar cast"); } - - auto FloatDstTy = DstElementTy.cast(); - auto FloatSrcTy = SrcElementTy.cast(); - if (FloatDstTy.getWidth() < FloatSrcTy.getWidth()) - llvm_unreachable("truncation NYI"); - return Builder.createFPExt(Src, DstTy); } LValue diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 1ce940629a3c..9ce946d8528d 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -68,6 +68,11 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4) { unsigned fptoui = (unsigned)x3; // Floating point to unsigned integer // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : f32), !u32i + bool x5 = (bool)x1; // No checking, because this isn't a cast. + + int bi = (int)x5; // bool to int + // CHECK: %{{[0-9]+}} = cir.cast(bool_to_int, %{{[0-9]+}} : !cir.bool), !s32i + return 0; } From 2830ebdb853835d82da67b180be04682ee38c7ad Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Mon, 30 Oct 2023 10:08:53 +0300 Subject: [PATCH 1229/2301] [CIR][Codegen] Support codegen for FP truncations. (#291) --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 +++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 7 ++----- clang/test/CIR/CodeGen/cast.cpp | 9 ++++++--- clang/test/CIR/Lowering/cast.cir | 4 +++- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 1b5e96f18f64..bc78370cf0ee 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -557,7 +557,9 @@ class CIRGenBuilderTy : public mlir::OpBuilder { llvm_unreachable("negation for the given type is NYI"); } - mlir::Value createFPExt(mlir::Value v, mlir::Type destType) { + // TODO: split this to createFPExt/createFPTrunc when we have dedicated cast + // operations. + mlir::Value createFloatingCast(mlir::Value v, mlir::Type destType) { if (getIsFPConstrained()) llvm_unreachable("constrainedfp NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index ad111e9e6911..e1cc84141a64 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1670,11 +1670,8 @@ mlir::Value ScalarExprEmitter::buildScalarCast( return Builder.create( Src.getLoc(), DstTy, mlir::cir::CastKind::float_to_int, Src); } else if (DstElementTy.isa()) { - auto FloatDstTy = DstTy.cast(); - auto FloatSrcTy = SrcTy.cast(); - if (FloatDstTy.getWidth() < FloatSrcTy.getWidth()) - llvm_unreachable("NYI: narrowing floating-point cast"); - return Builder.createFPExt(Src, DstTy); + // TODO: split this to createFPExt/createFPTrunc + return Builder.createFloatingCast(Src, DstTy); } else { llvm_unreachable("Unexpected destination type for scalar cast"); } diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 9ce946d8528d..adbe18dadca8 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -17,7 +17,7 @@ unsigned char cxxstaticcast_0(unsigned int x) { // CHECK: } -int cStyleCasts_0(unsigned x1, int x2, float x3, short x4) { +int cStyleCasts_0(unsigned x1, int x2, float x3, short x4, double x5) { // CHECK: cir.func @_{{.*}}cStyleCasts_0{{.*}} char a = (char)x1; // truncate @@ -68,11 +68,14 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4) { unsigned fptoui = (unsigned)x3; // Floating point to unsigned integer // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : f32), !u32i - bool x5 = (bool)x1; // No checking, because this isn't a cast. + bool ib = (bool)x1; // No checking, because this isn't a cast. - int bi = (int)x5; // bool to int + int bi = (int)ib; // bool to int // CHECK: %{{[0-9]+}} = cir.cast(bool_to_int, %{{[0-9]+}} : !cir.bool), !s32i + float dptofp = (float)x5; + // CHECK: %{{.+}} = cir.cast(floating, %{{[0-9]+}} : f64), f32 + return 0; } diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 74e176a29f10..70420c63d8f8 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -10,7 +10,7 @@ !u64i = !cir.int module { - cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i, %arg2: f32) -> !s32i { + cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i, %arg2: f32, %arg3: f64) -> !s32i { // CHECK: llvm.func @cStyleCasts %0 = cir.alloca !u32i, cir.ptr , ["x1", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, cir.ptr , ["x2", init] {alignment = 4 : i64} @@ -74,6 +74,8 @@ module { %28 = cir.cast(float_to_int, %arg2 : f32), !u32i // CHECK: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 %18 = cir.const(#cir.int<0> : !s32i) : !s32i + // CHECK: %{{.+}} = llvm.fptrunc %{{.+}} : f64 to f32 + %34 = cir.cast(floating, %arg3 : f64), f32 cir.store %18, %2 : !s32i, cir.ptr %19 = cir.load %2 : cir.ptr , !s32i From 5bfe144e8da1842b403d1d7f734da62b23b6990d Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 26 Jan 2025 10:25:42 -0800 Subject: [PATCH 1230/2301] [CIR] Add empty CIRBaseBuilder file --- clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h new file mode 100644 index 000000000000..d2ef681b6a56 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -0,0 +1,7 @@ +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_CIR_DIALECT_BUILDER_CIRBASEBUILDER_H +#define LLVM_CLANG_CIR_DIALECT_BUILDER_CIRBASEBUILDER_H +#include "mlir/IR/Builders.h" + +#endif From d5b0f01f9b47e7899852e23b070791a8d515ea2f Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 1 Nov 2023 21:30:43 +0300 Subject: [PATCH 1231/2301] [CIR][CodeGen] Introduce CIRBaseBuilder (#297) As discussed in #279, we split `CIRGenBuilder` in two parts, which make some of the helpers usable outside of the `CodeGen` part. Basically, I placed casts and binary operations into a separate class, `CIRBaseBuilder`. Later, it can be extended with another helpers. But right now an idea to have a state less builder as a base one. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 171 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 131 +------------- 2 files changed, 172 insertions(+), 130 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index d2ef681b6a56..7fe9d9991345 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -1,7 +1,174 @@ +//===-- CIRBaseBuilder.h - CIRBuilder implementation -----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// //===----------------------------------------------------------------------===// -#ifndef LLVM_CLANG_CIR_DIALECT_BUILDER_CIRBASEBUILDER_H -#define LLVM_CLANG_CIR_DIALECT_BUILDER_CIRBASEBUILDER_H +#ifndef LLVM_CLANG_LIB_CIRBASEBUILDER_H +#define LLVM_CLANG_LIB_CIRBASEBUILDER_H + +#include "clang/AST/Decl.h" +#include "clang/AST/Type.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/FPEnv.h" + +#include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Location.h" +#include "mlir/IR/Types.h" +#include "llvm/ADT/APSInt.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/FloatingPointMode.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringSet.h" +#include "llvm/Support/ErrorHandling.h" +#include +#include +#include + +namespace cir { + +class CIRBaseBuilderTy : public mlir::OpBuilder { + +public: + CIRBaseBuilderTy(mlir::MLIRContext &C) : mlir::OpBuilder(&C) {} + + mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, + const llvm::APInt &val) { + return create(loc, typ, + getAttr(typ, val)); + } + + mlir::Value createNot(mlir::Value value) { + return create(value.getLoc(), value.getType(), + mlir::cir::UnaryOpKind::Not, value); + } + + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + const llvm::APInt &rhs) { + return create( + lhs.getLoc(), lhs.getType(), kind, lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); + } + + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + mlir::Value rhs) { + return create(lhs.getLoc(), lhs.getType(), kind, lhs, + rhs); + } + + mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, + bool isShiftLeft) { + return create( + lhs.getLoc(), lhs.getType(), lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); + } + + mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) { + auto width = lhs.getType().dyn_cast().getWidth(); + auto shift = llvm::APInt(width, bits); + return createShift(lhs, shift, isShiftLeft); + } + + mlir::Value createShiftLeft(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, true); + } + + mlir::Value createShiftRight(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, false); + } + + mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, + unsigned bits) { + auto val = llvm::APInt::getLowBitsSet(size, bits); + auto typ = mlir::cir::IntType::get(getContext(), size, false); + return getConstAPInt(loc, typ, val); + } + + mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::And, val); + } + + mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); + } + + mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::Or, val); + } + + mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); + } + + //===--------------------------------------------------------------------===// + // Cast/Conversion Operators + //===--------------------------------------------------------------------===// + + mlir::Value createCast(mlir::cir::CastKind kind, mlir::Value src, + mlir::Type newTy) { + if (newTy == src.getType()) + return src; + return create(src.getLoc(), newTy, kind, src); + } + + mlir::Value createIntCast(mlir::Value src, mlir::Type newTy) { + return create(src.getLoc(), newTy, + mlir::cir::CastKind::integral, src); + } + + mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy) { + return create(src.getLoc(), newTy, + mlir::cir::CastKind::int_to_ptr, src); + } + + mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy) { + return create(src.getLoc(), newTy, + mlir::cir::CastKind::ptr_to_int, src); + } + + // TODO(cir): the following function was introduced to keep in sync with LLVM + // codegen. CIR does not have "zext" operations. It should eventually be + // renamed or removed. For now, we just add whatever cast is required here. + mlir::Value createZExtOrBitCast(mlir::Location loc, mlir::Value src, + mlir::Type newTy) { + auto srcTy = src.getType(); + + if (srcTy == newTy) + return src; + + if (srcTy.isa() && newTy.isa()) + return createBoolToInt(src, newTy); + + llvm_unreachable("unhandled extension cast"); + } + + mlir::Value createBoolToInt(mlir::Value src, mlir::Type newTy) { + return createCast(mlir::cir::CastKind::bool_to_int, src, newTy); + } + + mlir::Value createBitcast(mlir::Value src, mlir::Type newTy) { + return createCast(mlir::cir::CastKind::bitcast, src, newTy); + } + + mlir::Value createBitcast(mlir::Location loc, mlir::Value src, + mlir::Type newTy) { + if (newTy == src.getType()) + return src; + return create(loc, newTy, mlir::cir::CastKind::bitcast, + src); + } +}; +} // namespace cir #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index bc78370cf0ee..ad3ced352014 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -15,6 +15,7 @@ #include "clang/AST/Decl.h" #include "clang/AST/Type.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" @@ -42,7 +43,7 @@ namespace cir { class CIRGenFunction; -class CIRGenBuilderTy : public mlir::OpBuilder { +class CIRGenBuilderTy : public CIRBaseBuilderTy { const CIRGenTypeCache &typeCache; bool IsFPConstrained = false; fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict; @@ -53,7 +54,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { public: CIRGenBuilderTy(mlir::MLIRContext &C, const CIRGenTypeCache &tc) - : mlir::OpBuilder(&C), typeCache(tc) {} + : CIRBaseBuilderTy(C), typeCache(tc) {} std::string getUniqueAnonRecordName() { std::string name = "anon." + std::to_string(anonRecordNames.size()); @@ -468,11 +469,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return getConstInt( loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); } - mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, - const llvm::APInt &val) { - return create(loc, typ, - getAttr(typ, val)); - } mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), getCIRBoolAttr(state)); @@ -643,14 +639,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { addr.getAlignment()); } - mlir::Value createBitcast(mlir::Location loc, mlir::Value src, - mlir::Type newTy) { - if (newTy == src.getType()) - return src; - return create(loc, newTy, mlir::cir::CastKind::bitcast, - src); - } - mlir::Value createLoad(mlir::Location loc, Address addr) { return create(loc, addr.getElementType(), addr.getPointer()); @@ -687,119 +675,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return create(loc, flag, dst); } - mlir::Value createNot(mlir::Value value) { - return create(value.getLoc(), value.getType(), - mlir::cir::UnaryOpKind::Not, value); - } - - mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, - const llvm::APInt &rhs) { - return create( - lhs.getLoc(), lhs.getType(), kind, lhs, - getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); - } - - mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, - mlir::Value rhs) { - return create(lhs.getLoc(), lhs.getType(), kind, lhs, - rhs); - } - - mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, - bool isShiftLeft) { - return create( - lhs.getLoc(), lhs.getType(), lhs, - getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); - } - - mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) { - auto width = lhs.getType().dyn_cast().getWidth(); - auto shift = llvm::APInt(width, bits); - return createShift(lhs, shift, isShiftLeft); - } - - mlir::Value createShiftLeft(mlir::Value lhs, unsigned bits) { - return createShift(lhs, bits, true); - } - - mlir::Value createShiftRight(mlir::Value lhs, unsigned bits) { - return createShift(lhs, bits, false); - } - - mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, - unsigned bits) { - auto val = llvm::APInt::getLowBitsSet(size, bits); - auto typ = mlir::cir::IntType::get(getContext(), size, false); - return getConstAPInt(loc, typ, val); - } - - mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) { - auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); - return createBinop(lhs, mlir::cir::BinOpKind::And, val); - } - - mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { - return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); - } - - mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) { - auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); - return createBinop(lhs, mlir::cir::BinOpKind::Or, val); - } - - mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { - return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); - } - - //===--------------------------------------------------------------------===// - // Cast/Conversion Operators - //===--------------------------------------------------------------------===// - - mlir::Value createCast(mlir::cir::CastKind kind, mlir::Value src, - mlir::Type newTy) { - if (newTy == src.getType()) - return src; - return create(src.getLoc(), newTy, kind, src); - } - - mlir::Value createIntCast(mlir::Value src, mlir::Type newTy) { - return create(src.getLoc(), newTy, - mlir::cir::CastKind::integral, src); - } - - mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy) { - return create(src.getLoc(), newTy, - mlir::cir::CastKind::int_to_ptr, src); - } - - mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy) { - return create(src.getLoc(), newTy, - mlir::cir::CastKind::ptr_to_int, src); - } - - // TODO(cir): the following function was introduced to keep in sync with LLVM - // codegen. CIR does not have "zext" operations. It should eventually be - // renamed or removed. For now, we just add whatever cast is required here. - mlir::Value createZExtOrBitCast(mlir::Location loc, mlir::Value src, - mlir::Type newTy) { - auto srcTy = src.getType(); - - if (srcTy == newTy) - return src; - - if (srcTy.isa() && newTy.isa()) - return createBoolToInt(src, newTy); - - llvm_unreachable("unhandled extension cast"); - } - - mlir::Value createBoolToInt(mlir::Value src, mlir::Type newTy) { - return createCast(mlir::cir::CastKind::bool_to_int, src, newTy); - } - - mlir::Value createBitcast(mlir::Value src, mlir::Type newTy) { - return createCast(mlir::cir::CastKind::bitcast, src, newTy); - } }; } // namespace cir From a5fabf765c7d87c2524025868238df4a0781bcf0 Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Wed, 1 Nov 2023 21:32:23 +0300 Subject: [PATCH 1232/2301] [CIR][CodeGen] Support integer-to-pointer casts. (#298) A silly fix for code like ``` *(char *)0 = 0; ``` --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 ++ clang/test/CIR/CodeGen/cast.cpp | 23 +++++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 76f946c2884a..2ee4431438df 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -175,6 +175,8 @@ static Address buildPointerWithAlignment(const Expr *E, // Nothing to do here... case CK_LValueToRValue: + case CK_NullToPointer: + case CK_IntegralToPointer: break; // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo. diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index adbe18dadca8..b7f4c8538a61 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -112,3 +112,26 @@ void lvalue_cast(int x) { // CHECK: %1 = cir.const(#cir.int<42> : !s32i) : !s32i // CHECK: cir.store %1, %0 : !s32i, cir.ptr +struct A { int x; }; + +void null_cast(long ptr) { + *(int *)0 = 0; + ((A *)0)->x = 0; +} + +// CHECK: cir.func @_Z9null_castl +// CHECK: %[[ADDR:[0-9]+]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: cir.store %{{[0-9]+}}, %[[ADDR]] : !s32i, cir.ptr +// CHECK: %[[BASE:[0-9]+]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, cir.ptr + +void int_cast(long ptr) { + ((A *)ptr)->x = 0; +} + +// CHECK: cir.func @_Z8int_castl +// CHECK: %[[BASE:[0-9]+]] = cir.cast(int_to_ptr, %{{[0-9]+}} : !u64i), !cir.ptr +// CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, cir.ptr + From bc531c29026e299bcf6f760aabd625d6667f75dd Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Wed, 1 Nov 2023 15:33:14 -0300 Subject: [PATCH 1233/2301] [CIR][CIRGen] Ensure unique names for template specializations (#295) Currently, different specializations of a template will generate distinct types with the same name. To properly differentiate each specialization, this patch includes the template arguments in the name of the type. This will be required to support mutable structs uniquely identified by their names. --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 15 ++++++++++++ clang/test/CIR/CodeGen/coro-task.cpp | 28 +++++++++++----------- clang/test/CIR/CodeGen/new.cpp | 4 ++-- clang/test/CIR/CodeGen/nrvo.cpp | 14 +++++------ clang/test/CIR/CodeGen/rangefor.cpp | 34 +++++++++++++-------------- clang/test/CIR/CodeGen/vector.cpp | 8 +++---- 6 files changed, 59 insertions(+), 44 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 5ea75045e335..d8de0023ebc3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -16,6 +16,8 @@ #include "clang/AST/GlobalDecl.h" #include "clang/AST/RecordLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" using namespace clang; @@ -54,6 +56,19 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, recordDecl->printQualifiedName(outStream, policy); else recordDecl->printName(outStream, policy); + + // Ensure each template specialization has a unique name. + if (auto *templateSpecialization = + llvm::dyn_cast(recordDecl)) { + outStream << '<'; + const auto args = templateSpecialization->getTemplateArgs().asArray(); + const auto printer = [&policy, &outStream](const TemplateArgument &arg) { + arg.getAsType().print(outStream, policy); + }; + llvm::interleaveComma(args, outStream, printer); + outStream << '>'; + } + } else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl()) { if (typedefNameDecl->getDeclContext()) typedefNameDecl->printQualifiedName(outStream, policy); diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index edf1df65de45..f10156eeb46d 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -126,13 +126,13 @@ co_invoke_fn co_invoke; }} // namespace folly::coro -// CHECK: ![[VoidTask:ty_.*]] = !cir.struct -// CHECK: ![[IntTask:ty_.*]] = !cir.struct -// CHECK: ![[VoidPromisse:ty_.*]] = !cir.struct::promise_type" {!u8i}> -// CHECK: ![[CoroHandleVoid:ty_.*]] = !cir.struct -// CHECK: ![[CoroHandlePromise:ty_.*]] = !cir.struct -// CHECK: ![[StdString:ty_.*]] = !cir.struct +// CHECK-DAG: ![[IntTask:.*]] = !cir.struct" {!u8i}> +// CHECK-DAG: ![[VoidTask:.*]] = !cir.struct" {!u8i}> +// CHECK-DAG: ![[VoidPromisse:.*]] = !cir.struct::promise_type" {!u8i}> +// CHECK-DAG: ![[CoroHandleVoid:.*]] = !cir.struct" {!u8i}> +// CHECK-DAG: ![[CoroHandlePromise:ty_.*]] = !cir.struct::promise_type>" {!u8i}> +// CHECK-DAG: ![[StdString:.*]] = !cir.struct +// CHECK-DAG: ![[SuspendAlways:.*]] = !cir.struct // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22folly3A3Acoro3A3Aco_invoke_fn22 @@ -362,20 +362,20 @@ folly::coro::Task go4() { // CHECK: %17 = cir.alloca !ty_22anon2E522, cir.ptr , ["ref.tmp1"] {alignment = 1 : i64} // Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` -// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> -// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> -// CHECK: cir.yield %19 : !cir.ptr)>> +// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> +// CHECK: cir.yield %19 : !cir.ptr)>> // CHECK: } -// CHECK: cir.store %12, %3 : !cir.ptr)>>, cir.ptr )>>> +// CHECK: cir.store %12, %3 : !cir.ptr)>>, cir.ptr )>>> // CHECK: cir.scope { // CHECK: %17 = cir.alloca !s32i, cir.ptr , ["ref.tmp2", init] {alignment = 4 : i64} -// CHECK: %18 = cir.load %3 : cir.ptr )>>>, !cir.ptr)>> +// CHECK: %18 = cir.load %3 : cir.ptr )>>>, !cir.ptr)>> // CHECK: %19 = cir.const(#cir.int<3> : !s32i) : !s32i // CHECK: cir.store %19, %17 : !s32i, cir.ptr // Call invoker, which calls operator() indirectly. -// CHECK: %20 = cir.call %18(%17) : (!cir.ptr)>>, !cir.ptr) -> !ty_22folly3A3Acoro3A3ATask221 -// CHECK: cir.store %20, %4 : !ty_22folly3A3Acoro3A3ATask221, cir.ptr +// CHECK: %20 = cir.call %18(%17) : (!cir.ptr)>>, !cir.ptr) -> ![[IntTask]] +// CHECK: cir.store %20, %4 : ![[IntTask]], cir.ptr // CHECK: } // CHECK: cir.await(user, ready : { diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index bf756b880d6b..93e3f7f5c40e 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -14,7 +14,7 @@ void m(int a, int b) { // CHECK: cir.func linkonce_odr @_ZSt11make_sharedI1SJRiS1_EESt10shared_ptrIT_EDpOT0_( // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["args", init] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["args", init] {alignment = 8 : i64} -// CHECK: %2 = cir.alloca !ty_22std3A3Ashared_ptr22, cir.ptr , ["__retval"] {alignment = 1 : i64} +// CHECK: %2 = cir.alloca !ty_22std3A3Ashared_ptr3CS3E22, cir.ptr , ["__retval"] {alignment = 1 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > // CHECK: cir.scope { @@ -26,7 +26,7 @@ void m(int a, int b) { // CHECK: %9 = cir.load %1 : cir.ptr >, !cir.ptr // CHECK: %10 = cir.load %9 : cir.ptr , !s32i // CHECK: cir.call @_ZN1SC1Eii(%6, %8, %10) : (!cir.ptr, !s32i, !s32i) -> () -// CHECK: cir.call @_ZNSt10shared_ptrI1SEC1EPS0_(%2, %6) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZNSt10shared_ptrI1SEC1EPS0_(%2, %6) : (!cir.ptr, !cir.ptr) -> () // CHECK: } class B { diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp index 0a80c686806a..9aeedb4614c3 100644 --- a/clang/test/CIR/CodeGen/nrvo.cpp +++ b/clang/test/CIR/CodeGen/nrvo.cpp @@ -9,23 +9,23 @@ std::vector test_nrvo() { return result; } -// CHECK: !ty_22std3A3Avector22 = !cir.struct>, !cir.ptr>, !cir.ptr>}> +// CHECK: ![[VEC:.*]] = !cir.struct" {!cir.ptr>, !cir.ptr>, !cir.ptr>}> -// CHECK: cir.func @_Z9test_nrvov() -> !ty_22std3A3Avector22 -// CHECK: %0 = cir.alloca !ty_22std3A3Avector22, cir.ptr , ["__retval", init] {alignment = 8 : i64} +// CHECK: cir.func @_Z9test_nrvov() -> ![[VEC]] +// CHECK: %0 = cir.alloca ![[VEC]], cir.ptr , ["__retval", init] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !cir.bool, cir.ptr , ["nrvo"] {alignment = 1 : i64} // CHECK: %2 = cir.const(#false) : !cir.bool // CHECK: cir.store %2, %1 : !cir.bool, cir.ptr -// CHECK: cir.call @_ZNSt6vectorIPKcEC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZNSt6vectorIPKcEC1Ev(%0) : (!cir.ptr) -> () // CHECK: cir.scope { // CHECK: %5 = cir.alloca !cir.ptr, cir.ptr >, ["ref.tmp0"] {alignment = 8 : i64} // CHECK: %6 = cir.get_global @".str" : cir.ptr > // CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr // CHECK: cir.store %7, %5 : !cir.ptr, cir.ptr > -// CHECK: cir.call @_ZNSt6vectorIPKcE9push_backEOS1_(%0, %5) : (!cir.ptr, !cir.ptr>) -> () +// CHECK: cir.call @_ZNSt6vectorIPKcE9push_backEOS1_(%0, %5) : (!cir.ptr, !cir.ptr>) -> () // CHECK: } // CHECK: %3 = cir.const(#true) : !cir.bool // CHECK: cir.store %3, %1 : !cir.bool, cir.ptr -// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22std3A3Avector22 -// CHECK: cir.return %4 : !ty_22std3A3Avector22 +// CHECK: %4 = cir.load %0 : cir.ptr , ![[VEC]] +// CHECK: cir.return %4 : ![[VEC]] // CHECK: } diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 19dcfa2425d5..78d221dfa41f 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -22,40 +22,40 @@ void init(unsigned numImages) { } // CHECK-DAG: !ty_22triple22 = !cir.struct, !u32i}> -// CHECK-DAG: !ty_22std3A3Avector22 = !cir.struct, !cir.ptr, !cir.ptr}> -// CHECK-DAG: !ty_22__vector_iterator22 = !cir.struct}> +// CHECK-DAG: ![[VEC:.*]] = !cir.struct" {!cir.ptr, !cir.ptr, !cir.ptr}> +// CHECK-DAG: ![[VEC_IT:.*]] = !cir.struct" {!cir.ptr}> // CHECK: cir.func @_Z4initj(%arg0: !u32i // CHECK: %0 = cir.alloca !u32i, cir.ptr , ["numImages", init] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !ty_22std3A3Avector22, cir.ptr , ["images", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca ![[VEC]], cir.ptr , ["images", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !u32i, cir.ptr // CHECK: %2 = cir.load %0 : cir.ptr , !u32i // CHECK: %3 = cir.cast(integral, %2 : !u32i), !u64i -// CHECK: cir.call @_ZNSt6vectorI6tripleEC1Em(%1, %3) : (!cir.ptr, !u64i) -> () +// CHECK: cir.call @_ZNSt6vectorI6tripleEC1Em(%1, %3) : (!cir.ptr, !u64i) -> () // CHECK: cir.scope { -// CHECK: %4 = cir.alloca !cir.ptr, cir.ptr >, ["__range1", init] {alignment = 8 : i64} -// CHECK: %5 = cir.alloca !ty_22__vector_iterator22, cir.ptr , ["__begin1", init] {alignment = 8 : i64} -// CHECK: %6 = cir.alloca !ty_22__vector_iterator22, cir.ptr , ["__end1", init] {alignment = 8 : i64} +// CHECK: %4 = cir.alloca !cir.ptr, cir.ptr >, ["__range1", init] {alignment = 8 : i64} +// CHECK: %5 = cir.alloca ![[VEC_IT]], cir.ptr , ["__begin1", init] {alignment = 8 : i64} +// CHECK: %6 = cir.alloca ![[VEC_IT]], cir.ptr , ["__end1", init] {alignment = 8 : i64} // CHECK: %7 = cir.alloca !cir.ptr, cir.ptr >, ["image", init] {alignment = 8 : i64} -// CHECK: cir.store %1, %4 : !cir.ptr, cir.ptr > -// CHECK: %8 = cir.load %4 : cir.ptr >, !cir.ptr -// CHECK: %9 = cir.call @_ZNSt6vectorI6tripleE5beginEv(%8) : (!cir.ptr) -> !ty_22__vector_iterator22 -// CHECK: cir.store %9, %5 : !ty_22__vector_iterator22, cir.ptr -// CHECK: %10 = cir.load %4 : cir.ptr >, !cir.ptr -// CHECK: %11 = cir.call @_ZNSt6vectorI6tripleE3endEv(%10) : (!cir.ptr) -> !ty_22__vector_iterator22 -// CHECK: cir.store %11, %6 : !ty_22__vector_iterator22, cir.ptr +// CHECK: cir.store %1, %4 : !cir.ptr, cir.ptr > +// CHECK: %8 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %9 = cir.call @_ZNSt6vectorI6tripleE5beginEv(%8) : (!cir.ptr) -> ![[VEC_IT]] +// CHECK: cir.store %9, %5 : ![[VEC_IT]], cir.ptr +// CHECK: %10 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %11 = cir.call @_ZNSt6vectorI6tripleE3endEv(%10) : (!cir.ptr) -> ![[VEC_IT]] +// CHECK: cir.store %11, %6 : ![[VEC_IT]], cir.ptr // CHECK: cir.loop for(cond : { -// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool +// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool // CHECK: cir.brcond %12 ^bb1, ^bb2 // CHECK: ^bb1: // pred: ^bb0 // CHECK: cir.yield continue // CHECK: ^bb2: // pred: ^bb0 // CHECK: cir.yield // CHECK: }, step : { -// CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr +// CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr // CHECK: cir.yield // CHECK: }) { -// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EdeEv(%5) : (!cir.ptr) -> !cir.ptr +// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EdeEv(%5) : (!cir.ptr) -> !cir.ptr // CHECK: cir.store %12, %7 : !cir.ptr, cir.ptr > // CHECK: cir.scope { // CHECK: %13 = cir.alloca !ty_22triple22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp index 4f9b3495bd87..8464ce3173e2 100644 --- a/clang/test/CIR/CodeGen/vector.cpp +++ b/clang/test/CIR/CodeGen/vector.cpp @@ -12,13 +12,13 @@ namespace std { } // namespace std // CHECK: cir.func linkonce_odr @_ZNSt6vectorIyE6resizeEm( -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !u64i, cir.ptr , ["__sz", init] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !u64i, cir.ptr , ["__cs", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: cir.store %arg1, %1 : !u64i, cir.ptr -// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %4 = cir.call @_ZNKSt6vectorIyE4sizeEv(%3) : (!cir.ptr) -> !u64i +// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.call @_ZNKSt6vectorIyE4sizeEv(%3) : (!cir.ptr) -> !u64i // CHECK: cir.store %4, %2 : !u64i, cir.ptr // CHECK: cir.scope { // CHECK: %5 = cir.load %2 : cir.ptr , !u64i From a35107c3ff8fed2628956d56e2e1c827678e3b60 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Wed, 1 Nov 2023 20:13:53 -0300 Subject: [PATCH 1234/2301] [CIR][CIRGen] Refactor StructType builders (#294) Instead of using a single builder for every possible StructType, we now have three builders: identified complete, identified incomplete, and anonymous struct types. This allows us to enforce correctness and to explicitly show the intent when creating a StructType. This patch also adds support for anonymous structs type aliases. When a StructType has no name, it will generate a `ty_anon_` alias. Conflicts are automatically resolved by MLIR. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 44 ++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 43 ++++++++++++------ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 3 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 10 +++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 +-- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 17 ++++++- clang/test/CIR/CodeGen/struct.c | 2 +- clang/test/CIR/CodeGen/vbase.cpp | 1 + clang/test/CIR/CodeGen/vtable-rtti.cpp | 6 +-- clang/test/CIR/IR/aliases.cir | 11 +++-- clang/test/CIR/IR/invalid.cir | 7 +++ 11 files changed, 114 insertions(+), 37 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 96078c8bbeb5..7b0c060cabe9 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -115,6 +115,37 @@ def CIR_StructType : CIR_Type<"Struct", "struct", "ASTRecordDeclInterface":$ast ); + let skipDefaultBuilders = 1; + let builders = [ + // Build an identified and complete struct. + TypeBuilder<(ins + "ArrayRef":$members, + "StringAttr":$name, + "bool":$packed, + "RecordKind":$kind, + CArg<"ASTRecordDeclInterface", "nullptr">:$ast), [{ + return $_get(context, members, name, /*incomplete=*/false, + packed, kind, ast); + }]>, + // Build an incomplete struct. + TypeBuilder<(ins + "StringAttr":$name, + "RecordKind":$kind), [{ + return $_get(context, /*members=*/ArrayRef{}, name, + /*incomplete=*/true, /*packed=*/false, kind, + /*ast=*/nullptr); + }]>, + // Build an anonymous struct. + TypeBuilder<(ins + "ArrayRef":$members, + "bool":$packed, + "RecordKind":$kind, + CArg<"ASTRecordDeclInterface", "nullptr">:$ast), [{ + return $_get(context, members, /*name=*/nullptr, + /*incomplete=*/false, packed, kind, ast); + }]> + ]; + let hasCustomAssemblyFormat = 1; let extraClassDeclaration = [{ @@ -138,18 +169,21 @@ def CIR_StructType : CIR_Type<"Struct", "struct", bool isComplete() const { return !getIncomplete(); } bool isPadded(const ::mlir::DataLayout &dataLayout) const; - std::string getPrefixedName() { - const auto name = getName().getValue().str(); + std::string getKindAsStr() { switch (getKind()) { case RecordKind::Class: - return "class." + name; + return "class"; case RecordKind::Union: - return "union." + name; + return "union"; case RecordKind::Struct: - return "struct." + name; + return "struct"; } } + std::string getPrefixedName() { + return getKindAsStr() + "." + getName().getValue().str(); + } + /// Return the member with the largest bit-length. mlir::Type getLargestMember(const ::mlir::DataLayout &dataLayout) const; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index ad3ced352014..8b7a27aab27a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -175,12 +175,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { isZero &= isNullValue(typedAttr); } - // Struct type not specified: create type from members. + // Struct type not specified: create anon struct type from members. if (!structTy) - structTy = getType( - members, mlir::StringAttr::get(getContext()), - /*incomplete=*/false, packed, mlir::cir::StructType::Struct, - /*ast=*/nullptr); + structTy = getType(members, packed, + mlir::cir::StructType::Struct, + /*ast=*/nullptr); // Return zero or anonymous constant struct. if (isZero) @@ -200,7 +199,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } if (!ty) - ty = getAnonStructTy(members, /*incomplete=*/false, packed); + ty = getAnonStructTy(members, packed); auto sTy = ty.dyn_cast(); assert(sTy && "expected struct type"); @@ -397,9 +396,15 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { /// Get a CIR anonymous struct type. mlir::cir::StructType - getAnonStructTy(llvm::ArrayRef members, bool incomplete, - bool packed = false, const clang::RecordDecl *ast = nullptr) { - return getStructTy(members, "", incomplete, packed, ast); + getAnonStructTy(llvm::ArrayRef members, bool packed = false, + const clang::RecordDecl *ast = nullptr) { + mlir::cir::ASTRecordDeclAttr astAttr = nullptr; + auto kind = mlir::cir::StructType::RecordKind::Struct; + if (ast) { + astAttr = getAttr(ast); + kind = getRecordKind(ast->getTagKind()); + } + return getType(members, packed, kind, astAttr); } /// Get a CIR record kind from a AST declaration tag. @@ -419,10 +424,20 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } } + /// Get a incomplete CIR struct type. + mlir::cir::StructType getIncompleteStructTy(llvm::StringRef name, + const clang::RecordDecl *ast) { + const auto nameAttr = getStringAttr(name); + auto kind = mlir::cir::StructType::RecordKind::Struct; + if (ast) + kind = getRecordKind(ast->getTagKind()); + return getType(nameAttr, kind); + } + /// Get a CIR named struct type. - mlir::cir::StructType getStructTy(llvm::ArrayRef members, - llvm::StringRef name, bool incomplete, - bool packed, const clang::RecordDecl *ast) { + mlir::cir::StructType getCompleteStructTy(llvm::ArrayRef members, + llvm::StringRef name, bool packed, + const clang::RecordDecl *ast) { const auto nameAttr = getStringAttr(name); mlir::cir::ASTRecordDeclAttr astAttr = nullptr; auto kind = mlir::cir::StructType::RecordKind::Struct; @@ -430,8 +445,8 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { astAttr = getAttr(ast); kind = getRecordKind(ast->getTagKind()); } - return mlir::cir::StructType::get(getContext(), members, nameAttr, - incomplete, packed, kind, astAttr); + return getType(members, nameAttr, packed, kind, + astAttr); } // diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index d8de0023ebc3..ceae82729cc1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -182,8 +182,7 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // Handle forward decl / incomplete types. if (!entry) { auto name = getRecordTypeName(RD, ""); - entry = Builder.getStructTy({}, name, /*incomplete=*/true, /*packed=*/false, - RD); + entry = Builder.getIncompleteStructTy(name, RD); recordDeclTypes[key] = entry; } diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index f800ba3bfc5d..18be250d0fc6 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -608,8 +608,9 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, builder.astRecordLayout.getSize()) { CIRRecordLowering baseBuilder(*this, D, /*Packed=*/builder.isPacked); auto baseIdentifier = getRecordTypeName(D, ".base"); - *BaseTy = Builder.getStructTy(baseBuilder.fieldTypes, baseIdentifier, - /*incomplete=*/false, /*packed=*/false, D); + *BaseTy = + Builder.getCompleteStructTy(baseBuilder.fieldTypes, baseIdentifier, + /*packed=*/false, D); // TODO(cir): add something like addRecordTypeName // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work @@ -622,8 +623,9 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, // Fill in the struct *after* computing the base type. Filling in the body // signifies that the type is no longer opaque and record layout is complete, // but we may need to recursively layout D while laying D out as a base type. - *Ty = Builder.getStructTy(builder.fieldTypes, getRecordTypeName(D, ""), - /*incomplete=*/false, /*packed=*/false, D); + *Ty = + Builder.getCompleteStructTy(builder.fieldTypes, getRecordTypeName(D, ""), + /*packed=*/false, D); auto RL = std::make_unique( Ty ? *Ty : mlir::cir::StructType{}, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 2e6a2f2db798..daf5b71d5502 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -53,9 +53,10 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { AliasResult getAlias(Type type, raw_ostream &os) const final { if (auto structType = type.dyn_cast()) { - // TODO(cir): generate unique alias names for anonymous records. - if (!structType.getName()) - return AliasResult::NoAlias; + if (!structType.getName()) { + os << "ty_anon_" << structType.getKindAsStr(); + return AliasResult::OverridableAlias; + } os << "ty_" << structType.getName(); return AliasResult::OverridableAlias; } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 39eb5bb58696..5afd3761b2e9 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -108,6 +108,7 @@ Type StructType::parse(mlir::AsmParser &parser) { const auto loc = parser.getCurrentLocation(); bool packed = false; RecordKind kind; + auto *context = parser.getContext(); if (parser.parseLess()) return {}; @@ -152,8 +153,20 @@ Type StructType::parse(mlir::AsmParser &parser) { if (parser.parseGreater()) return {}; - return StructType::get(parser.getContext(), members, name, incomplete, packed, - kind, nullptr); + // Try to create the proper type. + mlir::Type type = {}; + if (name && incomplete) { // Identified & incomplete + type = StructType::get(context, name, kind); + } else if (name && !incomplete) { // Identified & complete + type = StructType::get(context, members, name, packed, kind); + } else if (!name && !incomplete) { // anonymous + type = StructType::get(context, members, packed, kind); + } else { + parser.emitError(loc, "anonymous structs must be complete"); + return {}; + } + + return type; } void StructType::print(mlir::AsmPrinter &printer) const { diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index fc0775af77f4..44c37e83433b 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -22,7 +22,7 @@ void baz(void) { struct Foo f; } -// CHECK-DAG: !ty_22Node22 = !cir.struct +// CHECK-DAG: !ty_22Node22 = !cir.struct // CHECK-DAG: !ty_22Node221 = !cir.struct} #cir.record.decl.ast> // CHECK-DAG: !ty_22Bar22 = !cir.struct // CHECK-DAG: !ty_22Foo22 = !cir.struct diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp index 3672331a90a8..7dcb4c90d6e9 100644 --- a/clang/test/CIR/CodeGen/vbase.cpp +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -2,6 +2,7 @@ // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM +// XFAIL: * struct A { int a; diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 26d46ec249cf..cabd28d1417e 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -19,10 +19,10 @@ class B : public A }; // Type info B. -// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct, !cir.ptr, !cir.ptr}> +// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct, !cir.ptr, !cir.ptr}> // vtable for A type -// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct x 5>}> +// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct x 5>}> // Class A // CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.record.decl.ast> @@ -58,7 +58,7 @@ class B : public A // CHECK: } // Vtable definition for A -// cir.global "private" external @_ZTV1A : ![[VTableTypeA]] {alignment = 8 : i64} +// CHECK: cir.global "private" external @_ZTV1A : ![[VTableTypeA]] {alignment = 8 : i64} // A ctor => @A::A() // Calls @A::A() and initialize __vptr with address of A's vtable diff --git a/clang/test/CIR/IR/aliases.cir b/clang/test/CIR/IR/aliases.cir index a22c5dba4bcc..8d6cbd04c7a2 100644 --- a/clang/test/CIR/IR/aliases.cir +++ b/clang/test/CIR/IR/aliases.cir @@ -1,10 +1,15 @@ // RUN: cir-opt %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -!s32i = !cir.int module { - // CHECK: cir.func @shouldNotUseAliasWithAnonStruct(%arg0: !cir.struct) - cir.func @shouldNotUseAliasWithAnonStruct(%arg0 : !cir.struct) { + // CHECK: @testAnonRecordsAlias + cir.func @testAnonRecordsAlias() { + // CHECK: cir.alloca !ty_anon_struct, cir.ptr + %0 = cir.alloca !cir.struct}>, cir.ptr }>>, ["A"] + // CHECK: cir.alloca !ty_anon_struct1, cir.ptr + %1 = cir.alloca !cir.struct}>, cir.ptr }>>, ["B"] + // CHECK: cir.alloca !ty_anon_union, cir.ptr + %2 = cir.alloca !cir.struct}>, cir.ptr }>>, ["C"] cir.return } } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index ce7eafd6a1e8..95b70193bf4c 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -548,3 +548,10 @@ module { cir.return } } + + +// ----- + +!u16i = !cir.int +// expected-error@+1 {{anonymous structs must be complete}} +!struct = !cir.struct From 51c9a1d426a0878b4c9f08b71e3cdb061d7a5553 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Wed, 1 Nov 2023 20:43:37 -0300 Subject: [PATCH 1235/2301] [CIR] Forbid identified structs with empty names (#301) There are instances of CIR where anonymous structs are generated as identified structs with an empty name. This patch Adds a verifier for StructType, which ensures structs have a non-empty name. This will be required for properly uniqueing mutable CIR structs. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 1 + clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 28 +++++++++++++++---- clang/test/CIR/IR/global.cir | 4 +-- clang/test/CIR/IR/invalid.cir | 9 ++++-- clang/test/CIR/IR/struct.cir | 4 +-- clang/test/CIR/IR/vtableAttr.cir | 5 ++-- 6 files changed, 37 insertions(+), 14 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 7b0c060cabe9..0e6670bf18c6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -116,6 +116,7 @@ def CIR_StructType : CIR_Type<"Struct", "struct", ); let skipDefaultBuilders = 1; + let genVerifyDecl = 1; let builders = [ // Build an identified and complete struct. TypeBuilder<(ins diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 5afd3761b2e9..e362c1b13942 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -17,10 +17,13 @@ #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Diagnostics.h" #include "mlir/IR/DialectImplementation.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TypeSwitch.h" @@ -155,13 +158,15 @@ Type StructType::parse(mlir::AsmParser &parser) { // Try to create the proper type. mlir::Type type = {}; + ArrayRef membersRef(members); // Needed for template deduction. + const auto eLoc = parser.getEncodedSourceLoc(loc); if (name && incomplete) { // Identified & incomplete - type = StructType::get(context, name, kind); + type = getChecked(eLoc, context, name, kind); } else if (name && !incomplete) { // Identified & complete - type = StructType::get(context, members, name, packed, kind); - } else if (!name && !incomplete) { // anonymous - type = StructType::get(context, members, packed, kind); - } else { + type = getChecked(eLoc, context, membersRef, name, packed, kind); + } else if (!name && !incomplete) { // anonymous & complete + type = getChecked(eLoc, context, membersRef, packed, kind); + } else { // anonymous & incomplete parser.emitError(loc, "anonymous structs must be complete"); return {}; } @@ -206,6 +211,19 @@ void StructType::print(mlir::AsmPrinter &printer) const { printer << '>'; } +mlir::LogicalResult +StructType::verify(llvm::function_ref emitError, + llvm::ArrayRef members, mlir::StringAttr name, + bool incomplete, bool packed, + mlir::cir::StructType::RecordKind kind, + ASTRecordDeclInterface ast) { + if (name && name.getValue().empty()) { + emitError() << "identified structs cannot have an empty name"; + return mlir::failure(); + } + return mlir::success(); +} + //===----------------------------------------------------------------------===// // Data Layout information for types //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 8ee44c5beeb0..ff81597d8171 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -8,7 +8,7 @@ module { cir.global external @a = #cir.int<3> : !s32i cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i] : !cir.array> cir.global external @b = #cir.const_array<"example\00" : !cir.array> - cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.ptr : !cir.ptr}> : !cir.struct}> + cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.ptr : !cir.ptr}> : !cir.struct}> cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} cir.global "private" internal @c : !s32i cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} @@ -31,7 +31,7 @@ module { #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr, #cir.global_view<@type_info_name_B> : !cir.ptr, #cir.global_view<@type_info_A> : !cir.ptr}> - : !cir.struct, !cir.ptr, !cir.ptr}> + : !cir.struct, !cir.ptr, !cir.ptr}> cir.func private @_ZN4InitC1Eb(!cir.ptr, !s8i) cir.func private @_ZN4InitD1Ev(!cir.ptr) cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 95b70193bf4c..9f8e06c8c2ca 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -309,7 +309,7 @@ module { cir.global external @type_info_B = #cir.typeinfo<{ // expected-error {{element at index 0 has type '!cir.ptr>' but return type for this element is '!cir.ptr>'}} #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}> - : !cir.struct}> + : !cir.struct}> } // expected-error {{'cir.global' expected constant attribute to match type}} // ----- @@ -549,9 +549,14 @@ module { } } - // ----- !u16i = !cir.int // expected-error@+1 {{anonymous structs must be complete}} !struct = !cir.struct + +// ----- + +!u16i = !cir.int +// expected-error@+1 {{identified structs cannot have an empty name}} +!struct = !cir.struct diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index fb25d04533da..45f31014f159 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -6,8 +6,8 @@ !s32i = !cir.int !u32i = !cir.int -!ty_2222 = !cir.struct x 5>}> -!ty_22221 = !cir.struct, !cir.ptr, !cir.ptr}> +!ty_2222 = !cir.struct x 5>}> +!ty_22221 = !cir.struct, !cir.ptr, !cir.ptr}> !ty_22A22 = !cir.struct !ty_22i22 = !cir.struct !ty_22S22 = !cir.struct diff --git a/clang/test/CIR/IR/vtableAttr.cir b/clang/test/CIR/IR/vtableAttr.cir index a9766e36ffe9..f3792517eea4 100644 --- a/clang/test/CIR/IR/vtableAttr.cir +++ b/clang/test/CIR/IR/vtableAttr.cir @@ -1,9 +1,8 @@ // RUN: cir-opt %s | FileCheck %s !u8i = !cir.int -!ty_2222 = !cir.struct x 1>}> module { // Should parse VTable attribute. - cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr]> : !cir.array x 1>}> : !ty_2222 - // CHECK: cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr]> : !cir.array x 1>}> : !ty_2222 + cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr]> : !cir.array x 1>}> : !cir.struct x 1>}> + // CHECK: cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr]> : !cir.array x 1>}> : !ty_anon_struct } From a98d2ac64a9c1a4e3008a7b8a91ea129818d408f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 3 Nov 2023 15:13:59 -0700 Subject: [PATCH 1236/2301] [CIR] Honor disabling the verifier in lowerDirectlyFromCIRToLLVMIR --- clang/include/clang/CIR/LowerToLLVM.h | 3 ++- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 20 ++++++++++++------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 +++- clang/tools/cir-translate/cir-translate.cpp | 3 ++- 4 files changed, 20 insertions(+), 10 deletions(-) diff --git a/clang/include/clang/CIR/LowerToLLVM.h b/clang/include/clang/CIR/LowerToLLVM.h index e3984bd2ce93..88713bf6e07f 100644 --- a/clang/include/clang/CIR/LowerToLLVM.h +++ b/clang/include/clang/CIR/LowerToLLVM.h @@ -31,7 +31,8 @@ namespace cir { namespace direct { std::unique_ptr lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, - llvm::LLVMContext &llvmCtx); + llvm::LLVMContext &llvmCtx, + bool disableVerifier = false); } // Lower directly from pristine CIR to LLVMIR. diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index b248b3fb8deb..4969f5e2afed 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -72,11 +72,14 @@ static std::string sanitizePassOptions(llvm::StringRef o) { namespace cir { -static std::unique_ptr lowerFromCIRToLLVMIR( - const clang::FrontendOptions &feOptions, mlir::ModuleOp mlirMod, - std::unique_ptr mlirCtx, llvm::LLVMContext &llvmCtx) { +static std::unique_ptr +lowerFromCIRToLLVMIR(const clang::FrontendOptions &feOptions, + mlir::ModuleOp mlirMod, + std::unique_ptr mlirCtx, + llvm::LLVMContext &llvmCtx, bool disableVerifier = false) { if (feOptions.ClangIRDirectLowering) - return direct::lowerDirectlyFromCIRToLLVMIR(mlirMod, llvmCtx); + return direct::lowerDirectlyFromCIRToLLVMIR(mlirMod, llvmCtx, + disableVerifier); else return lowerFromCIRToMLIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); } @@ -248,7 +251,8 @@ class CIRGenConsumer : public clang::ASTConsumer { case CIRGenAction::OutputType::EmitLLVM: { llvm::LLVMContext llvmCtx; auto llvmModule = - lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx); + lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, + feOptions.ClangIRDisableCIRVerifier); llvmModule->setTargetTriple(targetOptions.Triple); @@ -261,7 +265,8 @@ class CIRGenConsumer : public clang::ASTConsumer { case CIRGenAction::OutputType::EmitObj: { llvm::LLVMContext llvmCtx; auto llvmModule = - lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx); + lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, + feOptions.ClangIRDisableCIRVerifier); llvmModule->setTargetTriple(targetOptions.Triple); emitBackendOutput(compilerInstance, codeGenOptions, @@ -273,7 +278,8 @@ class CIRGenConsumer : public clang::ASTConsumer { case CIRGenAction::OutputType::EmitAssembly: { llvm::LLVMContext llvmCtx; auto llvmModule = - lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx); + lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, + feOptions.ClangIRDisableCIRVerifier); llvmModule->setTargetTriple(targetOptions.Triple); emitBackendOutput(compilerInstance, codeGenOptions, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 739d4a481f30..907d45e16eda 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2164,7 +2164,8 @@ std::unique_ptr createConvertCIRToLLVMPass() { extern void registerCIRDialectTranslation(mlir::MLIRContext &context); std::unique_ptr -lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx) { +lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx, + bool disableVerifier) { mlir::MLIRContext *mlirCtx = theModule.getContext(); mlir::PassManager pm(mlirCtx); @@ -2180,6 +2181,7 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx) { // emmited and how to properly avoid them. pm.addPass(mlir::createReconcileUnrealizedCastsPass()); + pm.enableVerifier(!disableVerifier); (void)mlir::applyPassManagerCLOptions(pm); auto result = !mlir::failed(pm.run(theModule)); diff --git a/clang/tools/cir-translate/cir-translate.cpp b/clang/tools/cir-translate/cir-translate.cpp index 743f612194f5..9ff379a26588 100644 --- a/clang/tools/cir-translate/cir-translate.cpp +++ b/clang/tools/cir-translate/cir-translate.cpp @@ -26,7 +26,8 @@ namespace direct { extern void registerCIRDialectTranslation(mlir::DialectRegistry ®istry); extern std::unique_ptr lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, - llvm::LLVMContext &llvmCtx); + llvm::LLVMContext &llvmCtx, + bool disableVerifier = false); } // namespace direct } From e6bd0ecdb563579be298d4ae1283f26fef544a0e Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 3 Nov 2023 19:34:37 -0300 Subject: [PATCH 1237/2301] [CIR][IR][NFC] Redefine tablegen CIR StructType with C++ Essentially, this patch redefines the CIR StructType manually instead of using the autogenerated definition from tablegen. This is the first step to make StructType mutable, as this feature is not yet supported by tablegen. It's mostly a copy of the tablegen definition, with a few notable differences: - A few embellishments are added to make the code more dev-friendly - Addition of a CIRTypesDetails.h file to keep custom storage definitions - The CIR_AnyCIRType constraint is removed, as it is not used and must be defined in C++ to ensure StructType is a part of it. ghstack-source-id: 5f706dc0a61a4a2ed6e2f20ab0937b1a42bfa9cc Pull Request resolved: https://github.com/llvm/clangir/pull/302 --- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 119 ++++++++++++++++- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 122 ------------------ .../clang/CIR/Dialect/IR/CIRTypesDetails.h | 83 ++++++++++++ clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 107 +++++++++++++-- 4 files changed, 298 insertions(+), 133 deletions(-) create mode 100644 clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 1286225f04aa..0d15621e9c61 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -20,10 +20,127 @@ #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" //===----------------------------------------------------------------------===// -// CIR Dialect Types +// CIR Dialect Tablegen'd Types //===----------------------------------------------------------------------===// #define GET_TYPEDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsTypes.h.inc" +//===----------------------------------------------------------------------===// +// CIR StructType +// +// The base type for all RecordDecls. +//===----------------------------------------------------------------------===// + +namespace mlir { +namespace cir { + +namespace detail { +struct StructTypeStorage; +} // namespace detail + +/// Each unique clang::RecordDecl is mapped to a `cir.struct` and any object in +/// C/C++ that has a struct type will have a `cir.struct` in CIR. +class StructType + : public Type::TypeBase { + // FIXME(cir): migrate this type to Tablegen once mutable types are supported. +public: + using Base::Base; + using Base::getChecked; + using Base::verifyInvariants; + + static constexpr StringLiteral name = "cir.struct"; + + enum RecordKind : uint32_t { Class, Union, Struct }; + + /// Create a identified and complete struct type. + static StructType get(MLIRContext *context, ArrayRef members, + StringAttr name, bool packed, RecordKind kind, + ASTRecordDeclInterface ast = {}); + static StructType getChecked(function_ref emitError, + MLIRContext *context, ArrayRef members, + StringAttr name, bool packed, RecordKind kind, + ASTRecordDeclInterface ast = {}); + + /// Create a identified and incomplete struct type. + static StructType get(MLIRContext *context, StringAttr name, RecordKind kind); + static StructType getChecked(function_ref emitError, + MLIRContext *context, StringAttr name, + RecordKind kind); + + /// Create a anonymous struct type (always complete). + static StructType get(MLIRContext *context, ArrayRef members, + bool packed, RecordKind kind, + ASTRecordDeclInterface ast = {}); + static StructType getChecked(function_ref emitError, + MLIRContext *context, ArrayRef members, + bool packed, RecordKind kind, + ASTRecordDeclInterface ast = {}); + + /// Validate the struct about to be constructed. + static LogicalResult + verifyInvariants(function_ref emitError, + ArrayRef members, StringAttr name, bool incomplete, + bool packed, StructType::RecordKind kind, + ASTRecordDeclInterface ast); + + // Parse/print methods. + static constexpr StringLiteral getMnemonic() { return {"struct"}; } + static Type parse(AsmParser &odsParser); + void print(AsmPrinter &odsPrinter) const; + + // Accessors + ASTRecordDeclInterface getAst() const; + ArrayRef getMembers() const; + StringAttr getName() const; + StructType::RecordKind getKind() const; + bool getIncomplete() const; + bool getPacked() const; + void dropAst(); + + // Predicates + bool isClass() const { return getKind() == RecordKind::Class; }; + bool isStruct() const { return getKind() == RecordKind::Struct; }; + bool isUnion() const { return getKind() == RecordKind::Union; }; + bool isComplete() const { return !isIncomplete(); }; + bool isIncomplete() const; + + // Utilities + Type getLargestMember(const DataLayout &dataLayout) const; + size_t getNumElements() const { return getMembers().size(); }; + std::string getKindAsStr() { + switch (getKind()) { + case RecordKind::Class: + return "class"; + case RecordKind::Union: + return "union"; + case RecordKind::Struct: + return "struct"; + } + } + std::string getPrefixedName() { + return getKindAsStr() + "." + getName().getValue().str(); + } + + /// DataLayoutTypeInterface methods. + llvm::TypeSize getTypeSizeInBits(const DataLayout &dataLayout, + DataLayoutEntryListRef params) const; + uint64_t getABIAlignment(const DataLayout &dataLayout, + DataLayoutEntryListRef params) const; + uint64_t getPreferredAlignment(const DataLayout &dataLayout, + DataLayoutEntryListRef params) const; + + // Utilities for lazily computing and cacheing data layout info. +private: + mutable Type largestMember{}; + mutable std::optional padded{}; + mutable std::optional size{}, align{}; + bool isPadded(const DataLayout &dataLayout) const; + void computeSizeAndAlignment(const DataLayout &dataLayout) const; +}; + +} // namespace cir +} // namespace mlir + #endif // MLIR_DIALECT_CIR_IR_CIRTYPES_H_ diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 0e6670bf18c6..bcbb63218e22 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -90,121 +90,6 @@ def CIR_BoolType : let hasCustomAssemblyFormat = 1; } -//===----------------------------------------------------------------------===// -// StructType -// -// The base type for all RecordDecls. -// -//===----------------------------------------------------------------------===// - -def CIR_StructType : CIR_Type<"Struct", "struct", - [DeclareTypeInterfaceMethods]> { - - let summary = "CIR struct type"; - let description = [{ - Each unique clang::RecordDecl is mapped to a `cir.struct` and any object in - C/C++ that has a struct type will have a `cir.struct` in CIR. - }]; - - let parameters = (ins - ArrayRefParameter<"mlir::Type", "members">:$members, - "mlir::StringAttr":$name, - "bool":$incomplete, - "bool":$packed, - "mlir::cir::StructType::RecordKind":$kind, - "ASTRecordDeclInterface":$ast - ); - - let skipDefaultBuilders = 1; - let genVerifyDecl = 1; - let builders = [ - // Build an identified and complete struct. - TypeBuilder<(ins - "ArrayRef":$members, - "StringAttr":$name, - "bool":$packed, - "RecordKind":$kind, - CArg<"ASTRecordDeclInterface", "nullptr">:$ast), [{ - return $_get(context, members, name, /*incomplete=*/false, - packed, kind, ast); - }]>, - // Build an incomplete struct. - TypeBuilder<(ins - "StringAttr":$name, - "RecordKind":$kind), [{ - return $_get(context, /*members=*/ArrayRef{}, name, - /*incomplete=*/true, /*packed=*/false, kind, - /*ast=*/nullptr); - }]>, - // Build an anonymous struct. - TypeBuilder<(ins - "ArrayRef":$members, - "bool":$packed, - "RecordKind":$kind, - CArg<"ASTRecordDeclInterface", "nullptr">:$ast), [{ - return $_get(context, members, /*name=*/nullptr, - /*incomplete=*/false, packed, kind, ast); - }]> - ]; - - let hasCustomAssemblyFormat = 1; - - let extraClassDeclaration = [{ - enum RecordKind : uint32_t { - Class, - Union, - Struct - }; - - private: - // All these support lazily computation and storage - // for the struct size and alignment. - mutable std::optional size{}, align{}; - mutable std::optional padded{}; - mutable mlir::Type largestMember{}; - void computeSizeAndAlignment(const ::mlir::DataLayout &dataLayout) const; - public: - void dropAst(); - size_t getNumElements() const { return getMembers().size(); } - bool isIncomplete() const { return getIncomplete(); } - bool isComplete() const { return !getIncomplete(); } - bool isPadded(const ::mlir::DataLayout &dataLayout) const; - - std::string getKindAsStr() { - switch (getKind()) { - case RecordKind::Class: - return "class"; - case RecordKind::Union: - return "union"; - case RecordKind::Struct: - return "struct"; - } - } - - std::string getPrefixedName() { - return getKindAsStr() + "." + getName().getValue().str(); - } - - /// Return the member with the largest bit-length. - mlir::Type getLargestMember(const ::mlir::DataLayout &dataLayout) const; - - /// Return whether this is a class declaration. - bool isClass() const { return getKind() == RecordKind::Class; } - - /// Return whether this is a union declaration. - bool isUnion() const { return getKind() == RecordKind::Union; } - - /// Return whether this is a struct declaration. - bool isStruct() const { return getKind() == RecordKind::Struct; } - }]; - - let extraClassDefinition = [{ - void $cppClass::dropAst() { - getImpl()->ast = nullptr; - } - }]; -} - //===----------------------------------------------------------------------===// // ArrayType //===----------------------------------------------------------------------===// @@ -295,11 +180,4 @@ def CIR_VoidType : CIR_Type<"Void", "void"> { }]; } -//===----------------------------------------------------------------------===// -// One type to bind them all -//===----------------------------------------------------------------------===// - -def CIR_AnyCIRType : AnyTypeOf<[CIR_PointerType, CIR_BoolType, CIR_StructType, - CIR_ArrayType, CIR_FuncType, CIR_VoidType]>; - #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h new file mode 100644 index 000000000000..d33d43c346d5 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h @@ -0,0 +1,83 @@ +//===- CIRTypesDetails.h - Details of CIR dialect types -----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains implementation details, such as storage structures, of +// CIR dialect types. +// +//===----------------------------------------------------------------------===// +#ifndef CIR_DIALECT_IR_CIRTYPESDETAILS_H +#define CIR_DIALECT_IR_CIRTYPESDETAILS_H + +#include "mlir/IR/BuiltinAttributes.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" + +namespace mlir { +namespace cir { +namespace detail { + +//===----------------------------------------------------------------------===// +// CIR StructTypeStorage +//===----------------------------------------------------------------------===// + +/// Type storage for CIR record types. +struct StructTypeStorage : public TypeStorage { + struct KeyTy { + ArrayRef members; + StringAttr name; + bool incomplete; + bool packed; + StructType::RecordKind kind; + ASTRecordDeclInterface ast; + + KeyTy(ArrayRef members, StringAttr name, bool incomplete, bool packed, + StructType::RecordKind kind, ASTRecordDeclInterface ast) + : members(members), name(name), incomplete(incomplete), packed(packed), + kind(kind), ast(ast) {} + }; + + ArrayRef members; + StringAttr name; + bool incomplete; + bool packed; + StructType::RecordKind kind; + ASTRecordDeclInterface ast; + + StructTypeStorage(ArrayRef members, StringAttr name, bool incomplete, + bool packed, StructType::RecordKind kind, + ASTRecordDeclInterface ast) + : members(members), name(name), incomplete(incomplete), packed(packed), + kind(kind), ast(ast) {} + + KeyTy getAsKey() const { + return KeyTy(members, name, incomplete, packed, kind, ast); + } + + bool operator==(const KeyTy &key) const { + return (members == key.members) && (name == key.name) && + (incomplete == key.incomplete) && (packed == key.packed) && + (kind == key.kind) && (ast == key.ast); + } + + static llvm::hash_code hashKey(const KeyTy &key) { + return hash_combine(key.members, key.name, key.incomplete, key.packed, + key.kind, key.ast); + } + + static StructTypeStorage *construct(TypeStorageAllocator &allocator, + const KeyTy &key) { + return new (allocator.allocate()) + StructTypeStorage(allocator.copyInto(key.members), key.name, + key.incomplete, key.packed, key.kind, key.ast); + } +}; + +} // namespace detail +} // namespace cir +} // namespace mlir + +#endif // CIR_DIALECT_IR_CIRTYPESDETAILS_H diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index e362c1b13942..1283aa49ace8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -13,6 +13,7 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypesDetails.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributes.h" @@ -58,17 +59,36 @@ Type CIRDialect::parseType(DialectAsmParser &parser) const { llvm::SMLoc typeLoc = parser.getCurrentLocation(); StringRef mnemonic; Type genType; + + // Try to parse as a tablegen'd type. OptionalParseResult parseResult = generatedTypeParser(parser, &mnemonic, genType); if (parseResult.has_value()) return genType; - parser.emitError(typeLoc, "unknown type in CIR dialect"); - return Type(); + + // Type is not tablegen'd: try to parse as a raw C++ type. + return StringSwitch>(mnemonic) + .Case("struct", [&] { return StructType::parse(parser); }) + .Default([&] { + parser.emitError(typeLoc) << "unknown CIR type: " << mnemonic; + return Type(); + })(); } void CIRDialect::printType(Type type, DialectAsmPrinter &os) const { - if (failed(generatedTypePrinter(type, os))) - llvm_unreachable("unexpected CIR type kind"); + // Try to print as a tablegen'd type. + if (generatedTypePrinter(type, os).succeeded()) + return; + + // Type is not tablegen'd: try printing as a raw C++ type. + TypeSwitch(type) + .Case([&](StructType type) { + os << type.getMnemonic(); + type.print(os); + }) + .Default([](Type) { + llvm::report_fatal_error("printer is missing a handler for this type"); + }); } Type PointerType::parse(mlir::AsmParser &parser) { @@ -211,12 +231,11 @@ void StructType::print(mlir::AsmPrinter &printer) const { printer << '>'; } -mlir::LogicalResult -StructType::verify(llvm::function_ref emitError, - llvm::ArrayRef members, mlir::StringAttr name, - bool incomplete, bool packed, - mlir::cir::StructType::RecordKind kind, - ASTRecordDeclInterface ast) { +mlir::LogicalResult StructType::verifyInvariants( + llvm::function_ref emitError, + llvm::ArrayRef members, mlir::StringAttr name, bool incomplete, + bool packed, mlir::cir::StructType::RecordKind kind, + ASTRecordDeclInterface ast) { if (name && name.getValue().empty()) { emitError() << "identified structs cannot have an empty name"; return mlir::failure(); @@ -224,6 +243,70 @@ StructType::verify(llvm::function_ref emitError, return mlir::success(); } +void StructType::dropAst() { getImpl()->ast = nullptr; } +StructType StructType::get(::mlir::MLIRContext *context, ArrayRef members, + StringAttr name, bool packed, RecordKind kind, + ASTRecordDeclInterface ast) { + return Base::get(context, members, name, /*incomplete=*/false, packed, kind, + ast); +} + +StructType StructType::getChecked( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::mlir::MLIRContext *context, ArrayRef members, StringAttr name, + bool packed, RecordKind kind, ASTRecordDeclInterface ast) { + return Base::getChecked(emitError, context, members, name, + /*incomplete=*/false, packed, kind, ast); +} + +StructType StructType::get(::mlir::MLIRContext *context, StringAttr name, + RecordKind kind) { + return Base::get(context, /*members=*/ArrayRef{}, name, + /*incomplete=*/true, /*packed=*/false, kind, + /*ast=*/ASTRecordDeclInterface{}); +} + +StructType StructType::getChecked( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::mlir::MLIRContext *context, StringAttr name, RecordKind kind) { + return Base::getChecked(emitError, context, ArrayRef{}, name, + /*incomplete=*/true, /*packed=*/false, kind, + ASTRecordDeclInterface{}); +} + +StructType StructType::get(::mlir::MLIRContext *context, ArrayRef members, + bool packed, RecordKind kind, + ASTRecordDeclInterface ast) { + return Base::get(context, members, StringAttr{}, /*incomplete=*/false, packed, + kind, ast); +} + +StructType StructType::getChecked( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::mlir::MLIRContext *context, ArrayRef members, bool packed, + RecordKind kind, ASTRecordDeclInterface ast) { + return Base::getChecked(emitError, context, members, StringAttr{}, + /*incomplete=*/false, packed, kind, ast); +} + +::llvm::ArrayRef StructType::getMembers() const { + return getImpl()->members; +} + +bool StructType::isIncomplete() const { return getImpl()->incomplete; } + +mlir::StringAttr StructType::getName() const { return getImpl()->name; } + +bool StructType::getIncomplete() const { return getImpl()->incomplete; } + +bool StructType::getPacked() const { return getImpl()->packed; } + +mlir::cir::StructType::RecordKind StructType::getKind() const { + return getImpl()->kind; +} + +ASTRecordDeclInterface StructType::getAst() const { return getImpl()->ast; } + //===----------------------------------------------------------------------===// // Data Layout information for types //===----------------------------------------------------------------------===// @@ -535,8 +618,12 @@ bool FuncType::isVoid() const { return getReturnType().isa(); } //===----------------------------------------------------------------------===// void CIRDialect::registerTypes() { + // Register tablegen'd types. addTypes< #define GET_TYPEDEF_LIST #include "clang/CIR/Dialect/IR/CIROpsTypes.cpp.inc" >(); + + // Register raw C++ types. + addTypes(); } From fef536d2ec9ed6f833987833c995c372cae12ef6 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Fri, 3 Nov 2023 19:34:37 -0300 Subject: [PATCH 1238/2301] [CIR][CIRGen] Support mutable and recursive named records This allows a named StructType to be mutated after it has been created, if it is identified and incomplete. The motivation for this is to improve the codegen of CIR in certain scenarios where an incomplete type is used and later completed. These usually leave the IR in an inconsistent state, where there are two records types with the same identifier but different definitions (one complete the other incomplete). For example: ```c++ struct Node { Node *next; }; void test(struct Node n) {} ``` Generates: ```mlir !temp_struct = !cir.struct !full_struct = !cir.struct}> ``` To generate the `Node` struct type, its members must be created first. However, the `next` member is a recursive reference, so it can only be completed after its parent. This generates a temporary incomplete definition of the `Node` type that remains in the code even after the type to which it refers is completed. As a consequence, accessing the `next` member of a `Node` value fetches the old incomplete version of the type which affects CIR's type-checking capabilities. This patch ensures that, once the parent is fully visited, the `next` member can be completed in place, automatically updating any references to it at a low cost. To represent recursive types, the StructType now is equipped with self-references. These are represented by a `cir.struct` type with just the name of the parent struct that it refers to. The same snippet of code will not generate the following CIR IR: ```mlir !full_struct = !cir.struct>}> ``` Summary of the changes made: - Named records are now uniquely identified by their name. An attempt to create a new record with the same will fail. - Anonymous records are uniquely identified by members and other relevant attributes. - StructType has a new `mutate` method that allows it to be mutated after it has been created. Each type can only be mutated if it is identified and incomplete, rendering further changes impossible. - When building a new name StructType, the builder will try to first create, then complete the type, ensuring that: - Inexistent types are created - Existing incomplete types are completed - Existing complete types with matching attributes are reused - Existing complete types with different attributes raise errors - StructType now uses the CyclicParser/Printer guard to avoid infinite recursion and identify when it should print/parse a self-reference. ghstack-source-id: a6d4f650515cbf2d7f6e27d45aae6f768ba44f92 Pull Request resolved: https://github.com/llvm/clangir/pull/303 --- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 40 +++++- .../clang/CIR/Dialect/IR/CIRTypesDetails.h | 36 ++++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 16 ++- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 50 ++++++- clang/test/CIR/CodeGen/bitfields.cpp | 1 - clang/test/CIR/CodeGen/derived-to-base.cpp | 4 +- clang/test/CIR/CodeGen/dtors.cpp | 4 +- clang/test/CIR/CodeGen/forward-decls.cpp | 125 ++++++++++++++++++ clang/test/CIR/CodeGen/struct.c | 5 +- clang/test/CIR/CodeGen/struct.cpp | 2 +- clang/test/CIR/CodeGen/union.cpp | 1 + clang/test/CIR/CodeGen/vtable-rtti.cpp | 8 +- clang/test/CIR/IR/invalid.cir | 10 ++ clang/test/CIR/IR/struct.cir | 9 ++ 15 files changed, 289 insertions(+), 24 deletions(-) create mode 100644 clang/test/CIR/CodeGen/forward-decls.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 0d15621e9c61..40a3486e12ee 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -41,9 +41,43 @@ struct StructTypeStorage; /// Each unique clang::RecordDecl is mapped to a `cir.struct` and any object in /// C/C++ that has a struct type will have a `cir.struct` in CIR. +/// +/// There are three possible formats for this type: +/// +/// - Identified and complete structs: unique name and a known body. +/// - Identified and incomplete structs: unique name and unkonwn body. +/// - Anonymous structs: no name and a known body. +/// +/// Identified structs are uniqued by their name, and anonymous structs are +/// uniqued by their body. This means that two anonymous structs with the same +/// body will be the same type, and two identified structs with the same name +/// will be the same type. Attempting to build a struct with a existing name, +/// but a different body will result in an error. +/// +/// A few examples: +/// +/// ```mlir +/// !complete = !cir.struct}> +/// !incomplete = !cir.struct +/// !anonymous = !cir.struct}> +/// ``` +/// +/// Incomplete structs are mutable, meaning the can be later completed with a +/// body automatically updating in place every type in the code that uses the +/// incomplete struct. Mutability allows for recursive types to be represented, +/// meaning the struct can have members that refer to itself. This is useful for +/// representing recursive records and is implemented through a special syntax. +/// In the example below, the `Node` struct has a member that is a pointer to a +/// `Node` struct: +/// +/// ```mlir +/// !struct = !cir.struct>}> +/// ``` class StructType : public Type::TypeBase { + DataLayoutTypeInterface::Trait, + TypeTrait::IsMutable> { // FIXME(cir): migrate this type to Tablegen once mutable types are supported. public: using Base::Base; @@ -123,6 +157,10 @@ class StructType return getKindAsStr() + "." + getName().getValue().str(); } + /// Complete the struct type by mutating its members and attributes. + void complete(ArrayRef members, bool packed, + ASTRecordDeclInterface ast = {}); + /// DataLayoutTypeInterface methods. llvm::TypeSize getTypeSizeInBits(const DataLayout &dataLayout, DataLayoutEntryListRef params) const; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h index d33d43c346d5..ae9e97ce3cab 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h @@ -14,7 +14,9 @@ #define CIR_DIALECT_IR_CIRTYPESDETAILS_H #include "mlir/IR/BuiltinAttributes.h" +#include "mlir/Support/LogicalResult.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/ADT/Hashing.h" namespace mlir { namespace cir { @@ -58,14 +60,18 @@ struct StructTypeStorage : public TypeStorage { } bool operator==(const KeyTy &key) const { + if (name) + return (name == key.name) && (kind == key.kind); return (members == key.members) && (name == key.name) && (incomplete == key.incomplete) && (packed == key.packed) && (kind == key.kind) && (ast == key.ast); } static llvm::hash_code hashKey(const KeyTy &key) { - return hash_combine(key.members, key.name, key.incomplete, key.packed, - key.kind, key.ast); + if (key.name) + return llvm::hash_combine(key.name, key.kind); + return llvm::hash_combine(key.members, key.incomplete, key.packed, key.kind, + key.ast); } static StructTypeStorage *construct(TypeStorageAllocator &allocator, @@ -74,6 +80,32 @@ struct StructTypeStorage : public TypeStorage { StructTypeStorage(allocator.copyInto(key.members), key.name, key.incomplete, key.packed, key.kind, key.ast); } + + /// Mutates the members and attributes an identified struct. + /// + /// Once a record is mutated, it is marked as complete, preventing further + /// mutations. Anonymous structs are always complete and cannot be mutated. + /// This method does not fail if a mutation of a complete struct does not + /// change the struct. + LogicalResult mutate(TypeStorageAllocator &allocator, ArrayRef members, + bool packed, ASTRecordDeclInterface ast) { + // Anonymous structs cannot mutate. + if (!name) + return failure(); + + // Mutation of complete structs are allowed if they change nothing. + if (!incomplete) + return mlir::success((this->members == members) && + (this->packed == packed) && (this->ast == ast)); + + // Mutate incomplete struct. + this->members = allocator.copyInto(members); + this->packed = packed; + this->ast = ast; + + incomplete = false; + return success(); + } }; } // namespace detail diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 8b7a27aab27a..b4c80b23c9c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -435,6 +435,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } /// Get a CIR named struct type. + /// + /// If a struct already exists and is complete, but the client tries to fetch + /// it with a different set of attributes, this method will crash. mlir::cir::StructType getCompleteStructTy(llvm::ArrayRef members, llvm::StringRef name, bool packed, const clang::RecordDecl *ast) { @@ -445,8 +448,16 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { astAttr = getAttr(ast); kind = getRecordKind(ast->getTagKind()); } - return getType(members, nameAttr, packed, kind, - astAttr); + + // Create or get the struct. + auto type = getType(members, nameAttr, packed, kind, + astAttr); + + // Complete an incomplete struct or ensure the existing complete struct + // matches the requested attributes. + type.complete(members, packed, astAttr); + + return type; } // @@ -689,7 +700,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { auto flag = getBool(val, loc); return create(loc, flag, dst); } - }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 18be250d0fc6..9b6fdf484438 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -596,7 +596,7 @@ std::unique_ptr CIRGenTypes::computeRecordLayout(const RecordDecl *D, mlir::cir::StructType *Ty) { CIRRecordLowering builder(*this, D, /*packed=*/false); - + assert(Ty->isIncomplete() && "recomputing record layout?"); builder.lower(/*nonVirtualBaseType=*/false); // If we're in C++, compute the base subobject type. diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 1283aa49ace8..9ebdac1ae903 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -128,7 +128,9 @@ Type StructType::getLargestMember(const ::mlir::DataLayout &dataLayout) const { } Type StructType::parse(mlir::AsmParser &parser) { + FailureOr cyclicParseGuard; const auto loc = parser.getCurrentLocation(); + const auto eLoc = parser.getEncodedSourceLoc(loc); bool packed = false; RecordKind kind; auto *context = parser.getContext(); @@ -152,6 +154,26 @@ Type StructType::parse(mlir::AsmParser &parser) { mlir::StringAttr name; parser.parseOptionalAttribute(name); + // Is a self reference: ensure referenced type was parsed. + if (name && parser.parseOptionalGreater().succeeded()) { + auto type = getChecked(eLoc, context, name, kind); + if (succeeded(parser.tryStartCyclicParse(type))) { + parser.emitError(loc, "invalid self-reference within record"); + return {}; + } + return type; + } + + // Is a named record definition: ensure name has not been parsed yet. + if (name) { + auto type = getChecked(eLoc, context, name, kind); + cyclicParseGuard = parser.tryStartCyclicParse(type); + if (failed(cyclicParseGuard)) { + parser.emitError(loc, "record already defined"); + return {}; + } + } + if (parser.parseOptionalKeyword("packed").succeeded()) packed = true; @@ -176,14 +198,17 @@ Type StructType::parse(mlir::AsmParser &parser) { if (parser.parseGreater()) return {}; - // Try to create the proper type. - mlir::Type type = {}; + // Try to create the proper record type. ArrayRef membersRef(members); // Needed for template deduction. - const auto eLoc = parser.getEncodedSourceLoc(loc); + mlir::Type type = {}; if (name && incomplete) { // Identified & incomplete type = getChecked(eLoc, context, name, kind); } else if (name && !incomplete) { // Identified & complete type = getChecked(eLoc, context, membersRef, name, packed, kind); + // If the record has a self-reference, its type already exists in a + // incomplete state. In this case, we must complete it. + if (type.cast().isIncomplete()) + type.cast().complete(membersRef, packed, ast); } else if (!name && !incomplete) { // anonymous & complete type = getChecked(eLoc, context, membersRef, packed, kind); } else { // anonymous & incomplete @@ -195,6 +220,7 @@ Type StructType::parse(mlir::AsmParser &parser) { } void StructType::print(mlir::AsmPrinter &printer) const { + FailureOr cyclicPrintGuard; printer << '<'; switch (getKind()) { @@ -210,7 +236,17 @@ void StructType::print(mlir::AsmPrinter &printer) const { } if (getName()) - printer << getName() << " "; + printer << getName(); + + // Current type has already been printed: print as self reference. + cyclicPrintGuard = printer.tryStartCyclicPrint(*this); + if (failed(cyclicPrintGuard)) { + printer << '>'; + return; + } + + // Type not yet printed: continue printing the entire record. + printer << ' '; if (getPacked()) printer << "packed "; @@ -307,6 +343,12 @@ mlir::cir::StructType::RecordKind StructType::getKind() const { ASTRecordDeclInterface StructType::getAst() const { return getImpl()->ast; } +void StructType::complete(ArrayRef members, bool packed, + ASTRecordDeclInterface ast) { + if (mutate(members, packed, ast).failed()) + llvm_unreachable("failed to complete struct"); +} + //===----------------------------------------------------------------------===// // Data Layout information for types //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index af086c111204..d9514a0ce2bf 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -27,7 +27,6 @@ typedef struct { int a : 3; // one bitfield with size < 8 unsigned b; } T; - // CHECK: !ty_22S22 = !cir.struct // CHECK: !ty_22T22 = !cir.struct // CHECK: !ty_22anon2E122 = !cir.struct diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 7eeb41e17555..b6794b6d970e 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -75,8 +75,8 @@ void C3::Layer::Initialize() { } } -// CHECK-DAG: !ty_22C23A3ALayer22 = !cir.struct -// CHECK-DAG: !ty_22C33A3ALayer22 = !cir.struct>>} #cir.record.decl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast> // Class B -// CHECK: ![[ClassB:ty_.*]] = !cir.struct +// CHECK: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> // CHECK: cir.func @_Z4bluev() // CHECK: %0 = cir.alloca !ty_22PSEvent22, cir.ptr , ["p", init] {alignment = 8 : i64} diff --git a/clang/test/CIR/CodeGen/forward-decls.cpp b/clang/test/CIR/CodeGen/forward-decls.cpp new file mode 100644 index 000000000000..a67807a540bb --- /dev/null +++ b/clang/test/CIR/CodeGen/forward-decls.cpp @@ -0,0 +1,125 @@ +// RUN: split-file %s %t + + +//--- incomplete_struct + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %t/incomplete_struct -o %t/incomplete_struct.cir +// RUN: FileCheck %s --input-file=%t/incomplete_struct.cir --check-prefix=CHECK1 + +// Forward declaration of the record is never defined, so it is created as +// an incomplete struct in CIR and will remain as such. + +// CHECK1: ![[INC_STRUCT:.+]] = !cir.struct +struct IncompleteStruct; +// CHECK1: testIncompleteStruct(%arg0: !cir.ptr +void testIncompleteStruct(struct IncompleteStruct *s) {}; + + + +//--- mutated_struct + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %t/mutated_struct -o %t/mutated_struct.cir +// RUN: FileCheck %s --input-file=%t/mutated_struct.cir --check-prefix=CHECK2 + +// Foward declaration of the struct is followed by usage, then definition. +// This means it will initially be created as incomplete, then completed. + +// CHECK2: ![[COMPLETE:.+]] = !cir.struct +// CHECK2: testForwardDeclaredStruct(%arg0: !cir.ptr +struct ForwardDeclaredStruct; +void testForwardDeclaredStruct(struct ForwardDeclaredStruct *fds) {}; +struct ForwardDeclaredStruct { + int testVal; +}; + + + +//--- recursive_struct + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %t/recursive_struct -o %t/recursive_struct.cir +// RUN: FileCheck --check-prefix=CHECK3 --input-file=%t/recursive_struct.cir %s + +// Struct is initially forward declared since the self-reference is generated +// first. Then, once the type is fully generated, it is completed. + +// CHECK3: ![[STRUCT:.+]] = !cir.struct>} #cir.record.decl.ast> +struct RecursiveStruct { + int value; + struct RecursiveStruct *next; +}; +// CHECK3: testRecursiveStruct(%arg0: !cir.ptr +void testRecursiveStruct(struct RecursiveStruct *arg) { + // CHECK3: %[[#NEXT:]] = cir.get_member %{{.+}}[1] {name = "next"} : !cir.ptr -> !cir.ptr> + // CHECK3: %[[#DEREF:]] = cir.load %[[#NEXT]] : cir.ptr >, !cir.ptr + // CHECK3: cir.get_member %[[#DEREF]][0] {name = "value"} : !cir.ptr -> !cir.ptr + arg->next->value; +} + + + +//--- indirect_recursive_struct + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %t/indirect_recursive_struct -o %t/indirect_recursive_struct.cir +// RUN: FileCheck --check-prefix=CHECK4 --input-file=%t/indirect_recursive_struct.cir %s + +// Node B refers to A, and vice-versa, so a forward declaration is used to +// ensure the classes can be defined. Since types alias are not yet supported +// in recursive type, each struct is expanded until there are no more recursive +// types, or all the recursive types are self references. + +// CHECK4: ![[B:.+]] = !cir.struct>} +// CHECK4: ![[A:.+]] = !cir.struct}> +struct StructNodeB; +struct StructNodeA { + int value; + struct StructNodeB *next; +}; +struct StructNodeB { + int value; + struct StructNodeA *next; +}; + +void testIndirectSelfReference(struct StructNodeA arg) { + // CHECK4: %[[#V1:]] = cir.get_member %{{.+}}[1] {name = "next"} : !cir.ptr -> !cir.ptr> + // CHECK4: %[[#V2:]] = cir.load %[[#V1]] : cir.ptr >, !cir.ptr + // CHECK4: %[[#V3:]] = cir.get_member %[[#V2]][1] {name = "next"} : !cir.ptr -> !cir.ptr> + // CHECK4: %[[#V4:]] = cir.load %[[#V3]] : cir.ptr >, !cir.ptr + // CHECK4: cir.get_member %[[#V4]][0] {name = "value"} : !cir.ptr -> !cir.ptr + arg.next->next->value; +} + + + +//--- complex_struct + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %t/complex_struct -o %t/complex_struct.cir +// RUN: FileCheck --check-prefix=CHECK5 --input-file=%t/complex_struct.cir %s + +// A sizeable complex struct just to double check that stuff is working. + +// CHECK5: !cir.struct, !cir.struct>, !cir.struct>, !cir.ptr>, !cir.ptr>} #cir.record.decl.ast>, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !cir.struct, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>, !cir.struct, !cir.struct} #cir.record.decl.ast>>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !ty_22C22, !cir.struct} #cir.record.decl.ast>>, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !ty_22C22, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>, !ty_22anon2E422} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !ty_22C22, !ty_22anon2E522} #cir.record.decl.ast> +// CHECK5: !cir.struct +struct A { + struct { + struct A *a1; + }; + struct B { + struct B *b1; + struct C { + struct A *a2; + struct B *b2; + struct C *c1; + } c; + union { + struct A *a2; + struct { + struct B *b3; + }; + } u; + } b; +}; +void test(struct A *a){}; diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 44c37e83433b..5039e8abb8fd 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -22,8 +22,7 @@ void baz(void) { struct Foo f; } -// CHECK-DAG: !ty_22Node22 = !cir.struct -// CHECK-DAG: !ty_22Node221 = !cir.struct} #cir.record.decl.ast> +// CHECK-DAG: !ty_22Node22 = !cir.struct>} #cir.record.decl.ast> // CHECK-DAG: !ty_22Bar22 = !cir.struct // CHECK-DAG: !ty_22Foo22 = !cir.struct // CHECK-DAG: module {{.*}} { @@ -96,7 +95,7 @@ void local_decl(void) { } // CHECK-DAG: cir.func @useRecursiveType -// CHECK-DAG: cir.get_member {{%.}}[0] {name = "next"} : !cir.ptr -> !cir.ptr> +// CHECK-DAG: cir.get_member {{%.}}[0] {name = "next"} : !cir.ptr -> !cir.ptr> void useRecursiveType(NodeStru* a) { a->next = 0; } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index cbd106f77754..a4df34c3bd8d 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -26,7 +26,7 @@ void baz() { struct incomplete; void yoyo(incomplete *i) {} -// CHECK-DAG-DAG: !ty_22incomplete22 = !cir.struct // CHECK-DAG: !ty_22Foo22 = !cir.struct diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index cc75106e659f..b8bb4d0fc5be 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -8,6 +8,7 @@ typedef union { yolo y; struct { bool life; int genpad; }; } yolm3; // CHECK-DAG: !ty_22U23A3ADummy22 = !cir.struct // CHECK-DAG: !ty_22anon2E522 = !cir.struct +// CHECK-DAG: !ty_22anon2E122 = !cir.struct // CHECK-DAG: !ty_22yolo22 = !cir.struct // CHECK-DAG: !ty_22anon2E322 = !cir.struct, !s32i} #cir.record.decl.ast> diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index cabd28d1417e..f4410aed0d24 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -19,16 +19,16 @@ class B : public A }; // Type info B. -// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct, !cir.ptr, !cir.ptr}> +// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct>, !cir.ptr>, !cir.ptr>}> // vtable for A type -// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct x 5>}> +// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct> x 5>}> // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.record.decl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast> // Class B -// CHECK: ![[ClassB:ty_.*]] = !cir.struct +// CHECK: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> // B ctor => @B::B() // Calls @A::A() and initialize __vptr with address of B's vtable. diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 9f8e06c8c2ca..e40d2d4aab96 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -560,3 +560,13 @@ module { !u16i = !cir.int // expected-error@+1 {{identified structs cannot have an empty name}} !struct = !cir.struct + +// ----- + +// expected-error@+1 {{invalid self-reference within record}} +!struct = !cir.struct}> + +// ----- + +// expected-error@+1 {{record already defined}} +!struct = !cir.struct}> diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index 45f31014f159..65a319538d1a 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -13,7 +13,16 @@ !ty_22S22 = !cir.struct !ty_22S122 = !cir.struct +// Test recursive struct parsing/printing. +!ty_22Node22 = !cir.struct>} #cir.record.decl.ast> +// CHECK-DAG: !cir.struct>} #cir.record.decl.ast> + module { + // Dummy function to use types and force them to be printed. + cir.func @useTypes(%arg0: !ty_22Node22) { + cir.return + } + cir.func @structs() { %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["i", init] From 75067a99a4b34ce3c9134b5858b1e53f23627d06 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Wed, 8 Nov 2023 12:27:30 -0800 Subject: [PATCH 1239/2301] [CIR][NFC] Refactor ScalarExprEmitter::buildScalarCast (#306) Matrix types are already checked for in `buildScalarConversion`, so they don't need to be checked for again in `buildScalarCast`. Not having to worry about matrix types means the `Element` local variables are no longer necessary. Remove duplicate code by having a variable to store the `CastKind`, and have only one call to `Builder.create`. There are no test changes, because this is refactoring only. There should be no functional changes. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 70 ++++++++++------------ 1 file changed, 31 insertions(+), 39 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index e1cc84141a64..50703e5a0cde 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -922,7 +922,9 @@ class ScalarExprEmitter : public StmtVisitor { } if (SrcType->isMatrixType() && DstType->isMatrixType()) - llvm_unreachable("not implemented"); + llvm_unreachable("NYI: matrix type to matrix type conversion"); + assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && + "Internal error: conversion between matrix type and scalar type"); // TODO(CIR): Support VectorTypes @@ -1614,52 +1616,40 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { llvm_unreachable("destination type for negation unary operator is NYI"); } +// Conversion from bool, integral, or floating-point to integral or +// floating-point. Conversions involving other types are handled elsewhere. +// Conversion to bool is handled elsewhere because that's a comparison against +// zero, not a simple cast. mlir::Value ScalarExprEmitter::buildScalarCast( mlir::Value Src, QualType SrcType, QualType DstType, mlir::Type SrcTy, mlir::Type DstTy, ScalarConversionOpts Opts) { - // The Element types determine the type of cast to perform. - mlir::Type SrcElementTy; - mlir::Type DstElementTy; - QualType SrcElementType; - QualType DstElementType; - if (SrcType->isMatrixType() && DstType->isMatrixType()) { - llvm_unreachable("NYI"); - } else { - assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && - "cannot cast between matrix and non-matrix types"); - SrcElementTy = SrcTy; - DstElementTy = DstTy; - SrcElementType = SrcType; - DstElementType = DstType; - } - - if (SrcElementTy.isa() || - DstElementTy.isa()) + assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && + "Internal error: matrix types not handled by this function."); + if (SrcTy.isa() || DstTy.isa()) llvm_unreachable("Obsolete code. Don't use mlir::IntegerType with CIR."); - if (SrcElementType->isBooleanType()) { + std::optional CastKind; + + if (SrcType->isBooleanType()) { if (Opts.TreatBooleanAsSigned) llvm_unreachable("NYI: signed bool"); - if (CGF.getBuilder().isInt(DstElementTy)) { - return Builder.create( - Src.getLoc(), DstTy, mlir::cir::CastKind::bool_to_int, Src); + if (CGF.getBuilder().isInt(DstTy)) { + CastKind = mlir::cir::CastKind::bool_to_int; } else if (DstTy.isa()) { llvm_unreachable("NYI: bool->float cast"); } else { - llvm_unreachable("Unexpected destination type for scalar cast"); + llvm_unreachable("Internal error: Cast to unexpected type"); } - } else if (CGF.getBuilder().isInt(SrcElementTy)) { - if (CGF.getBuilder().isInt(DstElementTy)) { - return Builder.create( - Src.getLoc(), DstTy, mlir::cir::CastKind::integral, Src); - } else if (DstElementTy.isa()) { - return Builder.create( - Src.getLoc(), DstTy, mlir::cir::CastKind::int_to_float, Src); + } else if (CGF.getBuilder().isInt(SrcTy)) { + if (CGF.getBuilder().isInt(DstTy)) { + CastKind = mlir::cir::CastKind::integral; + } else if (DstTy.isa()) { + CastKind = mlir::cir::CastKind::int_to_float; } else { - llvm_unreachable("Unexpected destination type for scalar cast"); + llvm_unreachable("Internal error: Cast to unexpected type"); } - } else if (SrcElementTy.isa()) { - if (CGF.getBuilder().isInt(DstElementTy)) { + } else if (SrcTy.isa()) { + if (CGF.getBuilder().isInt(DstTy)) { // If we can't recognize overflow as undefined behavior, assume that // overflow saturates. This protects against normal optimizations if we // are compiling with non-standard FP semantics. @@ -1667,17 +1657,19 @@ mlir::Value ScalarExprEmitter::buildScalarCast( llvm_unreachable("NYI"); if (Builder.getIsFPConstrained()) llvm_unreachable("NYI"); - return Builder.create( - Src.getLoc(), DstTy, mlir::cir::CastKind::float_to_int, Src); - } else if (DstElementTy.isa()) { + CastKind = mlir::cir::CastKind::float_to_int; + } else if (DstTy.isa()) { // TODO: split this to createFPExt/createFPTrunc return Builder.createFloatingCast(Src, DstTy); } else { - llvm_unreachable("Unexpected destination type for scalar cast"); + llvm_unreachable("Internal error: Cast to unexpected type"); } } else { - llvm_unreachable("Unexpected source type for scalar cast"); + llvm_unreachable("Internal error: Cast from unexpected type"); } + + assert(CastKind.has_value() && "Internal error: CastKind not set."); + return Builder.create(Src.getLoc(), DstTy, *CastKind, Src); } LValue From 68534a56b2001197069c95a053310ba243b5294e Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 14 Nov 2023 19:57:05 +0300 Subject: [PATCH 1240/2301] [CIR][CodeGen][Bugfix] fixes explicit cast in initialization (#309) The PR fixes a var initialization with explicit cast. --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 3 +-- clang/test/CIR/CodeGen/constptr.c | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 55d4c2baaf93..77a08848c222 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -9,7 +9,6 @@ // This contains code to emit Constant Expr nodes as LLVM code. // //===----------------------------------------------------------------------===// - #include "Address.h" #include "CIRDataLayout.h" #include "CIRGenCstEmitter.h" @@ -705,7 +704,7 @@ class ConstExprEmitter mlir::Attribute VisitCastExpr(CastExpr *E, QualType destType) { if (const auto *ECE = dyn_cast(E)) - llvm_unreachable("NYI"); + CGM.buildExplicitCastExprType(ECE, Emitter.CGF); Expr *subExpr = E->getSubExpr(); switch (E->getCastKind()) { diff --git a/clang/test/CIR/CodeGen/constptr.c b/clang/test/CIR/CodeGen/constptr.c index 7f01cb854c6b..b400cb8c444f 100644 --- a/clang/test/CIR/CodeGen/constptr.c +++ b/clang/test/CIR/CodeGen/constptr.c @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM -// XFAIL: * int *p = (int*)0x1234; From de6489c388b2a66f160572d3e898a8b15a989d31 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Wed, 15 Nov 2023 13:52:05 -0800 Subject: [PATCH 1241/2301] [CIR] Support bool-to-float conversions (#307) Add a new entry to enum `CastKind`, `bool_to_float`, since none of the existing enum values adequately covered that conversion. Add code to code gen, CIR validation, LLVM lowering, and the cast test to cover this conversion. Fix ClangIR issue #290 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 ++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 10 +++++++++- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 ++++++++ clang/test/CIR/CodeGen/cast.cpp | 5 ++++- clang/test/CIR/IR/invalid.cir | 14 ++++++++++++++ 6 files changed, 38 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 09f793403b2b..71f00d4e299f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -55,6 +55,7 @@ def CK_PointerToIntegral : I32EnumAttrCase<"ptr_to_int", 9>; def CK_FloatToBoolean : I32EnumAttrCase<"float_to_bool", 10>; def CK_BooleanToIntegral : I32EnumAttrCase<"bool_to_int", 11>; def CK_IntegralToFloat : I32EnumAttrCase<"int_to_float", 12>; +def CK_BooleanToFloat : I32EnumAttrCase<"bool_to_float", 13>; def CastKind : I32EnumAttr< "CastKind", @@ -62,7 +63,7 @@ def CastKind : I32EnumAttr< [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast, CK_BitCast, CK_FloatingCast, CK_PtrToBoolean, CK_FloatToIntegral, CK_IntegralToPointer, CK_PointerToIntegral, CK_FloatToBoolean, - CK_BooleanToIntegral, CK_IntegralToFloat]> { + CK_BooleanToIntegral, CK_IntegralToFloat, CK_BooleanToFloat]> { let cppNamespace = "::mlir::cir"; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 50703e5a0cde..312bef00fc41 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1636,7 +1636,7 @@ mlir::Value ScalarExprEmitter::buildScalarCast( if (CGF.getBuilder().isInt(DstTy)) { CastKind = mlir::cir::CastKind::bool_to_int; } else if (DstTy.isa()) { - llvm_unreachable("NYI: bool->float cast"); + CastKind = mlir::cir::CastKind::bool_to_float; } else { llvm_unreachable("Internal error: Cast to unexpected type"); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index daf5b71d5502..29181e4dbe54 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -343,13 +343,21 @@ LogicalResult CastOp::verify() { return emitOpError() << "requires !cir.int for result"; return success(); } - case cir::CastKind::int_to_float: + case cir::CastKind::int_to_float: { if (!srcType.isa()) return emitOpError() << "requires !cir.int for source"; if (!resType.isa()) return emitOpError() << "requires !cir.float for result"; return success(); } + case cir::CastKind::bool_to_float: { + if (!srcType.isa()) + return emitOpError() << "requires !cir.bool for source"; + if (!resType.isa()) + return emitOpError() << "requires !cir.float for result"; + return success(); + } + } llvm_unreachable("Unknown CastOp kind?"); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 907d45e16eda..d9f9dbed8d5e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -648,6 +648,14 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } + case mlir::cir::CastKind::bool_to_float: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } case mlir::cir::CastKind::int_to_float: { auto dstTy = castOp.getType(); auto llvmSrcVal = adaptor.getOperands().front(); diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index b7f4c8538a61..b6fc4e7d959b 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -68,11 +68,14 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4, double x5) { unsigned fptoui = (unsigned)x3; // Floating point to unsigned integer // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : f32), !u32i - bool ib = (bool)x1; // No checking, because this isn't a cast. + bool ib = (bool)x1; // No checking, because this isn't a regular cast. int bi = (int)ib; // bool to int // CHECK: %{{[0-9]+}} = cir.cast(bool_to_int, %{{[0-9]+}} : !cir.bool), !s32i + float bf = (float)ib; // bool to float + // CHECK: %{{[0-9]+}} = cir.cast(bool_to_float, %{{[0-9]+}} : !cir.bool), f32 + float dptofp = (float)x5; // CHECK: %{{.+}} = cir.cast(floating, %{{[0-9]+}} : f64), f32 diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index e40d2d4aab96..54b6b16d5644 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -159,6 +159,20 @@ cir.func @cast4(%p: !cir.ptr) { // ----- +cir.func @cast5(%p: f32) { + %2 = cir.cast(bool_to_float, %p : f32), f32 // expected-error {{requires !cir.bool for source}} + cir.return +} + +// ----- + +cir.func @cast6(%p: !cir.bool) { + %2 = cir.cast(bool_to_float, %p : !cir.bool), !cir.int // expected-error {{requires !cir.float for result}} + cir.return +} + +// ----- + #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool cir.func @b0() { From 12def754d37985c6a1ec5959217ff1a8c5494b13 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Wed, 15 Nov 2023 15:36:17 -0800 Subject: [PATCH 1242/2301] [CIR] Add validation tests for scalar casts (#317) Fix a couple typos in the validation failure messages for scalar casts --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 +- clang/test/CIR/IR/invalid.cir | 143 +++++++++++++++++++++++- 2 files changed, 144 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 29181e4dbe54..726516732c6e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -264,14 +264,14 @@ LogicalResult CastOp::verify() { if (!resType.isa()) return emitOpError() << "requires !cir.bool type for result"; if (!srcType.isa()) - return emitOpError() << "requires integral type for result"; + return emitOpError() << "requires integral type for source"; return success(); } case cir::CastKind::ptr_to_bool: { if (!resType.isa()) return emitOpError() << "requires !cir.bool type for result"; if (!srcType.isa()) - return emitOpError() << "requires pointer type for result"; + return emitOpError() << "requires pointer type for source"; return success(); } case cir::CastKind::integral: { diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 54b6b16d5644..989cd36b787d 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -128,7 +128,7 @@ cir.func @cast0(%arg0: !u32i) { // ----- cir.func @cast1(%arg1: f32) { - %1 = cir.cast(int_to_bool, %arg1 : f32), !cir.bool // expected-error {{requires integral type for result}} + %1 = cir.cast(int_to_bool, %arg1 : f32), !cir.bool // expected-error {{requires integral type for source}} cir.return } @@ -173,6 +173,147 @@ cir.func @cast6(%p: !cir.bool) { // ----- +!u32i = !cir.int +cir.func @cast7(%p: !cir.ptr) { + %2 = cir.cast(ptr_to_bool, %p : !cir.ptr), !u32i // expected-error {{requires !cir.bool type for result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast8(%p: !u32i) { + %2 = cir.cast(ptr_to_bool, %p : !u32i), !cir.bool // expected-error {{requires pointer type for source}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast9(%p : !u32i) { + %2 = cir.cast(integral, %p : !u32i), f32 // expected-error {{requires !IntegerType for result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast10(%p : f32) { + %2 = cir.cast(integral, %p : f32), !u32i // expected-error {{requires !IntegerType for source}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast11(%p : f32) { + %2 = cir.cast(floating, %p : f32), !u32i // expected-error {{requries floating for source and result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast12(%p : !u32i) { + %2 = cir.cast(floating, %p : !u32i), f32 // expected-error {{requries floating for source and result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast13(%p : !u32i) { + %2 = cir.cast(float_to_int, %p : !u32i), !u32i // expected-error {{requires floating for source}} + cir.return +} + +// ----- + +cir.func @cast14(%p : f32) { + %2 = cir.cast(float_to_int, %p : f32), f32 // expected-error {{requires !IntegerType for result}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @cast15(%p : !cir.ptr) { + %2 = cir.cast(int_to_ptr, %p : !cir.ptr), !cir.ptr // expected-error {{requires integer for source}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @cast16(%p : !u64i) { + %2 = cir.cast(int_to_ptr, %p : !u64i), !u64i // expected-error {{requires pointer for result}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @cast17(%p : !u64i) { + %2 = cir.cast(ptr_to_int, %p : !u64i), !u64i // expected-error {{requires pointer for source}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @cast18(%p : !cir.ptr) { + %2 = cir.cast(ptr_to_int, %p : !cir.ptr), !cir.ptr // expected-error {{requires integer for result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast19(%p : !u32i) { + %2 = cir.cast(float_to_bool, %p : !u32i), !cir.bool // expected-error {{requires float for source}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast20(%p : f32) { + %2 = cir.cast(float_to_bool, %p : f32), !u32i // expected-error {{requires !cir.bool for result}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast21(%p : !u32i) { + %2 = cir.cast(bool_to_int, %p : !u32i), !u32i // expected-error {{requires !cir.bool for source}} + cir.return +} + +// ----- + +cir.func @cast22(%p : !cir.bool) { + %2 = cir.cast(bool_to_int, %p : !cir.bool), f32 // expected-error {{requires !cir.int for result}} + cir.return +} + +// ----- + +cir.func @cast23(%p : !cir.bool) { + %2 = cir.cast(int_to_float, %p : !cir.bool), f32 // expected-error {{requires !cir.int for source}} + cir.return +} + +// ----- + +!u32i = !cir.int +cir.func @cast24(%p : !u32i) { + %2 = cir.cast(int_to_float, %p : !u32i), !cir.bool // expected-error {{requires !cir.float for result}} + cir.return +} + +// ----- + #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool cir.func @b0() { From aefc069d2a2bfe8105534f793c344fa3b40710c9 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Fri, 17 Nov 2023 15:17:22 -0800 Subject: [PATCH 1243/2301] [CIR] Fix bool-to-pointer conversions (#319) Conversions from an integer to a pointer are implemented in CIR code gen as an integral conversion to uintptr_t followed by the integral-to-pointer conversion. Conversions from bool to pointer were following the same code path. But bool-to-int is a different CastKind than int-to-int in CIR, and CIR was failing validation. Fix the integer to pointer conversion code to correctly handle a source type of bool. (A conversion from bool to pointer makes no sense and should never happen in the real world. But it is legal due to bool being sort of an integral type. So we need to support it.) Also, in `ScalarExprEmitter::buildScalarConversion` change a couple not-yet-implemented messages about pointer types into assertion failures. Conversions involving pointer types should never go through `ScalarExprEmitter::buildScalarConversion`. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 22 ++++++++++------------ clang/test/CIR/CodeGen/cast.cpp | 4 ++++ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 312bef00fc41..fd3528b763dc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -898,17 +898,9 @@ class ScalarExprEmitter : public StmtVisitor { return Src; } - // Handle pointer conversions next: pointers can only be converted to/from - // other pointers and integers. - if (DstTy.isa<::mlir::cir::PointerType>()) { - llvm_unreachable("not implemented"); - } - - if (SrcTy.isa<::mlir::cir::PointerType>()) { - // Must be a ptr to int cast. - assert(CGF.getBuilder().isInt(DstTy) && "not ptr->int?"); - llvm_unreachable("not implemented"); - } + assert(!SrcTy.isa<::mlir::cir::PointerType>() && + !DstTy.isa<::mlir::cir::PointerType>() && + "Internal error: pointer conversions are handled elsewhere"); // A scalar can be splatted to an extended vector of the same element type if (DstType->isExtVectorType() && !SrcType->isVectorType()) { @@ -1439,8 +1431,14 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { mlir::Value Src = Visit(const_cast(E)); // Properly resize by casting to an int of the same size as the pointer. + // Clang's IntegralToPointer includes 'bool' as the source, but in CIR + // 'bool' is not an integral type. So check the source type to get the + // correct CIR conversion. auto MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestCIRTy); - auto MiddleVal = Builder.createIntCast(Src, MiddleTy); + auto MiddleVal = Builder.createCast(E->getType()->isBooleanType() + ? mlir::cir::CastKind::bool_to_int + : mlir::cir::CastKind::integral, + Src, MiddleTy); if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index b6fc4e7d959b..d1b6be12b15b 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -76,6 +76,10 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4, double x5) { float bf = (float)ib; // bool to float // CHECK: %{{[0-9]+}} = cir.cast(bool_to_float, %{{[0-9]+}} : !cir.bool), f32 + void* bpv = (void*)ib; // bool to pointer, which is done in two steps + // CHECK: %[[TMP:[0-9]+]] = cir.cast(bool_to_int, %{{[0-9]+}} : !cir.bool), !u64i + // CHECK: %{{[0-9]+}} = cir.cast(int_to_ptr, %[[TMP]] : !u64i), !cir.ptr + float dptofp = (float)x5; // CHECK: %{{.+}} = cir.cast(floating, %{{[0-9]+}} : f64), f32 From 063d6ef45b52f3b6e65c13cbf7c8c45bc3e39332 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Sat, 18 Nov 2023 03:14:56 +0300 Subject: [PATCH 1244/2301] [CIR][CIRGen] Ensure proper tmp location for agg exprs (#320) This PR fixes a bug with wrong arguments passed into the call to `buildAnyExpr`. This function has args with default values, hence the bug occurred. All the changes are even with the clang's original codegen. For the reference, the LLVM IR code for `agg-init.cpp::usev()` function looks like the following: ``` define dso_local void @_Z3usev() #0 { entry: %agg.tmp.ensured = alloca %struct.yep_, align 4 %Status = getelementptr inbounds %struct.yep_, ptr %agg.tmp.ensured, i32 0, i32 0 store i32 0, ptr %Status, align 4 %HC = getelementptr inbounds %struct.yep_, ptr %agg.tmp.ensured, i32 0, i32 1 store i32 0, ptr %HC, align 4 ret void } ``` --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 13 +++++++------ clang/test/CIR/CodeGen/agg-init.cpp | 2 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 14 +++++++------- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 2ee4431438df..2c2eb9470344 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1205,7 +1205,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, /// Emit code to compute the specified expression, ignoring the result. void CIRGenFunction::buildIgnoredExpr(const Expr *E) { if (E->isPRValue()) - return (void)buildAnyExpr(E); + return (void)buildAnyExpr(E, AggValueSlot::ignored(), true); // Just emit it as an l-value and drop the result. buildLValue(E); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index a67eb88b9976..c1ec0c9f6690 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -43,9 +43,9 @@ class AggExprEmitter : public StmtVisitor { void withReturnValueSlot(const Expr *E, llvm::function_ref Fn); - AggValueSlot EnsureSlot(QualType T) { - assert(!Dest.isIgnored() && "ignored slots NYI"); - return Dest; + AggValueSlot EnsureSlot(mlir::Location loc, QualType T) { + if (!Dest.isIgnored()) return Dest; + return CGF.CreateAggTemp(T, loc, "agg.tmp.ensured"); } void EnsureDest(mlir::Location loc, QualType T) { @@ -504,7 +504,7 @@ void AggExprEmitter::VisitMaterializeTemporaryExpr( } void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { - AggValueSlot Slot = EnsureSlot(E->getType()); + AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType()); CGF.buildCXXConstructExpr(E, Slot); } @@ -526,7 +526,7 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { CIRGenFunction::SourceLocRAIIObject loc{CGF, CGF.getLoc(E->getSourceRange())}; - AggValueSlot Slot = EnsureSlot(E->getType()); + AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType()); LLVM_ATTRIBUTE_UNUSED LValue SlotLV = CGF.makeAddrLValue(Slot.getAddress(), E->getType()); @@ -754,7 +754,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( } #endif - AggValueSlot Dest = EnsureSlot(ExprToVisit->getType()); + AggValueSlot Dest = EnsureSlot(CGF.getLoc(ExprToVisit->getSourceRange()), + ExprToVisit->getType()); LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), ExprToVisit->getType()); diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 0e72da619cea..198ac6004d7e 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -17,7 +17,7 @@ typedef struct yep_ { void use() { yop{}; } // CHECK: cir.func @_Z3usev() -// CHECK: %0 = cir.alloca !ty_22yep_22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca !ty_22yep_22, cir.ptr , ["agg.tmp.ensured"] {alignment = 4 : i64} // CHECK: %1 = cir.get_member %0[0] {name = "Status"} : !cir.ptr -> !cir.ptr // CHECK: %2 = cir.const(#cir.int<0> : !u32i) : !u32i // CHECK: cir.store %2, %1 : !u32i, cir.ptr diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index f4410aed0d24..0389a0493301 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -48,14 +48,14 @@ class B : public A // foo - zero initialize object B and call ctor (@B::B()) // // CHECK: cir.func @_Z3foov() -// CHECK: %0 = cir.alloca ![[ClassB]], cir.ptr , ["agg.tmp0"] {alignment = 8 : i64} -// CHECK: cir.scope { -// CHECK: %1 = cir.const(#cir.zero : ![[ClassB]]) : ![[ClassB]] -// CHECK: cir.store %1, %0 : ![[ClassB]], cir.ptr -// CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () -// CHECK: } -// CHECK: cir.return +// CHECK: cir.scope { +// CHECK: %0 = cir.alloca !ty_22B22, cir.ptr , ["agg.tmp.ensured"] {alignment = 8 : i64} +// CHECK: %1 = cir.const(#cir.zero : ![[ClassB]]) : ![[ClassB]] +// CHECK: cir.store %1, %0 : ![[ClassB]], cir.ptr +// CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () // CHECK: } +// CHECK: cir.return +// CHECK: } // Vtable definition for A // CHECK: cir.global "private" external @_ZTV1A : ![[VTableTypeA]] {alignment = 8 : i64} From 2568e2b2132721f5302ea4ac03d5ada6e6661e03 Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Mon, 20 Nov 2023 22:20:53 +0300 Subject: [PATCH 1245/2301] [CIR][CodeGen] Support global variable offsets in initializers (gh-299). (#305) This PR adds proper handling for address offsets in global initializers as e.g. in ``` int val[10]; int *addr = &val[1]; ``` (such offsets are ignored on current trunk). I'm not proud of this patch because it performs an ugly conversion from byte offset, produced by `APValue::getLValueOffset`, to a sequence of CIR indices. Alternative suggestions are welcomed. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 42 +++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 32 ++++++++++++++--- clang/test/CIR/CodeGen/globals.c | 3 ++ 3 files changed, 72 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index b4c80b23c9c9..55b578a6646e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -10,6 +10,7 @@ #define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H #include "Address.h" +#include "CIRDataLayout.h" #include "CIRGenTypeCache.h" #include "UnimplementedFeatureGuarding.h" @@ -700,6 +701,47 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { auto flag = getBool(val, loc); return create(loc, flag, dst); } + + // Convert byte offset to sequence of high-level indices suitable for + // GlobalViewAttr. Ideally we shouldn't deal with low-level offsets at all + // but currently some parts of Clang AST, which we don't want to touch just + // yet, return them. + void computeGlobalViewIndicesFromFlatOffset( + int64_t Offset, mlir::Type Ty, CIRDataLayout Layout, + llvm::SmallVectorImpl &Indices) { + if (!Offset) + return; + + mlir::Type SubType; + + if (auto ArrayTy = Ty.dyn_cast()) { + auto EltSize = Layout.getTypeAllocSize(ArrayTy.getEltType()); + Indices.push_back(Offset / EltSize); + SubType = ArrayTy.getEltType(); + Offset %= EltSize; + } else if (auto PtrTy = Ty.dyn_cast()) { + auto EltSize = Layout.getTypeAllocSize(PtrTy.getPointee()); + Indices.push_back(Offset / EltSize); + SubType = PtrTy.getPointee(); + Offset %= EltSize; + } else if (auto StructTy = Ty.dyn_cast()) { + auto Elts = StructTy.getMembers(); + for (size_t I = 0; I < Elts.size(); ++I) { + auto EltSize = Layout.getTypeAllocSize(Elts[I]); + if (Offset < EltSize) { + Indices.push_back(I); + SubType = Elts[I]; + break; + } + Offset -= EltSize; + } + } else { + llvm_unreachable("unexpected type"); + } + + assert(SubType); + computeGlobalViewIndicesFromFlatOffset(Offset, SubType, Layout, Indices); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 77a08848c222..f2db6408eef5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1007,10 +1007,10 @@ struct ConstantLValue { /*implicit*/ ConstantLValue(mlir::Value value, bool hasOffsetApplied = false) : Value(value), HasOffsetApplied(hasOffsetApplied) {} - /*implicit*/ ConstantLValue(mlir::cir::GlobalViewAttr address) : Value(address) {} + /*implicit*/ ConstantLValue(mlir::cir::GlobalViewAttr address) + : Value(address), HasOffsetApplied(false) {} ConstantLValue(std::nullptr_t) : ConstantLValue({}, false) {} - ConstantLValue(mlir::Attribute value) : Value(value) {} }; /// A helper class for emitting constant l-values. @@ -1052,8 +1052,22 @@ class ConstantLValueEmitter bool hasNonZeroOffset() const { return !Value.getLValueOffset().isZero(); } - /// Return the value offset. - mlir::Attribute getOffset() { llvm_unreachable("NYI"); } + /// Return GEP-like value offset + mlir::ArrayAttr getOffset(mlir::Type Ty) { + auto Offset = Value.getLValueOffset().getQuantity(); + CIRDataLayout Layout(CGM.getModule()); + SmallVector Idx; + CGM.getBuilder().computeGlobalViewIndicesFromFlatOffset(Offset, Ty, Layout, + Idx); + + llvm::SmallVector Indices; + for (auto I : Idx) { + auto Attr = mlir::cir::IntAttr::get(CGM.getBuilder().getSInt64Ty(), I); + Indices.push_back(Attr); + } + + return CGM.getBuilder().getArrayAttr(Indices); + } // TODO(cir): create a proper interface to absctract CIR constant values. @@ -1062,6 +1076,14 @@ class ConstantLValueEmitter if (!hasNonZeroOffset()) return C; + if (auto Attr = C.Value.dyn_cast()) { + auto GV = cast(Attr); + assert(!GV.getIndices()); + + return mlir::cir::GlobalViewAttr::get( + GV.getType(), GV.getSymbol(), getOffset(GV.getType())); + } + // TODO(cir): use ptr_stride, or something... llvm_unreachable("NYI"); } @@ -1097,7 +1119,7 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { return {}; // Apply the offset if necessary and not already done. - if (!result.HasOffsetApplied && !value.is()) { + if (!result.HasOffsetApplied) { value = applyOffset(result).Value; } diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 5e5428045a3e..a6b9309dbad6 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -47,6 +47,9 @@ struct { } nestedStringPtr = {"1"}; // CHECK: cir.global external @nestedStringPtr = #cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr}> +int *globalPtr = &nestedString.y[1]; +// CHECK: cir.global external @globalPtr = #cir.global_view<@nestedString, [#cir.int<0> : !s64i, #cir.int<1> : !s64i, #cir.int<1> : !s64i]> + // TODO: test tentatives with internal linkage. // Tentative definition is THE definition. Should be zero-initialized. From d96d7fde3747e0f53097538f8ea4c70c8db3b196 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Mon, 20 Nov 2023 22:24:43 +0300 Subject: [PATCH 1246/2301] [CIR][CIRGen][Lowering] supports functions pointers (#316) This PR adds a support of the function pointers in CIR. From the implementation point of view, we emit an address of a function as a `GlobalViewAttr`. --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 10 +++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 ++ clang/test/CIR/CodeGen/fun-ptr.c | 47 +++++++++++++++++++ 3 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/fun-ptr.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index f2db6408eef5..79621c31e3dd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1155,8 +1155,14 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) { if (D->hasAttr()) llvm_unreachable("emit pointer base for weakref is NYI"); - if (auto *FD = dyn_cast(D)) - llvm_unreachable("emit pointer base for fun decl is NYI"); + if (auto *FD = dyn_cast(D)) { + auto fop = CGM.GetAddrOfFunction(FD); + auto builder = CGM.getBuilder(); + auto ctxt = builder.getContext(); + return mlir::cir::GlobalViewAttr::get( + builder.getPointerTo(fop.getFunctionType()), + mlir::FlatSymbolRefAttr::get(ctxt, fop.getSymNameAttr())); + } if (auto *VD = dyn_cast(D)) { // We can never refer to a variable with local storage. diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d9f9dbed8d5e..5e4a3d6eb260 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -236,6 +236,9 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } else if (auto cirSymbol = dyn_cast(sourceSymbol)) { sourceType = converter->convertType(cirSymbol.getSymType()); symName = cirSymbol.getSymName(); + } else if (auto llvmFun = dyn_cast(sourceSymbol)) { + sourceType = llvmFun.getFunctionType(); + symName = llvmFun.getSymName(); } else { llvm_unreachable("Unexpected GlobalOp type"); } diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c new file mode 100644 index 000000000000..d9d4a7809bc2 --- /dev/null +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -0,0 +1,47 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -x c++ -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM +// RUN: %clang_cc1 -x c++ -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM +// XFAIL: * + +typedef struct { + int a; + int b; +} Data; + +typedef int (*fun_t)(Data* d); + +int extract_a(Data* d) { + return d->a; +} + +// CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr +// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["d", init] +// CIR: [[TMP1:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] +// CIR: [[TMP2:%.*]] = cir.alloca !cir.ptr)>>, cir.ptr )>>>, ["f", init] +// CIR: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CIR: [[TMP3:%.*]] = cir.const(#cir.ptr : !cir.ptr)>>) : !cir.ptr)>> +// CIR: cir.store [[TMP3]], [[TMP2]] : !cir.ptr)>>, cir.ptr )>>> +// CIR: [[TMP4:%.*]] = cir.get_global {{@.*extract_a.*}} : cir.ptr )>> +// CIR: cir.store [[TMP4]], [[TMP2]] : !cir.ptr)>>, cir.ptr )>>> +// CIR: [[TMP5:%.*]] = cir.load [[TMP2]] : cir.ptr )>>>, !cir.ptr)>> +// CIR: [[TMP6:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CIR: [[TMP7:%.*]] = cir.call [[TMP5]]([[TMP6]]) : (!cir.ptr)>>, !cir.ptr) -> !s32i +// CIR: cir.store [[TMP7]], [[TMP1]] : !s32i, cir.ptr + +// LLVM: define i32 {{@.*foo.*}}(ptr %0) +// LLVM: [[TMP1:%.*]] = alloca ptr, i64 1 +// LLVM: [[TMP2:%.*]] = alloca i32, i64 1 +// LLVM: [[TMP3:%.*]] = alloca ptr, i64 1 +// LLVM: store ptr %0, ptr [[TMP1]] +// LLVM: store ptr null, ptr [[TMP3]] +// LLVM: store ptr {{@.*extract_a.*}}, ptr [[TMP3]] +// LLVM: [[TMP4:%.*]] = load ptr, ptr [[TMP3]] +// LLVM: [[TMP5:%.*]] = load ptr, ptr [[TMP1]] +// LLVM: [[TMP6:%.*]] = call i32 [[TMP4]](ptr [[TMP5]]) +// LLVM: store i32 [[TMP6]], ptr [[TMP2]] +int foo(Data* d) { + fun_t f = 0; + f = extract_a; + return f(d); +} From 7fc7e54f335b58b4e12f386148629f3c8275edc4 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Mon, 20 Nov 2023 22:25:34 +0300 Subject: [PATCH 1247/2301] [CIR][Lowering] fix lowering for the structs inited with zeros (#315) Basically that is, the next code should work now ``` typedef struct { int a; int b; } A; ... A a = {0, 0}; ``` --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 12 +++++++++++- clang/test/CIR/Lowering/struct-init.c | 12 ++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/struct-init.c diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5e4a3d6eb260..72eca4f76c76 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -9,7 +9,6 @@ // This file implements lowering of CIR operations to LLVMIR. // //===----------------------------------------------------------------------===// - #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" @@ -1076,6 +1075,17 @@ class CIRConstantLowering rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); + } else if (auto strTy = op.getType().dyn_cast()) { + if (auto zero = op.getValue().dyn_cast()) { + auto initVal = + lowerCirAttrAsValue(op, zero, rewriter, typeConverter); + rewriter.replaceAllUsesWith(op, initVal); + rewriter.eraseOp(op); + return mlir::success(); + } + + return op.emitError() + << "unsupported lowering for struct constant type " << op.getType(); } else return op.emitError() << "unsupported constant type " << op.getType(); diff --git a/clang/test/CIR/Lowering/struct-init.c b/clang/test/CIR/Lowering/struct-init.c new file mode 100644 index 000000000000..3c94cf9d5f50 --- /dev/null +++ b/clang/test/CIR/Lowering/struct-init.c @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +struct S { + int x; +}; + +// LLVM: define void @zeroInit +// LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1 +// LLVM: store %struct.S zeroinitializer, ptr [[TMP0]] +void zeroInit() { + struct S s = {0}; +} From a12ee5003c67eaaa571406c4dcfbb50369344a7e Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 22 Nov 2023 22:43:10 +0300 Subject: [PATCH 1248/2301] [CIR][Codegen] Fixes function ptrs in recursive types (#328) Since recursive types were perfectly fixed, we can safely remove the assert that prevented functons types generation for the case of incomplete types. The test is added - just to show that everything is ok for such kind of functions. --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 5 ----- clang/test/CIR/CodeGen/fun-ptr.c | 10 ++++++++++ 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index ceae82729cc1..d52cd0f3380f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -260,11 +260,6 @@ mlir::Type CIRGenTypes::ConvertFunctionTypeInternal(QualType QFT) { // the function type. assert(isFuncTypeConvertible(FT) && "NYI"); - // While we're converting the parameter types for a function, we don't want to - // recursively convert any pointed-to structs. Converting directly-used - // structs is ok though. - assert(RecordsBeingLaidOut.insert(Ty).second && "NYI"); - // The function type can be built; call the appropriate routines to build it const CIRGenFunctionInfo *FI; if (const auto *FPT = dyn_cast(FT)) { diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c index d9d4a7809bc2..9e0681881c77 100644 --- a/clang/test/CIR/CodeGen/fun-ptr.c +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -11,6 +11,16 @@ typedef struct { typedef int (*fun_t)(Data* d); +struct A; +typedef int (*fun_typ)(struct A*); + +typedef struct A { + fun_typ fun; +} A; + +// CIR: !ty_22A22 = !cir.struct (!cir.ptr>)>>} #cir.record.decl.ast> +A a = {(fun_typ)0}; + int extract_a(Data* d) { return d->a; } From 2aa389b4ca5435b6206e7d51df8e88f644fdbbcb Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 22 Nov 2023 22:54:17 +0300 Subject: [PATCH 1249/2301] [CIR][IR] Harden get_member verifier (#330) I think it's time to claim that CIR supports recursive types (many thanks to #303 and to @sitio-couto :) ) And we can bring back the `get_member` verification back, with no checks for incomplete types. What do you think? And we can close #256 as well --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 19 +------------------ clang/test/CIR/IR/getmember.cir | 7 ------- 2 files changed, 1 insertion(+), 25 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 726516732c6e..8fcc8c472ac2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2423,14 +2423,6 @@ LogicalResult MemCpyOp::verify() { return mlir::success(); } -static bool isIncompleteType(mlir::Type typ) { - if (auto ptr = typ.dyn_cast()) - return isIncompleteType(ptr.getPointee()); - else if (auto rec = typ.dyn_cast()) - return rec.isIncomplete(); - return false; -} - //===----------------------------------------------------------------------===// // GetMemberOp Definitions //===----------------------------------------------------------------------===// @@ -2441,21 +2433,12 @@ LogicalResult GetMemberOp::verify() { if (!recordTy) return emitError() << "expected pointer to a record type"; - // FIXME: currently we bypass typechecking of incomplete types due to errors - // in the codegen process. This should be removed once the codegen is fixed. - if (isIncompleteType(recordTy)) - return mlir::success(); - if (recordTy.getMembers().size() <= getIndex()) return emitError() << "member index out of bounds"; // FIXME(cir): member type check is disabled for classes as the codegen for // these still need to be patched. - // Also we bypass the typechecking for the fields of incomplete types. - bool shouldSkipMemberTypeMismatch = - recordTy.isClass() || isIncompleteType(recordTy.getMembers()[getIndex()]); - - if (!shouldSkipMemberTypeMismatch + if (!recordTy.isClass() && recordTy.getMembers()[getIndex()] != getResultTy().getPointee()) return emitError() << "member type mismatch"; diff --git a/clang/test/CIR/IR/getmember.cir b/clang/test/CIR/IR/getmember.cir index 932e4a5b29f5..5bfd8f24d161 100644 --- a/clang/test/CIR/IR/getmember.cir +++ b/clang/test/CIR/IR/getmember.cir @@ -15,13 +15,6 @@ module { cir.return } - // FIXME: remove bypass once codegen for CIR records is patched. - cir.func @shouldBypassMemberIndexCheckForIncompleteRecords(%arg0 : !cir.ptr) { - // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr - %0 = cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr - cir.return - } - // FIXME: remove bypass once codegen for CIR class records is patched. cir.func @shouldBypassMemberTypeCheckForClassRecords(%arg0 : !cir.ptr) { // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr> From a92025b7c1420cc8a6d55c7d05dcd6b1d2433642 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 22 Nov 2023 23:00:15 +0300 Subject: [PATCH 1250/2301] [CIR][CodeGen] Support incomplete arrays (#333) Just a minor fix with for incomplete arrays + minor refactoring --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 14 ++++++++++++++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 25 ++++++++++++------------- clang/test/CIR/CodeGen/array.c | 12 ++++++++++++ 3 files changed, 38 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 55b578a6646e..503f0797ae93 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -461,6 +461,20 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return type; } + mlir::cir::ArrayType getArrayType(mlir::Type eltType, unsigned size) { + return mlir::cir::ArrayType::get(getContext(), eltType, size); + } + + bool isSized(mlir::Type ty) { + if (ty.isIntOrFloat() || + ty.isa()) + return true; + assert(0 && "Unimplemented size for type"); + return false; + } + // // Constant creation helpers // ------------------------- diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index d52cd0f3380f..d5cfa41b94d7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -615,27 +615,26 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; } case Type::IncompleteArray: { - assert(0 && "not implemented"); + const IncompleteArrayType *A = cast(Ty); + assert(A->getIndexTypeCVRQualifiers() == 0 && + "FIXME: We only handle trivial array types so far!"); + // int X[] -> [0 x int], unless the element type is not sized. If it is + // unsized (e.g. an incomplete struct) just use [0 x i8]. + ResultType = convertTypeForMem(A->getElementType()); + if (!Builder.isSized(ResultType)) { + SkippedLayout = true; + ResultType = Builder.getUInt8Ty(); + } + ResultType = Builder.getArrayType(ResultType, 0); break; } case Type::ConstantArray: { const ConstantArrayType *A = cast(Ty); auto EltTy = convertTypeForMem(A->getElementType()); - // FIXME(cir): add a `isSized` method to CIRGenBuilder. - auto isSized = [&](mlir::Type ty) { - if (ty.isIntOrFloat() || - ty.isa()) - return true; - assert(0 && "not implemented"); - return false; - }; - // FIXME: In LLVM, "lower arrays of undefined struct type to arrays of // i8 just to have a concrete type". Not sure this makes sense in CIR yet. - assert(isSized(EltTy) && "not implemented"); + assert(Builder.isSized(EltTy) && "not implemented"); ResultType = ::mlir::cir::ArrayType::get(Builder.getContext(), EltTy, A->getSize().getZExtValue()); break; diff --git a/clang/test/CIR/CodeGen/array.c b/clang/test/CIR/CodeGen/array.c index 1bf556f65973..22c9b906fde8 100644 --- a/clang/test/CIR/CodeGen/array.c +++ b/clang/test/CIR/CodeGen/array.c @@ -6,3 +6,15 @@ struct S { int i; } arr[3] = {{1}}; // CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array + +int a[4]; +int (*ptr_a)[] = &a; +// CHECK: cir.global external @a = #cir.zero : !cir.array +// CHECK: cir.global external @ptr_a = #cir.global_view<@a> : !cir.ptr> + +extern int foo[]; +// CHECK: cir.global "private" external @foo : !cir.array + +void useFoo(int i) { + foo[i] = 42; +} \ No newline at end of file From 18150c7f6f7edffa8c4bd8cfc696d47379957584 Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Wed, 22 Nov 2023 23:02:46 +0300 Subject: [PATCH 1251/2301] [CIR][CodeGen][Lowering] Support global variables under -fcommon. (#334) --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 +++++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 1 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 28 ++++++------------- clang/test/CIR/CodeGen/no-common.c | 16 +++++++++++ clang/test/CIR/Lowering/globals.cir | 2 ++ 5 files changed, 38 insertions(+), 20 deletions(-) create mode 100644 clang/test/CIR/CodeGen/no-common.c diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index fd8fba099cd0..8c92085c396a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -979,7 +979,15 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, assert(!UnimplementedFeature::setDLLStorageClass()); if (Linkage == mlir::cir::GlobalLinkageKind::CommonLinkage) { - llvm_unreachable("common linkage is NYI"); + // common vars aren't constant even if declared const. + GV.setConstant(false); + // Tentative definition of global variables may be initialized with + // non-zero null pointers. In this case they should have weak linkage + // since common linkage must have zero initializer and must not have + // explicit section therefore cannot have non-zero initial value. + auto Initializer = GV.getInitialValue(); + if (Initializer && !getBuilder().isNullValue(*Initializer)) + GV.setLinkage(mlir::cir::GlobalLinkageKind::WeakAnyLinkage); } // TODO(cir): setNonAliasAttributes(D, GV); @@ -1414,6 +1422,7 @@ mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibilityFromCIRLinkage( case mlir::cir::GlobalLinkageKind::ExternalWeakLinkage: case mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage: case mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage: + case mlir::cir::GlobalLinkageKind::CommonLinkage: return mlir::SymbolTable::Visibility::Public; default: { llvm::errs() << "visibility not implemented for '" diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 8fcc8c472ac2..fa91f7c46978 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1491,6 +1491,7 @@ LogicalResult GlobalOp::verify() { case GlobalLinkageKind::ExternalWeakLinkage: case GlobalLinkageKind::LinkOnceODRLinkage: case GlobalLinkageKind::LinkOnceAnyLinkage: + case GlobalLinkageKind::CommonLinkage: // FIXME: mlir's concept of visibility gets tricky with LLVM ones, // for instance, symbol declarations cannot be "public", so we // have to mark them "private" to workaround the symbol verifier. diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 72eca4f76c76..ac109baf16e5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -13,20 +13,15 @@ #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" -#include "mlir/Conversion/LLVMCommon/ConversionTarget.h" #include "mlir/Conversion/LLVMCommon/TypeConverter.h" #include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h" #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" -#include "mlir/Dialect/Affine/IR/AffineOps.h" -#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMAttrs.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/Dialect/LLVMIR/Transforms/Passes.h" -#include "mlir/Dialect/SCF/IR/SCF.h" -#include "mlir/Dialect/SCF/Transforms/Passes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" @@ -34,7 +29,6 @@ #include "mlir/IR/BuiltinDialect.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" -#include "mlir/IR/IRMapping.h" #include "mlir/IR/Operation.h" #include "mlir/IR/Types.h" #include "mlir/IR/Value.h" @@ -56,11 +50,9 @@ #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/STLExtras.h" -#include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/IR/DataLayout.h" -#include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -102,12 +94,11 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ConstPtrAttr ptrAttr, if (ptrAttr.isNullValue()) { return rewriter.create( loc, converter->convertType(ptrAttr.getType())); - } else { - mlir::Value ptrVal = rewriter.create( - loc, rewriter.getI64Type(), ptrAttr.getValue()); - return rewriter.create( - loc, converter->convertType(ptrAttr.getType()), ptrVal); } + mlir::Value ptrVal = rewriter.create( + loc, rewriter.getI64Type(), ptrAttr.getValue()); + return rewriter.create( + loc, converter->convertType(ptrAttr.getType()), ptrVal); } /// FloatAttr visitor. @@ -227,7 +218,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, auto module = parentOp->getParentOfType(); mlir::Type sourceType; llvm::StringRef symName; - auto sourceSymbol = + auto *sourceSymbol = mlir::SymbolTable::lookupSymbolIn(module, globalAttr.getSymbol()); if (auto llvmSymbol = dyn_cast(sourceSymbol)) { sourceType = llvmSymbol.getType(); @@ -1077,15 +1068,14 @@ class CIRConstantLowering return mlir::success(); } else if (auto strTy = op.getType().dyn_cast()) { if (auto zero = op.getValue().dyn_cast()) { - auto initVal = - lowerCirAttrAsValue(op, zero, rewriter, typeConverter); + auto initVal = lowerCirAttrAsValue(op, zero, rewriter, typeConverter); rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); } - return op.emitError() - << "unsupported lowering for struct constant type " << op.getType(); + return op.emitError() << "unsupported lowering for struct constant type " + << op.getType(); } else return op.emitError() << "unsupported constant type " << op.getType(); @@ -1946,7 +1936,7 @@ class CIRVTableAddrPointOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::VTableAddrPointOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto converter = getTypeConverter(); + const auto *converter = getTypeConverter(); auto targetType = converter->convertType(op.getType()); mlir::Value symAddr = op.getSymAddr(); diff --git a/clang/test/CIR/CodeGen/no-common.c b/clang/test/CIR/CodeGen/no-common.c new file mode 100644 index 000000000000..61ecea191636 --- /dev/null +++ b/clang/test/CIR/CodeGen/no-common.c @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir %s -emit-cir -o - | FileCheck %s -check-prefix=CHECK-DEFAULT +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir %s -fno-common -emit-cir -o - | FileCheck %s -check-prefix=CHECK-DEFAULT +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir %s -fcommon -emit-cir -o - | FileCheck %s -check-prefix=CHECK-COMMON + +// CHECK-COMMON: cir.global common @x +// CHECK-DEFAULT: cir.global external @x +int x; + +// CHECK-COMMON: cir.global external @ABC +// CHECK-DEFAULT: cir.global external @ABC +typedef void* (*fn_t)(long a, long b, char *f, int c); +fn_t ABC __attribute__ ((nocommon)); + +// CHECK-COMMON: cir.global common @y +// CHECK-DEFAULT: cir.global common @y +int y __attribute__((common)); diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 62034745aa29..e4d3ee2fe740 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -144,4 +144,6 @@ module { // MLIR: %0 = cir.llvmir.zeroinit : !llvm.struct<"struct.Bar", (i32, i8)> // MLIR: llvm.return %0 : !llvm.struct<"struct.Bar", (i32, i8)> // MLIR: } + cir.global common @comm = #cir.int<0> : !s32i + // MLIR: llvm.mlir.global common @comm(0 : i32) {addr_space = 0 : i32} : i32 } From e99b71282caf04a604a2034281d8e52c10546d79 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 27 Nov 2023 20:07:42 -0800 Subject: [PATCH 1252/2301] [CIR][NFC] Move LexicalScope after RunCleanupsScope While here toggle LexicalScopeGuard's visibility, to be consistent.. --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 345 ++++++++++++------------- 1 file changed, 172 insertions(+), 173 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 3428c7f254df..f4a084693be7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -85,179 +85,6 @@ class CIRGenFunction : public CIRGenTypeCache { llvm::DenseMap LabelMap; JumpDest &getJumpDestForLabel(const clang::LabelDecl *D); - /// ------- - /// Lexical Scope: to be read as in the meaning in CIR, a scope is always - /// related with initialization and destruction of objects. - /// ------- - -public: - // Represents a cir.scope, cir.if, and then/else regions. I.e. lexical - // scopes that require cleanups. - struct LexicalScopeContext { - private: - // Block containing cleanup code for things initialized in this - // lexical context (scope). - mlir::Block *CleanupBlock = nullptr; - - // Points to scope entry block. This is useful, for instance, for - // helping to insert allocas before finalizing any recursive codegen - // from switches. - mlir::Block *EntryBlock; - - // On a coroutine body, the OnFallthrough sub stmt holds the handler - // (CoreturnStmt) for control flow falling off the body. Keep track - // of emitted co_return in this scope and allow OnFallthrough to be - // skipeed. - bool HasCoreturn = false; - - // FIXME: perhaps we can use some info encoded in operations. - enum Kind { - Regular, // cir.if, cir.scope, if_regions - Ternary, // cir.ternary - Switch // cir.switch - } ScopeKind = Regular; - - public: - unsigned Depth = 0; - bool HasReturn = false; - - LexicalScopeContext(mlir::Location loc, mlir::Block *eb) - : EntryBlock(eb), BeginLoc(loc), EndLoc(loc) { - // Has multiple locations: overwrite with separate start and end locs. - if (const auto fusedLoc = loc.dyn_cast()) { - assert(fusedLoc.getLocations().size() == 2 && "too many locations"); - BeginLoc = fusedLoc.getLocations()[0]; - EndLoc = fusedLoc.getLocations()[1]; - } - - assert(EntryBlock && "expected valid block"); - } - - ~LexicalScopeContext() = default; - - // --- - // Coroutine tracking - // --- - bool hasCoreturn() const { return HasCoreturn; } - void setCoreturn() { HasCoreturn = true; } - - // --- - // Kind - // --- - bool isRegular() { return ScopeKind == Kind::Regular; } - bool isSwitch() { return ScopeKind == Kind::Switch; } - bool isTernary() { return ScopeKind == Kind::Ternary; } - - void setAsSwitch() { ScopeKind = Kind::Switch; } - void setAsTernary() { ScopeKind = Kind::Ternary; } - - // --- - // Goto handling - // --- - - // Lazy create cleanup block or return what's available. - mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) { - if (CleanupBlock) - return getCleanupBlock(builder); - return createCleanupBlock(builder); - } - - mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) { - return CleanupBlock; - } - mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) { - { - // Create the cleanup block but dont hook it up around just yet. - mlir::OpBuilder::InsertionGuard guard(builder); - CleanupBlock = builder.createBlock(builder.getBlock()->getParent()); - } - assert(builder.getInsertionBlock() && "Should be valid"); - return CleanupBlock; - } - - // Goto's introduced in this scope but didn't get fixed. - llvm::SmallVector, 4> - PendingGotos; - - // Labels solved inside this scope. - llvm::SmallPtrSet SolvedLabels; - - // --- - // Return handling - // --- - - private: - // On switches we need one return block per region, since cases don't - // have their own scopes but are distinct regions nonetheless. - llvm::SmallVector RetBlocks; - llvm::SmallVector> RetLocs; - unsigned int CurrentSwitchRegionIdx = -1; - - // There's usually only one ret block per scope, but this needs to be - // get or create because of potential unreachable return statements, note - // that for those, all source location maps to the first one found. - mlir::Block *createRetBlock(CIRGenFunction &CGF, mlir::Location loc) { - assert((isSwitch() || RetBlocks.size() == 0) && - "only switches can hold more than one ret block"); - - // Create the cleanup block but dont hook it up around just yet. - mlir::OpBuilder::InsertionGuard guard(CGF.builder); - auto *b = CGF.builder.createBlock(CGF.builder.getBlock()->getParent()); - RetBlocks.push_back(b); - RetLocs.push_back(loc); - return b; - } - - public: - void updateCurrentSwitchCaseRegion() { CurrentSwitchRegionIdx++; } - llvm::ArrayRef getRetBlocks() { return RetBlocks; } - llvm::ArrayRef> getRetLocs() { - return RetLocs; - } - - mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) { - unsigned int regionIdx = 0; - if (isSwitch()) - regionIdx = CurrentSwitchRegionIdx; - if (regionIdx >= RetBlocks.size()) - return createRetBlock(CGF, loc); - return &*RetBlocks.back(); - } - - // Scope entry block tracking - mlir::Block *getEntryBlock() { return EntryBlock; } - - mlir::Location BeginLoc, EndLoc; - }; - -private: - class LexicalScopeGuard { - CIRGenFunction &CGF; - LexicalScopeContext *OldVal = nullptr; - - public: - LexicalScopeGuard(CIRGenFunction &c, LexicalScopeContext *L) : CGF(c) { - if (CGF.currLexScope) { - OldVal = CGF.currLexScope; - L->Depth++; - } - CGF.currLexScope = L; - } - - LexicalScopeGuard(const LexicalScopeGuard &) = delete; - LexicalScopeGuard &operator=(const LexicalScopeGuard &) = delete; - LexicalScopeGuard &operator=(LexicalScopeGuard &&other) = delete; - - void cleanup(); - void restore() { CGF.currLexScope = OldVal; } - ~LexicalScopeGuard() { - cleanup(); - restore(); - } - }; - - LexicalScopeContext *currLexScope = nullptr; - // --------------------- // Opaque value handling // --------------------- @@ -1826,6 +1653,178 @@ class CIRGenFunction : public CIRGenTypeCache { EHScopeStack::stable_iterator CurrentCleanupScopeDepth = EHScopeStack::stable_end(); + /// ------- + /// Lexical Scope: to be read as in the meaning in CIR, a scope is always + /// related with initialization and destruction of objects. + /// ------- + +public: + // Represents a cir.scope, cir.if, and then/else regions. I.e. lexical + // scopes that require cleanups. + struct LexicalScopeContext { + private: + // Block containing cleanup code for things initialized in this + // lexical context (scope). + mlir::Block *CleanupBlock = nullptr; + + // Points to scope entry block. This is useful, for instance, for + // helping to insert allocas before finalizing any recursive codegen + // from switches. + mlir::Block *EntryBlock; + + // On a coroutine body, the OnFallthrough sub stmt holds the handler + // (CoreturnStmt) for control flow falling off the body. Keep track + // of emitted co_return in this scope and allow OnFallthrough to be + // skipeed. + bool HasCoreturn = false; + + // FIXME: perhaps we can use some info encoded in operations. + enum Kind { + Regular, // cir.if, cir.scope, if_regions + Ternary, // cir.ternary + Switch // cir.switch + } ScopeKind = Regular; + + public: + unsigned Depth = 0; + bool HasReturn = false; + + LexicalScopeContext(mlir::Location loc, mlir::Block *eb) + : EntryBlock(eb), BeginLoc(loc), EndLoc(loc) { + // Has multiple locations: overwrite with separate start and end locs. + if (const auto fusedLoc = loc.dyn_cast()) { + assert(fusedLoc.getLocations().size() == 2 && "too many locations"); + BeginLoc = fusedLoc.getLocations()[0]; + EndLoc = fusedLoc.getLocations()[1]; + } + + assert(EntryBlock && "expected valid block"); + } + + ~LexicalScopeContext() = default; + + // --- + // Coroutine tracking + // --- + bool hasCoreturn() const { return HasCoreturn; } + void setCoreturn() { HasCoreturn = true; } + + // --- + // Kind + // --- + bool isRegular() { return ScopeKind == Kind::Regular; } + bool isSwitch() { return ScopeKind == Kind::Switch; } + bool isTernary() { return ScopeKind == Kind::Ternary; } + + void setAsSwitch() { ScopeKind = Kind::Switch; } + void setAsTernary() { ScopeKind = Kind::Ternary; } + + // --- + // Goto handling + // --- + + // Lazy create cleanup block or return what's available. + mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) { + if (CleanupBlock) + return getCleanupBlock(builder); + return createCleanupBlock(builder); + } + + mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) { + return CleanupBlock; + } + mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) { + { + // Create the cleanup block but dont hook it up around just yet. + mlir::OpBuilder::InsertionGuard guard(builder); + CleanupBlock = builder.createBlock(builder.getBlock()->getParent()); + } + assert(builder.getInsertionBlock() && "Should be valid"); + return CleanupBlock; + } + + // Goto's introduced in this scope but didn't get fixed. + llvm::SmallVector, 4> + PendingGotos; + + // Labels solved inside this scope. + llvm::SmallPtrSet SolvedLabels; + + // --- + // Return handling + // --- + + private: + // On switches we need one return block per region, since cases don't + // have their own scopes but are distinct regions nonetheless. + llvm::SmallVector RetBlocks; + llvm::SmallVector> RetLocs; + unsigned int CurrentSwitchRegionIdx = -1; + + // There's usually only one ret block per scope, but this needs to be + // get or create because of potential unreachable return statements, note + // that for those, all source location maps to the first one found. + mlir::Block *createRetBlock(CIRGenFunction &CGF, mlir::Location loc) { + assert((isSwitch() || RetBlocks.size() == 0) && + "only switches can hold more than one ret block"); + + // Create the cleanup block but dont hook it up around just yet. + mlir::OpBuilder::InsertionGuard guard(CGF.builder); + auto *b = CGF.builder.createBlock(CGF.builder.getBlock()->getParent()); + RetBlocks.push_back(b); + RetLocs.push_back(loc); + return b; + } + + public: + void updateCurrentSwitchCaseRegion() { CurrentSwitchRegionIdx++; } + llvm::ArrayRef getRetBlocks() { return RetBlocks; } + llvm::ArrayRef> getRetLocs() { + return RetLocs; + } + + mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) { + unsigned int regionIdx = 0; + if (isSwitch()) + regionIdx = CurrentSwitchRegionIdx; + if (regionIdx >= RetBlocks.size()) + return createRetBlock(CGF, loc); + return &*RetBlocks.back(); + } + + // Scope entry block tracking + mlir::Block *getEntryBlock() { return EntryBlock; } + + mlir::Location BeginLoc, EndLoc; + }; + + class LexicalScopeGuard { + CIRGenFunction &CGF; + LexicalScopeContext *OldVal = nullptr; + + public: + LexicalScopeGuard(CIRGenFunction &c, LexicalScopeContext *L) : CGF(c) { + if (CGF.currLexScope) { + OldVal = CGF.currLexScope; + L->Depth++; + } + CGF.currLexScope = L; + } + + LexicalScopeGuard(const LexicalScopeGuard &) = delete; + LexicalScopeGuard &operator=(const LexicalScopeGuard &) = delete; + LexicalScopeGuard &operator=(LexicalScopeGuard &&other) = delete; + + void cleanup(); + void restore() { CGF.currLexScope = OldVal; } + ~LexicalScopeGuard() { + cleanup(); + restore(); + } + }; + + LexicalScopeContext *currLexScope = nullptr; + /// CIR build helpers /// ----------------- From e2b1538ea2ac32049751eef33abc9606ab8cbf72 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 27 Nov 2023 20:33:43 -0800 Subject: [PATCH 1253/2301] [CIR][CIRGen] Generalize and run cleanups on lexical scopes This adds more support for automatic variable destruction, which is long due. There are more bits to come in following patches but this enables the bulk mechanism. - Bake running cleanup functionality from scopes into LexicalScopeContext - This is closer to traditional LLVM codegen, LexicalScopeContext now inherits from RunCleanupsScope. - Merge LexicalScopeContext and LexicalScopeGuard into one LexicalScope - Proper implement dtor for lexicalScope and fwd ForceCleanup - Add testcase --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 8 ++- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 21 +++---- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 7 ++- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 23 +++++++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 21 +++---- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 51 ++++++----------- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 65 ++++++++++++---------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 63 +++++++++++---------- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 26 +++------ clang/test/CIR/CodeGen/dtors-scopes.cpp | 21 +++++++ 11 files changed, 169 insertions(+), 142 deletions(-) create mode 100644 clang/test/CIR/CodeGen/dtors-scopes.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index b031e93e53c9..63a15146c6bd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -952,10 +952,12 @@ void CIRGenFunction::destroyCXXObject(CIRGenFunction &CGF, Address addr, const RecordType *rtype = type->castAs(); const CXXRecordDecl *record = cast(rtype->getDecl()); const CXXDestructorDecl *dtor = record->getDestructor(); + // TODO(cir): Unlike traditional codegen, CIRGen should actually emit trivial + // dtors which shall be removed on later CIR passes. However, only remove this + // assertion once we get a testcase to exercise this path. assert(!dtor->isTrivial()); - llvm_unreachable("NYI"); - // CGF.buildCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false, - // /*Delegating=*/false, addr, type); + CGF.buildCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false, + /*Delegating=*/false, addr, type); } /// Emits the body of the current destructor. diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 6350e8ddc118..6ed4c7049d83 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -146,10 +146,10 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups()); // Remember activation information. - [[maybe_unused]] bool IsActive = Scope.isActive(); - [[maybe_unused]] Address NormalActiveFlag = - Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() - : Address::invalid(); + bool IsActive = Scope.isActive(); + Address NormalActiveFlag = Scope.shouldTestFlagInNormalCleanup() + ? Scope.getActiveFlag() + : Address::invalid(); [[maybe_unused]] Address EHActiveFlag = Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : Address::invalid(); @@ -177,11 +177,12 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // end of the last cleanup, which points to the current scope. The // rest of CIR gen doesn't need to worry about this; it only happens // during the execution of PopCleanupBlocks(). - bool HasTerminator = - FallthroughSource && !FallthroughSource->empty() && - FallthroughSource->back().mightHaveTrait(); - bool HasPrebranchedFallthrough = (FallthroughSource && HasTerminator && - FallthroughSource->getTerminator()); + bool HasTerminator = FallthroughSource && + FallthroughSource->mightHaveTerminator() && + FallthroughSource->getTerminator(); + bool HasPrebranchedFallthrough = + HasTerminator && + !isa(FallthroughSource->getTerminator()); // If this is a normal cleanup, then having a prebranched // fallthrough implies that the fallthrough source unconditionally @@ -468,4 +469,4 @@ EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) { new (buffer) EHCatchScope(numHandlers, InnermostEHScope); InnermostEHScope = stable_begin(); return scope; -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 629e186a5f2b..98e1f9281a33 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -135,9 +135,10 @@ RValue CIRGenFunction::buildCoroutineFrame() { llvm_unreachable("NYI"); } -static mlir::LogicalResult buildBodyAndFallthrough( - CIRGenFunction &CGF, const CoroutineBodyStmt &S, Stmt *Body, - const CIRGenFunction::LexicalScopeContext *currLexScope) { +static mlir::LogicalResult +buildBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S, + Stmt *Body, + const CIRGenFunction::LexicalScope *currLexScope) { if (CGF.buildStmt(Body, /*useCurrentScope=*/true).failed()) return mlir::failure(); // Note that LLVM checks CanFallthrough by looking into the availability diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index f278be7ac107..619ae69526cc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -812,8 +812,7 @@ struct DestroyObject final : EHScopeStack::Cleanup { [[maybe_unused]] bool useEHCleanupForArray = flags.isForNormalCleanup() && this->useEHCleanupForArray; - llvm_unreachable("NYI"); - // CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray); + CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray); } }; @@ -893,6 +892,26 @@ void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr, useEHCleanupForArray); } +/// Immediately perform the destruction of the given object. +/// +/// \param addr - the address of the object; a type* +/// \param type - the type of the object; if an array type, all +/// objects are destroyed in reverse order +/// \param destroyer - the function to call to destroy individual +/// elements +/// \param useEHCleanupForArray - whether an EH cleanup should be +/// used when destroying array elements, in case one of the +/// destructions throws an exception +void CIRGenFunction::emitDestroy(Address addr, QualType type, + Destroyer *destroyer, + bool useEHCleanupForArray) { + const ArrayType *arrayType = getContext().getAsArrayType(type); + if (!arrayType) + return destroyer(*this, addr, type); + + llvm_unreachable("Array destroy NYI"); +} + CIRGenFunction::Destroyer * CIRGenFunction::getDestroyer(QualType::DestructionKind kind) { switch (kind) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 2c2eb9470344..f78f163d4fdd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2067,9 +2067,8 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, .create( loc, condV, /*trueBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{ - loc, b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{*this, loc, + b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); assert(!UnimplementedFeature::incrementProfileCounter()); @@ -2089,9 +2088,8 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, }, /*falseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{ - loc, b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{*this, loc, + b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); assert(!UnimplementedFeature::incrementProfileCounter()); @@ -2187,9 +2185,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { [[maybe_unused]] auto scope = builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{ - loc, builder.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexScopeGuard{*this, &lexScope}; + CIRGenFunction::LexicalScope lexScope{*this, loc, + builder.getInsertionBlock()}; LV = buildLValue(cleanups->getSubExpr()); if (LV.isSimple()) { @@ -2303,15 +2300,13 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, loc, condV, elseS, /*thenBuilder=*/ [&](mlir::OpBuilder &, mlir::Location) { - LexicalScopeContext lexScope{thenLoc, builder.getInsertionBlock()}; - LexicalScopeGuard lexThenGuard{*this, &lexScope}; + LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()}; resThen = buildStmt(thenS, /*useCurrentScope=*/true); }, /*elseBuilder=*/ [&](mlir::OpBuilder &, mlir::Location) { assert(elseLoc && "Invalid location for elseS."); - LexicalScopeContext lexScope{*elseLoc, builder.getInsertionBlock()}; - LexicalScopeGuard lexElseGuard{*this, &lexScope}; + LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()}; resElse = buildStmt(elseS, /*useCurrentScope=*/true); }); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index c1ec0c9f6690..7b679eea1357 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -517,9 +517,8 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { [[maybe_unused]] auto scope = builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{ - loc, builder.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexScopeGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{CGF, loc, + builder.getInsertionBlock()}; Visit(E->getSubExpr()); }); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index fd3528b763dc..4e8c32ddc508 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1809,9 +1809,8 @@ mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { auto scope = builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{ - loc, builder.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexScopeGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{CGF, loc, + builder.getInsertionBlock()}; auto scopeYieldVal = Visit(E->getSubExpr()); if (scopeYieldVal) { builder.create(loc, scopeYieldVal); @@ -2006,9 +2005,8 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( .create( loc, condV, /*trueBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{CGF, loc, + b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); assert(!UnimplementedFeature::incrementProfileCounter()); @@ -2027,9 +2025,8 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( }, /*falseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{CGF, loc, + b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); assert(!UnimplementedFeature::incrementProfileCounter()); @@ -2092,17 +2089,14 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { auto ResOp = Builder.create( Loc, LHSCondV, /*trueBuilder=*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { - CIRGenFunction::LexicalScopeContext LexScope{Loc, - B.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &LexScope}; + CIRGenFunction::LexicalScope LexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); mlir::Value RHSCondV = CGF.evaluateExprAsBool(E->getRHS()); auto res = B.create( Loc, RHSCondV, /*trueBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { - CIRGenFunction::LexicalScopeContext lexScope{ - Loc, B.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{CGF, Loc, + B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); auto res = B.create( Loc, Builder.getBoolTy(), @@ -2112,9 +2106,8 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { }, /*falseBuilder*/ [&](mlir::OpBuilder &b, mlir::Location Loc) { - CIRGenFunction::LexicalScopeContext lexScope{ - Loc, b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{CGF, Loc, + b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); auto res = b.create( Loc, Builder.getBoolTy(), @@ -2126,9 +2119,7 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { }, /*falseBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { - CIRGenFunction::LexicalScopeContext lexScope{Loc, - B.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); auto res = B.create( Loc, Builder.getBoolTy(), @@ -2172,9 +2163,7 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { auto ResOp = Builder.create( Loc, LHSCondV, /*trueBuilder=*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { - CIRGenFunction::LexicalScopeContext lexScope{Loc, - B.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); auto res = B.create( Loc, Builder.getBoolTy(), @@ -2183,9 +2172,7 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { }, /*falseBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { - CIRGenFunction::LexicalScopeContext LexScope{Loc, - B.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &LexScope}; + CIRGenFunction::LexicalScope LexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); mlir::Value RHSCondV = CGF.evaluateExprAsBool(E->getRHS()); auto res = B.create( @@ -2200,9 +2187,8 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { Locs.push_back(fusedLoc.getLocations()[0]); Locs.push_back(fusedLoc.getLocations()[1]); } - CIRGenFunction::LexicalScopeContext lexScope{ - Loc, B.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{CGF, Loc, + B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); auto res = B.create( Loc, Builder.getBoolTy(), @@ -2221,9 +2207,8 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { Locs.push_back(fusedLoc.getLocations()[0]); Locs.push_back(fusedLoc.getLocations()[1]); } - CIRGenFunction::LexicalScopeContext lexScope{ - Loc, B.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CIRGenFunction::LexicalScope lexScope{CGF, Loc, + B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); auto res = b.create( Loc, Builder.getBoolTy(), diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 5dcbb05f0849..6d0cacffa753 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -316,7 +316,7 @@ mlir::LogicalResult CIRGenFunction::declare(Address addr, const Decl *var, /// All scope related cleanup needed: /// - Patching up unsolved goto's. /// - Build all cleanup code and insert yield/returns. -void CIRGenFunction::LexicalScopeGuard::cleanup() { +void CIRGenFunction::LexicalScope::cleanup() { auto &builder = CGF.builder; auto *localScope = CGF.currLexScope; @@ -352,6 +352,14 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { } localScope->SolvedLabels.clear(); + auto applyCleanup = [&]() { + if (PerformCleanup) { + // ApplyDebugLocation + assert(!UnimplementedFeature::generateDebugInfo()); + ForceCleanup(); + } + }; + // Cleanup are done right before codegen resume a scope. This is where // objects are destroyed. unsigned curLoc = 0; @@ -360,16 +368,16 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { builder.setInsertionPointToEnd(retBlock); mlir::Location retLoc = *localScope->getRetLocs()[curLoc]; curLoc++; - - // TODO(cir): insert actual scope cleanup HERE (dtors and etc) - (void)buildReturn(retLoc); } auto insertCleanupAndLeave = [&](mlir::Block *InsPt) { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPointToEnd(InsPt); - // TODO: insert actual scope cleanup (dtors and etc) + + // Leverage and defers to RunCleanupsScope's dtor and scope handling. + applyCleanup(); + if (localScope->Depth != 0) { // end of any local scope != function // Ternary ops have to deal with matching arms for yielding types // and do return a value, it must do its own cir.yield insertion. @@ -392,20 +400,22 @@ void CIRGenFunction::LexicalScopeGuard::cleanup() { // If a terminator is already present in the current block, nothing // else to do here. - bool entryBlock = builder.getInsertionBlock()->isEntryBlock(); auto *currBlock = builder.getBlock(); - bool hasTerminator = - !currBlock->empty() && - currBlock->back().hasTrait(); - if (hasTerminator) + if (currBlock->mightHaveTerminator() && currBlock->getTerminator()) return; - // An empty non-entry block has nothing to offer. + // An empty non-entry block has nothing to offer, and since this is + // synthetic, losing information does not affect anything. + bool entryBlock = builder.getInsertionBlock()->isEntryBlock(); if (!entryBlock && currBlock->empty()) { currBlock->erase(); // Remove unused cleanup blocks. if (cleanupBlock && cleanupBlock->hasNoPredecessors()) cleanupBlock->erase(); + // FIXME(cir): ideally we should call applyCleanup() before we + // get into this condition and emit the proper cleanup. This is + // needed to get nrvo to interop with dtor logic. + PerformCleanup = false; return; } @@ -494,25 +504,23 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // Create a scope in the symbol table to hold variable declarations. SymTableScopeTy varScope(symbolTable); + // Compiler synthetized functions might have invalid slocs... + auto bSrcLoc = FD->getBody()->getBeginLoc(); + auto eSrcLoc = FD->getBody()->getEndLoc(); + auto unknownLoc = builder.getUnknownLoc(); - { - // Compiler synthetized functions might have invalid slocs... - auto bSrcLoc = FD->getBody()->getBeginLoc(); - auto eSrcLoc = FD->getBody()->getEndLoc(); - auto unknownLoc = builder.getUnknownLoc(); - - auto FnBeginLoc = bSrcLoc.isValid() ? getLoc(bSrcLoc) : unknownLoc; - auto FnEndLoc = eSrcLoc.isValid() ? getLoc(eSrcLoc) : unknownLoc; - SourceLocRAIIObject fnLoc{*this, Loc.isValid() ? getLoc(Loc) : unknownLoc}; + auto FnBeginLoc = bSrcLoc.isValid() ? getLoc(bSrcLoc) : unknownLoc; + auto FnEndLoc = eSrcLoc.isValid() ? getLoc(eSrcLoc) : unknownLoc; + const auto fusedLoc = + mlir::FusedLoc::get(builder.getContext(), {FnBeginLoc, FnEndLoc}); + SourceLocRAIIObject fnLoc{*this, Loc.isValid() ? getLoc(Loc) : unknownLoc}; - assert(Fn.isDeclaration() && "Function already has body?"); - mlir::Block *EntryBB = Fn.addEntryBlock(); - builder.setInsertionPointToStart(EntryBB); + assert(Fn.isDeclaration() && "Function already has body?"); + mlir::Block *EntryBB = Fn.addEntryBlock(); + builder.setInsertionPointToStart(EntryBB); - const auto fusedLoc = - mlir::FusedLoc::get(builder.getContext(), {FnBeginLoc, FnEndLoc}); - LexicalScopeContext lexScope{fusedLoc, EntryBB}; - LexicalScopeGuard scopeGuard{*this, &lexScope}; + { + LexicalScope lexScope{*this, fusedLoc, EntryBB}; // Emit the standard function prologue. StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin()); @@ -535,7 +543,8 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, else if (isa(FD) && cast(FD)->isLambdaStaticInvoker()) { // The lambda static invoker function is special, because it forwards or - // clones the body of the function call operator (but is actually static). + // clones the body of the function call operator (but is actually + // static). buildLambdaStaticInvokeBody(cast(FD)); } else if (FD->isDefaulted() && isa(FD) && (cast(FD)->isCopyAssignmentOperator() || diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index f4a084693be7..d265e94c77fa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1515,6 +1515,9 @@ class CIRGenFunction : public CIRGenTypeCache { Destroyer *getDestroyer(QualType::DestructionKind kind); + void emitDestroy(Address addr, QualType type, Destroyer *destroyer, + bool useEHCleanupForArray); + /// An object to manage conditionally-evaluated expressions. class ConditionalEvaluation { // llvm::BasicBlock *StartBB; @@ -1661,7 +1664,7 @@ class CIRGenFunction : public CIRGenTypeCache { public: // Represents a cir.scope, cir.if, and then/else regions. I.e. lexical // scopes that require cleanups. - struct LexicalScopeContext { + struct LexicalScope : public RunCleanupsScope { private: // Block containing cleanup code for things initialized in this // lexical context (scope). @@ -1678,6 +1681,8 @@ class CIRGenFunction : public CIRGenTypeCache { // skipeed. bool HasCoreturn = false; + LexicalScope *ParentScope = nullptr; + // FIXME: perhaps we can use some info encoded in operations. enum Kind { Regular, // cir.if, cir.scope, if_regions @@ -1689,8 +1694,14 @@ class CIRGenFunction : public CIRGenTypeCache { unsigned Depth = 0; bool HasReturn = false; - LexicalScopeContext(mlir::Location loc, mlir::Block *eb) - : EntryBlock(eb), BeginLoc(loc), EndLoc(loc) { + LexicalScope(CIRGenFunction &CGF, mlir::Location loc, mlir::Block *eb) + : RunCleanupsScope(CGF), EntryBlock(eb), ParentScope(CGF.currLexScope), + BeginLoc(loc), EndLoc(loc) { + + CGF.currLexScope = this; + if (ParentScope) + Depth++; + // Has multiple locations: overwrite with separate start and end locs. if (const auto fusedLoc = loc.dyn_cast()) { assert(fusedLoc.getLocations().size() == 2 && "too many locations"); @@ -1701,7 +1712,24 @@ class CIRGenFunction : public CIRGenTypeCache { assert(EntryBlock && "expected valid block"); } - ~LexicalScopeContext() = default; + void cleanup(); + void restore() { CGF.currLexScope = ParentScope; } + + ~LexicalScope() { + // EmitLexicalBlockEnd + assert(!UnimplementedFeature::generateDebugInfo()); + // If we should perform a cleanup, force them now. Note that + // this ends the cleanup scope before rescoping any labels. + cleanup(); + restore(); + } + + /// Force the emission of cleanups now, instead of waiting + /// until this object is destroyed. + void ForceCleanup() { + RunCleanupsScope::ForceCleanup(); + // TODO(cir): something akin to rescopeLabels if it makes sense to CIR. + } // --- // Coroutine tracking @@ -1798,32 +1826,7 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Location BeginLoc, EndLoc; }; - class LexicalScopeGuard { - CIRGenFunction &CGF; - LexicalScopeContext *OldVal = nullptr; - - public: - LexicalScopeGuard(CIRGenFunction &c, LexicalScopeContext *L) : CGF(c) { - if (CGF.currLexScope) { - OldVal = CGF.currLexScope; - L->Depth++; - } - CGF.currLexScope = L; - } - - LexicalScopeGuard(const LexicalScopeGuard &) = delete; - LexicalScopeGuard &operator=(const LexicalScopeGuard &) = delete; - LexicalScopeGuard &operator=(LexicalScopeGuard &&other) = delete; - - void cleanup(); - void restore() { CGF.currLexScope = OldVal; } - ~LexicalScopeGuard() { - cleanup(); - restore(); - } - }; - - LexicalScopeContext *currLexScope = nullptr; + LexicalScope *currLexScope = nullptr; /// CIR build helpers /// ----------------- diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index c2e676f4d24f..282d7cf9e6f3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -41,8 +41,7 @@ mlir::LogicalResult CIRGenFunction::buildCompoundStmt(const CompoundStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; - LexicalScopeGuard lexScopeGuard{*this, &lexScope}; + LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; res = compoundStmtBuilder(); }); @@ -374,8 +373,7 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - LexicalScopeContext lexScope{scopeLoc, builder.getInsertionBlock()}; - LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; + LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()}; res = ifStmtBuilder(); }); @@ -466,9 +464,8 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{ - loc, builder.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexScopeGuard{*this, &lexScope}; + CIRGenFunction::LexicalScope lexScope{*this, loc, + builder.getInsertionBlock()}; handleReturnVal(); }); } @@ -741,8 +738,7 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, // Create a cleanup scope for the condition variable cleanups. // Logical equivalent from LLVM codegn for // LexicalScope ConditionScope(*this, S.getSourceRange())... - LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; - LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; res = forStmtBuilder(); }); @@ -822,8 +818,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; - LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; res = forStmtBuilder(); }); @@ -878,8 +873,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; - LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; res = doStmtBuilder(); }); @@ -939,8 +933,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; - LexicalScopeGuard lexForScopeGuard{*this, &lexScope}; + LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; res = whileStmtBuilder(); }); @@ -1028,8 +1021,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - LexicalScopeContext lexScope{loc, builder.getInsertionBlock()}; - LexicalScopeGuard lexIfScopeGuard{*this, &lexScope}; + LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; res = switchStmtBuilder(); }); diff --git a/clang/test/CIR/CodeGen/dtors-scopes.cpp b/clang/test/CIR/CodeGen/dtors-scopes.cpp new file mode 100644 index 000000000000..bb3c23010a78 --- /dev/null +++ b/clang/test/CIR/CodeGen/dtors-scopes.cpp @@ -0,0 +1,21 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +extern "C" int printf(char const*, ...); +struct C { + C() { printf("++A\n"); } + ~C() { printf("--A\n"); } +}; +void dtor1() { + { + C c; + } + printf("Done\n"); +} + +// CHECK: cir.func @_Z5dtor1v() +// CHECK: cir.scope { +// CHECK: %4 = cir.alloca !ty_22C22, cir.ptr , ["c", init] {alignment = 1 : i64} +// CHECK: cir.call @_ZN1CC2Ev(%4) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1CD2Ev(%4) : (!cir.ptr) -> () +// CHECK: } \ No newline at end of file From 6bd0c8302ca794b4ffd79931978f0c2d1c08cc99 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 29 Nov 2023 11:42:00 -0800 Subject: [PATCH 1254/2301] [CIR][NFC] Make test more portable on windows Fixes #337 --- clang/test/CIR/CodeGen/sourcelocation.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index b9e25c52aba5..9c1fe9760c8c 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -85,6 +85,6 @@ int s0(int a, int b) { // LLVM: !llvm.dbg.cu = !{!1} // LLVM: !0 = !{i32 2, !"Debug Info Version", i32 3} // LLVM: !1 = distinct !DICompileUnit(language: DW_LANG_C, file: !2, producer: "MLIR", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly) -// LLVM: !2 = !DIFile(filename: "sourcelocation.cpp", directory: "{{.*}}clang/test/CIR/CodeGen") +// LLVM: !2 = !DIFile(filename: "sourcelocation.cpp", directory: "{{.*}}CodeGen") // LLVM: ![[#SP]] = distinct !DISubprogram(name: "_Z2s0ii", linkageName: "_Z2s0ii", scope: !2, file: !2, line: 6, type: !4, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !1) // LLVM: ![[#LOC1]] = !DILocation(line: 6, scope: ![[#SP]]) From 168df7ebc0d9d8621b5873364d7d3ecd175ca1da Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Wed, 29 Nov 2023 22:59:40 +0300 Subject: [PATCH 1255/2301] [CIR] Change GlobalViewAttr indices to use MLIR integers. (#327) This is a followup PR to comment https://github.com/llvm/clangir/pull/305#discussion_r1393023729 by @bcardosolopes --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 7 +++---- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 8 +++----- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 ++-- clang/test/CIR/CodeGen/globals.c | 2 +- clang/test/CIR/CodeGen/vbase.cpp | 6 +++--- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- 7 files changed, 14 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 79621c31e3dd..1b48017678e2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1062,7 +1062,7 @@ class ConstantLValueEmitter llvm::SmallVector Indices; for (auto I : Idx) { - auto Attr = mlir::cir::IntAttr::get(CGM.getBuilder().getSInt64Ty(), I); + auto Attr = CGM.getBuilder().getI32IntegerAttr(I); Indices.push_back(Attr); } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index df052b83430b..f450e45ad08c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1381,9 +1381,8 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, CGM.getBuilder().getUInt8PtrTy()); } - assert(!UnimplementedFeature::setDSOLocal()); - auto PtrDiffTy = - CGM.getTypes().ConvertType(CGM.getASTContext().getPointerDiffType()); + if (UnimplementedFeature::setDSOLocal()) + llvm_unreachable("NYI"); // The vtable address point is 2. mlir::Attribute field{}; @@ -1391,7 +1390,7 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, llvm_unreachable("NYI"); } else { SmallVector offsets{ - mlir::cir::IntAttr::get(PtrDiffTy, 2)}; + CGM.getBuilder().getI32IntegerAttr(2)}; auto indices = mlir::ArrayAttr::get(builder.getContext(), offsets); field = CGM.getBuilder().getGlobalViewAttr(CGM.getBuilder().getUInt8PtrTy(), VTable, indices); diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 8e11eda15a5c..4add04e56ccb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -501,11 +501,9 @@ void CIRGenVTables::buildVTTDefinition(mlir::cir::GlobalOp VTT, } mlir::Attribute Idxs[3] = { - mlir::cir::IntAttr::get(CGM.getBuilder().getSInt32Ty(), 0), - mlir::cir::IntAttr::get(CGM.getBuilder().getSInt32Ty(), - AddressPoint.VTableIndex), - mlir::cir::IntAttr::get(CGM.getBuilder().getSInt32Ty(), - AddressPoint.AddressPointIndex), + CGM.getBuilder().getI32IntegerAttr(0), + CGM.getBuilder().getI32IntegerAttr(AddressPoint.VTableIndex), + CGM.getBuilder().getI32IntegerAttr(AddressPoint.AddressPointIndex), }; auto Indices = mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Idxs); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ac109baf16e5..810734d4dd50 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -240,9 +240,9 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, if (globalAttr.getIndices()) { llvm::SmallVector indices; for (auto idx : globalAttr.getIndices()) { - auto intAttr = dyn_cast(idx); + auto intAttr = dyn_cast(idx); assert(intAttr && "index must be integers"); - indices.push_back(intAttr.getSInt()); + indices.push_back(intAttr.getValue().getSExtValue()); } auto resTy = addrOp.getType(); auto eltTy = converter->convertType(sourceType); diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index a6b9309dbad6..cbeee30eeae6 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -48,7 +48,7 @@ struct { // CHECK: cir.global external @nestedStringPtr = #cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr}> int *globalPtr = &nestedString.y[1]; -// CHECK: cir.global external @globalPtr = #cir.global_view<@nestedString, [#cir.int<0> : !s64i, #cir.int<1> : !s64i, #cir.int<1> : !s64i]> +// CHECK: cir.global external @globalPtr = #cir.global_view<@nestedString, [0 : i32, 1 : i32, 1 : i32]> // TODO: test tentatives with internal linkage. diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp index 7dcb4c90d6e9..3b1c50d09118 100644 --- a/clang/test/CIR/CodeGen/vbase.cpp +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -19,7 +19,7 @@ void ppp() { B b; } // CIR: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr<12> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr]> : !cir.array x 3>}> // VTT for B. -// CIR: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [#cir.int<0> : !s32i, #cir.int<0> : !s32i, #cir.int<3> : !s32i]> : !cir.ptr]> : !cir.array x 1> +// CIR: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 1> // CIR: cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE @@ -32,10 +32,10 @@ void ppp() { B b; } // CIR: cir.global linkonce_odr @_ZTS1A = #cir.const_array<"1A" : !cir.array> : !cir.array // Type info A. -// CIR: cir.global constant external @_ZTI1A = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1A> : !cir.ptr}> +// CIR: cir.global constant external @_ZTI1A = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS1A> : !cir.ptr}> // Type info B. -// CIR: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.int<0> : !u32i, #cir.int<1> : !u32i, #cir.global_view<@_ZTI1A> : !cir.ptr, #cir.int<-6141> : !s64i}> +// CIR: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.int<0> : !u32i, #cir.int<1> : !u32i, #cir.global_view<@_ZTI1A> : !cir.ptr, #cir.int<-6141> : !s64i}> // LLVM: @_ZTV1B = linkonce_odr global { [3 x ptr] } { [3 x ptr] [ptr inttoptr (i64 12 to ptr), ptr null, ptr @_ZTI1B] } diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 0389a0493301..bdde79b926cf 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -86,7 +86,7 @@ class B : public A // CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr // typeinfo for B -// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [#cir.int<2> : !s64i]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr}> : ![[TypeInfoB]] +// CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr}> : ![[TypeInfoB]] // Checks for dtors in dtors.cpp From 99f1301fd7aa26d7342878b8e9e8845c70562ada Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Wed, 29 Nov 2023 17:01:31 -0300 Subject: [PATCH 1256/2301] [CIR][IR] Fix ConstPtrAttr parsing (#341) ConstPtrAttrs with numbers instead of the `null` keyword were not parsed correctly. --- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 2 +- clang/test/CIR/IR/constptrattr.cir | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/IR/constptrattr.cir diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 8cbdefa788e5..04f2c405f9ee 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -204,7 +204,7 @@ Attribute ConstPtrAttr::parse(AsmParser &parser, Type odsType) { if (parser.parseLess()) return {}; - if (parser.parseKeyword("null").succeeded()) { + if (parser.parseOptionalKeyword("null").succeeded()) { value = 0; } else { if (parser.parseInteger(value)) diff --git a/clang/test/CIR/IR/constptrattr.cir b/clang/test/CIR/IR/constptrattr.cir new file mode 100644 index 000000000000..30b79a882ac1 --- /dev/null +++ b/clang/test/CIR/IR/constptrattr.cir @@ -0,0 +1,8 @@ +// RUN: cir-opt %s | FileCheck %s + +!s32i = !cir.int + +cir.global external @const_ptr = #cir.ptr<4660> : !cir.ptr +// CHECK: cir.global external @const_ptr = #cir.ptr<4660> : !cir.ptr +cir.global external @null_ptr = #cir.ptr : !cir.ptr +// CHECK: cir.global external @null_ptr = #cir.ptr : !cir.ptr From 54c7e5f5d86fc09656fac0440f8cbc9da128d478 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Wed, 29 Nov 2023 17:02:55 -0300 Subject: [PATCH 1257/2301] [CIR][Lowering] Fix vbase.cpp test (#342) Two fixes were applied: - A couple of `lowerCirAttrAsValue` visitors were fixed as one of the arguments was missing the const qualifier on the type converter. To avoid this problem, the const qualifier was moved to before the type. - In the type converter, `cir.struct`s were wrongly identified by their name's length, now, they are identified by their name's existence. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- clang/test/CIR/CodeGen/vbase.cpp | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 810734d4dd50..775577f7c6b8 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2029,7 +2029,7 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, // Struct has a name: lower as an identified struct. mlir::LLVM::LLVMStructType llvmStruct; - if (type.getName().size() != 0) { + if (type.getName()) { llvmStruct = mlir::LLVM::LLVMStructType::getIdentified( type.getContext(), type.getPrefixedName()); if (llvmStruct.setBody(llvmMembers, /*isPacked=*/type.getPacked()) diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp index 3b1c50d09118..a966f82069c6 100644 --- a/clang/test/CIR/CodeGen/vbase.cpp +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -2,7 +2,6 @@ // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM -// XFAIL: * struct A { int a; From 6ad275a173698c2e73c3bcd9cb3a7137272f732b Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Wed, 29 Nov 2023 17:05:07 -0300 Subject: [PATCH 1258/2301] [CIR][CIRGen] Fix agg-init2.cpp test (#343) This patch fixes the agg-init2.cpp test by doing two things: - Updating the `VisitCXXConstructExpr` to return a zero attribute initializer for trivial zero-initialized objects. - Forcing the `buildAutoVarInit` method to use ctor calls on temporary object expressions even if the object can be constant-initialized. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 7 ++++++- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 2 +- clang/test/CIR/CodeGen/agg-init2.cpp | 1 - 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 619ae69526cc..b37329be00c2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -22,6 +22,7 @@ #include "mlir/IR/SymbolTable.h" #include "clang/AST/Decl.h" +#include "clang/AST/ExprCXX.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/ErrorHandling.h" @@ -257,7 +258,11 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { } } - if (!constant) { + // NOTE(cir): In case we have a constant initializer, we can just emit a + // store. But, in CIR, we wish to retain any ctor calls, so if it is a + // CXX temporary object creation, we ensure the ctor call is used deferring + // its removal/optimization to the CIR lowering. + if (!constant || isa(Init)) { initializeWhatIsTechnicallyUninitialized(Loc); LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); buildExprAsInit(Init, &D, lv); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 1b48017678e2..84ebfa669211 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -915,7 +915,7 @@ class ConstExprEmitter return nullptr; } - llvm_unreachable("NYI"); + return CGM.getBuilder().getZeroInitAttr(CGM.getCIRType(Ty)); } mlir::Attribute VisitStringLiteral(StringLiteral *E, QualType T) { diff --git a/clang/test/CIR/CodeGen/agg-init2.cpp b/clang/test/CIR/CodeGen/agg-init2.cpp index 683c6c480f21..3790d493d0ce 100644 --- a/clang/test/CIR/CodeGen/agg-init2.cpp +++ b/clang/test/CIR/CodeGen/agg-init2.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * // CHECK: !ty_22Zero22 = !cir.struct From 482002e34002bd410fffd2834b7bb1a9cbf11a96 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Wed, 29 Nov 2023 17:05:43 -0300 Subject: [PATCH 1259/2301] [CIR][NFC] Remove XFAIL from ThroughMLIR tests (#344) Replaces the usage of builtin integers in the CIR code and removes the dynamic dimension from `memref`s lowered from `!cir.ptr` types. --- clang/test/CIR/Lowering/ThroughMLIR/array.cir | 18 ++++++++------- clang/test/CIR/Lowering/ThroughMLIR/dot.cir | 23 ++++++++++--------- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/clang/test/CIR/Lowering/ThroughMLIR/array.cir b/clang/test/CIR/Lowering/ThroughMLIR/array.cir index 40e622928769..1a7e15531fd8 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/array.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/array.cir @@ -1,15 +1,17 @@ -// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir +!s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} + %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} cir.return } } -// MLIR: module { -// MLIR-NEXT: func @foo() { -// MLIR-NEXT: = memref.alloca() {alignment = 16 : i64} : memref> -// MLIR-NEXT: return -// MLIR-NEXT: } -// MLIR-NEXT: } +// CHECK: module { +// CHECK: func @foo() { +// CHECK: = memref.alloca() {alignment = 16 : i64} : memref> +// CHECK: return +// CHECK: } +// CHECK: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir index dc6b11636059..cd82f88d9e46 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir @@ -2,28 +2,29 @@ // RUN: FileCheck --input-file=%t.mlir %s // XFAIL: * +!s32i = !cir.int module { - cir.func @dot(%arg0: !cir.ptr) -> i32 { + cir.func @dot(%arg0: !cir.ptr) -> !s32i { %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", init] {alignment = 8 : i64} - %1 = cir.alloca i32, cir.ptr , ["__retval"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} %2 = cir.alloca !cir.ptr, cir.ptr >, ["y", init] {alignment = 8 : i64} cir.store %arg0, %0 : !cir.ptr, cir.ptr > %3 = cir.load %0 : cir.ptr >, !cir.ptr cir.store %3, %2 : !cir.ptr, cir.ptr > - %4 = cir.const(0 : i32) : i32 - %5 = cir.load %1 : cir.ptr , i32 - cir.return %5 : i32 + %4 = cir.const(#cir.int<0> : !s32i) : !s32i + %5 = cir.load %1 : cir.ptr , !s32i + cir.return %5 : !s32i } } // CHECK: module { -// CHECK-NEXT: func.func @dot(%arg0: memref) -> i32 { -// CHECK-NEXT: %alloca = memref.alloca() {alignment = 8 : i64} : memref> +// CHECK-NEXT: func.func @dot(%arg0: memref) -> i32 { +// CHECK-NEXT: %alloca = memref.alloca() {alignment = 8 : i64} : memref> // CHECK-NEXT: %alloca_0 = memref.alloca() {alignment = 4 : i64} : memref -// CHECK-NEXT: %alloca_1 = memref.alloca() {alignment = 8 : i64} : memref> -// CHECK-NEXT: memref.store %arg0, %alloca[] : memref> -// CHECK-NEXT: %0 = memref.load %alloca[] : memref> -// CHECK-NEXT: memref.store %0, %alloca_1[] : memref> +// CHECK-NEXT: %alloca_1 = memref.alloca() {alignment = 8 : i64} : memref> +// CHECK-NEXT: memref.store %arg0, %alloca[] : memref> +// CHECK-NEXT: %0 = memref.load %alloca[] : memref> +// CHECK-NEXT: memref.store %0, %alloca_1[] : memref> // CHECK-NEXT: %c0_i32 = arith.constant 0 : i32 // CHECK-NEXT: %1 = memref.load %alloca_0[] : memref // CHECK-NEXT: return %1 : i32 From 674a7133df174d07c464f59a6f3bde58a8227c60 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 30 Nov 2023 21:26:24 +0300 Subject: [PATCH 1260/2301] [CIR][CogeGen] Support aggregate copy via assignment (#325) This PR adds a support of copies of aggregated data types via assignment. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 11 +++ clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 118 +++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 7 ++ clang/test/CIR/CodeGen/agg-copy.c | 62 +++++++++++++ 4 files changed, 195 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/agg-copy.c diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index b37329be00c2..739790b3d150 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -890,6 +890,17 @@ struct CallCleanupFunction final : EHScopeStack::Cleanup { }; } // end anonymous namespace +/// Push the standard destructor for the given type as +/// at least a normal cleanup. +void CIRGenFunction::pushDestroy(QualType::DestructionKind dtorKind, + Address addr, QualType type) { + assert(dtorKind && "cannot push destructor for trivial type"); + + CleanupKind cleanupKind = getCleanupKind(dtorKind); + pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind), + cleanupKind & EHCleanup); +} + void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 7b679eea1357..8abb7f045ccd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -29,6 +29,75 @@ using namespace cir; using namespace clang; namespace { + +// FIXME(cir): This should be a common helper between CIRGen +// and traditional CodeGen +/// Is the value of the given expression possibly a reference to or +/// into a __block variable? +static bool isBlockVarRef(const Expr *E) { + // Make sure we look through parens. + E = E->IgnoreParens(); + + // Check for a direct reference to a __block variable. + if (const DeclRefExpr *DRE = dyn_cast(E)) { + const VarDecl *var = dyn_cast(DRE->getDecl()); + return (var && var->hasAttr()); + } + + // More complicated stuff. + + // Binary operators. + if (const BinaryOperator *op = dyn_cast(E)) { + // For an assignment or pointer-to-member operation, just care + // about the LHS. + if (op->isAssignmentOp() || op->isPtrMemOp()) + return isBlockVarRef(op->getLHS()); + + // For a comma, just care about the RHS. + if (op->getOpcode() == BO_Comma) + return isBlockVarRef(op->getRHS()); + + // FIXME: pointer arithmetic? + return false; + + // Check both sides of a conditional operator. + } else if (const AbstractConditionalOperator *op + = dyn_cast(E)) { + return isBlockVarRef(op->getTrueExpr()) + || isBlockVarRef(op->getFalseExpr()); + + // OVEs are required to support BinaryConditionalOperators. + } else if (const OpaqueValueExpr *op + = dyn_cast(E)) { + if (const Expr *src = op->getSourceExpr()) + return isBlockVarRef(src); + + // Casts are necessary to get things like (*(int*)&var) = foo(). + // We don't really care about the kind of cast here, except + // we don't want to look through l2r casts, because it's okay + // to get the *value* in a __block variable. + } else if (const CastExpr *cast = dyn_cast(E)) { + if (cast->getCastKind() == CK_LValueToRValue) + return false; + return isBlockVarRef(cast->getSubExpr()); + + // Handle unary operators. Again, just aggressively look through + // it, ignoring the operation. + } else if (const UnaryOperator *uop = dyn_cast(E)) { + return isBlockVarRef(uop->getSubExpr()); + + // Look into the base of a field access. + } else if (const MemberExpr *mem = dyn_cast(E)) { + return isBlockVarRef(mem->getBase()); + + // Look into the base of a subscript. + } else if (const ArraySubscriptExpr *sub = dyn_cast(E)) { + return isBlockVarRef(sub->getBase()); + } + + return false; +} + class AggExprEmitter : public StmtVisitor { CIRGenFunction &CGF; AggValueSlot Dest; @@ -117,8 +186,8 @@ class AggExprEmitter : public StmtVisitor { // l-values void VisitDeclRefExpr(DeclRefExpr *E) { buildAggLoadOfLValue(E); } - void VisitMemberExpr(MemberExpr *E) { llvm_unreachable("NYI"); } - void VisitUnaryDeref(UnaryOperator *E) { llvm_unreachable("NYI"); } + void VisitMemberExpr(MemberExpr *E) { buildAggLoadOfLValue(E); } + void VisitUnaryDeref(UnaryOperator *E) { buildAggLoadOfLValue(E); } void VisitStringLiteral(StringLiteral *E) { llvm_unreachable("NYI"); } void VisitCompoundLIteralExpr(CompoundLiteralExpr *E) { llvm_unreachable("NYI"); @@ -136,7 +205,50 @@ class AggExprEmitter : public StmtVisitor { void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *E) { llvm_unreachable("NYI"); } - void VisitBinAssign(const BinaryOperator *E) { llvm_unreachable("NYI"); } + void VisitBinAssign(const BinaryOperator *E) { + + // For an assignment to work, the value on the right has + // to be compatible with the value on the left. + assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), + E->getRHS()->getType()) + && "Invalid assignment"); + + if (isBlockVarRef(E->getLHS()) && + E->getRHS()->HasSideEffects(CGF.getContext())) { + llvm_unreachable("NYI"); + } + + LValue lhs = CGF.buildLValue(E->getLHS()); + + // If we have an atomic type, evaluate into the destination and then + // do an atomic copy. + if (lhs.getType()->isAtomicType() || + CGF.LValueIsSuitableForInlineAtomic(lhs)) { + assert(!UnimplementedFeature::atomicTypes()); + return; + } + + // Codegen the RHS so that it stores directly into the LHS. + AggValueSlot lhsSlot = AggValueSlot::forLValue( + lhs, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsAliased, AggValueSlot::MayOverlap); + + // A non-volatile aggregate destination might have volatile member. + if (!lhsSlot.isVolatile() && + CGF.hasVolatileMember(E->getLHS()->getType())) + assert(!UnimplementedFeature::atomicTypes()); + + CGF.buildAggExpr(E->getRHS(), lhsSlot); + + // Copy into the destination if the assignment isn't ignored. + buildFinalDestCopy(E->getType(), lhs); + + if (!Dest.isIgnored() && !Dest.isExternallyDestructed() && + E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct) + CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(), + E->getType()); + } + void VisitBinComma(const BinaryOperator *E) { llvm_unreachable("NYI"); } void VisitBinCmp(const BinaryOperator *E) { llvm_unreachable("NYI"); } void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index d265e94c77fa..a4707194a04c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1197,6 +1197,10 @@ class CIRGenFunction : public CIRGenTypeCache { llvm_unreachable("bad destruction kind"); } + CleanupKind getCleanupKind(QualType::DestructionKind kind) { + return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup); + } + void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type); @@ -1510,6 +1514,9 @@ class CIRGenFunction : public CIRGenTypeCache { static Destroyer destroyCXXObject; + void pushDestroy(QualType::DestructionKind dtorKind, + Address addr, QualType type); + void pushDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray); diff --git a/clang/test/CIR/CodeGen/agg-copy.c b/clang/test/CIR/CodeGen/agg-copy.c new file mode 100644 index 000000000000..9f259583e1c2 --- /dev/null +++ b/clang/test/CIR/CodeGen/agg-copy.c @@ -0,0 +1,62 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef struct {} S; + +typedef struct { + int a; + int b; + S s; +} A; + +// CHECK: cir.func @foo1 +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP4:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[TMP3]] : !s32i), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP1]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP7:%.*]] = cir.ptr_stride([[TMP5]] : !cir.ptr, [[TMP6]] : !s32i), !cir.ptr +// CHECK: cir.copy [[TMP7]] to [[TMP4]] : !cir.ptr +void foo1(A* a1, A* a2) { + a1[1] = a2[1]; +} + +// CHECK: cir.func @foo2 +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][2] {name = "s"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP1]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.get_member [[TMP4]][2] {name = "s"} : !cir.ptr -> !cir.ptr +// CHECK: cir.copy [[TMP5]] to [[TMP3]] : !cir.ptr +void foo2(A* a1, A* a2) { + a1->s = a2->s; +} + +// CHECK: cir.global external @a = #cir.zero : !ty_22A22 +// CHECK: cir.func @foo3 +// CHECK: [[TMP0]] = cir.alloca !ty_22A22, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: [[TMP1]] = cir.get_global @a : cir.ptr +// CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr +// CHECK: [[TMP2]] = cir.load [[TMP0]] : cir.ptr , !ty_22A22 +// CHECK: cir.return [[TMP2]] : !ty_22A22 +A a; +A foo3(void) { + return a; +} + +// CHECK: cir.func @foo4 +// CHECK: [[TMP0]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] +// CHECK: [[TMP1]] = cir.alloca !ty_22A22, cir.ptr , ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP2]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr +void foo4(A* a1) { + A a2 = *a1; +} \ No newline at end of file From ae52f9e0fb9ac22f79fc51802aef566aa4b68435 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 30 Nov 2023 21:30:37 +0300 Subject: [PATCH 1261/2301] [CIR][CodeGen] Bitfield operations (#279) As we discussed in #233, there is a desire to have CIR operations for bit fields set/get access and do all the stuff in the `LoweringPrepare`. There is one thing I want to discuss, that's why the PR is marked as a draft now. Looks like I have to introduce some redundant helpers for all these `or` and `shift` operations: while we were in the `CodeGen` area, we used `CIRGenBuilder` and we could easily extend it. I bet we don't want to depend from `CodeGen` in the `LoweringPrepare`. Once it's true. what is a good place for all this common things? As an idea, we could introduce one more layer for builder, with no state involved - just helpers and nothing else. But again, what is a good place for it from your point of view? --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 27 ++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 148 +++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 21 +++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 147 +++++------------ clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 3 + .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 3 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 + .../Dialect/Transforms/LoweringPrepare.cpp | 150 ++++++++++++++++-- clang/test/CIR/CodeGen/bitfield-ops.c | 33 ++++ 9 files changed, 407 insertions(+), 129 deletions(-) create mode 100644 clang/test/CIR/CodeGen/bitfield-ops.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index f01ee98c4c55..8706a9019c66 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -521,4 +521,31 @@ def GlobalCtorAttr : CIR_Attr<"GlobalCtor", "globalCtor"> { ]; let skipDefaultBuilders = 1; } + +def BitfieldInfoAttr : CIR_Attr<"BitfieldInfo", "bitfield_info"> { + let summary = "Represents a bit field info"; + let description = [{ + Holds the next information about bitfields: name, storage type, a bitfield size + and position in the storage, if the bitfield is signed or not. + }]; + let parameters = (ins "StringAttr":$name, + "Type":$storage_type, + "uint64_t":$size, + "uint64_t":$offset, + "bool":$is_signed); + + let assemblyFormat = "`<` struct($name, $storage_type, $size, $offset, $is_signed) `>`"; + + let builders = [ + AttrBuilder<(ins "StringRef":$name, + "Type":$storage_type, + "uint64_t":$size, + "uint64_t":$offset, + "bool":$is_signed + ), [{ + return $_get($_ctxt, StringAttr::get($_ctxt, name), storage_type, size, offset, is_signed); + }]> + ]; +} + #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 71f00d4e299f..57b173c10660 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1415,6 +1415,154 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// SetBitfieldOp +//===----------------------------------------------------------------------===// + +def SetBitfieldOp : CIR_Op<"set_bitfield"> { + let summary = "Set a bitfield"; + let description = [{ + The `cir.set_bitfield` operation provides a store-like access to + a bit field of a record. + + It expects an address of a storage where to store, a type of the storage, + a value being stored, a name of a bit field, a pointer to the storage in the + base record, a size of the storage, a size the bit field, an offset + of the bit field and a sign. Returns a value being stored. + + Example. + Suppose we have a struct with multiple bitfields stored in + different storages. The `cir.set_bitfield` operation sets the value + of the bitfield. + ```C++ + typedef struct { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; + } S; + + void store_bitfield(S& s) { + s.d = 3; + } + ``` + + ```mlir + // 'd' is in the storage with the index 1 + !struct_type = !cir.struct, !cir.int, !cir.int} #cir.record.decl.ast> + #bfi_d = #cir.bitfield_info + + %1 = cir.const(#cir.int<3> : !s32i) : !s32i + %2 = cir.load %0 : cir.ptr >, !cir.ptr + %3 = cir.get_member %2[1] {name = "d"} : !cir.ptr -> !cir.ptr + %4 = cir.set_bitfield(#bfi_d, %3 : !cir.ptr, %1 : !s32i) -> !s32i + ``` + }]; + + let arguments = (ins + AnyType:$dst, + AnyType:$src, + BitfieldInfoAttr:$bitfield_info + ); + + let results = (outs CIR_IntType:$result); + + let assemblyFormat = [{ `(`$bitfield_info`,` $dst`:`type($dst)`,` + $src`:`type($src) `)` attr-dict `->` type($result) }]; + + let builders = [ + OpBuilder<(ins "Type":$type, + "Value":$dst, + "Type":$storage_type, + "Value":$src, + "StringRef":$name, + "unsigned":$size, + "unsigned":$offset, + "bool":$is_signed + ), + [{ + BitfieldInfoAttr info = + BitfieldInfoAttr::get($_builder.getContext(), + name, storage_type, + size, offset, is_signed); + build($_builder, $_state, type, dst, src, info); + }]> + ]; +} + +//===----------------------------------------------------------------------===// +// GetBitfieldOp +//===----------------------------------------------------------------------===// + +def GetBitfieldOp : CIR_Op<"get_bitfield"> { + let summary = "Get a bitfield"; + let description = [{ + The `cir.get_bitfield` operation provides a load-like access to + a bit field of a record. + + It expects a name if a bit field, a pointer to a storage in the + base record, a type of the storage, a name of the bitfield, + a size the bit field, an offset of the bit field and a sign. + + Example: + Suppose we have a struct with multiple bitfields stored in + different storages. The `cir.get_bitfield` operation gets the value + of the bitfield + ```C++ + typedef struct { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; + } S; + + int load_bitfield(S& s) { + return s.d; + } + ``` + + ```mlir + // 'd' is in the storage with the index 1 + !struct_type = !cir.struct, !cir.int, !cir.int} #cir.record.decl.ast> + #bfi_d = #cir.bitfield_info + + %2 = cir.load %0 : cir.ptr >, !cir.ptr + %3 = cir.get_member %2[1] {name = "d"} : !cir.ptr -> !cir.ptr + %4 = cir.get_bitfield(#bfi_d, %3 : !cir.ptr) -> !s32i + ``` + }]; + + let arguments = (ins + AnyType:$addr, + BitfieldInfoAttr:$bitfield_info + ); + + let results = (outs CIR_IntType:$result); + + let assemblyFormat = [{ `(`$bitfield_info `,` $addr attr-dict `:` + type($addr) `)` `->` type($result) }]; + + let builders = [ + OpBuilder<(ins "Type":$type, + "Value":$addr, + "Type":$storage_type, + "StringRef":$name, + "unsigned":$size, + "unsigned":$offset, + "bool":$is_signed + ), + [{ + BitfieldInfoAttr info = + BitfieldInfoAttr::get($_builder.getContext(), + name, storage_type, + size, offset, is_signed); + build($_builder, $_state, type, addr, info); + }]> + ]; +} + //===----------------------------------------------------------------------===// // GetMemberOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 503f0797ae93..66bf63d8c73c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -10,6 +10,7 @@ #define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H #include "Address.h" +#include "CIRGenRecordLayout.h" #include "CIRDataLayout.h" #include "CIRGenTypeCache.h" #include "UnimplementedFeatureGuarding.h" @@ -661,6 +662,26 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { global.getLoc(), getPointerTo(global.getSymType()), global.getName()); } + mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType, + mlir::Value addr, mlir::Type storageType, + const CIRGenBitFieldInfo &info, + bool useVolatile) { + auto offset = useVolatile ? info.VolatileOffset : info.Offset; + return create(loc, resultType, addr, storageType, + info.Name, info.Size, + offset, info.IsSigned); + } + + mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType, + mlir::Value dstAddr, mlir::Type storageType, + mlir::Value src, const CIRGenBitFieldInfo &info, + bool useVolatile) { + auto offset = useVolatile ? info.VolatileOffset : info.Offset; + return create( + loc, resultType, dstAddr, storageType, src, info.Name, + info.Size, offset, info.IsSigned); + } + /// Create a pointer to a record member. mlir::Value createGetMember(mlir::Location loc, mlir::Type result, mlir::Value base, llvm::StringRef name, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index f78f163d4fdd..7af792926def 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -222,14 +222,14 @@ static bool isAAPCS(const TargetInfo &TargetInfo) { return TargetInfo.getABI().starts_with("aapcs"); } -Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base, +Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base, const FieldDecl *field, unsigned index, unsigned size) { if (index == 0) return base.getAddress(); - auto loc = getLoc(field->getLocation()); + auto loc = getLoc(field->getLocation()); auto fieldType = builder.getUIntNTy(size); auto fieldPtr = @@ -268,7 +268,6 @@ LValue CIRGenFunction::buildLValueForBitField(LValue base, const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize; Address Addr = getAddrOfBitFieldStorage(base, field, Idx, SS); - // Get the access type. mlir::Type FieldIntTy = builder.getUIntNTy(SS); @@ -278,7 +277,6 @@ LValue CIRGenFunction::buildLValueForBitField(LValue base, QualType fieldType = field->getType().withCVRQualifiers(base.getVRQualifiers()); - assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields"); LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo); @@ -400,7 +398,7 @@ LValue CIRGenFunction::buildLValueForFieldInitialization( auto& layout = CGM.getTypes().getCIRGenRecordLayout(Field->getParent()); unsigned FieldIndex = layout.getCIRFieldNo(Field); - + Address V = buildAddrOfFieldStorage(*this, Base.getAddress(), Field, FieldName, FieldIndex); @@ -605,42 +603,20 @@ RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc) { - const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo(); + const CIRGenBitFieldInfo &info = LV.getBitFieldInfo(); // Get the output type. - mlir::Type ResLTy = convertType(LV.getType()); - Address Ptr = LV.getBitFieldAddress(); - mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr); - auto ValWidth = Val.getType().cast().getWidth(); - - bool UseVolatile = LV.isVolatileQualified() && - Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); - const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; - const unsigned StorageSize = - UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; - - if (Info.IsSigned) { - assert(static_cast(Offset + Info.Size) <= StorageSize); - - mlir::Type typ = builder.getSIntNTy(ValWidth); - Val = builder.createIntCast(Val, typ); - - unsigned HighBits = StorageSize - Offset - Info.Size; - if (HighBits) - Val = builder.createShiftLeft(Val, HighBits); - if (Offset + HighBits) - Val = builder.createShiftRight(Val, Offset + HighBits); - } else { - if (Offset) - Val = builder.createShiftRight(Val, Offset); + mlir::Type resLTy = convertType(LV.getType()); + Address ptr = LV.getBitFieldAddress(); - if (static_cast(Offset) + Info.Size < StorageSize) - Val = builder.createAnd(Val, - llvm::APInt::getLowBitsSet(ValWidth, Info.Size)); - } - Val = builder.createIntCast(Val, ResLTy); - assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); - return RValue::get(Val); + bool useVolatile = LV.isVolatileQualified() && + info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); + + auto field = + builder.createGetBitfield(getLoc(Loc), resLTy, ptr.getPointer(), + ptr.getElementType(), info, useVolatile); + assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); + return RValue::get(field); } void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { @@ -665,79 +641,28 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, mlir::Value &Result) { - const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo(); - mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType()); - Address Ptr = Dst.getBitFieldAddress(); - - // Get the source value, truncated to the width of the bit-field. - mlir::Value SrcVal = Src.getScalarVal(); - - // Cast the source to the storage type and shift it into place. - SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType()); - auto SrcWidth = SrcVal.getType().cast().getWidth(); - mlir::Value MaskedVal = SrcVal; - - const bool UseVolatile = + // According to the AACPS: + // When a volatile bit-field is written, and its container does not overlap + // with any non-bit-field member, its container must be read exactly once + // and written exactly once using the access width appropriate to the type + // of the container. The two accesses are not atomic. + if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) && + CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) + llvm_unreachable("volatile bit-field is not implemented for the AACPS"); + + const CIRGenBitFieldInfo &info = Dst.getBitFieldInfo(); + mlir::Type resLTy = getTypes().convertTypeForMem(Dst.getType()); + Address ptr = Dst.getBitFieldAddress(); + + const bool useVolatile = CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() && - Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); - const unsigned StorageSize = - UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; - const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; - // See if there are other bits in the bitfield's storage we'll need to load - // and mask together with source before storing. - if (StorageSize != Info.Size) { - assert(StorageSize > Info.Size && "Invalid bitfield size."); - - mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc()); - - // Mask the source value as needed. - if (!hasBooleanRepresentation(Dst.getType())) - SrcVal = builder.createAnd( - SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size)); - - MaskedVal = SrcVal; - if (Offset) - SrcVal = builder.createShiftLeft(SrcVal, Offset); - - // Mask out the original value. - Val = builder.createAnd( - Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size)); + info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); - // Or together the unchanged values and the source value. - SrcVal = builder.createOr(Val, SrcVal); + mlir::Value dstAddr = Dst.getAddress().getPointer(); - } else { - // According to the AACPS: - // When a volatile bit-field is written, and its container does not overlap - // with any non-bit-field member, its container must be read exactly once - // and written exactly once using the access width appropriate to the type - // of the container. The two accesses are not atomic. - if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) && - CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) - llvm_unreachable("volatile bit-field is not implemented for the AACPS"); - } - - // Write the new value back out. - // TODO: constant matrix type, volatile, no init, non temporal, TBAA - buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(), - Dst.getBaseInfo(), false, false); - - // Return the new value of the bit-field. - mlir::Value ResultVal = MaskedVal; - ResultVal = builder.createIntCast(ResultVal, ResLTy); - - // Sign extend the value if needed. - if (Info.IsSigned) { - assert(Info.Size <= StorageSize); - unsigned HighBits = StorageSize - Info.Size; - - if (HighBits) { - ResultVal = builder.createShiftLeft(ResultVal, HighBits); - ResultVal = builder.createShiftRight(ResultVal, HighBits); - } - } - - Result = buildFromMemory(ResultVal, Dst.getType()); + Result = builder.createSetBitfield(dstAddr.getLoc(), resLTy, dstAddr, + ptr.getElementType(), Src.getScalarVal(), + info, useVolatile); } static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, @@ -2445,9 +2370,9 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, if (isNontemporal) { llvm_unreachable("NYI"); } - - assert(!UnimplementedFeature::tbaa() && "NYI"); - assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); + + assert(!UnimplementedFeature::tbaa() && "NYI"); + assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); return buildFromMemory(Load, Ty); } diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index 0a686181db61..fc198776511e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -91,6 +91,9 @@ struct CIRGenBitFieldInfo { /// The offset of the bitfield storage from the start of the struct. clang::CharUnits VolatileStorageOffset; + /// The name of a bitfield + llvm::StringRef Name; + CIRGenBitFieldInfo() : Offset(), Size(), IsSigned(), StorageSize(), VolatileOffset(), VolatileStorageSize() {} diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 9b6fdf484438..a153473b9e64 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -227,7 +227,8 @@ void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, (unsigned)(getFieldBitOffset(FD) - astContext.toBits(StartOffset)); Info.Size = FD->getBitWidthValue(); Info.StorageSize = getSizeInBits(StorageType).getQuantity(); - Info.StorageOffset = StartOffset; + Info.StorageOffset = StartOffset; + Info.Name = FD->getName(); if (Info.Size > Info.StorageSize) Info.Size = Info.StorageSize; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index fa91f7c46978..8f031d611712 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -77,6 +77,10 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << (boolAttr.getValue() ? "true" : "false"); return AliasResult::FinalAlias; } + if (auto bitfield = attr.dyn_cast()) { + os << "bfi_" << bitfield.getName().str(); + return AliasResult::FinalAlias; + } return AliasResult::NoAlias; } diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 8ec0d76226fb..92a7137e8e40 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -13,6 +13,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/Mangle.h" #include "clang/Basic/Module.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" @@ -23,8 +24,9 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Path.h" +using cir::CIRBaseBuilderTy; using namespace mlir; -using namespace cir; +using namespace mlir::cir; static SmallString<128> getTransformedFileName(ModuleOp theModule) { SmallString<128> FileName; @@ -47,36 +49,39 @@ static SmallString<128> getTransformedFileName(ModuleOp theModule) { } /// Return the FuncOp called by `callOp`. -static cir::FuncOp getCalledFunction(cir::CallOp callOp) { +static FuncOp getCalledFunction(CallOp callOp) { SymbolRefAttr sym = llvm::dyn_cast_if_present(callOp.getCallableForCallee()); if (!sym) return nullptr; - return dyn_cast_or_null( + return dyn_cast_or_null( SymbolTable::lookupNearestSymbolFrom(callOp, sym)); } namespace { + struct LoweringPreparePass : public LoweringPrepareBase { LoweringPreparePass() = default; void runOnOperation() override; void runOnOp(Operation *op); void lowerGlobalOp(GlobalOp op); + void lowerGetBitfieldOp(GetBitfieldOp op); + void lowerSetBitfieldOp(SetBitfieldOp op); /// Build the function that initializes the specified global - cir::FuncOp buildCXXGlobalVarDeclInitFunc(GlobalOp op); + FuncOp buildCXXGlobalVarDeclInitFunc(GlobalOp op); /// Build a module init function that calls all the dynamic initializers. void buildCXXGlobalInitFunc(); - cir::FuncOp + FuncOp buildRuntimeFunction(mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, mlir::cir::FuncType type, mlir::cir::GlobalLinkageKind linkage = mlir::cir::GlobalLinkageKind::ExternalLinkage); - cir::GlobalOp + GlobalOp buildRuntimeVariable(mlir::OpBuilder &Builder, llvm::StringRef Name, mlir::Location Loc, mlir::Type type, mlir::cir::GlobalLinkageKind Linkage = @@ -98,11 +103,11 @@ struct LoweringPreparePass : public LoweringPrepareBase { }; } // namespace -cir::GlobalOp LoweringPreparePass::buildRuntimeVariable( +GlobalOp LoweringPreparePass::buildRuntimeVariable( mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, mlir::Type type, mlir::cir::GlobalLinkageKind linkage) { - cir::GlobalOp g = - dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( + GlobalOp g = + dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( theModule, StringAttr::get(theModule->getContext(), name))); if (!g) { g = builder.create(loc, name, type); @@ -114,11 +119,11 @@ cir::GlobalOp LoweringPreparePass::buildRuntimeVariable( return g; } -cir::FuncOp LoweringPreparePass::buildRuntimeFunction( +FuncOp LoweringPreparePass::buildRuntimeFunction( mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, mlir::cir::FuncType type, mlir::cir::GlobalLinkageKind linkage) { - cir::FuncOp f = - dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( + FuncOp f = + dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( theModule, StringAttr::get(theModule->getContext(), name))); if (!f) { f = builder.create(loc, name, type); @@ -133,7 +138,7 @@ cir::FuncOp LoweringPreparePass::buildRuntimeFunction( return f; } -cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { +FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { SmallString<256> fnName; { llvm::raw_svector_ostream Out(fnName); @@ -177,7 +182,7 @@ cir::FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { break; } assert(dtorCall && "Expected a dtor call"); - cir::FuncOp dtorFunc = getCalledFunction(dtorCall); + FuncOp dtorFunc = getCalledFunction(dtorCall); assert(dtorFunc && mlir::isa(*dtorFunc.getAst()) && "Expected a dtor call"); @@ -297,10 +302,117 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { builder.create(f.getLoc()); } +void LoweringPreparePass::lowerGetBitfieldOp(GetBitfieldOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + + auto info = op.getBitfieldInfo(); + auto size = info.getSize(); + auto storageType = info.getStorageType(); + auto storageSize = storageType.cast().getWidth(); + auto offset = info.getOffset(); + auto resultTy = op.getType(); + auto addr = op.getAddr(); + auto loc = addr.getLoc(); + mlir::Value val = + builder.create(loc, storageType, op.getAddr()); + auto valWidth = val.getType().cast().getWidth(); + + if (info.getIsSigned()) { + assert(static_cast(offset + size) <= storageSize); + mlir::Type typ = + mlir::cir::IntType::get(builder.getContext(), valWidth, true); + + val = builder.createIntCast(val, typ); + + unsigned highBits = storageSize - offset - size; + if (highBits) + val = builder.createShiftLeft(val, highBits); + if (offset + highBits) + val = builder.createShiftRight(val, offset + highBits); + } else { + if (offset) + val = builder.createShiftRight(val, offset); + + if (static_cast(offset) + size < storageSize) + val = builder.createAnd(val, llvm::APInt::getLowBitsSet(valWidth, size)); + } + val = builder.createIntCast(val, resultTy); + + op.replaceAllUsesWith(val); + op.erase(); +} + +void LoweringPreparePass::lowerSetBitfieldOp(SetBitfieldOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + + auto srcVal = op.getSrc(); + auto addr = op.getDst(); + auto info = op.getBitfieldInfo(); + auto size = info.getSize(); + auto storageType = info.getStorageType(); + auto storageSize = storageType.cast().getWidth(); + auto offset = info.getOffset(); + auto resultTy = op.getType(); + auto loc = addr.getLoc(); + + // Get the source value, truncated to the width of the bit-field. + srcVal = builder.createIntCast(op.getSrc(), storageType); + auto srcWidth = srcVal.getType().cast().getWidth(); + + mlir::Value maskedVal = srcVal; + + if (storageSize != size) { + assert(storageSize > size && "Invalid bitfield size."); + + mlir::Value val = + builder.create(loc, storageType, addr); + + srcVal = + builder.createAnd(srcVal, llvm::APInt::getLowBitsSet(srcWidth, size)); + + maskedVal = srcVal; + if (offset) + srcVal = builder.createShiftLeft(srcVal, offset); + + // Mask out the original value. + val = builder.createAnd(val, + ~llvm::APInt::getBitsSet(srcWidth, offset, offset + size)); + + // Or together the unchanged values and the source value. + srcVal = builder.createOr(val, srcVal); + } + + builder.create(loc, srcVal, addr); + + if (!op->getUses().empty()) { + mlir::Value resultVal = maskedVal; + resultVal = builder.createIntCast(resultVal, resultTy); + + if (info.getIsSigned()) { + assert(size <= storageSize); + unsigned highBits = storageSize - size; + + if (highBits) { + resultVal = builder.createShiftLeft(resultVal, highBits); + resultVal = builder.createShiftRight(resultVal, highBits); + } + } + + op.replaceAllUsesWith(resultVal); + } + + op.erase(); +} + void LoweringPreparePass::runOnOp(Operation *op) { - if (GlobalOp globalOp = cast(op)) { - lowerGlobalOp(globalOp); - return; + if (auto getGlobal = dyn_cast(op)) { + lowerGlobalOp(getGlobal); + } else if (auto getBitfield = dyn_cast(op)) { + lowerGetBitfieldOp(getBitfield); + } else if (auto setBitfield = dyn_cast(op)) { + lowerSetBitfieldOp(setBitfield); } } @@ -315,6 +427,10 @@ void LoweringPreparePass::runOnOperation() { op->walk([&](Operation *op) { if (isa(op)) opsToTransform.push_back(op); + if (isa(op)) + opsToTransform.push_back(op); + if (isa(op)) + opsToTransform.push_back(op); }); for (auto *o : opsToTransform) { diff --git a/clang/test/CIR/CodeGen/bitfield-ops.c b/clang/test/CIR/CodeGen/bitfield-ops.c new file mode 100644 index 000000000000..5957054f6c6d --- /dev/null +++ b/clang/test/CIR/CodeGen/bitfield-ops.c @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o - 2>&1 | FileCheck %s + +// CHECK: !ty_22S22 = !cir.struct +typedef struct { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; + unsigned f; +} S; + +// CHECK: #bfi_d = #cir.bitfield_info +// CHECK: #bfi_e = #cir.bitfield_info + +// CHECK: cir.func {{.*@store_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , ["s"] +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP0]][2] {name = "e"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) -> !s32i +void store_field() { + S s; + s.e = 3; +} + +// CHECK: cir.func {{.*@load_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr) -> !s32i +int load_field(S* s) { + return s->d; +} \ No newline at end of file From 4656eee62c68239b16b3f66d15eaea607a0f7cc1 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Thu, 23 Nov 2023 19:54:55 -0300 Subject: [PATCH 1262/2301] [CIR][CIRGen] Fix zero-offset global view access Instead of ignoring global view access with zero offset, we should properly dereference the global type to prevent type-matching errors. This patch also fixes other two issues: - An extra index was wrongly added to the global view access. E.g. for an access like `a[0]` would generate a GV with 2 zero indexes. The indexes, however, do not take the pointer wrapping the global type into account. - When assigning the address of a complete type to an incomplete one the complete type would override the incomplete destination type, causing inconsistencies during CodeGen. For example, given `int a[3];` and `int (*ptr_a)[] = &a;`, despite `ptr_a`, in CIR it would be considered complete. Fixes #329 --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 21 +++++++++------- clang/test/CIR/CodeGen/array.c | 20 ++++++++++++---- clang/test/CIR/CodeGen/globals.c | 29 +++++++++++++++++++++-- 3 files changed, 56 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 84ebfa669211..c00ebe9c1b27 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1066,6 +1066,8 @@ class ConstantLValueEmitter Indices.push_back(Attr); } + if (Indices.empty()) + return {}; return CGM.getBuilder().getArrayAttr(Indices); } @@ -1073,15 +1075,18 @@ class ConstantLValueEmitter /// Apply the value offset to the given constant. ConstantLValue applyOffset(ConstantLValue &C) { - if (!hasNonZeroOffset()) - return C; - if (auto Attr = C.Value.dyn_cast()) { - auto GV = cast(Attr); - assert(!GV.getIndices()); - - return mlir::cir::GlobalViewAttr::get( - GV.getType(), GV.getSymbol(), getOffset(GV.getType())); + // Handle attribute constant LValues. + if (auto Attr = + C.Value.dyn_cast()) { + if (auto GV = Attr.dyn_cast()) { + auto baseTy = GV.getType().cast().getPointee(); + auto destTy = CGM.getTypes().convertTypeForMem(DestType); + assert(!GV.getIndices() && "Global view is already indexed"); + return mlir::cir::GlobalViewAttr::get(destTy, GV.getSymbol(), + getOffset(baseTy)); + } + llvm_unreachable("Unsupported attribute type to offset"); } // TODO(cir): use ptr_stride, or something... diff --git a/clang/test/CIR/CodeGen/array.c b/clang/test/CIR/CodeGen/array.c index 22c9b906fde8..c98e97961602 100644 --- a/clang/test/CIR/CodeGen/array.c +++ b/clang/test/CIR/CodeGen/array.c @@ -8,13 +8,25 @@ struct S { // CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array int a[4]; -int (*ptr_a)[] = &a; // CHECK: cir.global external @a = #cir.zero : !cir.array -// CHECK: cir.global external @ptr_a = #cir.global_view<@a> : !cir.ptr> +// Should create a pointer to a complete array. +int (*complete_ptr_a)[4] = &a; +// CHECK: cir.global external @complete_ptr_a = #cir.global_view<@a> : !cir.ptr> + +// Should create a pointer to an incomplete array. +int (*incomplete_ptr_a)[] = &a; +// CHECK: cir.global external @incomplete_ptr_a = #cir.global_view<@a> : !cir.ptr> + +// Should access incomplete array if external. extern int foo[]; // CHECK: cir.global "private" external @foo : !cir.array - void useFoo(int i) { foo[i] = 42; -} \ No newline at end of file +} +// CHECK: @useFoo +// CHECK: %[[#V2:]] = cir.get_global @foo : cir.ptr > +// CHECK: %[[#V3:]] = cir.load %{{.+}} : cir.ptr , !s32i +// CHECK: %[[#V4:]] = cir.cast(array_to_ptrdecay, %[[#V2]] : !cir.ptr>), !cir.ptr +// CHECK: %[[#V5:]] = cir.ptr_stride(%[[#V4]] : !cir.ptr, %[[#V3]] : !s32i), !cir.ptr +// CHECK: cir.store %{{.+}}, %[[#V5]] : !s32i, cir.ptr diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index cbeee30eeae6..3faf4e9f2548 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -3,7 +3,8 @@ // bit different from the C++ version. This test ensures that these differences // are accounted for. -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s char string[] = "whatnow"; // CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array @@ -48,7 +49,31 @@ struct { // CHECK: cir.global external @nestedStringPtr = #cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr}> int *globalPtr = &nestedString.y[1]; -// CHECK: cir.global external @globalPtr = #cir.global_view<@nestedString, [0 : i32, 1 : i32, 1 : i32]> +// CHECK: cir.global external @globalPtr = #cir.global_view<@nestedString, [1 : i32, 1 : i32]> : !cir.ptr + +const int i = 12; +int i2 = i; +struct { int i; } i3 = {i}; +// CHECK: cir.global external @i2 = #cir.int<12> : !s32i +// CHECK: cir.global external @i3 = #cir.const_struct<{#cir.int<12> : !s32i}> : !ty_22anon2E722 + +int a[10][10][10]; +int *a2 = &a[3][0][8]; +struct { int *p; } a3 = {&a[3][0][8]}; +// CHECK: cir.global external @a2 = #cir.global_view<@a, [3 : i32, 0 : i32, 8 : i32]> : !cir.ptr +// CHECK: cir.global external @a3 = #cir.const_struct<{#cir.global_view<@a, [3 : i32, 0 : i32, 8 : i32]> : !cir.ptr}> : !ty_22anon2E922 + +int p[10]; +int *p1 = &p[0]; +struct { int *x; } p2 = {&p[0]}; +// CHECK: cir.global external @p1 = #cir.global_view<@p> : !cir.ptr +// CHECK: cir.global external @p2 = #cir.const_struct<{#cir.global_view<@p> : !cir.ptr}> : !ty_22anon2E1122 + +int q[10]; +int *q1 = q; +struct { int *x; } q2 = {q}; +// CHECK: cir.global external @q1 = #cir.global_view<@q> : !cir.ptr +// CHECK: cir.global external @q2 = #cir.const_struct<{#cir.global_view<@q> : !cir.ptr}> : !ty_22anon2E1322 // TODO: test tentatives with internal linkage. From cbb32724789a50dff8a0dcd3b1829016deeb5dd3 Mon Sep 17 00:00:00 2001 From: Henrich Lauko Date: Thu, 30 Nov 2023 23:01:24 +0100 Subject: [PATCH 1263/2301] [CIR][github] Setup github test CIR workflow. (#332) Closes #288 The action mirrors https://github.com/llvm/llvm-project/blob/main/.github/workflows/clang-tests.yml with a few minor differences: - it is not restricted to: `github.repository_owner == 'llvm'` - it triggers on pull requests and pushes to `main` instead of `release` branches I suggest adding branch protection rule to require tests to pass for each pull request: `Settings -> Branches -> Add branch protection rule -> Require status checks to pass before merging` --- .github/workflows/clang-cir-tests.yml | 37 +++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 .github/workflows/clang-cir-tests.yml diff --git a/.github/workflows/clang-cir-tests.yml b/.github/workflows/clang-cir-tests.yml new file mode 100644 index 000000000000..9add7ccc5b15 --- /dev/null +++ b/.github/workflows/clang-cir-tests.yml @@ -0,0 +1,37 @@ +name: Clang CIR Tests + +permissions: + contents: read + +on: + workflow_dispatch: + push: + branches: + - 'main' + paths: + - 'clang/**' + - '.github/workflows/clang-cir-tests.yml' + - '.github/workflows/llvm-project-tests.yml' + - '!llvm/**' + pull_request: + branches: + - 'main' + paths: + - 'clang/**' + - '.github/workflows/clang-cir-tests.yml' + - '.github/workflows/llvm-project-tests.yml' + - '!llvm/**' + +concurrency: + # Skip intermediate builds: always. + # Cancel intermediate builds: only if it is a pull request build. + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} + +jobs: + check_clang_cir: + name: Test clang-cir + uses: ./.github/workflows/llvm-project-tests.yml + with: + build_target: check-clang-cir + projects: clang;mlir;cir From 84e064c58e5d6a86ba0882fa91e82f23552b8196 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 1 Dec 2023 17:02:20 -0800 Subject: [PATCH 1264/2301] [CIR][CIRGen] Handle more variations on dtor emission --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 68 ++++++++++++++----- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/dtors-scopes.cpp | 11 ++- 3 files changed, 61 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 63a15146c6bd..c3feb9a96ae6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1005,8 +1005,7 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { llvm_unreachable("NYI"); // Enter the epilogue cleanups. - llvm_unreachable("NYI"); - // RunCleanupsScope DtorEpilogue(*this); + RunCleanupsScope DtorEpilogue(*this); // If this is the complete variant, just invoke the base variant; // the epilogue will destruct the virtual bases. But we can't do @@ -1020,20 +1019,19 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { llvm_unreachable("already handled deleting case"); case Dtor_Complete: - llvm_unreachable("NYI"); - // assert((Body || getTarget().getCXXABI().isMicrosoft()) && - // "can't emit a dtor without a body for non-Microsoft ABIs"); - - // // Enter the cleanup scopes for virtual bases. - // EnterDtorCleanups(Dtor, Dtor_Complete); - - // if (!isTryBody) { - // QualType ThisTy = Dtor->getThisObjectType(); - // EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, - // /*Delegating=*/false, LoadCXXThisAddress(), - // ThisTy); - // break; - // } + assert((Body || getTarget().getCXXABI().isMicrosoft()) && + "can't emit a dtor without a body for non-Microsoft ABIs"); + + // Enter the cleanup scopes for virtual bases. + EnterDtorCleanups(Dtor, Dtor_Complete); + + if (!isTryBody) { + QualType ThisTy = Dtor->getFunctionObjectParameterType(); + buildCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, + /*Delegating=*/false, LoadCXXThisAddress(), + ThisTy); + break; + } // Fallthrough: act like we're in the base variant. [[fallthrough]]; @@ -1073,8 +1071,7 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { } // Jump out through the epilogue cleanups. - llvm_unreachable("NYI"); - // DtorEpilogue.ForceCleanup(); + DtorEpilogue.ForceCleanup(); // Exit the try if applicable. if (isTryBody) @@ -1131,6 +1128,41 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, return; } + const CXXRecordDecl *ClassDecl = DD->getParent(); + + // Unions have no bases and do not call field destructors. + if (ClassDecl->isUnion()) + return; + + // The complete-destructor phase just destructs all the virtual bases. + if (DtorType == Dtor_Complete) { + // Poison the vtable pointer such that access after the base + // and member destructors are invoked is invalid. + if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && + SanOpts.has(SanitizerKind::Memory) && ClassDecl->getNumVBases() && + ClassDecl->isPolymorphic()) + assert(!UnimplementedFeature::sanitizeDtor()); + + // We push them in the forward order so that they'll be popped in + // the reverse order. + for (const auto &Base : ClassDecl->vbases()) { + auto *BaseClassDecl = + cast(Base.getType()->castAs()->getDecl()); + + if (BaseClassDecl->hasTrivialDestructor()) { + // Under SanitizeMemoryUseAfterDtor, poison the trivial base class + // memory. For non-trival base classes the same is done in the class + // destructor. + assert(!UnimplementedFeature::sanitizeDtor()); + } else { + EHStack.pushCleanup(NormalAndEHCleanup, BaseClassDecl, + /*BaseIsVirtual*/ true); + } + } + + return; + } + llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 5a857a2db39f..12f2b2037d61 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -53,6 +53,7 @@ struct UnimplementedFeature { static bool emitAsanPrologueOrEpilogue() { return false; } static bool emitCheckedInBoundsGEP() { return false; } static bool pointerOverflowSanitizer() { return false; } + static bool sanitizeDtor() { return false; } // ObjC static bool setObjCGCLValueClass() { return false; } diff --git a/clang/test/CIR/CodeGen/dtors-scopes.cpp b/clang/test/CIR/CodeGen/dtors-scopes.cpp index bb3c23010a78..16d813e71b0c 100644 --- a/clang/test/CIR/CodeGen/dtors-scopes.cpp +++ b/clang/test/CIR/CodeGen/dtors-scopes.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -std=c++20 -fclangir -emit-cir %s -o %t2.cir +// RUN: FileCheck --input-file=%t2.cir %s --check-prefix=DTOR_BODY extern "C" int printf(char const*, ...); struct C { @@ -18,4 +20,11 @@ void dtor1() { // CHECK: %4 = cir.alloca !ty_22C22, cir.ptr , ["c", init] {alignment = 1 : i64} // CHECK: cir.call @_ZN1CC2Ev(%4) : (!cir.ptr) -> () // CHECK: cir.call @_ZN1CD2Ev(%4) : (!cir.ptr) -> () -// CHECK: } \ No newline at end of file +// CHECK: } + +// DTOR_BODY: cir.func private @_ZN1CD2Ev(!cir.ptr) +// DTOR_BODY: cir.func linkonce_odr @_ZN1CD1Ev(%arg0: !cir.ptr + +// DTOR_BODY: cir.call @_ZN1CD2Ev +// DTOR_BODY: cir.return +// DTOR_BODY: } \ No newline at end of file From 2383d42f2ddc2ff9b239ad0306c6e4b31f7efc13 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 1 Dec 2023 18:42:57 -0800 Subject: [PATCH 1265/2301] [CIR][CIRGen] Enable more cases for dtor generation This allows some dtors code (like in dtors-scopes.cpp) to work and run on macOS --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 8 + clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 173 ++++++++++++++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 212 +++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 + clang/test/CIR/CodeGen/dtors-scopes.cpp | 8 +- clang/test/CIR/CodeGen/dtors.cpp | 11 +- 7 files changed, 382 insertions(+), 35 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 57b173c10660..b8e0b9c6ac22 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1801,6 +1801,14 @@ def FuncOp : CIR_Op<"func", [ //===------------------------------------------------------------------===// bool isDeclaration(); + + // FIXME: should be shared with GlobalOp extra declaration. + bool isDeclarationForLinker() { + if (mlir::cir::isAvailableExternallyLinkage(getLinkage())) + return true; + + return isDeclaration(); + } }]; let hasCustomAssemblyFormat = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 8d88746d017e..0b7d3dfa359b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -23,6 +23,179 @@ using namespace clang; using namespace cir; +/// Try to emit a base destructor as an alias to its primary +/// base-class destructor. +bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { + if (!getCodeGenOpts().CXXCtorDtorAliases) + return true; + + // Producing an alias to a base class ctor/dtor can degrade debug quality + // as the debugger cannot tell them apart. + if (getCodeGenOpts().OptimizationLevel == 0) + return true; + + // If sanitizing memory to check for use-after-dtor, do not emit as + // an alias, unless this class owns no members. + if (getCodeGenOpts().SanitizeMemoryUseAfterDtor && + !D->getParent()->field_empty()) + assert(!UnimplementedFeature::sanitizeDtor()); + + // If the destructor doesn't have a trivial body, we have to emit it + // separately. + if (!D->hasTrivialBody()) + return true; + + const CXXRecordDecl *Class = D->getParent(); + + // We are going to instrument this destructor, so give up even if it is + // currently empty. + if (Class->mayInsertExtraPadding()) + return true; + + // If we need to manipulate a VTT parameter, give up. + if (Class->getNumVBases()) { + // Extra Credit: passing extra parameters is perfectly safe + // in many calling conventions, so only bail out if the ctor's + // calling convention is nonstandard. + return true; + } + + // If any field has a non-trivial destructor, we have to emit the + // destructor separately. + for (const auto *I : Class->fields()) + if (I->getType().isDestructedType()) + return true; + + // Try to find a unique base class with a non-trivial destructor. + const CXXRecordDecl *UniqueBase = nullptr; + for (const auto &I : Class->bases()) { + + // We're in the base destructor, so skip virtual bases. + if (I.isVirtual()) + continue; + + // Skip base classes with trivial destructors. + const auto *Base = + cast(I.getType()->castAs()->getDecl()); + if (Base->hasTrivialDestructor()) + continue; + + // If we've already found a base class with a non-trivial + // destructor, give up. + if (UniqueBase) + return true; + UniqueBase = Base; + } + + // If we didn't find any bases with a non-trivial destructor, then + // the base destructor is actually effectively trivial, which can + // happen if it was needlessly user-defined or if there are virtual + // bases with non-trivial destructors. + if (!UniqueBase) + return true; + + // If the base is at a non-zero offset, give up. + const ASTRecordLayout &ClassLayout = astCtx.getASTRecordLayout(Class); + if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero()) + return true; + + // Give up if the calling conventions don't match. We could update the call, + // but it is probably not worth it. + const CXXDestructorDecl *BaseD = UniqueBase->getDestructor(); + if (BaseD->getType()->castAs()->getCallConv() != + D->getType()->castAs()->getCallConv()) + return true; + + GlobalDecl AliasDecl(D, Dtor_Base); + GlobalDecl TargetDecl(BaseD, Dtor_Base); + + // The alias will use the linkage of the referent. If we can't + // support aliases with that linkage, fail. + auto Linkage = getFunctionLinkage(AliasDecl); + + // We can't use an alias if the linkage is not valid for one. + if (!mlir::cir::isValidLinkage(Linkage)) + return true; + + auto TargetLinkage = getFunctionLinkage(TargetDecl); + + // Check if we have it already. + StringRef MangledName = getMangledName(AliasDecl); + auto Entry = getGlobalValue(MangledName); + auto fnOp = dyn_cast_or_null(Entry); + if (Entry && fnOp && !fnOp.isDeclaration()) + return false; + if (Replacements.count(MangledName)) + return false; + + assert(fnOp && "only knows how to handle FuncOp"); + [[maybe_unused]] auto AliasValueType = getTypes().GetFunctionType(AliasDecl); + + // Find the referent. + auto Aliasee = cast(GetAddrOfGlobal(TargetDecl)); + + // Instead of creating as alias to a linkonce_odr, replace all of the uses + // of the aliasee. + if (mlir::cir::isDiscardableIfUnused(Linkage) && + !(TargetLinkage == + mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage && + TargetDecl.getDecl()->hasAttr())) { + // FIXME: An extern template instantiation will create functions with + // linkage "AvailableExternally". In libc++, some classes also define + // members with attribute "AlwaysInline" and expect no reference to + // be generated. It is desirable to reenable this optimisation after + // corresponding LLVM changes. + llvm_unreachable("NYI"); + } + + // If we have a weak, non-discardable alias (weak, weak_odr), like an + // extern template instantiation or a dllexported class, avoid forming it on + // COFF. A COFF weak external alias cannot satisfy a normal undefined + // symbol reference from another TU. The other TU must also mark the + // referenced symbol as weak, which we cannot rely on. + if (mlir::cir::isWeakForLinker(Linkage) && getTriple().isOSBinFormatCOFF()) { + llvm_unreachable("NYI"); + } + + // If we don't have a definition for the destructor yet or the definition + // is + // avaialable_externally, don't emit an alias. We can't emit aliases to + // declarations; that's just not how aliases work. + if (Aliasee.isDeclarationForLinker()) + return true; + + // Don't create an alias to a linker weak symbol. This avoids producing + // different COMDATs in different TUs. Another option would be to + // output the alias both for weak_odr and linkonce_odr, but that + // requires explicit comdat support in the IL. + if (mlir::cir::isWeakForLinker(TargetLinkage)) + llvm_unreachable("NYI"); + + // Create the alias with no name. + auto *AliasFD = dyn_cast(AliasDecl.getDecl()); + assert(AliasFD && "expected FunctionDecl"); + auto Alias = createCIRFunction(getLoc(AliasDecl.getDecl()->getSourceRange()), + "", Aliasee.getFunctionType(), AliasFD); + Alias.setAliasee(Aliasee.getName()); + Alias.setLinkage(Linkage); + mlir::SymbolTable::setSymbolVisibility( + Alias, getMLIRVisibilityFromCIRLinkage(Linkage)); + + // Alias constructors and destructors are always unnamed_addr. + assert(!UnimplementedFeature::unnamedAddr()); + + // Switch any previous uses to the alias. + if (Entry) { + llvm_unreachable("NYI"); + } else { + // Name already set by createCIRFunction + } + + // Finally, set up the alias with its proper name and attributes. + setCommonAttributes(AliasDecl, Alias); + return false; +} + static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, Address DeclPtr) { assert((D->hasGlobalStorage() || diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index c3feb9a96ae6..54483b0b5ea8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -470,7 +470,19 @@ struct CallBaseDtor final : EHScopeStack::Cleanup { : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {} void Emit(CIRGenFunction &CGF, Flags flags) override { - llvm_unreachable("NYI"); + const CXXRecordDecl *DerivedClass = + cast(CGF.CurCodeDecl)->getParent(); + + const CXXDestructorDecl *D = BaseClass->getDestructor(); + // We are already inside a destructor, so presumably the object being + // destroyed should have the expected type. + QualType ThisTy = D->getFunctionObjectParameterType(); + assert(CGF.currSrcLoc && "expected source location"); + Address Addr = CGF.getAddressOfDirectBaseInCompleteClass( + *CGF.currSrcLoc, CGF.LoadCXXThisAddress(), DerivedClass, BaseClass, + BaseIsVirtual); + CGF.buildCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, + /*Delegating=*/false, Addr, ThisTy); } }; @@ -960,6 +972,94 @@ void CIRGenFunction::destroyCXXObject(CIRGenFunction &CGF, Address addr, /*Delegating=*/false, addr, type); } +static bool FieldHasTrivialDestructorBody(ASTContext &Context, + const FieldDecl *Field); + +// FIXME(cir): this should be shared with traditional codegen. +static bool +HasTrivialDestructorBody(ASTContext &Context, + const CXXRecordDecl *BaseClassDecl, + const CXXRecordDecl *MostDerivedClassDecl) { + // If the destructor is trivial we don't have to check anything else. + if (BaseClassDecl->hasTrivialDestructor()) + return true; + + if (!BaseClassDecl->getDestructor()->hasTrivialBody()) + return false; + + // Check fields. + for (const auto *Field : BaseClassDecl->fields()) + if (!FieldHasTrivialDestructorBody(Context, Field)) + return false; + + // Check non-virtual bases. + for (const auto &I : BaseClassDecl->bases()) { + if (I.isVirtual()) + continue; + + const CXXRecordDecl *NonVirtualBase = + cast(I.getType()->castAs()->getDecl()); + if (!HasTrivialDestructorBody(Context, NonVirtualBase, + MostDerivedClassDecl)) + return false; + } + + if (BaseClassDecl == MostDerivedClassDecl) { + // Check virtual bases. + for (const auto &I : BaseClassDecl->vbases()) { + const CXXRecordDecl *VirtualBase = + cast(I.getType()->castAs()->getDecl()); + if (!HasTrivialDestructorBody(Context, VirtualBase, MostDerivedClassDecl)) + return false; + } + } + + return true; +} + +// FIXME(cir): this should be shared with traditional codegen. +static bool FieldHasTrivialDestructorBody(ASTContext &Context, + const FieldDecl *Field) { + QualType FieldBaseElementType = Context.getBaseElementType(Field->getType()); + + const RecordType *RT = FieldBaseElementType->getAs(); + if (!RT) + return true; + + CXXRecordDecl *FieldClassDecl = cast(RT->getDecl()); + + // The destructor for an implicit anonymous union member is never invoked. + if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion()) + return false; + + return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl); +} + +/// Check whether we need to initialize any vtable pointers before calling this +/// destructor. +/// FIXME(cir): this should be shared with traditional codegen. +static bool CanSkipVTablePointerInitialization(CIRGenFunction &CGF, + const CXXDestructorDecl *Dtor) { + const CXXRecordDecl *ClassDecl = Dtor->getParent(); + if (!ClassDecl->isDynamicClass()) + return true; + + // For a final class, the vtable pointer is known to already point to the + // class's vtable. + if (ClassDecl->isEffectivelyFinal()) + return true; + + if (!Dtor->hasTrivialBody()) + return false; + + // Check the fields. + for (const auto *Field : ClassDecl->fields()) + if (!FieldHasTrivialDestructorBody(CGF.getContext(), Field)) + return false; + + return true; +} + /// Emits the body of the current destructor. void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { const CXXDestructorDecl *Dtor = cast(CurGD.getDecl()); @@ -1037,37 +1137,35 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { [[fallthrough]]; case Dtor_Base: - llvm_unreachable("NYI"); assert(Body); - // // Enter the cleanup scopes for fields and non-virtual bases. - // EnterDtorCleanups(Dtor, Dtor_Base); - - // // Initialize the vtable pointers before entering the body. - // if (!CanSkipVTablePointerInitialization(*this, Dtor)) { - // // Insert the llvm.launder.invariant.group intrinsic before - // initializing - // // the vptrs to cancel any previous assumptions we might have made. - // if (CGM.getCodeGenOpts().StrictVTablePointers && - // CGM.getCodeGenOpts().OptimizationLevel > 0) - // CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis()); - // InitializeVTablePointers(Dtor->getParent()); - // } - - // if (isTryBody) - // EmitStmt(cast(Body)->getTryBlock()); - // else if (Body) - // EmitStmt(Body); - // else { - // assert(Dtor->isImplicit() && "bodyless dtor not implicit"); - // // nothing to do besides what's in the epilogue - // } - // // -fapple-kext must inline any call to this dtor into - // // the caller's body. - // if (getLangOpts().AppleKext) - // CurFn->addFnAttr(llvm::Attribute::AlwaysInline); - - // break; + // Enter the cleanup scopes for fields and non-virtual bases. + EnterDtorCleanups(Dtor, Dtor_Base); + + // Initialize the vtable pointers before entering the body. + if (!CanSkipVTablePointerInitialization(*this, Dtor)) { + // Insert the llvm.launder.invariant.group intrinsic before initializing + // the vptrs to cancel any previous assumptions we might have made. + if (CGM.getCodeGenOpts().StrictVTablePointers && + CGM.getCodeGenOpts().OptimizationLevel > 0) + llvm_unreachable("NYI"); + llvm_unreachable("NYI"); + } + + if (isTryBody) + llvm_unreachable("NYI"); + else if (Body) + (void)buildStmt(Body, /*useCurrentScope=*/true); + else { + assert(Dtor->isImplicit() && "bodyless dtor not implicit"); + // nothing to do besides what's in the epilogue + } + // -fapple-kext must inline any call to this dtor into + // the caller's body. + if (getLangOpts().AppleKext) + llvm_unreachable("NYI"); + + break; } // Jump out through the epilogue cleanups. @@ -1163,7 +1261,59 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, return; } - llvm_unreachable("NYI"); + assert(DtorType == Dtor_Base); + // Poison the vtable pointer if it has no virtual bases, but inherits + // virtual functions. + if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && + SanOpts.has(SanitizerKind::Memory) && !ClassDecl->getNumVBases() && + ClassDecl->isPolymorphic()) + assert(!UnimplementedFeature::sanitizeDtor()); + + // Destroy non-virtual bases. + for (const auto &Base : ClassDecl->bases()) { + // Ignore virtual bases. + if (Base.isVirtual()) + continue; + + CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl(); + + if (BaseClassDecl->hasTrivialDestructor()) { + if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && + SanOpts.has(SanitizerKind::Memory) && !BaseClassDecl->isEmpty()) + assert(!UnimplementedFeature::sanitizeDtor()); + } else { + EHStack.pushCleanup(NormalAndEHCleanup, BaseClassDecl, + /*BaseIsVirtual*/ false); + } + } + + // Poison fields such that access after their destructors are + // invoked, and before the base class destructor runs, is invalid. + bool SanitizeFields = CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && + SanOpts.has(SanitizerKind::Memory); + assert(!UnimplementedFeature::sanitizeDtor()); + + // Destroy direct fields. + for (const auto *Field : ClassDecl->fields()) { + if (SanitizeFields) + assert(!UnimplementedFeature::sanitizeDtor()); + + QualType type = Field->getType(); + QualType::DestructionKind dtorKind = type.isDestructedType(); + if (!dtorKind) + continue; + + // Anonymous union members do not have their destructors called. + const RecordType *RT = type->getAsUnionType(); + if (RT && RT->getDecl()->isAnonymousStructOrUnion()) + continue; + + [[maybe_unused]] CleanupKind cleanupKind = getCleanupKind(dtorKind); + llvm_unreachable("EHStack.pushCleanup(...) NYI"); + } + + if (SanitizeFields) + assert(!UnimplementedFeature::sanitizeDtor()); } void CIRGenFunction::buildCXXDestructorCall(const CXXDestructorDecl *DD, diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index f450e45ad08c..4263fac8af8b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -497,7 +497,8 @@ void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { // destructor, there are no fields with a non-trivial destructor, and the body // of the destructor is trivial. if (DD && GD.getDtorType() == Dtor_Base && - CIRGenType != StructorCIRGen::COMDAT) + CIRGenType != StructorCIRGen::COMDAT && + !CGM.tryEmitBaseDestructorAsAlias(DD)) return; // FIXME: The deleting destructor is equivalent to the selected operator diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 414e91f29649..94acc0efccfe 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -429,6 +429,8 @@ class CIRGenModule : public CIRGenTypeCache { /// are emitted lazily. void buildGlobal(clang::GlobalDecl D); + bool tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D); + mlir::Type getCIRType(const clang::QualType &type); /// Set the visibility for the given global. diff --git a/clang/test/CIR/CodeGen/dtors-scopes.cpp b/clang/test/CIR/CodeGen/dtors-scopes.cpp index 16d813e71b0c..d0991dc304c1 100644 --- a/clang/test/CIR/CodeGen/dtors-scopes.cpp +++ b/clang/test/CIR/CodeGen/dtors-scopes.cpp @@ -22,7 +22,13 @@ void dtor1() { // CHECK: cir.call @_ZN1CD2Ev(%4) : (!cir.ptr) -> () // CHECK: } -// DTOR_BODY: cir.func private @_ZN1CD2Ev(!cir.ptr) +// DTOR_BODY: cir.func linkonce_odr @_ZN1CD2Ev{{.*}}{ +// DTOR_BODY: %2 = cir.get_global @printf +// DTOR_BODY: %3 = cir.get_global @".str2" +// DTOR_BODY: %4 = cir.cast(array_to_ptrdecay, %3 +// DTOR_BODY: %5 = cir.call @printf(%4) +// DTOR_BODY: cir.return + // DTOR_BODY: cir.func linkonce_odr @_ZN1CD1Ev(%arg0: !cir.ptr // DTOR_BODY: cir.call @_ZN1CD2Ev diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index 065ad2be8a5d..35ae4b1ff395 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -52,8 +52,15 @@ class B : public A // CHECK: cir.return // CHECK: } -// @B::~B() #1 declaration -// CHECK: cir.func private @_ZN1BD2Ev(!cir.ptr) +// @B::~B() #1 definition call into base @A::~A() +// CHECK: cir.func linkonce_odr @_ZN1BD2Ev{{.*}}{ +// CHECK: cir.call @_ZN1AD2Ev( + +// void foo() +// CHECK: cir.func @_Z3foov() +// CHECK: cir.scope { +// CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1BD2Ev(%0) : (!cir.ptr) -> () // operator delete(void*) declaration // CHECK: cir.func private @_ZdlPv(!cir.ptr) From 863d4c8016a7f5a0df6770f1bb37b790353c8083 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 5 Dec 2023 12:16:21 -0800 Subject: [PATCH 1266/2301] [CIR][CIRGen][NFC] Refactor alias emission --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 22 +------------- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 23 +------------- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 30 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 4 +++ 4 files changed, 36 insertions(+), 43 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 0b7d3dfa359b..31dba6be75eb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -172,27 +172,7 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { llvm_unreachable("NYI"); // Create the alias with no name. - auto *AliasFD = dyn_cast(AliasDecl.getDecl()); - assert(AliasFD && "expected FunctionDecl"); - auto Alias = createCIRFunction(getLoc(AliasDecl.getDecl()->getSourceRange()), - "", Aliasee.getFunctionType(), AliasFD); - Alias.setAliasee(Aliasee.getName()); - Alias.setLinkage(Linkage); - mlir::SymbolTable::setSymbolVisibility( - Alias, getMLIRVisibilityFromCIRLinkage(Linkage)); - - // Alias constructors and destructors are always unnamed_addr. - assert(!UnimplementedFeature::unnamedAddr()); - - // Switch any previous uses to the alias. - if (Entry) { - llvm_unreachable("NYI"); - } else { - // Name already set by createCIRFunction - } - - // Finally, set up the alias with its proper name and attributes. - setCommonAttributes(AliasDecl, Alias); + buildAliasForGlobal("", Entry, AliasDecl, Aliasee, Linkage); return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 4263fac8af8b..48b3623b6ee5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -437,30 +437,9 @@ static void emitConstructorDestructorAlias(CIRGenModule &CGM, auto Aliasee = dyn_cast_or_null(CGM.GetAddrOfGlobal(TargetDecl)); assert(Aliasee && "expected cir.func"); - auto *AliasFD = dyn_cast(AliasDecl.getDecl()); - assert(AliasFD && "expected FunctionDecl"); // Populate actual alias. - auto Alias = - CGM.createCIRFunction(CGM.getLoc(AliasDecl.getDecl()->getSourceRange()), - MangledName, Aliasee.getFunctionType(), AliasFD); - Alias.setAliasee(Aliasee.getName()); - Alias.setLinkage(Linkage); - mlir::SymbolTable::setSymbolVisibility( - Alias, CGM.getMLIRVisibilityFromCIRLinkage(Linkage)); - - // Alias constructors and destructors are always unnamed_addr. - assert(!UnimplementedFeature::unnamedAddr()); - - // Switch any previous uses to the alias. - if (Entry) { - llvm_unreachable("NYI"); - } else { - // Name already set by createCIRFunction - } - - // Finally, set up the alias with its proper name and attributes. - CGM.setCommonAttributes(AliasDecl, Alias); + CGM.buildAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage); } void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 8c92085c396a..f6dca4c1e665 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1583,6 +1583,36 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { return getCIRLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false); } +void CIRGenModule::buildAliasForGlobal(StringRef mangledName, + mlir::Operation *op, GlobalDecl aliasGD, + mlir::cir::FuncOp aliasee, + mlir::cir::GlobalLinkageKind linkage) { + + // Create the alias with no name. + auto *aliasFD = dyn_cast(aliasGD.getDecl()); + assert(aliasFD && "expected FunctionDecl"); + auto alias = + createCIRFunction(getLoc(aliasGD.getDecl()->getSourceRange()), + mangledName, aliasee.getFunctionType(), aliasFD); + alias.setAliasee(aliasee.getName()); + alias.setLinkage(linkage); + mlir::SymbolTable::setSymbolVisibility( + alias, getMLIRVisibilityFromCIRLinkage(linkage)); + + // Alias constructors and destructors are always unnamed_addr. + assert(!UnimplementedFeature::unnamedAddr()); + + // Switch any previous uses to the alias. + if (op) { + llvm_unreachable("NYI"); + } else { + // Name already set by createCIRFunction + } + + // Finally, set up the alias with its proper name and attributes. + setCommonAttributes(aliasGD, alias); +} + mlir::Type CIRGenModule::getCIRType(const QualType &type) { return genTypes.ConvertType(type); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 94acc0efccfe..aeb1313b38c4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -431,6 +431,10 @@ class CIRGenModule : public CIRGenTypeCache { bool tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D); + void buildAliasForGlobal(StringRef mangledName, mlir::Operation *op, + GlobalDecl aliasGD, mlir::cir::FuncOp aliasee, + mlir::cir::GlobalLinkageKind linkage); + mlir::Type getCIRType(const clang::QualType &type); /// Set the visibility for the given global. From 967c0fcd3afa1e64172a82f7c13f72eab7a5c73a Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 6 Dec 2023 11:14:00 -0300 Subject: [PATCH 1267/2301] [CIR][IR] Refactor parsing/printing of implicitly terminated regions The `shouldPrintTerm` and `checkBlockTerminator` were replaced in favor of `omitRegionTerm` and `ensureRegionTerm` respectively. The first is essentially the same method but simplified. The latter was refactored to do only two things: check if the terminator omission of a region is valid and, if so, insert the omitted terminator into the region. The simplifications mostly leverage the fact that we only omit empty yield values in a single-block region. ghstack-source-id: 7b943719ca0fb4ac2d1d29775d7545787c23bcbf Pull Request resolved: https://github.com/llvm/clangir/pull/321 --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 143 +++++++++--------------- clang/test/CIR/IR/invalid.cir | 6 +- 2 files changed, 58 insertions(+), 91 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 8f031d611712..5cbd2a594c4b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -77,7 +77,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << (boolAttr.getValue() ? "true" : "false"); return AliasResult::FinalAlias; } - if (auto bitfield = attr.dyn_cast()) { + if (auto bitfield = attr.dyn_cast()) { os << "bfi_" << bitfield.getName().str(); return AliasResult::FinalAlias; } @@ -151,6 +151,40 @@ static RetTy parseOptionalCIRKeyword(AsmParser &parser, EnumTy defaultValue) { return static_cast(index); } +// Check if a region's termination omission is valid and, if so, creates and +// inserts the omitted terminator into the region. +LogicalResult ensureRegionTerm(OpAsmParser &parser, Region ®ion, + SMLoc errLoc) { + Location eLoc = parser.getEncodedSourceLoc(parser.getCurrentLocation()); + OpBuilder builder(parser.getBuilder().getContext()); + + // Region is empty or properly terminated: nothing to do. + if (region.empty() || + (region.back().mightHaveTerminator() && region.back().getTerminator())) + return success(); + + // Check for invalid terminator omissions. + if (!region.hasOneBlock()) + return parser.emitError(errLoc, + "multi-block region must not omit terminator"); + if (region.back().empty()) + return parser.emitError(errLoc, "empty region must not omit terminator"); + + // Terminator was omited correctly: recreate it. + region.back().push_back(builder.create(eLoc)); + return success(); +} + +// True if the region's terminator should be omitted. +bool omitRegionTerm(mlir::Region &r) { + const auto singleNonEmptyBlock = r.hasOneBlock() && !r.back().empty(); + const auto yieldsNothing = [&r]() { + YieldOp y = dyn_cast(r.back().getTerminator()); + return y && y.isPlain() && y.getArgs().empty(); + }; + return singleNonEmptyBlock && yieldsNothing(); +} + //===----------------------------------------------------------------------===// // AllocaOp //===----------------------------------------------------------------------===// @@ -424,53 +458,6 @@ mlir::LogicalResult ThrowOp::verify() { // IfOp //===----------------------------------------------------------------------===// -static LogicalResult checkBlockTerminator(OpAsmParser &parser, - llvm::SMLoc parserLoc, - std::optional l, Region *r, - bool ensureTerm = true) { - mlir::Builder &builder = parser.getBuilder(); - if (r->hasOneBlock()) { - if (ensureTerm) { - ::mlir::impl::ensureRegionTerminator( - *r, builder, *l, [](OpBuilder &builder, Location loc) { - OperationState state(loc, YieldOp::getOperationName()); - YieldOp::build(builder, state); - return Operation::create(state); - }); - } else { - assert(r && "region must not be empty"); - Block &block = r->back(); - if (block.empty() || !block.back().hasTrait()) { - return parser.emitError( - parser.getCurrentLocation(), - "blocks are expected to be explicitly terminated"); - } - } - return success(); - } - - // Empty regions don't need any handling. - auto &blocks = r->getBlocks(); - if (blocks.empty()) - return success(); - - // Test that at least one block has a yield/return/throw terminator. We can - // probably make this a bit more strict. - for (Block &block : blocks) { - if (block.empty()) - continue; - auto &op = block.back(); - if (op.hasTrait() && - isa(op)) { - return success(); - } - } - - parser.emitError(parserLoc, - "expected at least one block with cir.yield or cir.return"); - return failure(); -} - ParseResult cir::IfOp::parse(OpAsmParser &parser, OperationState &result) { // Create the regions for 'then'. result.regions.reserve(2); @@ -490,8 +477,7 @@ ParseResult cir::IfOp::parse(OpAsmParser &parser, OperationState &result) { if (parser.parseRegion(*thenRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkBlockTerminator(parser, parseThenLoc, result.location, thenRegion) - .failed()) + if (ensureRegionTerm(parser, *thenRegion, parseThenLoc).failed()) return failure(); // If we find an 'else' keyword, parse the 'else' region. @@ -499,8 +485,7 @@ ParseResult cir::IfOp::parse(OpAsmParser &parser, OperationState &result) { auto parseElseLoc = parser.getCurrentLocation(); if (parser.parseRegion(*elseRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkBlockTerminator(parser, parseElseLoc, result.location, elseRegion) - .failed()) + if (ensureRegionTerm(parser, *elseRegion, parseElseLoc).failed()) return failure(); } @@ -510,28 +495,12 @@ ParseResult cir::IfOp::parse(OpAsmParser &parser, OperationState &result) { return success(); } -bool shouldPrintTerm(mlir::Region &r) { - if (!r.hasOneBlock()) - return true; - auto *entryBlock = &r.front(); - if (entryBlock->empty()) - return false; - if (isa(entryBlock->back())) - return true; - if (isa(entryBlock->back())) - return true; - YieldOp y = dyn_cast(entryBlock->back()); - if (y && (!y.isPlain() || !y.getArgs().empty())) - return true; - return false; -} - void cir::IfOp::print(OpAsmPrinter &p) { p << " " << getCondition() << " "; auto &thenRegion = this->getThenRegion(); p.printRegion(thenRegion, /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/shouldPrintTerm(thenRegion)); + /*printBlockTerminators=*/!omitRegionTerm(thenRegion)); // Print the 'else' regions if it exists and has a block. auto &elseRegion = this->getElseRegion(); @@ -539,7 +508,7 @@ void cir::IfOp::print(OpAsmPrinter &p) { p << " else "; p.printRegion(elseRegion, /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/shouldPrintTerm(elseRegion)); + /*printBlockTerminators=*/!omitRegionTerm(elseRegion)); } p.printOptionalAttrDict(getOperation()->getAttrs()); @@ -622,7 +591,7 @@ ParseResult cir::ScopeOp::parse(OpAsmParser &parser, OperationState &result) { if (parser.parseRegion(*scopeRegion, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - if (checkBlockTerminator(parser, loc, result.location, scopeRegion).failed()) + if (ensureRegionTerm(parser, *scopeRegion, loc).failed()) return failure(); // Parse the optional attribute list. @@ -636,7 +605,7 @@ void cir::ScopeOp::print(OpAsmPrinter &p) { auto &scopeRegion = this->getScopeRegion(); p.printRegion(scopeRegion, /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/shouldPrintTerm(scopeRegion)); + /*printBlockTerminators=*/!omitRegionTerm(scopeRegion)); p.printOptionalAttrDict(getOperation()->getAttrs()); } @@ -877,10 +846,11 @@ parseSwitchOp(OpAsmParser &parser, "case region shall not be empty"); } - if (checkBlockTerminator(parser, parserLoc, std::nullopt, &currRegion, - /*ensureTerm=*/false) - .failed()) - return failure(); + if (!(currRegion.back().mightHaveTerminator() && + currRegion.back().getTerminator())) + return parser.emitError(parserLoc, + "case regions must be explicitly terminated"); + return success(); }; @@ -1145,10 +1115,11 @@ parseCatchOp(OpAsmParser &parser, "catch region shall not be empty"); } - if (checkBlockTerminator(parser, parserLoc, std::nullopt, &currRegion, - /*ensureTerm=*/false) - .failed()) - return failure(); + if (!(currRegion.back().mightHaveTerminator() && + currRegion.back().getTerminator())) + return parser.emitError( + parserLoc, "blocks are expected to be explicitly terminated"); + return success(); }; @@ -1399,9 +1370,7 @@ static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, if (ctorRegion.back().empty()) return parser.emitError(parser.getCurrentLocation(), "ctor region shall not be empty"); - if (checkBlockTerminator(parser, parseLoc, - ctorRegion.back().back().getLoc(), &ctorRegion) - .failed()) + if (ensureRegionTerm(parser, ctorRegion, parseLoc).failed()) return failure(); } else { // Parse constant with initializer, examples: @@ -1428,9 +1397,7 @@ static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, if (dtorRegion.back().empty()) return parser.emitError(parser.getCurrentLocation(), "dtor region shall not be empty"); - if (checkBlockTerminator(parser, parseLoc, - dtorRegion.back().back().getLoc(), &dtorRegion) - .failed()) + if (ensureRegionTerm(parser, dtorRegion, parseLoc).failed()) return failure(); } } @@ -2443,8 +2410,8 @@ LogicalResult GetMemberOp::verify() { // FIXME(cir): member type check is disabled for classes as the codegen for // these still need to be patched. - if (!recordTy.isClass() - && recordTy.getMembers()[getIndex()] != getResultTy().getPointee()) + if (!recordTy.isClass() && + recordTy.getMembers()[getIndex()] != getResultTy().getPointee()) return emitError() << "member type mismatch"; return mlir::success(); diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 989cd36b787d..329ed2560a51 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -41,7 +41,7 @@ cir.func @if0() { #true = #cir.bool : !cir.bool cir.func @yield0() { %0 = cir.const(#true) : !cir.bool - cir.if %0 { // expected-error {{custom op 'cir.if' expected at least one block with cir.yield or cir.return}} + cir.if %0 { // expected-error {{custom op 'cir.if' multi-block region must not omit terminator}} cir.br ^a ^a: } @@ -90,10 +90,10 @@ cir.func @yieldcontinue() { cir.func @s0() { %1 = cir.const(#cir.int<2> : !s32i) : !s32i cir.switch (%1 : !s32i) [ - case (equal, 5) { + case (equal, 5) { // expected-error {{custom op 'cir.switch' case regions must be explicitly terminated}} %2 = cir.const(#cir.int<3> : !s32i) : !s32i } - ] // expected-error {{blocks are expected to be explicitly terminated}} + ] cir.return } From 87e6666d8e4fb30e662dcfe8b2b7d5be92492e2f Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 6 Dec 2023 11:14:00 -0300 Subject: [PATCH 1268/2301] [CIR][IR] Refactor ScopeOp assembly format This simplifies and modularizes the assembly format for ScopeOp by using the Tablegen assembly description and a new custom printer/parser that handles regions with omitted terminators. It also fixes an issue where the parser would not correctly handle `cir.scopes` with a return value. ghstack-source-id: c5b9be705113c21117363cb3bd78e19d133c3fc5 Pull Request resolved: https://github.com/llvm/clangir/pull/311 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 ++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 53 +++++++++----------- clang/test/CIR/IR/scope.cir | 27 ++++++++++ 3 files changed, 54 insertions(+), 32 deletions(-) create mode 100644 clang/test/CIR/IR/scope.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b8e0b9c6ac22..221f4b2e1690 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -715,12 +715,14 @@ def ScopeOp : CIR_Op<"scope", [ will be inserted implicitly. }]; - let results = (outs Variadic:$results); + let results = (outs Optional:$results); let regions = (region AnyRegion:$scopeRegion); - let hasCustomAssemblyFormat = 1; let hasVerifier = 1; let skipDefaultBuilders = 1; + let assemblyFormat = [{ + custom($scopeRegion) (`:` type($results)^)? attr-dict + }]; let builders = [ // Scopes for yielding values. diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 5cbd2a594c4b..c75b497ed059 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -185,6 +185,28 @@ bool omitRegionTerm(mlir::Region &r) { return singleNonEmptyBlock && yieldsNothing(); } +//===----------------------------------------------------------------------===// +// CIR Custom Parsers/Printers +//===----------------------------------------------------------------------===// + +static mlir::ParseResult +parseOmittedTerminatorRegion(mlir::OpAsmParser &parser, mlir::Region ®ion) { + auto regionLoc = parser.getCurrentLocation(); + if (parser.parseRegion(region)) + return failure(); + if (ensureRegionTerm(parser, region, regionLoc).failed()) + return failure(); + return success(); +} + +static void printOmittedTerminatorRegion(mlir::OpAsmPrinter &printer, + mlir::cir::ScopeOp &op, + mlir::Region ®ion) { + printer.printRegion(region, + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/!omitRegionTerm(region)); +} + //===----------------------------------------------------------------------===// // AllocaOp //===----------------------------------------------------------------------===// @@ -581,35 +603,6 @@ LogicalResult IfOp::verify() { return success(); } // ScopeOp //===----------------------------------------------------------------------===// -ParseResult cir::ScopeOp::parse(OpAsmParser &parser, OperationState &result) { - // Create one region within 'scope'. - result.regions.reserve(1); - Region *scopeRegion = result.addRegion(); - auto loc = parser.getCurrentLocation(); - - // Parse the scope region. - if (parser.parseRegion(*scopeRegion, /*arguments=*/{}, /*argTypes=*/{})) - return failure(); - - if (ensureRegionTerm(parser, *scopeRegion, loc).failed()) - return failure(); - - // Parse the optional attribute list. - if (parser.parseOptionalAttrDict(result.attributes)) - return failure(); - return success(); -} - -void cir::ScopeOp::print(OpAsmPrinter &p) { - p << ' '; - auto &scopeRegion = this->getScopeRegion(); - p.printRegion(scopeRegion, - /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/!omitRegionTerm(scopeRegion)); - - p.printOptionalAttrDict(getOperation()->getAttrs()); -} - /// Given the region at `index`, or the parent operation if `index` is None, /// return the successor regions. These are the regions that may be selected /// during the flow of control. `operands` is a set of optional attributes that @@ -619,7 +612,7 @@ void ScopeOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // The only region always branch back to the parent operation. if (!point.isParent()) { - regions.push_back(RegionSuccessor(getResults())); + regions.push_back(RegionSuccessor(getODSResults(0))); return; } diff --git a/clang/test/CIR/IR/scope.cir b/clang/test/CIR/IR/scope.cir new file mode 100644 index 000000000000..0cc45c8e389b --- /dev/null +++ b/clang/test/CIR/IR/scope.cir @@ -0,0 +1,27 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +!u32i = !cir.int + +module { + // Should properly print/parse scope with implicit empty yield. + cir.func @implicit_yield() { + cir.scope { + } + // CHECK: cir.scope { + // CHECK: } + cir.return + } + + // Should properly print/parse scope with explicit yield. + cir.func @explicit_yield() { + %0 = cir.scope { + %1 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + cir.yield %1 : !cir.ptr + } : !cir.ptr + // CHECK: %0 = cir.scope { + // [...] + // CHECK: cir.yield %1 : !cir.ptr + // CHECK: } : !cir.ptr + cir.return + } +} From c8ed36113eca4203b84459580d0edc11dc9091d4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 5 Dec 2023 12:17:57 -0800 Subject: [PATCH 1269/2301] [CIR][NFC] Remove comment breadcrumb --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index f6dca4c1e665..4218e06f724e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1587,8 +1587,6 @@ void CIRGenModule::buildAliasForGlobal(StringRef mangledName, mlir::Operation *op, GlobalDecl aliasGD, mlir::cir::FuncOp aliasee, mlir::cir::GlobalLinkageKind linkage) { - - // Create the alias with no name. auto *aliasFD = dyn_cast(aliasGD.getDecl()); assert(aliasFD && "expected FunctionDecl"); auto alias = From 7427972276e17fa4bb0d6ec42d1b49348a808b17 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Dec 2023 14:46:55 -0800 Subject: [PATCH 1270/2301] [CIR][CIRGen] Handle non-type template params during record printing --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 11 ++++++++++- clang/test/CIR/CodeGen/record-names.cpp | 10 ++++++++++ clang/test/CIR/Inputs/std-cxx.h | 13 +++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/record-names.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index d5cfa41b94d7..f6a1c3f5d7cf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -63,7 +63,16 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, outStream << '<'; const auto args = templateSpecialization->getTemplateArgs().asArray(); const auto printer = [&policy, &outStream](const TemplateArgument &arg) { - arg.getAsType().print(outStream, policy); + switch (arg.getKind()) { + case TemplateArgument::Integral: + outStream << arg.getAsIntegral(); + break; + case TemplateArgument::Type: + arg.getAsType().print(outStream, policy); + break; + default: + llvm_unreachable("NYI"); + } }; llvm::interleaveComma(args, outStream, printer); outStream << '>'; diff --git a/clang/test/CIR/CodeGen/record-names.cpp b/clang/test/CIR/CodeGen/record-names.cpp new file mode 100644 index 000000000000..8f28fe5adbd9 --- /dev/null +++ b/clang/test/CIR/CodeGen/record-names.cpp @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +void t() { + std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; +} + +// CHECK: ![[array:.*]] = !cir.struct" \ No newline at end of file diff --git a/clang/test/CIR/Inputs/std-cxx.h b/clang/test/CIR/Inputs/std-cxx.h index b50098ba3026..b4eccca352b0 100644 --- a/clang/test/CIR/Inputs/std-cxx.h +++ b/clang/test/CIR/Inputs/std-cxx.h @@ -1310,4 +1310,17 @@ template return shared_ptr(new T(static_cast(args)...)); } +template struct array { + T arr[N]; + struct iterator { + T *p; + constexpr explicit iterator(T *p) : p(p) {} + constexpr bool operator!=(iterator o) { return p != o.p; } + constexpr iterator &operator++() { ++p; return *this; } + constexpr T &operator*() { return *p; } + }; + constexpr iterator begin() { return iterator(arr); } + constexpr iterator end() { return iterator(arr + N); } +}; + } // namespace std From 9a46b013b993aee01fa6dcc374126b15f96420b6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Dec 2023 15:01:07 -0800 Subject: [PATCH 1271/2301] [CIR][CIRGen] More non-type template param expr for std::array --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 6 +++--- .../test/CIR/CodeGen/{record-names.cpp => std-array.cpp} | 9 ++++++++- 2 files changed, 11 insertions(+), 4 deletions(-) rename clang/test/CIR/CodeGen/{record-names.cpp => std-array.cpp} (61%) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 4e8c32ddc508..2cac7abfc203 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -129,15 +129,15 @@ class ScalarExprEmitter : public StmtVisitor { // VisitScalarExprClassName(...) to get this working. emitError(CGF.getLoc(E->getExprLoc()), "scalar exp no implemented: '") << E->getStmtClassName() << "'"; - assert(0 && "shouldn't be here!"); + llvm_unreachable("NYI"); return {}; } mlir::Value VisitConstantExpr(ConstantExpr *E) { llvm_unreachable("NYI"); } mlir::Value VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); } mlir::Value - VisitSubstnonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) { - llvm_unreachable("NYI"); + VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) { + return Visit(E->getReplacement()); } mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *GE) { llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/record-names.cpp b/clang/test/CIR/CodeGen/std-array.cpp similarity index 61% rename from clang/test/CIR/CodeGen/record-names.cpp rename to clang/test/CIR/CodeGen/std-array.cpp index 8f28fe5adbd9..902f2c44c6e8 100644 --- a/clang/test/CIR/CodeGen/record-names.cpp +++ b/clang/test/CIR/CodeGen/std-array.cpp @@ -5,6 +5,13 @@ void t() { std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + (void)v.end(); } -// CHECK: ![[array:.*]] = !cir.struct" \ No newline at end of file +// CHECK: ![[array:.*]] = !cir.struct" + +// CHECK: {{.*}} = cir.get_member +// CHECK: {{.*}} = cir.cast(array_to_ptrdecay +// CHECK: {{.*}} = cir.const(#cir.int<9> : !u32i) : !u32i + +// CHECK: cir.call @_ZNSt5arrayIhLj9EE8iteratorC1EPh \ No newline at end of file From 3d070aaee2876a2fcadfdf1e9f88a39c60ba3faf Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Dec 2023 18:22:10 -0800 Subject: [PATCH 1272/2301] [CIR] Add a testcase for a usage of std::find --- clang/test/CIR/CodeGen/std-find.cpp | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 clang/test/CIR/CodeGen/std-find.cpp diff --git a/clang/test/CIR/CodeGen/std-find.cpp b/clang/test/CIR/CodeGen/std-find.cpp new file mode 100644 index 000000000000..a04ffd79f41c --- /dev/null +++ b/clang/test/CIR/CodeGen/std-find.cpp @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +// CHECK: ![[array:.*]] = !cir.struct" + +int test_find(unsigned char n = 3) +{ + // CHECK: cir.func @_Z9test_findh(%arg0: !u8i + unsigned num_found = 0; + std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + // CHECK: %[[array_addr:.*]] = cir.alloca ![[array]], cir.ptr , ["v"] + + auto f = std::find(v.begin(), v.end(), n); + // CHECK: {{.*}} cir.call @_ZNSt5arrayIhLj9EE5beginEv(%[[array_addr]]) + // CHECK: {{.*}} cir.call @_ZNSt5arrayIhLj9EE3endEv(%[[array_addr]]) + // CHECK: {{.*}} cir.call @_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_( + + if (f != v.end()) + num_found++; + // CHECK: {{.*}} cir.call @_ZNSt5arrayIhLj9EE3endEv(%[[array_addr]] + // CHECK: %[[neq_cmp:.*]] = cir.call @_ZNSt5arrayIhLj9EE8iteratorneES1_( + // CHECK: cir.if %[[neq_cmp]] + + return num_found; +} \ No newline at end of file From 5ec59349f43f84229d801e3d95af08798f8578be Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Dec 2023 17:09:01 -0800 Subject: [PATCH 1273/2301] [CIR] Add cir.libc.memchr operation --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 33 ++++++++++++++++++++ clang/test/CIR/IR/libc-memchr.cir | 11 +++++++ 2 files changed, 44 insertions(+) create mode 100644 clang/test/CIR/IR/libc-memchr.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 221f4b2e1690..4471bf94af32 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2184,6 +2184,39 @@ def MemCpyOp : CIR_Op<"libc.memcpy"> { }]; } +//===----------------------------------------------------------------------===// +// MemChrOp +//===----------------------------------------------------------------------===// + +def MemChrOp : CIR_Op<"libc.memchr"> { + let arguments = (ins Arg:$src, + CIR_IntType:$pattern, + CIR_IntType:$len); + let summary = "libc's `memchr`"; + let results = (outs Res:$result); + + let description = [{ + Search for `pattern` in data range from `src` to `src` + `len`. + provides a bound to the search in `src`. `result` is a pointer to found + `pattern` or a null pointer. + + Examples: + + ```mlir + %p = cir.libc.memchr(%src : !cir.ptr, %pattern : !u32i, %len : !u32i) -> !cir.ptr + ``` + }]; + + let assemblyFormat = [{ + `(` + $src `:` qualified(type($src)) + `,` $pattern `:` type($pattern) + `,` $len `:` type($len) + `)` `->` qualified(type($result)) attr-dict + }]; + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // FAbsOp //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/libc-memchr.cir b/clang/test/CIR/IR/libc-memchr.cir new file mode 100644 index 000000000000..69c70bbc0dbd --- /dev/null +++ b/clang/test/CIR/IR/libc-memchr.cir @@ -0,0 +1,11 @@ +// RUN: cir-opt %s + +!voidptr = !cir.ptr +!u32i = !cir.int +!u64i = !cir.int +module { + cir.func @f(%src : !voidptr, %pattern : !u32i, %len : !u64i) -> !voidptr { + %ptr = cir.libc.memchr(%src : !voidptr, %pattern : !u32i, %len : !u64i) -> !voidptr + cir.return %ptr : !voidptr + } +} From 8c483d2feeaa129ed6fcf9242c5fcb4ef050c883 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Dec 2023 18:04:17 -0800 Subject: [PATCH 1274/2301] [CIR] Add tablegen constraints for cir::IntType --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 40 +++++++++++++++++++ clang/test/CIR/IR/libc-memchr.cir | 2 +- 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 4471bf94af32..015d7d3ac1eb 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2190,7 +2190,7 @@ def MemCpyOp : CIR_Op<"libc.memcpy"> { def MemChrOp : CIR_Op<"libc.memchr"> { let arguments = (ins Arg:$src, - CIR_IntType:$pattern, + UInt32:$pattern, CIR_IntType:$len); let summary = "libc's `memchr`"; let results = (outs Res:$result); @@ -2210,7 +2210,7 @@ def MemChrOp : CIR_Op<"libc.memchr"> { let assemblyFormat = [{ `(` $src `:` qualified(type($src)) - `,` $pattern `:` type($pattern) + `,` $pattern `,` $len `:` type($len) `)` `->` qualified(type($result)) attr-dict }]; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index bcbb63218e22..3c98999c9a62 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -54,6 +54,46 @@ def CIR_IntType : CIR_Type<"Int", "int", let genVerifyDecl = 1; } +// Constraints + +// Unsigned integer type of a specific width. +class UInt + : Type()">, + CPred<"$_self.cast<::mlir::cir::IntType>().isUnsigned()">, + CPred<"$_self.cast<::mlir::cir::IntType>().getWidth() == " # width> + ]>, width # "-bit unsigned integer", "::mlir::cir::IntType">, + BuildableType< + "mlir::cir::IntType::get($_builder.getContext(), " + # width # ", /*isSigned=*/false)"> { + int bitwidth = width; +} + +def UInt1 : UInt<1>; +def UInt8 : UInt<8>; +def UInt16 : UInt<16>; +def UInt32 : UInt<32>; +def UInt64 : UInt<64>; + +// Signed integer type of a specific width. +class SInt + : Type()">, + CPred<"$_self.cast<::mlir::cir::IntType>().isSigned()">, + CPred<"$_self.cast<::mlir::cir::IntType>().getWidth() == " # width> + ]>, width # "-bit signed integer", "::mlir::cir::IntType">, + BuildableType< + "mlir::cir::IntType::get($_builder.getContext(), " + # width # ", /*isSigned=*/true)"> { + int bitwidth = width; +} + +def SInt1 : SInt<1>; +def SInt8 : SInt<8>; +def SInt16 : SInt<16>; +def SInt32 : SInt<32>; +def SInt64 : SInt<64>; + //===----------------------------------------------------------------------===// // PointerType //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/libc-memchr.cir b/clang/test/CIR/IR/libc-memchr.cir index 69c70bbc0dbd..f957a3e9f379 100644 --- a/clang/test/CIR/IR/libc-memchr.cir +++ b/clang/test/CIR/IR/libc-memchr.cir @@ -5,7 +5,7 @@ !u64i = !cir.int module { cir.func @f(%src : !voidptr, %pattern : !u32i, %len : !u64i) -> !voidptr { - %ptr = cir.libc.memchr(%src : !voidptr, %pattern : !u32i, %len : !u64i) -> !voidptr + %ptr = cir.libc.memchr(%src : !voidptr, %pattern, %len : !u64i) -> !voidptr cir.return %ptr : !voidptr } } From 0e5f1e33d7bf1ba051e7962bd308eee49b22fb4b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Dec 2023 18:32:34 -0800 Subject: [PATCH 1275/2301] [CIR] Add tablgen constraints void pointers --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 8 ++++---- clang/include/clang/CIR/Dialect/IR/CIRTypes.td | 13 +++++++++++++ clang/test/CIR/IR/libc-memchr.cir | 2 +- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 015d7d3ac1eb..58eb44f6342f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2189,11 +2189,11 @@ def MemCpyOp : CIR_Op<"libc.memcpy"> { //===----------------------------------------------------------------------===// def MemChrOp : CIR_Op<"libc.memchr"> { - let arguments = (ins Arg:$src, + let arguments = (ins Arg:$src, UInt32:$pattern, CIR_IntType:$len); let summary = "libc's `memchr`"; - let results = (outs Res:$result); + let results = (outs Res:$result); let description = [{ Search for `pattern` in data range from `src` to `src` + `len`. @@ -2209,10 +2209,10 @@ def MemChrOp : CIR_Op<"libc.memchr"> { let assemblyFormat = [{ `(` - $src `:` qualified(type($src)) + $src `,` $pattern `,` $len `:` type($len) - `)` `->` qualified(type($result)) attr-dict + `)` attr-dict }]; let hasVerifier = 0; } diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 3c98999c9a62..e9c60e763ba8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -220,4 +220,17 @@ def CIR_VoidType : CIR_Type<"Void", "void"> { }]; } +// Constraints + +// Pointer to void +def VoidPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>().getPointee().isa<::mlir::cir::VoidType>()">, + ]>, "void*">, + BuildableType< + "mlir::cir::PointerType::get($_builder.getContext()," + "mlir::cir::VoidType::get($_builder.getContext()))"> { +} + #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/test/CIR/IR/libc-memchr.cir b/clang/test/CIR/IR/libc-memchr.cir index f957a3e9f379..9d0daa70b5ea 100644 --- a/clang/test/CIR/IR/libc-memchr.cir +++ b/clang/test/CIR/IR/libc-memchr.cir @@ -5,7 +5,7 @@ !u64i = !cir.int module { cir.func @f(%src : !voidptr, %pattern : !u32i, %len : !u64i) -> !voidptr { - %ptr = cir.libc.memchr(%src : !voidptr, %pattern, %len : !u64i) -> !voidptr + %ptr = cir.libc.memchr(%src, %pattern, %len : !u64i) cir.return %ptr : !voidptr } } From f41d53b8cde211262d3b38c8019688d8790aa6fa Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 8 Dec 2023 10:34:23 +0300 Subject: [PATCH 1276/2301] [CIR][CodeGen] Inline asm: CIR operation (#326) I will break the PR #308 into pieces and submit them one-by-one. The first PR introduce CIR operation and the `buildAsmStmt` function. The latter is the main place for the future changesm and the former was taken directly from MLIR LLVM IR dialect. As a result, there is nothing really interesting happen here, but now we can at least emit cir for an empty inline assembler. And as a first step --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 36 +++++++++++++++ clang/lib/CIR/CodeGen/CIRAsm.cpp | 48 ++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 + clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 1 + clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + clang/test/CIR/CodeGen/asm.c | 12 +++++ 6 files changed, 100 insertions(+) create mode 100644 clang/lib/CIR/CodeGen/CIRAsm.cpp create mode 100644 clang/test/CIR/CodeGen/asm.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 58eb44f6342f..5183bd5379e8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2386,4 +2386,40 @@ def ZeroInitConstOp : CIR_Op<"llvmir.zeroinit", [Pure]>, let hasVerifier = 0; } +def AsmATT : I32EnumAttrCase<"x86_att", 0>; +def AsmIntel : I32EnumAttrCase<"x86_intel", 1>; + +def AsmFlavor : I32EnumAttr< + "AsmDialect", + "ATT or Intel", + [AsmATT, AsmIntel]> { + let cppNamespace = "::mlir::cir"; +} + +def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { + let description = [{ + The `cir.asm` operation represents C/C++ asm inline. + + Example: + ```C++ + __asm__ volatile("xyz" : : : ); + ``` + + ``` + ```mlir + cir.asm(x86_att, {"xyz"}) -> !void + ``` + }]; + + let results = (outs Optional:$res); + + let arguments = ( + ins StrAttr:$asm_string, + AsmFlavor:$asm_flavor); + + let assemblyFormat = [{ + `(`$asm_flavor`,` `{` $asm_string `}` `)` attr-dict `:` type($res) + }]; +} + #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp new file mode 100644 index 000000000000..91d3f5420a77 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -0,0 +1,48 @@ +#include "clang/Basic/DiagnosticSema.h" +#include "llvm/ADT/StringExtras.h" + +#include "CIRGenFunction.h" +#include "TargetInfo.h" + +using namespace cir; +using namespace clang; +using namespace mlir::cir; + +static AsmDialect inferDialect(const CIRGenModule &cgm, const AsmStmt &S) { + AsmDialect GnuAsmDialect = + cgm.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT + ? AsmDialect::x86_att + : AsmDialect::x86_intel; + + return isa(&S) ? AsmDialect::x86_intel : GnuAsmDialect; +} + +mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { + // Assemble the final asm string. + std::string AsmString = S.generateAsmString(getContext()); + + std::string Constraints; + std::vector ResultRegTypes; + std::vector Args; + + assert(!S.getNumOutputs() && "asm output operands are NYI"); + assert(!S.getNumInputs() && "asm intput operands are NYI"); + assert(!S.getNumClobbers() && "asm clobbers operands are NYI"); + + mlir::Type ResultType; + + if (ResultRegTypes.size() == 1) + ResultType = ResultRegTypes[0]; + else if (ResultRegTypes.size() > 1) { + auto sname = builder.getUniqueAnonRecordName(); + ResultType = + builder.getCompleteStructTy(ResultRegTypes, sname, false, nullptr); + } + + AsmDialect AsmDialect = inferDialect(CGM, S); + + builder.create( + getLoc(S.getAsmLoc()), ResultType, AsmString, AsmDialect); + + return mlir::success(); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index a4707194a04c..feffab919b51 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -882,6 +882,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Type convertType(clang::QualType T); + mlir::LogicalResult buildAsmStmt(const clang::AsmStmt &S); + mlir::LogicalResult buildIfStmt(const clang::IfStmt &S); mlir::LogicalResult buildReturnStmt(const clang::ReturnStmt &S); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 282d7cf9e6f3..8bf702d4c824 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -157,6 +157,7 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. case Stmt::GCCAsmStmtClass: case Stmt::MSAsmStmtClass: + return buildAsmStmt(cast(*S)); case Stmt::CapturedStmtClass: case Stmt::ObjCAtTryStmtClass: case Stmt::ObjCAtThrowStmtClass: diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index a379ed464316..3750f5cae638 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -7,6 +7,7 @@ set( get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR + CIRAsm.cpp CIRGenBuiltin.cpp CIRGenCXX.cpp CIRGenCXXABI.cpp diff --git a/clang/test/CIR/CodeGen/asm.c b/clang/test/CIR/CodeGen/asm.c new file mode 100644 index 000000000000..02d3cc59af8b --- /dev/null +++ b/clang/test/CIR/CodeGen/asm.c @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +//CHECK: cir.asm(x86_att, {""}) +void empty1() { + __asm__ volatile("" : : : ); +} + +//CHECK: cir.asm(x86_att, {"xyz"}) +void empty2() { + __asm__ volatile("xyz" : : : ); +} \ No newline at end of file From 1b8406022eaa0fb1fb0250fc05b8bc2dac0b09d6 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 8 Dec 2023 10:37:29 +0300 Subject: [PATCH 1277/2301] [CIR][CodeGen] Add dynamic AllocaOp support (#340) This PR adds dynamic stack allocation into `AllocaOp` that will be useful in future - currently I work on variable length array support. So I start to make tiny PRs in advance) No changes in tests needed, I tried to make the changes as smooth as possible, so no existing `AllocaOp` usages need to be changed. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 21 ++++++++++++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 12 +++++++---- clang/test/CIR/IR/alloca.cir | 21 +++++++++++++++++++ clang/test/CIR/IR/invalid.cir | 10 +++++++++ clang/test/CIR/Lowering/alloca.cir | 17 +++++++++++++++ 5 files changed, 76 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/IR/alloca.cir create mode 100644 clang/test/CIR/Lowering/alloca.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 5183bd5379e8..0d367964d61c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -284,6 +284,10 @@ def AllocaOp : CIR_Op<"alloca", [ cases, the first use contains the initialization (a cir.store, a cir.call to a ctor, etc). + The `dynAllocSize` specifies the size to dynamically allocate on the stack + and ignores the allocation size based on the original type. This is useful + when handling VLAs and is omitted when declaring regular local variables. + The result type is a pointer to the input's type. Example: @@ -299,6 +303,7 @@ def AllocaOp : CIR_Op<"alloca", [ }]; let arguments = (ins + Optional:$dynAllocSize, TypeAttr:$allocaType, StrAttr:$name, UnitAttr:$init, @@ -313,18 +318,32 @@ def AllocaOp : CIR_Op<"alloca", [ let builders = [ OpBuilder<(ins "Type":$addr, "Type":$allocaType, "StringRef":$name, - "IntegerAttr":$alignment)> + "IntegerAttr":$alignment)>, + + OpBuilder<(ins "Type":$addr, + "Type":$allocaType, + "StringRef":$name, + "IntegerAttr":$alignment, + "Value":$dynAllocSize), + [{ + if (dynAllocSize) + $_state.addOperands(dynAllocSize); + build($_builder, $_state, addr, allocaType, name, alignment); + }]> ]; let extraClassDeclaration = [{ // Whether the alloca input type is a pointer. bool isPointerType() { return getAllocaType().isa<::mlir::cir::PointerType>(); } + + bool isDynamic() { return (bool)getDynAllocSize(); } }]; // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. let assemblyFormat = [{ $allocaType `,` `cir.ptr` type($addr) `,` + ($dynAllocSize^ `:` type($dynAllocSize) `,`)? `[` $name (`,` `init` $init^)? `]` diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 775577f7c6b8..86b983035055 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -890,13 +890,17 @@ class CIRAllocaLowering mlir::LogicalResult matchAndRewrite(mlir::cir::AllocaOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Value one = rewriter.create( - op.getLoc(), typeConverter->convertType(rewriter.getIndexType()), - rewriter.getIntegerAttr(rewriter.getIndexType(), 1)); + mlir::Value size = + op.isDynamic() + ? adaptor.getDynAllocSize() + : rewriter.create( + op.getLoc(), + typeConverter->convertType(rewriter.getIndexType()), + rewriter.getIntegerAttr(rewriter.getIndexType(), 1)); auto elementTy = getTypeConverter()->convertType(op.getAllocaType()); auto resultTy = mlir::LLVM::LLVMPointerType::get(getContext()); rewriter.replaceOpWithNewOp( - op, resultTy, elementTy, one, op.getAlignmentAttr().getInt()); + op, resultTy, elementTy, size, op.getAlignmentAttr().getInt()); return mlir::success(); } }; diff --git a/clang/test/CIR/IR/alloca.cir b/clang/test/CIR/IR/alloca.cir new file mode 100644 index 000000000000..71293f6a0948 --- /dev/null +++ b/clang/test/CIR/IR/alloca.cir @@ -0,0 +1,21 @@ +// Test the CIR operations can parse and print correctly (roundtrip) + +// RUN: cir-opt %s | cir-opt | FileCheck %s +!s32i = !cir.int +!u64i = !cir.int + +module { + cir.func @foo(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} + cir.return + } +} + +//CHECK: module { + +//CHECK-NEXT: cir.func @foo(%arg0: !s32i) { +//CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} +//CHECK-NEXT: cir.return +//CHECK-NEXT: } + +//CHECK: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 329ed2560a51..5571dd030f25 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -725,3 +725,13 @@ module { // expected-error@+1 {{record already defined}} !struct = !cir.struct}> + +// ----- +!s32i = !cir.int +module { + cir.func @tmp(%arg0: f32) { + // expected-error@+1 {{operand #0 must be Integer type}} + %0 = cir.alloca !s32i, cir.ptr , %arg0 : f32, ["tmp"] + cir.return + } +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/alloca.cir b/clang/test/CIR/Lowering/alloca.cir new file mode 100644 index 000000000000..faa99843ca74 --- /dev/null +++ b/clang/test/CIR/Lowering/alloca.cir @@ -0,0 +1,17 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR + +!s32i = !cir.int + +module { + cir.func @foo(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @foo(%arg0: i32) attributes {cir.extra_attrs = #cir} { +// MLIR-NEXT: %0 = llvm.alloca %arg0 x i32 {alignment = 16 : i64} : (i32) -> !llvm.ptr +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } From 99205ff2a7a782e35bd5dbbbfefe3d3f11e56443 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Sat, 9 Dec 2023 01:50:27 +0300 Subject: [PATCH 1278/2301] [CIR][Lowering] Fix loop lowering for top-level break/continue (#349) This PR fixes a couple of corner cases connected with the `YieldOp` lowering in loops. Previously, in #211 we introduced `lowerNestedBreakContinue` but we didn't check that `YieldOp` may belong to the same region, i.e. it is not nested, e.g. ``` while(1) { break; } ``` Hence the error `op already replaced`. Next, we fix `yield` lowering for `ifOp` and `switchOp` but didn't cover `scopeOp`, and the same error occurred. This PR fixes this as well. I added two tests - with no checks actually, just to make sure no more crashes happen. fixes #324 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 27 +++++-- clang/test/CIR/Lowering/loop.cir | 78 +++++++++++++++++++ 2 files changed, 100 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 86b983035055..fd77f49fd294 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -408,8 +408,15 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { lowerNestedBreakContinue(mlir::Region &loopBody, mlir::Block *exitBlock, mlir::Block *continueBlock, mlir::ConversionPatternRewriter &rewriter) const { + // top-level yields are lowered in matchAndRewrite + auto isNested = [&](mlir::Operation *op) { + return op->getParentRegion() != &loopBody; + }; auto processBreak = [&](mlir::Operation *op) { + if (!isNested(op)) + return mlir::WalkResult::advance(); + if (isa( *op)) // don't process breaks in nested loops and switches return mlir::WalkResult::skip(); @@ -421,6 +428,9 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { }; auto processContinue = [&](mlir::Operation *op) { + if (!isNested(op)) + return mlir::WalkResult::advance(); + if (isa( *op)) // don't process continues in nested loops return mlir::WalkResult::skip(); @@ -490,7 +500,10 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { // Branch from body to condition or to step on for-loop cases. rewriter.setInsertionPoint(bodyYield); - rewriter.replaceOpWithNewOp(bodyYield, &stepBlock); + auto bodyYieldDest = bodyYield.getKind() == mlir::cir::YieldOpKind::Break + ? continueBlock + : &stepBlock; + rewriter.replaceOpWithNewOp(bodyYield, bodyYieldDest); // Is a for loop: branch from step to condition. if (kind == LoopKind::For) { @@ -822,11 +835,15 @@ class CIRScopeOpLowering // Stack restore before leaving the body region. rewriter.setInsertionPointToEnd(afterBody); auto yieldOp = cast(afterBody->getTerminator()); - auto branchOp = rewriter.replaceOpWithNewOp( - yieldOp, yieldOp.getArgs(), continueBlock); - // // Insert stack restore before jumping out of the body of the region. - rewriter.setInsertionPoint(branchOp); + if (!isLoopYield(yieldOp)) { + auto branchOp = rewriter.replaceOpWithNewOp( + yieldOp, yieldOp.getArgs(), continueBlock); + + // // Insert stack restore before jumping out of the body of the region. + rewriter.setInsertionPoint(branchOp); + } + // TODO(CIR): stackrestore? // rewriter.create(loc, stackSaveOp); diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index 9ac1c672886a..685792a5b342 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -217,4 +217,82 @@ module { // MLIR-NEXT: llvm.br ^bb6 // MLIR-NEXT: ^bb6: // MLIR-NEXT: llvm.return + + // test corner case + // while (1) { + // break; + // } + cir.func @whileCornerCase() { + cir.scope { + cir.loop while(cond : { + %0 = cir.const(#cir.int<1> : !s32i) : !s32i + %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool + cir.brcond %1 ^bb1, ^bb2 + ^bb1: // pred: ^bb0 + cir.yield continue + ^bb2: // pred: ^bb0 + cir.yield + }, step : { + cir.yield + }) { + cir.yield break + } + } + cir.return + } + // MLIR: llvm.func @whileCornerCase() + // MLIR: %0 = llvm.mlir.constant(1 : i32) : i32 + // MLIR-NEXT: %1 = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: %2 = llvm.icmp "ne" %0, %1 : i32 + // MLIR-NEXT: %3 = llvm.zext %2 : i1 to i8 + // MLIR-NEXT: %4 = llvm.trunc %3 : i8 to i + // MLIR-NEXT: llvm.cond_br %4, ^bb3, ^bb4 + // MLIR-NEXT: ^bb3: // pred: ^bb2 + // MLIR-NEXT: llvm.br ^bb5 + // MLIR-NEXT: ^bb4: // pred: ^bb2 + // MLIR-NEXT: llvm.br ^bb6 + // MLIR-NEXT: ^bb5: // pred: ^bb3 + // MLIR-NEXT: llvm.br ^bb6 + // MLIR-NEXT: ^bb6: // 2 preds: ^bb4, ^bb5 + // MLIR-NEXT: llvm.br ^bb7 + // MLIR-NEXT: ^bb7: // pred: ^bb6 + // MLIR-NEXT: llvm.return + + // test corner case - no fails during the lowering + // for (;;) { + // break; + // } + cir.func @forCornerCase() { + cir.scope { + cir.loop for(cond : { + cir.yield continue + }, step : { + cir.yield + }) { + cir.scope { + cir.yield break + } + cir.yield + } + } + cir.return + } +// MLIR: llvm.func @forCornerCase() +// MLIR: llvm.br ^bb1 +// MLIR-NEXT: ^bb1: // pred: ^bb0 +// MLIR-NEXT: llvm.br ^bb2 +// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb6 +// MLIR-NEXT: llvm.br ^bb3 +// MLIR-NEXT: ^bb3: // pred: ^bb2 +// MLIR-NEXT: llvm.br ^bb4 +// MLIR-NEXT: ^bb4: // pred: ^bb3 +// MLIR-NEXT: llvm.br ^bb7 +// MLIR-NEXT: ^bb5: // no predecessors +// MLIR-NEXT: llvm.br ^bb6 +// MLIR-NEXT: ^bb6: // pred: ^bb5 +// MLIR-NEXT: llvm.br ^bb2 +// MLIR-NEXT: ^bb7: // pred: ^bb4 +// MLIR-NEXT: llvm.br ^bb8 +// MLIR-NEXT: ^bb8: // pred: ^bb7 +// MLIR-NEXT: llvm.return } From 5642459a46f4d771596c11c251e10e6aac9ea303 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Dec 2023 18:32:24 -0800 Subject: [PATCH 1279/2301] [CIR] Make cir.libc.memchr more constrained --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 13 ++++++------- clang/test/CIR/IR/libc-memchr.cir | 6 +++--- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0d367964d61c..f33749a2e63f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2208,9 +2208,11 @@ def MemCpyOp : CIR_Op<"libc.memcpy"> { //===----------------------------------------------------------------------===// def MemChrOp : CIR_Op<"libc.memchr"> { + // TODO: instead of using UInt64 for len, we could make it constrained on + // size_t (64 or 32) and have a builder that does the right job. let arguments = (ins Arg:$src, - UInt32:$pattern, - CIR_IntType:$len); + SInt32:$pattern, + UInt64:$len); let summary = "libc's `memchr`"; let results = (outs Res:$result); @@ -2222,16 +2224,13 @@ def MemChrOp : CIR_Op<"libc.memchr"> { Examples: ```mlir - %p = cir.libc.memchr(%src : !cir.ptr, %pattern : !u32i, %len : !u32i) -> !cir.ptr + %p = cir.libc.memchr(%src, %pattern, %len) -> !cir.ptr ``` }]; let assemblyFormat = [{ `(` - $src - `,` $pattern - `,` $len `:` type($len) - `)` attr-dict + $src `,` $pattern `,` $len `)` attr-dict }]; let hasVerifier = 0; } diff --git a/clang/test/CIR/IR/libc-memchr.cir b/clang/test/CIR/IR/libc-memchr.cir index 9d0daa70b5ea..014414322819 100644 --- a/clang/test/CIR/IR/libc-memchr.cir +++ b/clang/test/CIR/IR/libc-memchr.cir @@ -1,11 +1,11 @@ // RUN: cir-opt %s !voidptr = !cir.ptr -!u32i = !cir.int +!s32i = !cir.int !u64i = !cir.int module { - cir.func @f(%src : !voidptr, %pattern : !u32i, %len : !u64i) -> !voidptr { - %ptr = cir.libc.memchr(%src, %pattern, %len : !u64i) + cir.func @f(%src : !voidptr, %pattern : !s32i, %len : !u64i) -> !voidptr { + %ptr = cir.libc.memchr(%src, %pattern, %len) cir.return %ptr : !voidptr } } From b7479962addbf034a2e088df843629b393925d54 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Dec 2023 18:53:54 -0800 Subject: [PATCH 1280/2301] [CIR] Add skeleton for Idiom Recognizer pass Among other long term things, in the short term this will be used to map some higher level library calls into CIR operations. --- clang/include/clang/CIR/Dialect/Passes.h | 2 + clang/include/clang/CIR/Dialect/Passes.td | 11 ++++ clang/lib/CIR/CodeGen/CIRPasses.cpp | 1 + .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../Dialect/Transforms/IdiomRecognizer.cpp | 64 +++++++++++++++++++ .../test/CIR/Transforms/idiom-recognizer.cpp | 4 ++ 6 files changed, 83 insertions(+) create mode 100644 clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp create mode 100644 clang/test/CIR/Transforms/idiom-recognizer.cpp diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index abf915bf687a..200fc956d08d 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -30,6 +30,8 @@ std::unique_ptr createMergeCleanupsPass(); std::unique_ptr createDropASTPass(); std::unique_ptr createLoweringPreparePass(); std::unique_ptr createLoweringPreparePass(clang::ASTContext *astCtx); +std::unique_ptr createIdiomRecognizerPass(); +std::unique_ptr createIdiomRecognizerPass(clang::ASTContext *astCtx); //===----------------------------------------------------------------------===// // Registration diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 08c95ab92ed7..a31bc2a30388 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -75,4 +75,15 @@ def LoweringPrepare : Pass<"cir-lowering-prepare"> { let dependentDialects = ["cir::CIRDialect"]; } +def IdiomRecognizer : Pass<"cir-idiom-recognizer"> { + let summary = "Raise calls to C/C++ libraries to CIR operations"; + let description = [{ + This pass recognize idiomatic C++ usage and incorporate C++ standard + containers, library functions calls, and types into CIR operation, + attributes and types. + }]; + let constructor = "mlir::createIdiomRecognizerPass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + #endif // MLIR_DIALECT_CIR_PASSES diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index e278f4c22bde..653148a5a8d4 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -45,6 +45,7 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, pm.addPass(std::move(lifetimePass)); } + pm.addPass(mlir::createIdiomRecognizerPass(&astCtx)); pm.addPass(mlir::createLoweringPreparePass(&astCtx)); // FIXME: once CIRCodenAction fixes emission other than CIR we diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 82952f42a2d2..36bfcd3de951 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -3,6 +3,7 @@ add_clang_library(MLIRCIRTransforms LoweringPrepare.cpp MergeCleanups.cpp DropAST.cpp + IdiomRecognizer.cpp DEPENDS MLIRCIRPassIncGen diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp new file mode 100644 index 000000000000..1fc7e5d6509c --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -0,0 +1,64 @@ +//===- IdiomRecognizer.cpp - pareparation work for LLVM lowering ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "PassDetail.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/Region.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Mangle.h" +#include "clang/Basic/Module.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Twine.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/Path.h" + +using cir::CIRBaseBuilderTy; +using namespace mlir; +using namespace mlir::cir; + +namespace { + +struct IdiomRecognizerPass : public IdiomRecognizerBase { + IdiomRecognizerPass() = default; + void runOnOperation() override; + + /// + /// AST related + /// ----------- + clang::ASTContext *astCtx; + void setASTContext(clang::ASTContext *c) { astCtx = c; } + + /// Tracks current module. + ModuleOp theModule; +}; +} // namespace + +void IdiomRecognizerPass::runOnOperation() { + assert(astCtx && "Missing ASTContext, please construct with the right ctor"); + auto *op = getOperation(); + if (isa<::mlir::ModuleOp>(op)) + theModule = cast<::mlir::ModuleOp>(op); +} + +std::unique_ptr mlir::createIdiomRecognizerPass() { + return std::make_unique(); +} + +std::unique_ptr +mlir::createIdiomRecognizerPass(clang::ASTContext *astCtx) { + auto pass = std::make_unique(); + pass->setASTContext(astCtx); + return std::move(pass); +} diff --git a/clang/test/CIR/Transforms/idiom-recognizer.cpp b/clang/test/CIR/Transforms/idiom-recognizer.cpp new file mode 100644 index 000000000000..ee53b606c61b --- /dev/null +++ b/clang/test/CIR/Transforms/idiom-recognizer.cpp @@ -0,0 +1,4 @@ +// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR + +// CIR: IR Dump After IdiomRecognizer (cir-idiom-recognizer) + From f264e2858e7ed7c3262f689719d76c767bcbe547 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Dec 2023 22:31:20 -0800 Subject: [PATCH 1281/2301] [CIR][NFC] Make ASTDecl more about AST than Decls --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 24 ++++++------ .../clang/CIR/Interfaces/ASTAttrInterfaces.td | 38 +++++++++---------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 8706a9019c66..34a221497cd5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -379,7 +379,7 @@ def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> { // AST Wrappers //===----------------------------------------------------------------------===// -class ASTDecl traits = []> +class AST traits = []> : CIR_Attr { string clang_name = !strconcat("const clang::", name, " *"); @@ -391,7 +391,7 @@ class ASTDecl traits = []> This always implies a non-null AST reference (verified). }]; - let parameters = (ins clang_name:$astDecl); + let parameters = (ins clang_name:$ast); // Printing and parsing available in CIRDialect.cpp let hasCustomAssemblyFormat = 1; @@ -419,33 +419,33 @@ class ASTDecl traits = []> }]; } -def ASTDeclAttr : ASTDecl<"Decl", "decl", [ASTDeclInterface]>; +def ASTDeclAttr : AST<"Decl", "decl", [ASTDeclInterface]>; -def ASTFunctionDeclAttr : ASTDecl<"FunctionDecl", "function.decl", +def ASTFunctionDeclAttr : AST<"FunctionDecl", "function.decl", [ASTFunctionDeclInterface]>; -def ASTCXXMethodDeclAttr : ASTDecl<"CXXMethodDecl", "cxxmethod.decl", +def ASTCXXMethodDeclAttr : AST<"CXXMethodDecl", "cxxmethod.decl", [ASTCXXMethodDeclInterface]>; -def ASTCXXConstructorDeclAttr : ASTDecl<"CXXConstructorDecl", +def ASTCXXConstructorDeclAttr : AST<"CXXConstructorDecl", "cxxconstructor.decl", [ASTCXXConstructorDeclInterface]>; -def ASTCXXConversionDeclAttr : ASTDecl<"CXXConversionDecl", +def ASTCXXConversionDeclAttr : AST<"CXXConversionDecl", "cxxconversion.decl", [ASTCXXConversionDeclInterface]>; -def ASTCXXDestructorDeclAttr : ASTDecl<"CXXDestructorDecl", +def ASTCXXDestructorDeclAttr : AST<"CXXDestructorDecl", "cxxdestructor.decl", [ASTCXXDestructorDeclInterface]>; -def ASTVarDeclAttr : ASTDecl<"VarDecl", "var.decl", +def ASTVarDeclAttr : AST<"VarDecl", "var.decl", [ASTVarDeclInterface]>; -def ASTTypeDeclAttr: ASTDecl<"TypeDecl", "type.decl", +def ASTTypeDeclAttr: AST<"TypeDecl", "type.decl", [ASTTypeDeclInterface]>; -def ASTTagDeclAttr : ASTDecl<"TagDecl", "tag.decl", +def ASTTagDeclAttr : AST<"TagDecl", "tag.decl", [ASTTagDeclInterface]>; -def ASTRecordDeclAttr : ASTDecl<"RecordDecl", "record.decl", +def ASTRecordDeclAttr : AST<"RecordDecl", "record.decl", [ASTRecordDeclInterface]>; //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td index 8aca1d9c8e63..3c7b6894efe6 100644 --- a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td @@ -16,17 +16,17 @@ let cppNamespace = "::mlir::cir" in { let methods = [ InterfaceMethod<"", "bool", "hasOwnerAttr", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->template hasAttr(); + return $_attr.getAst()->template hasAttr(); }] >, InterfaceMethod<"", "bool", "hasPointerAttr", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->template hasAttr(); + return $_attr.getAst()->template hasAttr(); }] >, InterfaceMethod<"", "bool", "hasInitPriorityAttr", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->template hasAttr(); + return $_attr.getAst()->template hasAttr(); }] > ]; @@ -37,12 +37,12 @@ let cppNamespace = "::mlir::cir" in { let methods = [ InterfaceMethod<"", "clang::DeclarationName", "getDeclName", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->getDeclName(); + return $_attr.getAst()->getDeclName(); }] >, InterfaceMethod<"", "llvm::StringRef", "getName", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->getName(); + return $_attr.getAst()->getName(); }] > ]; @@ -60,13 +60,13 @@ let cppNamespace = "::mlir::cir" in { InterfaceMethod<"", "void", "mangleDynamicInitializer", (ins "llvm::raw_ostream&":$Out), [{}], /*defaultImplementation=*/ [{ std::unique_ptr MangleCtx( - $_attr.getAstDecl()->getASTContext().createMangleContext()); - MangleCtx->mangleDynamicInitializer($_attr.getAstDecl(), Out); + $_attr.getAst()->getASTContext().createMangleContext()); + MangleCtx->mangleDynamicInitializer($_attr.getAst(), Out); }] >, InterfaceMethod<"", "clang::VarDecl::TLSKind", "getTLSKind", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->getTLSKind(); + return $_attr.getAst()->getTLSKind(); }] > ]; @@ -77,12 +77,12 @@ let cppNamespace = "::mlir::cir" in { let methods = [ InterfaceMethod<"", "bool", "isOverloadedOperator", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->isOverloadedOperator(); + return $_attr.getAst()->isOverloadedOperator(); }] >, InterfaceMethod<"", "bool", "isStatic", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->isStatic(); + return $_attr.getAst()->isStatic(); }] > ]; @@ -93,21 +93,21 @@ let cppNamespace = "::mlir::cir" in { let methods = [ InterfaceMethod<"", "bool", "isCopyAssignmentOperator", (ins), [{}], /*defaultImplementation=*/ [{ - if (auto decl = dyn_cast($_attr.getAstDecl())) + if (auto decl = dyn_cast($_attr.getAst())) return decl->isCopyAssignmentOperator(); return false; }] >, InterfaceMethod<"", "bool", "isMoveAssignmentOperator", (ins), [{}], /*defaultImplementation=*/ [{ - if (auto decl = dyn_cast($_attr.getAstDecl())) + if (auto decl = dyn_cast($_attr.getAst())) return decl->isMoveAssignmentOperator(); return false; }] >, InterfaceMethod<"", "bool", "isConst", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->isConst(); + return $_attr.getAst()->isConst(); }] > ]; @@ -118,12 +118,12 @@ let cppNamespace = "::mlir::cir" in { let methods = [ InterfaceMethod<"", "bool", "isDefaultConstructor", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->isDefaultConstructor(); + return $_attr.getAst()->isDefaultConstructor(); }] >, InterfaceMethod<"", "bool", "isCopyConstructor", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->isCopyConstructor(); + return $_attr.getAst()->isCopyConstructor(); }] > ]; @@ -143,7 +143,7 @@ let cppNamespace = "::mlir::cir" in { let methods = [ InterfaceMethod<"", "clang::TagTypeKind", "getTagKind", (ins), [{}], /*defaultImplementation=*/ [{ - return $_attr.getAstDecl()->getTagKind(); + return $_attr.getAst()->getTagKind(); }] > ]; @@ -154,16 +154,16 @@ let cppNamespace = "::mlir::cir" in { let methods = [ InterfaceMethod<"", "bool", "isLambda", (ins), [{}], /*defaultImplementation=*/ [{ - if (auto ast = clang::dyn_cast($_attr.getAstDecl())) + if (auto ast = clang::dyn_cast($_attr.getAst())) return ast->isLambda(); return false; }] >, InterfaceMethod<"", "bool", "hasPromiseType", (ins), [{}], /*defaultImplementation=*/ [{ - if (!clang::isa($_attr.getAstDecl())) + if (!clang::isa($_attr.getAst())) return false; - for (const auto *sub : $_attr.getAstDecl()->decls()) { + for (const auto *sub : $_attr.getAst()->decls()) { if (auto subRec = clang::dyn_cast(sub)) { if (subRec->getDeclName().isIdentifier() && subRec->getName() == "promise_type") { From c6025e774eba54d2d64720e814891bdabb516cf5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Dec 2023 22:44:24 -0800 Subject: [PATCH 1282/2301] [CIR] Introduce mappings for clang::Expr --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 6 ++++++ clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 34a221497cd5..b9fe93b98de6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -448,6 +448,12 @@ def ASTTagDeclAttr : AST<"TagDecl", "tag.decl", def ASTRecordDeclAttr : AST<"RecordDecl", "record.decl", [ASTRecordDeclInterface]>; +def ASTExprAttr : AST<"Expr", "expr", + [ASTExprInterface]>; + +def ASTCallExprAttr : AST<"CallExpr", "call.expr", + [ASTCallExprInterface]>; + //===----------------------------------------------------------------------===// // ExtraFuncAttr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td index 3c7b6894efe6..e3702e7faa6c 100644 --- a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td @@ -186,6 +186,12 @@ let cppNamespace = "::mlir::cir" in { let constBuilderCall = "$0"; } + def ASTExprInterface : AttrInterface<"ASTExprInterface"> {} + + def ASTCallExprInterface : AttrInterface<"ASTCallExprInterface", + [ASTExprInterface]> {} + + } // namespace mlir::cir #endif // MLIR_CIR_INTERFACES_AST_ATTR_INTERFACES From 1f1f5479b9315bf992d03e47b9bae24455d6b867 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Dec 2023 22:48:51 -0800 Subject: [PATCH 1283/2301] [CIR][NFC] Add ASTCallExprInterface to CallOp --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index f33749a2e63f..eb6c4be057ff 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1841,7 +1841,8 @@ def FuncOp : CIR_Op<"func", [ //===----------------------------------------------------------------------===// def CallOp : CIR_Op<"call", - [DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods]> { let summary = "call operation"; let description = [{ The `call` operation represents a direct call to a function that is within @@ -1867,7 +1868,9 @@ def CallOp : CIR_Op<"call", ``` }]; - let arguments = (ins OptionalAttr:$callee, Variadic:$operands); + let arguments = (ins OptionalAttr:$callee, + Variadic:$operands, + OptionalAttr:$ast); let results = (outs Variadic); let builders = [ From 054ba6f81c5c1a1bf61e7121a63a5daa79c8c9a1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Dec 2023 23:26:52 -0800 Subject: [PATCH 1284/2301] [CIR][CIRGen] Add CallExpr nodes to cir.call when possible It's not used just yet, and the ast was removed from printing given we don't have a serialization story anyways, so it's less aggressive with existing tests. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 7 ++++++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 1 + 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 8688d8dcfdd2..fb0322a0f341 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -363,7 +363,8 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, mlir::cir::CallOp *callOrInvoke, - bool IsMustTail, mlir::Location loc) { + bool IsMustTail, mlir::Location loc, + std::optional E) { auto builder = CGM.getBuilder(); // FIXME: We no longer need the types from CallArgs; lift up and simplify @@ -618,6 +619,10 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, llvm_unreachable("expected call variant to be handled"); } + if (E) + theCall.setAstAttr( + mlir::cir::ASTCallExprAttr::get(builder.getContext(), *E)); + if (callOrInvoke) callOrInvoke = &theCall; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 7af792926def..368ecac5f0ed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1120,7 +1120,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, assert(!MustTailCall && "Must tail NYI"); mlir::cir::CallOp callOP = nullptr; RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, &callOP, - E == MustTailCall, getLoc(E->getExprLoc())); + E == MustTailCall, getLoc(E->getExprLoc()), E); assert(!getDebugInfo() && "Debug Info NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index feffab919b51..834755cf9609 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -747,7 +747,8 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, mlir::cir::CallOp *callOrInvoke, - bool IsMustTail, mlir::Location loc); + bool IsMustTail, mlir::Location loc, + std::optional E = std::nullopt); RValue buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, @@ -755,7 +756,7 @@ class CIRGenFunction : public CIRGenTypeCache { bool IsMustTail = false) { assert(currSrcLoc && "source location must have been set"); return buildCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke, - IsMustTail, *currSrcLoc); + IsMustTail, *currSrcLoc, std::nullopt); } RValue buildCall(clang::QualType FnType, const CIRGenCallee &Callee, const clang::CallExpr *E, ReturnValueSlot returnValue, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index c75b497ed059..7683a97c3932 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2047,6 +2047,7 @@ void CallOp::print(::mlir::OpAsmPrinter &state) { state << ")"; llvm::SmallVector<::llvm::StringRef, 2> elidedAttrs; elidedAttrs.push_back("callee"); + elidedAttrs.push_back("ast"); state.printOptionalAttrDict((*this)->getAttrs(), elidedAttrs); state << ' ' << ":"; state << ' '; From cbe6a2818fea47d36c6310ca6a892820888c6e20 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 11 Dec 2023 23:20:27 -0300 Subject: [PATCH 1285/2301] [CIR][NFC] Add a isStdFunctionCall method to ASTCallExprInterface This will be tested soon by some idiom recognition code. --- .../clang/CIR/Interfaces/ASTAttrInterfaces.td | 33 ++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td index e3702e7faa6c..147aecb38ae6 100644 --- a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td @@ -189,7 +189,38 @@ let cppNamespace = "::mlir::cir" in { def ASTExprInterface : AttrInterface<"ASTExprInterface"> {} def ASTCallExprInterface : AttrInterface<"ASTCallExprInterface", - [ASTExprInterface]> {} + [ASTExprInterface]> { + let methods = [ + InterfaceMethod<"", "bool", "isStdFunctionCall", + (ins), [{}], /*defaultImplementation=*/ [{ + // Check that the entity being called is in standard + // "std" namespace. + auto callee = $_attr.getAst()->getCallee(); + if (!callee) + return false; + auto *ice = dyn_cast(callee); + if (!ice) + return false; + + auto *dre = dyn_cast_or_null(ice->getSubExpr()); + if (!dre) + return false; + auto qual = dre->getQualifier(); + if (!qual) + return false; + + // FIXME: should we check NamespaceAlias as well? + auto nqual = qual->getAsNamespace(); + if (!nqual || !nqual->getIdentifier() || + nqual->getName().compare("std") != 0) + return false; + + return true; + }] + > + ]; + + } } // namespace mlir::cir From 2ad5b8536b778284f847518158471976716508b0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 12 Dec 2023 12:00:24 -0300 Subject: [PATCH 1286/2301] [CIR][NFC] Isolate ClangIR-specific options in Options.td Organize things a bit while here. --- clang/include/clang/Driver/Options.td | 70 +++++++++++++-------------- 1 file changed, 33 insertions(+), 37 deletions(-) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 463694c5abd7..d4dcbc25f3b4 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3032,32 +3032,38 @@ def flimited_precision_EQ : Joined<["-"], "flimited-precision=">, Group def fapple_link_rtlib : Flag<["-"], "fapple-link-rtlib">, Group, HelpText<"Force linking the clang builtins runtime library">; +def flto_EQ : Joined<["-"], "flto=">, + Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>, + Group, + HelpText<"Set LTO mode">, Values<"thin,full">; +def flto_EQ_auto : Flag<["-"], "flto=auto">, Visibility<[ClangOption, FlangOption]>, Group, + Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; +def flto_EQ_jobserver : Flag<["-"], "flto=jobserver">, Visibility<[ClangOption, FlangOption]>, Group, + Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; + /// ClangIR-specific options - BEGIN defm clangir : BoolFOption<"clangir", FrontendOpts<"UseClangIRPipeline">, DefaultFalse, PosFlag, NegFlag LLVM pipeline to compile">, BothFlags<[], [ClangOption, CC1Option], "">>; -def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, - Group, - HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; -def emit_cir_only : Flag<["-"], "emit-cir-only">, - HelpText<"Build ASTs and convert to CIR, discarding output">; -def emit_mlir : Flag<["-"], "emit-mlir">, Visibility<[CC1Option]>, Group, - HelpText<"Build ASTs and then lower through ClangIR to MLIR, emit the .milr file">; -defm cir_warnings : BoolFOption<"cir-warnings", - LangOpts<"CIRWarnings">, DefaultFalse, - PosFlag, - NegFlag, - BothFlags<[], [CC1Option], " CIR to emit (analysis based) warnings">>; -/// ClangIR-specific options - END +def fclangir_disable_deferred_EQ : Joined<["-"], "fclangir-build-deferred-threshold=">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"ClangIR (internal): Control the recursion level for calls to buildDeferred (defaults to 500)">, + MarshallingInfoInt, "500u">; +def fclangir_skip_system_headers : Joined<["-"], "fclangir-skip-system-headers">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"ClangIR (internal): buildDeferred skip functions defined in system headers">, + MarshallingInfoFlag>; +def fclangir_lifetime_check_EQ : Joined<["-"], "fclangir-lifetime-check=">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Run lifetime checker">, + MarshallingInfoString>; +def fclangir_lifetime_check : Flag<["-"], "fclangir-lifetime-check">, + Visibility<[ClangOption, CC1Option]>, Group, + Alias, AliasArgs<["history=invalid,null"]>, + HelpText<"Run lifetime checker">; -def flto_EQ : Joined<["-"], "flto=">, - Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>, - Group, - HelpText<"Set LTO mode">, Values<"thin,full">; -def flto_EQ_jobserver : Flag<["-"], "flto=jobserver">, Visibility<[ClangOption, FlangOption]>, Group, - Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; def clangir_disable_passes : Flag<["-"], "clangir-disable-passes">, Visibility<[ClangOption, CC1Option]>, HelpText<"Disable CIR transformations pipeline">, @@ -3066,37 +3072,27 @@ def clangir_disable_verifier : Flag<["-"], "clangir-disable-verifier">, Visibility<[ClangOption, CC1Option]>, HelpText<"ClangIR: Disable MLIR module verifier">, MarshallingInfoFlag>; -def flto_EQ_auto : Flag<["-"], "flto=auto">, Visibility<[ClangOption, FlangOption]>, Group, - Alias, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">; def clangir_disable_emit_cxx_default : Flag<["-"], "clangir-disable-emit-cxx-default">, Visibility<[ClangOption, CC1Option]>, HelpText<"ClangIR: Disable emission of c++ default (compiler implemented) methods.">, MarshallingInfoFlag>; -def fclangir_disable_deferred_EQ : Joined<["-"], "fclangir-build-deferred-threshold=">, - Visibility<[ClangOption, CC1Option]>, Group, - HelpText<"ClangIR (internal): Control the recursion level for calls to buildDeferred (defaults to 500)">, - MarshallingInfoInt, "500u">; -def fclangir_skip_system_headers : Joined<["-"], "fclangir-skip-system-headers">, - Visibility<[ClangOption, CC1Option]>, Group, - HelpText<"ClangIR (internal): buildDeferred skip functions defined in system headers">, - MarshallingInfoFlag>; def clangir_verify_diagnostics : Flag<["-"], "clangir-verify-diagnostics">, Visibility<[ClangOption, CC1Option]>, HelpText<"ClangIR: Enable diagnostic verification in MLIR, similar to clang's -verify">, MarshallingInfoFlag>; -def fclangir_lifetime_check_EQ : Joined<["-"], "fclangir-lifetime-check=">, - Visibility<[ClangOption, CC1Option]>, Group, - HelpText<"Run lifetime checker">, - MarshallingInfoString>; -def fclangir_lifetime_check : Flag<["-"], "fclangir-lifetime-check">, - Visibility<[ClangOption, CC1Option]>, Group, - Alias, AliasArgs<["history=invalid,null"]>, - HelpText<"Run lifetime checker">; defm clangir_direct_lowering : BoolFOption<"clangir-direct-lowering", FrontendOpts<"ClangIRDirectLowering">, DefaultTrue, PosFlag, NegFlag>; +def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, + Group, HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; +def emit_cir_only : Flag<["-"], "emit-cir-only">, + HelpText<"Build ASTs and convert to CIR, discarding output">; +def emit_mlir : Flag<["-"], "emit-mlir">, Visibility<[CC1Option]>, Group, + HelpText<"Build ASTs and then lower through ClangIR to MLIR, emit the .milr file">; +/// ClangIR-specific options - END + def flto : Flag<["-"], "flto">, Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>, Group, From a7ceff04f3e4bb6a0896e841b72d62cbe67b8617 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Dec 2023 21:37:24 -0800 Subject: [PATCH 1287/2301] [CIR] Add initial support for idiom recognizing std::find - Identify such calls and produce a remark. Next step is to map this to a CIR operation representing std::find, which should come next. - Add new `-fclangir-idiom-recognizer=` option, used to control remark options for now. --- clang/include/clang/CIR/CIRToCIRPasses.h | 1 + clang/include/clang/CIR/Dialect/Passes.td | 8 +++ .../clang/CIR/Interfaces/ASTAttrInterfaces.td | 17 ++++- clang/include/clang/Driver/Options.td | 4 ++ .../include/clang/Frontend/FrontendOptions.h | 1 + clang/lib/CIR/CodeGen/CIRPasses.cpp | 10 ++- .../Dialect/Transforms/IdiomRecognizer.cpp | 71 +++++++++++++++++++ clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 7 +- clang/lib/Frontend/CompilerInvocation.cpp | 4 ++ .../test/CIR/Transforms/idiom-recognizer.cpp | 16 ++++- 10 files changed, 134 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index 06d928e5cf15..7994542f6ddf 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -33,6 +33,7 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, llvm::StringRef lifetimeOpts, + llvm::StringRef idiomRecognizerOpts, bool &passOptParsingFailure); } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index a31bc2a30388..55fe7a32dc1a 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -84,6 +84,14 @@ def IdiomRecognizer : Pass<"cir-idiom-recognizer"> { }]; let constructor = "mlir::createIdiomRecognizerPass()"; let dependentDialects = ["cir::CIRDialect"]; + + let options = [ + ListOption<"remarksList", "remarks", "std::string", + "Diagnostic remarks to enable" + " Supported styles: {all|found-calls}", "llvm::cl::ZeroOrMore">, + Option<"historyLimit", "history_limit", "unsigned", /*default=*/"1", + "Max amount of diagnostics to emit on pointer history"> + ]; } #endif // MLIR_DIALECT_CIR_PASSES diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td index 147aecb38ae6..328c2876ed2e 100644 --- a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td @@ -191,7 +191,7 @@ let cppNamespace = "::mlir::cir" in { def ASTCallExprInterface : AttrInterface<"ASTCallExprInterface", [ASTExprInterface]> { let methods = [ - InterfaceMethod<"", "bool", "isStdFunctionCall", + InterfaceMethod<"", "bool", "isCalleeInStdNamespace", (ins), [{}], /*defaultImplementation=*/ [{ // Check that the entity being called is in standard // "std" namespace. @@ -217,6 +217,21 @@ let cppNamespace = "::mlir::cir" in { return true; }] + >, + InterfaceMethod<"", "bool", "isStdFunctionCall", + (ins "llvm::StringRef":$fn), + [{}], /*defaultImplementation=*/ [{ + if (!isCalleeInStdNamespace()) + return false; + auto fnDecl = $_attr.getAst()->getDirectCallee(); + if (!fnDecl) + return false; + // We're looking for `std::`. + if (!fnDecl->getIdentifier() || + fnDecl->getName().compare(fn) != 0) + return false; + return true; + }] > ]; diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index d4dcbc25f3b4..a68a9180ce43 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3063,6 +3063,10 @@ def fclangir_lifetime_check : Flag<["-"], "fclangir-lifetime-check">, Visibility<[ClangOption, CC1Option]>, Group, Alias, AliasArgs<["history=invalid,null"]>, HelpText<"Run lifetime checker">; +def fclangir_idiom_recognizer_EQ : Joined<["-"], "fclangir-idiom-recognizer=">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Pass configuration options to CIR idiom recognizer">, + MarshallingInfoString>; def clangir_disable_passes : Flag<["-"], "clangir-disable-passes">, Visibility<[ClangOption, CC1Option]>, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 57cd2e863384..70e0e718dbaa 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -511,6 +511,7 @@ class FrontendOptions { std::string ARCMTMigrateReportOut; std::string ClangIRLifetimeCheckOpts; + std::string ClangIRIdiomRecognizerOpts; /// The input kind, either specified via -x argument or deduced from the input /// file name. diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 653148a5a8d4..5bcdc787d6ca 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -24,6 +24,7 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, llvm::StringRef lifetimeOpts, + llvm::StringRef idiomRecognizerOpts, bool &passOptParsingFailure) { mlir::PassManager pm(mlirCtx); passOptParsingFailure = false; @@ -45,7 +46,14 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, pm.addPass(std::move(lifetimePass)); } - pm.addPass(mlir::createIdiomRecognizerPass(&astCtx)); + auto idiomPass = mlir::createIdiomRecognizerPass(&astCtx); + if (idiomPass->initializeOptions(idiomRecognizerOpts, errorHandler) + .failed()) { + passOptParsingFailure = true; + return mlir::failure(); + } + pm.addPass(std::move(idiomPass)); + pm.addPass(mlir::createLoweringPreparePass(&astCtx)); // FIXME: once CIRCodenAction fixes emission other than CIR we diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index 1fc7e5d6509c..889f1eaf677f 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -33,6 +33,46 @@ namespace { struct IdiomRecognizerPass : public IdiomRecognizerBase { IdiomRecognizerPass() = default; void runOnOperation() override; + void recognizeCall(CallOp call); + void raiseStdFind(CallOp call); + + // Handle pass options + struct Options { + enum : unsigned { + None = 0, + RemarkFoundCalls = 1, + RemarkAll = 1 << 1, + }; + unsigned val = None; + bool isOptionsParsed = false; + + void parseOptions(ArrayRef remarks) { + if (isOptionsParsed) + return; + + for (auto &remark : remarks) { + val |= StringSwitch(remark) + .Case("found-calls", RemarkFoundCalls) + .Case("all", RemarkAll) + .Default(None); + } + isOptionsParsed = true; + } + + void parseOptions(IdiomRecognizerPass &pass) { + SmallVector remarks; + + for (auto &r : pass.remarksList) + remarks.push_back(r); + + parseOptions(remarks); + } + + bool emitRemarkAll() { return val & RemarkAll; } + bool emitRemarkFoundCalls() { + return emitRemarkAll() || val & RemarkFoundCalls; + } + } opts; /// /// AST related @@ -45,11 +85,42 @@ struct IdiomRecognizerPass : public IdiomRecognizerBase { }; } // namespace +void IdiomRecognizerPass::raiseStdFind(CallOp call) { + // FIXME: tablegen all of this function. + if (call.getNumOperands() != 3) + return; + + auto callExprAttr = call.getAstAttr(); + if (!callExprAttr || !callExprAttr.isStdFunctionCall("find")) { + return; + } + + if (opts.emitRemarkFoundCalls()) + emitRemark(call.getLoc()) << "found call to std::find()"; +} + +void IdiomRecognizerPass::recognizeCall(CallOp call) { raiseStdFind(call); } + void IdiomRecognizerPass::runOnOperation() { assert(astCtx && "Missing ASTContext, please construct with the right ctor"); + opts.parseOptions(*this); auto *op = getOperation(); if (isa<::mlir::ModuleOp>(op)) theModule = cast<::mlir::ModuleOp>(op); + + SmallVector callsToTransform; + op->walk([&](CallOp callOp) { + // Process call operations + + // Skip indirect calls. + auto c = callOp.getCallee(); + if (!c) + return; + callsToTransform.push_back(callOp); + }); + + for (auto c : callsToTransform) + recognizeCall(c); } std::unique_ptr mlir::createIdiomRecognizerPass() { diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 4969f5e2afed..34e13c6f9c16 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -60,6 +60,8 @@ using namespace cir; using namespace clang; static std::string sanitizePassOptions(llvm::StringRef o) { + if (o.empty()) + return ""; std::string opts{o}; // MLIR pass options are space separated, but we use ';' in clang since // space aren't well supported, switch it back. @@ -172,15 +174,18 @@ class CIRGenConsumer : public clang::ASTConsumer { // Sanitize passes options. MLIR uses spaces between pass options // and since that's hard to fly in clang, we currently use ';'. std::string lifetimeOpts; + std::string idiomRecognizerOpts; if (feOptions.ClangIRLifetimeCheck) lifetimeOpts = sanitizePassOptions(feOptions.ClangIRLifetimeCheckOpts); + idiomRecognizerOpts = + sanitizePassOptions(feOptions.ClangIRIdiomRecognizerOpts); // Setup and run CIR pipeline. bool passOptParsingFailure = false; if (runCIRToCIRPasses(mlirMod, mlirCtx.get(), C, !feOptions.ClangIRDisableCIRVerifier, feOptions.ClangIRLifetimeCheck, lifetimeOpts, - passOptParsingFailure) + idiomRecognizerOpts, passOptParsingFailure) .failed()) { if (passOptParsingFailure) diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index c24dc33e5262..4fcc444fb6ff 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3121,6 +3121,10 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Opts.ClangIRLifetimeCheckOpts = A->getValue(); } + if (Args.hasArg(OPT_fclangir_idiom_recognizer_EQ)) + Opts.AuxTargetCPU = + std::string(Args.getLastArgValue(OPT_fclangir_idiom_recognizer_EQ)); + if (Args.hasArg(OPT_aux_target_cpu)) Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu)); if (Args.hasArg(OPT_aux_target_feature)) diff --git a/clang/test/CIR/Transforms/idiom-recognizer.cpp b/clang/test/CIR/Transforms/idiom-recognizer.cpp index ee53b606c61b..d047487b2847 100644 --- a/clang/test/CIR/Transforms/idiom-recognizer.cpp +++ b/clang/test/CIR/Transforms/idiom-recognizer.cpp @@ -1,4 +1,16 @@ -// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=PASS_ENABLED +// RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -fclangir-idiom-recognizer="remarks=found-calls" -clangir-verify-diagnostics -std=c++20 -triple x86_64-unknown-linux-gnu %s -o %t2.cir -// CIR: IR Dump After IdiomRecognizer (cir-idiom-recognizer) +// PASS_ENABLED: IR Dump After IdiomRecognizer (cir-idiom-recognizer) +#include "std-cxx.h" + +int test_find(unsigned char n = 3) +{ + unsigned num_found = 0; + std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + auto f = std::find(v.begin(), v.end(), n); // expected-remark {{found call to std::find()}} + if (f != v.end()) + num_found++; + return num_found; +} \ No newline at end of file From 7e827749e366784b8bee173a82f5495c148e7c0f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 12 Dec 2023 17:02:07 -0300 Subject: [PATCH 1288/2301] [CIR] Add cir.std.find operation This is going to be used to raise `cir.call`s to `std::find(...)` into `cir.std.find`. --- .../include/clang/CIR/Dialect/IR/CIRDialect.h | 13 ++++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 40 +++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 +++++++ 3 files changed, 67 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index 58ff4881f90f..818332c284d2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -41,6 +41,7 @@ namespace impl { // corresponding trait classes. This avoids them being template // instantiated/duplicated. LogicalResult verifySameFirstOperandAndResultType(Operation *op); +LogicalResult verifySameFirstSecondOperandAndResultType(Operation *op); } // namespace impl /// This class provides verification for ops that are known to have the same @@ -55,6 +56,18 @@ class SameFirstOperandAndResultType } }; +/// This class provides verification for ops that are known to have the same +/// first operand and result type. +/// +template +class SameFirstSecondOperandAndResultType + : public TraitBase { +public: + static LogicalResult verifyTrait(Operation *op) { + return impl::verifySameFirstSecondOperandAndResultType(op); + } +}; + } // namespace OpTrait namespace cir { diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index eb6c4be057ff..10635f4fea9c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2238,6 +2238,46 @@ def MemChrOp : CIR_Op<"libc.memchr"> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// StdFindOp +//===----------------------------------------------------------------------===// + +def SameFirstSecondOperandAndResultType : + NativeOpTrait<"SameFirstSecondOperandAndResultType">; + +def StdFindOp : CIR_Op<"std.find", [SameFirstSecondOperandAndResultType]> { + let arguments = (ins FlatSymbolRefAttr:$original_fn, + AnyType:$first, + AnyType:$last, + AnyType:$pattern); + let summary = "std:find()"; + let results = (outs AnyType:$result); + + let description = [{ + Search for `pattern` in data range from `first` to `last`. This currently + maps to only one form of `std::find`. The `original_fn` operand tracks the + mangled named that can be used when lowering to a `cir.call`. + + Example: + + ```mlir + ... + %result = cir.std.find(@original_fn, + %first : !T, %last : !T, %pattern : !P) -> !T + ``` + }]; + + let assemblyFormat = [{ + `(` + $original_fn + `,` $first `:` type($first) + `,` $last `:` type($last) + `,` $pattern `:` type($pattern) + `)` `->` type($result) attr-dict + }]; + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // FAbsOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7683a97c3932..87bc36f33314 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2163,6 +2163,20 @@ mlir::OpTrait::impl::verifySameFirstOperandAndResultType(Operation *op) { return success(); } +LogicalResult +mlir::OpTrait::impl::verifySameFirstSecondOperandAndResultType(Operation *op) { + if (failed(verifyAtLeastNOperands(op, 3)) || failed(verifyOneResult(op))) + return failure(); + + auto checkType = op->getResult(0).getType(); + if (checkType != op->getOperand(0).getType() && + checkType != op->getOperand(1).getType()) + return op->emitOpError() + << "requires the same type for first operand and result"; + + return success(); +} + //===----------------------------------------------------------------------===// // CIR attributes // FIXME: move all of these to CIRAttrs.cpp From 25c90ee5c736e91cb21ac90a9a845bfd48d9e47d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 12 Dec 2023 18:12:44 -0300 Subject: [PATCH 1289/2301] [CIR] Raise std::find call to cir.std.find Also implement lowering back to `std::call` before lowering to LLVM. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +++++ .../Dialect/Transforms/IdiomRecognizer.cpp | 9 ++++++++ .../Dialect/Transforms/LoweringPrepare.cpp | 23 +++++++++++++------ .../test/CIR/Transforms/idiom-recognizer.cpp | 8 +++++++ 4 files changed, 39 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 10635f4fea9c..d7379b5bc849 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1887,6 +1887,12 @@ def CallOp : CIR_Op<"call", $_state.addOperands(operands); if (!fn_type.isVoid()) $_state.addTypes(fn_type.getReturnType()); + }]>, + OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType, + CArg<"ValueRange", "{}">:$operands), [{ + $_state.addOperands(operands); + $_state.addAttribute("callee", callee); + $_state.addTypes(resType); }]>]; let extraClassDeclaration = [{ diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index 889f1eaf677f..1dd382d64ad8 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -97,6 +97,15 @@ void IdiomRecognizerPass::raiseStdFind(CallOp call) { if (opts.emitRemarkFoundCalls()) emitRemark(call.getLoc()) << "found call to std::find()"; + + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(call.getOperation()); + auto findOp = builder.create( + call.getLoc(), call.getResult(0).getType(), call.getCalleeAttr(), + call.getOperand(0), call.getOperand(1), call.getOperand(2)); + + call.replaceAllUsesWith(findOp); + call.erase(); } void IdiomRecognizerPass::recognizeCall(CallOp call) { raiseStdFind(call); } diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 92a7137e8e40..c87afea8be52 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -68,6 +68,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerGlobalOp(GlobalOp op); void lowerGetBitfieldOp(GetBitfieldOp op); void lowerSetBitfieldOp(SetBitfieldOp op); + void lowerStdFindOp(StdFindOp op); /// Build the function that initializes the specified global FuncOp buildCXXGlobalVarDeclInitFunc(GlobalOp op); @@ -406,6 +407,17 @@ void LoweringPreparePass::lowerSetBitfieldOp(SetBitfieldOp op) { op.erase(); } +void LoweringPreparePass::lowerStdFindOp(StdFindOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + auto call = builder.create( + op.getLoc(), op.getOriginalFnAttr(), op.getResult().getType(), + mlir::ValueRange{op.getOperand(0), op.getOperand(1), op.getOperand(2)}); + + op.replaceAllUsesWith(call); + op.erase(); +} + void LoweringPreparePass::runOnOp(Operation *op) { if (auto getGlobal = dyn_cast(op)) { lowerGlobalOp(getGlobal); @@ -413,6 +425,8 @@ void LoweringPreparePass::runOnOp(Operation *op) { lowerGetBitfieldOp(getBitfield); } else if (auto setBitfield = dyn_cast(op)) { lowerSetBitfieldOp(setBitfield); + } else if (auto stdFind = dyn_cast(op)) { + lowerStdFindOp(stdFind); } } @@ -425,17 +439,12 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { - if (isa(op)) - opsToTransform.push_back(op); - if (isa(op)) - opsToTransform.push_back(op); - if (isa(op)) + if (isa(op)) opsToTransform.push_back(op); }); - for (auto *o : opsToTransform) { + for (auto *o : opsToTransform) runOnOp(o); - } buildCXXGlobalInitFunc(); } diff --git a/clang/test/CIR/Transforms/idiom-recognizer.cpp b/clang/test/CIR/Transforms/idiom-recognizer.cpp index d047487b2847..86e4a388b423 100644 --- a/clang/test/CIR/Transforms/idiom-recognizer.cpp +++ b/clang/test/CIR/Transforms/idiom-recognizer.cpp @@ -1,6 +1,10 @@ // RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=PASS_ENABLED // RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -fclangir-idiom-recognizer="remarks=found-calls" -clangir-verify-diagnostics -std=c++20 -triple x86_64-unknown-linux-gnu %s -o %t2.cir +// RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-before=cir-idiom-recognizer -std=c++20 -triple x86_64-unknown-linux-gnu %s -o - 2>&1 | FileCheck %s -check-prefix=BEFORE-IDIOM +// RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-idiom-recognizer -std=c++20 -triple x86_64-unknown-linux-gnu %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-IDIOM +// RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-lowering-prepare -std=c++20 -triple x86_64-unknown-linux-gnu %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-LOWERING-PREPARE + // PASS_ENABLED: IR Dump After IdiomRecognizer (cir-idiom-recognizer) #include "std-cxx.h" @@ -10,6 +14,10 @@ int test_find(unsigned char n = 3) unsigned num_found = 0; std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; auto f = std::find(v.begin(), v.end(), n); // expected-remark {{found call to std::find()}} + // BEFORE-IDIOM: {{.*}} cir.call @_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_( + // AFTER-IDIOM: {{.*}} cir.std.find(@_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_, + // AFTER-LOWERING-PREPARE: {{.*}} cir.call @_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_( + if (f != v.end()) num_found++; return num_found; From 29cb13138108676ebf09d2848ada6d3f777f89c5 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 13 Dec 2023 18:28:09 +0300 Subject: [PATCH 1290/2301] [CIR][Lowering] Fix function ptr field lowering in a global struct (#353) This PR fixes a global vars lowering with a funciton ptr field. Previously, the next code caused fail in the `foo` lowering: ``` static void myfun(int a) {} static struct { void (*func)(int flag); } const Handlers[] = { {myfun}, {myfun}, {myfun} }; void foo(int i, int flag) { Handlers[i].func(flag); } ``` --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 +++- clang/test/CIR/Lowering/globals.cir | 34 ++++++++++++++++++- 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index fd77f49fd294..182fb65d78e7 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -229,7 +229,11 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } else if (auto llvmFun = dyn_cast(sourceSymbol)) { sourceType = llvmFun.getFunctionType(); symName = llvmFun.getSymName(); - } else { + } else if (auto fun = dyn_cast(sourceSymbol)) { + sourceType = converter->convertType(fun.getFunctionType()); + symName = fun.getSymName(); + } + else { llvm_unreachable("Unexpected GlobalOp type"); } diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index e4d3ee2fe740..052c2045752b 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -2,7 +2,9 @@ // RUN: FileCheck --input-file=%t.cir %s -check-prefix=MLIR // RUN: cir-translate %s -cir-to-llvmir -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// XFAIL: * +!void = !cir.void !s16i = !cir.int !s32i = !cir.int !s64i = !cir.int @@ -14,6 +16,7 @@ !ty_22Bar22 = !cir.struct !ty_22StringStruct22 = !cir.struct, !cir.array, !cir.array} #cir.record.decl.ast> !ty_22StringStructPtr22 = !cir.struct} #cir.record.decl.ast> +!ty_22anon2E122 = !cir.struct)>>} #cir.record.decl.ast> module { cir.global external @a = #cir.int<3> : !s32i @@ -146,4 +149,33 @@ module { // MLIR: } cir.global common @comm = #cir.int<0> : !s32i // MLIR: llvm.mlir.global common @comm(0 : i32) {addr_space = 0 : i32} : i32 -} + + cir.global "private" internal @Handlers = #cir.const_array<[#cir.const_struct<{#cir.global_view<@myfun> : !cir.ptr>}> : !ty_22anon2E122]> : !cir.array + cir.func internal private @myfun(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.return + } + cir.func @foo(%arg0: !s32i, %arg1: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["flag", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.store %arg1, %1 : !s32i, cir.ptr + %2 = cir.get_global @Handlers : cir.ptr > + %3 = cir.load %0 : cir.ptr , !s32i + %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + %5 = cir.ptr_stride(%4 : !cir.ptr, %3 : !s32i), !cir.ptr + %6 = cir.get_member %5[0] {name = "func"} : !cir.ptr -> !cir.ptr>> + %7 = cir.load %6 : cir.ptr >>, !cir.ptr> + %8 = cir.load %1 : cir.ptr , !s32i + cir.call %7(%8) : (!cir.ptr>, !s32i) -> () + cir.return + } + //MLIR: %[[RES4:.*]] = llvm.mlir.addressof @Handlers : !llvm.ptr + //MLIR: %[[RES5:.*]] = llvm.load {{.*}} : !llvm.ptr -> i32 + //MLIR: %[[RES6:.*]] = llvm.getelementptr %[[RES4]][0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> + //MLIR: %[[RES7:.*]] = llvm.getelementptr %[[RES6]][%[[RES5]]] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> + //MLIR: %[[RES8:.*]] = llvm.getelementptr %[[RES7]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> + //MLIR: %[[RES9:.*]] = llvm.load %[[RES8]] : !llvm.ptr -> !llvm.ptr + //MLIR: llvm.call %[[RES9]]({{.*}}) : !llvm.ptr, (i32) -> () +} \ No newline at end of file From d67a452727ccc926c911c6aa97a1ac70aed10b14 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 13 Dec 2023 18:13:55 -0800 Subject: [PATCH 1291/2301] [CIR][NFC] Formatting --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 17 ++++++++--------- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 7 ++++--- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 66bf63d8c73c..f0a9a5f935ce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -10,8 +10,8 @@ #define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H #include "Address.h" -#include "CIRGenRecordLayout.h" #include "CIRDataLayout.h" +#include "CIRGenRecordLayout.h" #include "CIRGenTypeCache.h" #include "UnimplementedFeatureGuarding.h" @@ -465,12 +465,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::ArrayType getArrayType(mlir::Type eltType, unsigned size) { return mlir::cir::ArrayType::get(getContext(), eltType, size); } - + bool isSized(mlir::Type ty) { if (ty.isIntOrFloat() || ty.isa()) + mlir::cir::ArrayType, mlir::cir::BoolType, mlir::cir::IntType>()) return true; assert(0 && "Unimplemented size for type"); return false; @@ -668,8 +667,8 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { bool useVolatile) { auto offset = useVolatile ? info.VolatileOffset : info.Offset; return create(loc, resultType, addr, storageType, - info.Name, info.Size, - offset, info.IsSigned); + info.Name, info.Size, offset, + info.IsSigned); } mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType, @@ -677,9 +676,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value src, const CIRGenBitFieldInfo &info, bool useVolatile) { auto offset = useVolatile ? info.VolatileOffset : info.Offset; - return create( - loc, resultType, dstAddr, storageType, src, info.Name, - info.Size, offset, info.IsSigned); + return create(loc, resultType, dstAddr, + storageType, src, info.Name, + info.Size, offset, info.IsSigned); } /// Create a pointer to a record member. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 2cac7abfc203..3e18e033a641 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -213,7 +213,7 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value buildLoadOfLValue(const Expr *E) { LValue LV = CGF.buildLValue(E); // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); - return CGF.buildLoadOfLValue(LV, E->getExprLoc()).getScalarVal(); + return CGF.buildLoadOfLValue(LV, E->getExprLoc()).getScalarVal(); } mlir::Value buildLoadOfLValue(LValue LV, SourceLocation Loc) { @@ -1054,7 +1054,7 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, std::swap(pointerOperand, indexOperand); } - bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); + bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); // Some versions of glibc and gcc use idioms (particularly in their malloc // routines) that add a pointer-sized integer (known to be a pointer value) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 48b3623b6ee5..3962c1f9f026 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1526,8 +1526,8 @@ void CIRGenItaniumRTTIBuilder::BuildVMIClassTypeInfo(mlir::Location loc, for (const auto &Base : RD->bases()) { // The __base_type member points to the RTTI for the base type. - Fields.push_back( - CIRGenItaniumRTTIBuilder(CXXABI, CGM).BuildTypeInfo(loc, Base.getType())); + Fields.push_back(CIRGenItaniumRTTIBuilder(CXXABI, CGM) + .BuildTypeInfo(loc, Base.getType())); auto *BaseDecl = cast(Base.getType()->castAs()->getDecl()); @@ -1777,7 +1777,8 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( assert(!UnimplementedFeature::setDSOLocal()); CIRGenModule::setInitializer(GV, init); - return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), GV);; + return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), GV); + ; } mlir::Attribute CIRGenItaniumCXXABI::getAddrOfRTTIDescriptor(mlir::Location loc, From b5c71bd6633288c2baa98e0bc69998db2754f187 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Thu, 14 Dec 2023 07:46:44 -0800 Subject: [PATCH 1292/2301] [CIR] Vector types - part 1 (#347) This is the first part of implementing vector types and vector operations in ClangIR, issue #284. This is enough to compile this test program. I haven't tried to do anything beyond that yet. ``` typedef int int4 __attribute__((vector_size(16))); int main(int argc, char** argv) { int4 a = { 1, argc, argc + 1, 4 }; int4 b = { 5, argc + 2, argc + 3, 8 }; int4 c = a + b; return c[1]; } ``` This change includes: * Fixed-sized vector types which are parameterized on the element type and the number of elements. For example, `!cir.vector`. (No scalable vector types yet; those will come later.) * New operation `cir.vec` which creates an object of a vector type with the given operands. * New operation `cir.vec_elem` which extracts an element from a vector. (The array subscript operation doesn't work here because the result is an rvalue, not an lvalue.) * Basic binary arithmetic operations on vector types, though only addition has been tested. There are no unary operators, comparison operators, casts, or shuffle operations yet. Those will all come later. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 59 ++++++++++++++++++- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 20 +++++++ clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 33 ++++++++--- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 5 +- .../CodeGen/UnimplementedFeatureGuarding.h | 8 ++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 25 ++++++++ clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 30 ++++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 58 ++++++++++++++++-- clang/test/CIR/CodeGen/vectype.cpp | 40 +++++++++++++ clang/test/CIR/IR/invalid.cir | 29 +++++++++ 11 files changed, 285 insertions(+), 26 deletions(-) create mode 100644 clang/test/CIR/CodeGen/vectype.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d7379b5bc849..286200844638 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -73,13 +73,18 @@ def CastOp : CIR_Op<"cast", [Pure]> { let description = [{ Apply C/C++ usual conversions rules between values. Currently supported kinds: - - `int_to_bool` - - `ptr_to_bool` - `array_to_ptrdecay` - - `integral` - `bitcast` + - `integral` + - `int_to_bool` + - `int_to_float` - `floating` - `float_to_int` + - `float_to_bool` + - `ptr_to_int` + - `ptr_to_bool` + - `bool_to_int` + - `bool_to_float` This is effectively a subset of the rules from `llvm-project/clang/include/clang/AST/OperationKinds.def`; but note that some @@ -1648,6 +1653,54 @@ def GetMemberOp : CIR_Op<"get_member"> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// VecExtractOp +//===----------------------------------------------------------------------===// + +def VecExtractOp : CIR_Op<"vec.extract", [Pure, + TypesMatchWith<"type of 'result' matches element type of 'vec'", + "vec", "result", + "$_self.cast().getEltType()">]> { + + let summary = "Extract one element from a vector object"; + let description = [{ + The `cir.vec.extract` operation extracts the element at the given index + from a vector object. + }]; + + let arguments = (ins CIR_VectorType:$vec, CIR_IntType:$index); + let results = (outs AnyType:$result); + + let assemblyFormat = [{ + $vec `[` $index `:` type($index) `]` type($vec) `->` type($result) attr-dict + }]; + + let hasVerifier = 0; +} + +//===----------------------------------------------------------------------===// +// VecCreate +//===----------------------------------------------------------------------===// + +def VecCreateOp : CIR_Op<"vec.create", [Pure]> { + + let summary = "Create a vector value"; + let description = [{ + The `cir.vec.create` operation creates a vector value with the given element + values. The number of element arguments must match the number of elements + in the vector type. + }]; + + let arguments = (ins Variadic:$elements); + let results = (outs CIR_VectorType:$result); + + let assemblyFormat = [{ + `(` ($elements^ `:` type($elements))? `)` `:` type($result) attr-dict + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // BaseClassAddr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index e9c60e763ba8..0d568c2d504c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -149,6 +149,26 @@ def CIR_ArrayType : CIR_Type<"Array", "array", }]; } +//===----------------------------------------------------------------------===// +// VectorType (fixed size) +//===----------------------------------------------------------------------===// + +def CIR_VectorType : CIR_Type<"Vector", "vector", + [DeclareTypeInterfaceMethods]> { + + let summary = "CIR vector type"; + let description = [{ + `cir.vector' represents fixed-size vector types. The parameters are the + element type and the number of elements. + }]; + + let parameters = (ins "mlir::Type":$eltType, "uint64_t":$size); + + let assemblyFormat = [{ + `<` $eltType `x` $size `>` + }]; +} + //===----------------------------------------------------------------------===// // FuncType //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 739790b3d150..1fc2e923b2b1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -180,7 +180,7 @@ static void emitStoresForConstant(CIRGenModule &CGM, const VarDecl &D, if (!ConstantSize) return; assert(!UnimplementedFeature::addAutoInitAnnotation()); - assert(!UnimplementedFeature::cirVectorType()); + assert(!UnimplementedFeature::vectorConstants()); assert(!UnimplementedFeature::shouldUseBZeroPlusStoresToInitialize()); assert(!UnimplementedFeature::shouldUseMemSetToInitialize()); assert(!UnimplementedFeature::shouldSplitConstantStore()); @@ -1004,4 +1004,4 @@ void CIRGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind, assert(needsEHCleanup(dtorKind)); pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 3e18e033a641..6103570bb34d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -246,13 +246,19 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *E) { // Do we need anything like TestAndClearIgnoreResultAssign()? - assert(!E->getBase()->getType()->isVectorType() && - "vector types not implemented"); - // Emit subscript expressions in rvalue context's. For most cases, this - // just loads the lvalue formed by the subscript expr. However, we have to - // be careful, because the base of a vector subscript is occasionally an - // rvalue, so we can't get it as an lvalue. + if (E->getBase()->getType()->isVectorType()) { + assert(!UnimplementedFeature::scalableVectors() && + "NYI: index into scalable vector"); + // Subscript of vector type. This is handled differently, with a custom + // operation. + mlir::Value VecValue = Visit(E->getBase()); + mlir::Value IndexValue = Visit(E->getIdx()); + return CGF.builder.create( + CGF.getLoc(E->getSourceRange()), VecValue, IndexValue); + } + + // Just load the lvalue formed by the subscript expression. return buildLoadOfLValue(E); } @@ -919,6 +925,7 @@ class ScalarExprEmitter : public StmtVisitor { "Internal error: conversion between matrix type and scalar type"); // TODO(CIR): Support VectorTypes + assert(!UnimplementedFeature::cirVectorType() && "NYI: vector cast"); // Finally, we have the arithmetic types: real int/float. mlir::Value Res = nullptr; @@ -1579,8 +1586,18 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { if (E->hadArrayRangeDesignator()) llvm_unreachable("NYI"); - if (UnimplementedFeature::cirVectorType()) - llvm_unreachable("NYI"); + if (E->getType()->isVectorType()) { + assert(!UnimplementedFeature::scalableVectors() && + "NYI: scalable vector init"); + assert(!UnimplementedFeature::vectorConstants() && "NYI: vector constants"); + SmallVector Elements; + for (Expr *init : E->inits()) { + Elements.push_back(Visit(init)); + } + return CGF.getBuilder().create( + CGF.getLoc(E->getSourceRange()), CGF.getCIRType(E->getType()), + Elements); + } if (NumInitElements == 0) { // C++11 value-initialization for the scalar. diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index f6a1c3f5d7cf..4fb8a36a0547 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -650,7 +650,10 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } case Type::ExtVector: case Type::Vector: { - assert(0 && "not implemented"); + const VectorType *V = cast(Ty); + auto ElementType = convertTypeForMem(V->getElementType()); + ResultType = ::mlir::cir::VectorType::get(Builder.getContext(), ElementType, + V->getNumElements()); break; } case Type::ConstantMatrix: { diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 12f2b2037d61..ee3d643dd136 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -22,9 +22,13 @@ struct UnimplementedFeature { static bool buildTypeCheck() { return false; } static bool tbaa() { return false; } static bool cleanups() { return false; } - // This is for whether or not we've implemented a cir::VectorType - // corresponding to `llvm::VectorType` + + // cir::VectorType is in progress, so cirVectorType() will go away soon. + // Start adding feature flags for more advanced vector types and operations + // that will take longer to implement. static bool cirVectorType() { return false; } + static bool scalableVectors() { return false; } + static bool vectorConstants() { return false; } // Address space related static bool addressSpace() { return false; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 87bc36f33314..af6b0b85f3f5 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -422,6 +422,31 @@ LogicalResult CastOp::verify() { llvm_unreachable("Unknown CastOp kind?"); } +//===----------------------------------------------------------------------===// +// VecCreateOp +//===----------------------------------------------------------------------===// + +LogicalResult VecCreateOp::verify() { + // Verify that the number of arguments matches the number of elements in the + // vector, and that the type of all the arguments matches the type of the + // elements in the vector. + auto VecTy = getResult().getType(); + if (getElements().size() != VecTy.getSize()) { + return emitOpError() << "operand count of " << getElements().size() + << " doesn't match vector type " << VecTy + << " element count of " << VecTy.getSize(); + } + auto ElementType = VecTy.getEltType(); + for (auto Element : getElements()) { + if (Element.getType() != ElementType) { + return emitOpError() << "operand type " << Element.getType() + << " doesn't match vector element type " + << ElementType; + } + } + return success(); +} + //===----------------------------------------------------------------------===// // ReturnOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 9ebdac1ae903..e5268066fdbf 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -410,6 +410,25 @@ ArrayType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, return dataLayout.getTypePreferredAlignment(getEltType()); } +llvm::TypeSize cir::VectorType::getTypeSizeInBits( + const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return llvm::TypeSize::getFixed(getSize() * + dataLayout.getTypeSizeInBits(getEltType())); +} + +uint64_t +cir::VectorType::getABIAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return getSize() * dataLayout.getTypeABIAlignment(getEltType()); +} + +uint64_t cir::VectorType::getPreferredAlignment( + const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return getSize() * dataLayout.getTypePreferredAlignment(getEltType()); +} + llvm::TypeSize StructType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { @@ -604,9 +623,9 @@ FuncType FuncType::clone(TypeRange inputs, TypeRange results) const { return get(llvm::to_vector(inputs), results[0], isVarArg()); } -mlir::ParseResult -parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, - bool &isVarArg) { +mlir::ParseResult parseFuncTypeArgs(mlir::AsmParser &p, + llvm::SmallVector ¶ms, + bool &isVarArg) { isVarArg = false; // `(` `)` if (succeeded(p.parseOptionalRParen())) @@ -636,9 +655,8 @@ parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, return p.parseRParen(); } -void printFuncTypeArgs(mlir::AsmPrinter &p, - mlir::ArrayRef params, - bool isVarArg) { +void printFuncTypeArgs(mlir::AsmPrinter &p, mlir::ArrayRef params, + bool isVarArg) { llvm::interleaveComma(params, p, [&p](mlir::Type type) { p.printType(type); }); if (isVarArg) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 182fb65d78e7..428f8f2211db 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -232,8 +232,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } else if (auto fun = dyn_cast(sourceSymbol)) { sourceType = converter->convertType(fun.getFunctionType()); symName = fun.getSymName(); - } - else { + } else { llvm_unreachable("Unexpected GlobalOp type"); } @@ -1111,6 +1110,48 @@ class CIRConstantLowering } }; +class CIRVectorCreateLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecCreateOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // Start with an 'undef' value for the vector. Then 'insertelement' for + // each of the vector elements. + auto vecTy = op.getType().dyn_cast(); + assert(vecTy && "result type of cir.vec op is not VectorType"); + auto llvmTy = typeConverter->convertType(vecTy); + auto loc = op.getLoc(); + mlir::Value result = rewriter.create(loc, llvmTy); + assert(vecTy.getSize() == op.getElements().size() && + "cir.vec operands count doesn't match vector type elements count"); + for (uint64_t i = 0; i < vecTy.getSize(); ++i) { + mlir::Value indexValue = rewriter.create( + loc, rewriter.getI64Type(), i); + result = rewriter.create( + loc, result, adaptor.getElements()[i], indexValue); + } + rewriter.replaceOp(op, result); + return mlir::success(); + } +}; + +class CIRVectorExtractLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecExtractOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getVec(), adaptor.getIndex()); + return mlir::success(); + } +}; + class CIRVAStartLowering : public mlir::OpConversionPattern { public: @@ -1615,13 +1656,17 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { assert((op.getLhs().getType() == op.getRhs().getType()) && "inconsistent operands' types not supported yet"); mlir::Type type = op.getRhs().getType(); - assert((type.isa()) && + assert((type.isa()) && "operand type not supported yet"); auto llvmTy = getTypeConverter()->convertType(op.getType()); auto rhs = adaptor.getRhs(); auto lhs = adaptor.getLhs(); + if (type.isa()) + type = type.dyn_cast().getEltType(); + switch (op.getKind()) { case mlir::cir::BinOpKind::Add: if (type.isa()) @@ -2001,7 +2046,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, - CIRFAbsOpLowering, CIRVTableAddrPointOpLowering>( + CIRFAbsOpLowering, CIRVTableAddrPointOpLowering, + CIRVectorCreateLowering, CIRVectorExtractLowering>( converter, patterns.getContext()); } @@ -2016,6 +2062,10 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, auto ty = converter.convertType(type.getEltType()); return mlir::LLVM::LLVMArrayType::get(ty, type.getSize()); }); + converter.addConversion([&](mlir::cir::VectorType type) -> mlir::Type { + auto ty = converter.convertType(type.getEltType()); + return mlir::LLVM::getFixedVectorType(ty, type.getSize()); + }); converter.addConversion([&](mlir::cir::BoolType type) -> mlir::Type { return mlir::IntegerType::get(type.getContext(), 8, mlir::IntegerType::Signless); diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp new file mode 100644 index 000000000000..80f6bf39258c --- /dev/null +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -0,0 +1,40 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +typedef int int4 __attribute__((vector_size(16))); +int test_vector_basic(int x, int y, int z) { + int4 a = { 1, 2, 3, 4 }; + int4 b = { x, y, z, x + y + z }; + int4 c = a + b; + return c[1]; +} + +// CHECK: %4 = cir.alloca !cir.vector, cir.ptr >, ["a", init] {alignment = 16 : i64} +// CHECK: %5 = cir.alloca !cir.vector, cir.ptr >, ["b", init] {alignment = 16 : i64} +// CHECK: %6 = cir.alloca !cir.vector, cir.ptr >, ["c", init] {alignment = 16 : i64} + +// CHECK: %7 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %8 = cir.const(#cir.int<2> : !s32i) : !s32i +// CHECK: %9 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: %10 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK: %11 = cir.vec.create(%7, %8, %9, %10 : !s32i, !s32i, !s32i, !s32i) : +// CHECK: cir.store %11, %4 : !cir.vector, cir.ptr > +// CHECK: %12 = cir.load %0 : cir.ptr , !s32i +// CHECK: %13 = cir.load %1 : cir.ptr , !s32i +// CHECK: %14 = cir.load %2 : cir.ptr , !s32i +// CHECK: %15 = cir.load %0 : cir.ptr , !s32i +// CHECK: %16 = cir.load %1 : cir.ptr , !s32i +// CHECK: %17 = cir.binop(add, %15, %16) : !s32i +// CHECK: %18 = cir.load %2 : cir.ptr , !s32i +// CHECK: %19 = cir.binop(add, %17, %18) : !s32i +// CHECK: %20 = cir.vec.create(%12, %13, %14, %19 : !s32i, !s32i, !s32i, !s32i) : +// CHECK: cir.store %20, %5 : !cir.vector, cir.ptr > +// CHECK: %21 = cir.load %4 : cir.ptr >, !cir.vector +// CHECK: %22 = cir.load %5 : cir.ptr >, !cir.vector +// CHECK: %23 = cir.binop(add, %21, %22) : !cir.vector +// CHECK: cir.store %23, %6 : !cir.vector, cir.ptr > +// CHECK: %24 = cir.load %6 : cir.ptr >, !cir.vector +// CHECK: %25 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %26 = cir.vec.extract %24[%25 : !s32i] -> !s32i +// CHECK: cir.store %26, %3 : !s32i, cir.ptr +// CHECK: %27 = cir.load %3 : cir.ptr , !s32i +// CHECK: cir.return %27 : !s32i diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 5571dd030f25..d122be4d0a34 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -419,6 +419,35 @@ module { // ----- +!s32i = !cir.int +cir.func @vec_op_size() { + %0 = cir.const(#cir.int<1> : !s32i) : !s32i + %1 = cir.vec.create(%0 : !s32i) : // expected-error {{'cir.vec.create' op operand count of 1 doesn't match vector type '!cir.vector x 2>' element count of 2}} +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int +cir.func @vec_op_type() { + %0 = cir.const(#cir.int<1> : !s32i) : !s32i + %1 = cir.const(#cir.int<2> : !u32i) : !u32i + %2 = cir.vec.create(%0, %1 : !s32i, !u32i) : // expected-error {{'cir.vec.create' op operand type '!cir.int' doesn't match vector element type '!cir.int'}} +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int +cir.func @vec_extract_type() { + %0 = cir.const(#cir.int<1> : !s32i) : !s32i + %1 = cir.const(#cir.int<2> : !s32i) : !s32i + %2 = cir.vec.create(%0, %1 : !s32i, !s32i) : + %3 = cir.vec.extract %2[%0 : !s32i] -> !u32i // expected-error {{'cir.vec.extract' op failed to verify that type of 'result' matches element type of 'vec'}} +} + +// ----- + cir.func coroutine @bad_task() { // expected-error {{coroutine body must use at least one cir.await op}} cir.return } From 01698d4f9cbabfb57699868c7eb72204199b2925 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 19 Dec 2023 17:41:42 +0300 Subject: [PATCH 1293/2301] [CIR][CodeGen][Lowering] Support multi-block case/default clauses (#356) This PR adds a support for the multi-block case statements. Previously, the code example below caused crash in cir verification Lowering to the `llvm` dialect is pretty straightforward: the same logic as before is applied to all the region's blocks with no successors, i.e. we no longer think a case/default region contains only one block. The `CodeGen` part is a little bit tricky. Previously, any sub-statement of `case` or`default`, that was not any of them (i.e. neither `case` nor `default`) was processed with an insertion guard, meaning that the next sub-statement in the same clause was inserted again in the same block as the first one. It would be fine, once sub-statement didn't generate any blocks as well. For instance, ``` void foo(int a) { switch (a) { case 3: return; break; } } ``` The `return` statement actually emit a new block after, where the unreachable code with `break` should be inserted in. That's why we also need to update `lastCaseBlock` while generating `cir.switch` This is quite frequent bug in `llvm-test-suite` --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 51 ++++++++++--------- clang/test/CIR/CodeGen/switch.cpp | 16 ++++++ clang/test/CIR/Lowering/switch.cir | 31 +++++++++++ 4 files changed, 74 insertions(+), 26 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 8bf702d4c824..233a1b216283 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -621,7 +621,6 @@ mlir::LogicalResult CIRGenFunction::buildCaseDefaultCascade( insertFallthrough(*stmt); res = buildCaseStmt(*dyn_cast(sub), condType, caseAttrs, os); } else { - mlir::OpBuilder::InsertionGuard guardCase(builder); res = buildStmt(sub, /*useCurrentScope=*/!isa(sub)); } @@ -987,6 +986,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { mlir::OpBuilder::InsertionGuard guardCase(builder); builder.setInsertionPointToEnd(lastCaseBlock); res = buildStmt(c, /*useCurrentScope=*/!isa(c)); + lastCaseBlock = builder.getBlock(); if (res.failed()) break; continue; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 428f8f2211db..e1d894267f74 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1382,31 +1382,32 @@ class CIRSwitchOpLowering fallthroughYieldOp = nullptr; } - // TODO(cir): Handle multi-block case statements. - if (region.getBlocks().size() != 1) - return op->emitError("multi-block case statement is NYI"); - - // Handle switch-case yields. - auto *terminator = region.front().getTerminator(); - if (auto yieldOp = dyn_cast(terminator)) { - // TODO(cir): Ensure every yield instead of dealing with optional - // values. - assert(yieldOp.getKind().has_value() && "switch yield has no kind"); - - switch (yieldOp.getKind().value()) { - // Fallthrough to next case: track it for the next case to handle. - case mlir::cir::YieldOpKind::Fallthrough: - fallthroughYieldOp = yieldOp; - break; - // Break out of switch: branch to exit block. - case mlir::cir::YieldOpKind::Break: - rewriteYieldOp(rewriter, yieldOp, exitBlock); - break; - case mlir::cir::YieldOpKind::Continue: // Continue is handled only in - // loop lowering - break; - default: - return op->emitError("invalid yield kind in case statement"); + for (auto& blk : region.getBlocks()) { + if (blk.getNumSuccessors()) + continue; + + // Handle switch-case yields. + auto *terminator = blk.getTerminator(); + if (auto yieldOp = dyn_cast(terminator)) { + // TODO(cir): Ensure every yield instead of dealing with optional + // values. + assert(yieldOp.getKind().has_value() && "switch yield has no kind"); + + switch (yieldOp.getKind().value()) { + // Fallthrough to next case: track it for the next case to handle. + case mlir::cir::YieldOpKind::Fallthrough: + fallthroughYieldOp = yieldOp; + break; + // Break out of switch: branch to exit block. + case mlir::cir::YieldOpKind::Break: + rewriteYieldOp(rewriter, yieldOp, exitBlock); + break; + case mlir::cir::YieldOpKind::Continue: // Continue is handled only in + // loop lowering + break; + default: + return op->emitError("invalid yield kind in case statement"); + } } } diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 873e197b3f47..b9f7626d8064 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -258,3 +258,19 @@ void sw11(int a) { //CHECK-NEXT: cir.yield break //CHECK-NEXT: } +void sw12(int a) { + switch (a) + { + case 3: + return; + break; + } +} +// CHECK: cir.func @_Z4sw12i +// CHECK: cir.scope { +// CHECK: cir.switch +// CHECK-NEXT: case (equal, 3) { +// CHECK-NEXT: cir.return +// CHECK-NEXT: ^bb1: // no predecessors +// CHECK-NEXT: cir.yield break +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index 1b5c9b387937..08e0ae760080 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -105,4 +105,35 @@ module { // CHECK-NOT: llvm.switch cir.return } + + cir.func @shouldLowerMultiBlockCase(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.scope { + %1 = cir.load %0 : cir.ptr , !s32i + cir.switch (%1 : !s32i) [ + case (equal, 3) { + cir.return + ^bb1: // no predecessors + cir.yield break + } + ] + } + cir.return + } + // CHECK: llvm.func @shouldLowerMultiBlockCase + // CHECK: ^bb1: // pred: ^bb0 + // CHECK: llvm.switch {{.*}} : i32, ^bb4 [ + // CHECK: 3: ^bb2 + // CHECK: ] + // CHECK: ^bb2: // pred: ^bb1 + // CHECK: llvm.return + // CHECK: ^bb3: // no predecessors + // CHECK: llvm.br ^bb4 + // CHECK: ^bb4: // 2 preds: ^bb1, ^bb3 + // CHECK: llvm.br ^bb5 + // CHECK: ^bb5: // pred: ^bb4 + // CHECK: llvm.return + // CHECK: } + } From 72d1edc7ac421d7b8e5c8cbf7799d19ebd353770 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 19 Dec 2023 17:29:23 -0300 Subject: [PATCH 1294/2301] [CIR][NFC] Fix some copy n paste and update comments --- clang/include/clang/CIR/Dialect/Passes.td | 5 +++-- clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 55fe7a32dc1a..26ff43f2be62 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -81,6 +81,9 @@ def IdiomRecognizer : Pass<"cir-idiom-recognizer"> { This pass recognize idiomatic C++ usage and incorporate C++ standard containers, library functions calls, and types into CIR operation, attributes and types. + + Detections done by this pass can be inspected by users by using + remarks. Currently supported are `all` and `found-calls`. }]; let constructor = "mlir::createIdiomRecognizerPass()"; let dependentDialects = ["cir::CIRDialect"]; @@ -89,8 +92,6 @@ def IdiomRecognizer : Pass<"cir-idiom-recognizer"> { ListOption<"remarksList", "remarks", "std::string", "Diagnostic remarks to enable" " Supported styles: {all|found-calls}", "llvm::cl::ZeroOrMore">, - Option<"historyLimit", "history_limit", "unsigned", /*default=*/"1", - "Max amount of diagnostics to emit on pointer history"> ]; } diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index 1dd382d64ad8..c39a3255d29c 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -1,4 +1,4 @@ -//===- IdiomRecognizer.cpp - pareparation work for LLVM lowering ----------===// +//===- IdiomRecognizer.cpp - Recognize and raise C/C++ library calls ------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. From 36c6c3e8ea122e097741ba087db692837f850262 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 19 Dec 2023 17:30:17 -0300 Subject: [PATCH 1295/2301] [CIR][Passes] Introduce cir-lib-opt pass This contains just the skeleton, but the idea is that this pass is going to contain transformations done on top CIR generated by the C/C++ idiom recognizer. --- clang/include/clang/CIR/CIRToCIRPasses.h | 13 ++- clang/include/clang/CIR/Dialect/Passes.h | 2 + clang/include/clang/CIR/Dialect/Passes.td | 19 ++++ clang/include/clang/Driver/Options.td | 8 ++ .../include/clang/Frontend/FrontendOptions.h | 1 + clang/lib/CIR/CodeGen/CIRPasses.cpp | 20 ++-- .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + clang/lib/CIR/Dialect/Transforms/LibOpt.cpp | 102 ++++++++++++++++++ clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 12 +-- clang/test/CIR/Transforms/lib-opt.cpp | 3 + 10 files changed, 161 insertions(+), 20 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/LibOpt.cpp create mode 100644 clang/test/CIR/Transforms/lib-opt.cpp diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index 7994542f6ddf..a1e745c6f096 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -28,13 +28,12 @@ class ModuleOp; namespace cir { // Run set of cleanup/prepare/etc passes CIR <-> CIR. -mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, - mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, - bool enableVerifier, bool enableLifetime, - llvm::StringRef lifetimeOpts, - llvm::StringRef idiomRecognizerOpts, - bool &passOptParsingFailure); +mlir::LogicalResult +runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, + bool enableLifetime, llvm::StringRef lifetimeOpts, + llvm::StringRef idiomRecognizerOpts, + llvm::StringRef libOptOpts, bool &passOptParsingFailure); } // namespace cir #endif // CLANG_CIR_CIRTOCIRPASSES_H_ diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 200fc956d08d..a685ab8ce3fa 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -32,6 +32,8 @@ std::unique_ptr createLoweringPreparePass(); std::unique_ptr createLoweringPreparePass(clang::ASTContext *astCtx); std::unique_ptr createIdiomRecognizerPass(); std::unique_ptr createIdiomRecognizerPass(clang::ASTContext *astCtx); +std::unique_ptr createLibOptPass(); +std::unique_ptr createLibOptPass(clang::ASTContext *astCtx); //===----------------------------------------------------------------------===// // Registration diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 26ff43f2be62..affc28b85003 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -95,4 +95,23 @@ def IdiomRecognizer : Pass<"cir-idiom-recognizer"> { ]; } +def LibOpt : Pass<"cir-lib-opt"> { + let summary = "Optimize C/C++ library calls"; + let description = [{ + By using higher level information from `cir-idiom-recognize`, this pass + apply transformations to CIR based on specific C/C++ library semantics. + + Transformations done by this pass can be inspected by users by using + remarks. Currently supported are `all` and `transforms`. + }]; + let constructor = "mlir::createLibOptPass()"; + let dependentDialects = ["cir::CIRDialect"]; + + let options = [ + ListOption<"remarksList", "remarks", "std::string", + "Diagnostic remarks to enable" + " Supported styles: {all|transforms}", "llvm::cl::ZeroOrMore">, + ]; +} + #endif // MLIR_DIALECT_CIR_PASSES diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index a68a9180ce43..7648a82c0886 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3067,6 +3067,14 @@ def fclangir_idiom_recognizer_EQ : Joined<["-"], "fclangir-idiom-recognizer=">, Visibility<[ClangOption, CC1Option]>, Group, HelpText<"Pass configuration options to CIR idiom recognizer">, MarshallingInfoString>; +def fclangir_lib_opt_EQ : Joined<["-"], "fclangir-lib-opt=">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Enable C/C++ library based optimizations (with options)">, + MarshallingInfoString>; +def fclangir_lib_opt : Flag<["-"], "fclangir-lib-opt">, + Visibility<[ClangOption, CC1Option]>, Group, + Alias, AliasArgs<[""]>, + HelpText<"Enable C/C++ library based optimizations">; def clangir_disable_passes : Flag<["-"], "clangir-disable-passes">, Visibility<[ClangOption, CC1Option]>, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 70e0e718dbaa..5ff8c5e3c1ed 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -512,6 +512,7 @@ class FrontendOptions { std::string ClangIRLifetimeCheckOpts; std::string ClangIRIdiomRecognizerOpts; + std::string ClangIRLibOptOpts; /// The input kind, either specified via -x argument or deduced from the input /// file name. diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 5bcdc787d6ca..f6b4df73578b 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -19,13 +19,12 @@ #include "mlir/Support/LogicalResult.h" namespace cir { -mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, - mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, - bool enableVerifier, bool enableLifetime, - llvm::StringRef lifetimeOpts, - llvm::StringRef idiomRecognizerOpts, - bool &passOptParsingFailure) { +mlir::LogicalResult +runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, + bool enableLifetime, llvm::StringRef lifetimeOpts, + llvm::StringRef idiomRecognizerOpts, + llvm::StringRef libOptOpts, bool &passOptParsingFailure) { mlir::PassManager pm(mlirCtx); passOptParsingFailure = false; @@ -54,6 +53,13 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule, } pm.addPass(std::move(idiomPass)); + auto libOpPass = mlir::createLibOptPass(&astCtx); + if (libOpPass->initializeOptions(libOptOpts, errorHandler).failed()) { + passOptParsingFailure = true; + return mlir::failure(); + } + pm.addPass(std::move(libOpPass)); + pm.addPass(mlir::createLoweringPreparePass(&astCtx)); // FIXME: once CIRCodenAction fixes emission other than CIR we diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 36bfcd3de951..3778bc54b43f 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -4,6 +4,7 @@ add_clang_library(MLIRCIRTransforms MergeCleanups.cpp DropAST.cpp IdiomRecognizer.cpp + LibOpt.cpp DEPENDS MLIRCIRPassIncGen diff --git a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp new file mode 100644 index 000000000000..2b7dd159adaa --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp @@ -0,0 +1,102 @@ +//===- LibOpt.cpp - Optimize CIR raised C/C++ library idioms --------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "PassDetail.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/Region.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Mangle.h" +#include "clang/Basic/Module.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Twine.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/Path.h" + +using cir::CIRBaseBuilderTy; +using namespace mlir; +using namespace mlir::cir; + +namespace { + +struct LibOptPass : public LibOptBase { + LibOptPass() = default; + void runOnOperation() override; + + // Handle pass options + struct Options { + enum : unsigned { + None = 0, + RemarkTransforms = 1, + RemarkAll = 1 << 1, + }; + unsigned val = None; + bool isOptionsParsed = false; + + void parseOptions(ArrayRef remarks) { + if (isOptionsParsed) + return; + + for (auto &remark : remarks) { + val |= StringSwitch(remark) + .Case("transforms", RemarkTransforms) + .Case("all", RemarkAll) + .Default(None); + } + isOptionsParsed = true; + } + + void parseOptions(LibOptPass &pass) { + SmallVector remarks; + + for (auto &r : pass.remarksList) + remarks.push_back(r); + + parseOptions(remarks); + } + + bool emitRemarkAll() { return val & RemarkAll; } + bool emitRemarkTransforms() { + return emitRemarkAll() || val & RemarkTransforms; + } + } opts; + + /// + /// AST related + /// ----------- + clang::ASTContext *astCtx; + void setASTContext(clang::ASTContext *c) { astCtx = c; } + + /// Tracks current module. + ModuleOp theModule; +}; +} // namespace + +void LibOptPass::runOnOperation() { + assert(astCtx && "Missing ASTContext, please construct with the right ctor"); + opts.parseOptions(*this); + auto *op = getOperation(); + if (isa<::mlir::ModuleOp>(op)) + theModule = cast<::mlir::ModuleOp>(op); +} + +std::unique_ptr mlir::createLibOptPass() { + return std::make_unique(); +} + +std::unique_ptr mlir::createLibOptPass(clang::ASTContext *astCtx) { + auto pass = std::make_unique(); + pass->setASTContext(astCtx); + return std::move(pass); +} diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 34e13c6f9c16..e93beea09be2 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -173,19 +173,19 @@ class CIRGenConsumer : public clang::ASTConsumer { auto setupCIRPipelineAndExecute = [&] { // Sanitize passes options. MLIR uses spaces between pass options // and since that's hard to fly in clang, we currently use ';'. - std::string lifetimeOpts; - std::string idiomRecognizerOpts; + std::string lifetimeOpts, idiomRecognizerOpts, libOptOpts; if (feOptions.ClangIRLifetimeCheck) lifetimeOpts = sanitizePassOptions(feOptions.ClangIRLifetimeCheckOpts); idiomRecognizerOpts = sanitizePassOptions(feOptions.ClangIRIdiomRecognizerOpts); + libOptOpts = sanitizePassOptions(feOptions.ClangIRLibOptOpts); // Setup and run CIR pipeline. bool passOptParsingFailure = false; - if (runCIRToCIRPasses(mlirMod, mlirCtx.get(), C, - !feOptions.ClangIRDisableCIRVerifier, - feOptions.ClangIRLifetimeCheck, lifetimeOpts, - idiomRecognizerOpts, passOptParsingFailure) + if (runCIRToCIRPasses( + mlirMod, mlirCtx.get(), C, !feOptions.ClangIRDisableCIRVerifier, + feOptions.ClangIRLifetimeCheck, lifetimeOpts, idiomRecognizerOpts, + libOptOpts, passOptParsingFailure) .failed()) { if (passOptParsingFailure) diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) diff --git a/clang/test/CIR/Transforms/lib-opt.cpp b/clang/test/CIR/Transforms/lib-opt.cpp new file mode 100644 index 000000000000..3ee5a2ab49c5 --- /dev/null +++ b/clang/test/CIR/Transforms/lib-opt.cpp @@ -0,0 +1,3 @@ +// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o - 2>&1 | FileCheck %s -check-prefix=CIR + +// CIR: IR Dump After LibOpt (cir-lib-opt) \ No newline at end of file From d9065700553e61f1d2939682e850caedcf489249 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 20 Dec 2023 11:08:31 -0300 Subject: [PATCH 1296/2301] [CIR] Fix issues pointed by github CI Specify proper target triples to prevent issues on both Windows and MacOS regarding non-implemented ABI bits. --- clang/test/CIR/Transforms/idiom-recognizer.cpp | 10 +++++----- clang/test/CIR/Transforms/lib-opt.cpp | 2 +- clang/test/CIR/mlirprint.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/clang/test/CIR/Transforms/idiom-recognizer.cpp b/clang/test/CIR/Transforms/idiom-recognizer.cpp index 86e4a388b423..87b95fa5f900 100644 --- a/clang/test/CIR/Transforms/idiom-recognizer.cpp +++ b/clang/test/CIR/Transforms/idiom-recognizer.cpp @@ -1,9 +1,9 @@ -// RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=PASS_ENABLED -// RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -fclangir-idiom-recognizer="remarks=found-calls" -clangir-verify-diagnostics -std=c++20 -triple x86_64-unknown-linux-gnu %s -o %t2.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=PASS_ENABLED +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -fclangir-idiom-recognizer="remarks=found-calls" -clangir-verify-diagnostics %s -o %t2.cir -// RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-before=cir-idiom-recognizer -std=c++20 -triple x86_64-unknown-linux-gnu %s -o - 2>&1 | FileCheck %s -check-prefix=BEFORE-IDIOM -// RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-idiom-recognizer -std=c++20 -triple x86_64-unknown-linux-gnu %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-IDIOM -// RUN: %clang_cc1 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-lowering-prepare -std=c++20 -triple x86_64-unknown-linux-gnu %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-LOWERING-PREPARE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-before=cir-idiom-recognizer %s -o - 2>&1 | FileCheck %s -check-prefix=BEFORE-IDIOM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-idiom-recognizer %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-IDIOM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-LOWERING-PREPARE // PASS_ENABLED: IR Dump After IdiomRecognizer (cir-idiom-recognizer) diff --git a/clang/test/CIR/Transforms/lib-opt.cpp b/clang/test/CIR/Transforms/lib-opt.cpp index 3ee5a2ab49c5..e1cfa30dabe4 100644 --- a/clang/test/CIR/Transforms/lib-opt.cpp +++ b/clang/test/CIR/Transforms/lib-opt.cpp @@ -1,3 +1,3 @@ -// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o - 2>&1 | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o - 2>&1 | FileCheck %s -check-prefix=CIR // CIR: IR Dump After LibOpt (cir-lib-opt) \ No newline at end of file diff --git a/clang/test/CIR/mlirprint.c b/clang/test/CIR/mlirprint.c index 35e5a2ff49ac..f61f2244af23 100644 --- a/clang/test/CIR/mlirprint.c +++ b/clang/test/CIR/mlirprint.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR -// RUN: %clang_cc1 -fclangir -emit-llvm -mmlir --mlir-print-ir-after-all -mllvm -print-after-all %s -o %t.ll 2>&1 | FileCheck %s -check-prefix=CIR -check-prefix=LLVM -// RUN: %clang_cc1 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-drop-ast %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIRPASS +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -mmlir --mlir-print-ir-after-all -mllvm -print-after-all %s -o %t.ll 2>&1 | FileCheck %s -check-prefix=CIR -check-prefix=LLVM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-drop-ast %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIRPASS int foo(void) { int i = 3; From 8eca34ce433054387f0a5e1da44047518b9e6534 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 20 Dec 2023 12:05:39 -0300 Subject: [PATCH 1297/2301] [CIR] Add iterator_{begin,end} ops Only a skeleton for incremental work. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 29 ++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 286200844638..204df6ae5de3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2337,6 +2337,35 @@ def StdFindOp : CIR_Op<"std.find", [SameFirstSecondOperandAndResultType]> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// IterBegin/End +//===----------------------------------------------------------------------===// + +def IterBeginOp : CIR_Op<"iterator_begin"> { + let arguments = (ins FlatSymbolRefAttr:$original_fn, AnyType:$container); + let summary = "Returns an iterator to the first element of a container"; + let results = (outs AnyType:$result); + let assemblyFormat = [{ + `(` + $original_fn `,` $container `:` type($container) + `)` `->` type($result) attr-dict + }]; + let hasVerifier = 0; +} + +def IterEndOp : CIR_Op<"iterator_end"> { + let arguments = (ins FlatSymbolRefAttr:$original_fn, AnyType:$container); + let summary = "Returns an iterator to the element following the last element" + " of a container"; + let results = (outs AnyType:$result); + let assemblyFormat = [{ + `(` + $original_fn `,` $container `:` type($container) + `)` `->` type($result) attr-dict + }]; + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // FAbsOp //===----------------------------------------------------------------------===// From 9bdf0c7583f4ed49d5f332b05950d52c9b0627f1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 20 Dec 2023 12:57:30 -0300 Subject: [PATCH 1298/2301] [CIR][IdiomRecognizer] Recognize few variations for begin/end iterators Initial step into modeling iterators in CIR. Right now it only looks at the member functions with .begin/.end function calls, it does not look at the iterator type, has no notion of forward/reverse iterators, nor filters based on the container types - those improvements will come next. --- .../clang/CIR/Interfaces/ASTAttrInterfaces.td | 27 +++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 1 + .../Dialect/Transforms/IdiomRecognizer.cpp | 39 ++++++++++++++++++- .../Dialect/Transforms/LoweringPrepare.cpp | 31 ++++++++++++++- .../test/CIR/Transforms/idiom-recognizer.cpp | 13 ++++++- 6 files changed, 109 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td index 328c2876ed2e..60f6b2b16c90 100644 --- a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td @@ -232,6 +232,33 @@ let cppNamespace = "::mlir::cir" in { return false; return true; }] + >, + InterfaceMethod<"", "bool", "isMemberCallTo", + (ins "llvm::StringRef":$fn), + [{}], /*defaultImplementation=*/ [{ + auto memberCall = dyn_cast($_attr.getAst()); + if (!memberCall) + return false; + auto methodDecl = memberCall->getMethodDecl(); + if (!methodDecl) + return false; + if (!methodDecl->getIdentifier() || + methodDecl->getName().compare(fn) != 0) + return false; + return true; + }] + >, + InterfaceMethod<"", "bool", "isIteratorBeginCall", + (ins), + [{}], /*defaultImplementation=*/ [{ + return isMemberCallTo("begin"); + }] + >, + InterfaceMethod<"", "bool", "isIteratorEndCall", + (ins), + [{}], /*defaultImplementation=*/ [{ + return isMemberCallTo("end"); + }] > ]; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 287ed88872a8..3e318abdbcd1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -92,7 +92,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorCall( assert((CE || currSrcLoc) && "expected source location"); mlir::Location loc = CE ? getLoc(CE->getExprLoc()) : *currSrcLoc; return buildCall(FnInfo, Callee, ReturnValue, Args, nullptr, - CE && CE == MustTailCall, loc); + CE && CE == MustTailCall, loc, CE); } // TODO(cir): this can be shared with LLVM codegen diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 04f2c405f9ee..d805b81bac07 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -31,6 +31,7 @@ // ClangIR holds back AST references when available. #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/ExprCXX.h" static void printStructMembers(mlir::AsmPrinter &p, mlir::ArrayAttr members); static mlir::ParseResult parseStructMembers(::mlir::AsmParser &parser, diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index c39a3255d29c..48894c9eb302 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -35,6 +35,7 @@ struct IdiomRecognizerPass : public IdiomRecognizerBase { void runOnOperation() override; void recognizeCall(CallOp call); void raiseStdFind(CallOp call); + void raiseIteratorBeginEnd(CallOp call); // Handle pass options struct Options { @@ -108,7 +109,43 @@ void IdiomRecognizerPass::raiseStdFind(CallOp call) { call.erase(); } -void IdiomRecognizerPass::recognizeCall(CallOp call) { raiseStdFind(call); } +void IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) { + // FIXME: tablegen all of this function. + if (call.getNumOperands() != 1) + return; + + auto callExprAttr = call.getAstAttr(); + if (!callExprAttr) + return; + + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(call.getOperation()); + + mlir::Operation *iterOp; + if (callExprAttr.isIteratorBeginCall()) { + if (opts.emitRemarkFoundCalls()) + emitRemark(call.getLoc()) << "found call to begin() iterator"; + iterOp = builder.create( + call.getLoc(), call.getResult(0).getType(), call.getCalleeAttr(), + call.getOperand(0)); + } else if (callExprAttr.isIteratorEndCall()) { + if (opts.emitRemarkFoundCalls()) + emitRemark(call.getLoc()) << "found call to end() iterator"; + iterOp = builder.create( + call.getLoc(), call.getResult(0).getType(), call.getCalleeAttr(), + call.getOperand(0)); + } else { + return; + } + + call.replaceAllUsesWith(iterOp); + call.erase(); +} + +void IdiomRecognizerPass::recognizeCall(CallOp call) { + raiseIteratorBeginEnd(call); + raiseStdFind(call); +} void IdiomRecognizerPass::runOnOperation() { assert(astCtx && "Missing ASTContext, please construct with the right ctor"); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index c87afea8be52..63148b74c4ca 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -69,6 +69,8 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerGetBitfieldOp(GetBitfieldOp op); void lowerSetBitfieldOp(SetBitfieldOp op); void lowerStdFindOp(StdFindOp op); + void lowerIterBeginOp(IterBeginOp op); + void lowerIterEndOp(IterEndOp op); /// Build the function that initializes the specified global FuncOp buildCXXGlobalVarDeclInitFunc(GlobalOp op); @@ -418,6 +420,28 @@ void LoweringPreparePass::lowerStdFindOp(StdFindOp op) { op.erase(); } +void LoweringPreparePass::lowerIterBeginOp(IterBeginOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + auto call = builder.create( + op.getLoc(), op.getOriginalFnAttr(), op.getResult().getType(), + mlir::ValueRange{op.getOperand()}); + + op.replaceAllUsesWith(call); + op.erase(); +} + +void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + auto call = builder.create( + op.getLoc(), op.getOriginalFnAttr(), op.getResult().getType(), + mlir::ValueRange{op.getOperand()}); + + op.replaceAllUsesWith(call); + op.erase(); +} + void LoweringPreparePass::runOnOp(Operation *op) { if (auto getGlobal = dyn_cast(op)) { lowerGlobalOp(getGlobal); @@ -427,6 +451,10 @@ void LoweringPreparePass::runOnOp(Operation *op) { lowerSetBitfieldOp(setBitfield); } else if (auto stdFind = dyn_cast(op)) { lowerStdFindOp(stdFind); + } else if (auto iterBegin = dyn_cast(op)) { + lowerIterBeginOp(iterBegin); + } else if (auto iterEnd = dyn_cast(op)) { + lowerIterEndOp(iterEnd); } } @@ -439,7 +467,8 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { - if (isa(op)) + if (isa(op)) opsToTransform.push_back(op); }); diff --git a/clang/test/CIR/Transforms/idiom-recognizer.cpp b/clang/test/CIR/Transforms/idiom-recognizer.cpp index 87b95fa5f900..83fa5a2a1a13 100644 --- a/clang/test/CIR/Transforms/idiom-recognizer.cpp +++ b/clang/test/CIR/Transforms/idiom-recognizer.cpp @@ -14,11 +14,22 @@ int test_find(unsigned char n = 3) unsigned num_found = 0; std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; auto f = std::find(v.begin(), v.end(), n); // expected-remark {{found call to std::find()}} + // expected-remark@-1 {{found call to begin() iterator}} + // expected-remark@-2 {{found call to end() iterator}} + + // BEFORE-IDIOM: {{.*}} cir.call @_ZNSt5arrayIhLj9EE5beginEv( + // AFTER-IDIOM: {{.*}} cir.iterator_begin(@_ZNSt5arrayIhLj9EE5beginEv, + // AFTER-LOWERING-PREPARE: {{.*}} cir.call @_ZNSt5arrayIhLj9EE5beginEv( + + // BEFORE-IDIOM: {{.*}} cir.call @_ZNSt5arrayIhLj9EE3endEv( + // AFTER-IDIOM: {{.*}} cir.iterator_end(@_ZNSt5arrayIhLj9EE3endEv, + // AFTER-LOWERING-PREPARE: {{.*}} cir.call @_ZNSt5arrayIhLj9EE3endEv( + // BEFORE-IDIOM: {{.*}} cir.call @_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_( // AFTER-IDIOM: {{.*}} cir.std.find(@_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_, // AFTER-LOWERING-PREPARE: {{.*}} cir.call @_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_( - if (f != v.end()) + if (f != v.end()) // expected-remark {{found call to end() iterator}} num_found++; return num_found; } \ No newline at end of file From 6f03980243042704bcd11f213fc74f95dcd8a6fe Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Thu, 21 Dec 2023 17:44:11 +0300 Subject: [PATCH 1299/2301] [CIR][CodeGen] Use signed type for result of ptrdiff operation. (#355) Before this fix attached test case has been failing due to type mismatch (signed vs unsigned). --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 ++++ clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 3 ++- clang/test/CIR/CodeGen/ptr_diff.cpp | 15 +++++++++++++-- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 4218e06f724e..72a781a365d8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -162,6 +162,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // TODO: ConstGlobalsPtrTy // TODO: ASTAllocaAddressSpace + PtrDiffTy = ::mlir::cir::IntType::get( + builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth(), + /*isSigned=*/true); + mlir::cir::sob::SignedOverflowBehavior sob; switch (langOpts.getSignedOverflowBehavior()) { case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Defined: diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index cea3f07922e0..ac3442626ca8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -49,9 +49,10 @@ struct CIRGenTypeCache { union { mlir::Type UIntPtrTy; mlir::Type SizeTy; - mlir::Type PtrDiffTy; }; + mlir::Type PtrDiffTy; + /// void* in address space 0 mlir::cir::PointerType VoidPtrTy; mlir::cir::PointerType UInt8PtrTy; diff --git a/clang/test/CIR/CodeGen/ptr_diff.cpp b/clang/test/CIR/CodeGen/ptr_diff.cpp index 924162d3c790..ebaa5ec6bfac 100644 --- a/clang/test/CIR/CodeGen/ptr_diff.cpp +++ b/clang/test/CIR/CodeGen/ptr_diff.cpp @@ -9,5 +9,16 @@ size_type size(unsigned long *_start, unsigned long *_finish) { // CHECK: cir.func @_Z4sizePmS_(%arg0: !cir.ptr // CHECK: %3 = cir.load %1 : cir.ptr >, !cir.ptr // CHECK: %4 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %5 = cir.ptr_diff(%3, %4) : !cir.ptr -> !u64i - \ No newline at end of file +// CHECK: %5 = cir.ptr_diff(%3, %4) : !cir.ptr -> !s64i +// CHECK: %6 = cir.cast(integral, %5 : !s64i), !u64i + +long add(char *a, char *b) { + return a - b + 1; +} + +// CHECK: cir.func @_Z3addPcS_(%arg0: !cir.ptr +// %5 = cir.ptr_diff(%3, %4) : !cir.ptr -> !s64i +// %6 = cir.const(#cir.int<1> : !s32i) : !s32i +// %7 = cir.cast(integral, %6 : !s32i), !s64i +// %8 = cir.binop(add, %5, %7) : !s64i + From a7371c6b024b2d2129865a4899c38f999f3771f4 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 21 Dec 2023 17:46:55 +0300 Subject: [PATCH 1300/2301] [CIR] support -std=gnu89 (#358) Tiny PR, support `-std=gnu89` option This is quite frequent bug in `llvm-test-suite` --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 3 ++- clang/test/CIR/CodeGen/gnu89.c | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/gnu89.c diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 72a781a365d8..e7d3a8f521cf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2664,7 +2664,8 @@ mlir::cir::SourceLanguage CIRGenModule::getCIRSourceLanguage() { opts.CPlusPlus26) return CIRLang::CXX; if (opts.C99 || opts.C11 || opts.C17 || opts.C23 || - opts.LangStd == ClangStd::lang_c89) + opts.LangStd == ClangStd::lang_c89 || + opts.LangStd == ClangStd::lang_gnu89) return CIRLang::C; // TODO(cir): support remaining source languages. diff --git a/clang/test/CIR/CodeGen/gnu89.c b/clang/test/CIR/CodeGen/gnu89.c new file mode 100644 index 000000000000..5254576779aa --- /dev/null +++ b/clang/test/CIR/CodeGen/gnu89.c @@ -0,0 +1,5 @@ +// RUN: %clang_cc1 -std=gnu89 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo() {} +//CHECK: cir.func {{.*@foo}} \ No newline at end of file From 59adf17c7349e75632bdff52415a65b111e77377 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 21 Dec 2023 17:47:30 +0300 Subject: [PATCH 1301/2301] [CIR][CodeGen] support extern var in function (#359) This PR "adds" the support of extern vars in function body. Actually, I just erased an assert. Any reason it was there? ``` int foo() { extern int optind; return optind; } ``` This is quite frequent bug in `llvm-test-suite` --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 1 - clang/test/CIR/CodeGen/globals.c | 8 ++++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 1fc2e923b2b1..77dae8cfb878 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -330,7 +330,6 @@ void CIRGenFunction::buildAutoVarDecl(const VarDecl &D) { void CIRGenFunction::buildVarDecl(const VarDecl &D) { if (D.hasExternalStorage()) { - assert(0 && "should we just returns is there something to track?"); // Don't emit it now, allow it to be emitted lazily on its first use. return; } diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 3faf4e9f2548..bc1535488334 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -75,6 +75,14 @@ struct { int *x; } q2 = {q}; // CHECK: cir.global external @q1 = #cir.global_view<@q> : !cir.ptr // CHECK: cir.global external @q2 = #cir.const_struct<{#cir.global_view<@q> : !cir.ptr}> : !ty_22anon2E1322 +int foo() { + extern int optind; + return optind; +} +// CHECK: cir.global "private" external @optind : !s32i +// CHECK: cir.func {{.*@foo}} +// CHECK: {{.*}} = cir.get_global @optind : cir.ptr + // TODO: test tentatives with internal linkage. // Tentative definition is THE definition. Should be zero-initialized. From 7d7d01470747b61d1ce85f37c7f7489f8caecd58 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 21 Dec 2023 17:48:43 +0300 Subject: [PATCH 1302/2301] [CIR][Codegen] Adds Stack save-restore ops (#346) This PR adds `cir.stack_save` and `cir.stack_restore` operations. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 37 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 9 +++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 30 ++++++++++++++- clang/test/CIR/IR/invalid.cir | 24 +++++++++++- clang/test/CIR/IR/stack-save-restore.cir | 23 ++++++++++++ .../test/CIR/Lowering/stack-save-restore.cir | 19 ++++++++++ 6 files changed, 140 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/IR/stack-save-restore.cir create mode 100644 clang/test/CIR/Lowering/stack-save-restore.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 204df6ae5de3..3515196f7acc 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2517,6 +2517,43 @@ def ThrowOp : CIR_Op<"throw", let hasVerifier = 1; } +def StackSaveOp : CIR_Op<"stack_save"> { + let summary = "remembers the current state of the function stack"; + let description = [{ + Remembers the current state of the function stack. Returns a pointer + that later can be passed into cir.stack_restore. + Useful for implementing language features like variable length arrays. + + ```mlir + %0 = cir.stack_save : + ``` + + }]; + + let results = (outs CIR_PointerType:$result); + let assemblyFormat = "attr-dict `:` qualified(type($result))"; +} + +def StackRestoreOp : CIR_Op<"stack_restore"> { + let summary = "restores the state of the function stack"; + let description = [{ + Restore the state of the function stack to the state it was + in when the corresponding cir.stack_save executed. + Useful for implementing language features like variable length arrays. + + ```mlir + %0 = cir.alloca !cir.ptr, cir.ptr >, ["saved_stack"] {alignment = 8 : i64} + %1 = cir.stack_save : + cir.store %1, %0 : !cir.ptr, cir.ptr > + %2 = cir.load %0 : cir.ptr >, !cir.ptr + cir.stack_restore %2 : !cir.ptr + ``` + }]; + + let arguments = (ins CIR_PointerType:$ptr); + let assemblyFormat = "$ptr attr-dict `:` qualified(type($ptr))"; +} + //===----------------------------------------------------------------------===// // Operations Lowered Directly to LLVM IR // diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index f0a9a5f935ce..eb365bc3ac96 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -776,6 +776,15 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { assert(SubType); computeGlobalViewIndicesFromFlatOffset(Offset, SubType, Layout, Indices); } + + mlir::cir::StackSaveOp createStackSave(mlir::Location loc, mlir::Type ty) { + return create(loc, ty); + } + + mlir::cir::StackRestoreOp createStackRestore(mlir::Location loc, mlir::Value v) { + return create(loc, v); + } + }; } // namespace cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e1d894267f74..8b38cbcd7bf7 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2034,6 +2034,33 @@ class CIRVTableAddrPointOpLowering } }; +class CIRStackSaveLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::StackSaveOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto ptrTy = getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, ptrTy); + return mlir::success(); + } +}; + +class CIRStackRestoreLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::StackRestoreOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, + adaptor.getPtr()); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -2048,7 +2075,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRGetMemberOpLowering, CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRVTableAddrPointOpLowering, - CIRVectorCreateLowering, CIRVectorExtractLowering>( + CIRVectorCreateLowering, CIRVectorExtractLowering, + CIRStackSaveLowering, CIRStackRestoreLowering>( converter, patterns.getContext()); } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index d122be4d0a34..278909d59850 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -763,4 +763,26 @@ module { %0 = cir.alloca !s32i, cir.ptr , %arg0 : f32, ["tmp"] cir.return } -} \ No newline at end of file +} + +// ----- + +!u8i = !cir.int +module { + cir.func @stack_save_type_mismatch() { + // expected-error@+1 {{must be CIR pointer type}} + %1 = cir.stack_save : !u8i + cir.return + } +} +// ----- + +!u8i = !cir.int +module { + cir.func @stack_restore_type_mismatch(%arg0 : !u8i) { + // expected-error@+1 {{must be CIR pointer type}} + cir.stack_restore %arg0 : !u8i + cir.return + } +} + diff --git a/clang/test/CIR/IR/stack-save-restore.cir b/clang/test/CIR/IR/stack-save-restore.cir new file mode 100644 index 000000000000..f6027258786d --- /dev/null +++ b/clang/test/CIR/IR/stack-save-restore.cir @@ -0,0 +1,23 @@ +// Test the CIR operations can parse and print correctly (roundtrip) + +// RUN: cir-opt %s | cir-opt | FileCheck %s + +!u8i = !cir.int + +module { + cir.func @stack_save_restore() { + %0 = cir.stack_save : !cir.ptr + cir.stack_restore %0 : !cir.ptr + cir.return + } +} + +//CHECK: module { + +//CHECK-NEXT: cir.func @stack_save_restore() { +//CHECK-NEXT: %0 = cir.stack_save : !cir.ptr +//CHECK-NEXT: cir.stack_restore %0 : !cir.ptr +//CHECK-NEXT: cir.return +//CHECK-NEXT: } + +//CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/stack-save-restore.cir b/clang/test/CIR/Lowering/stack-save-restore.cir new file mode 100644 index 000000000000..ad9dee66b53f --- /dev/null +++ b/clang/test/CIR/Lowering/stack-save-restore.cir @@ -0,0 +1,19 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR + +!u8i = !cir.int + +module { + cir.func @stack_save() { + %0 = cir.stack_save : !cir.ptr + cir.stack_restore %0 : !cir.ptr + cir.return + } +} + +// MLIR: module { +// MLIR-NEXT: llvm.func @stack_save +// MLIR-NEXT: %0 = llvm.intr.stacksave : !llvm.ptr +// MLIR-NEXT: llvm.intr.stackrestore %0 : !llvm.ptr +// MLIR-NEXT: llvm.return +// MLIR-NEXT: } +// MLIR-NEXT: } From af0d785c6a8d06b343026ddedb9d3ff43992bbce Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 21 Dec 2023 17:49:40 +0300 Subject: [PATCH 1303/2301] [CIR][Lowering][Bugfix] Lower nested breaks in switch statements (#357) This PR fixes lowering of the next code: ``` void foo(int x, int y) { switch (x) { case 0: if (y) break; break; } } ``` i.e. when some sub statement contains `break` as well. Previously, we did this trick for `loop`: process nested `break`/`continue` statements while `LoopOp` lowering if they don't belong to another `LoopOp` or `SwitchOp`. This is why there is some refactoring here as well, but the idea is stiil the same: we need to process nested operations and emit branches to the proper blocks. This is quite frequent bug in `llvm-test-suite` --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 100 ++++++++---------- clang/test/CIR/Lowering/switch.cir | 46 ++++++++ 2 files changed, 88 insertions(+), 58 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8b38cbcd7bf7..0dbc64baa491 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -324,6 +324,35 @@ mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { }; } +static void lowerNestedYield(mlir::cir::YieldOpKind targetKind, + mlir::ConversionPatternRewriter &rewriter, + mlir::Region &body, + mlir::Block *dst) { + // top-level yields are lowered in matchAndRewrite of the parent operations + auto isNested = [&](mlir::Operation *op) { + return op->getParentRegion() != &body; + }; + + body.walk( + [&](mlir::Operation *op) { + if (!isNested(op)) + return mlir::WalkResult::advance(); + + // don't process breaks/continues in nested loops and switches + if (isa(*op)) + return mlir::WalkResult::skip(); + + auto yield = dyn_cast(*op); + if (yield && yield.getKind() == targetKind) { + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp(op, yield.getArgs(), dst); + } + + return mlir::WalkResult::advance(); + }); +} + + class CIRCopyOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -398,57 +427,6 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { return mlir::success(); } - void makeYieldIf(mlir::cir::YieldOpKind kind, mlir::cir::YieldOp &op, - mlir::Block *to, - mlir::ConversionPatternRewriter &rewriter) const { - if (op.getKind() == kind) { - rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp(op, op.getArgs(), to); - } - } - - void - lowerNestedBreakContinue(mlir::Region &loopBody, mlir::Block *exitBlock, - mlir::Block *continueBlock, - mlir::ConversionPatternRewriter &rewriter) const { - // top-level yields are lowered in matchAndRewrite - auto isNested = [&](mlir::Operation *op) { - return op->getParentRegion() != &loopBody; - }; - - auto processBreak = [&](mlir::Operation *op) { - if (!isNested(op)) - return mlir::WalkResult::advance(); - - if (isa( - *op)) // don't process breaks in nested loops and switches - return mlir::WalkResult::skip(); - - if (auto yield = dyn_cast(*op)) - makeYieldIf(mlir::cir::YieldOpKind::Break, yield, exitBlock, rewriter); - - return mlir::WalkResult::advance(); - }; - - auto processContinue = [&](mlir::Operation *op) { - if (!isNested(op)) - return mlir::WalkResult::advance(); - - if (isa( - *op)) // don't process continues in nested loops - return mlir::WalkResult::skip(); - - if (auto yield = dyn_cast(*op)) - makeYieldIf(mlir::cir::YieldOpKind::Continue, yield, continueBlock, - rewriter); - - return mlir::WalkResult::advance(); - }; - - loopBody.walk(processBreak); - loopBody.walk(processContinue); - } - mlir::LogicalResult matchAndRewrite(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { @@ -478,7 +456,10 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { dyn_cast(stepRegion.back().getTerminator()); auto &stepBlock = (kind == LoopKind::For ? stepFrontBlock : condFrontBlock); - lowerNestedBreakContinue(bodyRegion, continueBlock, &stepBlock, rewriter); + lowerNestedYield(mlir::cir::YieldOpKind::Break, + rewriter, bodyRegion, continueBlock); + lowerNestedYield(mlir::cir::YieldOpKind::Continue, + rewriter, bodyRegion, &stepBlock); // Move loop op region contents to current CFG. rewriter.inlineRegionBefore(condRegion, continueBlock); @@ -713,7 +694,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } }; -static bool isLoopYield(mlir::cir::YieldOp &op) { +static bool isBreakOrContinue(mlir::cir::YieldOp &op) { return op.getKind() == mlir::cir::YieldOpKind::Break || op.getKind() == mlir::cir::YieldOpKind::Continue; } @@ -746,8 +727,8 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(thenAfterBody); if (auto thenYieldOp = dyn_cast(thenAfterBody->getTerminator())) { - if (!isLoopYield(thenYieldOp)) // lowering of parent loop yields is - // deferred to loop lowering + if (!isBreakOrContinue(thenYieldOp)) // lowering of parent loop yields is + // deferred to loop lowering rewriter.replaceOpWithNewOp( thenYieldOp, thenYieldOp.getArgs(), continueBlock); } else if (!dyn_cast(thenAfterBody->getTerminator())) { @@ -777,8 +758,8 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(elseAfterBody); if (auto elseYieldOp = dyn_cast(elseAfterBody->getTerminator())) { - if (!isLoopYield(elseYieldOp)) // lowering of parent loop yields is - // deferred to loop lowering + if (!isBreakOrContinue(elseYieldOp)) // lowering of parent loop yields is + // deferred to loop lowering rewriter.replaceOpWithNewOp( elseYieldOp, elseYieldOp.getArgs(), continueBlock); } else if (!dyn_cast( @@ -839,7 +820,7 @@ class CIRScopeOpLowering rewriter.setInsertionPointToEnd(afterBody); auto yieldOp = cast(afterBody->getTerminator()); - if (!isLoopYield(yieldOp)) { + if (!isBreakOrContinue(yieldOp)) { auto branchOp = rewriter.replaceOpWithNewOp( yieldOp, yieldOp.getArgs(), continueBlock); @@ -1411,6 +1392,9 @@ class CIRSwitchOpLowering } } + lowerNestedYield(mlir::cir::YieldOpKind::Break, + rewriter, region, exitBlock); + // Extract region contents before erasing the switch op. rewriter.inlineRegionBefore(region, exitBlock); } diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index 08e0ae760080..31a70d567caa 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -136,4 +136,50 @@ module { // CHECK: llvm.return // CHECK: } + cir.func @shouldLowerNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.store %arg1, %1 : !s32i, cir.ptr + cir.scope { + %5 = cir.load %0 : cir.ptr , !s32i + cir.switch (%5 : !s32i) [ + case (equal, 0) { + cir.scope { + %6 = cir.load %1 : cir.ptr , !s32i + %7 = cir.const(#cir.int<0> : !s32i) : !s32i + %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i + %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool + cir.if %9 { + cir.yield break + } + } + cir.yield break + } + ] + } + %3 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + %4 = cir.load %2 : cir.ptr , !s32i + cir.return %4 : !s32i + } + // CHECK: llvm.func @shouldLowerNestedBreak + // CHECK: llvm.switch %6 : i32, ^bb7 [ + // CHECK: 0: ^bb2 + // CHECK: ] + // CHECK: ^bb2: // pred: ^bb1 + // CHECK: llvm.br ^bb3 + // CHECK: ^bb3: // pred: ^bb2 + // CHECK: llvm.cond_br %14, ^bb4, ^bb5 + // CHECK: ^bb4: // pred: ^bb3 + // CHECK: llvm.br ^bb7 + // CHECK: ^bb5: // pred: ^bb3 + // CHECK: llvm.br ^bb6 + // CHECK: ^bb6: // pred: ^bb5 + // CHECK: llvm.br ^bb7 + // CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 + // CHECK: llvm.br ^bb8 + // CHECK: ^bb8: // pred: ^bb7 + // CHECK: llvm.return } From ac00281ab665edbab578c2754d48d82219d28aeb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 21 Dec 2023 14:11:58 -0300 Subject: [PATCH 1304/2301] [CIR] Change mock std::array iterator definitions This is how both libc++ and libstdc++ implement iterator in std::array, stick to those use cases for now. We could add other variations in the future if there are others around. --- clang/test/CIR/CodeGen/std-array.cpp | 2 +- clang/test/CIR/CodeGen/std-find.cpp | 6 +++--- clang/test/CIR/Inputs/std-cxx.h | 9 ++------- clang/test/CIR/Transforms/idiom-recognizer.cpp | 10 +++++----- 4 files changed, 11 insertions(+), 16 deletions(-) diff --git a/clang/test/CIR/CodeGen/std-array.cpp b/clang/test/CIR/CodeGen/std-array.cpp index 902f2c44c6e8..ac4b119bdeb0 100644 --- a/clang/test/CIR/CodeGen/std-array.cpp +++ b/clang/test/CIR/CodeGen/std-array.cpp @@ -14,4 +14,4 @@ void t() { // CHECK: {{.*}} = cir.cast(array_to_ptrdecay // CHECK: {{.*}} = cir.const(#cir.int<9> : !u32i) : !u32i -// CHECK: cir.call @_ZNSt5arrayIhLj9EE8iteratorC1EPh \ No newline at end of file +// CHECK: cir.call @_ZNSt5arrayIhLj9EE3endEv \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/std-find.cpp b/clang/test/CIR/CodeGen/std-find.cpp index a04ffd79f41c..3b043a6e3766 100644 --- a/clang/test/CIR/CodeGen/std-find.cpp +++ b/clang/test/CIR/CodeGen/std-find.cpp @@ -15,12 +15,12 @@ int test_find(unsigned char n = 3) auto f = std::find(v.begin(), v.end(), n); // CHECK: {{.*}} cir.call @_ZNSt5arrayIhLj9EE5beginEv(%[[array_addr]]) // CHECK: {{.*}} cir.call @_ZNSt5arrayIhLj9EE3endEv(%[[array_addr]]) - // CHECK: {{.*}} cir.call @_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_( + // CHECK: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( if (f != v.end()) num_found++; - // CHECK: {{.*}} cir.call @_ZNSt5arrayIhLj9EE3endEv(%[[array_addr]] - // CHECK: %[[neq_cmp:.*]] = cir.call @_ZNSt5arrayIhLj9EE8iteratorneES1_( + // CHECK: cir.call @_ZNSt5arrayIhLj9EE3endEv(%[[array_addr]] + // CHECK: %[[neq_cmp:.*]] = cir.cmp // CHECK: cir.if %[[neq_cmp]] return num_found; diff --git a/clang/test/CIR/Inputs/std-cxx.h b/clang/test/CIR/Inputs/std-cxx.h index b4eccca352b0..1697e311bcb3 100644 --- a/clang/test/CIR/Inputs/std-cxx.h +++ b/clang/test/CIR/Inputs/std-cxx.h @@ -1312,13 +1312,8 @@ template template struct array { T arr[N]; - struct iterator { - T *p; - constexpr explicit iterator(T *p) : p(p) {} - constexpr bool operator!=(iterator o) { return p != o.p; } - constexpr iterator &operator++() { ++p; return *this; } - constexpr T &operator*() { return *p; } - }; + typedef T value_type; + typedef value_type* iterator; constexpr iterator begin() { return iterator(arr); } constexpr iterator end() { return iterator(arr + N); } }; diff --git a/clang/test/CIR/Transforms/idiom-recognizer.cpp b/clang/test/CIR/Transforms/idiom-recognizer.cpp index 83fa5a2a1a13..dca74f7fc451 100644 --- a/clang/test/CIR/Transforms/idiom-recognizer.cpp +++ b/clang/test/CIR/Transforms/idiom-recognizer.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=PASS_ENABLED -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -fclangir-idiom-recognizer="remarks=found-calls" -clangir-verify-diagnostics %s -o %t2.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after-all %s -o - 2>&1 | FileCheck %s -check-prefix=PASS_ENABLED +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -fclangir-idiom-recognizer="remarks=found-calls" -clangir-verify-diagnostics %s -o %t.cir // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-before=cir-idiom-recognizer %s -o - 2>&1 | FileCheck %s -check-prefix=BEFORE-IDIOM // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-idiom-recognizer %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-IDIOM @@ -25,9 +25,9 @@ int test_find(unsigned char n = 3) // AFTER-IDIOM: {{.*}} cir.iterator_end(@_ZNSt5arrayIhLj9EE3endEv, // AFTER-LOWERING-PREPARE: {{.*}} cir.call @_ZNSt5arrayIhLj9EE3endEv( - // BEFORE-IDIOM: {{.*}} cir.call @_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_( - // AFTER-IDIOM: {{.*}} cir.std.find(@_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_, - // AFTER-LOWERING-PREPARE: {{.*}} cir.call @_ZSt4findINSt5arrayIhLj9EE8iteratorEhET_S3_S3_RKT0_( + // BEFORE-IDIOM: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( + // AFTER-IDIOM: {{.*}} cir.std.find(@_ZSt4findIPhhET_S1_S1_RKT0_, + // AFTER-LOWERING-PREPARE: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( if (f != v.end()) // expected-remark {{found call to end() iterator}} num_found++; From 48fe844b0252f158dd8f529013a4d25142bfc806 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 21 Dec 2023 14:41:24 -0300 Subject: [PATCH 1305/2301] [CIR][IdiomRecognizer] Make iterator recognition more strict - Check whether container is part of std, add a fixed list of available containers (for now only std::array) - Add a getRawDecl method to ASTRecordDeclInterface - Testcases --- .../clang/CIR/Interfaces/ASTAttrInterfaces.td | 12 ++++++ .../Dialect/Transforms/IdiomRecognizer.cpp | 41 +++++++++++++++++-- clang/test/CIR/Transforms/idiom-iter.cpp | 21 ++++++++++ .../test/CIR/Transforms/idiom-recognizer.cpp | 15 +++++++ 4 files changed, 86 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/Transforms/idiom-iter.cpp diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td index 60f6b2b16c90..fc162c11f42c 100644 --- a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td @@ -173,6 +173,18 @@ let cppNamespace = "::mlir::cir" in { } return false; }] + >, + InterfaceMethod<"", "bool", "isInStdNamespace", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAst()->getDeclContext()->isStdNamespace(); + }] + >, + // Note: `getRawDecl` is useful for debugging because it allows dumping + // the RecordDecl - it should not be used in regular code. + InterfaceMethod<"", "const clang::RecordDecl *", "getRawDecl", (ins), [{}], + /*defaultImplementation=*/ [{ + return $_attr.getAst(); + }] > ]; } diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index 48894c9eb302..c437a609735b 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -109,18 +109,53 @@ void IdiomRecognizerPass::raiseStdFind(CallOp call) { call.erase(); } +static bool isIteratorLikeType(mlir::Type t) { + // TODO: some iterators are going to be represented with structs, + // in which case we could look at ASTRecordDeclInterface for more + // information. + auto pTy = t.dyn_cast(); + if (!pTy || !pTy.getPointee().isa()) + return false; + return true; +} + +static bool isIteratorInStdContainter(mlir::Type t) { + auto sTy = t.dyn_cast(); + if (!sTy) + return false; + auto recordDecl = sTy.getAst(); + if (!recordDecl.isInStdNamespace()) + return false; + + // TODO: only std::array supported for now, generalize and + // use tablegen. CallDescription.cpp in the static analyzer + // could be a good inspiration source too. + if (recordDecl.getName().compare("array") != 0) + return false; + + return true; +} + void IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) { // FIXME: tablegen all of this function. - if (call.getNumOperands() != 1) + CIRBaseBuilderTy builder(getContext()); + + if (call.getNumOperands() != 1 || call.getNumResults() != 1) return; auto callExprAttr = call.getAstAttr(); if (!callExprAttr) return; - CIRBaseBuilderTy builder(getContext()); - builder.setInsertionPointAfter(call.getOperation()); + if (!isIteratorLikeType(call.getResult(0).getType())) + return; + // First argument is the container "this" pointer. + auto thisPtr = call.getOperand(0).getType().dyn_cast(); + if (!thisPtr || !isIteratorInStdContainter(thisPtr.getPointee())) + return; + + builder.setInsertionPointAfter(call.getOperation()); mlir::Operation *iterOp; if (callExprAttr.isIteratorBeginCall()) { if (opts.emitRemarkFoundCalls()) diff --git a/clang/test/CIR/Transforms/idiom-iter.cpp b/clang/test/CIR/Transforms/idiom-iter.cpp new file mode 100644 index 000000000000..5591baa04ff6 --- /dev/null +++ b/clang/test/CIR/Transforms/idiom-iter.cpp @@ -0,0 +1,21 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -fclangir-idiom-recognizer="remarks=found-calls" -clangir-verify-diagnostics %s -o %t.cir + +namespace std { +template struct array { + T arr[N]; + struct iterator { + T *p; + constexpr explicit iterator(T *p) : p(p) {} + constexpr bool operator!=(iterator o) { return p != o.p; } + constexpr iterator &operator++() { ++p; return *this; } + constexpr T &operator*() { return *p; } + }; + constexpr iterator begin() { return iterator(arr); } +}; +} + +void iter_test() +{ + std::array v2 = {1, 2, 3}; + (void)v2.begin(); // no remark should be produced. +} \ No newline at end of file diff --git a/clang/test/CIR/Transforms/idiom-recognizer.cpp b/clang/test/CIR/Transforms/idiom-recognizer.cpp index dca74f7fc451..5d11e50dcd9f 100644 --- a/clang/test/CIR/Transforms/idiom-recognizer.cpp +++ b/clang/test/CIR/Transforms/idiom-recognizer.cpp @@ -32,4 +32,19 @@ int test_find(unsigned char n = 3) if (f != v.end()) // expected-remark {{found call to end() iterator}} num_found++; return num_found; +} + +namespace yolo { +template struct array { + T arr[N]; + typedef T value_type; + typedef value_type* iterator; + constexpr iterator begin() { return iterator(arr); } +}; +} + +int iter_test() +{ + yolo::array v = {1, 2, 3}; + (void)v.begin(); // no remark should be produced. } \ No newline at end of file From b7ef846ecd73b4c50cdbb51f6ebef3d6f7206524 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 21 Dec 2023 21:03:23 -0300 Subject: [PATCH 1306/2301] [CIR] Cleanup idiom-recognizer and lib-opt options This was a bit half backed, give it some love. --- clang/include/clang/CIR/CIRToCIRPasses.h | 12 ++--- clang/include/clang/Driver/Options.td | 8 +++- .../include/clang/Frontend/FrontendOptions.h | 10 ++++- clang/lib/CIR/CodeGen/CIRPasses.cpp | 44 ++++++++++--------- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 17 ++++--- clang/lib/Driver/ToolChains/Clang.cpp | 8 ++++ clang/lib/Frontend/CompilerInvocation.cpp | 25 +++++++++-- .../test/CIR/Transforms/idiom-recognizer.cpp | 8 ++-- clang/test/CIR/Transforms/lib-opt.cpp | 2 +- 9 files changed, 88 insertions(+), 46 deletions(-) diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index a1e745c6f096..162846c75184 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -28,12 +28,12 @@ class ModuleOp; namespace cir { // Run set of cleanup/prepare/etc passes CIR <-> CIR. -mlir::LogicalResult -runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, bool enableVerifier, - bool enableLifetime, llvm::StringRef lifetimeOpts, - llvm::StringRef idiomRecognizerOpts, - llvm::StringRef libOptOpts, bool &passOptParsingFailure); +mlir::LogicalResult runCIRToCIRPasses( + mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, + llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, + llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, + llvm::StringRef libOptOpts, std::string &passOptParsingFailure); } // namespace cir #endif // CLANG_CIR_CIRTOCIRPASSES_H_ diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 7648a82c0886..f37248573f34 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3065,15 +3065,19 @@ def fclangir_lifetime_check : Flag<["-"], "fclangir-lifetime-check">, HelpText<"Run lifetime checker">; def fclangir_idiom_recognizer_EQ : Joined<["-"], "fclangir-idiom-recognizer=">, Visibility<[ClangOption, CC1Option]>, Group, - HelpText<"Pass configuration options to CIR idiom recognizer">, + HelpText<"Enable C/C++ idiom recognizer">, MarshallingInfoString>; +def fclangir_idiom_recognizer : Flag<["-"], "fclangir-idiom-recognizer">, + Visibility<[ClangOption, CC1Option]>, Group, + Alias, + HelpText<"Enable C/C++ idiom recognizer">; def fclangir_lib_opt_EQ : Joined<["-"], "fclangir-lib-opt=">, Visibility<[ClangOption, CC1Option]>, Group, HelpText<"Enable C/C++ library based optimizations (with options)">, MarshallingInfoString>; def fclangir_lib_opt : Flag<["-"], "fclangir-lib-opt">, Visibility<[ClangOption, CC1Option]>, Group, - Alias, AliasArgs<[""]>, + Alias, HelpText<"Enable C/C++ library based optimizations">; def clangir_disable_passes : Flag<["-"], "clangir-disable-passes">, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 5ff8c5e3c1ed..3d2ebc1b30b8 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -439,6 +439,12 @@ class FrontendOptions { // Enable Clang IR based lifetime check unsigned ClangIRLifetimeCheck : 1; + // Enable Clang IR idiom recognizer + unsigned ClangIRIdiomRecognizer : 1; + + // Enable Clang IR library optimizations + unsigned ClangIRLibOpt : 1; + CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. @@ -637,8 +643,8 @@ class FrontendOptions { UseClangIRPipeline(false), ClangIRDirectLowering(false), ClangIRDisablePasses(false), ClangIRDisableCIRVerifier(false), ClangIRDisableEmitCXXDefault(false), ClangIRLifetimeCheck(false), - TimeTraceGranularity(500), - TimeTraceVerbose(false) {} + ClangIRIdiomRecognizer(false), ClangIRLibOpt(false), + TimeTraceGranularity(500), TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file /// extension. For example, "c" would return Language::C. diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index f6b4df73578b..a83aa1cf41cf 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -11,23 +11,21 @@ //===----------------------------------------------------------------------===// #include "clang/AST/ASTContext.h" -#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "mlir/IR/BuiltinOps.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Support/LogicalResult.h" namespace cir { -mlir::LogicalResult -runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, bool enableVerifier, - bool enableLifetime, llvm::StringRef lifetimeOpts, - llvm::StringRef idiomRecognizerOpts, - llvm::StringRef libOptOpts, bool &passOptParsingFailure) { +mlir::LogicalResult runCIRToCIRPasses( + mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, + llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, + llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, + llvm::StringRef libOptOpts, std::string &passOptParsingFailure) { mlir::PassManager pm(mlirCtx); - passOptParsingFailure = false; - pm.addPass(mlir::createMergeCleanupsPass()); // TODO(CIR): Make this actually propagate errors correctly. This is stubbed @@ -39,26 +37,30 @@ runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, if (enableLifetime) { auto lifetimePass = mlir::createLifetimeCheckPass(&astCtx); if (lifetimePass->initializeOptions(lifetimeOpts, errorHandler).failed()) { - passOptParsingFailure = true; + passOptParsingFailure = lifetimeOpts; return mlir::failure(); } pm.addPass(std::move(lifetimePass)); } - auto idiomPass = mlir::createIdiomRecognizerPass(&astCtx); - if (idiomPass->initializeOptions(idiomRecognizerOpts, errorHandler) - .failed()) { - passOptParsingFailure = true; - return mlir::failure(); + if (enableIdiomRecognizer) { + auto idiomPass = mlir::createIdiomRecognizerPass(&astCtx); + if (idiomPass->initializeOptions(idiomRecognizerOpts, errorHandler) + .failed()) { + passOptParsingFailure = idiomRecognizerOpts; + return mlir::failure(); + } + pm.addPass(std::move(idiomPass)); } - pm.addPass(std::move(idiomPass)); - auto libOpPass = mlir::createLibOptPass(&astCtx); - if (libOpPass->initializeOptions(libOptOpts, errorHandler).failed()) { - passOptParsingFailure = true; - return mlir::failure(); + if (enableLibOpt) { + auto libOpPass = mlir::createLibOptPass(&astCtx); + if (libOpPass->initializeOptions(libOptOpts, errorHandler).failed()) { + passOptParsingFailure = libOptOpts; + return mlir::failure(); + } + pm.addPass(std::move(libOpPass)); } - pm.addPass(std::move(libOpPass)); pm.addPass(mlir::createLoweringPreparePass(&astCtx)); diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index e93beea09be2..4e004f1fed83 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -176,18 +176,21 @@ class CIRGenConsumer : public clang::ASTConsumer { std::string lifetimeOpts, idiomRecognizerOpts, libOptOpts; if (feOptions.ClangIRLifetimeCheck) lifetimeOpts = sanitizePassOptions(feOptions.ClangIRLifetimeCheckOpts); - idiomRecognizerOpts = - sanitizePassOptions(feOptions.ClangIRIdiomRecognizerOpts); - libOptOpts = sanitizePassOptions(feOptions.ClangIRLibOptOpts); + if (feOptions.ClangIRIdiomRecognizer) + idiomRecognizerOpts = + sanitizePassOptions(feOptions.ClangIRIdiomRecognizerOpts); + if (feOptions.ClangIRLibOpt) + libOptOpts = sanitizePassOptions(feOptions.ClangIRLibOptOpts); // Setup and run CIR pipeline. - bool passOptParsingFailure = false; + std::string passOptParsingFailure; if (runCIRToCIRPasses( mlirMod, mlirCtx.get(), C, !feOptions.ClangIRDisableCIRVerifier, - feOptions.ClangIRLifetimeCheck, lifetimeOpts, idiomRecognizerOpts, - libOptOpts, passOptParsingFailure) + feOptions.ClangIRLifetimeCheck, lifetimeOpts, + feOptions.ClangIRIdiomRecognizer, idiomRecognizerOpts, + feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure) .failed()) { - if (passOptParsingFailure) + if (!passOptParsingFailure.empty()) diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) << feOptions.ClangIRLifetimeCheckOpts; else diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index c95946c06a10..8d86b32107cd 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5250,6 +5250,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_clangir_disable_passes)) CmdArgs.push_back("-clangir-disable-passes"); + // ClangIR lib opt requires idiom recognizer. + if (Args.hasArg(options::OPT_fclangir_lib_opt, + options::OPT_fclangir_lib_opt_EQ)) { + if (!Args.hasArg(options::OPT_fclangir_idiom_recognizer, + options::OPT_fclangir_idiom_recognizer_EQ)) + CmdArgs.push_back("-fclangir-idiom-recognizer"); + } + if (IsOpenMPDevice) { // We have to pass the triple of the host if compiling for an OpenMP device. std::string NormalizedTriple = diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 4fcc444fb6ff..2b90a7993b98 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -2880,6 +2880,17 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts, for (const auto &ModuleFile : Opts.ModuleFiles) GenerateArg(Consumer, OPT_fmodule_file, ModuleFile); + if (Opts.ClangIRLifetimeCheck) + GenerateArg(Consumer, OPT_fclangir_lifetime_check_EQ, + Opts.ClangIRLifetimeCheckOpts); + + if (Opts.ClangIRIdiomRecognizer) + GenerateArg(Consumer, OPT_fclangir_idiom_recognizer_EQ, + Opts.ClangIRIdiomRecognizerOpts); + + if (Opts.ClangIRLibOpt) + GenerateArg(Consumer, OPT_fclangir_lib_opt_EQ, Opts.ClangIRLibOptOpts); + if (Opts.AuxTargetCPU) GenerateArg(Consumer, OPT_aux_target_cpu, *Opts.AuxTargetCPU); @@ -3121,9 +3132,17 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Opts.ClangIRLifetimeCheckOpts = A->getValue(); } - if (Args.hasArg(OPT_fclangir_idiom_recognizer_EQ)) - Opts.AuxTargetCPU = - std::string(Args.getLastArgValue(OPT_fclangir_idiom_recognizer_EQ)); + if (const Arg *A = Args.getLastArg(OPT_fclangir_idiom_recognizer, + OPT_fclangir_idiom_recognizer_EQ)) { + Opts.ClangIRIdiomRecognizer = true; + Opts.ClangIRIdiomRecognizerOpts = A->getValue(); + } + + if (const Arg *A = + Args.getLastArg(OPT_fclangir_lib_opt, OPT_fclangir_lib_opt_EQ)) { + Opts.ClangIRLibOpt = true; + Opts.ClangIRLibOptOpts = A->getValue(); + } if (Args.hasArg(OPT_aux_target_cpu)) Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu)); diff --git a/clang/test/CIR/Transforms/idiom-recognizer.cpp b/clang/test/CIR/Transforms/idiom-recognizer.cpp index 5d11e50dcd9f..7264444cd98f 100644 --- a/clang/test/CIR/Transforms/idiom-recognizer.cpp +++ b/clang/test/CIR/Transforms/idiom-recognizer.cpp @@ -1,9 +1,9 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after-all %s -o - 2>&1 | FileCheck %s -check-prefix=PASS_ENABLED +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-idiom-recognizer -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after-all %s -o - 2>&1 | FileCheck %s -check-prefix=PASS_ENABLED // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -fclangir-idiom-recognizer="remarks=found-calls" -clangir-verify-diagnostics %s -o %t.cir -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-before=cir-idiom-recognizer %s -o - 2>&1 | FileCheck %s -check-prefix=BEFORE-IDIOM -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-idiom-recognizer %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-IDIOM -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-LOWERING-PREPARE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fclangir-idiom-recognizer -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-before=cir-idiom-recognizer %s -o - 2>&1 | FileCheck %s -check-prefix=BEFORE-IDIOM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fclangir-idiom-recognizer -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-idiom-recognizer %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-IDIOM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fclangir-idiom-recognizer -emit-cir -I%S/../Inputs -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o - 2>&1 | FileCheck %s -check-prefix=AFTER-LOWERING-PREPARE // PASS_ENABLED: IR Dump After IdiomRecognizer (cir-idiom-recognizer) diff --git a/clang/test/CIR/Transforms/lib-opt.cpp b/clang/test/CIR/Transforms/lib-opt.cpp index e1cfa30dabe4..17895e567645 100644 --- a/clang/test/CIR/Transforms/lib-opt.cpp +++ b/clang/test/CIR/Transforms/lib-opt.cpp @@ -1,3 +1,3 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o - 2>&1 | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-idiom-recognizer -fclangir-lib-opt -emit-cir -mmlir --mlir-print-ir-after-all %s -o - 2>&1 | FileCheck %s -check-prefix=CIR // CIR: IR Dump After LibOpt (cir-lib-opt) \ No newline at end of file From 102151c4c8cbc2223a0dda9305dbeea23ffb7cd2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 21 Dec 2023 23:37:10 -0300 Subject: [PATCH 1307/2301] [CIR][LibOpt] Add a first transformation: std::find to memchr Inspired by similar work in libc++, pointed to me by Louis Dionne and Nikolas Klauser. This is initial, very conservative and not generalized yet: works for `char`s within a specific version of `std::find`. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 7 ++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 6 - .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../Dialect/Transforms/IdiomRecognizer.cpp | 14 +-- clang/lib/CIR/Dialect/Transforms/LibOpt.cpp | 118 ++++++++++++++++++ .../lib/CIR/Dialect/Transforms/StdHelpers.cpp | 32 +++++ clang/lib/CIR/Dialect/Transforms/StdHelpers.h | 36 ++++++ clang/test/CIR/Transforms/lib-opt-find.cpp | 28 +++++ 8 files changed, 225 insertions(+), 17 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/StdHelpers.h create mode 100644 clang/test/CIR/Transforms/lib-opt-find.cpp diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 7fe9d9991345..3257d180c44e 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -47,6 +47,13 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { getAttr(typ, val)); } + mlir::cir::PointerType getVoidPtrTy(unsigned AddrSpace = 0) { + if (AddrSpace) + llvm_unreachable("address space is NYI"); + return ::mlir::cir::PointerType::get( + getContext(), ::mlir::cir::VoidType::get(getContext())); + } + mlir::Value createNot(mlir::Value value) { return create(value.getLoc(), value.getType(), mlir::cir::UnaryOpKind::Not, value); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index eb365bc3ac96..768087f6e0e8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -390,12 +390,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::PointerType::get(getContext(), ty); } - mlir::cir::PointerType getVoidPtrTy(unsigned AddrSpace = 0) { - if (AddrSpace) - llvm_unreachable("address space is NYI"); - return typeCache.VoidPtrTy; - } - /// Get a CIR anonymous struct type. mlir::cir::StructType getAnonStructTy(llvm::ArrayRef members, bool packed = false, diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 3778bc54b43f..7dcb9656d81e 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -5,6 +5,7 @@ add_clang_library(MLIRCIRTransforms DropAST.cpp IdiomRecognizer.cpp LibOpt.cpp + StdHelpers.cpp DEPENDS MLIRCIRPassIncGen diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index c437a609735b..c0c31b0052f7 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -24,6 +24,8 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Path.h" +#include "StdHelpers.h" + using cir::CIRBaseBuilderTy; using namespace mlir; using namespace mlir::cir; @@ -120,20 +122,10 @@ static bool isIteratorLikeType(mlir::Type t) { } static bool isIteratorInStdContainter(mlir::Type t) { - auto sTy = t.dyn_cast(); - if (!sTy) - return false; - auto recordDecl = sTy.getAst(); - if (!recordDecl.isInStdNamespace()) - return false; - // TODO: only std::array supported for now, generalize and // use tablegen. CallDescription.cpp in the static analyzer // could be a good inspiration source too. - if (recordDecl.getName().compare("array") != 0) - return false; - - return true; + return isStdArrayType(t); } void IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) { diff --git a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp index 2b7dd159adaa..2422613a5315 100644 --- a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp @@ -24,6 +24,8 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Path.h" +#include "StdHelpers.h" + using cir::CIRBaseBuilderTy; using namespace mlir; using namespace mlir::cir; @@ -33,6 +35,7 @@ namespace { struct LibOptPass : public LibOptBase { LibOptPass() = default; void runOnOperation() override; + void xformStdFindIntoMemchr(StdFindOp findOp); // Handle pass options struct Options { @@ -83,12 +86,127 @@ struct LibOptPass : public LibOptBase { }; } // namespace +static bool isSequentialContainer(mlir::Type t) { + // TODO: other sequential ones, vector, dequeue, list, forward_list. + return isStdArrayType(t); +} + +static bool getIntegralNTTPAt(StructType t, size_t pos, unsigned &size) { + auto *d = + dyn_cast(t.getAst().getRawDecl()); + if (!d) + return false; + + auto &templArgs = d->getTemplateArgs(); + if (pos >= templArgs.size()) + return false; + + auto arraySizeTemplateArg = templArgs[pos]; + if (arraySizeTemplateArg.getKind() != clang::TemplateArgument::Integral) + return false; + + size = arraySizeTemplateArg.getAsIntegral().getSExtValue(); + return true; +} + +static bool containerHasStaticSize(StructType t, unsigned &size) { + // TODO: add others. + if (!isStdArrayType(t)) + return false; + + // Get "size" from std::array + unsigned sizeNTTPPos = 1; + return getIntegralNTTPAt(t, sizeNTTPPos, size); +} + +void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { + // First and second operands need to be iterators begin() and end(). + // TODO: look over cir.loads until we have a mem2reg + other passes + // to help out here. + auto iterBegin = dyn_cast(findOp.getOperand(0).getDefiningOp()); + if (!iterBegin) + return; + if (!isa(findOp.getOperand(1).getDefiningOp())) + return; + + // Both operands have the same type, use iterBegin. + + // Look at this pointer to retrieve container information. + auto thisPtr = + iterBegin.getOperand().getType().cast().getPointee(); + auto containerTy = dyn_cast(thisPtr); + if (!containerTy) + return; + + if (!isSequentialContainer(containerTy)) + return; + + unsigned staticSize = 0; + if (!containerHasStaticSize(containerTy, staticSize)) + return; + + // Transformation: + // - 1st arg: the data pointer + // - Assert the Iterator is a pointer to primitive type. + // - Check IterBeginOp is char sized. TODO: add other types that map to + // char size. + auto iterResTy = iterBegin.getResult().getType().dyn_cast(); + assert(iterResTy && "expected pointer type for iterator"); + auto underlyingDataTy = iterResTy.getPointee().dyn_cast(); + if (!underlyingDataTy || underlyingDataTy.getWidth() != 8) + return; + + // - 2nd arg: the pattern + // - Check it's a pointer type. + // - Load the pattern from memory + // - cast it to `int`. + auto patternAddrTy = findOp.getOperand(2).getType().dyn_cast(); + if (!patternAddrTy || patternAddrTy.getPointee() != underlyingDataTy) + return; + + // - 3rd arg: the size + // - Create and pass a cir.const with NTTP value + + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(findOp.getOperation()); + auto memchrOp0 = builder.createBitcast( + iterBegin.getLoc(), iterBegin.getResult(), builder.getVoidPtrTy()); + + // FIXME: get datalayout based "int" instead of fixed size 4. + auto loadPattern = builder.create( + findOp.getOperand(2).getLoc(), underlyingDataTy, findOp.getOperand(2)); + auto memchrOp1 = builder.createIntCast( + loadPattern, IntType::get(builder.getContext(), 32, true)); + + // FIXME: get datalayout based "size_t" instead of fixed size 64. + auto uInt64Ty = IntType::get(builder.getContext(), 64, false); + auto memchrOp2 = builder.create( + findOp.getLoc(), uInt64Ty, mlir::cir::IntAttr::get(uInt64Ty, staticSize)); + + // Build memchr op: + // void *memchr(const void *s, int c, size_t n); + auto memChr = builder.create(findOp.getLoc(), memchrOp0, memchrOp1, + memchrOp2); + mlir::Operation *result = + builder.createBitcast(findOp.getLoc(), memChr.getResult(), iterResTy) + .getDefiningOp(); + + findOp.replaceAllUsesWith(result); + findOp.erase(); +} + void LibOptPass::runOnOperation() { assert(astCtx && "Missing ASTContext, please construct with the right ctor"); opts.parseOptions(*this); auto *op = getOperation(); if (isa<::mlir::ModuleOp>(op)) theModule = cast<::mlir::ModuleOp>(op); + + SmallVector stdFindToTransform; + op->walk([&](StdFindOp findOp) { stdFindToTransform.push_back(findOp); }); + + for (auto c : stdFindToTransform) + xformStdFindIntoMemchr(c); } std::unique_ptr mlir::createLibOptPass() { diff --git a/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp b/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp new file mode 100644 index 000000000000..e6beada09786 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp @@ -0,0 +1,32 @@ +//===- StdHelpers.cpp - Implementation standard related helpers--*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "StdHelpers.h" + +namespace mlir { +namespace cir { + +bool isStdArrayType(mlir::Type t) { + auto sTy = t.dyn_cast(); + if (!sTy) + return false; + auto recordDecl = sTy.getAst(); + if (!recordDecl.isInStdNamespace()) + return false; + + // TODO: only std::array supported for now, generalize and + // use tablegen. CallDescription.cpp in the static analyzer + // could be a good inspiration source too. + if (recordDecl.getName().compare("array") != 0) + return false; + + return true; +} + +} // namespace cir +} // namespace mlir \ No newline at end of file diff --git a/clang/lib/CIR/Dialect/Transforms/StdHelpers.h b/clang/lib/CIR/Dialect/Transforms/StdHelpers.h new file mode 100644 index 000000000000..302272feb6bb --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/StdHelpers.h @@ -0,0 +1,36 @@ +//===- StdHelpers.h - Helpers for standard types/functions ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "PassDetail.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/Region.h" +#include "clang/AST/ASTContext.h" +#include "clang/Basic/Module.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Twine.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/Path.h" + +#ifndef DIALECT_CIR_TRANSFORMS_STDHELPERS_H_ +#define DIALECT_CIR_TRANSFORMS_STDHELPERS_H_ + +namespace mlir { +namespace cir { + +bool isStdArrayType(mlir::Type t); + +} // namespace cir +} // namespace mlir + +#endif diff --git a/clang/test/CIR/Transforms/lib-opt-find.cpp b/clang/test/CIR/Transforms/lib-opt-find.cpp new file mode 100644 index 000000000000..a1a3f81d065d --- /dev/null +++ b/clang/test/CIR/Transforms/lib-opt-find.cpp @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -fclangir-idiom-recognizer -fclangir-lib-opt -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include "std-cxx.h" + +int test_find(unsigned char n = 3) +{ + unsigned num_found = 0; + // CHECK: %[[pattern_addr:.*]] = cir.alloca !u8i, cir.ptr , ["n" + std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + + auto f = std::find(v.begin(), v.end(), n); + // CHECK: %[[begin:.*]] = cir.call @_ZNSt5arrayIhLj9EE5beginEv + // CHECK: cir.call @_ZNSt5arrayIhLj9EE3endEv + // CHECK: %[[cast_to_void:.*]] = cir.cast(bitcast, %[[begin]] : !cir.ptr), !cir.ptr + // CHECK: %[[load_pattern:.*]] = cir.load %[[pattern_addr]] : cir.ptr , !u8i + // CHECK: %[[pattern:.*]] = cir.cast(integral, %[[load_pattern:.*]] : !u8i), !s32i + + // CHECK-NOT: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( + // CHECK: %[[array_size:.*]] = cir.const(#cir.int<9> : !u64i) : !u64i + + // CHECK: %[[result_cast:.*]] = cir.libc.memchr(%[[cast_to_void]], %[[pattern]], %[[array_size]]) + // CHECK: cir.cast(bitcast, %[[result_cast]] : !cir.ptr), !cir.ptr + if (f != v.end()) + num_found++; + + return num_found; +} \ No newline at end of file From 280f9d6dd6bec8890d2e1699541f2a6785c57539 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 22 Dec 2023 16:40:25 +0300 Subject: [PATCH 1308/2301] [mlir][llvm] Fixes CallOp builder for the case of indirect call --- clang/test/CIR/CodeGen/fun-ptr.c | 3 +-- clang/test/CIR/Lowering/func.cir | 1 - clang/test/CIR/Lowering/globals.cir | 3 +-- mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp | 10 +++++++++- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c index 9e0681881c77..5edf526ffbc4 100644 --- a/clang/test/CIR/CodeGen/fun-ptr.c +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -2,7 +2,6 @@ // RUN: %clang_cc1 -x c++ -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM // RUN: %clang_cc1 -x c++ -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM -// XFAIL: * typedef struct { int a; @@ -18,7 +17,7 @@ typedef struct A { fun_typ fun; } A; -// CIR: !ty_22A22 = !cir.struct (!cir.ptr>)>>} #cir.record.decl.ast> +// CIR: !ty_22A22 = !cir.struct>)>>} #cir.record.decl.ast> A a = {(fun_typ)0}; int extract_a(Data* d) { diff --git a/clang/test/CIR/Lowering/func.cir b/clang/test/CIR/Lowering/func.cir index 41cf5c3afdd8..6dcb7bdb42d0 100644 --- a/clang/test/CIR/Lowering/func.cir +++ b/clang/test/CIR/Lowering/func.cir @@ -1,6 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck %s -check-prefix=MLIR --input-file=%t.mlir -// XFAIL: * !s32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 052c2045752b..699bf3fd3b5a 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -2,7 +2,6 @@ // RUN: FileCheck --input-file=%t.cir %s -check-prefix=MLIR // RUN: cir-translate %s -cir-to-llvmir -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// XFAIL: * !void = !cir.void !s16i = !cir.int @@ -178,4 +177,4 @@ module { //MLIR: %[[RES8:.*]] = llvm.getelementptr %[[RES7]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> //MLIR: %[[RES9:.*]] = llvm.load %[[RES8]] : !llvm.ptr -> !llvm.ptr //MLIR: llvm.call %[[RES9]]({{.*}}) : !llvm.ptr, (i32) -> () -} \ No newline at end of file +} diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp index ef5f1b069b40..bf7221b1a02b 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -1025,8 +1025,16 @@ void CallOp::build(OpBuilder &builder, OperationState &state, TypeRange results, void CallOp::build(OpBuilder &builder, OperationState &state, TypeRange results, FlatSymbolRefAttr callee, ValueRange args) { - assert(callee && "expected non-null callee in direct call builder"); + auto fargs = callee ? args : args.drop_front(); build(builder, state, results, + // + // TODO(ClangIR): This was a local change that is no longer valid during + // rebase + // TypeAttr::get(getLLVMFuncType(builder.getContext(), results, fargs)), + // callee, args, /*fastmathFlags=*/nullptr, /*branch_weights=*/nullptr, + // /*CConv=*/nullptr, + // + // /*var_callee_type=*/nullptr, callee, args, /*fastmathFlags=*/nullptr, /*branch_weights=*/nullptr, /*CConv=*/nullptr, /*TailCallKind=*/nullptr, From 4dcb4dd49ae82411d1d2b06e8707beb9edcda6af Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Wed, 13 Dec 2023 17:06:22 +0300 Subject: [PATCH 1309/2301] [CIR][CodeGen] Fix flat offset lowering code to consider field alignments. Before this fix conversion of flat offset to GlobalView indices could crash or compute invalid result. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 ++++++-- clang/test/CIR/CodeGen/globals.c | 9 +++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 768087f6e0e8..c248d4cb560e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -754,14 +754,18 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { Offset %= EltSize; } else if (auto StructTy = Ty.dyn_cast()) { auto Elts = StructTy.getMembers(); + unsigned Pos = 0; for (size_t I = 0; I < Elts.size(); ++I) { auto EltSize = Layout.getTypeAllocSize(Elts[I]); - if (Offset < EltSize) { + unsigned AlignMask = Layout.getABITypeAlign(Elts[I]) - 1; + Pos = (Pos + AlignMask) & ~AlignMask; + if (Offset < Pos + EltSize) { Indices.push_back(I); SubType = Elts[I]; + Offset -= Pos; break; } - Offset -= EltSize; + Pos += EltSize; } } else { llvm_unreachable("unexpected type"); diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index bc1535488334..ca347f425df6 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -83,6 +83,15 @@ int foo() { // CHECK: cir.func {{.*@foo}} // CHECK: {{.*}} = cir.get_global @optind : cir.ptr +struct Glob { + double a[42]; + int pad1[3]; + double b[42]; +} glob; + +double *const glob_ptr = &glob.b[1]; +// CHECK: cir.global external @glob_ptr = #cir.global_view<@glob, [2 : i32, 1 : i32]> : !cir.ptr + // TODO: test tentatives with internal linkage. // Tentative definition is THE definition. Should be zero-initialized. From b2392fe529f2ccf18b70170008475e6d5197c1b3 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 4 Jan 2024 18:29:34 +0300 Subject: [PATCH 1310/2301] [CIR][Lowering][Bugfix] Lower ScopeOp with return op (#364) `ScopeOp` may end with `ReturnOp` instead of `YieldOp`, that is not expected now. This PR fix this. The reduced example is: ``` int foo() { { return 0; } } ``` This is quite frequent bug in `llvm-test-suite` --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 +-- clang/test/CIR/Lowering/scope.cir | 29 ++++++++++++++++++- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0dbc64baa491..ace65685db06 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -818,9 +818,9 @@ class CIRScopeOpLowering // Replace the scopeop return with a branch that jumps out of the body. // Stack restore before leaving the body region. rewriter.setInsertionPointToEnd(afterBody); - auto yieldOp = cast(afterBody->getTerminator()); + auto yieldOp = dyn_cast(afterBody->getTerminator()); - if (!isBreakOrContinue(yieldOp)) { + if (yieldOp && !isBreakOrContinue(yieldOp)) { auto branchOp = rewriter.replaceOpWithNewOp( yieldOp, yieldOp.getArgs(), continueBlock); diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index 7ebd46a974f7..8afa84d0c247 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -48,4 +48,31 @@ module { // MLIR-NEXT: llvm.return // MLIR-NEXT: } -} + + cir.func @scope_with_return() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.scope { + %2 = cir.const(#cir.int<0> : !u32i) : !u32i + cir.store %2, %0 : !u32i, cir.ptr + %3 = cir.load %0 : cir.ptr , !u32i + cir.return %3 : !u32i + } + %1 = cir.load %0 : cir.ptr , !u32i + cir.return %1 : !u32i + } + + // MLIR: llvm.func @scope_with_return() + // MLIR-NEXT: [[v0:%.*]] = llvm.mlir.constant(1 : index) : i64 + // MLIR-NEXT: [[v1:%.*]] = llvm.alloca [[v0]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr + // MLIR-NEXT: llvm.br ^bb1 + // MLIR-NEXT: ^bb1: // pred: ^bb0 + // MLIR-NEXT: [[v2:%.*]] = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: llvm.store [[v2]], [[v1]] : i32, !llvm.ptr + // MLIR-NEXT: [[v3:%.*]] = llvm.load [[v1]] : !llvm.ptr -> i32 + // MLIR-NEXT: llvm.return [[v3]] : i32 + // MLIR-NEXT: ^bb2: // no predecessors + // MLIR-NEXT: [[v4:%.*]] = llvm.load [[v1]] : !llvm.ptr -> i32 + // MLIR-NEXT: llvm.return [[v4]] : i32 + // MLIR-NEXT: } + + } From dd9e239fc797a705d33284ac56829329bd96ee57 Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Thu, 4 Jan 2024 18:31:07 +0300 Subject: [PATCH 1311/2301] [CIR][CodeGen] Support lowering of cir.const with ZeroAttr. (#365) --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 28 +++++++++++-------- clang/test/CIR/Lowering/array-init.c | 10 +++++++ clang/test/CIR/Lowering/const.cir | 2 ++ 3 files changed, 29 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/Lowering/array-init.c diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ace65685db06..30a1fb0e9d9a 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1047,19 +1047,25 @@ class CIRConstantLowering // then memcopyied into the stack (as done in Clang). else if (auto arrTy = op.getType().dyn_cast()) { // Fetch operation constant array initializer. - auto constArr = op.getValue().dyn_cast(); - if (!constArr) - return op.emitError() << "array does not have a constant initializer"; + if (auto constArr = op.getValue().dyn_cast()) { + // Lower constant array initializer. + auto denseAttr = lowerConstArrayAttr(constArr, typeConverter); + if (!denseAttr.has_value()) { + op.emitError() + << "unsupported lowering for #cir.const_array with element type " + << arrTy.getEltType(); + return mlir::failure(); + } - // Lower constant array initializer. - auto denseAttr = lowerConstArrayAttr(constArr, typeConverter); - if (!denseAttr.has_value()) { - op.emitError() - << "unsupported lowering for #cir.const_array with element type " - << arrTy.getEltType(); - return mlir::failure(); + attr = denseAttr.value(); + } else if (auto zero = op.getValue().dyn_cast()) { + auto initVal = lowerCirAttrAsValue(op, zero, rewriter, typeConverter); + rewriter.replaceAllUsesWith(op, initVal); + rewriter.eraseOp(op); + return mlir::success(); + } else { + return op.emitError() << "array does not have a constant initializer"; } - attr = denseAttr.value(); } else if (const auto structAttr = op.getValue().dyn_cast()) { // TODO(cir): this diverges from traditional lowering. Normally the diff --git a/clang/test/CIR/Lowering/array-init.c b/clang/test/CIR/Lowering/array-init.c new file mode 100644 index 000000000000..8e452bf06878 --- /dev/null +++ b/clang/test/CIR/Lowering/array-init.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +// LLVM: define void @zeroInit +// LLVM: [[RES:%.*]] = alloca [3 x i32], i64 1 +// LLVM: store [3 x i32] zeroinitializer, ptr [[RES]] +void zeroInit() { + int a[3] = {0, 0, 0}; +} + diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 95119c04c30c..62d0b1aa2e64 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -11,6 +11,8 @@ module { // CHECK: llvm.mlir.constant(dense<[1, 2]> : tensor<2xi32>) : !llvm.array<2 x i32> %3 = cir.const(#cir.const_array<[1.000000e+00 : f32, 2.000000e+00 : f32]> : !cir.array) : !cir.array // CHECK: llvm.mlir.constant(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) : !llvm.array<2 x f32> + %4 = cir.const(#cir.zero : !cir.array) : !cir.array + // CHECK: cir.llvmir.zeroinit : !llvm.array<3 x i32> cir.return } } From a5b4208badc1f6dffa04e07b033d328bdd299e5c Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 26 Dec 2023 11:16:03 +0300 Subject: [PATCH 1312/2301] [CIR][Lowering][Bugfix] explicit lowering for the indirect call --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 30a1fb0e9d9a..2d63b559804d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -873,12 +873,28 @@ class CIRCallLowering : public mlir::OpConversionPattern { mlir::ConversionPatternRewriter &rewriter) const override { llvm::SmallVector llvmResults; auto cirResults = op.getResultTypes(); + auto* converter = getTypeConverter(); - if (getTypeConverter()->convertTypes(cirResults, llvmResults).failed()) + if (converter->convertTypes(cirResults, llvmResults).failed()) return mlir::failure(); - rewriter.replaceOpWithNewOp( + if (auto callee = op.getCalleeAttr()) { // direct call + rewriter.replaceOpWithNewOp( op, llvmResults, op.getCalleeAttr(), adaptor.getOperands()); + } else { // indirect call + assert(op.getOperands().size() + && "operands list must no be empty for the indirect call"); + auto typ = op.getOperands().front().getType(); + assert(isa(typ) && "expected pointer type"); + auto ptyp = dyn_cast(typ); + auto ftyp = dyn_cast(ptyp.getPointee()); + assert(ftyp && "expected a pointer to a function as the first operand"); + + rewriter.replaceOpWithNewOp( + op, + dyn_cast(converter->convertType(ftyp)), + adaptor.getOperands()); + } return mlir::success(); } }; From 456122f45edb86a376e42c22a5b43ec7205c2a90 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 26 Dec 2023 11:19:37 +0300 Subject: [PATCH 1313/2301] Revert "[mlir][llvm] Fixes CallOp builder for the case of indirect call" This reverts commit bbaa147083ac6cf1b406c5f63b199fb7b971d6dc. --- mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp index bf7221b1a02b..ef5f1b069b40 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -1025,16 +1025,8 @@ void CallOp::build(OpBuilder &builder, OperationState &state, TypeRange results, void CallOp::build(OpBuilder &builder, OperationState &state, TypeRange results, FlatSymbolRefAttr callee, ValueRange args) { - auto fargs = callee ? args : args.drop_front(); + assert(callee && "expected non-null callee in direct call builder"); build(builder, state, results, - // - // TODO(ClangIR): This was a local change that is no longer valid during - // rebase - // TypeAttr::get(getLLVMFuncType(builder.getContext(), results, fargs)), - // callee, args, /*fastmathFlags=*/nullptr, /*branch_weights=*/nullptr, - // /*CConv=*/nullptr, - // - // /*var_callee_type=*/nullptr, callee, args, /*fastmathFlags=*/nullptr, /*branch_weights=*/nullptr, /*CConv=*/nullptr, /*TailCallKind=*/nullptr, From 2411bda2ea4e69624c5be31ba25dc4dbf12609a2 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 4 Jan 2024 18:39:23 +0300 Subject: [PATCH 1314/2301] [CIR][CIRGen][NFC] Enhance alloca helpers (#367) One more step towards variable length array support. This PR adds one more helper for the `alloca` instruction and re-use the existing ones. The reason is the following: right now there are two possible ways to insert alloca: either to a function entry block or to the given block after all the existing alloca instructions. But for VLA support we need to insert alloca anywhere, right after an array's size becomes known. Thus, we add one more parameter with the default value - insertion point. Also, we don't want copy-paste the code, and reuse the existing helpers, but it may be a little bit confusing to read. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 47 ++++++++++++++++---------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 19 ++++++++--- 2 files changed, 44 insertions(+), 22 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 368ecac5f0ed..0e3d05766ad4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2286,17 +2286,19 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(const Expr *cond, mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, mlir::Location loc, CharUnits alignment, - bool insertIntoFnEntryBlock) { + bool insertIntoFnEntryBlock, + mlir::Value arraySize) { mlir::Block *entryBlock = insertIntoFnEntryBlock ? getCurFunctionEntryBlock() : currLexScope->getEntryBlock(); return buildAlloca(name, ty, loc, alignment, - builder.getBestAllocaInsertPoint(entryBlock)); + builder.getBestAllocaInsertPoint(entryBlock), arraySize); } mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, mlir::Location loc, CharUnits alignment, - mlir::OpBuilder::InsertPoint ip) { + mlir::OpBuilder::InsertPoint ip, + mlir::Value arraySize) { auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), ty); auto alignIntAttr = CGM.getSize(alignment); @@ -2306,7 +2308,7 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, builder.restoreInsertionPoint(ip); addr = builder.create(loc, /*addr type*/ localVarPtrTy, /*var type*/ ty, name, - alignIntAttr); + alignIntAttr, arraySize); if (currVarDecl) { auto alloca = cast(addr.getDefiningOp()); alloca.setAstAttr(ASTVarDeclAttr::get(builder.getContext(), currVarDecl)); @@ -2317,9 +2319,10 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty, mlir::Location loc, CharUnits alignment, - bool insertIntoFnEntryBlock) { + bool insertIntoFnEntryBlock, + mlir::Value arraySize) { return buildAlloca(name, getCIRType(ty), loc, alignment, - insertIntoFnEntryBlock); + insertIntoFnEntryBlock, arraySize); } mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, @@ -2465,12 +2468,11 @@ Address CIRGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, /// This creates a alloca and inserts it into the entry block of the /// current region. -Address CIRGenFunction::CreateTempAllocaWithoutCast(mlir::Type Ty, - CharUnits Align, - mlir::Location Loc, - const Twine &Name, - mlir::Value ArraySize) { - auto Alloca = CreateTempAlloca(Ty, Loc, Name, ArraySize); +Address CIRGenFunction::CreateTempAllocaWithoutCast( + mlir::Type Ty, CharUnits Align, mlir::Location Loc, const Twine &Name, + mlir::Value ArraySize, mlir::OpBuilder::InsertPoint ip) { + auto Alloca = ip.isSet() ? CreateTempAlloca(Ty, Loc, Name, ip, ArraySize) + : CreateTempAlloca(Ty, Loc, Name, ArraySize); Alloca.setAlignmentAttr(CGM.getSize(Align)); return Address(Alloca, Ty, Align); } @@ -2480,8 +2482,10 @@ Address CIRGenFunction::CreateTempAllocaWithoutCast(mlir::Type Ty, Address CIRGenFunction::CreateTempAlloca(mlir::Type Ty, CharUnits Align, mlir::Location Loc, const Twine &Name, mlir::Value ArraySize, - Address *AllocaAddr) { - auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Loc, Name, ArraySize); + Address *AllocaAddr, + mlir::OpBuilder::InsertPoint ip) { + auto Alloca = + CreateTempAllocaWithoutCast(Ty, Align, Loc, Name, ArraySize, ip); if (AllocaAddr) *AllocaAddr = Alloca; mlir::Value V = Alloca.getPointer(); @@ -2500,10 +2504,19 @@ mlir::cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, const Twine &Name, mlir::Value ArraySize, bool insertIntoFnEntryBlock) { - if (ArraySize) - assert(0 && "NYI"); + return cast(buildAlloca(Name.str(), Ty, Loc, CharUnits(), + insertIntoFnEntryBlock, + ArraySize) + .getDefiningOp()); +} + +/// This creates an alloca and inserts it into the provided insertion point +mlir::cir::AllocaOp CIRGenFunction::CreateTempAlloca( + mlir::Type Ty, mlir::Location Loc, const Twine &Name, + mlir::OpBuilder::InsertPoint ip, mlir::Value ArraySize) { + assert(ip.isSet() && "Insertion point is not set"); return cast( - buildAlloca(Name.str(), Ty, Loc, CharUnits(), insertIntoFnEntryBlock) + buildAlloca(Name.str(), Ty, Loc, CharUnits(), ip, ArraySize) .getDefiningOp()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 834755cf9609..7b8ce358f301 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -239,13 +239,16 @@ class CIRGenFunction : public CIRGenTypeCache { // FIXME(cir): move this to CIRGenBuider.h mlir::Value buildAlloca(llvm::StringRef name, clang::QualType ty, mlir::Location loc, clang::CharUnits alignment, - bool insertIntoFnEntryBlock = false); + bool insertIntoFnEntryBlock = false, + mlir::Value arraySize = nullptr); mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, - bool insertIntoFnEntryBlock = false); + bool insertIntoFnEntryBlock = false, + mlir::Value arraySize = nullptr); mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, - mlir::OpBuilder::InsertPoint ip); + mlir::OpBuilder::InsertPoint ip, + mlir::Value arraySize = nullptr); private: void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, @@ -1877,14 +1880,20 @@ class CIRGenFunction : public CIRGenTypeCache { CreateTempAllocaInFnEntryBlock(mlir::Type Ty, mlir::Location Loc, const Twine &Name = "tmp", mlir::Value ArraySize = nullptr); + mlir::cir::AllocaOp CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, + const Twine &Name = "tmp", + mlir::OpBuilder::InsertPoint ip = {}, + mlir::Value ArraySize = nullptr); Address CreateTempAlloca(mlir::Type Ty, CharUnits align, mlir::Location Loc, const Twine &Name = "tmp", mlir::Value ArraySize = nullptr, - Address *Alloca = nullptr); + Address *Alloca = nullptr, + mlir::OpBuilder::InsertPoint ip = {}); Address CreateTempAllocaWithoutCast(mlir::Type Ty, CharUnits align, mlir::Location Loc, const Twine &Name = "tmp", - mlir::Value ArraySize = nullptr); + mlir::Value ArraySize = nullptr, + mlir::OpBuilder::InsertPoint ip = {}); /// Create a temporary memory object of the given type, with /// appropriate alignmen and cast it to the default address space. Returns From d258980bf2de0997e0f6142af39742c8c96912c6 Mon Sep 17 00:00:00 2001 From: Keyi Zhang Date: Thu, 4 Jan 2024 07:43:41 -0800 Subject: [PATCH 1315/2301] [CIR][Lowering] add cir.ternary to scf.if lowering (#368) This PR adds `cir.ternary` lowering. There are two approaches to lower `cir.ternary` imo: 1. Use `scf.if` op. 2. Use `cf.cond_br` op. I choose `scf.if` because `scf.if` + canonicalization produces `arith.select` whereas `cf.cond_br` requires scf lifting. In many ways `scf.if` is more high-level and closer to `cir.ternary`. A separate `cir.yield` lowering is required since we cannot directly replace `cir.yield` in the ternary op lowering -- the yield operands may still be illegal and doing so produces `builtin.unrealized_cast` ops. I couldn't figured out a way to solve this issue without adding a separate lowering pattern. Please let me know if you know a way to solve this issue. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 57 ++++++++++++++++++- .../test/CIR/Lowering/ThroughMLIR/tenary.cir | 44 ++++++++++++++ 2 files changed, 98 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/tenary.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 8471230c6eab..0853eeb87782 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -39,6 +39,7 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/Sequence.h" +#include "llvm/ADT/TypeSwitch.h" using namespace cir; using namespace llvm; @@ -65,7 +66,8 @@ struct ConvertCIRToMLIRPass void getDependentDialects(mlir::DialectRegistry ®istry) const override { registry.insert(); + mlir::arith::ArithDialect, mlir::cf::ControlFlowDialect, + mlir::scf::SCFDialect>(); } void runOnOperation() final; @@ -547,6 +549,55 @@ struct CIRBrCondOpLowering } }; +class CIRTernaryOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::TernaryOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.setInsertionPoint(op); + auto condition = adaptor.getCond(); + auto i1Condition = rewriter.create( + op.getLoc(), rewriter.getI1Type(), condition); + SmallVector resultTypes; + if (mlir::failed(getTypeConverter()->convertTypes(op->getResultTypes(), + resultTypes))) + return mlir::failure(); + + auto ifOp = rewriter.create(op.getLoc(), resultTypes, + i1Condition.getResult(), true); + auto *thenBlock = &ifOp.getThenRegion().front(); + auto *elseBlock = &ifOp.getElseRegion().front(); + rewriter.inlineBlockBefore(&op.getTrueRegion().front(), thenBlock, + thenBlock->end()); + rewriter.inlineBlockBefore(&op.getFalseRegion().front(), elseBlock, + elseBlock->end()); + + rewriter.replaceOp(op, ifOp); + return mlir::success(); + } +}; + +class CIRYieldOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + mlir::LogicalResult + matchAndRewrite(mlir::cir::YieldOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto *parentOp = op->getParentOp(); + return llvm::TypeSwitch(parentOp) + .Case([&](auto) { + rewriter.replaceOpWithNewOp( + op, adaptor.getOperands()); + return mlir::success(); + }) + .Default([](auto) { return mlir::failure(); }); + } +}; + void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -554,8 +605,8 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add(converter, - patterns.getContext()); + CIRScopeOpLowering, CIRBrCondOpLowering, CIRTernaryOpLowering, + CIRYieldOpLowering>(converter, patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir new file mode 100644 index 000000000000..df6e6a09a5ff --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir @@ -0,0 +1,44 @@ +// RUN: cir-opt %s -cir-to-mlir | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir --canonicalize | FileCheck %s --check-prefix=MLIR-CANONICALIZE +// RUN: cir-opt %s -cir-to-mlir --canonicalize -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int + +module { +cir.func @_Z1xi(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool + %5 = cir.ternary(%4, true { + %7 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.yield %7 : !s32i + }, false { + %7 = cir.const(#cir.int<5> : !s32i) : !s32i + cir.yield %7 : !s32i + }) : (!cir.bool) -> !s32i + cir.store %5, %1 : !s32i, cir.ptr + %6 = cir.load %1 : cir.ptr , !s32i + cir.return %6 : !s32i + } +} + +// MLIR: %1 = arith.cmpi ugt, %0, %c0_i32 : i32 +// MLIR-NEXT: %2 = arith.extui %1 : i1 to i8 +// MLIR-NEXT: %3 = arith.trunci %2 : i8 to i1 +// MLIR-NEXT: %4 = scf.if %3 -> (i32) { +// MLIR-NEXT: %c3_i32 = arith.constant 3 : i32 +// MLIR-NEXT: scf.yield %c3_i32 : i32 +// MLIR-NEXT: } else { +// MLIR-NEXT: %c5_i32 = arith.constant 5 : i32 +// MLIR-NEXT: scf.yield %c5_i32 : i32 +// MLIR-NEXT: } +// MLIR-NEXT: memref.store %4, %alloca_0[] : memref + +// MLIR-CANONICALIZE: %[[CMP:.*]] = arith.cmpi ugt +// MLIR-CANONICALIZE: arith.select %[[CMP]] + +// LLVM: %[[CMP:.*]] = icmp ugt +// LLVM: select i1 %[[CMP]] From 8fd754b69792e9d91987c7c10b29f566fc7e32b4 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 4 Jan 2024 18:46:18 +0300 Subject: [PATCH 1316/2301] [CIR][CIRGen] supports struct copy from function call result (#369) This PR fixes the next case ``` typedef struct { } A; A create() { A a; return a; } void foo() { A a; a = create(); } ``` i.e. when a struct is assigned to a function call result --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 27 ++++++++++++++++++------- clang/test/CIR/CodeGen/agg-copy.c | 13 ++++++++++++ 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 8abb7f045ccd..74f1400ec9f7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -138,6 +138,9 @@ class AggExprEmitter : public StmtVisitor { enum ExprValueKind { EVK_RValue, EVK_NonRValue }; + /// Perform the final copy to DestPtr, if desired. + void buildFinalDestCopy(QualType type, RValue src); + /// Perform the final copy to DestPtr, if desired. SrcIsRValue is true if /// source comes from an RValue. void buildFinalDestCopy(QualType type, const LValue &src, @@ -331,6 +334,13 @@ void AggExprEmitter::buildAggLoadOfLValue(const Expr *E) { buildFinalDestCopy(E->getType(), LV); } +/// Perform the final copy to DestPtr, if desired. +void AggExprEmitter::buildFinalDestCopy(QualType type, RValue src) { + assert(src.isAggregate() && "value must be aggregate value!"); + LValue srcLV = CGF.makeAddrLValue(src.getAggregateAddress(), type); + buildFinalDestCopy(type, srcLV, EVK_RValue); +} + /// Perform the final copy to DestPtr, if desired. void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src, ExprValueKind SrcValueKind) { @@ -342,11 +352,13 @@ void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src, return; // Copy non-trivial C structs here. - if (Dest.isVolatile() || UnimplementedFeature::volatileTypes()) - llvm_unreachable("volatile is NYI"); + if (Dest.isVolatile()) + assert(!UnimplementedFeature::volatileTypes()); if (SrcValueKind == EVK_RValue) { - llvm_unreachable("rvalue is NYI"); + if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) { + llvm_unreachable("move assignment/move ctor for rvalue is NYI"); + } } else { if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) llvm_unreachable("non-trivial primitive copy is NYI"); @@ -811,7 +823,9 @@ void AggExprEmitter::withReturnValueSlot( if (!UseTemp) { RetAddr = Dest.getAddress(); } else { - llvm_unreachable("NYI"); + RetAddr = CGF.CreateMemTemp(RetTy, CGF.getLoc(E->getSourceRange()), + "tmp", &RetAddr); + assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); } RValue Src = @@ -822,14 +836,13 @@ void AggExprEmitter::withReturnValueSlot( return; assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer()); - llvm_unreachable("NYI"); - // TODO(cir): EmitFinalDestCopy(E->getType(), Src); + buildFinalDestCopy(E->getType(), Src); if (!RequiresDestruction) { // If there's no dtor to run, the copy was the last use of our temporary. // Since we're not guaranteed to be in an ExprWithCleanups, clean up // eagerly. - llvm_unreachable("NYI"); + assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); } } diff --git a/clang/test/CIR/CodeGen/agg-copy.c b/clang/test/CIR/CodeGen/agg-copy.c index 9f259583e1c2..43f106c55c57 100644 --- a/clang/test/CIR/CodeGen/agg-copy.c +++ b/clang/test/CIR/CodeGen/agg-copy.c @@ -59,4 +59,17 @@ A foo3(void) { // CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr void foo4(A* a1) { A a2 = *a1; +} + +A create() { A a; return a; } + +// CHECK: cir.func {{.*@foo5}} +// CHECK: [[TMP0]] = cir.alloca !ty_22A22, cir.ptr , +// CHECK: [[TMP1]] = cir.alloca !ty_22A22, cir.ptr , ["tmp"] {alignment = 4 : i64} +// CHECK: [[TMP2]] = cir.call @create() : () -> !ty_22A22 +// CHECK: cir.store [[TMP2]], [[TMP1]] : !ty_22A22, cir.ptr +// CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr +void foo5() { + A a; + a = create(); } \ No newline at end of file From 4b284d84952a6e4f83866725e2c931a4368bde60 Mon Sep 17 00:00:00 2001 From: Nikolas Klauser Date: Thu, 4 Jan 2024 17:10:13 +0100 Subject: [PATCH 1317/2301] [CIR][CodeGen] Fix lowering for class types (#378) --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 16 ++-- clang/test/CIR/Lowering/class.cir | 96 +++++++++++++++++++ 2 files changed, 103 insertions(+), 9 deletions(-) create mode 100644 clang/test/CIR/Lowering/class.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 2d63b559804d..bc1edff65af6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -337,7 +337,7 @@ static void lowerNestedYield(mlir::cir::YieldOpKind targetKind, [&](mlir::Operation *op) { if (!isNested(op)) return mlir::WalkResult::advance(); - + // don't process breaks/continues in nested loops and switches if (isa(*op)) return mlir::WalkResult::skip(); @@ -345,7 +345,7 @@ static void lowerNestedYield(mlir::cir::YieldOpKind targetKind, auto yield = dyn_cast(*op); if (yield && yield.getKind() == targetKind) { rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp(op, yield.getArgs(), dst); + rewriter.replaceOpWithNewOp(op, yield.getArgs(), dst); } return mlir::WalkResult::advance(); @@ -1386,11 +1386,11 @@ class CIRSwitchOpLowering } for (auto& blk : region.getBlocks()) { - if (blk.getNumSuccessors()) + if (blk.getNumSuccessors()) continue; // Handle switch-case yields. - auto *terminator = blk.getTerminator(); + auto *terminator = blk.getTerminator(); if (auto yieldOp = dyn_cast(terminator)) { // TODO(cir): Ensure every yield instead of dealing with optional // values. @@ -1414,7 +1414,7 @@ class CIRSwitchOpLowering } } - lowerNestedYield(mlir::cir::YieldOpKind::Break, + lowerNestedYield(mlir::cir::YieldOpKind::Break, rewriter, region, exitBlock); // Extract region contents before erasing the switch op. @@ -1930,7 +1930,8 @@ class CIRGetMemberOpLowering assert(structTy && "expected struct type"); switch (structTy.getKind()) { - case mlir::cir::StructType::Struct: { + case mlir::cir::StructType::Struct: + case mlir::cir::StructType::Class: { // Since the base address is a pointer to an aggregate, the first offset // is always zero. The second offset tell us which member it will access. llvm::SmallVector offset{0, op.getIndex()}; @@ -1945,9 +1946,6 @@ class CIRGetMemberOpLowering rewriter.replaceOpWithNewOp(op, llResTy, adaptor.getAddr()); return mlir::success(); - default: - return op.emitError() - << "struct kind '" << structTy.getKind() << "' is NYI"; } } }; diff --git a/clang/test/CIR/Lowering/class.cir b/clang/test/CIR/Lowering/class.cir new file mode 100644 index 000000000000..afaacbec1bac --- /dev/null +++ b/clang/test/CIR/Lowering/class.cir @@ -0,0 +1,96 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +!s32i = !cir.int +!u8i = !cir.int +!u32i = !cir.int +!ty_22S22 = !cir.struct +!ty_22S2A22 = !cir.struct +!ty_22S122 = !cir.struct} #cir.record.decl.ast> +!ty_22S222 = !cir.struct +!ty_22S322 = !cir.struct + +module { + cir.func @test() { + %1 = cir.alloca !ty_22S22, cir.ptr , ["x"] {alignment = 4 : i64} + // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#CLASS:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"class.S", (i8, i32)> + %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr + // CHECK: = llvm.getelementptr %[[#CLASS]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"class.S", (i8, i32)> + %5 = cir.get_member %1[1] {name = "i"} : !cir.ptr -> !cir.ptr + // CHECK: = llvm.getelementptr %[[#CLASS]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"class.S", (i8, i32)> + cir.return + } + + cir.func @shouldConstInitLocalClassesWithConstStructAttr() { + %0 = cir.alloca !ty_22S2A22, cir.ptr , ["s"] {alignment = 4 : i64} + %1 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22) : !ty_22S2A22 + cir.store %1, %0 : !ty_22S2A22, cir.ptr + cir.return + } + // CHECK: llvm.func @shouldConstInitLocalClassesWithConstStructAttr() + // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 + // CHECK: %1 = llvm.alloca %0 x !llvm.struct<"class.S2A", (i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + // CHECK: %2 = llvm.mlir.undef : !llvm.struct<"class.S2A", (i32)> + // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.struct<"class.S2A", (i32)> + // CHECK: llvm.store %4, %1 : !llvm.struct<"class.S2A", (i32)>, !llvm.ptr + // CHECK: llvm.return + // CHECK: } + + // Should lower basic #cir.const_struct initializer. + cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.ptr : !cir.ptr}> : !ty_22S122 + // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"class.S1", (i32, f32, ptr)> { + // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"class.S1", (i32, f32, ptr)> + // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %2 = llvm.insertvalue %1, %0[0] : !llvm.struct<"class.S1", (i32, f32, ptr)> + // CHECK: %3 = llvm.mlir.constant(1.000000e-01 : f32) : f32 + // CHECK: %4 = llvm.insertvalue %3, %2[1] : !llvm.struct<"class.S1", (i32, f32, ptr)> + // CHECK: %5 = llvm.mlir.zero : !llvm.ptr + // CHECK: %6 = llvm.insertvalue %5, %4[2] : !llvm.struct<"class.S1", (i32, f32, ptr)> + // CHECK: llvm.return %6 : !llvm.struct<"class.S1", (i32, f32, ptr)> + // CHECK: } + + // Should lower nested #cir.const_struct initializer. + cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22}> : !ty_22S222 + // CHECK: llvm.mlir.global external @s2() {addr_space = 0 : i32} : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> { + // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> + // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"class.S2A", (i32)> + // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"class.S2A", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> + // CHECK: llvm.return %4 : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> + // CHECK: } + + cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22S322]> : !cir.array + // CHECK: llvm.mlir.global external @s3() {addr_space = 0 : i32} : !llvm.array<3 x struct<"class.S3", (i32)>> { + // CHECK: %0 = llvm.mlir.undef : !llvm.array<3 x struct<"class.S3", (i32)>> + // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"class.S3", (i32)> + // CHECK: %2 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %3 = llvm.insertvalue %2, %1[0] : !llvm.struct<"class.S3", (i32)> + // CHECK: %4 = llvm.insertvalue %3, %0[0] : !llvm.array<3 x struct<"class.S3", (i32)>> + // CHECK: %5 = llvm.mlir.undef : !llvm.struct<"class.S3", (i32)> + // CHECK: %6 = llvm.mlir.constant(2 : i32) : i32 + // CHECK: %7 = llvm.insertvalue %6, %5[0] : !llvm.struct<"class.S3", (i32)> + // CHECK: %8 = llvm.insertvalue %7, %4[1] : !llvm.array<3 x struct<"class.S3", (i32)>> + // CHECK: %9 = llvm.mlir.undef : !llvm.struct<"class.S3", (i32)> + // CHECK: %10 = llvm.mlir.constant(3 : i32) : i32 + // CHECK: %11 = llvm.insertvalue %10, %9[0] : !llvm.struct<"class.S3", (i32)> + // CHECK: %12 = llvm.insertvalue %11, %8[2] : !llvm.array<3 x struct<"class.S3", (i32)>> + // CHECK: llvm.return %12 : !llvm.array<3 x struct<"class.S3", (i32)>> + // CHECK: } + + cir.func @shouldLowerClassCopies() { + // CHECK: llvm.func @shouldLowerClassCopies() + %1 = cir.alloca !ty_22S22, cir.ptr , ["a"] {alignment = 4 : i64} + // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#SA:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"class.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + %2 = cir.alloca !ty_22S22, cir.ptr , ["b", init] {alignment = 4 : i64} + // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 + // CHECK: %[[#SB:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"class.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr + cir.copy %1 to %2 : !cir.ptr + // CHECK: %[[#SIZE:]] = llvm.mlir.constant(8 : i32) : i32 + // CHECK: "llvm.intr.memcpy"(%[[#SB]], %[[#SA]], %[[#SIZE]]) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> () + cir.return + } +} From 562bef6260ac71916507ca49ecec72e258cc28a2 Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Thu, 4 Jan 2024 23:37:10 +0300 Subject: [PATCH 1318/2301] [CIR][Lowering] Support lowering of cir.const with GlobalViewAttr (gh-352) (#363) The error manifested in code like ``` int a[16]; int *const p = a; void foo() { p[0]; } ``` It's one the most frequent errors in current llvm-test-suite. I've added the test to globals.cir which is currently XFAILed, I think @gitoleg will fix it soon. Co-authored-by: Bruno Cardoso Lopes --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 ++++++ clang/test/CIR/Lowering/globals.cir | 11 +++++++++++ 2 files changed, 17 insertions(+) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index bc1edff65af6..786cca9425a6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1056,6 +1056,12 @@ class CIRConstantLowering return mlir::success(); } } + // Lower GlobalViewAttr to llvm.mlir.addressof + if (auto gv = op.getValue().dyn_cast()) { + auto newOp = lowerCirAttrAsValue(op, gv, rewriter, getTypeConverter()); + rewriter.replaceOp(op, newOp); + return mlir::success(); + } attr = op.getValue(); } // TODO(cir): constant arrays are currently just pushed into the stack using diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 699bf3fd3b5a..99bfa76dd3a8 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -177,4 +177,15 @@ module { //MLIR: %[[RES8:.*]] = llvm.getelementptr %[[RES7]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> //MLIR: %[[RES9:.*]] = llvm.load %[[RES8]] : !llvm.ptr -> !llvm.ptr //MLIR: llvm.call %[[RES9]]({{.*}}) : !llvm.ptr, (i32) -> () + + cir.global external @zero_array = #cir.zero : !cir.array + cir.func @use_zero_array() { + %0 = cir.const(#cir.global_view<@zero_array> : !cir.ptr) : !cir.ptr + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %2 = cir.ptr_stride(%0 : !cir.ptr, %1 : !s32i), !cir.ptr + %3 = cir.load %2 : cir.ptr , !s32i + cir.return + } + // MLIR: %0 = llvm.mlir.addressof @zero_array + } From 0613af7ef8172f6a544df537724bcbad1904b455 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 5 Jan 2024 04:38:22 +0800 Subject: [PATCH 1319/2301] [CIR][CIRGen] emit cir.zero for constant string literals (#373) This PR addresses #248 . Currently string literals are always lowered to a `cir.const_array` attribute even if the string literal only contains null bytes. This patch make the CodeGen emits `cir.zero` for these string literals. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 12 ++++++++++-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 7 ++++++- clang/test/CIR/CodeGen/globals.c | 2 +- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index c248d4cb560e..2755c62f4463 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -149,9 +149,17 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::ConstPtrAttr::get(getContext(), t, v); } - mlir::cir::ConstArrayAttr getString(llvm::StringRef str, mlir::Type eltTy, - unsigned size = 0) { + mlir::Attribute getString(llvm::StringRef str, mlir::Type eltTy, + unsigned size = 0) { unsigned finalSize = size ? size : str.size(); + + // If the string is full of null bytes, emit a #cir.zero rather than + // a #cir.const_array. + if (str.count('\0') == str.size()) { + auto arrayTy = mlir::cir::ArrayType::get(getContext(), eltTy, finalSize); + return getZeroAttr(arrayTy); + } + auto arrayTy = mlir::cir::ArrayType::get(getContext(), eltTy, finalSize); return getConstArray(mlir::StringAttr::get(str, arrayTy), arrayTy); } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 3962c1f9f026..0c2989fe00e6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1395,7 +1395,12 @@ mlir::cir::GlobalOp CIRGenItaniumRTTIBuilder::GetAddrOfTypeName( auto Align = CGM.getASTContext().getTypeAlignInChars(CGM.getASTContext().CharTy); - auto GV = CGM.createOrReplaceCXXRuntimeVariable(loc, Name, Init.getType(), + // builder.getString can return a #cir.zero if the string given to it only + // contains null bytes. However, type names cannot be full of null bytes. + // So cast Init to a ConstArrayAttr should be safe. + auto InitStr = cast(Init); + + auto GV = CGM.createOrReplaceCXXRuntimeVariable(loc, Name, InitStr.getType(), Linkage, Align); CIRGenModule::setInitializer(GV, Init); return GV; diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index ca347f425df6..522687aac53f 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -41,7 +41,7 @@ struct { char y[3]; char z[3]; } nestedString = {"1", "", "\0"}; -// CHECK: cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array}> +// CHECK: cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.zero : !cir.array, #cir.zero : !cir.array}> struct { char *name; From 77f897d163a05b6d0bc11c7a7e9973cd384ee74c Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 5 Jan 2024 04:41:34 +0800 Subject: [PATCH 1320/2301] [CIR][CIRGen] Lvalues and comma expression (#376) Currently, codegen of lvalue comma expression would crash: ```cpp int &foo1(); int &foo2(); void c1() { int &x = (foo1(), foo2()); // CRASH } ``` This simple patch fixes this issue. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 3 ++- clang/test/CIR/CodeGen/comma.cpp | 13 +++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 0e3d05766ad4..88141cff9293 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -866,7 +866,8 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { // Comma expressions just emit their LHS then their RHS as an l-value. if (E->getOpcode() == BO_Comma) { - assert(0 && "not implemented"); + buildIgnoredExpr(E->getLHS()); + return buildLValue(E->getRHS()); } if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) diff --git a/clang/test/CIR/CodeGen/comma.cpp b/clang/test/CIR/CodeGen/comma.cpp index 0e10c8edec3f..4d2ce88b9d26 100644 --- a/clang/test/CIR/CodeGen/comma.cpp +++ b/clang/test/CIR/CodeGen/comma.cpp @@ -15,3 +15,16 @@ int c0() { // CHECK: %[[#]] = cir.binop(add, %[[#LOADED_B]], %[[#]]) : !s32i // CHECK: %[[#LOADED_A:]] = cir.load %[[#A]] : cir.ptr , !s32i // CHECK: cir.store %[[#LOADED_A]], %[[#RET]] : !s32i, cir.ptr + +int &foo1(); +int &foo2(); + +void c1() { + int &x = (foo1(), foo2()); +} + +// CHECK: cir.func @_Z2c1v() +// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr > +// CHECK: %1 = cir.call @_Z4foo1v() : () -> !cir.ptr +// CHECK: %2 = cir.call @_Z4foo2v() : () -> !cir.ptr +// CHECK: cir.store %2, %0 : !cir.ptr, cir.ptr > From 8ccdc8709e74b000976e47a58aecd260b8f6fd11 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 9 Jan 2024 08:14:10 +0800 Subject: [PATCH 1321/2301] [CIR] Replace AnyType with CIR_AnyType (#371) This PR addresses #90. It introduces a new type constraint `CIR_AnyType` which allows CIR types and MLIR floating-point types. Present `AnyType` constraints are replaced with the new `CIR_AnyType` constraint. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 124 +++++++++--------- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 12 ++ 2 files changed, 74 insertions(+), 62 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 3515196f7acc..fed38ccc1930 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -98,8 +98,8 @@ def CastOp : CIR_Op<"cast", [Pure]> { ``` }]; - let arguments = (ins CastKind:$kind, AnyType:$src); - let results = (outs AnyType:$result); + let arguments = (ins CastKind:$kind, CIR_AnyType:$src); + let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ `(` $kind `,` $src `:` type($src) `)` @@ -168,10 +168,10 @@ def PtrDiffOp : CIR_Op<"ptr_diff", [Pure, SameTypeOperands]> { }]; let results = (outs CIR_IntType:$result); - let arguments = (ins AnyType:$lhs, AnyType:$rhs); + let arguments = (ins CIR_PointerType:$lhs, CIR_PointerType:$rhs); let assemblyFormat = [{ - `(` $lhs `,` $rhs `)` `:` type($lhs) `->` type($result) attr-dict + `(` $lhs `,` $rhs `)` `:` qualified(type($lhs)) `->` qualified(type($result)) attr-dict }]; // Already covered by the traits @@ -198,12 +198,12 @@ def PtrStrideOp : CIR_Op<"ptr_stride", ``` }]; - let arguments = (ins AnyType:$base, CIR_IntType:$stride); - let results = (outs AnyType:$result); + let arguments = (ins CIR_PointerType:$base, CIR_IntType:$stride); + let results = (outs CIR_PointerType:$result); let assemblyFormat = [{ - `(` $base `:` type($base) `,` $stride `:` qualified(type($stride)) `)` - `,` type($result) attr-dict + `(` $base `:` qualified(type($base)) `,` $stride `:` qualified(type($stride)) `)` + `,` qualified(type($result)) attr-dict }]; let extraClassDeclaration = [{ @@ -241,8 +241,8 @@ def ConstantOp : CIR_Op<"const", // The constant operation takes an attribute as the only input. let arguments = (ins TypedAttrInterface:$value); - // The constant operation returns a single value of AnyType. - let results = (outs AnyType:$res); + // The constant operation returns a single value of CIR_AnyType. + let results = (outs CIR_AnyType:$res); let assemblyFormat = [{ `(` custom($value) `)` attr-dict `:` type($res) @@ -389,7 +389,7 @@ def LoadOp : CIR_Op<"load", [ let arguments = (ins Arg:$addr, UnitAttr:$isDeref); - let results = (outs AnyType:$result); + let results = (outs CIR_AnyType:$result); // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. @@ -423,7 +423,7 @@ def StoreOp : CIR_Op<"store", [ ``` }]; - let arguments = (ins AnyType:$value, + let arguments = (ins CIR_AnyType:$value, Arg:$addr); @@ -458,7 +458,7 @@ def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp, Loo // The return operation takes an optional input operand to return. This // value must match the return type of the enclosing function. - let arguments = (ins Variadic:$input); + let arguments = (ins Variadic:$input); // The return operation only emits the input in the format if it is present. let assemblyFormat = "($input^ `:` type($input))? attr-dict "; @@ -561,7 +561,7 @@ def TernaryOp : CIR_Op<"ternary", let arguments = (ins CIR_BoolType:$cond); let regions = (region SizedRegion<1>:$trueRegion, SizedRegion<1>:$falseRegion); - let results = (outs Optional:$result); + let results = (outs Optional:$result); let skipDefaultBuilders = 1; let builders = [ @@ -671,7 +671,7 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, }]; let arguments = (ins OptionalAttr:$kind, - Variadic:$args); + Variadic:$args); let builders = [ OpBuilder<(ins), [{ /* nothing to do */ }]>, OpBuilder<(ins "YieldOpKind":$kind), [{ @@ -739,7 +739,7 @@ def ScopeOp : CIR_Op<"scope", [ will be inserted implicitly. }]; - let results = (outs Optional:$results); + let results = (outs Optional:$results); let regions = (region AnyRegion:$scopeRegion); let hasVerifier = 1; @@ -799,8 +799,8 @@ def UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> { ``` }]; - let results = (outs AnyType:$result); - let arguments = (ins Arg:$kind, Arg:$input); + let results = (outs CIR_AnyType:$result); + let arguments = (ins Arg:$kind, Arg:$input); let assemblyFormat = [{ `(` $kind `,` $input `)` `:` type($input) `,` type($result) attr-dict @@ -852,10 +852,10 @@ def BinOp : CIR_Op<"binop", [Pure, ``` }]; - // TODO: get more accurate than AnyType - let results = (outs AnyType:$result); + // TODO: get more accurate than CIR_AnyType + let results = (outs CIR_AnyType:$result); let arguments = (ins Arg:$kind, - AnyType:$lhs, AnyType:$rhs); + CIR_AnyType:$lhs, CIR_AnyType:$rhs); let assemblyFormat = [{ `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) attr-dict @@ -929,10 +929,10 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { ``` }]; - // TODO: get more accurate than AnyType - let results = (outs AnyType:$result); + // TODO: get more accurate than CIR_AnyType + let results = (outs CIR_AnyType:$result); let arguments = (ins Arg:$kind, - AnyType:$lhs, AnyType:$rhs); + CIR_AnyType:$lhs, CIR_AnyType:$rhs); let assemblyFormat = [{ `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,` type($result) attr-dict @@ -1066,7 +1066,7 @@ def BrOp : CIR_Op<"br", }]> ]; - let arguments = (ins Variadic:$destOperands); + let arguments = (ins Variadic:$destOperands); let successors = (successor AnySuccessor:$dest); let assemblyFormat = [{ $dest (`(` $destOperands^ `:` type($destOperands) `)`)? attr-dict @@ -1111,8 +1111,8 @@ def BrCondOp : CIR_Op<"brcond", ]; let arguments = (ins CIR_BoolType:$cond, - Variadic:$destOperandsTrue, - Variadic:$destOperandsFalse); + Variadic:$destOperandsTrue, + Variadic:$destOperandsFalse); let successors = (successor AnySuccessor:$destTrue, AnySuccessor:$destFalse); let assemblyFormat = [{ $cond @@ -1420,7 +1420,7 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", }]; let arguments = (ins OptionalAttr:$name, - Optional:$sym_addr, + Optional:$sym_addr, I32Attr:$vtable_index, I32Attr:$address_point_index); let results = (outs Res:$addr); @@ -1447,13 +1447,13 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", def SetBitfieldOp : CIR_Op<"set_bitfield"> { let summary = "Set a bitfield"; - let description = [{ - The `cir.set_bitfield` operation provides a store-like access to + let description = [{ + The `cir.set_bitfield` operation provides a store-like access to a bit field of a record. It expects an address of a storage where to store, a type of the storage, a value being stored, a name of a bit field, a pointer to the storage in the - base record, a size of the storage, a size the bit field, an offset + base record, a size of the storage, a size the bit field, an offset of the bit field and a sign. Returns a value being stored. Example. @@ -1487,29 +1487,29 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { }]; let arguments = (ins - AnyType:$dst, - AnyType:$src, + CIR_PointerType:$dst, + CIR_AnyType:$src, BitfieldInfoAttr:$bitfield_info ); let results = (outs CIR_IntType:$result); - let assemblyFormat = [{ `(`$bitfield_info`,` $dst`:`type($dst)`,` + let assemblyFormat = [{ `(`$bitfield_info`,` $dst`:`qualified(type($dst))`,` $src`:`type($src) `)` attr-dict `->` type($result) }]; - + let builders = [ OpBuilder<(ins "Type":$type, "Value":$dst, "Type":$storage_type, "Value":$src, "StringRef":$name, - "unsigned":$size, + "unsigned":$size, "unsigned":$offset, "bool":$is_signed ), - [{ - BitfieldInfoAttr info = - BitfieldInfoAttr::get($_builder.getContext(), + [{ + BitfieldInfoAttr info = + BitfieldInfoAttr::get($_builder.getContext(), name, storage_type, size, offset, is_signed); build($_builder, $_state, type, dst, src, info); @@ -1523,7 +1523,7 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { def GetBitfieldOp : CIR_Op<"get_bitfield"> { let summary = "Get a bitfield"; - let description = [{ + let description = [{ The `cir.get_bitfield` operation provides a load-like access to a bit field of a record. @@ -1561,14 +1561,14 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> { }]; let arguments = (ins - AnyType:$addr, + CIR_PointerType:$addr, BitfieldInfoAttr:$bitfield_info ); let results = (outs CIR_IntType:$result); - let assemblyFormat = [{ `(`$bitfield_info `,` $addr attr-dict `:` - type($addr) `)` `->` type($result) }]; + let assemblyFormat = [{ `(`$bitfield_info `,` $addr attr-dict `:` + qualified(type($addr)) `)` `->` type($result) }]; let builders = [ OpBuilder<(ins "Type":$type, @@ -1580,8 +1580,8 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> { "bool":$is_signed ), [{ - BitfieldInfoAttr info = - BitfieldInfoAttr::get($_builder.getContext(), + BitfieldInfoAttr info = + BitfieldInfoAttr::get($_builder.getContext(), name, storage_type, size, offset, is_signed); build($_builder, $_state, type, addr, info); @@ -1669,7 +1669,7 @@ def VecExtractOp : CIR_Op<"vec.extract", [Pure, }]; let arguments = (ins CIR_VectorType:$vec, CIR_IntType:$index); - let results = (outs AnyType:$result); + let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ $vec `[` $index `:` type($index) `]` type($vec) `->` type($result) attr-dict @@ -1691,7 +1691,7 @@ def VecCreateOp : CIR_Op<"vec.create", [Pure]> { in the vector type. }]; - let arguments = (ins Variadic:$elements); + let arguments = (ins Variadic:$elements); let results = (outs CIR_VectorType:$result); let assemblyFormat = [{ @@ -1922,9 +1922,9 @@ def CallOp : CIR_Op<"call", }]; let arguments = (ins OptionalAttr:$callee, - Variadic:$operands, + Variadic:$operands, OptionalAttr:$ast); - let results = (outs Variadic); + let results = (outs Variadic); let builders = [ OpBuilder<(ins "FuncOp":$callee, CArg<"ValueRange", "{}">:$operands), [{ @@ -2125,7 +2125,7 @@ def TryOp : CIR_Op<"try", let regions = (region SizedRegion<1>:$body); // FIXME: should be exception type. - let results = (outs AnyType:$result); + let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ `{` @@ -2161,7 +2161,7 @@ def CatchOp : CIR_Op<"catch", let description = [{ }]; - let arguments = (ins AnyType:$exception_info, + let arguments = (ins CIR_AnyType:$exception_info, OptionalAttr:$catchers); let regions = (region VariadicRegion:$regions); @@ -2306,11 +2306,11 @@ def SameFirstSecondOperandAndResultType : def StdFindOp : CIR_Op<"std.find", [SameFirstSecondOperandAndResultType]> { let arguments = (ins FlatSymbolRefAttr:$original_fn, - AnyType:$first, - AnyType:$last, - AnyType:$pattern); + CIR_AnyType:$first, + CIR_AnyType:$last, + CIR_AnyType:$pattern); let summary = "std:find()"; - let results = (outs AnyType:$result); + let results = (outs CIR_AnyType:$result); let description = [{ Search for `pattern` in data range from `first` to `last`. This currently @@ -2342,9 +2342,9 @@ def StdFindOp : CIR_Op<"std.find", [SameFirstSecondOperandAndResultType]> { //===----------------------------------------------------------------------===// def IterBeginOp : CIR_Op<"iterator_begin"> { - let arguments = (ins FlatSymbolRefAttr:$original_fn, AnyType:$container); + let arguments = (ins FlatSymbolRefAttr:$original_fn, CIR_AnyType:$container); let summary = "Returns an iterator to the first element of a container"; - let results = (outs AnyType:$result); + let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ `(` $original_fn `,` $container `:` type($container) @@ -2354,10 +2354,10 @@ def IterBeginOp : CIR_Op<"iterator_begin"> { } def IterEndOp : CIR_Op<"iterator_end"> { - let arguments = (ins FlatSymbolRefAttr:$original_fn, AnyType:$container); + let arguments = (ins FlatSymbolRefAttr:$original_fn, CIR_AnyType:$container); let summary = "Returns an iterator to the element following the last element" " of a container"; - let results = (outs AnyType:$result); + let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ `(` $original_fn `,` $container `:` type($container) @@ -2416,7 +2416,7 @@ def VACopyOp : CIR_Op<"va.copy">, } def VAArgOp : CIR_Op<"va.arg">, - Results<(outs AnyType:$result)>, + Results<(outs CIR_AnyType:$result)>, Arguments<(ins CIR_PointerType:$arg_list)> { let summary = "Fetches next variadic element as a given type"; let assemblyFormat = "$arg_list attr-dict `:` functional-type(operands, $result)"; @@ -2498,7 +2498,7 @@ def ThrowOp : CIR_Op<"throw", ``` }]; - let arguments = (ins Optional:$exception_ptr, + let arguments = (ins Optional:$exception_ptr, OptionalAttr:$type_info, OptionalAttr:$dtor); @@ -2597,7 +2597,7 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { ``` }]; - let results = (outs Optional:$res); + let results = (outs Optional:$res); let arguments = ( ins StrAttr:$asm_string, diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 0d568c2d504c..464ef2100d96 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -253,4 +253,16 @@ def VoidPtr : Type< "mlir::cir::VoidType::get($_builder.getContext()))"> { } +//===----------------------------------------------------------------------===// +// Global type constraints +//===----------------------------------------------------------------------===// + +def CIR_StructType : Type()">, + "CIR struct type">; + +def CIR_AnyType : AnyTypeOf<[ + CIR_IntType, CIR_PointerType, CIR_BoolType, CIR_ArrayType, CIR_VectorType, + CIR_FuncType, CIR_VoidType, CIR_StructType, AnyFloat, +]>; + #endif // MLIR_CIR_DIALECT_CIR_TYPES From f4cd65e399e69ce267e4c8698c9acd631a9ebbbe Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 9 Jan 2024 08:14:55 +0800 Subject: [PATCH 1322/2301] [CIR][CIRGen] Support array def after decl with unknown bound (#375) Arrays can be first declared without a known bound, and then defined with a known bound. For example: ```cpp extern int data[]; int test() { return data[1]; } int data[3] {1, 2, 3}; ``` Currently `clangir` crashes on generating CIR for this case. This is due to the type of the `data` definition being different from its declaration. This patch adds support for such a case. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 49 ++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenModule.h | 11 ++++- .../test/CIR/CodeGen/array-unknown-bound.cpp | 14 ++++++ 3 files changed, 67 insertions(+), 7 deletions(-) create mode 100644 clang/test/CIR/CodeGen/array-unknown-bound.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index e7d3a8f521cf..d5bdf54ee403 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -472,7 +472,8 @@ mlir::Value CIRGenModule::getGlobalValue(const Decl *D) { mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM, mlir::Location loc, StringRef name, mlir::Type t, - bool isCst) { + bool isCst, + mlir::Operation *insertPoint) { mlir::cir::GlobalOp g; auto &builder = CGM.getBuilder(); { @@ -487,8 +488,12 @@ mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM, builder.setInsertionPoint(curCGF->CurFn); g = builder.create(loc, name, t, isCst); - if (!curCGF) - CGM.getModule().push_back(g); + if (!curCGF) { + if (insertPoint) + CGM.getModule().insert(insertPoint, g); + else + CGM.getModule().push_back(g); + } // Default to private until we can judge based on the initializer, // since MLIR doesn't allow public declarations. @@ -502,6 +507,35 @@ void CIRGenModule::setCommonAttributes(GlobalDecl GD, mlir::Operation *GV) { assert(!UnimplementedFeature::setCommonAttributes()); } +void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, + mlir::cir::GlobalOp New) { + assert(Old.getSymName() == New.getSymName() && "symbol names must match"); + + // If the types does not match, update all references to Old to the new type. + auto OldTy = Old.getSymType(); + auto NewTy = New.getSymType(); + if (OldTy != NewTy) { + auto OldSymUses = Old.getSymbolUses(theModule.getOperation()); + if (OldSymUses.has_value()) { + for (auto Use : *OldSymUses) { + auto *UserOp = Use.getUser(); + assert((isa(UserOp) || + isa(UserOp)) && + "GlobalOp symbol user is neither a GetGlobalOp nor a GlobalOp"); + + if (auto GGO = dyn_cast(Use.getUser())) { + auto UseOpResultValue = GGO.getAddr(); + UseOpResultValue.setType( + mlir::cir::PointerType::get(builder.getContext(), NewTy)); + } + } + } + } + + // Remove old global from the module. + Old.erase(); +} + /// If the specified mangled name is not in the module, /// create and return an mlir GlobalOp with the specified type (TODO(cir): /// address space). @@ -593,11 +627,14 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // mlir::SymbolTable::Visibility::Public is the default, no need to explicitly // mark it as such. auto GV = CIRGenModule::createGlobalOp(*this, loc, MangledName, Ty, - /*isConstant=*/false); + /*isConstant=*/false, + /*insertPoint=*/Entry.getOperation()); // If we already created a global with the same mangled name (but different - // type) before, take its name and remove it from its parent. - assert(!Entry && "not implemented"); + // type) before, replace it with the new global. + if (Entry) { + replaceGlobal(Entry, GV); + } // This is the first use or definition of a mangled name. If there is a // deferred decl with this name, remember that we need to emit it at the end diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index aeb1313b38c4..a0b30e7464ab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -220,7 +220,8 @@ class CIRGenModule : public CIRGenTypeCache { static mlir::cir::GlobalOp createGlobalOp(CIRGenModule &CGM, mlir::Location loc, StringRef name, - mlir::Type t, bool isCst = false); + mlir::Type t, bool isCst = false, + mlir::Operation *insertPoint = nullptr); /// Return the mlir::Value for the address of the given global variable. /// If Ty is non-null and if the global doesn't exist, then it will be created @@ -445,6 +446,14 @@ class CIRGenModule : public CIRGenTypeCache { void setGVProperties(mlir::Operation *Op, const NamedDecl *D) const; void setGVPropertiesAux(mlir::Operation *Op, const NamedDecl *D) const; + /// Replace the present global `Old` with the given global `New`. Their symbol + /// names must match; their types can be different. Usages of the old global + /// will be automatically updated if their types mismatch. + /// + /// This function will erase the old global. This function will NOT insert the + /// new global into the module. + void replaceGlobal(mlir::cir::GlobalOp Old, mlir::cir::GlobalOp New); + /// Determine whether the definition must be emitted; if this returns \c /// false, the definition can be emitted lazily if it's used. bool MustBeEmitted(const clang::ValueDecl *D); diff --git a/clang/test/CIR/CodeGen/array-unknown-bound.cpp b/clang/test/CIR/CodeGen/array-unknown-bound.cpp new file mode 100644 index 000000000000..09f75ca27f27 --- /dev/null +++ b/clang/test/CIR/CodeGen/array-unknown-bound.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +extern int table[]; +// CHECK: cir.global external @table = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array + +int *table_ptr = table; +// CHECK: cir.global external @table_ptr = #cir.global_view<@table> : !cir.ptr + +int test() { return table[1]; } +// CHECK: cir.func @_Z4testv() -> !s32i extra( {inline = #cir.inline, optnone = #cir.optnone} ) { +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.get_global @table : cir.ptr > + +int table[3] {1, 2, 3}; From 7bfb2f9d2130237266f5fb359ab4bd2361d889d2 Mon Sep 17 00:00:00 2001 From: Nikolas Klauser Date: Tue, 9 Jan 2024 01:15:56 +0100 Subject: [PATCH 1323/2301] [CIR][CIRGen] Implement constant evaluation for integral builtins (#381) --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 7 ++++++- clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp | 11 +++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 825024daab8a..4b56fe53c1a0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -52,7 +52,12 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Expr::EvalResult Result; if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getASTContext()) && !Result.hasSideEffects()) { - llvm_unreachable("NYI"); + if (Result.Val.isInt()) { + return RValue::get(builder.getConstInt(getLoc(E->getSourceRange()), + Result.Val.getInt())); + } + if (Result.Val.isFloat()) + llvm_unreachable("NYI"); } // If current long-double semantics is IEEE 128-bit, replace math builtins diff --git a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp new file mode 100644 index 000000000000..ad89dcd25484 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +auto func() { + return __builtin_strcmp("", ""); + // CHECK: cir.func @_Z4funcv() -> !s32i extra( {inline = #cir.inline, optnone = #cir.optnone} ) { + // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) + // CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc7) + // CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr loc(#loc8) + // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i loc(#loc8) + // CHECK-NEXT: cir.return %2 : !s32i loc(#loc8) +} From eb30c34cb817a1ffa6f7e1cbe60fb5967dba4d32 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Mon, 8 Jan 2024 21:54:39 -0300 Subject: [PATCH 1324/2301] [CIR][Transforms][NFC] Refactor MergeCleanups pass (#384) Breaks the pass into smaller more manageable rewrites. --- .../CIR/Dialect/Transforms/MergeCleanups.cpp | 326 +++++++----------- 1 file changed, 123 insertions(+), 203 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index f295361140a9..822ce6f4bb2c 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -7,248 +7,168 @@ //===----------------------------------------------------------------------===// #include "PassDetail.h" - -#include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "clang/CIR/Dialect/Passes.h" - #include "mlir/Dialect/Func/IR/FuncOps.h" - -#include "mlir/IR/Matchers.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" using namespace mlir; using namespace cir; -namespace { - -template -struct SimplifyRetYieldBlocks : public mlir::OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - mlir::LogicalResult replaceScopeLikeOp(PatternRewriter &rewriter, - ScopeLikeOpTy scopeLikeOp) const; - - SimplifyRetYieldBlocks(mlir::MLIRContext *context) - : OpRewritePattern(context, /*benefit=*/1) {} - - mlir::LogicalResult - checkAndRewriteRegion(mlir::Region &r, - mlir::PatternRewriter &rewriter) const { - auto &blocks = r.getBlocks(); - - if (blocks.size() <= 1) - return failure(); - - // Rewrite something like this: - // - // cir.if %2 { - // %3 = cir.const(3 : i32) : i32 - // cir.br ^bb1 - // ^bb1: // pred: ^bb0 - // cir.return %3 : i32 - // } - // - // to this: - // - // cir.if %2 { - // %3 = cir.const(3 : i32) : i32 - // cir.return %3 : i32 - // } - // - SmallPtrSet candidateBlocks; - for (Block &block : blocks) { - if (block.isEntryBlock()) - continue; - - auto yieldVars = block.getOps(); - for (cir::YieldOp yield : yieldVars) - candidateBlocks.insert(yield.getOperation()->getBlock()); +//===----------------------------------------------------------------------===// +// Rewrite patterns +//===----------------------------------------------------------------------===// - auto retVars = block.getOps(); - for (cir::ReturnOp ret : retVars) - candidateBlocks.insert(ret.getOperation()->getBlock()); - } +namespace { - auto changed = mlir::failure(); - for (auto *mergeSource : candidateBlocks) { - if (!(mergeSource->hasNoSuccessors() && mergeSource->hasOneUse())) - continue; - auto *mergeDest = mergeSource->getSinglePredecessor(); - if (!mergeDest || mergeDest->getNumSuccessors() != 1) - continue; - rewriter.eraseOp(mergeDest->getTerminator()); - rewriter.mergeBlocks(mergeSource, mergeDest); - changed = mlir::success(); +/// Removes branches between two blocks if it is the only branch. +/// +/// From: +/// ^bb0: +/// cir.br ^bb1 +/// ^bb1: // pred: ^bb0 +/// cir.return +/// +/// To: +/// ^bb0: +/// cir.return +struct RemoveRedudantBranches : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult matchAndRewrite(BrOp op, + PatternRewriter &rewriter) const final { + Block *block = op.getOperation()->getBlock(); + Block *dest = op.getDest(); + + // Single edge between blocks: merge it. + if (block->getNumSuccessors() == 1 && + dest->getSinglePredecessor() == block) { + rewriter.eraseOp(op); + rewriter.mergeBlocks(dest, block); + return success(); } - return changed; + return failure(); } +}; - mlir::LogicalResult - checkAndRewriteLoopCond(mlir::Region &condRegion, - mlir::PatternRewriter &rewriter) const { - SmallVector opsToSimplify; - condRegion.walk([&](Operation *op) { - if (isa(op)) - opsToSimplify.push_back(op); - }); - - // Blocks should only contain one "yield" operation. - auto trivialYield = [&](Block *b) { - if (&b->front() != &b->back()) - return false; - return isa(b->getTerminator()); - }; - - if (opsToSimplify.size() != 1) - return failure(); - BrCondOp brCondOp = cast(opsToSimplify[0]); +/// Merges basic blocks of trivial conditional branches. This is useful when a +/// the condition of conditional branch is a constant and the destinations of +/// the conditional branch both have only one predecessor. +/// +/// From: +/// ^bb0: +/// %0 = cir.const(#true) : !cir.bool +/// cir.brcond %0 ^bb1, ^bb2 +/// ^bb1: // pred: ^bb0 +/// cir.yield continue +/// ^bb2: // pred: ^bb0 +/// cir.yield +/// +/// To: +/// ^bb0: +/// cir.yield continue +/// +struct MergeTrivialConditionalBranches : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult match(BrCondOp op) const final { + return success(isa(op.getCond().getDefiningOp()) && + op.getDestFalse()->hasOneUse() && + op.getDestTrue()->hasOneUse()); + } - // TODO: leverage SCCP to get improved results. - auto cstOp = dyn_cast(brCondOp.getCond().getDefiningOp()); - if (!cstOp || !cstOp.getValue().isa() || - !trivialYield(brCondOp.getDestTrue()) || - !trivialYield(brCondOp.getDestFalse())) - return failure(); + /// Replace conditional branch with unconditional branch. + void rewrite(BrCondOp op, PatternRewriter &rewriter) const final { + auto constOp = llvm::cast(op.getCond().getDefiningOp()); + bool cond = constOp.getValue().cast().getValue(); + Block *block = op.getOperation()->getBlock(); - // If the condition is constant, no need to use brcond, just yield - // properly, "yield" for false and "yield continue" for true. - auto boolAttr = cstOp.getValue().cast(); - auto *falseBlock = brCondOp.getDestFalse(); - auto *trueBlock = brCondOp.getDestTrue(); - auto *currBlock = brCondOp.getOperation()->getBlock(); - if (boolAttr.getValue()) { - rewriter.eraseOp(opsToSimplify[0]); - rewriter.mergeBlocks(trueBlock, currBlock); - falseBlock->erase(); + rewriter.eraseOp(op); + if (cond) { + rewriter.mergeBlocks(op.getDestTrue(), block); + rewriter.eraseBlock(op.getDestFalse()); } else { - rewriter.eraseOp(opsToSimplify[0]); - rewriter.mergeBlocks(falseBlock, currBlock); - trueBlock->erase(); + rewriter.mergeBlocks(op.getDestFalse(), block); + rewriter.eraseBlock(op.getDestTrue()); } - if (cstOp.use_empty()) - rewriter.eraseOp(cstOp); - return success(); - } - - mlir::LogicalResult - matchAndRewrite(ScopeLikeOpTy op, - mlir::PatternRewriter &rewriter) const override { - return replaceScopeLikeOp(rewriter, op); } }; -// Specialize the template to account for the different build signatures for -// IfOp, ScopeOp, FuncOp, SwitchOp, LoopOp. -template <> -mlir::LogicalResult -SimplifyRetYieldBlocks::replaceScopeLikeOp(PatternRewriter &rewriter, - IfOp ifOp) const { - auto regionChanged = mlir::failure(); - if (checkAndRewriteRegion(ifOp.getThenRegion(), rewriter).succeeded()) - regionChanged = mlir::success(); - if (checkAndRewriteRegion(ifOp.getElseRegion(), rewriter).succeeded()) - regionChanged = mlir::success(); - return regionChanged; -} +struct RemoveEmptyScope : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; -template <> -mlir::LogicalResult -SimplifyRetYieldBlocks::replaceScopeLikeOp(PatternRewriter &rewriter, - ScopeOp scopeOp) const { - // Scope region empty: just remove scope. - if (scopeOp.getRegion().empty()) { - rewriter.eraseOp(scopeOp); - return mlir::success(); + LogicalResult match(ScopeOp op) const final { + return success(op.getRegion().empty() || + (op.getRegion().getBlocks().size() == 1 && + op.getRegion().front().empty())); } - // Scope region non-empty: clean it up. - if (checkAndRewriteRegion(scopeOp.getRegion(), rewriter).succeeded()) - return mlir::success(); - - return mlir::failure(); -} - -template <> -mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( - PatternRewriter &rewriter, cir::FuncOp funcOp) const { - auto regionChanged = mlir::failure(); - if (checkAndRewriteRegion(funcOp.getRegion(), rewriter).succeeded()) - regionChanged = mlir::success(); - return regionChanged; -} + void rewrite(ScopeOp op, PatternRewriter &rewriter) const final { + rewriter.eraseOp(op); + } +}; -template <> -mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( - PatternRewriter &rewriter, cir::SwitchOp switchOp) const { - auto regionChanged = mlir::failure(); +struct RemoveEmptySwitch : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; - // Empty switch statement: just remove it. - if (!switchOp.getCases().has_value() || switchOp.getCases()->empty()) { - rewriter.eraseOp(switchOp); - return mlir::success(); + LogicalResult match(SwitchOp op) const final { + return success(op.getRegions().empty()); } - // Non-empty switch statement: clean it up. - for (auto &r : switchOp.getRegions()) { - if (checkAndRewriteRegion(r, rewriter).succeeded()) - regionChanged = mlir::success(); + void rewrite(SwitchOp op, PatternRewriter &rewriter) const final { + rewriter.eraseOp(op); } - return regionChanged; -} - -template <> -mlir::LogicalResult SimplifyRetYieldBlocks::replaceScopeLikeOp( - PatternRewriter &rewriter, cir::LoopOp loopOp) const { - auto regionChanged = mlir::failure(); - if (checkAndRewriteRegion(loopOp.getBody(), rewriter).succeeded()) - regionChanged = mlir::success(); - if (checkAndRewriteLoopCond(loopOp.getCond(), rewriter).succeeded()) - regionChanged = mlir::success(); - return regionChanged; -} +}; -void getMergeCleanupsPatterns(RewritePatternSet &results, - MLIRContext *context) { - results.add, SimplifyRetYieldBlocks, - SimplifyRetYieldBlocks, - SimplifyRetYieldBlocks, - SimplifyRetYieldBlocks>(context); -} +//===----------------------------------------------------------------------===// +// MergeCleanupsPass +//===----------------------------------------------------------------------===// struct MergeCleanupsPass : public MergeCleanupsBase { - MergeCleanupsPass() = default; + using MergeCleanupsBase::MergeCleanupsBase; + + // The same operation rewriting done here could have been performed + // by CanonicalizerPass (adding hasCanonicalizer for target Ops and + // implementing the same from above in CIRDialects.cpp). However, it's + // currently too aggressive for static analysis purposes, since it might + // remove things where a diagnostic can be generated. + // + // FIXME: perhaps we can add one more mode to GreedyRewriteConfig to + // disable this behavior. void runOnOperation() override; }; -// The same operation rewriting done here could have been performed -// by CanonicalizerPass (adding hasCanonicalizer for target Ops and implementing -// the same from above in CIRDialects.cpp). However, it's currently too -// aggressive for static analysis purposes, since it might remove things where -// a diagnostic can be generated. -// -// FIXME: perhaps we can add one more mode to GreedyRewriteConfig to -// disable this behavior. -void MergeCleanupsPass::runOnOperation() { - auto op = getOperation(); - mlir::RewritePatternSet patterns(&getContext()); - getMergeCleanupsPatterns(patterns, &getContext()); - FrozenRewritePatternSet frozenPatterns(std::move(patterns)); +void populateMergeCleanupPatterns(RewritePatternSet &patterns) { + // clang-format off + patterns.add< + RemoveRedudantBranches, + MergeTrivialConditionalBranches, + RemoveEmptyScope, + RemoveEmptySwitch + >(patterns.getContext()); + // clang-format on +} - SmallVector opsToSimplify; - op->walk([&](Operation *op) { - if (isa( - op)) - opsToSimplify.push_back(op); +void MergeCleanupsPass::runOnOperation() { + // Collect rewrite patterns. + RewritePatternSet patterns(&getContext()); + populateMergeCleanupPatterns(patterns); + + // Collect operations to apply patterns. + SmallVector ops; + getOperation()->walk([&](Operation *op) { + if (isa(op)) + ops.push_back(op); }); - for (auto *o : opsToSimplify) { - bool erase = false; - (void)applyOpPatternsAndFold(o, frozenPatterns, GreedyRewriteConfig(), - &erase); - } + // Apply patterns. + if (applyOpPatternsAndFold(ops, std::move(patterns)).failed()) + signalPassFailure(); } + } // namespace std::unique_ptr mlir::createMergeCleanupsPass() { From bd200dadfcaf9fbafe2096483a696c8a7d82c171 Mon Sep 17 00:00:00 2001 From: Nikolas Klauser Date: Wed, 10 Jan 2024 02:33:39 +0100 Subject: [PATCH 1325/2301] [CIR][NFC] Enable the formatting workflow for llvm/clangir and format the files (#390) Fixes #345 --- .github/workflows/pr-code-format.yml | 2 +- .../include/clang/CIR/Dialect/IR/CIRDialect.h | 2 +- .../clang/CIR/Dialect/IR/CIRTypesDetails.h | 2 +- clang/lib/CIR/CodeGen/CIRAsm.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 8 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 71 ++++++------ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 7 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 8 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 8 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 6 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 3 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 6 +- clang/lib/CIR/CodeGen/EHScopeStack.h | 21 ++-- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 4 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 8 +- .../Dialect/Transforms/LoweringPrepare.cpp | 21 ++-- .../lib/CIR/Dialect/Transforms/StdHelpers.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 101 +++++++++--------- 22 files changed, 143 insertions(+), 159 deletions(-) diff --git a/.github/workflows/pr-code-format.yml b/.github/workflows/pr-code-format.yml index 0e6180acf4a4..1871679c0b3b 100644 --- a/.github/workflows/pr-code-format.yml +++ b/.github/workflows/pr-code-format.yml @@ -16,7 +16,7 @@ jobs: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number }} cancel-in-progress: true - if: github.repository == 'llvm/llvm-project' + if: github.repository == 'llvm/clangir' steps: - name: Fetch LLVM sources uses: actions/checkout@v4 diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index 818332c284d2..1f677356c4d4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -18,9 +18,9 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Dialect.h" #include "mlir/IR/OpDefinition.h" -#include "mlir/Interfaces/FunctionInterfaces.h" #include "mlir/Interfaces/CallInterfaces.h" #include "mlir/Interfaces/ControlFlowInterfaces.h" +#include "mlir/Interfaces/FunctionInterfaces.h" #include "mlir/Interfaces/InferTypeOpInterface.h" #include "mlir/Interfaces/LoopLikeInterface.h" #include "mlir/Interfaces/SideEffectInterfaces.h" diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h index ae9e97ce3cab..5eba4ac460a7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h @@ -1,4 +1,4 @@ -//===- CIRTypesDetails.h - Details of CIR dialect types -----------*- C++ -*-===// +//===- CIRTypesDetails.h - Details of CIR dialect types ---------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 91d3f5420a77..bf184a3d4f07 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -41,8 +41,8 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { AsmDialect AsmDialect = inferDialect(CGM, S); - builder.create( - getLoc(S.getAsmLoc()), ResultType, AsmString, AsmDialect); + builder.create(getLoc(S.getAsmLoc()), ResultType, + AsmString, AsmDialect); return mlir::success(); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 2755c62f4463..73e5a08f332a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -787,10 +787,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, ty); } - mlir::cir::StackRestoreOp createStackRestore(mlir::Location loc, mlir::Value v) { + mlir::cir::StackRestoreOp createStackRestore(mlir::Location loc, + mlir::Value v) { return create(loc, v); } - }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 98e1f9281a33..1c05018b535d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -194,10 +194,9 @@ CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { mlir::cir::FuncOp fnOp; if (!builtin) { - fnOp = CGM.createCIRFunction( - loc, CGM.builtinCoroAlloc, - mlir::cir::FuncType::get({int32Ty}, boolTy), - /*FD=*/nullptr); + fnOp = CGM.createCIRFunction(loc, CGM.builtinCoroAlloc, + mlir::cir::FuncType::get({int32Ty}, boolTy), + /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); } else @@ -217,8 +216,7 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroBegin, - mlir::cir::FuncType::get({int32Ty, VoidPtrTy}, - VoidPtrTy), + mlir::cir::FuncType::get({int32Ty, VoidPtrTy}, VoidPtrTy), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 77dae8cfb878..662d24cd63a9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -892,7 +892,7 @@ struct CallCleanupFunction final : EHScopeStack::Cleanup { /// Push the standard destructor for the given type as /// at least a normal cleanup. void CIRGenFunction::pushDestroy(QualType::DestructionKind dtorKind, - Address addr, QualType type) { + Address addr, QualType type) { assert(dtorKind && "cannot push destructor for trivial type"); CleanupKind cleanupKind = getCleanupKind(dtorKind); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 88141cff9293..ccef09fe44a0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -234,8 +234,8 @@ Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base, auto fieldPtr = mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); - auto sea = getBuilder().createGetMember( - loc, fieldPtr, base.getPointer(), field->getName(), index); + auto sea = getBuilder().createGetMember(loc, fieldPtr, base.getPointer(), + field->getName(), index); return Address(sea, CharUnits::One()); } @@ -341,7 +341,7 @@ LValue CIRGenFunction::buildLValueForField(LValue base, if (!IsInPreservedAIRegion && (!getDebugInfo() || !rec->hasAttr())) { llvm::StringRef fieldName = field->getName(); - auto& layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent()); + auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent()); unsigned fieldIndex = layout.getCIRFieldNo(field); if (CGM.LambdaFieldToName.count(field)) @@ -396,7 +396,7 @@ LValue CIRGenFunction::buildLValueForFieldInitialization( if (!FieldType->isReferenceType()) return buildLValueForField(Base, Field); - auto& layout = CGM.getTypes().getCIRGenRecordLayout(Field->getParent()); + auto &layout = CGM.getTypes().getCIRGenRecordLayout(Field->getParent()); unsigned FieldIndex = layout.getCIRFieldNo(Field); Address V = buildAddrOfFieldStorage(*this, Base.getAddress(), Field, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 74f1400ec9f7..3837144ef975 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -60,37 +60,36 @@ static bool isBlockVarRef(const Expr *E) { // FIXME: pointer arithmetic? return false; - // Check both sides of a conditional operator. - } else if (const AbstractConditionalOperator *op - = dyn_cast(E)) { - return isBlockVarRef(op->getTrueExpr()) - || isBlockVarRef(op->getFalseExpr()); - - // OVEs are required to support BinaryConditionalOperators. - } else if (const OpaqueValueExpr *op - = dyn_cast(E)) { + // Check both sides of a conditional operator. + } else if (const AbstractConditionalOperator *op = + dyn_cast(E)) { + return isBlockVarRef(op->getTrueExpr()) || + isBlockVarRef(op->getFalseExpr()); + + // OVEs are required to support BinaryConditionalOperators. + } else if (const OpaqueValueExpr *op = dyn_cast(E)) { if (const Expr *src = op->getSourceExpr()) return isBlockVarRef(src); - // Casts are necessary to get things like (*(int*)&var) = foo(). - // We don't really care about the kind of cast here, except - // we don't want to look through l2r casts, because it's okay - // to get the *value* in a __block variable. + // Casts are necessary to get things like (*(int*)&var) = foo(). + // We don't really care about the kind of cast here, except + // we don't want to look through l2r casts, because it's okay + // to get the *value* in a __block variable. } else if (const CastExpr *cast = dyn_cast(E)) { if (cast->getCastKind() == CK_LValueToRValue) return false; return isBlockVarRef(cast->getSubExpr()); - // Handle unary operators. Again, just aggressively look through - // it, ignoring the operation. + // Handle unary operators. Again, just aggressively look through + // it, ignoring the operation. } else if (const UnaryOperator *uop = dyn_cast(E)) { return isBlockVarRef(uop->getSubExpr()); - // Look into the base of a field access. + // Look into the base of a field access. } else if (const MemberExpr *mem = dyn_cast(E)) { return isBlockVarRef(mem->getBase()); - // Look into the base of a subscript. + // Look into the base of a subscript. } else if (const ArraySubscriptExpr *sub = dyn_cast(E)) { return isBlockVarRef(sub->getBase()); } @@ -113,7 +112,8 @@ class AggExprEmitter : public StmtVisitor { llvm::function_ref Fn); AggValueSlot EnsureSlot(mlir::Location loc, QualType T) { - if (!Dest.isIgnored()) return Dest; + if (!Dest.isIgnored()) + return Dest; return CGF.CreateAggTemp(T, loc, "agg.tmp.ensured"); } @@ -213,11 +213,11 @@ class AggExprEmitter : public StmtVisitor { // For an assignment to work, the value on the right has // to be compatible with the value on the left. assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), - E->getRHS()->getType()) - && "Invalid assignment"); + E->getRHS()->getType()) && + "Invalid assignment"); if (isBlockVarRef(E->getLHS()) && - E->getRHS()->HasSideEffects(CGF.getContext())) { + E->getRHS()->HasSideEffects(CGF.getContext())) { llvm_unreachable("NYI"); } @@ -233,12 +233,11 @@ class AggExprEmitter : public StmtVisitor { // Codegen the RHS so that it stores directly into the LHS. AggValueSlot lhsSlot = AggValueSlot::forLValue( - lhs, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsAliased, AggValueSlot::MayOverlap); + lhs, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsAliased, AggValueSlot::MayOverlap); // A non-volatile aggregate destination might have volatile member. - if (!lhsSlot.isVolatile() && - CGF.hasVolatileMember(E->getLHS()->getType())) + if (!lhsSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType())) assert(!UnimplementedFeature::atomicTypes()); CGF.buildAggExpr(E->getRHS(), lhsSlot); @@ -247,10 +246,10 @@ class AggExprEmitter : public StmtVisitor { buildFinalDestCopy(E->getType(), lhs); if (!Dest.isIgnored() && !Dest.isExternallyDestructed() && - E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct) + E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct) CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(), - E->getType()); - } + E->getType()); + } void VisitBinComma(const BinaryOperator *E) { llvm_unreachable("NYI"); } void VisitBinCmp(const BinaryOperator *E) { llvm_unreachable("NYI"); } @@ -356,8 +355,8 @@ void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src, assert(!UnimplementedFeature::volatileTypes()); if (SrcValueKind == EVK_RValue) { - if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) { - llvm_unreachable("move assignment/move ctor for rvalue is NYI"); + if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) { + llvm_unreachable("move assignment/move ctor for rvalue is NYI"); } } else { if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) @@ -675,8 +674,8 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { } // Emit initialization - LValue LV = CGF.buildLValueForFieldInitialization( - SlotLV, *CurField, fieldName); + LValue LV = + CGF.buildLValueForFieldInitialization(SlotLV, *CurField, fieldName); if (CurField->hasCapturedVLAType()) { llvm_unreachable("NYI"); } @@ -823,8 +822,8 @@ void AggExprEmitter::withReturnValueSlot( if (!UseTemp) { RetAddr = Dest.getAddress(); } else { - RetAddr = CGF.CreateMemTemp(RetTy, CGF.getLoc(E->getSourceRange()), - "tmp", &RetAddr); + RetAddr = CGF.CreateMemTemp(RetTy, CGF.getLoc(E->getSourceRange()), "tmp", + &RetAddr); assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); } @@ -943,8 +942,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( if (curInitIndex == NumInitElements && Dest.isZeroed() && CGF.getTypes().isZeroInitializable(ExprToVisit->getType())) break; - LValue LV = CGF.buildLValueForFieldInitialization( - DestLV, field, field->getName()); + LValue LV = + CGF.buildLValueForFieldInitialization(DestLV, field, field->getName()); // We never generate write-barries for initialized fields. assert(!UnimplementedFeature::setNonGC()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index c00ebe9c1b27..2fb84275751c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -910,7 +910,7 @@ class ConstExprEmitter // Look through the temporary; it's just converting the value to an lvalue // to pass it to the constructor. if (auto *MTE = dyn_cast(Arg)) - return Visit(MTE->getSubExpr(), Ty); + return Visit(MTE->getSubExpr(), Ty); // Don't try to support arbitrary lvalue-to-rvalue conversions for now. return nullptr; } @@ -1077,8 +1077,7 @@ class ConstantLValueEmitter ConstantLValue applyOffset(ConstantLValue &C) { // Handle attribute constant LValues. - if (auto Attr = - C.Value.dyn_cast()) { + if (auto Attr = C.Value.dyn_cast()) { if (auto GV = Attr.dyn_cast()) { auto baseTy = GV.getType().cast().getPointee(); auto destTy = CGM.getTypes().convertTypeForMem(DestType); @@ -1341,7 +1340,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { } InConstantContext = D.hasConstantInitialization(); - const Expr * E = D.getInit(); + const Expr *E = D.getInit(); assert(E && "No initializer to emit"); QualType destType = D.getType(); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 7b8ce358f301..024ec494bc5b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -771,7 +771,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// Create a check for a function parameter that may potentially be /// declared as non-null. void buildNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, - AbstractCallee AC, unsigned ParmNum); + AbstractCallee AC, unsigned ParmNum); void buildCallArg(CallArgList &args, const clang::Expr *E, clang::QualType ArgType); @@ -1362,7 +1362,7 @@ class CIRGenFunction : public CIRGenTypeCache { AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD); LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); LValue buildLValueForBitField(LValue base, const FieldDecl *field); - + /// Like buildLValueForField, excpet that if the Field is a reference, this /// will return the address of the reference and not the address of the value /// stored in the reference. @@ -1520,8 +1520,8 @@ class CIRGenFunction : public CIRGenTypeCache { static Destroyer destroyCXXObject; - void pushDestroy(QualType::DestructionKind dtorKind, - Address addr, QualType type); + void pushDestroy(QualType::DestructionKind dtorKind, Address addr, + QualType type); void pushDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index d5bdf54ee403..c01d7c96485f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -100,10 +100,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, const clang::CodeGenOptions &CGO, DiagnosticsEngine &Diags) : builder(context, *this), astCtx(astctx), langOpts(astctx.getLangOpts()), - codeGenOpts(CGO), theModule{mlir::ModuleOp::create( - builder.getUnknownLoc())}, - Diags(Diags), target(astCtx.getTargetInfo()), - ABI(createCXXABI(*this)), genTypes{*this}, VTables{*this} { + codeGenOpts(CGO), + theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), + target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, + VTables{*this} { // Initialize CIR signed integer types cache. SInt8Ty = diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index a0b30e7464ab..e468c53e58d4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -196,7 +196,7 @@ class CIRGenModule : public CIRGenTypeCache { mlir::cir::GlobalOp getOrCreateStaticVarDecl(const VarDecl &D, - mlir::cir::GlobalLinkageKind Linkage); + mlir::cir::GlobalLinkageKind Linkage); mlir::cir::GlobalOp buildGlobal(const VarDecl *D, mlir::Type Ty, ForDefinition_t IsForDefinition); @@ -239,7 +239,7 @@ class CIRGenModule : public CIRGenTypeCache { ForDefinition_t IsForDefinition = NotForDefinition); /// Get a reference to the target of VD. - mlir::Operation* getWeakRefReference(const ValueDecl *VD); + mlir::Operation *getWeakRefReference(const ValueDecl *VD); CharUnits computeNonVirtualBaseClassOffset(const CXXRecordDecl *DerivedClass, @@ -509,7 +509,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Emit the function that initializes the specified global void buildGlobalVarDeclInit(const VarDecl *D, mlir::cir::GlobalOp Addr, - bool PerformInit); + bool PerformInit); void addDeferredVTable(const CXXRecordDecl *RD) { DeferredVTables.push_back(RD); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 4fb8a36a0547..fab6d65a833c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -634,7 +634,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { SkippedLayout = true; ResultType = Builder.getUInt8Ty(); } - ResultType = Builder.getArrayType(ResultType, 0); + ResultType = Builder.getArrayType(ResultType, 0); break; } case Type::ConstantArray: { diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 4add04e56ccb..673f5feec48c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -434,8 +434,7 @@ getAddrOfVTTVTable(CIRGenVTables &CGVT, CIRGenModule &CGM, llvm_unreachable("generateConstructionVTable NYI"); } -mlir::cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *RD) -{ +mlir::cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *RD) { assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT"); SmallString<256> OutName; diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index a153473b9e64..6cda57545ae5 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -215,8 +215,8 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, cxxRecordDecl{llvm::dyn_cast(recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, dataLayout{cirGenTypes.getModule().getModule()}, - IsZeroInitializable(true), - IsZeroInitializableAsBase(true), isPacked{isPacked} {} + IsZeroInitializable(true), IsZeroInitializableAsBase(true), + isPacked{isPacked} {} void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, @@ -227,7 +227,7 @@ void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, (unsigned)(getFieldBitOffset(FD) - astContext.toBits(StartOffset)); Info.Size = FD->getBitWidthValue(); Info.StorageSize = getSizeInBits(StorageType).getQuantity(); - Info.StorageOffset = StartOffset; + Info.StorageOffset = StartOffset; Info.Name = FD->getName(); if (Info.Size > Info.StorageSize) diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h index 8711cd3c232e..5ab356df319f 100644 --- a/clang/lib/CIR/CodeGen/EHScopeStack.h +++ b/clang/lib/CIR/CodeGen/EHScopeStack.h @@ -73,7 +73,7 @@ struct DominatingPointer; template struct DominatingPointer : InvariantValue {}; // template struct DominatingPointer at end of file -template struct DominatingValue : DominatingPointer {}; +template struct DominatingValue : DominatingPointer {}; enum CleanupKind : unsigned { /// Denotes a cleanup that should run when a scope is exited using exceptional @@ -268,9 +268,9 @@ class EHScopeStack { public: EHScopeStack() - : StartOfBuffer(nullptr), EndOfBuffer(nullptr), StartOfData(nullptr), - InnermostNormalCleanup(stable_end()), InnermostEHScope(stable_end()), - CGF(nullptr) {} + : StartOfBuffer(nullptr), EndOfBuffer(nullptr), StartOfData(nullptr), + InnermostNormalCleanup(stable_end()), InnermostEHScope(stable_end()), + CGF(nullptr) {} ~EHScopeStack() { delete[] StartOfBuffer; } /// Push a lazily-created cleanup on the stack. @@ -279,7 +279,7 @@ class EHScopeStack { "Cleanup's alignment is too large."); void *Buffer = pushCleanup(Kind, sizeof(T)); Cleanup *Obj = new (Buffer) T(A...); - (void) Obj; + (void)Obj; } /// Push a lazily-created cleanup on the stack. Tuple version. @@ -289,7 +289,7 @@ class EHScopeStack { "Cleanup's alignment is too large."); void *Buffer = pushCleanup(Kind, sizeof(T)); Cleanup *Obj = new (Buffer) T(std::move(A)); - (void) Obj; + (void)Obj; } // Feel free to add more variants of the following: @@ -364,10 +364,7 @@ class EHScopeStack { } stable_iterator getInnermostActiveNormalCleanup() const; - stable_iterator getInnermostEHScope() const { - return InnermostEHScope; - } - + stable_iterator getInnermostEHScope() const { return InnermostEHScope; } /// An unstable reference to a scope-stack depth. Invalidated by /// pushes but not pops. @@ -387,9 +384,7 @@ class EHScopeStack { } /// Create a stable reference to the bottom of the EH stack. - static stable_iterator stable_end() { - return stable_iterator(0); - } + static stable_iterator stable_end() { return stable_iterator(0); } /// Translates an iterator into a stable_iterator. stable_iterator stabilize(iterator it) const; diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index d805b81bac07..a3086b333806 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -222,9 +222,9 @@ Attribute ConstPtrAttr::parse(AsmParser &parser, Type odsType) { void ConstPtrAttr::print(AsmPrinter &printer) const { printer << '<'; if (isNullValue()) - printer << "null"; + printer << "null"; else - printer << getValue(); + printer << getValue(); printer << '>'; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index af6b0b85f3f5..cab294012cd7 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -189,8 +189,8 @@ bool omitRegionTerm(mlir::Region &r) { // CIR Custom Parsers/Printers //===----------------------------------------------------------------------===// -static mlir::ParseResult -parseOmittedTerminatorRegion(mlir::OpAsmParser &parser, mlir::Region ®ion) { +static mlir::ParseResult parseOmittedTerminatorRegion(mlir::OpAsmParser &parser, + mlir::Region ®ion) { auto regionLoc = parser.getCurrentLocation(); if (parser.parseRegion(region)) return failure(); @@ -200,8 +200,8 @@ parseOmittedTerminatorRegion(mlir::OpAsmParser &parser, mlir::Region ®ion) { } static void printOmittedTerminatorRegion(mlir::OpAsmPrinter &printer, - mlir::cir::ScopeOp &op, - mlir::Region ®ion) { + mlir::cir::ScopeOp &op, + mlir::Region ®ion) { printer.printRegion(region, /*printEntryBlockArgs=*/false, /*printBlockTerminators=*/!omitRegionTerm(region)); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 63148b74c4ca..611a35eacc2a 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -109,9 +109,8 @@ struct LoweringPreparePass : public LoweringPrepareBase { GlobalOp LoweringPreparePass::buildRuntimeVariable( mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, mlir::Type type, mlir::cir::GlobalLinkageKind linkage) { - GlobalOp g = - dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( - theModule, StringAttr::get(theModule->getContext(), name))); + GlobalOp g = dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( + theModule, StringAttr::get(theModule->getContext(), name))); if (!g) { g = builder.create(loc, name, type); g.setLinkageAttr( @@ -125,9 +124,8 @@ GlobalOp LoweringPreparePass::buildRuntimeVariable( FuncOp LoweringPreparePass::buildRuntimeFunction( mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, mlir::cir::FuncType type, mlir::cir::GlobalLinkageKind linkage) { - FuncOp f = - dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( - theModule, StringAttr::get(theModule->getContext(), name))); + FuncOp f = dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( + theModule, StringAttr::get(theModule->getContext(), name))); if (!f) { f = builder.create(loc, name, type); f.setLinkageAttr( @@ -342,8 +340,8 @@ void LoweringPreparePass::lowerGetBitfieldOp(GetBitfieldOp op) { } val = builder.createIntCast(val, resultTy); - op.replaceAllUsesWith(val); - op.erase(); + op.replaceAllUsesWith(val); + op.erase(); } void LoweringPreparePass::lowerSetBitfieldOp(SetBitfieldOp op) { @@ -369,8 +367,7 @@ void LoweringPreparePass::lowerSetBitfieldOp(SetBitfieldOp op) { if (storageSize != size) { assert(storageSize > size && "Invalid bitfield size."); - mlir::Value val = - builder.create(loc, storageType, addr); + mlir::Value val = builder.create(loc, storageType, addr); srcVal = builder.createAnd(srcVal, llvm::APInt::getLowBitsSet(srcWidth, size)); @@ -380,8 +377,8 @@ void LoweringPreparePass::lowerSetBitfieldOp(SetBitfieldOp op) { srcVal = builder.createShiftLeft(srcVal, offset); // Mask out the original value. - val = builder.createAnd(val, - ~llvm::APInt::getBitsSet(srcWidth, offset, offset + size)); + val = builder.createAnd( + val, ~llvm::APInt::getBitsSet(srcWidth, offset, offset + size)); // Or together the unchanged values and the source value. srcVal = builder.createOr(val, srcVal); diff --git a/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp b/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp index e6beada09786..93e19294feec 100644 --- a/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp +++ b/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp @@ -29,4 +29,4 @@ bool isStdArrayType(mlir::Type t) { } } // namespace cir -} // namespace mlir \ No newline at end of file +} // namespace mlir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 786cca9425a6..09f9170ca570 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -326,33 +326,30 @@ mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { static void lowerNestedYield(mlir::cir::YieldOpKind targetKind, mlir::ConversionPatternRewriter &rewriter, - mlir::Region &body, - mlir::Block *dst) { + mlir::Region &body, mlir::Block *dst) { // top-level yields are lowered in matchAndRewrite of the parent operations auto isNested = [&](mlir::Operation *op) { return op->getParentRegion() != &body; }; - body.walk( - [&](mlir::Operation *op) { - if (!isNested(op)) - return mlir::WalkResult::advance(); + body.walk([&](mlir::Operation *op) { + if (!isNested(op)) + return mlir::WalkResult::advance(); - // don't process breaks/continues in nested loops and switches - if (isa(*op)) - return mlir::WalkResult::skip(); + // don't process breaks/continues in nested loops and switches + if (isa(*op)) + return mlir::WalkResult::skip(); - auto yield = dyn_cast(*op); - if (yield && yield.getKind() == targetKind) { - rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp(op, yield.getArgs(), dst); - } + auto yield = dyn_cast(*op); + if (yield && yield.getKind() == targetKind) { + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp(op, yield.getArgs(), dst); + } - return mlir::WalkResult::advance(); - }); + return mlir::WalkResult::advance(); + }); } - class CIRCopyOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -456,10 +453,10 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { dyn_cast(stepRegion.back().getTerminator()); auto &stepBlock = (kind == LoopKind::For ? stepFrontBlock : condFrontBlock); - lowerNestedYield(mlir::cir::YieldOpKind::Break, - rewriter, bodyRegion, continueBlock); - lowerNestedYield(mlir::cir::YieldOpKind::Continue, - rewriter, bodyRegion, &stepBlock); + lowerNestedYield(mlir::cir::YieldOpKind::Break, rewriter, bodyRegion, + continueBlock); + lowerNestedYield(mlir::cir::YieldOpKind::Continue, rewriter, bodyRegion, + &stepBlock); // Move loop op region contents to current CFG. rewriter.inlineRegionBefore(condRegion, continueBlock); @@ -758,8 +755,8 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(elseAfterBody); if (auto elseYieldOp = dyn_cast(elseAfterBody->getTerminator())) { - if (!isBreakOrContinue(elseYieldOp)) // lowering of parent loop yields is - // deferred to loop lowering + if (!isBreakOrContinue(elseYieldOp)) // lowering of parent loop yields + // is deferred to loop lowering rewriter.replaceOpWithNewOp( elseYieldOp, elseYieldOp.getArgs(), continueBlock); } else if (!dyn_cast( @@ -873,23 +870,23 @@ class CIRCallLowering : public mlir::OpConversionPattern { mlir::ConversionPatternRewriter &rewriter) const override { llvm::SmallVector llvmResults; auto cirResults = op.getResultTypes(); - auto* converter = getTypeConverter(); + auto *converter = getTypeConverter(); if (converter->convertTypes(cirResults, llvmResults).failed()) return mlir::failure(); - if (auto callee = op.getCalleeAttr()) { // direct call + if (auto callee = op.getCalleeAttr()) { // direct call rewriter.replaceOpWithNewOp( - op, llvmResults, op.getCalleeAttr(), adaptor.getOperands()); + op, llvmResults, op.getCalleeAttr(), adaptor.getOperands()); } else { // indirect call - assert(op.getOperands().size() - && "operands list must no be empty for the indirect call"); - auto typ = op.getOperands().front().getType(); + assert(op.getOperands().size() && + "operands list must no be empty for the indirect call"); + auto typ = op.getOperands().front().getType(); assert(isa(typ) && "expected pointer type"); auto ptyp = dyn_cast(typ); auto ftyp = dyn_cast(ptyp.getPointee()); assert(ftyp && "expected a pointer to a function as the first operand"); - + rewriter.replaceOpWithNewOp( op, dyn_cast(converter->convertType(ftyp)), @@ -1391,7 +1388,7 @@ class CIRSwitchOpLowering fallthroughYieldOp = nullptr; } - for (auto& blk : region.getBlocks()) { + for (auto &blk : region.getBlocks()) { if (blk.getNumSuccessors()) continue; @@ -1412,7 +1409,7 @@ class CIRSwitchOpLowering rewriteYieldOp(rewriter, yieldOp, exitBlock); break; case mlir::cir::YieldOpKind::Continue: // Continue is handled only in - // loop lowering + // loop lowering break; default: return op->emitError("invalid yield kind in case statement"); @@ -1420,8 +1417,8 @@ class CIRSwitchOpLowering } } - lowerNestedYield(mlir::cir::YieldOpKind::Break, - rewriter, region, exitBlock); + lowerNestedYield(mlir::cir::YieldOpKind::Break, rewriter, region, + exitBlock); // Extract region contents before erasing the switch op. rewriter.inlineRegionBefore(region, exitBlock); @@ -2044,7 +2041,8 @@ class CIRVTableAddrPointOpLowering } }; -class CIRStackSaveLowering : public mlir::OpConversionPattern { +class CIRStackSaveLowering + : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -2057,16 +2055,16 @@ class CIRStackSaveLowering : public mlir::OpConversionPattern { +class CIRStackRestoreLowering + : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; mlir::LogicalResult matchAndRewrite(mlir::cir::StackRestoreOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp( - op, - adaptor.getPtr()); + rewriter.replaceOpWithNewOp(op, + adaptor.getPtr()); return mlir::success(); } }; @@ -2074,19 +2072,18 @@ class CIRStackRestoreLowering : public mlir::OpConversionPattern(patterns.getContext()); - patterns.add( + patterns.add< + CIRCmpOpLowering, CIRLoopOpLowering, CIRBrCondOpLowering, + CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, + CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, + CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, + CIRScopeOpLowering, CIRCastOpLowering, CIRIfLowering, CIRGlobalOpLowering, + CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, + CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, + CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, + CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, + CIRFAbsOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, + CIRVectorExtractLowering, CIRStackSaveLowering, CIRStackRestoreLowering>( converter, patterns.getContext()); } From 0917e1270362a931499fff4b809391f0c50590e2 Mon Sep 17 00:00:00 2001 From: Yury Gribov Date: Wed, 10 Jan 2024 04:38:05 +0300 Subject: [PATCH 1326/2301] [CIR][Transforms][Bugfix] Do not use-after-free in MergeCleanups and IdiomRecognizer. (#389) Some tests started failing under `-DLLVM_USE_SANITIZER=Address` due to trivial use-after-free errors. --- .../Dialect/Transforms/IdiomRecognizer.cpp | 31 +++++++++++-------- .../CIR/Dialect/Transforms/MergeCleanups.cpp | 9 +++--- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index c0c31b0052f7..7b1218ad7c27 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -36,8 +36,8 @@ struct IdiomRecognizerPass : public IdiomRecognizerBase { IdiomRecognizerPass() = default; void runOnOperation() override; void recognizeCall(CallOp call); - void raiseStdFind(CallOp call); - void raiseIteratorBeginEnd(CallOp call); + bool raiseStdFind(CallOp call); + bool raiseIteratorBeginEnd(CallOp call); // Handle pass options struct Options { @@ -88,14 +88,14 @@ struct IdiomRecognizerPass : public IdiomRecognizerBase { }; } // namespace -void IdiomRecognizerPass::raiseStdFind(CallOp call) { +bool IdiomRecognizerPass::raiseStdFind(CallOp call) { // FIXME: tablegen all of this function. if (call.getNumOperands() != 3) - return; + return false; auto callExprAttr = call.getAstAttr(); if (!callExprAttr || !callExprAttr.isStdFunctionCall("find")) { - return; + return false; } if (opts.emitRemarkFoundCalls()) @@ -109,6 +109,7 @@ void IdiomRecognizerPass::raiseStdFind(CallOp call) { call.replaceAllUsesWith(findOp); call.erase(); + return true; } static bool isIteratorLikeType(mlir::Type t) { @@ -128,24 +129,24 @@ static bool isIteratorInStdContainter(mlir::Type t) { return isStdArrayType(t); } -void IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) { +bool IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) { // FIXME: tablegen all of this function. CIRBaseBuilderTy builder(getContext()); if (call.getNumOperands() != 1 || call.getNumResults() != 1) - return; + return false; auto callExprAttr = call.getAstAttr(); if (!callExprAttr) - return; + return false; if (!isIteratorLikeType(call.getResult(0).getType())) - return; + return false; // First argument is the container "this" pointer. auto thisPtr = call.getOperand(0).getType().dyn_cast(); if (!thisPtr || !isIteratorInStdContainter(thisPtr.getPointee())) - return; + return false; builder.setInsertionPointAfter(call.getOperation()); mlir::Operation *iterOp; @@ -162,16 +163,20 @@ void IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) { call.getLoc(), call.getResult(0).getType(), call.getCalleeAttr(), call.getOperand(0)); } else { - return; + return false; } call.replaceAllUsesWith(iterOp); call.erase(); + return true; } void IdiomRecognizerPass::recognizeCall(CallOp call) { - raiseIteratorBeginEnd(call); - raiseStdFind(call); + if (raiseIteratorBeginEnd(call)) + return; + + if (raiseStdFind(call)) + return; } void IdiomRecognizerPass::runOnOperation() { diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index 822ce6f4bb2c..473b0e71ca96 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -84,15 +84,16 @@ struct MergeTrivialConditionalBranches : public OpRewritePattern { void rewrite(BrCondOp op, PatternRewriter &rewriter) const final { auto constOp = llvm::cast(op.getCond().getDefiningOp()); bool cond = constOp.getValue().cast().getValue(); + auto *destTrue = op.getDestTrue(), *destFalse = op.getDestFalse(); Block *block = op.getOperation()->getBlock(); rewriter.eraseOp(op); if (cond) { - rewriter.mergeBlocks(op.getDestTrue(), block); - rewriter.eraseBlock(op.getDestFalse()); + rewriter.mergeBlocks(destTrue, block); + rewriter.eraseBlock(destFalse); } else { - rewriter.mergeBlocks(op.getDestFalse(), block); - rewriter.eraseBlock(op.getDestTrue()); + rewriter.mergeBlocks(destFalse, block); + rewriter.eraseBlock(destTrue); } } }; From 235020e0a053e0dfaa598fbac57c6fc60d5ff19a Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 9 Jan 2024 22:42:35 -0300 Subject: [PATCH 1327/2301] [CIR][IR] Implement loop's conditional operation (#391) Like SCF's `scf.condition`, the `cir.condition` simplifies codegen of loop conditions by removing the need of a contitional branch. It takes a single boolean operand which, if true, executes the body region, otherwise exits the loop. This also simplifies lowering and the dialect it self. A new constraint is now enforced on `cir.loops`: the condition region must terminate with a `cir.condition` operation. A few tests were removed as they became redundant, and others where simplified. The merge-cleanups pass no longer simplifies compile-time constant conditions, as the condition body terminator is no longer allowed to be terminated with a `cir.yield`. To circumvent this, a proper folder should be implemented to fold constant conditions, but this was left as future work. Co-authored-by: Bruno Cardoso Lopes --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 19 + clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 + clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 32 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 47 ++- .../CIR/Dialect/Transforms/MergeCleanups.cpp | 45 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 43 +-- clang/test/CIR/CodeGen/loop.cpp | 58 +-- clang/test/CIR/CodeGen/rangefor.cpp | 6 +- clang/test/CIR/IR/branch.cir | 69 +--- clang/test/CIR/IR/invalid.cir | 9 +- clang/test/CIR/IR/loop.cir | 48 +-- clang/test/CIR/Lowering/dot.cir | 26 +- clang/test/CIR/Lowering/loop.cir | 332 +++++------------- clang/test/CIR/Lowering/loops-with-break.cir | 60 +--- .../test/CIR/Lowering/loops-with-continue.cir | 60 +--- clang/test/CIR/Transforms/merge-cleanups.cir | 42 +-- 16 files changed, 230 insertions(+), 671 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index fed38ccc1930..53682e28c20f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -582,6 +582,25 @@ def TernaryOp : CIR_Op<"ternary", }]; } +//===----------------------------------------------------------------------===// +// ConditionOp +//===----------------------------------------------------------------------===// + +def ConditionOp : CIR_Op<"condition", [ + Terminator, + DeclareOpInterfaceMethods +]> { + let summary = "Loop continuation condition."; + let description = [{ + The `cir.condition` termintes loop's conditional regions. It takes a single + `cir.bool` operand. if the operand is true, the loop continues, otherwise + it terminates. + }]; + let arguments = (ins CIR_BoolType:$condition); + let assemblyFormat = " `(` $condition `)` attr-dict "; +} + //===----------------------------------------------------------------------===// // YieldOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 73e5a08f332a..5aa751eb90a3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -578,6 +578,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(dst.getLoc(), dst, src); } + /// Create a loop condition. + mlir::cir::ConditionOp createCondition(mlir::Value condition) { + return create(condition.getLoc(), condition); + } + mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, mlir::Value src, mlir::Value len) { return create(loc, dst, src, len); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 233a1b216283..cedcdac48621 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -652,26 +652,6 @@ CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, mlir::Type condType, return buildCaseDefaultCascade(&S, condType, caseAttrs, os); } -static mlir::LogicalResult buildLoopCondYield(mlir::OpBuilder &builder, - mlir::Location loc, - mlir::Value cond) { - mlir::Block *trueBB = nullptr, *falseBB = nullptr; - { - mlir::OpBuilder::InsertionGuard guard(builder); - trueBB = builder.createBlock(builder.getBlock()->getParent()); - builder.create(loc, YieldOpKind::Continue); - } - { - mlir::OpBuilder::InsertionGuard guard(builder); - falseBB = builder.createBlock(builder.getBlock()->getParent()); - builder.create(loc); - } - - assert((trueBB && falseBB) && "expected both blocks to exist"); - builder.create(loc, cond, trueBB, falseBB); - return mlir::success(); -} - mlir::LogicalResult CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef ForAttrs) { @@ -705,8 +685,7 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, assert(!UnimplementedFeature::createProfileWeightsForLoop()); assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal = evaluateExprAsBool(S.getCond()); - if (buildLoopCondYield(b, loc, condVal).failed()) - loopRes = mlir::failure(); + builder.createCondition(condVal); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -788,8 +767,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { loc, boolTy, mlir::cir::BoolAttr::get(b.getContext(), boolTy, true)); } - if (buildLoopCondYield(b, loc, condVal).failed()) - loopRes = mlir::failure(); + builder.createCondition(condVal); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -852,8 +830,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { // expression compares unequal to 0. The condition must be a // scalar type. mlir::Value condVal = evaluateExprAsBool(S.getCond()); - if (buildLoopCondYield(b, loc, condVal).failed()) - loopRes = mlir::failure(); + builder.createCondition(condVal); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -912,8 +889,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // expression compares unequal to 0. The condition must be a // scalar type. condVal = evaluateExprAsBool(S.getCond()); - if (buildLoopCondYield(b, loc, condVal).failed()) - loopRes = mlir::failure(); + builder.createCondition(condVal); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index cab294012cd7..0a6e10812b0c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -225,6 +225,30 @@ void AllocaOp::build(::mlir::OpBuilder &odsBuilder, odsState.addTypes(addr); } +//===----------------------------------------------------------------------===// +// ConditionOp +//===-----------------------------------------------------------------------===// + +//===---------------------------------- +// BranchOpTerminatorInterface Methods + +void ConditionOp::getSuccessorRegions( + ArrayRef operands, SmallVectorImpl ®ions) { + auto loopOp = cast(getOperation()->getParentOp()); + + // TODO(cir): The condition value may be folded to a constant, narrowing + // down its list of possible successors. + // Condition may branch to the body or to the parent op. + regions.emplace_back(&loopOp.getBody(), loopOp.getBody().getArguments()); + regions.emplace_back(loopOp->getResults()); +} + +MutableOperandRange +ConditionOp::getMutableSuccessorOperands(RegionBranchPoint point) { + // No values are yielded to the successor region. + return MutableOperandRange(getOperation(), 0, 0); +} + //===----------------------------------------------------------------------===// // ConstantOp //===----------------------------------------------------------------------===// @@ -1303,26 +1327,11 @@ void LoopOp::getSuccessorRegions(mlir::RegionBranchPoint point, llvm::SmallVector LoopOp::getLoopRegions() { return {&getBody()}; } LogicalResult LoopOp::verify() { - // Cond regions should only terminate with plain 'cir.yield' or - // 'cir.yield continue'. - auto terminateError = [&]() { - return emitOpError() << "cond region must be terminated with " - "'cir.yield' or 'cir.yield continue'"; - }; + if (getCond().empty()) + return emitOpError() << "cond region must not be empty"; - auto &blocks = getCond().getBlocks(); - for (Block &block : blocks) { - if (block.empty()) - continue; - auto &op = block.back(); - if (isa(op)) - continue; - if (!isa(op)) - terminateError(); - auto y = cast(op); - if (!(y.isPlain() || y.isContinue())) - terminateError(); - } + if (!llvm::isa(getCond().back().getTerminator())) + return emitOpError() << "cond region terminate with 'cir.condition'"; return success(); } diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index 473b0e71ca96..e4848a21d0bd 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -54,50 +54,6 @@ struct RemoveRedudantBranches : public OpRewritePattern { } }; -/// Merges basic blocks of trivial conditional branches. This is useful when a -/// the condition of conditional branch is a constant and the destinations of -/// the conditional branch both have only one predecessor. -/// -/// From: -/// ^bb0: -/// %0 = cir.const(#true) : !cir.bool -/// cir.brcond %0 ^bb1, ^bb2 -/// ^bb1: // pred: ^bb0 -/// cir.yield continue -/// ^bb2: // pred: ^bb0 -/// cir.yield -/// -/// To: -/// ^bb0: -/// cir.yield continue -/// -struct MergeTrivialConditionalBranches : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - LogicalResult match(BrCondOp op) const final { - return success(isa(op.getCond().getDefiningOp()) && - op.getDestFalse()->hasOneUse() && - op.getDestTrue()->hasOneUse()); - } - - /// Replace conditional branch with unconditional branch. - void rewrite(BrCondOp op, PatternRewriter &rewriter) const final { - auto constOp = llvm::cast(op.getCond().getDefiningOp()); - bool cond = constOp.getValue().cast().getValue(); - auto *destTrue = op.getDestTrue(), *destFalse = op.getDestFalse(); - Block *block = op.getOperation()->getBlock(); - - rewriter.eraseOp(op); - if (cond) { - rewriter.mergeBlocks(destTrue, block); - rewriter.eraseBlock(destFalse); - } else { - rewriter.mergeBlocks(destFalse, block); - rewriter.eraseBlock(destTrue); - } - } -}; - struct RemoveEmptyScope : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; @@ -146,7 +102,6 @@ void populateMergeCleanupPatterns(RewritePatternSet &patterns) { // clang-format off patterns.add< RemoveRedudantBranches, - MergeTrivialConditionalBranches, RemoveEmptyScope, RemoveEmptySwitch >(patterns.getContext()); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 09f9170ca570..fb358bfad4b5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -403,25 +403,14 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { using mlir::OpConversionPattern::OpConversionPattern; using LoopKind = mlir::cir::LoopOpKind; - mlir::LogicalResult - fetchCondRegionYields(mlir::Region &condRegion, - mlir::cir::YieldOp &yieldToBody, - mlir::cir::YieldOp &yieldToCont) const { - for (auto &bb : condRegion) { - if (auto yieldOp = dyn_cast(bb.getTerminator())) { - if (!yieldOp.getKind().has_value()) - yieldToCont = yieldOp; - else if (yieldOp.getKind() == mlir::cir::YieldOpKind::Continue) - yieldToBody = yieldOp; - else - return mlir::failure(); - } - } - - // Succeed only if both yields are found. - if (!yieldToBody) - return mlir::failure(); - return mlir::success(); + inline void + lowerConditionOp(mlir::cir::ConditionOp op, mlir::Block *body, + mlir::Block *exit, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp(op, op.getCondition(), + body, exit); } mlir::LogicalResult @@ -435,9 +424,6 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { // Fetch required info from the condition region. auto &condRegion = loopOp.getCond(); auto &condFrontBlock = condRegion.front(); - mlir::cir::YieldOp yieldToBody, yieldToCont; - if (fetchCondRegionYields(condRegion, yieldToBody, yieldToCont).failed()) - return loopOp.emitError("failed to fetch yields in cond region"); // Fetch required info from the body region. auto &bodyRegion = loopOp.getBody(); @@ -469,15 +455,10 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { auto &entry = (kind != LoopKind::DoWhile ? condFrontBlock : bodyFrontBlock); rewriter.create(loopOp.getLoc(), &entry); - // Set loop exit point to continue block. - if (yieldToCont) { - rewriter.setInsertionPoint(yieldToCont); - rewriter.replaceOpWithNewOp(yieldToCont, continueBlock); - } - - // Branch from condition to body. - rewriter.setInsertionPoint(yieldToBody); - rewriter.replaceOpWithNewOp(yieldToBody, &bodyFrontBlock); + // Branch from condition region to body or exit. + auto conditionOp = + cast(condFrontBlock.getTerminator()); + lowerConditionOp(conditionOp, &bodyFrontBlock, continueBlock, rewriter); // Branch from body to condition or to step on for-loop cases. rewriter.setInsertionPoint(bodyYield); diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 90831e31e898..3360692929f4 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -8,12 +8,8 @@ void l0() { // CHECK: cir.func @_Z2l0v // CHECK: cir.loop for(cond : { -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } +// CHECK: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK: cir.condition(%[[#TRUE]]) void l1() { int x = 0; @@ -27,11 +23,7 @@ void l1() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool -// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.condition(%6) // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i @@ -63,11 +55,7 @@ void l2(bool cond) { // CHECK: cir.scope { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.condition(%3) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -80,7 +68,8 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -95,11 +84,7 @@ void l2(bool cond) { // CHECK-NEXT: cir.loop while(cond : { // CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool -// CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.condition(%4) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -127,12 +112,8 @@ void l3(bool cond) { // CHECK: cir.func @_Z2l3b // CHECK: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.brcond %3 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: %[[#TRUE:]] = cir.load %0 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -145,7 +126,8 @@ void l3(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -160,11 +142,7 @@ void l3(bool cond) { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool -// CHECK-NEXT: cir.brcond %4 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.condition(%4) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -188,7 +166,8 @@ void l4() { // CHECK: cir.func @_Z2l4v // CHECK: cir.loop while(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -215,11 +194,7 @@ void l5() { // CHECK-NEXT: cir.loop dowhile(cond : { // CHECK-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool -// CHECK-NEXT: cir.brcond %1 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.condition(%1) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -238,7 +213,8 @@ void l6() { // CHECK: cir.func @_Z2l6v() // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 78d221dfa41f..977eae88e22f 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -46,11 +46,7 @@ void init(unsigned numImages) { // CHECK: cir.store %11, %6 : ![[VEC_IT]], cir.ptr // CHECK: cir.loop for(cond : { // CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool -// CHECK: cir.brcond %12 ^bb1, ^bb2 -// CHECK: ^bb1: // pred: ^bb0 -// CHECK: cir.yield continue -// CHECK: ^bb2: // pred: ^bb0 -// CHECK: cir.yield +// CHECK: cir.condition(%12) // CHECK: }, step : { // CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr // CHECK: cir.yield diff --git a/clang/test/CIR/IR/branch.cir b/clang/test/CIR/IR/branch.cir index 6f75d9e25bd3..7f418908a94c 100644 --- a/clang/test/CIR/IR/branch.cir +++ b/clang/test/CIR/IR/branch.cir @@ -1,60 +1,21 @@ // RUN: cir-opt %s | FileCheck %s -#false = #cir.bool : !cir.bool -#true = #cir.bool : !cir.bool - -cir.func @b0() { - cir.scope { - cir.loop while(cond : { - %0 = cir.const(#true) : !cir.bool - cir.brcond %0 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield - }, step : { - cir.yield - }) { - cir.br ^bb1 - ^bb1: - cir.return - } - } +cir.func @test_branch_parsing(%arg0: !cir.bool) { + // CHECK: cir.br ^bb1 + cir.br ^bb1 +^bb1: + // CHECK: cir.br ^bb2(%arg0 : !cir.bool) + cir.br ^bb2(%arg0 : !cir.bool) +// CHECK: ^bb2(%0: !cir.bool): +^bb2(%x: !cir.bool): cir.return } -// CHECK: cir.func @b0 -// CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: cir.br ^bb1 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.return -// CHECK-NEXT: } -// CHECK-NEXT: } -// CHECK-NEXT: cir.return -// CHECK-NEXT: } - - -!s32i = !cir.int -cir.func @test_br() -> !s32i { - %0 = cir.const(#cir.int<0>: !s32i) : !s32i - cir.br ^bb1(%0 : !s32i) - ^bb1(%x: !s32i): - cir.return %x : !s32i +cir.func @test_conditional_branch_parsing(%arg0 : !cir.bool) { + // CHEK: cir.brcond %arg0 ^bb1, ^bb2 + cir.brcond %arg0 ^bb1, ^bb2 +^bb1: + cir.return +^bb2: + cir.return } - -// CHECK: cir.func @test_br() -> !s32i { -// CHECK-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK-NEXT: cir.br ^bb1(%0 : !s32i) -// CHECK-NEXT: ^bb1(%1: !s32i): // pred: ^bb0 -// CHECK-NEXT: cir.return %1 : !s32i -// CHECK-NEXT: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 278909d59850..d353c7d7b878 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -318,13 +318,8 @@ cir.func @cast24(%p : !u32i) { #true = #cir.bool : !cir.bool cir.func @b0() { cir.scope { - cir.loop while(cond : { // expected-error {{cond region must be terminated with 'cir.yield' or 'cir.yield continue'}} - %0 = cir.const(#true) : !cir.bool - cir.brcond %0 ^bb1, ^bb2 - ^bb1: - cir.yield break - ^bb2: - cir.yield + cir.loop while(cond : { // expected-error {{cond region terminate with 'cir.condition'}} + cir.yield }, step : { cir.yield }) { diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index ac9658a304d3..798aaaeb5ae9 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -15,11 +15,7 @@ cir.func @l0() { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<10> : !u32i) : !u32i %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.brcond %6 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield + cir.condition(%6) }, step : { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<1> : !u32i) : !u32i @@ -46,11 +42,7 @@ cir.func @l0() { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<10> : !u32i) : !u32i %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.brcond %6 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield + cir.condition(%6) }, step : { cir.yield }) { @@ -74,11 +66,7 @@ cir.func @l0() { %4 = cir.load %2 : cir.ptr , !u32i %5 = cir.const(#cir.int<10> : !u32i) : !u32i %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.brcond %6 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield + cir.condition(%6) }, step : { cir.yield }) { @@ -97,11 +85,7 @@ cir.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.condition(%6) // CHECK-NEXT: }, step : { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i @@ -124,11 +108,7 @@ cir.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.condition(%6) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -147,11 +127,7 @@ cir.func @l0() { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.brcond %6 ^bb1, ^bb2 -// CHECK-NEXT: ^bb1: -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: ^bb2: -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.condition(%6) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -162,10 +138,10 @@ cir.func @l0() { // CHECK-NEXT: cir.yield // CHECK-NEXT: } -cir.func @l1() { +cir.func @l1(%arg0 : !cir.bool) { cir.scope { cir.loop while(cond : { - cir.yield continue + cir.condition(%arg0) }, step : { cir.yield }) { @@ -178,7 +154,7 @@ cir.func @l1() { // CHECK: cir.func @l1 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: cir.condition(%arg0) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { @@ -188,10 +164,10 @@ cir.func @l1() { // CHECK-NEXT: cir.return // CHECK-NEXT: } -cir.func @l2() { +cir.func @l2(%arg0 : !cir.bool) { cir.scope { cir.loop while(cond : { - cir.yield + cir.condition(%arg0) }, step : { cir.yield }) { @@ -204,7 +180,7 @@ cir.func @l2() { // CHECK: cir.func @l2 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.condition(%arg0) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index e889dcd05827..4f588e1f05f9 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: cir-opt %s -cir-to-llvm --reconcile-unrealized-casts -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR !s32i = !cir.int @@ -23,11 +23,7 @@ module { %11 = cir.load %2 : cir.ptr , !s32i %12 = cir.cmp(lt, %10, %11) : !s32i, !s32i %13 = cir.cast(int_to_bool, %12 : !s32i), !cir.bool - cir.brcond %13 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%13) }, step : { %10 = cir.load %8 : cir.ptr , !s32i %11 = cir.unary(inc, %10) : !s32i, !s32i @@ -80,7 +76,7 @@ module { // MLIR-NEXT: %13 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: llvm.store %13, %12 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb6 +// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb4 // MLIR-NEXT: %14 = llvm.load %12 : !llvm.ptr -> i32 // MLIR-NEXT: %15 = llvm.load %5 : !llvm.ptr -> i32 // MLIR-NEXT: %16 = llvm.icmp "slt" %14, %15 : i32 @@ -89,12 +85,8 @@ module { // MLIR-NEXT: %19 = llvm.icmp "ne" %17, %18 : i32 // MLIR-NEXT: %20 = llvm.zext %19 : i1 to i8 // MLIR-NEXT: %21 = llvm.trunc %20 : i8 to i1 -// MLIR-NEXT: llvm.cond_br %21, ^bb3, ^bb4 +// MLIR-NEXT: llvm.cond_br %21, ^bb3, ^bb5 // MLIR-NEXT: ^bb3: // pred: ^bb2 -// MLIR-NEXT: llvm.br ^bb5 -// MLIR-NEXT: ^bb4: // pred: ^bb2 -// MLIR-NEXT: llvm.br ^bb7 -// MLIR-NEXT: ^bb5: // pred: ^bb3 // MLIR-NEXT: %22 = llvm.load %1 : !llvm.ptr -> !llvm.ptr // MLIR-NEXT: %23 = llvm.load %12 : !llvm.ptr -> i32 // MLIR-NEXT: %24 = llvm.getelementptr %22[%23] : (!llvm.ptr, i32) -> !llvm.ptr, f64 @@ -107,16 +99,16 @@ module { // MLIR-NEXT: %31 = llvm.load %9 : !llvm.ptr -> f64 // MLIR-NEXT: %32 = llvm.fadd %31, %30 : f64 // MLIR-NEXT: llvm.store %32, %9 : f64, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb6 -// MLIR-NEXT: ^bb6: // pred: ^bb5 +// MLIR-NEXT: llvm.br ^bb4 +// MLIR-NEXT: ^bb4: // pred: ^bb3 // MLIR-NEXT: %33 = llvm.load %12 : !llvm.ptr -> i32 // MLIR-NEXT: %34 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: %35 = llvm.add %33, %34 : i32 // MLIR-NEXT: llvm.store %35, %12 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb7: // pred: ^bb4 -// MLIR-NEXT: llvm.br ^bb8 -// MLIR-NEXT: ^bb8: // pred: ^bb7 +// MLIR-NEXT: ^bb5: // pred: ^bb2 +// MLIR-NEXT: llvm.br ^bb6 +// MLIR-NEXT: ^bb6: // pred: ^bb5 // MLIR-NEXT: %36 = llvm.load %9 : !llvm.ptr -> f64 // MLIR-NEXT: llvm.store %36, %7 : f64, !llvm.ptr // MLIR-NEXT: %37 = llvm.load %7 : !llvm.ptr -> f64 diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index 685792a5b342..bbe42d179273 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -1,26 +1,15 @@ // RUN: cir-opt %s -cir-to-llvm -o %t.mlir -// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR - +// RUN: FileCheck --input-file=%t.mlir %s +#true = #cir.bool : !cir.bool !s32i = !cir.int + + module { - cir.func @testFor() { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr + + cir.func @testFor(%arg0 : !cir.bool) { cir.loop for(cond : { - %2 = cir.load %0 : cir.ptr , !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.brcond %5 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%arg0) }, step : { - %2 = cir.load %0 : cir.ptr , !s32i - %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr cir.yield }) { cir.yield @@ -28,271 +17,116 @@ module { cir.return } -// MLIR: module { -// MLIR-NEXT: llvm.func @testFor() -// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 -// MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 -// MLIR-NEXT: llvm.store %2, %1 : i32, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb1 -// ============= Condition block ============= -// MLIR-NEXT: ^bb1: // 2 preds: ^bb0, ^bb5 -// MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr -> i32 -// MLIR-NEXT: %4 = llvm.mlir.constant(10 : i32) : i32 -// MLIR-NEXT: %5 = llvm.icmp "slt" %3, %4 : i32 -// MLIR-NEXT: %6 = llvm.zext %5 : i1 to i32 -// MLIR-NEXT: %7 = llvm.mlir.constant(0 : i32) : i32 -// MLIR-NEXT: %8 = llvm.icmp "ne" %6, %7 : i32 -// MLIR-NEXT: %9 = llvm.zext %8 : i1 to i8 -// MLIR-NEXT: %10 = llvm.trunc %9 : i8 to i1 -// MLIR-NEXT: llvm.cond_br %10, ^bb2, ^bb3 -// MLIR-NEXT: ^bb2: // pred: ^bb1 -// MLIR-NEXT: llvm.br ^bb4 -// MLIR-NEXT: ^bb3: // pred: ^bb1 -// MLIR-NEXT: llvm.br ^bb6 -// ============= Body block ============= -// MLIR-NEXT: ^bb4: // pred: ^bb2 -// MLIR-NEXT: llvm.br ^bb5 -// ============= Step block ============= -// MLIR-NEXT: ^bb5: // pred: ^bb4 -// MLIR-NEXT: %11 = llvm.load %1 : !llvm.ptr -> i32 -// MLIR-NEXT: %12 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: %13 = llvm.add %11, %12 : i32 -// MLIR-NEXT: llvm.store %13, %1 : i32, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb1 -// ============= Exit block ============= -// MLIR-NEXT: ^bb6: // pred: ^bb3 -// MLIR-NEXT: llvm.return -// MLIR-NEXT: } +// CHECK: @testFor +// CHECK: llvm.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: llvm.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: llvm.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: + + // Test while cir.loop operation lowering. - cir.func @testWhile(%arg0: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - cir.scope { - cir.loop while(cond : { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.const(#cir.int<10> : !s32i) : !s32i - %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i - %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool - cir.brcond %4 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield - }, step : { - cir.yield - }) { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.unary(inc, %1) : !s32i, !s32i - cir.store %2, %0 : !s32i, cir.ptr - cir.yield - } + cir.func @testWhile(%arg0 : !cir.bool) { + cir.loop while(cond : { + cir.condition(%arg0) + }, step : { // Droped when lowering while statements. + cir.yield + }) { + cir.yield } cir.return } - // MLIR: llvm.func @testWhile(%arg0: i32) - // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 - // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr - // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr - // MLIR-NEXT: llvm.br ^bb1 - // MLIR-NEXT: ^bb1: - // MLIR-NEXT: llvm.br ^bb2 - // ============= Condition block ============= - // MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb5 - // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr -> i32 - // MLIR-NEXT: %3 = llvm.mlir.constant(10 : i32) : i32 - // MLIR-NEXT: %4 = llvm.icmp "slt" %2, %3 : i32 - // MLIR-NEXT: %5 = llvm.zext %4 : i1 to i32 - // MLIR-NEXT: %6 = llvm.mlir.constant(0 : i32) : i32 - // MLIR-NEXT: %7 = llvm.icmp "ne" %5, %6 : i32 - // MLIR-NEXT: %8 = llvm.zext %7 : i1 to i8 - // MLIR-NEXT: %9 = llvm.trunc %8 : i8 to i1 - // MLIR-NEXT: llvm.cond_br %9, ^bb3, ^bb4 - // MLIR-NEXT: ^bb3: // pred: ^bb2 - // MLIR-NEXT: llvm.br ^bb5 - // MLIR-NEXT: ^bb4: // pred: ^bb2 - // MLIR-NEXT: llvm.br ^bb6 - // ============= Body block ============= - // MLIR-NEXT: ^bb5: // pred: ^bb3 - // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr -> i32 - // MLIR-NEXT: %11 = llvm.mlir.constant(1 : i32) : i32 - // MLIR-NEXT: %12 = llvm.add %10, %11 : i32 - // MLIR-NEXT: llvm.store %12, %1 : i32, !llvm.ptr - // MLIR-NEXT: llvm.br ^bb2 - // ============= Exit block ============= - // MLIR-NEXT: ^bb6: // pred: ^bb4 - // MLIR-NEXT: llvm.br ^bb7 +// CHECK: @testWhile +// CHECK: llvm.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: llvm.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: + + // Test do-while cir.loop operation lowering. - cir.func @testDoWhile(%arg0: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - cir.scope { - cir.loop dowhile(cond : { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.const(#cir.int<10> : !s32i) : !s32i - %3 = cir.cmp(lt, %1, %2) : !s32i, !s32i - %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool - cir.brcond %4 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield - }, step : { - cir.yield - }) { - %1 = cir.load %0 : cir.ptr , !s32i - %2 = cir.unary(inc, %1) : !s32i, !s32i - cir.store %2, %0 : !s32i, cir.ptr - cir.yield - } + cir.func @testDoWhile(%arg0 : !cir.bool) { + cir.loop dowhile(cond : { + cir.condition(%arg0) + }, step : { // Droped when lowering while statements. + cir.yield + }) { + cir.yield } cir.return } - // MLIR: llvm.func @testDoWhile(%arg0: i32) - // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 - // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr - // MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr - // MLIR-NEXT: llvm.br ^bb1 - // MLIR-NEXT: ^bb1: - // MLIR-NEXT: llvm.br ^bb5 - // ============= Condition block ============= - // MLIR-NEXT: ^bb2: - // MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr -> i32 - // MLIR-NEXT: %3 = llvm.mlir.constant(10 : i32) : i32 - // MLIR-NEXT: %4 = llvm.icmp "slt" %2, %3 : i32 - // MLIR-NEXT: %5 = llvm.zext %4 : i1 to i32 - // MLIR-NEXT: %6 = llvm.mlir.constant(0 : i32) : i32 - // MLIR-NEXT: %7 = llvm.icmp "ne" %5, %6 : i32 - // MLIR-NEXT: %8 = llvm.zext %7 : i1 to i8 - // MLIR-NEXT: %9 = llvm.trunc %8 : i8 to i1 - // MLIR-NEXT: llvm.cond_br %9, ^bb3, ^bb4 - // MLIR-NEXT: ^bb3: - // MLIR-NEXT: llvm.br ^bb5 - // MLIR-NEXT: ^bb4: - // MLIR-NEXT: llvm.br ^bb6 - // ============= Body block ============= - // MLIR-NEXT: ^bb5: - // MLIR-NEXT: %10 = llvm.load %1 : !llvm.ptr -> i32 - // MLIR-NEXT: %11 = llvm.mlir.constant(1 : i32) : i32 - // MLIR-NEXT: %12 = llvm.add %10, %11 : i32 - // MLIR-NEXT: llvm.store %12, %1 : i32, !llvm.ptr - // MLIR-NEXT: llvm.br ^bb2 - // ============= Exit block ============= - // MLIR-NEXT: ^bb6: - // MLIR-NEXT: llvm.br ^bb7 +// CHECK: @testDoWhile +// CHECK: llvm.br ^bb[[#BODY:]] +// CHECK: ^bb[[#COND:]]: +// CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: llvm.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: - // Test endless cir.loop lowering. - cir.func @testEndless() { - cir.scope { - cir.loop for(cond : { - cir.yield continue - }, step : { - cir.yield - }) { - cir.yield - } - } - cir.return - } - // MLIR: llvm.func @testEndless() - // MLIR-NEXT: llvm.br ^bb1 - // MLIR-NEXT: ^bb1: - // MLIR-NEXT: llvm.br ^bb2 - // ============= Condition block ============= - // MLIR-NEXT: ^bb2: - // MLIR-NEXT: llvm.br ^bb3 - // ============= Body block ============= - // MLIR-NEXT: ^bb3: - // MLIR-NEXT: llvm.br ^bb4 - // ============= Step block ============= - // MLIR-NEXT: ^bb4: - // MLIR-NEXT: llvm.br ^bb2 - // ============= Exit block ============= - // MLIR-NEXT: ^bb5: - // MLIR-NEXT: llvm.br ^bb6 - // MLIR-NEXT: ^bb6: - // MLIR-NEXT: llvm.return // test corner case // while (1) { // break; // } - cir.func @whileCornerCase() { - cir.scope { - cir.loop while(cond : { - %0 = cir.const(#cir.int<1> : !s32i) : !s32i - %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool - cir.brcond %1 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield - }, step : { - cir.yield - }) { - cir.yield break - } + cir.func @testWhileWithBreakTerminatedBody(%arg0 : !cir.bool) { + cir.loop while(cond : { + cir.condition(%arg0) + }, step : { // Droped when lowering while statements. + cir.yield + }) { + cir.yield break } cir.return } - // MLIR: llvm.func @whileCornerCase() - // MLIR: %0 = llvm.mlir.constant(1 : i32) : i32 - // MLIR-NEXT: %1 = llvm.mlir.constant(0 : i32) : i32 - // MLIR-NEXT: %2 = llvm.icmp "ne" %0, %1 : i32 - // MLIR-NEXT: %3 = llvm.zext %2 : i1 to i8 - // MLIR-NEXT: %4 = llvm.trunc %3 : i8 to i - // MLIR-NEXT: llvm.cond_br %4, ^bb3, ^bb4 - // MLIR-NEXT: ^bb3: // pred: ^bb2 - // MLIR-NEXT: llvm.br ^bb5 - // MLIR-NEXT: ^bb4: // pred: ^bb2 - // MLIR-NEXT: llvm.br ^bb6 - // MLIR-NEXT: ^bb5: // pred: ^bb3 - // MLIR-NEXT: llvm.br ^bb6 - // MLIR-NEXT: ^bb6: // 2 preds: ^bb4, ^bb5 - // MLIR-NEXT: llvm.br ^bb7 - // MLIR-NEXT: ^bb7: // pred: ^bb6 - // MLIR-NEXT: llvm.return - // test corner case - no fails during the lowering +// CHECK: @testWhileWithBreakTerminatedBody +// CHECK: llvm.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: llvm.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: + + + + // test C only corner case - no fails during the lowering // for (;;) { // break; // } - cir.func @forCornerCase() { - cir.scope { + cir.func @forWithBreakTerminatedScopeInBody(%arg0 : !cir.bool) { cir.loop for(cond : { - cir.yield continue + cir.condition(%arg0) }, step : { cir.yield }) { - cir.scope { + cir.scope { // FIXME(cir): Redundant scope emitted during C codegen. cir.yield break } cir.yield } - } cir.return } -// MLIR: llvm.func @forCornerCase() -// MLIR: llvm.br ^bb1 -// MLIR-NEXT: ^bb1: // pred: ^bb0 -// MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb6 -// MLIR-NEXT: llvm.br ^bb3 -// MLIR-NEXT: ^bb3: // pred: ^bb2 -// MLIR-NEXT: llvm.br ^bb4 -// MLIR-NEXT: ^bb4: // pred: ^bb3 -// MLIR-NEXT: llvm.br ^bb7 -// MLIR-NEXT: ^bb5: // no predecessors -// MLIR-NEXT: llvm.br ^bb6 -// MLIR-NEXT: ^bb6: // pred: ^bb5 -// MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb7: // pred: ^bb4 -// MLIR-NEXT: llvm.br ^bb8 -// MLIR-NEXT: ^bb8: // pred: ^bb7 -// MLIR-NEXT: llvm.return + +// CHECK: @forWithBreakTerminatedScopeInBody +// CHECK: llvm.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND:]]: +// CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: llvm.br ^bb[[#SCOPE_IN:]] +// CHECK: ^bb[[#SCOPE_IN]]: +// CHECK: llvm.br ^bb[[#EXIT]] +// CHECK: ^bb[[#SCOPE_EXIT:]]: +// CHECK: llvm.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: llvm.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: } diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index f22865ebcc78..5bccde54df27 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -13,11 +13,7 @@ module { %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.brcond %5 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%5) }, step : { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i @@ -46,11 +42,7 @@ module { // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK0:]], ^bb[[#preEXIT0:]] - // CHECK: ^bb[[#preBREAK0]]: - // CHECK: llvm.br ^bb[[#preBREAK1:]] - // CHECK: ^bb[[#preEXIT0]]: - // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK1:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#preBREAK1]]: // CHECK: llvm.br ^bb[[#preBREAK2:]] // CHECK: ^bb[[#preBREAK2]]: @@ -83,11 +75,7 @@ module { %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.brcond %5 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%5) }, step : { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i @@ -104,11 +92,7 @@ module { %5 = cir.const(#cir.int<10> : !s32i) : !s32i %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.brcond %7 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%7) }, step : { %4 = cir.load %2 : cir.ptr , !s32i %5 = cir.unary(inc, %4) : !s32i, !s32i @@ -141,11 +125,7 @@ module { // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED0:]], ^bb[[#preEXIT0:]] - // CHECK: ^bb[[#preNESTED0]]: - // CHECK: llvm.br ^bb[[#preNESTED1:]] - // CHECK: ^bb[[#preEXIT0]]: - // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED1:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#preNESTED1]]: // CHECK: llvm.br ^bb[[#preNESTED2:]] // CHECK: ^bb[[#preNESTED2]]: @@ -155,11 +135,7 @@ module { // CHECK: llvm.br ^bb[[#COND_NESTED:]] // CHECK: ^bb[[#COND_NESTED]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK0:]], ^bb[[#preEXIT1:]] - // CHECK: ^bb[[#preBREAK0]]: - // CHECK: llvm.br ^bb[[#preBREAK1:]] - // CHECK: ^bb[[#preEXIT1]]: - // CHECK: llvm.br ^bb[[#EXIT_NESTED:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK1:]], ^bb[[#EXIT_NESTED:]] // CHECK: ^bb[[#preBREAK1]]: // CHECK: llvm.br ^bb[[#preBREAK2:]] // CHECK: ^bb[[#preBREAK2]]: @@ -200,11 +176,7 @@ module { %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.brcond %5 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%5) }, step : { cir.yield }) { @@ -232,11 +204,7 @@ module { // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBODY:]], ^bb[[#preEXIT0:]] - // CHECK: ^bb[[#preBODY]]: - // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#preEXIT0]]: - // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] // CHECK: llvm.br ^bb[[#BREAK:]] @@ -265,11 +233,7 @@ cir.func @testDoWhile() { %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.brcond %5 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%5) }, step : { cir.yield }) { @@ -296,11 +260,7 @@ cir.func @testDoWhile() { // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBODY:]], ^bb[[#preEXIT0:]] - // CHECK: ^bb[[#preBODY]]: - // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#preEXIT0]]: - // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] // CHECK: llvm.br ^bb[[#BREAK:]] diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir index c0f2c2658c2c..5dac140f7e24 100644 --- a/clang/test/CIR/Lowering/loops-with-continue.cir +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -13,11 +13,7 @@ module { %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.brcond %5 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%5) }, step : { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i @@ -46,11 +42,7 @@ module { // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE0:]], ^bb[[#preEXIT0:]] - // CHECK: ^bb[[#preCONTINUE0]]: - // CHECK: llvm.br ^bb[[#preCONTINUE1:]] - // CHECK: ^bb[[#preEXIT0]]: - // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE1:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#preCONTINUE1]]: // CHECK: llvm.br ^bb[[#preCONTINUE2:]] // CHECK: ^bb[[#preCONTINUE2]]: @@ -84,11 +76,7 @@ module { %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.brcond %5 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%5) }, step : { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i @@ -105,11 +93,7 @@ module { %5 = cir.const(#cir.int<10> : !s32i) : !s32i %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.brcond %7 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%7) }, step : { %4 = cir.load %2 : cir.ptr , !s32i %5 = cir.unary(inc, %4) : !s32i, !s32i @@ -142,11 +126,7 @@ module { // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED0:]], ^bb[[#preEXIT0:]] - // CHECK: ^bb[[#preNESTED0]]: - // CHECK: llvm.br ^bb[[#preNESTED1:]] - // CHECK: ^bb[[#preEXIT0]]: - // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED1:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#preNESTED1]]: // CHECK: llvm.br ^bb[[#preNESTED2:]] // CHECK: ^bb[[#preNESTED2]]: @@ -156,11 +136,7 @@ module { // CHECK: llvm.br ^bb[[#COND_NESTED:]] // CHECK: ^bb[[#COND_NESTED]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE0:]], ^bb[[#preEXIT1:]] - // CHECK: ^bb[[#preCONTINUE0]]: - // CHECK: llvm.br ^bb[[#preCONTINUE1:]] - // CHECK: ^bb[[#preEXIT1]]: - // CHECK: llvm.br ^bb[[#EXIT_NESTED:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE1:]], ^bb[[#EXIT_NESTED:]] // CHECK: ^bb[[#preCONTINUE1]]: // CHECK: llvm.br ^bb[[#preCONTINUE2:]] // CHECK: ^bb[[#preCONTINUE2]]: @@ -200,11 +176,7 @@ cir.func @testWhile() { %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.brcond %5 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%5) }, step : { cir.yield }) { @@ -231,11 +203,7 @@ cir.func @testWhile() { // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBODY:]], ^bb[[#preEXIT0:]] - // CHECK: ^bb[[#preBODY]]: - // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#preEXIT0]]: - // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] // CHECK: llvm.br ^bb[[#CONTINUE:]] @@ -262,11 +230,7 @@ cir.func @testWhile() { %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.brcond %5 ^bb1, ^bb2 - ^bb1: // pred: ^bb0 - cir.yield continue - ^bb2: // pred: ^bb0 - cir.yield + cir.condition(%5) }, step : { cir.yield }) { @@ -294,11 +258,7 @@ cir.func @testWhile() { // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBODY:]], ^bb[[#preEXIT0:]] - // CHECK: ^bb[[#preBODY]]: - // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#preEXIT0]]: - // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] // CHECK: llvm.br ^bb[[#CONTINUE:]] diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 3b0b21e935fe..8d84201aee35 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -65,31 +65,7 @@ module { cir.scope { cir.loop while(cond : { %0 = cir.const(#true) : !cir.bool - cir.brcond %0 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield - }, step : { - cir.yield - }) { - cir.br ^bb1 - ^bb1: - cir.return - } - } - cir.return - } - - cir.func @l1() { - cir.scope { - cir.loop while(cond : { - %0 = cir.const(#false) : !cir.bool - cir.brcond %0 ^bb1, ^bb2 - ^bb1: - cir.yield continue - ^bb2: - cir.yield + cir.condition(%0) }, step : { cir.yield }) { @@ -141,20 +117,8 @@ module { // CHECK: cir.func @l0 // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield continue -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: cir.return -// CHECK-NEXT: } -// CHECK-NEXT: } -// CHECK-NEXT: cir.return -// CHECK-NEXT: } - -// CHECK: cir.func @l1 -// CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.yield +// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool +// CHECK-NEXT: cir.condition(%0) // CHECK-NEXT: }, step : { // CHECK-NEXT: cir.yield // CHECK-NEXT: }) { From 7c7d339d2feed08870b074d3a980bd94e11afe92 Mon Sep 17 00:00:00 2001 From: Nikolas Klauser Date: Wed, 10 Jan 2024 16:30:59 +0100 Subject: [PATCH 1328/2301] [CIR][NFC] Canonicalize the names of the lowering classes --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 0853eeb87782..e0a06c5bf401 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -74,7 +74,7 @@ struct ConvertCIRToMLIRPass virtual StringRef getArgument() const override { return "cir-to-mlir"; } }; -class CIRCallLowering : public mlir::OpConversionPattern { +class CIRCallOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -91,7 +91,7 @@ class CIRCallLowering : public mlir::OpConversionPattern { } }; -class CIRAllocaLowering +class CIRAllocaOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -109,7 +109,7 @@ class CIRAllocaLowering } }; -class CIRLoadLowering : public mlir::OpConversionPattern { +class CIRLoadOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -121,7 +121,8 @@ class CIRLoadLowering : public mlir::OpConversionPattern { } }; -class CIRStoreLowering : public mlir::OpConversionPattern { +class CIRStoreOpLowering + : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -134,7 +135,7 @@ class CIRStoreLowering : public mlir::OpConversionPattern { } }; -class CIRConstantLowering +class CIRConstantOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -158,7 +159,7 @@ class CIRConstantLowering } }; -class CIRFuncLowering : public mlir::OpConversionPattern { +class CIRFuncOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -602,9 +603,9 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns.add(converter, patterns.getContext()); } From 47f1e86c7881ab70655f803a9a12685764e8b524 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 10 Jan 2024 20:08:12 -0300 Subject: [PATCH 1329/2301] [CIR][CIRGen][NFC] Support yielding values in LexicalScope Once the LexicalScope goes out of scope, its cleanup process will also check if a return was set to be yielded, and, if so, generate the yield with the respective value. ghstack-source-id: 9305d2ba5631840937721755358a774dc9e08b90 Pull Request resolved: https://github.com/llvm/clangir/pull/312 --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 6 ++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 6d0cacffa753..696c6c877dd9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -381,8 +381,10 @@ void CIRGenFunction::LexicalScope::cleanup() { if (localScope->Depth != 0) { // end of any local scope != function // Ternary ops have to deal with matching arms for yielding types // and do return a value, it must do its own cir.yield insertion. - if (!localScope->isTernary()) - builder.create(localScope->EndLoc); + if (!localScope->isTernary()) { + !retVal ? builder.create(localScope->EndLoc) + : builder.create(localScope->EndLoc, retVal); + } } else (void)buildReturn(localScope->EndLoc); }; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 024ec494bc5b..9e2d1687366f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1703,6 +1703,9 @@ class CIRGenFunction : public CIRGenTypeCache { Switch // cir.switch } ScopeKind = Regular; + // Track scope return value. + mlir::Value retVal = nullptr; + public: unsigned Depth = 0; bool HasReturn = false; @@ -1725,6 +1728,8 @@ class CIRGenFunction : public CIRGenTypeCache { assert(EntryBlock && "expected valid block"); } + void setRetVal(mlir::Value v) { retVal = v; } + void cleanup(); void restore() { CGF.currLexScope = ParentScope; } From 74bcb2976f5f63b999d5bbb196c98878ed05786b Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 10 Jan 2024 20:08:12 -0300 Subject: [PATCH 1330/2301] [CIR][CIRGen][NFC] Return scope result in compound stmt builders Instead of returning a boolean indicating whether the statement was handled, returns the ReturnExpr of the statement if there is one. It also adds some extra bookkeeping to ensure that the result is returned when needed. This allows for better support of GCC's `ExprStmt` extension. The logical result was not used: it was handled but it would never fail. Any errors within builders should likely be handled with asserts and unreachables since they imply a programmer's error in the code. ghstack-source-id: 2319cf3f12e56374a52aaafa4304e74de3ee6453 Pull Request resolved: https://github.com/llvm/clangir/pull/313 --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 3 ++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 10 ++++--- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 33 +++++++++++------------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 696c6c877dd9..a3c6b8da87d1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -24,6 +24,7 @@ #include "clang/Frontend/FrontendDiagnostic.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Support/LogicalResult.h" using namespace cir; using namespace clang; @@ -1114,7 +1115,7 @@ mlir::LogicalResult CIRGenFunction::buildFunctionBody(const clang::Stmt *Body) { auto result = mlir::LogicalResult::success(); if (const CompoundStmt *S = dyn_cast(Body)) - result = buildCompoundStmtWithoutScope(*S); + buildCompoundStmtWithoutScope(*S); else result = buildStmt(Body, /*useCurrentScope*/ true); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9e2d1687366f..2d56b33dc823 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -834,11 +834,13 @@ class CIRGenFunction : public CIRGenTypeCache { bool IsFnTryBlock = false); void exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); - mlir::LogicalResult buildCompoundStmt(const clang::CompoundStmt &S); - - mlir::LogicalResult - buildCompoundStmtWithoutScope(const clang::CompoundStmt &S); + Address buildCompoundStmt(const clang::CompoundStmt &S, bool getLast = false, + AggValueSlot slot = AggValueSlot::ignored()); + Address + buildCompoundStmtWithoutScope(const clang::CompoundStmt &S, + bool getLast = false, + AggValueSlot slot = AggValueSlot::ignored()); GlobalDecl CurSEHParent; bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; } diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index cedcdac48621..f683f7509876 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -10,30 +10,27 @@ // //===----------------------------------------------------------------------===// +#include "Address.h" #include "CIRGenFunction.h" +#include "mlir/IR/Value.h" using namespace cir; using namespace clang; using namespace mlir::cir; -mlir::LogicalResult -CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S) { +Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S, + bool getLast, + AggValueSlot slot) { for (auto *CurStmt : S.body()) if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed()) - return mlir::failure(); + return Address::invalid(); - return mlir::success(); + return Address::invalid(); } -mlir::LogicalResult CIRGenFunction::buildCompoundStmt(const CompoundStmt &S) { - mlir::LogicalResult res = mlir::success(); - - auto compoundStmtBuilder = [&]() -> mlir::LogicalResult { - if (buildCompoundStmtWithoutScope(S).failed()) - return mlir::failure(); - - return mlir::success(); - }; +Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast, + AggValueSlot slot) { + Address retAlloca = Address::invalid(); // Add local scope to track new declared variables. SymTableScopeTy varScope(symbolTable); @@ -42,10 +39,10 @@ mlir::LogicalResult CIRGenFunction::buildCompoundStmt(const CompoundStmt &S) { scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - res = compoundStmtBuilder(); + retAlloca = buildCompoundStmtWithoutScope(S); }); - return res; + return retAlloca; } void CIRGenFunction::buildStopPoint(const Stmt *S) { @@ -260,9 +257,9 @@ mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, case Stmt::DeclStmtClass: return buildDeclStmt(cast(*S)); case Stmt::CompoundStmtClass: - return useCurrentScope - ? buildCompoundStmtWithoutScope(cast(*S)) - : buildCompoundStmt(cast(*S)); + useCurrentScope ? buildCompoundStmtWithoutScope(cast(*S)) + : buildCompoundStmt(cast(*S)); + break; case Stmt::ReturnStmtClass: return buildReturnStmt(cast(*S)); case Stmt::GotoStmtClass: From cf0408b7f4af272404b0a9df418447f90f6c4bcb Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Wed, 10 Jan 2024 20:08:12 -0300 Subject: [PATCH 1331/2301] [CIR][CIRGen] Partially support statement expressions return values Adds support for GCC statement expressions return values as well as StmtExpr LValue emissions. To simplify the lowering process, the scope return value is not used. Instead, a temporary allocation is created on the parent scope where the return value is stored. For classes, a second scope is created around this temporary allocation to ensure any destructors are called. This does not implement the full semantics of statement expressions. ghstack-source-id: 64e03fc3df45975590ddbcab44959c2b49601101 Pull Request resolved: https://github.com/llvm/clangir/pull/314 --- clang/lib/CIR/CodeGen/Address.h | 7 +++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 12 +++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 9 ++++ clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 7 ++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 20 +++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 + clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 47 ++++++++++++++++--- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/stmt-expr.c | 42 +++++++++++++++++ clang/test/CIR/CodeGen/stmt-expr.cpp | 31 ++++++++++++ 10 files changed, 170 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/stmt-expr.c create mode 100644 clang/test/CIR/CodeGen/stmt-expr.cpp diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 31186f4a8e1f..3213c6a633bc 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -110,6 +110,13 @@ class Address { PointerAndKnownNonNull.setInt(true); return *this; } + + /// Get the operation which defines this address. + mlir::Operation *getDefiningOp() const { + if (!isValid()) + return nullptr; + return getPointer().getDefiningOp(); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 5aa751eb90a3..30082f133300 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -796,6 +796,18 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value v) { return create(loc, v); } + + // TODO(cir): Change this to hoist alloca to the parent *scope* instead. + /// Move alloca operation to the parent region. + void hoistAllocaToParentRegion(mlir::cir::AllocaOp alloca) { + auto &block = alloca->getParentOp()->getParentRegion()->front(); + const auto allocas = block.getOps(); + if (allocas.empty()) { + alloca->moveBefore(&block, block.begin()); + } else { + alloca->moveAfter(*std::prev(allocas.end())); + } + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index ccef09fe44a0..e3582737b4f6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1027,6 +1027,13 @@ RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, return buildCall(E->getCallee()->getType(), callee, E, ReturnValue); } +LValue CIRGenFunction::buildStmtExprLValue(const StmtExpr *E) { + // Can only get l-value for message expression returning aggregate type + RValue RV = buildAnyExprToTemp(E); + return makeAddrLValue(RV.getAggregateAddress(), E->getType(), + AlignmentSource::Decl); +} + RValue CIRGenFunction::buildCall(clang::QualType CalleeType, const CIRGenCallee &OrigCallee, const clang::CallExpr *E, @@ -2163,6 +2170,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { case Expr::ObjCPropertyRefExprClass: llvm_unreachable("cannot emit a property reference directly"); + case Expr::StmtExprClass: + return buildStmtExprLValue(cast(E)); } return LValue::makeAddr(Address::invalid(), E->getType()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 3837144ef975..e4a9923a055d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -203,7 +203,12 @@ class AggExprEmitter : public StmtVisitor { // Operators. void VisitCastExpr(CastExpr *E); void VisitCallExpr(const CallExpr *E); - void VisitStmtExpr(const StmtExpr *E) { llvm_unreachable("NYI"); } + + void VisitStmtExpr(const StmtExpr *E) { + assert(!UnimplementedFeature::stmtExprEvaluation() && "NYI"); + CGF.buildCompoundStmt(*E->getSubStmt(), /*getLast=*/true, Dest); + } + void VisitBinaryOperator(const BinaryOperator *E) { llvm_unreachable("NYI"); } void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *E) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 6103570bb34d..880e47f6efb4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "Address.h" #include "CIRDataLayout.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" @@ -291,7 +292,24 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitCastExpr(CastExpr *E); mlir::Value VisitCallExpr(const CallExpr *E); - mlir::Value VisitStmtExpr(StmtExpr *E) { llvm_unreachable("NYI"); } + + mlir::Value VisitStmtExpr(StmtExpr *E) { + assert(!UnimplementedFeature::stmtExprEvaluation() && "NYI"); + Address retAlloca = + CGF.buildCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType()); + if (!retAlloca.isValid()) + return {}; + + // FIXME(cir): This is a work around the ScopeOp builder. If we build the + // ScopeOp before its body, we would be able to create the retAlloca + // direclty in the parent scope removing the need to hoist it. + assert(retAlloca.getDefiningOp() && "expected a alloca op"); + CGF.getBuilder().hoistAllocaToParentRegion( + cast(retAlloca.getDefiningOp())); + + return CGF.buildLoadOfScalar(CGF.makeAddrLValue(retAlloca, E->getType()), + E->getExprLoc()); + } // Unary Operators. mlir::Value VisitUnaryPostDec(const UnaryOperator *E) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 2d56b33dc823..d74a978e4193 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -744,6 +744,8 @@ class CIRGenFunction : public CIRGenTypeCache { void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl); void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl); + LValue buildStmtExprLValue(const StmtExpr *E); + /// Generate a call of the given function, expecting the given /// result type, and using the given argument list which specifies both the /// LLVM arguments and the types they were derived from. diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index f683f7509876..801fc73b179d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -13,6 +13,10 @@ #include "Address.h" #include "CIRGenFunction.h" #include "mlir/IR/Value.h" +#include "clang/AST/CharUnits.h" +#include "clang/AST/Stmt.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" using namespace cir; using namespace clang; @@ -21,11 +25,42 @@ using namespace mlir::cir; Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S, bool getLast, AggValueSlot slot) { - for (auto *CurStmt : S.body()) - if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed()) - return Address::invalid(); + const Stmt *ExprResult = S.getStmtExprResult(); + assert((!getLast || (getLast && ExprResult)) && + "If getLast is true then the CompoundStmt must have a StmtExprResult"); - return Address::invalid(); + Address retAlloca = Address::invalid(); + + for (auto *CurStmt : S.body()) { + if (getLast && ExprResult == CurStmt) { + while (!isa(ExprResult)) { + if (const auto *LS = dyn_cast(ExprResult)) + llvm_unreachable("labels are NYI"); + else if (const auto *AS = dyn_cast(ExprResult)) + llvm_unreachable("statement attributes are NYI"); + else + llvm_unreachable("Unknown value statement"); + } + + const Expr *E = cast(ExprResult); + QualType exprTy = E->getType(); + if (hasAggregateEvaluationKind(exprTy)) { + buildAggExpr(E, slot); + } else { + // We can't return an RValue here because there might be cleanups at + // the end of the StmtExpr. Because of that, we have to emit the result + // here into a temporary alloca. + retAlloca = CreateMemTemp(exprTy, getLoc(E->getSourceRange())); + buildAnyExprToMem(E, retAlloca, Qualifiers(), + /*IsInit*/ false); + } + } else { + if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed()) + llvm_unreachable("failed to build statement"); + } + } + + return retAlloca; } Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast, @@ -37,9 +72,9 @@ Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast, auto scopeLoc = getLoc(S.getSourceRange()); builder.create( scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { + [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) { LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - retAlloca = buildCompoundStmtWithoutScope(S); + retAlloca = buildCompoundStmtWithoutScope(S, getLast, slot); }); return retAlloca; diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index ee3d643dd136..1e5d1dfe7526 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -144,6 +144,7 @@ struct UnimplementedFeature { static bool metaDataNode() { return false; } static bool isSEHTryScope() { return false; } static bool emitScalarRangeCheck() { return false; } + static bool stmtExprEvaluation() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/stmt-expr.c b/clang/test/CIR/CodeGen/stmt-expr.c new file mode 100644 index 000000000000..8029358887e0 --- /dev/null +++ b/clang/test/CIR/CodeGen/stmt-expr.c @@ -0,0 +1,42 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Yields void. +void test1() { ({ }); } +// CHECK: @test1 +// CHECK: cir.scope { +// CHECK-NOT: cir.yield +// CHECK: } + +// Yields an out-of-scope scalar. +void test2() { ({int x = 3; x; }); } +// CHECK: @test2 +// CHECK: %[[#RETVAL:]] = cir.alloca !s32i, cir.ptr +// CHECK: cir.scope { +// CHECK: %[[#VAR:]] = cir.alloca !s32i, cir.ptr , ["x", init] +// [...] +// CHECK: %[[#TMP:]] = cir.load %[[#VAR]] : cir.ptr , !s32i +// CHECK: cir.store %[[#TMP]], %[[#RETVAL]] : !s32i, cir.ptr +// CHECK: } +// CHECK: %{{.+}} = cir.load %[[#RETVAL]] : cir.ptr , !s32i + +// Yields an aggregate. +struct S { int x; }; +int test3() { return ({ struct S s = {1}; s; }).x; } +// CHECK: @test3 +// CHECK: %[[#RETVAL:]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: cir.scope { +// CHECK: %[[#VAR:]] = cir.alloca !ty_22S22, cir.ptr +// [...] +// CHECK: cir.copy %[[#VAR]] to %[[#RETVAL]] : !cir.ptr +// CHECK: } +// CHECK: %[[#RETADDR:]] = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: %{{.+}} = cir.load %[[#RETADDR]] : cir.ptr , !s32i + +// Expression is wrapped in an expression attribute (just ensure it does not crash). +void test4(int x) { ({[[gsl::suppress("foo")]] x;}); } +// CHECK: @test4 + +// TODO(cir): Missing label support. +// // Expression is wrapped in a label. +// // void test5(int x) { x = ({ label: x; }); } diff --git a/clang/test/CIR/CodeGen/stmt-expr.cpp b/clang/test/CIR/CodeGen/stmt-expr.cpp new file mode 100644 index 000000000000..d9d619f70a92 --- /dev/null +++ b/clang/test/CIR/CodeGen/stmt-expr.cpp @@ -0,0 +1,31 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +class A { +public: + A(): x(0) {} + A(A &a) : x(a.x) {} + // TODO(cir): Ensure dtors are properly called. The dtor below crashes. + // ~A() {} + int x; + void Foo() {} +}; + +void test1() { + ({ + A a; + a; + }).Foo(); +} +// CHECK: @_Z5test1v +// CHECK: cir.scope { +// CHECK: %[[#RETVAL:]] = cir.alloca !ty_22A22, cir.ptr +// CHECK: cir.scope { +// CHECK: %[[#VAR:]] = cir.alloca !ty_22A22, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: cir.call @_ZN1AC1Ev(%[[#VAR]]) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1AC1ERS_(%[[#RETVAL]], %[[#VAR]]) : (!cir.ptr, !cir.ptr) -> () +// TODO(cir): the local VAR should be destroyed here. +// CHECK: } +// CHECK: cir.call @_ZN1A3FooEv(%[[#RETVAL]]) : (!cir.ptr) -> () +// TODO(cir): the temporary RETVAL should be destroyed here. +// CHECK: } From d9f3c456acf9d881002968ef40830fbd9e131620 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 3 Nov 2023 15:17:44 -0700 Subject: [PATCH 1332/2301] [CIR] Introduce exception info type Incrementally design out support for try/catch and relationship with calls. This introduces an exception information type, which is will be returned somehow by throwing calls. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 22 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenException.cpp | 1 + 2 files changed, 23 insertions(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 464ef2100d96..22fe594bf9ba 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -118,6 +118,7 @@ def CIR_PointerType : CIR_Type<"Pointer", "ptr", // generic. // //===----------------------------------------------------------------------===// + def CIR_BoolType : CIR_Type<"Bool", "bool", [DeclareTypeInterfaceMethods]> { @@ -265,4 +266,25 @@ def CIR_AnyType : AnyTypeOf<[ CIR_FuncType, CIR_VoidType, CIR_StructType, AnyFloat, ]>; + +//===----------------------------------------------------------------------===// +// Exception info type +// +// By introducing an exception info type, exception related operations can be +// more descriptive. +// +// This basically wraps a uint8_t* and a uint32_t +// +//===----------------------------------------------------------------------===// + +def CIR_ExceptionInfo : CIR_Type<"ExceptionInfo", "eh.info"> { + let summary = "CIR exception info"; + let description = [{ + Represents the content necessary for a `cir.call` to pass back an exception + object pointer + some extra selector information. This type is required for + some exception related operations, like `cir.catch`, `cir.eh.selector_slot` + and `cir.eh.slot`. + }]; +} + #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index bc20ec9839fa..e7595e89cd91 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -390,6 +390,7 @@ mlir::Block *CIRGenFunction::buildLandingPad() { { // Save the current CIR generation state. mlir::OpBuilder::InsertionGuard guard(builder); + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); // FIXME(cir): handle CIR relevant landing pad bits, there's no good // way to assert here right now and leaving one in break important From aa32285ee482f79424e18892374dc8a9abd08d80 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 3 Nov 2023 15:23:22 -0700 Subject: [PATCH 1333/2301] Revert "[CIR][CIRGen][Exception] Workaround internal testcase break" This reverts commit 55c03f8976ea --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index e7595e89cd91..e9c70653abec 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -271,6 +271,8 @@ mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { }); enterCXXTryStmt(S, catchOp); + llvm_unreachable("NYI"); + if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) return mlir::failure(); exitCXXTryStmt(S); @@ -383,6 +385,7 @@ mlir::Block *CIRGenFunction::buildLandingPad() { case EHScope::Catch: case EHScope::Cleanup: case EHScope::Filter: + llvm_unreachable("NYI"); if (auto *lpad = innermostEHScope.getCachedLandingPad()) return lpad; } @@ -397,6 +400,7 @@ mlir::Block *CIRGenFunction::buildLandingPad() { // testcases. Work to fill this in is coming soon. } + llvm_unreachable("NYI"); return nullptr; } @@ -440,8 +444,7 @@ mlir::Block *CIRGenFunction::getInvokeDestImpl() { LP = buildLandingPad(); } - // FIXME(cir): this breaks important testcases, fix is coming soon. - // assert(LP); + assert(LP); // Cache the landing pad on the innermost scope. If this is a // non-EH scope, cache the landing pad on the enclosing scope, too. From 6cbea9cac497dbc79378a938840db6ebc60bc643 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 10 Jan 2024 17:13:24 -0800 Subject: [PATCH 1334/2301] [CIR][CIRGen][Exceptions] More on try statemet codegen: wrap with cir.scope Incremental work, test coming soon. This code path isn't exercised just yet. --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 55 +++++++++++++++-------- 1 file changed, 37 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index e9c70653abec..45e8cffdde0b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -252,31 +252,50 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { } mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { + const llvm::Triple &T = getTarget().getTriple(); + // If we encounter a try statement on in an OpenMP target region offloaded to + // a GPU, we treat it as a basic block. + const bool IsTargetDevice = + (CGM.getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN())); + assert(IsTargetDevice && "NYI"); + auto tryLoc = getLoc(S.getBeginLoc()); auto numHandlers = S.getNumHandlers(); - // FIXME(cir): create scope, and add catchOp to the lastest possible position + // FIXME(cir): add catchOp to the lastest possible position // inside the cleanup block. - - // Create the skeleton for the catch statements. - auto catchOp = builder.create( - tryLoc, // FIXME(cir): we can do better source location here. - [&](mlir::OpBuilder &b, mlir::Location loc, - mlir::OperationState &result) { - mlir::OpBuilder::InsertionGuard guard(b); - for (int i = 0, e = numHandlers; i != e; ++i) { - auto *r = result.addRegion(); - builder.createBlock(r); + auto scopeLoc = getLoc(S.getSourceRange()); + auto res = mlir::success(); + + builder.create( + scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScope lexScope{*this, loc, + builder.getInsertionBlock()}; + + // Create the skeleton for the catch statements. + auto catchOp = builder.create( + tryLoc, // FIXME(cir): we can do better source location here. + [&](mlir::OpBuilder &b, mlir::Location loc, + mlir::OperationState &result) { + mlir::OpBuilder::InsertionGuard guard(b); + for (int i = 0, e = numHandlers; i != e; ++i) { + auto *r = result.addRegion(); + builder.createBlock(r); + } + }); + + enterCXXTryStmt(S, catchOp); + + if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) { + res = mlir::failure(); + return; } - }); - enterCXXTryStmt(S, catchOp); - llvm_unreachable("NYI"); + exitCXXTryStmt(S); + }); - if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) - return mlir::failure(); - exitCXXTryStmt(S); - return mlir::success(); + return res; } /// Emit the structure of the dispatch block for the given catch scope. From 1541f9e948861d4688dbd19af385270f87ce7283 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 10 Jan 2024 17:18:31 -0800 Subject: [PATCH 1335/2301] [CIR][NFC] Update comment for implemented fix --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 45e8cffdde0b..df4a1f913935 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -330,7 +330,6 @@ void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S, for (unsigned I = 0; I != NumHandlers; ++I) { const CXXCatchStmt *C = S.getHandler(I); - // FIXME: hook the CIR block for the right catch region here. mlir::Block *Handler = &catchOp.getRegion(I).getBlocks().front(); if (C->getExceptionDecl()) { // FIXME: Dropping the reference type on the type into makes it From 186c906b8d21f538d9f95e3e57e0b05877676ca0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 13 Nov 2023 13:55:24 -0800 Subject: [PATCH 1336/2301] [CIR][CIRGen] Fix wrong assert Silly mistake introduced in previous commit. --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index df4a1f913935..383968cbb517 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -257,7 +257,7 @@ mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { // a GPU, we treat it as a basic block. const bool IsTargetDevice = (CGM.getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN())); - assert(IsTargetDevice && "NYI"); + assert(!IsTargetDevice && "NYI"); auto tryLoc = getLoc(S.getBeginLoc()); auto numHandlers = S.getNumHandlers(); From fc657d3fb85bd22d94bd99f930dc2ed7d26c3d4c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 11 Jan 2024 15:21:09 -0800 Subject: [PATCH 1337/2301] [CIR] Add a CIRCallOpInterface, on top of CallOpInterface --- .../include/clang/CIR/Dialect/IR/CIRDialect.h | 1 + clang/include/clang/CIR/Dialect/IR/CIROps.td | 37 ++++++++----------- .../clang/CIR/Interfaces/CIROpInterfaces.h | 32 ++++++++++++++++ .../clang/CIR/Interfaces/CIROpInterfaces.td | 36 ++++++++++++++++++ .../clang/CIR/Interfaces/CMakeLists.txt | 9 +++++ clang/lib/CIR/CodeGen/CMakeLists.txt | 3 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 34 +++++++++-------- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 2 +- clang/lib/CIR/FrontendAction/CMakeLists.txt | 1 + clang/lib/CIR/Interfaces/CIROpInterfaces.cpp | 15 ++++++++ clang/lib/CIR/Interfaces/CMakeLists.txt | 6 ++- .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 1 + .../CIR/Lowering/ThroughMLIR/CMakeLists.txt | 1 + 14 files changed, 138 insertions(+), 41 deletions(-) create mode 100644 clang/include/clang/CIR/Interfaces/CIROpInterfaces.h create mode 100644 clang/include/clang/CIR/Interfaces/CIROpInterfaces.td create mode 100644 clang/lib/CIR/Interfaces/CIROpInterfaces.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index 1f677356c4d4..925f34e852a6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -32,6 +32,7 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "clang/CIR/Interfaces/CIROpInterfaces.h" namespace mlir { namespace OpTrait { diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 53682e28c20f..df366f004396 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -19,8 +19,8 @@ include "clang/CIR/Dialect/IR/CIRTypes.td" include "clang/CIR/Dialect/IR/CIRAttrs.td" include "clang/CIR/Interfaces/ASTAttrInterfaces.td" +include "clang/CIR/Interfaces/CIROpInterfaces.td" -include "mlir/Interfaces/CallInterfaces.td" include "mlir/Interfaces/ControlFlowInterfaces.td" include "mlir/Interfaces/FunctionInterfaces.td" include "mlir/Interfaces/InferTypeOpInterface.td" @@ -1913,7 +1913,7 @@ def FuncOp : CIR_Op<"func", [ //===----------------------------------------------------------------------===// def CallOp : CIR_Op<"call", - [DeclareOpInterfaceMethods, + [DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "call operation"; let description = [{ @@ -1968,31 +1968,26 @@ def CallOp : CIR_Op<"call", }]>]; let extraClassDeclaration = [{ - mlir::Value getIndirectCallee() { - assert(!getCallee() && "only works for indirect call"); - return *arg_operand_begin(); + /// Get the argument operands to the called function. + OperandRange getArgOperands() { + return {arg_operand_begin(), arg_operand_end()}; } - operand_iterator arg_operand_begin() { - auto arg_begin = operand_begin(); - if (!getCallee()) - arg_begin++; - return arg_begin; + MutableOperandRange getArgOperandsMutable() { + llvm_unreachable("NYI"); } - operand_iterator arg_operand_end() { return operand_end(); } - /// Return the operand at index 'i', accounts for indirect call. - Value getArgOperand(unsigned i) { - if (!getCallee()) - i++; - return getOperand(i); + /// Return the callee of this operation + CallInterfaceCallable getCallableForCallee() { + return (*this)->getAttrOfType("callee"); } - /// Return the number of operands, , accounts for indirect call. - unsigned getNumArgOperands() { - if (!getCallee()) - return this->getOperation()->getNumOperands()-1; - return this->getOperation()->getNumOperands(); + /// Set the callee for this operation. + void setCalleeFromCallable(::mlir::CallInterfaceCallable callee) { + if (auto calling = + (*this)->getAttrOfType(getCalleeAttrName())) + (*this)->setAttr(getCalleeAttrName(), callee.get()); + setOperand(0, callee.get()); } }]; diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h new file mode 100644 index 000000000000..fcef7a33eb20 --- /dev/null +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h @@ -0,0 +1,32 @@ +//===- CIROpInterfaces.h - CIR Op Interfaces --------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_INTERFACES_CIR_OP_H_ +#define MLIR_INTERFACES_CIR_OP_H_ + +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Operation.h" +#include "mlir/IR/Value.h" +#include "mlir/Interfaces/CallInterfaces.h" + +#include "clang/AST/Attr.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/Mangle.h" + +namespace mlir { +namespace cir {} // namespace cir +} // namespace mlir + +/// Include the generated interface declarations. +#include "clang/CIR/Interfaces/CIROpInterfaces.h.inc" + +namespace mlir { +namespace cir {} // namespace cir +} // namespace mlir + +#endif // MLIR_INTERFACES_CIR_OP_H_ diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td new file mode 100644 index 000000000000..0b176f8a0701 --- /dev/null +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td @@ -0,0 +1,36 @@ +//===- CIROpInterfaces.td - CIR Op Interface Definitions --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_CIR_OP_INTERFACES +#define MLIR_CIR_OP_INTERFACES + +include "mlir/IR/OpBase.td" +include "mlir/Interfaces/CallInterfaces.td" + +let cppNamespace = "::mlir::cir" in { + // The CIRCallOpInterface must be used instead of CallOpInterface when looking + // at arguments and other bits of CallOp. This creates a level of abstraction + // that's useful for handling indirect calls and other details. + def CIRCallOpInterface : OpInterface<"CIRCallOpInterface", [CallOpInterface]> { + let methods = [ + InterfaceMethod<"", "mlir::Operation::operand_iterator", + "arg_operand_begin", (ins)>, + InterfaceMethod<"", "mlir::Operation::operand_iterator", + "arg_operand_end", (ins)>, + InterfaceMethod< + "Return the operand at index 'i', accounts for indirect call", + "mlir::Value", "getArgOperand", (ins "unsigned":$i)>, + InterfaceMethod< + "Return the number of operands, accounts for indirect call", + "unsigned", "getNumArgOperands", (ins)>, + ]; + } + +} // namespace mlir::cir + +#endif // MLIR_CIR_OP_INTERFACES diff --git a/clang/include/clang/CIR/Interfaces/CMakeLists.txt b/clang/include/clang/CIR/Interfaces/CMakeLists.txt index 6925b69a2c97..fe835cade35c 100644 --- a/clang/include/clang/CIR/Interfaces/CMakeLists.txt +++ b/clang/include/clang/CIR/Interfaces/CMakeLists.txt @@ -12,4 +12,13 @@ function(add_clang_mlir_attr_interface interface) add_dependencies(mlir-generic-headers MLIRCIR${interface}IncGen) endfunction() +function(add_clang_mlir_op_interface interface) + set(LLVM_TARGET_DEFINITIONS ${interface}.td) + mlir_tablegen(${interface}.h.inc -gen-op-interface-decls) + mlir_tablegen(${interface}.cpp.inc -gen-op-interface-defs) + add_public_tablegen_target(MLIR${interface}IncGen) + add_dependencies(mlir-generic-headers MLIR${interface}IncGen) +endfunction() + add_clang_mlir_attr_interface(ASTAttrInterfaces) +add_clang_mlir_op_interface(CIROpInterfaces) diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 3750f5cae638..62df7a8d3d68 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -40,6 +40,7 @@ add_clang_library(clangCIR MLIRCIR MLIRCIROpsIncGen MLIRCIRASTAttrInterfacesIncGen + MLIRCIROpInterfacesIncGen ${dialect_libs} LINK_LIBS @@ -49,7 +50,7 @@ add_clang_library(clangCIR ${dialect_libs} MLIRCIR MLIRCIRTransforms - MLIRCIRASTAttrInterfaces + MLIRCIRInterfaces MLIRAffineToStandard MLIRAnalysis MLIRDLTIDialect diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 0a6e10812b0c..08837b4e5aa1 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -43,6 +43,7 @@ using namespace mlir::cir; #include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "clang/CIR/Interfaces/CIROpInterfaces.h" //===----------------------------------------------------------------------===// // CIR Dialect @@ -1953,26 +1954,27 @@ LogicalResult cir::FuncOp::verify() { // CallOp //===----------------------------------------------------------------------===// -/// Get the argument operands to the called function. -OperandRange cir::CallOp::getArgOperands() { - return {arg_operand_begin(), arg_operand_end()}; +mlir::Operation::operand_iterator cir::CallOp::arg_operand_begin() { + auto arg_begin = operand_begin(); + if (!getCallee()) + arg_begin++; + return arg_begin; } - -MutableOperandRange cir::CallOp::getArgOperandsMutable() { - return getOperandsMutable(); +mlir::Operation::operand_iterator cir::CallOp::arg_operand_end() { + return operand_end(); } -/// Return the callee of this operation -CallInterfaceCallable cir::CallOp::getCallableForCallee() { - return (*this)->getAttrOfType("callee"); +/// Return the operand at index 'i', accounts for indirect call. +Value cir::CallOp::getArgOperand(unsigned i) { + if (!getCallee()) + i++; + return getOperand(i); } - -/// Set the callee for this operation. -void cir::CallOp::setCalleeFromCallable(::mlir::CallInterfaceCallable callee) { - if (auto calling = - (*this)->getAttrOfType(getCalleeAttrName())) - (*this)->setAttr(getCalleeAttrName(), callee.get()); - setOperand(0, callee.get()); +/// Return the number of operands, , accounts for indirect call. +unsigned cir::CallOp::getNumArgOperands() { + if (!getCallee()) + return this->getOperation()->getNumOperands() - 1; + return this->getOperation()->getNumOperands(); } LogicalResult diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 4956786314b9..b8cc5b84e93e 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -10,6 +10,7 @@ add_clang_library(MLIRCIR MLIRCIREnumsGen MLIRSymbolInterfacesIncGen MLIRCIRASTAttrInterfacesIncGen + MLIRCIROpInterfacesIncGen LINK_LIBS PUBLIC MLIRIR diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 7dcb9656d81e..a1ff2fc7d119 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -20,5 +20,5 @@ add_clang_library(MLIRCIRTransforms MLIRTransformUtils MLIRCIR - MLIRCIRASTAttrInterfaces + MLIRCIRInterfaces ) diff --git a/clang/lib/CIR/FrontendAction/CMakeLists.txt b/clang/lib/CIR/FrontendAction/CMakeLists.txt index 7201db6502e6..31ca49fedf44 100644 --- a/clang/lib/CIR/FrontendAction/CMakeLists.txt +++ b/clang/lib/CIR/FrontendAction/CMakeLists.txt @@ -11,6 +11,7 @@ add_clang_library(clangCIRFrontendAction DEPENDS MLIRCIROpsIncGen MLIRCIRASTAttrInterfacesIncGen + MLIRCIROpInterfacesIncGen MLIRBuiltinLocationAttributesIncGen MLIRBuiltinTypeInterfacesIncGen MLIRFunctionInterfacesIncGen diff --git a/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp b/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp new file mode 100644 index 000000000000..38211effb79c --- /dev/null +++ b/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp @@ -0,0 +1,15 @@ +//====- CIROpInterfaces.cpp - Interface to AST Attributes ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "clang/CIR/Interfaces/CIROpInterfaces.h" + +#include "llvm/ADT/SmallVector.h" + +using namespace mlir::cir; + +/// Include the generated type qualifiers interfaces. +#include "clang/CIR/Interfaces/CIROpInterfaces.cpp.inc" diff --git a/clang/lib/CIR/Interfaces/CMakeLists.txt b/clang/lib/CIR/Interfaces/CMakeLists.txt index 3f41389807d7..f672eb3f6a9c 100644 --- a/clang/lib/CIR/Interfaces/CMakeLists.txt +++ b/clang/lib/CIR/Interfaces/CMakeLists.txt @@ -1,14 +1,16 @@ -add_clang_library(MLIRCIRASTAttrInterfaces +add_clang_library(MLIRCIRInterfaces ASTAttrInterfaces.cpp + CIROpInterfaces.cpp ADDITIONAL_HEADER_DIRS ${MLIR_MAIN_INCLUDE_DIR}/mlir/Interfaces DEPENDS MLIRCIRASTAttrInterfacesIncGen + MLIRCIROpInterfacesIncGen LINK_LIBS ${dialect_libs} MLIRIR MLIRSupport - ) + ) \ No newline at end of file diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index b252af37dace..14b879ee1c44 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -13,6 +13,7 @@ add_clang_library(clangCIRLoweringDirectToLLVM MLIRCIREnumsGen MLIRCIROpsIncGen MLIRCIRASTAttrInterfacesIncGen + MLIRCIROpInterfacesIncGen MLIRBuiltinLocationAttributesIncGen MLIRBuiltinTypeInterfacesIncGen MLIRFunctionInterfacesIncGen diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index 716212d6b899..bcee5c65984d 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -13,6 +13,7 @@ add_clang_library(clangCIRLoweringThroughMLIR MLIRCIROpsIncGen MLIRCIREnumsGen MLIRCIRASTAttrInterfacesIncGen + MLIRCIROpInterfacesIncGen MLIRBuiltinLocationAttributesIncGen MLIRBuiltinTypeInterfacesIncGen MLIRFunctionInterfacesIncGen From d108cf5a6da73c96666677ef074d0a46ceb466d7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 Jan 2024 15:36:29 -0800 Subject: [PATCH 1338/2301] [CIR][NFC] Refactor cir.call into a tablegen helper class --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 65 ++++++++++---------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index df366f004396..8b0a4a244447 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1912,9 +1912,40 @@ def FuncOp : CIR_Op<"func", [ // CallOp //===----------------------------------------------------------------------===// -def CallOp : CIR_Op<"call", - [DeclareOpInterfaceMethods, - DeclareOpInterfaceMethods]> { +class CIR_CallOp : + Op, + DeclareOpInterfaceMethods]> { + let extraClassDeclaration = [{ + /// Get the argument operands to the called function. + OperandRange getArgOperands() { + return {arg_operand_begin(), arg_operand_end()}; + } + + MutableOperandRange getArgOperandsMutable() { + llvm_unreachable("NYI"); + } + + /// Return the callee of this operation + CallInterfaceCallable getCallableForCallee() { + return (*this)->getAttrOfType("callee"); + } + + /// Set the callee for this operation. + void setCalleeFromCallable(::mlir::CallInterfaceCallable callee) { + if (auto calling = + (*this)->getAttrOfType(getCalleeAttrName())) + (*this)->setAttr(getCalleeAttrName(), callee.get()); + setOperand(0, callee.get()); + } + }]; + + let hasCustomAssemblyFormat = 1; + let skipDefaultBuilders = 1; + let hasVerifier = 0; +} + +def CallOp : CIR_CallOp<"call"> { let summary = "call operation"; let description = [{ The `call` operation represents a direct call to a function that is within @@ -1966,34 +1997,6 @@ def CallOp : CIR_Op<"call", $_state.addAttribute("callee", callee); $_state.addTypes(resType); }]>]; - - let extraClassDeclaration = [{ - /// Get the argument operands to the called function. - OperandRange getArgOperands() { - return {arg_operand_begin(), arg_operand_end()}; - } - - MutableOperandRange getArgOperandsMutable() { - llvm_unreachable("NYI"); - } - - /// Return the callee of this operation - CallInterfaceCallable getCallableForCallee() { - return (*this)->getAttrOfType("callee"); - } - - /// Set the callee for this operation. - void setCalleeFromCallable(::mlir::CallInterfaceCallable callee) { - if (auto calling = - (*this)->getAttrOfType(getCalleeAttrName())) - (*this)->setAttr(getCalleeAttrName(), callee.get()); - setOperand(0, callee.get()); - } - }]; - - let hasCustomAssemblyFormat = 1; - let skipDefaultBuilders = 1; - let hasVerifier = 0; } //===----------------------------------------------------------------------===// From c25b8a4e01e18ed1070183ae140ad88d949841c2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 Jan 2024 15:54:59 -0800 Subject: [PATCH 1339/2301] [CIR] Introduce cir.try_call operation This will be used for any calls happening inside try regions. More refactoring. For now it's incremental work, still some mileage to cover before I can introduce a testcase. The current implementation mimics cir.call, pieces are going to change in following commits. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 52 ++++++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 114 ++++++++++++++----- 2 files changed, 136 insertions(+), 30 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8b0a4a244447..1a9da26b49ed 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1909,7 +1909,7 @@ def FuncOp : CIR_Op<"func", [ } //===----------------------------------------------------------------------===// -// CallOp +// CallOp and TryCallOp //===----------------------------------------------------------------------===// class CIR_CallOp : @@ -1999,6 +1999,56 @@ def CallOp : CIR_CallOp<"call"> { }]>]; } +def TryCallOp : CIR_CallOp<"try_call"> { + let summary = "try call operation"; + let description = [{ + Works very similar to `cir.call` but passes down an exception object + in case anything is thrown by the callee. Upon the callee throwing, + `cir.try_call` goes to current `cir.scope`'s `abort` label, otherwise + execution follows to the `continue` label. + + To walk the operands for this operation, use `getNumArgOperands()`, + `getArgOperand()`, `getArgOperands()`, `arg_operand_begin()` and + `arg_operand_begin()`. Avoid using `getNumOperands()`, `getOperand()`, + `operand_begin()`, etc, direclty - might be misleading given the + exception object address is also part of the raw operation's operands. + `` + + Example: + + ```mlir + %r = cir.try_call @division(%1, %2), ^continue_A, ^abort, %0 + ``` + }]; + + let arguments = (ins OptionalAttr:$callee, + Variadic:$operands, + OptionalAttr:$ast); + let results = (outs Variadic); + + let builders = [ + OpBuilder<(ins "FuncOp":$callee, CArg<"ValueRange", "{}">:$operands), [{ + $_state.addOperands(operands); + $_state.addAttribute("callee", SymbolRefAttr::get(callee)); + if (!callee.getFunctionType().isVoid()) + $_state.addTypes(callee.getFunctionType().getReturnType()); + }]>, + OpBuilder<(ins "Value":$ind_target, + "FuncType":$fn_type, + CArg<"ValueRange", "{}">:$operands), [{ + $_state.addOperands(ValueRange{ind_target}); + $_state.addOperands(operands); + if (!fn_type.isVoid()) + $_state.addTypes(fn_type.getReturnType()); + }]>, + OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType, + CArg<"ValueRange", "{}">:$operands), [{ + $_state.addOperands(operands); + $_state.addAttribute("callee", callee); + $_state.addTypes(resType); + }]>]; +} + //===----------------------------------------------------------------------===// // AwaitOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 08837b4e5aa1..7b71b7d61cda 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1977,56 +1977,57 @@ unsigned cir::CallOp::getNumArgOperands() { return this->getOperation()->getNumOperands(); } -LogicalResult -cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +static LogicalResult +verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { // Callee attribute only need on indirect calls. - auto fnAttr = (*this)->getAttrOfType("callee"); + auto fnAttr = op->getAttrOfType("callee"); if (!fnAttr) return success(); FuncOp fn = - symbolTable.lookupNearestSymbolFrom(*this, fnAttr); + symbolTable.lookupNearestSymbolFrom(op, fnAttr); if (!fn) - return emitOpError() << "'" << fnAttr.getValue() - << "' does not reference a valid function"; + return op->emitOpError() << "'" << fnAttr.getValue() + << "' does not reference a valid function"; // Verify that the operand and result types match the callee. Note that // argument-checking is disabled for functions without a prototype. auto fnType = fn.getFunctionType(); if (!fn.getNoProto()) { - if (!fnType.isVarArg() && getNumOperands() != fnType.getNumInputs()) - return emitOpError("incorrect number of operands for callee"); + if (!fnType.isVarArg() && op->getNumOperands() != fnType.getNumInputs()) + return op->emitOpError("incorrect number of operands for callee"); - if (fnType.isVarArg() && getNumOperands() < fnType.getNumInputs()) - return emitOpError("too few operands for callee"); + if (fnType.isVarArg() && op->getNumOperands() < fnType.getNumInputs()) + return op->emitOpError("too few operands for callee"); for (unsigned i = 0, e = fnType.getNumInputs(); i != e; ++i) - if (getOperand(i).getType() != fnType.getInput(i)) - return emitOpError("operand type mismatch: expected operand type ") + if (op->getOperand(i).getType() != fnType.getInput(i)) + return op->emitOpError("operand type mismatch: expected operand type ") << fnType.getInput(i) << ", but provided " - << getOperand(i).getType() << " for operand number " << i; + << op->getOperand(i).getType() << " for operand number " << i; } // Void function must not return any results. - if (fnType.isVoid() && getNumResults() != 0) - return emitOpError("callee returns void but call has results"); + if (fnType.isVoid() && op->getNumResults() != 0) + return op->emitOpError("callee returns void but call has results"); // Non-void function calls must return exactly one result. - if (!fnType.isVoid() && getNumResults() != 1) - return emitOpError("incorrect number of results for callee"); + if (!fnType.isVoid() && op->getNumResults() != 1) + return op->emitOpError("incorrect number of results for callee"); // Parent function and return value types must match. - if (!fnType.isVoid() && getResultTypes().front() != fnType.getReturnType()) { - return emitOpError("result type mismatch: expected ") + if (!fnType.isVoid() && + op->getResultTypes().front() != fnType.getReturnType()) { + return op->emitOpError("result type mismatch: expected ") << fnType.getReturnType() << ", but provided " - << getResult(0).getType(); + << op->getResult(0).getType(); } return success(); } -::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, - ::mlir::OperationState &result) { +static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { mlir::FlatSymbolRefAttr calleeAttr; llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> ops; llvm::SMLoc opsLoc; @@ -2068,12 +2069,13 @@ ::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, return ::mlir::success(); } -void CallOp::print(::mlir::OpAsmPrinter &state) { +void printCallCommon(Operation *op, mlir::FlatSymbolRefAttr flatSym, + ::mlir::OpAsmPrinter &state) { state << ' '; - auto ops = getOperands(); + auto ops = op->getOperands(); - if (getCallee()) { // Direct calls - state.printAttributeWithoutType(getCalleeAttr()); + if (flatSym) { // Direct calls + state.printAttributeWithoutType(flatSym); } else { // Indirect calls state << ops.front(); ops = ops.drop_front(); @@ -2084,11 +2086,65 @@ void CallOp::print(::mlir::OpAsmPrinter &state) { llvm::SmallVector<::llvm::StringRef, 2> elidedAttrs; elidedAttrs.push_back("callee"); elidedAttrs.push_back("ast"); - state.printOptionalAttrDict((*this)->getAttrs(), elidedAttrs); + state.printOptionalAttrDict(op->getAttrs(), elidedAttrs); state << ' ' << ":"; state << ' '; - state.printFunctionalType(getOperands().getTypes(), - getOperation()->getResultTypes()); + state.printFunctionalType(op->getOperands().getTypes(), op->getResultTypes()); +} + +LogicalResult +cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + return verifyCallCommInSymbolUses(*this, symbolTable); +} + +::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { + return parseCallCommon(parser, result); +} + +void CallOp::print(::mlir::OpAsmPrinter &state) { + printCallCommon(*this, getCalleeAttr(), state); +} + +//===----------------------------------------------------------------------===// +// TryCallOp +//===----------------------------------------------------------------------===// + +mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_begin() { + auto arg_begin = operand_begin(); + if (!getCallee()) + arg_begin++; + return arg_begin; +} +mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_end() { + return operand_end(); +} + +/// Return the operand at index 'i', accounts for indirect call. +Value cir::TryCallOp::getArgOperand(unsigned i) { + if (!getCallee()) + i++; + return getOperand(i); +} +/// Return the number of operands, , accounts for indirect call. +unsigned cir::TryCallOp::getNumArgOperands() { + if (!getCallee()) + return this->getOperation()->getNumOperands() - 1; + return this->getOperation()->getNumOperands(); +} + +LogicalResult +cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + return verifyCallCommInSymbolUses(*this, symbolTable); +} + +::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { + return parseCallCommon(parser, result); +} + +void TryCallOp::print(::mlir::OpAsmPrinter &state) { + printCallCommon(*this, getCalleeAttr(), state); } //===----------------------------------------------------------------------===// From 59d1ed627e5eb2da32d9c077e3cffa4c7e393c77 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 16 Jan 2024 21:44:28 +0300 Subject: [PATCH 1340/2301] [CIR][CIRGen] Add constraints to inline assembly (#351) The next step for inline assembly. Sorry, maybe it looks too big on the first glance. And it's kind of hard to extract something well-grained from the code and introduce it as a separate PR, but I try. Actually there is nothing really interesting done here, and the next will (I hope :) ) simplify your review process. 1) In this PR we introduce operand's constraints and the task is to collect them (and maybe transform a little) 2) There are two big functions copy-pasted from the traditional `Codegen` and I doubt they need to be reviewed. 3) We still don't do anything CIR-specific. Basically, we just work with strings in the same way like traditional `Codegen` does. 4) We just iterate over the input and output operands and collect the constraints 5) We still follow to the traditional `CodeGen` and don't do anything new, except a separate function that collects constraints infos in the very beginning of the `buildStmt`. Also, I renamed `AsmDialect` to `AsmFlavor` as you asked in #326 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 65 +++--- clang/lib/CIR/CodeGen/CIRAsm.cpp | 228 ++++++++++++++++++- clang/test/CIR/CodeGen/asm.c | 24 +- 3 files changed, 280 insertions(+), 37 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1a9da26b49ed..fd6cc34ef6d0 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2621,29 +2621,11 @@ def StackRestoreOp : CIR_Op<"stack_restore"> { let assemblyFormat = "$ptr attr-dict `:` qualified(type($ptr))"; } -//===----------------------------------------------------------------------===// -// Operations Lowered Directly to LLVM IR -// -// These operations are hacks to get around missing features in LLVM's dialect. -// Use it sparingly and remove it once the features are added. -//===----------------------------------------------------------------------===// - -def ZeroInitConstOp : CIR_Op<"llvmir.zeroinit", [Pure]>, - Results<(outs AnyType:$result)> { - let summary = "Zero initializes a constant value of a given type"; - let description = [{ - This operation circumvents the lack of a zeroinitializer operation in LLVM - Dialect. It can zeroinitialize any LLVM type. - }]; - let assemblyFormat = "attr-dict `:` type($result)"; - let hasVerifier = 0; -} - def AsmATT : I32EnumAttrCase<"x86_att", 0>; def AsmIntel : I32EnumAttrCase<"x86_intel", 1>; def AsmFlavor : I32EnumAttr< - "AsmDialect", + "AsmFlavor", "ATT or Intel", [AsmATT, AsmIntel]> { let cppNamespace = "::mlir::cir"; @@ -2653,14 +2635,25 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { let description = [{ The `cir.asm` operation represents C/C++ asm inline. + CIR constraints strings follow barelly the same rules that are established + for the C level assembler constraints with several differences caused by + clang::AsmStmt processing. + + Thus, numbers that appears in the constraint string may also refer to: + - the output variable index referenced by the input operands. + - the index of early-clobber operand + Example: ```C++ - __asm__ volatile("xyz" : : : ); - ``` - + __asm__("foo" : : : ); + __asm__("bar $42 %[val]" : [val] "=r" (x), "+&r"(x)); + __asm__("baz $42 %[val]" : [val] "=r" (x), "+&r"(x) : "[val]"(y)); ``` + ```mlir - cir.asm(x86_att, {"xyz"}) -> !void + cir.asm(x86_att, {"foo" ""}) + cir.asm(x86_att, {"bar $$42 $0" "=r,=&r,1"}) + cir.asm(x86_att, {"baz $$42 $0" "=r,=&r,0,1"}) ``` }]; @@ -2668,11 +2661,31 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { let arguments = ( ins StrAttr:$asm_string, - AsmFlavor:$asm_flavor); + StrAttr:$constraints, + AsmFlavor:$asm_flavor); let assemblyFormat = [{ - `(`$asm_flavor`,` `{` $asm_string `}` `)` attr-dict `:` type($res) - }]; + `(`$asm_flavor`,` `{` $asm_string $constraints `}` `)` attr-dict + `:` type($res) + }]; +} + +//===----------------------------------------------------------------------===// +// Operations Lowered Directly to LLVM IR +// +// These operations are hacks to get around missing features in LLVM's dialect. +// Use it sparingly and remove it once the features are added. +//===----------------------------------------------------------------------===// + +def ZeroInitConstOp : CIR_Op<"llvmir.zeroinit", [Pure]>, + Results<(outs AnyType:$result)> { + let summary = "Zero initializes a constant value of a given type"; + let description = [{ + This operation circumvents the lack of a zeroinitializer operation in LLVM + Dialect. It can zeroinitialize any LLVM type. + }]; + let assemblyFormat = "attr-dict `:` type($result)"; + let hasVerifier = 0; } #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index bf184a3d4f07..1e2f11e66eac 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -8,27 +8,237 @@ using namespace cir; using namespace clang; using namespace mlir::cir; -static AsmDialect inferDialect(const CIRGenModule &cgm, const AsmStmt &S) { - AsmDialect GnuAsmDialect = +static AsmFlavor inferFlavor(const CIRGenModule &cgm, const AsmStmt &S) { + AsmFlavor GnuAsmFlavor = cgm.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT - ? AsmDialect::x86_att - : AsmDialect::x86_intel; + ? AsmFlavor::x86_att + : AsmFlavor::x86_intel; - return isa(&S) ? AsmDialect::x86_intel : GnuAsmDialect; + return isa(&S) ? AsmFlavor::x86_intel : GnuAsmFlavor; +} + +// FIXME(cir): This should be a common helper between CIRGen +// and traditional CodeGen +static std::string SimplifyConstraint( + const char *Constraint, const TargetInfo &Target, + SmallVectorImpl *OutCons = nullptr) { + std::string Result; + + while (*Constraint) { + switch (*Constraint) { + default: + Result += Target.convertConstraint(Constraint); + break; + // Ignore these + case '*': + case '?': + case '!': + case '=': // Will see this and the following in mult-alt constraints. + case '+': + break; + case '#': // Ignore the rest of the constraint alternative. + while (Constraint[1] && Constraint[1] != ',') + Constraint++; + break; + case '&': + case '%': + Result += *Constraint; + while (Constraint[1] && Constraint[1] == *Constraint) + Constraint++; + break; + case ',': + Result += "|"; + break; + case 'g': + Result += "imr"; + break; + case '[': { + assert(OutCons && + "Must pass output names to constraints with a symbolic name"); + unsigned Index; + bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index); + assert(result && "Could not resolve symbolic name"); + (void)result; + Result += llvm::utostr(Index); + break; + } + } + + Constraint++; + } + + return Result; +} + +// FIXME(cir): This should be a common helper between CIRGen +// and traditional CodeGen +/// Look at AsmExpr and if it is a variable declared +/// as using a particular register add that as a constraint that will be used +/// in this asm stmt. +static std::string +AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, + const TargetInfo &Target, CIRGenModule &CGM, + const AsmStmt &Stmt, const bool EarlyClobber, + std::string *GCCReg = nullptr) { + const DeclRefExpr *AsmDeclRef = dyn_cast(&AsmExpr); + if (!AsmDeclRef) + return Constraint; + const ValueDecl &Value = *AsmDeclRef->getDecl(); + const VarDecl *Variable = dyn_cast(&Value); + if (!Variable) + return Constraint; + if (Variable->getStorageClass() != SC_Register) + return Constraint; + AsmLabelAttr *Attr = Variable->getAttr(); + if (!Attr) + return Constraint; + StringRef Register = Attr->getLabel(); + assert(Target.isValidGCCRegisterName(Register)); + // We're using validateOutputConstraint here because we only care if + // this is a register constraint. + TargetInfo::ConstraintInfo Info(Constraint, ""); + if (Target.validateOutputConstraint(Info) && !Info.allowsRegister()) { + CGM.ErrorUnsupported(&Stmt, "__asm__"); + return Constraint; + } + // Canonicalize the register here before returning it. + Register = Target.getNormalizedGCCRegisterName(Register); + if (GCCReg != nullptr) + *GCCReg = Register.str(); + return (EarlyClobber ? "&{" : "{") + Register.str() + "}"; +} + +using constraintInfos = SmallVector; + +static void collectInOutConstrainsInfos(const CIRGenFunction &cgf, + const AsmStmt &S, constraintInfos &out, + constraintInfos &in) { + + for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { + StringRef Name; + if (const GCCAsmStmt *GAS = dyn_cast(&S)) + Name = GAS->getOutputName(i); + TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name); + bool IsValid = cgf.getTarget().validateOutputConstraint(Info); + (void)IsValid; + assert(IsValid && "Failed to parse output constraint"); + out.push_back(Info); + } + + for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { + StringRef Name; + if (const GCCAsmStmt *GAS = dyn_cast(&S)) + Name = GAS->getInputName(i); + TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name); + bool IsValid = cgf.getTarget().validateInputConstraint(out, Info); + assert(IsValid && "Failed to parse input constraint"); + (void)IsValid; + in.push_back(Info); + } } mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { // Assemble the final asm string. std::string AsmString = S.generateAsmString(getContext()); + // Get all the output and input constraints together. + constraintInfos OutputConstraintInfos; + constraintInfos InputConstraintInfos; + collectInOutConstrainsInfos(*this, S, OutputConstraintInfos, + InputConstraintInfos); + std::string Constraints; std::vector ResultRegTypes; std::vector Args; - assert(!S.getNumOutputs() && "asm output operands are NYI"); - assert(!S.getNumInputs() && "asm intput operands are NYI"); + // Keep track of input constraints. + std::string InOutConstraints; + + // Keep track of out constraints for tied input operand. + std::vector OutputConstraints; + assert(!S.getNumClobbers() && "asm clobbers operands are NYI"); + for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { + TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; + + // Simplify the output constraint. + std::string OutputConstraint(S.getOutputConstraint(i)); + OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, + getTarget(), &OutputConstraintInfos); + + const Expr *OutExpr = S.getOutputExpr(i); + OutExpr = OutExpr->IgnoreParenNoopCasts(getContext()); + + std::string GCCReg; + OutputConstraint = + AddVariableConstraints(OutputConstraint, *OutExpr, getTarget(), CGM, S, + Info.earlyClobber(), &GCCReg); + + OutputConstraints.push_back(OutputConstraint); + + if (!Constraints.empty()) + Constraints += ','; + + // If this is a register output, then make the inline a sm return it + // by-value. If this is a memory result, return the value by-reference. + QualType QTy = OutExpr->getType(); + const bool IsScalarOrAggregate = + hasScalarEvaluationKind(QTy) || hasAggregateEvaluationKind(QTy); + if (!Info.allowsMemory() && IsScalarOrAggregate) { + Constraints += "=" + OutputConstraint; + } else { + Constraints += "=*"; + Constraints += OutputConstraint; + } + + if (Info.isReadWrite()) { + InOutConstraints += ','; + + // Only tie earlyclobber physregs. + if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber())) + InOutConstraints += llvm::utostr(i); + else + InOutConstraints += OutputConstraint; + } + } // iterate over output operands + + for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { + const Expr *InputExpr = S.getInputExpr(i); + + TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; + + if (!Constraints.empty()) + Constraints += ','; + + // Simplify the input constraint. + std::string InputConstraint(S.getInputConstraint(i)); + InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(), + &OutputConstraintInfos); + + InputConstraint = AddVariableConstraints( + InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()), + getTarget(), CGM, S, false /* No EarlyClobber */); + + std::string ReplaceConstraint(InputConstraint); + + // If this input argument is tied to a larger output result, extend the + // input to be the same size as the output. The LLVM backend wants to see + // the input and output of a matching constraint be the same size. Note + // that GCC does not define what the top bits are here. We use zext because + // that is usually cheaper, but LLVM IR should really get an anyext someday. + if (Info.hasTiedOperand()) { + unsigned Output = Info.getTiedOperand(); + + // Deal with the tied operands' constraint code in adjustInlineAsmType. + ReplaceConstraint = OutputConstraints[Output]; + } + + Constraints += InputConstraint; + } // iterate over input operands + + Constraints += InOutConstraints; + mlir::Type ResultType; if (ResultRegTypes.size() == 1) @@ -39,10 +249,10 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { builder.getCompleteStructTy(ResultRegTypes, sname, false, nullptr); } - AsmDialect AsmDialect = inferDialect(CGM, S); + AsmFlavor AsmFlavor = inferFlavor(CGM, S); builder.create(getLoc(S.getAsmLoc()), ResultType, - AsmString, AsmDialect); + AsmString, Constraints, AsmFlavor); return mlir::success(); } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/asm.c b/clang/test/CIR/CodeGen/asm.c index 02d3cc59af8b..6aa4ef5e6355 100644 --- a/clang/test/CIR/CodeGen/asm.c +++ b/clang/test/CIR/CodeGen/asm.c @@ -1,12 +1,32 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -//CHECK: cir.asm(x86_att, {""}) +//CHECK: cir.asm(x86_att, {"" ""}) void empty1() { __asm__ volatile("" : : : ); } -//CHECK: cir.asm(x86_att, {"xyz"}) +//CHECK: cir.asm(x86_att, {"xyz" ""}) void empty2() { __asm__ volatile("xyz" : : : ); +} + +//CHECK: cir.asm(x86_att, {"" "=*m,m"}) +void t1(int x) { + __asm__ volatile("" : "+m"(x)); +} + +//CHECK: cir.asm(x86_att, {"" "m"}) +void t2(int x) { + __asm__ volatile("" : : "m"(x)); +} + +//CHECK: cir.asm(x86_att, {"" "=*m"}) +void t3(int x) { + __asm__ volatile("" : "=m"(x)); +} + +//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1"}) +void t4(int x) { + __asm__ volatile("" : "=&r"(x), "+&r"(x)); } \ No newline at end of file From 71072dd28b354db992a850eddc335b53e440cd5a Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 16 Jan 2024 21:48:59 +0300 Subject: [PATCH 1341/2301] [CIR][Lowering] Support lowering of const arrays of structs (#370) This PR fixes CIR lowering for the next case. ``` void foo() { struct { int a; int b; } a[1] = {{0,1}}; } ``` Note, we don't create attribute here and lower such const arrays as values. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 23 ++++++++----------- clang/test/CIR/Lowering/const.cir | 22 ++++++++++++++++++ 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index fb358bfad4b5..662a12ebc7a6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1047,24 +1047,21 @@ class CIRConstantLowering // then memcopyied into the stack (as done in Clang). else if (auto arrTy = op.getType().dyn_cast()) { // Fetch operation constant array initializer. - if (auto constArr = op.getValue().dyn_cast()) { - // Lower constant array initializer. - auto denseAttr = lowerConstArrayAttr(constArr, typeConverter); - if (!denseAttr.has_value()) { - op.emitError() - << "unsupported lowering for #cir.const_array with element type " - << arrTy.getEltType(); - return mlir::failure(); - } + auto constArr = op.getValue().dyn_cast(); + if (!constArr && !isa(op.getValue())) + return op.emitError() << "array does not have a constant initializer"; + + std::optional denseAttr; + if (constArr && + (denseAttr = lowerConstArrayAttr(constArr, typeConverter))) { attr = denseAttr.value(); - } else if (auto zero = op.getValue().dyn_cast()) { - auto initVal = lowerCirAttrAsValue(op, zero, rewriter, typeConverter); + } else { + auto initVal = + lowerCirAttrAsValue(op, op.getValue(), rewriter, typeConverter); rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); - } else { - return op.emitError() << "array does not have a constant initializer"; } } else if (const auto structAttr = op.getValue().dyn_cast()) { diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 62d0b1aa2e64..deca881e2e6a 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -3,6 +3,8 @@ !s32i = !cir.int !s8i = !cir.int +!ty_22anon2E122 = !cir.struct, !cir.int} #cir.record.decl.ast> + module { cir.func @testConstArrInit() { %0 = cir.const(#cir.const_array<"string\00" : !cir.array> : !cir.array) : !cir.array @@ -15,4 +17,24 @@ module { // CHECK: cir.llvmir.zeroinit : !llvm.array<3 x i32> cir.return } + + cir.func @testConstArrayOfStructs() { + %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 4 : i64} + %1 = cir.const(#cir.const_array<[#cir.const_struct<{#cir.int<0> : !s32i, #cir.int<1> : !s32i}> : !ty_22anon2E122]> : !cir.array) : !cir.array + cir.store %1, %0 : !cir.array, cir.ptr > + cir.return + } + // CHECK: llvm.func @testConstArrayOfStructs() + // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 + // CHECK: %1 = llvm.alloca %0 x !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>> {alignment = 4 : i64} : (i64) -> !llvm.ptr + // CHECK: %2 = llvm.mlir.undef : !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>> + // CHECK: %3 = llvm.mlir.undef : !llvm.struct<"struct.anon.1", (i32, i32)> + // CHECK: %4 = llvm.mlir.constant(0 : i32) : i32 + // CHECK: %5 = llvm.insertvalue %4, %3[0] : !llvm.struct<"struct.anon.1", (i32, i32)> + // CHECK: %6 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %7 = llvm.insertvalue %6, %5[1] : !llvm.struct<"struct.anon.1", (i32, i32)> + // CHECK: %8 = llvm.insertvalue %7, %2[0] : !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>> + // CHECK: llvm.store %8, %1 : !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>>, !llvm.ptr + // CHECK: llvm.return + } From 3708340f25919979b1bdb69b967a33b1c3eca8db Mon Sep 17 00:00:00 2001 From: Nikolas Klauser Date: Tue, 16 Jan 2024 19:49:43 +0100 Subject: [PATCH 1342/2301] [CIR][CIRGen] Implement unary floating point builtins (#383) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 42 +- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 50 +- .../test/CIR/CodeGen/builtin-floating-point.c | 589 ++++++++++++++++++ 3 files changed, 641 insertions(+), 40 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-floating-point.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index fd6cc34ef6d0..6fab7c65d010 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2434,29 +2434,33 @@ def IterEndOp : CIR_Op<"iterator_end"> { } //===----------------------------------------------------------------------===// -// FAbsOp +// Floating Point Ops //===----------------------------------------------------------------------===// -def FAbsOp : CIR_Op<"fabs", [Pure, SameOperandsAndResultType]> { +class UnaryFPToFPBuiltinOp + : CIR_Op { let arguments = (ins AnyFloat:$src); let results = (outs AnyFloat:$result); - let summary = "Returns absolute value for floating-point input."; - let description = [{ - Equivalent to libc's `fabs` and LLVM's intrinsic with the same name. - - Examples: - - ```mlir - %1 = cir.const(1.0 : f64) : f64 - %2 = cir.fabs %1 : f64 - ``` - }]; - - let assemblyFormat = [{ - $src `:` type($src) attr-dict - }]; - let hasVerifier = 0; -} + let summary = "libc builtin equivalent ignoring " + "floating point exceptions and errno"; + let assemblyFormat = "$src `:` type($src) attr-dict"; +} + +def CeilOp : UnaryFPToFPBuiltinOp<"ceil">; +def CosOp : UnaryFPToFPBuiltinOp<"cos">; +def ExpOp : UnaryFPToFPBuiltinOp<"exp">; +def Exp2Op : UnaryFPToFPBuiltinOp<"exp2">; +def FloorOp : UnaryFPToFPBuiltinOp<"floor">; +def FAbsOp : UnaryFPToFPBuiltinOp<"fabs">; +def LogOp : UnaryFPToFPBuiltinOp<"log">; +def Log10Op : UnaryFPToFPBuiltinOp<"log10">; +def Log2Op : UnaryFPToFPBuiltinOp<"log2">; +def NearbyintOp : UnaryFPToFPBuiltinOp<"nearbyint">; +def RintOp : UnaryFPToFPBuiltinOp<"rint">; +def RoundOp : UnaryFPToFPBuiltinOp<"round">; +def SinOp : UnaryFPToFPBuiltinOp<"sin">; +def SqrtOp : UnaryFPToFPBuiltinOp<"sqrt">; +def TruncOp : UnaryFPToFPBuiltinOp<"trunc">; //===----------------------------------------------------------------------===// // Variadic Operations diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 4b56fe53c1a0..962f01d44d58 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -42,6 +42,19 @@ static RValue buildLibraryCall(CIRGenFunction &CGF, const FunctionDecl *FD, return CGF.buildCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); } +template +static RValue buildUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { + auto Arg = CGF.buildScalarExpr(E.getArg(0)); + + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, &E); + if (CGF.getBuilder().getIsFPConstrained()) + llvm_unreachable("constraint FP operations are NYI"); + + auto Call = + CGF.getBuilder().create(Arg.getLoc(), Arg.getType(), Arg); + return RValue::get(Call->getResult(0)); +} + RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue) { @@ -98,7 +111,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_ceilf16: case Builtin::BI__builtin_ceill: case Builtin::BI__builtin_ceilf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIcopysign: case Builtin::BIcopysignf: @@ -118,7 +131,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_cosf16: case Builtin::BI__builtin_cosl: case Builtin::BI__builtin_cosf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIexp: case Builtin::BIexpf: @@ -128,7 +141,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_expf16: case Builtin::BI__builtin_expl: case Builtin::BI__builtin_expf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIexp2: case Builtin::BIexp2f: @@ -138,7 +151,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_exp2f16: case Builtin::BI__builtin_exp2l: case Builtin::BI__builtin_exp2f128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIfabs: case Builtin::BIfabsf: @@ -147,13 +160,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fabsf: case Builtin::BI__builtin_fabsf16: case Builtin::BI__builtin_fabsl: - case Builtin::BI__builtin_fabsf128: { - mlir::Value Src0 = buildScalarExpr(E->getArg(0)); - auto SrcType = Src0.getType(); - auto Call = - builder.create(Src0.getLoc(), SrcType, Src0); - return RValue::get(Call->getResult(0)); - } + case Builtin::BI__builtin_fabsf128: + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIfloor: case Builtin::BIfloorf: @@ -163,7 +171,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_floorf16: case Builtin::BI__builtin_floorl: case Builtin::BI__builtin_floorf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIfma: case Builtin::BIfmaf: @@ -216,7 +224,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_logf16: case Builtin::BI__builtin_logl: case Builtin::BI__builtin_logf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlog10: case Builtin::BIlog10f: @@ -226,7 +234,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log10f16: case Builtin::BI__builtin_log10l: case Builtin::BI__builtin_log10f128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlog2: case Builtin::BIlog2f: @@ -236,7 +244,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log2f16: case Builtin::BI__builtin_log2l: case Builtin::BI__builtin_log2f128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BInearbyint: case Builtin::BInearbyintf: @@ -245,7 +253,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_nearbyintf: case Builtin::BI__builtin_nearbyintl: case Builtin::BI__builtin_nearbyintf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIpow: case Builtin::BIpowf: @@ -265,7 +273,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_rintf16: case Builtin::BI__builtin_rintl: case Builtin::BI__builtin_rintf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIround: case Builtin::BIroundf: @@ -275,7 +283,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_roundf16: case Builtin::BI__builtin_roundl: case Builtin::BI__builtin_roundf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIsin: case Builtin::BIsinf: @@ -285,7 +293,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sinf16: case Builtin::BI__builtin_sinl: case Builtin::BI__builtin_sinf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIsqrt: case Builtin::BIsqrtf: @@ -295,7 +303,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sqrtf16: case Builtin::BI__builtin_sqrtl: case Builtin::BI__builtin_sqrtf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BItrunc: case Builtin::BItruncf: @@ -305,7 +313,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_truncf16: case Builtin::BI__builtin_truncl: case Builtin::BI__builtin_truncf128: - llvm_unreachable("NYI"); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlround: case Builtin::BIlroundf: diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c new file mode 100644 index 000000000000..f62f7ae0a3d4 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -0,0 +1,589 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-cir %s -o - | FileCheck %s + +// ceil + +float my_ceilf(float f) { + return __builtin_ceilf(f); + // CHECK: cir.func @my_ceilf + // CHECK: {{.+}} = cir.ceil {{.+}} : f32 +} + +double my_ceil(double f) { + return __builtin_ceil(f); + // CHECK: cir.func @my_ceil + // CHECK: {{.+}} = cir.ceil {{.+}} : f64 +} + +long double my_ceill(long double f) { + return __builtin_ceill(f); + // CHECK: cir.func @my_ceill + // CHECK: {{.+}} = cir.ceil {{.+}} : f80 +} + +float ceilf(float); +double ceil(double); +long double ceill(long double); + +float call_ceilf(float f) { + return ceilf(f); + // CHECK: cir.func @call_ceilf + // CHECK: {{.+}} = cir.ceil {{.+}} : f32 +} + +double call_ceil(double f) { + return ceil(f); + // CHECK: cir.func @call_ceil + // CHECK: {{.+}} = cir.ceil {{.+}} : f64 +} + +long double call_ceill(long double f) { + return ceill(f); + // CHECK: cir.func @call_ceill + // CHECK: {{.+}} = cir.ceil {{.+}} : f80 +} + +// cos + +float my_cosf(float f) { + return __builtin_cosf(f); + // CHECK: cir.func @my_cosf + // CHECK: {{.+}} = cir.cos {{.+}} : f32 +} + +double my_cos(double f) { + return __builtin_cos(f); + // CHECK: cir.func @my_cos + // CHECK: {{.+}} = cir.cos {{.+}} : f64 +} + +long double my_cosl(long double f) { + return __builtin_cosl(f); + // CHECK: cir.func @my_cosl + // CHECK: {{.+}} = cir.cos {{.+}} : f80 +} + +float cosf(float); +double cos(double); +long double cosl(long double); + +float call_cosf(float f) { + return cosf(f); + // CHECK: cir.func @call_cosf + // CHECK: {{.+}} = cir.cos {{.+}} : f32 +} + +double call_cos(double f) { + return cos(f); + // CHECK: cir.func @call_cos + // CHECK: {{.+}} = cir.cos {{.+}} : f64 +} + +long double call_cosl(long double f) { + return cosl(f); + // CHECK: cir.func @call_cosl + // CHECK: {{.+}} = cir.cos {{.+}} : f80 +} + +// exp + +float my_expf(float f) { + return __builtin_expf(f); + // CHECK: cir.func @my_expf + // CHECK: {{.+}} = cir.exp {{.+}} : f32 +} + +double my_exp(double f) { + return __builtin_exp(f); + // CHECK: cir.func @my_exp + // CHECK: {{.+}} = cir.exp {{.+}} : f64 +} + +long double my_expl(long double f) { + return __builtin_expl(f); + // CHECK: cir.func @my_expl + // CHECK: {{.+}} = cir.exp {{.+}} : f80 +} + +float expf(float); +double exp(double); +long double expl(long double); + +float call_expf(float f) { + return expf(f); + // CHECK: cir.func @call_expf + // CHECK: {{.+}} = cir.exp {{.+}} : f32 +} + +double call_exp(double f) { + return exp(f); + // CHECK: cir.func @call_exp + // CHECK: {{.+}} = cir.exp {{.+}} : f64 +} + +long double call_expl(long double f) { + return expl(f); + // CHECK: cir.func @call_expl + // CHECK: {{.+}} = cir.exp {{.+}} : f80 +} + +// exp2 + +float my_exp2f(float f) { + return __builtin_exp2f(f); + // CHECK: cir.func @my_exp2f + // CHECK: {{.+}} = cir.exp2 {{.+}} : f32 +} + +double my_exp2(double f) { + return __builtin_exp2(f); + // CHECK: cir.func @my_exp2 + // CHECK: {{.+}} = cir.exp2 {{.+}} : f64 +} + +long double my_exp2l(long double f) { + return __builtin_exp2l(f); + // CHECK: cir.func @my_exp2l + // CHECK: {{.+}} = cir.exp2 {{.+}} : f80 +} + +float exp2f(float); +double exp2(double); +long double exp2l(long double); + +float call_exp2f(float f) { + return exp2f(f); + // CHECK: cir.func @call_exp2f + // CHECK: {{.+}} = cir.exp2 {{.+}} : f32 +} + +double call_exp2(double f) { + return exp2(f); + // CHECK: cir.func @call_exp2 + // CHECK: {{.+}} = cir.exp2 {{.+}} : f64 +} + +long double call_exp2l(long double f) { + return exp2l(f); + // CHECK: cir.func @call_exp2l + // CHECK: {{.+}} = cir.exp2 {{.+}} : f80 +} + +// floor + +float my_floorf(float f) { + return __builtin_floorf(f); + // CHECK: cir.func @my_floorf + // CHECK: {{.+}} = cir.floor {{.+}} : f32 +} + +double my_floor(double f) { + return __builtin_floor(f); + // CHECK: cir.func @my_floor + // CHECK: {{.+}} = cir.floor {{.+}} : f64 +} + +long double my_floorl(long double f) { + return __builtin_floorl(f); + // CHECK: cir.func @my_floorl + // CHECK: {{.+}} = cir.floor {{.+}} : f80 +} + +float floorf(float); +double floor(double); +long double floorl(long double); + +float call_floorf(float f) { + return floorf(f); + // CHECK: cir.func @call_floorf + // CHECK: {{.+}} = cir.floor {{.+}} : f32 +} + +double call_floor(double f) { + return floor(f); + // CHECK: cir.func @call_floor + // CHECK: {{.+}} = cir.floor {{.+}} : f64 +} + +long double call_floorl(long double f) { + return floorl(f); + // CHECK: cir.func @call_floorl + // CHECK: {{.+}} = cir.floor {{.+}} : f80 +} + +// log + +float my_logf(float f) { + return __builtin_logf(f); + // CHECK: cir.func @my_logf + // CHECK: {{.+}} = cir.log {{.+}} : f32 +} + +double my_log(double f) { + return __builtin_log(f); + // CHECK: cir.func @my_log + // CHECK: {{.+}} = cir.log {{.+}} : f64 +} + +long double my_logl(long double f) { + return __builtin_logl(f); + // CHECK: cir.func @my_logl + // CHECK: {{.+}} = cir.log {{.+}} : f80 +} + +float logf(float); +double log(double); +long double logl(long double); + +float call_logf(float f) { + return logf(f); + // CHECK: cir.func @call_logf + // CHECK: {{.+}} = cir.log {{.+}} : f32 +} + +double call_log(double f) { + return log(f); + // CHECK: cir.func @call_log + // CHECK: {{.+}} = cir.log {{.+}} : f64 +} + +long double call_logl(long double f) { + return logl(f); + // CHECK: cir.func @call_logl + // CHECK: {{.+}} = cir.log {{.+}} : f80 +} + +// log10 + +float my_log10f(float f) { + return __builtin_log10f(f); + // CHECK: cir.func @my_log10f + // CHECK: {{.+}} = cir.log10 {{.+}} : f32 +} + +double my_log10(double f) { + return __builtin_log10(f); + // CHECK: cir.func @my_log10 + // CHECK: {{.+}} = cir.log10 {{.+}} : f64 +} + +long double my_log10l(long double f) { + return __builtin_log10l(f); + // CHECK: cir.func @my_log10l + // CHECK: {{.+}} = cir.log10 {{.+}} : f80 +} + +float log10f(float); +double log10(double); +long double log10l(long double); + +float call_log10f(float f) { + return log10f(f); + // CHECK: cir.func @call_log10f + // CHECK: {{.+}} = cir.log10 {{.+}} : f32 +} + +double call_log10(double f) { + return log10(f); + // CHECK: cir.func @call_log10 + // CHECK: {{.+}} = cir.log10 {{.+}} : f64 +} + +long double call_log10l(long double f) { + return log10l(f); + // CHECK: cir.func @call_log10l + // CHECK: {{.+}} = cir.log10 {{.+}} : f80 +} + +// log2 + +float my_log2f(float f) { + return __builtin_log2f(f); + // CHECK: cir.func @my_log2f + // CHECK: {{.+}} = cir.log2 {{.+}} : f32 +} + +double my_log2(double f) { + return __builtin_log2(f); + // CHECK: cir.func @my_log2 + // CHECK: {{.+}} = cir.log2 {{.+}} : f64 +} + +long double my_log2l(long double f) { + return __builtin_log2l(f); + // CHECK: cir.func @my_log2l + // CHECK: {{.+}} = cir.log2 {{.+}} : f80 +} + +float log2f(float); +double log2(double); +long double log2l(long double); + +float call_log2f(float f) { + return log2f(f); + // CHECK: cir.func @call_log2f + // CHECK: {{.+}} = cir.log2 {{.+}} : f32 +} + +double call_log2(double f) { + return log2(f); + // CHECK: cir.func @call_log2 + // CHECK: {{.+}} = cir.log2 {{.+}} : f64 +} + +long double call_log2l(long double f) { + return log2l(f); + // CHECK: cir.func @call_log2l + // CHECK: {{.+}} = cir.log2 {{.+}} : f80 +} + +// nearbyint + +float my_nearbyintf(float f) { + return __builtin_nearbyintf(f); + // CHECK: cir.func @my_nearbyintf + // CHECK: {{.+}} = cir.nearbyint {{.+}} : f32 +} + +double my_nearbyint(double f) { + return __builtin_nearbyint(f); + // CHECK: cir.func @my_nearbyint + // CHECK: {{.+}} = cir.nearbyint {{.+}} : f64 +} + +long double my_nearbyintl(long double f) { + return __builtin_nearbyintl(f); + // CHECK: cir.func @my_nearbyintl + // CHECK: {{.+}} = cir.nearbyint {{.+}} : f80 +} + +float nearbyintf(float); +double nearbyint(double); +long double nearbyintl(long double); + +float call_nearbyintf(float f) { + return nearbyintf(f); + // CHECK: cir.func @call_nearbyintf + // CHECK: {{.+}} = cir.nearbyint {{.+}} : f32 +} + +double call_nearbyint(double f) { + return nearbyint(f); + // CHECK: cir.func @call_nearbyint + // CHECK: {{.+}} = cir.nearbyint {{.+}} : f64 +} + +long double call_nearbyintl(long double f) { + return nearbyintl(f); + // CHECK: cir.func @call_nearbyintl + // CHECK: {{.+}} = cir.nearbyint {{.+}} : f80 +} + +// rint + +float my_rintf(float f) { + return __builtin_rintf(f); + // CHECK: cir.func @my_rintf + // CHECK: {{.+}} = cir.rint {{.+}} : f32 +} + +double my_rint(double f) { + return __builtin_rint(f); + // CHECK: cir.func @my_rint + // CHECK: {{.+}} = cir.rint {{.+}} : f64 +} + +long double my_rintl(long double f) { + return __builtin_rintl(f); + // CHECK: cir.func @my_rintl + // CHECK: {{.+}} = cir.rint {{.+}} : f80 +} + +float rintf(float); +double rint(double); +long double rintl(long double); + +float call_rintf(float f) { + return rintf(f); + // CHECK: cir.func @call_rintf + // CHECK: {{.+}} = cir.rint {{.+}} : f32 +} + +double call_rint(double f) { + return rint(f); + // CHECK: cir.func @call_rint + // CHECK: {{.+}} = cir.rint {{.+}} : f64 +} + +long double call_rintl(long double f) { + return rintl(f); + // CHECK: cir.func @call_rintl + // CHECK: {{.+}} = cir.rint {{.+}} : f80 +} + +// round + +float my_roundf(float f) { + return __builtin_roundf(f); + // CHECK: cir.func @my_roundf + // CHECK: {{.+}} = cir.round {{.+}} : f32 +} + +double my_round(double f) { + return __builtin_round(f); + // CHECK: cir.func @my_round + // CHECK: {{.+}} = cir.round {{.+}} : f64 +} + +long double my_roundl(long double f) { + return __builtin_roundl(f); + // CHECK: cir.func @my_roundl + // CHECK: {{.+}} = cir.round {{.+}} : f80 +} + +float roundf(float); +double round(double); +long double roundl(long double); + +float call_roundf(float f) { + return roundf(f); + // CHECK: cir.func @call_roundf + // CHECK: {{.+}} = cir.round {{.+}} : f32 +} + +double call_round(double f) { + return round(f); + // CHECK: cir.func @call_round + // CHECK: {{.+}} = cir.round {{.+}} : f64 +} + +long double call_roundl(long double f) { + return roundl(f); + // CHECK: cir.func @call_roundl + // CHECK: {{.+}} = cir.round {{.+}} : f80 +} + +// sin + +float my_sinf(float f) { + return __builtin_sinf(f); + // CHECK: cir.func @my_sinf + // CHECK: {{.+}} = cir.sin {{.+}} : f32 +} + +double my_sin(double f) { + return __builtin_sin(f); + // CHECK: cir.func @my_sin + // CHECK: {{.+}} = cir.sin {{.+}} : f64 +} + +long double my_sinl(long double f) { + return __builtin_sinl(f); + // CHECK: cir.func @my_sinl + // CHECK: {{.+}} = cir.sin {{.+}} : f80 +} + +float sinf(float); +double sin(double); +long double sinl(long double); + +float call_sinf(float f) { + return sinf(f); + // CHECK: cir.func @call_sinf + // CHECK: {{.+}} = cir.sin {{.+}} : f32 +} + +double call_sin(double f) { + return sin(f); + // CHECK: cir.func @call_sin + // CHECK: {{.+}} = cir.sin {{.+}} : f64 +} + +long double call_sinl(long double f) { + return sinl(f); + // CHECK: cir.func @call_sinl + // CHECK: {{.+}} = cir.sin {{.+}} : f80 +} + +// sqrt + +float my_sqrtf(float f) { + return __builtin_sqrtf(f); + // CHECK: cir.func @my_sqrtf + // CHECK: {{.+}} = cir.sqrt {{.+}} : f32 +} + +double my_sqrt(double f) { + return __builtin_sqrt(f); + // CHECK: cir.func @my_sqrt + // CHECK: {{.+}} = cir.sqrt {{.+}} : f64 +} + +long double my_sqrtl(long double f) { + return __builtin_sqrtl(f); + // CHECK: cir.func @my_sqrtl + // CHECK: {{.+}} = cir.sqrt {{.+}} : f80 +} + +float sqrtf(float); +double sqrt(double); +long double sqrtl(long double); + +float call_sqrtf(float f) { + return sqrtf(f); + // CHECK: cir.func @call_sqrtf + // CHECK: {{.+}} = cir.sqrt {{.+}} : f32 +} + +double call_sqrt(double f) { + return sqrt(f); + // CHECK: cir.func @call_sqrt + // CHECK: {{.+}} = cir.sqrt {{.+}} : f64 +} + +long double call_sqrtl(long double f) { + return sqrtl(f); + // CHECK: cir.func @call_sqrtl + // CHECK: {{.+}} = cir.sqrt {{.+}} : f80 +} + +// trunc + +float my_truncf(float f) { + return __builtin_truncf(f); + // CHECK: cir.func @my_truncf + // CHECK: {{.+}} = cir.trunc {{.+}} : f32 +} + +double my_trunc(double f) { + return __builtin_trunc(f); + // CHECK: cir.func @my_trunc + // CHECK: {{.+}} = cir.trunc {{.+}} : f64 +} + +long double my_truncl(long double f) { + return __builtin_truncl(f); + // CHECK: cir.func @my_truncl + // CHECK: {{.+}} = cir.trunc {{.+}} : f80 +} + +float truncf(float); +double trunc(double); +long double truncl(long double); + +float call_truncf(float f) { + return truncf(f); + // CHECK: cir.func @call_truncf + // CHECK: {{.+}} = cir.trunc {{.+}} : f32 +} + +double call_trunc(double f) { + return trunc(f); + // CHECK: cir.func @call_trunc + // CHECK: {{.+}} = cir.trunc {{.+}} : f64 +} + +long double call_truncl(long double f) { + return truncl(f); + // CHECK: cir.func @call_truncl + // CHECK: {{.+}} = cir.trunc {{.+}} : f80 +} From 6ff6512d450a36f1d54537868ca794e3e699bb3f Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 17 Jan 2024 03:30:51 +0800 Subject: [PATCH 1343/2301] [CIR] Fix int constant type verification (#386) When introducing attribute `#cir.int`, the constant type verification is not updated. If a `cir.const` operation produces an integral constant from a `#cir.int` attribute, the integer's type is not verified: ```mlir %1 = cir.const(#cir.int<0> : !cir.int) : !cir.int // Not verified: !cir.int differs from !cir.int ``` The corresponding test is also wrong but fail to be detected. This patch fixes this issue. --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 2 +- clang/test/CIR/IR/int.cir | 8 ++++---- clang/test/CIR/IR/invalid.cir | 9 +++++++++ 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7b71b7d61cda..eaeb1ddc5bee 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -275,7 +275,7 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return success(); } - if (attrType.isa()) { + if (attrType.isa()) { auto at = attrType.cast(); if (at.getType() != opType) { return op->emitOpError("result type (") diff --git a/clang/test/CIR/IR/int.cir b/clang/test/CIR/IR/int.cir index 233198e4e335..3acaacd011f7 100644 --- a/clang/test/CIR/IR/int.cir +++ b/clang/test/CIR/IR/int.cir @@ -8,10 +8,10 @@ !s32i = !cir.int !s64i = !cir.int -!u8i = !cir.int -!u16i = !cir.int -!u32i = !cir.int -!u64i = !cir.int +!u8i = !cir.int +!u16i = !cir.int +!u32i = !cir.int +!u64i = !cir.int cir.func @validIntTypesAndAttributes() -> () { diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index d353c7d7b878..386417ae2ffd 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -781,3 +781,12 @@ module { } } +// ----- + +!s8i = !cir.int +!u8i = !cir.int +cir.func @const_type_mismatch() -> () { + // expected-error@+1 {{'cir.const' op result type ('!cir.int') does not match value type ('!cir.int')}} + %2 = cir.const(#cir.int<0> : !s8i) : !u8i + cir.return +} From 7b6e833663c8af7cf662ceb390e19559af3e7c36 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Tue, 16 Jan 2024 11:42:45 -0800 Subject: [PATCH 1344/2301] [CIR] Vector types, part 2 (#387) This is part 2 of implementing vector types and vector operations in ClangIR, issue #284. Create new operation `cir.vec.insert`, which changes one element of an existing vector object and returns the modified vector object. The input and output vectors are prvalues; this operation does not touch memory. The assembly format and the order of the arguments match that of llvm.insertelement in the LLVM dialect, since the operations have identical semantics. Implement vector element lvalues in class `LValue`, adding member functions `getVectorAddress()`, `getVectorPointer()`, `getVectorIdx()`, and `MakeVectorElt(...)`. The assembly format for operation `cir.vec.extract` was changed to match that of llvm.extractelement in the LLVM dialect, since the operations have identical semantics. These two features, `cir.vec.insert` and vector element lvalues, are used to implement `v[n] = e`, where `v` is a vector. This is a little tricky, because `v[n]` isn't really an lvalue, as its address cannot be taken. The only place it can be used as an lvalue is on the left-hand side of an assignment. Implement unary operators on vector objects (except for logical not on a vector mask, which will be covered in a future commit for boolean vectors). The code for lowering cir.unary for all types, in `CIRUnaryOpLowering::matchAndRewrite`, was largely rewritten. Support for unary `+` on non-vector pointer types was added. (It was already supported and tested in AST->ClangIR CodeGen, but was missing from ClangIR->LLVM Dialect lowering.) Add tests for all binary vector arithmetic operations other than relational operators and shift operators. There were all working after the previous vector types commit, but only addition had beet tested at the time. Co-authored-by: Bruno Cardoso Lopes --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 33 ++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 27 ++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenValue.h | 26 +++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 134 ++++++++---- clang/test/CIR/CodeGen/vectype.cpp | 122 +++++++---- clang/test/CIR/IR/invalid.cir | 55 ++++- clang/test/CIR/Lowering/unary-plus-minus.cir | 3 +- clang/test/CIR/Lowering/vectype.cpp | 202 ++++++++++++++++++ 9 files changed, 519 insertions(+), 85 deletions(-) create mode 100644 clang/test/CIR/Lowering/vectype.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 6fab7c65d010..49c2bce154a8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1672,14 +1672,39 @@ def GetMemberOp : CIR_Op<"get_member"> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// VecInsertOp +//===----------------------------------------------------------------------===// + +def VecInsertOp : CIR_Op<"vec.insert", [Pure, + TypesMatchWith<"argument type matches vector element type", "vec", "value", + "$_self.cast().getEltType()">, + AllTypesMatch<["result", "vec"]>]> { + + let summary = "Insert one element into a vector object"; + let description = [{ + The `cir.vec.insert` operation replaces the element of the given vector at + the given index with the given value. The new vector with the inserted + element is returned. + }]; + + let arguments = (ins CIR_VectorType:$vec, AnyType:$value, CIR_IntType:$index); + let results = (outs CIR_VectorType:$result); + + let assemblyFormat = [{ + $value `,` $vec `[` $index `:` type($index) `]` attr-dict `:` type($vec) + }]; + + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // VecExtractOp //===----------------------------------------------------------------------===// def VecExtractOp : CIR_Op<"vec.extract", [Pure, - TypesMatchWith<"type of 'result' matches element type of 'vec'", - "vec", "result", - "$_self.cast().getEltType()">]> { + TypesMatchWith<"type of 'result' matches element type of 'vec'", "vec", + "result", "$_self.cast().getEltType()">]> { let summary = "Extract one element from a vector object"; let description = [{ @@ -1691,7 +1716,7 @@ def VecExtractOp : CIR_Op<"vec.extract", [Pure, let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ - $vec `[` $index `:` type($index) `]` type($vec) `->` type($result) attr-dict + $vec `[` $index `:` type($index) `]` attr-dict `:` type($vec) }]; let hasVerifier = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index e3582737b4f6..828687bcea8a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -553,6 +553,11 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, llvm_unreachable("NYI"); } + if (const auto *ClangVecTy = Ty->getAs()) { + // TODO(CIR): this has fallen out of date with codegen + llvm_unreachable("NYI: Special treatment of 3-element vector store"); + } + // Update the alloca with more info on initialization. assert(Addr.getPointer() && "expected pointer to exist"); auto SrcAlloca = @@ -620,6 +625,18 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, } void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { + if (!Dst.isSimple()) { + if (Dst.isVectorElt()) { + // Read/modify/write the vector, inserting the new element + mlir::Location loc = Dst.getVectorPointer().getLoc(); + mlir::Value Vector = builder.createLoad(loc, Dst.getVectorAddress()); + Vector = builder.create( + loc, Vector, Src.getScalarVal(), Dst.getVectorIdx()); + builder.createStore(loc, Vector, Dst.getVectorAddress()); + return; + } + llvm_unreachable("NYI: non-simple store through lvalue"); + } assert(Dst.isSimple() && "only implemented simple"); // There's special magic for assigning into an ARC-qualified l-value. @@ -1385,7 +1402,10 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // with this subscript. if (E->getBase()->getType()->isVectorType() && !isa(E->getBase())) { - llvm_unreachable("vector subscript is NYI"); + LValue LHS = buildLValue(E->getBase()); + auto Index = EmitIdxAfterBase(/*Promote=*/false); + return LValue::MakeVectorElt(LHS.getAddress(), Index, + E->getBase()->getType(), LHS.getBaseInfo()); } // All the other cases basically behave like simple offsetting. @@ -2377,6 +2397,11 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, llvm_unreachable("NYI"); } + if (const auto *ClangVecTy = Ty->getAs()) { + // TODO(CIR): this has fallen out of sync with codegen + llvm_unreachable("NYI: Special treatment of 3-element vector load"); + } + mlir::cir::LoadOp Load = builder.create( Loc, Addr.getElementType(), Addr.getPointer()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 880e47f6efb4..68e2ca82c2ff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1646,7 +1646,7 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { if (dstTy.isa()) return boolVal; - llvm_unreachable("destination type for negation unary operator is NYI"); + llvm_unreachable("destination type for logical-not unary operator is NYI"); } // Conversion from bool, integral, or floating-point to integral or diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index c6edeb4d4fe4..86b6f5443856 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -207,6 +207,7 @@ class LValue { unsigned Alignment; mlir::Value V; mlir::Type ElementType; + mlir::Value VectorIdx; // Index for vector subscript LValueBaseInfo BaseInfo; const CIRGenBitFieldInfo *BitFieldInfo{0}; @@ -301,6 +302,31 @@ class LValue { const clang::Qualifiers &getQuals() const { return Quals; } clang::Qualifiers &getQuals() { return Quals; } + // vector element lvalue + Address getVectorAddress() const { + return Address(getVectorPointer(), ElementType, getAlignment()); + } + mlir::Value getVectorPointer() const { + assert(isVectorElt()); + return V; + } + mlir::Value getVectorIdx() const { + assert(isVectorElt()); + return VectorIdx; + } + + static LValue MakeVectorElt(Address vecAddress, mlir::Value Index, + clang::QualType type, LValueBaseInfo BaseInfo) { + LValue R; + R.LVType = VectorElt; + R.V = vecAddress.getPointer(); + R.ElementType = vecAddress.getElementType(); + R.VectorIdx = Index; + R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(), + BaseInfo); + return R; + } + // bitfield lvalue Address getBitFieldAddress() const { return Address(getBitFieldPointer(), ElementType, getAlignment()); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 662a12ebc7a6..2f4ac580f8c3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1105,12 +1105,12 @@ class CIRVectorCreateLowering // Start with an 'undef' value for the vector. Then 'insertelement' for // each of the vector elements. auto vecTy = op.getType().dyn_cast(); - assert(vecTy && "result type of cir.vec op is not VectorType"); + assert(vecTy && "result type of cir.vec.create op is not VectorType"); auto llvmTy = typeConverter->convertType(vecTy); auto loc = op.getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); assert(vecTy.getSize() == op.getElements().size() && - "cir.vec operands count doesn't match vector type elements count"); + "cir.vec.create op count doesn't match vector type elements count"); for (uint64_t i = 0; i < vecTy.getSize(); ++i) { mlir::Value indexValue = rewriter.create( loc, rewriter.getI64Type(), i); @@ -1122,6 +1122,20 @@ class CIRVectorCreateLowering } }; +class CIRVectorInsertLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecInsertOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getVec(), adaptor.getValue(), adaptor.getIndex()); + return mlir::success(); + } +}; + class CIRVectorExtractLowering : public mlir::OpConversionPattern { public: @@ -1536,24 +1550,33 @@ class CIRUnaryOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::UnaryOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Type type = op.getInput().getType(); - - auto llvmInType = adaptor.getInput().getType(); - auto llvmType = getTypeConverter()->convertType(op.getType()); + assert(op.getType() == op.getInput().getType() && + "Unary operation's operand type and result type are different"); + mlir::Type type = op.getType(); + mlir::Type elementType = type; + bool IsVector = false; + if (auto VecType = type.dyn_cast()) { + IsVector = true; + elementType = VecType.getEltType(); + } + auto llvmType = getTypeConverter()->convertType(type); + auto loc = op.getLoc(); - // Integer unary operations. - if (type.isa()) { + // Integer unary operations: + - ~ ++ -- + if (elementType.isa()) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Inc: { + assert(!IsVector && "++ not allowed on vector types"); auto One = rewriter.create( - op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 1)); + loc, llvmType, mlir::IntegerAttr::get(llvmType, 1)); rewriter.replaceOpWithNewOp(op, llvmType, adaptor.getInput(), One); return mlir::success(); } case mlir::cir::UnaryOpKind::Dec: { + assert(!IsVector && "-- not allowed on vector types"); auto One = rewriter.create( - op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 1)); + loc, llvmType, mlir::IntegerAttr::get(llvmType, 1)); rewriter.replaceOpWithNewOp(op, llvmType, adaptor.getInput(), One); return mlir::success(); @@ -1563,15 +1586,39 @@ class CIRUnaryOpLowering return mlir::success(); } case mlir::cir::UnaryOpKind::Minus: { - auto Zero = rewriter.create( - op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 0)); + mlir::Value Zero; + if (IsVector) + Zero = rewriter.create(loc, llvmType); + else + Zero = rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, 0)); rewriter.replaceOpWithNewOp(op, llvmType, Zero, adaptor.getInput()); return mlir::success(); } case mlir::cir::UnaryOpKind::Not: { - auto MinusOne = rewriter.create( - op.getLoc(), llvmType, mlir::IntegerAttr::get(llvmType, -1)); + // bit-wise compliment operator, implemented as an XOR with -1. + mlir::Value MinusOne; + if (IsVector) { + // Creating a vector object with all -1 values is easier said than + // done. It requires a series of insertelement ops. + mlir::Type llvmElementType = + getTypeConverter()->convertType(elementType); + auto MinusOneInt = rewriter.create( + loc, llvmElementType, + mlir::IntegerAttr::get(llvmElementType, -1)); + MinusOne = rewriter.create(loc, llvmType); + auto NumElements = type.dyn_cast().getSize(); + for (uint64_t i = 0; i < NumElements; ++i) { + mlir::Value indexValue = rewriter.create( + loc, rewriter.getI64Type(), i); + MinusOne = rewriter.create( + loc, MinusOne, MinusOneInt, indexValue); + } + } else { + MinusOne = rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, -1)); + } rewriter.replaceOpWithNewOp(op, llvmType, MinusOne, adaptor.getInput()); return mlir::success(); @@ -1579,21 +1626,23 @@ class CIRUnaryOpLowering } } - // Floating point unary operations. - if (type.isa()) { + // Floating point unary operations: + - ++ -- + if (elementType.isa()) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Inc: { - auto oneAttr = rewriter.getFloatAttr(llvmInType, 1.0); - auto oneConst = rewriter.create( - op.getLoc(), llvmInType, oneAttr); + assert(!IsVector && "++ not allowed on vector types"); + auto oneAttr = rewriter.getFloatAttr(llvmType, 1.0); + auto oneConst = + rewriter.create(loc, llvmType, oneAttr); rewriter.replaceOpWithNewOp(op, llvmType, oneConst, adaptor.getInput()); return mlir::success(); } case mlir::cir::UnaryOpKind::Dec: { - auto negOneAttr = rewriter.getFloatAttr(llvmInType, -1.0); - auto negOneConst = rewriter.create( - op.getLoc(), llvmInType, negOneAttr); + assert(!IsVector && "-- not allowed on vector types"); + auto negOneAttr = rewriter.getFloatAttr(llvmType, -1.0); + auto negOneConst = + rewriter.create(loc, llvmType, negOneAttr); rewriter.replaceOpWithNewOp( op, llvmType, negOneConst, adaptor.getInput()); return mlir::success(); @@ -1602,35 +1651,48 @@ class CIRUnaryOpLowering rewriter.replaceOp(op, adaptor.getInput()); return mlir::success(); case mlir::cir::UnaryOpKind::Minus: { - auto negOneAttr = mlir::FloatAttr::get(llvmInType, -1.0); - auto negOneConst = rewriter.create( - op.getLoc(), llvmInType, negOneAttr); - rewriter.replaceOpWithNewOp( - op, llvmType, negOneConst, adaptor.getInput()); + rewriter.replaceOpWithNewOp(op, llvmType, + adaptor.getInput()); return mlir::success(); } default: - op.emitError() << "Floating point unary lowering ot implemented"; - return mlir::failure(); + return op.emitError() + << "Unknown floating-point unary operation during CIR lowering"; } } - // Boolean unary operations. - if (type.isa()) { + // Boolean unary operations: ! only. (For all others, the operand has + // already been promoted to int.) + if (elementType.isa()) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Not: + assert(!IsVector && "NYI: op! on vector mask"); rewriter.replaceOpWithNewOp( op, llvmType, adaptor.getInput(), rewriter.create( - op.getLoc(), llvmType, mlir::IntegerAttr::get(llvmType, 1))); + loc, llvmType, mlir::IntegerAttr::get(llvmType, 1))); + return mlir::success(); + default: + return op.emitError() + << "Unknown boolean unary operation during CIR lowering"; + } + } + + // Pointer unary operations: + only. (++ and -- of pointers are implemented + // with cir.ptr_stride, not cir.unary.) + if (elementType.isa()) { + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Plus: + rewriter.replaceOp(op, adaptor.getInput()); return mlir::success(); default: - op.emitError() << "Unary operator not implemented for bool type"; + op.emitError() << "Unknown pointer unary operation during CIR lowering"; return mlir::failure(); } } - return op.emitError() << "Unary operation has unsupported type: " << type; + return op.emitError() << "Unary operation has unsupported type: " + << elementType; } }; @@ -2061,8 +2123,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, - CIRVectorExtractLowering, CIRStackSaveLowering, CIRStackRestoreLowering>( - converter, patterns.getContext()); + CIRVectorInsertLowering, CIRVectorExtractLowering, CIRStackSaveLowering, + CIRStackRestoreLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index 80f6bf39258c..54b2ade13c05 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -1,40 +1,90 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// XFAIL: * -typedef int int4 __attribute__((vector_size(16))); -int test_vector_basic(int x, int y, int z) { - int4 a = { 1, 2, 3, 4 }; - int4 b = { x, y, z, x + y + z }; - int4 c = a + b; - return c[1]; +typedef int vi4 __attribute__((vector_size(16))); +typedef double vd2 __attribute__((vector_size(16))); + +void vector_int_test(int x) { + + // Vector constant. Not yet implemented. Expected results will change from + // cir.vec.create to cir.const. + vi4 a = { 1, 2, 3, 4 }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : + + // Non-const vector initialization. + vi4 b = { x, 5, 6, x + 1 }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : + + // Extract element + int c = a[x]; + // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : + + // Insert element + a[x] = x; + // CHECK: %[[#LOADEDVI:]] = cir.load %[[#STORAGEVI:]] : cir.ptr >, !cir.vector + // CHECK: %[[#UPDATEDVI:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVI]][%{{[0-9]+}} : !s32i] : + // CHECK: cir.store %[[#UPDATEDVI]], %[[#STORAGEVI]] : !cir.vector, cir.ptr > + + // Binary arithmetic operations + vi4 d = a + b; + // CHECK: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 e = a - b; + // CHECK: %{{[0-9]+}} = cir.binop(sub, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 f = a * b; + // CHECK: %{{[0-9]+}} = cir.binop(mul, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 g = a / b; + // CHECK: %{{[0-9]+}} = cir.binop(div, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 h = a % b; + // CHECK: %{{[0-9]+}} = cir.binop(rem, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 i = a & b; + // CHECK: %{{[0-9]+}} = cir.binop(and, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 j = a | b; + // CHECK: %{{[0-9]+}} = cir.binop(or, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 k = a ^ b; + // CHECK: %{{[0-9]+}} = cir.binop(xor, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + + // Unary arithmetic operations + vi4 l = +a; + // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector + vi4 m = -a; + // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector + vi4 n = ~a; + // CHECK: %{{[0-9]+}} = cir.unary(not, %{{[0-9]+}}) : !cir.vector, !cir.vector } -// CHECK: %4 = cir.alloca !cir.vector, cir.ptr >, ["a", init] {alignment = 16 : i64} -// CHECK: %5 = cir.alloca !cir.vector, cir.ptr >, ["b", init] {alignment = 16 : i64} -// CHECK: %6 = cir.alloca !cir.vector, cir.ptr >, ["c", init] {alignment = 16 : i64} - -// CHECK: %7 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: %8 = cir.const(#cir.int<2> : !s32i) : !s32i -// CHECK: %9 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: %10 = cir.const(#cir.int<4> : !s32i) : !s32i -// CHECK: %11 = cir.vec.create(%7, %8, %9, %10 : !s32i, !s32i, !s32i, !s32i) : -// CHECK: cir.store %11, %4 : !cir.vector, cir.ptr > -// CHECK: %12 = cir.load %0 : cir.ptr , !s32i -// CHECK: %13 = cir.load %1 : cir.ptr , !s32i -// CHECK: %14 = cir.load %2 : cir.ptr , !s32i -// CHECK: %15 = cir.load %0 : cir.ptr , !s32i -// CHECK: %16 = cir.load %1 : cir.ptr , !s32i -// CHECK: %17 = cir.binop(add, %15, %16) : !s32i -// CHECK: %18 = cir.load %2 : cir.ptr , !s32i -// CHECK: %19 = cir.binop(add, %17, %18) : !s32i -// CHECK: %20 = cir.vec.create(%12, %13, %14, %19 : !s32i, !s32i, !s32i, !s32i) : -// CHECK: cir.store %20, %5 : !cir.vector, cir.ptr > -// CHECK: %21 = cir.load %4 : cir.ptr >, !cir.vector -// CHECK: %22 = cir.load %5 : cir.ptr >, !cir.vector -// CHECK: %23 = cir.binop(add, %21, %22) : !cir.vector -// CHECK: cir.store %23, %6 : !cir.vector, cir.ptr > -// CHECK: %24 = cir.load %6 : cir.ptr >, !cir.vector -// CHECK: %25 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: %26 = cir.vec.extract %24[%25 : !s32i] -> !s32i -// CHECK: cir.store %26, %3 : !s32i, cir.ptr -// CHECK: %27 = cir.load %3 : cir.ptr , !s32i -// CHECK: cir.return %27 : !s32i +void vector_double_test(int x, double y) { + // Vector constant. Not yet implemented. Expected results will change from + // cir.vec.create to cir.const. + vd2 a = { 1.5, 2.5 }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : f64, f64) : + + // Non-const vector initialization. + vd2 b = { y, y + 1.0 }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : f64, f64) : + + // Extract element + double c = a[x]; + // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : + + // Insert element + a[x] = y; + // CHECK: %[[#LOADEDVF:]] = cir.load %[[#STORAGEVF:]] : cir.ptr >, !cir.vector + // CHECK: %[[#UPDATEDVF:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVF]][%{{[0-9]+}} : !s32i] : + // CHECK: cir.store %[[#UPDATEDVF]], %[[#STORAGEVF]] : !cir.vector, cir.ptr > + + // Binary arithmetic operations + vd2 d = a + b; + // CHECK: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vd2 e = a - b; + // CHECK: %{{[0-9]+}} = cir.binop(sub, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vd2 f = a * b; + // CHECK: %{{[0-9]+}} = cir.binop(mul, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vd2 g = a / b; + // CHECK: %{{[0-9]+}} = cir.binop(div, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + + // Unary arithmetic operations + vd2 l = +a; + // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector + vd2 m = -a; + // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector +} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 386417ae2ffd..e73b0ef0cbbb 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -418,6 +418,7 @@ module { cir.func @vec_op_size() { %0 = cir.const(#cir.int<1> : !s32i) : !s32i %1 = cir.vec.create(%0 : !s32i) : // expected-error {{'cir.vec.create' op operand count of 1 doesn't match vector type '!cir.vector x 2>' element count of 2}} + cir.return } // ----- @@ -428,17 +429,61 @@ cir.func @vec_op_type() { %0 = cir.const(#cir.int<1> : !s32i) : !s32i %1 = cir.const(#cir.int<2> : !u32i) : !u32i %2 = cir.vec.create(%0, %1 : !s32i, !u32i) : // expected-error {{'cir.vec.create' op operand type '!cir.int' doesn't match vector element type '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_extract_non_int_idx() { + %0 = cir.const(1.5e+00 : f64) : f64 + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : + %3 = cir.vec.extract %2[%0 : f64] : // expected-error {{expected '<'}} + cir.return } // ----- !s32i = !cir.int !u32i = !cir.int -cir.func @vec_extract_type() { - %0 = cir.const(#cir.int<1> : !s32i) : !s32i - %1 = cir.const(#cir.int<2> : !s32i) : !s32i - %2 = cir.vec.create(%0, %1 : !s32i, !s32i) : - %3 = cir.vec.extract %2[%0 : !s32i] -> !u32i // expected-error {{'cir.vec.extract' op failed to verify that type of 'result' matches element type of 'vec'}} +cir.func @vec_extract_bad_type() { + %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : + %3 = cir.vec.extract %2[%1 : !s32i] : // expected-note {{prior use here}} + cir.store %3, %0 : !u32i, cir.ptr // expected-error {{use of value '%3' expects different type than prior uses: '!cir.int' vs '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_extract_non_vector() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.extract %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.extract' invalid kind of Type specified}} + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int +cir.func @vec_insert_bad_type() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : + %2 = cir.const(#cir.int<0> : !u32i) : !u32i // expected-note {{prior use here}} + %3 = cir.vec.insert %2, %1[%0 : !s32i] : // expected-error {{use of value '%2' expects different type than prior uses: '!cir.int' vs '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_insert_non_vector() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.insert %0, %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.insert' invalid kind of Type specified}} + cir.return } // ----- diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index ffadbc3df3be..a4e254939912 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -37,8 +37,7 @@ module { %3 = cir.load %0 : cir.ptr , f64 %4 = cir.unary(minus, %3) : f64, f64 // MLIR: %[[#F_MINUS:]] = llvm.load %{{[0-9]}} : !llvm.ptr -> f64 - // MLIR: %[[#F_NEG_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f64) : f64 - // MLIR: %5 = llvm.fmul %[[#F_NEG_ONE]], %[[#F_MINUS]] : f64 + // MLIR: %{{[0-9]}} = llvm.fneg %[[#F_MINUS]] : f64 cir.return } } diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp new file mode 100644 index 000000000000..84686fcef505 --- /dev/null +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -0,0 +1,202 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: cir-opt %t.cir -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s +// XFAIL: * + +typedef int vi4 __attribute__((vector_size(16))); +typedef double vd2 __attribute__((vector_size(16))); + +void vector_int_test(int x) { + + // Vector constant. Not yet implemented. Expected results will change when + // fully implemented. + vi4 a = { 1, 2, 3, 4 }; + // CHECK: %[[#T30:]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[#T31:]] = llvm.mlir.constant(2 : i32) : i32 + // CHECK: %[[#T32:]] = llvm.mlir.constant(3 : i32) : i32 + // CHECK: %[[#T33:]] = llvm.mlir.constant(4 : i32) : i32 + // CHECK: %[[#T34:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#T35:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T36:]] = llvm.insertelement %[[#T30]], %[[#T34]][%[[#T35]] : i64] : vector<4xi32> + // CHECK: %[[#T37:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T38:]] = llvm.insertelement %[[#T31]], %[[#T36]][%[[#T37]] : i64] : vector<4xi32> + // CHECK: %[[#T39:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#T40:]] = llvm.insertelement %[[#T32]], %[[#T38]][%[[#T39]] : i64] : vector<4xi32> + // CHECK: %[[#T41:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#T42:]] = llvm.insertelement %[[#T33]], %[[#T40]][%[[#T41]] : i64] : vector<4xi32> + // CHECK: llvm.store %[[#T42]], %[[#T3:]] : vector<4xi32>, !llvm.ptr + + // Non-const vector initialization. + vi4 b = { x, 5, 6, x + 1 }; + // CHECK: %[[#T43:]] = llvm.load %[[#T1:]] : !llvm.ptr -> i32 + // CHECK: %[[#T44:]] = llvm.mlir.constant(5 : i32) : i32 + // CHECK: %[[#T45:]] = llvm.mlir.constant(6 : i32) : i32 + // CHECK: %[[#T46:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T47:]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[#T48:]] = llvm.add %[[#T46]], %[[#T47]] : i32 + // CHECK: %[[#T49:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#T50:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T51:]] = llvm.insertelement %[[#T43]], %[[#T49]][%[[#T50]] : i64] : vector<4xi32> + // CHECK: %[[#T52:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T53:]] = llvm.insertelement %[[#T44]], %[[#T51]][%[[#T52]] : i64] : vector<4xi32> + // CHECK: %[[#T54:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#T55:]] = llvm.insertelement %[[#T45]], %[[#T53]][%[[#T54]] : i64] : vector<4xi32> + // CHECK: %[[#T56:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#T57:]] = llvm.insertelement %[[#T48]], %[[#T55]][%[[#T56]] : i64] : vector<4xi32> + // CHECK: llvm.store %[[#T57]], %[[#T5:]] : vector<4xi32>, !llvm.ptr + + // Extract element. + int c = a[x]; + // CHECK: %[[#T58:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T59:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T60:]] = llvm.extractelement %[[#T58]][%[[#T59]] : i32] : vector<4xi32> + // CHECK: llvm.store %[[#T60]], %[[#T7:]] : i32, !llvm.ptr + + // Insert element. + a[x] = x; + // CHECK: %[[#T61:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T62:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T63:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T64:]] = llvm.insertelement %[[#T61]], %[[#T63]][%[[#T62]] : i32] : vector<4xi32> + // CHECK: llvm.store %[[#T64]], %[[#T3]] : vector<4xi32>, !llvm.ptr + + // Binary arithmetic operators. + vi4 d = a + b; + // CHECK: %[[#T65:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T66:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T67:]] = llvm.add %[[#T65]], %[[#T66]] : vector<4xi32> + // CHECK: llvm.store %[[#T67]], %[[#T9:]] : vector<4xi32>, !llvm.ptr + vi4 e = a - b; + // CHECK: %[[#T68:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T69:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T70:]] = llvm.sub %[[#T68]], %[[#T69]] : vector<4xi32> + // CHECK: llvm.store %[[#T70]], %[[#T11:]] : vector<4xi32>, !llvm.ptr + vi4 f = a * b; + // CHECK: %[[#T71:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T72:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T73:]] = llvm.mul %[[#T71]], %[[#T72]] : vector<4xi32> + // CHECK: llvm.store %[[#T73]], %[[#T13:]] : vector<4xi32>, !llvm.ptr + vi4 g = a / b; + // CHECK: %[[#T74:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T75:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T76:]] = llvm.sdiv %[[#T74]], %[[#T75]] : vector<4xi32> + // CHECK: llvm.store %[[#T76]], %[[#T15:]] : vector<4xi32>, !llvm.ptr + vi4 h = a % b; + // CHECK: %[[#T77:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T78:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T79:]] = llvm.srem %[[#T77]], %[[#T78]] : vector<4xi32> + // CHECK: llvm.store %[[#T79]], %[[#T17:]] : vector<4xi32>, !llvm.ptr + vi4 i = a & b; + // CHECK: %[[#T80:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T81:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T82:]] = llvm.and %[[#T80]], %[[#T81]] : vector<4xi32> + // CHECK: llvm.store %[[#T82]], %[[#T19:]] : vector<4xi32>, !llvm.ptr + vi4 j = a | b; + // CHECK: %[[#T83:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T84:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T85:]] = llvm.or %[[#T83]], %[[#T84]] : vector<4xi32> + // CHECK: llvm.store %[[#T85]], %[[#T21:]] : vector<4xi32>, !llvm.ptr + vi4 k = a ^ b; + // CHECK: %[[#T86:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T87:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T88:]] = llvm.xor %[[#T86]], %[[#T87]] : vector<4xi32> + // CHECK: llvm.store %[[#T88]], %[[#T23:]] : vector<4xi32>, !llvm.ptr + + // Unary arithmetic operators. + vi4 l = +a; + // CHECK: %[[#T89:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: llvm.store %[[#T89]], %[[#T25:]] : vector<4xi32>, !llvm.ptr + vi4 m = -a; + // CHECK: %[[#T90:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T91:]] = llvm.mlir.zero : vector<4xi32> + // CHECK: %[[#T92:]] = llvm.sub %[[#T91]], %[[#T90]] : vector<4xi32> + // CHECK: llvm.store %[[#T92]], %[[#T27:]] : vector<4xi32>, !llvm.ptr + vi4 n = ~a; + // CHECK: %[[#T93:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T94:]] = llvm.mlir.constant(-1 : i32) : i32 + // CHECK: %[[#T95:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#T96:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T97:]] = llvm.insertelement %[[#T94]], %[[#T95]][%[[#T96]] : i64] : vector<4xi32> + // CHECK: %[[#T98:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T99:]] = llvm.insertelement %[[#T94]], %[[#T97]][%[[#T98]] : i64] : vector<4xi32> + // CHECK: %[[#T100:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#T101:]] = llvm.insertelement %[[#T94]], %[[#T99]][%[[#T100]] : i64] : vector<4xi32> + // CHECK: %[[#T102:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#T103:]] = llvm.insertelement %[[#T94]], %[[#T101]][%[[#T102]] : i64] : vector<4xi32> + // CHECK: %[[#T104:]] = llvm.xor %[[#T103]], %[[#T93]] : vector<4xi32> + // CHECK: llvm.store %[[#T104]], %[[#T29:]] : vector<4xi32>, !llvm.ptr +} + +void vector_double_test(int x, double y) { + + // Vector constant. Not yet implemented. Expected results will change when + // fully implemented. + vd2 a = { 1.5, 2.5 }; + // CHECK: %[[#T22:]] = llvm.mlir.constant(1.500000e+00 : f64) : f64 + // CHECK: %[[#T23:]] = llvm.mlir.constant(2.500000e+00 : f64) : f64 + // CHECK: %[[#T24:]] = llvm.mlir.undef : vector<2xf64> + // CHECK: %[[#T25:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T26:]] = llvm.insertelement %[[#T22]], %[[#T24]][%[[#T25]] : i64] : vector<2xf64> + // CHECK: %[[#T27:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T28:]] = llvm.insertelement %[[#T23]], %[[#T26]][%[[#T27]] : i64] : vector<2xf64> + // CHECK: llvm.store %[[#T28]], %[[#T5:]] : vector<2xf64>, !llvm.ptr + + // Non-const vector initialization. + vd2 b = { y, y + 1.0 }; + // CHECK: %[[#T29:]] = llvm.load %[[#T3:]] : !llvm.ptr -> f64 + // CHECK: %[[#T30:]] = llvm.load %[[#T3]] : !llvm.ptr -> f64 + // CHECK: %[[#T31:]] = llvm.mlir.constant(1.000000e+00 : f64) : f64 + // CHECK: %[[#T32:]] = llvm.fadd %[[#T30]], %[[#T31]] : f64 + // CHECK: %[[#T33:]] = llvm.mlir.undef : vector<2xf64> + // CHECK: %[[#T34:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T35:]] = llvm.insertelement %[[#T29]], %[[#T33]][%[[#T34]] : i64] : vector<2xf64> + // CHECK: %[[#T36:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T37:]] = llvm.insertelement %[[#T32]], %[[#T35]][%[[#T36]] : i64] : vector<2xf64> + // CHECK: llvm.store %[[#T37]], %[[#T7:]] : vector<2xf64>, !llvm.ptr + + // Extract element. + double c = a[x]; + // CHECK: 38 = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T39:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T40:]] = llvm.extractelement %[[#T38]][%[[#T39]] : i32] : vector<2xf64> + // CHECK: llvm.store %[[#T40]], %[[#T9:]] : f64, !llvm.ptr + + // Insert element. + a[x] = y; + // CHECK: %[[#T41:]] = llvm.load %[[#T3]] : !llvm.ptr -> f64 + // CHECK: %[[#T42:]] = llvm.load %[[#T1:]] : !llvm.ptr -> i32 + // CHECK: %[[#T43:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T44:]] = llvm.insertelement %[[#T41]], %[[#T43]][%[[#T42]] : i32] : vector<2xf64> + // CHECK: llvm.store %[[#T44]], %[[#T5]] : vector<2xf64>, !llvm.ptr + + // Binary arithmetic operators. + vd2 d = a + b; + // CHECK: %[[#T45:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T46:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T47:]] = llvm.fadd %[[#T45]], %[[#T46]] : vector<2xf64> + // CHECK: llvm.store %[[#T47]], %[[#T11:]] : vector<2xf64>, !llvm.ptr + vd2 e = a - b; + // CHECK: %[[#T48:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T49:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T50:]] = llvm.fsub %[[#T48]], %[[#T49]] : vector<2xf64> + // CHECK: llvm.store %[[#T50]], %[[#T13:]] : vector<2xf64>, !llvm.ptr + vd2 f = a * b; + // CHECK: %[[#T51:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T52:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T53:]] = llvm.fmul %[[#T51]], %[[#T52]] : vector<2xf64> + // CHECK: llvm.store %[[#T53]], %[[#T15:]] : vector<2xf64>, !llvm.ptr + vd2 g = a / b; + // CHECK: %[[#T54:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T55:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T56:]] = llvm.fdiv %[[#T54]], %[[#T55]] : vector<2xf64> + // CHECK: llvm.store %[[#T56]], %[[#T17:]] : vector<2xf64>, !llvm.ptr + + // Unary arithmetic operators. + vd2 l = +a; + // CHECK: %[[#T57:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: llvm.store %[[#T57]], %[[#T19:]] : vector<2xf64>, !llvm.ptr + vd2 m = -a; + // CHECK: %[[#T58:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T59:]] = llvm.fneg %[[#T58]] : vector<2xf64> + // CHECK: llvm.store %[[#T59]], %[[#T21:]] : vector<2xf64>, !llvm.ptr +} From db0999ba11a21c0e349c24e3d9805f17c072521d Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 16 Jan 2024 17:34:53 -0300 Subject: [PATCH 1345/2301] [CIR][IR] Implement `cir.continue` operation Detaches the representation of the C/C++ `continue` statement into a separate operation. This simplifies mostly lowering and verifications related to `continue` statements, as well as the definition and lowering of the `cir.yield` operation. A few checks regarding region terminators were also removed from the lowering stage, since they are already enforced by MLIR. ghstack-source-id: 1810a48ada88fe7ef5638b0758a2298d9cfbdb8b Pull Request resolved: https://github.com/llvm/clangir/pull/394 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 31 +++++---- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 ++ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 7 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 25 +++---- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 66 ++++++++++++++----- clang/test/CIR/CodeGen/loop.cpp | 2 +- clang/test/CIR/IR/invalid.cir | 2 +- clang/test/CIR/IR/loop.cir | 4 +- .../test/CIR/Lowering/loops-with-continue.cir | 8 +-- 9 files changed, 89 insertions(+), 61 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 49c2bce154a8..dfcfdfdb3c64 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -607,13 +607,12 @@ def ConditionOp : CIR_Op<"condition", [ def YieldOpKind_BK : I32EnumAttrCase<"Break", 1, "break">; def YieldOpKind_FT : I32EnumAttrCase<"Fallthrough", 2, "fallthrough">; -def YieldOpKind_CE : I32EnumAttrCase<"Continue", 3, "continue">; def YieldOpKind_NS : I32EnumAttrCase<"NoSuspend", 4, "nosuspend">; def YieldOpKind : I32EnumAttr< "YieldOpKind", "yield kind", - [YieldOpKind_BK, YieldOpKind_FT, YieldOpKind_CE, YieldOpKind_NS]> { + [YieldOpKind_BK, YieldOpKind_FT, YieldOpKind_NS]> { let cppNamespace = "::mlir::cir"; } @@ -634,8 +633,6 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, cannot be used if not dominated by these parent operations. - `fallthrough`: execution falls to the next region in `cir.switch` case list. Only available inside `cir.switch` regions. - - `continue`: only allowed under `cir.loop`, continue execution to the next - loop step. - `nosuspend`: specific to the `ready` region inside `cir.await` op, it makes control-flow to be transfered back to the parent, preventing suspension. @@ -657,11 +654,6 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, }, ... ] - cir.loop (cond : {...}, step : {...}) { - ... - cir.yield continue - } - cir.await(init, ready : { // Call std::suspend_always::await_ready %18 = cir.call @_ZNSt14suspend_always11await_readyEv(...) @@ -718,9 +710,6 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, bool isBreak() { return !isPlain() && *getKind() == YieldOpKind::Break; } - bool isContinue() { - return !isPlain() && *getKind() == YieldOpKind::Continue; - } bool isNoSuspend() { return !isPlain() && *getKind() == YieldOpKind::NoSuspend; } @@ -729,6 +718,20 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// ContinueOp +//===----------------------------------------------------------------------===// + +def ContinueOp : CIR_Op<"continue", [Terminator]> { + let summary = "C/C++ `continue` statement equivalent"; + let description = [{ + The `cir.continue` operation is used to continue execution to the next + iteration of a loop. It is only allowed within `cir.loop` regions. + }]; + let assemblyFormat = "attr-dict"; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // ScopeOp //===----------------------------------------------------------------------===// @@ -1166,7 +1169,7 @@ def LoopOp : CIR_Op<"loop", `cir.loop` represents C/C++ loop forms. It defines 3 blocks: - `cond`: region can contain multiple blocks, terminated by regular `cir.yield` when control should yield back to the parent, and - `cir.yield continue` when execution continues to another region. + `cir.continue` when execution continues to the next region. The region destination depends on the loop form specified. - `step`: region with one block, containing code to compute the loop step, must be terminated with `cir.yield`. @@ -1181,7 +1184,7 @@ def LoopOp : CIR_Op<"loop", // i = i + 1; // } cir.loop while(cond : { - cir.yield continue + cir.continue }, step : { cir.yield }) { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 30082f133300..2cc9d16c49a1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -583,6 +583,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(condition.getLoc(), condition); } + /// Create a continue operation. + mlir::cir::ContinueOp createContinue(mlir::Location loc) { + return create(loc); + } + mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, mlir::Value src, mlir::Value len) { return create(loc, dst, src, len); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 801fc73b179d..8ba22aaa6033 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -301,7 +301,6 @@ mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, return buildGotoStmt(cast(*S)); case Stmt::ContinueStmtClass: return buildContinueStmt(cast(*S)); - case Stmt::NullStmtClass: break; @@ -572,11 +571,7 @@ mlir::LogicalResult CIRGenFunction::buildLabel(const LabelDecl *D) { mlir::LogicalResult CIRGenFunction::buildContinueStmt(const clang::ContinueStmt &S) { - builder.create( - getLoc(S.getContinueLoc()), - mlir::cir::YieldOpKindAttr::get(builder.getContext(), - mlir::cir::YieldOpKind::Continue), - mlir::ValueRange({})); + builder.createContinue(getLoc(S.getContinueLoc())); return mlir::success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index eaeb1ddc5bee..17a71e5408d2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -336,6 +336,16 @@ static void printConstantValue(OpAsmPrinter &p, cir::ConstantOp op, OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } +//===----------------------------------------------------------------------===// +// ContinueOp +//===----------------------------------------------------------------------===// + +LogicalResult ContinueOp::verify() { + if (!this->getOperation()->getParentOfType()) + return emitOpError("must be within a loop"); + return success(); +} + //===----------------------------------------------------------------------===// // CastOp //===----------------------------------------------------------------------===// @@ -797,15 +807,6 @@ mlir::LogicalResult YieldOp::verify() { return false; }; - auto isDominatedByLoop = [](Operation *parentOp) { - while (!llvm::isa(parentOp)) { - if (llvm::isa(parentOp)) - return true; - parentOp = parentOp->getParentOp(); - } - return false; - }; - if (isNoSuspend()) { if (!isDominatedByProperAwaitRegion(getOperation()->getParentOp(), getOperation()->getParentRegion())) @@ -819,12 +820,6 @@ mlir::LogicalResult YieldOp::verify() { return mlir::success(); } - if (isContinue()) { - if (!isDominatedByLoop(getOperation()->getParentOp())) - return emitOpError() << "shall be dominated by 'cir.loop'"; - return mlir::success(); - } - if (isFallthrough()) { if (!llvm::isa(getOperation()->getParentOp())) return emitOpError() << "fallthrough only expected within 'cir.switch'"; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 2f4ac580f8c3..ce1d792cb514 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -29,10 +29,12 @@ #include "mlir/IR/BuiltinDialect.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/OpDefinition.h" #include "mlir/IR/Operation.h" #include "mlir/IR/Types.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" +#include "mlir/IR/Visitors.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" @@ -65,6 +67,36 @@ using namespace llvm; namespace cir { namespace direct { +//===----------------------------------------------------------------------===// +// Helper Methods +//===----------------------------------------------------------------------===// + +namespace { + +/// Lowers operations with the terminator trait that have a single successor. +void lowerTerminator(mlir::Operation *op, mlir::Block *dest, + mlir::ConversionPatternRewriter &rewriter) { + assert(op->hasTrait() && "not a terminator"); + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp(op, dest); +} + +/// Walks a region while skipping operations of type `Ops`. This ensures the +/// callback is not applied to said operations and its children. +template +void walkRegionSkipping(mlir::Region ®ion, + mlir::function_ref callback) { + region.walk([&](mlir::Operation *op) { + if (isa(op)) + return mlir::WalkResult::skip(); + callback(op); + return mlir::WalkResult::advance(); + }); +} + +} // namespace + //===----------------------------------------------------------------------===// // Visitors for Lowering CIR Const Attributes //===----------------------------------------------------------------------===// @@ -441,8 +473,15 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { lowerNestedYield(mlir::cir::YieldOpKind::Break, rewriter, bodyRegion, continueBlock); - lowerNestedYield(mlir::cir::YieldOpKind::Continue, rewriter, bodyRegion, - &stepBlock); + + // Lower continue statements. + mlir::Block &dest = + (kind != LoopKind::For ? condFrontBlock : stepFrontBlock); + walkRegionSkipping( + loopOp.getBody(), [&](mlir::Operation *op) { + if (isa(op)) + lowerTerminator(op, &dest, rewriter); + }); // Move loop op region contents to current CFG. rewriter.inlineRegionBefore(condRegion, continueBlock); @@ -672,9 +711,8 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } }; -static bool isBreakOrContinue(mlir::cir::YieldOp &op) { - return op.getKind() == mlir::cir::YieldOpKind::Break || - op.getKind() == mlir::cir::YieldOpKind::Continue; +static bool isBreak(mlir::cir::YieldOp &op) { + return op.getKind() == mlir::cir::YieldOpKind::Break; } class CIRIfLowering : public mlir::OpConversionPattern { @@ -705,12 +743,10 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(thenAfterBody); if (auto thenYieldOp = dyn_cast(thenAfterBody->getTerminator())) { - if (!isBreakOrContinue(thenYieldOp)) // lowering of parent loop yields is - // deferred to loop lowering + if (!isBreak(thenYieldOp)) // lowering of parent loop yields is + // deferred to loop lowering rewriter.replaceOpWithNewOp( thenYieldOp, thenYieldOp.getArgs(), continueBlock); - } else if (!dyn_cast(thenAfterBody->getTerminator())) { - llvm_unreachable("what are we terminating with?"); } rewriter.setInsertionPointToEnd(continueBlock); @@ -736,13 +772,10 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(elseAfterBody); if (auto elseYieldOp = dyn_cast(elseAfterBody->getTerminator())) { - if (!isBreakOrContinue(elseYieldOp)) // lowering of parent loop yields - // is deferred to loop lowering + if (!isBreak(elseYieldOp)) // lowering of parent loop yields + // is deferred to loop lowering rewriter.replaceOpWithNewOp( elseYieldOp, elseYieldOp.getArgs(), continueBlock); - } else if (!dyn_cast( - elseAfterBody->getTerminator())) { - llvm_unreachable("what are we terminating with?"); } } @@ -798,7 +831,7 @@ class CIRScopeOpLowering rewriter.setInsertionPointToEnd(afterBody); auto yieldOp = dyn_cast(afterBody->getTerminator()); - if (yieldOp && !isBreakOrContinue(yieldOp)) { + if (yieldOp && !isBreak(yieldOp)) { auto branchOp = rewriter.replaceOpWithNewOp( yieldOp, yieldOp.getArgs(), continueBlock); @@ -1400,9 +1433,6 @@ class CIRSwitchOpLowering case mlir::cir::YieldOpKind::Break: rewriteYieldOp(rewriter, yieldOp, exitBlock); break; - case mlir::cir::YieldOpKind::Continue: // Continue is handled only in - // loop lowering - break; default: return op->emitError("invalid yield kind in case statement"); } diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 3360692929f4..0a8bb34c975e 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -180,7 +180,7 @@ void l4() { // CHECK-NEXT: %11 = cir.const(#cir.int<10> : !s32i) : !s32i // CHECK-NEXT: %12 = cir.cmp(lt, %10, %11) : !s32i, !cir.bool // CHECK-NEXT: cir.if %12 { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: cir.continue // CHECK-NEXT: } // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index e73b0ef0cbbb..6ec37e1141a3 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -79,7 +79,7 @@ cir.func @yieldbreak() { cir.func @yieldcontinue() { %0 = cir.const(#true) : !cir.bool cir.if %0 { - cir.yield continue // expected-error {{shall be dominated by 'cir.loop'}} + cir.continue // expected-error {{op must be within a loop}} } cir.return } diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 798aaaeb5ae9..623b178554ab 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -52,7 +52,7 @@ cir.func @l0() { cir.store %6, %0 : !u32i, cir.ptr %7 = cir.const(#true) : !cir.bool cir.if %7 { - cir.yield continue + cir.continue } cir.yield } @@ -118,7 +118,7 @@ cir.func @l0() { // CHECK-NEXT: cir.store %6, %0 : !u32i, cir.ptr // CHECK-NEXT: %7 = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.if %7 { -// CHECK-NEXT: cir.yield continue +// CHECK-NEXT: cir.continue // CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir index 5dac140f7e24..07cd6179f7ae 100644 --- a/clang/test/CIR/Lowering/loops-with-continue.cir +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -27,7 +27,7 @@ module { %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.if %5 { - cir.yield continue + cir.continue } } } @@ -107,7 +107,7 @@ module { %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { - cir.yield continue + cir.continue } } } @@ -189,7 +189,7 @@ cir.func @testWhile() { %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { - cir.yield continue + cir.continue } } cir.yield @@ -243,7 +243,7 @@ cir.func @testWhile() { %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { - cir.yield continue + cir.continue } } cir.yield From 52f022fe5fd2c5bf518c40e6a52bc34d71b58113 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 16 Jan 2024 17:34:53 -0300 Subject: [PATCH 1346/2301] [CIR][IR] Implement `cir.break` operation Same rationale as `cir.continue`, it detaches the representation of the C/C++ `break` statement into a separate operation. This simplifies lowering and verifications related to `break` statements, as well as the definition and lowering of the `cir.yield` operation. ghstack-source-id: 929cf96c3abe51d717c2fa6ca9e0073e42e770c6 Pull Request resolved: https://github.com/llvm/clangir/pull/395 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 23 +++-- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 ++ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 6 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 28 +++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 90 ++++++------------- clang/test/CIR/CodeGen/switch.cpp | 32 +++---- clang/test/CIR/IR/invalid.cir | 2 +- clang/test/CIR/IR/loop.cir | 4 +- clang/test/CIR/IR/switch.cir | 4 +- clang/test/CIR/Lowering/loop.cir | 4 +- clang/test/CIR/Lowering/loops-with-break.cir | 8 +- clang/test/CIR/Lowering/switch.cir | 16 ++-- clang/test/CIR/Transforms/merge-cleanups.cir | 4 +- 13 files changed, 96 insertions(+), 130 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index dfcfdfdb3c64..3276255ea09e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -605,14 +605,13 @@ def ConditionOp : CIR_Op<"condition", [ // YieldOp //===----------------------------------------------------------------------===// -def YieldOpKind_BK : I32EnumAttrCase<"Break", 1, "break">; def YieldOpKind_FT : I32EnumAttrCase<"Fallthrough", 2, "fallthrough">; def YieldOpKind_NS : I32EnumAttrCase<"NoSuspend", 4, "nosuspend">; def YieldOpKind : I32EnumAttr< "YieldOpKind", "yield kind", - [YieldOpKind_BK, YieldOpKind_FT, YieldOpKind_NS]> { + [YieldOpKind_FT, YieldOpKind_NS]> { let cppNamespace = "::mlir::cir"; } @@ -629,8 +628,6 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, defined by the parent operation. Optionally, `cir.yield` can be annotated with extra kind specifiers: - - `break`: breaking out of the innermost `cir.switch` / `cir.loop` semantics, - cannot be used if not dominated by these parent operations. - `fallthrough`: execution falls to the next region in `cir.switch` case list. Only available inside `cir.switch` regions. - `nosuspend`: specific to the `ready` region inside `cir.await` op, it makes @@ -707,9 +704,6 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, bool isFallthrough() { return !isPlain() && *getKind() == YieldOpKind::Fallthrough; } - bool isBreak() { - return !isPlain() && *getKind() == YieldOpKind::Break; - } bool isNoSuspend() { return !isPlain() && *getKind() == YieldOpKind::NoSuspend; } @@ -718,6 +712,21 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// BreakOp +//===----------------------------------------------------------------------===// + +def BreakOp : CIR_Op<"break", [Terminator]> { + let summary = "C/C++ `break` statement equivalent"; + let description = [{ + The `cir.break` operation is used to cease the control flow to the parent + operation, exiting its region's control flow. It is only allowed if it is + within a breakable operation (loops and `switch`). + }]; + let assemblyFormat = "attr-dict"; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // ContinueOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 2cc9d16c49a1..a79e2c6b2aa9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -583,6 +583,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(condition.getLoc(), condition); } + /// Create a break operation. + mlir::cir::BreakOp createBreak(mlir::Location loc) { + return create(loc); + } + /// Create a continue operation. mlir::cir::ContinueOp createContinue(mlir::Location loc) { return create(loc); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 8ba22aaa6033..d3739f18a23e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -576,11 +576,7 @@ CIRGenFunction::buildContinueStmt(const clang::ContinueStmt &S) { } mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) { - builder.create( - getLoc(S.getBreakLoc()), - mlir::cir::YieldOpKindAttr::get(builder.getContext(), - mlir::cir::YieldOpKind::Break), - mlir::ValueRange({})); + builder.createBreak(getLoc(S.getBreakLoc())); return mlir::success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 17a71e5408d2..ecc1e238d6b5 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -226,6 +226,17 @@ void AllocaOp::build(::mlir::OpBuilder &odsBuilder, odsState.addTypes(addr); } +//===----------------------------------------------------------------------===// +// BreakOp +//===----------------------------------------------------------------------===// + +LogicalResult BreakOp::verify() { + if (!getOperation()->getParentOfType() && + !getOperation()->getParentOfType()) + return emitOpError("must be within a loop or switch"); + return success(); +} + //===----------------------------------------------------------------------===// // ConditionOp //===-----------------------------------------------------------------------===// @@ -775,17 +786,6 @@ void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, //===----------------------------------------------------------------------===// mlir::LogicalResult YieldOp::verify() { - auto isDominatedByLoopOrSwitch = [&](Operation *parentOp) { - while (!llvm::isa(parentOp)) { - if (llvm::isa(parentOp)) - return true; - parentOp = parentOp->getParentOp(); - } - - emitOpError() << "shall be dominated by 'cir.loop' or 'cir.switch'"; - return false; - }; - auto isDominatedByProperAwaitRegion = [&](Operation *parentOp, mlir::Region *currRegion) { while (!llvm::isa(parentOp)) { @@ -814,12 +814,6 @@ mlir::LogicalResult YieldOp::verify() { return mlir::success(); } - if (isBreak()) { - if (!isDominatedByLoopOrSwitch(getOperation()->getParentOp())) - return mlir::failure(); - return mlir::success(); - } - if (isFallthrough()) { if (!llvm::isa(getOperation()->getParentOp())) return emitOpError() << "fallthrough only expected within 'cir.switch'"; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ce1d792cb514..215618906d5b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -356,32 +356,6 @@ mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { }; } -static void lowerNestedYield(mlir::cir::YieldOpKind targetKind, - mlir::ConversionPatternRewriter &rewriter, - mlir::Region &body, mlir::Block *dst) { - // top-level yields are lowered in matchAndRewrite of the parent operations - auto isNested = [&](mlir::Operation *op) { - return op->getParentRegion() != &body; - }; - - body.walk([&](mlir::Operation *op) { - if (!isNested(op)) - return mlir::WalkResult::advance(); - - // don't process breaks/continues in nested loops and switches - if (isa(*op)) - return mlir::WalkResult::skip(); - - auto yield = dyn_cast(*op); - if (yield && yield.getKind() == targetKind) { - rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp(op, yield.getArgs(), dst); - } - - return mlir::WalkResult::advance(); - }); -} - class CIRCopyOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -462,7 +436,6 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { auto &bodyFrontBlock = bodyRegion.front(); auto bodyYield = dyn_cast(bodyRegion.back().getTerminator()); - assert(bodyYield && "unstructured while loops are NYI"); // Fetch required info from the step region. auto &stepRegion = loopOp.getStep(); @@ -471,9 +444,6 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { dyn_cast(stepRegion.back().getTerminator()); auto &stepBlock = (kind == LoopKind::For ? stepFrontBlock : condFrontBlock); - lowerNestedYield(mlir::cir::YieldOpKind::Break, rewriter, bodyRegion, - continueBlock); - // Lower continue statements. mlir::Block &dest = (kind != LoopKind::For ? condFrontBlock : stepFrontBlock); @@ -483,6 +453,13 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { lowerTerminator(op, &dest, rewriter); }); + // Lower break statements. + walkRegionSkipping( + loopOp.getBody(), [&](mlir::Operation *op) { + if (isa(op)) + lowerTerminator(op, continueBlock, rewriter); + }); + // Move loop op region contents to current CFG. rewriter.inlineRegionBefore(condRegion, continueBlock); rewriter.inlineRegionBefore(bodyRegion, continueBlock); @@ -500,11 +477,10 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { lowerConditionOp(conditionOp, &bodyFrontBlock, continueBlock, rewriter); // Branch from body to condition or to step on for-loop cases. - rewriter.setInsertionPoint(bodyYield); - auto bodyYieldDest = bodyYield.getKind() == mlir::cir::YieldOpKind::Break - ? continueBlock - : &stepBlock; - rewriter.replaceOpWithNewOp(bodyYield, bodyYieldDest); + if (bodyYield) { + rewriter.setInsertionPoint(bodyYield); + rewriter.replaceOpWithNewOp(bodyYield, &stepBlock); + } // Is a for loop: branch from step to condition. if (kind == LoopKind::For) { @@ -711,10 +687,6 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } }; -static bool isBreak(mlir::cir::YieldOp &op) { - return op.getKind() == mlir::cir::YieldOpKind::Break; -} - class CIRIfLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -743,10 +715,8 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(thenAfterBody); if (auto thenYieldOp = dyn_cast(thenAfterBody->getTerminator())) { - if (!isBreak(thenYieldOp)) // lowering of parent loop yields is - // deferred to loop lowering - rewriter.replaceOpWithNewOp( - thenYieldOp, thenYieldOp.getArgs(), continueBlock); + rewriter.replaceOpWithNewOp( + thenYieldOp, thenYieldOp.getArgs(), continueBlock); } rewriter.setInsertionPointToEnd(continueBlock); @@ -772,10 +742,8 @@ class CIRIfLowering : public mlir::OpConversionPattern { rewriter.setInsertionPointToEnd(elseAfterBody); if (auto elseYieldOp = dyn_cast(elseAfterBody->getTerminator())) { - if (!isBreak(elseYieldOp)) // lowering of parent loop yields - // is deferred to loop lowering - rewriter.replaceOpWithNewOp( - elseYieldOp, elseYieldOp.getArgs(), continueBlock); + rewriter.replaceOpWithNewOp( + elseYieldOp, elseYieldOp.getArgs(), continueBlock); } } @@ -829,18 +797,13 @@ class CIRScopeOpLowering // Replace the scopeop return with a branch that jumps out of the body. // Stack restore before leaving the body region. rewriter.setInsertionPointToEnd(afterBody); - auto yieldOp = dyn_cast(afterBody->getTerminator()); - - if (yieldOp && !isBreak(yieldOp)) { - auto branchOp = rewriter.replaceOpWithNewOp( - yieldOp, yieldOp.getArgs(), continueBlock); - - // // Insert stack restore before jumping out of the body of the region. - rewriter.setInsertionPoint(branchOp); + if (auto yieldOp = + dyn_cast(afterBody->getTerminator())) { + rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getArgs(), + continueBlock); } - // TODO(CIR): stackrestore? - // rewriter.create(loc, stackSaveOp); + // TODO(cir): stackrestore? // Replace the op with values return from the body region. rewriter.replaceOp(scopeOp, continueBlock->getArguments()); @@ -1423,24 +1386,23 @@ class CIRSwitchOpLowering // TODO(cir): Ensure every yield instead of dealing with optional // values. assert(yieldOp.getKind().has_value() && "switch yield has no kind"); - switch (yieldOp.getKind().value()) { // Fallthrough to next case: track it for the next case to handle. case mlir::cir::YieldOpKind::Fallthrough: fallthroughYieldOp = yieldOp; break; - // Break out of switch: branch to exit block. - case mlir::cir::YieldOpKind::Break: - rewriteYieldOp(rewriter, yieldOp, exitBlock); - break; default: return op->emitError("invalid yield kind in case statement"); } } } - lowerNestedYield(mlir::cir::YieldOpKind::Break, rewriter, region, - exitBlock); + // Handle break statements. + walkRegionSkipping( + region, [&](mlir::Operation *op) { + if (isa(op)) + lowerTerminator(op, exitBlock, rewriter); + }); // Extract region contents before erasing the switch op. rewriter.inlineRegionBefore(region, exitBlock); diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index b9f7626d8064..66becf01e190 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -22,10 +22,10 @@ void sw1(int a) { // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i // CHECK-NEXT: cir.store %6, %1 : !s32i, cir.ptr -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 1) { -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 2) { // CHECK-NEXT: cir.scope { @@ -36,7 +36,7 @@ void sw1(int a) { // CHECK-NEXT: cir.store %7, %1 : !s32i, cir.ptr // CHECK-NEXT: %8 = cir.const(#cir.int<100> : !s32i) : !s32i // CHECK-NEXT: cir.store %8, %4 : !s32i, cir.ptr -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: } // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: } @@ -72,7 +72,7 @@ void sw3(int a) { // CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i // CHECK-NEXT: cir.switch (%1 : !s32i) [ // CHECK-NEXT: case (default) { -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: } // CHECK-NEXT: ] @@ -133,10 +133,10 @@ void sw6(int a) { // CHECK: cir.func @_Z3sw6i // CHECK: cir.switch (%1 : !s32i) [ // CHECK-NEXT: case (anyof, [0, 1, 2] : !s32i) { -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: }, // CHECK-NEXT: case (anyof, [3, 4, 5] : !s32i) { -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: } void sw7(int a) { @@ -157,7 +157,7 @@ void sw7(int a) { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, // CHECK-NEXT: case (anyof, [3, 4, 5] : !s32i) { -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: } void sw8(int a) { @@ -173,13 +173,13 @@ void sw8(int a) { //CHECK: cir.func @_Z3sw8i //CHECK: case (equal, 3) -//CHECK-NEXT: cir.yield break +//CHECK-NEXT: cir.break //CHECK-NEXT: }, //CHECK-NEXT: case (equal, 4) { //CHECK-NEXT: cir.yield fallthrough //CHECK-NEXT: } //CHECK-NEXT: case (default) { -//CHECK-NEXT: cir.yield break +//CHECK-NEXT: cir.break //CHECK-NEXT: } void sw9(int a) { @@ -195,13 +195,13 @@ void sw9(int a) { //CHECK: cir.func @_Z3sw9i //CHECK: case (equal, 3) { -//CHECK-NEXT: cir.yield break +//CHECK-NEXT: cir.break //CHECK-NEXT: } //CHECK-NEXT: case (default) { //CHECK-NEXT: cir.yield fallthrough //CHECK-NEXT: } //CHECK: case (equal, 4) -//CHECK-NEXT: cir.yield break +//CHECK-NEXT: cir.break //CHECK-NEXT: } void sw10(int a) { @@ -218,7 +218,7 @@ void sw10(int a) { //CHECK: cir.func @_Z4sw10i //CHECK: case (equal, 3) -//CHECK-NEXT: cir.yield break +//CHECK-NEXT: cir.break //CHECK-NEXT: }, //CHECK-NEXT: case (equal, 4) { //CHECK-NEXT: cir.yield fallthrough @@ -227,7 +227,7 @@ void sw10(int a) { //CHECK-NEXT: cir.yield fallthrough //CHECK-NEXT: } //CHECK-NEXT: case (equal, 5) { -//CHECK-NEXT: cir.yield break +//CHECK-NEXT: cir.break //CHECK-NEXT: } void sw11(int a) { @@ -246,7 +246,7 @@ void sw11(int a) { //CHECK: cir.func @_Z4sw11i //CHECK: case (equal, 3) -//CHECK-NEXT: cir.yield break +//CHECK-NEXT: cir.break //CHECK-NEXT: }, //CHECK-NEXT: case (anyof, [4, 5] : !s32i) { //CHECK-NEXT: cir.yield fallthrough @@ -255,7 +255,7 @@ void sw11(int a) { //CHECK-NEXT: cir.yield fallthrough //CHECK-NEXT: } //CHECK-NEXT: case (anyof, [6, 7] : !s32i) { -//CHECK-NEXT: cir.yield break +//CHECK-NEXT: cir.break //CHECK-NEXT: } void sw12(int a) { @@ -272,5 +272,5 @@ void sw12(int a) { // CHECK-NEXT: case (equal, 3) { // CHECK-NEXT: cir.return // CHECK-NEXT: ^bb1: // no predecessors -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 6ec37e1141a3..ea33e5817fe7 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -67,7 +67,7 @@ cir.func @yieldfallthrough() { cir.func @yieldbreak() { %0 = cir.const(#true) : !cir.bool cir.if %0 { - cir.yield break // expected-error {{shall be dominated by 'cir.loop' or 'cir.switch'}} + cir.break // expected-error {{op must be within a loop or switch}} } cir.return } diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 623b178554ab..132e68119239 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -29,7 +29,7 @@ cir.func @l0() { cir.store %6, %0 : !u32i, cir.ptr %7 = cir.const(#true) : !cir.bool cir.if %7 { - cir.yield break + cir.break } cir.yield } @@ -99,7 +99,7 @@ cir.func @l0() { // CHECK-NEXT: cir.store %6, %0 : !u32i, cir.ptr // CHECK-NEXT: %7 = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.if %7 { -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index bcac3e321f31..d16f93f8297d 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -11,7 +11,7 @@ cir.func @s0() { cir.yield fallthrough }, case (anyof, [6, 7, 8] : !s32i) { - cir.yield break + cir.break }, case (equal, 5 : !s32i) { cir.yield @@ -28,7 +28,7 @@ cir.func @s0() { // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, // CHECK-NEXT: case (anyof, [6, 7, 8] : !s32i) { -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 5) { // CHECK-NEXT: cir.yield diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index bbe42d179273..04c4a5debae0 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -83,7 +83,7 @@ module { }, step : { // Droped when lowering while statements. cir.yield }) { - cir.yield break + cir.break } cir.return } @@ -109,7 +109,7 @@ module { cir.yield }) { cir.scope { // FIXME(cir): Redundant scope emitted during C codegen. - cir.yield break + cir.break } cir.yield } diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index 5bccde54df27..147163ab307f 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -27,7 +27,7 @@ module { %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.if %5 { - cir.yield break + cir.break } } } @@ -106,7 +106,7 @@ module { %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { - cir.yield break + cir.break } } } @@ -189,7 +189,7 @@ module { %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { - cir.yield break + cir.break } } cir.yield @@ -246,7 +246,7 @@ cir.func @testDoWhile() { %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { - cir.yield break + cir.break } } cir.yield diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index 31a70d567caa..aff2632518b1 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -12,12 +12,12 @@ module { // CHECK: 1: ^bb[[#CASE1:]] // CHECK: ] case (equal, 1) { - cir.yield break + cir.break }, // CHECK: ^bb[[#CASE1]]: // CHECK: llvm.br ^bb[[#EXIT:]] case (default) { - cir.yield break + cir.break } // CHECK: ^bb[[#DEFAULT]]: // CHECK: llvm.br ^bb[[#EXIT]] @@ -34,7 +34,7 @@ module { // CHECK: 1: ^bb[[#CASE1:]] // CHECK: ] case (equal, 1) { - cir.yield break + cir.break } // CHECK: ^bb[[#CASE1]]: // CHECK: llvm.br ^bb[[#EXIT]] @@ -51,7 +51,7 @@ module { // CHECK: 2: ^bb[[#CASE1N2]] // CHECK: ] case (anyof, [1, 2] : !s64i) { // case 1 and 2 use same region - cir.yield break + cir.break } // CHECK: ^bb[[#CASE1N2]]: // CHECK: llvm.br ^bb[[#EXIT]] @@ -73,7 +73,7 @@ module { // CHECK: ^bb[[#CASE1]]: // CHECK: llvm.br ^bb[[#CASE2]] case (equal, 2 : !s64i) { - cir.yield break + cir.break } // CHECK: ^bb[[#CASE2]]: // CHECK: llvm.br ^bb[[#EXIT]] @@ -115,7 +115,7 @@ module { case (equal, 3) { cir.return ^bb1: // no predecessors - cir.yield break + cir.break } ] } @@ -152,10 +152,10 @@ module { %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool cir.if %9 { - cir.yield break + cir.break } } - cir.yield break + cir.break } ] } diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 8d84201aee35..1f269069d787 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -37,7 +37,7 @@ module { cir.return } } - cir.yield break + cir.break } cir.yield fallthrough }, @@ -95,7 +95,7 @@ module { // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: } -// CHECK-NEXT: cir.yield break +// CHECK-NEXT: cir.break // CHECK-NEXT: } // CHECK-NEXT: cir.yield fallthrough // CHECK-NEXT: }, From d9771383cafeaa6ce79e41254617af169d8938f3 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 16 Jan 2024 17:34:54 -0300 Subject: [PATCH 1347/2301] [CIR][IR] Deprecate `cir.yield nosuspend` This changes the `cir.await` operation to expect a `cir.condition` as the terminator for the ready region. This simplifies the `cir.await` while also simplifying the `cir.yield`. If `cir.condition` holds a true value, then the `cir.await` will continue the coroutine, otherwise, it will suspend its execution. The `cir.condition` op was also updated to allow `cir.await` as its parent operation. ghstack-source-id: 1ebeb2cfbdeff6f289936d16354cba534e093ea7 Pull Request resolved: https://github.com/llvm/clangir/pull/396 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 50 +++++++++------- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 24 +------- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 57 ++++++++----------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 - clang/test/CIR/CodeGen/coro-task.cpp | 5 +- clang/test/CIR/IR/await.cir | 22 +++++++ clang/test/CIR/IR/invalid.cir | 19 +------ 7 files changed, 80 insertions(+), 99 deletions(-) create mode 100644 clang/test/CIR/IR/await.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 3276255ea09e..32498c8885aa 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -593,12 +593,38 @@ def ConditionOp : CIR_Op<"condition", [ ]> { let summary = "Loop continuation condition."; let description = [{ - The `cir.condition` termintes loop's conditional regions. It takes a single - `cir.bool` operand. if the operand is true, the loop continues, otherwise - it terminates. + The `cir.condition` terminates conditional regions. It takes a single + `cir.bool` operand and, depending on its value, may branch to different + regions: + + - When in the `cond` region of a `cir.loop`, it continues the loop + if true, or exits it if false. + - When in the `ready` region of a `cir.await`, it branches to the `resume` + region when true, and to the `suspend` region when false. + + Example: + + ```mlir + cir.loop for(cond : { + cir.condition(%arg0) // Branches to `step` region or exits. + }, step : { + [...] + }) { + [...] + } + + cir.await(user, ready : { + cir.condition(%arg0) // Branches to `resume` or `suspend` region. + }, suspend : { + [...] + }, resume : { + [...] + },) + ``` }]; let arguments = (ins CIR_BoolType:$condition); let assemblyFormat = " `(` $condition `)` attr-dict "; + let hasVerifier = 1; } //===----------------------------------------------------------------------===// @@ -606,12 +632,11 @@ def ConditionOp : CIR_Op<"condition", [ //===----------------------------------------------------------------------===// def YieldOpKind_FT : I32EnumAttrCase<"Fallthrough", 2, "fallthrough">; -def YieldOpKind_NS : I32EnumAttrCase<"NoSuspend", 4, "nosuspend">; def YieldOpKind : I32EnumAttr< "YieldOpKind", "yield kind", - [YieldOpKind_FT, YieldOpKind_NS]> { + [YieldOpKind_FT]> { let cppNamespace = "::mlir::cir"; } @@ -630,8 +655,6 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, Optionally, `cir.yield` can be annotated with extra kind specifiers: - `fallthrough`: execution falls to the next region in `cir.switch` case list. Only available inside `cir.switch` regions. - - `nosuspend`: specific to the `ready` region inside `cir.await` op, it makes - control-flow to be transfered back to the parent, preventing suspension. As a general rule, `cir.yield` must be explicitly used whenever a region has more than one block and no terminator, or within `cir.switch` regions not @@ -651,16 +674,6 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, }, ... ] - cir.await(init, ready : { - // Call std::suspend_always::await_ready - %18 = cir.call @_ZNSt14suspend_always11await_readyEv(...) - cir.if %18 { - // yields back to the parent. - cir.yield nosuspend - } - cir.yield // control-flow to the next region for suspension. - }, ...) - cir.scope { ... cir.yield @@ -704,9 +717,6 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, bool isFallthrough() { return !isPlain() && *getKind() == YieldOpKind::Fallthrough; } - bool isNoSuspend() { - return !isPlain() && *getKind() == YieldOpKind::NoSuspend; - } }]; let hasVerifier = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 1c05018b535d..360cccb6bf3a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -428,28 +428,8 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, CGF.getLoc(S.getSourceRange()), Kind, /*readyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto *cond = S.getReadyExpr(); - cond = cond->IgnoreParens(); - mlir::Value condV = CGF.evaluateExprAsBool(cond); - - builder.create( - loc, condV, /*withElseRegion=*/false, - /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - // If expression is ready, no need to suspend, - // `YieldOpKind::NoSuspend` tells control flow to return to - // parent, no more regions to be executed. - builder.create( - loc, mlir::cir::YieldOpKind::NoSuspend); - }); - - if (!condV) { - awaitBuild = mlir::failure(); - return; - } - - // Signals the parent that execution flows to next region. - builder.create(loc); + Expr *condExpr = S.getReadyExpr()->IgnoreParens(); + builder.createCondition(CGF.evaluateExprAsBool(condExpr)); }, /*suspendBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index ecc1e238d6b5..2543f4a2b76d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -14,6 +14,7 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" #include #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -246,13 +247,19 @@ LogicalResult BreakOp::verify() { void ConditionOp::getSuccessorRegions( ArrayRef operands, SmallVectorImpl ®ions) { - auto loopOp = cast(getOperation()->getParentOp()); - // TODO(cir): The condition value may be folded to a constant, narrowing // down its list of possible successors. - // Condition may branch to the body or to the parent op. - regions.emplace_back(&loopOp.getBody(), loopOp.getBody().getArguments()); - regions.emplace_back(loopOp->getResults()); + + // Parent is a loop: condition may branch to the body or to the parent op. + if (auto loopOp = dyn_cast(getOperation()->getParentOp())) { + regions.emplace_back(&loopOp.getBody(), loopOp.getBody().getArguments()); + regions.emplace_back(loopOp->getResults()); + } + + // Parent is an await: condition may branch to resume or suspend regions. + auto await = cast(getOperation()->getParentOp()); + regions.emplace_back(&await.getResume(), await.getResume().getArguments()); + regions.emplace_back(&await.getSuspend(), await.getSuspend().getArguments()); } MutableOperandRange @@ -261,6 +268,12 @@ ConditionOp::getMutableSuccessorOperands(RegionBranchPoint point) { return MutableOperandRange(getOperation(), 0, 0); } +LogicalResult ConditionOp::verify() { + if (!isa(getOperation()->getParentOp())) + return emitOpError("condition must be within a conditional region"); + return success(); +} + //===----------------------------------------------------------------------===// // ConstantOp //===----------------------------------------------------------------------===// @@ -786,34 +799,6 @@ void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, //===----------------------------------------------------------------------===// mlir::LogicalResult YieldOp::verify() { - auto isDominatedByProperAwaitRegion = [&](Operation *parentOp, - mlir::Region *currRegion) { - while (!llvm::isa(parentOp)) { - auto awaitOp = dyn_cast(parentOp); - if (awaitOp) { - if (currRegion && currRegion == &awaitOp.getResume()) { - emitOpError() << "kind 'nosuspend' can only be used in 'ready' and " - "'suspend' regions"; - return false; - } - return true; - } - - currRegion = parentOp->getParentRegion(); - parentOp = parentOp->getParentOp(); - } - - emitOpError() << "shall be dominated by 'cir.await'"; - return false; - }; - - if (isNoSuspend()) { - if (!isDominatedByProperAwaitRegion(getOperation()->getParentOp(), - getOperation()->getParentRegion())) - return mlir::failure(); - return mlir::success(); - } - if (isFallthrough()) { if (!llvm::isa(getOperation()->getParentOp())) return emitOpError() << "fallthrough only expected within 'cir.switch'"; @@ -2223,7 +2208,11 @@ void AwaitOp::getSuccessorRegions(mlir::RegionBranchPoint point, regions.push_back(RegionSuccessor(&this->getResume())); } -LogicalResult AwaitOp::verify() { return success(); } +LogicalResult AwaitOp::verify() { + if (!isa(this->getReady().back().getTerminator())) + return emitOpError("ready region must end with cir.condition"); + return success(); +} //===----------------------------------------------------------------------===// // CIR defined traits diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 215618906d5b..07583342f186 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1391,8 +1391,6 @@ class CIRSwitchOpLowering case mlir::cir::YieldOpKind::Fallthrough: fallthroughYieldOp = yieldOp; break; - default: - return op->emitError("invalid yield kind in case statement"); } } } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index f10156eeb46d..a67447572b16 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -206,10 +206,7 @@ VoidTask silly_task() { // CHECK: %[[#TmpCallRes:]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[#SuspendAlwaysAddr]]) // CHECK: cir.yield %[[#TmpCallRes]] : !cir.bool // CHECK: } -// CHECK: cir.if %[[#ReadyVeto]] { -// CHECK: cir.yield nosuspend -// CHECK: } -// CHECK: cir.yield +// CHECK: cir.condition(%[[#ReadyVeto]]) // Second region `suspend` contains the actual suspend logic. // diff --git a/clang/test/CIR/IR/await.cir b/clang/test/CIR/IR/await.cir new file mode 100644 index 000000000000..c62e6b7b88b6 --- /dev/null +++ b/clang/test/CIR/IR/await.cir @@ -0,0 +1,22 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +cir.func coroutine @checkPrintParse(%arg0 : !cir.bool) { + cir.await(user, ready : { + cir.condition(%arg0) + }, suspend : { + cir.yield + }, resume : { + cir.yield + },) + cir.return +} + +// CHECK: cir.func coroutine @checkPrintParse +// CHECK: cir.await(user, ready : { +// CHECK: cir.condition(%arg0) +// CHECK: }, suspend : { +// CHECK: cir.yield +// CHECK: }, resume : { +// CHECK: cir.yield +// CHECK: },) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index ea33e5817fe7..dde85c5aaf4b 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -494,27 +494,12 @@ cir.func coroutine @bad_task() { // expected-error {{coroutine body must use at // ----- -cir.func coroutine @bad_yield() { +cir.func coroutine @missing_condition() { cir.scope { - cir.await(user, ready : { + cir.await(user, ready : { // expected-error {{ready region must end with cir.condition}} cir.yield }, suspend : { cir.yield - }, resume : { - cir.yield nosuspend // expected-error {{kind 'nosuspend' can only be used in 'ready' and 'suspend' regions}} - },) - } - cir.return -} - -// ----- - -cir.func coroutine @good_yield() { - cir.scope { - cir.await(user, ready : { - cir.yield nosuspend - }, suspend : { - cir.yield nosuspend }, resume : { cir.yield },) From d68b39cccfb4ea13827b7574895611904b815f80 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Tue, 16 Jan 2024 17:34:54 -0300 Subject: [PATCH 1348/2301] [CIR][IR] Deprecate `cir.yield fallthrough` Instead of having a `cir.yield fallthrough` operation, the default branching behavior of the parent operation is denoted by `cir.yield`. In other words, a `cir.yield` operation in a switch case region represents the default branching behavior of the switch operation, which is a fallthrough. The `cir.yield` operation now represents the default branching behavior of the parent operation's region. For example, in a if-else region, a `cir.yield` operation represents a branch to the exit block. ghstack-source-id: 713c457dfb2228fbdf63ba72dd6396665512bb9d Pull Request resolved: https://github.com/llvm/clangir/pull/397 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 59 ++++--------------- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 6 ++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 - clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 31 ++++------ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 +---- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 4 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 14 +---- clang/test/CIR/CodeGen/switch.cpp | 20 +++---- clang/test/CIR/IR/invalid.cir | 12 ---- clang/test/CIR/IR/switch.cir | 4 +- clang/test/CIR/Lowering/switch.cir | 4 +- clang/test/CIR/Transforms/merge-cleanups.cir | 8 +-- 12 files changed, 51 insertions(+), 129 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 32498c8885aa..0797ba5a40cc 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -631,30 +631,21 @@ def ConditionOp : CIR_Op<"condition", [ // YieldOp //===----------------------------------------------------------------------===// -def YieldOpKind_FT : I32EnumAttrCase<"Fallthrough", 2, "fallthrough">; - -def YieldOpKind : I32EnumAttr< - "YieldOpKind", - "yield kind", - [YieldOpKind_FT]> { - let cppNamespace = "::mlir::cir"; -} - def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "LoopOp", "AwaitOp", "TernaryOp", "GlobalOp"]>]> { - let summary = "Terminate CIR regions"; + let summary = "Represents the default branching behaviour of a region"; let description = [{ - The `cir.yield` operation terminates regions on different CIR operations: - `cir.if`, `cir.scope`, `cir.switch`, `cir.loop`, `cir.await`, `cir.ternary` - and `cir.global`. - - Might yield an SSA value and the semantics of how the values are yielded is - defined by the parent operation. + The `cir.yield` operation terminates regions on different CIR operations, + and it is used to represent the default branching behaviour of a region. + Said branching behaviour is determinted by the parent operation. For + example, a yield in a `switch-case` region implies a fallthrough, while + a yield in a `cir.if` region implies a branch to the exit block, and so + on. - Optionally, `cir.yield` can be annotated with extra kind specifiers: - - `fallthrough`: execution falls to the next region in `cir.switch` case list. - Only available inside `cir.switch` regions. + In some cases, it might yield an SSA value and the semantics of how the + values are yielded is defined by the parent operation. For example, a + `cir.ternary` operation yields a value from one of its regions. As a general rule, `cir.yield` must be explicitly used whenever a region has more than one block and no terminator, or within `cir.switch` regions not @@ -670,7 +661,7 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, cir.switch (%5) [ case (equal, 3) { ... - cir.yield fallthrough + cir.yield }, ... ] @@ -691,35 +682,11 @@ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ``` }]; - let arguments = (ins OptionalAttr:$kind, - Variadic:$args); + let arguments = (ins Variadic:$args); + let assemblyFormat = "($args^ `:` type($args))? attr-dict"; let builders = [ OpBuilder<(ins), [{ /* nothing to do */ }]>, - OpBuilder<(ins "YieldOpKind":$kind), [{ - mlir::cir::YieldOpKindAttr kattr = mlir::cir::YieldOpKindAttr::get( - $_builder.getContext(), kind); - $_state.addAttribute(getKindAttrName($_state.name), kattr); - }]>, - OpBuilder<(ins "ValueRange":$results), [{ - $_state.addOperands(results); - }]> ]; - - let assemblyFormat = [{ - ($kind^)? ($args^ `:` type($args))? attr-dict - }]; - - let extraClassDeclaration = [{ - // None of the below - bool isPlain() { - return !getKind(); - } - bool isFallthrough() { - return !isPlain() && *getKind() == YieldOpKind::Fallthrough; - } - }]; - - let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index a79e2c6b2aa9..8274df33298e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -588,6 +588,12 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc); } + /// Create a yield operation. + mlir::cir::YieldOp createYield(mlir::Location loc, + mlir::ValueRange value = {}) { + return create(loc, value); + } + /// Create a continue operation. mlir::cir::ContinueOp createContinue(mlir::Location loc) { return create(loc); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index d74a978e4193..b2604265cf74 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -992,8 +992,6 @@ class CIRGenFunction : public CIRGenTypeCache { const CaseStmt *foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, SmallVector &caseAttrs); - void insertFallthrough(const clang::Stmt &S); - template mlir::LogicalResult buildCaseDefaultCascade(const T *stmt, mlir::Type condType, diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index d3739f18a23e..d96264f2f408 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "Address.h" +#include "CIRGenBuilder.h" #include "CIRGenFunction.h" #include "mlir/IR/Value.h" #include "clang/AST/CharUnits.h" @@ -339,7 +340,7 @@ mlir::LogicalResult CIRGenFunction::buildLabelStmt(const clang::LabelStmt &S) { // Add terminating yield on body regions (loops, ...) in case there are // not other terminators used. // FIXME: make terminateCaseRegion use this too. -static void terminateBody(mlir::OpBuilder &builder, mlir::Region &r, +static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r, mlir::Location loc) { if (r.empty()) return; @@ -357,7 +358,7 @@ static void terminateBody(mlir::OpBuilder &builder, mlir::Region &r, !block.back().hasTrait()) { mlir::OpBuilder::InsertionGuard guardCase(builder); builder.setInsertionPointToEnd(&block); - builder.create(loc); + builder.createYield(loc); } } @@ -608,14 +609,6 @@ CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, return lastCase; } -void CIRGenFunction::insertFallthrough(const clang::Stmt &S) { - builder.create( - getLoc(S.getBeginLoc()), - mlir::cir::YieldOpKindAttr::get(builder.getContext(), - mlir::cir::YieldOpKind::Fallthrough), - mlir::ValueRange({})); -} - template mlir::LogicalResult CIRGenFunction::buildCaseDefaultCascade( const T *stmt, mlir::Type condType, @@ -637,11 +630,11 @@ mlir::LogicalResult CIRGenFunction::buildCaseDefaultCascade( auto *sub = stmt->getSubStmt(); if (isa(sub) && isa(stmt)) { - insertFallthrough(*stmt); + builder.createYield(getLoc(stmt->getBeginLoc())); res = buildDefaultStmt(*dyn_cast(sub), condType, caseAttrs, os); } else if (isa(sub) && isa(stmt)) { - insertFallthrough(*stmt); + builder.createYield(getLoc(stmt->getBeginLoc())); res = buildCaseStmt(*dyn_cast(sub), condType, caseAttrs, os); } else { res = buildStmt(sub, /*useCurrentScope=*/!isa(sub)); @@ -727,7 +720,7 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, if (S.getInc()) if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); - builder.create(loc); + builder.createYield(loc); }); return loopRes; }; @@ -809,7 +802,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { if (S.getInc()) if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); - builder.create(loc); + builder.createYield(loc); }); return loopRes; }; @@ -863,7 +856,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - builder.create(loc); + builder.createYield(loc); }); return loopRes; }; @@ -922,7 +915,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - builder.create(loc); + builder.createYield(loc); }); return loopRes; }; @@ -1049,11 +1042,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { !block.back().hasTrait()) { mlir::OpBuilder::InsertionGuard guardCase(builder); builder.setInsertionPointToEnd(&block); - builder.create( - loc, - mlir::cir::YieldOpKindAttr::get( - builder.getContext(), mlir::cir::YieldOpKind::Fallthrough), - mlir::ValueRange({})); + builder.createYield(loc); } } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 2543f4a2b76d..cfa1a7132fa2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -182,7 +182,7 @@ bool omitRegionTerm(mlir::Region &r) { const auto singleNonEmptyBlock = r.hasOneBlock() && !r.back().empty(); const auto yieldsNothing = [&r]() { YieldOp y = dyn_cast(r.back().getTerminator()); - return y && y.isPlain() && y.getArgs().empty(); + return y && y.getArgs().empty(); }; return singleNonEmptyBlock && yieldsNothing(); } @@ -794,20 +794,6 @@ void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, result.addTypes(TypeRange{yield.getOperandTypes().front()}); } -//===----------------------------------------------------------------------===// -// YieldOp -//===----------------------------------------------------------------------===// - -mlir::LogicalResult YieldOp::verify() { - if (isFallthrough()) { - if (!llvm::isa(getOperation()->getParentOp())) - return emitOpError() << "fallthrough only expected within 'cir.switch'"; - return mlir::success(); - } - - return mlir::success(); -} - //===----------------------------------------------------------------------===// // BrOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 93abd4f729b4..551024854077 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -820,9 +820,7 @@ void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { YieldOp y = dyn_cast(block.back()); if (!y) return false; - if (y.isFallthrough()) - return true; - return false; + return true; }; auto regions = switchOp.getRegions(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 07583342f186..b6afaf29671d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1381,18 +1381,8 @@ class CIRSwitchOpLowering continue; // Handle switch-case yields. - auto *terminator = blk.getTerminator(); - if (auto yieldOp = dyn_cast(terminator)) { - // TODO(cir): Ensure every yield instead of dealing with optional - // values. - assert(yieldOp.getKind().has_value() && "switch yield has no kind"); - switch (yieldOp.getKind().value()) { - // Fallthrough to next case: track it for the next case to handle. - case mlir::cir::YieldOpKind::Fallthrough: - fallthroughYieldOp = yieldOp; - break; - } - } + if (auto yieldOp = dyn_cast(blk.getTerminator())) + fallthroughYieldOp = yieldOp; } // Handle break statements. diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 66becf01e190..4f1ac78eb9c1 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -38,7 +38,7 @@ void sw1(int a) { // CHECK-NEXT: cir.store %8, %4 : !s32i, cir.ptr // CHECK-NEXT: cir.break // CHECK-NEXT: } -// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: cir.yield // CHECK-NEXT: } void sw2(int a) { @@ -96,7 +96,7 @@ int sw4(int a) { // CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i // CHECK-NEXT: cir.return %6 : !s32i // CHECK-NEXT: } -// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, // CHECK-NEXT: case (default) { // CHECK-NEXT: %5 = cir.const(#cir.int<2> : !s32i) : !s32i @@ -115,7 +115,7 @@ void sw5(int a) { // CHECK: cir.func @_Z3sw5i // CHECK: cir.switch (%1 : !s32i) [ // CHECK-NEXT: case (equal, 1) { -// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: cir.yield void sw6(int a) { switch (a) { @@ -154,7 +154,7 @@ void sw7(int a) { // CHECK: cir.func @_Z3sw7i // CHECK: case (anyof, [0, 1, 2] : !s32i) { -// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, // CHECK-NEXT: case (anyof, [3, 4, 5] : !s32i) { // CHECK-NEXT: cir.break @@ -176,7 +176,7 @@ void sw8(int a) { //CHECK-NEXT: cir.break //CHECK-NEXT: }, //CHECK-NEXT: case (equal, 4) { -//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: cir.yield //CHECK-NEXT: } //CHECK-NEXT: case (default) { //CHECK-NEXT: cir.break @@ -198,7 +198,7 @@ void sw9(int a) { //CHECK-NEXT: cir.break //CHECK-NEXT: } //CHECK-NEXT: case (default) { -//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: cir.yield //CHECK-NEXT: } //CHECK: case (equal, 4) //CHECK-NEXT: cir.break @@ -221,10 +221,10 @@ void sw10(int a) { //CHECK-NEXT: cir.break //CHECK-NEXT: }, //CHECK-NEXT: case (equal, 4) { -//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: cir.yield //CHECK-NEXT: } //CHECK-NEXT: case (default) { -//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: cir.yield //CHECK-NEXT: } //CHECK-NEXT: case (equal, 5) { //CHECK-NEXT: cir.break @@ -249,10 +249,10 @@ void sw11(int a) { //CHECK-NEXT: cir.break //CHECK-NEXT: }, //CHECK-NEXT: case (anyof, [4, 5] : !s32i) { -//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: cir.yield //CHECK-NEXT: } //CHECK-NEXT: case (default) { -//CHECK-NEXT: cir.yield fallthrough +//CHECK-NEXT: cir.yield //CHECK-NEXT: } //CHECK-NEXT: case (anyof, [6, 7] : !s32i) { //CHECK-NEXT: cir.break diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index dde85c5aaf4b..00ecacdcacad 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -50,18 +50,6 @@ cir.func @yield0() { // ----- -#false = #cir.bool : !cir.bool -#true = #cir.bool : !cir.bool -cir.func @yieldfallthrough() { - %0 = cir.const(#true) : !cir.bool - cir.if %0 { - cir.yield fallthrough // expected-error {{'cir.yield' op fallthrough only expected within 'cir.switch'}} - } - cir.return -} - -// ----- - #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool cir.func @yieldbreak() { diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index d16f93f8297d..db63a2928862 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -8,7 +8,7 @@ cir.func @s0() { cir.return }, case (equal, 3) { - cir.yield fallthrough + cir.yield }, case (anyof, [6, 7, 8] : !s32i) { cir.break @@ -25,7 +25,7 @@ cir.func @s0() { // CHECK-NEXT: cir.return // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 3) { -// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, // CHECK-NEXT: case (anyof, [6, 7, 8] : !s32i) { // CHECK-NEXT: cir.break diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index aff2632518b1..92f8e4654a40 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -68,7 +68,7 @@ module { // CHECK: 2: ^bb[[#CASE2:]] // CHECK: ] case (equal, 1 : !s64i) { // case 1 has its own region - cir.yield fallthrough // fallthrough to case 2 + cir.yield // fallthrough to case 2 }, // CHECK: ^bb[[#CASE1]]: // CHECK: llvm.br ^bb[[#CASE2]] @@ -89,7 +89,7 @@ module { // CHECK: 1: ^bb[[#CASE1:]] // CHECK: ] case (equal, 1 : !s64i) { - cir.yield fallthrough // fallthrough to exit + cir.yield // fallthrough to exit } // CHECK: ^bb[[#CASE1]]: // CHECK: llvm.br ^bb[[#EXIT]] diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 1f269069d787..52ba8b7842d2 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -39,7 +39,7 @@ module { } cir.break } - cir.yield fallthrough + cir.yield }, case (equal, 2 : !s32i) { cir.scope { @@ -54,7 +54,7 @@ module { ^bb1: // pred: ^bb0 cir.return } - cir.yield fallthrough + cir.yield } ] } @@ -97,7 +97,7 @@ module { // CHECK-NEXT: } // CHECK-NEXT: cir.break // CHECK-NEXT: } -// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: cir.yield // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 2) { // CHECK-NEXT: cir.scope { @@ -110,7 +110,7 @@ module { // CHECK-NEXT: cir.store %9, %5 : !s32i, cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK-NEXT: cir.yield fallthrough +// CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: ] From 41d63829a4f87b79cf25a657eb2e39026982b865 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 16 Jan 2024 14:22:02 -0800 Subject: [PATCH 1349/2301] [CIR] TryCallOp: add blocks, arguments, proper interface impl and testcase - Add cir.try_call parsing. - Add block destinations and hookup exception info type. - Properly implement interface methods. Printer is still missing, but coming next. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 24 ++- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 61 ++++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 163 ++++++++++++++++-- clang/test/CIR/IR/exceptions.cir | 27 +++ 4 files changed, 233 insertions(+), 42 deletions(-) create mode 100644 clang/test/CIR/IR/exceptions.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0797ba5a40cc..3eb36e91a2cf 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1923,13 +1923,14 @@ def FuncOp : CIR_Op<"func", [ } //===----------------------------------------------------------------------===// -// CallOp and TryCallOp +// CallOp //===----------------------------------------------------------------------===// -class CIR_CallOp : +class CIR_CallOp extra_traits = []> : Op, - DeclareOpInterfaceMethods]> { + !listconcat(extra_traits, + [DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods])> { let extraClassDeclaration = [{ /// Get the argument operands to the called function. OperandRange getArgOperands() { @@ -2013,7 +2014,13 @@ def CallOp : CIR_CallOp<"call"> { }]>]; } -def TryCallOp : CIR_CallOp<"try_call"> { +//===----------------------------------------------------------------------===// +// TryCallOp +//===----------------------------------------------------------------------===// + +def TryCallOp : CIR_CallOp<"try_call", + [AttrSizedOperandSegments, DeclareOpInterfaceMethods, + Terminator]> { let summary = "try call operation"; let description = [{ Works very similar to `cir.call` but passes down an exception object @@ -2036,8 +2043,13 @@ def TryCallOp : CIR_CallOp<"try_call"> { }]; let arguments = (ins OptionalAttr:$callee, - Variadic:$operands, + ExceptionInfoPtr:$exceptionInfo, + Variadic:$destContOps, + Variadic:$destAbortOps, + Variadic:$callOps, OptionalAttr:$ast); + let successors = (successor AnySuccessor:$destContinue, + AnySuccessor:$destAbort); let results = (outs Variadic); let builders = [ diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 22fe594bf9ba..7e85d3ddeff3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -226,6 +226,26 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { }]; } +//===----------------------------------------------------------------------===// +// Exception info type +// +// By introducing an exception info type, exception related operations can be +// more descriptive. +// +// This basically wraps a uint8_t* and a uint32_t +// +//===----------------------------------------------------------------------===// + +def CIR_ExceptionInfo : CIR_Type<"ExceptionInfo", "eh.info"> { + let summary = "CIR exception info"; + let description = [{ + Represents the content necessary for a `cir.call` to pass back an exception + object pointer + some extra selector information. This type is required for + some exception related operations, like `cir.catch`, `cir.eh.selector_slot` + and `cir.eh.slot`. + }]; +} + //===----------------------------------------------------------------------===// // Void type //===----------------------------------------------------------------------===// @@ -254,37 +274,32 @@ def VoidPtr : Type< "mlir::cir::VoidType::get($_builder.getContext()))"> { } +// Pointer to exception info +def ExceptionInfoPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::ExceptionInfoType>()">, + ]>, "void*">, + BuildableType< + "mlir::cir::PointerType::get($_builder.getContext()," + "mlir::cir::ExceptionInfo::get($_builder.getContext()))"> { +} + //===----------------------------------------------------------------------===// -// Global type constraints +// StructType (defined in cpp files) //===----------------------------------------------------------------------===// def CIR_StructType : Type()">, "CIR struct type">; -def CIR_AnyType : AnyTypeOf<[ - CIR_IntType, CIR_PointerType, CIR_BoolType, CIR_ArrayType, CIR_VectorType, - CIR_FuncType, CIR_VoidType, CIR_StructType, AnyFloat, -]>; - - //===----------------------------------------------------------------------===// -// Exception info type -// -// By introducing an exception info type, exception related operations can be -// more descriptive. -// -// This basically wraps a uint8_t* and a uint32_t -// +// Global type constraints //===----------------------------------------------------------------------===// -def CIR_ExceptionInfo : CIR_Type<"ExceptionInfo", "eh.info"> { - let summary = "CIR exception info"; - let description = [{ - Represents the content necessary for a `cir.call` to pass back an exception - object pointer + some extra selector information. This type is required for - some exception related operations, like `cir.catch`, `cir.eh.selector_slot` - and `cir.eh.slot`. - }]; -} +def CIR_AnyType : AnyTypeOf<[ + CIR_IntType, CIR_PointerType, CIR_BoolType, CIR_ArrayType, CIR_VectorType, + CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionInfo, AnyFloat, +]>; #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index cfa1a7132fa2..489394125d05 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1949,19 +1949,24 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { if (!fn) return op->emitOpError() << "'" << fnAttr.getValue() << "' does not reference a valid function"; + auto callIf = dyn_cast(op); + assert(callIf && "expected CIR call interface to be always available"); // Verify that the operand and result types match the callee. Note that // argument-checking is disabled for functions without a prototype. auto fnType = fn.getFunctionType(); if (!fn.getNoProto()) { - if (!fnType.isVarArg() && op->getNumOperands() != fnType.getNumInputs()) + unsigned numCallOperands = callIf.getNumArgOperands(); + unsigned numFnOpOperands = fnType.getNumInputs(); + + if (!fnType.isVarArg() && numCallOperands != numFnOpOperands) return op->emitOpError("incorrect number of operands for callee"); - if (fnType.isVarArg() && op->getNumOperands() < fnType.getNumInputs()) + if (fnType.isVarArg() && numCallOperands < numFnOpOperands) return op->emitOpError("too few operands for callee"); - for (unsigned i = 0, e = fnType.getNumInputs(); i != e; ++i) - if (op->getOperand(i).getType() != fnType.getInput(i)) + for (unsigned i = 0, e = numFnOpOperands; i != e; ++i) + if (callIf.getArgOperand(i).getType() != fnType.getInput(i)) return op->emitOpError("operand type mismatch: expected operand type ") << fnType.getInput(i) << ", but provided " << op->getOperand(i).getType() << " for operand number " << i; @@ -1986,8 +1991,13 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { return success(); } -static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, - ::mlir::OperationState &result) { +static ::mlir::ParseResult parseCallCommon( + ::mlir::OpAsmParser &parser, ::mlir::OperationState &result, + llvm::function_ref<::mlir::ParseResult(::mlir::OpAsmParser &, + ::mlir::OperationState &, int32_t)> + customOpHandler = [](::mlir::OpAsmParser &parser, + ::mlir::OperationState &result, + int32_t numCallArgs) { return mlir::success(); }) { mlir::FlatSymbolRefAttr calleeAttr; llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> ops; llvm::SMLoc opsLoc; @@ -2024,13 +2034,18 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, operandsTypes = opsFnTy.getInputs(); allResultTypes = opsFnTy.getResults(); result.addTypes(allResultTypes); + + if (customOpHandler(parser, result, operandsTypes.size()).failed()) + return ::mlir::failure(); + if (parser.resolveOperands(ops, operandsTypes, opsLoc, result.operands)) return ::mlir::failure(); return ::mlir::success(); } -void printCallCommon(Operation *op, mlir::FlatSymbolRefAttr flatSym, - ::mlir::OpAsmPrinter &state) { +void printCallCommon( + Operation *op, mlir::FlatSymbolRefAttr flatSym, ::mlir::OpAsmPrinter &state, + llvm::function_ref customOpHandler = []() {}) { state << ' '; auto ops = op->getOperands(); @@ -2074,7 +2089,12 @@ mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_begin() { auto arg_begin = operand_begin(); if (!getCallee()) arg_begin++; - return arg_begin; + // First operand is the exception pointer, skip it. + // + // FIXME(cir): for this and all the other calculations in the other methods: + // we currently have no basic block arguments on cir.try_call, but if it gets + // to that, this needs further adjustment. + return arg_begin++; } mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_end() { return operand_end(); @@ -2084,13 +2104,17 @@ mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_end() { Value cir::TryCallOp::getArgOperand(unsigned i) { if (!getCallee()) i++; - return getOperand(i); + // First operand is the exception pointer, skip it. + return getOperand(i + 1); } /// Return the number of operands, , accounts for indirect call. unsigned cir::TryCallOp::getNumArgOperands() { + unsigned numOperands = this->getOperation()->getNumOperands(); if (!getCallee()) - return this->getOperation()->getNumOperands() - 1; - return this->getOperation()->getNumOperands(); + numOperands--; + // First operand is the exception pointer, skip it. + numOperands--; + return numOperands; } LogicalResult @@ -2100,13 +2124,126 @@ cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result) { - return parseCallCommon(parser, result); + return parseCallCommon( + parser, result, + [](::mlir::OpAsmParser &parser, ::mlir::OperationState &result, + int32_t numCallArgs) -> ::mlir::ParseResult { + ::mlir::OpAsmParser::UnresolvedOperand exceptionRawOperands[1]; + ::llvm::ArrayRef<::mlir::OpAsmParser::UnresolvedOperand> + exceptionOperands(exceptionRawOperands); + ::llvm::SMLoc exceptionOperandsLoc; + (void)exceptionOperandsLoc; + + ::mlir::Block *destContinueSuccessor = nullptr; + ::llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> + destOperandsContinue; + ::llvm::SMLoc destOperandsContinueLoc; + (void)destOperandsContinueLoc; + ::llvm::SmallVector<::mlir::Type, 1> destOperandsContinueTypes; + ::mlir::Block *destAbortSuccessor = nullptr; + ::llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> + destOperandsAbort; + ::llvm::SMLoc destOperandsAbortLoc; + (void)destOperandsAbortLoc; + ::llvm::SmallVector<::mlir::Type, 1> destOperandsAbortTypes; + + // So far we have 4: exception ptr, variadic continue, variadic abort + // and variadic call args. + enum { + Segment_Exception_Idx, + Segment_Continue_Idx, + Segment_Abort_Idx, + Segment_CallArgs_Idx, + }; + ::llvm::SmallVector operandSegmentSizes = {0, 0, 0, 0}; + + if (parser.parseComma()) + return ::mlir::failure(); + + // Handle continue destination and potential bb operands. + if (parser.parseSuccessor(destContinueSuccessor)) + return ::mlir::failure(); + if (::mlir::succeeded(parser.parseOptionalLParen())) { + + destOperandsContinueLoc = parser.getCurrentLocation(); + if (parser.parseOperandList(destOperandsContinue)) + return ::mlir::failure(); + if (parser.parseColon()) + return ::mlir::failure(); + + if (parser.parseTypeList(destOperandsContinueTypes)) + return ::mlir::failure(); + if (parser.parseRParen()) + return ::mlir::failure(); + } + if (parser.parseComma()) + return ::mlir::failure(); + + // Handle abort destination and potential bb operands. + if (parser.parseSuccessor(destAbortSuccessor)) + return ::mlir::failure(); + if (::mlir::succeeded(parser.parseOptionalLParen())) { + destOperandsAbortLoc = parser.getCurrentLocation(); + if (parser.parseOperandList(destOperandsAbort)) + return ::mlir::failure(); + if (parser.parseColon()) + return ::mlir::failure(); + + if (parser.parseTypeList(destOperandsAbortTypes)) + return ::mlir::failure(); + if (parser.parseRParen()) + return ::mlir::failure(); + } + + if (parser.parseComma()) + return ::mlir::failure(); + exceptionOperandsLoc = parser.getCurrentLocation(); + if (parser.parseOperand(exceptionRawOperands[0])) + return ::mlir::failure(); + + auto exceptionPtrTy = cir::PointerType::get( + parser.getBuilder().getContext(), + parser.getBuilder().getType<::mlir::cir::ExceptionInfoType>()); + if (parser.resolveOperands(exceptionOperands, exceptionPtrTy, + exceptionOperandsLoc, result.operands)) + return ::mlir::failure(); + + // Add information to the builders. + result.addSuccessors(destContinueSuccessor); + result.addSuccessors(destAbortSuccessor); + + if (parser.resolveOperands(destOperandsContinue, + destOperandsContinueTypes, + destOperandsContinueLoc, result.operands)) + return ::mlir::failure(); + if (parser.resolveOperands(destOperandsAbort, destOperandsAbortTypes, + destOperandsAbortLoc, result.operands)) + return ::mlir::failure(); + + // Required to always be there. + operandSegmentSizes[Segment_Exception_Idx] = 1; + operandSegmentSizes[Segment_Continue_Idx] = + destOperandsContinueTypes.size(); + operandSegmentSizes[Segment_Abort_Idx] = destOperandsAbortTypes.size(); + operandSegmentSizes[Segment_CallArgs_Idx] = numCallArgs; + result.addAttribute( + "operandSegmentSizes", + parser.getBuilder().getDenseI32ArrayAttr(operandSegmentSizes)); + + return ::mlir::success(); + }); } void TryCallOp::print(::mlir::OpAsmPrinter &state) { printCallCommon(*this, getCalleeAttr(), state); } +mlir::SuccessorOperands TryCallOp::getSuccessorOperands(unsigned index) { + assert(index < getNumSuccessors() && "invalid successor index"); + return SuccessorOperands(index == 0 ? getDestContOpsMutable() + : getDestAbortOpsMutable()); +} + //===----------------------------------------------------------------------===// // UnaryOp //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/exceptions.cir b/clang/test/CIR/IR/exceptions.cir new file mode 100644 index 000000000000..e40652bb9a16 --- /dev/null +++ b/clang/test/CIR/IR/exceptions.cir @@ -0,0 +1,27 @@ +// RUN: cir-opt %s | FileCheck %s + +!s32i = !cir.int + +module { + cir.func @div(%x : !s32i, %y : !s32i) -> !s32i { + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.return %3 : !s32i + } + + cir.func @foo(%x : !s32i, %y : !s32i) { + cir.scope { + %10 = cir.scope { + %0 = cir.alloca !cir.eh.info, cir.ptr , ["exception_info"] {alignment = 16 : i64} + %d = cir.try_call @div(%x, %y) : (!s32i, !s32i) -> !s32i, ^continue_A, ^abort, %0 + // CHECK: cir.try_call @div(%1, %arg0, %arg1) {operandSegmentSizes = array} : (!cir.ptr, !s32i, !s32i) -> !s32i + ^continue_A: + cir.br ^abort + ^abort: + %1 = cir.load %0 : cir.ptr , !cir.eh.info + cir.yield %1 : !cir.eh.info + } : !cir.eh.info + cir.yield + } + cir.return + } +} \ No newline at end of file From 2946d3770ca0b1774a47a342c2236da753a02e25 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 Jan 2024 14:09:28 -0800 Subject: [PATCH 1350/2301] [CIR][Exceptions] Simplify cir.try_call After some discussions with @sitio-couto, it might be better if we use a simplified version that doesn't take the labels into account just yet. `cir.try_call` should have the same semantics as `cir.break`, in the sense that it needs further expansion when getting rid of structured control flow. Early lowering here would complicate CIR generated code and make it harder to analyse. Further CIR to CIR passes will properly expand this at some point prior to LLVM lowering. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 14 +--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 88 -------------------- clang/test/CIR/IR/exceptions.cir | 7 +- 3 files changed, 5 insertions(+), 104 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 3eb36e91a2cf..181135f6fc6a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2018,15 +2018,11 @@ def CallOp : CIR_CallOp<"call"> { // TryCallOp //===----------------------------------------------------------------------===// -def TryCallOp : CIR_CallOp<"try_call", - [AttrSizedOperandSegments, DeclareOpInterfaceMethods, - Terminator]> { +def TryCallOp : CIR_CallOp<"try_call"> { let summary = "try call operation"; let description = [{ Works very similar to `cir.call` but passes down an exception object - in case anything is thrown by the callee. Upon the callee throwing, - `cir.try_call` goes to current `cir.scope`'s `abort` label, otherwise - execution follows to the `continue` label. + in case anything is thrown by the callee. To walk the operands for this operation, use `getNumArgOperands()`, `getArgOperand()`, `getArgOperands()`, `arg_operand_begin()` and @@ -2038,18 +2034,14 @@ def TryCallOp : CIR_CallOp<"try_call", Example: ```mlir - %r = cir.try_call @division(%1, %2), ^continue_A, ^abort, %0 + %r = cir.try_call @division(%1, %2), %0 ``` }]; let arguments = (ins OptionalAttr:$callee, ExceptionInfoPtr:$exceptionInfo, - Variadic:$destContOps, - Variadic:$destAbortOps, Variadic:$callOps, OptionalAttr:$ast); - let successors = (successor AnySuccessor:$destContinue, - AnySuccessor:$destAbort); let results = (outs Variadic); let builders = [ diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 489394125d05..7b797fd9494f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2134,69 +2134,9 @@ ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, ::llvm::SMLoc exceptionOperandsLoc; (void)exceptionOperandsLoc; - ::mlir::Block *destContinueSuccessor = nullptr; - ::llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> - destOperandsContinue; - ::llvm::SMLoc destOperandsContinueLoc; - (void)destOperandsContinueLoc; - ::llvm::SmallVector<::mlir::Type, 1> destOperandsContinueTypes; - ::mlir::Block *destAbortSuccessor = nullptr; - ::llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> - destOperandsAbort; - ::llvm::SMLoc destOperandsAbortLoc; - (void)destOperandsAbortLoc; - ::llvm::SmallVector<::mlir::Type, 1> destOperandsAbortTypes; - - // So far we have 4: exception ptr, variadic continue, variadic abort - // and variadic call args. - enum { - Segment_Exception_Idx, - Segment_Continue_Idx, - Segment_Abort_Idx, - Segment_CallArgs_Idx, - }; - ::llvm::SmallVector operandSegmentSizes = {0, 0, 0, 0}; - - if (parser.parseComma()) - return ::mlir::failure(); - - // Handle continue destination and potential bb operands. - if (parser.parseSuccessor(destContinueSuccessor)) - return ::mlir::failure(); - if (::mlir::succeeded(parser.parseOptionalLParen())) { - - destOperandsContinueLoc = parser.getCurrentLocation(); - if (parser.parseOperandList(destOperandsContinue)) - return ::mlir::failure(); - if (parser.parseColon()) - return ::mlir::failure(); - - if (parser.parseTypeList(destOperandsContinueTypes)) - return ::mlir::failure(); - if (parser.parseRParen()) - return ::mlir::failure(); - } if (parser.parseComma()) return ::mlir::failure(); - // Handle abort destination and potential bb operands. - if (parser.parseSuccessor(destAbortSuccessor)) - return ::mlir::failure(); - if (::mlir::succeeded(parser.parseOptionalLParen())) { - destOperandsAbortLoc = parser.getCurrentLocation(); - if (parser.parseOperandList(destOperandsAbort)) - return ::mlir::failure(); - if (parser.parseColon()) - return ::mlir::failure(); - - if (parser.parseTypeList(destOperandsAbortTypes)) - return ::mlir::failure(); - if (parser.parseRParen()) - return ::mlir::failure(); - } - - if (parser.parseComma()) - return ::mlir::failure(); exceptionOperandsLoc = parser.getCurrentLocation(); if (parser.parseOperand(exceptionRawOperands[0])) return ::mlir::failure(); @@ -2208,28 +2148,6 @@ ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, exceptionOperandsLoc, result.operands)) return ::mlir::failure(); - // Add information to the builders. - result.addSuccessors(destContinueSuccessor); - result.addSuccessors(destAbortSuccessor); - - if (parser.resolveOperands(destOperandsContinue, - destOperandsContinueTypes, - destOperandsContinueLoc, result.operands)) - return ::mlir::failure(); - if (parser.resolveOperands(destOperandsAbort, destOperandsAbortTypes, - destOperandsAbortLoc, result.operands)) - return ::mlir::failure(); - - // Required to always be there. - operandSegmentSizes[Segment_Exception_Idx] = 1; - operandSegmentSizes[Segment_Continue_Idx] = - destOperandsContinueTypes.size(); - operandSegmentSizes[Segment_Abort_Idx] = destOperandsAbortTypes.size(); - operandSegmentSizes[Segment_CallArgs_Idx] = numCallArgs; - result.addAttribute( - "operandSegmentSizes", - parser.getBuilder().getDenseI32ArrayAttr(operandSegmentSizes)); - return ::mlir::success(); }); } @@ -2238,12 +2156,6 @@ void TryCallOp::print(::mlir::OpAsmPrinter &state) { printCallCommon(*this, getCalleeAttr(), state); } -mlir::SuccessorOperands TryCallOp::getSuccessorOperands(unsigned index) { - assert(index < getNumSuccessors() && "invalid successor index"); - return SuccessorOperands(index == 0 ? getDestContOpsMutable() - : getDestAbortOpsMutable()); -} - //===----------------------------------------------------------------------===// // UnaryOp //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/exceptions.cir b/clang/test/CIR/IR/exceptions.cir index e40652bb9a16..8f9c038b5393 100644 --- a/clang/test/CIR/IR/exceptions.cir +++ b/clang/test/CIR/IR/exceptions.cir @@ -12,11 +12,8 @@ module { cir.scope { %10 = cir.scope { %0 = cir.alloca !cir.eh.info, cir.ptr , ["exception_info"] {alignment = 16 : i64} - %d = cir.try_call @div(%x, %y) : (!s32i, !s32i) -> !s32i, ^continue_A, ^abort, %0 - // CHECK: cir.try_call @div(%1, %arg0, %arg1) {operandSegmentSizes = array} : (!cir.ptr, !s32i, !s32i) -> !s32i - ^continue_A: - cir.br ^abort - ^abort: + %d = cir.try_call @div(%x, %y) : (!s32i, !s32i) -> !s32i, %0 + // CHECK: cir.try_call @div(%1, %arg0, %arg1) : (!cir.ptr, !s32i, !s32i) -> !s32i %1 = cir.load %0 : cir.ptr , !cir.eh.info cir.yield %1 : !cir.eh.info } : !cir.eh.info From a2dba1ed317fa6ceb07bf2a90f178fd7add47a72 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 Jan 2024 14:35:43 -0800 Subject: [PATCH 1351/2301] [CIR] TryCallOp: improve printing --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 +-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 33 +++++++++++++------- clang/test/CIR/IR/exceptions.cir | 4 +-- 3 files changed, 27 insertions(+), 15 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 181135f6fc6a..82a761b73afd 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2021,7 +2021,7 @@ def CallOp : CIR_CallOp<"call"> { def TryCallOp : CIR_CallOp<"try_call"> { let summary = "try call operation"; let description = [{ - Works very similar to `cir.call` but passes down an exception object + Very similar to `cir.call` but passes down an exception object in case anything is thrown by the callee. To walk the operands for this operation, use `getNumArgOperands()`, @@ -2034,7 +2034,8 @@ def TryCallOp : CIR_CallOp<"try_call"> { Example: ```mlir - %r = cir.try_call @division(%1, %2), %0 + %0 = cir.alloca !cir.eh.info, cir.ptr ... + %r = cir.try_call %exception(%0) @division(%1, %2), %0 ``` }]; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7b797fd9494f..4bec3353ff91 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1994,10 +1994,11 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { static ::mlir::ParseResult parseCallCommon( ::mlir::OpAsmParser &parser, ::mlir::OperationState &result, llvm::function_ref<::mlir::ParseResult(::mlir::OpAsmParser &, - ::mlir::OperationState &, int32_t)> - customOpHandler = [](::mlir::OpAsmParser &parser, - ::mlir::OperationState &result, - int32_t numCallArgs) { return mlir::success(); }) { + ::mlir::OperationState &)> + customOpHandler = + [](::mlir::OpAsmParser &parser, ::mlir::OperationState &result) { + return mlir::success(); + }) { mlir::FlatSymbolRefAttr calleeAttr; llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> ops; llvm::SMLoc opsLoc; @@ -2005,6 +2006,9 @@ static ::mlir::ParseResult parseCallCommon( llvm::ArrayRef<::mlir::Type> operandsTypes; llvm::ArrayRef<::mlir::Type> allResultTypes; + if (customOpHandler(parser, result)) + return ::mlir::failure(); + // If we cannot parse a string callee, it means this is an indirect call. if (!parser.parseOptionalAttribute(calleeAttr, "callee", result.attributes) .has_value()) { @@ -2035,9 +2039,6 @@ static ::mlir::ParseResult parseCallCommon( allResultTypes = opsFnTy.getResults(); result.addTypes(allResultTypes); - if (customOpHandler(parser, result, operandsTypes.size()).failed()) - return ::mlir::failure(); - if (parser.resolveOperands(ops, operandsTypes, opsLoc, result.operands)) return ::mlir::failure(); return ::mlir::success(); @@ -2126,21 +2127,28 @@ ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result) { return parseCallCommon( parser, result, - [](::mlir::OpAsmParser &parser, ::mlir::OperationState &result, - int32_t numCallArgs) -> ::mlir::ParseResult { + [](::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) -> ::mlir::ParseResult { ::mlir::OpAsmParser::UnresolvedOperand exceptionRawOperands[1]; ::llvm::ArrayRef<::mlir::OpAsmParser::UnresolvedOperand> exceptionOperands(exceptionRawOperands); ::llvm::SMLoc exceptionOperandsLoc; (void)exceptionOperandsLoc; - if (parser.parseComma()) - return ::mlir::failure(); + if (parser.parseKeyword("exception").failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected 'exception' keyword here"); + + if (parser.parseLParen().failed()) + return parser.emitError(parser.getCurrentLocation(), "expected '('"); exceptionOperandsLoc = parser.getCurrentLocation(); if (parser.parseOperand(exceptionRawOperands[0])) return ::mlir::failure(); + if (parser.parseRParen().failed()) + return parser.emitError(parser.getCurrentLocation(), "expected ')'"); + auto exceptionPtrTy = cir::PointerType::get( parser.getBuilder().getContext(), parser.getBuilder().getType<::mlir::cir::ExceptionInfoType>()); @@ -2153,6 +2161,9 @@ ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, } void TryCallOp::print(::mlir::OpAsmPrinter &state) { + state << " exception("; + state << getExceptionInfo(); + state << ")"; printCallCommon(*this, getCalleeAttr(), state); } diff --git a/clang/test/CIR/IR/exceptions.cir b/clang/test/CIR/IR/exceptions.cir index 8f9c038b5393..d11c720e4275 100644 --- a/clang/test/CIR/IR/exceptions.cir +++ b/clang/test/CIR/IR/exceptions.cir @@ -12,8 +12,8 @@ module { cir.scope { %10 = cir.scope { %0 = cir.alloca !cir.eh.info, cir.ptr , ["exception_info"] {alignment = 16 : i64} - %d = cir.try_call @div(%x, %y) : (!s32i, !s32i) -> !s32i, %0 - // CHECK: cir.try_call @div(%1, %arg0, %arg1) : (!cir.ptr, !s32i, !s32i) -> !s32i + %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i + // CHECK: cir.try_call exception(%1) @div(%1, %arg0, %arg1) : (!cir.ptr, !s32i, !s32i) -> !s32i %1 = cir.load %0 : cir.ptr , !cir.eh.info cir.yield %1 : !cir.eh.info } : !cir.eh.info From 164f29573b6f5a3376a25ed0488b5de90c726355 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 Jan 2024 16:06:10 -0800 Subject: [PATCH 1352/2301] [CIR][CIRGen][Exceptions] More prep work on landing-pad like logic We can now handle more of EHScope::Catch and lay out the skeleton for CIR's version of that, adding tons of asserts for cases not currently handled. As part of this we're able to build the clause list as part of CatchOp based on the handlers, and create allocation for the exception_info type. In the next part (where we currently hit an assert) of this work, the CatchOp will then get its regions populated. Incremental steps into getting basic exceptions to work, not enough for a testcase just yet. --- clang/lib/CIR/CodeGen/CIRGenCleanup.h | 11 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 171 ++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 33 ++++- 3 files changed, 196 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h index fa3b8cde0d25..4627b60d1c63 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -37,7 +37,7 @@ struct CatchTypeInfo { /// A protected scope for zero-cost EH handling. class EHScope { - mlir::Block *CachedLandingPad; + mlir::Operation *CachedLandingPad; mlir::Block *CachedEHDispatchBlock; EHScopeStack::stable_iterator EnclosingEHScope; @@ -108,9 +108,9 @@ class EHScope { Kind getKind() const { return static_cast(CommonBits.Kind); } - mlir::Block *getCachedLandingPad() const { return CachedLandingPad; } + mlir::Operation *getCachedLandingPad() const { return CachedLandingPad; } - void setCachedLandingPad(mlir::Block *block) { CachedLandingPad = block; } + void setCachedLandingPad(mlir::Operation *op) { CachedLandingPad = op; } mlir::Block *getCachedEHDispatchBlock() const { return CachedEHDispatchBlock; @@ -121,8 +121,11 @@ class EHScope { } bool hasEHBranches() const { + // Traditional LLVM codegen also checks for `!block->use_empty()`, but + // in CIRGen the block content is not important, just used as a way to + // signal `hasEHBranches`. if (mlir::Block *block = getCachedEHDispatchBlock()) - return !block->use_empty(); + return true; return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 383968cbb517..74b27cea67b3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -267,13 +267,22 @@ mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { auto scopeLoc = getLoc(S.getSourceRange()); auto res = mlir::success(); + // This scope represents the higher level try {} statement. builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { CIRGenFunction::LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - - // Create the skeleton for the catch statements. + // Allocate space for our exception info that might be passed down + // to `cir.try_call` everytime a call happens. + auto exceptionInfo = buildAlloca( + "__exception_ptr", + mlir::cir::PointerType::get( + b.getContext(), b.getType<::mlir::cir::ExceptionInfoType>()), + loc, CharUnits::One()); + + // Create the skeleton for the catch statements to be further populated + // by cir::CIRGenFunction::buildLandingPad. auto catchOp = builder.create( tryLoc, // FIXME(cir): we can do better source location here. [&](mlir::OpBuilder &b, mlir::Location loc, @@ -284,7 +293,9 @@ mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { builder.createBlock(r); } }); + ExceptionInfoRAIIObject ehx{*this, {exceptionInfo, catchOp}}; + // Do actual emission. enterCXXTryStmt(S, catchOp); if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) { @@ -390,7 +401,7 @@ static bool isNonEHScope(const EHScope &S) { llvm_unreachable("Invalid EHScope Kind!"); } -mlir::Block *CIRGenFunction::buildLandingPad() { +mlir::Operation *CIRGenFunction::buildLandingPad() { assert(EHStack.requiresLandingPad()); assert(!CGM.getLangOpts().IgnoreExceptions && "LandingPad should not be emitted when -fignore-exceptions are in " @@ -403,26 +414,164 @@ mlir::Block *CIRGenFunction::buildLandingPad() { case EHScope::Catch: case EHScope::Cleanup: case EHScope::Filter: - llvm_unreachable("NYI"); if (auto *lpad = innermostEHScope.getCachedLandingPad()) return lpad; } + auto catchOp = currExceptionInfo.catchOp; + assert(catchOp && "Should be valid"); { // Save the current CIR generation state. mlir::OpBuilder::InsertionGuard guard(builder); - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); - // FIXME(cir): handle CIR relevant landing pad bits, there's no good - // way to assert here right now and leaving one in break important - // testcases. Work to fill this in is coming soon. + + // Traditional LLVM codegen creates the lpad basic block, extract + // values, landing pad instructions, etc. + + // Accumulate all the handlers in scope. + bool hasCatchAll = false; + bool hasCleanup = false; + bool hasFilter = false; + SmallVector filterTypes; + llvm::SmallPtrSet catchTypes; + SmallVector clauses; + + for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end(); I != E; + ++I) { + + switch (I->getKind()) { + case EHScope::Cleanup: + // If we have a cleanup, remember that. + llvm_unreachable("NYI"); + continue; + + case EHScope::Filter: { + llvm_unreachable("NYI"); + } + + case EHScope::Terminate: + // Terminate scopes are basically catch-alls. + // assert(!hasCatchAll); + // hasCatchAll = true; + // goto done; + llvm_unreachable("NYI"); + + case EHScope::Catch: + break; + } + + EHCatchScope &catchScope = cast(*I); + for (unsigned hi = 0, he = catchScope.getNumHandlers(); hi != he; ++hi) { + EHCatchScope::Handler handler = catchScope.getHandler(hi); + assert(handler.Type.Flags == 0 && + "landingpads do not support catch handler flags"); + + // If this is a catch-all, register that and abort. + if (!handler.Type.RTTI) { + assert(!hasCatchAll); + hasCatchAll = true; + goto done; + } + + // Check whether we already have a handler for this type. + if (catchTypes.insert(handler.Type.RTTI).second) { + // If not, keep track to later add to catch op. + clauses.push_back(handler.Type.RTTI); + } + } + } + + done: + // If we have a catch-all, add null to the landingpad. + assert(!(hasCatchAll && hasFilter)); + if (hasCatchAll) { + llvm_unreachable("NYI"); + + // If we have an EH filter, we need to add those handlers in the + // right place in the landingpad, which is to say, at the end. + } else if (hasFilter) { + // Create a filter expression: a constant array indicating which filter + // types there are. The personality routine only lands here if the filter + // doesn't match. + llvm_unreachable("NYI"); + + // Otherwise, signal that we at least have cleanups. + } else if (hasCleanup) { + llvm_unreachable("NYI"); + } + + assert((clauses.size() > 0 || hasCleanup) && "CatchOp has no clauses!"); + + // Add final array of clauses into catchOp. + catchOp.setCatchersAttr( + mlir::ArrayAttr::get(builder.getContext(), clauses)); + + // In traditional LLVM codegen. this tells the backend how to generate the + // landing pad by generating a branch to the dispatch block. In CIR the same + // function is called to gather some state, but this block info it's not + // useful per-se. + (void)getEHDispatchBlock(EHStack.getInnermostEHScope()); } - llvm_unreachable("NYI"); - return nullptr; + return catchOp; +} + +// Differently from LLVM traditional codegen, there are no dispatch blocks +// to look at given cir.try_call does not jump to blocks like invoke does. +// However, we keep this around since other parts of CIRGen use +// getCachedEHDispatchBlock to infer state. +mlir::Block * +CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) { + if (EHPersonality::get(*this).usesFuncletPads()) + llvm_unreachable("NYI"); + + // The dispatch block for the end of the scope chain is a block that + // just resumes unwinding. + if (si == EHStack.stable_end()) + llvm_unreachable("NYI"); + + // Otherwise, we should look at the actual scope. + EHScope &scope = *EHStack.find(si); + + auto *dispatchBlock = scope.getCachedEHDispatchBlock(); + if (!dispatchBlock) { + switch (scope.getKind()) { + case EHScope::Catch: { + // Apply a special case to a single catch-all. + EHCatchScope &catchScope = cast(scope); + if (catchScope.getNumHandlers() == 1 && + catchScope.getHandler(0).isCatchAll()) { + dispatchBlock = catchScope.getHandler(0).Block; + + // Otherwise, make a dispatch block. + } else { + // As said in the function comment, just signal back we + // have something - even though the block value doesn't + // have any real meaning. + dispatchBlock = catchScope.getHandler(0).Block; + assert(dispatchBlock && "find another approach to signal"); + } + break; + } + + case EHScope::Cleanup: + llvm_unreachable("NYI"); + break; + + case EHScope::Filter: + llvm_unreachable("NYI"); + break; + + case EHScope::Terminate: + llvm_unreachable("NYI"); + break; + } + scope.setCachedEHDispatchBlock(dispatchBlock); + } + return dispatchBlock; } -mlir::Block *CIRGenFunction::getInvokeDestImpl() { +mlir::Operation *CIRGenFunction::getInvokeDestImpl() { assert(EHStack.requiresLandingPad()); assert(!EHStack.empty()); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index b2604265cf74..64f48ba97aeb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -301,6 +301,30 @@ class CIRGenFunction : public CIRGenTypeCache { using SymTableScopeTy = llvm::ScopedHashTableScope; + /// Try/Catch: calls within try statements need to refer to local + /// allocas for the exception info + struct CIRExceptionInfo { + mlir::Value exceptionAddr{}; + mlir::cir::CatchOp catchOp{}; + }; + CIRExceptionInfo currExceptionInfo{}; + class ExceptionInfoRAIIObject { + CIRGenFunction &P; + CIRExceptionInfo OldVal{}; + + public: + ExceptionInfoRAIIObject(CIRGenFunction &p, CIRExceptionInfo info) : P(p) { + if (P.currExceptionInfo.exceptionAddr) + OldVal = P.currExceptionInfo; + P.currExceptionInfo = info; + } + + /// Can be used to restore the state early, before the dtor + /// is run. + void restore() { P.currExceptionInfo = OldVal; } + ~ExceptionInfoRAIIObject() { restore(); } + }; + enum class EvaluationOrder { ///! No langauge constraints on evaluation order. Default, @@ -1480,14 +1504,15 @@ class CIRGenFunction : public CIRGenTypeCache { }; /// Emits landing pad information for the current EH stack. - mlir::Block *buildLandingPad(); + mlir::Operation *buildLandingPad(); + mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope); - // TODO(cir): perhaps return a mlir::Block* here, for now - // only check if a landing pad is required. - mlir::Block *getInvokeDestImpl(); + mlir::Operation *getInvokeDestImpl(); bool getInvokeDest() { if (!EHStack.requiresLandingPad()) return false; + // cir.try_call does not require a block destination, but keep the + // overall traditional LLVM codegen names, and just ignore the result. return (bool)getInvokeDestImpl(); } From 3aa7145cc67bbcb6f51e05ad5fe7a91c9eb20444 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 Jan 2024 18:41:41 -0800 Subject: [PATCH 1353/2301] [CIR][CIRGen][Exceptions] Complete buildCatchDispatchBlock Doesn't do a lot of things compared to LLVM traditional codegen, one more step towards basic exception support. No testcase possible just yet. --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 74b27cea67b3..8a604453870e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -330,7 +330,28 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, return; } - llvm_unreachable("NYI"); + // In traditional LLVM codegen, the right handler is selected (with calls to + // eh_typeid_for) and the selector value is loaded. After that, blocks get + // connected for later codegen. In CIR, these are all implicit behaviors of + // cir.catch - not a lot of work to do. + // + // Test against each of the exception types we claim to catch. + for (unsigned i = 0, e = catchScope.getNumHandlers();; ++i) { + assert(i < e && "ran off end of handlers!"); + const EHCatchScope::Handler &handler = catchScope.getHandler(i); + + auto typeValue = handler.Type.RTTI; + assert(handler.Type.Flags == 0 && "catch handler flags not supported"); + assert(typeValue && "fell into catch-all case!"); + // Check for address space mismatch: if (typeValue->getType() != argTy) + assert(!UnimplementedFeature::addressSpace()); + + // If this is the last handler, we're at the end, and the next + // block is the block for the enclosing EH scope. Make sure to call + // getEHDispatchBlock for caching it. + if (i + 1 == e) + (void)CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope()); + } } void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S, From 3c8dc9ff5c42ca7a4a3f6032f71a718279f37854 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 18 Jan 2024 15:34:21 -0800 Subject: [PATCH 1354/2301] [CIR] Add cir.resume op and use it in cir.catch - Add an extra CatchOp region to hold fallback (where EH usually resumes or rethrows as part of try/catch). - Emit `cir.resume` on the fallback region. Incremental step into the next assertion, still missing pieces before adding the first testcase. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 25 +++++++++ .../include/clang/CIR/Dialect/IR/CIRTypes.td | 2 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 52 +++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +- 4 files changed, 77 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 82a761b73afd..967e50839df4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -718,6 +718,31 @@ def ContinueOp : CIR_Op<"continue", [Terminator]> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// Resume +//===----------------------------------------------------------------------===// + +def ResumeOp : CIR_Op<"resume", [ReturnLike, Terminator, + ParentOneOf<["CatchOp"]>]> { + let summary = "Resumes execution after not catching exceptions"; + let description = [{ + The `cir.resume` operation terminates a region on `cir.catch`, "resuming" + or continuing the unwind process. The incoming argument is of !cir.eh_info + populated by `cir.try_call` and available in `cir.catch`. + + Examples: + ```mlir + cir.catch %4 { + ... + fallback { cir.resume(%0) }; + } + ``` + }]; + + let arguments = (ins ExceptionInfoPtr:$ptr); + let assemblyFormat = "$ptr attr-dict"; +} + //===----------------------------------------------------------------------===// // ScopeOp //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 7e85d3ddeff3..341de9406c22 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -283,7 +283,7 @@ def ExceptionInfoPtr : Type< ]>, "void*">, BuildableType< "mlir::cir::PointerType::get($_builder.getContext()," - "mlir::cir::ExceptionInfo::get($_builder.getContext()))"> { + "mlir::cir::ExceptionInfoType::get($_builder.getContext()))"> { } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 8a604453870e..0316536e7393 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -251,6 +251,36 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { DeactivateCleanupBlock(cleanup, op); } +mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup) { + // Just like some other try/catch related logic: return the basic block + // pointer but only use it to denote we're tracking things, but there + // shouldn't be any changes to that block after work done in this function. + auto catchOp = currExceptionInfo.catchOp; + assert(catchOp.getNumRegions() && "expected at least one region"); + auto &fallbackRegion = catchOp.getRegion(catchOp.getNumRegions() - 1); + + auto *resumeBlock = &fallbackRegion.getBlocks().back(); + if (!resumeBlock->empty()) + return resumeBlock; + + auto ip = getBuilder().saveInsertionPoint(); + getBuilder().setInsertionPointToStart(resumeBlock); + + const EHPersonality &Personality = EHPersonality::get(*this); + + // This can always be a call because we necessarily didn't find + // anything on the EH stack which needs our help. + const char *RethrowName = Personality.CatchallRethrowFn; + if (RethrowName != nullptr && !isCleanup) { + llvm_unreachable("NYI"); + } + + getBuilder().create(catchOp.getLoc(), + currExceptionInfo.exceptionAddr); + getBuilder().restoreInsertionPoint(ip); + return resumeBlock; +} + mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { const llvm::Triple &T = getTarget().getTriple(); // If we encounter a try statement on in an OpenMP target region offloaded to @@ -288,7 +318,9 @@ mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &result) { mlir::OpBuilder::InsertionGuard guard(b); - for (int i = 0, e = numHandlers; i != e; ++i) { + // Once for each handler and one for fallback (which could be a + // resume or rethrow). + for (int i = 0, e = numHandlers + 1; i != e; ++i) { auto *r = result.addRegion(); builder.createBlock(r); } @@ -346,11 +378,25 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, // Check for address space mismatch: if (typeValue->getType() != argTy) assert(!UnimplementedFeature::addressSpace()); + bool nextIsEnd = false; // If this is the last handler, we're at the end, and the next // block is the block for the enclosing EH scope. Make sure to call // getEHDispatchBlock for caching it. - if (i + 1 == e) + if (i + 1 == e) { (void)CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope()); + nextIsEnd = true; + + // If the next handler is a catch-all, we're at the end, and the + // next block is that handler. + } else if (catchScope.getHandler(i + 1).isCatchAll()) { + // Block already created when creating CatchOp, just mark this + // is the end. + nextIsEnd = true; + } + + // If the next handler is a catch-all, we're completely done. + if (nextIsEnd) + return; } } @@ -549,7 +595,7 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) { // The dispatch block for the end of the scope chain is a block that // just resumes unwinding. if (si == EHStack.stable_end()) - llvm_unreachable("NYI"); + return getEHResumeBlock(true); // Otherwise, we should look at the actual scope. EHScope &scope = *EHStack.find(si); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 64f48ba97aeb..02d7b26cd48b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1503,8 +1503,9 @@ class CIRGenFunction : public CIRGenTypeCache { bool isConditional() const { return IsConditional; } }; - /// Emits landing pad information for the current EH stack. + /// Emits try/catch information for the current EH stack. mlir::Operation *buildLandingPad(); + mlir::Block *getEHResumeBlock(bool isCleanup); mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope); mlir::Operation *getInvokeDestImpl(); From 8bd50f3778d37724384f937a0954fc94e51ce5cc Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 19 Jan 2024 08:23:11 +0800 Subject: [PATCH 1355/2301] [CIR][CIRGen] Support wide string literals (#399) This commit supports the codegen of wide string literals, including `wchar_t` string literals, `char16_t` string literals, and `char32_t` string literals. I'm not following the proposal in #374. The clang frontend doesn't record the literal string. It only records the encoded code units for wide string literals. So I believe that a dedicated string attribute with an encoding tag as described in #374 may not be that helpful as I thought. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 33 ++++++++++++++++++++++++-- clang/test/CIR/CodeGen/wide-string.cpp | 26 ++++++++++++++++++++ 2 files changed, 57 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/wide-string.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c01d7c96485f..955eb9a2453b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1104,8 +1104,37 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { return builder.getString(Str, eltTy, finalSize); } - assert(0 && "not implemented"); - return {}; + auto arrayTy = + getTypes().ConvertType(E->getType()).dyn_cast(); + assert(arrayTy && "string literals must be emitted as an array type"); + + auto arrayEltTy = arrayTy.getEltType().dyn_cast(); + assert(arrayEltTy && + "string literal elements must be emitted as integral type"); + + auto arraySize = arrayTy.getSize(); + auto literalSize = E->getLength(); + + // Collect the code units. + SmallVector elementValues; + elementValues.reserve(arraySize); + for (unsigned i = 0; i < literalSize; ++i) + elementValues.push_back(E->getCodeUnit(i)); + elementValues.resize(arraySize); + + // If the string is full of null bytes, emit a #cir.zero instead. + if (std::all_of(elementValues.begin(), elementValues.end(), + [](uint32_t x) { return x == 0; })) + return builder.getZeroAttr(arrayTy); + + // Otherwise emit a constant array holding the characters. + SmallVector elements; + elements.reserve(arraySize); + for (uint64_t i = 0; i < arraySize; ++i) + elements.push_back(mlir::cir::IntAttr::get(arrayEltTy, elementValues[i])); + + auto elementsAttr = mlir::ArrayAttr::get(builder.getContext(), elements); + return builder.getConstArray(elementsAttr, arrayTy); } // TODO(cir): this could be a common AST helper for both CIR and LLVM codegen. diff --git a/clang/test/CIR/CodeGen/wide-string.cpp b/clang/test/CIR/CodeGen/wide-string.cpp new file mode 100644 index 000000000000..1b3cacc4dd49 --- /dev/null +++ b/clang/test/CIR/CodeGen/wide-string.cpp @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +const char16_t *test_utf16() { + return u"你好世界"; +} + +// CHECK: cir.global "private" constant internal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u16i, #cir.int<22909> : !u16i, #cir.int<19990> : !u16i, #cir.int<30028> : !u16i, #cir.int<0> : !u16i]> : !cir.array + +const char32_t *test_utf32() { + return U"你好世界"; +} + +// CHECK: cir.global "private" constant internal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u32i, #cir.int<22909> : !u32i, #cir.int<19990> : !u32i, #cir.int<30028> : !u32i, #cir.int<0> : !u32i]> : !cir.array + +const char16_t *test_zero16() { + return u"\0\0\0\0"; +} + +// CHECK: cir.global "private" constant internal @{{.+}} = #cir.zero : !cir.array + +const char32_t *test_zero32() { + return U"\0\0\0\0"; +} + +// CHECK: cir.global "private" constant internal @{{.+}} = #cir.zero : !cir.array From 6329b4ec228a891c66666870578ad2489a39a4d9 Mon Sep 17 00:00:00 2001 From: Fabian Mora Date: Mon, 22 Jan 2024 14:13:55 -0500 Subject: [PATCH 1356/2301] [CIR][OpenMP] Initial commit for OpenMP support in CIR (#382) This patch introduces initial support for: ``` pragma omp parallel ``` This patch doesn't add support for any of the `parallel` clauses, including variable privatization; thus, all variables are handled as shared. This PR fixes issue #285. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 10 +-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 12 ++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 + clang/lib/CIR/CodeGen/CIRGenModule.cpp | 74 ++++++++++++++---- clang/lib/CIR/CodeGen/CIRGenModule.h | 10 +++ clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp | 54 +++++++++++++ clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h | 77 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp | 45 +++++++++++ clang/lib/CIR/CodeGen/CIRGenerator.cpp | 2 + clang/lib/CIR/CodeGen/CMakeLists.txt | 3 + .../CodeGen/UnimplementedFeatureGuarding.h | 2 + .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 2 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 + .../CIR/Lowering/ThroughMLIR/CMakeLists.txt | 2 + .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 2 + clang/test/CIR/CodeGen/openmp.cpp | 36 +++++++++ clang/test/CIR/Lowering/openmp.cir | 35 +++++++++ clang/tools/cir-opt/cir-opt.cpp | 4 +- 21 files changed, 359 insertions(+), 29 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp create mode 100644 clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h create mode 100644 clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp create mode 100644 clang/test/CIR/CodeGen/openmp.cpp create mode 100644 clang/test/CIR/Lowering/openmp.cir diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 662d24cd63a9..dd86a854e01c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -14,6 +14,7 @@ #include "CIRGenBuilder.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" +#include "CIRGenOpenMPRuntime.h" #include "EHScopeStack.h" #include "UnimplementedFeatureGuarding.h" #include "mlir/IR/Attributes.h" @@ -37,13 +38,8 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private && // getLangOpts().OpenCL)) assert(!UnimplementedFeature::openCL()); - assert(!UnimplementedFeature::openMP()); assert(Ty.getAddressSpace() == LangAS::Default); assert(!Ty->isVariablyModifiedType() && "not implemented"); - assert(!getContext() - .getLangOpts() - .OpenMP && // !CGF.getLangOpts().OpenMPIRBuilder - "not implemented"); assert(!D.hasAttr() && "not implemented"); auto loc = getLoc(D.getSourceRange()); @@ -59,7 +55,9 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { Address address = Address::invalid(); Address allocaAddr = Address::invalid(); - Address openMPLocalAddr = Address::invalid(); + Address openMPLocalAddr = + getCIRGenModule().getOpenMPRuntime().getAddressOfLocalVariable(*this, &D); + assert(!getLangOpts().OpenMPIsTargetDevice && "NYI"); if (getLangOpts().OpenMP && openMPLocalAddr.isValid()) { llvm_unreachable("NYI"); } else if (Ty->isConstantSizeType()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 828687bcea8a..777c9b57de31 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -15,6 +15,7 @@ #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "CIRGenOpenMPRuntime.h" #include "CIRGenValue.h" #include "UnimplementedFeatureGuarding.h" @@ -760,8 +761,11 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { if (auto *FD = LambdaCaptureFields.lookup(VD)) return buildCapturedFieldLValue(*this, FD, CXXABIThisValue); assert(!UnimplementedFeature::CGCapturedStmtInfo() && "NYI"); - llvm_unreachable("NYI"); + // TODO[OpenMP]: Find the appropiate captured variable value and return + // it. + // TODO[OpenMP]: Set non-temporal information in the captured LVal. // LLVM codegen: + assert(!UnimplementedFeature::openMP()); // Address addr = GetAddrOfBlockDecl(VD); // return MakeAddrLValue(addr, T, AlignmentSource::Decl); } @@ -911,9 +915,9 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { } else { buildStoreThroughLValue(RV, LV); } - - assert(!getContext().getLangOpts().OpenMP && - "last priv cond not implemented"); + if (getLangOpts().OpenMP) + CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this, + E->getLHS()); return LV; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 68e2ca82c2ff..0f85e0da58dd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -14,6 +14,7 @@ #include "CIRDataLayout.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "CIRGenOpenMPRuntime.h" #include "UnimplementedFeatureGuarding.h" #include "clang/AST/StmtVisitor.h" @@ -1805,7 +1806,9 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( else CGF.buildStoreThroughLValue(RValue::get(Result), LHSLV); - assert(!CGF.getLangOpts().OpenMP && "Not implemented"); + if (CGF.getLangOpts().OpenMP) + CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, + E->getLHS()); return LHSLV; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index a3c6b8da87d1..b0db17212dd0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -13,6 +13,7 @@ #include "CIRGenFunction.h" #include "CIRGenCXXABI.h" #include "CIRGenModule.h" +#include "CIRGenOpenMPRuntime.h" #include "UnimplementedFeatureGuarding.h" #include "clang/AST/ASTLambda.h" @@ -977,7 +978,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // TODO: prologuecleanupdepth if (getLangOpts().OpenMP && CurCodeDecl) - llvm_unreachable("NYI"); + CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl); // TODO: buildFunctionProlog diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 02d7b26cd48b..de0f20718980 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -928,6 +928,9 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); mlir::LogicalResult buildContinueStmt(const clang::ContinueStmt &S); + // OpenMP gen functions: + mlir::LogicalResult buildOMPParallelDirective(const OMPParallelDirective &S); + LValue buildOpaqueValueLValue(const OpaqueValueExpr *e); /// Emit code to compute a designator that specifies the location diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 955eb9a2453b..286149cd5539 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -15,6 +15,7 @@ #include "CIRGenCXXABI.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" +#include "CIRGenOpenMPRuntime.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" #include "TargetInfo.h" @@ -103,7 +104,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, codeGenOpts(CGO), theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, - VTables{*this} { + VTables{*this}, openMPRuntime(new CIRGenOpenMPRuntime(*this)) { // Initialize CIR signed integer types cache. SInt8Ty = @@ -317,7 +318,18 @@ bool CIRGenModule::MustBeEmitted(const ValueDecl *Global) { } bool CIRGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { - assert(!langOpts.OpenMP && "NYI"); + // In OpenMP 5.0 variables and function may be marked as + // device_type(host/nohost) and we should not emit them eagerly unless we sure + // that they must be emitted on the host/device. To be sure we need to have + // seen a declare target with an explicit mentioning of the function, we know + // we have if the level of the declare target attribute is -1. Note that we + // check somewhere else if we should emit this at all. + if (langOpts.OpenMP >= 50 && !langOpts.OpenMPSimd) { + std::optional ActiveAttr = + OMPDeclareTargetDeclAttr::getActiveAttr(Global); + if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1) + return false; + } const auto *FD = dyn_cast(Global); if (FD) { @@ -337,6 +349,15 @@ bool CIRGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { ASTContext::InlineVariableDefinitionKind::WeakUnknown && "not implemented"); + // If OpenMP is enabled and threadprivates must be generated like TLS, delay + // codegen for global variables, because they may be marked as threadprivate. + if (langOpts.OpenMP && langOpts.OpenMPUseTLS && + getASTContext().getTargetInfo().isTLSSupported() && + isa(Global) && + !Global->getType().isConstantStorage(getASTContext(), false, false) && + !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global)) + return false; + assert((FD || VD) && "Only FunctionDecl and VarDecl should hit this path so far."); return true; @@ -348,7 +369,22 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { assert(!Global->hasAttr() && "NYI"); assert(!Global->hasAttr() && "NYI"); assert(!langOpts.CUDA && "NYI"); - assert(!langOpts.OpenMP && "NYI"); + + if (langOpts.OpenMP) { + // If this is OpenMP, check if it is legal to emit this global normally. + if (openMPRuntime && openMPRuntime->emitTargetGlobal(GD)) { + assert(!UnimplementedFeature::openMPRuntime()); + return; + } + if (auto *DRD = dyn_cast(Global)) { + assert(!UnimplementedFeature::openMP()); + return; + } + if (auto *DMD = dyn_cast(Global)) { + assert(!UnimplementedFeature::openMP()); + return; + } + } // Ignore declarations, they will be emitted on their first use. if (const auto *FD = dyn_cast(Global)) { @@ -372,7 +408,13 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { assert(VD->isFileVarDecl() && "Cannot emit local var decl as global."); if (VD->isThisDeclarationADefinition() != VarDecl::Definition && !astCtx.isMSStaticDataMemberInlineDefinition(VD)) { - assert(!getLangOpts().OpenMP && "not implemented"); + if (langOpts.OpenMP) { + // Emit declaration of the must-be-emitted declare target variable. + if (std::optional Res = + OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) { + assert(0 && "OMPDeclareTargetDeclAttr NYI"); + } + } // If this declaration may have caused an inline variable definition // to change linkage, make sure that it's emitted. // TODO(cir): probably use GetAddrOfGlobalVar(VD) below? @@ -577,8 +619,8 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, !D->hasAttr()) assert(!UnimplementedFeature::setDLLStorageClass() && "NYI"); - if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && D) - assert(0 && "not implemented"); + if (langOpts.OpenMP && !langOpts.OpenMPSimd && D) + getOpenMPRuntime().registerTargetGlobalVariable(D, Entry); // TODO(cir): check TargetAS matches Entry address space if (Entry.getSymType() == Ty && @@ -648,10 +690,9 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, } // Handle things which are present even on external declarations. - auto &LangOpts = getLangOpts(); if (D) { - if (LangOpts.OpenMP && !LangOpts.OpenMPSimd) - assert(0 && "not implemented"); + if (langOpts.OpenMP && !langOpts.OpenMPSimd && D) + getOpenMPRuntime().registerTargetGlobalVariable(D, Entry); // FIXME: This code is overly simple and should be merged with other global // handling. @@ -2053,8 +2094,11 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // Any attempts to use a MultiVersion function should result in retrieving the // iFunc instead. Name mangling will handle the rest of the changes. if (const auto *FD = cast_or_null(D)) { - if (getLangOpts().OpenMP) - llvm_unreachable("open MP NYI"); + // For the device mark the function as one that should be emitted. + if (getLangOpts().OpenMPIsTargetDevice && FD->isDefined() && !DontDefer && + !IsForDefinition) { + assert(0 && "OpenMP target functions NYI"); + } if (FD->isMultiVersion()) llvm_unreachable("NYI"); } @@ -2292,9 +2336,9 @@ void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { } // If this is OpenMP, check if it is legal to emit this global normally. - if (getLangOpts().OpenMP) { - llvm_unreachable("NYI"); - } + if (getLangOpts().OpenMP && openMPRuntime && + openMPRuntime->emitTargetGlobal(D)) + return; // Otherwise, emit the definition and move on to the next one. buildGlobalDefinition(D, Op); @@ -2303,7 +2347,7 @@ void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { void CIRGenModule::buildDeferred(unsigned recursionLimit) { // Emit deferred declare target declarations if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) - llvm_unreachable("NYI"); + getOpenMPRuntime().emitDeferredTargetDecls(); // Emit code for any potentially referenced deferred decls. Since a previously // unused static decl may become used during the generation of code for a diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index e468c53e58d4..900210a7c24a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -46,6 +46,7 @@ namespace cir { class CIRGenFunction; class CIRGenCXXABI; class TargetCIRGenInfo; +class CIRGenOpenMPRuntime; enum ForDefinition_t : bool { NotForDefinition = false, ForDefinition = true }; @@ -100,6 +101,9 @@ class CIRGenModule : public CIRGenTypeCache { /// Holds information about C++ vtables. CIRGenVTables VTables; + /// Holds the OpenMP runtime + std::unique_ptr openMPRuntime; + /// Per-function codegen information. Updated everytime buildCIR is called /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; @@ -626,6 +630,12 @@ class CIRGenModule : public CIRGenTypeCache { /// Print out an error that codegen doesn't support the specified decl yet. void ErrorUnsupported(const Decl *D, const char *Type); + /// Return a reference to the configured OpenMP runtime. + CIRGenOpenMPRuntime &getOpenMPRuntime() { + assert(openMPRuntime != nullptr); + return *openMPRuntime; + } + private: // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp new file mode 100644 index 000000000000..2060ce8e2d31 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp @@ -0,0 +1,54 @@ +//===--- CIRGenStmtOpenMP.cpp - Interface to OpenMP Runtimes --------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides a class for OpenMP runtime MLIR code generation. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenOpenMPRuntime.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" + +using namespace cir; +using namespace clang; + +CIRGenOpenMPRuntime::CIRGenOpenMPRuntime(CIRGenModule &CGM) : CGM(CGM) {} + +Address CIRGenOpenMPRuntime::getAddressOfLocalVariable(CIRGenFunction &CGF, + const VarDecl *VD) { + assert(!UnimplementedFeature::openMPRuntime()); + return Address::invalid(); +} + +void CIRGenOpenMPRuntime::checkAndEmitLastprivateConditional( + CIRGenFunction &CGF, const Expr *LHS) { + assert(!UnimplementedFeature::openMPRuntime()); + return; +} + +void CIRGenOpenMPRuntime::registerTargetGlobalVariable( + const clang::VarDecl *VD, mlir::cir::GlobalOp globalOp) { + assert(!UnimplementedFeature::openMPRuntime()); + return; +} + +void CIRGenOpenMPRuntime::emitDeferredTargetDecls() const { + assert(!UnimplementedFeature::openMPRuntime()); + return; +} + +void CIRGenOpenMPRuntime::emitFunctionProlog(CIRGenFunction &CGF, + const clang::Decl *D) { + assert(!UnimplementedFeature::openMPRuntime()); + return; +} + +bool CIRGenOpenMPRuntime::emitTargetGlobal(clang::GlobalDecl &GD) { + assert(!UnimplementedFeature::openMPRuntime()); + return false; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h new file mode 100644 index 000000000000..c4a53db44c92 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h @@ -0,0 +1,77 @@ +//===--- CIRGenOpenMPRuntime.h - Interface to OpenMP Runtimes -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides a class for OpenMP runtime MLIR code generation. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENOPENMPRUNTIME_H +#define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENOPENMPRUNTIME_H + +#include "CIRGenValue.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +namespace clang { +class Decl; +class Expr; +class GlobalDecl; +class VarDecl; +} // namespace clang + +namespace cir { +class CIRGenModule; +class CIRGenFunction; + +class CIRGenOpenMPRuntime { +public: + explicit CIRGenOpenMPRuntime(CIRGenModule &CGM); + virtual ~CIRGenOpenMPRuntime() {} + + /// Gets the OpenMP-specific address of the local variable. + virtual Address getAddressOfLocalVariable(CIRGenFunction &CGF, + const clang::VarDecl *VD); + + /// Checks if the provided \p LVal is lastprivate conditional and emits the + /// code to update the value of the original variable. + /// \code + /// lastprivate(conditional: a) + /// ... + /// a; + /// lp_a = ...; + /// #pragma omp critical(a) + /// if (last_iv_a <= iv) { + /// last_iv_a = iv; + /// global_a = lp_a; + /// } + /// \endcode + virtual void checkAndEmitLastprivateConditional(CIRGenFunction &CGF, + const clang::Expr *LHS); + + /// Checks if the provided global decl \a GD is a declare target variable and + /// registers it when emitting code for the host. + virtual void registerTargetGlobalVariable(const clang::VarDecl *VD, + mlir::cir::GlobalOp globalOp); + + /// Emit deferred declare target variables marked for deferred emission. + void emitDeferredTargetDecls() const; + + /// Emits OpenMP-specific function prolog. + /// Required for device constructs. + virtual void emitFunctionProlog(CIRGenFunction &CGF, const clang::Decl *D); + + /// Emit the global \a GD if it is meaningful for the target. Returns + /// if it was emitted successfully. + /// \param GD Global to scan. + virtual bool emitTargetGlobal(clang::GlobalDecl &D); + +protected: + CIRGenModule &CGM; +}; +} // namespace cir + +#endif // LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENOPENMPRUNTIME_H diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index d96264f2f408..a9c962ad6666 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -191,6 +191,10 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::GCCAsmStmtClass: case Stmt::MSAsmStmtClass: return buildAsmStmt(cast(*S)); + // OMP directives: + case Stmt::OMPParallelDirectiveClass: + return buildOMPParallelDirective(cast(*S)); + // Unsupported AST nodes: case Stmt::CapturedStmtClass: case Stmt::ObjCAtTryStmtClass: case Stmt::ObjCAtThrowStmtClass: @@ -200,7 +204,6 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::SEHTryStmtClass: case Stmt::OMPMetaDirectiveClass: case Stmt::OMPCanonicalLoopClass: - case Stmt::OMPParallelDirectiveClass: case Stmt::OMPSimdDirectiveClass: case Stmt::OMPTileDirectiveClass: case Stmt::OMPUnrollDirectiveClass: diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp new file mode 100644 index 000000000000..3874ef3dcee6 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp @@ -0,0 +1,45 @@ +//===--- CIRGenStmtOpenMP.cpp - Emit MLIR Code from OpenMP Statements -----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit OpenMP Stmt nodes as MLIR code. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenFunction.h" +#include "CIRGenOpenMPRuntime.h" +#include "mlir/Dialect/OpenMP/OpenMPDialect.h" + +using namespace cir; +using namespace clang; +using namespace mlir::omp; + +mlir::LogicalResult +CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) { + mlir::LogicalResult res = mlir::success(); + auto scopeLoc = getLoc(S.getSourceRange()); + // Create a `omp.parallel` op. + auto parallelOp = builder.create(scopeLoc); + mlir::Block &block = parallelOp.getRegion().emplaceBlock(); + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(&block); + // Create a scope for the OpenMP region. + builder.create( + scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()}; + // Emit the body of the region. + if (buildStmt(S.getCapturedStmt(OpenMPDirectiveKind::OMPD_parallel) + ->getCapturedStmt(), + /*useCurrentScope=*/true) + .failed()) + res = mlir::failure(); + }); + // Add the terminator for `omp.parallel`. + builder.create(getLoc(S.getSourceRange().getEnd())); + return res; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 4fe46c923dda..4d6a6c6c5d84 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -16,6 +16,7 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/OpenMP/OpenMPDialect.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Target/LLVMIR/Import.h" @@ -58,6 +59,7 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); + mlirCtx->getOrLoadDialect(); CGM = std::make_unique(*mlirCtx.get(), astCtx, codeGenOpts, Diags); auto mod = CGM->getModule(); diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 62df7a8d3d68..4c11e3eb8368 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -26,7 +26,9 @@ add_clang_library(clangCIR CIRGenFunction.cpp CIRGenItaniumCXXABI.cpp CIRGenModule.cpp + CIRGenOpenMPRuntime.cpp CIRGenStmt.cpp + CIRGenStmtOpenMP.cpp CIRGenTBAA.cpp CIRGenTypes.cpp CIRGenVTables.cpp @@ -58,6 +60,7 @@ add_clang_library(clangCIR MLIRIR MLIRLLVMCommonConversion MLIRLLVMDialect + MLIROpenMPDialect MLIRLLVMToLLVMIRTranslation MLIRMemRefDialect MLIRMemRefToLLVM diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 1e5d1dfe7526..6c699c709ab3 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -121,6 +121,8 @@ struct UnimplementedFeature { static bool cxxABI() { return false; } static bool openCL() { return false; } static bool openMP() { return false; } + static bool openMPRuntime() { return false; } + static bool openMPTarget() { return false; } static bool ehStack() { return false; } static bool isVarArg() { return false; } static bool setNonGC() { return false; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index 14b879ee1c44..eb6991852332 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -34,4 +34,6 @@ add_clang_library(clangCIRLoweringDirectToLLVM MLIRTransforms MLIRSupport MLIRMemRefDialect + MLIROpenMPDialect + MLIROpenMPToLLVMIRTranslation ) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b6afaf29671d..1f75712c5728 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -42,6 +42,7 @@ #include "mlir/Support/LogicalResult.h" #include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/DialectConversion.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" @@ -2337,6 +2338,7 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx, mlir::registerBuiltinDialectTranslation(*mlirCtx); mlir::registerLLVMDialectTranslation(*mlirCtx); + mlir::registerOpenMPDialectTranslation(*mlirCtx); registerCIRDialectTranslation(*mlirCtx); auto ModuleName = theModule.getName(); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index bcee5c65984d..1b84236a9c36 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -35,4 +35,6 @@ add_clang_library(clangCIRLoweringThroughMLIR MLIRTransforms MLIRSupport MLIRMemRefDialect + MLIROpenMPDialect + MLIROpenMPToLLVMIRTranslation ) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index e0a06c5bf401..948d0a34e376 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -33,6 +33,7 @@ #include "mlir/Pass/PassManager.h" #include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/DialectConversion.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" @@ -679,6 +680,7 @@ lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, mlir::registerBuiltinDialectTranslation(*mlirCtx); mlir::registerLLVMDialectTranslation(*mlirCtx); + mlir::registerOpenMPDialectTranslation(*mlirCtx); auto llvmModule = mlir::translateModuleToLLVMIR(theModule, llvmCtx); diff --git a/clang/test/CIR/CodeGen/openmp.cpp b/clang/test/CIR/CodeGen/openmp.cpp new file mode 100644 index 000000000000..a3c37da349b4 --- /dev/null +++ b/clang/test/CIR/CodeGen/openmp.cpp @@ -0,0 +1,36 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fopenmp -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// CHECK: cir.func +void omp_parallel_1() { +// CHECK: omp.parallel { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: } +// CHECK-NEXT: omp.terminator +// CHECK-NEXT: } +#pragma omp parallel +{ +} +} +// CHECK: cir.func +void omp_parallel_2() { +// CHECK: %[[YVarDecl:.+]] = {{.*}} ["y", init] +// CHECK: omp.parallel { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[XVarDecl:.+]] = {{.*}} ["x", init] +// CHECK-NEXT: %[[C1:.+]] = cir.const(#cir.int<1> : !s32i) +// CHECK-NEXT: cir.store %[[C1]], %[[XVarDecl]] +// CHECK-NEXT: %[[XVal:.+]] = cir.load %[[XVarDecl]] +// CHECK-NEXT: %[[COne:.+]] = cir.const(#cir.int<1> : !s32i) +// CHECK-NEXT: %[[BinOpVal:.+]] = cir.binop(add, %[[XVal]], %[[COne]]) +// CHECK-NEXT: cir.store %[[BinOpVal]], %[[YVarDecl]] +// CHECK-NEXT: } +// CHECK-NEXT: omp.terminator +// CHECK-NEXT: } + int y = 0; +#pragma omp parallel +{ + int x = 1; + y = x + 1; +} +} diff --git a/clang/test/CIR/Lowering/openmp.cir b/clang/test/CIR/Lowering/openmp.cir new file mode 100644 index 000000000000..73b3155252cc --- /dev/null +++ b/clang/test/CIR/Lowering/openmp.cir @@ -0,0 +1,35 @@ +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s + +!s32i = !cir.int +module { + cir.func @omp_parallel() { + %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.store %1, %0 : !s32i, cir.ptr + omp.parallel { + cir.scope { + %2 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.const(#cir.int<1> : !s32i) : !s32i + %6 = cir.binop(add, %4, %5) : !s32i + cir.store %6, %0 : !s32i, cir.ptr + } + omp.terminator + } + cir.return + } +} +// CHECK-LABEL: omp_parallel +// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call({{.*}}, ptr @omp_parallel..omp_par, +// CHECK: ret void +// CHECK-NEXT: } +// CHECK: define{{.*}} void @omp_parallel..omp_par(ptr +// CHECK: %[[YVar:.*]] = load ptr, ptr %{{.*}}, align 8 +// CHECK: %[[XVar:.*]] = alloca i32, i64 1, align 4 +// CHECK: store i32 1, ptr %[[XVar]], align 4 +// CHECK: %[[XVal:.*]] = load i32, ptr %[[XVar]], align 4 +// CHECK: %[[BinOp:.*]] = add i32 %[[XVal]], 1 +// CHECK: store i32 %[[BinOp]], ptr %[[YVar]], align 4 +// CHECK: ret diff --git a/clang/tools/cir-opt/cir-opt.cpp b/clang/tools/cir-opt/cir-opt.cpp index 67de6a1c99be..deee67afa8a4 100644 --- a/clang/tools/cir-opt/cir-opt.cpp +++ b/clang/tools/cir-opt/cir-opt.cpp @@ -18,6 +18,7 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/OpenMP/OpenMPDialect.h" #include "mlir/InitAllPasses.h" #include "mlir/Pass/PassRegistry.h" #include "mlir/Tools/mlir-opt/MlirOptMain.h" @@ -30,7 +31,8 @@ int main(int argc, char **argv) { mlir::DialectRegistry registry; registry.insert(); + mlir::LLVM::LLVMDialect, mlir::DLTIDialect, + mlir::omp::OpenMPDialect>(); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertMLIRToLLVMPass(); From b10a1133e7f625b982444ebf2ca03e6dcc92cab5 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 22 Jan 2024 17:21:00 -0300 Subject: [PATCH 1357/2301] [CIR][Interfaces] Implement LoopOpInterface Adds an interface to generically handle lowering and analysis of loop operations in CIR. It can also perform verification of invariants common to all loop operations. ghstack-source-id: 0e413b14ea063a2b0d75aeaca0af88e547c15277 Pull Request resolved: https://github.com/llvm/clangir/pull/405 --- .../include/clang/CIR/Dialect/IR/CIRDialect.h | 1 + clang/include/clang/CIR/Dialect/IR/CIROps.td | 23 +++- .../clang/CIR/Interfaces/CIRLoopOpInterface.h | 36 +++++++ .../CIR/Interfaces/CIRLoopOpInterface.td | 100 ++++++++++++++++++ .../clang/CIR/Interfaces/CMakeLists.txt | 1 + clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 37 ++----- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 26 +---- clang/lib/CIR/FrontendAction/CMakeLists.txt | 1 + .../lib/CIR/Interfaces/CIRLoopOpInterface.cpp | 54 ++++++++++ clang/lib/CIR/Interfaces/CMakeLists.txt | 2 + .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 1 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 +- .../CIR/Lowering/ThroughMLIR/CMakeLists.txt | 1 + clang/test/CIR/IR/invalid.cir | 2 +- clang/test/CIR/Transforms/merge-cleanups.cir | 30 ------ 17 files changed, 237 insertions(+), 86 deletions(-) create mode 100644 clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h create mode 100644 clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td create mode 100644 clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index 925f34e852a6..9d92cc20c0e2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -32,6 +32,7 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "clang/CIR/Interfaces/CIRLoopOpInterface.h" #include "clang/CIR/Interfaces/CIROpInterfaces.h" namespace mlir { diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 967e50839df4..a2a0e359a156 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -20,6 +20,7 @@ include "clang/CIR/Dialect/IR/CIRAttrs.td" include "clang/CIR/Interfaces/ASTAttrInterfaces.td" include "clang/CIR/Interfaces/CIROpInterfaces.td" +include "clang/CIR/Interfaces/CIRLoopOpInterface.td" include "mlir/Interfaces/ControlFlowInterfaces.td" include "mlir/Interfaces/FunctionInterfaces.td" @@ -1172,7 +1173,8 @@ def LoopOpKind : I32EnumAttr< } def LoopOp : CIR_Op<"loop", - [DeclareOpInterfaceMethods, + [LoopOpInterface, + DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, RecursivelySpeculatable, NoRegionArguments]> { let summary = "Loop"; @@ -1236,6 +1238,25 @@ def LoopOp : CIR_Op<"loop", ]; let hasVerifier = 1; + + let extraClassDeclaration = [{ + Region *maybeGetStep() { + if (getKind() == LoopOpKind::For) + return &getStep(); + return nullptr; + } + + llvm::SmallVector getRegionsInExecutionOrder() { + switch(getKind()) { + case LoopOpKind::For: + return llvm::SmallVector{&getCond(), &getBody(), &getStep()}; + case LoopOpKind::While: + return llvm::SmallVector{&getCond(), &getBody()}; + case LoopOpKind::DoWhile: + return llvm::SmallVector{&getBody(), &getCond()}; + } + } + }]; } //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h new file mode 100644 index 000000000000..2e8a0c8e8a94 --- /dev/null +++ b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h @@ -0,0 +1,36 @@ +//===- CIRLoopOpInterface.h - Interface for CIR loop-like ops --*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// Defines the interface to generically handle CIR loop operations. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_INTERFACES_CIR_CIRLOOPOPINTERFACE_H_ +#define CLANG_INTERFACES_CIR_CIRLOOPOPINTERFACE_H_ + +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/OpDefinition.h" +#include "mlir/IR/Operation.h" +#include "mlir/Interfaces/ControlFlowInterfaces.h" +#include "mlir/Interfaces/LoopLikeInterface.h" + +namespace mlir { +namespace cir { +namespace detail { + +/// Verify invariants of the LoopOpInterface. +::mlir::LogicalResult verifyLoopOpInterface(::mlir::Operation *op); + +} // namespace detail +} // namespace cir +} // namespace mlir + +/// Include the tablegen'd interface declarations. +#include "clang/CIR/Interfaces/CIRLoopOpInterface.h.inc" + +#endif // CLANG_INTERFACES_CIR_CIRLOOPOPINTERFACE_H_ diff --git a/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td new file mode 100644 index 000000000000..c2b871785ffd --- /dev/null +++ b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td @@ -0,0 +1,100 @@ +//===- CIRLoopOpInterface.td - Interface for CIR loop-like ops -*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef CLANG_CIR_INTERFACES_CIRLOOPOPINTERFACE +#define CLANG_CIR_INTERFACES_CIRLOOPOPINTERFACE + +include "mlir/IR/OpBase.td" +include "mlir/Interfaces/ControlFlowInterfaces.td" +include "mlir/Interfaces/LoopLikeInterface.td" + +def LoopOpInterface : OpInterface<"LoopOpInterface", [ + DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods +]> { + let description = [{ + Contains helper functions to query properties and perform transformations + on a loop. + }]; + let cppNamespace = "::mlir::cir"; + + let methods = [ + InterfaceMethod<[{ + Returns the loop's conditional region. + }], + /*retTy=*/"mlir::Region &", + /*methodName=*/"getCond" + >, + InterfaceMethod<[{ + Returns the loop's body region. + }], + /*retTy=*/"mlir::Region &", + /*methodName=*/"getBody" + >, + InterfaceMethod<[{ + Returns a pointer to the loop's step region or nullptr. + }], + /*retTy=*/"mlir::Region *", + /*methodName=*/"maybeGetStep", + /*args=*/(ins), + /*methodBody=*/"", + /*defaultImplementation=*/"return nullptr;" + >, + InterfaceMethod<[{ + Returns the first region to be executed in the loop. + }], + /*retTy=*/"mlir::Region &", + /*methodName=*/"getEntry", + /*args=*/(ins), + /*methodBody=*/"", + /*defaultImplementation=*/"return $_op.getCond();" + >, + InterfaceMethod<[{ + Returns a list of regions in order of execution. + }], + /*retTy=*/"llvm::SmallVector", + /*methodName=*/"getRegionsInExecutionOrder", + /*args=*/(ins), + /*methodBody=*/"", + /*defaultImplementation=*/[{ + return llvm::SmallVector{&$_op.getRegion(0), &$_op.getRegion(1)}; + }] + >, + InterfaceMethod<[{ + Recursively walks the body of the loop in pre-order while skipping + nested loops and executing a callback on every other operation. + }], + /*retTy=*/"mlir::WalkResult", + /*methodName=*/"walkBodySkippingNestedLoops", + /*args=*/(ins "::llvm::function_ref":$callback), + /*methodBody=*/"", + /*defaultImplementation=*/[{ + return $_op.getBody().template walk([&](Operation *op) { + if (isa(op)) + return mlir::WalkResult::skip(); + callback(op); + return mlir::WalkResult::advance(); + }); + }] + > + ]; + + let extraClassDeclaration = [{ + /// Generic method to retrieve the successors of a LoopOpInterface operation. + static void getLoopOpSuccessorRegions( + ::mlir::cir::LoopOpInterface op, ::mlir::RegionBranchPoint point, + ::mlir::SmallVectorImpl<::mlir::RegionSuccessor> ®ions); + }]; + + let verify = [{ + /// Verify invariants of the LoopOpInterface. + return detail::verifyLoopOpInterface($_op); + }]; +} + +#endif // CLANG_CIR_INTERFACES_CIRLOOPOPINTERFACE diff --git a/clang/include/clang/CIR/Interfaces/CMakeLists.txt b/clang/include/clang/CIR/Interfaces/CMakeLists.txt index fe835cade35c..c7132abca833 100644 --- a/clang/include/clang/CIR/Interfaces/CMakeLists.txt +++ b/clang/include/clang/CIR/Interfaces/CMakeLists.txt @@ -22,3 +22,4 @@ endfunction() add_clang_mlir_attr_interface(ASTAttrInterfaces) add_clang_mlir_op_interface(CIROpInterfaces) +add_clang_mlir_op_interface(CIRLoopOpInterface) diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 4c11e3eb8368..1f9d0c6d1c0b 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -43,6 +43,7 @@ add_clang_library(clangCIR MLIRCIROpsIncGen MLIRCIRASTAttrInterfacesIncGen MLIRCIROpInterfacesIncGen + MLIRCIRLoopOpInterfaceIncGen ${dialect_libs} LINK_LIBS diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 4bec3353ff91..bf04b61b5721 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -14,6 +14,7 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Interfaces/CIRLoopOpInterface.h" #include "llvm/Support/ErrorHandling.h" #include @@ -232,7 +233,7 @@ void AllocaOp::build(::mlir::OpBuilder &odsBuilder, //===----------------------------------------------------------------------===// LogicalResult BreakOp::verify() { - if (!getOperation()->getParentOfType() && + if (!getOperation()->getParentOfType() && !getOperation()->getParentOfType()) return emitOpError("must be within a loop or switch"); return success(); @@ -251,7 +252,7 @@ void ConditionOp::getSuccessorRegions( // down its list of possible successors. // Parent is a loop: condition may branch to the body or to the parent op. - if (auto loopOp = dyn_cast(getOperation()->getParentOp())) { + if (auto loopOp = dyn_cast(getOperation()->getParentOp())) { regions.emplace_back(&loopOp.getBody(), loopOp.getBody().getArguments()); regions.emplace_back(loopOp->getResults()); } @@ -269,7 +270,7 @@ ConditionOp::getMutableSuccessorOperands(RegionBranchPoint point) { } LogicalResult ConditionOp::verify() { - if (!isa(getOperation()->getParentOp())) + if (!isa(getOperation()->getParentOp())) return emitOpError("condition must be within a conditional region"); return success(); } @@ -365,7 +366,7 @@ OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } //===----------------------------------------------------------------------===// LogicalResult ContinueOp::verify() { - if (!this->getOperation()->getParentOfType()) + if (!this->getOperation()->getParentOfType()) return emitOpError("must be within a loop"); return success(); } @@ -1264,38 +1265,14 @@ void LoopOp::build(OpBuilder &builder, OperationState &result, stepBuilder(builder, result.location); } -/// Given the region at `index`, or the parent operation if `index` is None, -/// return the successor regions. These are the regions that may be selected -/// during the flow of control. `operands` is a set of optional attributes -/// that correspond to a constant value for each operand, or null if that -/// operand is not a constant. void LoopOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { - // If any index all the underlying regions branch back to the parent - // operation. - if (!point.isParent()) { - regions.push_back(RegionSuccessor()); - return; - } - - // FIXME: we want to look at cond region for getting more accurate results - // if the other regions will get a chance to execute. - regions.push_back(RegionSuccessor(&this->getCond())); - regions.push_back(RegionSuccessor(&this->getBody())); - regions.push_back(RegionSuccessor(&this->getStep())); + LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); } llvm::SmallVector LoopOp::getLoopRegions() { return {&getBody()}; } -LogicalResult LoopOp::verify() { - if (getCond().empty()) - return emitOpError() << "cond region must not be empty"; - - if (!llvm::isa(getCond().back().getTerminator())) - return emitOpError() << "cond region terminate with 'cir.condition'"; - - return success(); -} +LogicalResult LoopOp::verify() { return success(); } //===----------------------------------------------------------------------===// // GlobalOp diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index b8cc5b84e93e..f4609c3aad32 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -11,6 +11,7 @@ add_clang_library(MLIRCIR MLIRSymbolInterfacesIncGen MLIRCIRASTAttrInterfacesIncGen MLIRCIROpInterfacesIncGen + MLIRCIRLoopOpInterfaceIncGen LINK_LIBS PUBLIC MLIRIR diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 551024854077..ea324bd090b2 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -16,6 +16,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Interfaces/CIRLoopOpInterface.h" #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SmallSet.h" @@ -674,27 +675,10 @@ void LifetimeCheckPass::checkLoop(LoopOp loopOp) { SmallVector regionsToCheck; auto setupLoopRegionsToCheck = [&](bool isSubsequentTaken = false) { - regionsToCheck.clear(); - switch (loopOp.getKind()) { - case LoopOpKind::For: { - regionsToCheck.push_back(&loopOp.getCond()); - regionsToCheck.push_back(&loopOp.getBody()); - if (!isSubsequentTaken) - regionsToCheck.push_back(&loopOp.getStep()); - break; - } - case LoopOpKind::While: { - regionsToCheck.push_back(&loopOp.getCond()); - regionsToCheck.push_back(&loopOp.getBody()); - break; - } - case LoopOpKind::DoWhile: { - // Note this is the reverse order from While above. - regionsToCheck.push_back(&loopOp.getBody()); - regionsToCheck.push_back(&loopOp.getCond()); - break; - } - } + regionsToCheck = loopOp.getRegionsInExecutionOrder(); + // Drop step if it exists and we are not checking the subsequent taken. + if (loopOp.maybeGetStep() && !isSubsequentTaken) + regionsToCheck.pop_back(); }; // From 2.4.9 "Note": diff --git a/clang/lib/CIR/FrontendAction/CMakeLists.txt b/clang/lib/CIR/FrontendAction/CMakeLists.txt index 31ca49fedf44..077bd733cbd8 100644 --- a/clang/lib/CIR/FrontendAction/CMakeLists.txt +++ b/clang/lib/CIR/FrontendAction/CMakeLists.txt @@ -12,6 +12,7 @@ add_clang_library(clangCIRFrontendAction MLIRCIROpsIncGen MLIRCIRASTAttrInterfacesIncGen MLIRCIROpInterfacesIncGen + MLIRCIRLoopOpInterfaceIncGen MLIRBuiltinLocationAttributesIncGen MLIRBuiltinTypeInterfacesIncGen MLIRFunctionInterfacesIncGen diff --git a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp new file mode 100644 index 000000000000..ae5947a56944 --- /dev/null +++ b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp @@ -0,0 +1,54 @@ +//===- CIRLoopOpInterface.cpp - Interface for CIR loop-like ops *- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#include "clang/CIR/Interfaces/CIRLoopOpInterface.h" + +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Interfaces/CIRLoopOpInterface.cpp.inc" +#include "llvm/Support/ErrorHandling.h" + +namespace mlir { +namespace cir { + +void LoopOpInterface::getLoopOpSuccessorRegions( + LoopOpInterface op, RegionBranchPoint point, + SmallVectorImpl ®ions) { + assert(point.isParent() || point.getRegionOrNull()); + + // Branching to first region: go to condition or body (do-while). + if (point.isParent()) { + regions.emplace_back(&op.getEntry(), op.getEntry().getArguments()); + } + // Branching from condition: go to body or exit. + else if (&op.getCond() == point.getRegionOrNull()) { + regions.emplace_back(RegionSuccessor(op->getResults())); + regions.emplace_back(&op.getBody(), op.getBody().getArguments()); + } + // Branching from body: go to step (for) or condition. + else if (&op.getBody() == point.getRegionOrNull()) { + // FIXME(cir): Should we consider break/continue statements here? + auto *afterBody = (op.maybeGetStep() ? op.maybeGetStep() : &op.getCond()); + regions.emplace_back(afterBody, afterBody->getArguments()); + } + // Branching from step: go to condition. + else if (op.maybeGetStep() == point.getRegionOrNull()) { + regions.emplace_back(&op.getCond(), op.getCond().getArguments()); + } +} + +/// Verify invariants of the LoopOpInterface. +LogicalResult detail::verifyLoopOpInterface(Operation *op) { + auto loopOp = cast(op); + if (!isa(loopOp.getCond().back().getTerminator())) + return op->emitOpError( + "expected condition region to terminate with 'cir.condition'"); + return success(); +} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Interfaces/CMakeLists.txt b/clang/lib/CIR/Interfaces/CMakeLists.txt index f672eb3f6a9c..84322f4836e0 100644 --- a/clang/lib/CIR/Interfaces/CMakeLists.txt +++ b/clang/lib/CIR/Interfaces/CMakeLists.txt @@ -1,6 +1,7 @@ add_clang_library(MLIRCIRInterfaces ASTAttrInterfaces.cpp CIROpInterfaces.cpp + CIRLoopOpInterface.cpp ADDITIONAL_HEADER_DIRS ${MLIR_MAIN_INCLUDE_DIR}/mlir/Interfaces @@ -8,6 +9,7 @@ add_clang_library(MLIRCIRInterfaces DEPENDS MLIRCIRASTAttrInterfacesIncGen MLIRCIROpInterfacesIncGen + MLIRCIRLoopOpInterfaceIncGen LINK_LIBS ${dialect_libs} diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index eb6991852332..7cf80d0b0a0e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -14,6 +14,7 @@ add_clang_library(clangCIRLoweringDirectToLLVM MLIRCIROpsIncGen MLIRCIRASTAttrInterfacesIncGen MLIRCIROpInterfacesIncGen + MLIRCIRLoopOpInterfaceIncGen MLIRBuiltinLocationAttributesIncGen MLIRBuiltinTypeInterfacesIncGen MLIRFunctionInterfacesIncGen diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 1f75712c5728..f6ddc2553a9f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -448,14 +448,14 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { // Lower continue statements. mlir::Block &dest = (kind != LoopKind::For ? condFrontBlock : stepFrontBlock); - walkRegionSkipping( + walkRegionSkipping( loopOp.getBody(), [&](mlir::Operation *op) { if (isa(op)) lowerTerminator(op, &dest, rewriter); }); // Lower break statements. - walkRegionSkipping( + walkRegionSkipping( loopOp.getBody(), [&](mlir::Operation *op) { if (isa(op)) lowerTerminator(op, continueBlock, rewriter); @@ -1387,7 +1387,7 @@ class CIRSwitchOpLowering } // Handle break statements. - walkRegionSkipping( + walkRegionSkipping( region, [&](mlir::Operation *op) { if (isa(op)) lowerTerminator(op, exitBlock, rewriter); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index 1b84236a9c36..c5f3d21e363d 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -14,6 +14,7 @@ add_clang_library(clangCIRLoweringThroughMLIR MLIRCIREnumsGen MLIRCIRASTAttrInterfacesIncGen MLIRCIROpInterfacesIncGen + MLIRCIRLoopOpInterfaceIncGen MLIRBuiltinLocationAttributesIncGen MLIRBuiltinTypeInterfacesIncGen MLIRFunctionInterfacesIncGen diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 00ecacdcacad..a8601342919a 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -306,7 +306,7 @@ cir.func @cast24(%p : !u32i) { #true = #cir.bool : !cir.bool cir.func @b0() { cir.scope { - cir.loop while(cond : { // expected-error {{cond region terminate with 'cir.condition'}} + cir.loop while(cond : { // expected-error {{expected condition region to terminate with 'cir.condition'}} cir.yield }, step : { cir.yield diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 52ba8b7842d2..17880efeac2a 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -61,22 +61,6 @@ module { cir.return } - cir.func @l0() { - cir.scope { - cir.loop while(cond : { - %0 = cir.const(#true) : !cir.bool - cir.condition(%0) - }, step : { - cir.yield - }) { - cir.br ^bb1 - ^bb1: - cir.return - } - } - cir.return - } - // CHECK: cir.switch (%4 : !s32i) [ // CHECK-NEXT: case (equal, 0) { // CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i @@ -114,20 +98,6 @@ module { // CHECK-NEXT: } // CHECK-NEXT: ] -// CHECK: cir.func @l0 -// CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: %0 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.condition(%0) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: cir.return -// CHECK-NEXT: } -// CHECK-NEXT: } -// CHECK-NEXT: cir.return -// CHECK-NEXT: } - // Should remove empty scopes. cir.func @removeEmptyScope() { cir.scope { From 6749ee4b1c255976033fa07ce75b87c49be1cb78 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 22 Jan 2024 17:21:01 -0300 Subject: [PATCH 1358/2301] [CIR][Lowering][NFC] Refactor LoopOp lowering Leverages the new LoopOpInterface for lowering instead of the LoopOp operation. This is a step towards removing the LoopOp operation. ghstack-source-id: 28c1294833a12669d222a293de76609d2cf19148 Pull Request resolved: https://github.com/llvm/clangir/pull/406 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 106 ++++++++---------- 1 file changed, 45 insertions(+), 61 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f6ddc2553a9f..3317e2654bc6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -405,10 +405,11 @@ class CIRPtrStrideOpLowering } }; -class CIRLoopOpLowering : public mlir::OpConversionPattern { +class CIRLoopOpInterfaceLowering + : public mlir::OpInterfaceConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; - using LoopKind = mlir::cir::LoopOpKind; + using mlir::OpInterfaceConversionPattern< + mlir::cir::LoopOpInterface>::OpInterfaceConversionPattern; inline void lowerConditionOp(mlir::cir::ConditionOp op, mlir::Block *body, @@ -421,76 +422,59 @@ class CIRLoopOpLowering : public mlir::OpConversionPattern { } mlir::LogicalResult - matchAndRewrite(mlir::cir::LoopOp loopOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto kind = loopOp.getKind(); - auto *currentBlock = rewriter.getInsertionBlock(); - auto *continueBlock = - rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); + matchAndRewrite(mlir::cir::LoopOpInterface op, + mlir::ArrayRef operands, + mlir::ConversionPatternRewriter &rewriter) const final { + // Setup CFG blocks. + auto *entry = rewriter.getInsertionBlock(); + auto *exit = rewriter.splitBlock(entry, rewriter.getInsertionPoint()); + auto *cond = &op.getCond().front(); + auto *body = &op.getBody().front(); + auto *step = (op.maybeGetStep() ? &op.maybeGetStep()->front() : nullptr); + + // Setup loop entry branch. + rewriter.setInsertionPointToEnd(entry); + rewriter.create(op.getLoc(), &op.getEntry().front()); - // Fetch required info from the condition region. - auto &condRegion = loopOp.getCond(); - auto &condFrontBlock = condRegion.front(); - - // Fetch required info from the body region. - auto &bodyRegion = loopOp.getBody(); - auto &bodyFrontBlock = bodyRegion.front(); - auto bodyYield = - dyn_cast(bodyRegion.back().getTerminator()); + // Branch from condition region to body or exit. + auto conditionOp = cast(cond->getTerminator()); + lowerConditionOp(conditionOp, body, exit, rewriter); - // Fetch required info from the step region. - auto &stepRegion = loopOp.getStep(); - auto &stepFrontBlock = stepRegion.front(); - auto stepYield = - dyn_cast(stepRegion.back().getTerminator()); - auto &stepBlock = (kind == LoopKind::For ? stepFrontBlock : condFrontBlock); + // TODO(cir): Remove the walks below. It visits operations unnecessarily, + // however, to solve this we would likely need a custom DialecConversion + // driver to customize the order that operations are visited. // Lower continue statements. - mlir::Block &dest = - (kind != LoopKind::For ? condFrontBlock : stepFrontBlock); - walkRegionSkipping( - loopOp.getBody(), [&](mlir::Operation *op) { - if (isa(op)) - lowerTerminator(op, &dest, rewriter); - }); + mlir::Block *dest = (step ? step : cond); + op.walkBodySkippingNestedLoops([&](mlir::Operation *op) { + if (isa(op)) + lowerTerminator(op, dest, rewriter); + }); // Lower break statements. walkRegionSkipping( - loopOp.getBody(), [&](mlir::Operation *op) { + op.getBody(), [&](mlir::Operation *op) { if (isa(op)) - lowerTerminator(op, continueBlock, rewriter); + lowerTerminator(op, exit, rewriter); }); - // Move loop op region contents to current CFG. - rewriter.inlineRegionBefore(condRegion, continueBlock); - rewriter.inlineRegionBefore(bodyRegion, continueBlock); - if (kind == LoopKind::For) // Ignore step if not a for-loop. - rewriter.inlineRegionBefore(stepRegion, continueBlock); + // Lower optional body region yield. + auto bodyYield = dyn_cast(body->getTerminator()); + if (bodyYield) + lowerTerminator(bodyYield, (step ? step : cond), rewriter); - // Set loop entry point to condition or to body in do-while cases. - rewriter.setInsertionPointToEnd(currentBlock); - auto &entry = (kind != LoopKind::DoWhile ? condFrontBlock : bodyFrontBlock); - rewriter.create(loopOp.getLoc(), &entry); - - // Branch from condition region to body or exit. - auto conditionOp = - cast(condFrontBlock.getTerminator()); - lowerConditionOp(conditionOp, &bodyFrontBlock, continueBlock, rewriter); - - // Branch from body to condition or to step on for-loop cases. - if (bodyYield) { - rewriter.setInsertionPoint(bodyYield); - rewriter.replaceOpWithNewOp(bodyYield, &stepBlock); - } + // Lower mandatory step region yield. + if (step) + lowerTerminator(cast(step->getTerminator()), cond, + rewriter); - // Is a for loop: branch from step to condition. - if (kind == LoopKind::For) { - rewriter.setInsertionPoint(stepYield); - rewriter.replaceOpWithNewOp(stepYield, &condFrontBlock); - } + // Move region contents out of the loop op. + rewriter.inlineRegionBefore(op.getCond(), exit); + rewriter.inlineRegionBefore(op.getBody(), exit); + if (step) + rewriter.inlineRegionBefore(*op.maybeGetStep(), exit); - // Remove the loop op. - rewriter.eraseOp(loopOp); + rewriter.eraseOp(op); return mlir::success(); } }; @@ -2094,7 +2078,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); patterns.add< - CIRCmpOpLowering, CIRLoopOpLowering, CIRBrCondOpLowering, + CIRCmpOpLowering, CIRLoopOpInterfaceLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, From 76980d4d31620615a8715f256f63a88bc3b44c60 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 22 Jan 2024 17:21:01 -0300 Subject: [PATCH 1359/2301] [CIR][IR] Refactor do-while loops Creates a separate C/C++ operation for do-while loops, while keeping the LoopOpInterface to generically handle loops. This simplifies the IR generation and printing/parsing of do-while loops. It also allows us to define it regions in the order that they are executed, which is useful for the lifetime analysis. ghstack-source-id: b4d9517197b8f82ae677dc2684101fe5762b21b7 Pull Request resolved: https://github.com/llvm/clangir/pull/407 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 71 ++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 +++ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 12 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 ++++ .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 6 +- clang/test/CIR/CodeGen/loop.cpp | 66 ++++++++--------- clang/test/CIR/IR/do-while.cir | 18 +++++ clang/test/CIR/IR/invalid.cir | 11 +++ clang/test/CIR/IR/loop.cir | 34 --------- clang/test/CIR/Lowering/loop.cir | 8 +-- clang/test/CIR/Lowering/loops-with-break.cir | 16 ++--- .../test/CIR/Lowering/loops-with-continue.cir | 16 ++--- 12 files changed, 167 insertions(+), 113 deletions(-) create mode 100644 clang/test/CIR/IR/do-while.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a2a0e359a156..89dfaaa9ce3d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -440,7 +440,9 @@ def StoreOp : CIR_Op<"store", [ // ReturnOp //===----------------------------------------------------------------------===// -def ReturnOp : CIR_Op<"return", [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp, LoopOp">, +def ReturnOp : CIR_Op<"return", [ParentOneOf<["FuncOp", "ScopeOp", "IfOp", + "SwitchOp", "DoWhileOp", + "LoopOp"]>, Terminator]> { let summary = "Return from function"; let description = [{ @@ -634,7 +636,7 @@ def ConditionOp : CIR_Op<"condition", [ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "LoopOp", "AwaitOp", - "TernaryOp", "GlobalOp"]>]> { + "TernaryOp", "GlobalOp", "DoWhileOp"]>]> { let summary = "Represents the default branching behaviour of a region"; let description = [{ The `cir.yield` operation terminates regions on different CIR operations, @@ -1163,12 +1165,11 @@ def BrCondOp : CIR_Op<"brcond", def LoopOpKind_For : I32EnumAttrCase<"For", 1, "for">; def LoopOpKind_While : I32EnumAttrCase<"While", 2, "while">; -def LoopOpKind_DoWhile : I32EnumAttrCase<"DoWhile", 3, "dowhile">; def LoopOpKind : I32EnumAttr< "LoopOpKind", "Loop kind", - [LoopOpKind_For, LoopOpKind_While, LoopOpKind_DoWhile]> { + [LoopOpKind_For, LoopOpKind_While]> { let cppNamespace = "::mlir::cir"; } @@ -1252,13 +1253,66 @@ def LoopOp : CIR_Op<"loop", return llvm::SmallVector{&getCond(), &getBody(), &getStep()}; case LoopOpKind::While: return llvm::SmallVector{&getCond(), &getBody()}; - case LoopOpKind::DoWhile: - return llvm::SmallVector{&getBody(), &getCond()}; + // case LoopOpKind::DoWhile: + // return llvm::SmallVector{&getBody(), &getCond()}; } } }]; } +//===----------------------------------------------------------------------===// +// DoWhileOp +//===----------------------------------------------------------------------===// + +class WhileOpBase : CIR_Op { + defvar isWhile = !eq(mnemonic, "while"); + let summary = "C/C++ " # !if(isWhile, "while", "do-while") # " loop"; + let builders = [ + OpBuilder<(ins "function_ref":$condBuilder, + "function_ref":$bodyBuilder), [{ + OpBuilder::InsertionGuard guard($_builder); + $_builder.createBlock($_state.addRegion()); + }] # !if(isWhile, [{ + condBuilder($_builder, $_state.location); + $_builder.createBlock($_state.addRegion()); + bodyBuilder($_builder, $_state.location); + }], [{ + bodyBuilder($_builder, $_state.location); + $_builder.createBlock($_state.addRegion()); + condBuilder($_builder, $_state.location); + }])> + ]; +} + +def DoWhileOp : WhileOpBase<"do"> { + let regions = (region MinSizedRegion<1>:$body, SizedRegion<1>:$cond); + let assemblyFormat = " $body `while` $cond attr-dict"; + + let extraClassDeclaration = [{ + Region &getEntry() { return getBody(); } + }]; + + let description = [{ + Represents a C/C++ do-while loop. Identical to `cir.while` but the + condition is evaluated after the body. + + Example: + + ```mlir + cir.do { + cir.break + ^bb2: + cir.yield + } while { + cir.condition %cond : cir.bool + } + ``` + }]; +} + //===----------------------------------------------------------------------===// // GlobalOp //===----------------------------------------------------------------------===// @@ -2604,8 +2658,9 @@ def AllocException : CIR_Op<"alloc_exception", [ // ThrowOp //===----------------------------------------------------------------------===// -def ThrowOp : CIR_Op<"throw", - [HasParent<"FuncOp, ScopeOp, IfOp, SwitchOp, LoopOp">, +def ThrowOp : CIR_Op<"throw", [ + ParentOneOf<["FuncOp", "ScopeOp", "IfOp", "SwitchOp", + "DoWhileOp", "LoopOp"]>, Terminator]> { let summary = "(Re)Throws an exception"; let description = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 8274df33298e..6ac35c5a9fcc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -599,6 +599,14 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc); } + /// Create a do-while operation. + mlir::cir::DoWhileOp createDoWhile( + mlir::Location loc, + llvm::function_ref condBuilder, + llvm::function_ref bodyBuilder) { + return create(loc, condBuilder, bodyBuilder); + } + mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, mlir::Value src, mlir::Value len) { return create(loc, dst, src, len); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index a9c962ad6666..b6c4dd279e65 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -827,7 +827,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { } mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { - mlir::cir::LoopOp loopOp; + mlir::cir::DoWhileOp doWhileOp; // TODO: pass in array of attributes. auto doStmtBuilder = [&]() -> mlir::LogicalResult { @@ -839,8 +839,8 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { // sure we handle all cases. assert(!UnimplementedFeature::requiresCleanups()); - loopOp = builder.create( - getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::DoWhile, + doWhileOp = builder.createDoWhile( + getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { assert(!UnimplementedFeature::createProfileWeightsForLoop()); @@ -856,10 +856,6 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); buildStopPoint(&S); - }, - /*stepBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - builder.createYield(loc); }); return loopRes; }; @@ -876,7 +872,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { if (res.failed()) return res; - terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); + terminateBody(builder, doWhileOp.getBody(), getLoc(S.getEndLoc())); return mlir::success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index bf04b61b5721..f2c21a74695f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1274,6 +1274,20 @@ llvm::SmallVector LoopOp::getLoopRegions() { return {&getBody()}; } LogicalResult LoopOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// LoopOpInterface Methods +//===----------------------------------------------------------------------===// + +void DoWhileOp::getSuccessorRegions( + ::mlir::RegionBranchPoint point, + ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> ®ions) { + LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); +} + +::llvm::SmallVector DoWhileOp::getLoopRegions() { + return {&getBody()}; +} + //===----------------------------------------------------------------------===// // GlobalOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index ea324bd090b2..e77a6bdf14b8 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -47,7 +47,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { void checkIf(IfOp op); void checkSwitch(SwitchOp op); - void checkLoop(LoopOp op); + void checkLoop(LoopOpInterface op); void checkAlloca(AllocaOp op); void checkStore(StoreOp op); void checkLoad(LoadOp op); @@ -654,7 +654,7 @@ void LifetimeCheckPass::joinPmaps(SmallVectorImpl &pmaps) { } } -void LifetimeCheckPass::checkLoop(LoopOp loopOp) { +void LifetimeCheckPass::checkLoop(LoopOpInterface loopOp) { // 2.4.9. Loops // // A loop is treated as if it were the first two loop iterations unrolled @@ -1850,7 +1850,7 @@ void LifetimeCheckPass::checkOperation(Operation *op) { return checkIf(ifOp); if (auto switchOp = dyn_cast(op)) return checkSwitch(switchOp); - if (auto loopOp = dyn_cast(op)) + if (auto loopOp = dyn_cast(op)) return checkLoop(loopOp); if (auto allocaOp = dyn_cast(op)) return checkAlloca(allocaOp); diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 0a8bb34c975e..fa4c2b3b3e7b 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -111,46 +111,40 @@ void l3(bool cond) { // CHECK: cir.func @_Z2l3b // CHECK: cir.scope { -// CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: %[[#TRUE:]] = cir.load %0 : cir.ptr , !cir.bool -// CHECK-NEXT: cir.condition(%[[#TRUE]]) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i -// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[#TRUE:]] = cir.load %0 : cir.ptr , !cir.bool +// CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop dowhile(cond : { +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { // CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.condition(%[[#TRUE]]) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i -// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr -// CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop dowhile(cond : { -// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool -// CHECK-NEXT: cir.condition(%4) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i -// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr -// CHECK-NEXT: cir.yield +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool +// CHECK-NEXT: cir.condition(%4) // CHECK-NEXT: } // CHECK-NEXT: } @@ -191,14 +185,12 @@ void l5() { // CHECK: cir.func @_Z2l5v() // CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop dowhile(cond : { +// CHECK-NEXT: cir.do { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { // CHECK-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool // CHECK-NEXT: cir.condition(%1) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/IR/do-while.cir b/clang/test/CIR/IR/do-while.cir new file mode 100644 index 000000000000..6664b4cfe4bf --- /dev/null +++ b/clang/test/CIR/IR/do-while.cir @@ -0,0 +1,18 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +cir.func @testPrintingAndParsing (%arg0 : !cir.bool) -> !cir.void { + cir.do { + cir.yield + } while { + cir.condition(%arg0) + } + cir.return +} + +// CHECK: testPrintingAndParsing +// CHECK: cir.do { +// CHECK: cir.yield +// CHECK: } while { +// CHECK: cir.condition(%arg0) +// CHECK: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index a8601342919a..92132f2ba8fb 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -808,3 +808,14 @@ cir.func @const_type_mismatch() -> () { %2 = cir.const(#cir.int<0> : !s8i) : !u8i cir.return } + +// ----- + +cir.func @invalid_cond_region_terminator(%arg0 : !cir.bool) -> !cir.void { + cir.do { // expected-error {{op expected condition region to terminate with 'cir.condition'}} + cir.yield + } while { + cir.yield + } + cir.return +} diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index 132e68119239..af9271ac9f46 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -58,25 +58,6 @@ cir.func @l0() { } } - cir.scope { - %2 = cir.alloca !u32i, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<0> : !u32i) : !u32i - cir.store %3, %2 : !u32i, cir.ptr - cir.loop dowhile(cond : { - %4 = cir.load %2 : cir.ptr , !u32i - %5 = cir.const(#cir.int<10> : !u32i) : !u32i - %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.condition(%6) - }, step : { - cir.yield - }) { - %4 = cir.load %0 : cir.ptr , !u32i - %5 = cir.const(#cir.int<1> : !u32i) : !u32i - %6 = cir.binop(add, %4, %5) : !u32i - cir.store %6, %0 : !u32i, cir.ptr - cir.yield - } - } cir.return } @@ -123,21 +104,6 @@ cir.func @l0() { // CHECK-NEXT: cir.yield // CHECK-NEXT: } -// CHECK: cir.loop dowhile(cond : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i -// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i -// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.condition(%6) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !u32i -// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !u32i -// CHECK-NEXT: cir.store %6, %0 : !u32i, cir.ptr -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } - cir.func @l1(%arg0 : !cir.bool) { cir.scope { cir.loop while(cond : { diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index 04c4a5debae0..1ff39e7f84b3 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -53,12 +53,10 @@ module { // Test do-while cir.loop operation lowering. cir.func @testDoWhile(%arg0 : !cir.bool) { - cir.loop dowhile(cond : { - cir.condition(%arg0) - }, step : { // Droped when lowering while statements. - cir.yield - }) { + cir.do { cir.yield + } while { + cir.condition(%arg0) } cir.return } diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index 147163ab307f..4452d8b25b32 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -228,15 +228,7 @@ cir.func @testDoWhile() { %1 = cir.const(#cir.int<0> : !s32i) : !s32i cir.store %1, %0 : !s32i, cir.ptr cir.scope { - cir.loop dowhile(cond : { - %2 = cir.load %0 : cir.ptr , !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) - }, step : { - cir.yield - }) { + cir.do { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i cir.store %3, %0 : !s32i, cir.ptr @@ -250,6 +242,12 @@ cir.func @testDoWhile() { } } cir.yield + } while { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) } } cir.return diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir index 07cd6179f7ae..0f20d4b01f18 100644 --- a/clang/test/CIR/Lowering/loops-with-continue.cir +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -225,15 +225,7 @@ cir.func @testWhile() { %1 = cir.const(#cir.int<0> : !s32i) : !s32i cir.store %1, %0 : !s32i, cir.ptr cir.scope { - cir.loop dowhile(cond : { - %2 = cir.load %0 : cir.ptr , !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) - }, step : { - cir.yield - }) { + cir.do { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i cir.store %3, %0 : !s32i, cir.ptr @@ -247,6 +239,12 @@ cir.func @testWhile() { } } cir.yield + } while { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) } } cir.return From 4b5edcb0d52f05ade941a9914d57ae8107ecf46b Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 22 Jan 2024 17:21:02 -0300 Subject: [PATCH 1360/2301] [CIR][IR] Refactor while loops Creates a separate C/C++ operation for while loops, while keeping the LoopOpInterface to generically handle loops. This simplifies the IR generation and printing/parsing of while loops. ghstack-source-id: 29a6d7530263a4f96dbe6ce3052875831126005d Pull Request resolved: https://github.com/llvm/clangir/pull/408 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 40 ++++++-- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 ++ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 13 +-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 8 ++ clang/test/CIR/CodeGen/loop.cpp | 30 ++---- clang/test/CIR/IR/invalid.cir | 6 +- clang/test/CIR/IR/loop.cir | 94 ------------------- clang/test/CIR/IR/while.cir | 18 ++++ clang/test/CIR/Lowering/loop.cir | 12 +-- clang/test/CIR/Lowering/loops-with-break.cir | 6 +- .../test/CIR/Lowering/loops-with-continue.cir | 6 +- 11 files changed, 91 insertions(+), 150 deletions(-) create mode 100644 clang/test/CIR/IR/while.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 89dfaaa9ce3d..e5ea6406d6a4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -442,7 +442,7 @@ def StoreOp : CIR_Op<"store", [ def ReturnOp : CIR_Op<"return", [ParentOneOf<["FuncOp", "ScopeOp", "IfOp", "SwitchOp", "DoWhileOp", - "LoopOp"]>, + "WhileOp", "LoopOp"]>, Terminator]> { let summary = "Return from function"; let description = [{ @@ -635,7 +635,7 @@ def ConditionOp : CIR_Op<"condition", [ //===----------------------------------------------------------------------===// def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, - ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "LoopOp", "AwaitOp", + ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "WhileOp", "LoopOp", "AwaitOp", "TernaryOp", "GlobalOp", "DoWhileOp"]>]> { let summary = "Represents the default branching behaviour of a region"; let description = [{ @@ -1164,12 +1164,11 @@ def BrCondOp : CIR_Op<"brcond", //===----------------------------------------------------------------------===// def LoopOpKind_For : I32EnumAttrCase<"For", 1, "for">; -def LoopOpKind_While : I32EnumAttrCase<"While", 2, "while">; def LoopOpKind : I32EnumAttr< "LoopOpKind", "Loop kind", - [LoopOpKind_For, LoopOpKind_While]> { + [LoopOpKind_For]> { let cppNamespace = "::mlir::cir"; } @@ -1251,8 +1250,8 @@ def LoopOp : CIR_Op<"loop", switch(getKind()) { case LoopOpKind::For: return llvm::SmallVector{&getCond(), &getBody(), &getStep()}; - case LoopOpKind::While: - return llvm::SmallVector{&getCond(), &getBody()}; + // case LoopOpKind::While: + // return llvm::SmallVector{&getCond(), &getBody()}; // case LoopOpKind::DoWhile: // return llvm::SmallVector{&getBody(), &getCond()}; } @@ -1261,7 +1260,7 @@ def LoopOp : CIR_Op<"loop", } //===----------------------------------------------------------------------===// -// DoWhileOp +// While & DoWhileOp //===----------------------------------------------------------------------===// class WhileOpBase : CIR_Op : CIR_Op { + let regions = (region SizedRegion<1>:$cond, MinSizedRegion<1>:$body); + let assemblyFormat = "$cond `do` $body attr-dict"; + + let description = [{ + Represents a C/C++ while loop. It consists of two regions: + + - `cond`: single block region with the loop's condition. Should be + terminated with a `cir.condition` operation. + - `body`: contains the loop body and an arbitrary number of blocks. + + Example: + + ```mlir + cir.while { + cir.break + ^bb2: + cir.yield + } do { + cir.condition %cond : cir.bool + } + ``` + }]; +} + def DoWhileOp : WhileOpBase<"do"> { let regions = (region MinSizedRegion<1>:$body, SizedRegion<1>:$cond); let assemblyFormat = " $body `while` $cond attr-dict"; @@ -2660,7 +2684,7 @@ def AllocException : CIR_Op<"alloc_exception", [ def ThrowOp : CIR_Op<"throw", [ ParentOneOf<["FuncOp", "ScopeOp", "IfOp", "SwitchOp", - "DoWhileOp", "LoopOp"]>, + "DoWhileOp", "WhileOp", "LoopOp"]>, Terminator]> { let summary = "(Re)Throws an exception"; let description = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 6ac35c5a9fcc..bfbe0b86c0d9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -607,6 +607,14 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, condBuilder, bodyBuilder); } + /// Create a while operation. + mlir::cir::WhileOp createWhile( + mlir::Location loc, + llvm::function_ref condBuilder, + llvm::function_ref bodyBuilder) { + return create(loc, condBuilder, bodyBuilder); + } + mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, mlir::Value src, mlir::Value len) { return create(loc, dst, src, len); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index b6c4dd279e65..5c9cc027b247 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -16,6 +16,7 @@ #include "mlir/IR/Value.h" #include "clang/AST/CharUnits.h" #include "clang/AST/Stmt.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/ErrorHandling.h" @@ -877,7 +878,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { } mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { - mlir::cir::LoopOp loopOp; + mlir::cir::WhileOp whileOp; // TODO: pass in array of attributes. auto whileStmtBuilder = [&]() -> mlir::LogicalResult { @@ -889,8 +890,8 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // sure we handle all cases. assert(!UnimplementedFeature::requiresCleanups()); - loopOp = builder.create( - getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::While, + whileOp = builder.createWhile( + getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { assert(!UnimplementedFeature::createProfileWeightsForLoop()); @@ -911,10 +912,6 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); buildStopPoint(&S); - }, - /*stepBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - builder.createYield(loc); }); return loopRes; }; @@ -931,7 +928,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { if (res.failed()) return res; - terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); + terminateBody(builder, whileOp.getBody(), getLoc(S.getEndLoc())); return mlir::success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f2c21a74695f..b5a8bfd2f698 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1288,6 +1288,14 @@ ::llvm::SmallVector DoWhileOp::getLoopRegions() { return {&getBody()}; } +void WhileOp::getSuccessorRegions( + ::mlir::RegionBranchPoint point, + ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> ®ions) { + LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); +} + +::llvm::SmallVector WhileOp::getLoopRegions() { return {&getBody()}; } + //===----------------------------------------------------------------------===// // GlobalOp //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index fa4c2b3b3e7b..c21267013f6e 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -53,12 +53,10 @@ void l2(bool cond) { // CHECK: cir.func @_Z2l2b // CHECK: cir.scope { -// CHECK-NEXT: cir.loop while(cond : { +// CHECK-NEXT: cir.while { // CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool // CHECK-NEXT: cir.condition(%3) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { +// CHECK-NEXT: } do { // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i // CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i @@ -67,12 +65,10 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop while(cond : { +// CHECK-NEXT: cir.while { // CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.condition(%[[#TRUE]]) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { +// CHECK-NEXT: } do { // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i // CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i @@ -81,13 +77,11 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop while(cond : { +// CHECK-NEXT: cir.while { // CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.condition(%4) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { +// CHECK-NEXT: } do { // CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i // CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i @@ -159,12 +153,10 @@ void l4() { } // CHECK: cir.func @_Z2l4v -// CHECK: cir.loop while(cond : { +// CHECK: cir.while { // CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.condition(%[[#TRUE]]) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { +// CHECK-NEXT: } do { // CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i @@ -204,12 +196,10 @@ void l6() { // CHECK: cir.func @_Z2l6v() // CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop while(cond : { +// CHECK-NEXT: cir.while { // CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.condition(%[[#TRUE]]) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { +// CHECK-NEXT: } do { // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 92132f2ba8fb..5d02ee27ff53 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -306,11 +306,9 @@ cir.func @cast24(%p : !u32i) { #true = #cir.bool : !cir.bool cir.func @b0() { cir.scope { - cir.loop while(cond : { // expected-error {{expected condition region to terminate with 'cir.condition'}} + cir.while { // expected-error {{expected condition region to terminate with 'cir.condition'}} cir.yield - }, step : { - cir.yield - }) { + } do { cir.br ^bb1 ^bb1: cir.return diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir index af9271ac9f46..b163da45d34c 100644 --- a/clang/test/CIR/IR/loop.cir +++ b/clang/test/CIR/IR/loop.cir @@ -34,29 +34,6 @@ cir.func @l0() { cir.yield } } - cir.scope { - %2 = cir.alloca !u32i, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<0> : !u32i) : !u32i - cir.store %3, %2 : !u32i, cir.ptr - cir.loop while(cond : { - %4 = cir.load %2 : cir.ptr , !u32i - %5 = cir.const(#cir.int<10> : !u32i) : !u32i - %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.condition(%6) - }, step : { - cir.yield - }) { - %4 = cir.load %0 : cir.ptr , !u32i - %5 = cir.const(#cir.int<1> : !u32i) : !u32i - %6 = cir.binop(add, %4, %5) : !u32i - cir.store %6, %0 : !u32i, cir.ptr - %7 = cir.const(#true) : !cir.bool - cir.if %7 { - cir.continue - } - cir.yield - } - } cir.return } @@ -84,74 +61,3 @@ cir.func @l0() { // CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } - -// CHECK: cir.loop while(cond : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i -// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i -// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.condition(%6) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !u32i -// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !u32i -// CHECK-NEXT: cir.store %6, %0 : !u32i, cir.ptr -// CHECK-NEXT: %7 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.if %7 { -// CHECK-NEXT: cir.continue -// CHECK-NEXT: } -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } - -cir.func @l1(%arg0 : !cir.bool) { - cir.scope { - cir.loop while(cond : { - cir.condition(%arg0) - }, step : { - cir.yield - }) { - cir.yield - } - } - cir.return -} - -// CHECK: cir.func @l1 -// CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.condition(%arg0) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } -// CHECK-NEXT: } -// CHECK-NEXT: cir.return -// CHECK-NEXT: } - -cir.func @l2(%arg0 : !cir.bool) { - cir.scope { - cir.loop while(cond : { - cir.condition(%arg0) - }, step : { - cir.yield - }) { - cir.yield - } - } - cir.return -} - -// CHECK: cir.func @l2 -// CHECK-NEXT: cir.scope { -// CHECK-NEXT: cir.loop while(cond : { -// CHECK-NEXT: cir.condition(%arg0) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } -// CHECK-NEXT: } -// CHECK-NEXT: cir.return -// CHECK-NEXT: } diff --git a/clang/test/CIR/IR/while.cir b/clang/test/CIR/IR/while.cir new file mode 100644 index 000000000000..85897af76800 --- /dev/null +++ b/clang/test/CIR/IR/while.cir @@ -0,0 +1,18 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +cir.func @testPrintingParsing(%arg0 : !cir.bool) { + cir.while { + cir.condition(%arg0) + } do { + cir.yield + } + cir.return +} + +// CHECK: @testPrintingParsing +// CHECK: cir.while { +// CHECK: cir.condition(%arg0) +// CHECK: } do { +// CHECK: cir.yield +// CHECK: } diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index 1ff39e7f84b3..86097c379169 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -31,11 +31,9 @@ module { // Test while cir.loop operation lowering. cir.func @testWhile(%arg0 : !cir.bool) { - cir.loop while(cond : { + cir.while { cir.condition(%arg0) - }, step : { // Droped when lowering while statements. - cir.yield - }) { + } do { cir.yield } cir.return @@ -76,11 +74,9 @@ module { // break; // } cir.func @testWhileWithBreakTerminatedBody(%arg0 : !cir.bool) { - cir.loop while(cond : { + cir.while { cir.condition(%arg0) - }, step : { // Droped when lowering while statements. - cir.yield - }) { + } do { cir.break } cir.return diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index 4452d8b25b32..31a2bb99e0a9 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -171,15 +171,13 @@ module { %1 = cir.const(#cir.int<0> : !s32i) : !s32i cir.store %1, %0 : !s32i, cir.ptr cir.scope { - cir.loop while(cond : { + cir.while { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) - }, step : { - cir.yield - }) { + } do { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i cir.store %3, %0 : !s32i, cir.ptr diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir index 0f20d4b01f18..3e5134b8abec 100644 --- a/clang/test/CIR/Lowering/loops-with-continue.cir +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -171,15 +171,13 @@ cir.func @testWhile() { %1 = cir.const(#cir.int<0> : !s32i) : !s32i cir.store %1, %0 : !s32i, cir.ptr cir.scope { - cir.loop while(cond : { + cir.while { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) - }, step : { - cir.yield - }) { + } do { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i cir.store %3, %0 : !s32i, cir.ptr From 643211650d9f939b161adecd258e8fae031dcca4 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola Date: Mon, 22 Jan 2024 17:21:02 -0300 Subject: [PATCH 1361/2301] [CIR][IR] Refactor for loops This patch completes the deprecation of the generic `cir.loop` operation by adding a new `cir.for` operation and removing the `cir.loop` op. The new representation removes some bloat and places the regions in order of execution. ghstack-source-id: 886e0dacc632e5809015e2212810d690ef3ec294 Pull Request resolved: https://github.com/llvm/clangir/pull/409 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 173 +++++++----------- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 9 + clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 16 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 44 +---- .../lib/CIR/Interfaces/CIRLoopOpInterface.cpp | 2 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 1 - clang/test/CIR/CodeGen/loop-scope.cpp | 6 +- clang/test/CIR/CodeGen/loop.cpp | 16 +- clang/test/CIR/CodeGen/rangefor.cpp | 10 +- clang/test/CIR/IR/for.cir | 22 +++ clang/test/CIR/IR/invalid.cir | 13 ++ clang/test/CIR/IR/loop.cir | 63 ------- clang/test/CIR/Lowering/dot.cir | 14 +- clang/test/CIR/Lowering/loop.cir | 14 +- clang/test/CIR/Lowering/loops-with-break.cir | 42 ++--- .../test/CIR/Lowering/loops-with-continue.cir | 42 ++--- 16 files changed, 204 insertions(+), 283 deletions(-) create mode 100644 clang/test/CIR/IR/for.cir delete mode 100644 clang/test/CIR/IR/loop.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e5ea6406d6a4..cf4f103e3b6e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -442,7 +442,7 @@ def StoreOp : CIR_Op<"store", [ def ReturnOp : CIR_Op<"return", [ParentOneOf<["FuncOp", "ScopeOp", "IfOp", "SwitchOp", "DoWhileOp", - "WhileOp", "LoopOp"]>, + "WhileOp", "ForOp"]>, Terminator]> { let summary = "Return from function"; let description = [{ @@ -635,7 +635,7 @@ def ConditionOp : CIR_Op<"condition", [ //===----------------------------------------------------------------------===// def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, - ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "WhileOp", "LoopOp", "AwaitOp", + ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "WhileOp", "ForOp", "AwaitOp", "TernaryOp", "GlobalOp", "DoWhileOp"]>]> { let summary = "Represents the default branching behaviour of a region"; let description = [{ @@ -1159,106 +1159,6 @@ def BrCondOp : CIR_Op<"brcond", }]; } -//===----------------------------------------------------------------------===// -// LoopOp -//===----------------------------------------------------------------------===// - -def LoopOpKind_For : I32EnumAttrCase<"For", 1, "for">; - -def LoopOpKind : I32EnumAttr< - "LoopOpKind", - "Loop kind", - [LoopOpKind_For]> { - let cppNamespace = "::mlir::cir"; -} - -def LoopOp : CIR_Op<"loop", - [LoopOpInterface, - DeclareOpInterfaceMethods, - DeclareOpInterfaceMethods, - RecursivelySpeculatable, NoRegionArguments]> { - let summary = "Loop"; - let description = [{ - `cir.loop` represents C/C++ loop forms. It defines 3 blocks: - - `cond`: region can contain multiple blocks, terminated by regular - `cir.yield` when control should yield back to the parent, and - `cir.continue` when execution continues to the next region. - The region destination depends on the loop form specified. - - `step`: region with one block, containing code to compute the - loop step, must be terminated with `cir.yield`. - - `body`: region for the loop's body, can contain an arbitrary - number of blocks. - - The loop form: `for`, `while` and `dowhile` must also be specified and - each implies the loop regions execution order. - - ```mlir - // while (true) { - // i = i + 1; - // } - cir.loop while(cond : { - cir.continue - }, step : { - cir.yield - }) { - %3 = cir.load %1 : cir.ptr , i32 - %4 = cir.const(1 : i32) : i32 - %5 = cir.binop(add, %3, %4) : i32 - cir.store %5, %1 : i32, cir.ptr - cir.yield - } - ``` - }]; - - let arguments = (ins Arg:$kind); - let regions = (region AnyRegion:$cond, AnyRegion:$body, - SizedRegion<1>:$step); - - let assemblyFormat = [{ - $kind - `(` - `cond` `:` $cond `,` - `step` `:` $step - `)` - $body - attr-dict - }]; - - let skipDefaultBuilders = 1; - let builders = [ - OpBuilder<(ins - "cir::LoopOpKind":$kind, - CArg<"function_ref", - "nullptr">:$condBuilder, - CArg<"function_ref", - "nullptr">:$bodyBuilder, - CArg<"function_ref", - "nullptr">:$stepBuilder - )> - ]; - - let hasVerifier = 1; - - let extraClassDeclaration = [{ - Region *maybeGetStep() { - if (getKind() == LoopOpKind::For) - return &getStep(); - return nullptr; - } - - llvm::SmallVector getRegionsInExecutionOrder() { - switch(getKind()) { - case LoopOpKind::For: - return llvm::SmallVector{&getCond(), &getBody(), &getStep()}; - // case LoopOpKind::While: - // return llvm::SmallVector{&getCond(), &getBody()}; - // case LoopOpKind::DoWhile: - // return llvm::SmallVector{&getBody(), &getCond()}; - } - } - }]; -} - //===----------------------------------------------------------------------===// // While & DoWhileOp //===----------------------------------------------------------------------===// @@ -1337,6 +1237,73 @@ def DoWhileOp : WhileOpBase<"do"> { }]; } +//===----------------------------------------------------------------------===// +// ForOp +//===----------------------------------------------------------------------===// + +def ForOp : CIR_Op<"for", [LoopOpInterface, NoRegionArguments]> { + let summary = "C/C++ for loop counterpart"; + let description = [{ + Represents a C/C++ for loop. It consists of three regions: + + - `cond`: single block region with the loop's condition. Should be + terminated with a `cir.condition` operation. + - `body`: contains the loop body and an arbitrary number of blocks. + - `step`: single block region with the loop's step. + + Example: + + ```mlir + cir.for cond { + cir.condition(%val) + } body { + cir.break + ^bb2: + cir.yield + } step { + cir.yield + } + ``` + }]; + + let regions = (region SizedRegion<1>:$cond, + MinSizedRegion<1>:$body, + SizedRegion<1>:$step); + let assemblyFormat = [{ + `:` `cond` $cond + `body` $body + `step` $step + attr-dict + }]; + + let builders = [ + OpBuilder<(ins "function_ref":$condBuilder, + "function_ref":$bodyBuilder, + "function_ref":$stepBuilder), [{ + OpBuilder::InsertionGuard guard($_builder); + + // Build condition region. + $_builder.createBlock($_state.addRegion()); + condBuilder($_builder, $_state.location); + + // Build body region. + $_builder.createBlock($_state.addRegion()); + bodyBuilder($_builder, $_state.location); + + // Build step region. + $_builder.createBlock($_state.addRegion()); + stepBuilder($_builder, $_state.location); + }]> + ]; + + let extraClassDeclaration = [{ + Region *maybeGetStep() { return &getStep(); } + llvm::SmallVector getRegionsInExecutionOrder() { + return llvm::SmallVector{&getCond(), &getBody(), &getStep()}; + } + }]; +} + //===----------------------------------------------------------------------===// // GlobalOp //===----------------------------------------------------------------------===// @@ -2684,7 +2651,7 @@ def AllocException : CIR_Op<"alloc_exception", [ def ThrowOp : CIR_Op<"throw", [ ParentOneOf<["FuncOp", "ScopeOp", "IfOp", "SwitchOp", - "DoWhileOp", "WhileOp", "LoopOp"]>, + "DoWhileOp", "WhileOp", "ForOp"]>, Terminator]> { let summary = "(Re)Throws an exception"; let description = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index bfbe0b86c0d9..4ebd02aa7f2a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -615,6 +615,15 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, condBuilder, bodyBuilder); } + /// Create a for operation. + mlir::cir::ForOp createFor( + mlir::Location loc, + llvm::function_ref condBuilder, + llvm::function_ref bodyBuilder, + llvm::function_ref stepBuilder) { + return create(loc, condBuilder, bodyBuilder, stepBuilder); + } + mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, mlir::Value src, mlir::Value len) { return create(loc, dst, src, len); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 5c9cc027b247..ed702465924a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -675,7 +675,7 @@ CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, mlir::Type condType, mlir::LogicalResult CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef ForAttrs) { - mlir::cir::LoopOp loopOp; + mlir::cir::ForOp forOp; // TODO(cir): pass in array of attributes. auto forStmtBuilder = [&]() -> mlir::LogicalResult { @@ -698,8 +698,8 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, // sure we handle all cases. assert(!UnimplementedFeature::requiresCleanups()); - loopOp = builder.create( - getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::For, + forOp = builder.createFor( + getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { assert(!UnimplementedFeature::createProfileWeightsForLoop()); @@ -744,12 +744,12 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, if (res.failed()) return res; - terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); + terminateBody(builder, forOp.getBody(), getLoc(S.getEndLoc())); return mlir::success(); } mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { - mlir::cir::LoopOp loopOp; + mlir::cir::ForOp forOp; // TODO: pass in array of attributes. auto forStmtBuilder = [&]() -> mlir::LogicalResult { @@ -765,8 +765,8 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { // sure we handle all cases. assert(!UnimplementedFeature::requiresCleanups()); - loopOp = builder.create( - getLoc(S.getSourceRange()), mlir::cir::LoopOpKind::For, + forOp = builder.createFor( + getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { assert(!UnimplementedFeature::createProfileWeightsForLoop()); @@ -823,7 +823,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { if (res.failed()) return res; - terminateBody(builder, loopOp.getBody(), getLoc(S.getEndLoc())); + terminateBody(builder, forOp.getBody(), getLoc(S.getEndLoc())); return mlir::success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index b5a8bfd2f698..24e8300e674f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1238,42 +1238,6 @@ void CatchOp::build( catchBuilder(builder, result.location, result); } -//===----------------------------------------------------------------------===// -// LoopOp -//===----------------------------------------------------------------------===// - -void LoopOp::build(OpBuilder &builder, OperationState &result, - cir::LoopOpKind kind, - function_ref condBuilder, - function_ref bodyBuilder, - function_ref stepBuilder) { - OpBuilder::InsertionGuard guard(builder); - ::mlir::cir::LoopOpKindAttr kindAttr = - cir::LoopOpKindAttr::get(builder.getContext(), kind); - result.addAttribute(getKindAttrName(result.name), kindAttr); - - Region *condRegion = result.addRegion(); - builder.createBlock(condRegion); - condBuilder(builder, result.location); - - Region *bodyRegion = result.addRegion(); - builder.createBlock(bodyRegion); - bodyBuilder(builder, result.location); - - Region *stepRegion = result.addRegion(); - builder.createBlock(stepRegion); - stepBuilder(builder, result.location); -} - -void LoopOp::getSuccessorRegions(mlir::RegionBranchPoint point, - SmallVectorImpl ®ions) { - LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); -} - -llvm::SmallVector LoopOp::getLoopRegions() { return {&getBody()}; } - -LogicalResult LoopOp::verify() { return success(); } - //===----------------------------------------------------------------------===// // LoopOpInterface Methods //===----------------------------------------------------------------------===// @@ -1296,6 +1260,14 @@ void WhileOp::getSuccessorRegions( ::llvm::SmallVector WhileOp::getLoopRegions() { return {&getBody()}; } +void ForOp::getSuccessorRegions( + ::mlir::RegionBranchPoint point, + ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> ®ions) { + LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); +} + +::llvm::SmallVector ForOp::getLoopRegions() { return {&getBody()}; } + //===----------------------------------------------------------------------===// // GlobalOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp index ae5947a56944..f3e2d1e61274 100644 --- a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp +++ b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp @@ -38,6 +38,8 @@ void LoopOpInterface::getLoopOpSuccessorRegions( // Branching from step: go to condition. else if (op.maybeGetStep() == point.getRegionOrNull()) { regions.emplace_back(&op.getCond(), op.getCond().getArguments()); + } else { + llvm_unreachable("unexpected branch origin"); } } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3317e2654bc6..f057bcdee302 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2261,7 +2261,6 @@ void ConvertCIRToLLVMPass::runOnOperation() { // ,ConstantOp // ,FuncOp // ,LoadOp - // ,LoopOp // ,ReturnOp // ,StoreOp // ,YieldOp diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index a67d58df76f7..c333654a38ad 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -15,15 +15,15 @@ void l0(void) { // CPPSCOPE-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} // CPPSCOPE-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i // CPPSCOPE-NEXT: cir.store %2, %0 : !s32i, cir.ptr -// CPPSCOPE-NEXT: cir.loop for(cond : { +// CPPSCOPE-NEXT: cir.for : cond { // CSCOPE: cir.func @l0() // CSCOPE-NEXT: cir.scope { // CSCOPE-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} // CSCOPE-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i // CSCOPE-NEXT: cir.store %1, %0 : !s32i, cir.ptr -// CSCOPE-NEXT: cir.loop for(cond : { +// CSCOPE-NEXT: cir.for : cond { -// CSCOPE: }) { +// CSCOPE: } body { // CSCOPE-NEXT: cir.scope { // CSCOPE-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index c21267013f6e..b6f63b2ce4fc 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -7,7 +7,7 @@ void l0() { } // CHECK: cir.func @_Z2l0v -// CHECK: cir.loop for(cond : { +// CHECK: cir.for : cond { // CHECK: %[[#TRUE:]] = cir.const(#true) : !cir.bool // CHECK: cir.condition(%[[#TRUE]]) @@ -19,22 +19,22 @@ void l1() { } // CHECK: cir.func @_Z2l1v -// CHECK: cir.loop for(cond : { +// CHECK: cir.for : cond { // CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool // CHECK-NEXT: cir.condition(%6) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: } body { +// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i -// CHECK-NEXT: cir.store %6, %2 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr // CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: } step { +// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i -// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %6, %2 : !s32i, cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 977eae88e22f..69c7b3a741f0 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -44,13 +44,10 @@ void init(unsigned numImages) { // CHECK: %10 = cir.load %4 : cir.ptr >, !cir.ptr // CHECK: %11 = cir.call @_ZNSt6vectorI6tripleE3endEv(%10) : (!cir.ptr) -> ![[VEC_IT]] // CHECK: cir.store %11, %6 : ![[VEC_IT]], cir.ptr -// CHECK: cir.loop for(cond : { +// CHECK: cir.for : cond { // CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool // CHECK: cir.condition(%12) -// CHECK: }, step : { -// CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr -// CHECK: cir.yield -// CHECK: }) { +// CHECK: } body { // CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EdeEv(%5) : (!cir.ptr) -> !cir.ptr // CHECK: cir.store %12, %7 : !cir.ptr, cir.ptr > // CHECK: cir.scope { @@ -66,6 +63,9 @@ void init(unsigned numImages) { // CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } // CHECK: cir.yield +// CHECK: } step { +// CHECK: %12 = cir.call @_ZN17__vector_iteratorI6triplePS0_RS0_EppEv(%5) : (!cir.ptr) -> !cir.ptr +// CHECK: cir.yield // CHECK: } // CHECK: } // CHECK: cir.return diff --git a/clang/test/CIR/IR/for.cir b/clang/test/CIR/IR/for.cir new file mode 100644 index 000000000000..62b82976cc68 --- /dev/null +++ b/clang/test/CIR/IR/for.cir @@ -0,0 +1,22 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +cir.func @testPrintingParsing(%arg0 : !cir.bool) { + cir.for : cond { + cir.condition(%arg0) + } body { + cir.yield + } step { + cir.yield + } + cir.return +} + +// CHECK: @testPrintingParsing +// CHECK: cir.for : cond { +// CHECK: cir.condition(%arg0) +// CHECK: } body { +// CHECK: cir.yield +// CHECK: } step { +// CHECK: cir.yield +// CHECK: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 5d02ee27ff53..09d4f36dd03a 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -817,3 +817,16 @@ cir.func @invalid_cond_region_terminator(%arg0 : !cir.bool) -> !cir.void { } cir.return } + +// ----- + +cir.func @invalidConditionTerminator (%arg0 : !cir.bool) -> !cir.void { + cir.for : cond { // expected-error {{op expected condition region to terminate with 'cir.condition'}} + cir.yield + } body { + cir.yield + } step { + cir.yield + } + cir.return +} diff --git a/clang/test/CIR/IR/loop.cir b/clang/test/CIR/IR/loop.cir deleted file mode 100644 index b163da45d34c..000000000000 --- a/clang/test/CIR/IR/loop.cir +++ /dev/null @@ -1,63 +0,0 @@ -// RUN: cir-opt %s | FileCheck %s -#false = #cir.bool : !cir.bool -#true = #cir.bool : !cir.bool -!u32i = !cir.int - -cir.func @l0() { - %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<0> : !u32i) : !u32i - cir.store %1, %0 : !u32i, cir.ptr - cir.scope { - %2 = cir.alloca !u32i, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<0> : !u32i) : !u32i - cir.store %3, %2 : !u32i, cir.ptr - cir.loop for(cond : { - %4 = cir.load %2 : cir.ptr , !u32i - %5 = cir.const(#cir.int<10> : !u32i) : !u32i - %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool - cir.condition(%6) - }, step : { - %4 = cir.load %2 : cir.ptr , !u32i - %5 = cir.const(#cir.int<1> : !u32i) : !u32i - %6 = cir.binop(add, %4, %5) : !u32i - cir.store %6, %2 : !u32i, cir.ptr - cir.yield - }) { - %4 = cir.load %0 : cir.ptr , !u32i - %5 = cir.const(#cir.int<1> : !u32i) : !u32i - %6 = cir.binop(add, %4, %5) : !u32i - cir.store %6, %0 : !u32i, cir.ptr - %7 = cir.const(#true) : !cir.bool - cir.if %7 { - cir.break - } - cir.yield - } - } - - cir.return -} - -// CHECK: cir.func @l0 -// CHECK: cir.loop for(cond : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i -// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !u32i) : !u32i -// CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !u32i, !cir.bool -// CHECK-NEXT: cir.condition(%6) -// CHECK-NEXT: }, step : { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !u32i -// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !u32i -// CHECK-NEXT: cir.store %6, %2 : !u32i, cir.ptr -// CHECK-NEXT: cir.yield -// CHECK-NEXT: }) { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !u32i -// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u32i) : !u32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !u32i -// CHECK-NEXT: cir.store %6, %0 : !u32i, cir.ptr -// CHECK-NEXT: %7 = cir.const(#true) : !cir.bool -// CHECK-NEXT: cir.if %7 { -// CHECK-NEXT: cir.break -// CHECK-NEXT: } -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 4f588e1f05f9..401399f054a8 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -18,18 +18,13 @@ module { %8 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} %9 = cir.const(#cir.int<0> : !s32i) : !s32i cir.store %9, %8 : !s32i, cir.ptr - cir.loop for(cond : { + cir.for : cond { %10 = cir.load %8 : cir.ptr , !s32i %11 = cir.load %2 : cir.ptr , !s32i %12 = cir.cmp(lt, %10, %11) : !s32i, !s32i %13 = cir.cast(int_to_bool, %12 : !s32i), !cir.bool cir.condition(%13) - }, step : { - %10 = cir.load %8 : cir.ptr , !s32i - %11 = cir.unary(inc, %10) : !s32i, !s32i - cir.store %11, %8 : !s32i, cir.ptr - cir.yield - }) { + } body { %10 = cir.load %0 : cir.ptr >, !cir.ptr %11 = cir.load %8 : cir.ptr , !s32i %12 = cir.ptr_stride(%10 : !cir.ptr, %11 : !s32i), !cir.ptr @@ -43,6 +38,11 @@ module { %20 = cir.binop(add, %19, %18) : f64 cir.store %20, %4 : f64, cir.ptr cir.yield + } step { + %10 = cir.load %8 : cir.ptr , !s32i + %11 = cir.unary(inc, %10) : !s32i, !s32i + cir.store %11, %8 : !s32i, cir.ptr + cir.yield } } %6 = cir.load %4 : cir.ptr , f64 diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index 86097c379169..d15479a76a0d 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -7,11 +7,11 @@ module { cir.func @testFor(%arg0 : !cir.bool) { - cir.loop for(cond : { + cir.for : cond { cir.condition(%arg0) - }, step : { + } body { cir.yield - }) { + } step { cir.yield } cir.return @@ -97,15 +97,15 @@ module { // break; // } cir.func @forWithBreakTerminatedScopeInBody(%arg0 : !cir.bool) { - cir.loop for(cond : { + cir.for : cond { cir.condition(%arg0) - }, step : { - cir.yield - }) { + } body { cir.scope { // FIXME(cir): Redundant scope emitted during C codegen. cir.break } cir.yield + } step { + cir.yield } cir.return } diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index 31a2bb99e0a9..ee5238c5748a 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -8,18 +8,13 @@ module { %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %1, %0 : !s32i, cir.ptr - cir.loop for(cond : { + cir.for : cond { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) - }, step : { - %2 = cir.load %0 : cir.ptr , !s32i - %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr - cir.yield - }) { + } body { cir.scope { cir.scope { %2 = cir.load %0 : cir.ptr , !s32i @@ -32,6 +27,11 @@ module { } } cir.yield + } step { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield } } cir.return @@ -70,35 +70,25 @@ module { %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %1, %0 : !s32i, cir.ptr - cir.loop for(cond : { + cir.for : cond { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) - }, step : { - %2 = cir.load %0 : cir.ptr , !s32i - %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr - cir.yield - }) { + } body { cir.scope { cir.scope { %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} %3 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %3, %2 : !s32i, cir.ptr - cir.loop for(cond : { + cir.for : cond { %4 = cir.load %2 : cir.ptr , !s32i %5 = cir.const(#cir.int<10> : !s32i) : !s32i %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.condition(%7) - }, step : { - %4 = cir.load %2 : cir.ptr , !s32i - %5 = cir.unary(inc, %4) : !s32i, !s32i - cir.store %5, %2 : !s32i, cir.ptr - cir.yield - }) { + } body { cir.scope { cir.scope { %4 = cir.load %2 : cir.ptr , !s32i @@ -111,10 +101,20 @@ module { } } cir.yield + } step { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.unary(inc, %4) : !s32i, !s32i + cir.store %5, %2 : !s32i, cir.ptr + cir.yield } } } cir.yield + } step { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield } } cir.return diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir index 3e5134b8abec..9cfd3635d740 100644 --- a/clang/test/CIR/Lowering/loops-with-continue.cir +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -8,18 +8,13 @@ module { %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %1, %0 : !s32i, cir.ptr - cir.loop for(cond : { + cir.for : cond { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) - }, step : { - %2 = cir.load %0 : cir.ptr , !s32i - %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr - cir.yield - }) { + } body { cir.scope { cir.scope { %2 = cir.load %0 : cir.ptr , !s32i @@ -32,6 +27,11 @@ module { } } cir.yield + } step { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield } } cir.return @@ -71,35 +71,25 @@ module { %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %1, %0 : !s32i, cir.ptr - cir.loop for(cond : { + cir.for : cond { %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) - }, step : { - %2 = cir.load %0 : cir.ptr , !s32i - %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr - cir.yield - }) { + } body { cir.scope { cir.scope { %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} %3 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %3, %2 : !s32i, cir.ptr - cir.loop for(cond : { + cir.for : cond { %4 = cir.load %2 : cir.ptr , !s32i %5 = cir.const(#cir.int<10> : !s32i) : !s32i %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.condition(%7) - }, step : { - %4 = cir.load %2 : cir.ptr , !s32i - %5 = cir.unary(inc, %4) : !s32i, !s32i - cir.store %5, %2 : !s32i, cir.ptr - cir.yield - }) { + } body { cir.scope { cir.scope { %4 = cir.load %2 : cir.ptr , !s32i @@ -112,10 +102,20 @@ module { } } cir.yield + } step { + %4 = cir.load %2 : cir.ptr , !s32i + %5 = cir.unary(inc, %4) : !s32i, !s32i + cir.store %5, %2 : !s32i, cir.ptr + cir.yield } } } cir.yield + } step { + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, cir.ptr + cir.yield } } cir.return From 657418c86b5bdef878200613709786e28c959b14 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 22 Jan 2024 19:37:31 -0800 Subject: [PATCH 1362/2301] [CIR][CIRGen][Exceptions] Populate catch clauses and fix order of operations More machinery for exceptions. This time around we finally emit a cir.catch and fix the order of emitting operations. This allows a testcase to be added. I also added `CatchParamOp`, which fetches the arguments for the clauses from the !cir.eh_info object. Work coming next: - Dtors. - Use cir.try instead of cir.scope. - Eesume. - Documentation.` --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 56 +++-- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 7 +- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 2 + clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 218 ++++++++++++++---- clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 196 ++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 15 +- clang/test/CIR/CodeGen/try-catch.cpp | 43 ++++ 9 files changed, 464 insertions(+), 82 deletions(-) create mode 100644 clang/test/CIR/CodeGen/try-catch.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index cf4f103e3b6e..d518f13a659a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -636,7 +636,7 @@ def ConditionOp : CIR_Op<"condition", [ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "WhileOp", "ForOp", "AwaitOp", - "TernaryOp", "GlobalOp", "DoWhileOp"]>]> { + "TernaryOp", "GlobalOp", "DoWhileOp", "CatchOp"]>]> { let summary = "Represents the default branching behaviour of a region"; let description = [{ The `cir.yield` operation terminates regions on different CIR operations, @@ -726,24 +726,23 @@ def ContinueOp : CIR_Op<"continue", [Terminator]> { //===----------------------------------------------------------------------===// def ResumeOp : CIR_Op<"resume", [ReturnLike, Terminator, - ParentOneOf<["CatchOp"]>]> { + ParentOneOf<["CatchOp"]>]> { let summary = "Resumes execution after not catching exceptions"; let description = [{ The `cir.resume` operation terminates a region on `cir.catch`, "resuming" - or continuing the unwind process. The incoming argument is of !cir.eh_info - populated by `cir.try_call` and available in `cir.catch`. + or continuing the unwind process. Examples: ```mlir - cir.catch %4 { + cir.catch ... { ... - fallback { cir.resume(%0) }; + fallback { cir.resume }; } ``` }]; - let arguments = (ins ExceptionInfoPtr:$ptr); - let assemblyFormat = "$ptr attr-dict"; + let arguments = (ins); + let assemblyFormat = "attr-dict"; } //===----------------------------------------------------------------------===// @@ -2318,18 +2317,6 @@ def TryOp : CIR_Op<"try", // CatchOp //===----------------------------------------------------------------------===// -def CatchEntryAttr : AttrDef { - let parameters = (ins "mlir::Type":$exception_type, - "Attribute":$exception_type_info); - let mnemonic = "type"; - let assemblyFormat = "`<` struct(params) `>`"; -} - -def CatchArrayAttr : - TypedArrayAttrBase { - let constBuilderCall = ?; -} - def CatchOp : CIR_Op<"catch", [SameVariadicOperandSize, DeclareOpInterfaceMethods, @@ -2339,7 +2326,7 @@ def CatchOp : CIR_Op<"catch", }]; let arguments = (ins CIR_AnyType:$exception_info, - OptionalAttr:$catchers); + OptionalAttr:$catchers); let regions = (region VariadicRegion:$regions); // Already verified elsewhere @@ -2348,6 +2335,7 @@ def CatchOp : CIR_Op<"catch", let skipDefaultBuilders = 1; let builders = [ OpBuilder<(ins + "Value":$exception_info, "function_ref" :$catchBuilder)> ]; @@ -2360,6 +2348,32 @@ def CatchOp : CIR_Op<"catch", }]; } +//===----------------------------------------------------------------------===// +// CatchParamOp +//===----------------------------------------------------------------------===// + +def CatchParamOp : CIR_Op<"catch_param"> { + let summary = "Materialize the catch clause formal parameter"; + let description = [{ + The `cir.catch_param` binds to a the C/C++ catch clause param and allow + it to be materialized. This operantion grabs the param by looking into + a exception info `!cir.eh_info` argument. + + Example: + ```mlir + // TBD + ``` + }]; + + let arguments = (ins ExceptionInfoPtr:$exception_info); + let results = (outs CIR_AnyType:$param); + let assemblyFormat = [{ + `(` $exception_info `)` `->` qualified(type($param)) attr-dict + }]; + + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // CopyOp //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 341de9406c22..077beb86e0f9 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -267,20 +267,21 @@ def CIR_VoidType : CIR_Type<"Void", "void"> { def VoidPtr : Type< And<[ CPred<"$_self.isa<::mlir::cir::PointerType>()">, - CPred<"$_self.cast<::mlir::cir::PointerType>().getPointee().isa<::mlir::cir::VoidType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::VoidType>()">, ]>, "void*">, BuildableType< "mlir::cir::PointerType::get($_builder.getContext()," "mlir::cir::VoidType::get($_builder.getContext()))"> { } -// Pointer to exception info +// Pointers to exception info def ExceptionInfoPtr : Type< And<[ CPred<"$_self.isa<::mlir::cir::PointerType>()">, CPred<"$_self.cast<::mlir::cir::PointerType>()" ".getPointee().isa<::mlir::cir::ExceptionInfoType>()">, - ]>, "void*">, + ]>, "!cir.eh_info*">, BuildableType< "mlir::cir::PointerType::get($_builder.getContext()," "mlir::cir::ExceptionInfoType::get($_builder.getContext()))"> { diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index e1fa33a3f22f..a4e7808bf36b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -164,6 +164,8 @@ class CIRGenCXXABI { virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, FunctionArgList &Args) const = 0; + virtual void emitBeginCatch(CIRGenFunction &CGF, const CXXCatchStmt *C) = 0; + /// Get the address of the vtable for the given record decl which should be /// used for the vptr at the given offset in RD. virtual mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index dd86a854e01c..9ef1be205ad7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -33,7 +33,8 @@ using namespace cir; using namespace clang; CIRGenFunction::AutoVarEmission -CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { +CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, + mlir::OpBuilder::InsertPoint ip) { QualType Ty = D.getType(); // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private && // getLangOpts().OpenCL)) @@ -134,7 +135,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D) { // Create the temp alloca and declare variable using it. mlir::Value addrVal; address = CreateTempAlloca(allocaTy, allocaAlignment, loc, D.getName(), - /*ArraySize=*/nullptr, &allocaAddr); + /*ArraySize=*/nullptr, &allocaAddr, ip); if (failed(declare(address, &D, Ty, getLoc(D.getSourceRange()), alignment, addrVal))) { CGM.emitError("Cannot declare variable"); diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 0316536e7393..2dd279191fc5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -27,6 +27,7 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Value.h" +#include "llvm/Support/SaveAndRestore.h" using namespace cir; using namespace clang; @@ -275,13 +276,34 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup) { llvm_unreachable("NYI"); } - getBuilder().create(catchOp.getLoc(), - currExceptionInfo.exceptionAddr); + getBuilder().create(catchOp.getLoc()); getBuilder().restoreInsertionPoint(ip); return resumeBlock; } mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { + auto loc = getLoc(S.getSourceRange()); + mlir::OpBuilder::InsertPoint scopeIP; + + // Create a scope to hold try local storage for catch params. + [[maybe_unused]] auto s = builder.create( + loc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + scopeIP = getBuilder().saveInsertionPoint(); + }); + + auto r = mlir::success(); + { + mlir::OpBuilder::InsertionGuard guard(getBuilder()); + getBuilder().restoreInsertionPoint(scopeIP); + r = buildCXXTryStmtUnderScope(S); + getBuilder().create(loc); + } + return r; +} + +mlir::LogicalResult +CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { const llvm::Triple &T = getTarget().getTriple(); // If we encounter a try statement on in an OpenMP target region offloaded to // a GPU, we treat it as a basic block. @@ -289,56 +311,84 @@ mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { (CGM.getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN())); assert(!IsTargetDevice && "NYI"); - auto tryLoc = getLoc(S.getBeginLoc()); auto numHandlers = S.getNumHandlers(); - - // FIXME(cir): add catchOp to the lastest possible position - // inside the cleanup block. + auto tryLoc = getLoc(S.getBeginLoc()); auto scopeLoc = getLoc(S.getSourceRange()); - auto res = mlir::success(); - // This scope represents the higher level try {} statement. - builder.create( + mlir::OpBuilder::InsertPoint beginInsertTryBody; + auto ehPtrTy = mlir::cir::PointerType::get( + getBuilder().getContext(), + getBuilder().getType<::mlir::cir::ExceptionInfoType>()); + mlir::Value exceptionInfoInsideTry; + + // Create the scope to represent only the C/C++ `try {}` part. However, don't + // populate right away. Reserve some space to store the exception info but + // don't emit the bulk right away, for now only make sure the scope returns + // the exception information. + auto tryScope = builder.create( scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScope lexScope{*this, loc, - builder.getInsertionBlock()}; + [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) { // Allocate space for our exception info that might be passed down // to `cir.try_call` everytime a call happens. - auto exceptionInfo = buildAlloca( - "__exception_ptr", - mlir::cir::PointerType::get( - b.getContext(), b.getType<::mlir::cir::ExceptionInfoType>()), - loc, CharUnits::One()); - - // Create the skeleton for the catch statements to be further populated - // by cir::CIRGenFunction::buildLandingPad. - auto catchOp = builder.create( - tryLoc, // FIXME(cir): we can do better source location here. - [&](mlir::OpBuilder &b, mlir::Location loc, - mlir::OperationState &result) { - mlir::OpBuilder::InsertionGuard guard(b); - // Once for each handler and one for fallback (which could be a - // resume or rethrow). - for (int i = 0, e = numHandlers + 1; i != e; ++i) { - auto *r = result.addRegion(); - builder.createBlock(r); - } - }); - ExceptionInfoRAIIObject ehx{*this, {exceptionInfo, catchOp}}; - - // Do actual emission. - enterCXXTryStmt(S, catchOp); - - if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) { - res = mlir::failure(); - return; - } + yieldTy = ehPtrTy; + exceptionInfoInsideTry = b.create( + loc, /*addr type*/ getBuilder().getPointerTo(yieldTy), + /*var type*/ yieldTy, "__exception_ptr", + CGM.getSize(CharUnits::One()), nullptr); - exitCXXTryStmt(S); + beginInsertTryBody = getBuilder().saveInsertionPoint(); + }); + + // The catch {} parts consume the exception information provided by a + // try scope. Also don't emit the code right away for catch clauses, for + // now create the regions and consume the try scope result. + // Note that clauses are later populated in CIRGenFunction::buildLandingPad. + auto catchOp = builder.create( + tryLoc, + tryScope->getResult( + 0), // FIXME(cir): we can do better source location here. + [&](mlir::OpBuilder &b, mlir::Location loc, + mlir::OperationState &result) { + mlir::OpBuilder::InsertionGuard guard(b); + // Once for each handler and one for fallback (which could be a + // resume or rethrow). + for (int i = 0, e = numHandlers + 1; i != e; ++i) { + auto *r = result.addRegion(); + builder.createBlock(r); + } }); - return res; + // Finally emit the body for try/catch. + auto emitTryCatchBody = [&]() -> mlir::LogicalResult { + auto loc = catchOp.getLoc(); + mlir::OpBuilder::InsertionGuard guard(getBuilder()); + getBuilder().restoreInsertionPoint(beginInsertTryBody); + CIRGenFunction::LexicalScope lexScope{*this, loc, + getBuilder().getInsertionBlock()}; + + { + ExceptionInfoRAIIObject ehx{*this, {exceptionInfoInsideTry, catchOp}}; + // Attach the basic blocks for the catchOp regions into ScopeCatch info. + enterCXXTryStmt(S, catchOp); + // Emit the body for the `try {}` part. + if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + + auto v = getBuilder().create(loc, ehPtrTy, + exceptionInfoInsideTry); + getBuilder().create(loc, v.getResult()); + } + + { + ExceptionInfoRAIIObject ehx{*this, {tryScope->getResult(0), catchOp}}; + // Emit catch clauses. + exitCXXTryStmt(S); + } + + return mlir::success(); + }; + + return emitTryCatchBody(); } /// Emit the structure of the dispatch block for the given catch scope. @@ -449,7 +499,85 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { // Emit the structure of the EH dispatch for this catch. buildCatchDispatchBlock(*this, CatchScope); - llvm_unreachable("NYI"); + + // Copy the handler blocks off before we pop the EH stack. Emitting + // the handlers might scribble on this memory. + SmallVector Handlers( + CatchScope.begin(), CatchScope.begin() + NumHandlers); + + EHStack.popCatch(); + + // Determine if we need an implicit rethrow for all these catch handlers; + // see the comment below. + bool doImplicitRethrow = false; + if (IsFnTryBlock) + doImplicitRethrow = isa(CurCodeDecl) || + isa(CurCodeDecl); + + // Wasm uses Windows-style EH instructions, but merges all catch clauses into + // one big catchpad. So we save the old funclet pad here before we traverse + // each catch handler. + SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad); + mlir::Block *WasmCatchStartBlock = nullptr; + if (EHPersonality::get(*this).isWasmPersonality()) { + llvm_unreachable("NYI"); + } + + bool HasCatchAll = false; + for (unsigned I = NumHandlers; I != 0; --I) { + HasCatchAll |= Handlers[I - 1].isCatchAll(); + mlir::Block *CatchBlock = Handlers[I - 1].Block; + mlir::OpBuilder::InsertionGuard guard(getBuilder()); + getBuilder().setInsertionPointToStart(CatchBlock); + + // Catch the exception if this isn't a catch-all. + const CXXCatchStmt *C = S.getHandler(I - 1); + + // Enter a cleanup scope, including the catch variable and the + // end-catch. + RunCleanupsScope CatchScope(*this); + + // Initialize the catch variable and set up the cleanups. + SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad); + CGM.getCXXABI().emitBeginCatch(*this, C); + + // Emit the PGO counter increment. + assert(!UnimplementedFeature::incrementProfileCounter()); + + // Perform the body of the catch. + (void)buildStmt(C->getHandlerBlock(), /*useCurrentScope=*/true); + + // [except.handle]p11: + // The currently handled exception is rethrown if control + // reaches the end of a handler of the function-try-block of a + // constructor or destructor. + + // It is important that we only do this on fallthrough and not on + // return. Note that it's illegal to put a return in a + // constructor function-try-block's catch handler (p14), so this + // really only applies to destructors. + if (doImplicitRethrow && HaveInsertPoint()) { + llvm_unreachable("NYI"); + } + + // Fall out through the catch cleanups. + CatchScope.ForceCleanup(); + } + + // Because in wasm we merge all catch clauses into one big catchpad, in case + // none of the types in catch handlers matches after we test against each of + // them, we should unwind to the next EH enclosing scope. We generate a call + // to rethrow function here to do that. + if (EHPersonality::get(*this).isWasmPersonality() && !HasCatchAll) { + assert(WasmCatchStartBlock); + // Navigate for the "rethrow" block we created in emitWasmCatchPadBlock(). + // Wasm uses landingpad-style conditional branches to compare selectors, so + // we follow the false destination for each of the cond branches to reach + // the rethrow block. + llvm_unreachable("NYI"); + } + + assert(!UnimplementedFeature::incrementProfileCounter()); } /// Check whether this is a non-EH scope, i.e. a scope which doesn't @@ -689,4 +817,4 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl() { } return LP; -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index de0f20718980..aee30a35eb8c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -855,6 +855,7 @@ class CIRGenFunction : public CIRGenTypeCache { ArrayRef Attrs = std::nullopt); mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); + mlir::LogicalResult buildCXXTryStmtUnderScope(const clang::CXXTryStmt &S); mlir::LogicalResult buildCXXTryStmt(const clang::CXXTryStmt &S); void enterCXXTryStmt(const CXXTryStmt &S, mlir::cir::CatchOp catchOp, bool IsFnTryBlock = false); @@ -1085,7 +1086,8 @@ class CIRGenFunction : public CIRGenTypeCache { /// Emit the alloca and debug information for a /// local variable. Does not emit initialization or destruction. - AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D); + AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D, + mlir::OpBuilder::InsertPoint = {}); void buildAutoVarInit(const AutoVarEmission &emission); void buildAutoVarCleanups(const AutoVarEmission &emission); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 0c2989fe00e6..6d43f33fee3c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -182,6 +182,8 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { return CatchTypeInfo{rtti, 0}; } + void emitBeginCatch(CIRGenFunction &CGF, const CXXCatchStmt *C) override; + bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) override; @@ -582,6 +584,200 @@ void CIRGenItaniumCXXABI::buildCXXDestructors(const CXXDestructorDecl *D) { CGM.buildGlobal(GlobalDecl(D, Dtor_Deleting)); } +namespace { +/// From traditional LLVM, useful info for LLVM lowering support: +/// A cleanup to call __cxa_end_catch. In many cases, the caught +/// exception type lets us state definitively that the thrown exception +/// type does not have a destructor. In particular: +/// - Catch-alls tell us nothing, so we have to conservatively +/// assume that the thrown exception might have a destructor. +/// - Catches by reference behave according to their base types. +/// - Catches of non-record types will only trigger for exceptions +/// of non-record types, which never have destructors. +/// - Catches of record types can trigger for arbitrary subclasses +/// of the caught type, so we have to assume the actual thrown +/// exception type might have a throwing destructor, even if the +/// caught type's destructor is trivial or nothrow. +struct CallEndCatch final : EHScopeStack::Cleanup { + CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {} + bool MightThrow; + + void Emit(CIRGenFunction &CGF, Flags flags) override { + if (!MightThrow) { + // Traditional LLVM codegen would emit a call to __cxa_end_catch + // here. For CIR, just let it pass since the cleanup is going + // to be emitted on a later pass when lowering the catch region. + // CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM)); + CGF.getBuilder().create(*CGF.currSrcLoc); + return; + } + + // Traditional LLVM codegen would emit a call to __cxa_end_catch + // here. For CIR, just let it pass since the cleanup is going + // to be emitted on a later pass when lowering the catch region. + // CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM)); + CGF.getBuilder().create(*CGF.currSrcLoc); + } +}; +} // namespace + +/// From traditional LLVM codegen, useful info for LLVM lowering support: +/// Emits a call to __cxa_begin_catch and enters a cleanup to call +/// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume +/// that the exception object's dtor is nothrow, therefore the __cxa_end_catch +/// call can be marked as nounwind even if EndMightThrow is true. +/// +/// \param EndMightThrow - true if __cxa_end_catch might throw +static mlir::Value CallBeginCatch(CIRGenFunction &CGF, mlir::Value Exn, + mlir::Type ParamTy, bool EndMightThrow) { + // llvm::CallInst *call = + // CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn); + auto catchParam = CGF.getBuilder().create( + Exn.getLoc(), ParamTy, Exn); + + CGF.EHStack.pushCleanup( + NormalAndEHCleanup, + EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor); + + return catchParam; +} + +/// A "special initializer" callback for initializing a catch +/// parameter during catch initialization. +static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, + Address ParamAddr, SourceLocation Loc) { + // Load the exception from where the landing pad saved it. + auto Exn = CGF.currExceptionInfo.exceptionAddr; + + CanQualType CatchType = + CGF.CGM.getASTContext().getCanonicalType(CatchParam.getType()); + auto CIRCatchTy = CGF.convertTypeForMem(CatchType); + + // If we're catching by reference, we can just cast the object + // pointer to the appropriate pointer. + if (isa(CatchType)) { + llvm_unreachable("NYI"); + return; + } + + // Scalars and complexes. + TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); + if (TEK != TEK_Aggregate) { + // Notes for LLVM lowering: + // If the catch type is a pointer type, __cxa_begin_catch returns + // the pointer by value. + if (CatchType->hasPointerRepresentation()) { + auto catchParam = CallBeginCatch(CGF, Exn, CIRCatchTy, false); + + switch (CatchType.getQualifiers().getObjCLifetime()) { + case Qualifiers::OCL_Strong: + llvm_unreachable("NYI"); + // arc retain non block: + assert(!UnimplementedFeature::ARC()); + [[fallthrough]]; + + case Qualifiers::OCL_None: + case Qualifiers::OCL_ExplicitNone: + case Qualifiers::OCL_Autoreleasing: + CGF.getBuilder().createStore(Exn.getLoc(), catchParam, ParamAddr); + return; + + case Qualifiers::OCL_Weak: + llvm_unreachable("NYI"); + // arc init weak: + assert(!UnimplementedFeature::ARC()); + return; + } + llvm_unreachable("bad ownership qualifier!"); + } + + // Otherwise, it returns a pointer into the exception object. + auto catchParam = CallBeginCatch( + CGF, Exn, CGF.getBuilder().getPointerTo(CIRCatchTy), false); + LValue srcLV = CGF.MakeNaturalAlignAddrLValue(catchParam, CatchType); + LValue destLV = CGF.makeAddrLValue(ParamAddr, CatchType); + switch (TEK) { + case TEK_Complex: + llvm_unreachable("NYI"); + return; + case TEK_Scalar: { + auto exnLoad = CGF.buildLoadOfScalar(srcLV, catchParam.getLoc()); + CGF.buildStoreOfScalar(exnLoad, destLV, /*init*/ true); + return; + } + case TEK_Aggregate: + llvm_unreachable("evaluation kind filtered out!"); + } + llvm_unreachable("bad evaluation kind"); + } + + // Check for a copy expression. If we don't have a copy expression, + // that means a trivial copy is okay. + const Expr *copyExpr = CatchParam.getInit(); + if (!copyExpr) { + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); +} + +/// Begins a catch statement by initializing the catch variable and +/// calling __cxa_begin_catch. +void CIRGenItaniumCXXABI::emitBeginCatch(CIRGenFunction &CGF, + const CXXCatchStmt *S) { + // Notes for LLVM lowering: + // We have to be very careful with the ordering of cleanups here: + // C++ [except.throw]p4: + // The destruction [of the exception temporary] occurs + // immediately after the destruction of the object declared in + // the exception-declaration in the handler. + // + // So the precise ordering is: + // 1. Construct catch variable. + // 2. __cxa_begin_catch + // 3. Enter __cxa_end_catch cleanup + // 4. Enter dtor cleanup + // + // We do this by using a slightly abnormal initialization process. + // Delegation sequence: + // - ExitCXXTryStmt opens a RunCleanupsScope + // - EmitAutoVarAlloca creates the variable and debug info + // - InitCatchParam initializes the variable from the exception + // - CallBeginCatch calls __cxa_begin_catch + // - CallBeginCatch enters the __cxa_end_catch cleanup + // - EmitAutoVarCleanups enters the variable destructor cleanup + // - EmitCXXTryStmt emits the code for the catch body + // - EmitCXXTryStmt close the RunCleanupsScope + + VarDecl *CatchParam = S->getExceptionDecl(); + if (!CatchParam) { + llvm_unreachable("NYI"); + return; + } + + auto getCatchParamAllocaIP = [&]() { + auto currIns = CGF.getBuilder().saveInsertionPoint(); + auto currParent = currIns.getBlock()->getParentOp(); + mlir::Operation *scopeLikeOp = + currParent->getParentOfType(); + if (!scopeLikeOp) + scopeLikeOp = currParent->getParentOfType(); + assert(scopeLikeOp && "unknown outermost scope-like parent"); + assert(scopeLikeOp->getNumRegions() == 1 && "expected single region"); + + auto *insertBlock = &scopeLikeOp->getRegion(0).getBlocks().back(); + return CGF.getBuilder().getBestAllocaInsertPoint(insertBlock); + }; + + // Emit the local. Make sure the alloca's superseed the current scope, since + // these are going to be consumed by `cir.catch`, which is not within the + // current scope. + auto var = CGF.buildAutoVarAlloca(*CatchParam, getCatchParamAllocaIP()); + InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc()); + // FIXME(cir): double check cleanups here are happening in the right blocks. + CGF.buildAutoVarCleanups(var); +} + mlir::cir::GlobalOp CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 24e8300e674f..708a941eb436 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1157,9 +1157,7 @@ parseCatchOp(OpAsmParser &parser, if (parser.parseRParen().failed()) return parser.emitError(parser.getCurrentLocation(), "expected ')'"); } - catchList.push_back(mlir::cir::CatchEntryAttr::get( - parser.getContext(), exceptionType, exceptionTypeInfo)); - + catchList.push_back(exceptionTypeInfo); return parseAndCheckRegion(); }; @@ -1182,16 +1180,12 @@ void printCatchOp(OpAsmPrinter &p, CatchOp op, llvm::interleaveComma(catchList, p, [&](const Attribute &a) { p.printNewline(); p.increaseIndent(); - auto attr = a.cast(); - auto exType = attr.getExceptionType(); - auto exRtti = attr.getExceptionTypeInfo(); + auto exRtti = a; - if (!exType) { + if (!exRtti) { p << "all"; } else { p << "type ("; - p.printType(exType); - p << ", "; p.printAttribute(exRtti); p << ") "; } @@ -1231,9 +1225,10 @@ void CatchOp::getSuccessorRegions(mlir::RegionBranchPoint point, } void CatchOp::build( - OpBuilder &builder, OperationState &result, + OpBuilder &builder, OperationState &result, mlir::Value exceptionInfo, function_ref catchBuilder) { assert(catchBuilder && "the builder callback for regions must be present"); + result.addOperands(ValueRange{exceptionInfo}); OpBuilder::InsertionGuard guardCatch(builder); catchBuilder(builder, result.location, result); } diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp new file mode 100644 index 000000000000..908237ea4ff7 --- /dev/null +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -0,0 +1,43 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +double division(int a, int b); + +// CHECK: cir.func @_Z2tcv() +unsigned long long tc() { + int x = 50, y = 3; + unsigned long long z; + + try { + // CHECK: cir.scope { + // CHECK: %[[msg:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["msg"] + // CHECK: %[[idx:.*]] = cir.alloca !s32i, cir.ptr , ["idx"] + + // CHECK: %[[try_eh:.*]] = cir.scope { + // CHECK: %[[eh_info:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["__exception_ptr"] + // CHECK: %[[local_a:.*]] = cir.alloca !s32i, cir.ptr , ["a", init] + int a = 4; + z = division(x, y); + // CHECK: %[[div_res:.*]] = cir.call @_Z8divisionii(%14, %15) : (!s32i, !s32i) -> f64 + a++; + + // CHECK: cir.catch(%[[try_eh]] : !cir.ptr, [ + } catch (int idx) { + // CHECK: type (#cir.global_view<@_ZTIi> : !cir.ptr) + // CHECK: { + // CHECK: %[[catch_idx_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr + // CHECK: %[[idx_load:.*]] = cir.load %[[catch_idx_addr]] : cir.ptr , !s32i + // CHECK: cir.store %[[idx_load]], %[[idx]] : !s32i, cir.ptr loc(#loc25) + z = 98; + idx++; + } catch (const char* msg) { + // CHECK: type (#cir.global_view<@_ZTIPKc> : !cir.ptr) + // CHECK: { + // CHECK: %[[msg_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr loc(#loc37) + // CHECK: cir.store %[[msg_addr]], %[[msg]] : !cir.ptr, cir.ptr > loc(#loc37) + z = 99; + (void)msg[0]; + } + + return z; +} \ No newline at end of file From d114493a4f59c22f4042d1c77e6620f32906afdb Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 25 Jan 2024 12:24:19 -0800 Subject: [PATCH 1363/2301] [CIR] Fix a few depends to only apply to ClangIR --- clang/lib/Sema/CMakeLists.txt | 11 +++++++++-- clang/test/CMakeLists.txt | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt index fce42d60a615..d43c1d9c2086 100644 --- a/clang/lib/Sema/CMakeLists.txt +++ b/clang/lib/Sema/CMakeLists.txt @@ -13,6 +13,14 @@ clang_tablegen(OpenCLBuiltins.inc -gen-clang-opencl-builtins TARGET ClangOpenCLBuiltinsImpl ) +if(CLANG_ENABLE_CIR) + set(CIR_DEPS + MLIRCIROpsIncGen + MLIRCIR + ) +endif() + + add_clang_library(clangSema AnalysisBasedWarnings.cpp CheckExprLifetime.cpp @@ -100,8 +108,7 @@ add_clang_library(clangSema ClangOpenCLBuiltinsImpl omp_gen ClangDriverOptions - MLIRCIROpsIncGen - MLIRCIR + ${CIR_DEPS} LINK_LIBS clangAPINotes diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt index ee9646d4568d..b082568c3239 100644 --- a/clang/test/CMakeLists.txt +++ b/clang/test/CMakeLists.txt @@ -82,13 +82,13 @@ list(APPEND CLANG_TEST_DEPS clang-sycl-linker diagtool hmaptool - mlir-translate ) if(CLANG_ENABLE_CIR) list(APPEND CLANG_TEST_DEPS cir-opt cir-translate + mlir-translate ) endif() From a1da245f23d75658ffeb88e4b4c8f230b86030d8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 26 Jan 2024 19:16:57 -0500 Subject: [PATCH 1364/2301] [CIR] Make MLIRCIR depend on MLIRCIRInterfaces This is currently missing and Debug builds are failing without it. --- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index f4609c3aad32..27d826e84489 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -15,6 +15,7 @@ add_clang_library(MLIRCIR LINK_LIBS PUBLIC MLIRIR + MLIRCIRInterfaces MLIRDataLayoutInterfaces MLIRFuncDialect MLIRLoopLikeInterface From 6b9f3e9c573dd306af0f732ef4efe84991776a87 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 26 Jan 2024 09:57:07 -0800 Subject: [PATCH 1365/2301] [CIR] Remove LLVM_ENABLE_PROJECTS support ghstack-source-id: 855519648a4bf2dced501f96e6de1b9b164d85ad Pull Request resolved: https://github.com/llvm/clangir/pull/424 --- llvm/CMakeLists.txt | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt index bbd9a43ce8bd..ad12100fdb5b 100644 --- a/llvm/CMakeLists.txt +++ b/llvm/CMakeLists.txt @@ -116,7 +116,7 @@ endif() # LLVM_EXTERNAL_${project}_SOURCE_DIR using LLVM_ALL_PROJECTS # This allows an easy way of setting up a build directory for llvm and another # one for llvm+clang+... using the same sources. -set(LLVM_ALL_PROJECTS "bolt;cir;clang;clang-tools-extra;compiler-rt;cross-project-tests;libc;libclc;lld;lldb;mlir;openmp;polly;pstl") +set(LLVM_ALL_PROJECTS "bolt;clang;clang-tools-extra;compiler-rt;cross-project-tests;libc;libclc;lld;lldb;mlir;openmp;polly;pstl") if (${CMAKE_SYSTEM_NAME} MATCHES "AIX") # Disallow 'openmp' as a LLVM PROJECT on AIX as the supported way is to use # LLVM_ENABLE_RUNTIMES. @@ -157,17 +157,6 @@ if ("libc" IN_LIST LLVM_ENABLE_PROJECTS) "https://libc.llvm.org/ for building the runtimes.") endif() -if ("cir" IN_LIST LLVM_ENABLE_PROJECTS) - if (NOT "mlir" IN_LIST LLVM_ENABLE_PROJECTS) - message(STATUS "Enabling MLIR as a dependency to CIR") - list(APPEND LLVM_ENABLE_PROJECTS "mlir") - endif() - - if (NOT "clang" IN_LIST LLVM_ENABLE_PROJECTS) - message(FATAL_ERROR "Clang is not enabled, but is required to use CIR") - endif() -endif() - # Select the runtimes to build # # As we migrate runtimes to using the bootstrapping build, the set of default runtimes @@ -237,13 +226,6 @@ if (LLVM_ENABLE_PROJECTS_USED OR NOT LLVM_ENABLE_PROJECTS STREQUAL "") string(REGEX REPLACE "-" "_" upper_proj ${upper_proj}) if ("${proj}" IN_LIST LLVM_ENABLE_PROJECTS) message(STATUS "${proj} project is enabled") - # ClangIR is integrated inside clang and also provides the cir-opt, - # it needs some special handling. - if ("${proj}" STREQUAL "cir") - set(CLANG_ENABLE_CIR ON) - continue() - endif() - set(SHOULD_ENABLE_PROJECT TRUE) set(PROJ_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../${proj}") if(NOT EXISTS "${PROJ_DIR}" OR NOT IS_DIRECTORY "${PROJ_DIR}") From 8045f3c25862cb30e182dfaed1bcdaf24a568f02 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 26 Jan 2024 20:25:22 -0500 Subject: [PATCH 1366/2301] [CIR][Interfaces] Temporarily disable verifier to get out of circular dep --- .../lib/CIR/Interfaces/CIRLoopOpInterface.cpp | 9 ++-- clang/test/CIR/IR/invalid.cir | 41 ------------------ clang/test/CIR/IR/invalid_xfail.cir | 42 +++++++++++++++++++ 3 files changed, 47 insertions(+), 45 deletions(-) create mode 100644 clang/test/CIR/IR/invalid_xfail.cir diff --git a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp index f3e2d1e61274..8b1708fa815c 100644 --- a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp +++ b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp @@ -45,10 +45,11 @@ void LoopOpInterface::getLoopOpSuccessorRegions( /// Verify invariants of the LoopOpInterface. LogicalResult detail::verifyLoopOpInterface(Operation *op) { - auto loopOp = cast(op); - if (!isa(loopOp.getCond().back().getTerminator())) - return op->emitOpError( - "expected condition region to terminate with 'cir.condition'"); + // FIXME: fix this so the conditionop isn't requiring MLIRCIR + // auto loopOp = cast(op); + // if (!isa(loopOp.getCond().back().getTerminator())) + // return op->emitOpError( + // "expected condition region to terminate with 'cir.condition'"); return success(); } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 09d4f36dd03a..b71d92baa1e9 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -302,23 +302,6 @@ cir.func @cast24(%p : !u32i) { // ----- -#false = #cir.bool : !cir.bool -#true = #cir.bool : !cir.bool -cir.func @b0() { - cir.scope { - cir.while { // expected-error {{expected condition region to terminate with 'cir.condition'}} - cir.yield - } do { - cir.br ^bb1 - ^bb1: - cir.return - } - } - cir.return -} - -// ----- - !u32i = !cir.int !u8i = !cir.int module { @@ -806,27 +789,3 @@ cir.func @const_type_mismatch() -> () { %2 = cir.const(#cir.int<0> : !s8i) : !u8i cir.return } - -// ----- - -cir.func @invalid_cond_region_terminator(%arg0 : !cir.bool) -> !cir.void { - cir.do { // expected-error {{op expected condition region to terminate with 'cir.condition'}} - cir.yield - } while { - cir.yield - } - cir.return -} - -// ----- - -cir.func @invalidConditionTerminator (%arg0 : !cir.bool) -> !cir.void { - cir.for : cond { // expected-error {{op expected condition region to terminate with 'cir.condition'}} - cir.yield - } body { - cir.yield - } step { - cir.yield - } - cir.return -} diff --git a/clang/test/CIR/IR/invalid_xfail.cir b/clang/test/CIR/IR/invalid_xfail.cir new file mode 100644 index 000000000000..c29dbf075b6b --- /dev/null +++ b/clang/test/CIR/IR/invalid_xfail.cir @@ -0,0 +1,42 @@ +// Test attempts to build bogus CIR +// RUN: cir-opt %s -verify-diagnostics -split-input-file +// XFAIL: * + +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool +cir.func @b0() { + cir.scope { + cir.while { // expected-error {{expected condition region to terminate with 'cir.condition'}} + cir.yield + } do { + cir.br ^bb1 + ^bb1: + cir.return + } + } + cir.return +} + +// ----- + +cir.func @invalid_cond_region_terminator(%arg0 : !cir.bool) -> !cir.void { + cir.do { // expected-error {{op expected condition region to terminate with 'cir.condition'}} + cir.yield + } while { + cir.yield + } + cir.return +} + +// ----- + +cir.func @invalidConditionTerminator (%arg0 : !cir.bool) -> !cir.void { + cir.for : cond { // expected-error {{op expected condition region to terminate with 'cir.condition'}} + cir.yield + } body { + cir.yield + } step { + cir.yield + } + cir.return +} From 21657abd8ba265840a9f52e44ea93db7d83ce90f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 28 Jan 2024 23:29:39 -0500 Subject: [PATCH 1367/2301] [CIR] Move CI to CLANG_ENABLE_CIR ghstack-source-id: 0706d6bb81b5b8eefb04146719b4443aedb29ab1 Pull Request resolved: https://github.com/llvm/clangir/pull/427 --- .github/workflows/clang-cir-tests.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/clang-cir-tests.yml b/.github/workflows/clang-cir-tests.yml index 9add7ccc5b15..c38e952d1f02 100644 --- a/.github/workflows/clang-cir-tests.yml +++ b/.github/workflows/clang-cir-tests.yml @@ -34,4 +34,5 @@ jobs: uses: ./.github/workflows/llvm-project-tests.yml with: build_target: check-clang-cir - projects: clang;mlir;cir + projects: clang;mlir + extra_cmake_args: -DCLANG_ENABLE_CIR=ON From a26a50d0882b569f3ed7b4a8d69a6e398ceeb6f8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Jan 2024 18:13:21 -0800 Subject: [PATCH 1368/2301] [CIR][CIRGen][Exceptions] Use cir.try instead of cir.scope One more step towards completing try/catch. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 48 +++++--------------- clang/lib/CIR/CodeGen/CIRGenException.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 +++++++ clang/test/CIR/CodeGen/try-catch.cpp | 2 +- 4 files changed, 30 insertions(+), 38 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d518f13a659a..9638f3fe4967 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -636,7 +636,7 @@ def ConditionOp : CIR_Op<"condition", [ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "WhileOp", "ForOp", "AwaitOp", - "TernaryOp", "GlobalOp", "DoWhileOp", "CatchOp"]>]> { + "TernaryOp", "GlobalOp", "DoWhileOp", "CatchOp", "TryOp"]>]> { let summary = "Represents the default branching behaviour of a region"; let description = [{ The `cir.yield` operation terminates regions on different CIR operations, @@ -2262,55 +2262,31 @@ def AwaitOp : CIR_Op<"await", def TryOp : CIR_Op<"try", [DeclareOpInterfaceMethods, - RecursivelySpeculatable, NoRegionArguments]> { + RecursivelySpeculatable, AutomaticAllocationScope, + NoRegionArguments]> { let summary = ""; let description = [{ ```mlir - cir.scope { - // Selector and exception control related allocas - // C++ `try {}` local variable declarations - %except_info = cir.try { - %res0, %exh = cir.call @return_something() - %if %exh - cir.yield %exh - - %exh2 = cir.call @return_void() - %if %exh2 - cir.yield %exh - cir.yield #zero : !except_type - } - ... - cir.br ^cleanup - ^cleanup: - // Run dtors - ... - // Catch based %except_info - cir.catch(%except_info, [ - /*catch A*/ {}, - /*catch B*/ {}, - ... - all {} - ]) - cir.yield - } + TBD ``` Note that variables declared inside a `try {}` in C++ will - have their allocas places in the surrounding scope. + have their allocas places in the surrounding (parent) scope. }]; let regions = (region SizedRegion<1>:$body); - // FIXME: should be exception type. - let results = (outs CIR_AnyType:$result); + let results = (outs ExceptionInfoPtr:$result); let assemblyFormat = [{ - `{` - $body - `}` `:` functional-type(operands, results) attr-dict + $body `:` functional-type(operands, results) attr-dict }]; - // Everything already covered elswhere. + // Everything already covered elsewhere. let hasVerifier = 0; + let builders = [ + OpBuilder<(ins + "function_ref":$tryBuilder)>, + ]; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 2dd279191fc5..ffd16a0ca474 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -325,7 +325,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { // populate right away. Reserve some space to store the exception info but // don't emit the bulk right away, for now only make sure the scope returns // the exception information. - auto tryScope = builder.create( + auto tryScope = builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) { // Allocate space for our exception info that might be passed down diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 708a941eb436..e055af5b187c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -736,6 +736,22 @@ LogicalResult ScopeOp::verify() { return success(); } // TryOp //===----------------------------------------------------------------------===// +void TryOp::build( + OpBuilder &builder, OperationState &result, + function_ref scopeBuilder) { + assert(scopeBuilder && "the builder callback for 'then' must be present"); + + OpBuilder::InsertionGuard guard(builder); + Region *scopeRegion = result.addRegion(); + builder.createBlock(scopeRegion); + + mlir::Type yieldTy; + scopeBuilder(builder, yieldTy, result.location); + + if (yieldTy) + result.addTypes(TypeRange{yieldTy}); +} + void TryOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // The only region always branch back to the parent operation. diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 908237ea4ff7..974997df9901 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -13,7 +13,7 @@ unsigned long long tc() { // CHECK: %[[msg:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["msg"] // CHECK: %[[idx:.*]] = cir.alloca !s32i, cir.ptr , ["idx"] - // CHECK: %[[try_eh:.*]] = cir.scope { + // CHECK: %[[try_eh:.*]] = cir.try { // CHECK: %[[eh_info:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["__exception_ptr"] // CHECK: %[[local_a:.*]] = cir.alloca !s32i, cir.ptr , ["a", init] int a = 4; From c002a062d99831c21aac69846c8f128f15710247 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 30 Jan 2024 12:21:25 -0800 Subject: [PATCH 1369/2301] [CIR][CIRGen][NFC] Make buildCall more generic by using CIRCallOpInterface This is prep work for introducing cir.try_call inside cir.try scopes. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 69 +++++++++++-------- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 7 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 2 +- 6 files changed, 50 insertions(+), 36 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index fb0322a0f341..e390b3812bb1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -362,7 +362,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, - mlir::cir::CallOp *callOrInvoke, + mlir::cir::CIRCallOpInterface *callOrTryCall, bool IsMustTail, mlir::Location loc, std::optional E) { auto builder = CGM.getBuilder(); @@ -598,33 +598,46 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, auto callLoc = loc; assert(builder.getInsertionBlock() && "expected valid basic block"); - mlir::cir::CallOp theCall; - if (auto fnOp = dyn_cast(CalleePtr)) { - assert(fnOp && "only direct call supported"); - theCall = builder.create(callLoc, fnOp, CIRCallArgs); - } else if (auto loadOp = dyn_cast(CalleePtr)) { - theCall = builder.create(callLoc, loadOp->getResult(0), - CIRFuncTy, CIRCallArgs); - } else if (auto getGlobalOp = dyn_cast(CalleePtr)) { - // FIXME(cir): This peephole optimization to avoids indirect calls for - // builtins. This should be fixed in the builting declaration instead by not - // emitting an unecessary get_global in the first place. - auto *globalOp = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), - getGlobalOp.getName()); - assert(getGlobalOp && "undefined global function"); - auto callee = llvm::dyn_cast(globalOp); - assert(callee && "operation is not a function"); - theCall = builder.create(callLoc, callee, CIRCallArgs); - } else { - llvm_unreachable("expected call variant to be handled"); - } + mlir::cir::CIRCallOpInterface theCall = [&]() { + mlir::cir::FuncType indirectFuncTy; + mlir::Value indirectFuncVal; + mlir::cir::FuncOp directFuncOp; + + if (auto fnOp = dyn_cast(CalleePtr)) { + directFuncOp = fnOp; + } else if (auto loadOp = dyn_cast(CalleePtr)) { + indirectFuncTy = CIRFuncTy; + indirectFuncVal = loadOp->getResult(0); + } else if (auto getGlobalOp = dyn_cast(CalleePtr)) { + // FIXME(cir): This peephole optimization to avoids indirect calls for + // builtins. This should be fixed in the builting declaration instead by + // not emitting an unecessary get_global in the first place. + auto *globalOp = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), + getGlobalOp.getName()); + assert(getGlobalOp && "undefined global function"); + directFuncOp = llvm::dyn_cast(globalOp); + assert(directFuncOp && "operation is not a function"); + } else { + llvm_unreachable("expected call variant to be handled"); + } - if (E) - theCall.setAstAttr( - mlir::cir::ASTCallExprAttr::get(builder.getContext(), *E)); + mlir::cir::CIRCallOpInterface callLikeOp; + if (indirectFuncTy) { + callLikeOp = builder.create( + callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs); + } else { + callLikeOp = + builder.create(callLoc, directFuncOp, CIRCallArgs); + } + + if (E) + callLikeOp->setAttr( + "ast", mlir::cir::ASTCallExprAttr::get(builder.getContext(), *E)); - if (callOrInvoke) - callOrInvoke = &theCall; + if (callOrTryCall) + *callOrTryCall = callLikeOp; + return callLikeOp; + }(); if (const auto *FD = dyn_cast_or_null(CurFuncDecl)) assert(!FD->getAttr() && "NYI"); @@ -666,7 +679,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, DestIsVolatile = false; } - auto Results = theCall.getResults(); + auto Results = theCall->getOpResults(); assert(Results.size() <= 1 && "multiple returns NYI"); SourceLocRAIIObject Loc{*this, callLoc}; @@ -676,7 +689,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, case TEK_Scalar: { // If the argument doesn't match, perform a bitcast to coerce it. This // can happen due to trivial type mismatches. - auto Results = theCall.getResults(); + auto Results = theCall->getOpResults(); assert(Results.size() <= 1 && "multiple returns NYI"); assert(Results[0].getType() == RetCIRTy && "Bitcast support NYI"); return RValue::get(Results[0]); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 777c9b57de31..84d117a1cd94 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1147,7 +1147,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, assert(!CGM.getLangOpts().HIP && "HIP NYI"); assert(!MustTailCall && "Must tail NYI"); - mlir::cir::CallOp callOP = nullptr; + mlir::cir::CIRCallOpInterface callOP; RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, &callOP, E == MustTailCall, getLoc(E->getExprLoc()), E); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 3e318abdbcd1..4de5f522dc27 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -847,13 +847,13 @@ static RValue buildNewDeleteCall(CIRGenFunction &CGF, const FunctionDecl *CalleeDecl, const FunctionProtoType *CalleeType, const CallArgList &Args) { - mlir::cir::CallOp CallOrInvoke{}; + mlir::cir::CIRCallOpInterface CallOrTryCall; auto CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl); CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl)); RValue RV = CGF.buildCall(CGF.CGM.getTypes().arrangeFreeFunctionCall( Args, CalleeType, /*ChainCall=*/false), - Callee, ReturnValueSlot(), Args, &CallOrInvoke); + Callee, ReturnValueSlot(), Args, &CallOrTryCall); /// C++1y [expr.new]p10: /// [In a new-expression,] an implementation is allowed to omit a call diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index b0db17212dd0..2b0239a35e33 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -699,7 +699,7 @@ void CIRGenFunction::buildCXXConstructorCall( const CIRGenFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall( Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); - mlir::cir::CallOp C; + mlir::cir::CIRCallOpInterface C; buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, getLoc(Loc)); assert(CGM.getCodeGenOpts().OptimizationLevel == 0 || diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index aee30a35eb8c..457cae34f42d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -775,16 +775,17 @@ class CIRGenFunction : public CIRGenTypeCache { /// LLVM arguments and the types they were derived from. RValue buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, - const CallArgList &Args, mlir::cir::CallOp *callOrInvoke, + const CallArgList &Args, + mlir::cir::CIRCallOpInterface *callOrTryCall, bool IsMustTail, mlir::Location loc, std::optional E = std::nullopt); RValue buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, - mlir::cir::CallOp *callOrInvoke = nullptr, + mlir::cir::CIRCallOpInterface *callOrTryCall = nullptr, bool IsMustTail = false) { assert(currSrcLoc && "source location must have been set"); - return buildCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke, + return buildCall(CallInfo, Callee, ReturnValue, Args, callOrTryCall, IsMustTail, *currSrcLoc, std::nullopt); } RValue buildCall(clang::QualType FnType, const CIRGenCallee &Callee, diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 6d43f33fee3c..ad7785ac98e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -615,7 +615,7 @@ struct CallEndCatch final : EHScopeStack::Cleanup { // Traditional LLVM codegen would emit a call to __cxa_end_catch // here. For CIR, just let it pass since the cleanup is going // to be emitted on a later pass when lowering the catch region. - // CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM)); + // CGF.EmitRuntimeCallOrTryCall(getEndCatchFn(CGF.CGM)); CGF.getBuilder().create(*CGF.currSrcLoc); } }; From 81ea3073ae94c79da7b7addaa51cb266e2704b0f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 30 Jan 2024 14:28:36 -0800 Subject: [PATCH 1370/2301] [CIR][CIRGen][Exceptions] Use cir.try_call within cir.try regions One more incremental step towards try/catch: properly use cir.try_call instead of regular cir.call when within a cir.try region. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 62 +++++++++++-------- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 18 ++++++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 20 ++++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 31 +++++++--- clang/test/CIR/CodeGen/try-catch.cpp | 2 +- clang/test/CIR/IR/exceptions.cir | 22 +++---- clang/test/CIR/IR/invalid.cir | 25 ++++++++ 7 files changed, 129 insertions(+), 51 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 9638f3fe4967..c32773ccb5b0 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2053,16 +2053,18 @@ class CIR_CallOp extra_traits = []> : def CallOp : CIR_CallOp<"call"> { let summary = "call operation"; let description = [{ - The `call` operation represents a direct call to a function that is within - the same symbol scope as the call. The operands and result types of the - call must match the specified function type. The callee is encoded as a - symbol reference attribute named "callee". - - To walk the operands for this operation, use `getNumArgOperands()`, - `getArgOperand()`, `getArgOperands()`, `arg_operand_begin()` and - `arg_operand_begin()`. Avoid using `getNumOperands()`, `getOperand()`, - `operand_begin()`, etc, direclty - might be misleading given on indirect - calls the callee is encoded in the first operation operand. + Direct and indirect calls. + + For direct calls, the `call` operation represents a direct call to a + function that is within the same symbol scope as the call. The operands + and result types of the call must match the specified function type. + The callee is encoded as a aymbol reference attribute named "callee". + + For indirect calls, the first `mlir::Operation` operand is the call target. + + Given the way indirect calls are encoded, avoid using `mlir::Operation` + methods to walk the operands for this operation, instead use the methods + provided by `CIRCallOpInterface`. `` Example: @@ -2111,51 +2113,59 @@ def CallOp : CIR_CallOp<"call"> { def TryCallOp : CIR_CallOp<"try_call"> { let summary = "try call operation"; let description = [{ - Very similar to `cir.call` but passes down an exception object - in case anything is thrown by the callee. - - To walk the operands for this operation, use `getNumArgOperands()`, - `getArgOperand()`, `getArgOperands()`, `arg_operand_begin()` and - `arg_operand_begin()`. Avoid using `getNumOperands()`, `getOperand()`, - `operand_begin()`, etc, direclty - might be misleading given the - exception object address is also part of the raw operation's operands. - `` + Similar to `cir.call`, direct and indirect properties are the same. The + difference relies in an exception object address operand. It's encoded + as the first operands or second (for indirect calls). + + Similarly to `cir.call`, avoid using `mlir::Operation` methods to walk the + operands for this operation, instead use the methods provided by + `CIRCallOpInterface`. Example: ```mlir - %0 = cir.alloca !cir.eh.info, cir.ptr ... - %r = cir.try_call %exception(%0) @division(%1, %2), %0 + cir.try { + %0 = cir.alloca !cir.ptr, cir.ptr > + ... + %r = cir.try_call %exception(%0) @division(%1, %2) + } ... ``` }]; let arguments = (ins OptionalAttr:$callee, - ExceptionInfoPtr:$exceptionInfo, + ExceptionInfoPtrPtr:$exceptionInfo, Variadic:$callOps, OptionalAttr:$ast); let results = (outs Variadic); let builders = [ - OpBuilder<(ins "FuncOp":$callee, CArg<"ValueRange", "{}">:$operands), [{ + OpBuilder<(ins "FuncOp":$callee, "mlir::Value":$exception, + CArg<"ValueRange", "{}">:$operands), [{ + $_state.addOperands(ValueRange{exception}); $_state.addOperands(operands); $_state.addAttribute("callee", SymbolRefAttr::get(callee)); if (!callee.getFunctionType().isVoid()) $_state.addTypes(callee.getFunctionType().getReturnType()); }]>, - OpBuilder<(ins "Value":$ind_target, + OpBuilder<(ins "Value":$ind_target, "mlir::Value":$exception, "FuncType":$fn_type, CArg<"ValueRange", "{}">:$operands), [{ $_state.addOperands(ValueRange{ind_target}); + $_state.addOperands(ValueRange{exception}); $_state.addOperands(operands); if (!fn_type.isVoid()) $_state.addTypes(fn_type.getReturnType()); }]>, - OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType, - CArg<"ValueRange", "{}">:$operands), [{ + OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Value":$exception, + "mlir::Type":$resType, CArg<"ValueRange", "{}">:$operands), + [{ + $_state.addOperands(ValueRange{exception}); $_state.addOperands(operands); $_state.addAttribute("callee", callee); $_state.addTypes(resType); }]>]; + + let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 077beb86e0f9..06d4b378f80b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -287,6 +287,24 @@ def ExceptionInfoPtr : Type< "mlir::cir::ExceptionInfoType::get($_builder.getContext()))"> { } +// Pooint to pointers to exception info +def ExceptionInfoPtrPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + And<[ + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::ExceptionInfoType>()">, + ]> + ]>, "!cir.eh_info**">, + BuildableType< + "mlir::cir::PointerType::get($_builder.getContext()," + "mlir::cir::PointerType::get($_builder.getContext()," + "mlir::cir::ExceptionInfoType::get($_builder.getContext())))"> { +} + //===----------------------------------------------------------------------===// // StructType (defined in cpp files) //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index e390b3812bb1..6b5c4ccbe7ba 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -623,11 +623,23 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, mlir::cir::CIRCallOpInterface callLikeOp; if (indirectFuncTy) { - callLikeOp = builder.create( - callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs); + if (InvokeDest) { + callLikeOp = builder.create( + callLoc, currExceptionInfo.exceptionAddr, indirectFuncVal, + indirectFuncTy, CIRCallArgs); + } else { + callLikeOp = builder.create( + callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs); + } } else { - callLikeOp = - builder.create(callLoc, directFuncOp, CIRCallArgs); + if (InvokeDest) { + callLikeOp = builder.create( + callLoc, directFuncOp, currExceptionInfo.exceptionAddr, + CIRCallArgs); + } else { + callLikeOp = builder.create(callLoc, directFuncOp, + CIRCallArgs); + } } if (E) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index e055af5b187c..a5ac89d45b9f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2030,13 +2030,14 @@ void printCallCommon( Operation *op, mlir::FlatSymbolRefAttr flatSym, ::mlir::OpAsmPrinter &state, llvm::function_ref customOpHandler = []() {}) { state << ' '; - auto ops = op->getOperands(); + + auto callLikeOp = mlir::cast(op); + auto ops = callLikeOp.getArgOperands(); if (flatSym) { // Direct calls state.printAttributeWithoutType(flatSym); } else { // Indirect calls - state << ops.front(); - ops = ops.drop_front(); + state << op->getOperand(0); } state << "("; state << ops; @@ -2077,7 +2078,8 @@ mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_begin() { // FIXME(cir): for this and all the other calculations in the other methods: // we currently have no basic block arguments on cir.try_call, but if it gets // to that, this needs further adjustment. - return arg_begin++; + arg_begin++; + return arg_begin; } mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_end() { return operand_end(); @@ -2088,7 +2090,8 @@ Value cir::TryCallOp::getArgOperand(unsigned i) { if (!getCallee()) i++; // First operand is the exception pointer, skip it. - return getOperand(i + 1); + i++; + return getOperand(i); } /// Return the number of operands, , accounts for indirect call. unsigned cir::TryCallOp::getNumArgOperands() { @@ -2105,6 +2108,13 @@ cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return verifyCallCommInSymbolUses(*this, symbolTable); } +LogicalResult cir::TryCallOp::verify() { + auto tryScope = (*this)->getParentOfType(); + if (!tryScope) + return emitOpError() << "expected to be within a 'cir.try' region"; + return mlir::success(); +} + ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result) { return parseCallCommon( @@ -2131,10 +2141,13 @@ ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, if (parser.parseRParen().failed()) return parser.emitError(parser.getCurrentLocation(), "expected ')'"); - auto exceptionPtrTy = cir::PointerType::get( - parser.getBuilder().getContext(), - parser.getBuilder().getType<::mlir::cir::ExceptionInfoType>()); - if (parser.resolveOperands(exceptionOperands, exceptionPtrTy, + auto &builder = parser.getBuilder(); + auto exceptionPtrPtrTy = cir::PointerType::get( + builder.getContext(), + cir::PointerType::get( + builder.getContext(), + builder.getType<::mlir::cir::ExceptionInfoType>())); + if (parser.resolveOperands(exceptionOperands, exceptionPtrPtrTy, exceptionOperandsLoc, result.operands)) return ::mlir::failure(); diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 974997df9901..4c4521820f7b 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -18,7 +18,7 @@ unsigned long long tc() { // CHECK: %[[local_a:.*]] = cir.alloca !s32i, cir.ptr , ["a", init] int a = 4; z = division(x, y); - // CHECK: %[[div_res:.*]] = cir.call @_Z8divisionii(%14, %15) : (!s32i, !s32i) -> f64 + // CHECK: %[[div_res:.*]] = cir.try_call exception(%[[eh_info]]) @_Z8divisionii({{.*}}) : (!cir.ptr>, !s32i, !s32i) -> f64 a++; // CHECK: cir.catch(%[[try_eh]] : !cir.ptr, [ diff --git a/clang/test/CIR/IR/exceptions.cir b/clang/test/CIR/IR/exceptions.cir index d11c720e4275..aa93eea43559 100644 --- a/clang/test/CIR/IR/exceptions.cir +++ b/clang/test/CIR/IR/exceptions.cir @@ -8,17 +8,17 @@ module { cir.return %3 : !s32i } - cir.func @foo(%x : !s32i, %y : !s32i) { - cir.scope { - %10 = cir.scope { - %0 = cir.alloca !cir.eh.info, cir.ptr , ["exception_info"] {alignment = 16 : i64} + cir.func @foo(%x : !s32i, %y : !s32i) -> !cir.ptr { + %11 = cir.scope { + %10 = cir.try { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["exception_info"] {alignment = 16 : i64} %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i - // CHECK: cir.try_call exception(%1) @div(%1, %arg0, %arg1) : (!cir.ptr, !s32i, !s32i) -> !s32i - %1 = cir.load %0 : cir.ptr , !cir.eh.info - cir.yield %1 : !cir.eh.info - } : !cir.eh.info - cir.yield - } - cir.return + // CHECK: cir.try_call exception(%2) @div(%arg0, %arg1) : (!cir.ptr>, !s32i, !s32i) -> !s32i + %1 = cir.load %0 : cir.ptr >, !cir.ptr + cir.yield %1 : !cir.ptr + } : () -> !cir.ptr + cir.yield %10 : !cir.ptr + } : !cir.ptr + cir.return %11 : !cir.ptr } } \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index b71d92baa1e9..1f1e66386581 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -789,3 +789,28 @@ cir.func @const_type_mismatch() -> () { %2 = cir.const(#cir.int<0> : !s8i) : !u8i cir.return } + +// ----- + +!s32i = !cir.int + +module { + cir.func @div(%x : !s32i, %y : !s32i) -> !s32i { + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.return %3 : !s32i + } + + cir.func @foo(%x : !s32i, %y : !s32i) -> !cir.ptr { + %11 = cir.scope { + %10 = cir.scope { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["exception_info"] {alignment = 16 : i64} + %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i + // expected-error@-1 {{'cir.try_call' op expected to be within a 'cir.try' region}} + %1 = cir.load %0 : cir.ptr >, !cir.ptr + cir.yield %1 : !cir.ptr + } : !cir.ptr + cir.yield %10 : !cir.ptr + } : !cir.ptr + cir.return %11 : !cir.ptr + } +} \ No newline at end of file From 61fa2f7204d3d288fa4ab09594d21deea2df21bb Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 31 Jan 2024 09:37:14 +0800 Subject: [PATCH 1371/2301] [CIR] Add a new volatile flag to distinguish volatile accesses (#402) This patch adds a new `volatile` tag to the following operations to distinguish volatile loads and stores from normal loads and stores: - `cir.load` - `cir.store` - `cir.get_bitfield` - `cir.set_bitfield` Besides, this patch also updates the CodeGen and LLVMIR lowering code to start emitting CIR and LLVMIR operations with volatile flag. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 50 +++++++++---- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 12 ++-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 24 ++++--- .../Dialect/Transforms/LoweringPrepare.cpp | 6 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 10 +-- clang/test/CIR/CodeGen/volatile.cpp | 70 +++++++++++++++++++ clang/test/CIR/Lowering/loadstorealloca.cir | 17 +++++ 7 files changed, 151 insertions(+), 38 deletions(-) create mode 100644 clang/test/CIR/CodeGen/volatile.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index c32773ccb5b0..dc0140550382 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -373,7 +373,8 @@ def LoadOp : CIR_Op<"load", [ `cir.load` reads a value (lvalue to rvalue conversion) given an address backed up by a `cir.ptr` type. A unit attribute `deref` can be used to mark the resulting value as used by another operation to dereference - a pointer. + a pointer. A unit attribute `volatile` can be used to indicate a volatile + loading. Example: @@ -385,18 +386,22 @@ def LoadOp : CIR_Op<"load", [ // Load address from memory at address %0. %3 is used by at least one // operation that dereferences a pointer. %3 = cir.load deref %0 : cir.ptr > + + // Perform a volatile load from address in %0. + %4 = cir.load volatile %0 : !cir.ptr, i32 ``` }]; let arguments = (ins Arg:$addr, UnitAttr:$isDeref); + [MemRead]>:$addr, UnitAttr:$isDeref, + UnitAttr:$is_volatile); let results = (outs CIR_AnyType:$result); // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. let assemblyFormat = [{ - (`deref` $isDeref^)? $addr `:` `cir.ptr` type($addr) `,` - type($result) attr-dict + (`deref` $isDeref^)? (`volatile` $is_volatile^)? + $addr `:` `cir.ptr` type($addr) `,` type($result) attr-dict }]; // FIXME: add verifier. @@ -414,24 +419,31 @@ def StoreOp : CIR_Op<"store", [ let summary = "Store value to memory address"; let description = [{ `cir.store` stores a value (first operand) to the memory address specified - in the second operand. + in the second operand. A unit attribute `volatile` can be used to indicate + a volatile store. Example: ```mlir // Store a function argument to local storage, address in %0. cir.store %arg0, %0 : i32, !cir.ptr + + // Perform a volatile store into memory location at the address in %0. + cir.store volatile %arg0, %0 : i32, !cir.ptr ``` }]; let arguments = (ins CIR_AnyType:$value, Arg:$addr); + [MemWrite]>:$addr, + UnitAttr:$is_volatile); // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. - let assemblyFormat = - "$value `,` $addr attr-dict `:` type($value) `,` `cir.ptr` type($addr)"; + let assemblyFormat = [{ + (`volatile` $is_volatile^)? + $value `,` $addr attr-dict `:` type($value) `,` `cir.ptr` type($addr) + }]; // FIXME: add verifier. } @@ -1554,6 +1566,9 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { base record, a size of the storage, a size the bit field, an offset of the bit field and a sign. Returns a value being stored. + A unit attribute `volatile` can be used to indicate a volatile load of the + bitfield. + Example. Suppose we have a struct with multiple bitfields stored in different storages. The `cir.set_bitfield` operation sets the value @@ -1587,7 +1602,8 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { let arguments = (ins CIR_PointerType:$dst, CIR_AnyType:$src, - BitfieldInfoAttr:$bitfield_info + BitfieldInfoAttr:$bitfield_info, + UnitAttr:$is_volatile ); let results = (outs CIR_IntType:$result); @@ -1603,14 +1619,15 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { "StringRef":$name, "unsigned":$size, "unsigned":$offset, - "bool":$is_signed + "bool":$is_signed, + "bool":$is_volatile ), [{ BitfieldInfoAttr info = BitfieldInfoAttr::get($_builder.getContext(), name, storage_type, size, offset, is_signed); - build($_builder, $_state, type, dst, src, info); + build($_builder, $_state, type, dst, src, info, is_volatile); }]> ]; } @@ -1629,6 +1646,9 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> { base record, a type of the storage, a name of the bitfield, a size the bit field, an offset of the bit field and a sign. + A unit attribute `volatile` can be used to indicate a volatile load of the + bitfield. + Example: Suppose we have a struct with multiple bitfields stored in different storages. The `cir.get_bitfield` operation gets the value @@ -1660,7 +1680,8 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> { let arguments = (ins CIR_PointerType:$addr, - BitfieldInfoAttr:$bitfield_info + BitfieldInfoAttr:$bitfield_info, + UnitAttr:$is_volatile ); let results = (outs CIR_IntType:$result); @@ -1675,14 +1696,15 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> { "StringRef":$name, "unsigned":$size, "unsigned":$offset, - "bool":$is_signed + "bool":$is_signed, + "bool":$is_volatile ), [{ BitfieldInfoAttr info = BitfieldInfoAttr::get($_builder.getContext(), name, storage_type, size, offset, is_signed); - build($_builder, $_state, type, addr, info); + build($_builder, $_state, type, addr, info, is_volatile); }]> ]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 4ebd02aa7f2a..22bccead3699 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -712,21 +712,21 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType, mlir::Value addr, mlir::Type storageType, const CIRGenBitFieldInfo &info, - bool useVolatile) { + bool isLvalueVolatile, bool useVolatile) { auto offset = useVolatile ? info.VolatileOffset : info.Offset; return create(loc, resultType, addr, storageType, info.Name, info.Size, offset, - info.IsSigned); + info.IsSigned, isLvalueVolatile); } mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType, mlir::Value dstAddr, mlir::Type storageType, mlir::Value src, const CIRGenBitFieldInfo &info, - bool useVolatile) { + bool isLvalueVolatile, bool useVolatile) { auto offset = useVolatile ? info.VolatileOffset : info.Offset; - return create(loc, resultType, dstAddr, - storageType, src, info.Name, - info.Size, offset, info.IsSigned); + return create( + loc, resultType, dstAddr, storageType, src, info.Name, info.Size, + offset, info.IsSigned, isLvalueVolatile); } /// Create a pointer to a record member. diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 84d117a1cd94..223616e56f3e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -537,9 +537,9 @@ mlir::Value CIRGenFunction::buildToMemory(mlir::Value Value, QualType Ty) { } void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue) { - // TODO: constant matrix type, volatile, no init, non temporal, TBAA - buildStoreOfScalar(value, lvalue.getAddress(), false, lvalue.getType(), - lvalue.getBaseInfo(), false, false); + // TODO: constant matrix type, no init, non temporal, TBAA + buildStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), lvalue.getBaseInfo(), false, false); } void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, @@ -571,7 +571,8 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, } assert(currSrcLoc && "must pass in source location"); - builder.create(*currSrcLoc, Value, Addr.getPointer()); + builder.create(*currSrcLoc, Value, Addr.getPointer(), + Volatile); if (isNontemporal) { llvm_unreachable("NYI"); @@ -618,9 +619,9 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, bool useVolatile = LV.isVolatileQualified() && info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); - auto field = - builder.createGetBitfield(getLoc(Loc), resLTy, ptr.getPointer(), - ptr.getElementType(), info, useVolatile); + auto field = builder.createGetBitfield(getLoc(Loc), resLTy, ptr.getPointer(), + ptr.getElementType(), info, + LV.isVolatile(), useVolatile); assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); return RValue::get(field); } @@ -678,9 +679,9 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, mlir::Value dstAddr = Dst.getAddress().getPointer(); - Result = builder.createSetBitfield(dstAddr.getLoc(), resLTy, dstAddr, - ptr.getElementType(), Src.getScalarVal(), - info, useVolatile); + Result = builder.createSetBitfield( + dstAddr.getLoc(), resLTy, dstAddr, ptr.getElementType(), + Src.getScalarVal(), info, Dst.isVolatileQualified(), useVolatile); } static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, @@ -2407,7 +2408,8 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, } mlir::cir::LoadOp Load = builder.create( - Loc, Addr.getElementType(), Addr.getPointer()); + Loc, Addr.getElementType(), Addr.getPointer(), /* deref */ false, + Volatile); if (isNontemporal) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 611a35eacc2a..8b592ca1f254 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -315,8 +315,8 @@ void LoweringPreparePass::lowerGetBitfieldOp(GetBitfieldOp op) { auto resultTy = op.getType(); auto addr = op.getAddr(); auto loc = addr.getLoc(); - mlir::Value val = - builder.create(loc, storageType, op.getAddr()); + mlir::Value val = builder.create( + loc, storageType, op.getAddr(), /* deref */ false, op.getIsVolatile()); auto valWidth = val.getType().cast().getWidth(); if (info.getIsSigned()) { @@ -384,7 +384,7 @@ void LoweringPreparePass::lowerSetBitfieldOp(SetBitfieldOp op) { srcVal = builder.createOr(val, srcVal); } - builder.create(loc, srcVal, addr); + builder.create(loc, srcVal, addr, op.getIsVolatile()); if (!op->getUses().empty()) { mlir::Value resultVal = maskedVal; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f057bcdee302..4a6e6f8af3e6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -890,8 +890,9 @@ class CIRLoadLowering : public mlir::OpConversionPattern { mlir::ConversionPatternRewriter &rewriter) const override { const auto llvmTy = getTypeConverter()->convertType(op.getResult().getType()); - rewriter.replaceOpWithNewOp(op, llvmTy, - adaptor.getAddr()); + rewriter.replaceOpWithNewOp( + op, llvmTy, adaptor.getAddr(), /* alignment */ 0, + /* volatile */ op.getIsVolatile()); return mlir::LogicalResult::success(); } }; @@ -903,8 +904,9 @@ class CIRStoreLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::StoreOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, adaptor.getValue(), - adaptor.getAddr()); + rewriter.replaceOpWithNewOp( + op, adaptor.getValue(), adaptor.getAddr(), + /* alignment */ 0, /* volatile */ op.getIsVolatile()); return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/CodeGen/volatile.cpp b/clang/test/CIR/CodeGen/volatile.cpp new file mode 100644 index 000000000000..10c1d309bb96 --- /dev/null +++ b/clang/test/CIR/CodeGen/volatile.cpp @@ -0,0 +1,70 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int test_load(volatile int *ptr) { + return *ptr; +} + +// CHECK: cir.func @_Z9test_loadPVi +// CHECK: %{{.+}} = cir.load volatile + +void test_store(volatile int *ptr) { + *ptr = 42; +} + +// CHECK: cir.func @_Z10test_storePVi +// CHECK: cir.store volatile + +struct Foo { + int x; + volatile int y; + volatile int z: 4; +}; + +int test_load_field1(volatile Foo *ptr) { + return ptr->x; +} + +// CHECK: cir.func @_Z16test_load_field1PV3Foo +// CHECK: %[[MemberAddr:.*]] = cir.get_member +// CHECK: %{{.+}} = cir.load volatile %[[MemberAddr]] + +int test_load_field2(Foo *ptr) { + return ptr->y; +} + +// CHECK: cir.func @_Z16test_load_field2P3Foo +// CHECK: %[[MemberAddr:.+]] = cir.get_member +// CHECK: %{{.+}} = cir.load volatile %[[MemberAddr]] + +int test_load_field3(Foo *ptr) { + return ptr->z; +} + +// CHECK: cir.func @_Z16test_load_field3P3Foo +// CHECK: %[[MemberAddr:.+]] = cir.get_member +// CHECK: %{{.+}} = cir.load volatile %[[MemberAddr]] + +void test_store_field1(volatile Foo *ptr) { + ptr->x = 42; +} + +// CHECK: cir.func @_Z17test_store_field1PV3Foo +// CHECK: %[[MemberAddr:.+]] = cir.get_member +// CHECK: cir.store volatile %{{.+}}, %[[MemberAddr]] + +void test_store_field2(Foo *ptr) { + ptr->y = 42; +} + +// CHECK: cir.func @_Z17test_store_field2P3Foo +// CHECK: %[[MemberAddr:.+]] = cir.get_member +// CHECK: cir.store volatile %{{.+}}, %[[MemberAddr]] + +void test_store_field3(Foo *ptr) { + ptr->z = 4; +} + +// CHECK: cir.func @_Z17test_store_field3P3Foo +// CHECK: %[[MemberAddr:.+]] = cir.get_member +// CHECK: cir.store volatile %{{.+}}, %[[MemberAddr]] diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index 833e2dbb469f..fc3f333db56d 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -10,6 +10,14 @@ module { %2 = cir.load %0 : cir.ptr , !u32i cir.return %2 : !u32i } + + cir.func @test_volatile() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<1> : !u32i) : !u32i + cir.store volatile %1, %0 : !u32i, cir.ptr + %2 = cir.load volatile %0 : cir.ptr , !u32i + cir.return %2 : !u32i + } } // MLIR: module { @@ -20,3 +28,12 @@ module { // MLIR-NEXT: llvm.store %2, %1 : i32, !llvm.ptr // MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: return %3 : i32 + + +// MLIR: func @test_volatile() -> i32 +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 +// MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: llvm.store volatile %2, %1 : i32, !llvm.ptr +// MLIR-NEXT: %3 = llvm.load volatile %1 : !llvm.ptr -> i32 +// MLIR-NEXT: return %3 : i32 From e073b74e05bd148fc1e7a541c5b48eecbeb23fbc Mon Sep 17 00:00:00 2001 From: David Olsen Date: Tue, 30 Jan 2024 18:02:25 -0800 Subject: [PATCH 1372/2301] [CIR] Vector types, comparison operators (#432) This is part 3 of implementing vector types and vector operations in ClangIR, issue #284. Create new operation `cir.vec.cmp` which implements the relational comparison operators (`== != < > <= >=`) on vector types. A new operation was created rather than reusing `cir.cmp` because the result is a vector of a signed intergral type, not a `bool`. Add CodeGen and Lowering tests for vector comparisons. Fix the floating-point comparison predicate when lowering to LLVM. To handle NaN values correctly, the comparisons need to be ordered rather than unordered. (Except for `!=`, which needs to be unordered.) For example, "ueq" was changed to "oeq". --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 25 ++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 63 ++++---- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 137 +++++++++++------- clang/test/CIR/CodeGen/vectype.cpp | 29 ++++ clang/test/CIR/Lowering/cmp.cir | 10 +- clang/test/CIR/Lowering/vectype.cpp | 79 +++++++++- 6 files changed, 258 insertions(+), 85 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index dc0140550382..1e5145d0e15b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1846,6 +1846,31 @@ def VecCreateOp : CIR_Op<"vec.create", [Pure]> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// VecCmp +//===----------------------------------------------------------------------===// + +def VecCmpOp : CIR_Op<"vec.cmp", [Pure, SameTypeOperands]> { + + let summary = "Compare two vectors"; + let description = [{ + The `cir.vec.cmp` operation does an element-wise comparison of two vectors + of the same type. The result is a vector of the same size as the operands + whose element type is the signed integral type that is the same size as the + element type of the operands. The values in the result are 0 or -1. + }]; + + let arguments = (ins Arg:$kind, CIR_VectorType:$lhs, + CIR_VectorType:$rhs); + let results = (outs CIR_VectorType:$result); + + let assemblyFormat = [{ + `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,` type($result) attr-dict + }]; + + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // BaseClassAddr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 0f85e0da58dd..16056cc2b004 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -765,6 +765,26 @@ class ScalarExprEmitter : public StmtVisitor { QualType LHSTy = E->getLHS()->getType(); QualType RHSTy = E->getRHS()->getType(); + auto ClangCmpToCIRCmp = [](auto ClangCmp) -> mlir::cir::CmpOpKind { + switch (ClangCmp) { + case BO_LT: + return mlir::cir::CmpOpKind::lt; + case BO_GT: + return mlir::cir::CmpOpKind::gt; + case BO_LE: + return mlir::cir::CmpOpKind::le; + case BO_GE: + return mlir::cir::CmpOpKind::ge; + case BO_EQ: + return mlir::cir::CmpOpKind::eq; + case BO_NE: + return mlir::cir::CmpOpKind::ne; + default: + llvm_unreachable("unsupported comparison kind"); + return mlir::cir::CmpOpKind(-1); + } + }; + if (const MemberPointerType *MPT = LHSTy->getAs()) { assert(0 && "not implemented"); } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { @@ -773,12 +793,18 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value RHS = BOInfo.RHS; if (LHSTy->isVectorType()) { - // Cannot handle any vector just yet. - assert(0 && "not implemented"); - // If AltiVec, the comparison results in a numeric type, so we use - // intrinsics comparing vectors and giving 0 or 1 as a result - if (!E->getType()->isVectorType()) - assert(0 && "not implemented"); + if (!E->getType()->isVectorType()) { + // If AltiVec, the comparison results in a numeric type, so we use + // intrinsics comparing vectors and giving 0 or 1 as a result + llvm_unreachable("NYI: AltiVec comparison"); + } else { + // Other kinds of vectors. Element-wise comparison returning + // a vector. + mlir::cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); + return Builder.create( + CGF.getLoc(BOInfo.Loc), CGF.getCIRType(BOInfo.Ty), Kind, + BOInfo.LHS, BOInfo.RHS); + } } if (BOInfo.isFixedPointOp()) { assert(0 && "not implemented"); @@ -793,30 +819,7 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } - mlir::cir::CmpOpKind Kind; - switch (E->getOpcode()) { - case BO_LT: - Kind = mlir::cir::CmpOpKind::lt; - break; - case BO_GT: - Kind = mlir::cir::CmpOpKind::gt; - break; - case BO_LE: - Kind = mlir::cir::CmpOpKind::le; - break; - case BO_GE: - Kind = mlir::cir::CmpOpKind::ge; - break; - case BO_EQ: - Kind = mlir::cir::CmpOpKind::eq; - break; - case BO_NE: - Kind = mlir::cir::CmpOpKind::ne; - break; - default: - llvm_unreachable("unsupported"); - } - + mlir::cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); return Builder.create(CGF.getLoc(BOInfo.Loc), CGF.getCIRType(BOInfo.Ty), Kind, BOInfo.LHS, BOInfo.RHS); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 4a6e6f8af3e6..8f07d47684be 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -96,6 +96,51 @@ void walkRegionSkipping(mlir::Region ®ion, }); } +/// Convert from a CIR comparison kind to an LLVM IR integral comparison kind. +mlir::LLVM::ICmpPredicate +convertCmpKindToICmpPredicate(mlir::cir::CmpOpKind kind, bool isSigned) { + using CIR = mlir::cir::CmpOpKind; + using LLVMICmp = mlir::LLVM::ICmpPredicate; + switch (kind) { + case CIR::eq: + return LLVMICmp::eq; + case CIR::ne: + return LLVMICmp::ne; + case CIR::lt: + return (isSigned ? LLVMICmp::slt : LLVMICmp::ult); + case CIR::le: + return (isSigned ? LLVMICmp::sle : LLVMICmp::ule); + case CIR::gt: + return (isSigned ? LLVMICmp::sgt : LLVMICmp::ugt); + case CIR::ge: + return (isSigned ? LLVMICmp::sge : LLVMICmp::uge); + } + llvm_unreachable("Unknown CmpOpKind"); +} + +/// Convert from a CIR comparison kind to an LLVM IR floating-point comparison +/// kind. +mlir::LLVM::FCmpPredicate +convertCmpKindToFCmpPredicate(mlir::cir::CmpOpKind kind) { + using CIR = mlir::cir::CmpOpKind; + using LLVMFCmp = mlir::LLVM::FCmpPredicate; + switch (kind) { + case CIR::eq: + return LLVMFCmp::oeq; + case CIR::ne: + return LLVMFCmp::une; + case CIR::lt: + return LLVMFCmp::olt; + case CIR::le: + return LLVMFCmp::ole; + case CIR::gt: + return LLVMFCmp::ogt; + case CIR::ge: + return LLVMFCmp::oge; + } + llvm_unreachable("Unknown CmpOpKind"); +} + } // namespace //===----------------------------------------------------------------------===// @@ -1133,6 +1178,41 @@ class CIRVectorExtractLowering } }; +class CIRVectorCmpOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecCmpOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert(op.getType().isa() && + op.getLhs().getType().isa() && + op.getRhs().getType().isa() && + "Vector compare with non-vector type"); + // LLVM IR vector comparison returns a vector of i1. This one-bit vector + // must be sign-extended to the correct result type. + auto elementType = + op.getLhs().getType().dyn_cast().getEltType(); + mlir::Value bitResult; + if (auto intType = elementType.dyn_cast()) { + bitResult = rewriter.create( + op.getLoc(), + convertCmpKindToICmpPredicate(op.getKind(), intType.isSigned()), + adaptor.getLhs(), adaptor.getRhs()); + } else if (elementType.isa()) { + bitResult = rewriter.create( + op.getLoc(), convertCmpKindToFCmpPredicate(op.getKind()), + adaptor.getLhs(), adaptor.getRhs()); + } else { + return op.emitError() << "unsupported type for VecCmpOp: " << elementType; + } + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType()), bitResult); + return mlir::success(); + } +}; + class CIRVAStartLowering : public mlir::OpConversionPattern { public: @@ -1835,50 +1915,6 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; - mlir::LLVM::ICmpPredicate convertToICmpPredicate(mlir::cir::CmpOpKind kind, - bool isSigned) const { - using CIR = mlir::cir::CmpOpKind; - using LLVMICmp = mlir::LLVM::ICmpPredicate; - - switch (kind) { - case CIR::eq: - return LLVMICmp::eq; - case CIR::ne: - return LLVMICmp::ne; - case CIR::lt: - return (isSigned ? LLVMICmp::slt : LLVMICmp::ult); - case CIR::le: - return (isSigned ? LLVMICmp::sle : LLVMICmp::ule); - case CIR::gt: - return (isSigned ? LLVMICmp::sgt : LLVMICmp::ugt); - case CIR::ge: - return (isSigned ? LLVMICmp::sge : LLVMICmp::uge); - } - llvm_unreachable("Unknown CmpOpKind"); - } - - mlir::LLVM::FCmpPredicate - convertToFCmpPredicate(mlir::cir::CmpOpKind kind) const { - using CIR = mlir::cir::CmpOpKind; - using LLVMFCmp = mlir::LLVM::FCmpPredicate; - - switch (kind) { - case CIR::eq: - return LLVMFCmp::ueq; - case CIR::ne: - return LLVMFCmp::une; - case CIR::lt: - return LLVMFCmp::ult; - case CIR::le: - return LLVMFCmp::ule; - case CIR::gt: - return LLVMFCmp::ugt; - case CIR::ge: - return LLVMFCmp::uge; - } - llvm_unreachable("Unknown CmpOpKind"); - } - mlir::LogicalResult matchAndRewrite(mlir::cir::CmpOp cmpOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { @@ -1887,15 +1923,17 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { // Lower to LLVM comparison op. if (auto intTy = type.dyn_cast()) { - auto kind = convertToICmpPredicate(cmpOp.getKind(), intTy.isSigned()); + auto kind = + convertCmpKindToICmpPredicate(cmpOp.getKind(), intTy.isSigned()); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); } else if (auto ptrTy = type.dyn_cast()) { - auto kind = convertToICmpPredicate(cmpOp.getKind(), /* isSigned=*/false); + auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), + /* isSigned=*/false); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); } else if (type.isa()) { - auto kind = convertToFCmpPredicate(cmpOp.getKind()); + auto kind = convertCmpKindToFCmpPredicate(cmpOp.getKind()); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); } else { @@ -2090,8 +2128,9 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, - CIRVectorInsertLowering, CIRVectorExtractLowering, CIRStackSaveLowering, - CIRStackRestoreLowering>(converter, patterns.getContext()); + CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, + CIRStackSaveLowering, CIRStackRestoreLowering>(converter, + patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index 54b2ade13c05..15ea7ae26c73 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -3,6 +3,7 @@ typedef int vi4 __attribute__((vector_size(16))); typedef double vd2 __attribute__((vector_size(16))); +typedef long long vll2 __attribute__((vector_size(16))); void vector_int_test(int x) { @@ -50,6 +51,20 @@ void vector_int_test(int x) { // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector vi4 n = ~a; // CHECK: %{{[0-9]+}} = cir.unary(not, %{{[0-9]+}}) : !cir.vector, !cir.vector + + // Comparisons + vi4 o = a == b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : , + vi4 p = a != b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : , + vi4 q = a < b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : , + vi4 r = a > b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : , + vi4 s = a <= b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : , + vi4 t = a >= b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : , } void vector_double_test(int x, double y) { @@ -87,4 +102,18 @@ void vector_double_test(int x, double y) { // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector vd2 m = -a; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector + + // Comparisons + vll2 o = a == b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : , + vll2 p = a != b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : , + vll2 q = a < b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : , + vll2 r = a > b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : , + vll2 s = a <= b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : , + vll2 t = a >= b; + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : , } diff --git a/clang/test/CIR/Lowering/cmp.cir b/clang/test/CIR/Lowering/cmp.cir index 94df95173a7a..94b4b2cdd8a0 100644 --- a/clang/test/CIR/Lowering/cmp.cir +++ b/clang/test/CIR/Lowering/cmp.cir @@ -36,19 +36,19 @@ module { %23 = cir.load %2 : cir.ptr , f32 %24 = cir.load %3 : cir.ptr , f32 %25 = cir.cmp(gt, %23, %24) : f32, !cir.bool - // CHECK: llvm.fcmp "ugt" + // CHECK: llvm.fcmp "ogt" %26 = cir.load %2 : cir.ptr , f32 %27 = cir.load %3 : cir.ptr , f32 %28 = cir.cmp(eq, %26, %27) : f32, !cir.bool - // CHECK: llvm.fcmp "ueq" + // CHECK: llvm.fcmp "oeq" %29 = cir.load %2 : cir.ptr , f32 %30 = cir.load %3 : cir.ptr , f32 %31 = cir.cmp(lt, %29, %30) : f32, !cir.bool - // CHECK: llvm.fcmp "ult" + // CHECK: llvm.fcmp "olt" %32 = cir.load %2 : cir.ptr , f32 %33 = cir.load %3 : cir.ptr , f32 %34 = cir.cmp(ge, %32, %33) : f32, !cir.bool - // CHECK: llvm.fcmp "uge" + // CHECK: llvm.fcmp "oge" %35 = cir.load %2 : cir.ptr , f32 %36 = cir.load %3 : cir.ptr , f32 %37 = cir.cmp(ne, %35, %36) : f32, !cir.bool @@ -56,7 +56,7 @@ module { %38 = cir.load %2 : cir.ptr , f32 %39 = cir.load %3 : cir.ptr , f32 %40 = cir.cmp(le, %38, %39) : f32, !cir.bool - // CHECK: llvm.fcmp "ule" + // CHECK: llvm.fcmp "ole" // Pointer comparisons. %41 = cir.cmp(ne, %0, %1) : !cir.ptr, !cir.bool diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index 84686fcef505..7630ce63157b 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -5,6 +5,7 @@ typedef int vi4 __attribute__((vector_size(16))); typedef double vd2 __attribute__((vector_size(16))); +typedef long long vll2 __attribute__((vector_size(16))); void vector_int_test(int x) { @@ -125,6 +126,44 @@ void vector_int_test(int x) { // CHECK: %[[#T103:]] = llvm.insertelement %[[#T94]], %[[#T101]][%[[#T102]] : i64] : vector<4xi32> // CHECK: %[[#T104:]] = llvm.xor %[[#T103]], %[[#T93]] : vector<4xi32> // CHECK: llvm.store %[[#T104]], %[[#T29:]] : vector<4xi32>, !llvm.ptr + + // Comparisons + vi4 o = a == b; + // CHECK: %[[#T105:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T106:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T107:]] = llvm.icmp "eq" %[[#T105]], %[[#T106]] : vector<4xi32> + // CHECK: %[[#T108:]] = llvm.sext %[[#T107]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T108]], %[[#To:]] : vector<4xi32>, !llvm.ptr + vi4 p = a != b; + // CHECK: %[[#T109:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T110:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T111:]] = llvm.icmp "ne" %[[#T109]], %[[#T110]] : vector<4xi32> + // CHECK: %[[#T112:]] = llvm.sext %[[#T111]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T112]], %[[#Tp:]] : vector<4xi32>, !llvm.ptr + vi4 q = a < b; + // CHECK: %[[#T113:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T114:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T115:]] = llvm.icmp "slt" %[[#T113]], %[[#T114]] : vector<4xi32> + // CHECK: %[[#T116:]] = llvm.sext %[[#T115]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T116]], %[[#Tq:]] : vector<4xi32>, !llvm.ptr + vi4 r = a > b; + // CHECK: %[[#T117:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T118:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T119:]] = llvm.icmp "sgt" %[[#T117]], %[[#T118]] : vector<4xi32> + // CHECK: %[[#T120:]] = llvm.sext %[[#T119]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T120]], %[[#Tr:]] : vector<4xi32>, !llvm.ptr + vi4 s = a <= b; + // CHECK: %[[#T121:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T122:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T123:]] = llvm.icmp "sle" %[[#T121]], %[[#T122]] : vector<4xi32> + // CHECK: %[[#T124:]] = llvm.sext %[[#T123]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T124]], %[[#Ts:]] : vector<4xi32>, !llvm.ptr + vi4 t = a >= b; + // CHECK: %[[#T125:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T126:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T127:]] = llvm.icmp "sge" %[[#T125]], %[[#T126]] : vector<4xi32> + // CHECK: %[[#T128:]] = llvm.sext %[[#T127]] : vector<4xi1> to vector<4xi32> + // CHECK: llvm.store %[[#T128]], %[[#Tt:]] : vector<4xi32>, !llvm.ptr } void vector_double_test(int x, double y) { @@ -156,7 +195,7 @@ void vector_double_test(int x, double y) { // Extract element. double c = a[x]; - // CHECK: 38 = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T38:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T39:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 // CHECK: %[[#T40:]] = llvm.extractelement %[[#T38]][%[[#T39]] : i32] : vector<2xf64> // CHECK: llvm.store %[[#T40]], %[[#T9:]] : f64, !llvm.ptr @@ -199,4 +238,42 @@ void vector_double_test(int x, double y) { // CHECK: %[[#T58:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T59:]] = llvm.fneg %[[#T58]] : vector<2xf64> // CHECK: llvm.store %[[#T59]], %[[#T21:]] : vector<2xf64>, !llvm.ptr + + // Comparisons + vll2 o = a == b; + // CHECK: %[[#T60:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T61:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T62:]] = llvm.fcmp "oeq" %[[#T60]], %[[#T61]] : vector<2xf64> + // CHECK: %[[#T63:]] = llvm.sext %[[#T62]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T63]], %[[#To:]] : vector<2xi64>, !llvm.ptr + vll2 p = a != b; + // CHECK: %[[#T64:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T65:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T66:]] = llvm.fcmp "une" %[[#T64]], %[[#T65]] : vector<2xf64> + // CHECK: %[[#T67:]] = llvm.sext %[[#T66]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T67]], %[[#Tp:]] : vector<2xi64>, !llvm.ptr + vll2 q = a < b; + // CHECK: %[[#T68:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T69:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T70:]] = llvm.fcmp "olt" %[[#T68]], %[[#T69]] : vector<2xf64> + // CHECK: %[[#T71:]] = llvm.sext %[[#T70]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T71]], %[[#Tq:]] : vector<2xi64>, !llvm.ptr + vll2 r = a > b; + // CHECK: %[[#T72:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T73:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T74:]] = llvm.fcmp "ogt" %[[#T72]], %[[#T73]] : vector<2xf64> + // CHECK: %[[#T75:]] = llvm.sext %[[#T74]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T75]], %[[#Tr:]] : vector<2xi64>, !llvm.ptr + vll2 s = a <= b; + // CHECK: %[[#T76:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T77:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T78:]] = llvm.fcmp "ole" %[[#T76]], %[[#T77]] : vector<2xf64> + // CHECK: %[[#T79:]] = llvm.sext %[[#T78]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T79]], %[[#Ts:]] : vector<2xi64>, !llvm.ptr + vll2 t = a >= b; + // CHECK: %[[#T80:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T81:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T82:]] = llvm.fcmp "oge" %[[#T80]], %[[#T81]] : vector<2xf64> + // CHECK: %[[#T83:]] = llvm.sext %[[#T82]] : vector<2xi1> to vector<2xi64> + // CHECK: llvm.store %[[#T83]], %[[#Tt:]] : vector<2xi64>, !llvm.ptr } From 37e6f8769a1db06d0da544904760773cdc760a0a Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Wed, 31 Jan 2024 05:05:58 +0300 Subject: [PATCH 1373/2301] [CIR][CIRGen] Add missing visitor for ParenExpr (#428) Compilation of the following test ``` void foo6(A* a1) { A a2 = (*a1); } ``` fails with. ``` NYI UNREACHABLE executed at /home/huawei/cir/repo/llvm-project/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp:175! ``` Commit adds required visitor and fixes the issue. --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 2 +- clang/test/CIR/CodeGen/agg-copy.c | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index e4a9923a055d..86dda7667c67 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -172,7 +172,7 @@ class AggExprEmitter : public StmtVisitor { << S->getStmtClassName() << "\n"; llvm_unreachable("NYI"); } - void VisitParenExpr(ParenExpr *PE) { llvm_unreachable("NYI"); } + void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); } void VisitGenericSelectionExpr(GenericSelectionExpr *GE) { llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/agg-copy.c b/clang/test/CIR/CodeGen/agg-copy.c index 43f106c55c57..aa7a158e7464 100644 --- a/clang/test/CIR/CodeGen/agg-copy.c +++ b/clang/test/CIR/CodeGen/agg-copy.c @@ -72,4 +72,14 @@ A create() { A a; return a; } void foo5() { A a; a = create(); +} + +void foo6(A* a1) { + A a2 = (*a1); +// CHECK: cir.func {{.*@foo6}} +// CHECK: [[TMP0]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] {alignment = 8 : i64} +// CHECK: [[TMP1]] = cir.alloca !ty_22A22, cir.ptr , ["a2", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP2]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr } \ No newline at end of file From 0666d7cad6416aee63a0806f5a3931de23a24b03 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 31 Jan 2024 22:37:04 +0300 Subject: [PATCH 1374/2301] [CIR][CodeGen][BugFix] use proper base type for derived class (#404) In the original codegen a new type is created for the base class, while in CIR we were rewriting the type being processed (due tp misused pointers). This PR fix this, and also makes CIR codegen even with the original one. --- clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 9 +++++---- clang/test/CIR/CodeGen/derived-to-base.cpp | 3 +++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 6cda57545ae5..62eb8d659748 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -601,15 +601,16 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, builder.lower(/*nonVirtualBaseType=*/false); // If we're in C++, compute the base subobject type. - mlir::cir::StructType *BaseTy = nullptr; + mlir::cir::StructType BaseTy; if (llvm::isa(D) && !D->isUnion() && !D->hasAttr()) { - BaseTy = Ty; + BaseTy = *Ty; if (builder.astRecordLayout.getNonVirtualSize() != builder.astRecordLayout.getSize()) { CIRRecordLowering baseBuilder(*this, D, /*Packed=*/builder.isPacked); + baseBuilder.lower(/*NonVirtualBaseType=*/true); auto baseIdentifier = getRecordTypeName(D, ".base"); - *BaseTy = + BaseTy = Builder.getCompleteStructTy(baseBuilder.fieldTypes, baseIdentifier, /*packed=*/false, D); // TODO(cir): add something like addRecordTypeName @@ -630,7 +631,7 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, auto RL = std::make_unique( Ty ? *Ty : mlir::cir::StructType{}, - BaseTy ? *BaseTy : mlir::cir::StructType{}, + BaseTy ? BaseTy : mlir::cir::StructType{}, (bool)builder.IsZeroInitializable, (bool)builder.IsZeroInitializableAsBase); diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index b6794b6d970e..2f8591a2810d 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -77,6 +77,9 @@ void C3::Layer::Initialize() { // CHECK-DAG: !ty_22C23A3ALayer22 = !cir.struct Date: Wed, 31 Jan 2024 22:40:09 +0300 Subject: [PATCH 1375/2301] [CIR][CodeGen] Initial variable length array support (#398) This is a first PR for variable length array support. There are one (or more :) ) ahead. Basically, we already did lot's of preliminary job in order to land VLA in CIR in #367 #346 #340. So now we add initial VLA support itself. Most of the changes are taken from the original codegen, so there is nothing to be scary of) I added just one test, and basically that's all we can test right now. Later, I will add more, from the original codegen tests. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 9 + clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 50 ++++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 175 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 26 +++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 8 +- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 7 files changed, 263 insertions(+), 8 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 3257d180c44e..71fadd74f6b2 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -118,6 +118,15 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); } + mlir::Value createMul(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::Mul, rhs); + } + + mlir::Value createMul(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::Mul, val); + } + //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 9ef1be205ad7..ce0c3c4cfa93 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -40,7 +40,6 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, // getLangOpts().OpenCL)) assert(!UnimplementedFeature::openCL()); assert(Ty.getAddressSpace() == LangAS::Default); - assert(!Ty->isVariablyModifiedType() && "not implemented"); assert(!D.hasAttr() && "not implemented"); auto loc = getLoc(D.getSourceRange()); @@ -51,6 +50,11 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, emission.IsEscapingByRef = isEscapingByRef; CharUnits alignment = getContext().getDeclAlign(&D); + + // If the type is variably-modified, emit all the VLA sizes for it. + if (Ty->isVariablyModifiedType()) + buildVariablyModifiedType(Ty); + assert(!UnimplementedFeature::generateDebugInfo()); assert(!UnimplementedFeature::cxxABI()); @@ -146,7 +150,41 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, assert(!UnimplementedFeature::shouldEmitLifetimeMarkers()); } } else { // not openmp nor constant sized type - llvm_unreachable("NYI"); + bool VarAllocated = false; + if (getLangOpts().OpenMPIsTargetDevice) + llvm_unreachable("NYI"); + + if (!VarAllocated) { + if (!DidCallStackSave) { + // Save the stack. + auto defaultTy = AllocaInt8PtrTy; + CharUnits Align = CharUnits::fromQuantity( + CGM.getDataLayout().getAlignment(defaultTy, false)); + Address Stack = CreateTempAlloca(defaultTy, Align, loc, "saved_stack"); + + mlir::Value V = builder.createStackSave(loc, defaultTy); + assert(V.getType() == AllocaInt8PtrTy); + builder.createStore(loc, V, Stack); + + DidCallStackSave = true; + + // Push a cleanup block and restore the stack there. + // FIXME: in general circumstances, this should be an EH cleanup. + pushStackRestore(NormalCleanup, Stack); + } + + auto VlaSize = getVLASize(Ty); + mlir::Type mTy = convertTypeForMem(VlaSize.Type); + + // Allocate memory for the array. + address = CreateTempAlloca(mTy, alignment, loc, "vla", VlaSize.NumElts, + &allocaAddr, builder.saveInsertionPoint()); + } + + // If we have debug info enabled, properly describe the VLA dimensions for + // this type by registering the vla size expression for each of the + // dimensions. + assert(!UnimplementedFeature::generateDebugInfo()); } emission.Addr = address; @@ -858,7 +896,9 @@ struct CallStackRestore final : EHScopeStack::Cleanup { CallStackRestore(Address Stack) : Stack(Stack) {} bool isRedundantBeforeReturn() override { return true; } void Emit(CIRGenFunction &CGF, Flags flags) override { - llvm_unreachable("NYI"); + auto loc = Stack.getPointer().getLoc(); + mlir::Value V = CGF.getBuilder().createLoad(loc, Stack); + CGF.getBuilder().createStackRestore(loc, V); } }; @@ -941,6 +981,10 @@ CIRGenFunction::getDestroyer(QualType::DestructionKind kind) { llvm_unreachable("Unknown DestructionKind"); } +void CIRGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) { + EHStack.pushCleanup(Kind, SPMem); +} + /// Enter a destroy cleanup for the given local variable. void CIRGenFunction::buildAutoVarTypeCleanup( const CIRGenFunction::AutoVarEmission &emission, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 2b0239a35e33..57def877abae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -19,6 +19,7 @@ #include "clang/AST/ASTLambda.h" #include "clang/AST/ExprObjC.h" #include "clang/Basic/Builtins.h" +#include "clang/Basic/DiagnosticCategories.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/FPEnv.h" @@ -1397,3 +1398,177 @@ void CIRGenFunction::checkTargetFeatures(SourceLocation Loc, } } } + +CIRGenFunction::VlaSizePair CIRGenFunction::getVLASize(QualType type) { + const VariableArrayType *vla = + CGM.getASTContext().getAsVariableArrayType(type); + assert(vla && "type was not a variable array type!"); + return getVLASize(vla); +} + +CIRGenFunction::VlaSizePair +CIRGenFunction::getVLASize(const VariableArrayType *type) { + // The number of elements so far; always size_t. + mlir::Value numElements; + + QualType elementType; + do { + elementType = type->getElementType(); + mlir::Value vlaSize = VLASizeMap[type->getSizeExpr()]; + assert(vlaSize && "no size for VLA!"); + assert(vlaSize.getType() == SizeTy); + + if (!numElements) { + numElements = vlaSize; + } else { + // It's undefined behavior if this wraps around, so mark it that way. + // FIXME: Teach -fsanitize=undefined to trap this. + + numElements = builder.createMul(numElements, vlaSize); + } + } while ((type = getContext().getAsVariableArrayType(elementType))); + + assert(numElements && "Undefined elements number"); + return {numElements, elementType}; +} + +// TODO(cir): most part of this function can be shared between CIRGen +// and traditional LLVM codegen +void CIRGenFunction::buildVariablyModifiedType(QualType type) { + assert(type->isVariablyModifiedType() && + "Must pass variably modified type to EmitVLASizes!"); + + // We're going to walk down into the type and look for VLA + // expressions. + do { + assert(type->isVariablyModifiedType()); + + const Type *ty = type.getTypePtr(); + switch (ty->getTypeClass()) { + case clang::Type::CountAttributed: + case clang::Type::PackIndexing: + case clang::Type::ArrayParameter: + llvm_unreachable("NYI"); + +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_TYPE(Class, Base) +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("unexpected dependent type!"); + + // These types are never variably-modified. + case Type::Builtin: + case Type::Complex: + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::Record: + case Type::Enum: + case Type::Using: + case Type::TemplateSpecialization: + case Type::ObjCTypeParam: + case Type::ObjCObject: + case Type::ObjCInterface: + case Type::ObjCObjectPointer: + case Type::BitInt: + llvm_unreachable("type class is never variably-modified!"); + + case Type::Elaborated: + type = cast(ty)->getNamedType(); + break; + + case Type::Adjusted: + type = cast(ty)->getAdjustedType(); + break; + + case Type::Decayed: + type = cast(ty)->getPointeeType(); + break; + + case Type::Pointer: + type = cast(ty)->getPointeeType(); + break; + + case Type::BlockPointer: + type = cast(ty)->getPointeeType(); + break; + + case Type::LValueReference: + case Type::RValueReference: + type = cast(ty)->getPointeeType(); + break; + + case Type::MemberPointer: + type = cast(ty)->getPointeeType(); + break; + + case Type::ConstantArray: + case Type::IncompleteArray: + // Losing element qualification here is fine. + type = cast(ty)->getElementType(); + break; + + case Type::VariableArray: { + // Losing element qualification here is fine. + const VariableArrayType *vat = cast(ty); + + // Unknown size indication requires no size computation. + // Otherwise, evaluate and record it. + if (const Expr *sizeExpr = vat->getSizeExpr()) { + // It's possible that we might have emitted this already, + // e.g. with a typedef and a pointer to it. + mlir::Value &entry = VLASizeMap[sizeExpr]; + if (!entry) { + mlir::Value size = buildScalarExpr(sizeExpr); + assert(!UnimplementedFeature::sanitizeVLABound()); + + // Always zexting here would be wrong if it weren't + // undefined behavior to have a negative bound. + // FIXME: What about when size's type is larger than size_t? + entry = builder.createIntCast(size, SizeTy); + } + } + type = vat->getElementType(); + break; + } + + case Type::FunctionProto: + case Type::FunctionNoProto: + type = cast(ty)->getReturnType(); + break; + + case Type::Paren: + case Type::TypeOf: + case Type::UnaryTransform: + case Type::Attributed: + case Type::BTFTagAttributed: + case Type::SubstTemplateTypeParm: + case Type::MacroQualified: + // Keep walking after single level desugaring. + type = type.getSingleStepDesugaredType(getContext()); + break; + + case Type::Typedef: + case Type::Decltype: + case Type::Auto: + case Type::DeducedTemplateSpecialization: + // Stop walking: nothing to do. + return; + + case Type::TypeOfExpr: + // Stop walking: emit typeof expression. + buildIgnoredExpr(cast(ty)->getUnderlyingExpr()); + return; + + case Type::Atomic: + type = cast(ty)->getValueType(); + break; + + case Type::Pipe: + type = cast(ty)->getElementType(); + break; + } + } while (type->isVariablyModifiedType()); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 457cae34f42d..0a4149164687 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -93,6 +93,14 @@ class CIRGenFunction : public CIRGenTypeCache { llvm::DenseMap OpaqueLValues; llvm::DenseMap OpaqueRValues; + // This keeps track of the associated size for each VLA type. + // We track this by the size expression rather than the type itself because + // in certain situations, like a const qualifier applied to an VLA typedef, + // multiple VLA types can share the same size expression. + // FIXME: Maybe this could be a stack of maps that is pushed/popped as we + // enter/leave scopes. + llvm::DenseMap VLASizeMap; + public: /// A non-RAII class containing all the information about a bound /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for @@ -707,6 +715,22 @@ class CIRGenFunction : public CIRGenTypeCache { /// \returns SSA value with the argument. mlir::Value buildVAArg(VAArgExpr *VE, Address &VAListAddr); + void buildVariablyModifiedType(QualType Ty); + + struct VlaSizePair { + mlir::Value NumElts; + QualType Type; + + VlaSizePair(mlir::Value NE, QualType T) : NumElts(NE), Type(T) {} + }; + + /// Returns an MLIR value that corresponds to the size, + /// in non-variably-sized elements, of a variable length array type, + /// plus that largest non-variably-sized element type. Assumes that + /// the type has already been emitted with buildVariablyModifiedType. + VlaSizePair getVLASize(const VariableArrayType *vla); + VlaSizePair getVLASize(QualType vla); + mlir::Value emitBuiltinObjectSize(const Expr *E, unsigned Type, mlir::cir::IntType ResType, mlir::Value EmittedE, bool IsDynamic); @@ -1242,6 +1266,8 @@ class CIRGenFunction : public CIRGenTypeCache { void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type); + void pushStackRestore(CleanupKind kind, Address SPMem); + static bool IsConstructorDelegationValid(const clang::CXXConstructorDecl *Ctor); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 286149cd5539..59d88eafee21 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -158,7 +158,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, /*isSigned=*/false); UInt8PtrTy = builder.getPointerTo(UInt8Ty); UInt8PtrPtrTy = builder.getPointerTo(UInt8PtrTy); - // TODO: AllocaInt8PtrTy + AllocaInt8PtrTy = UInt8PtrTy; // TODO: GlobalsInt8PtrTy // TODO: ConstGlobalsPtrTy // TODO: ASTAllocaAddressSpace diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index ac3442626ca8..91290001d683 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -64,10 +64,10 @@ struct CIRGenTypeCache { }; /// void* in alloca address space - // union { - // mlir::cir::PointerType AllocaVoidPtrTy; - // mlir::cir::PointerType AllocaInt8PtrTy; - // }; + union { + mlir::cir::PointerType AllocaVoidPtrTy; + mlir::cir::PointerType AllocaInt8PtrTy; + }; /// void* in default globals address space // union { diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 6c699c709ab3..89a336146f68 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -58,6 +58,7 @@ struct UnimplementedFeature { static bool emitCheckedInBoundsGEP() { return false; } static bool pointerOverflowSanitizer() { return false; } static bool sanitizeDtor() { return false; } + static bool sanitizeVLABound() { return false; } // ObjC static bool setObjCGCLValueClass() { return false; } From ab7425cf36acdbad7f07c99d24014ba056303678 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Wed, 31 Jan 2024 23:35:58 +0300 Subject: [PATCH 1376/2301] [CIR][CIRGen][Bugfix] Fix source location in ctors (#415) --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 2 +- clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index ce0c3c4cfa93..51533c2310bf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -657,8 +657,8 @@ void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS, void CIRGenFunction::buildScalarInit(const Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit) { // TODO: this is where a lot of ObjC lifetime stuff would be done. - mlir::Value value = buildScalarExpr(init); SourceLocRAIIObject Loc{*this, loc}; + mlir::Value value = buildScalarExpr(init); buildStoreThroughLValue(RValue::get(value), lvalue); return; } diff --git a/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp b/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp new file mode 100644 index 000000000000..f70d1f8428d4 --- /dev/null +++ b/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct e { e(int); }; +e *g = new e(0); + +//CHECK: {{%.*}} = cir.const(#cir.int<1> : !u64i) : !u64i loc(#loc11) +//CHECK: {{%.*}} = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr loc(#loc6) From f49d8bd785201e4d679b15c3719baf38657e1cf7 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Wed, 31 Jan 2024 23:39:08 +0300 Subject: [PATCH 1377/2301] [CIR][CIRGen] Handle initilization of arrays (#431) --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 121 ++++++++++++++++++++++++ clang/test/CIR/CodeGen/array-init.c | 19 ++++ 2 files changed, 140 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 86dda7667c67..2c8ce45f46e2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -148,6 +148,10 @@ class AggExprEmitter : public StmtVisitor { void buildCopy(QualType type, const AggValueSlot &dest, const AggValueSlot &src); + void buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, + QualType ArrayQTy, Expr *ExprToVisit, + ArrayRef Args, Expr *ArrayFiller); + AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) { if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T)) llvm_unreachable("garbage collection is NYI"); @@ -394,6 +398,113 @@ void AggExprEmitter::buildCopy(QualType type, const AggValueSlot &dest, CGF.buildAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(), false); } +// FIXME(cir): This function could be shared with traditional LLVM codegen +/// Determine if E is a trivial array filler, that is, one that is +/// equivalent to zero-initialization. +static bool isTrivialFiller(Expr *E) { + if (!E) + return true; + + if (isa(E)) + return true; + + if (auto *ILE = dyn_cast(E)) { + if (ILE->getNumInits()) + return false; + return isTrivialFiller(ILE->getArrayFiller()); + } + + if (auto *Cons = dyn_cast_or_null(E)) + return Cons->getConstructor()->isDefaultConstructor() && + Cons->getConstructor()->isTrivial(); + + // FIXME: Are there other cases where we can avoid emitting an initializer? + return false; +} + +void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, + QualType ArrayQTy, Expr *ExprToVisit, + ArrayRef Args, Expr *ArrayFiller) { + uint64_t NumInitElements = Args.size(); + + uint64_t NumArrayElements = AType.getSize(); + assert(NumInitElements != 0 && "expected at least one initializaed value"); + assert(NumInitElements <= NumArrayElements); + + QualType elementType = + CGF.getContext().getAsArrayType(ArrayQTy)->getElementType(); + + auto cirElementType = CGF.convertType(elementType); + auto cirElementPtrType = mlir::cir::PointerType::get( + CGF.getBuilder().getContext(), cirElementType); + auto loc = CGF.getLoc(ExprToVisit->getSourceRange()); + + // Cast from cir.ptr to cir.ptr + auto begin = CGF.getBuilder().create( + loc, cirElementPtrType, mlir::cir::CastKind::array_to_ptrdecay, + DestPtr.getPointer()); + + CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); + CharUnits elementAlign = + DestPtr.getAlignment().alignmentOfArrayElement(elementSize); + + // Exception safety requires us to destroy all the + // already-constructed members if an initializer throws. + // For that, we'll need an EH cleanup. + [[maybe_unused]] QualType::DestructionKind dtorKind = + elementType.isDestructedType(); + [[maybe_unused]] Address endOfInit = Address::invalid(); + assert(!CGF.needsEHCleanup(dtorKind) && "destructed types NIY"); + + // The 'current element to initialize'. The invariants on this + // variable are complicated. Essentially, after each iteration of + // the loop, it points to the last initialized element, except + // that it points to the beginning of the array before any + // elements have been initialized. + mlir::Value element = begin; + + // Don't build the 'one' before the cycle to avoid + // emmiting the redundant cir.const(1) instrs. + mlir::Value one; + + // Emit the explicit initializers. + for (uint64_t i = 0; i != NumInitElements; ++i) { + if (i == 1) + one = CGF.getBuilder().getConstInt( + loc, CGF.PtrDiffTy.cast(), 1); + + // Advance to the next element. + if (i > 0) { + element = CGF.getBuilder().create( + loc, cirElementPtrType, element, one); + + // Tell the cleanup that it needs to destroy up to this + // element. TODO: some of these stores can be trivially + // observed to be unnecessary. + assert(!endOfInit.isValid() && "destructed types NIY"); + } + + LValue elementLV = CGF.makeAddrLValue( + Address(element, cirElementType, elementAlign), elementType); + buildInitializationToLValue(Args[i], elementLV); + } + + // Check whether there's a non-trivial array-fill expression. + bool hasTrivialFiller = isTrivialFiller(ArrayFiller); + + // Any remaining elements need to be zero-initialized, possibly + // using the filler expression. We can skip this if the we're + // emitting to zeroed memory. + if (NumInitElements != NumArrayElements && + !(Dest.isZeroed() && hasTrivialFiller && + CGF.getTypes().isZeroInitializable(elementType))) { + llvm_unreachable("zero-initialization of arrays NIY"); + } + + // Leave the partial-array cleanup if we entered one. + assert(!dtorKind && "destructed types NIY"); +} + /// True if the given aggregate type requires special GC API calls. bool AggExprEmitter::TypeRequiresGCollection(QualType T) { // Only record types have members that might require garbage collection. @@ -888,6 +999,16 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), ExprToVisit->getType()); // Handle initialization of an array. + if (ExprToVisit->getType()->isConstantArrayType()) { + auto AType = cast(Dest.getAddress().getElementType()); + buildArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), + ExprToVisit, InitExprs, ArrayFiller); + return; + } else if (ExprToVisit->getType()->isVariableArrayType()) { + llvm_unreachable("variable arrays NYI"); + return; + } + if (ExprToVisit->getType()->isArrayType()) { llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c index 82ccc68a88a9..84f1a3a9d9ef 100644 --- a/clang/test/CIR/CodeGen/array-init.c +++ b/clang/test/CIR/CodeGen/array-init.c @@ -8,3 +8,22 @@ void foo() { // CHECK-NEXT: %1 = cir.const(#cir.const_array<[9.000000e+00, 8.000000e+00, 7.000000e+00]> : !cir.array) : !cir.array // CHECK-NEXT: cir.store %1, %0 : !cir.array, cir.ptr > +void bar(int a, int b, int c) { + int arr[] = {a,b,c}; +} + +// CHECK: cir.func @bar +// CHECK: [[ARR:%.*]] = cir.alloca !cir.array, cir.ptr >, ["arr", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, [[A:%.*]] : !s32i, cir.ptr +// CHECK-NEXT: cir.store %arg1, [[B:%.*]] : !s32i, cir.ptr +// CHECK-NEXT: cir.store %arg2, [[C:%.*]] : !s32i, cir.ptr +// CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: [[LOAD_A:%.*]] = cir.load [[A]] : cir.ptr , !s32i +// CHECK-NEXT: cir.store [[LOAD_A]], [[FI_EL]] : !s32i, cir.ptr +// CHECK-NEXT: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride(%4 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: [[LOAD_B:%.*]] = cir.load [[B]] : cir.ptr , !s32i +// CHECK-NEXT: cir.store [[LOAD_B]], [[SE_EL]] : !s32i, cir.ptr +// CHECK-NEXT: [[TH_EL:%.*]] = cir.ptr_stride(%7 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: [[LOAD_C:%.*]] = cir.load [[C]] : cir.ptr , !s32i +// CHECK-NEXT: cir.store [[LOAD_C]], [[TH_EL]] : !s32i, cir.ptr From 5630601ba5ae8544d09f29c79f64c97cca38b526 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Wed, 31 Jan 2024 23:40:22 +0300 Subject: [PATCH 1378/2301] [CIR][CIRGen] Handle __extension__ keyword (#421) Support \_\_extension\_\_ keyword in CIRGen --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5 ++++- clang/test/CIR/CodeGen/gnu-extension.c | 11 +++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/gnu-extension.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 16056cc2b004..bd2ebec4da5a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -557,8 +557,11 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitUnaryImag(const UnaryOperator *E) { llvm_unreachable("NYI"); } + mlir::Value VisitUnaryExtension(const UnaryOperator *E) { - llvm_unreachable("NYI"); + // __extension__ doesn't requred any codegen + // just forward the value + return Visit(E->getSubExpr()); } mlir::Value buildUnaryOp(const UnaryOperator *E, mlir::cir::UnaryOpKind kind, diff --git a/clang/test/CIR/CodeGen/gnu-extension.c b/clang/test/CIR/CodeGen/gnu-extension.c new file mode 100644 index 000000000000..949f39edb3dd --- /dev/null +++ b/clang/test/CIR/CodeGen/gnu-extension.c @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int foo(void) { return __extension__ 0b101010; } + +//CHECK: cir.func @foo() -> !s32i extra( {inline = #cir.inline, optnone = #cir.optnone} ) { +//CHECK-NEXT: [[ADDR:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +//CHECK-NEXT: [[VAL:%.*]] = cir.const(#cir.int<42> : !s32i) : !s32i +//CHECK-NEXT: cir.store [[VAL]], [[ADDR]] : !s32i, cir.ptr +//CHECK-NEXT: [[LOAD_VAL:%.*]] = cir.load [[ADDR]] : cir.ptr , !s32i +//CHECK-NEXT: cir.return [[LOAD_VAL]] : !s32i From d435691e6ee3026b3909c66dc405872635f865c4 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Wed, 31 Jan 2024 23:53:32 +0300 Subject: [PATCH 1379/2301] [CIR][CIRGen] Add missing case to 'isNullValue' (#433) Support for BoolAttr in isNullValue --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 3 +++ clang/test/CIR/CodeGen/bool.c | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 22bccead3699..63533adf93cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -250,6 +250,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { if (const auto intVal = attr.dyn_cast()) return intVal.isNullValue(); + if (const auto boolVal = attr.dyn_cast()) + return !boolVal.getValue(); + if (const auto fpVal = attr.dyn_cast()) { bool ignored; llvm::APFloat FV(+0.0); diff --git a/clang/test/CIR/CodeGen/bool.c b/clang/test/CIR/CodeGen/bool.c index f1e487f35223..7af7527c4a76 100644 --- a/clang/test/CIR/CodeGen/bool.c +++ b/clang/test/CIR/CodeGen/bool.c @@ -7,6 +7,14 @@ typedef struct { bool x; } S; +// CHECK: cir.func @init_bool +// CHECK: [[ALLOC:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[ZERO:%.*]] = cir.const(#cir.zero : !ty_22S22) : !ty_22S22 +// CHECK: cir.store [[ZERO]], [[ALLOC]] : !ty_22S22, cir.ptr +void init_bool(void) { + S s = {0}; +} + // CHECK: cir.func @store_bool // CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > // CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > From c7baf7408b3272f0e8c3e6869387710bb61b739f Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Fri, 2 Feb 2024 04:46:51 +0300 Subject: [PATCH 1380/2301] [CIR][CIRGen] Support for section atttribute (#422) This PR adds support for section("$name") attribute --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 ++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 ++++------- .../CIR/CodeGen/UnimplementedFeatureGuarding.h | 1 - .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 16 ++++++++++++++-- clang/test/CIR/CodeGen/attributes.c | 14 ++++++++++++++ 5 files changed, 34 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/CodeGen/attributes.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1e5145d0e15b..ea4013cb5bbd 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1422,7 +1422,8 @@ def GlobalOp : CIR_Op<"global", [Symbol, DeclareOpInterfaceMethods:$initial_value, UnitAttr:$constant, OptionalAttr:$alignment, - OptionalAttr:$ast + OptionalAttr:$ast, + OptionalAttr:$section ); let regions = (region AnyRegion:$ctorRegion, AnyRegion:$dtorRegion); let assemblyFormat = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 59d88eafee21..40de4f53e32e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -716,10 +716,8 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // Emit section information for extern variables. if (D->hasExternalStorage()) { - if (const SectionAttr *SA = D->getAttr()) { - assert(!UnimplementedFeature::setGlobalVarSection()); - llvm_unreachable("section info for extern vars is NYI"); - } + if (const SectionAttr *SA = D->getAttr()) + GV.setSectionAttr(builder.getStringAttr(SA->getName())); } // Handle XCore specific ABI requirements. @@ -1019,9 +1017,8 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // isTypeConstant(D->getType(), true)); // If it is in a read-only section, mark it 'constant'. - if (const SectionAttr *SA = D->getAttr()) { - assert(0 && "not implemented"); - } + if (const SectionAttr *SA = D->getAttr()) + GV.setSectionAttr(builder.getStringAttr(SA->getName())); // TODO(cir): // GV->setAlignment(getContext().getDeclAlign(D).getAsAlign()); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 89a336146f68..bf894fe53acf 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -41,7 +41,6 @@ struct UnimplementedFeature { // Unhandled global/linkage information. static bool unnamedAddr() { return false; } static bool setComdat() { return false; } - static bool setGlobalVarSection() { return false; } static bool setDSOLocal() { return false; } static bool threadLocal() { return false; } static bool setDLLStorageClass() { return false; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8f07d47684be..12548f8b5af9 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1505,12 +1505,21 @@ class CIRGlobalOpLowering const auto linkage = convertLinkage(op.getLinkage()); const auto symbol = op.getSymName(); const auto loc = op.getLoc(); + std::optional section = op.getSection(); std::optional init = op.getInitialValue(); + SmallVector attributes; + if (section.has_value()) + attributes.push_back(rewriter.getNamedAttr( + "section", rewriter.getStringAttr(section.value()))); + // Check for missing funcionalities. if (!init.has_value()) { rewriter.replaceOpWithNewOp( - op, llvmType, isConst, linkage, symbol, mlir::Attribute()); + op, llvmType, isConst, linkage, symbol, mlir::Attribute(), + /*alignment*/ 0, /*addrSpace*/ 0, + /*dsoLocal*/ false, /*threadLocal*/ false, + /*comdat*/ mlir::SymbolRefAttr(), attributes); return mlir::success(); } @@ -1584,7 +1593,10 @@ class CIRGlobalOpLowering // Rewrite op. rewriter.replaceOpWithNewOp( - op, llvmType, isConst, linkage, symbol, init.value()); + op, llvmType, isConst, linkage, symbol, init.value(), + /*alignment*/ 0, /*addrSpace*/ 0, + /*dsoLocal*/ false, /*threadLocal*/ false, + /*comdat*/ mlir::SymbolRefAttr(), attributes); return mlir::success(); } }; diff --git a/clang/test/CIR/CodeGen/attributes.c b/clang/test/CIR/CodeGen/attributes.c new file mode 100644 index 000000000000..67b625c11520 --- /dev/null +++ b/clang/test/CIR/CodeGen/attributes.c @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM +// XFAIL: * + +extern int __attribute__((section(".shared"))) ext; +int getExt() { + return ext; +} +// CIR: cir.global "private" external @ext : !s32i {section = ".shared"} +// LLVM: @ext = external global i32, section ".shared" + +int __attribute__((section(".shared"))) glob = 42; +// CIR: cir.global external @glob = #cir.int<42> : !s32i {section = ".shared"} +// LLVM @glob = global i32 42, section ".shared" From 7960c5839963550616baa31b89649bb7375a2976 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Fri, 2 Feb 2024 04:48:05 +0300 Subject: [PATCH 1381/2301] [CIR][CIRGen][Bugfix] Fix bool zero initialization (#411) Support missing zero initialization of Bools --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 3 +++ clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 ++ clang/test/CIR/CodeGen/globals.cpp | 4 +++- clang/test/CIR/Lowering/bool.cir | 7 +++++-- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 63533adf93cd..cbc4f0f9e864 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -232,6 +232,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return getConstPtrAttr(ptrTy, 0); if (auto structTy = ty.dyn_cast()) return getZeroAttr(structTy); + if (ty.isa()) { + return getCIRBoolAttr(false); + } llvm_unreachable("Zero initializer for given type is NYI"); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 12548f8b5af9..0dfe1bcf29cb 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1550,6 +1550,8 @@ class CIRGlobalOpLowering // Initializer is a constant integer: convert to MLIR builtin constant. else if (auto intAttr = init.value().dyn_cast()) { init = rewriter.getIntegerAttr(llvmType, intAttr.getValue()); + } else if (auto boolAttr = init.value().dyn_cast()) { + init = rewriter.getBoolAttr(boolAttr.getValue()); } else if (isa( init.value())) { // TODO(cir): once LLVM's dialect has a proper zeroinitializer attribute diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 4792cb341400..52c146cce421 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -6,6 +6,7 @@ const int b = 4; // unless used wont be generated unsigned long int c = 2; int d = a; +bool e; float y = 3.4; double w = 4.3; char x = '3'; @@ -41,7 +42,8 @@ int use_func() { return func(); } // CHECK-NEXT: [[TMP2:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i // CHECK-NEXT: cir.store [[TMP2]], [[TMP0]] : !s32i, cir.ptr -// CHECK: cir.global external @y = 3.400000e+00 : f32 +// CHECK: cir.global external @e = #false +// CHECK-NEXT: cir.global external @y = 3.400000e+00 : f32 // CHECK-NEXT: cir.global external @w = 4.300000e+00 : f64 // CHECK-NEXT: cir.global external @x = #cir.int<51> : !s8i // CHECK-NEXT: cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 79b406cc1634..34175667ec39 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -5,14 +5,16 @@ #true = #cir.bool : !cir.bool module { + cir.global external @g_bl = #false +// MLIR: llvm.mlir.global external @g_bl(false) {addr_space = 0 : i32} : i8 +// LLVM: @g_bl = global i8 0 + cir.func @foo() { %1 = cir.const(#true) : !cir.bool %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} cir.store %1, %0 : !cir.bool, cir.ptr cir.return } -} - // MLIR: llvm.func @foo() // MLIR-DAG: = llvm.mlir.constant(1 : i8) : i8 // MLIR-DAG: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64 @@ -24,3 +26,4 @@ module { // LLVM-NEXT: %1 = alloca i8, i64 1, align 1 // LLVM-NEXT: store i8 1, ptr %1, align 1 // LLVM-NEXT: ret void +} From db163344add64f00dd19cc7d4ab4e3c1fc3564ee Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Fri, 2 Feb 2024 04:51:21 +0300 Subject: [PATCH 1382/2301] [CIR][Lowering] Support conversion of cir.zero to dense consts (#413) Compiling the given c-code ``` void foo() { int i [2][1] = { { 1 }, { 0 } }; long int li[2][1] = { { 1 }, { 0 } }; float fl[2][1] = { { 1 }, { 0 } }; double d [2][1] = { { 1 }, { 0 } }; } ``` leads to compilation error ``` unknown element in ConstArrayAttr UNREACHABLE executed at /home/huawei/cir/repo/van/llvm-project/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp:951! ``` PR implements conversion the cir.zero attr to dense constant and fixed this error. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 39 +++++++++++++++++++ clang/test/CIR/Lowering/const.cir | 21 +++++++++- 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0dfe1bcf29cb..ce5063285e0d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -969,6 +969,39 @@ convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, llvm::ArrayRef(values)); } +template StorageTy getZeroInitFromType(mlir::Type Ty); + +template <> mlir::APInt getZeroInitFromType(mlir::Type Ty) { + assert(Ty.isa() && "expected int type"); + auto IntTy = Ty.cast(); + return mlir::APInt::getZero(IntTy.getWidth()); +} + +template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { + assert((Ty.isF32() || Ty.isF64()) && "only float and double supported"); + if (Ty.isF32()) + return mlir::APFloat(0.f); + if (Ty.isF64()) + return mlir::APFloat(0.0); + llvm_unreachable("NYI"); +} + +// return the nested type and quiantity of elements for cir.array type. +// e.g: for !cir.array x 1> +// it returns !s32i as return value and stores 3 to elemQuantity. +mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity) { + assert(Ty.isa() && "expected ArrayType"); + + elemQuantity = 1; + mlir::Type nestTy = Ty; + while (auto ArrTy = nestTy.dyn_cast()) { + nestTy = ArrTy.getEltType(); + elemQuantity *= ArrTy.getSize(); + } + + return nestTy; +} + template void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, llvm::SmallVectorImpl &values) { @@ -979,6 +1012,12 @@ void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, } else if (auto subArrayAttr = eltAttr.dyn_cast()) { convertToDenseElementsAttrImpl(subArrayAttr, values); + } else if (auto zeroAttr = eltAttr.dyn_cast()) { + unsigned numStoredZeros = 0; + auto nestTy = + getNestedTypeAndElemQuantity(zeroAttr.getType(), numStoredZeros); + values.insert(values.end(), numStoredZeros, + getZeroInitFromType(nestTy)); } else { llvm_unreachable("unknown element in ConstArrayAttr"); } diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index deca881e2e6a..7d0c63f8ccbb 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -1,10 +1,10 @@ // RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -!s32i = !cir.int !s8i = !cir.int +!s32i = !cir.int +!s64i = !cir.int !ty_22anon2E122 = !cir.struct, !cir.int} #cir.record.decl.ast> - module { cir.func @testConstArrInit() { %0 = cir.const(#cir.const_array<"string\00" : !cir.array> : !cir.array) : !cir.array @@ -18,6 +18,23 @@ module { cir.return } + cir.func @testConvertConstArrayToDenseConst() { + %0 = cir.const(#cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> + %1 = cir.const(#cir.const_array<[#cir.const_array<[#cir.int<1> : !s64i]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> + %2 = cir.const(#cir.const_array<[#cir.const_array<[1.000000e+00 : f32]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> + %3 = cir.const(#cir.const_array<[#cir.const_array<[1.000000e+00]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> + %4 = cir.const(#cir.const_array<[#cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array]> : !cir.array x 1>, #cir.zero : !cir.array x 1>]> : !cir.array x 1> x 2>) : !cir.array x 1> x 2> + + cir.return + } + // CHECK: llvm.func @testConvertConstArrayToDenseConst() + // CHECK: {{%.*}} = llvm.mlir.constant(dense<{{\[\[}}1], [0{{\]\]}}> : tensor<2x1xi32>) : !llvm.array<2 x array<1 x i32>> + // CHECK: {{%.*}} = llvm.mlir.constant(dense<{{\[\[}}1], [0{{\]\]}}> : tensor<2x1xi64>) : !llvm.array<2 x array<1 x i64>> + // CHECK: {{%.*}} = llvm.mlir.constant(dense<{{\[\[}}1.000000e+00], [0.000000e+00{{\]\]}}> : tensor<2x1xf32>) : !llvm.array<2 x array<1 x f32>> + // CHECK: {{%.*}} = llvm.mlir.constant(dense<{{\[\[}}1.000000e+00], [0.000000e+00{{\]\]}}> : tensor<2x1xf64>) : !llvm.array<2 x array<1 x f64>> + // CHECK: {{%.*}} = llvm.mlir.constant(dense<{{\[\[\[}}1, 1, 1{{\]\]}}, {{\[\[}}0, 0, 0{{\]\]\]}}> : tensor<2x1x3xi32>) : !llvm.array<2 x array<1 x array<3 x i32>>> + // CHECK: llvm.return + cir.func @testConstArrayOfStructs() { %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 4 : i64} %1 = cir.const(#cir.const_array<[#cir.const_struct<{#cir.int<0> : !s32i, #cir.int<1> : !s32i}> : !ty_22anon2E122]> : !cir.array) : !cir.array From 8f1397103c82e5b14069d09a5408478ed66d41d0 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 2 Feb 2024 10:10:39 +0800 Subject: [PATCH 1383/2301] [CIR][CodeGen] Initial support for dynamic_cast (#426) This PR introduces CIR CodeGen support for `dynamic_cast`. The full feature set of `dynamic_cast` is not fully implemented in this PR as it's already pretty large. This PR only include support for downcasting and sidecasting a pointer or reference. `dynamic_cast` is not yet implemented. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 + clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 11 ++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 16 ++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 8 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 84 ++++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 7 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 8 + clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 155 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 ++ clang/lib/CIR/CodeGen/CIRGenModule.h | 10 ++ .../CodeGen/UnimplementedFeatureGuarding.h | 2 + clang/test/CIR/CodeGen/dynamic-cast.cpp | 52 ++++++ 12 files changed, 364 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/dynamic-cast.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index cbc4f0f9e864..62a3c0e9e069 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -855,6 +855,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { alloca->moveAfter(*std::prev(allocas.end())); } } + + mlir::Value createPtrIsNull(mlir::Value ptr) { + return createNot(createPtrToBoolCast(ptr)); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index a4e7808bf36b..b7dc7b66a4f3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -299,6 +299,17 @@ class CIRGenCXXABI { virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) = 0; virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) = 0; + + virtual void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) = 0; + + virtual bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, + QualType SrcRecordTy) = 0; + + virtual mlir::Value buildDynamicCastCall(CIRGenFunction &CGF, + mlir::Location Loc, Address Value, + QualType SrcRecordTy, + QualType DestTy, + QualType DestRecordTy) = 0; }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 6b5c4ccbe7ba..50d5633a3dff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -741,6 +741,22 @@ RValue CIRGenFunction::GetUndefRValue(QualType Ty) { return RValue::get(nullptr); } +mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, + mlir::cir::FuncOp callee, + ArrayRef args) { + // TODO(cir): set the calling convention to this runtime call. + assert(!UnimplementedFeature::setCallingConv()); + + auto call = builder.create(loc, callee, args); + assert(call->getNumResults() <= 1 && + "runtime functions have at most 1 result"); + + if (call->getNumResults() == 0) + return nullptr; + + return call->getResult(0); +} + void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, QualType type) { // TODO: Add the DisableDebugLocationUpdates helper diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 223616e56f3e..7aab7bae2b32 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -19,6 +19,7 @@ #include "CIRGenValue.h" #include "UnimplementedFeatureGuarding.h" +#include "clang/AST/ExprCXX.h" #include "clang/AST/GlobalDecl.h" #include "clang/Basic/Builtins.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" @@ -1569,7 +1570,10 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { assert(0 && "NYI"); case CK_Dynamic: { - assert(0 && "NYI"); + LValue LV = buildLValue(E->getSubExpr()); + Address V = LV.getAddress(); + const auto *DCE = cast(E); + return MakeNaturalAlignAddrLValue(buildDynamicCast(V, DCE), E->getType()); } case CK_ConstructorConversion: @@ -2176,7 +2180,6 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return buildPredefinedLValue(cast(E)); case Expr::CStyleCastExprClass: case Expr::CXXFunctionalCastExprClass: - case Expr::CXXDynamicCastExprClass: case Expr::CXXReinterpretCastExprClass: case Expr::CXXConstCastExprClass: case Expr::CXXAddrspaceCastExprClass: @@ -2185,6 +2188,7 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { << E->getStmtClassName() << "'"; assert(0 && "Use buildCastLValue below, remove me when adding testcase"); case Expr::CXXStaticCastExprClass: + case Expr::CXXDynamicCastExprClass: case Expr::ImplicitCastExprClass: return buildCastLValue(cast(E)); case Expr::OpaqueValueExprClass: diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 4de5f522dc27..7efc6220bc87 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -911,3 +911,87 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, llvm_unreachable("NYI"); // DestroyingDeleteTag->eraseFromParent(); } } + +static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF, + mlir::Location Loc, QualType DestTy) { + mlir::Type DestCIRTy = CGF.ConvertType(DestTy); + assert(DestCIRTy.isa() && + "result of dynamic_cast should be a ptr"); + + mlir::Value NullPtrValue = CGF.getBuilder().getNullPtr(DestCIRTy, Loc); + + if (!DestTy->isPointerType()) { + /// C++ [expr.dynamic.cast]p9: + /// A failed cast to reference type throws std::bad_cast + CGF.CGM.getCXXABI().buildBadCastCall(CGF, Loc); + } + + return NullPtrValue; +} + +mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, + const CXXDynamicCastExpr *DCE) { + auto loc = getLoc(DCE->getSourceRange()); + + CGM.buildExplicitCastExprType(DCE, this); + QualType destTy = DCE->getTypeAsWritten(); + QualType srcTy = DCE->getSubExpr()->getType(); + + // C++ [expr.dynamic.cast]p7: + // If T is "pointer to cv void," then the result is a pointer to the most + // derived object pointed to by v. + bool isDynCastToVoid = destTy->isVoidPointerType(); + QualType srcRecordTy; + QualType destRecordTy; + if (isDynCastToVoid) { + llvm_unreachable("NYI"); + } else if (const PointerType *DestPTy = destTy->getAs()) { + srcRecordTy = srcTy->castAs()->getPointeeType(); + destRecordTy = DestPTy->getPointeeType(); + } else { + srcRecordTy = srcTy; + destRecordTy = destTy->castAs()->getPointeeType(); + } + + buildTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(), + srcRecordTy); + + if (DCE->isAlwaysNull()) + return buildDynamicCastToNull(*this, loc, destTy); + + assert(srcRecordTy->isRecordType() && "source type must be a record type!"); + + // C++ [expr.dynamic.cast]p4: + // If the value of v is a null pointer value in the pointer case, the result + // is the null pointer value of type T. + bool shouldNullCheckSrcValue = + CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(srcTy->isPointerType(), + srcRecordTy); + + auto buildDynamicCastAfterNullCheck = [&]() -> mlir::Value { + if (isDynCastToVoid) + llvm_unreachable("NYI"); + else { + assert(destRecordTy->isRecordType() && + "destination type must be a record type!"); + return CGM.getCXXABI().buildDynamicCastCall( + *this, loc, ThisAddr, srcRecordTy, destTy, destRecordTy); + } + }; + + if (!shouldNullCheckSrcValue) + return buildDynamicCastAfterNullCheck(); + + mlir::Value srcValueIsNull = builder.createPtrIsNull(ThisAddr.getPointer()); + return builder + .create( + loc, srcValueIsNull, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield(loc, + buildDynamicCastToNull(*this, loc, destTy)); + }, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield(loc, buildDynamicCastAfterNullCheck()); + }) + .getResult(); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index bd2ebec4da5a..5d56485d2c4c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1405,8 +1405,11 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // the alignment. return CGF.buildPointerWithAlignment(CE).getPointer(); } - case CK_Dynamic: - llvm_unreachable("NYI"); + case CK_Dynamic: { + Address V = CGF.buildPointerWithAlignment(E); + const auto *DCE = cast(CE); + return CGF.buildDynamicCast(V, DCE); + } case CK_ArrayToPointerDecay: return CGF.buildArrayToPointerDecay(E).getPointer(); case CK_FunctionToPointerDecay: diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 0a4149164687..c736615f6f65 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -362,6 +362,9 @@ class CIRGenFunction : public CIRGenTypeCache { TCK_MemberCall, /// Checking the 'this' pointer for a constructor call. TCK_ConstructorCall, + /// Checking the operand of a dynamic_cast or a typeid expression. Must be + /// null or an object within its lifetime. + TCK_DynamicOperation }; // Holds coroutine data if the current function is a coroutine. We use a @@ -638,6 +641,8 @@ class CIRGenFunction : public CIRGenTypeCache { QualType DeleteTy, mlir::Value NumElements = nullptr, CharUnits CookieSize = CharUnits()); + mlir::Value buildDynamicCast(Address ThisAddr, const CXXDynamicCastExpr *DCE); + mlir::Value createLoad(const clang::VarDecl *VD, const char *Name); mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV, @@ -819,6 +824,9 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildCallExpr(const clang::CallExpr *E, ReturnValueSlot ReturnValue = ReturnValueSlot()); + mlir::Value buildRuntimeCall(mlir::Location loc, mlir::cir::FuncOp callee, + ArrayRef args = {}); + /// Create a check for a function parameter that may potentially be /// declared as non-null. void buildNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index ad7785ac98e0..0f1f43b31913 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -281,6 +281,18 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { return Args.size() - 1; } + void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) override; + + bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, + QualType SrcRecordTy) override { + return SrcIsPtr; + } + + mlir::Value buildDynamicCastCall(CIRGenFunction &CGF, mlir::Location Loc, + Address Value, QualType SrcRecordTy, + QualType DestTy, + QualType DestRecordTy) override; + /**************************** RTTI Uniqueness ******************************/ protected: /// Returns true if the ABI requires RTTI type_info objects to be unique @@ -2173,3 +2185,146 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, builder.create(CGF.getLoc(E->getSourceRange()), exceptionPtr, typeInfo.getSymbol(), dtor); } + +static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { + // Prototype: void __cxa_bad_cast(); + + // TODO(cir): set the calling convention of the runtime function. + assert(!UnimplementedFeature::setCallingConv()); + + mlir::cir::FuncType FTy = + CGF.getBuilder().getFuncType({}, CGF.getBuilder().getVoidTy()); + return CGF.CGM.getOrCreateRuntimeFunction(FTy, "__cxa_bad_cast"); +} + +void CIRGenItaniumCXXABI::buildBadCastCall(CIRGenFunction &CGF, + mlir::Location loc) { + // TODO(cir): set the calling convention to the runtime function. + assert(!UnimplementedFeature::setCallingConv()); + + CGF.buildRuntimeCall(loc, getBadCastFn(CGF)); + // TODO(cir): mark the current insertion point as unreachable. + assert(!UnimplementedFeature::unreachableOp()); +} + +static CharUnits computeOffsetHint(ASTContext &Context, + const CXXRecordDecl *Src, + const CXXRecordDecl *Dst) { + CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, + /*DetectVirtual=*/false); + + // If Dst is not derived from Src we can skip the whole computation below and + // return that Src is not a public base of Dst. Record all inheritance paths. + if (!Dst->isDerivedFrom(Src, Paths)) + return CharUnits::fromQuantity(-2ULL); + + unsigned NumPublicPaths = 0; + CharUnits Offset; + + // Now walk all possible inheritance paths. + for (const CXXBasePath &Path : Paths) { + if (Path.Access != AS_public) // Ignore non-public inheritance. + continue; + + ++NumPublicPaths; + + for (const CXXBasePathElement &PathElement : Path) { + // If the path contains a virtual base class we can't give any hint. + // -1: no hint. + if (PathElement.Base->isVirtual()) + return CharUnits::fromQuantity(-1ULL); + + if (NumPublicPaths > 1) // Won't use offsets, skip computation. + continue; + + // Accumulate the base class offsets. + const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class); + Offset += L.getBaseClassOffset( + PathElement.Base->getType()->getAsCXXRecordDecl()); + } + } + + // -2: Src is not a public base of Dst. + if (NumPublicPaths == 0) + return CharUnits::fromQuantity(-2ULL); + + // -3: Src is a multiple public base type but never a virtual base type. + if (NumPublicPaths > 1) + return CharUnits::fromQuantity(-3ULL); + + // Otherwise, the Src type is a unique public nonvirtual base type of Dst. + // Return the offset of Src from the origin of Dst. + return Offset; +} + +static mlir::cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { + // Prototype: + // void *__dynamic_cast(const void *sub, + // global_as const abi::__class_type_info *src, + // global_as const abi::__class_type_info *dst, + // std::ptrdiff_t src2dst_offset); + + mlir::Type VoidPtrTy = CGF.VoidPtrTy; + mlir::Type RTTIPtrTy = CGF.getBuilder().getUInt8PtrTy(); + mlir::Type PtrDiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); + + // TODO(cir): mark the function as nowind readonly. + + // TODO(cir): set the calling convention of the runtime function. + assert(!UnimplementedFeature::setCallingConv()); + + mlir::cir::FuncType FTy = CGF.getBuilder().getFuncType( + {VoidPtrTy, RTTIPtrTy, RTTIPtrTy, PtrDiffTy}, VoidPtrTy); + return CGF.CGM.getOrCreateRuntimeFunction(FTy, "__dynamic_cast"); +} + +mlir::Value CIRGenItaniumCXXABI::buildDynamicCastCall( + CIRGenFunction &CGF, mlir::Location Loc, Address Value, + QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy) { + mlir::Type ptrdiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); + + mlir::Value srcRtti = CGF.getBuilder().getConstant( + Loc, + CGF.CGM.getAddrOfRTTIDescriptor(Loc, SrcRecordTy.getUnqualifiedType()) + .cast()); + mlir::Value destRtti = CGF.getBuilder().getConstant( + Loc, + CGF.CGM.getAddrOfRTTIDescriptor(Loc, DestRecordTy.getUnqualifiedType()) + .cast()); + + // Compute the offset hint. + const CXXRecordDecl *srcDecl = SrcRecordTy->getAsCXXRecordDecl(); + const CXXRecordDecl *destDecl = DestRecordTy->getAsCXXRecordDecl(); + mlir::Value offsetHint = CGF.getBuilder().getConstAPInt( + Loc, ptrdiffTy, + llvm::APSInt::get(computeOffsetHint(CGF.getContext(), srcDecl, destDecl) + .getQuantity())); + + // Emit the call to __dynamic_cast. + mlir::Value srcPtr = + CGF.getBuilder().createBitcast(Value.getPointer(), CGF.VoidPtrTy); + mlir::Value args[4] = {srcPtr, srcRtti, destRtti, offsetHint}; + mlir::Value castedPtr = + CGF.buildRuntimeCall(Loc, getItaniumDynamicCastFn(CGF), args); + + assert(castedPtr.getType().isa() && + "the return value of __dynamic_cast should be a ptr"); + + /// C++ [expr.dynamic.cast]p9: + /// A failed cast to reference type throws std::bad_cast + if (DestTy->isReferenceType()) { + // Emit a cir.if that checks the casted value. + mlir::Value castedValueIsNull = CGF.getBuilder().createPtrIsNull(castedPtr); + CGF.getBuilder().create( + Loc, castedValueIsNull, false, [&](mlir::OpBuilder &, mlir::Location) { + buildBadCastCall(CGF, Loc); + // TODO(cir): remove this once buildBadCastCall inserts unreachable + CGF.getBuilder().createYield(Loc); + }); + } + + // Note that castedPtr is a void*. Cast it to a pointer to the destination + // type before return. + mlir::Type destCIRTy = CGF.ConvertType(DestTy); + return CGF.getBuilder().createBitcast(castedPtr, destCIRTy); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 40de4f53e32e..25b76b95a5ad 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1976,6 +1976,17 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, return f; } +mlir::cir::FuncOp +CIRGenModule::getOrCreateRuntimeFunction(mlir::cir::FuncType Ty, + StringRef Name) { + auto entry = cast_if_present(getGlobalValue(Name)); + if (entry) + return entry; + + return createCIRFunction(mlir::UnknownLoc::get(builder.getContext()), Name, + Ty, nullptr); +} + bool isDefaultedMethod(const clang::FunctionDecl *FD) { if (FD->isDefaulted() && isa(FD) && (cast(FD)->isCopyAssignmentOperator() || diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 900210a7c24a..b200b210b888 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -490,6 +490,13 @@ class CIRGenModule : public CIRGenTypeCache { GetAddrOfGlobal(clang::GlobalDecl GD, ForDefinition_t IsForDefinition = NotForDefinition); + // Return whether RTTI information should be emitted for this target. + bool shouldEmitRTTI(bool ForEH = false) { + return (ForEH || getLangOpts().RTTI) && !getLangOpts().CUDAIsDevice && + !(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && + getTriple().isNVPTX()); + } + // C++ related functions. void buildDeclContext(const DeclContext *DC); @@ -606,6 +613,9 @@ class CIRGenModule : public CIRGenTypeCache { mlir::cir::FuncType Ty, const clang::FunctionDecl *FD); + mlir::cir::FuncOp getOrCreateRuntimeFunction(mlir::cir::FuncType Ty, + StringRef Name); + /// Emit type info if type of an expression is a variably modified /// type. Also emit proper debug info for cast types. void buildExplicitCastExprType(const ExplicitCastExpr *E, diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index bf894fe53acf..65da4a7ac685 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -147,6 +147,8 @@ struct UnimplementedFeature { static bool isSEHTryScope() { return false; } static bool emitScalarRangeCheck() { return false; } static bool stmtExprEvaluation() { return false; } + static bool setCallingConv() { return false; } + static bool unreachableOp() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/dynamic-cast.cpp b/clang/test/CIR/CodeGen/dynamic-cast.cpp new file mode 100644 index 000000000000..13c06f266719 --- /dev/null +++ b/clang/test/CIR/CodeGen/dynamic-cast.cpp @@ -0,0 +1,52 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Base { + virtual ~Base(); +}; +// CHECK: !ty_22Base22 = !cir.struct + +struct Derived : Base {}; +// CHECK: !ty_22Derived22 = !cir.struct + +// CHECK: cir.func private @__dynamic_cast(!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr + +Derived *ptr_cast(Base *b) { + return dynamic_cast(b); +} +// CHECK: cir.func @_Z8ptr_castP4Base +// CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr +// CHECK-NEXT: %[[#V2:]] = cir.cast(ptr_to_bool, %[[#V1]] : !cir.ptr), !cir.bool +// CHECK-NEXT: %[[#V3:]] = cir.unary(not, %[[#V2]]) : !cir.bool, !cir.bool +// CHECK-NEXT: %{{.+}} = cir.ternary(%[[#V3]], true { +// CHECK-NEXT: %[[#V4:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.yield %[[#V4]] : !cir.ptr +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[#V5:]] = cir.const(#cir.global_view<@_ZTI4Base> : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %[[#V6:]] = cir.const(#cir.global_view<@_ZTI7Derived> : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %[[#V7:]] = cir.const(#cir.int<0> : !s64i) : !s64i +// CHECK-NEXT: %[[#V8:]] = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr +// CHECK-NEXT: %[[#V9:]] = cir.call @__dynamic_cast(%[[#V8]], %[[#V5]], %[[#V6]], %[[#V7]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr +// CHECK-NEXT: %[[#V10:]] = cir.cast(bitcast, %[[#V9]] : !cir.ptr), !cir.ptr +// CHECK-NEXT: cir.yield %[[#V10]] : !cir.ptr +// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr + +// CHECK: cir.func private @__cxa_bad_cast() + +Derived &ref_cast(Base &b) { + return dynamic_cast(b); +} + +// CHECK: cir.func @_Z8ref_castR4Base +// CHECK: %[[#V11:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr +// CHECK-NEXT: %[[#V12:]] = cir.const(#cir.global_view<@_ZTI4Base> : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %[[#V13:]] = cir.const(#cir.global_view<@_ZTI7Derived> : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %[[#V14:]] = cir.const(#cir.int<0> : !s64i) : !s64i +// CHECK-NEXT: %[[#V15:]] = cir.cast(bitcast, %[[#V11]] : !cir.ptr), !cir.ptr +// CHECK-NEXT: %[[#V16:]] = cir.call @__dynamic_cast(%[[#V15]], %[[#V12]], %[[#V13]], %[[#V14]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr +// CHECK-NEXT: %[[#V17:]] = cir.cast(ptr_to_bool, %[[#V16]] : !cir.ptr), !cir.bool +// CHECK-NEXT: %[[#V18:]] = cir.unary(not, %[[#V17]]) : !cir.bool, !cir.bool +// CHECK-NEXT: cir.if %[[#V18]] { +// CHECK-NEXT: cir.call @__cxa_bad_cast() : () -> () +// CHECK-NEXT: } +// CHECK-NEXT: %{{.+}} = cir.cast(bitcast, %[[#V16]] : !cir.ptr), !cir.ptr From 5b51081cc4a04b0339abb7f2d757cd5dcffebcd2 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Sat, 3 Feb 2024 01:38:42 +0300 Subject: [PATCH 1384/2301] [CIR][CIRGen] Add codegen for branch prediction info builtins (#439) Initial support for the following builtins: ``` __builtin_expect __builtin_expect_with_probability __builtin_unpredictable ``` This PR supports codegen for this builtins on "-O0" compilation pipeline. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 10 ++++++- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/pred-info-builtins.c | 27 +++++++++++++++++++ 3 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/pred-info-builtins.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 962f01d44d58..16e9668b9345 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -385,6 +385,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return {}; } + case Builtin::BI__builtin_expect: + case Builtin::BI__builtin_expect_with_probability: + case Builtin::BI__builtin_unpredictable: { + if (CGM.getCodeGenOpts().OptimizationLevel != 0) + assert(!UnimplementedFeature::branchPredictionInfoBuiltin()); + return RValue::get(buildScalarExpr(E->getArg(0))); + } + // C++ std:: builtins. case Builtin::BImove: case Builtin::BImove_if_noexcept: @@ -701,4 +709,4 @@ mlir::cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *FD, auto Ty = getTypes().ConvertType(FD->getType()); return GetOrCreateCIRFunction(Name, Ty, D, /*ForVTable=*/false); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 65da4a7ac685..d2e7de9de062 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -130,6 +130,7 @@ struct UnimplementedFeature { static bool armComputeVolatileBitfields() { return false; } static bool setCommonAttributes() { return false; } static bool insertBuiltinUnpredictable() { return false; } + static bool branchPredictionInfoBuiltin() { return false; } static bool createInvariantGroup() { return false; } static bool addAutoInitAnnotation() { return false; } static bool addHeapAllocSiteMetadata() { return false; } diff --git a/clang/test/CIR/CodeGen/pred-info-builtins.c b/clang/test/CIR/CodeGen/pred-info-builtins.c new file mode 100644 index 000000000000..192eaf0691f2 --- /dev/null +++ b/clang/test/CIR/CodeGen/pred-info-builtins.c @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -O0 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +extern void __attribute__((noinline)) bar(void); + +void expect(int x) { + if (__builtin_expect(x, 0)) + bar(); +} +// CHECK: cir.func @expect +// CHECK: cir.if {{%.*}} { +// CHECK: cir.call @bar() : () -> () + +void expect_with_probability(int x) { + if (__builtin_expect_with_probability(x, 1, 0.8)) + bar(); +} +// CHECK: cir.func @expect_with_probability +// CHECK: cir.if {{%.*}} { +// CHECK: cir.call @bar() : () -> () + +void unpredictable(int x) { + if (__builtin_unpredictable(x > 1)) + bar(); +// CHECK: cir.func @unpredictable +// CHECK: cir.if {{%.*}} { +// CHECK: cir.call @bar() : () -> () +} From ffddc25fe0a1671f9f5867ca5426669a6aa42e93 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Sat, 3 Feb 2024 01:42:06 +0300 Subject: [PATCH 1385/2301] [CIR][CIRGen] Handle ternary op inside if cond (#440) Support for ConditionalOperator inside the if condition stmt --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 23 ++++++++++++++++++++++- clang/test/CIR/CodeGen/ternary.cpp | 22 +++++++++++++++++++++- 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 7aab7bae2b32..d0a02508cbc1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2304,7 +2304,28 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(const Expr *cond, } if (const ConditionalOperator *CondOp = dyn_cast(cond)) { - llvm_unreachable("NYI"); + auto *trueExpr = CondOp->getTrueExpr(); + auto *falseExpr = CondOp->getFalseExpr(); + mlir::Value condV = + buildOpOnBoolExpr(CondOp->getCond(), loc, trueExpr, falseExpr); + + auto ternaryOpRes = + builder + .create( + loc, condV, /*thenBuilder=*/ + [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) { + auto lhs = buildScalarExpr(trueExpr); + b.create(loc, lhs); + }, + /*elseBuilder=*/ + [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) { + auto rhs = buildScalarExpr(falseExpr); + b.create(loc, rhs); + }) + .getResult(); + + return buildScalarConversion(ternaryOpRes, CondOp->getType(), + getContext().BoolTy, CondOp->getExprLoc()); } if (const CXXThrowExpr *Throw = dyn_cast(cond)) { diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp index 7c62afdeffa1..5ce164624409 100644 --- a/clang/test/CIR/CodeGen/ternary.cpp +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -53,4 +53,24 @@ void m(APIType api) { // CHECK: cir.yield // CHECK: }) : (!cir.bool) -> () // CHECK: cir.return -// CHECK: } \ No newline at end of file +// CHECK: } + +int foo(int a, int b) { + if (a < b ? 0 : a) + return -1; + return 0; +} + +// CHECK: cir.func @_Z3fooii +// CHECK: [[A0:%.*]] = cir.load {{.*}} : cir.ptr , !s32i +// CHECK: [[B0:%.*]] = cir.load {{.*}} : cir.ptr , !s32i +// CHECK: [[CMP:%.*]] = cir.cmp(lt, [[A0]], [[B0]]) : !s32i, !cir.bool +// CHECK: [[RES:%.*]] = cir.ternary([[CMP]], true { +// CHECK: [[ZERO:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.yield [[ZERO]] : !s32i +// CHECK: }, false { +// CHECK: [[A1:%.*]] = cir.load {{.*}} : cir.ptr , !s32i +// CHECK: cir.yield [[A1]] : !s32i +// CHECK: }) : (!cir.bool) -> !s32i +// CHECK: [[RES_CAST:%.*]] = cir.cast(int_to_bool, [[RES]] : !s32i), !cir.bool +// CHECK: cir.if [[RES_CAST]] From 3a843f28b69b13bfafde01de0795961fd0628659 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Sat, 3 Feb 2024 01:49:36 +0300 Subject: [PATCH 1386/2301] [CIR][CIRGen] Support check for zero-init pointers (#441) --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 +++- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 7 +++---- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 5 +++++ clang/lib/CIR/CodeGen/CIRGenTypes.h | 5 +++++ clang/test/CIR/CodeGen/array-init.c | 26 +++++++++++++++++++++++++ 5 files changed, 42 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 62a3c0e9e069..3da914fcb150 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -550,7 +550,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { // TODO: dispatch creation for primitive types. - assert(ty.isa() && "NYI for other types"); + assert( + (ty.isa() || ty.isa()) && + "NYI for other types"); return create(loc, ty, getZeroAttr(ty)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 2c8ce45f46e2..316a3b774426 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -649,10 +649,9 @@ static bool isSimpleZero(const Expr *E, CIRGenFunction &CGF) { return true; // (int*)0 - Null pointer expressions. if (const CastExpr *ICE = dyn_cast(E)) { - llvm_unreachable("NYI"); - // return ICE->getCastKind() == CK_NullToPointer && - // CGF.getTypes().isPointerZeroInitializable(E->getType()) && - // !E->HasSideEffects(CGF.getContext()); + return ICE->getCastKind() == CK_NullToPointer && + CGF.getTypes().isPointerZeroInitializable(E->getType()) && + !E->HasSideEffects(CGF.getContext()); } // '\0' if (const CharacterLiteral *CL = dyn_cast(E)) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index fab6d65a833c..163a826b00b3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -850,6 +850,11 @@ CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *RD) { return *I->second; } +bool CIRGenTypes::isPointerZeroInitializable(clang::QualType T) { + assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type"); + return isZeroInitializable(T); +} + bool CIRGenTypes::isZeroInitializable(QualType T) { if (T->getAs()) return Context.getTargetNullPointerValue(T) == 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 1e54d287ec72..0ec564e29385 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -149,6 +149,11 @@ class CIRGenTypes { /// Return whether a type can be zero-initialized (in the C++ sense) with an /// LLVM zeroinitializer. bool isZeroInitializable(clang::QualType T); + + /// Check if the pointer type can be zero-initialized (in the C++ sense) + /// with an LLVM zeroinitializer. + bool isPointerZeroInitializable(clang::QualType T); + /// Return whether a record type can be zero-initialized (in the C++ sense) /// with an LLVM zeroinitializer. bool isZeroInitializable(const clang::RecordDecl *RD); diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c index 84f1a3a9d9ef..ae27805f86fe 100644 --- a/clang/test/CIR/CodeGen/array-init.c +++ b/clang/test/CIR/CodeGen/array-init.c @@ -1,5 +1,31 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s +typedef struct { + int a; + long b; +} T; + +void buz(int x) { + T arr[] = { {0, x}, {0, 0} }; +} +// CHECK: cir.func @buz +// CHECK-NEXT: [[X_ALLOCA:%.*]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: [[ARR:%.*]] = cir.alloca !cir.array, cir.ptr >, ["arr", init] {alignment = 16 : i64} +// CHECK-NEXT: cir.store %arg0, [[X_ALLOCA]] : !s32i, cir.ptr +// CHECK-NEXT: [[ARR_INIT:%.*]] = cir.const(#cir.zero : !cir.array) : !cir.array +// CHECK-NEXT: cir.store [[ARR_INIT]], [[ARR]] : !cir.array, cir.ptr > +// CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: [[A_STORAGE0:%.*]] = cir.get_member [[FI_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: [[B_STORAGE0:%.*]] = cir.get_member [[FI_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: [[X_VAL:%.*]] = cir.load [[X_ALLOCA]] : cir.ptr , !s32i +// CHECK-NEXT: [[X_CASTED:%.*]] = cir.cast(integral, [[X_VAL]] : !s32i), !s64i +// CHECK-NEXT: cir.store [[X_CASTED]], [[B_STORAGE0]] : !s64i, cir.ptr +// CHECK-NEXT: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride([[FI_EL]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: [[A_STORAGE1:%.*]] = cir.get_member [[SE_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: [[B_STORAGE1:%.*]] = cir.get_member [[SE_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: cir.return + void foo() { double bar[] = {9,8,7}; } From 3536643c8ff3399cb1e9bd4f987f53a20649ca21 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Sat, 3 Feb 2024 06:52:55 +0800 Subject: [PATCH 1387/2301] [CIR][CIRGen] Support dynamic_cast to void ptr (#442) This patch adds CIRGen for downcasting a pointer to the complete object through `dynamic_cast`. Together with #426 , the full functionality of `dynamic_cast` should be supported in CIRGen after this PR merges. --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 4 ++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 7 +-- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 17 +++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 55 ++++++++++++++++++- clang/test/CIR/CodeGen/dynamic-cast.cpp | 22 ++++++++ 6 files changed, 93 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index b7dc7b66a4f3..b19cdb111ac3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -310,6 +310,10 @@ class CIRGenCXXABI { QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy) = 0; + + virtual mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF, + mlir::Location Loc, Address Value, + QualType SrcRecordTy) = 0; }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 54483b0b5ea8..e8175cee087c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1461,12 +1461,11 @@ void CIRGenFunction::buildTypeMetadataCodeForVCall(const CXXRecordDecl *RD, } } -mlir::Value CIRGenFunction::getVTablePtr(SourceLocation Loc, Address This, +mlir::Value CIRGenFunction::getVTablePtr(mlir::Location Loc, Address This, mlir::Type VTableTy, const CXXRecordDecl *RD) { - auto loc = getLoc(Loc); - Address VTablePtrSrc = builder.createElementBitCast(loc, This, VTableTy); - auto VTable = builder.createLoad(loc, VTablePtrSrc); + Address VTablePtrSrc = builder.createElementBitCast(Loc, This, VTableTy); + auto VTable = builder.createLoad(Loc, VTablePtrSrc); assert(!UnimplementedFeature::tbaa()); if (CGM.getCodeGenOpts().OptimizationLevel > 0 && diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 7efc6220bc87..22559ce36ad5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -944,7 +944,8 @@ mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, QualType srcRecordTy; QualType destRecordTy; if (isDynCastToVoid) { - llvm_unreachable("NYI"); + srcRecordTy = srcTy->getPointeeType(); + // No destRecordTy. } else if (const PointerType *DestPTy = destTy->getAs()) { srcRecordTy = srcTy->castAs()->getPointeeType(); destRecordTy = DestPTy->getPointeeType(); @@ -970,13 +971,13 @@ mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, auto buildDynamicCastAfterNullCheck = [&]() -> mlir::Value { if (isDynCastToVoid) - llvm_unreachable("NYI"); - else { - assert(destRecordTy->isRecordType() && - "destination type must be a record type!"); - return CGM.getCXXABI().buildDynamicCastCall( - *this, loc, ThisAddr, srcRecordTy, destTy, destRecordTy); - } + return CGM.getCXXABI().buildDynamicCastToVoid(*this, loc, ThisAddr, + srcRecordTy); + + assert(destRecordTy->isRecordType() && + "destination type must be a record type!"); + return CGM.getCXXABI().buildDynamicCastCall( + *this, loc, ThisAddr, srcRecordTy, destTy, destRecordTy); }; if (!shouldNullCheckSrcValue) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index c736615f6f65..2cc38010808c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1298,7 +1298,7 @@ class CIRGenFunction : public CIRGenTypeCache { const clang::CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs); /// Return the Value of the vtable pointer member pointed to by This. - mlir::Value getVTablePtr(SourceLocation Loc, Address This, + mlir::Value getVTablePtr(mlir::Location Loc, Address This, mlir::Type VTableTy, const CXXRecordDecl *VTableClass); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 0f1f43b31913..336a547d8033 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -293,6 +293,10 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { QualType DestTy, QualType DestRecordTy) override; + mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc, + Address Value, + QualType SrcRecordTy) override; + /**************************** RTTI Uniqueness ******************************/ protected: /// Returns true if the ABI requires RTTI type_info objects to be unique @@ -844,7 +848,7 @@ CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer( auto TyPtr = CGF.getBuilder().getPointerTo(Ty); auto *MethodDecl = cast(GD.getDecl()); auto VTable = CGF.getVTablePtr( - Loc, This, CGF.getBuilder().getPointerTo(TyPtr), MethodDecl->getParent()); + loc, This, CGF.getBuilder().getPointerTo(TyPtr), MethodDecl->getParent()); uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD); mlir::Value VFunc{}; @@ -2328,3 +2332,52 @@ mlir::Value CIRGenItaniumCXXABI::buildDynamicCastCall( mlir::Type destCIRTy = CGF.ConvertType(DestTy); return CGF.getBuilder().createBitcast(castedPtr, destCIRTy); } + +mlir::Value CIRGenItaniumCXXABI::buildDynamicCastToVoid(CIRGenFunction &CGF, + mlir::Location Loc, + Address Value, + QualType SrcRecordTy) { + auto *clsDecl = + cast(SrcRecordTy->castAs()->getDecl()); + + // TODO(cir): consider address space in this function. + assert(!UnimplementedFeature::addressSpace()); + + auto loadOffsetToTopFromVTable = + [&](mlir::Type vtableElemTy, CharUnits vtableElemAlign) -> mlir::Value { + mlir::Type vtablePtrTy = CGF.getBuilder().getPointerTo(vtableElemTy); + mlir::Value vtablePtr = CGF.getVTablePtr(Loc, Value, vtablePtrTy, clsDecl); + + // Get the address point in the vtable that contains offset-to-top. + mlir::Value offsetToTopSlotPtr = + CGF.getBuilder().create( + Loc, vtablePtrTy, mlir::FlatSymbolRefAttr{}, vtablePtr, + /*vtable_index=*/0, -2ULL); + return CGF.getBuilder().createAlignedLoad( + Loc, vtableElemTy, offsetToTopSlotPtr, vtableElemAlign); + }; + + // Calculate the offset from the given object to its containing complete + // object. + mlir::Value offsetToTop; + if (CGM.getItaniumVTableContext().isRelativeLayout()) { + offsetToTop = loadOffsetToTopFromVTable(CGF.getBuilder().getSInt32Ty(), + CharUnits::fromQuantity(4)); + } else { + offsetToTop = loadOffsetToTopFromVTable( + CGF.convertType(CGF.getContext().getPointerDiffType()), + CGF.getPointerAlign()); + } + + // Finally, add the offset to the given pointer. + // Cast the input pointer to a uint8_t* to allow pointer arithmetic. + auto u8PtrTy = CGF.getBuilder().getUInt8PtrTy(); + mlir::Value srcBytePtr = + CGF.getBuilder().createBitcast(Value.getPointer(), u8PtrTy); + // Do the pointer arithmetic. + mlir::Value dstBytePtr = CGF.getBuilder().create( + Loc, u8PtrTy, srcBytePtr, offsetToTop); + // Cast the result to a void*. + return CGF.getBuilder().createBitcast(dstBytePtr, + CGF.getBuilder().getVoidPtrTy()); +} diff --git a/clang/test/CIR/CodeGen/dynamic-cast.cpp b/clang/test/CIR/CodeGen/dynamic-cast.cpp index 13c06f266719..52e7a3cee3d0 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast.cpp @@ -50,3 +50,25 @@ Derived &ref_cast(Base &b) { // CHECK-NEXT: cir.call @__cxa_bad_cast() : () -> () // CHECK-NEXT: } // CHECK-NEXT: %{{.+}} = cir.cast(bitcast, %[[#V16]] : !cir.ptr), !cir.ptr + +void *ptr_cast_to_complete(Base *ptr) { + return dynamic_cast(ptr); +} + +// CHECK: cir.func @_Z20ptr_cast_to_completeP4Base +// CHECK: %[[#V19:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr +// CHECK-NEXT: %[[#V20:]] = cir.cast(ptr_to_bool, %[[#V19]] : !cir.ptr), !cir.bool +// CHECK-NEXT: %[[#V21:]] = cir.unary(not, %[[#V20]]) : !cir.bool, !cir.bool +// CHECK-NEXT: %{{.+}} = cir.ternary(%[[#V21]], true { +// CHECK-NEXT: %[[#V22:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK-NEXT: cir.yield %[[#V22]] : !cir.ptr +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[#V23:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr> +// CHECK-NEXT: %[[#V24:]] = cir.load %[[#V23]] : cir.ptr >, !cir.ptr +// CHECK-NEXT: %[[#V25:]] = cir.vtable.address_point( %[[#V24]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : cir.ptr +// CHECK-NEXT: %[[#V26:]] = cir.load %[[#V25]] : cir.ptr , !s64i +// CHECK-NEXT: %[[#V27:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr +// CHECK-NEXT: %[[#V28:]] = cir.ptr_stride(%[[#V27]] : !cir.ptr, %[[#V26]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[#V29:]] = cir.cast(bitcast, %[[#V28]] : !cir.ptr), !cir.ptr +// CHECK-NEXT: cir.yield %[[#V29]] : !cir.ptr +// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr From 188981833753964b84178cc5a2fd811b1d114884 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Sat, 3 Feb 2024 01:59:49 +0300 Subject: [PATCH 1388/2301] [CIR][CodeGen][Lowering] Supports arrays with trailing zeros (#393) This PR adds support for constant arrays with trailing zeros. The original `CodeGen` does the following: once a constant array contain trailing zeros, a struct with two members is generated: initialized elements and `zeroinitializer` for the remaining part. And depending on some conditions, `memset` or `memcpy` are emitted. In the latter case a global const array is created. Well, we may go this way, but it requires us to implement [features](https://github.com/llvm/clangir/blob/main/clang/lib/CIR/CodeGen/CIRGenDecl.cpp#L182) that are not implemented yet. Another option is to add one more parameter to the `constArrayAttr` and utilize it during the lowering. So far I chose this way, but if you have any doubts, we can discuss here. So we just emit constant array as usually and once there are trailing zeros, lower this arrray (i.e. an attribute) as a value. I added a couple of tests and will add more, once we agree on the approach. So far I marked the PR as a draft one. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 16 +++++++++-- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 10 ++++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 25 ++++++++++++++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 28 +++++++++++++++++-- clang/test/CIR/CodeGen/const-array.c | 10 +++++++ clang/test/CIR/Lowering/const.cir | 13 +++++++++ 6 files changed, 92 insertions(+), 10 deletions(-) create mode 100644 clang/test/CIR/CodeGen/const-array.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index b9fe93b98de6..4ad2b4fbbe9a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -116,14 +116,22 @@ def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> }]; let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "Attribute":$elts); + "Attribute":$elts, + "int":$trailingZerosNum); // Define a custom builder for the type; that removes the need to pass // in an MLIRContext instance, as it can be infered from the `type`. let builders = [ AttrBuilderWithInferredContext<(ins "mlir::cir::ArrayType":$type, "Attribute":$elts), [{ - return $_get(type.getContext(), type, elts); + int zeros = 0; + auto typeSize = type.cast().getSize(); + if (auto str = elts.dyn_cast()) + zeros = typeSize - str.size(); + else + zeros = typeSize - elts.cast().size(); + + return $_get(type.getContext(), type, elts, zeros); }]> ]; @@ -132,6 +140,10 @@ def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> // Enable verifier. let genVerifyDecl = 1; + + let extraClassDeclaration = [{ + bool hasTrailingZeros() const { return getTrailingZerosNum() != 0; }; + }]; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 2fb84275751c..0241c7b3d5a9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -959,10 +959,18 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, // Add a zeroinitializer array filler if we have lots of trailing zeroes. unsigned TrailingZeroes = ArrayBound - NonzeroLength; if (TrailingZeroes >= 8) { - assert(0 && "NYE"); assert(Elements.size() >= NonzeroLength && "missing initializer for non-zero element"); + SmallVector Eles; + Eles.reserve(Elements.size()); + for (auto const &Element : Elements) + Eles.push_back(Element); + + return builder.getConstArray( + mlir::ArrayAttr::get(builder.getContext(), Eles), + mlir::cir::ArrayType::get(builder.getContext(), CommonElementType, + ArrayBound)); // TODO(cir): If all the elements had the same type up to the trailing // zeroes, emit a struct of two arrays (the nonzero data and the // zeroinitializer). Use DesiredType to get the element type. diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a5ac89d45b9f..f69e15cc21db 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2295,7 +2295,7 @@ mlir::OpTrait::impl::verifySameFirstSecondOperandAndResultType(Operation *op) { LogicalResult mlir::cir::ConstArrayAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ::mlir::Type type, Attribute attr) { + ::mlir::Type type, Attribute attr, int trailingZerosNum) { if (!(attr.isa() || attr.isa())) return emitError() << "constant array expects ArrayAttr or StringAttr"; @@ -2318,7 +2318,7 @@ LogicalResult mlir::cir::ConstArrayAttr::verify( auto at = type.cast(); // Make sure both number of elements and subelement types match type. - if (at.getSize() != arrayAttr.size()) + if (at.getSize() != arrayAttr.size() + trailingZerosNum) return emitError() << "constant array size should match type size"; LogicalResult eltTypeCheck = success(); arrayAttr.walkImmediateSubElements( @@ -2383,16 +2383,33 @@ ::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser, } } + auto zeros = 0; + if (parser.parseOptionalComma().succeeded()) { + if (parser.parseOptionalKeyword("trailing_zeros").succeeded()) { + auto typeSize = resultTy.value().cast().getSize(); + auto elts = resultVal.value(); + if (auto str = elts.dyn_cast()) + zeros = typeSize - str.size(); + else + zeros = typeSize - elts.cast().size(); + } else { + return {}; + } + } + // Parse literal '>' if (parser.parseGreater()) return {}; - return parser.getChecked(loc, parser.getContext(), - resultTy.value(), resultVal.value()); + + return parser.getChecked( + loc, parser.getContext(), resultTy.value(), resultVal.value(), zeros); } void ConstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << "<"; printer.printStrippedAttrOrType(getElts()); + if (auto zeros = getTrailingZerosNum()) + printer << ", trailing_zeros"; printer << ">"; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ce5063285e0d..6d2aa9875a8d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -258,7 +258,15 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(constArr.getType()); auto loc = parentOp->getLoc(); - mlir::Value result = rewriter.create(loc, llvmTy); + mlir::Value result; + + if (auto zeros = constArr.getTrailingZerosNum()) { + auto arrayTy = constArr.getType(); + result = rewriter.create( + loc, converter->convertType(arrayTy)); + } else { + result = rewriter.create(loc, llvmTy); + } // Iteratively lower each constant element of the array. if (auto arrayAttr = constArr.getElts().dyn_cast()) { @@ -1069,6 +1077,15 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, return std::nullopt; } +bool hasTrailingZeros(mlir::cir::ConstArrayAttr attr) { + auto array = attr.getElts().dyn_cast(); + return attr.hasTrailingZeros() || + (array && std::count_if(array.begin(), array.end(), [](auto elt) { + auto ar = dyn_cast(elt); + return ar && hasTrailingZeros(ar); + })); +} + class CIRConstantLowering : public mlir::OpConversionPattern { public: @@ -1120,8 +1137,13 @@ class CIRConstantLowering return op.emitError() << "array does not have a constant initializer"; std::optional denseAttr; - if (constArr && - (denseAttr = lowerConstArrayAttr(constArr, typeConverter))) { + if (constArr && hasTrailingZeros(constArr)) { + auto newOp = + lowerCirAttrAsValue(op, constArr, rewriter, getTypeConverter()); + rewriter.replaceOp(op, newOp); + return mlir::success(); + } else if (constArr && + (denseAttr = lowerConstArrayAttr(constArr, typeConverter))) { attr = denseAttr.value(); } else { auto initVal = diff --git a/clang/test/CIR/CodeGen/const-array.c b/clang/test/CIR/CodeGen/const-array.c new file mode 100644 index 000000000000..c75ba59b8f17 --- /dev/null +++ b/clang/test/CIR/CodeGen/const-array.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +void foo() { + int a[10] = {1}; +} + +// CHECK: cir.func {{.*@foo}} +// CHECK: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK: %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array) : !cir.array +// CHECK: cir.store %1, %0 : !cir.array, cir.ptr > diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 7d0c63f8ccbb..46b4677d40b4 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -54,4 +54,17 @@ module { // CHECK: llvm.store %8, %1 : !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>>, !llvm.ptr // CHECK: llvm.return + cir.func @testArrWithTrailingZeros() { + %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} + %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array) : !cir.array + cir.store %1, %0 : !cir.array, cir.ptr > + cir.return + } + // CHECK: llvm.func @testArrWithTrailingZeros() + // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 + // CHECK: %1 = llvm.alloca %0 x !llvm.array<10 x i32> {alignment = 16 : i64} : (i64) -> !llvm.ptr + // CHECK: %2 = cir.llvmir.zeroinit : !llvm.array<10 x i32> + // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.array<10 x i32> + } From a0152d181a47b8cffd9838970fc9cfeed3522d6e Mon Sep 17 00:00:00 2001 From: Nikolas Klauser Date: Sat, 3 Feb 2024 00:48:49 +0100 Subject: [PATCH 1389/2301] [CIR][LibOpt] Extend std::find optimization to all calls with raw pointers (#400) This also adds a missing check whether the pointer returned from `memchr` is null and changes the result to `last` in that case. --- clang/lib/CIR/Dialect/Transforms/LibOpt.cpp | 105 ++++++++++++-------- clang/test/CIR/Transforms/lib-opt-find.cpp | 50 ++++++++-- 2 files changed, 110 insertions(+), 45 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp index 2422613a5315..762ee961bcba 100644 --- a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp @@ -120,29 +120,18 @@ static bool containerHasStaticSize(StructType t, unsigned &size) { } void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { - // First and second operands need to be iterators begin() and end(). - // TODO: look over cir.loads until we have a mem2reg + other passes - // to help out here. - auto iterBegin = dyn_cast(findOp.getOperand(0).getDefiningOp()); - if (!iterBegin) - return; - if (!isa(findOp.getOperand(1).getDefiningOp())) - return; - - // Both operands have the same type, use iterBegin. - - // Look at this pointer to retrieve container information. - auto thisPtr = - iterBegin.getOperand().getType().cast().getPointee(); - auto containerTy = dyn_cast(thisPtr); - if (!containerTy) - return; - - if (!isSequentialContainer(containerTy)) - return; - - unsigned staticSize = 0; - if (!containerHasStaticSize(containerTy, staticSize)) + // template + // requires (sizeof(T) == 1 && is_integral_v) + // T* find(T* first, T* last, T value) { + // if (auto result = __builtin_memchr(first, value, last - first)) + // return result; + // return last; + // } + + auto first = findOp.getOperand(0); + auto last = findOp.getOperand(1); + auto value = findOp->getOperand(2); + if (!first.getType().isa() || !last.getType().isa()) return; // Transformation: @@ -150,9 +139,9 @@ void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { // - Assert the Iterator is a pointer to primitive type. // - Check IterBeginOp is char sized. TODO: add other types that map to // char size. - auto iterResTy = iterBegin.getResult().getType().dyn_cast(); + auto iterResTy = findOp.getType().dyn_cast(); assert(iterResTy && "expected pointer type for iterator"); - auto underlyingDataTy = iterResTy.getPointee().dyn_cast(); + auto underlyingDataTy = iterResTy.getPointee().dyn_cast(); if (!underlyingDataTy || underlyingDataTy.getWidth() != 8) return; @@ -160,7 +149,7 @@ void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { // - Check it's a pointer type. // - Load the pattern from memory // - cast it to `int`. - auto patternAddrTy = findOp.getOperand(2).getType().dyn_cast(); + auto patternAddrTy = value.getType().dyn_cast(); if (!patternAddrTy || patternAddrTy.getPointee() != underlyingDataTy) return; @@ -169,27 +158,65 @@ void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(findOp.getOperation()); - auto memchrOp0 = builder.createBitcast( - iterBegin.getLoc(), iterBegin.getResult(), builder.getVoidPtrTy()); + auto memchrOp0 = + builder.createBitcast(first.getLoc(), first, builder.getVoidPtrTy()); // FIXME: get datalayout based "int" instead of fixed size 4. - auto loadPattern = builder.create( - findOp.getOperand(2).getLoc(), underlyingDataTy, findOp.getOperand(2)); + auto loadPattern = + builder.create(value.getLoc(), underlyingDataTy, value); auto memchrOp1 = builder.createIntCast( loadPattern, IntType::get(builder.getContext(), 32, true)); - // FIXME: get datalayout based "size_t" instead of fixed size 64. - auto uInt64Ty = IntType::get(builder.getContext(), 64, false); - auto memchrOp2 = builder.create( - findOp.getLoc(), uInt64Ty, mlir::cir::IntAttr::get(uInt64Ty, staticSize)); + const auto uInt64Ty = IntType::get(builder.getContext(), 64, false); // Build memchr op: // void *memchr(const void *s, int c, size_t n); - auto memChr = builder.create(findOp.getLoc(), memchrOp0, memchrOp1, - memchrOp2); - mlir::Operation *result = - builder.createBitcast(findOp.getLoc(), memChr.getResult(), iterResTy) - .getDefiningOp(); + auto memChr = [&] { + if (auto iterBegin = dyn_cast(first.getDefiningOp()); + iterBegin && isa(last.getDefiningOp())) { + // Both operands have the same type, use iterBegin. + + // Look at this pointer to retrieve container information. + auto thisPtr = + iterBegin.getOperand().getType().cast().getPointee(); + auto containerTy = dyn_cast(thisPtr); + + unsigned staticSize = 0; + if (containerTy && isSequentialContainer(containerTy) && + containerHasStaticSize(containerTy, staticSize)) { + return builder.create( + findOp.getLoc(), memchrOp0, memchrOp1, + builder.create( + findOp.getLoc(), uInt64Ty, + mlir::cir::IntAttr::get(uInt64Ty, staticSize))); + } + } + return builder.create( + findOp.getLoc(), memchrOp0, memchrOp1, + builder.create(findOp.getLoc(), uInt64Ty, last, first)); + }(); + + auto MemChrResult = + builder.createBitcast(findOp.getLoc(), memChr.getResult(), iterResTy); + + // if (result) + // return result; + // else + // return last; + auto NullPtr = builder.create( + findOp.getLoc(), first.getType(), ConstPtrAttr::get(first.getType(), 0)); + auto CmpResult = builder.create( + findOp.getLoc(), BoolType::get(builder.getContext()), CmpOpKind::eq, + NullPtr.getRes(), MemChrResult); + + auto result = builder.create( + findOp.getLoc(), CmpResult.getResult(), + [&](mlir::OpBuilder &ob, mlir::Location Loc) { + ob.create(Loc, last); + }, + [&](mlir::OpBuilder &ob, mlir::Location Loc) { + ob.create(Loc, MemChrResult); + }); findOp.replaceAllUsesWith(result); findOp.erase(); diff --git a/clang/test/CIR/Transforms/lib-opt-find.cpp b/clang/test/CIR/Transforms/lib-opt-find.cpp index a1a3f81d065d..4812e72d8037 100644 --- a/clang/test/CIR/Transforms/lib-opt-find.cpp +++ b/clang/test/CIR/Transforms/lib-opt-find.cpp @@ -3,16 +3,18 @@ #include "std-cxx.h" -int test_find(unsigned char n = 3) +int test1(unsigned char n = 3) { + // CHECK: test1 unsigned num_found = 0; // CHECK: %[[pattern_addr:.*]] = cir.alloca !u8i, cir.ptr , ["n" std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; auto f = std::find(v.begin(), v.end(), n); - // CHECK: %[[begin:.*]] = cir.call @_ZNSt5arrayIhLj9EE5beginEv - // CHECK: cir.call @_ZNSt5arrayIhLj9EE3endEv - // CHECK: %[[cast_to_void:.*]] = cir.cast(bitcast, %[[begin]] : !cir.ptr), !cir.ptr + + // CHECK: %[[first:.*]] = cir.call @_ZNSt5arrayIhLj9EE5beginEv + // CHECK: %[[last:.*]] = cir.call @_ZNSt5arrayIhLj9EE3endEv + // CHECK: %[[cast_to_void:.*]] = cir.cast(bitcast, %[[first]] : !cir.ptr), !cir.ptr // CHECK: %[[load_pattern:.*]] = cir.load %[[pattern_addr]] : cir.ptr , !u8i // CHECK: %[[pattern:.*]] = cir.cast(integral, %[[load_pattern:.*]] : !u8i), !s32i @@ -20,9 +22,45 @@ int test_find(unsigned char n = 3) // CHECK: %[[array_size:.*]] = cir.const(#cir.int<9> : !u64i) : !u64i // CHECK: %[[result_cast:.*]] = cir.libc.memchr(%[[cast_to_void]], %[[pattern]], %[[array_size]]) - // CHECK: cir.cast(bitcast, %[[result_cast]] : !cir.ptr), !cir.ptr + // CHECK: %[[memchr_res:.*]] = cir.cast(bitcast, %[[result_cast]] : !cir.ptr), !cir.ptr + // CHECK: %[[nullptr:.*]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr + // CHECK: %[[cmp_res:.*]] = cir.cmp(eq, %[[nullptr]], %[[memchr_res]]) : !cir.ptr, !cir.bool + // CHECK: cir.ternary(%[[cmp_res]], true { + // CHECK: cir.yield %[[last]] : !cir.ptr + // CHECK: }, false { + // CHECK: cir.yield %[[memchr_res]] : !cir.ptr + // CHECK: }) : (!cir.bool) -> !cir.ptr + if (f != v.end()) num_found++; return num_found; -} \ No newline at end of file +} + +unsigned char* test2(unsigned char* first, unsigned char* last, unsigned char v) +{ + return std::find(first, last, v); + // CHECK: test2 + + // CHECK: %[[first_storage:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["first", init] + // CHECK: %[[last_storage:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["last", init] + // CHECK: %[[pattern_storage:.*]] = cir.alloca !u8i, cir.ptr , ["v", init] + // CHECK: %[[first:.*]] = cir.load %[[first_storage]] + // CHECK: %[[last:.*]] = cir.load %[[last_storage]] + // CHECK: %[[cast_to_void:.*]] = cir.cast(bitcast, %[[first]] : !cir.ptr), !cir.ptr + // CHECK: %[[load_pattern:.*]] = cir.load %[[pattern_storage]] : cir.ptr , !u8i + // CHECK: %[[pattern:.*]] = cir.cast(integral, %[[load_pattern:.*]] : !u8i), !s32i + + // CHECK-NOT: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( + // CHECK: %[[array_size:.*]] = cir.ptr_diff(%[[last]], %[[first]]) : !cir.ptr -> !u64i + + // CHECK: %[[result_cast:.*]] = cir.libc.memchr(%[[cast_to_void]], %[[pattern]], %[[array_size]]) + // CHECK: %[[memchr_res:.*]] = cir.cast(bitcast, %[[result_cast]] : !cir.ptr), !cir.ptr + // CHECK: %[[nullptr:.*]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr + // CHECK: %[[cmp_res:.*]] = cir.cmp(eq, %[[nullptr]], %[[memchr_res]]) : !cir.ptr, !cir.bool + // CHECK: cir.ternary(%[[cmp_res]], true { + // CHECK: cir.yield %[[last]] : !cir.ptr + // CHECK: }, false { + // CHECK: cir.yield %[[memchr_res]] : !cir.ptr + // CHECK: }) : (!cir.bool) -> !cir.ptr +} From 0cc504c33143c9c14c2ff2dcd98eeb873dcd62f9 Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Fri, 2 Feb 2024 16:10:41 -0800 Subject: [PATCH 1390/2301] [CIR][CIRGen] Implement "if consteval" code generation (#446) Emit the false-branch of the consteval if statement, if any. --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 11 +++++++-- clang/test/CIR/CodeGen/if-consteval.cpp | 33 +++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/if-consteval.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index ed702465924a..35270246f071 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -371,16 +371,23 @@ static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r, } mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { + mlir::LogicalResult res = mlir::success(); // The else branch of a consteval if statement is always the only branch // that can be runtime evaluated. + const Stmt *ConstevalExecuted; if (S.isConsteval()) { - llvm_unreachable("consteval nyi"); + ConstevalExecuted = S.isNegatedConsteval() ? S.getThen() : S.getElse(); + if (!ConstevalExecuted) + // No runtime code execution required + return res; } - mlir::LogicalResult res = mlir::success(); // C99 6.8.4.1: The first substatement is executed if the expression // compares unequal to 0. The condition must be a scalar type. auto ifStmtBuilder = [&]() -> mlir::LogicalResult { + if (S.isConsteval()) + return buildStmt(ConstevalExecuted, /*useCurrentScope=*/true); + if (S.getInit()) if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); diff --git a/clang/test/CIR/CodeGen/if-consteval.cpp b/clang/test/CIR/CodeGen/if-consteval.cpp new file mode 100644 index 000000000000..97468beb0ac5 --- /dev/null +++ b/clang/test/CIR/CodeGen/if-consteval.cpp @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -std=c++23 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void should_be_used_1(); +void should_be_used_2(); +void should_be_used_3(); +constexpr void should_not_be_used() {} + +constexpr void f() { + if consteval { + should_not_be_used(); // CHECK-NOT: call {{.*}}should_not_be_used + } else { + should_be_used_1(); // CHECK: call {{.*}}should_be_used_1 + } + + if !consteval { + should_be_used_2(); // CHECK: call {{.*}}should_be_used_2 + } else { + should_not_be_used(); // CHECK-NOT: call {{.*}}should_not_be_used + } + + if consteval { + should_not_be_used(); // CHECK-NOT: call {{.*}}should_not_be_used + } + + if !consteval { + should_be_used_3(); // CHECK: call {{.*}}should_be_used_3 + } +} + +void g() { + f(); +} From 9ac342c4b8a3f77bcdee32d6ea70d8411d3ef0fa Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Mon, 5 Feb 2024 07:14:59 +0800 Subject: [PATCH 1391/2301] [CIR] Allow mlir::UnknownLoc in function op (#448) Originally, the location associated with a function is checked to be an `mlir::FileLineColLoc` before the function is lowered to an LLVMIR FuncOp. However, runtime function declarations do not have such locations. This patch further allows `mlir::UnknownLoc` to be associated with a function. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6d2aa9875a8d..ff8d31a19d3d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1397,7 +1397,8 @@ class CIRFuncLowering : public mlir::OpConversionPattern { auto FusedLoc = Loc.cast(); Loc = FusedLoc.getLocations()[0]; } - assert(Loc.isa() && "expected single location here"); + assert((Loc.isa() || Loc.isa()) && + "expected single location or unknown location here"); auto linkage = convertLinkage(op.getLinkage()); SmallVector attributes; From 8180c1d0a1a0e938458c2e9735fba5844eb6c282 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Mon, 5 Feb 2024 23:20:09 +0300 Subject: [PATCH 1392/2301] [CIR][CodeGen] Const structs with bitfields (#412) This PR adds a support for const structs with bitfields. Now only global structs are supported, the support of the local ones can be added more or less easily - there is one ugly thing need to be done though) So .. what is all about. First of all - as usually, I'm sorry for the big PR. But it's hard to break it down to peaces. The good news is that in the same time it's a copy-pasta from the original codegen, no surprises here. Basically, the most hard place to read is `ConstantAggregateBuilder::addBits` copied with minimum of changes. The main problem - and frankly speaking I have no idea why it's done this way in the original codegen - is that the data layout is different for such structures, I mean literally another type is used. For instance, the code: ``` struct T { int X : 15; int Y : 6; unsigned Z : 9; int W; }; struct T GV = { 1, 5, 256, -1}; ``` is represented in LLVM IR (with no CIR enabled) as: ``` %struct.T = type { i32, i32 } %struct.Inner = type { i8, i32 } @GV = dso_local global { i8, i8, i8, i8, i32 } ... ``` i.e. the global var `GV` is looks like a struct of single bytes (up to the last field, which is not a btfield). And my guess is that we want to have the same behavior in CIR. So we do. The main problem is that we have to treat the same data differently - and this is why one additional `bitcast` is needed when we create a global var. Actually, there was a comment there - and I really wonder where it came from. But anyways, I don't really like this and don't see any good workaround here. Well, maybe we may add a kind of map in order to store the correspondence between types and do a bitcast more wisely. The same is true for the const structs with bitfields defined locally. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 1 + clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 2 + clang/lib/CIR/CodeGen/CIRDataLayout.h | 2 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 16 ++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 196 ++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 7 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 10 + clang/test/CIR/CodeGen/const-bitfields.c | 47 +++++ 9 files changed, 271 insertions(+), 20 deletions(-) create mode 100644 clang/test/CIR/CodeGen/const-bitfields.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 4ad2b4fbbe9a..9fc8ce9efad0 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -209,6 +209,7 @@ def IntAttr : CIR_Attr<"Int", "int", [TypedAttrInterface]> { int64_t getSInt() const { return getValue().getSExtValue(); } uint64_t getUInt() const { return getValue().getZExtValue(); } bool isNullValue() const { return getValue() == 0; } + uint64_t getBitWidth() const { return getType().cast().getWidth(); } }]; let genVerifyDecl = 1; let hasCustomAssemblyFormat = 1; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 40a3486e12ee..46835e548a69 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -169,6 +169,8 @@ class StructType uint64_t getPreferredAlignment(const DataLayout &dataLayout, DataLayoutEntryListRef params) const; + bool isLayoutIdentical(const StructType &other); + // Utilities for lazily computing and cacheing data layout info. private: mutable Type largestMember{}; diff --git a/clang/lib/CIR/CodeGen/CIRDataLayout.h b/clang/lib/CIR/CodeGen/CIRDataLayout.h index b1b10ba6b6da..bc4c7762d5bc 100644 --- a/clang/lib/CIR/CodeGen/CIRDataLayout.h +++ b/clang/lib/CIR/CodeGen/CIRDataLayout.h @@ -26,7 +26,7 @@ class CIRDataLayout { mlir::DataLayout layout; CIRDataLayout(mlir::ModuleOp modOp); - bool isBigEndian() { return bigEndian; } + bool isBigEndian() const { return bigEndian; } // `useABI` is `true` if not using prefered alignment. unsigned getAlignment(mlir::Type ty, bool useABI) const { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 3da914fcb150..e866fe50638d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -470,6 +470,22 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return type; } + mlir::cir::StructType + getCompleteStructType(mlir::ArrayAttr fields, bool packed = false, + llvm::StringRef name = "", + const clang::RecordDecl *ast = nullptr) { + llvm::SmallVector members; + for (auto &attr : fields) { + const auto typedAttr = attr.dyn_cast(); + members.push_back(typedAttr.getType()); + } + + if (name.empty()) + return getAnonStructTy(members, packed, ast); + else + return getCompleteStructTy(members, name, packed, ast); + } + mlir::cir::ArrayType getArrayType(mlir::Type eltType, unsigned size) { return mlir::cir::ArrayType::get(getContext(), eltType, size); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index d0a02508cbc1..d8327706b457 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -700,9 +700,15 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, llvm_unreachable("not implemented"); auto V = CGF.CGM.getAddrOfGlobalVar(VD); + + if (VD->getTLSKind() != VarDecl::TLS_None) + llvm_unreachable("NYI"); + auto RealVarTy = CGF.getTypes().convertTypeForMem(VD->getType()); - // TODO(cir): do we need this for CIR? - // V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); + auto realPtrTy = CGF.getBuilder().getPointerTo(RealVarTy); + if (realPtrTy != V.getType()) + V = CGF.getBuilder().createBitcast(V.getLoc(), V, realPtrTy); + CharUnits Alignment = CGF.getContext().getDeclAlign(VD); Address Addr(V, RealVarTy, Alignment); // Emit reference to the private copy of the variable if it is an OpenMP diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 0241c7b3d5a9..2d87d679f185 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -67,8 +67,13 @@ struct ConstantAggregateBuilderUtils { return getSize(C.getType()); } - mlir::Attribute getPadding(CharUnits PadSize) const { - llvm_unreachable("NYI"); + mlir::TypedAttr getPadding(CharUnits size) const { + auto eltTy = CGM.UCharTy; + auto arSize = size.getQuantity(); + auto &bld = CGM.getBuilder(); + SmallVector elts(arSize, bld.getZeroAttr(eltTy)); + return bld.getConstArray(mlir::ArrayAttr::get(bld.getContext(), elts), + bld.getArrayType(eltTy, arSize)); } mlir::Attribute getZeroes(CharUnits ZeroSize) const { @@ -186,7 +191,111 @@ bool ConstantAggregateBuilder::add(mlir::Attribute A, CharUnits Offset, bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits, bool AllowOverwrite) { - llvm_unreachable("NYI"); + const ASTContext &Context = CGM.getASTContext(); + const uint64_t CharWidth = CGM.getASTContext().getCharWidth(); + auto charTy = CGM.getBuilder().getUIntNTy(CharWidth); + // Offset of where we want the first bit to go within the bits of the + // current char. + unsigned OffsetWithinChar = OffsetInBits % CharWidth; + + // We split bit-fields up into individual bytes. Walk over the bytes and + // update them. + for (CharUnits OffsetInChars = + Context.toCharUnitsFromBits(OffsetInBits - OffsetWithinChar); + /**/; ++OffsetInChars) { + // Number of bits we want to fill in this char. + unsigned WantedBits = + std::min((uint64_t)Bits.getBitWidth(), CharWidth - OffsetWithinChar); + + // Get a char containing the bits we want in the right places. The other + // bits have unspecified values. + llvm::APInt BitsThisChar = Bits; + if (BitsThisChar.getBitWidth() < CharWidth) + BitsThisChar = BitsThisChar.zext(CharWidth); + if (CGM.getDataLayout().isBigEndian()) { + // Figure out how much to shift by. We may need to left-shift if we have + // less than one byte of Bits left. + int Shift = Bits.getBitWidth() - CharWidth + OffsetWithinChar; + if (Shift > 0) + BitsThisChar.lshrInPlace(Shift); + else if (Shift < 0) + BitsThisChar = BitsThisChar.shl(-Shift); + } else { + BitsThisChar = BitsThisChar.shl(OffsetWithinChar); + } + if (BitsThisChar.getBitWidth() > CharWidth) + BitsThisChar = BitsThisChar.trunc(CharWidth); + + if (WantedBits == CharWidth) { + // Got a full byte: just add it directly. + add(mlir::cir::IntAttr::get(charTy, BitsThisChar), OffsetInChars, + AllowOverwrite); + } else { + // Partial byte: update the existing integer if there is one. If we + // can't split out a 1-CharUnit range to update, then we can't add + // these bits and fail the entire constant emission. + std::optional FirstElemToUpdate = splitAt(OffsetInChars); + if (!FirstElemToUpdate) + return false; + std::optional LastElemToUpdate = + splitAt(OffsetInChars + CharUnits::One()); + if (!LastElemToUpdate) + return false; + assert(*LastElemToUpdate - *FirstElemToUpdate < 2 && + "should have at most one element covering one byte"); + + // Figure out which bits we want and discard the rest. + llvm::APInt UpdateMask(CharWidth, 0); + if (CGM.getDataLayout().isBigEndian()) + UpdateMask.setBits(CharWidth - OffsetWithinChar - WantedBits, + CharWidth - OffsetWithinChar); + else + UpdateMask.setBits(OffsetWithinChar, OffsetWithinChar + WantedBits); + BitsThisChar &= UpdateMask; + bool isNull = false; + if (*FirstElemToUpdate < Elems.size()) { + auto firstEltToUpdate = + dyn_cast(Elems[*FirstElemToUpdate]); + isNull = firstEltToUpdate && firstEltToUpdate.isNullValue(); + } + + if (*FirstElemToUpdate == *LastElemToUpdate || isNull) { + // All existing bits are either zero or undef. + add(CGM.getBuilder().getAttr(charTy, BitsThisChar), + OffsetInChars, /*AllowOverwrite*/ true); + } else { + mlir::cir::IntAttr CI = + dyn_cast(Elems[*FirstElemToUpdate]); + // In order to perform a partial update, we need the existing bitwise + // value, which we can only extract for a constant int. + // auto *CI = dyn_cast(ToUpdate); + if (!CI) + return false; + // Because this is a 1-CharUnit range, the constant occupying it must + // be exactly one CharUnit wide. + assert(CI.getBitWidth() == CharWidth && "splitAt failed"); + assert((!(CI.getValue() & UpdateMask) || AllowOverwrite) && + "unexpectedly overwriting bitfield"); + BitsThisChar |= (CI.getValue() & ~UpdateMask); + Elems[*FirstElemToUpdate] = + CGM.getBuilder().getAttr(charTy, BitsThisChar); + } + } + + // Stop if we've added all the bits. + if (WantedBits == Bits.getBitWidth()) + break; + + // Remove the consumed bits from Bits. + if (!CGM.getDataLayout().isBigEndian()) + Bits.lshrInPlace(WantedBits); + Bits = Bits.trunc(Bits.getBitWidth() - WantedBits); + + // The remanining bits go at the start of the following bytes. + OffsetWithinChar = 0; + } + + return true; } /// Returns a position within Elems and Offsets such that all elements @@ -236,6 +345,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( if (Elems.empty()) return {}; + auto Offset = [&](size_t I) { return Offsets[I] - StartOffset; }; // If we want an array type, see if all the elements are the same type and // appropriately spaced. @@ -276,14 +386,44 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( // as a non-packed struct and do so opportunistically if possible. llvm::SmallVector PackedElems; if (!NaturalLayout) { - llvm_unreachable("NYI"); + CharUnits SizeSoFar = CharUnits::Zero(); + for (size_t I = 0; I != Elems.size(); ++I) { + mlir::TypedAttr C = Elems[I].dyn_cast(); + assert(C && "expected typed attribute"); + + CharUnits Align = Utils.getAlignment(C); + CharUnits NaturalOffset = SizeSoFar.alignTo(Align); + CharUnits DesiredOffset = Offset(I); + assert(DesiredOffset >= SizeSoFar && "elements out of order"); + + if (DesiredOffset != NaturalOffset) + Packed = true; + if (DesiredOffset != SizeSoFar) + PackedElems.push_back(Utils.getPadding(DesiredOffset - SizeSoFar)); + PackedElems.push_back(Elems[I]); + SizeSoFar = DesiredOffset + Utils.getSize(C); + } + // If we're using the packed layout, pad it out to the desired size if + // necessary. + if (Packed) { + assert(SizeSoFar <= DesiredSize && + "requested size is too small for contents"); + + if (SizeSoFar < DesiredSize) + PackedElems.push_back(Utils.getPadding(DesiredSize - SizeSoFar)); + } } - // TODO(cir): emit a #cir.zero if all elements are null values. auto &builder = CGM.getBuilder(); auto arrAttr = mlir::ArrayAttr::get(builder.getContext(), Packed ? PackedElems : UnpackedElems); - return builder.getConstStructOrZeroAttr(arrAttr, Packed, DesiredTy); + auto strType = builder.getCompleteStructType(arrAttr, Packed); + + if (auto desired = dyn_cast(DesiredTy)) + if (desired.isLayoutIdentical(strType)) + strType = desired; + + return builder.getConstStructOrZeroAttr(arrAttr, Packed, strType); } void ConstantAggregateBuilder::condense(CharUnits Offset, @@ -353,7 +493,7 @@ class ConstStructBuilder { bool AllowOverwrite = false); bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, - mlir::IntegerAttr InitExpr, bool AllowOverwrite = false); + mlir::cir::IntAttr InitExpr, bool AllowOverwrite = false); bool Build(InitListExpr *ILE, bool AllowOverwrite); bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase, @@ -380,9 +520,26 @@ bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars, bool ConstStructBuilder::AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, - mlir::IntegerAttr CI, + mlir::cir::IntAttr CI, bool AllowOverwrite) { - llvm_unreachable("NYI"); + const auto &RL = CGM.getTypes().getCIRGenRecordLayout(Field->getParent()); + const auto &Info = RL.getBitFieldInfo(Field); + llvm::APInt FieldValue = CI.getValue(); + + // Promote the size of FieldValue if necessary + // FIXME: This should never occur, but currently it can because initializer + // constants are cast to bool, and because clang is not enforcing bitfield + // width limits. + if (Info.Size > FieldValue.getBitWidth()) + FieldValue = FieldValue.zext(Info.Size); + + // Truncate the size of FieldValue to the bit field size. + if (Info.Size < FieldValue.getBitWidth()) + FieldValue = FieldValue.trunc(Info.Size); + + return Builder.addBits(FieldValue, + CGM.getASTContext().toBits(StartOffset) + FieldOffset, + AllowOverwrite); } static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter, @@ -513,7 +670,16 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) { if (Field->hasAttr()) AllowOverwrite = true; } else { - llvm_unreachable("NYI"); + // Otherwise we have a bitfield. + if (auto constInt = dyn_cast(EltInit)) { + if (!AppendBitField(Field, Layout.getFieldOffset(FieldNo), constInt, + AllowOverwrite)) + return false; + } else { + // We are trying to initialize a bitfield with a non-trivial constant, + // this must require run-time code. + return false; + } } } @@ -994,9 +1160,13 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, ArrayBound)); } - // We have mixed types. Use a packed struct. - assert(0 && "NYE"); - return {}; + SmallVector Eles; + Eles.reserve(Elements.size()); + for (auto const &Element : Elements) + Eles.push_back(Element); + + auto arrAttr = mlir::ArrayAttr::get(builder.getContext(), Eles); + return builder.getAnonConstStruct(arrAttr, false); } } // end anonymous namespace. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 25b76b95a5ad..91cc63489033 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -654,11 +654,10 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // TODO(cir): LLVM codegen makes sure the result is of the correct type // by issuing a address space cast. - // TODO(cir): - // (In LLVM codgen, if global is requested for a definition, we always need - // to create a new global, otherwise return a bitcast.) + // (If global is requested for a definition, we always need to create a new + // global, not just return a bitcast.) if (!IsForDefinition) - assert(0 && "not implemented"); + return Entry; } // TODO(cir): auto DAddrSpace = GetGlobalVarAddressSpace(D); diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index e5268066fdbf..77a4a58e65a5 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -349,6 +349,16 @@ void StructType::complete(ArrayRef members, bool packed, llvm_unreachable("failed to complete struct"); } +bool StructType::isLayoutIdentical(const StructType &other) { + if (getImpl() == other.getImpl()) + return true; + + if (getPacked() != other.getPacked()) + return false; + + return getMembers() == other.getMembers(); +} + //===----------------------------------------------------------------------===// // Data Layout information for types //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/const-bitfields.c b/clang/test/CIR/CodeGen/const-bitfields.c new file mode 100644 index 000000000000..63ee4a25a671 --- /dev/null +++ b/clang/test/CIR/CodeGen/const-bitfields.c @@ -0,0 +1,47 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o - 2>&1 | FileCheck %s + +struct T { + int X : 5; + int Y : 6; + int Z : 9; + int W; +}; + +struct Inner { + unsigned a : 1; + unsigned b : 1; + unsigned c : 1; + unsigned d : 30; +}; + +// CHECK: !ty_22T22 = !cir.struct +// CHECK: !ty_anon_struct = !cir.struct +// CHECK: #bfi_Z = #cir.bitfield_info +// CHECK: !ty_anon_struct1 = !cir.struct, !u8i, !u8i, !u8i, !u8i}> + +struct T GV = { 1, 5, 256, 42 }; +// CHECK: cir.global external @GV = #cir.const_struct<{#cir.int<161> : !u8i, #cir.int<0> : !u8i, #cir.int<8> : !u8i, #cir.int<42> : !s32i}> : !ty_anon_struct + +// check padding is used (const array of zeros) +struct Inner var = { 1, 0, 1, 21}; +// CHECK: cir.global external @var = #cir.const_struct<{#cir.int<5> : !u8i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array, #cir.int<21> : !u8i, #cir.int<0> : !u8i, #cir.int<0> : !u8i, #cir.int<0> : !u8i}> : !ty_anon_struct1 + + +// CHECK: cir.func {{.*@getZ()}} +// CHECK: %1 = cir.get_global @GV : cir.ptr +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: %3 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr +// CHECK: %4 = cir.get_bitfield(#bfi_Z, %3 : !cir.ptr) -> !s32i +int getZ() { + return GV.Z; +} + +// check the type used is the type of T struct for plain field +// CHECK: cir.func {{.*@getW()}} +// CHECK: %1 = cir.get_global @GV : cir.ptr +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: %3 = cir.get_member %2[1] {name = "W"} : !cir.ptr -> !cir.ptr +int getW() { + return GV.W; +} + From 79d396d0bcb3fa1911d960deee4776c27744600a Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Mon, 5 Feb 2024 23:20:57 +0300 Subject: [PATCH 1393/2301] [CIR][Lowering][Bugfix] Fix lowering of bool_to_int cast (#450) The minimal bug repro: ``` #include #include void bar() { bool x = true; uint8_t y = (uint8_t)x; } ``` Fails on verification stage: ``` loc("repro.c":5:24): error: integer width of the output type is smaller or equal to the integer width of the input type fatal error: error in backend: The pass manager failed to lower CIR to LLVMIR dialect! ``` The problem is that in some cases lowering from CIR emits the invalid zext operation. PR fixes this issue by emitting the llvm.bitcast instead of llvm.zext in such cases. --- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 12 +++++++++--- clang/test/CIR/Lowering/cast.cir | 15 +++++++++++++++ 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ff8d31a19d3d..e6ef8438f0de 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -664,9 +664,15 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { case mlir::cir::CastKind::bool_to_int: { auto dstTy = castOp.getType().cast(); auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - llvmSrcVal); + auto llvmSrcTy = llvmSrcVal.getType().cast(); + auto llvmDstTy = + getTypeConverter()->convertType(dstTy).cast(); + if (llvmSrcTy.getWidth() == llvmDstTy.getWidth()) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); return mlir::success(); } case mlir::cir::CastKind::bool_to_float: { diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 70420c63d8f8..16e8ab968fc5 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -81,4 +81,19 @@ module { %19 = cir.load %2 : cir.ptr , !s32i cir.return %19 : !s32i } + + cir.func @testBoolToIntCast(%arg0: !cir.bool) { + // CHECK: llvm.func @testBoolToIntCast + %0 = cir.alloca !cir.bool, cir.ptr , ["bl", init] {alignment = 1 : i64} + %1 = cir.alloca !u8i, cir.ptr , ["y", init] {alignment = 1 : i64} + cir.store %arg0, %0 : !cir.bool, cir.ptr + + %2 = cir.load %0 : cir.ptr , !cir.bool + %3 = cir.cast(bool_to_int, %2 : !cir.bool), !u8i + // CHECK: %[[LOAD_BOOL:.*]] = llvm.load %{{.*}} : !llvm.ptr -> i8 + // CHECK: %{{.*}} = llvm.bitcast %[[LOAD_BOOL]] : i8 to i8 + + cir.store %3, %1 : !u8i, cir.ptr + cir.return + } } From d2d10efcb41613da0164639b10c2dd5b9ed55eb3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 5 Feb 2024 16:51:52 -0800 Subject: [PATCH 1394/2301] [CIR][CIRGen][Exceptions] Connect the unwind region to the rest of CatchOp --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 ++++++ clang/lib/CIR/CodeGen/CIRGenException.cpp | 5 +++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 +++- clang/test/CIR/CodeGen/try-catch.cpp | 4 +++- 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index ea4013cb5bbd..08273f8061a5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2351,6 +2351,12 @@ def TryOp : CIR_Op<"try", // CatchOp //===----------------------------------------------------------------------===// +// Represents the unwind region where unwind continues or +// the program std::terminate's. +def CatchUnwind : CIRUnitAttr<"CatchUnwind", "unwind"> { + let storageType = [{ CatchUnwind }]; +} + def CatchOp : CIR_Op<"catch", [SameVariadicOperandSize, DeclareOpInterfaceMethods, diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index ffd16a0ca474..6035099c722a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -697,6 +697,11 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { assert((clauses.size() > 0 || hasCleanup) && "CatchOp has no clauses!"); + // Attach the unwind region. This needs to be the last region in the + // CatchOp operation. + auto catchUnwind = mlir::cir::CatchUnwindAttr::get(builder.getContext()); + clauses.push_back(catchUnwind); + // Add final array of clauses into catchOp. catchOp.setCatchersAttr( mlir::ArrayAttr::get(builder.getContext(), clauses)); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f69e15cc21db..7cceadd8044d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1198,7 +1198,9 @@ void printCatchOp(OpAsmPrinter &p, CatchOp op, p.increaseIndent(); auto exRtti = a; - if (!exRtti) { + if (a.isa()) { + p.printAttribute(a); + } else if (!exRtti) { p << "all"; } else { p << "type ("; diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 4c4521820f7b..8aafd04947bc 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -37,7 +37,9 @@ unsigned long long tc() { // CHECK: cir.store %[[msg_addr]], %[[msg]] : !cir.ptr, cir.ptr > loc(#loc37) z = 99; (void)msg[0]; - } + } // CHECK: #cir.unwind + // CHECK: cir.resume loc(#loc1) + // CHECK-NEXT: }]) return z; } \ No newline at end of file From 9c38dee77d362167ad76c1eb0a4cb4bbb03ec7ac Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 5 Feb 2024 21:46:06 -0800 Subject: [PATCH 1395/2301] [CIR][CIRGen][Exceptions] Add support for catch_all --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 ++ clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 4 ++ clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 2 + clang/lib/CIR/CodeGen/CIRGenException.cpp | 63 +++++++++++++------ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 3 +- clang/test/CIR/CodeGen/try-catch.cpp | 33 ++++++++-- 6 files changed, 85 insertions(+), 25 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 08273f8061a5..147e5c1cdd72 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2357,6 +2357,11 @@ def CatchUnwind : CIRUnitAttr<"CatchUnwind", "unwind"> { let storageType = [{ CatchUnwind }]; } +// Represents the catch_all region. +def CatchAllAttr : CIRUnitAttr<"CatchAll", "all"> { + let storageType = [{ CatchAllAttr }]; +} + def CatchOp : CIR_Op<"catch", [SameVariadicOperandSize, DeclareOpInterfaceMethods, diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp index 0b8500eb12b4..b17206772c3f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -39,6 +39,10 @@ CIRGenCXXABI::AddedStructorArgCounts CIRGenCXXABI::addImplicitConstructorArgs( AddedArgs.Suffix.size()); } +CatchTypeInfo CIRGenCXXABI::getCatchAllTypeInfo() { + return CatchTypeInfo{nullptr, 0}; +} + bool CIRGenCXXABI::NeedsVTTParameter(GlobalDecl GD) { return false; } void CIRGenCXXABI::buildThisParam(CIRGenFunction &CGF, diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index b19cdb111ac3..3a74daa1225e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -135,6 +135,8 @@ class CIRGenCXXABI { /// Loads the incoming C++ this pointer as it was passed by the caller. mlir::Value loadIncomingCXXThis(CIRGenFunction &CGF); + virtual CatchTypeInfo getCatchAllTypeInfo(); + /// Determine whether there's something special about the rules of the ABI /// tell us that 'this' is a complete object within the given function. /// Obvious common logic like being defined on a final class will have been diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 6035099c722a..edf438db76ab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -311,6 +311,16 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { (CGM.getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN())); assert(!IsTargetDevice && "NYI"); + auto hasCatchAll = [&]() { + unsigned NumHandlers = S.getNumHandlers(); + for (unsigned I = NumHandlers - 1; I > 0; --I) { + auto *C = S.getHandler(I)->getExceptionDecl(); + if (!C) + return true; + } + return false; + }; + auto numHandlers = S.getNumHandlers(); auto tryLoc = getLoc(S.getBeginLoc()); auto scopeLoc = getLoc(S.getSourceRange()); @@ -321,10 +331,10 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { getBuilder().getType<::mlir::cir::ExceptionInfoType>()); mlir::Value exceptionInfoInsideTry; - // Create the scope to represent only the C/C++ `try {}` part. However, don't - // populate right away. Reserve some space to store the exception info but - // don't emit the bulk right away, for now only make sure the scope returns - // the exception information. + // Create the scope to represent only the C/C++ `try {}` part. However, + // don't populate right away. Reserve some space to store the exception + // info but don't emit the bulk right away, for now only make sure the + // scope returns the exception information. auto tryScope = builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) { @@ -342,7 +352,8 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { // The catch {} parts consume the exception information provided by a // try scope. Also don't emit the code right away for catch clauses, for // now create the regions and consume the try scope result. - // Note that clauses are later populated in CIRGenFunction::buildLandingPad. + // Note that clauses are later populated in + // CIRGenFunction::buildLandingPad. auto catchOp = builder.create( tryLoc, tryScope->getResult( @@ -350,9 +361,11 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &result) { mlir::OpBuilder::InsertionGuard guard(b); - // Once for each handler and one for fallback (which could be a - // resume or rethrow). - for (int i = 0, e = numHandlers + 1; i != e; ++i) { + auto numRegionsToCreate = numHandlers; + if (!hasCatchAll()) + numRegionsToCreate++; + // Once for each handler + (catch_all or unwind). + for (int i = 0, e = numRegionsToCreate; i != e; ++i) { auto *r = result.addRegion(); builder.createBlock(r); } @@ -368,7 +381,8 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { { ExceptionInfoRAIIObject ehx{*this, {exceptionInfoInsideTry, catchOp}}; - // Attach the basic blocks for the catchOp regions into ScopeCatch info. + // Attach the basic blocks for the catchOp regions into ScopeCatch + // info. enterCXXTryStmt(S, catchOp); // Emit the body for the `try {}` part. if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) @@ -412,10 +426,10 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, return; } - // In traditional LLVM codegen, the right handler is selected (with calls to - // eh_typeid_for) and the selector value is loaded. After that, blocks get - // connected for later codegen. In CIR, these are all implicit behaviors of - // cir.catch - not a lot of work to do. + // In traditional LLVM codegen, the right handler is selected (with + // calls to eh_typeid_for) and the selector value is loaded. After that, + // blocks get connected for later codegen. In CIR, these are all + // implicit behaviors of cir.catch - not a lot of work to do. // // Test against each of the exception types we claim to catch. for (unsigned i = 0, e = catchScope.getNumHandlers();; ++i) { @@ -425,7 +439,8 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, auto typeValue = handler.Type.RTTI; assert(handler.Type.Flags == 0 && "catch handler flags not supported"); assert(typeValue && "fell into catch-all case!"); - // Check for address space mismatch: if (typeValue->getType() != argTy) + // Check for address space mismatch: if (typeValue->getType() != + // argTy) assert(!UnimplementedFeature::addressSpace()); bool nextIsEnd = false; @@ -479,7 +494,11 @@ void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S, CatchScope->setHandler(I, TypeInfo, Handler); } else { // No exception decl indicates '...', a catch-all. - llvm_unreachable("NYI"); + CatchScope->setHandler(I, CGM.getCXXABI().getCatchAllTypeInfo(), Handler); + // Under async exceptions, catch(...) need to catch HW exception too + // Mark scope with SehTryBegin as a SEH __try scope + if (getLangOpts().EHAsynch) + llvm_unreachable("NYI"); } } } @@ -680,7 +699,9 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { // If we have a catch-all, add null to the landingpad. assert(!(hasCatchAll && hasFilter)); if (hasCatchAll) { - llvm_unreachable("NYI"); + // Attach the catch_all region. Can't coexist with an unwind one. + auto catchAll = mlir::cir::CatchAllAttr::get(builder.getContext()); + clauses.push_back(catchAll); // If we have an EH filter, we need to add those handlers in the // right place in the landingpad, which is to say, at the end. @@ -697,10 +718,12 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { assert((clauses.size() > 0 || hasCleanup) && "CatchOp has no clauses!"); - // Attach the unwind region. This needs to be the last region in the - // CatchOp operation. - auto catchUnwind = mlir::cir::CatchUnwindAttr::get(builder.getContext()); - clauses.push_back(catchUnwind); + // If there's no catch_all, attach the unwind region. This needs to be the + // last region in the CatchOp operation. + if (!hasCatchAll) { + auto catchUnwind = mlir::cir::CatchUnwindAttr::get(builder.getContext()); + clauses.push_back(catchUnwind); + } // Add final array of clauses into catchOp. catchOp.setCatchersAttr( diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 336a547d8033..5a757fe5bdb1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -767,7 +767,8 @@ void CIRGenItaniumCXXABI::emitBeginCatch(CIRGenFunction &CGF, VarDecl *CatchParam = S->getExceptionDecl(); if (!CatchParam) { - llvm_unreachable("NYI"); + auto Exn = CGF.currExceptionInfo.exceptionAddr; + CallBeginCatch(CGF, Exn, CGF.getBuilder().getVoidPtrTy(), true); return; } diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 8aafd04947bc..9ead4d978cb6 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -27,19 +27,44 @@ unsigned long long tc() { // CHECK: { // CHECK: %[[catch_idx_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr // CHECK: %[[idx_load:.*]] = cir.load %[[catch_idx_addr]] : cir.ptr , !s32i - // CHECK: cir.store %[[idx_load]], %[[idx]] : !s32i, cir.ptr loc(#loc25) + // CHECK: cir.store %[[idx_load]], %[[idx]] : !s32i, cir.ptr z = 98; idx++; } catch (const char* msg) { // CHECK: type (#cir.global_view<@_ZTIPKc> : !cir.ptr) // CHECK: { - // CHECK: %[[msg_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr loc(#loc37) - // CHECK: cir.store %[[msg_addr]], %[[msg]] : !cir.ptr, cir.ptr > loc(#loc37) + // CHECK: %[[msg_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr + // CHECK: cir.store %[[msg_addr]], %[[msg]] : !cir.ptr, cir.ptr > z = 99; (void)msg[0]; } // CHECK: #cir.unwind - // CHECK: cir.resume loc(#loc1) + // CHECK: cir.resume // CHECK-NEXT: }]) + return z; +} + +// CHECK: cir.func @_Z3tc2v +unsigned long long tc2() { + int x = 50, y = 3; + unsigned long long z; + + try { + int a = 4; + z = division(x, y); + a++; + } catch (int idx) { + z = 98; + idx++; + } catch (const char* msg) { + z = 99; + (void)msg[0]; + } catch (...) { + // CHECK: type (#cir.all) + // CHECK: cir.catch_param + // CHECK: cir.const(#cir.int<100> : !s32i) : !s32i + z = 100; + } + return z; } \ No newline at end of file From b06bf59a3733bd26f35b0ee89e273a55591cf58b Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Tue, 6 Feb 2024 21:06:32 +0300 Subject: [PATCH 1396/2301] [CIR][CIRGen] Add suppport for local typedefs (#451) The change is taken from original codegen. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 7 ++++++- clang/test/CIR/CodeGen/typedef.c | 10 ++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/typedef.c diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 51533c2310bf..9e9783244c11 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -831,7 +831,12 @@ void CIRGenFunction::buildDecl(const Decl &D) { case Decl::Typedef: // typedef int X; case Decl::TypeAlias: { // using X = int; [C++0x] - assert(0 && "Not implemented"); + QualType Ty = cast(D).getUnderlyingType(); + if (auto *DI = getDebugInfo()) + assert(!UnimplementedFeature::generateDebugInfo()); + if (Ty->isVariablyModifiedType()) + buildVariablyModifiedType(Ty); + return; } } } diff --git a/clang/test/CIR/CodeGen/typedef.c b/clang/test/CIR/CodeGen/typedef.c new file mode 100644 index 000000000000..aa55270ce13a --- /dev/null +++ b/clang/test/CIR/CodeGen/typedef.c @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +void local_typedef() { + typedef struct {int a;} Struct; + Struct s; +} + +//CHECK: cir.func no_proto @local_typedef() +//CHECK: {{.*}} = cir.alloca !ty_22Struct22, cir.ptr , ["s"] {alignment = 4 : i64} +//CHECK: cir.return From 0b0c9a433d506cfa5a770e5c315638b3d4ddd93e Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Tue, 6 Feb 2024 21:07:54 +0300 Subject: [PATCH 1397/2301] [CIR][CIRGen][BugFix] Fix building of calls (#452) The issue is that the CIR codegen assumes that function pointer is always result of cir.load op. But it isn't true because the funcion pointer may be result of other operations (f.e cir.call). --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 13 +++++++++---- clang/test/CIR/CodeGen/fun-ptr.c | 16 ++++++++++++++++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 50d5633a3dff..02792b3093c7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -605,9 +605,6 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, if (auto fnOp = dyn_cast(CalleePtr)) { directFuncOp = fnOp; - } else if (auto loadOp = dyn_cast(CalleePtr)) { - indirectFuncTy = CIRFuncTy; - indirectFuncVal = loadOp->getResult(0); } else if (auto getGlobalOp = dyn_cast(CalleePtr)) { // FIXME(cir): This peephole optimization to avoids indirect calls for // builtins. This should be fixed in the builting declaration instead by @@ -618,7 +615,15 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, directFuncOp = llvm::dyn_cast(globalOp); assert(directFuncOp && "operation is not a function"); } else { - llvm_unreachable("expected call variant to be handled"); + [[maybe_unused]] auto resultTypes = CalleePtr->getResultTypes(); + [[maybe_unused]] auto FuncPtrTy = + resultTypes.front().dyn_cast(); + assert((resultTypes.size() == 1) && FuncPtrTy && + FuncPtrTy.getPointee().isa() && + "expected pointer to function"); + + indirectFuncTy = CIRFuncTy; + indirectFuncVal = CalleePtr->getResult(0); } mlir::cir::CIRCallOpInterface callLikeOp; diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c index 5edf526ffbc4..e1f147b3d54c 100644 --- a/clang/test/CIR/CodeGen/fun-ptr.c +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -54,3 +54,19 @@ int foo(Data* d) { f = extract_a; return f(d); } + +// CIR: cir.func private {{@.*test.*}}() -> !cir.ptr> +// CIR: cir.func {{@.*bar.*}}() +// CIR: [[RET:%.*]] = cir.call {{@.*test.*}}() : () -> !cir.ptr> +// CIR: cir.call [[RET]]() : (!cir.ptr>) -> () +// CIR: cir.return + +// LLVM: declare {{.*}} ptr {{@.*test.*}}() +// LLVM: define void {{@.*bar.*}}() +// LLVM: [[RET:%.*]] = call ptr {{@.*test.*}}() +// LLVM: call void [[RET]]() +// LLVM: ret void +void (*test(void))(void); +void bar(void) { + test()(); +} From 2df481f01a2a1579d3a05b95e222ba8634fe273d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 6 Feb 2024 14:03:09 -0800 Subject: [PATCH 1398/2301] [CIR][CIRGen][Exceptions] Handle a catch_all corner case --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 12 +++++------- clang/test/CIR/CodeGen/try-catch.cpp | 17 +++++++++++++++++ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index edf438db76ab..546a76612c89 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -312,12 +312,11 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { assert(!IsTargetDevice && "NYI"); auto hasCatchAll = [&]() { - unsigned NumHandlers = S.getNumHandlers(); - for (unsigned I = NumHandlers - 1; I > 0; --I) { - auto *C = S.getHandler(I)->getExceptionDecl(); - if (!C) - return true; - } + if (!S.getNumHandlers()) + return false; + unsigned lastHandler = S.getNumHandlers() - 1; + if (!S.getHandler(lastHandler)->getExceptionDecl()) + return true; return false; }; @@ -421,7 +420,6 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, // that catch-all as the dispatch block. if (catchScope.getNumHandlers() == 1 && catchScope.getHandler(0).isCatchAll()) { - llvm_unreachable("NYI"); // Remove when adding testcase. assert(dispatchBlock == catchScope.getHandler(0).Block); return; } diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 9ead4d978cb6..67a9dc9aa73a 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -66,5 +66,22 @@ unsigned long long tc2() { z = 100; } + return z; +} + +// CHECK: cir.func @_Z3tc3v +unsigned long long tc3() { + int x = 50, y = 3; + unsigned long long z; + + try { + z = division(x, y); + } catch (...) { + // CHECK: type (#cir.all) + // CHECK: cir.catch_param + // CHECK: cir.const(#cir.int<100> : !s32i) : !s32i + z = 100; + } + return z; } \ No newline at end of file From e11c7578a9a9cee4097c38758875638a7dba2bff Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Tue, 6 Feb 2024 21:30:44 -0800 Subject: [PATCH 1399/2301] [CIR][CIRGen] Implement "if constexpr" code generation (#436) --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 11 +++ clang/test/CIR/CodeGen/if-constexpr.cpp | 95 +++++++++++++++++++++++++ 2 files changed, 106 insertions(+) create mode 100644 clang/test/CIR/CodeGen/if-constexpr.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 35270246f071..8b9a4357dc46 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -402,6 +402,17 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { bool CondConstant; if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, S.isConstexpr())) { + if (S.isConstexpr()) { + // Handle "if constexpr" explicitly here to avoid generating some + // ill-formed code since in CIR the "if" is no longer simplified + // in this lambda like in Clang but postponed to other MLIR + // passes. + if (const Stmt *Executed = CondConstant ? S.getThen() : S.getElse()) + return buildStmt(Executed, /*useCurrentScope=*/true); + // There is nothing to execute at runtime. + // TODO(cir): there is still an empty cir.scope generated by the caller. + return mlir::success(); + } assert(!UnimplementedFeature::constantFoldsToSimpleInteger()); } diff --git a/clang/test/CIR/CodeGen/if-constexpr.cpp b/clang/test/CIR/CodeGen/if-constexpr.cpp new file mode 100644 index 000000000000..8ef8315e1ad0 --- /dev/null +++ b/clang/test/CIR/CodeGen/if-constexpr.cpp @@ -0,0 +1,95 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void if0() { + int x = 0; + if constexpr (0 == 0) { + // Declare a variable with same name to be sure we handle the + // scopes correctly + int x = 2; + } else { + int x = 3; + } + if constexpr (0 == 1) { + int x = 4; + } else { + int x = 5; + } + if constexpr (int x = 7; 8 == 8) { + int y = x; + } else { + int y = 2*x; + } + if constexpr (int x = 9; 8 == 10) { + int y = x; + } else { + int y = 3*x; + } + if constexpr (10 == 10) { + int x = 20; + } + if constexpr (10 == 11) { + int x = 30; + } + if constexpr (int x = 70; 80 == 80) { + int y = 10*x; + } + if constexpr (int x = 90; 80 == 100) { + int y = 11*x; + } +} + +// CHECK: cir.func @_Z3if0v() {{.*}} +// CHECK: cir.store %1, %0 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.const(#cir.int<2> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.const(#cir.int<5> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.alloca !s32i, cir.ptr , ["y", init] {{.*}} +// CHECK-NEXT: %4 = cir.const(#cir.int<7> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %5, %3 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.alloca !s32i, cir.ptr , ["y", init] {{.*}} +// CHECK-NEXT: %4 = cir.const(#cir.int<9> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %6 = cir.load %2 : cir.ptr , !s32i loc({{.*}}) +// CHECK-NEXT: %7 = cir.binop(mul, %5, %6) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %7, %3 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.const(#cir.int<20> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// Note that Clang does not even emit a block in this case +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.alloca !s32i, cir.ptr , ["y", init] {{.*}} +// CHECK-NEXT: %4 = cir.const(#cir.int<70> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %6 = cir.load %2 : cir.ptr , !s32i loc({{.*}}) +// CHECK-NEXT: %7 = cir.binop(mul, %5, %6) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %7, %3 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.const(#cir.int<90> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: } loc({{.*}}) +// CHECK-NEXT: cir.return loc({{.*}}) From aaec28288d998d6a475571de5b917b6eef3833b8 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Thu, 8 Feb 2024 23:02:17 +0300 Subject: [PATCH 1400/2301] [CIR][CIRGen] Add codegen for global compound literals (#454) This PR adds support for global compound literals. The implementation is almost the same as in original codegen. But the original codegen can reuse the value of emitted compound literal global variable in case then the init expression of new variable and this variable are the same. It's easy to implement this feature. But I can't find any test-case then this feature will be applied. So I decided to ignore this optimization opportunity to avoid mistakes. --- clang/lib/CIR/CodeGen/CIRGenCstEmitter.h | 7 +++ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 56 +++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 6 +++ clang/test/CIR/CodeGen/compound-literal.c | 30 ++++++++++++ 4 files changed, 95 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/compound-literal.c diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h index 5c9e545f227f..086c68baec9c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h @@ -67,9 +67,14 @@ class ConstantEmitter { /// Is the current emission context abstract? bool isAbstract() const { return Abstract; } + bool isInConstantContext() const { return InConstantContext; } + void setInConstantContext(bool var) { InConstantContext = var; } + /// Try to emit the initiaizer of the given declaration as an abstract /// constant. If this succeeds, the emission must be finalized. mlir::Attribute tryEmitForInitializer(const VarDecl &D); + mlir::Attribute tryEmitForInitializer(const Expr *E, LangAS destAddrSpace, + QualType destType); void finalize(mlir::cir::GlobalOp global); @@ -106,6 +111,8 @@ class ConstantEmitter { mlir::Attribute emitAbstract(SourceLocation loc, const APValue &value, QualType T); + mlir::Attribute tryEmitConstantExpr(const ConstantExpr *CE); + // These are private helper routines of the constant emitter that // can't actually be private because things are split out into helper // functions and classes. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 2d87d679f185..8c6964e76b3c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -841,8 +841,9 @@ class ConstExprEmitter mlir::Attribute VisitStmt(Stmt *S, QualType T) { return nullptr; } mlir::Attribute VisitConstantExpr(ConstantExpr *CE, QualType T) { - assert(0 && "unimplemented"); - return {}; + if (mlir::Attribute Result = Emitter.tryEmitConstantExpr(CE)) + return Result; + return Visit(CE->getSubExpr(), T); } mlir::Attribute VisitParenExpr(ParenExpr *PE, QualType T) { @@ -1371,6 +1372,34 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) { return Visit(base.get()); } +static ConstantLValue +tryEmitGlobalCompoundLiteral(ConstantEmitter &emitter, + const CompoundLiteralExpr *E) { + CIRGenModule &CGM = emitter.CGM; + + LangAS addressSpace = E->getType().getAddressSpace(); + mlir::Attribute C = emitter.tryEmitForInitializer(E->getInitializer(), + addressSpace, E->getType()); + if (!C) { + assert(!E->isFileScope() && + "file-scope compound literal did not have constant initializer!"); + return nullptr; + } + + auto GV = CIRGenModule::createGlobalOp( + CGM, CGM.getLoc(E->getSourceRange()), + CGM.createGlobalCompoundLiteralName(), + CGM.getTypes().convertTypeForMem(E->getType()), + E->getType().isConstantStorage(CGM.getASTContext(), false, false)); + GV.setInitialValueAttr(C); + GV.setLinkage(mlir::cir::GlobalLinkageKind::InternalLinkage); + CharUnits Align = CGM.getASTContext().getTypeAlignInChars(E->getType()); + GV.setAlignment(Align.getAsAlign().value()); + + emitter.finalize(GV); + return CGM.getBuilder().getGlobalViewAttr(GV); +} + ConstantLValue ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) { assert(0 && "NYI"); return Visit(E->getSubExpr()); @@ -1378,8 +1407,9 @@ ConstantLValue ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) { ConstantLValue ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { - assert(0 && "NYI"); - return nullptr; + ConstantEmitter CompoundLiteralEmitter(CGM, Emitter.CGF); + CompoundLiteralEmitter.setInConstantContext(Emitter.isInConstantContext()); + return tryEmitGlobalCompoundLiteral(CompoundLiteralEmitter, E); } ConstantLValue @@ -1460,6 +1490,13 @@ mlir::Attribute ConstantEmitter::tryEmitForInitializer(const VarDecl &D) { return markIfFailed(tryEmitPrivateForVarInit(D)); } +mlir::Attribute ConstantEmitter::tryEmitForInitializer(const Expr *E, + LangAS destAddrSpace, + QualType destType) { + initializeNonAbstract(destAddrSpace); + return markIfFailed(tryEmitPrivateForMemory(E, destType)); +} + void ConstantEmitter::finalize(mlir::cir::GlobalOp global) { assert(InitializedNonAbstract && "finalizing emitter that was used for abstract emission?"); @@ -1552,6 +1589,17 @@ mlir::Attribute ConstantEmitter::tryEmitAbstract(const APValue &value, return validateAndPopAbstract(C, state); } +mlir::Attribute ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) { + if (!CE->hasAPValueResult()) + return nullptr; + + QualType RetType = CE->getType(); + if (CE->isGLValue()) + RetType = CGM.getASTContext().getLValueReferenceType(RetType); + + return emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(), RetType); +} + mlir::Attribute ConstantEmitter::tryEmitAbstractForMemory(const Expr *E, QualType destType) { auto nonMemoryDestType = getNonMemoryType(CGM, destType); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index b200b210b888..a598400dd80c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -314,6 +314,12 @@ class CIRGenModule : public CIRGenTypeCache { StringRef Name = ".str"); unsigned StringLiteralCnt = 0; + unsigned CompoundLitaralCnt = 0; + /// Return the unique name for global compound literal + std::string createGlobalCompoundLiteralName() { + return (Twine(".compoundLiteral.") + Twine(CompoundLitaralCnt++)).str(); + } + /// Return the AST address space of constant literal, which is used to emit /// the constant literal as global variable in LLVM IR. /// Note: This is not necessarily the address space of the constant literal diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c new file mode 100644 index 000000000000..80cc0dc0ad39 --- /dev/null +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// XFAIL: * + + +typedef struct { + int *arr; +} S; + +S a = { + .arr = (int[]){} +}; + +// CIR: cir.global "private" internal @".compoundLiteral.0" = #cir.zero : !cir.array {alignment = 4 : i64} +// CIR: cir.global external @a = #cir.const_struct<{#cir.global_view<@".compoundLiteral.0"> : !cir.ptr}> : !ty_22S22 + +// LLVM: @.compoundLiteral.0 = internal global [0 x i32] zeroinitializer +// LLVM: @a = global %struct.S { ptr @.compoundLiteral.0 } + +S b = { + .arr = (int[]){1} +}; + +// CIR: cir.global "private" internal @".compoundLiteral.1" = #cir.const_array<[#cir.int<1> : !s32i]> : !cir.array {alignment = 4 : i64} +// CIR: cir.global external @b = #cir.const_struct<{#cir.global_view<@".compoundLiteral.1"> : !cir.ptr}> : !ty_22S22 + +// LLVM: @.compoundLiteral.1 = internal global [1 x i32] [i32 1] +// LLVM: @b = global %struct.S { ptr @.compoundLiteral.1 } From 4e1b41ca1adb149d1428d0ba16dfea6bb02c8710 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 8 Feb 2024 23:06:45 +0300 Subject: [PATCH 1401/2301] [CIR][CodeGen] VLA support next step (#453) Here is the next step in VLA support. Basically, these changes handle different expressions, like `int (*a[5])[n]` or `sizeof(a[n])`. I took tests from the original `codegen` - they don't check anything, just verify we don't fail. There is still an issue with a proper cleanup - there are cases when `stack_save` doesn't dominate a corresponded `stack_restore`. For example in the next example: ``` void test(unsigned x) { while (1) { char a[x]; if (x > 5) break; ++x; } } ``` Look like `break` here doesn't lead to `stack_restore`. But I would say this is less related to VLA, though probably I need to fix this as well. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 37 +++++++-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 44 +++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 7 +- clang/test/CIR/CodeGen/vla.c | 95 ++++++++++++++++++++++ 5 files changed, 174 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/CodeGen/vla.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index d8327706b457..c3dac1cdf6e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1208,6 +1208,9 @@ Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, Addr.getPointer().getType().dyn_cast(); assert(lvalueAddrTy && "expected pointer"); + if (E->getType()->isVariableArrayType()) + return Addr; + auto pointeeTy = lvalueAddrTy.getPointee().dyn_cast(); assert(pointeeTy && "expected array"); @@ -1215,10 +1218,6 @@ Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, assert(arrayTy.isa() && "expected array"); assert(pointeeTy == arrayTy); - // TODO(cir): in LLVM codegen VLA pointers are always decayed, so we don't - // need to do anything here. Revisit this for VAT when its supported in CIR. - assert(!E->getType()->isVariableArrayType() && "what now?"); - // The result of this decay conversion points to an array element within the // base lvalue. However, since TBAA currently does not support representing // accesses to elements of member arrays, we conservatively represent accesses @@ -1342,6 +1341,15 @@ buildArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, shouldDecay); } +static QualType getFixedSizeElementType(const ASTContext &ctx, + const VariableArrayType *vla) { + QualType eltType; + do { + eltType = vla->getElementType(); + } while ((vla = ctx.getAsVariableArrayType(eltType))); + return eltType; +} + static Address buildArraySubscriptPtr( CIRGenFunction &CGF, mlir::Location beginLoc, mlir::Location endLoc, Address addr, ArrayRef indices, QualType eltType, @@ -1351,7 +1359,7 @@ static Address buildArraySubscriptPtr( // Determine the element size of the statically-sized base. This is // the thing that the indices are expressed in terms of. if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) { - assert(0 && "not implemented"); + eltType = getFixedSizeElementType(CGF.getContext(), vla); } // We can use that to compute the best alignment of the element. @@ -1432,7 +1440,24 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, Address Addr = Address::invalid(); if (const VariableArrayType *vla = getContext().getAsVariableArrayType(E->getType())) { - llvm_unreachable("variable array subscript is NYI"); + // The base must be a pointer, which is not an aggregate. Emit + // it. It needs to be emitted first in case it's what captures + // the VLA bounds. + Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo); + auto Idx = EmitIdxAfterBase(/*Promote*/ true); + + // The element count here is the total number of non-VLA elements. + mlir::Value numElements = getVLASize(vla).NumElts; + Idx = builder.createCast(mlir::cir::CastKind::integral, Idx, + numElements.getType()); + Idx = builder.createMul(Idx, numElements); + + QualType ptrType = E->getBase()->getType(); + Addr = buildArraySubscriptPtr( + *this, CGM.getLoc(E->getBeginLoc()), CGM.getLoc(E->getEndLoc()), Addr, + {Idx}, E->getType(), !getLangOpts().isSignedOverflowDefined(), + SignedIndices, CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/false, + &ptrType, E->getBase()); } else if (const ObjCObjectType *OIT = E->getType()->getAs()) { llvm_unreachable("ObjC object type subscript is NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 5d56485d2c4c..96c5da4f2c82 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1130,9 +1130,29 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, QualType elementType = pointerType->getPointeeType(); if (const VariableArrayType *vla = - CGF.getContext().getAsVariableArrayType(elementType)) - llvm_unreachable("VLA pointer arithmetic is NYI"); + CGF.getContext().getAsVariableArrayType(elementType)) { + // The element count here is the total number of non-VLA elements. + mlir::Value numElements = CGF.getVLASize(vla).NumElts; + + // GEP indexes are signed, and scaling an index isn't permitted to + // signed-overflow, so we use the same semantics for our explicit + // multiply. We suppress this if overflow is not undefined behavior. + mlir::Type elemTy = CGF.convertTypeForMem(vla->getElementType()); + + index = CGF.getBuilder().createCast(mlir::cir::CastKind::integral, index, + numElements.getType()); + index = CGF.getBuilder().createMul(index, numElements); + + if (CGF.getLangOpts().isSignedOverflowDefined()) { + pointer = CGF.getBuilder().create( + CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index); + } else { + pointer = CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned, + isSubtraction, op.E->getExprLoc()); + } + return pointer; + } // Explicitly handle GNU void* and function pointer arithmetic extensions. The // GNU void* casts amount to no-ops since our void* type is i8*, but this is // future proof. @@ -2289,7 +2309,25 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( if (E->getKind() == UETT_SizeOf) { if (const VariableArrayType *VAT = CGF.getContext().getAsVariableArrayType(TypeToSize)) { - llvm_unreachable("NYI"); + + if (E->isArgumentType()) { + // sizeof(type) - make sure to emit the VLA size. + CGF.buildVariablyModifiedType(TypeToSize); + } else { + // C99 6.5.3.4p2: If the argument is an expression of type + // VLA, it is evaluated. + CGF.buildIgnoredExpr(E->getArgumentExpr()); + } + + auto VlaSize = CGF.getVLASize(VAT); + mlir::Value size = VlaSize.NumElts; + + // Scale the number of non-VLA elements by the non-VLA element size. + CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type); + if (!eltSize.isOne()) + size = Builder.createMul(size, CGF.CGM.getSize(eltSize).getValue()); + + return size; } } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 57def877abae..d2c280db619e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1085,7 +1085,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, Ty = VD->getType(); if (Ty->isVariablyModifiedType()) - llvm_unreachable("NYI"); + buildVariablyModifiedType(Ty); } // Emit a location at the end of the prologue. if (getDebugInfo()) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 163a826b00b3..6b774f14746e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -620,7 +620,12 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } case Type::VariableArray: { - assert(0 && "not implemented"); + const VariableArrayType *A = cast(Ty); + assert(A->getIndexTypeCVRQualifiers() == 0 && + "FIXME: We only handle trivial array types so far!"); + // VLAs resolve to the innermost element type; this matches + // the return of alloca, and there isn't any obviously better choice. + ResultType = convertTypeForMem(A->getElementType()); break; } case Type::IncompleteArray: { diff --git a/clang/test/CIR/CodeGen/vla.c b/clang/test/CIR/CodeGen/vla.c new file mode 100644 index 000000000000..687d264987db --- /dev/null +++ b/clang/test/CIR/CodeGen/vla.c @@ -0,0 +1,95 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +// CHECK: cir.func @f0(%arg0: !s32i +// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["len", init] {alignment = 4 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["saved_stack"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !s32i, cir.ptr +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr , !s32i +// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u64i +// CHECK: [[TMP4:%.*]] = cir.stack_save : !cir.ptr +// CHECK: cir.store [[TMP4]], [[TMP1]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP5:%.*]] = cir.alloca !s32i, cir.ptr , [[TMP3]] : !u64i, ["vla"] {alignment = 16 : i64} +// CHECK: [[TMP6:%.*]] = cir.load [[TMP1]] : cir.ptr >, !cir.ptr +// CHECK: cir.stack_restore [[TMP6]] : !cir.ptr +void f0(int len) { + int a[len]; +} + +// CHECK: cir.func @f1 +// CHECK-NOT: cir.stack_save +// CHECK-NOT: cir.stack_restore +// CHECK: cir.return +int f1(int n) { + return sizeof(int[n]); +} + +// CHECK: cir.func @f2 +// CHECK: cir.stack_save +// DONT_CHECK: cir.stack_restore +// CHECK: cir.return +int f2(int x) { + int vla[x]; + return vla[x-1]; +} + +// CHECK: cir.func @f3 +// CHECK: cir.stack_save +// CHECK: cir.stack_restore +// CHECK: cir.return +void f3(int count) { + int a[count]; + + do { } while (0); + if (a[0] != 3) {} +} + + +// CHECK: cir.func @f4 +// CHECK-NOT: cir.stack_save +// CHECK-NOT: cir.stack_restore +// CHECK: cir.return +void f4(int count) { + // Make sure we emit sizes correctly in some obscure cases + int (*a[5])[count]; + int (*b)[][count]; +} + +// FIXME(cir): the test is commented due to stack_restore operation +// is not emitted for the if branch +// void f5(unsigned x) { +// while (1) { +// char s[x]; +// if (x > 5) //: stack restore here is missed +// break; +// } +// } + +// Check no errors happen +void function1(short width, int data[][width]) {} +void function2(short width, int data[][width][width]) {} +void f6(void) { + int bork[4][13][15]; + + function1(1, bork[2]); + function2(1, bork); +} + +static int GLOB; +int f7(int n) +{ + GLOB = 0; + char b[1][n+3]; + + __typeof__(b[GLOB++]) c; + return GLOB; +} + +double f8(int n, double (*p)[n][5]) { + return p[1][2][3]; +} + +int f9(unsigned n, char (*p)[n][n+1][6]) { + __typeof(p) p2 = (p + n/2) - n/4; + + return p2 - p; +} From 8a186ab1086b10dd589f0ba446f5c2f6759e27e5 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Thu, 8 Feb 2024 23:08:10 +0300 Subject: [PATCH 1402/2301] [CIR][CIRGen][Bugfix] Emit valid type for evaluated const (#456) This PR fixes the issue connected with folding a simple boolean expresion pattern (f.e. `0 && RHS = 0`). The problem is that the scalar expression emitter always creates a `cir.bool` attribute as a result of expression. But in some cases the result expression should be a `cir.int` attr. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 11 +---------- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 10 +++++++--- clang/test/CIR/CodeGen/evaluate-expr.c | 20 ++++++++++++++++++++ 3 files changed, 28 insertions(+), 13 deletions(-) create mode 100644 clang/test/CIR/CodeGen/evaluate-expr.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index e866fe50638d..f298540e1b56 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -552,16 +552,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { // Creates constant null value for integral type ty. mlir::cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { - if (ty.isa()) - return getNullPtr(ty, loc); - - mlir::TypedAttr attr; - if (ty.isa()) - attr = mlir::cir::IntAttr::get(ty, 0); - else - llvm_unreachable("NYI"); - - return create(loc, ty, attr); + return create(loc, ty, getZeroInitAttr(ty)); } mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 96c5da4f2c82..de18fa610970 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -2147,7 +2147,7 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { } // 0 && RHS: If it is safe, just elide the RHS, and return 0/false. if (!CGF.ContainsLabel(E->getRHS())) - return Builder.getBool(false, Loc); + return Builder.getNullValue(ResTy, Loc); } CIRGenFunction::ConditionalEvaluation eval(CGF); @@ -2220,8 +2220,12 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { return Builder.createZExtOrBitCast(RHSCond.getLoc(), RHSCond, ResTy); } // 1 || RHS: If it is safe, just elide the RHS, and return 1/true. - if (!CGF.ContainsLabel(E->getRHS())) - return Builder.getBool(true, Loc); + if (!CGF.ContainsLabel(E->getRHS())) { + if (auto intTy = ResTy.dyn_cast()) + return Builder.getConstInt(Loc, intTy, 1); + else + return Builder.getBool(true, Loc); + } } CIRGenFunction::ConditionalEvaluation eval(CGF); diff --git a/clang/test/CIR/CodeGen/evaluate-expr.c b/clang/test/CIR/CodeGen/evaluate-expr.c new file mode 100644 index 000000000000..b49496d3d076 --- /dev/null +++ b/clang/test/CIR/CodeGen/evaluate-expr.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +static const int g = 1; +void foo() { + if ((g != 1) && (g != 1)) + return; + if ((g == 1) || (g == 1)) + return; +} +// CHECK: cir.func no_proto @foo() +// CHECK: cir.scope { +// CHECK: [[ZERO:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: [[FALSE:%.*]] = cir.cast(int_to_bool, [[ZERO:%.*]] : !s32i), !cir.bool +// CHECK: cir.if [[FALSE]] { +// CHECK: cir.return +// CHECK: } +// CHECK: } +// CHECK: cir.return + From 2e387fe406ef2759157b58afc87a25d098d78aa7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 8 Feb 2024 16:26:51 -0800 Subject: [PATCH 1403/2301] [CIR][CIRGen][Exceptions][NFC] Add skeleton for some missing function start/end functionality --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 104 ++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 6 + .../CodeGen/UnimplementedFeatureGuarding.h | 11 ++ 3 files changed, 114 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index d2c280db619e..e7651a86add5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -434,6 +434,96 @@ void CIRGenFunction::LexicalScope::cleanup() { insertCleanupAndLeave(currBlock); } +void CIRGenFunction::finishFunction(SourceLocation EndLoc) { + // CIRGen doesn't use a BreakContinueStack or evaluates OnlySimpleReturnStmts. + + // Usually the return expression is evaluated before the cleanup + // code. If the function contains only a simple return statement, + // such as a constant, the location before the cleanup code becomes + // the last useful breakpoint in the function, because the simple + // return expression will be evaluated after the cleanup code. To be + // safe, set the debug location for cleanup code to the location of + // the return statement. Otherwise the cleanup code should be at the + // end of the function's lexical scope. + // + // If there are multiple branches to the return block, the branch + // instructions will get the location of the return statements and + // all will be fine. + if (auto *DI = getDebugInfo()) + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + + // Pop any cleanups that might have been associated with the + // parameters. Do this in whatever block we're currently in; it's + // important to do this before we enter the return block or return + // edges will be *really* confused. + bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth; + if (HasCleanups) { + // Make sure the line table doesn't jump back into the body for + // the ret after it's been at EndLoc. + if (auto *DI = getDebugInfo()) + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + // FIXME(cir): vla.c test currently crashes here. + // PopCleanupBlocks(PrologueCleanupDepth); + } + + // Emit function epilog (to return). + + // Original LLVM codegen does EmitReturnBlock() here, CIRGen handles + // this as part of LexicalScope instead, given CIR might have multiple + // blocks with `cir.return`. + if (ShouldInstrumentFunction()) { + assert(!UnimplementedFeature::shouldInstrumentFunction() && "NYI"); + } + + // Emit debug descriptor for function end. + if (auto *DI = getDebugInfo()) + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + + // Reset the debug location to that of the simple 'return' expression, if any + // rather than that of the end of the function's scope '}'. + assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + + assert(!UnimplementedFeature::emitFunctionEpilog() && "NYI"); + assert(!UnimplementedFeature::emitEndEHSpec() && "NYI"); + + // FIXME(cir): vla.c test currently crashes here. + // assert(EHStack.empty() && "did not remove all scopes from cleanup stack!"); + + // If someone did an indirect goto, emit the indirect goto block at the end of + // the function. + assert(!UnimplementedFeature::indirectBranch() && "NYI"); + + // If some of our locals escaped, insert a call to llvm.localescape in the + // entry block. + assert(!UnimplementedFeature::escapedLocals() && "NYI"); + + // If someone took the address of a label but never did an indirect goto, we + // made a zero entry PHI node, which is illegal, zap it now. + assert(!UnimplementedFeature::indirectBranch() && "NYI"); + + // CIRGen doesn't need to emit EHResumeBlock, TerminateLandingPad, + // TerminateHandler, UnreachableBlock, TerminateFunclets, NormalCleanupDest + // here because the basic blocks aren't shared. + + assert(!UnimplementedFeature::emitDeclMetadata() && "NYI"); + assert(!UnimplementedFeature::deferredReplacements() && "NYI"); + + // Add the min-legal-vector-width attribute. This contains the max width from: + // 1. min-vector-width attribute used in the source program. + // 2. Any builtins used that have a vector width specified. + // 3. Values passed in and out of inline assembly. + // 4. Width of vector arguments and return types for this function. + // 5. Width of vector arguments and return types for functions called by + // this function. + assert(!UnimplementedFeature::minLegalVectorWidthAttr() && "NYI"); + + // Add vscale_range attribute if appropriate. + assert(!UnimplementedFeature::vscaleRangeAttr() && "NYI"); + + // In traditional LLVM codegen, if clang generated an unreachable return + // block, it'd be deleted now. Same for unused ret allocas from ReturnValue +} + mlir::cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo) { @@ -596,11 +686,11 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, } // Emit the standard function epilogue. - // TODO: finishFunction(BodyRange.getEnd()); + finishFunction(BodyRange.getEnd()); // If we haven't marked the function nothrow through other means, do a quick // pass now to see if we can. - // TODO: if (!CurFn->doesNotThrow()) TryMarkNoThrow(CurFn); + assert(!UnimplementedFeature::tryMarkNoThrow()); return Fn; } @@ -974,9 +1064,9 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, llvm_unreachable("NYI"); } - // TODO: emitstartehspec - - // TODO: prologuecleanupdepth + assert(!UnimplementedFeature::emitStartEHSpec() && "NYI"); + // FIXME(cir): vla.c test currently crashes here. + // PrologueCleanupDepth = EHStack.stable_begin(); if (getLangOpts().OpenMP && CurCodeDecl) CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl); @@ -1098,8 +1188,8 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, llvm_unreachable("NYI"); } -/// ShouldInstrumentFunction - Return true if the current function should be -/// instrumented with __cyg_profile_func_* calls +/// Return true if the current function should be instrumented with +/// __cyg_profile_func_* calls bool CIRGenFunction::ShouldInstrumentFunction() { if (!CGM.getCodeGenOpts().InstrumentFunctions && !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining && diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 2cc38010808c..5d57f9f51b80 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -843,6 +843,8 @@ class CIRGenFunction : public CIRGenTypeCache { CIRGenCallee buildCallee(const clang::Expr *E); + void finishFunction(SourceLocation EndLoc); + /// Emit code to compute the specified expression which can have any type. The /// result is returned as an RValue struct. If this is an aggregate /// expression, the aggloc/agglocvolatile arguments indicate where the result @@ -1548,6 +1550,10 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Block *getEHResumeBlock(bool isCleanup); mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope); + /// The cleanup depth enclosing all the cleanups associated with the + /// parameters. + EHScopeStack::stable_iterator PrologueCleanupDepth; + mlir::Operation *getInvokeDestImpl(); bool getInvokeDest() { if (!EHStack.requiresLandingPad()) diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index d2e7de9de062..daba1ab454a3 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -69,6 +69,8 @@ struct UnimplementedFeature { static bool attributeBuiltin() { return false; } static bool attributeNoBuiltin() { return false; } static bool parameterAttributes() { return false; } + static bool minLegalVectorWidthAttr() { return false; } + static bool vscaleRangeAttr() { return false; } // Coroutines static bool unhandledException() { return false; } @@ -77,6 +79,9 @@ struct UnimplementedFeature { static bool variablyModifiedTypeEmission() { return false; } static bool buildLValueAlignmentAssumption() { return false; } static bool buildDerivedToBaseCastForDevirt() { return false; } + static bool emitStartEHSpec() { return false; } + static bool emitEndEHSpec() { return false; } + static bool emitFunctionEpilog() { return false; } // Data layout static bool dataLayoutGetIndexTypeSizeInBits() { return false; } @@ -145,11 +150,17 @@ struct UnimplementedFeature { static bool operandBundles() { return false; } static bool exceptions() { return false; } static bool metaDataNode() { return false; } + static bool emitDeclMetadata() { return false; } static bool isSEHTryScope() { return false; } static bool emitScalarRangeCheck() { return false; } static bool stmtExprEvaluation() { return false; } static bool setCallingConv() { return false; } static bool unreachableOp() { return false; } + static bool tryMarkNoThrow() { return false; } + static bool indirectBranch() { return false; } + static bool escapedLocals() { return false; } + static bool deferredReplacements() { return false; } + static bool shouldInstrumentFunction() { return false; } }; } // namespace cir From 706848f771ae51d305ac09b3adf83484a18866af Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Sat, 10 Feb 2024 01:25:32 +0300 Subject: [PATCH 1404/2301] [CIR][CIRGen] Support for local const arrays (#458) The change is taken from the original llvm codegen. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 18 ++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +++ clang/test/CIR/CodeGen/const-array.c | 8 ++++++++ 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 9e9783244c11..0a36e76aff84 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -91,9 +91,15 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, if ((!getContext().getLangOpts().OpenCL || Ty.getAddressSpace() == LangAS::opencl_constant) && (!NRVO && !D.isEscapingByref() && - CGM.isTypeConstant(Ty, /*ExcludeCtor=*/true, /*ExcludeDtor=*/false))) - assert(0 && "not implemented"); + CGM.isTypeConstant(Ty, /*ExcludeCtor=*/true, + /*ExcludeDtor=*/false))) { + buildStaticVarDecl(D, mlir::cir::GlobalLinkageKind::InternalLinkage); + // Signal this condition to later callbacks. + emission.Addr = Address::invalid(); + assert(emission.wasEmittedAsGlobal()); + return emission; + } // Otherwise, tell the initialization code that we're in this case. emission.IsConstantAggregate = true; } @@ -235,6 +241,10 @@ static void emitStoresForConstant(CIRGenModule &CGM, const VarDecl &D, void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { assert(emission.Variable && "emission was not valid!"); + // If this was emitted as a global constant, we're done. + if (emission.wasEmittedAsGlobal()) + return; + const VarDecl &D = *emission.Variable; QualType type = D.getType(); @@ -335,6 +345,10 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) { assert(emission.Variable && "emission was not valid!"); + // If this was emitted as a global constant, we're done. + if (emission.wasEmittedAsGlobal()) + return; + // TODO: in LLVM codegen if we are at an unreachable point codgen // is ignored. What we want for CIR? assert(builder.getInsertionBlock()); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 5d57f9f51b80..f4cd626022e6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1101,6 +1101,9 @@ class CIRGenFunction : public CIRGenTypeCache { : Variable(&variable), Addr(Address::invalid()) {} static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } + + bool wasEmittedAsGlobal() const { return !Addr.isValid(); } + /// Returns the raw, allocated address, which is not necessarily /// the address of the object itself. It is casted to default /// address space for address space agnostic languages. diff --git a/clang/test/CIR/CodeGen/const-array.c b/clang/test/CIR/CodeGen/const-array.c index c75ba59b8f17..eb0adceabdad 100644 --- a/clang/test/CIR/CodeGen/const-array.c +++ b/clang/test/CIR/CodeGen/const-array.c @@ -1,5 +1,13 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s +void bar() { + const int arr[1] = {1}; +} + +// CHECK: cir.global "private" constant internal @bar.arr = #cir.const_array<[#cir.int<1> : !s32i]> : !cir.array {alignment = 4 : i64} +// CHECK: cir.func no_proto @bar() +// CHECK: {{.*}} = cir.get_global @bar.arr : cir.ptr > + void foo() { int a[10] = {1}; } From bf3ca73a15928a9d6ea1bac04bf6d2632d652037 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 9 Feb 2024 17:38:14 -0800 Subject: [PATCH 1405/2301] [CIR][CIRGen][NFC] Relax asserts for using decls and namespace alias Originally those are only used for debug info generation, so get a bit more specific on what's missing here. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 0a36e76aff84..a91c3b7bd0c7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -808,20 +808,14 @@ void CIRGenFunction::buildDecl(const Decl &D) { return; case Decl::NamespaceAlias: - assert(0 && "Not implemented"); - return; case Decl::Using: // using X; [C++] - assert(0 && "Not implemented"); - return; - case Decl::UsingEnum: // using enum X; [C++] - assert(0 && "Not implemented"); + case Decl::UsingEnum: // using enum X; [C++] + case Decl::UsingDirective: // using namespace X; [C++] + assert(!UnimplementedFeature::generateDebugInfo()); return; case Decl::UsingPack: assert(0 && "Not implemented"); return; - case Decl::UsingDirective: // using namespace X; [C++] - assert(0 && "Not implemented"); - return; case Decl::Var: case Decl::Decomposition: { const VarDecl &VD = cast(D); From 57b0979ea9fe37428ff3911d6c27f4c5a7ce9691 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Mon, 12 Feb 2024 22:12:59 +0300 Subject: [PATCH 1406/2301] [CIR][CIRGen] Fix in replacing of no_proto func (#460) When replacing the no-proto functions with it's real definition, codegen assumes that only `cir.call` operation may use the replaced function. Such behaviour leads to compilation error because of the `cir.get_global` op can also use the function to get pointer to function. This PR adds handle the case with `cir.get_global` operation and fixes the issue. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 34 +++++++++++++---------- clang/test/CIR/CodeGen/no-proto-fun-ptr.c | 17 ++++++++++++ 2 files changed, 37 insertions(+), 14 deletions(-) create mode 100644 clang/test/CIR/CodeGen/no-proto-fun-ptr.c diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 91cc63489033..be125e8068a8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1643,22 +1643,28 @@ void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( NewFn.setNoProtoAttr(OldFn.getNoProtoAttr()); // Iterate through all calls of the no-proto function. - auto Calls = OldFn.getSymbolUses(OldFn->getParentOp()); - for (auto Call : Calls.value()) { + auto SymUses = OldFn.getSymbolUses(OldFn->getParentOp()); + for (auto Use : SymUses.value()) { mlir::OpBuilder::InsertionGuard guard(builder); - // Fetch no-proto call to be replaced. - auto noProtoCallOp = dyn_cast(Call.getUser()); - assert(noProtoCallOp && "unexpected use of no-proto function"); - builder.setInsertionPoint(noProtoCallOp); - - // Patch call type with the real function type. - auto realCallOp = builder.create( - noProtoCallOp.getLoc(), NewFn, noProtoCallOp.getOperands()); - - // Replace old no proto call with fixed call. - noProtoCallOp.replaceAllUsesWith(realCallOp); - noProtoCallOp.erase(); + if (auto noProtoCallOp = dyn_cast(Use.getUser())) { + builder.setInsertionPoint(noProtoCallOp); + + // Patch call type with the real function type. + auto realCallOp = builder.create( + noProtoCallOp.getLoc(), NewFn, noProtoCallOp.getOperands()); + + // Replace old no proto call with fixed call. + noProtoCallOp.replaceAllUsesWith(realCallOp); + noProtoCallOp.erase(); + } else if (auto getGlobalOp = + dyn_cast(Use.getUser())) { + // Replace type + getGlobalOp.getAddr().setType(mlir::cir::PointerType::get( + builder.getContext(), NewFn.getFunctionType())); + } else { + llvm_unreachable("NIY"); + } } } diff --git a/clang/test/CIR/CodeGen/no-proto-fun-ptr.c b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c new file mode 100644 index 000000000000..a6fcfa3f75d3 --- /dev/null +++ b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +void empty(); + +void check_noproto_ptr() { + void (*fun)(void) = empty; +} + +// CHECK: cir.func no_proto @check_noproto_ptr() +// CHECK: [[ALLOC:%.*]] = cir.alloca !cir.ptr>, cir.ptr >>, ["fun", init] {alignment = 8 : i64} +// CHECK: [[GGO:%.*]] = cir.get_global @empty : cir.ptr > +// CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> +// CHECK: cir.store [[CAST]], [[ALLOC]] : !cir.ptr>, cir.ptr >> +// CHECK: cir.return + +void empty(void) {} + From 61956eca325bc7f05c241d7b78029b1a2553d2d9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 12 Feb 2024 14:40:26 -0800 Subject: [PATCH 1407/2301] [CIR][CIRGen][Exceptions][NFC] Re-arrange CallLikeOp building --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 46 +++++++++++++---------- clang/lib/CIR/CodeGen/CIRGenException.cpp | 8 +++- 2 files changed, 33 insertions(+), 21 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 02792b3093c7..ad75dca5233a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -358,6 +358,29 @@ void CIRGenModule::ConstructAttributeList( CalleeInfo.getCalleeFunctionProtoType()); } +static mlir::cir::CIRCallOpInterface +buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, + mlir::cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal, + mlir::cir::FuncOp directFuncOp, + SmallVectorImpl &CIRCallArgs, bool InvokeDest) { + auto &builder = CGF.getBuilder(); + + if (InvokeDest) { + if (indirectFuncTy) + return builder.create( + callLoc, CGF.currExceptionInfo.exceptionAddr, indirectFuncVal, + indirectFuncTy, CIRCallArgs); + return builder.create( + callLoc, directFuncOp, CGF.currExceptionInfo.exceptionAddr, + CIRCallArgs); + } + + if (indirectFuncTy) + return builder.create(callLoc, indirectFuncVal, + indirectFuncTy, CIRCallArgs); + return builder.create(callLoc, directFuncOp, CIRCallArgs); +} + RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, @@ -626,26 +649,9 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, indirectFuncVal = CalleePtr->getResult(0); } - mlir::cir::CIRCallOpInterface callLikeOp; - if (indirectFuncTy) { - if (InvokeDest) { - callLikeOp = builder.create( - callLoc, currExceptionInfo.exceptionAddr, indirectFuncVal, - indirectFuncTy, CIRCallArgs); - } else { - callLikeOp = builder.create( - callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs); - } - } else { - if (InvokeDest) { - callLikeOp = builder.create( - callLoc, directFuncOp, currExceptionInfo.exceptionAddr, - CIRCallArgs); - } else { - callLikeOp = builder.create(callLoc, directFuncOp, - CIRCallArgs); - } - } + mlir::cir::CIRCallOpInterface callLikeOp = + buildCallLikeOp(*this, callLoc, indirectFuncTy, indirectFuncVal, + directFuncOp, CIRCallArgs, InvokeDest); if (E) callLikeOp->setAttr( diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 546a76612c89..236c1b85534c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -630,8 +630,14 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { return lpad; } + // If there's an existing CatchOp, it means we got a `cir.try` scope + // that leads to this "landing pad" creation site. Otherwise, exceptions + // are enabled but a throwing function is called anyways. auto catchOp = currExceptionInfo.catchOp; - assert(catchOp && "Should be valid"); + if (!catchOp) { + llvm_unreachable("NYI"); + } + { // Save the current CIR generation state. mlir::OpBuilder::InsertionGuard guard(builder); From e7da66b5e8e8cbed0a5919d131c277a6f20dfc3f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 12 Feb 2024 15:56:36 -0800 Subject: [PATCH 1408/2301] [CIR][CIRGen][Exceptions][NFC] Reuse lexical scope instead of custom RAII --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 9 +++--- clang/lib/CIR/CodeGen/CIRGenException.cpp | 8 +++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 28 +++++++------------ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 4 +-- 4 files changed, 20 insertions(+), 29 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index ad75dca5233a..ab0b35f8f322 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -366,13 +366,12 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, auto &builder = CGF.getBuilder(); if (InvokeDest) { + auto addr = CGF.currLexScope->getExceptionInfo().addr; if (indirectFuncTy) return builder.create( - callLoc, CGF.currExceptionInfo.exceptionAddr, indirectFuncVal, - indirectFuncTy, CIRCallArgs); - return builder.create( - callLoc, directFuncOp, CGF.currExceptionInfo.exceptionAddr, - CIRCallArgs); + callLoc, addr, indirectFuncVal, indirectFuncTy, CIRCallArgs); + return builder.create(callLoc, directFuncOp, addr, + CIRCallArgs); } if (indirectFuncTy) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 236c1b85534c..c7228393e6fb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -256,7 +256,7 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup) { // Just like some other try/catch related logic: return the basic block // pointer but only use it to denote we're tracking things, but there // shouldn't be any changes to that block after work done in this function. - auto catchOp = currExceptionInfo.catchOp; + auto catchOp = currLexScope->getExceptionInfo().catchOp; assert(catchOp.getNumRegions() && "expected at least one region"); auto &fallbackRegion = catchOp.getRegion(catchOp.getNumRegions() - 1); @@ -379,7 +379,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { getBuilder().getInsertionBlock()}; { - ExceptionInfoRAIIObject ehx{*this, {exceptionInfoInsideTry, catchOp}}; + lexScope.setExceptionInfo({exceptionInfoInsideTry, catchOp}); // Attach the basic blocks for the catchOp regions into ScopeCatch // info. enterCXXTryStmt(S, catchOp); @@ -393,7 +393,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { } { - ExceptionInfoRAIIObject ehx{*this, {tryScope->getResult(0), catchOp}}; + lexScope.setExceptionInfo({tryScope->getResult(0), catchOp}); // Emit catch clauses. exitCXXTryStmt(S); } @@ -633,7 +633,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { // If there's an existing CatchOp, it means we got a `cir.try` scope // that leads to this "landing pad" creation site. Otherwise, exceptions // are enabled but a throwing function is called anyways. - auto catchOp = currExceptionInfo.catchOp; + auto catchOp = currLexScope->getExceptionInfo().catchOp; if (!catchOp) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index f4cd626022e6..bb1b433a60c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -312,26 +312,9 @@ class CIRGenFunction : public CIRGenTypeCache { /// Try/Catch: calls within try statements need to refer to local /// allocas for the exception info struct CIRExceptionInfo { - mlir::Value exceptionAddr{}; + mlir::Value addr{}; mlir::cir::CatchOp catchOp{}; }; - CIRExceptionInfo currExceptionInfo{}; - class ExceptionInfoRAIIObject { - CIRGenFunction &P; - CIRExceptionInfo OldVal{}; - - public: - ExceptionInfoRAIIObject(CIRGenFunction &p, CIRExceptionInfo info) : P(p) { - if (P.currExceptionInfo.exceptionAddr) - OldVal = P.currExceptionInfo; - P.currExceptionInfo = info; - } - - /// Can be used to restore the state early, before the dtor - /// is run. - void restore() { P.currExceptionInfo = OldVal; } - ~ExceptionInfoRAIIObject() { restore(); } - }; enum class EvaluationOrder { ///! No langauge constraints on evaluation order. @@ -1773,6 +1756,9 @@ class CIRGenFunction : public CIRGenTypeCache { LexicalScope *ParentScope = nullptr; + // If there's exception information for this scope, store it. + CIRExceptionInfo exInfo{}; + // FIXME: perhaps we can use some info encoded in operations. enum Kind { Regular, // cir.if, cir.scope, if_regions @@ -1873,6 +1859,12 @@ class CIRGenFunction : public CIRGenTypeCache { // Labels solved inside this scope. llvm::SmallPtrSet SolvedLabels; + // --- + // Exception handling + // --- + CIRExceptionInfo &getExceptionInfo() { return exInfo; } + void setExceptionInfo(const CIRExceptionInfo &info) { exInfo = info; } + // --- // Return handling // --- diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 5a757fe5bdb1..cadbe85d1c07 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -663,7 +663,7 @@ static mlir::Value CallBeginCatch(CIRGenFunction &CGF, mlir::Value Exn, static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, Address ParamAddr, SourceLocation Loc) { // Load the exception from where the landing pad saved it. - auto Exn = CGF.currExceptionInfo.exceptionAddr; + auto Exn = CGF.currLexScope->getExceptionInfo().addr; CanQualType CatchType = CGF.CGM.getASTContext().getCanonicalType(CatchParam.getType()); @@ -767,7 +767,7 @@ void CIRGenItaniumCXXABI::emitBeginCatch(CIRGenFunction &CGF, VarDecl *CatchParam = S->getExceptionDecl(); if (!CatchParam) { - auto Exn = CGF.currExceptionInfo.exceptionAddr; + auto Exn = CGF.currLexScope->getExceptionInfo().addr; CallBeginCatch(CGF, Exn, CGF.getBuilder().getVoidPtrTy(), true); return; } From a78e5711109622e0c26b747fc0d6955c28fb975f Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Tue, 13 Feb 2024 22:08:02 +0300 Subject: [PATCH 1409/2301] [CIR][Lowering] add lowering of bool attribute (#461) This PR adds missing case to lowerCirAttrAsValue. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 12 +++++++++++- clang/test/CIR/Lowering/const.cir | 11 +++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e6ef8438f0de..913d5ad4ca67 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -163,6 +163,16 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::IntAttr intAttr, loc, converter->convertType(intAttr.getType()), intAttr.getValue()); } +/// BoolAttr visitor. +inline mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::BoolAttr boolAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); + return rewriter.create( + loc, converter->convertType(boolAttr.getType()), boolAttr.getValue()); +} + /// ConstPtrAttr visitor. inline mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ConstPtrAttr ptrAttr, @@ -367,7 +377,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, if (const auto constArr = attr.dyn_cast()) return lowerCirAttrAsValue(parentOp, constArr, rewriter, converter); if (const auto boolAttr = attr.dyn_cast()) - llvm_unreachable("bool attribute is NYI"); + return lowerCirAttrAsValue(parentOp, boolAttr, rewriter, converter); if (const auto zeroAttr = attr.dyn_cast()) return lowerCirAttrAsValue(parentOp, zeroAttr, rewriter, converter); if (const auto globalAttr = attr.dyn_cast()) diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 46b4677d40b4..5cbcb757ddb6 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -67,4 +67,15 @@ module { // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32 // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.array<10 x i32> + cir.func @testInitArrWithBool() { + %1 = cir.const(#cir.const_array<[#cir.bool : !cir.bool]> : !cir.array) : !cir.array + cir.return + } + + // CHECK: llvm.func @testInitArrWithBool() + // CHECK: [[ARR:%.*]] = llvm.mlir.undef : !llvm.array<1 x i8> + // CHECK: [[TRUE:%.*]] = llvm.mlir.constant(1 : i8) : i8 + // CHECK: {{.*}} = llvm.insertvalue [[TRUE]], [[ARR]][0] : !llvm.array<1 x i8> + // CHECL: llvm.return + } From b46c0b7ae64c15affff0d44f5b25616aab433b46 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 12 Feb 2024 16:19:28 -0800 Subject: [PATCH 1410/2301] [CIR][CIRGen][Exceptions] Prep work for using cir.try_call outside cir.try The final destination here is to support cir.try_calls that are not within a `try {}` statement in C++. This only affect untested paths that will assert a bit later than before, testcase coming soon. --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 54 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenException.cpp | 47 ++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 ++ .../CodeGen/UnimplementedFeatureGuarding.h | 12 +++-- 4 files changed, 106 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 6ed4c7049d83..7dc94348368b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -16,6 +16,8 @@ // //===----------------------------------------------------------------------===// +#include "llvm/Support/SaveAndRestore.h" + #include "CIRGenCleanup.h" #include "CIRGenFunction.h" @@ -159,6 +161,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { auto *EHEntry = Scope.getCachedEHDispatchBlock(); assert(Scope.hasEHBranches() == (EHEntry != nullptr)); bool RequiresEHCleanup = (EHEntry != nullptr); + EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope(); // Check the three conditions which might require a normal cleanup: @@ -270,7 +273,50 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // Emit the EH cleanup if required. if (RequiresEHCleanup) { - llvm_unreachable("NYI"); + // FIXME(cir): should we guard insertion point here? + auto *NextAction = getEHDispatchBlock(EHParent); + (void)NextAction; + + // Push a terminate scope or cleanupendpad scope around the potentially + // throwing cleanups. For funclet EH personalities, the cleanupendpad models + // program termination when cleanups throw. + bool PushedTerminate = false; + SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad); + mlir::Operation *CPI = nullptr; + + const EHPersonality &Personality = EHPersonality::get(*this); + if (Personality.usesFuncletPads()) { + llvm_unreachable("NYI"); + } + + // Non-MSVC personalities need to terminate when an EH cleanup throws. + if (!Personality.isMSVCPersonality()) { + EHStack.pushTerminate(); + PushedTerminate = true; + } else if (IsEHa && getInvokeDest()) { + llvm_unreachable("NYI"); + } + + // We only actually emit the cleanup code if the cleanup is either + // active or was used before it was deactivated. + if (EHActiveFlag.isValid() || IsActive) { + cleanupFlags.setIsForEHCleanup(); + buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag); + } + + // In LLVM traditional codegen, here's where it branches off to + // NextAction. + if (CPI) + llvm_unreachable("NYI"); + + // Leave the terminate scope. + if (PushedTerminate) + EHStack.popTerminate(); + + // FIXME(cir): LLVM traditional codegen tries to simplify some of the + // codegen here. Once we are further down with EH support revisit whether we + // need to this during lowering. + assert(!UnimplementedFeature::simplifyCleanupEntry()); } } @@ -470,3 +516,9 @@ EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) { InnermostEHScope = stable_begin(); return scope; } + +void EHScopeStack::pushTerminate() { + char *Buffer = allocate(EHTerminateScope::getSize()); + new (Buffer) EHTerminateScope(InnermostEHScope); + InnermostEHScope = stable_begin(); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index c7228393e6fb..1c0b686154f4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -621,7 +621,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope()); switch (innermostEHScope.getKind()) { case EHScope::Terminate: - llvm_unreachable("NYI"); + return getTerminateLandingPad(); case EHScope::Catch: case EHScope::Cleanup: @@ -635,7 +635,38 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { // are enabled but a throwing function is called anyways. auto catchOp = currLexScope->getExceptionInfo().catchOp; if (!catchOp) { - llvm_unreachable("NYI"); + auto loc = *currSrcLoc; + auto ehPtrTy = mlir::cir::PointerType::get( + getBuilder().getContext(), + getBuilder().getType<::mlir::cir::ExceptionInfoType>()); + + mlir::Value exceptionAddr; + { + // Get a new alloca within the current scope. + mlir::OpBuilder::InsertionGuard guard(builder); + exceptionAddr = buildAlloca( + "__exception_ptr", ehPtrTy, loc, CharUnits::One(), + builder.getBestAllocaInsertPoint(builder.getInsertionBlock())); + } + + { + // Insert catch at the end of the block, and place the insert pointer + // back to where it was. + mlir::OpBuilder::InsertionGuard guard(builder); + auto exceptionPtr = + builder.create(loc, ehPtrTy, exceptionAddr); + catchOp = builder.create( + loc, exceptionPtr, + [&](mlir::OpBuilder &b, mlir::Location loc, + mlir::OperationState &result) { + // There's no source code level catch here, create one region for + // the resume block. + mlir::OpBuilder::InsertionGuard guard(b); + auto *r = result.addRegion(); + builder.createBlock(r); + }); + } + currLexScope->setExceptionInfo({exceptionAddr, catchOp}); } { @@ -660,7 +691,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { switch (I->getKind()) { case EHScope::Cleanup: // If we have a cleanup, remember that. - llvm_unreachable("NYI"); + hasCleanup = (hasCleanup || cast(*I).isEHCleanup()); continue; case EHScope::Filter: { @@ -717,7 +748,8 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { // Otherwise, signal that we at least have cleanups. } else if (hasCleanup) { - llvm_unreachable("NYI"); + // FIXME(cir): figure out whether and how we need this in CIR. + assert(!UnimplementedFeature::setLandingPadCleanup()); } assert((clauses.size() > 0 || hasCleanup) && "CatchOp has no clauses!"); @@ -782,7 +814,8 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) { } case EHScope::Cleanup: - llvm_unreachable("NYI"); + assert(!UnimplementedFeature::setLandingPadCleanup()); + dispatchBlock = currLexScope->getOrCreateCleanupBlock(builder); break; case EHScope::Filter: @@ -850,3 +883,7 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl() { return LP; } + +mlir::Operation *CIRGenFunction::getTerminateLandingPad() { + llvm_unreachable("NYI"); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index bb1b433a60c9..ea4a3ffae685 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -900,6 +900,9 @@ class CIRGenFunction : public CIRGenTypeCache { return false; } + /// Return a landing pad that just calls terminate. + mlir::Operation *getTerminateLandingPad(); + /// Emit code to compute the specified expression, /// ignoring the result. void buildIgnoredExpr(const clang::Expr *E); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index daba1ab454a3..0214e7ae521a 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -79,8 +79,6 @@ struct UnimplementedFeature { static bool variablyModifiedTypeEmission() { return false; } static bool buildLValueAlignmentAssumption() { return false; } static bool buildDerivedToBaseCastForDevirt() { return false; } - static bool emitStartEHSpec() { return false; } - static bool emitEndEHSpec() { return false; } static bool emitFunctionEpilog() { return false; } // Data layout @@ -110,6 +108,14 @@ struct UnimplementedFeature { static bool fastMathFlags() { return false; } static bool fastMathFuncAttributes() { return false; } + // Exception handling + static bool setLandingPadCleanup() { return false; } + static bool isSEHTryScope() { return false; } + static bool ehStack() { return false; } + static bool emitStartEHSpec() { return false; } + static bool emitEndEHSpec() { return false; } + static bool simplifyCleanupEntry() { return false; } + // Type qualifiers. static bool atomicTypes() { return false; } static bool volatileTypes() { return false; } @@ -128,7 +134,6 @@ struct UnimplementedFeature { static bool openMP() { return false; } static bool openMPRuntime() { return false; } static bool openMPTarget() { return false; } - static bool ehStack() { return false; } static bool isVarArg() { return false; } static bool setNonGC() { return false; } static bool volatileLoadOrStore() { return false; } @@ -151,7 +156,6 @@ struct UnimplementedFeature { static bool exceptions() { return false; } static bool metaDataNode() { return false; } static bool emitDeclMetadata() { return false; } - static bool isSEHTryScope() { return false; } static bool emitScalarRangeCheck() { return false; } static bool stmtExprEvaluation() { return false; } static bool setCallingConv() { return false; } From ceacd5f327f3b786891be3faaa4ccb9b4ecec89f Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 14 Feb 2024 22:55:12 +0300 Subject: [PATCH 1411/2301] [CIR][CodeGen] Locally inited structures with bitfields (#463) The second part of the job started in #412 , now about local structures. As it was mentioned previously, sometimes the layout for structures with bit fields inited with constants differ from the originally created in `CIRRecordLayoutBuilder` and it cause `storeOp` verification fail due to different structure type was used to allocation. This PR fix it. An example: ``` typedef struct { int a : 4; int b : 5; int c; } D; void bar () { D d = {1,2,3}; } ``` Well, I can't say I'm proud of these changes - it seems like a type safety violation, but looks like it's the best we can do here. The original codegen doesn't have this problem at all, there is just a `memcpy` there, I provide LLVM IR just for reference: ``` %struct.D = type { i16, i32 } @__const.bar.d = private unnamed_addr constant { i8, i8, i32 } { i8 33, i8 0, i32 3 }, align 4 ; Function Attrs: noinline nounwind optnone uwtable define dso_local void @bar() #0 { entry: %d = alloca %struct.D, align 4 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %d, ptr align 4 @__const.bar.d, i64 8, i1 false) ret void } ``` --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 7 +++++++ clang/test/CIR/CodeGen/bitfields.c | 21 ++++++++++++++++++++- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index a91c3b7bd0c7..2483d3d371ac 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -234,6 +234,13 @@ static void emitStoresForConstant(CIRGenModule &CGM, const VarDecl &D, // // FIXME(cir): This is closer to memcpy behavior but less optimal, instead of // copy from a global, we just create a cir.const out of it. + + if (addr.getElementType() != Ty) { + auto ptr = addr.getPointer(); + ptr = builder.createBitcast(ptr.getLoc(), ptr, builder.getPointerTo(Ty)); + addr = addr.withPointer(ptr, addr.isKnownNonNull()); + } + auto loc = CGM.getLoc(D.getSourceRange()); builder.createStore(loc, builder.getConstant(loc, constant), addr); } diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index ec8d7c66af3a..05bc94a4b112 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -14,6 +14,12 @@ void m() { struct __long l; } +typedef struct { + int a : 4; + int b : 5; + int c; +} D; + typedef struct { int a : 4; int b : 27; @@ -27,9 +33,12 @@ typedef struct { int a : 3; // one bitfield with size < 8 unsigned b; } T; + +// CHECK: !ty_22D22 = !cir.struct // CHECK: !ty_22S22 = !cir.struct // CHECK: !ty_22T22 = !cir.struct // CHECK: !ty_22anon2E122 = !cir.struct +// CHECK: !ty_anon_struct = !cir.struct // CHECK: !ty_22__long22 = !cir.struct}> // CHECK: cir.func {{.*@store_field}} @@ -96,4 +105,14 @@ unsigned load_non_bitfield(S *s) { // CHECK: cir.func {{.*@load_one_bitfield}} int load_one_bitfield(T* t) { return t->a; -} \ No newline at end of file +} + +// for this struct type we create an anon structure with different storage types in initialization +// CHECK: cir.func {{.*@createD}} +// CHECK: %0 = cir.alloca !ty_22D22, cir.ptr , ["d"] {alignment = 4 : i64} +// CHECK: %1 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<33> : !u8i, #cir.int<0> : !u8i, #cir.int<3> : !s32i}> : !ty_anon_struct) : !ty_anon_struct +// CHECK: cir.store %2, %1 : !ty_anon_struct, cir.ptr +void createD() { + D d = {1,2,3}; +} From 85cf762b87db24ac8ef30624f9088530be8ba639 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 15 Feb 2024 04:06:36 +0800 Subject: [PATCH 1412/2301] [CIR][CIRGen] Introduce cir.unreachable operation (#447) In #426 we confirmed that CIR needs a `cir.unreachable` operation to mark unreachable program points [(discussion)](https://github.com/llvm/clangir/pull/426#discussion_r1472287368). This PR adds it. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 16 +++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 8 ++++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 6 ++++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 4 ++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 +++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 4 +-- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 15 ---------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 17 +++++++++-- clang/test/CIR/CodeGen/dynamic-cast.cpp | 1 + clang/test/CIR/CodeGen/unreachable.cpp | 28 +++++++++++++++++++ clang/test/CIR/IR/unreachable.cir | 9 ++++++ clang/test/CIR/Lowering/intrinsics.cir | 10 +++++++ 13 files changed, 103 insertions(+), 20 deletions(-) create mode 100644 clang/test/CIR/CodeGen/unreachable.cpp create mode 100644 clang/test/CIR/IR/unreachable.cir create mode 100644 clang/test/CIR/Lowering/intrinsics.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 147e5c1cdd72..e282d3e8cc6b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2844,6 +2844,22 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { }]; } +//===----------------------------------------------------------------------===// +// UnreachableOp +//===----------------------------------------------------------------------===// + +def UnreachableOp : CIR_Op<"unreachable", [Terminator]> { + let summary = "invoke immediate undefined behavior"; + let description = [{ + If the program control flow reaches a `cir.unreachable` operation, the + program exhibits undefined behavior immediately. This operation is useful + in cases where the unreachability of a program point needs to be explicitly + marked. + }]; + + let assemblyFormat = "attr-dict"; +} + //===----------------------------------------------------------------------===// // Operations Lowered Directly to LLVM IR // diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 16e9668b9345..0c351c7ea1b7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -451,6 +451,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, /*EmittedE=*/nullptr, IsDynamic)); } + case Builtin::BI__builtin_unreachable: { + buildUnreachable(E->getExprLoc()); + + // We do need to preserve an insertion point. + builder.createBlock(builder.getBlock()->getParent()); + + return RValue::get(nullptr); + } case Builtin::BImemcpy: case Builtin::BI__builtin_memcpy: case Builtin::BImempcpy: diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index c3dac1cdf6e0..46a91b1aadab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2540,6 +2540,12 @@ LValue CIRGenFunction::buildLoadOfReferenceLValue(LValue RefLVal, PointeeBaseInfo); } +void CIRGenFunction::buildUnreachable(SourceLocation Loc) { + if (SanOpts.has(SanitizerKind::Unreachable)) + llvm_unreachable("NYI"); + builder.create(getLoc(Loc)); +} + //===----------------------------------------------------------------------===// // CIR builder helpers //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 22559ce36ad5..42b06cfe7337 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -757,7 +757,9 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign, allocatorArgs); operatorDeleteCleanup = EHStack.stable_begin(); - // FIXME: cleanupDominator = Builder.CreateUnreachable(); + cleanupDominator = + builder.create(getLoc(E->getSourceRange())) + .getOperation(); } assert((allocSize == allocSizeWithoutCookie) == diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index e7651a86add5..77c8d05f61c2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -681,6 +681,7 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, } if (SanOpts.has(SanitizerKind::Return) || shouldEmitUnreachable) { // TODO: builder.createUnreachable(); + assert(!UnimplementedFeature::unreachableOp()); builder.clearInsertionPoint(); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index ea4a3ffae685..e0dedde5ced2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1516,6 +1516,10 @@ class CIRGenFunction : public CIRGenTypeCache { AggValueSlot::Overlap_t MayOverlap, bool isVolatile = false); + /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime + /// checking is enabled. Otherwise, just emit an unreachable instruction. + void buildUnreachable(SourceLocation Loc); + /// /// Cleanups /// -------- diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index cadbe85d1c07..8121010fd72e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2208,8 +2208,8 @@ void CIRGenItaniumCXXABI::buildBadCastCall(CIRGenFunction &CGF, assert(!UnimplementedFeature::setCallingConv()); CGF.buildRuntimeCall(loc, getBadCastFn(CGF)); - // TODO(cir): mark the current insertion point as unreachable. - assert(!UnimplementedFeature::unreachableOp()); + CGF.getBuilder().create(loc); + CGF.getBuilder().clearInsertionPoint(); } static CharUnits computeOffsetHint(ASTContext &Context, diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 8b9a4357dc46..b7bd43abc9ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -136,21 +136,6 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, mlir::Block *outgoing = builder.getInsertionBlock(); assert(outgoing && "expression emission cleared block!"); - // FIXME: Should we mimic LLVM emission here? - // The expression emitters assume (reasonably!) that the insertion - // point is always set. To maintain that, the call-emission code - // for noreturn functions has to enter a new block with no - // predecessors. We want to kill that block and mark the current - // insertion point unreachable in the common case of a call like - // "exit();". Since expression emission doesn't otherwise create - // blocks with no predecessors, we can just test for that. - // However, we must be careful not to do this to our incoming - // block, because *statement* emission does sometimes create - // reachable blocks which will have no predecessors until later in - // the function. This occurs with, e.g., labels that are not - // reachable by fallthrough. - if (incoming != outgoing && outgoing->use_empty()) - assert(0 && "not implemented"); break; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 913d5ad4ca67..e6cc2664ccb7 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2206,6 +2206,19 @@ class CIRStackRestoreLowering } }; +class CIRUnreachableLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::UnreachableOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -2221,8 +2234,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, - CIRStackSaveLowering, CIRStackRestoreLowering>(converter, - patterns.getContext()); + CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/dynamic-cast.cpp b/clang/test/CIR/CodeGen/dynamic-cast.cpp index 52e7a3cee3d0..f9648ff72f08 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast.cpp @@ -48,6 +48,7 @@ Derived &ref_cast(Base &b) { // CHECK-NEXT: %[[#V18:]] = cir.unary(not, %[[#V17]]) : !cir.bool, !cir.bool // CHECK-NEXT: cir.if %[[#V18]] { // CHECK-NEXT: cir.call @__cxa_bad_cast() : () -> () +// CHECK-NEXT: cir.unreachable // CHECK-NEXT: } // CHECK-NEXT: %{{.+}} = cir.cast(bitcast, %[[#V16]] : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/CodeGen/unreachable.cpp b/clang/test/CIR/CodeGen/unreachable.cpp new file mode 100644 index 000000000000..c617fe8c6212 --- /dev/null +++ b/clang/test/CIR/CodeGen/unreachable.cpp @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo(); + +void basic() { + foo(); + __builtin_unreachable(); +} + +// CHECK: cir.func @_Z5basicv() +// CHECK-NEXT: cir.call @_Z3foov() : () -> () +// CHECK-NEXT: cir.unreachable +// CHECK-NEXT: } + +void code_after_unreachable() { + foo(); + __builtin_unreachable(); + foo(); +} + +// CHECK: cir.func @_Z22code_after_unreachablev() +// CHECK: cir.call @_Z3foov() : () -> () +// CHECK: cir.unreachable +// CHECK: ^{{.+}}: +// CHECK: cir.call @_Z3foov() : () -> () +// CHECK: cir.return +// CHECK: } diff --git a/clang/test/CIR/IR/unreachable.cir b/clang/test/CIR/IR/unreachable.cir new file mode 100644 index 000000000000..d057f47ee2b3 --- /dev/null +++ b/clang/test/CIR/IR/unreachable.cir @@ -0,0 +1,9 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +cir.func @test() { + cir.unreachable +} + +// CHECK: cir.func @test +// CHECK-NEXT: cir.unreachable diff --git a/clang/test/CIR/Lowering/intrinsics.cir b/clang/test/CIR/Lowering/intrinsics.cir new file mode 100644 index 000000000000..f3bcf9fba492 --- /dev/null +++ b/clang/test/CIR/Lowering/intrinsics.cir @@ -0,0 +1,10 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR + +module { + cir.func @test_unreachable() { + cir.unreachable + } + + // MLIR: llvm.func @test_unreachable() + // MLIR-NEXT: llvm.unreachable +} From 6764995ff582af831a06536ca9ceced79b62a22b Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Wed, 14 Feb 2024 23:20:31 +0300 Subject: [PATCH 1413/2301] [CIR][CIRGen] Add missing case to VisitMemberExpr (#464) This PR adds support for evaluating constants in member exprs. The change is taken from original codegen. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 7 +++++-- clang/test/CIR/CodeGen/evaluate-expr.c | 12 ++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index de18fa610970..4799f68726c8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1593,8 +1593,11 @@ mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { // keep assertion for now. assert(!UnimplementedFeature::tryEmitAsConstant()); Expr::EvalResult Result; - if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) - assert(0 && "NYI"); + if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) { + llvm::APSInt Value = Result.Val.getInt(); + CGF.buildIgnoredExpr(E->getBase()); + return Builder.getConstInt(CGF.getLoc(E->getExprLoc()), Value); + } return buildLoadOfLValue(E); } diff --git a/clang/test/CIR/CodeGen/evaluate-expr.c b/clang/test/CIR/CodeGen/evaluate-expr.c index b49496d3d076..81947ea181e9 100644 --- a/clang/test/CIR/CodeGen/evaluate-expr.c +++ b/clang/test/CIR/CodeGen/evaluate-expr.c @@ -18,3 +18,15 @@ void foo() { // CHECK: } // CHECK: cir.return +typedef struct { int x; } S; +static const S s = {0}; +void bar() { + int a = s.x; +} +// CHECK: cir.func no_proto @bar() +// CHECK: [[ALLOC:%.*]] = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: {{%.*}} = cir.get_global @s : cir.ptr +// CHECK: [[CONST:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.store [[CONST]], [[ALLOC]] : !s32i, cir.ptr +// CHECK: cir.return + From ab629ddab2704fd25b71b526e887c1ed30c0fe33 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 14 Feb 2024 14:45:50 -0800 Subject: [PATCH 1414/2301] [CIR][CIRGen][Exceptions] Add unwind attribute - Add it to functions but not yet on calls. - Add more skeleton for tagging function attributes. - Testcases One more incremental step towards cir.try_call outside cir.try scopes. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 4 + clang/lib/CIR/CodeGen/CIRGenCall.cpp | 118 +++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 28 +++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 4 +- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 +- .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 2 + .../test/CIR/CodeGen/array-unknown-bound.cpp | 2 +- .../CodeGen/builtin-constant-evaluated.cpp | 2 +- .../{inlineAttr.cpp => function-attrs.cpp} | 6 +- clang/test/CIR/CodeGen/gnu-extension.c | 2 +- clang/test/CIR/CodeGen/optnone.cpp | 6 +- clang/test/CIR/IR/invalid.cir | 25 ---- clang/test/CIR/IR/try.cir | 24 ++++ 14 files changed, 173 insertions(+), 58 deletions(-) rename clang/test/CIR/CodeGen/{inlineAttr.cpp => function-attrs.cpp} (83%) create mode 100644 clang/test/CIR/IR/try.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 9fc8ce9efad0..551cda9144b5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -519,6 +519,10 @@ def OptNoneAttr : CIRUnitAttr<"OptNone", "optnone"> { let storageType = [{ OptNoneAttr }]; } +def NoThrowAttr : CIRUnitAttr<"NoThrow", "nothrow"> { + let storageType = [{ NoThrowAttr }]; +} + def GlobalCtorAttr : CIR_Attr<"GlobalCtor", "globalCtor"> { let summary = "Indicates a function is a global constructor."; let description = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index ab0b35f8f322..f034638d2eb7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -308,14 +308,18 @@ static Address emitAddressAtOffset(CIRGenFunction &CGF, Address addr, return addr; } -static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, +static void AddAttributesFromFunctionProtoType(CIRGenBuilderTy &builder, + ASTContext &Ctx, + mlir::NamedAttrList &FuncAttrs, const FunctionProtoType *FPT) { if (!FPT) return; if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && - FPT->isNothrow()) - llvm_unreachable("NoUnwind NYI"); + FPT->isNothrow()) { + auto nu = mlir::cir::NoThrowAttr::get(builder.getContext()); + FuncAttrs.set(nu.getMnemonic(), nu); + } } /// Construct the CIR attribute list of a function or call. @@ -335,10 +339,11 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, /// attributes that restrict how the frontend generates code must be /// added here rather than getDefaultFunctionAttributes. /// -void CIRGenModule::ConstructAttributeList( - StringRef Name, const CIRGenFunctionInfo &FI, CIRGenCalleeInfo CalleeInfo, - llvm::SmallSet &Attrs, bool AttrOnCallSite, - bool IsThunk) { +void CIRGenModule::ConstructAttributeList(StringRef Name, + const CIRGenFunctionInfo &FI, + CIRGenCalleeInfo CalleeInfo, + mlir::DictionaryAttr &Attrs, + bool AttrOnCallSite, bool IsThunk) { // Implementation Disclaimer // // UnimplementedFeature and asserts are used throughout the code to track @@ -349,13 +354,92 @@ void CIRGenModule::ConstructAttributeList( // That said, for the most part, the approach here is very specific compared // to the rest of CIRGen and attributes and other handling should be done upon // demand. + mlir::NamedAttrList FuncAttrs; // Collect function CIR attributes from the CC lowering. // TODO: NoReturn, cmse_nonsecure_call // Collect function CIR attributes from the callee prototype if we have one. - AddAttributesFromFunctionProtoType(astCtx, + AddAttributesFromFunctionProtoType(getBuilder(), astCtx, FuncAttrs, CalleeInfo.getCalleeFunctionProtoType()); + + const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl(); + + // TODO(cir): Attach assumption attributes to the declaration. If this is a + // call site, attach assumptions from the caller to the call as well. + + bool HasOptnone = false; + (void)HasOptnone; + // The NoBuiltinAttr attached to the target FunctionDecl. + mlir::Attribute *NBA; + + if (TargetDecl) { + + if (TargetDecl->hasAttr()) { + auto nu = mlir::cir::NoThrowAttr::get(builder.getContext()); + FuncAttrs.set(nu.getMnemonic(), nu); + } + + if (const FunctionDecl *Fn = dyn_cast(TargetDecl)) { + AddAttributesFromFunctionProtoType( + getBuilder(), astCtx, FuncAttrs, + Fn->getType()->getAs()); + if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) { + // A sane operator new returns a non-aliasing pointer. + auto Kind = Fn->getDeclName().getCXXOverloadedOperator(); + if (getCodeGenOpts().AssumeSaneOperatorNew && + (Kind == OO_New || Kind == OO_Array_New)) + ; // llvm::Attribute::NoAlias + } + const CXXMethodDecl *MD = dyn_cast(Fn); + const bool IsVirtualCall = MD && MD->isVirtual(); + // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a + // virtual function. These attributes are not inherited by overloads. + if (!(AttrOnCallSite && IsVirtualCall)) { + if (Fn->isNoReturn()) + ; // NoReturn + // NBA = Fn->getAttr(); + (void)NBA; + } + } + + if (isa(TargetDecl) || isa(TargetDecl)) { + // Only place nomerge attribute on call sites, never functions. This + // allows it to work on indirect virtual function calls. + if (AttrOnCallSite && TargetDecl->hasAttr()) + ; + } + + // 'const', 'pure' and 'noalias' attributed functions are also nounwind. + if (TargetDecl->hasAttr()) { + // gcc specifies that 'const' functions have greater restrictions than + // 'pure' functions, so they also cannot have infinite loops. + } else if (TargetDecl->hasAttr()) { + // gcc specifies that 'pure' functions cannot have infinite loops. + } else if (TargetDecl->hasAttr()) { + } + + HasOptnone = TargetDecl->hasAttr(); + if (auto *AllocSize = TargetDecl->getAttr()) { + std::optional NumElemsParam; + if (AllocSize->getNumElemsParam().isValid()) + NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex(); + // TODO(cir): add alloc size attr. + } + + if (TargetDecl->hasAttr()) { + assert(!UnimplementedFeature::openCL()); + } + + if (TargetDecl->hasAttr() && + getLangOpts().OffloadUniformBlock) + assert(!UnimplementedFeature::CUDA()); + + if (TargetDecl->hasAttr()) + ; + } + + Attrs = mlir::DictionaryAttr::get(builder.getContext(), FuncAttrs); } static mlir::cir::CIRCallOpInterface @@ -566,7 +650,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: Update the largest vector width if any arguments have vector types. // Compute the calling convention and attributes. - llvm::SmallSet Attrs; + mlir::DictionaryAttr Attrs; StringRef FnName; if (auto calleeFnOp = dyn_cast(CalleePtr)) FnName = calleeFnOp.getName(); @@ -601,14 +685,16 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // We don't need to model anything in IR to get this behavior. CannotThrow = true; } else { - // FIXME(cir): pass down nounwind attribute - CannotThrow = false; + // Otherwise, nounwind call sites will never throw. + auto noThrowAttr = mlir::cir::NoThrowAttr::get(builder.getContext()); + CannotThrow = Attrs.contains(noThrowAttr.getMnemonic()); + + if (auto fptr = dyn_cast(CalleePtr)) + if (fptr.getExtraAttrs().getElements().contains( + noThrowAttr.getMnemonic())) + CannotThrow = true; } - (void)CannotThrow; - - // In LLVM this contains the basic block, in CIR we solely track for now. - bool InvokeDest = getInvokeDest(); - (void)InvokeDest; + auto InvokeDest = CannotThrow ? false : getInvokeDest(); // TODO: UnusedReturnSizePtr if (const FunctionDecl *FD = dyn_cast_or_null(CurFuncDecl)) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index be125e8068a8..6f4b7b3262b7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2011,10 +2011,38 @@ mlir::Location CIRGenModule::getLocForFunction(const clang::FunctionDecl *FD) { return theModule->getLoc(); } +/// Determines whether the language options require us to model +/// unwind exceptions. We treat -fexceptions as mandating this +/// except under the fragile ObjC ABI with only ObjC exceptions +/// enabled. This means, for example, that C with -fexceptions +/// enables this. +/// TODO(cir): can be shared with traditional LLVM codegen. +static bool hasUnwindExceptions(const LangOptions &LangOpts) { + // If exceptions are completely disabled, obviously this is false. + if (!LangOpts.Exceptions) + return false; + + // If C++ exceptions are enabled, this is true. + if (LangOpts.CXXExceptions) + return true; + + // If ObjC exceptions are enabled, this depends on the ABI. + if (LangOpts.ObjCExceptions) { + return LangOpts.ObjCRuntime.hasUnwindExceptions(); + } + + return true; +} + void CIRGenModule::setExtraAttributesForFunc(FuncOp f, const clang::FunctionDecl *FD) { mlir::NamedAttrList attrs; + if (!hasUnwindExceptions(getLangOpts())) { + auto attr = mlir::cir::NoThrowAttr::get(builder.getContext()); + attrs.set(attr.getMnemonic(), attr); + } + if (!FD) { // If we don't have a declaration to control inlining, the function isn't // explicitly marked as alwaysinline for semantic reasons, and inlining is diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index a598400dd80c..ff18a16784f7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -261,8 +261,8 @@ class CIRGenModule : public CIRGenTypeCache { /// \param Attrs [out] - On return, the attribute list to use. void ConstructAttributeList(StringRef Name, const CIRGenFunctionInfo &Info, CIRGenCalleeInfo CalleeInfo, - llvm::SmallSet &Attrs, - bool AttrOnCallSite, bool IsThunk); + mlir::DictionaryAttr &Attrs, bool AttrOnCallSite, + bool IsThunk); /// Will return a global variable of the given type. If a variable with a /// different type already exists then a new variable with the right type diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 0214e7ae521a..d6a7e1d89433 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -131,6 +131,7 @@ struct UnimplementedFeature { static bool CGCapturedStmtInfo() { return false; } static bool cxxABI() { return false; } static bool openCL() { return false; } + static bool CUDA() { return false; } static bool openMP() { return false; } static bool openMPRuntime() { return false; } static bool openMPTarget() { return false; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7cceadd8044d..07afd0a17a55 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2110,12 +2110,7 @@ cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return verifyCallCommInSymbolUses(*this, symbolTable); } -LogicalResult cir::TryCallOp::verify() { - auto tryScope = (*this)->getParentOfType(); - if (!tryScope) - return emitOpError() << "expected to be within a 'cir.try' region"; - return mlir::success(); -} +LogicalResult cir::TryCallOp::verify() { return mlir::success(); } ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index c19831bda087..dac44ca4d8d0 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -59,6 +59,8 @@ class CIRDialectLLVMIRTranslationInterface llvm_unreachable("Unknown inline kind"); } else if (attr.getValue().dyn_cast()) { llvmFunc->addFnAttr(llvm::Attribute::OptimizeNone); + } else if (attr.getValue().dyn_cast()) { + llvmFunc->addFnAttr(llvm::Attribute::NoUnwind); } } } diff --git a/clang/test/CIR/CodeGen/array-unknown-bound.cpp b/clang/test/CIR/CodeGen/array-unknown-bound.cpp index 09f75ca27f27..82948bef34e2 100644 --- a/clang/test/CIR/CodeGen/array-unknown-bound.cpp +++ b/clang/test/CIR/CodeGen/array-unknown-bound.cpp @@ -7,7 +7,7 @@ int *table_ptr = table; // CHECK: cir.global external @table_ptr = #cir.global_view<@table> : !cir.ptr int test() { return table[1]; } -// CHECK: cir.func @_Z4testv() -> !s32i extra( {inline = #cir.inline, optnone = #cir.optnone} ) { +// CHECK: cir.func @_Z4testv() // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.get_global @table : cir.ptr > diff --git a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp index ad89dcd25484..9aa3175eeecd 100644 --- a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp +++ b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp @@ -2,7 +2,7 @@ auto func() { return __builtin_strcmp("", ""); - // CHECK: cir.func @_Z4funcv() -> !s32i extra( {inline = #cir.inline, optnone = #cir.optnone} ) { + // CHECK: cir.func @_Z4funcv() // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) // CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc7) // CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr loc(#loc8) diff --git a/clang/test/CIR/CodeGen/inlineAttr.cpp b/clang/test/CIR/CodeGen/function-attrs.cpp similarity index 83% rename from clang/test/CIR/CodeGen/inlineAttr.cpp rename to clang/test/CIR/CodeGen/function-attrs.cpp index 1d143e6d2aa3..fb5e3b43a464 100644 --- a/clang/test/CIR/CodeGen/inlineAttr.cpp +++ b/clang/test/CIR/CodeGen/function-attrs.cpp @@ -25,9 +25,9 @@ int s3(int a, int b) { } -// CIR: cir.func linkonce_odr @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline} ) -// CIR: cir.func @_Z2s1ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline} ) -// CIR: cir.func @_Z2s2ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline} ) +// CIR: cir.func linkonce_odr @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline, nothrow = #cir.nothrow} ) +// CIR: cir.func @_Z2s1ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline, nothrow = #cir.nothrow} ) +// CIR: cir.func @_Z2s2ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline, nothrow = #cir.nothrow} ) // CIR: cir.func @_Z2s3ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} { // LLVM: define i32 @_Z2s1ii(i32 %0, i32 %1) {{.*}} #[[#ATTR1:]] diff --git a/clang/test/CIR/CodeGen/gnu-extension.c b/clang/test/CIR/CodeGen/gnu-extension.c index 949f39edb3dd..2d91c403e173 100644 --- a/clang/test/CIR/CodeGen/gnu-extension.c +++ b/clang/test/CIR/CodeGen/gnu-extension.c @@ -3,7 +3,7 @@ int foo(void) { return __extension__ 0b101010; } -//CHECK: cir.func @foo() -> !s32i extra( {inline = #cir.inline, optnone = #cir.optnone} ) { +//CHECK: cir.func @foo() //CHECK-NEXT: [[ADDR:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} //CHECK-NEXT: [[VAL:%.*]] = cir.const(#cir.int<42> : !s32i) : !s32i //CHECK-NEXT: cir.store [[VAL]], [[ADDR]] : !s32i, cir.ptr diff --git a/clang/test/CIR/CodeGen/optnone.cpp b/clang/test/CIR/CodeGen/optnone.cpp index 08965675ff70..bf8e0a675468 100644 --- a/clang/test/CIR/CodeGen/optnone.cpp +++ b/clang/test/CIR/CodeGen/optnone.cpp @@ -17,9 +17,9 @@ int s0(int a, int b) { return x; } -// CIR-O0: cir.func @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline, optnone = #cir.optnone} ) +// CIR-O0: cir.func @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline, nothrow = #cir.nothrow, optnone = #cir.optnone} ) // CIR-O2-NOT: cir.func @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} optnone // LLVM-O0: define i32 @_Z2s0ii(i32 %0, i32 %1) #[[#ATTR:]] -// LLVM-O0: attributes #[[#ATTR]] = { noinline optnone } -// LLVM-O2-NOT: attributes #[[#]] = { noinline optnone } +// LLVM-O0: attributes #[[#ATTR]] = { noinline nounwind optnone } +// LLVM-O2-NOT: attributes #[[#]] = { noinline nounwind optnone } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 1f1e66386581..f8f746128998 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -788,29 +788,4 @@ cir.func @const_type_mismatch() -> () { // expected-error@+1 {{'cir.const' op result type ('!cir.int') does not match value type ('!cir.int')}} %2 = cir.const(#cir.int<0> : !s8i) : !u8i cir.return -} - -// ----- - -!s32i = !cir.int - -module { - cir.func @div(%x : !s32i, %y : !s32i) -> !s32i { - %3 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.return %3 : !s32i - } - - cir.func @foo(%x : !s32i, %y : !s32i) -> !cir.ptr { - %11 = cir.scope { - %10 = cir.scope { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["exception_info"] {alignment = 16 : i64} - %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i - // expected-error@-1 {{'cir.try_call' op expected to be within a 'cir.try' region}} - %1 = cir.load %0 : cir.ptr >, !cir.ptr - cir.yield %1 : !cir.ptr - } : !cir.ptr - cir.yield %10 : !cir.ptr - } : !cir.ptr - cir.return %11 : !cir.ptr - } } \ No newline at end of file diff --git a/clang/test/CIR/IR/try.cir b/clang/test/CIR/IR/try.cir new file mode 100644 index 000000000000..30a516e422e0 --- /dev/null +++ b/clang/test/CIR/IR/try.cir @@ -0,0 +1,24 @@ +// Test attempts to build bogus CIR +// RUN: cir-opt %s + +!s32i = !cir.int + +module { + cir.func @div(%x : !s32i, %y : !s32i) -> !s32i { + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.return %3 : !s32i + } + + cir.func @foo(%x : !s32i, %y : !s32i) -> !cir.ptr { + %11 = cir.scope { + %10 = cir.scope { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["exception_info"] {alignment = 16 : i64} + %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i + %1 = cir.load %0 : cir.ptr >, !cir.ptr + cir.yield %1 : !cir.ptr + } : !cir.ptr + cir.yield %10 : !cir.ptr + } : !cir.ptr + cir.return %11 : !cir.ptr + } +} \ No newline at end of file From 110479749392fe02b3be668ed622f34a28b1f63a Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 15 Feb 2024 22:53:10 +0300 Subject: [PATCH 1415/2301] [CIR][CodeGen] Inline assembly: adds operands (#465) The next step in inline-assembly support: we add instruction operands! Nothing interesting, just some copy-pasta from the `codegen` with some sort of simplifications for now. Well, I'm not sure `functional-type` is the best way to print operands though it's used in mlir's `InlineAsmOp`. But anyways, maybe you have a better idea. There are two or three steps ahead, so we are not that far from being able to run something! --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 7 ++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 16 ++- clang/lib/CIR/CodeGen/CIRAsm.cpp | 115 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRDataLayout.h | 4 + clang/lib/CIR/CodeGen/CIRGenFunction.h | 12 ++ clang/lib/CIR/CodeGen/TargetInfo.h | 22 ++++ clang/test/CIR/CodeGen/asm.c | 12 +- 7 files changed, 176 insertions(+), 12 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 71fadd74f6b2..fed1176a1603 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -41,6 +41,13 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { public: CIRBaseBuilderTy(mlir::MLIRContext &C) : mlir::OpBuilder(&C) {} + mlir::Value getConstAPSInt(mlir::Location loc, const llvm::APSInt &val) { + auto ty = mlir::cir::IntType::get(getContext(), val.getBitWidth(), + val.isSigned()); + return create(loc, ty, + getAttr(ty, val)); + } + mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, const llvm::APInt &val) { return create(loc, typ, diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e282d3e8cc6b..7fe243bd0e69 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2825,22 +2825,28 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { ``` ```mlir - cir.asm(x86_att, {"foo" ""}) - cir.asm(x86_att, {"bar $$42 $0" "=r,=&r,1"}) - cir.asm(x86_att, {"baz $$42 $0" "=r,=&r,0,1"}) + %0 = cir.alloca !s32i, cir.ptr , ["x", init] + %1 = cir.alloca !s32i, cir.ptr , ["y", init] + ... + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.load %1 : cir.ptr , !s32i + cir.asm(x86_att, {"foo" ""} : () -> () + cir.asm(x86_att, {"bar $$42 $0" "=r,=&r,1"} %2 : (!s32i) -> () + cir.asm(x86_att, {"baz $$42 $0" "=r,=&r,0,1"} %3, %2 : (!s32i, !s32i) -> () ``` }]; let results = (outs Optional:$res); let arguments = ( - ins StrAttr:$asm_string, + ins Variadic:$operands, + StrAttr:$asm_string, StrAttr:$constraints, AsmFlavor:$asm_flavor); let assemblyFormat = [{ `(`$asm_flavor`,` `{` $asm_string $constraints `}` `)` attr-dict - `:` type($res) + operands `:` functional-type(operands, results) }]; } diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 1e2f11e66eac..4d1a97e86d58 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -137,6 +137,65 @@ static void collectInOutConstrainsInfos(const CIRGenFunction &cgf, } } +mlir::Value CIRGenFunction::buildAsmInputLValue( + const TargetInfo::ConstraintInfo &Info, LValue InputValue, + QualType InputType, std::string &ConstraintStr, SourceLocation Loc) { + + if (Info.allowsRegister() || !Info.allowsMemory()) { + if (hasScalarEvaluationKind(InputType)) + return buildLoadOfLValue(InputValue, Loc).getScalarVal(); + + mlir::Type Ty = convertType(InputType); + uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); + if ((Size <= 64 && llvm::isPowerOf2_64(Size)) || + getTargetHooks().isScalarizableAsmOperand(*this, Ty)) { + Ty = mlir::cir::IntType::get(builder.getContext(), Size, false); + + return builder.createLoad(getLoc(Loc), + InputValue.getAddress().withElementType(Ty)); + } + } + + Address Addr = InputValue.getAddress(); + ConstraintStr += '*'; + return Addr.getPointer(); +} + +mlir::Value +CIRGenFunction::buildAsmInput(const TargetInfo::ConstraintInfo &Info, + const Expr *InputExpr, + std::string &ConstraintStr) { + auto loc = getLoc(InputExpr->getExprLoc()); + + // If this can't be a register or memory, i.e., has to be a constant + // (immediate or symbolic), try to emit it as such. + if (!Info.allowsRegister() && !Info.allowsMemory()) { + if (Info.requiresImmediateConstant()) { + Expr::EvalResult EVResult; + InputExpr->EvaluateAsRValue(EVResult, getContext(), true); + + llvm::APSInt IntResult; + if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(), + getContext())) + return builder.getConstAPSInt(loc, IntResult); + } + + Expr::EvalResult Result; + if (InputExpr->EvaluateAsInt(Result, getContext())) + builder.getConstAPSInt(loc, Result.Val.getInt()); + } + + if (Info.allowsRegister() || !Info.allowsMemory()) + if (CIRGenFunction::hasScalarEvaluationKind(InputExpr->getType())) + return buildScalarExpr(InputExpr); + if (InputExpr->getStmtClass() == Expr::CXXThisExprClass) + return buildScalarExpr(InputExpr); + InputExpr = InputExpr->IgnoreParenNoopCasts(getContext()); + LValue Dest = buildLValue(InputExpr); + return buildAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, + InputExpr->getExprLoc()); +} + mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { // Assemble the final asm string. std::string AsmString = S.generateAsmString(getContext()); @@ -153,6 +212,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { // Keep track of input constraints. std::string InOutConstraints; + std::vector InOutArgs; // Keep track of out constraints for tied input operand. std::vector OutputConstraints; @@ -176,6 +236,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { Info.earlyClobber(), &GCCReg); OutputConstraints.push_back(OutputConstraint); + LValue Dest = buildLValue(OutExpr); if (!Constraints.empty()) Constraints += ','; @@ -188,18 +249,40 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { if (!Info.allowsMemory() && IsScalarOrAggregate) { Constraints += "=" + OutputConstraint; } else { + Address DestAddr = Dest.getAddress(); + + // Matrix types in memory are represented by arrays, but accessed through + // vector pointers, with the alignment specified on the access operation. + // For inline assembly, update pointer arguments to use vector pointers. + // Otherwise there will be a mis-match if the matrix is also an + // input-argument which is represented as vector. + if (isa(OutExpr->getType().getCanonicalType())) + DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType())); + + Args.push_back(DestAddr.getPointer()); Constraints += "=*"; Constraints += OutputConstraint; } if (Info.isReadWrite()) { InOutConstraints += ','; + const Expr *InputExpr = S.getOutputExpr(i); + + mlir::Value Arg = + buildAsmInputLValue(Info, Dest, InputExpr->getType(), + InOutConstraints, InputExpr->getExprLoc()); + + if (mlir::Type AdjTy = getTargetHooks().adjustInlineAsmType( + *this, OutputConstraint, Arg.getType())) + Arg = builder.createBitcast(Arg, AdjTy); // Only tie earlyclobber physregs. if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber())) InOutConstraints += llvm::utostr(i); else InOutConstraints += OutputConstraint; + + InOutArgs.push_back(Arg); } } // iterate over output operands @@ -221,6 +304,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { getTarget(), CGM, S, false /* No EarlyClobber */); std::string ReplaceConstraint(InputConstraint); + mlir::Value Arg = buildAsmInput(Info, InputExpr, Constraints); // If this input argument is tied to a larger output result, extend the // input to be the same size as the output. The LLVM backend wants to see @@ -229,14 +313,42 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { // that is usually cheaper, but LLVM IR should really get an anyext someday. if (Info.hasTiedOperand()) { unsigned Output = Info.getTiedOperand(); + QualType OutputType = S.getOutputExpr(Output)->getType(); + QualType InputTy = InputExpr->getType(); + + if (getContext().getTypeSize(OutputType) > + getContext().getTypeSize(InputTy)) { + // Use ptrtoint as appropriate so that we can do our extension. + if (isa(Arg.getType())) + Arg = builder.createPtrToInt(Arg, UIntPtrTy); + mlir::Type OutputTy = convertType(OutputType); + if (isa(OutputTy)) + Arg = builder.createIntCast(Arg, OutputTy); + else if (isa(OutputTy)) + Arg = builder.createIntCast(Arg, UIntPtrTy); + else if (isa(OutputTy)) + Arg = builder.createFloatingCast(Arg, OutputTy); + } // Deal with the tied operands' constraint code in adjustInlineAsmType. ReplaceConstraint = OutputConstraints[Output]; } + if (mlir::Type AdjTy = getTargetHooks().adjustInlineAsmType( + *this, ReplaceConstraint, Arg.getType())) + Arg = builder.createBitcast(Arg, AdjTy); + else + CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input) + << InputExpr->getType() << InputConstraint; + + Args.push_back(Arg); Constraints += InputConstraint; } // iterate over input operands + // Append the "input" part of inout constraints. + for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) { + Args.push_back(InOutArgs[i]); + } Constraints += InOutConstraints; mlir::Type ResultType; @@ -252,7 +364,8 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { AsmFlavor AsmFlavor = inferFlavor(CGM, S); builder.create(getLoc(S.getAsmLoc()), ResultType, - AsmString, Constraints, AsmFlavor); + Args, AsmString, Constraints, + AsmFlavor); return mlir::success(); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRDataLayout.h b/clang/lib/CIR/CodeGen/CIRDataLayout.h index bc4c7762d5bc..bf9a49202cfb 100644 --- a/clang/lib/CIR/CodeGen/CIRDataLayout.h +++ b/clang/lib/CIR/CodeGen/CIRDataLayout.h @@ -69,6 +69,10 @@ class CIRDataLayout { return layout.getTypeSizeInBits(Ty); } + unsigned getTypeSizeInBits(mlir::Type Ty) const { + return layout.getTypeSizeInBits(Ty); + } + mlir::Type getIntPtrType(mlir::Type Ty) const { assert(Ty.isa() && "Expected pointer type"); auto IntTy = mlir::cir::IntType::get(Ty.getContext(), diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index e0dedde5ced2..60c409d82527 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -529,6 +529,10 @@ class CIRGenFunction : public CIRGenTypeCache { const TargetInfo &getTarget() const { return CGM.getTarget(); } + const TargetCIRGenInfo &getTargetHooks() const { + return CGM.getTargetCIRGenInfo(); + } + /// Helpers to convert Clang's SourceLocation to a MLIR Location. mlir::Location getLoc(clang::SourceLocation SLoc); @@ -938,6 +942,14 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildAsmStmt(const clang::AsmStmt &S); + mlir::Value buildAsmInputLValue(const TargetInfo::ConstraintInfo &Info, + LValue InputValue, QualType InputType, + std::string &ConstraintStr, + SourceLocation Loc); + + mlir::Value buildAsmInput(const TargetInfo::ConstraintInfo &Info, + const Expr *InputExpr, std::string &ConstraintStr); + mlir::LogicalResult buildIfStmt(const clang::IfStmt &S); mlir::LogicalResult buildReturnStmt(const clang::ReturnStmt &S); diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index a06f59052302..c2869ccc1e49 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -15,11 +15,15 @@ #define LLVM_CLANG_LIB_CIR_TARGETINFO_H #include "ABIInfo.h" +#include "CIRGenValue.h" +#include "mlir/IR/Types.h" #include namespace cir { +class CIRGenFunction; + /// This class organizes various target-specific codegeneration issues, like /// target-specific attributes, builtins and so on. /// Equivalent to LLVM's TargetCodeGenInfo. @@ -31,6 +35,24 @@ class TargetCIRGenInfo { /// Returns ABI info helper for the target. const ABIInfo &getABIInfo() const { return *Info; } + + virtual bool isScalarizableAsmOperand(CIRGenFunction &CGF, + mlir::Type Ty) const { + return false; + } + + /// Corrects the MLIR type for a given constraint and "usual" + /// type. + /// + /// \returns A new MLIR type, possibly the same as the original + /// on success + virtual mlir::Type adjustInlineAsmType(CIRGenFunction &CGF, + llvm::StringRef Constraint, + mlir::Type Ty) const { + return Ty; + } + + virtual ~TargetCIRGenInfo() {} }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/asm.c b/clang/test/CIR/CodeGen/asm.c index 6aa4ef5e6355..fa070f845027 100644 --- a/clang/test/CIR/CodeGen/asm.c +++ b/clang/test/CIR/CodeGen/asm.c @@ -1,32 +1,32 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -//CHECK: cir.asm(x86_att, {"" ""}) +//CHECK: cir.asm(x86_att, {"" ""}) : () -> () void empty1() { __asm__ volatile("" : : : ); } -//CHECK: cir.asm(x86_att, {"xyz" ""}) +//CHECK: cir.asm(x86_att, {"xyz" ""}) : () -> () void empty2() { __asm__ volatile("xyz" : : : ); } -//CHECK: cir.asm(x86_att, {"" "=*m,m"}) +//CHECK: cir.asm(x86_att, {"" "=*m,*m"}) %0, %0 : (!cir.ptr, !cir.ptr) -> () void t1(int x) { __asm__ volatile("" : "+m"(x)); } -//CHECK: cir.asm(x86_att, {"" "m"}) +//CHECK: cir.asm(x86_att, {"" "*m"}) %0 : (!cir.ptr) -> () void t2(int x) { __asm__ volatile("" : : "m"(x)); } -//CHECK: cir.asm(x86_att, {"" "=*m"}) +//CHECK: cir.asm(x86_att, {"" "=*m"}) %0 : (!cir.ptr) -> () void t3(int x) { __asm__ volatile("" : "=m"(x)); } -//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1"}) +//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1"}) %1 : (!s32i) -> () void t4(int x) { __asm__ volatile("" : "=&r"(x), "+&r"(x)); } \ No newline at end of file From 42b3850d72cd1d740e36f38026251b3809a0b20b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 15 Feb 2024 15:33:06 -0800 Subject: [PATCH 1416/2301] [CIR][NFC] Refactor more call related mechanisms --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 23 +++++---- .../clang/CIR/Interfaces/CIROpInterfaces.td | 8 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 47 ++++++++++++------- 3 files changed, 50 insertions(+), 28 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7fe243bd0e69..b3cc6103ee2e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2091,11 +2091,20 @@ class CIR_CallOp extra_traits = []> : (*this)->setAttr(getCalleeAttrName(), callee.get()); setOperand(0, callee.get()); } + + bool isIndirect() { return !getCallee(); } + mlir::Value getIndirectCall(); }]; let hasCustomAssemblyFormat = 1; let skipDefaultBuilders = 1; let hasVerifier = 0; + + dag commonArgs = (ins + OptionalAttr:$callee, + Variadic:$arg_ops, + OptionalAttr:$ast + ); } def CallOp : CIR_CallOp<"call"> { @@ -2126,9 +2135,7 @@ def CallOp : CIR_CallOp<"call"> { ``` }]; - let arguments = (ins OptionalAttr:$callee, - Variadic:$operands, - OptionalAttr:$ast); + let arguments = commonArgs; let results = (outs Variadic); let builders = [ @@ -2180,10 +2187,10 @@ def TryCallOp : CIR_CallOp<"try_call"> { ``` }]; - let arguments = (ins OptionalAttr:$callee, - ExceptionInfoPtrPtr:$exceptionInfo, - Variadic:$callOps, - OptionalAttr:$ast); + let arguments = !con((ins + ExceptionInfoPtrPtr:$exceptionInfo + ), commonArgs); + let results = (outs Variadic); let builders = [ @@ -2198,8 +2205,8 @@ def TryCallOp : CIR_CallOp<"try_call"> { OpBuilder<(ins "Value":$ind_target, "mlir::Value":$exception, "FuncType":$fn_type, CArg<"ValueRange", "{}">:$operands), [{ - $_state.addOperands(ValueRange{ind_target}); $_state.addOperands(ValueRange{exception}); + $_state.addOperands(ValueRange{ind_target}); $_state.addOperands(operands); if (!fn_type.isVoid()) $_state.addTypes(fn_type.getReturnType()); diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td index 0b176f8a0701..b08e07a63d67 100644 --- a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td @@ -23,11 +23,11 @@ let cppNamespace = "::mlir::cir" in { InterfaceMethod<"", "mlir::Operation::operand_iterator", "arg_operand_end", (ins)>, InterfaceMethod< - "Return the operand at index 'i', accounts for indirect call", - "mlir::Value", "getArgOperand", (ins "unsigned":$i)>, + "Return the operand at index 'i', accounts for indirect call or " + "exception info", "mlir::Value", "getArgOperand", (ins "unsigned":$i)>, InterfaceMethod< - "Return the number of operands, accounts for indirect call", - "unsigned", "getNumArgOperands", (ins)>, + "Return the number of operands, accounts for indirect call or " + "exception info", "unsigned", "getNumArgOperands", (ins)>, ]; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 07afd0a17a55..1717cf3216d5 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1898,9 +1898,14 @@ LogicalResult cir::FuncOp::verify() { // CallOp //===----------------------------------------------------------------------===// +mlir::Value cir::CallOp::getIndirectCall() { + assert(isIndirect()); + return getOperand(0); +} + mlir::Operation::operand_iterator cir::CallOp::arg_operand_begin() { auto arg_begin = operand_begin(); - if (!getCallee()) + if (isIndirect()) arg_begin++; return arg_begin; } @@ -1910,13 +1915,13 @@ mlir::Operation::operand_iterator cir::CallOp::arg_operand_end() { /// Return the operand at index 'i', accounts for indirect call. Value cir::CallOp::getArgOperand(unsigned i) { - if (!getCallee()) + if (isIndirect()) i++; return getOperand(i); } -/// Return the number of operands, , accounts for indirect call. +/// Return the number of operands, accounts for indirect call. unsigned cir::CallOp::getNumArgOperands() { - if (!getCallee()) + if (isIndirect()) return this->getOperation()->getNumOperands() - 1; return this->getOperation()->getNumOperands(); } @@ -2029,7 +2034,8 @@ static ::mlir::ParseResult parseCallCommon( } void printCallCommon( - Operation *op, mlir::FlatSymbolRefAttr flatSym, ::mlir::OpAsmPrinter &state, + Operation *op, mlir::Value indirectCallee, mlir::FlatSymbolRefAttr flatSym, + ::mlir::OpAsmPrinter &state, llvm::function_ref customOpHandler = []() {}) { state << ' '; @@ -2039,7 +2045,8 @@ void printCallCommon( if (flatSym) { // Direct calls state.printAttributeWithoutType(flatSym); } else { // Indirect calls - state << op->getOperand(0); + assert(indirectCallee); + state << indirectCallee; } state << "("; state << ops; @@ -2064,23 +2071,30 @@ ::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, } void CallOp::print(::mlir::OpAsmPrinter &state) { - printCallCommon(*this, getCalleeAttr(), state); + mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; + printCallCommon(*this, indirectCallee, getCalleeAttr(), state); } //===----------------------------------------------------------------------===// // TryCallOp //===----------------------------------------------------------------------===// +mlir::Value cir::TryCallOp::getIndirectCall() { + // First operand is the exception pointer, skip it + assert(isIndirect()); + return getOperand(1); +} + mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_begin() { auto arg_begin = operand_begin(); - if (!getCallee()) - arg_begin++; // First operand is the exception pointer, skip it. - // + arg_begin++; + if (isIndirect()) + arg_begin++; + // FIXME(cir): for this and all the other calculations in the other methods: // we currently have no basic block arguments on cir.try_call, but if it gets // to that, this needs further adjustment. - arg_begin++; return arg_begin; } mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_end() { @@ -2089,19 +2103,19 @@ mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_end() { /// Return the operand at index 'i', accounts for indirect call. Value cir::TryCallOp::getArgOperand(unsigned i) { - if (!getCallee()) - i++; // First operand is the exception pointer, skip it. i++; + if (isIndirect()) + i++; return getOperand(i); } /// Return the number of operands, , accounts for indirect call. unsigned cir::TryCallOp::getNumArgOperands() { unsigned numOperands = this->getOperation()->getNumOperands(); - if (!getCallee()) - numOperands--; // First operand is the exception pointer, skip it. numOperands--; + if (isIndirect()) + numOperands--; return numOperands; } @@ -2156,7 +2170,8 @@ void TryCallOp::print(::mlir::OpAsmPrinter &state) { state << " exception("; state << getExceptionInfo(); state << ")"; - printCallCommon(*this, getCalleeAttr(), state); + mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; + printCallCommon(*this, indirectCallee, getCalleeAttr(), state); } //===----------------------------------------------------------------------===// From 03e2428ac2e8eca8e252b726d4e6788a82805a39 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Sat, 17 Feb 2024 01:12:27 +0300 Subject: [PATCH 1417/2301] [CIR][CodeGen] Adds clobbers to inline assembly (#469) One more tiny step! This a tiny PR that adds clobbers to constraint string. Note, that `~{dirflag},~{fpsr},~{flags}` is a [X86](https://github.com/llvm/clangir/blob/main/clang/lib/Basic/Targets/X86.h#L281) dependent clobbers. Basically, the next things remain: - lowering - store the results of the `cir.asm` --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +- clang/lib/CIR/CodeGen/CIRAsm.cpp | 69 +++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + clang/test/CIR/CodeGen/asm.c | 12 ++-- 4 files changed, 78 insertions(+), 10 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b3cc6103ee2e..d82424653183 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2837,9 +2837,9 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { ... %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.load %1 : cir.ptr , !s32i - cir.asm(x86_att, {"foo" ""} : () -> () - cir.asm(x86_att, {"bar $$42 $0" "=r,=&r,1"} %2 : (!s32i) -> () - cir.asm(x86_att, {"baz $$42 $0" "=r,=&r,0,1"} %3, %2 : (!s32i, !s32i) -> () + cir.asm(x86_att, {"foo" "~{dirflag},~{fpsr},~{flags}"} : () -> () + cir.asm(x86_att, {"bar $$42 $0" "=r,=&r,1,~{dirflag},~{fpsr},~{flags}"} %2 : (!s32i) -> () + cir.asm(x86_att, {"baz $$42 $0" "=r,=&r,0,1,~{dirflag},~{fpsr},~{flags}"} %3, %2 : (!s32i, !s32i) -> () ``` }]; diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 4d1a97e86d58..e3d0bfb1ad5a 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -108,6 +108,64 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, return (EarlyClobber ? "&{" : "{") + Register.str() + "}"; } +static void collectClobbers(const CIRGenFunction &cgf, const AsmStmt &S, + std::string &constraints, bool &hasUnwindClobber, + bool &readOnly, bool readNone) { + + hasUnwindClobber = false; + auto &cgm = cgf.getCIRGenModule(); + + // Clobbers + for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) { + StringRef clobber = S.getClobber(i); + if (clobber == "memory") + readOnly = readNone = false; + else if (clobber == "unwind") { + hasUnwindClobber = true; + continue; + } else if (clobber != "cc") { + clobber = cgf.getTarget().getNormalizedGCCRegisterName(clobber); + if (cgm.getCodeGenOpts().StackClashProtector && + cgf.getTarget().isSPRegName(clobber)) { + cgm.getDiags().Report(S.getAsmLoc(), + diag::warn_stack_clash_protection_inline_asm); + } + } + + if (isa(&S)) { + if (clobber == "eax" || clobber == "edx") { + if (constraints.find("=&A") != std::string::npos) + continue; + std::string::size_type position1 = + constraints.find("={" + clobber.str() + "}"); + if (position1 != std::string::npos) { + constraints.insert(position1 + 1, "&"); + continue; + } + std::string::size_type position2 = constraints.find("=A"); + if (position2 != std::string::npos) { + constraints.insert(position2 + 1, "&"); + continue; + } + } + } + if (!constraints.empty()) + constraints += ','; + + constraints += "~{"; + constraints += clobber; + constraints += '}'; + } + + // Add machine specific clobbers + std::string_view machineClobbers = cgf.getTarget().getClobbers(); + if (!machineClobbers.empty()) { + if (!constraints.empty()) + constraints += ','; + constraints += machineClobbers; + } +} + using constraintInfos = SmallVector; static void collectInOutConstrainsInfos(const CIRGenFunction &cgf, @@ -217,7 +275,13 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { // Keep track of out constraints for tied input operand. std::vector OutputConstraints; - assert(!S.getNumClobbers() && "asm clobbers operands are NYI"); + // An inline asm can be marked readonly if it meets the following conditions: + // - it doesn't have any sideeffects + // - it doesn't clobber memory + // - it doesn't return a value by-reference + // It can be marked readnone if it doesn't have any input memory constraints + // in addition to meeting the conditions listed above. + bool ReadOnly = true, ReadNone = true; for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; @@ -351,6 +415,9 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { } Constraints += InOutConstraints; + bool HasUnwindClobber = false; + collectClobbers(*this, S, Constraints, HasUnwindClobber, ReadOnly, ReadNone); + mlir::Type ResultType; if (ResultRegTypes.size() == 1) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 60c409d82527..b66b7daf0aef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -441,6 +441,7 @@ class CIRGenFunction : public CIRGenTypeCache { CIRGenBuilderTy &getBuilder() { return builder; } CIRGenModule &getCIRGenModule() { return CGM; } + const CIRGenModule &getCIRGenModule() const { return CGM; } mlir::Block *getCurFunctionEntryBlock() { auto Fn = dyn_cast(CurFn); diff --git a/clang/test/CIR/CodeGen/asm.c b/clang/test/CIR/CodeGen/asm.c index fa070f845027..33fdac2e721b 100644 --- a/clang/test/CIR/CodeGen/asm.c +++ b/clang/test/CIR/CodeGen/asm.c @@ -1,32 +1,32 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -//CHECK: cir.asm(x86_att, {"" ""}) : () -> () +//CHECK: cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) : () -> () void empty1() { __asm__ volatile("" : : : ); } -//CHECK: cir.asm(x86_att, {"xyz" ""}) : () -> () +//CHECK: cir.asm(x86_att, {"xyz" "~{dirflag},~{fpsr},~{flags}"}) : () -> () void empty2() { __asm__ volatile("xyz" : : : ); } -//CHECK: cir.asm(x86_att, {"" "=*m,*m"}) %0, %0 : (!cir.ptr, !cir.ptr) -> () +//CHECK: cir.asm(x86_att, {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) %0, %0 : (!cir.ptr, !cir.ptr) -> () void t1(int x) { __asm__ volatile("" : "+m"(x)); } -//CHECK: cir.asm(x86_att, {"" "*m"}) %0 : (!cir.ptr) -> () +//CHECK: cir.asm(x86_att, {"" "*m,~{dirflag},~{fpsr},~{flags}"}) %0 : (!cir.ptr) -> () void t2(int x) { __asm__ volatile("" : : "m"(x)); } -//CHECK: cir.asm(x86_att, {"" "=*m"}) %0 : (!cir.ptr) -> () +//CHECK: cir.asm(x86_att, {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) %0 : (!cir.ptr) -> () void t3(int x) { __asm__ volatile("" : "=m"(x)); } -//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1"}) %1 : (!s32i) -> () +//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) %1 : (!s32i) -> () void t4(int x) { __asm__ volatile("" : "=&r"(x), "+&r"(x)); } \ No newline at end of file From 5c12af29e53f02b86769ef7a828a2c50a4f3ff8c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 16 Feb 2024 17:59:31 -0800 Subject: [PATCH 1418/2301] [CIR][NFC] Use an attribute alias for extra_func_attrs --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 23 ++++++++++++++--------- clang/test/CIR/CodeGen/function-attrs.cpp | 9 ++++++--- clang/test/CIR/CodeGen/optnone.cpp | 6 ++++-- clang/test/CIR/IR/inlineAttr.cir | 10 +++++++--- clang/test/CIR/Lowering/alloca.cir | 2 +- 5 files changed, 32 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 1717cf3216d5..3186c5573527 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -84,6 +84,11 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << "bfi_" << bitfield.getName().str(); return AliasResult::FinalAlias; } + if (auto extraFuncAttr = + attr.dyn_cast()) { + os << "fn_attr"; + return AliasResult::FinalAlias; + } return AliasResult::NoAlias; } @@ -1717,20 +1722,20 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { hasAlias = true; } - // If extra func attributes are present, parse them. - NamedAttrList extraAttrs; + Attribute extraAttrs; if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) { if (parser.parseLParen().failed()) return failure(); - if (parser.parseOptionalAttrDict(extraAttrs).failed()) + if (parser.parseAttribute(extraAttrs).failed()) return failure(); if (parser.parseRParen().failed()) return failure(); + } else { + NamedAttrList empty; + extraAttrs = mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), empty.getDictionary(builder.getContext())); } - state.addAttribute(getExtraAttrsAttrName(state.name), - mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), - extraAttrs.getDictionary(builder.getContext()))); + state.addAttribute(getExtraAttrsAttrName(state.name), extraAttrs); // Parse the optional function body. auto *body = state.addRegion(); @@ -1821,8 +1826,8 @@ void cir::FuncOp::print(OpAsmPrinter &p) { if (!getExtraAttrs().getElements().empty()) { p << " extra("; - p.printOptionalAttrDict(getExtraAttrs().getElements().getValue()); - p << " )"; + p.printAttributeWithoutType(getExtraAttrs()); + p << ")"; } // Print the body if this is not an external function. diff --git a/clang/test/CIR/CodeGen/function-attrs.cpp b/clang/test/CIR/CodeGen/function-attrs.cpp index fb5e3b43a464..4975a3f31253 100644 --- a/clang/test/CIR/CodeGen/function-attrs.cpp +++ b/clang/test/CIR/CodeGen/function-attrs.cpp @@ -24,10 +24,13 @@ int s3(int a, int b) { return x; } +// CIR: #fn_attr = #cir, nothrow = #cir.nothrow})> +// CIR: #fn_attr1 = #cir, nothrow = #cir.nothrow})> +// CIR: #fn_attr2 = #cir, nothrow = #cir.nothrow})> -// CIR: cir.func linkonce_odr @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline, nothrow = #cir.nothrow} ) -// CIR: cir.func @_Z2s1ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline, nothrow = #cir.nothrow} ) -// CIR: cir.func @_Z2s2ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline, nothrow = #cir.nothrow} ) +// CIR: cir.func linkonce_odr @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra(#fn_attr) +// CIR: cir.func @_Z2s1ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra(#fn_attr1) +// CIR: cir.func @_Z2s2ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra(#fn_attr2) // CIR: cir.func @_Z2s3ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} { // LLVM: define i32 @_Z2s1ii(i32 %0, i32 %1) {{.*}} #[[#ATTR1:]] diff --git a/clang/test/CIR/CodeGen/optnone.cpp b/clang/test/CIR/CodeGen/optnone.cpp index bf8e0a675468..7fa22865c274 100644 --- a/clang/test/CIR/CodeGen/optnone.cpp +++ b/clang/test/CIR/CodeGen/optnone.cpp @@ -17,8 +17,10 @@ int s0(int a, int b) { return x; } -// CIR-O0: cir.func @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra( {inline = #cir.inline, nothrow = #cir.nothrow, optnone = #cir.optnone} ) -// CIR-O2-NOT: cir.func @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} optnone +// CIR-O0: #fn_attr = #cir, nothrow = #cir.nothrow, optnone = #cir.optnone})> +// CIR-O0: cir.func @_Z2s0ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra(#fn_attr) + +// CIR-O2-NOT: #fn_attr ={{.*}} optnone // LLVM-O0: define i32 @_Z2s0ii(i32 %0, i32 %1) #[[#ATTR:]] // LLVM-O0: attributes #[[#ATTR]] = { noinline nounwind optnone } diff --git a/clang/test/CIR/IR/inlineAttr.cir b/clang/test/CIR/IR/inlineAttr.cir index 54275afae6db..76de9acbb736 100644 --- a/clang/test/CIR/IR/inlineAttr.cir +++ b/clang/test/CIR/IR/inlineAttr.cir @@ -1,11 +1,15 @@ // RUN: cir-opt %s | FileCheck %s -check-prefix=CIR // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +#fn_attr = #cir})> + module { - cir.func @l0() extra( {cir.inline = #cir.inline} ) { + cir.func @l0() extra(#fn_attr) { cir.return } } -// CIR: cir.func @l0() extra( {cir.inline = #cir.inline} ) { -// MLIR: llvm.func @l0() attributes {cir.extra_attrs = #cir})>} +// CIR: #fn_attr = #cir})> +// CIR: cir.func @l0() extra(#fn_attr) { + +// MLIR: llvm.func @l0() attributes {cir.extra_attrs = #fn_attr} diff --git a/clang/test/CIR/Lowering/alloca.cir b/clang/test/CIR/Lowering/alloca.cir index faa99843ca74..4c512a762068 100644 --- a/clang/test/CIR/Lowering/alloca.cir +++ b/clang/test/CIR/Lowering/alloca.cir @@ -10,7 +10,7 @@ module { } // MLIR: module { -// MLIR-NEXT: llvm.func @foo(%arg0: i32) attributes {cir.extra_attrs = #cir} { +// MLIR-NEXT: llvm.func @foo(%arg0: i32) attributes {cir.extra_attrs = #fn_attr} { // MLIR-NEXT: %0 = llvm.alloca %arg0 x i32 {alignment = 16 : i64} : (i32) -> !llvm.ptr // MLIR-NEXT: llvm.return // MLIR-NEXT: } From 5c0a4531187daebc2ff59ea3c1a02b927217c718 Mon Sep 17 00:00:00 2001 From: mrsoliman Date: Tue, 20 Feb 2024 17:12:36 -0500 Subject: [PATCH 1419/2301] [CIR][Lowering] Handle unsupported types for CIR-MLIR type conversion (#471) * Pointers to CIR types that do not have converters (e.g. cir.struct) cause crashes due to passing null types to construct mlir::MemRefType. * This commit adds checks for pointers and alloca lowering to fail gracefully if the underlying type can not be converted. --- clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 948d0a34e376..6437664e932f 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -103,6 +103,10 @@ class CIRAllocaOpLowering auto type = adaptor.getAllocaType(); auto mlirType = getTypeConverter()->convertType(type); + // FIXME: Some types can not be converted yet (e.g. struct) + if (!mlirType) + return mlir::LogicalResult::failure(); + auto memreftype = mlir::MemRefType::get({}, mlirType); rewriter.replaceOpWithNewOp(op, memreftype, op.getAlignmentAttr()); @@ -615,6 +619,9 @@ static mlir::TypeConverter prepareTypeConverter() { mlir::TypeConverter converter; converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { auto ty = converter.convertType(type.getPointee()); + // FIXME: The pointee type might not be converted (e.g. struct) + if (!ty) + return nullptr; return mlir::MemRefType::get({}, ty); }); converter.addConversion( From debbb680839802b12422667b13d0e4a5c73d95c6 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Wed, 21 Feb 2024 01:14:36 +0300 Subject: [PATCH 1420/2301] [CIR][CIRGen] Support for zero initialization of arrays (#468) As in original codegen this PR uses the do-while loop to initialize the array elements with the filler expression. But unlike the original codegen we allocates the temporary variable on stack. Allocation is necessary to store the pointer to the first uniinitialized element. --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 69 ++++++++++++++++++++++++- clang/test/CIR/CodeGen/array-init.c | 32 ++++++++++++ 2 files changed, 100 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 316a3b774426..82160e11df6f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -433,6 +433,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, QualType elementType = CGF.getContext().getAsArrayType(ArrayQTy)->getElementType(); + QualType elementPtrType = CGF.getContext().getPointerType(elementType); auto cirElementType = CGF.convertType(elementType); auto cirElementPtrType = mlir::cir::PointerType::get( @@ -498,7 +499,73 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, if (NumInitElements != NumArrayElements && !(Dest.isZeroed() && hasTrivialFiller && CGF.getTypes().isZeroInitializable(elementType))) { - llvm_unreachable("zero-initialization of arrays NIY"); + + // Use an actual loop. This is basically + // do { *array++ = filler; } while (array != end); + + auto &builder = CGF.getBuilder(); + + // Advance to the start of the rest of the array. + if (NumInitElements) { + auto one = + builder.getConstInt(loc, CGF.PtrDiffTy.cast(), 1); + element = builder.create(loc, cirElementPtrType, + element, one); + + assert(!endOfInit.isValid() && "destructed types NIY"); + } + + // Allocate the temporary variable + // to store the pointer to first unitialized element + auto tmpAddr = CGF.CreateTempAlloca( + cirElementPtrType, CGF.getPointerAlign(), loc, "arrayinit.temp"); + LValue tmpLV = CGF.makeAddrLValue(tmpAddr, elementPtrType); + CGF.buildStoreThroughLValue(RValue::get(element), tmpLV); + + // Compute the end of array + auto numArrayElementsConst = builder.getConstInt( + loc, CGF.PtrDiffTy.cast(), NumArrayElements); + mlir::Value end = builder.create( + loc, cirElementPtrType, begin, numArrayElementsConst); + + builder.createDoWhile( + loc, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto currentElement = builder.createLoad(loc, tmpAddr); + mlir::Type boolTy = CGF.getCIRType(CGF.getContext().BoolTy); + auto cmp = builder.create( + loc, boolTy, mlir::cir::CmpOpKind::ne, currentElement, end); + builder.createCondition(cmp); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto currentElement = builder.createLoad(loc, tmpAddr); + + if (UnimplementedFeature::cleanups()) + llvm_unreachable("NYI"); + + // Emit the actual filler expression. + LValue elementLV = CGF.makeAddrLValue( + Address(currentElement, cirElementType, elementAlign), + elementType); + if (ArrayFiller) + buildInitializationToLValue(ArrayFiller, elementLV); + else + buildNullInitializationToLValue(loc, elementLV); + + // Tell the EH cleanup that we finished with the last element. + assert(!endOfInit.isValid() && "destructed types NIY"); + + // Advance pointer and store them to temporary variable + auto one = builder.getConstInt( + loc, CGF.PtrDiffTy.cast(), 1); + auto nextElement = builder.create( + loc, cirElementPtrType, currentElement, one); + CGF.buildStoreThroughLValue(RValue::get(nextElement), tmpLV); + + builder.createYield(loc); + }); } // Leave the partial-array cleanup if we entered one. diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c index ae27805f86fe..0caf767a51d5 100644 --- a/clang/test/CIR/CodeGen/array-init.c +++ b/clang/test/CIR/CodeGen/array-init.c @@ -53,3 +53,35 @@ void bar(int a, int b, int c) { // CHECK-NEXT: [[TH_EL:%.*]] = cir.ptr_stride(%7 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: [[LOAD_C:%.*]] = cir.load [[C]] : cir.ptr , !s32i // CHECK-NEXT: cir.store [[LOAD_C]], [[TH_EL]] : !s32i, cir.ptr + +void zero_init(int x) { + int arr[3] = {x}; +} + +// CHECK: cir.func @zero_init +// CHECK: [[VAR_ALLOC:%.*]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !cir.array, cir.ptr >, ["arr", init] {alignment = 4 : i64} +// CHECK: [[TEMP:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["arrayinit.temp", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, [[VAR_ALLOC]] : !s32i, cir.ptr +// CHECK: [[BEGIN:%.*]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CHECK: [[VAR:%.*]] = cir.load [[VAR_ALLOC]] : cir.ptr , !s32i +// CHECK: cir.store [[VAR]], [[BEGIN]] : !s32i, cir.ptr +// CHECK: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK: [[ZERO_INIT_START:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK: cir.store [[ZERO_INIT_START]], [[TEMP]] : !cir.ptr, cir.ptr > +// CHECK: [[SIZE:%.*]] = cir.const(#cir.int<3> : !s64i) : !s64i +// CHECK: [[END:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[SIZE]] : !s64i), !cir.ptr +// CHECK: cir.do { +// CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : cir.ptr >, !cir.ptr +// CHECK: [[FILLER:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: cir.store [[FILLER]], [[CUR]] : !s32i, cir.ptr +// CHECK: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK: [[NEXT:%.*]] = cir.ptr_stride([[CUR]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK: cir.store [[NEXT]], [[TEMP]] : !cir.ptr, cir.ptr > +// CHECK: cir.yield +// CHECK: } while { +// CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : cir.ptr >, !cir.ptr +// CHECK: [[CMP:%.*]] = cir.cmp(ne, [[CUR]], [[END]]) : !cir.ptr, !cir.bool +// CHECK: cir.condition([[CMP]]) +// CHECK: } +// CHECK: cir.return From 2407c2c9cfc0a2d3276abda219cd89002d66a6cf Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 21 Feb 2024 01:57:32 +0300 Subject: [PATCH 1421/2301] [CIR][CodeGen][Bugfix] Fix storage size for bitfields (#462) This PR fixes a bug caused by `IntType` size limitations in CIR (and by some magic of numbers as well). As you know, we need to create a storage for bit fields that usually contain several of them. There next code fails with `IntType` size check which exceeds 64 bits. ``` typedef struct { uint8_t a; uint8_t b; uint8_t c; int d: 2; int e: 2; int f: 4; int g: 25; int h: 3; int i: 4; int j: 3; int k: 8; int l: 14; } D; void foo() { D d; } ``` Note, if we remove first three fields (or even one) everything will be fine even without this fix, because [this](https://github.com/llvm/clangir/blob/main/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp#L553) check won't pass. The bug is kind of hard to reproduce and I would say it's a rare case. I mean the problem is not only in the number of bit fields that form together something bigger than 64 bits. Well, while iterating over the bit fields in some struct type, we need to stop accumulating bit fields in one storage and start to do the same in another one. Basically, we operate with `Tail` and `StartBitOffset` where the former is an offset of the next field. And once `Tail - StartBitOffset >= 64` we say that it's not possible to create a storage of such size due to `IntType` size limitation. Sounds reasonable. But it can be a case when we can not afford to take the next field because its `Tail` in turn leads to a storage of the size bigger then 64. Thus, we want to check it as well. From the implementation point of view I added one more check to the `IsBetterAsSingleFieldRun` in order to have all these checks for size in a single place. And the check I mentioned before were saving us from hitting this issue. --- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 18 ++++++++++--- clang/test/CIR/CodeGen/bitfields.c | 25 +++++++++++++++++++ 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 62eb8d659748..48146b3caa20 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -500,9 +500,14 @@ void CIRRecordLowering::accumulateBitFields( // bitfield a separate storage component so as it can be accessed directly // with lower cost. auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord, - uint64_t StartBitOffset) { - if (OffsetInRecord >= 64) // See IntType::verify + uint64_t StartBitOffset, + uint64_t nextTail = 0) { + if (OffsetInRecord >= 64 || + (nextTail > StartBitOffset && + nextTail - StartBitOffset >= 64)) { // See IntType::verify return true; + } + if (!cirGenTypes.getModule().getCodeGenOpts().FineGrainedBitfieldAccesses) return false; llvm_unreachable("NYI"); @@ -545,13 +550,18 @@ void CIRRecordLowering::accumulateBitFields( // field is inconsistent with the offset of previous field plus its offset, // skip the block below and go ahead to emit the storage. Otherwise, try to // add bitfields to the run. + uint64_t nextTail = Tail; + if (Field != FieldEnd) + nextTail += Field->getBitWidthValue(); + if (!StartFieldAsSingleRun && Field != FieldEnd && - !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) && + !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset, + nextTail) && (!Field->isZeroLengthBitField() || (!astContext.getTargetInfo().useZeroLengthBitfieldAlignment() && !astContext.getTargetInfo().useBitFieldTypeAlignment())) && Tail == getFieldBitOffset(*Field)) { - Tail += Field->getBitWidthValue(); + Tail = nextTail; ++Field; continue; } diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index 05bc94a4b112..566da42ed43f 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -34,9 +34,29 @@ typedef struct { unsigned b; } T; +typedef struct { + char a; + char b; + char c; + + // startOffset 24 bits, new storage from here + int d: 2; + int e: 2; + int f: 4; + int g: 25; + int h: 3; + int i: 4; + int j: 3; + int k: 8; + + int l: 14; // need to be a part of the new storage + // because (tail - startOffset) is 65 after 'l' field +} U; + // CHECK: !ty_22D22 = !cir.struct // CHECK: !ty_22S22 = !cir.struct // CHECK: !ty_22T22 = !cir.struct +// CHECK: !ty_22U22 = !cir.struct // CHECK: !ty_22anon2E122 = !cir.struct // CHECK: !ty_anon_struct = !cir.struct // CHECK: !ty_22__long22 = !cir.struct}> @@ -107,6 +127,11 @@ int load_one_bitfield(T* t) { return t->a; } +// CHECK: cir.func {{.*@createU}} +void createU() { + U u; +} + // for this struct type we create an anon structure with different storage types in initialization // CHECK: cir.func {{.*@createD}} // CHECK: %0 = cir.alloca !ty_22D22, cir.ptr , ["d"] {alignment = 4 : i64} From 84270054a9de5b2bde5be4bc2065d00f227b94b0 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 22 Feb 2024 03:13:54 +0800 Subject: [PATCH 1422/2301] [CIR] introduce CIR floating-point types (#385) This PR adds a dedicated `cir.float` type for representing floating-point types. There are several issues linked to this PR: #5, --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 27 ++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 1 + .../include/clang/CIR/Dialect/IR/CIRTypes.td | 40 +- .../clang/CIR/Interfaces/CIRFPTypeInterface.h | 22 + .../CIR/Interfaces/CIRFPTypeInterface.td | 52 +++ .../clang/CIR/Interfaces/CMakeLists.txt | 9 + clang/lib/CIR/CodeGen/CIRGenBuilder.h | 33 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 14 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 3 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 62 +++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 49 ++ .../lib/CIR/Interfaces/CIRFPTypeInterface.cpp | 14 + clang/lib/CIR/Interfaces/CMakeLists.txt | 2 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 74 +-- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 6 + clang/test/CIR/CodeGen/array-init.c | 7 +- clang/test/CIR/CodeGen/binop.c | 2 +- clang/test/CIR/CodeGen/binop.cpp | 8 +- .../test/CIR/CodeGen/builtin-floating-point.c | 420 +++++++++--------- clang/test/CIR/CodeGen/call.c | 48 +- clang/test/CIR/CodeGen/cast.cpp | 16 +- clang/test/CIR/CodeGen/globals.c | 8 +- clang/test/CIR/CodeGen/globals.cpp | 4 +- clang/test/CIR/CodeGen/lalg.c | 22 +- clang/test/CIR/CodeGen/libc.c | 4 +- clang/test/CIR/CodeGen/static-vars.c | 2 +- clang/test/CIR/CodeGen/static-vars.cpp | 2 +- clang/test/CIR/CodeGen/struct.c | 2 +- clang/test/CIR/CodeGen/try-catch.cpp | 2 +- clang/test/CIR/CodeGen/types.c | 14 +- clang/test/CIR/CodeGen/unary.c | 8 +- clang/test/CIR/CodeGen/unary.cpp | 28 +- clang/test/CIR/CodeGen/union.cpp | 16 +- clang/test/CIR/CodeGen/vectype.cpp | 36 +- clang/test/CIR/IR/invalid.cir | 40 +- clang/test/CIR/IR/libc-fabs.cir | 6 +- .../CIR/Lowering/ThroughMLIR/binop-fp.cir | 76 ++-- clang/test/CIR/Lowering/ThroughMLIR/cmp.cir | 40 +- clang/test/CIR/Lowering/binop-fp.cir | 76 ++-- clang/test/CIR/Lowering/cast.cir | 12 +- clang/test/CIR/Lowering/class.cir | 6 +- clang/test/CIR/Lowering/cmp.cir | 40 +- clang/test/CIR/Lowering/const.cir | 6 +- clang/test/CIR/Lowering/dot.cir | 46 +- clang/test/CIR/Lowering/float.cir | 20 + clang/test/CIR/Lowering/globals.cir | 8 +- clang/test/CIR/Lowering/libc.cir | 6 +- clang/test/CIR/Lowering/struct.cir | 6 +- clang/test/CIR/Lowering/unary-inc-dec.cir | 36 +- clang/test/CIR/Lowering/unary-not.cir | 28 +- clang/test/CIR/Lowering/unary-plus-minus.cir | 14 +- 55 files changed, 935 insertions(+), 615 deletions(-) create mode 100644 clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h create mode 100644 clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td create mode 100644 clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp create mode 100644 clang/test/CIR/Lowering/float.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 551cda9144b5..4949a03908dc 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -215,6 +215,33 @@ def IntAttr : CIR_Attr<"Int", "int", [TypedAttrInterface]> { let hasCustomAssemblyFormat = 1; } +//===----------------------------------------------------------------------===// +// FPAttr +//===----------------------------------------------------------------------===// + +def FPAttr : CIR_Attr<"FP", "fp", [TypedAttrInterface]> { + let summary = "An attribute containing a floating-point value"; + let description = [{ + An fp attribute is a literal attribute that represents a floating-point + value of the specified floating-point type. + }]; + let parameters = (ins AttributeSelfTypeParameter<"">:$type, "APFloat":$value); + let builders = [ + AttrBuilderWithInferredContext<(ins "Type":$type, + "const APFloat &":$value), [{ + return $_get(type.getContext(), type, value); + }]>, + ]; + let extraClassDeclaration = [{ + static FPAttr getZero(mlir::Type type); + }]; + let genVerifyDecl = 1; + + let assemblyFormat = [{ + `<` custom($value, ref($type)) `>` + }]; +} + //===----------------------------------------------------------------------===// // ConstPointerAttr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d82424653183..0d5d5e9e4ff8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2615,8 +2615,8 @@ def IterEndOp : CIR_Op<"iterator_end"> { class UnaryFPToFPBuiltinOp : CIR_Op { - let arguments = (ins AnyFloat:$src); - let results = (outs AnyFloat:$result); + let arguments = (ins CIR_AnyFloat:$src); + let results = (outs CIR_AnyFloat:$result); let summary = "libc builtin equivalent ignoring " "floating point exceptions and errno"; let assemblyFormat = "$src `:` type($src) attr-dict"; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 46835e548a69..512b9db01dfe 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -16,6 +16,7 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Types.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "clang/CIR/Interfaces/CIRFPTypeInterface.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 06d4b378f80b..442ce90cc54a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -15,15 +15,18 @@ include "clang/CIR/Dialect/IR/CIRDialect.td" include "clang/CIR/Interfaces/ASTAttrInterfaces.td" +include "clang/CIR/Interfaces/CIRFPTypeInterface.td" include "mlir/Interfaces/DataLayoutInterfaces.td" include "mlir/IR/AttrTypeBase.td" +include "mlir/IR/EnumAttr.td" //===----------------------------------------------------------------------===// // CIR Types //===----------------------------------------------------------------------===// -class CIR_Type traits = []> : - TypeDef { +class CIR_Type traits = [], + string baseCppClass = "::mlir::Type"> + : TypeDef { let mnemonic = typeMnemonic; } @@ -94,6 +97,37 @@ def SInt16 : SInt<16>; def SInt32 : SInt<32>; def SInt64 : SInt<64>; +//===----------------------------------------------------------------------===// +// FloatType +//===----------------------------------------------------------------------===// + +class CIR_FloatType + : CIR_Type, + DeclareTypeInterfaceMethods, + ]> {} + +def CIR_Single : CIR_FloatType<"Single", "float"> { + let summary = "CIR single-precision float type"; + let description = [{ + Floating-point type that represents the `float` type in C/C++. Its + underlying floating-point format is the IEEE-754 binary32 format. + }]; +} + +def CIR_Double : CIR_FloatType<"Double", "double"> { + let summary = "CIR double-precision float type"; + let description = [{ + Floating-point type that represents the `double` type in C/C++. Its + underlying floating-point format is the IEEE-754 binar64 format. + }]; +} + +// Constraints + +def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double]>; + //===----------------------------------------------------------------------===// // PointerType //===----------------------------------------------------------------------===// @@ -318,7 +352,7 @@ def CIR_StructType : Type()">, def CIR_AnyType : AnyTypeOf<[ CIR_IntType, CIR_PointerType, CIR_BoolType, CIR_ArrayType, CIR_VectorType, - CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionInfo, AnyFloat, + CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionInfo, CIR_AnyFloat, ]>; #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h new file mode 100644 index 000000000000..b2d75d40496f --- /dev/null +++ b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.h @@ -0,0 +1,22 @@ +//===- CIRFPTypeInterface.h - Interface for CIR FP types -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// Defines the interface to generically handle CIR floating-point types. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_INTERFACES_CIR_CIR_FPTYPEINTERFACE_H +#define CLANG_INTERFACES_CIR_CIR_FPTYPEINTERFACE_H + +#include "mlir/IR/Types.h" +#include "llvm/ADT/APFloat.h" + +/// Include the tablegen'd interface declarations. +#include "clang/CIR/Interfaces/CIRFPTypeInterface.h.inc" + +#endif // CLANG_INTERFACES_CIR_CIR_FPTYPEINTERFACE_H diff --git a/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td new file mode 100644 index 000000000000..7438c8be52d9 --- /dev/null +++ b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td @@ -0,0 +1,52 @@ +//===- CIRFPTypeInterface.td - CIR FP Interface Definitions -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_CIR_INTERFACES_CIR_FP_TYPE_INTERFACE +#define MLIR_CIR_INTERFACES_CIR_FP_TYPE_INTERFACE + +include "mlir/IR/OpBase.td" + +def CIRFPTypeInterface : TypeInterface<"CIRFPTypeInterface"> { + let description = [{ + Contains helper functions to query properties about a floating-point type. + }]; + let cppNamespace = "::mlir::cir"; + + let methods = [ + InterfaceMethod<[{ + Returns the bit width of this floating-point type. + }], + /*retTy=*/"unsigned", + /*methodName=*/"getWidth", + /*args=*/(ins), + /*methodBody=*/"", + /*defaultImplementation=*/[{ + return llvm::APFloat::semanticsSizeInBits($_type.getFloatSemantics()); + }] + >, + InterfaceMethod<[{ + Return the mantissa width. + }], + /*retTy=*/"unsigned", + /*methodName=*/"getFPMantissaWidth", + /*args=*/(ins), + /*methodBody=*/"", + /*defaultImplementation=*/[{ + return llvm::APFloat::semanticsPrecision($_type.getFloatSemantics()); + }] + >, + InterfaceMethod<[{ + Return the float semantics of this floating-point type. + }], + /*retTy=*/"const llvm::fltSemantics &", + /*methodName=*/"getFloatSemantics" + >, + ]; +} + +#endif // MLIR_CIR_INTERFACES_CIR_FP_TYPE_INTERFACE diff --git a/clang/include/clang/CIR/Interfaces/CMakeLists.txt b/clang/include/clang/CIR/Interfaces/CMakeLists.txt index c7132abca833..86fffa3f9307 100644 --- a/clang/include/clang/CIR/Interfaces/CMakeLists.txt +++ b/clang/include/clang/CIR/Interfaces/CMakeLists.txt @@ -20,6 +20,15 @@ function(add_clang_mlir_op_interface interface) add_dependencies(mlir-generic-headers MLIR${interface}IncGen) endfunction() +function(add_clang_mlir_type_interface interface) + set(LLVM_TARGET_DEFINITIONS ${interface}.td) + mlir_tablegen(${interface}.h.inc -gen-type-interface-decls) + mlir_tablegen(${interface}.cpp.inc -gen-type-interface-defs) + add_public_tablegen_target(MLIR${interface}IncGen) + add_dependencies(mlir-generic-headers MLIR${interface}IncGen) +endfunction() + add_clang_mlir_attr_interface(ASTAttrInterfaces) add_clang_mlir_op_interface(CIROpInterfaces) add_clang_mlir_op_interface(CIRLoopOpInterface) +add_clang_mlir_type_interface(CIRFPTypeInterface) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index f298540e1b56..b6d3bd56f60c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -224,8 +224,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::TypedAttr getZeroInitAttr(mlir::Type ty) { if (ty.isa()) return mlir::cir::IntAttr::get(ty, 0); - if (ty.isa()) - return mlir::FloatAttr::get(ty, 0.0); + if (auto fltType = ty.dyn_cast()) + return mlir::cir::FPAttr::getZero(fltType); + if (auto fltType = ty.dyn_cast()) + return mlir::cir::FPAttr::getZero(fltType); if (auto arrTy = ty.dyn_cast()) return getZeroAttr(arrTy); if (auto ptrTy = ty.dyn_cast()) @@ -256,12 +258,13 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { if (const auto boolVal = attr.dyn_cast()) return !boolVal.getValue(); - if (const auto fpVal = attr.dyn_cast()) { + if (auto fpAttr = attr.dyn_cast()) { + auto fpVal = fpAttr.getValue(); bool ignored; llvm::APFloat FV(+0.0); - FV.convert(fpVal.getValue().getSemantics(), - llvm::APFloat::rmNearestTiesToEven, &ignored); - return FV.bitwiseIsEqual(fpVal.getValue()); + FV.convert(fpVal.getSemantics(), llvm::APFloat::rmNearestTiesToEven, + &ignored); + return FV.bitwiseIsEqual(fpVal); } if (const auto structVal = attr.dyn_cast()) { @@ -348,13 +351,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } bool isInt(mlir::Type i) { return i.isa(); } - mlir::FloatType getLongDouble80BitsTy() const { - return typeCache.LongDouble80BitsTy; - } + mlir::Type getLongDouble80BitsTy() const { llvm_unreachable("NYI"); } /// Get the proper floating point type for the given semantics. - mlir::FloatType getFloatTyForFormat(const llvm::fltSemantics &format, - bool useNativeHalf) const { + mlir::Type getFloatTyForFormat(const llvm::fltSemantics &format, + bool useNativeHalf) const { if (&format == &llvm::APFloat::IEEEhalf()) { llvm_unreachable("IEEEhalf float format is NYI"); } @@ -362,9 +363,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { if (&format == &llvm::APFloat::BFloat()) llvm_unreachable("BFloat float format is NYI"); if (&format == &llvm::APFloat::IEEEsingle()) - llvm_unreachable("IEEEsingle float format is NYI"); + return typeCache.FloatTy; if (&format == &llvm::APFloat::IEEEdouble()) - llvm_unreachable("IEEEdouble float format is NYI"); + return typeCache.DoubleTy; if (&format == &llvm::APFloat::IEEEquad()) llvm_unreachable("IEEEquad float format is NYI"); if (&format == &llvm::APFloat::PPCDoubleDouble()) @@ -491,9 +492,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } bool isSized(mlir::Type ty) { - if (ty.isIntOrFloat() || - ty.isa()) + if (ty.isa()) return true; assert(0 && "Unimplemented size for type"); return false; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 8c6964e76b3c..59551e6d4bdc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1708,7 +1708,9 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, assert(0 && "not implemented"); else { mlir::Type ty = CGM.getCIRType(DestType); - return builder.getFloatAttr(ty, Init); + assert(ty.isa() && + "expected floating-point type"); + return CGM.getBuilder().getAttr(ty, Init); } } case APValue::Array: { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 4799f68726c8..bcca17b17e31 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -165,9 +165,11 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitFloatingLiteral(const FloatingLiteral *E) { mlir::Type Ty = CGF.getCIRType(E->getType()); + assert(Ty.isa() && + "expect floating-point type"); return Builder.create( CGF.getLoc(E->getExprLoc()), Ty, - Builder.getFloatAttr(Ty, E->getValue())); + Builder.getAttr(Ty, E->getValue())); } mlir::Value VisitCharacterLiteral(const CharacterLiteral *E) { mlir::Type Ty = CGF.getCIRType(E->getType()); @@ -1227,7 +1229,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { llvm_unreachable("NYI"); assert(!UnimplementedFeature::cirVectorType()); - if (Ops.LHS.getType().isa()) { + if (Ops.LHS.getType().isa()) { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); return Builder.createFSub(Ops.LHS, Ops.RHS); } @@ -1701,7 +1703,7 @@ mlir::Value ScalarExprEmitter::buildScalarCast( llvm_unreachable("NYI: signed bool"); if (CGF.getBuilder().isInt(DstTy)) { CastKind = mlir::cir::CastKind::bool_to_int; - } else if (DstTy.isa()) { + } else if (DstTy.isa()) { CastKind = mlir::cir::CastKind::bool_to_float; } else { llvm_unreachable("Internal error: Cast to unexpected type"); @@ -1709,12 +1711,12 @@ mlir::Value ScalarExprEmitter::buildScalarCast( } else if (CGF.getBuilder().isInt(SrcTy)) { if (CGF.getBuilder().isInt(DstTy)) { CastKind = mlir::cir::CastKind::integral; - } else if (DstTy.isa()) { + } else if (DstTy.isa()) { CastKind = mlir::cir::CastKind::int_to_float; } else { llvm_unreachable("Internal error: Cast to unexpected type"); } - } else if (SrcTy.isa()) { + } else if (SrcTy.isa()) { if (CGF.getBuilder().isInt(DstTy)) { // If we can't recognize overflow as undefined behavior, assume that // overflow saturates. This protects against normal optimizations if we @@ -1724,7 +1726,7 @@ mlir::Value ScalarExprEmitter::buildScalarCast( if (Builder.getIsFPConstrained()) llvm_unreachable("NYI"); CastKind = mlir::cir::CastKind::float_to_int; - } else if (DstTy.isa()) { + } else if (DstTy.isa()) { // TODO: split this to createFPExt/createFPTrunc return Builder.createFloatingCast(Src, DstTy); } else { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 6f4b7b3262b7..6c831ddef8f7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -133,11 +133,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // TODO: HalfTy // TODO: BFloatTy - FloatTy = builder.getF32Type(); - DoubleTy = builder.getF64Type(); + FloatTy = ::mlir::cir::SingleType::get(builder.getContext()); + DoubleTy = ::mlir::cir::DoubleType::get(builder.getContext()); // TODO(cir): perhaps we should abstract long double variations into a custom // cir.long_double type. Said type would also hold the semantics for lowering. - LongDouble80BitsTy = builder.getF80Type(); // TODO: PointerWidthInBits PointerAlignInBytes = diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index 91290001d683..d5900694c43c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -37,7 +37,8 @@ struct CIRGenTypeCache { // mlir::Type HalfTy, BFloatTy; // TODO(cir): perhaps we should abstract long double variations into a custom // cir.long_double type. Said type would also hold the semantics for lowering. - mlir::FloatType FloatTy, DoubleTy, LongDouble80BitsTy; + mlir::cir::SingleType FloatTy; + mlir::cir::DoubleType DoubleTy; /// int mlir::Type UIntTy; diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index a3086b333806..965e7fa6713b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -37,6 +37,12 @@ static void printStructMembers(mlir::AsmPrinter &p, mlir::ArrayAttr members); static mlir::ParseResult parseStructMembers(::mlir::AsmParser &parser, mlir::ArrayAttr &members); +static void printFloatLiteral(mlir::AsmPrinter &p, llvm::APFloat value, + mlir::Type ty); +static mlir::ParseResult +parseFloatLiteral(mlir::AsmParser &parser, + mlir::FailureOr &value, mlir::Type ty); + #define GET_ATTRDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" @@ -298,6 +304,62 @@ LogicalResult IntAttr::verify(function_ref emitError, return success(); } +//===----------------------------------------------------------------------===// +// FPAttr definitions +//===----------------------------------------------------------------------===// + +static void printFloatLiteral(mlir::AsmPrinter &p, llvm::APFloat value, + mlir::Type ty) { + p << value; +} + +static mlir::ParseResult +parseFloatLiteral(mlir::AsmParser &parser, + mlir::FailureOr &value, mlir::Type ty) { + double rawValue; + if (parser.parseFloat(rawValue)) { + return parser.emitError(parser.getCurrentLocation(), + "expected floating-point value"); + } + + auto losesInfo = false; + value.emplace(rawValue); + + auto tyFpInterface = ty.dyn_cast(); + if (!tyFpInterface) { + // Parsing of the current floating-point literal has succeeded, but the + // given attribute type is invalid. This error will be reported later when + // the attribute is being verified. + return success(); + } + + value->convert(tyFpInterface.getFloatSemantics(), + llvm::RoundingMode::TowardZero, &losesInfo); + return success(); +} + +cir::FPAttr cir::FPAttr::getZero(mlir::Type type) { + return get(type, + APFloat::getZero( + type.cast().getFloatSemantics())); +} + +LogicalResult cir::FPAttr::verify(function_ref emitError, + Type type, APFloat value) { + auto fltTypeInterface = type.dyn_cast(); + if (!fltTypeInterface) { + emitError() << "expected floating-point type"; + return failure(); + } + if (APFloat::SemanticsToEnum(fltTypeInterface.getFloatSemantics()) != + APFloat::SemanticsToEnum(value.getSemantics())) { + emitError() << "floating-point semantics mismatch"; + return failure(); + } + + return success(); +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 3186c5573527..0efaa67df396 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -305,7 +305,7 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return success(); } - if (attrType.isa()) { + if (attrType.isa()) { auto at = attrType.cast(); if (at.getType() != opType) { return op->emitOpError("result type (") @@ -428,13 +428,13 @@ LogicalResult CastOp::verify() { return success(); } case cir::CastKind::floating: { - if (!srcType.dyn_cast() || - !resType.dyn_cast()) + if (!srcType.isa() || + !resType.isa()) return emitOpError() << "requries floating for source and result"; return success(); } case cir::CastKind::float_to_int: { - if (!srcType.dyn_cast()) + if (!srcType.isa()) return emitOpError() << "requires floating for source"; if (!resType.dyn_cast()) return emitOpError() << "requires !IntegerType for result"; @@ -455,7 +455,7 @@ LogicalResult CastOp::verify() { return success(); } case cir::CastKind::float_to_bool: { - if (!srcType.isa()) + if (!srcType.isa()) return emitOpError() << "requires float for source"; if (!resType.isa()) return emitOpError() << "requires !cir.bool for result"; @@ -471,14 +471,14 @@ LogicalResult CastOp::verify() { case cir::CastKind::int_to_float: { if (!srcType.isa()) return emitOpError() << "requires !cir.int for source"; - if (!resType.isa()) + if (!resType.isa()) return emitOpError() << "requires !cir.float for result"; return success(); } case cir::CastKind::bool_to_float: { if (!srcType.isa()) return emitOpError() << "requires !cir.bool for source"; - if (!resType.isa()) + if (!resType.isa()) return emitOpError() << "requires !cir.float for result"; return success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 77a4a58e65a5..5ddafd66231d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -25,6 +25,7 @@ #include "mlir/Support/LogicalResult.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "llvm/ADT/APFloat.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TypeSwitch.h" @@ -624,6 +625,54 @@ IntType::verify(llvm::function_ref emitError, return mlir::success(); } +//===----------------------------------------------------------------------===// +// Floating-point type definitions +//===----------------------------------------------------------------------===// + +const llvm::fltSemantics &SingleType::getFloatSemantics() const { + return llvm::APFloat::IEEEsingle(); +} + +llvm::TypeSize +SingleType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return llvm::TypeSize::getFixed(getWidth()); +} + +uint64_t +SingleType::getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return (uint64_t)(getWidth() / 8); +} + +uint64_t +SingleType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return (uint64_t)(getWidth() / 8); +} + +const llvm::fltSemantics &DoubleType::getFloatSemantics() const { + return llvm::APFloat::IEEEdouble(); +} + +llvm::TypeSize +DoubleType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return llvm::TypeSize::getFixed(getWidth()); +} + +uint64_t +DoubleType::getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return (uint64_t)(getWidth() / 8); +} + +uint64_t +DoubleType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return (uint64_t)(getWidth() / 8); +} + //===----------------------------------------------------------------------===// // FuncType Definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp b/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp new file mode 100644 index 000000000000..6062a39be7fa --- /dev/null +++ b/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp @@ -0,0 +1,14 @@ +//====- CIRFPTypeInterface.cpp - Interface for floating-point types -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "clang/CIR/Interfaces/CIRFPTypeInterface.h" + +using namespace mlir::cir; + +/// Include the generated interfaces. +#include "clang/CIR/Interfaces/CIRFPTypeInterface.cpp.inc" diff --git a/clang/lib/CIR/Interfaces/CMakeLists.txt b/clang/lib/CIR/Interfaces/CMakeLists.txt index 84322f4836e0..2f4886d6a93a 100644 --- a/clang/lib/CIR/Interfaces/CMakeLists.txt +++ b/clang/lib/CIR/Interfaces/CMakeLists.txt @@ -2,6 +2,7 @@ add_clang_library(MLIRCIRInterfaces ASTAttrInterfaces.cpp CIROpInterfaces.cpp CIRLoopOpInterface.cpp + CIRFPTypeInterface.cpp ADDITIONAL_HEADER_DIRS ${MLIR_MAIN_INCLUDE_DIR}/mlir/Interfaces @@ -10,6 +11,7 @@ add_clang_library(MLIRCIRInterfaces MLIRCIRASTAttrInterfacesIncGen MLIRCIROpInterfacesIncGen MLIRCIRLoopOpInterfaceIncGen + MLIRCIRFPTypeInterfaceIncGen LINK_LIBS ${dialect_libs} diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e6cc2664ccb7..6f3d8ba805c6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -189,9 +189,9 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ConstPtrAttr ptrAttr, loc, converter->convertType(ptrAttr.getType()), ptrVal); } -/// FloatAttr visitor. +/// FPAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::FloatAttr fltAttr, +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::FPAttr fltAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto loc = parentOp->getLoc(); @@ -368,7 +368,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, const mlir::TypeConverter *converter) { if (const auto intAttr = attr.dyn_cast()) return lowerCirAttrAsValue(parentOp, intAttr, rewriter, converter); - if (const auto fltAttr = attr.dyn_cast()) + if (const auto fltAttr = attr.dyn_cast()) return lowerCirAttrAsValue(parentOp, fltAttr, rewriter, converter); if (const auto ptrAttr = attr.dyn_cast()) return lowerCirAttrAsValue(parentOp, ptrAttr, rewriter, converter); @@ -621,21 +621,29 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { break; } case mlir::cir::CastKind::floating: { - auto dstTy = castOp.getResult().getType().cast(); + auto dstTy = castOp.getResult().getType(); auto srcTy = castOp.getSrc().getType(); + + if (!dstTy.isa() || + !srcTy.isa()) + return castOp.emitError() + << "NYI cast from " << srcTy << " to " << dstTy; + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = + getTypeConverter()->convertType(dstTy).cast(); - if (auto fpSrcTy = srcTy.dyn_cast()) { - if (fpSrcTy.getWidth() > dstTy.getWidth()) - rewriter.replaceOpWithNewOp(castOp, dstTy, - llvmSrcVal); - else - rewriter.replaceOpWithNewOp(castOp, dstTy, - llvmSrcVal); - return mlir::success(); - } + auto getFloatWidth = [](mlir::Type ty) -> unsigned { + return ty.cast().getWidth(); + }; - return castOp.emitError() << "NYI cast from " << srcTy << " to " << dstTy; + if (getFloatWidth(srcTy) > getFloatWidth(dstTy)) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); } case mlir::cir::CastKind::int_to_ptr: { auto dstTy = castOp.getType().cast(); @@ -1002,10 +1010,11 @@ template <> mlir::APInt getZeroInitFromType(mlir::Type Ty) { } template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { - assert((Ty.isF32() || Ty.isF64()) && "only float and double supported"); - if (Ty.isF32()) + assert((Ty.isa()) && + "only float and double supported"); + if (Ty.isF32() || Ty.isa()) return mlir::APFloat(0.f); - if (Ty.isF64()) + if (Ty.isF64() || Ty.isa()) return mlir::APFloat(0.0); llvm_unreachable("NYI"); } @@ -1086,8 +1095,8 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, if (type.isa()) return convertToDenseElementsAttr( constArr, dims, converter->convertType(type)); - if (type.isa()) - return convertToDenseElementsAttr( + if (type.isa()) + return convertToDenseElementsAttr( constArr, dims, converter->convertType(type)); return std::nullopt; @@ -1123,8 +1132,10 @@ class CIRConstantLowering attr = rewriter.getIntegerAttr( typeConverter->convertType(op.getType()), op.getValue().cast().getValue()); - } else if (op.getType().isa()) { - attr = op.getValue(); + } else if (op.getType().isa()) { + attr = rewriter.getFloatAttr( + typeConverter->convertType(op.getType()), + op.getValue().cast().getValue()); } else if (op.getType().isa()) { // Optimize with dedicated LLVM op for null pointers. if (op.getValue().isa()) { @@ -1277,7 +1288,7 @@ class CIRVectorCmpOpLowering op.getLoc(), convertCmpKindToICmpPredicate(op.getKind(), intType.isSigned()), adaptor.getLhs(), adaptor.getRhs()); - } else if (elementType.isa()) { + } else if (elementType.isa()) { bitResult = rewriter.create( op.getLoc(), convertCmpKindToFCmpPredicate(op.getKind()), adaptor.getLhs(), adaptor.getRhs()); @@ -1621,9 +1632,10 @@ class CIRGlobalOpLowering << constArr.getElts(); return mlir::failure(); } - } else if (llvm::isa(init.value())) { - // Nothing to do since LLVM already supports these types as - // initializers. + } else if (auto fltAttr = init.value().dyn_cast()) { + // Initializer is a constant floating-point number: convert to MLIR + // builtin constant. + init = rewriter.getFloatAttr(llvmType, fltAttr.getValue()); } // Initializer is a constant integer: convert to MLIR builtin constant. else if (auto intAttr = init.value().dyn_cast()) { @@ -1766,7 +1778,7 @@ class CIRUnaryOpLowering } // Floating point unary operations: + - ++ -- - if (elementType.isa()) { + if (elementType.isa()) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Inc: { assert(!IsVector && "++ not allowed on vector types"); @@ -1845,7 +1857,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { assert((op.getLhs().getType() == op.getRhs().getType()) && "inconsistent operands' types not supported yet"); mlir::Type type = op.getRhs().getType(); - assert((type.isa()) && "operand type not supported yet"); @@ -2024,7 +2036,7 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { /* isSigned=*/false); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (type.isa()) { + } else if (type.isa()) { auto kind = convertCmpKindToFCmpPredicate(cmpOp.getKind()); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); @@ -2261,6 +2273,12 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, // LLVM doesn't work with signed types, so we drop the CIR signs here. return mlir::IntegerType::get(type.getContext(), type.getWidth()); }); + converter.addConversion([&](mlir::cir::SingleType type) -> mlir::Type { + return mlir::Float64Type::get(type.getContext()); + }); + converter.addConversion([&](mlir::cir::DoubleType type) -> mlir::Type { + return mlir::Float64Type::get(type.getContext()); + }); converter.addConversion([&](mlir::cir::FuncType type) -> mlir::Type { auto result = converter.convertType(type.getReturnType()); llvm::SmallVector arguments; diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 6437664e932f..d413307ce7ba 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -639,6 +639,12 @@ static mlir::TypeConverter prepareTypeConverter() { converter.addConversion([&](mlir::cir::BoolType type) -> mlir::Type { return mlir::IntegerType::get(type.getContext(), 8); }); + converter.addConversion([&](mlir::cir::SingleType type) -> mlir::Type { + return mlir::FloatType::getF32(type.getContext()); + }); + converter.addConversion([&](mlir::cir::DoubleType type) -> mlir::Type { + return mlir::FloatType::getF64(type.getContext()); + }); converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { auto elementType = converter.convertType(type.getEltType()); return mlir::MemRefType::get(type.getSize(), elementType); diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c index 0caf767a51d5..cdba1e30cb4b 100644 --- a/clang/test/CIR/CodeGen/array-init.c +++ b/clang/test/CIR/CodeGen/array-init.c @@ -30,10 +30,9 @@ void foo() { double bar[] = {9,8,7}; } -// CHECK: %0 = cir.alloca !cir.array, cir.ptr >, ["bar"] {alignment = 16 : i64} -// CHECK-NEXT: %1 = cir.const(#cir.const_array<[9.000000e+00, 8.000000e+00, 7.000000e+00]> : !cir.array) : !cir.array -// CHECK-NEXT: cir.store %1, %0 : !cir.array, cir.ptr > - +// CHECK: %0 = cir.alloca !cir.array, cir.ptr >, ["bar"] {alignment = 16 : i64} +// CHECK-NEXT: %1 = cir.const(#cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array) : !cir.array +// CHECK-NEXT: cir.store %1, %0 : !cir.array, cir.ptr > void bar(int a, int b, int c) { int arr[] = {a,b,c}; } diff --git a/clang/test/CIR/CodeGen/binop.c b/clang/test/CIR/CodeGen/binop.c index bc5093e43ac4..280fd29b067f 100644 --- a/clang/test/CIR/CodeGen/binop.c +++ b/clang/test/CIR/CodeGen/binop.c @@ -9,5 +9,5 @@ void conditionalResultIimplicitCast(int a, int b, float f) { float y = f && f; // CHECK: %[[#BOOL:]] = cir.ternary // CHECK: %[[#INT:]] = cir.cast(bool_to_int, %[[#BOOL]] : !cir.bool), !s32i - // CHECK: %{{.+}} = cir.cast(int_to_float, %[[#INT]] : !s32i), f32 + // CHECK: %{{.+}} = cir.cast(int_to_float, %[[#INT]] : !s32i), !cir.float } diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index 4384a9c391cb..7006ad48d83d 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -103,11 +103,11 @@ void b3(int a, int b, int c, int d) { void testFloatingPointBinOps(float a, float b) { a * b; - // CHECK: cir.binop(mul, %{{.+}}, %{{.+}}) : f32 + // CHECK: cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.float a / b; - // CHECK: cir.binop(div, %{{.+}}, %{{.+}}) : f32 + // CHECK: cir.binop(div, %{{.+}}, %{{.+}}) : !cir.float a + b; - // CHECK: cir.binop(add, %{{.+}}, %{{.+}}) : f32 + // CHECK: cir.binop(add, %{{.+}}, %{{.+}}) : !cir.float a - b; - // CHECK: cir.binop(sub, %{{.+}}, %{{.+}}) : f32 + // CHECK: cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.float } diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c index f62f7ae0a3d4..fc9c407050ce 100644 --- a/clang/test/CIR/CodeGen/builtin-floating-point.c +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -5,585 +5,585 @@ float my_ceilf(float f) { return __builtin_ceilf(f); // CHECK: cir.func @my_ceilf - // CHECK: {{.+}} = cir.ceil {{.+}} : f32 + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.float } double my_ceil(double f) { return __builtin_ceil(f); // CHECK: cir.func @my_ceil - // CHECK: {{.+}} = cir.ceil {{.+}} : f64 + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.double } -long double my_ceill(long double f) { - return __builtin_ceill(f); - // CHECK: cir.func @my_ceill - // CHECK: {{.+}} = cir.ceil {{.+}} : f80 -} +// long double my_ceill(long double f) { +// return __builtin_ceill(f); +// // DISABLED-CHECK: cir.func @my_ceill +// // DISABLED-CHECK: {{.+}} = cir.ceil {{.+}} : f80 +// } float ceilf(float); double ceil(double); -long double ceill(long double); +// long double ceill(long double); float call_ceilf(float f) { return ceilf(f); // CHECK: cir.func @call_ceilf - // CHECK: {{.+}} = cir.ceil {{.+}} : f32 + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.float } double call_ceil(double f) { return ceil(f); // CHECK: cir.func @call_ceil - // CHECK: {{.+}} = cir.ceil {{.+}} : f64 + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.double } -long double call_ceill(long double f) { - return ceill(f); - // CHECK: cir.func @call_ceill - // CHECK: {{.+}} = cir.ceil {{.+}} : f80 -} +// long double call_ceill(long double f) { +// return ceill(f); +// // DISABLED-CHECK: cir.func @call_ceill +// // DISABLED-CHECK: {{.+}} = cir.ceil {{.+}} : f80 +// } // cos float my_cosf(float f) { return __builtin_cosf(f); // CHECK: cir.func @my_cosf - // CHECK: {{.+}} = cir.cos {{.+}} : f32 + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.float } double my_cos(double f) { return __builtin_cos(f); // CHECK: cir.func @my_cos - // CHECK: {{.+}} = cir.cos {{.+}} : f64 + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double } -long double my_cosl(long double f) { - return __builtin_cosl(f); - // CHECK: cir.func @my_cosl - // CHECK: {{.+}} = cir.cos {{.+}} : f80 -} +// long double my_cosl(long double f) { +// return __builtin_cosl(f); +// // DISABLED-CHECK: cir.func @my_cosl +// // DISABLED-CHECK: {{.+}} = cir.cos {{.+}} : f80 +// } float cosf(float); double cos(double); -long double cosl(long double); +// long double cosl(long double); float call_cosf(float f) { return cosf(f); // CHECK: cir.func @call_cosf - // CHECK: {{.+}} = cir.cos {{.+}} : f32 + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.float } double call_cos(double f) { return cos(f); // CHECK: cir.func @call_cos - // CHECK: {{.+}} = cir.cos {{.+}} : f64 + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double } -long double call_cosl(long double f) { - return cosl(f); - // CHECK: cir.func @call_cosl - // CHECK: {{.+}} = cir.cos {{.+}} : f80 -} +// long double call_cosl(long double f) { +// return cosl(f); +// // DISABLED-CHECK: cir.func @call_cosl +// // DISABLED-CHECK: {{.+}} = cir.cos {{.+}} : f80 +// } // exp float my_expf(float f) { return __builtin_expf(f); // CHECK: cir.func @my_expf - // CHECK: {{.+}} = cir.exp {{.+}} : f32 + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.float } double my_exp(double f) { return __builtin_exp(f); // CHECK: cir.func @my_exp - // CHECK: {{.+}} = cir.exp {{.+}} : f64 + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double } -long double my_expl(long double f) { - return __builtin_expl(f); - // CHECK: cir.func @my_expl - // CHECK: {{.+}} = cir.exp {{.+}} : f80 -} +// long double my_expl(long double f) { +// return __builtin_expl(f); +// // DISABLED-CHECK: cir.func @my_expl +// // DISABLED-CHECK: {{.+}} = cir.exp {{.+}} : f80 +// } float expf(float); double exp(double); -long double expl(long double); +// long double expl(long double); float call_expf(float f) { return expf(f); // CHECK: cir.func @call_expf - // CHECK: {{.+}} = cir.exp {{.+}} : f32 + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.float } double call_exp(double f) { return exp(f); // CHECK: cir.func @call_exp - // CHECK: {{.+}} = cir.exp {{.+}} : f64 + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double } -long double call_expl(long double f) { - return expl(f); - // CHECK: cir.func @call_expl - // CHECK: {{.+}} = cir.exp {{.+}} : f80 -} +// long double call_expl(long double f) { +// return expl(f); +// // DISABLED-CHECK: cir.func @call_expl +// // DISABLED-CHECK: {{.+}} = cir.exp {{.+}} : f80 +// } // exp2 float my_exp2f(float f) { return __builtin_exp2f(f); // CHECK: cir.func @my_exp2f - // CHECK: {{.+}} = cir.exp2 {{.+}} : f32 + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.float } double my_exp2(double f) { return __builtin_exp2(f); // CHECK: cir.func @my_exp2 - // CHECK: {{.+}} = cir.exp2 {{.+}} : f64 + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double } -long double my_exp2l(long double f) { - return __builtin_exp2l(f); - // CHECK: cir.func @my_exp2l - // CHECK: {{.+}} = cir.exp2 {{.+}} : f80 -} +// long double my_exp2l(long double f) { +// return __builtin_exp2l(f); +// // DISABLED-CHECK: cir.func @my_exp2l +// // DISABLED-CHECK: {{.+}} = cir.exp2 {{.+}} : f80 +// } float exp2f(float); double exp2(double); -long double exp2l(long double); +// long double exp2l(long double); float call_exp2f(float f) { return exp2f(f); // CHECK: cir.func @call_exp2f - // CHECK: {{.+}} = cir.exp2 {{.+}} : f32 + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.float } double call_exp2(double f) { return exp2(f); // CHECK: cir.func @call_exp2 - // CHECK: {{.+}} = cir.exp2 {{.+}} : f64 + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double } -long double call_exp2l(long double f) { - return exp2l(f); - // CHECK: cir.func @call_exp2l - // CHECK: {{.+}} = cir.exp2 {{.+}} : f80 -} +// long double call_exp2l(long double f) { +// return exp2l(f); +// // DISABLED-CHECK: cir.func @call_exp2l +// // DISABLED-CHECK: {{.+}} = cir.exp2 {{.+}} : f80 +// } // floor float my_floorf(float f) { return __builtin_floorf(f); // CHECK: cir.func @my_floorf - // CHECK: {{.+}} = cir.floor {{.+}} : f32 + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.float } double my_floor(double f) { return __builtin_floor(f); // CHECK: cir.func @my_floor - // CHECK: {{.+}} = cir.floor {{.+}} : f64 + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.double } -long double my_floorl(long double f) { - return __builtin_floorl(f); - // CHECK: cir.func @my_floorl - // CHECK: {{.+}} = cir.floor {{.+}} : f80 -} +// long double my_floorl(long double f) { +// return __builtin_floorl(f); +// // DISABLED-CHECK: cir.func @my_floorl +// // DISABLED-CHECK: {{.+}} = cir.floor {{.+}} : f80 +// } float floorf(float); double floor(double); -long double floorl(long double); +// long double floorl(long double); float call_floorf(float f) { return floorf(f); // CHECK: cir.func @call_floorf - // CHECK: {{.+}} = cir.floor {{.+}} : f32 + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.float } double call_floor(double f) { return floor(f); // CHECK: cir.func @call_floor - // CHECK: {{.+}} = cir.floor {{.+}} : f64 + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.double } -long double call_floorl(long double f) { - return floorl(f); - // CHECK: cir.func @call_floorl - // CHECK: {{.+}} = cir.floor {{.+}} : f80 -} +// long double call_floorl(long double f) { +// return floorl(f); +// // DISABLED-CHECK: cir.func @call_floorl +// // DISABLED-CHECK: {{.+}} = cir.floor {{.+}} : f80 +// } // log float my_logf(float f) { return __builtin_logf(f); // CHECK: cir.func @my_logf - // CHECK: {{.+}} = cir.log {{.+}} : f32 + // CHECK: {{.+}} = cir.log {{.+}} : !cir.float } double my_log(double f) { return __builtin_log(f); // CHECK: cir.func @my_log - // CHECK: {{.+}} = cir.log {{.+}} : f64 + // CHECK: {{.+}} = cir.log {{.+}} : !cir.double } -long double my_logl(long double f) { - return __builtin_logl(f); - // CHECK: cir.func @my_logl - // CHECK: {{.+}} = cir.log {{.+}} : f80 -} +// long double my_logl(long double f) { +// return __builtin_logl(f); +// // DISABLED-CHECK: cir.func @my_logl +// // DISABLED-CHECK: {{.+}} = cir.log {{.+}} : f80 +// } float logf(float); double log(double); -long double logl(long double); +// long double logl(long double); float call_logf(float f) { return logf(f); // CHECK: cir.func @call_logf - // CHECK: {{.+}} = cir.log {{.+}} : f32 + // CHECK: {{.+}} = cir.log {{.+}} : !cir.float } double call_log(double f) { return log(f); // CHECK: cir.func @call_log - // CHECK: {{.+}} = cir.log {{.+}} : f64 + // CHECK: {{.+}} = cir.log {{.+}} : !cir.double } -long double call_logl(long double f) { - return logl(f); - // CHECK: cir.func @call_logl - // CHECK: {{.+}} = cir.log {{.+}} : f80 -} +// long double call_logl(long double f) { +// return logl(f); +// // DISABLED-CHECK: cir.func @call_logl +// // DISABLED-CHECK: {{.+}} = cir.log {{.+}} : f80 +// } // log10 float my_log10f(float f) { return __builtin_log10f(f); // CHECK: cir.func @my_log10f - // CHECK: {{.+}} = cir.log10 {{.+}} : f32 + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.float } double my_log10(double f) { return __builtin_log10(f); // CHECK: cir.func @my_log10 - // CHECK: {{.+}} = cir.log10 {{.+}} : f64 + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double } -long double my_log10l(long double f) { - return __builtin_log10l(f); - // CHECK: cir.func @my_log10l - // CHECK: {{.+}} = cir.log10 {{.+}} : f80 -} +// long double my_log10l(long double f) { +// return __builtin_log10l(f); +// // DISABLED-CHECK: cir.func @my_log10l +// // DISABLED-CHECK: {{.+}} = cir.log10 {{.+}} : f80 +// } float log10f(float); double log10(double); -long double log10l(long double); +// long double log10l(long double); float call_log10f(float f) { return log10f(f); // CHECK: cir.func @call_log10f - // CHECK: {{.+}} = cir.log10 {{.+}} : f32 + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.float } double call_log10(double f) { return log10(f); // CHECK: cir.func @call_log10 - // CHECK: {{.+}} = cir.log10 {{.+}} : f64 + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double } -long double call_log10l(long double f) { - return log10l(f); - // CHECK: cir.func @call_log10l - // CHECK: {{.+}} = cir.log10 {{.+}} : f80 -} +// long double call_log10l(long double f) { +// return log10l(f); +// // DISABLED-CHECK: cir.func @call_log10l +// // DISABLED-CHECK: {{.+}} = cir.log10 {{.+}} : f80 +// } // log2 float my_log2f(float f) { return __builtin_log2f(f); // CHECK: cir.func @my_log2f - // CHECK: {{.+}} = cir.log2 {{.+}} : f32 + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.float } double my_log2(double f) { return __builtin_log2(f); // CHECK: cir.func @my_log2 - // CHECK: {{.+}} = cir.log2 {{.+}} : f64 + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double } -long double my_log2l(long double f) { - return __builtin_log2l(f); - // CHECK: cir.func @my_log2l - // CHECK: {{.+}} = cir.log2 {{.+}} : f80 -} +// long double my_log2l(long double f) { +// return __builtin_log2l(f); +// // DISABLED-CHECK: cir.func @my_log2l +// // DISABLED-CHECK: {{.+}} = cir.log2 {{.+}} : f80 +// } float log2f(float); double log2(double); -long double log2l(long double); +// long double log2l(long double); float call_log2f(float f) { return log2f(f); // CHECK: cir.func @call_log2f - // CHECK: {{.+}} = cir.log2 {{.+}} : f32 + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.float } double call_log2(double f) { return log2(f); // CHECK: cir.func @call_log2 - // CHECK: {{.+}} = cir.log2 {{.+}} : f64 + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double } -long double call_log2l(long double f) { - return log2l(f); - // CHECK: cir.func @call_log2l - // CHECK: {{.+}} = cir.log2 {{.+}} : f80 -} +// long double call_log2l(long double f) { +// return log2l(f); +// // DISABLED-CHECK: cir.func @call_log2l +// // DISABLED-CHECK: {{.+}} = cir.log2 {{.+}} : f80 +// } // nearbyint float my_nearbyintf(float f) { return __builtin_nearbyintf(f); // CHECK: cir.func @my_nearbyintf - // CHECK: {{.+}} = cir.nearbyint {{.+}} : f32 + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.float } double my_nearbyint(double f) { return __builtin_nearbyint(f); // CHECK: cir.func @my_nearbyint - // CHECK: {{.+}} = cir.nearbyint {{.+}} : f64 + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.double } -long double my_nearbyintl(long double f) { - return __builtin_nearbyintl(f); - // CHECK: cir.func @my_nearbyintl - // CHECK: {{.+}} = cir.nearbyint {{.+}} : f80 -} +// long double my_nearbyintl(long double f) { +// return __builtin_nearbyintl(f); +// // DISABLED-CHECK: cir.func @my_nearbyintl +// // DISABLED-CHECK: {{.+}} = cir.nearbyint {{.+}} : f80 +// } float nearbyintf(float); double nearbyint(double); -long double nearbyintl(long double); +// long double nearbyintl(long double); float call_nearbyintf(float f) { return nearbyintf(f); // CHECK: cir.func @call_nearbyintf - // CHECK: {{.+}} = cir.nearbyint {{.+}} : f32 + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.float } double call_nearbyint(double f) { return nearbyint(f); // CHECK: cir.func @call_nearbyint - // CHECK: {{.+}} = cir.nearbyint {{.+}} : f64 + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.double } -long double call_nearbyintl(long double f) { - return nearbyintl(f); - // CHECK: cir.func @call_nearbyintl - // CHECK: {{.+}} = cir.nearbyint {{.+}} : f80 -} +// long double call_nearbyintl(long double f) { +// return nearbyintl(f); +// // DISABLED-CHECK: cir.func @call_nearbyintl +// // DISABLED-CHECK: {{.+}} = cir.nearbyint {{.+}} : f80 +// } // rint float my_rintf(float f) { return __builtin_rintf(f); // CHECK: cir.func @my_rintf - // CHECK: {{.+}} = cir.rint {{.+}} : f32 + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.float } double my_rint(double f) { return __builtin_rint(f); // CHECK: cir.func @my_rint - // CHECK: {{.+}} = cir.rint {{.+}} : f64 + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.double } -long double my_rintl(long double f) { - return __builtin_rintl(f); - // CHECK: cir.func @my_rintl - // CHECK: {{.+}} = cir.rint {{.+}} : f80 -} +// long double my_rintl(long double f) { +// return __builtin_rintl(f); +// // DISABLED-CHECK: cir.func @my_rintl +// // DISABLED-CHECK: {{.+}} = cir.rint {{.+}} : f80 +// } float rintf(float); double rint(double); -long double rintl(long double); +// long double rintl(long double); float call_rintf(float f) { return rintf(f); // CHECK: cir.func @call_rintf - // CHECK: {{.+}} = cir.rint {{.+}} : f32 + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.float } double call_rint(double f) { return rint(f); // CHECK: cir.func @call_rint - // CHECK: {{.+}} = cir.rint {{.+}} : f64 + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.double } -long double call_rintl(long double f) { - return rintl(f); - // CHECK: cir.func @call_rintl - // CHECK: {{.+}} = cir.rint {{.+}} : f80 -} +// long double call_rintl(long double f) { +// return rintl(f); +// // DISABLED-CHECK: cir.func @call_rintl +// // DISABLED-CHECK: {{.+}} = cir.rint {{.+}} : f80 +// } // round float my_roundf(float f) { return __builtin_roundf(f); // CHECK: cir.func @my_roundf - // CHECK: {{.+}} = cir.round {{.+}} : f32 + // CHECK: {{.+}} = cir.round {{.+}} : !cir.float } double my_round(double f) { return __builtin_round(f); // CHECK: cir.func @my_round - // CHECK: {{.+}} = cir.round {{.+}} : f64 + // CHECK: {{.+}} = cir.round {{.+}} : !cir.double } -long double my_roundl(long double f) { - return __builtin_roundl(f); - // CHECK: cir.func @my_roundl - // CHECK: {{.+}} = cir.round {{.+}} : f80 -} +// long double my_roundl(long double f) { +// return __builtin_roundl(f); +// // DISABLED-CHECK: cir.func @my_roundl +// // DISABLED-CHECK: {{.+}} = cir.round {{.+}} : f80 +// } float roundf(float); double round(double); -long double roundl(long double); +// long double roundl(long double); float call_roundf(float f) { return roundf(f); // CHECK: cir.func @call_roundf - // CHECK: {{.+}} = cir.round {{.+}} : f32 + // CHECK: {{.+}} = cir.round {{.+}} : !cir.float } double call_round(double f) { return round(f); // CHECK: cir.func @call_round - // CHECK: {{.+}} = cir.round {{.+}} : f64 + // CHECK: {{.+}} = cir.round {{.+}} : !cir.double } -long double call_roundl(long double f) { - return roundl(f); - // CHECK: cir.func @call_roundl - // CHECK: {{.+}} = cir.round {{.+}} : f80 -} +// long double call_roundl(long double f) { +// return roundl(f); +// // DISABLED-CHECK: cir.func @call_roundl +// // DISABLED-CHECK: {{.+}} = cir.round {{.+}} : f80 +// } // sin float my_sinf(float f) { return __builtin_sinf(f); // CHECK: cir.func @my_sinf - // CHECK: {{.+}} = cir.sin {{.+}} : f32 + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.float } double my_sin(double f) { return __builtin_sin(f); // CHECK: cir.func @my_sin - // CHECK: {{.+}} = cir.sin {{.+}} : f64 + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double } -long double my_sinl(long double f) { - return __builtin_sinl(f); - // CHECK: cir.func @my_sinl - // CHECK: {{.+}} = cir.sin {{.+}} : f80 -} +// long double my_sinl(long double f) { +// return __builtin_sinl(f); +// // DISABLED-CHECK: cir.func @my_sinl +// // DISABLED-CHECK: {{.+}} = cir.sin {{.+}} : f80 +// } float sinf(float); double sin(double); -long double sinl(long double); +// long double sinl(long double); float call_sinf(float f) { return sinf(f); // CHECK: cir.func @call_sinf - // CHECK: {{.+}} = cir.sin {{.+}} : f32 + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.float } double call_sin(double f) { return sin(f); // CHECK: cir.func @call_sin - // CHECK: {{.+}} = cir.sin {{.+}} : f64 + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double } -long double call_sinl(long double f) { - return sinl(f); - // CHECK: cir.func @call_sinl - // CHECK: {{.+}} = cir.sin {{.+}} : f80 -} +// long double call_sinl(long double f) { +// return sinl(f); +// // DISABLED-CHECK: cir.func @call_sinl +// // DISABLED-CHECK: {{.+}} = cir.sin {{.+}} : f80 +// } // sqrt float my_sqrtf(float f) { return __builtin_sqrtf(f); // CHECK: cir.func @my_sqrtf - // CHECK: {{.+}} = cir.sqrt {{.+}} : f32 + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.float } double my_sqrt(double f) { return __builtin_sqrt(f); // CHECK: cir.func @my_sqrt - // CHECK: {{.+}} = cir.sqrt {{.+}} : f64 + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double } -long double my_sqrtl(long double f) { - return __builtin_sqrtl(f); - // CHECK: cir.func @my_sqrtl - // CHECK: {{.+}} = cir.sqrt {{.+}} : f80 -} +// long double my_sqrtl(long double f) { +// return __builtin_sqrtl(f); +// // DISABLED-CHECK: cir.func @my_sqrtl +// // DISABLED-CHECK: {{.+}} = cir.sqrt {{.+}} : f80 +// } float sqrtf(float); double sqrt(double); -long double sqrtl(long double); +// long double sqrtl(long double); float call_sqrtf(float f) { return sqrtf(f); // CHECK: cir.func @call_sqrtf - // CHECK: {{.+}} = cir.sqrt {{.+}} : f32 + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.float } double call_sqrt(double f) { return sqrt(f); // CHECK: cir.func @call_sqrt - // CHECK: {{.+}} = cir.sqrt {{.+}} : f64 + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double } -long double call_sqrtl(long double f) { - return sqrtl(f); - // CHECK: cir.func @call_sqrtl - // CHECK: {{.+}} = cir.sqrt {{.+}} : f80 -} +// long double call_sqrtl(long double f) { +// return sqrtl(f); +// // DISABLED-CHECK: cir.func @call_sqrtl +// // DISABLED-CHECK: {{.+}} = cir.sqrt {{.+}} : f80 +// } // trunc float my_truncf(float f) { return __builtin_truncf(f); // CHECK: cir.func @my_truncf - // CHECK: {{.+}} = cir.trunc {{.+}} : f32 + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.float } double my_trunc(double f) { return __builtin_trunc(f); // CHECK: cir.func @my_trunc - // CHECK: {{.+}} = cir.trunc {{.+}} : f64 + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.double } -long double my_truncl(long double f) { - return __builtin_truncl(f); - // CHECK: cir.func @my_truncl - // CHECK: {{.+}} = cir.trunc {{.+}} : f80 -} +// long double my_truncl(long double f) { +// return __builtin_truncl(f); +// // DISABLED-CHECK: cir.func @my_truncl +// // DISABLED-CHECK: {{.+}} = cir.trunc {{.+}} : f80 +// } float truncf(float); double trunc(double); -long double truncl(long double); +// long double truncl(long double); float call_truncf(float f) { return truncf(f); // CHECK: cir.func @call_truncf - // CHECK: {{.+}} = cir.trunc {{.+}} : f32 + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.float } double call_trunc(double f) { return trunc(f); // CHECK: cir.func @call_trunc - // CHECK: {{.+}} = cir.trunc {{.+}} : f64 + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.double } -long double call_truncl(long double f) { - return truncl(f); - // CHECK: cir.func @call_truncl - // CHECK: {{.+}} = cir.trunc {{.+}} : f80 -} +// long double call_truncl(long double f) { +// return truncl(f); +// // DISABLED-CHECK: cir.func @call_truncl +// // DISABLED-CHECK: {{.+}} = cir.trunc {{.+}} : f80 +// } diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 9b353f57aba0..956c72ca0bc5 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -31,18 +31,18 @@ void d(void) { // CHECK: %6 = cir.load %2 : cir.ptr , !s32i // CHECK: cir.return %6 // CHECK: } -// CHECK: cir.func @c(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 -// CHECK: %0 = cir.alloca f64, cir.ptr , ["a", init] -// CHECK: %1 = cir.alloca f64, cir.ptr , ["b", init] -// CHECK: %2 = cir.alloca f64, cir.ptr , ["__retval"] -// CHECK: cir.store %arg0, %0 : f64, cir.ptr -// CHECK: cir.store %arg1, %1 : f64, cir.ptr -// CHECK: %3 = cir.load %0 : cir.ptr , f64 -// CHECK: %4 = cir.load %1 : cir.ptr , f64 -// CHECK: %5 = cir.binop(add, %3, %4) : f64 -// CHECK: cir.store %5, %2 : f64, cir.ptr -// CHECK: %6 = cir.load %2 : cir.ptr , f64 -// CHECK: cir.return %6 : f64 +// CHECK: cir.func @c(%arg0: !cir.double {{.*}}, %arg1: !cir.double {{.*}}) -> !cir.double +// CHECK: %0 = cir.alloca !cir.double, cir.ptr , ["a", init] +// CHECK: %1 = cir.alloca !cir.double, cir.ptr , ["b", init] +// CHECK: %2 = cir.alloca !cir.double, cir.ptr , ["__retval"] +// CHECK: cir.store %arg0, %0 : !cir.double, cir.ptr +// CHECK: cir.store %arg1, %1 : !cir.double, cir.ptr +// CHECK: %3 = cir.load %0 : cir.ptr , !cir.double +// CHECK: %4 = cir.load %1 : cir.ptr , !cir.double +// CHECK: %5 = cir.binop(add, %3, %4) : !cir.double +// CHECK: cir.store %5, %2 : !cir.double, cir.ptr +// CHECK: %6 = cir.load %2 : cir.ptr , !cir.double +// CHECK: cir.return %6 : !cir.double // CHECK: } // CHECK: cir.func @d() // CHECK: call @a() : () -> () @@ -69,18 +69,18 @@ void d(void) { // CXX-NEXT: %6 = cir.load %2 : cir.ptr , !s32i // CXX-NEXT: cir.return %6 // CXX-NEXT: } -// CXX-NEXT: cir.func @_Z1cdd(%arg0: f64 {{.*}}, %arg1: f64 {{.*}}) -> f64 -// CXX-NEXT: %0 = cir.alloca f64, cir.ptr , ["a", init] -// CXX-NEXT: %1 = cir.alloca f64, cir.ptr , ["b", init] -// CXX-NEXT: %2 = cir.alloca f64, cir.ptr , ["__retval"] -// CXX-NEXT: cir.store %arg0, %0 : f64, cir.ptr -// CXX-NEXT: cir.store %arg1, %1 : f64, cir.ptr -// CXX-NEXT: %3 = cir.load %0 : cir.ptr , f64 -// CXX-NEXT: %4 = cir.load %1 : cir.ptr , f64 -// CXX-NEXT: %5 = cir.binop(add, %3, %4) : f64 -// CXX-NEXT: cir.store %5, %2 : f64, cir.ptr -// CXX-NEXT: %6 = cir.load %2 : cir.ptr , f64 -// CXX-NEXT: cir.return %6 : f64 +// CXX-NEXT: cir.func @_Z1cdd(%arg0: !cir.double {{.*}}, %arg1: !cir.double {{.*}}) -> !cir.double +// CXX-NEXT: %0 = cir.alloca !cir.double, cir.ptr , ["a", init] +// CXX-NEXT: %1 = cir.alloca !cir.double, cir.ptr , ["b", init] +// CXX-NEXT: %2 = cir.alloca !cir.double, cir.ptr , ["__retval"] +// CXX-NEXT: cir.store %arg0, %0 : !cir.double, cir.ptr +// CXX-NEXT: cir.store %arg1, %1 : !cir.double, cir.ptr +// CXX-NEXT: %3 = cir.load %0 : cir.ptr , !cir.double +// CXX-NEXT: %4 = cir.load %1 : cir.ptr , !cir.double +// CXX-NEXT: %5 = cir.binop(add, %3, %4) : !cir.double +// CXX-NEXT: cir.store %5, %2 : !cir.double, cir.ptr +// CXX-NEXT: %6 = cir.load %2 : cir.ptr , !cir.double +// CXX-NEXT: cir.return %6 : !cir.double // CXX-NEXT: } // CXX-NEXT: cir.func @_Z1dv() // CXX-NEXT: call @_Z1av() : () -> () diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index d1b6be12b15b..b760e90b131b 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -46,10 +46,10 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4, double x5) { // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %{{[0-9]+}} : !cir.ptr>), !cir.ptr int f = (int)x3; - // CHECK: %{{[0-9]+}} = cir.cast(float_to_int, %{{[0-9]+}} : f32), !s32i + // CHECK: %{{[0-9]+}} = cir.cast(float_to_int, %{{[0-9]+}} : !cir.float), !s32i double g = (double)x3; // FP extension - // %{{[0-9]+}} = cir.cast(floating, %{{[0-9]+}} : f32), f64 + // %{{[0-9]+}} = cir.cast(floating, %{{[0-9]+}} : !cir.float), !cir.double long l = (long)(void*)x4; // Must sign extend before casting to pointer // CHECK: %[[TMP:[0-9]+]] = cir.cast(integral, %{{[0-9]+}} : !s16i), !u64i @@ -57,16 +57,16 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4, double x5) { // CHECK: %{{[0-9]+}} = cir.cast(ptr_to_int, %[[TMP2]] : !cir.ptr), !s64i float sitofp = (float)x2; // Signed integer to floating point - // CHECK: %{{.+}} = cir.cast(int_to_float, %{{[0-9]+}} : !s32i), f32 + // CHECK: %{{.+}} = cir.cast(int_to_float, %{{[0-9]+}} : !s32i), !cir.float float uitofp = (float)x1; // Unsigned integer to floating point - // CHECK: %{{.+}} = cir.cast(int_to_float, %{{[0-9]+}} : !u32i), f32 + // CHECK: %{{.+}} = cir.cast(int_to_float, %{{[0-9]+}} : !u32i), !cir.float int fptosi = (int)x3; // Floating point to signed integer - // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : f32), !s32i + // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : !cir.float), !s32i unsigned fptoui = (unsigned)x3; // Floating point to unsigned integer - // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : f32), !u32i + // CHECK: %{{.+}} = cir.cast(float_to_int, %{{[0-9]+}} : !cir.float), !u32i bool ib = (bool)x1; // No checking, because this isn't a regular cast. @@ -74,14 +74,14 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4, double x5) { // CHECK: %{{[0-9]+}} = cir.cast(bool_to_int, %{{[0-9]+}} : !cir.bool), !s32i float bf = (float)ib; // bool to float - // CHECK: %{{[0-9]+}} = cir.cast(bool_to_float, %{{[0-9]+}} : !cir.bool), f32 + // CHECK: %{{[0-9]+}} = cir.cast(bool_to_float, %{{[0-9]+}} : !cir.bool), !cir.float void* bpv = (void*)ib; // bool to pointer, which is done in two steps // CHECK: %[[TMP:[0-9]+]] = cir.cast(bool_to_int, %{{[0-9]+}} : !cir.bool), !u64i // CHECK: %{{[0-9]+}} = cir.cast(int_to_ptr, %[[TMP]] : !u64i), !cir.ptr float dptofp = (float)x5; - // CHECK: %{{.+}} = cir.cast(floating, %{{[0-9]+}} : f64), f32 + // CHECK: %{{.+}} = cir.cast(floating, %{{[0-9]+}} : !cir.double), !cir.float return 0; } diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 522687aac53f..e576fb30fc65 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -15,7 +15,7 @@ int filler_sint[4] = {1, 2}; // Ensure missing elements are zero-initialized. int excess_sint[2] = {1, 2, 3, 4}; // Ensure excess elements are ignored. // CHECK: cir.global external @excess_sint = #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array float flt[] = {1.0, 2.0}; -// CHECK: cir.global external @flt = #cir.const_array<[1.000000e+00 : f32, 2.000000e+00 : f32]> : !cir.array +// CHECK: cir.global external @flt = #cir.const_array<[#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float]> : !cir.array // Tentative definition is just a declaration. int tentativeB; @@ -90,7 +90,7 @@ struct Glob { } glob; double *const glob_ptr = &glob.b[1]; -// CHECK: cir.global external @glob_ptr = #cir.global_view<@glob, [2 : i32, 1 : i32]> : !cir.ptr +// CHECK: cir.global external @glob_ptr = #cir.global_view<@glob, [2 : i32, 1 : i32]> : !cir.ptr // TODO: test tentatives with internal linkage. @@ -100,6 +100,6 @@ float tentativeC; int tentativeD[]; float zeroInitFlt[2]; // CHECK: cir.global external @tentativeA = #cir.int<0> : !s32i -// CHECK: cir.global external @tentativeC = 0.000000e+00 : f32 +// CHECK: cir.global external @tentativeC = #cir.fp<0.000000e+00> : !cir.float // CHECK: cir.global external @tentativeD = #cir.zero : !cir.array -// CHECK: cir.global external @zeroInitFlt = #cir.zero : !cir.array +// CHECK: cir.global external @zeroInitFlt = #cir.zero : !cir.array diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 52c146cce421..ba5bb7eedba6 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -43,8 +43,8 @@ int use_func() { return func(); } // CHECK-NEXT: cir.store [[TMP2]], [[TMP0]] : !s32i, cir.ptr // CHECK: cir.global external @e = #false -// CHECK-NEXT: cir.global external @y = 3.400000e+00 : f32 -// CHECK-NEXT: cir.global external @w = 4.300000e+00 : f64 +// CHECK-NEXT: cir.global external @y = #cir.fp<3.400000e+00> : !cir.float +// CHECK-NEXT: cir.global external @w = #cir.fp<4.300000e+00> : !cir.double // CHECK-NEXT: cir.global external @x = #cir.int<51> : !s8i // CHECK-NEXT: cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array // CHECK-NEXT: cir.global external @alpha = #cir.const_array<"abc\00" : !cir.array> : !cir.array diff --git a/clang/test/CIR/CodeGen/lalg.c b/clang/test/CIR/CodeGen/lalg.c index 1725ecaf5505..5bdca87db150 100644 --- a/clang/test/CIR/CodeGen/lalg.c +++ b/clang/test/CIR/CodeGen/lalg.c @@ -7,14 +7,14 @@ double dot() { return result; } -// CHECK: %1 = cir.alloca f64, cir.ptr , ["x", init] -// CHECK-NEXT: %2 = cir.alloca f64, cir.ptr , ["y", init] -// CHECK-NEXT: %3 = cir.alloca f64, cir.ptr , ["result", init] -// CHECK-NEXT: %4 = cir.const(0.000000e+00 : f64) : f64 -// CHECK-NEXT: cir.store %4, %1 : f64, cir.ptr -// CHECK-NEXT: %5 = cir.const(0.000000e+00 : f32) : f32 -// CHECK-NEXT: %6 = cir.cast(floating, %5 : f32), f64 -// CHECK-NEXT: cir.store %6, %2 : f64, cir.ptr -// CHECK-NEXT: %7 = cir.load %1 : cir.ptr , f64 -// CHECK-NEXT: %8 = cir.load %2 : cir.ptr , f64 -// CHECK-NEXT: %9 = cir.binop(mul, %7, %8) : f64 +// CHECK: %1 = cir.alloca !cir.double, cir.ptr , ["x", init] +// CHECK-NEXT: %2 = cir.alloca !cir.double, cir.ptr , ["y", init] +// CHECK-NEXT: %3 = cir.alloca !cir.double, cir.ptr , ["result", init] +// CHECK-NEXT: %4 = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double +// CHECK-NEXT: cir.store %4, %1 : !cir.double, cir.ptr +// CHECK-NEXT: %5 = cir.const(#cir.fp<0.000000e+00> : !cir.float) : !cir.float +// CHECK-NEXT: %6 = cir.cast(floating, %5 : !cir.float), !cir.double +// CHECK-NEXT: cir.store %6, %2 : !cir.double, cir.ptr +// CHECK-NEXT: %7 = cir.load %1 : cir.ptr , !cir.double +// CHECK-NEXT: %8 = cir.load %2 : cir.ptr , !cir.double +// CHECK-NEXT: %9 = cir.binop(mul, %7, %8) : !cir.double diff --git a/clang/test/CIR/CodeGen/libc.c b/clang/test/CIR/CodeGen/libc.c index 1b4c7c34b068..f6cf6a8e50e6 100644 --- a/clang/test/CIR/CodeGen/libc.c +++ b/clang/test/CIR/CodeGen/libc.c @@ -11,11 +11,11 @@ void testMemcpy(void *src, const void *dst, unsigned long size) { double fabs(double); double testFabs(double x) { return fabs(x); - // CHECK: cir.fabs %{{.+}} : f64 + // CHECK: cir.fabs %{{.+}} : !cir.double } float fabsf(float); float testFabsf(float x) { return fabsf(x); - // CHECK: cir.fabs %{{.+}} : f32 + // CHECK: cir.fabs %{{.+}} : !cir.float } diff --git a/clang/test/CIR/CodeGen/static-vars.c b/clang/test/CIR/CodeGen/static-vars.c index 714ce76f930b..4981052bc9ac 100644 --- a/clang/test/CIR/CodeGen/static-vars.c +++ b/clang/test/CIR/CodeGen/static-vars.c @@ -33,7 +33,7 @@ void func2(void) { static char i; // CHECK-DAG: cir.global "private" internal @func2.i = #cir.int<0> : !s8i static float j; - // CHECK-DAG: cir.global "private" internal @func2.j = 0.000000e+00 : f32 + // CHECK-DAG: cir.global "private" internal @func2.j = #cir.fp<0.000000e+00> : !cir.float } // Should const initialize static vars with constant addresses. diff --git a/clang/test/CIR/CodeGen/static-vars.cpp b/clang/test/CIR/CodeGen/static-vars.cpp index 7acc8c1b70f3..1a075b7d968a 100644 --- a/clang/test/CIR/CodeGen/static-vars.cpp +++ b/clang/test/CIR/CodeGen/static-vars.cpp @@ -33,5 +33,5 @@ void func2(void) { static char i; // CHECK-DAG: cir.global "private" internal @_ZZ5func2vE1i = #cir.int<0> : !s8i static float j; - // CHECK-DAG: cir.global "private" internal @_ZZ5func2vE1j = 0.000000e+00 : f32 + // CHECK-DAG: cir.global "private" internal @_ZZ5func2vE1j = #cir.fp<0.000000e+00> : !cir.float } diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 5039e8abb8fd..8005d5ce0dc0 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -52,7 +52,7 @@ struct S1 { float f; int *p; } s1 = {1, .1, 0}; -// CHECK-DAG: cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.ptr : !cir.ptr}> : !ty_22S122 +// CHECK-DAG: cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_22S122 // Should initialize global nested structs. struct S2 { diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 67a9dc9aa73a..47a29ebc90df 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -18,7 +18,7 @@ unsigned long long tc() { // CHECK: %[[local_a:.*]] = cir.alloca !s32i, cir.ptr , ["a", init] int a = 4; z = division(x, y); - // CHECK: %[[div_res:.*]] = cir.try_call exception(%[[eh_info]]) @_Z8divisionii({{.*}}) : (!cir.ptr>, !s32i, !s32i) -> f64 + // CHECK: %[[div_res:.*]] = cir.try_call exception(%[[eh_info]]) @_Z8divisionii({{.*}}) : (!cir.ptr>, !s32i, !s32i) -> !cir.double a++; // CHECK: cir.catch(%[[try_eh]] : !cir.ptr, [ diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index 16fe03bc3597..b58b1969176e 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -14,7 +14,7 @@ unsigned short t5(unsigned short i) { return i; } float t6(float i) { return i; } double t7(double i) { return i; } -long double t10(long double i) { return i; } +// long double t10(long double i) { return i; } void t8(void) {} @@ -28,9 +28,9 @@ bool t9(bool b) { return b; } // CHECK: cir.func @t3(%arg0: !u8i loc({{.*}})) -> !u8i // CHECK: cir.func @t4(%arg0: !s16i loc({{.*}})) -> !s16i // CHECK: cir.func @t5(%arg0: !u16i loc({{.*}})) -> !u16i -// CHECK: cir.func @t6(%arg0: f32 loc({{.*}})) -> f32 -// CHECK: cir.func @t7(%arg0: f64 loc({{.*}})) -> f64 -// CHECK: cir.func @t10(%arg0: f80 loc({{.*}})) -> f80 +// CHECK: cir.func @t6(%arg0: !cir.float loc({{.*}})) -> !cir.float +// CHECK: cir.func @t7(%arg0: !cir.double loc({{.*}})) -> !cir.double +// DISABLED-CHECK: cir.func @t10(%arg0: f80 loc({{.*}})) -> f80 // CHECK: cir.func @t8() // CHECK-CPP: cir.func @_Z2t0i(%arg0: !s32i loc({{.*}})) -> !s32i @@ -39,8 +39,8 @@ bool t9(bool b) { return b; } // CHECK-CPP: cir.func @_Z2t3h(%arg0: !u8i loc({{.*}})) -> !u8i // CHECK-CPP: cir.func @_Z2t4s(%arg0: !s16i loc({{.*}})) -> !s16i // CHECK-CPP: cir.func @_Z2t5t(%arg0: !u16i loc({{.*}})) -> !u16i -// CHECK-CPP: cir.func @_Z2t6f(%arg0: f32 loc({{.*}})) -> f32 -// CHECK-CPP: cir.func @_Z2t7d(%arg0: f64 loc({{.*}})) -> f64 -// CHECK-CPP: cir.func @{{.+}}t10{{.+}}(%arg0: f80 loc({{.*}})) -> f80 +// CHECK-CPP: cir.func @_Z2t6f(%arg0: !cir.float loc({{.*}})) -> !cir.float +// CHECK-CPP: cir.func @_Z2t7d(%arg0: !cir.double loc({{.*}})) -> !cir.double +// DISABLED-CHECK-CPP: cir.func @{{.+}}t10{{.+}}(%arg0: f80 loc({{.*}})) -> f80 // CHECK-CPP: cir.func @_Z2t8v() // CHECK-CPP: cir.func @_Z2t9b(%arg0: !cir.bool loc({{.*}})) -> !cir.bool diff --git a/clang/test/CIR/CodeGen/unary.c b/clang/test/CIR/CodeGen/unary.c index cecd1cf042ec..e364808f9579 100644 --- a/clang/test/CIR/CodeGen/unary.c +++ b/clang/test/CIR/CodeGen/unary.c @@ -16,13 +16,13 @@ void valueNegation(int i, short s, long l, float f, double d) { // CHECK: %[[#LONG_TO_BOOL:]] = cir.cast(int_to_bool, %[[#LONG]] : !s64i), !cir.bool // CHECK: = cir.unary(not, %[[#LONG_TO_BOOL]]) : !cir.bool, !cir.bool !f; - // CHECK: %[[#FLOAT:]] = cir.load %{{[0-9]+}} : cir.ptr , f32 - // CHECK: %[[#FLOAT_TO_BOOL:]] = cir.cast(float_to_bool, %[[#FLOAT]] : f32), !cir.bool + // CHECK: %[[#FLOAT:]] = cir.load %{{[0-9]+}} : cir.ptr , !cir.float + // CHECK: %[[#FLOAT_TO_BOOL:]] = cir.cast(float_to_bool, %[[#FLOAT]] : !cir.float), !cir.bool // CHECK: %[[#FLOAT_NOT:]] = cir.unary(not, %[[#FLOAT_TO_BOOL]]) : !cir.bool, !cir.bool // CHECK: = cir.cast(bool_to_int, %[[#FLOAT_NOT]] : !cir.bool), !s32i !d; - // CHECK: %[[#DOUBLE:]] = cir.load %{{[0-9]+}} : cir.ptr , f64 - // CHECK: %[[#DOUBLE_TO_BOOL:]] = cir.cast(float_to_bool, %[[#DOUBLE]] : f64), !cir.bool + // CHECK: %[[#DOUBLE:]] = cir.load %{{[0-9]+}} : cir.ptr , !cir.double + // CHECK: %[[#DOUBLE_TO_BOOL:]] = cir.cast(float_to_bool, %[[#DOUBLE]] : !cir.double), !cir.bool // CHECK: %[[#DOUBLE_NOT:]] = cir.unary(not, %[[#DOUBLE_TO_BOOL]]) : !cir.bool, !cir.bool // CHECK: = cir.cast(bool_to_int, %[[#DOUBLE_NOT]] : !cir.bool), !s32i } diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index 532124d5e1a6..74a6c09b2f3c 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -155,29 +155,29 @@ int *inc_p(int *i) { void floats(float f) { // CHECK: cir.func @{{.+}}floats{{.+}} - +f; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : f32, f32 - -f; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : f32, f32 - ++f; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : f32, f32 - --f; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f32, f32 - f++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : f32, f32 - f--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f32, f32 + +f; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.float, !cir.float + -f; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.float, !cir.float + ++f; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.float, !cir.float + --f; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.float, !cir.float + f++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.float, !cir.float + f--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.float, !cir.float !f; - // CHECK: %[[#F_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : f32), !cir.bool + // CHECK: %[[#F_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : !cir.float), !cir.bool // CHECK: = cir.unary(not, %[[#F_BOOL]]) : !cir.bool, !cir.bool } void doubles(double d) { // CHECK: cir.func @{{.+}}doubles{{.+}} - +d; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : f64, f64 - -d; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : f64, f64 - ++d; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : f64, f64 - --d; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f64, f64 - d++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : f64, f64 - d--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : f64, f64 + +d; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.double, !cir.double + -d; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.double, !cir.double + ++d; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.double, !cir.double + --d; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.double, !cir.double + d++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.double, !cir.double + d--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.double, !cir.double !d; - // CHECK: %[[#D_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : f64), !cir.bool + // CHECK: %[[#D_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : !cir.double), !cir.bool // CHECK: = cir.unary(not, %[[#D_BOOL]]) : !cir.bool, !cir.bool } diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index b8bb4d0fc5be..c033a4cdf98c 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -6,7 +6,7 @@ typedef union { yolo y; struct { int lifecnt; }; } yolm; typedef union { yolo y; struct { int *lifecnt; int genpad; }; } yolm2; typedef union { yolo y; struct { bool life; int genpad; }; } yolm3; -// CHECK-DAG: !ty_22U23A3ADummy22 = !cir.struct +// CHECK-DAG: !ty_22U23A3ADummy22 = !cir.struct // CHECK-DAG: !ty_22anon2E522 = !cir.struct // CHECK-DAG: !ty_22anon2E122 = !cir.struct // CHECK-DAG: !ty_22yolo22 = !cir.struct @@ -24,7 +24,7 @@ union U { float f; double d; }; -// CHECK-DAG: !ty_22U22 = !cir.struct +// CHECK-DAG: !ty_22U22 = !cir.struct // Should generate unions with complex members. union U2 { @@ -66,13 +66,13 @@ void shouldGenerateUnionAccess(union U u) { u.i; // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr u.f = 0.1F; - // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr - // CHECK: cir.store %{{.+}}, %[[#BASE]] : f32, cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.float, cir.ptr u.f; - // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr u.d = 0.1; - // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr - // CHECK: cir.store %{{.+}}, %[[#BASE]] : f64, cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.double, cir.ptr u.d; - // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr } diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index 15ea7ae26c73..8006014854c3 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -71,49 +71,49 @@ void vector_double_test(int x, double y) { // Vector constant. Not yet implemented. Expected results will change from // cir.vec.create to cir.const. vd2 a = { 1.5, 2.5 }; - // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : f64, f64) : + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : // Non-const vector initialization. vd2 b = { y, y + 1.0 }; - // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : f64, f64) : + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : // Extract element double c = a[x]; - // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : + // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : // Insert element a[x] = y; - // CHECK: %[[#LOADEDVF:]] = cir.load %[[#STORAGEVF:]] : cir.ptr >, !cir.vector - // CHECK: %[[#UPDATEDVF:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVF]][%{{[0-9]+}} : !s32i] : - // CHECK: cir.store %[[#UPDATEDVF]], %[[#STORAGEVF]] : !cir.vector, cir.ptr > + // CHECK: %[[#LOADEDVF:]] = cir.load %[[#STORAGEVF:]] : cir.ptr >, !cir.vector + // CHECK: %[[#UPDATEDVF:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVF]][%{{[0-9]+}} : !s32i] : + // CHECK: cir.store %[[#UPDATEDVF]], %[[#STORAGEVF]] : !cir.vector, cir.ptr > // Binary arithmetic operations vd2 d = a + b; - // CHECK: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // CHECK: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector vd2 e = a - b; - // CHECK: %{{[0-9]+}} = cir.binop(sub, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // CHECK: %{{[0-9]+}} = cir.binop(sub, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector vd2 f = a * b; - // CHECK: %{{[0-9]+}} = cir.binop(mul, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // CHECK: %{{[0-9]+}} = cir.binop(mul, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector vd2 g = a / b; - // CHECK: %{{[0-9]+}} = cir.binop(div, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // CHECK: %{{[0-9]+}} = cir.binop(div, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector // Unary arithmetic operations vd2 l = +a; - // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector + // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector vd2 m = -a; - // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector + // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector // Comparisons vll2 o = a == b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : , vll2 p = a != b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : , vll2 q = a < b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : , vll2 r = a > b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : , vll2 s = a <= b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : , vll2 t = a >= b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : , } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index f8f746128998..f9dce3a1ed05 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -101,7 +101,7 @@ cir.func @s1() { cir.func @badstride(%x: !cir.ptr>) { %idx = cir.const(#cir.int<2> : !cir.int) : !cir.int - %4 = cir.ptr_stride(%x : !cir.ptr>, %idx : !cir.int), !cir.ptr // expected-error {{requires the same type for first operand and result}} + %4 = cir.ptr_stride(%x : !cir.ptr>, %idx : !cir.int), !cir.ptr // expected-error {{requires the same type for first operand and result}} cir.return } @@ -115,8 +115,8 @@ cir.func @cast0(%arg0: !u32i) { // ----- -cir.func @cast1(%arg1: f32) { - %1 = cir.cast(int_to_bool, %arg1 : f32), !cir.bool // expected-error {{requires integral type for source}} +cir.func @cast1(%arg1: !cir.float) { + %1 = cir.cast(int_to_bool, %arg1 : !cir.float), !cir.bool // expected-error {{requires integral type for source}} cir.return } @@ -133,7 +133,7 @@ cir.func @cast2(%p: !cir.ptr) { !u32i = !cir.int cir.func @cast3(%p: !cir.ptr) { %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] - %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // expected-error {{requires same type for array element and pointee result}} + %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // expected-error {{requires same type for array element and pointee result}} cir.return } @@ -147,8 +147,8 @@ cir.func @cast4(%p: !cir.ptr) { // ----- -cir.func @cast5(%p: f32) { - %2 = cir.cast(bool_to_float, %p : f32), f32 // expected-error {{requires !cir.bool for source}} +cir.func @cast5(%p: !cir.float) { + %2 = cir.cast(bool_to_float, %p : !cir.float), !cir.float // expected-error {{requires !cir.bool for source}} cir.return } @@ -179,23 +179,23 @@ cir.func @cast8(%p: !u32i) { !u32i = !cir.int cir.func @cast9(%p : !u32i) { - %2 = cir.cast(integral, %p : !u32i), f32 // expected-error {{requires !IntegerType for result}} + %2 = cir.cast(integral, %p : !u32i), !cir.float // expected-error {{requires !IntegerType for result}} cir.return } // ----- !u32i = !cir.int -cir.func @cast10(%p : f32) { - %2 = cir.cast(integral, %p : f32), !u32i // expected-error {{requires !IntegerType for source}} +cir.func @cast10(%p : !cir.float) { + %2 = cir.cast(integral, %p : !cir.float), !u32i // expected-error {{requires !IntegerType for source}} cir.return } // ----- !u32i = !cir.int -cir.func @cast11(%p : f32) { - %2 = cir.cast(floating, %p : f32), !u32i // expected-error {{requries floating for source and result}} +cir.func @cast11(%p : !cir.float) { + %2 = cir.cast(floating, %p : !cir.float), !u32i // expected-error {{requries floating for source and result}} cir.return } @@ -203,7 +203,7 @@ cir.func @cast11(%p : f32) { !u32i = !cir.int cir.func @cast12(%p : !u32i) { - %2 = cir.cast(floating, %p : !u32i), f32 // expected-error {{requries floating for source and result}} + %2 = cir.cast(floating, %p : !u32i), !cir.float // expected-error {{requries floating for source and result}} cir.return } @@ -217,8 +217,8 @@ cir.func @cast13(%p : !u32i) { // ----- -cir.func @cast14(%p : f32) { - %2 = cir.cast(float_to_int, %p : f32), f32 // expected-error {{requires !IntegerType for result}} +cir.func @cast14(%p : !cir.float) { + %2 = cir.cast(float_to_int, %p : !cir.float), !cir.float // expected-error {{requires !IntegerType for result}} cir.return } @@ -265,8 +265,8 @@ cir.func @cast19(%p : !u32i) { // ----- !u32i = !cir.int -cir.func @cast20(%p : f32) { - %2 = cir.cast(float_to_bool, %p : f32), !u32i // expected-error {{requires !cir.bool for result}} +cir.func @cast20(%p : !cir.float) { + %2 = cir.cast(float_to_bool, %p : !cir.float), !u32i // expected-error {{requires !cir.bool for result}} cir.return } @@ -281,14 +281,14 @@ cir.func @cast21(%p : !u32i) { // ----- cir.func @cast22(%p : !cir.bool) { - %2 = cir.cast(bool_to_int, %p : !cir.bool), f32 // expected-error {{requires !cir.int for result}} + %2 = cir.cast(bool_to_int, %p : !cir.bool), !cir.float // expected-error {{requires !cir.int for result}} cir.return } // ----- cir.func @cast23(%p : !cir.bool) { - %2 = cir.cast(int_to_float, %p : !cir.bool), f32 // expected-error {{requires !cir.int for source}} + %2 = cir.cast(int_to_float, %p : !cir.bool), !cir.float // expected-error {{requires !cir.int for source}} cir.return } @@ -752,9 +752,9 @@ module { // ----- !s32i = !cir.int module { - cir.func @tmp(%arg0: f32) { + cir.func @tmp(%arg0: !cir.float) { // expected-error@+1 {{operand #0 must be Integer type}} - %0 = cir.alloca !s32i, cir.ptr , %arg0 : f32, ["tmp"] + %0 = cir.alloca !s32i, cir.ptr , %arg0 : !cir.float, ["tmp"] cir.return } } diff --git a/clang/test/CIR/IR/libc-fabs.cir b/clang/test/CIR/IR/libc-fabs.cir index cfd5129b6350..691849e0c3a5 100644 --- a/clang/test/CIR/IR/libc-fabs.cir +++ b/clang/test/CIR/IR/libc-fabs.cir @@ -2,8 +2,8 @@ !u32i = !cir.int module { - cir.func @foo(%arg0: f64) -> f64 { - %0 = cir.fabs %arg0 : f64 - cir.return %0 : f64 + cir.func @foo(%arg0: !cir.double) -> !cir.double { + %0 = cir.fabs %arg0 : !cir.double + cir.return %0 : !cir.double } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir index f6dfda5fa435..790d50d5510d 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir @@ -3,44 +3,44 @@ module { cir.func @foo() { - %0 = cir.alloca f32, cir.ptr , ["c"] {alignment = 4 : i64} - %1 = cir.alloca f32, cir.ptr , ["d"] {alignment = 4 : i64} - %2 = cir.alloca f32, cir.ptr , ["y", init] {alignment = 4 : i64} - %3 = cir.alloca f64, cir.ptr , ["e"] {alignment = 8 : i64} - %4 = cir.alloca f64, cir.ptr , ["f"] {alignment = 8 : i64} - %5 = cir.alloca f64, cir.ptr , ["g", init] {alignment = 8 : i64} - %6 = cir.load %0 : cir.ptr , f32 - %7 = cir.load %1 : cir.ptr , f32 - %8 = cir.binop(mul, %6, %7) : f32 - cir.store %8, %2 : f32, cir.ptr - %9 = cir.load %2 : cir.ptr , f32 - %10 = cir.load %1 : cir.ptr , f32 - %11 = cir.binop(div, %9, %10) : f32 - cir.store %11, %2 : f32, cir.ptr - %12 = cir.load %2 : cir.ptr , f32 - %13 = cir.load %1 : cir.ptr , f32 - %14 = cir.binop(add, %12, %13) : f32 - cir.store %14, %2 : f32, cir.ptr - %15 = cir.load %2 : cir.ptr , f32 - %16 = cir.load %1 : cir.ptr , f32 - %17 = cir.binop(sub, %15, %16) : f32 - cir.store %17, %2 : f32, cir.ptr - %18 = cir.load %3 : cir.ptr , f64 - %19 = cir.load %4 : cir.ptr , f64 - %20 = cir.binop(add, %18, %19) : f64 - cir.store %20, %5 : f64, cir.ptr - %21 = cir.load %3 : cir.ptr , f64 - %22 = cir.load %4 : cir.ptr , f64 - %23 = cir.binop(sub, %21, %22) : f64 - cir.store %23, %5 : f64, cir.ptr - %24 = cir.load %3 : cir.ptr , f64 - %25 = cir.load %4 : cir.ptr , f64 - %26 = cir.binop(mul, %24, %25) : f64 - cir.store %26, %5 : f64, cir.ptr - %27 = cir.load %3 : cir.ptr , f64 - %28 = cir.load %4 : cir.ptr , f64 - %29 = cir.binop(div, %27, %28) : f64 - cir.store %29, %5 : f64, cir.ptr + %0 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} + %1 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, cir.ptr , ["y", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.double, cir.ptr , ["e"] {alignment = 8 : i64} + %4 = cir.alloca !cir.double, cir.ptr , ["f"] {alignment = 8 : i64} + %5 = cir.alloca !cir.double, cir.ptr , ["g", init] {alignment = 8 : i64} + %6 = cir.load %0 : cir.ptr , !cir.float + %7 = cir.load %1 : cir.ptr , !cir.float + %8 = cir.binop(mul, %6, %7) : !cir.float + cir.store %8, %2 : !cir.float, cir.ptr + %9 = cir.load %2 : cir.ptr , !cir.float + %10 = cir.load %1 : cir.ptr , !cir.float + %11 = cir.binop(div, %9, %10) : !cir.float + cir.store %11, %2 : !cir.float, cir.ptr + %12 = cir.load %2 : cir.ptr , !cir.float + %13 = cir.load %1 : cir.ptr , !cir.float + %14 = cir.binop(add, %12, %13) : !cir.float + cir.store %14, %2 : !cir.float, cir.ptr + %15 = cir.load %2 : cir.ptr , !cir.float + %16 = cir.load %1 : cir.ptr , !cir.float + %17 = cir.binop(sub, %15, %16) : !cir.float + cir.store %17, %2 : !cir.float, cir.ptr + %18 = cir.load %3 : cir.ptr , !cir.double + %19 = cir.load %4 : cir.ptr , !cir.double + %20 = cir.binop(add, %18, %19) : !cir.double + cir.store %20, %5 : !cir.double, cir.ptr + %21 = cir.load %3 : cir.ptr , !cir.double + %22 = cir.load %4 : cir.ptr , !cir.double + %23 = cir.binop(sub, %21, %22) : !cir.double + cir.store %23, %5 : !cir.double, cir.ptr + %24 = cir.load %3 : cir.ptr , !cir.double + %25 = cir.load %4 : cir.ptr , !cir.double + %26 = cir.binop(mul, %24, %25) : !cir.double + cir.store %26, %5 : !cir.double, cir.ptr + %27 = cir.load %3 : cir.ptr , !cir.double + %28 = cir.load %4 : cir.ptr , !cir.double + %29 = cir.binop(div, %27, %28) : !cir.double + cir.store %29, %5 : !cir.double, cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir index 190d8a2256d4..99eea2260c26 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir @@ -6,8 +6,8 @@ module { cir.func @foo() { %0 = cir.alloca !s32i, cir.ptr , ["a"] {alignment = 4 : i64} %1 = cir.alloca !s32i, cir.ptr , ["b"] {alignment = 4 : i64} - %2 = cir.alloca f32, cir.ptr , ["c"] {alignment = 4 : i64} - %3 = cir.alloca f32, cir.ptr , ["d"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} + %3 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} %4 = cir.alloca !cir.bool, cir.ptr , ["e"] {alignment = 1 : i64} %5 = cir.load %0 : cir.ptr , !s32i %6 = cir.load %1 : cir.ptr , !s32i @@ -27,24 +27,24 @@ module { %20 = cir.load %0 : cir.ptr , !s32i %21 = cir.load %1 : cir.ptr , !s32i %22 = cir.cmp(le, %20, %21) : !s32i, !cir.bool - %23 = cir.load %2 : cir.ptr , f32 - %24 = cir.load %3 : cir.ptr , f32 - %25 = cir.cmp(gt, %23, %24) : f32, !cir.bool - %26 = cir.load %2 : cir.ptr , f32 - %27 = cir.load %3 : cir.ptr , f32 - %28 = cir.cmp(eq, %26, %27) : f32, !cir.bool - %29 = cir.load %2 : cir.ptr , f32 - %30 = cir.load %3 : cir.ptr , f32 - %31 = cir.cmp(lt, %29, %30) : f32, !cir.bool - %32 = cir.load %2 : cir.ptr , f32 - %33 = cir.load %3 : cir.ptr , f32 - %34 = cir.cmp(ge, %32, %33) : f32, !cir.bool - %35 = cir.load %2 : cir.ptr , f32 - %36 = cir.load %3 : cir.ptr , f32 - %37 = cir.cmp(ne, %35, %36) : f32, !cir.bool - %38 = cir.load %2 : cir.ptr , f32 - %39 = cir.load %3 : cir.ptr , f32 - %40 = cir.cmp(le, %38, %39) : f32, !cir.bool + %23 = cir.load %2 : cir.ptr , !cir.float + %24 = cir.load %3 : cir.ptr , !cir.float + %25 = cir.cmp(gt, %23, %24) : !cir.float, !cir.bool + %26 = cir.load %2 : cir.ptr , !cir.float + %27 = cir.load %3 : cir.ptr , !cir.float + %28 = cir.cmp(eq, %26, %27) : !cir.float, !cir.bool + %29 = cir.load %2 : cir.ptr , !cir.float + %30 = cir.load %3 : cir.ptr , !cir.float + %31 = cir.cmp(lt, %29, %30) : !cir.float, !cir.bool + %32 = cir.load %2 : cir.ptr , !cir.float + %33 = cir.load %3 : cir.ptr , !cir.float + %34 = cir.cmp(ge, %32, %33) : !cir.float, !cir.bool + %35 = cir.load %2 : cir.ptr , !cir.float + %36 = cir.load %3 : cir.ptr , !cir.float + %37 = cir.cmp(ne, %35, %36) : !cir.float, !cir.bool + %38 = cir.load %2 : cir.ptr , !cir.float + %39 = cir.load %3 : cir.ptr , !cir.float + %40 = cir.cmp(le, %38, %39) : !cir.float, !cir.bool cir.return } } diff --git a/clang/test/CIR/Lowering/binop-fp.cir b/clang/test/CIR/Lowering/binop-fp.cir index 33a9c6f2a20b..cb08205231e5 100644 --- a/clang/test/CIR/Lowering/binop-fp.cir +++ b/clang/test/CIR/Lowering/binop-fp.cir @@ -3,44 +3,44 @@ module { cir.func @foo() { - %0 = cir.alloca f32, cir.ptr , ["c"] {alignment = 4 : i64} - %1 = cir.alloca f32, cir.ptr , ["d"] {alignment = 4 : i64} - %2 = cir.alloca f32, cir.ptr , ["y", init] {alignment = 4 : i64} - %3 = cir.alloca f64, cir.ptr , ["e"] {alignment = 8 : i64} - %4 = cir.alloca f64, cir.ptr , ["f"] {alignment = 8 : i64} - %5 = cir.alloca f64, cir.ptr , ["g", init] {alignment = 8 : i64} - %6 = cir.load %0 : cir.ptr , f32 - %7 = cir.load %1 : cir.ptr , f32 - %8 = cir.binop(mul, %6, %7) : f32 - cir.store %8, %2 : f32, cir.ptr - %9 = cir.load %2 : cir.ptr , f32 - %10 = cir.load %1 : cir.ptr , f32 - %11 = cir.binop(div, %9, %10) : f32 - cir.store %11, %2 : f32, cir.ptr - %12 = cir.load %2 : cir.ptr , f32 - %13 = cir.load %1 : cir.ptr , f32 - %14 = cir.binop(add, %12, %13) : f32 - cir.store %14, %2 : f32, cir.ptr - %15 = cir.load %2 : cir.ptr , f32 - %16 = cir.load %1 : cir.ptr , f32 - %17 = cir.binop(sub, %15, %16) : f32 - cir.store %17, %2 : f32, cir.ptr - %18 = cir.load %3 : cir.ptr , f64 - %19 = cir.load %4 : cir.ptr , f64 - %20 = cir.binop(add, %18, %19) : f64 - cir.store %20, %5 : f64, cir.ptr - %21 = cir.load %3 : cir.ptr , f64 - %22 = cir.load %4 : cir.ptr , f64 - %23 = cir.binop(sub, %21, %22) : f64 - cir.store %23, %5 : f64, cir.ptr - %24 = cir.load %3 : cir.ptr , f64 - %25 = cir.load %4 : cir.ptr , f64 - %26 = cir.binop(mul, %24, %25) : f64 - cir.store %26, %5 : f64, cir.ptr - %27 = cir.load %3 : cir.ptr , f64 - %28 = cir.load %4 : cir.ptr , f64 - %29 = cir.binop(div, %27, %28) : f64 - cir.store %29, %5 : f64, cir.ptr + %0 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} + %1 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, cir.ptr , ["y", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.double, cir.ptr , ["e"] {alignment = 8 : i64} + %4 = cir.alloca !cir.double, cir.ptr , ["f"] {alignment = 8 : i64} + %5 = cir.alloca !cir.double, cir.ptr , ["g", init] {alignment = 8 : i64} + %6 = cir.load %0 : cir.ptr , !cir.float + %7 = cir.load %1 : cir.ptr , !cir.float + %8 = cir.binop(mul, %6, %7) : !cir.float + cir.store %8, %2 : !cir.float, cir.ptr + %9 = cir.load %2 : cir.ptr , !cir.float + %10 = cir.load %1 : cir.ptr , !cir.float + %11 = cir.binop(div, %9, %10) : !cir.float + cir.store %11, %2 : !cir.float, cir.ptr + %12 = cir.load %2 : cir.ptr , !cir.float + %13 = cir.load %1 : cir.ptr , !cir.float + %14 = cir.binop(add, %12, %13) : !cir.float + cir.store %14, %2 : !cir.float, cir.ptr + %15 = cir.load %2 : cir.ptr , !cir.float + %16 = cir.load %1 : cir.ptr , !cir.float + %17 = cir.binop(sub, %15, %16) : !cir.float + cir.store %17, %2 : !cir.float, cir.ptr + %18 = cir.load %3 : cir.ptr , !cir.double + %19 = cir.load %4 : cir.ptr , !cir.double + %20 = cir.binop(add, %18, %19) : !cir.double + cir.store %20, %5 : !cir.double, cir.ptr + %21 = cir.load %3 : cir.ptr , !cir.double + %22 = cir.load %4 : cir.ptr , !cir.double + %23 = cir.binop(sub, %21, %22) : !cir.double + cir.store %23, %5 : !cir.double, cir.ptr + %24 = cir.load %3 : cir.ptr , !cir.double + %25 = cir.load %4 : cir.ptr , !cir.double + %26 = cir.binop(mul, %24, %25) : !cir.double + cir.store %26, %5 : !cir.double, cir.ptr + %27 = cir.load %3 : cir.ptr , !cir.double + %28 = cir.load %4 : cir.ptr , !cir.double + %29 = cir.binop(div, %27, %28) : !cir.double + cir.store %29, %5 : !cir.double, cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 16e8ab968fc5..60ad48e4a644 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -10,7 +10,7 @@ !u64i = !cir.int module { - cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i, %arg2: f32, %arg3: f64) -> !s32i { + cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i, %arg2: !cir.float, %arg3: !cir.double) -> !s32i { // CHECK: llvm.func @cStyleCasts %0 = cir.alloca !u32i, cir.ptr , ["x1", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, cir.ptr , ["x2", init] {alignment = 4 : i64} @@ -65,17 +65,17 @@ module { %29 = cir.cast(ptr_to_bool, %23 : !cir.ptr), !cir.bool // Floating point casts. - %25 = cir.cast(int_to_float, %arg1 : !s32i), f32 + %25 = cir.cast(int_to_float, %arg1 : !s32i), !cir.float // CHECK: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32 - %26 = cir.cast(int_to_float, %arg0 : !u32i), f32 + %26 = cir.cast(int_to_float, %arg0 : !u32i), !cir.float // CHECK: %{{.+}} = llvm.uitofp %{{.+}} : i32 to f32 - %27 = cir.cast(float_to_int, %arg2 : f32), !s32i + %27 = cir.cast(float_to_int, %arg2 : !cir.float), !s32i // CHECK: %{{.+}} = llvm.fptosi %{{.+}} : f32 to i32 - %28 = cir.cast(float_to_int, %arg2 : f32), !u32i + %28 = cir.cast(float_to_int, %arg2 : !cir.float), !u32i // CHECK: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 %18 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: %{{.+}} = llvm.fptrunc %{{.+}} : f64 to f32 - %34 = cir.cast(floating, %arg3 : f64), f32 + %34 = cir.cast(floating, %arg3 : !cir.double), !cir.float cir.store %18, %2 : !s32i, cir.ptr %19 = cir.load %2 : cir.ptr , !s32i diff --git a/clang/test/CIR/Lowering/class.cir b/clang/test/CIR/Lowering/class.cir index afaacbec1bac..6a390485eec5 100644 --- a/clang/test/CIR/Lowering/class.cir +++ b/clang/test/CIR/Lowering/class.cir @@ -6,7 +6,7 @@ !u32i = !cir.int !ty_22S22 = !cir.struct !ty_22S2A22 = !cir.struct -!ty_22S122 = !cir.struct} #cir.record.decl.ast> +!ty_22S122 = !cir.struct} #cir.record.decl.ast> !ty_22S222 = !cir.struct !ty_22S322 = !cir.struct @@ -39,12 +39,12 @@ module { // CHECK: } // Should lower basic #cir.const_struct initializer. - cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.ptr : !cir.ptr}> : !ty_22S122 + cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_22S122 // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"class.S1", (i32, f32, ptr)> { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"class.S1", (i32, f32, ptr)> // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 // CHECK: %2 = llvm.insertvalue %1, %0[0] : !llvm.struct<"class.S1", (i32, f32, ptr)> - // CHECK: %3 = llvm.mlir.constant(1.000000e-01 : f32) : f32 + // CHECK: %3 = llvm.mlir.constant(0.099999994 : f32) : f32 // CHECK: %4 = llvm.insertvalue %3, %2[1] : !llvm.struct<"class.S1", (i32, f32, ptr)> // CHECK: %5 = llvm.mlir.zero : !llvm.ptr // CHECK: %6 = llvm.insertvalue %5, %4[2] : !llvm.struct<"class.S1", (i32, f32, ptr)> diff --git a/clang/test/CIR/Lowering/cmp.cir b/clang/test/CIR/Lowering/cmp.cir index 94b4b2cdd8a0..06dd60ff5453 100644 --- a/clang/test/CIR/Lowering/cmp.cir +++ b/clang/test/CIR/Lowering/cmp.cir @@ -6,8 +6,8 @@ module { cir.func @foo() { %0 = cir.alloca !s32i, cir.ptr , ["a"] {alignment = 4 : i64} %1 = cir.alloca !s32i, cir.ptr , ["b"] {alignment = 4 : i64} - %2 = cir.alloca f32, cir.ptr , ["c"] {alignment = 4 : i64} - %3 = cir.alloca f32, cir.ptr , ["d"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} + %3 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} %4 = cir.alloca !cir.bool, cir.ptr , ["e"] {alignment = 1 : i64} %5 = cir.load %0 : cir.ptr , !s32i %6 = cir.load %1 : cir.ptr , !s32i @@ -33,29 +33,29 @@ module { %21 = cir.load %1 : cir.ptr , !s32i %22 = cir.cmp(le, %20, %21) : !s32i, !cir.bool // CHECK: llvm.icmp "sle" - %23 = cir.load %2 : cir.ptr , f32 - %24 = cir.load %3 : cir.ptr , f32 - %25 = cir.cmp(gt, %23, %24) : f32, !cir.bool + %23 = cir.load %2 : cir.ptr , !cir.float + %24 = cir.load %3 : cir.ptr , !cir.float + %25 = cir.cmp(gt, %23, %24) : !cir.float, !cir.bool // CHECK: llvm.fcmp "ogt" - %26 = cir.load %2 : cir.ptr , f32 - %27 = cir.load %3 : cir.ptr , f32 - %28 = cir.cmp(eq, %26, %27) : f32, !cir.bool + %26 = cir.load %2 : cir.ptr , !cir.float + %27 = cir.load %3 : cir.ptr , !cir.float + %28 = cir.cmp(eq, %26, %27) : !cir.float, !cir.bool // CHECK: llvm.fcmp "oeq" - %29 = cir.load %2 : cir.ptr , f32 - %30 = cir.load %3 : cir.ptr , f32 - %31 = cir.cmp(lt, %29, %30) : f32, !cir.bool + %29 = cir.load %2 : cir.ptr , !cir.float + %30 = cir.load %3 : cir.ptr , !cir.float + %31 = cir.cmp(lt, %29, %30) : !cir.float, !cir.bool // CHECK: llvm.fcmp "olt" - %32 = cir.load %2 : cir.ptr , f32 - %33 = cir.load %3 : cir.ptr , f32 - %34 = cir.cmp(ge, %32, %33) : f32, !cir.bool + %32 = cir.load %2 : cir.ptr , !cir.float + %33 = cir.load %3 : cir.ptr , !cir.float + %34 = cir.cmp(ge, %32, %33) : !cir.float, !cir.bool // CHECK: llvm.fcmp "oge" - %35 = cir.load %2 : cir.ptr , f32 - %36 = cir.load %3 : cir.ptr , f32 - %37 = cir.cmp(ne, %35, %36) : f32, !cir.bool + %35 = cir.load %2 : cir.ptr , !cir.float + %36 = cir.load %3 : cir.ptr , !cir.float + %37 = cir.cmp(ne, %35, %36) : !cir.float, !cir.bool // CHECK: llvm.fcmp "une" - %38 = cir.load %2 : cir.ptr , f32 - %39 = cir.load %3 : cir.ptr , f32 - %40 = cir.cmp(le, %38, %39) : f32, !cir.bool + %38 = cir.load %2 : cir.ptr , !cir.float + %39 = cir.load %3 : cir.ptr , !cir.float + %40 = cir.cmp(le, %38, %39) : !cir.float, !cir.bool // CHECK: llvm.fcmp "ole" // Pointer comparisons. diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 5cbcb757ddb6..76ec616bed21 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -11,7 +11,7 @@ module { // CHECK: llvm.mlir.constant(dense<[115, 116, 114, 105, 110, 103, 0]> : tensor<7xi8>) : !llvm.array<7 x i8> %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array) : !cir.array // CHECK: llvm.mlir.constant(dense<[1, 2]> : tensor<2xi32>) : !llvm.array<2 x i32> - %3 = cir.const(#cir.const_array<[1.000000e+00 : f32, 2.000000e+00 : f32]> : !cir.array) : !cir.array + %3 = cir.const(#cir.const_array<[#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float]> : !cir.array) : !cir.array // CHECK: llvm.mlir.constant(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) : !llvm.array<2 x f32> %4 = cir.const(#cir.zero : !cir.array) : !cir.array // CHECK: cir.llvmir.zeroinit : !llvm.array<3 x i32> @@ -21,8 +21,8 @@ module { cir.func @testConvertConstArrayToDenseConst() { %0 = cir.const(#cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> %1 = cir.const(#cir.const_array<[#cir.const_array<[#cir.int<1> : !s64i]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> - %2 = cir.const(#cir.const_array<[#cir.const_array<[1.000000e+00 : f32]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> - %3 = cir.const(#cir.const_array<[#cir.const_array<[1.000000e+00]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> + %2 = cir.const(#cir.const_array<[#cir.const_array<[#cir.fp<1.000000e+00> : !cir.float]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> + %3 = cir.const(#cir.const_array<[#cir.const_array<[#cir.fp<1.000000e+00> : !cir.double]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> %4 = cir.const(#cir.const_array<[#cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array]> : !cir.array x 1>, #cir.zero : !cir.array x 1>]> : !cir.array x 1> x 2>) : !cir.array x 1> x 2> cir.return diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 401399f054a8..02fb1c92affb 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -3,17 +3,17 @@ !s32i = !cir.int module { - cir.func @dot(%arg0: !cir.ptr, %arg1: !cir.ptr, %arg2: !s32i) -> f64 { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} - %1 = cir.alloca !cir.ptr, cir.ptr >, ["b", init] {alignment = 8 : i64} + cir.func @dot(%arg0: !cir.ptr, %arg1: !cir.ptr, %arg2: !s32i) -> !cir.double { + %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr, cir.ptr >, ["b", init] {alignment = 8 : i64} %2 = cir.alloca !s32i, cir.ptr , ["size", init] {alignment = 4 : i64} - %3 = cir.alloca f64, cir.ptr , ["__retval"] {alignment = 8 : i64} - %4 = cir.alloca f64, cir.ptr , ["q", init] {alignment = 8 : i64} - cir.store %arg0, %0 : !cir.ptr, cir.ptr > - cir.store %arg1, %1 : !cir.ptr, cir.ptr > + %3 = cir.alloca !cir.double, cir.ptr , ["__retval"] {alignment = 8 : i64} + %4 = cir.alloca !cir.double, cir.ptr , ["q", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, cir.ptr > + cir.store %arg1, %1 : !cir.ptr, cir.ptr > cir.store %arg2, %2 : !s32i, cir.ptr - %5 = cir.const(0.000000e+00 : f64) : f64 - cir.store %5, %4 : f64, cir.ptr + %5 = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double + cir.store %5, %4 : !cir.double, cir.ptr cir.scope { %8 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} %9 = cir.const(#cir.int<0> : !s32i) : !s32i @@ -25,18 +25,18 @@ module { %13 = cir.cast(int_to_bool, %12 : !s32i), !cir.bool cir.condition(%13) } body { - %10 = cir.load %0 : cir.ptr >, !cir.ptr + %10 = cir.load %0 : cir.ptr >, !cir.ptr %11 = cir.load %8 : cir.ptr , !s32i - %12 = cir.ptr_stride(%10 : !cir.ptr, %11 : !s32i), !cir.ptr - %13 = cir.load %12 : cir.ptr , f64 - %14 = cir.load %1 : cir.ptr >, !cir.ptr + %12 = cir.ptr_stride(%10 : !cir.ptr, %11 : !s32i), !cir.ptr + %13 = cir.load %12 : cir.ptr , !cir.double + %14 = cir.load %1 : cir.ptr >, !cir.ptr %15 = cir.load %8 : cir.ptr , !s32i - %16 = cir.ptr_stride(%14 : !cir.ptr, %15 : !s32i), !cir.ptr - %17 = cir.load %16 : cir.ptr , f64 - %18 = cir.binop(mul, %13, %17) : f64 - %19 = cir.load %4 : cir.ptr , f64 - %20 = cir.binop(add, %19, %18) : f64 - cir.store %20, %4 : f64, cir.ptr + %16 = cir.ptr_stride(%14 : !cir.ptr, %15 : !s32i), !cir.ptr + %17 = cir.load %16 : cir.ptr , !cir.double + %18 = cir.binop(mul, %13, %17) : !cir.double + %19 = cir.load %4 : cir.ptr , !cir.double + %20 = cir.binop(add, %19, %18) : !cir.double + cir.store %20, %4 : !cir.double, cir.ptr cir.yield } step { %10 = cir.load %8 : cir.ptr , !s32i @@ -45,10 +45,10 @@ module { cir.yield } } - %6 = cir.load %4 : cir.ptr , f64 - cir.store %6, %3 : f64, cir.ptr - %7 = cir.load %3 : cir.ptr , f64 - cir.return %7 : f64 + %6 = cir.load %4 : cir.ptr , !cir.double + cir.store %6, %3 : !cir.double, cir.ptr + %7 = cir.load %3 : cir.ptr , !cir.double + cir.return %7 : !cir.double } } diff --git a/clang/test/CIR/Lowering/float.cir b/clang/test/CIR/Lowering/float.cir new file mode 100644 index 000000000000..ea30674ff7fe --- /dev/null +++ b/clang/test/CIR/Lowering/float.cir @@ -0,0 +1,20 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir + +module { + cir.func @test() { + // %0 = cir.const(1.0 : f16) : f16 + // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f16) : f16 + %1 = cir.const(#cir.fp<1.0> : !cir.float) : !cir.float + // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f32) : f32 + %2 = cir.const(#cir.fp<1.0> : !cir.double) : !cir.double + // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f64) : f64 + // %3 = cir.const(1.0 : f128) : f128 + // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f128) : f128 + // %4 = cir.const(1.0 : f80) : f80 + // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f80) : f80 + // %5 = cir.const(1.0 : bf16) : bf16 + // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : bf16) : bf16 + cir.return + } +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 99bfa76dd3a8..dde8087fada6 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -20,8 +20,8 @@ module { cir.global external @a = #cir.int<3> : !s32i cir.global external @c = #cir.int<2> : !u64i - cir.global external @y = 3.400000e+00 : f32 - cir.global external @w = 4.300000e+00 : f64 + cir.global external @y = #cir.fp<3.400000e+00> : !cir.float + cir.global external @w = #cir.fp<4.300000e+00> : !cir.double cir.global external @x = #cir.int<51> : !s8i cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array cir.global external @alpha = #cir.const_array<[#cir.int<97> : !s8i, #cir.int<98> : !s8i, #cir.int<99> : !s8i, #cir.int<0> : !s8i]> : !cir.array @@ -130,8 +130,8 @@ module { cir.store %14, %4 : !cir.ptr, cir.ptr > cir.return } - cir.global external @flt = #cir.const_array<[1.000000e+00 : f32, 2.000000e+00 : f32]> : !cir.array - cir.global external @zeroInitFlt = #cir.const_array<[0.000000e+00 : f32, 0.000000e+00 : f32]> : !cir.array + cir.global external @flt = #cir.const_array<[#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float]> : !cir.array + cir.global external @zeroInitFlt = #cir.const_array<[#cir.fp<0.000000e+00> : !cir.float, #cir.fp<0.000000e+00> : !cir.float]> : !cir.array // MLIR: llvm.mlir.global external @flt(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) {addr_space = 0 : i32} : !llvm.array<2 x f32> // MLIR: llvm.mlir.global external @zeroInitFlt(dense<0.000000e+00> : tensor<2xf32>) {addr_space = 0 : i32} : !llvm.array<2 x f32> cir.global "private" internal @staticVar = #cir.int<0> : !s32i diff --git a/clang/test/CIR/Lowering/libc.cir b/clang/test/CIR/Lowering/libc.cir index 70a066854d46..5be5d44cd3c6 100644 --- a/clang/test/CIR/Lowering/libc.cir +++ b/clang/test/CIR/Lowering/libc.cir @@ -10,9 +10,9 @@ module { cir.return } - cir.func @shouldLowerLibcFAbsBuiltin(%arg0: f64) -> f64 { - %0 = cir.fabs %arg0 : f64 + cir.func @shouldLowerLibcFAbsBuiltin(%arg0: !cir.double) -> !cir.double { + %0 = cir.fabs %arg0 : !cir.double // CHECK: %0 = llvm.intr.fabs(%arg0) : (f64) -> f64 - cir.return %0 : f64 + cir.return %0 : !cir.double } } diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index 207aa6d47031..642bb1e53f60 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -6,7 +6,7 @@ !u32i = !cir.int !ty_22S22 = !cir.struct !ty_22S2A22 = !cir.struct -!ty_22S122 = !cir.struct} #cir.record.decl.ast> +!ty_22S122 = !cir.struct} #cir.record.decl.ast> !ty_22S222 = !cir.struct !ty_22S322 = !cir.struct @@ -39,12 +39,12 @@ module { // CHECK: } // Should lower basic #cir.const_struct initializer. - cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, 1.000000e-01 : f32, #cir.ptr : !cir.ptr}> : !ty_22S122 + cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_22S122 // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"struct.S1", (i32, f32, ptr)> { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 // CHECK: %2 = llvm.insertvalue %1, %0[0] : !llvm.struct<"struct.S1", (i32, f32, ptr)> - // CHECK: %3 = llvm.mlir.constant(1.000000e-01 : f32) : f32 + // CHECK: %3 = llvm.mlir.constant(0.099999994 : f32) : f32 // CHECK: %4 = llvm.insertvalue %3, %2[1] : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: %5 = llvm.mlir.zero : !llvm.ptr // CHECK: %6 = llvm.insertvalue %5, %4[2] : !llvm.struct<"struct.S1", (i32, f32, ptr)> diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir index 2b4a001dfc7c..a5ea94324b55 100644 --- a/clang/test/CIR/Lowering/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -27,34 +27,34 @@ module { // LLVM: = add i32 %[[#]], 1 // LLVM: = sub i32 %[[#]], 1 - cir.func @floatingPoint(%arg0: f32, %arg1: f64) { + cir.func @floatingPoint(%arg0: !cir.float, %arg1: !cir.double) { // MLIR: llvm.func @floatingPoint - %0 = cir.alloca f32, cir.ptr , ["f", init] {alignment = 4 : i64} - %1 = cir.alloca f64, cir.ptr , ["d", init] {alignment = 8 : i64} - cir.store %arg0, %0 : f32, cir.ptr - cir.store %arg1, %1 : f64, cir.ptr - - %2 = cir.load %0 : cir.ptr , f32 - %3 = cir.unary(inc, %2) : f32, f32 - cir.store %3, %0 : f32, cir.ptr + %0 = cir.alloca !cir.float, cir.ptr , ["f", init] {alignment = 4 : i64} + %1 = cir.alloca !cir.double, cir.ptr , ["d", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.float, cir.ptr + cir.store %arg1, %1 : !cir.double, cir.ptr + + %2 = cir.load %0 : cir.ptr , !cir.float + %3 = cir.unary(inc, %2) : !cir.float, !cir.float + cir.store %3, %0 : !cir.float, cir.ptr // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1.000000e+00 : f32) : f32 // MLIR: = llvm.fadd %[[#F_ONE]], %{{[0-9]+}} : f32 - %4 = cir.load %0 : cir.ptr , f32 - %5 = cir.unary(dec, %4) : f32, f32 - cir.store %5, %0 : f32, cir.ptr + %4 = cir.load %0 : cir.ptr , !cir.float + %5 = cir.unary(dec, %4) : !cir.float, !cir.float + cir.store %5, %0 : !cir.float, cir.ptr // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f32) : f32 // MLIR: = llvm.fsub %[[#D_ONE]], %{{[0-9]+}} : f32 - %6 = cir.load %1 : cir.ptr , f64 - %7 = cir.unary(inc, %6) : f64, f64 - cir.store %7, %1 : f64, cir.ptr + %6 = cir.load %1 : cir.ptr , !cir.double + %7 = cir.unary(inc, %6) : !cir.double, !cir.double + cir.store %7, %1 : !cir.double, cir.ptr // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(1.000000e+00 : f64) : f64 // MLIR: = llvm.fadd %[[#D_ONE]], %{{[0-9]+}} : f64 - %8 = cir.load %1 : cir.ptr , f64 - %9 = cir.unary(dec, %8) : f64, f64 - cir.store %9, %1 : f64, cir.ptr + %8 = cir.load %1 : cir.ptr , !cir.double + %9 = cir.unary(dec, %8) : !cir.double, !cir.double + cir.store %9, %1 : !cir.double, cir.ptr // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f64) : f64 // MLIR: = llvm.fsub %[[#D_ONE]], %{{[0-9]+}} : f64 diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index 58f3357c9df2..21b12755ae02 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -21,22 +21,22 @@ module { // LLVM: = xor i32 -1, %[[#]] - cir.func @floatingPoint(%arg0: f32, %arg1: f64) { + cir.func @floatingPoint(%arg0: !cir.float, %arg1: !cir.double) { // MLIR: llvm.func @floatingPoint - %0 = cir.alloca f32, cir.ptr , ["f", init] {alignment = 4 : i64} - %1 = cir.alloca f64, cir.ptr , ["d", init] {alignment = 8 : i64} - cir.store %arg0, %0 : f32, cir.ptr - cir.store %arg1, %1 : f64, cir.ptr - %2 = cir.load %0 : cir.ptr , f32 - %3 = cir.cast(float_to_bool, %2 : f32), !cir.bool + %0 = cir.alloca !cir.float, cir.ptr , ["f", init] {alignment = 4 : i64} + %1 = cir.alloca !cir.double, cir.ptr , ["d", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.float, cir.ptr + cir.store %arg1, %1 : !cir.double, cir.ptr + %2 = cir.load %0 : cir.ptr , !cir.float + %3 = cir.cast(float_to_bool, %2 : !cir.float), !cir.bool // MLIR: %[[#F_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 // MLIR: %[[#F_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#F_ZERO]] : f32 // MLIR: %[[#F_ZEXT:]] = llvm.zext %[[#F_BOOL]] : i1 to i8 %4 = cir.unary(not, %3) : !cir.bool, !cir.bool // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1 : i8) : i8 // MLIR: = llvm.xor %[[#F_ZEXT]], %[[#F_ONE]] : i8 - %5 = cir.load %1 : cir.ptr , f64 - %6 = cir.cast(float_to_bool, %5 : f64), !cir.bool + %5 = cir.load %1 : cir.ptr , !cir.double + %6 = cir.cast(float_to_bool, %5 : !cir.double), !cir.bool // MLIR: %[[#D_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 // MLIR: %[[#D_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#D_ZERO]] : f64 // MLIR: %[[#D_ZEXT:]] = llvm.zext %[[#D_BOOL]] : i1 to i8 @@ -46,12 +46,12 @@ module { cir.return } - cir.func @CStyleValueNegation(%arg0: !s32i, %arg1: f32) { + cir.func @CStyleValueNegation(%arg0: !s32i, %arg1: !cir.float) { // MLIR: llvm.func @CStyleValueNegation %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.alloca f32, cir.ptr , ["f", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.float, cir.ptr , ["f", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, cir.ptr - cir.store %arg1, %3 : f32, cir.ptr + cir.store %arg1, %3 : !cir.float, cir.ptr %5 = cir.load %0 : cir.ptr , !s32i %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool @@ -65,8 +65,8 @@ module { // MLIR: %[[#IXOR:]] = llvm.xor %[[#IEXT]], %[[#IONE]] : i8 // MLIR: = llvm.zext %[[#IXOR]] : i8 to i32 - %17 = cir.load %3 : cir.ptr , f32 - %18 = cir.cast(float_to_bool, %17 : f32), !cir.bool + %17 = cir.load %3 : cir.ptr , !cir.float + %18 = cir.cast(float_to_bool, %17 : !cir.float), !cir.bool %19 = cir.unary(not, %18) : !cir.bool, !cir.bool %20 = cir.cast(bool_to_int, %19 : !cir.bool), !s32i // MLIR: %[[#FLOAT:]] = llvm.load %{{.+}} : !llvm.ptr diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index a4e254939912..dbf71c2833bd 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -26,16 +26,16 @@ module { // MLIR: %[[ZERO:[a-z0-9_]+]] = llvm.mlir.constant(0 : i32) // MLIR: llvm.sub %[[ZERO]], %[[#INPUT_MINUS]] - cir.func @floatingPoints(%arg0: f64) { + cir.func @floatingPoints(%arg0: !cir.double) { // MLIR: llvm.func @floatingPoints(%arg0: f64) - %0 = cir.alloca f64, cir.ptr , ["X", init] {alignment = 8 : i64} - cir.store %arg0, %0 : f64, cir.ptr - %1 = cir.load %0 : cir.ptr , f64 - %2 = cir.unary(plus, %1) : f64, f64 + %0 = cir.alloca !cir.double, cir.ptr , ["X", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.double, cir.ptr + %1 = cir.load %0 : cir.ptr , !cir.double + %2 = cir.unary(plus, %1) : !cir.double, !cir.double // MLIR: llvm.store %arg0, %[[#F_PLUS:]] : f64, !llvm.ptr // MLIR: %{{[0-9]}} = llvm.load %[[#F_PLUS]] : !llvm.ptr -> f64 - %3 = cir.load %0 : cir.ptr , f64 - %4 = cir.unary(minus, %3) : f64, f64 + %3 = cir.load %0 : cir.ptr , !cir.double + %4 = cir.unary(minus, %3) : !cir.double, !cir.double // MLIR: %[[#F_MINUS:]] = llvm.load %{{[0-9]}} : !llvm.ptr -> f64 // MLIR: %{{[0-9]}} = llvm.fneg %[[#F_MINUS]] : f64 cir.return From 8b6220a93a67217c4cc189da19f6772cd1458c3c Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 22 Feb 2024 22:17:04 +0300 Subject: [PATCH 1423/2301] [CIR][Codegen] Fix bitfields unary and binary ops (#477) This PR fixes a couple of NIY features for bit fields. Basically, such expressions with bit fields `x->a++` and `x->a |= 42` are supported now. The main problem is `UnOp` verification - now it can be defined both by `loadOp` and by `GetBitfieldOp` or even by `CastOp`. So shame on me, I removed a test from `invalid.cir` --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 +-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 17 +------------ .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 4 +-- clang/test/CIR/CodeGen/bitfield-ops.c | 21 +++++++++++++++- clang/test/CIR/IR/invalid.cir | 25 ------------------- 6 files changed, 25 insertions(+), 50 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0d5d5e9e4ff8..ae77df0bbe4b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -833,10 +833,6 @@ def UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> { `cir.unary` performs the unary operation according to the specified opcode kind: [inc, dec, plus, minus, not]. - Note for inc and dec: the operation corresponds only to the - addition/subtraction, its input is expect to come from a load - and the result to be used by a corresponding store. - It requires one input operand and has one result, both types should be the same. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index bcca17b17e31..9e4e16e8bcea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -463,7 +463,7 @@ class ScalarExprEmitter : public StmtVisitor { // Store the updated result through the lvalue if (LV.isBitField()) - llvm_unreachable("no bitfield inc/dec yet"); + CGF.buildStoreThroughBitfieldLValue(RValue::get(value), LV, value); else CGF.buildStoreThroughLValue(RValue::get(value), LV); @@ -1836,7 +1836,7 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHSLV.isBitField()) - assert(0 && "not yet implemented"); + CGF.buildStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, Result); else CGF.buildStoreThroughLValue(RValue::get(Result), LHSLV); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 0efaa67df396..22c29360d783 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2186,22 +2186,7 @@ void TryCallOp::print(::mlir::OpAsmPrinter &state) { LogicalResult UnaryOp::verify() { switch (getKind()) { case cir::UnaryOpKind::Inc: - LLVM_FALLTHROUGH; - case cir::UnaryOpKind::Dec: { - // TODO: Consider looking at the memory interface instead of - // LoadOp/StoreOp. - auto loadOp = getInput().getDefiningOp(); - if (!loadOp) - return emitOpError() << "requires input to be defined by a memory load"; - - for (const auto user : getResult().getUsers()) { - auto storeOp = dyn_cast(user); - if (storeOp && storeOp.getAddr() == loadOp.getAddr()) - return success(); - } - return emitOpError() << "requires result to be used by a memory store " - "to the same address as the input memory load"; - } + case cir::UnaryOpKind::Dec: case cir::UnaryOpKind::Plus: case cir::UnaryOpKind::Minus: case cir::UnaryOpKind::Not: diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index d413307ce7ba..b46bc8252cb0 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -640,10 +640,10 @@ static mlir::TypeConverter prepareTypeConverter() { return mlir::IntegerType::get(type.getContext(), 8); }); converter.addConversion([&](mlir::cir::SingleType type) -> mlir::Type { - return mlir::FloatType::getF32(type.getContext()); + return mlir::Float32Type::get(type.getContext()); }); converter.addConversion([&](mlir::cir::DoubleType type) -> mlir::Type { - return mlir::FloatType::getF64(type.getContext()); + return mlir::Float64Type::get(type.getContext()); }); converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { auto elementType = converter.convertType(type.getEltType()); diff --git a/clang/test/CIR/CodeGen/bitfield-ops.c b/clang/test/CIR/CodeGen/bitfield-ops.c index 5957054f6c6d..837d2ba03d1d 100644 --- a/clang/test/CIR/CodeGen/bitfield-ops.c +++ b/clang/test/CIR/CodeGen/bitfield-ops.c @@ -30,4 +30,23 @@ void store_field() { // CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr) -> !s32i int load_field(S* s) { return s->d; -} \ No newline at end of file +} + +// CHECK: cir.func {{.*@unOp}} +// CHECK: [[TMP0:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP1:%.*]] = cir.get_bitfield(#bfi_d, [[TMP0]] : !cir.ptr) -> !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(inc, [[TMP1]]) : !s32i, !s32i +// CHECK: [[TMP3:%.*]] = cir.set_bitfield(#bfi_d, [[TMP0]] : !cir.ptr, [[TMP2]] : !s32i) -> !s32i +void unOp(S* s) { + s->d++; +} + +// CHECK: cir.func {{.*@binOp}} +// CHECK: [[TMP0:%.*]] = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK: [[TMP1:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_bitfield(#bfi_d, [[TMP1]] : !cir.ptr) -> !s32i +// CHECK: [[TMP3:%.*]] = cir.binop(or, [[TMP2]], [[TMP0]]) : !s32i +// CHECK: [[TMP4:%.*]] = cir.set_bitfield(#bfi_d, [[TMP1]] : !cir.ptr, [[TMP3]] : !s32i) -> !s32i +void binOp(S* s) { + s->d |= 42; +} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index f9dce3a1ed05..3b5e5f83a14e 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -351,31 +351,6 @@ module { // ----- -!u32i = !cir.int -cir.func @unary0() { - %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<2> : !u32i) : !u32i - - %3 = cir.unary(inc, %1) : !u32i, !u32i // expected-error {{'cir.unary' op requires input to be defined by a memory load}} - cir.store %3, %0 : !u32i, cir.ptr - cir.return -} - -// ----- - -!u32i = !cir.int -cir.func @unary1() { - %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<2> : !u32i) : !u32i - cir.store %1, %0 : !u32i, cir.ptr - - %2 = cir.load %0 : cir.ptr , !u32i - %3 = cir.unary(dec, %2) : !u32i, !u32i // // expected-error {{'cir.unary' op requires result to be used by a memory store to the same address as the input memory load}} - cir.return -} - -// ----- - !u32i = !cir.int module { cir.global external @v = #cir.zero : !u32i // expected-error {{zero expects struct or array type}} From 81912073e9426c0b9436a6e2eae326b58a04c87c Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sun, 26 Jan 2025 16:26:26 -0800 Subject: [PATCH 1424/2301] fixup! [CIR] introduce CIR floating-point types (#385) --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6f3d8ba805c6..dad14fbf3364 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2274,7 +2274,7 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, return mlir::IntegerType::get(type.getContext(), type.getWidth()); }); converter.addConversion([&](mlir::cir::SingleType type) -> mlir::Type { - return mlir::Float64Type::get(type.getContext()); + return mlir::Float32Type::get(type.getContext()); }); converter.addConversion([&](mlir::cir::DoubleType type) -> mlir::Type { return mlir::Float64Type::get(type.getContext()); From aa6cd8e844bcd711a1110298f3c8fe44ac6d194b Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Thu, 22 Feb 2024 22:27:46 +0300 Subject: [PATCH 1425/2301] [CIR][CIRGen] Fix calling a function through a function pointer (#467) CIR codegen always casts the no-proto function pointer to `FuncOp`. But the function pointer may be result of cir operations (f.e. `cir.load`). As a result in such cases the function pointer sets to `nullptr`. That leads to compilation error. So this PR removes the unecessary cast to 'FuncOp' and resolves the issue. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 21 +++++++++++++++++++-- clang/test/CIR/CodeGen/agg-copy.c | 12 ++++++------ clang/test/CIR/CodeGen/no-proto-fun-ptr.c | 11 +++++++++++ clang/test/CIR/CodeGen/no-prototype.c | 16 ++++++++++++---- 4 files changed, 48 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 46a91b1aadab..702f1fa2d709 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1146,9 +1146,26 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, if (isa(FnType) || Chain) { assert(!UnimplementedFeature::chainCalls()); assert(!UnimplementedFeature::addressSpace()); + auto CalleeTy = getTypes().GetFunctionType(FnInfo); + // get non-variadic function type + CalleeTy = mlir::cir::FuncType::get(CalleeTy.getInputs(), + CalleeTy.getReturnType(), false); + auto CalleePtrTy = + mlir::cir::PointerType::get(builder.getContext(), CalleeTy); + + auto *Fn = Callee.getFunctionPointer(); + mlir::Value Addr; + if (auto funcOp = llvm::dyn_cast(Fn)) { + Addr = builder.create( + getLoc(E->getSourceRange()), + mlir::cir::PointerType::get(builder.getContext(), + funcOp.getFunctionType()), + funcOp.getSymName()); + } else { + Addr = Fn->getResult(0); + } - // Set no-proto function as callee. - auto Fn = llvm::dyn_cast(Callee.getFunctionPointer()); + Fn = builder.createBitcast(Addr, CalleePtrTy).getDefiningOp(); Callee.setFunctionPointer(Fn); } diff --git a/clang/test/CIR/CodeGen/agg-copy.c b/clang/test/CIR/CodeGen/agg-copy.c index aa7a158e7464..0132bdb1132f 100644 --- a/clang/test/CIR/CodeGen/agg-copy.c +++ b/clang/test/CIR/CodeGen/agg-copy.c @@ -64,9 +64,9 @@ void foo4(A* a1) { A create() { A a; return a; } // CHECK: cir.func {{.*@foo5}} -// CHECK: [[TMP0]] = cir.alloca !ty_22A22, cir.ptr , -// CHECK: [[TMP1]] = cir.alloca !ty_22A22, cir.ptr , ["tmp"] {alignment = 4 : i64} -// CHECK: [[TMP2]] = cir.call @create() : () -> !ty_22A22 +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22A22, cir.ptr , +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["tmp"] {alignment = 4 : i64} +// CHECK: [[TMP2:%.*]] = cir.call @create() : () -> !ty_22A22 // CHECK: cir.store [[TMP2]], [[TMP1]] : !ty_22A22, cir.ptr // CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr void foo5() { @@ -77,9 +77,9 @@ void foo5() { void foo6(A* a1) { A a2 = (*a1); // CHECK: cir.func {{.*@foo6}} -// CHECK: [[TMP0]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] {alignment = 8 : i64} -// CHECK: [[TMP1]] = cir.alloca !ty_22A22, cir.ptr , ["a2", init] {alignment = 4 : i64} +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] {alignment = 8 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["a2", init] {alignment = 4 : i64} // CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > -// CHECK: [[TMP2]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr // CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/no-proto-fun-ptr.c b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c index a6fcfa3f75d3..e396a606a73d 100644 --- a/clang/test/CIR/CodeGen/no-proto-fun-ptr.c +++ b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c @@ -15,3 +15,14 @@ void check_noproto_ptr() { void empty(void) {} +void buz() { + void (*func)(); + (*func)(); +} + +// CHECK: cir.func no_proto @buz() +// CHECK: [[FNPTR_ALLOC:%.*]] = cir.alloca !cir.ptr>, cir.ptr >>, ["func"] {alignment = 8 : i64} +// CHECK: [[FNPTR:%.*]] = cir.load deref [[FNPTR_ALLOC]] : cir.ptr >>, !cir.ptr> +// CHECK: [[CAST:%.*]] = cir.cast(bitcast, %1 : !cir.ptr>), !cir.ptr> +// CHECK: cir.call [[CAST]]() : (!cir.ptr>) -> () +// CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/no-prototype.c b/clang/test/CIR/CodeGen/no-prototype.c index d5ed15018454..4028d8e2ec32 100644 --- a/clang/test/CIR/CodeGen/no-prototype.c +++ b/clang/test/CIR/CodeGen/no-prototype.c @@ -35,7 +35,9 @@ int test1(int x) { int noProto2(); int test2(int x) { return noProto2(x); - // CHECK: %{{.+}} = cir.call @noProto2(%{{[0-9]+}}) : (!s32i) -> !s32i + // CHECK: [[GGO:%.*]] = cir.get_global @noProto2 : cir.ptr > + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, %3 : !cir.ptr>), !cir.ptr> + // CHECK: {{.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr>, !s32i) -> !s32i } int noProto2(int x) { return x; } // CHECK: cir.func no_proto @noProto2(%arg0: !s32i {{.+}}) -> !s32i @@ -49,7 +51,9 @@ int noProto3(); int test3(int x) { // CHECK: cir.func @test3 return noProto3(x); - // CHECK: %{{.+}} = cir.call @noProto3(%{{[0-9]+}}) : (!s32i) -> !s32i + // CHECK: [[GGO:%.*]] = cir.get_global @noProto3 : cir.ptr > + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> + // CHECK: {{%.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr>, !s32i) -> !s32i } @@ -64,14 +68,18 @@ int noProto4() { return 0; } // cir.func private no_proto @noProto4() -> !s32i int test4(int x) { return noProto4(x); // Even if we know the definition, this should compile. - // CHECK: %{{.+}} = cir.call @noProto4(%{{.+}}) : (!s32i) -> !s32i + // CHECK: [[GGO:%.*]] = cir.get_global @noProto4 : cir.ptr > + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> + // CHECK: {{%.*}} = cir.call [[CAST]]({{%.*}}) : (!cir.ptr>, !s32i) -> !s32i } // No-proto definition followed by an incorrect call due to lack of args. int noProto5(); int test5(int x) { return noProto5(); - // CHECK: %{{.+}} = cir.call @noProto5() : () -> !s32i + // CHECK: [[GGO:%.*]] = cir.get_global @noProto5 : cir.ptr > + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> + // CHECK: {{%.*}} = cir.call [[CAST]]() : (!cir.ptr>) -> !s32i } int noProto5(int x) { return x; } // CHECK: cir.func no_proto @noProto5(%arg0: !s32i {{.+}}) -> !s32i From 50797715584bf1459e3487e3353717809e0634e8 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 23 Feb 2024 03:30:22 +0800 Subject: [PATCH 1426/2301] [CIR] initial support for pointer-to-data-member type (#401) This patch adds initial support for the pointer-to-data-member type. Specifically, this commit includes: - New ops, types, and attributes: - CodeGen for pointer-to-data-member types and values - Lower C++ pointer-to-member type - Lower C++ expression `&C::D` - Lower C++ expression `c.*p` and `c->*p` This patch only includes an initial support. The following stuff related to pointer-to-member types are not supported yet: - Pointer to member function; - Conversion from `T Base::*` to `T Derived::*`; - LLVMIR lowering. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 34 +++++++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 57 +++++++++++++++ clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 14 ++-- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 36 +++++++++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 29 ++++++++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 58 +++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 26 ++++++- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 36 +++++++++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 27 +++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 7 ++ clang/lib/CIR/CodeGen/CIRGenModule.h | 8 +++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 9 ++- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 37 ++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 28 ++++++++ clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 21 ++++++ .../CIR/CodeGen/pointer-to-data-member.cpp | 62 ++++++++++++++++ clang/test/CIR/IR/data-member-ptr.cir | 46 ++++++++++++ clang/test/CIR/IR/invalid.cir | 72 ++++++++++++++++++- 18 files changed, 589 insertions(+), 18 deletions(-) create mode 100644 clang/test/CIR/CodeGen/pointer-to-data-member.cpp create mode 100644 clang/test/CIR/IR/data-member-ptr.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 4949a03908dc..7ca5f1060f3f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -264,6 +264,40 @@ def ConstPtrAttr : CIR_Attr<"ConstPtr", "ptr", [TypedAttrInterface]> { let hasCustomAssemblyFormat = 1; } +//===----------------------------------------------------------------------===// +// DataMemberAttr +//===----------------------------------------------------------------------===// + +def DataMemberAttr : CIR_Attr<"DataMember", "data_member", + [TypedAttrInterface]> { + let summary = "Holds a constant data member pointer value"; + let parameters = (ins AttributeSelfTypeParameter< + "", "mlir::cir::DataMemberType">:$type, + OptionalParameter< + "std::optional">:$memberIndex); + let description = [{ + A data member attribute is a literal attribute that represents a constant + pointer-to-data-member value. + + The `memberIndex` parameter represents the index of the pointed-to member + within its containing struct. It is an optional parameter; lack of this + parameter indicates a null pointer-to-data-member value. + + Example: + ``` + #ptr = #cir.data_member<1> : !cir.data_member + + #null = #cir.data_member : !cir.data_member + ``` + }]; + + let genVerifyDecl = 1; + + let assemblyFormat = [{ + `<` ($memberIndex^):(`null`)? `>` + }]; +} + //===----------------------------------------------------------------------===// // SignedOverflowBehaviorAttr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index ae77df0bbe4b..36623b1a9171 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1770,6 +1770,63 @@ def GetMemberOp : CIR_Op<"get_member"> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// GetRuntimeMemberOp +//===----------------------------------------------------------------------===// + +def GetRuntimeMemberOp : CIR_Op<"get_runtime_member"> { + let summary = "Get the address of a member of a struct"; + let description = [{ + The `cir.get_runtime_member` operation gets the address of a member from + the input record. The target member is given by a value of type + `!cir.data_member` (i.e. a pointer-to-data-member value). + + This operation differs from `cir.get_member` in when the target member can + be determined. For the `cir.get_member` operation, the target member is + specified as a constant index so the member it returns access to is known + when the operation is constructed. For the `cir.get_runtime_member` + operation, the target member is given through a pointer-to-data-member + value which is unknown until the program being compiled is executed. In + other words, `cir.get_member` represents a normal member access through the + `.` operator in C/C++: + + ```cpp + struct Foo { int x; }; + Foo f; + (void)f.x; // cir.get_member + ``` + + And `cir.get_runtime_member` represents a member access through the `.*` or + the `->*` operator in C++: + + ```cpp + struct Foo { int x; } + Foo f; + Foo *p; + int Foo::*member; + + (void)f.*member; // cir.get_runtime_member + (void)f->*member; // cir.get_runtime_member + ``` + + This operation expects a pointer to the base record as well as the pointer + to the target member. + }]; + + let arguments = (ins + Arg:$addr, + Arg:$member); + + let results = (outs Res:$result); + + let assemblyFormat = [{ + $addr `[` $member `:` qualified(type($member)) `]` attr-dict + `:` qualified(type($addr)) `->` qualified(type($result)) + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // VecInsertOp //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 512b9db01dfe..651790031dba 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -20,13 +20,6 @@ #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" -//===----------------------------------------------------------------------===// -// CIR Dialect Tablegen'd Types -//===----------------------------------------------------------------------===// - -#define GET_TYPEDEF_CLASSES -#include "clang/CIR/Dialect/IR/CIROpsTypes.h.inc" - //===----------------------------------------------------------------------===// // CIR StructType // @@ -184,4 +177,11 @@ class StructType } // namespace cir } // namespace mlir +//===----------------------------------------------------------------------===// +// CIR Dialect Tablegen'd Types +//===----------------------------------------------------------------------===// + +#define GET_TYPEDEF_CLASSES +#include "clang/CIR/Dialect/IR/CIROpsTypes.h.inc" + #endif // MLIR_DIALECT_CIR_IR_CIRTYPES_H_ diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 442ce90cc54a..1501cd1122ea 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -145,6 +145,28 @@ def CIR_PointerType : CIR_Type<"Pointer", "ptr", let hasCustomAssemblyFormat = 1; } +//===----------------------------------------------------------------------===// +// DataMemberType +//===----------------------------------------------------------------------===// + +def CIR_DataMemberType : CIR_Type<"DataMember", "data_member", + [DeclareTypeInterfaceMethods]> { + + let summary = "CIR type that represents pointer-to-data-member type in C++"; + let description = [{ + `cir.member_ptr` models the pointer-to-data-member type in C++. Values of + this type are essentially offsets of the pointed-to member within one of + its containing struct. + }]; + + let parameters = (ins "mlir::Type":$memberTy, + "mlir::cir::StructType":$clsTy); + + let assemblyFormat = [{ + `<` $memberTy `in` $clsTy `>` + }]; +} + //===----------------------------------------------------------------------===// // BoolType // @@ -309,6 +331,15 @@ def VoidPtr : Type< "mlir::cir::VoidType::get($_builder.getContext()))"> { } +// Pointer to struct +def StructPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::StructType>()">, + ]>, "!cir.struct*"> { +} + // Pointers to exception info def ExceptionInfoPtr : Type< And<[ @@ -351,8 +382,9 @@ def CIR_StructType : Type()">, //===----------------------------------------------------------------------===// def CIR_AnyType : AnyTypeOf<[ - CIR_IntType, CIR_PointerType, CIR_BoolType, CIR_ArrayType, CIR_VectorType, - CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionInfo, CIR_AnyFloat, + CIR_IntType, CIR_PointerType, CIR_DataMemberType, CIR_BoolType, CIR_ArrayType, + CIR_VectorType, CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionInfo, + CIR_AnyFloat, ]>; #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index b6d3bd56f60c..3ecb52c87d1e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -221,6 +221,16 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::TypeInfoAttr::get(anonStruct.getType(), fieldsAttr); } + mlir::cir::DataMemberAttr getDataMemberAttr(mlir::cir::DataMemberType ty, + size_t memberIndex) { + return mlir::cir::DataMemberAttr::get(getContext(), ty, memberIndex); + } + + mlir::cir::DataMemberAttr + getNullDataMemberAttr(mlir::cir::DataMemberType ty) { + return mlir::cir::DataMemberAttr::get(getContext(), ty, std::nullopt); + } + mlir::TypedAttr getZeroInitAttr(mlir::Type ty) { if (ty.isa()) return mlir::cir::IntAttr::get(ty, 0); @@ -551,6 +561,12 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, ty, getConstPtrAttr(ty, 0)); } + /// Create constant nullptr for pointer-to-data-member type ty. + mlir::cir::ConstantOp getNullDataMemberPtr(mlir::cir::DataMemberType ty, + mlir::Location loc) { + return create(loc, ty, getNullDataMemberAttr(ty)); + } + // Creates constant null value for integral type ty. mlir::cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { return create(loc, ty, getZeroInitAttr(ty)); @@ -866,6 +882,19 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } } + mlir::cir::GetRuntimeMemberOp createGetIndirectMember(mlir::Location loc, + mlir::Value objectPtr, + mlir::Value memberPtr) { + auto memberPtrTy = memberPtr.getType().cast(); + + // TODO(cir): consider address space. + assert(!UnimplementedFeature::addressSpace()); + auto resultTy = getPointerTo(memberPtrTy.getMemberTy()); + + return create(loc, resultTy, objectPtr, + memberPtr); + } + mlir::Value createPtrIsNull(mlir::Value ptr) { return createNot(createPtrToBoolCast(ptr)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index e8175cee087c..012bba17424f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1475,3 +1475,61 @@ mlir::Value CIRGenFunction::getVTablePtr(mlir::Location Loc, Address This, return VTable; } + +Address CIRGenFunction::buildCXXMemberDataPointerAddress( + const Expr *E, Address base, mlir::Value memberPtr, + const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo) { + assert(!UnimplementedFeature::cxxABI()); + + auto op = builder.createGetIndirectMember(getLoc(E->getSourceRange()), + base.getPointer(), memberPtr); + + QualType memberType = memberPtrType->getPointeeType(); + CharUnits memberAlign = CGM.getNaturalTypeAlignment(memberType, baseInfo); + memberAlign = CGM.getDynamicOffsetAlignment( + base.getAlignment(), memberPtrType->getClass()->getAsCXXRecordDecl(), + memberAlign); + + return Address(op, convertTypeForMem(memberPtrType->getPointeeType()), + memberAlign); +} + +clang::CharUnits +CIRGenModule::getDynamicOffsetAlignment(clang::CharUnits actualBaseAlign, + const clang::CXXRecordDecl *baseDecl, + clang::CharUnits expectedTargetAlign) { + // If the base is an incomplete type (which is, alas, possible with + // member pointers), be pessimistic. + if (!baseDecl->isCompleteDefinition()) + return std::min(actualBaseAlign, expectedTargetAlign); + + auto &baseLayout = getASTContext().getASTRecordLayout(baseDecl); + CharUnits expectedBaseAlign = baseLayout.getNonVirtualAlignment(); + + // If the class is properly aligned, assume the target offset is, too. + // + // This actually isn't necessarily the right thing to do --- if the + // class is a complete object, but it's only properly aligned for a + // base subobject, then the alignments of things relative to it are + // probably off as well. (Note that this requires the alignment of + // the target to be greater than the NV alignment of the derived + // class.) + // + // However, our approach to this kind of under-alignment can only + // ever be best effort; after all, we're never going to propagate + // alignments through variables or parameters. Note, in particular, + // that constructing a polymorphic type in an address that's less + // than pointer-aligned will generally trap in the constructor, + // unless we someday add some sort of attribute to change the + // assumed alignment of 'this'. So our goal here is pretty much + // just to allow the user to explicitly say that a pointer is + // under-aligned and then safely access its fields and vtables. + if (actualBaseAlign >= expectedBaseAlign) { + return expectedTargetAlign; + } + + // Otherwise, we might be offset by an arbitrary multiple of the + // actual alignment. The correct adjustment is to take the min of + // the two alignments. + return std::min(actualBaseAlign, expectedTargetAlign); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 702f1fa2d709..bcaa2655994d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -892,6 +892,30 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { llvm_unreachable("Unhandled DeclRefExpr"); } +LValue +CIRGenFunction::buildPointerToDataMemberBinaryExpr(const BinaryOperator *E) { + assert((E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) && + "unexpected binary operator opcode"); + + auto baseAddr = Address::invalid(); + if (E->getOpcode() == BO_PtrMemD) + baseAddr = buildLValue(E->getLHS()).getAddress(); + else + baseAddr = buildPointerWithAlignment(E->getLHS()); + + const auto *memberPtrTy = E->getRHS()->getType()->castAs(); + + auto memberPtr = buildScalarExpr(E->getRHS()); + + LValueBaseInfo baseInfo; + // TODO(cir): add TBAA + assert(!UnimplementedFeature::tbaa()); + auto memberAddr = buildCXXMemberDataPointerAddress(E, baseAddr, memberPtr, + memberPtrTy, &baseInfo); + + return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo); +} + LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { // Comma expressions just emit their LHS then their RHS as an l-value. if (E->getOpcode() == BO_Comma) { @@ -900,7 +924,7 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { } if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) - assert(0 && "not implemented"); + return buildPointerToDataMemberBinaryExpr(E); assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 59551e6d4bdc..8a5f61c60efd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1763,6 +1763,21 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, return buildArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts, typedFiller); } + case APValue::MemberPointer: { + assert(!UnimplementedFeature::cxxABI()); + + const ValueDecl *memberDecl = Value.getMemberPointerDecl(); + assert(!Value.isMemberPointerToDerivedMember() && "NYI"); + + if (const auto *memberFuncDecl = dyn_cast(memberDecl)) + assert(0 && "not implemented"); + + auto cirTy = + CGM.getTypes().ConvertType(DestType).cast(); + + const auto *fieldDecl = cast(memberDecl); + return builder.getDataMemberAttr(cirTy, fieldDecl->getFieldIndex()); + } case APValue::LValue: return ConstantLValueEmitter(*this, Value, DestType).tryEmit(); case APValue::Struct: @@ -1773,7 +1788,6 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, case APValue::ComplexFloat: case APValue::Vector: case APValue::AddrLabelDiff: - case APValue::MemberPointer: assert(0 && "not implemented"); } llvm_unreachable("Unknown APValue kind"); @@ -1802,6 +1816,26 @@ mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) { return {}; } +mlir::Value CIRGenModule::buildMemberPointerConstant(const UnaryOperator *E) { + assert(!UnimplementedFeature::cxxABI()); + + auto loc = getLoc(E->getSourceRange()); + + const auto *decl = cast(E->getSubExpr())->getDecl(); + + // A member function pointer. + // Member function pointer is not supported yet. + if (const auto *methodDecl = dyn_cast(decl)) + assert(0 && "not implemented"); + + auto ty = getCIRType(E->getType()).cast(); + + // Otherwise, a member data pointer. + const auto *fieldDecl = cast(decl); + return builder.create( + loc, ty, builder.getDataMemberAttr(ty, fieldDecl->getFieldIndex())); +} + mlir::Attribute ConstantEmitter::emitAbstract(const Expr *E, QualType destType) { auto state = pushAbstract(); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 9e4e16e8bcea..95844e1bb686 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -498,7 +498,9 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { - assert(!llvm::isa(E->getType()) && "not implemented"); + if (llvm::isa(E->getType())) + return CGF.CGM.buildMemberPointerConstant(E); + return CGF.buildLValue(E->getSubExpr()).getPointer(); } @@ -653,8 +655,13 @@ class ScalarExprEmitter : public StmtVisitor { return Visit(E->getRHS()); } - mlir::Value VisitBinPtrMemD(const Expr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitBinPtrMemI(const Expr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitBinPtrMemD(const BinaryOperator *E) { + return buildLoadOfLValue(E); + } + + mlir::Value VisitBinPtrMemI(const BinaryOperator *E) { + return buildLoadOfLValue(E); + } mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { llvm_unreachable("NYI"); @@ -1447,8 +1454,18 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { mlir::cir::ConstPtrAttr::get(Builder.getContext(), Ty, 0)); } - case CK_NullToMemberPointer: - llvm_unreachable("NYI"); + case CK_NullToMemberPointer: { + if (MustVisitNullValue(E)) + CGF.buildIgnoredExpr(E); + + assert(!UnimplementedFeature::cxxABI()); + + const MemberPointerType *MPT = CE->getType()->getAs(); + assert(!MPT->isMemberFunctionPointerType() && "NYI"); + + auto Ty = CGF.getCIRType(DestTy).cast(); + return Builder.getNullDataMemberPtr(Ty, CGF.getLoc(E->getExprLoc())); + } case CK_ReinterpretMemberPointer: llvm_unreachable("NYI"); case CK_BaseToDerivedMemberPointer: diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index b66b7daf0aef..c1a2c1843730 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -787,6 +787,13 @@ class CIRGenFunction : public CIRGenTypeCache { LValue buildStmtExprLValue(const StmtExpr *E); + LValue buildPointerToDataMemberBinaryExpr(const BinaryOperator *E); + + /// TODO: Add TBAAAccessInfo + Address buildCXXMemberDataPointerAddress( + const Expr *E, Address base, mlir::Value memberPtr, + const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo); + /// Generate a call of the given function, expecting the given /// result type, and using the given argument list which specifies both the /// LLVM arguments and the types they were derived from. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index ff18a16784f7..cba0b9fa1d10 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -384,6 +384,12 @@ class CIRGenModule : public CIRGenTypeCache { LValueBaseInfo *BaseInfo = nullptr, bool forPointeeType = false); + /// TODO: Add TBAAAccessInfo + clang::CharUnits + getDynamicOffsetAlignment(clang::CharUnits actualBaseAlign, + const clang::CXXRecordDecl *baseDecl, + clang::CharUnits expectedTargetAlign); + mlir::cir::FuncOp getAddrOfCXXStructor( clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, mlir::cir::FuncType FnType = nullptr, bool DontDefer = false, @@ -511,6 +517,8 @@ class CIRGenModule : public CIRGenTypeCache { /// null constant. mlir::Value buildNullConstant(QualType T, mlir::Location loc); + mlir::Value buildMemberPointerConstant(const UnaryOperator *E); + llvm::StringRef getMangledName(clang::GlobalDecl GD); void buildTentativeDefinition(const VarDecl *D); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 6b774f14746e..966558f170ea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -700,7 +700,14 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } case Type::MemberPointer: { - assert(0 && "not implemented"); + const auto *MPT = cast(Ty); + assert(MPT->isMemberDataPointer() && "ptr-to-member-function is NYI"); + + auto memberTy = ConvertType(MPT->getPointeeType()); + auto clsTy = + ConvertType(QualType(MPT->getClass(), 0)).cast(); + ResultType = + mlir::cir::DataMemberType::get(Builder.getContext(), memberTy, clsTy); break; } diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 965e7fa6713b..0ded0f501483 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -360,6 +360,43 @@ LogicalResult cir::FPAttr::verify(function_ref emitError, return success(); } +//===----------------------------------------------------------------------===// +// DataMemberAttr definitions +//===----------------------------------------------------------------------===// + +LogicalResult +DataMemberAttr::verify(function_ref emitError, + mlir::cir::DataMemberType ty, + std::optional memberIndex) { + if (!memberIndex.has_value()) { + // DataMemberAttr without a given index represents a null value. + return success(); + } + + auto clsStructTy = ty.getClsTy(); + if (clsStructTy.isIncomplete()) { + emitError() << "incomplete 'cir.struct' cannot be used to build a non-null " + "data member pointer"; + return failure(); + } + + auto memberIndexValue = memberIndex.value(); + if (memberIndexValue >= clsStructTy.getNumElements()) { + emitError() + << "member index of a #cir.data_member attribute is out of range"; + return failure(); + } + + auto memberTy = clsStructTy.getMembers()[memberIndexValue]; + if (memberTy != ty.getMemberTy()) { + emitError() << "member type of a #cir.data_member attribute must match the " + "attribute type"; + return failure(); + } + + return success(); +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 22c29360d783..1f51087b472e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -292,6 +292,12 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return op->emitOpError("nullptr expects pointer type"); } + if (attrType.isa()) { + // More detailed type verifications are already done in + // DataMemberAttr::verify. Don't need to repeat here. + return success(); + } + if (attrType.isa()) { if (opType.isa<::mlir::cir::StructType, ::mlir::cir::ArrayType>()) return success(); @@ -2555,6 +2561,28 @@ LogicalResult GetMemberOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// GetRuntimeMemberOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult GetRuntimeMemberOp::verify() { + auto recordTy = + getAddr().getType().cast().getPointee().cast(); + auto memberPtrTy = getMember().getType(); + + if (recordTy != memberPtrTy.getClsTy()) { + emitError() << "record type does not match the member pointer type"; + return mlir::failure(); + } + + if (getType().getPointee() != memberPtrTy.getMemberTy()) { + emitError() << "result type does not match the member pointer type"; + return mlir::failure(); + } + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 5ddafd66231d..d99ab72cdcc5 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -403,6 +403,27 @@ uint64_t PointerType::getPreferredAlignment( return 8; } +llvm::TypeSize +DataMemberType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + // FIXME: consider size differences under different ABIs + return llvm::TypeSize::getFixed(64); +} + +uint64_t +DataMemberType::getABIAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + // FIXME: consider alignment differences under different ABIs + return 8; +} + +uint64_t DataMemberType::getPreferredAlignment( + const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + // FIXME: consider alignment differences under different ABIs + return 8; +} + llvm::TypeSize ArrayType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { diff --git a/clang/test/CIR/CodeGen/pointer-to-data-member.cpp b/clang/test/CIR/CodeGen/pointer-to-data-member.cpp new file mode 100644 index 000000000000..9ffa714e4d70 --- /dev/null +++ b/clang/test/CIR/CodeGen/pointer-to-data-member.cpp @@ -0,0 +1,62 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Point { + int x; + int y; + int z; +}; +// CHECK-DAG: !ty_22Point22 = !cir.struct + +int Point::*pt_member = &Point::x; +// CHECK: cir.global external @pt_member = #cir.data_member<0> : !cir.data_member + +auto test1() -> int Point::* { + return &Point::y; +} +// CHECK: cir.func @_Z5test1v() -> !cir.data_member +// CHECK: %{{.+}} = cir.const(#cir.data_member<1> : !cir.data_member) : !cir.data_member +// CHECK: } + +int test2(const Point &pt, int Point::*member) { + return pt.*member; +} +// CHECK: cir.func @_Z5test2RK5PointMS_i +// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK: } + +int test3(const Point *pt, int Point::*member) { + return pt->*member; +} +// CHECK: cir.func @_Z5test3PK5PointMS_i +// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK: } + +auto test4(int Incomplete::*member) -> int Incomplete::* { + return member; +} +// CHECK: cir.func @_Z5test4M10Incompletei(%arg0: !cir.data_member loc({{.+}})) -> !cir.data_member + +int test5(Incomplete *ic, int Incomplete::*member) { + return ic->*member; +} +// CHECK: cir.func @_Z5test5P10IncompleteMS_i +// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK: } + +auto test_null() -> int Point::* { + return nullptr; +} +// CHECK: cir.func @_Z9test_nullv +// CHECK: %{{.+}} = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member +// CHECK: } + +auto test_null_incomplete() -> int Incomplete::* { + return nullptr; +} +// CHECK: cir.func @_Z20test_null_incompletev +// CHECK: %{{.+}} = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member +// CHECK: } diff --git a/clang/test/CIR/IR/data-member-ptr.cir b/clang/test/CIR/IR/data-member-ptr.cir new file mode 100644 index 000000000000..6370877291a4 --- /dev/null +++ b/clang/test/CIR/IR/data-member-ptr.cir @@ -0,0 +1,46 @@ +// RUN: cir-opt %s | cir-opt | FileCheck %s + +!s32i = !cir.int +!ty_22Foo22 = !cir.struct + +#global_ptr = #cir.data_member<0> : !cir.data_member + +module { + cir.func @null_member() { + %0 = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member + cir.return + } + + cir.func @get_runtime_member(%arg0: !cir.ptr) { + %0 = cir.const(#cir.data_member<0> : !cir.data_member) : !cir.data_member + %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return + } + + cir.func @get_global_member(%arg0: !cir.ptr) { + %0 = cir.const(#global_ptr) : !cir.data_member + %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return + } +} + +// CHECK: module { + +// CHECK-NEXT: cir.func @null_member() { +// CHECK-NEXT: %0 = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK-NEXT: cir.func @get_runtime_member(%arg0: !cir.ptr) { +// CHECK-NEXT: %0 = cir.const(#cir.data_member<0> : !cir.data_member) : !cir.data_member +// CHECK-NEXT: %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK-NEXT: cir.func @get_global_member(%arg0: !cir.ptr) { +// CHECK-NEXT: %0 = cir.const(#cir.data_member<0> : !cir.data_member) : !cir.data_member +// CHECK-NEXT: %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// CHECK: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 3b5e5f83a14e..986353e83447 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -763,4 +763,74 @@ cir.func @const_type_mismatch() -> () { // expected-error@+1 {{'cir.const' op result type ('!cir.int') does not match value type ('!cir.int')}} %2 = cir.const(#cir.int<0> : !s8i) : !u8i cir.return -} \ No newline at end of file +} + +// ----- + +!u16i = !cir.int + +// expected-error@+1 {{invalid kind of type specified}} +#invalid_type = #cir.data_member<0> : !u16i + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct1 = !cir.struct + +// expected-error@+1 {{member type of a #cir.data_member attribute must match the attribute type}} +#invalid_member_ty = #cir.data_member<0> : !cir.data_member + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct1 = !cir.struct + +module { + cir.func @invalid_base_type(%arg0 : !cir.data_member) { + %0 = cir.alloca !u32i, cir.ptr , ["tmp"] {alignment = 4 : i64} + // expected-error@+1 {{'cir.get_runtime_member' op operand #0 must be !cir.struct*}} + %1 = cir.get_runtime_member %0[%arg0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct1 = !cir.struct +!struct2 = !cir.struct + +module { + cir.func @invalid_base_type(%arg0 : !cir.data_member) { + %0 = cir.alloca !struct2, cir.ptr , ["tmp"] {alignment = 4 : i64} + // expected-error@+1 {{record type does not match the member pointer type}} + %1 = cir.get_runtime_member %0[%arg0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!u16i = !cir.int +!u32i = !cir.int +!struct1 = !cir.struct + +module { + cir.func @invalid_base_type(%arg0 : !cir.data_member) { + %0 = cir.alloca !struct1, cir.ptr , ["tmp"] {alignment = 4 : i64} + // expected-error@+1 {{result type does not match the member pointer type}} + %1 = cir.get_runtime_member %0[%arg0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return + } +} + +// ----- + +!u16i = !cir.int +!incomplete_struct = !cir.struct + +// expected-error@+1 {{incomplete 'cir.struct' cannot be used to build a non-null data member pointer}} +#incomplete_cls_member = #cir.data_member<0> : !cir.data_member From d2c75965336c188b19fe3d619b60d2255e091855 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 1 Mar 2024 07:50:37 +0800 Subject: [PATCH 1427/2301] [CIR][NFC] Add unimplemented feature guard for dialect code (#481) As discussed in pull #401 , The present `UnimplementedFeature` class is made for the CIRGen submodule and we need similar facilities for code under `clang/lib/CIR/Dialect/IR`. This NFC patch adds a new `CIRDialectUnimplementedFeature` class that provides unimplemented feature guards for CIR dialect code. --- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 17 ++++++++++---- clang/lib/CIR/Dialect/IR/MissingFeatures.h | 27 ++++++++++++++++++++++ 2 files changed, 39 insertions(+), 5 deletions(-) create mode 100644 clang/lib/CIR/Dialect/IR/MissingFeatures.h diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index d99ab72cdcc5..093cea025952 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -10,6 +10,8 @@ // //===----------------------------------------------------------------------===// +#include "MissingFeatures.h" + #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" @@ -32,6 +34,8 @@ #include "llvm/Support/ErrorHandling.h" #include +using cir::MissingFeatures; + //===----------------------------------------------------------------------===// // CIR Custom Parser/Printer Signatures //===----------------------------------------------------------------------===// @@ -407,6 +411,7 @@ llvm::TypeSize DataMemberType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { // FIXME: consider size differences under different ABIs + assert(!MissingFeatures::cxxABI()); return llvm::TypeSize::getFixed(64); } @@ -414,6 +419,7 @@ uint64_t DataMemberType::getABIAlignment(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { // FIXME: consider alignment differences under different ABIs + assert(!MissingFeatures::cxxABI()); return 8; } @@ -421,6 +427,7 @@ uint64_t DataMemberType::getPreferredAlignment( const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { // FIXME: consider alignment differences under different ABIs + assert(!MissingFeatures::cxxABI()); return 8; } @@ -442,20 +449,20 @@ ArrayType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, return dataLayout.getTypePreferredAlignment(getEltType()); } -llvm::TypeSize cir::VectorType::getTypeSizeInBits( +llvm::TypeSize mlir::cir::VectorType::getTypeSizeInBits( const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { return llvm::TypeSize::getFixed(getSize() * dataLayout.getTypeSizeInBits(getEltType())); } -uint64_t -cir::VectorType::getABIAlignment(const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { +uint64_t mlir::cir::VectorType::getABIAlignment( + const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { return getSize() * dataLayout.getTypeABIAlignment(getEltType()); } -uint64_t cir::VectorType::getPreferredAlignment( +uint64_t mlir::cir::VectorType::getPreferredAlignment( const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { return getSize() * dataLayout.getTypePreferredAlignment(getEltType()); diff --git a/clang/lib/CIR/Dialect/IR/MissingFeatures.h b/clang/lib/CIR/Dialect/IR/MissingFeatures.h new file mode 100644 index 000000000000..d8271533cd98 --- /dev/null +++ b/clang/lib/CIR/Dialect/IR/MissingFeatures.h @@ -0,0 +1,27 @@ +//===---- UnimplementedFeatureGuarding.h - Checks against NYI ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file introduces some helper classes to guard against features that +// CIR dialect supports that we do not have and also do not have great ways to +// assert against. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_IR_UFG +#define LLVM_CLANG_LIB_CIR_DIALECT_IR_UFG + +namespace cir { + +struct MissingFeatures { + // C++ ABI support + static bool cxxABI() { return false; } +}; + +} // namespace cir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_IR_UFG From 3dd28c29419f6660c61bb156937f332b6cc74b59 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 1 Mar 2024 02:59:13 +0300 Subject: [PATCH 1428/2301] [CIR][Lowering] More cir.asm lowering (#472) This PR adds lowering for `cir.asm`. Also, two flags were added to the `cir.asm` : `hasSideEffects` and `isStackAligned` in order to match with the llvm dialect. Also, I added several simple tests for lowering. I'm not sure but most likely the next PR will be the last one in this story about assembly support ) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 18 ++++++---- clang/lib/CIR/CodeGen/CIRAsm.cpp | 4 +-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 35 +++++++++++++++++-- clang/test/CIR/CodeGen/asm.c | 12 +++---- clang/test/CIR/Lowering/asm.cir | 33 +++++++++++++++++ 5 files changed, 86 insertions(+), 16 deletions(-) create mode 100644 clang/test/CIR/Lowering/asm.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 36623b1a9171..06f10fcbe8f6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2890,9 +2890,9 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { ... %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.load %1 : cir.ptr , !s32i - cir.asm(x86_att, {"foo" "~{dirflag},~{fpsr},~{flags}"} : () -> () - cir.asm(x86_att, {"bar $$42 $0" "=r,=&r,1,~{dirflag},~{fpsr},~{flags}"} %2 : (!s32i) -> () - cir.asm(x86_att, {"baz $$42 $0" "=r,=&r,0,1,~{dirflag},~{fpsr},~{flags}"} %3, %2 : (!s32i, !s32i) -> () + cir.asm(x86_att, {"foo" "~{dirflag},~{fpsr},~{flags}"}) side_effects : () -> () + cir.asm(x86_att, {"bar $$42 $0" "=r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) %2 : (!s32i) -> () + cir.asm(x86_att, {"baz $$42 $0" "=r,=&r,0,1,~{dirflag},~{fpsr},~{flags}"}) %3, %2 : (!s32i, !s32i) -> () ``` }]; @@ -2902,11 +2902,17 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { ins Variadic:$operands, StrAttr:$asm_string, StrAttr:$constraints, - AsmFlavor:$asm_flavor); + UnitAttr:$side_effects, + AsmFlavor:$asm_flavor); let assemblyFormat = [{ - `(`$asm_flavor`,` `{` $asm_string $constraints `}` `)` attr-dict - operands `:` functional-type(operands, results) + `(` + $asm_flavor`,` + `{` $asm_string $constraints `}` + `)` + (`side_effects` $side_effects^)? + attr-dict + operands `:` functional-type(operands, results) }]; } diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index e3d0bfb1ad5a..67c8d9e1b0c8 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -428,11 +428,11 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { builder.getCompleteStructTy(ResultRegTypes, sname, false, nullptr); } - AsmFlavor AsmFlavor = inferFlavor(CGM, S); + bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; builder.create(getLoc(S.getAsmLoc()), ResultType, Args, AsmString, Constraints, - AsmFlavor); + HasSideEffect, inferFlavor(CGM, S)); return mlir::success(); } \ No newline at end of file diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index dad14fbf3364..ac9750ddef25 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2231,6 +2231,37 @@ class CIRUnreachableLowering } }; +class CIRInlineAsmOpLowering + : public mlir::OpConversionPattern { + + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::InlineAsmOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + + mlir::Type llResTy; + if (op.getNumResults()) + llResTy = getTypeConverter()->convertType(op.getType(0)); + + auto dialect = op.getAsmFlavor(); + auto llDialect = dialect == mlir::cir::AsmFlavor::x86_att + ? mlir::LLVM::AsmDialect::AD_ATT + : mlir::LLVM::AsmDialect::AD_Intel; + + std::vector opAttrs; + + rewriter.replaceOpWithNewOp( + op, llResTy, adaptor.getOperands(), op.getAsmStringAttr(), + op.getConstraintsAttr(), op.getSideEffectsAttr(), + /*is_align_stack*/ mlir::UnitAttr(), + mlir::LLVM::AsmDialectAttr::get(getContext(), llDialect), + rewriter.getArrayAttr(opAttrs)); + + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -2246,8 +2277,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, - CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering>( - converter, patterns.getContext()); + CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, + CIRInlineAsmOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/asm.c b/clang/test/CIR/CodeGen/asm.c index 33fdac2e721b..228840d8d750 100644 --- a/clang/test/CIR/CodeGen/asm.c +++ b/clang/test/CIR/CodeGen/asm.c @@ -1,32 +1,32 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -//CHECK: cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) : () -> () +//CHECK: cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects : () -> () void empty1() { __asm__ volatile("" : : : ); } -//CHECK: cir.asm(x86_att, {"xyz" "~{dirflag},~{fpsr},~{flags}"}) : () -> () +//CHECK: cir.asm(x86_att, {"xyz" "~{dirflag},~{fpsr},~{flags}"}) side_effects : () -> () void empty2() { __asm__ volatile("xyz" : : : ); } -//CHECK: cir.asm(x86_att, {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) %0, %0 : (!cir.ptr, !cir.ptr) -> () +//CHECK: cir.asm(x86_att, {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0, %0 : (!cir.ptr, !cir.ptr) -> () void t1(int x) { __asm__ volatile("" : "+m"(x)); } -//CHECK: cir.asm(x86_att, {"" "*m,~{dirflag},~{fpsr},~{flags}"}) %0 : (!cir.ptr) -> () +//CHECK: cir.asm(x86_att, {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0 : (!cir.ptr) -> () void t2(int x) { __asm__ volatile("" : : "m"(x)); } -//CHECK: cir.asm(x86_att, {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) %0 : (!cir.ptr) -> () +//CHECK: cir.asm(x86_att, {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0 : (!cir.ptr) -> () void t3(int x) { __asm__ volatile("" : "=m"(x)); } -//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) %1 : (!s32i) -> () +//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects %1 : (!s32i) -> () void t4(int x) { __asm__ volatile("" : "=&r"(x), "+&r"(x)); } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/asm.cir b/clang/test/CIR/Lowering/asm.cir new file mode 100644 index 000000000000..47960e105f94 --- /dev/null +++ b/clang/test/CIR/Lowering/asm.cir @@ -0,0 +1,33 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s + +!s32i = !cir.int + +module { + + cir.func @simple(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + + cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) : () -> () + // CHECK: llvm.inline_asm asm_dialect = att operand_attrs = [] "", "~{dirflag},~{fpsr},~{flags}" : () -> () + + cir.asm(x86_att, {"xyz" "~{dirflag},~{fpsr},~{flags}"}) side_effects : () -> () + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "xyz", "~{dirflag},~{fpsr},~{flags}" : () -> () + + cir.asm(x86_att, {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0, %0 : (!cir.ptr, !cir.ptr) -> () + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "=*m,*m,~{dirflag},~{fpsr},~{flags}" %1, %1 : (!llvm.ptr, !llvm.ptr) -> () + + cir.asm(x86_att, {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0 : (!cir.ptr) -> () + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "*m,~{dirflag},~{fpsr},~{flags}" %1 : (!llvm.ptr) -> () + + cir.asm(x86_att, {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0 : (!cir.ptr) -> () + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "=*m,~{dirflag},~{fpsr},~{flags}" %1 : (!llvm.ptr) -> () + + %1 = cir.load %0 : cir.ptr , !s32i + cir.asm(x86_att, {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects %1 : (!s32i) -> () + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}" %2 : (i32) -> () + + cir.return + } + +} \ No newline at end of file From 8840fc77b16049df0b1cfd67fb26e82d071d4a7d Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 6 Mar 2024 03:10:19 +0800 Subject: [PATCH 1429/2301] [CIR][CIRGen] Emit `cir.unreachable` on implicit returns (#486) This patch changes the emission of implicit returns from functions whose return type is not `void`. Instead of emitting `cir.return`, this PR aligns to the original clang CodeGen and emits a `cir.unreachable` operation. Related issue: #457 . --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 120 ++++++++++-------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 + .../CodeGen/UnimplementedFeatureGuarding.h | 3 +- clang/test/CIR/CodeGen/implicit-return.cpp | 15 +++ 4 files changed, 90 insertions(+), 51 deletions(-) create mode 100644 clang/test/CIR/CodeGen/implicit-return.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 77c8d05f61c2..b38433b4b499 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -323,22 +323,6 @@ void CIRGenFunction::LexicalScope::cleanup() { auto &builder = CGF.builder; auto *localScope = CGF.currLexScope; - auto buildReturn = [&](mlir::Location loc) { - // If we are on a coroutine, add the coro_end builtin call. - auto Fn = dyn_cast(CGF.CurFn); - assert(Fn && "other callables NYI"); - if (Fn.getCoroutine()) - CGF.buildCoroEndBuiltinCall( - loc, builder.getNullPtr(builder.getVoidPtrTy(), loc)); - - if (CGF.FnRetCIRTy.has_value()) { - // If there's anything to return, load it first. - auto val = builder.create(loc, *CGF.FnRetCIRTy, *CGF.FnRetAlloca); - return builder.create(loc, llvm::ArrayRef(val.getResult())); - } - return builder.create(loc); - }; - // Handle pending gotos and the solved labels in this scope. while (!localScope->PendingGotos.empty()) { auto gotoInfo = localScope->PendingGotos.back(); @@ -381,15 +365,18 @@ void CIRGenFunction::LexicalScope::cleanup() { // Leverage and defers to RunCleanupsScope's dtor and scope handling. applyCleanup(); - if (localScope->Depth != 0) { // end of any local scope != function - // Ternary ops have to deal with matching arms for yielding types - // and do return a value, it must do its own cir.yield insertion. - if (!localScope->isTernary()) { - !retVal ? builder.create(localScope->EndLoc) - : builder.create(localScope->EndLoc, retVal); - } - } else - (void)buildReturn(localScope->EndLoc); + if (localScope->Depth == 0) { + buildImplicitReturn(); + return; + } + + // End of any local scope != function + // Ternary ops have to deal with matching arms for yielding types + // and do return a value, it must do its own cir.yield insertion. + if (!localScope->isTernary()) { + !retVal ? builder.create(localScope->EndLoc) + : builder.create(localScope->EndLoc, retVal); + } }; // If a cleanup block has been created at some point, branch to it @@ -434,6 +421,64 @@ void CIRGenFunction::LexicalScope::cleanup() { insertCleanupAndLeave(currBlock); } +mlir::cir::ReturnOp +CIRGenFunction::LexicalScope::buildReturn(mlir::Location loc) { + auto &builder = CGF.getBuilder(); + + // If we are on a coroutine, add the coro_end builtin call. + auto Fn = dyn_cast(CGF.CurFn); + assert(Fn && "other callables NYI"); + if (Fn.getCoroutine()) + CGF.buildCoroEndBuiltinCall( + loc, builder.getNullPtr(builder.getVoidPtrTy(), loc)); + + if (CGF.FnRetCIRTy.has_value()) { + // If there's anything to return, load it first. + auto val = builder.create(loc, *CGF.FnRetCIRTy, *CGF.FnRetAlloca); + return builder.create(loc, llvm::ArrayRef(val.getResult())); + } + return builder.create(loc); +} + +void CIRGenFunction::LexicalScope::buildImplicitReturn() { + auto &builder = CGF.getBuilder(); + auto *localScope = CGF.currLexScope; + + const auto *FD = cast(CGF.CurGD.getDecl()); + + // C++11 [stmt.return]p2: + // Flowing off the end of a function [...] results in undefined behavior + // in a value-returning function. + // C11 6.9.1p12: + // If the '}' that terminates a function is reached, and the value of the + // function call is used by the caller, the behavior is undefined. + if (CGF.getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && + !CGF.SawAsmBlock && !FD->getReturnType()->isVoidType() && + builder.getInsertionBlock()) { + bool shouldEmitUnreachable = CGF.CGM.getCodeGenOpts().StrictReturn || + !CGF.CGM.MayDropFunctionReturn( + FD->getASTContext(), FD->getReturnType()); + + if (CGF.SanOpts.has(SanitizerKind::Return)) { + assert(!UnimplementedFeature::sanitizerReturn()); + llvm_unreachable("NYI"); + } else if (shouldEmitUnreachable) { + if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { + // TODO: buildTrapCall(llvm::Intrinsic::trap); + assert(!UnimplementedFeature::trap()); + } + } + + if (CGF.SanOpts.has(SanitizerKind::Return) || shouldEmitUnreachable) { + builder.create(localScope->EndLoc); + builder.clearInsertionPoint(); + return; + } + } + + (void)buildReturn(localScope->EndLoc); +} + void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // CIRGen doesn't use a BreakContinueStack or evaluates OnlySimpleReturnStmts. @@ -661,31 +706,6 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, if (mlir::failed(Fn.verifyBody())) return nullptr; - // C++11 [stmt.return]p2: - // Flowing off the end of a function [...] results in undefined behavior - // in a value-returning function. - // C11 6.9.1p12: - // If the '}' that terminates a function is reached, and the value of the - // function call is used by the caller, the behavior is undefined. - if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock && - !FD->getReturnType()->isVoidType() && builder.getInsertionBlock()) { - bool shouldEmitUnreachable = - CGM.getCodeGenOpts().StrictReturn || - !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType()); - - if (SanOpts.has(SanitizerKind::Return)) { - llvm_unreachable("NYI"); - } else if (shouldEmitUnreachable) { - if (CGM.getCodeGenOpts().OptimizationLevel == 0) - ; // TODO: buildTrapCall(llvm::Intrinsic::trap); - } - if (SanOpts.has(SanitizerKind::Return) || shouldEmitUnreachable) { - // TODO: builder.createUnreachable(); - assert(!UnimplementedFeature::unreachableOp()); - builder.clearInsertionPoint(); - } - } - // Emit the standard function epilogue. finishFunction(BodyRange.getEnd()); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index c1a2c1843730..056fe393d191 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1918,6 +1918,9 @@ class CIRGenFunction : public CIRGenTypeCache { return b; } + mlir::cir::ReturnOp buildReturn(mlir::Location loc); + void buildImplicitReturn(); + public: void updateCurrentSwitchCaseRegion() { CurrentSwitchRegionIdx++; } llvm::ArrayRef getRetBlocks() { return RetBlocks; } diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index d6a7e1d89433..1b75451d9174 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -58,6 +58,7 @@ struct UnimplementedFeature { static bool pointerOverflowSanitizer() { return false; } static bool sanitizeDtor() { return false; } static bool sanitizeVLABound() { return false; } + static bool sanitizerReturn() { return false; } // ObjC static bool setObjCGCLValueClass() { return false; } @@ -160,12 +161,12 @@ struct UnimplementedFeature { static bool emitScalarRangeCheck() { return false; } static bool stmtExprEvaluation() { return false; } static bool setCallingConv() { return false; } - static bool unreachableOp() { return false; } static bool tryMarkNoThrow() { return false; } static bool indirectBranch() { return false; } static bool escapedLocals() { return false; } static bool deferredReplacements() { return false; } static bool shouldInstrumentFunction() { return false; } + static bool trap() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/implicit-return.cpp b/clang/test/CIR/CodeGen/implicit-return.cpp new file mode 100644 index 000000000000..6a8e33577c61 --- /dev/null +++ b/clang/test/CIR/CodeGen/implicit-return.cpp @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void ret_void() {} + +// CHECK: cir.func @_Z8ret_voidv() +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +int ret_non_void() {} + +// CHECK: cir.func @_Z12ret_non_voidv() -> !s32i +// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK-NEXT: cir.unreachable +// CHECK-NEXT: } From a63e25abb982afd2997862e5ab26118c9592a90d Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 6 Mar 2024 03:12:12 +0800 Subject: [PATCH 1430/2301] [CIR][CIRGen] Add support for builtin bit operations (#474) This PR adds CIRGen support for the following built-in bit operations: - `__builtin_ffs{,l,ll,g}` - `__builtin_clz{,l,ll,g}` - `__builtin_ctz{,l,ll,g}` - `__builtin_clrsb{,l,ll,g}` - `__builtin_popcount{,l,ll,g}` - `__builtin_parity{,l,ll,g}` This PR adds a new operation, `cir.bits`, to represent such bit operations on the input integers. LLVMIR lowering support is not included in this PR. > [!NOTE] > As a side note, C++20 adds the `` header which includes some bit operation functions with similar functionalities to the built-in functions mentioned above. However, these standard library functions have slightly different semantics than the built-in ones and this PR does not include support for these standard library functions. Support for these functions may be added later, or amended into this PR if the reviewers request so. Co-authored-by: Bruno Cardoso Lopes --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 161 +++++++++++++++++ .../include/clang/CIR/Dialect/IR/CIRTypes.td | 30 ++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 67 +++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 11 ++ .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/builtin-bits.cpp | 162 ++++++++++++++++++ clang/test/CIR/IR/bit.cir | 75 ++++++++ clang/test/CIR/IR/invalid.cir | 125 ++++++++++++++ 8 files changed, 631 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/builtin-bits.cpp create mode 100644 clang/test/CIR/IR/bit.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 06f10fcbe8f6..f440d96daaea 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -985,6 +985,167 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// BitsOp +//===----------------------------------------------------------------------===// + +class CIR_BitOp + : CIR_Op { + let arguments = (ins inputTy:$input); + let results = (outs SInt32:$result); + + let assemblyFormat = [{ + `(` $input `:` type($input) `)` `:` type($result) attr-dict + }]; +} + +def BitClrsbOp : CIR_BitOp<"bit.clrsb", SIntOfWidths<[32, 64]>> { + let summary = "Get the number of leading redundant sign bits in the input"; + let description = [{ + Compute the number of leading redundant sign bits in the input integer. + + The input integer must be a signed integer. The most significant bit of the + input integer is the sign bit. The `cir.bit.clrsb` operation returns the + number of redundant sign bits in the input, that is, the number of bits + following the most significant bit that are identical to it. + + The bit width of the input integer must be either 32 or 64. + + Examples: + + ```mlir + !s32i = !cir.int + + // %0 = 0xDEADBEEF, 0b1101_1110_1010_1101_1011_1110_1110_1111 + %0 = cir.const(#cir.int<3735928559> : !s32i) : !s32i + // %1 will be 1 because there is 1 bit following the most significant bit + // that is identical to it. + %1 = cir.bit.clrsb(%0 : !s32i) : !s32i + + // %2 = 1, 0b0000_0000_0000_0000_0000_0000_0000_0001 + %2 = cir.const(#cir.int<1> : !s32i) : !s32i + // %3 will be 30 + %3 = cir.bit.clrsb(%2 : !s32i) : !s32i + ``` + }]; +} + +def BitClzOp : CIR_BitOp<"bit.clz", UIntOfWidths<[16, 32, 64]>> { + let summary = "Get the number of leading 0-bits in the input"; + let description = [{ + Compute the number of leading 0-bits in the input. + + The input integer must be an unsigned integer. The `cir.bit.clz` operation + returns the number of consecutive 0-bits at the most significant bit + position in the input. + + This operation invokes undefined behavior if the input value is 0. + + Example: + + ```mlir + !s32i = !cir.int + !u32i = !cir.int + + // %0 = 0b0000_0000_0000_0000_0000_0000_0000_1000 + %0 = cir.const(#cir.int<8> : !u32i) : !u32i + // %1 will be 28 + %1 = cir.bit.clz(%0 : !u32i) : !s32i + ``` + }]; +} + +def BitCtzOp : CIR_BitOp<"bit.ctz", UIntOfWidths<[16, 32, 64]>> { + let summary = "Get the number of trailing 0-bits in the input"; + let description = [{ + Compute the number of trailing 0-bits in the input. + + The input integer must be an unsigned integer. The `cir.bit.ctz` operation + returns the number of consecutive 0-bits at the least significant bit + position in the input. + + This operation invokes undefined behavior if the input value is 0. + + Example: + + ```mlir + !s32i = !cir.int + !u32i = !cir.int + + // %0 = 0b1000 + %0 = cir.const(#cir.int<8> : !u32i) : !u32i + // %1 will be 3 + %1 = cir.bit.ctz(%0 : !u32i) : !s32i + ``` + }]; +} + +def BitFfsOp : CIR_BitOp<"bit.ffs", SIntOfWidths<[32, 64]>> { + let summary = "Get the position of the least significant 1-bit of input"; + let description = [{ + Compute the position of the least significant 1-bit of the input. + + The input integer must be a signed integer. The `cir.bit.ffs` operation + returns one plus the index of the least significant 1-bit of the input + signed integer. As a special case, if the input integer is 0, `cir.bit.ffs` + returns 0. + + Example: + + ```mlir + !s32i = !cir.int + + // %0 = 0x0010_1000 + %0 = cir.const(#cir.int<40> : !s32i) : !s32i + // #1 will be 4 since the 4th least significant bit is 1. + %1 = cir.bit.ffs(%0 : !s32i) : !s32i + ``` + }]; +} + +def BitParityOp : CIR_BitOp<"bit.parity", UIntOfWidths<[32, 64]>> { + let summary = "Get the parity of input"; + let description = [{ + Compute the parity of the input. The parity of an integer is the number of + 1-bits in it modulo 2. + + The input must be an unsigned integer. + + Example: + + ```mlir + !s32i = !cir.int + !u32i = !cir.int + + // %0 = 0x0110_1000 + %0 = cir.const(#cir.int<104> : !u32i) : !s32i + // %1 will be 1 since there are 3 1-bits in %0 + %1 = cir.bit.parity(%0 : !u32i) : !s32i + ``` + }]; +} + +def BitPopcountOp : CIR_BitOp<"bit.popcount", UIntOfWidths<[16, 32, 64]>> { + let summary = "Get the number of 1-bits in input"; + let description = [{ + Compute the number of 1-bits in the input. + + The input must be an unsigned integer. + + Example: + + ```mlir + !s32i = !cir.int + !u32i = !cir.int + + // %0 = 0x0110_1000 + %0 = cir.const(#cir.int<104> : !u32i) : !s32i + // %1 will be 3 since there are 3 1-bits in %0 + %1 = cir.bit.popcount(%0 : !u32i) : !s32i + ``` + }]; +} + //===----------------------------------------------------------------------===// // SwitchOp //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 1501cd1122ea..c02ee1bef916 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -97,6 +97,36 @@ def SInt16 : SInt<16>; def SInt32 : SInt<32>; def SInt64 : SInt<64>; +// A type constraint that allows unsigned integer type whose width is among the +// specified list of possible widths. +class UIntOfWidths widths> + : Type()">, + CPred<"$_self.cast<::mlir::cir::IntType>().isUnsigned()">, + Or().getWidth() == " # w> + )> + ]>, + !interleave(!foreach(w, widths, w # "-bit"), " or ") # " uint", + "::mlir::cir::IntType" + > {} + +// A type constraint that allows unsigned integer type whose width is among the +// specified list of possible widths. +class SIntOfWidths widths> + : Type()">, + CPred<"$_self.cast<::mlir::cir::IntType>().isSigned()">, + Or().getWidth() == " # w> + )> + ]>, + !interleave(!foreach(w, widths, w # "-bit"), " or ") # " sint", + "::mlir::cir::IntType" + > {} + //===----------------------------------------------------------------------===// // FloatType //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 0c351c7ea1b7..bee275371318 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -55,6 +55,22 @@ static RValue buildUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { return RValue::get(Call->getResult(0)); } +template +static RValue +buildBuiltinBitOp(CIRGenFunction &CGF, const CallExpr *E, + std::optional CK) { + mlir::Value arg; + if (CK.has_value()) + arg = CGF.buildCheckedArgForBuiltin(E->getArg(0), *CK); + else + arg = CGF.buildScalarExpr(E->getArg(0)); + + auto resultTy = CGF.ConvertType(E->getType()); + auto op = + CGF.getBuilder().create(CGF.getLoc(E->getExprLoc()), resultTy, arg); + return RValue::get(op); +} + RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue) { @@ -462,7 +478,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BImemcpy: case Builtin::BI__builtin_memcpy: case Builtin::BImempcpy: - case Builtin::BI__builtin_mempcpy: + case Builtin::BI__builtin_mempcpy: { Address Dest = buildPointerWithAlignment(E->getArg(0)); Address Src = buildPointerWithAlignment(E->getArg(1)); mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); @@ -480,6 +496,42 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(Dest.getPointer()); } + case Builtin::BI__builtin_clrsb: + case Builtin::BI__builtin_clrsbl: + case Builtin::BI__builtin_clrsbll: + return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__builtin_ctzs: + case Builtin::BI__builtin_ctz: + case Builtin::BI__builtin_ctzl: + case Builtin::BI__builtin_ctzll: + return buildBuiltinBitOp(*this, E, BCK_CTZPassedZero); + + case Builtin::BI__builtin_clzs: + case Builtin::BI__builtin_clz: + case Builtin::BI__builtin_clzl: + case Builtin::BI__builtin_clzll: + return buildBuiltinBitOp(*this, E, BCK_CLZPassedZero); + + case Builtin::BI__builtin_ffs: + case Builtin::BI__builtin_ffsl: + case Builtin::BI__builtin_ffsll: + return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__builtin_parity: + case Builtin::BI__builtin_parityl: + case Builtin::BI__builtin_parityll: + return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__popcnt16: + case Builtin::BI__popcnt: + case Builtin::BI__popcnt64: + case Builtin::BI__builtin_popcount: + case Builtin::BI__builtin_popcountl: + case Builtin::BI__builtin_popcountll: + return buildBuiltinBitOp(*this, E, std::nullopt); + } + // If this is an alias for a lib function (e.g. __builtin_sin), emit // the call using the normal call path, but using the unmangled // version of the function name. @@ -543,6 +595,19 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return GetUndefRValue(E->getType()); } +mlir::Value CIRGenFunction::buildCheckedArgForBuiltin(const Expr *E, + BuiltinCheckKind Kind) { + assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) && + "Unsupported builtin check kind"); + + auto value = buildScalarExpr(E); + if (!SanOpts.has(SanitizerKind::Builtin)) + return value; + + assert(!UnimplementedFeature::sanitizerBuiltin()); + llvm_unreachable("NYI"); +} + static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 056fe393d191..ed09b12b1eae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1509,6 +1509,17 @@ class CIRGenFunction : public CIRGenTypeCache { LValue buildCheckedLValue(const Expr *E, TypeCheckKind TCK); LValue buildMemberExpr(const MemberExpr *E); + /// Specifies which type of sanitizer check to apply when handling a + /// particular builtin. + enum BuiltinCheckKind { + BCK_CTZPassedZero, + BCK_CLZPassedZero, + }; + + /// Emits an argument for a call to a builtin. If the builtin sanitizer is + /// enabled, a runtime check specified by \p Kind is also emitted. + mlir::Value buildCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind); + /// returns true if aggregate type has a volatile member. /// TODO(cir): this could be a common AST helper between LLVM / CIR. bool hasVolatileMember(QualType T) { diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 1b75451d9174..dd28c31d4d59 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -58,6 +58,7 @@ struct UnimplementedFeature { static bool pointerOverflowSanitizer() { return false; } static bool sanitizeDtor() { return false; } static bool sanitizeVLABound() { return false; } + static bool sanitizerBuiltin() { return false; } static bool sanitizerReturn() { return false; } // ObjC diff --git a/clang/test/CIR/CodeGen/builtin-bits.cpp b/clang/test/CIR/CodeGen/builtin-bits.cpp new file mode 100644 index 000000000000..6f7f195cf1fb --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-bits.cpp @@ -0,0 +1,162 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int test_builtin_clrsb(int x) { + return __builtin_clrsb(x); +} + +// CHECK: cir.func @_Z18test_builtin_clrsbi +// CHECK: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s32i) : !s32i +// CHECK: } + +int test_builtin_clrsbl(long x) { + return __builtin_clrsbl(x); +} + +// CHECK: cir.func @_Z19test_builtin_clrsbll +// CHECK: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s64i) : !s32i +// CHECK: } + +int test_builtin_clrsbll(long long x) { + return __builtin_clrsbll(x); +} + +// CHECK: cir.func @_Z20test_builtin_clrsbllx +// CHECK: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s64i) : !s32i +// CHECK: } + +int test_builtin_ctzs(unsigned short x) { + return __builtin_ctzs(x); +} + +// CHECK: cir.func @_Z17test_builtin_ctzst +// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u16i) : !s32i +// CHEKC: } + +int test_builtin_ctz(unsigned x) { + return __builtin_ctz(x); +} + +// CHECK: cir.func @_Z16test_builtin_ctzj +// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u32i) : !s32i +// CHECK: } + +int test_builtin_ctzl(unsigned long x) { + return __builtin_ctzl(x); +} + +// CHECK: cir.func @_Z17test_builtin_ctzlm +// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_ctzll(unsigned long long x) { + return __builtin_ctzll(x); +} + +// CHECK: cir.func @_Z18test_builtin_ctzlly +// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_clzs(unsigned short x) { + return __builtin_clzs(x); +} + +// CHECK: cir.func @_Z17test_builtin_clzst +// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u16i) : !s32i +// CHECK: } + +int test_builtin_clz(unsigned x) { + return __builtin_clz(x); +} + +// CHECK: cir.func @_Z16test_builtin_clzj +// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u32i) : !s32i +// CHECK: } + +int test_builtin_clzl(unsigned long x) { + return __builtin_clzl(x); +} + +// CHECK: cir.func @_Z17test_builtin_clzlm +// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_clzll(unsigned long long x) { + return __builtin_clzll(x); +} + +// CHECK: cir.func @_Z18test_builtin_clzlly +// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_ffs(int x) { + return __builtin_ffs(x); +} + +// CHECK: cir.func @_Z16test_builtin_ffsi +// CHECK: %{{.+}} = cir.bit.ffs(%{{.+}} : !s32i) : !s32i +// CHECK: } + +int test_builtin_ffsl(long x) { + return __builtin_ffsl(x); +} + +// CHECK: cir.func @_Z17test_builtin_ffsll +// CHECK: %{{.+}} = cir.bit.ffs(%{{.+}} : !s64i) : !s32i +// CHECK: } + +int test_builtin_ffsll(long long x) { + return __builtin_ffsll(x); +} + +// CHECK: cir.func @_Z18test_builtin_ffsllx +// CHECK: %{{.+}} = cir.bit.ffs(%{{.+}} : !s64i) : !s32i +// CHECK: } + +int test_builtin_parity(unsigned x) { + return __builtin_parity(x); +} + +// CHECK: cir.func @_Z19test_builtin_parityj +// CHECK: %{{.+}} = cir.bit.parity(%{{.+}} : !u32i) : !s32i +// CHECK: } + +int test_builtin_parityl(unsigned long x) { + return __builtin_parityl(x); +} + +// CHECK: cir.func @_Z20test_builtin_paritylm +// CHECK: %{{.+}} = cir.bit.parity(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_parityll(unsigned long long x) { + return __builtin_parityll(x); +} + +// CHECK: cir.func @_Z21test_builtin_paritylly +// CHECK: %{{.+}} = cir.bit.parity(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_popcount(unsigned x) { + return __builtin_popcount(x); +} + +// CHECK: cir.func @_Z21test_builtin_popcountj +// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u32i) : !s32i +// CHECK: } + +int test_builtin_popcountl(unsigned long x) { + return __builtin_popcountl(x); +} + +// CHECK: cir.func @_Z22test_builtin_popcountlm +// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u64i) : !s32i +// CHECK: } + +int test_builtin_popcountll(unsigned long long x) { + return __builtin_popcountll(x); +} + +// CHECK: cir.func @_Z23test_builtin_popcountlly +// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u64i) : !s32i +// CHECK: } diff --git a/clang/test/CIR/IR/bit.cir b/clang/test/CIR/IR/bit.cir new file mode 100644 index 000000000000..974f22606cdc --- /dev/null +++ b/clang/test/CIR/IR/bit.cir @@ -0,0 +1,75 @@ +// RUN: cir-opt %s | cir-opt | FileCheck %s + +!s8i = !cir.int +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!u8i = !cir.int +!u16i = !cir.int +!u32i = !cir.int +!u64i = !cir.int + +module { + cir.func @test() { + %s8 = cir.const(#cir.int<1> : !s8i) : !s8i + %s16 = cir.const(#cir.int<1> : !s16i) : !s16i + %s32 = cir.const(#cir.int<1> : !s32i) : !s32i + %s64 = cir.const(#cir.int<1> : !s64i) : !s64i + %u8 = cir.const(#cir.int<1> : !u8i) : !u8i + %u16 = cir.const(#cir.int<1> : !u16i) : !u16i + %u32 = cir.const(#cir.int<1> : !u32i) : !u32i + %u64 = cir.const(#cir.int<1> : !u64i) : !u64i + + %2 = cir.bit.clrsb(%s32 : !s32i) : !s32i + %3 = cir.bit.clrsb(%s64 : !s64i) : !s32i + + %4 = cir.bit.clz(%u16 : !u16i) : !s32i + %5 = cir.bit.clz(%u32 : !u32i) : !s32i + %6 = cir.bit.clz(%u64 : !u64i) : !s32i + + %7 = cir.bit.ctz(%u16 : !u16i) : !s32i + %8 = cir.bit.ctz(%u32 : !u32i) : !s32i + %9 = cir.bit.ctz(%u64 : !u64i) : !s32i + + %10 = cir.bit.ffs(%s32 : !s32i) : !s32i + %11 = cir.bit.ffs(%s64 : !s64i) : !s32i + + %12 = cir.bit.parity(%u32 : !u32i) : !s32i + %13 = cir.bit.parity(%u64 : !u64i) : !s32i + + %14 = cir.bit.popcount(%u16 : !u16i) : !s32i + %15 = cir.bit.popcount(%u32 : !u32i) : !s32i + %16 = cir.bit.popcount(%u64 : !u64i) : !s32i + + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: cir.func @test() { +// CHECK-NEXT: %0 = cir.const(#cir.int<1> : !s8i) : !s8i +// CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s16i) : !s16i +// CHECK-NEXT: %2 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !u8i) : !u8i +// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u16i) : !u16i +// CHECK-NEXT: %6 = cir.const(#cir.int<1> : !u32i) : !u32i +// CHECK-NEXT: %7 = cir.const(#cir.int<1> : !u64i) : !u64i +// CHECK-NEXT: %8 = cir.bit.clrsb(%2 : !s32i) : !s32i +// CHECK-NEXT: %9 = cir.bit.clrsb(%3 : !s64i) : !s32i +// CHECK-NEXT: %10 = cir.bit.clz(%5 : !u16i) : !s32i +// CHECK-NEXT: %11 = cir.bit.clz(%6 : !u32i) : !s32i +// CHECK-NEXT: %12 = cir.bit.clz(%7 : !u64i) : !s32i +// CHECK-NEXT: %13 = cir.bit.ctz(%5 : !u16i) : !s32i +// CHECK-NEXT: %14 = cir.bit.ctz(%6 : !u32i) : !s32i +// CHECK-NEXT: %15 = cir.bit.ctz(%7 : !u64i) : !s32i +// CHECK-NEXT: %16 = cir.bit.ffs(%2 : !s32i) : !s32i +// CHECK-NEXT: %17 = cir.bit.ffs(%3 : !s64i) : !s32i +// CHECK-NEXT: %18 = cir.bit.parity(%6 : !u32i) : !s32i +// CHECK-NEXT: %19 = cir.bit.parity(%7 : !u64i) : !s32i +// CHECK-NEXT: %20 = cir.bit.popcount(%5 : !u16i) : !s32i +// CHECK-NEXT: %21 = cir.bit.popcount(%6 : !u32i) : !s32i +// CHECK-NEXT: %22 = cir.bit.popcount(%7 : !u64i) : !s32i +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 986353e83447..2ff17558d866 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -834,3 +834,128 @@ module { // expected-error@+1 {{incomplete 'cir.struct' cannot be used to build a non-null data member pointer}} #incomplete_cls_member = #cir.data_member<0> : !cir.data_member + + +// ----- + +!s32i = !cir.int +!u32i = !cir.int + +cir.func @clrsb_invalid_input_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.clrsb' op operand #0 must be 32-bit or 64-bit sint, but got '!cir.int'}} + %0 = cir.bit.clrsb(%arg0 : !u32i) : !s32i + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int + +cir.func @clrsb_invalid_result_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.clrsb' op result #0 must be 32-bit signed integer, but got '!cir.int'}} + %0 = cir.bit.clrsb(%arg0 : !s32i) : !u32i + cir.return +} + +// ----- + +!s32i = !cir.int + +cir.func @clz_invalid_input_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.clz' op operand #0 must be 16-bit or 32-bit or 64-bit uint, but got '!cir.int'}} + %0 = cir.bit.clz(%arg0 : !s32i) : !s32i + cir.return +} + +// ----- + +!u32i = !cir.int + +cir.func @clz_invalid_result_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.clz' op result #0 must be 32-bit signed integer, but got '!cir.int}} + %0 = cir.bit.clz(%arg0 : !u32i) : !u32i + cir.return +} + +// ----- + +!s32i = !cir.int + +cir.func @ctz_invalid_input_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.ctz' op operand #0 must be 16-bit or 32-bit or 64-bit uint, but got '!cir.int'}} + %0 = cir.bit.ctz(%arg0 : !s32i) : !s32i + cir.return +} + +// ----- + +!u32i = !cir.int + +cir.func @ctz_invalid_result_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.ctz' op result #0 must be 32-bit signed integer, but got '!cir.int'}} + %0 = cir.bit.ctz(%arg0 : !u32i) : !u32i + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int + +cir.func @ffs_invalid_input_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.ffs' op operand #0 must be 32-bit or 64-bit sint, but got '!cir.int'}} + %0 = cir.bit.ffs(%arg0 : !u32i) : !s32i + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int + +cir.func @ffs_invalid_result_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.ffs' op result #0 must be 32-bit signed integer, but got '!cir.int'}} + %0 = cir.bit.ffs(%arg0 : !s32i) : !u32i + cir.return +} + +// ----- + +!s32i = !cir.int + +cir.func @parity_invalid_input_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.parity' op operand #0 must be 32-bit or 64-bit uint, but got '!cir.int'}} + %0 = cir.bit.parity(%arg0 : !s32i) : !s32i + cir.return +} + +// ----- + +!u32i = !cir.int + +cir.func @parity_invalid_result_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.parity' op result #0 must be 32-bit signed integer, but got '!cir.int}} + %0 = cir.bit.parity(%arg0 : !u32i) : !u32i + cir.return +} + +// ----- + +!s32i = !cir.int + +cir.func @popcount_invalid_input_ty(%arg0 : !s32i) -> () { + // expected-error@+1 {{'cir.bit.popcount' op operand #0 must be 16-bit or 32-bit or 64-bit uint, but got '!cir.int'}} + %0 = cir.bit.popcount(%arg0 : !s32i) : !s32i + cir.return +} + +// ----- + +!u32i = !cir.int + +cir.func @popcount_invalid_result_ty(%arg0 : !u32i) -> () { + // expected-error@+1 {{'cir.bit.popcount' op result #0 must be 32-bit signed integer, but got '!cir.int'}} + %0 = cir.bit.popcount(%arg0 : !u32i) : !u32i + cir.return +} From d557a350aee2d6de6492de32096ce6bef1ae4c83 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Tue, 5 Mar 2024 11:31:16 -0800 Subject: [PATCH 1431/2301] [CIR] Vector types - part 4 (#490) This is part 4 of implementing vector types and vector operations in ClangIR, issue #284. This change has three small additions. Implement a "vector splat" conversion, which converts a scalar into vector, initializing all the elements of the vector with the scalar. Implement incomplete initialization of a vector, where the number of explicit initializers is less than the number of elements in the vector. The rest of the elements are implicitly zero initialized. Implement conversions between different vector types. The language rules require that the two types be the same size (in bytes, not necessarily in the number of elements). These conversions are always implemented with a bitcast. The first two changes only required changes to the AST -> ClangIR code gen. There are no changes to the ClangIR dialect, so no changes to the LLVM lowering were needed. The third part only required a change to a validation rule. The code to implement a vector bitcast was already present. The compiler just needed to stop rejecting it as invalid ClangIR. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 29 +++++++++++++++++++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 9 ++++--- clang/test/CIR/CodeGen/vectype.cpp | 27 ++++++++++++++++++++ clang/test/CIR/IR/invalid.cir | 2 +- clang/test/CIR/Lowering/vectype.cpp | 6 +++++ 5 files changed, 65 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 95844e1bb686..1af5628208da 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1531,8 +1531,19 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } case CK_MatrixCast: llvm_unreachable("NYI"); - case CK_VectorSplat: - llvm_unreachable("NYI"); + case CK_VectorSplat: { + // Create a vector object and fill all elements with the same scalar value. + assert(DestTy->isVectorType() && "CK_VectorSplat to non-vector type"); + mlir::Value Value = Visit(E); + SmallVector Elements; + auto VecType = CGF.getCIRType(DestTy).dyn_cast(); + auto NumElements = VecType.getSize(); + for (uint64_t Index = 0; Index < NumElements; ++Index) { + Elements.push_back(Value); + } + return CGF.getBuilder().create( + CGF.getLoc(E->getSourceRange()), VecType, Elements); + } case CK_FixedPointCast: llvm_unreachable("NYI"); case CK_FixedPointToBoolean: @@ -1660,13 +1671,23 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { assert(!UnimplementedFeature::scalableVectors() && "NYI: scalable vector init"); assert(!UnimplementedFeature::vectorConstants() && "NYI: vector constants"); + auto VectorType = + CGF.getCIRType(E->getType()).dyn_cast(); SmallVector Elements; for (Expr *init : E->inits()) { Elements.push_back(Visit(init)); } + // Zero-initialize any remaining values. + if (NumInitElements < VectorType.getSize()) { + mlir::Value ZeroValue = CGF.getBuilder().create( + CGF.getLoc(E->getSourceRange()), VectorType.getEltType(), + CGF.getBuilder().getZeroInitAttr(VectorType.getEltType())); + for (uint64_t i = NumInitElements; i < VectorType.getSize(); ++i) { + Elements.push_back(ZeroValue); + } + } return CGF.getBuilder().create( - CGF.getLoc(E->getSourceRange()), CGF.getCIRType(E->getType()), - Elements); + CGF.getLoc(E->getSourceRange()), VectorType, Elements); } if (NumInitElements == 0) { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 1f51087b472e..8abf57dd942e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -428,9 +428,12 @@ LogicalResult CastOp::verify() { return success(); } case cir::CastKind::bitcast: { - if (!srcType.dyn_cast() || - !resType.dyn_cast()) - return emitOpError() << "requires !cir.ptr type for source and result"; + if ((!srcType.isa() || + !resType.isa()) && + (!srcType.isa() || + !resType.isa())) + return emitOpError() + << "requires !cir.ptr or !cir.vector type for source and result"; return success(); } case cir::CastKind::floating: { diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index 8006014854c3..6824fd5ac4fa 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -16,6 +16,22 @@ void vector_int_test(int x) { vi4 b = { x, 5, 6, x + 1 }; // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : + // Incomplete vector initialization. + vi4 bb = { x, x + 1 }; + // CHECK: %[[#zero:]] = cir.const(#cir.int<0> : !s32i) : !s32i + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %[[#zero]], %[[#zero]] : !s32i, !s32i, !s32i, !s32i) : + + // Scalar to vector conversion, a.k.a. vector splat. Only valid as an + // operand of a binary operator, not as a regular conversion. + bb = a + 7; + // CHECK: %[[#seven:]] = cir.const(#cir.int<7> : !s32i) : !s32i + // CHECK: %{{[0-9]+}} = cir.vec.create(%[[#seven]], %[[#seven]], %[[#seven]], %[[#seven]] : !s32i, !s32i, !s32i, !s32i) : + + // Vector to vector conversion + vd2 bbb = { }; + bb = (vi4)bbb; + // CHECK: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.vector), !cir.vector + // Extract element int c = a[x]; // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : @@ -77,6 +93,17 @@ void vector_double_test(int x, double y) { vd2 b = { y, y + 1.0 }; // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : + // Incomplete vector initialization + vd2 bb = { y }; + // CHECK: [[#dzero:]] = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %[[#dzero]] : !cir.double, !cir.double) : + + // Scalar to vector conversion, a.k.a. vector splat. Only valid as an + // operand of a binary operator, not as a regular conversion. + bb = a + 2.5; + // CHECK: %[[#twohalf:]] = cir.const(#cir.fp<2.500000e+00> : !cir.double) : !cir.double + // CHECK: %{{[0-9]+}} = cir.vec.create(%[[#twohalf]], %[[#twohalf]] : !cir.double, !cir.double) : + // Extract element double c = a[x]; // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 2ff17558d866..8e5545adf1f9 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -141,7 +141,7 @@ cir.func @cast3(%p: !cir.ptr) { !u32i = !cir.int cir.func @cast4(%p: !cir.ptr) { - %2 = cir.cast(bitcast, %p : !cir.ptr), !u32i // expected-error {{requires !cir.ptr type for source and result}} + %2 = cir.cast(bitcast, %p : !cir.ptr), !u32i // expected-error {{requires !cir.ptr or !cir.vector type for source and result}} cir.return } diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index 7630ce63157b..138e95f2c43c 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -46,6 +46,12 @@ void vector_int_test(int x) { // CHECK: %[[#T57:]] = llvm.insertelement %[[#T48]], %[[#T55]][%[[#T56]] : i64] : vector<4xi32> // CHECK: llvm.store %[[#T57]], %[[#T5:]] : vector<4xi32>, !llvm.ptr + // Vector to vector conversion + vd2 bb = (vd2)b; + // CHECK: %[[#bval:]] = llvm.load %[[#bmem:]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#bbval:]] = llvm.bitcast %[[#bval]] : vector<4xi32> to vector<2xf64> + // CHECK: llvm.store %[[#bbval]], %[[#bbmem:]] : vector<2xf64>, !llvm.ptr + // Extract element. int c = a[x]; // CHECK: %[[#T58:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> From 0e8537f62ddfebc9f10ca6889eba555d1d8658c0 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Wed, 6 Mar 2024 01:07:07 +0300 Subject: [PATCH 1432/2301] [CIR][CIRGen] Support for __builtin_expect (#478) This PR adds the new `cir.expect` opcode which is similar to llvm.expect intricnsics. Codegen of `__builtin_expect` emits `cir.expect` opcode. Then `cir.expect` will be lowered to `llvm.expect` intrinsic. When implementing __builtin_expect I faced with minor issue. CIR lowering of `if` often emits the lllvm IR with redundant cast instructions. Like this: ``` %0 = call i64 @llvm.expect.i64(i64 %any, i64 1), !dbg !13 %1 = icmp ne i64 %0, 0 %2 = zext i1 %0 to i8 // redundant %3 = trunc i8 %1 to i1 // redundant br i1 %3, label %l1, label %l2 ``` But the llvm pass `LowerExpectIntrinsicPass` (that should replace `llvm.expect` with branch metadata) performs only simple pattern-matching. And it can't handle this zext/trunc intructions. So this pass in such cases just removes the `llvm.expect` without updating a branch metadata. In this reason this PR also avoid emitting the redundant zext/cast instruction sequence. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 25 +++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 34 +++++++++++- .../CodeGen/UnimplementedFeatureGuarding.h | 1 - .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 52 +++++++++++++++--- clang/test/CIR/CodeGen/pred-info-builtins.c | 33 ++++++++---- clang/test/CIR/Lowering/expect.cir | 54 +++++++++++++++++++ clang/test/CIR/Lowering/if.cir | 30 +++++------ clang/test/CIR/Lowering/switch.cir | 2 +- 8 files changed, 192 insertions(+), 39 deletions(-) create mode 100644 clang/test/CIR/Lowering/expect.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index f440d96daaea..53ce8582f6b0 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2852,6 +2852,31 @@ def SinOp : UnaryFPToFPBuiltinOp<"sin">; def SqrtOp : UnaryFPToFPBuiltinOp<"sqrt">; def TruncOp : UnaryFPToFPBuiltinOp<"trunc">; +//===----------------------------------------------------------------------===// +// Branch Probability Operations +//===----------------------------------------------------------------------===// + +def ExpectOp : CIR_Op<"expect", + [Pure, AllTypesMatch<["result", "val", "expected"]>]> { + let summary = + "Compute whether expression is likely to evaluate to a specified value"; + let description = [{ + Provides __builtin_expect functionality in Clang IR. + + If $prob is not specified, then behaviour is same as __builtin_expect. + If specified, then behaviour is same as __builtin_expect_with_probability, + where probability = $prob. + }]; + + let arguments = (ins CIR_IntType:$val, + CIR_IntType:$expected, + OptionalAttr:$prob); + let results = (outs CIR_IntType:$result); + let assemblyFormat = [{ + `(` $val`,` $expected (`,` $prob^)? `)` `:` type($val) attr-dict + }]; +} + //===----------------------------------------------------------------------===// // Variadic Operations //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index bee275371318..064f3b6744d2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -402,10 +402,40 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, } case Builtin::BI__builtin_expect: - case Builtin::BI__builtin_expect_with_probability: + case Builtin::BI__builtin_expect_with_probability: { + auto ArgValue = buildScalarExpr(E->getArg(0)); + auto ExpectedValue = buildScalarExpr(E->getArg(1)); + + // Don't generate cir.expect on -O0 as the backend won't use it for + // anything. Note, we still IRGen ExpectedValue because it could have + // side-effects. + if (CGM.getCodeGenOpts().OptimizationLevel == 0) + return RValue::get(ArgValue); + + mlir::FloatAttr ProbAttr = {}; + if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_expect_with_probability) { + llvm::APFloat Probability(0.0); + const Expr *ProbArg = E->getArg(2); + bool EvalSucceed = + ProbArg->EvaluateAsFloat(Probability, CGM.getASTContext()); + assert(EvalSucceed && "probability should be able to evaluate as float"); + (void)EvalSucceed; + bool LoseInfo = false; + Probability.convert(llvm::APFloat::IEEEdouble(), + llvm::RoundingMode::Dynamic, &LoseInfo); + ProbAttr = mlir::FloatAttr::get( + mlir::Float64Type::get(builder.getContext()), Probability); + } + + auto result = builder.create( + getLoc(E->getSourceRange()), ArgValue.getType(), ArgValue, + ExpectedValue, ProbAttr); + + return RValue::get(result); + } case Builtin::BI__builtin_unpredictable: { if (CGM.getCodeGenOpts().OptimizationLevel != 0) - assert(!UnimplementedFeature::branchPredictionInfoBuiltin()); + assert(!UnimplementedFeature::insertBuiltinUnpredictable()); return RValue::get(buildScalarExpr(E->getArg(0))); } diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index dd28c31d4d59..d522405eb567 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -143,7 +143,6 @@ struct UnimplementedFeature { static bool armComputeVolatileBitfields() { return false; } static bool setCommonAttributes() { return false; } static bool insertBuiltinUnpredictable() { return false; } - static bool branchPredictionInfoBuiltin() { return false; } static bool createInvariantGroup() { return false; } static bool addAutoInitAnnotation() { return false; } static bool addHeapAllocSiteMetadata() { return false; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ac9750ddef25..ce0cdacb17d9 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -795,9 +795,27 @@ class CIRIfLowering : public mlir::OpConversionPattern { } rewriter.setInsertionPointToEnd(currentBlock); - auto trunc = rewriter.create(loc, rewriter.getI1Type(), - adaptor.getCondition()); - rewriter.create(loc, trunc.getRes(), thenBeforeBody, + + // FIXME: CIR always lowers !cir.bool to i8 type. + // In this reason CIR CodeGen often emits the redundant zext + trunc + // sequence that prevents lowering of llvm.expect in + // LowerExpectIntrinsicPass. + // We should fix that in a more appropriate way. But as a temporary solution + // just avoid the redundant casts here. + mlir::Value condition; + auto zext = + dyn_cast(adaptor.getCondition().getDefiningOp()); + if (zext && zext->getOperand(0).getType() == rewriter.getI1Type()) { + condition = zext->getOperand(0); + if (zext->use_empty()) + rewriter.eraseOp(zext); + } else { + auto trunc = rewriter.create( + loc, rewriter.getI1Type(), adaptor.getCondition()); + condition = trunc.getRes(); + } + + rewriter.create(loc, condition, thenBeforeBody, elseBeforeBody); if (!emptyElse) { @@ -2155,6 +2173,25 @@ class CIRFAbsOpLowering : public mlir::OpConversionPattern { } }; +class CIRExpectOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ExpectOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + std::optional prob = op.getProb(); + if (!prob) + rewriter.replaceOpWithNewOp(op, adaptor.getVal(), + adaptor.getExpected()); + else + rewriter.replaceOpWithNewOp( + op, adaptor.getVal(), adaptor.getExpected(), prob.value()); + return mlir::success(); + } +}; + class CIRVTableAddrPointOpLowering : public mlir::OpConversionPattern { public: @@ -2275,10 +2312,11 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, - CIRFAbsOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, - CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, - CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, - CIRInlineAsmOpLowering>(converter, patterns.getContext()); + CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, + CIRVectorCreateLowering, CIRVectorInsertLowering, + CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRStackSaveLowering, + CIRStackRestoreLowering, CIRUnreachableLowering, CIRInlineAsmOpLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/pred-info-builtins.c b/clang/test/CIR/CodeGen/pred-info-builtins.c index 192eaf0691f2..263274890e34 100644 --- a/clang/test/CIR/CodeGen/pred-info-builtins.c +++ b/clang/test/CIR/CodeGen/pred-info-builtins.c @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -O0 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -O0 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=CIR-O0 +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=CIR-O2 extern void __attribute__((noinline)) bar(void); @@ -6,22 +7,34 @@ void expect(int x) { if (__builtin_expect(x, 0)) bar(); } -// CHECK: cir.func @expect -// CHECK: cir.if {{%.*}} { -// CHECK: cir.call @bar() : () -> () +// CIR-O0: cir.func @expect +// CIR-O0: cir.if {{%.*}} { +// CIR-O0: cir.call @bar() : () -> () + +// CIR-O2: cir.func @expect +// CIR-O2: [[EXPECT:%.*]] = cir.expect({{.*}}, {{.*}}) : !s64i +// CIR-O2: [[EXPECT_BOOL:%.*]] = cir.cast(int_to_bool, [[EXPECT]] : !s64i), !cir.bool +// CIR-O2: cir.if [[EXPECT_BOOL]] +// CIR-O2: cir.call @bar() : () -> () void expect_with_probability(int x) { if (__builtin_expect_with_probability(x, 1, 0.8)) bar(); } -// CHECK: cir.func @expect_with_probability -// CHECK: cir.if {{%.*}} { -// CHECK: cir.call @bar() : () -> () +// CIR-O0: cir.func @expect_with_probability +// CIR-O0: cir.if {{%.*}} { +// CIR-O0: cir.call @bar() : () -> () + +// CIR-O2: cir.func @expect_with_probability +// CIR-O2: [[EXPECT:%.*]] = cir.expect({{.*}}, {{.*}}, 8.000000e-01) : !s64i +// CIR-O2: [[EXPECT_BOOL:%.*]] = cir.cast(int_to_bool, [[EXPECT]] : !s64i), !cir.bool +// CIR-O2: cir.if [[EXPECT_BOOL]] +// CIR-O2: cir.call @bar() : () -> () void unpredictable(int x) { if (__builtin_unpredictable(x > 1)) bar(); -// CHECK: cir.func @unpredictable -// CHECK: cir.if {{%.*}} { -// CHECK: cir.call @bar() : () -> () +// CIR-O0: cir.func @unpredictable +// CIR-O0: cir.if {{%.*}} { +// CIR-O0: cir.call @bar() : () -> () } diff --git a/clang/test/CIR/Lowering/expect.cir b/clang/test/CIR/Lowering/expect.cir new file mode 100644 index 000000000000..a221cca5f3dd --- /dev/null +++ b/clang/test/CIR/Lowering/expect.cir @@ -0,0 +1,54 @@ +// RUN: cir-opt %s -cir-to-llvm | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s64i = !cir.int +module { + cir.func @foo(%arg0: !s64i) { + %0 = cir.const(#cir.int<1> : !s64i) : !s64i + %1 = cir.expect(%arg0, %0) : !s64i + %2 = cir.cast(int_to_bool, %1 : !s64i), !cir.bool + cir.if %2 { + cir.yield + } + %3 = cir.expect(%arg0, %0, 1.000000e-01) : !s64i + %4 = cir.cast(int_to_bool, %3 : !s64i), !cir.bool + cir.if %4 { + cir.yield + } + cir.return + } +} + +// MLIR: llvm.func @foo(%arg0: i64) +// MLIR: [[ONE:%.*]] = llvm.mlir.constant(1 : i64) : i64 +// MLIR: [[EXPECT:%.*]] = llvm.intr.expect %arg0, [[ONE]] : i64 +// MLIR: [[ZERO:%.*]] = llvm.mlir.constant(0 : i64) : i64 +// MLIR: [[CMP_NE:%.*]] = llvm.icmp "ne" [[EXPECT]], [[ZERO]] : i64 +// MLIR: llvm.cond_br [[CMP_NE]], ^bb1, ^bb2 +// MLIR: ^bb1: // pred: ^bb0 +// MLIR: llvm.br ^bb2 +// MLIR: ^bb2: // 2 preds: ^bb0, ^bb1 +// MLIR: [[EXPECT_WITH_PROB:%.*]] = llvm.intr.expect.with.probability %arg0, [[ONE]], 1.000000e-01 : i64 +// MLIR: [[ZERO:%.*]] = llvm.mlir.constant(0 : i64) : i64 +// MLIR: [[CMP_NE:%.*]] = llvm.icmp "ne" [[EXPECT_WITH_PROB]], [[ZERO]] : i64 +// MLIR: llvm.cond_br [[CMP_NE]], ^bb3, ^bb4 +// MLIR: ^bb3: // pred: ^bb2 +// MLIR: llvm.br ^bb4 +// MLIR: ^bb4: // 2 preds: ^bb2, ^bb3 +// MLIR: llvm.return + +// LLVM: define void @foo(i64 %0) +// LLVM: [[EXPECT:%.*]] = call i64 @llvm.expect.i64(i64 %0, i64 1) +// LLVM: [[CMP_NE:%.*]] = icmp ne i64 [[EXPECT]], 0 +// LLVM: br i1 [[CMP_NE]], label %4, label %5 +// LLVM: 4: +// LLVM: br label %5 +// LLVM: 5: +// LLVM: [[EXPECT_WITH_PROB:%.*]] = call i64 @llvm.expect.with.probability.i64(i64 %0, i64 1, double 1.000000e-01) +// LLVM: [[CMP_NE:%.*]] = icmp ne i64 [[EXPECT_WITH_PROB]], 0 +// LLVM: br i1 [[CMP_NE]], label %8, label %9 +// LLVM: 8: +// LLVM: br label %9 +// LLVM: 9: +// LLVM: ret void + diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index a6dfd8e65900..eac0b5e4467e 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -18,32 +18,28 @@ module { // MLIR: llvm.func @foo(%arg0: i32) -> i32 // MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %1 = llvm.icmp "ne" %arg0, %0 : i32 -// MLIR-NEXT: %2 = llvm.zext %1 : i1 to i8 -// MLIR-NEXT: %3 = llvm.trunc %2 : i8 to i1 -// MLIR-NEXT: llvm.cond_br %3, ^bb2, ^bb1 +// MLIR-NEXT: llvm.cond_br %1, ^bb2, ^bb1 // MLIR-NEXT: ^bb1: // pred: ^bb0 -// MLIR-NEXT: %4 = llvm.mlir.constant(0 : i32) : i32 -// MLIR-NEXT: llvm.return %4 : i32 +// MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: llvm.return %2 : i32 // MLIR-NEXT: ^bb2: // pred: ^bb0 -// MLIR-NEXT: %5 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: llvm.return %5 : i32 +// MLIR-NEXT: %3 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: llvm.return %3 : i32 // MLIR-NEXT: ^bb3: // no predecessors // MLIR-NEXT: llvm.return %arg0 : i32 // MLIR-NEXT: } // LLVM: define i32 @foo(i32 %0) // LLVM-NEXT: %2 = icmp ne i32 %0, 0 -// LLVM-NEXT: %3 = zext i1 %2 to i8 -// LLVM-NEXT: %4 = trunc i8 %3 to i1 -// LLVM-NEXT: br i1 %4, label %6, label %5 +// LLVM-NEXT: br i1 %2, label %4, label %3 // LLVM-EMPTY: -// LLVM-NEXT: 5: +// LLVM-NEXT: 3: // LLVM-NEXT: ret i32 0 // LLVM-EMPTY: -// LLVM-NEXT: 6: +// LLVM-NEXT: 4: // LLVM-NEXT: ret i32 1 // LLVM-EMPTY: -// LLVM-NEXT: 7: +// LLVM-NEXT: 5: // LLVM-NEXT: ret i32 %0 // LLVM-NEXT: } @@ -59,12 +55,10 @@ module { // MLIR: llvm.func @onlyIf(%arg0: i32) -> i32 // MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %1 = llvm.icmp "ne" %arg0, %0 : i32 - // MLIR-NEXT: %2 = llvm.zext %1 : i1 to i8 - // MLIR-NEXT: %3 = llvm.trunc %2 : i8 to i1 - // MLIR-NEXT: llvm.cond_br %3, ^bb1, ^bb2 + // MLIR-NEXT: llvm.cond_br %1, ^bb1, ^bb2 // MLIR-NEXT: ^bb1: // pred: ^bb0 - // MLIR-NEXT: %4 = llvm.mlir.constant(1 : i32) : i32 - // MLIR-NEXT: llvm.return %4 : i32 + // MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 + // MLIR-NEXT: llvm.return %2 : i32 // MLIR-NEXT: ^bb2: // pred: ^bb0 // MLIR-NEXT: llvm.return %arg0 : i32 // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index 92f8e4654a40..5931d49de3a4 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -171,7 +171,7 @@ module { // CHECK: ^bb2: // pred: ^bb1 // CHECK: llvm.br ^bb3 // CHECK: ^bb3: // pred: ^bb2 - // CHECK: llvm.cond_br %14, ^bb4, ^bb5 + // CHECK: llvm.cond_br {{%.*}}, ^bb4, ^bb5 // CHECK: ^bb4: // pred: ^bb3 // CHECK: llvm.br ^bb7 // CHECK: ^bb5: // pred: ^bb3 From 432d7154cc9bd330d2fa7253a5a1f972f568a1dc Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Thu, 7 Mar 2024 01:24:59 +0300 Subject: [PATCH 1433/2301] [CIR][CIRGen] Partial support for `offsetof` (#492) Support `offset` expression in case when we can evaluate offset expression as integer. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 12 +++++++++++- clang/test/CIR/CodeGen/offsetof.c | 19 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/offsetof.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 1af5628208da..368055555f9d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -195,7 +195,17 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitGNUNullExpr(const GNUNullExpr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitOffsetOfExpr(OffsetOfExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitOffsetOfExpr(OffsetOfExpr *E) { + // Try folding the offsetof to a constant. + Expr::EvalResult EVResult; + if (E->EvaluateAsInt(EVResult, CGF.getContext())) { + llvm::APSInt Value = EVResult.Val.getInt(); + return Builder.getConstInt(CGF.getLoc(E->getExprLoc()), Value); + } + + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E); mlir::Value VisitAddrLabelExpr(const AddrLabelExpr *E) { llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/offsetof.c b/clang/test/CIR/CodeGen/offsetof.c new file mode 100644 index 000000000000..5259e14d4915 --- /dev/null +++ b/clang/test/CIR/CodeGen/offsetof.c @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +#include + +typedef struct { + int a; + int b; +} A; + +void foo() { + offsetof(A, a); + offsetof(A, b); +} + +// CHECK: cir.func no_proto @foo() +// CHECK: {{.*}} = cir.const(#cir.int<0> : !u64i) : !u64i +// CHECK: {{.*}} = cir.const(#cir.int<4> : !u64i) : !u64i +// CHECK: cir.return + From 3c020f5e81f2a56faa850f219e261a8246fabced Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Thu, 7 Mar 2024 01:40:19 +0300 Subject: [PATCH 1434/2301] [CIR][CIRGen] Support for CStyleCastExprClass in buildCastLValue (#494) Change is taken from original llvm codegen --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 ++-- clang/test/CIR/CodeGen/cast.c | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/cast.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index bcaa2655994d..9f403d74378a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1653,7 +1653,7 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { case CK_CPointerToObjCPointerCast: case CK_BlockPointerToObjCPointerCast: case CK_LValueToRValue: - assert(0 && "NYI"); + return buildLValue(E->getSubExpr()); case CK_NoOp: { // CK_NoOp can model a qualification conversion, which can remove an array @@ -2250,7 +2250,6 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return buildMemberExpr(cast(E)); case Expr::PredefinedExprClass: return buildPredefinedLValue(cast(E)); - case Expr::CStyleCastExprClass: case Expr::CXXFunctionalCastExprClass: case Expr::CXXReinterpretCastExprClass: case Expr::CXXConstCastExprClass: @@ -2259,6 +2258,7 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { emitError(getLoc(E->getExprLoc()), "l-value not implemented for '") << E->getStmtClassName() << "'"; assert(0 && "Use buildCastLValue below, remove me when adding testcase"); + case Expr::CStyleCastExprClass: case Expr::CXXStaticCastExprClass: case Expr::CXXDynamicCastExprClass: case Expr::ImplicitCastExprClass: diff --git a/clang/test/CIR/CodeGen/cast.c b/clang/test/CIR/CodeGen/cast.c new file mode 100644 index 000000000000..6e25fcc2abdc --- /dev/null +++ b/clang/test/CIR/CodeGen/cast.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +typedef struct { + int x; +} A; + +int cstyle_cast_lvalue(A a) { + return ((A)(a)).x; +} + +// CHECK: cir.func @cstyle_cast_lvalue(%arg0: !ty_22A22 loc({{.*}})) +// CHECK: [[ALLOC_A:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: [[ALLOC_RET:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[ALLOC_A]] : !ty_22A22, cir.ptr +// CHECK: [[X_ADDR:%.*]] = cir.get_member [[ALLOC_A]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: [[X:%.*]] = cir.load [[X_ADDR]] : cir.ptr , !s32i +// CHECK: cir.store [[X]], [[ALLOC_RET]] : !s32i, cir.ptr +// CHECK: [[RET:%.*]] = cir.load [[ALLOC_RET]] : cir.ptr , !s32i +// CHECK: cir.return [[RET]] : !s32i + From ca7618902571b5a031314626b35b0edba1bfbf7a Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 7 Mar 2024 08:54:30 +0800 Subject: [PATCH 1435/2301] [CIR] Add `cir.trap` operation (#496) This PR adds the `cir.trap` operation, which corresponds to the `__builtin_trap` builtin function. When executed, the operation terminates the program abnormally in an implementation-defined manner. This PR also includes CIRGen and LLVM lowering support for the new operation. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 17 +++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 9 ++++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 5 ++-- .../CodeGen/UnimplementedFeatureGuarding.h | 1 - .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 29 +++++++++++++++++-- clang/test/CIR/CodeGen/implicit-return.cpp | 29 +++++++++++++------ clang/test/CIR/CodeGen/trap.cpp | 28 ++++++++++++++++++ clang/test/CIR/Lowering/intrinsics.cir | 13 +++++++++ 8 files changed, 117 insertions(+), 14 deletions(-) create mode 100644 clang/test/CIR/CodeGen/trap.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 53ce8582f6b0..f216e2e77237 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3118,6 +3118,23 @@ def UnreachableOp : CIR_Op<"unreachable", [Terminator]> { let assemblyFormat = "attr-dict"; } +//===----------------------------------------------------------------------===// +// TrapOp +//===----------------------------------------------------------------------===// + +def TrapOp : CIR_Op<"trap", [Terminator]> { + let summary = "Exit the program abnormally"; + let description = [{ + The cir.trap operation causes the program to exit abnormally. The + implementations may implement this operation with different mechanisms. For + example, an implementation may implement this operation by calling abort, + while another implementation may implement this operation by executing an + illegal instruction. + }]; + + let assemblyFormat = "attr-dict"; +} + //===----------------------------------------------------------------------===// // Operations Lowered Directly to LLVM IR // diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 064f3b6744d2..3b7941d7c08a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -505,6 +505,15 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(nullptr); } + case Builtin::BI__builtin_trap: { + builder.create(getLoc(E->getExprLoc())); + + // Note that cir.trap is a terminator so we need to start a new block to + // preserve the insertion point. + builder.createBlock(builder.getBlock()->getParent()); + + return RValue::get(nullptr); + } case Builtin::BImemcpy: case Builtin::BI__builtin_memcpy: case Builtin::BImempcpy: diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index b38433b4b499..b79dd53d8200 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -464,8 +464,9 @@ void CIRGenFunction::LexicalScope::buildImplicitReturn() { llvm_unreachable("NYI"); } else if (shouldEmitUnreachable) { if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { - // TODO: buildTrapCall(llvm::Intrinsic::trap); - assert(!UnimplementedFeature::trap()); + builder.create(localScope->EndLoc); + builder.clearInsertionPoint(); + return; } } diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index d522405eb567..921cc57cc59f 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -166,7 +166,6 @@ struct UnimplementedFeature { static bool escapedLocals() { return false; } static bool deferredReplacements() { return false; } static bool shouldInstrumentFunction() { return false; } - static bool trap() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ce0cdacb17d9..d7743e2e00ba 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2268,6 +2268,31 @@ class CIRUnreachableLowering } }; +class CIRTrapLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::TrapOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto loc = op->getLoc(); + rewriter.eraseOp(op); + + auto llvmTrapIntrinsicType = + mlir::LLVM::LLVMVoidType::get(op->getContext()); + rewriter.create( + loc, llvmTrapIntrinsicType, + mlir::StringAttr::get(op->getContext(), "llvm.trap"), + mlir::ValueRange{}); + // Note that the call to llvm.trap is not a terminator in LLVM dialect. + // So we must emit an additional llvm.unreachable to terminate the current + // block. + rewriter.create(loc); + + return mlir::success(); + } +}; + class CIRInlineAsmOpLowering : public mlir::OpConversionPattern { @@ -2315,8 +2340,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRStackSaveLowering, - CIRStackRestoreLowering, CIRUnreachableLowering, CIRInlineAsmOpLowering>( - converter, patterns.getContext()); + CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, + CIRInlineAsmOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/implicit-return.cpp b/clang/test/CIR/CodeGen/implicit-return.cpp index 6a8e33577c61..09b084b70ddb 100644 --- a/clang/test/CIR/CodeGen/implicit-return.cpp +++ b/clang/test/CIR/CodeGen/implicit-return.cpp @@ -1,15 +1,26 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -O0 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CHECK-O0 +// RUN: %clang_cc1 -O2 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CHECK-O2 void ret_void() {} -// CHECK: cir.func @_Z8ret_voidv() -// CHECK-NEXT: cir.return -// CHECK-NEXT: } +// CHECK-O0: cir.func @_Z8ret_voidv() +// CHECK-O0-NEXT: cir.return +// CHECK-O0-NEXT: } + +// CHECK-O2: cir.func @_Z8ret_voidv() +// CHECK-O2-NEXT: cir.return +// CHECK-O2-NEXT: } int ret_non_void() {} -// CHECK: cir.func @_Z12ret_non_voidv() -> !s32i -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] -// CHECK-NEXT: cir.unreachable -// CHECK-NEXT: } +// CHECK-O0: cir.func @_Z12ret_non_voidv() -> !s32i +// CHECK-O0-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK-O0-NEXT: cir.trap +// CHECK-O0-NEXT: } + +// CHECK-O2: cir.func @_Z12ret_non_voidv() -> !s32i +// CHECK-O2-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK-O2-NEXT: cir.unreachable +// CHECK-O2-NEXT: } diff --git a/clang/test/CIR/CodeGen/trap.cpp b/clang/test/CIR/CodeGen/trap.cpp new file mode 100644 index 000000000000..2d1089421876 --- /dev/null +++ b/clang/test/CIR/CodeGen/trap.cpp @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo(); + +void basic() { + foo(); + __builtin_trap(); +} + +// CHECK: cir.func @_Z5basicv() +// CHECK-NEXT: cir.call @_Z3foov() : () -> () +// CHECK-NEXT: cir.trap +// CHECK-NEXT: } + +void code_after_unreachable() { + foo(); + __builtin_trap(); + foo(); +} + +// CHECK: cir.func @_Z22code_after_unreachablev() +// CHECK-NEXT: cir.call @_Z3foov() : () -> () +// CHECK-NEXT: cir.trap +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: cir.call @_Z3foov() : () -> () +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/intrinsics.cir b/clang/test/CIR/Lowering/intrinsics.cir index f3bcf9fba492..ddf8e0708ad4 100644 --- a/clang/test/CIR/Lowering/intrinsics.cir +++ b/clang/test/CIR/Lowering/intrinsics.cir @@ -1,4 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM module { cir.func @test_unreachable() { @@ -7,4 +8,16 @@ module { // MLIR: llvm.func @test_unreachable() // MLIR-NEXT: llvm.unreachable + + cir.func @test_trap() { + cir.trap + } + + // MLIR: llvm.func @test_trap() + // MLIR-NEXT: llvm.call_intrinsic "llvm.trap"() : () -> !llvm.void + // MLIR-NEXT: llvm.unreachable + + // LLVM: define void @test_trap() + // LLVM-NEXT: call void @llvm.trap() + // LLVM-NEXT: unreachable } From 9a4187a621e24cb84529367ca1704c0bc6d51bc1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Mar 2024 15:49:22 -0800 Subject: [PATCH 1436/2301] [CIR][FrontendTool] Fix overly conservative compatible flags Remove check since paths that aren't related to ClangIR shouldn't be affected at all, even if the flag is ON. Also, this conservative error message wasn't tested. --- clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 93d2eebc9a9a..5ee79851ed89 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -53,16 +53,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) { auto UseCIR = CI.getFrontendOpts().UseClangIRPipeline; auto Act = CI.getFrontendOpts().ProgramAction; - auto EmitsCIR = Act == EmitCIR || Act == EmitCIROnly; - auto IsImplementedCIROutput = EmitsCIR || Act == EmitLLVM || - Act == EmitMLIR || Act == EmitAssembly || - Act == EmitObj; - if (UseCIR && !IsImplementedCIROutput) - llvm::report_fatal_error( - "-fclangir currently only works with -emit-cir, -emit-cir-only, " - "-emit-mlir, -emit-llvm, -emit-obj, and -S"); if (!UseCIR && EmitsCIR) llvm::report_fatal_error( "-emit-cir and -emit-cir-only only valid when using -fclangir"); From 64fd56f5f08128c672b0980257d6e0ab5294692b Mon Sep 17 00:00:00 2001 From: David Olsen Date: Fri, 8 Mar 2024 16:41:50 -0800 Subject: [PATCH 1437/2301] [CIR] Vector ternary operator (#500) Implement the vector version of the ternary (`?:`) operator. This is a separate MLIR op than the regular `?:` operator because the vector version is not short-circuiting and always evaluates all its arguments. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 30 ++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 7 +++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 22 +++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 32 +++++++++++++-- clang/test/CIR/CodeGen/vectype.cpp | 4 ++ clang/test/CIR/IR/invalid.cir | 39 +++++++++++++++++++ clang/test/CIR/Lowering/vectype.cpp | 6 +++ 7 files changed, 136 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index f216e2e77237..7dab06b35cd6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2086,6 +2086,36 @@ def VecCmpOp : CIR_Op<"vec.cmp", [Pure, SameTypeOperands]> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// VecTernary +//===----------------------------------------------------------------------===// + +def VecTernaryOp : CIR_Op<"vec.ternary", + [Pure, AllTypesMatch<["result", "vec1", "vec2"]>]> { + let summary = "The `cond ? a : b` ternary operator for vector types"; + let description = [{ + The `cir.vec.ternary` operation represents the C/C++ ternary operator, + `?:`, for vector types, which does a `select` on individual elements of the + vectors. Unlike a regular `?:` operator, there is no short circuiting. All + three arguments are always evaluated. Because there is no short + circuiting, there are no regions in this operation, unlike cir.ternary. + + The first argument is a vector of integral type. The second and third + arguments are vectors of the same type and have the same number of elements + as the first argument. + + The result is a vector of the same type as the second and third arguments. + Each element of the result is `(bool)a[n] ? b[n] : c[n]`. + }]; + let arguments = (ins CIR_VectorType:$cond, CIR_VectorType:$vec1, + CIR_VectorType:$vec2); + let results = (outs CIR_VectorType:$result); + let assemblyFormat = [{ + `(` $cond `,` $vec1 `,` $vec2 `)` `:` type($cond) `,` type($vec1) attr-dict + }]; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // BaseClassAddr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 368055555f9d..3923cc79b55c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -2059,7 +2059,12 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( if (condExpr->getType()->isVectorType() || condExpr->getType()->isSveVLSBuiltinType()) { - llvm_unreachable("NYI"); + assert(condExpr->getType()->isVectorType() && "?: op for SVE vector NYI"); + mlir::Value condValue = Visit(condExpr); + mlir::Value lhsValue = Visit(lhsExpr); + mlir::Value rhsValue = Visit(rhsExpr); + return builder.create(loc, condValue, lhsValue, + rhsValue); } // If this is a really simple expression (like x ? 4 : 5), emit this as a diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 8abf57dd942e..9c0ef96d055d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -521,6 +521,28 @@ LogicalResult VecCreateOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// VecTernaryOp +//===----------------------------------------------------------------------===// + +LogicalResult VecTernaryOp::verify() { + // Verify that the condition operand is a vector of integral type. + if (!getCond().getType().getEltType().isa()) { + return emitOpError() << "condition operand of type " << getCond().getType() + << " must be a vector type of !cir.int"; + } + + // Verify that the condition operand has the same number of elements as the + // other operands. (The automatic verification already checked that all + // operands are vector types and that the second and third operands are the + // same type.) + if (getCond().getType().getSize() != getVec1().getType().getSize()) { + return emitOpError() << "the number of elements in " << getCond().getType() + << " and " << getVec1().getType() << " don't match"; + } + return success(); +} + //===----------------------------------------------------------------------===// // ReturnOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d7743e2e00ba..947f12614b7b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1319,6 +1319,31 @@ class CIRVectorCmpOpLowering } }; +class CIRVectorTernaryLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecTernaryOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert(op.getType().isa() && + op.getCond().getType().isa() && + op.getVec1().getType().isa() && + op.getVec2().getType().isa() && + "Vector ternary op with non-vector type"); + // Convert `cond` into a vector of i1, then use that in a `select` op. + mlir::Value bitVec = rewriter.create( + op.getLoc(), mlir::LLVM::ICmpPredicate::ne, adaptor.getCond(), + rewriter.create( + op.getCond().getLoc(), + typeConverter->convertType(op.getCond().getType()))); + rewriter.replaceOpWithNewOp( + op, bitVec, adaptor.getVec1(), adaptor.getVec2()); + return mlir::success(); + } +}; + class CIRVAStartLowering : public mlir::OpConversionPattern { public: @@ -2339,9 +2364,10 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, - CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRStackSaveLowering, - CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, - CIRInlineAsmOpLowering>(converter, patterns.getContext()); + CIRVectorExtractLowering, CIRVectorCmpOpLowering, + CIRVectorTernaryLowering, CIRStackSaveLowering, CIRStackRestoreLowering, + CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index 6824fd5ac4fa..b6af5318cb18 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -68,6 +68,10 @@ void vector_int_test(int x) { vi4 n = ~a; // CHECK: %{{[0-9]+}} = cir.unary(not, %{{[0-9]+}}) : !cir.vector, !cir.vector + // Ternary conditional operator + vi4 tc = a ? b : d; + // CHECK: %{{[0-9]+}} = cir.vec.ternary(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) : , + // Comparisons vi4 o = a == b; // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : , diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 8e5545adf1f9..7b3908aa8835 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -432,6 +432,45 @@ cir.func @vec_insert_non_vector() { // ----- +!s32i = !cir.int +cir.func @vec_ternary_non_vector1() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : + %2 = cir.vec.ternary(%0, %1, %1) : !s32i, // expected-error {{custom op 'cir.vec.ternary' invalid kind of Type specified}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_ternary_non_vector2() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : + %2 = cir.vec.ternary(%1, %0, %0) : , !s32i // expected-error {{custom op 'cir.vec.ternary' invalid kind of Type specified}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_ternary_different_size() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : + %2 = cir.vec.create(%0, %0, %0, %0 : !s32i, !s32i, !s32i, !s32i) : + %3 = cir.vec.ternary(%1, %2, %2) : , // expected-error {{'cir.vec.ternary' op the number of elements in '!cir.vector x 2>' and '!cir.vector x 4>' don't match}} + cir.return +} + +// ----- + +cir.func @vec_ternary_not_int(%p : !cir.float) { + %0 = cir.vec.create(%p, %p : !cir.float, !cir.float) : + %1 = cir.vec.ternary(%0, %0, %0) : , // expected-error {{'cir.vec.ternary' op condition operand of type '!cir.vector' must be a vector type of !cir.int}} + cir.return +} + +// ----- + cir.func coroutine @bad_task() { // expected-error {{coroutine body must use at least one cir.await op}} cir.return } diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index 138e95f2c43c..be1ca98a646d 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -133,6 +133,12 @@ void vector_int_test(int x) { // CHECK: %[[#T104:]] = llvm.xor %[[#T103]], %[[#T93]] : vector<4xi32> // CHECK: llvm.store %[[#T104]], %[[#T29:]] : vector<4xi32>, !llvm.ptr + // Ternary conditional operator + vi4 tc = a ? b : d; + // CHECK: %[[#Zero:]] = llvm.mlir.zero : vector<4xi32> + // CHECK: %[[#BitVec:]] = llvm.icmp "ne" %[[#A:]], %[[#Zero]] : vector<4xi32> + // CHECK: %[[#Res:]] = llvm.select %[[#BitVec]], %[[#B:]], %[[#D:]] : vector<4xi1>, vector<4xi32> + // Comparisons vi4 o = a == b; // CHECK: %[[#T105:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> From 0e45a9a2ec99588de38ab5eeb7a3db1b588e5246 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Mar 2024 17:43:30 -0800 Subject: [PATCH 1438/2301] [CIR][CIRGen][NFC] Use proper help with shouldEmitRTTI and clean up --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 6c831ddef8f7..9e7b41090202 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2700,9 +2700,7 @@ mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc, // Return a bogus pointer if RTTI is disabled, unless it's for EH. // FIXME: should we even be calling this method if RTTI is disabled // and it's not for EH? - if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice || - (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && - getTriple().isNVPTX())) + if (!shouldEmitRTTI(ForEH)) llvm_unreachable("NYI"); if (ForEH && Ty->isObjCObjectPointerType() && From 1ccb14d73b98dd46877694fd3e5cddf96841fc92 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Mar 2024 18:39:34 -0800 Subject: [PATCH 1439/2301] [CIR][CIRGen][RTTI] Handle vtables in face of -fno-rtti --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 +++++ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 3 +-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 5 +++-- clang/test/CIR/CodeGen/vtable-rtti.cpp | 11 +++++++++++ 5 files changed, 21 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 3ecb52c87d1e..da2060818065 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -144,6 +144,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::BoolAttr::get(getContext(), getBoolTy(), state); } + mlir::TypedAttr getConstNullPtrAttr(mlir::Type t) { + assert(t.isa() && "expected cir.ptr"); + return mlir::cir::ConstPtrAttr::get(getContext(), t, 0); + } + mlir::TypedAttr getConstPtrAttr(mlir::Type t, uint64_t v) { assert(t.isa() && "expected cir.ptr"); return mlir::cir::ConstPtrAttr::get(getContext(), t, v); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 8a5f61c60efd..a214ba50a8fa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -877,9 +877,8 @@ class ConstExprEmitter switch (E->getCastKind()) { case CK_HLSLArrayRValue: case CK_HLSLVectorTruncation: - case CK_ToUnion: { + case CK_ToUnion: llvm_unreachable("not implemented"); - } case CK_AddressSpaceConversion: { llvm_unreachable("not implemented"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 9e7b41090202..938860831440 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2701,7 +2701,7 @@ mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc, // FIXME: should we even be calling this method if RTTI is disabled // and it's not for EH? if (!shouldEmitRTTI(ForEH)) - llvm_unreachable("NYI"); + return getBuilder().getConstNullPtrAttr(builder.getUInt8PtrTy()); if (ForEH && Ty->isObjCObjectPointerType() && getLangOpts().ObjCRuntime.isGNUFamily()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 673f5feec48c..fb5a7ac876b9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -203,8 +203,9 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, // vtableHasLocalLinkage, // /*isCompleteDtor=*/false); } else { - assert(rtti.isa() && - "expected GlobalViewAttr"); + assert((rtti.isa() || + rtti.isa()) && + "expected GlobalViewAttr or ConstPtrAttr"); return builder.add(rtti); } diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index bdde79b926cf..0c5990394b11 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fno-rtti -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t2.cir +// RUN: FileCheck --input-file=%t2.cir --check-prefix=RTTI_DISABLED %s // XFAIL: * class A @@ -23,17 +25,21 @@ class B : public A // vtable for A type // CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct> x 5>}> +// RTTI_DISABLED: ![[VTableTypeA:ty_.*]] = !cir.struct> x 5>}> // Class A // CHECK: ![[ClassA:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast> // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> +// RTTI_DISABLED: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> // B ctor => @B::B() // Calls @A::A() and initialize __vptr with address of B's vtable. // // CHECK: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr +// RTTI_DISABLED: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr + // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > // CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr @@ -75,18 +81,23 @@ class B : public A // vtable for B // CHECK: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}> : ![[VTableTypeA]] +// RTTI_DISABLED: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZN1BD2Ev> : !cir.ptr, #cir.global_view<@_ZN1BD0Ev> : !cir.ptr, #cir.global_view<@_ZNK1A5quackEv> : !cir.ptr]> : !cir.array x 5>}> : ![[VTableTypeA]] // vtable for __cxxabiv1::__si_class_type_info // CHECK: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> +// RTTI_DISABLED-NOT: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> // typeinfo name for B // CHECK: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array {alignment = 1 : i64} +// RTTI_DISABLED-NOT: cir.global linkonce_odr @_ZTS1B // typeinfo for A // CHECK: cir.global "private" constant external @_ZTI1A : !cir.ptr +// RTTI_DISABLED-NOT: cir.global "private" constant external @_ZTI1A : !cir.ptr // typeinfo for B // CHECK: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr}> : ![[TypeInfoB]] +// RTTI_DISABLED-NOT: cir.global constant external @_ZTI1B // Checks for dtors in dtors.cpp From 78d2ecc17aa2bf9665ef506e53f2256b632e9a57 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 11 Mar 2024 15:10:22 -0700 Subject: [PATCH 1440/2301] [CIR][CIRGen] Emit more delete calls --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 7 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 170 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/delete.cpp | 15 ++ clang/test/CIR/CodeGen/vtable-rtti.cpp | 14 +- 7 files changed, 200 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/CodeGen/delete.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index da2060818065..6d6079ada6a3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -539,9 +539,12 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, uInt64Ty, mlir::cir::IntAttr::get(uInt64Ty, C)); } - mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::cir::IntType t, + mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::Type t, uint64_t C) { - return create(loc, t, mlir::cir::IntAttr::get(t, C)); + auto intTy = t.dyn_cast(); + assert(intTy && "expected mlir::cir::IntType"); + return create(loc, intTy, + mlir::cir::IntAttr::get(t, C)); } mlir::cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal) { bool isSigned = intVal.isSigned(); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 42b06cfe7337..39c6ebc555b3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -633,6 +633,151 @@ static CharUnits CalculateCookiePadding(CIRGenFunction &CGF, // return CGF.CGM.getCXXABI().GetArrayCookieSize(E); } +namespace { +/// Calls the given 'operator delete' on a single object. +struct CallObjectDelete final : EHScopeStack::Cleanup { + mlir::Value Ptr; + const FunctionDecl *OperatorDelete; + QualType ElementType; + + CallObjectDelete(mlir::Value Ptr, const FunctionDecl *OperatorDelete, + QualType ElementType) + : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {} + + void Emit(CIRGenFunction &CGF, Flags flags) override { + CGF.buildDeleteCall(OperatorDelete, Ptr, ElementType); + } +}; +} // namespace + +/// Emit the code for deleting a single object. +/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false +/// if not. +static bool EmitObjectDelete(CIRGenFunction &CGF, const CXXDeleteExpr *DE, + Address Ptr, QualType ElementType) { + // C++11 [expr.delete]p3: + // If the static type of the object to be deleted is different from its + // dynamic type, the static type shall be a base class of the dynamic type + // of the object to be deleted and the static type shall have a virtual + // destructor or the behavior is undefined. + CGF.buildTypeCheck(CIRGenFunction::TCK_MemberCall, DE->getExprLoc(), + Ptr.getPointer(), ElementType); + + const FunctionDecl *OperatorDelete = DE->getOperatorDelete(); + assert(!OperatorDelete->isDestroyingOperatorDelete()); + + // Find the destructor for the type, if applicable. If the + // destructor is virtual, we'll just emit the vcall and return. + const CXXDestructorDecl *Dtor = nullptr; + if (const RecordType *RT = ElementType->getAs()) { + CXXRecordDecl *RD = cast(RT->getDecl()); + if (RD->hasDefinition() && !RD->hasTrivialDestructor()) { + Dtor = RD->getDestructor(); + + if (Dtor->isVirtual()) { + bool UseVirtualCall = true; + const Expr *Base = DE->getArgument(); + if (auto *DevirtualizedDtor = dyn_cast_or_null( + Dtor->getDevirtualizedMethod( + Base, CGF.CGM.getLangOpts().AppleKext))) { + UseVirtualCall = false; + const CXXRecordDecl *DevirtualizedClass = + DevirtualizedDtor->getParent(); + if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) { + // Devirtualized to the class of the base type (the type of the + // whole expression). + Dtor = DevirtualizedDtor; + } else { + // Devirtualized to some other type. Would need to cast the this + // pointer to that type but we don't have support for that yet, so + // do a virtual call. FIXME: handle the case where it is + // devirtualized to the derived type (the type of the inner + // expression) as in EmitCXXMemberOrOperatorMemberCallExpr. + UseVirtualCall = true; + } + } + if (UseVirtualCall) { + llvm_unreachable("NYI"); + return false; + } + } + } + } + + // Make sure that we call delete even if the dtor throws. + // This doesn't have to a conditional cleanup because we're going + // to pop it off in a second. + CGF.EHStack.pushCleanup( + NormalAndEHCleanup, Ptr.getPointer(), OperatorDelete, ElementType); + + if (Dtor) { + llvm_unreachable("NYI"); + } else if (auto Lifetime = ElementType.getObjCLifetime()) { + switch (Lifetime) { + case Qualifiers::OCL_None: + case Qualifiers::OCL_ExplicitNone: + case Qualifiers::OCL_Autoreleasing: + break; + + case Qualifiers::OCL_Strong: + llvm_unreachable("NYI"); + break; + + case Qualifiers::OCL_Weak: + llvm_unreachable("NYI"); + break; + } + } + + // In traditional LLVM codegen null checks are emitted to save a delete call. + // In CIR we optimize for size by default, the null check should be added into + // this function callers. + assert(!UnimplementedFeature::emitNullCheckForDeleteCalls()); + + CGF.PopCleanupBlock(); + return false; +} + +void CIRGenFunction::buildCXXDeleteExpr(const CXXDeleteExpr *E) { + const Expr *Arg = E->getArgument(); + Address Ptr = buildPointerWithAlignment(Arg); + + // Null check the pointer. + // + // We could avoid this null check if we can determine that the object + // destruction is trivial and doesn't require an array cookie; we can + // unconditionally perform the operator delete call in that case. For now, we + // assume that deleted pointers are null rarely enough that it's better to + // keep the branch. This might be worth revisiting for a -O0 code size win. + // + // CIR note: emit the code size friendly by default for now, such as mentioned + // in `EmitObjectDelete`. + assert(!UnimplementedFeature::emitNullCheckForDeleteCalls()); + QualType DeleteTy = E->getDestroyedType(); + + // A destroying operator delete overrides the entire operation of the + // delete expression. + if (E->getOperatorDelete()->isDestroyingOperatorDelete()) { + llvm_unreachable("NYI"); + return; + } + + // We might be deleting a pointer to array. If so, GEP down to the + // first non-array element. + // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*) + if (DeleteTy->isConstantArrayType()) { + llvm_unreachable("NYI"); + } + + assert(convertTypeForMem(DeleteTy) == Ptr.getElementType()); + + if (E->isArrayForm()) { + llvm_unreachable("NYI"); + } else { + (void)EmitObjectDelete(*this, E, Ptr, DeleteTy); + } +} + mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { // The element type being allocated. QualType allocType = getContext().getBaseElementType(E->getAllocatedType()); @@ -893,7 +1038,30 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, // Pass the size if the delete function has a size_t parameter. if (Params.Size) { - llvm_unreachable("NYI"); + QualType SizeType = *ParamTypeIt++; + CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy); + assert(SizeTy && "expected mlir::cir::IntType"); + auto Size = builder.getConstInt(*currSrcLoc, ConvertType(SizeType), + DeleteTypeSize.getQuantity()); + + // For array new, multiply by the number of elements. + if (NumElements) { + // Uncomment upon adding testcase. + // Size = builder.createMul(Size, NumElements); + llvm_unreachable("NYI"); + } + + // If there is a cookie, add the cookie size. + if (!CookieSize.isZero()) { + // Uncomment upon adding testcase. + // builder.createBinop( + // Size, mlir::cir::BinOpKind::Add, + // builder.getConstInt(*currSrcLoc, SizeTy, + // CookieSize.getQuantity())); + llvm_unreachable("NYI"); + } + + DeleteArgs.add(RValue::get(Size), SizeType); } // Pass the alignment if the delete function has an align_val_t parameter. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 3923cc79b55c..43fbc3a39755 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -606,7 +606,8 @@ class ScalarExprEmitter : public StmtVisitor { return CGF.buildCXXNewExpr(E); } mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *E) { - llvm_unreachable("NYI"); + CGF.buildCXXDeleteExpr(E); + return {}; } mlir::Value VisitTypeTraitExpr(const TypeTraitExpr *E) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index ed09b12b1eae..f8bcbd8c0f60 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -624,6 +624,7 @@ class CIRGenFunction : public CIRGenTypeCache { void buildCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr); mlir::Value buildCXXNewExpr(const CXXNewExpr *E); + void buildCXXDeleteExpr(const CXXDeleteExpr *E); void buildDeleteCall(const FunctionDecl *DeleteFD, mlir::Value Ptr, QualType DeleteTy, mlir::Value NumElements = nullptr, diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 921cc57cc59f..ba90b108f9f3 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -101,6 +101,7 @@ struct UnimplementedFeature { static bool constructABIArgDirectExtend() { return false; } static bool mayHaveIntegerOverflow() { return false; } static bool llvmLoweringPtrDiffConsidersPointee() { return false; } + static bool emitNullCheckForDeleteCalls() { return false; } // Folding methods. static bool foldBinOpFMF() { return false; } diff --git a/clang/test/CIR/CodeGen/delete.cpp b/clang/test/CIR/CodeGen/delete.cpp new file mode 100644 index 000000000000..0f0ddcbc2c84 --- /dev/null +++ b/clang/test/CIR/CodeGen/delete.cpp @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef __typeof(sizeof(int)) size_t; + +namespace test1 { + struct A { void operator delete(void*,size_t); int x; }; + void a(A *x) { + delete x; + } + // CHECK: cir.func @_ZN5test11aEPNS_1AE + + // CHECK: %[[CONST:.*]] = cir.const(#cir.int<4> : !u64i) : !u64i + // CHECK: cir.call @_ZN5test11AdlEPvm({{.*}}, %[[CONST]]) +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 0c5990394b11..996e305d8796 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -2,7 +2,6 @@ // RUN: FileCheck --input-file=%t.cir %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fno-rtti -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t2.cir // RUN: FileCheck --input-file=%t2.cir --check-prefix=RTTI_DISABLED %s -// XFAIL: * class A { @@ -21,18 +20,19 @@ class B : public A }; // Type info B. -// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct>, !cir.ptr>, !cir.ptr>}> +// CHECK: ![[TypeInfoB:ty_.*]] = !cir.struct, !cir.ptr, !cir.ptr}> // vtable for A type -// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct> x 5>}> -// RTTI_DISABLED: ![[VTableTypeA:ty_.*]] = !cir.struct> x 5>}> +// CHECK: ![[VTableTypeA:ty_.*]] = !cir.struct x 5>}> +// RTTI_DISABLED: ![[VTableTypeA:ty_.*]] = !cir.struct x 5>}> // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.record.decl.ast> +// RTTI_DISABLED: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.record.decl.ast> // Class B -// CHECK: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> -// RTTI_DISABLED: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> +// CHECK: ![[ClassB:ty_.*]] = !cir.struct +// RTTI_DISABLED: ![[ClassB:ty_.*]] = !cir.struct // B ctor => @B::B() // Calls @A::A() and initialize __vptr with address of B's vtable. From 2d1de7adcb5d6087ae58b3a213dec6a1df18e6f7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 11 Mar 2024 15:32:23 -0700 Subject: [PATCH 1441/2301] [CIR][CIRGen][NFC] Expand skeleton for constructing array types --- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 43 +++++++++++++------------ 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 39c6ebc555b3..6f22bfef485a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -343,29 +343,30 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, return; } - assert(!CGM.getASTContext().getAsArrayType(E->getType()) && - "array types NYI"); - - clang::CXXCtorType Type = Ctor_Complete; - bool ForVirtualBase = false; - bool Delegating = false; - - switch (E->getConstructionKind()) { - case CXXConstructionKind::Complete: - Type = Ctor_Complete; - break; - case CXXConstructionKind::Delegating: + if (const ArrayType *arrayType = getContext().getAsArrayType(E->getType())) { llvm_unreachable("NYI"); - break; - case CXXConstructionKind::VirtualBase: - ForVirtualBase = true; - [[fallthrough]]; - case CXXConstructionKind::NonVirtualBase: - Type = Ctor_Base; - break; - } + } else { + clang::CXXCtorType Type = Ctor_Complete; + bool ForVirtualBase = false; + bool Delegating = false; + + switch (E->getConstructionKind()) { + case CXXConstructionKind::Complete: + Type = Ctor_Complete; + break; + case CXXConstructionKind::Delegating: + llvm_unreachable("NYI"); + break; + case CXXConstructionKind::VirtualBase: + ForVirtualBase = true; + [[fallthrough]]; + case CXXConstructionKind::NonVirtualBase: + Type = Ctor_Base; + break; + } - buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); + buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); + } } namespace { From 8d203f6f0c6b48e4cb58d49102a028ab84fe372f Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 13 Mar 2024 01:43:48 +0800 Subject: [PATCH 1442/2301] [CIR][Lowering] Add LLVMIR lowering support for CIR bit operations (#501) This PR adds the LLVMIR lowering support for CIR bit operations. For `cir.bit.clz`, `cir.bit.ctz`, and `cir.bit.popcount`, they can be lowered directly to LLVM intrinsic calls to `@llvm.ctlz`, `@llvm.cttz`, and `@llvm.ctpop`, respectively. For the other three bit operations, namely `cir.bit.clrsb`, `cir.bit.ffs`, and `cir.bit.parity`, they are lowered to a sequence of LLVM IR instructions that implements their functionalities. This lowering scheme is also used by the original clang CodeGen. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 185 +++++++++++++++- clang/test/CIR/Lowering/bit.cir | 206 ++++++++++++++++++ 2 files changed, 390 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/bit.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 947f12614b7b..ef2e038bb405 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2098,6 +2098,187 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { } }; +static mlir::Value createLLVMBitOp(mlir::Location loc, + const llvm::Twine &llvmIntrinBaseName, + mlir::Type resultTy, mlir::Value operand, + std::optional poisonZeroInputFlag, + mlir::ConversionPatternRewriter &rewriter) { + auto operandIntTy = operand.getType().cast(); + auto resultIntTy = resultTy.cast(); + + std::string llvmIntrinName = + llvmIntrinBaseName.concat(".i") + .concat(std::to_string(operandIntTy.getWidth())) + .str(); + auto llvmIntrinNameAttr = + mlir::StringAttr::get(rewriter.getContext(), llvmIntrinName); + + // Note that LLVM intrinsic calls to bit intrinsics have the same type as the + // operand. + mlir::LLVM::CallIntrinsicOp op; + if (poisonZeroInputFlag.has_value()) { + auto poisonZeroInputValue = rewriter.create( + loc, rewriter.getI1Type(), static_cast(*poisonZeroInputFlag)); + op = rewriter.create( + loc, operand.getType(), llvmIntrinNameAttr, + mlir::ValueRange{operand, poisonZeroInputValue}); + } else { + op = rewriter.create( + loc, operand.getType(), llvmIntrinNameAttr, operand); + } + + mlir::Value result = op->getResult(0); + if (operandIntTy.getWidth() > resultIntTy.getWidth()) { + result = rewriter.create(loc, resultTy, result); + } else if (operandIntTy.getWidth() < resultIntTy.getWidth()) { + result = rewriter.create(loc, resultTy, result); + } + + return result; +} + +class CIRBitClrsbOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitClrsbOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto zero = rewriter.create( + op.getLoc(), adaptor.getInput().getType(), 0); + auto isNeg = rewriter.create( + op.getLoc(), + mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), + mlir::LLVM::ICmpPredicate::slt), + adaptor.getInput(), zero); + + auto negOne = rewriter.create( + op.getLoc(), adaptor.getInput().getType(), -1); + auto flipped = rewriter.create( + op.getLoc(), adaptor.getInput(), negOne); + + auto select = rewriter.create( + op.getLoc(), isNeg, flipped, adaptor.getInput()); + + auto resTy = getTypeConverter()->convertType(op.getType()); + auto clz = createLLVMBitOp(op.getLoc(), "llvm.ctlz", resTy, select, + /*poisonZeroInputFlag=*/false, rewriter); + + auto one = rewriter.create(op.getLoc(), resTy, 1); + auto res = rewriter.create(op.getLoc(), clz, one); + rewriter.replaceOp(op, res); + + return mlir::LogicalResult::success(); + } +}; + +class CIRBitClzOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitClzOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto llvmOp = + createLLVMBitOp(op.getLoc(), "llvm.ctlz", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/true, rewriter); + rewriter.replaceOp(op, llvmOp); + return mlir::LogicalResult::success(); + } +}; + +class CIRBitCtzOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitCtzOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto llvmOp = + createLLVMBitOp(op.getLoc(), "llvm.cttz", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/true, rewriter); + rewriter.replaceOp(op, llvmOp); + return mlir::LogicalResult::success(); + } +}; + +class CIRBitFfsOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitFfsOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto ctz = + createLLVMBitOp(op.getLoc(), "llvm.cttz", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/false, rewriter); + + auto one = rewriter.create(op.getLoc(), resTy, 1); + auto ctzAddOne = rewriter.create(op.getLoc(), ctz, one); + + auto zeroInputTy = rewriter.create( + op.getLoc(), adaptor.getInput().getType(), 0); + auto isZero = rewriter.create( + op.getLoc(), + mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), + mlir::LLVM::ICmpPredicate::eq), + adaptor.getInput(), zeroInputTy); + + auto zero = rewriter.create(op.getLoc(), resTy, 0); + auto res = rewriter.create(op.getLoc(), isZero, zero, + ctzAddOne); + rewriter.replaceOp(op, res); + + return mlir::LogicalResult::success(); + } +}; + +class CIRBitParityOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitParityOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto popcnt = + createLLVMBitOp(op.getLoc(), "llvm.ctpop", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/std::nullopt, rewriter); + + auto one = rewriter.create(op.getLoc(), resTy, 1); + auto popcntMod2 = + rewriter.create(op.getLoc(), popcnt, one); + rewriter.replaceOp(op, popcntMod2); + + return mlir::LogicalResult::success(); + } +}; + +class CIRBitPopcountOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitPopcountOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto llvmOp = + createLLVMBitOp(op.getLoc(), "llvm.ctpop", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/std::nullopt, rewriter); + rewriter.replaceOp(op, llvmOp); + return mlir::LogicalResult::success(); + } +}; + class CIRBrOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -2353,7 +2534,9 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); patterns.add< - CIRCmpOpLowering, CIRLoopOpInterfaceLowering, CIRBrCondOpLowering, + CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, + CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, + CIRBitPopcountOpLowering, CIRLoopOpInterfaceLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, diff --git a/clang/test/CIR/Lowering/bit.cir b/clang/test/CIR/Lowering/bit.cir new file mode 100644 index 000000000000..425248c66821 --- /dev/null +++ b/clang/test/CIR/Lowering/bit.cir @@ -0,0 +1,206 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!u16i = !cir.int +!u32i = !cir.int +!u64i = !cir.int + +cir.func @clrsb_s32(%arg : !s32i) { + %0 = cir.bit.clrsb(%arg : !s32i) : !s32i + cir.return +} + +// CHECK: llvm.func @clrsb_s32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %1 = llvm.icmp "slt" %arg0, %0 : i32 +// CHECK-NEXT: %2 = llvm.mlir.constant(-1 : i32) : i32 +// CHECK-NEXT: %3 = llvm.xor %arg0, %2 : i32 +// CHECK-NEXT: %4 = llvm.select %1, %3, %arg0 : i1, i32 +// CHECK-NEXT: %5 = llvm.mlir.constant(false) : i1 +// CHECK-NEXT: %6 = llvm.call_intrinsic "llvm.ctlz.i32"(%4, %5) : (i32, i1) -> i32 +// CHECK-NEXT: %7 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %8 = llvm.sub %6, %7 : i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @clrsb_s64(%arg : !s64i) { + %0 = cir.bit.clrsb(%arg : !s64i) : !s32i + cir.return +} + +// CHECK: llvm.func @clrsb_s64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.mlir.constant(0 : i64) : i64 +// CHECK-NEXT: %1 = llvm.icmp "slt" %arg0, %0 : i64 +// CHECK-NEXT: %2 = llvm.mlir.constant(-1 : i64) : i64 +// CHECK-NEXT: %3 = llvm.xor %arg0, %2 : i64 +// CHECK-NEXT: %4 = llvm.select %1, %3, %arg0 : i1, i64 +// CHECK-NEXT: %5 = llvm.mlir.constant(false) : i1 +// CHECK-NEXT: %6 = llvm.call_intrinsic "llvm.ctlz.i64"(%4, %5) : (i64, i1) -> i64 +// CHECK-NEXT: %7 = llvm.trunc %6 : i64 to i32 +// CHECK-NEXT: %8 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %9 = llvm.sub %7, %8 : i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @clz_u16(%arg : !u16i) { + %0 = cir.bit.clz(%arg : !u16i) : !s32i + cir.return +} + +// CHECK: llvm.func @clz_u16(%arg0: i16) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.ctlz.i16"(%arg0, %0) : (i16, i1) -> i16 +// CHECK-NEXT: %2 = llvm.zext %1 : i16 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @clz_u32(%arg : !u32i) { + %0 = cir.bit.clz(%arg : !u32i) : !s32i + cir.return +} + +// CHECK: llvm.func @clz_u32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.ctlz.i32"(%arg0, %0) : (i32, i1) -> i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @clz_u64(%arg : !u64i) { + %0 = cir.bit.clz(%arg : !u64i) : !s32i + cir.return +} + +// CHECK: llvm.func @clz_u64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.ctlz.i64"(%arg0, %0) : (i64, i1) -> i64 +// CHECK-NEXT: %2 = llvm.trunc %1 : i64 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @ctz_u16(%arg : !u16i) { + %0 = cir.bit.ctz(%arg : !u16i) : !s32i + cir.return +} + +// CHECK: llvm.func @ctz_u16(%arg0: i16) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.cttz.i16"(%arg0, %0) : (i16, i1) -> i16 +// CHECK-NEXT: %2 = llvm.zext %1 : i16 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @ctz_u32(%arg : !u32i) { + %0 = cir.bit.ctz(%arg : !u32i) : !s32i + cir.return +} + +// CHECK: llvm.func @ctz_u32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.cttz.i32"(%arg0, %0) : (i32, i1) -> i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @ctz_u64(%arg : !u64i) { + %0 = cir.bit.ctz(%arg : !u64i) : !s32i + cir.return +} + +// CHECK: llvm.func @ctz_u64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.mlir.constant(true) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.cttz.i64"(%arg0, %0) : (i64, i1) -> i64 +// CHECK-NEXT: %2 = llvm.trunc %1 : i64 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @ffs_s32(%arg : !s32i) { + %0 = cir.bit.ffs(%arg : !s32i) : !s32i + cir.return +} + +// CHECK: llvm.func @ffs_s32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.mlir.constant(false) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.cttz.i32"(%arg0, %0) : (i32, i1) -> i32 +// CHECK-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %3 = llvm.add %1, %2 : i32 +// CHECK-NEXT: %4 = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %5 = llvm.icmp "eq" %arg0, %4 : i32 +// CHECK-NEXT: %6 = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %7 = llvm.select %5, %6, %3 : i1, i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @ffs_s64(%arg : !s64i) { + %0 = cir.bit.ffs(%arg : !s64i) : !s32i + cir.return +} + +// CHECK: llvm.func @ffs_s64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.mlir.constant(false) : i1 +// CHECK-NEXT: %1 = llvm.call_intrinsic "llvm.cttz.i64"(%arg0, %0) : (i64, i1) -> i64 +// CHECK-NEXT: %2 = llvm.trunc %1 : i64 to i32 +// CHECK-NEXT: %3 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %4 = llvm.add %2, %3 : i32 +// CHECK-NEXT: %5 = llvm.mlir.constant(0 : i64) : i64 +// CHECK-NEXT: %6 = llvm.icmp "eq" %arg0, %5 : i64 +// CHECK-NEXT: %7 = llvm.mlir.constant(0 : i32) : i32 +// CHECK-NEXT: %8 = llvm.select %6, %7, %4 : i1, i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @parity_s32(%arg : !u32i) { + %0 = cir.bit.parity(%arg : !u32i) : !s32i + cir.return +} + +// CHECK: llvm.func @parity_s32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.call_intrinsic "llvm.ctpop.i32"(%arg0) : (i32) -> i32 +// CHECK-NEXT: %1 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %2 = llvm.and %0, %1 : i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @parity_s64(%arg : !u64i) { + %0 = cir.bit.parity(%arg : !u64i) : !s32i + cir.return +} + +// CHECK: llvm.func @parity_s64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.call_intrinsic "llvm.ctpop.i64"(%arg0) : (i64) -> i64 +// CHECK-NEXT: %1 = llvm.trunc %0 : i64 to i32 +// CHECK-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 +// CHECK-NEXT: %3 = llvm.and %1, %2 : i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @popcount_u16(%arg : !u16i) { + %0 = cir.bit.popcount(%arg : !u16i) : !s32i + cir.return +} + +// CHECK: llvm.func @popcount_u16(%arg0: i16) +// CHECK-NEXT: %0 = llvm.call_intrinsic "llvm.ctpop.i16"(%arg0) : (i16) -> i16 +// CHECK-NEXT: %1 = llvm.zext %0 : i16 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @popcount_u32(%arg : !u32i) { + %0 = cir.bit.popcount(%arg : !u32i) : !s32i + cir.return +} + +// CHECK: llvm.func @popcount_u32(%arg0: i32) +// CHECK-NEXT: %0 = llvm.call_intrinsic "llvm.ctpop.i32"(%arg0) : (i32) -> i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } + +cir.func @popcount_u64(%arg : !u64i) { + %0 = cir.bit.popcount(%arg : !u64i) : !s32i + cir.return +} + +// CHECK: llvm.func @popcount_u64(%arg0: i64) +// CHECK-NEXT: %0 = llvm.call_intrinsic "llvm.ctpop.i64"(%arg0) : (i64) -> i64 +// CHECK-NEXT: %1 = llvm.trunc %0 : i64 to i32 +// CHECK-NEXT: llvm.return +// CHECK-NEXT: } From 34efb68da001c7bde36945e553b79c31cdfad617 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Tue, 12 Mar 2024 20:47:06 +0300 Subject: [PATCH 1443/2301] [CIR][CIRGen] Enable support of bool increment (#493) CIRGenFunction::buildFromMemory can handle the `cir.bool` values. So we no longer need to emit the `NIY` error here. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 ---- clang/test/CIR/CodeGen/inc-bool.cpp | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/inc-bool.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 43fbc3a39755..53dbd1dbb724 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -368,10 +368,6 @@ class ScalarExprEmitter : public StmtVisitor { // An interesting aspect of this is that increment is always true. // Decrement does not have this property. if (isInc && type->isBooleanType()) { - llvm_unreachable("inc simplification for booleans not implemented yet"); - - // NOTE: We likely want the code below, but loading/store booleans need to - // work first. See CIRGenFunction::buildFromMemory(). value = Builder.create( CGF.getLoc(E->getExprLoc()), CGF.getCIRType(type), Builder.getCIRBoolAttr(true)); diff --git a/clang/test/CIR/CodeGen/inc-bool.cpp b/clang/test/CIR/CodeGen/inc-bool.cpp new file mode 100644 index 000000000000..05c3bb54aca3 --- /dev/null +++ b/clang/test/CIR/CodeGen/inc-bool.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++14 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void foo(bool x) { + x++; +} + +// CHECK: cir.func @_Z3foob(%arg0: !cir.bool loc({{.*}})) +// CHECK: [[ALLOC_X:%.*]] = cir.alloca !cir.bool, cir.ptr , ["x", init] {alignment = 1 : i64} +// CHECK: cir.store %arg0, [[ALLOC_X]] : !cir.bool, cir.ptr +// CHECK: {{.*}} = cir.load [[ALLOC_X]] : cir.ptr , !cir.bool +// CHECK: [[TRUE:%.*]] = cir.const(#true) : !cir.bool +// CHECK: cir.store [[TRUE]], [[ALLOC_X]] : !cir.bool, cir.ptr +// CHECK: cir.return From 402ed0a897ac589d0950e2f9ef1f6773d7a8a670 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 12 Mar 2024 20:53:37 +0300 Subject: [PATCH 1444/2301] [CIR][CIRGen] Inline asm: operand attributes (#491) This is the next step in inline assembly support and it's more like a service PR and mostly dedicated to the in/out argument types. Also, operand attributes are added and it's the last change in the `cir.asm` operation afaik. But I would wait untill the next PR, which will contain more examples and maybe will help us to get more readable format for the operation. Note, that we have to add an attribute for each operand - because the lowering of the llvm dialect to LLVM IR iterates over them in the same order. The next PR will be last one (so far) in the series of PRs dedicated to the inline assembly support. It will add storing of the results. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 11 +- clang/lib/CIR/CodeGen/Address.h | 5 + clang/lib/CIR/CodeGen/CIRAsm.cpp | 146 ++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 15 +- .../CodeGen/UnimplementedFeatureGuarding.h | 6 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 19 +++ clang/test/CIR/CodeGen/asm.c | 12 +- clang/test/CIR/Lowering/asm.cir | 4 +- 8 files changed, 186 insertions(+), 32 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7dab06b35cd6..8b8dbe1a2222 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3093,6 +3093,10 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { - the output variable index referenced by the input operands. - the index of early-clobber operand + Operand attributes is a storage of attributes, where each element corresponds + to the operand with the same index. The first index relates to the operation + result. + Example: ```C++ __asm__("foo" : : : ); @@ -3119,17 +3123,20 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { StrAttr:$asm_string, StrAttr:$constraints, UnitAttr:$side_effects, - AsmFlavor:$asm_flavor); + AsmFlavor:$asm_flavor, + OptionalAttr:$operand_attrs); let assemblyFormat = [{ `(` $asm_flavor`,` `{` $asm_string $constraints `}` `)` + (`operand_attrs` `=` $operand_attrs^)? (`side_effects` $side_effects^)? attr-dict operands `:` functional-type(operands, results) - }]; + }]; + } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 3213c6a633bc..e67f640a911f 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -93,6 +93,11 @@ class Address { return Alignment; } + /// Return the type of the pointer value. + mlir::cir::PointerType getType() const { + return getPointer().getType().cast(); + } + mlir::Type getElementType() const { assert(isValid()); return ElementType; diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 67c8d9e1b0c8..59b2a058527c 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -3,11 +3,16 @@ #include "CIRGenFunction.h" #include "TargetInfo.h" +#include "UnimplementedFeatureGuarding.h" using namespace cir; using namespace clang; using namespace mlir::cir; +static bool isAggregateType(mlir::Type typ) { + return isa(typ); +} + static AsmFlavor inferFlavor(const CIRGenModule &cgm, const AsmStmt &S) { AsmFlavor GnuAsmFlavor = cgm.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT @@ -195,13 +200,13 @@ static void collectInOutConstrainsInfos(const CIRGenFunction &cgf, } } -mlir::Value CIRGenFunction::buildAsmInputLValue( +std::pair CIRGenFunction::buildAsmInputLValue( const TargetInfo::ConstraintInfo &Info, LValue InputValue, QualType InputType, std::string &ConstraintStr, SourceLocation Loc) { if (Info.allowsRegister() || !Info.allowsMemory()) { if (hasScalarEvaluationKind(InputType)) - return buildLoadOfLValue(InputValue, Loc).getScalarVal(); + return {buildLoadOfLValue(InputValue, Loc).getScalarVal(), mlir::Type()}; mlir::Type Ty = convertType(InputType); uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); @@ -209,17 +214,18 @@ mlir::Value CIRGenFunction::buildAsmInputLValue( getTargetHooks().isScalarizableAsmOperand(*this, Ty)) { Ty = mlir::cir::IntType::get(builder.getContext(), Size, false); - return builder.createLoad(getLoc(Loc), - InputValue.getAddress().withElementType(Ty)); + return {builder.createLoad(getLoc(Loc), + InputValue.getAddress().withElementType(Ty)), + mlir::Type()}; } } Address Addr = InputValue.getAddress(); ConstraintStr += '*'; - return Addr.getPointer(); + return {Addr.getPointer(), Addr.getElementType()}; } -mlir::Value +std::pair CIRGenFunction::buildAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr, std::string &ConstraintStr) { @@ -235,19 +241,19 @@ CIRGenFunction::buildAsmInput(const TargetInfo::ConstraintInfo &Info, llvm::APSInt IntResult; if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(), getContext())) - return builder.getConstAPSInt(loc, IntResult); + return {builder.getConstAPSInt(loc, IntResult), mlir::Type()}; } Expr::EvalResult Result; if (InputExpr->EvaluateAsInt(Result, getContext())) - builder.getConstAPSInt(loc, Result.Val.getInt()); + return {builder.getConstAPSInt(loc, Result.Val.getInt()), mlir::Type()}; } if (Info.allowsRegister() || !Info.allowsMemory()) if (CIRGenFunction::hasScalarEvaluationKind(InputExpr->getType())) - return buildScalarExpr(InputExpr); + return {buildScalarExpr(InputExpr), nullptr}; if (InputExpr->getStmtClass() == Expr::CXXThisExprClass) - return buildScalarExpr(InputExpr); + return {buildScalarExpr(InputExpr), nullptr}; InputExpr = InputExpr->IgnoreParenNoopCasts(getContext()); LValue Dest = buildLValue(InputExpr); return buildAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, @@ -265,12 +271,21 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { InputConstraintInfos); std::string Constraints; + std::vector ResultRegDests; + std::vector ResultRegQualTys; std::vector ResultRegTypes; + std::vector ResultTruncRegTypes; + std::vector ArgTypes; + std::vector ArgElemTypes; std::vector Args; + llvm::BitVector ResultTypeRequiresCast; + llvm::BitVector ResultRegIsFlagReg; // Keep track of input constraints. std::string InOutConstraints; std::vector InOutArgs; + std::vector InOutArgTypes; + std::vector InOutArgElemTypes; // Keep track of out constraints for tied input operand. std::vector OutputConstraints; @@ -312,6 +327,58 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { hasScalarEvaluationKind(QTy) || hasAggregateEvaluationKind(QTy); if (!Info.allowsMemory() && IsScalarOrAggregate) { Constraints += "=" + OutputConstraint; + ResultRegQualTys.push_back(QTy); + ResultRegDests.push_back(Dest); + + bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc"); + ResultRegIsFlagReg.push_back(IsFlagReg); + + mlir::Type Ty = convertTypeForMem(QTy); + const bool RequiresCast = + Info.allowsRegister() && + (getTargetHooks().isScalarizableAsmOperand(*this, Ty) || + isAggregateType(Ty)); + + ResultTruncRegTypes.push_back(Ty); + ResultTypeRequiresCast.push_back(RequiresCast); + + if (RequiresCast) { + unsigned Size = getContext().getTypeSize(QTy); + Ty = mlir::cir::IntType::get(builder.getContext(), Size, false); + } + ResultRegTypes.push_back(Ty); + // If this output is tied to an input, and if the input is larger, then + // we need to set the actual result type of the inline asm node to be the + // same as the input type. + if (Info.hasMatchingInput()) { + unsigned InputNo; + for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) { + TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo]; + if (Input.hasTiedOperand() && Input.getTiedOperand() == i) + break; + } + assert(InputNo != S.getNumInputs() && "Didn't find matching input!"); + + QualType InputTy = S.getInputExpr(InputNo)->getType(); + QualType OutputType = OutExpr->getType(); + + uint64_t InputSize = getContext().getTypeSize(InputTy); + if (getContext().getTypeSize(OutputType) < InputSize) { + // Form the asm to return the value as a larger integer or fp type. + ResultRegTypes.back() = ConvertType(InputTy); + } + } + if (mlir::Type AdjTy = getTargetHooks().adjustInlineAsmType( + *this, OutputConstraint, ResultRegTypes.back())) + ResultRegTypes.back() = AdjTy; + else { + CGM.getDiags().Report(S.getAsmLoc(), + diag::err_asm_invalid_type_in_input) + << OutExpr->getType() << OutputConstraint; + } + + // Update largest vector width for any vector types. + assert(!UnimplementedFeature::asm_vector_type()); } else { Address DestAddr = Dest.getAddress(); @@ -323,16 +390,21 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { if (isa(OutExpr->getType().getCanonicalType())) DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType())); + ArgTypes.push_back(DestAddr.getType()); + ArgElemTypes.push_back(DestAddr.getElementType()); Args.push_back(DestAddr.getPointer()); Constraints += "=*"; Constraints += OutputConstraint; + ReadOnly = ReadNone = false; } if (Info.isReadWrite()) { InOutConstraints += ','; const Expr *InputExpr = S.getOutputExpr(i); - mlir::Value Arg = + mlir::Value Arg; + mlir::Type ArgElemType; + std::tie(Arg, ArgElemType) = buildAsmInputLValue(Info, Dest, InputExpr->getType(), InOutConstraints, InputExpr->getExprLoc()); @@ -346,6 +418,8 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { else InOutConstraints += OutputConstraint; + InOutArgTypes.push_back(Arg.getType()); + InOutArgElemTypes.push_back(ArgElemType); InOutArgs.push_back(Arg); } } // iterate over output operands @@ -368,7 +442,9 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { getTarget(), CGM, S, false /* No EarlyClobber */); std::string ReplaceConstraint(InputConstraint); - mlir::Value Arg = buildAsmInput(Info, InputExpr, Constraints); + mlir::Value Arg; + mlir::Type ArgElemType; + std::tie(Arg, ArgElemType) = buildAsmInput(Info, InputExpr, Constraints); // If this input argument is tied to a larger output result, extend the // input to be the same size as the output. The LLVM backend wants to see @@ -405,12 +481,16 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input) << InputExpr->getType() << InputConstraint; + ArgTypes.push_back(Arg.getType()); + ArgElemTypes.push_back(ArgElemType); Args.push_back(Arg); Constraints += InputConstraint; } // iterate over input operands // Append the "input" part of inout constraints. for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) { + ArgTypes.push_back(InOutArgTypes[i]); + ArgElemTypes.push_back(InOutArgElemTypes[i]); Args.push_back(InOutArgs[i]); } Constraints += InOutConstraints; @@ -430,9 +510,43 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; - builder.create(getLoc(S.getAsmLoc()), ResultType, - Args, AsmString, Constraints, - HasSideEffect, inferFlavor(CGM, S)); + auto IA = builder.create( + getLoc(S.getAsmLoc()), ResultType, Args, AsmString, Constraints, + HasSideEffect, inferFlavor(CGM, S), mlir::ArrayAttr()); + + if (false /*IsGCCAsmGoto*/) { + assert(!UnimplementedFeature::asm_goto()); + } else if (HasUnwindClobber) { + assert(!UnimplementedFeature::asm_unwind_clobber()); + } else { + assert(!UnimplementedFeature::asm_memory_effects()); + + mlir::Value result; + if (IA.getNumResults()) + result = IA.getResult(0); + + std::vector operandAttrs; + + // this is for the lowering to LLVM from LLVm dialect. Otherwise, if we + // don't have the result (i.e. void type as a result of operation), the + // element type attribute will be attached to the whole instruction, but not + // to the operand + if (!IA.getNumResults()) + operandAttrs.push_back(OptNoneAttr::get(builder.getContext())); + + for (auto typ : ArgElemTypes) { + if (typ) { + operandAttrs.push_back(mlir::TypeAttr::get(typ)); + } else { + // We need to add an attribute for every arg since later, during + // the lowering to LLVM IR the attributes will be assigned to the + // CallInsn argument by index, i.e. we can't skip null type here + operandAttrs.push_back(OptNoneAttr::get(builder.getContext())); + } + } + + IA.setOperandAttrsAttr(builder.getArrayAttr(operandAttrs)); + } return mlir::success(); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index f8bcbd8c0f60..a02f8012ee22 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -951,13 +951,14 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildAsmStmt(const clang::AsmStmt &S); - mlir::Value buildAsmInputLValue(const TargetInfo::ConstraintInfo &Info, - LValue InputValue, QualType InputType, - std::string &ConstraintStr, - SourceLocation Loc); - - mlir::Value buildAsmInput(const TargetInfo::ConstraintInfo &Info, - const Expr *InputExpr, std::string &ConstraintStr); + std::pair + buildAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue, + QualType InputType, std::string &ConstraintStr, + SourceLocation Loc); + + std::pair + buildAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr, + std::string &ConstraintStr); mlir::LogicalResult buildIfStmt(const clang::IfStmt &S); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index ba90b108f9f3..3924ced09713 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -167,6 +167,12 @@ struct UnimplementedFeature { static bool escapedLocals() { return false; } static bool deferredReplacements() { return false; } static bool shouldInstrumentFunction() { return false; } + + // Inline assembly + static bool asm_goto() { return false; } + static bool asm_unwind_clobber() { return false; } + static bool asm_memory_effects() { return false; } + static bool asm_vector_type() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ef2e038bb405..93c52bac176b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2518,6 +2518,25 @@ class CIRInlineAsmOpLowering : mlir::LLVM::AsmDialect::AD_Intel; std::vector opAttrs; + auto llvmAttrName = mlir::LLVM::InlineAsmOp::getElementTypeAttrName(); + + if (auto operandAttrs = op.getOperandAttrs()) { + for (auto attr : *operandAttrs) { + if (isa(attr)) { + opAttrs.push_back(mlir::Attribute()); + continue; + } + + mlir::TypeAttr tAttr = cast(attr); + std::vector attrs; + auto typAttr = mlir::TypeAttr::get( + getTypeConverter()->convertType(tAttr.getValue())); + + attrs.push_back(rewriter.getNamedAttr(llvmAttrName, typAttr)); + auto newDict = rewriter.getDictionaryAttr(attrs); + opAttrs.push_back(newDict); + } + } rewriter.replaceOpWithNewOp( op, llResTy, adaptor.getOperands(), op.getAsmStringAttr(), diff --git a/clang/test/CIR/CodeGen/asm.c b/clang/test/CIR/CodeGen/asm.c index 228840d8d750..8d72101d4429 100644 --- a/clang/test/CIR/CodeGen/asm.c +++ b/clang/test/CIR/CodeGen/asm.c @@ -1,32 +1,32 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -//CHECK: cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects : () -> () +//CHECK: cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone] side_effects : () -> () void empty1() { __asm__ volatile("" : : : ); } -//CHECK: cir.asm(x86_att, {"xyz" "~{dirflag},~{fpsr},~{flags}"}) side_effects : () -> () +//CHECK: cir.asm(x86_att, {"xyz" "~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone] side_effects : () -> () void empty2() { __asm__ volatile("xyz" : : : ); } -//CHECK: cir.asm(x86_att, {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0, %0 : (!cir.ptr, !cir.ptr) -> () +//CHECK: cir.asm(x86_att, {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone, !s32i, !s32i] side_effects %0, %0 : (!cir.ptr, !cir.ptr) -> () void t1(int x) { __asm__ volatile("" : "+m"(x)); } -//CHECK: cir.asm(x86_att, {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0 : (!cir.ptr) -> () +//CHECK: cir.asm(x86_att, {"" "*m,~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone, !s32i] side_effects %0 : (!cir.ptr) -> () void t2(int x) { __asm__ volatile("" : : "m"(x)); } -//CHECK: cir.asm(x86_att, {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0 : (!cir.ptr) -> () +//CHECK: cir.asm(x86_att, {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone, !s32i] side_effects %0 : (!cir.ptr) -> () void t3(int x) { __asm__ volatile("" : "=m"(x)); } -//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects %1 : (!s32i) -> () +//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone] side_effects %1 : (!s32i) -> !ty_22anon2E022 void t4(int x) { __asm__ volatile("" : "=&r"(x), "+&r"(x)); } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/asm.cir b/clang/test/CIR/Lowering/asm.cir index 47960e105f94..309bd22ae02e 100644 --- a/clang/test/CIR/Lowering/asm.cir +++ b/clang/test/CIR/Lowering/asm.cir @@ -26,7 +26,9 @@ module { %1 = cir.load %0 : cir.ptr , !s32i cir.asm(x86_att, {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects %1 : (!s32i) -> () // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}" %2 : (i32) -> () - + + cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [!s32i] side_effects : () -> () + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [{elementtype = i32}] "", "~{dirflag},~{fpsr},~{flags}" : () -> () cir.return } From 7c5f6fd26126cab8e7cc892f8ac049874385ee2c Mon Sep 17 00:00:00 2001 From: David Olsen Date: Tue, 12 Mar 2024 10:56:28 -0700 Subject: [PATCH 1445/2301] [CIR] Vector type cleanup and refactoring (#503) Three small changes, all cleanup or refactoring in nature. 1. Fix the assemblyFormat for all the vector operations in the ClangIR dialect so that vector types appear in ClangIR as `!cir.vector` instead of as just ``. When I first created the vector ops, I forgot to use `qualified` as necessary when writing out types. This change fixes that. There is no change in behavior, but there is a change to the text version of ClangIR, which required changing the ClangIR expected results and ClangIR inputs in the tests. 2. Create a new `cir.vec.splat` operation and use that for "vector splat", i.e. a conversion from a scalar to a vector. A "vector splat" conversion had been implemented with `cir.vec.create` before. This change results in different ClangIR and different LLVM IR, which again required updating the tests, but no noticeable change in compiler behavior. 3. Create an `IntegerVector` type constraint, which requires that the given type be a vector whose element type is an integer. It can be any integral type, and the vector can be of any size. Use the new type constraint in the definition of `cir.vec.ternary`, whose condition operand must be an `IntegerVector`. Remove the integral type check from `VecTernaryOp::verify`, since doing the check there is now redundant. The only possibly visible change is to the text of an error message when validation of `cir.vec.ternary` fails. The expected output of a validation test was updated with the new message. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 44 +++++++++++++--- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 9 ++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 11 +--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 ++---- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 33 +++++++++++- clang/test/CIR/CodeGen/vectype.cpp | 50 +++++++++---------- clang/test/CIR/IR/invalid.cir | 38 +++++++------- clang/test/CIR/Lowering/vectype.cpp | 7 +++ 8 files changed, 137 insertions(+), 69 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8b8dbe1a2222..b8b8308e43ed 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2008,7 +2008,8 @@ def VecInsertOp : CIR_Op<"vec.insert", [Pure, let results = (outs CIR_VectorType:$result); let assemblyFormat = [{ - $value `,` $vec `[` $index `:` type($index) `]` attr-dict `:` type($vec) + $value `,` $vec `[` $index `:` type($index) `]` attr-dict `:` + qualified(type($vec)) }]; let hasVerifier = 0; @@ -2032,7 +2033,7 @@ def VecExtractOp : CIR_Op<"vec.extract", [Pure, let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ - $vec `[` $index `:` type($index) `]` attr-dict `:` type($vec) + $vec `[` $index `:` type($index) `]` attr-dict `:` qualified(type($vec)) }]; let hasVerifier = 0; @@ -2055,12 +2056,41 @@ def VecCreateOp : CIR_Op<"vec.create", [Pure]> { let results = (outs CIR_VectorType:$result); let assemblyFormat = [{ - `(` ($elements^ `:` type($elements))? `)` `:` type($result) attr-dict + `(` ($elements^ `:` type($elements))? `)` `:` qualified(type($result)) + attr-dict }]; let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// VecSplat +//===----------------------------------------------------------------------===// + +// cir.vec.splat is a separate operation from cir.vec.create because more +// efficient LLVM IR can be generated for it, and because some optimization and +// analysis passes can benefit from knowing that all elements of the vector +// have the same value. + +def VecSplatOp : CIR_Op<"vec.splat", [Pure, + TypesMatchWith<"type of 'value' matches element type of 'result'", "result", + "value", "$_self.cast().getEltType()">]> { + + let summary = "Convert a scalar into a vector"; + let description = [{ + The `cir.vec.splat` operation creates a vector value from a scalar value. + All elements of the vector have the same value, that of the given scalar. + }]; + + let arguments = (ins CIR_AnyType:$value); + let results = (outs CIR_VectorType:$result); + + let assemblyFormat = [{ + $value `:` type($value) `,` qualified(type($result)) attr-dict + }]; + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // VecCmp //===----------------------------------------------------------------------===// @@ -2080,7 +2110,8 @@ def VecCmpOp : CIR_Op<"vec.cmp", [Pure, SameTypeOperands]> { let results = (outs CIR_VectorType:$result); let assemblyFormat = [{ - `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,` type($result) attr-dict + `(` $kind `,` $lhs `,` $rhs `)` `:` qualified(type($lhs)) `,` + qualified(type($result)) attr-dict }]; let hasVerifier = 0; @@ -2107,11 +2138,12 @@ def VecTernaryOp : CIR_Op<"vec.ternary", The result is a vector of the same type as the second and third arguments. Each element of the result is `(bool)a[n] ? b[n] : c[n]`. }]; - let arguments = (ins CIR_VectorType:$cond, CIR_VectorType:$vec1, + let arguments = (ins IntegerVector:$cond, CIR_VectorType:$vec1, CIR_VectorType:$vec2); let results = (outs CIR_VectorType:$result); let assemblyFormat = [{ - `(` $cond `,` $vec1 `,` $vec2 `)` `:` type($cond) `,` type($vec1) attr-dict + `(` $cond `,` $vec1 `,` $vec2 `)` `:` qualified(type($cond)) `,` + qualified(type($vec1)) attr-dict }]; let hasVerifier = 1; } diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index c02ee1bef916..c3a5cf7a05c3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -400,6 +400,15 @@ def ExceptionInfoPtrPtr : Type< "mlir::cir::ExceptionInfoType::get($_builder.getContext())))"> { } +// Vector of integral type +def IntegerVector : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::VectorType>()">, + CPred<"$_self.cast<::mlir::cir::VectorType>()" + ".getEltType().isa<::mlir::cir::IntType>()">, + ]>, "!cir.vector of !cir.int"> { +} + //===----------------------------------------------------------------------===// // StructType (defined in cpp files) //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 53dbd1dbb724..1f78dfd0fa75 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1541,15 +1541,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_VectorSplat: { // Create a vector object and fill all elements with the same scalar value. assert(DestTy->isVectorType() && "CK_VectorSplat to non-vector type"); - mlir::Value Value = Visit(E); - SmallVector Elements; - auto VecType = CGF.getCIRType(DestTy).dyn_cast(); - auto NumElements = VecType.getSize(); - for (uint64_t Index = 0; Index < NumElements; ++Index) { - Elements.push_back(Value); - } - return CGF.getBuilder().create( - CGF.getLoc(E->getSourceRange()), VecType, Elements); + return CGF.getBuilder().create( + CGF.getLoc(E->getSourceRange()), CGF.getCIRType(DestTy), Visit(E)); } case CK_FixedPointCast: llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9c0ef96d055d..d6f41d121a27 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -526,19 +526,15 @@ LogicalResult VecCreateOp::verify() { //===----------------------------------------------------------------------===// LogicalResult VecTernaryOp::verify() { - // Verify that the condition operand is a vector of integral type. - if (!getCond().getType().getEltType().isa()) { - return emitOpError() << "condition operand of type " << getCond().getType() - << " must be a vector type of !cir.int"; - } - // Verify that the condition operand has the same number of elements as the // other operands. (The automatic verification already checked that all // operands are vector types and that the second and third operands are the // same type.) - if (getCond().getType().getSize() != getVec1().getType().getSize()) { - return emitOpError() << "the number of elements in " << getCond().getType() - << " and " << getVec1().getType() << " don't match"; + if (getCond().getType().cast().getSize() != + getVec1().getType().getSize()) { + return emitOpError() << ": the number of elements in " + << getCond().getType() << " and " + << getVec1().getType() << " don't match"; } return success(); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 93c52bac176b..f894baa2c352 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1319,6 +1319,37 @@ class CIRVectorCmpOpLowering } }; +class CIRVectorSplatLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecSplatOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // Vector splat can be implemented with an `insertelement` and a + // `shufflevector`, which is better than an `insertelement` for each + // element in vector. Start with an undef vector. Insert the value into + // the first element. Then use a `shufflevector` with a mask of all 0 to + // fill out the entire vector with that value. + auto vecTy = op.getType().dyn_cast(); + assert(vecTy && "result type of cir.vec.splat op is not VectorType"); + auto llvmTy = typeConverter->convertType(vecTy); + auto loc = op.getLoc(); + mlir::Value undef = rewriter.create(loc, llvmTy); + mlir::Value indexValue = + rewriter.create(loc, rewriter.getI64Type(), 0); + mlir::Value elementValue = adaptor.getValue(); + mlir::Value oneElement = rewriter.create( + loc, undef, elementValue, indexValue); + SmallVector zeroValues(vecTy.getSize(), 0); + mlir::Value shuffled = rewriter.create( + loc, oneElement, undef, zeroValues); + rewriter.replaceOp(op, shuffled); + return mlir::success(); + } +}; + class CIRVectorTernaryLowering : public mlir::OpConversionPattern { public: @@ -2566,7 +2597,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, - CIRVectorExtractLowering, CIRVectorCmpOpLowering, + CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering>( converter, patterns.getContext()); diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index b6af5318cb18..ca93bcb1e8dc 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -10,22 +10,22 @@ void vector_int_test(int x) { // Vector constant. Not yet implemented. Expected results will change from // cir.vec.create to cir.const. vi4 a = { 1, 2, 3, 4 }; - // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : !cir.vector // Non-const vector initialization. vi4 b = { x, 5, 6, x + 1 }; - // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : !cir.vector // Incomplete vector initialization. vi4 bb = { x, x + 1 }; // CHECK: %[[#zero:]] = cir.const(#cir.int<0> : !s32i) : !s32i - // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %[[#zero]], %[[#zero]] : !s32i, !s32i, !s32i, !s32i) : + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %[[#zero]], %[[#zero]] : !s32i, !s32i, !s32i, !s32i) : !cir.vector // Scalar to vector conversion, a.k.a. vector splat. Only valid as an // operand of a binary operator, not as a regular conversion. bb = a + 7; // CHECK: %[[#seven:]] = cir.const(#cir.int<7> : !s32i) : !s32i - // CHECK: %{{[0-9]+}} = cir.vec.create(%[[#seven]], %[[#seven]], %[[#seven]], %[[#seven]] : !s32i, !s32i, !s32i, !s32i) : + // CHECK: %{{[0-9]+}} = cir.vec.splat %[[#seven]] : !s32i, !cir.vector // Vector to vector conversion vd2 bbb = { }; @@ -34,12 +34,12 @@ void vector_int_test(int x) { // Extract element int c = a[x]; - // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : + // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector // Insert element a[x] = x; // CHECK: %[[#LOADEDVI:]] = cir.load %[[#STORAGEVI:]] : cir.ptr >, !cir.vector - // CHECK: %[[#UPDATEDVI:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVI]][%{{[0-9]+}} : !s32i] : + // CHECK: %[[#UPDATEDVI:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVI]][%{{[0-9]+}} : !s32i] : !cir.vector // CHECK: cir.store %[[#UPDATEDVI]], %[[#STORAGEVI]] : !cir.vector, cir.ptr > // Binary arithmetic operations @@ -70,52 +70,52 @@ void vector_int_test(int x) { // Ternary conditional operator vi4 tc = a ? b : d; - // CHECK: %{{[0-9]+}} = cir.vec.ternary(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.ternary(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector // Comparisons vi4 o = a == b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vi4 p = a != b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vi4 q = a < b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vi4 r = a > b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vi4 s = a <= b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vi4 t = a >= b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector } void vector_double_test(int x, double y) { // Vector constant. Not yet implemented. Expected results will change from // cir.vec.create to cir.const. vd2 a = { 1.5, 2.5 }; - // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : !cir.vector // Non-const vector initialization. vd2 b = { y, y + 1.0 }; - // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : !cir.vector // Incomplete vector initialization vd2 bb = { y }; // CHECK: [[#dzero:]] = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double - // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %[[#dzero]] : !cir.double, !cir.double) : + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %[[#dzero]] : !cir.double, !cir.double) : !cir.vector // Scalar to vector conversion, a.k.a. vector splat. Only valid as an // operand of a binary operator, not as a regular conversion. bb = a + 2.5; // CHECK: %[[#twohalf:]] = cir.const(#cir.fp<2.500000e+00> : !cir.double) : !cir.double - // CHECK: %{{[0-9]+}} = cir.vec.create(%[[#twohalf]], %[[#twohalf]] : !cir.double, !cir.double) : + // CHECK: %{{[0-9]+}} = cir.vec.splat %[[#twohalf]] : !cir.double, !cir.vector // Extract element double c = a[x]; - // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : + // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector // Insert element a[x] = y; // CHECK: %[[#LOADEDVF:]] = cir.load %[[#STORAGEVF:]] : cir.ptr >, !cir.vector - // CHECK: %[[#UPDATEDVF:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVF]][%{{[0-9]+}} : !s32i] : + // CHECK: %[[#UPDATEDVF:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVF]][%{{[0-9]+}} : !s32i] : !cir.vector // CHECK: cir.store %[[#UPDATEDVF]], %[[#STORAGEVF]] : !cir.vector, cir.ptr > // Binary arithmetic operations @@ -136,15 +136,15 @@ void vector_double_test(int x, double y) { // Comparisons vll2 o = a == b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vll2 p = a != b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vll2 q = a < b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vll2 r = a > b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vll2 s = a <= b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vll2 t = a >= b; - // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : , + // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 7b3908aa8835..28cae0cc8214 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -361,7 +361,7 @@ module { !s32i = !cir.int cir.func @vec_op_size() { %0 = cir.const(#cir.int<1> : !s32i) : !s32i - %1 = cir.vec.create(%0 : !s32i) : // expected-error {{'cir.vec.create' op operand count of 1 doesn't match vector type '!cir.vector x 2>' element count of 2}} + %1 = cir.vec.create(%0 : !s32i) : !cir.vector // expected-error {{'cir.vec.create' op operand count of 1 doesn't match vector type '!cir.vector x 2>' element count of 2}} cir.return } @@ -372,7 +372,7 @@ cir.func @vec_op_size() { cir.func @vec_op_type() { %0 = cir.const(#cir.int<1> : !s32i) : !s32i %1 = cir.const(#cir.int<2> : !u32i) : !u32i - %2 = cir.vec.create(%0, %1 : !s32i, !u32i) : // expected-error {{'cir.vec.create' op operand type '!cir.int' doesn't match vector element type '!cir.int'}} + %2 = cir.vec.create(%0, %1 : !s32i, !u32i) : !cir.vector // expected-error {{'cir.vec.create' op operand type '!cir.int' doesn't match vector element type '!cir.int'}} cir.return } @@ -382,8 +382,8 @@ cir.func @vec_op_type() { cir.func @vec_extract_non_int_idx() { %0 = cir.const(1.5e+00 : f64) : f64 %1 = cir.const(#cir.int<0> : !s32i) : !s32i - %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : - %3 = cir.vec.extract %2[%0 : f64] : // expected-error {{expected '<'}} + %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : !cir.vector + %3 = cir.vec.extract %2[%0 : f64] : !cir.vector // expected-error {{expected '<'}} cir.return } @@ -394,8 +394,8 @@ cir.func @vec_extract_non_int_idx() { cir.func @vec_extract_bad_type() { %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<0> : !s32i) : !s32i - %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : - %3 = cir.vec.extract %2[%1 : !s32i] : // expected-note {{prior use here}} + %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : !cir.vector + %3 = cir.vec.extract %2[%1 : !s32i] : !cir.vector // expected-note {{prior use here}} cir.store %3, %0 : !u32i, cir.ptr // expected-error {{use of value '%3' expects different type than prior uses: '!cir.int' vs '!cir.int'}} cir.return } @@ -405,7 +405,7 @@ cir.func @vec_extract_bad_type() { !s32i = !cir.int cir.func @vec_extract_non_vector() { %0 = cir.const(#cir.int<0> : !s32i) : !s32i - %1 = cir.vec.extract %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.extract' invalid kind of Type specified}} + %1 = cir.vec.extract %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.extract' 'vec' must be CIR vector type, but got '!cir.int'}} cir.return } @@ -415,9 +415,9 @@ cir.func @vec_extract_non_vector() { !u32i = !cir.int cir.func @vec_insert_bad_type() { %0 = cir.const(#cir.int<0> : !s32i) : !s32i - %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector %2 = cir.const(#cir.int<0> : !u32i) : !u32i // expected-note {{prior use here}} - %3 = cir.vec.insert %2, %1[%0 : !s32i] : // expected-error {{use of value '%2' expects different type than prior uses: '!cir.int' vs '!cir.int'}} + %3 = cir.vec.insert %2, %1[%0 : !s32i] : !cir.vector // expected-error {{use of value '%2' expects different type than prior uses: '!cir.int' vs '!cir.int'}} cir.return } @@ -426,7 +426,7 @@ cir.func @vec_insert_bad_type() { !s32i = !cir.int cir.func @vec_insert_non_vector() { %0 = cir.const(#cir.int<0> : !s32i) : !s32i - %1 = cir.vec.insert %0, %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.insert' invalid kind of Type specified}} + %1 = cir.vec.insert %0, %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.insert' 'vec' must be CIR vector type, but got '!cir.int'}} cir.return } @@ -435,8 +435,8 @@ cir.func @vec_insert_non_vector() { !s32i = !cir.int cir.func @vec_ternary_non_vector1() { %0 = cir.const(#cir.int<0> : !s32i) : !s32i - %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : - %2 = cir.vec.ternary(%0, %1, %1) : !s32i, // expected-error {{custom op 'cir.vec.ternary' invalid kind of Type specified}} + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector + %2 = cir.vec.ternary(%0, %1, %1) : !s32i, !cir.vector // expected-error {{'cir.vec.ternary' op operand #0 must be !cir.vector of !cir.int, but got '!cir.int'}} cir.return } @@ -445,8 +445,8 @@ cir.func @vec_ternary_non_vector1() { !s32i = !cir.int cir.func @vec_ternary_non_vector2() { %0 = cir.const(#cir.int<0> : !s32i) : !s32i - %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : - %2 = cir.vec.ternary(%1, %0, %0) : , !s32i // expected-error {{custom op 'cir.vec.ternary' invalid kind of Type specified}} + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector + %2 = cir.vec.ternary(%1, %0, %0) : !cir.vector, !s32i // expected-error {{'cir.vec.ternary' op operand #1 must be CIR vector type, but got '!cir.int'}} cir.return } @@ -455,17 +455,17 @@ cir.func @vec_ternary_non_vector2() { !s32i = !cir.int cir.func @vec_ternary_different_size() { %0 = cir.const(#cir.int<0> : !s32i) : !s32i - %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : - %2 = cir.vec.create(%0, %0, %0, %0 : !s32i, !s32i, !s32i, !s32i) : - %3 = cir.vec.ternary(%1, %2, %2) : , // expected-error {{'cir.vec.ternary' op the number of elements in '!cir.vector x 2>' and '!cir.vector x 4>' don't match}} + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector + %2 = cir.vec.create(%0, %0, %0, %0 : !s32i, !s32i, !s32i, !s32i) : !cir.vector + %3 = cir.vec.ternary(%1, %2, %2) : !cir.vector, !cir.vector // expected-error {{'cir.vec.ternary' op : the number of elements in '!cir.vector x 2>' and '!cir.vector x 4>' don't match}} cir.return } // ----- cir.func @vec_ternary_not_int(%p : !cir.float) { - %0 = cir.vec.create(%p, %p : !cir.float, !cir.float) : - %1 = cir.vec.ternary(%0, %0, %0) : , // expected-error {{'cir.vec.ternary' op condition operand of type '!cir.vector' must be a vector type of !cir.int}} + %0 = cir.vec.create(%p, %p : !cir.float, !cir.float) : !cir.vector + %1 = cir.vec.ternary(%0, %0, %0) : !cir.vector, !cir.vector // expected-error {{'cir.vec.ternary' op operand #0 must be !cir.vector of !cir.int, but got '!cir.vector'}} cir.return } diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index be1ca98a646d..81e70be19264 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -52,6 +52,13 @@ void vector_int_test(int x) { // CHECK: %[[#bbval:]] = llvm.bitcast %[[#bval]] : vector<4xi32> to vector<2xf64> // CHECK: llvm.store %[[#bbval]], %[[#bbmem:]] : vector<2xf64>, !llvm.ptr + // Scalar to vector conversion, a.k.a. vector splat. + b = a + 7; + // CHECK: %[[#undef:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#zeroInt:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#inserted:]] = llvm.insertelement %[[#seven:]], %[[#undef]][%[[#zeroInt]] : i64] : vector<4xi32> + // CHECK: %[[#shuffled:]] = llvm.shufflevector %[[#inserted]], %[[#undef]] [0, 0, 0, 0] : vector<4xi32> + // Extract element. int c = a[x]; // CHECK: %[[#T58:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> From a45af8653cd91f2263be054c25784554a67c879e Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 12 Mar 2024 20:58:08 +0300 Subject: [PATCH 1446/2301] [CIR][Codegen][Lowering] Introduce new bitfield layout (#487) This PR intends to fix some problems with packed structures support, so the #473 will work. Basically, the main problem for the packed structures support is an absence of arbitrary sized integers in CIR. Well, one workaround is to use `mlir::IntegerType` for that, but it's kind of wrong way (please correct me if I'm wrong). Another way is to introduce this type in CIR. So far I suggest this way: instead of arbitrary sized integers we will create an array of bytes for bitfield storages whenever they doesn't fit into the CIR `IntType`. Well, the original codegen creates storages with alignment 8 - so it can be `i24` storage type for instance. Previously, we just created storages that could be represented as CIR `IntType`: 8, 16, 32, 64. And it was working before I came up with a necessity to support packed structures. At first glance it's not a problem - just add `determinePacked` method from the original codegen and that's it. But it turned out that this method _infers_ the fact if a structure is packed or not. It doesn't use the AST attribute for that as one could think - it works with offsets and alignments of fields. Thus, we either need to invent our own way to determine packed structures (which is error prone and maybe not doable at all) or try to use the existing one. Also, we go closer to the original lllvm's data layout in this case. 1) I had to move the lowering details from the `LoweringPrepare` to the `LowerToLLVM`, because it's not possible to do a `load` from the array of bytes to the integer type - and it's ok in llvm dialect. Thus, all the math operations can be expressed without any problems. Basically the most of the diff you see is because of the changes in the lowering. The remaining part is more or less easy to read. 2) There are minor changes in `CIRRecordLayoutBuilder` - as described above, we use may generate an array of bytes as a storage. 3) Some cosmetic changes in `CIRGenExpr` - since we don't want to infer the storage type again and just use the one stored in the `CIRGenBitFieldInfo`. 4) Helpers are introduced in the lowering - but nothing hard - just shifts and logical ops. 5) I removed `bitfield-ops` test - because now the test cases covered there are all in `bitfields.c` and `bitfields.cpp` . So ... This is still a suggestion, though I believe it's a good one. So you are welcome to discuss, suggest another ways to solve the problem and etc. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 12 ++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 17 +-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 +- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 3 + .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 24 +++- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 5 +- .../Dialect/Transforms/LoweringPrepare.cpp | 112 +-------------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 136 +++++++++++++++++- .../Lowering/DirectToLLVM/LoweringHelpers.h | 74 ++++++++++ clang/test/CIR/CodeGen/bitfield-ops.c | 52 ------- clang/test/CIR/CodeGen/bitfields.c | 76 +++++----- clang/test/CIR/CodeGen/bitfields.cpp | 53 ++----- clang/test/CIR/CodeGen/const-bitfields.c | 12 +- clang/test/CIR/CodeGen/volatile.cpp | 4 +- clang/test/CIR/Lowering/bitfieils.c | 30 ++++ 15 files changed, 329 insertions(+), 283 deletions(-) create mode 100644 clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h delete mode 100644 clang/test/CIR/CodeGen/bitfield-ops.c create mode 100644 clang/test/CIR/Lowering/bitfieils.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index c3a5cf7a05c3..ad09b201cedc 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -53,6 +53,18 @@ def CIR_IntType : CIR_Type<"Int", "int", std::string getAlias() const { return (isSigned() ? 's' : 'u') + std::to_string(getWidth()) + 'i'; }; + + /// Returns a minimum bitwidth of cir::IntType + static unsigned minBitwidth() { return 8; } + /// Returns a maximum bitwidth of cir::IntType + static unsigned maxBitwidth() { return 64; } + + /// Returns true if cir::IntType can be constructed from the provided bitwidth + static bool isValidBitwidth(unsigned width) { + return width >= minBitwidth() + && width <= maxBitwidth() + && llvm::isPowerOf2_32(width); + } }]; let genVerifyDecl = 1; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 9f403d74378a..3c880506fe1c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -226,19 +226,15 @@ static bool isAAPCS(const TargetInfo &TargetInfo) { Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base, const FieldDecl *field, - unsigned index, - unsigned size) { + mlir::Type fieldType, + unsigned index) { if (index == 0) return base.getAddress(); - auto loc = getLoc(field->getLocation()); - auto fieldType = builder.getUIntNTy(size); - auto fieldPtr = mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); auto sea = getBuilder().createGetMember(loc, fieldPtr, base.getPointer(), field->getName(), index); - return Address(sea, CharUnits::One()); } @@ -268,14 +264,11 @@ LValue CIRGenFunction::buildLValueForBitField(LValue base, llvm_unreachable("NYI"); } - const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize; - Address Addr = getAddrOfBitFieldStorage(base, field, Idx, SS); - // Get the access type. - mlir::Type FieldIntTy = builder.getUIntNTy(SS); + Address Addr = getAddrOfBitFieldStorage(base, field, info.StorageType, Idx); auto loc = getLoc(field->getLocation()); - if (Addr.getElementType() != FieldIntTy) - Addr = builder.createElementBitCast(loc, Addr, FieldIntTy); + if (Addr.getElementType() != info.StorageType) + Addr = builder.createElementBitCast(loc, Addr, info.StorageType); QualType fieldType = field->getType().withCVRQualifiers(base.getVRQualifiers()); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index a02f8012ee22..eee9c8509f60 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1470,7 +1470,7 @@ class CIRGenFunction : public CIRGenTypeCache { } Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, - unsigned index, unsigned size); + mlir::Type fieldType, unsigned index); /// Given an opaque value expression, return its LValue mapping if it exists, /// otherwise create one. diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index fc198776511e..16a8a1e2894e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -94,6 +94,9 @@ struct CIRGenBitFieldInfo { /// The name of a bitfield llvm::StringRef Name; + // The actual storage type for the bitfield + mlir::Type StorageType; + CIRGenBitFieldInfo() : Offset(), Size(), IsSigned(), StorageSize(), VolatileOffset(), VolatileStorageSize() {} diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 48146b3caa20..cf89319b9c62 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -151,6 +151,20 @@ struct CIRRecordLowering final { numberOfChars.getQuantity()); } + // This is different from LLVM traditional codegen because CIRGen uses arrays + // of bytes instead of arbitrary-sized integers. This is important for packed + // structures support. + mlir::Type getBitfieldStorageType(unsigned numBits) { + unsigned alignedBits = llvm::alignTo(numBits, astContext.getCharWidth()); + if (mlir::cir::IntType::isValidBitwidth(alignedBits)) { + return builder.getUIntNTy(alignedBits); + } else { + mlir::Type type = getCharType(); + return mlir::cir::ArrayType::get(type.getContext(), type, + alignedBits / astContext.getCharWidth()); + } + } + // Gets the llvm Basesubobject type from a CXXRecordDecl. mlir::Type getStorageType(const CXXRecordDecl *RD) { return cirGenTypes.getCIRGenRecordLayout(RD).getBaseSubobjectCIRType(); @@ -228,6 +242,7 @@ void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, Info.Size = FD->getBitWidthValue(); Info.StorageSize = getSizeInBits(StorageType).getQuantity(); Info.StorageOffset = StartOffset; + Info.StorageType = StorageType; Info.Name = FD->getName(); if (Info.Size > Info.StorageSize) @@ -502,12 +517,6 @@ void CIRRecordLowering::accumulateBitFields( auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord, uint64_t StartBitOffset, uint64_t nextTail = 0) { - if (OffsetInRecord >= 64 || - (nextTail > StartBitOffset && - nextTail - StartBitOffset >= 64)) { // See IntType::verify - return true; - } - if (!cirGenTypes.getModule().getCodeGenOpts().FineGrainedBitfieldAccesses) return false; llvm_unreachable("NYI"); @@ -567,7 +576,8 @@ void CIRRecordLowering::accumulateBitFields( } // We've hit a break-point in the run and need to emit a storage field. - auto Type = getUIntNType(Tail - StartBitOffset); + auto Type = getBitfieldStorageType(Tail - StartBitOffset); + // Add the storage member to the record and set the bitfield info for all of // the bitfields in the run. Bitfields get the offset of their storage but // come afterward and remain there after a stable sort. diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 093cea025952..66bce73a9c11 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -641,8 +641,9 @@ mlir::LogicalResult IntType::verify(llvm::function_ref emitError, unsigned width, bool isSigned) { - if (width < 8 || width > 64) { - emitError() << "IntType only supports widths from 8 up to 64"; + if (width < IntType::minBitwidth() || width > IntType::maxBitwidth()) { + emitError() << "IntType only supports widths from " + << IntType::minBitwidth() << "up to " << IntType::maxBitwidth(); return mlir::failure(); } if (width % 8 != 0) { diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 8b592ca1f254..8c1ba9a59f2f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -66,8 +66,6 @@ struct LoweringPreparePass : public LoweringPrepareBase { void runOnOp(Operation *op); void lowerGlobalOp(GlobalOp op); - void lowerGetBitfieldOp(GetBitfieldOp op); - void lowerSetBitfieldOp(SetBitfieldOp op); void lowerStdFindOp(StdFindOp op); void lowerIterBeginOp(IterBeginOp op); void lowerIterEndOp(IterEndOp op); @@ -303,109 +301,6 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { builder.create(f.getLoc()); } -void LoweringPreparePass::lowerGetBitfieldOp(GetBitfieldOp op) { - CIRBaseBuilderTy builder(getContext()); - builder.setInsertionPointAfter(op.getOperation()); - - auto info = op.getBitfieldInfo(); - auto size = info.getSize(); - auto storageType = info.getStorageType(); - auto storageSize = storageType.cast().getWidth(); - auto offset = info.getOffset(); - auto resultTy = op.getType(); - auto addr = op.getAddr(); - auto loc = addr.getLoc(); - mlir::Value val = builder.create( - loc, storageType, op.getAddr(), /* deref */ false, op.getIsVolatile()); - auto valWidth = val.getType().cast().getWidth(); - - if (info.getIsSigned()) { - assert(static_cast(offset + size) <= storageSize); - mlir::Type typ = - mlir::cir::IntType::get(builder.getContext(), valWidth, true); - - val = builder.createIntCast(val, typ); - - unsigned highBits = storageSize - offset - size; - if (highBits) - val = builder.createShiftLeft(val, highBits); - if (offset + highBits) - val = builder.createShiftRight(val, offset + highBits); - } else { - if (offset) - val = builder.createShiftRight(val, offset); - - if (static_cast(offset) + size < storageSize) - val = builder.createAnd(val, llvm::APInt::getLowBitsSet(valWidth, size)); - } - val = builder.createIntCast(val, resultTy); - - op.replaceAllUsesWith(val); - op.erase(); -} - -void LoweringPreparePass::lowerSetBitfieldOp(SetBitfieldOp op) { - CIRBaseBuilderTy builder(getContext()); - builder.setInsertionPointAfter(op.getOperation()); - - auto srcVal = op.getSrc(); - auto addr = op.getDst(); - auto info = op.getBitfieldInfo(); - auto size = info.getSize(); - auto storageType = info.getStorageType(); - auto storageSize = storageType.cast().getWidth(); - auto offset = info.getOffset(); - auto resultTy = op.getType(); - auto loc = addr.getLoc(); - - // Get the source value, truncated to the width of the bit-field. - srcVal = builder.createIntCast(op.getSrc(), storageType); - auto srcWidth = srcVal.getType().cast().getWidth(); - - mlir::Value maskedVal = srcVal; - - if (storageSize != size) { - assert(storageSize > size && "Invalid bitfield size."); - - mlir::Value val = builder.create(loc, storageType, addr); - - srcVal = - builder.createAnd(srcVal, llvm::APInt::getLowBitsSet(srcWidth, size)); - - maskedVal = srcVal; - if (offset) - srcVal = builder.createShiftLeft(srcVal, offset); - - // Mask out the original value. - val = builder.createAnd( - val, ~llvm::APInt::getBitsSet(srcWidth, offset, offset + size)); - - // Or together the unchanged values and the source value. - srcVal = builder.createOr(val, srcVal); - } - - builder.create(loc, srcVal, addr, op.getIsVolatile()); - - if (!op->getUses().empty()) { - mlir::Value resultVal = maskedVal; - resultVal = builder.createIntCast(resultVal, resultTy); - - if (info.getIsSigned()) { - assert(size <= storageSize); - unsigned highBits = storageSize - size; - - if (highBits) { - resultVal = builder.createShiftLeft(resultVal, highBits); - resultVal = builder.createShiftRight(resultVal, highBits); - } - } - - op.replaceAllUsesWith(resultVal); - } - - op.erase(); -} - void LoweringPreparePass::lowerStdFindOp(StdFindOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op.getOperation()); @@ -442,10 +337,6 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { void LoweringPreparePass::runOnOp(Operation *op) { if (auto getGlobal = dyn_cast(op)) { lowerGlobalOp(getGlobal); - } else if (auto getBitfield = dyn_cast(op)) { - lowerGetBitfieldOp(getBitfield); - } else if (auto setBitfield = dyn_cast(op)) { - lowerSetBitfieldOp(setBitfield); } else if (auto stdFind = dyn_cast(op)) { lowerStdFindOp(stdFind); } else if (auto iterBegin = dyn_cast(op)) { @@ -464,8 +355,7 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { - if (isa(op)) + if (isa(op)) opsToTransform.push_back(op); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f894baa2c352..ee65588d44c1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -9,6 +9,7 @@ // This file implements lowering of CIR operations to LLVMIR. // //===----------------------------------------------------------------------===// +#include "LoweringHelpers.h" #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" @@ -2580,6 +2581,136 @@ class CIRInlineAsmOpLowering } }; +class CIRSetBitfieldLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::SetBitfieldOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + + auto info = op.getBitfieldInfo(); + auto size = info.getSize(); + auto offset = info.getOffset(); + auto storageType = info.getStorageType(); + auto context = storageType.getContext(); + + unsigned storageSize = 0; + + if (auto arTy = storageType.dyn_cast()) + storageSize = arTy.getSize() * 8; + else if (auto intTy = storageType.dyn_cast()) + storageSize = intTy.getWidth(); + else + llvm_unreachable( + "Either ArrayType or IntType expected for bitfields storage"); + + auto intType = mlir::IntegerType::get(context, storageSize); + auto srcVal = createIntCast(rewriter, adaptor.getSrc(), intType); + auto srcWidth = storageSize; + auto resultVal = srcVal; + + if (storageSize != size) { + assert(storageSize > size && "Invalid bitfield size."); + + mlir::Value val = rewriter.create( + op.getLoc(), intType, adaptor.getDst(), /* alignment */ 0, + op.getIsVolatile()); + + srcVal = createAnd(rewriter, srcVal, + llvm::APInt::getLowBitsSet(srcWidth, size)); + resultVal = srcVal; + srcVal = createShL(rewriter, srcVal, offset); + + // Mask out the original value. + val = + createAnd(rewriter, val, + ~llvm::APInt::getBitsSet(srcWidth, offset, offset + size)); + + // Or together the unchanged values and the source value. + srcVal = rewriter.create(op.getLoc(), val, srcVal); + } + + rewriter.create(op.getLoc(), srcVal, adaptor.getDst(), + /* alignment */ 0, op.getIsVolatile()); + + auto resultTy = getTypeConverter()->convertType(op.getType()); + + resultVal = + createIntCast(rewriter, resultVal, resultTy.cast()); + + if (info.getIsSigned()) { + assert(size <= storageSize); + unsigned highBits = storageSize - size; + + if (highBits) { + resultVal = createShL(rewriter, resultVal, highBits); + resultVal = createAShR(rewriter, resultVal, highBits); + } + } + + rewriter.replaceOp(op, resultVal); + return mlir::success(); + } +}; + +class CIRGetBitfieldLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::GetBitfieldOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + + auto info = op.getBitfieldInfo(); + auto size = info.getSize(); + auto offset = info.getOffset(); + auto storageType = info.getStorageType(); + auto context = storageType.getContext(); + unsigned storageSize = 0; + + if (auto arTy = storageType.dyn_cast()) + storageSize = arTy.getSize() * 8; + else if (auto intTy = storageType.dyn_cast()) + storageSize = intTy.getWidth(); + else + llvm_unreachable( + "Either ArrayType or IntType expected for bitfields storage"); + + auto intType = mlir::IntegerType::get(context, storageSize); + + mlir::Value val = rewriter.create( + op.getLoc(), intType, adaptor.getAddr(), 0, op.getIsVolatile()); + val = rewriter.create(op.getLoc(), intType, val); + + if (info.getIsSigned()) { + assert(static_cast(offset + size) <= storageSize); + unsigned highBits = storageSize - offset - size; + val = createShL(rewriter, val, highBits); + val = createAShR(rewriter, val, offset + highBits); + } else { + val = createLShR(rewriter, val, offset); + + if (static_cast(offset) + size < storageSize) + val = createAnd(rewriter, val, + llvm::APInt::getLowBitsSet(storageSize, size)); + } + + auto resTy = getTypeConverter()->convertType(op.getType()); + auto newOp = createIntCast(rewriter, val, resTy.cast(), + info.getIsSigned()); + rewriter.replaceOp(op, newOp); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -2599,8 +2730,9 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVectorCreateLowering, CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, CIRStackSaveLowering, CIRStackRestoreLowering, - CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering>( - converter, patterns.getContext()); + CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, + CIRSetBitfieldLowering, CIRGetBitfieldLowering>(converter, + patterns.getContext()); } namespace { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h b/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h new file mode 100644 index 000000000000..c9ee75a06352 --- /dev/null +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h @@ -0,0 +1,74 @@ +#ifndef LLVM_CLANG_LIB_LOWERINGHELPERS_H +#define LLVM_CLANG_LIB_LOWERINGHELPERS_H + +#include "mlir/IR/Types.h" +#include "mlir/IR/Value.h" + +#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/LLVMIR/LLVMTypes.h" + +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" + +using namespace llvm; + +mlir::Value createIntCast(mlir::OpBuilder &bld, mlir::Value src, + mlir::IntegerType dstTy, bool isSigned = false) { + auto srcTy = src.getType(); + assert(isa(srcTy)); + + auto srcWidth = srcTy.cast().getWidth(); + auto dstWidth = dstTy.cast().getWidth(); + auto loc = src.getLoc(); + + if (dstWidth > srcWidth && isSigned) + return bld.create(loc, dstTy, src); + else if (dstWidth > srcWidth) + return bld.create(loc, dstTy, src); + else if (dstWidth < srcWidth) + return bld.create(loc, dstTy, src); + else + return bld.create(loc, dstTy, src); +} + +mlir::Value getConstAPInt(mlir::OpBuilder &bld, mlir::Location loc, + mlir::Type typ, const llvm::APInt &val) { + return bld.create(loc, typ, val); +} + +mlir::Value getConst(mlir::OpBuilder &bld, mlir::Location loc, mlir::Type typ, + unsigned val) { + return bld.create(loc, typ, val); +} + +mlir::Value createShL(mlir::OpBuilder &bld, mlir::Value lhs, unsigned rhs) { + if (!rhs) + return lhs; + auto rhsVal = getConst(bld, lhs.getLoc(), lhs.getType(), rhs); + return bld.create(lhs.getLoc(), lhs, rhsVal); +} + +mlir::Value createLShR(mlir::OpBuilder &bld, mlir::Value lhs, unsigned rhs) { + if (!rhs) + return lhs; + auto rhsVal = getConst(bld, lhs.getLoc(), lhs.getType(), rhs); + return bld.create(lhs.getLoc(), lhs, rhsVal); +} + +mlir::Value createAShR(mlir::OpBuilder &bld, mlir::Value lhs, unsigned rhs) { + if (!rhs) + return lhs; + auto rhsVal = getConst(bld, lhs.getLoc(), lhs.getType(), rhs); + return bld.create(lhs.getLoc(), lhs, rhsVal); +} + +mlir::Value createAnd(mlir::OpBuilder &bld, mlir::Value lhs, + const llvm::APInt &rhs) { + auto rhsVal = getConstAPInt(bld, lhs.getLoc(), lhs.getType(), rhs); + return bld.create(lhs.getLoc(), lhs, rhsVal); +} + +#endif // LLVM_CLANG_LIB_LOWERINGHELPERS_H \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/bitfield-ops.c b/clang/test/CIR/CodeGen/bitfield-ops.c deleted file mode 100644 index 837d2ba03d1d..000000000000 --- a/clang/test/CIR/CodeGen/bitfield-ops.c +++ /dev/null @@ -1,52 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o - 2>&1 | FileCheck %s - -// CHECK: !ty_22S22 = !cir.struct -typedef struct { - int a : 4; - int b : 27; - int c : 17; - int d : 2; - int e : 15; - unsigned f; -} S; - -// CHECK: #bfi_d = #cir.bitfield_info -// CHECK: #bfi_e = #cir.bitfield_info - -// CHECK: cir.func {{.*@store_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , ["s"] -// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP0]][2] {name = "e"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) -> !s32i -void store_field() { - S s; - s.e = 3; -} - -// CHECK: cir.func {{.*@load_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] -// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr -// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr) -> !s32i -int load_field(S* s) { - return s->d; -} - -// CHECK: cir.func {{.*@unOp}} -// CHECK: [[TMP0:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP1:%.*]] = cir.get_bitfield(#bfi_d, [[TMP0]] : !cir.ptr) -> !s32i -// CHECK: [[TMP2:%.*]] = cir.unary(inc, [[TMP1]]) : !s32i, !s32i -// CHECK: [[TMP3:%.*]] = cir.set_bitfield(#bfi_d, [[TMP0]] : !cir.ptr, [[TMP2]] : !s32i) -> !s32i -void unOp(S* s) { - s->d++; -} - -// CHECK: cir.func {{.*@binOp}} -// CHECK: [[TMP0:%.*]] = cir.const(#cir.int<42> : !s32i) : !s32i -// CHECK: [[TMP1:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP2:%.*]] = cir.get_bitfield(#bfi_d, [[TMP1]] : !cir.ptr) -> !s32i -// CHECK: [[TMP3:%.*]] = cir.binop(or, [[TMP2]], [[TMP0]]) : !s32i -// CHECK: [[TMP4:%.*]] = cir.set_bitfield(#bfi_d, [[TMP1]] : !cir.ptr, [[TMP3]] : !s32i) -> !s32i -void binOp(S* s) { - s->d |= 42; -} diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index 566da42ed43f..cb39a9b8ebfc 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -54,67 +54,55 @@ typedef struct { } U; // CHECK: !ty_22D22 = !cir.struct -// CHECK: !ty_22S22 = !cir.struct // CHECK: !ty_22T22 = !cir.struct -// CHECK: !ty_22U22 = !cir.struct // CHECK: !ty_22anon2E122 = !cir.struct // CHECK: !ty_anon_struct = !cir.struct +// CHECK: #bfi_a = #cir.bitfield_info +// CHECK: #bfi_e = #cir.bitfield_info +// CHECK: !ty_22S22 = !cir.struct, !u16i, !u32i}> +// CHECK: !ty_22U22 = !cir.struct}> // CHECK: !ty_22__long22 = !cir.struct}> +// CHECK: #bfi_d = #cir.bitfield_info, size = 2, offset = 17, is_signed = true> // CHECK: cir.func {{.*@store_field}} // CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i -// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i -// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i -// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i -// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i -// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i -// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP0]][2] {name = "e"} : !cir.ptr -> !cir.ptr +// CHECK: cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) void store_field() { S s; - s.a = 3; -} - -// CHECK: cir.func {{.*@store_neg_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr -// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i -// CHECK: [[TMP5:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i -// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i -// CHECK: [[TMP7:%.*]] = cir.binop(and, [[TMP4]], [[TMP6]]) : !u32i -// CHECK: [[TMP8:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i -// CHECK: [[TMP9:%.*]] = cir.shift(left, [[TMP7]] : !u32i, [[TMP8]] : !u32i) -> !u32i -// CHECK: [[TMP10:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i -// CHECK: [[TMP11:%.*]] = cir.binop(and, [[TMP5]], [[TMP10]]) : !u32i -// CHECK: [[TMP12:%.*]] = cir.binop(or, [[TMP11]], [[TMP9]]) : !u32i -// CHECK: cir.store [[TMP12]], [[TMP3]] : !u32i, cir.ptr -void store_neg_field() { - S s; - s.d = -1; + s.e = 3; } // CHECK: cir.func {{.*@load_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i -// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP4]] : !u32i), !s32i -// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i -// CHECK: [[TMP7:%.*]] = cir.shift(left, [[TMP5]] : !s32i, [[TMP6]] : !s32i) -> !s32i -// CHECK: [[TMP8:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i -// CHECK: [[TMP9:%.*]] = cir.shift( right, [[TMP7]] : !s32i, [[TMP8]] : !s32i) -> !s32i -// CHECK: [[TMP10:%.*]] = cir.cast(integral, [[TMP9]] : !s32i), !s32i -// CHECK: cir.store [[TMP10]], [[TMP1]] : !s32i, cir.ptr -// CHECK: [[TMP11:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr>) -> !s32i int load_field(S* s) { return s->d; } +// CHECK: cir.func {{.*@unOp}} +// CHECK: [[TMP0:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP1:%.*]] = cir.get_bitfield(#bfi_d, [[TMP0]] : !cir.ptr>) -> !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(inc, [[TMP1]]) : !s32i, !s32i +// CHECK: cir.set_bitfield(#bfi_d, [[TMP0]] : !cir.ptr>, [[TMP2]] : !s32i) +void unOp(S* s) { + s->d++; +} + +// CHECK: cir.func {{.*@binOp}} +// CHECK: [[TMP0:%.*]] = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK: [[TMP1:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.get_bitfield(#bfi_d, [[TMP1]] : !cir.ptr>) -> !s32i +// CHECK: [[TMP3:%.*]] = cir.binop(or, [[TMP2]], [[TMP0]]) : !s32i +// CHECK: cir.set_bitfield(#bfi_d, [[TMP1]] : !cir.ptr>, [[TMP3]] : !s32i) +void binOp(S* s) { + s->d |= 42; +} + + // CHECK: cir.func {{.*@load_non_bitfield}} // CHECK: cir.get_member {{%.}}[3] {name = "f"} : !cir.ptr -> !cir.ptr unsigned load_non_bitfield(S *s) { diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index d9514a0ce2bf..322cea9dd78a 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -21,67 +21,32 @@ typedef struct { int d : 2; int e : 15; unsigned f; // type other than int above, not a bitfield -} S; +} S; typedef struct { int a : 3; // one bitfield with size < 8 unsigned b; -} T; -// CHECK: !ty_22S22 = !cir.struct +} T; // CHECK: !ty_22T22 = !cir.struct // CHECK: !ty_22anon2E122 = !cir.struct +// CHECK: !ty_22S22 = !cir.struct, !u16i, !u32i}> // CHECK: !ty_22__long22 = !cir.struct}> // CHECK: cir.func @_Z11store_field -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i // CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i -// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i -// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i -// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i -// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i -// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i -// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr +// CHECK: cir.set_bitfield(#bfi_a, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) void store_field() { S s; s.a = 3; } -// CHECK: cir.func @_Z15store_neg_field -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr -// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i -// CHECK: [[TMP5:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i -// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i -// CHECK: [[TMP7:%.*]] = cir.binop(and, [[TMP4]], [[TMP6]]) : !u32i -// CHECK: [[TMP8:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i -// CHECK: [[TMP9:%.*]] = cir.shift(left, [[TMP7]] : !u32i, [[TMP8]] : !u32i) -> !u32i -// CHECK: [[TMP10:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i -// CHECK: [[TMP11:%.*]] = cir.binop(and, [[TMP5]], [[TMP10]]) : !u32i -// CHECK: [[TMP12:%.*]] = cir.binop(or, [[TMP11]], [[TMP9]]) : !u32i -// CHECK: cir.store [[TMP12]], [[TMP3]] : !u32i, cir.ptr -void store_neg_field() { - S s; - s.d = -1; -} - // CHECK: cir.func @_Z10load_field -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i -// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP4]] : !u32i), !s32i -// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i -// CHECK: [[TMP7:%.*]] = cir.shift(left, [[TMP5]] : !s32i, [[TMP6]] : !s32i) -> !s32i -// CHECK: [[TMP8:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i -// CHECK: [[TMP9:%.*]] = cir.shift( right, [[TMP7]] : !s32i, [[TMP8]] : !s32i) -> !s32i -// CHECK: [[TMP10:%.*]] = cir.cast(integral, [[TMP9]] : !s32i), !s32i -// CHECK: cir.store [[TMP10]], [[TMP1]] : !s32i, cir.ptr -// CHECK: [[TMP11:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr>) -> !s32i int load_field(S& s) { return s.d; } diff --git a/clang/test/CIR/CodeGen/const-bitfields.c b/clang/test/CIR/CodeGen/const-bitfields.c index 63ee4a25a671..bf54e1893d02 100644 --- a/clang/test/CIR/CodeGen/const-bitfields.c +++ b/clang/test/CIR/CodeGen/const-bitfields.c @@ -14,13 +14,13 @@ struct Inner { unsigned d : 30; }; -// CHECK: !ty_22T22 = !cir.struct // CHECK: !ty_anon_struct = !cir.struct -// CHECK: #bfi_Z = #cir.bitfield_info +// CHECK: !ty_22T22 = !cir.struct, !s32i} #cir.record.decl.ast> // CHECK: !ty_anon_struct1 = !cir.struct, !u8i, !u8i, !u8i, !u8i}> +// CHECK: #bfi_Z = #cir.bitfield_info, size = 9, offset = 11, is_signed = true> -struct T GV = { 1, 5, 256, 42 }; -// CHECK: cir.global external @GV = #cir.const_struct<{#cir.int<161> : !u8i, #cir.int<0> : !u8i, #cir.int<8> : !u8i, #cir.int<42> : !s32i}> : !ty_anon_struct +struct T GV = { 1, 5, 26, 42 }; +// CHECK: cir.global external @GV = #cir.const_struct<{#cir.int<161> : !u8i, #cir.int<208> : !u8i, #cir.int<0> : !u8i, #cir.int<42> : !s32i}> : !ty_anon_struct // check padding is used (const array of zeros) struct Inner var = { 1, 0, 1, 21}; @@ -30,8 +30,8 @@ struct Inner var = { 1, 0, 1, 21}; // CHECK: cir.func {{.*@getZ()}} // CHECK: %1 = cir.get_global @GV : cir.ptr // CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr -// CHECK: %3 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr -// CHECK: %4 = cir.get_bitfield(#bfi_Z, %3 : !cir.ptr) -> !s32i +// CHECK: %3 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr> +// CHECK: %4 = cir.get_bitfield(#bfi_Z, %3 : !cir.ptr>) -> !s32i int getZ() { return GV.Z; } diff --git a/clang/test/CIR/CodeGen/volatile.cpp b/clang/test/CIR/CodeGen/volatile.cpp index 10c1d309bb96..5b8c13334ecf 100644 --- a/clang/test/CIR/CodeGen/volatile.cpp +++ b/clang/test/CIR/CodeGen/volatile.cpp @@ -43,7 +43,7 @@ int test_load_field3(Foo *ptr) { // CHECK: cir.func @_Z16test_load_field3P3Foo // CHECK: %[[MemberAddr:.+]] = cir.get_member -// CHECK: %{{.+}} = cir.load volatile %[[MemberAddr]] +// CHECK: %{{.+}} = cir.get_bitfield(#bfi_z, %[[MemberAddr:.+]] {is_volatile} void test_store_field1(volatile Foo *ptr) { ptr->x = 42; @@ -67,4 +67,4 @@ void test_store_field3(Foo *ptr) { // CHECK: cir.func @_Z17test_store_field3P3Foo // CHECK: %[[MemberAddr:.+]] = cir.get_member -// CHECK: cir.store volatile %{{.+}}, %[[MemberAddr]] +// CHECK: cir.set_bitfield(#bfi_z, %[[MemberAddr:.+]] : !cir.ptr, %1 : !s32i) {is_volatile} diff --git a/clang/test/CIR/Lowering/bitfieils.c b/clang/test/CIR/Lowering/bitfieils.c new file mode 100644 index 000000000000..ec289bf1048b --- /dev/null +++ b/clang/test/CIR/Lowering/bitfieils.c @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +typedef struct { + int a : 4; +} B; + +// LLVM: define void @set_signed +// LLVM: [[TMP0:%.*]] = load ptr +// LLVM: [[TMP1:%.*]] = load i8, ptr [[TMP0]] +// LLVM: [[TMP2:%.*]] = and i8 [[TMP1]], -16 +// LLVM: [[TMP3:%.*]] = or i8 [[TMP2]], 14 +// LLVM: store i8 [[TMP3]], ptr [[TMP0]] +void set_signed(B* b) { + b->a = -2; +} + +// LLVM: define i32 @get_signed +// LLVM: [[TMP0:%.*]] = alloca i32 +// LLVM: [[TMP1:%.*]] = load ptr +// LLVM: [[TMP2:%.*]] = load i8, ptr [[TMP1]] +// LLVM: [[TMP3:%.*]] = shl i8 [[TMP2]], 4 +// LLVM: [[TMP4:%.*]] = ashr i8 [[TMP3]], 4 +// LLVM: [[TMP5:%.*]] = sext i8 [[TMP4]] to i32 +// LLVM: store i32 [[TMP5]], ptr [[TMP0]] +// LLVM: [[TMP6:%.*]] = load i32, ptr [[TMP0]] +// LLVM: ret i32 [[TMP6]] +int get_signed(B* b) { + return b->a; +} \ No newline at end of file From 0763c51f6439b9bccab986a3696569785c36dde7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 12 Mar 2024 11:20:53 -0700 Subject: [PATCH 1447/2301] [CIR][CIRGen] Add support for ctor/dtor based array init/destroy Still missing lowering support, which will come next. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 46 +++++- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 9 ++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 112 +++++++++++++ clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 149 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 50 ++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 32 ++++ clang/test/CIR/CodeGen/array-init-destroy.cpp | 34 ++++ 8 files changed, 432 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/array-init-destroy.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b8b8308e43ed..abadea449cdb 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -648,7 +648,8 @@ def ConditionOp : CIR_Op<"condition", [ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "WhileOp", "ForOp", "AwaitOp", - "TernaryOp", "GlobalOp", "DoWhileOp", "CatchOp", "TryOp"]>]> { + "TernaryOp", "GlobalOp", "DoWhileOp", "CatchOp", "TryOp", + "ArrayCtor", "ArrayDtor"]>]> { let summary = "Represents the default branching behaviour of a region"; let description = [{ The `cir.yield` operation terminates regions on different CIR operations, @@ -3204,6 +3205,49 @@ def TrapOp : CIR_Op<"trap", [Terminator]> { let assemblyFormat = "attr-dict"; } +//===----------------------------------------------------------------------===// +// ArrayCtor & ArrayDtor +//===----------------------------------------------------------------------===// + +class CIR_ArrayInitDestroy : CIR_Op { + let arguments = (ins Arg:$addr); + let regions = (region SizedRegion<1>:$body); + let assemblyFormat = [{ + `(` $addr `:` qualified(type($addr)) `)` $body attr-dict + }]; + + let builders = [ + OpBuilder<(ins "mlir::Value":$addr, + "function_ref":$regionBuilder), [{ + assert(regionBuilder && "builder callback expected"); + OpBuilder::InsertionGuard guard($_builder); + Region *r = $_state.addRegion(); + $_state.addOperands(ValueRange{addr}); + $_builder.createBlock(r); + regionBuilder($_builder, $_state.location); + }]> + ]; +} + +def ArrayCtor : CIR_ArrayInitDestroy<"array.ctor"> { + let summary = "Initialize array elements with C++ constructors"; + let description = [{ + Initialize each array element using the same C++ constructor. This + operation has one region, with one single block. The block has an + incoming argument for the current array index to initialize. + }]; +} + +def ArrayDtor : CIR_ArrayInitDestroy<"array.dtor"> { + let summary = "Destroy array elements with C++ dtors"; + let description = [{ + Destroy each array element using the same C++ destructor. This + operation has one region, with one single block. The block has an + incoming argument for the current array index to initialize. + }]; +} + //===----------------------------------------------------------------------===// // Operations Lowered Directly to LLVM IR // diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index ad09b201cedc..489afcff7d96 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -421,6 +421,15 @@ def IntegerVector : Type< ]>, "!cir.vector of !cir.int"> { } +// Pointer to Arrays +def ArrayPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::ArrayType>()">, + ]>, "!cir.ptr"> { +} + //===----------------------------------------------------------------------===// // StructType (defined in cpp files) //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 012bba17424f..cd2b3c4e331d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1533,3 +1533,115 @@ CIRGenModule::getDynamicOffsetAlignment(clang::CharUnits actualBaseAlign, // the two alignments. return std::min(actualBaseAlign, expectedTargetAlign); } + +/// Emit a loop to call a particular constructor for each of several members of +/// an array. +/// +/// \param ctor the constructor to call for each element +/// \param arrayType the type of the array to initialize +/// \param arrayBegin an arrayType* +/// \param zeroInitialize true if each element should be +/// zero-initialized before it is constructed +void CIRGenFunction::buildCXXAggrConstructorCall( + const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, + Address arrayBegin, const CXXConstructExpr *E, bool NewPointerIsChecked, + bool zeroInitialize) { + QualType elementType; + auto numElements = buildArrayLength(arrayType, elementType, arrayBegin); + buildCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, + NewPointerIsChecked, zeroInitialize); +} + +/// Emit a loop to call a particular constructor for each of several members of +/// an array. +/// +/// \param ctor the constructor to call for each element +/// \param numElements the number of elements in the array; +/// may be zero +/// \param arrayBase a T*, where T is the type constructed by ctor +/// \param zeroInitialize true if each element should be +/// zero-initialized before it is constructed +void CIRGenFunction::buildCXXAggrConstructorCall( + const CXXConstructorDecl *ctor, mlir::Value numElements, Address arrayBase, + const CXXConstructExpr *E, bool NewPointerIsChecked, bool zeroInitialize) { + // It's legal for numElements to be zero. This can happen both + // dynamically, because x can be zero in 'new A[x]', and statically, + // because of GCC extensions that permit zero-length arrays. There + // are probably legitimate places where we could assume that this + // doesn't happen, but it's not clear that it's worth it. + // llvm::BranchInst *zeroCheckBranch = nullptr; + + // Optimize for a constant count. + auto constantCount = + dyn_cast(numElements.getDefiningOp()); + if (constantCount) { + auto constIntAttr = constantCount.getValue().dyn_cast(); + // Just skip out if the constant count is zero. + if (constIntAttr && constIntAttr.getUInt() == 0) + return; + // Otherwise, emit the check. + } else { + llvm_unreachable("NYI"); + } + + auto arrayTy = arrayBase.getElementType().dyn_cast(); + assert(arrayTy && "expected array type"); + auto elementType = arrayTy.getEltType(); + auto ptrToElmType = builder.getPointerTo(elementType); + + // Tradional LLVM codegen emits a loop here. + // TODO(cir): Lower to a loop as part of LoweringPrepare. + + // The alignment of the base, adjusted by the size of a single element, + // provides a conservative estimate of the alignment of every element. + // (This assumes we never start tracking offsetted alignments.) + // + // Note that these are complete objects and so we don't need to + // use the non-virtual size or alignment. + QualType type = getContext().getTypeDeclType(ctor->getParent()); + CharUnits eltAlignment = arrayBase.getAlignment().alignmentOfArrayElement( + getContext().getTypeSizeInChars(type)); + + // Zero initialize the storage, if requested. + if (zeroInitialize) { + llvm_unreachable("NYI"); + } + + // C++ [class.temporary]p4: + // There are two contexts in which temporaries are destroyed at a different + // point than the end of the full-expression. The first context is when a + // default constructor is called to initialize an element of an array. + // If the constructor has one or more default arguments, the destruction of + // every temporary created in a default argument expression is sequenced + // before the construction of the next array element, if any. + { + RunCleanupsScope Scope(*this); + + // Evaluate the constructor and its arguments in a regular + // partial-destroy cleanup. + if (getLangOpts().Exceptions && + !ctor->getParent()->hasTrivialDestructor()) { + llvm_unreachable("NYI"); + } + + // Wmit the constructor call that will execute for every array element. + builder.create( + *currSrcLoc, arrayBase.getPointer(), + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc); + Address curAddr = Address(arg, ptrToElmType, eltAlignment); + auto currAVS = AggValueSlot::forAddr( + curAddr, type.getQualifiers(), AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, + AggValueSlot::DoesNotOverlap, AggValueSlot::IsNotZeroed, + NewPointerIsChecked ? AggValueSlot::IsSanitizerChecked + : AggValueSlot::IsNotSanitizerChecked); + buildCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false, + /*Delegating=*/false, currAVS, E); + builder.create(loc); + }); + } + + if (constantCount.use_empty()) + constantCount.erase(); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 2483d3d371ac..7200d8949f4c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -966,6 +966,128 @@ void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr, useEHCleanupForArray); } +namespace { +/// A cleanup which performs a partial array destroy where the end pointer is +/// regularly determined and does not need to be loaded from a local. +class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup { + mlir::Value ArrayBegin; + mlir::Value ArrayEnd; + QualType ElementType; + [[maybe_unused]] CIRGenFunction::Destroyer *Destroyer; + CharUnits ElementAlign; + +public: + RegularPartialArrayDestroy(mlir::Value arrayBegin, mlir::Value arrayEnd, + QualType elementType, CharUnits elementAlign, + CIRGenFunction::Destroyer *destroyer) + : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd), ElementType(elementType), + Destroyer(destroyer), ElementAlign(elementAlign) {} + + void Emit(CIRGenFunction &CGF, Flags flags) override { + llvm_unreachable("NYI"); + } +}; + +/// A cleanup which performs a partial array destroy where the end pointer is +/// irregularly determined and must be loaded from a local. +class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup { + mlir::Value ArrayBegin; + Address ArrayEndPointer; + QualType ElementType; + [[maybe_unused]] CIRGenFunction::Destroyer *Destroyer; + CharUnits ElementAlign; + +public: + IrregularPartialArrayDestroy(mlir::Value arrayBegin, Address arrayEndPointer, + QualType elementType, CharUnits elementAlign, + CIRGenFunction::Destroyer *destroyer) + : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer), + ElementType(elementType), Destroyer(destroyer), + ElementAlign(elementAlign) {} + + void Emit(CIRGenFunction &CGF, Flags flags) override { + llvm_unreachable("NYI"); + } +}; +} // end anonymous namespace + +/// Push an EH cleanup to destroy already-constructed elements of the given +/// array. The cleanup may be popped with DeactivateCleanupBlock or +/// PopCleanupBlock. +/// +/// \param elementType - the immediate element type of the array; +/// possibly still an array type +void CIRGenFunction::pushIrregularPartialArrayCleanup(mlir::Value arrayBegin, + Address arrayEndPointer, + QualType elementType, + CharUnits elementAlign, + Destroyer *destroyer) { + pushFullExprCleanup( + EHCleanup, arrayBegin, arrayEndPointer, elementType, elementAlign, + destroyer); +} + +/// Push an EH cleanup to destroy already-constructed elements of the given +/// array. The cleanup may be popped with DeactivateCleanupBlock or +/// PopCleanupBlock. +/// +/// \param elementType - the immediate element type of the array; +/// possibly still an array type +void CIRGenFunction::pushRegularPartialArrayCleanup(mlir::Value arrayBegin, + mlir::Value arrayEnd, + QualType elementType, + CharUnits elementAlign, + Destroyer *destroyer) { + pushFullExprCleanup( + EHCleanup, arrayBegin, arrayEnd, elementType, elementAlign, destroyer); +} + +/// Destroys all the elements of the given array, beginning from last to first. +/// The array cannot be zero-length. +/// +/// \param begin - a type* denoting the first element of the array +/// \param end - a type* denoting one past the end of the array +/// \param elementType - the element type of the array +/// \param destroyer - the function to call to destroy elements +/// \param useEHCleanup - whether to push an EH cleanup to destroy +/// the remaining elements in case the destruction of a single +/// element throws +void CIRGenFunction::buildArrayDestroy(mlir::Value begin, mlir::Value end, + QualType elementType, + CharUnits elementAlign, + Destroyer *destroyer, + bool checkZeroLength, + bool useEHCleanup) { + assert(!elementType->isArrayType()); + if (checkZeroLength) { + llvm_unreachable("NYI"); + } + + // Differently from LLVM traditional codegen, use a higher level + // representation instead of lowering directly to a loop. + mlir::Type cirElementType = convertTypeForMem(elementType); + auto ptrToElmType = builder.getPointerTo(cirElementType); + + // Emit the dtor call that will execute for every array element. + builder.create( + *currSrcLoc, begin, [&](mlir::OpBuilder &b, mlir::Location loc) { + auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc); + Address curAddr = Address(arg, ptrToElmType, elementAlign); + if (useEHCleanup) { + pushRegularPartialArrayCleanup(arg, arg, elementType, elementAlign, + destroyer); + } + + // Perform the actual destruction there. + destroyer(*this, curAddr, elementType); + + if (useEHCleanup) + PopCleanupBlock(); + + builder.create(loc); + }); +} + /// Immediately perform the destruction of the given object. /// /// \param addr - the address of the object; a type* @@ -983,7 +1105,32 @@ void CIRGenFunction::emitDestroy(Address addr, QualType type, if (!arrayType) return destroyer(*this, addr, type); - llvm_unreachable("Array destroy NYI"); + auto length = buildArrayLength(arrayType, type, addr); + + CharUnits elementAlign = addr.getAlignment().alignmentOfArrayElement( + getContext().getTypeSizeInChars(type)); + + // Normally we have to check whether the array is zero-length. + bool checkZeroLength = true; + + // But if the array length is constant, we can suppress that. + auto constantCount = dyn_cast(length.getDefiningOp()); + if (constantCount) { + auto constIntAttr = constantCount.getValue().dyn_cast(); + // ...and if it's constant zero, we can just skip the entire thing. + if (constIntAttr && constIntAttr.getUInt() == 0) + return; + checkZeroLength = false; + } else { + llvm_unreachable("NYI"); + } + + auto begin = addr.getPointer(); + mlir::Value end; // Use this for future non-constant counts. + buildArrayDestroy(begin, end, type, elementAlign, destroyer, checkZeroLength, + useEHCleanupForArray); + if (constantCount.use_empty()) + constantCount.erase(); } CIRGenFunction::Destroyer * diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 6f22bfef485a..1de67ca5897c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -344,7 +344,8 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, } if (const ArrayType *arrayType = getContext().getAsArrayType(E->getType())) { - llvm_unreachable("NYI"); + buildCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E, + Dest.isSanitizerChecked()); } else { clang::CXXCtorType Type = Ctor_Complete; bool ForVirtualBase = false; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index b79dd53d8200..5c9f5223eb93 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1684,3 +1684,53 @@ void CIRGenFunction::buildVariablyModifiedType(QualType type) { } } while (type->isVariablyModifiedType()); } + +/// Computes the length of an array in elements, as well as the base +/// element type and a properly-typed first element pointer. +mlir::Value +CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, + QualType &baseType, Address &addr) { + const auto *arrayType = origArrayType; + + // If it's a VLA, we have to load the stored size. Note that + // this is the size of the VLA in bytes, not its size in elements. + mlir::Value numVLAElements{}; + if (isa(arrayType)) { + llvm_unreachable("NYI"); + } + + uint64_t countFromCLAs = 1; + QualType eltType; + + // llvm::ArrayType *llvmArrayType = + // dyn_cast(addr.getElementType()); + auto cirArrayType = addr.getElementType().dyn_cast(); + + while (cirArrayType) { + assert(isa(arrayType)); + countFromCLAs *= cirArrayType.getSize(); + eltType = arrayType->getElementType(); + + cirArrayType = cirArrayType.getEltType().dyn_cast(); + + arrayType = getContext().getAsArrayType(arrayType->getElementType()); + assert((!cirArrayType || arrayType) && + "CIR and Clang types are out-of-synch"); + } + + if (arrayType) { + // From this point onwards, the Clang array type has been emitted + // as some other type (probably a packed struct). Compute the array + // size, and just emit the 'begin' expression as a bitcast. + llvm_unreachable("NYI"); + } + + baseType = eltType; + auto numElements = builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs); + + // If we had any VLA dimensions, factor them in. + if (numVLAElements) + llvm_unreachable("NYI"); + + return numElements; +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index eee9c8509f60..0c73811106de 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -626,6 +626,23 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value buildCXXNewExpr(const CXXNewExpr *E); void buildCXXDeleteExpr(const CXXDeleteExpr *E); + void buildCXXAggrConstructorCall(const CXXConstructorDecl *D, + const clang::ArrayType *ArrayTy, + Address ArrayPtr, const CXXConstructExpr *E, + bool NewPointerIsChecked, + bool ZeroInitialization = false); + + void buildCXXAggrConstructorCall(const CXXConstructorDecl *ctor, + mlir::Value numElements, Address arrayBase, + const CXXConstructExpr *E, + bool NewPointerIsChecked, + bool zeroInitialize); + + /// Compute the length of an array, even if it's a VLA, and drill down to the + /// base element type. + mlir::Value buildArrayLength(const clang::ArrayType *arrayType, + QualType &baseType, Address &addr); + void buildDeleteCall(const FunctionDecl *DeleteFD, mlir::Value Ptr, QualType DeleteTy, mlir::Value NumElements = nullptr, CharUnits CookieSize = CharUnits()); @@ -1674,6 +1691,21 @@ class CIRGenFunction : public CIRGenTypeCache { llvm_unreachable("NYI"); } + void pushIrregularPartialArrayCleanup(mlir::Value arrayBegin, + Address arrayEndPointer, + QualType elementType, + CharUnits elementAlign, + Destroyer *destroyer); + void pushRegularPartialArrayCleanup(mlir::Value arrayBegin, + mlir::Value arrayEnd, + QualType elementType, + CharUnits elementAlign, + Destroyer *destroyer); + void buildArrayDestroy(mlir::Value begin, mlir::Value end, + QualType elementType, CharUnits elementAlign, + Destroyer *destroyer, bool checkZeroLength, + bool useEHCleanup); + // Points to the outermost active conditional control. This is used so that // we know if a temporary should be destroyed conditionally. ConditionalEvaluation *OutermostConditional = nullptr; diff --git a/clang/test/CIR/CodeGen/array-init-destroy.cpp b/clang/test/CIR/CodeGen/array-init-destroy.cpp new file mode 100644 index 000000000000..d672ef41ccf9 --- /dev/null +++ b/clang/test/CIR/CodeGen/array-init-destroy.cpp @@ -0,0 +1,34 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck -check-prefix=BEFORE %s + +void foo() noexcept; + +class xpto { +public: + xpto() { + foo(); + } + int i; + float f; + ~xpto() { + foo(); + } +}; + +void x() { + xpto array[2]; +} + +// BEFORE: cir.func @_Z1xv() +// BEFORE: %[[ArrayAddr:.*]] = cir.alloca !cir.array + +// BEFORE: cir.array.ctor(%[[ArrayAddr]] : !cir.ptr>) { +// BEFORE: ^bb0(%arg0: !cir.ptr +// BEFORE: cir.call @_ZN4xptoC1Ev(%arg0) : (!cir.ptr) -> () +// BEFORE: cir.yield +// BEFORE: } + +// BEFORE: cir.array.dtor(%[[ArrayAddr]] : !cir.ptr>) { +// BEFORE: ^bb0(%arg0: !cir.ptr +// BEFORE: cir.call @_ZN4xptoD1Ev(%arg0) : (!cir.ptr) -> () +// BEFORE: cir.yield +// BEFORE: } From 88b97e1ae9cb65754dbaf08216472fcb5fe84c9a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 13 Mar 2024 17:48:57 -0700 Subject: [PATCH 1448/2301] [CIR][CIRGen] Lower C++ array init and destroy --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 76 ++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 84 ++-------------- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 6 +- .../Dialect/Transforms/LoweringPrepare.cpp | 99 ++++++++++++++++++- clang/test/CIR/CodeGen/array-init-destroy.cpp | 29 +++++- 5 files changed, 213 insertions(+), 81 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index fed1176a1603..1b93557b9fcd 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -191,6 +191,82 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return create(loc, newTy, mlir::cir::CastKind::bitcast, src); } + + // + // Block handling helpers + // ---------------------- + // + OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) { + auto lastAlloca = + std::find_if(block->rbegin(), block->rend(), [](mlir::Operation &op) { + return mlir::isa(&op); + }); + + if (lastAlloca != block->rend()) + return OpBuilder::InsertPoint(block, + ++mlir::Block::iterator(&*lastAlloca)); + return OpBuilder::InsertPoint(block, block->begin()); + }; + + mlir::IntegerAttr getSizeFromCharUnits(mlir::MLIRContext *ctx, + clang::CharUnits size) { + // Note that mlir::IntegerType is used instead of mlir::cir::IntType here + // because we don't need sign information for this to be useful, so keep + // it simple. + return mlir::IntegerAttr::get(mlir::IntegerType::get(ctx, 64), + size.getQuantity()); + } + + mlir::cir::PointerType getPointerTo(mlir::Type ty, + unsigned addressSpace = 0) { + return mlir::cir::PointerType::get(getContext(), ty); + } + + /// Create a do-while operation. + mlir::cir::DoWhileOp createDoWhile( + mlir::Location loc, + llvm::function_ref condBuilder, + llvm::function_ref bodyBuilder) { + return create(loc, condBuilder, bodyBuilder); + } + + /// Create a while operation. + mlir::cir::WhileOp createWhile( + mlir::Location loc, + llvm::function_ref condBuilder, + llvm::function_ref bodyBuilder) { + return create(loc, condBuilder, bodyBuilder); + } + + /// Create a for operation. + mlir::cir::ForOp createFor( + mlir::Location loc, + llvm::function_ref condBuilder, + llvm::function_ref bodyBuilder, + llvm::function_ref stepBuilder) { + return create(loc, condBuilder, bodyBuilder, stepBuilder); + } + + mlir::TypedAttr getConstPtrAttr(mlir::Type t, uint64_t v) { + assert(t.isa() && "expected cir.ptr"); + return mlir::cir::ConstPtrAttr::get(getContext(), t, v); + } + + // Creates constant nullptr for pointer type ty. + mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { + return create(loc, ty, getConstPtrAttr(ty, 0)); + } + + /// Create a loop condition. + mlir::cir::ConditionOp createCondition(mlir::Value condition) { + return create(condition.getLoc(), condition); + } + + /// Create a yield operation. + mlir::cir::YieldOp createYield(mlir::Location loc, + mlir::ValueRange value = {}) { + return create(loc, value); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 6d6079ada6a3..6b291d0ffb16 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -149,11 +149,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::ConstPtrAttr::get(getContext(), t, 0); } - mlir::TypedAttr getConstPtrAttr(mlir::Type t, uint64_t v) { - assert(t.isa() && "expected cir.ptr"); - return mlir::cir::ConstPtrAttr::get(getContext(), t, v); - } - mlir::Attribute getString(llvm::StringRef str, mlir::Type eltTy, unsigned size = 0) { unsigned finalSize = size ? size : str.size(); @@ -414,11 +409,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::PointerType getUInt32PtrTy(unsigned AddrSpace = 0) { return mlir::cir::PointerType::get(getContext(), typeCache.UInt32Ty); } - mlir::cir::PointerType getPointerTo(mlir::Type ty, - unsigned addressSpace = 0) { - assert(!UnimplementedFeature::addressSpace() && "NYI"); - return mlir::cir::PointerType::get(getContext(), ty); - } /// Get a CIR anonymous struct type. mlir::cir::StructType @@ -539,13 +529,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, uInt64Ty, mlir::cir::IntAttr::get(uInt64Ty, C)); } - mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::Type t, - uint64_t C) { - auto intTy = t.dyn_cast(); - assert(intTy && "expected mlir::cir::IntType"); - return create(loc, intTy, - mlir::cir::IntAttr::get(t, C)); - } + mlir::cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal) { bool isSigned = intVal.isSigned(); auto width = intVal.getBitWidth(); @@ -553,6 +537,15 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return getConstInt( loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); } + + mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::Type t, + uint64_t C) { + auto intTy = t.dyn_cast(); + assert(intTy && "expected mlir::cir::IntType"); + return create(loc, intTy, + mlir::cir::IntAttr::get(t, C)); + } + mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), getCIRBoolAttr(state)); @@ -564,11 +557,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return getBool(true, loc); } - // Creates constant nullptr for pointer type ty. - mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { - return create(loc, ty, getConstPtrAttr(ty, 0)); - } - /// Create constant nullptr for pointer-to-data-member type ty. mlir::cir::ConstantOp getNullDataMemberPtr(mlir::cir::DataMemberType ty, mlir::Location loc) { @@ -592,22 +580,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, attr.getType(), attr); } - // - // Block handling helpers - // ---------------------- - // - OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) { - auto lastAlloca = - std::find_if(block->rbegin(), block->rend(), [](mlir::Operation &op) { - return mlir::isa(&op); - }); - - if (lastAlloca != block->rend()) - return OpBuilder::InsertPoint(block, - ++mlir::Block::iterator(&*lastAlloca)); - return OpBuilder::InsertPoint(block, block->begin()); - }; - // // Operation creation helpers // -------------------------- @@ -618,52 +590,16 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(dst.getLoc(), dst, src); } - /// Create a loop condition. - mlir::cir::ConditionOp createCondition(mlir::Value condition) { - return create(condition.getLoc(), condition); - } - /// Create a break operation. mlir::cir::BreakOp createBreak(mlir::Location loc) { return create(loc); } - /// Create a yield operation. - mlir::cir::YieldOp createYield(mlir::Location loc, - mlir::ValueRange value = {}) { - return create(loc, value); - } - /// Create a continue operation. mlir::cir::ContinueOp createContinue(mlir::Location loc) { return create(loc); } - /// Create a do-while operation. - mlir::cir::DoWhileOp createDoWhile( - mlir::Location loc, - llvm::function_ref condBuilder, - llvm::function_ref bodyBuilder) { - return create(loc, condBuilder, bodyBuilder); - } - - /// Create a while operation. - mlir::cir::WhileOp createWhile( - mlir::Location loc, - llvm::function_ref condBuilder, - llvm::function_ref bodyBuilder) { - return create(loc, condBuilder, bodyBuilder); - } - - /// Create a for operation. - mlir::cir::ForOp createFor( - mlir::Location loc, - llvm::function_ref condBuilder, - llvm::function_ref bodyBuilder, - llvm::function_ref stepBuilder) { - return create(loc, condBuilder, bodyBuilder, stepBuilder); - } - mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, mlir::Value src, mlir::Value len) { return create(loc, dst, src, len); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 938860831440..018f923abb10 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2449,11 +2449,7 @@ void CIRGenModule::buildDefaultMethods() { } mlir::IntegerAttr CIRGenModule::getSize(CharUnits size) { - // Note that mlir::IntegerType is used instead of mlir::cir::IntType here - // because we don't need sign information for this to be useful, so keep - // it simple. - return mlir::IntegerAttr::get( - mlir::IntegerType::get(builder.getContext(), 64), size.getQuantity()); + return builder.getSizeFromCharUnits(builder.getContext(), size); } mlir::Operation * diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 8c1ba9a59f2f..a71921c3ac5d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -11,6 +11,7 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/CharUnits.h" #include "clang/AST/Mangle.h" #include "clang/Basic/Module.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" @@ -69,6 +70,8 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerStdFindOp(StdFindOp op); void lowerIterBeginOp(IterBeginOp op); void lowerIterEndOp(IterEndOp op); + void lowerArrayDtor(ArrayDtor op); + void lowerArrayCtor(ArrayCtor op); /// Build the function that initializes the specified global FuncOp buildCXXGlobalVarDeclInitFunc(GlobalOp op); @@ -301,6 +304,95 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { builder.create(f.getLoc()); } +static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, + mlir::Operation *op, mlir::Type eltTy, + mlir::Value arrayAddr, + uint64_t arrayLen) { + // Generate loop to call into ctor/dtor for every element. + auto loc = op->getLoc(); + + // TODO: instead of fixed integer size, create alias for PtrDiffTy and unify + // with CIRGen stuff. + auto ptrDiffTy = + mlir::cir::IntType::get(builder.getContext(), 64, /*signed=*/false); + auto numArrayElementsConst = builder.create( + loc, ptrDiffTy, mlir::cir::IntAttr::get(ptrDiffTy, arrayLen)); + + auto begin = builder.create( + loc, eltTy, mlir::cir::CastKind::array_to_ptrdecay, arrayAddr); + mlir::Value end = builder.create( + loc, eltTy, begin, numArrayElementsConst); + + auto tmpAddr = builder.create( + loc, /*addr type*/ builder.getPointerTo(eltTy), + /*var type*/ eltTy, "__array_idx", + builder.getSizeFromCharUnits(builder.getContext(), + clang::CharUnits::One()), + nullptr); + + auto loop = builder.createDoWhile( + loc, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto currentElement = b.create(loc, eltTy, tmpAddr); + mlir::Type boolTy = mlir::cir::BoolType::get(b.getContext()); + auto cmp = builder.create( + loc, boolTy, mlir::cir::CmpOpKind::eq, currentElement, end); + builder.createCondition(cmp); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto currentElement = b.create(loc, eltTy, tmpAddr); + + CallOp ctorCall; + op->walk([&](CallOp c) { ctorCall = c; }); + assert(ctorCall && "expected ctor call"); + + auto one = builder.create( + loc, ptrDiffTy, mlir::cir::IntAttr::get(ptrDiffTy, 1)); + + ctorCall->moveAfter(one); + ctorCall->setOperand(0, currentElement); + + // Advance pointer and store them to temporary variable + auto nextElement = builder.create( + loc, eltTy, currentElement, one); + b.create(loc, nextElement, tmpAddr); + builder.createYield(loc); + }); + + op->replaceAllUsesWith(loop); + op->erase(); +} + +void LoweringPreparePass::lowerArrayDtor(ArrayDtor op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + + auto eltTy = op->getRegion(0).getArgument(0).getType(); + auto arrayLen = op.getAddr() + .getType() + .cast() + .getPointee() + .cast() + .getSize(); + lowerArrayDtorCtorIntoLoop(builder, op, eltTy, op.getAddr(), arrayLen); +} + +void LoweringPreparePass::lowerArrayCtor(ArrayCtor op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op.getOperation()); + + auto eltTy = op->getRegion(0).getArgument(0).getType(); + auto arrayLen = op.getAddr() + .getType() + .cast() + .getPointee() + .cast() + .getSize(); + lowerArrayDtorCtorIntoLoop(builder, op, eltTy, op.getAddr(), arrayLen); +} + void LoweringPreparePass::lowerStdFindOp(StdFindOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op.getOperation()); @@ -343,6 +435,10 @@ void LoweringPreparePass::runOnOp(Operation *op) { lowerIterBeginOp(iterBegin); } else if (auto iterEnd = dyn_cast(op)) { lowerIterEndOp(iterEnd); + } else if (auto arrayCtor = dyn_cast(op)) { + lowerArrayCtor(arrayCtor); + } else if (auto arrayDtor = dyn_cast(op)) { + lowerArrayDtor(arrayDtor); } } @@ -355,7 +451,8 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { - if (isa(op)) + if (isa( + op)) opsToTransform.push_back(op); }); diff --git a/clang/test/CIR/CodeGen/array-init-destroy.cpp b/clang/test/CIR/CodeGen/array-init-destroy.cpp index d672ef41ccf9..ced22c8906e9 100644 --- a/clang/test/CIR/CodeGen/array-init-destroy.cpp +++ b/clang/test/CIR/CodeGen/array-init-destroy.cpp @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck -check-prefix=BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t1.cir 2>&1 | FileCheck -check-prefix=BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t2.cir 2>&1 | FileCheck -check-prefix=AFTER %s void foo() noexcept; @@ -32,3 +33,29 @@ void x() { // BEFORE: cir.call @_ZN4xptoD1Ev(%arg0) : (!cir.ptr) -> () // BEFORE: cir.yield // BEFORE: } + +// AFTER: cir.func @_Z1xv() +// AFTER: %[[ArrayAddr0:.*]] = cir.alloca !cir.array +// AFTER: %[[ConstTwo:.*]] = cir.const(#cir.int<2> : !u64i) : !u64i +// AFTER: %[[Cast2:.*]] = cir.cast(array_to_ptrdecay, %[[ArrayAddr0]] : !cir.ptr>), !cir.ptr +// AFTER: %[[ArrayPastEnd:.*]] = cir.ptr_stride(%[[Cast2]] : !cir.ptr, %[[ConstTwo]] : !u64i), !cir.ptr +// AFTER: %[[TmpIdx:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["__array_idx"] {alignment = 1 : i64} +// AFTER: cir.do { +// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : cir.ptr >, !cir.ptr +// AFTER: %[[ConstOne:.*]] = cir.const(#cir.int<1> : !u64i) : !u64i +// AFTER: cir.call @_ZN4xptoC1Ev(%[[ArrayElt]]) : (!cir.ptr) -> () +// AFTER: %[[NextElt:.*]] = cir.ptr_stride(%[[ArrayElt]] : !cir.ptr, %[[ConstOne]] : !u64i), !cir.ptr +// AFTER: cir.store %[[NextElt]], %[[TmpIdx]] : !cir.ptr, cir.ptr > +// AFTER: cir.yield +// AFTER: } while { +// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : cir.ptr >, !cir.ptr +// AFTER: %[[ExitCond:.*]] = cir.cmp(eq, %[[ArrayElt]], %[[ArrayPastEnd]]) : !cir.ptr, !cir.bool +// AFTER: cir.condition(%[[ExitCond]]) +// AFTER: } + +// AFTER: cir.do { +// AFTER: cir.call @_ZN4xptoD1Ev({{.*}}) : (!cir.ptr) -> () +// AFTER: } while { +// AFTER: } + +// AFTER: cir.return \ No newline at end of file From 03c3a1be5d15aee00d8a1911b0affc822506e404 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 13 Mar 2024 19:06:07 -0700 Subject: [PATCH 1449/2301] [CIR][LoweringPrepare] Add a missing store for current array idx --- clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp | 1 + clang/test/CIR/CodeGen/array-init-destroy.cpp | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index a71921c3ac5d..d8734bede1ef 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -329,6 +329,7 @@ static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, builder.getSizeFromCharUnits(builder.getContext(), clang::CharUnits::One()), nullptr); + builder.create(loc, begin, tmpAddr); auto loop = builder.createDoWhile( loc, diff --git a/clang/test/CIR/CodeGen/array-init-destroy.cpp b/clang/test/CIR/CodeGen/array-init-destroy.cpp index ced22c8906e9..bb464a687bf2 100644 --- a/clang/test/CIR/CodeGen/array-init-destroy.cpp +++ b/clang/test/CIR/CodeGen/array-init-destroy.cpp @@ -37,9 +37,10 @@ void x() { // AFTER: cir.func @_Z1xv() // AFTER: %[[ArrayAddr0:.*]] = cir.alloca !cir.array // AFTER: %[[ConstTwo:.*]] = cir.const(#cir.int<2> : !u64i) : !u64i -// AFTER: %[[Cast2:.*]] = cir.cast(array_to_ptrdecay, %[[ArrayAddr0]] : !cir.ptr>), !cir.ptr -// AFTER: %[[ArrayPastEnd:.*]] = cir.ptr_stride(%[[Cast2]] : !cir.ptr, %[[ConstTwo]] : !u64i), !cir.ptr +// AFTER: %[[ArrayBegin:.*]] = cir.cast(array_to_ptrdecay, %[[ArrayAddr0]] : !cir.ptr>), !cir.ptr +// AFTER: %[[ArrayPastEnd:.*]] = cir.ptr_stride(%[[ArrayBegin]] : !cir.ptr, %[[ConstTwo]] : !u64i), !cir.ptr // AFTER: %[[TmpIdx:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["__array_idx"] {alignment = 1 : i64} +// AFTER: cir.store %[[ArrayBegin]], %[[TmpIdx]] : !cir.ptr, cir.ptr > // AFTER: cir.do { // AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : cir.ptr >, !cir.ptr // AFTER: %[[ConstOne:.*]] = cir.const(#cir.int<1> : !u64i) : !u64i From 0cdbe27b01c4f71f62adb08b9a94f4037b061ced Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Mar 2024 14:34:04 -0700 Subject: [PATCH 1450/2301] [CIR][CIRGen][NFC] Skeleton for atomics support This doesn't change existing functionality, for existing crashes related to atomics we just hit asserts a bit further now, but no support added just yet. Next set of commits will introduce functionlity with testcases. --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 809 +++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 + clang/lib/CIR/CodeGen/CIRGenValue.h | 1 + clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + 5 files changed, 816 insertions(+), 1 deletion(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenAtomic.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp new file mode 100644 index 000000000000..ec33e9639f83 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -0,0 +1,809 @@ +//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the code for emitting atomic operations. +// +//===----------------------------------------------------------------------===// + +#include "Address.h" +#include "CIRDataLayout.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "CIRGenOpenMPRuntime.h" +#include "TargetInfo.h" +#include "UnimplementedFeatureGuarding.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CodeGen/CGFunctionInfo.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "llvm/Support/ErrorHandling.h" +#include + +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Value.h" + +using namespace cir; +using namespace clang; + +namespace { +class AtomicInfo { + CIRGenFunction &CGF; + QualType AtomicTy; + QualType ValueTy; + uint64_t AtomicSizeInBits; + uint64_t ValueSizeInBits; + CharUnits AtomicAlign; + CharUnits ValueAlign; + TypeEvaluationKind EvaluationKind; + bool UseLibcall; + LValue LVal; + CIRGenBitFieldInfo BFI; + mlir::Location loc; + +public: + AtomicInfo(CIRGenFunction &CGF, LValue &lvalue, mlir::Location l) + : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0), + EvaluationKind(TEK_Scalar), UseLibcall(true), loc(l) { + assert(!lvalue.isGlobalReg()); + ASTContext &C = CGF.getContext(); + if (lvalue.isSimple()) { + AtomicTy = lvalue.getType(); + if (auto *ATy = AtomicTy->getAs()) + ValueTy = ATy->getValueType(); + else + ValueTy = AtomicTy; + EvaluationKind = CGF.getEvaluationKind(ValueTy); + + uint64_t ValueAlignInBits; + uint64_t AtomicAlignInBits; + TypeInfo ValueTI = C.getTypeInfo(ValueTy); + ValueSizeInBits = ValueTI.Width; + ValueAlignInBits = ValueTI.Align; + + TypeInfo AtomicTI = C.getTypeInfo(AtomicTy); + AtomicSizeInBits = AtomicTI.Width; + AtomicAlignInBits = AtomicTI.Align; + + assert(ValueSizeInBits <= AtomicSizeInBits); + assert(ValueAlignInBits <= AtomicAlignInBits); + + AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits); + ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits); + if (lvalue.getAlignment().isZero()) + lvalue.setAlignment(AtomicAlign); + + LVal = lvalue; + } else if (lvalue.isBitField()) { + llvm_unreachable("NYI"); + } else if (lvalue.isVectorElt()) { + ValueTy = lvalue.getType()->castAs()->getElementType(); + ValueSizeInBits = C.getTypeSize(ValueTy); + AtomicTy = lvalue.getType(); + AtomicSizeInBits = C.getTypeSize(AtomicTy); + AtomicAlign = ValueAlign = lvalue.getAlignment(); + LVal = lvalue; + } else { + llvm_unreachable("NYI"); + } + UseLibcall = !C.getTargetInfo().hasBuiltinAtomic( + AtomicSizeInBits, C.toBits(lvalue.getAlignment())); + } + + QualType getAtomicType() const { return AtomicTy; } + QualType getValueType() const { return ValueTy; } + CharUnits getAtomicAlignment() const { return AtomicAlign; } + uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; } + uint64_t getValueSizeInBits() const { return ValueSizeInBits; } + TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; } + bool shouldUseLibcall() const { return UseLibcall; } + const LValue &getAtomicLValue() const { return LVal; } + mlir::Value getAtomicPointer() const { + if (LVal.isSimple()) + return LVal.getPointer(); + else if (LVal.isBitField()) + return LVal.getBitFieldPointer(); + else if (LVal.isVectorElt()) + return LVal.getVectorPointer(); + assert(LVal.isExtVectorElt()); + // TODO(cir): return LVal.getExtVectorPointer(); + llvm_unreachable("NYI"); + } + Address getAtomicAddress() const { + mlir::Type ElTy; + if (LVal.isSimple()) + ElTy = LVal.getAddress().getElementType(); + else if (LVal.isBitField()) + ElTy = LVal.getBitFieldAddress().getElementType(); + else if (LVal.isVectorElt()) + ElTy = LVal.getVectorAddress().getElementType(); + else // TODO(cir): ElTy = LVal.getExtVectorAddress().getElementType(); + llvm_unreachable("NYI"); + return Address(getAtomicPointer(), ElTy, getAtomicAlignment()); + } + + Address getAtomicAddressAsAtomicIntPointer() const { + return castToAtomicIntPointer(getAtomicAddress()); + } + + /// Is the atomic size larger than the underlying value type? + /// + /// Note that the absence of padding does not mean that atomic + /// objects are completely interchangeable with non-atomic + /// objects: we might have promoted the alignment of a type + /// without making it bigger. + bool hasPadding() const { return (ValueSizeInBits != AtomicSizeInBits); } + + bool emitMemSetZeroIfNecessary() const; + + mlir::Value getAtomicSizeValue() const { llvm_unreachable("NYI"); } + + /// Cast the given pointer to an integer pointer suitable for atomic + /// operations if the source. + Address castToAtomicIntPointer(Address Addr) const; + + /// If Addr is compatible with the iN that will be used for an atomic + /// operation, bitcast it. Otherwise, create a temporary that is suitable + /// and copy the value across. + Address convertToAtomicIntPointer(Address Addr) const; + + /// Turn an atomic-layout object into an r-value. + RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot, + SourceLocation loc, bool AsValue) const; + + /// Converts a rvalue to integer value. + mlir::Value convertRValueToInt(RValue RVal) const; + + RValue ConvertIntToValueOrAtomic(mlir::Value IntVal, AggValueSlot ResultSlot, + SourceLocation Loc, bool AsValue) const; + + /// Copy an atomic r-value into atomic-layout memory. + void emitCopyIntoMemory(RValue rvalue) const; + + /// Project an l-value down to the value field. + LValue projectValue() const { + assert(LVal.isSimple()); + Address addr = getAtomicAddress(); + if (hasPadding()) + llvm_unreachable("NYI"); + + return LValue::makeAddr(addr, getValueType(), CGF.getContext(), + LVal.getBaseInfo()); + } + + /// Emits atomic load. + /// \returns Loaded value. + RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc, + bool AsValue, llvm::AtomicOrdering AO, bool IsVolatile); + + /// Emits atomic compare-and-exchange sequence. + /// \param Expected Expected value. + /// \param Desired Desired value. + /// \param Success Atomic ordering for success operation. + /// \param Failure Atomic ordering for failed operation. + /// \param IsWeak true if atomic operation is weak, false otherwise. + /// \returns Pair of values: previous value from storage (value type) and + /// boolean flag (i1 type) with true if success and false otherwise. + std::pair + EmitAtomicCompareExchange(RValue Expected, RValue Desired, + llvm::AtomicOrdering Success = + llvm::AtomicOrdering::SequentiallyConsistent, + llvm::AtomicOrdering Failure = + llvm::AtomicOrdering::SequentiallyConsistent, + bool IsWeak = false); + + /// Emits atomic update. + /// \param AO Atomic ordering. + /// \param UpdateOp Update operation for the current lvalue. + void EmitAtomicUpdate(llvm::AtomicOrdering AO, + const llvm::function_ref &UpdateOp, + bool IsVolatile); + /// Emits atomic update. + /// \param AO Atomic ordering. + void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal, + bool IsVolatile); + + /// Materialize an atomic r-value in atomic-layout memory. + Address materializeRValue(RValue rvalue) const; + + /// Creates temp alloca for intermediate operations on atomic value. + Address CreateTempAlloca() const; + +private: + bool requiresMemSetZero(llvm::Type *type) const; + + /// Emits atomic load as a libcall. + void EmitAtomicLoadLibcall(mlir::Value AddForLoaded, llvm::AtomicOrdering AO, + bool IsVolatile); + /// Emits atomic load as LLVM instruction. + mlir::Value EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile); + /// Emits atomic compare-and-exchange op as a libcall. + mlir::Value EmitAtomicCompareExchangeLibcall( + mlir::Value ExpectedAddr, mlir::Value DesiredAddr, + llvm::AtomicOrdering Success = + llvm::AtomicOrdering::SequentiallyConsistent, + llvm::AtomicOrdering Failure = + llvm::AtomicOrdering::SequentiallyConsistent); + /// Emits atomic compare-and-exchange op as LLVM instruction. + std::pair + EmitAtomicCompareExchangeOp(mlir::Value ExpectedVal, mlir::Value DesiredVal, + llvm::AtomicOrdering Success = + llvm::AtomicOrdering::SequentiallyConsistent, + llvm::AtomicOrdering Failure = + llvm::AtomicOrdering::SequentiallyConsistent, + bool IsWeak = false); + /// Emit atomic update as libcalls. + void + EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, + const llvm::function_ref &UpdateOp, + bool IsVolatile); + /// Emit atomic update as LLVM instructions. + void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, + const llvm::function_ref &UpdateOp, + bool IsVolatile); + /// Emit atomic update as libcalls. + void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal, + bool IsVolatile); + /// Emit atomic update as LLVM instructions. + void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal, + bool IsVolatile); +}; +} // namespace + +// This function emits any expression (scalar, complex, or aggregate) +// into a temporary alloca. +static Address buildValToTemp(CIRGenFunction &CGF, Expr *E) { + Address DeclPtr = CGF.CreateMemTemp( + E->getType(), CGF.getLoc(E->getSourceRange()), ".atomictmp"); + CGF.buildAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(), + /*Init*/ true); + return DeclPtr; +} + +Address AtomicInfo::castToAtomicIntPointer(Address addr) const { + auto ty = CGF.getBuilder().getUIntNTy(AtomicSizeInBits); + return addr.withElementType(ty); +} + +Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const { + auto Ty = Addr.getElementType(); + uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty); + if (SourceSizeInBits != AtomicSizeInBits) { + llvm_unreachable("NYI"); + } + + return castToAtomicIntPointer(Addr); +} + +Address AtomicInfo::CreateTempAlloca() const { + Address TempAlloca = CGF.CreateMemTemp( + (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy + : AtomicTy, + getAtomicAlignment(), loc, "atomic-temp"); + // Cast to pointer to value type for bitfields. + if (LVal.isBitField()) { + llvm_unreachable("NYI"); + } + return TempAlloca; +} + +RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { + QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); + QualType MemTy = AtomicTy; + if (const AtomicType *AT = AtomicTy->getAs()) + MemTy = AT->getValueType(); + // mlir::Value IsWeak = nullptr, *OrderFail = nullptr; + + Address Val1 = Address::invalid(); + Address Val2 = Address::invalid(); + Address Dest = Address::invalid(); + Address Ptr = buildPointerWithAlignment(E->getPtr()); + + if (E->getOp() == AtomicExpr::AO__c11_atomic_init || + E->getOp() == AtomicExpr::AO__opencl_atomic_init) { + llvm_unreachable("NYI"); + } + + auto TInfo = getContext().getTypeInfoInChars(AtomicTy); + uint64_t Size = TInfo.Width.getQuantity(); + unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth(); + + bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits; + bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0; + bool UseLibcall = Misaligned | Oversized; + bool ShouldCastToIntPtrTy = true; + + CharUnits MaxInlineWidth = + getContext().toCharUnitsFromBits(MaxInlineWidthInBits); + + DiagnosticsEngine &Diags = CGM.getDiags(); + + if (Misaligned) { + Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned) + << (int)TInfo.Width.getQuantity() + << (int)Ptr.getAlignment().getQuantity(); + } + + if (Oversized) { + Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized) + << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity(); + } + + [[maybe_unused]] auto Order = buildScalarExpr(E->getOrder()); + [[maybe_unused]] auto Scope = + E->getScopeModel() ? buildScalarExpr(E->getScope()) : nullptr; + + switch (E->getOp()) { + case AtomicExpr::AO__c11_atomic_init: + case AtomicExpr::AO__opencl_atomic_init: + llvm_unreachable("Already handled above with EmitAtomicInit!"); + + case AtomicExpr::AO__atomic_load_n: + case AtomicExpr::AO__scoped_atomic_load_n: + case AtomicExpr::AO__c11_atomic_load: + case AtomicExpr::AO__opencl_atomic_load: + case AtomicExpr::AO__hip_atomic_load: + break; + + case AtomicExpr::AO__atomic_load: + case AtomicExpr::AO__scoped_atomic_load: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_store: + case AtomicExpr::AO__scoped_atomic_store: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_exchange: + case AtomicExpr::AO__scoped_atomic_exchange: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_compare_exchange: + case AtomicExpr::AO__atomic_compare_exchange_n: + case AtomicExpr::AO__c11_atomic_compare_exchange_weak: + case AtomicExpr::AO__c11_atomic_compare_exchange_strong: + case AtomicExpr::AO__hip_atomic_compare_exchange_weak: + case AtomicExpr::AO__hip_atomic_compare_exchange_strong: + case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: + case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: + case AtomicExpr::AO__scoped_atomic_compare_exchange: + case AtomicExpr::AO__scoped_atomic_compare_exchange_n: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__c11_atomic_fetch_add: + case AtomicExpr::AO__c11_atomic_fetch_sub: + case AtomicExpr::AO__hip_atomic_fetch_add: + case AtomicExpr::AO__hip_atomic_fetch_sub: + case AtomicExpr::AO__opencl_atomic_fetch_add: + case AtomicExpr::AO__opencl_atomic_fetch_sub: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__atomic_fetch_add: + case AtomicExpr::AO__atomic_fetch_max: + case AtomicExpr::AO__atomic_fetch_min: + case AtomicExpr::AO__atomic_fetch_sub: + case AtomicExpr::AO__atomic_add_fetch: + case AtomicExpr::AO__atomic_max_fetch: + case AtomicExpr::AO__atomic_min_fetch: + case AtomicExpr::AO__atomic_sub_fetch: + case AtomicExpr::AO__c11_atomic_fetch_max: + case AtomicExpr::AO__c11_atomic_fetch_min: + case AtomicExpr::AO__opencl_atomic_fetch_max: + case AtomicExpr::AO__opencl_atomic_fetch_min: + case AtomicExpr::AO__hip_atomic_fetch_max: + case AtomicExpr::AO__hip_atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_fetch_add: + case AtomicExpr::AO__scoped_atomic_fetch_max: + case AtomicExpr::AO__scoped_atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_fetch_sub: + case AtomicExpr::AO__scoped_atomic_add_fetch: + case AtomicExpr::AO__scoped_atomic_max_fetch: + case AtomicExpr::AO__scoped_atomic_min_fetch: + case AtomicExpr::AO__scoped_atomic_sub_fetch: + ShouldCastToIntPtrTy = !MemTy->isFloatingType(); + [[fallthrough]]; + + case AtomicExpr::AO__atomic_fetch_and: + case AtomicExpr::AO__atomic_fetch_nand: + case AtomicExpr::AO__atomic_fetch_or: + case AtomicExpr::AO__atomic_fetch_xor: + case AtomicExpr::AO__atomic_and_fetch: + case AtomicExpr::AO__atomic_nand_fetch: + case AtomicExpr::AO__atomic_or_fetch: + case AtomicExpr::AO__atomic_xor_fetch: + case AtomicExpr::AO__atomic_store_n: + case AtomicExpr::AO__atomic_exchange_n: + case AtomicExpr::AO__c11_atomic_fetch_and: + case AtomicExpr::AO__c11_atomic_fetch_nand: + case AtomicExpr::AO__c11_atomic_fetch_or: + case AtomicExpr::AO__c11_atomic_fetch_xor: + case AtomicExpr::AO__c11_atomic_store: + case AtomicExpr::AO__c11_atomic_exchange: + case AtomicExpr::AO__hip_atomic_fetch_and: + case AtomicExpr::AO__hip_atomic_fetch_or: + case AtomicExpr::AO__hip_atomic_fetch_xor: + case AtomicExpr::AO__hip_atomic_store: + case AtomicExpr::AO__hip_atomic_exchange: + case AtomicExpr::AO__opencl_atomic_fetch_and: + case AtomicExpr::AO__opencl_atomic_fetch_or: + case AtomicExpr::AO__opencl_atomic_fetch_xor: + case AtomicExpr::AO__opencl_atomic_store: + case AtomicExpr::AO__opencl_atomic_exchange: + case AtomicExpr::AO__scoped_atomic_fetch_and: + case AtomicExpr::AO__scoped_atomic_fetch_nand: + case AtomicExpr::AO__scoped_atomic_fetch_or: + case AtomicExpr::AO__scoped_atomic_fetch_xor: + case AtomicExpr::AO__scoped_atomic_and_fetch: + case AtomicExpr::AO__scoped_atomic_nand_fetch: + case AtomicExpr::AO__scoped_atomic_or_fetch: + case AtomicExpr::AO__scoped_atomic_xor_fetch: + case AtomicExpr::AO__scoped_atomic_store_n: + case AtomicExpr::AO__scoped_atomic_exchange_n: + Val1 = buildValToTemp(*this, E->getVal1()); + break; + } + + QualType RValTy = E->getType().getUnqualifiedType(); + + // The inlined atomics only function on iN types, where N is a power of 2. We + // need to make sure (via temporaries if necessary) that all incoming values + // are compatible. + LValue AtomicVal = makeAddrLValue(Ptr, AtomicTy); + AtomicInfo Atomics(*this, AtomicVal, getLoc(E->getSourceRange())); + + if (ShouldCastToIntPtrTy) { + Ptr = Atomics.castToAtomicIntPointer(Ptr); + if (Val1.isValid()) + Val1 = Atomics.convertToAtomicIntPointer(Val1); + if (Val2.isValid()) + Val2 = Atomics.convertToAtomicIntPointer(Val2); + } + if (Dest.isValid()) { + llvm_unreachable("NYI"); + } else if (E->isCmpXChg()) + llvm_unreachable("NYI"); + else if (!RValTy->isVoidType()) { + Dest = Atomics.CreateTempAlloca(); + if (ShouldCastToIntPtrTy) + Dest = Atomics.castToAtomicIntPointer(Dest); + } + + // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary . + if (UseLibcall) { + bool UseOptimizedLibcall = false; + switch (E->getOp()) { + case AtomicExpr::AO__c11_atomic_init: + case AtomicExpr::AO__opencl_atomic_init: + llvm_unreachable("Already handled above with EmitAtomicInit!"); + + case AtomicExpr::AO__atomic_fetch_add: + case AtomicExpr::AO__atomic_fetch_and: + case AtomicExpr::AO__atomic_fetch_max: + case AtomicExpr::AO__atomic_fetch_min: + case AtomicExpr::AO__atomic_fetch_nand: + case AtomicExpr::AO__atomic_fetch_or: + case AtomicExpr::AO__atomic_fetch_sub: + case AtomicExpr::AO__atomic_fetch_xor: + case AtomicExpr::AO__atomic_add_fetch: + case AtomicExpr::AO__atomic_and_fetch: + case AtomicExpr::AO__atomic_max_fetch: + case AtomicExpr::AO__atomic_min_fetch: + case AtomicExpr::AO__atomic_nand_fetch: + case AtomicExpr::AO__atomic_or_fetch: + case AtomicExpr::AO__atomic_sub_fetch: + case AtomicExpr::AO__atomic_xor_fetch: + case AtomicExpr::AO__c11_atomic_fetch_add: + case AtomicExpr::AO__c11_atomic_fetch_and: + case AtomicExpr::AO__c11_atomic_fetch_max: + case AtomicExpr::AO__c11_atomic_fetch_min: + case AtomicExpr::AO__c11_atomic_fetch_nand: + case AtomicExpr::AO__c11_atomic_fetch_or: + case AtomicExpr::AO__c11_atomic_fetch_sub: + case AtomicExpr::AO__c11_atomic_fetch_xor: + case AtomicExpr::AO__hip_atomic_fetch_add: + case AtomicExpr::AO__hip_atomic_fetch_and: + case AtomicExpr::AO__hip_atomic_fetch_max: + case AtomicExpr::AO__hip_atomic_fetch_min: + case AtomicExpr::AO__hip_atomic_fetch_or: + case AtomicExpr::AO__hip_atomic_fetch_sub: + case AtomicExpr::AO__hip_atomic_fetch_xor: + case AtomicExpr::AO__opencl_atomic_fetch_add: + case AtomicExpr::AO__opencl_atomic_fetch_and: + case AtomicExpr::AO__opencl_atomic_fetch_max: + case AtomicExpr::AO__opencl_atomic_fetch_min: + case AtomicExpr::AO__opencl_atomic_fetch_or: + case AtomicExpr::AO__opencl_atomic_fetch_sub: + case AtomicExpr::AO__opencl_atomic_fetch_xor: + case AtomicExpr::AO__scoped_atomic_fetch_add: + case AtomicExpr::AO__scoped_atomic_fetch_and: + case AtomicExpr::AO__scoped_atomic_fetch_max: + case AtomicExpr::AO__scoped_atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_fetch_nand: + case AtomicExpr::AO__scoped_atomic_fetch_or: + case AtomicExpr::AO__scoped_atomic_fetch_sub: + case AtomicExpr::AO__scoped_atomic_fetch_xor: + case AtomicExpr::AO__scoped_atomic_add_fetch: + case AtomicExpr::AO__scoped_atomic_and_fetch: + case AtomicExpr::AO__scoped_atomic_max_fetch: + case AtomicExpr::AO__scoped_atomic_min_fetch: + case AtomicExpr::AO__scoped_atomic_nand_fetch: + case AtomicExpr::AO__scoped_atomic_or_fetch: + case AtomicExpr::AO__scoped_atomic_sub_fetch: + case AtomicExpr::AO__scoped_atomic_xor_fetch: + // For these, only library calls for certain sizes exist. + UseOptimizedLibcall = true; + break; + + case AtomicExpr::AO__atomic_load: + case AtomicExpr::AO__atomic_store: + case AtomicExpr::AO__atomic_exchange: + case AtomicExpr::AO__atomic_compare_exchange: + case AtomicExpr::AO__scoped_atomic_load: + case AtomicExpr::AO__scoped_atomic_store: + case AtomicExpr::AO__scoped_atomic_exchange: + case AtomicExpr::AO__scoped_atomic_compare_exchange: + // Use the generic version if we don't know that the operand will be + // suitably aligned for the optimized version. + if (Misaligned) + break; + [[fallthrough]]; + case AtomicExpr::AO__atomic_load_n: + case AtomicExpr::AO__atomic_store_n: + case AtomicExpr::AO__atomic_exchange_n: + case AtomicExpr::AO__atomic_compare_exchange_n: + case AtomicExpr::AO__c11_atomic_load: + case AtomicExpr::AO__c11_atomic_store: + case AtomicExpr::AO__c11_atomic_exchange: + case AtomicExpr::AO__c11_atomic_compare_exchange_weak: + case AtomicExpr::AO__c11_atomic_compare_exchange_strong: + case AtomicExpr::AO__hip_atomic_load: + case AtomicExpr::AO__hip_atomic_store: + case AtomicExpr::AO__hip_atomic_exchange: + case AtomicExpr::AO__hip_atomic_compare_exchange_weak: + case AtomicExpr::AO__hip_atomic_compare_exchange_strong: + case AtomicExpr::AO__opencl_atomic_load: + case AtomicExpr::AO__opencl_atomic_store: + case AtomicExpr::AO__opencl_atomic_exchange: + case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: + case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: + case AtomicExpr::AO__scoped_atomic_load_n: + case AtomicExpr::AO__scoped_atomic_store_n: + case AtomicExpr::AO__scoped_atomic_exchange_n: + case AtomicExpr::AO__scoped_atomic_compare_exchange_n: + // Only use optimized library calls for sizes for which they exist. + // FIXME: Size == 16 optimized library functions exist too. + if (Size == 1 || Size == 2 || Size == 4 || Size == 8) + UseOptimizedLibcall = true; + break; + } + + CallArgList Args; + if (!UseOptimizedLibcall) { + llvm_unreachable("NYI"); + } + // TODO(cir): Atomic address is the first or second parameter + // The OpenCL atomic library functions only accept pointer arguments to + // generic address space. + llvm_unreachable("NYI"); + + std::string LibCallName; + [[maybe_unused]] QualType LoweredMemTy = + MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy; + QualType RetTy; + [[maybe_unused]] bool HaveRetTy = false; + [[maybe_unused]] bool PostOpMinMax = false; + switch (E->getOp()) { + case AtomicExpr::AO__c11_atomic_init: + case AtomicExpr::AO__opencl_atomic_init: + llvm_unreachable("Already handled!"); + + // There is only one libcall for compare an exchange, because there is no + // optimisation benefit possible from a libcall version of a weak compare + // and exchange. + // bool __atomic_compare_exchange(size_t size, void *mem, void *expected, + // void *desired, int success, int failure) + // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired, + // int success, int failure) + case AtomicExpr::AO__atomic_compare_exchange: + case AtomicExpr::AO__atomic_compare_exchange_n: + case AtomicExpr::AO__c11_atomic_compare_exchange_weak: + case AtomicExpr::AO__c11_atomic_compare_exchange_strong: + case AtomicExpr::AO__hip_atomic_compare_exchange_weak: + case AtomicExpr::AO__hip_atomic_compare_exchange_strong: + case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: + case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: + case AtomicExpr::AO__scoped_atomic_compare_exchange: + case AtomicExpr::AO__scoped_atomic_compare_exchange_n: + LibCallName = "__atomic_compare_exchange"; + llvm_unreachable("NYI"); + break; + // void __atomic_exchange(size_t size, void *mem, void *val, void *return, + // int order) + // T __atomic_exchange_N(T *mem, T val, int order) + case AtomicExpr::AO__atomic_exchange: + case AtomicExpr::AO__atomic_exchange_n: + case AtomicExpr::AO__c11_atomic_exchange: + case AtomicExpr::AO__hip_atomic_exchange: + case AtomicExpr::AO__opencl_atomic_exchange: + case AtomicExpr::AO__scoped_atomic_exchange: + case AtomicExpr::AO__scoped_atomic_exchange_n: + LibCallName = "__atomic_exchange"; + llvm_unreachable("NYI"); + break; + // void __atomic_store(size_t size, void *mem, void *val, int order) + // void __atomic_store_N(T *mem, T val, int order) + case AtomicExpr::AO__atomic_store: + case AtomicExpr::AO__atomic_store_n: + case AtomicExpr::AO__c11_atomic_store: + case AtomicExpr::AO__hip_atomic_store: + case AtomicExpr::AO__opencl_atomic_store: + case AtomicExpr::AO__scoped_atomic_store: + case AtomicExpr::AO__scoped_atomic_store_n: + LibCallName = "__atomic_store"; + llvm_unreachable("NYI"); + break; + // void __atomic_load(size_t size, void *mem, void *return, int order) + // T __atomic_load_N(T *mem, int order) + case AtomicExpr::AO__atomic_load: + case AtomicExpr::AO__atomic_load_n: + case AtomicExpr::AO__c11_atomic_load: + case AtomicExpr::AO__hip_atomic_load: + case AtomicExpr::AO__opencl_atomic_load: + case AtomicExpr::AO__scoped_atomic_load: + case AtomicExpr::AO__scoped_atomic_load_n: + LibCallName = "__atomic_load"; + llvm_unreachable("NYI"); + break; + // T __atomic_add_fetch_N(T *mem, T val, int order) + // T __atomic_fetch_add_N(T *mem, T val, int order) + case AtomicExpr::AO__atomic_add_fetch: + case AtomicExpr::AO__scoped_atomic_add_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__atomic_fetch_add: + case AtomicExpr::AO__c11_atomic_fetch_add: + case AtomicExpr::AO__hip_atomic_fetch_add: + case AtomicExpr::AO__opencl_atomic_fetch_add: + case AtomicExpr::AO__scoped_atomic_fetch_add: + LibCallName = "__atomic_fetch_add"; + llvm_unreachable("NYI"); + break; + // T __atomic_and_fetch_N(T *mem, T val, int order) + // T __atomic_fetch_and_N(T *mem, T val, int order) + case AtomicExpr::AO__atomic_and_fetch: + case AtomicExpr::AO__scoped_atomic_and_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__atomic_fetch_and: + case AtomicExpr::AO__c11_atomic_fetch_and: + case AtomicExpr::AO__hip_atomic_fetch_and: + case AtomicExpr::AO__opencl_atomic_fetch_and: + case AtomicExpr::AO__scoped_atomic_fetch_and: + LibCallName = "__atomic_fetch_and"; + llvm_unreachable("NYI"); + break; + // T __atomic_or_fetch_N(T *mem, T val, int order) + // T __atomic_fetch_or_N(T *mem, T val, int order) + case AtomicExpr::AO__atomic_or_fetch: + case AtomicExpr::AO__scoped_atomic_or_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__atomic_fetch_or: + case AtomicExpr::AO__c11_atomic_fetch_or: + case AtomicExpr::AO__hip_atomic_fetch_or: + case AtomicExpr::AO__opencl_atomic_fetch_or: + case AtomicExpr::AO__scoped_atomic_fetch_or: + LibCallName = "__atomic_fetch_or"; + llvm_unreachable("NYI"); + break; + // T __atomic_sub_fetch_N(T *mem, T val, int order) + // T __atomic_fetch_sub_N(T *mem, T val, int order) + case AtomicExpr::AO__atomic_sub_fetch: + case AtomicExpr::AO__scoped_atomic_sub_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__atomic_fetch_sub: + case AtomicExpr::AO__c11_atomic_fetch_sub: + case AtomicExpr::AO__hip_atomic_fetch_sub: + case AtomicExpr::AO__opencl_atomic_fetch_sub: + case AtomicExpr::AO__scoped_atomic_fetch_sub: + LibCallName = "__atomic_fetch_sub"; + llvm_unreachable("NYI"); + break; + // T __atomic_xor_fetch_N(T *mem, T val, int order) + // T __atomic_fetch_xor_N(T *mem, T val, int order) + case AtomicExpr::AO__atomic_xor_fetch: + case AtomicExpr::AO__scoped_atomic_xor_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__atomic_fetch_xor: + case AtomicExpr::AO__c11_atomic_fetch_xor: + case AtomicExpr::AO__hip_atomic_fetch_xor: + case AtomicExpr::AO__opencl_atomic_fetch_xor: + case AtomicExpr::AO__scoped_atomic_fetch_xor: + LibCallName = "__atomic_fetch_xor"; + llvm_unreachable("NYI"); + break; + case AtomicExpr::AO__atomic_min_fetch: + case AtomicExpr::AO__scoped_atomic_min_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__atomic_fetch_min: + case AtomicExpr::AO__c11_atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_fetch_min: + case AtomicExpr::AO__hip_atomic_fetch_min: + case AtomicExpr::AO__opencl_atomic_fetch_min: + LibCallName = E->getValueType()->isSignedIntegerType() + ? "__atomic_fetch_min" + : "__atomic_fetch_umin"; + llvm_unreachable("NYI"); + break; + case AtomicExpr::AO__atomic_max_fetch: + case AtomicExpr::AO__scoped_atomic_max_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__atomic_fetch_max: + case AtomicExpr::AO__c11_atomic_fetch_max: + case AtomicExpr::AO__hip_atomic_fetch_max: + case AtomicExpr::AO__opencl_atomic_fetch_max: + case AtomicExpr::AO__scoped_atomic_fetch_max: + LibCallName = E->getValueType()->isSignedIntegerType() + ? "__atomic_fetch_max" + : "__atomic_fetch_umax"; + llvm_unreachable("NYI"); + break; + // T __atomic_nand_fetch_N(T *mem, T val, int order) + // T __atomic_fetch_nand_N(T *mem, T val, int order) + case AtomicExpr::AO__atomic_nand_fetch: + case AtomicExpr::AO__scoped_atomic_nand_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__atomic_fetch_nand: + case AtomicExpr::AO__c11_atomic_fetch_nand: + case AtomicExpr::AO__scoped_atomic_fetch_nand: + LibCallName = "__atomic_fetch_nand"; + llvm_unreachable("NYI"); + break; + } + + if (E->isOpenCL()) { + LibCallName = + std::string("__opencl") + StringRef(LibCallName).drop_front(1).str(); + } + // Optimized functions have the size in their name. + if (UseOptimizedLibcall) { + llvm_unreachable("NYI"); + } + // By default, assume we return a value of the atomic type. + llvm_unreachable("NYI"); + } + + [[maybe_unused]] bool IsStore = + E->getOp() == AtomicExpr::AO__c11_atomic_store || + E->getOp() == AtomicExpr::AO__opencl_atomic_store || + E->getOp() == AtomicExpr::AO__hip_atomic_store || + E->getOp() == AtomicExpr::AO__atomic_store || + E->getOp() == AtomicExpr::AO__atomic_store_n || + E->getOp() == AtomicExpr::AO__scoped_atomic_store || + E->getOp() == AtomicExpr::AO__scoped_atomic_store_n; + [[maybe_unused]] bool IsLoad = + E->getOp() == AtomicExpr::AO__c11_atomic_load || + E->getOp() == AtomicExpr::AO__opencl_atomic_load || + E->getOp() == AtomicExpr::AO__hip_atomic_load || + E->getOp() == AtomicExpr::AO__atomic_load || + E->getOp() == AtomicExpr::AO__atomic_load_n || + E->getOp() == AtomicExpr::AO__scoped_atomic_load || + E->getOp() == AtomicExpr::AO__scoped_atomic_load_n; + llvm_unreachable("NYI"); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 1f78dfd0fa75..91559b08fa7e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -691,7 +691,9 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitAsTypeExpr(AsTypeExpr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitAtomicExpr(AtomicExpr *E) { + return CGF.buildAtomicExpr(E).getScalarVal(); + } // Emit a conversion from the specified type to the specified destination // type, both of which are CIR scalar types. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 0c73811106de..222a7162b3f3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1478,6 +1478,8 @@ class CIRGenFunction : public CIRGenTypeCache { void buildCXXThrowExpr(const CXXThrowExpr *E); + RValue buildAtomicExpr(AtomicExpr *E); + /// Return the address of a local variable. Address GetAddrOfLocalVar(const clang::VarDecl *VD) { auto it = LocalDeclMap.find(VD); diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 86b6f5443856..4862d32df245 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -246,6 +246,7 @@ class LValue { clang::CharUnits getAlignment() const { return clang::CharUnits::fromQuantity(Alignment); } + void setAlignment(clang::CharUnits A) { Alignment = A.getQuantity(); } Address getAddress() const { return Address(getPointer(), ElementType, getAlignment()); diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 1f9d0c6d1c0b..154aefbdba02 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -8,6 +8,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR CIRAsm.cpp + CIRGenAtomic.cpp CIRGenBuiltin.cpp CIRGenCXX.cpp CIRGenCXXABI.cpp From 14064337aede9ef8636ad6e2c7981b0a3a53ad8e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Mar 2024 16:50:23 -0700 Subject: [PATCH 1451/2301] [CIR][CIRGen][Atomics][NFC] Skeleton for constant order codegen Just like previous commit, add more infra pieces, still NFC since all relevant testcases hit asserts, just a bit deeper. --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 254 +++++++++++++++++- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 2 files changed, 251 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index ec33e9639f83..8f8ccd0e87a7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -295,12 +295,214 @@ Address AtomicInfo::CreateTempAlloca() const { return TempAlloca; } +// If the value comes from a ConstOp + IntAttr, retrieve and skip a series +// of casts if necessary. +// +// FIXME(cir): figure out warning issue and move this to CIRBaseBuilder.h +static mlir::cir::IntAttr getConstOpIntAttr(mlir::Value v) { + mlir::Operation *op = v.getDefiningOp(); + mlir::cir::IntAttr constVal; + while (auto c = dyn_cast(op)) + op = c.getOperand().getDefiningOp(); + if (auto c = dyn_cast(op)) { + if (c.getType().isa()) + constVal = c.getValue().cast(); + } + return constVal; +} + +static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, + Address Ptr, Address Val1, Address Val2, + mlir::Value IsWeak, mlir::Value FailureOrder, + uint64_t Size, llvm::AtomicOrdering Order, + uint8_t Scope) { + assert(!UnimplementedFeature::syncScopeID()); + [[maybe_unused]] bool PostOpMinMax = false; + + switch (E->getOp()) { + case AtomicExpr::AO__c11_atomic_init: + case AtomicExpr::AO__opencl_atomic_init: + llvm_unreachable("Already handled!"); + + case AtomicExpr::AO__c11_atomic_compare_exchange_strong: + case AtomicExpr::AO__hip_atomic_compare_exchange_strong: + case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: + llvm_unreachable("NYI"); + return; + case AtomicExpr::AO__c11_atomic_compare_exchange_weak: + case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: + case AtomicExpr::AO__hip_atomic_compare_exchange_weak: + llvm_unreachable("NYI"); + return; + case AtomicExpr::AO__atomic_compare_exchange: + case AtomicExpr::AO__atomic_compare_exchange_n: + case AtomicExpr::AO__scoped_atomic_compare_exchange: + case AtomicExpr::AO__scoped_atomic_compare_exchange_n: { + llvm_unreachable("NYI"); + return; + } + case AtomicExpr::AO__c11_atomic_load: + case AtomicExpr::AO__opencl_atomic_load: + case AtomicExpr::AO__hip_atomic_load: + case AtomicExpr::AO__atomic_load_n: + case AtomicExpr::AO__atomic_load: + case AtomicExpr::AO__scoped_atomic_load_n: + case AtomicExpr::AO__scoped_atomic_load: { + llvm_unreachable("NYI"); + return; + } + + case AtomicExpr::AO__c11_atomic_store: + case AtomicExpr::AO__opencl_atomic_store: + case AtomicExpr::AO__hip_atomic_store: + case AtomicExpr::AO__atomic_store: + case AtomicExpr::AO__atomic_store_n: + case AtomicExpr::AO__scoped_atomic_store: + case AtomicExpr::AO__scoped_atomic_store_n: { + llvm_unreachable("NYI"); + return; + } + + case AtomicExpr::AO__c11_atomic_exchange: + case AtomicExpr::AO__hip_atomic_exchange: + case AtomicExpr::AO__opencl_atomic_exchange: + case AtomicExpr::AO__atomic_exchange_n: + case AtomicExpr::AO__atomic_exchange: + case AtomicExpr::AO__scoped_atomic_exchange_n: + case AtomicExpr::AO__scoped_atomic_exchange: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_add_fetch: + case AtomicExpr::AO__scoped_atomic_add_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_add: + case AtomicExpr::AO__hip_atomic_fetch_add: + case AtomicExpr::AO__opencl_atomic_fetch_add: + case AtomicExpr::AO__atomic_fetch_add: + case AtomicExpr::AO__scoped_atomic_fetch_add: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_sub_fetch: + case AtomicExpr::AO__scoped_atomic_sub_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_sub: + case AtomicExpr::AO__hip_atomic_fetch_sub: + case AtomicExpr::AO__opencl_atomic_fetch_sub: + case AtomicExpr::AO__atomic_fetch_sub: + case AtomicExpr::AO__scoped_atomic_fetch_sub: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_min_fetch: + case AtomicExpr::AO__scoped_atomic_min_fetch: + PostOpMinMax = true; + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_min: + case AtomicExpr::AO__hip_atomic_fetch_min: + case AtomicExpr::AO__opencl_atomic_fetch_min: + case AtomicExpr::AO__atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_fetch_min: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_max_fetch: + case AtomicExpr::AO__scoped_atomic_max_fetch: + PostOpMinMax = true; + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_max: + case AtomicExpr::AO__hip_atomic_fetch_max: + case AtomicExpr::AO__opencl_atomic_fetch_max: + case AtomicExpr::AO__atomic_fetch_max: + case AtomicExpr::AO__scoped_atomic_fetch_max: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_and_fetch: + case AtomicExpr::AO__scoped_atomic_and_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_and: + case AtomicExpr::AO__hip_atomic_fetch_and: + case AtomicExpr::AO__opencl_atomic_fetch_and: + case AtomicExpr::AO__atomic_fetch_and: + case AtomicExpr::AO__scoped_atomic_fetch_and: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_or_fetch: + case AtomicExpr::AO__scoped_atomic_or_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_or: + case AtomicExpr::AO__hip_atomic_fetch_or: + case AtomicExpr::AO__opencl_atomic_fetch_or: + case AtomicExpr::AO__atomic_fetch_or: + case AtomicExpr::AO__scoped_atomic_fetch_or: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_xor_fetch: + case AtomicExpr::AO__scoped_atomic_xor_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_xor: + case AtomicExpr::AO__hip_atomic_fetch_xor: + case AtomicExpr::AO__opencl_atomic_fetch_xor: + case AtomicExpr::AO__atomic_fetch_xor: + case AtomicExpr::AO__scoped_atomic_fetch_xor: + llvm_unreachable("NYI"); + break; + + case AtomicExpr::AO__atomic_nand_fetch: + case AtomicExpr::AO__scoped_atomic_nand_fetch: + llvm_unreachable("NYI"); + [[fallthrough]]; + case AtomicExpr::AO__c11_atomic_fetch_nand: + case AtomicExpr::AO__atomic_fetch_nand: + case AtomicExpr::AO__scoped_atomic_fetch_nand: + llvm_unreachable("NYI"); + break; + } + llvm_unreachable("NYI"); +} + +static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, + Address Ptr, Address Val1, Address Val2, + mlir::Value IsWeak, mlir::Value FailureOrder, + uint64_t Size, llvm::AtomicOrdering Order, + mlir::Value Scope) { + auto ScopeModel = Expr->getScopeModel(); + + // LLVM atomic instructions always have synch scope. If clang atomic + // expression has no scope operand, use default LLVM synch scope. + if (!ScopeModel) { + assert(!UnimplementedFeature::syncScopeID()); + buildAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, + Order, /*FIXME(cir): LLVM default scope*/ 1); + return; + } + + // Handle constant scope. + if (getConstOpIntAttr(Scope)) { + assert(!UnimplementedFeature::syncScopeID()); + llvm_unreachable("NYI"); + return; + } + + // Handle non-constant scope. + llvm_unreachable("NYI"); +} + RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); QualType MemTy = AtomicTy; if (const AtomicType *AT = AtomicTy->getAs()) MemTy = AT->getValueType(); - // mlir::Value IsWeak = nullptr, *OrderFail = nullptr; + mlir::Value IsWeak = nullptr, OrderFail = nullptr; Address Val1 = Address::invalid(); Address Val2 = Address::invalid(); @@ -337,9 +539,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity(); } - [[maybe_unused]] auto Order = buildScalarExpr(E->getOrder()); - [[maybe_unused]] auto Scope = - E->getScopeModel() ? buildScalarExpr(E->getScope()) : nullptr; + auto Order = buildScalarExpr(E->getOrder()); + auto Scope = E->getScopeModel() ? buildScalarExpr(E->getScope()) : nullptr; switch (E->getOp()) { case AtomicExpr::AO__c11_atomic_init: @@ -805,5 +1006,50 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { E->getOp() == AtomicExpr::AO__atomic_load_n || E->getOp() == AtomicExpr::AO__scoped_atomic_load || E->getOp() == AtomicExpr::AO__scoped_atomic_load_n; + + if (auto ordAttr = getConstOpIntAttr(Order)) { + // We should not ever get to a case where the ordering isn't a valid CABI + // value, but it's hard to enforce that in general. + auto ord = ordAttr.getUInt(); + if (llvm::isValidAtomicOrderingCABI(ord)) { + switch ((llvm::AtomicOrderingCABI)ord) { + case llvm::AtomicOrderingCABI::relaxed: + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + llvm::AtomicOrdering::Monotonic, Scope); + break; + case llvm::AtomicOrderingCABI::consume: + case llvm::AtomicOrderingCABI::acquire: + if (IsStore) + break; // Avoid crashing on code with undefined behavior + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + llvm::AtomicOrdering::Acquire, Scope); + break; + case llvm::AtomicOrderingCABI::release: + if (IsLoad) + break; // Avoid crashing on code with undefined behavior + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + llvm::AtomicOrdering::Release, Scope); + break; + case llvm::AtomicOrderingCABI::acq_rel: + if (IsLoad || IsStore) + break; // Avoid crashing on code with undefined behavior + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + llvm::AtomicOrdering::AcquireRelease, Scope); + break; + case llvm::AtomicOrderingCABI::seq_cst: + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + llvm::AtomicOrdering::SequentiallyConsistent, Scope); + break; + } + } + if (RValTy->isVoidType()) { + llvm_unreachable("NYI"); + } + + return convertTempToRValue(Dest.withElementType(convertTypeForMem(RValTy)), + RValTy, E->getExprLoc()); + } + + // Long case, when Order isn't obviously constant. llvm_unreachable("NYI"); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 3924ced09713..06bd8201834c 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -122,6 +122,7 @@ struct UnimplementedFeature { // Type qualifiers. static bool atomicTypes() { return false; } static bool volatileTypes() { return false; } + static bool syncScopeID() { return false; } static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } From 03879889af56c82210fb42ccb11a881c716183d6 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 15 Mar 2024 06:07:29 +0300 Subject: [PATCH 1452/2301] [CIR][Codegen] Initial support for packed structures (#473) This PR adds a support for packed structures. Basically, now both `pragma pack(...)` and `__attribute__((aligned(...)))` should work. The only problem is that `getAlignment` is not a total one - I fix only a couple of issues I faced with - for struct types and arrays. --- clang/lib/CIR/CodeGen/CIRDataLayout.h | 11 ++++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 - clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 3 ++ .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 48 +++++++++++++++++-- clang/test/CIR/CodeGen/packed-structs.c | 37 ++++++++++++++ 5 files changed, 95 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/CodeGen/packed-structs.c diff --git a/clang/lib/CIR/CodeGen/CIRDataLayout.h b/clang/lib/CIR/CodeGen/CIRDataLayout.h index bf9a49202cfb..ef9f737a5620 100644 --- a/clang/lib/CIR/CodeGen/CIRDataLayout.h +++ b/clang/lib/CIR/CodeGen/CIRDataLayout.h @@ -30,9 +30,18 @@ class CIRDataLayout { // `useABI` is `true` if not using prefered alignment. unsigned getAlignment(mlir::Type ty, bool useABI) const { + if (llvm::isa(ty)) { + auto sTy = ty.cast(); + if (sTy.getPacked() && useABI) + return 1; + } else if (llvm::isa(ty)) { + return getAlignment(ty.cast().getEltType(), useABI); + } + return useABI ? layout.getTypeABIAlignment(ty) : layout.getTypePreferredAlignment(ty); } + unsigned getABITypeAlign(mlir::Type ty) const { return getAlignment(ty, true); } @@ -60,7 +69,7 @@ class CIRDataLayout { /// returns 12 or 16 for x86_fp80, depending on alignment. unsigned getTypeAllocSize(mlir::Type Ty) const { // Round up to the next alignment boundary. - return llvm::alignTo(getTypeStoreSize(Ty), layout.getTypeABIAlignment(Ty)); + return llvm::alignTo(getTypeStoreSize(Ty), getABITypeAlign(Ty)); } unsigned getPointerTypeSizeInBits(mlir::Type Ty) const { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 6b291d0ffb16..a4d4ad7c9a77 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -175,7 +175,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { llvm::SmallVector members; auto structTy = type.dyn_cast(); assert(structTy && "expected cir.struct"); - assert(!packed && "unpacked struct is NYI"); // Collect members and check if they are all zero. bool isZero = true; @@ -200,7 +199,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::ConstStructAttr getAnonConstStruct(mlir::ArrayAttr arrayAttr, bool packed = false, mlir::Type ty = {}) { - assert(!packed && "NYI"); llvm::SmallVector members; for (auto &f : arrayAttr) { auto ta = f.dyn_cast(); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index a214ba50a8fa..1ccd34a5d4bd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -378,6 +378,9 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( ArrayRef UnpackedElems = Elems; llvm::SmallVector UnpackedElemStorage; if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) { + NaturalLayout = false; + Packed = true; + } else if (DesiredSize > AlignedSize) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index cf89319b9c62..59936e1187f3 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -67,6 +67,9 @@ struct CIRRecordLowering final { void lower(bool nonVirtualBaseType); void lowerUnion(); + /// Determines if we need a packed llvm struct. + void determinePacked(bool NVBaseType); + void computeVolatileBitfields(); void accumulateBases(); void accumulateVPtrs(); @@ -294,6 +297,11 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) { // TODO: implemented packed structs // TODO: implement padding // TODO: support zeroInit + + members.push_back(StorageInfo(Size, getUIntNType(8))); + determinePacked(nonVirtualBaseType); + members.pop_back(); + fillOutputFields(); computeVolatileBitfields(); } @@ -613,6 +621,41 @@ void CIRRecordLowering::accumulateFields() { } } +void CIRRecordLowering::determinePacked(bool NVBaseType) { + if (isPacked) + return; + CharUnits Alignment = CharUnits::One(); + CharUnits NVAlignment = CharUnits::One(); + CharUnits NVSize = !NVBaseType && cxxRecordDecl + ? astRecordLayout.getNonVirtualSize() + : CharUnits::Zero(); + for (std::vector::const_iterator Member = members.begin(), + MemberEnd = members.end(); + Member != MemberEnd; ++Member) { + if (!Member->data) + continue; + // If any member falls at an offset that it not a multiple of its alignment, + // then the entire record must be packed. + if (Member->offset % getAlignment(Member->data)) + isPacked = true; + if (Member->offset < NVSize) + NVAlignment = std::max(NVAlignment, getAlignment(Member->data)); + Alignment = std::max(Alignment, getAlignment(Member->data)); + } + // If the size of the record (the capstone's offset) is not a multiple of the + // record's alignment, it must be packed. + if (members.back().offset % Alignment) + isPacked = true; + // If the non-virtual sub-object is not a multiple of the non-virtual + // sub-object's alignment, it must be packed. We cannot have a packed + // non-virtual sub-object and an unpacked complete object or vise versa. + if (NVSize % NVAlignment) + isPacked = true; + // Update the alignment of the sentinel. + if (!isPacked) + members.back().data = getUIntNType(astContext.toBits(Alignment)); +} + std::unique_ptr CIRGenTypes::computeRecordLayout(const RecordDecl *D, mlir::cir::StructType *Ty) { @@ -645,9 +688,8 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, // Fill in the struct *after* computing the base type. Filling in the body // signifies that the type is no longer opaque and record layout is complete, // but we may need to recursively layout D while laying D out as a base type. - *Ty = - Builder.getCompleteStructTy(builder.fieldTypes, getRecordTypeName(D, ""), - /*packed=*/false, D); + *Ty = Builder.getCompleteStructTy( + builder.fieldTypes, getRecordTypeName(D, ""), builder.isPacked, D); auto RL = std::make_unique( Ty ? *Ty : mlir::cir::StructType{}, diff --git a/clang/test/CIR/CodeGen/packed-structs.c b/clang/test/CIR/CodeGen/packed-structs.c new file mode 100644 index 000000000000..ac9c1383cad5 --- /dev/null +++ b/clang/test/CIR/CodeGen/packed-structs.c @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#pragma pack(1) + +typedef struct { + int a0; + char a1; +} A; + +typedef struct { + int b0; + char b1; + A a[6]; +} B; + +typedef struct { + int c0; + char c1; +} __attribute__((aligned(2))) C; + + +// CHECK: !ty_22A22 = !cir.struct +// CHECK: !ty_22C22 = !cir.struct +// CHECK: !ty_22B22 = !cir.struct}> + +// CHECK: cir.func {{.*@foo()}} +// CHECK: %0 = cir.alloca !ty_22A22, cir.ptr , ["a"] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !ty_22B22, cir.ptr , ["b"] {alignment = 1 : i64} +// CHECK: %2 = cir.alloca !ty_22C22, cir.ptr , ["c"] {alignment = 2 : i64} +void foo() { + A a; + B b; + C c; +} + + From 59366eeb752e949e25b10c0781e80d72e2f54dd5 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Fri, 15 Mar 2024 06:23:40 +0300 Subject: [PATCH 1453/2301] [CIR][CIRGen] Support for initialization of unions. (#495) PR adds support for initialization of unions. The change is copy-pasted from the original CodeGen. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 ++- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 38 +++++++++++++++++++--- clang/test/CIR/CodeGen/union-init.c | 43 +++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/union-init.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 3c880506fe1c..f8137cce2efc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -631,7 +631,9 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { builder.createStore(loc, Vector, Dst.getVectorAddress()); return; } - llvm_unreachable("NYI: non-simple store through lvalue"); + assert(Dst.isBitField() && "NIY LValue type"); + mlir::Value result; + return buildStoreThroughBitfieldLValue(Src, Dst, result); } assert(Dst.isSimple() && "only implemented simple"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 82160e11df6f..6245f6f20259 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -743,7 +743,8 @@ void AggExprEmitter::buildNullInitializationToLValue(mlir::Location loc, // Note that the following is not equivalent to // EmitStoreThroughBitfieldLValue for ARC types. if (lv.isBitField()) { - llvm_unreachable("NYI"); + mlir::Value result; + CGF.buildStoreThroughBitfieldLValue(RValue::get(null), lv, result); } else { assert(lv.isSimple()); CGF.buildStoreOfScalar(null, lv, /* isInitialization */ true); @@ -795,8 +796,7 @@ void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) { if (LV.isSimple()) { CGF.buildScalarInit(E, CGF.getLoc(E->getSourceRange()), LV); } else { - llvm_unreachable("NYI"); - // CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV); + CGF.buildStoreThroughLValue(RValue::get(CGF.buildScalarExpr(E)), LV); } return; } @@ -1114,7 +1114,37 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( CIRGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress()); if (record->isUnion()) { - llvm_unreachable("NYI"); + // Only initialize one field of a union. The field itself is + // specified by the initializer list. + if (!InitializedFieldInUnion) { + // Empty union; we have nothing to do. + +#ifndef NDEBUG + // Make sure that it's really an empty and not a failure of + // semantic analysis. + for (const auto *Field : record->fields()) + assert( + (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) && + "Only unnamed bitfields or ananymous class allowed"); +#endif + return; + } + + // FIXME: volatility + FieldDecl *Field = InitializedFieldInUnion; + + LValue FieldLoc = + CGF.buildLValueForFieldInitialization(DestLV, Field, Field->getName()); + if (NumInitElements) { + // Store the initializer into the field + buildInitializationToLValue(InitExprs[0], FieldLoc); + } else { + // Default-initialize to null. + buildNullInitializationToLValue(CGF.getLoc(ExprToVisit->getSourceRange()), + FieldLoc); + } + + return; } // Here we iterate over the fields; this makes it simpler to both diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c new file mode 100644 index 000000000000..54d4e0516c25 --- /dev/null +++ b/clang/test/CIR/CodeGen/union-init.c @@ -0,0 +1,43 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s +// XFAIL: * + +typedef union { + int value; + struct { + int x : 16; + int y : 16; + }; +} A; + +void foo(int x) { + A a = {.x = x}; +} + +// CHECK: cir.func @foo(%arg0: !s32i loc({{.*}})) +// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !s32i, cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = ""} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.cast(bitcast, [[TMP2]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP0]] : cir.ptr , !s32i +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP4]] : !s32i), !u32i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<4294901760> : !u32i) : !u32i +// CHECK: [[TMP10:%.*]] = cir.binop(and, [[TMP6]], [[TMP9]]) : !u32i +// CHECK: [[TMP11:%.*]] = cir.binop(or, [[TMP10]], [[TMP8]]) : !u32i +// CHECK: cir.store [[TMP11]], [[TMP3]] : !u32i, cir.ptr +// CHECK: [[TMP12:%.*]] = cir.cast(bitcast, [[TMP2]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP13:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: [[TMP14:%.*]] = cir.cast(integral, [[TMP13]] : !s32i), !u32i +// CHECK: [[TMP15:%.*]] = cir.load [[TMP12]] : cir.ptr , !u32i +// CHECK: [[TMP16:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i +// CHECK: [[TMP17:%.*]] = cir.binop(and, [[TMP14]], [[TMP16]]) : !u32i +// CHECK: [[TMP18:%.*]] = cir.const(#cir.int<16> : !u32i) : !u32i +// CHECK: [[TMP19:%.*]] = cir.shift(left, [[TMP17]] : !u32i, [[TMP18]] : !u32i) -> !u32i +// CHECK: [[TMP20:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i +// CHECK: [[TMP21:%.*]] = cir.binop(and, [[TMP15]], [[TMP20]]) : !u32i +// CHECK: [[TMP22:%.*]] = cir.binop(or, [[TMP21]], [[TMP19]]) : !u32i +// CHECK: cir.store [[TMP22]], [[TMP12]] : !u32i, cir.ptr +// CHECK: cir.return From a22275f8bcc287f5e95653e329c53a4bcc31aa1e Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Fri, 15 Mar 2024 06:37:49 +0300 Subject: [PATCH 1454/2301] [CIR][CIRGen] Support for __builtin_prefetch (#504) This PR adds support for `__builtin_prefetch`. CIRGen of this builtin emits the new 'cir.prefetch' opcode. Then `cir.prefetch` lowers to `llvm.prefetch` intrinsic. Co-authored-by: Bruno Cardoso Lopes --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 35 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 23 ++++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 18 ++++++++-- clang/test/CIR/CodeGen/builtin-prefetch.c | 20 +++++++++++ 4 files changed, 94 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-prefetch.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index abadea449cdb..0ae2436e3797 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3205,6 +3205,41 @@ def TrapOp : CIR_Op<"trap", [Terminator]> { let assemblyFormat = "attr-dict"; } +//===----------------------------------------------------------------------===// +// PrefetchOp +//===----------------------------------------------------------------------===// + +def PrefetchOp : CIR_Op<"prefetch"> { + let summary = "prefetch operation"; + let description = [{ + The `cir.prefetch` op prefetches data from the memmory address. + + ```mlir + cir.prefetch(%0 : !cir.ptr) locality(1) write + ``` + + This opcode has the three attributes: + 1. The $locality is a temporal locality specifier + ranging from (0) - no locality, to (3) - extremely local keep in cache. + 2. The $isWrite is the specifier determining if the prefetch is prepaired + for a 'read' or 'write'. + If $isWrite doesn't specified it means that prefetch is prepared for 'read'. + }]; + + let arguments = ( + ins VoidPtr:$addr, + ConfinedAttr, + IntMaxValue<3>]>:$locality, + UnitAttr:$isWrite); + + let assemblyFormat = [{ + `(` $addr `:` qualified(type($addr)) `)` + `locality``(` $locality `)` + (`write` $isWrite^) : (`read`)? + attr-dict + }]; +} + //===----------------------------------------------------------------------===// // ArrayCtor & ArrayDtor //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 3b7941d7c08a..c964f80923c6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -439,6 +439,29 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(buildScalarExpr(E->getArg(0))); } + case Builtin::BI__builtin_prefetch: { + auto evaluateOperandAsInt = [&](const Expr *Arg) { + Expr::EvalResult Res; + [[maybe_unused]] bool EvalSucceed = + Arg->EvaluateAsInt(Res, CGM.getASTContext()); + assert(EvalSucceed && "expression should be able to evaluate as int"); + return Res.Val.getInt().getZExtValue(); + }; + + bool IsWrite = false; + if (E->getNumArgs() > 1) + IsWrite = evaluateOperandAsInt(E->getArg(1)); + + int Locality = 0; + if (E->getNumArgs() > 2) + Locality = evaluateOperandAsInt(E->getArg(2)); + + mlir::Value Address = buildScalarExpr(E->getArg(0)); + builder.create(getLoc(E->getSourceRange()), Address, + Locality, IsWrite); + return RValue::get(nullptr); + } + // C++ std:: builtins. case Builtin::BImove: case Builtin::BImove_if_noexcept: diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ee65588d44c1..224767efa7c6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2576,7 +2576,21 @@ class CIRInlineAsmOpLowering /*is_align_stack*/ mlir::UnitAttr(), mlir::LLVM::AsmDialectAttr::get(getContext(), llDialect), rewriter.getArrayAttr(opAttrs)); + return mlir::success(); + } +}; +class CIRPrefetchLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::PrefetchOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getAddr(), adaptor.getIsWrite(), adaptor.getLocality(), + /*DataCache*/ 1); return mlir::success(); } }; @@ -2731,8 +2745,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, - CIRSetBitfieldLowering, CIRGetBitfieldLowering>(converter, - patterns.getContext()); + CIRSetBitfieldLowering, CIRGetBitfieldLowering, CIRPrefetchLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/builtin-prefetch.c b/clang/test/CIR/CodeGen/builtin-prefetch.c new file mode 100644 index 000000000000..21b908d085bd --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-prefetch.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +void foo(void *a) { + __builtin_prefetch(a, 1, 1); +} + +// CIR: cir.func @foo(%arg0: !cir.ptr loc({{.*}})) +// CIR: [[PTR_ALLOC:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} +// CIR: cir.store %arg0, [[PTR_ALLOC]] : !cir.ptr, cir.ptr > +// CIR: [[PTR:%.*]] = cir.load [[PTR_ALLOC]] : cir.ptr >, !cir.ptr +// CIR: cir.prefetch([[PTR]] : !cir.ptr) locality(1) write +// CIR: cir.return + +// LLVM: define void @foo(ptr [[ARG0:%.*]]) +// LLVM: [[PTR_ALLOC:%.*]] = alloca ptr, i64 1 +// LLVM: store ptr [[ARG0]], ptr [[PTR_ALLOC]] +// LLVM: [[PTR:%.*]] = load ptr, ptr [[PTR_ALLOC]] +// LLVM: call void @llvm.prefetch.p0(ptr [[PTR]], i32 1, i32 1, i32 1) +// LLVM: ret void From 5bd132f22759ea81cc9b8d04e1bfeee041aa4683 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 15 Mar 2024 06:49:25 +0300 Subject: [PATCH 1455/2301] [CIR] Add MemRead/MemWrite markers to bitfield ops (#507) This PR adds MemRead/MemWrite markers to the `GetBitfieldOp` and `SetBitfieldOp` (as discussed in #487) Also, minor renaming in the `SetBitfieldOp` --------- Co-authored-by: Bruno Cardoso Lopes --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 10 +++++----- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0ae2436e3797..a12ae2f6c7b8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1759,7 +1759,7 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { }]; let arguments = (ins - CIR_PointerType:$dst, + Arg:$addr, CIR_AnyType:$src, BitfieldInfoAttr:$bitfield_info, UnitAttr:$is_volatile @@ -1767,12 +1767,12 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { let results = (outs CIR_IntType:$result); - let assemblyFormat = [{ `(`$bitfield_info`,` $dst`:`qualified(type($dst))`,` + let assemblyFormat = [{ `(`$bitfield_info`,` $addr`:`qualified(type($addr))`,` $src`:`type($src) `)` attr-dict `->` type($result) }]; let builders = [ OpBuilder<(ins "Type":$type, - "Value":$dst, + "Value":$addr, "Type":$storage_type, "Value":$src, "StringRef":$name, @@ -1786,7 +1786,7 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { BitfieldInfoAttr::get($_builder.getContext(), name, storage_type, size, offset, is_signed); - build($_builder, $_state, type, dst, src, info, is_volatile); + build($_builder, $_state, type, addr, src, info, is_volatile); }]> ]; } @@ -1838,7 +1838,7 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> { }]; let arguments = (ins - CIR_PointerType:$addr, + Arg:$addr, BitfieldInfoAttr:$bitfield_info, UnitAttr:$is_volatile ); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 224767efa7c6..e147dbd7a049 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2631,7 +2631,7 @@ class CIRSetBitfieldLowering assert(storageSize > size && "Invalid bitfield size."); mlir::Value val = rewriter.create( - op.getLoc(), intType, adaptor.getDst(), /* alignment */ 0, + op.getLoc(), intType, adaptor.getAddr(), /* alignment */ 0, op.getIsVolatile()); srcVal = createAnd(rewriter, srcVal, @@ -2648,7 +2648,7 @@ class CIRSetBitfieldLowering srcVal = rewriter.create(op.getLoc(), val, srcVal); } - rewriter.create(op.getLoc(), srcVal, adaptor.getDst(), + rewriter.create(op.getLoc(), srcVal, adaptor.getAddr(), /* alignment */ 0, op.getIsVolatile()); auto resultTy = getTypeConverter()->convertType(op.getType()); From b002d38c63ddf1c678bf4633fa8891ca64268831 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Fri, 15 Mar 2024 23:58:29 +0300 Subject: [PATCH 1456/2301] [CIR][CIRGen] Support for __builtin_constant_p (#506) This PR adds support for `__builtin_constant_p`. Implementation introduces the new `cr.is_constant` opcode to it during the codegeneration of builtin. Codegeneration is taken from the original llvm codegen. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 17 +++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 36 +++++++++++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 23 ++++++++++-- clang/test/CIR/CodeGen/builtin-constant-p.c | 28 +++++++++++++++ 4 files changed, 102 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-constant-p.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a12ae2f6c7b8..26d2087c36f1 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3283,6 +3283,23 @@ def ArrayDtor : CIR_ArrayInitDestroy<"array.dtor"> { }]; } +//===----------------------------------------------------------------------===// +// IsConstantOp +//===----------------------------------------------------------------------===// + +def IsConstantOp : CIR_Op<"is_constant", [Pure]> { + let description = [{ + Returns `true` if the argument is known to be a compile-time constant + otherwise returns 'false'. + }]; + let arguments = (ins CIR_AnyType:$val); + let results = (outs CIR_BoolType:$result); + + let assemblyFormat = [{ + `(` $val `:` type($val) `)` `:` type($result) attr-dict + }]; +} + //===----------------------------------------------------------------------===// // Operations Lowered Directly to LLVM IR // diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index c964f80923c6..03f8093616ba 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -592,6 +592,42 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_popcountl: case Builtin::BI__builtin_popcountll: return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__builtin_constant_p: { + mlir::Type ResultType = ConvertType(E->getType()); + + const Expr *Arg = E->getArg(0); + QualType ArgType = Arg->getType(); + // FIXME: The allowance for Obj-C pointers and block pointers is historical + // and likely a mistake. + if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() && + !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType()) + // Per the GCC documentation, only numeric constants are recognized after + // inlining. + return RValue::get( + builder.getConstInt(getLoc(E->getSourceRange()), + ResultType.cast(), 0)); + + if (Arg->HasSideEffects(getContext())) + // The argument is unevaluated, so be conservative if it might have + // side-effects. + return RValue::get( + builder.getConstInt(getLoc(E->getSourceRange()), + ResultType.cast(), 0)); + + mlir::Value ArgValue = buildScalarExpr(Arg); + if (ArgType->isObjCObjectPointerType()) + // Convert Objective-C objects to id because we cannot distinguish between + // LLVM types for Obj-C classes as they are opaque. + ArgType = CGM.getASTContext().getObjCIdType(); + ArgValue = builder.createBitcast(ArgValue, ConvertType(ArgType)); + + mlir::Value Result = builder.create( + getLoc(E->getSourceRange()), ArgValue); + if (Result.getType() != ResultType) + Result = builder.createBoolToInt(Result, ResultType); + return RValue::get(Result); + } } // If this is an alias for a lib function (e.g. __builtin_sin), emit diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e147dbd7a049..991520bd99f5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2725,6 +2725,25 @@ class CIRGetBitfieldLowering } }; +class CIRIsConstantOpLowering + : public mlir::OpConversionPattern { + + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::IsConstantOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // FIXME(cir): llvm.intr.is.constant returns i1 value but the LLVM Lowering + // expects that cir.bool type will be lowered as i8 type. + // So we have to insert zext here. + auto isConstantOP = rewriter.create( + op.getLoc(), adaptor.getVal()); + rewriter.replaceOpWithNewOp(op, rewriter.getI8Type(), + isConstantOP); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -2745,8 +2764,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, - CIRSetBitfieldLowering, CIRGetBitfieldLowering, CIRPrefetchLowering>( - converter, patterns.getContext()); + CIRSetBitfieldLowering, CIRGetBitfieldLowering, CIRPrefetchLowering, + CIRIsConstantOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/builtin-constant-p.c b/clang/test/CIR/CodeGen/builtin-constant-p.c new file mode 100644 index 000000000000..1b3dbe7e9275 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-constant-p.c @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s --check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +int a = 0; +int foo() { + return __builtin_constant_p(a); +} + +// CIR: cir.func no_proto @foo() -> !s32i extra(#fn_attr) +// CIR: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CIR: [[TMP1:%.*]] = cir.get_global @a : cir.ptr +// CIR: [[TMP2:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +// CIR: [[TMP3:%.*]] = cir.is_constant([[TMP2]] : !s32i) : !cir.bool +// CIR: [[TMP4:%.*]] = cir.cast(bool_to_int, [[TMP3]] : !cir.bool), !s32i +// CIR: cir.store [[TMP4]], [[TMP0]] : !s32i, cir.ptr +// CIR: [[TMP5:%.*]] = cir.load [[TMP0]] : cir.ptr , !s32i +// CIR: cir.return [[TMP5]] : !s32i + +// LLVM:define i32 @foo() +// LLVM: [[TMP1:%.*]] = alloca i32, i64 1 +// LLVM: [[TMP2:%.*]] = load i32, ptr @a +// LLVM: [[TMP3:%.*]] = call i1 @llvm.is.constant.i32(i32 [[TMP2]]) +// LLVM: [[TMP4:%.*]] = zext i1 [[TMP3]] to i8 +// LLVM: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32 +// LLVM: store i32 [[TMP5]], ptr [[TMP1]] +// LLVM: [[TMP6:%.*]] = load i32, ptr [[TMP1]] +// LLVM: ret i32 [[TMP6]] + From ca142c67e98c5202c8587bfce668fd5627d00764 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 19 Mar 2024 08:46:31 +0800 Subject: [PATCH 1457/2301] [CIR][CIRGen] Support for C++20 three-way comparison (#485) This patch adds CIRGen support for the C++20 three-way comparison operator `<=>`. The binary operator is directly lowered to existing CIR operations. Most of the changes are tests. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 62 ++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 49 +++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 48 +++ clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 60 +++- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 52 +++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 19 ++ .../Dialect/Transforms/LoweringPrepare.cpp | 66 +++- clang/test/CIR/CodeGen/Inputs/std-compare.h | 307 ++++++++++++++++++ .../test/CIR/CodeGen/three-way-comparison.cpp | 72 ++++ 9 files changed, 731 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/Inputs/std-compare.h create mode 100644 clang/test/CIR/CodeGen/three-way-comparison.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 7ca5f1060f3f..60794e443829 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -264,6 +264,68 @@ def ConstPtrAttr : CIR_Attr<"ConstPtr", "ptr", [TypedAttrInterface]> { let hasCustomAssemblyFormat = 1; } +//===----------------------------------------------------------------------===// +// CmpThreeWayInfoAttr +//===----------------------------------------------------------------------===// + +def CmpOrdering_Strong : I32EnumAttrCase<"Strong", 1, "strong">; +def CmpOrdering_Partial : I32EnumAttrCase<"Partial", 2, "partial">; + +def CmpOrdering : I32EnumAttr< + "CmpOrdering", "three-way comparison ordering kind", + [CmpOrdering_Strong, CmpOrdering_Partial] +> { + let cppNamespace = "::mlir::cir"; +} + +def CmpThreeWayInfoAttr : CIR_Attr<"CmpThreeWayInfo", "cmp3way_info"> { + let summary = "Holds information about a three-way comparison operation"; + let description = [{ + The `#cmp3way_info` attribute contains information about a three-way + comparison operation `cir.cmp3way`. + + The `ordering` parameter gives the ordering kind of the three-way comparison + operation. It may be either strong ordering or partial ordering. + + Given the two input operands of the three-way comparison operation `lhs` and + `rhs`, the `lt`, `eq`, `gt`, and `unordered` parameters gives the result + value that should be produced by the three-way comparison operation when the + ordering between `lhs` and `rhs` is `lhs < rhs`, `lhs == rhs`, `lhs > rhs`, + or neither, respectively. + }]; + + let parameters = (ins "CmpOrdering":$ordering, "int64_t":$lt, "int64_t":$eq, + "int64_t":$gt, + OptionalParameter<"std::optional">:$unordered); + + let builders = [ + AttrBuilder<(ins "int64_t":$lt, "int64_t":$eq, "int64_t":$gt), [{ + return $_get($_ctxt, CmpOrdering::Strong, lt, eq, gt, std::nullopt); + }]>, + AttrBuilder<(ins "int64_t":$lt, "int64_t":$eq, "int64_t":$gt, + "int64_t":$unordered), [{ + return $_get($_ctxt, CmpOrdering::Partial, lt, eq, gt, unordered); + }]>, + ]; + + let extraClassDeclaration = [{ + /// Get attribute alias name for this attribute. + std::string getAlias() const; + }]; + + let assemblyFormat = [{ + `<` + $ordering `,` + `lt` `=` $lt `,` + `eq` `=` $eq `,` + `gt` `=` $gt + (`,` `unordered` `=` $unordered^)? + `>` + }]; + + let genVerifyDecl = 1; +} + //===----------------------------------------------------------------------===// // DataMemberAttr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 26d2087c36f1..62b0da234457 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1147,6 +1147,55 @@ def BitPopcountOp : CIR_BitOp<"bit.popcount", UIntOfWidths<[16, 32, 64]>> { }]; } +//===----------------------------------------------------------------------===// +// CmpThreeWayOp +//===----------------------------------------------------------------------===// + +def CmpThreeWayOp : CIR_Op<"cmp3way", [Pure, SameTypeOperands]> { + let summary = "Compare two values with C++ three-way comparison semantics"; + let description = [{ + The `cir.cmp3way` operation models the `<=>` operator in C++20. It takes two + operands with the same type and produces a result indicating the ordering + between the two input operands. + + The result of the operation is a signed integer that indicates the ordering + between the two input operands. + + There are two kinds of ordering: strong ordering and partial ordering. + Comparing different types of values yields different kinds of orderings. + The `info` parameter gives the ordering kind and other necessary information + about the comparison. + + Example: + + ```mlir + !s32i = !cir.int + + #cmp3way_strong = #cmp3way_info + #cmp3way_partial = #cmp3way_info + + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.const(#cir.int<1> : !s32i) : !s32i + %2 = cir.cmp3way(%0 : !s32i, %1, #cmp3way_strong) : !s8i + + %3 = cir.const(#cir.fp<0.0> : !cir.float) : !cir.float + %4 = cir.const(#cir.fp<1.0> : !cir.float) : !cir.float + %5 = cir.cmp3way(%3 : !cir.float, %4, #cmp3way_partial) : !s8i + ``` + }]; + + let results = (outs CIR_IntType:$result); + let arguments = (ins CIR_AnyType:$lhs, CIR_AnyType:$rhs, + CmpThreeWayInfoAttr:$info); + + let assemblyFormat = [{ + `(` $lhs `:` type($lhs) `,` $rhs `,` qualified($info) `)` + `:` type($result) attr-dict + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // SwitchOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index a4d4ad7c9a77..2c2d0a3d0464 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -219,6 +219,20 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::TypeInfoAttr::get(anonStruct.getType(), fieldsAttr); } + mlir::cir::CmpThreeWayInfoAttr getCmpThreeWayInfoStrongOrdering( + const llvm::APSInt <, const llvm::APSInt &eq, const llvm::APSInt >) { + return mlir::cir::CmpThreeWayInfoAttr::get( + getContext(), lt.getSExtValue(), eq.getSExtValue(), gt.getSExtValue()); + } + + mlir::cir::CmpThreeWayInfoAttr getCmpThreeWayInfoPartialOrdering( + const llvm::APSInt <, const llvm::APSInt &eq, const llvm::APSInt >, + const llvm::APSInt &unordered) { + return mlir::cir::CmpThreeWayInfoAttr::get( + getContext(), lt.getSExtValue(), eq.getSExtValue(), gt.getSExtValue(), + unordered.getSExtValue()); + } + mlir::cir::DataMemberAttr getDataMemberAttr(mlir::cir::DataMemberType ty, size_t memberIndex) { return mlir::cir::DataMemberAttr::get(getContext(), ty, memberIndex); @@ -598,6 +612,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc); } + mlir::cir::CmpOp createCompare(mlir::Location loc, mlir::cir::CmpOpKind kind, + mlir::Value lhs, mlir::Value rhs) { + return create(loc, getBoolTy(), kind, lhs, rhs); + } + mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, mlir::Value src, mlir::Value len) { return create(loc, dst, src, len); @@ -824,6 +843,35 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } } + mlir::cir::CmpThreeWayOp + createThreeWayCmpStrong(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, + const llvm::APSInt <Res, const llvm::APSInt &eqRes, + const llvm::APSInt >Res) { + assert(ltRes.getBitWidth() == eqRes.getBitWidth() && + ltRes.getBitWidth() == gtRes.getBitWidth() && + "the three comparison results must have the same bit width"); + auto cmpResultTy = getSIntNTy(ltRes.getBitWidth()); + auto infoAttr = getCmpThreeWayInfoStrongOrdering(ltRes, eqRes, gtRes); + return create(loc, cmpResultTy, lhs, rhs, + infoAttr); + } + + mlir::cir::CmpThreeWayOp + createThreeWayCmpPartial(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, + const llvm::APSInt <Res, const llvm::APSInt &eqRes, + const llvm::APSInt >Res, + const llvm::APSInt &unorderedRes) { + assert(ltRes.getBitWidth() == eqRes.getBitWidth() && + ltRes.getBitWidth() == gtRes.getBitWidth() && + ltRes.getBitWidth() == unorderedRes.getBitWidth() && + "the four comparison results must have the same bit width"); + auto cmpResultTy = getSIntNTy(ltRes.getBitWidth()); + auto infoAttr = + getCmpThreeWayInfoPartialOrdering(ltRes, eqRes, gtRes, unorderedRes); + return create(loc, cmpResultTy, lhs, rhs, + infoAttr); + } + mlir::cir::GetRuntimeMemberOp createGetIndirectMember(mlir::Location loc, mlir::Value objectPtr, mlir::Value memberPtr) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 6245f6f20259..c0f88bb6383d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -18,6 +18,7 @@ #include "mlir/IR/Attributes.h" #include "clang/AST/Decl.h" +#include "clang/AST/Expr.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" @@ -261,7 +262,7 @@ class AggExprEmitter : public StmtVisitor { } void VisitBinComma(const BinaryOperator *E) { llvm_unreachable("NYI"); } - void VisitBinCmp(const BinaryOperator *E) { llvm_unreachable("NYI"); } + void VisitBinCmp(const BinaryOperator *E); void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { llvm_unreachable("NYI"); } @@ -1027,6 +1028,63 @@ void AggExprEmitter::withReturnValueSlot( } } +void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) { + assert(CGF.getContext().hasSameType(E->getLHS()->getType(), + E->getRHS()->getType())); + const ComparisonCategoryInfo &CmpInfo = + CGF.getContext().CompCategories.getInfoForType(E->getType()); + assert(CmpInfo.Record->isTriviallyCopyable() && + "cannot copy non-trivially copyable aggregate"); + + QualType ArgTy = E->getLHS()->getType(); + + if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() && + !ArgTy->isNullPtrType() && !ArgTy->isPointerType() && + !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) + llvm_unreachable("aggregate three-way comparison"); + + auto Loc = CGF.getLoc(E->getSourceRange()); + + if (E->getType()->isAnyComplexType()) + llvm_unreachable("NYI"); + + auto LHS = CGF.buildAnyExpr(E->getLHS()).getScalarVal(); + auto RHS = CGF.buildAnyExpr(E->getRHS()).getScalarVal(); + + mlir::Value ResultScalar; + if (ArgTy->isNullPtrType()) { + ResultScalar = + CGF.builder.getConstInt(Loc, CmpInfo.getEqualOrEquiv()->getIntValue()); + } else { + auto LtRes = CmpInfo.getLess()->getIntValue(); + auto EqRes = CmpInfo.getEqualOrEquiv()->getIntValue(); + auto GtRes = CmpInfo.getGreater()->getIntValue(); + if (!CmpInfo.isPartial()) { + // Strong ordering. + ResultScalar = CGF.builder.createThreeWayCmpStrong(Loc, LHS, RHS, LtRes, + EqRes, GtRes); + } else { + // Partial ordering. + auto UnorderedRes = CmpInfo.getUnordered()->getIntValue(); + ResultScalar = CGF.builder.createThreeWayCmpPartial( + Loc, LHS, RHS, LtRes, EqRes, GtRes, UnorderedRes); + } + } + + // Create the return value in the destination slot. + EnsureDest(Loc, E->getType()); + LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), E->getType()); + + // Emit the address of the first (and only) field in the comparison category + // type, and initialize it from the constant integer value produced above. + const FieldDecl *ResultField = *CmpInfo.Record->field_begin(); + LValue FieldLV = CGF.buildLValueForFieldInitialization( + DestLV, ResultField, ResultField->getName()); + CGF.buildStoreThroughLValue(RValue::get(ResultScalar), FieldLV); + + // All done! The result is in the Dest slot. +} + void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { // TODO(cir): use something like CGF.ErrorUnsupported if (E->hadArrayRangeDesignator()) diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 0ded0f501483..4d539a5fc139 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -360,6 +360,58 @@ LogicalResult cir::FPAttr::verify(function_ref emitError, return success(); } +//===----------------------------------------------------------------------===// +// CmpThreeWayInfoAttr definitions +//===----------------------------------------------------------------------===// + +std::string CmpThreeWayInfoAttr::getAlias() const { + std::string alias = "cmp3way_info"; + + if (getOrdering() == CmpOrdering::Strong) + alias.append("_strong_"); + else + alias.append("_partial_"); + + auto appendInt = [&](int64_t value) { + if (value < 0) { + alias.push_back('n'); + value = -value; + } + alias.append(std::to_string(value)); + }; + + alias.append("lt"); + appendInt(getLt()); + alias.append("eq"); + appendInt(getEq()); + alias.append("gt"); + appendInt(getGt()); + + if (auto unordered = getUnordered()) { + alias.append("un"); + appendInt(unordered.value()); + } + + return alias; +} + +LogicalResult +CmpThreeWayInfoAttr::verify(function_ref emitError, + CmpOrdering ordering, int64_t lt, int64_t eq, + int64_t gt, std::optional unordered) { + // The presense of unordered must match the value of ordering. + if (ordering == CmpOrdering::Strong && unordered) { + emitError() << "strong ordering does not include unordered ordering"; + return failure(); + } + if (ordering == CmpOrdering::Partial && !unordered) { + emitError() << "partial ordering lacks unordered ordering"; + return failure(); + } + + return success(); +} + //===----------------------------------------------------------------------===// // DataMemberAttr definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d6f41d121a27..d9b227519ccd 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -89,6 +89,11 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << "fn_attr"; return AliasResult::FinalAlias; } + if (auto cmpThreeWayInfoAttr = + attr.dyn_cast()) { + os << cmpThreeWayInfoAttr.getAlias(); + return AliasResult::FinalAlias; + } return AliasResult::NoAlias; } @@ -870,6 +875,20 @@ Block *BrCondOp::getSuccessorForOperands(ArrayRef operands) { return nullptr; } +//===----------------------------------------------------------------------===// +// CmpThreeWayOp +//===----------------------------------------------------------------------===// + +LogicalResult CmpThreeWayOp::verify() { + // Type of the result must be a signed integer type. + if (!getType().isSigned()) { + emitOpError() << "result type of cir.cmp3way must be a signed integer type"; + return failure(); + } + + return success(); +} + //===----------------------------------------------------------------------===// // SwitchOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index d8734bede1ef..577770ea9af8 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -66,6 +66,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void runOnOperation() override; void runOnOp(Operation *op); + void lowerThreeWayCmpOp(CmpThreeWayOp op); void lowerGlobalOp(GlobalOp op); void lowerStdFindOp(StdFindOp op); void lowerIterBeginOp(IterBeginOp op); @@ -236,6 +237,63 @@ FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { return f; } +void LoweringPreparePass::lowerThreeWayCmpOp(CmpThreeWayOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op); + + auto loc = op->getLoc(); + auto cmpInfo = op.getInfo(); + + auto buildCmpRes = [&](int64_t value) -> mlir::Value { + return builder.create( + loc, op.getType(), mlir::cir::IntAttr::get(op.getType(), value)); + }; + auto ltRes = buildCmpRes(cmpInfo.getLt()); + auto eqRes = buildCmpRes(cmpInfo.getEq()); + auto gtRes = buildCmpRes(cmpInfo.getGt()); + + auto buildCmp = [&](CmpOpKind kind) -> mlir::Value { + auto ty = BoolType::get(&getContext()); + return builder.create(loc, ty, kind, op.getLhs(), + op.getRhs()); + }; + auto buildSelect = [&](mlir::Value condition, mlir::Value trueResult, + mlir::Value falseResult) -> mlir::Value { + return builder + .create( + loc, condition, + [&](OpBuilder &, Location) { + builder.create(loc, trueResult); + }, + [&](OpBuilder &, Location) { + builder.create(loc, falseResult); + }) + .getResult(); + }; + + mlir::Value transformedResult; + if (cmpInfo.getOrdering() == CmpOrdering::Strong) { + // Strong ordering. + auto lt = buildCmp(CmpOpKind::lt); + auto eq = buildCmp(CmpOpKind::eq); + auto selectOnEq = buildSelect(eq, eqRes, gtRes); + transformedResult = buildSelect(lt, ltRes, selectOnEq); + } else { + // Partial ordering. + auto unorderedRes = buildCmpRes(cmpInfo.getUnordered().value()); + + auto lt = buildCmp(CmpOpKind::lt); + auto eq = buildCmp(CmpOpKind::eq); + auto gt = buildCmp(CmpOpKind::gt); + auto selectOnEq = buildSelect(eq, eqRes, unorderedRes); + auto selectOnGt = buildSelect(gt, gtRes, selectOnEq); + transformedResult = buildSelect(lt, ltRes, selectOnGt); + } + + op.replaceAllUsesWith(transformedResult); + op.erase(); +} + void LoweringPreparePass::lowerGlobalOp(GlobalOp op) { auto &ctorRegion = op.getCtorRegion(); auto &dtorRegion = op.getDtorRegion(); @@ -428,7 +486,9 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { } void LoweringPreparePass::runOnOp(Operation *op) { - if (auto getGlobal = dyn_cast(op)) { + if (auto threeWayCmp = dyn_cast(op)) { + lowerThreeWayCmpOp(threeWayCmp); + } else if (auto getGlobal = dyn_cast(op)) { lowerGlobalOp(getGlobal); } else if (auto stdFind = dyn_cast(op)) { lowerStdFindOp(stdFind); @@ -452,8 +512,8 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { - if (isa( - op)) + if (isa(op)) opsToTransform.push_back(op); }); diff --git a/clang/test/CIR/CodeGen/Inputs/std-compare.h b/clang/test/CIR/CodeGen/Inputs/std-compare.h new file mode 100644 index 000000000000..eaf7951edf79 --- /dev/null +++ b/clang/test/CIR/CodeGen/Inputs/std-compare.h @@ -0,0 +1,307 @@ +#ifndef STD_COMPARE_H +#define STD_COMPARE_H + +namespace std { +inline namespace __1 { + +// exposition only +enum class _EqResult : unsigned char { + __equal = 0, + __equiv = __equal, +}; + +enum class _OrdResult : signed char { + __less = -1, + __greater = 1 +}; + +enum class _NCmpResult : signed char { + __unordered = -127 +}; + +struct _CmpUnspecifiedType; +using _CmpUnspecifiedParam = void (_CmpUnspecifiedType::*)(); + +class partial_ordering { + using _ValueT = signed char; + explicit constexpr partial_ordering(_EqResult __v) noexcept + : __value_(_ValueT(__v)) {} + explicit constexpr partial_ordering(_OrdResult __v) noexcept + : __value_(_ValueT(__v)) {} + explicit constexpr partial_ordering(_NCmpResult __v) noexcept + : __value_(_ValueT(__v)) {} + + constexpr bool __is_ordered() const noexcept { + return __value_ != _ValueT(_NCmpResult::__unordered); + } + +public: + // valid values + static const partial_ordering less; + static const partial_ordering equivalent; + static const partial_ordering greater; + static const partial_ordering unordered; + + // comparisons + friend constexpr bool operator==(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator!=(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator<(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator<=(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator>(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator>=(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator==(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + friend constexpr bool operator!=(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + friend constexpr bool operator<(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + friend constexpr bool operator<=(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + friend constexpr bool operator>(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + friend constexpr bool operator>=(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + + friend constexpr partial_ordering operator<=>(partial_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr partial_ordering operator<=>(_CmpUnspecifiedParam, partial_ordering __v) noexcept; + + // test helper + constexpr bool test_eq(partial_ordering const &other) const noexcept { + return __value_ == other.__value_; + } + +private: + _ValueT __value_; +}; + +inline constexpr partial_ordering partial_ordering::less(_OrdResult::__less); +inline constexpr partial_ordering partial_ordering::equivalent(_EqResult::__equiv); +inline constexpr partial_ordering partial_ordering::greater(_OrdResult::__greater); +inline constexpr partial_ordering partial_ordering::unordered(_NCmpResult ::__unordered); +constexpr bool operator==(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__is_ordered() && __v.__value_ == 0; +} +constexpr bool operator<(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__is_ordered() && __v.__value_ < 0; +} +constexpr bool operator<=(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__is_ordered() && __v.__value_ <= 0; +} +constexpr bool operator>(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__is_ordered() && __v.__value_ > 0; +} +constexpr bool operator>=(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__is_ordered() && __v.__value_ >= 0; +} +constexpr bool operator==(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v.__is_ordered() && 0 == __v.__value_; +} +constexpr bool operator<(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v.__is_ordered() && 0 < __v.__value_; +} +constexpr bool operator<=(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v.__is_ordered() && 0 <= __v.__value_; +} +constexpr bool operator>(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v.__is_ordered() && 0 > __v.__value_; +} +constexpr bool operator>=(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v.__is_ordered() && 0 >= __v.__value_; +} +constexpr bool operator!=(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return !__v.__is_ordered() || __v.__value_ != 0; +} +constexpr bool operator!=(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return !__v.__is_ordered() || __v.__value_ != 0; +} + +constexpr partial_ordering operator<=>(partial_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v; +} +constexpr partial_ordering operator<=>(_CmpUnspecifiedParam, partial_ordering __v) noexcept { + return __v < 0 ? partial_ordering::greater : (__v > 0 ? partial_ordering::less : __v); +} + +class weak_ordering { + using _ValueT = signed char; + explicit constexpr weak_ordering(_EqResult __v) noexcept : __value_(_ValueT(__v)) {} + explicit constexpr weak_ordering(_OrdResult __v) noexcept : __value_(_ValueT(__v)) {} + +public: + static const weak_ordering less; + static const weak_ordering equivalent; + static const weak_ordering greater; + + // conversions + constexpr operator partial_ordering() const noexcept { + return __value_ == 0 ? partial_ordering::equivalent + : (__value_ < 0 ? partial_ordering::less : partial_ordering::greater); + } + + // comparisons + friend constexpr bool operator==(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator!=(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator<(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator<=(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator>(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator>=(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator==(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + friend constexpr bool operator!=(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + friend constexpr bool operator<(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + friend constexpr bool operator<=(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + friend constexpr bool operator>(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + friend constexpr bool operator>=(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + + friend constexpr weak_ordering operator<=>(weak_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr weak_ordering operator<=>(_CmpUnspecifiedParam, weak_ordering __v) noexcept; + + // test helper + constexpr bool test_eq(weak_ordering const &other) const noexcept { + return __value_ == other.__value_; + } + +private: + _ValueT __value_; +}; + +inline constexpr weak_ordering weak_ordering::less(_OrdResult::__less); +inline constexpr weak_ordering weak_ordering::equivalent(_EqResult::__equiv); +inline constexpr weak_ordering weak_ordering::greater(_OrdResult::__greater); +constexpr bool operator==(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ == 0; +} +constexpr bool operator!=(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ != 0; +} +constexpr bool operator<(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ < 0; +} +constexpr bool operator<=(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ <= 0; +} +constexpr bool operator>(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ > 0; +} +constexpr bool operator>=(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ >= 0; +} +constexpr bool operator==(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 == __v.__value_; +} +constexpr bool operator!=(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 != __v.__value_; +} +constexpr bool operator<(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 < __v.__value_; +} +constexpr bool operator<=(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 <= __v.__value_; +} +constexpr bool operator>(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 > __v.__value_; +} +constexpr bool operator>=(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return 0 >= __v.__value_; +} + +constexpr weak_ordering operator<=>(weak_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v; +} +constexpr weak_ordering operator<=>(_CmpUnspecifiedParam, weak_ordering __v) noexcept { + return __v < 0 ? weak_ordering::greater : (__v > 0 ? weak_ordering::less : __v); +} + +class strong_ordering { + using _ValueT = signed char; + explicit constexpr strong_ordering(_EqResult __v) noexcept : __value_(static_cast(__v)) {} + explicit constexpr strong_ordering(_OrdResult __v) noexcept : __value_(static_cast(__v)) {} + +public: + static const strong_ordering less; + static const strong_ordering equal; + static const strong_ordering equivalent; + static const strong_ordering greater; + + // conversions + constexpr operator partial_ordering() const noexcept { + return __value_ == 0 ? partial_ordering::equivalent + : (__value_ < 0 ? partial_ordering::less : partial_ordering::greater); + } + constexpr operator weak_ordering() const noexcept { + return __value_ == 0 ? weak_ordering::equivalent + : (__value_ < 0 ? weak_ordering::less : weak_ordering::greater); + } + + // comparisons + friend constexpr bool operator==(strong_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator!=(strong_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator<(strong_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator<=(strong_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator>(strong_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator>=(strong_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr bool operator==(_CmpUnspecifiedParam, strong_ordering __v) noexcept; + friend constexpr bool operator!=(_CmpUnspecifiedParam, strong_ordering __v) noexcept; + friend constexpr bool operator<(_CmpUnspecifiedParam, strong_ordering __v) noexcept; + friend constexpr bool operator<=(_CmpUnspecifiedParam, strong_ordering __v) noexcept; + friend constexpr bool operator>(_CmpUnspecifiedParam, strong_ordering __v) noexcept; + friend constexpr bool operator>=(_CmpUnspecifiedParam, strong_ordering __v) noexcept; + + friend constexpr strong_ordering operator<=>(strong_ordering __v, _CmpUnspecifiedParam) noexcept; + friend constexpr strong_ordering operator<=>(_CmpUnspecifiedParam, strong_ordering __v) noexcept; + + // test helper + constexpr bool test_eq(strong_ordering const &other) const noexcept { + return __value_ == other.__value_; + } + +private: + _ValueT __value_; +}; + +inline constexpr strong_ordering strong_ordering::less(_OrdResult::__less); +inline constexpr strong_ordering strong_ordering::equal(_EqResult::__equal); +inline constexpr strong_ordering strong_ordering::equivalent(_EqResult::__equiv); +inline constexpr strong_ordering strong_ordering::greater(_OrdResult::__greater); + +constexpr bool operator==(strong_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ == 0; +} +constexpr bool operator!=(strong_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ != 0; +} +constexpr bool operator<(strong_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ < 0; +} +constexpr bool operator<=(strong_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ <= 0; +} +constexpr bool operator>(strong_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ > 0; +} +constexpr bool operator>=(strong_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v.__value_ >= 0; +} +constexpr bool operator==(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return 0 == __v.__value_; +} +constexpr bool operator!=(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return 0 != __v.__value_; +} +constexpr bool operator<(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return 0 < __v.__value_; +} +constexpr bool operator<=(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return 0 <= __v.__value_; +} +constexpr bool operator>(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return 0 > __v.__value_; +} +constexpr bool operator>=(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return 0 >= __v.__value_; +} + +constexpr strong_ordering operator<=>(strong_ordering __v, _CmpUnspecifiedParam) noexcept { + return __v; +} +constexpr strong_ordering operator<=>(_CmpUnspecifiedParam, strong_ordering __v) noexcept { + return __v < 0 ? strong_ordering::greater : (__v > 0 ? strong_ordering::less : __v); +} + +} // namespace __1 +} // end namespace std + +#endif // STD_COMPARE_H diff --git a/clang/test/CIR/CodeGen/three-way-comparison.cpp b/clang/test/CIR/CodeGen/three-way-comparison.cpp new file mode 100644 index 000000000000..09bb4cf9b461 --- /dev/null +++ b/clang/test/CIR/CodeGen/three-way-comparison.cpp @@ -0,0 +1,72 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER + +#include "Inputs/std-compare.h" + +// BEFORE: #cmp3way_info_partial_ltn1eq0gt1unn127_ = #cir.cmp3way_info +// BEFORE: #cmp3way_info_strong_ltn1eq0gt1_ = #cir.cmp3way_info +// BEFORE: !ty_22std3A3A__13A3Apartial_ordering22 = !cir.struct y; +} + +// BEFORE: cir.func @_Z16three_way_strongii +// BEFORE: %{{.+}} = cir.cmp3way(%{{.+}} : !s32i, %{{.+}}, #cmp3way_info_strong_ltn1eq0gt1_) : !s8i +// BEFORE: } + +// AFTER: cir.func @_Z16three_way_strongii +// AFTER: %[[#LHS:]] = cir.load %{{.+}} : cir.ptr , !s32i +// AFTER-NEXT: %[[#RHS:]] = cir.load %{{.+}} : cir.ptr , !s32i +// AFTER-NEXT: %[[#LT:]] = cir.const(#cir.int<-1> : !s8i) : !s8i +// AFTER-NEXT: %[[#EQ:]] = cir.const(#cir.int<0> : !s8i) : !s8i +// AFTER-NEXT: %[[#GT:]] = cir.const(#cir.int<1> : !s8i) : !s8i +// AFTER-NEXT: %[[#CMP_LT:]] = cir.cmp(lt, %[[#LHS]], %[[#RHS]]) : !s32i, !cir.bool +// AFTER-NEXT: %[[#CMP_EQ:]] = cir.cmp(eq, %[[#LHS]], %[[#RHS]]) : !s32i, !cir.bool +// AFTER-NEXT: %[[#CMP_EQ_RES:]] = cir.ternary(%[[#CMP_EQ]], true { +// AFTER-NEXT: cir.yield %[[#EQ]] : !s8i +// AFTER-NEXT: }, false { +// AFTER-NEXT: cir.yield %[[#GT]] : !s8i +// AFTER-NEXT: }) : (!cir.bool) -> !s8i +// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#CMP_LT]], true { +// AFTER-NEXT: cir.yield %[[#LT]] : !s8i +// AFTER-NEXT: }, false { +// AFTER-NEXT: cir.yield %[[#CMP_EQ_RES]] : !s8i +// AFTER-NEXT: }) : (!cir.bool) -> !s8i +// AFTER: } + +auto three_way_weak(float x, float y) { + return x <=> y; +} + +// BEFORE: cir.func @_Z14three_way_weakff +// BEFORE: %{{.+}} = cir.cmp3way(%{{.+}} : !cir.float, %{{.+}}, #cmp3way_info_partial_ltn1eq0gt1unn127_) : !s8i +// BEFORE: } + +// AFTER: cir.func @_Z14three_way_weakff +// AFTER: %[[#LHS:]] = cir.load %0 : cir.ptr , !cir.float +// AFTER-NEXT: %[[#RHS:]] = cir.load %1 : cir.ptr , !cir.float +// AFTER-NEXT: %[[#LT:]] = cir.const(#cir.int<-1> : !s8i) : !s8i +// AFTER-NEXT: %[[#EQ:]] = cir.const(#cir.int<0> : !s8i) : !s8i +// AFTER-NEXT: %[[#GT:]] = cir.const(#cir.int<1> : !s8i) : !s8i +// AFTER-NEXT: %[[#UNORDERED:]] = cir.const(#cir.int<-127> : !s8i) : !s8i +// AFTER-NEXT: %[[#CMP_LT:]] = cir.cmp(lt, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool +// AFTER-NEXT: %[[#CMP_EQ:]] = cir.cmp(eq, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool +// AFTER-NEXT: %[[#CMP_GT:]] = cir.cmp(gt, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool +// AFTER-NEXT: %[[#CMP_EQ_RES:]] = cir.ternary(%[[#CMP_EQ]], true { +// AFTER-NEXT: cir.yield %[[#EQ]] : !s8i +// AFTER-NEXT: }, false { +// AFTER-NEXT: cir.yield %[[#UNORDERED]] : !s8i +// AFTER-NEXT: }) : (!cir.bool) -> !s8i +// AFTER-NEXT: %[[#CMP_GT_RES:]] = cir.ternary(%[[#CMP_GT]], true { +// AFTER-NEXT: cir.yield %[[#GT]] : !s8i +// AFTER-NEXT: }, false { +// AFTER-NEXT: cir.yield %[[#CMP_EQ_RES]] : !s8i +// AFTER-NEXT: }) : (!cir.bool) -> !s8i +// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#CMP_LT]], true { +// AFTER-NEXT: cir.yield %[[#LT]] : !s8i +// AFTER-NEXT: }, false { +// AFTER-NEXT: cir.yield %[[#CMP_GT_RES]] : !s8i +// AFTER-NEXT: }) : (!cir.bool) -> !s8i +// AFTER: } From c8d47b8c37e04ee1951bfaaec233c8132384542f Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 20 Mar 2024 17:13:18 -0700 Subject: [PATCH 1458/2301] [CIR][Rebase] Add CountAttributed enum case in a switch --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 5c9f5223eb93..2a31af7ca00b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1733,4 +1733,4 @@ CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, llvm_unreachable("NYI"); return numElements; -} \ No newline at end of file +} From 1c8d3d472177120d18db502e7a03adf8ef5845e6 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Fri, 22 Mar 2024 08:15:34 +0300 Subject: [PATCH 1459/2301] [CIR][CIRGen] Support for compound literal lvalue (#515) This change is taken from the original codegen. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 29 +++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + clang/test/CIR/CodeGen/compound-literal.c | 18 ++++++++++++++ 3 files changed, 48 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index f8137cce2efc..bb6af60d43c8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -411,6 +411,33 @@ LValue CIRGenFunction::buildLValueForFieldInitialization( return makeAddrLValue(V, FieldType, FieldBaseInfo); } +LValue +CIRGenFunction::buildCompoundLiteralLValue(const CompoundLiteralExpr *E) { + if (E->isFileScope()) { + llvm_unreachable("NYI"); + } + + if (E->getType()->isVariablyModifiedType()) { + llvm_unreachable("NYI"); + } + + Address DeclPtr = CreateMemTemp(E->getType(), getLoc(E->getSourceRange()), + ".compoundliteral"); + const Expr *InitExpr = E->getInitializer(); + LValue Result = makeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl); + + buildAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), + /*Init*/ true); + + // Block-scope compound literals are destroyed at the end of the enclosing + // scope in C. + if (!getLangOpts().CPlusPlus) + if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) + llvm_unreachable("NYI"); + + return Result; +} + // Detect the unusual situation where an inline version is shadowed by a // non-inline version. In that case we should pick the external one // everywhere. That's GCC behavior too. @@ -2243,6 +2270,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return buildStringLiteralLValue(cast(E)); case Expr::MemberExprClass: return buildMemberExpr(cast(E)); + case Expr::CompoundLiteralExprClass: + return buildCompoundLiteralLValue(cast(E)); case Expr::PredefinedExprClass: return buildPredefinedLValue(cast(E)); case Expr::CXXFunctionalCastExprClass: diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 222a7162b3f3..72a8ab60b69b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1529,6 +1529,7 @@ class CIRGenFunction : public CIRGenTypeCache { LValue buildCheckedLValue(const Expr *E, TypeCheckKind TCK); LValue buildMemberExpr(const MemberExpr *E); + LValue buildCompoundLiteralLValue(const CompoundLiteralExpr *E); /// Specifies which type of sanitizer check to apply when handling a /// particular builtin. diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c index 80cc0dc0ad39..ef692585c46d 100644 --- a/clang/test/CIR/CodeGen/compound-literal.c +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -28,3 +28,21 @@ S b = { // LLVM: @.compoundLiteral.1 = internal global [1 x i32] [i32 1] // LLVM: @b = global %struct.S { ptr @.compoundLiteral.1 } + +int foo() { + return (struct { + int i; + }){1} + .i; +} + +// CIR: cir.func no_proto @foo() -> !s32i +// CIR: [[RET_MEM:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CIR: [[COMPLITERAL_MEM:%.*]] = cir.alloca !ty_22anon2E122, cir.ptr , [".compoundliteral"] {alignment = 4 : i64} +// CIR: [[FIELD:%.*]] = cir.get_member [[COMPLITERAL_MEM]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CIR: [[ONE:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CIR: cir.store [[ONE]], [[FIELD]] : !s32i, cir.ptr +// CIR: [[ONE:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CIR: cir.store [[ONE]], [[RET_MEM]] : !s32i, cir.ptr +// CIR: [[RET:%.*]] = cir.load [[RET_MEM]] : cir.ptr , !s32i +// CIR: cir.return [[RET]] : !s32i From 5e216b844fe839684c0c6647a93e01c21ea114a5 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Fri, 22 Mar 2024 08:33:22 +0300 Subject: [PATCH 1460/2301] [CIR][CIRGen] Support for __attribute__((fallthrough)) statement (#517) This PR adds handling of AttributedStmt to support fallthrough attribute. --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 ++ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 19 +++++++++++++++++++ clang/test/CIR/CodeGen/switch.cpp | 26 ++++++++++++++++++++++++++ 3 files changed, 47 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 72a8ab60b69b..1c993e872414 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -986,6 +986,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildLabel(const clang::LabelDecl *D); mlir::LogicalResult buildLabelStmt(const clang::LabelStmt &S); + mlir::LogicalResult buildAttributedStmt(const AttributedStmt &S); + mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); mlir::LogicalResult buildContinueStmt(const clang::ContinueStmt &S); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index b7bd43abc9ca..f214abe7358d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -307,6 +307,8 @@ mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, return buildBreakStmt(cast(*S)); case Stmt::AttributedStmtClass: + return buildAttributedStmt(cast(*S)); + case Stmt::SEHLeaveStmtClass: llvm::errs() << "CIR codegen for '" << S->getStmtClassName() << "' not implemented\n"; @@ -326,6 +328,23 @@ mlir::LogicalResult CIRGenFunction::buildLabelStmt(const clang::LabelStmt &S) { return buildStmt(S.getSubStmt(), /* useCurrentScope */ true); } +mlir::LogicalResult +CIRGenFunction::buildAttributedStmt(const AttributedStmt &S) { + for (const auto *A : S.getAttrs()) { + switch (A->getKind()) { + case attr::NoMerge: + case attr::NoInline: + case attr::AlwaysInline: + case attr::MustTail: + llvm_unreachable("NIY attributes"); + default: + break; + } + } + + return buildStmt(S.getSubStmt(), true, S.getAttrs()); +} + // Add terminating yield on body regions (loops, ...) in case there are // not other terminators used. // FIXME: make terminateCaseRegion use this too. diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 4f1ac78eb9c1..3c63e4ea4820 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -274,3 +274,29 @@ void sw12(int a) { // CHECK-NEXT: ^bb1: // no predecessors // CHECK-NEXT: cir.break // CHECK-NEXT: } + +void fallthrough(int x) { + switch (x) { + case 1: + __attribute__((fallthrough)); + case 2: + break; + default: + break; + } +} + +// CHECK: cir.func @_Z11fallthroughi +// CHECK: cir.scope { +// CHECK: cir.switch (%1 : !s32i) [ +// CHECK-NEXT: case (equal, 1) { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, +// CHECK-NEXT: case (equal, 2) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: }, +// CHECK-NEXT: case (default) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: } +// CHECK-NEXT: ] +// CHECK-NEXT: } From 8eb70fb39e219ce2e2a86733475477757f20f409 Mon Sep 17 00:00:00 2001 From: Kirill Yansitov <36601354+YazZz1k@users.noreply.github.com> Date: Fri, 22 Mar 2024 16:50:01 +0300 Subject: [PATCH 1461/2301] [CIR][CIRGen] Add handling __extension__ keyword for lvalue (#519) This change is taken from the original codegen --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 3 ++- clang/test/CIR/CodeGen/gnu-extension.c | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index bb6af60d43c8..3d1109b0b7a2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1013,7 +1013,8 @@ mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *E) { LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { // __extension__ doesn't affect lvalue-ness. - assert(E->getOpcode() != UO_Extension && "not implemented"); + if (E->getOpcode() == UO_Extension) + return buildLValue(E->getSubExpr()); switch (E->getOpcode()) { default: diff --git a/clang/test/CIR/CodeGen/gnu-extension.c b/clang/test/CIR/CodeGen/gnu-extension.c index 2d91c403e173..e9cee90b57e9 100644 --- a/clang/test/CIR/CodeGen/gnu-extension.c +++ b/clang/test/CIR/CodeGen/gnu-extension.c @@ -9,3 +9,11 @@ int foo(void) { return __extension__ 0b101010; } //CHECK-NEXT: cir.store [[VAL]], [[ADDR]] : !s32i, cir.ptr //CHECK-NEXT: [[LOAD_VAL:%.*]] = cir.load [[ADDR]] : cir.ptr , !s32i //CHECK-NEXT: cir.return [[LOAD_VAL]] : !s32i + +void bar(void) { + __extension__ bar; +} + +//CHECK: cir.func @bar() +//CHECK: {{.*}} = cir.get_global @bar : cir.ptr > +//CHECK: cir.return From 76465fdc0c07699e06fd04a197dec733d4732f77 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 1 Apr 2024 22:37:29 -0700 Subject: [PATCH 1462/2301] [CIR] Add MLIRCIREnumsGen as a dep to MLIRCIRInterfaces --- clang/lib/CIR/Interfaces/CMakeLists.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Interfaces/CMakeLists.txt b/clang/lib/CIR/Interfaces/CMakeLists.txt index 2f4886d6a93a..dee0a1408250 100644 --- a/clang/lib/CIR/Interfaces/CMakeLists.txt +++ b/clang/lib/CIR/Interfaces/CMakeLists.txt @@ -9,12 +9,13 @@ add_clang_library(MLIRCIRInterfaces DEPENDS MLIRCIRASTAttrInterfacesIncGen - MLIRCIROpInterfacesIncGen - MLIRCIRLoopOpInterfaceIncGen + MLIRCIREnumsGen MLIRCIRFPTypeInterfaceIncGen + MLIRCIRLoopOpInterfaceIncGen + MLIRCIROpInterfacesIncGen LINK_LIBS ${dialect_libs} MLIRIR MLIRSupport - ) \ No newline at end of file + ) From 0bee85c9a1b108040b3e396428da5c4ba5913dae Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 3 Apr 2024 22:23:00 +0300 Subject: [PATCH 1463/2301] [CIR] Introduce a flattening pass (#516) We start our journey towards `goto` support and this is a first step on this way. There are some discussion in #508 and according to the plan we do the following here: - a new pass called `cir-flatten-cfg` that is a stub now but later will be responsible for the regions inlining. The pass works only if `-emit-flat-cir` is passed in cmd line. Thus, the clang behavior is not changed here from the user's point of view. - The pass will be accomplished with `goto` solver later, so we talk about several passes that are mandatory for the lowering into `llvm` dialect. There are at least two clients of this pass that will be affected: `clang` itself and `cir-opt`, so we need a common point for them: and `populateCIRFlatteningPasses` and `populateCIRToLLVMPasses` guarantee that `CIR` will be in the correct state for all the clients, whatever new passes we will add later. --- clang/include/clang/CIR/CIRToCIRPasses.h | 15 +++-- clang/include/clang/CIR/Dialect/Passes.h | 3 + clang/include/clang/CIR/Dialect/Passes.td | 14 +++++ clang/include/clang/CIR/Passes.h | 4 ++ .../clang/CIRFrontendAction/CIRGenAction.h | 8 +++ clang/include/clang/Driver/Options.td | 2 + clang/include/clang/Driver/Types.def | 1 + .../include/clang/Frontend/FrontendOptions.h | 3 + clang/lib/CIR/CodeGen/CIRPasses.cpp | 26 ++++++-- .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 60 +++++++++++++++++++ clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 10 +++- .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 1 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 13 +++- clang/lib/Driver/Driver.cpp | 4 ++ clang/lib/Driver/ToolChains/Clang.cpp | 6 +- clang/lib/Frontend/CompilerInvocation.cpp | 5 +- .../ExecuteCompilerInvocation.cpp | 5 +- clang/test/CIR/mlirprint.c | 14 ++++- clang/tools/cir-opt/cir-opt.cpp | 8 ++- 20 files changed, 180 insertions(+), 23 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index 162846c75184..ed089cd966f4 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -28,12 +28,15 @@ class ModuleOp; namespace cir { // Run set of cleanup/prepare/etc passes CIR <-> CIR. -mlir::LogicalResult runCIRToCIRPasses( - mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, - llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, - llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, - llvm::StringRef libOptOpts, std::string &passOptParsingFailure); +mlir::LogicalResult +runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, + bool enableLifetime, llvm::StringRef lifetimeOpts, + bool enableIdiomRecognizer, + llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, + llvm::StringRef libOptOpts, + std::string &passOptParsingFailure, bool flattenCIR); + } // namespace cir #endif // CLANG_CIR_CIRTOCIRPASSES_H_ diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index a685ab8ce3fa..2f713240944f 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -34,6 +34,9 @@ std::unique_ptr createIdiomRecognizerPass(); std::unique_ptr createIdiomRecognizerPass(clang::ASTContext *astCtx); std::unique_ptr createLibOptPass(); std::unique_ptr createLibOptPass(clang::ASTContext *astCtx); +std::unique_ptr createFlattenCFGPass(); + +void populateCIRPreLoweringPasses(mlir::OpPassManager &pm); //===----------------------------------------------------------------------===// // Registration diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index affc28b85003..e63b97469980 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -75,6 +75,20 @@ def LoweringPrepare : Pass<"cir-lowering-prepare"> { let dependentDialects = ["cir::CIRDialect"]; } +def FlattenCFG : Pass<"cir-flatten-cfg"> { + let summary = "Produces flatten cfg"; + let description = [{ + This pass transforms CIR and inline all the nested regions. Thus, + the next post condtions are met after the pass applied: + - there is not any nested region in a function body + - all the blocks in a function belong to the parent region + In other words, this pass removes such CIR operations like IfOp, LoopOp, + ScopeOp and etc. and produces a flat CIR. + }]; + let constructor = "mlir::createFlattenCFGPass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + def IdiomRecognizer : Pass<"cir-idiom-recognizer"> { let summary = "Raise calls to C/C++ libraries to CIR operations"; let description = [{ diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index 293af0412e6d..6b1d2fdc75c4 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -28,6 +28,10 @@ std::unique_ptr createConvertCIRToMLIRPass(); namespace direct { /// Create a pass that fully lowers CIR to the LLVMIR dialect. std::unique_ptr createConvertCIRToLLVMPass(); + +/// Adds passes that fully lower CIR to the LLVMIR dialect. +void populateCIRToLLVMPasses(mlir::OpPassManager &pm); + } // namespace direct } // end namespace cir diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h index d61c90573ade..74d5e5e32611 100644 --- a/clang/include/clang/CIRFrontendAction/CIRGenAction.h +++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h @@ -32,6 +32,7 @@ class CIRGenAction : public clang::ASTFrontendAction { enum class OutputType { EmitAssembly, EmitCIR, + EmitCIRFlat, EmitLLVM, EmitMLIR, EmitObj, @@ -77,6 +78,13 @@ class EmitCIRAction : public CIRGenAction { EmitCIRAction(mlir::MLIRContext *mlirCtx = nullptr); }; +class EmitCIRFlatAction : public CIRGenAction { + virtual void anchor(); + +public: + EmitCIRFlatAction(mlir::MLIRContext *mlirCtx = nullptr); +}; + class EmitCIROnlyAction : public CIRGenAction { virtual void anchor(); diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index f37248573f34..5015af3a9b82 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3105,6 +3105,8 @@ def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, Group, HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; def emit_cir_only : Flag<["-"], "emit-cir-only">, HelpText<"Build ASTs and convert to CIR, discarding output">; +def emit_cir_flat : Flag<["-"], "emit-cir-flat">, Visibility<[ClangOption, CC1Option]>, + Group, HelpText<"Similar to -emit-cir but also lowers structured CFG into basic blocks.">; def emit_mlir : Flag<["-"], "emit-mlir">, Visibility<[CC1Option]>, Group, HelpText<"Build ASTs and then lower through ClangIR to MLIR, emit the .milr file">; /// ClangIR-specific options - END diff --git a/clang/include/clang/Driver/Types.def b/clang/include/clang/Driver/Types.def index b461473786fd..2c4d227bf07f 100644 --- a/clang/include/clang/Driver/Types.def +++ b/clang/include/clang/Driver/Types.def @@ -100,6 +100,7 @@ TYPE("lto-ir", LTO_IR, INVALID, "s", phases TYPE("lto-bc", LTO_BC, INVALID, "o", phases::Compile, phases::Backend, phases::Assemble, phases::Link) TYPE("cir", CIR, INVALID, "cir", phases::Compile, phases::Backend, phases::Assemble, phases::Link) +TYPE("cir-flat", CIR_FLAT, INVALID, "cir", phases::Compile, phases::Backend, phases::Assemble, phases::Link) TYPE("mlir", MLIR, INVALID, "mlir", phases::Compile, phases::Backend, phases::Assemble, phases::Link) // Misc. diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 3d2ebc1b30b8..f0ee2c1b816d 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -68,6 +68,9 @@ enum ActionKind { /// Emit a .cir file EmitCIR, + /// Emit a .cir file with flat ClangIR + EmitCIRFlat, + /// Generate CIR, bud don't emit anything. EmitCIROnly, diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index a83aa1cf41cf..d2b54af41cc0 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -19,12 +19,14 @@ #include "mlir/Support/LogicalResult.h" namespace cir { -mlir::LogicalResult runCIRToCIRPasses( - mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, - llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, - llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, - llvm::StringRef libOptOpts, std::string &passOptParsingFailure) { +mlir::LogicalResult +runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, + bool enableLifetime, llvm::StringRef lifetimeOpts, + bool enableIdiomRecognizer, + llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, + llvm::StringRef libOptOpts, + std::string &passOptParsingFailure, bool flattenCIR) { mlir::PassManager pm(mlirCtx); pm.addPass(mlir::createMergeCleanupsPass()); @@ -63,6 +65,8 @@ mlir::LogicalResult runCIRToCIRPasses( } pm.addPass(mlir::createLoweringPreparePass(&astCtx)); + if (flattenCIR) + mlir::populateCIRPreLoweringPasses(pm); // FIXME: once CIRCodenAction fixes emission other than CIR we // need to run this right before dialect emission. @@ -71,4 +75,14 @@ mlir::LogicalResult runCIRToCIRPasses( (void)mlir::applyPassManagerCLOptions(pm); return pm.run(theModule); } + } // namespace cir + +namespace mlir { + +void populateCIRPreLoweringPasses(OpPassManager &pm) { + pm.addPass(createFlattenCFGPass()); + // add other passes here +} + +} // namespace mlir \ No newline at end of file diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index a1ff2fc7d119..2a8a4dfeae92 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -6,6 +6,7 @@ add_clang_library(MLIRCIRTransforms IdiomRecognizer.cpp LibOpt.cpp StdHelpers.cpp + FlattenCFG.cpp DEPENDS MLIRCIRPassIncGen diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp new file mode 100644 index 000000000000..477a85df6c26 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -0,0 +1,60 @@ +//====- FlattenCFG.cpp - Flatten CIR CFG ----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements pass that inlines CIR operations regions into the parent +// function region. +// +//===----------------------------------------------------------------------===// +#include "PassDetail.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/DialectConversion.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" + +using namespace mlir; +using namespace mlir::cir; + +namespace { + +struct FlattenCFGPass : public FlattenCFGBase { + + FlattenCFGPass() = default; + void runOnOperation() override; +}; + +void populateFlattenCFGPatterns(RewritePatternSet &patterns) { + // TODO: add patterns here +} + +void FlattenCFGPass::runOnOperation() { + RewritePatternSet patterns(&getContext()); + populateFlattenCFGPatterns(patterns); + + // Collect operations to apply patterns. + SmallVector ops; + getOperation()->walk([&](Operation *op) { + // TODO: push back operations here + }); + + // Apply patterns. + if (applyOpPatternsAndFold(ops, std::move(patterns)).failed()) + signalPassFailure(); +} + +} // namespace + +namespace mlir { + +std::unique_ptr createFlattenCFGPass() { + return std::make_unique(); +} + +} // namespace mlir diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 4e004f1fed83..5f10d256d7aa 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -188,7 +188,8 @@ class CIRGenConsumer : public clang::ASTConsumer { mlirMod, mlirCtx.get(), C, !feOptions.ClangIRDisableCIRVerifier, feOptions.ClangIRLifetimeCheck, lifetimeOpts, feOptions.ClangIRIdiomRecognizer, idiomRecognizerOpts, - feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure) + feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, + action == CIRGenAction::OutputType::EmitCIRFlat) .failed()) { if (!passOptParsingFailure.empty()) diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) @@ -236,6 +237,7 @@ class CIRGenConsumer : public clang::ASTConsumer { switch (action) { case CIRGenAction::OutputType::EmitCIR: + case CIRGenAction::OutputType::EmitCIRFlat: if (outputStream && mlirMod) { // Emit remaining defaulted C++ methods if (!feOptions.ClangIRDisableEmitCXXDefault) @@ -353,6 +355,8 @@ getOutputStream(CompilerInstance &ci, StringRef inFile, return ci.createDefaultOutputFile(false, inFile, "s"); case CIRGenAction::OutputType::EmitCIR: return ci.createDefaultOutputFile(false, inFile, "cir"); + case CIRGenAction::OutputType::EmitCIRFlat: + return ci.createDefaultOutputFile(false, inFile, "cir"); case CIRGenAction::OutputType::EmitMLIR: return ci.createDefaultOutputFile(false, inFile, "mlir"); case CIRGenAction::OutputType::EmitLLVM: @@ -446,6 +450,10 @@ void EmitCIRAction::anchor() {} EmitCIRAction::EmitCIRAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitCIR, _MLIRContext) {} +void EmitCIRFlatAction::anchor() {} +EmitCIRFlatAction::EmitCIRFlatAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitCIRFlat, _MLIRContext) {} + void EmitCIROnlyAction::anchor() {} EmitCIROnlyAction::EmitCIROnlyAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::None, _MLIRContext) {} diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index 7cf80d0b0a0e..edabbaabec13 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -29,6 +29,7 @@ add_clang_library(clangCIRLoweringDirectToLLVM ${dialect_libs} MLIRCIR MLIRAnalysis + MLIRCIRTransforms MLIRIR MLIRParser MLIRSideEffectInterfaces diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 991520bd99f5..8ef45fe49888 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -50,6 +50,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" @@ -916,7 +917,9 @@ struct ConvertCIRToLLVMPass } void runOnOperation() final; - virtual StringRef getArgument() const override { return "cir-to-llvm"; } + virtual StringRef getArgument() const override { + return "cir-to-llvm-internal"; + } }; class CIRCallLowering : public mlir::OpConversionPattern { @@ -2969,6 +2972,11 @@ std::unique_ptr createConvertCIRToLLVMPass() { return std::make_unique(); } +void populateCIRToLLVMPasses(mlir::OpPassManager &pm) { + populateCIRPreLoweringPasses(pm); + pm.addPass(createConvertCIRToLLVMPass()); +} + extern void registerCIRDialectTranslation(mlir::MLIRContext &context); std::unique_ptr @@ -2976,8 +2984,7 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx, bool disableVerifier) { mlir::MLIRContext *mlirCtx = theModule.getContext(); mlir::PassManager pm(mlirCtx); - - pm.addPass(createConvertCIRToLLVMPass()); + populateCIRToLLVMPasses(pm); // This is necessary to have line tables emitted and basic // debugger working. In the future we will add proper debug information diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 93c8dc4ad16d..384fd7f4f2a9 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -420,6 +420,8 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL, (PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) || (PhaseArg = DAL.getLastArg(options::OPT__migrate)) || (PhaseArg = DAL.getLastArg(options::OPT__analyze)) || + (PhaseArg = DAL.getLastArg(options::OPT_emit_cir)) || + (PhaseArg = DAL.getLastArg(options::OPT_emit_cir_flat)) || (PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) { FinalPhase = phases::Compile; @@ -5083,6 +5085,8 @@ Action *Driver::ConstructPhaseAction( return C.MakeAction(Input, types::TY_AST); if (Args.hasArg(options::OPT_emit_cir)) return C.MakeAction(Input, types::TY_CIR); + if (Args.hasArg(options::OPT_emit_cir_flat)) + return C.MakeAction(Input, types::TY_CIR_FLAT); if (Args.hasArg(options::OPT_module_file_info)) return C.MakeAction(Input, types::TY_ModuleFile); if (Args.hasArg(options::OPT_verify_pch)) diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 8d86b32107cd..3a97f27c66e0 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5241,7 +5241,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } } - if (Args.hasArg(options::OPT_fclangir) || Args.hasArg(options::OPT_emit_cir)) + if (Args.hasArg(options::OPT_fclangir) || + Args.hasArg(options::OPT_emit_cir) || + Args.hasArg(options::OPT_emit_cir_flat)) CmdArgs.push_back("-fclangir"); if (Args.hasArg(options::OPT_fclangir_direct_lowering)) @@ -5398,6 +5400,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-emit-llvm"); } else if (JA.getType() == types::TY_CIR) { CmdArgs.push_back("-emit-cir"); + } else if (JA.getType() == types::TY_CIR_FLAT) { + CmdArgs.push_back("-emit-cir-flat"); } else if (JA.getType() == types::TY_LLVM_BC || JA.getType() == types::TY_LTO_BC) { // Emit textual llvm IR for AMDGPU offloading for -emit-llvm -S diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 2b90a7993b98..7ef1f905cf43 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -2735,6 +2735,7 @@ static const auto &getFrontendActionTable() { {frontend::EmitAssembly, OPT_S}, {frontend::EmitBC, OPT_emit_llvm_bc}, {frontend::EmitCIR, OPT_emit_cir}, + {frontend::EmitCIRFlat, OPT_emit_cir_flat}, {frontend::EmitCIROnly, OPT_emit_cir_only}, {frontend::EmitMLIR, OPT_emit_mlir}, {frontend::EmitHTML, OPT_emit_html}, @@ -3108,7 +3109,8 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Opts.ProgramAction != frontend::GenerateModule && Opts.IsSystemModule) Diags.Report(diag::err_drv_argument_only_allowed_with) << "-fsystem-module" << "-emit-module"; - if (Args.hasArg(OPT_fclangir) || Args.hasArg(OPT_emit_cir)) + if (Args.hasArg(OPT_fclangir) || Args.hasArg(OPT_emit_cir) || + Args.hasArg(OPT_emit_cir_flat)) Opts.UseClangIRPipeline = true; if (Args.hasArg(OPT_fclangir_direct_lowering)) @@ -4658,6 +4660,7 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) { case frontend::EmitAssembly: case frontend::EmitBC: case frontend::EmitCIR: + case frontend::EmitCIRFlat: case frontend::EmitCIROnly: case frontend::EmitMLIR: case frontend::EmitHTML: diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 5ee79851ed89..1e1f6b34012f 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -53,7 +53,7 @@ CreateFrontendBaseAction(CompilerInstance &CI) { auto UseCIR = CI.getFrontendOpts().UseClangIRPipeline; auto Act = CI.getFrontendOpts().ProgramAction; - auto EmitsCIR = Act == EmitCIR || Act == EmitCIROnly; + auto EmitsCIR = Act == EmitCIR || Act == EmitCIRFlat || Act == EmitCIROnly; if (!UseCIR && EmitsCIR) llvm::report_fatal_error( @@ -81,10 +81,13 @@ CreateFrontendBaseAction(CompilerInstance &CI) { case EmitBC: return std::make_unique(); #if CLANG_ENABLE_CIR case EmitCIR: return std::make_unique<::cir::EmitCIRAction>(); + case EmitCIRFlat: + return std::make_unique<::cir::EmitCIRFlatAction>(); case EmitCIROnly: return std::make_unique<::cir::EmitCIROnlyAction>(); case EmitMLIR: return std::make_unique<::cir::EmitMLIRAction>(); #else case EmitCIR: + case EmitCIRFlat: case EmitCIROnly: llvm_unreachable("CIR suppport not built into clang"); #endif diff --git a/clang/test/CIR/mlirprint.c b/clang/test/CIR/mlirprint.c index f61f2244af23..bc8d4889f854 100644 --- a/clang/test/CIR/mlirprint.c +++ b/clang/test/CIR/mlirprint.c @@ -1,6 +1,8 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIRFLAT // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -mmlir --mlir-print-ir-after-all -mllvm -print-after-all %s -o %t.ll 2>&1 | FileCheck %s -check-prefix=CIR -check-prefix=LLVM // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-drop-ast %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIRPASS +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -mmlir --mlir-print-ir-before=cir-flatten-cfg %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CFGPASS int foo(void) { int i = 3; @@ -12,12 +14,22 @@ int foo(void) { // CIR: cir.func @foo() -> !s32i // CIR: IR Dump After LoweringPrepare (cir-lowering-prepare) // CIR: cir.func @foo() -> !s32i +// CIR-NOT: IR Dump After FlattenCFG // CIR: IR Dump After DropAST (cir-drop-ast) // CIR: cir.func @foo() -> !s32i -// LLVM: IR Dump After cir::direct::ConvertCIRToLLVMPass (cir-to-llvm) +// CIRFLAT: IR Dump After MergeCleanups (cir-merge-cleanups) +// CIRFLAT: cir.func @foo() -> !s32i +// CIRFLAT: IR Dump After LoweringPrepare (cir-lowering-prepare) +// CIRFLAT: cir.func @foo() -> !s32i +// CIRFLAT: IR Dump After FlattenCFG (cir-flatten-cfg) +// CIRFLAT: IR Dump After DropAST (cir-drop-ast) +// CIRFLAT: cir.func @foo() -> !s32i +// LLVM: IR Dump After cir::direct::ConvertCIRToLLVMPass (cir-to-llvm-internal) // LLVM: llvm.func @foo() -> i32 // LLVM: IR Dump After // LLVM: define i32 @foo() // CIRPASS-NOT: IR Dump After MergeCleanups // CIRPASS: IR Dump After DropAST + +// CFGPASS: IR Dump Before FlattenCFG (cir-flatten-cfg) diff --git a/clang/tools/cir-opt/cir-opt.cpp b/clang/tools/cir-opt/cir-opt.cpp index deee67afa8a4..8e2044ecdc9e 100644 --- a/clang/tools/cir-opt/cir-opt.cpp +++ b/clang/tools/cir-opt/cir-opt.cpp @@ -20,6 +20,7 @@ #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/OpenMP/OpenMPDialect.h" #include "mlir/InitAllPasses.h" +#include "mlir/Pass/PassManager.h" #include "mlir/Pass/PassRegistry.h" #include "mlir/Tools/mlir-opt/MlirOptMain.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" @@ -45,9 +46,10 @@ int main(int argc, char **argv) { return cir::createConvertCIRToMLIRPass(); }); - ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { - return cir::direct::createConvertCIRToLLVMPass(); - }); + mlir::PassPipelineRegistration pipeline( + "cir-to-llvm", "", [](mlir::OpPassManager &pm) { + cir::direct::populateCIRToLLVMPasses(pm); + }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return mlir::createReconcileUnrealizedCastsPass(); From a17a1075794d4ddbb25ba5165511e6b8c75ab82b Mon Sep 17 00:00:00 2001 From: David Olsen Date: Thu, 4 Apr 2024 17:09:12 -0700 Subject: [PATCH 1464/2301] [CIR] shufflevector and convertvector built-ins (#530) Implement `__builtin_shufflevector` and `__builtin_convertvector` in ClangIR. This change contributes to the implemention of issue #284. `__builtin_convertvector` is implemented as a cast. LLVM IR uses the same instructions for arithmetic conversions of both individual scalars and entire vectors. So ClangIR does the same. The code for handling conversions, in both CodeGen and Lowering, is cleaned up to correctly handle vector types. To simplify the lowering code and avoid `if (type.isa())` statements everywhere, the utility function `elementTypeIfVector` was added to `LowerToLLVM.cpp`. `__builtin_shufflevector` has two forms, only one of which appears to be documented. The documented form, which takes a variable-sized list of integer constants for the indices, is implemented with the new ClangIR operation `cir.vec.shuffle.ints`. This operation is lowered to the `llvm.shufflevector` op. The undocumented form, which gets the indices from a vector operand, is implemented with the new ClangIR operation `cir.vec.shuffle.vec`. LLVM IR does not have an instruction for this, so it gets lowered to a long series of `llvm.extractelement` and `llvm.insertelement` operations. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 63 +++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 53 +++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 61 ++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 172 ++++++++++++++---- clang/test/CIR/CodeGen/vectype.cpp | 11 ++ clang/test/CIR/IR/invalid.cir | 40 +++- clang/test/CIR/Lowering/vectype.cpp | 41 +++++ 7 files changed, 388 insertions(+), 53 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 62b0da234457..d1c7cbe1e774 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2198,6 +2198,69 @@ def VecTernaryOp : CIR_Op<"vec.ternary", let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// VecShuffle +//===----------------------------------------------------------------------===// + +// TODO: Create an interface that both VecShuffleOp and VecShuffleDynamicOp +// implement. This could be useful for passes that don't care how the vector +// shuffle was specified. + +def VecShuffleOp : CIR_Op<"vec.shuffle", + [Pure, AllTypesMatch<["vec1", "vec2"]>]> { + let summary = "Combine two vectors using indices passed as constant integers"; + let description = [{ + The `cir.vec.shuffle` operation implements the documented form of Clang's + __builtin_shufflevector, where the indices of the shuffled result are + integer constants. + + The two input vectors, which must have the same type, are concatenated. + Each of the integer constant arguments is interpreted as an index into that + concatenated vector, with a value of -1 meaning that the result value + doesn't matter. The result vector, which must have the same element type as + the input vectors and the same number of elements as the list of integer + constant indices, is constructed by taking the elements at the given + indices from the concatenated vector. The size of the result vector does + not have to match the size of the individual input vectors or of the + concatenated vector. + }]; + let arguments = (ins CIR_VectorType:$vec1, CIR_VectorType:$vec2, + ArrayAttr:$indices); + let results = (outs CIR_VectorType:$result); + let assemblyFormat = [{ + `(` $vec1 `,` $vec2 `:` qualified(type($vec1)) `)` $indices `:` + qualified(type($result)) attr-dict + }]; + let hasVerifier = 1; +} + +//===----------------------------------------------------------------------===// +// VecShuffleDynamic +//===----------------------------------------------------------------------===// + +def VecShuffleDynamicOp : CIR_Op<"vec.shuffle.dynamic", + [Pure, AllTypesMatch<["vec", "result"]>]> { + let summary = "Shuffle a vector using indices in another vector"; + let description = [{ + The `cir.vec.shuffle.dynamic` operation implements the undocumented form of + Clang's __builtin_shufflevector, where the indices of the shuffled result + can be runtime values. + + There are two input vectors, which must have the same number of elements. + The second input vector must have an integral element type. The elements of + the second vector are interpreted as indices into the first vector. The + result vector is constructed by taking the elements from the first input + vector from the indices indicated by the elements of the second vector. + }]; + let arguments = (ins CIR_VectorType:$vec, IntegerVector:$indices); + let results = (outs CIR_VectorType:$result); + let assemblyFormat = [{ + $vec `:` qualified(type($vec)) `,` $indices `:` qualified(type($indices)) + attr-dict + }]; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // BaseClassAddr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 91559b08fa7e..e6093c52ca84 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -280,10 +280,37 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *E) { - llvm_unreachable("NYI"); + if (E->getNumSubExprs() == 2) { + // The undocumented form of __builtin_shufflevector. + mlir::Value InputVec = Visit(E->getExpr(0)); + mlir::Value IndexVec = Visit(E->getExpr(1)); + return CGF.builder.create( + CGF.getLoc(E->getSourceRange()), InputVec, IndexVec); + } else { + // The documented form of __builtin_shufflevector, where the indices are + // a variable number of integer constants. The constants will be stored + // in an ArrayAttr. + mlir::Value Vec1 = Visit(E->getExpr(0)); + mlir::Value Vec2 = Visit(E->getExpr(1)); + SmallVector Indices; + for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { + Indices.push_back(mlir::cir::IntAttr::get( + CGF.builder.getSInt64Ty(), + E->getExpr(i) + ->EvaluateKnownConstInt(CGF.getContext()) + .getSExtValue())); + } + return CGF.builder.create( + CGF.getLoc(E->getSourceRange()), CGF.getCIRType(E->getType()), Vec1, + Vec2, CGF.builder.getArrayAttr(Indices)); + } } mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *E) { - llvm_unreachable("NYI"); + // __builtin_convertvector is an element-wise cast, and is implemented as a + // regular cast. The back end handles casts of vectors correctly. + return buildScalarConversion(Visit(E->getSrcExpr()), + E->getSrcExpr()->getType(), E->getType(), + E->getSourceRange().getBegin()); } mlir::Value VisitMemberExpr(MemberExpr *E); mlir::Value VisitExtVectorelementExpr(Expr *E) { llvm_unreachable("NYI"); } @@ -1725,9 +1752,9 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { } // Conversion from bool, integral, or floating-point to integral or -// floating-point. Conversions involving other types are handled elsewhere. +// floating-point. Conversions involving other types are handled elsewhere. // Conversion to bool is handled elsewhere because that's a comparison against -// zero, not a simple cast. +// zero, not a simple cast. This handles both individual scalars and vectors. mlir::Value ScalarExprEmitter::buildScalarCast( mlir::Value Src, QualType SrcType, QualType DstType, mlir::Type SrcTy, mlir::Type DstTy, ScalarConversionOpts Opts) { @@ -1736,9 +1763,20 @@ mlir::Value ScalarExprEmitter::buildScalarCast( if (SrcTy.isa() || DstTy.isa()) llvm_unreachable("Obsolete code. Don't use mlir::IntegerType with CIR."); + mlir::Type FullDstTy = DstTy; + if (SrcTy.isa() && + DstTy.isa()) { + // Use the element types of the vectors to figure out the CastKind. + SrcTy = SrcTy.dyn_cast().getEltType(); + DstTy = DstTy.dyn_cast().getEltType(); + } + assert(!SrcTy.isa() && + !DstTy.isa() && + "buildScalarCast given a vector type and a non-vector type"); + std::optional CastKind; - if (SrcType->isBooleanType()) { + if (SrcTy.isa()) { if (Opts.TreatBooleanAsSigned) llvm_unreachable("NYI: signed bool"); if (CGF.getBuilder().isInt(DstTy)) { @@ -1768,7 +1806,7 @@ mlir::Value ScalarExprEmitter::buildScalarCast( CastKind = mlir::cir::CastKind::float_to_int; } else if (DstTy.isa()) { // TODO: split this to createFPExt/createFPTrunc - return Builder.createFloatingCast(Src, DstTy); + return Builder.createFloatingCast(Src, FullDstTy); } else { llvm_unreachable("Internal error: Cast to unexpected type"); } @@ -1777,7 +1815,8 @@ mlir::Value ScalarExprEmitter::buildScalarCast( } assert(CastKind.has_value() && "Internal error: CastKind not set."); - return Builder.create(Src.getLoc(), DstTy, *CastKind, Src); + return Builder.create(Src.getLoc(), FullDstTy, *CastKind, + Src); } LValue diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d9b227519ccd..aa4364ffa169 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -395,6 +395,14 @@ LogicalResult CastOp::verify() { auto resType = getResult().getType(); auto srcType = getSrc().getType(); + if (srcType.isa() && + resType.isa()) { + // Use the element type of the vector to verify the cast kind. (Except for + // bitcast, see below.) + srcType = srcType.dyn_cast().getEltType(); + resType = resType.dyn_cast().getEltType(); + } + switch (getKind()) { case cir::CastKind::int_to_bool: { if (!resType.isa()) @@ -433,10 +441,12 @@ LogicalResult CastOp::verify() { return success(); } case cir::CastKind::bitcast: { - if ((!srcType.isa() || - !resType.isa()) && - (!srcType.isa() || - !resType.isa())) + // This is the only cast kind where we don't want vector types to decay + // into the element type. + if ((!getSrc().getType().isa() || + !getResult().getType().isa()) && + (!getSrc().getType().isa() || + !getResult().getType().isa())) return emitOpError() << "requires !cir.ptr or !cir.vector type for source and result"; return success(); @@ -444,7 +454,7 @@ LogicalResult CastOp::verify() { case cir::CastKind::floating: { if (!srcType.isa() || !resType.isa()) - return emitOpError() << "requries floating for source and result"; + return emitOpError() << "requires floating for source and result"; return success(); } case cir::CastKind::float_to_int: { @@ -544,6 +554,47 @@ LogicalResult VecTernaryOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// VecShuffle +//===----------------------------------------------------------------------===// + +LogicalResult VecShuffleOp::verify() { + // The number of elements in the indices array must match the number of + // elements in the result type. + if (getIndices().size() != getResult().getType().getSize()) { + return emitOpError() << ": the number of elements in " << getIndices() + << " and " << getResult().getType() << " don't match"; + } + // The element types of the two input vectors and of the result type must + // match. + if (getVec1().getType().getEltType() != getResult().getType().getEltType()) { + return emitOpError() << ": element types of " << getVec1().getType() + << " and " << getResult().getType() << " don't match"; + } + // The indices must all be integer constants + if (not std::all_of(getIndices().begin(), getIndices().end(), + [](mlir::Attribute attr) { + return attr.isa(); + })) { + return emitOpError() << "all index values must be integers"; + } + return success(); +} + +//===----------------------------------------------------------------------===// +// VecShuffleDynamic +//===----------------------------------------------------------------------===// + +LogicalResult VecShuffleDynamicOp::verify() { + // The number of elements in the two input vectors must match. + if (getVec().getType().getSize() != + getIndices().getType().cast().getSize()) { + return emitOpError() << ": the number of elements in " << getVec().getType() + << " and " << getIndices().getType() << " don't match"; + } + return success(); +} + //===----------------------------------------------------------------------===// // ReturnOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8ef45fe49888..5fceaa693729 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -143,6 +143,15 @@ convertCmpKindToFCmpPredicate(mlir::cir::CmpOpKind kind) { llvm_unreachable("Unknown CmpOpKind"); } +/// If the given type is a vector type, return the vector's element type. +/// Otherwise return the given type unchanged. +mlir::Type elementTypeIfVector(mlir::Type type) { + if (auto VecType = type.dyn_cast()) { + return VecType.getEltType(); + } + return type; +} + } // namespace //===----------------------------------------------------------------------===// @@ -575,6 +584,10 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::CastOp castOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { + // For arithmetic conversions, LLVM IR uses the same instruction to convert + // both individual scalars and entire vectors. This lowering pass handles + // both situations. + auto src = adaptor.getSrc(); switch (castOp.getKind()) { @@ -598,43 +611,46 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { break; } case mlir::cir::CastKind::integral: { - auto dstType = castOp.getResult().getType().cast(); - auto srcType = castOp.getSrc().getType().dyn_cast(); + auto srcType = castOp.getSrc().getType(); + auto dstType = castOp.getResult().getType(); auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = - getTypeConverter()->convertType(dstType).cast(); - - // Target integer is smaller: truncate source value. - if (dstType.getWidth() < srcType.getWidth()) { - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + auto llvmDstType = getTypeConverter()->convertType(dstType); + mlir::cir::IntType srcIntType = + elementTypeIfVector(srcType).cast(); + mlir::cir::IntType dstIntType = + elementTypeIfVector(dstType).cast(); + + if (dstIntType.getWidth() < srcIntType.getWidth()) { + // Bigger to smaller. Truncate. + rewriter.replaceOpWithNewOp(castOp, llvmDstType, llvmSrcVal); - } - // Target integer is larger: sign extend or zero extend. - else if (dstType.getWidth() > srcType.getWidth()) { - if (srcType.isUnsigned()) - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + } else if (dstIntType.getWidth() > srcIntType.getWidth()) { + // Smaller to bigger. Zero extend or sign extend based on signedness. + if (srcIntType.isUnsigned()) + rewriter.replaceOpWithNewOp(castOp, llvmDstType, llvmSrcVal); else - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + rewriter.replaceOpWithNewOp(castOp, llvmDstType, llvmSrcVal); - } else { // Target integer is of the same size: do nothing. + } else { + // Same size. Signedness changes doesn't matter to LLVM. Do nothing. rewriter.replaceOp(castOp, llvmSrcVal); } break; } case mlir::cir::CastKind::floating: { - auto dstTy = castOp.getResult().getType(); - auto srcTy = castOp.getSrc().getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = + getTypeConverter()->convertType(castOp.getResult().getType()); + + auto srcTy = elementTypeIfVector(castOp.getSrc().getType()); + auto dstTy = elementTypeIfVector(castOp.getResult().getType()); if (!dstTy.isa() || !srcTy.isa()) return castOp.emitError() << "NYI cast from " << srcTy << " to " << dstTy; - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = - getTypeConverter()->convertType(dstTy).cast(); - auto getFloatWidth = [](mlir::Type ty) -> unsigned { return ty.cast().getWidth(); }; @@ -707,7 +723,9 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto dstTy = castOp.getType(); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); - if (castOp.getSrc().getType().cast().isSigned()) + if (elementTypeIfVector(castOp.getSrc().getType()) + .cast() + .isSigned()) rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); else @@ -719,7 +737,9 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto dstTy = castOp.getType(); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); - if (castOp.getResult().getType().cast().isSigned()) + if (elementTypeIfVector(castOp.getResult().getType()) + .cast() + .isSigned()) rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); else @@ -1302,8 +1322,7 @@ class CIRVectorCmpOpLowering "Vector compare with non-vector type"); // LLVM IR vector comparison returns a vector of i1. This one-bit vector // must be sign-extended to the correct result type. - auto elementType = - op.getLhs().getType().dyn_cast().getEltType(); + auto elementType = elementTypeIfVector(op.getLhs().getType()); mlir::Value bitResult; if (auto intType = elementType.dyn_cast()) { bitResult = rewriter.create( @@ -1333,8 +1352,8 @@ class CIRVectorSplatLowering mlir::ConversionPatternRewriter &rewriter) const override { // Vector splat can be implemented with an `insertelement` and a // `shufflevector`, which is better than an `insertelement` for each - // element in vector. Start with an undef vector. Insert the value into - // the first element. Then use a `shufflevector` with a mask of all 0 to + // element in the vector. Start with an undef vector. Insert the value into + // the first element. Then use a `shufflevector` with a mask of all 0 to // fill out the entire vector with that value. auto vecTy = op.getType().dyn_cast(); assert(vecTy && "result type of cir.vec.splat op is not VectorType"); @@ -1379,6 +1398,84 @@ class CIRVectorTernaryLowering } }; +class CIRVectorShuffleIntsLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecShuffleOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // LLVM::ShuffleVectorOp takes an ArrayRef of int for the list of indices. + // Convert the ClangIR ArrayAttr of IntAttr constants into a + // SmallVector. + SmallVector indices; + std::transform( + op.getIndices().begin(), op.getIndices().end(), + std::back_inserter(indices), [](mlir::Attribute intAttr) { + return intAttr.cast().getValue().getSExtValue(); + }); + rewriter.replaceOpWithNewOp( + op, adaptor.getVec1(), adaptor.getVec2(), indices); + return mlir::success(); + } +}; + +class CIRVectorShuffleVecLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern< + mlir::cir::VecShuffleDynamicOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecShuffleDynamicOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // LLVM IR does not have an operation that corresponds to this form of + // the built-in. + // __builtin_shufflevector(V, I) + // is implemented as this pseudocode, where the for loop is unrolled + // and N is the number of elements: + // masked = I & (N-1) + // for (i in 0 <= i < N) + // result[i] = V[masked[i]] + auto loc = op.getLoc(); + mlir::Value input = adaptor.getVec(); + mlir::Type llvmIndexVecType = + getTypeConverter()->convertType(op.getIndices().getType()); + mlir::Type llvmIndexType = getTypeConverter()->convertType( + elementTypeIfVector(op.getIndices().getType())); + uint64_t numElements = + op.getVec().getType().cast().getSize(); + mlir::Value maskValue = rewriter.create( + loc, llvmIndexType, + mlir::IntegerAttr::get(llvmIndexType, numElements - 1)); + mlir::Value maskVector = + rewriter.create(loc, llvmIndexVecType); + for (uint64_t i = 0; i < numElements; ++i) { + mlir::Value iValue = rewriter.create( + loc, rewriter.getI64Type(), i); + maskVector = rewriter.create( + loc, maskVector, maskValue, iValue); + } + mlir::Value maskedIndices = rewriter.create( + loc, llvmIndexVecType, adaptor.getIndices(), maskVector); + mlir::Value result = rewriter.create( + loc, getTypeConverter()->convertType(op.getVec().getType())); + for (uint64_t i = 0; i < numElements; ++i) { + mlir::Value iValue = rewriter.create( + loc, rewriter.getI64Type(), i); + mlir::Value indexValue = rewriter.create( + loc, maskedIndices, iValue); + mlir::Value valueAtIndex = + rewriter.create(loc, input, indexValue); + result = rewriter.create( + loc, result, valueAtIndex, iValue); + } + rewriter.replaceOp(op, result); + return mlir::success(); + } +}; + class CIRVAStartLowering : public mlir::OpConversionPattern { public: @@ -1782,12 +1879,8 @@ class CIRUnaryOpLowering assert(op.getType() == op.getInput().getType() && "Unary operation's operand type and result type are different"); mlir::Type type = op.getType(); - mlir::Type elementType = type; - bool IsVector = false; - if (auto VecType = type.dyn_cast()) { - IsVector = true; - elementType = VecType.getEltType(); - } + mlir::Type elementType = elementTypeIfVector(type); + bool IsVector = type.isa(); auto llvmType = getTypeConverter()->convertType(type); auto loc = op.getLoc(); @@ -1943,8 +2036,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { auto rhs = adaptor.getRhs(); auto lhs = adaptor.getLhs(); - if (type.isa()) - type = type.dyn_cast().getEltType(); + type = elementTypeIfVector(type); switch (op.getKind()) { case mlir::cir::BinOpKind::Add: @@ -2765,10 +2857,12 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, - CIRVectorTernaryLowering, CIRStackSaveLowering, CIRStackRestoreLowering, - CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, - CIRSetBitfieldLowering, CIRGetBitfieldLowering, CIRPrefetchLowering, - CIRIsConstantOpLowering>(converter, patterns.getContext()); + CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, + CIRVectorShuffleVecLowering, CIRStackSaveLowering, + CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, + CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, + CIRPrefetchLowering, CIRIsConstantOpLowering>(converter, + patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index ca93bcb1e8dc..b774bfe7a457 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -4,6 +4,7 @@ typedef int vi4 __attribute__((vector_size(16))); typedef double vd2 __attribute__((vector_size(16))); typedef long long vll2 __attribute__((vector_size(16))); +typedef unsigned short vus2 __attribute__((vector_size(4))); void vector_int_test(int x) { @@ -85,6 +86,12 @@ void vector_int_test(int x) { // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vi4 t = a >= b; // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + + // __builtin_shufflevector + vi4 u = __builtin_shufflevector(a, b, 7, 5, 3, 1); + // CHECK: %{{[0-9]+}} = cir.vec.shuffle(%{{[0-9]+}}, %{{[0-9]+}} : !cir.vector) [#cir.int<7> : !s64i, #cir.int<5> : !s64i, #cir.int<3> : !s64i, #cir.int<1> : !s64i] : !cir.vector + vi4 v = __builtin_shufflevector(a, b); + // CHECK: %{{[0-9]+}} = cir.vec.shuffle.dynamic %{{[0-9]+}} : !cir.vector, %{{[0-9]+}} : !cir.vector } void vector_double_test(int x, double y) { @@ -147,4 +154,8 @@ void vector_double_test(int x, double y) { // CHECK: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector vll2 t = a >= b; // CHECK: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + + // __builtin_convertvector + vus2 w = __builtin_convertvector(a, vus2); + // CHECK: %{{[0-9]+}} = cir.cast(float_to_int, %{{[0-9]+}} : !cir.vector), !cir.vector } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 28cae0cc8214..74ab743bb2fd 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -195,7 +195,7 @@ cir.func @cast10(%p : !cir.float) { !u32i = !cir.int cir.func @cast11(%p : !cir.float) { - %2 = cir.cast(floating, %p : !cir.float), !u32i // expected-error {{requries floating for source and result}} + %2 = cir.cast(floating, %p : !cir.float), !u32i // expected-error {{requires floating for source and result}} cir.return } @@ -203,7 +203,7 @@ cir.func @cast11(%p : !cir.float) { !u32i = !cir.int cir.func @cast12(%p : !u32i) { - %2 = cir.cast(floating, %p : !u32i), !cir.float // expected-error {{requries floating for source and result}} + %2 = cir.cast(floating, %p : !u32i), !cir.float // expected-error {{requires floating for source and result}} cir.return } @@ -471,6 +471,42 @@ cir.func @vec_ternary_not_int(%p : !cir.float) { // ----- +!s32i = !cir.int +cir.func @vec_shuffle_mismatch_args(%f : !cir.float, %n : !s32i) { + %0 = cir.vec.create(%f, %f : !cir.float, !cir.float) : !cir.vector + %1 = cir.vec.create(%n, %n : !s32i, !s32i) : !cir.vector // expected-note {{prior use here}} + %2 = cir.vec.shuffle(%0, %1 : !cir.vector) [#cir.int<0> : !s32i, #cir.int<1> : !s32i] : !cir.vector // expected-error {{use of value '%1' expects different type than prior uses: '!cir.vector' vs '!cir.vector x 2>}} + cir.return +} + +// ----- + +cir.func @vec_shuffle_non_ints(%f : !cir.float) { + %0 = cir.vec.create(%f, %f : !cir.float, !cir.float) : !cir.vector + %1 = cir.vec.shuffle(%0, %0 : !cir.vector) [#cir.fp<1.000000e+00> : !cir.float, #cir.fp<1.000000e+00> : !cir.float] : !cir.vector // expected-error {{'cir.vec.shuffle' op all index values must be integers}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_shuffle_result_size(%f : !cir.float) { + %0 = cir.vec.create(%f, %f : !cir.float, !cir.float) : !cir.vector + %1 = cir.vec.shuffle(%0, %0 : !cir.vector) [#cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector // expected-error {{'cir.vec.shuffle' op : the number of elements in [#cir.int<1> : !cir.int, #cir.int<1> : !cir.int] and '!cir.vector' don't match}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_shuffle_result_element(%f : !cir.float) { + %0 = cir.vec.create(%f, %f : !cir.float, !cir.float) : !cir.vector + %1 = cir.vec.shuffle(%0, %0 : !cir.vector) [#cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector // expected-error {{'cir.vec.shuffle' op : element types of '!cir.vector' and '!cir.vector x 2>' don't match}} + cir.return +} + +// ----- + cir.func coroutine @bad_task() { // expected-error {{coroutine body must use at least one cir.await op}} cir.return } diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index 81e70be19264..8e4a758543b5 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -6,6 +6,7 @@ typedef int vi4 __attribute__((vector_size(16))); typedef double vd2 __attribute__((vector_size(16))); typedef long long vll2 __attribute__((vector_size(16))); +typedef unsigned short vus2 __attribute__((vector_size(4))); void vector_int_test(int x) { @@ -183,6 +184,42 @@ void vector_int_test(int x) { // CHECK: %[[#T127:]] = llvm.icmp "sge" %[[#T125]], %[[#T126]] : vector<4xi32> // CHECK: %[[#T128:]] = llvm.sext %[[#T127]] : vector<4xi1> to vector<4xi32> // CHECK: llvm.store %[[#T128]], %[[#Tt:]] : vector<4xi32>, !llvm.ptr + + // __builtin_shufflevector + vi4 u = __builtin_shufflevector(a, b, 7, 5, 3, 1); + // CHECK: %[[#Tu:]] = llvm.shufflevector %[[#bsva:]], %[[#bsvb:]] [7, 5, 3, 1] : vector<4xi32> + vi4 v = __builtin_shufflevector(a, b); + // CHECK: %[[#sv_a:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#sv_b:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#sv0:]] = llvm.mlir.constant(3 : i32) : i32 + // CHECK: %[[#sv1:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#sv2:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#sv3:]] = llvm.insertelement %[[#sv0]], %[[#sv1]][%[[#sv2]] : i64] : vector<4xi32> + // CHECK: %[[#sv4:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#sv5:]] = llvm.insertelement %[[#sv0]], %[[#sv3]][%[[#sv4]] : i64] : vector<4xi32> + // CHECK: %[[#sv6:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#sv7:]] = llvm.insertelement %[[#sv0]], %[[#sv5]][%[[#sv6]] : i64] : vector<4xi32> + // CHECK: %[[#sv8:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#sv9:]] = llvm.insertelement %[[#sv0]], %[[#sv7]][%[[#sv8]] : i64] : vector<4xi32> + // CHECK: %[[#svA:]] = llvm.and %[[#sv_b]], %[[#sv9]] : vector<4xi32> + // CHECK: %[[#svB:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#svC:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#svD:]] = llvm.extractelement %[[#svA]][%[[#svC]] : i64] : vector<4xi32> + // CHECK: %[[#svE:]] = llvm.extractelement %[[#sv_a]][%[[#svD]] : i32] : vector<4xi32> + // CHECK: %[[#svF:]] = llvm.insertelement %[[#svE]], %[[#svB]][%[[#svC]] : i64] : vector<4xi32> + // CHECK: %[[#svG:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#svH:]] = llvm.extractelement %[[#svA]][%[[#svG]] : i64] : vector<4xi32> + // CHECK: %[[#svI:]] = llvm.extractelement %[[#sv_a]][%[[#svH]] : i32] : vector<4xi32> + // CHECK: %[[#svJ:]] = llvm.insertelement %[[#svI]], %[[#svF]][%[[#svG]] : i64] : vector<4xi32> + // CHECK: %[[#svK:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#svL:]] = llvm.extractelement %[[#svA]][%[[#svK]] : i64] : vector<4xi32> + // CHECK: %[[#svM:]] = llvm.extractelement %[[#sv_a]][%[[#svL]] : i32] : vector<4xi32> + // CHECK: %[[#svN:]] = llvm.insertelement %[[#svM]], %[[#svJ]][%[[#svK]] : i64] : vector<4xi32> + // CHECK: %[[#svO:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#svP:]] = llvm.extractelement %[[#svA]][%[[#svO]] : i64] : vector<4xi32> + // CHECK: %[[#svQ:]] = llvm.extractelement %[[#sv_a]][%[[#svP:]] : i32] : vector<4xi32> + // CHECK: %[[#svR:]] = llvm.insertelement %[[#svQ]], %[[#svN]][%[[#svO]] : i64] : vector<4xi32> + // CHECK: llvm.store %[[#svR]], %[[#sv_v:]] : vector<4xi32>, !llvm.ptr } void vector_double_test(int x, double y) { @@ -295,4 +332,8 @@ void vector_double_test(int x, double y) { // CHECK: %[[#T82:]] = llvm.fcmp "oge" %[[#T80]], %[[#T81]] : vector<2xf64> // CHECK: %[[#T83:]] = llvm.sext %[[#T82]] : vector<2xi1> to vector<2xi64> // CHECK: llvm.store %[[#T83]], %[[#Tt:]] : vector<2xi64>, !llvm.ptr + + // __builtin_convertvector + vus2 w = __builtin_convertvector(a, vus2); + // CHECK: %[[#cv0:]] = llvm.fptoui %[[#cv1:]] : vector<2xf64> to vector<2xi16> } From 37ba3b0f97975ce7b8707990a3352a33ede45e4f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 2 Apr 2024 20:45:51 -0700 Subject: [PATCH 1465/2301] [CIR][CIRGen] Add support for __atomic_add_fetch This introduces CIRGen and LLVM lowering for the first of a bunch of these atomic operations, incremental work should generelize the current constructs. --- .../include/clang/CIR/Dialect/IR/CIRDialect.h | 15 +++- clang/include/clang/CIR/Dialect/IR/CIROps.td | 58 +++++++++++++-- .../clang/CIR/Dialect/IR/CIROpsEnums.h | 12 +++ .../include/clang/CIR/Dialect/IR/CIRTypes.td | 11 +++ clang/lib/CIR/CodeGen/Address.h | 1 + clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 72 +++++++++++++----- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 6 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 29 ++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 73 +++++++++++++++++-- clang/test/CIR/CodeGen/atomic.cpp | 27 ++++++- 10 files changed, 269 insertions(+), 35 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index 9d92cc20c0e2..159a1434cdb7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -43,6 +43,7 @@ namespace impl { // corresponding trait classes. This avoids them being template // instantiated/duplicated. LogicalResult verifySameFirstOperandAndResultType(Operation *op); +LogicalResult verifySameSecondOperandAndResultType(Operation *op); LogicalResult verifySameFirstSecondOperandAndResultType(Operation *op); } // namespace impl @@ -59,7 +60,19 @@ class SameFirstOperandAndResultType }; /// This class provides verification for ops that are known to have the same -/// first operand and result type. +/// second operand and result type. +/// +template +class SameSecondOperandAndResultType + : public TraitBase { +public: + static LogicalResult verifyTrait(Operation *op) { + return impl::verifySameSecondOperandAndResultType(op); + } +}; + +/// This class provides verification for ops that are known to have the same +/// first, second operand and result type. /// template class SameFirstSecondOperandAndResultType diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d1c7cbe1e774..ceff861c38a3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -39,6 +39,17 @@ include "mlir/IR/SymbolInterfaces.td" class CIR_Op traits = []> : Op; +//===----------------------------------------------------------------------===// +// CIR Op Traits +//===----------------------------------------------------------------------===// + +def SameFirstOperandAndResultType : + NativeOpTrait<"SameFirstOperandAndResultType">; +def SameSecondOperandAndResultType : + NativeOpTrait<"SameSecondOperandAndResultType">; +def SameFirstSecondOperandAndResultType : + NativeOpTrait<"SameFirstSecondOperandAndResultType">; + //===----------------------------------------------------------------------===// // CastOp //===----------------------------------------------------------------------===// @@ -109,6 +120,7 @@ def CastOp : CIR_Op<"cast", [Pure]> { // The input and output types should match the cast kind. let hasVerifier = 1; + let hasFolder = 1; } //===----------------------------------------------------------------------===// @@ -183,9 +195,6 @@ def PtrDiffOp : CIR_Op<"ptr_diff", [Pure, SameTypeOperands]> { // PtrStrideOp //===----------------------------------------------------------------------===// -def SameFirstOperandAndResultType : - NativeOpTrait<"SameFirstOperandAndResultType">; - def PtrStrideOp : CIR_Op<"ptr_stride", [Pure, SameFirstOperandAndResultType]> { let summary = "Pointer access with stride"; @@ -2933,9 +2942,6 @@ def MemChrOp : CIR_Op<"libc.memchr"> { // StdFindOp //===----------------------------------------------------------------------===// -def SameFirstSecondOperandAndResultType : - NativeOpTrait<"SameFirstSecondOperandAndResultType">; - def StdFindOp : CIR_Op<"std.find", [SameFirstSecondOperandAndResultType]> { let arguments = (ins FlatSymbolRefAttr:$original_fn, CIR_AnyType:$first, @@ -3412,6 +3418,46 @@ def IsConstantOp : CIR_Op<"is_constant", [Pure]> { }]; } +//===----------------------------------------------------------------------===// +// Atomic operations +//===----------------------------------------------------------------------===// + +def MemOrderRelaxed : I32EnumAttrCase<"Relaxed", 0, "relaxed">; +def MemOrderConsume : I32EnumAttrCase<"Consume", 1, "consume">; +def MemOrderAcquire : I32EnumAttrCase<"Acquire", 2, "acquire">; +def MemOrderRelease : I32EnumAttrCase<"Release", 3, "release">; +def MemOrderAcqRel : I32EnumAttrCase<"AcquireRelease", 4, "acq_rel">; +def MemOrderSeqCst : I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">; + +def MemOrder : I32EnumAttr< + "MemOrder", + "Memory order according to C++11 memory model", + [MemOrderRelaxed, MemOrderConsume, MemOrderAcquire, + MemOrderRelease, MemOrderAcqRel, MemOrderSeqCst]> { + let cppNamespace = "::mlir::cir"; +} + +def AtomicAddFetch : CIR_Op<"atomic.add_fetch", + [Pure, SameSecondOperandAndResultType]> { + let summary = "Represents the __atomic_add_fetch builtin"; + let description = [{}]; + let results = (outs CIR_AnyIntOrFloat:$result); + let arguments = (ins IntOrFPPtr:$ptr, CIR_AnyIntOrFloat:$val, + Arg:$mem_order, + UnitAttr:$is_volatile); + + let assemblyFormat = [{ + `(` + $ptr `:` type($ptr) `,` + $val `:` type($val) `,` + $mem_order `)` + (`volatile` $is_volatile^)? + `:` type($result) attr-dict + }]; + + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // Operations Lowered Directly to LLVM IR // diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h index 889cde696e91..06851947f24c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h +++ b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h @@ -115,6 +115,18 @@ LLVM_ATTRIBUTE_UNUSED static bool isValidLinkage(GlobalLinkageKind L) { isLinkOnceLinkage(L); } +bool operator<(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; +bool operator>(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; +bool operator<=(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; +bool operator>=(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; + +// Validate an integral value which isn't known to fit within the enum's range +// is a valid AtomicOrderingCABI. +template inline bool isValidCIRAtomicOrderingCABI(Int I) { + return (Int)mlir::cir::MemOrder::Relaxed <= I && + I <= (Int)mlir::cir::MemOrder::SequentiallyConsistent; +} + } // namespace cir } // namespace mlir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 489afcff7d96..c67d3013ed7f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -169,6 +169,7 @@ def CIR_Double : CIR_FloatType<"Double", "double"> { // Constraints def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double]>; +def CIR_AnyIntOrFloat: AnyTypeOf<[CIR_AnyFloat, CIR_IntType]>; //===----------------------------------------------------------------------===// // PointerType @@ -373,6 +374,16 @@ def VoidPtr : Type< "mlir::cir::VoidType::get($_builder.getContext()))"> { } +// Pointer to int, float or double +def IntOrFPPtr : Type< + And<[ + CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"$_self.cast<::mlir::cir::PointerType>()" + ".getPointee().isa<::mlir::cir::IntType," + "::mlir::cir::SingleType, ::mlir::cir::DoubleType>()">, + ]>, "{int,void}*"> { +} + // Pointer to struct def StructPtr : Type< And<[ diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index e67f640a911f..99544623ad2b 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -79,6 +79,7 @@ class Address { /// Return address with different element type, but same pointer and /// alignment. Address withElementType(mlir::Type ElemTy) const { + // TODO(cir): hasOffset() check return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 8f8ccd0e87a7..23ca168d9765 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -269,6 +269,10 @@ static Address buildValToTemp(CIRGenFunction &CGF, Expr *E) { } Address AtomicInfo::castToAtomicIntPointer(Address addr) const { + auto intTy = addr.getElementType().dyn_cast(); + // Don't bother with int casts if the integer size is the same. + if (intTy && intTy.getWidth() == AtomicSizeInBits) + return addr; auto ty = CGF.getBuilder().getUIntNTy(AtomicSizeInBits); return addr.withElementType(ty); } @@ -314,10 +318,12 @@ static mlir::cir::IntAttr getConstOpIntAttr(mlir::Value v) { static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, Address Ptr, Address Val1, Address Val2, mlir::Value IsWeak, mlir::Value FailureOrder, - uint64_t Size, llvm::AtomicOrdering Order, + uint64_t Size, mlir::cir::MemOrder Order, uint8_t Scope) { assert(!UnimplementedFeature::syncScopeID()); + StringRef Op; [[maybe_unused]] bool PostOpMinMax = false; + auto loc = CGF.getLoc(E->getSourceRange()); switch (E->getOp()) { case AtomicExpr::AO__c11_atomic_init: @@ -375,18 +381,19 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_add_fetch: case AtomicExpr::AO__scoped_atomic_add_fetch: - llvm_unreachable("NYI"); + // In LLVM codegen, the post operation codegen is tracked here. [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_add: case AtomicExpr::AO__hip_atomic_fetch_add: case AtomicExpr::AO__opencl_atomic_fetch_add: case AtomicExpr::AO__atomic_fetch_add: case AtomicExpr::AO__scoped_atomic_fetch_add: - llvm_unreachable("NYI"); + Op = mlir::cir::AtomicAddFetch::getOperationName(); break; case AtomicExpr::AO__atomic_sub_fetch: case AtomicExpr::AO__scoped_atomic_sub_fetch: + // In LLVM codegen, the post operation codegen is tracked here. llvm_unreachable("NYI"); [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_sub: @@ -423,6 +430,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_and_fetch: case AtomicExpr::AO__scoped_atomic_and_fetch: + // In LLVM codegen, the post operation codegen is tracked here. llvm_unreachable("NYI"); [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_and: @@ -435,6 +443,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_or_fetch: case AtomicExpr::AO__scoped_atomic_or_fetch: + // In LLVM codegen, the post operation codegen is tracked here. llvm_unreachable("NYI"); [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_or: @@ -447,6 +456,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_xor_fetch: case AtomicExpr::AO__scoped_atomic_xor_fetch: + // In LLVM codegen, the post operation codegen is tracked here. llvm_unreachable("NYI"); [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_xor: @@ -459,6 +469,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_nand_fetch: case AtomicExpr::AO__scoped_atomic_nand_fetch: + // In LLVM codegen, the post operation codegen is tracked here. llvm_unreachable("NYI"); [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_nand: @@ -467,13 +478,38 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, llvm_unreachable("NYI"); break; } - llvm_unreachable("NYI"); + + assert(Op.size() && "expected operation name to build"); + auto &builder = CGF.getBuilder(); + + auto LoadVal1 = builder.createLoad(loc, Val1); + + SmallVector atomicOperands = {Ptr.getPointer(), LoadVal1}; + SmallVector atomicResTys = { + Ptr.getPointer().getType().cast().getPointee()}; + auto orderAttr = mlir::cir::MemOrderAttr::get(builder.getContext(), Order); + auto RMWI = builder.create(loc, builder.getStringAttr(Op), atomicOperands, + atomicResTys, {}); + RMWI->setAttr("mem_order", orderAttr); + if (E->isVolatile()) + RMWI->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); + auto Result = RMWI->getResult(0); + + if (PostOpMinMax) + llvm_unreachable("NYI"); + + // This should be handled in LowerToLLVM.cpp, still tracking here for now. + if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch || + E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch) + llvm_unreachable("NYI"); + + builder.createStore(loc, Result, Dest); } static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, Address Ptr, Address Val1, Address Val2, mlir::Value IsWeak, mlir::Value FailureOrder, - uint64_t Size, llvm::AtomicOrdering Order, + uint64_t Size, mlir::cir::MemOrder Order, mlir::Value Scope) { auto ScopeModel = Expr->getScopeModel(); @@ -1011,34 +1047,34 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // We should not ever get to a case where the ordering isn't a valid CABI // value, but it's hard to enforce that in general. auto ord = ordAttr.getUInt(); - if (llvm::isValidAtomicOrderingCABI(ord)) { - switch ((llvm::AtomicOrderingCABI)ord) { - case llvm::AtomicOrderingCABI::relaxed: + if (mlir::cir::isValidCIRAtomicOrderingCABI(ord)) { + switch ((mlir::cir::MemOrder)ord) { + case mlir::cir::MemOrder::Relaxed: buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::Monotonic, Scope); + mlir::cir::MemOrder::Relaxed, Scope); break; - case llvm::AtomicOrderingCABI::consume: - case llvm::AtomicOrderingCABI::acquire: + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: if (IsStore) break; // Avoid crashing on code with undefined behavior buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::Acquire, Scope); + mlir::cir::MemOrder::Acquire, Scope); break; - case llvm::AtomicOrderingCABI::release: + case mlir::cir::MemOrder::Release: if (IsLoad) break; // Avoid crashing on code with undefined behavior buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::Release, Scope); + mlir::cir::MemOrder::Release, Scope); break; - case llvm::AtomicOrderingCABI::acq_rel: + case mlir::cir::MemOrder::AcquireRelease: if (IsLoad || IsStore) break; // Avoid crashing on code with undefined behavior buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::AcquireRelease, Scope); + mlir::cir::MemOrder::AcquireRelease, Scope); break; - case llvm::AtomicOrderingCABI::seq_cst: + case mlir::cir::MemOrder::SequentiallyConsistent: buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - llvm::AtomicOrdering::SequentiallyConsistent, Scope); + mlir::cir::MemOrder::SequentiallyConsistent, Scope); break; } } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 2c2d0a3d0464..bedd83551990 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -742,8 +742,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } mlir::Value createLoad(mlir::Location loc, Address addr) { - return create(loc, addr.getElementType(), - addr.getPointer()); + auto ptrTy = addr.getPointer().getType().dyn_cast(); + return create( + loc, addr.getElementType(), + createElementBitCast(loc, addr, ptrTy.getPointee()).getPointer()); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index aa4364ffa169..9893d9b3c27a 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -511,6 +511,20 @@ LogicalResult CastOp::verify() { llvm_unreachable("Unknown CastOp kind?"); } +OpFoldResult CastOp::fold(FoldAdaptor adaptor) { + if (getKind() != mlir::cir::CastKind::integral) + return {}; + if (getSrc().getType() != getResult().getType()) + return {}; + // TODO: for sign differences, it's possible in certain conditions to + // create a new attributes that's capable or representing the source. + SmallVector foldResults; + auto foldOrder = getSrc().getDefiningOp()->fold(foldResults); + if (foldOrder.succeeded() && foldResults[0].is()) + return foldResults[0].get(); + return {}; +} + //===----------------------------------------------------------------------===// // VecCreateOp //===----------------------------------------------------------------------===// @@ -2373,6 +2387,21 @@ mlir::OpTrait::impl::verifySameFirstOperandAndResultType(Operation *op) { return success(); } +LogicalResult +mlir::OpTrait::impl::verifySameSecondOperandAndResultType(Operation *op) { + if (failed(verifyAtLeastNOperands(op, 2)) || failed(verifyOneResult(op))) + return failure(); + + auto type = op->getResult(0).getType(); + auto opType = op->getOperand(1).getType(); + + if (type != opType) + return op->emitOpError() + << "requires the same type for first operand and result"; + + return success(); +} + LogicalResult mlir::OpTrait::impl::verifySameFirstSecondOperandAndResultType(Operation *op) { if (failed(verifyAtLeastNOperands(op, 3)) || failed(verifyOneResult(op))) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5fceaa693729..29192aea5e69 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2406,6 +2406,65 @@ class CIRBitPopcountOpLowering } }; +class CIRAtomicFetchLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LLVM::AtomicOrdering + getLLVMAtomicOrder(mlir::cir::MemOrder memo) const { + switch (memo) { + case mlir::cir::MemOrder::Relaxed: + return mlir::LLVM::AtomicOrdering::monotonic; + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: + return mlir::LLVM::AtomicOrdering::acquire; + case mlir::cir::MemOrder::Release: + return mlir::LLVM::AtomicOrdering::release; + case mlir::cir::MemOrder::AcquireRelease: + return mlir::LLVM::AtomicOrdering::acq_rel; + case mlir::cir::MemOrder::SequentiallyConsistent: + return mlir::LLVM::AtomicOrdering::seq_cst; + } + llvm_unreachable("shouldn't get here"); + } + + mlir::LogicalResult buildPostOp(mlir::cir::AtomicAddFetch op, + OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::Value rmwVal) const { + if (op.getVal().getType().isa()) + rewriter.replaceOpWithNewOp(op, rmwVal, + adaptor.getVal()); + else if (op.getVal() + .getType() + .isa()) + rewriter.replaceOpWithNewOp(op, rmwVal, + adaptor.getVal()); + else + return op.emitError() << "Unsupported type"; + return mlir::success(); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AtomicAddFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); + + // FIXME: add syncscope. + auto rmwVal = rewriter.create( + op.getLoc(), mlir::LLVM::AtomicBinOp::add, adaptor.getPtr(), + adaptor.getVal(), llvmOrder); + + // FIXME: Make the rewrite generic and expand this to more opcodes. + bool hasPostOp = isa(op); + + if (hasPostOp) + return buildPostOp(op, adaptor, rewriter, rmwVal.getRes()); + return mlir::success(); + } +}; + class CIRBrOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -2845,13 +2904,13 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add< CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, - CIRBitPopcountOpLowering, CIRLoopOpInterfaceLowering, CIRBrCondOpLowering, - CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, - CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, - CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, - CIRScopeOpLowering, CIRCastOpLowering, CIRIfLowering, CIRGlobalOpLowering, - CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, - CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, + CIRBitPopcountOpLowering, CIRAtomicFetchLowering, + CIRLoopOpInterfaceLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, + CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, + CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, + CIRFuncLowering, CIRScopeOpLowering, CIRCastOpLowering, CIRIfLowering, + CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, + CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index cd6852beccd7..ff79be74e0d3 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s typedef struct _a { _Atomic(int) d; @@ -7,4 +9,27 @@ typedef struct _a { void m() { at y; } -// CHECK: !ty_22_a22 = !cir.struct \ No newline at end of file +// CHECK: ![[A:.*]] = !cir.struct + +enum memory_order { + memory_order_relaxed, memory_order_consume, memory_order_acquire, + memory_order_release, memory_order_acq_rel, memory_order_seq_cst +}; + +int fi3b(int *i) { + return __atomic_add_fetch(i, 1, memory_order_seq_cst); +} + +// CHECK: cir.func @_Z4fi3bPi +// CHECK: %[[ARGI:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["i", init] {alignment = 8 : i64} +// CHECK: %[[ONE_ADDR:.*]] = cir.alloca !s32i, cir.ptr , [".atomictmp"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %[[ARGI]] : !cir.ptr, cir.ptr > +// CHECK: %[[I:.*]] = cir.load %[[ARGI]] : cir.ptr >, !cir.ptr +// CHECK: %[[ONE:.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: cir.store %[[ONE]], %[[ONE_ADDR]] : !s32i, cir.ptr +// CHECK: %[[VAL:.*]] = cir.load %[[ONE_ADDR]] : cir.ptr , !s32i +// CHECK: cir.atomic.add_fetch(%[[I]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) : !s32i + +// LLVM: define i32 @_Z4fi3bPi +// LLVM: %[[RMW:.*]] = atomicrmw add ptr {{.*}}, i32 %[[VAL:.*]] seq_cst, align 4 +// LLVM: add i32 %[[RMW]], %[[VAL]] \ No newline at end of file From bdbf9116702e28fbb7c7451210fc5b58fd861231 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Sat, 6 Apr 2024 09:23:40 +0800 Subject: [PATCH 1466/2301] [CIR] Add support for byteswap intrinsic (#523) This PR adds support for the following intrinsic functions: - `__builtin_bswap{16, 32, 64}` - `_byteswap_{ushort, ulong, uint64}` This PR adds a new `cir.bswap` operation to represent such an intrinsic call. CIRGen and LLVMIR lowering for the new operation is included in this PR. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 34 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 11 ++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 28 ++++++++++++++- clang/test/CIR/CodeGen/bswap.cpp | 30 ++++++++++++++++ clang/test/CIR/Lowering/bswap.cir | 19 +++++++++++ 5 files changed, 121 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/bswap.cpp create mode 100644 clang/test/CIR/Lowering/bswap.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index ceff861c38a3..4178115be236 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1156,6 +1156,40 @@ def BitPopcountOp : CIR_BitOp<"bit.popcount", UIntOfWidths<[16, 32, 64]>> { }]; } +//===----------------------------------------------------------------------===// +// ByteswapOp +//===----------------------------------------------------------------------===// + +def ByteswapOp : CIR_Op<"bswap", [Pure, SameOperandsAndResultType]> { + let summary = "Reverse the bytes that constitute the operand integer"; + let description = [{ + The `cir.bswap` operation takes an integer as operand, and returns it with + the order of bytes that constitute the operand reversed. + + The operand integer must be an unsigned integer. Its widths must be either + 16, 32, or 64. + + Example: + + ```mlir + !u32i = !cir.int + + // %0 = 0x12345678 + %0 = cir.const(#cir.int<305419896> : !u32i) : !u32i + + // %1 should be 0x78563412 + %1 = cir.bswap(%0 : !u32i) : !u32i + ``` + }]; + + let results = (outs CIR_IntType:$result); + let arguments = (ins UIntOfWidths<[16, 32, 64]>:$input); + + let assemblyFormat = [{ + `(` $input `:` type($input) `)` `:` type($result) attr-dict + }]; +} + //===----------------------------------------------------------------------===// // CmpThreeWayOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 03f8093616ba..bcd8ab8fe5f3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -593,6 +593,17 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_popcountll: return buildBuiltinBitOp(*this, E, std::nullopt); + case Builtin::BI__builtin_bswap16: + case Builtin::BI__builtin_bswap32: + case Builtin::BI__builtin_bswap64: + case Builtin::BI_byteswap_ushort: + case Builtin::BI_byteswap_ulong: + case Builtin::BI_byteswap_uint64: { + auto arg = buildScalarExpr(E->getArg(0)); + return RValue::get(builder.create( + getLoc(E->getSourceRange()), arg)); + } + case Builtin::BI__builtin_constant_p: { mlir::Type ResultType = ConvertType(E->getType()); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 29192aea5e69..e60476bddcc1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2465,6 +2465,32 @@ class CIRAtomicFetchLowering } }; +class CIRByteswapOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ByteswapOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // Note that LLVM intrinsic calls to @llvm.bswap.i* have the same type as + // the operand. + + auto resTy = + getTypeConverter()->convertType(op.getType()).cast(); + + std::string llvmIntrinName = "llvm.bswap.i"; + llvmIntrinName.append(std::to_string(resTy.getWidth())); + auto llvmIntrinNameAttr = + mlir::StringAttr::get(rewriter.getContext(), llvmIntrinName); + + rewriter.replaceOpWithNewOp( + op, resTy, llvmIntrinNameAttr, adaptor.getInput()); + + return mlir::LogicalResult::success(); + } +}; + class CIRBrOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -2904,7 +2930,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add< CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, - CIRBitPopcountOpLowering, CIRAtomicFetchLowering, + CIRBitPopcountOpLowering, CIRAtomicFetchLowering, CIRByteswapOpLowering, CIRLoopOpInterfaceLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, diff --git a/clang/test/CIR/CodeGen/bswap.cpp b/clang/test/CIR/CodeGen/bswap.cpp new file mode 100644 index 000000000000..66a6ccf3ffec --- /dev/null +++ b/clang/test/CIR/CodeGen/bswap.cpp @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +using u16 = unsigned short; +using u32 = unsigned int; +using u64 = unsigned long long; + +u16 bswap_u16(u16 x) { + return __builtin_bswap16(x); +} + +// CHECK: cir.func @_Z9bswap_u16t +// CHECK: %{{.+}} = cir.bswap(%{{.+}} : !u16i) : !u16i +// CHECK: } + +u32 bswap_u32(u32 x) { + return __builtin_bswap32(x); +} + +// CHECK: cir.func @_Z9bswap_u32j +// CHECK: %{{.+}} = cir.bswap(%{{.+}} : !u32i) : !u32i +// CHECK: } + +u64 bswap_u64(u64 x) { + return __builtin_bswap64(x); +} + +// CHECK: cir.func @_Z9bswap_u64y +// CHECK: %{{.+}} = cir.bswap(%{{.+}} : !u64i) : !u64i +// CHECK: } diff --git a/clang/test/CIR/Lowering/bswap.cir b/clang/test/CIR/Lowering/bswap.cir new file mode 100644 index 000000000000..7e778820a131 --- /dev/null +++ b/clang/test/CIR/Lowering/bswap.cir @@ -0,0 +1,19 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!u32i = !cir.int + +cir.func @test(%arg0: !u32i) -> !u32i { + %0 = cir.bswap(%arg0 : !u32i) : !u32i + cir.return %0 : !u32i +} + +// MLIR: llvm.func @test(%arg0: i32) -> i32 +// MLIR-NEXT: %0 = llvm.call_intrinsic "llvm.bswap.i32"(%arg0) : (i32) -> i32 +// MLIR-NEXT: llvm.return %0 : i32 +// MLIR-NEXT: } + +// LLVM: define i32 @test(i32 %0) +// LLVM-NEXT: %2 = call i32 @llvm.bswap.i32(i32 %0) +// LLVM-NEXT: ret i32 %2 +// LLVM-NEXT: } From eb30d9d0ea89b1aaa6d5c307b361b196392c4198 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Fri, 5 Apr 2024 18:26:14 -0700 Subject: [PATCH 1467/2301] [CIR] GNU vector type cleanup (#531) This is the final commit for issue #284. Vector types other than GNU vector types will be covered by other yet-to-be-created issues. Now that GNU vector types (the ones defined via the vector_size attribute) are implemented, do a final cleanup of the assertions and other checks related to vector types. Remove `UnimplementedFeature::cirVectorType()`. Deal with the remaining calls to that function. When the that is not yet implemented has to do with Arm SVE vectors, the assert was changed to `UnimplementedFeature::scalableVectors()` instead. The assertion was removed in cases where the code correctly handle GNU vector types. While cleaning up the assertion checks, I noticed that BinOp handling of vector types wasn't quite complete. Any special handling for integer or floating-point types wasn't happening when the operands were vector types. To fix this, split `BinOpInfo::Ty` into two fields, `FullType` and `CompType`. `FullType` is the type of the operands. `CompType` is normally the same as `FullType`, but is the element type when `FullType` is a vector type. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 94 +++++++++---------- .../CodeGen/UnimplementedFeatureGuarding.h | 5 +- 2 files changed, 48 insertions(+), 51 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index e6093c52ca84..22c8ffdf96ff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -37,7 +37,9 @@ struct BinOpInfo { mlir::Value LHS; mlir::Value RHS; SourceRange Loc; - QualType Ty; // Computation Type. + QualType FullType; // Type of operands and result + QualType CompType; // Type used for computations. Element type + // for vectors, otherwise same as FullType. BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform FPOptions FPFeatures; const Expr *E; // Entire expr, for error unsupported. May not be binop. @@ -749,7 +751,11 @@ class ScalarExprEmitter : public StmtVisitor { BinOpInfo Result; Result.LHS = Visit(E->getLHS()); Result.RHS = Visit(E->getRHS()); - Result.Ty = E->getType(); + Result.FullType = E->getType(); + Result.CompType = E->getType(); + if (auto VecType = dyn_cast_or_null(E->getType())) { + Result.CompType = VecType->getElementType(); + } Result.Opcode = E->getOpcode(); Result.Loc = E->getSourceRange(); // TODO: Result.FPFeatures @@ -850,7 +856,7 @@ class ScalarExprEmitter : public StmtVisitor { // a vector. mlir::cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); return Builder.create( - CGF.getLoc(BOInfo.Loc), CGF.getCIRType(BOInfo.Ty), Kind, + CGF.getLoc(BOInfo.Loc), CGF.getCIRType(BOInfo.FullType), Kind, BOInfo.LHS, BOInfo.RHS); } } @@ -869,15 +875,9 @@ class ScalarExprEmitter : public StmtVisitor { mlir::cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); return Builder.create(CGF.getLoc(BOInfo.Loc), - CGF.getCIRType(BOInfo.Ty), Kind, - BOInfo.LHS, BOInfo.RHS); + CGF.getCIRType(BOInfo.FullType), + Kind, BOInfo.LHS, BOInfo.RHS); } - - // If this is a vector comparison, sign extend the result to the - // appropriate vector integer type and return it (don't convert to - // bool). - if (LHSTy->isVectorType()) - assert(0 && "not implemented"); } else { // Complex Comparison: can only be an equality comparison. assert(0 && "not implemented"); } @@ -994,10 +994,7 @@ class ScalarExprEmitter : public StmtVisitor { assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && "Internal error: conversion between matrix type and scalar type"); - // TODO(CIR): Support VectorTypes - assert(!UnimplementedFeature::cirVectorType() && "NYI: vector cast"); - - // Finally, we have the arithmetic types: real int/float. + // Finally, we have the arithmetic types or vectors of arithmetic types. mlir::Value Res = nullptr; mlir::Type ResTy = DstTy; @@ -1214,18 +1211,18 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) { return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Mul, - Ops.LHS, Ops.RHS); + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildDiv(const BinOpInfo &Ops) { return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Div, - Ops.LHS, Ops.RHS); + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::Div, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildRem(const BinOpInfo &Ops) { return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Rem, - Ops.LHS, Ops.RHS); + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { @@ -1234,25 +1231,25 @@ mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/false); return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Add, - Ops.LHS, Ops.RHS); + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { // The LHS is always a pointer if either side is. if (!Ops.LHS.getType().isa()) { - if (Ops.Ty->isSignedIntegerOrEnumerationType()) { + if (Ops.CompType->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: { llvm_unreachable("NYI"); return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); } case LangOptions::SOB_Undefined: if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); [[fallthrough]]; case LangOptions::SOB_Trapping: @@ -1262,17 +1259,16 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { } } - if (Ops.Ty->isConstantMatrixType()) { + if (Ops.FullType->isConstantMatrixType()) { llvm_unreachable("NYI"); } - if (Ops.Ty->isUnsignedIntegerType() && + if (Ops.CompType->isUnsignedIntegerType() && CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && !CanElideOverflowCheck(CGF.getContext(), Ops)) llvm_unreachable("NYI"); - assert(!UnimplementedFeature::cirVectorType()); - if (Ops.LHS.getType().isa()) { + if (Ops.CompType->isFloatingType()) { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); return Builder.createFSub(Ops.LHS, Ops.RHS); } @@ -1281,8 +1277,8 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { llvm_unreachable("NYI"); return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Sub, - Ops.LHS, Ops.RHS); + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); } // If the RHS is not a pointer, then we have normal pointer @@ -1313,12 +1309,12 @@ mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { // promote or truncate the RHS to the same size as the LHS. bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && - Ops.Ty->hasSignedIntegerRepresentation() && + Ops.CompType->hasSignedIntegerRepresentation() && !CGF.getLangOpts().isSignedOverflowDefined() && !CGF.getLangOpts().CPlusPlus20; bool SanitizeUnsignedBase = CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) && - Ops.Ty->hasUnsignedIntegerRepresentation(); + Ops.CompType->hasUnsignedIntegerRepresentation(); bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase; bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); @@ -1331,7 +1327,7 @@ mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { } return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), Ops.LHS, Ops.RHS, + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), Ops.LHS, Ops.RHS, CGF.getBuilder().getUnitAttr()); } @@ -1355,23 +1351,23 @@ mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) { // Note that we don't need to distinguish unsigned treatment at this // point since it will be handled later by LLVM lowering. return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), Ops.LHS, Ops.RHS); + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildAnd(const BinOpInfo &Ops) { return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::And, - Ops.LHS, Ops.RHS); + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::And, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildXor(const BinOpInfo &Ops) { return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Xor, - Ops.LHS, Ops.RHS); + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildOr(const BinOpInfo &Ops) { return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.Ty), mlir::cir::BinOpKind::Or, - Ops.LHS, Ops.RHS); + CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), + mlir::cir::BinOpKind::Or, Ops.LHS, Ops.RHS); } // Emit code for an explicit or implicit cast. Implicit @@ -1410,7 +1406,6 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { auto Src = Visit(const_cast(E)); mlir::Type DstTy = CGF.convertType(DestTy); - assert(!UnimplementedFeature::cirVectorType()); assert(!UnimplementedFeature::addressSpace()); if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { llvm_unreachable("NYI"); @@ -1426,12 +1421,12 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // If Src is a fixed vector and Dst is a scalable vector, and both have the // same element type, use the llvm.vector.insert intrinsic to perform the // bitcast. - assert(!UnimplementedFeature::cirVectorType()); + assert(!UnimplementedFeature::scalableVectors()); // If Src is a scalable vector and Dst is a fixed vector, and both have the // same element type, use the llvm.vector.extract intrinsic to perform the // bitcast. - assert(!UnimplementedFeature::cirVectorType()); + assert(!UnimplementedFeature::scalableVectors()); // Perform VLAT <-> VLST bitcast through memory. // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics @@ -1439,7 +1434,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // need to keep this around for bitcasts between VLAT <-> VLST where // the element types of the vectors are not the same, until we figure // out a better way of doing these casts. - assert(!UnimplementedFeature::cirVectorType()); + assert(!UnimplementedFeature::scalableVectors()); + return CGF.getBuilder().createBitcast(CGF.getLoc(E->getSourceRange()), Src, DstTy); } @@ -1881,7 +1877,11 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( // Emit the RHS first. __block variables need to have the rhs evaluated // first, plus this should improve codegen a little. OpInfo.RHS = Visit(E->getRHS()); - OpInfo.Ty = E->getComputationResultType(); + OpInfo.FullType = E->getComputationResultType(); + OpInfo.CompType = OpInfo.FullType; + if (auto VecType = dyn_cast_or_null(OpInfo.FullType)) { + OpInfo.CompType = VecType->getElementType(); + } OpInfo.Opcode = E->getOpcode(); OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); OpInfo.E = E; diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 06bd8201834c..1a8d1328f90c 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -23,10 +23,7 @@ struct UnimplementedFeature { static bool tbaa() { return false; } static bool cleanups() { return false; } - // cir::VectorType is in progress, so cirVectorType() will go away soon. - // Start adding feature flags for more advanced vector types and operations - // that will take longer to implement. - static bool cirVectorType() { return false; } + // GNU vectors are done, but other kinds of vectors haven't been implemented. static bool scalableVectors() { return false; } static bool vectorConstants() { return false; } From 59f423d5d3bd24311b22cd7d42ff097570b32960 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 8 Apr 2024 11:19:48 -0700 Subject: [PATCH 1468/2301] [CIR][CIRGen] Add support for other __atomic_{binop}_fetch --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 25 ++++- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 42 ++++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 106 ++++++++++++++---- clang/test/CIR/CodeGen/atomic.cpp | 59 +++++++++- 4 files changed, 183 insertions(+), 49 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 4178115be236..e43052523bb2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3456,6 +3456,7 @@ def IsConstantOp : CIR_Op<"is_constant", [Pure]> { // Atomic operations //===----------------------------------------------------------------------===// +// Memory order related definitions. def MemOrderRelaxed : I32EnumAttrCase<"Relaxed", 0, "relaxed">; def MemOrderConsume : I32EnumAttrCase<"Consume", 1, "consume">; def MemOrderAcquire : I32EnumAttrCase<"Acquire", 2, "acquire">; @@ -3471,17 +3472,35 @@ def MemOrder : I32EnumAttr< let cppNamespace = "::mlir::cir"; } -def AtomicAddFetch : CIR_Op<"atomic.add_fetch", - [Pure, SameSecondOperandAndResultType]> { - let summary = "Represents the __atomic_add_fetch builtin"; +// Binary opcodes for atomic fetch. +def Atomic_Add : I32EnumAttrCase<"Add", 0, "add">; +def Atomic_Sub : I32EnumAttrCase<"Sub", 1, "sub">; +def Atomic_And : I32EnumAttrCase<"And", 2, "and">; +def Atomic_Xor : I32EnumAttrCase<"Xor", 3, "xor">; +def Atomic_Or : I32EnumAttrCase<"Or", 4, "or">; +def Atomic_Nand : I32EnumAttrCase<"Nand", 5, "nand">; + +def AtomicFetchKind : I32EnumAttr< + "AtomicFetchKind", + "Binary opcode for cir.atomic.fetch.binop and cir.atomic.binop.fetch", + [Atomic_Add, Atomic_Sub, Atomic_And, + Atomic_Xor, Atomic_Or, Atomic_Nand]> { + let cppNamespace = "::mlir::cir"; +} + +def AtomicBinopFetch : CIR_Op<"atomic.binop_fetch", + [Pure, SameSecondOperandAndResultType]> { + let summary = "Represents the __atomic_binop_fetch builtin"; let description = [{}]; let results = (outs CIR_AnyIntOrFloat:$result); let arguments = (ins IntOrFPPtr:$ptr, CIR_AnyIntOrFloat:$val, + AtomicFetchKind:$binop, Arg:$mem_order, UnitAttr:$is_volatile); let assemblyFormat = [{ `(` + $binop `,` $ptr `:` type($ptr) `,` $val `:` type($val) `,` $mem_order `)` diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 23ca168d9765..5a1ba29227e8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -323,7 +323,10 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, assert(!UnimplementedFeature::syncScopeID()); StringRef Op; [[maybe_unused]] bool PostOpMinMax = false; + + auto &builder = CGF.getBuilder(); auto loc = CGF.getLoc(E->getSourceRange()); + mlir::cir::AtomicFetchKindAttr fetchAttr; switch (E->getOp()) { case AtomicExpr::AO__c11_atomic_init: @@ -388,20 +391,23 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__opencl_atomic_fetch_add: case AtomicExpr::AO__atomic_fetch_add: case AtomicExpr::AO__scoped_atomic_fetch_add: - Op = mlir::cir::AtomicAddFetch::getOperationName(); + Op = mlir::cir::AtomicBinopFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Add); break; case AtomicExpr::AO__atomic_sub_fetch: case AtomicExpr::AO__scoped_atomic_sub_fetch: // In LLVM codegen, the post operation codegen is tracked here. - llvm_unreachable("NYI"); [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_sub: case AtomicExpr::AO__hip_atomic_fetch_sub: case AtomicExpr::AO__opencl_atomic_fetch_sub: case AtomicExpr::AO__atomic_fetch_sub: case AtomicExpr::AO__scoped_atomic_fetch_sub: - llvm_unreachable("NYI"); + Op = mlir::cir::AtomicBinopFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Sub); break; case AtomicExpr::AO__atomic_min_fetch: @@ -431,57 +437,59 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_and_fetch: case AtomicExpr::AO__scoped_atomic_and_fetch: // In LLVM codegen, the post operation codegen is tracked here. - llvm_unreachable("NYI"); [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_and: case AtomicExpr::AO__hip_atomic_fetch_and: case AtomicExpr::AO__opencl_atomic_fetch_and: case AtomicExpr::AO__atomic_fetch_and: case AtomicExpr::AO__scoped_atomic_fetch_and: - llvm_unreachable("NYI"); + Op = mlir::cir::AtomicBinopFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::And); break; case AtomicExpr::AO__atomic_or_fetch: case AtomicExpr::AO__scoped_atomic_or_fetch: // In LLVM codegen, the post operation codegen is tracked here. - llvm_unreachable("NYI"); [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_or: case AtomicExpr::AO__hip_atomic_fetch_or: case AtomicExpr::AO__opencl_atomic_fetch_or: case AtomicExpr::AO__atomic_fetch_or: case AtomicExpr::AO__scoped_atomic_fetch_or: - llvm_unreachable("NYI"); + Op = mlir::cir::AtomicBinopFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Or); break; case AtomicExpr::AO__atomic_xor_fetch: case AtomicExpr::AO__scoped_atomic_xor_fetch: // In LLVM codegen, the post operation codegen is tracked here. - llvm_unreachable("NYI"); [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_xor: case AtomicExpr::AO__hip_atomic_fetch_xor: case AtomicExpr::AO__opencl_atomic_fetch_xor: case AtomicExpr::AO__atomic_fetch_xor: case AtomicExpr::AO__scoped_atomic_fetch_xor: - llvm_unreachable("NYI"); + Op = mlir::cir::AtomicBinopFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Xor); break; case AtomicExpr::AO__atomic_nand_fetch: case AtomicExpr::AO__scoped_atomic_nand_fetch: // In LLVM codegen, the post operation codegen is tracked here. - llvm_unreachable("NYI"); [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_nand: case AtomicExpr::AO__atomic_fetch_nand: case AtomicExpr::AO__scoped_atomic_fetch_nand: - llvm_unreachable("NYI"); + Op = mlir::cir::AtomicBinopFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Nand); break; } assert(Op.size() && "expected operation name to build"); - auto &builder = CGF.getBuilder(); - auto LoadVal1 = builder.createLoad(loc, Val1); SmallVector atomicOperands = {Ptr.getPointer(), LoadVal1}; @@ -490,6 +498,9 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, auto orderAttr = mlir::cir::MemOrderAttr::get(builder.getContext(), Order); auto RMWI = builder.create(loc, builder.getStringAttr(Op), atomicOperands, atomicResTys, {}); + + if (fetchAttr) + RMWI->setAttr("binop", fetchAttr); RMWI->setAttr("mem_order", orderAttr); if (E->isVolatile()) RMWI->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); @@ -498,11 +509,6 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, if (PostOpMinMax) llvm_unreachable("NYI"); - // This should be handled in LowerToLLVM.cpp, still tracking here for now. - if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch || - E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch) - llvm_unreachable("NYI"); - builder.createStore(loc, Result, Dest); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e60476bddcc1..b30b59548049 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2407,9 +2407,9 @@ class CIRBitPopcountOpLowering }; class CIRAtomicFetchLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LLVM::AtomicOrdering getLLVMAtomicOrder(mlir::cir::MemOrder memo) const { @@ -2429,38 +2429,98 @@ class CIRAtomicFetchLowering llvm_unreachable("shouldn't get here"); } - mlir::LogicalResult buildPostOp(mlir::cir::AtomicAddFetch op, - OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter, - mlir::Value rmwVal) const { - if (op.getVal().getType().isa()) - rewriter.replaceOpWithNewOp(op, rmwVal, - adaptor.getVal()); - else if (op.getVal() - .getType() - .isa()) - rewriter.replaceOpWithNewOp(op, rmwVal, - adaptor.getVal()); - else - return op.emitError() << "Unsupported type"; - return mlir::success(); + mlir::Value buildPostOp(mlir::cir::AtomicBinopFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::Value rmwVal, bool isInt) const { + SmallVector atomicOperands = {rmwVal, adaptor.getVal()}; + SmallVector atomicResTys = {rmwVal.getType()}; + return rewriter + .create(op.getLoc(), + rewriter.getStringAttr(getLLVMBinop(op.getBinop(), isInt)), + atomicOperands, atomicResTys, {}) + ->getResult(0); + } + + llvm::StringLiteral getLLVMBinop(mlir::cir::AtomicFetchKind k, + bool isInt) const { + switch (k) { + case mlir::cir::AtomicFetchKind::Add: + return isInt ? mlir::LLVM::AddOp::getOperationName() + : mlir::LLVM::FAddOp::getOperationName(); + case mlir::cir::AtomicFetchKind::Sub: + return isInt ? mlir::LLVM::SubOp::getOperationName() + : mlir::LLVM::FSubOp::getOperationName(); + case mlir::cir::AtomicFetchKind::And: + return mlir::LLVM::AndOp::getOperationName(); + case mlir::cir::AtomicFetchKind::Xor: + return mlir::LLVM::XOrOp::getOperationName(); + case mlir::cir::AtomicFetchKind::Or: + return mlir::LLVM::OrOp::getOperationName(); + case mlir::cir::AtomicFetchKind::Nand: + // There's no nand binop in LLVM, this is later fixed with a not. + return mlir::LLVM::AndOp::getOperationName(); + } + llvm_unreachable("Unknown atomic fetch opcode"); + } + + mlir::LLVM::AtomicBinOp getLLVMAtomicBinOp(mlir::cir::AtomicFetchKind k, + bool isInt) const { + switch (k) { + case mlir::cir::AtomicFetchKind::Add: + return isInt ? mlir::LLVM::AtomicBinOp::add + : mlir::LLVM::AtomicBinOp::fadd; + case mlir::cir::AtomicFetchKind::Sub: + return isInt ? mlir::LLVM::AtomicBinOp::sub + : mlir::LLVM::AtomicBinOp::fsub; + case mlir::cir::AtomicFetchKind::And: + return mlir::LLVM::AtomicBinOp::_and; + case mlir::cir::AtomicFetchKind::Xor: + return mlir::LLVM::AtomicBinOp::_xor; + case mlir::cir::AtomicFetchKind::Or: + return mlir::LLVM::AtomicBinOp::_or; + case mlir::cir::AtomicFetchKind::Nand: + return mlir::LLVM::AtomicBinOp::nand; + } + llvm_unreachable("Unknown atomic fetch opcode"); } mlir::LogicalResult - matchAndRewrite(mlir::cir::AtomicAddFetch op, OpAdaptor adaptor, + matchAndRewrite(mlir::cir::AtomicBinopFetch op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); + + bool isInt; // otherwise it's float. + if (op.getVal().getType().isa()) + isInt = true; + else if (op.getVal() + .getType() + .isa()) + isInt = false; + else { + return op.emitError() + << "Unsupported type: " << adaptor.getVal().getType(); + } // FIXME: add syncscope. + auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); + auto llvmBinOpc = getLLVMAtomicBinOp(op.getBinop(), isInt); auto rmwVal = rewriter.create( - op.getLoc(), mlir::LLVM::AtomicBinOp::add, adaptor.getPtr(), - adaptor.getVal(), llvmOrder); + op.getLoc(), llvmBinOpc, adaptor.getPtr(), adaptor.getVal(), llvmOrder); // FIXME: Make the rewrite generic and expand this to more opcodes. - bool hasPostOp = isa(op); + bool hasPostOp = isa(op); + mlir::Value result = rmwVal.getRes(); if (hasPostOp) - return buildPostOp(op, adaptor, rewriter, rmwVal.getRes()); + result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt); + + // Compensate lack of nand binop in LLVM IR. + if (op.getBinop() == mlir::cir::AtomicFetchKind::Nand) { + auto negOne = rewriter.create( + op.getLoc(), result.getType(), -1); + result = rewriter.create(op.getLoc(), result, negOne); + } + + rewriter.replaceOp(op, result); return mlir::success(); } }; diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index ff79be74e0d3..4ef73e6d6ce9 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -16,11 +16,11 @@ enum memory_order { memory_order_release, memory_order_acq_rel, memory_order_seq_cst }; -int fi3b(int *i) { +int basic_binop_fetch(int *i) { return __atomic_add_fetch(i, 1, memory_order_seq_cst); } -// CHECK: cir.func @_Z4fi3bPi +// CHECK: cir.func @_Z17basic_binop_fetchPi // CHECK: %[[ARGI:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["i", init] {alignment = 8 : i64} // CHECK: %[[ONE_ADDR:.*]] = cir.alloca !s32i, cir.ptr , [".atomictmp"] {alignment = 4 : i64} // CHECK: cir.store %arg0, %[[ARGI]] : !cir.ptr, cir.ptr > @@ -28,8 +28,57 @@ int fi3b(int *i) { // CHECK: %[[ONE:.*]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: cir.store %[[ONE]], %[[ONE_ADDR]] : !s32i, cir.ptr // CHECK: %[[VAL:.*]] = cir.load %[[ONE_ADDR]] : cir.ptr , !s32i -// CHECK: cir.atomic.add_fetch(%[[I]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) : !s32i +// CHECK: cir.atomic.binop_fetch(add, %[[I]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) : !s32i -// LLVM: define i32 @_Z4fi3bPi +// LLVM: define i32 @_Z17basic_binop_fetchPi // LLVM: %[[RMW:.*]] = atomicrmw add ptr {{.*}}, i32 %[[VAL:.*]] seq_cst, align 4 -// LLVM: add i32 %[[RMW]], %[[VAL]] \ No newline at end of file +// LLVM: add i32 %[[RMW]], %[[VAL]] + +int other_binop_fetch(int *i) { + __atomic_sub_fetch(i, 1, memory_order_relaxed); + __atomic_and_fetch(i, 1, memory_order_consume); + __atomic_or_fetch(i, 1, memory_order_acquire); + return __atomic_xor_fetch(i, 1, memory_order_release); +} + +// CHECK: cir.func @_Z17other_binop_fetchPi +// CHECK: cir.atomic.binop_fetch(sub, {{.*}}, relaxed +// CHECK: cir.atomic.binop_fetch(and, {{.*}}, acquire +// CHECK: cir.atomic.binop_fetch(or, {{.*}}, acquire +// CHECK: cir.atomic.binop_fetch(xor, {{.*}}, release + +// LLVM: define i32 @_Z17other_binop_fetchPi +// LLVM: %[[RMW_SUB:.*]] = atomicrmw sub ptr {{.*}} monotonic +// LLVM: sub i32 %[[RMW_SUB]], {{.*}} +// LLVM: %[[RMW_AND:.*]] = atomicrmw and ptr {{.*}} acquire +// LLVM: and i32 %[[RMW_AND]], {{.*}} +// LLVM: %[[RMW_OR:.*]] = atomicrmw or ptr {{.*}} acquire +// LLVM: or i32 %[[RMW_OR]], {{.*}} +// LLVM: %[[RMW_XOR:.*]] = atomicrmw xor ptr {{.*}} release +// LLVM: xor i32 %[[RMW_XOR]], {{.*}} + +int nand_binop_fetch(int *i) { + return __atomic_nand_fetch(i, 1, memory_order_acq_rel); +} + +// CHECK: cir.func @_Z16nand_binop_fetchPi +// CHECK: cir.atomic.binop_fetch(nand, {{.*}}, acq_rel + +// LLVM: define i32 @_Z16nand_binop_fetchPi +// LLVM: %[[RMW_NAND:.*]] = atomicrmw nand ptr {{.*}} acq_rel +// LLVM: %[[AND:.*]] = and i32 %[[RMW_NAND]] +// LLVM: = xor i32 %[[AND]], -1 + +int fp_binop_fetch(float *i) { + __atomic_add_fetch(i, 1, memory_order_seq_cst); + return __atomic_sub_fetch(i, 1, memory_order_seq_cst); +} + +// CHECK: cir.func @_Z14fp_binop_fetchPf +// CHECK: cir.atomic.binop_fetch(add, +// CHECK: cir.atomic.binop_fetch(sub, + +// LLVM: %[[RMW_FADD:.*]] = atomicrmw fadd ptr +// LLVM: fadd float %[[RMW_FADD]] +// LLVM: %[[RMW_FSUB:.*]] = atomicrmw fsub ptr +// LLVM: fsub float %[[RMW_FSUB]] \ No newline at end of file From 1c3adb94e5828b469a16b9a817450462ead1ed84 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 8 Apr 2024 14:09:16 -0700 Subject: [PATCH 1469/2301] [CIR][CIRGen] Add a verifier to catch binop_fetch's that do not support fp --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 15 +++++++++++++++ clang/test/CIR/IR/invalid.cir | 8 ++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e43052523bb2..ed02d28367ed 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3482,7 +3482,7 @@ def Atomic_Nand : I32EnumAttrCase<"Nand", 5, "nand">; def AtomicFetchKind : I32EnumAttr< "AtomicFetchKind", - "Binary opcode for cir.atomic.fetch.binop and cir.atomic.binop.fetch", + "Binary opcode for atomic fetch operations", [Atomic_Add, Atomic_Sub, Atomic_And, Atomic_Xor, Atomic_Or, Atomic_Nand]> { let cppNamespace = "::mlir::cir"; @@ -3508,7 +3508,7 @@ def AtomicBinopFetch : CIR_Op<"atomic.binop_fetch", `:` type($result) attr-dict }]; - let hasVerifier = 0; + let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9893d9b3c27a..7406d9369e13 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2703,6 +2703,21 @@ LogicalResult GetRuntimeMemberOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// Atomic Definitions +//===----------------------------------------------------------------------===// + +LogicalResult AtomicBinopFetch::verify() { + if (getBinop() == mlir::cir::AtomicFetchKind::Add || + getBinop() == mlir::cir::AtomicFetchKind::Sub) + return mlir::success(); + + if (!getVal().getType().isa()) + return emitError() << "only operates on integer values"; + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 74ab743bb2fd..514c578ac88b 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1034,3 +1034,11 @@ cir.func @popcount_invalid_result_ty(%arg0 : !u32i) -> () { %0 = cir.bit.popcount(%arg0 : !u32i) : !u32i cir.return } + +// ----- + +cir.func @bad_fetch(%x: !cir.ptr, %y: !cir.float) -> () { + // expected-error@+1 {{only operates on integer values}} + %12 = cir.atomic.binop_fetch(xor, %x : !cir.ptr, %y : !cir.float, seq_cst) : !cir.float + cir.return +} From 145d63647be51f3353e1d338179f201ea65fd826 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 8 Apr 2024 14:47:38 -0700 Subject: [PATCH 1470/2301] [CIR][CIRGen] Add support for __atomic_fetch_binop Note this is different from __atomic_binop_fetch. See added docs. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 25 ++++++++-- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 28 ++++++----- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 25 +++++----- clang/test/CIR/CodeGen/atomic.cpp | 50 +++++++++++++++---- clang/test/CIR/IR/invalid.cir | 2 +- 6 files changed, 91 insertions(+), 41 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index ed02d28367ed..a512339039b7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3488,15 +3488,29 @@ def AtomicFetchKind : I32EnumAttr< let cppNamespace = "::mlir::cir"; } -def AtomicBinopFetch : CIR_Op<"atomic.binop_fetch", - [Pure, SameSecondOperandAndResultType]> { - let summary = "Represents the __atomic_binop_fetch builtin"; - let description = [{}]; +def AtomicFetch : CIR_Op<"atomic.fetch", + [Pure, SameSecondOperandAndResultType]> { + let summary = "Atomic fetch with unary and binary operations"; + let description = [{ + Represents `__atomic_binop_fetch` and `__atomic_fetch_binop` builtins, + where `binop` is on of the binary opcodes : `add`, `sub`, `and`, `xor`, + `or` and `nand`. + + `ptr` is an integer or fp pointer, followed by `val`, which must be + an integer or fp (only supported for `add` and `sub`). The operation + can also be marked `volatile`. + + If `fetch_first` is present, the operation works like + `__atomic_fetch_binop` and returns the value that had + previously been in *ptr, otherwise it returns the final result + of the computation (`__atomic_binop_fetch`). + }]; let results = (outs CIR_AnyIntOrFloat:$result); let arguments = (ins IntOrFPPtr:$ptr, CIR_AnyIntOrFloat:$val, AtomicFetchKind:$binop, Arg:$mem_order, - UnitAttr:$is_volatile); + UnitAttr:$is_volatile, + UnitAttr:$fetch_first); let assemblyFormat = [{ `(` @@ -3505,6 +3519,7 @@ def AtomicBinopFetch : CIR_Op<"atomic.binop_fetch", $val `:` type($val) `,` $mem_order `)` (`volatile` $is_volatile^)? + (`fetch_first` $fetch_first^)? `:` type($result) attr-dict }]; diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 5a1ba29227e8..d673ef5c9009 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -327,6 +327,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, auto &builder = CGF.getBuilder(); auto loc = CGF.getLoc(E->getSourceRange()); mlir::cir::AtomicFetchKindAttr fetchAttr; + bool fetchFirst = true; switch (E->getOp()) { case AtomicExpr::AO__c11_atomic_init: @@ -384,28 +385,28 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_add_fetch: case AtomicExpr::AO__scoped_atomic_add_fetch: - // In LLVM codegen, the post operation codegen is tracked here. + fetchFirst = false; [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_add: case AtomicExpr::AO__hip_atomic_fetch_add: case AtomicExpr::AO__opencl_atomic_fetch_add: case AtomicExpr::AO__atomic_fetch_add: case AtomicExpr::AO__scoped_atomic_fetch_add: - Op = mlir::cir::AtomicBinopFetch::getOperationName(); + Op = mlir::cir::AtomicFetch::getOperationName(); fetchAttr = mlir::cir::AtomicFetchKindAttr::get( builder.getContext(), mlir::cir::AtomicFetchKind::Add); break; case AtomicExpr::AO__atomic_sub_fetch: case AtomicExpr::AO__scoped_atomic_sub_fetch: - // In LLVM codegen, the post operation codegen is tracked here. + fetchFirst = false; [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_sub: case AtomicExpr::AO__hip_atomic_fetch_sub: case AtomicExpr::AO__opencl_atomic_fetch_sub: case AtomicExpr::AO__atomic_fetch_sub: case AtomicExpr::AO__scoped_atomic_fetch_sub: - Op = mlir::cir::AtomicBinopFetch::getOperationName(); + Op = mlir::cir::AtomicFetch::getOperationName(); fetchAttr = mlir::cir::AtomicFetchKindAttr::get( builder.getContext(), mlir::cir::AtomicFetchKind::Sub); break; @@ -436,54 +437,54 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_and_fetch: case AtomicExpr::AO__scoped_atomic_and_fetch: - // In LLVM codegen, the post operation codegen is tracked here. + fetchFirst = false; [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_and: case AtomicExpr::AO__hip_atomic_fetch_and: case AtomicExpr::AO__opencl_atomic_fetch_and: case AtomicExpr::AO__atomic_fetch_and: case AtomicExpr::AO__scoped_atomic_fetch_and: - Op = mlir::cir::AtomicBinopFetch::getOperationName(); + Op = mlir::cir::AtomicFetch::getOperationName(); fetchAttr = mlir::cir::AtomicFetchKindAttr::get( builder.getContext(), mlir::cir::AtomicFetchKind::And); break; case AtomicExpr::AO__atomic_or_fetch: case AtomicExpr::AO__scoped_atomic_or_fetch: - // In LLVM codegen, the post operation codegen is tracked here. + fetchFirst = false; [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_or: case AtomicExpr::AO__hip_atomic_fetch_or: case AtomicExpr::AO__opencl_atomic_fetch_or: case AtomicExpr::AO__atomic_fetch_or: case AtomicExpr::AO__scoped_atomic_fetch_or: - Op = mlir::cir::AtomicBinopFetch::getOperationName(); + Op = mlir::cir::AtomicFetch::getOperationName(); fetchAttr = mlir::cir::AtomicFetchKindAttr::get( builder.getContext(), mlir::cir::AtomicFetchKind::Or); break; case AtomicExpr::AO__atomic_xor_fetch: case AtomicExpr::AO__scoped_atomic_xor_fetch: - // In LLVM codegen, the post operation codegen is tracked here. + fetchFirst = false; [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_xor: case AtomicExpr::AO__hip_atomic_fetch_xor: case AtomicExpr::AO__opencl_atomic_fetch_xor: case AtomicExpr::AO__atomic_fetch_xor: case AtomicExpr::AO__scoped_atomic_fetch_xor: - Op = mlir::cir::AtomicBinopFetch::getOperationName(); + Op = mlir::cir::AtomicFetch::getOperationName(); fetchAttr = mlir::cir::AtomicFetchKindAttr::get( builder.getContext(), mlir::cir::AtomicFetchKind::Xor); break; case AtomicExpr::AO__atomic_nand_fetch: case AtomicExpr::AO__scoped_atomic_nand_fetch: - // In LLVM codegen, the post operation codegen is tracked here. + fetchFirst = false; [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_nand: case AtomicExpr::AO__atomic_fetch_nand: case AtomicExpr::AO__scoped_atomic_fetch_nand: - Op = mlir::cir::AtomicBinopFetch::getOperationName(); + Op = mlir::cir::AtomicFetch::getOperationName(); fetchAttr = mlir::cir::AtomicFetchKindAttr::get( builder.getContext(), mlir::cir::AtomicFetchKind::Nand); break; @@ -504,6 +505,9 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, RMWI->setAttr("mem_order", orderAttr); if (E->isVolatile()) RMWI->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); + if (fetchFirst) + RMWI->setAttr("fetch_first", mlir::UnitAttr::get(builder.getContext())); + auto Result = RMWI->getResult(0); if (PostOpMinMax) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7406d9369e13..eadf4d38334a 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2707,7 +2707,7 @@ LogicalResult GetRuntimeMemberOp::verify() { // Atomic Definitions //===----------------------------------------------------------------------===// -LogicalResult AtomicBinopFetch::verify() { +LogicalResult AtomicFetch::verify() { if (getBinop() == mlir::cir::AtomicFetchKind::Add || getBinop() == mlir::cir::AtomicFetchKind::Sub) return mlir::success(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b30b59548049..51f7f7aa6575 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2407,9 +2407,9 @@ class CIRBitPopcountOpLowering }; class CIRAtomicFetchLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LLVM::AtomicOrdering getLLVMAtomicOrder(mlir::cir::MemOrder memo) const { @@ -2429,7 +2429,7 @@ class CIRAtomicFetchLowering llvm_unreachable("shouldn't get here"); } - mlir::Value buildPostOp(mlir::cir::AtomicBinopFetch op, OpAdaptor adaptor, + mlir::Value buildPostOp(mlir::cir::AtomicFetch op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal, bool isInt) const { SmallVector atomicOperands = {rmwVal, adaptor.getVal()}; @@ -2485,7 +2485,7 @@ class CIRAtomicFetchLowering } mlir::LogicalResult - matchAndRewrite(mlir::cir::AtomicBinopFetch op, OpAdaptor adaptor, + matchAndRewrite(mlir::cir::AtomicFetch op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { bool isInt; // otherwise it's float. @@ -2506,18 +2506,17 @@ class CIRAtomicFetchLowering auto rmwVal = rewriter.create( op.getLoc(), llvmBinOpc, adaptor.getPtr(), adaptor.getVal(), llvmOrder); - // FIXME: Make the rewrite generic and expand this to more opcodes. - bool hasPostOp = isa(op); - mlir::Value result = rmwVal.getRes(); - if (hasPostOp) + if (!op.getFetchFirst()) { result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt); - // Compensate lack of nand binop in LLVM IR. - if (op.getBinop() == mlir::cir::AtomicFetchKind::Nand) { - auto negOne = rewriter.create( - op.getLoc(), result.getType(), -1); - result = rewriter.create(op.getLoc(), result, negOne); + // Compensate lack of nand binop in LLVM IR. + if (op.getBinop() == mlir::cir::AtomicFetchKind::Nand) { + auto negOne = rewriter.create( + op.getLoc(), result.getType(), -1); + result = + rewriter.create(op.getLoc(), result, negOne); + } } rewriter.replaceOp(op, result); diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 4ef73e6d6ce9..b100109fcd09 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -28,7 +28,7 @@ int basic_binop_fetch(int *i) { // CHECK: %[[ONE:.*]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: cir.store %[[ONE]], %[[ONE_ADDR]] : !s32i, cir.ptr // CHECK: %[[VAL:.*]] = cir.load %[[ONE_ADDR]] : cir.ptr , !s32i -// CHECK: cir.atomic.binop_fetch(add, %[[I]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) : !s32i +// CHECK: cir.atomic.fetch(add, %[[I]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) : !s32i // LLVM: define i32 @_Z17basic_binop_fetchPi // LLVM: %[[RMW:.*]] = atomicrmw add ptr {{.*}}, i32 %[[VAL:.*]] seq_cst, align 4 @@ -42,10 +42,10 @@ int other_binop_fetch(int *i) { } // CHECK: cir.func @_Z17other_binop_fetchPi -// CHECK: cir.atomic.binop_fetch(sub, {{.*}}, relaxed -// CHECK: cir.atomic.binop_fetch(and, {{.*}}, acquire -// CHECK: cir.atomic.binop_fetch(or, {{.*}}, acquire -// CHECK: cir.atomic.binop_fetch(xor, {{.*}}, release +// CHECK: cir.atomic.fetch(sub, {{.*}}, relaxed +// CHECK: cir.atomic.fetch(and, {{.*}}, acquire +// CHECK: cir.atomic.fetch(or, {{.*}}, acquire +// CHECK: cir.atomic.fetch(xor, {{.*}}, release // LLVM: define i32 @_Z17other_binop_fetchPi // LLVM: %[[RMW_SUB:.*]] = atomicrmw sub ptr {{.*}} monotonic @@ -62,7 +62,7 @@ int nand_binop_fetch(int *i) { } // CHECK: cir.func @_Z16nand_binop_fetchPi -// CHECK: cir.atomic.binop_fetch(nand, {{.*}}, acq_rel +// CHECK: cir.atomic.fetch(nand, {{.*}}, acq_rel // LLVM: define i32 @_Z16nand_binop_fetchPi // LLVM: %[[RMW_NAND:.*]] = atomicrmw nand ptr {{.*}} acq_rel @@ -75,10 +75,42 @@ int fp_binop_fetch(float *i) { } // CHECK: cir.func @_Z14fp_binop_fetchPf -// CHECK: cir.atomic.binop_fetch(add, -// CHECK: cir.atomic.binop_fetch(sub, +// CHECK: cir.atomic.fetch(add, +// CHECK: cir.atomic.fetch(sub, +// LLVM: define i32 @_Z14fp_binop_fetchPf // LLVM: %[[RMW_FADD:.*]] = atomicrmw fadd ptr // LLVM: fadd float %[[RMW_FADD]] // LLVM: %[[RMW_FSUB:.*]] = atomicrmw fsub ptr -// LLVM: fsub float %[[RMW_FSUB]] \ No newline at end of file +// LLVM: fsub float %[[RMW_FSUB]] + +int fetch_binop(int *i) { + __atomic_fetch_add(i, 1, memory_order_seq_cst); + __atomic_fetch_sub(i, 1, memory_order_seq_cst); + __atomic_fetch_and(i, 1, memory_order_seq_cst); + __atomic_fetch_or(i, 1, memory_order_seq_cst); + __atomic_fetch_xor(i, 1, memory_order_seq_cst); + return __atomic_fetch_nand(i, 1, memory_order_seq_cst); +} + +// CHECK: cir.func @_Z11fetch_binopPi +// CHECK: cir.atomic.fetch(add, {{.*}}) fetch_first +// CHECK: cir.atomic.fetch(sub, {{.*}}) fetch_first +// CHECK: cir.atomic.fetch(and, {{.*}}) fetch_first +// CHECK: cir.atomic.fetch(or, {{.*}}) fetch_first +// CHECK: cir.atomic.fetch(xor, {{.*}}) fetch_first +// CHECK: cir.atomic.fetch(nand, {{.*}}) fetch_first + +// LLVM: define i32 @_Z11fetch_binopPi +// LLVM: atomicrmw add ptr +// LLVM-NOT: add {{.*}} +// LLVM: atomicrmw sub ptr +// LLVM-NOT: sub {{.*}} +// LLVM: atomicrmw and ptr +// LLVM-NOT: and {{.*}} +// LLVM: atomicrmw or ptr +// LLVM-NOT: or {{.*}} +// LLVM: atomicrmw xor ptr +// LLVM-NOT: xor {{.*}} +// LLVM: atomicrmw nand ptr +// LLVM-NOT: nand {{.*}} \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 514c578ac88b..0246b47b934e 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1039,6 +1039,6 @@ cir.func @popcount_invalid_result_ty(%arg0 : !u32i) -> () { cir.func @bad_fetch(%x: !cir.ptr, %y: !cir.float) -> () { // expected-error@+1 {{only operates on integer values}} - %12 = cir.atomic.binop_fetch(xor, %x : !cir.ptr, %y : !cir.float, seq_cst) : !cir.float + %12 = cir.atomic.fetch(xor, %x : !cir.ptr, %y : !cir.float, seq_cst) : !cir.float cir.return } From 442815402d8c331f2a7659a30b9c049ac7ef370a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 8 Apr 2024 15:22:58 -0700 Subject: [PATCH 1471/2301] [CIR][CIRGen] Add min/max atomic fetch variants --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 17 +++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 58 ++++++++++++++++--- clang/test/CIR/CodeGen/atomic.cpp | 27 ++++++++- 4 files changed, 88 insertions(+), 20 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a512339039b7..9e6bcb164238 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3479,12 +3479,14 @@ def Atomic_And : I32EnumAttrCase<"And", 2, "and">; def Atomic_Xor : I32EnumAttrCase<"Xor", 3, "xor">; def Atomic_Or : I32EnumAttrCase<"Or", 4, "or">; def Atomic_Nand : I32EnumAttrCase<"Nand", 5, "nand">; +def Atomic_Max : I32EnumAttrCase<"Max", 6, "max">; +def Atomic_Min : I32EnumAttrCase<"Min", 7, "min">; def AtomicFetchKind : I32EnumAttr< "AtomicFetchKind", "Binary opcode for atomic fetch operations", - [Atomic_Add, Atomic_Sub, Atomic_And, - Atomic_Xor, Atomic_Or, Atomic_Nand]> { + [Atomic_Add, Atomic_Sub, Atomic_And, Atomic_Xor, Atomic_Or, Atomic_Nand, + Atomic_Max, Atomic_Min]> { let cppNamespace = "::mlir::cir"; } diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index d673ef5c9009..8d3dbc450271 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -322,7 +322,6 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, uint8_t Scope) { assert(!UnimplementedFeature::syncScopeID()); StringRef Op; - [[maybe_unused]] bool PostOpMinMax = false; auto &builder = CGF.getBuilder(); auto loc = CGF.getLoc(E->getSourceRange()); @@ -413,26 +412,30 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_min_fetch: case AtomicExpr::AO__scoped_atomic_min_fetch: - PostOpMinMax = true; + fetchFirst = false; [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_min: case AtomicExpr::AO__hip_atomic_fetch_min: case AtomicExpr::AO__opencl_atomic_fetch_min: case AtomicExpr::AO__atomic_fetch_min: case AtomicExpr::AO__scoped_atomic_fetch_min: - llvm_unreachable("NYI"); + Op = mlir::cir::AtomicFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Min); break; case AtomicExpr::AO__atomic_max_fetch: case AtomicExpr::AO__scoped_atomic_max_fetch: - PostOpMinMax = true; + fetchFirst = false; [[fallthrough]]; case AtomicExpr::AO__c11_atomic_fetch_max: case AtomicExpr::AO__hip_atomic_fetch_max: case AtomicExpr::AO__opencl_atomic_fetch_max: case AtomicExpr::AO__atomic_fetch_max: case AtomicExpr::AO__scoped_atomic_fetch_max: - llvm_unreachable("NYI"); + Op = mlir::cir::AtomicFetch::getOperationName(); + fetchAttr = mlir::cir::AtomicFetchKindAttr::get( + builder.getContext(), mlir::cir::AtomicFetchKind::Max); break; case AtomicExpr::AO__atomic_and_fetch: @@ -509,10 +512,6 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, RMWI->setAttr("fetch_first", mlir::UnitAttr::get(builder.getContext())); auto Result = RMWI->getResult(0); - - if (PostOpMinMax) - llvm_unreachable("NYI"); - builder.createStore(loc, Result, Dest); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 51f7f7aa6575..5f532688fc50 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2441,6 +2441,26 @@ class CIRAtomicFetchLowering ->getResult(0); } + mlir::Value buildMinMaxPostOp(mlir::cir::AtomicFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::Value rmwVal, bool isSigned) const { + auto loc = op.getLoc(); + mlir::LLVM::ICmpPredicate pred; + if (op.getBinop() == mlir::cir::AtomicFetchKind::Max) { + pred = isSigned ? mlir::LLVM::ICmpPredicate::sgt + : mlir::LLVM::ICmpPredicate::ugt; + } else { // Min + pred = isSigned ? mlir::LLVM::ICmpPredicate::slt + : mlir::LLVM::ICmpPredicate::ult; + } + + auto cmp = rewriter.create( + loc, mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), pred), + rmwVal, adaptor.getVal()); + return rewriter.create(loc, cmp, rmwVal, + adaptor.getVal()); + } + llvm::StringLiteral getLLVMBinop(mlir::cir::AtomicFetchKind k, bool isInt) const { switch (k) { @@ -2459,12 +2479,16 @@ class CIRAtomicFetchLowering case mlir::cir::AtomicFetchKind::Nand: // There's no nand binop in LLVM, this is later fixed with a not. return mlir::LLVM::AndOp::getOperationName(); + case mlir::cir::AtomicFetchKind::Max: + case mlir::cir::AtomicFetchKind::Min: + llvm_unreachable("handled in buildMinMaxPostOp"); } llvm_unreachable("Unknown atomic fetch opcode"); } mlir::LLVM::AtomicBinOp getLLVMAtomicBinOp(mlir::cir::AtomicFetchKind k, - bool isInt) const { + bool isInt, + bool isSignedInt) const { switch (k) { case mlir::cir::AtomicFetchKind::Add: return isInt ? mlir::LLVM::AtomicBinOp::add @@ -2480,6 +2504,18 @@ class CIRAtomicFetchLowering return mlir::LLVM::AtomicBinOp::_or; case mlir::cir::AtomicFetchKind::Nand: return mlir::LLVM::AtomicBinOp::nand; + case mlir::cir::AtomicFetchKind::Max: { + if (!isInt) + return mlir::LLVM::AtomicBinOp::fmax; + return isSignedInt ? mlir::LLVM::AtomicBinOp::max + : mlir::LLVM::AtomicBinOp::umax; + } + case mlir::cir::AtomicFetchKind::Min: { + if (!isInt) + return mlir::LLVM::AtomicBinOp::fmin; + return isSignedInt ? mlir::LLVM::AtomicBinOp::min + : mlir::LLVM::AtomicBinOp::umin; + } } llvm_unreachable("Unknown atomic fetch opcode"); } @@ -2488,12 +2524,13 @@ class CIRAtomicFetchLowering matchAndRewrite(mlir::cir::AtomicFetch op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - bool isInt; // otherwise it's float. - if (op.getVal().getType().isa()) + bool isInt, isSignedInt = false; // otherwise it's float. + if (auto intTy = op.getVal().getType().dyn_cast()) { isInt = true; - else if (op.getVal() - .getType() - .isa()) + isSignedInt = intTy.isSigned(); + } else if (op.getVal() + .getType() + .isa()) isInt = false; else { return op.emitError() @@ -2502,13 +2539,18 @@ class CIRAtomicFetchLowering // FIXME: add syncscope. auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); - auto llvmBinOpc = getLLVMAtomicBinOp(op.getBinop(), isInt); + auto llvmBinOpc = getLLVMAtomicBinOp(op.getBinop(), isInt, isSignedInt); auto rmwVal = rewriter.create( op.getLoc(), llvmBinOpc, adaptor.getPtr(), adaptor.getVal(), llvmOrder); mlir::Value result = rmwVal.getRes(); if (!op.getFetchFirst()) { - result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt); + if (op.getBinop() == mlir::cir::AtomicFetchKind::Max || + op.getBinop() == mlir::cir::AtomicFetchKind::Min) + result = buildMinMaxPostOp(op, adaptor, rewriter, rmwVal.getRes(), + isSignedInt); + else + result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt); // Compensate lack of nand binop in LLVM IR. if (op.getBinop() == mlir::cir::AtomicFetchKind::Nand) { diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index b100109fcd09..3c8a19f18add 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -113,4 +113,29 @@ int fetch_binop(int *i) { // LLVM: atomicrmw xor ptr // LLVM-NOT: xor {{.*}} // LLVM: atomicrmw nand ptr -// LLVM-NOT: nand {{.*}} \ No newline at end of file +// LLVM-NOT: nand {{.*}} + +void min_max_fetch(int *i) { + __atomic_fetch_max(i, 1, memory_order_seq_cst); + __atomic_fetch_min(i, 1, memory_order_seq_cst); + __atomic_max_fetch(i, 1, memory_order_seq_cst); + __atomic_min_fetch(i, 1, memory_order_seq_cst); +} + +// CHECK: cir.func @_Z13min_max_fetchPi +// CHECK: = cir.atomic.fetch(max, {{.*}}) fetch_first +// CHECK: = cir.atomic.fetch(min, {{.*}}) fetch_first +// CHECK: = cir.atomic.fetch(max, {{.*}}) : !s32i +// CHECK: = cir.atomic.fetch(min, {{.*}}) : !s32i + +// LLVM: define void @_Z13min_max_fetchPi +// LLVM: atomicrmw max ptr +// LLVM-NOT: icmp {{.*}} +// LLVM: atomicrmw min ptr +// LLVM-NOT: icmp {{.*}} +// LLVM: %[[MAX:.*]] = atomicrmw max ptr +// LLVM: %[[ICMP_MAX:.*]] = icmp sgt i32 %[[MAX]] +// LLVM: select i1 %[[ICMP_MAX]], i32 %[[MAX]] +// LLVM: %[[MIN:.*]] = atomicrmw min ptr +// LLVM: %[[ICMP_MIN:.*]] = icmp slt i32 %[[MIN]] +// LLVM: select i1 %[[ICMP_MIN]], i32 %[[MIN]] \ No newline at end of file From 8567db9cdd1230b1c547b7805c790e7b9a0724c7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 9 Apr 2024 11:39:54 -0700 Subject: [PATCH 1472/2301] [CIR][CIRGen] Add atomic load support --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 42 ++++++++------- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 20 ++++--- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- .../Dialect/Transforms/LoweringPrepare.cpp | 6 ++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 35 ++++++++++++- clang/test/CIR/CodeGen/atomic.cpp | 52 ++++++++++++++++--- 6 files changed, 121 insertions(+), 36 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 9e6bcb164238..73f8f55f122c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -271,6 +271,25 @@ def ConstantOp : CIR_Op<"const", let hasFolder = 1; } +//===----------------------------------------------------------------------===// +// C/C++ memory order definitions +//===----------------------------------------------------------------------===// + +def MemOrderRelaxed : I32EnumAttrCase<"Relaxed", 0, "relaxed">; +def MemOrderConsume : I32EnumAttrCase<"Consume", 1, "consume">; +def MemOrderAcquire : I32EnumAttrCase<"Acquire", 2, "acquire">; +def MemOrderRelease : I32EnumAttrCase<"Release", 3, "release">; +def MemOrderAcqRel : I32EnumAttrCase<"AcquireRelease", 4, "acq_rel">; +def MemOrderSeqCst : I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">; + +def MemOrder : I32EnumAttr< + "MemOrder", + "Memory order according to C++11 memory model", + [MemOrderRelaxed, MemOrderConsume, MemOrderAcquire, + MemOrderRelease, MemOrderAcqRel, MemOrderSeqCst]> { + let cppNamespace = "::mlir::cir"; +} + //===----------------------------------------------------------------------===// // AllocaOp //===----------------------------------------------------------------------===// @@ -403,13 +422,16 @@ def LoadOp : CIR_Op<"load", [ let arguments = (ins Arg:$addr, UnitAttr:$isDeref, - UnitAttr:$is_volatile); + UnitAttr:$is_volatile, + OptionalAttr:$mem_order); let results = (outs CIR_AnyType:$result); // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. let assemblyFormat = [{ - (`deref` $isDeref^)? (`volatile` $is_volatile^)? + (`deref` $isDeref^)? + (`volatile` $is_volatile^)? + (`atomic` `(` $mem_order^ `)`)? $addr `:` `cir.ptr` type($addr) `,` type($result) attr-dict }]; @@ -3456,22 +3478,6 @@ def IsConstantOp : CIR_Op<"is_constant", [Pure]> { // Atomic operations //===----------------------------------------------------------------------===// -// Memory order related definitions. -def MemOrderRelaxed : I32EnumAttrCase<"Relaxed", 0, "relaxed">; -def MemOrderConsume : I32EnumAttrCase<"Consume", 1, "consume">; -def MemOrderAcquire : I32EnumAttrCase<"Acquire", 2, "acquire">; -def MemOrderRelease : I32EnumAttrCase<"Release", 3, "release">; -def MemOrderAcqRel : I32EnumAttrCase<"AcquireRelease", 4, "acq_rel">; -def MemOrderSeqCst : I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">; - -def MemOrder : I32EnumAttr< - "MemOrder", - "Memory order according to C++11 memory model", - [MemOrderRelaxed, MemOrderConsume, MemOrderAcquire, - MemOrderRelease, MemOrderAcqRel, MemOrderSeqCst]> { - let cppNamespace = "::mlir::cir"; -} - // Binary opcodes for atomic fetch. def Atomic_Add : I32EnumAttrCase<"Add", 0, "add">; def Atomic_Sub : I32EnumAttrCase<"Sub", 1, "sub">; diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 8d3dbc450271..1841ed168242 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -325,6 +325,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, auto &builder = CGF.getBuilder(); auto loc = CGF.getLoc(E->getSourceRange()); + auto orderAttr = mlir::cir::MemOrderAttr::get(builder.getContext(), Order); mlir::cir::AtomicFetchKindAttr fetchAttr; bool fetchFirst = true; @@ -357,7 +358,13 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_load: case AtomicExpr::AO__scoped_atomic_load_n: case AtomicExpr::AO__scoped_atomic_load: { - llvm_unreachable("NYI"); + auto *load = builder.createLoad(loc, Ptr).getDefiningOp(); + // FIXME(cir): add scope information. + assert(!UnimplementedFeature::syncScopeID()); + load->setAttr("mem_order", orderAttr); + if (E->isVolatile()) + load->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); + builder.createStore(loc, load->getResult(0), Dest); return; } @@ -499,7 +506,6 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, SmallVector atomicOperands = {Ptr.getPointer(), LoadVal1}; SmallVector atomicResTys = { Ptr.getPointer().getType().cast().getPointee()}; - auto orderAttr = mlir::cir::MemOrderAttr::get(builder.getContext(), Order); auto RMWI = builder.create(loc, builder.getStringAttr(Op), atomicOperands, atomicResTys, {}); @@ -601,7 +607,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__atomic_load: case AtomicExpr::AO__scoped_atomic_load: - llvm_unreachable("NYI"); + Dest = buildPointerWithAlignment(E->getVal1()); break; case AtomicExpr::AO__atomic_store: @@ -716,7 +722,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { Val2 = Atomics.convertToAtomicIntPointer(Val2); } if (Dest.isValid()) { - llvm_unreachable("NYI"); + if (ShouldCastToIntPtrTy) + Dest = Atomics.castToAtomicIntPointer(Dest); } else if (E->isCmpXChg()) llvm_unreachable("NYI"); else if (!RValTy->isVoidType()) { @@ -1087,9 +1094,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { break; } } - if (RValTy->isVoidType()) { - llvm_unreachable("NYI"); - } + if (RValTy->isVoidType()) + return RValue::get(nullptr); return convertTempToRValue(Dest.withElementType(convertTypeForMem(RValTy)), RValTy, E->getExprLoc()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 3d1109b0b7a2..6be56a803f68 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2531,7 +2531,7 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, mlir::cir::LoadOp Load = builder.create( Loc, Addr.getElementType(), Addr.getPointer(), /* deref */ false, - Volatile); + Volatile, ::mlir::cir::MemOrderAttr{}); if (isNontemporal) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 577770ea9af8..877a31f90f23 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -393,7 +393,8 @@ static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, loc, /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto currentElement = b.create(loc, eltTy, tmpAddr); + auto currentElement = + b.create(loc, eltTy, tmpAddr.getResult()); mlir::Type boolTy = mlir::cir::BoolType::get(b.getContext()); auto cmp = builder.create( loc, boolTy, mlir::cir::CmpOpKind::eq, currentElement, end); @@ -401,7 +402,8 @@ static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto currentElement = b.create(loc, eltTy, tmpAddr); + auto currentElement = + b.create(loc, eltTy, tmpAddr.getResult()); CallOp ctorCall; op->walk([&](CallOp c) { ctorCall = c; }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5f532688fc50..d530e77e7a86 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1009,9 +1009,40 @@ class CIRLoadLowering : public mlir::OpConversionPattern { mlir::ConversionPatternRewriter &rewriter) const override { const auto llvmTy = getTypeConverter()->convertType(op.getResult().getType()); + // FIXME: right now we only pass in the alignment when the memory access is + // atomic, we should always pass it instead. + unsigned alignment = 0; + auto ordering = mlir::LLVM::AtomicOrdering::not_atomic; + if (op.getMemOrder()) { + switch (*op.getMemOrder()) { + case mlir::cir::MemOrder::Relaxed: + ordering = mlir::LLVM::AtomicOrdering::monotonic; + break; + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: + ordering = mlir::LLVM::AtomicOrdering::acquire; + break; + case mlir::cir::MemOrder::Release: + ordering = mlir::LLVM::AtomicOrdering::release; + break; + case mlir::cir::MemOrder::AcquireRelease: + ordering = mlir::LLVM::AtomicOrdering::acq_rel; + break; + case mlir::cir::MemOrder::SequentiallyConsistent: + ordering = mlir::LLVM::AtomicOrdering::seq_cst; + break; + } + + mlir::DataLayout layout(op->getParentOfType()); + alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); + } + + // TODO: nontemporal, invariant, syncscope. rewriter.replaceOpWithNewOp( - op, llvmTy, adaptor.getAddr(), /* alignment */ 0, - /* volatile */ op.getIsVolatile()); + op, llvmTy, adaptor.getAddr(), /* alignment */ alignment, + /* volatile */ op.getIsVolatile(), + /* nontemporal */ false, + /* invariant */ false, /* invariantGroup */ false, ordering); return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 3c8a19f18add..51c0fa34ca87 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -3,6 +3,9 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s +// Available on resource dir. +#include + typedef struct _a { _Atomic(int) d; } at; @@ -11,11 +14,6 @@ void m() { at y; } // CHECK: ![[A:.*]] = !cir.struct -enum memory_order { - memory_order_relaxed, memory_order_consume, memory_order_acquire, - memory_order_release, memory_order_acq_rel, memory_order_seq_cst -}; - int basic_binop_fetch(int *i) { return __atomic_add_fetch(i, 1, memory_order_seq_cst); } @@ -138,4 +136,46 @@ void min_max_fetch(int *i) { // LLVM: select i1 %[[ICMP_MAX]], i32 %[[MAX]] // LLVM: %[[MIN:.*]] = atomicrmw min ptr // LLVM: %[[ICMP_MIN:.*]] = icmp slt i32 %[[MIN]] -// LLVM: select i1 %[[ICMP_MIN]], i32 %[[MIN]] \ No newline at end of file +// LLVM: select i1 %[[ICMP_MIN]], i32 %[[MIN]] + +int fi1(_Atomic(int) *i) { + return __c11_atomic_load(i, memory_order_seq_cst); +} + +// CHECK: cir.func @_Z3fi1PU7_Atomici +// CHECK: cir.load atomic(seq_cst) + +// LLVM-LABEL: @_Z3fi1PU7_Atomici +// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4 + +int fi1a(int *i) { + int v; + __atomic_load(i, &v, memory_order_seq_cst); + return v; +} + +// CHECK-LABEL: @_Z4fi1aPi +// CHECK: cir.load atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi1aPi +// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4 + +int fi1b(int *i) { + return __atomic_load_n(i, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z4fi1bPi +// CHECK: cir.load atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi1bPi +// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4 + +int fi1c(atomic_int *i) { + return atomic_load(i); +} + +// CHECK-LABEL: @_Z4fi1cPU7_Atomici +// CHECK: cir.load atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi1cPU7_Atomici +// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4 \ No newline at end of file From ec853cac2a71d1697f00162b9f9564abd869e75a Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 10 Apr 2024 09:43:57 +0800 Subject: [PATCH 1473/2301] [CIR][CIRGen] Add missing CIRGen for generic bit operation builtins (#540) This patch adds the CIRGen for the following builtin functions: - `__builtin_clzg`; - `__builtin_ctzg`; - `__builtin_popcountg`. CIRGen for these three functions are missing in the original PR which introduces CIR bit ops. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 3 +++ clang/test/CIR/CodeGen/builtin-bits.cpp | 24 ++++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index bcd8ab8fe5f3..8c5d26b6a06d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -567,12 +567,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_ctz: case Builtin::BI__builtin_ctzl: case Builtin::BI__builtin_ctzll: + case Builtin::BI__builtin_ctzg: return buildBuiltinBitOp(*this, E, BCK_CTZPassedZero); case Builtin::BI__builtin_clzs: case Builtin::BI__builtin_clz: case Builtin::BI__builtin_clzl: case Builtin::BI__builtin_clzll: + case Builtin::BI__builtin_clzg: return buildBuiltinBitOp(*this, E, BCK_CLZPassedZero); case Builtin::BI__builtin_ffs: @@ -591,6 +593,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_popcount: case Builtin::BI__builtin_popcountl: case Builtin::BI__builtin_popcountll: + case Builtin::BI__builtin_popcountg: return buildBuiltinBitOp(*this, E, std::nullopt); case Builtin::BI__builtin_bswap16: diff --git a/clang/test/CIR/CodeGen/builtin-bits.cpp b/clang/test/CIR/CodeGen/builtin-bits.cpp index 6f7f195cf1fb..6b82f75187b8 100644 --- a/clang/test/CIR/CodeGen/builtin-bits.cpp +++ b/clang/test/CIR/CodeGen/builtin-bits.cpp @@ -57,6 +57,14 @@ int test_builtin_ctzll(unsigned long long x) { // CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u64i) : !s32i // CHECK: } +int test_builtin_ctzg(unsigned x) { + return __builtin_ctzg(x); +} + +// CHECK: cir.func @_Z17test_builtin_ctzgj +// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u32i) : !s32i +// CHECK: } + int test_builtin_clzs(unsigned short x) { return __builtin_clzs(x); } @@ -89,6 +97,14 @@ int test_builtin_clzll(unsigned long long x) { // CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u64i) : !s32i // CHECK: } +int test_builtin_clzg(unsigned x) { + return __builtin_clzg(x); +} + +// CHECK: cir.func @_Z17test_builtin_clzgj +// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u32i) : !s32i +// CHECK: } + int test_builtin_ffs(int x) { return __builtin_ffs(x); } @@ -160,3 +176,11 @@ int test_builtin_popcountll(unsigned long long x) { // CHECK: cir.func @_Z23test_builtin_popcountlly // CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u64i) : !s32i // CHECK: } + +int test_builtin_popcountg(unsigned x) { + return __builtin_popcountg(x); +} + +// CHECK: cir.func @_Z22test_builtin_popcountgj +// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u32i) : !s32i +// CHECK: } From cb24f847fc3d3c51e2170faa8509113406a7e2d4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 10 Apr 2024 13:29:00 -0700 Subject: [PATCH 1474/2301] [CIR][CIRGen] Add atomic store support --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 6 ++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 9 ++- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 7 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 ++- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 8 +-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 5 +- .../Dialect/Transforms/LoweringPrepare.cpp | 4 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 69 ++++++++++++------- clang/test/CIR/CodeGen/atomic.cpp | 43 +++++++++++- 12 files changed, 118 insertions(+), 48 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 1b93557b9fcd..b111913a126e 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -134,6 +134,12 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createBinop(lhs, mlir::cir::BinOpKind::Mul, val); } + mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, + mlir::Value dst, bool _volatile = false, + ::mlir::cir::MemOrderAttr order = {}) { + return create(loc, val, dst, _volatile, order); + } + //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 73f8f55f122c..425ba9cddb8c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -402,7 +402,7 @@ def LoadOp : CIR_Op<"load", [ backed up by a `cir.ptr` type. A unit attribute `deref` can be used to mark the resulting value as used by another operation to dereference a pointer. A unit attribute `volatile` can be used to indicate a volatile - loading. + loading. Load can be marked atomic by using `atomic()`. Example: @@ -451,7 +451,8 @@ def StoreOp : CIR_Op<"store", [ let description = [{ `cir.store` stores a value (first operand) to the memory address specified in the second operand. A unit attribute `volatile` can be used to indicate - a volatile store. + a volatile store. Store's can be marked atomic by using + `atomic()`. Example: @@ -467,12 +468,14 @@ def StoreOp : CIR_Op<"store", [ let arguments = (ins CIR_AnyType:$value, Arg:$addr, - UnitAttr:$is_volatile); + UnitAttr:$is_volatile, + OptionalAttr:$mem_order); // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. let assemblyFormat = [{ (`volatile` $is_volatile^)? + (`atomic` `(` $mem_order^ `)`)? $value `,` $addr attr-dict `:` type($value) `,` `cir.ptr` type($addr) }]; diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 1841ed168242..5407875dcd74 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -375,7 +375,10 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_store_n: case AtomicExpr::AO__scoped_atomic_store: case AtomicExpr::AO__scoped_atomic_store_n: { - llvm_unreachable("NYI"); + auto loadVal1 = builder.createLoad(loc, Val1); + // FIXME(cir): add scope information. + assert(!UnimplementedFeature::syncScopeID()); + builder.createStore(loc, loadVal1, Ptr, E->isVolatile(), orderAttr); return; } @@ -612,7 +615,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__atomic_store: case AtomicExpr::AO__scoped_atomic_store: - llvm_unreachable("NYI"); + Val1 = buildPointerWithAlignment(E->getVal1()); break; case AtomicExpr::AO__atomic_exchange: diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index bedd83551990..205a14baaa8a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -769,14 +769,16 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, - Address dst) { - return create(loc, val, dst.getPointer()); + Address dst, bool _volatile = false, + ::mlir::cir::MemOrderAttr order = {}) { + return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), _volatile, + order); } mlir::cir::StoreOp createFlagStore(mlir::Location loc, bool val, mlir::Value dst) { auto flag = getBool(val, loc); - return create(loc, flag, dst); + return CIRBaseBuilderTy::createStore(loc, flag, dst); } // Convert byte offset to sequence of high-level indices suitable for diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index f034638d2eb7..418f9f9fb21e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -297,7 +297,7 @@ void CIRGenFunction::buildAggregateStore(mlir::Value Val, Address Dest, // struct), which can later be broken down in other CIR levels (or prior // to dialect codegen). (void)DestIsVolatile; - builder.create(*currSrcLoc, Val, Dest.getPointer()); + builder.createStore(*currSrcLoc, Val, Dest); } static Address emitAddressAtOffset(CIRGenFunction &CGF, Address addr, diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 360cccb6bf3a..2a23fd1a73ed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -272,12 +272,12 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { /*ArraySize=*/nullptr); auto storeAddr = coroFrame.getPointer(); - builder.create(openCurlyLoc, nullPtrCst, storeAddr); + builder.CIRBaseBuilderTy::createStore(openCurlyLoc, nullPtrCst, storeAddr); builder.create(openCurlyLoc, coroAlloc.getResult(0), /*withElseRegion=*/false, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - builder.create( + builder.CIRBaseBuilderTy::createStore( loc, buildScalarExpr(S.getAllocate()), storeAddr); builder.create(loc); @@ -476,8 +476,8 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, loc, CharUnits::One(), builder.getBestAllocaInsertPoint(scopeParentBlock)); // Store the rvalue so we can reload it before the promise call. - builder.create(loc, awaitRes.RV.getScalarVal(), - tmpResumeRValAddr); + builder.CIRBaseBuilderTy::createStore( + loc, awaitRes.RV.getScalarVal(), tmpResumeRValAddr); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 6be56a803f68..954ea854e026 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -592,8 +592,7 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, } assert(currSrcLoc && "must pass in source location"); - builder.create(*currSrcLoc, Value, Addr.getPointer(), - Volatile); + builder.createStore(*currSrcLoc, Value, Addr, Volatile); if (isNontemporal) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 2a31af7ca00b..8256bd6db71c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1121,7 +1121,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // Location of the store to the param storage tracked as beginning of // the function body. auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); - builder.create(fnBodyBegin, paramVal, addr); + builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addr); } assert(builder.getInsertionBlock() && "Should be valid"); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index f214abe7358d..17264d36e588 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -492,14 +492,13 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { // If this function returns a reference, take the address of the // expression rather than the value. RValue Result = buildReferenceBindingToExpr(RV); - builder.create(loc, Result.getScalarVal(), - ReturnValue.getPointer()); + builder.createStore(loc, Result.getScalarVal(), ReturnValue); } else { mlir::Value V = nullptr; switch (CIRGenFunction::getEvaluationKind(RV->getType())) { case TEK_Scalar: V = buildScalarExpr(RV); - builder.create(loc, V, *FnRetAlloca); + builder.CIRBaseBuilderTy::createStore(loc, V, *FnRetAlloca); break; case TEK_Complex: llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 877a31f90f23..0643d2d525ab 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -387,7 +387,7 @@ static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, builder.getSizeFromCharUnits(builder.getContext(), clang::CharUnits::One()), nullptr); - builder.create(loc, begin, tmpAddr); + builder.createStore(loc, begin, tmpAddr); auto loop = builder.createDoWhile( loc, @@ -418,7 +418,7 @@ static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, // Advance pointer and store them to temporary variable auto nextElement = builder.create( loc, eltTy, currentElement, one); - b.create(loc, nextElement, tmpAddr); + builder.createStore(loc, nextElement, tmpAddr); builder.createYield(loc); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d530e77e7a86..0ffabaf6f922 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1000,6 +1000,26 @@ class CIRAllocaLowering } }; +static mlir::LLVM::AtomicOrdering +getLLVMMemOrder(std::optional &memorder) { + if (!memorder) + return mlir::LLVM::AtomicOrdering::not_atomic; + switch (*memorder) { + case mlir::cir::MemOrder::Relaxed: + return mlir::LLVM::AtomicOrdering::monotonic; + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: + return mlir::LLVM::AtomicOrdering::acquire; + case mlir::cir::MemOrder::Release: + return mlir::LLVM::AtomicOrdering::release; + case mlir::cir::MemOrder::AcquireRelease: + return mlir::LLVM::AtomicOrdering::acq_rel; + case mlir::cir::MemOrder::SequentiallyConsistent: + return mlir::LLVM::AtomicOrdering::seq_cst; + } + llvm_unreachable("unknown memory order"); +} + class CIRLoadLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -1009,30 +1029,13 @@ class CIRLoadLowering : public mlir::OpConversionPattern { mlir::ConversionPatternRewriter &rewriter) const override { const auto llvmTy = getTypeConverter()->convertType(op.getResult().getType()); - // FIXME: right now we only pass in the alignment when the memory access is - // atomic, we should always pass it instead. unsigned alignment = 0; - auto ordering = mlir::LLVM::AtomicOrdering::not_atomic; - if (op.getMemOrder()) { - switch (*op.getMemOrder()) { - case mlir::cir::MemOrder::Relaxed: - ordering = mlir::LLVM::AtomicOrdering::monotonic; - break; - case mlir::cir::MemOrder::Consume: - case mlir::cir::MemOrder::Acquire: - ordering = mlir::LLVM::AtomicOrdering::acquire; - break; - case mlir::cir::MemOrder::Release: - ordering = mlir::LLVM::AtomicOrdering::release; - break; - case mlir::cir::MemOrder::AcquireRelease: - ordering = mlir::LLVM::AtomicOrdering::acq_rel; - break; - case mlir::cir::MemOrder::SequentiallyConsistent: - ordering = mlir::LLVM::AtomicOrdering::seq_cst; - break; - } + auto memorder = op.getMemOrder(); + auto ordering = getLLVMMemOrder(memorder); + // FIXME: right now we only pass in the alignment when the memory access + // is atomic, we should always pass it instead. + if (ordering != mlir::LLVM::AtomicOrdering::not_atomic) { mlir::DataLayout layout(op->getParentOfType()); alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); } @@ -1040,8 +1043,7 @@ class CIRLoadLowering : public mlir::OpConversionPattern { // TODO: nontemporal, invariant, syncscope. rewriter.replaceOpWithNewOp( op, llvmTy, adaptor.getAddr(), /* alignment */ alignment, - /* volatile */ op.getIsVolatile(), - /* nontemporal */ false, + op.getIsVolatile(), /* nontemporal */ false, /* invariant */ false, /* invariantGroup */ false, ordering); return mlir::LogicalResult::success(); } @@ -1054,9 +1056,24 @@ class CIRStoreLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::StoreOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { + unsigned alignment = 0; + auto memorder = op.getMemOrder(); + auto ordering = getLLVMMemOrder(memorder); + + // FIXME: right now we only pass in the alignment when the memory access + // is atomic, we should always pass it instead. + if (ordering != mlir::LLVM::AtomicOrdering::not_atomic) { + const auto llvmTy = + getTypeConverter()->convertType(op.getValue().getType()); + mlir::DataLayout layout(op->getParentOfType()); + alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); + } + + // TODO: nontemporal, syncscope. rewriter.replaceOpWithNewOp( - op, adaptor.getValue(), adaptor.getAddr(), - /* alignment */ 0, /* volatile */ op.getIsVolatile()); + op, adaptor.getValue(), adaptor.getAddr(), alignment, + op.getIsVolatile(), /* nontemporal */ false, /* invariantGroup */ false, + ordering); return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 51c0fa34ca87..88d1d38bc549 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -178,4 +178,45 @@ int fi1c(atomic_int *i) { // CHECK: cir.load atomic(seq_cst) // LLVM-LABEL: @_Z4fi1cPU7_Atomici -// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4 \ No newline at end of file +// LLVM: load atomic i32, ptr {{.*}} seq_cst, align 4 + +void fi2(_Atomic(int) *i) { + __c11_atomic_store(i, 1, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z3fi2PU7_Atomici +// CHECK: cir.store atomic(seq_cst) + +// LLVM-LABEL: @_Z3fi2PU7_Atomici +// LLVM: store atomic i32 {{.*}} seq_cst, align 4 + +void fi2a(int *i) { + int v = 1; + __atomic_store(i, &v, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z4fi2aPi +// CHECK: cir.store atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi2aPi +// LLVM: store atomic i32 {{.*}} seq_cst, align 4 + +void fi2b(int *i) { + __atomic_store_n(i, 1, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z4fi2bPi +// CHECK: cir.store atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi2bPi +// LLVM: store atomic i32 {{.*}} seq_cst, align 4 + +void fi2c(atomic_int *i) { + atomic_store(i, 1); +} + +// CHECK-LABEL: @_Z4fi2cPU7_Atomici +// CHECK: cir.store atomic(seq_cst) + +// LLVM-LABEL: @_Z4fi2cPU7_Atomici +// LLVM: store atomic i32 {{.*}} seq_cst, align 4 \ No newline at end of file From 5c1d922e7787b2ac4ce78b4cc7b5d6c3b443bbde Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 10 Apr 2024 15:36:40 -0700 Subject: [PATCH 1475/2301] [CIR][CIRGen] Add support for __attribute__((constructor)) Also add skeleton for upcoming dtor support. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 +++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 35 +++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 5 +++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 34 ++++++++++++++++-- .../Dialect/Transforms/LoweringPrepare.cpp | 27 ++++++++++---- clang/test/CIR/CodeGen/ctor-global.cpp | 24 +++++++++++++ clang/test/CIR/IR/global.cir | 11 ++++++ 7 files changed, 131 insertions(+), 10 deletions(-) create mode 100644 clang/test/CIR/CodeGen/ctor-global.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 425ba9cddb8c..763d465c0611 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2410,6 +2410,10 @@ def FuncOp : CIR_Op<"func", [ above. Though mandatory, the prining of the attribute can be omitted if it is empty. + The `global_ctor` indicates whether a function should execute before `main()` + function, as specified by `__attribute__((constructor))`. A execution priority + can also be specified `global_ctor()`. + Example: ```mlir @@ -2449,6 +2453,7 @@ def FuncOp : CIR_Op<"func", [ OptionalAttr:$arg_attrs, OptionalAttr:$res_attrs, OptionalAttr:$aliasee, + OptionalAttr:$global_ctor, OptionalAttr:$ast); let regions = (region AnyRegion:$body); let skipDefaultBuilders = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 018f923abb10..1606c87c1dc4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -493,11 +493,42 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, // TODO: setNonAliasAttributes // TODO: SetLLVMFunctionAttributesForDeclaration - assert(!D->getAttr() && "NYI"); - assert(!D->getAttr() && "NYI"); + if (const ConstructorAttr *CA = D->getAttr()) + AddGlobalCtor(Fn, CA->getPriority()); + if (const DestructorAttr *DA = D->getAttr()) + AddGlobalDtor(Fn, DA->getPriority(), true); + assert(!D->getAttr() && "NYI"); } +/// Track functions to be called before main() runs. +void CIRGenModule::AddGlobalCtor(mlir::cir::FuncOp Ctor, int Priority) { + // FIXME(cir): handle LexOrder and Associated data upon testcases. + // + // Traditional LLVM codegen directly adds the function to the list of global + // ctors. In CIR we just add a global_ctor attribute to the function. The + // global list is created in LoweringPrepare. + // + // FIXME(from traditional LLVM): Type coercion of void()* types. + Ctor->setAttr(Ctor.getGlobalCtorAttrName(), + mlir::cir::GlobalCtorAttr::get(builder.getContext(), + Ctor.getName(), Priority)); +} + +/// Add a function to the list that will be called when the module is unloaded. +void CIRGenModule::AddGlobalDtor(mlir::cir::FuncOp Dtor, int Priority, + bool IsDtorAttrFunc) { + assert(IsDtorAttrFunc && "NYI"); + if (codeGenOpts.RegisterGlobalDtorsWithAtExit && + (!getASTContext().getTargetInfo().getTriple().isOSAIX() || + IsDtorAttrFunc)) { + llvm_unreachable("NYI"); + } + + // FIXME(from traditional LLVM): Type coercion of void()* types. + llvm_unreachable("NYI"); +} + mlir::Operation *CIRGenModule::getGlobalValue(StringRef Name) { auto global = mlir::SymbolTable::lookupSymbolIn(theModule, Name); if (!global) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index cba0b9fa1d10..affc295b3e9c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -227,6 +227,11 @@ class CIRGenModule : public CIRGenTypeCache { mlir::Type t, bool isCst = false, mlir::Operation *insertPoint = nullptr); + // FIXME: Hardcoding priority here is gross. + void AddGlobalCtor(mlir::cir::FuncOp Ctor, int Priority = 65535); + void AddGlobalDtor(mlir::cir::FuncOp Dtor, int Priority = 65535, + bool IsDtorAttrFunc = false); + /// Return the mlir::Value for the address of the given global variable. /// If Ty is non-null and if the global doesn't exist, then it will be created /// with the specified type instead of whatever the normal requested type diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index eadf4d38334a..db6734d83cbe 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1773,7 +1773,6 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { StringAttr nameAttr; SmallVector arguments; - SmallVector argAttrs; SmallVector resultAttrs; SmallVector argTypes; SmallVector resultTypes; @@ -1833,6 +1832,28 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { hasAlias = true; } + if (::mlir::succeeded(parser.parseOptionalKeyword("global_ctor"))) { + std::optional prio; + if (mlir::succeeded(parser.parseOptionalLParen())) { + auto parsedPrio = mlir::FieldParser>::parse(parser); + if (mlir::failed(parsedPrio)) { + return parser.emitError( + parser.getCurrentLocation(), + "failed to parse GlobalCtorAttr parameter " + "'priority' which is to be a `std::optional`"); + return failure(); + } + prio = parsedPrio.value_or(std::optional()); + + // Parse literal ')' + if (parser.parseRParen()) + return failure(); + } + auto globalCtorAttr = + mlir::cir::GlobalCtorAttr::get(builder.getContext(), nameAttr, prio); + state.addAttribute(getGlobalCtorAttrName(state.name), globalCtorAttr); + } + Attribute extraAttrs; if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) { if (parser.parseLParen().failed()) @@ -1925,9 +1946,10 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p, *this, fnType.getInputs(), fnType.isVarArg(), {}); function_interface_impl::printFunctionAttributes( p, *this, + // These are all omitted since they are custom printed already. {getSymVisibilityAttrName(), getAliaseeAttrName(), getFunctionTypeAttrName(), getLinkageAttrName(), getBuiltinAttrName(), - getNoProtoAttrName(), getExtraAttrsAttrName()}); + getNoProtoAttrName(), getGlobalCtorAttrName(), getExtraAttrsAttrName()}); if (auto aliaseeName = getAliasee()) { p << " alias("; @@ -1935,6 +1957,13 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p << ")"; } + if (auto globalCtor = getGlobalCtorAttr()) { + p << " global_ctor"; + auto prio = globalCtor.getPriority(); + if (prio) + p << "(" << *prio << ")"; + } + if (!getExtraAttrs().getElements().empty()) { p << " extra("; p.printAttributeWithoutType(getExtraAttrs()); @@ -2007,6 +2036,7 @@ LogicalResult cir::FuncOp::verify() { return emitOpError() << "a function alias '" << *fn << "' must have empty body"; } + return success(); } diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 0643d2d525ab..e3e7c94cd3bd 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -80,6 +80,9 @@ struct LoweringPreparePass : public LoweringPrepareBase { /// Build a module init function that calls all the dynamic initializers. void buildCXXGlobalInitFunc(); + /// Materialize global ctor/dtor list + void buildGlobalCtorDtorList(); + FuncOp buildRuntimeFunction(mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, mlir::cir::FuncType type, @@ -105,6 +108,9 @@ struct LoweringPreparePass : public LoweringPrepareBase { /// Tracks existing dynamic initializers. llvm::StringMap dynamicInitializerNames; llvm::SmallVector dynamicInitializers; + + /// List of ctors to be called before main() + SmallVector globalCtorList; }; } // namespace @@ -315,20 +321,24 @@ void LoweringPreparePass::lowerGlobalOp(GlobalOp op) { } } +void LoweringPreparePass::buildGlobalCtorDtorList() { + // TODO: dtors + if (globalCtorList.empty()) + return; + theModule->setAttr("cir.globalCtors", + mlir::ArrayAttr::get(&getContext(), globalCtorList)); +} + void LoweringPreparePass::buildCXXGlobalInitFunc() { if (dynamicInitializers.empty()) return; - SmallVector attrs; for (auto &f : dynamicInitializers) { // TODO: handle globals with a user-specified initialzation priority. auto ctorAttr = mlir::cir::GlobalCtorAttr::get(&getContext(), f.getName()); - attrs.push_back(ctorAttr); + globalCtorList.push_back(ctorAttr); } - theModule->setAttr("cir.globalCtors", - mlir::ArrayAttr::get(&getContext(), attrs)); - SmallString<256> fnName; // Include the filename in the symbol name. Including "sub_" matches gcc // and makes sure these symbols appear lexicographically behind the symbols @@ -502,6 +512,10 @@ void LoweringPreparePass::runOnOp(Operation *op) { lowerArrayCtor(arrayCtor); } else if (auto arrayDtor = dyn_cast(op)) { lowerArrayDtor(arrayDtor); + } else if (auto fnOp = dyn_cast(op)) { + if (auto globalCtor = fnOp.getGlobalCtorAttr()) { + globalCtorList.push_back(globalCtor); + } } } @@ -515,7 +529,7 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { if (isa(op)) + ArrayCtor, ArrayDtor, mlir::cir::FuncOp>(op)) opsToTransform.push_back(op); }); @@ -523,6 +537,7 @@ void LoweringPreparePass::runOnOperation() { runOnOp(o); buildCXXGlobalInitFunc(); + buildGlobalCtorDtorList(); } std::unique_ptr mlir::createLoweringPreparePass() { diff --git a/clang/test/CIR/CodeGen/ctor-global.cpp b/clang/test/CIR/CodeGen/ctor-global.cpp new file mode 100644 index 000000000000..e02b408c59e7 --- /dev/null +++ b/clang/test/CIR/CodeGen/ctor-global.cpp @@ -0,0 +1,24 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=BEFORE --input-file=%t.cir %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t2.cir 2>&1 +// RUN: FileCheck --check-prefix=AFTER --input-file=%t2.cir %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +extern int bar(); +void foo(void) __attribute__((constructor)); +void foo(void) { + bar(); +} + +// BEFORE: cir.func @_Z3foov() global_ctor(65535) + +void foo2(void) __attribute__((constructor(777))); +void foo2(void) { + bar(); +} + +// BEFORE: cir.func @_Z4foo2v() global_ctor(777) + +// AFTER: module @{{.*}} attributes {cir.globalCtors = [#cir.globalCtor<"_Z3foov", 65535>, #cir.globalCtor<"_Z4foo2v", 777>], +// LLVM: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_Z3foov, ptr null }, { i32, ptr, ptr } { i32 777, ptr @_Z4foo2v, ptr null }] diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index ff81597d8171..3a2f419b2d33 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -42,6 +42,14 @@ module { %0 = cir.get_global @_ZL8__ioinit : cir.ptr cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () } + + cir.func @f31() global_ctor { + cir.return + } + + cir.func @f32() global_ctor(777) { + cir.return + } } // CHECK: cir.global external @a = #cir.int<3> : !s32i @@ -61,3 +69,6 @@ module { // CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s8i) : !s8i // CHECK-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () // CHECK-NEXT: } + +// CHECK: cir.func @f31() global_ctor +// CHECK: cir.func @f32() global_ctor(777) \ No newline at end of file From acf8c632ced915d8de5dbc470d76cab0eeff0f39 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 11 Apr 2024 14:13:26 -0700 Subject: [PATCH 1476/2301] [CIR][CIRGen][NFC] Rename globalCtor to global_ctor --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 2 +- clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp | 2 +- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- clang/test/CIR/CodeGen/ctor-global.cpp | 2 +- clang/test/CIR/CodeGen/static.cpp | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 60794e443829..905733b50aee 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -646,7 +646,7 @@ def NoThrowAttr : CIRUnitAttr<"NoThrow", "nothrow"> { let storageType = [{ NoThrowAttr }]; } -def GlobalCtorAttr : CIR_Attr<"GlobalCtor", "globalCtor"> { +def GlobalCtorAttr : CIR_Attr<"GlobalCtor", "global_ctor"> { let summary = "Indicates a function is a global constructor."; let description = [{ Describing a global constructor with an optional priority. diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index e3e7c94cd3bd..273b001efe12 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -325,7 +325,7 @@ void LoweringPreparePass::buildGlobalCtorDtorList() { // TODO: dtors if (globalCtorList.empty()) return; - theModule->setAttr("cir.globalCtors", + theModule->setAttr("cir.global_ctors", mlir::ArrayAttr::get(&getContext(), globalCtorList)); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0ffabaf6f922..8be295c37c51 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3180,7 +3180,7 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, static void buildCtorList(mlir::ModuleOp module) { llvm::SmallVector, 2> globalCtors; for (auto namedAttr : module->getAttrs()) { - if (namedAttr.getName() == "cir.globalCtors") { + if (namedAttr.getName() == "cir.global_ctors") { for (auto attr : namedAttr.getValue().cast()) { assert(attr.isa() && "must be a GlobalCtorAttr"); diff --git a/clang/test/CIR/CodeGen/ctor-global.cpp b/clang/test/CIR/CodeGen/ctor-global.cpp index e02b408c59e7..824708be3abe 100644 --- a/clang/test/CIR/CodeGen/ctor-global.cpp +++ b/clang/test/CIR/CodeGen/ctor-global.cpp @@ -20,5 +20,5 @@ void foo2(void) { // BEFORE: cir.func @_Z4foo2v() global_ctor(777) -// AFTER: module @{{.*}} attributes {cir.globalCtors = [#cir.globalCtor<"_Z3foov", 65535>, #cir.globalCtor<"_Z4foo2v", 777>], +// AFTER: module @{{.*}} attributes {cir.global_ctors = [#cir.global_ctor<"_Z3foov", 65535>, #cir.global_ctor<"_Z4foo2v", 777>], // LLVM: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_Z3foov, ptr null }, { i32, ptr, ptr } { i32 777, ptr @_Z4foo2v, ptr null }] diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 108b13c6009c..cc0a86363e57 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -38,7 +38,7 @@ static Init __ioinit2(false); // BEFORE-NEXT: } -// AFTER: module {{.*}} attributes {{.*}}cir.globalCtors = [#cir.globalCtor<"__cxx_global_var_init">, #cir.globalCtor<"__cxx_global_var_init.1">] +// AFTER: module {{.*}} attributes {{.*}}cir.global_ctors = [#cir.global_ctor<"__cxx_global_var_init">, #cir.global_ctor<"__cxx_global_var_init.1">] // AFTER-NEXT: cir.global "private" external @__dso_handle : i8 // AFTER-NEXT: cir.func private @__cxa_atexit(!cir.ptr)>>, !cir.ptr, !cir.ptr) // AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) From 6e9de02ac1421c02895023ac4e14b891638613b5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 11 Apr 2024 14:27:08 -0700 Subject: [PATCH 1477/2301] [CIR][CIRGen][NFC] More prep work before global dtors --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 16 +++++++++++----- .../{ctor-global.cpp => global-ctor-dtor.cpp} | 0 2 files changed, 11 insertions(+), 5 deletions(-) rename clang/test/CIR/CodeGen/{ctor-global.cpp => global-ctor-dtor.cpp} (100%) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 905733b50aee..f9b7ba3b92e1 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -646,11 +646,12 @@ def NoThrowAttr : CIRUnitAttr<"NoThrow", "nothrow"> { let storageType = [{ NoThrowAttr }]; } -def GlobalCtorAttr : CIR_Attr<"GlobalCtor", "global_ctor"> { - let summary = "Indicates a function is a global constructor."; - let description = [{ - Describing a global constructor with an optional priority. - }]; +class CIR_GlobalCtorDtor + : CIR_Attr<"Global" # name, "global_" # attrMnemonic> { + let summary = sum; + let description = desc; + let parameters = (ins "StringAttr":$name, OptionalParameter<"std::optional">:$priority); let assemblyFormat = [{ @@ -668,6 +669,11 @@ def GlobalCtorAttr : CIR_Attr<"GlobalCtor", "global_ctor"> { let skipDefaultBuilders = 1; } +def GlobalCtorAttr : CIR_GlobalCtorDtor<"Ctor", "ctor", + "Marks a function as a global constructor", + "Describes a global constructor with optional priority" +>; + def BitfieldInfoAttr : CIR_Attr<"BitfieldInfo", "bitfield_info"> { let summary = "Represents a bit field info"; let description = [{ diff --git a/clang/test/CIR/CodeGen/ctor-global.cpp b/clang/test/CIR/CodeGen/global-ctor-dtor.cpp similarity index 100% rename from clang/test/CIR/CodeGen/ctor-global.cpp rename to clang/test/CIR/CodeGen/global-ctor-dtor.cpp From 8cba60e67b817ad0eab3610fe3d725d8ff245779 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 11 Apr 2024 14:15:02 -0700 Subject: [PATCH 1478/2301] [CIR][NFC] Fixup a few changes requested from upstream PRs --- clang/include/clang/CIR/Dialect/IR/CIRDialect.h | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index 159a1434cdb7..e78471035cf1 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -95,4 +95,3 @@ void buildTerminatedBody(OpBuilder &builder, Location loc); #include "clang/CIR/Dialect/IR/CIROps.h.inc" #endif // LLVM_CLANG_CIR_DIALECT_IR_CIRDIALECT_H - From eae28c74e83c4e0fa2336db9415dfe0777668f62 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 11 Apr 2024 14:46:21 -0700 Subject: [PATCH 1479/2301] [CIR][NFC] Remove `SYSTEM` from MLIR include_directories calls --- clang/include/clang/CIR/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/CMakeLists.txt b/clang/include/clang/CIR/CMakeLists.txt index 2028af5232c2..25497fc222d1 100644 --- a/clang/include/clang/CIR/CMakeLists.txt +++ b/clang/include/clang/CIR/CMakeLists.txt @@ -1,8 +1,8 @@ set(MLIR_MAIN_SRC_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --src-root set(MLIR_INCLUDE_DIR ${LLVM_MAIN_SRC_DIR}/../mlir/include ) # --includedir set(MLIR_TABLEGEN_OUTPUT_DIR ${CMAKE_BINARY_DIR}/tools/mlir/include) -include_directories(SYSTEM ${MLIR_INCLUDE_DIR}) -include_directories(SYSTEM ${MLIR_TABLEGEN_OUTPUT_DIR}) +include_directories(${MLIR_INCLUDE_DIR}) +include_directories(${MLIR_TABLEGEN_OUTPUT_DIR}) add_subdirectory(Dialect) add_subdirectory(Interfaces) From 7dcefcafdd0c3545dea124b867e41226ec829fe0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 11 Apr 2024 14:54:28 -0700 Subject: [PATCH 1480/2301] [CIR][CIRGen] Add support for __attribute__((destructor)) --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 6 +- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 64 +++++++++++++------ .../Dialect/Transforms/LoweringPrepare.cpp | 17 +++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 55 ++++++++++------ clang/test/CIR/CodeGen/global-ctor-dtor.cpp | 17 ++++- clang/test/CIR/IR/global.cir | 12 +++- 8 files changed, 129 insertions(+), 50 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index f9b7ba3b92e1..9eda63a20938 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -671,7 +671,11 @@ class CIR_GlobalCtorDtor; +def GlobalDtorAttr : CIR_GlobalCtorDtor<"Dtor", "dtor", + "Marks a function as a global destructor", + "A function with this attribute excutes before module unloading" >; def BitfieldInfoAttr : CIR_Attr<"BitfieldInfo", "bitfield_info"> { diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 763d465c0611..7da421324fb3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2412,7 +2412,8 @@ def FuncOp : CIR_Op<"func", [ The `global_ctor` indicates whether a function should execute before `main()` function, as specified by `__attribute__((constructor))`. A execution priority - can also be specified `global_ctor()`. + can also be specified `global_ctor()`. Similarly, for global destructors + both `global_dtor` and `global_dtor()` are available. Example: @@ -2454,6 +2455,7 @@ def FuncOp : CIR_Op<"func", [ OptionalAttr:$res_attrs, OptionalAttr:$aliasee, OptionalAttr:$global_ctor, + OptionalAttr:$global_dtor, OptionalAttr:$ast); let regions = (region AnyRegion:$body); let skipDefaultBuilders = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 1606c87c1dc4..0380ffffec6d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -526,7 +526,9 @@ void CIRGenModule::AddGlobalDtor(mlir::cir::FuncOp Dtor, int Priority, } // FIXME(from traditional LLVM): Type coercion of void()* types. - llvm_unreachable("NYI"); + Dtor->setAttr(Dtor.getGlobalDtorAttrName(), + mlir::cir::GlobalDtorAttr::get(builder.getContext(), + Dtor.getName(), Priority)); } mlir::Operation *CIRGenModule::getGlobalValue(StringRef Name) { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index db6734d83cbe..cb8d8d98aaa2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1832,27 +1832,43 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { hasAlias = true; } - if (::mlir::succeeded(parser.parseOptionalKeyword("global_ctor"))) { - std::optional prio; - if (mlir::succeeded(parser.parseOptionalLParen())) { - auto parsedPrio = mlir::FieldParser>::parse(parser); - if (mlir::failed(parsedPrio)) { - return parser.emitError( - parser.getCurrentLocation(), - "failed to parse GlobalCtorAttr parameter " - "'priority' which is to be a `std::optional`"); - return failure(); + auto parseGlobalDtorCtor = + [&](StringRef keyword, + llvm::function_ref prio)> createAttr) + -> mlir::LogicalResult { + if (::mlir::succeeded(parser.parseOptionalKeyword(keyword))) { + std::optional prio; + if (mlir::succeeded(parser.parseOptionalLParen())) { + auto parsedPrio = mlir::FieldParser>::parse(parser); + if (mlir::failed(parsedPrio)) { + return parser.emitError( + parser.getCurrentLocation(), + "failed to parse 'priority', of type 'std::optional'"); + return failure(); + } + prio = parsedPrio.value_or(std::optional()); + // Parse literal ')' + if (parser.parseRParen()) + return failure(); } - prio = parsedPrio.value_or(std::optional()); - - // Parse literal ')' - if (parser.parseRParen()) - return failure(); + createAttr(prio); } - auto globalCtorAttr = - mlir::cir::GlobalCtorAttr::get(builder.getContext(), nameAttr, prio); - state.addAttribute(getGlobalCtorAttrName(state.name), globalCtorAttr); - } + return success(); + }; + + if (parseGlobalDtorCtor("global_ctor", [&](std::optional prio) { + auto globalCtorAttr = mlir::cir::GlobalCtorAttr::get( + builder.getContext(), nameAttr, prio); + state.addAttribute(getGlobalCtorAttrName(state.name), globalCtorAttr); + }).failed()) + return failure(); + + if (parseGlobalDtorCtor("global_dtor", [&](std::optional prio) { + auto globalDtorAttr = mlir::cir::GlobalDtorAttr::get( + builder.getContext(), nameAttr, prio); + state.addAttribute(getGlobalDtorAttrName(state.name), globalDtorAttr); + }).failed()) + return failure(); Attribute extraAttrs; if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) { @@ -1949,7 +1965,8 @@ void cir::FuncOp::print(OpAsmPrinter &p) { // These are all omitted since they are custom printed already. {getSymVisibilityAttrName(), getAliaseeAttrName(), getFunctionTypeAttrName(), getLinkageAttrName(), getBuiltinAttrName(), - getNoProtoAttrName(), getGlobalCtorAttrName(), getExtraAttrsAttrName()}); + getNoProtoAttrName(), getGlobalCtorAttrName(), getGlobalDtorAttrName(), + getExtraAttrsAttrName()}); if (auto aliaseeName = getAliasee()) { p << " alias("; @@ -1964,6 +1981,13 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p << "(" << *prio << ")"; } + if (auto globalDtor = getGlobalDtorAttr()) { + p << " global_dtor"; + auto prio = globalDtor.getPriority(); + if (prio) + p << "(" << *prio << ")"; + } + if (!getExtraAttrs().getElements().empty()) { p << " extra("; p.printAttributeWithoutType(getExtraAttrs()); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 273b001efe12..e59e5eb8a7fe 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -111,6 +111,8 @@ struct LoweringPreparePass : public LoweringPrepareBase { /// List of ctors to be called before main() SmallVector globalCtorList; + /// List of dtors to be called when unloading module. + SmallVector globalDtorList; }; } // namespace @@ -322,11 +324,14 @@ void LoweringPreparePass::lowerGlobalOp(GlobalOp op) { } void LoweringPreparePass::buildGlobalCtorDtorList() { - // TODO: dtors - if (globalCtorList.empty()) - return; - theModule->setAttr("cir.global_ctors", - mlir::ArrayAttr::get(&getContext(), globalCtorList)); + if (!globalCtorList.empty()) { + theModule->setAttr("cir.global_ctors", + mlir::ArrayAttr::get(&getContext(), globalCtorList)); + } + if (!globalDtorList.empty()) { + theModule->setAttr("cir.global_dtors", + mlir::ArrayAttr::get(&getContext(), globalDtorList)); + } } void LoweringPreparePass::buildCXXGlobalInitFunc() { @@ -515,6 +520,8 @@ void LoweringPreparePass::runOnOp(Operation *op) { } else if (auto fnOp = dyn_cast(op)) { if (auto globalCtor = fnOp.getGlobalCtorAttr()) { globalCtorList.push_back(globalCtor); + } else if (auto globalDtor = fnOp.getGlobalDtorAttr()) { + globalDtorList.push_back(globalDtor); } } } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8be295c37c51..c4ed6eafa3e5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3177,26 +3177,19 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, } } // namespace -static void buildCtorList(mlir::ModuleOp module) { - llvm::SmallVector, 2> globalCtors; +static void buildCtorDtorList( + mlir::ModuleOp module, StringRef globalXtorName, StringRef llvmXtorName, + llvm::function_ref(mlir::Attribute)> createXtor) { + llvm::SmallVector, 2> globalXtors; for (auto namedAttr : module->getAttrs()) { - if (namedAttr.getName() == "cir.global_ctors") { - for (auto attr : namedAttr.getValue().cast()) { - assert(attr.isa() && - "must be a GlobalCtorAttr"); - if (auto ctorAttr = attr.cast()) { - // default priority is 65536 - int priority = 65536; - if (ctorAttr.getPriority()) - priority = *ctorAttr.getPriority(); - globalCtors.emplace_back(ctorAttr.getName(), priority); - } - } + if (namedAttr.getName() == globalXtorName) { + for (auto attr : namedAttr.getValue().cast()) + globalXtors.emplace_back(createXtor(attr)); break; } } - if (globalCtors.empty()) + if (globalXtors.empty()) return; mlir::OpBuilder builder(module.getContext()); @@ -3213,12 +3206,12 @@ static void buildCtorList(mlir::ModuleOp module) { auto CtorStructTy = mlir::LLVM::LLVMStructType::getLiteral( builder.getContext(), CtorStructFields); auto CtorStructArrayTy = - mlir::LLVM::LLVMArrayType::get(CtorStructTy, globalCtors.size()); + mlir::LLVM::LLVMArrayType::get(CtorStructTy, globalXtors.size()); auto loc = module.getLoc(); auto newGlobalOp = builder.create( loc, CtorStructArrayTy, true, mlir::LLVM::Linkage::Appending, - "llvm.global_ctors", mlir::Attribute()); + llvmXtorName, mlir::Attribute()); newGlobalOp.getRegion().push_back(new mlir::Block()); builder.setInsertionPointToEnd(newGlobalOp.getInitializerBlock()); @@ -3226,8 +3219,8 @@ static void buildCtorList(mlir::ModuleOp module) { mlir::Value result = builder.create(loc, CtorStructArrayTy); - for (uint64_t I = 0; I < globalCtors.size(); I++) { - auto fn = globalCtors[I]; + for (uint64_t I = 0; I < globalXtors.size(); I++) { + auto fn = globalXtors[I]; mlir::Value structInit = builder.create(loc, CtorStructTy); mlir::Value initPriority = builder.create( @@ -3293,7 +3286,29 @@ void ConvertCIRToLLVMPass::runOnOperation() { signalPassFailure(); // Emit the llvm.global_ctors array. - buildCtorList(module); + buildCtorDtorList(module, "cir.global_ctors", "llvm.global_ctors", + [](mlir::Attribute attr) { + assert(attr.isa() && + "must be a GlobalCtorAttr"); + auto ctorAttr = attr.cast(); + // default priority is 65536 + int priority = 65536; + if (ctorAttr.getPriority()) + priority = *ctorAttr.getPriority(); + return std::make_pair(ctorAttr.getName(), priority); + }); + // Emit the llvm.global_dtors array. + buildCtorDtorList(module, "cir.global_dtors", "llvm.global_dtors", + [](mlir::Attribute attr) { + assert(attr.isa() && + "must be a GlobalDtorAttr"); + auto dtorAttr = attr.cast(); + // default priority is 65536 + int priority = 65536; + if (dtorAttr.getPriority()) + priority = *dtorAttr.getPriority(); + return std::make_pair(dtorAttr.getName(), priority); + }); } std::unique_ptr createConvertCIRToLLVMPass() { diff --git a/clang/test/CIR/CodeGen/global-ctor-dtor.cpp b/clang/test/CIR/CodeGen/global-ctor-dtor.cpp index 824708be3abe..178da976324f 100644 --- a/clang/test/CIR/CodeGen/global-ctor-dtor.cpp +++ b/clang/test/CIR/CodeGen/global-ctor-dtor.cpp @@ -20,5 +20,20 @@ void foo2(void) { // BEFORE: cir.func @_Z4foo2v() global_ctor(777) -// AFTER: module @{{.*}} attributes {cir.global_ctors = [#cir.global_ctor<"_Z3foov", 65535>, #cir.global_ctor<"_Z4foo2v", 777>], +void foo3(void) __attribute__((destructor)); +void foo3(void) { + bar(); +} + +// BEFORE: cir.func @_Z4foo3v() global_dtor(65535) + +void foo4(void) __attribute__((destructor(789))); +void foo4(void) { + bar(); +} + +// BEFORE: cir.func @_Z4foo4v() global_dtor(789) + +// AFTER: module @{{.*}} attributes {cir.global_ctors = [#cir.global_ctor<"_Z3foov", 65535>, #cir.global_ctor<"_Z4foo2v", 777>], cir.global_dtors = [#cir.global_dtor<"_Z4foo3v", 65535>, #cir.global_dtor<"_Z4foo4v", 789>] // LLVM: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_Z3foov, ptr null }, { i32, ptr, ptr } { i32 777, ptr @_Z4foo2v, ptr null }] +// LLVM-NEXT: @llvm.global_dtors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_Z4foo3v, ptr null }, { i32, ptr, ptr } { i32 789, ptr @_Z4foo4v, ptr null }] diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 3a2f419b2d33..47b09a946973 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -50,6 +50,14 @@ module { cir.func @f32() global_ctor(777) { cir.return } + + cir.func @f33() global_dtor { + cir.return + } + + cir.func @f34() global_dtor(777) { + cir.return + } } // CHECK: cir.global external @a = #cir.int<3> : !s32i @@ -71,4 +79,6 @@ module { // CHECK-NEXT: } // CHECK: cir.func @f31() global_ctor -// CHECK: cir.func @f32() global_ctor(777) \ No newline at end of file +// CHECK: cir.func @f32() global_ctor(777) +// CHECK: cir.func @f33() global_dtor +// CHECK: cir.func @f34() global_dtor(777) \ No newline at end of file From 28d68292910ca6d0aa319f332a97a3d9c43d76b2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 11 Apr 2024 15:50:19 -0700 Subject: [PATCH 1481/2301] [CIR][CIRGen][NFC] Simplify global ctor/dtor priority handling --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 11 ++++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 33 ++++++++++--------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 14 +++----- clang/test/CIR/CodeGen/static.cpp | 2 +- 4 files changed, 29 insertions(+), 31 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 9eda63a20938..b03c5cc82c20 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -652,20 +652,21 @@ class CIR_GlobalCtorDtor">:$priority); + let parameters = (ins "StringAttr":$name, "int":$priority); let assemblyFormat = [{ `<` - $name - (`,` $priority^)? + $name `,` $priority `>` }]; let builders = [ AttrBuilder<(ins "StringRef":$name, - CArg<"std::optional", "{}">:$priority), [{ + CArg<"int", "65536">:$priority), [{ return $_get($_ctxt, StringAttr::get($_ctxt, name), priority); }]> ]; + let extraClassDeclaration = [{ + bool isDefaultPriority() const { return getPriority() == 65536; }; + }]; let skipDefaultBuilders = 1; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index cb8d8d98aaa2..9a08d82b60bd 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1839,14 +1839,13 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { if (::mlir::succeeded(parser.parseOptionalKeyword(keyword))) { std::optional prio; if (mlir::succeeded(parser.parseOptionalLParen())) { - auto parsedPrio = mlir::FieldParser>::parse(parser); + auto parsedPrio = mlir::FieldParser::parse(parser); if (mlir::failed(parsedPrio)) { - return parser.emitError( - parser.getCurrentLocation(), - "failed to parse 'priority', of type 'std::optional'"); + return parser.emitError(parser.getCurrentLocation(), + "failed to parse 'priority', of type 'int'"); return failure(); } - prio = parsedPrio.value_or(std::optional()); + prio = parsedPrio.value_or(int()); // Parse literal ')' if (parser.parseRParen()) return failure(); @@ -1857,15 +1856,21 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { }; if (parseGlobalDtorCtor("global_ctor", [&](std::optional prio) { - auto globalCtorAttr = mlir::cir::GlobalCtorAttr::get( - builder.getContext(), nameAttr, prio); + mlir::cir::GlobalCtorAttr globalCtorAttr = + prio ? mlir::cir::GlobalCtorAttr::get(builder.getContext(), + nameAttr, *prio) + : mlir::cir::GlobalCtorAttr::get(builder.getContext(), + nameAttr); state.addAttribute(getGlobalCtorAttrName(state.name), globalCtorAttr); }).failed()) return failure(); if (parseGlobalDtorCtor("global_dtor", [&](std::optional prio) { - auto globalDtorAttr = mlir::cir::GlobalDtorAttr::get( - builder.getContext(), nameAttr, prio); + mlir::cir::GlobalDtorAttr globalDtorAttr = + prio ? mlir::cir::GlobalDtorAttr::get(builder.getContext(), + nameAttr, *prio) + : mlir::cir::GlobalDtorAttr::get(builder.getContext(), + nameAttr); state.addAttribute(getGlobalDtorAttrName(state.name), globalDtorAttr); }).failed()) return failure(); @@ -1976,16 +1981,14 @@ void cir::FuncOp::print(OpAsmPrinter &p) { if (auto globalCtor = getGlobalCtorAttr()) { p << " global_ctor"; - auto prio = globalCtor.getPriority(); - if (prio) - p << "(" << *prio << ")"; + if (!globalCtor.isDefaultPriority()) + p << "(" << globalCtor.getPriority() << ")"; } if (auto globalDtor = getGlobalDtorAttr()) { p << " global_dtor"; - auto prio = globalDtor.getPriority(); - if (prio) - p << "(" << *prio << ")"; + if (!globalDtor.isDefaultPriority()) + p << "(" << globalDtor.getPriority() << ")"; } if (!getExtraAttrs().getElements().empty()) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index c4ed6eafa3e5..53826ee1d8dc 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3291,11 +3291,8 @@ void ConvertCIRToLLVMPass::runOnOperation() { assert(attr.isa() && "must be a GlobalCtorAttr"); auto ctorAttr = attr.cast(); - // default priority is 65536 - int priority = 65536; - if (ctorAttr.getPriority()) - priority = *ctorAttr.getPriority(); - return std::make_pair(ctorAttr.getName(), priority); + return std::make_pair(ctorAttr.getName(), + ctorAttr.getPriority()); }); // Emit the llvm.global_dtors array. buildCtorDtorList(module, "cir.global_dtors", "llvm.global_dtors", @@ -3303,11 +3300,8 @@ void ConvertCIRToLLVMPass::runOnOperation() { assert(attr.isa() && "must be a GlobalDtorAttr"); auto dtorAttr = attr.cast(); - // default priority is 65536 - int priority = 65536; - if (dtorAttr.getPriority()) - priority = *dtorAttr.getPriority(); - return std::make_pair(dtorAttr.getName(), priority); + return std::make_pair(dtorAttr.getName(), + dtorAttr.getPriority()); }); } diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index cc0a86363e57..998bd5c6457d 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -38,7 +38,7 @@ static Init __ioinit2(false); // BEFORE-NEXT: } -// AFTER: module {{.*}} attributes {{.*}}cir.global_ctors = [#cir.global_ctor<"__cxx_global_var_init">, #cir.global_ctor<"__cxx_global_var_init.1">] +// AFTER: module {{.*}} attributes {{.*}}cir.global_ctors = [#cir.global_ctor<"__cxx_global_var_init", 65536>, #cir.global_ctor<"__cxx_global_var_init.1", 65536>] // AFTER-NEXT: cir.global "private" external @__dso_handle : i8 // AFTER-NEXT: cir.func private @__cxa_atexit(!cir.ptr)>>, !cir.ptr, !cir.ptr) // AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) From bebc566c33f6969002504552fa412ed154492477 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 11 Apr 2024 17:07:45 -0700 Subject: [PATCH 1482/2301] [CIR][CIRGen][NFC] Add more skeleton to VisitOpaqueValueExpr --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 22c8ffdf96ff..aabc654c1d44 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -222,6 +222,9 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *E) { + if (E->isGLValue()) + llvm_unreachable("NYI"); + llvm_unreachable("NYI"); } From dd14ce41ee8680762e637565731c2bdc62f9c60f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 Apr 2024 10:55:33 -0700 Subject: [PATCH 1483/2301] [CIR][CIRGen] Scalar handling for part of VisitOpaqueValueExpr --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 3 ++- clang/test/CIR/CodeGen/opaque.c | 12 ++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/opaque.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index aabc654c1d44..cc95199fa963 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -225,7 +225,8 @@ class ScalarExprEmitter : public StmtVisitor { if (E->isGLValue()) llvm_unreachable("NYI"); - llvm_unreachable("NYI"); + // Otherwise, assume the mapping is the scalar directly. + return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal(); } /// Emits the address of the l-value, then loads and returns the result. diff --git a/clang/test/CIR/CodeGen/opaque.c b/clang/test/CIR/CodeGen/opaque.c new file mode 100644 index 000000000000..00c11d7c65d1 --- /dev/null +++ b/clang/test/CIR/CodeGen/opaque.c @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int foo(int x, short y) { + return x ?: y; +} + +// CHECK: cir.func @foo +// CHECK: %[[Load:.*]] = cir.load +// CHECK: %[[Bool:.*]] = cir.cast(int_to_bool, %[[Load]] : !s32i), !cir.bool loc(#loc8) +// CHECK: = cir.ternary(%[[Bool]], true { +// CHECK: cir.yield %[[Load]] \ No newline at end of file From 56662cd64b52fbea59412710de9a36d37ef8b5e1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 Apr 2024 11:06:46 -0700 Subject: [PATCH 1484/2301] [CIR][CIRGen] Add skeleton for AggExprEmitter::VisitCompoundLiteralExpr --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index c0f88bb6383d..de26c9fad279 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -197,9 +197,7 @@ class AggExprEmitter : public StmtVisitor { void VisitMemberExpr(MemberExpr *E) { buildAggLoadOfLValue(E); } void VisitUnaryDeref(UnaryOperator *E) { buildAggLoadOfLValue(E); } void VisitStringLiteral(StringLiteral *E) { llvm_unreachable("NYI"); } - void VisitCompoundLIteralExpr(CompoundLiteralExpr *E) { - llvm_unreachable("NYI"); - } + void VisitCompoundLiteralExpr(CompoundLiteralExpr *E); void VisitArraySubscriptExpr(ArraySubscriptExpr *E) { buildAggLoadOfLValue(E); } @@ -814,6 +812,27 @@ void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { CGF.buildCXXConstructExpr(E, Slot); } +void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { + if (Dest.isPotentiallyAliased() && E->getType().isPODType(CGF.getContext())) { + llvm_unreachable("NYI"); + } + + AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType()); + + // Block-scope compound literals are destroyed at the end of the enclosing + // scope in C. + bool Destruct = + !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed(); + if (Destruct) + llvm_unreachable("NYI"); + + llvm_unreachable("NYI"); + + if (Destruct) + if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) + llvm_unreachable("NYI"); +} + void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { if (UnimplementedFeature::cleanups()) llvm_unreachable("NYI"); From fffcff132914929c00adcae33fc0b1a551f6649a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 Apr 2024 11:46:10 -0700 Subject: [PATCH 1485/2301] [CIR][CIRGen] Add more variations of compound literal support --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 2 +- clang/test/CIR/CodeGen/compound-literal.c | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index de26c9fad279..619ef026f410 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -826,7 +826,7 @@ void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { if (Destruct) llvm_unreachable("NYI"); - llvm_unreachable("NYI"); + CGF.buildAggExpr(E->getInitializer(), Slot); if (Destruct) if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c index ef692585c46d..32077445af1c 100644 --- a/clang/test/CIR/CodeGen/compound-literal.c +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -46,3 +46,23 @@ int foo() { // CIR: cir.store [[ONE]], [[RET_MEM]] : !s32i, cir.ptr // CIR: [[RET:%.*]] = cir.load [[RET_MEM]] : cir.ptr , !s32i // CIR: cir.return [[RET]] : !s32i + +struct G { short x, y, z; }; +struct G g(int x, int y, int z) { + return (struct G) { x, y, z }; +} + +// CIR: cir.func @g +// CIR: %[[RETVAL:.*]] = cir.alloca !ty_22G22, cir.ptr , ["__retval"] {alignment = 2 : i64} loc(#loc18) +// CIR: %[[X:.*]] = cir.get_member %[[RETVAL]][0] {name = "x"} +// CIR: cir.store {{.*}}, %[[X]] : !s16i +// CIR: %[[Y:.*]] = cir.get_member %[[RETVAL]][1] {name = "y"} +// CIR: cir.store {{.*}}, %[[Y]] : !s16i +// CIR: %[[Z:.*]] = cir.get_member %[[RETVAL]][2] {name = "z"} +// CIR: cir.store {{.*}}, %[[Z]] : !s16i +// CIR: %[[RES:.*]] = cir.load %[[RETVAL]] +// CIR: cir.return %[[RES]] + +// Nothing meaningful to test for LLVM codegen here. +// FIXME: ABI note, LLVM lowering differs from traditional LLVM codegen here, +// because the former does a memcopy + i48 load. \ No newline at end of file From 131c94f2b92a691b8369f4aec2c8f61c795e2c09 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 Apr 2024 12:12:45 -0700 Subject: [PATCH 1486/2301] [CIR][CIRGen] Add TLS codegen skeleton while building globals --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 16 +++++++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 4 ++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 0380ffffec6d..4b39b96c5fa0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -610,6 +610,18 @@ void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, Old.erase(); } +void CIRGenModule::setTLSMode(mlir::Operation *Op, const VarDecl &D) const { + assert(D.getTLSKind() && "setting TLS mode on non-TLS var!"); + llvm_unreachable("NYI"); + + // Override the TLS model if it is explicitly specified. + if (const TLSModelAttr *Attr = D.getAttr()) { + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); +} + /// If the specified mangled name is not in the module, /// create and return an mlir GlobalOp with the specified type (TODO(cir): /// address space). @@ -734,7 +746,9 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // setLinkageForGV(GV, D); if (D->getTLSKind()) { - assert(0 && "not implemented"); + if (D->getTLSKind() == VarDecl::TLS_Dynamic) + llvm_unreachable("NYI"); + setTLSMode(GV, *D); } setGVProperties(GV, D); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index affc295b3e9c..a430889abb0e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -467,6 +467,10 @@ class CIRGenModule : public CIRGenTypeCache { void setGVProperties(mlir::Operation *Op, const NamedDecl *D) const; void setGVPropertiesAux(mlir::Operation *Op, const NamedDecl *D) const; + /// Set the TLS mode for the given global Op for the thread-local + /// variable declaration D. + void setTLSMode(mlir::Operation *Op, const VarDecl &D) const; + /// Replace the present global `Old` with the given global `New`. Their symbol /// names must match; their types can be different. Usages of the old global /// will be automatically updated if their types mismatch. From 419c2721333c156e35ae93859cc9588e4aaa72fe Mon Sep 17 00:00:00 2001 From: gitoleg Date: Mon, 15 Apr 2024 23:16:39 +0300 Subject: [PATCH 1487/2301] [CIR][Codegen] IfOp flattening (#537) This PR perform flattening for `cir::IfOp` Basically, we just move the code from `LowerToLLVM.cpp` to `FlattenCFG.cpp`. There are several important things though I would like to highlight. 1) Consider the next code from the tests: ``` cir.func @foo(%arg0: !s32i) -> !s32i { %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool cir.if %4 { %5 = cir.const(#cir.int<1> : !s32i) : !s32i cir.return %5 : !s32i } else { %5 = cir.const(#cir.int<0> : !s32i) : !s32i cir.return %5 : !s32i } cir.return %arg0 : !s32i } ``` The last `cir.return` becomes unreachable after flattening and hence is not reachable in the lowering. So we got the next error: ``` error: 'cir.return' op expects parent op to be one of 'cir.func, cir.scope, cir.if, cir.switch, cir.do, cir.while, cir.for' cir.return %arg0 : !s32i ``` the parent after lowering is `llvm.func`. And this is only the beginning - the more operations will be flatten, the more similar fails will happen. Thus, I added lowering for the unreachable code as well in `LowerToLLVM.cpp`. But may be you have another solution in your mind. 2) Please, pay attention on the flattening pass - I'm not that familiar with `mlir` builders as you are, so may be I'm doing something wrong. The idea was to start flattening from the most nested operations. 3) As you requested in #516, `cir-to-llvm-internal` is renamed to `cir-flat-to-llvm`. The only thing remain undone is related to the following: > Since it would be wrong to run cir-flat-to-llvm without running cir-flatten-cfg, we should make cir-flat-to-llvm pass to require cir-flatten-cfg pass to be run before. And I'm not sure I know how to do it exactly - is there something similar to pass dependencies from LLVM IR? 4) The part of `IfOp` lowering related to elimination of the vain casts for condition branch moved directly to the lowering of `BrCondOp` with some refactoring and guarding. 5) Just note, that now `cir-opt` is able to dump the flat cir as well: `cir-opt -cir-flat-cfg` --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 67 ++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 180 +++++++++--------- clang/test/CIR/CodeGen/if.cir | 48 +++++ clang/test/CIR/mlirprint.c | 2 +- clang/tools/cir-opt/cir-opt.cpp | 4 + 5 files changed, 203 insertions(+), 98 deletions(-) create mode 100644 clang/test/CIR/CodeGen/if.cir diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 477a85df6c26..aa061c51680c 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -30,8 +30,70 @@ struct FlattenCFGPass : public FlattenCFGBase { void runOnOperation() override; }; +struct CIRIfFlattening : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::IfOp ifOp, + mlir::PatternRewriter &rewriter) const override { + mlir::OpBuilder::InsertionGuard guard(rewriter); + auto loc = ifOp.getLoc(); + auto emptyElse = ifOp.getElseRegion().empty(); + + auto *currentBlock = rewriter.getInsertionBlock(); + auto *remainingOpsBlock = + rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); + mlir::Block *continueBlock; + if (ifOp->getResults().size() == 0) + continueBlock = remainingOpsBlock; + else + llvm_unreachable("NYI"); + + // Inline then region + auto *thenBeforeBody = &ifOp.getThenRegion().front(); + auto *thenAfterBody = &ifOp.getThenRegion().back(); + rewriter.inlineRegionBefore(ifOp.getThenRegion(), continueBlock); + + rewriter.setInsertionPointToEnd(thenAfterBody); + if (auto thenYieldOp = + dyn_cast(thenAfterBody->getTerminator())) { + rewriter.replaceOpWithNewOp( + thenYieldOp, thenYieldOp.getArgs(), continueBlock); + } + + rewriter.setInsertionPointToEnd(continueBlock); + + // Has else region: inline it. + mlir::Block *elseBeforeBody = nullptr; + mlir::Block *elseAfterBody = nullptr; + if (!emptyElse) { + elseBeforeBody = &ifOp.getElseRegion().front(); + elseAfterBody = &ifOp.getElseRegion().back(); + rewriter.inlineRegionBefore(ifOp.getElseRegion(), thenAfterBody); + } else { + elseBeforeBody = elseAfterBody = continueBlock; + } + + rewriter.setInsertionPointToEnd(currentBlock); + rewriter.create(loc, ifOp.getCondition(), + thenBeforeBody, elseBeforeBody); + + if (!emptyElse) { + rewriter.setInsertionPointToEnd(elseAfterBody); + if (auto elseYieldOp = + dyn_cast(elseAfterBody->getTerminator())) { + rewriter.replaceOpWithNewOp( + elseYieldOp, elseYieldOp.getArgs(), continueBlock); + } + } + + rewriter.replaceOp(ifOp, continueBlock->getArguments()); + return mlir::success(); + } +}; + void populateFlattenCFGPatterns(RewritePatternSet &patterns) { - // TODO: add patterns here + patterns.add(patterns.getContext()); } void FlattenCFGPass::runOnOperation() { @@ -41,7 +103,8 @@ void FlattenCFGPass::runOnOperation() { // Collect operations to apply patterns. SmallVector ops; getOperation()->walk([&](Operation *op) { - // TODO: push back operations here + if (isa(op)) + ops.push_back(op); }); // Apply patterns. diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 53826ee1d8dc..adac4391d0d5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -62,7 +62,9 @@ #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include +#include #include +#include using namespace cir; using namespace llvm; @@ -561,13 +563,25 @@ class CIRBrCondOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::BrCondOp brOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto condition = adaptor.getCond(); - auto i1Condition = rewriter.create( - brOp.getLoc(), rewriter.getI1Type(), condition); + mlir::Value i1Condition; + + if (auto defOp = adaptor.getCond().getDefiningOp()) { + if (auto zext = dyn_cast(defOp)) { + if (zext->use_empty() && + zext->getOperand(0).getType() == rewriter.getI1Type()) { + i1Condition = zext->getOperand(0); + rewriter.eraseOp(zext); + } + } + } + + if (!i1Condition) + i1Condition = rewriter.create( + brOp.getLoc(), rewriter.getI1Type(), adaptor.getCond()); + rewriter.replaceOpWithNewOp( - brOp, i1Condition.getResult(), brOp.getDestTrue(), - adaptor.getDestOperandsTrue(), brOp.getDestFalse(), - adaptor.getDestOperandsFalse()); + brOp, i1Condition, brOp.getDestTrue(), adaptor.getDestOperandsTrue(), + brOp.getDestFalse(), adaptor.getDestOperandsFalse()); return mlir::success(); } @@ -771,90 +785,6 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } }; -class CIRIfLowering : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::IfOp ifOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::OpBuilder::InsertionGuard guard(rewriter); - auto loc = ifOp.getLoc(); - auto emptyElse = ifOp.getElseRegion().empty(); - - auto *currentBlock = rewriter.getInsertionBlock(); - auto *remainingOpsBlock = - rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); - mlir::Block *continueBlock; - if (ifOp->getResults().size() == 0) - continueBlock = remainingOpsBlock; - else - llvm_unreachable("NYI"); - - // Inline then region - auto *thenBeforeBody = &ifOp.getThenRegion().front(); - auto *thenAfterBody = &ifOp.getThenRegion().back(); - rewriter.inlineRegionBefore(ifOp.getThenRegion(), continueBlock); - - rewriter.setInsertionPointToEnd(thenAfterBody); - if (auto thenYieldOp = - dyn_cast(thenAfterBody->getTerminator())) { - rewriter.replaceOpWithNewOp( - thenYieldOp, thenYieldOp.getArgs(), continueBlock); - } - - rewriter.setInsertionPointToEnd(continueBlock); - - // Has else region: inline it. - mlir::Block *elseBeforeBody = nullptr; - mlir::Block *elseAfterBody = nullptr; - if (!emptyElse) { - elseBeforeBody = &ifOp.getElseRegion().front(); - elseAfterBody = &ifOp.getElseRegion().back(); - rewriter.inlineRegionBefore(ifOp.getElseRegion(), thenAfterBody); - } else { - elseBeforeBody = elseAfterBody = continueBlock; - } - - rewriter.setInsertionPointToEnd(currentBlock); - - // FIXME: CIR always lowers !cir.bool to i8 type. - // In this reason CIR CodeGen often emits the redundant zext + trunc - // sequence that prevents lowering of llvm.expect in - // LowerExpectIntrinsicPass. - // We should fix that in a more appropriate way. But as a temporary solution - // just avoid the redundant casts here. - mlir::Value condition; - auto zext = - dyn_cast(adaptor.getCondition().getDefiningOp()); - if (zext && zext->getOperand(0).getType() == rewriter.getI1Type()) { - condition = zext->getOperand(0); - if (zext->use_empty()) - rewriter.eraseOp(zext); - } else { - auto trunc = rewriter.create( - loc, rewriter.getI1Type(), adaptor.getCondition()); - condition = trunc.getRes(); - } - - rewriter.create(loc, condition, thenBeforeBody, - elseBeforeBody); - - if (!emptyElse) { - rewriter.setInsertionPointToEnd(elseAfterBody); - if (auto elseYieldOp = - dyn_cast(elseAfterBody->getTerminator())) { - rewriter.replaceOpWithNewOp( - elseYieldOp, elseYieldOp.getArgs(), continueBlock); - } - } - - rewriter.replaceOp(ifOp, continueBlock->getArguments()); - - return mlir::success(); - } -}; - class CIRScopeOpLowering : public mlir::OpConversionPattern { public: @@ -937,9 +867,7 @@ struct ConvertCIRToLLVMPass } void runOnOperation() final; - virtual StringRef getArgument() const override { - return "cir-to-llvm-internal"; - } + virtual StringRef getArgument() const override { return "cir-flat-to-llvm"; } }; class CIRCallLowering : public mlir::OpConversionPattern { @@ -3083,7 +3011,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRLoopOpInterfaceLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, - CIRFuncLowering, CIRScopeOpLowering, CIRCastOpLowering, CIRIfLowering, + CIRFuncLowering, CIRScopeOpLowering, CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, @@ -3243,6 +3171,64 @@ static void buildCtorDtorList( builder.create(loc, result); } +// The unreachable code is not lowered by applyPartialConversion function +// since it traverses blocks in the dominance order. At the same time we +// do need to lower such code - otherwise verification errors occur. +// For instance, the next CIR code: +// +// cir.func @foo(%arg0: !s32i) -> !s32i { +// %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// cir.if %4 { +// %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// cir.return %5 : !s32i +// } else { +// %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// cir.return %5 : !s32i +// } +// cir.return %arg0 : !s32i +// } +// +// contains an unreachable return operation (the last one). After the flattening +// pass it will be placed into the unreachable block. And the possible error +// after the lowering pass is: error: 'cir.return' op expects parent op to be +// one of 'cir.func, cir.scope, cir.if ... The reason that this operation was +// not lowered and the new parent is lllvm.func. +// +// In the future we may want to get rid of this function and use DCE pass or +// something similar. But now we need to guarantee the absence of the dialect +// verification errors. +void collect_unreachable(mlir::Operation *parent, + llvm::SmallVector &ops) { + + llvm::SmallVector unreachable_blocks; + parent->walk([&](mlir::Block *blk) { // check + if (blk->hasNoPredecessors() && !blk->isEntryBlock()) + unreachable_blocks.push_back(blk); + }); + + std::set visited; + for (auto *root : unreachable_blocks) { + // We create a work list for each unreachable block. + // Thus we traverse operations in some order. + std::deque workList; + workList.push_back(root); + + while (!workList.empty()) { + auto *blk = workList.back(); + workList.pop_back(); + if (visited.count(blk)) + continue; + visited.emplace(blk); + + for (auto &op : *blk) + ops.push_back(&op); + + for (auto it = blk->succ_begin(); it != blk->succ_end(); ++it) + workList.push_back(*it); + } + } +} + void ConvertCIRToLLVMPass::runOnOperation() { auto module = getOperation(); mlir::DataLayout dataLayout(module); @@ -3282,7 +3268,11 @@ void ConvertCIRToLLVMPass::runOnOperation() { getOperation()->removeAttr("cir.sob"); getOperation()->removeAttr("cir.lang"); - if (failed(applyPartialConversion(module, target, std::move(patterns)))) + llvm::SmallVector ops; + ops.push_back(module); + collect_unreachable(module, ops); + + if (failed(applyPartialConversion(ops, target, std::move(patterns)))) signalPassFailure(); // Emit the llvm.global_ctors array. diff --git a/clang/test/CIR/CodeGen/if.cir b/clang/test/CIR/CodeGen/if.cir new file mode 100644 index 000000000000..b3104fd42d66 --- /dev/null +++ b/clang/test/CIR/CodeGen/if.cir @@ -0,0 +1,48 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!s32i = !cir.int + +module { + cir.func @foo(%arg0: !s32i) -> !s32i { + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + cir.if %4 { + %5 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.return %5 : !s32i + } else { + %5 = cir.const(#cir.int<0> : !s32i) : !s32i + cir.return %5 : !s32i + } + cir.return %arg0 : !s32i + } +// CHECK: cir.func @foo(%arg0: !s32i) -> !s32i { +// CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK-NEXT: cir.brcond %0 ^bb2, ^bb1 +// CHECK-NEXT: ^bb1: // pred: ^bb0 +// CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: cir.return %1 : !s32i +// CHECK-NEXT: ^bb2: // pred: ^bb0 +// CHECK-NEXT: %2 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: cir.return %2 : !s32i +// CHECK-NEXT: ^bb3: // no predecessors +// CHECK-NEXT: cir.return %arg0 : !s32i +// CHECK-NEXT: } + + cir.func @onlyIf(%arg0: !s32i) -> !s32i { + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + cir.if %4 { + %5 = cir.const(#cir.int<1> : !s32i) : !s32i + cir.return %5 : !s32i + } + cir.return %arg0 : !s32i + } +// CHECK: cir.func @onlyIf(%arg0: !s32i) -> !s32i { +// CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 +// CHECK-NEXT: ^bb1: // pred: ^bb0 +// CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: cir.return %1 : !s32i +// CHECK-NEXT: ^bb2: // pred: ^bb0 +// CHECK-NEXT: cir.return %arg0 : !s32i +// CHECK-NEXT: } + +} \ No newline at end of file diff --git a/clang/test/CIR/mlirprint.c b/clang/test/CIR/mlirprint.c index bc8d4889f854..2f6fe5651f60 100644 --- a/clang/test/CIR/mlirprint.c +++ b/clang/test/CIR/mlirprint.c @@ -24,7 +24,7 @@ int foo(void) { // CIRFLAT: IR Dump After FlattenCFG (cir-flatten-cfg) // CIRFLAT: IR Dump After DropAST (cir-drop-ast) // CIRFLAT: cir.func @foo() -> !s32i -// LLVM: IR Dump After cir::direct::ConvertCIRToLLVMPass (cir-to-llvm-internal) +// LLVM: IR Dump After cir::direct::ConvertCIRToLLVMPass (cir-flat-to-llvm) // LLVM: llvm.func @foo() -> i32 // LLVM: IR Dump After // LLVM: define i32 @foo() diff --git a/clang/tools/cir-opt/cir-opt.cpp b/clang/tools/cir-opt/cir-opt.cpp index 8e2044ecdc9e..064aa7241c8e 100644 --- a/clang/tools/cir-opt/cir-opt.cpp +++ b/clang/tools/cir-opt/cir-opt.cpp @@ -51,6 +51,10 @@ int main(int argc, char **argv) { cir::direct::populateCIRToLLVMPasses(pm); }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return mlir::createFlattenCFGPass(); + }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return mlir::createReconcileUnrealizedCastsPass(); }); From 7eaff676afef70336f35a5fdda93cf34068c08c1 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 16 Apr 2024 04:20:47 +0800 Subject: [PATCH 1488/2301] [CIR] Add initial support for bit-precise integer types (#538) This PR adds initial support for the bit-precise integer type `_BitInt(N)`. This type goes into the C23 standard, and has already been supported by clang since 2020, previously known as `_ExtInt(N)`. This PR is quite simple and straight-forward. Basically it leverages the existing `cir.int` type to represent such types. Previously `cir.int` verifies that its width must be a multiple of 8, and this verification has been removed in this PR. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 41 ++++----- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 61 ++++++------- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 10 ++- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 18 +--- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 12 +-- clang/test/CIR/CodeGen/bitint.c | 22 +++++ clang/test/CIR/CodeGen/bitint.cpp | 86 +++++++++++++++++++ clang/test/CIR/IR/invalid.cir | 25 ++---- clang/test/CIR/Lowering/bitint.cir | 30 +++++++ 11 files changed, 208 insertions(+), 103 deletions(-) create mode 100644 clang/test/CIR/CodeGen/bitint.c create mode 100644 clang/test/CIR/CodeGen/bitint.cpp create mode 100644 clang/test/CIR/Lowering/bitint.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7da421324fb3..07a79d503f30 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -144,7 +144,7 @@ def ObjSizeOp : CIR_Op<"objsize", [Pure]> { let arguments = (ins CIR_PointerType:$ptr, SizeInfoType:$kind, UnitAttr:$dynamic); - let results = (outs CIR_IntType:$result); + let results = (outs PrimitiveInt:$result); let assemblyFormat = [{ `(` @@ -180,7 +180,7 @@ def PtrDiffOp : CIR_Op<"ptr_diff", [Pure, SameTypeOperands]> { ``` }]; - let results = (outs CIR_IntType:$result); + let results = (outs PrimitiveInt:$result); let arguments = (ins CIR_PointerType:$lhs, CIR_PointerType:$rhs); let assemblyFormat = [{ @@ -208,7 +208,7 @@ def PtrStrideOp : CIR_Op<"ptr_stride", ``` }]; - let arguments = (ins CIR_PointerType:$base, CIR_IntType:$stride); + let arguments = (ins CIR_PointerType:$base, PrimitiveInt:$stride); let results = (outs CIR_PointerType:$result); let assemblyFormat = [{ @@ -337,7 +337,7 @@ def AllocaOp : CIR_Op<"alloca", [ }]; let arguments = (ins - Optional:$dynAllocSize, + Optional:$dynAllocSize, TypeAttr:$allocaType, StrAttr:$name, UnitAttr:$init, @@ -1034,7 +1034,7 @@ class CIR_BitOp }]; } -def BitClrsbOp : CIR_BitOp<"bit.clrsb", SIntOfWidths<[32, 64]>> { +def BitClrsbOp : CIR_BitOp<"bit.clrsb", AnyTypeOf<[SInt32, SInt64]>> { let summary = "Get the number of leading redundant sign bits in the input"; let description = [{ Compute the number of leading redundant sign bits in the input integer. @@ -1065,7 +1065,7 @@ def BitClrsbOp : CIR_BitOp<"bit.clrsb", SIntOfWidths<[32, 64]>> { }]; } -def BitClzOp : CIR_BitOp<"bit.clz", UIntOfWidths<[16, 32, 64]>> { +def BitClzOp : CIR_BitOp<"bit.clz", AnyTypeOf<[UInt16, UInt32, UInt64]>> { let summary = "Get the number of leading 0-bits in the input"; let description = [{ Compute the number of leading 0-bits in the input. @@ -1090,7 +1090,7 @@ def BitClzOp : CIR_BitOp<"bit.clz", UIntOfWidths<[16, 32, 64]>> { }]; } -def BitCtzOp : CIR_BitOp<"bit.ctz", UIntOfWidths<[16, 32, 64]>> { +def BitCtzOp : CIR_BitOp<"bit.ctz", AnyTypeOf<[UInt16, UInt32, UInt64]>> { let summary = "Get the number of trailing 0-bits in the input"; let description = [{ Compute the number of trailing 0-bits in the input. @@ -1115,7 +1115,7 @@ def BitCtzOp : CIR_BitOp<"bit.ctz", UIntOfWidths<[16, 32, 64]>> { }]; } -def BitFfsOp : CIR_BitOp<"bit.ffs", SIntOfWidths<[32, 64]>> { +def BitFfsOp : CIR_BitOp<"bit.ffs", AnyTypeOf<[SInt32, SInt64]>> { let summary = "Get the position of the least significant 1-bit of input"; let description = [{ Compute the position of the least significant 1-bit of the input. @@ -1138,7 +1138,7 @@ def BitFfsOp : CIR_BitOp<"bit.ffs", SIntOfWidths<[32, 64]>> { }]; } -def BitParityOp : CIR_BitOp<"bit.parity", UIntOfWidths<[32, 64]>> { +def BitParityOp : CIR_BitOp<"bit.parity", AnyTypeOf<[UInt32, UInt64]>> { let summary = "Get the parity of input"; let description = [{ Compute the parity of the input. The parity of an integer is the number of @@ -1160,7 +1160,8 @@ def BitParityOp : CIR_BitOp<"bit.parity", UIntOfWidths<[32, 64]>> { }]; } -def BitPopcountOp : CIR_BitOp<"bit.popcount", UIntOfWidths<[16, 32, 64]>> { +def BitPopcountOp + : CIR_BitOp<"bit.popcount", AnyTypeOf<[UInt16, UInt32, UInt64]>> { let summary = "Get the number of 1-bits in input"; let description = [{ Compute the number of 1-bits in the input. @@ -1208,7 +1209,7 @@ def ByteswapOp : CIR_Op<"bswap", [Pure, SameOperandsAndResultType]> { }]; let results = (outs CIR_IntType:$result); - let arguments = (ins UIntOfWidths<[16, 32, 64]>:$input); + let arguments = (ins AnyTypeOf<[UInt16, UInt32, UInt64]>:$input); let assemblyFormat = [{ `(` $input `:` type($input) `)` `:` type($result) attr-dict @@ -1252,7 +1253,7 @@ def CmpThreeWayOp : CIR_Op<"cmp3way", [Pure, SameTypeOperands]> { ``` }]; - let results = (outs CIR_IntType:$result); + let results = (outs PrimitiveSInt:$result); let arguments = (ins CIR_AnyType:$lhs, CIR_AnyType:$rhs, CmpThreeWayInfoAttr:$info); @@ -1261,7 +1262,7 @@ def CmpThreeWayOp : CIR_Op<"cmp3way", [Pure, SameTypeOperands]> { `:` type($result) attr-dict }]; - let hasVerifier = 1; + let hasVerifier = 0; } //===----------------------------------------------------------------------===// @@ -2122,7 +2123,7 @@ def VecInsertOp : CIR_Op<"vec.insert", [Pure, element is returned. }]; - let arguments = (ins CIR_VectorType:$vec, AnyType:$value, CIR_IntType:$index); + let arguments = (ins CIR_VectorType:$vec, AnyType:$value, PrimitiveInt:$index); let results = (outs CIR_VectorType:$result); let assemblyFormat = [{ @@ -2147,7 +2148,7 @@ def VecExtractOp : CIR_Op<"vec.extract", [Pure, from a vector object. }]; - let arguments = (ins CIR_VectorType:$vec, CIR_IntType:$index); + let arguments = (ins CIR_VectorType:$vec, PrimitiveInt:$index); let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ @@ -2935,7 +2936,7 @@ def CopyOp : CIR_Op<"copy", [SameTypeOperands]> { def MemCpyOp : CIR_Op<"libc.memcpy"> { let arguments = (ins Arg:$dst, Arg:$src, - CIR_IntType:$len); + PrimitiveInt:$len); let summary = "Equivalent to libc's `memcpy`"; let description = [{ Given two CIR pointers, `src` and `dst`, `cir.libc.memcpy` will copy `len` @@ -3115,10 +3116,10 @@ def ExpectOp : CIR_Op<"expect", where probability = $prob. }]; - let arguments = (ins CIR_IntType:$val, - CIR_IntType:$expected, + let arguments = (ins PrimitiveInt:$val, + PrimitiveInt:$expected, OptionalAttr:$prob); - let results = (outs CIR_IntType:$result); + let results = (outs PrimitiveInt:$result); let assemblyFormat = [{ `(` $val`,` $expected (`,` $prob^)? `)` `:` type($val) attr-dict }]; @@ -3524,7 +3525,7 @@ def AtomicFetch : CIR_Op<"atomic.fetch", of the computation (`__atomic_binop_fetch`). }]; let results = (outs CIR_AnyIntOrFloat:$result); - let arguments = (ins IntOrFPPtr:$ptr, CIR_AnyIntOrFloat:$val, + let arguments = (ins PrimitiveIntOrFPPtr:$ptr, CIR_AnyIntOrFloat:$val, AtomicFetchKind:$binop, Arg:$mem_order, UnitAttr:$is_volatile, diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index c67d3013ed7f..5b53ab6bac6b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -53,17 +53,21 @@ def CIR_IntType : CIR_Type<"Int", "int", std::string getAlias() const { return (isSigned() ? 's' : 'u') + std::to_string(getWidth()) + 'i'; }; + /// Return true if this is a primitive integer type (i.e. signed or unsigned + /// integer types whose bit width is 8, 16, 32, or 64). + bool isPrimitive() const { + return isValidPrimitiveIntBitwidth(getWidth()); + } /// Returns a minimum bitwidth of cir::IntType - static unsigned minBitwidth() { return 8; } + static unsigned minBitwidth() { return 1; } /// Returns a maximum bitwidth of cir::IntType static unsigned maxBitwidth() { return 64; } - /// Returns true if cir::IntType can be constructed from the provided bitwidth - static bool isValidBitwidth(unsigned width) { - return width >= minBitwidth() - && width <= maxBitwidth() - && llvm::isPowerOf2_32(width); + /// Returns true if cir::IntType that represents a primitive integer type + /// can be constructed from the provided bitwidth. + static bool isValidPrimitiveIntBitwidth(unsigned width) { + return width == 8 || width == 16 || width == 32 || width == 64; } }]; let genVerifyDecl = 1; @@ -109,35 +113,15 @@ def SInt16 : SInt<16>; def SInt32 : SInt<32>; def SInt64 : SInt<64>; -// A type constraint that allows unsigned integer type whose width is among the -// specified list of possible widths. -class UIntOfWidths widths> - : Type()">, - CPred<"$_self.cast<::mlir::cir::IntType>().isUnsigned()">, - Or().getWidth() == " # w> - )> - ]>, - !interleave(!foreach(w, widths, w # "-bit"), " or ") # " uint", - "::mlir::cir::IntType" - > {} - -// A type constraint that allows unsigned integer type whose width is among the -// specified list of possible widths. -class SIntOfWidths widths> - : Type()">, - CPred<"$_self.cast<::mlir::cir::IntType>().isSigned()">, - Or().getWidth() == " # w> - )> - ]>, - !interleave(!foreach(w, widths, w # "-bit"), " or ") # " sint", - "::mlir::cir::IntType" - > {} +def PrimitiveUInt + : AnyTypeOf<[UInt8, UInt16, UInt32, UInt64], "primitive unsigned int", + "::mlir::cir::IntType">; +def PrimitiveSInt + : AnyTypeOf<[SInt8, SInt16, SInt32, SInt64], "primitive signed int", + "::mlir::cir::IntType">; +def PrimitiveInt + : AnyTypeOf<[UInt8, UInt16, UInt32, UInt64, SInt8, SInt16, SInt32, SInt64], + "primitive int", "::mlir::cir::IntType">; //===----------------------------------------------------------------------===// // FloatType @@ -374,8 +358,8 @@ def VoidPtr : Type< "mlir::cir::VoidType::get($_builder.getContext()))"> { } -// Pointer to int, float or double -def IntOrFPPtr : Type< +// Pointer to a primitive int, float or double +def PrimitiveIntOrFPPtr : Type< And<[ CPred<"$_self.isa<::mlir::cir::PointerType>()">, CPred<"$_self.cast<::mlir::cir::PointerType>()" @@ -429,6 +413,9 @@ def IntegerVector : Type< CPred<"$_self.isa<::mlir::cir::VectorType>()">, CPred<"$_self.cast<::mlir::cir::VectorType>()" ".getEltType().isa<::mlir::cir::IntType>()">, + CPred<"$_self.cast<::mlir::cir::VectorType>()" + ".getEltType().cast<::mlir::cir::IntType>()" + ".isPrimitive()"> ]>, "!cir.vector of !cir.int"> { } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 205a14baaa8a..6784a1ce9c94 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -328,7 +328,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { case 64: return getUInt64Ty(); default: - llvm_unreachable("Unknown bit-width"); + return mlir::cir::IntType::get(getContext(), N, false); } } @@ -343,7 +343,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { case 64: return getSInt64Ty(); default: - llvm_unreachable("Unknown bit-width"); + return mlir::cir::IntType::get(getContext(), N, true); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 966558f170ea..c34a3211c383 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -251,7 +251,11 @@ mlir::Type CIRGenTypes::convertTypeForMem(clang::QualType qualType, mlir::Type convertedType = ConvertType(qualType); assert(!forBitField && "Bit fields NYI"); - assert(!qualType->isBitIntType() && "BitIntType NYI"); + + // If this is a bit-precise integer type in a bitfield representation, map + // this integer to the target-specified size. + if (forBitField && qualType->isBitIntType()) + assert(!qualType->isBitIntType() && "Bit field with type _BitInt NYI"); return convertedType; } @@ -728,7 +732,9 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; } case Type::BitInt: { - assert(0 && "not implemented"); + const auto *bitIntTy = cast(Ty); + ResultType = mlir::cir::IntType::get( + Builder.getContext(), bitIntTy->getNumBits(), bitIntTy->isSigned()); break; } } diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 59936e1187f3..ef4ad098f4e6 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -159,7 +159,7 @@ struct CIRRecordLowering final { // structures support. mlir::Type getBitfieldStorageType(unsigned numBits) { unsigned alignedBits = llvm::alignTo(numBits, astContext.getCharWidth()); - if (mlir::cir::IntType::isValidBitwidth(alignedBits)) { + if (mlir::cir::IntType::isValidPrimitiveIntBitwidth(alignedBits)) { return builder.getUIntNTy(alignedBits); } else { mlir::Type type = getCharType(); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9a08d82b60bd..f21d5e4e4b7b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -64,6 +64,10 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { return AliasResult::OverridableAlias; } if (auto intType = type.dyn_cast()) { + // We only provide alias for standard integer types (i.e. integer types + // whose width is divisible by 8). + if (intType.getWidth() % 8 != 0) + return AliasResult::NoAlias; os << intType.getAlias(); return AliasResult::OverridableAlias; } @@ -940,20 +944,6 @@ Block *BrCondOp::getSuccessorForOperands(ArrayRef operands) { return nullptr; } -//===----------------------------------------------------------------------===// -// CmpThreeWayOp -//===----------------------------------------------------------------------===// - -LogicalResult CmpThreeWayOp::verify() { - // Type of the result must be a signed integer type. - if (!getType().isSigned()) { - emitOpError() << "result type of cir.cmp3way must be a signed integer type"; - return failure(); - } - - return success(); -} - //===----------------------------------------------------------------------===// // SwitchOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 66bce73a9c11..a667494b6f89 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -600,12 +600,8 @@ Type IntType::parse(mlir::AsmParser &parser) { // Fetch integer size. if (parser.parseInteger(width)) return {}; - if (width % 8 != 0) { - parser.emitError(loc, "expected integer width to be a multiple of 8"); - return {}; - } - if (width < 8 || width > 64) { - parser.emitError(loc, "expected integer width to be from 8 up to 64"); + if (width < 1 || width > 64) { + parser.emitError(loc, "expected integer width to be from 1 up to 64"); return {}; } @@ -646,10 +642,6 @@ IntType::verify(llvm::function_ref emitError, << IntType::minBitwidth() << "up to " << IntType::maxBitwidth(); return mlir::failure(); } - if (width % 8 != 0) { - emitError() << "IntType width is not a multiple of 8"; - return mlir::failure(); - } return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/bitint.c b/clang/test/CIR/CodeGen/bitint.c new file mode 100644 index 000000000000..51111ee1dafc --- /dev/null +++ b/clang/test/CIR/CodeGen/bitint.c @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void VLATest(_BitInt(3) A, _BitInt(42) B, _BitInt(17) C) { + int AR1[A]; + int AR2[B]; + int AR3[C]; +} + +// CHECK: cir.func @VLATest +// CHECK: %[[#A:]] = cir.load %{{.+}} : cir.ptr >, !cir.int +// CHECK-NEXT: %[[#A_PROMOTED:]] = cir.cast(integral, %[[#A]] : !cir.int), !u64i +// CHECK-NEXT: %[[#SP:]] = cir.stack_save : !cir.ptr +// CHECK-NEXT: cir.store %[[#SP]], %{{.+}} : !cir.ptr, cir.ptr > +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, cir.ptr , %[[#A_PROMOTED]] : !u64i +// CHECK-NEXT: %[[#B:]] = cir.load %1 : cir.ptr >, !cir.int +// CHECK-NEXT: %[[#B_PROMOTED:]] = cir.cast(integral, %[[#B]] : !cir.int), !u64i +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, cir.ptr , %[[#B_PROMOTED]] : !u64i +// CHECK-NEXT: %[[#C:]] = cir.load %2 : cir.ptr >, !cir.int +// CHECK-NEXT: %[[#C_PROMOTED:]] = cir.cast(integral, %[[#C]] : !cir.int), !u64i +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, cir.ptr , %[[#C_PROMOTED]] : !u64i +// CHECK: } diff --git a/clang/test/CIR/CodeGen/bitint.cpp b/clang/test/CIR/CodeGen/bitint.cpp new file mode 100644 index 000000000000..fad50e1ee858 --- /dev/null +++ b/clang/test/CIR/CodeGen/bitint.cpp @@ -0,0 +1,86 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +using i10 = signed _BitInt(10); +using u10 = unsigned _BitInt(10); + +unsigned _BitInt(1) GlobSize1 = 0; +// CHECK: cir.global external @GlobSize1 = #cir.int<0> : !cir.int + +i10 test_signed(i10 arg) { + return arg; +} + +// CHECK: cir.func @_Z11test_signedDB10_(%arg0: !cir.int loc({{.*}}) -> !cir.int +// CHECK: } + +u10 test_unsigned(u10 arg) { + return arg; +} + +// CHECK: cir.func @_Z13test_unsignedDU10_(%arg0: !cir.int loc({{.*}}) -> !cir.int +// CHECK: } + +i10 test_init() { + return 42; +} + +// CHECK: cir.func @_Z9test_initv() -> !cir.int +// CHECK: %[[#LITERAL:]] = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#LITERAL]] : !s32i), !cir.int +// CHECK: } + +void test_init_for_mem() { + i10 x = 42; +} + +// CHECK: cir.func @_Z17test_init_for_memv() +// CHECK: %[[#LITERAL:]] = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK-NEXT: %[[#INIT:]] = cir.cast(integral, %[[#LITERAL]] : !s32i), !cir.int +// CHECK-NEXT: cir.store %[[#INIT]], %{{.+}} : !cir.int, cir.ptr > +// CHECK: } + +i10 test_arith(i10 lhs, i10 rhs) { + return lhs + rhs; +} + +// CHECK: cir.func @_Z10test_arithDB10_S_(%arg0: !cir.int loc({{.+}}), %arg1: !cir.int loc({{.+}})) -> !cir.int +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : cir.ptr >, !cir.int +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : cir.ptr >, !cir.int +// CHECK-NEXT: %{{.+}} = cir.binop(add, %[[#LHS]], %[[#RHS]]) : !cir.int +// CHECK: } + +void Size1ExtIntParam(unsigned _BitInt(1) A) { + unsigned _BitInt(1) B[5]; + B[2] = A; +} + +// CHECK: cir.func @_Z16Size1ExtIntParamDU1_ +// CHECK: %[[#A:]] = cir.load %{{.+}} : cir.ptr >, !cir.int +// CHECK-NEXT: %[[#IDX:]] = cir.const(#cir.int<2> : !s32i) : !s32i +// CHECK-NEXT: %[[#ARRAY:]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr x 5>>), !cir.ptr> +// CHECK-NEXT: %[[#PTR:]] = cir.ptr_stride(%[[#ARRAY]] : !cir.ptr>, %[[#IDX]] : !s32i), !cir.ptr> +// CHECK-NEXT: cir.store %[[#A]], %[[#PTR]] : !cir.int, cir.ptr > +// CHECK: } + +struct S { + _BitInt(17) A; + _BitInt(10) B; + _BitInt(17) C; +}; + +void OffsetOfTest(void) { + int A = __builtin_offsetof(struct S,A); + int B = __builtin_offsetof(struct S,B); + int C = __builtin_offsetof(struct S,C); +} + +// CHECK: cir.func @_Z12OffsetOfTestv() +// CHECK: %{{.+}} = cir.const(#cir.int<0> : !u64i) : !u64i +// CHECK: %{{.+}} = cir.const(#cir.int<4> : !u64i) : !u64i +// CHECK: %{{.+}} = cir.const(#cir.int<8> : !u64i) : !u64i +// CHECK: } + +_BitInt(2) ParamPassing(_BitInt(15) a, _BitInt(31) b) {} + +// CHECK: cir.func @_Z12ParamPassingDB15_DB31_(%arg0: !cir.int loc({{.+}}), %arg1: !cir.int loc({{.+}})) -> !cir.int diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 0246b47b934e..ba7758b2ca56 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -564,7 +564,7 @@ module { // // ----- module { - // expected-error@below {{expected integer width to be from 8 up to 64}} + // expected-error@below {{expected integer width to be from 1 up to 64}} cir.func @l0(%arg0: !cir.int) -> () { cir.return } @@ -572,15 +572,6 @@ module { // ----- -module { - // expected-error@below {{expected integer width to be a multiple of 8}} - cir.func @l0(%arg0: !cir.int) -> () { - cir.return - } -} - -// ----- - module { // expected-error@below {{integer value too large for the given type}} cir.global external @a = #cir.int<256> : !cir.int @@ -803,7 +794,7 @@ module { !s32i = !cir.int module { cir.func @tmp(%arg0: !cir.float) { - // expected-error@+1 {{operand #0 must be Integer type}} + // expected-error@+1 {{operand #0 must be primitive int}} %0 = cir.alloca !s32i, cir.ptr , %arg0 : !cir.float, ["tmp"] cir.return } @@ -917,7 +908,7 @@ module { !u32i = !cir.int cir.func @clrsb_invalid_input_ty(%arg0 : !u32i) -> () { - // expected-error@+1 {{'cir.bit.clrsb' op operand #0 must be 32-bit or 64-bit sint, but got '!cir.int'}} + // expected-error@+1 {{'cir.bit.clrsb' op operand #0 must be 32-bit signed integer or 64-bit signed integer, but got '!cir.int'}} %0 = cir.bit.clrsb(%arg0 : !u32i) : !s32i cir.return } @@ -938,7 +929,7 @@ cir.func @clrsb_invalid_result_ty(%arg0 : !s32i) -> () { !s32i = !cir.int cir.func @clz_invalid_input_ty(%arg0 : !s32i) -> () { - // expected-error@+1 {{'cir.bit.clz' op operand #0 must be 16-bit or 32-bit or 64-bit uint, but got '!cir.int'}} + // expected-error@+1 {{'cir.bit.clz' op operand #0 must be 16-bit unsigned integer or 32-bit unsigned integer or 64-bit unsigned integer, but got '!cir.int'}} %0 = cir.bit.clz(%arg0 : !s32i) : !s32i cir.return } @@ -958,7 +949,7 @@ cir.func @clz_invalid_result_ty(%arg0 : !u32i) -> () { !s32i = !cir.int cir.func @ctz_invalid_input_ty(%arg0 : !s32i) -> () { - // expected-error@+1 {{'cir.bit.ctz' op operand #0 must be 16-bit or 32-bit or 64-bit uint, but got '!cir.int'}} + // expected-error@+1 {{'cir.bit.ctz' op operand #0 must be 16-bit unsigned integer or 32-bit unsigned integer or 64-bit unsigned integer, but got '!cir.int'}} %0 = cir.bit.ctz(%arg0 : !s32i) : !s32i cir.return } @@ -979,7 +970,7 @@ cir.func @ctz_invalid_result_ty(%arg0 : !u32i) -> () { !u32i = !cir.int cir.func @ffs_invalid_input_ty(%arg0 : !u32i) -> () { - // expected-error@+1 {{'cir.bit.ffs' op operand #0 must be 32-bit or 64-bit sint, but got '!cir.int'}} + // expected-error@+1 {{'cir.bit.ffs' op operand #0 must be 32-bit signed integer or 64-bit signed integer, but got '!cir.int'}} %0 = cir.bit.ffs(%arg0 : !u32i) : !s32i cir.return } @@ -1000,7 +991,7 @@ cir.func @ffs_invalid_result_ty(%arg0 : !s32i) -> () { !s32i = !cir.int cir.func @parity_invalid_input_ty(%arg0 : !s32i) -> () { - // expected-error@+1 {{'cir.bit.parity' op operand #0 must be 32-bit or 64-bit uint, but got '!cir.int'}} + // expected-error@+1 {{'cir.bit.parity' op operand #0 must be 32-bit unsigned integer or 64-bit unsigned integer, but got '!cir.int'}} %0 = cir.bit.parity(%arg0 : !s32i) : !s32i cir.return } @@ -1020,7 +1011,7 @@ cir.func @parity_invalid_result_ty(%arg0 : !u32i) -> () { !s32i = !cir.int cir.func @popcount_invalid_input_ty(%arg0 : !s32i) -> () { - // expected-error@+1 {{'cir.bit.popcount' op operand #0 must be 16-bit or 32-bit or 64-bit uint, but got '!cir.int'}} + // expected-error@+1 {{'cir.bit.popcount' op operand #0 must be 16-bit unsigned integer or 32-bit unsigned integer or 64-bit unsigned integer, but got '!cir.int'}} %0 = cir.bit.popcount(%arg0 : !s32i) : !s32i cir.return } diff --git a/clang/test/CIR/Lowering/bitint.cir b/clang/test/CIR/Lowering/bitint.cir new file mode 100644 index 000000000000..f89278b5faf7 --- /dev/null +++ b/clang/test/CIR/Lowering/bitint.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int + +module { + cir.func @ParamPassing(%arg0: !cir.int, %arg1: !cir.int) -> !cir.int { + %0 = cir.cast(integral, %arg0 : !cir.int), !s32i + %1 = cir.cast(integral, %arg1 : !cir.int), !s32i + %2 = cir.binop(add, %0, %1) : !s32i + %3 = cir.cast(integral, %2 : !s32i), !cir.int + cir.return %3 : !cir.int + } +} + +// MLIR: llvm.func @ParamPassing(%arg0: i15, %arg1: i31) -> i2 +// MLIR-NEXT: %0 = llvm.sext %arg0 : i15 to i32 +// MLIR-NEXT: %1 = llvm.sext %arg1 : i31 to i32 +// MLIR-NEXT: %2 = llvm.add %0, %1 : i32 +// MLIR-NEXT: %3 = llvm.trunc %2 : i32 to i2 +// MLIR-NEXT: llvm.return %3 : i2 +// MLIR-NEXT: } + +// LLVM: define i2 @ParamPassing(i15 %0, i31 %1) !dbg !3 { +// LLVM-NEXT: %3 = sext i15 %0 to i32, !dbg !6 +// LLVM-NEXT: %4 = sext i31 %1 to i32, !dbg !7 +// LLVM-NEXT: %5 = add i32 %3, %4, !dbg !8 +// LLVM-NEXT: %6 = trunc i32 %5 to i2, !dbg !9 +// LLVM-NEXT: ret i2 %6, !dbg !10 +// LLVM-NEXT: } From 6810bbc8d53d02e4b3b53e0d749c3fc03619e7fd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 15 Apr 2024 15:40:42 -0700 Subject: [PATCH 1489/2301] [CIR] Add TLS models to CIR and teach GlobalOp about them --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 18 ++++++++++++++++++ clang/test/CIR/IR/global.cir | 12 +++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 07a79d503f30..fa02176b0a5d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1659,6 +1659,22 @@ def SignedOverflowBehaviorEnum : I32EnumAttr< let cppNamespace = "::mlir::cir::sob"; } +/// Definition of TLS related kinds. +def TLS_GeneralDynamic : + I32EnumAttrCase<"GeneralDynamic", 0, "tls_dyn">; +def TLS_LocalDynamic : + I32EnumAttrCase<"LocalDynamic", 1, "tls_local_dyn">; +def TLS_InitialExec : + I32EnumAttrCase<"InitialExec", 2, "tls_init_exec">; +def TLS_LocalExec : + I32EnumAttrCase<"LocalExec", 3, "tls_local_exec">; + +def TLSModel : I32EnumAttr< + "TLS_Model", + "TLS model", + [TLS_GeneralDynamic, TLS_LocalDynamic, TLS_InitialExec, TLS_LocalExec]> { + let cppNamespace = "::mlir::cir"; +} def GlobalOp : CIR_Op<"global", [Symbol, DeclareOpInterfaceMethods, NoRegionArguments]> { let summary = "Declares or defines a global variable"; @@ -1694,6 +1710,7 @@ def GlobalOp : CIR_Op<"global", [Symbol, DeclareOpInterfaceMethods:$sym_visibility, TypeAttr:$sym_type, Arg:$linkage, + OptionalAttr:$tls_model, // Note this can also be a FlatSymbolRefAttr OptionalAttr:$initial_value, UnitAttr:$constant, @@ -1706,6 +1723,7 @@ def GlobalOp : CIR_Op<"global", [Symbol, DeclareOpInterfaceMethods($sym_type, $initial_value, $ctorRegion, $dtorRegion) attr-dict diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 47b09a946973..786d212e9692 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -58,6 +58,11 @@ module { cir.func @f34() global_dtor(777) { cir.return } + + cir.global external tls_dyn @model0 = #cir.int<0> : !s32i + cir.global external tls_local_dyn @model1 = #cir.int<0> : !s32i + cir.global external tls_init_exec @model2 = #cir.int<0> : !s32i + cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i } // CHECK: cir.global external @a = #cir.int<3> : !s32i @@ -81,4 +86,9 @@ module { // CHECK: cir.func @f31() global_ctor // CHECK: cir.func @f32() global_ctor(777) // CHECK: cir.func @f33() global_dtor -// CHECK: cir.func @f34() global_dtor(777) \ No newline at end of file +// CHECK: cir.func @f34() global_dtor(777) + +// CHECK: cir.global external tls_dyn @model0 = #cir.int<0> : !s32i +// CHECK: cir.global external tls_local_dyn @model1 = #cir.int<0> : !s32i +// CHECK: cir.global external tls_init_exec @model2 = #cir.int<0> : !s32i +// CHECK: cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i \ No newline at end of file From 679762e8a512b6fbbb6660b00f2d1fe13ff11fda Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 15 Apr 2024 15:49:16 -0700 Subject: [PATCH 1490/2301] [CIR][CIRGen] Add initial __thread support --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 28 +++++++++++++++---- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 9 ++++-- clang/test/CIR/CodeGen/tls.c | 8 ++++++ 4 files changed, 40 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/tls.c diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 4b39b96c5fa0..0814f82209c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -610,16 +610,33 @@ void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, Old.erase(); } +mlir::cir::TLS_Model CIRGenModule::GetDefaultCIRTLSModel() const { + switch (getCodeGenOpts().getDefaultTLSModel()) { + case CodeGenOptions::GeneralDynamicTLSModel: + return mlir::cir::TLS_Model::GeneralDynamic; + case CodeGenOptions::LocalDynamicTLSModel: + return mlir::cir::TLS_Model::LocalDynamic; + case CodeGenOptions::InitialExecTLSModel: + return mlir::cir::TLS_Model::InitialExec; + case CodeGenOptions::LocalExecTLSModel: + return mlir::cir::TLS_Model::LocalExec; + } + llvm_unreachable("Invalid TLS model!"); +} + void CIRGenModule::setTLSMode(mlir::Operation *Op, const VarDecl &D) const { assert(D.getTLSKind() && "setting TLS mode on non-TLS var!"); - llvm_unreachable("NYI"); + + auto TLM = GetDefaultCIRTLSModel(); // Override the TLS model if it is explicitly specified. if (const TLSModelAttr *Attr = D.getAttr()) { llvm_unreachable("NYI"); } - llvm_unreachable("NYI"); + auto global = dyn_cast(Op); + assert(global && "NYI for other operations"); + global.setTlsModel(TLM); } /// If the specified mangled name is not in the module, @@ -1116,9 +1133,10 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // TODO(cir): setNonAliasAttributes(D, GV); - // TODO(cir): handle TLSKind if GV is not thread local - if (D->getTLSKind()) { // && !GV->isThreadLocal()) - assert(0 && "not implemented"); + if (D->getTLSKind() && !GV.getTlsModelAttr()) { + if (D->getTLSKind() == VarDecl::TLS_Dynamic) + llvm_unreachable("NYI"); + setTLSMode(GV, *D); } // TODO(cir): maybeSetTrivialComdat(*D, *GV); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index a430889abb0e..053aad7e6862 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -471,6 +471,9 @@ class CIRGenModule : public CIRGenTypeCache { /// variable declaration D. void setTLSMode(mlir::Operation *Op, const VarDecl &D) const; + /// Get TLS mode from CodeGenOptions. + mlir::cir::TLS_Model GetDefaultCIRTLSModel() const; + /// Replace the present global `Old` with the given global `New`. Their symbol /// names must match; their types can be different. Usages of the old global /// will be automatically updated if their types mismatch. diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index adac4391d0d5..2371aca38a08 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1728,9 +1728,12 @@ class CIRGlobalOpLowering inline void setupRegionInitializedLLVMGlobalOp( mlir::cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const { const auto llvmType = getTypeConverter()->convertType(op.getSymType()); + SmallVector attributes; auto newGlobalOp = rewriter.replaceOpWithNewOp( op, llvmType, op.getConstant(), convertLinkage(op.getLinkage()), - op.getSymName(), nullptr); + op.getSymName(), nullptr, /*alignment*/ 0, /*addrSpace*/ 0, + /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), + /*comdat*/ mlir::SymbolRefAttr(), attributes); newGlobalOp.getRegion().push_back(new mlir::Block()); rewriter.setInsertionPointToEnd(newGlobalOp.getInitializerBlock()); } @@ -1758,7 +1761,7 @@ class CIRGlobalOpLowering rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, mlir::Attribute(), /*alignment*/ 0, /*addrSpace*/ 0, - /*dsoLocal*/ false, /*threadLocal*/ false, + /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); return mlir::success(); } @@ -1838,7 +1841,7 @@ class CIRGlobalOpLowering rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, init.value(), /*alignment*/ 0, /*addrSpace*/ 0, - /*dsoLocal*/ false, /*threadLocal*/ false, + /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/tls.c b/clang/test/CIR/CodeGen/tls.c new file mode 100644 index 000000000000..c63390152a76 --- /dev/null +++ b/clang/test/CIR/CodeGen/tls.c @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +__thread int a; +// CIR: cir.global external tls_dyn @a = #cir.int<0> : !s32i +// LLVM: @a = thread_local global i32 0 From 9e1ca2750010a56b012e463995dbec1525d1040a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 15 Apr 2024 16:49:26 -0700 Subject: [PATCH 1491/2301] [CIR][CIRGen][LLVMLowering] Add support retrieving thread local global addresses --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 27 ++++++++++++------- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 +++--- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 ++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 8 ++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11 +++++++- clang/test/CIR/CodeGen/tls.c | 11 ++++++++ clang/test/CIR/IR/global.cir | 14 +++++++++- clang/test/CIR/IR/invalid.cir | 13 +++++++++ 9 files changed, 80 insertions(+), 22 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index fa02176b0a5d..8b0fe38950e5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1775,24 +1775,31 @@ def GetGlobalOp : CIR_Op<"get_global", [Pure, DeclareOpInterfaceMethods]> { let summary = "Get the address of a global variable"; let description = [{ - The `cir.get_global` operation retrieves the address pointing to a - named global variable. If the global variable is marked constant, writing - to the resulting address (such as through a `cir.store` operation) is - undefined. Resulting type must always be a `!cir.ptr<...>` type. + The `cir.get_global` operation retrieves the address pointing to a + named global variable. If the global variable is marked constant, writing + to the resulting address (such as through a `cir.store` operation) is + undefined. Resulting type must always be a `!cir.ptr<...>` type. - Example: + Addresses of thread local globals can only be retrieved if this operation + is marked `thread_local`, which indicates the address isn't constant. - ```mlir - %x = cir.get_global @foo : !cir.ptr - ``` + Example: + ```mlir + %x = cir.get_global @foo : !cir.ptr + ... + %y = cir.get_global thread_local @batata : !cir.ptr + ``` }]; - let arguments = (ins FlatSymbolRefAttr:$name); + let arguments = (ins FlatSymbolRefAttr:$name, UnitAttr:$tls); let results = (outs Res:$addr); // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. - let assemblyFormat = "$name `:` `cir.ptr` type($addr) attr-dict"; + let assemblyFormat = [{ + (`thread_local` $tls^)? + $name `:` `cir.ptr` type($addr) attr-dict + }]; // `GetGlobalOp` is fully verified by its traits. let hasVerifier = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 6784a1ce9c94..deed6ffe63d8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -697,9 +697,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, uniqueName, type, isConst, linkage); } - mlir::Value createGetGlobal(mlir::cir::GlobalOp global) { - return create( - global.getLoc(), getPointerTo(global.getSymType()), global.getName()); + mlir::Value createGetGlobal(mlir::cir::GlobalOp global, + bool threadLocal = false) { + return create(global.getLoc(), + getPointerTo(global.getSymType()), + global.getName(), threadLocal); } mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 954ea854e026..6698e320c007 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -720,11 +720,10 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, if (CGF.getLangOpts().OpenMP) llvm_unreachable("not implemented"); + // Traditional LLVM codegen handles thread local separately, CIR handles + // as part of getAddrOfGlobalVar. auto V = CGF.CGM.getAddrOfGlobalVar(VD); - if (VD->getTLSKind() != VarDecl::TLS_None) - llvm_unreachable("NYI"); - auto RealVarTy = CGF.getTypes().convertTypeForMem(VD->getType()); auto realPtrTy = CGF.getBuilder().getPointerTo(RealVarTy); if (realPtrTy != V.getType()) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 0814f82209c9..aee1fa47edad 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -837,11 +837,12 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty, if (!Ty) Ty = getTypes().convertTypeForMem(ASTTy); + bool tlsAccess = D->getTLSKind() != VarDecl::TLS_None; auto g = buildGlobal(D, Ty, IsForDefinition); auto ptrTy = mlir::cir::PointerType::get(builder.getContext(), g.getSymType()); - return builder.create(getLoc(D->getSourceRange()), - ptrTy, g.getSymName()); + return builder.create( + getLoc(D->getSourceRange()), ptrTy, g.getSymName(), tlsAccess); } mlir::cir::GlobalViewAttr diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f21d5e4e4b7b..38b6a1a8cfb6 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1634,9 +1634,13 @@ GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { << "' does not reference a valid cir.global or cir.func"; mlir::Type symTy; - if (auto g = dyn_cast(op)) + if (auto g = dyn_cast(op)) { symTy = g.getSymType(); - else if (auto f = dyn_cast(op)) + // Verify that for thread local global access, the global needs to + // be marked with tls bits. + if (getTls() && !g.getTlsModel()) + return emitOpError("access to global not marked thread local"); + } else if (auto f = dyn_cast(op)) symTy = f.getFunctionType(); else llvm_unreachable("shall not get here"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 2371aca38a08..f19de97f9160 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1614,7 +1614,16 @@ class CIRGetGlobalOpLowering auto type = getTypeConverter()->convertType(op.getType()); auto symbol = op.getName(); - rewriter.replaceOpWithNewOp(op, type, symbol); + mlir::Operation *newop = + rewriter.create(op.getLoc(), type, symbol); + + if (op.getTls()) { + // Handle access to TLS via intrinsic. + newop = rewriter.create( + op.getLoc(), type, newop->getResult(0)); + } + + rewriter.replaceOp(op, newop); return mlir::success(); } }; diff --git a/clang/test/CIR/CodeGen/tls.c b/clang/test/CIR/CodeGen/tls.c index c63390152a76..2a3ebda00744 100644 --- a/clang/test/CIR/CodeGen/tls.c +++ b/clang/test/CIR/CodeGen/tls.c @@ -3,6 +3,17 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s +extern __thread int b; +int c(void) { return *&b; } +// CIR: cir.global "private" external tls_dyn @b : !s32i +// CIR: cir.func @c() -> !s32i +// CIR: %[[TLS_ADDR:.*]] = cir.get_global thread_local @b : cir.ptr + __thread int a; // CIR: cir.global external tls_dyn @a = #cir.int<0> : !s32i + +// LLVM: @b = external thread_local global i32 // LLVM: @a = thread_local global i32 0 + +// LLVM-LABEL: @c +// LLVM: = call ptr @llvm.threadlocal.address.p0(ptr @b) \ No newline at end of file diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 786d212e9692..a9a5e1e5809c 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -63,6 +63,12 @@ module { cir.global external tls_local_dyn @model1 = #cir.int<0> : !s32i cir.global external tls_init_exec @model2 = #cir.int<0> : !s32i cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i + + cir.global "private" external tls_dyn @batata : !s32i + cir.func @f35() { + %0 = cir.get_global thread_local @batata : cir.ptr + cir.return + } } // CHECK: cir.global external @a = #cir.int<3> : !s32i @@ -91,4 +97,10 @@ module { // CHECK: cir.global external tls_dyn @model0 = #cir.int<0> : !s32i // CHECK: cir.global external tls_local_dyn @model1 = #cir.int<0> : !s32i // CHECK: cir.global external tls_init_exec @model2 = #cir.int<0> : !s32i -// CHECK: cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i \ No newline at end of file +// CHECK: cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i + +// CHECK: cir.global "private" external tls_dyn @batata : !s32i +// CHECK: cir.func @f35() { +// CHECK: %0 = cir.get_global thread_local @batata : cir.ptr +// CHECK: cir.return +// CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index ba7758b2ca56..6d738aac0ae4 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1033,3 +1033,16 @@ cir.func @bad_fetch(%x: !cir.ptr, %y: !cir.float) -> () { %12 = cir.atomic.fetch(xor, %x : !cir.ptr, %y : !cir.float, seq_cst) : !cir.float cir.return } + +// ----- + +!s32i = !cir.int + +module { + cir.global "private" external @batata : !s32i + cir.func @f35() { + // expected-error@+1 {{access to global not marked thread local}} + %0 = cir.get_global thread_local @batata : cir.ptr + cir.return + } +} \ No newline at end of file From 736cacabba2ebdc887625fe74952dc644fdfbd23 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Tue, 16 Apr 2024 17:44:51 -0400 Subject: [PATCH 1492/2301] [CIR][LLVMLowering] Lower cir.objectsize (#545) Lowers `cir.objectsize` to `llvm.objectsize` --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 27 +++++------ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 ++-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 ++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 +-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 8 +--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 45 ++++++++++++++----- clang/test/CIR/CodeGen/pass-object-size.c | 29 ++++++++++++ clang/test/CIR/CodeGen/tls.c | 11 ----- clang/test/CIR/IR/global.cir | 14 +----- clang/test/CIR/IR/invalid.cir | 13 ------ 10 files changed, 83 insertions(+), 82 deletions(-) create mode 100644 clang/test/CIR/CodeGen/pass-object-size.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8b0fe38950e5..fa02176b0a5d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1775,31 +1775,24 @@ def GetGlobalOp : CIR_Op<"get_global", [Pure, DeclareOpInterfaceMethods]> { let summary = "Get the address of a global variable"; let description = [{ - The `cir.get_global` operation retrieves the address pointing to a - named global variable. If the global variable is marked constant, writing - to the resulting address (such as through a `cir.store` operation) is - undefined. Resulting type must always be a `!cir.ptr<...>` type. + The `cir.get_global` operation retrieves the address pointing to a + named global variable. If the global variable is marked constant, writing + to the resulting address (such as through a `cir.store` operation) is + undefined. Resulting type must always be a `!cir.ptr<...>` type. - Addresses of thread local globals can only be retrieved if this operation - is marked `thread_local`, which indicates the address isn't constant. + Example: - Example: - ```mlir - %x = cir.get_global @foo : !cir.ptr - ... - %y = cir.get_global thread_local @batata : !cir.ptr - ``` + ```mlir + %x = cir.get_global @foo : !cir.ptr + ``` }]; - let arguments = (ins FlatSymbolRefAttr:$name, UnitAttr:$tls); + let arguments = (ins FlatSymbolRefAttr:$name); let results = (outs Res:$addr); // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. - let assemblyFormat = [{ - (`thread_local` $tls^)? - $name `:` `cir.ptr` type($addr) attr-dict - }]; + let assemblyFormat = "$name `:` `cir.ptr` type($addr) attr-dict"; // `GetGlobalOp` is fully verified by its traits. let hasVerifier = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index deed6ffe63d8..6784a1ce9c94 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -697,11 +697,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, uniqueName, type, isConst, linkage); } - mlir::Value createGetGlobal(mlir::cir::GlobalOp global, - bool threadLocal = false) { - return create(global.getLoc(), - getPointerTo(global.getSymType()), - global.getName(), threadLocal); + mlir::Value createGetGlobal(mlir::cir::GlobalOp global) { + return create( + global.getLoc(), getPointerTo(global.getSymType()), global.getName()); } mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 6698e320c007..954ea854e026 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -720,10 +720,11 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, if (CGF.getLangOpts().OpenMP) llvm_unreachable("not implemented"); - // Traditional LLVM codegen handles thread local separately, CIR handles - // as part of getAddrOfGlobalVar. auto V = CGF.CGM.getAddrOfGlobalVar(VD); + if (VD->getTLSKind() != VarDecl::TLS_None) + llvm_unreachable("NYI"); + auto RealVarTy = CGF.getTypes().convertTypeForMem(VD->getType()); auto realPtrTy = CGF.getBuilder().getPointerTo(RealVarTy); if (realPtrTy != V.getType()) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index aee1fa47edad..0814f82209c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -837,12 +837,11 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty, if (!Ty) Ty = getTypes().convertTypeForMem(ASTTy); - bool tlsAccess = D->getTLSKind() != VarDecl::TLS_None; auto g = buildGlobal(D, Ty, IsForDefinition); auto ptrTy = mlir::cir::PointerType::get(builder.getContext(), g.getSymType()); - return builder.create( - getLoc(D->getSourceRange()), ptrTy, g.getSymName(), tlsAccess); + return builder.create(getLoc(D->getSourceRange()), + ptrTy, g.getSymName()); } mlir::cir::GlobalViewAttr diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 38b6a1a8cfb6..f21d5e4e4b7b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1634,13 +1634,9 @@ GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { << "' does not reference a valid cir.global or cir.func"; mlir::Type symTy; - if (auto g = dyn_cast(op)) { + if (auto g = dyn_cast(op)) symTy = g.getSymType(); - // Verify that for thread local global access, the global needs to - // be marked with tls bits. - if (getTls() && !g.getTlsModel()) - return emitOpError("access to global not marked thread local"); - } else if (auto f = dyn_cast(op)) + else if (auto f = dyn_cast(op)) symTy = f.getFunctionType(); else llvm_unreachable("shall not get here"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f19de97f9160..bd2110af7108 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1614,16 +1614,7 @@ class CIRGetGlobalOpLowering auto type = getTypeConverter()->convertType(op.getType()); auto symbol = op.getName(); - mlir::Operation *newop = - rewriter.create(op.getLoc(), type, symbol); - - if (op.getTls()) { - // Handle access to TLS via intrinsic. - newop = rewriter.create( - op.getLoc(), type, newop->getResult(0)); - } - - rewriter.replaceOp(op, newop); + rewriter.replaceOpWithNewOp(op, type, symbol); return mlir::success(); } }; @@ -2288,6 +2279,36 @@ class CIRBitClrsbOpLowering } }; +class CIRObjSizeOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ObjSizeOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto llvmResTy = getTypeConverter()->convertType(op.getType()); + auto loc = op->getLoc(); + + auto llvmIntrinNameAttr = + mlir::StringAttr::get(rewriter.getContext(), "llvm.objectsize"); + mlir::cir::SizeInfoType kindInfo = op.getKind(); + auto falseValue = rewriter.create( + loc, rewriter.getI1Type(), false); + auto trueValue = rewriter.create( + loc, rewriter.getI1Type(), true); + + rewriter.replaceOpWithNewOp( + op, llvmResTy, llvmIntrinNameAttr, + mlir::ValueRange{adaptor.getPtr(), + kindInfo == mlir::cir::SizeInfoType::max ? falseValue + : trueValue, + trueValue, op.getDynamic() ? trueValue : falseValue}); + + return mlir::LogicalResult::success(); + } +}; + class CIRBitClzOpLowering : public mlir::OpConversionPattern { public: @@ -3035,8 +3056,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVectorShuffleVecLowering, CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, - CIRPrefetchLowering, CIRIsConstantOpLowering>(converter, - patterns.getContext()); + CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/pass-object-size.c b/clang/test/CIR/CodeGen/pass-object-size.c new file mode 100644 index 000000000000..851b912b0ad5 --- /dev/null +++ b/clang/test/CIR/CodeGen/pass-object-size.c @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +void b(void *__attribute__((pass_object_size(0)))); +void e(void *__attribute__((pass_object_size(2)))); +void c() { + int a; + int d[a]; + b(d); + e(d); +} + +// CIR: cir.func no_proto @c() +// CIR: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , %{{[0-9]+}} : !u64i, ["vla"] {alignment = 16 : i64} +// CIR: [[TMP1:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CIR-NEXT: [[TMP2:%.*]] = cir.objsize([[TMP1]] : , max) -> !u64i +// CIR-NEXT: cir.call @b([[TMP1]], [[TMP2]]) : (!cir.ptr, !u64i) -> () +// CIR: [[TMP3:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CIR: [[TMP4:%.*]] = cir.objsize([[TMP3]] : , min) -> !u64i +// CIR-NEXT: cir.call @e([[TMP3]], [[TMP4]]) : (!cir.ptr, !u64i) -> () + +// LLVM: define void @c() +// LLVM: [[TMP0:%.*]] = alloca i32, i64 %{{[0-9]+}}, +// LLVM: [[TMP1:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 false, i1 true, i1 false), +// LLVM-NEXT: call void @b(ptr [[TMP0]], i64 [[TMP1]]) +// LLVM: [[TMP2:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 true, i1 true, i1 false), +// LLVM-NEXT: call void @e(ptr [[TMP0]], i64 [[TMP2]]) diff --git a/clang/test/CIR/CodeGen/tls.c b/clang/test/CIR/CodeGen/tls.c index 2a3ebda00744..c63390152a76 100644 --- a/clang/test/CIR/CodeGen/tls.c +++ b/clang/test/CIR/CodeGen/tls.c @@ -3,17 +3,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s -extern __thread int b; -int c(void) { return *&b; } -// CIR: cir.global "private" external tls_dyn @b : !s32i -// CIR: cir.func @c() -> !s32i -// CIR: %[[TLS_ADDR:.*]] = cir.get_global thread_local @b : cir.ptr - __thread int a; // CIR: cir.global external tls_dyn @a = #cir.int<0> : !s32i - -// LLVM: @b = external thread_local global i32 // LLVM: @a = thread_local global i32 0 - -// LLVM-LABEL: @c -// LLVM: = call ptr @llvm.threadlocal.address.p0(ptr @b) \ No newline at end of file diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index a9a5e1e5809c..786d212e9692 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -63,12 +63,6 @@ module { cir.global external tls_local_dyn @model1 = #cir.int<0> : !s32i cir.global external tls_init_exec @model2 = #cir.int<0> : !s32i cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i - - cir.global "private" external tls_dyn @batata : !s32i - cir.func @f35() { - %0 = cir.get_global thread_local @batata : cir.ptr - cir.return - } } // CHECK: cir.global external @a = #cir.int<3> : !s32i @@ -97,10 +91,4 @@ module { // CHECK: cir.global external tls_dyn @model0 = #cir.int<0> : !s32i // CHECK: cir.global external tls_local_dyn @model1 = #cir.int<0> : !s32i // CHECK: cir.global external tls_init_exec @model2 = #cir.int<0> : !s32i -// CHECK: cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i - -// CHECK: cir.global "private" external tls_dyn @batata : !s32i -// CHECK: cir.func @f35() { -// CHECK: %0 = cir.get_global thread_local @batata : cir.ptr -// CHECK: cir.return -// CHECK: } \ No newline at end of file +// CHECK: cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 6d738aac0ae4..ba7758b2ca56 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1033,16 +1033,3 @@ cir.func @bad_fetch(%x: !cir.ptr, %y: !cir.float) -> () { %12 = cir.atomic.fetch(xor, %x : !cir.ptr, %y : !cir.float, seq_cst) : !cir.float cir.return } - -// ----- - -!s32i = !cir.int - -module { - cir.global "private" external @batata : !s32i - cir.func @f35() { - // expected-error@+1 {{access to global not marked thread local}} - %0 = cir.get_global thread_local @batata : cir.ptr - cir.return - } -} \ No newline at end of file From 21b37d460ee9bfd556a823875a51633199dbc179 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 16 Apr 2024 18:26:59 -0700 Subject: [PATCH 1493/2301] [CIR][LLVMLowering] Fix handling of dense array conversions from const arrays We were lacking handling of trailing zeros for constant arrays. --- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 15 ++++++++++++++- clang/test/CIR/Lowering/const-array.cir | 15 +++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/const-array.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index bd2110af7108..3bbe2884ce13 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1037,7 +1037,7 @@ template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { llvm_unreachable("NYI"); } -// return the nested type and quiantity of elements for cir.array type. +// return the nested type and quantity of elements for cir.array type. // e.g: for !cir.array x 1> // it returns !s32i as return value and stores 3 to elemQuantity. mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity) { @@ -1073,6 +1073,19 @@ void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, llvm_unreachable("unknown element in ConstArrayAttr"); } } + + // Only fill in trailing zeros at the local cir.array level where the element + // type isn't another array (for the mult-dim case). + auto numTrailingZeros = attr.getTrailingZerosNum(); + if (numTrailingZeros) { + auto localArrayTy = attr.getType().dyn_cast(); + assert(localArrayTy && "expected !cir.array"); + + auto nestTy = localArrayTy.getEltType(); + if (!nestTy.isa()) + values.insert(values.end(), localArrayTy.getSize() - numTrailingZeros, + getZeroInitFromType(nestTy)); + } } template diff --git a/clang/test/CIR/Lowering/const-array.cir b/clang/test/CIR/Lowering/const-array.cir new file mode 100644 index 000000000000..7aff779a04fa --- /dev/null +++ b/clang/test/CIR/Lowering/const-array.cir @@ -0,0 +1,15 @@ +// RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM + +!u8i = !cir.int + +module { + cir.global "private" internal @normal_url_char = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<1> : !u8i], trailing_zeros> : !cir.array + // LLVM: @normal_url_char = internal global [4 x i8] c"\00\01\00\00" + + cir.func @c0() -> !cir.ptr> { + %0 = cir.get_global @normal_url_char : cir.ptr > + cir.return %0 : !cir.ptr> + } + // LLVM: define ptr @c0() + // LLVM: ret ptr @normal_url_char +} From 82c0e367911df07a645120cc9c839c630a932440 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 16 Apr 2024 22:14:02 -0700 Subject: [PATCH 1494/2301] [CIR][Rebase] Stub out error handling for Pass option initialization --- clang/lib/CIR/CodeGen/CIRPasses.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index d2b54af41cc0..7819d6db21ea 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -85,4 +85,4 @@ void populateCIRPreLoweringPasses(OpPassManager &pm) { // add other passes here } -} // namespace mlir \ No newline at end of file +} // namespace mlir From 721fb5ef9db55d57a19f8d96bf8a27a0ac8d3f05 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 17 Apr 2024 21:15:37 +0300 Subject: [PATCH 1495/2301] [CIR][CodeGen] Inline assembly: store the results (#512) This PR adds storing of the results of inline assembly operation. This is a **final** step (I hope: ) ) from my side to support inline assembly. There are some features that remains unimplemented, but basic things should work now, For example, we can do addition and get the results - I explicitly added several tests for that, so you can test them in real. For instance, the next program being compiled with CIR should give you 7 as the result: ``` int add(int x, int y) { int a; __asm__("addl %[y], %[x]" : "=r" (a) : [x] "r" (x), [y] "r" (y) ); return a; } int main() { printf("run %d\n", add(3, 4)); return 0; } ``` So, the main thing remains is pretty printing. As I said I added several examples, and may be it will become more clear how to print better. Also, I added several tests from original codegen in order to check that we don't fail. And I can add some checks there as well when we come to better solution on printing. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 63 +++- clang/lib/CIR/CodeGen/CIRAsm.cpp | 178 ++++++++- clang/lib/CIR/CodeGen/TargetInfo.h | 8 + .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 179 +++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 48 ++- clang/test/CIR/CodeGen/asm.c | 339 +++++++++++++++++- clang/test/CIR/Lowering/asm.cir | 62 ++-- 8 files changed, 796 insertions(+), 82 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index fa02176b0a5d..40fb6cc04539 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -31,6 +31,7 @@ include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/IR/BuiltinAttributeInterfaces.td" include "mlir/IR/EnumAttr.td" include "mlir/IR/SymbolInterfaces.td" +include "mlir/IR/CommonAttrConstraints.td" //===----------------------------------------------------------------------===// // CIR Ops @@ -3329,9 +3330,13 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { - the output variable index referenced by the input operands. - the index of early-clobber operand - Operand attributes is a storage of attributes, where each element corresponds - to the operand with the same index. The first index relates to the operation - result. + Operand attributes is a storage, where each element corresponds to the operand with + the same index. The first index relates to the operation result (if any). + Note, the operands themselves are stored as VariadicOfVariadic in the next order: + output, input and then in/out operands. + + Note, when several output operands are present, the result type may be represented as + an anon struct type. Example: ```C++ @@ -3341,38 +3346,58 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { ``` ```mlir + !ty_22anon2E022 = !cir.struct, !cir.int}> + !ty_22anon2E122 = !cir.struct, !cir.int}> + ... %0 = cir.alloca !s32i, cir.ptr , ["x", init] %1 = cir.alloca !s32i, cir.ptr , ["y", init] ... %2 = cir.load %0 : cir.ptr , !s32i %3 = cir.load %1 : cir.ptr , !s32i - cir.asm(x86_att, {"foo" "~{dirflag},~{fpsr},~{flags}"}) side_effects : () -> () - cir.asm(x86_att, {"bar $$42 $0" "=r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) %2 : (!s32i) -> () - cir.asm(x86_att, {"baz $$42 $0" "=r,=&r,0,1,~{dirflag},~{fpsr},~{flags}"}) %3, %2 : (!s32i, !s32i) -> () + + cir.asm(x86_att, + out = [], + in = [], + in_out = [], + {"foo" "~{dirflag},~{fpsr},~{flags}"}) side_effects + + cir.asm(x86_att, + out = [], + in = [], + in_out = [%2 : !s32i], + {"bar $$42 $0" "=r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) -> !ty_22anon2E022 + + cir.asm(x86_att, + out = [], + in = [%3 : !s32i], + in_out = [%2 : !s32i], + {"baz $$42 $0" "=r,=&r,0,1,~{dirflag},~{fpsr},~{flags}"}) -> !ty_22anon2E122 ``` }]; let results = (outs Optional:$res); let arguments = ( - ins Variadic:$operands, + ins VariadicOfVariadic:$asm_operands, StrAttr:$asm_string, StrAttr:$constraints, UnitAttr:$side_effects, AsmFlavor:$asm_flavor, - OptionalAttr:$operand_attrs); - - let assemblyFormat = [{ - `(` - $asm_flavor`,` - `{` $asm_string $constraints `}` - `)` - (`operand_attrs` `=` $operand_attrs^)? - (`side_effects` $side_effects^)? - attr-dict - operands `:` functional-type(operands, results) - }]; + ArrayAttr:$operand_attrs, + DenseI32ArrayAttr:$operands_segments + ); + let builders = [OpBuilder<(ins + "ArrayRef":$asm_operands, + "StringRef":$asm_string, + "StringRef":$constraints, + "bool":$side_effects, + "AsmFlavor":$asm_flavor, + "ArrayRef":$operand_attrs + )> + ]; + + let hasCustomAssemblyFormat = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 59b2a058527c..27193f718ece 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -251,15 +251,96 @@ CIRGenFunction::buildAsmInput(const TargetInfo::ConstraintInfo &Info, if (Info.allowsRegister() || !Info.allowsMemory()) if (CIRGenFunction::hasScalarEvaluationKind(InputExpr->getType())) - return {buildScalarExpr(InputExpr), nullptr}; + return {buildScalarExpr(InputExpr), mlir::Type()}; if (InputExpr->getStmtClass() == Expr::CXXThisExprClass) - return {buildScalarExpr(InputExpr), nullptr}; + return {buildScalarExpr(InputExpr), mlir::Type()}; InputExpr = InputExpr->IgnoreParenNoopCasts(getContext()); LValue Dest = buildLValue(InputExpr); return buildAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, InputExpr->getExprLoc()); } +static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S, + const llvm::ArrayRef RegResults, + const llvm::ArrayRef ResultRegTypes, + const llvm::ArrayRef ResultTruncRegTypes, + const llvm::ArrayRef ResultRegDests, + const llvm::ArrayRef ResultRegQualTys, + const llvm::BitVector &ResultTypeRequiresCast, + const llvm::BitVector &ResultRegIsFlagReg) { + CIRGenBuilderTy &Builder = CGF.getBuilder(); + CIRGenModule &CGM = CGF.CGM; + auto CTX = Builder.getContext(); + + assert(RegResults.size() == ResultRegTypes.size()); + assert(RegResults.size() == ResultTruncRegTypes.size()); + assert(RegResults.size() == ResultRegDests.size()); + // ResultRegDests can be also populated by addReturnRegisterOutputs() above, + // in which case its size may grow. + assert(ResultTypeRequiresCast.size() <= ResultRegDests.size()); + assert(ResultRegIsFlagReg.size() <= ResultRegDests.size()); + + for (unsigned i = 0, e = RegResults.size(); i != e; ++i) { + mlir::Value Tmp = RegResults[i]; + mlir::Type TruncTy = ResultTruncRegTypes[i]; + + if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) { + assert(!UnimplementedFeature::asm_llvm_assume()); + } + + // If the result type of the LLVM IR asm doesn't match the result type of + // the expression, do the conversion. + if (ResultRegTypes[i] != TruncTy) { + + // Truncate the integer result to the right size, note that TruncTy can be + // a pointer. + if (TruncTy.isa()) + Tmp = Builder.createFloatingCast(Tmp, TruncTy); + else if (isa(TruncTy) && + isa(Tmp.getType())) { + uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy); + Tmp = Builder.createIntCast( + Tmp, mlir::cir::IntType::get(CTX, (unsigned)ResSize, false)); + Tmp = Builder.createIntToPtr(Tmp, TruncTy); + } else if (isa(Tmp.getType()) && + isa(TruncTy)) { + uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp.getType()); + Tmp = Builder.createPtrToInt( + Tmp, mlir::cir::IntType::get(CTX, (unsigned)TmpSize, false)); + Tmp = Builder.createIntCast(Tmp, TruncTy); + } else if (isa(TruncTy)) { + Tmp = Builder.createIntCast(Tmp, TruncTy); + } else if (false /*TruncTy->isVectorTy()*/) { + assert(!UnimplementedFeature::asm_vector_type()); + } + } + + LValue Dest = ResultRegDests[i]; + // ResultTypeRequiresCast elements correspond to the first + // ResultTypeRequiresCast.size() elements of RegResults. + if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) { + unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]); + Address A = Dest.getAddress().withElementType(ResultRegTypes[i]); + if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) { + Builder.createStore(CGF.getLoc(S.getAsmLoc()), Tmp, A); + continue; + } + + QualType Ty = + CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false); + if (Ty.isNull()) { + const Expr *OutExpr = S.getOutputExpr(i); + CGM.getDiags().Report(OutExpr->getExprLoc(), + diag::err_store_value_to_reg); + return; + } + Dest = CGF.makeAddrLValue(A, Ty); + } + + CGF.buildStoreThroughLValue(RValue::get(Tmp), Dest); + } +} + mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { // Assemble the final asm string. std::string AsmString = S.generateAsmString(getContext()); @@ -277,19 +358,24 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { std::vector ResultTruncRegTypes; std::vector ArgTypes; std::vector ArgElemTypes; + std::vector OutArgs; + std::vector InArgs; + std::vector InOutArgs; std::vector Args; llvm::BitVector ResultTypeRequiresCast; llvm::BitVector ResultRegIsFlagReg; // Keep track of input constraints. std::string InOutConstraints; - std::vector InOutArgs; std::vector InOutArgTypes; std::vector InOutArgElemTypes; // Keep track of out constraints for tied input operand. std::vector OutputConstraints; + // Keep track of defined physregs. + llvm::SmallSet PhysRegOutputs; + // An inline asm can be marked readonly if it meets the following conditions: // - it doesn't have any sideeffects // - it doesn't clobber memory @@ -314,6 +400,10 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { AddVariableConstraints(OutputConstraint, *OutExpr, getTarget(), CGM, S, Info.earlyClobber(), &GCCReg); + // Give an error on multiple outputs to same physreg. + if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second) + CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg); + OutputConstraints.push_back(OutputConstraint); LValue Dest = buildLValue(OutExpr); @@ -392,6 +482,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { ArgTypes.push_back(DestAddr.getType()); ArgElemTypes.push_back(DestAddr.getElementType()); + OutArgs.push_back(DestAddr.getPointer()); Args.push_back(DestAddr.getPointer()); Constraints += "=*"; Constraints += OutputConstraint; @@ -412,6 +503,9 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { *this, OutputConstraint, Arg.getType())) Arg = builder.createBitcast(Arg, AdjTy); + // Update largest vector width for any vector types. + assert(!UnimplementedFeature::asm_vector_type()); + // Only tie earlyclobber physregs. if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber())) InOutConstraints += llvm::utostr(i); @@ -424,11 +518,28 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { } } // iterate over output operands + // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX) + // to the return value slot. Only do this when returning in registers. + if (isa(&S)) { + const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); + if (RetAI.isDirect() || RetAI.isExtend()) { + // Make a fake lvalue for the return value slot. + LValue ReturnSlot = makeAddrLValue(ReturnValue, FnRetTy); + CGM.getTargetCIRGenInfo().addReturnRegisterOutputs( + *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes, + ResultRegDests, AsmString, S.getNumOutputs()); + SawAsmBlock = true; + } + } + for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { const Expr *InputExpr = S.getInputExpr(i); TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; + if (Info.allowsMemory()) + ReadNone = false; + if (!Constraints.empty()) Constraints += ','; @@ -481,17 +592,21 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input) << InputExpr->getType() << InputConstraint; + // Update largest vector width for any vector types. + assert(!UnimplementedFeature::asm_vector_type()); + ArgTypes.push_back(Arg.getType()); ArgElemTypes.push_back(ArgElemType); + InArgs.push_back(Arg); Args.push_back(Arg); Constraints += InputConstraint; } // iterate over input operands // Append the "input" part of inout constraints. for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) { + Args.push_back(InOutArgs[i]); ArgTypes.push_back(InOutArgTypes[i]); ArgElemTypes.push_back(InOutArgElemTypes[i]); - Args.push_back(InOutArgs[i]); } Constraints += InOutConstraints; @@ -509,9 +624,15 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { } bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; + std::vector RegResults; + + llvm::SmallVector operands; + operands.push_back(OutArgs); + operands.push_back(InArgs); + operands.push_back(InOutArgs); auto IA = builder.create( - getLoc(S.getAsmLoc()), ResultType, Args, AsmString, Constraints, + getLoc(S.getAsmLoc()), ResultType, operands, AsmString, Constraints, HasSideEffect, inferFlavor(CGM, S), mlir::ArrayAttr()); if (false /*IsGCCAsmGoto*/) { @@ -525,28 +646,55 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { if (IA.getNumResults()) result = IA.getResult(0); - std::vector operandAttrs; - - // this is for the lowering to LLVM from LLVm dialect. Otherwise, if we - // don't have the result (i.e. void type as a result of operation), the - // element type attribute will be attached to the whole instruction, but not - // to the operand - if (!IA.getNumResults()) - operandAttrs.push_back(OptNoneAttr::get(builder.getContext())); + llvm::SmallVector operandAttrs; + int i = 0; for (auto typ : ArgElemTypes) { if (typ) { - operandAttrs.push_back(mlir::TypeAttr::get(typ)); + auto op = Args[i++]; + assert(op.getType().isa() && + "pointer type expected"); + assert(cast(op.getType()).getPointee() == typ && + "element type differs from pointee type!"); + + operandAttrs.push_back(mlir::UnitAttr::get(builder.getContext())); } else { // We need to add an attribute for every arg since later, during // the lowering to LLVM IR the attributes will be assigned to the // CallInsn argument by index, i.e. we can't skip null type here - operandAttrs.push_back(OptNoneAttr::get(builder.getContext())); + operandAttrs.push_back(mlir::Attribute()); } } + assert(Args.size() == operandAttrs.size() && + "The number of attributes is not even with the number of operands"); + IA.setOperandAttrsAttr(builder.getArrayAttr(operandAttrs)); + + if (ResultRegTypes.size() == 1) { + RegResults.push_back(result); + } else if (ResultRegTypes.size() > 1) { + auto alignment = CharUnits::One(); + auto sname = cast(ResultType).getName(); + auto dest = buildAlloca(sname, ResultType, getLoc(S.getAsmLoc()), + alignment, false); + auto addr = Address(dest, alignment); + builder.createStore(getLoc(S.getAsmLoc()), result, addr); + + for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) { + auto typ = builder.getPointerTo(ResultRegTypes[i]); + auto ptr = + builder.createGetMember(getLoc(S.getAsmLoc()), typ, dest, "", i); + auto tmp = + builder.createLoad(getLoc(S.getAsmLoc()), Address(ptr, alignment)); + RegResults.push_back(tmp); + } + } } + buildAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes, + ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast, + ResultRegIsFlagReg); + return mlir::success(); } diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index c2869ccc1e49..13ed96763775 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -52,6 +52,14 @@ class TargetCIRGenInfo { return Ty; } + virtual void + addReturnRegisterOutputs(CIRGenFunction &CGF, LValue ReturnValue, + std::string &Constraints, + std::vector &ResultRegTypes, + std::vector &ResultTruncRegTypes, + std::vector &ResultRegDests, + std::string &AsmString, unsigned NumOutputs) const {} + virtual ~TargetCIRGenInfo() {} }; diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 1a8d1328f90c..02f765befce6 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -171,6 +171,7 @@ struct UnimplementedFeature { static bool asm_unwind_clobber() { return false; } static bool asm_memory_effects() { return false; } static bool asm_vector_type() { return false; } + static bool asm_llvm_assume() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f21d5e4e4b7b..1e0782da0c8d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/AST/Attrs.inc" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" @@ -2750,6 +2751,184 @@ LogicalResult GetRuntimeMemberOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// InlineAsmOp Definitions +//===----------------------------------------------------------------------===// + +void cir::InlineAsmOp::print(OpAsmPrinter &p) { + p << '(' << getAsmFlavor() << ", "; + p.increaseIndent(); + p.printNewline(); + + llvm::SmallVector names{"out", "in", "in_out"}; + auto nameIt = names.begin(); + auto attrIt = getOperandAttrs().begin(); + + for (auto ops : getAsmOperands()) { + p << *nameIt << " = "; + + p << '['; + llvm::interleaveComma(llvm::make_range(ops.begin(), ops.end()), p, + [&](Value value) { + p.printOperand(value); + p << " : " << value.getType(); + if (*attrIt) + p << " (maybe_memory)"; + attrIt++; + }); + p << "],"; + p.printNewline(); + ++nameIt; + } + + p << "{"; + p.printString(getAsmString()); + p << " "; + p.printString(getConstraints()); + p << "}"; + p.decreaseIndent(); + p << ')'; + if (getSideEffects()) + p << " side_effects"; + + llvm::SmallVector<::llvm::StringRef, 2> elidedAttrs; + elidedAttrs.push_back("asm_flavor"); + elidedAttrs.push_back("asm_string"); + elidedAttrs.push_back("constraints"); + elidedAttrs.push_back("operand_attrs"); + elidedAttrs.push_back("operands_segments"); + elidedAttrs.push_back("side_effects"); + p.printOptionalAttrDict(getOperation()->getAttrs(), elidedAttrs); + + if (auto v = getRes()) + p << " -> " << v.getType(); +} + +ParseResult cir::InlineAsmOp::parse(OpAsmParser &parser, + OperationState &result) { + llvm::SmallVector operand_attrs; + llvm::SmallVector operandsGroupSizes; + std::string asm_string, constraints; + Type resType; + auto *ctxt = parser.getBuilder().getContext(); + + auto error = [&](const Twine &msg) { + parser.emitError(parser.getCurrentLocation(), msg); + ; + return mlir::failure(); + }; + + auto expected = [&](const std::string &c) { + return error("expected '" + c + "'"); + }; + + if (parser.parseLParen().failed()) + return expected("("); + + auto flavor = mlir::FieldParser::parse(parser); + if (failed(flavor)) + return error("Unknown AsmFlavor"); + + if (parser.parseComma().failed()) + return expected(","); + + auto parseValue = [&](Value &v) { + OpAsmParser::UnresolvedOperand op; + + if (parser.parseOperand(op) || parser.parseColon()) + return mlir::failure(); + + Type typ; + if (parser.parseType(typ).failed()) + return error("can't parse operand type"); + llvm::SmallVector tmp; + if (parser.resolveOperand(op, typ, tmp)) + return error("can't resolve operand"); + v = tmp[0]; + return mlir::success(); + }; + + auto parseOperands = [&](llvm::StringRef name) { + if (parser.parseKeyword(name).failed()) + return error("expected " + name + " operands here"); + if (parser.parseEqual().failed()) + return expected("="); + if (parser.parseLSquare().failed()) + return expected("["); + + int size = 0; + if (parser.parseOptionalRSquare().succeeded()) { + operandsGroupSizes.push_back(size); + if (parser.parseComma()) + return expected(","); + return mlir::success(); + } + + if (parser.parseCommaSeparatedList([&]() { + Value val; + if (parseValue(val).succeeded()) { + result.operands.push_back(val); + size++; + + if (parser.parseOptionalLParen().failed()) { + operand_attrs.push_back(mlir::Attribute()); + return mlir::success(); + } + + if (parser.parseKeyword("maybe_memory").succeeded()) { + operand_attrs.push_back(mlir::UnitAttr::get(ctxt)); + if (parser.parseRParen()) + return expected(")"); + return mlir::success(); + } + } + return mlir::failure(); + })) + return mlir::failure(); + + if (parser.parseRSquare().failed() || parser.parseComma().failed()) + return expected("]"); + operandsGroupSizes.push_back(size); + return mlir::success(); + }; + + if (parseOperands("out").failed() || parseOperands("in").failed() || + parseOperands("in_out").failed()) + return error("failed to parse operands"); + + if (parser.parseLBrace()) + return expected("{"); + if (parser.parseString(&asm_string)) + return error("asm string parsing failed"); + if (parser.parseString(&constraints)) + return error("constraints string parsing failed"); + if (parser.parseRBrace()) + return expected("}"); + if (parser.parseRParen()) + return expected(")"); + + if (parser.parseOptionalKeyword("side_effects").succeeded()) + result.attributes.set("side_effects", UnitAttr::get(ctxt)); + + if (parser.parseOptionalArrow().succeeded()) + ; + [[maybe_unused]] auto x = parser.parseType(resType); + + if (parser.parseOptionalAttrDict(result.attributes)) + return mlir::failure(); + + result.attributes.set("asm_flavor", AsmFlavorAttr::get(ctxt, *flavor)); + result.attributes.set("asm_string", StringAttr::get(ctxt, asm_string)); + result.attributes.set("constraints", StringAttr::get(ctxt, constraints)); + result.attributes.set("operand_attrs", ArrayAttr::get(ctxt, operand_attrs)); + result.getOrAddProperties().operands_segments = + parser.getBuilder().getDenseI32ArrayAttr(operandsGroupSizes); + if (resType) + result.addTypes(TypeRange{resType}); + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // Atomic Definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3bbe2884ce13..fd5409605e49 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2842,7 +2842,6 @@ class CIRInlineAsmOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::InlineAsmOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Type llResTy; if (op.getNumResults()) llResTy = getTypeConverter()->convertType(op.getType(0)); @@ -2855,30 +2854,47 @@ class CIRInlineAsmOpLowering std::vector opAttrs; auto llvmAttrName = mlir::LLVM::InlineAsmOp::getElementTypeAttrName(); - if (auto operandAttrs = op.getOperandAttrs()) { - for (auto attr : *operandAttrs) { - if (isa(attr)) { - opAttrs.push_back(mlir::Attribute()); - continue; - } + // this is for the lowering to LLVM from LLVm dialect. Otherwise, if we + // don't have the result (i.e. void type as a result of operation), the + // element type attribute will be attached to the whole instruction, but not + // to the operand + if (!op.getNumResults()) + opAttrs.push_back(mlir::Attribute()); + + llvm::SmallVector llvmOperands; + llvm::SmallVector cirOperands; + for (size_t i = 0; i < op.getAsmOperands().size(); ++i) { + auto llvmOps = adaptor.getAsmOperands()[i]; + auto cirOps = op.getAsmOperands()[i]; + llvmOperands.insert(llvmOperands.end(), llvmOps.begin(), llvmOps.end()); + cirOperands.insert(cirOperands.end(), cirOps.begin(), cirOps.end()); + } + + // so far we infer the llvm dialect element type attr from + // CIR operand type. + for (std::size_t i = 0; i < op.getOperandAttrs().size(); ++i) { + if (!op.getOperandAttrs()[i]) { + opAttrs.push_back(mlir::Attribute()); + continue; + } - mlir::TypeAttr tAttr = cast(attr); - std::vector attrs; - auto typAttr = mlir::TypeAttr::get( - getTypeConverter()->convertType(tAttr.getValue())); + std::vector attrs; + auto typ = cast(cirOperands[i].getType()); + auto typAttr = mlir::TypeAttr::get( + getTypeConverter()->convertType(typ.getPointee())); - attrs.push_back(rewriter.getNamedAttr(llvmAttrName, typAttr)); - auto newDict = rewriter.getDictionaryAttr(attrs); - opAttrs.push_back(newDict); - } + attrs.push_back(rewriter.getNamedAttr(llvmAttrName, typAttr)); + auto newDict = rewriter.getDictionaryAttr(attrs); + opAttrs.push_back(newDict); } rewriter.replaceOpWithNewOp( - op, llResTy, adaptor.getOperands(), op.getAsmStringAttr(), + op, llResTy, llvmOperands, op.getAsmStringAttr(), op.getConstraintsAttr(), op.getSideEffectsAttr(), /*is_align_stack*/ mlir::UnitAttr(), mlir::LLVM::AsmDialectAttr::get(getContext(), llDialect), rewriter.getArrayAttr(opAttrs)); + return mlir::success(); } }; diff --git a/clang/test/CIR/CodeGen/asm.c b/clang/test/CIR/CodeGen/asm.c index 8d72101d4429..56d8cf2bf57c 100644 --- a/clang/test/CIR/CodeGen/asm.c +++ b/clang/test/CIR/CodeGen/asm.c @@ -1,32 +1,349 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -//CHECK: cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone] side_effects : () -> () + +// CHECK: cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [], +// CHECK: in_out = [], +// CHECK: {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects void empty1() { __asm__ volatile("" : : : ); } -//CHECK: cir.asm(x86_att, {"xyz" "~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone] side_effects : () -> () +// CHECK: cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [], +// CHECK: in_out = [], +// CHECK: {"xyz" "~{dirflag},~{fpsr},~{flags}"}) side_effects void empty2() { __asm__ volatile("xyz" : : : ); } -//CHECK: cir.asm(x86_att, {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone, !s32i, !s32i] side_effects %0, %0 : (!cir.ptr, !cir.ptr) -> () -void t1(int x) { +// CHECK: cir.asm(x86_att, +// CHECK: out = [%0 : !cir.ptr (maybe_memory)], +// CHECK: in = [], +// CHECK: in_out = [%0 : !cir.ptr (maybe_memory)], +// CHECK: {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) side_effects +void empty3(int x) { __asm__ volatile("" : "+m"(x)); } -//CHECK: cir.asm(x86_att, {"" "*m,~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone, !s32i] side_effects %0 : (!cir.ptr) -> () -void t2(int x) { +// CHECK: cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [%0 : !cir.ptr (maybe_memory)], +// CHECK: in_out = [], +// CHECK: {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects +void empty4(int x) { __asm__ volatile("" : : "m"(x)); } -//CHECK: cir.asm(x86_att, {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone, !s32i] side_effects %0 : (!cir.ptr) -> () -void t3(int x) { +// CHECK: cir.asm(x86_att, +// CHECK: out = [%0 : !cir.ptr (maybe_memory)], +// CHECK: in = [], +// CHECK: in_out = [], +// CHECK: {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) side_effects +void empty5(int x) { __asm__ volatile("" : "=m"(x)); } -//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [#cir.optnone] side_effects %1 : (!s32i) -> !ty_22anon2E022 -void t4(int x) { +// CHECK: %3 = cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [], +// CHECK: in_out = [%2 : !s32i], +// CHECK: {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !ty_22anon2E022 +void empty6(int x) { __asm__ volatile("" : "=&r"(x), "+&r"(x)); -} \ No newline at end of file +} + +// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["a"] +// CHECK: [[TMP1:%.*]] = cir.load %0 : cir.ptr , !u32i +// CHECK: [[TMP2:%.*]] = cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [%3 : !u32i], +// CHECK: in_out = [], +// CHECK: {"addl $$42, $1" "=r,r,~{dirflag},~{fpsr},~{flags}"}) -> !s32i +// CHECK: cir.store [[TMP2]], [[TMP0]] : !s32i, cir.ptr loc(#loc42) +unsigned add1(unsigned int x) { + int a; + __asm__("addl $42, %[val]" + : "=r" (a) + : [val] "r" (x) + ); + + return a; +} + +// CHECK: [[TMP0:%.*]] = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !u32i, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr , !u32i +// CHECK: [[TMP2:%.*]] = cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [], +// CHECK: in_out = [%2 : !u32i], +// CHECK: {"addl $$42, $0" "=r,0,~{dirflag},~{fpsr},~{flags}"}) -> !u32i +// CHECK: cir.store [[TMP2]], [[TMP0]] : !u32i, cir.ptr +unsigned add2(unsigned int x) { + __asm__("addl $42, %[val]" + : [val] "+r" (x) + ); + return x; +} + + +// CHECK: [[TMP0:%.*]] = cir.alloca !u32i, cir.ptr , ["x", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr , !u32i +// CHECK: [[TMP2:%.*]] = cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [], +// CHECK: in_out = [%2 : !u32i], +// CHECK: {"addl $$42, $0 \0A\09 subl $$1, $0 \0A\09 imul $$2, $0" "=r,0,~{dirflag},~{fpsr},~{flags}"}) -> !u32i +// CHECK: cir.store [[TMP2]], [[TMP0]] : !u32i, cir.ptr +unsigned add3(unsigned int x) { // ((42 + x) - 1) * 2 + __asm__("addl $42, %[val] \n\t\ + subl $1, %[val] \n\t\ + imul $2, %[val]" + : [val] "+r" (x) + ); + return x; +} + +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["x", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP1:%.*]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: cir.asm(x86_att, +// CHECK: out = [%1 : !cir.ptr (maybe_memory)], +// CHECK: in = [], +// CHECK: in_out = [], +// CHECK: {"addl $$42, $0" "=*m,~{dirflag},~{fpsr},~{flags}"}) +// CHECK-NEXT: cir.return +void add4(int *x) { + __asm__("addl $42, %[addr]" : [addr] "=m" (*x)); +} + + +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.float, cir.ptr , ["x", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.float, cir.ptr , ["y", init] +// CHECK: [[TMP2:%.*]] = cir.alloca !cir.float, cir.ptr , ["r"] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.float, cir.ptr +// CHECK: cir.store %arg1, [[TMP1]] : !cir.float, cir.ptr +// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : cir.ptr , !cir.float +// CHECK: [[TMP4:%.*]] = cir.load [[TMP1]] : cir.ptr , !cir.float +// CHECK: [[TMP5:%.*]] = cir.asm(x86_att, +// CHECK: out = [], +// CHECK: in = [%4 : !cir.float, %5 : !cir.float], +// CHECK: in_out = [], +// CHECK: {"flds $1; flds $2; faddp" "=&{st},imr,imr,~{dirflag},~{fpsr},~{flags}"}) -> !cir.float +// CHECK: cir.store [[TMP5]], [[TMP2]] : !cir.float, cir.ptr +float add5(float x, float y) { + float r; + __asm__("flds %[x]; flds %[y]; faddp" + : "=&t" (r) + : [x] "g" (x), [y] "g" (y)); + return r; +} + +/* +There are tests from clang/test/CodeGen/asm.c. No checks for now - we just make +sure no crashes happen +*/ + + +void t1(int len) { + __asm__ volatile("" : "=&r"(len), "+&r"(len)); +} + +void t2(unsigned long long t) { + __asm__ volatile("" : "+m"(t)); +} + +void t3(unsigned char *src, unsigned long long temp) { + __asm__ volatile("" : "+m"(temp), "+r"(src)); +} + +void t4(void) { + unsigned long long a; + struct reg { unsigned long long a, b; } b; + + __asm__ volatile ("":: "m"(a), "m"(b)); +} + +void t5(int i) { + asm("nop" : "=r"(i) : "0"(t5)); +} + +void t6(void) { + __asm__ volatile("" : : "i" (t6)); +} + +void t7(int a) { + __asm__ volatile("T7 NAMED: %[input]" : "+r"(a): [input] "i" (4)); +} + +void t8(void) { + __asm__ volatile("T8 NAMED MODIFIER: %c[input]" :: [input] "i" (4)); +} + +unsigned t9(unsigned int a) { + asm("bswap %0 %1" : "+r" (a)); + return a; +} + +void t10(int r) { + __asm__("PR3908 %[lf] %[xx] %[li] %[r]" : [r] "+r" (r) : [lf] "mx" (0), [li] "mr" (0), [xx] "x" ((double)(0))); +} + +unsigned t11(signed char input) { + unsigned output; + __asm__("xyz" + : "=a" (output) + : "0" (input)); + return output; +} + +unsigned char t12(unsigned input) { + unsigned char output; + __asm__("xyz" + : "=a" (output) + : "0" (input)); + return output; +} + +unsigned char t13(unsigned input) { + unsigned char output; + __asm__("xyz %1" + : "=a" (output) + : "0" (input)); + return output; +} + +struct large { + int x[1000]; +}; + +unsigned long t15(int x, struct large *P) { + __asm__("xyz " + : "=r" (x) + : "m" (*P), "0" (x)); + return x; +} + +// bitfield destination of an asm. +struct S { + int a : 4; +}; + +void t14(struct S *P) { + __asm__("abc %0" : "=r"(P->a) ); +} + +int t16(void) { + int a,b; + asm ( "nop;" + :"=%c" (a) + : "r" (b) + ); + return 0; +} + +void t17(void) { + int i; + __asm__ ( "nop": "=m"(i)); +} + +int t18(unsigned data) { + int a, b; + + asm("xyz" :"=a"(a), "=d"(b) : "a"(data)); + return a + b; +} + +int t19(unsigned data) { + int a, b; + + asm("x{abc|def|ghi}z" :"=r"(a): "r"(data)); + return a + b; +} + +// skip t20 and t21: long double is not supported + +// accept 'l' constraint +unsigned char t22(unsigned char a, unsigned char b) { + unsigned int la = a; + unsigned int lb = b; + unsigned int bigres; + unsigned char res; + __asm__ ("0:\n1:\n" : [bigres] "=la"(bigres) : [la] "0"(la), [lb] "c"(lb) : + "edx", "cc"); + res = bigres; + return res; +} + +// accept 'l' constraint +unsigned char t23(unsigned char a, unsigned char b) { + unsigned int la = a; + unsigned int lb = b; + unsigned char res; + __asm__ ("0:\n1:\n" : [res] "=la"(res) : [la] "0"(la), [lb] "c"(lb) : + "edx", "cc"); + return res; +} + +void *t24(char c) { + void *addr; + __asm__ ("foobar" : "=a" (addr) : "0" (c)); + return addr; +} + +void t25(void) +{ + __asm__ __volatile__( \ + "finit" \ + : \ + : \ + :"st","st(1)","st(2)","st(3)", \ + "st(4)","st(5)","st(6)","st(7)", \ + "fpsr","fpcr" \ + ); +} + +//t26 skipped - no vector type support + +// Check to make sure the inline asm non-standard dialect attribute _not_ is +// emitted. +void t27(void) { + asm volatile("nop"); +} + +// Check handling of '*' and '#' constraint modifiers. +void t28(void) +{ + asm volatile ("/* %0 */" : : "i#*X,*r" (1)); +} + +static unsigned t29_var[1]; + +void t29(void) { + asm volatile("movl %%eax, %0" + : + : "m"(t29_var)); +} + +void t30(int len) { + __asm__ volatile("" + : "+&&rm"(len)); +} + +void t31(int len) { + __asm__ volatile("" + : "+%%rm"(len), "+rm"(len)); +} + +//t32 skipped: no goto + +void *t33(void *ptr) +{ + void *ret; + asm ("lea %1, %0" : "=r" (ret) : "p" (ptr)); + return ret; +} diff --git a/clang/test/CIR/Lowering/asm.cir b/clang/test/CIR/Lowering/asm.cir index 309bd22ae02e..3aa753fbb91f 100644 --- a/clang/test/CIR/Lowering/asm.cir +++ b/clang/test/CIR/Lowering/asm.cir @@ -8,27 +8,47 @@ module { %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, cir.ptr - cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) : () -> () - // CHECK: llvm.inline_asm asm_dialect = att operand_attrs = [] "", "~{dirflag},~{fpsr},~{flags}" : () -> () - - cir.asm(x86_att, {"xyz" "~{dirflag},~{fpsr},~{flags}"}) side_effects : () -> () - // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "xyz", "~{dirflag},~{fpsr},~{flags}" : () -> () - - cir.asm(x86_att, {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0, %0 : (!cir.ptr, !cir.ptr) -> () - // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "=*m,*m,~{dirflag},~{fpsr},~{flags}" %1, %1 : (!llvm.ptr, !llvm.ptr) -> () - - cir.asm(x86_att, {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0 : (!cir.ptr) -> () - // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "*m,~{dirflag},~{fpsr},~{flags}" %1 : (!llvm.ptr) -> () - - cir.asm(x86_att, {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) side_effects %0 : (!cir.ptr) -> () - // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "=*m,~{dirflag},~{fpsr},~{flags}" %1 : (!llvm.ptr) -> () - - %1 = cir.load %0 : cir.ptr , !s32i - cir.asm(x86_att, {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects %1 : (!s32i) -> () - // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}" %2 : (i32) -> () - - cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) operand_attrs = [!s32i] side_effects : () -> () - // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [{elementtype = i32}] "", "~{dirflag},~{fpsr},~{flags}" : () -> () + cir.asm(x86_att, + out = [], + in = [], + in_out = [], + {"" "~{dirflag},~{fpsr},~{flags}"}) -> !s32i + // CHECK: llvm.inline_asm asm_dialect = att operand_attrs = [] "", "~{dirflag},~{fpsr},~{flags}" : () -> i32 + + cir.asm(x86_att, + out = [], + in = [], + in_out = [], + {"xyz" "~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "xyz", "~{dirflag},~{fpsr},~{flags}" : () -> i32 + + cir.asm(x86_att, + out = [%0 : !cir.ptr (maybe_memory)], + in = [], + in_out = [%0 : !cir.ptr (maybe_memory)], + {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [{elementtype = i32}, {elementtype = i32}] "", "=*m,*m,~{dirflag},~{fpsr},~{flags}" %1, %1 : (!llvm.ptr, !llvm.ptr) -> i32 + + cir.asm(x86_att, + out = [], + in = [%0 : !cir.ptr (maybe_memory)], + in_out = [], + {"" "*m,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [{elementtype = i32}] "", "*m,~{dirflag},~{fpsr},~{flags}" %1 : (!llvm.ptr) -> i32 + + cir.asm(x86_att, + out = [%0 : !cir.ptr (maybe_memory)], + in = [], + in_out = [], + {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [{elementtype = i32}] "", "=*m,~{dirflag},~{fpsr},~{flags}" %1 : (!llvm.ptr) -> i32 + + cir.asm(x86_att, + out = [], + in = [], + in_out = [], + {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !s32i + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "", "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}" : () -> i32 cir.return } From 4bbdb42ae061941dd3906a8c06a67c450aaaa70d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 Apr 2024 11:48:38 -0700 Subject: [PATCH 1496/2301] Revert "[CIR][LLVMLowering] Lower cir.objectsize (#545)" This reverts commit 87a61f3a952975c2fdf9f84c23ab5fda3465bb89. It's deleting code it isn't supposed to touch. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 27 ++++++----- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 ++-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 +-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 ++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 8 +++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 45 +++++-------------- clang/test/CIR/CodeGen/pass-object-size.c | 29 ------------ clang/test/CIR/CodeGen/tls.c | 11 +++++ clang/test/CIR/IR/global.cir | 14 +++++- clang/test/CIR/IR/invalid.cir | 13 ++++++ 10 files changed, 82 insertions(+), 83 deletions(-) delete mode 100644 clang/test/CIR/CodeGen/pass-object-size.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 40fb6cc04539..5a08648676dd 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1776,24 +1776,31 @@ def GetGlobalOp : CIR_Op<"get_global", [Pure, DeclareOpInterfaceMethods]> { let summary = "Get the address of a global variable"; let description = [{ - The `cir.get_global` operation retrieves the address pointing to a - named global variable. If the global variable is marked constant, writing - to the resulting address (such as through a `cir.store` operation) is - undefined. Resulting type must always be a `!cir.ptr<...>` type. + The `cir.get_global` operation retrieves the address pointing to a + named global variable. If the global variable is marked constant, writing + to the resulting address (such as through a `cir.store` operation) is + undefined. Resulting type must always be a `!cir.ptr<...>` type. - Example: + Addresses of thread local globals can only be retrieved if this operation + is marked `thread_local`, which indicates the address isn't constant. - ```mlir - %x = cir.get_global @foo : !cir.ptr - ``` + Example: + ```mlir + %x = cir.get_global @foo : !cir.ptr + ... + %y = cir.get_global thread_local @batata : !cir.ptr + ``` }]; - let arguments = (ins FlatSymbolRefAttr:$name); + let arguments = (ins FlatSymbolRefAttr:$name, UnitAttr:$tls); let results = (outs Res:$addr); // FIXME: we should not be printing `cir.ptr` below, that should come // from the pointer type directly. - let assemblyFormat = "$name `:` `cir.ptr` type($addr) attr-dict"; + let assemblyFormat = [{ + (`thread_local` $tls^)? + $name `:` `cir.ptr` type($addr) attr-dict + }]; // `GetGlobalOp` is fully verified by its traits. let hasVerifier = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 6784a1ce9c94..deed6ffe63d8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -697,9 +697,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, uniqueName, type, isConst, linkage); } - mlir::Value createGetGlobal(mlir::cir::GlobalOp global) { - return create( - global.getLoc(), getPointerTo(global.getSymType()), global.getName()); + mlir::Value createGetGlobal(mlir::cir::GlobalOp global, + bool threadLocal = false) { + return create(global.getLoc(), + getPointerTo(global.getSymType()), + global.getName(), threadLocal); } mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 954ea854e026..6698e320c007 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -720,11 +720,10 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, if (CGF.getLangOpts().OpenMP) llvm_unreachable("not implemented"); + // Traditional LLVM codegen handles thread local separately, CIR handles + // as part of getAddrOfGlobalVar. auto V = CGF.CGM.getAddrOfGlobalVar(VD); - if (VD->getTLSKind() != VarDecl::TLS_None) - llvm_unreachable("NYI"); - auto RealVarTy = CGF.getTypes().convertTypeForMem(VD->getType()); auto realPtrTy = CGF.getBuilder().getPointerTo(RealVarTy); if (realPtrTy != V.getType()) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 0814f82209c9..aee1fa47edad 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -837,11 +837,12 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty, if (!Ty) Ty = getTypes().convertTypeForMem(ASTTy); + bool tlsAccess = D->getTLSKind() != VarDecl::TLS_None; auto g = buildGlobal(D, Ty, IsForDefinition); auto ptrTy = mlir::cir::PointerType::get(builder.getContext(), g.getSymType()); - return builder.create(getLoc(D->getSourceRange()), - ptrTy, g.getSymName()); + return builder.create( + getLoc(D->getSourceRange()), ptrTy, g.getSymName(), tlsAccess); } mlir::cir::GlobalViewAttr diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 1e0782da0c8d..ca7dc8c07425 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1635,9 +1635,13 @@ GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { << "' does not reference a valid cir.global or cir.func"; mlir::Type symTy; - if (auto g = dyn_cast(op)) + if (auto g = dyn_cast(op)) { symTy = g.getSymType(); - else if (auto f = dyn_cast(op)) + // Verify that for thread local global access, the global needs to + // be marked with tls bits. + if (getTls() && !g.getTlsModel()) + return emitOpError("access to global not marked thread local"); + } else if (auto f = dyn_cast(op)) symTy = f.getFunctionType(); else llvm_unreachable("shall not get here"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index fd5409605e49..2a3742113f5c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1627,7 +1627,16 @@ class CIRGetGlobalOpLowering auto type = getTypeConverter()->convertType(op.getType()); auto symbol = op.getName(); - rewriter.replaceOpWithNewOp(op, type, symbol); + mlir::Operation *newop = + rewriter.create(op.getLoc(), type, symbol); + + if (op.getTls()) { + // Handle access to TLS via intrinsic. + newop = rewriter.create( + op.getLoc(), type, newop->getResult(0)); + } + + rewriter.replaceOp(op, newop); return mlir::success(); } }; @@ -2292,36 +2301,6 @@ class CIRBitClrsbOpLowering } }; -class CIRObjSizeOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::ObjSizeOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto llvmResTy = getTypeConverter()->convertType(op.getType()); - auto loc = op->getLoc(); - - auto llvmIntrinNameAttr = - mlir::StringAttr::get(rewriter.getContext(), "llvm.objectsize"); - mlir::cir::SizeInfoType kindInfo = op.getKind(); - auto falseValue = rewriter.create( - loc, rewriter.getI1Type(), false); - auto trueValue = rewriter.create( - loc, rewriter.getI1Type(), true); - - rewriter.replaceOpWithNewOp( - op, llvmResTy, llvmIntrinNameAttr, - mlir::ValueRange{adaptor.getPtr(), - kindInfo == mlir::cir::SizeInfoType::max ? falseValue - : trueValue, - trueValue, op.getDynamic() ? trueValue : falseValue}); - - return mlir::LogicalResult::success(); - } -}; - class CIRBitClzOpLowering : public mlir::OpConversionPattern { public: @@ -3085,8 +3064,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVectorShuffleVecLowering, CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, - CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering>( - converter, patterns.getContext()); + CIRPrefetchLowering, CIRIsConstantOpLowering>(converter, + patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/pass-object-size.c b/clang/test/CIR/CodeGen/pass-object-size.c deleted file mode 100644 index 851b912b0ad5..000000000000 --- a/clang/test/CIR/CodeGen/pass-object-size.c +++ /dev/null @@ -1,29 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM - -void b(void *__attribute__((pass_object_size(0)))); -void e(void *__attribute__((pass_object_size(2)))); -void c() { - int a; - int d[a]; - b(d); - e(d); -} - -// CIR: cir.func no_proto @c() -// CIR: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , %{{[0-9]+}} : !u64i, ["vla"] {alignment = 16 : i64} -// CIR: [[TMP1:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr -// CIR-NEXT: [[TMP2:%.*]] = cir.objsize([[TMP1]] : , max) -> !u64i -// CIR-NEXT: cir.call @b([[TMP1]], [[TMP2]]) : (!cir.ptr, !u64i) -> () -// CIR: [[TMP3:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr -// CIR: [[TMP4:%.*]] = cir.objsize([[TMP3]] : , min) -> !u64i -// CIR-NEXT: cir.call @e([[TMP3]], [[TMP4]]) : (!cir.ptr, !u64i) -> () - -// LLVM: define void @c() -// LLVM: [[TMP0:%.*]] = alloca i32, i64 %{{[0-9]+}}, -// LLVM: [[TMP1:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 false, i1 true, i1 false), -// LLVM-NEXT: call void @b(ptr [[TMP0]], i64 [[TMP1]]) -// LLVM: [[TMP2:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 true, i1 true, i1 false), -// LLVM-NEXT: call void @e(ptr [[TMP0]], i64 [[TMP2]]) diff --git a/clang/test/CIR/CodeGen/tls.c b/clang/test/CIR/CodeGen/tls.c index c63390152a76..2a3ebda00744 100644 --- a/clang/test/CIR/CodeGen/tls.c +++ b/clang/test/CIR/CodeGen/tls.c @@ -3,6 +3,17 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s +extern __thread int b; +int c(void) { return *&b; } +// CIR: cir.global "private" external tls_dyn @b : !s32i +// CIR: cir.func @c() -> !s32i +// CIR: %[[TLS_ADDR:.*]] = cir.get_global thread_local @b : cir.ptr + __thread int a; // CIR: cir.global external tls_dyn @a = #cir.int<0> : !s32i + +// LLVM: @b = external thread_local global i32 // LLVM: @a = thread_local global i32 0 + +// LLVM-LABEL: @c +// LLVM: = call ptr @llvm.threadlocal.address.p0(ptr @b) \ No newline at end of file diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 786d212e9692..a9a5e1e5809c 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -63,6 +63,12 @@ module { cir.global external tls_local_dyn @model1 = #cir.int<0> : !s32i cir.global external tls_init_exec @model2 = #cir.int<0> : !s32i cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i + + cir.global "private" external tls_dyn @batata : !s32i + cir.func @f35() { + %0 = cir.get_global thread_local @batata : cir.ptr + cir.return + } } // CHECK: cir.global external @a = #cir.int<3> : !s32i @@ -91,4 +97,10 @@ module { // CHECK: cir.global external tls_dyn @model0 = #cir.int<0> : !s32i // CHECK: cir.global external tls_local_dyn @model1 = #cir.int<0> : !s32i // CHECK: cir.global external tls_init_exec @model2 = #cir.int<0> : !s32i -// CHECK: cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i \ No newline at end of file +// CHECK: cir.global external tls_local_exec @model3 = #cir.int<0> : !s32i + +// CHECK: cir.global "private" external tls_dyn @batata : !s32i +// CHECK: cir.func @f35() { +// CHECK: %0 = cir.get_global thread_local @batata : cir.ptr +// CHECK: cir.return +// CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index ba7758b2ca56..6d738aac0ae4 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1033,3 +1033,16 @@ cir.func @bad_fetch(%x: !cir.ptr, %y: !cir.float) -> () { %12 = cir.atomic.fetch(xor, %x : !cir.ptr, %y : !cir.float, seq_cst) : !cir.float cir.return } + +// ----- + +!s32i = !cir.int + +module { + cir.global "private" external @batata : !s32i + cir.func @f35() { + // expected-error@+1 {{access to global not marked thread local}} + %0 = cir.get_global thread_local @batata : cir.ptr + cir.return + } +} \ No newline at end of file From 9e39626efc728e1e7f9c40a2fc38bbd89426cfef Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 Apr 2024 11:53:52 -0700 Subject: [PATCH 1497/2301] Re-introduce [CIR][LLVMLowering] Lower cir.objectsize --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 34 +++++++++++++++++-- clang/test/CIR/CodeGen/pass-object-size.c | 29 ++++++++++++++++ 2 files changed, 61 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/pass-object-size.c diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 2a3742113f5c..e128f9ce86dd 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2301,6 +2301,36 @@ class CIRBitClrsbOpLowering } }; +class CIRObjSizeOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ObjSizeOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto llvmResTy = getTypeConverter()->convertType(op.getType()); + auto loc = op->getLoc(); + + auto llvmIntrinNameAttr = + mlir::StringAttr::get(rewriter.getContext(), "llvm.objectsize"); + mlir::cir::SizeInfoType kindInfo = op.getKind(); + auto falseValue = rewriter.create( + loc, rewriter.getI1Type(), false); + auto trueValue = rewriter.create( + loc, rewriter.getI1Type(), true); + + rewriter.replaceOpWithNewOp( + op, llvmResTy, llvmIntrinNameAttr, + mlir::ValueRange{adaptor.getPtr(), + kindInfo == mlir::cir::SizeInfoType::max ? falseValue + : trueValue, + trueValue, op.getDynamic() ? trueValue : falseValue}); + + return mlir::LogicalResult::success(); + } +}; + class CIRBitClzOpLowering : public mlir::OpConversionPattern { public: @@ -3064,8 +3094,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVectorShuffleVecLowering, CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, - CIRPrefetchLowering, CIRIsConstantOpLowering>(converter, - patterns.getContext()); + CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/pass-object-size.c b/clang/test/CIR/CodeGen/pass-object-size.c new file mode 100644 index 000000000000..851b912b0ad5 --- /dev/null +++ b/clang/test/CIR/CodeGen/pass-object-size.c @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +void b(void *__attribute__((pass_object_size(0)))); +void e(void *__attribute__((pass_object_size(2)))); +void c() { + int a; + int d[a]; + b(d); + e(d); +} + +// CIR: cir.func no_proto @c() +// CIR: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , %{{[0-9]+}} : !u64i, ["vla"] {alignment = 16 : i64} +// CIR: [[TMP1:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CIR-NEXT: [[TMP2:%.*]] = cir.objsize([[TMP1]] : , max) -> !u64i +// CIR-NEXT: cir.call @b([[TMP1]], [[TMP2]]) : (!cir.ptr, !u64i) -> () +// CIR: [[TMP3:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CIR: [[TMP4:%.*]] = cir.objsize([[TMP3]] : , min) -> !u64i +// CIR-NEXT: cir.call @e([[TMP3]], [[TMP4]]) : (!cir.ptr, !u64i) -> () + +// LLVM: define void @c() +// LLVM: [[TMP0:%.*]] = alloca i32, i64 %{{[0-9]+}}, +// LLVM: [[TMP1:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 false, i1 true, i1 false), +// LLVM-NEXT: call void @b(ptr [[TMP0]], i64 [[TMP1]]) +// LLVM: [[TMP2:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 true, i1 true, i1 false), +// LLVM-NEXT: call void @e(ptr [[TMP0]], i64 [[TMP2]]) From f3dd3254c637f26c26fd83cb29ac7e1a5d94d97c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 17 Apr 2024 11:59:19 -0700 Subject: [PATCH 1498/2301] [CIR][NFC] Fix few compiler warnings --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index ca7dc8c07425..cb5315857ed7 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2914,9 +2914,11 @@ ParseResult cir::InlineAsmOp::parse(OpAsmParser &parser, if (parser.parseOptionalKeyword("side_effects").succeeded()) result.attributes.set("side_effects", UnitAttr::get(ctxt)); - if (parser.parseOptionalArrow().succeeded()) - ; - [[maybe_unused]] auto x = parser.parseType(resType); + if (parser.parseOptionalArrow().failed()) + return mlir::failure(); + + if (parser.parseType(resType).failed()) + return mlir::failure(); if (parser.parseOptionalAttrDict(result.attributes)) return mlir::failure(); From 1f5ac2e7acca91049aeee09f439fb831ae1ae848 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 17 Apr 2024 22:59:32 +0300 Subject: [PATCH 1499/2301] [CIR][CodeGen] Flattening for ScopeOp and LoopOpInterface (#546) This PR is the next step towards goto support and adds flattening for `ScopeOp` and `LoopOpInterface`. Looks like I can't separate this operations and create two PRs, since some errors occur if I do so, e.g. `reference to block defined in another region`. Seems we need to flatten both operations in the same time. Given it's a copy-pasta, I think there is no need to try to make several PRs. I added several tests - just copied them from the lowering part just to demonstrate how it looks like. Note, that changes in `dot.cir` caused by `BrCondOp` updates in the previous PR, when we removed the following casts: ``` %20 = llvm.zext %19 : i1 to i8 %21 = llvm.trunc %20 : i8 to i1 llvm.cond_br %21 ... ``` --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 158 +++++++++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 168 ++---------------- clang/test/CIR/CodeGen/loop.cir | 122 +++++++++++++ clang/test/CIR/CodeGen/scope.cir | 60 +++++++ clang/test/CIR/Lowering/dot.cir | 44 +++-- 5 files changed, 376 insertions(+), 176 deletions(-) create mode 100644 clang/test/CIR/CodeGen/loop.cir create mode 100644 clang/test/CIR/CodeGen/scope.cir diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index aa061c51680c..b9c9481805d7 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -24,6 +24,28 @@ using namespace mlir::cir; namespace { +/// Lowers operations with the terminator trait that have a single successor. +void lowerTerminator(mlir::Operation *op, mlir::Block *dest, + mlir::PatternRewriter &rewriter) { + assert(op->hasTrait() && "not a terminator"); + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp(op, dest); +} + +/// Walks a region while skipping operations of type `Ops`. This ensures the +/// callback is not applied to said operations and its children. +template +void walkRegionSkipping(mlir::Region ®ion, + mlir::function_ref callback) { + region.walk([&](mlir::Operation *op) { + if (isa(op)) + return mlir::WalkResult::skip(); + callback(op); + return mlir::WalkResult::advance(); + }); +} + struct FlattenCFGPass : public FlattenCFGBase { FlattenCFGPass() = default; @@ -92,8 +114,140 @@ struct CIRIfFlattening : public OpRewritePattern { } }; +class CIRScopeOpFlattening : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ScopeOp scopeOp, + mlir::PatternRewriter &rewriter) const override { + mlir::OpBuilder::InsertionGuard guard(rewriter); + auto loc = scopeOp.getLoc(); + + // Empty scope: just remove it. + if (scopeOp.getRegion().empty()) { + rewriter.eraseOp(scopeOp); + return mlir::success(); + } + + // Split the current block before the ScopeOp to create the inlining + // point. + auto *currentBlock = rewriter.getInsertionBlock(); + auto *remainingOpsBlock = + rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); + mlir::Block *continueBlock; + if (scopeOp.getNumResults() == 0) + continueBlock = remainingOpsBlock; + else + llvm_unreachable("NYI"); + + // Inline body region. + auto *beforeBody = &scopeOp.getRegion().front(); + auto *afterBody = &scopeOp.getRegion().back(); + rewriter.inlineRegionBefore(scopeOp.getRegion(), continueBlock); + + // Save stack and then branch into the body of the region. + rewriter.setInsertionPointToEnd(currentBlock); + // TODO(CIR): stackSaveOp + // auto stackSaveOp = rewriter.create( + // loc, mlir::LLVM::LLVMPointerType::get( + // mlir::IntegerType::get(scopeOp.getContext(), 8))); + rewriter.create(loc, mlir::ValueRange(), beforeBody); + + // Replace the scopeop return with a branch that jumps out of the body. + // Stack restore before leaving the body region. + rewriter.setInsertionPointToEnd(afterBody); + if (auto yieldOp = + dyn_cast(afterBody->getTerminator())) { + rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getArgs(), + continueBlock); + } + + // TODO(cir): stackrestore? + + // Replace the op with values return from the body region. + rewriter.replaceOp(scopeOp, continueBlock->getArguments()); + + return mlir::success(); + } +}; + +class CIRLoopOpInterfaceFlattening + : public mlir::OpInterfaceRewritePattern { +public: + using mlir::OpInterfaceRewritePattern< + mlir::cir::LoopOpInterface>::OpInterfaceRewritePattern; + + inline void lowerConditionOp(mlir::cir::ConditionOp op, mlir::Block *body, + mlir::Block *exit, + mlir::PatternRewriter &rewriter) const { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp(op, op.getCondition(), + body, exit); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::LoopOpInterface op, + mlir::PatternRewriter &rewriter) const final { + // Setup CFG blocks. + auto *entry = rewriter.getInsertionBlock(); + auto *exit = rewriter.splitBlock(entry, rewriter.getInsertionPoint()); + auto *cond = &op.getCond().front(); + auto *body = &op.getBody().front(); + auto *step = (op.maybeGetStep() ? &op.maybeGetStep()->front() : nullptr); + + // Setup loop entry branch. + rewriter.setInsertionPointToEnd(entry); + rewriter.create(op.getLoc(), &op.getEntry().front()); + + // Branch from condition region to body or exit. + auto conditionOp = cast(cond->getTerminator()); + lowerConditionOp(conditionOp, body, exit, rewriter); + + // TODO(cir): Remove the walks below. It visits operations unnecessarily, + // however, to solve this we would likely need a custom DialecConversion + // driver to customize the order that operations are visited. + + // Lower continue statements. + mlir::Block *dest = (step ? step : cond); + op.walkBodySkippingNestedLoops([&](mlir::Operation *op) { + if (isa(op)) + lowerTerminator(op, dest, rewriter); + }); + + // Lower break statements. + walkRegionSkipping( + op.getBody(), [&](mlir::Operation *op) { + if (isa(op)) + lowerTerminator(op, exit, rewriter); + }); + + // Lower optional body region yield. + auto bodyYield = dyn_cast(body->getTerminator()); + if (bodyYield) + lowerTerminator(bodyYield, (step ? step : cond), rewriter); + + // Lower mandatory step region yield. + if (step) + lowerTerminator(cast(step->getTerminator()), cond, + rewriter); + + // Move region contents out of the loop op. + rewriter.inlineRegionBefore(op.getCond(), exit); + rewriter.inlineRegionBefore(op.getBody(), exit); + if (step) + rewriter.inlineRegionBefore(*op.maybeGetStep(), exit); + + rewriter.eraseOp(op); + return mlir::success(); + } +}; + void populateFlattenCFGPatterns(RewritePatternSet &patterns) { - patterns.add(patterns.getContext()); + patterns + .add( + patterns.getContext()); } void FlattenCFGPass::runOnOperation() { @@ -103,7 +257,7 @@ void FlattenCFGPass::runOnOperation() { // Collect operations to apply patterns. SmallVector ops; getOperation()->walk([&](Operation *op) { - if (isa(op)) + if (isa(op)) ops.push_back(op); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e128f9ce86dd..e314b9b1457c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -481,80 +481,6 @@ class CIRPtrStrideOpLowering } }; -class CIRLoopOpInterfaceLowering - : public mlir::OpInterfaceConversionPattern { -public: - using mlir::OpInterfaceConversionPattern< - mlir::cir::LoopOpInterface>::OpInterfaceConversionPattern; - - inline void - lowerConditionOp(mlir::cir::ConditionOp op, mlir::Block *body, - mlir::Block *exit, - mlir::ConversionPatternRewriter &rewriter) const { - mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp(op, op.getCondition(), - body, exit); - } - - mlir::LogicalResult - matchAndRewrite(mlir::cir::LoopOpInterface op, - mlir::ArrayRef operands, - mlir::ConversionPatternRewriter &rewriter) const final { - // Setup CFG blocks. - auto *entry = rewriter.getInsertionBlock(); - auto *exit = rewriter.splitBlock(entry, rewriter.getInsertionPoint()); - auto *cond = &op.getCond().front(); - auto *body = &op.getBody().front(); - auto *step = (op.maybeGetStep() ? &op.maybeGetStep()->front() : nullptr); - - // Setup loop entry branch. - rewriter.setInsertionPointToEnd(entry); - rewriter.create(op.getLoc(), &op.getEntry().front()); - - // Branch from condition region to body or exit. - auto conditionOp = cast(cond->getTerminator()); - lowerConditionOp(conditionOp, body, exit, rewriter); - - // TODO(cir): Remove the walks below. It visits operations unnecessarily, - // however, to solve this we would likely need a custom DialecConversion - // driver to customize the order that operations are visited. - - // Lower continue statements. - mlir::Block *dest = (step ? step : cond); - op.walkBodySkippingNestedLoops([&](mlir::Operation *op) { - if (isa(op)) - lowerTerminator(op, dest, rewriter); - }); - - // Lower break statements. - walkRegionSkipping( - op.getBody(), [&](mlir::Operation *op) { - if (isa(op)) - lowerTerminator(op, exit, rewriter); - }); - - // Lower optional body region yield. - auto bodyYield = dyn_cast(body->getTerminator()); - if (bodyYield) - lowerTerminator(bodyYield, (step ? step : cond), rewriter); - - // Lower mandatory step region yield. - if (step) - lowerTerminator(cast(step->getTerminator()), cond, - rewriter); - - // Move region contents out of the loop op. - rewriter.inlineRegionBefore(op.getCond(), exit); - rewriter.inlineRegionBefore(op.getBody(), exit); - if (step) - rewriter.inlineRegionBefore(*op.maybeGetStep(), exit); - - rewriter.eraseOp(op); - return mlir::success(); - } -}; - class CIRBrCondOpLowering : public mlir::OpConversionPattern { public: @@ -785,65 +711,6 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } }; -class CIRScopeOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::ScopeOp scopeOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::OpBuilder::InsertionGuard guard(rewriter); - auto loc = scopeOp.getLoc(); - - // Empty scope: just remove it. - if (scopeOp.getRegion().empty()) { - rewriter.eraseOp(scopeOp); - return mlir::success(); - } - - // Split the current block before the ScopeOp to create the inlining - // point. - auto *currentBlock = rewriter.getInsertionBlock(); - auto *remainingOpsBlock = - rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); - mlir::Block *continueBlock; - if (scopeOp.getNumResults() == 0) - continueBlock = remainingOpsBlock; - else - llvm_unreachable("NYI"); - - // Inline body region. - auto *beforeBody = &scopeOp.getRegion().front(); - auto *afterBody = &scopeOp.getRegion().back(); - rewriter.inlineRegionBefore(scopeOp.getRegion(), continueBlock); - - // Save stack and then branch into the body of the region. - rewriter.setInsertionPointToEnd(currentBlock); - // TODO(CIR): stackSaveOp - // auto stackSaveOp = rewriter.create( - // loc, mlir::LLVM::LLVMPointerType::get( - // mlir::IntegerType::get(scopeOp.getContext(), 8))); - rewriter.create(loc, mlir::ValueRange(), beforeBody); - - // Replace the scopeop return with a branch that jumps out of the body. - // Stack restore before leaving the body region. - rewriter.setInsertionPointToEnd(afterBody); - if (auto yieldOp = - dyn_cast(afterBody->getTerminator())) { - rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getArgs(), - continueBlock); - } - - // TODO(cir): stackrestore? - - // Replace the op with values return from the body region. - rewriter.replaceOp(scopeOp, continueBlock->getArguments()); - - return mlir::success(); - } -}; - class CIRReturnLowering : public mlir::OpConversionPattern { public: @@ -3079,23 +2946,22 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, CIRBitPopcountOpLowering, CIRAtomicFetchLowering, CIRByteswapOpLowering, - CIRLoopOpInterfaceLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, - CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, - CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, - CIRFuncLowering, CIRScopeOpLowering, CIRCastOpLowering, - CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, - CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, - CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, - CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, - CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, - CIRVectorCreateLowering, CIRVectorInsertLowering, - CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, - CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, - CIRVectorShuffleVecLowering, CIRStackSaveLowering, - CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, - CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, - CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering>( - converter, patterns.getContext()); + CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, + CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, + CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, + CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, + CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, + CIRBrOpLowering, CIRTernaryOpLowering, CIRGetMemberOpLowering, + CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, + CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, + CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, + CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, + CIRVectorSplatLowering, CIRVectorTernaryLowering, + CIRVectorShuffleIntsLowering, CIRVectorShuffleVecLowering, + CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, + CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, + CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, + CIRIsConstantOpLowering>(converter, patterns.getContext()); } namespace { @@ -3263,7 +3129,7 @@ static void buildCtorDtorList( // pass it will be placed into the unreachable block. And the possible error // after the lowering pass is: error: 'cir.return' op expects parent op to be // one of 'cir.func, cir.scope, cir.if ... The reason that this operation was -// not lowered and the new parent is lllvm.func. +// not lowered and the new parent is llvm.func. // // In the future we may want to get rid of this function and use DCE pass or // something similar. But now we need to guarantee the absence of the dialect diff --git a/clang/test/CIR/CodeGen/loop.cir b/clang/test/CIR/CodeGen/loop.cir new file mode 100644 index 000000000000..8204216b6f52 --- /dev/null +++ b/clang/test/CIR/CodeGen/loop.cir @@ -0,0 +1,122 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!s32i = !cir.int + +module { + + cir.func @testFor(%arg0 : !cir.bool) { + cir.for : cond { + cir.condition(%arg0) + } body { + cir.yield + } step { + cir.yield + } + cir.return + } +// CHECK: cir.func @testFor(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // Test while cir.loop operation lowering. + cir.func @testWhile(%arg0 : !cir.bool) { + cir.while { + cir.condition(%arg0) + } do { + cir.yield + } + cir.return + } +// CHECK: cir.func @testWhile(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // Test do-while cir.loop operation lowering. + cir.func @testDoWhile(%arg0 : !cir.bool) { + cir.do { + cir.yield + } while { + cir.condition(%arg0) + } + cir.return + } +// CHECK: cir.func @testDoWhile(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#BODY:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // test corner case + // while (1) { + // break; + // } + cir.func @testWhileWithBreakTerminatedBody(%arg0 : !cir.bool) { + cir.while { + cir.condition(%arg0) + } do { + cir.break + } + cir.return + } +// CHECK: cir.func @testWhileWithBreakTerminatedBody(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // test C only corner case - no fails during the lowering + // for (;;) { + // break; + // } + cir.func @forWithBreakTerminatedScopeInBody(%arg0 : !cir.bool) { + cir.for : cond { + cir.condition(%arg0) + } body { + cir.scope { // FIXME(cir): Redundant scope emitted during C codegen. + cir.break + } + cir.yield + } step { + cir.yield + } + cir.return + } +// CHECK: cir.func @forWithBreakTerminatedScopeInBody(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#EX_SCOPE_IN:]] +// CHECK: ^bb[[#EX_SCOPE_IN]]: +// CHECK: cir.br ^bb[[#EXIT:]] +// CHECK: ^bb[[#EX_SCOPE_EXIT:]]: +// CHECK: cir.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/scope.cir b/clang/test/CIR/CodeGen/scope.cir new file mode 100644 index 000000000000..813862e7c2fb --- /dev/null +++ b/clang/test/CIR/CodeGen/scope.cir @@ -0,0 +1,60 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!u32i = !cir.int + +module { + cir.func @foo() { + cir.scope { + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<4> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + } + cir.return + } +// CHECK: cir.func @foo() { +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: %1 = cir.const(#cir.int<4> : !u32i) : !u32i +// CHECK: cir.store %1, %0 : !u32i, cir.ptr +// CHECK: cir.br ^bb2 +// CHECK: ^bb2: // pred: ^bb1 +// CHECK: cir.return +// CHECK: } + + // Should drop empty scopes. + cir.func @empty_scope() { + cir.scope { + } + cir.return + } +// CHECK: cir.func @empty_scope() { +// CHECK: cir.return +// CHECK: } + + cir.func @scope_with_return() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.scope { + %2 = cir.const(#cir.int<0> : !u32i) : !u32i + cir.store %2, %0 : !u32i, cir.ptr + %3 = cir.load %0 : cir.ptr , !u32i + cir.return %3 : !u32i + } + %1 = cir.load %0 : cir.ptr , !u32i + cir.return %1 : !u32i + } + +// CHECK: cir.func @scope_with_return() -> !u32i { +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: %1 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: cir.store %1, %0 : !u32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !u32i +// CHECK: cir.return %2 : !u32i +// CHECK: ^bb2: // no predecessors +// CHECK: %3 = cir.load %0 : cir.ptr , !u32i +// CHECK: cir.return %3 : !u32i +// CHECK: } + +} diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 02fb1c92affb..5b7742fc1400 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -83,35 +83,33 @@ module { // MLIR-NEXT: %17 = llvm.zext %16 : i1 to i32 // MLIR-NEXT: %18 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %19 = llvm.icmp "ne" %17, %18 : i32 -// MLIR-NEXT: %20 = llvm.zext %19 : i1 to i8 -// MLIR-NEXT: %21 = llvm.trunc %20 : i8 to i1 -// MLIR-NEXT: llvm.cond_br %21, ^bb3, ^bb5 +// MLIR-NEXT: llvm.cond_br %19, ^bb3, ^bb5 // MLIR-NEXT: ^bb3: // pred: ^bb2 -// MLIR-NEXT: %22 = llvm.load %1 : !llvm.ptr -> !llvm.ptr -// MLIR-NEXT: %23 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %24 = llvm.getelementptr %22[%23] : (!llvm.ptr, i32) -> !llvm.ptr, f64 -// MLIR-NEXT: %25 = llvm.load %24 : !llvm.ptr -> f64 -// MLIR-NEXT: %26 = llvm.load %3 : !llvm.ptr -> !llvm.ptr -// MLIR-NEXT: %27 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %28 = llvm.getelementptr %26[%27] : (!llvm.ptr, i32) -> !llvm.ptr, f64 -// MLIR-NEXT: %29 = llvm.load %28 : !llvm.ptr -> f64 -// MLIR-NEXT: %30 = llvm.fmul %25, %29 : f64 -// MLIR-NEXT: %31 = llvm.load %9 : !llvm.ptr -> f64 -// MLIR-NEXT: %32 = llvm.fadd %31, %30 : f64 -// MLIR-NEXT: llvm.store %32, %9 : f64, !llvm.ptr +// MLIR-NEXT: %20 = llvm.load %1 : !llvm.ptr -> !llvm.ptr +// MLIR-NEXT: %21 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %22 = llvm.getelementptr %20[%21] : (!llvm.ptr, i32) -> !llvm.ptr, f64 +// MLIR-NEXT: %23 = llvm.load %22 : !llvm.ptr -> f64 +// MLIR-NEXT: %24 = llvm.load %3 : !llvm.ptr -> !llvm.ptr +// MLIR-NEXT: %25 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %26 = llvm.getelementptr %24[%25] : (!llvm.ptr, i32) -> !llvm.ptr, f64 +// MLIR-NEXT: %27 = llvm.load %26 : !llvm.ptr -> f64 +// MLIR-NEXT: %28 = llvm.fmul %23, %27 : f64 +// MLIR-NEXT: %29 = llvm.load %9 : !llvm.ptr -> f64 +// MLIR-NEXT: %30 = llvm.fadd %29, %28 : f64 +// MLIR-NEXT: llvm.store %30, %9 : f64, !llvm.ptr // MLIR-NEXT: llvm.br ^bb4 // MLIR-NEXT: ^bb4: // pred: ^bb3 -// MLIR-NEXT: %33 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %34 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: %35 = llvm.add %33, %34 : i32 -// MLIR-NEXT: llvm.store %35, %12 : i32, !llvm.ptr +// MLIR-NEXT: %31 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %32 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: %33 = llvm.add %31, %32 : i32 +// MLIR-NEXT: llvm.store %33, %12 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 // MLIR-NEXT: ^bb5: // pred: ^bb2 // MLIR-NEXT: llvm.br ^bb6 // MLIR-NEXT: ^bb6: // pred: ^bb5 -// MLIR-NEXT: %36 = llvm.load %9 : !llvm.ptr -> f64 -// MLIR-NEXT: llvm.store %36, %7 : f64, !llvm.ptr -// MLIR-NEXT: %37 = llvm.load %7 : !llvm.ptr -> f64 -// MLIR-NEXT: llvm.return %37 : f64 +// MLIR-NEXT: %34 = llvm.load %9 : !llvm.ptr -> f64 +// MLIR-NEXT: llvm.store %34, %7 : f64, !llvm.ptr +// MLIR-NEXT: %35 = llvm.load %7 : !llvm.ptr -> f64 +// MLIR-NEXT: llvm.return %35 : f64 // MLIR-NEXT: } // MLIR-NEXT: } From fe11d50bca83dee08ad845e0706725e2a2b3dee8 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 17 Apr 2024 23:10:17 +0300 Subject: [PATCH 1500/2301] [CIR][Codegen] Fix union init with constant (#548) Minor fix for the case when union fields have different sizes and union is inited with a constant. Example: ``` typedef union { short a; int b; } A; ``` --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 6 +++++- clang/test/CIR/CodeGen/union.cpp | 13 +++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 1ccd34a5d4bd..08644dc163d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -381,7 +381,11 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( NaturalLayout = false; Packed = true; } else if (DesiredSize > AlignedSize) { - llvm_unreachable("NYI"); + // The natural layout would be too small. Add padding to fix it. (This + // is ignored if we choose a packed layout.) + UnpackedElemStorage.assign(Elems.begin(), Elems.end()); + UnpackedElemStorage.push_back(Utils.getPadding(DesiredSize - Size)); + UnpackedElems = UnpackedElemStorage; } // If we don't have a natural layout, insert padding as necessary. diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index c033a4cdf98c..23f1b496bd0f 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -76,3 +76,16 @@ void shouldGenerateUnionAccess(union U u) { u.d; // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr } + +typedef union { + short a; + int b; +} A; + +void noCrushOnDifferentSizes() { + A a = {0}; + // CHECK: %[[#TMP0:]] = cir.alloca !ty_22A22, cir.ptr , ["a"] {alignment = 4 : i64} + // CHECK: %[[#TMP1:]] = cir.cast(bitcast, %[[#TMP0]] : !cir.ptr), !cir.ptr + // CHECK: %[[#TMP2:]] = cir.const(#cir.zero : !ty_anon_struct) : !ty_anon_struct + // CHECK: cir.store %[[#TMP2]], %[[#TMP1]] : !ty_anon_struct, cir.ptr +} \ No newline at end of file From 6d5d0f6d0eee974d3773ed564eebeea3249cd463 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 18 Apr 2024 00:03:03 +0300 Subject: [PATCH 1501/2301] [CIR][CodeGen][Lowering] Support Integer overflow with fwrap (#539) This PR fixes some cases when a program compiled with `-fwrapv` fails with `NYI` . Basically, the default behavior is no overlap: ``` void baz(int x, int y) { int z = x - y; } ``` LLVM IR (no CIR enabled): ``` %sub = sub nsw i32 %0, %1 ``` and with `-fwrapv` : ``` %sub = sub i32 %0, %1 ``` We need something similar in CIR. The only way I see how to implement it is to add a couple of attributes to the `BinOp` to make things even with the llvm dialect. Well, are there any other ideas? --------- Co-authored-by: Bruno Cardoso Lopes --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 15 ++++++++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 12 +++++--- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 19 ++++++------ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 18 +++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 21 +++++++++++-- clang/test/CIR/CodeGen/binop.cpp | 2 +- clang/test/CIR/CodeGen/call.cpp | 2 +- clang/test/CIR/CodeGen/int-wrap.c | 30 +++++++++++++++++++ clang/test/CIR/IR/invalid.cir | 18 ++++++++++- clang/test/CIR/Lowering/int-wrap.cir | 24 +++++++++++++++ 10 files changed, 141 insertions(+), 20 deletions(-) create mode 100644 clang/test/CIR/CodeGen/int-wrap.c create mode 100644 clang/test/CIR/Lowering/int-wrap.cir diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index b111913a126e..5f4bc1874674 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -140,6 +140,21 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return create(loc, val, dst, _volatile, order); } + mlir::Value createSub(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, + bool hasNSW = false) { + auto op = create(lhs.getLoc(), lhs.getType(), + mlir::cir::BinOpKind::Sub, lhs, rhs); + if (hasNUW) + op.setNoUnsignedWrap(true); + if (hasNSW) + op.setNoSignedWrap(true); + return op; + } + + mlir::Value createNSWSub(mlir::Value lhs, mlir::Value rhs) { + return createSub(lhs, rhs, false, true); + } + //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 5a08648676dd..3f337b6250c2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -934,14 +934,18 @@ def BinOp : CIR_Op<"binop", [Pure, // TODO: get more accurate than CIR_AnyType let results = (outs CIR_AnyType:$result); let arguments = (ins Arg:$kind, - CIR_AnyType:$lhs, CIR_AnyType:$rhs); + CIR_AnyType:$lhs, CIR_AnyType:$rhs, + UnitAttr:$no_unsigned_wrap, + UnitAttr:$no_signed_wrap); let assemblyFormat = [{ - `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) attr-dict + `(` $kind `,` $lhs `,` $rhs `)` + (`nsw` $no_signed_wrap^)? + (`nuw` $no_unsigned_wrap^)? + `:` type($lhs) attr-dict }]; - // Already covered by the traits - let hasVerifier = 0; + let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index cc95199fa963..188969798cbd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -467,7 +467,8 @@ class ScalarExprEmitter : public StmtVisitor { auto &builder = CGF.getBuilder(); auto amt = builder.getSInt32(amount, loc); if (CGF.getLangOpts().isSignedOverflowDefined()) { - llvm_unreachable("NYI"); + value = builder.create(loc, value.getType(), + value, amt); } else { value = builder.create(loc, value.getType(), value, amt); @@ -1207,7 +1208,8 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, mlir::Type elemTy = CGF.convertTypeForMem(elementType); if (CGF.getLangOpts().isSignedOverflowDefined()) - llvm_unreachable("ptr arithmetic with signed overflow is NYI"); + return CGF.getBuilder().create( + CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index); return CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc()); @@ -1245,20 +1247,17 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { if (Ops.CompType->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: { - llvm_unreachable("NYI"); - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), - mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); + if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return Builder.createSub(Ops.LHS, Ops.RHS); + [[fallthrough]]; } case LangOptions::SOB_Undefined: if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), - mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); + return Builder.createNSWSub(Ops.LHS, Ops.RHS); [[fallthrough]]; case LangOptions::SOB_Trapping: if (CanElideOverflowCheck(CGF.getContext(), Ops)) - llvm_unreachable("NYI"); + return Builder.createNSWSub(Ops.LHS, Ops.RHS); llvm_unreachable("NYI"); } } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index cb5315857ed7..5f1c2b0d24df 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2950,6 +2950,24 @@ LogicalResult AtomicFetch::verify() { return mlir::success(); } +LogicalResult BinOp::verify() { + bool noWrap = getNoUnsignedWrap() || getNoSignedWrap(); + + if (!isa(getType()) && noWrap) + return emitError() + << "only operations on integer values may have nsw/nuw flags"; + + bool noWrapOps = getKind() == mlir::cir::BinOpKind::Add || + getKind() == mlir::cir::BinOpKind::Sub || + getKind() == mlir::cir::BinOpKind::Mul; + + if (noWrap && !noWrapOps) + return emitError() << "The nsw/nuw flags are applicable to opcodes: 'add', " + "'sub' and 'mul'"; + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e314b9b1457c..5e91c4c64228 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1887,6 +1887,18 @@ class CIRUnaryOpLowering }; class CIRBinOpLowering : public mlir::OpConversionPattern { + + mlir::LLVM::IntegerOverflowFlags + getIntOverflowFlag(mlir::cir::BinOp op) const { + if (op.getNoUnsignedWrap()) + return mlir::LLVM::IntegerOverflowFlags::nuw; + + if (op.getNoSignedWrap()) + return mlir::LLVM::IntegerOverflowFlags::nsw; + + return mlir::LLVM::IntegerOverflowFlags::none; + } + public: using OpConversionPattern::OpConversionPattern; @@ -1909,19 +1921,22 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { switch (op.getKind()) { case mlir::cir::BinOpKind::Add: if (type.isa()) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Sub: if (type.isa()) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Mul: if (type.isa()) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index 7006ad48d83d..0564e9c8e89f 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -18,7 +18,7 @@ void b0(int a, int b) { // CHECK: = cir.binop(div, %6, %7) : !s32i // CHECK: = cir.binop(rem, %9, %10) : !s32i // CHECK: = cir.binop(add, %12, %13) : !s32i -// CHECK: = cir.binop(sub, %15, %16) : !s32i +// CHECK: = cir.binop(sub, %15, %16) nsw : !s32i // CHECK: = cir.shift( right, %18 : !s32i, %19 : !s32i) -> !s32i // CHECK: = cir.shift(left, %21 : !s32i, %22 : !s32i) -> !s32i // CHECK: = cir.binop(and, %24, %25) : !s32i diff --git a/clang/test/CIR/CodeGen/call.cpp b/clang/test/CIR/CodeGen/call.cpp index 2fbe34b316ff..7f2a8497bad0 100644 --- a/clang/test/CIR/CodeGen/call.cpp +++ b/clang/test/CIR/CodeGen/call.cpp @@ -11,4 +11,4 @@ int f() { // CHECK: %1 = cir.call @_Z1pv() : () -> !cir.ptr // CHECK: %2 = cir.load %1 : cir.ptr , !s32i // CHECK: %3 = cir.const(#cir.int<22> : !s32i) : !s32i -// CHECK: %4 = cir.binop(sub, %2, %3) : !s32i +// CHECK: %4 = cir.binop(sub, %2, %3) nsw : !s32i diff --git a/clang/test/CIR/CodeGen/int-wrap.c b/clang/test/CIR/CodeGen/int-wrap.c new file mode 100644 index 000000000000..f23e216143fc --- /dev/null +++ b/clang/test/CIR/CodeGen/int-wrap.c @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fwrapv -fclangir -emit-cir %s -o - 2>&1 | FileCheck %s --check-prefix=WRAP +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - 2>&1 | FileCheck %s --check-prefix=NOWRAP + +#define N 42 + +typedef struct { + const char* ptr; +} A; + +// WRAP: cir.binop(sub, {{.*}}, {{.*}}) : !s32i +// NOWRAP: cir.binop(sub, {{.*}}, {{.*}}) nsw : !s32i +void foo(int* ar, int len) { + int x = ar[len - N]; +} + +// check that the ptr_stride is generated in both cases (i.e. no NYI fails) + +// WRAP: cir.ptr_stride +// NOWRAP: cir.ptr_stride +void bar(A* a, unsigned n) { + a->ptr = a->ptr + n; +} + +// WRAP cir.ptr_stride +// NOWRAP: cir.ptr_stride +void baz(A* a) { + a->ptr--; +} + + diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 6d738aac0ae4..78956fab3980 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1036,6 +1036,22 @@ cir.func @bad_fetch(%x: !cir.ptr, %y: !cir.float) -> () { // ----- +cir.func @bad_operands_for_nowrap(%x: !cir.float, %y: !cir.float) { + // expected-error@+1 {{only operations on integer values may have nsw/nuw flags}} + %0 = cir.binop(add, %x, %y) nsw : !cir.float +} + +// ----- + +!u32i = !cir.int + +cir.func @bad_binop_for_nowrap(%x: !u32i, %y: !u32i) { + // expected-error@+1 {{The nsw/nuw flags are applicable to opcodes: 'add', 'sub' and 'mul'}} + %0 = cir.binop(div, %x, %y) nsw : !u32i +} + +// ----- + !s32i = !cir.int module { @@ -1045,4 +1061,4 @@ module { %0 = cir.get_global thread_local @batata : cir.ptr cir.return } -} \ No newline at end of file +} diff --git a/clang/test/CIR/Lowering/int-wrap.cir b/clang/test/CIR/Lowering/int-wrap.cir new file mode 100644 index 000000000000..3de5ec85b526 --- /dev/null +++ b/clang/test/CIR/Lowering/int-wrap.cir @@ -0,0 +1,24 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +module { + cir.func @test(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["len", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + %1 = cir.load %0 : cir.ptr , !s32i + %2 = cir.const(#cir.int<42> : !s32i) : !s32i + %3 = cir.binop(sub, %1, %2) nsw : !s32i + %4 = cir.binop(sub, %1, %2) nuw : !s32i + %5 = cir.binop(sub, %1, %2) : !s32i + cir.return + } +} + +// MLIR: llvm.sub {{.*}}, {{.*}} overflow : i32 +// MLIR-NEXT: llvm.sub {{.*}}, {{.*}} overflow : i32 +// MLIR-NEXT: llvm.sub {{.*}}, {{.*}} : i32 + +// LLVM: sub nsw i32 {{.*}}, {{.*}}, !dbg !9 +// LLVM-NEXT: sub nuw i32 {{.*}}, {{.*}}, !dbg !10 +// LLVM-NEXT: sub i32 {{.*}}, {{.*}}, !dbg !11 \ No newline at end of file From aa2c005a6210613a3112475cfdb2bc455afa5223 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 18 Apr 2024 12:33:02 -0700 Subject: [PATCH 1502/2301] [CIR][CIRGen] Clean up call arrangement Catch up with upstream and also update Address.h with one more helper method for `cir::Address`. --- clang/lib/CIR/CodeGen/Address.h | 8 +++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 52 +++++++++---------- clang/lib/CIR/CodeGen/CIRGenCall.h | 7 +++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 4 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenTypes.h | 10 ++-- 8 files changed, 54 insertions(+), 40 deletions(-) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 99544623ad2b..68ca69b1de31 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -94,6 +94,14 @@ class Address { return Alignment; } + /// Return the pointer contained in this class after authenticating it and + /// adding offset to it if necessary. + mlir::Value emitRawPointer() const { + // TODO(cir): update this class with latest traditional LLVM codegen bits + // and the replace the call below to getBasePointer(). + return getPointer(); + } + /// Return the type of the pointer value. mlir::cir::PointerType getType() const { return getPointer().getType().cast(); diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 418f9f9fb21e..9763b2492e21 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1119,9 +1119,8 @@ CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { CanQualType resultType = Context.VoidTy; (void)resultType; - return arrangeCIRFunctionInfo(resultType, /*instanceMethod=*/true, - /*chainCall=*/false, argTypes, extInfo, - paramInfos, required); + return arrangeCIRFunctionInfo(resultType, FnInfoOpts::IsInstanceMethod, + argTypes, extInfo, paramInfos, required); } /// Derives the 'this' type for CIRGen purposes, i.e. ignoring method CVR @@ -1145,7 +1144,7 @@ CanQualType CIRGenTypes::DeriveThisType(const CXXRecordDecl *RD, /// Arrange the CIR function layout for a value of the given function type, on /// top of any implicit parameters already stored. static const CIRGenFunctionInfo & -arrangeCIRFunctionInfo(CIRGenTypes &CGT, bool instanceMethod, +arrangeCIRFunctionInfo(CIRGenTypes &CGT, FnInfoOpts instanceMethod, SmallVectorImpl &prefix, CanQual FTP) { SmallVector paramInfos; @@ -1154,8 +1153,7 @@ arrangeCIRFunctionInfo(CIRGenTypes &CGT, bool instanceMethod, appendParameterTypes(CGT, prefix, paramInfos, FTP); CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); - return CGT.arrangeCIRFunctionInfo(resultType, instanceMethod, - /*chainCall=*/false, prefix, + return CGT.arrangeCIRFunctionInfo(resultType, instanceMethod, prefix, FTP->getExtInfo(), paramInfos, Required); } @@ -1164,8 +1162,7 @@ arrangeCIRFunctionInfo(CIRGenTypes &CGT, bool instanceMethod, const CIRGenFunctionInfo & CIRGenTypes::arrangeFreeFunctionType(CanQual FTP) { SmallVector argTypes; - return ::arrangeCIRFunctionInfo(*this, /*instanceMethod=*/false, argTypes, - FTP); + return ::arrangeCIRFunctionInfo(*this, FnInfoOpts::None, argTypes, FTP); } /// Arrange the argument and result information for a value of the given @@ -1175,8 +1172,7 @@ CIRGenTypes::arrangeFreeFunctionType(CanQual FTNP) { // When translating an unprototyped function type, always use a // variadic type. return arrangeCIRFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), - /*instanceMethod=*/false, - /*chainCall=*/false, std::nullopt, + FnInfoOpts::None, std::nullopt, FTNP->getExtInfo(), {}, RequiredArgs(0)); } @@ -1217,9 +1213,8 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXConstructorCall( // which never have param info. assert(!FPT->hasExtParameterInfos() && "NYI"); - return arrangeCIRFunctionInfo(ResultType, /*instanceMethod=*/true, - /*chainCall=*/false, ArgTypes, Info, ParamInfos, - Required); + return arrangeCIRFunctionInfo(ResultType, FnInfoOpts::IsInstanceMethod, + ArgTypes, Info, ParamInfos, Required); } bool CIRGenTypes::inheritingCtorHasParams(const InheritedConstructor &Inherited, @@ -1299,9 +1294,10 @@ static CanQualType GetReturnType(QualType RetTy) { static const CIRGenFunctionInfo & arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, const CallArgList &args, const FunctionType *fnType, - unsigned numExtraRequiredArgs, bool chainCall) { + unsigned numExtraRequiredArgs, + FnInfoOpts chainCall) { assert(args.size() >= numExtraRequiredArgs); - assert(!chainCall && "Chain call NYI"); + assert((chainCall != FnInfoOpts::IsChainCall) && "Chain call NYI"); llvm::SmallVector paramInfos; @@ -1326,9 +1322,9 @@ arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, SmallVector argTypes; for (const auto &arg : args) argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); - return CGT.arrangeCIRFunctionInfo( - GetReturnType(fnType->getReturnType()), /*instanceMethod=*/false, - chainCall, argTypes, fnType->getExtInfo(), paramInfos, required); + return CGT.arrangeCIRFunctionInfo(GetReturnType(fnType->getReturnType()), + chainCall, argTypes, fnType->getExtInfo(), + paramInfos, required); } static llvm::SmallVector @@ -1367,9 +1363,9 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXMethodCall( auto argTypes = getArgTypesForCall(Context, args); auto info = proto->getExtInfo(); - return arrangeCIRFunctionInfo( - GetReturnType(proto->getReturnType()), /*instanceMethod=*/true, - /*chainCall=*/false, argTypes, info, paramInfos, required); + return arrangeCIRFunctionInfo(GetReturnType(proto->getReturnType()), + FnInfoOpts::IsInstanceMethod, argTypes, info, + paramInfos, required); } /// Figure out the rules for calling a function with the given formal type using @@ -1378,8 +1374,9 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXMethodCall( const CIRGenFunctionInfo &CIRGenTypes::arrangeFreeFunctionCall( const CallArgList &args, const FunctionType *fnType, bool ChainCall) { assert(!ChainCall && "ChainCall NYI"); - return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, - ChainCall ? 1 : 0, ChainCall); + return arrangeFreeFunctionLikeCall( + *this, CGM, args, fnType, ChainCall ? 1 : 0, + ChainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None); } /// Set calling convention for CUDA/HIP kernel. @@ -1426,7 +1423,7 @@ CIRGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, argTypes.push_back(DeriveThisType(RD, MD)); return ::arrangeCIRFunctionInfo( - *this, true, argTypes, + *this, FnInfoOpts::IsChainCall, argTypes, FTP->getCanonicalTypeUnqualified().getAs()); } @@ -1446,10 +1443,9 @@ CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { // When declaring a function without a prototype, always use a non-variadic // type. if (CanQual noProto = FTy.getAs()) { - return arrangeCIRFunctionInfo(noProto->getReturnType(), - /*instanceMethod=*/false, - /*chainCall=*/false, std::nullopt, - noProto->getExtInfo(), {}, RequiredArgs::All); + return arrangeCIRFunctionInfo(noProto->getReturnType(), FnInfoOpts::None, + std::nullopt, noProto->getExtInfo(), {}, + RequiredArgs::All); } return arrangeFreeFunctionType(FTy.castAs()); diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 80941919e2e1..a192c6e1db80 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -286,6 +286,13 @@ class ReturnValueSlot { bool isExternallyDestructed() const { return IsExternallyDestructed; } }; +enum class FnInfoOpts { + None = 0, + IsInstanceMethod = 1 << 0, + IsChainCall = 1 << 1, + IsDelegateCall = 1 << 2, +}; + } // namespace cir #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 8121010fd72e..b05623a2e5b9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2199,7 +2199,7 @@ static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { mlir::cir::FuncType FTy = CGF.getBuilder().getFuncType({}, CGF.getBuilder().getVoidTy()); - return CGF.CGM.getOrCreateRuntimeFunction(FTy, "__cxa_bad_cast"); + return CGF.CGM.createRuntimeFunction(FTy, "__cxa_bad_cast"); } void CIRGenItaniumCXXABI::buildBadCastCall(CIRGenFunction &CGF, @@ -2280,7 +2280,7 @@ static mlir::cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { mlir::cir::FuncType FTy = CGF.getBuilder().getFuncType( {VoidPtrTy, RTTIPtrTy, RTTIPtrTy, PtrDiffTy}, VoidPtrTy); - return CGF.CGM.getOrCreateRuntimeFunction(FTy, "__dynamic_cast"); + return CGF.CGM.createRuntimeFunction(FTy, "__dynamic_cast"); } mlir::Value CIRGenItaniumCXXABI::buildDynamicCastCall( diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index aee1fa47edad..b1557311b577 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2046,9 +2046,8 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, return f; } -mlir::cir::FuncOp -CIRGenModule::getOrCreateRuntimeFunction(mlir::cir::FuncType Ty, - StringRef Name) { +mlir::cir::FuncOp CIRGenModule::createRuntimeFunction(mlir::cir::FuncType Ty, + StringRef Name) { auto entry = cast_if_present(getGlobalValue(Name)); if (entry) return entry; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 053aad7e6862..55fb0ab4924c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -639,8 +639,8 @@ class CIRGenModule : public CIRGenTypeCache { mlir::cir::FuncType Ty, const clang::FunctionDecl *FD); - mlir::cir::FuncOp getOrCreateRuntimeFunction(mlir::cir::FuncType Ty, - StringRef Name); + mlir::cir::FuncOp createRuntimeFunction(mlir::cir::FuncType Ty, + StringRef Name); /// Emit type info if type of an expression is a variably modified /// type. Also emit proper debug info for cast types. diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index c34a3211c383..9f2c961a74b7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -746,12 +746,14 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( - CanQualType resultType, bool instanceMethod, bool chainCall, + CanQualType resultType, FnInfoOpts opts, llvm::ArrayRef argTypes, FunctionType::ExtInfo info, llvm::ArrayRef paramInfos, RequiredArgs required) { assert(llvm::all_of(argTypes, [](CanQualType T) { return T.isCanonicalAsParam(); })); + bool instanceMethod = opts == FnInfoOpts::IsInstanceMethod; + bool chainCall = opts == FnInfoOpts::IsChainCall; // Lookup or create unique function info. llvm::FoldingSetNodeID ID; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 0ec564e29385..192871908bb1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -14,6 +14,7 @@ #define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H #include "ABIInfo.h" +#include "CIRGenCall.h" #include "CIRGenFunctionInfo.h" #include "CIRGenRecordLayout.h" @@ -257,13 +258,14 @@ class CIRGenTypes { const CIRGenFunctionInfo & arrangeFreeFunctionType(clang::CanQual FTNP); - /// "Arrange" the CIR information for a call or type with the given - /// signature. This is largely an internal method; other clients should use - /// one of the above routines, which ultimatley defer to this. + /// "Arrange" the LLVM information for a call or type with the given + /// signature. This is largely an internal method; other clients + /// should use one of the above routines, which ultimately defer to + /// this. /// /// \param argTypes - must all actually be canonical as params const CIRGenFunctionInfo &arrangeCIRFunctionInfo( - clang::CanQualType returnType, bool instanceMethod, bool chainCall, + clang::CanQualType returnType, FnInfoOpts opts, llvm::ArrayRef argTypes, clang::FunctionType::ExtInfo info, llvm::ArrayRef paramInfos, From 694b4ddaa4882fa01d73140e01892e27c56d244d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 18 Apr 2024 12:43:25 -0700 Subject: [PATCH 1503/2301] [CIR][CIRGen][NFC] Update atomic libcall skeleton to match upstream --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 252 ++++++------------------- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 10 + clang/lib/CIR/CodeGen/CIRGenTypes.h | 4 + 3 files changed, 76 insertions(+), 190 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 5407875dcd74..89aafd03cbe2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -524,6 +524,14 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, builder.createStore(loc, Result, Dest); } +static RValue buildAtomicLibcall(CIRGenFunction &CGF, StringRef fnName, + QualType resultType, CallArgList &args) { + [[maybe_unused]] const CIRGenFunctionInfo &fnInfo = + CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args); + [[maybe_unused]] auto fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo); + llvm_unreachable("NYI"); +} + static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, Address Ptr, Address Val1, Address Val2, mlir::Value IsWeak, mlir::Value FailureOrder, @@ -735,130 +743,39 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { Dest = Atomics.castToAtomicIntPointer(Dest); } - // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary . + // For atomics larger than 16 bytes, emit a libcall from the frontend. This + // avoids the overhead of dealing with excessively-large value types in IR. + // Non-power-of-2 values also lower to libcall here, as they are not currently + // permitted in IR instructions (although that constraint could be relaxed in + // the future). For other cases where a libcall is required on a given + // platform, we let the backend handle it (this includes handling for all of + // the size-optimized libcall variants, which are only valid up to 16 bytes.) + // + // See: https://llvm.org/docs/Atomics.html#libcalls-atomic if (UseLibcall) { - bool UseOptimizedLibcall = false; - switch (E->getOp()) { - case AtomicExpr::AO__c11_atomic_init: - case AtomicExpr::AO__opencl_atomic_init: - llvm_unreachable("Already handled above with EmitAtomicInit!"); - - case AtomicExpr::AO__atomic_fetch_add: - case AtomicExpr::AO__atomic_fetch_and: - case AtomicExpr::AO__atomic_fetch_max: - case AtomicExpr::AO__atomic_fetch_min: - case AtomicExpr::AO__atomic_fetch_nand: - case AtomicExpr::AO__atomic_fetch_or: - case AtomicExpr::AO__atomic_fetch_sub: - case AtomicExpr::AO__atomic_fetch_xor: - case AtomicExpr::AO__atomic_add_fetch: - case AtomicExpr::AO__atomic_and_fetch: - case AtomicExpr::AO__atomic_max_fetch: - case AtomicExpr::AO__atomic_min_fetch: - case AtomicExpr::AO__atomic_nand_fetch: - case AtomicExpr::AO__atomic_or_fetch: - case AtomicExpr::AO__atomic_sub_fetch: - case AtomicExpr::AO__atomic_xor_fetch: - case AtomicExpr::AO__c11_atomic_fetch_add: - case AtomicExpr::AO__c11_atomic_fetch_and: - case AtomicExpr::AO__c11_atomic_fetch_max: - case AtomicExpr::AO__c11_atomic_fetch_min: - case AtomicExpr::AO__c11_atomic_fetch_nand: - case AtomicExpr::AO__c11_atomic_fetch_or: - case AtomicExpr::AO__c11_atomic_fetch_sub: - case AtomicExpr::AO__c11_atomic_fetch_xor: - case AtomicExpr::AO__hip_atomic_fetch_add: - case AtomicExpr::AO__hip_atomic_fetch_and: - case AtomicExpr::AO__hip_atomic_fetch_max: - case AtomicExpr::AO__hip_atomic_fetch_min: - case AtomicExpr::AO__hip_atomic_fetch_or: - case AtomicExpr::AO__hip_atomic_fetch_sub: - case AtomicExpr::AO__hip_atomic_fetch_xor: - case AtomicExpr::AO__opencl_atomic_fetch_add: - case AtomicExpr::AO__opencl_atomic_fetch_and: - case AtomicExpr::AO__opencl_atomic_fetch_max: - case AtomicExpr::AO__opencl_atomic_fetch_min: - case AtomicExpr::AO__opencl_atomic_fetch_or: - case AtomicExpr::AO__opencl_atomic_fetch_sub: - case AtomicExpr::AO__opencl_atomic_fetch_xor: - case AtomicExpr::AO__scoped_atomic_fetch_add: - case AtomicExpr::AO__scoped_atomic_fetch_and: - case AtomicExpr::AO__scoped_atomic_fetch_max: - case AtomicExpr::AO__scoped_atomic_fetch_min: - case AtomicExpr::AO__scoped_atomic_fetch_nand: - case AtomicExpr::AO__scoped_atomic_fetch_or: - case AtomicExpr::AO__scoped_atomic_fetch_sub: - case AtomicExpr::AO__scoped_atomic_fetch_xor: - case AtomicExpr::AO__scoped_atomic_add_fetch: - case AtomicExpr::AO__scoped_atomic_and_fetch: - case AtomicExpr::AO__scoped_atomic_max_fetch: - case AtomicExpr::AO__scoped_atomic_min_fetch: - case AtomicExpr::AO__scoped_atomic_nand_fetch: - case AtomicExpr::AO__scoped_atomic_or_fetch: - case AtomicExpr::AO__scoped_atomic_sub_fetch: - case AtomicExpr::AO__scoped_atomic_xor_fetch: - // For these, only library calls for certain sizes exist. - UseOptimizedLibcall = true; - break; - - case AtomicExpr::AO__atomic_load: - case AtomicExpr::AO__atomic_store: - case AtomicExpr::AO__atomic_exchange: - case AtomicExpr::AO__atomic_compare_exchange: - case AtomicExpr::AO__scoped_atomic_load: - case AtomicExpr::AO__scoped_atomic_store: - case AtomicExpr::AO__scoped_atomic_exchange: - case AtomicExpr::AO__scoped_atomic_compare_exchange: - // Use the generic version if we don't know that the operand will be - // suitably aligned for the optimized version. - if (Misaligned) - break; - [[fallthrough]]; - case AtomicExpr::AO__atomic_load_n: - case AtomicExpr::AO__atomic_store_n: - case AtomicExpr::AO__atomic_exchange_n: - case AtomicExpr::AO__atomic_compare_exchange_n: - case AtomicExpr::AO__c11_atomic_load: - case AtomicExpr::AO__c11_atomic_store: - case AtomicExpr::AO__c11_atomic_exchange: - case AtomicExpr::AO__c11_atomic_compare_exchange_weak: - case AtomicExpr::AO__c11_atomic_compare_exchange_strong: - case AtomicExpr::AO__hip_atomic_load: - case AtomicExpr::AO__hip_atomic_store: - case AtomicExpr::AO__hip_atomic_exchange: - case AtomicExpr::AO__hip_atomic_compare_exchange_weak: - case AtomicExpr::AO__hip_atomic_compare_exchange_strong: - case AtomicExpr::AO__opencl_atomic_load: - case AtomicExpr::AO__opencl_atomic_store: - case AtomicExpr::AO__opencl_atomic_exchange: - case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: - case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: - case AtomicExpr::AO__scoped_atomic_load_n: - case AtomicExpr::AO__scoped_atomic_store_n: - case AtomicExpr::AO__scoped_atomic_exchange_n: - case AtomicExpr::AO__scoped_atomic_compare_exchange_n: - // Only use optimized library calls for sizes for which they exist. - // FIXME: Size == 16 optimized library functions exist too. - if (Size == 1 || Size == 2 || Size == 4 || Size == 8) - UseOptimizedLibcall = true; - break; - } - CallArgList Args; - if (!UseOptimizedLibcall) { - llvm_unreachable("NYI"); - } - // TODO(cir): Atomic address is the first or second parameter + // For non-optimized library calls, the size is the first parameter. + Args.add(RValue::get(builder.getConstInt(getLoc(E->getSourceRange()), + SizeTy, Size)), + getContext().getSizeType()); + + // The atomic address is the second parameter. // The OpenCL atomic library functions only accept pointer arguments to // generic address space. - llvm_unreachable("NYI"); + auto CastToGenericAddrSpace = [&](mlir::Value V, QualType PT) { + if (!E->isOpenCL()) + return V; + llvm_unreachable("NYI"); + }; + + Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(), + E->getPtr()->getType())), + getContext().VoidPtrTy); + // The next 1-3 parameters are op-dependent. std::string LibCallName; - [[maybe_unused]] QualType LoweredMemTy = - MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy; QualType RetTy; - [[maybe_unused]] bool HaveRetTy = false; - [[maybe_unused]] bool PostOpMinMax = false; + bool HaveRetTy = false; switch (E->getOp()) { case AtomicExpr::AO__c11_atomic_init: case AtomicExpr::AO__opencl_atomic_init: @@ -869,8 +786,6 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // and exchange. // bool __atomic_compare_exchange(size_t size, void *mem, void *expected, // void *desired, int success, int failure) - // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired, - // int success, int failure) case AtomicExpr::AO__atomic_compare_exchange: case AtomicExpr::AO__atomic_compare_exchange_n: case AtomicExpr::AO__c11_atomic_compare_exchange_weak: @@ -886,7 +801,6 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { break; // void __atomic_exchange(size_t size, void *mem, void *val, void *return, // int order) - // T __atomic_exchange_N(T *mem, T val, int order) case AtomicExpr::AO__atomic_exchange: case AtomicExpr::AO__atomic_exchange_n: case AtomicExpr::AO__c11_atomic_exchange: @@ -898,7 +812,6 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); break; // void __atomic_store(size_t size, void *mem, void *val, int order) - // void __atomic_store_N(T *mem, T val, int order) case AtomicExpr::AO__atomic_store: case AtomicExpr::AO__atomic_store_n: case AtomicExpr::AO__c11_atomic_store: @@ -910,7 +823,6 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); break; // void __atomic_load(size_t size, void *mem, void *return, int order) - // T __atomic_load_N(T *mem, int order) case AtomicExpr::AO__atomic_load: case AtomicExpr::AO__atomic_load_n: case AtomicExpr::AO__c11_atomic_load: @@ -919,129 +831,89 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__scoped_atomic_load: case AtomicExpr::AO__scoped_atomic_load_n: LibCallName = "__atomic_load"; - llvm_unreachable("NYI"); break; - // T __atomic_add_fetch_N(T *mem, T val, int order) - // T __atomic_fetch_add_N(T *mem, T val, int order) case AtomicExpr::AO__atomic_add_fetch: case AtomicExpr::AO__scoped_atomic_add_fetch: - llvm_unreachable("NYI"); - [[fallthrough]]; case AtomicExpr::AO__atomic_fetch_add: case AtomicExpr::AO__c11_atomic_fetch_add: case AtomicExpr::AO__hip_atomic_fetch_add: case AtomicExpr::AO__opencl_atomic_fetch_add: case AtomicExpr::AO__scoped_atomic_fetch_add: - LibCallName = "__atomic_fetch_add"; - llvm_unreachable("NYI"); - break; - // T __atomic_and_fetch_N(T *mem, T val, int order) - // T __atomic_fetch_and_N(T *mem, T val, int order) case AtomicExpr::AO__atomic_and_fetch: case AtomicExpr::AO__scoped_atomic_and_fetch: - llvm_unreachable("NYI"); - [[fallthrough]]; case AtomicExpr::AO__atomic_fetch_and: case AtomicExpr::AO__c11_atomic_fetch_and: case AtomicExpr::AO__hip_atomic_fetch_and: case AtomicExpr::AO__opencl_atomic_fetch_and: case AtomicExpr::AO__scoped_atomic_fetch_and: - LibCallName = "__atomic_fetch_and"; - llvm_unreachable("NYI"); - break; - // T __atomic_or_fetch_N(T *mem, T val, int order) - // T __atomic_fetch_or_N(T *mem, T val, int order) case AtomicExpr::AO__atomic_or_fetch: case AtomicExpr::AO__scoped_atomic_or_fetch: - llvm_unreachable("NYI"); - [[fallthrough]]; case AtomicExpr::AO__atomic_fetch_or: case AtomicExpr::AO__c11_atomic_fetch_or: case AtomicExpr::AO__hip_atomic_fetch_or: case AtomicExpr::AO__opencl_atomic_fetch_or: case AtomicExpr::AO__scoped_atomic_fetch_or: - LibCallName = "__atomic_fetch_or"; - llvm_unreachable("NYI"); - break; - // T __atomic_sub_fetch_N(T *mem, T val, int order) - // T __atomic_fetch_sub_N(T *mem, T val, int order) case AtomicExpr::AO__atomic_sub_fetch: case AtomicExpr::AO__scoped_atomic_sub_fetch: - llvm_unreachable("NYI"); - [[fallthrough]]; case AtomicExpr::AO__atomic_fetch_sub: case AtomicExpr::AO__c11_atomic_fetch_sub: case AtomicExpr::AO__hip_atomic_fetch_sub: case AtomicExpr::AO__opencl_atomic_fetch_sub: case AtomicExpr::AO__scoped_atomic_fetch_sub: - LibCallName = "__atomic_fetch_sub"; - llvm_unreachable("NYI"); - break; - // T __atomic_xor_fetch_N(T *mem, T val, int order) - // T __atomic_fetch_xor_N(T *mem, T val, int order) case AtomicExpr::AO__atomic_xor_fetch: case AtomicExpr::AO__scoped_atomic_xor_fetch: - llvm_unreachable("NYI"); - [[fallthrough]]; case AtomicExpr::AO__atomic_fetch_xor: case AtomicExpr::AO__c11_atomic_fetch_xor: case AtomicExpr::AO__hip_atomic_fetch_xor: case AtomicExpr::AO__opencl_atomic_fetch_xor: case AtomicExpr::AO__scoped_atomic_fetch_xor: - LibCallName = "__atomic_fetch_xor"; - llvm_unreachable("NYI"); - break; + case AtomicExpr::AO__atomic_nand_fetch: + case AtomicExpr::AO__atomic_fetch_nand: + case AtomicExpr::AO__c11_atomic_fetch_nand: + case AtomicExpr::AO__scoped_atomic_fetch_nand: + case AtomicExpr::AO__scoped_atomic_nand_fetch: case AtomicExpr::AO__atomic_min_fetch: - case AtomicExpr::AO__scoped_atomic_min_fetch: - llvm_unreachable("NYI"); - [[fallthrough]]; case AtomicExpr::AO__atomic_fetch_min: case AtomicExpr::AO__c11_atomic_fetch_min: - case AtomicExpr::AO__scoped_atomic_fetch_min: case AtomicExpr::AO__hip_atomic_fetch_min: case AtomicExpr::AO__opencl_atomic_fetch_min: - LibCallName = E->getValueType()->isSignedIntegerType() - ? "__atomic_fetch_min" - : "__atomic_fetch_umin"; - llvm_unreachable("NYI"); - break; + case AtomicExpr::AO__scoped_atomic_fetch_min: + case AtomicExpr::AO__scoped_atomic_min_fetch: case AtomicExpr::AO__atomic_max_fetch: - case AtomicExpr::AO__scoped_atomic_max_fetch: - llvm_unreachable("NYI"); - [[fallthrough]]; case AtomicExpr::AO__atomic_fetch_max: case AtomicExpr::AO__c11_atomic_fetch_max: case AtomicExpr::AO__hip_atomic_fetch_max: case AtomicExpr::AO__opencl_atomic_fetch_max: case AtomicExpr::AO__scoped_atomic_fetch_max: - LibCallName = E->getValueType()->isSignedIntegerType() - ? "__atomic_fetch_max" - : "__atomic_fetch_umax"; - llvm_unreachable("NYI"); - break; - // T __atomic_nand_fetch_N(T *mem, T val, int order) - // T __atomic_fetch_nand_N(T *mem, T val, int order) - case AtomicExpr::AO__atomic_nand_fetch: - case AtomicExpr::AO__scoped_atomic_nand_fetch: - llvm_unreachable("NYI"); - [[fallthrough]]; - case AtomicExpr::AO__atomic_fetch_nand: - case AtomicExpr::AO__c11_atomic_fetch_nand: - case AtomicExpr::AO__scoped_atomic_fetch_nand: - LibCallName = "__atomic_fetch_nand"; - llvm_unreachable("NYI"); - break; + case AtomicExpr::AO__scoped_atomic_max_fetch: + llvm_unreachable("Integral atomic operations always become atomicrmw!"); } if (E->isOpenCL()) { LibCallName = std::string("__opencl") + StringRef(LibCallName).drop_front(1).str(); } - // Optimized functions have the size in their name. - if (UseOptimizedLibcall) { + // By default, assume we return a value of the atomic type. + if (!HaveRetTy) { llvm_unreachable("NYI"); } - // By default, assume we return a value of the atomic type. + // Order is always the last parameter. + Args.add(RValue::get(Order), getContext().IntTy); + if (E->isOpenCL()) { + llvm_unreachable("NYI"); + } + + [[maybe_unused]] RValue Res = + buildAtomicLibcall(*this, LibCallName, RetTy, Args); + // The value is returned directly from the libcall. + if (E->isCmpXChg()) { + llvm_unreachable("NYI"); + } + + if (RValTy->isVoidType()) { + llvm_unreachable("NYI"); + } + llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 9763b2492e21..00aa6511169e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1176,6 +1176,16 @@ CIRGenTypes::arrangeFreeFunctionType(CanQual FTNP) { FTNP->getExtInfo(), {}, RequiredArgs(0)); } +const CIRGenFunctionInfo & +CIRGenTypes::arrangeBuiltinFunctionCall(QualType resultType, + const CallArgList &args) { + // FIXME: Kill copy. + SmallVector argTypes; + for (const auto &Arg : args) + argTypes.push_back(getContext().getCanonicalParamType(Arg.Ty)); + llvm_unreachable("NYI"); +} + /// Arrange a call to a C++ method, passing the given arguments. /// /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 192871908bb1..51350c9ea70e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -228,6 +228,10 @@ class CIRGenTypes { const CIRGenFunctionInfo & arrangeFunctionDeclaration(const clang::FunctionDecl *FD); + const CIRGenFunctionInfo & + arrangeBuiltinFunctionCall(clang::QualType resultType, + const CallArgList &args); + const CIRGenFunctionInfo &arrangeCXXConstructorCall( const CallArgList &Args, const clang::CXXConstructorDecl *D, clang::CXXCtorType CtorKind, unsigned ExtraPrefixArgs, From d3ebcc68001174285a79e8d3ce8b92293b585652 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 18 Apr 2024 14:50:23 -0700 Subject: [PATCH 1504/2301] [CIR][CIRGen][NFC] Make createRuntimeFunction use proper function create call --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 32 ++++++++++++++++---------- clang/lib/CIR/CodeGen/CIRGenModule.h | 4 +++- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index b1557311b577..e0194ccd4211 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2046,14 +2046,24 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, return f; } -mlir::cir::FuncOp CIRGenModule::createRuntimeFunction(mlir::cir::FuncType Ty, - StringRef Name) { - auto entry = cast_if_present(getGlobalValue(Name)); - if (entry) - return entry; +mlir::cir::FuncOp CIRGenModule::createRuntimeFunction( + mlir::cir::FuncType Ty, StringRef Name, mlir::ArrayAttr, + [[maybe_unused]] bool Local, bool AssumeConvergent) { + if (AssumeConvergent) { + llvm_unreachable("NYI"); + } + + auto entry = GetOrCreateCIRFunction(Name, Ty, GlobalDecl(), + /*ForVtable=*/false); + + // Traditional codegen checks for a valid dyn_cast llvm::Function for `entry`, + // no testcase that cover this path just yet though. + if (!entry) { + // Setup runtime CC, DLL support for windows and set dso local. + llvm_unreachable("NYI"); + } - return createCIRFunction(mlir::UnknownLoc::get(builder.getContext()), Name, - Ty, nullptr); + return entry; } bool isDefaultedMethod(const clang::FunctionDecl *FD) { @@ -2065,9 +2075,8 @@ bool isDefaultedMethod(const clang::FunctionDecl *FD) { } mlir::Location CIRGenModule::getLocForFunction(const clang::FunctionDecl *FD) { - assert(FD && "Not sure which location to use yet"); - bool invalidLoc = (FD->getSourceRange().getBegin().isInvalid() || - FD->getSourceRange().getEnd().isInvalid()); + bool invalidLoc = !FD || (FD->getSourceRange().getBegin().isInvalid() || + FD->getSourceRange().getEnd().isInvalid()); if (!invalidLoc) return getLoc(FD->getSourceRange()); @@ -2267,8 +2276,7 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( IsIncompleteFunction = true; } - auto *FD = llvm::cast(D); - assert(FD && "Only FunctionDecl supported so far."); + auto *FD = llvm::cast_or_null(D); // TODO: CodeGen includeds the linkage (ExternalLinkage) and only passes the // mangledname if Entry is nullptr diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 55fb0ab4924c..76b393ab930d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -640,7 +640,9 @@ class CIRGenModule : public CIRGenTypeCache { const clang::FunctionDecl *FD); mlir::cir::FuncOp createRuntimeFunction(mlir::cir::FuncType Ty, - StringRef Name); + StringRef Name, mlir::ArrayAttr = {}, + bool Local = false, + bool AssumeConvergent = false); /// Emit type info if type of an expression is a variably modified /// type. Also emit proper debug info for cast types. From d94032da9bb113ca32677eda8f984877eddbd77a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 18 Apr 2024 15:04:03 -0700 Subject: [PATCH 1505/2301] [CIR][CIRGen][NFC] Add entry points for later addition of more function attributes --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 22 ++++++++++--------- clang/lib/CIR/CodeGen/CIRGenModule.h | 7 +++++- .../CodeGen/UnimplementedFeatureGuarding.h | 1 + 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index e0194ccd4211..c09ab95d2f97 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2191,6 +2191,12 @@ void CIRGenModule::setExtraAttributesForFunc(FuncOp f, builder.getContext(), attrs.getDictionary(builder.getContext()))); } +void CIRGenModule::setFunctionAttributes(GlobalDecl GD, mlir::cir::FuncOp F, + bool IsIncompleteFunction, + bool IsThunk) { + assert(!UnimplementedFeature::setFunctionAttributes()); +} + /// If the specified mangled name is not in the module, /// create and return a CIR Function with the specified type. If there is /// something in the module with the specified name, return it potentially @@ -2200,7 +2206,8 @@ void CIRGenModule::setExtraAttributesForFunc(FuncOp f, /// used to set the attributes on the function when it is first created. mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( StringRef MangledName, mlir::Type Ty, GlobalDecl GD, bool ForVTable, - bool DontDefer, bool IsThunk, ForDefinition_t IsForDefinition) { + bool DontDefer, bool IsThunk, ForDefinition_t IsForDefinition, + mlir::ArrayAttr ExtraAttrs) { assert(!IsThunk && "NYI"); const auto *D = GD.getDecl(); @@ -2311,16 +2318,11 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( Entry->erase(); } - // TODO: This might not be valid, seems the uniqueing system doesn't make - // sense for MLIR - // assert(F->getName().getStringRef() == MangledName && "name was uniqued!"); - if (D) - ; // TODO: set function attributes from the declaration - - // TODO: set function attributes from the missing attributes param - - // TODO: Handle extra attributes + setFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk); + if (ExtraAttrs) { + llvm_unreachable("NYI"); + } if (!DontDefer) { // All MSVC dtors other than the base dtor are linkonce_odr and delegate to diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 76b393ab930d..946d3d2e5a7b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -538,6 +538,10 @@ class CIRGenModule : public CIRGenTypeCache { // Make sure that this type is translated. void UpdateCompletedType(const clang::TagDecl *TD); + /// Set function attributes for a function declaration. + void setFunctionAttributes(GlobalDecl GD, mlir::cir::FuncOp F, + bool IsIncompleteFunction, bool IsThunk); + void buildGlobalDefinition(clang::GlobalDecl D, mlir::Operation *Op = nullptr); void buildGlobalFunctionDefinition(clang::GlobalDecl D, mlir::Operation *Op); @@ -632,7 +636,8 @@ class CIRGenModule : public CIRGenTypeCache { GetOrCreateCIRFunction(llvm::StringRef MangledName, mlir::Type Ty, clang::GlobalDecl D, bool ForVTable, bool DontDefer = false, bool IsThunk = false, - ForDefinition_t IsForDefinition = NotForDefinition); + ForDefinition_t IsForDefinition = NotForDefinition, + mlir::ArrayAttr ExtraAttrs = {}); // Effectively create the CIR instruction, properly handling insertion // points. mlir::cir::FuncOp createCIRFunction(mlir::Location loc, StringRef name, diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 02f765befce6..742939d4b32c 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -65,6 +65,7 @@ struct UnimplementedFeature { static bool generateDebugInfo() { return false; } // LLVM Attributes + static bool setFunctionAttributes() { return false; } static bool attributeBuiltin() { return false; } static bool attributeNoBuiltin() { return false; } static bool parameterAttributes() { return false; } From 0a9dc21c1e4e122e9bd33a570ee10bcd76fcd5db Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 18 Apr 2024 17:28:57 -0700 Subject: [PATCH 1506/2301] [CIR][CIRGen][NFC] Update other remainig parts of buildAtomicExpr to match upstream --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 89aafd03cbe2..4e5190216751 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -580,22 +580,16 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { uint64_t Size = TInfo.Width.getQuantity(); unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth(); - bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits; - bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0; - bool UseLibcall = Misaligned | Oversized; - bool ShouldCastToIntPtrTy = true; - CharUnits MaxInlineWidth = getContext().toCharUnitsFromBits(MaxInlineWidthInBits); - DiagnosticsEngine &Diags = CGM.getDiags(); - + bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0; + bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits; if (Misaligned) { Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned) << (int)TInfo.Width.getQuantity() << (int)Ptr.getAlignment().getQuantity(); } - if (Oversized) { Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized) << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity(); @@ -603,6 +597,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { auto Order = buildScalarExpr(E->getOrder()); auto Scope = E->getScopeModel() ? buildScalarExpr(E->getScope()) : nullptr; + bool ShouldCastToIntPtrTy = true; switch (E->getOp()) { case AtomicExpr::AO__c11_atomic_init: @@ -743,6 +738,9 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { Dest = Atomics.castToAtomicIntPointer(Dest); } + bool PowerOf2Size = (Size & (Size - 1)) == 0; + bool UseLibcall = !PowerOf2Size || (Size > 16); + // For atomics larger than 16 bytes, emit a libcall from the frontend. This // avoids the overhead of dealing with excessively-large value types in IR. // Non-power-of-2 values also lower to libcall here, as they are not currently From c830c7d614ebe774ec78c20a8dfc74190e8c7405 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 19 Apr 2024 11:50:33 +0800 Subject: [PATCH 1507/2301] [CIR] Add `cir.dyn_cast` operation (#483) This PR adds the `cir.dyn_cast` operation for representing `dynamic_cast` in C++. It contains the following contents: - [x] A new `cir.dyn_cast` operation. - [x] ~Two new attributes that will be attached to `cir.dyn_cast` operations:~ - [x] ~`#cir.dyn_cast_info` attributes, which gives general information about a dynamic cast (e.g. the source RTTI pointer, the dest RTTI pointer, etc.)~ - [x] ~`#cir.downcast_info` attribute, which gives even more detailed information about a dynamic cast that is a down-cast. These information will be used when rewriting the `cir.dyn_cast` operation with more fundamental CIR operations.~ - [x] CIRGen support for the new operation and attributes. - [x] Rewrite the new operation with more fundamental CIR operations in LoweringPrepare. ~This is a draft PR. Now I only added the new operation / attributes, and updated the CIRGen part. The LoweringPrepare for the new operation is not implemented. Hopefully the draft can get some initial feedbacks from the community and make sure it is on the right direction so we don't waste time on wrong things.~ Related issue: #470 . --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 21 ++++ .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 52 ++++++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 65 ++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 20 +-- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 12 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 60 ++++----- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 79 ++++-------- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 48 +++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 5 + clang/lib/CIR/Dialect/IR/MissingFeatures.h | 7 ++ .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../Dialect/Transforms/LoweringPrepare.cpp | 37 +++++- .../Transforms/LoweringPrepareCXXABI.h | 36 ++++++ .../LoweringPrepareItaniumCXXABI.cpp | 117 ++++++++++++++++++ clang/test/CIR/CodeGen/dynamic-cast.cpp | 112 +++++++++-------- clang/test/CIR/IR/invalid.cir | 44 +++++++ 16 files changed, 553 insertions(+), 163 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h create mode 100644 clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 5f4bc1874674..659819b17ab2 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -54,6 +54,18 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { getAttr(typ, val)); } + mlir::cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr) { + return create(loc, attr.getType(), attr); + } + + mlir::cir::BoolType getBoolTy() { + return ::mlir::cir::BoolType::get(getContext()); + } + + mlir::cir::VoidType getVoidTy() { + return ::mlir::cir::VoidType::get(getContext()); + } + mlir::cir::PointerType getVoidPtrTy(unsigned AddrSpace = 0) { if (AddrSpace) llvm_unreachable("address space is NYI"); @@ -181,6 +193,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::cir::CastKind::ptr_to_int, src); } + mlir::Value createPtrToBoolCast(mlir::Value v) { + return create(v.getLoc(), getBoolTy(), + mlir::cir::CastKind::ptr_to_bool, v); + } + // TODO(cir): the following function was introduced to keep in sync with LLVM // codegen. CIR does not have "zext" operations. It should eventually be // renamed or removed. For now, we just add whatever cast is required here. @@ -213,6 +230,10 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { src); } + mlir::Value createPtrIsNull(mlir::Value ptr) { + return createNot(createPtrToBoolCast(ptr)); + } + // // Block handling helpers // ---------------------- diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index b03c5cc82c20..14e8ac9de21e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -511,6 +511,58 @@ def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> { }]; } +//===----------------------------------------------------------------------===// +// DynamicCastInfoAttr +//===----------------------------------------------------------------------===// + +def DynamicCastInfoAttr + : CIR_Attr<"DynamicCastInfo", "dyn_cast_info"> { + let summary = "ABI specific information about a dynamic cast"; + let description = [{ + Provide ABI specific information about a dynamic cast operation. + + The `srcRtti` and the `destRtti` parameters give the RTTI of the source + struct type and the destination struct type, respectively. + + The `runtimeFunc` parameter gives the `__dynamic_cast` function which is + provided by the runtime. The `badCastFunc` parameter gives the + `__cxa_bad_cast` function which is also provided by the runtime. + + The `offsetHint` parameter gives the hint value that should be passed to the + `__dynamic_cast` runtime function. + }]; + + let parameters = (ins GlobalViewAttr:$srcRtti, + GlobalViewAttr:$destRtti, + "FlatSymbolRefAttr":$runtimeFunc, + "FlatSymbolRefAttr":$badCastFunc, + IntAttr:$offsetHint); + + let builders = [ + AttrBuilderWithInferredContext<(ins "GlobalViewAttr":$srcRtti, + "GlobalViewAttr":$destRtti, + "FlatSymbolRefAttr":$runtimeFunc, + "FlatSymbolRefAttr":$badCastFunc, + "IntAttr":$offsetHint), [{ + return $_get(srcRtti.getContext(), srcRtti, destRtti, runtimeFunc, + badCastFunc, offsetHint); + }]>, + ]; + + let genVerifyDecl = 1; + let assemblyFormat = [{ + `<` + qualified($srcRtti) `,` qualified($destRtti) `,` + $runtimeFunc `,` $badCastFunc `,` qualified($offsetHint) + `>` + }]; + + let extraClassDeclaration = [{ + /// Get attribute alias name for this attribute. + std::string getAlias() const; + }]; +} + //===----------------------------------------------------------------------===// // AST Wrappers //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 3f337b6250c2..558afa26c889 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -124,6 +124,66 @@ def CastOp : CIR_Op<"cast", [Pure]> { let hasFolder = 1; } +//===----------------------------------------------------------------------===// +// DynamicCastOp +//===----------------------------------------------------------------------===// + +def DCK_PtrCast : I32EnumAttrCase<"ptr", 1>; +def DCK_RefCast : I32EnumAttrCase<"ref", 2>; + +def DynamicCastKind : I32EnumAttr< + "DynamicCastKind", "dynamic cast kind", [DCK_PtrCast, DCK_RefCast]> { + let cppNamespace = "::mlir::cir"; +} + +def DynamicCastOp : CIR_Op<"dyn_cast"> { + let summary = "Perform dynamic cast on struct pointers"; + let description = [{ + The `cir.dyn_cast` operation models part of the semantics of the + `dynamic_cast` operator in C++. It can be used to perform 2 kinds of casts + on struct pointers: + + - Down-cast, which casts a base class pointer to a derived class pointer; + - Side-cast, which casts a class pointer to a sibling class pointer. + + The input of the operation must be a struct pointer. The result of the + operation is also a struct pointer. + + The parameter `kind` specifies the semantics of this operation. If its value + is `ptr`, then the operation models dynamic casts on pointers. Otherwise, if + its value is `ref`, the operation models dynamic casts on references. + Specifically: + + - When the input pointer is a null pointer value: + - If `kind` is `ref`, the operation will invoke undefined behavior. A + sanitizer check will be emitted if sanitizer is on. + - Otherwise, the operation will return a null pointer value as its result. + - When the runtime type check fails: + - If `kind` is `ref`, the operation will throw a `bad_cast` exception. + - Otherwise, the operation will return a null pointer value as its result. + + The `info` argument gives detailed information about the requested dynamic + cast operation. + }]; + + let arguments = (ins DynamicCastKind:$kind, + StructPtr:$src, + DynamicCastInfoAttr:$info); + let results = (outs StructPtr:$result); + + let assemblyFormat = [{ + `(` $kind `,` $src `:` type($src) `,` qualified($info) `)` + `->` type($result) attr-dict + }]; + + let extraClassDeclaration = [{ + /// Determine whether this operation models reference casting in C++. + bool isRefcast() { + return getKind() == ::mlir::cir::DynamicCastKind::ref; + } + }]; +} + //===----------------------------------------------------------------------===// // ObjSizeOp //===----------------------------------------------------------------------===// @@ -2653,6 +2713,11 @@ def CallOp : CIR_CallOp<"call"> { $_state.addOperands(operands); $_state.addAttribute("callee", callee); $_state.addTypes(resType); + }]>, + OpBuilder<(ins "SymbolRefAttr":$callee, + CArg<"ValueRange", "{}">:$operands), [{ + $_state.addOperands(operands); + $_state.addAttribute("callee", callee); }]>]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index deed6ffe63d8..c1e57b0989f1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -398,9 +398,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { llvm_unreachable("Unknown float format!"); } - mlir::cir::BoolType getBoolTy() { - return ::mlir::cir::BoolType::get(getContext()); - } mlir::Type getVirtualFnPtrType(bool isVarArg = false) { // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special // type so it's a bit more clear and C++ idiomatic. @@ -588,10 +585,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, ty, getZeroAttr(ty)); } - mlir::cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr) { - return create(loc, attr.getType(), attr); - } - // // Operation creation helpers // -------------------------- @@ -655,9 +648,12 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { lhs, rhs); } - mlir::Value createPtrToBoolCast(mlir::Value v) { - return create(v.getLoc(), getBoolTy(), - mlir::cir::CastKind::ptr_to_bool, v); + mlir::Value createDynCast(mlir::Location loc, mlir::Value src, + mlir::cir::PointerType destType, bool isRefCast, + mlir::cir::DynamicCastInfoAttr info) { + auto castKind = isRefCast ? mlir::cir::DynamicCastKind::ref + : mlir::cir::DynamicCastKind::ptr; + return create(loc, destType, castKind, src, info); } cir::Address createBaseClassAddr(mlir::Location loc, cir::Address addr, @@ -890,10 +886,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, resultTy, objectPtr, memberPtr); } - - mlir::Value createPtrIsNull(mlir::Value ptr) { - return createNot(createPtrToBoolCast(ptr)); - } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 3a74daa1225e..0e1ffd53c79d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -19,6 +19,7 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "mlir/IR/Attributes.h" #include "clang/AST/Mangle.h" namespace cir { @@ -304,14 +305,9 @@ class CIRGenCXXABI { virtual void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) = 0; - virtual bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, - QualType SrcRecordTy) = 0; - - virtual mlir::Value buildDynamicCastCall(CIRGenFunction &CGF, - mlir::Location Loc, Address Value, - QualType SrcRecordTy, - QualType DestTy, - QualType DestRecordTy) = 0; + virtual mlir::cir::DynamicCastInfoAttr + buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, QualType DestRecordTy) = 0; virtual mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc, Address Value, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 1de67ca5897c..7201cfa4fe8a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include #include #include @@ -1113,6 +1114,8 @@ mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, // If T is "pointer to cv void," then the result is a pointer to the most // derived object pointed to by v. bool isDynCastToVoid = destTy->isVoidPointerType(); + bool isRefCast = destTy->isReferenceType(); + QualType srcRecordTy; QualType destRecordTy; if (isDynCastToVoid) { @@ -1126,45 +1129,36 @@ mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, destRecordTy = destTy->castAs()->getPointeeType(); } + assert(srcRecordTy->isRecordType() && "source type must be a record type!"); buildTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(), srcRecordTy); if (DCE->isAlwaysNull()) return buildDynamicCastToNull(*this, loc, destTy); - assert(srcRecordTy->isRecordType() && "source type must be a record type!"); + if (isDynCastToVoid) { + auto srcIsNull = builder.createPtrIsNull(ThisAddr.getPointer()); + return builder + .create( + loc, srcIsNull, + [&](mlir::OpBuilder &, mlir::Location) { + auto nullPtr = + builder.getNullPtr(builder.getVoidPtrTy(), loc).getResult(); + builder.createYield(loc, nullPtr); + }, + [&](mlir::OpBuilder &, mlir::Location) { + auto castedPtr = CGM.getCXXABI().buildDynamicCastToVoid( + *this, loc, ThisAddr, srcRecordTy); + builder.createYield(loc, castedPtr); + }) + .getResult(); + } - // C++ [expr.dynamic.cast]p4: - // If the value of v is a null pointer value in the pointer case, the result - // is the null pointer value of type T. - bool shouldNullCheckSrcValue = - CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(srcTy->isPointerType(), - srcRecordTy); - - auto buildDynamicCastAfterNullCheck = [&]() -> mlir::Value { - if (isDynCastToVoid) - return CGM.getCXXABI().buildDynamicCastToVoid(*this, loc, ThisAddr, - srcRecordTy); - - assert(destRecordTy->isRecordType() && - "destination type must be a record type!"); - return CGM.getCXXABI().buildDynamicCastCall( - *this, loc, ThisAddr, srcRecordTy, destTy, destRecordTy); - }; + assert(destRecordTy->isRecordType() && "dest type must be a record type!"); - if (!shouldNullCheckSrcValue) - return buildDynamicCastAfterNullCheck(); - - mlir::Value srcValueIsNull = builder.createPtrIsNull(ThisAddr.getPointer()); - return builder - .create( - loc, srcValueIsNull, - [&](mlir::OpBuilder &, mlir::Location) { - builder.createYield(loc, - buildDynamicCastToNull(*this, loc, destTy)); - }, - [&](mlir::OpBuilder &, mlir::Location) { - builder.createYield(loc, buildDynamicCastAfterNullCheck()); - }) - .getResult(); + auto destCirTy = ConvertType(destTy).cast(); + auto castInfo = CGM.getCXXABI().buildDynamicCastInfo(*this, loc, srcRecordTy, + destRecordTy); + return builder.createDynCast(loc, ThisAddr.getPointer(), destCirTy, isRefCast, + castInfo); } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index b05623a2e5b9..87175d545ac0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -27,6 +27,7 @@ #include "clang/AST/VTableBuilder.h" #include "clang/Basic/Linkage.h" #include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "llvm/Support/ErrorHandling.h" using namespace cir; @@ -283,15 +284,15 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) override; - bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, - QualType SrcRecordTy) override { - return SrcIsPtr; - } + // The traditional clang CodeGen emits calls to `__dynamic_cast` directly into + // LLVM in the `emitDynamicCastCall` function. In CIR, `dynamic_cast` + // expressions are lowered to `cir.dyn_cast` ops instead of calls to runtime + // functions. So during CIRGen we don't need the `emitDynamicCastCall` + // function that clang CodeGen has. - mlir::Value buildDynamicCastCall(CIRGenFunction &CGF, mlir::Location Loc, - Address Value, QualType SrcRecordTy, - QualType DestTy, - QualType DestRecordTy) override; + mlir::cir::DynamicCastInfoAttr + buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, QualType DestRecordTy) override; mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc, Address Value, @@ -2283,55 +2284,29 @@ static mlir::cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { return CGF.CGM.createRuntimeFunction(FTy, "__dynamic_cast"); } -mlir::Value CIRGenItaniumCXXABI::buildDynamicCastCall( - CIRGenFunction &CGF, mlir::Location Loc, Address Value, - QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy) { - mlir::Type ptrdiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); +mlir::cir::DynamicCastInfoAttr CIRGenItaniumCXXABI::buildDynamicCastInfo( + CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, + QualType DestRecordTy) { + auto srcRtti = CGF.CGM.getAddrOfRTTIDescriptor(Loc, SrcRecordTy) + .cast(); + auto destRtti = CGF.CGM.getAddrOfRTTIDescriptor(Loc, DestRecordTy) + .cast(); - mlir::Value srcRtti = CGF.getBuilder().getConstant( - Loc, - CGF.CGM.getAddrOfRTTIDescriptor(Loc, SrcRecordTy.getUnqualifiedType()) - .cast()); - mlir::Value destRtti = CGF.getBuilder().getConstant( - Loc, - CGF.CGM.getAddrOfRTTIDescriptor(Loc, DestRecordTy.getUnqualifiedType()) - .cast()); + auto runtimeFuncOp = getItaniumDynamicCastFn(CGF); + auto badCastFuncOp = getBadCastFn(CGF); + auto runtimeFuncRef = mlir::FlatSymbolRefAttr::get(runtimeFuncOp); + auto badCastFuncRef = mlir::FlatSymbolRefAttr::get(badCastFuncOp); - // Compute the offset hint. const CXXRecordDecl *srcDecl = SrcRecordTy->getAsCXXRecordDecl(); const CXXRecordDecl *destDecl = DestRecordTy->getAsCXXRecordDecl(); - mlir::Value offsetHint = CGF.getBuilder().getConstAPInt( - Loc, ptrdiffTy, - llvm::APSInt::get(computeOffsetHint(CGF.getContext(), srcDecl, destDecl) - .getQuantity())); - - // Emit the call to __dynamic_cast. - mlir::Value srcPtr = - CGF.getBuilder().createBitcast(Value.getPointer(), CGF.VoidPtrTy); - mlir::Value args[4] = {srcPtr, srcRtti, destRtti, offsetHint}; - mlir::Value castedPtr = - CGF.buildRuntimeCall(Loc, getItaniumDynamicCastFn(CGF), args); - - assert(castedPtr.getType().isa() && - "the return value of __dynamic_cast should be a ptr"); - - /// C++ [expr.dynamic.cast]p9: - /// A failed cast to reference type throws std::bad_cast - if (DestTy->isReferenceType()) { - // Emit a cir.if that checks the casted value. - mlir::Value castedValueIsNull = CGF.getBuilder().createPtrIsNull(castedPtr); - CGF.getBuilder().create( - Loc, castedValueIsNull, false, [&](mlir::OpBuilder &, mlir::Location) { - buildBadCastCall(CGF, Loc); - // TODO(cir): remove this once buildBadCastCall inserts unreachable - CGF.getBuilder().createYield(Loc); - }); - } + auto offsetHint = computeOffsetHint(CGF.getContext(), srcDecl, destDecl); + + mlir::Type ptrdiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); + auto offsetHintAttr = + mlir::cir::IntAttr::get(ptrdiffTy, offsetHint.getQuantity()); - // Note that castedPtr is a void*. Cast it to a pointer to the destination - // type before return. - mlir::Type destCIRTy = CGF.ConvertType(DestTy); - return CGF.getBuilder().createBitcast(castedPtr, destCIRTy); + return mlir::cir::DynamicCastInfoAttr::get(srcRtti, destRtti, runtimeFuncRef, + badCastFuncRef, offsetHintAttr); } mlir::Value CIRGenItaniumCXXABI::buildDynamicCastToVoid(CIRGenFunction &CGF, diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 4d539a5fc139..68cb1837966a 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -449,6 +449,54 @@ DataMemberAttr::verify(function_ref emitError, return success(); } +//===----------------------------------------------------------------------===// +// DynamicCastInfoAtttr definitions +//===----------------------------------------------------------------------===// + +std::string DynamicCastInfoAttr::getAlias() const { + // The alias looks like: `dyn_cast_info__` + + std::string alias = "dyn_cast_info_"; + + alias.append(getSrcRtti().getSymbol().getValue()); + alias.push_back('_'); + alias.append(getDestRtti().getSymbol().getValue()); + + return alias; +} + +LogicalResult DynamicCastInfoAttr::verify( + function_ref emitError, + mlir::cir::GlobalViewAttr srcRtti, mlir::cir::GlobalViewAttr destRtti, + mlir::FlatSymbolRefAttr runtimeFunc, mlir::FlatSymbolRefAttr badCastFunc, + mlir::cir::IntAttr offsetHint) { + auto isRttiPtr = [](mlir::Type ty) { + // RTTI pointers are !cir.ptr. + + auto ptrTy = ty.dyn_cast(); + if (!ptrTy) + return false; + + auto pointeeIntTy = ptrTy.getPointee().dyn_cast(); + if (!pointeeIntTy) + return false; + + return pointeeIntTy.isUnsigned() && pointeeIntTy.getWidth() == 8; + }; + + if (!isRttiPtr(srcRtti.getType())) { + emitError() << "srcRtti must be an RTTI pointer"; + return failure(); + } + + if (!isRttiPtr(destRtti.getType())) { + emitError() << "destRtti must be an RTTI pointer"; + return failure(); + } + + return success(); +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 5f1c2b0d24df..2729fae7fbb6 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -99,6 +99,11 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << cmpThreeWayInfoAttr.getAlias(); return AliasResult::FinalAlias; } + if (auto dynCastInfoAttr = + attr.dyn_cast()) { + os << dynCastInfoAttr.getAlias(); + return AliasResult::FinalAlias; + } return AliasResult::NoAlias; } diff --git a/clang/lib/CIR/Dialect/IR/MissingFeatures.h b/clang/lib/CIR/Dialect/IR/MissingFeatures.h index d8271533cd98..e21fc0e0b191 100644 --- a/clang/lib/CIR/Dialect/IR/MissingFeatures.h +++ b/clang/lib/CIR/Dialect/IR/MissingFeatures.h @@ -20,6 +20,13 @@ namespace cir { struct MissingFeatures { // C++ ABI support static bool cxxABI() { return false; } + static bool setCallingConv() { return false; } + + // Address space related + static bool addressSpace() { return false; } + + // Sanitizers + static bool buildTypeCheck() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 2a8a4dfeae92..8bd6a06b7c4e 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -1,6 +1,7 @@ add_clang_library(MLIRCIRTransforms LifetimeCheck.cpp LoweringPrepare.cpp + LoweringPrepareItaniumCXXABI.cpp MergeCleanups.cpp DropAST.cpp IdiomRecognizer.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index e59e5eb8a7fe..00ec54867c9f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "LoweringPrepareCXXABI.h" #include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinAttributes.h" @@ -25,6 +26,8 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Path.h" +#include + using cir::CIRBaseBuilderTy; using namespace mlir; using namespace mlir::cir; @@ -68,6 +71,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void runOnOp(Operation *op); void lowerThreeWayCmpOp(CmpThreeWayOp op); void lowerGlobalOp(GlobalOp op); + void lowerDynamicCastOp(DynamicCastOp op); void lowerStdFindOp(StdFindOp op); void lowerIterBeginOp(IterBeginOp op); void lowerIterEndOp(IterEndOp op); @@ -100,7 +104,23 @@ struct LoweringPreparePass : public LoweringPrepareBase { /// ----------- clang::ASTContext *astCtx; - void setASTContext(clang::ASTContext *c) { astCtx = c; } + std::shared_ptr<::cir::LoweringPrepareCXXABI> cxxABI; + + void setASTContext(clang::ASTContext *c) { + astCtx = c; + switch (c->getCXXABIKind()) { + case clang::TargetCXXABI::GenericItanium: + case clang::TargetCXXABI::GenericAArch64: + case clang::TargetCXXABI::AppleARM64: + // TODO: this isn't quite right, clang uses AppleARM64CXXABI which + // inherits from ARMCXXABI. We'll have to follow suit. + cxxABI.reset(::cir::LoweringPrepareCXXABI::createItaniumABI()); + break; + + default: + llvm_unreachable("NYI"); + } + } /// Tracks current module. ModuleOp theModule; @@ -377,6 +397,15 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { builder.create(f.getLoc()); } +void LoweringPreparePass::lowerDynamicCastOp(DynamicCastOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op); + + auto loweredValue = cxxABI->lowerDynamicCast(builder, op); + op.replaceAllUsesWith(loweredValue); + op.erase(); +} + static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, mlir::Operation *op, mlir::Type eltTy, mlir::Value arrayAddr, @@ -507,6 +536,8 @@ void LoweringPreparePass::runOnOp(Operation *op) { lowerThreeWayCmpOp(threeWayCmp); } else if (auto getGlobal = dyn_cast(op)) { lowerGlobalOp(getGlobal); + } else if (auto dynamicCast = dyn_cast(op)) { + lowerDynamicCastOp(dynamicCast); } else if (auto stdFind = dyn_cast(op)) { lowerStdFindOp(stdFind); } else if (auto iterBegin = dyn_cast(op)) { @@ -535,8 +566,8 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { - if (isa(op)) + if (isa(op)) opsToTransform.push_back(op); }); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h new file mode 100644 index 000000000000..2a094bad8702 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h @@ -0,0 +1,36 @@ +//====- LoweringPrepareCXXABI.h -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides the LoweringPrepareCXXABI class, which is the base class +// for ABI specific functionalities that are required during LLVM lowering +// prepare. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_LOWERING_PREPARE_CXX_ABI_H +#define LLVM_CLANG_LIB_CIR_LOWERING_PREPARE_CXX_ABI_H + +#include "mlir/IR/Value.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +namespace cir { + +class LoweringPrepareCXXABI { +public: + static LoweringPrepareCXXABI *createItaniumABI(); + + virtual mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, + mlir::cir::DynamicCastOp op) = 0; + + virtual ~LoweringPrepareCXXABI() {} +}; + +} // namespace cir + +#endif // LLVM_CLANG_LIB_CIR_LOWERING_PREPARE_CXX_ABI_H diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp new file mode 100644 index 000000000000..3619648056cc --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp @@ -0,0 +1,117 @@ +//====- LoweringPrepareItaniumCXXABI.h - Itanium ABI specific code --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides Itanium C++ ABI specific code that is used during LLVMIR +// lowering prepare. +// +//===----------------------------------------------------------------------===// + +#include "../IR/MissingFeatures.h" +#include "LoweringPrepareCXXABI.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/Value.h" +#include "mlir/IR/ValueRange.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +using namespace cir; + +namespace { + +class LoweringPrepareItaniumCXXABI : public LoweringPrepareCXXABI { +public: + mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, + mlir::cir::DynamicCastOp op) override; +}; + +} // namespace + +LoweringPrepareCXXABI *LoweringPrepareCXXABI::createItaniumABI() { + return new LoweringPrepareItaniumCXXABI(); +} + +static void buildBadCastCall(CIRBaseBuilderTy &builder, mlir::Location loc, + mlir::FlatSymbolRefAttr badCastFuncRef) { + // TODO(cir): set the calling convention to __cxa_bad_cast. + assert(!MissingFeatures::setCallingConv()); + + builder.create(loc, badCastFuncRef, mlir::ValueRange{}); + builder.create(loc); + builder.clearInsertionPoint(); +} + +static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, + mlir::cir::DynamicCastOp op) { + auto loc = op->getLoc(); + auto srcValue = op.getSrc(); + auto castInfo = op.getInfo().cast(); + + // TODO(cir): consider address space + assert(!MissingFeatures::addressSpace()); + + auto srcPtr = builder.createBitcast(srcValue, builder.getVoidPtrTy()); + auto srcRtti = builder.getConstant(loc, castInfo.getSrcRtti()); + auto destRtti = builder.getConstant(loc, castInfo.getDestRtti()); + auto offsetHint = builder.getConstant(loc, castInfo.getOffsetHint()); + + auto dynCastFuncRef = castInfo.getRuntimeFunc(); + mlir::Value dynCastFuncArgs[4] = {srcPtr, srcRtti, destRtti, offsetHint}; + + // TODO(cir): set the calling convention for __dynamic_cast. + assert(!MissingFeatures::setCallingConv()); + mlir::Value castedPtr = + builder + .create(loc, dynCastFuncRef, + builder.getVoidPtrTy(), dynCastFuncArgs) + .getResult(0); + + assert(castedPtr.getType().isa() && + "the return value of __dynamic_cast should be a ptr"); + + /// C++ [expr.dynamic.cast]p9: + /// A failed cast to reference type throws std::bad_cast + if (op.isRefcast()) { + // Emit a cir.if that checks the casted value. + mlir::Value castedValueIsNull = builder.createPtrIsNull(castedPtr); + builder.create( + loc, castedValueIsNull, false, [&](mlir::OpBuilder &, mlir::Location) { + buildBadCastCall(builder, loc, castInfo.getBadCastFunc()); + }); + } + + // Note that castedPtr is a void*. Cast it to a pointer to the destination + // type before return. + return builder.createBitcast(castedPtr, op.getType()); +} + +mlir::Value +LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, + mlir::cir::DynamicCastOp op) { + auto loc = op->getLoc(); + auto srcValue = op.getSrc(); + + assert(!MissingFeatures::buildTypeCheck()); + + if (op.isRefcast()) + return buildDynamicCastAfterNullCheck(builder, op); + + auto srcValueIsNull = builder.createPtrToBoolCast(srcValue); + return builder + .create( + loc, srcValueIsNull, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield( + loc, builder.getNullPtr(op.getType(), loc).getResult()); + }, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield(loc, + buildDynamicCastAfterNullCheck(builder, op)); + }) + .getResult(); +} diff --git a/clang/test/CIR/CodeGen/dynamic-cast.cpp b/clang/test/CIR/CodeGen/dynamic-cast.cpp index f9648ff72f08..ea31b4460c12 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast.cpp @@ -1,75 +1,81 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER struct Base { virtual ~Base(); }; -// CHECK: !ty_22Base22 = !cir.struct struct Derived : Base {}; -// CHECK: !ty_22Derived22 = !cir.struct -// CHECK: cir.func private @__dynamic_cast(!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr +// BEFORE: #dyn_cast_info__ZTI4Base__ZTI7Derived = #cir.dyn_cast_info<#cir.global_view<@_ZTI4Base> : !cir.ptr, #cir.global_view<@_ZTI7Derived> : !cir.ptr, @__dynamic_cast, @__cxa_bad_cast, #cir.int<0> : !s64i> +// BEFORE: !ty_22Base22 = !cir.struct +// BEFORE: !ty_22Derived22 = !cir.struct Derived *ptr_cast(Base *b) { return dynamic_cast(b); } -// CHECK: cir.func @_Z8ptr_castP4Base -// CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr -// CHECK-NEXT: %[[#V2:]] = cir.cast(ptr_to_bool, %[[#V1]] : !cir.ptr), !cir.bool -// CHECK-NEXT: %[[#V3:]] = cir.unary(not, %[[#V2]]) : !cir.bool, !cir.bool -// CHECK-NEXT: %{{.+}} = cir.ternary(%[[#V3]], true { -// CHECK-NEXT: %[[#V4:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK-NEXT: cir.yield %[[#V4]] : !cir.ptr -// CHECK-NEXT: }, false { -// CHECK-NEXT: %[[#V5:]] = cir.const(#cir.global_view<@_ZTI4Base> : !cir.ptr) : !cir.ptr -// CHECK-NEXT: %[[#V6:]] = cir.const(#cir.global_view<@_ZTI7Derived> : !cir.ptr) : !cir.ptr -// CHECK-NEXT: %[[#V7:]] = cir.const(#cir.int<0> : !s64i) : !s64i -// CHECK-NEXT: %[[#V8:]] = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr -// CHECK-NEXT: %[[#V9:]] = cir.call @__dynamic_cast(%[[#V8]], %[[#V5]], %[[#V6]], %[[#V7]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[#V10:]] = cir.cast(bitcast, %[[#V9]] : !cir.ptr), !cir.ptr -// CHECK-NEXT: cir.yield %[[#V10]] : !cir.ptr -// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr -// CHECK: cir.func private @__cxa_bad_cast() +// BEFORE: cir.func @_Z8ptr_castP4Base +// BEFORE: %{{.+}} = cir.dyn_cast(ptr, %{{.+}} : !cir.ptr, #dyn_cast_info__ZTI4Base__ZTI7Derived) -> !cir.ptr +// BEFORE: } + +// AFTER: cir.func @_Z8ptr_castP4Base +// AFTER: %[[#SRC_IS_NULL:]] = cir.cast(ptr_to_bool, %{{.+}} : !cir.ptr), !cir.bool +// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#SRC_IS_NULL]], true { +// AFTER-NEXT: %[[#NULL:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// AFTER-NEXT: cir.yield %[[#NULL]] : !cir.ptr +// AFTER-NEXT: }, false { +// AFTER-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr +// AFTER-NEXT: %[[#SRC_RTTI:]] = cir.const(#cir.global_view<@_ZTI4Base> : !cir.ptr) : !cir.ptr +// AFTER-NEXT: %[[#DEST_RTTI:]] = cir.const(#cir.global_view<@_ZTI7Derived> : !cir.ptr) : !cir.ptr +// AFTER-NEXT: %[[#OFFSET_HINT:]] = cir.const(#cir.int<0> : !s64i) : !s64i +// AFTER-NEXT: %[[#CASTED_PTR:]] = cir.call @__dynamic_cast(%[[#SRC_VOID_PTR]], %[[#SRC_RTTI]], %[[#DEST_RTTI]], %[[#OFFSET_HINT]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr +// AFTER-NEXT: %[[#RESULT:]] = cir.cast(bitcast, %[[#CASTED_PTR]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.yield %[[#RESULT]] : !cir.ptr +// AFTER-NEXT: }) : (!cir.bool) -> !cir.ptr +// AFTER: } Derived &ref_cast(Base &b) { return dynamic_cast(b); } -// CHECK: cir.func @_Z8ref_castR4Base -// CHECK: %[[#V11:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr -// CHECK-NEXT: %[[#V12:]] = cir.const(#cir.global_view<@_ZTI4Base> : !cir.ptr) : !cir.ptr -// CHECK-NEXT: %[[#V13:]] = cir.const(#cir.global_view<@_ZTI7Derived> : !cir.ptr) : !cir.ptr -// CHECK-NEXT: %[[#V14:]] = cir.const(#cir.int<0> : !s64i) : !s64i -// CHECK-NEXT: %[[#V15:]] = cir.cast(bitcast, %[[#V11]] : !cir.ptr), !cir.ptr -// CHECK-NEXT: %[[#V16:]] = cir.call @__dynamic_cast(%[[#V15]], %[[#V12]], %[[#V13]], %[[#V14]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr -// CHECK-NEXT: %[[#V17:]] = cir.cast(ptr_to_bool, %[[#V16]] : !cir.ptr), !cir.bool -// CHECK-NEXT: %[[#V18:]] = cir.unary(not, %[[#V17]]) : !cir.bool, !cir.bool -// CHECK-NEXT: cir.if %[[#V18]] { -// CHECK-NEXT: cir.call @__cxa_bad_cast() : () -> () -// CHECK-NEXT: cir.unreachable -// CHECK-NEXT: } -// CHECK-NEXT: %{{.+}} = cir.cast(bitcast, %[[#V16]] : !cir.ptr), !cir.ptr +// BEFORE: cir.func @_Z8ref_castR4Base +// BEFORE: %{{.+}} = cir.dyn_cast(ref, %{{.+}} : !cir.ptr, #dyn_cast_info__ZTI4Base__ZTI7Derived) -> !cir.ptr +// BEFORE: } + +// AFTER: cir.func @_Z8ref_castR4Base +// AFTER: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr +// AFTER-NEXT: %[[#SRC_RTTI:]] = cir.const(#cir.global_view<@_ZTI4Base> : !cir.ptr) : !cir.ptr +// AFTER-NEXT: %[[#DEST_RTTI:]] = cir.const(#cir.global_view<@_ZTI7Derived> : !cir.ptr) : !cir.ptr +// AFTER-NEXT: %[[#OFFSET_HINT:]] = cir.const(#cir.int<0> : !s64i) : !s64i +// AFTER-NEXT: %[[#CASTED_PTR:]] = cir.call @__dynamic_cast(%[[#SRC_VOID_PTR]], %[[#SRC_RTTI]], %[[#DEST_RTTI]], %[[#OFFSET_HINT]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr +// AFTER-NEXT: %[[#CASTED_PTR_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#CASTED_PTR]] : !cir.ptr), !cir.bool +// AFTER-NEXT: %[[#CASTED_PTR_IS_NULL:]] = cir.unary(not, %[[#CASTED_PTR_IS_NOT_NULL]]) : !cir.bool, !cir.bool +// AFTER-NEXT: cir.if %[[#CASTED_PTR_IS_NULL]] { +// AFTER-NEXT: cir.call @__cxa_bad_cast() : () -> () +// AFTER-NEXT: cir.unreachable +// AFTER-NEXT: } +// AFTER-NEXT: %{{.+}} = cir.cast(bitcast, %[[#CASTED_PTR]] : !cir.ptr), !cir.ptr +// AFTER: } void *ptr_cast_to_complete(Base *ptr) { return dynamic_cast(ptr); } -// CHECK: cir.func @_Z20ptr_cast_to_completeP4Base -// CHECK: %[[#V19:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr -// CHECK-NEXT: %[[#V20:]] = cir.cast(ptr_to_bool, %[[#V19]] : !cir.ptr), !cir.bool -// CHECK-NEXT: %[[#V21:]] = cir.unary(not, %[[#V20]]) : !cir.bool, !cir.bool -// CHECK-NEXT: %{{.+}} = cir.ternary(%[[#V21]], true { -// CHECK-NEXT: %[[#V22:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK-NEXT: cir.yield %[[#V22]] : !cir.ptr -// CHECK-NEXT: }, false { -// CHECK-NEXT: %[[#V23:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr> -// CHECK-NEXT: %[[#V24:]] = cir.load %[[#V23]] : cir.ptr >, !cir.ptr -// CHECK-NEXT: %[[#V25:]] = cir.vtable.address_point( %[[#V24]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : cir.ptr -// CHECK-NEXT: %[[#V26:]] = cir.load %[[#V25]] : cir.ptr , !s64i -// CHECK-NEXT: %[[#V27:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr -// CHECK-NEXT: %[[#V28:]] = cir.ptr_stride(%[[#V27]] : !cir.ptr, %[[#V26]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[#V29:]] = cir.cast(bitcast, %[[#V28]] : !cir.ptr), !cir.ptr -// CHECK-NEXT: cir.yield %[[#V29]] : !cir.ptr -// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr +// BEFORE: cir.func @_Z20ptr_cast_to_completeP4Base +// BEFORE: %[[#V19:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr +// BEFORE-NEXT: %[[#V20:]] = cir.cast(ptr_to_bool, %[[#V19]] : !cir.ptr), !cir.bool +// BEFORE-NEXT: %[[#V21:]] = cir.unary(not, %[[#V20]]) : !cir.bool, !cir.bool +// BEFORE-NEXT: %{{.+}} = cir.ternary(%[[#V21]], true { +// BEFORE-NEXT: %[[#V22:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// BEFORE-NEXT: cir.yield %[[#V22]] : !cir.ptr +// BEFORE-NEXT: }, false { +// BEFORE-NEXT: %[[#V23:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr> +// BEFORE-NEXT: %[[#V24:]] = cir.load %[[#V23]] : cir.ptr >, !cir.ptr +// BEFORE-NEXT: %[[#V25:]] = cir.vtable.address_point( %[[#V24]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : cir.ptr +// BEFORE-NEXT: %[[#V26:]] = cir.load %[[#V25]] : cir.ptr , !s64i +// BEFORE-NEXT: %[[#V27:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr +// BEFORE-NEXT: %[[#V28:]] = cir.ptr_stride(%[[#V27]] : !cir.ptr, %[[#V26]] : !s64i), !cir.ptr +// BEFORE-NEXT: %[[#V29:]] = cir.cast(bitcast, %[[#V28]] : !cir.ptr), !cir.ptr +// BEFORE-NEXT: cir.yield %[[#V29]] : !cir.ptr +// BEFORE-NEXT: }) : (!cir.bool) -> !cir.ptr diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 78956fab3980..abc24a79bc4b 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1062,3 +1062,47 @@ module { cir.return } } + +// ----- + +!s64i = !cir.int +!s8i = !cir.int +!u32i = !cir.int +!u8i = !cir.int +!void = !cir.void + +!Base = !cir.struct ()>>>}> +!Derived = !cir.struct ()>>>}>}> + +module { + cir.global "private" constant external @_ZTI4Base : !cir.ptr + cir.global "private" constant external @_ZTI7Derived : !cir.ptr + cir.func private @__dynamic_cast(!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr + cir.func private @__cxa_bad_cast() + cir.func @test(%arg0 : !cir.ptr) { + // expected-error@+1 {{srcRtti must be an RTTI pointer}} + %0 = cir.dyn_cast(ptr, %arg0 : !cir.ptr, #cir.dyn_cast_info<#cir.global_view<@_ZTI4Base> : !cir.ptr, #cir.global_view<@_ZTI7Derived> : !cir.ptr, @__dynamic_cast, @__cxa_bad_cast, #cir.int<0> : !s64i>) -> !cir.ptr + } +} + +// ----- + +!s64i = !cir.int +!s8i = !cir.int +!u32i = !cir.int +!u8i = !cir.int +!void = !cir.void + +!Base = !cir.struct ()>>>}> +!Derived = !cir.struct ()>>>}>}> + +module { + cir.global "private" constant external @_ZTI4Base : !cir.ptr + cir.global "private" constant external @_ZTI7Derived : !cir.ptr + cir.func private @__dynamic_cast(!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr + cir.func private @__cxa_bad_cast() + cir.func @test(%arg0 : !cir.ptr) { + // expected-error@+1 {{destRtti must be an RTTI pointer}} + %0 = cir.dyn_cast(ptr, %arg0 : !cir.ptr, #cir.dyn_cast_info<#cir.global_view<@_ZTI4Base> : !cir.ptr, #cir.global_view<@_ZTI7Derived> : !cir.ptr, @__dynamic_cast, @__cxa_bad_cast, #cir.int<0> : !s64i>) -> !cir.ptr + } +} From df77dbd38174ef0c5247f50e11859ab32516a0eb Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 19 Apr 2024 07:18:28 +0300 Subject: [PATCH 1508/2301] [CIR][Codegen] TernaryOp flattening (#550) This PR adds flattening for the `cir.ternary`. This PR is just a copy pasta from the lowering + tests added/fixed. Given the complexity of `switch` flattening, I decided to open one more PR for flattening. --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 57 +++++++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 74 +++---------------- clang/test/CIR/CodeGen/ternary.cir | 47 ++++++++++++ .../CIR/Lowering/{tenary.cir => ternary.cir} | 20 +++-- 4 files changed, 120 insertions(+), 78 deletions(-) create mode 100644 clang/test/CIR/CodeGen/ternary.cir rename clang/test/CIR/Lowering/{tenary.cir => ternary.cir} (75%) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index b9c9481805d7..24385a223f32 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -244,10 +244,59 @@ class CIRLoopOpInterfaceFlattening } }; +class CIRTernaryOpFlattening + : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::TernaryOp op, + mlir::PatternRewriter &rewriter) const override { + auto loc = op->getLoc(); + auto *condBlock = rewriter.getInsertionBlock(); + auto opPosition = rewriter.getInsertionPoint(); + auto *remainingOpsBlock = rewriter.splitBlock(condBlock, opPosition); + auto *continueBlock = rewriter.createBlock( + remainingOpsBlock, op->getResultTypes(), + SmallVector(/* result number always 1 */ 1, loc)); + rewriter.create(loc, remainingOpsBlock); + + auto &trueRegion = op.getTrueRegion(); + auto *trueBlock = &trueRegion.front(); + mlir::Operation *trueTerminator = trueRegion.back().getTerminator(); + rewriter.setInsertionPointToEnd(&trueRegion.back()); + auto trueYieldOp = dyn_cast(trueTerminator); + + rewriter.replaceOpWithNewOp( + trueYieldOp, trueYieldOp.getArgs(), continueBlock); + rewriter.inlineRegionBefore(trueRegion, continueBlock); + + auto *falseBlock = continueBlock; + auto &falseRegion = op.getFalseRegion(); + + falseBlock = &falseRegion.front(); + mlir::Operation *falseTerminator = falseRegion.back().getTerminator(); + rewriter.setInsertionPointToEnd(&falseRegion.back()); + auto falseYieldOp = dyn_cast(falseTerminator); + rewriter.replaceOpWithNewOp( + falseYieldOp, falseYieldOp.getArgs(), continueBlock); + rewriter.inlineRegionBefore(falseRegion, continueBlock); + + rewriter.setInsertionPointToEnd(condBlock); + rewriter.create(loc, op.getCond(), trueBlock, + falseBlock); + + rewriter.replaceOp(op, continueBlock->getArguments()); + + // Ok, we're done! + return mlir::success(); + } +}; + void populateFlattenCFGPatterns(RewritePatternSet &patterns) { - patterns - .add( - patterns.getContext()); + patterns.add( + patterns.getContext()); } void FlattenCFGPass::runOnOperation() { @@ -257,7 +306,7 @@ void FlattenCFGPass::runOnOperation() { // Collect operations to apply patterns. SmallVector ops; getOperation()->walk([&](Operation *op) { - if (isa(op)) + if (isa(op)) ops.push_back(op); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5e91c4c64228..e545d2218dba 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2016,58 +2016,6 @@ class CIRShiftOpLowering } }; -class CIRTernaryOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::TernaryOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto loc = op->getLoc(); - auto *condBlock = rewriter.getInsertionBlock(); - auto opPosition = rewriter.getInsertionPoint(); - auto *remainingOpsBlock = rewriter.splitBlock(condBlock, opPosition); - auto *continueBlock = rewriter.createBlock( - remainingOpsBlock, op->getResultTypes(), - SmallVector(/* result number always 1 */ 1, loc)); - rewriter.create(loc, remainingOpsBlock); - - auto &trueRegion = op.getTrueRegion(); - auto *trueBlock = &trueRegion.front(); - mlir::Operation *trueTerminator = trueRegion.back().getTerminator(); - rewriter.setInsertionPointToEnd(&trueRegion.back()); - auto trueYieldOp = dyn_cast(trueTerminator); - - rewriter.replaceOpWithNewOp( - trueYieldOp, trueYieldOp.getArgs(), continueBlock); - rewriter.inlineRegionBefore(trueRegion, continueBlock); - - auto *falseBlock = continueBlock; - auto &falseRegion = op.getFalseRegion(); - - falseBlock = &falseRegion.front(); - mlir::Operation *falseTerminator = falseRegion.back().getTerminator(); - rewriter.setInsertionPointToEnd(&falseRegion.back()); - auto falseYieldOp = dyn_cast(falseTerminator); - rewriter.replaceOpWithNewOp( - falseYieldOp, falseYieldOp.getArgs(), continueBlock); - rewriter.inlineRegionBefore(falseRegion, continueBlock); - - rewriter.setInsertionPointToEnd(condBlock); - auto condition = adaptor.getCond(); - auto i1Condition = rewriter.create( - op.getLoc(), rewriter.getI1Type(), condition); - rewriter.create(loc, i1Condition.getResult(), - trueBlock, falseBlock); - - rewriter.replaceOp(op, continueBlock->getArguments()); - - // Ok, we're done! - return mlir::success(); - } -}; - class CIRCmpOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -2966,17 +2914,17 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, - CIRBrOpLowering, CIRTernaryOpLowering, CIRGetMemberOpLowering, - CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, - CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, - CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, - CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, - CIRVectorSplatLowering, CIRVectorTernaryLowering, - CIRVectorShuffleIntsLowering, CIRVectorShuffleVecLowering, - CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, - CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, - CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, - CIRIsConstantOpLowering>(converter, patterns.getContext()); + CIRBrOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, + CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, + CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, + CIRVectorCreateLowering, CIRVectorInsertLowering, + CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, + CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, + CIRVectorShuffleVecLowering, CIRStackSaveLowering, + CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, + CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, + CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/ternary.cir b/clang/test/CIR/CodeGen/ternary.cir new file mode 100644 index 000000000000..1589fee6f6be --- /dev/null +++ b/clang/test/CIR/CodeGen/ternary.cir @@ -0,0 +1,47 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!s32i = !cir.int + +module { + cir.func @foo(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + %2 = cir.load %0 : cir.ptr , !s32i + %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool + %5 = cir.ternary(%4, true { + %7 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.yield %7 : !s32i + }, false { + %7 = cir.const(#cir.int<5> : !s32i) : !s32i + cir.yield %7 : !s32i + }) : (!cir.bool) -> !s32i + cir.store %5, %1 : !s32i, cir.ptr + %6 = cir.load %1 : cir.ptr , !s32i + cir.return %6 : !s32i + } + +// CHECK: cir.func @foo(%arg0: !s32i) -> !s32i { +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool +// CHECK: cir.brcond %4 ^bb1, ^bb2 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: %5 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: cir.br ^bb3(%5 : !s32i) +// CHECK: ^bb2: // pred: ^bb0 +// CHECK: %6 = cir.const(#cir.int<5> : !s32i) : !s32i +// CHECK: cir.br ^bb3(%6 : !s32i) +// CHECK: ^bb3(%7: !s32i): // 2 preds: ^bb1, ^bb2 +// CHECK: cir.br ^bb4 +// CHECK: ^bb4: // pred: ^bb3 +// CHECK: cir.store %7, %1 : !s32i, cir.ptr +// CHECK: %8 = cir.load %1 : cir.ptr , !s32i +// CHECK: cir.return %8 : !s32i +// CHECK: } + +} diff --git a/clang/test/CIR/Lowering/tenary.cir b/clang/test/CIR/Lowering/ternary.cir similarity index 75% rename from clang/test/CIR/Lowering/tenary.cir rename to clang/test/CIR/Lowering/ternary.cir index 213dcc5b3ade..b80ff86c9bbc 100644 --- a/clang/test/CIR/Lowering/tenary.cir +++ b/clang/test/CIR/Lowering/ternary.cir @@ -33,19 +33,17 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: %4 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: %5 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %6 = llvm.icmp "sgt" %4, %5 : i32 -// MLIR-NEXT: %7 = llvm.zext %6 : i1 to i8 -// MLIR-NEXT: %8 = llvm.trunc %7 : i8 to i1 -// MLIR-NEXT: llvm.cond_br %8, ^bb1, ^bb2 +// MLIR-NEXT: llvm.cond_br %6, ^bb1, ^bb2 // MLIR-NEXT: ^bb1: // pred: ^bb0 -// MLIR-NEXT: %9 = llvm.mlir.constant(3 : i32) : i32 -// MLIR-NEXT: llvm.br ^bb3(%9 : i32) +// MLIR-NEXT: %7 = llvm.mlir.constant(3 : i32) : i32 +// MLIR-NEXT: llvm.br ^bb3(%7 : i32) // MLIR-NEXT: ^bb2: // pred: ^bb0 -// MLIR-NEXT: %10 = llvm.mlir.constant(5 : i32) : i32 -// MLIR-NEXT: llvm.br ^bb3(%10 : i32) -// MLIR-NEXT: ^bb3(%11: i32): // 2 preds: ^bb1, ^bb2 +// MLIR-NEXT: %8 = llvm.mlir.constant(5 : i32) : i32 +// MLIR-NEXT: llvm.br ^bb3(%8 : i32) +// MLIR-NEXT: ^bb3(%9: i32): // 2 preds: ^bb1, ^bb2 // MLIR-NEXT: llvm.br ^bb4 // MLIR-NEXT: ^bb4: // pred: ^bb3 -// MLIR-NEXT: llvm.store %11, %3 : i32, !llvm.ptr -// MLIR-NEXT: %12 = llvm.load %3 : !llvm.ptr -> i32 -// MLIR-NEXT: llvm.return %12 : i32 +// MLIR-NEXT: llvm.store %9, %3 : i32, !llvm.ptr +// MLIR-NEXT: %10 = llvm.load %3 : !llvm.ptr -> i32 +// MLIR-NEXT: llvm.return %10 : i32 // MLIR-NEXT: } From bf6ab610c2bbb5ba4969ee3b2aa445a234bb29a1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 18 Apr 2024 21:29:32 -0700 Subject: [PATCH 1509/2301] [CIR][CIRGen][NFC] Update buildPointerWithAlignment with LLVM upstream codegen approach --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 20 +++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 13 +++++++++++++ 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 6698e320c007..bc87b110e255 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -212,11 +212,25 @@ static Address buildPointerWithAlignment(const Expr *E, } } + // std::addressof and variants. + if (auto *Call = dyn_cast(E)) { + switch (Call->getBuiltinCallee()) { + default: + break; + case Builtin::BIaddressof: + case Builtin::BI__addressof: + case Builtin::BI__builtin_addressof: { + llvm_unreachable("NYI"); + } + } + } + // TODO: conditional operators, comma. + // Otherwise, use the alignment of the type. - CharUnits Align = - CGF.CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo); - return Address(CGF.buildScalarExpr(E), Align); + return CGF.makeNaturalAddressForPointer( + CGF.buildScalarExpr(E), E->getType()->getPointeeType(), CharUnits(), + /*ForPointeeType=*/true, BaseInfo, IsKnownNonNull); } /// Helper method to check if the underlying ABI is AAPCS diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 1c993e872414..9ece64bc89bb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1408,6 +1408,19 @@ class CIRGenFunction : public CIRGenTypeCache { LValue MakeNaturalAlignPointeeAddrLValue(mlir::Value V, clang::QualType T); LValue MakeNaturalAlignAddrLValue(mlir::Value V, QualType T); + /// Construct an address with the natural alignment of T. If a pointer to T + /// is expected to be signed, the pointer passed to this function must have + /// been signed, and the returned Address will have the pointer authentication + /// information needed to authenticate the signed pointer. + Address makeNaturalAddressForPointer( + mlir::Value Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(), + bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr, + KnownNonNull_t IsKnownNonNull = NotKnownNonNull) { + if (Alignment.isZero()) + Alignment = CGM.getNaturalTypeAlignment(T, BaseInfo, ForPointeeType); + return Address(Ptr, convertTypeForMem(T), Alignment, IsKnownNonNull); + } + /// Load the value for 'this'. This function is only valid while generating /// code for an C++ member function. /// FIXME(cir): this should return a mlir::Value! From 62e7efc1bea0f4172f7c55e64e7e815945808526 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 19 Apr 2024 13:08:54 +0800 Subject: [PATCH 1510/2301] [CIR] Add support for long double type (#536) This PR adds support for the `long double` type in C/C++. It includes a new CIR type `!cir.long_double` to represent the `long double` type. CIRGen and LLVMIR lowering support for the new type is also added. Since the underlying floating point format used by a `long double` value is implementation-defined, the `!cir.long_double` type is parameterized to include information about the underlying floating point format. Specifically, a `long double` value may have one of the following formats: 1) IEEE-754 binary64 format (i.e. the same format used by `double`); 2) x87 80-bit floating point format; 3) IEEE-754 binary128 format; 4) PowerPC double double format. This PR invents 3 more CIR types to represent the above floating-point formats, and `!cir.long_double` is parameterized by another CIR floating-point type which represents its underlying format: - `!cir.long_double` represents the 1st variant above; - `!cir.long_double` represents the 2nd variant above; - `!cir.long_double` represents the 3rd variant above; - `!cir.long_double` represents the 4th variant above. Co-authored-by: Bruno Cardoso Lopes --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 29 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 27 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 1 + clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 3 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 61 ++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 + .../test/CIR/CodeGen/builtin-floating-point.c | 337 ++++++++++-------- clang/test/CIR/CodeGen/types.c | 6 +- clang/test/CIR/IR/invalid.cir | 7 + clang/test/CIR/Lowering/float.cir | 6 +- 11 files changed, 301 insertions(+), 183 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 5b53ab6bac6b..82a12963b425 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -150,9 +150,36 @@ def CIR_Double : CIR_FloatType<"Double", "double"> { }]; } +def CIR_FP80 : CIR_FloatType<"FP80", "f80"> { + let summary = "CIR type that represents x87 80-bit floating-point format"; + let description = [{ + Floating-point type that represents the x87 80-bit floating-point format. + }]; +} + +def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { + let summary = "CIR extended-precision float type"; + let description = [{ + Floating-point type that represents the `long double` type in C/C++. + + The underlying floating-point format of a long double value depends on the + implementation. The `underlying` parameter specifies the CIR floating-point + type that corresponds to this format. For now, it can only be either + `!cir.double` or `!cir.fp80`. + }]; + + let parameters = (ins "mlir::Type":$underlying); + + let assemblyFormat = [{ + `<` $underlying `>` + }]; + + let genVerifyDecl = 1; +} + // Constraints -def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double]>; +def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_LongDouble]>; def CIR_AnyIntOrFloat: AnyTypeOf<[CIR_AnyFloat, CIR_IntType]>; //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index c1e57b0989f1..a705fc3fcd67 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -373,29 +373,18 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } bool isInt(mlir::Type i) { return i.isa(); } - mlir::Type getLongDouble80BitsTy() const { llvm_unreachable("NYI"); } - - /// Get the proper floating point type for the given semantics. - mlir::Type getFloatTyForFormat(const llvm::fltSemantics &format, - bool useNativeHalf) const { - if (&format == &llvm::APFloat::IEEEhalf()) { - llvm_unreachable("IEEEhalf float format is NYI"); - } - - if (&format == &llvm::APFloat::BFloat()) - llvm_unreachable("BFloat float format is NYI"); - if (&format == &llvm::APFloat::IEEEsingle()) - return typeCache.FloatTy; + mlir::cir::LongDoubleType + getLongDoubleTy(const llvm::fltSemantics &format) const { if (&format == &llvm::APFloat::IEEEdouble()) - return typeCache.DoubleTy; + return mlir::cir::LongDoubleType::get(getContext(), typeCache.DoubleTy); + if (&format == &llvm::APFloat::x87DoubleExtended()) + return mlir::cir::LongDoubleType::get(getContext(), typeCache.FP80Ty); if (&format == &llvm::APFloat::IEEEquad()) - llvm_unreachable("IEEEquad float format is NYI"); + llvm_unreachable("NYI"); if (&format == &llvm::APFloat::PPCDoubleDouble()) - llvm_unreachable("PPCDoubleDouble float format is NYI"); - if (&format == &llvm::APFloat::x87DoubleExtended()) - return getLongDouble80BitsTy(); + llvm_unreachable("NYI"); - llvm_unreachable("Unknown float format!"); + llvm_unreachable("unsupported long double format"); } mlir::Type getVirtualFnPtrType(bool isVarArg = false) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c09ab95d2f97..5c0866ce2457 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -135,6 +135,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // TODO: BFloatTy FloatTy = ::mlir::cir::SingleType::get(builder.getContext()); DoubleTy = ::mlir::cir::DoubleType::get(builder.getContext()); + FP80Ty = ::mlir::cir::FP80Type::get(builder.getContext()); // TODO(cir): perhaps we should abstract long double variations into a custom // cir.long_double type. Said type would also hold the semantics for lowering. diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index d5900694c43c..abd7d9d03603 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -39,6 +39,7 @@ struct CIRGenTypeCache { // cir.long_double type. Said type would also hold the semantics for lowering. mlir::cir::SingleType FloatTy; mlir::cir::DoubleType DoubleTy; + mlir::cir::FP80Type FP80Ty; /// int mlir::Type UIntTy; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 9f2c961a74b7..cc235bb89451 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -480,8 +480,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { ResultType = CGM.DoubleTy; break; case BuiltinType::LongDouble: - ResultType = Builder.getFloatTyForFormat(Context.getFloatTypeSemantics(T), - /*useNativeHalf=*/false); + ResultType = Builder.getLongDoubleTy(Context.getFloatTypeSemantics(T)); break; case BuiltinType::Float128: case BuiltinType::Ibm128: diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index a667494b6f89..323b9ab593a7 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -694,6 +694,67 @@ DoubleType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, return (uint64_t)(getWidth() / 8); } +const llvm::fltSemantics &FP80Type::getFloatSemantics() const { + return llvm::APFloat::x87DoubleExtended(); +} + +llvm::TypeSize +FP80Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return llvm::TypeSize::getFixed(16); +} + +uint64_t FP80Type::getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return 16; +} + +uint64_t +FP80Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return 16; +} + +const llvm::fltSemantics &LongDoubleType::getFloatSemantics() const { + return getUnderlying() + .cast() + .getFloatSemantics(); +} + +llvm::TypeSize +LongDoubleType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return getUnderlying() + .cast() + .getTypeSizeInBits(dataLayout, params); +} + +uint64_t +LongDoubleType::getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return getUnderlying().cast().getABIAlignment( + dataLayout, params); +} + +uint64_t LongDoubleType::getPreferredAlignment( + const ::mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return getUnderlying() + .cast() + .getPreferredAlignment(dataLayout, params); +} + +LogicalResult +LongDoubleType::verify(function_ref emitError, + mlir::Type underlying) { + if (!underlying.isa()) { + emitError() << "invalid underlying type for long double"; + return failure(); + } + + return success(); +} + //===----------------------------------------------------------------------===// // FuncType Definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e545d2218dba..abb42bd62b98 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2956,6 +2956,12 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, converter.addConversion([&](mlir::cir::DoubleType type) -> mlir::Type { return mlir::Float64Type::get(type.getContext()); }); + converter.addConversion([&](mlir::cir::FP80Type type) -> mlir::Type { + return mlir::Float80Type::get(type.getContext()); + }); + converter.addConversion([&](mlir::cir::LongDoubleType type) -> mlir::Type { + return converter.convertType(type.getUnderlying()); + }); converter.addConversion([&](mlir::cir::FuncType type) -> mlir::Type { auto result = converter.convertType(type.getReturnType()); llvm::SmallVector arguments; diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c index fc9c407050ce..82099f666f45 100644 --- a/clang/test/CIR/CodeGen/builtin-floating-point.c +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple aarch64-apple-darwin-macho -ffast-math -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=AARCH64 // ceil @@ -14,15 +15,16 @@ double my_ceil(double f) { // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.double } -// long double my_ceill(long double f) { -// return __builtin_ceill(f); -// // DISABLED-CHECK: cir.func @my_ceill -// // DISABLED-CHECK: {{.+}} = cir.ceil {{.+}} : f80 -// } +long double my_ceill(long double f) { + return __builtin_ceill(f); + // CHECK: cir.func @my_ceill + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.ceil {{.+}} : !cir.long_double +} float ceilf(float); double ceil(double); -// long double ceill(long double); +long double ceill(long double); float call_ceilf(float f) { return ceilf(f); @@ -36,11 +38,12 @@ double call_ceil(double f) { // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.double } -// long double call_ceill(long double f) { -// return ceill(f); -// // DISABLED-CHECK: cir.func @call_ceill -// // DISABLED-CHECK: {{.+}} = cir.ceil {{.+}} : f80 -// } +long double call_ceill(long double f) { + return ceill(f); + // CHECK: cir.func @call_ceill + // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.ceil {{.+}} : !cir.long_double +} // cos @@ -56,15 +59,16 @@ double my_cos(double f) { // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double } -// long double my_cosl(long double f) { -// return __builtin_cosl(f); -// // DISABLED-CHECK: cir.func @my_cosl -// // DISABLED-CHECK: {{.+}} = cir.cos {{.+}} : f80 -// } +long double my_cosl(long double f) { + return __builtin_cosl(f); + // CHECK: cir.func @my_cosl + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.cos {{.+}} : !cir.long_double +} float cosf(float); double cos(double); -// long double cosl(long double); +long double cosl(long double); float call_cosf(float f) { return cosf(f); @@ -78,11 +82,12 @@ double call_cos(double f) { // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double } -// long double call_cosl(long double f) { -// return cosl(f); -// // DISABLED-CHECK: cir.func @call_cosl -// // DISABLED-CHECK: {{.+}} = cir.cos {{.+}} : f80 -// } +long double call_cosl(long double f) { + return cosl(f); + // CHECK: cir.func @call_cosl + // CHECK: {{.+}} = cir.cos {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.cos {{.+}} : !cir.long_double +} // exp @@ -98,15 +103,16 @@ double my_exp(double f) { // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double } -// long double my_expl(long double f) { -// return __builtin_expl(f); -// // DISABLED-CHECK: cir.func @my_expl -// // DISABLED-CHECK: {{.+}} = cir.exp {{.+}} : f80 -// } +long double my_expl(long double f) { + return __builtin_expl(f); + // CHECK: cir.func @my_expl + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.exp {{.+}} : !cir.long_double +} float expf(float); double exp(double); -// long double expl(long double); +long double expl(long double); float call_expf(float f) { return expf(f); @@ -120,11 +126,12 @@ double call_exp(double f) { // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double } -// long double call_expl(long double f) { -// return expl(f); -// // DISABLED-CHECK: cir.func @call_expl -// // DISABLED-CHECK: {{.+}} = cir.exp {{.+}} : f80 -// } +long double call_expl(long double f) { + return expl(f); + // CHECK: cir.func @call_expl + // CHECK: {{.+}} = cir.exp {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.exp {{.+}} : !cir.long_double +} // exp2 @@ -140,15 +147,16 @@ double my_exp2(double f) { // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double } -// long double my_exp2l(long double f) { -// return __builtin_exp2l(f); -// // DISABLED-CHECK: cir.func @my_exp2l -// // DISABLED-CHECK: {{.+}} = cir.exp2 {{.+}} : f80 -// } +long double my_exp2l(long double f) { + return __builtin_exp2l(f); + // CHECK: cir.func @my_exp2l + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.exp2 {{.+}} : !cir.long_double +} float exp2f(float); double exp2(double); -// long double exp2l(long double); +long double exp2l(long double); float call_exp2f(float f) { return exp2f(f); @@ -162,11 +170,12 @@ double call_exp2(double f) { // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double } -// long double call_exp2l(long double f) { -// return exp2l(f); -// // DISABLED-CHECK: cir.func @call_exp2l -// // DISABLED-CHECK: {{.+}} = cir.exp2 {{.+}} : f80 -// } +long double call_exp2l(long double f) { + return exp2l(f); + // CHECK: cir.func @call_exp2l + // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.exp2 {{.+}} : !cir.long_double +} // floor @@ -182,15 +191,16 @@ double my_floor(double f) { // CHECK: {{.+}} = cir.floor {{.+}} : !cir.double } -// long double my_floorl(long double f) { -// return __builtin_floorl(f); -// // DISABLED-CHECK: cir.func @my_floorl -// // DISABLED-CHECK: {{.+}} = cir.floor {{.+}} : f80 -// } +long double my_floorl(long double f) { + return __builtin_floorl(f); + // CHECK: cir.func @my_floorl + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.floor {{.+}} : !cir.long_double +} float floorf(float); double floor(double); -// long double floorl(long double); +long double floorl(long double); float call_floorf(float f) { return floorf(f); @@ -204,11 +214,12 @@ double call_floor(double f) { // CHECK: {{.+}} = cir.floor {{.+}} : !cir.double } -// long double call_floorl(long double f) { -// return floorl(f); -// // DISABLED-CHECK: cir.func @call_floorl -// // DISABLED-CHECK: {{.+}} = cir.floor {{.+}} : f80 -// } +long double call_floorl(long double f) { + return floorl(f); + // CHECK: cir.func @call_floorl + // CHECK: {{.+}} = cir.floor {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.floor {{.+}} : !cir.long_double +} // log @@ -224,15 +235,16 @@ double my_log(double f) { // CHECK: {{.+}} = cir.log {{.+}} : !cir.double } -// long double my_logl(long double f) { -// return __builtin_logl(f); -// // DISABLED-CHECK: cir.func @my_logl -// // DISABLED-CHECK: {{.+}} = cir.log {{.+}} : f80 -// } +long double my_logl(long double f) { + return __builtin_logl(f); + // CHECK: cir.func @my_logl + // CHECK: {{.+}} = cir.log {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log {{.+}} : !cir.long_double +} float logf(float); double log(double); -// long double logl(long double); +long double logl(long double); float call_logf(float f) { return logf(f); @@ -246,11 +258,12 @@ double call_log(double f) { // CHECK: {{.+}} = cir.log {{.+}} : !cir.double } -// long double call_logl(long double f) { -// return logl(f); -// // DISABLED-CHECK: cir.func @call_logl -// // DISABLED-CHECK: {{.+}} = cir.log {{.+}} : f80 -// } +long double call_logl(long double f) { + return logl(f); + // CHECK: cir.func @call_logl + // CHECK: {{.+}} = cir.log {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log {{.+}} : !cir.long_double +} // log10 @@ -266,15 +279,16 @@ double my_log10(double f) { // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double } -// long double my_log10l(long double f) { -// return __builtin_log10l(f); -// // DISABLED-CHECK: cir.func @my_log10l -// // DISABLED-CHECK: {{.+}} = cir.log10 {{.+}} : f80 -// } +long double my_log10l(long double f) { + return __builtin_log10l(f); + // CHECK: cir.func @my_log10l + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log10 {{.+}} : !cir.long_double +} float log10f(float); double log10(double); -// long double log10l(long double); +long double log10l(long double); float call_log10f(float f) { return log10f(f); @@ -288,11 +302,12 @@ double call_log10(double f) { // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double } -// long double call_log10l(long double f) { -// return log10l(f); -// // DISABLED-CHECK: cir.func @call_log10l -// // DISABLED-CHECK: {{.+}} = cir.log10 {{.+}} : f80 -// } +long double call_log10l(long double f) { + return log10l(f); + // CHECK: cir.func @call_log10l + // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log10 {{.+}} : !cir.long_double +} // log2 @@ -308,15 +323,16 @@ double my_log2(double f) { // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double } -// long double my_log2l(long double f) { -// return __builtin_log2l(f); -// // DISABLED-CHECK: cir.func @my_log2l -// // DISABLED-CHECK: {{.+}} = cir.log2 {{.+}} : f80 -// } +long double my_log2l(long double f) { + return __builtin_log2l(f); + // CHECK: cir.func @my_log2l + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log2 {{.+}} : !cir.long_double +} float log2f(float); double log2(double); -// long double log2l(long double); +long double log2l(long double); float call_log2f(float f) { return log2f(f); @@ -330,11 +346,12 @@ double call_log2(double f) { // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double } -// long double call_log2l(long double f) { -// return log2l(f); -// // DISABLED-CHECK: cir.func @call_log2l -// // DISABLED-CHECK: {{.+}} = cir.log2 {{.+}} : f80 -// } +long double call_log2l(long double f) { + return log2l(f); + // CHECK: cir.func @call_log2l + // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.log2 {{.+}} : !cir.long_double +} // nearbyint @@ -350,15 +367,16 @@ double my_nearbyint(double f) { // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.double } -// long double my_nearbyintl(long double f) { -// return __builtin_nearbyintl(f); -// // DISABLED-CHECK: cir.func @my_nearbyintl -// // DISABLED-CHECK: {{.+}} = cir.nearbyint {{.+}} : f80 -// } +long double my_nearbyintl(long double f) { + return __builtin_nearbyintl(f); + // CHECK: cir.func @my_nearbyintl + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double +} float nearbyintf(float); double nearbyint(double); -// long double nearbyintl(long double); +long double nearbyintl(long double); float call_nearbyintf(float f) { return nearbyintf(f); @@ -372,11 +390,12 @@ double call_nearbyint(double f) { // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.double } -// long double call_nearbyintl(long double f) { -// return nearbyintl(f); -// // DISABLED-CHECK: cir.func @call_nearbyintl -// // DISABLED-CHECK: {{.+}} = cir.nearbyint {{.+}} : f80 -// } +long double call_nearbyintl(long double f) { + return nearbyintl(f); + // CHECK: cir.func @call_nearbyintl + // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double +} // rint @@ -392,15 +411,16 @@ double my_rint(double f) { // CHECK: {{.+}} = cir.rint {{.+}} : !cir.double } -// long double my_rintl(long double f) { -// return __builtin_rintl(f); -// // DISABLED-CHECK: cir.func @my_rintl -// // DISABLED-CHECK: {{.+}} = cir.rint {{.+}} : f80 -// } +long double my_rintl(long double f) { + return __builtin_rintl(f); + // CHECK: cir.func @my_rintl + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.rint {{.+}} : !cir.long_double +} float rintf(float); double rint(double); -// long double rintl(long double); +long double rintl(long double); float call_rintf(float f) { return rintf(f); @@ -414,11 +434,12 @@ double call_rint(double f) { // CHECK: {{.+}} = cir.rint {{.+}} : !cir.double } -// long double call_rintl(long double f) { -// return rintl(f); -// // DISABLED-CHECK: cir.func @call_rintl -// // DISABLED-CHECK: {{.+}} = cir.rint {{.+}} : f80 -// } +long double call_rintl(long double f) { + return rintl(f); + // CHECK: cir.func @call_rintl + // CHECK: {{.+}} = cir.rint {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.rint {{.+}} : !cir.long_double +} // round @@ -434,15 +455,16 @@ double my_round(double f) { // CHECK: {{.+}} = cir.round {{.+}} : !cir.double } -// long double my_roundl(long double f) { -// return __builtin_roundl(f); -// // DISABLED-CHECK: cir.func @my_roundl -// // DISABLED-CHECK: {{.+}} = cir.round {{.+}} : f80 -// } +long double my_roundl(long double f) { + return __builtin_roundl(f); + // CHECK: cir.func @my_roundl + // CHECK: {{.+}} = cir.round {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.round {{.+}} : !cir.long_double +} float roundf(float); double round(double); -// long double roundl(long double); +long double roundl(long double); float call_roundf(float f) { return roundf(f); @@ -456,11 +478,12 @@ double call_round(double f) { // CHECK: {{.+}} = cir.round {{.+}} : !cir.double } -// long double call_roundl(long double f) { -// return roundl(f); -// // DISABLED-CHECK: cir.func @call_roundl -// // DISABLED-CHECK: {{.+}} = cir.round {{.+}} : f80 -// } +long double call_roundl(long double f) { + return roundl(f); + // CHECK: cir.func @call_roundl + // CHECK: {{.+}} = cir.round {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.round {{.+}} : !cir.long_double +} // sin @@ -476,15 +499,16 @@ double my_sin(double f) { // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double } -// long double my_sinl(long double f) { -// return __builtin_sinl(f); -// // DISABLED-CHECK: cir.func @my_sinl -// // DISABLED-CHECK: {{.+}} = cir.sin {{.+}} : f80 -// } +long double my_sinl(long double f) { + return __builtin_sinl(f); + // CHECK: cir.func @my_sinl + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.sin {{.+}} : !cir.long_double +} float sinf(float); double sin(double); -// long double sinl(long double); +long double sinl(long double); float call_sinf(float f) { return sinf(f); @@ -498,11 +522,12 @@ double call_sin(double f) { // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double } -// long double call_sinl(long double f) { -// return sinl(f); -// // DISABLED-CHECK: cir.func @call_sinl -// // DISABLED-CHECK: {{.+}} = cir.sin {{.+}} : f80 -// } +long double call_sinl(long double f) { + return sinl(f); + // CHECK: cir.func @call_sinl + // CHECK: {{.+}} = cir.sin {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.sin {{.+}} : !cir.long_double +} // sqrt @@ -518,15 +543,16 @@ double my_sqrt(double f) { // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double } -// long double my_sqrtl(long double f) { -// return __builtin_sqrtl(f); -// // DISABLED-CHECK: cir.func @my_sqrtl -// // DISABLED-CHECK: {{.+}} = cir.sqrt {{.+}} : f80 -// } +long double my_sqrtl(long double f) { + return __builtin_sqrtl(f); + // CHECK: cir.func @my_sqrtl + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.sqrt {{.+}} : !cir.long_double +} float sqrtf(float); double sqrt(double); -// long double sqrtl(long double); +long double sqrtl(long double); float call_sqrtf(float f) { return sqrtf(f); @@ -540,11 +566,12 @@ double call_sqrt(double f) { // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double } -// long double call_sqrtl(long double f) { -// return sqrtl(f); -// // DISABLED-CHECK: cir.func @call_sqrtl -// // DISABLED-CHECK: {{.+}} = cir.sqrt {{.+}} : f80 -// } +long double call_sqrtl(long double f) { + return sqrtl(f); + // CHECK: cir.func @call_sqrtl + // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.sqrt {{.+}} : !cir.long_double +} // trunc @@ -560,15 +587,16 @@ double my_trunc(double f) { // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.double } -// long double my_truncl(long double f) { -// return __builtin_truncl(f); -// // DISABLED-CHECK: cir.func @my_truncl -// // DISABLED-CHECK: {{.+}} = cir.trunc {{.+}} : f80 -// } +long double my_truncl(long double f) { + return __builtin_truncl(f); + // CHECK: cir.func @my_truncl + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.trunc {{.+}} : !cir.long_double +} float truncf(float); double trunc(double); -// long double truncl(long double); +long double truncl(long double); float call_truncf(float f) { return truncf(f); @@ -582,8 +610,9 @@ double call_trunc(double f) { // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.double } -// long double call_truncl(long double f) { -// return truncl(f); -// // DISABLED-CHECK: cir.func @call_truncl -// // DISABLED-CHECK: {{.+}} = cir.trunc {{.+}} : f80 -// } +long double call_truncl(long double f) { + return truncl(f); + // CHECK: cir.func @call_truncl + // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.long_double + // AARCH64: {{.+}} = cir.trunc {{.+}} : !cir.long_double +} diff --git a/clang/test/CIR/CodeGen/types.c b/clang/test/CIR/CodeGen/types.c index b58b1969176e..18db058b67e5 100644 --- a/clang/test/CIR/CodeGen/types.c +++ b/clang/test/CIR/CodeGen/types.c @@ -14,7 +14,7 @@ unsigned short t5(unsigned short i) { return i; } float t6(float i) { return i; } double t7(double i) { return i; } -// long double t10(long double i) { return i; } +long double t10(long double i) { return i; } void t8(void) {} @@ -30,7 +30,7 @@ bool t9(bool b) { return b; } // CHECK: cir.func @t5(%arg0: !u16i loc({{.*}})) -> !u16i // CHECK: cir.func @t6(%arg0: !cir.float loc({{.*}})) -> !cir.float // CHECK: cir.func @t7(%arg0: !cir.double loc({{.*}})) -> !cir.double -// DISABLED-CHECK: cir.func @t10(%arg0: f80 loc({{.*}})) -> f80 +// CHECK: cir.func @t10(%arg0: !cir.long_double loc({{.*}})) -> !cir.long_double // CHECK: cir.func @t8() // CHECK-CPP: cir.func @_Z2t0i(%arg0: !s32i loc({{.*}})) -> !s32i @@ -41,6 +41,6 @@ bool t9(bool b) { return b; } // CHECK-CPP: cir.func @_Z2t5t(%arg0: !u16i loc({{.*}})) -> !u16i // CHECK-CPP: cir.func @_Z2t6f(%arg0: !cir.float loc({{.*}})) -> !cir.float // CHECK-CPP: cir.func @_Z2t7d(%arg0: !cir.double loc({{.*}})) -> !cir.double -// DISABLED-CHECK-CPP: cir.func @{{.+}}t10{{.+}}(%arg0: f80 loc({{.*}})) -> f80 +// CHECK-CPP: cir.func @{{.+}}t10{{.+}}(%arg0: !cir.long_double loc({{.*}})) -> !cir.long_double // CHECK-CPP: cir.func @_Z2t8v() // CHECK-CPP: cir.func @_Z2t9b(%arg0: !cir.bool loc({{.*}})) -> !cir.bool diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index abc24a79bc4b..cedeb2bd652d 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1065,6 +1065,13 @@ module { // ----- +// expected-error@+1 {{invalid underlying type for long double}} +cir.func @bad_long_double(%arg0 : !cir.long_double) -> () { + cir.return +} + +// ----- + !s64i = !cir.int !s8i = !cir.int !u32i = !cir.int diff --git a/clang/test/CIR/Lowering/float.cir b/clang/test/CIR/Lowering/float.cir index ea30674ff7fe..463768a35935 100644 --- a/clang/test/CIR/Lowering/float.cir +++ b/clang/test/CIR/Lowering/float.cir @@ -9,10 +9,8 @@ module { // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f32) : f32 %2 = cir.const(#cir.fp<1.0> : !cir.double) : !cir.double // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f64) : f64 - // %3 = cir.const(1.0 : f128) : f128 - // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f128) : f128 - // %4 = cir.const(1.0 : f80) : f80 - // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f80) : f80 + %3 = cir.const(#cir.fp<1.0> : !cir.long_double) : !cir.long_double + // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f80) : f80 // %5 = cir.const(1.0 : bf16) : bf16 // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : bf16) : bf16 cir.return From 90de3b34a4ed7cac8206c0f73448688f9e81ee47 Mon Sep 17 00:00:00 2001 From: orbiri Date: Fri, 19 Apr 2024 22:06:31 +0300 Subject: [PATCH 1511/2301] [CIR][CIRGen] Introduce initial support for ASTAllocaAddressSpace (#551) ASTAllocaAddressSpace is a target-specific definition specificed by the codegen target info. In this commit, initial support is introduced which asserts that only the default (no qualifier) address space is supported. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 +++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 14 ++++++++++---- clang/lib/CIR/CodeGen/TargetInfo.h | 5 +++++ .../lib/CIR/CodeGen/UnimplementedFeatureGuarding.h | 1 - 5 files changed, 19 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index bc87b110e255..e880eb6af77c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2674,7 +2674,9 @@ Address CIRGenFunction::CreateTempAlloca(mlir::Type Ty, CharUnits Align, // be different from the type defined by the language. For example, // in C++ the auto variables are in the default address space. Therefore // cast alloca to the default address space when necessary. - assert(!UnimplementedFeature::getASTAllocaAddressSpace()); + if (getASTAllocaAddressSpace() != LangAS::Default) { + llvm_unreachable("Requires address space cast which is NYI"); + } return Address(V, Ty, Align); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 5c0866ce2457..a10c3937de64 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -161,7 +161,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, AllocaInt8PtrTy = UInt8PtrTy; // TODO: GlobalsInt8PtrTy // TODO: ConstGlobalsPtrTy - // TODO: ASTAllocaAddressSpace + ASTAllocaAddressSpace = getTargetCIRGenInfo().getASTAllocaAddressSpace(); PtrDiffTy = ::mlir::cir::IntType::get( builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth(), diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index abd7d9d03603..96d3ed851e8a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_LIB_CIR_CODEGENTYPECACHE_H #define LLVM_CLANG_LIB_CIR_CODEGENTYPECACHE_H +#include "UnimplementedFeatureGuarding.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Types.h" #include "clang/AST/CharUnits.h" @@ -108,7 +109,7 @@ struct CIRGenTypeCache { // unsigned char SizeAlignInBytes; // }; - // clang::LangAS ASTAllocaAddressSpace; + clang::LangAS ASTAllocaAddressSpace; // clang::CharUnits getSizeSize() const { // return clang::CharUnits::fromQuantity(SizeSizeInBytes); @@ -123,9 +124,14 @@ struct CIRGenTypeCache { return clang::CharUnits::fromQuantity(PointerAlignInBytes); } - // clang::LangAS getASTAllocaAddressSpace() const { - // return ASTAllocaAddressSpace; - // } + clang::LangAS getASTAllocaAddressSpace() const { + // Address spaces are not yet fully supported, but the usage of the default + // alloca address space can be used for now only for comparison with the + // default address space. + assert(!UnimplementedFeature::addressSpace()); + assert(ASTAllocaAddressSpace == clang::LangAS::Default); + return ASTAllocaAddressSpace; + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index 13ed96763775..e4fee4f2c330 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -60,6 +60,11 @@ class TargetCIRGenInfo { std::vector &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const {} + /// Get the AST address space for alloca. + virtual clang::LangAS getASTAllocaAddressSpace() const { + return clang::LangAS::Default; + } + virtual ~TargetCIRGenInfo() {} }; diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 742939d4b32c..e93a564ce076 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -30,7 +30,6 @@ struct UnimplementedFeature { // Address space related static bool addressSpace() { return false; } static bool addressSpaceInGlobalVar() { return false; } - static bool getASTAllocaAddressSpace() { return false; } // Clang codegen options static bool strictVTablePointers() { return false; } From ed27229cb28206d9ac09a0eab55be0988fb600a1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 19 Apr 2024 18:16:22 -0700 Subject: [PATCH 1512/2301] [CIR] Introduce StructLayoutAttr Mostly NFC. StructType currently holds optional member variables to track layout specific information. Those are lazily computed at time of layout queries. Change the implementation to use an attribute as the internal implementation - later on we should perhaps incorporate parsing/printing. This is pre req work for computing alignment based on elements offsets, very soon we're gonna also store of array of offsets. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 36 ++++++++++++++ clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 7 +-- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 6 +++ clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 48 +++++++++---------- 4 files changed, 70 insertions(+), 27 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 14e8ac9de21e..c5c98372eab1 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -511,6 +511,42 @@ def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> { }]; } +//===----------------------------------------------------------------------===// +// StructLayoutAttr +//===----------------------------------------------------------------------===// + +// Used to decouple layout information from the struct type. StructType's +// uses this attribute to cache that information. + +def StructLayoutAttr : CIR_Attr<"StructLayout", "struct_layout"> { + let summary = "ABI specific information about a struct layout"; + let description = [{ + }]; + + let parameters = (ins "unsigned":$size, + "unsigned":$alignment, + "bool":$padded, + "mlir::Type":$largest_member); + + let builders = [ + AttrBuilderWithInferredContext<(ins "unsigned":$size, + "unsigned":$alignment, + "bool":$padded, + "mlir::Type":$largest_member + ), [{ + return $_get(largest_member.getContext(), size, alignment, padded, + largest_member); + }]>, + ]; + + let genVerifyDecl = 1; + let assemblyFormat = [{ + `<` + struct($size, $alignment, $padded, $largest_member) + `>` + }]; +} + //===----------------------------------------------------------------------===// // DynamicCastInfoAttr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 651790031dba..a96f3d9889ed 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -167,9 +167,10 @@ class StructType // Utilities for lazily computing and cacheing data layout info. private: - mutable Type largestMember{}; - mutable std::optional padded{}; - mutable std::optional size{}, align{}; + // FIXME: currently opaque because there's a cycle if CIRTypes.types include + // from CIRAttrs.h. The implementation operates in terms of StructLayoutAttr + // instead. + mutable mlir::Attribute layoutInfo; bool isPadded(const DataLayout &dataLayout) const; void computeSizeAndAlignment(const DataLayout &dataLayout) const; }; diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 68cb1837966a..1472fd587232 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -165,6 +165,12 @@ LogicalResult ConstStructAttr::verify( return success(); } +LogicalResult StructLayoutAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, unsigned size, + unsigned alignment, bool padded, mlir::Type largest_member) { + return success(); +} + //===----------------------------------------------------------------------===// // LangAttr definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 323b9ab593a7..8ac3d895d836 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -127,9 +127,9 @@ void BoolType::print(mlir::AsmPrinter &printer) const {} /// /// Recurses into union members never returning a union as the largest member. Type StructType::getLargestMember(const ::mlir::DataLayout &dataLayout) const { - if (!largestMember) + if (!layoutInfo) computeSizeAndAlignment(dataLayout); - return largestMember; + return layoutInfo.cast().getLargestMember(); } Type StructType::parse(mlir::AsmParser &parser) { @@ -471,17 +471,18 @@ uint64_t mlir::cir::VectorType::getPreferredAlignment( llvm::TypeSize StructType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { - if (!size) + if (!layoutInfo) computeSizeAndAlignment(dataLayout); - return llvm::TypeSize::getFixed(*size * 8); + return llvm::TypeSize::getFixed( + layoutInfo.cast().getSize() * 8); } uint64_t StructType::getABIAlignment(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { - if (!align) + if (!layoutInfo) computeSizeAndAlignment(dataLayout); - return *align; + return layoutInfo.cast().getAlignment(); } uint64_t @@ -491,24 +492,25 @@ StructType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, } bool StructType::isPadded(const ::mlir::DataLayout &dataLayout) const { - if (!padded) + if (!layoutInfo) computeSizeAndAlignment(dataLayout); - return *padded; + return layoutInfo.cast().getPadded(); } void StructType::computeSizeAndAlignment( const ::mlir::DataLayout &dataLayout) const { assert(isComplete() && "Cannot get layout of incomplete structs"); // Do not recompute. - if (size || align || padded || largestMember) + if (layoutInfo) return; // This is a similar algorithm to LLVM's StructLayout. unsigned structSize = 0; llvm::Align structAlignment{1}; - [[maybe_unused]] bool isPadded = false; + bool isPadded = false; unsigned numElements = getNumElements(); auto members = getMembers(); + mlir::Type largestMember; unsigned largestMemberSize = 0; // Loop over each of the elements, placing them in memory. @@ -550,22 +552,20 @@ void StructType::computeSizeAndAlignment( // For unions, the size and aligment is that of the largest element. if (isUnion()) { - size = largestMemberSize; - align = structAlignment.value(); - padded = false; - return; - } - - // Add padding to the end of the struct so that it could be put in an array - // and all array elements would be aligned correctly. - if (!llvm::isAligned(structAlignment, structSize)) { - isPadded = true; - structSize = llvm::alignTo(structSize, structAlignment); + structSize = largestMemberSize; + isPadded = false; + } else { + // Add padding to the end of the struct so that it could be put in an array + // and all array elements would be aligned correctly. + if (!llvm::isAligned(structAlignment, structSize)) { + isPadded = true; + structSize = llvm::alignTo(structSize, structAlignment); + } } - size = structSize; - align = structAlignment.value(); - padded = isPadded; + layoutInfo = mlir::cir::StructLayoutAttr::get(getContext(), structSize, + structAlignment.value(), + isPadded, largestMember); } //===----------------------------------------------------------------------===// From 42b44059fcde114345917f7bdcca99504e78bd92 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 19 Apr 2024 18:59:39 -0700 Subject: [PATCH 1513/2301] [CIR] Extend StructLayoutAttr to support querying offset for members Testcase not added because we are not using the printers and parsers, but upcoming atomic work will exercise the path and testcases. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 13 ++++++---- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 2 ++ clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 8 ++++++- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 24 +++++++++++++++---- 4 files changed, 36 insertions(+), 11 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index c5c98372eab1..820ffe9c0a24 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -521,28 +521,31 @@ def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> { def StructLayoutAttr : CIR_Attr<"StructLayout", "struct_layout"> { let summary = "ABI specific information about a struct layout"; let description = [{ + Holds layout information often queried by !cir.struct users + during lowering passes and optimizations. }]; let parameters = (ins "unsigned":$size, "unsigned":$alignment, "bool":$padded, - "mlir::Type":$largest_member); + "mlir::Type":$largest_member, + "mlir::ArrayAttr":$offsets); let builders = [ AttrBuilderWithInferredContext<(ins "unsigned":$size, "unsigned":$alignment, "bool":$padded, - "mlir::Type":$largest_member - ), [{ + "mlir::Type":$largest_member, + "mlir::ArrayAttr":$offsets), [{ return $_get(largest_member.getContext(), size, alignment, padded, - largest_member); + largest_member, offsets); }]>, ]; let genVerifyDecl = 1; let assemblyFormat = [{ `<` - struct($size, $alignment, $padded, $largest_member) + struct($size, $alignment, $padded, $largest_member, $offsets) `>` }]; } diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index a96f3d9889ed..23190bf03312 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -172,6 +172,8 @@ class StructType // instead. mutable mlir::Attribute layoutInfo; bool isPadded(const DataLayout &dataLayout) const; + uint64_t getElementOffset(const DataLayout &dataLayout, unsigned idx) const; + void computeSizeAndAlignment(const DataLayout &dataLayout) const; }; diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 1472fd587232..df3032587f33 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -167,7 +167,13 @@ LogicalResult ConstStructAttr::verify( LogicalResult StructLayoutAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, unsigned size, - unsigned alignment, bool padded, mlir::Type largest_member) { + unsigned alignment, bool padded, mlir::Type largest_member, + mlir::ArrayAttr offsets) { + if (not std::all_of(offsets.begin(), offsets.end(), [](mlir::Attribute attr) { + return attr.isa(); + })) { + return emitError() << "all index values must be integers"; + } return success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 8ac3d895d836..83d1b5202d7b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -497,6 +497,16 @@ bool StructType::isPadded(const ::mlir::DataLayout &dataLayout) const { return layoutInfo.cast().getPadded(); } +uint64_t StructType::getElementOffset(const ::mlir::DataLayout &dataLayout, + unsigned idx) const { + assert(idx < getMembers().size() && "access not valid"); + if (!layoutInfo) + computeSizeAndAlignment(dataLayout); + auto offsets = layoutInfo.cast().getOffsets(); + auto intAttr = offsets[idx].cast(); + return intAttr.getInt(); +} + void StructType::computeSizeAndAlignment( const ::mlir::DataLayout &dataLayout) const { assert(isComplete() && "Cannot get layout of incomplete structs"); @@ -512,8 +522,10 @@ void StructType::computeSizeAndAlignment( auto members = getMembers(); mlir::Type largestMember; unsigned largestMemberSize = 0; + SmallVector memberOffsets; // Loop over each of the elements, placing them in memory. + memberOffsets.reserve(numElements); for (unsigned i = 0, e = numElements; i != e; ++i) { auto ty = members[i]; @@ -543,8 +555,9 @@ void StructType::computeSizeAndAlignment( // Keep track of maximum alignment constraint. structAlignment = std::max(tyAlign, structAlignment); - // FIXME: track struct size up to each element. - // getMemberOffsets()[i] = structSize; + // Struct size up to each element is the element offset. + memberOffsets.push_back(mlir::IntegerAttr::get( + mlir::IntegerType::get(getContext(), 32), structSize)); // Consume space for this data item structSize += dataLayout.getTypeSize(ty); @@ -563,9 +576,10 @@ void StructType::computeSizeAndAlignment( } } - layoutInfo = mlir::cir::StructLayoutAttr::get(getContext(), structSize, - structAlignment.value(), - isPadded, largestMember); + auto offsets = mlir::ArrayAttr::get(getContext(), memberOffsets); + layoutInfo = mlir::cir::StructLayoutAttr::get( + getContext(), structSize, structAlignment.value(), isPadded, + largestMember, offsets); } //===----------------------------------------------------------------------===// From 12cf18e14c126ebb442ac458a5dfb7d8be62b3c5 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Mon, 22 Apr 2024 21:18:20 +0300 Subject: [PATCH 1514/2301] [CIR][CodeGen] Adds SwitchOp flattening (#549) This PR adds flattening for `SwitchOp`. Despite of the previous PRs, here we have to introduce an operation for the flattening, since later we'll need to create `llvm.switch` in the lowering. So `cir.flat.switch` is a new operation, which barely copied from the dialect. I added several tests as well. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 39 ++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 95 ++++++++ .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 108 ++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 107 ++------- clang/test/CIR/CodeGen/switch.cir | 207 ++++++++++++++++++ 5 files changed, 463 insertions(+), 93 deletions(-) create mode 100644 clang/test/CIR/CodeGen/switch.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 558afa26c889..e49b51496451 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3604,6 +3604,45 @@ def IsConstantOp : CIR_Op<"is_constant", [Pure]> { }]; } + +def SwitchFlatOp : CIR_Op<"switch.flat", [AttrSizedOperandSegments, Terminator]> { + + let description = [{ + The `cir.switch.flat` operation is a region-less and simplified version of the `cir.switch`. + It's representation is closer to LLVM IR dialect than the C/C++ language feature. + }]; + + let arguments = (ins + CIR_IntType:$condition, + Variadic:$defaultOperands, + VariadicOfVariadic:$caseOperands, + ArrayAttr:$case_values, + DenseI32ArrayAttr:$case_operand_segments + ); + + let successors = (successor + AnySuccessor:$defaultDestination, + VariadicSuccessor:$caseDestinations + ); + + let assemblyFormat = [{ + $condition `:` type($condition) `,` + $defaultDestination (`(` $defaultOperands^ `:` type($defaultOperands) `)`)? + custom(ref(type($condition)), $case_values, $caseDestinations, + $caseOperands, type($caseOperands)) + attr-dict + }]; + + let builders = [ + OpBuilder<(ins "Value":$condition, + "Block *":$defaultDestination, + "ValueRange":$defaultOperands, + CArg<"ArrayRef", "{}">:$caseValues, + CArg<"BlockRange", "{}">:$caseDestinations, + CArg<"ArrayRef", "{}">:$caseOperands)> + ]; +} + //===----------------------------------------------------------------------===// // Atomic operations //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 2729fae7fbb6..213c65990905 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -17,6 +17,7 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Interfaces/CIRLoopOpInterface.h" #include "llvm/Support/ErrorHandling.h" +#include #include #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -1222,6 +1223,100 @@ void SwitchOp::build( switchBuilder(builder, result.location, result); } +//===----------------------------------------------------------------------===// +// SwitchFlatOp +//===----------------------------------------------------------------------===// + +void SwitchFlatOp::build(OpBuilder &builder, OperationState &result, + Value value, Block *defaultDestination, + ValueRange defaultOperands, ArrayRef caseValues, + BlockRange caseDestinations, + ArrayRef caseOperands) { + + std::vector caseValuesAttrs; + for (auto &val : caseValues) { + caseValuesAttrs.push_back(mlir::cir::IntAttr::get(value.getType(), val)); + } + auto attrs = ArrayAttr::get(builder.getContext(), caseValuesAttrs); + + build(builder, result, value, defaultOperands, caseOperands, attrs, + defaultDestination, caseDestinations); +} + +/// ::= `[` (case (`,` case )* )? `]` +/// ::= integer `:` bb-id (`(` ssa-use-and-type-list `)`)? +static ParseResult parseSwitchFlatOpCases( + OpAsmParser &parser, Type flagType, mlir::ArrayAttr &caseValues, + SmallVectorImpl &caseDestinations, + SmallVectorImpl> &caseOperands, + SmallVectorImpl> &caseOperandTypes) { + if (failed(parser.parseLSquare())) + return failure(); + if (succeeded(parser.parseOptionalRSquare())) + return success(); + SmallVector values; + + auto parseCase = [&]() { + int64_t value = 0; + if (failed(parser.parseInteger(value))) + return failure(); + + values.push_back(IntAttr::get(flagType, value)); + + Block *destination; + SmallVector operands; + SmallVector operandTypes; + if (parser.parseColon() || parser.parseSuccessor(destination)) + return failure(); + if (!parser.parseOptionalLParen()) { + if (parser.parseOperandList(operands, OpAsmParser::Delimiter::None, + /*allowResultNumber=*/false) || + parser.parseColonTypeList(operandTypes) || parser.parseRParen()) + return failure(); + } + caseDestinations.push_back(destination); + caseOperands.emplace_back(operands); + caseOperandTypes.emplace_back(operandTypes); + return success(); + }; + if (failed(parser.parseCommaSeparatedList(parseCase))) + return failure(); + + caseValues = ArrayAttr::get(flagType.getContext(), values); + + return parser.parseRSquare(); +} + +static void printSwitchFlatOpCases(OpAsmPrinter &p, SwitchFlatOp op, + Type flagType, mlir::ArrayAttr caseValues, + SuccessorRange caseDestinations, + OperandRangeRange caseOperands, + const TypeRangeRange &caseOperandTypes) { + p << '['; + p.printNewline(); + if (!caseValues) { + p << ']'; + return; + } + + size_t index = 0; + llvm::interleave( + llvm::zip(caseValues, caseDestinations), + [&](auto i) { + p << " "; + mlir::Attribute a = std::get<0>(i); + p << a.cast().getValue(); + p << ": "; + p.printSuccessorAndUseList(std::get<1>(i), caseOperands[index++]); + }, + [&] { + p << ','; + p.printNewline(); + }); + p.printNewline(); + p << ']'; +} + //===----------------------------------------------------------------------===// // CatchOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 24385a223f32..ea1b413fc685 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -244,6 +244,105 @@ class CIRLoopOpInterfaceFlattening } }; +class CIRSwitchOpFlattening + : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + inline void rewriteYieldOp(mlir::PatternRewriter &rewriter, + mlir::cir::YieldOp yieldOp, + mlir::Block *destination) const { + rewriter.setInsertionPoint(yieldOp); + rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getOperands(), + destination); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::SwitchOp op, + mlir::PatternRewriter &rewriter) const override { + // Empty switch statement: just erase it. + if (!op.getCases().has_value() || op.getCases()->empty()) { + rewriter.eraseOp(op); + return mlir::success(); + } + + // Create exit block. + rewriter.setInsertionPointAfter(op); + auto *exitBlock = + rewriter.splitBlock(rewriter.getBlock(), rewriter.getInsertionPoint()); + + // Allocate required data structures (disconsider default case in + // vectors). + llvm::SmallVector caseValues; + llvm::SmallVector caseDestinations; + llvm::SmallVector caseOperands; + + // Initialize default case as optional. + mlir::Block *defaultDestination = exitBlock; + mlir::ValueRange defaultOperands = exitBlock->getArguments(); + + // Track fallthrough between cases. + mlir::cir::YieldOp fallthroughYieldOp = nullptr; + + // Digest the case statements values and bodies. + for (size_t i = 0; i < op.getCases()->size(); ++i) { + auto ®ion = op.getRegion(i); + auto caseAttr = op.getCases()->getValue()[i].cast(); + + // Found default case: save destination and operands. + if (caseAttr.getKind().getValue() == mlir::cir::CaseOpKind::Default) { + defaultDestination = ®ion.front(); + defaultOperands = region.getArguments(); + } else { + // AnyOf cases kind can have multiple values, hence the loop below. + for (auto &value : caseAttr.getValue()) { + caseValues.push_back(value.cast().getValue()); + caseOperands.push_back(region.getArguments()); + caseDestinations.push_back(®ion.front()); + } + } + + // Previous case is a fallthrough: branch it to this case. + if (fallthroughYieldOp) { + rewriteYieldOp(rewriter, fallthroughYieldOp, ®ion.front()); + fallthroughYieldOp = nullptr; + } + + for (auto &blk : region.getBlocks()) { + if (blk.getNumSuccessors()) + continue; + + // Handle switch-case yields. + if (auto yieldOp = dyn_cast(blk.getTerminator())) + fallthroughYieldOp = yieldOp; + } + + // Handle break statements. + walkRegionSkipping( + region, [&](mlir::Operation *op) { + if (isa(op)) + lowerTerminator(op, exitBlock, rewriter); + }); + + // Extract region contents before erasing the switch op. + rewriter.inlineRegionBefore(region, exitBlock); + } + + // Last case is a fallthrough: branch it to exit. + if (fallthroughYieldOp) { + rewriteYieldOp(rewriter, fallthroughYieldOp, exitBlock); + fallthroughYieldOp = nullptr; + } + + // Set switch op to branch to the newly created blocks. + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp( + op, op.getCondition(), defaultDestination, defaultOperands, caseValues, + caseDestinations, caseOperands); + + return mlir::success(); + } +}; class CIRTernaryOpFlattening : public mlir::OpRewritePattern { public: @@ -294,9 +393,10 @@ class CIRTernaryOpFlattening }; void populateFlattenCFGPatterns(RewritePatternSet &patterns) { - patterns.add( - patterns.getContext()); + patterns + .add( + patterns.getContext()); } void FlattenCFGPass::runOnOperation() { @@ -306,7 +406,7 @@ void FlattenCFGPass::runOnOperation() { // Collect operations to apply patterns. SmallVector ops; getOperation()->walk([&](Operation *op) { - if (isa(op)) + if (isa(op)) ops.push_back(op); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index abb42bd62b98..938de6897ce9 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -78,15 +78,6 @@ namespace direct { namespace { -/// Lowers operations with the terminator trait that have a single successor. -void lowerTerminator(mlir::Operation *op, mlir::Block *dest, - mlir::ConversionPatternRewriter &rewriter) { - assert(op->hasTrait() && "not a terminator"); - mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp(op, dest); -} - /// Walks a region while skipping operations of type `Ops`. This ensures the /// callback is not applied to said operations and its children. template @@ -1508,101 +1499,39 @@ class CIRGetGlobalOpLowering } }; -class CIRSwitchOpLowering - : public mlir::OpConversionPattern { +class CIRSwitchFlatOpLowering + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; - - inline void rewriteYieldOp(mlir::ConversionPatternRewriter &rewriter, - mlir::cir::YieldOp yieldOp, - mlir::Block *destination) const { - rewriter.setInsertionPoint(yieldOp); - rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getOperands(), - destination); - } + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::SwitchOp op, OpAdaptor adaptor, + matchAndRewrite(mlir::cir::SwitchFlatOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - // Empty switch statement: just erase it. - if (!op.getCases().has_value() || op.getCases()->empty()) { - rewriter.eraseOp(op); - return mlir::success(); - } - - // Create exit block. - rewriter.setInsertionPointAfter(op); - auto *exitBlock = - rewriter.splitBlock(rewriter.getBlock(), rewriter.getInsertionPoint()); - // Allocate required data structures (disconsider default case in - // vectors). llvm::SmallVector caseValues; - llvm::SmallVector caseDestinations; - llvm::SmallVector caseOperands; - - // Initialize default case as optional. - mlir::Block *defaultDestination = exitBlock; - mlir::ValueRange defaultOperands = exitBlock->getArguments(); - - // Track fallthrough between cases. - mlir::cir::YieldOp fallthroughYieldOp = nullptr; - - // Digest the case statements values and bodies. - for (size_t i = 0; i < op.getCases()->size(); ++i) { - auto ®ion = op.getRegion(i); - auto caseAttr = op.getCases()->getValue()[i].cast(); - - // Found default case: save destination and operands. - if (caseAttr.getKind().getValue() == mlir::cir::CaseOpKind::Default) { - defaultDestination = ®ion.front(); - defaultOperands = region.getArguments(); - } else { - // AnyOf cases kind can have multiple values, hence the loop below. - for (auto &value : caseAttr.getValue()) { - caseValues.push_back(value.cast().getValue()); - caseOperands.push_back(region.getArguments()); - caseDestinations.push_back(®ion.front()); - } - } - - // Previous case is a fallthrough: branch it to this case. - if (fallthroughYieldOp) { - rewriteYieldOp(rewriter, fallthroughYieldOp, ®ion.front()); - fallthroughYieldOp = nullptr; - } - - for (auto &blk : region.getBlocks()) { - if (blk.getNumSuccessors()) - continue; - - // Handle switch-case yields. - if (auto yieldOp = dyn_cast(blk.getTerminator())) - fallthroughYieldOp = yieldOp; + if (op.getCaseValues()) { + for (auto val : op.getCaseValues()) { + auto intAttr = dyn_cast(val); + caseValues.push_back(intAttr.getValue()); } + } - // Handle break statements. - walkRegionSkipping( - region, [&](mlir::Operation *op) { - if (isa(op)) - lowerTerminator(op, exitBlock, rewriter); - }); + llvm::SmallVector caseDestinations; + llvm::SmallVector caseOperands; - // Extract region contents before erasing the switch op. - rewriter.inlineRegionBefore(region, exitBlock); + for (auto x : op.getCaseDestinations()) { + caseDestinations.push_back(x); } - // Last case is a fallthrough: branch it to exit. - if (fallthroughYieldOp) { - rewriteYieldOp(rewriter, fallthroughYieldOp, exitBlock); - fallthroughYieldOp = nullptr; + for (auto x : op.getCaseOperands()) { + caseOperands.push_back(x); } // Set switch op to branch to the newly created blocks. rewriter.setInsertionPoint(op); rewriter.replaceOpWithNewOp( - op, adaptor.getCondition(), defaultDestination, defaultOperands, - caseValues, caseDestinations, caseOperands); + op, adaptor.getCondition(), op.getDefaultDestination(), + op.getDefaultOperands(), caseValues, caseDestinations, caseOperands); return mlir::success(); } }; @@ -2914,7 +2843,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, - CIRBrOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, + CIRBrOpLowering, CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, diff --git a/clang/test/CIR/CodeGen/switch.cir b/clang/test/CIR/CodeGen/switch.cir new file mode 100644 index 000000000000..da99dffa1fa8 --- /dev/null +++ b/clang/test/CIR/CodeGen/switch.cir @@ -0,0 +1,207 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!s8i = !cir.int +!s32i = !cir.int +!s64i = !cir.int + +module { + cir.func @shouldFlatSwitchWithDefault(%arg0: !s8i) { + cir.switch (%arg0 : !s8i) [ + case (equal, 1) { + cir.break + }, + case (default) { + cir.break + } + ] + cir.return + } +// CHECK: cir.func @shouldFlatSwitchWithDefault(%arg0: !s8i) { +// CHECK: cir.switch.flat %arg0 : !s8i, ^bb[[#DEFAULT:]] [ +// CHECK: 1: ^bb[[#CASE1:]] +// CHECK: ] +// CHECK: ^bb[[#CASE1]]: +// CHECK: cir.br ^bb3 +// CHECK: ^bb[[#DEFAULT]]: +// CHECK: cir.br ^bb[[#EXIT:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + cir.func @shouldFlatSwitchWithoutDefault(%arg0: !s32i) { + cir.switch (%arg0 : !s32i) [ + case (equal, 1) { + cir.break + } + ] + cir.return + } +// CHECK: cir.func @shouldFlatSwitchWithoutDefault(%arg0: !s32i) { +// CHECK: cir.switch.flat %arg0 : !s32i, ^bb[[#EXIT:]] [ +// CHECK: 1: ^bb[[#CASE1:]] +// CHECK: ] +// CHECK: ^bb[[#CASE1]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + + cir.func @shouldFlatSwitchWithImplicitFallthrough(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + case (anyof, [1, 2] : !s64i) { + cir.break + } + ] + cir.return + } +// CHECK: cir.func @shouldFlatSwitchWithImplicitFallthrough(%arg0: !s64i) { +// CHECK: cir.switch.flat %arg0 : !s64i, ^bb[[#EXIT:]] [ +// CHECK: 1: ^bb[[#CASE1N2:]], +// CHECK: 2: ^bb[[#CASE1N2]] +// CHECK: ] +// CHECK: ^bb[[#CASE1N2]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + + + cir.func @shouldFlatSwitchWithExplicitFallthrough(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + case (equal, 1 : !s64i) { // case 1 has its own region + cir.yield // fallthrough to case 2 + }, + case (equal, 2 : !s64i) { + cir.break + } + ] + cir.return + } +// CHECK: cir.func @shouldFlatSwitchWithExplicitFallthrough(%arg0: !s64i) { +// CHECK: cir.switch.flat %arg0 : !s64i, ^bb[[#EXIT:]] [ +// CHECK: 1: ^bb[[#CASE1:]], +// CHECK: 2: ^bb[[#CASE2:]] +// CHECK: ] +// CHECK: ^bb[[#CASE1]]: +// CHECK: cir.br ^bb[[#CASE2]] +// CHECK: ^bb[[#CASE2]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + cir.func @shouldFlatSwitchWithFallthroughToExit(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + case (equal, 1 : !s64i) { + cir.yield // fallthrough to exit + } + ] + cir.return + } +// CHECK: cir.func @shouldFlatSwitchWithFallthroughToExit(%arg0: !s64i) { +// CHECK: cir.switch.flat %arg0 : !s64i, ^bb[[#EXIT:]] [ +// CHECK: 1: ^bb[[#CASE1:]] +// CHECK: ] +// CHECK: ^bb[[#CASE1]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + cir.func @shouldDropEmptySwitch(%arg0: !s64i) { + cir.switch (%arg0 : !s64i) [ + ] + // CHECK-NOT: llvm.switch + cir.return + } +// CHECK: cir.func @shouldDropEmptySwitch(%arg0: !s64i) +// CHECK-NOT: cir.switch.flat + + + cir.func @shouldFlatMultiBlockCase(%arg0: !s32i) { + %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.scope { + %1 = cir.load %0 : cir.ptr , !s32i + cir.switch (%1 : !s32i) [ + case (equal, 3) { + cir.return + ^bb1: // no predecessors + cir.break + } + ] + } + cir.return + } + +// CHECK: cir.func @shouldFlatMultiBlockCase(%arg0: !s32i) { +// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK: cir.switch.flat %1 : !s32i, ^bb4 [ +// CHECK: 3: ^bb2 +// CHECK: ] +// CHECK: ^bb2: // pred: ^bb1 +// CHECK: cir.return +// CHECK: ^bb3: // no predecessors +// CHECK: cir.br ^bb4 +// CHECK: ^bb4: // 2 preds: ^bb1, ^bb3 +// CHECK: cir.br ^bb5 +// CHECK: ^bb5: // pred: ^bb4 +// CHECK: cir.return +// CHECK: } + + + cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { + %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, cir.ptr + cir.store %arg1, %1 : !s32i, cir.ptr + cir.scope { + %5 = cir.load %0 : cir.ptr , !s32i + cir.switch (%5 : !s32i) [ + case (equal, 0) { + cir.scope { + %6 = cir.load %1 : cir.ptr , !s32i + %7 = cir.const(#cir.int<0> : !s32i) : !s32i + %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i + %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool + cir.if %9 { + cir.break + } + } + cir.break + } + ] + } + %3 = cir.const(#cir.int<3> : !s32i) : !s32i + cir.store %3, %2 : !s32i, cir.ptr + %4 = cir.load %2 : cir.ptr , !s32i + cir.return %4 : !s32i + } +// CHECK: cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { +// CHECK: cir.switch.flat %3 : !s32i, ^bb7 [ +// CHECK: 0: ^bb2 +// CHECK: ] +// CHECK: ^bb2: // pred: ^bb1 +// CHECK: cir.br ^bb3 +// CHECK: ^bb3: // pred: ^bb2 +// CHECK: cir.brcond {{%.*}} ^bb4, ^bb5 +// CHECK: ^bb4: // pred: ^bb3 +// CHECK: cir.br ^bb7 +// CHECK: ^bb5: // pred: ^bb3 +// CHECK: cir.br ^bb6 +// CHECK: ^bb6: // pred: ^bb5 +// CHECK: cir.br ^bb7 +// CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 +// CHECK: cir.br ^bb8 +// CHECK: ^bb8: // pred: ^bb7 +// CHECK: cir.return %9 : !s32i +// CHECK: } + +} From 246b271066b8ab346071a30cc5688844adb9ef22 Mon Sep 17 00:00:00 2001 From: GaoXiang <47854660+gxsoar@users.noreply.github.com> Date: Tue, 23 Apr 2024 02:48:34 +0800 Subject: [PATCH 1515/2301] [CIR][NFC] Improve verifier related error messages (#553) Fix `CastOp::Verify` and invalid.cir error message. Let these error messages with consistent format. #318 --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 38 +++++++++++------------ clang/test/CIR/IR/invalid.cir | 40 ++++++++++++------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 213c65990905..0740935fb761 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -419,21 +419,21 @@ LogicalResult CastOp::verify() { if (!resType.isa()) return emitOpError() << "requires !cir.bool type for result"; if (!srcType.isa()) - return emitOpError() << "requires integral type for source"; + return emitOpError() << "requires !cir.int type for source"; return success(); } case cir::CastKind::ptr_to_bool: { if (!resType.isa()) return emitOpError() << "requires !cir.bool type for result"; if (!srcType.isa()) - return emitOpError() << "requires pointer type for source"; + return emitOpError() << "requires !cir.ptr type for source"; return success(); } case cir::CastKind::integral: { if (!resType.isa()) - return emitOpError() << "requires !IntegerType for result"; + return emitOpError() << "requires !cir.int type for result"; if (!srcType.isa()) - return emitOpError() << "requires !IntegerType for source"; + return emitOpError() << "requires !cir.int type for source"; return success(); } case cir::CastKind::array_to_ptrdecay: { @@ -465,56 +465,56 @@ LogicalResult CastOp::verify() { case cir::CastKind::floating: { if (!srcType.isa() || !resType.isa()) - return emitOpError() << "requires floating for source and result"; + return emitOpError() << "requires !cir.float type for source and result"; return success(); } case cir::CastKind::float_to_int: { if (!srcType.isa()) - return emitOpError() << "requires floating for source"; + return emitOpError() << "requires !cir.float type for source"; if (!resType.dyn_cast()) - return emitOpError() << "requires !IntegerType for result"; + return emitOpError() << "requires !cir.int type for result"; return success(); } case cir::CastKind::int_to_ptr: { if (!srcType.dyn_cast()) - return emitOpError() << "requires integer for source"; + return emitOpError() << "requires !cir.int type for source"; if (!resType.dyn_cast()) - return emitOpError() << "requires pointer for result"; + return emitOpError() << "requires !cir.ptr type for result"; return success(); } case cir::CastKind::ptr_to_int: { if (!srcType.dyn_cast()) - return emitOpError() << "requires pointer for source"; + return emitOpError() << "requires !cir.ptr type for source"; if (!resType.dyn_cast()) - return emitOpError() << "requires integer for result"; + return emitOpError() << "requires !cir.int type for result"; return success(); } case cir::CastKind::float_to_bool: { if (!srcType.isa()) - return emitOpError() << "requires float for source"; + return emitOpError() << "requires !cir.float type for source"; if (!resType.isa()) - return emitOpError() << "requires !cir.bool for result"; + return emitOpError() << "requires !cir.bool type for result"; return success(); } case cir::CastKind::bool_to_int: { if (!srcType.isa()) - return emitOpError() << "requires !cir.bool for source"; + return emitOpError() << "requires !cir.bool type for source"; if (!resType.isa()) - return emitOpError() << "requires !cir.int for result"; + return emitOpError() << "requires !cir.int type for result"; return success(); } case cir::CastKind::int_to_float: { if (!srcType.isa()) - return emitOpError() << "requires !cir.int for source"; + return emitOpError() << "requires !cir.int type for source"; if (!resType.isa()) - return emitOpError() << "requires !cir.float for result"; + return emitOpError() << "requires !cir.float type for result"; return success(); } case cir::CastKind::bool_to_float: { if (!srcType.isa()) - return emitOpError() << "requires !cir.bool for source"; + return emitOpError() << "requires !cir.bool type for source"; if (!resType.isa()) - return emitOpError() << "requires !cir.float for result"; + return emitOpError() << "requires !cir.float type for result"; return success(); } } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index cedeb2bd652d..8d680f2f3a12 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -116,7 +116,7 @@ cir.func @cast0(%arg0: !u32i) { // ----- cir.func @cast1(%arg1: !cir.float) { - %1 = cir.cast(int_to_bool, %arg1 : !cir.float), !cir.bool // expected-error {{requires integral type for source}} + %1 = cir.cast(int_to_bool, %arg1 : !cir.float), !cir.bool // expected-error {{requires !cir.int type for source}} cir.return } @@ -148,14 +148,14 @@ cir.func @cast4(%p: !cir.ptr) { // ----- cir.func @cast5(%p: !cir.float) { - %2 = cir.cast(bool_to_float, %p : !cir.float), !cir.float // expected-error {{requires !cir.bool for source}} + %2 = cir.cast(bool_to_float, %p : !cir.float), !cir.float // expected-error {{requires !cir.bool type for source}} cir.return } // ----- cir.func @cast6(%p: !cir.bool) { - %2 = cir.cast(bool_to_float, %p : !cir.bool), !cir.int // expected-error {{requires !cir.float for result}} + %2 = cir.cast(bool_to_float, %p : !cir.bool), !cir.int // expected-error {{requires !cir.float type for result}} cir.return } @@ -171,7 +171,7 @@ cir.func @cast7(%p: !cir.ptr) { !u32i = !cir.int cir.func @cast8(%p: !u32i) { - %2 = cir.cast(ptr_to_bool, %p : !u32i), !cir.bool // expected-error {{requires pointer type for source}} + %2 = cir.cast(ptr_to_bool, %p : !u32i), !cir.bool // expected-error {{requires !cir.ptr type for source}} cir.return } @@ -179,7 +179,7 @@ cir.func @cast8(%p: !u32i) { !u32i = !cir.int cir.func @cast9(%p : !u32i) { - %2 = cir.cast(integral, %p : !u32i), !cir.float // expected-error {{requires !IntegerType for result}} + %2 = cir.cast(integral, %p : !u32i), !cir.float // expected-error {{requires !cir.int type for result}} cir.return } @@ -187,7 +187,7 @@ cir.func @cast9(%p : !u32i) { !u32i = !cir.int cir.func @cast10(%p : !cir.float) { - %2 = cir.cast(integral, %p : !cir.float), !u32i // expected-error {{requires !IntegerType for source}} + %2 = cir.cast(integral, %p : !cir.float), !u32i // expected-error {{requires !cir.int type for source}} cir.return } @@ -195,7 +195,7 @@ cir.func @cast10(%p : !cir.float) { !u32i = !cir.int cir.func @cast11(%p : !cir.float) { - %2 = cir.cast(floating, %p : !cir.float), !u32i // expected-error {{requires floating for source and result}} + %2 = cir.cast(floating, %p : !cir.float), !u32i // expected-error {{requires !cir.float type for source and result}} cir.return } @@ -203,7 +203,7 @@ cir.func @cast11(%p : !cir.float) { !u32i = !cir.int cir.func @cast12(%p : !u32i) { - %2 = cir.cast(floating, %p : !u32i), !cir.float // expected-error {{requires floating for source and result}} + %2 = cir.cast(floating, %p : !u32i), !cir.float // expected-error {{requires !cir.float type for source and result}} cir.return } @@ -211,14 +211,14 @@ cir.func @cast12(%p : !u32i) { !u32i = !cir.int cir.func @cast13(%p : !u32i) { - %2 = cir.cast(float_to_int, %p : !u32i), !u32i // expected-error {{requires floating for source}} + %2 = cir.cast(float_to_int, %p : !u32i), !u32i // expected-error {{requires !cir.float type for source}} cir.return } // ----- cir.func @cast14(%p : !cir.float) { - %2 = cir.cast(float_to_int, %p : !cir.float), !cir.float // expected-error {{requires !IntegerType for result}} + %2 = cir.cast(float_to_int, %p : !cir.float), !cir.float // expected-error {{requires !cir.int type for result}} cir.return } @@ -226,7 +226,7 @@ cir.func @cast14(%p : !cir.float) { !u64i = !cir.int cir.func @cast15(%p : !cir.ptr) { - %2 = cir.cast(int_to_ptr, %p : !cir.ptr), !cir.ptr // expected-error {{requires integer for source}} + %2 = cir.cast(int_to_ptr, %p : !cir.ptr), !cir.ptr // expected-error {{requires !cir.int type for source}} cir.return } @@ -234,7 +234,7 @@ cir.func @cast15(%p : !cir.ptr) { !u64i = !cir.int cir.func @cast16(%p : !u64i) { - %2 = cir.cast(int_to_ptr, %p : !u64i), !u64i // expected-error {{requires pointer for result}} + %2 = cir.cast(int_to_ptr, %p : !u64i), !u64i // expected-error {{requires !cir.ptr type for result}} cir.return } @@ -242,7 +242,7 @@ cir.func @cast16(%p : !u64i) { !u64i = !cir.int cir.func @cast17(%p : !u64i) { - %2 = cir.cast(ptr_to_int, %p : !u64i), !u64i // expected-error {{requires pointer for source}} + %2 = cir.cast(ptr_to_int, %p : !u64i), !u64i // expected-error {{requires !cir.ptr type for source}} cir.return } @@ -250,7 +250,7 @@ cir.func @cast17(%p : !u64i) { !u64i = !cir.int cir.func @cast18(%p : !cir.ptr) { - %2 = cir.cast(ptr_to_int, %p : !cir.ptr), !cir.ptr // expected-error {{requires integer for result}} + %2 = cir.cast(ptr_to_int, %p : !cir.ptr), !cir.ptr // expected-error {{requires !cir.int type for result}} cir.return } @@ -258,7 +258,7 @@ cir.func @cast18(%p : !cir.ptr) { !u32i = !cir.int cir.func @cast19(%p : !u32i) { - %2 = cir.cast(float_to_bool, %p : !u32i), !cir.bool // expected-error {{requires float for source}} + %2 = cir.cast(float_to_bool, %p : !u32i), !cir.bool // expected-error {{requires !cir.float type for source}} cir.return } @@ -266,7 +266,7 @@ cir.func @cast19(%p : !u32i) { !u32i = !cir.int cir.func @cast20(%p : !cir.float) { - %2 = cir.cast(float_to_bool, %p : !cir.float), !u32i // expected-error {{requires !cir.bool for result}} + %2 = cir.cast(float_to_bool, %p : !cir.float), !u32i // expected-error {{requires !cir.bool type for result}} cir.return } @@ -274,21 +274,21 @@ cir.func @cast20(%p : !cir.float) { !u32i = !cir.int cir.func @cast21(%p : !u32i) { - %2 = cir.cast(bool_to_int, %p : !u32i), !u32i // expected-error {{requires !cir.bool for source}} + %2 = cir.cast(bool_to_int, %p : !u32i), !u32i // expected-error {{requires !cir.bool type for source}} cir.return } // ----- cir.func @cast22(%p : !cir.bool) { - %2 = cir.cast(bool_to_int, %p : !cir.bool), !cir.float // expected-error {{requires !cir.int for result}} + %2 = cir.cast(bool_to_int, %p : !cir.bool), !cir.float // expected-error {{requires !cir.int type for result}} cir.return } // ----- cir.func @cast23(%p : !cir.bool) { - %2 = cir.cast(int_to_float, %p : !cir.bool), !cir.float // expected-error {{requires !cir.int for source}} + %2 = cir.cast(int_to_float, %p : !cir.bool), !cir.float // expected-error {{requires !cir.int type for source}} cir.return } @@ -296,7 +296,7 @@ cir.func @cast23(%p : !cir.bool) { !u32i = !cir.int cir.func @cast24(%p : !u32i) { - %2 = cir.cast(int_to_float, %p : !u32i), !cir.bool // expected-error {{requires !cir.float for result}} + %2 = cir.cast(int_to_float, %p : !u32i), !cir.bool // expected-error {{requires !cir.float type for result}} cir.return } From 15045c94ef559e6d67975dca2db5722758ba1504 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 22 Apr 2024 12:52:49 -0700 Subject: [PATCH 1516/2301] [CIR][NFC] Fix warnings, remove redudant comments and cleanup helpers --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 49 ++++++++++--------- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 6 +-- 2 files changed, 29 insertions(+), 26 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 659819b17ab2..ff4d4575436f 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -66,11 +66,14 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return ::mlir::cir::VoidType::get(getContext()); } - mlir::cir::PointerType getVoidPtrTy(unsigned AddrSpace = 0) { - if (AddrSpace) - llvm_unreachable("address space is NYI"); - return ::mlir::cir::PointerType::get( - getContext(), ::mlir::cir::VoidType::get(getContext())); + mlir::cir::PointerType getPointerTo(mlir::Type ty, + unsigned addressSpace = 0) { + assert(!addressSpace && "address space is NYI"); + return mlir::cir::PointerType::get(getContext(), ty); + } + + mlir::cir::PointerType getVoidPtrTy(unsigned addressSpace = 0) { + return getPointerTo(::mlir::cir::VoidType::get(getContext()), addressSpace); } mlir::Value createNot(mlir::Value value) { @@ -171,31 +174,34 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { // Cast/Conversion Operators //===--------------------------------------------------------------------===// + mlir::Value createCast(mlir::Location loc, mlir::cir::CastKind kind, + mlir::Value src, mlir::Type newTy) { + if (newTy == src.getType()) + return src; + return create(loc, newTy, kind, src); + } + mlir::Value createCast(mlir::cir::CastKind kind, mlir::Value src, mlir::Type newTy) { if (newTy == src.getType()) return src; - return create(src.getLoc(), newTy, kind, src); + return createCast(src.getLoc(), kind, src, newTy); } mlir::Value createIntCast(mlir::Value src, mlir::Type newTy) { - return create(src.getLoc(), newTy, - mlir::cir::CastKind::integral, src); + return createCast(mlir::cir::CastKind::integral, src, newTy); } mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy) { - return create(src.getLoc(), newTy, - mlir::cir::CastKind::int_to_ptr, src); + return createCast(mlir::cir::CastKind::int_to_ptr, src, newTy); } mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy) { - return create(src.getLoc(), newTy, - mlir::cir::CastKind::ptr_to_int, src); + return createCast(mlir::cir::CastKind::ptr_to_int, src, newTy); } mlir::Value createPtrToBoolCast(mlir::Value v) { - return create(v.getLoc(), getBoolTy(), - mlir::cir::CastKind::ptr_to_bool, v); + return createCast(mlir::cir::CastKind::ptr_to_bool, v, getBoolTy()); } // TODO(cir): the following function was introduced to keep in sync with LLVM @@ -224,10 +230,12 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Value createBitcast(mlir::Location loc, mlir::Value src, mlir::Type newTy) { - if (newTy == src.getType()) - return src; - return create(loc, newTy, mlir::cir::CastKind::bitcast, - src); + return createCast(loc, mlir::cir::CastKind::bitcast, src, newTy); + } + + mlir::Value createPtrBitcast(mlir::Value src, mlir::Type newPointeeTy) { + assert(src.getType().isa() && "expected ptr src"); + return createBitcast(src, getPointerTo(newPointeeTy)); } mlir::Value createPtrIsNull(mlir::Value ptr) { @@ -259,11 +267,6 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { size.getQuantity()); } - mlir::cir::PointerType getPointerTo(mlir::Type ty, - unsigned addressSpace = 0) { - return mlir::cir::PointerType::get(getContext(), ty); - } - /// Create a do-while operation. mlir::cir::DoWhileOp createDoWhile( mlir::Location loc, diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index cc235bb89451..157d68435571 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -811,8 +811,8 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { return arrangeFunctionDeclaration(FD); } -// UpdateCompletedType - When we find the full definition for a TagDecl, -// replace the 'opaque' type we previously made for it if applicable. +// When we find the full definition for a TagDecl, replace the 'opaque' type we +// previously made for it if applicable. void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { // If this is an enum being completed, then we flush all non-struct types // from the cache. This allows function types and other things that may be @@ -849,7 +849,7 @@ void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { llvm_unreachable("NYI"); } -/// getCIRGenRecordLayout - Return record layout info for the given record decl. +/// Return record layout info for the given record decl. const CIRGenRecordLayout & CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *RD) { const auto *Key = Context.getTagDeclType(RD).getTypePtr(); From 6e410159cb463a40fea5ed604ebd0e6127bb74a8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 22 Apr 2024 12:53:21 -0700 Subject: [PATCH 1517/2301] [CIR][CIRGen][LLVMLowering] Support atomic exchange --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 23 ++++++++ clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 3 +- clang/lib/CIR/CodeGen/Address.h | 2 +- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 15 ++++-- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 9 ++-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 13 +++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 54 +++++++++++++++---- clang/test/CIR/CodeGen/atomic-xchg-field.c | 43 +++++++++++++++ 9 files changed, 141 insertions(+), 25 deletions(-) create mode 100644 clang/test/CIR/CodeGen/atomic-xchg-field.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e49b51496451..66f30b3e9b7a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3703,6 +3703,29 @@ def AtomicFetch : CIR_Op<"atomic.fetch", let hasVerifier = 1; } +def AtomicXchg : CIR_Op<"atomic.xchg", [Pure, SameSecondOperandAndResultType]> { + let summary = "Atomic exchange"; + let description = [{ + Atomic exchange functionality mapped from different use of builtins in + C/C++. + }]; + let results = (outs CIR_AnyType:$result); + let arguments = (ins CIR_PointerType:$ptr, CIR_AnyType:$val, + Arg:$mem_order, + UnitAttr:$is_volatile); + + let assemblyFormat = [{ + `(` + $ptr `:` qualified(type($ptr)) `,` + $val `:` type($val) `,` + $mem_order `)` + (`volatile` $is_volatile^)? + `:` type($result) attr-dict + }]; + + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // Operations Lowered Directly to LLVM IR // diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 23190bf03312..f5be28277e94 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -162,6 +162,7 @@ class StructType DataLayoutEntryListRef params) const; uint64_t getPreferredAlignment(const DataLayout &dataLayout, DataLayoutEntryListRef params) const; + uint64_t getElementOffset(const DataLayout &dataLayout, unsigned idx) const; bool isLayoutIdentical(const StructType &other); @@ -172,8 +173,6 @@ class StructType // instead. mutable mlir::Attribute layoutInfo; bool isPadded(const DataLayout &dataLayout) const; - uint64_t getElementOffset(const DataLayout &dataLayout, unsigned idx) const; - void computeSizeAndAlignment(const DataLayout &dataLayout) const; }; diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 68ca69b1de31..ac3afd779919 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -64,7 +64,7 @@ class Address { /// Return address with different pointer, but same element type and /// alignment. Address withPointer(mlir::Value NewPointer, - KnownNonNull_t IsKnownNonNull) const { + KnownNonNull_t IsKnownNonNull = NotKnownNonNull) const { return Address(NewPointer, getElementType(), getAlignment(), IsKnownNonNull); } diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 4e5190216751..0ffe9ecbe3af 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -389,7 +389,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_exchange: case AtomicExpr::AO__scoped_atomic_exchange_n: case AtomicExpr::AO__scoped_atomic_exchange: - llvm_unreachable("NYI"); + Op = mlir::cir::AtomicXchg::getOperationName(); break; case AtomicExpr::AO__atomic_add_fetch: @@ -507,8 +507,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, auto LoadVal1 = builder.createLoad(loc, Val1); SmallVector atomicOperands = {Ptr.getPointer(), LoadVal1}; - SmallVector atomicResTys = { - Ptr.getPointer().getType().cast().getPointee()}; + SmallVector atomicResTys = {LoadVal1.getType()}; auto RMWI = builder.create(loc, builder.getStringAttr(Op), atomicOperands, atomicResTys, {}); @@ -517,10 +516,18 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, RMWI->setAttr("mem_order", orderAttr); if (E->isVolatile()) RMWI->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); - if (fetchFirst) + if (fetchFirst && Op == mlir::cir::AtomicFetch::getOperationName()) RMWI->setAttr("fetch_first", mlir::UnitAttr::get(builder.getContext())); auto Result = RMWI->getResult(0); + + // TODO(cir): this logic should be part of createStore, but doing so currently + // breaks CodeGen/union.cpp and CodeGen/union.cpp. + auto ptrTy = Dest.getPointer().getType().cast(); + if (Dest.getElementType() != ptrTy.getPointee()) { + Dest = Dest.withPointer( + builder.createPtrBitcast(Dest.getPointer(), Dest.getElementType())); + } builder.createStore(loc, Result, Dest); } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index a705fc3fcd67..c8c2cd4876b5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -730,9 +730,12 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value createLoad(mlir::Location loc, Address addr) { auto ptrTy = addr.getPointer().getType().dyn_cast(); - return create( - loc, addr.getElementType(), - createElementBitCast(loc, addr, ptrTy.getPointee()).getPointer()); + if (addr.getElementType() != ptrTy.getPointee()) + addr = addr.withPointer( + createPtrBitcast(addr.getPointer(), addr.getElementType())); + + return create(loc, addr.getElementType(), + addr.getPointer()); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index e880eb6af77c..3c85ca217d37 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -79,10 +79,15 @@ static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, auto memberAddr = CGF.getBuilder().createGetMember( loc, fieldPtr, Base.getPointer(), fieldName, fieldIndex); - // TODO: We could get the alignment from the CIRGenRecordLayout, but given the - // member name based lookup of the member here we probably shouldn't be. We'll - // have to consider this later. - auto addr = Address(memberAddr, CharUnits::One()); + // Retrieve layout information, compute alignment and return the final + // address. + const RecordDecl *rec = field->getParent(); + auto &layout = CGF.CGM.getTypes().getCIRGenRecordLayout(rec); + unsigned idx = layout.getCIRFieldNo(field); + auto offset = CharUnits::fromQuantity(layout.getCIRType().getElementOffset( + CGF.CGM.getDataLayout().layout, idx)); + auto addr = + Address(memberAddr, Base.getAlignment().alignmentAtOffset(offset)); return addr; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 0740935fb761..9df58bf9eb54 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2549,7 +2549,7 @@ mlir::OpTrait::impl::verifySameSecondOperandAndResultType(Operation *op) { if (type != opType) return op->emitOpError() - << "requires the same type for first operand and result"; + << "requires the same type for second operand and result"; return success(); } @@ -2563,7 +2563,7 @@ mlir::OpTrait::impl::verifySameFirstSecondOperandAndResultType(Operation *op) { if (checkType != op->getOperand(0).getType() && checkType != op->getOperand(1).getType()) return op->emitOpError() - << "requires the same type for first operand and result"; + << "requires the same type for first, second operand and result"; return success(); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 938de6897ce9..19b6cf818236 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2196,6 +2196,41 @@ class CIRBitPopcountOpLowering } }; +class CIRAtomicXchgLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LLVM::AtomicOrdering + getLLVMAtomicOrder(mlir::cir::MemOrder memo) const { + switch (memo) { + case mlir::cir::MemOrder::Relaxed: + return mlir::LLVM::AtomicOrdering::monotonic; + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: + return mlir::LLVM::AtomicOrdering::acquire; + case mlir::cir::MemOrder::Release: + return mlir::LLVM::AtomicOrdering::release; + case mlir::cir::MemOrder::AcquireRelease: + return mlir::LLVM::AtomicOrdering::acq_rel; + case mlir::cir::MemOrder::SequentiallyConsistent: + return mlir::LLVM::AtomicOrdering::seq_cst; + } + llvm_unreachable("shouldn't get here"); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AtomicXchg op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // FIXME: add syncscope. + auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); + rewriter.replaceOpWithNewOp( + op, mlir::LLVM::AtomicBinOp::xchg, adaptor.getPtr(), adaptor.getVal(), + llvmOrder); + return mlir::success(); + } +}; + class CIRAtomicFetchLowering : public mlir::OpConversionPattern { public: @@ -2837,15 +2872,16 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add< CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, - CIRBitPopcountOpLowering, CIRAtomicFetchLowering, CIRByteswapOpLowering, - CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, - CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, - CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, - CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, - CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, - CIRBrOpLowering, CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, - CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, - CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, + CIRBitPopcountOpLowering, CIRAtomicXchgLowering, CIRAtomicFetchLowering, + CIRByteswapOpLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, + CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, + CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, + CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, + CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, + CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, + CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, + CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, + CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c new file mode 100644 index 000000000000..b5d01190c5a6 --- /dev/null +++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c @@ -0,0 +1,43 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +typedef struct __Base { + unsigned long id; + unsigned int a; + unsigned int n; + unsigned char x; + unsigned short u; +} Base; + +struct w { + Base _base; + const void * ref; +}; + +typedef struct w *wPtr; + +void field_access(wPtr item) { + __atomic_exchange_n((&item->ref), (((void*)0)), 5); +} + +// CHECK: ![[W:.*]] = !cir.struct, {{.*}} {alignment = 8 : i64} +// CHECK: %[[FIELD:.*]] = cir.load %[[WADDR]] +// CHECK: %[[MEMBER:.*]] = cir.get_member %[[FIELD]][1] {name = "ref"} +// CHECK: cir.atomic.xchg(%[[MEMBER]] : !cir.ptr>, {{.*}} : !u64i, seq_cst) + +// LLVM: define void @field_access +// LLVM: = alloca ptr, i64 1, align 8 +// LLVM: %[[VAL_ADDR:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[RES_ADDR:.*]] = alloca ptr, i64 1, align 8 + +// LLVM: %[[MEMBER:.*]] = getelementptr %struct.w, ptr {{.*}}, i32 0, i32 1 +// LLVM: store ptr null, ptr %[[VAL_ADDR]], align 8 +// LLVM: %[[VAL:.*]] = load i64, ptr %[[VAL_ADDR]], align 8 +// LLVM: %[[RES:.*]] = atomicrmw xchg ptr %[[MEMBER]], i64 %[[VAL]] seq_cst, align 8 +// LLVM: store i64 %[[RES]], ptr %4, align 8 +// LLVM: load ptr, ptr %[[RES_ADDR]], align 8 +// LLVM: ret void \ No newline at end of file From 0a903729982437759617ef7937e8fdf13a00f3f5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 23 Apr 2024 11:14:16 -0700 Subject: [PATCH 1518/2301] [CIR][CIRGen] Add support for more variations of __atomic_exchange --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 3 ++- clang/test/CIR/CodeGen/atomic.cpp | 28 +++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 0ffe9ecbe3af..f3ab4a4bcb82 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -630,7 +630,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__atomic_exchange: case AtomicExpr::AO__scoped_atomic_exchange: - llvm_unreachable("NYI"); + Val1 = buildPointerWithAlignment(E->getVal1()); + Dest = buildPointerWithAlignment(E->getVal2()); break; case AtomicExpr::AO__atomic_compare_exchange: diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 88d1d38bc549..25377616c188 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -215,8 +215,34 @@ void fi2c(atomic_int *i) { atomic_store(i, 1); } +struct S { + double x; +}; + // CHECK-LABEL: @_Z4fi2cPU7_Atomici // CHECK: cir.store atomic(seq_cst) // LLVM-LABEL: @_Z4fi2cPU7_Atomici -// LLVM: store atomic i32 {{.*}} seq_cst, align 4 \ No newline at end of file +// LLVM: store atomic i32 {{.*}} seq_cst, align 4 + +void fd3(struct S *a, struct S *b, struct S *c) { + __atomic_exchange(a, b, c, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z3fd3P1SS0_S0_ +// CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, seq_cst) : !u64i + +// FIXME: CIR is producing an over alignment of 8, only 4 needed. +// LLVM-LABEL: @_Z3fd3P1SS0_S0_ +// LLVM: [[A_ADDR:%.*]] = alloca ptr +// LLVM-NEXT: [[B_ADDR:%.*]] = alloca ptr +// LLVM-NEXT: [[C_ADDR:%.*]] = alloca ptr +// LLVM-NEXT: store ptr {{.*}}, ptr [[A_ADDR]] +// LLVM-NEXT: store ptr {{.*}}, ptr [[B_ADDR]] +// LLVM-NEXT: store ptr {{.*}}, ptr [[C_ADDR]] +// LLVM-NEXT: [[LOAD_A_PTR:%.*]] = load ptr, ptr [[A_ADDR]] +// LLVM-NEXT: [[LOAD_B_PTR:%.*]] = load ptr, ptr [[B_ADDR]] +// LLVM-NEXT: [[LOAD_C_PTR:%.*]] = load ptr, ptr [[C_ADDR]] +// LLVM-NEXT: [[LOAD_B:%.*]] = load i64, ptr [[LOAD_B_PTR]] +// LLVM-NEXT: [[RESULT:%.*]] = atomicrmw xchg ptr [[LOAD_A_PTR]], i64 [[LOAD_B]] seq_cst +// LLVM-NEXT: store i64 [[RESULT]], ptr [[LOAD_C_PTR]] \ No newline at end of file From 25cbb13c73399c24b68d421711a4399995e425e1 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 24 Apr 2024 05:24:39 +0800 Subject: [PATCH 1519/2301] [CIR] Lower certain `cir.cmp3way` operations to LLVM intrinsics (#556) LLVM recently added two families of intrinsics named `llvm.scmp.*` and `llvm.ucmp.*` that generate potentially better code for three-way comparison operations. This PR lowers certain `cir.cmp3way` operations to these intrinsics. Not all `cir.cmp3way` operations can be lowered to these intrinsics. The qualifying conditions are: 1) the comparison is between two integers, and 2) the comparison produces a strong ordering. `cir.cmp3way` operations that are not qualified are not affected by this PR. Qualifying `cir.cmp3way` operations may still need some canonicalization work before lowering. The "canonicalized" form of a qualifying three-way comparison operation yields -1 for lt, 0 for eq, and 1 for gt. This PR converts those non-canonicalized but qualifying `cir.cmp3way` operations to their canonical forms in the LLVM lowering prepare pass. This PR addresses #514 . --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 5 ++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 12 +++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 -- .../Dialect/Transforms/LoweringPrepare.cpp | 72 +++++++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 89 +++++++++++++++++-- clang/test/CIR/CodeGen/Inputs/std-compare.h | 17 ++++ .../test/CIR/CodeGen/three-way-comparison.cpp | 58 ++++++++---- clang/test/CIR/Lowering/cmp3way.cir | 40 +++++++++ 8 files changed, 265 insertions(+), 33 deletions(-) create mode 100644 clang/test/CIR/Lowering/cmp3way.cir diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index ff4d4575436f..f5635b7ca33b 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -81,6 +81,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::cir::UnaryOpKind::Not, value); } + mlir::cir::CmpOp createCompare(mlir::Location loc, mlir::cir::CmpOpKind kind, + mlir::Value lhs, mlir::Value rhs) { + return create(loc, getBoolTy(), kind, lhs, rhs); + } + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, const llvm::APInt &rhs) { return create( diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 66f30b3e9b7a..620c2bb252de 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1328,6 +1328,18 @@ def CmpThreeWayOp : CIR_Op<"cmp3way", [Pure, SameTypeOperands]> { }]; let hasVerifier = 0; + + let extraClassDeclaration = [{ + /// Determine whether this three-way comparison produces a strong ordering. + bool isStrongOrdering() { + return getInfo().getOrdering() == mlir::cir::CmpOrdering::Strong; + } + + /// Determine whether this three-way comparison compares integral operands. + bool isIntegralComparison() { + return getLhs().getType().isa(); + } + }]; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index c8c2cd4876b5..d99790e68c38 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -594,11 +594,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc); } - mlir::cir::CmpOp createCompare(mlir::Location loc, mlir::cir::CmpOpKind kind, - mlir::Value lhs, mlir::Value rhs) { - return create(loc, getBoolTy(), kind, lhs, rhs); - } - mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, mlir::Value src, mlir::Value len) { return create(loc, dst, src, len); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 00ec54867c9f..059a5b82167f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -265,10 +265,82 @@ FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { return f; } +static void canonicalizeIntrinsicThreeWayCmp(CIRBaseBuilderTy &builder, + CmpThreeWayOp op) { + auto loc = op->getLoc(); + auto cmpInfo = op.getInfo(); + + if (cmpInfo.getLt() == -1 && cmpInfo.getEq() == 0 && cmpInfo.getGt() == 1) { + // The comparison is already in canonicalized form. + return; + } + + auto canonicalizedCmpInfo = + mlir::cir::CmpThreeWayInfoAttr::get(builder.getContext(), -1, 0, 1); + mlir::Value result = + builder + .create(loc, op.getType(), op.getLhs(), + op.getRhs(), canonicalizedCmpInfo) + .getResult(); + + auto compareAndYield = [&](mlir::Value input, int64_t test, + int64_t yield) -> mlir::Value { + // Create a conditional branch that tests whether `input` is equal to + // `test`. If `input` is equal to `test`, yield `yield`. Otherwise, yield + // `input` as is. + auto testValue = builder.getConstant( + loc, mlir::cir::IntAttr::get(input.getType(), test)); + auto yieldValue = builder.getConstant( + loc, mlir::cir::IntAttr::get(input.getType(), yield)); + auto eqToTest = + builder.createCompare(loc, mlir::cir::CmpOpKind::eq, input, testValue); + return builder + .create( + loc, eqToTest, + [&](OpBuilder &, Location) { + builder.create(loc, + mlir::ValueRange{yieldValue}); + }, + [&](OpBuilder &, Location) { + builder.create(loc, mlir::ValueRange{input}); + }) + ->getResult(0); + }; + + if (cmpInfo.getLt() != -1) + result = compareAndYield(result, -1, cmpInfo.getLt()); + + if (cmpInfo.getEq() != 0) + result = compareAndYield(result, 0, cmpInfo.getEq()); + + if (cmpInfo.getGt() != 1) + result = compareAndYield(result, 1, cmpInfo.getGt()); + + op.replaceAllUsesWith(result); + op.erase(); +} + void LoweringPreparePass::lowerThreeWayCmpOp(CmpThreeWayOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op); + if (op.isIntegralComparison() && op.isStrongOrdering()) { + // For three-way comparisons on integral operands that produce strong + // ordering, we can generate potentially better code with the `llvm.scmp.*` + // and `llvm.ucmp.*` intrinsics. Thus we don't replace these comparisons + // here. They will be lowered directly to LLVMIR during the LLVM lowering + // pass. + // + // But we still need to take a step here. `llvm.scmp.*` and `llvm.ucmp.*` + // returns -1, 0, or 1 to represent lt, eq, and gt, which are the + // "canonicalized" result values of three-way comparisons. However, + // `cir.cmp3way` may not produce canonicalized result. We need to + // canonicalize the comparison if necessary. This is what we're doing in + // this special branch. + canonicalizeIntrinsicThreeWayCmp(builder, op); + return; + } + auto loc = op->getLoc(); auto cmpInfo = op.getInfo(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 19b6cf818236..818b49fd8aee 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -57,6 +57,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Twine.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/Support/Casting.h" @@ -1985,6 +1986,16 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { } }; +static mlir::LLVM::CallIntrinsicOp +createCallLLVMIntrinsicOp(mlir::ConversionPatternRewriter &rewriter, + mlir::Location loc, const llvm::Twine &intrinsicName, + mlir::Type resultTy, mlir::ValueRange operands) { + auto intrinsicNameAttr = + mlir::StringAttr::get(rewriter.getContext(), intrinsicName); + return rewriter.create( + loc, resultTy, intrinsicNameAttr, operands); +} + static mlir::Value createLLVMBitOp(mlir::Location loc, const llvm::Twine &llvmIntrinBaseName, mlir::Type resultTy, mlir::Value operand, @@ -1997,8 +2008,6 @@ static mlir::Value createLLVMBitOp(mlir::Location loc, llvmIntrinBaseName.concat(".i") .concat(std::to_string(operandIntTy.getWidth())) .str(); - auto llvmIntrinNameAttr = - mlir::StringAttr::get(rewriter.getContext(), llvmIntrinName); // Note that LLVM intrinsic calls to bit intrinsics have the same type as the // operand. @@ -2006,12 +2015,12 @@ static mlir::Value createLLVMBitOp(mlir::Location loc, if (poisonZeroInputFlag.has_value()) { auto poisonZeroInputValue = rewriter.create( loc, rewriter.getI1Type(), static_cast(*poisonZeroInputFlag)); - op = rewriter.create( - loc, operand.getType(), llvmIntrinNameAttr, - mlir::ValueRange{operand, poisonZeroInputValue}); + op = createCallLLVMIntrinsicOp(rewriter, loc, llvmIntrinName, + operand.getType(), + {operand, poisonZeroInputValue}); } else { - op = rewriter.create( - loc, operand.getType(), llvmIntrinNameAttr, operand); + op = createCallLLVMIntrinsicOp(rewriter, loc, llvmIntrinName, + operand.getType(), operand); } mlir::Value result = op->getResult(0); @@ -2866,6 +2875,68 @@ class CIRIsConstantOpLowering } }; +class CIRCmpThreeWayOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern< + mlir::cir::CmpThreeWayOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CmpThreeWayOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + if (!op.isIntegralComparison() || !op.isStrongOrdering()) { + op.emitError() << "unsupported three-way comparison type"; + return mlir::failure(); + } + + auto cmpInfo = op.getInfo(); + assert(cmpInfo.getLt() == -1 && cmpInfo.getEq() == 0 && + cmpInfo.getGt() == 1); + + auto operandTy = op.getLhs().getType().cast(); + auto resultTy = op.getType(); + auto llvmIntrinsicName = getLLVMIntrinsicName( + operandTy.isSigned(), operandTy.getWidth(), resultTy.getWidth()); + + rewriter.setInsertionPoint(op); + + auto llvmLhs = adaptor.getLhs(); + auto llvmRhs = adaptor.getRhs(); + auto llvmResultTy = getTypeConverter()->convertType(resultTy); + auto callIntrinsicOp = + createCallLLVMIntrinsicOp(rewriter, op.getLoc(), llvmIntrinsicName, + llvmResultTy, {llvmLhs, llvmRhs}); + + rewriter.replaceOp(op, callIntrinsicOp); + return mlir::success(); + } + +private: + static std::string getLLVMIntrinsicName(bool signedCmp, unsigned operandWidth, + unsigned resultWidth) { + // The intrinsic's name takes the form: + // `llvm..i.i` + + std::string result = "llvm."; + + if (signedCmp) + result.append("scmp."); + else + result.append("ucmp."); + + // Result type part. + result.push_back('i'); + result.append(std::to_string(resultWidth)); + result.push_back('.'); + + // Operand type part. + result.push_back('i'); + result.append(std::to_string(operandWidth)); + + return result; + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -2888,8 +2959,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRVectorShuffleVecLowering, CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, - CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering>( - converter, patterns.getContext()); + CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, + CIRCmpThreeWayOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/Inputs/std-compare.h b/clang/test/CIR/CodeGen/Inputs/std-compare.h index eaf7951edf79..f7f0c9b06db6 100644 --- a/clang/test/CIR/CodeGen/Inputs/std-compare.h +++ b/clang/test/CIR/CodeGen/Inputs/std-compare.h @@ -4,6 +4,21 @@ namespace std { inline namespace __1 { +#ifdef NON_CANONICAL_CMP_RESULTS + +// exposition only +enum class _EqResult : unsigned char { + __equal = 2, + __equiv = __equal, +}; + +enum class _OrdResult : signed char { + __less = 1, + __greater = 3 +}; + +#else + // exposition only enum class _EqResult : unsigned char { __equal = 0, @@ -15,6 +30,8 @@ enum class _OrdResult : signed char { __greater = 1 }; +#endif + enum class _NCmpResult : signed char { __unordered = -127 }; diff --git a/clang/test/CIR/CodeGen/three-way-comparison.cpp b/clang/test/CIR/CodeGen/three-way-comparison.cpp index 09bb4cf9b461..c6aee921f24e 100644 --- a/clang/test/CIR/CodeGen/three-way-comparison.cpp +++ b/clang/test/CIR/CodeGen/three-way-comparison.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -DNON_CANONICAL_CMP_RESULTS -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=NONCANONICAL-BEFORE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -DNON_CANONICAL_CMP_RESULTS -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=NONCANONICAL-AFTER #include "Inputs/std-compare.h" @@ -16,25 +18,43 @@ auto three_way_strong(int x, int y) { // BEFORE: %{{.+}} = cir.cmp3way(%{{.+}} : !s32i, %{{.+}}, #cmp3way_info_strong_ltn1eq0gt1_) : !s8i // BEFORE: } -// AFTER: cir.func @_Z16three_way_strongii -// AFTER: %[[#LHS:]] = cir.load %{{.+}} : cir.ptr , !s32i -// AFTER-NEXT: %[[#RHS:]] = cir.load %{{.+}} : cir.ptr , !s32i -// AFTER-NEXT: %[[#LT:]] = cir.const(#cir.int<-1> : !s8i) : !s8i -// AFTER-NEXT: %[[#EQ:]] = cir.const(#cir.int<0> : !s8i) : !s8i -// AFTER-NEXT: %[[#GT:]] = cir.const(#cir.int<1> : !s8i) : !s8i -// AFTER-NEXT: %[[#CMP_LT:]] = cir.cmp(lt, %[[#LHS]], %[[#RHS]]) : !s32i, !cir.bool -// AFTER-NEXT: %[[#CMP_EQ:]] = cir.cmp(eq, %[[#LHS]], %[[#RHS]]) : !s32i, !cir.bool -// AFTER-NEXT: %[[#CMP_EQ_RES:]] = cir.ternary(%[[#CMP_EQ]], true { -// AFTER-NEXT: cir.yield %[[#EQ]] : !s8i -// AFTER-NEXT: }, false { -// AFTER-NEXT: cir.yield %[[#GT]] : !s8i -// AFTER-NEXT: }) : (!cir.bool) -> !s8i -// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#CMP_LT]], true { -// AFTER-NEXT: cir.yield %[[#LT]] : !s8i -// AFTER-NEXT: }, false { -// AFTER-NEXT: cir.yield %[[#CMP_EQ_RES]] : !s8i -// AFTER-NEXT: }) : (!cir.bool) -> !s8i -// AFTER: } +// AFTER: cir.func @_Z16three_way_strongii +// AFTER: %{{.+}} = cir.cmp3way(%{{.+}} : !s32i, %{{.+}}, #cmp3way_info_strong_ltn1eq0gt1_) : !s8i +// AFTER: } + +// NONCANONICAL-BEFORE: #cmp3way_info_strong_lt1eq2gt3_ = #cir.cmp3way_info +// NONCANONICAL-BEFORE: cir.func @_Z16three_way_strongii +// NONCANONICAL-BEFORE: %{{.+}} = cir.cmp3way(%{{.+}} : !s32i, %{{.+}}, #cmp3way_info_strong_lt1eq2gt3_) : !s8i +// NONCANONICAL-BEFORE: } + +// NONCANONICAL-AFTER: #cmp3way_info_strong_ltn1eq0gt1_ = #cir.cmp3way_info +// NONCANONICAL-AFTER: cir.func @_Z16three_way_strongii +// NONCANONICAL-AFTER: %[[#CMP3WAY_RESULT:]] = cir.cmp3way(%{{.+}} : !s32i, %{{.+}}, #cmp3way_info_strong_ltn1eq0gt1_) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#NEGONE:]] = cir.const(#cir.int<-1> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#ONE:]] = cir.const(#cir.int<1> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_NEGONE:]] = cir.cmp(eq, %[[#CMP3WAY_RESULT]], %[[#NEGONE]]) : !s8i, !cir.bool +// NONCANONICAL-AFTER-NEXT: %[[#A:]] = cir.ternary(%[[#CMP_TO_NEGONE]], true { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#ONE]] : !s8i +// NONCANONICAL-AFTER-NEXT: }, false { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#CMP3WAY_RESULT]] : !s8i +// NONCANONICAL-AFTER-NEXT: }) : (!cir.bool) -> !s8i +// NONCANONICAL-AFTER-NEXT: %[[#ZERO:]] = cir.const(#cir.int<0> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#TWO:]] = cir.const(#cir.int<2> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_ZERO:]] = cir.cmp(eq, %[[#A]], %[[#ZERO]]) : !s8i, !cir.bool +// NONCANONICAL-AFTER-NEXT: %[[#B:]] = cir.ternary(%[[#CMP_TO_ZERO]], true { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#TWO]] : !s8i +// NONCANONICAL-AFTER-NEXT: }, false { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#A]] : !s8i +// NONCANONICAL-AFTER-NEXT: }) : (!cir.bool) -> !s8i +// NONCANONICAL-AFTER-NEXT: %[[#ONE2:]] = cir.const(#cir.int<1> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#THREE:]] = cir.const(#cir.int<3> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_ONE:]] = cir.cmp(eq, %[[#B]], %[[#ONE2]]) : !s8i, !cir.bool +// NONCANONICAL-AFTER-NEXT: %{{.+}} = cir.ternary(%[[#CMP_TO_ONE]], true { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#THREE]] : !s8i +// NONCANONICAL-AFTER-NEXT: }, false { +// NONCANONICAL-AFTER-NEXT: cir.yield %[[#B]] : !s8i +// NONCANONICAL-AFTER-NEXT: }) : (!cir.bool) -> !s8i +// NONCANONICAL-AFTER: } auto three_way_weak(float x, float y) { return x <=> y; diff --git a/clang/test/CIR/Lowering/cmp3way.cir b/clang/test/CIR/Lowering/cmp3way.cir new file mode 100644 index 000000000000..6e00a9440f59 --- /dev/null +++ b/clang/test/CIR/Lowering/cmp3way.cir @@ -0,0 +1,40 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s8i = !cir.int +!s32i = !cir.int +!u32i = !cir.int + +#cmp3way_info = #cir.cmp3way_info + +module { + cir.func @test_scmp(%arg0 : !s32i, %arg1 : !s32i) -> !s8i { + %0 = cir.cmp3way(%arg0 : !s32i, %arg1, #cmp3way_info) : !s8i + cir.return %0 : !s8i + } + + // MLIR: llvm.func @test_scmp(%arg0: i32, %arg1: i32) -> i8 + // MLIR-NEXT: %0 = llvm.call_intrinsic "llvm.scmp.i8.i32"(%arg0, %arg1) : (i32, i32) -> i8 + // MLIR-NEXT: llvm.return %0 : i8 + // MLIR-NEXT: } + + // LLVM: define i8 @test_scmp(i32 %0, i32 %1) + // LLVM-NEXT: %[[#RET:]] = call i8 @llvm.scmp.i8.i32(i32 %0, i32 %1) + // LLVM-NEXT: ret i8 %[[#RET]] + // LLVM-NEXT: } + + cir.func @test_ucmp(%arg0 : !u32i, %arg1 : !u32i) -> !s8i { + %0 = cir.cmp3way(%arg0 : !u32i, %arg1, #cmp3way_info) : !s8i + cir.return %0 : !s8i + } + + // MLIR: llvm.func @test_ucmp(%arg0: i32, %arg1: i32) -> i8 + // MLIR-NEXT: %0 = llvm.call_intrinsic "llvm.ucmp.i8.i32"(%arg0, %arg1) : (i32, i32) -> i8 + // MLIR-NEXT: llvm.return %0 : i8 + // MLIR-NEXT: } + + // LLVM: define i8 @test_ucmp(i32 %0, i32 %1) + // LLVM-NEXT: %[[#RET:]] = call i8 @llvm.ucmp.i8.i32(i32 %0, i32 %1) + // LLVM-NEXT: ret i8 %[[#RET]] + // LLVM-NEXT: } +} From d704b93aac6cb46ff1e6d4a410fd2d21ffae724c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 23 Apr 2024 14:57:19 -0700 Subject: [PATCH 1520/2301] [CIR][CIRGen] Atomics: Add skeleton for compare and exchange NFCI. Any input code still hits an assertion, just a bit down the road. --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 87 +++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index f3ab4a4bcb82..326afd8822c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -315,6 +315,68 @@ static mlir::cir::IntAttr getConstOpIntAttr(mlir::Value v) { return constVal; } +static bool isCstWeak(mlir::Value weakVal, uint64_t &val) { + auto intAttr = getConstOpIntAttr(weakVal); + if (!intAttr) + return false; + val = intAttr.getUInt(); + return true; +} + +static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, + Address Dest, Address Ptr, Address Val1, + Address Val2, uint64_t Size, + mlir::cir::MemOrder SuccessOrder, + mlir::cir::MemOrder FailureOrder, + llvm::SyncScope::ID Scope) { + llvm_unreachable("NYI"); +} + +/// Given an ordering required on success, emit all possible cmpxchg +/// instructions to cope with the provided (but possibly only dynamically known) +/// FailureOrder. +static void buildAtomicCmpXchgFailureSet( + CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, + Address Val1, Address Val2, mlir::Value FailureOrderVal, uint64_t Size, + mlir::cir::MemOrder SuccessOrder, llvm::SyncScope::ID Scope) { + + mlir::cir::MemOrder FailureOrder; + if (auto ordAttr = getConstOpIntAttr(FailureOrderVal)) { + // We should not ever get to a case where the ordering isn't a valid CABI + // value, but it's hard to enforce that in general. + auto ord = ordAttr.getUInt(); + if (!mlir::cir::isValidCIRAtomicOrderingCABI(ord)) { + FailureOrder = mlir::cir::MemOrder::Relaxed; + } else { + switch ((mlir::cir::MemOrder)ord) { + case mlir::cir::MemOrder::Relaxed: + // 31.7.2.18: "The failure argument shall not be memory_order_release + // nor memory_order_acq_rel". Fallback to monotonic. + case mlir::cir::MemOrder::Release: + case mlir::cir::MemOrder::AcquireRelease: + FailureOrder = mlir::cir::MemOrder::Relaxed; + break; + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: + FailureOrder = mlir::cir::MemOrder::Acquire; + break; + case mlir::cir::MemOrder::SequentiallyConsistent: + FailureOrder = mlir::cir::MemOrder::SequentiallyConsistent; + break; + } + } + // Prior to c++17, "the failure argument shall be no stronger than the + // success argument". This condition has been lifted and the only + // precondition is 31.7.2.18. Effectively treat this as a DR and skip + // language version checks. + buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, + SuccessOrder, FailureOrder, Scope); + return; + } + + llvm_unreachable("NYI"); +} + static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, Address Ptr, Address Val1, Address Val2, mlir::Value IsWeak, mlir::Value FailureOrder, @@ -348,7 +410,13 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_compare_exchange_n: case AtomicExpr::AO__scoped_atomic_compare_exchange: case AtomicExpr::AO__scoped_atomic_compare_exchange_n: { - llvm_unreachable("NYI"); + uint64_t weakVal; + if (isCstWeak(IsWeak, weakVal)) { + buildAtomicCmpXchgFailureSet(CGF, E, weakVal, Dest, Ptr, Val1, Val2, + FailureOrder, Size, Order, Scope); + } else { + llvm_unreachable("NYI"); + } return; } case AtomicExpr::AO__c11_atomic_load: @@ -644,7 +712,20 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: case AtomicExpr::AO__scoped_atomic_compare_exchange: case AtomicExpr::AO__scoped_atomic_compare_exchange_n: - llvm_unreachable("NYI"); + Val1 = buildPointerWithAlignment(E->getVal1()); + if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange || + E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange) + Val2 = buildPointerWithAlignment(E->getVal2()); + else { + llvm_unreachable("NYI"); + } + OrderFail = buildScalarExpr(E->getOrderFail()); + if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n || + E->getOp() == AtomicExpr::AO__atomic_compare_exchange || + E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n || + E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange) { + IsWeak = buildScalarExpr(E->getWeak()); + } break; case AtomicExpr::AO__c11_atomic_fetch_add: @@ -739,7 +820,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { if (ShouldCastToIntPtrTy) Dest = Atomics.castToAtomicIntPointer(Dest); } else if (E->isCmpXChg()) - llvm_unreachable("NYI"); + Dest = CreateMemTemp(RValTy, getLoc(E->getSourceRange()), "cmpxchg.bool"); else if (!RValTy->isVoidType()) { Dest = Atomics.CreateTempAlloca(); if (ShouldCastToIntPtrTy) From e1406cf692ad24abb6d592535c415d0f9f33a54a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 23 Apr 2024 18:20:52 -0700 Subject: [PATCH 1521/2301] [CIR][CIRGen] Atomics: initial compare and exchange support --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 31 +++++ clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 24 +++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 108 ++++++++++++++++-- clang/test/CIR/CodeGen/atomic-xchg-field.c | 31 ++++- 4 files changed, 177 insertions(+), 17 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 620c2bb252de..41c20d48e4ce 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3738,6 +3738,37 @@ def AtomicXchg : CIR_Op<"atomic.xchg", [Pure, SameSecondOperandAndResultType]> { let hasVerifier = 0; } +def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", [Pure]> { + let summary = "Atomic compare exchange"; + let description = [{ + C/C++ Atomic compare and exchange. Example: + + }]; + let results = (outs CIR_BoolType:$cmp); + let arguments = (ins CIR_PointerType:$ptr, + CIR_PointerType:$expected, + CIR_AnyType:$desired, + Arg:$succ_order, + Arg:$fail_order, + UnitAttr:$weak, + UnitAttr:$is_volatile); + + let assemblyFormat = [{ + `(` + $ptr `:` qualified(type($ptr)) `,` + $expected `:` type($expected) `,` + $desired `:` type($desired) `,` + `success` `=` $succ_order `,` + `failure` `=` $fail_order + `)` + (`weak` $weak^)? + (`volatile` $is_volatile^)? + `:` type($cmp) attr-dict + }]; + + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // Operations Lowered Directly to LLVM IR // diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 326afd8822c9..85dfff104cc7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -329,7 +329,24 @@ static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, mlir::cir::MemOrder SuccessOrder, mlir::cir::MemOrder FailureOrder, llvm::SyncScope::ID Scope) { - llvm_unreachable("NYI"); + auto &builder = CGF.getBuilder(); + auto loc = CGF.getLoc(E->getSourceRange()); + mlir::Value Expected = Val1.getPointer(); + mlir::Value Desired = Val2.getPointer(); + + // The approach here is a bit different from traditional LLVM codegen: we pass + // the pointers instead, and let the operation hide the details of storing the + // old value into expected in case of failure (handled during LLVM lowering). + auto boolTy = builder.getBoolTy(); + auto cmpxchg = builder.create( + loc, boolTy, Ptr.getPointer(), Expected, Desired, SuccessOrder, + FailureOrder); + cmpxchg.setIsVolatile(E->isVolatile()); + cmpxchg.setWeak(IsWeak); + + // Update the memory at Dest with Cmp's value. + CGF.buildStoreOfScalar(cmpxchg.getCmp(), + CGF.makeAddrLValue(Dest, E->getType())); } /// Given an ordering required on success, emit all possible cmpxchg @@ -716,9 +733,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange || E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange) Val2 = buildPointerWithAlignment(E->getVal2()); - else { - llvm_unreachable("NYI"); - } + else + Val2 = buildValToTemp(*this, E->getVal2()); OrderFail = buildScalarExpr(E->getOrderFail()); if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n || E->getOp() == AtomicExpr::AO__atomic_compare_exchange || diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 818b49fd8aee..ee8a87f09169 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2205,6 +2205,94 @@ class CIRBitPopcountOpLowering } }; +class CIRAtomicCmpXchgLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LLVM::AtomicOrdering + getLLVMAtomicOrder(mlir::cir::MemOrder memo) const { + switch (memo) { + case mlir::cir::MemOrder::Relaxed: + return mlir::LLVM::AtomicOrdering::monotonic; + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: + return mlir::LLVM::AtomicOrdering::acquire; + case mlir::cir::MemOrder::Release: + return mlir::LLVM::AtomicOrdering::release; + case mlir::cir::MemOrder::AcquireRelease: + return mlir::LLVM::AtomicOrdering::acq_rel; + case mlir::cir::MemOrder::SequentiallyConsistent: + return mlir::LLVM::AtomicOrdering::seq_cst; + } + llvm_unreachable("shouldn't get here"); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AtomicCmpXchg op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // This basic block is the exit point of the operation, we should end up + // here regardless of whether or not the operation succeeded. + mlir::Block *continueBB = nullptr; + { + mlir::OpBuilder::InsertionGuard guard(rewriter); + continueBB = rewriter.splitBlock(rewriter.getInsertionBlock(), + std::next(op->getIterator())); + } + + const auto llvmTy = getTypeConverter()->convertType( + op.getExpected().getType().cast().getPointee()); + auto expected = rewriter.create(op.getLoc(), llvmTy, + adaptor.getExpected(), + /*alignment=*/0); + auto desired = rewriter.create(op.getLoc(), llvmTy, + adaptor.getDesired(), + /*alignment=*/0); + + // FIXME: add syncscope. + auto cmpxchg = rewriter.create( + op.getLoc(), adaptor.getPtr(), expected, desired, + getLLVMAtomicOrder(adaptor.getSuccOrder()), + getLLVMAtomicOrder(adaptor.getFailOrder())); + cmpxchg.setWeak(adaptor.getWeak()); + cmpxchg.setVolatile_(adaptor.getIsVolatile()); + + // Check result and apply stores accordingly. + auto old = rewriter.create( + op.getLoc(), cmpxchg.getResult(), 0); + auto cmp = rewriter.create( + op.getLoc(), cmpxchg.getResult(), 1); + + // This basic block is used to hold the store instruction if the operation + // failed. Create it here and populate CondBrOp. + mlir::Block *storeExpectedBB = nullptr; + { + mlir::OpBuilder::InsertionGuard guard(rewriter); + storeExpectedBB = rewriter.createBlock(cmpxchg->getParentRegion()); + } + + rewriter.create(op.getLoc(), cmp, continueBB, + storeExpectedBB, mlir::ValueRange{}); + + // Fill in storeExpectedBB + rewriter.setInsertionPoint(storeExpectedBB, storeExpectedBB->begin()); + rewriter.create(op.getLoc(), old, + adaptor.getExpected(), + /*alignment=*/0, + /* volatile */ false, + /* nontemporal */ false); + rewriter.create(op.getLoc(), continueBB); + + // Fill in continueBB + // Zero-extend the cmp result so it matches the bool type on the other side. + rewriter.setInsertionPoint(continueBB, continueBB->begin()); + auto extCmp = rewriter.create( + op.getLoc(), rewriter.getI8Type(), cmp); + rewriter.replaceOp(op, extCmp); + return mlir::success(); + } +}; + class CIRAtomicXchgLowering : public mlir::OpConversionPattern { public: @@ -2943,16 +3031,16 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add< CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, - CIRBitPopcountOpLowering, CIRAtomicXchgLowering, CIRAtomicFetchLowering, - CIRByteswapOpLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, - CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, - CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, - CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, - CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, - CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, - CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, - CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, - CIRExpectOpLowering, CIRVTableAddrPointOpLowering, + CIRBitPopcountOpLowering, CIRAtomicCmpXchgLowering, CIRAtomicXchgLowering, + CIRAtomicFetchLowering, CIRByteswapOpLowering, CIRBrCondOpLowering, + CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, + CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, + CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, + CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, + CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, + CIRBrOpLowering, CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, + CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, + CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c index b5d01190c5a6..e2af3ef48656 100644 --- a/clang/test/CIR/CodeGen/atomic-xchg-field.c +++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c @@ -23,13 +23,13 @@ void field_access(wPtr item) { } // CHECK: ![[W:.*]] = !cir.struct, {{.*}} {alignment = 8 : i64} // CHECK: %[[FIELD:.*]] = cir.load %[[WADDR]] // CHECK: %[[MEMBER:.*]] = cir.get_member %[[FIELD]][1] {name = "ref"} // CHECK: cir.atomic.xchg(%[[MEMBER]] : !cir.ptr>, {{.*}} : !u64i, seq_cst) -// LLVM: define void @field_access +// LLVM-LABEL: @field_access // LLVM: = alloca ptr, i64 1, align 8 // LLVM: %[[VAL_ADDR:.*]] = alloca ptr, i64 1, align 8 // LLVM: %[[RES_ADDR:.*]] = alloca ptr, i64 1, align 8 @@ -40,4 +40,29 @@ void field_access(wPtr item) { // LLVM: %[[RES:.*]] = atomicrmw xchg ptr %[[MEMBER]], i64 %[[VAL]] seq_cst, align 8 // LLVM: store i64 %[[RES]], ptr %4, align 8 // LLVM: load ptr, ptr %[[RES_ADDR]], align 8 -// LLVM: ret void \ No newline at end of file +// LLVM: ret void + +void structAtomicExchange(unsigned referenceCount, wPtr item) { + __atomic_compare_exchange_n((&item->_base.a), (&referenceCount), (referenceCount + 1), 1 , 5, 5); +} + +// CHECK-LABEL: @structAtomicExchange +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : , {{.*}} : !cir.ptr, success = seq_cst, failure = seq_cst) weak : !cir.bool + +// LLVM-LABEL: @structAtomicExchange +// LLVM: load i32 +// LLVM: add i32 +// LLVM: store i32 +// LLVM: %[[EXP:.*]] = load i32 +// LLVM: %[[DES:.*]] = load i32 +// LLVM: %[[RES:.*]] = cmpxchg weak ptr %9, i32 %[[EXP]], i32 %[[DES]] seq_cst seq_cst +// LLVM: %[[OLD:.*]] = extractvalue { i32, i1 } %[[RES]], 0 +// LLVM: %[[CMP:.*]] = extractvalue { i32, i1 } %[[RES]], 1 +// LLVM: br i1 %[[CMP]], label %[[CONTINUE:.*]], label %[[STORE_OLD:.*]], +// LLVM: [[CONTINUE]]: +// LLVM: zext i1 %[[CMP]] to i8 +// LLVM: ret void + +// LLVM: [[STORE_OLD]]: +// LLVM: store i32 %[[OLD]], ptr +// LLVM: br label %[[CONTINUE]] \ No newline at end of file From f1d5af56900e97287487c0575e1067f6b0f3d885 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 24 Apr 2024 15:04:17 -0700 Subject: [PATCH 1522/2301] [CIR][LLVMLowering][NFC] Refactor and share getLLVMAtomicOrder --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 74 +++++-------------- 1 file changed, 19 insertions(+), 55 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ee8a87f09169..be70b45304e7 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2205,29 +2205,28 @@ class CIRBitPopcountOpLowering } }; +static mlir::LLVM::AtomicOrdering getLLVMAtomicOrder(mlir::cir::MemOrder memo) { + switch (memo) { + case mlir::cir::MemOrder::Relaxed: + return mlir::LLVM::AtomicOrdering::monotonic; + case mlir::cir::MemOrder::Consume: + case mlir::cir::MemOrder::Acquire: + return mlir::LLVM::AtomicOrdering::acquire; + case mlir::cir::MemOrder::Release: + return mlir::LLVM::AtomicOrdering::release; + case mlir::cir::MemOrder::AcquireRelease: + return mlir::LLVM::AtomicOrdering::acq_rel; + case mlir::cir::MemOrder::SequentiallyConsistent: + return mlir::LLVM::AtomicOrdering::seq_cst; + } + llvm_unreachable("shouldn't get here"); +} + class CIRAtomicCmpXchgLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; - mlir::LLVM::AtomicOrdering - getLLVMAtomicOrder(mlir::cir::MemOrder memo) const { - switch (memo) { - case mlir::cir::MemOrder::Relaxed: - return mlir::LLVM::AtomicOrdering::monotonic; - case mlir::cir::MemOrder::Consume: - case mlir::cir::MemOrder::Acquire: - return mlir::LLVM::AtomicOrdering::acquire; - case mlir::cir::MemOrder::Release: - return mlir::LLVM::AtomicOrdering::release; - case mlir::cir::MemOrder::AcquireRelease: - return mlir::LLVM::AtomicOrdering::acq_rel; - case mlir::cir::MemOrder::SequentiallyConsistent: - return mlir::LLVM::AtomicOrdering::seq_cst; - } - llvm_unreachable("shouldn't get here"); - } - mlir::LogicalResult matchAndRewrite(mlir::cir::AtomicCmpXchg op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { @@ -2284,7 +2283,8 @@ class CIRAtomicCmpXchgLowering rewriter.create(op.getLoc(), continueBB); // Fill in continueBB - // Zero-extend the cmp result so it matches the bool type on the other side. + // Zero-extend the cmp result so it matches the bool type on the other + // side. rewriter.setInsertionPoint(continueBB, continueBB->begin()); auto extCmp = rewriter.create( op.getLoc(), rewriter.getI8Type(), cmp); @@ -2298,24 +2298,6 @@ class CIRAtomicXchgLowering public: using OpConversionPattern::OpConversionPattern; - mlir::LLVM::AtomicOrdering - getLLVMAtomicOrder(mlir::cir::MemOrder memo) const { - switch (memo) { - case mlir::cir::MemOrder::Relaxed: - return mlir::LLVM::AtomicOrdering::monotonic; - case mlir::cir::MemOrder::Consume: - case mlir::cir::MemOrder::Acquire: - return mlir::LLVM::AtomicOrdering::acquire; - case mlir::cir::MemOrder::Release: - return mlir::LLVM::AtomicOrdering::release; - case mlir::cir::MemOrder::AcquireRelease: - return mlir::LLVM::AtomicOrdering::acq_rel; - case mlir::cir::MemOrder::SequentiallyConsistent: - return mlir::LLVM::AtomicOrdering::seq_cst; - } - llvm_unreachable("shouldn't get here"); - } - mlir::LogicalResult matchAndRewrite(mlir::cir::AtomicXchg op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { @@ -2333,24 +2315,6 @@ class CIRAtomicFetchLowering public: using OpConversionPattern::OpConversionPattern; - mlir::LLVM::AtomicOrdering - getLLVMAtomicOrder(mlir::cir::MemOrder memo) const { - switch (memo) { - case mlir::cir::MemOrder::Relaxed: - return mlir::LLVM::AtomicOrdering::monotonic; - case mlir::cir::MemOrder::Consume: - case mlir::cir::MemOrder::Acquire: - return mlir::LLVM::AtomicOrdering::acquire; - case mlir::cir::MemOrder::Release: - return mlir::LLVM::AtomicOrdering::release; - case mlir::cir::MemOrder::AcquireRelease: - return mlir::LLVM::AtomicOrdering::acq_rel; - case mlir::cir::MemOrder::SequentiallyConsistent: - return mlir::LLVM::AtomicOrdering::seq_cst; - } - llvm_unreachable("shouldn't get here"); - } - mlir::Value buildPostOp(mlir::cir::AtomicFetch op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal, bool isInt) const { From 9e91d4307ad2164f6c52544daea8dd65cc6d37a2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 Apr 2024 11:16:15 -0700 Subject: [PATCH 1523/2301] [CIR][CIRGen] Fix follow up stores for atomic loads --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 8 ++++++++ clang/test/CIR/CodeGen/atomic-xchg-field.c | 16 +++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 85dfff104cc7..2e736206c775 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -449,6 +449,14 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, load->setAttr("mem_order", orderAttr); if (E->isVolatile()) load->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); + + // TODO(cir): this logic should be part of createStore, but doing so + // currently breaks CodeGen/union.cpp and CodeGen/union.cpp. + auto ptrTy = Dest.getPointer().getType().cast(); + if (Dest.getElementType() != ptrTy.getPointee()) { + Dest = Dest.withPointer( + builder.createPtrBitcast(Dest.getPointer(), Dest.getElementType())); + } builder.createStore(loc, load->getResult(0), Dest); return; } diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c index e2af3ef48656..1309023e1c4a 100644 --- a/clang/test/CIR/CodeGen/atomic-xchg-field.c +++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c @@ -65,4 +65,18 @@ void structAtomicExchange(unsigned referenceCount, wPtr item) { // LLVM: [[STORE_OLD]]: // LLVM: store i32 %[[OLD]], ptr -// LLVM: br label %[[CONTINUE]] \ No newline at end of file +// LLVM: br label %[[CONTINUE]] + +void f2(const void *cf); + +void structLoad(unsigned referenceCount, wPtr item) { + f2(__atomic_load_n(&item->ref, 5)); +} + +// CHECK-LABEL: @structLoad +// CHECK: %[[ATOMIC_TEMP:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["atomic-temp"] +// CHECK: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %6 : cir.ptr , !u64i +// CHECK: %[[RES:.*]] = cir.cast(bitcast, %[[ATOMIC_TEMP]] : !cir.ptr>), !cir.ptr +// CHECK: cir.store %[[ATOMIC_LOAD]], %[[RES]] : !u64i, cir.ptr + +// No LLVM tests needed for this one, already covered elsewhere. From 2a3f59f4a744e6b75dd09fbba58a15a27e560f38 Mon Sep 17 00:00:00 2001 From: orbiri Date: Fri, 26 Apr 2024 00:05:42 +0300 Subject: [PATCH 1524/2301] [CIR][CIRGen] Add dynamic builtin alloca intrinsics support (#547) This patch adds the CIRGen for the following builtin functions: - `alloca`; - `_alloca`; - `__builtin_alloca`; - `__builtin_alloca_uninitialized`. Missing support to add in the future: - Non-default auto initialization setting. The default is to not initialize the allocated buffer, which is simpler to implement. This commit is leaving the skeleton to implement this feature following clang's codegen pattern. - It may be possible that the frontend has set non-default address space for the alloca's return value. This is the case for OpenCL or AMDGPU codes for example. This is handled in clang codegen via address space cast, and is left for future implementation. This commit introduces a guard-rail around this behaviour. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 30 +++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 61 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 +- .../Dialect/Transforms/LoweringPrepare.cpp | 13 ++-- clang/test/CIR/CodeGen/builtin-alloca.c | 62 +++++++++++++++++++ clang/test/CIR/CodeGen/builtin-ms-alloca.c | 23 +++++++ 6 files changed, 182 insertions(+), 12 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-alloca.c create mode 100644 clang/test/CIR/CodeGen/builtin-ms-alloca.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index f5635b7ca33b..a3df0ef0dcdc 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -160,6 +160,36 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return create(loc, val, dst, _volatile, order); } + mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Type type, llvm::StringRef name, + mlir::IntegerAttr alignment, + mlir::Value dynAllocSize) { + return create(loc, addrType, type, name, alignment, + dynAllocSize); + } + + mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Type type, llvm::StringRef name, + clang::CharUnits alignment, + mlir::Value dynAllocSize) { + auto alignmentIntAttr = getSizeFromCharUnits(getContext(), alignment); + return createAlloca(loc, addrType, type, name, alignmentIntAttr, + dynAllocSize); + } + + mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Type type, llvm::StringRef name, + mlir::IntegerAttr alignment) { + return create(loc, addrType, type, name, alignment); + } + + mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Type type, llvm::StringRef name, + clang::CharUnits alignment) { + auto alignmentIntAttr = getSizeFromCharUnits(getContext(), alignment); + return createAlloca(loc, addrType, type, name, alignmentIntAttr); + } + mlir::Value createSub(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, bool hasNSW = false) { auto op = create(lhs.getLoc(), lhs.getType(), diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 8c5d26b6a06d..4c943f25178c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -71,6 +71,24 @@ buildBuiltinBitOp(CIRGenFunction &CGF, const CallExpr *E, return RValue::get(op); } +// Initialize the alloca with the given size and alignment according to the lang +// opts. Supporting only the trivial non-initialization for now. +static void initializeAlloca(CIRGenFunction &CGF, + [[maybe_unused]] mlir::Value AllocaAddr, + [[maybe_unused]] mlir::Value Size, + [[maybe_unused]] CharUnits AlignmentInBytes) { + + switch (CGF.getLangOpts().getTrivialAutoVarInit()) { + case LangOptions::TrivialAutoVarInitKind::Uninitialized: + // Nothing to initialize. + return; + case LangOptions::TrivialAutoVarInitKind::Zero: + case LangOptions::TrivialAutoVarInitKind::Pattern: + assert(false && "unexpected trivial auto var init kind NYI"); + return; + } +} + RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue) { @@ -642,6 +660,49 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Result = builder.createBoolToInt(Result, ResultType); return RValue::get(Result); } + + case Builtin::BIalloca: + case Builtin::BI_alloca: + case Builtin::BI__builtin_alloca_uninitialized: + case Builtin::BI__builtin_alloca: { + // Get alloca size input + mlir::Value Size = buildScalarExpr(E->getArg(0)); + + // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__. + const TargetInfo &TI = getContext().getTargetInfo(); + const CharUnits SuitableAlignmentInBytes = + getContext().toCharUnitsFromBits(TI.getSuitableAlign()); + + // Emit the alloca op with type `u8 *` to match the semantics of + // `llvm.alloca`. We later bitcast the type to `void *` to match the + // semantics of C/C++ + // FIXME(cir): It may make sense to allow AllocaOp of type `u8` to return a + // pointer of type `void *`. This will require a change to the allocaOp + // verifier. + auto AllocaAddr = builder.createAlloca( + getLoc(E->getSourceRange()), builder.getUInt8PtrTy(), + builder.getUInt8Ty(), "bi_alloca", SuitableAlignmentInBytes, Size); + + // Initialize the allocated buffer if required. + if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized) + initializeAlloca(*this, AllocaAddr, Size, SuitableAlignmentInBytes); + + // An alloca will always return a pointer to the alloca (stack) address + // space. This address space need not be the same as the AST / Language + // default (e.g. in C / C++ auto vars are in the generic address space). At + // the AST level this is handled within CreateTempAlloca et al., but for the + // builtin / dynamic alloca we have to handle it here. + assert(!UnimplementedFeature::addressSpace()); + LangAS AAS = getASTAllocaAddressSpace(); + LangAS EAS = E->getType()->getPointeeType().getAddressSpace(); + if (EAS != AAS) { + assert(false && "Non-default address space for alloca NYI"); + } + + // Bitcast the alloca to the expected type. + return RValue::get( + builder.createBitcast(AllocaAddr, builder.getVoidPtrTy())); + } } // If this is an alias for a lib function (e.g. __builtin_sin), emit diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 3c85ca217d37..1daa31120573 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2480,9 +2480,8 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, { mlir::OpBuilder::InsertionGuard guard(builder); builder.restoreInsertionPoint(ip); - addr = builder.create(loc, /*addr type*/ localVarPtrTy, - /*var type*/ ty, name, - alignIntAttr, arraySize); + addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy, + /*var type*/ ty, name, alignIntAttr, arraySize); if (currVarDecl) { auto alloca = cast(addr.getDefiningOp()); alloca.setAstAttr(ASTVarDeclAttr::get(builder.getContext(), currVarDecl)); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 059a5b82167f..e92e40b7ccd4 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -497,20 +497,16 @@ static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, mlir::Value end = builder.create( loc, eltTy, begin, numArrayElementsConst); - auto tmpAddr = builder.create( + auto tmpAddr = builder.createAlloca( loc, /*addr type*/ builder.getPointerTo(eltTy), - /*var type*/ eltTy, "__array_idx", - builder.getSizeFromCharUnits(builder.getContext(), - clang::CharUnits::One()), - nullptr); + /*var type*/ eltTy, "__array_idx", clang::CharUnits::One()); builder.createStore(loc, begin, tmpAddr); auto loop = builder.createDoWhile( loc, /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto currentElement = - b.create(loc, eltTy, tmpAddr.getResult()); + auto currentElement = b.create(loc, eltTy, tmpAddr); mlir::Type boolTy = mlir::cir::BoolType::get(b.getContext()); auto cmp = builder.create( loc, boolTy, mlir::cir::CmpOpKind::eq, currentElement, end); @@ -518,8 +514,7 @@ static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto currentElement = - b.create(loc, eltTy, tmpAddr.getResult()); + auto currentElement = b.create(loc, eltTy, tmpAddr); CallOp ctorCall; op->walk([&](CallOp c) { ctorCall = c; }); diff --git a/clang/test/CIR/CodeGen/builtin-alloca.c b/clang/test/CIR/CodeGen/builtin-alloca.c new file mode 100644 index 000000000000..a02f328cc12f --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-alloca.c @@ -0,0 +1,62 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s --check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +typedef __SIZE_TYPE__ size_t; +void *alloca(size_t size); +void *_alloca(size_t size); + +void my_alloca(size_t n) +{ + int *c1 = alloca(n); +} +// CIR: cir.func @my_alloca([[ALLOCA_SIZE:%.*]]: !u64i +// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr +// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i +// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr +// CIR: } + + +// LLVM: define void @my_alloca(i64 [[ALLOCA_SIZE:%.*]]) +// LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]], +// LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]], +// LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16 +// LLVM: } + +void my___builtin_alloca(size_t n) +{ + int *c1 = (int *)__builtin_alloca(n); +} + +// CIR: cir.func @my___builtin_alloca([[ALLOCA_SIZE:%.*]]: !u64i +// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr +// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i +// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr +// CIR: } + + +// LLVM: define void @my___builtin_alloca(i64 [[ALLOCA_SIZE:%.*]]) +// LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]], +// LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]], +// LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16 +// LLVM: } + +void my__builtin_alloca_uninitialized(size_t n) +{ + int *c1 = (int *)__builtin_alloca_uninitialized(n); +} + +// CIR: cir.func @my__builtin_alloca_uninitialized([[ALLOCA_SIZE:%.*]]: !u64i +// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr +// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i +// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr +// CIR: } + + +// LLVM: define void @my__builtin_alloca_uninitialized(i64 [[ALLOCA_SIZE:%.*]]) +// LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]], +// LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]], +// LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16 +// LLVM: } diff --git a/clang/test/CIR/CodeGen/builtin-ms-alloca.c b/clang/test/CIR/CodeGen/builtin-ms-alloca.c new file mode 100644 index 000000000000..d500304d7f6d --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-ms-alloca.c @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fms-extensions -emit-cir %s -o - | FileCheck %s --check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fms-extensions -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +typedef __SIZE_TYPE__ size_t; + +void my_win_alloca(size_t n) +{ + int *c1 = (int *)_alloca(n); +} + +// CIR: cir.func @my_win_alloca([[ALLOCA_SIZE:%.*]]: !u64i +// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr +// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i +// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr +// CIR: } + + +// LLVM: define void @my_win_alloca(i64 [[ALLOCA_SIZE:%.*]]) +// LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]], +// LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]], +// LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16 +// LLVM: } From c61ae87c718b36e683d48a030762704dbf9ff51c Mon Sep 17 00:00:00 2001 From: axp Date: Fri, 26 Apr 2024 05:06:05 +0800 Subject: [PATCH 1525/2301] [CIR[CIRGen][NFC] Refactor build switch op (#552) Make logic cleaner and more extensible. Separate collecting `SwitchStmt` information and building op logic into different functions. Add more UT to cover nested switch, which also worked before this pr. This pr is split from #528. --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 32 +++++--- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 104 +++++++++++++------------ clang/test/CIR/CodeGen/switch.cpp | 27 +++++++ 3 files changed, 105 insertions(+), 58 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9ece64bc89bb..22b863ba0d7c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1085,18 +1085,23 @@ class CIRGenFunction : public CIRGenTypeCache { template mlir::LogicalResult buildCaseDefaultCascade(const T *stmt, mlir::Type condType, - SmallVector &caseAttrs, - mlir::OperationState &os); + SmallVector &caseAttrs); mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S, mlir::Type condType, - SmallVector &caseAttrs, - mlir::OperationState &op); + SmallVector &caseAttrs); mlir::LogicalResult buildDefaultStmt(const clang::DefaultStmt &S, mlir::Type condType, - SmallVector &caseAttrs, - mlir::OperationState &op); + SmallVector &caseAttrs); + + mlir::LogicalResult + buildSwitchCase(const clang::SwitchCase &S, mlir::Type condType, + SmallVector &caseAttrs); + + mlir::LogicalResult + buildSwitchBody(const clang::Stmt *S, mlir::Type condType, + SmallVector &caseAttrs); mlir::cir::FuncOp generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo); @@ -1964,7 +1969,7 @@ class CIRGenFunction : public CIRGenTypeCache { // have their own scopes but are distinct regions nonetheless. llvm::SmallVector RetBlocks; llvm::SmallVector> RetLocs; - unsigned int CurrentSwitchRegionIdx = -1; + llvm::SmallVector> SwitchRegions; // There's usually only one ret block per scope, but this needs to be // get or create because of potential unreachable return statements, note @@ -1985,16 +1990,25 @@ class CIRGenFunction : public CIRGenTypeCache { void buildImplicitReturn(); public: - void updateCurrentSwitchCaseRegion() { CurrentSwitchRegionIdx++; } llvm::ArrayRef getRetBlocks() { return RetBlocks; } llvm::ArrayRef> getRetLocs() { return RetLocs; } + llvm::MutableArrayRef> getSwitchRegions() { + assert(isSwitch() && "expected switch scope"); + return SwitchRegions; + } + + mlir::Region *createSwitchRegion() { + assert(isSwitch() && "expected switch scope"); + SwitchRegions.push_back(std::make_unique()); + return SwitchRegions.back().get(); + } mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) { unsigned int regionIdx = 0; if (isSwitch()) - regionIdx = CurrentSwitchRegionIdx; + regionIdx = SwitchRegions.size() - 1; if (regionIdx >= RetBlocks.size()) return createRetBlock(CGF, loc); return &*RetBlocks.back(); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 17264d36e588..981804892ebb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -637,7 +637,7 @@ CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, template mlir::LogicalResult CIRGenFunction::buildCaseDefaultCascade( const T *stmt, mlir::Type condType, - SmallVector &caseAttrs, mlir::OperationState &os) { + SmallVector &caseAttrs) { assert((isa(stmt)) && "only case or default stmt go here"); @@ -647,20 +647,18 @@ mlir::LogicalResult CIRGenFunction::buildCaseDefaultCascade( // Update scope information with the current region we are // emitting code for. This is useful to allow return blocks to be // automatically and properly placed during cleanup. - auto *region = os.addRegion(); + auto *region = currLexScope->createSwitchRegion(); auto *block = builder.createBlock(region); builder.setInsertionPointToEnd(block); - currLexScope->updateCurrentSwitchCaseRegion(); auto *sub = stmt->getSubStmt(); if (isa(sub) && isa(stmt)) { builder.createYield(getLoc(stmt->getBeginLoc())); - res = - buildDefaultStmt(*dyn_cast(sub), condType, caseAttrs, os); + res = buildDefaultStmt(*dyn_cast(sub), condType, caseAttrs); } else if (isa(sub) && isa(stmt)) { builder.createYield(getLoc(stmt->getBeginLoc())); - res = buildCaseStmt(*dyn_cast(sub), condType, caseAttrs, os); + res = buildCaseStmt(*dyn_cast(sub), condType, caseAttrs); } else { res = buildStmt(sub, /*useCurrentScope=*/!isa(sub)); } @@ -670,19 +668,17 @@ mlir::LogicalResult CIRGenFunction::buildCaseDefaultCascade( mlir::LogicalResult CIRGenFunction::buildCaseStmt(const CaseStmt &S, mlir::Type condType, - SmallVector &caseAttrs, - mlir::OperationState &os) { + SmallVector &caseAttrs) { assert((!S.getRHS() || !S.caseStmtIsGNURange()) && "case ranges not implemented"); auto *caseStmt = foldCaseStmt(S, condType, caseAttrs); - return buildCaseDefaultCascade(caseStmt, condType, caseAttrs, os); + return buildCaseDefaultCascade(caseStmt, condType, caseAttrs); } mlir::LogicalResult CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, mlir::Type condType, - SmallVector &caseAttrs, - mlir::OperationState &os) { + SmallVector &caseAttrs) { auto ctxt = builder.getContext(); auto defAttr = mlir::cir::CaseAttr::get( @@ -690,7 +686,19 @@ CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, mlir::Type condType, CaseOpKindAttr::get(ctxt, mlir::cir::CaseOpKind::Default)); caseAttrs.push_back(defAttr); - return buildCaseDefaultCascade(&S, condType, caseAttrs, os); + return buildCaseDefaultCascade(&S, condType, caseAttrs); +} + +mlir::LogicalResult +CIRGenFunction::buildSwitchCase(const SwitchCase &S, mlir::Type condType, + SmallVector &caseAttrs) { + if (S.getStmtClass() == Stmt::CaseStmtClass) + return buildCaseStmt(cast(S), condType, caseAttrs); + + if (S.getStmtClass() == Stmt::DefaultStmtClass) + return buildDefaultStmt(cast(S), condType, caseAttrs); + + llvm_unreachable("expect case or default stmt"); } mlir::LogicalResult @@ -953,6 +961,36 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { return mlir::success(); } +mlir::LogicalResult CIRGenFunction::buildSwitchBody( + const Stmt *S, mlir::Type condType, + llvm::SmallVector &caseAttrs) { + if (auto *compoundStmt = dyn_cast(S)) { + mlir::Block *lastCaseBlock = nullptr; + auto res = mlir::success(); + for (auto *c : compoundStmt->body()) { + if (auto *switchCase = dyn_cast(c)) { + res = buildSwitchCase(*switchCase, condType, caseAttrs); + } else if (lastCaseBlock) { + // This means it's a random stmt following up a case, just + // emit it as part of previous known case. + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(lastCaseBlock); + res = buildStmt(c, /*useCurrentScope=*/!isa(c)); + } else { + llvm_unreachable("statement doesn't belong to any case region, NYI"); + } + + lastCaseBlock = builder.getBlock(); + + if (res.failed()) + break; + } + return res; + } + + llvm_unreachable("switch body is not CompoundStmt, NYI"); +} + mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { // TODO: LLVM codegen does some early optimization to fold the condition and // only emit live cases. CIR should use MLIR to achieve similar things, @@ -975,49 +1013,17 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts()) // TODO: if the switch has a condition wrapped by __builtin_unpredictable? - // FIXME: track switch to handle nested stmts. swop = builder.create( getLoc(S.getBeginLoc()), condV, /*switchBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { - auto *cs = dyn_cast(S.getBody()); - assert(cs && "expected compound stmt"); - SmallVector caseAttrs; - currLexScope->setAsSwitch(); - mlir::Block *lastCaseBlock = nullptr; - for (auto *c : cs->body()) { - bool caseLike = isa(c); - if (!caseLike) { - // This means it's a random stmt following up a case, just - // emit it as part of previous known case. - assert(lastCaseBlock && "expects pre-existing case block"); - mlir::OpBuilder::InsertionGuard guardCase(builder); - builder.setInsertionPointToEnd(lastCaseBlock); - res = buildStmt(c, /*useCurrentScope=*/!isa(c)); - lastCaseBlock = builder.getBlock(); - if (res.failed()) - break; - continue; - } - - auto *caseStmt = dyn_cast(c); - - if (caseStmt) - res = buildCaseStmt(*caseStmt, condV.getType(), caseAttrs, os); - else { - auto *defaultStmt = dyn_cast(c); - assert(defaultStmt && "expected default stmt"); - res = buildDefaultStmt(*defaultStmt, condV.getType(), caseAttrs, - os); - } - - lastCaseBlock = builder.getBlock(); - - if (res.failed()) - break; - } + llvm::SmallVector caseAttrs; + + res = buildSwitchBody(S.getBody(), condV.getType(), caseAttrs); + + os.addRegions(currLexScope->getSwitchRegions()); os.addAttribute("cases", builder.getArrayAttr(caseAttrs)); }); diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 3c63e4ea4820..b378c7364475 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -266,6 +266,7 @@ void sw12(int a) { break; } } + // CHECK: cir.func @_Z4sw12i // CHECK: cir.scope { // CHECK: cir.switch @@ -275,6 +276,32 @@ void sw12(int a) { // CHECK-NEXT: cir.break // CHECK-NEXT: } +void sw13(int a, int b) { + switch (a) { + case 1: + switch (b) { + case 2: + break; + } + } +} + +// CHECK: cir.func @_Z4sw13ii +// CHECK: cir.scope { +// CHECK: cir.switch +// CHECK-NEXT: case (equal, 1) { +// CHECK-NEXT: cir.scope { +// CHECK: cir.switch +// CHECK-NEXT: case (equal, 2) { +// CHECK-NEXT: cir.break +// CHECK-NEXT: } +// CHECK-NEXT: ] +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK: } +// CHECK: cir.return + void fallthrough(int x) { switch (x) { case 1: From 65bc46af598c0197e9461906969609adc4acecb2 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 26 Apr 2024 06:09:16 +0800 Subject: [PATCH 1526/2301] [CIR][NFC] Create LLVM intrinsic calls through `createCallLLVMIntrinsicOp` (#564) This PR does not introduce any functional changes. It cleans up code in `LowerToLLVM.cpp` and creates all LLVM intrinsic calls through the unified `createCallLLVMIntrinsicOp` function, as suggested by [this comment](https://github.com/llvm/clangir/pull/556#discussion_r1575198259) in #556 . Some LLVM intrinsics already have specialized LLVMIR operations. CIR operations that depend on these intrinsics are lowered to those specialized operations rather than `llvm.call_intrinsic` operation. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 29 ++++++++++--------- clang/test/CIR/Lowering/bswap.cir | 2 +- clang/test/CIR/Lowering/intrinsics.cir | 2 +- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index be70b45304e7..f8c97067e810 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1996,6 +1996,16 @@ createCallLLVMIntrinsicOp(mlir::ConversionPatternRewriter &rewriter, loc, resultTy, intrinsicNameAttr, operands); } +static mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp( + mlir::ConversionPatternRewriter &rewriter, mlir::Operation *op, + const llvm::Twine &intrinsicName, mlir::Type resultTy, + mlir::ValueRange operands) { + auto callIntrinOp = createCallLLVMIntrinsicOp( + rewriter, op->getLoc(), intrinsicName, resultTy, operands); + rewriter.replaceOp(op, callIntrinOp.getOperation()); + return callIntrinOp; +} + static mlir::Value createLLVMBitOp(mlir::Location loc, const llvm::Twine &llvmIntrinBaseName, mlir::Type resultTy, mlir::Value operand, @@ -2080,16 +2090,14 @@ class CIRObjSizeOpLowering auto llvmResTy = getTypeConverter()->convertType(op.getType()); auto loc = op->getLoc(); - auto llvmIntrinNameAttr = - mlir::StringAttr::get(rewriter.getContext(), "llvm.objectsize"); mlir::cir::SizeInfoType kindInfo = op.getKind(); auto falseValue = rewriter.create( loc, rewriter.getI1Type(), false); auto trueValue = rewriter.create( loc, rewriter.getI1Type(), true); - rewriter.replaceOpWithNewOp( - op, llvmResTy, llvmIntrinNameAttr, + replaceOpWithCallLLVMIntrinsicOp( + rewriter, op, "llvm.objectsize", llvmResTy, mlir::ValueRange{adaptor.getPtr(), kindInfo == mlir::cir::SizeInfoType::max ? falseValue : trueValue, @@ -2468,11 +2476,8 @@ class CIRByteswapOpLowering std::string llvmIntrinName = "llvm.bswap.i"; llvmIntrinName.append(std::to_string(resTy.getWidth())); - auto llvmIntrinNameAttr = - mlir::StringAttr::get(rewriter.getContext(), llvmIntrinName); - rewriter.replaceOpWithNewOp( - op, resTy, llvmIntrinNameAttr, adaptor.getInput()); + rewriter.replaceOpWithNewOp(op, adaptor.getInput()); return mlir::LogicalResult::success(); } @@ -2683,12 +2688,8 @@ class CIRTrapLowering : public mlir::OpConversionPattern { auto loc = op->getLoc(); rewriter.eraseOp(op); - auto llvmTrapIntrinsicType = - mlir::LLVM::LLVMVoidType::get(op->getContext()); - rewriter.create( - loc, llvmTrapIntrinsicType, - mlir::StringAttr::get(op->getContext(), "llvm.trap"), - mlir::ValueRange{}); + rewriter.create(loc); + // Note that the call to llvm.trap is not a terminator in LLVM dialect. // So we must emit an additional llvm.unreachable to terminate the current // block. diff --git a/clang/test/CIR/Lowering/bswap.cir b/clang/test/CIR/Lowering/bswap.cir index 7e778820a131..7733b4de1dae 100644 --- a/clang/test/CIR/Lowering/bswap.cir +++ b/clang/test/CIR/Lowering/bswap.cir @@ -9,7 +9,7 @@ cir.func @test(%arg0: !u32i) -> !u32i { } // MLIR: llvm.func @test(%arg0: i32) -> i32 -// MLIR-NEXT: %0 = llvm.call_intrinsic "llvm.bswap.i32"(%arg0) : (i32) -> i32 +// MLIR-NEXT: %0 = llvm.intr.bswap(%arg0) : (i32) -> i32 // MLIR-NEXT: llvm.return %0 : i32 // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/intrinsics.cir b/clang/test/CIR/Lowering/intrinsics.cir index ddf8e0708ad4..25b0b34738bc 100644 --- a/clang/test/CIR/Lowering/intrinsics.cir +++ b/clang/test/CIR/Lowering/intrinsics.cir @@ -14,7 +14,7 @@ module { } // MLIR: llvm.func @test_trap() - // MLIR-NEXT: llvm.call_intrinsic "llvm.trap"() : () -> !llvm.void + // MLIR-NEXT: "llvm.intr.trap"() : () -> () // MLIR-NEXT: llvm.unreachable // LLVM: define void @test_trap() From 5c632e737caeab5f306ce8ce979877c178247b5e Mon Sep 17 00:00:00 2001 From: zhoujingya <104264072+zhoujingya@users.noreply.github.com> Date: Fri, 26 Apr 2024 06:12:32 +0800 Subject: [PATCH 1527/2301] [CIR][Lowering] Add MLIR lowering support for CIR cos operations (#565) #563 This PR add cir.cos lowering to MLIR math dialect, now it only surpport single and double float types, I add an assertation for the long double and other unimplemented types --------- Signed-off-by: zhoujing --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 26 ++++++++++++++++--- clang/test/CIR/Lowering/ThroughMLIR/cos.cir | 22 ++++++++++++++++ 2 files changed, 45 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/cos.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index b46bc8252cb0..d7eacb6918c3 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -24,6 +24,7 @@ #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/Math/IR/Math.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" @@ -68,7 +69,7 @@ struct ConvertCIRToMLIRPass registry.insert(); + mlir::scf::SCFDialect, mlir::math::MathDialect>(); } void runOnOperation() final; @@ -140,6 +141,18 @@ class CIRStoreOpLowering } }; +class CIRCosOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CosOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + class CIRConstantOpLowering : public mlir::OpConversionPattern { public: @@ -153,6 +166,11 @@ class CIRConstantOpLowering if (mlir::isa(op.getType())) { auto boolValue = mlir::cast(op.getValue()); value = rewriter.getIntegerAttr(ty, boolValue.getValue()); + } else if (op.getType().isa()) { + assert(ty.isF32() || ty.isF64() && "NYI"); + value = rewriter.getFloatAttr( + typeConverter->convertType(op.getType()), + op.getValue().cast().getValue()); } else { auto cirIntAttr = mlir::dyn_cast(op.getValue()); assert(cirIntAttr && "NYI non cir.int attr"); @@ -612,7 +630,8 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, CIRBinOpLowering, CIRLoadOpLowering, CIRConstantOpLowering, CIRStoreOpLowering, CIRAllocaOpLowering, CIRFuncOpLowering, CIRScopeOpLowering, CIRBrCondOpLowering, CIRTernaryOpLowering, - CIRYieldOpLowering>(converter, patterns.getContext()); + CIRYieldOpLowering, CIRCosOpLowering>(converter, + patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { @@ -666,7 +685,8 @@ void ConvertCIRToMLIRPass::runOnOperation() { target.addLegalOp(); target.addLegalDialect(); + mlir::scf::SCFDialect, mlir::cf::ControlFlowDialect, + mlir::math::MathDialect>(); target.addIllegalDialect(); if (failed(applyPartialConversion(module, target, std::move(patterns)))) diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cos.cir b/clang/test/CIR/Lowering/ThroughMLIR/cos.cir new file mode 100644 index 000000000000..05e913866202 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/cos.cir @@ -0,0 +1,22 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %1 = cir.const(#cir.fp<1.0> : !cir.float) : !cir.float + %2 = cir.const(#cir.fp<1.0> : !cir.double) : !cir.double + %3 = cir.cos %1 : !cir.float + %4 = cir.cos %2 : !cir.double + cir.return + } +} + +//CHECK: module { +//CHECK: func.func @foo() { +//CHECK: %cst = arith.constant 1.000000e+00 : f32 +//CHECK: %cst_0 = arith.constant 1.000000e+00 : f64 +//CHECK: %0 = math.cos %cst : f32 +//CHECK: %1 = math.cos %cst_0 : f64 +//CHECK: return +//CHECK: } +//CHECK: } From c4ec159a5dd2eab5e2069cc2c62cc077edf61920 Mon Sep 17 00:00:00 2001 From: orbiri Date: Fri, 26 Apr 2024 01:15:05 +0300 Subject: [PATCH 1528/2301] [CIR] Remove redundant error from parseConstantValue (#567) ASMParser::parseAttribute is responsible for emitting its own errors or forwarding errors of the parsers below it. There is no reason to emit a subsequent error as it doesn't add extra information to the user. As a driveby, beutify a bit the tests that "relied" on this error and make the expected error easier to read by moving it to the line before. --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 +------ clang/test/CIR/IR/invalid.cir | 28 +++++++++++++++---------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9df58bf9eb54..c885d435a2ce 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -367,12 +367,7 @@ LogicalResult ConstantOp::verify() { static ParseResult parseConstantValue(OpAsmParser &parser, mlir::Attribute &valueAttr) { NamedAttrList attr; - if (parser.parseAttribute(valueAttr, "value", attr).failed()) { - return parser.emitError(parser.getCurrentLocation(), - "expected constant attribute to match type"); - } - - return success(); + return parser.parseAttribute(valueAttr, "value", attr); } // FIXME: create a CIRConstAttr and hide this away for both global diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 8d680f2f3a12..1522a7202f8d 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -305,34 +305,39 @@ cir.func @cast24(%p : !u32i) { !u32i = !cir.int !u8i = !cir.int module { - cir.global external @a = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<23> : !u8i, #cir.int<33> : !u8i] : !cir.array> // expected-error {{constant array element should match array element type}} -} // expected-error {{expected constant attribute to match type}} + // expected-error@+1 {{constant array element should match array element type}} + cir.global external @a = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<23> : !u8i, #cir.int<33> : !u8i] : !cir.array> +} // ----- !u8i = !cir.int module { - cir.global external @a = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<23> : !u8i, #cir.int<33> : !u8i] : !cir.array> // expected-error {{constant array size should match type size}} -} // expected-error {{expected constant attribute to match type}} + // expected-error@+1 {{constant array size should match type size}} + cir.global external @a = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<23> : !u8i, #cir.int<33> : !u8i] : !cir.array> +} // ----- !u32i = !cir.int module { - cir.global external @b = #cir.const_array<"example\00" : !cir.array> // expected-error {{constant array element for string literals expects !cir.int element type}} -} // expected-error {{expected constant attribute to match type}} + // expected-error@+1 {{constant array element for string literals expects !cir.int element type}} + cir.global external @b = #cir.const_array<"example\00" : !cir.array> +} // ----- module { - cir.global "private" constant external @".str2" = #cir.const_array<"example\00"> {alignment = 1 : i64} // expected-error {{expected type declaration for string literal}} -} // expected-error@-1 {{expected constant attribute to match type}} + // expected-error@+1 {{expected type declaration for string literal}} + cir.global "private" constant external @".str2" = #cir.const_array<"example\00"> {alignment = 1 : i64} +} // ----- !u32i = !cir.int module { - cir.global @a = #cir.const_array<[0 : !u8i, -23 : !u8i, 33 : !u8i] : !cir.array> // expected-error {{expected string or keyword containing one of the following enum values for attribute 'linkage' [external, available_externally, linkonce, linkonce_odr, weak, weak_odr, internal, cir_private, extern_weak, common]}} + // expected-error@+1 {{expected string or keyword containing one of the following enum values for attribute 'linkage' [external, available_externally, linkonce, linkonce_odr, weak, weak_odr, internal, cir_private, extern_weak, common]}} + cir.global @a = #cir.const_array<[0 : !u8i, -23 : !u8i, 33 : !u8i] : !cir.array> } // ----- @@ -535,10 +540,11 @@ module { // rid of this somehow in favor of clarity? cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr - cir.global external @type_info_B = #cir.typeinfo<{ // expected-error {{element at index 0 has type '!cir.ptr>' but return type for this element is '!cir.ptr>'}} + // expected-error@+1 {{element at index 0 has type '!cir.ptr>' but return type for this element is '!cir.ptr>'}} + cir.global external @type_info_B = #cir.typeinfo<{ #cir.global_view<@_ZTVN10__cxxabiv120__si_class_type_infoE, [2]> : !cir.ptr}> : !cir.struct}> -} // expected-error {{'cir.global' expected constant attribute to match type}} +} // ----- From 8cfb891d855ff87ddcb46b720dddff6a39d752be Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 Apr 2024 18:45:46 -0700 Subject: [PATCH 1529/2301] [CIR][CIRGen] Add more testcases for atomic xchg/cmp_xchg --- clang/test/CIR/CodeGen/atomic.cpp | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 25377616c188..03fbe80cced5 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -245,4 +245,31 @@ void fd3(struct S *a, struct S *b, struct S *c) { // LLVM-NEXT: [[LOAD_C_PTR:%.*]] = load ptr, ptr [[C_ADDR]] // LLVM-NEXT: [[LOAD_B:%.*]] = load i64, ptr [[LOAD_B_PTR]] // LLVM-NEXT: [[RESULT:%.*]] = atomicrmw xchg ptr [[LOAD_A_PTR]], i64 [[LOAD_B]] seq_cst -// LLVM-NEXT: store i64 [[RESULT]], ptr [[LOAD_C_PTR]] \ No newline at end of file +// LLVM-NEXT: store i64 [[RESULT]], ptr [[LOAD_C_PTR]] + +bool fi4a(int *i) { + int cmp = 0; + int desired = 1; + return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire); +} + +// CHECK-LABEL: @_Z4fi4aPi +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : , {{.*}} : !cir.ptr, success = acquire, failure = acquire) : !cir.bool + +// LLVM-LABEL: @_Z4fi4aPi +// LLVM: %[[RES:.*]] = cmpxchg ptr %7, i32 %8, i32 %9 acquire acquire, align 4 +// LLVM: extractvalue { i32, i1 } %[[RES]], 0 +// LLVM: extractvalue { i32, i1 } %[[RES]], 1 + +bool fi4b(int *i) { + int cmp = 0; + return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire); +} + +// CHECK-LABEL: @_Z4fi4bPi +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : , {{.*}} : !cir.ptr, success = acquire, failure = acquire) weak : !cir.bool + +// LLVM-LABEL: @_Z4fi4bPi +// LLVM: %[[R:.*]] = cmpxchg weak ptr {{.*}}, i32 {{.*}}, i32 {{.*}} acquire acquire, align 4 +// LLVM: extractvalue { i32, i1 } %[[R]], 0 +// LLVM: extractvalue { i32, i1 } %[[R]], 1 \ No newline at end of file From 3e48f3d20abcb7eed412791d407cbe5ee883a4c7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 Apr 2024 18:58:18 -0700 Subject: [PATCH 1530/2301] [CIR][CIRGen] Atomics: handle atomic_compare_exchange_strong --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 3 ++- clang/test/CIR/CodeGen/atomic.cpp | 13 ++++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 2e736206c775..d5799ddf0e93 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -416,7 +416,8 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__c11_atomic_compare_exchange_strong: case AtomicExpr::AO__hip_atomic_compare_exchange_strong: case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: - llvm_unreachable("NYI"); + buildAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2, + FailureOrder, Size, Order, Scope); return; case AtomicExpr::AO__c11_atomic_compare_exchange_weak: case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 03fbe80cced5..297fced001a0 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -272,4 +272,15 @@ bool fi4b(int *i) { // LLVM-LABEL: @_Z4fi4bPi // LLVM: %[[R:.*]] = cmpxchg weak ptr {{.*}}, i32 {{.*}}, i32 {{.*}} acquire acquire, align 4 // LLVM: extractvalue { i32, i1 } %[[R]], 0 -// LLVM: extractvalue { i32, i1 } %[[R]], 1 \ No newline at end of file +// LLVM: extractvalue { i32, i1 } %[[R]], 1 + +bool fi4c(atomic_int *i) { + int cmp = 0; + return atomic_compare_exchange_strong(i, &cmp, 1); +} + +// CHECK-LABEL: @_Z4fi4cPU7_Atomici +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : , {{.*}} : !cir.ptr, success = seq_cst, failure = seq_cst) : !cir.bool + +// LLVM-LABEL: @_Z4fi4cPU7_Atomici +// LLVM: cmpxchg ptr {{.*}}, i32 {{.*}}, i32 {{.*}} seq_cst seq_cst, align 4 \ No newline at end of file From f40c0f0936a8ab7a767e4f14b6df11e7f0d9bced Mon Sep 17 00:00:00 2001 From: zhoujingya <104264072+zhoujingya@users.noreply.github.com> Date: Sat, 27 Apr 2024 01:24:27 +0800 Subject: [PATCH 1531/2301] [CIR][Lowering] Add long double types for cos operation lowering (#568) Add left long double types lowering for cos operation #565 --------- Signed-off-by: zhoujing --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 10 +++++-- clang/test/CIR/Lowering/ThroughMLIR/cos.cir | 30 ++++++++++++------- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index d7eacb6918c3..11ce1d9ac631 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -167,10 +167,8 @@ class CIRConstantOpLowering auto boolValue = mlir::cast(op.getValue()); value = rewriter.getIntegerAttr(ty, boolValue.getValue()); } else if (op.getType().isa()) { - assert(ty.isF32() || ty.isF64() && "NYI"); value = rewriter.getFloatAttr( - typeConverter->convertType(op.getType()), - op.getValue().cast().getValue()); + ty, op.getValue().cast().getValue()); } else { auto cirIntAttr = mlir::dyn_cast(op.getValue()); assert(cirIntAttr && "NYI non cir.int attr"); @@ -664,6 +662,12 @@ static mlir::TypeConverter prepareTypeConverter() { converter.addConversion([&](mlir::cir::DoubleType type) -> mlir::Type { return mlir::Float64Type::get(type.getContext()); }); + converter.addConversion([&](mlir::cir::FP80Type type) -> mlir::Type { + return mlir::Float80Type::get(type.getContext()); + }); + converter.addConversion([&](mlir::cir::LongDoubleType type) -> mlir::Type { + return converter.convertType(type.getUnderlying()); + }); converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { auto elementType = converter.convertType(type.getEltType()); return mlir::MemRefType::get(type.getSize(), elementType); diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cos.cir b/clang/test/CIR/Lowering/ThroughMLIR/cos.cir index 05e913866202..0530d3cb19e8 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/cos.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/cos.cir @@ -5,18 +5,26 @@ module { cir.func @foo() { %1 = cir.const(#cir.fp<1.0> : !cir.float) : !cir.float %2 = cir.const(#cir.fp<1.0> : !cir.double) : !cir.double - %3 = cir.cos %1 : !cir.float - %4 = cir.cos %2 : !cir.double + %3 = cir.const(#cir.fp<1.0> : !cir.long_double) : !cir.long_double + %4 = cir.const(#cir.fp<1.0> : !cir.long_double) : !cir.long_double + %5 = cir.cos %1 : !cir.float + %6 = cir.cos %2 : !cir.double + %7 = cir.cos %3 : !cir.long_double + %8 = cir.cos %4 : !cir.long_double cir.return } } -//CHECK: module { -//CHECK: func.func @foo() { -//CHECK: %cst = arith.constant 1.000000e+00 : f32 -//CHECK: %cst_0 = arith.constant 1.000000e+00 : f64 -//CHECK: %0 = math.cos %cst : f32 -//CHECK: %1 = math.cos %cst_0 : f64 -//CHECK: return -//CHECK: } -//CHECK: } +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant 1.000000e+00 : f32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant 1.000000e+00 : f64 +// CHECK-NEXT: %[[C2:.+]] = arith.constant 1.000000e+00 : f80 +// CHECK-NEXT: %[[C3:.+]] = arith.constant 1.000000e+00 : f64 +// CHECK-NEXT: %{{.+}} = math.cos %[[C0]] : f32 +// CHECK-NEXT: %{{.+}} = math.cos %[[C1]] : f64 +// CHECK-NEXT: %{{.+}} = math.cos %[[C2]] : f80 +// CHECK-NEXT: %{{.+}} = math.cos %[[C3]] : f64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } From 4d7428edadbd6a3946a05f4fed9e055f510a5e08 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Apr 2024 13:00:35 -0700 Subject: [PATCH 1532/2301] [CIR][CIRGen] Atomics: make cir.atomic.cmp_xchg a bit more lower level --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 8 ++-- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 24 ++++++---- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 45 ++----------------- clang/test/CIR/CodeGen/atomic-xchg-field.c | 14 +++--- clang/test/CIR/CodeGen/atomic.cpp | 20 +++++++-- 5 files changed, 49 insertions(+), 62 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 41c20d48e4ce..5d508259e600 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3744,9 +3744,9 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", [Pure]> { C/C++ Atomic compare and exchange. Example: }]; - let results = (outs CIR_BoolType:$cmp); - let arguments = (ins CIR_PointerType:$ptr, - CIR_PointerType:$expected, + let results = (outs CIR_AnyType:$old, CIR_BoolType:$cmp); + let arguments = (ins CIR_AnyType:$ptr, + CIR_AnyType:$expected, CIR_AnyType:$desired, Arg:$succ_order, Arg:$fail_order, @@ -3763,7 +3763,7 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", [Pure]> { `)` (`weak` $weak^)? (`volatile` $is_volatile^)? - `:` type($cmp) attr-dict + `:` `(` type($old) `,` type($cmp) `)` attr-dict }]; let hasVerifier = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index d5799ddf0e93..aa88b4bedf17 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -331,19 +331,27 @@ static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, llvm::SyncScope::ID Scope) { auto &builder = CGF.getBuilder(); auto loc = CGF.getLoc(E->getSourceRange()); - mlir::Value Expected = Val1.getPointer(); - mlir::Value Desired = Val2.getPointer(); - - // The approach here is a bit different from traditional LLVM codegen: we pass - // the pointers instead, and let the operation hide the details of storing the - // old value into expected in case of failure (handled during LLVM lowering). + auto Expected = builder.createLoad(loc, Val1); + auto Desired = builder.createLoad(loc, Val2); auto boolTy = builder.getBoolTy(); auto cmpxchg = builder.create( - loc, boolTy, Ptr.getPointer(), Expected, Desired, SuccessOrder, - FailureOrder); + loc, Expected.getType(), boolTy, Ptr.getPointer(), Expected, Desired, + SuccessOrder, FailureOrder); cmpxchg.setIsVolatile(E->isVolatile()); cmpxchg.setWeak(IsWeak); + auto cmp = builder.createNot(cmpxchg.getCmp()); + builder.create( + loc, cmp, false, [&](mlir::OpBuilder &, mlir::Location) { + auto ptrTy = Val1.getPointer().getType().cast(); + if (Val1.getElementType() != ptrTy.getPointee()) { + Val1 = Val1.withPointer(builder.createPtrBitcast( + Val1.getPointer(), Val1.getElementType())); + } + builder.createStore(loc, cmpxchg.getOld(), Val1); + builder.createYield(loc); + }); + // Update the memory at Dest with Cmp's value. CGF.buildStoreOfScalar(cmpxchg.getCmp(), CGF.makeAddrLValue(Dest, E->getType())); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f8c97067e810..015a2ddf58fe 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2238,23 +2238,8 @@ class CIRAtomicCmpXchgLowering mlir::LogicalResult matchAndRewrite(mlir::cir::AtomicCmpXchg op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - // This basic block is the exit point of the operation, we should end up - // here regardless of whether or not the operation succeeded. - mlir::Block *continueBB = nullptr; - { - mlir::OpBuilder::InsertionGuard guard(rewriter); - continueBB = rewriter.splitBlock(rewriter.getInsertionBlock(), - std::next(op->getIterator())); - } - - const auto llvmTy = getTypeConverter()->convertType( - op.getExpected().getType().cast().getPointee()); - auto expected = rewriter.create(op.getLoc(), llvmTy, - adaptor.getExpected(), - /*alignment=*/0); - auto desired = rewriter.create(op.getLoc(), llvmTy, - adaptor.getDesired(), - /*alignment=*/0); + auto expected = adaptor.getExpected(); + auto desired = adaptor.getDesired(); // FIXME: add syncscope. auto cmpxchg = rewriter.create( @@ -2270,33 +2255,9 @@ class CIRAtomicCmpXchgLowering auto cmp = rewriter.create( op.getLoc(), cmpxchg.getResult(), 1); - // This basic block is used to hold the store instruction if the operation - // failed. Create it here and populate CondBrOp. - mlir::Block *storeExpectedBB = nullptr; - { - mlir::OpBuilder::InsertionGuard guard(rewriter); - storeExpectedBB = rewriter.createBlock(cmpxchg->getParentRegion()); - } - - rewriter.create(op.getLoc(), cmp, continueBB, - storeExpectedBB, mlir::ValueRange{}); - - // Fill in storeExpectedBB - rewriter.setInsertionPoint(storeExpectedBB, storeExpectedBB->begin()); - rewriter.create(op.getLoc(), old, - adaptor.getExpected(), - /*alignment=*/0, - /* volatile */ false, - /* nontemporal */ false); - rewriter.create(op.getLoc(), continueBB); - - // Fill in continueBB - // Zero-extend the cmp result so it matches the bool type on the other - // side. - rewriter.setInsertionPoint(continueBB, continueBB->begin()); auto extCmp = rewriter.create( op.getLoc(), rewriter.getI8Type(), cmp); - rewriter.replaceOp(op, extCmp); + rewriter.replaceOp(op, {old, extCmp}); return mlir::success(); } }; diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c index 1309023e1c4a..5325c0d98388 100644 --- a/clang/test/CIR/CodeGen/atomic-xchg-field.c +++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c @@ -47,7 +47,7 @@ void structAtomicExchange(unsigned referenceCount, wPtr item) { } // CHECK-LABEL: @structAtomicExchange -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : , {{.*}} : !cir.ptr, success = seq_cst, failure = seq_cst) weak : !cir.bool +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u32i, {{.*}} : !u32i, success = seq_cst, failure = seq_cst) weak : (!u32i, !cir.bool) // LLVM-LABEL: @structAtomicExchange // LLVM: load i32 @@ -58,15 +58,19 @@ void structAtomicExchange(unsigned referenceCount, wPtr item) { // LLVM: %[[RES:.*]] = cmpxchg weak ptr %9, i32 %[[EXP]], i32 %[[DES]] seq_cst seq_cst // LLVM: %[[OLD:.*]] = extractvalue { i32, i1 } %[[RES]], 0 // LLVM: %[[CMP:.*]] = extractvalue { i32, i1 } %[[RES]], 1 -// LLVM: br i1 %[[CMP]], label %[[CONTINUE:.*]], label %[[STORE_OLD:.*]], -// LLVM: [[CONTINUE]]: -// LLVM: zext i1 %[[CMP]] to i8 -// LLVM: ret void +// LLVM: %[[Z:.*]] = zext i1 %[[CMP]] to i8, !dbg !16 +// LLVM: %[[X:.*]] = xor i8 %[[Z]], 1, !dbg !16 +// LLVM: %[[FAIL:.*]] = trunc i8 %[[X]] to i1, !dbg !16 +// LLVM: br i1 %[[FAIL:.*]], label %[[STORE_OLD:.*]], label %[[CONTINUE:.*]], // LLVM: [[STORE_OLD]]: // LLVM: store i32 %[[OLD]], ptr // LLVM: br label %[[CONTINUE]] +// LLVM: [[CONTINUE]]: +// LLVM: store i8 %[[Z]], ptr {{.*}}, align 1 +// LLVM: ret void + void f2(const void *cf); void structLoad(unsigned referenceCount, wPtr item) { diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 297fced001a0..b2478d4ff78c 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -247,6 +247,16 @@ void fd3(struct S *a, struct S *b, struct S *c) { // LLVM-NEXT: [[RESULT:%.*]] = atomicrmw xchg ptr [[LOAD_A_PTR]], i64 [[LOAD_B]] seq_cst // LLVM-NEXT: store i64 [[RESULT]], ptr [[LOAD_C_PTR]] +bool fd4(struct S *a, struct S *b, struct S *c) { + return __atomic_compare_exchange(a, b, c, 1, 5, 5); +} + +// CHECK-LABEL: @_Z3fd4P1SS0_S0_ +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) weak : (!u64i, !cir.bool) + +// LLVM-LABEL: @_Z3fd4P1SS0_S0_ +// LLVM: cmpxchg weak ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8 + bool fi4a(int *i) { int cmp = 0; int desired = 1; @@ -254,7 +264,7 @@ bool fi4a(int *i) { } // CHECK-LABEL: @_Z4fi4aPi -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : , {{.*}} : !cir.ptr, success = acquire, failure = acquire) : !cir.bool +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) : (!s32i, !cir.bool) // LLVM-LABEL: @_Z4fi4aPi // LLVM: %[[RES:.*]] = cmpxchg ptr %7, i32 %8, i32 %9 acquire acquire, align 4 @@ -267,7 +277,7 @@ bool fi4b(int *i) { } // CHECK-LABEL: @_Z4fi4bPi -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : , {{.*}} : !cir.ptr, success = acquire, failure = acquire) weak : !cir.bool +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) weak : (!s32i, !cir.bool) // LLVM-LABEL: @_Z4fi4bPi // LLVM: %[[R:.*]] = cmpxchg weak ptr {{.*}}, i32 {{.*}}, i32 {{.*}} acquire acquire, align 4 @@ -280,7 +290,11 @@ bool fi4c(atomic_int *i) { } // CHECK-LABEL: @_Z4fi4cPU7_Atomici -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : , {{.*}} : !cir.ptr, success = seq_cst, failure = seq_cst) : !cir.bool +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = seq_cst, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %[[CMP:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[CMP:.*]] { +// CHECK: cir.store %old, {{.*}} : !s32i, cir.ptr +// CHECK: } // LLVM-LABEL: @_Z4fi4cPU7_Atomici // LLVM: cmpxchg ptr {{.*}}, i32 {{.*}}, i32 {{.*}} seq_cst seq_cst, align 4 \ No newline at end of file From b9561f4bce76bfbfaa03e1327096376713077097 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Apr 2024 14:39:04 -0700 Subject: [PATCH 1533/2301] [CIR][CIRGen] Atomics: add one more testcase --- clang/test/CIR/CodeGen/atomic.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index b2478d4ff78c..bcb802561580 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -297,4 +297,14 @@ bool fi4c(atomic_int *i) { // CHECK: } // LLVM-LABEL: @_Z4fi4cPU7_Atomici -// LLVM: cmpxchg ptr {{.*}}, i32 {{.*}}, i32 {{.*}} seq_cst seq_cst, align 4 \ No newline at end of file +// LLVM: cmpxchg ptr {{.*}}, i32 {{.*}}, i32 {{.*}} seq_cst seq_cst, align 4 + +bool fsb(bool *c) { + return __atomic_exchange_n(c, 1, memory_order_seq_cst); +} + +// CHECK-LABEL: @_Z3fsbPb +// CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u8i, seq_cst) : !u8i + +// LLVM-LABEL: @_Z3fsbPb +// LLVM: atomicrmw xchg ptr {{.*}}, i8 {{.*}} seq_cst, align 1 \ No newline at end of file From daf3993312d90809ca2280064939ba20822b7356 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Apr 2024 15:05:02 -0700 Subject: [PATCH 1534/2301] [CIR][CIRGen] Atomics: improve docs and constraints --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 42 +++++++++++++++----- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 5d508259e600..358cc42b4dec 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3678,12 +3678,12 @@ def AtomicFetchKind : I32EnumAttr< } def AtomicFetch : CIR_Op<"atomic.fetch", - [Pure, SameSecondOperandAndResultType]> { + [AllTypesMatch<["result", "val"]>]> { let summary = "Atomic fetch with unary and binary operations"; let description = [{ - Represents `__atomic_binop_fetch` and `__atomic_fetch_binop` builtins, + Represents `__atomic__fetch` and `__atomic_fetch_` builtins, where `binop` is on of the binary opcodes : `add`, `sub`, `and`, `xor`, - `or` and `nand`. + `or`, `nand`, `max` and `min`. `ptr` is an integer or fp pointer, followed by `val`, which must be an integer or fp (only supported for `add` and `sub`). The operation @@ -3693,9 +3693,14 @@ def AtomicFetch : CIR_Op<"atomic.fetch", `__atomic_fetch_binop` and returns the value that had previously been in *ptr, otherwise it returns the final result of the computation (`__atomic_binop_fetch`). + + Example: + %res = cir.atomic.fetch(add, %ptr : !cir.ptr, + %val : !s32i, seq_cst) : !s32i }]; let results = (outs CIR_AnyIntOrFloat:$result); - let arguments = (ins PrimitiveIntOrFPPtr:$ptr, CIR_AnyIntOrFloat:$val, + let arguments = (ins Arg:$ptr, + CIR_AnyIntOrFloat:$val, AtomicFetchKind:$binop, Arg:$mem_order, UnitAttr:$is_volatile, @@ -3715,14 +3720,19 @@ def AtomicFetch : CIR_Op<"atomic.fetch", let hasVerifier = 1; } -def AtomicXchg : CIR_Op<"atomic.xchg", [Pure, SameSecondOperandAndResultType]> { +def AtomicXchg : CIR_Op<"atomic.xchg", [AllTypesMatch<["result", "val"]>]> { let summary = "Atomic exchange"; let description = [{ - Atomic exchange functionality mapped from different use of builtins in - C/C++. + Atomic exchange operations. Implements C/C++ builtins such as + `__atomic_exchange`and `__atomic_exchange_n`. + + Example: + %res = cir.atomic.xchg(%ptr : !cir.ptr, + %val : !u64i, seq_cst) : !u64i }]; let results = (outs CIR_AnyType:$result); - let arguments = (ins CIR_PointerType:$ptr, CIR_AnyType:$val, + let arguments = (ins Arg:$ptr, + CIR_AnyType:$val, Arg:$mem_order, UnitAttr:$is_volatile); @@ -3738,14 +3748,24 @@ def AtomicXchg : CIR_Op<"atomic.xchg", [Pure, SameSecondOperandAndResultType]> { let hasVerifier = 0; } -def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", [Pure]> { +def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", + [AllTypesMatch<["old", "expected", "desired"]>]> { let summary = "Atomic compare exchange"; let description = [{ - C/C++ Atomic compare and exchange. Example: + C/C++ Atomic compare and exchange operation. Implements builtins like + `__atomic_compare_exchange_n` and `__atomic_compare_exchange`. + + Example: + %old, %cmp = cir.atomic.cmp_xchg(%ptr : !cir.ptr, + %expected : !u64i, + %desired : !u64i, + success = seq_cst, + failure = seq_cst) weak + : (!u64i, !cir.bool) }]; let results = (outs CIR_AnyType:$old, CIR_BoolType:$cmp); - let arguments = (ins CIR_AnyType:$ptr, + let arguments = (ins Arg:$ptr, CIR_AnyType:$expected, CIR_AnyType:$desired, Arg:$succ_order, From d8acc1c1f90dc16b6a1e1e3d2f364ebe36628dcd Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 27 Apr 2024 01:04:56 -0400 Subject: [PATCH 1535/2301] [CIR][CodeGen] Fix a usage of volatile -> volatileQualified --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 188969798cbd..6f143812858e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1949,7 +1949,7 @@ mlir::Value ScalarExprEmitter::buildCompoundAssign( return RHS; // If the lvalue is non-volatile, return the computed value of the assignment. - if (!LHS.isVolatile()) + if (!LHS.isVolatileQualified()) return RHS; // Otherwise, reload the value. From 9b53c30eed17f03b114ae729dba20e0733f5cd38 Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:12:10 +0800 Subject: [PATCH 1536/2301] [CIR] Support lowering GlobalOp and GetGlobalOp to memref (#574) This commit introduce CIRGlobalOpLowering and CIRGetGlobalOpLowering for lowering to memref. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 108 +++++++++++++++++- .../test/CIR/Lowering/ThroughMLIR/global.cir | 55 +++++++++ 2 files changed, 159 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/global.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 11ce1d9ac631..fcfd572b8480 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -620,6 +620,95 @@ class CIRYieldOpLowering } }; +class CIRGlobalOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + mlir::LogicalResult + matchAndRewrite(mlir::cir::GlobalOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto moduleOp = op->getParentOfType(); + if (!moduleOp) + return mlir::failure(); + + mlir::OpBuilder b(moduleOp.getContext()); + + const auto CIRSymType = op.getSymType(); + auto convertedType = getTypeConverter()->convertType(CIRSymType); + if (!convertedType) + return mlir::failure(); + auto memrefType = dyn_cast(convertedType); + if (!memrefType) + memrefType = mlir::MemRefType::get({}, convertedType); + // Add an optional alignment to the global memref. + mlir::IntegerAttr memrefAlignment = + op.getAlignment() + ? mlir::IntegerAttr::get(b.getI64Type(), op.getAlignment().value()) + : mlir::IntegerAttr(); + // Add an optional initial value to the global memref. + mlir::Attribute initialValue = mlir::Attribute(); + std::optional init = op.getInitialValue(); + if (init.has_value()) { + if (auto constArr = init.value().dyn_cast()) { + if (memrefType.getShape().size()) { + auto rtt = mlir::RankedTensorType::get(memrefType.getShape(), + memrefType.getElementType()); + initialValue = mlir::DenseIntElementsAttr::get(rtt, 0); + } else { + auto rtt = mlir::RankedTensorType::get({}, convertedType); + initialValue = mlir::DenseIntElementsAttr::get(rtt, 0); + } + } else if (auto intAttr = init.value().dyn_cast()) { + auto rtt = mlir::RankedTensorType::get({}, convertedType); + initialValue = mlir::DenseIntElementsAttr::get(rtt, intAttr.getValue()); + } else if (auto fltAttr = init.value().dyn_cast()) { + auto rtt = mlir::RankedTensorType::get({}, convertedType); + initialValue = mlir::DenseFPElementsAttr::get(rtt, fltAttr.getValue()); + } else if (auto boolAttr = init.value().dyn_cast()) { + auto rtt = mlir::RankedTensorType::get({}, convertedType); + initialValue = + mlir::DenseIntElementsAttr::get(rtt, (char)boolAttr.getValue()); + } else + llvm_unreachable( + "GlobalOp lowering with initial value is not fully supported yet"); + } + + // Add symbol visibility + std::string sym_visibility = op.isPrivate() ? "private" : "public"; + + rewriter.replaceOpWithNewOp( + op, b.getStringAttr(op.getSymName()), + /*sym_visibility=*/b.getStringAttr(sym_visibility), + /*type=*/memrefType, initialValue, + /*constant=*/op.getConstant(), + /*alignment=*/memrefAlignment); + + return mlir::success(); + } +}; + +class CIRGetGlobalOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::GetGlobalOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // FIXME(cir): Premature DCE to avoid lowering stuff we're not using. + // CIRGen should mitigate this and not emit the get_global. + if (op->getUses().empty()) { + rewriter.eraseOp(op); + return mlir::success(); + } + + auto type = getTypeConverter()->convertType(op.getType()); + auto symbol = op.getName(); + rewriter.replaceOpWithNewOp(op, type, symbol); + return mlir::success(); + } +}; + void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -628,8 +717,8 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, CIRBinOpLowering, CIRLoadOpLowering, CIRConstantOpLowering, CIRStoreOpLowering, CIRAllocaOpLowering, CIRFuncOpLowering, CIRScopeOpLowering, CIRBrCondOpLowering, CIRTernaryOpLowering, - CIRYieldOpLowering, CIRCosOpLowering>(converter, - patterns.getContext()); + CIRYieldOpLowering, CIRCosOpLowering, CIRGlobalOpLowering, + CIRGetGlobalOpLowering>(converter, patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { @@ -639,6 +728,8 @@ static mlir::TypeConverter prepareTypeConverter() { // FIXME: The pointee type might not be converted (e.g. struct) if (!ty) return nullptr; + if (isa(type.getPointee())) + return ty; return mlir::MemRefType::get({}, ty); }); converter.addConversion( @@ -669,8 +760,17 @@ static mlir::TypeConverter prepareTypeConverter() { return converter.convertType(type.getUnderlying()); }); converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { - auto elementType = converter.convertType(type.getEltType()); - return mlir::MemRefType::get(type.getSize(), elementType); + SmallVector shape; + mlir::Type curType = type; + while (auto arrayType = dyn_cast(curType)) { + shape.push_back(arrayType.getSize()); + curType = arrayType.getEltType(); + } + auto elementType = converter.convertType(curType); + // FIXME: The element type might not be converted (e.g. struct) + if (!elementType) + return nullptr; + return mlir::MemRefType::get(shape, elementType); }); return converter; diff --git a/clang/test/CIR/Lowering/ThroughMLIR/global.cir b/clang/test/CIR/Lowering/ThroughMLIR/global.cir new file mode 100644 index 000000000000..3b1ed83239c6 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/global.cir @@ -0,0 +1,55 @@ +// RUN: cir-opt %s -cir-to-mlir | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!u32i = !cir.int +module { + cir.global external @i = #cir.int<2> : !u32i + cir.global external @f = #cir.fp<3.000000e+00> : !cir.float + cir.global external @b = #cir.bool : !cir.bool + cir.global "private" external @a : !cir.array + cir.global external @aa = #cir.zero : !cir.array x 256> + + cir.func @get_global_int_value() -> !u32i { + %0 = cir.get_global @i : cir.ptr + %1 = cir.load %0 : cir.ptr , !u32i + cir.return %1 : !u32i + } + cir.func @get_global_float_value() -> !cir.float { + %0 = cir.get_global @f : cir.ptr + %1 = cir.load %0 : cir.ptr , !cir.float + cir.return %1 : !cir.float + } + cir.func @get_global_bool_value() -> !cir.bool { + %0 = cir.get_global @b : cir.ptr + %1 = cir.load %0 : cir.ptr , !cir.bool + cir.return %1 : !cir.bool + } + cir.func @get_global_array_pointer() -> !cir.ptr> { + %0 = cir.get_global @a : cir.ptr > + cir.return %0 : !cir.ptr> + } + cir.func @get_global_multi_array_pointer() -> !cir.ptr x 256>> { + %0 = cir.get_global @aa : cir.ptr x 256>> + cir.return %0 : !cir.ptr x 256>> + } +} + +// MLIR: memref.global "public" @i : memref = dense<2> +// MLIR: memref.global "public" @f : memref = dense<3.000000e+00> +// MLIR: memref.global "public" @b : memref = dense<1> +// MLIR: memref.global "private" @a : memref<100xi32> +// MLIR: memref.global "public" @aa : memref<256x256xi32> = dense<0> +// MLIR: memref.get_global @i : memref +// MLIR: memref.get_global @f : memref +// MLIR: memref.get_global @b : memref +// MLIR: memref.get_global @a : memref<100xi32> +// MLIR: memref.get_global @aa : memref<256x256xi32> + +// LLVM: @i = global i32 2 +// LLVM: @f = global float 3.000000e+00 +// LLVM: @b = global i8 1 +// LLVM: @a = private global [100 x i32] undef +// LLVM: @aa = global [256 x [256 x i32]] zeroinitializer +// LLVM: load i32, ptr @i +// LLVM: load float, ptr @f +// LLVM: load i8, ptr @b From 48ae319608c57556ae98c1266ba124532e415106 Mon Sep 17 00:00:00 2001 From: Johannes de Fine Licht Date: Wed, 1 May 2024 19:21:54 +0200 Subject: [PATCH 1537/2301] [CIR][NFC] Homogenize printing/parsing of CIR_PointerType (#575) This PR relegates the responsibility of printing/parsing CIR_PointerType back to the type itself, getting rid of explicit `cir.ptr` tokens in the assembly format of CIR operations. This means that CIR pointers would now always be printed as `!cir.ptr`, so update all tests that had a space before the bracket (i.e., `!cir.ptr `) or missing the type alias prefix (`cir.ptr` instead of `!cir.ptr`). --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 68 +++++++-------- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 4 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 17 ---- clang/test/CIR/CodeGen/String.cpp | 34 ++++---- clang/test/CIR/CodeGen/agg-copy.c | 52 ++++++------ clang/test/CIR/CodeGen/agg-init.cpp | 18 ++-- clang/test/CIR/CodeGen/agg-init2.cpp | 4 +- clang/test/CIR/CodeGen/array-init-destroy.cpp | 10 +-- clang/test/CIR/CodeGen/array-init.c | 58 ++++++------- .../test/CIR/CodeGen/array-unknown-bound.cpp | 4 +- clang/test/CIR/CodeGen/array.c | 6 +- clang/test/CIR/CodeGen/array.cpp | 30 +++---- clang/test/CIR/CodeGen/asm.c | 42 +++++----- clang/test/CIR/CodeGen/assign-operator.cpp | 42 +++++----- clang/test/CIR/CodeGen/atomic-xchg-field.c | 6 +- clang/test/CIR/CodeGen/atomic.cpp | 14 ++-- clang/test/CIR/CodeGen/basic.c | 32 ++++---- clang/test/CIR/CodeGen/basic.cpp | 68 +++++++-------- clang/test/CIR/CodeGen/binassign.cpp | 6 +- clang/test/CIR/CodeGen/bitfields.c | 10 +-- clang/test/CIR/CodeGen/bitfields.cpp | 6 +- clang/test/CIR/CodeGen/bitint.c | 14 ++-- clang/test/CIR/CodeGen/bitint.cpp | 10 +-- clang/test/CIR/CodeGen/bool.c | 22 ++--- clang/test/CIR/CodeGen/builtin-alloca.c | 18 ++-- .../CodeGen/builtin-constant-evaluated.cpp | 6 +- clang/test/CIR/CodeGen/builtin-constant-p.c | 10 +-- clang/test/CIR/CodeGen/builtin-ms-alloca.c | 6 +- clang/test/CIR/CodeGen/builtin-prefetch.c | 6 +- clang/test/CIR/CodeGen/call.c | 72 ++++++++-------- clang/test/CIR/CodeGen/call.cpp | 4 +- clang/test/CIR/CodeGen/cast.c | 12 +-- clang/test/CIR/CodeGen/cast.cpp | 28 +++---- clang/test/CIR/CodeGen/comma.cpp | 16 ++-- clang/test/CIR/CodeGen/compound-literal.c | 12 +-- clang/test/CIR/CodeGen/cond.cpp | 22 ++--- clang/test/CIR/CodeGen/const-array.c | 6 +- clang/test/CIR/CodeGen/const-bitfields.c | 4 +- clang/test/CIR/CodeGen/coro-task.cpp | 36 ++++---- clang/test/CIR/CodeGen/ctor-alias.cpp | 20 ++--- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 12 +-- clang/test/CIR/CodeGen/ctor.cpp | 14 ++-- clang/test/CIR/CodeGen/derived-to-base.cpp | 44 +++++----- clang/test/CIR/CodeGen/dtors-scopes.cpp | 2 +- clang/test/CIR/CodeGen/dtors.cpp | 10 +-- clang/test/CIR/CodeGen/dynamic-cast.cpp | 8 +- clang/test/CIR/CodeGen/evaluate-expr.c | 6 +- clang/test/CIR/CodeGen/expressions.cpp | 2 +- clang/test/CIR/CodeGen/forward-decls.cpp | 6 +- clang/test/CIR/CodeGen/fullexpr.cpp | 8 +- clang/test/CIR/CodeGen/fun-ptr.c | 20 ++--- clang/test/CIR/CodeGen/globals.c | 2 +- clang/test/CIR/CodeGen/globals.cpp | 50 +++++------ clang/test/CIR/CodeGen/gnu-extension.c | 8 +- clang/test/CIR/CodeGen/goto.cpp | 28 +++---- clang/test/CIR/CodeGen/hello.c | 10 +-- clang/test/CIR/CodeGen/if-constexpr.cpp | 48 +++++------ clang/test/CIR/CodeGen/implicit-return.cpp | 4 +- clang/test/CIR/CodeGen/inc-bool.cpp | 8 +- clang/test/CIR/CodeGen/inc-dec.cpp | 16 ++-- clang/test/CIR/CodeGen/lalg.c | 14 ++-- clang/test/CIR/CodeGen/lambda.cpp | 62 +++++++------- clang/test/CIR/CodeGen/libcall.cpp | 14 ++-- clang/test/CIR/CodeGen/loop-scope.cpp | 12 +-- clang/test/CIR/CodeGen/loop.cpp | 44 +++++----- clang/test/CIR/CodeGen/lvalue-refs.cpp | 4 +- clang/test/CIR/CodeGen/new.cpp | 30 +++---- clang/test/CIR/CodeGen/no-proto-fun-ptr.c | 10 +-- clang/test/CIR/CodeGen/no-prototype.c | 8 +- clang/test/CIR/CodeGen/nrvo.cpp | 16 ++-- clang/test/CIR/CodeGen/packed-structs.c | 6 +- clang/test/CIR/CodeGen/pass-object-size.c | 2 +- clang/test/CIR/CodeGen/pointers.cpp | 10 +-- clang/test/CIR/CodeGen/predefined.cpp | 6 +- clang/test/CIR/CodeGen/ptr_diff.cpp | 4 +- clang/test/CIR/CodeGen/rangefor.cpp | 36 ++++---- clang/test/CIR/CodeGen/return.cpp | 12 +-- clang/test/CIR/CodeGen/scope.cir | 24 +++--- clang/test/CIR/CodeGen/sourcelocation.cpp | 30 +++---- clang/test/CIR/CodeGen/static-vars.c | 6 +- clang/test/CIR/CodeGen/static-vars.cpp | 6 +- clang/test/CIR/CodeGen/static.cpp | 24 +++--- clang/test/CIR/CodeGen/std-find.cpp | 2 +- clang/test/CIR/CodeGen/stmt-expr.c | 16 ++-- clang/test/CIR/CodeGen/stmt-expr.cpp | 4 +- clang/test/CIR/CodeGen/store.c | 6 +- clang/test/CIR/CodeGen/struct.c | 12 +-- clang/test/CIR/CodeGen/struct.cpp | 66 +++++++-------- clang/test/CIR/CodeGen/switch.cir | 30 +++---- clang/test/CIR/CodeGen/switch.cpp | 28 +++---- clang/test/CIR/CodeGen/ternary.cir | 24 +++--- clang/test/CIR/CodeGen/ternary.cpp | 26 +++--- .../test/CIR/CodeGen/three-way-comparison.cpp | 4 +- clang/test/CIR/CodeGen/throw.cpp | 4 +- clang/test/CIR/CodeGen/tls.c | 2 +- clang/test/CIR/CodeGen/try-catch.cpp | 14 ++-- clang/test/CIR/CodeGen/typedef.c | 2 +- clang/test/CIR/CodeGen/types-nullptr.cpp | 4 +- clang/test/CIR/CodeGen/unary-deref.cpp | 2 +- clang/test/CIR/CodeGen/unary.c | 10 +-- clang/test/CIR/CodeGen/unary.cpp | 50 +++++------ clang/test/CIR/CodeGen/union-init.c | 16 ++-- clang/test/CIR/CodeGen/union.cpp | 18 ++-- clang/test/CIR/CodeGen/vector.cpp | 16 ++-- clang/test/CIR/CodeGen/vectype.cpp | 8 +- clang/test/CIR/CodeGen/vla.c | 14 ++-- clang/test/CIR/CodeGen/vtable-rtti.cpp | 24 +++--- clang/test/CIR/IR/aliases.cir | 12 +-- clang/test/CIR/IR/alloca.cir | 4 +- clang/test/CIR/IR/array.cir | 4 +- clang/test/CIR/IR/cast.cir | 2 +- clang/test/CIR/IR/cir-ops.cir | 60 +++++++------- clang/test/CIR/IR/exceptions.cir | 4 +- clang/test/CIR/IR/func.cir | 2 +- clang/test/CIR/IR/global.cir | 14 ++-- clang/test/CIR/IR/invalid.cir | 22 ++--- clang/test/CIR/IR/ptr_stride.cir | 4 +- clang/test/CIR/IR/scope.cir | 2 +- clang/test/CIR/IR/struct.cir | 8 +- clang/test/CIR/IR/try.cir | 4 +- clang/test/CIR/IR/types.cir | 4 +- clang/test/CIR/Lowering/ThroughMLIR/array.cir | 2 +- .../CIR/Lowering/ThroughMLIR/binop-fp.cir | 60 +++++++------- .../ThroughMLIR/binop-unsigned-int.cir | 70 ++++++++-------- clang/test/CIR/Lowering/ThroughMLIR/bool.cir | 4 +- clang/test/CIR/Lowering/ThroughMLIR/cmp.cir | 58 ++++++------- clang/test/CIR/Lowering/ThroughMLIR/dot.cir | 14 ++-- .../test/CIR/Lowering/ThroughMLIR/global.cir | 16 ++-- clang/test/CIR/Lowering/ThroughMLIR/goto.cir | 12 +-- .../test/CIR/Lowering/ThroughMLIR/memref.cir | 6 +- clang/test/CIR/Lowering/ThroughMLIR/scope.cir | 4 +- .../test/CIR/Lowering/ThroughMLIR/tenary.cir | 12 +-- .../Lowering/ThroughMLIR/unary-inc-dec.cir | 16 ++-- .../Lowering/ThroughMLIR/unary-plus-minus.cir | 16 ++-- clang/test/CIR/Lowering/alloca.cir | 2 +- clang/test/CIR/Lowering/array.cir | 2 +- clang/test/CIR/Lowering/asm.cir | 4 +- clang/test/CIR/Lowering/binop-fp.cir | 60 +++++++------- clang/test/CIR/Lowering/binop-signed-int.cir | 70 ++++++++-------- .../test/CIR/Lowering/binop-unsigned-int.cir | 70 ++++++++-------- clang/test/CIR/Lowering/bool.cir | 4 +- clang/test/CIR/Lowering/cast.cir | 58 ++++++------- clang/test/CIR/Lowering/class.cir | 10 +-- clang/test/CIR/Lowering/cmp.cir | 58 ++++++------- clang/test/CIR/Lowering/const-array.cir | 2 +- clang/test/CIR/Lowering/const.cir | 8 +- clang/test/CIR/Lowering/dot.cir | 52 ++++++------ clang/test/CIR/Lowering/func.cir | 2 +- clang/test/CIR/Lowering/globals.cir | 82 +++++++++---------- clang/test/CIR/Lowering/goto.cir | 12 +-- clang/test/CIR/Lowering/hello.cir | 10 +-- clang/test/CIR/Lowering/int-wrap.cir | 6 +- clang/test/CIR/Lowering/loadstorealloca.cir | 12 +-- clang/test/CIR/Lowering/loops-with-break.cir | 58 ++++++------- .../test/CIR/Lowering/loops-with-continue.cir | 58 ++++++------- clang/test/CIR/Lowering/openmp.cir | 12 +-- clang/test/CIR/Lowering/ptrstride.cir | 8 +- clang/test/CIR/Lowering/scope.cir | 12 +-- clang/test/CIR/Lowering/struct.cir | 10 +-- clang/test/CIR/Lowering/switch.cir | 24 +++--- clang/test/CIR/Lowering/ternary.cir | 12 +-- clang/test/CIR/Lowering/unary-inc-dec.cir | 40 ++++----- clang/test/CIR/Lowering/unary-not.cir | 36 ++++---- clang/test/CIR/Lowering/unary-plus-minus.cir | 24 +++--- clang/test/CIR/Lowering/unions.cir | 4 +- clang/test/CIR/Lowering/variadics.cir | 14 ++-- clang/test/CIR/Transforms/lib-opt-find.cpp | 12 +-- clang/test/CIR/Transforms/merge-cleanups.cir | 42 +++++----- 168 files changed, 1620 insertions(+), 1649 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 358cc42b4dec..1991884b9ef5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -392,7 +392,7 @@ def AllocaOp : CIR_Op<"alloca", [ %0 = cir.alloca i32, !cir.ptr, ["count", init] {alignment = 4 : i64} // int *ptr; - %1 = cir.alloca !cir.ptr, cir.ptr >, ["ptr"] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr, !cir.ptr>, ["ptr"] {alignment = 8 : i64} ... ``` }]; @@ -434,10 +434,8 @@ def AllocaOp : CIR_Op<"alloca", [ bool isDynamic() { return (bool)getDynAllocSize(); } }]; - // FIXME: we should not be printing `cir.ptr` below, that should come - // from the pointer type directly. let assemblyFormat = [{ - $allocaType `,` `cir.ptr` type($addr) `,` + $allocaType `,` qualified(type($addr)) `,` ($dynAllocSize^ `:` type($dynAllocSize) `,`)? `[` $name (`,` `init` $init^)? @@ -474,7 +472,7 @@ def LoadOp : CIR_Op<"load", [ // Load address from memory at address %0. %3 is used by at least one // operation that dereferences a pointer. - %3 = cir.load deref %0 : cir.ptr > + %3 = cir.load deref %0 : !cir.ptr> // Perform a volatile load from address in %0. %4 = cir.load volatile %0 : !cir.ptr, i32 @@ -487,13 +485,11 @@ def LoadOp : CIR_Op<"load", [ OptionalAttr:$mem_order); let results = (outs CIR_AnyType:$result); - // FIXME: we should not be printing `cir.ptr` below, that should come - // from the pointer type directly. let assemblyFormat = [{ (`deref` $isDeref^)? (`volatile` $is_volatile^)? (`atomic` `(` $mem_order^ `)`)? - $addr `:` `cir.ptr` type($addr) `,` type($result) attr-dict + $addr `:` qualified(type($addr)) `,` type($result) attr-dict }]; // FIXME: add verifier. @@ -532,12 +528,10 @@ def StoreOp : CIR_Op<"store", [ UnitAttr:$is_volatile, OptionalAttr:$mem_order); - // FIXME: we should not be printing `cir.ptr` below, that should come - // from the pointer type directly. let assemblyFormat = [{ (`volatile` $is_volatile^)? (`atomic` `(` $mem_order^ `)`)? - $value `,` $addr attr-dict `:` type($value) `,` `cir.ptr` type($addr) + $value `,` $addr attr-dict `:` type($value) `,` qualified(type($addr)) }]; // FIXME: add verifier. @@ -999,7 +993,7 @@ def BinOp : CIR_Op<"binop", [Pure, UnitAttr:$no_signed_wrap); let assemblyFormat = [{ - `(` $kind `,` $lhs `,` $rhs `)` + `(` $kind `,` $lhs `,` $rhs `)` (`nsw` $no_signed_wrap^)? (`nuw` $no_unsigned_wrap^)? `:` type($lhs) attr-dict @@ -1871,11 +1865,9 @@ def GetGlobalOp : CIR_Op<"get_global", let arguments = (ins FlatSymbolRefAttr:$name, UnitAttr:$tls); let results = (outs Res:$addr); - // FIXME: we should not be printing `cir.ptr` below, that should come - // from the pointer type directly. let assemblyFormat = [{ (`thread_local` $tls^)? - $name `:` `cir.ptr` type($addr) attr-dict + $name `:` qualified(type($addr)) attr-dict }]; // `GetGlobalOp` is fully verified by its traits. @@ -1904,7 +1896,7 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", ```mlir cir.global linkonce_odr @_ZTV1B = ... ... - %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr i32>> + %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : !cir.ptr i32>> ``` }]; @@ -1914,8 +1906,6 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", I32Attr:$address_point_index); let results = (outs Res:$addr); - // FIXME: we should not be printing `cir.ptr` below, that should come - // from the pointer type directly. let assemblyFormat = [{ `(` ($name^)? @@ -1924,7 +1914,7 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", `vtable_index` `=` $vtable_index `,` `address_point_index` `=` $address_point_index `)` - `:` `cir.ptr` type($addr) attr-dict + `:` qualified(type($addr)) attr-dict }]; let hasVerifier = 1; @@ -1972,7 +1962,7 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { #bfi_d = #cir.bitfield_info %1 = cir.const(#cir.int<3> : !s32i) : !s32i - %2 = cir.load %0 : cir.ptr >, !cir.ptr + %2 = cir.load %0 : !cir.ptr>, !cir.ptr %3 = cir.get_member %2[1] {name = "d"} : !cir.ptr -> !cir.ptr %4 = cir.set_bitfield(#bfi_d, %3 : !cir.ptr, %1 : !s32i) -> !s32i ``` @@ -2051,7 +2041,7 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> { !struct_type = !cir.struct, !cir.int, !cir.int} #cir.record.decl.ast> #bfi_d = #cir.bitfield_info - %2 = cir.load %0 : cir.ptr >, !cir.ptr + %2 = cir.load %0 : !cir.ptr>, !cir.ptr %3 = cir.get_member %2[1] {name = "d"} : !cir.ptr -> !cir.ptr %4 = cir.get_bitfield(#bfi_d, %3 : !cir.ptr) -> !s32i ``` @@ -2453,12 +2443,10 @@ def BaseClassAddrOp : CIR_Op<"base_class_addr"> { let results = (outs Res:$base_addr); - // FIXME: we should not be printing `cir.ptr` below, that should come - // from the pointer type directly. let assemblyFormat = [{ `(` - $derived_addr `:` `cir.ptr` type($derived_addr) - `)` `->` `cir.ptr` type($base_addr) attr-dict + $derived_addr `:` qualified(type($derived_addr)) + `)` `->` qualified(type($base_addr)) attr-dict }]; // FIXME: add verifier. @@ -2752,7 +2740,7 @@ def TryCallOp : CIR_CallOp<"try_call"> { ```mlir cir.try { - %0 = cir.alloca !cir.ptr, cir.ptr > + %0 = cir.alloca !cir.ptr, !cir.ptr> ... %r = cir.try_call %exception(%0) @division(%1, %2) } ... @@ -3384,10 +3372,10 @@ def StackRestoreOp : CIR_Op<"stack_restore"> { Useful for implementing language features like variable length arrays. ```mlir - %0 = cir.alloca !cir.ptr, cir.ptr >, ["saved_stack"] {alignment = 8 : i64} + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["saved_stack"] {alignment = 8 : i64} %1 = cir.stack_save : - cir.store %1, %0 : !cir.ptr, cir.ptr > - %2 = cir.load %0 : cir.ptr >, !cir.ptr + cir.store %1, %0 : !cir.ptr, !cir.ptr> + %2 = cir.load %0 : !cir.ptr>, !cir.ptr cir.stack_restore %2 : !cir.ptr ``` }]; @@ -3410,16 +3398,16 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { let description = [{ The `cir.asm` operation represents C/C++ asm inline. - CIR constraints strings follow barelly the same rules that are established - for the C level assembler constraints with several differences caused by - clang::AsmStmt processing. + CIR constraints strings follow barelly the same rules that are established + for the C level assembler constraints with several differences caused by + clang::AsmStmt processing. Thus, numbers that appears in the constraint string may also refer to: - the output variable index referenced by the input operands. - the index of early-clobber operand Operand attributes is a storage, where each element corresponds to the operand with - the same index. The first index relates to the operation result (if any). + the same index. The first index relates to the operation result (if any). Note, the operands themselves are stored as VariadicOfVariadic in the next order: output, input and then in/out operands. @@ -3432,16 +3420,16 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { __asm__("bar $42 %[val]" : [val] "=r" (x), "+&r"(x)); __asm__("baz $42 %[val]" : [val] "=r" (x), "+&r"(x) : "[val]"(y)); ``` - + ```mlir !ty_22anon2E022 = !cir.struct, !cir.int}> !ty_22anon2E122 = !cir.struct, !cir.int}> ... - %0 = cir.alloca !s32i, cir.ptr , ["x", init] - %1 = cir.alloca !s32i, cir.ptr , ["y", init] - ... - %2 = cir.load %0 : cir.ptr , !s32i - %3 = cir.load %1 : cir.ptr , !s32i + %0 = cir.alloca !s32i, !cir.ptr, ["x", init] + %1 = cir.alloca !s32i, !cir.ptr, ["y", init] + ... + %2 = cir.load %0 : !cir.ptr, !s32i + %3 = cir.load %1 : !cir.ptr, !s32i cir.asm(x86_att, out = [], @@ -3484,7 +3472,7 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { "ArrayRef":$operand_attrs )> ]; - + let hasCustomAssemblyFormat = 1; } diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 82a12963b425..288cce02afce 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -127,7 +127,7 @@ def PrimitiveInt // FloatType //===----------------------------------------------------------------------===// -class CIR_FloatType +class CIR_FloatType : CIR_Type, @@ -196,7 +196,7 @@ def CIR_PointerType : CIR_Type<"Pointer", "ptr", let parameters = (ins "mlir::Type":$pointee); - let hasCustomAssemblyFormat = 1; + let assemblyFormat = "`<` $pointee `>`"; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 83d1b5202d7b..ec1b74caa0d2 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -96,23 +96,6 @@ void CIRDialect::printType(Type type, DialectAsmPrinter &os) const { }); } -Type PointerType::parse(mlir::AsmParser &parser) { - if (parser.parseLess()) - return Type(); - Type pointeeType; - if (parser.parseType(pointeeType)) - return Type(); - if (parser.parseGreater()) - return Type(); - return get(parser.getContext(), pointeeType); -} - -void PointerType::print(mlir::AsmPrinter &printer) const { - printer << "<"; - printer.printType(getPointee()); - printer << '>'; -} - Type BoolType::parse(mlir::AsmParser &parser) { return get(parser.getContext()); } diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index 576eed964761..fd87f3f287b1 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -23,16 +23,16 @@ void test() { // CHECK-NEXT: %1 = cir.load %0 // CHECK-NEXT: %2 = cir.get_member %1[0] {name = "storage"} // CHECK-NEXT: %3 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK-NEXT: cir.store %3, %2 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %3, %2 : !cir.ptr, !cir.ptr> // CHECK-NEXT: %4 = cir.get_member %1[1] {name = "size"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.cast(integral, %5 : !s32i), !s64i -// CHECK-NEXT: cir.store %6, %4 : !s64i, cir.ptr +// CHECK-NEXT: cir.store %6, %4 : !s64i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2Ei // CHECK-NEXT: %0 = cir.alloca !cir.ptr -// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["size", init] +// CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["size", init] // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: cir.store %arg1, %1 // CHECK-NEXT: %2 = cir.load %0 @@ -40,30 +40,30 @@ void test() { // CHECK-NEXT: %4 = cir.const(#cir.ptr : !cir.ptr) // CHECK-NEXT: cir.store %4, %3 // CHECK-NEXT: %5 = cir.get_member %2[1] {name = "size"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %6 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %7 = cir.cast(integral, %6 : !s32i), !s64i -// CHECK-NEXT: cir.store %7, %5 : !s64i, cir.ptr +// CHECK-NEXT: cir.store %7, %5 : !s64i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} : !cir.ptr -> !cir.ptr> // CHECK-NEXT: %4 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK-NEXT: cir.store %4, %3 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %4, %3 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.return // CHECK: cir.func linkonce_odr @_ZN6StringC1EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/agg-copy.c b/clang/test/CIR/CodeGen/agg-copy.c index 0132bdb1132f..91e7d52c1d12 100644 --- a/clang/test/CIR/CodeGen/agg-copy.c +++ b/clang/test/CIR/CodeGen/agg-copy.c @@ -10,14 +10,14 @@ typedef struct { } A; // CHECK: cir.func @foo1 -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] -// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a2", init] -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > -// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, cir.ptr > -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP3:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: [[TMP4:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[TMP3]] : !s32i), !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.load [[TMP1]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP1]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP6:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: [[TMP7:%.*]] = cir.ptr_stride([[TMP5]] : !cir.ptr, [[TMP6]] : !s32i), !cir.ptr // CHECK: cir.copy [[TMP7]] to [[TMP4]] : !cir.ptr @@ -26,13 +26,13 @@ void foo1(A* a1, A* a2) { } // CHECK: cir.func @foo2 -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] -// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a2", init] -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > -// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, cir.ptr > -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][2] {name = "s"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.load [[TMP1]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP1]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP5:%.*]] = cir.get_member [[TMP4]][2] {name = "s"} : !cir.ptr -> !cir.ptr // CHECK: cir.copy [[TMP5]] to [[TMP3]] : !cir.ptr void foo2(A* a1, A* a2) { @@ -41,10 +41,10 @@ void foo2(A* a1, A* a2) { // CHECK: cir.global external @a = #cir.zero : !ty_22A22 // CHECK: cir.func @foo3 -// CHECK: [[TMP0]] = cir.alloca !ty_22A22, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: [[TMP1]] = cir.get_global @a : cir.ptr +// CHECK: [[TMP0]] = cir.alloca !ty_22A22, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: [[TMP1]] = cir.get_global @a : !cir.ptr // CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr -// CHECK: [[TMP2]] = cir.load [[TMP0]] : cir.ptr , !ty_22A22 +// CHECK: [[TMP2]] = cir.load [[TMP0]] : !cir.ptr, !ty_22A22 // CHECK: cir.return [[TMP2]] : !ty_22A22 A a; A foo3(void) { @@ -52,10 +52,10 @@ A foo3(void) { } // CHECK: cir.func @foo4 -// CHECK: [[TMP0]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] -// CHECK: [[TMP1]] = cir.alloca !ty_22A22, cir.ptr , ["a2", init] -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > -// CHECK: [[TMP2]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP0]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] +// CHECK: [[TMP1]] = cir.alloca !ty_22A22, !cir.ptr, ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2]] = cir.load deref [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr void foo4(A* a1) { A a2 = *a1; @@ -64,10 +64,10 @@ void foo4(A* a1) { A create() { A a; return a; } // CHECK: cir.func {{.*@foo5}} -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22A22, cir.ptr , -// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["tmp"] {alignment = 4 : i64} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22A22, !cir.ptr, +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, !cir.ptr, ["tmp"] {alignment = 4 : i64} // CHECK: [[TMP2:%.*]] = cir.call @create() : () -> !ty_22A22 -// CHECK: cir.store [[TMP2]], [[TMP1]] : !ty_22A22, cir.ptr +// CHECK: cir.store [[TMP2]], [[TMP1]] : !ty_22A22, !cir.ptr // CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr void foo5() { A a; @@ -77,9 +77,9 @@ void foo5() { void foo6(A* a1) { A a2 = (*a1); // CHECK: cir.func {{.*@foo6}} -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a1", init] {alignment = 8 : i64} -// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["a2", init] {alignment = 4 : i64} -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > -// CHECK: [[TMP2:%.*]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] {alignment = 8 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, !cir.ptr, ["a2", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.load deref [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 198ac6004d7e..40af18348a8a 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -17,13 +17,13 @@ typedef struct yep_ { void use() { yop{}; } // CHECK: cir.func @_Z3usev() -// CHECK: %0 = cir.alloca !ty_22yep_22, cir.ptr , ["agg.tmp.ensured"] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca !ty_22yep_22, !cir.ptr, ["agg.tmp.ensured"] {alignment = 4 : i64} // CHECK: %1 = cir.get_member %0[0] {name = "Status"} : !cir.ptr -> !cir.ptr // CHECK: %2 = cir.const(#cir.int<0> : !u32i) : !u32i -// CHECK: cir.store %2, %1 : !u32i, cir.ptr +// CHECK: cir.store %2, %1 : !u32i, !cir.ptr // CHECK: %3 = cir.get_member %0[1] {name = "HC"} : !cir.ptr -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<0> : !u32i) : !u32i -// CHECK: cir.store %4, %3 : !u32i, cir.ptr +// CHECK: cir.store %4, %3 : !u32i, !cir.ptr // CHECK: cir.return // CHECK: } @@ -47,16 +47,16 @@ void yo() { } // CHECK: cir.func @_Z2yov() -// CHECK: %0 = cir.alloca !ty_22Yo22, cir.ptr , ["ext"] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !ty_22Yo22, cir.ptr , ["ext2", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !ty_22Yo22, !cir.ptr, ["ext"] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22Yo22, !cir.ptr, ["ext2", init] {alignment = 8 : i64} // CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.ptr : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22Yo22) : !ty_22Yo22 -// CHECK: cir.store %2, %0 : !ty_22Yo22, cir.ptr +// CHECK: cir.store %2, %0 : !ty_22Yo22, !cir.ptr // CHECK: %3 = cir.get_member %1[0] {name = "type"} : !cir.ptr -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i -// CHECK: cir.store %4, %3 : !u32i, cir.ptr +// CHECK: cir.store %4, %3 : !u32i, !cir.ptr // CHECK: %5 = cir.get_member %1[1] {name = "next"} : !cir.ptr -> !cir.ptr> // CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr -// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > +// CHECK: cir.store %6, %5 : !cir.ptr, !cir.ptr> // CHECK: %7 = cir.get_member %1[2] {name = "createFlags"} : !cir.ptr -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !u64i) : !u64i -// CHECK: cir.store %8, %7 : !u64i, cir.ptr +// CHECK: cir.store %8, %7 : !u64i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/agg-init2.cpp b/clang/test/CIR/CodeGen/agg-init2.cpp index 3790d493d0ce..d534f38fc169 100644 --- a/clang/test/CIR/CodeGen/agg-init2.cpp +++ b/clang/test/CIR/CodeGen/agg-init2.cpp @@ -14,7 +14,7 @@ void f() { } // CHECK: cir.func @_Z1fv() -// CHECK: %0 = cir.alloca !ty_22Zero22, cir.ptr , ["z0", init] -// CHECK: %1 = cir.alloca !ty_22Zero22, cir.ptr , ["z1"] +// CHECK: %0 = cir.alloca !ty_22Zero22, !cir.ptr, ["z0", init] +// CHECK: %1 = cir.alloca !ty_22Zero22, !cir.ptr, ["z1"] // CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/array-init-destroy.cpp b/clang/test/CIR/CodeGen/array-init-destroy.cpp index bb464a687bf2..975c14635e16 100644 --- a/clang/test/CIR/CodeGen/array-init-destroy.cpp +++ b/clang/test/CIR/CodeGen/array-init-destroy.cpp @@ -39,17 +39,17 @@ void x() { // AFTER: %[[ConstTwo:.*]] = cir.const(#cir.int<2> : !u64i) : !u64i // AFTER: %[[ArrayBegin:.*]] = cir.cast(array_to_ptrdecay, %[[ArrayAddr0]] : !cir.ptr>), !cir.ptr // AFTER: %[[ArrayPastEnd:.*]] = cir.ptr_stride(%[[ArrayBegin]] : !cir.ptr, %[[ConstTwo]] : !u64i), !cir.ptr -// AFTER: %[[TmpIdx:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["__array_idx"] {alignment = 1 : i64} -// AFTER: cir.store %[[ArrayBegin]], %[[TmpIdx]] : !cir.ptr, cir.ptr > +// AFTER: %[[TmpIdx:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// AFTER: cir.store %[[ArrayBegin]], %[[TmpIdx]] : !cir.ptr, !cir.ptr> // AFTER: cir.do { -// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : cir.ptr >, !cir.ptr +// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : !cir.ptr>, !cir.ptr // AFTER: %[[ConstOne:.*]] = cir.const(#cir.int<1> : !u64i) : !u64i // AFTER: cir.call @_ZN4xptoC1Ev(%[[ArrayElt]]) : (!cir.ptr) -> () // AFTER: %[[NextElt:.*]] = cir.ptr_stride(%[[ArrayElt]] : !cir.ptr, %[[ConstOne]] : !u64i), !cir.ptr -// AFTER: cir.store %[[NextElt]], %[[TmpIdx]] : !cir.ptr, cir.ptr > +// AFTER: cir.store %[[NextElt]], %[[TmpIdx]] : !cir.ptr, !cir.ptr> // AFTER: cir.yield // AFTER: } while { -// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : cir.ptr >, !cir.ptr +// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : !cir.ptr>, !cir.ptr // AFTER: %[[ExitCond:.*]] = cir.cmp(eq, %[[ArrayElt]], %[[ArrayPastEnd]]) : !cir.ptr, !cir.bool // AFTER: cir.condition(%[[ExitCond]]) // AFTER: } diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c index cdba1e30cb4b..854cf377e2fc 100644 --- a/clang/test/CIR/CodeGen/array-init.c +++ b/clang/test/CIR/CodeGen/array-init.c @@ -9,17 +9,17 @@ void buz(int x) { T arr[] = { {0, x}, {0, 0} }; } // CHECK: cir.func @buz -// CHECK-NEXT: [[X_ALLOCA:%.*]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} -// CHECK-NEXT: [[ARR:%.*]] = cir.alloca !cir.array, cir.ptr >, ["arr", init] {alignment = 16 : i64} -// CHECK-NEXT: cir.store %arg0, [[X_ALLOCA]] : !s32i, cir.ptr +// CHECK-NEXT: [[X_ALLOCA:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: [[ARR:%.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 16 : i64} +// CHECK-NEXT: cir.store %arg0, [[X_ALLOCA]] : !s32i, !cir.ptr // CHECK-NEXT: [[ARR_INIT:%.*]] = cir.const(#cir.zero : !cir.array) : !cir.array -// CHECK-NEXT: cir.store [[ARR_INIT]], [[ARR]] : !cir.array, cir.ptr > +// CHECK-NEXT: cir.store [[ARR_INIT]], [[ARR]] : !cir.array, !cir.ptr> // CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr // CHECK-NEXT: [[A_STORAGE0:%.*]] = cir.get_member [[FI_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: [[B_STORAGE0:%.*]] = cir.get_member [[FI_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: [[X_VAL:%.*]] = cir.load [[X_ALLOCA]] : cir.ptr , !s32i +// CHECK-NEXT: [[X_VAL:%.*]] = cir.load [[X_ALLOCA]] : !cir.ptr, !s32i // CHECK-NEXT: [[X_CASTED:%.*]] = cir.cast(integral, [[X_VAL]] : !s32i), !s64i -// CHECK-NEXT: cir.store [[X_CASTED]], [[B_STORAGE0]] : !s64i, cir.ptr +// CHECK-NEXT: cir.store [[X_CASTED]], [[B_STORAGE0]] : !s64i, !cir.ptr // CHECK-NEXT: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i // CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride([[FI_EL]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: [[A_STORAGE1:%.*]] = cir.get_member [[SE_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr @@ -30,56 +30,56 @@ void foo() { double bar[] = {9,8,7}; } -// CHECK: %0 = cir.alloca !cir.array, cir.ptr >, ["bar"] {alignment = 16 : i64} +// CHECK: %0 = cir.alloca !cir.array, !cir.ptr>, ["bar"] {alignment = 16 : i64} // CHECK-NEXT: %1 = cir.const(#cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array) : !cir.array -// CHECK-NEXT: cir.store %1, %0 : !cir.array, cir.ptr > +// CHECK-NEXT: cir.store %1, %0 : !cir.array, !cir.ptr> void bar(int a, int b, int c) { int arr[] = {a,b,c}; } // CHECK: cir.func @bar -// CHECK: [[ARR:%.*]] = cir.alloca !cir.array, cir.ptr >, ["arr", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, [[A:%.*]] : !s32i, cir.ptr -// CHECK-NEXT: cir.store %arg1, [[B:%.*]] : !s32i, cir.ptr -// CHECK-NEXT: cir.store %arg2, [[C:%.*]] : !s32i, cir.ptr +// CHECK: [[ARR:%.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, [[A:%.*]] : !s32i, !cir.ptr +// CHECK-NEXT: cir.store %arg1, [[B:%.*]] : !s32i, !cir.ptr +// CHECK-NEXT: cir.store %arg2, [[C:%.*]] : !s32i, !cir.ptr // CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: [[LOAD_A:%.*]] = cir.load [[A]] : cir.ptr , !s32i -// CHECK-NEXT: cir.store [[LOAD_A]], [[FI_EL]] : !s32i, cir.ptr +// CHECK-NEXT: [[LOAD_A:%.*]] = cir.load [[A]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store [[LOAD_A]], [[FI_EL]] : !s32i, !cir.ptr // CHECK-NEXT: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i // CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride(%4 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: [[LOAD_B:%.*]] = cir.load [[B]] : cir.ptr , !s32i -// CHECK-NEXT: cir.store [[LOAD_B]], [[SE_EL]] : !s32i, cir.ptr +// CHECK-NEXT: [[LOAD_B:%.*]] = cir.load [[B]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store [[LOAD_B]], [[SE_EL]] : !s32i, !cir.ptr // CHECK-NEXT: [[TH_EL:%.*]] = cir.ptr_stride(%7 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: [[LOAD_C:%.*]] = cir.load [[C]] : cir.ptr , !s32i -// CHECK-NEXT: cir.store [[LOAD_C]], [[TH_EL]] : !s32i, cir.ptr +// CHECK-NEXT: [[LOAD_C:%.*]] = cir.load [[C]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store [[LOAD_C]], [[TH_EL]] : !s32i, !cir.ptr void zero_init(int x) { int arr[3] = {x}; } // CHECK: cir.func @zero_init -// CHECK: [[VAR_ALLOC:%.*]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !cir.array, cir.ptr >, ["arr", init] {alignment = 4 : i64} -// CHECK: [[TEMP:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["arrayinit.temp", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, [[VAR_ALLOC]] : !s32i, cir.ptr +// CHECK: [[VAR_ALLOC:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 4 : i64} +// CHECK: [[TEMP:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, [[VAR_ALLOC]] : !s32i, !cir.ptr // CHECK: [[BEGIN:%.*]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// CHECK: [[VAR:%.*]] = cir.load [[VAR_ALLOC]] : cir.ptr , !s32i -// CHECK: cir.store [[VAR]], [[BEGIN]] : !s32i, cir.ptr +// CHECK: [[VAR:%.*]] = cir.load [[VAR_ALLOC]] : !cir.ptr, !s32i +// CHECK: cir.store [[VAR]], [[BEGIN]] : !s32i, !cir.ptr // CHECK: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i // CHECK: [[ZERO_INIT_START:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr -// CHECK: cir.store [[ZERO_INIT_START]], [[TEMP]] : !cir.ptr, cir.ptr > +// CHECK: cir.store [[ZERO_INIT_START]], [[TEMP]] : !cir.ptr, !cir.ptr> // CHECK: [[SIZE:%.*]] = cir.const(#cir.int<3> : !s64i) : !s64i // CHECK: [[END:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[SIZE]] : !s64i), !cir.ptr // CHECK: cir.do { -// CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : cir.ptr >, !cir.ptr +// CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : !cir.ptr>, !cir.ptr // CHECK: [[FILLER:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK: cir.store [[FILLER]], [[CUR]] : !s32i, cir.ptr +// CHECK: cir.store [[FILLER]], [[CUR]] : !s32i, !cir.ptr // CHECK: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i // CHECK: [[NEXT:%.*]] = cir.ptr_stride([[CUR]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr -// CHECK: cir.store [[NEXT]], [[TEMP]] : !cir.ptr, cir.ptr > +// CHECK: cir.store [[NEXT]], [[TEMP]] : !cir.ptr, !cir.ptr> // CHECK: cir.yield // CHECK: } while { -// CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : cir.ptr >, !cir.ptr +// CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : !cir.ptr>, !cir.ptr // CHECK: [[CMP:%.*]] = cir.cmp(ne, [[CUR]], [[END]]) : !cir.ptr, !cir.bool // CHECK: cir.condition([[CMP]]) // CHECK: } diff --git a/clang/test/CIR/CodeGen/array-unknown-bound.cpp b/clang/test/CIR/CodeGen/array-unknown-bound.cpp index 82948bef34e2..805b8c5d5867 100644 --- a/clang/test/CIR/CodeGen/array-unknown-bound.cpp +++ b/clang/test/CIR/CodeGen/array-unknown-bound.cpp @@ -8,7 +8,7 @@ int *table_ptr = table; int test() { return table[1]; } // CHECK: cir.func @_Z4testv() -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.get_global @table : cir.ptr > +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.get_global @table : !cir.ptr> int table[3] {1, 2, 3}; diff --git a/clang/test/CIR/CodeGen/array.c b/clang/test/CIR/CodeGen/array.c index c98e97961602..5079f687b2f5 100644 --- a/clang/test/CIR/CodeGen/array.c +++ b/clang/test/CIR/CodeGen/array.c @@ -25,8 +25,8 @@ void useFoo(int i) { foo[i] = 42; } // CHECK: @useFoo -// CHECK: %[[#V2:]] = cir.get_global @foo : cir.ptr > -// CHECK: %[[#V3:]] = cir.load %{{.+}} : cir.ptr , !s32i +// CHECK: %[[#V2:]] = cir.get_global @foo : !cir.ptr> +// CHECK: %[[#V3:]] = cir.load %{{.+}} : !cir.ptr, !s32i // CHECK: %[[#V4:]] = cir.cast(array_to_ptrdecay, %[[#V2]] : !cir.ptr>), !cir.ptr // CHECK: %[[#V5:]] = cir.ptr_stride(%[[#V4]] : !cir.ptr, %[[#V3]] : !s32i), !cir.ptr -// CHECK: cir.store %{{.+}}, %[[#V5]] : !s32i, cir.ptr +// CHECK: cir.store %{{.+}}, %[[#V5]] : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 61bde35e261c..20cbc48f387e 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -6,7 +6,7 @@ void a0() { } // CHECK: cir.func @_Z2a0v() -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} void a1() { int a[10]; @@ -14,12 +14,12 @@ void a1() { } // CHECK: cir.func @_Z2a1v() -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} // CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : !s32i), !cir.ptr -// CHECK-NEXT: cir.store %1, %4 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %1, %4 : !s32i, !cir.ptr int *a2() { int a[4]; @@ -27,13 +27,13 @@ int *a2() { } // CHECK: cir.func @_Z2a2v() -> !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} // CHECK-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : !s32i), !cir.ptr -// CHECK-NEXT: cir.store %4, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %5 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: cir.store %4, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %5 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.return %5 : !cir.ptr void local_stringlit() { @@ -42,23 +42,23 @@ void local_stringlit() { // CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.func @_Z15local_stringlitv() -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.get_global @".str" : !cir.ptr> // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store %2, %0 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %2, %0 : !cir.ptr, !cir.ptr> int multidim(int i, int j) { int arr[2][2]; return arr[i][j]; } -// CHECK: %3 = cir.alloca !cir.array x 2>, cir.ptr x 2>> +// CHECK: %3 = cir.alloca !cir.array x 2>, !cir.ptr x 2>> // Stride first dimension (stride = 2) -// CHECK: %4 = cir.load %{{.+}} : cir.ptr , !s32i +// CHECK: %4 = cir.load %{{.+}} : !cir.ptr, !s32i // CHECK: %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr x 2>>), !cir.ptr> // CHECK: %6 = cir.ptr_stride(%5 : !cir.ptr>, %4 : !s32i), !cir.ptr> // Stride second dimension (stride = 1) -// CHECK: %7 = cir.load %{{.+}} : cir.ptr , !s32i +// CHECK: %7 = cir.load %{{.+}} : !cir.ptr, !s32i // CHECK: %8 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr // CHECK: %9 = cir.ptr_stride(%8 : !cir.ptr, %7 : !s32i), !cir.ptr @@ -75,7 +75,7 @@ struct S { void testPointerDecaySubscriptAccess(int arr[]) { // CHECK: cir.func @{{.+}}testPointerDecaySubscriptAccess arr[1]; - // CHECK: %[[#BASE:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr + // CHECK: %[[#BASE:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr // CHECK: %[[#DIM1:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: cir.ptr_stride(%[[#BASE]] : !cir.ptr, %[[#DIM1]] : !s32i), !cir.ptr } @@ -83,7 +83,7 @@ void testPointerDecaySubscriptAccess(int arr[]) { void testPointerDecayedArrayMultiDimSubscriptAccess(int arr[][3]) { // CHECK: cir.func @{{.+}}testPointerDecayedArrayMultiDimSubscriptAccess arr[1][2]; - // CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >>, !cir.ptr> + // CHECK: %[[#V1:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %[[#V3:]] = cir.ptr_stride(%[[#V1]] : !cir.ptr>, %[[#V2]] : !s32i), !cir.ptr> // CHECK: %[[#V4:]] = cir.const(#cir.int<2> : !s32i) : !s32i diff --git a/clang/test/CIR/CodeGen/asm.c b/clang/test/CIR/CodeGen/asm.c index 56d8cf2bf57c..2079f9b0573d 100644 --- a/clang/test/CIR/CodeGen/asm.c +++ b/clang/test/CIR/CodeGen/asm.c @@ -56,14 +56,14 @@ void empty6(int x) { __asm__ volatile("" : "=&r"(x), "+&r"(x)); } -// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["a"] -// CHECK: [[TMP1:%.*]] = cir.load %0 : cir.ptr , !u32i +// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, !cir.ptr, ["a"] +// CHECK: [[TMP1:%.*]] = cir.load %0 : !cir.ptr, !u32i // CHECK: [[TMP2:%.*]] = cir.asm(x86_att, // CHECK: out = [], // CHECK: in = [%3 : !u32i], // CHECK: in_out = [], // CHECK: {"addl $$42, $1" "=r,r,~{dirflag},~{fpsr},~{flags}"}) -> !s32i -// CHECK: cir.store [[TMP2]], [[TMP0]] : !s32i, cir.ptr loc(#loc42) +// CHECK: cir.store [[TMP2]], [[TMP0]] : !s32i, !cir.ptr loc(#loc42) unsigned add1(unsigned int x) { int a; __asm__("addl $42, %[val]" @@ -74,15 +74,15 @@ unsigned add1(unsigned int x) { return a; } -// CHECK: [[TMP0:%.*]] = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} -// CHECK: cir.store %arg0, [[TMP0]] : !u32i, cir.ptr -// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr , !u32i +// CHECK: [[TMP0:%.*]] = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !u32i, !cir.ptr +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr, !u32i // CHECK: [[TMP2:%.*]] = cir.asm(x86_att, // CHECK: out = [], // CHECK: in = [], // CHECK: in_out = [%2 : !u32i], // CHECK: {"addl $$42, $0" "=r,0,~{dirflag},~{fpsr},~{flags}"}) -> !u32i -// CHECK: cir.store [[TMP2]], [[TMP0]] : !u32i, cir.ptr +// CHECK: cir.store [[TMP2]], [[TMP0]] : !u32i, !cir.ptr unsigned add2(unsigned int x) { __asm__("addl $42, %[val]" : [val] "+r" (x) @@ -91,14 +91,14 @@ unsigned add2(unsigned int x) { } -// CHECK: [[TMP0:%.*]] = cir.alloca !u32i, cir.ptr , ["x", init] -// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr , !u32i +// CHECK: [[TMP0:%.*]] = cir.alloca !u32i, !cir.ptr, ["x", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr, !u32i // CHECK: [[TMP2:%.*]] = cir.asm(x86_att, // CHECK: out = [], // CHECK: in = [], // CHECK: in_out = [%2 : !u32i], // CHECK: {"addl $$42, $0 \0A\09 subl $$1, $0 \0A\09 imul $$2, $0" "=r,0,~{dirflag},~{fpsr},~{flags}"}) -> !u32i -// CHECK: cir.store [[TMP2]], [[TMP0]] : !u32i, cir.ptr +// CHECK: cir.store [[TMP2]], [[TMP0]] : !u32i, !cir.ptr unsigned add3(unsigned int x) { // ((42 + x) - 1) * 2 __asm__("addl $42, %[val] \n\t\ subl $1, %[val] \n\t\ @@ -108,9 +108,9 @@ unsigned add3(unsigned int x) { // ((42 + x) - 1) * 2 return x; } -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["x", init] -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > -// CHECK: [[TMP1:%.*]] = cir.load deref [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["x", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP1:%.*]] = cir.load deref [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: cir.asm(x86_att, // CHECK: out = [%1 : !cir.ptr (maybe_memory)], // CHECK: in = [], @@ -122,19 +122,19 @@ void add4(int *x) { } -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.float, cir.ptr , ["x", init] -// CHECK: [[TMP1:%.*]] = cir.alloca !cir.float, cir.ptr , ["y", init] -// CHECK: [[TMP2:%.*]] = cir.alloca !cir.float, cir.ptr , ["r"] -// CHECK: cir.store %arg0, [[TMP0]] : !cir.float, cir.ptr -// CHECK: cir.store %arg1, [[TMP1]] : !cir.float, cir.ptr -// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : cir.ptr , !cir.float -// CHECK: [[TMP4:%.*]] = cir.load [[TMP1]] : cir.ptr , !cir.float +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.float, !cir.ptr, ["x", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.float, !cir.ptr, ["y", init] +// CHECK: [[TMP2:%.*]] = cir.alloca !cir.float, !cir.ptr, ["r"] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.float, !cir.ptr +// CHECK: cir.store %arg1, [[TMP1]] : !cir.float, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : !cir.ptr, !cir.float +// CHECK: [[TMP4:%.*]] = cir.load [[TMP1]] : !cir.ptr, !cir.float // CHECK: [[TMP5:%.*]] = cir.asm(x86_att, // CHECK: out = [], // CHECK: in = [%4 : !cir.float, %5 : !cir.float], // CHECK: in_out = [], // CHECK: {"flds $1; flds $2; faddp" "=&{st},imr,imr,~{dirflag},~{fpsr},~{flags}"}) -> !cir.float -// CHECK: cir.store [[TMP5]], [[TMP2]] : !cir.float, cir.ptr +// CHECK: cir.store [[TMP5]], [[TMP2]] : !cir.float, !cir.ptr float add5(float x, float y) { float r; __asm__("flds %[x]; flds %[y]; faddp" diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index a7adf5f11502..a90642945562 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -15,11 +15,11 @@ struct String { // StringView::StringView(String const&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewC2ERK6String - // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} + // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr // CHECK: cir.store %arg1, %1 : !cir.ptr - // CHECK: %2 = cir.load %0 : cir.ptr > + // CHECK: %2 = cir.load %0 : !cir.ptr> // Get address of `this->size` @@ -27,7 +27,7 @@ struct String { // Get address of `s` - // CHECK: %4 = cir.load %1 : cir.ptr > + // CHECK: %4 = cir.load %1 : !cir.ptr> // Get the address of s.size @@ -35,30 +35,30 @@ struct String { // Load value from s.size and store in this->size - // CHECK: %6 = cir.load %5 : cir.ptr , !s64i - // CHECK: cir.store %6, %3 : !s64i, cir.ptr + // CHECK: %6 = cir.load %5 : !cir.ptr, !s64i + // CHECK: cir.store %6, %3 : !s64i, !cir.ptr // CHECK: cir.return // CHECK: } // DISABLE: cir.func linkonce_odr @_ZN10StringViewC2ERK6String - // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} + // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // StringView::operator=(StringView&&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewaSEOS_ - // CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["", init] {alignment = 8 : i64} - // CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} + // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["", init] {alignment = 8 : i64} + // CHECK: %2 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr // CHECK: cir.store %arg1, %1 : !cir.ptr - // CHECK: %3 = cir.load deref %0 : cir.ptr > - // CHECK: %4 = cir.load %1 : cir.ptr > + // CHECK: %3 = cir.load deref %0 : !cir.ptr> + // CHECK: %4 = cir.load %1 : !cir.ptr> // CHECK: %5 = cir.get_member %4[0] {name = "size"} - // CHECK: %6 = cir.load %5 : cir.ptr , !s64i + // CHECK: %6 = cir.load %5 : !cir.ptr, !s64i // CHECK: %7 = cir.get_member %3[0] {name = "size"} - // CHECK: cir.store %6, %7 : !s64i, cir.ptr + // CHECK: cir.store %6, %7 : !s64i, !cir.ptr // CHECK: cir.store %3, %2 : !cir.ptr - // CHECK: %8 = cir.load %2 : cir.ptr > + // CHECK: %8 = cir.load %2 : !cir.ptr> // CHECK: cir.return %8 : !cir.ptr // CHECK: } @@ -82,20 +82,20 @@ int main() { } // CHECK: cir.func @main() -> !s32i -// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !ty_22StringView22, cir.ptr , ["sv", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !ty_22StringView22, !cir.ptr, ["sv", init] {alignment = 8 : i64} // CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () // CHECK: cir.scope { -// CHECK: %3 = cir.alloca !ty_22String22, cir.ptr , ["s", init] {alignment = 8 : i64} -// CHECK: %4 = cir.get_global @".str" : cir.ptr > +// CHECK: %3 = cir.alloca !ty_22String22, !cir.ptr, ["s", init] {alignment = 8 : i64} +// CHECK: %4 = cir.get_global @".str" : !cir.ptr> // CHECK: %5 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr // CHECK: cir.call @_ZN6StringC2EPKc(%3, %5) : (!cir.ptr, !cir.ptr) -> () // CHECK: cir.scope { -// CHECK: %6 = cir.alloca !ty_22StringView22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %6 = cir.alloca !ty_22StringView22, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} // CHECK: cir.call @_ZN10StringViewC2ERK6String(%6, %3) : (!cir.ptr, !cir.ptr) -> () // CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %6) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } // CHECK: } -// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK: cir.return %2 : !s32i // CHECK: } diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c index 5325c0d98388..cecd745725f1 100644 --- a/clang/test/CIR/CodeGen/atomic-xchg-field.c +++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c @@ -78,9 +78,9 @@ void structLoad(unsigned referenceCount, wPtr item) { } // CHECK-LABEL: @structLoad -// CHECK: %[[ATOMIC_TEMP:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["atomic-temp"] -// CHECK: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %6 : cir.ptr , !u64i +// CHECK: %[[ATOMIC_TEMP:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["atomic-temp"] +// CHECK: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %6 : !cir.ptr, !u64i // CHECK: %[[RES:.*]] = cir.cast(bitcast, %[[ATOMIC_TEMP]] : !cir.ptr>), !cir.ptr -// CHECK: cir.store %[[ATOMIC_LOAD]], %[[RES]] : !u64i, cir.ptr +// CHECK: cir.store %[[ATOMIC_LOAD]], %[[RES]] : !u64i, !cir.ptr // No LLVM tests needed for this one, already covered elsewhere. diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index bcb802561580..2d92c1619ae9 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -19,13 +19,13 @@ int basic_binop_fetch(int *i) { } // CHECK: cir.func @_Z17basic_binop_fetchPi -// CHECK: %[[ARGI:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["i", init] {alignment = 8 : i64} -// CHECK: %[[ONE_ADDR:.*]] = cir.alloca !s32i, cir.ptr , [".atomictmp"] {alignment = 4 : i64} -// CHECK: cir.store %arg0, %[[ARGI]] : !cir.ptr, cir.ptr > -// CHECK: %[[I:.*]] = cir.load %[[ARGI]] : cir.ptr >, !cir.ptr +// CHECK: %[[ARGI:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["i", init] {alignment = 8 : i64} +// CHECK: %[[ONE_ADDR:.*]] = cir.alloca !s32i, !cir.ptr, [".atomictmp"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %[[ARGI]] : !cir.ptr, !cir.ptr> +// CHECK: %[[I:.*]] = cir.load %[[ARGI]] : !cir.ptr>, !cir.ptr // CHECK: %[[ONE:.*]] = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: cir.store %[[ONE]], %[[ONE_ADDR]] : !s32i, cir.ptr -// CHECK: %[[VAL:.*]] = cir.load %[[ONE_ADDR]] : cir.ptr , !s32i +// CHECK: cir.store %[[ONE]], %[[ONE_ADDR]] : !s32i, !cir.ptr +// CHECK: %[[VAL:.*]] = cir.load %[[ONE_ADDR]] : !cir.ptr, !s32i // CHECK: cir.atomic.fetch(add, %[[I]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) : !s32i // LLVM: define i32 @_Z17basic_binop_fetchPi @@ -293,7 +293,7 @@ bool fi4c(atomic_int *i) { // CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = seq_cst, failure = seq_cst) : (!s32i, !cir.bool) // CHECK: %[[CMP:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[CMP:.*]] { -// CHECK: cir.store %old, {{.*}} : !s32i, cir.ptr +// CHECK: cir.store %old, {{.*}} : !s32i, !cir.ptr // CHECK: } // LLVM-LABEL: @_Z4fi4cPU7_Atomici diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index c99ecce64090..be9db2ab8ec5 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -12,22 +12,22 @@ int foo(int i) { // CIR: module @"{{.*}}basic.c" attributes {{{.*}}cir.lang = #cir.lang // CIR-NEXT: cir.func @foo(%arg0: !s32i loc({{.*}})) -> !s32i -// CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} -// CIR-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CIR-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr -// CIR-NEXT: %2 = cir.load %0 : cir.ptr , !s32i -// CIR-NEXT: %3 = cir.load %0 : cir.ptr , !s32i -// CIR-NEXT: cir.store %3, %1 : !s32i, cir.ptr -// CIR-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} +// CIR-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CIR-NEXT: cir.store %arg0, %0 : !s32i, !cir.ptr +// CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i +// CIR-NEXT: %3 = cir.load %0 : !cir.ptr, !s32i +// CIR-NEXT: cir.store %3, %1 : !s32i, !cir.ptr +// CIR-NEXT: %4 = cir.load %1 : !cir.ptr, !s32i // CIR-NEXT: cir.return %4 : !s32i int f2(void) { return 3; } // CIR: cir.func @f2() -> !s32i -// CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CIR-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i -// CIR-NEXT: cir.store %1, %0 : !s32i, cir.ptr -// CIR-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i // CIR-NEXT: cir.return %2 : !s32i // LLVM: define i32 @f2() @@ -44,11 +44,11 @@ int f3(void) { } // CIR: cir.func @f3() -> !s32i -// CIR-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CIR-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CIR-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CIR-NEXT: %2 = cir.const(#cir.int<3> : !s32i) : !s32i -// CIR-NEXT: cir.store %2, %1 : !s32i, cir.ptr -// CIR-NEXT: %3 = cir.load %1 : cir.ptr , !s32i -// CIR-NEXT: cir.store %3, %0 : !s32i, cir.ptr -// CIR-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CIR-NEXT: cir.store %2, %1 : !s32i, !cir.ptr +// CIR-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i +// CIR-NEXT: cir.store %3, %0 : !s32i, !cir.ptr +// CIR-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i // CIR-NEXT: cir.return %4 : !s32i diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 83c423ea917c..c1b60288e981 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -7,9 +7,9 @@ int *p0() { } // CHECK: cir.func @_Z2p0v() -> !cir.ptr -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] // CHECK: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > +// CHECK: cir.store %2, %1 : !cir.ptr, !cir.ptr> int *p1() { int *p; @@ -18,9 +18,9 @@ int *p1() { } // CHECK: cir.func @_Z2p1v() -> !cir.ptr -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p"] +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["p"] // CHECK: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > +// CHECK: cir.store %2, %1 : !cir.ptr, !cir.ptr> int *p2() { int *p = nullptr; @@ -34,25 +34,25 @@ int *p2() { } // CHECK: cir.func @_Z2p2v() -> !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["p", init] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} // CHECK-NEXT: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK-NEXT: cir.store %2, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %2, %1 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %7 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: %7 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK-NEXT: %8 = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK-NEXT: cir.store %8, %7 : !s32i, cir.ptr -// CHECK-NEXT: cir.store %7, %1 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %8, %7 : !s32i, !cir.ptr +// CHECK-NEXT: cir.store %7, %1 : !cir.ptr, !cir.ptr> // CHECK-NEXT: %9 = cir.const(#cir.int<42> : !s32i) : !s32i -// CHECK-NEXT: %10 = cir.load deref %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.store %9, %10 : !s32i, cir.ptr +// CHECK-NEXT: %10 = cir.load deref %1 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.store %9, %10 : !s32i, !cir.ptr // CHECK-NEXT: } loc(#[[locScope:loc[0-9]+]]) // CHECK-NEXT: %3 = cir.const(#cir.int<42> : !s32i) : !s32i -// CHECK-NEXT: %4 = cir.load deref %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.store %3, %4 : !s32i, cir.ptr -// CHECK-NEXT: %5 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK-NEXT: cir.store %5, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %6 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %4 = cir.load deref %1 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.store %3, %4 : !s32i, !cir.ptr +// CHECK-NEXT: %5 = cir.load %1 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.store %5, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %6 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.return %6 : !cir.ptr void b0() { bool x = true, y = false; } @@ -64,9 +64,9 @@ void b0() { bool x = true, y = false; } void b1(int a) { bool b = a; } // CHECK: cir.func @_Z2b1i(%arg0: !s32i loc({{.*}})) -// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK: %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool -// CHECK: cir.store %3, %1 : !cir.bool, cir.ptr +// CHECK: cir.store %3, %1 : !cir.bool, !cir.ptr void if0(int a) { int x = 0; @@ -79,14 +79,14 @@ void if0(int a) { // CHECK: cir.func @_Z3if0i(%arg0: !s32i loc({{.*}})) // CHECK: cir.scope { -// CHECK: %3 = cir.load %0 : cir.ptr , !s32i +// CHECK: %3 = cir.load %0 : !cir.ptr, !s32i // CHECK: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.if %4 { // CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: } else { // CHECK-NEXT: %5 = cir.const(#cir.int<4> : !s32i) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: } // CHECK: } @@ -107,28 +107,28 @@ void if1(int a, bool b, bool c) { // CHECK: cir.func @_Z3if1ibb(%arg0: !s32i loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) // CHECK: cir.scope { -// CHECK: %5 = cir.load %0 : cir.ptr , !s32i +// CHECK: %5 = cir.load %0 : !cir.ptr, !s32i // CHECK: %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool // CHECK: cir.if %6 { // CHECK: %7 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: cir.store %7, %3 : !s32i, cir.ptr +// CHECK: cir.store %7, %3 : !s32i, !cir.ptr // CHECK: cir.scope { -// CHECK: %8 = cir.load %1 : cir.ptr , !cir.bool +// CHECK: %8 = cir.load %1 : !cir.ptr, !cir.bool // CHECK-NEXT: cir.if %8 { // CHECK-NEXT: %9 = cir.const(#cir.int<8> : !s32i) : !s32i -// CHECK-NEXT: cir.store %9, %3 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %9, %3 : !s32i, !cir.ptr // CHECK-NEXT: } // CHECK: } // CHECK: } else { // CHECK: cir.scope { -// CHECK: %8 = cir.load %2 : cir.ptr , !cir.bool +// CHECK: %8 = cir.load %2 : !cir.ptr, !cir.bool // CHECK-NEXT: cir.if %8 { // CHECK-NEXT: %9 = cir.const(#cir.int<14> : !s32i) : !s32i -// CHECK-NEXT: cir.store %9, %3 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %9, %3 : !s32i, !cir.ptr // CHECK-NEXT: } // CHECK: } // CHECK: %7 = cir.const(#cir.int<4> : !s32i) : !s32i -// CHECK: cir.store %7, %3 : !s32i, cir.ptr +// CHECK: cir.store %7, %3 : !s32i, !cir.ptr // CHECK: } // CHECK: } @@ -155,12 +155,12 @@ void x() { } // CHECK: cir.func @_Z1xv() -// CHECK: %0 = cir.alloca !cir.bool, cir.ptr , ["b0", init] {alignment = 1 : i64} -// CHECK: %1 = cir.alloca !cir.bool, cir.ptr , ["b1", init] {alignment = 1 : i64} +// CHECK: %0 = cir.alloca !cir.bool, !cir.ptr, ["b0", init] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !cir.bool, !cir.ptr, ["b1", init] {alignment = 1 : i64} // CHECK: %2 = cir.const(#true) : !cir.bool -// CHECK: cir.store %2, %0 : !cir.bool, cir.ptr +// CHECK: cir.store %2, %0 : !cir.bool, !cir.ptr // CHECK: %3 = cir.const(#false) : !cir.bool -// CHECK: cir.store %3, %1 : !cir.bool, cir.ptr +// CHECK: cir.store %3, %1 : !cir.bool, !cir.ptr typedef unsigned long size_type; typedef unsigned long _Tp; @@ -170,7 +170,7 @@ size_type max_size() { } // CHECK: cir.func @_Z8max_sizev() -// CHECK: %0 = cir.alloca !u64i, cir.ptr , ["__retval"] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !u64i, !cir.ptr, ["__retval"] {alignment = 8 : i64} // CHECK: %1 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: %2 = cir.unary(not, %1) : !s32i, !s32i // CHECK: %3 = cir.cast(integral, %2 : !s32i), !u64i diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp index a0a029ef7e9a..79cbc8baa96c 100644 --- a/clang/test/CIR/CodeGen/binassign.cpp +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -16,7 +16,7 @@ int foo(int a, int b) { return x; } -// CHECK: [[Value:%[0-9]+]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: [[Value:%[0-9]+]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK: = cir.binop(mul, // CHECK: = cir.load {{.*}}[[Value]] // CHECK: = cir.binop(mul, @@ -61,10 +61,10 @@ void exec() { } // CHECK: cir.func @_Z4execv() -// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["r"] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["r"] {alignment = 4 : i64} // CHECK: cir.scope { // CHECK: %1 = cir.call @_Z5gettyv() : () -> !u32i -// CHECK: cir.store %1, %0 : !u32i, cir.ptr +// CHECK: cir.store %1, %0 : !u32i, !cir.ptr // CHECK: %2 = cir.cast(integral, %1 : !u32i), !s32i // CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index cb39a9b8ebfc..93b1c918fe33 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -65,7 +65,7 @@ typedef struct { // CHECK: #bfi_d = #cir.bitfield_info, size = 2, offset = 17, is_signed = true> // CHECK: cir.func {{.*@store_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, !cir.ptr // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i // CHECK: [[TMP2:%.*]] = cir.get_member [[TMP0]][2] {name = "e"} : !cir.ptr -> !cir.ptr // CHECK: cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) @@ -75,8 +75,8 @@ void store_field() { } // CHECK: cir.func {{.*@load_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] -// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> // CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr>) -> !s32i int load_field(S* s) { @@ -122,10 +122,10 @@ void createU() { // for this struct type we create an anon structure with different storage types in initialization // CHECK: cir.func {{.*@createD}} -// CHECK: %0 = cir.alloca !ty_22D22, cir.ptr , ["d"] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca !ty_22D22, !cir.ptr, ["d"] {alignment = 4 : i64} // CHECK: %1 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<33> : !u8i, #cir.int<0> : !u8i, #cir.int<3> : !s32i}> : !ty_anon_struct) : !ty_anon_struct -// CHECK: cir.store %2, %1 : !ty_anon_struct, cir.ptr +// CHECK: cir.store %2, %1 : !ty_anon_struct, !cir.ptr void createD() { D d = {1,2,3}; } diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 322cea9dd78a..4a7f16beff0a 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -33,7 +33,7 @@ typedef struct { // CHECK: !ty_22__long22 = !cir.struct}> // CHECK: cir.func @_Z11store_field -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, !cir.ptr // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i // CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr // CHECK: cir.set_bitfield(#bfi_a, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) @@ -43,8 +43,8 @@ void store_field() { } // CHECK: cir.func @_Z10load_field -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] -// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> // CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr>) -> !s32i int load_field(S& s) { diff --git a/clang/test/CIR/CodeGen/bitint.c b/clang/test/CIR/CodeGen/bitint.c index 51111ee1dafc..176339c81af3 100644 --- a/clang/test/CIR/CodeGen/bitint.c +++ b/clang/test/CIR/CodeGen/bitint.c @@ -8,15 +8,15 @@ void VLATest(_BitInt(3) A, _BitInt(42) B, _BitInt(17) C) { } // CHECK: cir.func @VLATest -// CHECK: %[[#A:]] = cir.load %{{.+}} : cir.ptr >, !cir.int +// CHECK: %[[#A:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int // CHECK-NEXT: %[[#A_PROMOTED:]] = cir.cast(integral, %[[#A]] : !cir.int), !u64i // CHECK-NEXT: %[[#SP:]] = cir.stack_save : !cir.ptr -// CHECK-NEXT: cir.store %[[#SP]], %{{.+}} : !cir.ptr, cir.ptr > -// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, cir.ptr , %[[#A_PROMOTED]] : !u64i -// CHECK-NEXT: %[[#B:]] = cir.load %1 : cir.ptr >, !cir.int +// CHECK-NEXT: cir.store %[[#SP]], %{{.+}} : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, !cir.ptr, %[[#A_PROMOTED]] : !u64i +// CHECK-NEXT: %[[#B:]] = cir.load %1 : !cir.ptr>, !cir.int // CHECK-NEXT: %[[#B_PROMOTED:]] = cir.cast(integral, %[[#B]] : !cir.int), !u64i -// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, cir.ptr , %[[#B_PROMOTED]] : !u64i -// CHECK-NEXT: %[[#C:]] = cir.load %2 : cir.ptr >, !cir.int +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, !cir.ptr, %[[#B_PROMOTED]] : !u64i +// CHECK-NEXT: %[[#C:]] = cir.load %2 : !cir.ptr>, !cir.int // CHECK-NEXT: %[[#C_PROMOTED:]] = cir.cast(integral, %[[#C]] : !cir.int), !u64i -// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, cir.ptr , %[[#C_PROMOTED]] : !u64i +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, !cir.ptr, %[[#C_PROMOTED]] : !u64i // CHECK: } diff --git a/clang/test/CIR/CodeGen/bitint.cpp b/clang/test/CIR/CodeGen/bitint.cpp index fad50e1ee858..32bda23e663a 100644 --- a/clang/test/CIR/CodeGen/bitint.cpp +++ b/clang/test/CIR/CodeGen/bitint.cpp @@ -37,7 +37,7 @@ void test_init_for_mem() { // CHECK: cir.func @_Z17test_init_for_memv() // CHECK: %[[#LITERAL:]] = cir.const(#cir.int<42> : !s32i) : !s32i // CHECK-NEXT: %[[#INIT:]] = cir.cast(integral, %[[#LITERAL]] : !s32i), !cir.int -// CHECK-NEXT: cir.store %[[#INIT]], %{{.+}} : !cir.int, cir.ptr > +// CHECK-NEXT: cir.store %[[#INIT]], %{{.+}} : !cir.int, !cir.ptr> // CHECK: } i10 test_arith(i10 lhs, i10 rhs) { @@ -45,8 +45,8 @@ i10 test_arith(i10 lhs, i10 rhs) { } // CHECK: cir.func @_Z10test_arithDB10_S_(%arg0: !cir.int loc({{.+}}), %arg1: !cir.int loc({{.+}})) -> !cir.int -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : cir.ptr >, !cir.int -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : cir.ptr >, !cir.int +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int // CHECK-NEXT: %{{.+}} = cir.binop(add, %[[#LHS]], %[[#RHS]]) : !cir.int // CHECK: } @@ -56,11 +56,11 @@ void Size1ExtIntParam(unsigned _BitInt(1) A) { } // CHECK: cir.func @_Z16Size1ExtIntParamDU1_ -// CHECK: %[[#A:]] = cir.load %{{.+}} : cir.ptr >, !cir.int +// CHECK: %[[#A:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int // CHECK-NEXT: %[[#IDX:]] = cir.const(#cir.int<2> : !s32i) : !s32i // CHECK-NEXT: %[[#ARRAY:]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr x 5>>), !cir.ptr> // CHECK-NEXT: %[[#PTR:]] = cir.ptr_stride(%[[#ARRAY]] : !cir.ptr>, %[[#IDX]] : !s32i), !cir.ptr> -// CHECK-NEXT: cir.store %[[#A]], %[[#PTR]] : !cir.int, cir.ptr > +// CHECK-NEXT: cir.store %[[#A]], %[[#PTR]] : !cir.int, !cir.ptr> // CHECK: } struct S { diff --git a/clang/test/CIR/CodeGen/bool.c b/clang/test/CIR/CodeGen/bool.c index 7af7527c4a76..d3f4f2748672 100644 --- a/clang/test/CIR/CodeGen/bool.c +++ b/clang/test/CIR/CodeGen/bool.c @@ -8,32 +8,32 @@ typedef struct { } S; // CHECK: cir.func @init_bool -// CHECK: [[ALLOC:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[ALLOC:%.*]] = cir.alloca !ty_22S22, !cir.ptr // CHECK: [[ZERO:%.*]] = cir.const(#cir.zero : !ty_22S22) : !ty_22S22 -// CHECK: cir.store [[ZERO]], [[ALLOC]] : !ty_22S22, cir.ptr +// CHECK: cir.store [[ZERO]], [[ALLOC]] : !ty_22S22, !cir.ptr void init_bool(void) { S s = {0}; } // CHECK: cir.func @store_bool -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: [[TMP2:%.*]] = cir.cast(int_to_bool, [[TMP1]] : !s32i), !cir.bool -// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP4:%.*]] = cir.get_member [[TMP3]][0] {name = "x"} : !cir.ptr -> !cir.ptr -// CHECK: cir.store [[TMP2]], [[TMP4]] : !cir.bool, cir.ptr +// CHECK: cir.store [[TMP2]], [[TMP4]] : !cir.bool, !cir.ptr void store_bool(S *s) { s->x = false; } // CHECK: cir.func @load_bool -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK: [[TMP1:%.*]] = cir.alloca !cir.bool, cir.ptr , ["x", init] {alignment = 1 : i64} -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.bool, !cir.ptr, ["x", init] {alignment = 1 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "x"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : cir.ptr , !cir.bool +// CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : !cir.ptr, !cir.bool void load_bool(S *s) { bool x = s->x; } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/builtin-alloca.c b/clang/test/CIR/CodeGen/builtin-alloca.c index a02f328cc12f..f79a9f8c9a83 100644 --- a/clang/test/CIR/CodeGen/builtin-alloca.c +++ b/clang/test/CIR/CodeGen/builtin-alloca.c @@ -10,9 +10,9 @@ void my_alloca(size_t n) int *c1 = alloca(n); } // CIR: cir.func @my_alloca([[ALLOCA_SIZE:%.*]]: !u64i -// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr -// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i -// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, !cir.ptr +// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : !cir.ptr, !u64i +// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, !cir.ptr, [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} // CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr // CIR: } @@ -29,9 +29,9 @@ void my___builtin_alloca(size_t n) } // CIR: cir.func @my___builtin_alloca([[ALLOCA_SIZE:%.*]]: !u64i -// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr -// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i -// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, !cir.ptr +// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : !cir.ptr, !u64i +// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, !cir.ptr, [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} // CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr // CIR: } @@ -48,9 +48,9 @@ void my__builtin_alloca_uninitialized(size_t n) } // CIR: cir.func @my__builtin_alloca_uninitialized([[ALLOCA_SIZE:%.*]]: !u64i -// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr -// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i -// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, !cir.ptr +// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : !cir.ptr, !u64i +// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, !cir.ptr, [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} // CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr // CIR: } diff --git a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp index 9aa3175eeecd..be633108597f 100644 --- a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp +++ b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp @@ -3,9 +3,9 @@ auto func() { return __builtin_strcmp("", ""); // CHECK: cir.func @_Z4funcv() - // CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) + // CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} loc(#loc2) // CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc7) - // CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr loc(#loc8) - // CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i loc(#loc8) + // CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr loc(#loc8) + // CHECK-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i loc(#loc8) // CHECK-NEXT: cir.return %2 : !s32i loc(#loc8) } diff --git a/clang/test/CIR/CodeGen/builtin-constant-p.c b/clang/test/CIR/CodeGen/builtin-constant-p.c index 1b3dbe7e9275..4d6b5c9e5597 100644 --- a/clang/test/CIR/CodeGen/builtin-constant-p.c +++ b/clang/test/CIR/CodeGen/builtin-constant-p.c @@ -7,13 +7,13 @@ int foo() { } // CIR: cir.func no_proto @foo() -> !s32i extra(#fn_attr) -// CIR: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CIR: [[TMP1:%.*]] = cir.get_global @a : cir.ptr -// CIR: [[TMP2:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +// CIR: [[TMP0:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CIR: [[TMP1:%.*]] = cir.get_global @a : !cir.ptr +// CIR: [[TMP2:%.*]] = cir.load [[TMP1]] : !cir.ptr, !s32i // CIR: [[TMP3:%.*]] = cir.is_constant([[TMP2]] : !s32i) : !cir.bool // CIR: [[TMP4:%.*]] = cir.cast(bool_to_int, [[TMP3]] : !cir.bool), !s32i -// CIR: cir.store [[TMP4]], [[TMP0]] : !s32i, cir.ptr -// CIR: [[TMP5:%.*]] = cir.load [[TMP0]] : cir.ptr , !s32i +// CIR: cir.store [[TMP4]], [[TMP0]] : !s32i, !cir.ptr +// CIR: [[TMP5:%.*]] = cir.load [[TMP0]] : !cir.ptr, !s32i // CIR: cir.return [[TMP5]] : !s32i // LLVM:define i32 @foo() diff --git a/clang/test/CIR/CodeGen/builtin-ms-alloca.c b/clang/test/CIR/CodeGen/builtin-ms-alloca.c index d500304d7f6d..2a3176955bc0 100644 --- a/clang/test/CIR/CodeGen/builtin-ms-alloca.c +++ b/clang/test/CIR/CodeGen/builtin-ms-alloca.c @@ -9,9 +9,9 @@ void my_win_alloca(size_t n) } // CIR: cir.func @my_win_alloca([[ALLOCA_SIZE:%.*]]: !u64i -// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, cir.ptr -// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : cir.ptr , !u64i -// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, cir.ptr , [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// CIR: cir.store [[ALLOCA_SIZE]], [[LOCAL_VAR_ALLOCA_SIZE:%.*]] : !u64i, !cir.ptr +// CIR: [[TMP_ALLOCA_SIZE:%.*]] = cir.load [[LOCAL_VAR_ALLOCA_SIZE]] : !cir.ptr, !u64i +// CIR: [[ALLOCA_RES:%.*]] = cir.alloca !u8i, !cir.ptr, [[TMP_ALLOCA_SIZE]] : !u64i, ["bi_alloca"] {alignment = 16 : i64} // CIR-NEXT: cir.cast(bitcast, [[ALLOCA_RES]] : !cir.ptr), !cir.ptr // CIR: } diff --git a/clang/test/CIR/CodeGen/builtin-prefetch.c b/clang/test/CIR/CodeGen/builtin-prefetch.c index 21b908d085bd..fb84a1204892 100644 --- a/clang/test/CIR/CodeGen/builtin-prefetch.c +++ b/clang/test/CIR/CodeGen/builtin-prefetch.c @@ -6,9 +6,9 @@ void foo(void *a) { } // CIR: cir.func @foo(%arg0: !cir.ptr loc({{.*}})) -// CIR: [[PTR_ALLOC:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} -// CIR: cir.store %arg0, [[PTR_ALLOC]] : !cir.ptr, cir.ptr > -// CIR: [[PTR:%.*]] = cir.load [[PTR_ALLOC]] : cir.ptr >, !cir.ptr +// CIR: [[PTR_ALLOC:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a", init] {alignment = 8 : i64} +// CIR: cir.store %arg0, [[PTR_ALLOC]] : !cir.ptr, !cir.ptr> +// CIR: [[PTR:%.*]] = cir.load [[PTR_ALLOC]] : !cir.ptr>, !cir.ptr // CIR: cir.prefetch([[PTR]] : !cir.ptr) locality(1) write // CIR: cir.return diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 956c72ca0bc5..1ed5e8ca57d3 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -19,29 +19,29 @@ void d(void) { // CHECK: cir.return // CHECK: } // CHECK: cir.func @b(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) -> !s32i -// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["a", init] -// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["b", init] -// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] -// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr -// CHECK: cir.store %arg1, %1 : !s32i, cir.ptr -// CHECK: %3 = cir.load %0 : cir.ptr , !s32i -// CHECK: %4 = cir.load %1 : cir.ptr , !s32i +// CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["a", init] +// CHECK: %1 = cir.alloca !s32i, !cir.ptr, ["b", init] +// CHECK: %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] +// CHECK: cir.store %arg0, %0 : !s32i, !cir.ptr +// CHECK: cir.store %arg1, %1 : !s32i, !cir.ptr +// CHECK: %3 = cir.load %0 : !cir.ptr, !s32i +// CHECK: %4 = cir.load %1 : !cir.ptr, !s32i // CHECK: %5 = cir.binop(add, %3, %4) : !s32i -// CHECK: cir.store %5, %2 : !s32i, cir.ptr -// CHECK: %6 = cir.load %2 : cir.ptr , !s32i +// CHECK: cir.store %5, %2 : !s32i, !cir.ptr +// CHECK: %6 = cir.load %2 : !cir.ptr, !s32i // CHECK: cir.return %6 // CHECK: } // CHECK: cir.func @c(%arg0: !cir.double {{.*}}, %arg1: !cir.double {{.*}}) -> !cir.double -// CHECK: %0 = cir.alloca !cir.double, cir.ptr , ["a", init] -// CHECK: %1 = cir.alloca !cir.double, cir.ptr , ["b", init] -// CHECK: %2 = cir.alloca !cir.double, cir.ptr , ["__retval"] -// CHECK: cir.store %arg0, %0 : !cir.double, cir.ptr -// CHECK: cir.store %arg1, %1 : !cir.double, cir.ptr -// CHECK: %3 = cir.load %0 : cir.ptr , !cir.double -// CHECK: %4 = cir.load %1 : cir.ptr , !cir.double +// CHECK: %0 = cir.alloca !cir.double, !cir.ptr, ["a", init] +// CHECK: %1 = cir.alloca !cir.double, !cir.ptr, ["b", init] +// CHECK: %2 = cir.alloca !cir.double, !cir.ptr, ["__retval"] +// CHECK: cir.store %arg0, %0 : !cir.double, !cir.ptr +// CHECK: cir.store %arg1, %1 : !cir.double, !cir.ptr +// CHECK: %3 = cir.load %0 : !cir.ptr, !cir.double +// CHECK: %4 = cir.load %1 : !cir.ptr, !cir.double // CHECK: %5 = cir.binop(add, %3, %4) : !cir.double -// CHECK: cir.store %5, %2 : !cir.double, cir.ptr -// CHECK: %6 = cir.load %2 : cir.ptr , !cir.double +// CHECK: cir.store %5, %2 : !cir.double, !cir.ptr +// CHECK: %6 = cir.load %2 : !cir.ptr, !cir.double // CHECK: cir.return %6 : !cir.double // CHECK: } // CHECK: cir.func @d() @@ -57,29 +57,29 @@ void d(void) { // CXX-NEXT: cir.return // CXX-NEXT: } // CXX-NEXT: cir.func @_Z1bii(%arg0: !s32i {{.*}}, %arg1: !s32i {{.*}}) -> !s32i -// CXX-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] -// CXX-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["b", init] -// CXX-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] -// CXX-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr -// CXX-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr -// CXX-NEXT: %3 = cir.load %0 : cir.ptr , !s32i -// CXX-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CXX-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["a", init] +// CXX-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["b", init] +// CXX-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] +// CXX-NEXT: cir.store %arg0, %0 : !s32i, !cir.ptr +// CXX-NEXT: cir.store %arg1, %1 : !s32i, !cir.ptr +// CXX-NEXT: %3 = cir.load %0 : !cir.ptr, !s32i +// CXX-NEXT: %4 = cir.load %1 : !cir.ptr, !s32i // CXX-NEXT: %5 = cir.binop(add, %3, %4) : !s32i -// CXX-NEXT: cir.store %5, %2 : !s32i, cir.ptr -// CXX-NEXT: %6 = cir.load %2 : cir.ptr , !s32i +// CXX-NEXT: cir.store %5, %2 : !s32i, !cir.ptr +// CXX-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i // CXX-NEXT: cir.return %6 // CXX-NEXT: } // CXX-NEXT: cir.func @_Z1cdd(%arg0: !cir.double {{.*}}, %arg1: !cir.double {{.*}}) -> !cir.double -// CXX-NEXT: %0 = cir.alloca !cir.double, cir.ptr , ["a", init] -// CXX-NEXT: %1 = cir.alloca !cir.double, cir.ptr , ["b", init] -// CXX-NEXT: %2 = cir.alloca !cir.double, cir.ptr , ["__retval"] -// CXX-NEXT: cir.store %arg0, %0 : !cir.double, cir.ptr -// CXX-NEXT: cir.store %arg1, %1 : !cir.double, cir.ptr -// CXX-NEXT: %3 = cir.load %0 : cir.ptr , !cir.double -// CXX-NEXT: %4 = cir.load %1 : cir.ptr , !cir.double +// CXX-NEXT: %0 = cir.alloca !cir.double, !cir.ptr, ["a", init] +// CXX-NEXT: %1 = cir.alloca !cir.double, !cir.ptr, ["b", init] +// CXX-NEXT: %2 = cir.alloca !cir.double, !cir.ptr, ["__retval"] +// CXX-NEXT: cir.store %arg0, %0 : !cir.double, !cir.ptr +// CXX-NEXT: cir.store %arg1, %1 : !cir.double, !cir.ptr +// CXX-NEXT: %3 = cir.load %0 : !cir.ptr, !cir.double +// CXX-NEXT: %4 = cir.load %1 : !cir.ptr, !cir.double // CXX-NEXT: %5 = cir.binop(add, %3, %4) : !cir.double -// CXX-NEXT: cir.store %5, %2 : !cir.double, cir.ptr -// CXX-NEXT: %6 = cir.load %2 : cir.ptr , !cir.double +// CXX-NEXT: cir.store %5, %2 : !cir.double, !cir.ptr +// CXX-NEXT: %6 = cir.load %2 : !cir.ptr, !cir.double // CXX-NEXT: cir.return %6 : !cir.double // CXX-NEXT: } // CXX-NEXT: cir.func @_Z1dv() diff --git a/clang/test/CIR/CodeGen/call.cpp b/clang/test/CIR/CodeGen/call.cpp index 7f2a8497bad0..ac2ff489db69 100644 --- a/clang/test/CIR/CodeGen/call.cpp +++ b/clang/test/CIR/CodeGen/call.cpp @@ -7,8 +7,8 @@ int f() { } // CHECK: cir.func @_Z1fv() -> !s32i -// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK: %1 = cir.call @_Z1pv() : () -> !cir.ptr -// CHECK: %2 = cir.load %1 : cir.ptr , !s32i +// CHECK: %2 = cir.load %1 : !cir.ptr, !s32i // CHECK: %3 = cir.const(#cir.int<22> : !s32i) : !s32i // CHECK: %4 = cir.binop(sub, %2, %3) nsw : !s32i diff --git a/clang/test/CIR/CodeGen/cast.c b/clang/test/CIR/CodeGen/cast.c index 6e25fcc2abdc..4490910cad43 100644 --- a/clang/test/CIR/CodeGen/cast.c +++ b/clang/test/CIR/CodeGen/cast.c @@ -9,12 +9,12 @@ int cstyle_cast_lvalue(A a) { } // CHECK: cir.func @cstyle_cast_lvalue(%arg0: !ty_22A22 loc({{.*}})) -// CHECK: [[ALLOC_A:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK: [[ALLOC_RET:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: cir.store %arg0, [[ALLOC_A]] : !ty_22A22, cir.ptr +// CHECK: [[ALLOC_A:%.*]] = cir.alloca !ty_22A22, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK: [[ALLOC_RET:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[ALLOC_A]] : !ty_22A22, !cir.ptr // CHECK: [[X_ADDR:%.*]] = cir.get_member [[ALLOC_A]][0] {name = "x"} : !cir.ptr -> !cir.ptr -// CHECK: [[X:%.*]] = cir.load [[X_ADDR]] : cir.ptr , !s32i -// CHECK: cir.store [[X]], [[ALLOC_RET]] : !s32i, cir.ptr -// CHECK: [[RET:%.*]] = cir.load [[ALLOC_RET]] : cir.ptr , !s32i +// CHECK: [[X:%.*]] = cir.load [[X_ADDR]] : !cir.ptr, !s32i +// CHECK: cir.store [[X]], [[ALLOC_RET]] : !s32i, !cir.ptr +// CHECK: [[RET:%.*]] = cir.load [[ALLOC_RET]] : !cir.ptr, !s32i // CHECK: cir.return [[RET]] : !s32i diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index b760e90b131b..b5e15ba784ca 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -6,13 +6,13 @@ unsigned char cxxstaticcast_0(unsigned int x) { } // CHECK: cir.func @_Z15cxxstaticcast_0j -// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !u8i, cir.ptr , ["__retval"] {alignment = 1 : i64} -// CHECK: cir.store %arg0, %0 : !u32i, cir.ptr -// CHECK: %2 = cir.load %0 : cir.ptr , !u32i +// CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !u8i, !cir.ptr, ["__retval"] {alignment = 1 : i64} +// CHECK: cir.store %arg0, %0 : !u32i, !cir.ptr +// CHECK: %2 = cir.load %0 : !cir.ptr, !u32i // CHECK: %3 = cir.cast(integral, %2 : !u32i), !u8i -// CHECK: cir.store %3, %1 : !u8i, cir.ptr -// CHECK: %4 = cir.load %1 : cir.ptr , !u8i +// CHECK: cir.store %3, %1 : !u8i, !cir.ptr +// CHECK: %4 = cir.load %1 : !cir.ptr, !u8i // CHECK: cir.return %4 : !u8i // CHECK: } @@ -92,9 +92,9 @@ bool cptr(void *d) { } // CHECK: cir.func @_Z4cptrPv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] {alignment = 8 : i64} -// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %3 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %4 = cir.cast(ptr_to_bool, %3 : !cir.ptr), !cir.bool void call_cptr(void *d) { @@ -103,10 +103,10 @@ void call_cptr(void *d) { } // CHECK: cir.func @_Z9call_cptrPv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["d", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] {alignment = 8 : i64} // CHECK: cir.scope { -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %2 = cir.call @_Z4cptrPv(%1) : (!cir.ptr) -> !cir.bool // CHECK: %3 = cir.unary(not, %2) : !cir.bool, !cir.bool // CHECK: cir.if %3 { @@ -117,7 +117,7 @@ void lvalue_cast(int x) { // CHECK: cir.func @_Z11lvalue_cast // CHECK: %1 = cir.const(#cir.int<42> : !s32i) : !s32i -// CHECK: cir.store %1, %0 : !s32i, cir.ptr +// CHECK: cir.store %1, %0 : !s32i, !cir.ptr struct A { int x; }; @@ -128,10 +128,10 @@ void null_cast(long ptr) { // CHECK: cir.func @_Z9null_castl // CHECK: %[[ADDR:[0-9]+]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK: cir.store %{{[0-9]+}}, %[[ADDR]] : !s32i, cir.ptr +// CHECK: cir.store %{{[0-9]+}}, %[[ADDR]] : !s32i, !cir.ptr // CHECK: %[[BASE:[0-9]+]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr -// CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, cir.ptr +// CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, !cir.ptr void int_cast(long ptr) { ((A *)ptr)->x = 0; @@ -140,5 +140,5 @@ void int_cast(long ptr) { // CHECK: cir.func @_Z8int_castl // CHECK: %[[BASE:[0-9]+]] = cir.cast(int_to_ptr, %{{[0-9]+}} : !u64i), !cir.ptr // CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr -// CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, cir.ptr +// CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/comma.cpp b/clang/test/CIR/CodeGen/comma.cpp index 4d2ce88b9d26..fd3f11f81d02 100644 --- a/clang/test/CIR/CodeGen/comma.cpp +++ b/clang/test/CIR/CodeGen/comma.cpp @@ -8,13 +8,13 @@ int c0() { } // CHECK: cir.func @_Z2c0v() -> !s32i -// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] -// CHECK: %[[#B:]] = cir.alloca !s32i, cir.ptr , ["b", init] -// CHECK: %[[#LOADED_B:]] = cir.load %[[#B]] : cir.ptr , !s32i +// CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] +// CHECK: %[[#B:]] = cir.alloca !s32i, !cir.ptr, ["b", init] +// CHECK: %[[#LOADED_B:]] = cir.load %[[#B]] : !cir.ptr, !s32i // CHECK: %[[#]] = cir.binop(add, %[[#LOADED_B]], %[[#]]) : !s32i -// CHECK: %[[#LOADED_A:]] = cir.load %[[#A]] : cir.ptr , !s32i -// CHECK: cir.store %[[#LOADED_A]], %[[#RET]] : !s32i, cir.ptr +// CHECK: %[[#LOADED_A:]] = cir.load %[[#A]] : !cir.ptr, !s32i +// CHECK: cir.store %[[#LOADED_A]], %[[#RET]] : !s32i, !cir.ptr int &foo1(); int &foo2(); @@ -24,7 +24,7 @@ void c1() { } // CHECK: cir.func @_Z2c1v() -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr > +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr> // CHECK: %1 = cir.call @_Z4foo1v() : () -> !cir.ptr // CHECK: %2 = cir.call @_Z4foo2v() : () -> !cir.ptr -// CHECK: cir.store %2, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %2, %0 : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c index 32077445af1c..6f4ca8cc4ab5 100644 --- a/clang/test/CIR/CodeGen/compound-literal.c +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -37,14 +37,14 @@ int foo() { } // CIR: cir.func no_proto @foo() -> !s32i -// CIR: [[RET_MEM:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CIR: [[COMPLITERAL_MEM:%.*]] = cir.alloca !ty_22anon2E122, cir.ptr , [".compoundliteral"] {alignment = 4 : i64} +// CIR: [[RET_MEM:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CIR: [[COMPLITERAL_MEM:%.*]] = cir.alloca !ty_22anon2E122, !cir.ptr, [".compoundliteral"] {alignment = 4 : i64} // CIR: [[FIELD:%.*]] = cir.get_member [[COMPLITERAL_MEM]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CIR: [[ONE:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i -// CIR: cir.store [[ONE]], [[FIELD]] : !s32i, cir.ptr +// CIR: cir.store [[ONE]], [[FIELD]] : !s32i, !cir.ptr // CIR: [[ONE:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i -// CIR: cir.store [[ONE]], [[RET_MEM]] : !s32i, cir.ptr -// CIR: [[RET:%.*]] = cir.load [[RET_MEM]] : cir.ptr , !s32i +// CIR: cir.store [[ONE]], [[RET_MEM]] : !s32i, !cir.ptr +// CIR: [[RET:%.*]] = cir.load [[RET_MEM]] : !cir.ptr, !s32i // CIR: cir.return [[RET]] : !s32i struct G { short x, y, z; }; @@ -53,7 +53,7 @@ struct G g(int x, int y, int z) { } // CIR: cir.func @g -// CIR: %[[RETVAL:.*]] = cir.alloca !ty_22G22, cir.ptr , ["__retval"] {alignment = 2 : i64} loc(#loc18) +// CIR: %[[RETVAL:.*]] = cir.alloca !ty_22G22, !cir.ptr, ["__retval"] {alignment = 2 : i64} loc(#loc18) // CIR: %[[X:.*]] = cir.get_member %[[RETVAL]][0] {name = "x"} // CIR: cir.store {{.*}}, %[[X]] : !s16i // CIR: %[[Y:.*]] = cir.get_member %[[RETVAL]][1] {name = "y"} diff --git a/clang/test/CIR/CodeGen/cond.cpp b/clang/test/CIR/CodeGen/cond.cpp index fdeab5942e7b..a1395baf9eee 100644 --- a/clang/test/CIR/CodeGen/cond.cpp +++ b/clang/test/CIR/CodeGen/cond.cpp @@ -11,22 +11,22 @@ min(const unsigned long& __a, const unsigned long& __b) { } // CHECK: cir.func @_Z3minRKmS0_(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["__a", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__b", init] {alignment = 8 : i64} -// CHECK: %2 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["__a", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["__b", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> // CHECK: cir.scope { -// CHECK: %4 = cir.alloca !ty_22__less22, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: %4 = cir.alloca !ty_22__less22, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} // CHECK: cir.call @_ZN6__lessC1Ev(%4) : (!cir.ptr) -> () -// CHECK: %5 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK: %6 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %5 = cir.load %1 : !cir.ptr>, !cir.ptr +// CHECK: %6 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %7 = cir.call @_ZNK6__lessclERKmS1_(%4, %5, %6) : (!cir.ptr, !cir.ptr, !cir.ptr) -> !cir.bool // CHECK: %8 = cir.ternary(%7, true { -// CHECK: %9 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %9 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK: cir.yield %9 : !cir.ptr // CHECK: }, false { -// CHECK: %9 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %9 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: cir.yield %9 : !cir.ptr // CHECK: }) : (!cir.bool) -> !cir.ptr -// CHECK: cir.store %8, %2 : !cir.ptr, cir.ptr > \ No newline at end of file +// CHECK: cir.store %8, %2 : !cir.ptr, !cir.ptr> \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/const-array.c b/clang/test/CIR/CodeGen/const-array.c index eb0adceabdad..7ae6f852d7ca 100644 --- a/clang/test/CIR/CodeGen/const-array.c +++ b/clang/test/CIR/CodeGen/const-array.c @@ -6,13 +6,13 @@ void bar() { // CHECK: cir.global "private" constant internal @bar.arr = #cir.const_array<[#cir.int<1> : !s32i]> : !cir.array {alignment = 4 : i64} // CHECK: cir.func no_proto @bar() -// CHECK: {{.*}} = cir.get_global @bar.arr : cir.ptr > +// CHECK: {{.*}} = cir.get_global @bar.arr : !cir.ptr> void foo() { int a[10] = {1}; } // CHECK: cir.func {{.*@foo}} -// CHECK: %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} +// CHECK: %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} // CHECK: %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array) : !cir.array -// CHECK: cir.store %1, %0 : !cir.array, cir.ptr > +// CHECK: cir.store %1, %0 : !cir.array, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/const-bitfields.c b/clang/test/CIR/CodeGen/const-bitfields.c index bf54e1893d02..24a6710af516 100644 --- a/clang/test/CIR/CodeGen/const-bitfields.c +++ b/clang/test/CIR/CodeGen/const-bitfields.c @@ -28,7 +28,7 @@ struct Inner var = { 1, 0, 1, 21}; // CHECK: cir.func {{.*@getZ()}} -// CHECK: %1 = cir.get_global @GV : cir.ptr +// CHECK: %1 = cir.get_global @GV : !cir.ptr // CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr // CHECK: %3 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr> // CHECK: %4 = cir.get_bitfield(#bfi_Z, %3 : !cir.ptr>) -> !s32i @@ -38,7 +38,7 @@ int getZ() { // check the type used is the type of T struct for plain field // CHECK: cir.func {{.*@getW()}} -// CHECK: %1 = cir.get_global @GV : cir.ptr +// CHECK: %1 = cir.get_global @GV : !cir.ptr // CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr // CHECK: %3 = cir.get_member %2[1] {name = "W"} : !cir.ptr -> !cir.ptr int getW() { diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index a67447572b16..e292806175de 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -153,7 +153,7 @@ VoidTask silly_task() { // Allocate promise. // CHECK: %[[#VoidTaskAddr:]] = cir.alloca ![[VoidTask]], {{.*}}, ["__retval"] -// CHECK: %[[#SavedFrameAddr:]] = cir.alloca !cir.ptr, cir.ptr >, ["__coro_frame_addr"] {alignment = 8 : i64} +// CHECK: %[[#SavedFrameAddr:]] = cir.alloca !cir.ptr, !cir.ptr>, ["__coro_frame_addr"] {alignment = 8 : i64} // CHECK: %[[#VoidPromisseAddr:]] = cir.alloca ![[VoidPromisse]], {{.*}}, ["__promise"] // Get coroutine id with __builtin_coro_id. @@ -166,13 +166,13 @@ VoidTask silly_task() { // call __builtin_coro_begin for the final coroutine frame address. // CHECK: %[[#ShouldAlloc:]] = cir.call @__builtin_coro_alloc(%[[#CoroId]]) : (!u32i) -> !cir.bool -// CHECK: cir.store %[[#NullPtr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > +// CHECK: cir.store %[[#NullPtr]], %[[#SavedFrameAddr]] : !cir.ptr, !cir.ptr> // CHECK: cir.if %[[#ShouldAlloc]] { // CHECK: %[[#CoroSize:]] = cir.call @__builtin_coro_size() : () -> !u64i // CHECK: %[[#AllocAddr:]] = cir.call @_Znwm(%[[#CoroSize]]) : (!u64i) -> !cir.ptr -// CHECK: cir.store %[[#AllocAddr]], %[[#SavedFrameAddr]] : !cir.ptr, cir.ptr > +// CHECK: cir.store %[[#AllocAddr]], %[[#SavedFrameAddr]] : !cir.ptr, !cir.ptr> // CHECK: } -// CHECK: %[[#Load0:]] = cir.load %[[#SavedFrameAddr]] : cir.ptr >, !cir.ptr +// CHECK: %[[#Load0:]] = cir.load %[[#SavedFrameAddr]] : !cir.ptr>, !cir.ptr // CHECK: %[[#CoroFrameAddr:]] = cir.call @__builtin_coro_begin(%[[#CoroId]], %[[#Load0]]) // Call promise.get_return_object() to retrieve the task object. @@ -222,7 +222,7 @@ VoidTask silly_task() { // CHECK: cir.store %[[#FromAddrRes]], %[[#CoroHandlePromiseAddr]] : ![[CoroHandlePromise]] // CHECK: %[[#CoroHandlePromiseReload:]] = cir.load %[[#CoroHandlePromiseAddr]] // CHECK: cir.call @_ZNSt16coroutine_handleIvEC1IN5folly4coro4TaskIvE12promise_typeEEES_IT_E(%[[#CoroHandleVoidAddr]], %[[#CoroHandlePromiseReload]]) -// CHECK: %[[#CoroHandleVoidReload:]] = cir.load %[[#CoroHandleVoidAddr]] : cir.ptr , ![[CoroHandleVoid]] +// CHECK: %[[#CoroHandleVoidReload:]] = cir.load %[[#CoroHandleVoidAddr]] : !cir.ptr, ![[CoroHandleVoid]] // CHECK: cir.call @_ZNSt14suspend_always13await_suspendESt16coroutine_handleIvE(%[[#SuspendAlwaysAddr]], %[[#CoroHandleVoidReload]]) // CHECK: cir.yield @@ -304,7 +304,7 @@ folly::coro::Task go1() { } // CHECK: cir.func coroutine @_Z3go1v() -// CHECK: %[[#IntTaskAddr:]] = cir.alloca ![[IntTask]], cir.ptr , ["task", init] +// CHECK: %[[#IntTaskAddr:]] = cir.alloca ![[IntTask]], !cir.ptr, ["task", init] // CHECK: cir.await(init, ready : { // CHECK: }, suspend : { @@ -314,21 +314,21 @@ folly::coro::Task go1() { // The call to go(1) has its own scope due to full-expression rules. // CHECK: cir.scope { -// CHECK: %[[#OneAddr:]] = cir.alloca !s32i, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} +// CHECK: %[[#OneAddr:]] = cir.alloca !s32i, !cir.ptr, ["ref.tmp1", init] {alignment = 4 : i64} // CHECK: %[[#One:]] = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: cir.store %[[#One]], %[[#OneAddr]] : !s32i, cir.ptr +// CHECK: cir.store %[[#One]], %[[#OneAddr]] : !s32i, !cir.ptr // CHECK: %[[#IntTaskTmp:]] = cir.call @_Z2goRKi(%[[#OneAddr]]) : (!cir.ptr) -> ![[IntTask]] -// CHECK: cir.store %[[#IntTaskTmp]], %[[#IntTaskAddr]] : ![[IntTask]], cir.ptr +// CHECK: cir.store %[[#IntTaskTmp]], %[[#IntTaskAddr]] : ![[IntTask]], !cir.ptr // CHECK: } -// CHECK: %[[#CoReturnValAddr:]] = cir.alloca !s32i, cir.ptr , ["__coawait_resume_rval"] {alignment = 1 : i64} +// CHECK: %[[#CoReturnValAddr:]] = cir.alloca !s32i, !cir.ptr, ["__coawait_resume_rval"] {alignment = 1 : i64} // CHECK: cir.await(user, ready : { // CHECK: }, suspend : { // CHECK: }, resume : { // CHECK: %[[#ResumeVal:]] = cir.call @_ZN5folly4coro4TaskIiE12await_resumeEv(%3) -// CHECK: cir.store %[[#ResumeVal]], %[[#CoReturnValAddr]] : !s32i, cir.ptr +// CHECK: cir.store %[[#ResumeVal]], %[[#CoReturnValAddr]] : !s32i, !cir.ptr // CHECK: },) -// CHECK: %[[#V:]] = cir.load %[[#CoReturnValAddr]] : cir.ptr , !s32i +// CHECK: %[[#V:]] = cir.load %[[#CoReturnValAddr]] : !cir.ptr, !s32i // CHECK: cir.call @_ZN5folly4coro4TaskIiE12promise_type12return_valueEi({{.*}}, %[[#V]]) folly::coro::Task go1_lambda() { @@ -356,23 +356,23 @@ folly::coro::Task go4() { // CHECK: } // CHECK: %12 = cir.scope { -// CHECK: %17 = cir.alloca !ty_22anon2E522, cir.ptr , ["ref.tmp1"] {alignment = 1 : i64} +// CHECK: %17 = cir.alloca !ty_22anon2E522, !cir.ptr, ["ref.tmp1"] {alignment = 1 : i64} // Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` // CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> // CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> // CHECK: cir.yield %19 : !cir.ptr)>> // CHECK: } -// CHECK: cir.store %12, %3 : !cir.ptr)>>, cir.ptr )>>> +// CHECK: cir.store %12, %3 : !cir.ptr)>>, !cir.ptr)>>> // CHECK: cir.scope { -// CHECK: %17 = cir.alloca !s32i, cir.ptr , ["ref.tmp2", init] {alignment = 4 : i64} -// CHECK: %18 = cir.load %3 : cir.ptr )>>>, !cir.ptr)>> +// CHECK: %17 = cir.alloca !s32i, !cir.ptr, ["ref.tmp2", init] {alignment = 4 : i64} +// CHECK: %18 = cir.load %3 : !cir.ptr)>>>, !cir.ptr)>> // CHECK: %19 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: cir.store %19, %17 : !s32i, cir.ptr +// CHECK: cir.store %19, %17 : !s32i, !cir.ptr // Call invoker, which calls operator() indirectly. // CHECK: %20 = cir.call %18(%17) : (!cir.ptr)>>, !cir.ptr) -> ![[IntTask]] -// CHECK: cir.store %20, %4 : ![[IntTask]], cir.ptr +// CHECK: cir.store %20, %4 : ![[IntTask]], !cir.ptr // CHECK: } // CHECK: cir.await(user, ready : { diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp index 865b05b267b5..6476659ef41f 100644 --- a/clang/test/CIR/CodeGen/ctor-alias.cpp +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -9,18 +9,18 @@ void t() { } // CHECK: cir.func linkonce_odr @_ZN11DummyStringC2EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NOT: cir.fun @_ZN11DummyStringC1EPKc // CHECK: cir.func @_Z1tv -// CHECK-NEXT: %0 = cir.alloca !ty_22DummyString22, cir.ptr , ["s4", init] {alignment = 1 : i64} -// CHECK-NEXT: %1 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %0 = cir.alloca !ty_22DummyString22, !cir.ptr, ["s4", init] {alignment = 1 : i64} +// CHECK-NEXT: %1 = cir.get_global @".str" : !cir.ptr> // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return @@ -32,9 +32,9 @@ B::B() { } // CHECK: cir.func @_ZN1BC2Ev(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: cir.return // CHECK: } // CHECK: cir.func @_ZN1BC1Ev(!cir.ptr) alias(@_ZN1BC2Ev) \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index 743e1db42584..a777b1ef3dd4 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -6,16 +6,16 @@ struct String { long size; String(const String &s) : size{s.size} {} // CHECK: cir.func linkonce_odr @_ZN6StringC2ERKS_ -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 // CHECK: cir.store %arg1, %1 // CHECK: %2 = cir.load %0 // CHECK: %3 = cir.get_member %2[0] {name = "size"} // CHECK: %4 = cir.load %1 // CHECK: %5 = cir.get_member %4[0] {name = "size"} -// CHECK: %6 = cir.load %5 : cir.ptr , !s64i -// CHECK: cir.store %6, %3 : !s64i, cir.ptr +// CHECK: %6 = cir.load %5 : !cir.ptr, !s64i +// CHECK: cir.store %6, %3 : !s64i, !cir.ptr // CHECK: cir.return // CHECK: } @@ -28,8 +28,8 @@ void foo() { // FIXME: s1 shouldn't be uninitialized. // cir.func @_Z3foov() { - // %0 = cir.alloca !ty_22String22, cir.ptr , ["s"] {alignment = 8 : i64} - // %1 = cir.alloca !ty_22String22, cir.ptr , ["s1"] {alignment = 8 : i64} + // %0 = cir.alloca !ty_22String22, !cir.ptr, ["s"] {alignment = 8 : i64} + // %1 = cir.alloca !ty_22String22, !cir.ptr, ["s1"] {alignment = 8 : i64} // cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () // cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () // cir.return diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index 09f54c37f7b6..18288b15d241 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -14,19 +14,19 @@ void baz() { // CHECK: !ty_22Struk22 = !cir.struct // CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.return // CHECK: cir.func linkonce_odr @_ZN5StrukC1Ev(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () // CHECK-NEXT: cir.return // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !ty_22Struk22, cir.ptr , ["s", init] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22Struk22, !cir.ptr, ["s", init] {alignment = 4 : i64} // CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 2f8591a2810d..253ab3907aa4 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -84,9 +84,9 @@ void C3::Layer::Initialize() { // CHECK: cir.func @_ZN2C35Layer10InitializeEv // CHECK: cir.scope { -// CHECK: %2 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr +// CHECK: %2 = cir.base_class_addr(%1 : !cir.ptr) -> !cir.ptr // CHECK: %3 = cir.get_member %2[1] {name = "m_C1"} : !cir.ptr -> !cir.ptr> -// CHECK: %4 = cir.load %3 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.load %3 : !cir.ptr>, !cir.ptr // CHECK: %5 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr // CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool @@ -95,11 +95,11 @@ enumy C3::Initialize() { } // CHECK: cir.func @_ZN2C310InitializeEv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %3 = cir.base_class_addr(%2 : cir.ptr ) -> cir.ptr +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: %3 = cir.base_class_addr(%2 : !cir.ptr) -> !cir.ptr // CHECK: %4 = cir.call @_ZN2C210InitializeEv(%3) : (!cir.ptr) -> !s32i void vcall(C1 &c1) { @@ -109,19 +109,19 @@ void vcall(C1 &c1) { } // CHECK: cir.func @_Z5vcallR2C1(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["c1", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !ty_22buffy22, cir.ptr , ["b"] {alignment = 8 : i64} -// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["e"] {alignment = 4 : i64} -// CHECK: %3 = cir.alloca !ty_22buffy22, cir.ptr , ["agg.tmp0"] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %4 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %5 = cir.load %2 : cir.ptr , !s32i +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["c1", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22buffy22, !cir.ptr, ["b"] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !s32i, !cir.ptr, ["e"] {alignment = 4 : i64} +// CHECK: %3 = cir.alloca !ty_22buffy22, !cir.ptr, ["agg.tmp0"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %4 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: %5 = cir.load %2 : !cir.ptr, !s32i // CHECK: cir.call @_ZN5buffyC2ERKS_(%3, %1) : (!cir.ptr, !cir.ptr) -> () -// CHECK: %6 = cir.load %3 : cir.ptr , !ty_22buffy22 +// CHECK: %6 = cir.load %3 : !cir.ptr, !ty_22buffy22 // CHECK: %7 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr, !s32i, !ty_22buffy22)>>>> -// CHECK: %8 = cir.load %7 : cir.ptr , !s32i, !ty_22buffy22)>>>>, !cir.ptr, !s32i, !ty_22buffy22)>>> -// CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_22buffy22)>>>, vtable_index = 0, address_point_index = 2) : cir.ptr , !s32i, !ty_22buffy22)>>> -// CHECK: %10 = cir.load %9 : cir.ptr , !s32i, !ty_22buffy22)>>>, !cir.ptr, !s32i, !ty_22buffy22)>> +// CHECK: %8 = cir.load %7 : !cir.ptr, !s32i, !ty_22buffy22)>>>>, !cir.ptr, !s32i, !ty_22buffy22)>>> +// CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_22buffy22)>>>, vtable_index = 0, address_point_index = 2) : !cir.ptr, !s32i, !ty_22buffy22)>>> +// CHECK: %10 = cir.load %9 : !cir.ptr, !s32i, !ty_22buffy22)>>>, !cir.ptr, !s32i, !ty_22buffy22)>> // CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_22buffy22)>>, !cir.ptr, !s32i, !ty_22buffy22) -> !s32i // CHECK: cir.return // CHECK: } @@ -139,12 +139,12 @@ class B : public A { }; // CHECK: cir.func linkonce_odr @_ZN1B3fooEv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load deref %0 : cir.ptr >, !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load deref %0 : !cir.ptr>, !cir.ptr // CHECK: cir.scope { -// CHECK: %2 = cir.alloca !ty_22A22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: %3 = cir.base_class_addr(%1 : cir.ptr ) -> cir.ptr +// CHECK: %2 = cir.alloca !ty_22A22, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %3 = cir.base_class_addr(%1 : !cir.ptr) -> !cir.ptr // Call @A::A(A const&) // CHECK: cir.call @_ZN1AC2ERKS_(%2, %3) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/dtors-scopes.cpp b/clang/test/CIR/CodeGen/dtors-scopes.cpp index d0991dc304c1..4c10b4d64426 100644 --- a/clang/test/CIR/CodeGen/dtors-scopes.cpp +++ b/clang/test/CIR/CodeGen/dtors-scopes.cpp @@ -17,7 +17,7 @@ void dtor1() { // CHECK: cir.func @_Z5dtor1v() // CHECK: cir.scope { -// CHECK: %4 = cir.alloca !ty_22C22, cir.ptr , ["c", init] {alignment = 1 : i64} +// CHECK: %4 = cir.alloca !ty_22C22, !cir.ptr, ["c", init] {alignment = 1 : i64} // CHECK: cir.call @_ZN1CC2Ev(%4) : (!cir.ptr) -> () // CHECK: cir.call @_ZN1CD2Ev(%4) : (!cir.ptr) -> () // CHECK: } diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index 35ae4b1ff395..f4abee8de303 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -44,9 +44,9 @@ class B : public A // CHECK: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> // CHECK: cir.func @_Z4bluev() -// CHECK: %0 = cir.alloca !ty_22PSEvent22, cir.ptr , ["p", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !ty_22PSEvent22, !cir.ptr, ["p", init] {alignment = 8 : i64} // CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: %2 = cir.get_global @".str" : cir.ptr > +// CHECK: %2 = cir.get_global @".str" : !cir.ptr> // CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr // CHECK: cir.call @_ZN7PSEventC1E6EFModePKc(%0, %1, %3) : (!cir.ptr, !s32i, !cir.ptr) -> () // CHECK: cir.return @@ -70,9 +70,9 @@ class B : public A // Calls operator delete // // CHECK: cir.func linkonce_odr @_ZN1BD0Ev(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: cir.call @_ZN1BD2Ev(%1) : (!cir.ptr) -> () // CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr // CHECK: cir.call @_ZdlPv(%2) : (!cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/dynamic-cast.cpp b/clang/test/CIR/CodeGen/dynamic-cast.cpp index ea31b4460c12..0c74504a7faa 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast.cpp @@ -63,7 +63,7 @@ void *ptr_cast_to_complete(Base *ptr) { } // BEFORE: cir.func @_Z20ptr_cast_to_completeP4Base -// BEFORE: %[[#V19:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr +// BEFORE: %[[#V19:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr // BEFORE-NEXT: %[[#V20:]] = cir.cast(ptr_to_bool, %[[#V19]] : !cir.ptr), !cir.bool // BEFORE-NEXT: %[[#V21:]] = cir.unary(not, %[[#V20]]) : !cir.bool, !cir.bool // BEFORE-NEXT: %{{.+}} = cir.ternary(%[[#V21]], true { @@ -71,9 +71,9 @@ void *ptr_cast_to_complete(Base *ptr) { // BEFORE-NEXT: cir.yield %[[#V22]] : !cir.ptr // BEFORE-NEXT: }, false { // BEFORE-NEXT: %[[#V23:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr> -// BEFORE-NEXT: %[[#V24:]] = cir.load %[[#V23]] : cir.ptr >, !cir.ptr -// BEFORE-NEXT: %[[#V25:]] = cir.vtable.address_point( %[[#V24]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : cir.ptr -// BEFORE-NEXT: %[[#V26:]] = cir.load %[[#V25]] : cir.ptr , !s64i +// BEFORE-NEXT: %[[#V24:]] = cir.load %[[#V23]] : !cir.ptr>, !cir.ptr +// BEFORE-NEXT: %[[#V25:]] = cir.vtable.address_point( %[[#V24]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : !cir.ptr +// BEFORE-NEXT: %[[#V26:]] = cir.load %[[#V25]] : !cir.ptr, !s64i // BEFORE-NEXT: %[[#V27:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr // BEFORE-NEXT: %[[#V28:]] = cir.ptr_stride(%[[#V27]] : !cir.ptr, %[[#V26]] : !s64i), !cir.ptr // BEFORE-NEXT: %[[#V29:]] = cir.cast(bitcast, %[[#V28]] : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/CodeGen/evaluate-expr.c b/clang/test/CIR/CodeGen/evaluate-expr.c index 81947ea181e9..805fa5c01fd7 100644 --- a/clang/test/CIR/CodeGen/evaluate-expr.c +++ b/clang/test/CIR/CodeGen/evaluate-expr.c @@ -24,9 +24,9 @@ void bar() { int a = s.x; } // CHECK: cir.func no_proto @bar() -// CHECK: [[ALLOC:%.*]] = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK: {{%.*}} = cir.get_global @s : cir.ptr +// CHECK: [[ALLOC:%.*]] = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK: {{%.*}} = cir.get_global @s : !cir.ptr // CHECK: [[CONST:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK: cir.store [[CONST]], [[ALLOC]] : !s32i, cir.ptr +// CHECK: cir.store [[CONST]], [[ALLOC]] : !s32i, !cir.ptr // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/expressions.cpp b/clang/test/CIR/CodeGen/expressions.cpp index fa17f0921fcd..283acfca2d42 100644 --- a/clang/test/CIR/CodeGen/expressions.cpp +++ b/clang/test/CIR/CodeGen/expressions.cpp @@ -7,5 +7,5 @@ void test(int a) { // Should generate LValue parenthesis expression. (a) = 1; // CHECK: %[[#C:]] = cir.const(#cir.int<1> : !s32i) : !s32i - // CHECK: cir.store %[[#C]], %{{.+}} : !s32i, cir.ptr + // CHECK: cir.store %[[#C]], %{{.+}} : !s32i, !cir.ptr } diff --git a/clang/test/CIR/CodeGen/forward-decls.cpp b/clang/test/CIR/CodeGen/forward-decls.cpp index a67807a540bb..66ae59c226ec 100644 --- a/clang/test/CIR/CodeGen/forward-decls.cpp +++ b/clang/test/CIR/CodeGen/forward-decls.cpp @@ -50,7 +50,7 @@ struct RecursiveStruct { // CHECK3: testRecursiveStruct(%arg0: !cir.ptr void testRecursiveStruct(struct RecursiveStruct *arg) { // CHECK3: %[[#NEXT:]] = cir.get_member %{{.+}}[1] {name = "next"} : !cir.ptr -> !cir.ptr> - // CHECK3: %[[#DEREF:]] = cir.load %[[#NEXT]] : cir.ptr >, !cir.ptr + // CHECK3: %[[#DEREF:]] = cir.load %[[#NEXT]] : !cir.ptr>, !cir.ptr // CHECK3: cir.get_member %[[#DEREF]][0] {name = "value"} : !cir.ptr -> !cir.ptr arg->next->value; } @@ -81,9 +81,9 @@ struct StructNodeB { void testIndirectSelfReference(struct StructNodeA arg) { // CHECK4: %[[#V1:]] = cir.get_member %{{.+}}[1] {name = "next"} : !cir.ptr -> !cir.ptr> - // CHECK4: %[[#V2:]] = cir.load %[[#V1]] : cir.ptr >, !cir.ptr + // CHECK4: %[[#V2:]] = cir.load %[[#V1]] : !cir.ptr>, !cir.ptr // CHECK4: %[[#V3:]] = cir.get_member %[[#V2]][1] {name = "next"} : !cir.ptr -> !cir.ptr> - // CHECK4: %[[#V4:]] = cir.load %[[#V3]] : cir.ptr >, !cir.ptr + // CHECK4: %[[#V4:]] = cir.load %[[#V3]] : !cir.ptr>, !cir.ptr // CHECK4: cir.get_member %[[#V4]][0] {name = "value"} : !cir.ptr -> !cir.ptr arg.next->next->value; } diff --git a/clang/test/CIR/CodeGen/fullexpr.cpp b/clang/test/CIR/CodeGen/fullexpr.cpp index bb8f30d9af6d..2fd42b9b6db7 100644 --- a/clang/test/CIR/CodeGen/fullexpr.cpp +++ b/clang/test/CIR/CodeGen/fullexpr.cpp @@ -9,12 +9,12 @@ int go1() { } // CHECK: cir.func @_Z3go1v() -> !s32i -// CHECK: %[[#XAddr:]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} +// CHECK: %[[#XAddr:]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK: %[[#RVal:]] = cir.scope { -// CHECK-NEXT: %[[#TmpAddr:]] = cir.alloca !s32i, cir.ptr , ["ref.tmp0", init] {alignment = 4 : i64} +// CHECK-NEXT: %[[#TmpAddr:]] = cir.alloca !s32i, !cir.ptr, ["ref.tmp0", init] {alignment = 4 : i64} // CHECK-NEXT: %[[#One:]] = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK-NEXT: cir.store %[[#One]], %[[#TmpAddr]] : !s32i, cir.ptr +// CHECK-NEXT: cir.store %[[#One]], %[[#TmpAddr]] : !s32i, !cir.ptr // CHECK-NEXT: %[[#RValTmp:]] = cir.call @_Z2goRKi(%[[#TmpAddr]]) : (!cir.ptr) -> !s32i // CHECK-NEXT: cir.yield %[[#RValTmp]] : !s32i // CHECK-NEXT: } -// CHECK-NEXT: cir.store %[[#RVal]], %[[#XAddr]] : !s32i, cir.ptr +// CHECK-NEXT: cir.store %[[#RVal]], %[[#XAddr]] : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c index e1f147b3d54c..69179ece2c9b 100644 --- a/clang/test/CIR/CodeGen/fun-ptr.c +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -25,18 +25,18 @@ int extract_a(Data* d) { } // CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr -// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["d", init] -// CIR: [[TMP1:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] -// CIR: [[TMP2:%.*]] = cir.alloca !cir.ptr)>>, cir.ptr )>>>, ["f", init] -// CIR: cir.store %arg0, [[TMP0]] : !cir.ptr, cir.ptr > +// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] +// CIR: [[TMP1:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] +// CIR: [[TMP2:%.*]] = cir.alloca !cir.ptr)>>, !cir.ptr)>>>, ["f", init] +// CIR: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> // CIR: [[TMP3:%.*]] = cir.const(#cir.ptr : !cir.ptr)>>) : !cir.ptr)>> -// CIR: cir.store [[TMP3]], [[TMP2]] : !cir.ptr)>>, cir.ptr )>>> -// CIR: [[TMP4:%.*]] = cir.get_global {{@.*extract_a.*}} : cir.ptr )>> -// CIR: cir.store [[TMP4]], [[TMP2]] : !cir.ptr)>>, cir.ptr )>>> -// CIR: [[TMP5:%.*]] = cir.load [[TMP2]] : cir.ptr )>>>, !cir.ptr)>> -// CIR: [[TMP6:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CIR: cir.store [[TMP3]], [[TMP2]] : !cir.ptr)>>, !cir.ptr)>>> +// CIR: [[TMP4:%.*]] = cir.get_global {{@.*extract_a.*}} : !cir.ptr)>> +// CIR: cir.store [[TMP4]], [[TMP2]] : !cir.ptr)>>, !cir.ptr)>>> +// CIR: [[TMP5:%.*]] = cir.load [[TMP2]] : !cir.ptr)>>>, !cir.ptr)>> +// CIR: [[TMP6:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr // CIR: [[TMP7:%.*]] = cir.call [[TMP5]]([[TMP6]]) : (!cir.ptr)>>, !cir.ptr) -> !s32i -// CIR: cir.store [[TMP7]], [[TMP1]] : !s32i, cir.ptr +// CIR: cir.store [[TMP7]], [[TMP1]] : !s32i, !cir.ptr // LLVM: define i32 {{@.*foo.*}}(ptr %0) // LLVM: [[TMP1:%.*]] = alloca ptr, i64 1 diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index e576fb30fc65..3c58d8bbfd29 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -81,7 +81,7 @@ int foo() { } // CHECK: cir.global "private" external @optind : !s32i // CHECK: cir.func {{.*@foo}} -// CHECK: {{.*}} = cir.get_global @optind : cir.ptr +// CHECK: {{.*}} = cir.get_global @optind : !cir.ptr struct Glob { double a[42]; diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index ba5bb7eedba6..da9f9397164f 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -37,10 +37,10 @@ int use_func() { return func(); } // CHECK-NEXT: cir.global external @d = #cir.int<0> : !s32i // CHECK-NEXT: cir.func internal private @__cxx_global_var_init() -// CHECK-NEXT: [[TMP0:%.*]] = cir.get_global @d : cir.ptr -// CHECK-NEXT: [[TMP1:%.*]] = cir.get_global @a : cir.ptr -// CHECK-NEXT: [[TMP2:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i -// CHECK-NEXT: cir.store [[TMP2]], [[TMP0]] : !s32i, cir.ptr +// CHECK-NEXT: [[TMP0:%.*]] = cir.get_global @d : !cir.ptr +// CHECK-NEXT: [[TMP1:%.*]] = cir.get_global @a : !cir.ptr +// CHECK-NEXT: [[TMP2:%.*]] = cir.load [[TMP1]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store [[TMP2]], [[TMP0]] : !s32i, !cir.ptr // CHECK: cir.global external @e = #false // CHECK-NEXT: cir.global external @y = #cir.fp<3.400000e+00> : !cir.float @@ -58,34 +58,34 @@ int use_func() { return func(); } // CHECK-NEXT: cir.global external @s2 = #cir.global_view<@".str"> : !cir.ptr // CHECK: cir.func @_Z10use_globalv() -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["li", init] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.get_global @a : cir.ptr -// CHECK-NEXT: %2 = cir.load %1 : cir.ptr , !s32i -// CHECK-NEXT: cir.store %2, %0 : !s32i, cir.ptr +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["li", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.get_global @a : !cir.ptr +// CHECK-NEXT: %2 = cir.load %1 : !cir.ptr, !s32i +// CHECK-NEXT: cir.store %2, %0 : !s32i, !cir.ptr // CHECK: cir.func @_Z17use_global_stringv() -// CHECK-NEXT: %0 = cir.alloca !u8i, cir.ptr , ["c", init] {alignment = 1 : i64} -// CHECK-NEXT: %1 = cir.get_global @s2 : cir.ptr > -// CHECK-NEXT: %2 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !u8i, !cir.ptr, ["c", init] {alignment = 1 : i64} +// CHECK-NEXT: %1 = cir.get_global @s2 : !cir.ptr> +// CHECK-NEXT: %2 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK-NEXT: %3 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr -// CHECK-NEXT: %5 = cir.load %4 : cir.ptr , !s8i +// CHECK-NEXT: %5 = cir.load %4 : !cir.ptr, !s8i // CHECK-NEXT: %6 = cir.cast(integral, %5 : !s8i), !u8i -// CHECK-NEXT: cir.store %6, %0 : !u8i, cir.ptr +// CHECK-NEXT: cir.store %6, %0 : !u8i, !cir.ptr // CHECK-NEXT: cir.return // CHECK: cir.func linkonce_odr @_Z4funcIiET_v() -> !s32i -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %2 : !s32i // CHECK-NEXT: } // CHECK-NEXT: cir.func @_Z8use_funcv() -> !s32i -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.call @_Z4funcIiET_v() : () -> !s32i -// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %2 : !s32i // CHECK-NEXT: } @@ -104,19 +104,19 @@ long long ll[] = {999999999, 0, 0, 0}; void get_globals() { // CHECK: cir.func @_Z11get_globalsv() char *s = string; - // CHECK: %[[RES:[0-9]+]] = cir.get_global @string : cir.ptr > + // CHECK: %[[RES:[0-9]+]] = cir.get_global @string : !cir.ptr> // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr unsigned *u = uint; - // CHECK: %[[RES:[0-9]+]] = cir.get_global @uint : cir.ptr > + // CHECK: %[[RES:[0-9]+]] = cir.get_global @uint : !cir.ptr> // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr short *ss = sshort; - // CHECK: %[[RES:[0-9]+]] = cir.get_global @sshort : cir.ptr > + // CHECK: %[[RES:[0-9]+]] = cir.get_global @sshort : !cir.ptr> // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr int *si = sint; - // CHECK: %[[RES:[0-9]+]] = cir.get_global @sint : cir.ptr > + // CHECK: %[[RES:[0-9]+]] = cir.get_global @sint : !cir.ptr> // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr long long *l = ll; - // CHECK: %[[RES:[0-9]+]] = cir.get_global @ll : cir.ptr > + // CHECK: %[[RES:[0-9]+]] = cir.get_global @ll : !cir.ptr> // CHECK: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %[[RES]] : !cir.ptr>), !cir.ptr } @@ -125,7 +125,7 @@ extern int externVar; int testExternVar(void) { return externVar; } // CHECK: cir.global "private" external @externVar : !s32i // CHECK: cir.func @{{.+}}testExternVar -// CHECK: cir.get_global @externVar : cir.ptr +// CHECK: cir.get_global @externVar : !cir.ptr // Should constant initialize global with constant address. int var = 1; diff --git a/clang/test/CIR/CodeGen/gnu-extension.c b/clang/test/CIR/CodeGen/gnu-extension.c index e9cee90b57e9..0eb7e750ddff 100644 --- a/clang/test/CIR/CodeGen/gnu-extension.c +++ b/clang/test/CIR/CodeGen/gnu-extension.c @@ -4,10 +4,10 @@ int foo(void) { return __extension__ 0b101010; } //CHECK: cir.func @foo() -//CHECK-NEXT: [[ADDR:%.*]] = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +//CHECK-NEXT: [[ADDR:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} //CHECK-NEXT: [[VAL:%.*]] = cir.const(#cir.int<42> : !s32i) : !s32i -//CHECK-NEXT: cir.store [[VAL]], [[ADDR]] : !s32i, cir.ptr -//CHECK-NEXT: [[LOAD_VAL:%.*]] = cir.load [[ADDR]] : cir.ptr , !s32i +//CHECK-NEXT: cir.store [[VAL]], [[ADDR]] : !s32i, !cir.ptr +//CHECK-NEXT: [[LOAD_VAL:%.*]] = cir.load [[ADDR]] : !cir.ptr, !s32i //CHECK-NEXT: cir.return [[LOAD_VAL]] : !s32i void bar(void) { @@ -15,5 +15,5 @@ void bar(void) { } //CHECK: cir.func @bar() -//CHECK: {{.*}} = cir.get_global @bar : cir.ptr > +//CHECK: {{.*}} = cir.get_global @bar : !cir.ptr> //CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 153bd3d3445d..fd21360e399d 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -10,23 +10,23 @@ void g0(int a) { } // CHECK: cir.func @_Z2g0i -// CHECK-NEXT %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} -// CHECK-NEXT cir.store %arg0, %0 : !s32i, cir.ptr -// CHECK-NEXT %2 = cir.load %0 : cir.ptr , !s32i -// CHECK-NEXT cir.store %2, %1 : !s32i, cir.ptr +// CHECK-NEXT %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK-NEXT %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} +// CHECK-NEXT cir.store %arg0, %0 : !s32i, !cir.ptr +// CHECK-NEXT %2 = cir.load %0 : !cir.ptr, !s32i +// CHECK-NEXT cir.store %2, %1 : !s32i, !cir.ptr // CHECK-NEXT cir.br ^bb2 // CHECK-NEXT ^bb1: // no predecessors -// CHECK-NEXT %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT %4 = cir.const(1 : !s32i) : !s32i // CHECK-NEXT %5 = cir.binop(add, %3, %4) : !s32i -// CHECK-NEXT cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT cir.br ^bb2 // CHECK-NEXT ^bb2: // 2 preds: ^bb0, ^bb1 -// CHECK-NEXT %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT %6 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT %7 = cir.const(2 : !s32i) : !s32i // CHECK-NEXT %8 = cir.binop(add, %6, %7) : !s32i -// CHECK-NEXT cir.store %8, %1 : !s32i, cir.ptr +// CHECK-NEXT cir.store %8, %1 : !s32i, !cir.ptr // CHECK-NEXT cir.return void g1(int a) { @@ -38,10 +38,10 @@ void g1(int a) { // Make sure alloca for "y" shows up in the entry block // CHECK: cir.func @_Z2g1i(%arg0: !s32i -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !s32i, !cir.ptr int g2() { int b = 1; @@ -61,6 +61,6 @@ int g2() { // CHECK-NEXT: ^bb1: // no predecessors // CHECK: ^bb2: // 2 preds: ^bb0, ^bb1 -// CHECK: [[R:%[0-9]+]] = cir.load %0 : cir.ptr , !s32i +// CHECK: [[R:%[0-9]+]] = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: [[R]] : !s32i // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/hello.c b/clang/test/CIR/CodeGen/hello.c index 3b7155c36ff7..07ba213419fd 100644 --- a/clang/test/CIR/CodeGen/hello.c +++ b/clang/test/CIR/CodeGen/hello.c @@ -10,13 +10,13 @@ int main (void) { // CHECK: cir.func private @printf(!cir.ptr, ...) -> !s32i // CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.func @main() -> !s32i -// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.get_global @printf : cir.ptr , ...)>> -// CHECK: %2 = cir.get_global @".str" : cir.ptr > +// CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.get_global @printf : !cir.ptr, ...)>> +// CHECK: %2 = cir.get_global @".str" : !cir.ptr> // CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr // CHECK: %4 = cir.call @printf(%3) : (!cir.ptr) -> !s32i // CHECK: %5 = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK: cir.store %5, %0 : !s32i, cir.ptr -// CHECK: %6 = cir.load %0 : cir.ptr , !s32i +// CHECK: cir.store %5, %0 : !s32i, !cir.ptr +// CHECK: %6 = cir.load %0 : !cir.ptr, !s32i // CHECK: cir.return %6 : !s32i // CHECK: } diff --git a/clang/test/CIR/CodeGen/if-constexpr.cpp b/clang/test/CIR/CodeGen/if-constexpr.cpp index 8ef8315e1ad0..18b09de54758 100644 --- a/clang/test/CIR/CodeGen/if-constexpr.cpp +++ b/clang/test/CIR/CodeGen/if-constexpr.cpp @@ -40,56 +40,56 @@ void if0() { } // CHECK: cir.func @_Z3if0v() {{.*}} -// CHECK: cir.store %1, %0 : !s32i, cir.ptr loc({{.*}}) +// CHECK: cir.store %1, %0 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} // CHECK-NEXT: %3 = cir.const(#cir.int<2> : !s32i) : !s32i loc({{.*}}) -// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} // CHECK-NEXT: %3 = cir.const(#cir.int<5> : !s32i) : !s32i loc({{.*}}) -// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} -// CHECK-NEXT: %3 = cir.alloca !s32i, cir.ptr , ["y", init] {{.*}} +// CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.alloca !s32i, !cir.ptr, ["y", init] {{.*}} // CHECK-NEXT: %4 = cir.const(#cir.int<7> : !s32i) : !s32i loc({{.*}}) -// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr loc({{.*}}) -// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i loc({{.*}}) -// CHECK-NEXT: cir.store %5, %3 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.store %4, %2 : !s32i, !cir.ptr loc({{.*}}) +// CHECK-NEXT: %5 = cir.load %2 : !cir.ptr, !s32i loc({{.*}}) +// CHECK-NEXT: cir.store %5, %3 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} -// CHECK-NEXT: %3 = cir.alloca !s32i, cir.ptr , ["y", init] {{.*}} +// CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.alloca !s32i, !cir.ptr, ["y", init] {{.*}} // CHECK-NEXT: %4 = cir.const(#cir.int<9> : !s32i) : !s32i loc({{.*}}) -// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.store %4, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i loc({{.*}}) -// CHECK-NEXT: %6 = cir.load %2 : cir.ptr , !s32i loc({{.*}}) +// CHECK-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i loc({{.*}}) // CHECK-NEXT: %7 = cir.binop(mul, %5, %6) : !s32i loc({{.*}}) -// CHECK-NEXT: cir.store %7, %3 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.store %7, %3 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} // CHECK-NEXT: %3 = cir.const(#cir.int<20> : !s32i) : !s32i loc({{.*}}) -// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { // Note that Clang does not even emit a block in this case // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} -// CHECK-NEXT: %3 = cir.alloca !s32i, cir.ptr , ["y", init] {{.*}} +// CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} +// CHECK-NEXT: %3 = cir.alloca !s32i, !cir.ptr, ["y", init] {{.*}} // CHECK-NEXT: %4 = cir.const(#cir.int<70> : !s32i) : !s32i loc({{.*}}) -// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.store %4, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i loc({{.*}}) -// CHECK-NEXT: %6 = cir.load %2 : cir.ptr , !s32i loc({{.*}}) +// CHECK-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i loc({{.*}}) // CHECK-NEXT: %7 = cir.binop(mul, %5, %6) : !s32i loc({{.*}}) -// CHECK-NEXT: cir.store %7, %3 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.store %7, %3 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["x", init] {{.*}} +// CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} // CHECK-NEXT: %3 = cir.const(#cir.int<90> : !s32i) : !s32i loc({{.*}}) -// CHECK-NEXT: cir.store %3, %2 : !s32i, cir.ptr loc({{.*}}) +// CHECK-NEXT: cir.store %3, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.return loc({{.*}}) diff --git a/clang/test/CIR/CodeGen/implicit-return.cpp b/clang/test/CIR/CodeGen/implicit-return.cpp index 09b084b70ddb..fa64d244957d 100644 --- a/clang/test/CIR/CodeGen/implicit-return.cpp +++ b/clang/test/CIR/CodeGen/implicit-return.cpp @@ -16,11 +16,11 @@ void ret_void() {} int ret_non_void() {} // CHECK-O0: cir.func @_Z12ret_non_voidv() -> !s32i -// CHECK-O0-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK-O0-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] // CHECK-O0-NEXT: cir.trap // CHECK-O0-NEXT: } // CHECK-O2: cir.func @_Z12ret_non_voidv() -> !s32i -// CHECK-O2-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] +// CHECK-O2-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] // CHECK-O2-NEXT: cir.unreachable // CHECK-O2-NEXT: } diff --git a/clang/test/CIR/CodeGen/inc-bool.cpp b/clang/test/CIR/CodeGen/inc-bool.cpp index 05c3bb54aca3..adeb39f73938 100644 --- a/clang/test/CIR/CodeGen/inc-bool.cpp +++ b/clang/test/CIR/CodeGen/inc-bool.cpp @@ -6,9 +6,9 @@ void foo(bool x) { } // CHECK: cir.func @_Z3foob(%arg0: !cir.bool loc({{.*}})) -// CHECK: [[ALLOC_X:%.*]] = cir.alloca !cir.bool, cir.ptr , ["x", init] {alignment = 1 : i64} -// CHECK: cir.store %arg0, [[ALLOC_X]] : !cir.bool, cir.ptr -// CHECK: {{.*}} = cir.load [[ALLOC_X]] : cir.ptr , !cir.bool +// CHECK: [[ALLOC_X:%.*]] = cir.alloca !cir.bool, !cir.ptr, ["x", init] {alignment = 1 : i64} +// CHECK: cir.store %arg0, [[ALLOC_X]] : !cir.bool, !cir.ptr +// CHECK: {{.*}} = cir.load [[ALLOC_X]] : !cir.ptr, !cir.bool // CHECK: [[TRUE:%.*]] = cir.const(#true) : !cir.bool -// CHECK: cir.store [[TRUE]], [[ALLOC_X]] : !cir.bool, cir.ptr +// CHECK: cir.store [[TRUE]], [[ALLOC_X]] : !cir.bool, !cir.ptr // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/inc-dec.cpp b/clang/test/CIR/CodeGen/inc-dec.cpp index 1005299027a1..5207db364ed4 100644 --- a/clang/test/CIR/CodeGen/inc-dec.cpp +++ b/clang/test/CIR/CodeGen/inc-dec.cpp @@ -7,8 +7,8 @@ unsigned id0() { } // CHECK: cir.func @_Z3id0v() -> !u32i -// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !u32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, !cir.ptr, ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(inc, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] @@ -21,8 +21,8 @@ unsigned id1() { } // CHECK: cir.func @_Z3id1v() -> !u32i -// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !u32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, !cir.ptr, ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(dec, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] @@ -34,8 +34,8 @@ unsigned id2() { } // CHECK: cir.func @_Z3id2v() -> !u32i -// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !u32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, !cir.ptr, ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(inc, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] @@ -47,8 +47,8 @@ unsigned id3() { } // CHECK: cir.func @_Z3id3v() -> !u32i -// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !u32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, !cir.ptr, ["a", init] // CHECK: %[[#BEFORE_A:]] = cir.load %[[#A]] // CHECK: %[[#AFTER_A:]] = cir.unary(dec, %[[#BEFORE_A]]) // CHECK: cir.store %[[#AFTER_A]], %[[#A]] diff --git a/clang/test/CIR/CodeGen/lalg.c b/clang/test/CIR/CodeGen/lalg.c index 5bdca87db150..5115f967d314 100644 --- a/clang/test/CIR/CodeGen/lalg.c +++ b/clang/test/CIR/CodeGen/lalg.c @@ -7,14 +7,14 @@ double dot() { return result; } -// CHECK: %1 = cir.alloca !cir.double, cir.ptr , ["x", init] -// CHECK-NEXT: %2 = cir.alloca !cir.double, cir.ptr , ["y", init] -// CHECK-NEXT: %3 = cir.alloca !cir.double, cir.ptr , ["result", init] +// CHECK: %1 = cir.alloca !cir.double, !cir.ptr, ["x", init] +// CHECK-NEXT: %2 = cir.alloca !cir.double, !cir.ptr, ["y", init] +// CHECK-NEXT: %3 = cir.alloca !cir.double, !cir.ptr, ["result", init] // CHECK-NEXT: %4 = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double -// CHECK-NEXT: cir.store %4, %1 : !cir.double, cir.ptr +// CHECK-NEXT: cir.store %4, %1 : !cir.double, !cir.ptr // CHECK-NEXT: %5 = cir.const(#cir.fp<0.000000e+00> : !cir.float) : !cir.float // CHECK-NEXT: %6 = cir.cast(floating, %5 : !cir.float), !cir.double -// CHECK-NEXT: cir.store %6, %2 : !cir.double, cir.ptr -// CHECK-NEXT: %7 = cir.load %1 : cir.ptr , !cir.double -// CHECK-NEXT: %8 = cir.load %2 : cir.ptr , !cir.double +// CHECK-NEXT: cir.store %6, %2 : !cir.double, !cir.ptr +// CHECK-NEXT: %7 = cir.load %1 : !cir.ptr, !cir.double +// CHECK-NEXT: %8 = cir.load %2 : !cir.ptr, !cir.double // CHECK-NEXT: %9 = cir.binop(mul, %7, %8) : !cir.double diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index d9eca2c0fbc2..77a531253fcc 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -12,7 +12,7 @@ void fn() { // CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv // CHECK: cir.func @_Z2fnv() -// CHECK-NEXT: %0 = cir.alloca !ty_22anon2E222, cir.ptr , ["a"] +// CHECK-NEXT: %0 = cir.alloca !ty_22anon2E222, !cir.ptr, ["a"] // CHECK: cir.call @_ZZ2fnvENK3$_0clEv void l0() { @@ -23,17 +23,17 @@ void l0() { // CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv( -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %2 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> -// CHECK: %3 = cir.load %2 : cir.ptr >, !cir.ptr -// CHECK: %4 = cir.load %3 : cir.ptr , !s32i +// CHECK: %3 = cir.load %2 : !cir.ptr>, !cir.ptr +// CHECK: %4 = cir.load %3 : !cir.ptr, !s32i // CHECK: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %6 = cir.binop(add, %4, %5) : !s32i // CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> -// CHECK: %8 = cir.load %7 : cir.ptr >, !cir.ptr -// CHECK: cir.store %6, %8 : !s32i, cir.ptr +// CHECK: %8 = cir.load %7 : !cir.ptr>, !cir.ptr +// CHECK: cir.store %6, %8 : !s32i, !cir.ptr // CHECK: cir.func @_Z2l0v() @@ -46,13 +46,13 @@ auto g() { } // CHECK: cir.func @_Z1gv() -> !ty_22anon2E622 -// CHECK: %0 = cir.alloca !ty_22anon2E622, cir.ptr , ["__retval"] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca !ty_22anon2E622, !cir.ptr, ["__retval"] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CHECK: %2 = cir.const(#cir.int<12> : !s32i) : !s32i -// CHECK: cir.store %2, %1 : !s32i, cir.ptr +// CHECK: cir.store %2, %1 : !s32i, !cir.ptr // CHECK: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> -// CHECK: cir.store %1, %3 : !cir.ptr, cir.ptr > -// CHECK: %4 = cir.load %0 : cir.ptr , !ty_22anon2E622 +// CHECK: cir.store %1, %3 : !cir.ptr, !cir.ptr> +// CHECK: %4 = cir.load %0 : !cir.ptr, !ty_22anon2E622 // CHECK: cir.return %4 : !ty_22anon2E622 auto g2() { @@ -66,13 +66,13 @@ auto g2() { // Should be same as above because of NRVO // CHECK: cir.func @_Z2g2v() -> !ty_22anon2E822 -// CHECK-NEXT: %0 = cir.alloca !ty_22anon2E822, cir.ptr , ["__retval", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22anon2E822, !cir.ptr, ["__retval", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.const(#cir.int<12> : !s32i) : !s32i -// CHECK-NEXT: cir.store %2, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %2, %1 : !s32i, !cir.ptr // CHECK-NEXT: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> -// CHECK-NEXT: cir.store %1, %3 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !ty_22anon2E822 +// CHECK-NEXT: cir.store %1, %3 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !ty_22anon2E822 // CHECK-NEXT: cir.return %4 : !ty_22anon2E822 int f() { @@ -80,15 +80,15 @@ int f() { } // CHECK: cir.func @_Z1fv() -> !s32i -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !ty_22anon2E822, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK-NEXT: %2 = cir.alloca !ty_22anon2E822, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} // CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_22anon2E822 -// CHECK-NEXT: cir.store %3, %2 : !ty_22anon2E822, cir.ptr +// CHECK-NEXT: cir.store %3, %2 : !ty_22anon2E822, !cir.ptr // CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i -// CHECK-NEXT: cir.store %4, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %4, %0 : !s32i, !cir.ptr // CHECK-NEXT: } -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %1 : !s32i // CHECK-NEXT: } @@ -108,25 +108,25 @@ int g3() { // CHECK: cir.func internal private @_ZZ2g3vENK3$_0cvPFiRKiEEv // CHECK: cir.func @_Z2g3v() -> !s32i -// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !cir.ptr)>>, cir.ptr )>>>, ["fn", init] {alignment = 8 : i64} -// CHECK: %2 = cir.alloca !s32i, cir.ptr , ["task", init] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !cir.ptr)>>, !cir.ptr)>>>, ["fn", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !s32i, !cir.ptr, ["task", init] {alignment = 4 : i64} // 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. // CHECK: %3 = cir.scope { -// CHECK: %7 = cir.alloca !ty_22anon2E1122, cir.ptr , ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: %7 = cir.alloca !ty_22anon2E1122, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} // CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> // CHECK: %9 = cir.unary(plus, %8) : !cir.ptr)>>, !cir.ptr)>> // CHECK: cir.yield %9 : !cir.ptr)>> // CHECK: } // 2. Load ptr to `__invoke()`. -// CHECK: cir.store %3, %1 : !cir.ptr)>>, cir.ptr )>>> +// CHECK: cir.store %3, %1 : !cir.ptr)>>, !cir.ptr)>>> // CHECK: %4 = cir.scope { -// CHECK: %7 = cir.alloca !s32i, cir.ptr , ["ref.tmp1", init] {alignment = 4 : i64} -// CHECK: %8 = cir.load %1 : cir.ptr )>>>, !cir.ptr)>> +// CHECK: %7 = cir.alloca !s32i, !cir.ptr, ["ref.tmp1", init] {alignment = 4 : i64} +// CHECK: %8 = cir.load %1 : !cir.ptr)>>>, !cir.ptr)>> // CHECK: %9 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: cir.store %9, %7 : !s32i, cir.ptr +// CHECK: cir.store %9, %7 : !s32i, !cir.ptr // 3. Call `__invoke()`, which effectively executes `operator()`. // CHECK: %10 = cir.call %8(%7) : (!cir.ptr)>>, !cir.ptr) -> !s32i diff --git a/clang/test/CIR/CodeGen/libcall.cpp b/clang/test/CIR/CodeGen/libcall.cpp index 3df45c43e124..62944de443eb 100644 --- a/clang/test/CIR/CodeGen/libcall.cpp +++ b/clang/test/CIR/CodeGen/libcall.cpp @@ -40,9 +40,9 @@ void t(const char* fmt, ...) { } // CHECK: cir.func @_Z15consume_messagePKc(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["m", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["m", init] {alignment = 8 : i64} -// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %3 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %4 = cir.objsize(%3 : , max) -> !u64i // CHECK: %5 = cir.call @_ZL6strlenPKcU17pass_object_size0(%3, %4) : (!cir.ptr, !u64i) -> !u64i @@ -53,11 +53,11 @@ void t(const char* fmt, ...) { // // FIXME: tag the param with an attribute to designate the size information. // -// CHECK: %1 = cir.alloca !u64i, cir.ptr , ["", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !u64i, !cir.ptr, ["", init] {alignment = 8 : i64} -// CHECK: cir.store %arg1, %1 : !u64i, cir.ptr +// CHECK: cir.store %arg1, %1 : !u64i, !cir.ptr -// CHECK: %10 = cir.load %1 : cir.ptr , !u64i -// CHECK: %11 = cir.load %3 : cir.ptr >, !cir.ptr -// CHECK: %12 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %10 = cir.load %1 : !cir.ptr, !u64i +// CHECK: %11 = cir.load %3 : !cir.ptr>, !cir.ptr +// CHECK: %12 = cir.load %4 : !cir.ptr>, !cir.ptr // CHECK: %13 = cir.call @__vsnprintf_chk(%6, %8, %9, %10, %11, %12) \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index c333654a38ad..9f9e3058d1bf 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -11,19 +11,19 @@ void l0(void) { // CPPSCOPE: cir.func @_Z2l0v() // CPPSCOPE-NEXT: cir.scope { -// CPPSCOPE-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} -// CPPSCOPE-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} +// CPPSCOPE-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} +// CPPSCOPE-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["j", init] {alignment = 4 : i64} // CPPSCOPE-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i -// CPPSCOPE-NEXT: cir.store %2, %0 : !s32i, cir.ptr +// CPPSCOPE-NEXT: cir.store %2, %0 : !s32i, !cir.ptr // CPPSCOPE-NEXT: cir.for : cond { // CSCOPE: cir.func @l0() // CSCOPE-NEXT: cir.scope { -// CSCOPE-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} +// CSCOPE-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CSCOPE-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i -// CSCOPE-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CSCOPE-NEXT: cir.store %1, %0 : !s32i, !cir.ptr // CSCOPE-NEXT: cir.for : cond { // CSCOPE: } body { // CSCOPE-NEXT: cir.scope { -// CSCOPE-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} +// CSCOPE-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["j", init] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index b6f63b2ce4fc..67ba64cbb6f2 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -20,21 +20,21 @@ void l1() { // CHECK: cir.func @_Z2l1v // CHECK: cir.for : cond { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.load %2 : !cir.ptr, !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool // CHECK-NEXT: cir.condition(%6) // CHECK-NEXT: } body { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i -// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } step { -// CHECK-NEXT: %4 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.load %2 : !cir.ptr, !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i -// CHECK-NEXT: cir.store %6, %2 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %6, %2 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } @@ -54,13 +54,13 @@ void l2(bool cond) { // CHECK: cir.func @_Z2l2b // CHECK: cir.scope { // CHECK-NEXT: cir.while { -// CHECK-NEXT: %3 = cir.load %0 : cir.ptr , !cir.bool +// CHECK-NEXT: %3 = cir.load %0 : !cir.ptr, !cir.bool // CHECK-NEXT: cir.condition(%3) // CHECK-NEXT: } do { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } @@ -69,10 +69,10 @@ void l2(bool cond) { // CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } do { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } @@ -82,10 +82,10 @@ void l2(bool cond) { // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.condition(%4) // CHECK-NEXT: } do { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } @@ -106,22 +106,22 @@ void l3(bool cond) { // CHECK: cir.func @_Z2l3b // CHECK: cir.scope { // CHECK-NEXT: cir.do { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { -// CHECK-NEXT: %[[#TRUE:]] = cir.load %0 : cir.ptr , !cir.bool +// CHECK-NEXT: %[[#TRUE:]] = cir.load %0 : !cir.ptr, !cir.bool // CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.do { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { // CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool @@ -130,10 +130,10 @@ void l3(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.do { -// CHECK-NEXT: %3 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { // CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i @@ -157,12 +157,12 @@ void l4() { // CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool // CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } do { -// CHECK-NEXT: %4 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i -// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %10 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %10 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: %11 = cir.const(#cir.int<10> : !s32i) : !s32i // CHECK-NEXT: %12 = cir.cmp(lt, %10, %11) : !s32i, !cir.bool // CHECK-NEXT: cir.if %12 { diff --git a/clang/test/CIR/CodeGen/lvalue-refs.cpp b/clang/test/CIR/CodeGen/lvalue-refs.cpp index ad56d820c5e9..c4d1866d0291 100644 --- a/clang/test/CIR/CodeGen/lvalue-refs.cpp +++ b/clang/test/CIR/CodeGen/lvalue-refs.cpp @@ -7,7 +7,7 @@ struct String { void split(String &S) {} // CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["S", init] +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["S", init] void foo() { String s; @@ -15,5 +15,5 @@ void foo() { } // CHECK: cir.func @_Z3foov() -// CHECK: %0 = cir.alloca !ty_22String22, cir.ptr , ["s"] +// CHECK: %0 = cir.alloca !ty_22String22, !cir.ptr, ["s"] // CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index 93e3f7f5c40e..cfb5d52d4daf 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -12,19 +12,19 @@ void m(int a, int b) { } // CHECK: cir.func linkonce_odr @_ZSt11make_sharedI1SJRiS1_EESt10shared_ptrIT_EDpOT0_( -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["args", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["args", init] {alignment = 8 : i64} -// CHECK: %2 = cir.alloca !ty_22std3A3Ashared_ptr3CS3E22, cir.ptr , ["__retval"] {alignment = 1 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["args", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["args", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !ty_22std3A3Ashared_ptr3CS3E22, !cir.ptr, ["__retval"] {alignment = 1 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> // CHECK: cir.scope { // CHECK: %4 = cir.const(#cir.int<1> : !u64i) : !u64i // CHECK: %5 = cir.call @_Znwm(%4) : (!u64i) -> !cir.ptr // CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr -// CHECK: %7 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %8 = cir.load %7 : cir.ptr , !s32i -// CHECK: %9 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK: %10 = cir.load %9 : cir.ptr , !s32i +// CHECK: %7 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: %8 = cir.load %7 : !cir.ptr, !s32i +// CHECK: %9 = cir.load %1 : !cir.ptr>, !cir.ptr +// CHECK: %10 = cir.load %9 : !cir.ptr, !s32i // CHECK: cir.call @_ZN1SC1Eii(%6, %8, %10) : (!cir.ptr, !s32i, !s32i) -> () // CHECK: cir.call @_ZNSt10shared_ptrI1SEC1EPS0_(%2, %6) : (!cir.ptr, !cir.ptr) -> () // CHECK: } @@ -37,13 +37,13 @@ class B { }; // CHECK: cir.func linkonce_odr @_ZN1B9constructEPS_(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__p", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: cir.store %arg1, %1 : !cir.ptr, cir.ptr > -// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["__p", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> +// CHECK: %2 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %3 = cir.const(#cir.int<1> : !u64i) : !u64i -// CHECK: %4 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr // CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/CodeGen/no-proto-fun-ptr.c b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c index e396a606a73d..a399f9b1f9c8 100644 --- a/clang/test/CIR/CodeGen/no-proto-fun-ptr.c +++ b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c @@ -7,10 +7,10 @@ void check_noproto_ptr() { } // CHECK: cir.func no_proto @check_noproto_ptr() -// CHECK: [[ALLOC:%.*]] = cir.alloca !cir.ptr>, cir.ptr >>, ["fun", init] {alignment = 8 : i64} -// CHECK: [[GGO:%.*]] = cir.get_global @empty : cir.ptr > +// CHECK: [[ALLOC:%.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["fun", init] {alignment = 8 : i64} +// CHECK: [[GGO:%.*]] = cir.get_global @empty : !cir.ptr> // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> -// CHECK: cir.store [[CAST]], [[ALLOC]] : !cir.ptr>, cir.ptr >> +// CHECK: cir.store [[CAST]], [[ALLOC]] : !cir.ptr>, !cir.ptr>> // CHECK: cir.return void empty(void) {} @@ -21,8 +21,8 @@ void buz() { } // CHECK: cir.func no_proto @buz() -// CHECK: [[FNPTR_ALLOC:%.*]] = cir.alloca !cir.ptr>, cir.ptr >>, ["func"] {alignment = 8 : i64} -// CHECK: [[FNPTR:%.*]] = cir.load deref [[FNPTR_ALLOC]] : cir.ptr >>, !cir.ptr> +// CHECK: [[FNPTR_ALLOC:%.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["func"] {alignment = 8 : i64} +// CHECK: [[FNPTR:%.*]] = cir.load deref [[FNPTR_ALLOC]] : !cir.ptr>>, !cir.ptr> // CHECK: [[CAST:%.*]] = cir.cast(bitcast, %1 : !cir.ptr>), !cir.ptr> // CHECK: cir.call [[CAST]]() : (!cir.ptr>) -> () // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/no-prototype.c b/clang/test/CIR/CodeGen/no-prototype.c index 4028d8e2ec32..f13b3d7a9676 100644 --- a/clang/test/CIR/CodeGen/no-prototype.c +++ b/clang/test/CIR/CodeGen/no-prototype.c @@ -35,7 +35,7 @@ int test1(int x) { int noProto2(); int test2(int x) { return noProto2(x); - // CHECK: [[GGO:%.*]] = cir.get_global @noProto2 : cir.ptr > + // CHECK: [[GGO:%.*]] = cir.get_global @noProto2 : !cir.ptr> // CHECK: [[CAST:%.*]] = cir.cast(bitcast, %3 : !cir.ptr>), !cir.ptr> // CHECK: {{.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr>, !s32i) -> !s32i } @@ -51,7 +51,7 @@ int noProto3(); int test3(int x) { // CHECK: cir.func @test3 return noProto3(x); - // CHECK: [[GGO:%.*]] = cir.get_global @noProto3 : cir.ptr > + // CHECK: [[GGO:%.*]] = cir.get_global @noProto3 : !cir.ptr> // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> // CHECK: {{%.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr>, !s32i) -> !s32i } @@ -68,7 +68,7 @@ int noProto4() { return 0; } // cir.func private no_proto @noProto4() -> !s32i int test4(int x) { return noProto4(x); // Even if we know the definition, this should compile. - // CHECK: [[GGO:%.*]] = cir.get_global @noProto4 : cir.ptr > + // CHECK: [[GGO:%.*]] = cir.get_global @noProto4 : !cir.ptr> // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> // CHECK: {{%.*}} = cir.call [[CAST]]({{%.*}}) : (!cir.ptr>, !s32i) -> !s32i } @@ -77,7 +77,7 @@ int test4(int x) { int noProto5(); int test5(int x) { return noProto5(); - // CHECK: [[GGO:%.*]] = cir.get_global @noProto5 : cir.ptr > + // CHECK: [[GGO:%.*]] = cir.get_global @noProto5 : !cir.ptr> // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> // CHECK: {{%.*}} = cir.call [[CAST]]() : (!cir.ptr>) -> !s32i } diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp index 9aeedb4614c3..527f064bfa4a 100644 --- a/clang/test/CIR/CodeGen/nrvo.cpp +++ b/clang/test/CIR/CodeGen/nrvo.cpp @@ -12,20 +12,20 @@ std::vector test_nrvo() { // CHECK: ![[VEC:.*]] = !cir.struct" {!cir.ptr>, !cir.ptr>, !cir.ptr>}> // CHECK: cir.func @_Z9test_nrvov() -> ![[VEC]] -// CHECK: %0 = cir.alloca ![[VEC]], cir.ptr , ["__retval", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.bool, cir.ptr , ["nrvo"] {alignment = 1 : i64} +// CHECK: %0 = cir.alloca ![[VEC]], !cir.ptr, ["__retval", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.bool, !cir.ptr, ["nrvo"] {alignment = 1 : i64} // CHECK: %2 = cir.const(#false) : !cir.bool -// CHECK: cir.store %2, %1 : !cir.bool, cir.ptr +// CHECK: cir.store %2, %1 : !cir.bool, !cir.ptr // CHECK: cir.call @_ZNSt6vectorIPKcEC1Ev(%0) : (!cir.ptr) -> () // CHECK: cir.scope { -// CHECK: %5 = cir.alloca !cir.ptr, cir.ptr >, ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: %6 = cir.get_global @".str" : cir.ptr > +// CHECK: %5 = cir.alloca !cir.ptr, !cir.ptr>, ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %6 = cir.get_global @".str" : !cir.ptr> // CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr -// CHECK: cir.store %7, %5 : !cir.ptr, cir.ptr > +// CHECK: cir.store %7, %5 : !cir.ptr, !cir.ptr> // CHECK: cir.call @_ZNSt6vectorIPKcE9push_backEOS1_(%0, %5) : (!cir.ptr, !cir.ptr>) -> () // CHECK: } // CHECK: %3 = cir.const(#true) : !cir.bool -// CHECK: cir.store %3, %1 : !cir.bool, cir.ptr -// CHECK: %4 = cir.load %0 : cir.ptr , ![[VEC]] +// CHECK: cir.store %3, %1 : !cir.bool, !cir.ptr +// CHECK: %4 = cir.load %0 : !cir.ptr, ![[VEC]] // CHECK: cir.return %4 : ![[VEC]] // CHECK: } diff --git a/clang/test/CIR/CodeGen/packed-structs.c b/clang/test/CIR/CodeGen/packed-structs.c index ac9c1383cad5..264701b9efe2 100644 --- a/clang/test/CIR/CodeGen/packed-structs.c +++ b/clang/test/CIR/CodeGen/packed-structs.c @@ -25,9 +25,9 @@ typedef struct { // CHECK: !ty_22B22 = !cir.struct}> // CHECK: cir.func {{.*@foo()}} -// CHECK: %0 = cir.alloca !ty_22A22, cir.ptr , ["a"] {alignment = 1 : i64} -// CHECK: %1 = cir.alloca !ty_22B22, cir.ptr , ["b"] {alignment = 1 : i64} -// CHECK: %2 = cir.alloca !ty_22C22, cir.ptr , ["c"] {alignment = 2 : i64} +// CHECK: %0 = cir.alloca !ty_22A22, !cir.ptr, ["a"] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !ty_22B22, !cir.ptr, ["b"] {alignment = 1 : i64} +// CHECK: %2 = cir.alloca !ty_22C22, !cir.ptr, ["c"] {alignment = 2 : i64} void foo() { A a; B b; diff --git a/clang/test/CIR/CodeGen/pass-object-size.c b/clang/test/CIR/CodeGen/pass-object-size.c index 851b912b0ad5..21753d05db88 100644 --- a/clang/test/CIR/CodeGen/pass-object-size.c +++ b/clang/test/CIR/CodeGen/pass-object-size.c @@ -13,7 +13,7 @@ void c() { } // CIR: cir.func no_proto @c() -// CIR: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , %{{[0-9]+}} : !u64i, ["vla"] {alignment = 16 : i64} +// CIR: [[TMP0:%.*]] = cir.alloca !s32i, !cir.ptr, %{{[0-9]+}} : !u64i, ["vla"] {alignment = 16 : i64} // CIR: [[TMP1:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr // CIR-NEXT: [[TMP2:%.*]] = cir.objsize([[TMP1]] : , max) -> !u64i // CIR-NEXT: cir.call @b([[TMP1]], [[TMP2]]) : (!cir.ptr, !u64i) -> () diff --git a/clang/test/CIR/CodeGen/pointers.cpp b/clang/test/CIR/CodeGen/pointers.cpp index 8df8a0f6b658..874e0984aad2 100644 --- a/clang/test/CIR/CodeGen/pointers.cpp +++ b/clang/test/CIR/CodeGen/pointers.cpp @@ -18,12 +18,12 @@ void foo(int *iptr, char *cptr, unsigned ustride) { // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#STRIDE]]) : !s32i, !s32i // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr iptr + ustride; - // CHECK: %[[#STRIDE:]] = cir.load %{{.+}} : cir.ptr , !u32i + // CHECK: %[[#STRIDE:]] = cir.load %{{.+}} : !cir.ptr, !u32i // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#STRIDE]] : !u32i), !cir.ptr // Must convert unsigned stride to a signed one. iptr - ustride; - // CHECK: %[[#STRIDE:]] = cir.load %{{.+}} : cir.ptr , !u32i + // CHECK: %[[#STRIDE:]] = cir.load %{{.+}} : !cir.ptr, !u32i // CHECK: %[[#SIGNSTRIDE:]] = cir.cast(integral, %[[#STRIDE]] : !u32i), !s32i // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#SIGNSTRIDE]]) : !s32i, !s32i // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr @@ -32,7 +32,7 @@ void foo(int *iptr, char *cptr, unsigned ustride) { void testPointerSubscriptAccess(int *ptr) { // CHECK: testPointerSubscriptAccess ptr[1]; - // CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >, !cir.ptr + // CHECK: %[[#V1:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: cir.ptr_stride(%[[#V1]] : !cir.ptr, %[[#V2]] : !s32i), !cir.ptr } @@ -40,10 +40,10 @@ void testPointerSubscriptAccess(int *ptr) { void testPointerMultiDimSubscriptAccess(int **ptr) { // CHECK: testPointerMultiDimSubscriptAccess ptr[1][2]; - // CHECK: %[[#V1:]] = cir.load %{{.+}} : cir.ptr >>, !cir.ptr> + // CHECK: %[[#V1:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %[[#V3:]] = cir.ptr_stride(%[[#V1]] : !cir.ptr>, %[[#V2]] : !s32i), !cir.ptr> - // CHECK: %[[#V4:]] = cir.load %[[#V3]] : cir.ptr >, !cir.ptr + // CHECK: %[[#V4:]] = cir.load %[[#V3]] : !cir.ptr>, !cir.ptr // CHECK: %[[#V5:]] = cir.const(#cir.int<2> : !s32i) : !s32i // CHECK: cir.ptr_stride(%[[#V4]] : !cir.ptr, %[[#V5]] : !s32i), !cir.ptr } diff --git a/clang/test/CIR/CodeGen/predefined.cpp b/clang/test/CIR/CodeGen/predefined.cpp index dc849d915598..35bc4bff8e73 100644 --- a/clang/test/CIR/CodeGen/predefined.cpp +++ b/clang/test/CIR/CodeGen/predefined.cpp @@ -10,12 +10,12 @@ void m() { } // CHECK: cir.func @_Z1mv() -// CHECK: %0 = cir.get_global @".str" : cir.ptr > +// CHECK: %0 = cir.get_global @".str" : !cir.ptr> // CHECK: %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // CHECK: %2 = cir.const(#cir.int<79> : !s32i) : !s32i -// CHECK: %3 = cir.get_global @".str1" : cir.ptr > +// CHECK: %3 = cir.get_global @".str1" : !cir.ptr> // CHECK: %4 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr -// CHECK: %5 = cir.get_global @".str2" : cir.ptr > +// CHECK: %5 = cir.get_global @".str2" : !cir.ptr> // CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr // CHECK: cir.call @__assert2(%1, %2, %4, %6) : (!cir.ptr, !s32i, !cir.ptr, !cir.ptr) -> () // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/ptr_diff.cpp b/clang/test/CIR/CodeGen/ptr_diff.cpp index ebaa5ec6bfac..33149e2aee53 100644 --- a/clang/test/CIR/CodeGen/ptr_diff.cpp +++ b/clang/test/CIR/CodeGen/ptr_diff.cpp @@ -7,8 +7,8 @@ size_type size(unsigned long *_start, unsigned long *_finish) { } // CHECK: cir.func @_Z4sizePmS_(%arg0: !cir.ptr -// CHECK: %3 = cir.load %1 : cir.ptr >, !cir.ptr -// CHECK: %4 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %3 = cir.load %1 : !cir.ptr>, !cir.ptr +// CHECK: %4 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %5 = cir.ptr_diff(%3, %4) : !cir.ptr -> !s64i // CHECK: %6 = cir.cast(integral, %5 : !s64i), !u64i diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 69c7b3a741f0..d56567b55cac 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -26,40 +26,40 @@ void init(unsigned numImages) { // CHECK-DAG: ![[VEC_IT:.*]] = !cir.struct" {!cir.ptr}> // CHECK: cir.func @_Z4initj(%arg0: !u32i -// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["numImages", init] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca ![[VEC]], cir.ptr , ["images", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !u32i, cir.ptr -// CHECK: %2 = cir.load %0 : cir.ptr , !u32i +// CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["numImages", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca ![[VEC]], !cir.ptr, ["images", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !u32i, !cir.ptr +// CHECK: %2 = cir.load %0 : !cir.ptr, !u32i // CHECK: %3 = cir.cast(integral, %2 : !u32i), !u64i // CHECK: cir.call @_ZNSt6vectorI6tripleEC1Em(%1, %3) : (!cir.ptr, !u64i) -> () // CHECK: cir.scope { -// CHECK: %4 = cir.alloca !cir.ptr, cir.ptr >, ["__range1", init] {alignment = 8 : i64} -// CHECK: %5 = cir.alloca ![[VEC_IT]], cir.ptr , ["__begin1", init] {alignment = 8 : i64} -// CHECK: %6 = cir.alloca ![[VEC_IT]], cir.ptr , ["__end1", init] {alignment = 8 : i64} -// CHECK: %7 = cir.alloca !cir.ptr, cir.ptr >, ["image", init] {alignment = 8 : i64} -// CHECK: cir.store %1, %4 : !cir.ptr, cir.ptr > -// CHECK: %8 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: %4 = cir.alloca !cir.ptr, !cir.ptr>, ["__range1", init] {alignment = 8 : i64} +// CHECK: %5 = cir.alloca ![[VEC_IT]], !cir.ptr, ["__begin1", init] {alignment = 8 : i64} +// CHECK: %6 = cir.alloca ![[VEC_IT]], !cir.ptr, ["__end1", init] {alignment = 8 : i64} +// CHECK: %7 = cir.alloca !cir.ptr, !cir.ptr>, ["image", init] {alignment = 8 : i64} +// CHECK: cir.store %1, %4 : !cir.ptr, !cir.ptr> +// CHECK: %8 = cir.load %4 : !cir.ptr>, !cir.ptr // CHECK: %9 = cir.call @_ZNSt6vectorI6tripleE5beginEv(%8) : (!cir.ptr) -> ![[VEC_IT]] -// CHECK: cir.store %9, %5 : ![[VEC_IT]], cir.ptr -// CHECK: %10 = cir.load %4 : cir.ptr >, !cir.ptr +// CHECK: cir.store %9, %5 : ![[VEC_IT]], !cir.ptr +// CHECK: %10 = cir.load %4 : !cir.ptr>, !cir.ptr // CHECK: %11 = cir.call @_ZNSt6vectorI6tripleE3endEv(%10) : (!cir.ptr) -> ![[VEC_IT]] -// CHECK: cir.store %11, %6 : ![[VEC_IT]], cir.ptr +// CHECK: cir.store %11, %6 : ![[VEC_IT]], !cir.ptr // CHECK: cir.for : cond { // CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool // CHECK: cir.condition(%12) // CHECK: } body { // CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EdeEv(%5) : (!cir.ptr) -> !cir.ptr -// CHECK: cir.store %12, %7 : !cir.ptr, cir.ptr > +// CHECK: cir.store %12, %7 : !cir.ptr, !cir.ptr> // CHECK: cir.scope { -// CHECK: %13 = cir.alloca !ty_22triple22, cir.ptr , ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %13 = cir.alloca !ty_22triple22, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} // CHECK: %14 = cir.const(#cir.zero : !ty_22triple22) : !ty_22triple22 -// CHECK: cir.store %14, %13 : !ty_22triple22, cir.ptr +// CHECK: cir.store %14, %13 : !ty_22triple22, !cir.ptr // CHECK: %15 = cir.get_member %13[0] {name = "type"} : !cir.ptr -> !cir.ptr // CHECK: %16 = cir.const(#cir.int<1000024002> : !u32i) : !u32i -// CHECK: cir.store %16, %15 : !u32i, cir.ptr +// CHECK: cir.store %16, %15 : !u32i, !cir.ptr // CHECK: %17 = cir.get_member %13[1] {name = "next"} : !cir.ptr -> !cir.ptr> // CHECK: %18 = cir.get_member %13[2] {name = "image"} : !cir.ptr -> !cir.ptr -// CHECK: %19 = cir.load %7 : cir.ptr >, !cir.ptr +// CHECK: %19 = cir.load %7 : !cir.ptr>, !cir.ptr // CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } // CHECK: cir.yield diff --git a/clang/test/CIR/CodeGen/return.cpp b/clang/test/CIR/CodeGen/return.cpp index ee7eef915c38..54855b003d06 100644 --- a/clang/test/CIR/CodeGen/return.cpp +++ b/clang/test/CIR/CodeGen/return.cpp @@ -5,10 +5,10 @@ int &ret0(int &x) { } // CHECK: cir.func @_Z4ret0Ri -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["__retval"] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: cir.store %2, %1 : !cir.ptr, cir.ptr > -// CHECK: %3 = cir.load %1 : cir.ptr >, !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["x", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: cir.store %2, %1 : !cir.ptr, !cir.ptr> +// CHECK: %3 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK: cir.return %3 : !cir.ptr diff --git a/clang/test/CIR/CodeGen/scope.cir b/clang/test/CIR/CodeGen/scope.cir index 813862e7c2fb..0c4a5df73fae 100644 --- a/clang/test/CIR/CodeGen/scope.cir +++ b/clang/test/CIR/CodeGen/scope.cir @@ -5,18 +5,18 @@ module { cir.func @foo() { cir.scope { - %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<4> : !u32i) : !u32i - cir.store %1, %0 : !u32i, cir.ptr + cir.store %1, %0 : !u32i, !cir.ptr } cir.return } // CHECK: cir.func @foo() { // CHECK: cir.br ^bb1 // CHECK: ^bb1: // pred: ^bb0 -// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK: %1 = cir.const(#cir.int<4> : !u32i) : !u32i -// CHECK: cir.store %1, %0 : !u32i, cir.ptr +// CHECK: cir.store %1, %0 : !u32i, !cir.ptr // CHECK: cir.br ^bb2 // CHECK: ^bb2: // pred: ^bb1 // CHECK: cir.return @@ -33,27 +33,27 @@ module { // CHECK: } cir.func @scope_with_return() -> !u32i { - %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} cir.scope { %2 = cir.const(#cir.int<0> : !u32i) : !u32i - cir.store %2, %0 : !u32i, cir.ptr - %3 = cir.load %0 : cir.ptr , !u32i + cir.store %2, %0 : !u32i, !cir.ptr + %3 = cir.load %0 : !cir.ptr, !u32i cir.return %3 : !u32i } - %1 = cir.load %0 : cir.ptr , !u32i + %1 = cir.load %0 : !cir.ptr, !u32i cir.return %1 : !u32i } // CHECK: cir.func @scope_with_return() -> !u32i { -// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK: cir.br ^bb1 // CHECK: ^bb1: // pred: ^bb0 // CHECK: %1 = cir.const(#cir.int<0> : !u32i) : !u32i -// CHECK: cir.store %1, %0 : !u32i, cir.ptr -// CHECK: %2 = cir.load %0 : cir.ptr , !u32i +// CHECK: cir.store %1, %0 : !u32i, !cir.ptr +// CHECK: %2 = cir.load %0 : !cir.ptr, !u32i // CHECK: cir.return %2 : !u32i // CHECK: ^bb2: // no predecessors -// CHECK: %3 = cir.load %0 : cir.ptr , !u32i +// CHECK: %3 = cir.load %0 : !cir.ptr, !u32i // CHECK: cir.return %3 : !u32i // CHECK: } diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 9c1fe9760c8c..b8122b3acb7f 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -20,31 +20,31 @@ int s0(int a, int b) { // CIR: #loc22 = loc(fused[#loc5, #loc6]) // CIR: module @"{{.*}}sourcelocation.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior // CIR: cir.func @_Z2s0ii(%arg0: !s32i loc(fused[#loc3, #loc4]), %arg1: !s32i loc(fused[#loc5, #loc6])) -> !s32i -// CIR: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} loc(#loc21) -// CIR: %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} loc(#loc22) -// CIR: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} loc(#loc2) -// CIR: %3 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} loc(#loc23) -// CIR: cir.store %arg0, %0 : !s32i, cir.ptr loc(#loc9) -// CIR: cir.store %arg1, %1 : !s32i, cir.ptr loc(#loc9) -// CIR: %4 = cir.load %0 : cir.ptr , !s32i loc(#loc10) -// CIR: %5 = cir.load %1 : cir.ptr , !s32i loc(#loc8) +// CIR: %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} loc(#loc21) +// CIR: %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} loc(#loc22) +// CIR: %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} loc(#loc2) +// CIR: %3 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} loc(#loc23) +// CIR: cir.store %arg0, %0 : !s32i, !cir.ptr loc(#loc9) +// CIR: cir.store %arg1, %1 : !s32i, !cir.ptr loc(#loc9) +// CIR: %4 = cir.load %0 : !cir.ptr, !s32i loc(#loc10) +// CIR: %5 = cir.load %1 : !cir.ptr, !s32i loc(#loc8) // CIR: %6 = cir.binop(add, %4, %5) : !s32i loc(#loc24) -// CIR: cir.store %6, %3 : !s32i, cir.ptr loc(#loc23) +// CIR: cir.store %6, %3 : !s32i, !cir.ptr loc(#loc23) // CIR: cir.scope { -// CIR: %9 = cir.load %3 : cir.ptr , !s32i loc(#loc13) +// CIR: %9 = cir.load %3 : !cir.ptr, !s32i loc(#loc13) // CIR: %10 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc14) // CIR: %11 = cir.cmp(gt, %9, %10) : !s32i, !cir.bool loc(#loc26) // CIR: cir.if %11 { // CIR: %12 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc16) -// CIR: cir.store %12, %3 : !s32i, cir.ptr loc(#loc28) +// CIR: cir.store %12, %3 : !s32i, !cir.ptr loc(#loc28) // CIR: } else { // CIR: %12 = cir.const(#cir.int<1> : !s32i) : !s32i loc(#loc12) -// CIR: cir.store %12, %3 : !s32i, cir.ptr loc(#loc29) +// CIR: cir.store %12, %3 : !s32i, !cir.ptr loc(#loc29) // CIR: } loc(#loc27) // CIR: } loc(#loc25) -// CIR: %7 = cir.load %3 : cir.ptr , !s32i loc(#loc18) -// CIR: cir.store %7, %2 : !s32i, cir.ptr loc(#loc30) -// CIR: %8 = cir.load %2 : cir.ptr , !s32i loc(#loc30) +// CIR: %7 = cir.load %3 : !cir.ptr, !s32i loc(#loc18) +// CIR: cir.store %7, %2 : !s32i, !cir.ptr loc(#loc30) +// CIR: %8 = cir.load %2 : !cir.ptr, !s32i loc(#loc30) // CIR: cir.return %8 : !s32i loc(#loc30) // CIR: } loc(#loc20) // CIR: } loc(#loc) diff --git a/clang/test/CIR/CodeGen/static-vars.c b/clang/test/CIR/CodeGen/static-vars.c index 4981052bc9ac..cec8544fc967 100644 --- a/clang/test/CIR/CodeGen/static-vars.c +++ b/clang/test/CIR/CodeGen/static-vars.c @@ -22,10 +22,10 @@ void func1(void) { // Should lower basic static vars arithmetics. j++; - // CHECK-DAG: %[[#V2:]] = cir.get_global @func1.j : cir.ptr - // CHECK-DAG: %[[#V3:]] = cir.load %[[#V2]] : cir.ptr , !s32i + // CHECK-DAG: %[[#V2:]] = cir.get_global @func1.j : !cir.ptr + // CHECK-DAG: %[[#V3:]] = cir.load %[[#V2]] : !cir.ptr, !s32i // CHECK-DAG: %[[#V4:]] = cir.unary(inc, %[[#V3]]) : !s32i, !s32i - // CHECK-DAG: cir.store %[[#V4]], %[[#V2]] : !s32i, cir.ptr + // CHECK-DAG: cir.store %[[#V4]], %[[#V2]] : !s32i, !cir.ptr } // Should shadow static vars on different functions. diff --git a/clang/test/CIR/CodeGen/static-vars.cpp b/clang/test/CIR/CodeGen/static-vars.cpp index 1a075b7d968a..bc971d3d9cee 100644 --- a/clang/test/CIR/CodeGen/static-vars.cpp +++ b/clang/test/CIR/CodeGen/static-vars.cpp @@ -22,10 +22,10 @@ void func1(void) { // Should lower basic static vars arithmetics. j++; - // CHECK-DAG: %[[#V2:]] = cir.get_global @_ZZ5func1vE1j : cir.ptr - // CHECK-DAG: %[[#V3:]] = cir.load %[[#V2]] : cir.ptr , !s32i + // CHECK-DAG: %[[#V2:]] = cir.get_global @_ZZ5func1vE1j : !cir.ptr + // CHECK-DAG: %[[#V3:]] = cir.load %[[#V2]] : !cir.ptr, !s32i // CHECK-DAG: %[[#V4:]] = cir.unary(inc, %[[#V3]]) : !s32i, !s32i - // CHECK-DAG: cir.store %[[#V4]], %[[#V2]] : !s32i, cir.ptr + // CHECK-DAG: cir.store %[[#V4]], %[[#V2]] : !s32i, !cir.ptr } // Should shadow static vars on different functions. diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 998bd5c6457d..390dd8fb456a 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -20,19 +20,19 @@ static Init __ioinit2(false); // BEFORE-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) // BEFORE-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) // BEFORE-NEXT: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { -// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // BEFORE-NEXT: %1 = cir.const(#true) : !cir.bool // BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // BEFORE-NEXT: } dtor { -// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () // BEFORE-NEXT: } {ast = #cir.var.decl.ast} // BEFORE: cir.global "private" internal @_ZL9__ioinit2 = ctor : !ty_22Init22 { -// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // BEFORE-NEXT: %1 = cir.const(#false) : !cir.bool // BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // BEFORE-NEXT: } dtor { -// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () // BEFORE-NEXT: } {ast = #cir.var.decl.ast} // BEFORE-NEXT: } @@ -45,26 +45,26 @@ static Init __ioinit2(false); // AFTER-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) // AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init() -// AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // AFTER-NEXT: %1 = cir.const(#true) : !cir.bool // AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () -// AFTER-NEXT: %2 = cir.get_global @_ZL8__ioinit : cir.ptr -// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : cir.ptr )>> +// AFTER-NEXT: %2 = cir.get_global @_ZL8__ioinit : !cir.ptr +// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> // AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> // AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr -// AFTER-NEXT: %6 = cir.get_global @__dso_handle : cir.ptr +// AFTER-NEXT: %6 = cir.get_global @__dso_handle : !cir.ptr // AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return // AFTER: cir.global "private" internal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() -// AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : cir.ptr +// AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // AFTER-NEXT: %1 = cir.const(#false) : !cir.bool // AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () -// AFTER-NEXT: %2 = cir.get_global @_ZL9__ioinit2 : cir.ptr -// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : cir.ptr )>> +// AFTER-NEXT: %2 = cir.get_global @_ZL9__ioinit2 : !cir.ptr +// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> // AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> // AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr -// AFTER-NEXT: %6 = cir.get_global @__dso_handle : cir.ptr +// AFTER-NEXT: %6 = cir.get_global @__dso_handle : !cir.ptr // AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return // AFTER: cir.func private @_GLOBAL__sub_I_static.cpp() diff --git a/clang/test/CIR/CodeGen/std-find.cpp b/clang/test/CIR/CodeGen/std-find.cpp index 3b043a6e3766..ec3ac05eb23a 100644 --- a/clang/test/CIR/CodeGen/std-find.cpp +++ b/clang/test/CIR/CodeGen/std-find.cpp @@ -10,7 +10,7 @@ int test_find(unsigned char n = 3) // CHECK: cir.func @_Z9test_findh(%arg0: !u8i unsigned num_found = 0; std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; - // CHECK: %[[array_addr:.*]] = cir.alloca ![[array]], cir.ptr , ["v"] + // CHECK: %[[array_addr:.*]] = cir.alloca ![[array]], !cir.ptr, ["v"] auto f = std::find(v.begin(), v.end(), n); // CHECK: {{.*}} cir.call @_ZNSt5arrayIhLj9EE5beginEv(%[[array_addr]]) diff --git a/clang/test/CIR/CodeGen/stmt-expr.c b/clang/test/CIR/CodeGen/stmt-expr.c index 8029358887e0..d3944cb72505 100644 --- a/clang/test/CIR/CodeGen/stmt-expr.c +++ b/clang/test/CIR/CodeGen/stmt-expr.c @@ -11,27 +11,27 @@ void test1() { ({ }); } // Yields an out-of-scope scalar. void test2() { ({int x = 3; x; }); } // CHECK: @test2 -// CHECK: %[[#RETVAL:]] = cir.alloca !s32i, cir.ptr +// CHECK: %[[#RETVAL:]] = cir.alloca !s32i, !cir.ptr // CHECK: cir.scope { -// CHECK: %[[#VAR:]] = cir.alloca !s32i, cir.ptr , ["x", init] +// CHECK: %[[#VAR:]] = cir.alloca !s32i, !cir.ptr, ["x", init] // [...] -// CHECK: %[[#TMP:]] = cir.load %[[#VAR]] : cir.ptr , !s32i -// CHECK: cir.store %[[#TMP]], %[[#RETVAL]] : !s32i, cir.ptr +// CHECK: %[[#TMP:]] = cir.load %[[#VAR]] : !cir.ptr, !s32i +// CHECK: cir.store %[[#TMP]], %[[#RETVAL]] : !s32i, !cir.ptr // CHECK: } -// CHECK: %{{.+}} = cir.load %[[#RETVAL]] : cir.ptr , !s32i +// CHECK: %{{.+}} = cir.load %[[#RETVAL]] : !cir.ptr, !s32i // Yields an aggregate. struct S { int x; }; int test3() { return ({ struct S s = {1}; s; }).x; } // CHECK: @test3 -// CHECK: %[[#RETVAL:]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: %[[#RETVAL:]] = cir.alloca !ty_22S22, !cir.ptr // CHECK: cir.scope { -// CHECK: %[[#VAR:]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: %[[#VAR:]] = cir.alloca !ty_22S22, !cir.ptr // [...] // CHECK: cir.copy %[[#VAR]] to %[[#RETVAL]] : !cir.ptr // CHECK: } // CHECK: %[[#RETADDR:]] = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr -// CHECK: %{{.+}} = cir.load %[[#RETADDR]] : cir.ptr , !s32i +// CHECK: %{{.+}} = cir.load %[[#RETADDR]] : !cir.ptr, !s32i // Expression is wrapped in an expression attribute (just ensure it does not crash). void test4(int x) { ({[[gsl::suppress("foo")]] x;}); } diff --git a/clang/test/CIR/CodeGen/stmt-expr.cpp b/clang/test/CIR/CodeGen/stmt-expr.cpp index d9d619f70a92..9d6ba7466855 100644 --- a/clang/test/CIR/CodeGen/stmt-expr.cpp +++ b/clang/test/CIR/CodeGen/stmt-expr.cpp @@ -19,9 +19,9 @@ void test1() { } // CHECK: @_Z5test1v // CHECK: cir.scope { -// CHECK: %[[#RETVAL:]] = cir.alloca !ty_22A22, cir.ptr +// CHECK: %[[#RETVAL:]] = cir.alloca !ty_22A22, !cir.ptr // CHECK: cir.scope { -// CHECK: %[[#VAR:]] = cir.alloca !ty_22A22, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: %[[#VAR:]] = cir.alloca !ty_22A22, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK: cir.call @_ZN1AC1Ev(%[[#VAR]]) : (!cir.ptr) -> () // CHECK: cir.call @_ZN1AC1ERS_(%[[#RETVAL]], %[[#VAR]]) : (!cir.ptr, !cir.ptr) -> () // TODO(cir): the local VAR should be destroyed here. diff --git a/clang/test/CIR/CodeGen/store.c b/clang/test/CIR/CodeGen/store.c index 14e8d8a37fdb..3e33641a2dd7 100644 --- a/clang/test/CIR/CodeGen/store.c +++ b/clang/test/CIR/CodeGen/store.c @@ -7,10 +7,10 @@ void foo(void) { } // CHECK: cir.func @foo() -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr // CHECK-NEXT: %2 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK-NEXT: cir.store %2, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %2, %0 : !s32i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 8005d5ce0dc0..88be38d9df3d 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -27,17 +27,17 @@ void baz(void) { // CHECK-DAG: !ty_22Foo22 = !cir.struct // CHECK-DAG: module {{.*}} { // CHECK: cir.func @baz() -// CHECK-NEXT: %0 = cir.alloca !ty_22Bar22, cir.ptr , ["b"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !ty_22Foo22, cir.ptr , ["f"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22Bar22, !cir.ptr, ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !ty_22Foo22, !cir.ptr, ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.return // CHECK-NEXT: } void shouldConstInitStructs(void) { // CHECK: cir.func @shouldConstInitStructs struct Foo f = {1, 2, {3, 4}}; - // CHECK: %[[#V0:]] = cir.alloca !ty_22Foo22, cir.ptr , ["f"] {alignment = 4 : i64} + // CHECK: %[[#V0:]] = cir.alloca !ty_22Foo22, !cir.ptr, ["f"] {alignment = 4 : i64} // CHECK: %[[#V1:]] = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_22Bar22}> : !ty_22Foo22) : !ty_22Foo22 - // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22Foo22, cir.ptr + // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22Foo22, !cir.ptr } // Should zero-initialize uninitialized global structs. @@ -71,7 +71,7 @@ struct S3 { void shouldCopyStructAsCallArg(struct S1 s) { // CHECK-DAG: cir.func @shouldCopyStructAsCallArg shouldCopyStructAsCallArg(s); - // CHECK-DAG: %[[#LV:]] = cir.load %{{.+}} : cir.ptr , !ty_22S122 + // CHECK-DAG: %[[#LV:]] = cir.load %{{.+}} : !cir.ptr, !ty_22S122 // CHECK-DAG: cir.call @shouldCopyStructAsCallArg(%[[#LV]]) : (!ty_22S122) -> () } @@ -86,7 +86,7 @@ struct Bar shouldGenerateAndAccessStructArrays(void) { // CHECK-DAG: cir.copy %[[#ELT]] to %{{.+}} : !cir.ptr // CHECK-DAG: cir.func @local_decl -// CHECK-DAG: {{%.}} = cir.alloca !ty_22Local22, cir.ptr , ["a"] +// CHECK-DAG: {{%.}} = cir.alloca !ty_22Local22, !cir.ptr, ["a"] void local_decl(void) { struct Local { int i; diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index a4df34c3bd8d..75e3e694d800 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -35,44 +35,44 @@ void yoyo(incomplete *i) {} // CHECK-DAG: !ty_22Entry22 = !cir.struct, !cir.ptr)>>}> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg1, %1 : !s32i, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK-NEXT: cir.store %arg1, %1 : !s32i, cir.ptr -// CHECK-NEXT: %3 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , !s32i -// CHECK-NEXT: cir.store %4, %2 : !s32i, cir.ptr -// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg1, %1 : !s32i, !cir.ptr +// CHECK-NEXT: %3 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %4 = cir.load %1 : !cir.ptr, !s32i +// CHECK-NEXT: cir.store %4, %2 : !s32i, !cir.ptr +// CHECK-NEXT: %5 = cir.load %2 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %5 // CHECK-NEXT: } // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !ty_22Bar22, cir.ptr , ["b"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["result", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca !ty_22Foo22, cir.ptr , ["f"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_22Bar22, !cir.ptr, ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["result", init] {alignment = 4 : i64} +// CHECK-NEXT: %2 = cir.alloca !ty_22Foo22, !cir.ptr, ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () // CHECK-NEXT: %3 = cir.const(#cir.int<4> : !s32i) : !s32i // CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, !s32i) -> () // CHECK-NEXT: %4 = cir.const(#cir.int<4> : !s32i) : !s32i // CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, !s32i) -> !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } @@ -95,19 +95,19 @@ class Adv { void m() { Adv C; } // CHECK: cir.func linkonce_odr @_ZN3AdvC2Ev(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %2 = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr // CHECK: %3 = cir.get_member %2[0] {name = "w"} : !cir.ptr -> !cir.ptr // CHECK: %4 = cir.const(#cir.int<1000024001> : !u32i) : !u32i -// CHECK: cir.store %4, %3 : !u32i, cir.ptr +// CHECK: cir.store %4, %3 : !u32i, !cir.ptr // CHECK: %5 = cir.get_member %2[1] {name = "n"} : !cir.ptr -> !cir.ptr> // CHECK: %6 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK: cir.store %6, %5 : !cir.ptr, cir.ptr > +// CHECK: cir.store %6, %5 : !cir.ptr, !cir.ptr> // CHECK: %7 = cir.get_member %2[2] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK: %8 = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK: cir.store %8, %7 : !s32i, cir.ptr +// CHECK: cir.store %8, %7 : !s32i, !cir.ptr // CHECK: cir.return // CHECK: } @@ -126,9 +126,9 @@ struct A arrConstInit[1] = {{1}}; // Should locally copy struct members. void shouldLocallyCopyStructAssignments(void) { struct A a = { 3 }; - // CHECK: %[[#SA:]] = cir.alloca !ty_22A22, cir.ptr , ["a"] {alignment = 4 : i64} + // CHECK: %[[#SA:]] = cir.alloca !ty_22A22, !cir.ptr, ["a"] {alignment = 4 : i64} struct A b = a; - // CHECK: %[[#SB:]] = cir.alloca !ty_22A22, cir.ptr , ["b", init] {alignment = 4 : i64} + // CHECK: %[[#SB:]] = cir.alloca !ty_22A22, !cir.ptr, ["b", init] {alignment = 4 : i64} // cir.copy %[[#SA]] to %[[SB]] : !cir.ptr } @@ -141,11 +141,11 @@ struct S { void h() { S s; } // CHECK: cir.func @_Z1hv() -// CHECK: %0 = cir.alloca !ty_22S22, cir.ptr , ["s", init] {alignment = 1 : i64} -// CHECK: %1 = cir.alloca !ty_22A22, cir.ptr , ["agg.tmp0"] {alignment = 4 : i64} +// CHECK: %0 = cir.alloca !ty_22S22, !cir.ptr, ["s", init] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !ty_22A22, !cir.ptr, ["agg.tmp0"] {alignment = 4 : i64} // CHECK: %2 = cir.call @_Z11get_defaultv() : () -> !ty_22A22 -// CHECK: cir.store %2, %1 : !ty_22A22, cir.ptr -// CHECK: %3 = cir.load %1 : cir.ptr , !ty_22A22 +// CHECK: cir.store %2, %1 : !ty_22A22, !cir.ptr +// CHECK: %3 = cir.load %1 : !cir.ptr, !ty_22A22 // CHECK: cir.call @_ZN1SC1E1A(%0, %3) : (!cir.ptr, !ty_22A22) -> () // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/switch.cir b/clang/test/CIR/CodeGen/switch.cir index da99dffa1fa8..39664bfa3957 100644 --- a/clang/test/CIR/CodeGen/switch.cir +++ b/clang/test/CIR/CodeGen/switch.cir @@ -121,10 +121,10 @@ module { cir.func @shouldFlatMultiBlockCase(%arg0: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr cir.scope { - %1 = cir.load %0 : cir.ptr , !s32i + %1 = cir.load %0 : !cir.ptr, !s32i cir.switch (%1 : !s32i) [ case (equal, 3) { cir.return @@ -137,11 +137,11 @@ module { } // CHECK: cir.func @shouldFlatMultiBlockCase(%arg0: !s32i) { -// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr +// CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !s32i, !cir.ptr // CHECK: cir.br ^bb1 // CHECK: ^bb1: // pred: ^bb0 -// CHECK: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK: %1 = cir.load %0 : !cir.ptr, !s32i // CHECK: cir.switch.flat %1 : !s32i, ^bb4 [ // CHECK: 3: ^bb2 // CHECK: ] @@ -157,17 +157,17 @@ module { cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} - %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - cir.store %arg1, %1 : !s32i, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.store %arg1, %1 : !s32i, !cir.ptr cir.scope { - %5 = cir.load %0 : cir.ptr , !s32i + %5 = cir.load %0 : !cir.ptr, !s32i cir.switch (%5 : !s32i) [ case (equal, 0) { cir.scope { - %6 = cir.load %1 : cir.ptr , !s32i + %6 = cir.load %1 : !cir.ptr, !s32i %7 = cir.const(#cir.int<0> : !s32i) : !s32i %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool @@ -180,8 +180,8 @@ module { ] } %3 = cir.const(#cir.int<3> : !s32i) : !s32i - cir.store %3, %2 : !s32i, cir.ptr - %4 = cir.load %2 : cir.ptr , !s32i + cir.store %3, %2 : !s32i, !cir.ptr + %4 = cir.load %2 : !cir.ptr, !s32i cir.return %4 : !s32i } // CHECK: cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index b378c7364475..7ba1185c660c 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -18,10 +18,10 @@ void sw1(int a) { // CHECK: cir.func @_Z3sw1i // CHECK: cir.switch (%3 : !s32i) [ // CHECK-NEXT: case (equal, 0) { -// CHECK-NEXT: %4 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i -// CHECK-NEXT: cir.store %6, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %6, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.break // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 1) { @@ -29,13 +29,13 @@ void sw1(int a) { // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 2) { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %4 = cir.alloca !s32i, cir.ptr , ["yolo", init] -// CHECK-NEXT: %5 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %4 = cir.alloca !s32i, !cir.ptr, ["yolo", init] +// CHECK-NEXT: %5 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %6 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i -// CHECK-NEXT: cir.store %7, %1 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %7, %1 : !s32i, !cir.ptr // CHECK-NEXT: %8 = cir.const(#cir.int<100> : !s32i) : !s32i -// CHECK-NEXT: cir.store %8, %4 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %8, %4 : !s32i, !cir.ptr // CHECK-NEXT: cir.break // CHECK-NEXT: } // CHECK-NEXT: cir.yield @@ -53,12 +53,12 @@ void sw2(int a) { // CHECK: cir.func @_Z3sw2i // CHECK: cir.scope { -// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["yolo", init] -// CHECK-NEXT: %2 = cir.alloca !s32i, cir.ptr , ["fomo", init] +// CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["yolo", init] +// CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["fomo", init] // CHECK: cir.switch (%4 : !s32i) [ // CHECK-NEXT: case (equal, 3) { // CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK-NEXT: cir.store %5, %2 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %5, %2 : !s32i, !cir.ptr void sw3(int a) { switch (a) { @@ -69,7 +69,7 @@ void sw3(int a) { // CHECK: cir.func @_Z3sw3i // CHECK: cir.scope { -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: cir.switch (%1 : !s32i) [ // CHECK-NEXT: case (default) { // CHECK-NEXT: cir.break @@ -92,16 +92,16 @@ int sw4(int a) { // CHECK-NEXT: case (equal, 42) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr -// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: %6 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %6 : !s32i // CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: }, // CHECK-NEXT: case (default) { // CHECK-NEXT: %5 = cir.const(#cir.int<2> : !s32i) : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, cir.ptr -// CHECK-NEXT: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: %6 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %6 : !s32i // CHECK-NEXT: } // CHECK-NEXT: ] diff --git a/clang/test/CIR/CodeGen/ternary.cir b/clang/test/CIR/CodeGen/ternary.cir index 1589fee6f6be..715061f15fb9 100644 --- a/clang/test/CIR/CodeGen/ternary.cir +++ b/clang/test/CIR/CodeGen/ternary.cir @@ -4,10 +4,10 @@ module { cir.func @foo(%arg0: !s32i) -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - %2 = cir.load %0 : cir.ptr , !s32i + %0 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<0> : !s32i) : !s32i %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool %5 = cir.ternary(%4, true { @@ -17,16 +17,16 @@ module { %7 = cir.const(#cir.int<5> : !s32i) : !s32i cir.yield %7 : !s32i }) : (!cir.bool) -> !s32i - cir.store %5, %1 : !s32i, cir.ptr - %6 = cir.load %1 : cir.ptr , !s32i + cir.store %5, %1 : !s32i, !cir.ptr + %6 = cir.load %1 : !cir.ptr, !s32i cir.return %6 : !s32i } // CHECK: cir.func @foo(%arg0: !s32i) -> !s32i { -// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr -// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !s32i, !cir.ptr +// CHECK: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool // CHECK: cir.brcond %4 ^bb1, ^bb2 @@ -39,8 +39,8 @@ module { // CHECK: ^bb3(%7: !s32i): // 2 preds: ^bb1, ^bb2 // CHECK: cir.br ^bb4 // CHECK: ^bb4: // pred: ^bb3 -// CHECK: cir.store %7, %1 : !s32i, cir.ptr -// CHECK: %8 = cir.load %1 : cir.ptr , !s32i +// CHECK: cir.store %7, %1 : !s32i, !cir.ptr +// CHECK: %8 = cir.load %1 : !cir.ptr, !s32i // CHECK: cir.return %8 : !s32i // CHECK: } diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp index 5ce164624409..452745633d85 100644 --- a/clang/test/CIR/CodeGen/ternary.cpp +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -6,10 +6,10 @@ int x(int y) { } // CHECK: cir.func @_Z1xi -// CHECK: %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} -// CHECK: cir.store %arg0, %0 : !s32i, cir.ptr -// CHECK: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} +// CHECK: %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !s32i, !cir.ptr +// CHECK: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool // CHECK: %5 = cir.ternary(%4, true { @@ -19,8 +19,8 @@ int x(int y) { // CHECK: %7 = cir.const(#cir.int<5> : !s32i) : !s32i // CHECK: cir.yield %7 : !s32i // CHECK: }) : (!cir.bool) -> !s32i -// CHECK: cir.store %5, %1 : !s32i, cir.ptr -// CHECK: %6 = cir.load %1 : cir.ptr , !s32i +// CHECK: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK: %6 = cir.load %1 : !cir.ptr, !s32i // CHECK: cir.return %6 : !s32i // CHECK: } @@ -36,9 +36,9 @@ void m(APIType api) { } // CHECK: cir.func @_Z1m7APIType -// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["api", init] {alignment = 4 : i64} -// CHECK: cir.store %arg0, %0 : !u32i, cir.ptr -// CHECK: %1 = cir.load %0 : cir.ptr , !u32i +// CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["api", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %0 : !u32i, !cir.ptr +// CHECK: %1 = cir.load %0 : !cir.ptr, !u32i // CHECK: %2 = cir.cast(integral, %1 : !u32i), !s32i // CHECK: %3 = cir.const(#cir.int<0> : !u32i) : !u32i // CHECK: %4 = cir.cast(integral, %3 : !u32i), !s32i @@ -47,7 +47,7 @@ void m(APIType api) { // CHECK: %6 = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: cir.yield // CHECK: }, false { -// CHECK: %6 = cir.get_global @".str" : cir.ptr > +// CHECK: %6 = cir.get_global @".str" : !cir.ptr> // CHECK: %7 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr // CHECK: cir.call @_Z3obaPKc(%7) : (!cir.ptr) -> () // CHECK: cir.yield @@ -62,14 +62,14 @@ int foo(int a, int b) { } // CHECK: cir.func @_Z3fooii -// CHECK: [[A0:%.*]] = cir.load {{.*}} : cir.ptr , !s32i -// CHECK: [[B0:%.*]] = cir.load {{.*}} : cir.ptr , !s32i +// CHECK: [[A0:%.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: [[B0:%.*]] = cir.load {{.*}} : !cir.ptr, !s32i // CHECK: [[CMP:%.*]] = cir.cmp(lt, [[A0]], [[B0]]) : !s32i, !cir.bool // CHECK: [[RES:%.*]] = cir.ternary([[CMP]], true { // CHECK: [[ZERO:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: cir.yield [[ZERO]] : !s32i // CHECK: }, false { -// CHECK: [[A1:%.*]] = cir.load {{.*}} : cir.ptr , !s32i +// CHECK: [[A1:%.*]] = cir.load {{.*}} : !cir.ptr, !s32i // CHECK: cir.yield [[A1]] : !s32i // CHECK: }) : (!cir.bool) -> !s32i // CHECK: [[RES_CAST:%.*]] = cir.cast(int_to_bool, [[RES]] : !s32i), !cir.bool diff --git a/clang/test/CIR/CodeGen/three-way-comparison.cpp b/clang/test/CIR/CodeGen/three-way-comparison.cpp index c6aee921f24e..074c5c70f4a1 100644 --- a/clang/test/CIR/CodeGen/three-way-comparison.cpp +++ b/clang/test/CIR/CodeGen/three-way-comparison.cpp @@ -65,8 +65,8 @@ auto three_way_weak(float x, float y) { // BEFORE: } // AFTER: cir.func @_Z14three_way_weakff -// AFTER: %[[#LHS:]] = cir.load %0 : cir.ptr , !cir.float -// AFTER-NEXT: %[[#RHS:]] = cir.load %1 : cir.ptr , !cir.float +// AFTER: %[[#LHS:]] = cir.load %0 : !cir.ptr, !cir.float +// AFTER-NEXT: %[[#RHS:]] = cir.load %1 : !cir.ptr, !cir.float // AFTER-NEXT: %[[#LT:]] = cir.const(#cir.int<-1> : !s8i) : !s8i // AFTER-NEXT: %[[#EQ:]] = cir.const(#cir.int<0> : !s8i) : !s8i // AFTER-NEXT: %[[#GT:]] = cir.const(#cir.int<1> : !s8i) : !s8i diff --git a/clang/test/CIR/CodeGen/throw.cpp b/clang/test/CIR/CodeGen/throw.cpp index 9c390ebb8136..0ae33db072cb 100644 --- a/clang/test/CIR/CodeGen/throw.cpp +++ b/clang/test/CIR/CodeGen/throw.cpp @@ -9,8 +9,8 @@ double d(int a, int b) { // CHECK: cir.if %10 { // CHECK-NEXT: %11 = cir.alloc_exception(!cir.ptr) -> > -// CHECK-NEXT: %12 = cir.get_global @".str" : cir.ptr > +// CHECK-NEXT: %12 = cir.get_global @".str" : !cir.ptr> // CHECK-NEXT: %13 = cir.cast(array_to_ptrdecay, %12 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store %13, %11 : !cir.ptr, cir.ptr > +// CHECK-NEXT: cir.store %13, %11 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.throw(%11 : !cir.ptr>, @_ZTIPKc) // CHECK-NEXT: } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/tls.c b/clang/test/CIR/CodeGen/tls.c index 2a3ebda00744..499afad56ee2 100644 --- a/clang/test/CIR/CodeGen/tls.c +++ b/clang/test/CIR/CodeGen/tls.c @@ -7,7 +7,7 @@ extern __thread int b; int c(void) { return *&b; } // CIR: cir.global "private" external tls_dyn @b : !s32i // CIR: cir.func @c() -> !s32i -// CIR: %[[TLS_ADDR:.*]] = cir.get_global thread_local @b : cir.ptr +// CIR: %[[TLS_ADDR:.*]] = cir.get_global thread_local @b : !cir.ptr __thread int a; // CIR: cir.global external tls_dyn @a = #cir.int<0> : !s32i diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 47a29ebc90df..99ee97d76719 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -10,12 +10,12 @@ unsigned long long tc() { try { // CHECK: cir.scope { - // CHECK: %[[msg:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["msg"] - // CHECK: %[[idx:.*]] = cir.alloca !s32i, cir.ptr , ["idx"] + // CHECK: %[[msg:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["msg"] + // CHECK: %[[idx:.*]] = cir.alloca !s32i, !cir.ptr, ["idx"] // CHECK: %[[try_eh:.*]] = cir.try { - // CHECK: %[[eh_info:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["__exception_ptr"] - // CHECK: %[[local_a:.*]] = cir.alloca !s32i, cir.ptr , ["a", init] + // CHECK: %[[eh_info:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__exception_ptr"] + // CHECK: %[[local_a:.*]] = cir.alloca !s32i, !cir.ptr, ["a", init] int a = 4; z = division(x, y); // CHECK: %[[div_res:.*]] = cir.try_call exception(%[[eh_info]]) @_Z8divisionii({{.*}}) : (!cir.ptr>, !s32i, !s32i) -> !cir.double @@ -26,15 +26,15 @@ unsigned long long tc() { // CHECK: type (#cir.global_view<@_ZTIi> : !cir.ptr) // CHECK: { // CHECK: %[[catch_idx_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr - // CHECK: %[[idx_load:.*]] = cir.load %[[catch_idx_addr]] : cir.ptr , !s32i - // CHECK: cir.store %[[idx_load]], %[[idx]] : !s32i, cir.ptr + // CHECK: %[[idx_load:.*]] = cir.load %[[catch_idx_addr]] : !cir.ptr, !s32i + // CHECK: cir.store %[[idx_load]], %[[idx]] : !s32i, !cir.ptr z = 98; idx++; } catch (const char* msg) { // CHECK: type (#cir.global_view<@_ZTIPKc> : !cir.ptr) // CHECK: { // CHECK: %[[msg_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr - // CHECK: cir.store %[[msg_addr]], %[[msg]] : !cir.ptr, cir.ptr > + // CHECK: cir.store %[[msg_addr]], %[[msg]] : !cir.ptr, !cir.ptr> z = 99; (void)msg[0]; } // CHECK: #cir.unwind diff --git a/clang/test/CIR/CodeGen/typedef.c b/clang/test/CIR/CodeGen/typedef.c index aa55270ce13a..ea841c238b6f 100644 --- a/clang/test/CIR/CodeGen/typedef.c +++ b/clang/test/CIR/CodeGen/typedef.c @@ -6,5 +6,5 @@ void local_typedef() { } //CHECK: cir.func no_proto @local_typedef() -//CHECK: {{.*}} = cir.alloca !ty_22Struct22, cir.ptr , ["s"] {alignment = 4 : i64} +//CHECK: {{.*}} = cir.alloca !ty_22Struct22, !cir.ptr, ["s"] {alignment = 4 : i64} //CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/types-nullptr.cpp b/clang/test/CIR/CodeGen/types-nullptr.cpp index e84c386417a7..55f42be785fb 100644 --- a/clang/test/CIR/CodeGen/types-nullptr.cpp +++ b/clang/test/CIR/CodeGen/types-nullptr.cpp @@ -4,6 +4,6 @@ typedef decltype(nullptr) nullptr_t; void f() { nullptr_t t = nullptr; } -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr > +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr> // CHECK: %1 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK: cir.store %1, %0 : !cir.ptr, cir.ptr > +// CHECK: cir.store %1, %0 : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/unary-deref.cpp b/clang/test/CIR/CodeGen/unary-deref.cpp index 92eb404b1204..b5ceb4cceb7f 100644 --- a/clang/test/CIR/CodeGen/unary-deref.cpp +++ b/clang/test/CIR/CodeGen/unary-deref.cpp @@ -13,5 +13,5 @@ void foo() { // CHECK: cir.func linkonce_odr @_ZNK12MyIntPointer4readEv // CHECK: %2 = cir.load %0 // CHECK: %3 = cir.get_member %2[0] {name = "ptr"} -// CHECK: %4 = cir.load deref %3 : cir.ptr > +// CHECK: %4 = cir.load deref %3 : !cir.ptr> // CHECK: %5 = cir.load %4 \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/unary.c b/clang/test/CIR/CodeGen/unary.c index e364808f9579..d0ff62201343 100644 --- a/clang/test/CIR/CodeGen/unary.c +++ b/clang/test/CIR/CodeGen/unary.c @@ -4,24 +4,24 @@ void valueNegation(int i, short s, long l, float f, double d) { // CHECK: cir.func @valueNegation( !i; - // CHECK: %[[#INT:]] = cir.load %{{[0-9]+}} : cir.ptr , !s32i + // CHECK: %[[#INT:]] = cir.load %{{[0-9]+}} : !cir.ptr, !s32i // CHECK: %[[#INT_TO_BOOL:]] = cir.cast(int_to_bool, %[[#INT]] : !s32i), !cir.bool // CHECK: = cir.unary(not, %[[#INT_TO_BOOL]]) : !cir.bool, !cir.bool !s; - // CHECK: %[[#SHORT:]] = cir.load %{{[0-9]+}} : cir.ptr , !s16i + // CHECK: %[[#SHORT:]] = cir.load %{{[0-9]+}} : !cir.ptr, !s16i // CHECK: %[[#SHORT_TO_BOOL:]] = cir.cast(int_to_bool, %[[#SHORT]] : !s16i), !cir.bool // CHECK: = cir.unary(not, %[[#SHORT_TO_BOOL]]) : !cir.bool, !cir.bool !l; - // CHECK: %[[#LONG:]] = cir.load %{{[0-9]+}} : cir.ptr , !s64i + // CHECK: %[[#LONG:]] = cir.load %{{[0-9]+}} : !cir.ptr, !s64i // CHECK: %[[#LONG_TO_BOOL:]] = cir.cast(int_to_bool, %[[#LONG]] : !s64i), !cir.bool // CHECK: = cir.unary(not, %[[#LONG_TO_BOOL]]) : !cir.bool, !cir.bool !f; - // CHECK: %[[#FLOAT:]] = cir.load %{{[0-9]+}} : cir.ptr , !cir.float + // CHECK: %[[#FLOAT:]] = cir.load %{{[0-9]+}} : !cir.ptr, !cir.float // CHECK: %[[#FLOAT_TO_BOOL:]] = cir.cast(float_to_bool, %[[#FLOAT]] : !cir.float), !cir.bool // CHECK: %[[#FLOAT_NOT:]] = cir.unary(not, %[[#FLOAT_TO_BOOL]]) : !cir.bool, !cir.bool // CHECK: = cir.cast(bool_to_int, %[[#FLOAT_NOT]] : !cir.bool), !s32i !d; - // CHECK: %[[#DOUBLE:]] = cir.load %{{[0-9]+}} : cir.ptr , !cir.double + // CHECK: %[[#DOUBLE:]] = cir.load %{{[0-9]+}} : !cir.ptr, !cir.double // CHECK: %[[#DOUBLE_TO_BOOL:]] = cir.cast(float_to_bool, %[[#DOUBLE]] : !cir.double), !cir.bool // CHECK: %[[#DOUBLE_NOT:]] = cir.unary(not, %[[#DOUBLE_TO_BOOL]]) : !cir.bool, !cir.bool // CHECK: = cir.cast(bool_to_int, %[[#DOUBLE_NOT]] : !cir.bool), !s32i diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index 74a6c09b2f3c..f6d99af23b5a 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -7,8 +7,8 @@ unsigned up0() { } // CHECK: cir.func @_Z3up0v() -> !u32i -// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !u32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, !cir.ptr, ["a", init] // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#OUTPUT:]] = cir.unary(plus, %[[#INPUT]]) // CHECK: cir.store %[[#OUTPUT]], %[[#RET]] @@ -19,8 +19,8 @@ unsigned um0() { } // CHECK: cir.func @_Z3um0v() -> !u32i -// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !u32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, !cir.ptr, ["a", init] // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#OUTPUT:]] = cir.unary(minus, %[[#INPUT]]) // CHECK: cir.store %[[#OUTPUT]], %[[#RET]] @@ -31,8 +31,8 @@ unsigned un0() { } // CHECK: cir.func @_Z3un0v() -> !u32i -// CHECK: %[[#RET:]] = cir.alloca !u32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !u32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !u32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !u32i, !cir.ptr, ["a", init] // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#OUTPUT:]] = cir.unary(not, %[[#INPUT]]) // CHECK: cir.store %[[#OUTPUT]], %[[#RET]] @@ -44,8 +44,8 @@ int inc0() { } // CHECK: cir.func @_Z4inc0v() -> !s32i -// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] // CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] @@ -63,8 +63,8 @@ int dec0() { } // CHECK: cir.func @_Z4dec0v() -> !s32i -// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] // CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] @@ -83,8 +83,8 @@ int inc1() { } // CHECK: cir.func @_Z4inc1v() -> !s32i -// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] // CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] @@ -102,8 +102,8 @@ int dec1() { } // CHECK: cir.func @_Z4dec1v() -> !s32i -// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] +// CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] // CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] @@ -122,9 +122,9 @@ int inc2() { } // CHECK: cir.func @_Z4inc2v() -> !s32i -// CHECK: %[[#RET:]] = cir.alloca !s32i, cir.ptr , ["__retval"] -// CHECK: %[[#A:]] = cir.alloca !s32i, cir.ptr , ["a", init] -// CHECK: %[[#B:]] = cir.alloca !s32i, cir.ptr , ["b", init] +// CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] +// CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] +// CHECK: %[[#B:]] = cir.alloca !s32i, !cir.ptr, ["b", init] // CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#ATOB:]] = cir.load %[[#A]] @@ -144,12 +144,12 @@ int *inc_p(int *i) { // CHECK: cir.func @_Z5inc_pPi(%arg0: !cir.ptr -// CHECK: %[[#i_addr:]] = cir.alloca !cir.ptr, cir.ptr >, ["i", init] {alignment = 8 : i64} -// CHECK: %[[#i_dec:]] = cir.load %[[#i_addr]] : cir.ptr >, !cir.ptr +// CHECK: %[[#i_addr:]] = cir.alloca !cir.ptr, !cir.ptr>, ["i", init] {alignment = 8 : i64} +// CHECK: %[[#i_dec:]] = cir.load %[[#i_addr]] : !cir.ptr>, !cir.ptr // CHECK: %[[#dec_const:]] = cir.const(#cir.int<-1> : !s32i) : !s32i // CHECK: = cir.ptr_stride(%[[#i_dec]] : !cir.ptr, %[[#dec_const]] : !s32i), !cir.ptr -// CHECK: %[[#i_inc:]] = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %[[#i_inc:]] = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %[[#inc_const:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: = cir.ptr_stride(%[[#i_inc]] : !cir.ptr, %[[#inc_const]] : !s32i), !cir.ptr @@ -183,7 +183,7 @@ void doubles(double d) { void pointers(int *p) { // CHECK: cir.func @{{[^ ]+}}pointers - // CHECK: %[[#P:]] = cir.alloca !cir.ptr, cir.ptr > + // CHECK: %[[#P:]] = cir.alloca !cir.ptr, !cir.ptr> +p; // CHECK: cir.unary(plus, %{{.+}}) : !cir.ptr, !cir.ptr @@ -191,19 +191,19 @@ void pointers(int *p) { ++p; // CHECK: %[[#INC:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#INC]] : !s32i), !cir.ptr - // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, !cir.ptr> --p; // CHECK: %[[#DEC:]] = cir.const(#cir.int<-1> : !s32i) : !s32i // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#DEC]] : !s32i), !cir.ptr - // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, !cir.ptr> p++; // CHECK: %[[#INC:]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#INC]] : !s32i), !cir.ptr - // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, !cir.ptr> p--; // CHECK: %[[#DEC:]] = cir.const(#cir.int<-1> : !s32i) : !s32i // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#DEC]] : !s32i), !cir.ptr - // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, cir.ptr > + // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, !cir.ptr> !p; // %[[BOOLPTR:]] = cir.cast(ptr_to_bool, %15 : !cir.ptr), !cir.bool diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index 54d4e0516c25..b041bc5533c7 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -14,24 +14,24 @@ void foo(int x) { } // CHECK: cir.func @foo(%arg0: !s32i loc({{.*}})) -// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} -// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, cir.ptr , ["a", init] {alignment = 4 : i64} -// CHECK: cir.store %arg0, [[TMP0]] : !s32i, cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !s32i, !cir.ptr // CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = ""} : !cir.ptr -> !cir.ptr // CHECK: [[TMP3:%.*]] = cir.cast(bitcast, [[TMP2]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.load [[TMP0]] : cir.ptr , !s32i +// CHECK: [[TMP4:%.*]] = cir.load [[TMP0]] : !cir.ptr, !s32i // CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP4]] : !s32i), !u32i -// CHECK: [[TMP6:%.*]] = cir.load [[TMP3]] : cir.ptr , !u32i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP3]] : !cir.ptr, !u32i // CHECK: [[TMP7:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i // CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i // CHECK: [[TMP9:%.*]] = cir.const(#cir.int<4294901760> : !u32i) : !u32i // CHECK: [[TMP10:%.*]] = cir.binop(and, [[TMP6]], [[TMP9]]) : !u32i // CHECK: [[TMP11:%.*]] = cir.binop(or, [[TMP10]], [[TMP8]]) : !u32i -// CHECK: cir.store [[TMP11]], [[TMP3]] : !u32i, cir.ptr +// CHECK: cir.store [[TMP11]], [[TMP3]] : !u32i, !cir.ptr // CHECK: [[TMP12:%.*]] = cir.cast(bitcast, [[TMP2]] : !cir.ptr), !cir.ptr // CHECK: [[TMP13:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i // CHECK: [[TMP14:%.*]] = cir.cast(integral, [[TMP13]] : !s32i), !u32i -// CHECK: [[TMP15:%.*]] = cir.load [[TMP12]] : cir.ptr , !u32i +// CHECK: [[TMP15:%.*]] = cir.load [[TMP12]] : !cir.ptr, !u32i // CHECK: [[TMP16:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i // CHECK: [[TMP17:%.*]] = cir.binop(and, [[TMP14]], [[TMP16]]) : !u32i // CHECK: [[TMP18:%.*]] = cir.const(#cir.int<16> : !u32i) : !u32i @@ -39,5 +39,5 @@ void foo(int x) { // CHECK: [[TMP20:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i // CHECK: [[TMP21:%.*]] = cir.binop(and, [[TMP15]], [[TMP20]]) : !u32i // CHECK: [[TMP22:%.*]] = cir.binop(or, [[TMP21]], [[TMP19]]) : !u32i -// CHECK: cir.store [[TMP22]], [[TMP12]] : !u32i, cir.ptr +// CHECK: cir.store [[TMP22]], [[TMP12]] : !u32i, !cir.ptr // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index 23f1b496bd0f..ef252f4fa3e5 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -50,29 +50,29 @@ void m() { } // CHECK: cir.func @_Z1mv() -// CHECK: cir.alloca !ty_22yolm22, cir.ptr , ["q"] {alignment = 4 : i64} -// CHECK: cir.alloca !ty_22yolm222, cir.ptr , ["q2"] {alignment = 8 : i64} -// CHECK: cir.alloca !ty_22yolm322, cir.ptr , ["q3"] {alignment = 4 : i64} +// CHECK: cir.alloca !ty_22yolm22, !cir.ptr, ["q"] {alignment = 4 : i64} +// CHECK: cir.alloca !ty_22yolm222, !cir.ptr, ["q2"] {alignment = 8 : i64} +// CHECK: cir.alloca !ty_22yolm322, !cir.ptr, ["q3"] {alignment = 4 : i64} void shouldGenerateUnionAccess(union U u) { u.b = true; // CHECK: %[[#BASE:]] = cir.get_member %0[0] {name = "b"} : !cir.ptr -> !cir.ptr - // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.bool, cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.bool, !cir.ptr u.b; // CHECK: cir.get_member %0[0] {name = "b"} : !cir.ptr -> !cir.ptr u.i = 1; // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr - // CHECK: cir.store %{{.+}}, %[[#BASE]] : !s32i, cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !s32i, !cir.ptr u.i; // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr u.f = 0.1F; // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr - // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.float, cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.float, !cir.ptr u.f; // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr u.d = 0.1; // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr - // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.double, cir.ptr + // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.double, !cir.ptr u.d; // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr } @@ -84,8 +84,8 @@ typedef union { void noCrushOnDifferentSizes() { A a = {0}; - // CHECK: %[[#TMP0:]] = cir.alloca !ty_22A22, cir.ptr , ["a"] {alignment = 4 : i64} + // CHECK: %[[#TMP0:]] = cir.alloca !ty_22A22, !cir.ptr, ["a"] {alignment = 4 : i64} // CHECK: %[[#TMP1:]] = cir.cast(bitcast, %[[#TMP0]] : !cir.ptr), !cir.ptr // CHECK: %[[#TMP2:]] = cir.const(#cir.zero : !ty_anon_struct) : !ty_anon_struct - // CHECK: cir.store %[[#TMP2]], %[[#TMP1]] : !ty_anon_struct, cir.ptr + // CHECK: cir.store %[[#TMP2]], %[[#TMP1]] : !ty_anon_struct, !cir.ptr } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp index 8464ce3173e2..2ff9dfceacae 100644 --- a/clang/test/CIR/CodeGen/vector.cpp +++ b/clang/test/CIR/CodeGen/vector.cpp @@ -12,16 +12,16 @@ namespace std { } // namespace std // CHECK: cir.func linkonce_odr @_ZNSt6vectorIyE6resizeEm( -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !u64i, cir.ptr , ["__sz", init] {alignment = 8 : i64} -// CHECK: %2 = cir.alloca !u64i, cir.ptr , ["__cs", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: cir.store %arg1, %1 : !u64i, cir.ptr -// CHECK: %3 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !u64i, !cir.ptr, ["__sz", init] {alignment = 8 : i64} +// CHECK: %2 = cir.alloca !u64i, !cir.ptr, ["__cs", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg1, %1 : !u64i, !cir.ptr +// CHECK: %3 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %4 = cir.call @_ZNKSt6vectorIyE4sizeEv(%3) : (!cir.ptr) -> !u64i -// CHECK: cir.store %4, %2 : !u64i, cir.ptr +// CHECK: cir.store %4, %2 : !u64i, !cir.ptr // CHECK: cir.scope { -// CHECK: %5 = cir.load %2 : cir.ptr , !u64i +// CHECK: %5 = cir.load %2 : !cir.ptr, !u64i // CHECK: %6 = cir.cast(int_to_bool, %5 : !u64i), !cir.bool // CHECK: cir.if %6 { // CHECK: } diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index b774bfe7a457..f802e8d5fa57 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -39,9 +39,9 @@ void vector_int_test(int x) { // Insert element a[x] = x; - // CHECK: %[[#LOADEDVI:]] = cir.load %[[#STORAGEVI:]] : cir.ptr >, !cir.vector + // CHECK: %[[#LOADEDVI:]] = cir.load %[[#STORAGEVI:]] : !cir.ptr>, !cir.vector // CHECK: %[[#UPDATEDVI:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVI]][%{{[0-9]+}} : !s32i] : !cir.vector - // CHECK: cir.store %[[#UPDATEDVI]], %[[#STORAGEVI]] : !cir.vector, cir.ptr > + // CHECK: cir.store %[[#UPDATEDVI]], %[[#STORAGEVI]] : !cir.vector, !cir.ptr> // Binary arithmetic operations vi4 d = a + b; @@ -121,9 +121,9 @@ void vector_double_test(int x, double y) { // Insert element a[x] = y; - // CHECK: %[[#LOADEDVF:]] = cir.load %[[#STORAGEVF:]] : cir.ptr >, !cir.vector + // CHECK: %[[#LOADEDVF:]] = cir.load %[[#STORAGEVF:]] : !cir.ptr>, !cir.vector // CHECK: %[[#UPDATEDVF:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVF]][%{{[0-9]+}} : !s32i] : !cir.vector - // CHECK: cir.store %[[#UPDATEDVF]], %[[#STORAGEVF]] : !cir.vector, cir.ptr > + // CHECK: cir.store %[[#UPDATEDVF]], %[[#STORAGEVF]] : !cir.vector, !cir.ptr> // Binary arithmetic operations vd2 d = a + b; diff --git a/clang/test/CIR/CodeGen/vla.c b/clang/test/CIR/CodeGen/vla.c index 687d264987db..d54c1300bd8d 100644 --- a/clang/test/CIR/CodeGen/vla.c +++ b/clang/test/CIR/CodeGen/vla.c @@ -1,15 +1,15 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s // CHECK: cir.func @f0(%arg0: !s32i -// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, cir.ptr , ["len", init] {alignment = 4 : i64} -// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, cir.ptr >, ["saved_stack"] {alignment = 8 : i64} -// CHECK: cir.store %arg0, [[TMP0]] : !s32i, cir.ptr -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr , !s32i +// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, !cir.ptr, ["len", init] {alignment = 4 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["saved_stack"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !s32i, !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr, !s32i // CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u64i // CHECK: [[TMP4:%.*]] = cir.stack_save : !cir.ptr -// CHECK: cir.store [[TMP4]], [[TMP1]] : !cir.ptr, cir.ptr > -// CHECK: [[TMP5:%.*]] = cir.alloca !s32i, cir.ptr , [[TMP3]] : !u64i, ["vla"] {alignment = 16 : i64} -// CHECK: [[TMP6:%.*]] = cir.load [[TMP1]] : cir.ptr >, !cir.ptr +// CHECK: cir.store [[TMP4]], [[TMP1]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP5:%.*]] = cir.alloca !s32i, !cir.ptr, [[TMP3]] : !u64i, ["vla"] {alignment = 16 : i64} +// CHECK: [[TMP6:%.*]] = cir.load [[TMP1]] : !cir.ptr>, !cir.ptr // CHECK: cir.stack_restore [[TMP6]] : !cir.ptr void f0(int len) { int a[len]; diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 996e305d8796..3aa8ee7826a4 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -40,14 +40,14 @@ class B : public A // CHECK: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr // RTTI_DISABLED: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr // CHECK: cir.call @_ZN1AC2Ev(%2) : (!cir.ptr) -> () -// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : cir.ptr >> +// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : !cir.ptr>> // CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> -// CHECK: cir.store %3, %4 : !cir.ptr>>, cir.ptr >>> +// CHECK: cir.store %3, %4 : !cir.ptr>>, !cir.ptr>>> // CHECK: cir.return // CHECK: } @@ -55,9 +55,9 @@ class B : public A // // CHECK: cir.func @_Z3foov() // CHECK: cir.scope { -// CHECK: %0 = cir.alloca !ty_22B22, cir.ptr , ["agg.tmp.ensured"] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !ty_22B22, !cir.ptr, ["agg.tmp.ensured"] {alignment = 8 : i64} // CHECK: %1 = cir.const(#cir.zero : ![[ClassB]]) : ![[ClassB]] -// CHECK: cir.store %1, %0 : ![[ClassB]], cir.ptr +// CHECK: cir.store %1, %0 : ![[ClassB]], !cir.ptr // CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () // CHECK: } // CHECK: cir.return @@ -70,12 +70,12 @@ class B : public A // Calls @A::A() and initialize __vptr with address of A's vtable // // CHECK: cir.func linkonce_odr @_ZN1AC2Ev(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, cir.ptr > -// CHECK: %1 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : cir.ptr >> +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : !cir.ptr>> // CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> -// CHECK: cir.store %2, %3 : !cir.ptr>>, cir.ptr >>> +// CHECK: cir.store %2, %3 : !cir.ptr>>, !cir.ptr>>> // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/IR/aliases.cir b/clang/test/CIR/IR/aliases.cir index 8d6cbd04c7a2..6d2fd8190464 100644 --- a/clang/test/CIR/IR/aliases.cir +++ b/clang/test/CIR/IR/aliases.cir @@ -4,12 +4,12 @@ module { // CHECK: @testAnonRecordsAlias cir.func @testAnonRecordsAlias() { - // CHECK: cir.alloca !ty_anon_struct, cir.ptr - %0 = cir.alloca !cir.struct}>, cir.ptr }>>, ["A"] - // CHECK: cir.alloca !ty_anon_struct1, cir.ptr - %1 = cir.alloca !cir.struct}>, cir.ptr }>>, ["B"] - // CHECK: cir.alloca !ty_anon_union, cir.ptr - %2 = cir.alloca !cir.struct}>, cir.ptr }>>, ["C"] + // CHECK: cir.alloca !ty_anon_struct, !cir.ptr + %0 = cir.alloca !cir.struct}>, !cir.ptr}>>, ["A"] + // CHECK: cir.alloca !ty_anon_struct1, !cir.ptr + %1 = cir.alloca !cir.struct}>, !cir.ptr}>>, ["B"] + // CHECK: cir.alloca !ty_anon_union, !cir.ptr + %2 = cir.alloca !cir.struct}>, !cir.ptr}>>, ["C"] cir.return } } diff --git a/clang/test/CIR/IR/alloca.cir b/clang/test/CIR/IR/alloca.cir index 71293f6a0948..3c5b7ab6036e 100644 --- a/clang/test/CIR/IR/alloca.cir +++ b/clang/test/CIR/IR/alloca.cir @@ -6,7 +6,7 @@ module { cir.func @foo(%arg0: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} + %0 = cir.alloca !s32i, !cir.ptr, %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} cir.return } } @@ -14,7 +14,7 @@ module { //CHECK: module { //CHECK-NEXT: cir.func @foo(%arg0: !s32i) { -//CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} +//CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} //CHECK-NEXT: cir.return //CHECK-NEXT: } diff --git a/clang/test/CIR/IR/array.cir b/clang/test/CIR/IR/array.cir index 6653cdbfbe2e..b69439924a41 100644 --- a/clang/test/CIR/IR/array.cir +++ b/clang/test/CIR/IR/array.cir @@ -4,10 +4,10 @@ module { cir.func @arrays() { - %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] + %0 = cir.alloca !cir.array, !cir.ptr>, ["x", init] cir.return } } // CHECK: cir.func @arrays() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] +// CHECK-NEXT: %0 = cir.alloca !cir.array, !cir.ptr>, ["x", init] diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index e8b5989fd8ad..be4bffbe7fe1 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -3,7 +3,7 @@ module { cir.func @yolo(%arg0 : !s32i) { - %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] + %0 = cir.alloca !cir.array, !cir.ptr>, ["x", init] %a = cir.cast (int_to_bool, %arg0 : !s32i), !cir.bool %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 97d58223b1db..24abc26e66d6 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -7,50 +7,50 @@ module { cir.func @foo(%arg0: !s32i) -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["x", init] - cir.store %arg0, %0 : !s32i, cir.ptr - %1 = cir.load %0 : cir.ptr , !s32i + %0 = cir.alloca !s32i, !cir.ptr, ["x", init] + cir.store %arg0, %0 : !s32i, !cir.ptr + %1 = cir.load %0 : !cir.ptr, !s32i cir.return %1 : !s32i } cir.func @f3() -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["x", init] + %0 = cir.alloca !s32i, !cir.ptr, ["x", init] %1 = cir.const(#cir.int<3> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr - %2 = cir.load %0 : cir.ptr , !s32i + cir.store %1, %0 : !s32i, !cir.ptr + %2 = cir.load %0 : !cir.ptr, !s32i cir.return %2 : !s32i } cir.func @if0(%arg0: !s32i) -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} - cir.store %arg0, %1 : !s32i, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + cir.store %arg0, %1 : !s32i, !cir.ptr %2 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %2, %0 : !s32i, cir.ptr - %3 = cir.load %1 : cir.ptr , !s32i + cir.store %2, %0 : !s32i, !cir.ptr + %3 = cir.load %1 : !cir.ptr, !s32i %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool cir.if %4 { %6 = cir.const(#cir.int<3> : !s32i) : !s32i - cir.store %6, %0 : !s32i, cir.ptr + cir.store %6, %0 : !s32i, !cir.ptr } else { %6 = cir.const(#cir.int<4> : !s32i) : !s32i - cir.store %6, %0 : !s32i, cir.ptr + cir.store %6, %0 : !s32i, !cir.ptr } - %5 = cir.load %0 : cir.ptr , !s32i + %5 = cir.load %0 : !cir.ptr, !s32i cir.return %5 : !s32i } cir.func @s0() { - %0 = cir.alloca !s32i, cir.ptr , ["x"] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["x"] {alignment = 4 : i64} cir.scope { - %1 = cir.alloca !s32i, cir.ptr , ["y"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["y"] {alignment = 4 : i64} } cir.return } cir.func @os() { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["m", init] {alignment = 8 : i64} - %3 = cir.load %0 : cir.ptr >, !cir.ptr + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["m", init] {alignment = 8 : i64} + %3 = cir.load %0 : !cir.ptr>, !cir.ptr %4 = cir.objsize(%3 : , max) -> !u64i %5 = cir.objsize(%3 : , min) -> !u64i cir.return @@ -60,17 +60,17 @@ module { // CHECK: module { // CHECK-NEXT: cir.func @foo(%arg0: !s32i) -> !s32i { -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["x", init] -// CHECK-NEXT: cir.store %arg0, %0 : !s32i, cir.ptr -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] +// CHECK-NEXT: cir.store %arg0, %0 : !s32i, !cir.ptr +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %1 : !s32i // CHECK-NEXT: } // CHECK-NEXT: cir.func @f3() -> !s32i { -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["x", init] +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] // CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK-NEXT: cir.store %1, %0 : !s32i, cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : cir.ptr , !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %2 : !s32i // CHECK-NEXT: } @@ -78,21 +78,21 @@ module { // CHECK: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.if %4 { // CHECK-NEXT: %6 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: } else { // CHECK-NEXT: %6 = cir.const(#cir.int<4> : !s32i) : !s32i -// CHECK-NEXT: cir.store %6, %0 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: } // CHECK: cir.func @s0() { -// CHECK-NEXT: %0 = cir.alloca !s32i, cir.ptr , ["x"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %1 = cir.alloca !s32i, cir.ptr , ["y"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["y"] {alignment = 4 : i64} // CHECK-NEXT: } // CHECK: cir.func @os() { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, cir.ptr >, ["m", init] {alignment = 8 : i64} -// CHECK-NEXT: %1 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["m", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: %2 = cir.objsize(%1 : , max) -> !u64i // CHECK-NEXT: %3 = cir.objsize(%1 : , min) -> !u64i // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/IR/exceptions.cir b/clang/test/CIR/IR/exceptions.cir index aa93eea43559..f74a0a7ce1e4 100644 --- a/clang/test/CIR/IR/exceptions.cir +++ b/clang/test/CIR/IR/exceptions.cir @@ -11,10 +11,10 @@ module { cir.func @foo(%x : !s32i, %y : !s32i) -> !cir.ptr { %11 = cir.scope { %10 = cir.try { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["exception_info"] {alignment = 16 : i64} + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["exception_info"] {alignment = 16 : i64} %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i // CHECK: cir.try_call exception(%2) @div(%arg0, %arg1) : (!cir.ptr>, !s32i, !s32i) -> !s32i - %1 = cir.load %0 : cir.ptr >, !cir.ptr + %1 = cir.load %0 : !cir.ptr>, !cir.ptr cir.yield %1 : !cir.ptr } : () -> !cir.ptr cir.yield %10 : !cir.ptr diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir index 01f6b54877c8..1ebee93ba2b1 100644 --- a/clang/test/CIR/IR/func.cir +++ b/clang/test/CIR/IR/func.cir @@ -24,7 +24,7 @@ module { // Should parse custom assembly format. cir.func @parse_func_type() -> () { - %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["fn", init] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr>, !cir.ptr>>, ["fn", init] {alignment = 8 : i64} cir.return } diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index a9a5e1e5809c..05fa0bca4a22 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -14,7 +14,7 @@ module { cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global external @s = #cir.global_view<@".str2"> : !cir.ptr cir.func @use_global() { - %0 = cir.get_global @a : cir.ptr + %0 = cir.get_global @a : !cir.ptr cir.return } cir.global external @table = #cir.global_view<@s> : !cir.ptr @@ -35,11 +35,11 @@ module { cir.func private @_ZN4InitC1Eb(!cir.ptr, !s8i) cir.func private @_ZN4InitD1Ev(!cir.ptr) cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { - %0 = cir.get_global @_ZL8__ioinit : cir.ptr + %0 = cir.get_global @_ZL8__ioinit : !cir.ptr %1 = cir.const(#cir.int<3> : !s8i) : !s8i cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () } dtor { - %0 = cir.get_global @_ZL8__ioinit : cir.ptr + %0 = cir.get_global @_ZL8__ioinit : !cir.ptr cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () } @@ -66,7 +66,7 @@ module { cir.global "private" external tls_dyn @batata : !s32i cir.func @f35() { - %0 = cir.get_global thread_local @batata : cir.ptr + %0 = cir.get_global thread_local @batata : !cir.ptr cir.return } } @@ -81,10 +81,10 @@ module { // CHECK: cir.func @use_global() -// CHECK-NEXT: %0 = cir.get_global @a : cir.ptr +// CHECK-NEXT: %0 = cir.get_global @a : !cir.ptr // CHECK: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { -// CHECK-NEXT: %0 = cir.get_global @_ZL8__ioinit : cir.ptr +// CHECK-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s8i) : !s8i // CHECK-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () // CHECK-NEXT: } @@ -101,6 +101,6 @@ module { // CHECK: cir.global "private" external tls_dyn @batata : !s32i // CHECK: cir.func @f35() { -// CHECK: %0 = cir.get_global thread_local @batata : cir.ptr +// CHECK: %0 = cir.get_global thread_local @batata : !cir.ptr // CHECK: cir.return // CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 1522a7202f8d..2a78139200e6 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -132,7 +132,7 @@ cir.func @cast2(%p: !cir.ptr) { !u32i = !cir.int cir.func @cast3(%p: !cir.ptr) { - %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] + %0 = cir.alloca !cir.array, !cir.ptr>, ["x", init] %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // expected-error {{requires same type for array element and pointee result}} cir.return } @@ -397,11 +397,11 @@ cir.func @vec_extract_non_int_idx() { !s32i = !cir.int !u32i = !cir.int cir.func @vec_extract_bad_type() { - %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<0> : !s32i) : !s32i %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : !cir.vector %3 = cir.vec.extract %2[%1 : !s32i] : !cir.vector // expected-note {{prior use here}} - cir.store %3, %0 : !u32i, cir.ptr // expected-error {{use of value '%3' expects different type than prior uses: '!cir.int' vs '!cir.int'}} + cir.store %3, %0 : !u32i, !cir.ptr // expected-error {{use of value '%3' expects different type than prior uses: '!cir.int' vs '!cir.int'}} cir.return } @@ -727,7 +727,7 @@ module { module { cir.func private @_ZN4InitC1Eb(!cir.ptr) cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { - %0 = cir.get_global @_ZL8__ioinit : cir.ptr + %0 = cir.get_global @_ZL8__ioinit : !cir.ptr cir.call @_ZN4InitC1Eb(%0) : (!cir.ptr) -> () } dtor {} // expected-error@+1 {{custom op 'cir.global' dtor region must have exactly one block}} @@ -741,9 +741,9 @@ module { cir.global "private" constant external @_ZTIPKc : !cir.ptr cir.func @_Z8divisionii() { %11 = cir.alloc_exception(!cir.ptr) -> > - %12 = cir.get_global @".str" : cir.ptr > + %12 = cir.get_global @".str" : !cir.ptr> %13 = cir.cast(array_to_ptrdecay, %12 : !cir.ptr>), !cir.ptr - cir.store %13, %11 : !cir.ptr, cir.ptr > + cir.store %13, %11 : !cir.ptr, !cir.ptr> cir.throw(%11 : !cir.ptr>) // expected-error {{'type_info' symbol attribute missing}} } } @@ -801,7 +801,7 @@ module { module { cir.func @tmp(%arg0: !cir.float) { // expected-error@+1 {{operand #0 must be primitive int}} - %0 = cir.alloca !s32i, cir.ptr , %arg0 : !cir.float, ["tmp"] + %0 = cir.alloca !s32i, !cir.ptr, %arg0 : !cir.float, ["tmp"] cir.return } } @@ -861,7 +861,7 @@ cir.func @const_type_mismatch() -> () { module { cir.func @invalid_base_type(%arg0 : !cir.data_member) { - %0 = cir.alloca !u32i, cir.ptr , ["tmp"] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["tmp"] {alignment = 4 : i64} // expected-error@+1 {{'cir.get_runtime_member' op operand #0 must be !cir.struct*}} %1 = cir.get_runtime_member %0[%arg0 : !cir.data_member] : !cir.ptr -> !cir.ptr cir.return @@ -877,7 +877,7 @@ module { module { cir.func @invalid_base_type(%arg0 : !cir.data_member) { - %0 = cir.alloca !struct2, cir.ptr , ["tmp"] {alignment = 4 : i64} + %0 = cir.alloca !struct2, !cir.ptr, ["tmp"] {alignment = 4 : i64} // expected-error@+1 {{record type does not match the member pointer type}} %1 = cir.get_runtime_member %0[%arg0 : !cir.data_member] : !cir.ptr -> !cir.ptr cir.return @@ -892,7 +892,7 @@ module { module { cir.func @invalid_base_type(%arg0 : !cir.data_member) { - %0 = cir.alloca !struct1, cir.ptr , ["tmp"] {alignment = 4 : i64} + %0 = cir.alloca !struct1, !cir.ptr, ["tmp"] {alignment = 4 : i64} // expected-error@+1 {{result type does not match the member pointer type}} %1 = cir.get_runtime_member %0[%arg0 : !cir.data_member] : !cir.ptr -> !cir.ptr cir.return @@ -1064,7 +1064,7 @@ module { cir.global "private" external @batata : !s32i cir.func @f35() { // expected-error@+1 {{access to global not marked thread local}} - %0 = cir.get_global thread_local @batata : cir.ptr + %0 = cir.get_global thread_local @batata : !cir.ptr cir.return } } diff --git a/clang/test/CIR/IR/ptr_stride.cir b/clang/test/CIR/IR/ptr_stride.cir index 826ed571c3cb..750e5764a72f 100644 --- a/clang/test/CIR/IR/ptr_stride.cir +++ b/clang/test/CIR/IR/ptr_stride.cir @@ -3,7 +3,7 @@ module { cir.func @arraysubscript(%arg0: !s32i) { - %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] + %0 = cir.alloca !cir.array, !cir.ptr>, ["x", init] %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr %3 = cir.const(#cir.int<0> : !s32i) : !s32i @@ -13,7 +13,7 @@ module { } // CHECK: cir.func @arraysubscript(%arg0: !s32i) { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] +// CHECK-NEXT: %0 = cir.alloca !cir.array, !cir.ptr>, ["x", init] // CHECK-NEXT: %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // CHECK-NEXT: %3 = cir.const(#cir.int<0> : !s32i) : !s32i diff --git a/clang/test/CIR/IR/scope.cir b/clang/test/CIR/IR/scope.cir index 0cc45c8e389b..f756355be0a0 100644 --- a/clang/test/CIR/IR/scope.cir +++ b/clang/test/CIR/IR/scope.cir @@ -15,7 +15,7 @@ module { // Should properly print/parse scope with explicit yield. cir.func @explicit_yield() { %0 = cir.scope { - %1 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} cir.yield %1 : !cir.ptr } : !cir.ptr // CHECK: %0 = cir.scope { diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index 65a319538d1a..b44b0d3eeee3 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -24,14 +24,14 @@ module { } cir.func @structs() { - %0 = cir.alloca !cir.ptr>, cir.ptr >>, ["s", init] - %1 = cir.alloca !cir.ptr>, cir.ptr >>, ["i", init] + %0 = cir.alloca !cir.ptr>, !cir.ptr>>, ["s", init] + %1 = cir.alloca !cir.ptr>, !cir.ptr>>, ["i", init] cir.return } // CHECK: cir.func @structs() { -// CHECK: %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] -// CHECK: %1 = cir.alloca !cir.ptr, cir.ptr >, ["i", init] +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["i", init] cir.func @shouldSuccessfullyParseConstStructAttrs() { %0 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122) : !ty_22S122 diff --git a/clang/test/CIR/IR/try.cir b/clang/test/CIR/IR/try.cir index 30a516e422e0..2eb186e65f04 100644 --- a/clang/test/CIR/IR/try.cir +++ b/clang/test/CIR/IR/try.cir @@ -12,9 +12,9 @@ module { cir.func @foo(%x : !s32i, %y : !s32i) -> !cir.ptr { %11 = cir.scope { %10 = cir.scope { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["exception_info"] {alignment = 16 : i64} + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["exception_info"] {alignment = 16 : i64} %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i - %1 = cir.load %0 : cir.ptr >, !cir.ptr + %1 = cir.load %0 : !cir.ptr>, !cir.ptr cir.yield %1 : !cir.ptr } : !cir.ptr cir.yield %10 : !cir.ptr diff --git a/clang/test/CIR/IR/types.cir b/clang/test/CIR/IR/types.cir index 6653cdbfbe2e..b69439924a41 100644 --- a/clang/test/CIR/IR/types.cir +++ b/clang/test/CIR/IR/types.cir @@ -4,10 +4,10 @@ module { cir.func @arrays() { - %0 = cir.alloca !cir.array, cir.ptr>, ["x", init] + %0 = cir.alloca !cir.array, !cir.ptr>, ["x", init] cir.return } } // CHECK: cir.func @arrays() { -// CHECK-NEXT: %0 = cir.alloca !cir.array, cir.ptr >, ["x", init] +// CHECK-NEXT: %0 = cir.alloca !cir.array, !cir.ptr>, ["x", init] diff --git a/clang/test/CIR/Lowering/ThroughMLIR/array.cir b/clang/test/CIR/Lowering/ThroughMLIR/array.cir index 1a7e15531fd8..dc1eb97c80b3 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/array.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/array.cir @@ -4,7 +4,7 @@ !s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} + %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir index 790d50d5510d..59db7ccb7959 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir @@ -3,44 +3,44 @@ module { cir.func @foo() { - %0 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} - %1 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} - %2 = cir.alloca !cir.float, cir.ptr , ["y", init] {alignment = 4 : i64} - %3 = cir.alloca !cir.double, cir.ptr , ["e"] {alignment = 8 : i64} - %4 = cir.alloca !cir.double, cir.ptr , ["f"] {alignment = 8 : i64} - %5 = cir.alloca !cir.double, cir.ptr , ["g", init] {alignment = 8 : i64} - %6 = cir.load %0 : cir.ptr , !cir.float - %7 = cir.load %1 : cir.ptr , !cir.float + %0 = cir.alloca !cir.float, !cir.ptr, ["c"] {alignment = 4 : i64} + %1 = cir.alloca !cir.float, !cir.ptr, ["d"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, !cir.ptr, ["y", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.double, !cir.ptr, ["e"] {alignment = 8 : i64} + %4 = cir.alloca !cir.double, !cir.ptr, ["f"] {alignment = 8 : i64} + %5 = cir.alloca !cir.double, !cir.ptr, ["g", init] {alignment = 8 : i64} + %6 = cir.load %0 : !cir.ptr, !cir.float + %7 = cir.load %1 : !cir.ptr, !cir.float %8 = cir.binop(mul, %6, %7) : !cir.float - cir.store %8, %2 : !cir.float, cir.ptr - %9 = cir.load %2 : cir.ptr , !cir.float - %10 = cir.load %1 : cir.ptr , !cir.float + cir.store %8, %2 : !cir.float, !cir.ptr + %9 = cir.load %2 : !cir.ptr, !cir.float + %10 = cir.load %1 : !cir.ptr, !cir.float %11 = cir.binop(div, %9, %10) : !cir.float - cir.store %11, %2 : !cir.float, cir.ptr - %12 = cir.load %2 : cir.ptr , !cir.float - %13 = cir.load %1 : cir.ptr , !cir.float + cir.store %11, %2 : !cir.float, !cir.ptr + %12 = cir.load %2 : !cir.ptr, !cir.float + %13 = cir.load %1 : !cir.ptr, !cir.float %14 = cir.binop(add, %12, %13) : !cir.float - cir.store %14, %2 : !cir.float, cir.ptr - %15 = cir.load %2 : cir.ptr , !cir.float - %16 = cir.load %1 : cir.ptr , !cir.float + cir.store %14, %2 : !cir.float, !cir.ptr + %15 = cir.load %2 : !cir.ptr, !cir.float + %16 = cir.load %1 : !cir.ptr, !cir.float %17 = cir.binop(sub, %15, %16) : !cir.float - cir.store %17, %2 : !cir.float, cir.ptr - %18 = cir.load %3 : cir.ptr , !cir.double - %19 = cir.load %4 : cir.ptr , !cir.double + cir.store %17, %2 : !cir.float, !cir.ptr + %18 = cir.load %3 : !cir.ptr, !cir.double + %19 = cir.load %4 : !cir.ptr, !cir.double %20 = cir.binop(add, %18, %19) : !cir.double - cir.store %20, %5 : !cir.double, cir.ptr - %21 = cir.load %3 : cir.ptr , !cir.double - %22 = cir.load %4 : cir.ptr , !cir.double + cir.store %20, %5 : !cir.double, !cir.ptr + %21 = cir.load %3 : !cir.ptr, !cir.double + %22 = cir.load %4 : !cir.ptr, !cir.double %23 = cir.binop(sub, %21, %22) : !cir.double - cir.store %23, %5 : !cir.double, cir.ptr - %24 = cir.load %3 : cir.ptr , !cir.double - %25 = cir.load %4 : cir.ptr , !cir.double + cir.store %23, %5 : !cir.double, !cir.ptr + %24 = cir.load %3 : !cir.ptr, !cir.double + %25 = cir.load %4 : !cir.ptr, !cir.double %26 = cir.binop(mul, %24, %25) : !cir.double - cir.store %26, %5 : !cir.double, cir.ptr - %27 = cir.load %3 : cir.ptr , !cir.double - %28 = cir.load %4 : cir.ptr , !cir.double + cir.store %26, %5 : !cir.double, !cir.ptr + %27 = cir.load %3 : !cir.ptr, !cir.double + %28 = cir.load %4 : !cir.ptr, !cir.double %29 = cir.binop(div, %27, %28) : !cir.double - cir.store %29, %5 : !cir.double, cir.ptr + cir.store %29, %5 : !cir.double, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir index 51c89f564efa..d9e3c36f80ef 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir @@ -4,53 +4,53 @@ module { cir.func @foo() { - %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<2> : !u32i) : !u32i cir.store %3, %0 : !u32i, cir.ptr - %4 = cir.const(#cir.int<1> : !u32i) : !u32i cir.store %4, %1 : !u32i, cir.ptr - %5 = cir.load %0 : cir.ptr , !u32i - %6 = cir.load %1 : cir.ptr , !u32i + %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<2> : !u32i) : !u32i cir.store %3, %0 : !u32i, !cir.ptr + %4 = cir.const(#cir.int<1> : !u32i) : !u32i cir.store %4, %1 : !u32i, !cir.ptr + %5 = cir.load %0 : !cir.ptr, !u32i + %6 = cir.load %1 : !cir.ptr, !u32i %7 = cir.binop(mul, %5, %6) : !u32i - cir.store %7, %2 : !u32i, cir.ptr - %8 = cir.load %2 : cir.ptr , !u32i - %9 = cir.load %1 : cir.ptr , !u32i + cir.store %7, %2 : !u32i, !cir.ptr + %8 = cir.load %2 : !cir.ptr, !u32i + %9 = cir.load %1 : !cir.ptr, !u32i %10 = cir.binop(div, %8, %9) : !u32i - cir.store %10, %2 : !u32i, cir.ptr - %11 = cir.load %2 : cir.ptr , !u32i - %12 = cir.load %1 : cir.ptr , !u32i + cir.store %10, %2 : !u32i, !cir.ptr + %11 = cir.load %2 : !cir.ptr, !u32i + %12 = cir.load %1 : !cir.ptr, !u32i %13 = cir.binop(rem, %11, %12) : !u32i - cir.store %13, %2 : !u32i, cir.ptr - %14 = cir.load %2 : cir.ptr , !u32i - %15 = cir.load %1 : cir.ptr , !u32i + cir.store %13, %2 : !u32i, !cir.ptr + %14 = cir.load %2 : !cir.ptr, !u32i + %15 = cir.load %1 : !cir.ptr, !u32i %16 = cir.binop(add, %14, %15) : !u32i - cir.store %16, %2 : !u32i, cir.ptr - %17 = cir.load %2 : cir.ptr , !u32i - %18 = cir.load %1 : cir.ptr , !u32i + cir.store %16, %2 : !u32i, !cir.ptr + %17 = cir.load %2 : !cir.ptr, !u32i + %18 = cir.load %1 : !cir.ptr, !u32i %19 = cir.binop(sub, %17, %18) : !u32i - cir.store %19, %2 : !u32i, cir.ptr + cir.store %19, %2 : !u32i, !cir.ptr // should move to cir.shift, which only accepts // CIR types. - // %20 = cir.load %2 : cir.ptr , !u32i - // %21 = cir.load %1 : cir.ptr , !u32i + // %20 = cir.load %2 : !cir.ptr, !u32i + // %21 = cir.load %1 : !cir.ptr, !u32i // %22 = cir.binop(shr, %20, %21) : !u32i - // cir.store %22, %2 : !u32i, cir.ptr - // %23 = cir.load %2 : cir.ptr , !u32i - // %24 = cir.load %1 : cir.ptr , !u32i + // cir.store %22, %2 : !u32i, !cir.ptr + // %23 = cir.load %2 : !cir.ptr, !u32i + // %24 = cir.load %1 : !cir.ptr, !u32i // %25 = cir.binop(shl, %23, %24) : !u32i - // cir.store %25, %2 : !u32i, cir.ptr - %26 = cir.load %2 : cir.ptr , !u32i - %27 = cir.load %1 : cir.ptr , !u32i + // cir.store %25, %2 : !u32i, !cir.ptr + %26 = cir.load %2 : !cir.ptr, !u32i + %27 = cir.load %1 : !cir.ptr, !u32i %28 = cir.binop(and, %26, %27) : !u32i - cir.store %28, %2 : !u32i, cir.ptr - %29 = cir.load %2 : cir.ptr , !u32i - %30 = cir.load %1 : cir.ptr , !u32i + cir.store %28, %2 : !u32i, !cir.ptr + %29 = cir.load %2 : !cir.ptr, !u32i + %30 = cir.load %1 : !cir.ptr, !u32i %31 = cir.binop(xor, %29, %30) : !u32i - cir.store %31, %2 : !u32i, cir.ptr - %32 = cir.load %2 : cir.ptr , !u32i - %33 = cir.load %1 : cir.ptr , !u32i + cir.store %31, %2 : !u32i, !cir.ptr + %32 = cir.load %2 : !cir.ptr, !u32i + %33 = cir.load %1 : !cir.ptr, !u32i %34 = cir.binop(or, %32, %33) : !u32i - cir.store %34, %2 : !u32i, cir.ptr + cir.store %34, %2 : !u32i, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir index 2163f063d9e9..1b41d8b1d0f7 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir @@ -5,9 +5,9 @@ #true = #cir.bool : !cir.bool module { cir.func @foo() { - %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} + %0 = cir.alloca !cir.bool, !cir.ptr, ["a", init] {alignment = 1 : i64} %1 = cir.const(#true) : !cir.bool - cir.store %1, %0 : !cir.bool, cir.ptr + cir.store %1, %0 : !cir.bool, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir index 99eea2260c26..0efd41de816c 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir @@ -4,46 +4,46 @@ !s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca !s32i, cir.ptr , ["a"] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["b"] {alignment = 4 : i64} - %2 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} - %3 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} - %4 = cir.alloca !cir.bool, cir.ptr , ["e"] {alignment = 1 : i64} - %5 = cir.load %0 : cir.ptr , !s32i - %6 = cir.load %1 : cir.ptr , !s32i + %0 = cir.alloca !s32i, !cir.ptr, ["a"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["b"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, !cir.ptr, ["c"] {alignment = 4 : i64} + %3 = cir.alloca !cir.float, !cir.ptr, ["d"] {alignment = 4 : i64} + %4 = cir.alloca !cir.bool, !cir.ptr, ["e"] {alignment = 1 : i64} + %5 = cir.load %0 : !cir.ptr, !s32i + %6 = cir.load %1 : !cir.ptr, !s32i %7 = cir.cmp(gt, %5, %6) : !s32i, !cir.bool - %8 = cir.load %0 : cir.ptr , !s32i - %9 = cir.load %1 : cir.ptr , !s32i + %8 = cir.load %0 : !cir.ptr, !s32i + %9 = cir.load %1 : !cir.ptr, !s32i %10 = cir.cmp(eq, %8, %9) : !s32i, !cir.bool - %11 = cir.load %0 : cir.ptr , !s32i - %12 = cir.load %1 : cir.ptr , !s32i + %11 = cir.load %0 : !cir.ptr, !s32i + %12 = cir.load %1 : !cir.ptr, !s32i %13 = cir.cmp(lt, %11, %12) : !s32i, !cir.bool - %14 = cir.load %0 : cir.ptr , !s32i - %15 = cir.load %1 : cir.ptr , !s32i + %14 = cir.load %0 : !cir.ptr, !s32i + %15 = cir.load %1 : !cir.ptr, !s32i %16 = cir.cmp(ge, %14, %15) : !s32i, !cir.bool - %17 = cir.load %0 : cir.ptr , !s32i - %18 = cir.load %1 : cir.ptr , !s32i + %17 = cir.load %0 : !cir.ptr, !s32i + %18 = cir.load %1 : !cir.ptr, !s32i %19 = cir.cmp(ne, %17, %18) : !s32i, !cir.bool - %20 = cir.load %0 : cir.ptr , !s32i - %21 = cir.load %1 : cir.ptr , !s32i + %20 = cir.load %0 : !cir.ptr, !s32i + %21 = cir.load %1 : !cir.ptr, !s32i %22 = cir.cmp(le, %20, %21) : !s32i, !cir.bool - %23 = cir.load %2 : cir.ptr , !cir.float - %24 = cir.load %3 : cir.ptr , !cir.float + %23 = cir.load %2 : !cir.ptr, !cir.float + %24 = cir.load %3 : !cir.ptr, !cir.float %25 = cir.cmp(gt, %23, %24) : !cir.float, !cir.bool - %26 = cir.load %2 : cir.ptr , !cir.float - %27 = cir.load %3 : cir.ptr , !cir.float + %26 = cir.load %2 : !cir.ptr, !cir.float + %27 = cir.load %3 : !cir.ptr, !cir.float %28 = cir.cmp(eq, %26, %27) : !cir.float, !cir.bool - %29 = cir.load %2 : cir.ptr , !cir.float - %30 = cir.load %3 : cir.ptr , !cir.float + %29 = cir.load %2 : !cir.ptr, !cir.float + %30 = cir.load %3 : !cir.ptr, !cir.float %31 = cir.cmp(lt, %29, %30) : !cir.float, !cir.bool - %32 = cir.load %2 : cir.ptr , !cir.float - %33 = cir.load %3 : cir.ptr , !cir.float + %32 = cir.load %2 : !cir.ptr, !cir.float + %33 = cir.load %3 : !cir.ptr, !cir.float %34 = cir.cmp(ge, %32, %33) : !cir.float, !cir.bool - %35 = cir.load %2 : cir.ptr , !cir.float - %36 = cir.load %3 : cir.ptr , !cir.float + %35 = cir.load %2 : !cir.ptr, !cir.float + %36 = cir.load %3 : !cir.ptr, !cir.float %37 = cir.cmp(ne, %35, %36) : !cir.float, !cir.bool - %38 = cir.load %2 : cir.ptr , !cir.float - %39 = cir.load %3 : cir.ptr , !cir.float + %38 = cir.load %2 : !cir.ptr, !cir.float + %39 = cir.load %3 : !cir.ptr, !cir.float %40 = cir.cmp(le, %38, %39) : !cir.float, !cir.bool cir.return } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir index cd82f88d9e46..ff73eca3667f 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir @@ -5,14 +5,14 @@ !s32i = !cir.int module { cir.func @dot(%arg0: !cir.ptr) -> !s32i { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["x", init] {alignment = 8 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - %2 = cir.alloca !cir.ptr, cir.ptr >, ["y", init] {alignment = 8 : i64} - cir.store %arg0, %0 : !cir.ptr, cir.ptr > - %3 = cir.load %0 : cir.ptr >, !cir.ptr - cir.store %3, %2 : !cir.ptr, cir.ptr > + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["x", init] {alignment = 8 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + %2 = cir.alloca !cir.ptr, !cir.ptr>, ["y", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, !cir.ptr> + %3 = cir.load %0 : !cir.ptr>, !cir.ptr + cir.store %3, %2 : !cir.ptr, !cir.ptr> %4 = cir.const(#cir.int<0> : !s32i) : !s32i - %5 = cir.load %1 : cir.ptr , !s32i + %5 = cir.load %1 : !cir.ptr, !s32i cir.return %5 : !s32i } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/global.cir b/clang/test/CIR/Lowering/ThroughMLIR/global.cir index 3b1ed83239c6..4415c6409a0b 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/global.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/global.cir @@ -10,26 +10,26 @@ module { cir.global external @aa = #cir.zero : !cir.array x 256> cir.func @get_global_int_value() -> !u32i { - %0 = cir.get_global @i : cir.ptr - %1 = cir.load %0 : cir.ptr , !u32i + %0 = cir.get_global @i : !cir.ptr + %1 = cir.load %0 : !cir.ptr, !u32i cir.return %1 : !u32i } cir.func @get_global_float_value() -> !cir.float { - %0 = cir.get_global @f : cir.ptr - %1 = cir.load %0 : cir.ptr , !cir.float + %0 = cir.get_global @f : !cir.ptr + %1 = cir.load %0 : !cir.ptr, !cir.float cir.return %1 : !cir.float } cir.func @get_global_bool_value() -> !cir.bool { - %0 = cir.get_global @b : cir.ptr - %1 = cir.load %0 : cir.ptr , !cir.bool + %0 = cir.get_global @b : !cir.ptr + %1 = cir.load %0 : !cir.ptr, !cir.bool cir.return %1 : !cir.bool } cir.func @get_global_array_pointer() -> !cir.ptr> { - %0 = cir.get_global @a : cir.ptr > + %0 = cir.get_global @a : !cir.ptr> cir.return %0 : !cir.ptr> } cir.func @get_global_multi_array_pointer() -> !cir.ptr x 256>> { - %0 = cir.get_global @aa : cir.ptr x 256>> + %0 = cir.get_global @aa : !cir.ptr x 256>> cir.return %0 : !cir.ptr x 256>> } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir index 9cc9cc45b65f..170366d55af9 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir @@ -4,21 +4,21 @@ !u32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !u32i) : !u32i - cir.store %1, %0 : !u32i, cir.ptr + cir.store %1, %0 : !u32i, !cir.ptr cir.br ^bb2 ^bb1: // no predecessors - %2 = cir.load %0 : cir.ptr , !u32i + %2 = cir.load %0 : !cir.ptr, !u32i %3 = cir.const(#cir.int<1> : !u32i) : !u32i %4 = cir.binop(add, %2, %3) : !u32i - cir.store %4, %0 : !u32i, cir.ptr + cir.store %4, %0 : !u32i, !cir.ptr cir.br ^bb2 ^bb2: // 2 preds: ^bb0, ^bb1 - %5 = cir.load %0 : cir.ptr , !u32i + %5 = cir.load %0 : !cir.ptr, !u32i %6 = cir.const(#cir.int<2> : !u32i) : !u32i %7 = cir.binop(add, %5, %6) : !u32i - cir.store %7, %0 : !u32i, cir.ptr + cir.store %7, %0 : !u32i, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir index ad338992806b..8aabe4cd0ffa 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir @@ -4,10 +4,10 @@ !u32i = !cir.int module { cir.func @foo() -> !u32i { - %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !u32i) : !u32i - cir.store %1, %0 : !u32i, cir.ptr - %2 = cir.load %0 : cir.ptr , !u32i + cir.store %1, %0 : !u32i, !cir.ptr + %2 = cir.load %0 : !cir.ptr, !u32i cir.return %2 : !u32i } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/scope.cir b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir index 4ebd7749a72f..779363e21b2f 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/scope.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir @@ -7,9 +7,9 @@ module { cir.func @foo() { cir.scope { - %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<4> : !u32i) : !u32i - cir.store %1, %0 : !u32i, cir.ptr + cir.store %1, %0 : !u32i, !cir.ptr } cir.return } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir index df6e6a09a5ff..e1cae6aa9bd4 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir @@ -6,10 +6,10 @@ module { cir.func @_Z1xi(%arg0: !s32i) -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - %2 = cir.load %0 : cir.ptr , !s32i + %0 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<0> : !s32i) : !s32i %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool %5 = cir.ternary(%4, true { @@ -19,8 +19,8 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { %7 = cir.const(#cir.int<5> : !s32i) : !s32i cir.yield %7 : !s32i }) : (!cir.bool) -> !s32i - cir.store %5, %1 : !s32i, cir.ptr - %6 = cir.load %1 : cir.ptr , !s32i + cir.store %5, %1 : !s32i, !cir.ptr + %6 = cir.load %1 : !cir.ptr, !s32i cir.return %6 : !s32i } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir index 45368fb48f40..13fef83e435a 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir @@ -4,19 +4,19 @@ !s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %2 = cir.const(#cir.int<2> : !s32i) : !s32i - cir.store %2, %0 : !s32i, cir.ptr - cir.store %2, %1 : !s32i, cir.ptr + cir.store %2, %0 : !s32i, !cir.ptr + cir.store %2, %1 : !s32i, !cir.ptr - %3 = cir.load %0 : cir.ptr , !s32i + %3 = cir.load %0 : !cir.ptr, !s32i %4 = cir.unary(inc, %3) : !s32i, !s32i - cir.store %4, %0 : !s32i, cir.ptr + cir.store %4, %0 : !s32i, !cir.ptr - %5 = cir.load %1 : cir.ptr , !s32i + %5 = cir.load %1 : !cir.ptr, !s32i %6 = cir.unary(dec, %5) : !s32i, !s32i - cir.store %6, %1 : !s32i, cir.ptr + cir.store %6, %1 : !s32i, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir index 013bc65e95e3..c013be35dfe7 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir @@ -4,19 +4,19 @@ !s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %2 = cir.const(#cir.int<2> : !s32i) : !s32i - cir.store %2, %0 : !s32i, cir.ptr - cir.store %2, %1 : !s32i, cir.ptr + cir.store %2, %0 : !s32i, !cir.ptr + cir.store %2, %1 : !s32i, !cir.ptr - %3 = cir.load %0 : cir.ptr , !s32i + %3 = cir.load %0 : !cir.ptr, !s32i %4 = cir.unary(plus, %3) : !s32i, !s32i - cir.store %4, %0 : !s32i, cir.ptr + cir.store %4, %0 : !s32i, !cir.ptr - %5 = cir.load %1 : cir.ptr , !s32i + %5 = cir.load %1 : !cir.ptr, !s32i %6 = cir.unary(minus, %5) : !s32i, !s32i - cir.store %6, %1 : !s32i, cir.ptr + cir.store %6, %1 : !s32i, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/alloca.cir b/clang/test/CIR/Lowering/alloca.cir index 4c512a762068..33da38e9e69e 100644 --- a/clang/test/CIR/Lowering/alloca.cir +++ b/clang/test/CIR/Lowering/alloca.cir @@ -4,7 +4,7 @@ module { cir.func @foo(%arg0: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} + %0 = cir.alloca !s32i, !cir.ptr, %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} cir.return } } diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir index 56f4fd3a6331..278225761d33 100644 --- a/clang/test/CIR/Lowering/array.cir +++ b/clang/test/CIR/Lowering/array.cir @@ -6,7 +6,7 @@ module { cir.func @foo() { - %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} + %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} cir.return } diff --git a/clang/test/CIR/Lowering/asm.cir b/clang/test/CIR/Lowering/asm.cir index 3aa753fbb91f..3ba57ac17b2c 100644 --- a/clang/test/CIR/Lowering/asm.cir +++ b/clang/test/CIR/Lowering/asm.cir @@ -5,8 +5,8 @@ module { cir.func @simple(%arg0: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr cir.asm(x86_att, out = [], diff --git a/clang/test/CIR/Lowering/binop-fp.cir b/clang/test/CIR/Lowering/binop-fp.cir index cb08205231e5..dfda6e91cb51 100644 --- a/clang/test/CIR/Lowering/binop-fp.cir +++ b/clang/test/CIR/Lowering/binop-fp.cir @@ -3,44 +3,44 @@ module { cir.func @foo() { - %0 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} - %1 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} - %2 = cir.alloca !cir.float, cir.ptr , ["y", init] {alignment = 4 : i64} - %3 = cir.alloca !cir.double, cir.ptr , ["e"] {alignment = 8 : i64} - %4 = cir.alloca !cir.double, cir.ptr , ["f"] {alignment = 8 : i64} - %5 = cir.alloca !cir.double, cir.ptr , ["g", init] {alignment = 8 : i64} - %6 = cir.load %0 : cir.ptr , !cir.float - %7 = cir.load %1 : cir.ptr , !cir.float + %0 = cir.alloca !cir.float, !cir.ptr, ["c"] {alignment = 4 : i64} + %1 = cir.alloca !cir.float, !cir.ptr, ["d"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, !cir.ptr, ["y", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.double, !cir.ptr, ["e"] {alignment = 8 : i64} + %4 = cir.alloca !cir.double, !cir.ptr, ["f"] {alignment = 8 : i64} + %5 = cir.alloca !cir.double, !cir.ptr, ["g", init] {alignment = 8 : i64} + %6 = cir.load %0 : !cir.ptr, !cir.float + %7 = cir.load %1 : !cir.ptr, !cir.float %8 = cir.binop(mul, %6, %7) : !cir.float - cir.store %8, %2 : !cir.float, cir.ptr - %9 = cir.load %2 : cir.ptr , !cir.float - %10 = cir.load %1 : cir.ptr , !cir.float + cir.store %8, %2 : !cir.float, !cir.ptr + %9 = cir.load %2 : !cir.ptr, !cir.float + %10 = cir.load %1 : !cir.ptr, !cir.float %11 = cir.binop(div, %9, %10) : !cir.float - cir.store %11, %2 : !cir.float, cir.ptr - %12 = cir.load %2 : cir.ptr , !cir.float - %13 = cir.load %1 : cir.ptr , !cir.float + cir.store %11, %2 : !cir.float, !cir.ptr + %12 = cir.load %2 : !cir.ptr, !cir.float + %13 = cir.load %1 : !cir.ptr, !cir.float %14 = cir.binop(add, %12, %13) : !cir.float - cir.store %14, %2 : !cir.float, cir.ptr - %15 = cir.load %2 : cir.ptr , !cir.float - %16 = cir.load %1 : cir.ptr , !cir.float + cir.store %14, %2 : !cir.float, !cir.ptr + %15 = cir.load %2 : !cir.ptr, !cir.float + %16 = cir.load %1 : !cir.ptr, !cir.float %17 = cir.binop(sub, %15, %16) : !cir.float - cir.store %17, %2 : !cir.float, cir.ptr - %18 = cir.load %3 : cir.ptr , !cir.double - %19 = cir.load %4 : cir.ptr , !cir.double + cir.store %17, %2 : !cir.float, !cir.ptr + %18 = cir.load %3 : !cir.ptr, !cir.double + %19 = cir.load %4 : !cir.ptr, !cir.double %20 = cir.binop(add, %18, %19) : !cir.double - cir.store %20, %5 : !cir.double, cir.ptr - %21 = cir.load %3 : cir.ptr , !cir.double - %22 = cir.load %4 : cir.ptr , !cir.double + cir.store %20, %5 : !cir.double, !cir.ptr + %21 = cir.load %3 : !cir.ptr, !cir.double + %22 = cir.load %4 : !cir.ptr, !cir.double %23 = cir.binop(sub, %21, %22) : !cir.double - cir.store %23, %5 : !cir.double, cir.ptr - %24 = cir.load %3 : cir.ptr , !cir.double - %25 = cir.load %4 : cir.ptr , !cir.double + cir.store %23, %5 : !cir.double, !cir.ptr + %24 = cir.load %3 : !cir.ptr, !cir.double + %25 = cir.load %4 : !cir.ptr, !cir.double %26 = cir.binop(mul, %24, %25) : !cir.double - cir.store %26, %5 : !cir.double, cir.ptr - %27 = cir.load %3 : cir.ptr , !cir.double - %28 = cir.load %4 : cir.ptr , !cir.double + cir.store %26, %5 : !cir.double, !cir.ptr + %27 = cir.load %3 : !cir.ptr, !cir.double + %28 = cir.load %4 : !cir.ptr, !cir.double %29 = cir.binop(div, %27, %28) : !cir.double - cir.store %29, %5 : !cir.double, cir.ptr + cir.store %29, %5 : !cir.double, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/binop-signed-int.cir b/clang/test/CIR/Lowering/binop-signed-int.cir index 855cd8cfbe92..8ce730ad5234 100644 --- a/clang/test/CIR/Lowering/binop-signed-int.cir +++ b/clang/test/CIR/Lowering/binop-signed-int.cir @@ -4,61 +4,61 @@ !s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<2> : !s32i) : !s32i cir.store %3, %0 : !s32i, cir.ptr - %4 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %4, %1 : !s32i, cir.ptr - %5 = cir.load %0 : cir.ptr , !s32i - %6 = cir.load %1 : cir.ptr , !s32i + %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<2> : !s32i) : !s32i cir.store %3, %0 : !s32i, !cir.ptr + %4 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %4, %1 : !s32i, !cir.ptr + %5 = cir.load %0 : !cir.ptr, !s32i + %6 = cir.load %1 : !cir.ptr, !s32i %7 = cir.binop(mul, %5, %6) : !s32i // CHECK: = llvm.mul - cir.store %7, %2 : !s32i, cir.ptr - %8 = cir.load %2 : cir.ptr , !s32i - %9 = cir.load %1 : cir.ptr , !s32i + cir.store %7, %2 : !s32i, !cir.ptr + %8 = cir.load %2 : !cir.ptr, !s32i + %9 = cir.load %1 : !cir.ptr, !s32i %10 = cir.binop(div, %8, %9) : !s32i // CHECK: = llvm.sdiv - cir.store %10, %2 : !s32i, cir.ptr - %11 = cir.load %2 : cir.ptr , !s32i - %12 = cir.load %1 : cir.ptr , !s32i + cir.store %10, %2 : !s32i, !cir.ptr + %11 = cir.load %2 : !cir.ptr, !s32i + %12 = cir.load %1 : !cir.ptr, !s32i %13 = cir.binop(rem, %11, %12) : !s32i // CHECK: = llvm.srem - cir.store %13, %2 : !s32i, cir.ptr - %14 = cir.load %2 : cir.ptr , !s32i - %15 = cir.load %1 : cir.ptr , !s32i + cir.store %13, %2 : !s32i, !cir.ptr + %14 = cir.load %2 : !cir.ptr, !s32i + %15 = cir.load %1 : !cir.ptr, !s32i %16 = cir.binop(add, %14, %15) : !s32i // CHECK: = llvm.add - cir.store %16, %2 : !s32i, cir.ptr - %17 = cir.load %2 : cir.ptr , !s32i - %18 = cir.load %1 : cir.ptr , !s32i + cir.store %16, %2 : !s32i, !cir.ptr + %17 = cir.load %2 : !cir.ptr, !s32i + %18 = cir.load %1 : !cir.ptr, !s32i %19 = cir.binop(sub, %17, %18) : !s32i // CHECK: = llvm.sub - cir.store %19, %2 : !s32i, cir.ptr - %20 = cir.load %2 : cir.ptr , !s32i - %21 = cir.load %1 : cir.ptr , !s32i + cir.store %19, %2 : !s32i, !cir.ptr + %20 = cir.load %2 : !cir.ptr, !s32i + %21 = cir.load %1 : !cir.ptr, !s32i %22 = cir.shift(right, %20 : !s32i, %21 : !s32i) -> !s32i // CHECK: = llvm.ashr - cir.store %22, %2 : !s32i, cir.ptr - %23 = cir.load %2 : cir.ptr , !s32i - %24 = cir.load %1 : cir.ptr , !s32i + cir.store %22, %2 : !s32i, !cir.ptr + %23 = cir.load %2 : !cir.ptr, !s32i + %24 = cir.load %1 : !cir.ptr, !s32i %25 = cir.shift(left, %23 : !s32i, %24 : !s32i) -> !s32i // CHECK: = llvm.shl - cir.store %25, %2 : !s32i, cir.ptr - %26 = cir.load %2 : cir.ptr , !s32i - %27 = cir.load %1 : cir.ptr , !s32i + cir.store %25, %2 : !s32i, !cir.ptr + %26 = cir.load %2 : !cir.ptr, !s32i + %27 = cir.load %1 : !cir.ptr, !s32i %28 = cir.binop(and, %26, %27) : !s32i // CHECK: = llvm.and - cir.store %28, %2 : !s32i, cir.ptr - %29 = cir.load %2 : cir.ptr , !s32i - %30 = cir.load %1 : cir.ptr , !s32i + cir.store %28, %2 : !s32i, !cir.ptr + %29 = cir.load %2 : !cir.ptr, !s32i + %30 = cir.load %1 : !cir.ptr, !s32i %31 = cir.binop(xor, %29, %30) : !s32i // CHECK: = llvm.xor - cir.store %31, %2 : !s32i, cir.ptr - %32 = cir.load %2 : cir.ptr , !s32i - %33 = cir.load %1 : cir.ptr , !s32i + cir.store %31, %2 : !s32i, !cir.ptr + %32 = cir.load %2 : !cir.ptr, !s32i + %33 = cir.load %1 : !cir.ptr, !s32i %34 = cir.binop(or, %32, %33) : !s32i // CHECK: = llvm.or - cir.store %34, %2 : !s32i, cir.ptr + cir.store %34, %2 : !s32i, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir index 29076c52f51f..53066225857a 100644 --- a/clang/test/CIR/Lowering/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -4,51 +4,51 @@ module { cir.func @foo() { - %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} - %2 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<2> : !u32i) : !u32i cir.store %3, %0 : !u32i, cir.ptr - %4 = cir.const(#cir.int<1> : !u32i) : !u32i cir.store %4, %1 : !u32i, cir.ptr - %5 = cir.load %0 : cir.ptr , !u32i - %6 = cir.load %1 : cir.ptr , !u32i + %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %3 = cir.const(#cir.int<2> : !u32i) : !u32i cir.store %3, %0 : !u32i, !cir.ptr + %4 = cir.const(#cir.int<1> : !u32i) : !u32i cir.store %4, %1 : !u32i, !cir.ptr + %5 = cir.load %0 : !cir.ptr, !u32i + %6 = cir.load %1 : !cir.ptr, !u32i %7 = cir.binop(mul, %5, %6) : !u32i - cir.store %7, %2 : !u32i, cir.ptr - %8 = cir.load %2 : cir.ptr , !u32i - %9 = cir.load %1 : cir.ptr , !u32i + cir.store %7, %2 : !u32i, !cir.ptr + %8 = cir.load %2 : !cir.ptr, !u32i + %9 = cir.load %1 : !cir.ptr, !u32i %10 = cir.binop(div, %8, %9) : !u32i - cir.store %10, %2 : !u32i, cir.ptr - %11 = cir.load %2 : cir.ptr , !u32i - %12 = cir.load %1 : cir.ptr , !u32i + cir.store %10, %2 : !u32i, !cir.ptr + %11 = cir.load %2 : !cir.ptr, !u32i + %12 = cir.load %1 : !cir.ptr, !u32i %13 = cir.binop(rem, %11, %12) : !u32i - cir.store %13, %2 : !u32i, cir.ptr - %14 = cir.load %2 : cir.ptr , !u32i - %15 = cir.load %1 : cir.ptr , !u32i + cir.store %13, %2 : !u32i, !cir.ptr + %14 = cir.load %2 : !cir.ptr, !u32i + %15 = cir.load %1 : !cir.ptr, !u32i %16 = cir.binop(add, %14, %15) : !u32i - cir.store %16, %2 : !u32i, cir.ptr - %17 = cir.load %2 : cir.ptr , !u32i - %18 = cir.load %1 : cir.ptr , !u32i + cir.store %16, %2 : !u32i, !cir.ptr + %17 = cir.load %2 : !cir.ptr, !u32i + %18 = cir.load %1 : !cir.ptr, !u32i %19 = cir.binop(sub, %17, %18) : !u32i - cir.store %19, %2 : !u32i, cir.ptr - %20 = cir.load %2 : cir.ptr , !u32i - %21 = cir.load %1 : cir.ptr , !u32i + cir.store %19, %2 : !u32i, !cir.ptr + %20 = cir.load %2 : !cir.ptr, !u32i + %21 = cir.load %1 : !cir.ptr, !u32i %22 = cir.shift(right, %20 : !u32i, %21 : !u32i) -> !u32i - cir.store %22, %2 : !u32i, cir.ptr - %23 = cir.load %2 : cir.ptr , !u32i - %24 = cir.load %1 : cir.ptr , !u32i + cir.store %22, %2 : !u32i, !cir.ptr + %23 = cir.load %2 : !cir.ptr, !u32i + %24 = cir.load %1 : !cir.ptr, !u32i %25 = cir.shift(left, %23 : !u32i, %24 : !u32i) -> !u32i - cir.store %25, %2 : !u32i, cir.ptr - %26 = cir.load %2 : cir.ptr , !u32i - %27 = cir.load %1 : cir.ptr , !u32i + cir.store %25, %2 : !u32i, !cir.ptr + %26 = cir.load %2 : !cir.ptr, !u32i + %27 = cir.load %1 : !cir.ptr, !u32i %28 = cir.binop(and, %26, %27) : !u32i - cir.store %28, %2 : !u32i, cir.ptr - %29 = cir.load %2 : cir.ptr , !u32i - %30 = cir.load %1 : cir.ptr , !u32i + cir.store %28, %2 : !u32i, !cir.ptr + %29 = cir.load %2 : !cir.ptr, !u32i + %30 = cir.load %1 : !cir.ptr, !u32i %31 = cir.binop(xor, %29, %30) : !u32i - cir.store %31, %2 : !u32i, cir.ptr - %32 = cir.load %2 : cir.ptr , !u32i - %33 = cir.load %1 : cir.ptr , !u32i + cir.store %31, %2 : !u32i, !cir.ptr + %32 = cir.load %2 : !cir.ptr, !u32i + %33 = cir.load %1 : !cir.ptr, !u32i %34 = cir.binop(or, %32, %33) : !u32i - cir.store %34, %2 : !u32i, cir.ptr + cir.store %34, %2 : !u32i, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 34175667ec39..fb94f8135a4b 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -11,8 +11,8 @@ module { cir.func @foo() { %1 = cir.const(#true) : !cir.bool - %0 = cir.alloca !cir.bool, cir.ptr , ["a", init] {alignment = 1 : i64} - cir.store %1, %0 : !cir.bool, cir.ptr + %0 = cir.alloca !cir.bool, !cir.ptr, ["a", init] {alignment = 1 : i64} + cir.store %1, %0 : !cir.bool, !cir.ptr cir.return } // MLIR: llvm.func @foo() diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 60ad48e4a644..f6ec6a94be91 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -12,40 +12,40 @@ module { cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i, %arg2: !cir.float, %arg3: !cir.double) -> !s32i { // CHECK: llvm.func @cStyleCasts - %0 = cir.alloca !u32i, cir.ptr , ["x1", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["x2", init] {alignment = 4 : i64} - %20 = cir.alloca !s16i, cir.ptr , ["x4", init] {alignment = 2 : i64} - %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - %3 = cir.alloca !s8i, cir.ptr , ["a", init] {alignment = 1 : i64} - %4 = cir.alloca !s16i, cir.ptr , ["b", init] {alignment = 2 : i64} - %5 = cir.alloca !s64i, cir.ptr , ["c", init] {alignment = 8 : i64} - %6 = cir.alloca !s64i, cir.ptr , ["d", init] {alignment = 8 : i64} - %7 = cir.alloca !cir.array, cir.ptr >, ["arr"] {alignment = 4 : i64} - %8 = cir.alloca !cir.ptr, cir.ptr >, ["e", init] {alignment = 8 : i64} - cir.store %arg0, %0 : !u32i, cir.ptr - cir.store %arg1, %1 : !s32i, cir.ptr + %0 = cir.alloca !u32i, !cir.ptr, ["x1", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["x2", init] {alignment = 4 : i64} + %20 = cir.alloca !s16i, !cir.ptr, ["x4", init] {alignment = 2 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + %3 = cir.alloca !s8i, !cir.ptr, ["a", init] {alignment = 1 : i64} + %4 = cir.alloca !s16i, !cir.ptr, ["b", init] {alignment = 2 : i64} + %5 = cir.alloca !s64i, !cir.ptr, ["c", init] {alignment = 8 : i64} + %6 = cir.alloca !s64i, !cir.ptr, ["d", init] {alignment = 8 : i64} + %7 = cir.alloca !cir.array, !cir.ptr>, ["arr"] {alignment = 4 : i64} + %8 = cir.alloca !cir.ptr, !cir.ptr>, ["e", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !u32i, !cir.ptr + cir.store %arg1, %1 : !s32i, !cir.ptr // Integer casts. - %9 = cir.load %0 : cir.ptr , !u32i + %9 = cir.load %0 : !cir.ptr, !u32i %10 = cir.cast(integral, %9 : !u32i), !s8i // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8 - cir.store %10, %3 : !s8i, cir.ptr - %11 = cir.load %1 : cir.ptr , !s32i + cir.store %10, %3 : !s8i, !cir.ptr + %11 = cir.load %1 : !cir.ptr, !s32i %12 = cir.cast(integral, %11 : !s32i), !s16i // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i16 - cir.store %12, %4 : !s16i, cir.ptr - %13 = cir.load %0 : cir.ptr , !u32i + cir.store %12, %4 : !s16i, !cir.ptr + %13 = cir.load %0 : !cir.ptr, !u32i %14 = cir.cast(integral, %13 : !u32i), !s64i // CHECK: %{{[0-9]+}} = llvm.zext %{{[0-9]+}} : i32 to i64 - cir.store %14, %5 : !s64i, cir.ptr - %15 = cir.load %1 : cir.ptr , !s32i + cir.store %14, %5 : !s64i, !cir.ptr + %15 = cir.load %1 : !cir.ptr, !s32i %16 = cir.cast(integral, %15 : !s32i), !s64i // CHECK: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 %30 = cir.cast(integral, %arg1 : !s32i), !u32i // Should not produce a cast. %32 = cir.cast(integral, %arg0 : !u32i), !s32i // Should not produce a cast. - %21 = cir.load %20 : cir.ptr , !s16i + %21 = cir.load %20 : !cir.ptr, !s16i %22 = cir.cast(integral, %21 : !s16i), !u64i // CHECK: %[[TMP:[0-9]+]] = llvm.sext %{{[0-9]+}} : i16 to i64 %33 = cir.cast(int_to_bool, %arg1 : !s32i), !cir.bool @@ -54,9 +54,9 @@ module { // CHECK: %{{.+}} = llvm.zext %[[#CMP]] : i1 to i8 // Pointer casts. - cir.store %16, %6 : !s64i, cir.ptr + cir.store %16, %6 : !s64i, !cir.ptr %17 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr - cir.store %17, %8 : !cir.ptr, cir.ptr > + cir.store %17, %8 : !cir.ptr, !cir.ptr> // CHECK: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, i32 %23 = cir.cast(int_to_ptr, %22 : !u64i), !cir.ptr // CHECK: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr @@ -77,23 +77,23 @@ module { // CHECK: %{{.+}} = llvm.fptrunc %{{.+}} : f64 to f32 %34 = cir.cast(floating, %arg3 : !cir.double), !cir.float - cir.store %18, %2 : !s32i, cir.ptr - %19 = cir.load %2 : cir.ptr , !s32i + cir.store %18, %2 : !s32i, !cir.ptr + %19 = cir.load %2 : !cir.ptr, !s32i cir.return %19 : !s32i } cir.func @testBoolToIntCast(%arg0: !cir.bool) { // CHECK: llvm.func @testBoolToIntCast - %0 = cir.alloca !cir.bool, cir.ptr , ["bl", init] {alignment = 1 : i64} - %1 = cir.alloca !u8i, cir.ptr , ["y", init] {alignment = 1 : i64} - cir.store %arg0, %0 : !cir.bool, cir.ptr + %0 = cir.alloca !cir.bool, !cir.ptr, ["bl", init] {alignment = 1 : i64} + %1 = cir.alloca !u8i, !cir.ptr, ["y", init] {alignment = 1 : i64} + cir.store %arg0, %0 : !cir.bool, !cir.ptr - %2 = cir.load %0 : cir.ptr , !cir.bool + %2 = cir.load %0 : !cir.ptr, !cir.bool %3 = cir.cast(bool_to_int, %2 : !cir.bool), !u8i // CHECK: %[[LOAD_BOOL:.*]] = llvm.load %{{.*}} : !llvm.ptr -> i8 // CHECK: %{{.*}} = llvm.bitcast %[[LOAD_BOOL]] : i8 to i8 - cir.store %3, %1 : !u8i, cir.ptr + cir.store %3, %1 : !u8i, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/class.cir b/clang/test/CIR/Lowering/class.cir index 6a390485eec5..cc5046be57ac 100644 --- a/clang/test/CIR/Lowering/class.cir +++ b/clang/test/CIR/Lowering/class.cir @@ -12,7 +12,7 @@ module { cir.func @test() { - %1 = cir.alloca !ty_22S22, cir.ptr , ["x"] {alignment = 4 : i64} + %1 = cir.alloca !ty_22S22, !cir.ptr, ["x"] {alignment = 4 : i64} // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#CLASS:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"class.S", (i8, i32)> %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr @@ -23,9 +23,9 @@ module { } cir.func @shouldConstInitLocalClassesWithConstStructAttr() { - %0 = cir.alloca !ty_22S2A22, cir.ptr , ["s"] {alignment = 4 : i64} + %0 = cir.alloca !ty_22S2A22, !cir.ptr, ["s"] {alignment = 4 : i64} %1 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22) : !ty_22S2A22 - cir.store %1, %0 : !ty_22S2A22, cir.ptr + cir.store %1, %0 : !ty_22S2A22, !cir.ptr cir.return } // CHECK: llvm.func @shouldConstInitLocalClassesWithConstStructAttr() @@ -82,10 +82,10 @@ module { cir.func @shouldLowerClassCopies() { // CHECK: llvm.func @shouldLowerClassCopies() - %1 = cir.alloca !ty_22S22, cir.ptr , ["a"] {alignment = 4 : i64} + %1 = cir.alloca !ty_22S22, !cir.ptr, ["a"] {alignment = 4 : i64} // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#SA:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"class.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr - %2 = cir.alloca !ty_22S22, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !ty_22S22, !cir.ptr, ["b", init] {alignment = 4 : i64} // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#SB:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"class.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr cir.copy %1 to %2 : !cir.ptr diff --git a/clang/test/CIR/Lowering/cmp.cir b/clang/test/CIR/Lowering/cmp.cir index 06dd60ff5453..a905e8490b20 100644 --- a/clang/test/CIR/Lowering/cmp.cir +++ b/clang/test/CIR/Lowering/cmp.cir @@ -4,57 +4,57 @@ !s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca !s32i, cir.ptr , ["a"] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["b"] {alignment = 4 : i64} - %2 = cir.alloca !cir.float, cir.ptr , ["c"] {alignment = 4 : i64} - %3 = cir.alloca !cir.float, cir.ptr , ["d"] {alignment = 4 : i64} - %4 = cir.alloca !cir.bool, cir.ptr , ["e"] {alignment = 1 : i64} - %5 = cir.load %0 : cir.ptr , !s32i - %6 = cir.load %1 : cir.ptr , !s32i + %0 = cir.alloca !s32i, !cir.ptr, ["a"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["b"] {alignment = 4 : i64} + %2 = cir.alloca !cir.float, !cir.ptr, ["c"] {alignment = 4 : i64} + %3 = cir.alloca !cir.float, !cir.ptr, ["d"] {alignment = 4 : i64} + %4 = cir.alloca !cir.bool, !cir.ptr, ["e"] {alignment = 1 : i64} + %5 = cir.load %0 : !cir.ptr, !s32i + %6 = cir.load %1 : !cir.ptr, !s32i %7 = cir.cmp(gt, %5, %6) : !s32i, !cir.bool // CHECK: llvm.icmp "sgt" - %8 = cir.load %0 : cir.ptr , !s32i - %9 = cir.load %1 : cir.ptr , !s32i + %8 = cir.load %0 : !cir.ptr, !s32i + %9 = cir.load %1 : !cir.ptr, !s32i %10 = cir.cmp(eq, %8, %9) : !s32i, !cir.bool // CHECK: llvm.icmp "eq" - %11 = cir.load %0 : cir.ptr , !s32i - %12 = cir.load %1 : cir.ptr , !s32i + %11 = cir.load %0 : !cir.ptr, !s32i + %12 = cir.load %1 : !cir.ptr, !s32i %13 = cir.cmp(lt, %11, %12) : !s32i, !cir.bool // CHECK: llvm.icmp "slt" - %14 = cir.load %0 : cir.ptr , !s32i - %15 = cir.load %1 : cir.ptr , !s32i + %14 = cir.load %0 : !cir.ptr, !s32i + %15 = cir.load %1 : !cir.ptr, !s32i %16 = cir.cmp(ge, %14, %15) : !s32i, !cir.bool // CHECK: llvm.icmp "sge" - %17 = cir.load %0 : cir.ptr , !s32i - %18 = cir.load %1 : cir.ptr , !s32i + %17 = cir.load %0 : !cir.ptr, !s32i + %18 = cir.load %1 : !cir.ptr, !s32i %19 = cir.cmp(ne, %17, %18) : !s32i, !cir.bool // CHECK: llvm.icmp "ne" - %20 = cir.load %0 : cir.ptr , !s32i - %21 = cir.load %1 : cir.ptr , !s32i + %20 = cir.load %0 : !cir.ptr, !s32i + %21 = cir.load %1 : !cir.ptr, !s32i %22 = cir.cmp(le, %20, %21) : !s32i, !cir.bool // CHECK: llvm.icmp "sle" - %23 = cir.load %2 : cir.ptr , !cir.float - %24 = cir.load %3 : cir.ptr , !cir.float + %23 = cir.load %2 : !cir.ptr, !cir.float + %24 = cir.load %3 : !cir.ptr, !cir.float %25 = cir.cmp(gt, %23, %24) : !cir.float, !cir.bool // CHECK: llvm.fcmp "ogt" - %26 = cir.load %2 : cir.ptr , !cir.float - %27 = cir.load %3 : cir.ptr , !cir.float + %26 = cir.load %2 : !cir.ptr, !cir.float + %27 = cir.load %3 : !cir.ptr, !cir.float %28 = cir.cmp(eq, %26, %27) : !cir.float, !cir.bool // CHECK: llvm.fcmp "oeq" - %29 = cir.load %2 : cir.ptr , !cir.float - %30 = cir.load %3 : cir.ptr , !cir.float + %29 = cir.load %2 : !cir.ptr, !cir.float + %30 = cir.load %3 : !cir.ptr, !cir.float %31 = cir.cmp(lt, %29, %30) : !cir.float, !cir.bool // CHECK: llvm.fcmp "olt" - %32 = cir.load %2 : cir.ptr , !cir.float - %33 = cir.load %3 : cir.ptr , !cir.float + %32 = cir.load %2 : !cir.ptr, !cir.float + %33 = cir.load %3 : !cir.ptr, !cir.float %34 = cir.cmp(ge, %32, %33) : !cir.float, !cir.bool // CHECK: llvm.fcmp "oge" - %35 = cir.load %2 : cir.ptr , !cir.float - %36 = cir.load %3 : cir.ptr , !cir.float + %35 = cir.load %2 : !cir.ptr, !cir.float + %36 = cir.load %3 : !cir.ptr, !cir.float %37 = cir.cmp(ne, %35, %36) : !cir.float, !cir.bool // CHECK: llvm.fcmp "une" - %38 = cir.load %2 : cir.ptr , !cir.float - %39 = cir.load %3 : cir.ptr , !cir.float + %38 = cir.load %2 : !cir.ptr, !cir.float + %39 = cir.load %3 : !cir.ptr, !cir.float %40 = cir.cmp(le, %38, %39) : !cir.float, !cir.bool // CHECK: llvm.fcmp "ole" diff --git a/clang/test/CIR/Lowering/const-array.cir b/clang/test/CIR/Lowering/const-array.cir index 7aff779a04fa..69917ddb3a36 100644 --- a/clang/test/CIR/Lowering/const-array.cir +++ b/clang/test/CIR/Lowering/const-array.cir @@ -7,7 +7,7 @@ module { // LLVM: @normal_url_char = internal global [4 x i8] c"\00\01\00\00" cir.func @c0() -> !cir.ptr> { - %0 = cir.get_global @normal_url_char : cir.ptr > + %0 = cir.get_global @normal_url_char : !cir.ptr> cir.return %0 : !cir.ptr> } // LLVM: define ptr @c0() diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 76ec616bed21..396006541a1c 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -36,9 +36,9 @@ module { // CHECK: llvm.return cir.func @testConstArrayOfStructs() { - %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 4 : i64} + %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 4 : i64} %1 = cir.const(#cir.const_array<[#cir.const_struct<{#cir.int<0> : !s32i, #cir.int<1> : !s32i}> : !ty_22anon2E122]> : !cir.array) : !cir.array - cir.store %1, %0 : !cir.array, cir.ptr > + cir.store %1, %0 : !cir.array, !cir.ptr> cir.return } // CHECK: llvm.func @testConstArrayOfStructs() @@ -55,9 +55,9 @@ module { // CHECK: llvm.return cir.func @testArrWithTrailingZeros() { - %0 = cir.alloca !cir.array, cir.ptr >, ["a"] {alignment = 16 : i64} + %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array) : !cir.array - cir.store %1, %0 : !cir.array, cir.ptr > + cir.store %1, %0 : !cir.array, !cir.ptr> cir.return } // CHECK: llvm.func @testArrWithTrailingZeros() diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 5b7742fc1400..c43e52a971a8 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -4,50 +4,50 @@ !s32i = !cir.int module { cir.func @dot(%arg0: !cir.ptr, %arg1: !cir.ptr, %arg2: !s32i) -> !cir.double { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} - %1 = cir.alloca !cir.ptr, cir.ptr >, ["b", init] {alignment = 8 : i64} - %2 = cir.alloca !s32i, cir.ptr , ["size", init] {alignment = 4 : i64} - %3 = cir.alloca !cir.double, cir.ptr , ["__retval"] {alignment = 8 : i64} - %4 = cir.alloca !cir.double, cir.ptr , ["q", init] {alignment = 8 : i64} - cir.store %arg0, %0 : !cir.ptr, cir.ptr > - cir.store %arg1, %1 : !cir.ptr, cir.ptr > - cir.store %arg2, %2 : !s32i, cir.ptr + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["a", init] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr, !cir.ptr>, ["b", init] {alignment = 8 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["size", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.double, !cir.ptr, ["__retval"] {alignment = 8 : i64} + %4 = cir.alloca !cir.double, !cir.ptr, ["q", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, !cir.ptr> + cir.store %arg1, %1 : !cir.ptr, !cir.ptr> + cir.store %arg2, %2 : !s32i, !cir.ptr %5 = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double - cir.store %5, %4 : !cir.double, cir.ptr + cir.store %5, %4 : !cir.double, !cir.ptr cir.scope { - %8 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %8 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %9 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %9, %8 : !s32i, cir.ptr + cir.store %9, %8 : !s32i, !cir.ptr cir.for : cond { - %10 = cir.load %8 : cir.ptr , !s32i - %11 = cir.load %2 : cir.ptr , !s32i + %10 = cir.load %8 : !cir.ptr, !s32i + %11 = cir.load %2 : !cir.ptr, !s32i %12 = cir.cmp(lt, %10, %11) : !s32i, !s32i %13 = cir.cast(int_to_bool, %12 : !s32i), !cir.bool cir.condition(%13) } body { - %10 = cir.load %0 : cir.ptr >, !cir.ptr - %11 = cir.load %8 : cir.ptr , !s32i + %10 = cir.load %0 : !cir.ptr>, !cir.ptr + %11 = cir.load %8 : !cir.ptr, !s32i %12 = cir.ptr_stride(%10 : !cir.ptr, %11 : !s32i), !cir.ptr - %13 = cir.load %12 : cir.ptr , !cir.double - %14 = cir.load %1 : cir.ptr >, !cir.ptr - %15 = cir.load %8 : cir.ptr , !s32i + %13 = cir.load %12 : !cir.ptr, !cir.double + %14 = cir.load %1 : !cir.ptr>, !cir.ptr + %15 = cir.load %8 : !cir.ptr, !s32i %16 = cir.ptr_stride(%14 : !cir.ptr, %15 : !s32i), !cir.ptr - %17 = cir.load %16 : cir.ptr , !cir.double + %17 = cir.load %16 : !cir.ptr, !cir.double %18 = cir.binop(mul, %13, %17) : !cir.double - %19 = cir.load %4 : cir.ptr , !cir.double + %19 = cir.load %4 : !cir.ptr, !cir.double %20 = cir.binop(add, %19, %18) : !cir.double - cir.store %20, %4 : !cir.double, cir.ptr + cir.store %20, %4 : !cir.double, !cir.ptr cir.yield } step { - %10 = cir.load %8 : cir.ptr , !s32i + %10 = cir.load %8 : !cir.ptr, !s32i %11 = cir.unary(inc, %10) : !s32i, !s32i - cir.store %11, %8 : !s32i, cir.ptr + cir.store %11, %8 : !s32i, !cir.ptr cir.yield } } - %6 = cir.load %4 : cir.ptr , !cir.double - cir.store %6, %3 : !cir.double, cir.ptr - %7 = cir.load %3 : cir.ptr , !cir.double + %6 = cir.load %4 : !cir.ptr, !cir.double + cir.store %6, %3 : !cir.double, !cir.ptr + %7 = cir.load %3 : !cir.ptr, !cir.double cir.return %7 : !cir.double } } diff --git a/clang/test/CIR/Lowering/func.cir b/clang/test/CIR/Lowering/func.cir index 6dcb7bdb42d0..76e6d4f0d181 100644 --- a/clang/test/CIR/Lowering/func.cir +++ b/clang/test/CIR/Lowering/func.cir @@ -6,7 +6,7 @@ module { cir.func no_proto private @noProto3(...) -> !s32i // MLIR: llvm.func @noProto3(...) -> i32 cir.func @test3(%arg0: !s32i) { - %3 = cir.get_global @noProto3 : cir.ptr > + %3 = cir.get_global @noProto3 : !cir.ptr> // MLIR: %[[#FN_PTR:]] = llvm.mlir.addressof @noProto3 : !llvm.ptr %4 = cir.cast(bitcast, %3 : !cir.ptr>), !cir.ptr> // MLIR: %[[#FUNC:]] = llvm.bitcast %[[#FN_PTR]] : !llvm.ptr to !llvm.ptr diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index dde8087fada6..594015c77467 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -44,35 +44,35 @@ module { cir.global external @s1 = #cir.global_view<@".str1"> : !cir.ptr cir.global external @s2 = #cir.global_view<@".str"> : !cir.ptr cir.func @_Z10use_globalv() { - %0 = cir.alloca !s32i, cir.ptr , ["li", init] {alignment = 4 : i64} - %1 = cir.get_global @a : cir.ptr - %2 = cir.load %1 : cir.ptr , !s32i - cir.store %2, %0 : !s32i, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["li", init] {alignment = 4 : i64} + %1 = cir.get_global @a : !cir.ptr + %2 = cir.load %1 : !cir.ptr, !s32i + cir.store %2, %0 : !s32i, !cir.ptr cir.return } cir.func @_Z17use_global_stringv() { - %0 = cir.alloca !u8i, cir.ptr , ["c", init] {alignment = 1 : i64} - %1 = cir.get_global @s2 : cir.ptr > - %2 = cir.load %1 : cir.ptr >, !cir.ptr + %0 = cir.alloca !u8i, !cir.ptr, ["c", init] {alignment = 1 : i64} + %1 = cir.get_global @s2 : !cir.ptr> + %2 = cir.load %1 : !cir.ptr>, !cir.ptr %3 = cir.const(#cir.int<0> : !s32i) : !s32i %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr - %5 = cir.load %4 : cir.ptr , !s8i + %5 = cir.load %4 : !cir.ptr, !s8i %6 = cir.cast(integral, %5 : !s8i), !u8i - cir.store %6, %0 : !u8i, cir.ptr + cir.store %6, %0 : !u8i, !cir.ptr cir.return } cir.func linkonce_odr @_Z4funcIiET_v() -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} %1 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr - %2 = cir.load %0 : cir.ptr , !s32i + cir.store %1, %0 : !s32i, !cir.ptr + %2 = cir.load %0 : !cir.ptr, !s32i cir.return %2 : !s32i } cir.func @_Z8use_funcv() -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} %1 = cir.call @_Z4funcIiET_v() : () -> !s32i - cir.store %1, %0 : !s32i, cir.ptr - %2 = cir.load %0 : cir.ptr , !s32i + cir.store %1, %0 : !s32i, !cir.ptr + %2 = cir.load %0 : !cir.ptr, !s32i cir.return %2 : !s32i } cir.global external @string = #cir.const_array<[#cir.int<119> : !s8i, #cir.int<104> : !s8i, #cir.int<97> : !s8i, #cir.int<116> : !s8i, #cir.int<110> : !s8i, #cir.int<111> : !s8i, #cir.int<119> : !s8i, #cir.int<0> : !s8i]> : !cir.array @@ -98,36 +98,36 @@ module { // LLVM: @nestedStringPtr = global %struct.StringStructPtr { ptr @.str } cir.func @_Z11get_globalsv() { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["s", init] {alignment = 8 : i64} - %1 = cir.alloca !cir.ptr, cir.ptr >, ["u", init] {alignment = 8 : i64} - %2 = cir.alloca !cir.ptr, cir.ptr >, ["ss", init] {alignment = 8 : i64} - %3 = cir.alloca !cir.ptr, cir.ptr >, ["si", init] {alignment = 8 : i64} - %4 = cir.alloca !cir.ptr, cir.ptr >, ["l", init] {alignment = 8 : i64} - %5 = cir.get_global @string : cir.ptr > + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr, !cir.ptr>, ["u", init] {alignment = 8 : i64} + %2 = cir.alloca !cir.ptr, !cir.ptr>, ["ss", init] {alignment = 8 : i64} + %3 = cir.alloca !cir.ptr, !cir.ptr>, ["si", init] {alignment = 8 : i64} + %4 = cir.alloca !cir.ptr, !cir.ptr>, ["l", init] {alignment = 8 : i64} + %5 = cir.get_global @string : !cir.ptr> %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @string : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i8 - cir.store %6, %0 : !cir.ptr, cir.ptr > - %7 = cir.get_global @uint : cir.ptr > + cir.store %6, %0 : !cir.ptr, !cir.ptr> + %7 = cir.get_global @uint : !cir.ptr> %8 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @uint : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i32 - cir.store %8, %1 : !cir.ptr, cir.ptr > - %9 = cir.get_global @sshort : cir.ptr > + cir.store %8, %1 : !cir.ptr, !cir.ptr> + %9 = cir.get_global @sshort : !cir.ptr> %10 = cir.cast(array_to_ptrdecay, %9 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sshort : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i16 - cir.store %10, %2 : !cir.ptr, cir.ptr > - %11 = cir.get_global @sint : cir.ptr > + cir.store %10, %2 : !cir.ptr, !cir.ptr> + %11 = cir.get_global @sint : !cir.ptr> %12 = cir.cast(array_to_ptrdecay, %11 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @sint : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i32 - cir.store %12, %3 : !cir.ptr, cir.ptr > - %13 = cir.get_global @ll : cir.ptr > + cir.store %12, %3 : !cir.ptr, !cir.ptr> + %13 = cir.get_global @ll : !cir.ptr> %14 = cir.cast(array_to_ptrdecay, %13 : !cir.ptr>), !cir.ptr // MLIR: %[[RES:[0-9]+]] = llvm.mlir.addressof @ll : !llvm.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %[[RES]][0] : (!llvm.ptr) -> !llvm.ptr, i64 - cir.store %14, %4 : !cir.ptr, cir.ptr > + cir.store %14, %4 : !cir.ptr, !cir.ptr> cir.return } cir.global external @flt = #cir.const_array<[#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float]> : !cir.array @@ -151,22 +151,22 @@ module { cir.global "private" internal @Handlers = #cir.const_array<[#cir.const_struct<{#cir.global_view<@myfun> : !cir.ptr>}> : !ty_22anon2E122]> : !cir.array cir.func internal private @myfun(%arg0: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr cir.return } cir.func @foo(%arg0: !s32i, %arg1: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["flag", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - cir.store %arg1, %1 : !s32i, cir.ptr - %2 = cir.get_global @Handlers : cir.ptr > - %3 = cir.load %0 : cir.ptr , !s32i + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["flag", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.store %arg1, %1 : !s32i, !cir.ptr + %2 = cir.get_global @Handlers : !cir.ptr> + %3 = cir.load %0 : !cir.ptr, !s32i %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr %5 = cir.ptr_stride(%4 : !cir.ptr, %3 : !s32i), !cir.ptr %6 = cir.get_member %5[0] {name = "func"} : !cir.ptr -> !cir.ptr>> - %7 = cir.load %6 : cir.ptr >>, !cir.ptr> - %8 = cir.load %1 : cir.ptr , !s32i + %7 = cir.load %6 : !cir.ptr>>, !cir.ptr> + %8 = cir.load %1 : !cir.ptr, !s32i cir.call %7(%8) : (!cir.ptr>, !s32i) -> () cir.return } @@ -183,7 +183,7 @@ module { %0 = cir.const(#cir.global_view<@zero_array> : !cir.ptr) : !cir.ptr %1 = cir.const(#cir.int<0> : !s32i) : !s32i %2 = cir.ptr_stride(%0 : !cir.ptr, %1 : !s32i), !cir.ptr - %3 = cir.load %2 : cir.ptr , !s32i + %3 = cir.load %2 : !cir.ptr, !s32i cir.return } // MLIR: %0 = llvm.mlir.addressof @zero_array diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 6dc2191c916e..784a8f473724 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -5,21 +5,21 @@ module { cir.func @foo() { - %0 = cir.alloca !u32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !u32i) : !u32i - cir.store %1, %0 : !u32i, cir.ptr + cir.store %1, %0 : !u32i, !cir.ptr cir.br ^bb2 ^bb1: // no predecessors - %2 = cir.load %0 : cir.ptr , !u32i + %2 = cir.load %0 : !cir.ptr, !u32i %3 = cir.const(#cir.int<1> : !u32i) : !u32i %4 = cir.binop(add, %2, %3) : !u32i - cir.store %4, %0 : !u32i, cir.ptr + cir.store %4, %0 : !u32i, !cir.ptr cir.br ^bb2 ^bb2: // 2 preds: ^bb0, ^bb1 - %5 = cir.load %0 : cir.ptr , !u32i + %5 = cir.load %0 : !cir.ptr, !u32i %6 = cir.const(#cir.int<2> : !u32i) : !u32i %7 = cir.binop(add, %5, %6) : !u32i - cir.store %7, %0 : !u32i, cir.ptr + cir.store %7, %0 : !u32i, !cir.ptr cir.return } } diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 1f912805553f..7c808d31c2fb 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -8,14 +8,14 @@ module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.sign cir.func private @printf(!cir.ptr, ...) -> !s32i cir.global "private" constant internal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.func @main() -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - %1 = cir.get_global @printf : cir.ptr , ...)>> - %2 = cir.get_global @".str" : cir.ptr > + %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + %1 = cir.get_global @printf : !cir.ptr, ...)>> + %2 = cir.get_global @".str" : !cir.ptr> %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr %4 = cir.call @printf(%3) : (!cir.ptr) -> !s32i %5 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %5, %0 : !s32i, cir.ptr - %6 = cir.load %0 : cir.ptr , !s32i + cir.store %5, %0 : !s32i, !cir.ptr + %6 = cir.load %0 : !cir.ptr, !s32i cir.return %6 : !s32i } } diff --git a/clang/test/CIR/Lowering/int-wrap.cir b/clang/test/CIR/Lowering/int-wrap.cir index 3de5ec85b526..ca4ee05b5354 100644 --- a/clang/test/CIR/Lowering/int-wrap.cir +++ b/clang/test/CIR/Lowering/int-wrap.cir @@ -4,9 +4,9 @@ !s32i = !cir.int module { cir.func @test(%arg0: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , ["len", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - %1 = cir.load %0 : cir.ptr , !s32i + %0 = cir.alloca !s32i, !cir.ptr, ["len", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + %1 = cir.load %0 : !cir.ptr, !s32i %2 = cir.const(#cir.int<42> : !s32i) : !s32i %3 = cir.binop(sub, %1, %2) nsw : !s32i %4 = cir.binop(sub, %1, %2) nuw : !s32i diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index fc3f333db56d..e2fce2972385 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -4,18 +4,18 @@ module { cir.func @foo() -> !u32i { - %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !u32i) : !u32i - cir.store %1, %0 : !u32i, cir.ptr - %2 = cir.load %0 : cir.ptr , !u32i + cir.store %1, %0 : !u32i, !cir.ptr + %2 = cir.load %0 : !cir.ptr, !u32i cir.return %2 : !u32i } cir.func @test_volatile() -> !u32i { - %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !u32i) : !u32i - cir.store volatile %1, %0 : !u32i, cir.ptr - %2 = cir.load volatile %0 : cir.ptr , !u32i + cir.store volatile %1, %0 : !u32i, !cir.ptr + %2 = cir.load volatile %0 : !cir.ptr, !u32i cir.return %2 : !u32i } } diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index ee5238c5748a..02af716f703e 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -5,11 +5,11 @@ module { cir.func @testFor() { cir.scope { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr + cir.store %1, %0 : !s32i, !cir.ptr cir.for : cond { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool @@ -17,7 +17,7 @@ module { } body { cir.scope { cir.scope { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<5> : !s32i) : !s32i %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool @@ -28,9 +28,9 @@ module { } cir.yield } step { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr + cir.store %3, %0 : !s32i, !cir.ptr cir.yield } } @@ -67,11 +67,11 @@ module { cir.func @testForNested() { cir.scope { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr + cir.store %1, %0 : !s32i, !cir.ptr cir.for : cond { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool @@ -79,11 +79,11 @@ module { } body { cir.scope { cir.scope { - %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["j", init] {alignment = 4 : i64} %3 = cir.const(#cir.int<1> : !s32i) : !s32i - cir.store %3, %2 : !s32i, cir.ptr + cir.store %3, %2 : !s32i, !cir.ptr cir.for : cond { - %4 = cir.load %2 : cir.ptr , !s32i + %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.const(#cir.int<10> : !s32i) : !s32i %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool @@ -91,7 +91,7 @@ module { } body { cir.scope { cir.scope { - %4 = cir.load %2 : cir.ptr , !s32i + %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.const(#cir.int<5> : !s32i) : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool @@ -102,18 +102,18 @@ module { } cir.yield } step { - %4 = cir.load %2 : cir.ptr , !s32i + %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.unary(inc, %4) : !s32i, !s32i - cir.store %5, %2 : !s32i, cir.ptr + cir.store %5, %2 : !s32i, !cir.ptr cir.yield } } } cir.yield } step { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr + cir.store %3, %0 : !s32i, !cir.ptr cir.yield } } @@ -167,22 +167,22 @@ module { // CHECK: } cir.func @testWhile() { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr + cir.store %1, %0 : !s32i, !cir.ptr cir.scope { cir.while { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) } do { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr + cir.store %3, %0 : !s32i, !cir.ptr cir.scope { - %4 = cir.load %0 : cir.ptr , !s32i + %4 = cir.load %0 : !cir.ptr, !s32i %5 = cir.const(#cir.int<5> : !s32i) : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool @@ -222,16 +222,16 @@ module { // CHECK: } cir.func @testDoWhile() { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr + cir.store %1, %0 : !s32i, !cir.ptr cir.scope { cir.do { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr + cir.store %3, %0 : !s32i, !cir.ptr cir.scope { - %4 = cir.load %0 : cir.ptr , !s32i + %4 = cir.load %0 : !cir.ptr, !s32i %5 = cir.const(#cir.int<5> : !s32i) : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool @@ -241,7 +241,7 @@ cir.func @testDoWhile() { } cir.yield } while { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir index 9cfd3635d740..13db11549099 100644 --- a/clang/test/CIR/Lowering/loops-with-continue.cir +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -5,11 +5,11 @@ module { cir.func @testFor() { cir.scope { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr + cir.store %1, %0 : !s32i, !cir.ptr cir.for : cond { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool @@ -17,7 +17,7 @@ module { } body { cir.scope { cir.scope { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<5> : !s32i) : !s32i %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool @@ -28,9 +28,9 @@ module { } cir.yield } step { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr + cir.store %3, %0 : !s32i, !cir.ptr cir.yield } } @@ -68,11 +68,11 @@ module { cir.func @testForNested() { cir.scope { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<1> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr + cir.store %1, %0 : !s32i, !cir.ptr cir.for : cond { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool @@ -80,11 +80,11 @@ module { } body { cir.scope { cir.scope { - %2 = cir.alloca !s32i, cir.ptr , ["j", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["j", init] {alignment = 4 : i64} %3 = cir.const(#cir.int<1> : !s32i) : !s32i - cir.store %3, %2 : !s32i, cir.ptr + cir.store %3, %2 : !s32i, !cir.ptr cir.for : cond { - %4 = cir.load %2 : cir.ptr , !s32i + %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.const(#cir.int<10> : !s32i) : !s32i %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool @@ -92,7 +92,7 @@ module { } body { cir.scope { cir.scope { - %4 = cir.load %2 : cir.ptr , !s32i + %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.const(#cir.int<5> : !s32i) : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool @@ -103,18 +103,18 @@ module { } cir.yield } step { - %4 = cir.load %2 : cir.ptr , !s32i + %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.unary(inc, %4) : !s32i, !s32i - cir.store %5, %2 : !s32i, cir.ptr + cir.store %5, %2 : !s32i, !cir.ptr cir.yield } } } cir.yield } step { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr + cir.store %3, %0 : !s32i, !cir.ptr cir.yield } } @@ -167,22 +167,22 @@ module { // CHECK: } cir.func @testWhile() { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr + cir.store %1, %0 : !s32i, !cir.ptr cir.scope { cir.while { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) } do { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr + cir.store %3, %0 : !s32i, !cir.ptr cir.scope { - %4 = cir.load %0 : cir.ptr , !s32i + %4 = cir.load %0 : !cir.ptr, !s32i %5 = cir.const(#cir.int<5> : !s32i) : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool @@ -219,16 +219,16 @@ cir.func @testWhile() { // CHECK: } cir.func @testDoWhile() { - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr + cir.store %1, %0 : !s32i, !cir.ptr cir.scope { cir.do { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i - cir.store %3, %0 : !s32i, cir.ptr + cir.store %3, %0 : !s32i, !cir.ptr cir.scope { - %4 = cir.load %0 : cir.ptr , !s32i + %4 = cir.load %0 : !cir.ptr, !s32i %5 = cir.const(#cir.int<5> : !s32i) : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool @@ -238,7 +238,7 @@ cir.func @testWhile() { } cir.yield } while { - %2 = cir.load %0 : cir.ptr , !s32i + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<10> : !s32i) : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool diff --git a/clang/test/CIR/Lowering/openmp.cir b/clang/test/CIR/Lowering/openmp.cir index 73b3155252cc..2457d929781f 100644 --- a/clang/test/CIR/Lowering/openmp.cir +++ b/clang/test/CIR/Lowering/openmp.cir @@ -3,18 +3,18 @@ !s32i = !cir.int module { cir.func @omp_parallel() { - %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %1, %0 : !s32i, cir.ptr + cir.store %1, %0 : !s32i, !cir.ptr omp.parallel { cir.scope { - %2 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} %3 = cir.const(#cir.int<1> : !s32i) : !s32i - cir.store %3, %2 : !s32i, cir.ptr - %4 = cir.load %2 : cir.ptr , !s32i + cir.store %3, %2 : !s32i, !cir.ptr + %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.const(#cir.int<1> : !s32i) : !s32i %6 = cir.binop(add, %4, %5) : !s32i - cir.store %6, %0 : !s32i, cir.ptr + cir.store %6, %0 : !s32i, !cir.ptr } omp.terminator } diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index 9c01fd7fde01..1262f1d815bd 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -4,12 +4,12 @@ !s32i = !cir.int module { cir.func @f(%arg0: !cir.ptr) { - %0 = cir.alloca !cir.ptr, cir.ptr >, ["a", init] {alignment = 8 : i64} - cir.store %arg0, %0 : !cir.ptr, cir.ptr > - %1 = cir.load %0 : cir.ptr >, !cir.ptr + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["a", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, !cir.ptr> + %1 = cir.load %0 : !cir.ptr>, !cir.ptr %2 = cir.const(#cir.int<1> : !s32i) : !s32i %3 = cir.ptr_stride(%1 : !cir.ptr, %2 : !s32i), !cir.ptr - %4 = cir.load %3 : cir.ptr , !s32i + %4 = cir.load %3 : !cir.ptr, !s32i cir.return } } diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index 8afa84d0c247..1c99823b0584 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -6,9 +6,9 @@ module { cir.func @foo() { cir.scope { - %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.const(#cir.int<4> : !u32i) : !u32i - cir.store %1, %0 : !u32i, cir.ptr + cir.store %1, %0 : !u32i, !cir.ptr } cir.return } @@ -50,14 +50,14 @@ module { cir.func @scope_with_return() -> !u32i { - %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + %0 = cir.alloca !u32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} cir.scope { %2 = cir.const(#cir.int<0> : !u32i) : !u32i - cir.store %2, %0 : !u32i, cir.ptr - %3 = cir.load %0 : cir.ptr , !u32i + cir.store %2, %0 : !u32i, !cir.ptr + %3 = cir.load %0 : !cir.ptr, !u32i cir.return %3 : !u32i } - %1 = cir.load %0 : cir.ptr , !u32i + %1 = cir.load %0 : !cir.ptr, !u32i cir.return %1 : !u32i } diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index 642bb1e53f60..edf279755627 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -12,7 +12,7 @@ module { cir.func @test() { - %1 = cir.alloca !ty_22S22, cir.ptr , ["x"] {alignment = 4 : i64} + %1 = cir.alloca !ty_22S22, !cir.ptr, ["x"] {alignment = 4 : i64} // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#STRUCT:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"struct.S", (i8, i32)> %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr @@ -23,9 +23,9 @@ module { } cir.func @shouldConstInitLocalStructsWithConstStructAttr() { - %0 = cir.alloca !ty_22S2A22, cir.ptr , ["s"] {alignment = 4 : i64} + %0 = cir.alloca !ty_22S2A22, !cir.ptr, ["s"] {alignment = 4 : i64} %1 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22) : !ty_22S2A22 - cir.store %1, %0 : !ty_22S2A22, cir.ptr + cir.store %1, %0 : !ty_22S2A22, !cir.ptr cir.return } // CHECK: llvm.func @shouldConstInitLocalStructsWithConstStructAttr() @@ -82,10 +82,10 @@ module { cir.func @shouldLowerStructCopies() { // CHECK: llvm.func @shouldLowerStructCopies() - %1 = cir.alloca !ty_22S22, cir.ptr , ["a"] {alignment = 4 : i64} + %1 = cir.alloca !ty_22S22, !cir.ptr, ["a"] {alignment = 4 : i64} // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#SA:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"struct.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr - %2 = cir.alloca !ty_22S22, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !ty_22S22, !cir.ptr, ["b", init] {alignment = 4 : i64} // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#SB:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"struct.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr cir.copy %1 to %2 : !cir.ptr diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index 5931d49de3a4..4737cba64b8b 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -107,10 +107,10 @@ module { } cir.func @shouldLowerMultiBlockCase(%arg0: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr cir.scope { - %1 = cir.load %0 : cir.ptr , !s32i + %1 = cir.load %0 : !cir.ptr, !s32i cir.switch (%1 : !s32i) [ case (equal, 3) { cir.return @@ -137,17 +137,17 @@ module { // CHECK: } cir.func @shouldLowerNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["x", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} - %2 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - cir.store %arg1, %1 : !s32i, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.store %arg1, %1 : !s32i, !cir.ptr cir.scope { - %5 = cir.load %0 : cir.ptr , !s32i + %5 = cir.load %0 : !cir.ptr, !s32i cir.switch (%5 : !s32i) [ case (equal, 0) { cir.scope { - %6 = cir.load %1 : cir.ptr , !s32i + %6 = cir.load %1 : !cir.ptr, !s32i %7 = cir.const(#cir.int<0> : !s32i) : !s32i %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool @@ -160,8 +160,8 @@ module { ] } %3 = cir.const(#cir.int<3> : !s32i) : !s32i - cir.store %3, %2 : !s32i, cir.ptr - %4 = cir.load %2 : cir.ptr , !s32i + cir.store %3, %2 : !s32i, !cir.ptr + %4 = cir.load %2 : !cir.ptr, !s32i cir.return %4 : !s32i } // CHECK: llvm.func @shouldLowerNestedBreak diff --git a/clang/test/CIR/Lowering/ternary.cir b/clang/test/CIR/Lowering/ternary.cir index b80ff86c9bbc..6ccd9c4ed323 100644 --- a/clang/test/CIR/Lowering/ternary.cir +++ b/clang/test/CIR/Lowering/ternary.cir @@ -5,10 +5,10 @@ module { cir.func @_Z1xi(%arg0: !s32i) -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["y", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - %2 = cir.load %0 : cir.ptr , !s32i + %0 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const(#cir.int<0> : !s32i) : !s32i %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool %5 = cir.ternary(%4, true { @@ -18,8 +18,8 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { %7 = cir.const(#cir.int<5> : !s32i) : !s32i cir.yield %7 : !s32i }) : (!cir.bool) -> !s32i - cir.store %5, %1 : !s32i, cir.ptr - %6 = cir.load %1 : cir.ptr , !s32i + cir.store %5, %1 : !s32i, !cir.ptr + %6 = cir.load %1 : !cir.ptr, !s32i cir.return %6 : !s32i } } diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir index a5ea94324b55..1be10c992c7b 100644 --- a/clang/test/CIR/Lowering/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -3,19 +3,19 @@ !s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %2 = cir.const(#cir.int<2> : !s32i) : !s32i - cir.store %2, %0 : !s32i, cir.ptr - cir.store %2, %1 : !s32i, cir.ptr + cir.store %2, %0 : !s32i, !cir.ptr + cir.store %2, %1 : !s32i, !cir.ptr - %3 = cir.load %0 : cir.ptr , !s32i + %3 = cir.load %0 : !cir.ptr, !s32i %4 = cir.unary(inc, %3) : !s32i, !s32i - cir.store %4, %0 : !s32i, cir.ptr + cir.store %4, %0 : !s32i, !cir.ptr - %5 = cir.load %1 : cir.ptr , !s32i + %5 = cir.load %1 : !cir.ptr, !s32i %6 = cir.unary(dec, %5) : !s32i, !s32i - cir.store %6, %1 : !s32i, cir.ptr + cir.store %6, %1 : !s32i, !cir.ptr cir.return } @@ -29,32 +29,32 @@ module { cir.func @floatingPoint(%arg0: !cir.float, %arg1: !cir.double) { // MLIR: llvm.func @floatingPoint - %0 = cir.alloca !cir.float, cir.ptr , ["f", init] {alignment = 4 : i64} - %1 = cir.alloca !cir.double, cir.ptr , ["d", init] {alignment = 8 : i64} - cir.store %arg0, %0 : !cir.float, cir.ptr - cir.store %arg1, %1 : !cir.double, cir.ptr + %0 = cir.alloca !cir.float, !cir.ptr, ["f", init] {alignment = 4 : i64} + %1 = cir.alloca !cir.double, !cir.ptr, ["d", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.float, !cir.ptr + cir.store %arg1, %1 : !cir.double, !cir.ptr - %2 = cir.load %0 : cir.ptr , !cir.float + %2 = cir.load %0 : !cir.ptr, !cir.float %3 = cir.unary(inc, %2) : !cir.float, !cir.float - cir.store %3, %0 : !cir.float, cir.ptr + cir.store %3, %0 : !cir.float, !cir.ptr // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1.000000e+00 : f32) : f32 // MLIR: = llvm.fadd %[[#F_ONE]], %{{[0-9]+}} : f32 - %4 = cir.load %0 : cir.ptr , !cir.float + %4 = cir.load %0 : !cir.ptr, !cir.float %5 = cir.unary(dec, %4) : !cir.float, !cir.float - cir.store %5, %0 : !cir.float, cir.ptr + cir.store %5, %0 : !cir.float, !cir.ptr // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f32) : f32 // MLIR: = llvm.fsub %[[#D_ONE]], %{{[0-9]+}} : f32 - %6 = cir.load %1 : cir.ptr , !cir.double + %6 = cir.load %1 : !cir.ptr, !cir.double %7 = cir.unary(inc, %6) : !cir.double, !cir.double - cir.store %7, %1 : !cir.double, cir.ptr + cir.store %7, %1 : !cir.double, !cir.ptr // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(1.000000e+00 : f64) : f64 // MLIR: = llvm.fadd %[[#D_ONE]], %{{[0-9]+}} : f64 - %8 = cir.load %1 : cir.ptr , !cir.double + %8 = cir.load %1 : !cir.ptr, !cir.double %9 = cir.unary(dec, %8) : !cir.double, !cir.double - cir.store %9, %1 : !cir.double, cir.ptr + cir.store %9, %1 : !cir.double, !cir.ptr // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f64) : f64 // MLIR: = llvm.fsub %[[#D_ONE]], %{{[0-9]+}} : f64 diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index 21b12755ae02..773b57181de9 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -3,14 +3,14 @@ !s32i = !cir.int module { cir.func @foo() -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %2 = cir.const(#cir.int<1> : !s32i) : !s32i - cir.store %2, %1 : !s32i, cir.ptr - %3 = cir.load %1 : cir.ptr , !s32i + cir.store %2, %1 : !s32i, !cir.ptr + %3 = cir.load %1 : !cir.ptr, !s32i %4 = cir.unary(not, %3) : !s32i, !s32i - cir.store %4, %0 : !s32i, cir.ptr - %5 = cir.load %0 : cir.ptr , !s32i + cir.store %4, %0 : !s32i, !cir.ptr + %5 = cir.load %0 : !cir.ptr, !s32i cir.return %5 : !s32i } @@ -23,11 +23,11 @@ module { cir.func @floatingPoint(%arg0: !cir.float, %arg1: !cir.double) { // MLIR: llvm.func @floatingPoint - %0 = cir.alloca !cir.float, cir.ptr , ["f", init] {alignment = 4 : i64} - %1 = cir.alloca !cir.double, cir.ptr , ["d", init] {alignment = 8 : i64} - cir.store %arg0, %0 : !cir.float, cir.ptr - cir.store %arg1, %1 : !cir.double, cir.ptr - %2 = cir.load %0 : cir.ptr , !cir.float + %0 = cir.alloca !cir.float, !cir.ptr, ["f", init] {alignment = 4 : i64} + %1 = cir.alloca !cir.double, !cir.ptr, ["d", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.float, !cir.ptr + cir.store %arg1, %1 : !cir.double, !cir.ptr + %2 = cir.load %0 : !cir.ptr, !cir.float %3 = cir.cast(float_to_bool, %2 : !cir.float), !cir.bool // MLIR: %[[#F_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 // MLIR: %[[#F_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#F_ZERO]] : f32 @@ -35,7 +35,7 @@ module { %4 = cir.unary(not, %3) : !cir.bool, !cir.bool // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1 : i8) : i8 // MLIR: = llvm.xor %[[#F_ZEXT]], %[[#F_ONE]] : i8 - %5 = cir.load %1 : cir.ptr , !cir.double + %5 = cir.load %1 : !cir.ptr, !cir.double %6 = cir.cast(float_to_bool, %5 : !cir.double), !cir.bool // MLIR: %[[#D_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 // MLIR: %[[#D_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#D_ZERO]] : f64 @@ -48,12 +48,12 @@ module { cir.func @CStyleValueNegation(%arg0: !s32i, %arg1: !cir.float) { // MLIR: llvm.func @CStyleValueNegation - %0 = cir.alloca !s32i, cir.ptr , ["i", init] {alignment = 4 : i64} - %3 = cir.alloca !cir.float, cir.ptr , ["f", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - cir.store %arg1, %3 : !cir.float, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %3 = cir.alloca !cir.float, !cir.ptr, ["f", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.store %arg1, %3 : !cir.float, !cir.ptr - %5 = cir.load %0 : cir.ptr , !s32i + %5 = cir.load %0 : !cir.ptr, !s32i %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool %7 = cir.unary(not, %6) : !cir.bool, !cir.bool %8 = cir.cast(bool_to_int, %7 : !cir.bool), !s32i @@ -65,7 +65,7 @@ module { // MLIR: %[[#IXOR:]] = llvm.xor %[[#IEXT]], %[[#IONE]] : i8 // MLIR: = llvm.zext %[[#IXOR]] : i8 to i32 - %17 = cir.load %3 : cir.ptr , !cir.float + %17 = cir.load %3 : !cir.ptr, !cir.float %18 = cir.cast(float_to_bool, %17 : !cir.float), !cir.bool %19 = cir.unary(not, %18) : !cir.bool, !cir.bool %20 = cir.cast(bool_to_int, %19 : !cir.bool), !s32i diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index dbf71c2833bd..d998f494855e 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -4,19 +4,19 @@ !s32i = !cir.int module { cir.func @foo() { - %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %2 = cir.const(#cir.int<2> : !s32i) : !s32i - cir.store %2, %0 : !s32i, cir.ptr - cir.store %2, %1 : !s32i, cir.ptr + cir.store %2, %0 : !s32i, !cir.ptr + cir.store %2, %1 : !s32i, !cir.ptr - %3 = cir.load %0 : cir.ptr , !s32i + %3 = cir.load %0 : !cir.ptr, !s32i %4 = cir.unary(plus, %3) : !s32i, !s32i - cir.store %4, %0 : !s32i, cir.ptr + cir.store %4, %0 : !s32i, !cir.ptr - %5 = cir.load %1 : cir.ptr , !s32i + %5 = cir.load %1 : !cir.ptr, !s32i %6 = cir.unary(minus, %5) : !s32i, !s32i - cir.store %6, %1 : !s32i, cir.ptr + cir.store %6, %1 : !s32i, !cir.ptr cir.return } @@ -28,13 +28,13 @@ module { cir.func @floatingPoints(%arg0: !cir.double) { // MLIR: llvm.func @floatingPoints(%arg0: f64) - %0 = cir.alloca !cir.double, cir.ptr , ["X", init] {alignment = 8 : i64} - cir.store %arg0, %0 : !cir.double, cir.ptr - %1 = cir.load %0 : cir.ptr , !cir.double + %0 = cir.alloca !cir.double, !cir.ptr, ["X", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.double, !cir.ptr + %1 = cir.load %0 : !cir.ptr, !cir.double %2 = cir.unary(plus, %1) : !cir.double, !cir.double // MLIR: llvm.store %arg0, %[[#F_PLUS:]] : f64, !llvm.ptr // MLIR: %{{[0-9]}} = llvm.load %[[#F_PLUS]] : !llvm.ptr -> f64 - %3 = cir.load %0 : cir.ptr , !cir.double + %3 = cir.load %0 : !cir.ptr, !cir.double %4 = cir.unary(minus, %3) : !cir.double, !cir.double // MLIR: %[[#F_MINUS:]] = llvm.load %{{[0-9]}} : !llvm.ptr -> f64 // MLIR: %{{[0-9]}} = llvm.fneg %[[#F_MINUS]] : f64 diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir index dac1006cd8d2..92beda0ee4b8 100644 --- a/clang/test/CIR/Lowering/unions.cir +++ b/clang/test/CIR/Lowering/unions.cir @@ -24,7 +24,7 @@ module { // Should store directly to the union's base address. %5 = cir.const(#true) : !cir.bool %6 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr - cir.store %5, %6 : !cir.bool, cir.ptr + cir.store %5, %6 : !cir.bool, !cir.ptr // CHECK: %[[#VAL:]] = llvm.mlir.constant(1 : i8) : i8 // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. // CHECK: %[[#ADDR:]] = llvm.bitcast %{{.+}} : !llvm.ptr @@ -32,7 +32,7 @@ module { // Should load direclty from the union's base address. %7 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr - %8 = cir.load %7 : cir.ptr , !cir.bool + %8 = cir.load %7 : !cir.ptr, !cir.bool // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. // CHECK: %[[#BASE:]] = llvm.bitcast %{{.+}} : !llvm.ptr // CHECK: %{{.+}} = llvm.load %[[#BASE]] : !llvm.ptr -> i8 diff --git a/clang/test/CIR/Lowering/variadics.cir b/clang/test/CIR/Lowering/variadics.cir index 8e5cb670fa30..6e8c8f89233e 100644 --- a/clang/test/CIR/Lowering/variadics.cir +++ b/clang/test/CIR/Lowering/variadics.cir @@ -9,11 +9,11 @@ module { cir.func @average(%arg0: !s32i, ...) -> !s32i { - %0 = cir.alloca !s32i, cir.ptr , ["count", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["__retval"] {alignment = 4 : i64} - %2 = cir.alloca !cir.array, cir.ptr >, ["args"] {alignment = 16 : i64} - %3 = cir.alloca !cir.array, cir.ptr >, ["args_copy"] {alignment = 16 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["count", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + %2 = cir.alloca !cir.array, !cir.ptr>, ["args"] {alignment = 16 : i64} + %3 = cir.alloca !cir.array, !cir.ptr>, ["args_copy"] {alignment = 16 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr cir.va.start %4 : !cir.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> @@ -33,8 +33,8 @@ module { // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vaend %{{[0-9]+}} : !llvm.ptr %8 = cir.const(#cir.int<0> : !s32i) : !s32i - cir.store %8, %1 : !s32i, cir.ptr - %9 = cir.load %1 : cir.ptr , !s32i + cir.store %8, %1 : !s32i, !cir.ptr + %9 = cir.load %1 : !cir.ptr, !s32i cir.return %9 : !s32i } } diff --git a/clang/test/CIR/Transforms/lib-opt-find.cpp b/clang/test/CIR/Transforms/lib-opt-find.cpp index 4812e72d8037..14c464ff9448 100644 --- a/clang/test/CIR/Transforms/lib-opt-find.cpp +++ b/clang/test/CIR/Transforms/lib-opt-find.cpp @@ -7,7 +7,7 @@ int test1(unsigned char n = 3) { // CHECK: test1 unsigned num_found = 0; - // CHECK: %[[pattern_addr:.*]] = cir.alloca !u8i, cir.ptr , ["n" + // CHECK: %[[pattern_addr:.*]] = cir.alloca !u8i, !cir.ptr, ["n" std::array v = {1, 2, 3, 4, 5, 6, 7, 8, 9}; auto f = std::find(v.begin(), v.end(), n); @@ -15,7 +15,7 @@ int test1(unsigned char n = 3) // CHECK: %[[first:.*]] = cir.call @_ZNSt5arrayIhLj9EE5beginEv // CHECK: %[[last:.*]] = cir.call @_ZNSt5arrayIhLj9EE3endEv // CHECK: %[[cast_to_void:.*]] = cir.cast(bitcast, %[[first]] : !cir.ptr), !cir.ptr - // CHECK: %[[load_pattern:.*]] = cir.load %[[pattern_addr]] : cir.ptr , !u8i + // CHECK: %[[load_pattern:.*]] = cir.load %[[pattern_addr]] : !cir.ptr, !u8i // CHECK: %[[pattern:.*]] = cir.cast(integral, %[[load_pattern:.*]] : !u8i), !s32i // CHECK-NOT: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( @@ -42,13 +42,13 @@ unsigned char* test2(unsigned char* first, unsigned char* last, unsigned char v) return std::find(first, last, v); // CHECK: test2 - // CHECK: %[[first_storage:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["first", init] - // CHECK: %[[last_storage:.*]] = cir.alloca !cir.ptr, cir.ptr >, ["last", init] - // CHECK: %[[pattern_storage:.*]] = cir.alloca !u8i, cir.ptr , ["v", init] + // CHECK: %[[first_storage:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["first", init] + // CHECK: %[[last_storage:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["last", init] + // CHECK: %[[pattern_storage:.*]] = cir.alloca !u8i, !cir.ptr, ["v", init] // CHECK: %[[first:.*]] = cir.load %[[first_storage]] // CHECK: %[[last:.*]] = cir.load %[[last_storage]] // CHECK: %[[cast_to_void:.*]] = cir.cast(bitcast, %[[first]] : !cir.ptr), !cir.ptr - // CHECK: %[[load_pattern:.*]] = cir.load %[[pattern_storage]] : cir.ptr , !u8i + // CHECK: %[[load_pattern:.*]] = cir.load %[[pattern_storage]] : !cir.ptr, !u8i // CHECK: %[[pattern:.*]] = cir.cast(integral, %[[load_pattern:.*]] : !u8i), !s32i // CHECK-NOT: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 17880efeac2a..9306958f944e 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -6,21 +6,21 @@ !s32i = !cir.int module { cir.func @sw1(%arg0: !s32i, %arg1: !s32i) { - %0 = cir.alloca !s32i, cir.ptr , ["a", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, cir.ptr , ["c", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, cir.ptr - cir.store %arg1, %1 : !s32i, cir.ptr + %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["c", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.store %arg1, %1 : !s32i, !cir.ptr cir.scope { - %2 = cir.alloca !s32i, cir.ptr , ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %3 = cir.const(#cir.int<1> : !s32i) : !s32i - cir.store %3, %2 : !s32i, cir.ptr - %4 = cir.load %0 : cir.ptr , !s32i + cir.store %3, %2 : !s32i, !cir.ptr + %4 = cir.load %0 : !cir.ptr, !s32i cir.switch (%4 : !s32i) [ case (equal, 0 : !s32i) { - %5 = cir.load %2 : cir.ptr , !s32i + %5 = cir.load %2 : !cir.ptr, !s32i %6 = cir.const(#cir.int<1> : !s32i) : !s32i %7 = cir.binop(add, %5, %6) : !s32i - cir.store %7, %2 : !s32i, cir.ptr + cir.store %7, %2 : !s32i, !cir.ptr cir.br ^bb1 ^bb1: // pred: ^bb0 cir.return @@ -28,7 +28,7 @@ module { case (equal, 1 : !s32i) { cir.scope { cir.scope { - %5 = cir.load %1 : cir.ptr , !s32i + %5 = cir.load %1 : !cir.ptr, !s32i %6 = cir.const(#cir.int<3> : !s32i) : !s32i %7 = cir.cmp(eq, %5, %6) : !s32i, !cir.bool cir.if %7 { @@ -43,13 +43,13 @@ module { }, case (equal, 2 : !s32i) { cir.scope { - %5 = cir.alloca !s32i, cir.ptr , ["yolo", init] {alignment = 4 : i64} - %6 = cir.load %2 : cir.ptr , !s32i + %5 = cir.alloca !s32i, !cir.ptr, ["yolo", init] {alignment = 4 : i64} + %6 = cir.load %2 : !cir.ptr, !s32i %7 = cir.const(#cir.int<1> : !s32i) : !s32i %8 = cir.binop(add, %6, %7) : !s32i - cir.store %8, %2 : !s32i, cir.ptr + cir.store %8, %2 : !s32i, !cir.ptr %9 = cir.const(#cir.int<100> : !s32i) : !s32i - cir.store %9, %5 : !s32i, cir.ptr + cir.store %9, %5 : !s32i, !cir.ptr cir.br ^bb1 ^bb1: // pred: ^bb0 cir.return @@ -63,16 +63,16 @@ module { // CHECK: cir.switch (%4 : !s32i) [ // CHECK-NEXT: case (equal, 0) { -// CHECK-NEXT: %5 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.load %2 : !cir.ptr, !s32i // CHECK-NEXT: %6 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i -// CHECK-NEXT: cir.store %7, %2 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %7, %2 : !s32i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 1) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %5 = cir.load %1 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %6 = cir.const(#cir.int<3> : !s32i) : !s32i // CHECK-NEXT: %7 = cir.cmp(eq, %5, %6) : !s32i, !cir.bool // CHECK-NEXT: cir.if %7 { @@ -85,13 +85,13 @@ module { // CHECK-NEXT: }, // CHECK-NEXT: case (equal, 2) { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %5 = cir.alloca !s32i, cir.ptr , ["yolo", init] {alignment = 4 : i64} -// CHECK-NEXT: %6 = cir.load %2 : cir.ptr , !s32i +// CHECK-NEXT: %5 = cir.alloca !s32i, !cir.ptr, ["yolo", init] {alignment = 4 : i64} +// CHECK-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i // CHECK-NEXT: %7 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK-NEXT: %8 = cir.binop(add, %6, %7) : !s32i -// CHECK-NEXT: cir.store %8, %2 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %8, %2 : !s32i, !cir.ptr // CHECK-NEXT: %9 = cir.const(#cir.int<100> : !s32i) : !s32i -// CHECK-NEXT: cir.store %9, %5 : !s32i, cir.ptr +// CHECK-NEXT: cir.store %9, %5 : !s32i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: cir.yield From 82813b76580cdb411c824ccee5fca2bec605ec9b Mon Sep 17 00:00:00 2001 From: "Walter J.T.V" <81811777+eZWALT@users.noreply.github.com> Date: Wed, 1 May 2024 22:41:16 +0200 Subject: [PATCH 1538/2301] [CIR][OpenMP] Taskwait, Taskyield and Barrier implementation (#555) This PR is the final fix for issue #499. --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 ++ clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp | 53 +++++++++++++ clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h | 36 +++++++++ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 9 ++- clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp | 76 +++++++++++++++++++ .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/OpenMP/barrier.cpp | 8 ++ .../{openmp.cpp => OpenMP/parallel.cpp} | 0 clang/test/CIR/CodeGen/OpenMP/taskwait.cpp | 9 +++ clang/test/CIR/CodeGen/OpenMP/taskyield.cpp | 8 ++ clang/test/CIR/Lowering/OpenMP/barrier.cir | 15 ++++ .../{openmp.cir => OpenMP/parallel.cir} | 0 clang/test/CIR/Lowering/OpenMP/taskwait.cir | 14 ++++ clang/test/CIR/Lowering/OpenMP/taskyield.cir | 14 ++++ 14 files changed, 245 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/OpenMP/barrier.cpp rename clang/test/CIR/CodeGen/{openmp.cpp => OpenMP/parallel.cpp} (100%) create mode 100644 clang/test/CIR/CodeGen/OpenMP/taskwait.cpp create mode 100644 clang/test/CIR/CodeGen/OpenMP/taskyield.cpp create mode 100644 clang/test/CIR/Lowering/OpenMP/barrier.cir rename clang/test/CIR/Lowering/{openmp.cir => OpenMP/parallel.cir} (100%) create mode 100644 clang/test/CIR/Lowering/OpenMP/taskwait.cir create mode 100644 clang/test/CIR/Lowering/OpenMP/taskyield.cir diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 22b863ba0d7c..63450258ad99 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -30,6 +30,7 @@ #include "mlir/IR/TypeRange.h" #include "mlir/IR/Value.h" +#include "mlir/Support/LogicalResult.h" namespace clang { class Expr; @@ -993,6 +994,10 @@ class CIRGenFunction : public CIRGenTypeCache { // OpenMP gen functions: mlir::LogicalResult buildOMPParallelDirective(const OMPParallelDirective &S); + mlir::LogicalResult buildOMPTaskwaitDirective(const OMPTaskwaitDirective &S); + mlir::LogicalResult + buildOMPTaskyieldDirective(const OMPTaskyieldDirective &S); + mlir::LogicalResult buildOMPBarrierDirective(const OMPBarrierDirective &S); LValue buildOpaqueValueLValue(const OpaqueValueExpr *e); diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp index 2060ce8e2d31..a42d84b12cb0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp @@ -52,3 +52,56 @@ bool CIRGenOpenMPRuntime::emitTargetGlobal(clang::GlobalDecl &GD) { assert(!UnimplementedFeature::openMPRuntime()); return false; } + +void CIRGenOpenMPRuntime::emitTaskWaitCall(CIRGenBuilderTy &builder, + CIRGenFunction &CGF, + mlir::Location Loc, + const OMPTaskDataTy &Data) { + + if (!CGF.HaveInsertPoint()) + return; + + if (CGF.CGM.getLangOpts().OpenMPIRBuilder && Data.Dependences.empty()) { + // TODO: Need to support taskwait with dependences in the OpenMPIRBuilder. + // TODO(cir): This could change in the near future when OpenMP 5.0 gets + // supported by MLIR + llvm_unreachable("NYI"); + // builder.create(Loc); + } else { + llvm_unreachable("NYI"); + } + assert(!UnimplementedFeature::openMPRegionInfo()); +} + +void CIRGenOpenMPRuntime::emitBarrierCall(CIRGenBuilderTy &builder, + CIRGenFunction &CGF, + mlir::Location Loc) { + + assert(!UnimplementedFeature::openMPRegionInfo()); + + if (CGF.CGM.getLangOpts().OpenMPIRBuilder) { + builder.create(Loc); + return; + } + + if (!CGF.HaveInsertPoint()) + return; + + llvm_unreachable("NYI"); +} + +void CIRGenOpenMPRuntime::emitTaskyieldCall(CIRGenBuilderTy &builder, + CIRGenFunction &CGF, + mlir::Location Loc) { + + if (!CGF.HaveInsertPoint()) + return; + + if (CGF.CGM.getLangOpts().OpenMPIRBuilder) { + builder.create(Loc); + } else { + llvm_unreachable("NYI"); + } + + assert(!UnimplementedFeature::openMPRegionInfo()); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h index c4a53db44c92..a27b04a4866b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h +++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h @@ -13,9 +13,21 @@ #ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENOPENMPRUNTIME_H +#include "CIRGenBuilder.h" #include "CIRGenValue.h" + +#include "clang/AST/Redeclarable.h" +#include "clang/Basic/OpenMPKinds.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "llvm/Support/ErrorHandling.h" + +#include "mlir/Dialect/OpenMP/OpenMPDialect.h" +#include "mlir/IR/Dialect.h" +#include "mlir/IR/Location.h" + +#include "UnimplementedFeatureGuarding.h" + namespace clang { class Decl; class Expr; @@ -27,6 +39,20 @@ namespace cir { class CIRGenModule; class CIRGenFunction; +struct OMPTaskDataTy final { + struct DependData { + clang::OpenMPDependClauseKind DepKind = clang::OMPC_DEPEND_unknown; + const clang::Expr *IteratorExpr = nullptr; + llvm::SmallVector DepExprs; + explicit DependData() = default; + DependData(clang::OpenMPDependClauseKind DepKind, + const clang::Expr *IteratorExpr) + : DepKind(DepKind), IteratorExpr(IteratorExpr) {} + }; + llvm::SmallVector Dependences; + bool HasNowaitClause = false; +}; + class CIRGenOpenMPRuntime { public: explicit CIRGenOpenMPRuntime(CIRGenModule &CGM); @@ -69,6 +95,16 @@ class CIRGenOpenMPRuntime { /// \param GD Global to scan. virtual bool emitTargetGlobal(clang::GlobalDecl &D); + /// Emit code for 'taskwait' directive + virtual void emitTaskWaitCall(CIRGenBuilderTy &builder, CIRGenFunction &CGF, + mlir::Location Loc, const OMPTaskDataTy &Data); + + virtual void emitBarrierCall(CIRGenBuilderTy &builder, CIRGenFunction &CGF, + mlir::Location Loc); + + virtual void emitTaskyieldCall(CIRGenBuilderTy &builder, CIRGenFunction &CGF, + mlir::Location Loc); + protected: CIRGenModule &CGM; }; diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 981804892ebb..b83b8795f841 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -180,6 +180,12 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, // OMP directives: case Stmt::OMPParallelDirectiveClass: return buildOMPParallelDirective(cast(*S)); + case Stmt::OMPTaskwaitDirectiveClass: + return buildOMPTaskwaitDirective(cast(*S)); + case Stmt::OMPTaskyieldDirectiveClass: + return buildOMPTaskyieldDirective(cast(*S)); + case Stmt::OMPBarrierDirectiveClass: + return buildOMPBarrierDirective(cast(*S)); // Unsupported AST nodes: case Stmt::CapturedStmtClass: case Stmt::ObjCAtTryStmtClass: @@ -205,9 +211,6 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::OMPParallelMasterDirectiveClass: case Stmt::OMPParallelSectionsDirectiveClass: case Stmt::OMPTaskDirectiveClass: - case Stmt::OMPTaskyieldDirectiveClass: - case Stmt::OMPBarrierDirectiveClass: - case Stmt::OMPTaskwaitDirectiveClass: case Stmt::OMPTaskgroupDirectiveClass: case Stmt::OMPFlushDirectiveClass: case Stmt::OMPDepobjDirectiveClass: diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp index 3874ef3dcee6..0c996156f71e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp @@ -9,15 +9,63 @@ // This contains code to emit OpenMP Stmt nodes as MLIR code. // //===----------------------------------------------------------------------===// +#include "clang/AST/ASTFwd.h" +#include "clang/AST/StmtIterator.h" +#include "clang/AST/StmtOpenMP.h" +#include "clang/Basic/OpenMPKinds.h" #include "CIRGenFunction.h" #include "CIRGenOpenMPRuntime.h" + +#include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/OpenMP/OpenMPDialect.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributeInterfaces.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Location.h" +#include "mlir/IR/Value.h" +#include "mlir/Support/LogicalResult.h" using namespace cir; using namespace clang; using namespace mlir::omp; +static void buildDependences(const OMPExecutableDirective &S, + OMPTaskDataTy &Data) { + + // First look for 'omp_all_memory' and add this first. + bool OmpAllMemory = false; + if (llvm::any_of( + S.getClausesOfKind(), [](const OMPDependClause *C) { + return C->getDependencyKind() == OMPC_DEPEND_outallmemory || + C->getDependencyKind() == OMPC_DEPEND_inoutallmemory; + })) { + OmpAllMemory = true; + // Since both OMPC_DEPEND_outallmemory and OMPC_DEPEND_inoutallmemory are + // equivalent to the runtime, always use OMPC_DEPEND_outallmemory to + // simplify. + OMPTaskDataTy::DependData &DD = + Data.Dependences.emplace_back(OMPC_DEPEND_outallmemory, + /*IteratorExpr=*/nullptr); + // Add a nullptr Expr to simplify the codegen in emitDependData. + DD.DepExprs.push_back(nullptr); + } + // Add remaining dependences skipping any 'out' or 'inout' if they are + // overridden by 'omp_all_memory'. + for (const auto *C : S.getClausesOfKind()) { + OpenMPDependClauseKind Kind = C->getDependencyKind(); + if (Kind == OMPC_DEPEND_outallmemory || Kind == OMPC_DEPEND_inoutallmemory) + continue; + if (OmpAllMemory && (Kind == OMPC_DEPEND_out || Kind == OMPC_DEPEND_inout)) + continue; + OMPTaskDataTy::DependData &DD = + Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); + DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); + } +} + mlir::LogicalResult CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) { mlir::LogicalResult res = mlir::success(); @@ -43,3 +91,31 @@ CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) { builder.create(getLoc(S.getSourceRange().getEnd())); return res; } + +mlir::LogicalResult +CIRGenFunction::buildOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { + mlir::LogicalResult res = mlir::success(); + OMPTaskDataTy Data; + buildDependences(S, Data); + Data.HasNowaitClause = S.hasClausesOfKind(); + CGM.getOpenMPRuntime().emitTaskWaitCall(builder, *this, + getLoc(S.getSourceRange()), Data); + return res; +} +mlir::LogicalResult +CIRGenFunction::buildOMPTaskyieldDirective(const OMPTaskyieldDirective &S) { + mlir::LogicalResult res = mlir::success(); + // Creation of an omp.taskyield operation + CGM.getOpenMPRuntime().emitTaskyieldCall(builder, *this, + getLoc(S.getSourceRange())); + return res; +} + +mlir::LogicalResult +CIRGenFunction::buildOMPBarrierDirective(const OMPBarrierDirective &S) { + mlir::LogicalResult res = mlir::success(); + // Creation of an omp.barrier operation + CGM.getOpenMPRuntime().emitBarrierCall(builder, *this, + getLoc(S.getSourceRange())); + return res; +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index e93a564ce076..bde8defb1147 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -135,6 +135,7 @@ struct UnimplementedFeature { static bool CUDA() { return false; } static bool openMP() { return false; } static bool openMPRuntime() { return false; } + static bool openMPRegionInfo() { return false; } static bool openMPTarget() { return false; } static bool isVarArg() { return false; } static bool setNonGC() { return false; } diff --git a/clang/test/CIR/CodeGen/OpenMP/barrier.cpp b/clang/test/CIR/CodeGen/OpenMP/barrier.cpp new file mode 100644 index 000000000000..b93016a3f1e4 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenMP/barrier.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fopenmp-enable-irbuilder -fopenmp -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// CHECK: cir.func +void omp_barrier_1(){ +// CHECK: omp.barrier + #pragma omp barrier +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/openmp.cpp b/clang/test/CIR/CodeGen/OpenMP/parallel.cpp similarity index 100% rename from clang/test/CIR/CodeGen/openmp.cpp rename to clang/test/CIR/CodeGen/OpenMP/parallel.cpp diff --git a/clang/test/CIR/CodeGen/OpenMP/taskwait.cpp b/clang/test/CIR/CodeGen/OpenMP/taskwait.cpp new file mode 100644 index 000000000000..448679622a7c --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenMP/taskwait.cpp @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fopenmp-enable-irbuilder -fopenmp -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +// CHECK: cir.func +void omp_taskwait_1(){ +// CHECK: omp.taskwait + #pragma omp taskwait +} diff --git a/clang/test/CIR/CodeGen/OpenMP/taskyield.cpp b/clang/test/CIR/CodeGen/OpenMP/taskyield.cpp new file mode 100644 index 000000000000..aa2903c07f74 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenMP/taskyield.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fopenmp-enable-irbuilder -fopenmp -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// CHECK: cir.func +void omp_taskyield_1(){ +// CHECK: omp.taskyield + #pragma omp taskyield +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/OpenMP/barrier.cir b/clang/test/CIR/Lowering/OpenMP/barrier.cir new file mode 100644 index 000000000000..52fee8fff6c1 --- /dev/null +++ b/clang/test/CIR/Lowering/OpenMP/barrier.cir @@ -0,0 +1,15 @@ + +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s + + +module { + cir.func @omp_barrier_1() { + omp.barrier + cir.return + } +} + +// CHECK: define void @omp_barrier_1() +// CHECK: call i32 @__kmpc_global_thread_num(ptr {{.*}}) +// CHECK: call void @__kmpc_barrier(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void diff --git a/clang/test/CIR/Lowering/openmp.cir b/clang/test/CIR/Lowering/OpenMP/parallel.cir similarity index 100% rename from clang/test/CIR/Lowering/openmp.cir rename to clang/test/CIR/Lowering/OpenMP/parallel.cir diff --git a/clang/test/CIR/Lowering/OpenMP/taskwait.cir b/clang/test/CIR/Lowering/OpenMP/taskwait.cir new file mode 100644 index 000000000000..336bbda4f1bf --- /dev/null +++ b/clang/test/CIR/Lowering/OpenMP/taskwait.cir @@ -0,0 +1,14 @@ +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s + + +module { + cir.func @omp_taskwait_1() { + omp.taskwait + cir.return + } +} + +// CHECK: define void @omp_taskwait_1() +// CHECK: call i32 @__kmpc_global_thread_num(ptr {{.*}}) +// CHECK: call i32 @__kmpc_omp_taskwait(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void \ No newline at end of file diff --git a/clang/test/CIR/Lowering/OpenMP/taskyield.cir b/clang/test/CIR/Lowering/OpenMP/taskyield.cir new file mode 100644 index 000000000000..5104e9c31be1 --- /dev/null +++ b/clang/test/CIR/Lowering/OpenMP/taskyield.cir @@ -0,0 +1,14 @@ +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s + + +module { + cir.func @omp_taskyield_1() { + omp.taskyield + cir.return + } +} + +// CHECK: define void @omp_taskyield_1() +// CHECK: call i32 @__kmpc_global_thread_num(ptr {{.*}}) +// CHECK: call i32 @__kmpc_omp_taskyield(ptr {{.*}}, i32 {{.*}}, i32 {{.*}}) +// CHECK: ret void \ No newline at end of file From 857680ca703e9feeb378952f3933929cd42ab275 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 1 May 2024 16:31:53 -0700 Subject: [PATCH 1539/2301] [CIR][CIRGen][LLVMLowering] Initial support for GNU void* and func ptr arithmetic extensions More generalization coming next. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 6 ++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 36 ++++++++++++++++--- clang/test/CIR/CodeGen/pointer-arith-ext.c | 18 ++++++++++ 3 files changed, 54 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/CodeGen/pointer-arith-ext.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 6f143812858e..b1610292d5e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1203,10 +1203,12 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, // Explicitly handle GNU void* and function pointer arithmetic extensions. The // GNU void* casts amount to no-ops since our void* type is i8*, but this is // future proof. + mlir::Type elemTy; if (elementType->isVoidType() || elementType->isFunctionType()) - llvm_unreachable("GNU void* and func ptr arithmetic extensions are NYI"); + elemTy = CGF.UInt8Ty; + else + elemTy = CGF.convertTypeForMem(elementType); - mlir::Type elemTy = CGF.convertTypeForMem(elementType); if (CGF.getLangOpts().isSignedOverflowDefined()) return CGF.getBuilder().create( CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 015a2ddf58fe..64c8c871d6f5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -465,10 +465,38 @@ class CIRPtrStrideOpLowering mlir::ConversionPatternRewriter &rewriter) const override { auto *tc = getTypeConverter(); const auto resultTy = tc->convertType(ptrStrideOp.getType()); - const auto elementTy = tc->convertType(ptrStrideOp.getElementTy()); - rewriter.replaceOpWithNewOp(ptrStrideOp, resultTy, - elementTy, adaptor.getBase(), - adaptor.getStride()); + auto elementTy = tc->convertType(ptrStrideOp.getElementTy()); + auto ctx = elementTy.getContext(); + + // void doesn't really have a layout to use in GEPs, make it i8 instead. + bool isVoid = false; + if (elementTy.isa()) { + elementTy = mlir::IntegerType::get(elementTy.getContext(), 8, + mlir::IntegerType::Signless); + isVoid = true; + } + + // Zero-extend or sign-extend the pointer value according to + // whether the index is signed or not. + // FIXME: generalize this logic when element type isn't void. + auto index = adaptor.getStride(); + auto width = index.getType().cast().getWidth(); + mlir::DataLayout LLVMLayout( + index.getDefiningOp()->getParentOfType()); + auto layoutWidth = + LLVMLayout.getTypeIndexBitwidth(adaptor.getBase().getType()); + if (isVoid && layoutWidth && width < *layoutWidth) { + auto llvmDstType = mlir::IntegerType::get(ctx, *layoutWidth); + if (ptrStrideOp.getStride().getType().isUnsigned()) + index = rewriter.create(ptrStrideOp.getLoc(), + llvmDstType, index); + else + index = rewriter.create(ptrStrideOp.getLoc(), + llvmDstType, index); + } + + rewriter.replaceOpWithNewOp( + ptrStrideOp, resultTy, elementTy, adaptor.getBase(), index); return mlir::success(); } }; diff --git a/clang/test/CIR/CodeGen/pointer-arith-ext.c b/clang/test/CIR/CodeGen/pointer-arith-ext.c new file mode 100644 index 000000000000..51fa4576ee09 --- /dev/null +++ b/clang/test/CIR/CodeGen/pointer-arith-ext.c @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-int-conversions -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-int-conversions -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// GNU extensions +typedef void (*FP)(void); +void *f2(void *a, int b) { return a + b; } +// CIR-LABEL: f2 +// CIR: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CIR: %[[STRIDE:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CIR: cir.ptr_stride(%[[PTR]] : !cir.ptr, %[[STRIDE]] : !s32i) + +// LLVM-LABEL: f2 +// LLVM: %[[PTR:.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: %[[TOEXT:.*]] = load i32, ptr {{.*}}, align 4 +// LLVM: %[[STRIDE:.*]] = sext i32 %[[TOEXT]] to i64 +// LLVM: getelementptr i8, ptr %[[PTR]], i64 %[[STRIDE]] \ No newline at end of file From 4e3be386893b9e8bab1806e6c260d322d29a576e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 1 May 2024 19:24:20 -0700 Subject: [PATCH 1540/2301] [CIR][LowerToLLVM][NFC] Refactor int cast creation --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 72 +++++++++---------- clang/test/CIR/Lowering/bitint.cir | 10 +-- 2 files changed, 37 insertions(+), 45 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 64c8c871d6f5..d5304839298d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -455,6 +455,25 @@ class CIRMemCpyOpLowering } }; +static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, + mlir::Value llvmSrc, + mlir::IntegerType llvmDstIntTy, + bool isUnsigned, uint64_t cirDstIntWidth) { + auto cirSrcWidth = llvmSrc.getType().cast().getWidth(); + if (cirSrcWidth == cirDstIntWidth) + return llvmSrc; + + auto loc = llvmSrc.getLoc(); + if (cirSrcWidth < cirDstIntWidth) { + if (isUnsigned) + return rewriter.create(loc, llvmDstIntTy, llvmSrc); + return rewriter.create(loc, llvmDstIntTy, llvmSrc); + } + + // Otherwise truncate + return rewriter.create(loc, llvmDstIntTy, llvmSrc); +} + class CIRPtrStrideOpLowering : public mlir::OpConversionPattern { public: @@ -487,12 +506,9 @@ class CIRPtrStrideOpLowering LLVMLayout.getTypeIndexBitwidth(adaptor.getBase().getType()); if (isVoid && layoutWidth && width < *layoutWidth) { auto llvmDstType = mlir::IntegerType::get(ctx, *layoutWidth); - if (ptrStrideOp.getStride().getType().isUnsigned()) - index = rewriter.create(ptrStrideOp.getLoc(), - llvmDstType, index); - else - index = rewriter.create(ptrStrideOp.getLoc(), - llvmDstType, index); + index = getLLVMIntCast(rewriter, index, llvmDstType, + ptrStrideOp.getStride().getType().isUnsigned(), + *layoutWidth); } rewriter.replaceOpWithNewOp( @@ -579,23 +595,11 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { elementTypeIfVector(srcType).cast(); mlir::cir::IntType dstIntType = elementTypeIfVector(dstType).cast(); - - if (dstIntType.getWidth() < srcIntType.getWidth()) { - // Bigger to smaller. Truncate. - rewriter.replaceOpWithNewOp(castOp, llvmDstType, - llvmSrcVal); - } else if (dstIntType.getWidth() > srcIntType.getWidth()) { - // Smaller to bigger. Zero extend or sign extend based on signedness. - if (srcIntType.isUnsigned()) - rewriter.replaceOpWithNewOp(castOp, llvmDstType, - llvmSrcVal); - else - rewriter.replaceOpWithNewOp(castOp, llvmDstType, - llvmSrcVal); - } else { - // Same size. Signedness changes doesn't matter to LLVM. Do nothing. - rewriter.replaceOp(castOp, llvmSrcVal); - } + rewriter.replaceOp(castOp, + getLLVMIntCast(rewriter, llvmSrcVal, + llvmDstType.cast(), + srcIntType.isUnsigned(), + dstIntType.getWidth())); break; } case mlir::cir::CastKind::floating: { @@ -1942,7 +1946,6 @@ class CIRShiftOpLowering auto cirAmtTy = op.getAmount().getType().dyn_cast(); auto cirValTy = op.getValue().getType().dyn_cast(); auto llvmTy = getTypeConverter()->convertType(op.getType()); - auto loc = op.getLoc(); mlir::Value amt = adaptor.getAmount(); mlir::Value val = adaptor.getValue(); @@ -1951,14 +1954,8 @@ class CIRShiftOpLowering // Ensure shift amount is the same type as the value. Some undefined // behavior might occur in the casts below as per [C99 6.5.7.3]. - if (cirAmtTy.getWidth() > cirValTy.getWidth()) { - amt = rewriter.create(loc, llvmTy, amt); - } else if (cirAmtTy.getWidth() < cirValTy.getWidth()) { - if (cirAmtTy.isSigned()) - amt = rewriter.create(loc, llvmTy, amt); - else - amt = rewriter.create(loc, llvmTy, amt); - } + amt = getLLVMIntCast(rewriter, amt, llvmTy.cast(), + !cirAmtTy.isSigned(), cirValTy.getWidth()); // Lower to the proper LLVM shift operation. if (op.getIsShiftleft()) @@ -2061,14 +2058,9 @@ static mlir::Value createLLVMBitOp(mlir::Location loc, operand.getType(), operand); } - mlir::Value result = op->getResult(0); - if (operandIntTy.getWidth() > resultIntTy.getWidth()) { - result = rewriter.create(loc, resultTy, result); - } else if (operandIntTy.getWidth() < resultIntTy.getWidth()) { - result = rewriter.create(loc, resultTy, result); - } - - return result; + return getLLVMIntCast(rewriter, op->getResult(0), + resultTy.cast(), + /*isUnsigned=*/true, resultIntTy.getWidth()); } class CIRBitClrsbOpLowering diff --git a/clang/test/CIR/Lowering/bitint.cir b/clang/test/CIR/Lowering/bitint.cir index f89278b5faf7..b1c9d031b7cc 100644 --- a/clang/test/CIR/Lowering/bitint.cir +++ b/clang/test/CIR/Lowering/bitint.cir @@ -22,9 +22,9 @@ module { // MLIR-NEXT: } // LLVM: define i2 @ParamPassing(i15 %0, i31 %1) !dbg !3 { -// LLVM-NEXT: %3 = sext i15 %0 to i32, !dbg !6 -// LLVM-NEXT: %4 = sext i31 %1 to i32, !dbg !7 -// LLVM-NEXT: %5 = add i32 %3, %4, !dbg !8 -// LLVM-NEXT: %6 = trunc i32 %5 to i2, !dbg !9 -// LLVM-NEXT: ret i2 %6, !dbg !10 +// LLVM-NEXT: %3 = sext i15 %0 to i32 +// LLVM-NEXT: %4 = sext i31 %1 to i32 +// LLVM-NEXT: %5 = add i32 %3, %4 +// LLVM-NEXT: %6 = trunc i32 %5 to i2 +// LLVM-NEXT: ret i2 %6 // LLVM-NEXT: } From d2738747ca7334683e8d64dc5386af8b284fe52f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 1 May 2024 20:14:43 -0700 Subject: [PATCH 1541/2301] [CIR][LowerToLLVM] Generalize ptr_stride proper int casts --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 +- clang/test/CIR/Lowering/dot.cir | 122 +++++++++--------- clang/test/CIR/Lowering/globals.cir | 10 +- clang/test/CIR/Lowering/ptrstride.cir | 26 ++-- 4 files changed, 81 insertions(+), 85 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d5304839298d..19f0d36cb350 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -488,23 +488,19 @@ class CIRPtrStrideOpLowering auto ctx = elementTy.getContext(); // void doesn't really have a layout to use in GEPs, make it i8 instead. - bool isVoid = false; - if (elementTy.isa()) { + if (elementTy.isa()) elementTy = mlir::IntegerType::get(elementTy.getContext(), 8, mlir::IntegerType::Signless); - isVoid = true; - } // Zero-extend or sign-extend the pointer value according to // whether the index is signed or not. - // FIXME: generalize this logic when element type isn't void. auto index = adaptor.getStride(); auto width = index.getType().cast().getWidth(); mlir::DataLayout LLVMLayout( index.getDefiningOp()->getParentOfType()); auto layoutWidth = LLVMLayout.getTypeIndexBitwidth(adaptor.getBase().getType()); - if (isVoid && layoutWidth && width < *layoutWidth) { + if (layoutWidth && width != *layoutWidth) { auto llvmDstType = mlir::IntegerType::get(ctx, *layoutWidth); index = getLLVMIntCast(rewriter, index, llvmDstType, ptrStrideOp.getStride().getType().isUnsigned(), diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index c43e52a971a8..37958b0bfea3 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -52,64 +52,64 @@ module { } } -// MLIR: module { -// MLIR-NEXT: llvm.func @dot(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: i32) -> f64 -// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 -// MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64 -// MLIR-NEXT: %3 = llvm.alloca %2 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: %4 = llvm.mlir.constant(1 : index) : i64 -// MLIR-NEXT: %5 = llvm.alloca %4 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: %6 = llvm.mlir.constant(1 : index) : i64 -// MLIR-NEXT: %7 = llvm.alloca %6 x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: %8 = llvm.mlir.constant(1 : index) : i64 -// MLIR-NEXT: %9 = llvm.alloca %8 x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: llvm.store %arg0, %1 : !llvm.ptr, !llvm.ptr -// MLIR-NEXT: llvm.store %arg1, %3 : !llvm.ptr, !llvm.ptr -// MLIR-NEXT: llvm.store %arg2, %5 : i32, !llvm.ptr -// MLIR-NEXT: %10 = llvm.mlir.constant(0.000000e+00 : f64) : f64 -// MLIR-NEXT: llvm.store %10, %9 : f64, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb1 -// MLIR-NEXT: ^bb1: // pred: ^bb0 -// MLIR-NEXT: %11 = llvm.mlir.constant(1 : index) : i64 -// MLIR-NEXT: %12 = llvm.alloca %11 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: %13 = llvm.mlir.constant(0 : i32) : i32 -// MLIR-NEXT: llvm.store %13, %12 : i32, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb2: // 2 preds: ^bb1, ^bb4 -// MLIR-NEXT: %14 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %15 = llvm.load %5 : !llvm.ptr -> i32 -// MLIR-NEXT: %16 = llvm.icmp "slt" %14, %15 : i32 -// MLIR-NEXT: %17 = llvm.zext %16 : i1 to i32 -// MLIR-NEXT: %18 = llvm.mlir.constant(0 : i32) : i32 -// MLIR-NEXT: %19 = llvm.icmp "ne" %17, %18 : i32 -// MLIR-NEXT: llvm.cond_br %19, ^bb3, ^bb5 -// MLIR-NEXT: ^bb3: // pred: ^bb2 -// MLIR-NEXT: %20 = llvm.load %1 : !llvm.ptr -> !llvm.ptr -// MLIR-NEXT: %21 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %22 = llvm.getelementptr %20[%21] : (!llvm.ptr, i32) -> !llvm.ptr, f64 -// MLIR-NEXT: %23 = llvm.load %22 : !llvm.ptr -> f64 -// MLIR-NEXT: %24 = llvm.load %3 : !llvm.ptr -> !llvm.ptr -// MLIR-NEXT: %25 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %26 = llvm.getelementptr %24[%25] : (!llvm.ptr, i32) -> !llvm.ptr, f64 -// MLIR-NEXT: %27 = llvm.load %26 : !llvm.ptr -> f64 -// MLIR-NEXT: %28 = llvm.fmul %23, %27 : f64 -// MLIR-NEXT: %29 = llvm.load %9 : !llvm.ptr -> f64 -// MLIR-NEXT: %30 = llvm.fadd %29, %28 : f64 -// MLIR-NEXT: llvm.store %30, %9 : f64, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb4 -// MLIR-NEXT: ^bb4: // pred: ^bb3 -// MLIR-NEXT: %31 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %32 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: %33 = llvm.add %31, %32 : i32 -// MLIR-NEXT: llvm.store %33, %12 : i32, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb5: // pred: ^bb2 -// MLIR-NEXT: llvm.br ^bb6 -// MLIR-NEXT: ^bb6: // pred: ^bb5 -// MLIR-NEXT: %34 = llvm.load %9 : !llvm.ptr -> f64 -// MLIR-NEXT: llvm.store %34, %7 : f64, !llvm.ptr -// MLIR-NEXT: %35 = llvm.load %7 : !llvm.ptr -> f64 -// MLIR-NEXT: llvm.return %35 : f64 -// MLIR-NEXT: } -// MLIR-NEXT: } +// MLIR-LABEL: llvm.func @dot( +// MLIR: %[[VAL_3:.*]] = llvm.mlir.constant(1 : index) : i64 +// MLIR: %[[VAL_4:.*]] = llvm.alloca %[[VAL_3]] x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR: %[[VAL_5:.*]] = llvm.mlir.constant(1 : index) : i64 +// MLIR: %[[VAL_6:.*]] = llvm.alloca %[[VAL_5]] x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR: %[[VAL_7:.*]] = llvm.mlir.constant(1 : index) : i64 +// MLIR: %[[VAL_8:.*]] = llvm.alloca %[[VAL_7]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR: %[[VAL_9:.*]] = llvm.mlir.constant(1 : index) : i64 +// MLIR: %[[VAL_10:.*]] = llvm.alloca %[[VAL_9]] x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR: %[[VAL_11:.*]] = llvm.mlir.constant(1 : index) : i64 +// MLIR: %[[VAL_12:.*]] = llvm.alloca %[[VAL_11]] x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR: llvm.store {{.*}}, %[[VAL_4]] : !llvm.ptr, !llvm.ptr +// MLIR: llvm.store {{.*}}, %[[VAL_6]] : !llvm.ptr, !llvm.ptr +// MLIR: llvm.store {{.*}}, %[[VAL_8]] : i32, !llvm.ptr +// MLIR: %[[VAL_13:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 +// MLIR: llvm.store %[[VAL_13]], %[[VAL_12]] : f64, !llvm.ptr +// MLIR: llvm.br ^bb1 +// MLIR: ^bb1: +// MLIR: %[[VAL_14:.*]] = llvm.mlir.constant(1 : index) : i64 +// MLIR: %[[VAL_15:.*]] = llvm.alloca %[[VAL_14]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR: %[[VAL_16:.*]] = llvm.mlir.constant(0 : i32) : i32 +// MLIR: llvm.store %[[VAL_16]], %[[VAL_15]] : i32, !llvm.ptr +// MLIR: llvm.br ^bb2 +// MLIR: ^bb2: +// MLIR: %[[VAL_17:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i32 +// MLIR: %[[VAL_18:.*]] = llvm.load %[[VAL_8]] : !llvm.ptr -> i32 +// MLIR: %[[VAL_19:.*]] = llvm.icmp "slt" %[[VAL_17]], %[[VAL_18]] : i32 +// MLIR: %[[VAL_20:.*]] = llvm.zext %[[VAL_19]] : i1 to i32 +// MLIR: %[[VAL_21:.*]] = llvm.mlir.constant(0 : i32) : i32 +// MLIR: %[[VAL_22:.*]] = llvm.icmp "ne" %[[VAL_20]], %[[VAL_21]] : i32 +// MLIR: llvm.cond_br %[[VAL_22]], ^bb3, ^bb5 +// MLIR: ^bb3: +// MLIR: %[[VAL_23:.*]] = llvm.load %[[VAL_4]] : !llvm.ptr -> !llvm.ptr +// MLIR: %[[VAL_24:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i32 +// MLIR: %[[VAL_25:.*]] = llvm.sext %[[VAL_24]] : i32 to i64 +// MLIR: %[[VAL_26:.*]] = llvm.getelementptr %[[VAL_23]]{{\[}}%[[VAL_25]]] : (!llvm.ptr, i64) -> !llvm.ptr, f64 +// MLIR: %[[VAL_27:.*]] = llvm.load %[[VAL_26]] : !llvm.ptr -> f64 +// MLIR: %[[VAL_28:.*]] = llvm.load %[[VAL_6]] : !llvm.ptr -> !llvm.ptr +// MLIR: %[[VAL_29:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i32 +// MLIR: %[[VAL_30:.*]] = llvm.sext %[[VAL_29]] : i32 to i64 +// MLIR: %[[VAL_31:.*]] = llvm.getelementptr %[[VAL_28]]{{\[}}%[[VAL_30]]] : (!llvm.ptr, i64) -> !llvm.ptr, f64 +// MLIR: %[[VAL_32:.*]] = llvm.load %[[VAL_31]] : !llvm.ptr -> f64 +// MLIR: %[[VAL_33:.*]] = llvm.fmul %[[VAL_27]], %[[VAL_32]] : f64 +// MLIR: %[[VAL_34:.*]] = llvm.load %[[VAL_12]] : !llvm.ptr -> f64 +// MLIR: %[[VAL_35:.*]] = llvm.fadd %[[VAL_34]], %[[VAL_33]] : f64 +// MLIR: llvm.store %[[VAL_35]], %[[VAL_12]] : f64, !llvm.ptr +// MLIR: llvm.br ^bb4 +// MLIR: ^bb4: +// MLIR: %[[VAL_36:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i32 +// MLIR: %[[VAL_37:.*]] = llvm.mlir.constant(1 : i32) : i32 +// MLIR: %[[VAL_38:.*]] = llvm.add %[[VAL_36]], %[[VAL_37]] : i32 +// MLIR: llvm.store %[[VAL_38]], %[[VAL_15]] : i32, !llvm.ptr +// MLIR: llvm.br ^bb2 +// MLIR: ^bb5: +// MLIR: llvm.br ^bb6 +// MLIR: ^bb6: +// MLIR: %[[VAL_39:.*]] = llvm.load %[[VAL_12]] : !llvm.ptr -> f64 +// MLIR: llvm.store %[[VAL_39]], %[[VAL_10]] : f64, !llvm.ptr +// MLIR: %[[VAL_40:.*]] = llvm.load %[[VAL_10]] : !llvm.ptr -> f64 +// MLIR: llvm.return %[[VAL_40]] : f64 +// MLIR: } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 594015c77467..3bfbd8b21846 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -1,5 +1,5 @@ -// RUN: cir-opt %s -cir-to-llvm -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: cir-translate %s -cir-to-llvmir -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM @@ -170,10 +170,12 @@ module { cir.call %7(%8) : (!cir.ptr>, !s32i) -> () cir.return } + //MLIR-LABEL: @foo //MLIR: %[[RES4:.*]] = llvm.mlir.addressof @Handlers : !llvm.ptr - //MLIR: %[[RES5:.*]] = llvm.load {{.*}} : !llvm.ptr -> i32 + //MLIR: %[[LOAD:.*]] = llvm.load {{.*}} : !llvm.ptr -> i32 //MLIR: %[[RES6:.*]] = llvm.getelementptr %[[RES4]][0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> - //MLIR: %[[RES7:.*]] = llvm.getelementptr %[[RES6]][%[[RES5]]] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> + //MLIR: %[[RES5:.*]] = llvm.sext %[[LOAD]] : i32 to i64 + //MLIR: %[[RES7:.*]] = llvm.getelementptr %[[RES6]][%[[RES5]]] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> //MLIR: %[[RES8:.*]] = llvm.getelementptr %[[RES7]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> //MLIR: %[[RES9:.*]] = llvm.load %[[RES8]] : !llvm.ptr -> !llvm.ptr //MLIR: llvm.call %[[RES9]]({{.*}}) : !llvm.ptr, (i32) -> () diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index 1262f1d815bd..2516791d68a7 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -1,5 +1,5 @@ -// RUN: cir-opt %s -cir-to-llvm -o %t.cir -// RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir -check-prefix=MLIR !s32i = !cir.int module { @@ -14,15 +14,13 @@ module { } } -// MLIR: module { -// MLIR-NEXT: llvm.func @f(%arg0: !llvm.ptr) -// MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 -// MLIR-NEXT: %1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: llvm.store %arg0, %1 : !llvm.ptr, !llvm.ptr -// MLIR-NEXT: %2 = llvm.load %1 : !llvm.ptr -> !llvm.ptr -// MLIR-NEXT: %3 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: %4 = llvm.getelementptr %2[%3] : (!llvm.ptr, i32) -> !llvm.ptr, i32 -// MLIR-NEXT: %5 = llvm.load %4 : !llvm.ptr -> i32 -// MLIR-NEXT: llvm.return -// MLIR-NEXT: } -// MLIR-NEXT: } +// MLIR-LABEL: @f +// MLIR: %[[VAL_1:.*]] = llvm.mlir.constant(1 : index) : i64 +// MLIR: %[[VAL_2:.*]] = llvm.alloca %[[VAL_1]] x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr +// MLIR: llvm.store {{.*}}, %[[VAL_2]] : !llvm.ptr, !llvm.ptr +// MLIR: %[[VAL_3:.*]] = llvm.load %[[VAL_2]] : !llvm.ptr -> !llvm.ptr +// MLIR: %[[VAL_4:.*]] = llvm.mlir.constant(1 : i32) : i32 +// MLIR: %[[VAL_5:.*]] = llvm.sext %[[VAL_4]] : i32 to i64 +// MLIR: %[[VAL_6:.*]] = llvm.getelementptr %[[VAL_3]]{{\[}}%[[VAL_5]]] : (!llvm.ptr, i64) -> !llvm.ptr, i32 +// MLIR: %[[VAL_7:.*]] = llvm.load %[[VAL_6]] : !llvm.ptr -> i32 +// MLIR: llvm.return From f2f6a4eb57cb1f406782b452f21ab64a576039d4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 2 May 2024 16:33:38 -0700 Subject: [PATCH 1542/2301] [CIR][CIRGen][LowerToLLVM] Fix LLVM lowering for ptr arith extensions --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 8 ++---- .../CodeGen/UnimplementedFeatureGuarding.h | 3 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 24 +++++++++++++++-- clang/test/CIR/CodeGen/pointer-arith-ext.c | 27 ++++++++++++++++++- 4 files changed, 50 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index b1610292d5e0..22c23c3347f4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1156,12 +1156,8 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, CGF.getContext(), op.Opcode, expr->getLHS(), expr->getRHS())) llvm_unreachable("null pointer arithmetic extension is NYI"); - if (UnimplementedFeature::dataLayoutGetIndexTypeSizeInBits()) { - // TODO(cir): original codegen zero/sign-extends the index to the same width - // as the pointer. Since CIR's pointer stride doesn't care about that, it's - // skiped here. - llvm_unreachable("target-specific pointer width is NYI"); - } + // Differently from LLVM codegen, ABI bits for index sizes is handled during + // LLVM lowering. // If this is subtraction, negate the index. if (isSubtraction) diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index bde8defb1147..c2077f141821 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -80,9 +80,6 @@ struct UnimplementedFeature { static bool buildDerivedToBaseCastForDevirt() { return false; } static bool emitFunctionEpilog() { return false; } - // Data layout - static bool dataLayoutGetIndexTypeSizeInBits() { return false; } - // References related stuff static bool ARC() { return false; } // Automatic reference counting diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 19f0d36cb350..eb248a557f74 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -492,8 +492,7 @@ class CIRPtrStrideOpLowering elementTy = mlir::IntegerType::get(elementTy.getContext(), 8, mlir::IntegerType::Signless); - // Zero-extend or sign-extend the pointer value according to - // whether the index is signed or not. + // Zero-extend, sign-extend or trunc the pointer value. auto index = adaptor.getStride(); auto width = index.getType().cast().getWidth(); mlir::DataLayout LLVMLayout( @@ -501,10 +500,31 @@ class CIRPtrStrideOpLowering auto layoutWidth = LLVMLayout.getTypeIndexBitwidth(adaptor.getBase().getType()); if (layoutWidth && width != *layoutWidth) { + // If the index comes from a subtraction, make sure the extension happens + // before it. To achieve that, look at unary minus, which already got + // lowered to "sub 0, x". + auto sub = dyn_cast(index.getDefiningOp()); + auto unary = + dyn_cast(ptrStrideOp.getStride().getDefiningOp()); + if (unary && unary.getKind() == mlir::cir::UnaryOpKind::Minus && sub) + index = index.getDefiningOp()->getOperand(1); + + // Handle the cast auto llvmDstType = mlir::IntegerType::get(ctx, *layoutWidth); index = getLLVMIntCast(rewriter, index, llvmDstType, ptrStrideOp.getStride().getType().isUnsigned(), *layoutWidth); + + // Rewrite the sub in front of extensions/trunc + if (sub) { + index = rewriter.create( + index.getLoc(), index.getType(), + rewriter.create( + index.getLoc(), index.getType(), + mlir::IntegerAttr::get(index.getType(), 0)), + index); + sub->erase(); + } } rewriter.replaceOpWithNewOp( diff --git a/clang/test/CIR/CodeGen/pointer-arith-ext.c b/clang/test/CIR/CodeGen/pointer-arith-ext.c index 51fa4576ee09..11e86c1b4059 100644 --- a/clang/test/CIR/CodeGen/pointer-arith-ext.c +++ b/clang/test/CIR/CodeGen/pointer-arith-ext.c @@ -15,4 +15,29 @@ void *f2(void *a, int b) { return a + b; } // LLVM: %[[PTR:.*]] = load ptr, ptr {{.*}}, align 8 // LLVM: %[[TOEXT:.*]] = load i32, ptr {{.*}}, align 4 // LLVM: %[[STRIDE:.*]] = sext i32 %[[TOEXT]] to i64 -// LLVM: getelementptr i8, ptr %[[PTR]], i64 %[[STRIDE]] \ No newline at end of file +// LLVM: getelementptr i8, ptr %[[PTR]], i64 %[[STRIDE]] + +// These test the same paths above, just make sure it does not crash. +void *f2_0(void *a, int b) { return &a[b]; } +void *f2_1(void *a, int b) { return (a += b); } +void *f3(int a, void *b) { return a + b; } + +// FIXME: currently crashes +// void *f3_1(int a, void *b) { return (a += b); } + +void *f4(void *a, int b) { return a - b; } +// CIR-LABEL: f4 +// CIR: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CIR: %[[STRIDE:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CIR: %[[SUB:.*]] = cir.unary(minus, %[[STRIDE]]) : !s32i, !s32i +// CIR: cir.ptr_stride(%[[PTR]] : !cir.ptr, %[[SUB]] : !s32i) + +// LLVM-LABEL: f4 +// LLVM: %[[PTR:.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: %[[TOEXT:.*]] = load i32, ptr {{.*}}, align 4 +// LLVM: %[[STRIDE:.*]] = sext i32 %[[TOEXT]] to i64 +// LLVM: %[[SUB:.*]] = sub i64 0, %[[STRIDE]] +// LLVM: getelementptr i8, ptr %[[PTR]], i64 %[[SUB]] + +// Same as f4, just make sure it does not crash. +void *f4_1(void *a, int b) { return (a -= b); } \ No newline at end of file From ab965b17afb6841434ee6cd1983dbc06cae9ada8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 2 May 2024 17:31:42 -0700 Subject: [PATCH 1543/2301] [CIR][CIRGen] One more variation of ptr arith extensions --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 15 +++++++++++--- clang/test/CIR/CodeGen/pointer-arith-ext.c | 24 ++++++++++++++++++---- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 22c23c3347f4..72330d214812 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -979,9 +979,18 @@ class ScalarExprEmitter : public StmtVisitor { return Src; } - assert(!SrcTy.isa<::mlir::cir::PointerType>() && - !DstTy.isa<::mlir::cir::PointerType>() && - "Internal error: pointer conversions are handled elsewhere"); + // Handle pointer conversions next: pointers can only be converted to/from + // other pointers and integers. Check for pointer types in terms of LLVM, as + // some native types (like Obj-C id) may map to a pointer type. + if (auto DstPT = dyn_cast(DstTy)) { + llvm_unreachable("NYI"); + } + + if (isa(SrcTy)) { + // Must be an ptr to int cast. + assert(isa(DstTy) && "not ptr->int?"); + return Builder.createPtrToInt(Src, DstTy); + } // A scalar can be splatted to an extended vector of the same element type if (DstType->isExtVectorType() && !SrcType->isVectorType()) { diff --git a/clang/test/CIR/CodeGen/pointer-arith-ext.c b/clang/test/CIR/CodeGen/pointer-arith-ext.c index 11e86c1b4059..a580c7d49a06 100644 --- a/clang/test/CIR/CodeGen/pointer-arith-ext.c +++ b/clang/test/CIR/CodeGen/pointer-arith-ext.c @@ -22,8 +22,14 @@ void *f2_0(void *a, int b) { return &a[b]; } void *f2_1(void *a, int b) { return (a += b); } void *f3(int a, void *b) { return a + b; } -// FIXME: currently crashes -// void *f3_1(int a, void *b) { return (a += b); } +void *f3_1(int a, void *b) { return (a += b); } +// CIR-LABEL: @f3_1 +// CIR: %[[NEW_PTR:.*]] = cir.ptr_stride +// CIR: cir.cast(ptr_to_int, %[[NEW_PTR]] : !cir.ptr), !s32i + +// LLVM-LABEL: @f3_1 +// LLVM: %[[NEW_PTR:.*]] = getelementptr +// LLVM: ptrtoint ptr %[[NEW_PTR]] to i32 void *f4(void *a, int b) { return a - b; } // CIR-LABEL: f4 @@ -39,5 +45,15 @@ void *f4(void *a, int b) { return a - b; } // LLVM: %[[SUB:.*]] = sub i64 0, %[[STRIDE]] // LLVM: getelementptr i8, ptr %[[PTR]], i64 %[[SUB]] -// Same as f4, just make sure it does not crash. -void *f4_1(void *a, int b) { return (a -= b); } \ No newline at end of file +// Similar to f4, just make sure it does not crash. +void *f4_1(void *a, int b) { return (a -= b); } + +// FIXME: add support for the remaining ones. +// FP f5(FP a, int b) { return a + b; } +// FP f5_1(FP a, int b) { return (a += b); } +// FP f6(int a, FP b) { return a + b; } +// FP f6_1(int a, FP b) { return (a += b); } +// FP f7(FP a, int b) { return a - b; } +// FP f7_1(FP a, int b) { return (a -= b); } +// void f8(void *a, int b) { return *(a + b); } +// void f8_1(void *a, int b) { return a[b]; } \ No newline at end of file From d502c12b7f13b22aa50c0eb522b372303569e0eb Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Sat, 4 May 2024 01:34:50 +0800 Subject: [PATCH 1544/2301] [CIR][ThroughMLIR] Support lowering CastOp to arith (#577) This commit introduce CIRCastOpLowering for lowering to arith. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 132 +++++++++++++++- clang/test/CIR/Lowering/ThroughMLIR/cast.cir | 147 ++++++++++++++++++ 2 files changed, 278 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/cast.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index fcfd572b8480..392f404b497e 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -709,6 +709,135 @@ class CIRGetGlobalOpLowering } }; +static mlir::Value createIntCast(mlir::ConversionPatternRewriter &rewriter, + mlir::Value src, mlir::Type dstTy, + bool isSigned = false) { + auto srcTy = src.getType(); + assert(isa(srcTy)); + assert(isa(dstTy)); + + auto srcWidth = srcTy.cast().getWidth(); + auto dstWidth = dstTy.cast().getWidth(); + auto loc = src.getLoc(); + + if (dstWidth > srcWidth && isSigned) + return rewriter.create(loc, dstTy, src); + else if (dstWidth > srcWidth) + return rewriter.create(loc, dstTy, src); + else if (dstWidth < srcWidth) + return rewriter.create(loc, dstTy, src); + else + return rewriter.create(loc, dstTy, src); +} + +class CIRCastOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + inline mlir::Type convertTy(mlir::Type ty) const { + return getTypeConverter()->convertType(ty); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CastOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + if (isa(op.getSrc().getType())) + llvm_unreachable("CastOp lowering for vector type is not supported yet"); + auto src = adaptor.getSrc(); + auto dstType = op.getResult().getType(); + using CIR = mlir::cir::CastKind; + switch (op.getKind()) { + case CIR::int_to_bool: { + auto zero = rewriter.create( + src.getLoc(), op.getSrc().getType(), + mlir::cir::IntAttr::get(op.getSrc().getType(), 0)); + rewriter.replaceOpWithNewOp( + op, mlir::cir::BoolType::get(getContext()), mlir::cir::CmpOpKind::ne, + op.getSrc(), zero); + return mlir::success(); + } + case CIR::integral: { + auto newDstType = convertTy(dstType); + auto srcType = op.getSrc().getType(); + mlir::cir::IntType srcIntType = srcType.cast(); + auto newOp = + createIntCast(rewriter, src, newDstType, srcIntType.isSigned()); + rewriter.replaceOp(op, newOp); + return mlir::success(); + } + case CIR::floating: { + auto newDstType = convertTy(dstType); + auto srcTy = op.getSrc().getType(); + auto dstTy = op.getResult().getType(); + + if (!dstTy.isa() || + !srcTy.isa()) + return op.emitError() << "NYI cast from " << srcTy << " to " << dstTy; + + auto getFloatWidth = [](mlir::Type ty) -> unsigned { + return ty.cast().getWidth(); + }; + + if (getFloatWidth(srcTy) > getFloatWidth(dstTy)) + rewriter.replaceOpWithNewOp(op, newDstType, src); + else + rewriter.replaceOpWithNewOp(op, newDstType, src); + return mlir::success(); + } + case CIR::float_to_bool: { + auto dstTy = op.getType().cast(); + auto newDstType = convertTy(dstTy); + auto kind = mlir::arith::CmpFPredicate::UNE; + + // Check if float is not equal to zero. + auto zeroFloat = rewriter.create( + op.getLoc(), src.getType(), mlir::FloatAttr::get(src.getType(), 0.0)); + + // Extend comparison result to either bool (C++) or int (C). + mlir::Value cmpResult = rewriter.create( + op.getLoc(), kind, src, zeroFloat); + rewriter.replaceOpWithNewOp(op, newDstType, + cmpResult); + return mlir::success(); + } + case CIR::bool_to_int: { + auto dstTy = op.getType().cast(); + auto newDstType = convertTy(dstTy).cast(); + auto newOp = createIntCast(rewriter, src, newDstType); + rewriter.replaceOp(op, newOp); + return mlir::success(); + } + case CIR::bool_to_float: { + auto dstTy = op.getType(); + auto newDstType = convertTy(dstTy); + rewriter.replaceOpWithNewOp(op, newDstType, src); + return mlir::success(); + } + case CIR::int_to_float: { + auto dstTy = op.getType(); + auto newDstType = convertTy(dstTy); + if (op.getSrc().getType().cast().isSigned()) + rewriter.replaceOpWithNewOp(op, newDstType, src); + else + rewriter.replaceOpWithNewOp(op, newDstType, src); + return mlir::success(); + } + case CIR::float_to_int: { + auto dstTy = op.getType(); + auto newDstType = convertTy(dstTy); + if (op.getResult().getType().cast().isSigned()) + rewriter.replaceOpWithNewOp(op, newDstType, src); + else + rewriter.replaceOpWithNewOp(op, newDstType, src); + return mlir::success(); + } + default: + break; + } + return mlir::failure(); + } +}; + void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -718,7 +847,8 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, CIRStoreOpLowering, CIRAllocaOpLowering, CIRFuncOpLowering, CIRScopeOpLowering, CIRBrCondOpLowering, CIRTernaryOpLowering, CIRYieldOpLowering, CIRCosOpLowering, CIRGlobalOpLowering, - CIRGetGlobalOpLowering>(converter, patterns.getContext()); + CIRGetGlobalOpLowering, CIRCastOpLowering>( + converter, patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cast.cir b/clang/test/CIR/Lowering/ThroughMLIR/cast.cir new file mode 100644 index 000000000000..18452a456880 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/cast.cir @@ -0,0 +1,147 @@ +// RUN: cir-opt %s -cir-to-mlir | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +!s16i = !cir.int +!u32i = !cir.int +!u16i = !cir.int +!u8i = !cir.int +module { + // MLIR-LABEL: func.func @cast_int_to_bool(%arg0: i32) -> i8 + // LLVM-LABEL: define i8 @cast_int_to_bool(i32 %0) + cir.func @cast_int_to_bool(%i : !u32i) -> !cir.bool { + // MLIR-NEXT: %[[ZERO:.*]] = arith.constant 0 : i32 + // MLIR-NEXT: arith.cmpi ne, %arg0, %[[ZERO]] + // LLVM-NEXT: icmp ne i32 %0, 0 + + %1 = cir.cast(int_to_bool, %i : !u32i), !cir.bool + cir.return %1 : !cir.bool + } + // MLIR-LABEL: func.func @cast_integral_trunc(%arg0: i32) -> i16 + // LLVM-LABEL: define i16 @cast_integral_trunc(i32 %0) + cir.func @cast_integral_trunc(%i : !u32i) -> !u16i { + // MLIR-NEXT: arith.trunci %arg0 : i32 to i16 + // LLVM-NEXT: trunc i32 %0 to i16 + + %1 = cir.cast(integral, %i : !u32i), !u16i + cir.return %1 : !u16i + } + // MLIR-LABEL: func.func @cast_integral_extu(%arg0: i16) -> i32 + // LLVM-LABEL: define i32 @cast_integral_extu(i16 %0) + cir.func @cast_integral_extu(%i : !u16i) -> !u32i { + // MLIR-NEXT: arith.extui %arg0 : i16 to i32 + // LLVM-NEXT: zext i16 %0 to i32 + + %1 = cir.cast(integral, %i : !u16i), !u32i + cir.return %1 : !u32i + } + // MLIR-LABEL: func.func @cast_integral_exts(%arg0: i16) -> i32 + // LLVM-LABEL: define i32 @cast_integral_exts(i16 %0) + cir.func @cast_integral_exts(%i : !s16i) -> !s32i { + // MLIR-NEXT: arith.extsi %arg0 : i16 to i32 + // LLVM-NEXT: sext i16 %0 to i32 + + %1 = cir.cast(integral, %i : !s16i), !s32i + cir.return %1 : !s32i + } + // MLIR-LABEL: func.func @cast_integral_same_size(%arg0: i32) -> i32 + // LLVM-LABEL: define i32 @cast_integral_same_size(i32 %0) + cir.func @cast_integral_same_size(%i : !u32i) -> !s32i { + // MLIR-NEXT: %0 = arith.bitcast %arg0 : i32 to i32 + // LLVM-NEXT: ret i32 %0 + + %1 = cir.cast(integral, %i : !u32i), !s32i + cir.return %1 : !s32i + } + // MLIR-LABEL: func.func @cast_floating_trunc(%arg0: f64) -> f32 + // LLVM-LABEL: define float @cast_floating_trunc(double %0) + cir.func @cast_floating_trunc(%d : !cir.double) -> !cir.float { + // MLIR-NEXT: arith.truncf %arg0 : f64 to f32 + // LLVM-NEXT: fptrunc double %0 to float + + %1 = cir.cast(floating, %d : !cir.double), !cir.float + cir.return %1 : !cir.float + } + // MLIR-LABEL: func.func @cast_floating_extf(%arg0: f32) -> f64 + // LLVM-LABEL: define double @cast_floating_extf(float %0) + cir.func @cast_floating_extf(%f : !cir.float) -> !cir.double { + // MLIR-NEXT: arith.extf %arg0 : f32 to f64 + // LLVM-NEXT: fpext float %0 to double + + %1 = cir.cast(floating, %f : !cir.float), !cir.double + cir.return %1 : !cir.double + } + // MLIR-LABEL: func.func @cast_float_to_bool(%arg0: f32) -> i8 + // LLVM-LABEL: define i8 @cast_float_to_bool(float %0) + cir.func @cast_float_to_bool(%f : !cir.float) -> !cir.bool { + // MLIR-NEXT: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 + // MLIR-NEXT: arith.cmpf une, %arg0, %[[ZERO]] : f32 + // LLVM-NEXT: fcmp une float %0, 0.000000e+00 + + %1 = cir.cast(float_to_bool, %f : !cir.float), !cir.bool + cir.return %1 : !cir.bool + } + // MLIR-LABEL: func.func @cast_bool_to_int8(%arg0: i8) -> i8 + // LLVM-LABEL: define i8 @cast_bool_to_int8(i8 %0) + cir.func @cast_bool_to_int8(%b : !cir.bool) -> !u8i { + // MLIR-NEXT: arith.bitcast %arg0 : i8 to i8 + // LLVM-NEXT: ret i8 %0 + + %1 = cir.cast(bool_to_int, %b : !cir.bool), !u8i + cir.return %1 : !u8i + } + // MLIR-LABEL: func.func @cast_bool_to_int(%arg0: i8) -> i32 + // LLVM-LABEL: define i32 @cast_bool_to_int(i8 %0) + cir.func @cast_bool_to_int(%b : !cir.bool) -> !u32i { + // MLIR-NEXT: arith.extui %arg0 : i8 to i32 + // LLVM-NEXT: zext i8 %0 to i32 + + %1 = cir.cast(bool_to_int, %b : !cir.bool), !u32i + cir.return %1 : !u32i + } + // MLIR-LABEL: func.func @cast_bool_to_float(%arg0: i8) -> f32 + // LLVM-LABEL: define float @cast_bool_to_float(i8 %0) + cir.func @cast_bool_to_float(%b : !cir.bool) -> !cir.float { + // MLIR-NEXT: arith.uitofp %arg0 : i8 to f32 + // LLVM-NEXT: uitofp i8 %0 to float + + %1 = cir.cast(bool_to_float, %b : !cir.bool), !cir.float + cir.return %1 : !cir.float + } + // MLIR-LABEL: func.func @cast_signed_int_to_float(%arg0: i32) -> f32 + // LLVM-LABEL: define float @cast_signed_int_to_float(i32 %0) + cir.func @cast_signed_int_to_float(%i : !s32i) -> !cir.float { + // MLIR-NEXT: arith.sitofp %arg0 : i32 to f32 + // LLVM-NEXT: sitofp i32 %0 to float + + %1 = cir.cast(int_to_float, %i : !s32i), !cir.float + cir.return %1 : !cir.float + } + // MLIR-LABEL: func.func @cast_unsigned_int_to_float(%arg0: i32) -> f32 + // LLVM-LABEL: define float @cast_unsigned_int_to_float(i32 %0) + cir.func @cast_unsigned_int_to_float(%i : !u32i) -> !cir.float { + // MLIR-NEXT: arith.uitofp %arg0 : i32 to f32 + // LLVM-NEXT: uitofp i32 %0 to float + + %1 = cir.cast(int_to_float, %i : !u32i), !cir.float + cir.return %1 : !cir.float + } + // MLIR-LABEL: func.func @cast_float_to_int_signed(%arg0: f32) -> i32 + // LLVM-LABEL: define i32 @cast_float_to_int_signed(float %0) + cir.func @cast_float_to_int_signed(%f : !cir.float) -> !s32i { + // MLIR-NEXT: arith.fptosi %arg0 : f32 to i32 + // LLVM-NEXT: fptosi float %0 to i32 + + %1 = cir.cast(float_to_int, %f : !cir.float), !s32i + cir.return %1 : !s32i + } + // MLIR-LABEL: func.func @cast_float_to_int_unsigned(%arg0: f32) -> i32 + // LLVM-LABEL: define i32 @cast_float_to_int_unsigned(float %0) + cir.func @cast_float_to_int_unsigned(%f : !cir.float) -> !u32i { + // MLIR-NEXT: arith.fptoui %arg0 : f32 to i32 + // LLVM-NEXT: fptoui float %0 to i32 + + %1 = cir.cast(float_to_int, %f : !cir.float), !u32i + cir.return %1 : !u32i + } +} From 3c954e89a11903442ef5fe484fd6ba4d3c889555 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 3 May 2024 12:17:27 -0700 Subject: [PATCH 1545/2301] [CIR][LowerToLLVM] Fix ptrdiffs in face of !cir.void --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 24 ++++++++++++------- clang/test/CIR/CodeGen/ptrdiff.c | 18 ++++++++++++++ .../CIR/CodeGen/{ptr_diff.cpp => ptrdiff.cpp} | 0 3 files changed, 34 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/ptrdiff.c rename clang/test/CIR/CodeGen/{ptr_diff.cpp => ptrdiff.cpp} (100%) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index eb248a557f74..25e27cb1a726 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2534,6 +2534,9 @@ class CIRPtrDiffOpLowering uint64_t getTypeSize(mlir::Type type, mlir::Operation &op) const { mlir::DataLayout layout(op.getParentOfType()); + // For LLVM purposes we treat void as u8. + if (isa(type)) + type = mlir::cir::IntType::get(type.getContext(), 8, /*isSigned=*/false); return llvm::divideCeil(layout.getTypeSizeInBits(type), 8); } @@ -2553,16 +2556,21 @@ class CIRPtrDiffOpLowering auto ptrTy = op.getLhs().getType().cast(); auto typeSize = getTypeSize(ptrTy.getPointee(), *op); - auto typeSizeVal = rewriter.create( - op.getLoc(), llvmDstTy, mlir::IntegerAttr::get(llvmDstTy, typeSize)); - if (dstTy.isUnsigned()) - rewriter.replaceOpWithNewOp(op, llvmDstTy, diff, - typeSizeVal); - else - rewriter.replaceOpWithNewOp(op, llvmDstTy, diff, - typeSizeVal); + // Avoid silly division by 1. + auto resultVal = diff.getResult(); + if (typeSize != 1) { + auto typeSizeVal = rewriter.create( + op.getLoc(), llvmDstTy, mlir::IntegerAttr::get(llvmDstTy, typeSize)); + if (dstTy.isUnsigned()) + resultVal = rewriter.create(op.getLoc(), llvmDstTy, + diff, typeSizeVal); + else + resultVal = rewriter.create(op.getLoc(), llvmDstTy, + diff, typeSizeVal); + } + rewriter.replaceOp(op, resultVal); return mlir::success(); } }; diff --git a/clang/test/CIR/CodeGen/ptrdiff.c b/clang/test/CIR/CodeGen/ptrdiff.c new file mode 100644 index 000000000000..1a937d5f4272 --- /dev/null +++ b/clang/test/CIR/CodeGen/ptrdiff.c @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +int addrcmp(const void* a, const void* b) { + // CIR-LABEL: addrcmp + // CIR: %[[R:.*]] = cir.ptr_diff + // CIR: cir.cast(integral, %[[R]] : !s64i), !s32 + + // LLVM-LABEL: addrcmp + // LLVM: %[[PTR_A:.*]] = ptrtoint ptr {{.*}} to i64 + // LLVM: %[[PTR_B:.*]] = ptrtoint ptr {{.*}} to i64 + // LLVM: %[[SUB:.*]] = sub i64 %[[PTR_A]], %[[PTR_B]] + // LLVM-NOT: sdiv + // LLVM: trunc i64 %[[SUB]] to i32 + return *(const void**)a - *(const void**)b; +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/ptr_diff.cpp b/clang/test/CIR/CodeGen/ptrdiff.cpp similarity index 100% rename from clang/test/CIR/CodeGen/ptr_diff.cpp rename to clang/test/CIR/CodeGen/ptrdiff.cpp From 8a265df0c813cffbb40adea96e8f4b8b9344b8af Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Sat, 4 May 2024 00:03:20 +0200 Subject: [PATCH 1546/2301] [CIR][Asm] Remove duplicated lambda & coroutine attributes (#580) Do not print in cir.func definition the 'attr { ... }' with coroutine or lambda attributes since they are already printed before the function name. Otherwise redundancy breaks a future parsing. Sort the attributes to be skipped so it is more obvious to see the list of attributes. Improve the tests to check there are no spurious attributes anymore. --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 20 ++++++++++++++++---- clang/test/CIR/CodeGen/coro-task.cpp | 14 +++++++------- clang/test/CIR/CodeGen/lambda.cpp | 6 +++--- 3 files changed, 26 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index c885d435a2ce..923a013706b6 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2025,6 +2025,9 @@ ::mlir::Region *cir::FuncOp::getCallableRegion() { void cir::FuncOp::print(OpAsmPrinter &p) { p << ' '; + // When adding a specific keyword here, do not forget to omit it in + // printFunctionAttributes below or there will be a syntax error when + // parsing if (getBuiltin()) p << "builtin "; @@ -2058,10 +2061,19 @@ void cir::FuncOp::print(OpAsmPrinter &p) { function_interface_impl::printFunctionAttributes( p, *this, // These are all omitted since they are custom printed already. - {getSymVisibilityAttrName(), getAliaseeAttrName(), - getFunctionTypeAttrName(), getLinkageAttrName(), getBuiltinAttrName(), - getNoProtoAttrName(), getGlobalCtorAttrName(), getGlobalDtorAttrName(), - getExtraAttrsAttrName()}); + { + getAliaseeAttrName(), + getBuiltinAttrName(), + getCoroutineAttrName(), + getExtraAttrsAttrName(), + getFunctionTypeAttrName(), + getGlobalCtorAttrName(), + getGlobalDtorAttrName(), + getLambdaAttrName(), + getLinkageAttrName(), + getNoProtoAttrName(), + getSymVisibilityAttrName(), + }); if (auto aliaseeName = getAliasee()) { p << " alias("; diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index e292806175de..43edbf451464 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -148,7 +148,7 @@ VoidTask silly_task() { co_await std::suspend_always(); } -// CHECK: cir.func coroutine @_Z10silly_taskv() -> ![[VoidTask]] {{.*}} { +// CHECK: cir.func coroutine @_Z10silly_taskv() -> ![[VoidTask]] extra{{.*}}{ // Allocate promise. @@ -274,7 +274,7 @@ folly::coro::Task byRef(const std::string& s) { } // FIXME: this could be less redundant than two allocas + reloads -// CHECK: cir.func coroutine @_Z5byRefRKSt6string(%arg0: !cir.ptr +// CHECK: cir.func coroutine @_Z5byRefRKSt6string(%arg0: !cir.ptr {{.*}}22 extra{{.*}}{ // CHECK: %[[#AllocaParam:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] // CHECK: %[[#AllocaFnUse:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] @@ -291,7 +291,7 @@ folly::coro::Task silly_coro() { // Make sure we properly handle OnFallthrough coro body sub stmt and // check there are not multiple co_returns emitted. -// CHECK: cir.func coroutine @_Z10silly_corov() +// CHECK: cir.func coroutine @_Z10silly_corov() {{.*}}22 extra{{.*}}{ // CHECK: cir.await(init, ready : { // CHECK: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv // CHECK-NOT: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv @@ -303,7 +303,7 @@ folly::coro::Task go1() { co_return co_await task; } -// CHECK: cir.func coroutine @_Z3go1v() +// CHECK: cir.func coroutine @_Z3go1v() {{.*}}22 extra{{.*}}{ // CHECK: %[[#IntTaskAddr:]] = cir.alloca ![[IntTask]], !cir.ptr, ["task", init] // CHECK: cir.await(init, ready : { @@ -338,8 +338,8 @@ folly::coro::Task go1_lambda() { co_return co_await task; } -// CHECK: cir.func coroutine lambda internal private @_ZZ10go1_lambdavENK3$_0clEv -// CHECK: cir.func coroutine @_Z10go1_lambdav() +// CHECK: cir.func coroutine lambda internal private @_ZZ10go1_lambdavENK3$_0clEv{{.*}}22 extra{{.*}}{ +// CHECK: cir.func coroutine @_Z10go1_lambdav() {{.*}}22 extra{{.*}}{ folly::coro::Task go4() { auto* fn = +[](int const& i) -> folly::coro::Task { co_return i; }; @@ -347,7 +347,7 @@ folly::coro::Task go4() { co_return co_await std::move(task); } -// CHECK: cir.func coroutine @_Z3go4v() +// CHECK: cir.func coroutine @_Z3go4v() {{.*}}22 extra{{.*}}{ // CHECK: cir.await(init, ready : { // CHECK: }, suspend : { diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 77a531253fcc..60f7b98f0600 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -9,7 +9,7 @@ void fn() { // CHECK: !ty_22anon2E222 = !cir.struct // CHECK-DAG: module -// CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv +// CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv{{.*}}) extra // CHECK: cir.func @_Z2fnv() // CHECK-NEXT: %0 = cir.alloca !ty_22anon2E222, !cir.ptr, ["a"] @@ -21,7 +21,7 @@ void l0() { a(); } -// CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv( +// CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv({{.*}}) extra // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> @@ -99,7 +99,7 @@ int g3() { } // lambda operator() -// CHECK: cir.func lambda internal private @_ZZ2g3vENK3$_0clERKi +// CHECK: cir.func lambda internal private @_ZZ2g3vENK3$_0clERKi{{.*}}!s32i extra // lambda __invoke() // CHECK: cir.func internal private @_ZZ2g3vEN3$_08__invokeERKi From d9d68bd8d3231582e3f82dd2fe8d845518e39d3c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 3 May 2024 15:49:32 -0700 Subject: [PATCH 1547/2301] [CIR][CIRGen] Add skeleton for AArch64 and x86/x86_64 builtin/instrinsics specific emission Note that this is a bit different than original LLVM codegen because we are splitting down target specific intrinsics to different files. For now only add AArch64 and x86* as examples, more should come when support for more targets happen. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 56 +- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 689 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 37 + clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 + clang/lib/CIR/CodeGen/CMakeLists.txt | 2 + .../CodeGen/UnimplementedFeatureGuarding.h | 6 + 6 files changed, 792 insertions(+), 3 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp create mode 100644 clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 4c943f25178c..1567b78e7ff4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -15,6 +15,7 @@ #include "CIRGenCall.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "TargetInfo.h" #include "UnimplementedFeatureGuarding.h" // TODO(cir): we shouldn't need this but we currently reuse intrinsic IDs for @@ -24,6 +25,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/Basic/Builtins.h" +#include "clang/Basic/TargetBuiltins.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Value.h" @@ -786,8 +788,56 @@ static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch) { - llvm_unreachable("NYI"); - return {}; + // When compiling in HipStdPar mode we have to be conservative in rejecting + // target specific features in the FE, and defer the possible error to the + // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is + // referenced by an accelerator executable function, we emit an error. + // Returning nullptr here leads to the builtin being handled in + // EmitStdParUnsupportedBuiltin. + if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice && + Arch != CGF->getTarget().getTriple().getArch()) + return nullptr; + + switch (Arch) { + case llvm::Triple::arm: + case llvm::Triple::armeb: + case llvm::Triple::thumb: + case llvm::Triple::thumbeb: + llvm_unreachable("NYI"); + case llvm::Triple::aarch64: + case llvm::Triple::aarch64_32: + case llvm::Triple::aarch64_be: + return CGF->buildAArch64BuiltinExpr(BuiltinID, E, Arch); + case llvm::Triple::bpfeb: + case llvm::Triple::bpfel: + llvm_unreachable("NYI"); + case llvm::Triple::x86: + case llvm::Triple::x86_64: + return CGF->buildX86BuiltinExpr(BuiltinID, E); + case llvm::Triple::ppc: + case llvm::Triple::ppcle: + case llvm::Triple::ppc64: + case llvm::Triple::ppc64le: + llvm_unreachable("NYI"); + case llvm::Triple::r600: + case llvm::Triple::amdgcn: + llvm_unreachable("NYI"); + case llvm::Triple::systemz: + llvm_unreachable("NYI"); + case llvm::Triple::nvptx: + case llvm::Triple::nvptx64: + llvm_unreachable("NYI"); + case llvm::Triple::wasm32: + case llvm::Triple::wasm64: + llvm_unreachable("NYI"); + case llvm::Triple::hexagon: + llvm_unreachable("NYI"); + case llvm::Triple::riscv32: + case llvm::Triple::riscv64: + llvm_unreachable("NYI"); + default: + return {}; + } } mlir::Value @@ -955,4 +1005,4 @@ mlir::cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *FD, auto Ty = getTypes().ConvertType(FD->getType()); return GetOrCreateCIRFunction(Name, Ty, D, /*ForVTable=*/false); -} +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp new file mode 100644 index 000000000000..232da07983c7 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -0,0 +1,689 @@ +//===---- CIRGenBuiltinAArch64.cpp - Emit CIR for AArch64 builtins --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit ARM64 Builtin calls as CIR or a function call +// to be later resolved. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" +#include "CIRGenCall.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "TargetInfo.h" +#include "UnimplementedFeatureGuarding.h" + +// TODO(cir): we shouldn't need this but we currently reuse intrinsic IDs for +// convenience. +#include "llvm/IR/Intrinsics.h" + +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Value.h" +#include "clang/AST/GlobalDecl.h" +#include "clang/Basic/Builtins.h" +#include "clang/Basic/TargetBuiltins.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace cir; +using namespace clang; +using namespace mlir::cir; + +mlir::Value +CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, + llvm::Triple::ArchType Arch) { + if (BuiltinID >= clang::AArch64::FirstSVEBuiltin && + BuiltinID <= clang::AArch64::LastSVEBuiltin) + llvm_unreachable("NYI"); + + if (BuiltinID >= clang::AArch64::FirstSMEBuiltin && + BuiltinID <= clang::AArch64::LastSMEBuiltin) + llvm_unreachable("NYI"); + + if (BuiltinID == Builtin::BI__builtin_cpu_supports) + llvm_unreachable("NYI"); + + unsigned HintID = static_cast(-1); + switch (BuiltinID) { + default: + break; + case clang::AArch64::BI__builtin_arm_nop: + HintID = 0; + break; + case clang::AArch64::BI__builtin_arm_yield: + case clang::AArch64::BI__yield: + HintID = 1; + break; + case clang::AArch64::BI__builtin_arm_wfe: + case clang::AArch64::BI__wfe: + HintID = 2; + break; + case clang::AArch64::BI__builtin_arm_wfi: + case clang::AArch64::BI__wfi: + HintID = 3; + break; + case clang::AArch64::BI__builtin_arm_sev: + case clang::AArch64::BI__sev: + HintID = 4; + break; + case clang::AArch64::BI__builtin_arm_sevl: + case clang::AArch64::BI__sevl: + HintID = 5; + break; + } + + if (HintID != static_cast(-1)) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_trap) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_get_sme_state) { + // Create call to __arm_sme_state and store the results to the two pointers. + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit) { + assert((getContext().getTypeSize(E->getType()) == 32) && + "rbit of unusual size!"); + llvm_unreachable("NYI"); + } + if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit64) { + assert((getContext().getTypeSize(E->getType()) == 64) && + "rbit of unusual size!"); + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_clz || + BuiltinID == clang::AArch64::BI__builtin_arm_clz64) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) { + llvm_unreachable("NYI"); + } + if (BuiltinID == clang::AArch64::BI__builtin_arm_cls64) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32zf || + BuiltinID == clang::AArch64::BI__builtin_arm_rint32z) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64zf || + BuiltinID == clang::AArch64::BI__builtin_arm_rint64z) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32xf || + BuiltinID == clang::AArch64::BI__builtin_arm_rint32x) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64xf || + BuiltinID == clang::AArch64::BI__builtin_arm_rint64x) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_jcvt) { + assert((getContext().getTypeSize(E->getType()) == 32) && + "__jcvt of unusual size!"); + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b || + BuiltinID == clang::AArch64::BI__builtin_arm_st64b || + BuiltinID == clang::AArch64::BI__builtin_arm_st64bv || + BuiltinID == clang::AArch64::BI__builtin_arm_st64bv0) { + llvm_unreachable("NYI"); + + if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b) { + // Load from the address via an LLVM intrinsic, receiving a + // tuple of 8 i64 words, and store each one to ValPtr. + llvm_unreachable("NYI"); + } else { + // Load 8 i64 words from ValPtr, and store them to the address + // via an LLVM intrinsic. + llvm_unreachable("NYI"); + } + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr || + BuiltinID == clang::AArch64::BI__builtin_arm_rndrrs) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__clear_cache) { + assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); + llvm_unreachable("NYI"); + } + + if ((BuiltinID == clang::AArch64::BI__builtin_arm_ldrex || + BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) && + getContext().getTypeSize(E->getType()) == 128) { + llvm_unreachable("NYI"); + } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex || + BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) { + llvm_unreachable("NYI"); + } + + if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex || + BuiltinID == clang::AArch64::BI__builtin_arm_stlex) && + getContext().getTypeSize(E->getArg(0)->getType()) == 128) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_strex || + BuiltinID == clang::AArch64::BI__builtin_arm_stlex) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__getReg) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__break) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_clrex) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI_ReadWriteBarrier) + llvm_unreachable("NYI"); + + // CRC32 + // FIXME(cir): get rid of LLVM when this gets implemented. + llvm::Intrinsic::ID CRCIntrinsicID = llvm::Intrinsic::not_intrinsic; + switch (BuiltinID) { + case clang::AArch64::BI__builtin_arm_crc32b: + case clang::AArch64::BI__builtin_arm_crc32cb: + case clang::AArch64::BI__builtin_arm_crc32h: + case clang::AArch64::BI__builtin_arm_crc32ch: + case clang::AArch64::BI__builtin_arm_crc32w: + case clang::AArch64::BI__builtin_arm_crc32cw: + case clang::AArch64::BI__builtin_arm_crc32d: + case clang::AArch64::BI__builtin_arm_crc32cd: + llvm_unreachable("NYI"); + } + + if (CRCIntrinsicID != llvm::Intrinsic::not_intrinsic) { + llvm_unreachable("NYI"); + } + + // Memory Operations (MOPS) + if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) { + llvm_unreachable("NYI"); + } + + // Memory Tagging Extensions (MTE) Intrinsics + // FIXME(cir): get rid of LLVM when this gets implemented. + llvm::Intrinsic::ID MTEIntrinsicID = llvm::Intrinsic::not_intrinsic; + switch (BuiltinID) { + case clang::AArch64::BI__builtin_arm_irg: + case clang::AArch64::BI__builtin_arm_addg: + case clang::AArch64::BI__builtin_arm_gmi: + case clang::AArch64::BI__builtin_arm_ldg: + case clang::AArch64::BI__builtin_arm_stg: + case clang::AArch64::BI__builtin_arm_subp: + llvm_unreachable("NYI"); + } + + if (MTEIntrinsicID != llvm::Intrinsic::not_intrinsic) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr || + BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 || + BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 || + BuiltinID == clang::AArch64::BI__builtin_arm_rsrp || + BuiltinID == clang::AArch64::BI__builtin_arm_wsr || + BuiltinID == clang::AArch64::BI__builtin_arm_wsr64 || + BuiltinID == clang::AArch64::BI__builtin_arm_wsr128 || + BuiltinID == clang::AArch64::BI__builtin_arm_wsrp) { + + llvm_unreachable("NYI"); + if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr || + BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 || + BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 || + BuiltinID == clang::AArch64::BI__builtin_arm_rsrp) + llvm_unreachable("NYI"); + + bool IsPointerBuiltin = BuiltinID == clang::AArch64::BI__builtin_arm_rsrp || + BuiltinID == clang::AArch64::BI__builtin_arm_wsrp; + + bool Is32Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr || + BuiltinID == clang::AArch64::BI__builtin_arm_wsr; + + bool Is128Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 || + BuiltinID == clang::AArch64::BI__builtin_arm_wsr128; + + if (Is32Bit) { + llvm_unreachable("NYI"); + } else if (Is128Bit) { + llvm_unreachable("NYI"); + } else if (IsPointerBuiltin) { + llvm_unreachable("NYI"); + } else { + llvm_unreachable("NYI"); + }; + + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI_ReadStatusReg || + BuiltinID == clang::AArch64::BI_WriteStatusReg) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__builtin_sponentry) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == clang::AArch64::BI__mulh || + BuiltinID == clang::AArch64::BI__umulh) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == AArch64::BI__writex18byte || + BuiltinID == AArch64::BI__writex18word || + BuiltinID == AArch64::BI__writex18dword || + BuiltinID == AArch64::BI__writex18qword) { + // Read x18 as i8* + llvm_unreachable("NYI"); + } + + if (BuiltinID == AArch64::BI__readx18byte || + BuiltinID == AArch64::BI__readx18word || + BuiltinID == AArch64::BI__readx18dword || + BuiltinID == AArch64::BI__readx18qword) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == AArch64::BI_CopyDoubleFromInt64 || + BuiltinID == AArch64::BI_CopyFloatFromInt32 || + BuiltinID == AArch64::BI_CopyInt32FromFloat || + BuiltinID == AArch64::BI_CopyInt64FromDouble) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == AArch64::BI_CountLeadingOnes || + BuiltinID == AArch64::BI_CountLeadingOnes64 || + BuiltinID == AArch64::BI_CountLeadingZeros || + BuiltinID == AArch64::BI_CountLeadingZeros64) { + llvm_unreachable("NYI"); + + if (BuiltinID == AArch64::BI_CountLeadingOnes || + BuiltinID == AArch64::BI_CountLeadingOnes64) + llvm_unreachable("NYI"); + + llvm_unreachable("NYI"); + } + + if (BuiltinID == AArch64::BI_CountLeadingSigns || + BuiltinID == AArch64::BI_CountLeadingSigns64) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == AArch64::BI_CountOneBits || + BuiltinID == AArch64::BI_CountOneBits64) { + llvm_unreachable("NYI"); + } + + if (BuiltinID == AArch64::BI__prefetch) { + llvm_unreachable("NYI"); + } + + // Handle MSVC intrinsics before argument evaluation to prevent double + // evaluation. + assert(!UnimplementedFeature::translateAarch64ToMsvcIntrin()); + + // Some intrinsics are equivalent - if they are use the base intrinsic ID. + assert(!UnimplementedFeature::neonEquivalentIntrinsicMap()); + + // Find out if any arguments are required to be integer constant + // expressions. + unsigned ICEArguments = 0; + ASTContext::GetBuiltinTypeError Error; + getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); + assert(Error == ASTContext::GE_None && "Should not codegen an error"); + + llvm::SmallVector Ops; + for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { + if (i == 0) { + switch (BuiltinID) { + case NEON::BI__builtin_neon_vld1_v: + case NEON::BI__builtin_neon_vld1q_v: + case NEON::BI__builtin_neon_vld1_dup_v: + case NEON::BI__builtin_neon_vld1q_dup_v: + case NEON::BI__builtin_neon_vld1_lane_v: + case NEON::BI__builtin_neon_vld1q_lane_v: + case NEON::BI__builtin_neon_vst1_v: + case NEON::BI__builtin_neon_vst1q_v: + case NEON::BI__builtin_neon_vst1_lane_v: + case NEON::BI__builtin_neon_vst1q_lane_v: + case NEON::BI__builtin_neon_vldap1_lane_s64: + case NEON::BI__builtin_neon_vldap1q_lane_s64: + case NEON::BI__builtin_neon_vstl1_lane_s64: + case NEON::BI__builtin_neon_vstl1q_lane_s64: + // Get the alignment for the argument in addition to the value; + // we'll use it later. + llvm_unreachable("NYI"); + } + } + llvm_unreachable("NYI"); + } + + assert(!UnimplementedFeature::arm64SISDIntrinsicMap()); + + const Expr *Arg = E->getArg(E->getNumArgs() - 1); + NeonTypeFlags Type(0); + if (std::optional Result = + Arg->getIntegerConstantExpr(getContext())) + // Determine the type of this overloaded NEON intrinsic. + Type = NeonTypeFlags(Result->getZExtValue()); + + bool usgn = Type.isUnsigned(); + + // Handle non-overloaded intrinsics first. + switch (BuiltinID) { + default: + break; + case NEON::BI__builtin_neon_vabsh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vaddq_p128: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vldrq_p128: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vstrq_p128: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvts_f32_u32: + case NEON::BI__builtin_neon_vcvtd_f64_u64: + usgn = true; + [[fallthrough]]; + case NEON::BI__builtin_neon_vcvts_f32_s32: + case NEON::BI__builtin_neon_vcvtd_f64_s64: { + if (usgn) + llvm_unreachable("NYI"); + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvth_f16_u16: + case NEON::BI__builtin_neon_vcvth_f16_u32: + case NEON::BI__builtin_neon_vcvth_f16_u64: + usgn = true; + [[fallthrough]]; + case NEON::BI__builtin_neon_vcvth_f16_s16: + case NEON::BI__builtin_neon_vcvth_f16_s32: + case NEON::BI__builtin_neon_vcvth_f16_s64: { + if (usgn) + llvm_unreachable("NYI"); + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvtah_u16_f16: + case NEON::BI__builtin_neon_vcvtmh_u16_f16: + case NEON::BI__builtin_neon_vcvtnh_u16_f16: + case NEON::BI__builtin_neon_vcvtph_u16_f16: + case NEON::BI__builtin_neon_vcvth_u16_f16: + case NEON::BI__builtin_neon_vcvtah_s16_f16: + case NEON::BI__builtin_neon_vcvtmh_s16_f16: + case NEON::BI__builtin_neon_vcvtnh_s16_f16: + case NEON::BI__builtin_neon_vcvtph_s16_f16: + case NEON::BI__builtin_neon_vcvth_s16_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcaleh_f16: + case NEON::BI__builtin_neon_vcalth_f16: + case NEON::BI__builtin_neon_vcageh_f16: + case NEON::BI__builtin_neon_vcagth_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvth_n_s16_f16: + case NEON::BI__builtin_neon_vcvth_n_u16_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvth_n_f16_s16: + case NEON::BI__builtin_neon_vcvth_n_f16_u16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vpaddd_s64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vpaddd_f64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vpadds_f32: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vceqzd_s64: + case NEON::BI__builtin_neon_vceqzd_f64: + case NEON::BI__builtin_neon_vceqzs_f32: + case NEON::BI__builtin_neon_vceqzh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vcgezd_s64: + case NEON::BI__builtin_neon_vcgezd_f64: + case NEON::BI__builtin_neon_vcgezs_f32: + case NEON::BI__builtin_neon_vcgezh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vclezd_s64: + case NEON::BI__builtin_neon_vclezd_f64: + case NEON::BI__builtin_neon_vclezs_f32: + case NEON::BI__builtin_neon_vclezh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vcgtzd_s64: + case NEON::BI__builtin_neon_vcgtzd_f64: + case NEON::BI__builtin_neon_vcgtzs_f32: + case NEON::BI__builtin_neon_vcgtzh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vcltzd_s64: + case NEON::BI__builtin_neon_vcltzd_f64: + case NEON::BI__builtin_neon_vcltzs_f32: + case NEON::BI__builtin_neon_vcltzh_f16: + llvm_unreachable("NYI"); + + case NEON::BI__builtin_neon_vceqzd_u64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vceqd_f64: + case NEON::BI__builtin_neon_vcled_f64: + case NEON::BI__builtin_neon_vcltd_f64: + case NEON::BI__builtin_neon_vcged_f64: + case NEON::BI__builtin_neon_vcgtd_f64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vceqs_f32: + case NEON::BI__builtin_neon_vcles_f32: + case NEON::BI__builtin_neon_vclts_f32: + case NEON::BI__builtin_neon_vcges_f32: + case NEON::BI__builtin_neon_vcgts_f32: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vceqh_f16: + case NEON::BI__builtin_neon_vcleh_f16: + case NEON::BI__builtin_neon_vclth_f16: + case NEON::BI__builtin_neon_vcgeh_f16: + case NEON::BI__builtin_neon_vcgth_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vceqd_s64: + case NEON::BI__builtin_neon_vceqd_u64: + case NEON::BI__builtin_neon_vcgtd_s64: + case NEON::BI__builtin_neon_vcgtd_u64: + case NEON::BI__builtin_neon_vcltd_s64: + case NEON::BI__builtin_neon_vcltd_u64: + case NEON::BI__builtin_neon_vcged_u64: + case NEON::BI__builtin_neon_vcged_s64: + case NEON::BI__builtin_neon_vcled_u64: + case NEON::BI__builtin_neon_vcled_s64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vtstd_s64: + case NEON::BI__builtin_neon_vtstd_u64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vset_lane_i8: + case NEON::BI__builtin_neon_vset_lane_i16: + case NEON::BI__builtin_neon_vset_lane_i32: + case NEON::BI__builtin_neon_vset_lane_i64: + case NEON::BI__builtin_neon_vset_lane_bf16: + case NEON::BI__builtin_neon_vset_lane_f32: + case NEON::BI__builtin_neon_vsetq_lane_i8: + case NEON::BI__builtin_neon_vsetq_lane_i16: + case NEON::BI__builtin_neon_vsetq_lane_i32: + case NEON::BI__builtin_neon_vsetq_lane_i64: + case NEON::BI__builtin_neon_vsetq_lane_bf16: + case NEON::BI__builtin_neon_vsetq_lane_f32: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vset_lane_f64: + // The vector type needs a cast for the v1f64 variant. + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vsetq_lane_f64: + // The vector type needs a cast for the v2f64 variant. + llvm_unreachable("NYI"); + + case NEON::BI__builtin_neon_vget_lane_i8: + case NEON::BI__builtin_neon_vdupb_lane_i8: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vgetq_lane_i8: + case NEON::BI__builtin_neon_vdupb_laneq_i8: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vget_lane_i16: + case NEON::BI__builtin_neon_vduph_lane_i16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vgetq_lane_i16: + case NEON::BI__builtin_neon_vduph_laneq_i16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vget_lane_i32: + case NEON::BI__builtin_neon_vdups_lane_i32: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vdups_lane_f32: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vgetq_lane_i32: + case NEON::BI__builtin_neon_vdups_laneq_i32: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vget_lane_i64: + case NEON::BI__builtin_neon_vdupd_lane_i64: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vdupd_lane_f64: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vgetq_lane_i64: + case NEON::BI__builtin_neon_vdupd_laneq_i64: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vget_lane_f32: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vget_lane_f64: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vgetq_lane_f32: + case NEON::BI__builtin_neon_vdups_laneq_f32: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vgetq_lane_f64: + case NEON::BI__builtin_neon_vdupd_laneq_f64: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vaddh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vsubh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vmulh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vdivh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vfmah_f16: + // NEON intrinsic puts accumulator first, unlike the LLVM fma. + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vfmsh_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddd_s64: + case NEON::BI__builtin_neon_vaddd_u64: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vsubd_s64: + case NEON::BI__builtin_neon_vsubd_u64: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vqdmlalh_s16: + case NEON::BI__builtin_neon_vqdmlslh_s16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqshlud_n_s64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqshld_n_u64: + case NEON::BI__builtin_neon_vqshld_n_s64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrshrd_n_u64: + case NEON::BI__builtin_neon_vrshrd_n_s64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrsrad_n_u64: + case NEON::BI__builtin_neon_vrsrad_n_s64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vshld_n_s64: + case NEON::BI__builtin_neon_vshld_n_u64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vshrd_n_s64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vshrd_n_u64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vsrad_n_s64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vsrad_n_u64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqdmlalh_lane_s16: + case NEON::BI__builtin_neon_vqdmlalh_laneq_s16: + case NEON::BI__builtin_neon_vqdmlslh_lane_s16: + case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqdmlals_s32: + case NEON::BI__builtin_neon_vqdmlsls_s32: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqdmlals_lane_s32: + case NEON::BI__builtin_neon_vqdmlals_laneq_s32: + case NEON::BI__builtin_neon_vqdmlsls_lane_s32: + case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vget_lane_bf16: + case NEON::BI__builtin_neon_vduph_lane_bf16: + case NEON::BI__builtin_neon_vduph_lane_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vgetq_lane_bf16: + case NEON::BI__builtin_neon_vduph_laneq_bf16: + case NEON::BI__builtin_neon_vduph_laneq_f16: { + llvm_unreachable("NYI"); + } + + case clang::AArch64::BI_InterlockedAdd: + case clang::AArch64::BI_InterlockedAdd64: { + llvm_unreachable("NYI"); + } + } + + // From here on it's pure NEON based + assert(UnimplementedFeature::getNeonType() && "NYI"); + return {}; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp new file mode 100644 index 000000000000..d26b73c0dfe1 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp @@ -0,0 +1,37 @@ +//===---- CIRGenBuiltinX86.cpp - Emit CIR for X86 builtins ----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit x86/x86_64 Builtin calls as CIR or a function +// call to be later resolved. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCXXABI.h" +#include "CIRGenCall.h" +#include "CIRGenFunction.h" +#include "CIRGenModule.h" +#include "TargetInfo.h" +#include "UnimplementedFeatureGuarding.h" + +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Value.h" +#include "clang/AST/GlobalDecl.h" +#include "clang/Basic/Builtins.h" +#include "clang/Basic/TargetBuiltins.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace cir; +using namespace clang; +using namespace mlir::cir; + +mlir::Value CIRGenFunction::buildX86BuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { + llvm_unreachable("NYI"); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 63450258ad99..f1bd21390540 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -655,6 +655,11 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre); + // Target specific builtin emission + mlir::Value buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, + llvm::Triple::ArchType Arch); + mlir::Value buildX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E); + // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. struct PrototypeWrapper { diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 154aefbdba02..40134a0cb113 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -10,6 +10,8 @@ add_clang_library(clangCIR CIRAsm.cpp CIRGenAtomic.cpp CIRGenBuiltin.cpp + CIRGenBuiltinAArch64.cpp + CIRGenBuiltinX86.cpp CIRGenCXX.cpp CIRGenCXXABI.cpp CIRGenCall.cpp diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index c2077f141821..2bb5f13a3d3a 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -170,6 +170,12 @@ struct UnimplementedFeature { static bool asm_memory_effects() { return false; } static bool asm_vector_type() { return false; } static bool asm_llvm_assume() { return false; } + + // ARM builtins + static bool translateAarch64ToMsvcIntrin() { return false; } + static bool neonEquivalentIntrinsicMap() { return false; } + static bool arm64SISDIntrinsicMap() { return false; } + static bool getNeonType() { return false; } }; } // namespace cir From 28056ff1c54d7d13630a01e4ff4fdc7d0b4c987c Mon Sep 17 00:00:00 2001 From: orbiri Date: Sat, 4 May 2024 22:03:01 +0300 Subject: [PATCH 1548/2301] [CIR] Remove redundant result type of cir.const operation (#581) --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 4 +- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 15 ++- clang/include/clang/CIR/Dialect/IR/CIROps.td | 47 ++++---- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 37 +++---- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 29 +++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 +- clang/test/CIR/CodeGen/OpenMP/parallel.cpp | 4 +- clang/test/CIR/CodeGen/String.cpp | 8 +- clang/test/CIR/CodeGen/agg-copy.c | 6 +- clang/test/CIR/CodeGen/agg-init.cpp | 10 +- clang/test/CIR/CodeGen/array-init-destroy.cpp | 4 +- clang/test/CIR/CodeGen/array-init.c | 16 +-- clang/test/CIR/CodeGen/array.cpp | 12 +-- clang/test/CIR/CodeGen/atomic.cpp | 2 +- clang/test/CIR/CodeGen/basic.c | 4 +- clang/test/CIR/CodeGen/basic.cpp | 36 +++---- clang/test/CIR/CodeGen/binassign.cpp | 2 +- clang/test/CIR/CodeGen/binop.cpp | 16 +-- clang/test/CIR/CodeGen/bitfields.c | 22 ++-- clang/test/CIR/CodeGen/bitfields.cpp | 4 +- clang/test/CIR/CodeGen/bitint.cpp | 12 +-- clang/test/CIR/CodeGen/bool.c | 28 ++--- .../CodeGen/builtin-constant-evaluated.cpp | 2 +- clang/test/CIR/CodeGen/call.c | 8 +- clang/test/CIR/CodeGen/call.cpp | 2 +- clang/test/CIR/CodeGen/cast.cpp | 12 +-- clang/test/CIR/CodeGen/compound-literal.c | 4 +- clang/test/CIR/CodeGen/const-array.c | 2 +- clang/test/CIR/CodeGen/coro-task.cpp | 12 +-- clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp | 2 +- clang/test/CIR/CodeGen/delete.cpp | 2 +- clang/test/CIR/CodeGen/derived-to-base.cpp | 4 +- clang/test/CIR/CodeGen/dtors.cpp | 2 +- clang/test/CIR/CodeGen/dynamic-cast.cpp | 16 +-- clang/test/CIR/CodeGen/evaluate-expr.c | 4 +- clang/test/CIR/CodeGen/expressions.cpp | 2 +- clang/test/CIR/CodeGen/fullexpr.cpp | 2 +- clang/test/CIR/CodeGen/fun-ptr.c | 4 +- clang/test/CIR/CodeGen/globals.cpp | 4 +- clang/test/CIR/CodeGen/gnu-extension.c | 2 +- clang/test/CIR/CodeGen/goto.cpp | 4 +- clang/test/CIR/CodeGen/hello.c | 2 +- clang/test/CIR/CodeGen/if-constexpr.cpp | 18 ++-- clang/test/CIR/CodeGen/if.cir | 12 +-- clang/test/CIR/CodeGen/inc-bool.cpp | 2 +- clang/test/CIR/CodeGen/lalg.c | 4 +- clang/test/CIR/CodeGen/lambda.cpp | 8 +- clang/test/CIR/CodeGen/literals.c | 2 +- clang/test/CIR/CodeGen/literals.cpp | 2 +- clang/test/CIR/CodeGen/loop-scope.cpp | 4 +- clang/test/CIR/CodeGen/loop.cpp | 38 +++---- clang/test/CIR/CodeGen/new.cpp | 4 +- clang/test/CIR/CodeGen/nrvo.cpp | 4 +- clang/test/CIR/CodeGen/offsetof.c | 4 +- .../CIR/CodeGen/pointer-to-data-member.cpp | 6 +- clang/test/CIR/CodeGen/pointers.cpp | 16 +-- clang/test/CIR/CodeGen/predefined.cpp | 2 +- clang/test/CIR/CodeGen/ptrdiff.cpp | 2 +- clang/test/CIR/CodeGen/rangefor.cpp | 4 +- clang/test/CIR/CodeGen/scope.cir | 8 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 6 +- clang/test/CIR/CodeGen/static.cpp | 8 +- clang/test/CIR/CodeGen/std-array.cpp | 2 +- clang/test/CIR/CodeGen/store.c | 4 +- clang/test/CIR/CodeGen/struct.c | 6 +- clang/test/CIR/CodeGen/struct.cpp | 10 +- clang/test/CIR/CodeGen/switch.cir | 30 +++--- clang/test/CIR/CodeGen/switch.cpp | 24 ++--- clang/test/CIR/CodeGen/ternary.cir | 14 +-- clang/test/CIR/CodeGen/ternary.cpp | 12 +-- .../test/CIR/CodeGen/three-way-comparison.cpp | 20 ++-- clang/test/CIR/CodeGen/try-catch.cpp | 4 +- clang/test/CIR/CodeGen/types-nullptr.cpp | 2 +- clang/test/CIR/CodeGen/unary.cpp | 22 ++-- clang/test/CIR/CodeGen/union-init.c | 12 +-- clang/test/CIR/CodeGen/union.cpp | 4 +- clang/test/CIR/CodeGen/vectype.cpp | 8 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/IR/bit.cir | 32 +++--- clang/test/CIR/IR/cast.cir | 2 +- clang/test/CIR/IR/cir-ops.cir | 14 +-- clang/test/CIR/IR/data-member-ptr.cir | 12 +-- clang/test/CIR/IR/exceptions.cir | 2 +- clang/test/CIR/IR/global.cir | 4 +- clang/test/CIR/IR/int.cir | 24 ++--- clang/test/CIR/IR/invalid.cir | 68 +++++------- clang/test/CIR/IR/ptr_stride.cir | 4 +- clang/test/CIR/IR/struct.cir | 4 +- clang/test/CIR/IR/switch.cir | 2 +- clang/test/CIR/IR/ternary.cir | 8 +- clang/test/CIR/IR/try.cir | 2 +- clang/test/CIR/Lowering/OpenMP/parallel.cir | 6 +- .../ThroughMLIR/binop-unsigned-int.cir | 4 +- clang/test/CIR/Lowering/ThroughMLIR/bool.cir | 2 +- .../test/CIR/Lowering/ThroughMLIR/branch.cir | 4 +- clang/test/CIR/Lowering/ThroughMLIR/cos.cir | 8 +- clang/test/CIR/Lowering/ThroughMLIR/dot.cir | 2 +- clang/test/CIR/Lowering/ThroughMLIR/goto.cir | 6 +- .../test/CIR/Lowering/ThroughMLIR/memref.cir | 2 +- clang/test/CIR/Lowering/ThroughMLIR/scope.cir | 2 +- .../test/CIR/Lowering/ThroughMLIR/tenary.cir | 6 +- .../Lowering/ThroughMLIR/unary-inc-dec.cir | 2 +- .../Lowering/ThroughMLIR/unary-plus-minus.cir | 2 +- clang/test/CIR/Lowering/binop-signed-int.cir | 4 +- .../test/CIR/Lowering/binop-unsigned-int.cir | 4 +- clang/test/CIR/Lowering/bool-to-int.cir | 4 +- clang/test/CIR/Lowering/bool.cir | 2 +- clang/test/CIR/Lowering/branch.cir | 4 +- clang/test/CIR/Lowering/cast.cir | 4 +- clang/test/CIR/Lowering/class.cir | 2 +- clang/test/CIR/Lowering/const.cir | 24 ++--- clang/test/CIR/Lowering/dot.cir | 4 +- clang/test/CIR/Lowering/expect.cir | 2 +- clang/test/CIR/Lowering/float.cir | 10 +- clang/test/CIR/Lowering/globals.cir | 8 +- clang/test/CIR/Lowering/goto.cir | 6 +- clang/test/CIR/Lowering/hello.cir | 2 +- clang/test/CIR/Lowering/if.cir | 6 +- clang/test/CIR/Lowering/int-wrap.cir | 2 +- clang/test/CIR/Lowering/loadstorealloca.cir | 4 +- clang/test/CIR/Lowering/loops-with-break.cir | 92 ++++++++-------- .../test/CIR/Lowering/loops-with-continue.cir | 100 +++++++++--------- clang/test/CIR/Lowering/ptrstride.cir | 2 +- clang/test/CIR/Lowering/scope.cir | 4 +- clang/test/CIR/Lowering/struct.cir | 2 +- clang/test/CIR/Lowering/switch.cir | 12 +-- clang/test/CIR/Lowering/ternary.cir | 6 +- clang/test/CIR/Lowering/types.cir | 4 +- clang/test/CIR/Lowering/unary-inc-dec.cir | 2 +- clang/test/CIR/Lowering/unary-not.cir | 2 +- clang/test/CIR/Lowering/unary-plus-minus.cir | 2 +- clang/test/CIR/Lowering/unions.cir | 2 +- clang/test/CIR/Lowering/variadics.cir | 2 +- clang/test/CIR/Transforms/lib-opt-find.cpp | 6 +- clang/test/CIR/Transforms/merge-cleanups.cir | 18 ++-- 136 files changed, 649 insertions(+), 669 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index a3df0ef0dcdc..110bedf5e456 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -328,8 +328,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } mlir::TypedAttr getConstPtrAttr(mlir::Type t, uint64_t v) { - assert(t.isa() && "expected cir.ptr"); - return mlir::cir::ConstPtrAttr::get(getContext(), t, v); + return mlir::cir::ConstPtrAttr::get(getContext(), + t.cast(), v); } // Creates constant nullptr for pointer type ty. diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 820ffe9c0a24..6c015d45ec70 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -248,20 +248,29 @@ def FPAttr : CIR_Attr<"FP", "fp", [TypedAttrInterface]> { def ConstPtrAttr : CIR_Attr<"ConstPtr", "ptr", [TypedAttrInterface]> { let summary = "Holds a constant pointer value"; - let parameters = (ins AttributeSelfTypeParameter<"">:$type, "uint64_t":$value); + let parameters = (ins + AttributeSelfTypeParameter<"", "::mlir::cir::PointerType">:$type, + "uint64_t":$value); let description = [{ A pointer attribute is a literal attribute that represents an integral value of a pointer type. }]; let builders = [ AttrBuilderWithInferredContext<(ins "Type":$type, "uint64_t":$value), [{ - return $_get(type.getContext(), type, value); + return $_get(type.getContext(), type.cast(), value); + }]>, + AttrBuilder<(ins "Type":$type, + "uint64_t":$value), [{ + return $_get($_ctxt, type.cast(), value); }]>, ]; let extraClassDeclaration = [{ bool isNullValue() const { return getValue() == 0; } }]; - let hasCustomAssemblyFormat = 1; + + let assemblyFormat = [{ + `<` custom($value) `>` + }]; } //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1991884b9ef5..6c5a1171b3d5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -264,7 +264,7 @@ def PtrStrideOp : CIR_Op<"ptr_stride", a stride (second operand). ```mlir - %3 = cir.const(0 : i32) : i32 + %3 = cir.const 0 : i32 %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : i32), !cir.ptr ``` }]; @@ -293,7 +293,7 @@ def PtrStrideOp : CIR_Op<"ptr_stride", //===----------------------------------------------------------------------===// def ConstantOp : CIR_Op<"const", - [ConstantLike, Pure]> { + [ConstantLike, Pure, AllTypesMatch<["value", "res"]>]> { // FIXME: Use SameOperandsAndResultType or similar and prevent eye bleeding // type repetition in the assembly form. @@ -303,9 +303,9 @@ def ConstantOp : CIR_Op<"const", attached to the operation as an attribute. ```mlir - %0 = cir.const(42 : i32) : i32 - %1 = cir.const(4.2 : f32) : f32 - %2 = cir.const(nullptr : !cir.ptr) : !cir.ptr + %0 = cir.const 42 : i32 + %1 = cir.const 4.2 : f32 + %2 = cir.const nullptr : !cir.ptr ``` }]; @@ -315,9 +315,7 @@ def ConstantOp : CIR_Op<"const", // The constant operation returns a single value of CIR_AnyType. let results = (outs CIR_AnyType:$res); - let assemblyFormat = [{ - `(` custom($value) `)` attr-dict `:` type($res) - }]; + let assemblyFormat = "attr-dict $value"; let hasVerifier = 1; @@ -1111,13 +1109,13 @@ def BitClrsbOp : CIR_BitOp<"bit.clrsb", AnyTypeOf<[SInt32, SInt64]>> { !s32i = !cir.int // %0 = 0xDEADBEEF, 0b1101_1110_1010_1101_1011_1110_1110_1111 - %0 = cir.const(#cir.int<3735928559> : !s32i) : !s32i + %0 = cir.const #cir.int<3735928559> : !s32i // %1 will be 1 because there is 1 bit following the most significant bit // that is identical to it. %1 = cir.bit.clrsb(%0 : !s32i) : !s32i // %2 = 1, 0b0000_0000_0000_0000_0000_0000_0000_0001 - %2 = cir.const(#cir.int<1> : !s32i) : !s32i + %2 = cir.const #cir.int<1> : !s32i // %3 will be 30 %3 = cir.bit.clrsb(%2 : !s32i) : !s32i ``` @@ -1142,7 +1140,7 @@ def BitClzOp : CIR_BitOp<"bit.clz", AnyTypeOf<[UInt16, UInt32, UInt64]>> { !u32i = !cir.int // %0 = 0b0000_0000_0000_0000_0000_0000_0000_1000 - %0 = cir.const(#cir.int<8> : !u32i) : !u32i + %0 = cir.const #cir.int<8> : !u32i // %1 will be 28 %1 = cir.bit.clz(%0 : !u32i) : !s32i ``` @@ -1167,7 +1165,7 @@ def BitCtzOp : CIR_BitOp<"bit.ctz", AnyTypeOf<[UInt16, UInt32, UInt64]>> { !u32i = !cir.int // %0 = 0b1000 - %0 = cir.const(#cir.int<8> : !u32i) : !u32i + %0 = cir.const #cir.int<8> : !u32i // %1 will be 3 %1 = cir.bit.ctz(%0 : !u32i) : !s32i ``` @@ -1190,7 +1188,7 @@ def BitFfsOp : CIR_BitOp<"bit.ffs", AnyTypeOf<[SInt32, SInt64]>> { !s32i = !cir.int // %0 = 0x0010_1000 - %0 = cir.const(#cir.int<40> : !s32i) : !s32i + %0 = cir.const #cir.int<40> : !s32i // #1 will be 4 since the 4th least significant bit is 1. %1 = cir.bit.ffs(%0 : !s32i) : !s32i ``` @@ -1212,9 +1210,9 @@ def BitParityOp : CIR_BitOp<"bit.parity", AnyTypeOf<[UInt32, UInt64]>> { !u32i = !cir.int // %0 = 0x0110_1000 - %0 = cir.const(#cir.int<104> : !u32i) : !s32i + %0 = cir.const #cir.int<104> : !u32i // %1 will be 1 since there are 3 1-bits in %0 - %1 = cir.bit.parity(%0 : !u32i) : !s32i + %1 = cir.bit.parity(%0 : !u32i) : !u32i ``` }]; } @@ -1230,13 +1228,12 @@ def BitPopcountOp Example: ```mlir - !s32i = !cir.int !u32i = !cir.int // %0 = 0x0110_1000 - %0 = cir.const(#cir.int<104> : !u32i) : !s32i + %0 = cir.const #cir.int<104> : !u32i // %1 will be 3 since there are 3 1-bits in %0 - %1 = cir.bit.popcount(%0 : !u32i) : !s32i + %1 = cir.bit.popcount(%0 : !u32i) : !u32i ``` }]; } @@ -1260,7 +1257,7 @@ def ByteswapOp : CIR_Op<"bswap", [Pure, SameOperandsAndResultType]> { !u32i = !cir.int // %0 = 0x12345678 - %0 = cir.const(#cir.int<305419896> : !u32i) : !u32i + %0 = cir.const #cir.int<305419896> : !u32i // %1 should be 0x78563412 %1 = cir.bswap(%0 : !u32i) : !u32i @@ -1302,12 +1299,12 @@ def CmpThreeWayOp : CIR_Op<"cmp3way", [Pure, SameTypeOperands]> { #cmp3way_strong = #cmp3way_info #cmp3way_partial = #cmp3way_info - %0 = cir.const(#cir.int<0> : !s32i) : !s32i - %1 = cir.const(#cir.int<1> : !s32i) : !s32i + %0 = cir.const #cir.int<0> : !s32i + %1 = cir.const #cir.int<1> : !s32i %2 = cir.cmp3way(%0 : !s32i, %1, #cmp3way_strong) : !s8i - %3 = cir.const(#cir.fp<0.0> : !cir.float) : !cir.float - %4 = cir.const(#cir.fp<1.0> : !cir.float) : !cir.float + %3 = cir.const #cir.fp<0.0> : !cir.float + %4 = cir.const #cir.fp<1.0> : !cir.float %5 = cir.cmp3way(%3 : !cir.float, %4, #cmp3way_partial) : !s8i ``` }]; @@ -1961,7 +1958,7 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { !struct_type = !cir.struct, !cir.int, !cir.int} #cir.record.decl.ast> #bfi_d = #cir.bitfield_info - %1 = cir.const(#cir.int<3> : !s32i) : !s32i + %1 = cir.const #cir.int<3> : !s32i %2 = cir.load %0 : !cir.ptr>, !cir.ptr %3 = cir.get_member %2[1] {name = "d"} : !cir.ptr -> !cir.ptr %4 = cir.set_bitfield(#bfi_d, %3 : !cir.ptr, %1 : !s32i) -> !s32i @@ -3045,7 +3042,7 @@ def MemCpyOp : CIR_Op<"libc.memcpy"> { ```mlir // Copying 2 bytes from one array to a struct: - %2 = cir.const(#cir.int<2> : !u32i) : !u32i + %2 = cir.const #cir.int<2> : !u32i cir.libc.memcpy %2 bytes from %arr to %struct : !cir.ptr -> !cir.ptr ``` }]; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 619ef026f410..aee7280881be 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -464,7 +464,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, mlir::Value element = begin; // Don't build the 'one' before the cycle to avoid - // emmiting the redundant cir.const(1) instrs. + // emmiting the redundant `cir.const 1` instrs. mlir::Value one; // Emit the explicit initializers. diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index df3032587f33..21f2f7396c75 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -43,6 +43,11 @@ static mlir::ParseResult parseFloatLiteral(mlir::AsmParser &parser, mlir::FailureOr &value, mlir::Type ty); +static mlir::ParseResult parseConstPtr(mlir::AsmParser &parser, + uint64_t &value); + +static void printConstPtr(mlir::AsmPrinter &p, uint64_t value); + #define GET_ATTRDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" @@ -213,37 +218,23 @@ void LangAttr::print(AsmPrinter &printer) const { // ConstPtrAttr definitions //===----------------------------------------------------------------------===// -Attribute ConstPtrAttr::parse(AsmParser &parser, Type odsType) { - uint64_t value; - - if (!odsType.isa()) - return {}; - - // Consume the '<' symbol. - if (parser.parseLess()) - return {}; +// TODO: Consider encoding the null value differently and use conditional +// assembly format instead of custom parsing/printing. +static ParseResult parseConstPtr(AsmParser &parser, uint64_t &value) { if (parser.parseOptionalKeyword("null").succeeded()) { value = 0; - } else { - if (parser.parseInteger(value)) - parser.emitError(parser.getCurrentLocation(), "expected integer value"); + return success(); } - // Consume the '>' symbol. - if (parser.parseGreater()) - return {}; - - return ConstPtrAttr::get(odsType, value); + return parser.parseInteger(value); } -void ConstPtrAttr::print(AsmPrinter &printer) const { - printer << '<'; - if (isNullValue()) - printer << "null"; +static void printConstPtr(AsmPrinter &p, uint64_t value) { + if (!value) + p << "null"; else - printer << getValue(); - printer << '>'; + p << value; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 923a013706b6..030d41c6163d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -364,23 +364,6 @@ LogicalResult ConstantOp::verify() { return checkConstantTypes(getOperation(), getType(), getValue()); } -static ParseResult parseConstantValue(OpAsmParser &parser, - mlir::Attribute &valueAttr) { - NamedAttrList attr; - return parser.parseAttribute(valueAttr, "value", attr); -} - -// FIXME: create a CIRConstAttr and hide this away for both global -// initialization and cir.const operation. -static void printConstant(OpAsmPrinter &p, Attribute value) { - p.printAttribute(value); -} - -static void printConstantValue(OpAsmPrinter &p, cir::ConstantOp op, - Attribute value) { - printConstant(p, value); -} - OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } //===----------------------------------------------------------------------===// @@ -1487,6 +1470,18 @@ ::llvm::SmallVector ForOp::getLoopRegions() { return {&getBody()}; } // GlobalOp //===----------------------------------------------------------------------===// +static ParseResult parseConstantValue(OpAsmParser &parser, + mlir::Attribute &valueAttr) { + NamedAttrList attr; + return parser.parseAttribute(valueAttr, "value", attr); +} + +// FIXME: create a CIRConstAttr and hide this away for both global +// initialization and cir.const operation. +static void printConstant(OpAsmPrinter &p, Attribute value) { + p.printAttribute(value); +} + static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, TypeAttr type, Attribute initAttr, mlir::Region &ctorRegion, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 25e27cb1a726..3a1661dd2898 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3179,10 +3179,10 @@ static void buildCtorDtorList( // cir.func @foo(%arg0: !s32i) -> !s32i { // %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool // cir.if %4 { -// %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// %5 = cir.const #cir.int<1> : !s32i // cir.return %5 : !s32i // } else { -// %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// %5 = cir.const #cir.int<0> : !s32i // cir.return %5 : !s32i // } // cir.return %arg0 : !s32i diff --git a/clang/test/CIR/CodeGen/OpenMP/parallel.cpp b/clang/test/CIR/CodeGen/OpenMP/parallel.cpp index a3c37da349b4..d2523d7b5396 100644 --- a/clang/test/CIR/CodeGen/OpenMP/parallel.cpp +++ b/clang/test/CIR/CodeGen/OpenMP/parallel.cpp @@ -18,10 +18,10 @@ void omp_parallel_2() { // CHECK: omp.parallel { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %[[XVarDecl:.+]] = {{.*}} ["x", init] -// CHECK-NEXT: %[[C1:.+]] = cir.const(#cir.int<1> : !s32i) +// CHECK-NEXT: %[[C1:.+]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store %[[C1]], %[[XVarDecl]] // CHECK-NEXT: %[[XVal:.+]] = cir.load %[[XVarDecl]] -// CHECK-NEXT: %[[COne:.+]] = cir.const(#cir.int<1> : !s32i) +// CHECK-NEXT: %[[COne:.+]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %[[BinOpVal:.+]] = cir.binop(add, %[[XVal]], %[[COne]]) // CHECK-NEXT: cir.store %[[BinOpVal]], %[[YVarDecl]] // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index fd87f3f287b1..c504c1d99d0c 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -22,10 +22,10 @@ void test() { // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 // CHECK-NEXT: %2 = cir.get_member %1[0] {name = "storage"} -// CHECK-NEXT: %3 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %3 = cir.const #cir.ptr : !cir.ptr // CHECK-NEXT: cir.store %3, %2 : !cir.ptr, !cir.ptr> // CHECK-NEXT: %4 = cir.get_member %1[1] {name = "size"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: %6 = cir.cast(integral, %5 : !s32i), !s64i // CHECK-NEXT: cir.store %6, %4 : !s64i, !cir.ptr // CHECK-NEXT: cir.return @@ -37,7 +37,7 @@ void test() { // CHECK-NEXT: cir.store %arg1, %1 // CHECK-NEXT: %2 = cir.load %0 // CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} -// CHECK-NEXT: %4 = cir.const(#cir.ptr : !cir.ptr) +// CHECK-NEXT: %4 = cir.const #cir.ptr : !cir.ptr // CHECK-NEXT: cir.store %4, %3 // CHECK-NEXT: %5 = cir.get_member %2[1] {name = "size"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : !cir.ptr, !s32i @@ -53,7 +53,7 @@ void test() { // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> // CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} : !cir.ptr -> !cir.ptr> -// CHECK-NEXT: %4 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %4 = cir.const #cir.ptr : !cir.ptr // CHECK-NEXT: cir.store %4, %3 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/agg-copy.c b/clang/test/CIR/CodeGen/agg-copy.c index 91e7d52c1d12..52d292d30b1a 100644 --- a/clang/test/CIR/CodeGen/agg-copy.c +++ b/clang/test/CIR/CodeGen/agg-copy.c @@ -15,10 +15,10 @@ typedef struct { // CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> // CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, !cir.ptr> // CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP3:%.*]] = cir.const #cir.int<1> : !s32i // CHECK: [[TMP4:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[TMP3]] : !s32i), !cir.ptr // CHECK: [[TMP5:%.*]] = cir.load [[TMP1]] : !cir.ptr>, !cir.ptr -// CHECK: [[TMP6:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP6:%.*]] = cir.const #cir.int<1> : !s32i // CHECK: [[TMP7:%.*]] = cir.ptr_stride([[TMP5]] : !cir.ptr, [[TMP6]] : !s32i), !cir.ptr // CHECK: cir.copy [[TMP7]] to [[TMP4]] : !cir.ptr void foo1(A* a1, A* a2) { @@ -68,7 +68,7 @@ A create() { A a; return a; } // CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, !cir.ptr, ["tmp"] {alignment = 4 : i64} // CHECK: [[TMP2:%.*]] = cir.call @create() : () -> !ty_22A22 // CHECK: cir.store [[TMP2]], [[TMP1]] : !ty_22A22, !cir.ptr -// CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr +// CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr void foo5() { A a; a = create(); diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 40af18348a8a..3d0d2a279797 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -19,10 +19,10 @@ void use() { yop{}; } // CHECK: cir.func @_Z3usev() // CHECK: %0 = cir.alloca !ty_22yep_22, !cir.ptr, ["agg.tmp.ensured"] {alignment = 4 : i64} // CHECK: %1 = cir.get_member %0[0] {name = "Status"} : !cir.ptr -> !cir.ptr -// CHECK: %2 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: %2 = cir.const #cir.int<0> : !u32i // CHECK: cir.store %2, %1 : !u32i, !cir.ptr // CHECK: %3 = cir.get_member %0[1] {name = "HC"} : !cir.ptr -> !cir.ptr -// CHECK: %4 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: %4 = cir.const #cir.int<0> : !u32i // CHECK: cir.store %4, %3 : !u32i, !cir.ptr // CHECK: cir.return // CHECK: } @@ -49,14 +49,14 @@ void yo() { // CHECK: cir.func @_Z2yov() // CHECK: %0 = cir.alloca !ty_22Yo22, !cir.ptr, ["ext"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !ty_22Yo22, !cir.ptr, ["ext2", init] {alignment = 8 : i64} -// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.ptr : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22Yo22) : !ty_22Yo22 +// CHECK: %2 = cir.const #cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.ptr : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22Yo22 // CHECK: cir.store %2, %0 : !ty_22Yo22, !cir.ptr // CHECK: %3 = cir.get_member %1[0] {name = "type"} : !cir.ptr -> !cir.ptr -// CHECK: %4 = cir.const(#cir.int<1000066001> : !u32i) : !u32i +// CHECK: %4 = cir.const #cir.int<1000066001> : !u32i // CHECK: cir.store %4, %3 : !u32i, !cir.ptr // CHECK: %5 = cir.get_member %1[1] {name = "next"} : !cir.ptr -> !cir.ptr> // CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, !cir.ptr> // CHECK: %7 = cir.get_member %1[2] {name = "createFlags"} : !cir.ptr -> !cir.ptr -// CHECK: %8 = cir.const(#cir.int<0> : !u64i) : !u64i +// CHECK: %8 = cir.const #cir.int<0> : !u64i // CHECK: cir.store %8, %7 : !u64i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/array-init-destroy.cpp b/clang/test/CIR/CodeGen/array-init-destroy.cpp index 975c14635e16..2959e0085314 100644 --- a/clang/test/CIR/CodeGen/array-init-destroy.cpp +++ b/clang/test/CIR/CodeGen/array-init-destroy.cpp @@ -36,14 +36,14 @@ void x() { // AFTER: cir.func @_Z1xv() // AFTER: %[[ArrayAddr0:.*]] = cir.alloca !cir.array -// AFTER: %[[ConstTwo:.*]] = cir.const(#cir.int<2> : !u64i) : !u64i +// AFTER: %[[ConstTwo:.*]] = cir.const #cir.int<2> : !u64i // AFTER: %[[ArrayBegin:.*]] = cir.cast(array_to_ptrdecay, %[[ArrayAddr0]] : !cir.ptr>), !cir.ptr // AFTER: %[[ArrayPastEnd:.*]] = cir.ptr_stride(%[[ArrayBegin]] : !cir.ptr, %[[ConstTwo]] : !u64i), !cir.ptr // AFTER: %[[TmpIdx:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // AFTER: cir.store %[[ArrayBegin]], %[[TmpIdx]] : !cir.ptr, !cir.ptr> // AFTER: cir.do { // AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : !cir.ptr>, !cir.ptr -// AFTER: %[[ConstOne:.*]] = cir.const(#cir.int<1> : !u64i) : !u64i +// AFTER: %[[ConstOne:.*]] = cir.const #cir.int<1> : !u64i // AFTER: cir.call @_ZN4xptoC1Ev(%[[ArrayElt]]) : (!cir.ptr) -> () // AFTER: %[[NextElt:.*]] = cir.ptr_stride(%[[ArrayElt]] : !cir.ptr, %[[ConstOne]] : !u64i), !cir.ptr // AFTER: cir.store %[[NextElt]], %[[TmpIdx]] : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c index 854cf377e2fc..d805d4ea1f4a 100644 --- a/clang/test/CIR/CodeGen/array-init.c +++ b/clang/test/CIR/CodeGen/array-init.c @@ -12,7 +12,7 @@ void buz(int x) { // CHECK-NEXT: [[X_ALLOCA:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK-NEXT: [[ARR:%.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 16 : i64} // CHECK-NEXT: cir.store %arg0, [[X_ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: [[ARR_INIT:%.*]] = cir.const(#cir.zero : !cir.array) : !cir.array +// CHECK-NEXT: [[ARR_INIT:%.*]] = cir.const #cir.zero : !cir.array // CHECK-NEXT: cir.store [[ARR_INIT]], [[ARR]] : !cir.array, !cir.ptr> // CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr // CHECK-NEXT: [[A_STORAGE0:%.*]] = cir.get_member [[FI_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr @@ -20,7 +20,7 @@ void buz(int x) { // CHECK-NEXT: [[X_VAL:%.*]] = cir.load [[X_ALLOCA]] : !cir.ptr, !s32i // CHECK-NEXT: [[X_CASTED:%.*]] = cir.cast(integral, [[X_VAL]] : !s32i), !s64i // CHECK-NEXT: cir.store [[X_CASTED]], [[B_STORAGE0]] : !s64i, !cir.ptr -// CHECK-NEXT: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK-NEXT: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride([[FI_EL]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: [[A_STORAGE1:%.*]] = cir.get_member [[SE_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: [[B_STORAGE1:%.*]] = cir.get_member [[SE_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr @@ -31,7 +31,7 @@ void foo() { } // CHECK: %0 = cir.alloca !cir.array, !cir.ptr>, ["bar"] {alignment = 16 : i64} -// CHECK-NEXT: %1 = cir.const(#cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array) : !cir.array +// CHECK-NEXT: %1 = cir.const #cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array // CHECK-NEXT: cir.store %1, %0 : !cir.array, !cir.ptr> void bar(int a, int b, int c) { int arr[] = {a,b,c}; @@ -45,7 +45,7 @@ void bar(int a, int b, int c) { // CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr // CHECK-NEXT: [[LOAD_A:%.*]] = cir.load [[A]] : !cir.ptr, !s32i // CHECK-NEXT: cir.store [[LOAD_A]], [[FI_EL]] : !s32i, !cir.ptr -// CHECK-NEXT: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK-NEXT: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride(%4 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: [[LOAD_B:%.*]] = cir.load [[B]] : !cir.ptr, !s32i // CHECK-NEXT: cir.store [[LOAD_B]], [[SE_EL]] : !s32i, !cir.ptr @@ -65,16 +65,16 @@ void zero_init(int x) { // CHECK: [[BEGIN:%.*]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK: [[VAR:%.*]] = cir.load [[VAR_ALLOC]] : !cir.ptr, !s32i // CHECK: cir.store [[VAR]], [[BEGIN]] : !s32i, !cir.ptr -// CHECK: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i // CHECK: [[ZERO_INIT_START:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr // CHECK: cir.store [[ZERO_INIT_START]], [[TEMP]] : !cir.ptr, !cir.ptr> -// CHECK: [[SIZE:%.*]] = cir.const(#cir.int<3> : !s64i) : !s64i +// CHECK: [[SIZE:%.*]] = cir.const #cir.int<3> : !s64i // CHECK: [[END:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[SIZE]] : !s64i), !cir.ptr // CHECK: cir.do { // CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : !cir.ptr>, !cir.ptr -// CHECK: [[FILLER:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: [[FILLER:%.*]] = cir.const #cir.int<0> : !s32i // CHECK: cir.store [[FILLER]], [[CUR]] : !s32i, !cir.ptr -// CHECK: [[ONE:%.*]] = cir.const(#cir.int<1> : !s64i) : !s64i +// CHECK: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i // CHECK: [[NEXT:%.*]] = cir.ptr_stride([[CUR]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr // CHECK: cir.store [[NEXT]], [[TEMP]] : !cir.ptr, !cir.ptr> // CHECK: cir.yield diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 20cbc48f387e..31649406fac1 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -15,8 +15,8 @@ void a1() { // CHECK: cir.func @_Z2a1v() // CHECK-NEXT: %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} -// CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %2 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : !s32i), !cir.ptr // CHECK-NEXT: cir.store %1, %4 : !s32i, !cir.ptr @@ -29,7 +29,7 @@ int *a2() { // CHECK: cir.func @_Z2a2v() -> !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} -// CHECK-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %2 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr // CHECK-NEXT: %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : !s32i), !cir.ptr // CHECK-NEXT: cir.store %4, %0 : !cir.ptr, !cir.ptr> @@ -76,7 +76,7 @@ void testPointerDecaySubscriptAccess(int arr[]) { // CHECK: cir.func @{{.+}}testPointerDecaySubscriptAccess arr[1]; // CHECK: %[[#BASE:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr - // CHECK: %[[#DIM1:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#DIM1:]] = cir.const #cir.int<1> : !s32i // CHECK: cir.ptr_stride(%[[#BASE]] : !cir.ptr, %[[#DIM1]] : !s32i), !cir.ptr } @@ -84,9 +84,9 @@ void testPointerDecayedArrayMultiDimSubscriptAccess(int arr[][3]) { // CHECK: cir.func @{{.+}}testPointerDecayedArrayMultiDimSubscriptAccess arr[1][2]; // CHECK: %[[#V1:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> - // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#V2:]] = cir.const #cir.int<1> : !s32i // CHECK: %[[#V3:]] = cir.ptr_stride(%[[#V1]] : !cir.ptr>, %[[#V2]] : !s32i), !cir.ptr> - // CHECK: %[[#V4:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: %[[#V4:]] = cir.const #cir.int<2> : !s32i // CHECK: %[[#V5:]] = cir.cast(array_to_ptrdecay, %[[#V3]] : !cir.ptr>), !cir.ptr // CHECK: cir.ptr_stride(%[[#V5]] : !cir.ptr, %[[#V4]] : !s32i), !cir.ptr } diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 2d92c1619ae9..501946ad2bf0 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -23,7 +23,7 @@ int basic_binop_fetch(int *i) { // CHECK: %[[ONE_ADDR:.*]] = cir.alloca !s32i, !cir.ptr, [".atomictmp"] {alignment = 4 : i64} // CHECK: cir.store %arg0, %[[ARGI]] : !cir.ptr, !cir.ptr> // CHECK: %[[I:.*]] = cir.load %[[ARGI]] : !cir.ptr>, !cir.ptr -// CHECK: %[[ONE:.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK: cir.store %[[ONE]], %[[ONE_ADDR]] : !s32i, !cir.ptr // CHECK: %[[VAL:.*]] = cir.load %[[ONE_ADDR]] : !cir.ptr, !s32i // CHECK: cir.atomic.fetch(add, %[[I]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) : !s32i diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index be9db2ab8ec5..5ef5dbf21a6e 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -25,7 +25,7 @@ int f2(void) { return 3; } // CIR: cir.func @f2() -> !s32i // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CIR-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i +// CIR-NEXT: %1 = cir.const #cir.int<3> : !s32i // CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr // CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i // CIR-NEXT: cir.return %2 : !s32i @@ -46,7 +46,7 @@ int f3(void) { // CIR: cir.func @f3() -> !s32i // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CIR-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} -// CIR-NEXT: %2 = cir.const(#cir.int<3> : !s32i) : !s32i +// CIR-NEXT: %2 = cir.const #cir.int<3> : !s32i // CIR-NEXT: cir.store %2, %1 : !s32i, !cir.ptr // CIR-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CIR-NEXT: cir.store %3, %0 : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index c1b60288e981..8817f97dca10 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -8,7 +8,7 @@ int *p0() { // CHECK: cir.func @_Z2p0v() -> !cir.ptr // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] -// CHECK: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %2 = cir.const #cir.ptr : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, !cir.ptr> int *p1() { @@ -19,7 +19,7 @@ int *p1() { // CHECK: cir.func @_Z2p1v() -> !cir.ptr // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["p"] -// CHECK: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %2 = cir.const #cir.ptr : !cir.ptr // CHECK: cir.store %2, %1 : !cir.ptr, !cir.ptr> int *p2() { @@ -36,18 +36,18 @@ int *p2() { // CHECK: cir.func @_Z2p2v() -> !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} -// CHECK-NEXT: %2 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK-NEXT: %2 = cir.const #cir.ptr : !cir.ptr // CHECK-NEXT: cir.store %2, %1 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.scope { // CHECK-NEXT: %7 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} -// CHECK-NEXT: %8 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %8 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store %8, %7 : !s32i, !cir.ptr // CHECK-NEXT: cir.store %7, %1 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %9 = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK-NEXT: %9 = cir.const #cir.int<42> : !s32i // CHECK-NEXT: %10 = cir.load deref %1 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.store %9, %10 : !s32i, !cir.ptr // CHECK-NEXT: } loc(#[[locScope:loc[0-9]+]]) -// CHECK-NEXT: %3 = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.const #cir.int<42> : !s32i // CHECK-NEXT: %4 = cir.load deref %1 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.store %3, %4 : !s32i, !cir.ptr // CHECK-NEXT: %5 = cir.load %1 : !cir.ptr>, !cir.ptr @@ -58,8 +58,8 @@ int *p2() { void b0() { bool x = true, y = false; } // CHECK: cir.func @_Z2b0v() -// CHECK: %2 = cir.const(#true) : !cir.bool -// CHECK: %3 = cir.const(#false) : !cir.bool +// CHECK: %2 = cir.const #true +// CHECK: %3 = cir.const #false void b1(int a) { bool b = a; } @@ -82,10 +82,10 @@ void if0(int a) { // CHECK: %3 = cir.load %0 : !cir.ptr, !s32i // CHECK: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.if %4 { -// CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<3> : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: } else { -// CHECK-NEXT: %5 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<4> : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: } // CHECK: } @@ -110,12 +110,12 @@ void if1(int a, bool b, bool c) { // CHECK: %5 = cir.load %0 : !cir.ptr, !s32i // CHECK: %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool // CHECK: cir.if %6 { -// CHECK: %7 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: %7 = cir.const #cir.int<3> : !s32i // CHECK: cir.store %7, %3 : !s32i, !cir.ptr // CHECK: cir.scope { // CHECK: %8 = cir.load %1 : !cir.ptr, !cir.bool // CHECK-NEXT: cir.if %8 { -// CHECK-NEXT: %9 = cir.const(#cir.int<8> : !s32i) : !s32i +// CHECK-NEXT: %9 = cir.const #cir.int<8> : !s32i // CHECK-NEXT: cir.store %9, %3 : !s32i, !cir.ptr // CHECK-NEXT: } // CHECK: } @@ -123,11 +123,11 @@ void if1(int a, bool b, bool c) { // CHECK: cir.scope { // CHECK: %8 = cir.load %2 : !cir.ptr, !cir.bool // CHECK-NEXT: cir.if %8 { -// CHECK-NEXT: %9 = cir.const(#cir.int<14> : !s32i) : !s32i +// CHECK-NEXT: %9 = cir.const #cir.int<14> : !s32i // CHECK-NEXT: cir.store %9, %3 : !s32i, !cir.ptr // CHECK-NEXT: } // CHECK: } -// CHECK: %7 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK: %7 = cir.const #cir.int<4> : !s32i // CHECK: cir.store %7, %3 : !s32i, !cir.ptr // CHECK: } // CHECK: } @@ -157,9 +157,9 @@ void x() { // CHECK: cir.func @_Z1xv() // CHECK: %0 = cir.alloca !cir.bool, !cir.ptr, ["b0", init] {alignment = 1 : i64} // CHECK: %1 = cir.alloca !cir.bool, !cir.ptr, ["b1", init] {alignment = 1 : i64} -// CHECK: %2 = cir.const(#true) : !cir.bool +// CHECK: %2 = cir.const #true // CHECK: cir.store %2, %0 : !cir.bool, !cir.ptr -// CHECK: %3 = cir.const(#false) : !cir.bool +// CHECK: %3 = cir.const #false // CHECK: cir.store %3, %1 : !cir.bool, !cir.ptr typedef unsigned long size_type; @@ -171,10 +171,10 @@ size_type max_size() { // CHECK: cir.func @_Z8max_sizev() // CHECK: %0 = cir.alloca !u64i, !cir.ptr, ["__retval"] {alignment = 8 : i64} -// CHECK: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %1 = cir.const #cir.int<0> : !s32i // CHECK: %2 = cir.unary(not, %1) : !s32i, !s32i // CHECK: %3 = cir.cast(integral, %2 : !s32i), !u64i -// CHECK: %4 = cir.const(#cir.int<8> : !u64i) : !u64i +// CHECK: %4 = cir.const #cir.int<8> : !u64i // CHECK: %5 = cir.binop(div, %3, %4) : !u64i // CHECK-DAG: #[[locScope]] = loc(fused[#[[locScopeA:loc[0-9]+]], #[[locScopeB:loc[0-9]+]]]) diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp index 79cbc8baa96c..3e09281072e2 100644 --- a/clang/test/CIR/CodeGen/binassign.cpp +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -66,7 +66,7 @@ void exec() { // CHECK: %1 = cir.call @_Z5gettyv() : () -> !u32i // CHECK: cir.store %1, %0 : !u32i, !cir.ptr // CHECK: %2 = cir.cast(integral, %1 : !u32i), !s32i -// CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %3 = cir.const #cir.int<0> : !s32i // CHECK: %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool // CHECK: cir.if %4 { diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index 0564e9c8e89f..30b54beab761 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -33,26 +33,26 @@ void b1(bool a, bool b) { // CHECK: cir.ternary(%3, true // CHECK-NEXT: %7 = cir.load %1 // CHECK-NEXT: cir.ternary(%7, true -// CHECK-NEXT: cir.const(#true) +// CHECK-NEXT: cir.const #true // CHECK-NEXT: cir.yield // CHECK-NEXT: false { -// CHECK-NEXT: cir.const(#false) +// CHECK-NEXT: cir.const #false // CHECK-NEXT: cir.yield // CHECK: cir.yield // CHECK-NEXT: false { -// CHECK-NEXT: cir.const(#false) +// CHECK-NEXT: cir.const #false // CHECK-NEXT: cir.yield // CHECK: cir.ternary(%5, true -// CHECK-NEXT: cir.const(#true) +// CHECK-NEXT: cir.const #true // CHECK-NEXT: cir.yield // CHECK-NEXT: false { // CHECK-NEXT: %7 = cir.load %1 // CHECK-NEXT: cir.ternary(%7, true -// CHECK-NEXT: cir.const(#true) +// CHECK-NEXT: cir.const #true // CHECK-NEXT: cir.yield // CHECK-NEXT: false { -// CHECK-NEXT: cir.const(#false) +// CHECK-NEXT: cir.const #false // CHECK-NEXT: cir.yield void b2(bool a) { @@ -64,13 +64,13 @@ void b2(bool a) { // CHECK: %0 = cir.alloca {{.*}} ["a", init] // CHECK: %1 = cir.alloca {{.*}} ["x", init] -// CHECK: %2 = cir.const(#false) +// CHECK: %2 = cir.const #false // CHECK-NEXT: cir.store %2, %1 // CHECK-NEXT: %3 = cir.load %0 // CHECK-NEXT: cir.store %3, %1 // CHECK-NEXT: %4 = cir.load %0 // CHECK-NEXT: cir.store %4, %1 -// CHECK-NEXT: %5 = cir.const(#true) +// CHECK-NEXT: %5 = cir.const #true // CHECK-NEXT: cir.store %5, %1 void b3(int a, int b, int c, int d) { diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index 93b1c918fe33..57cd3b9ba250 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -27,30 +27,30 @@ typedef struct { int d : 2; int e : 15; unsigned f; // type other than int above, not a bitfield -} S; +} S; typedef struct { int a : 3; // one bitfield with size < 8 unsigned b; -} T; +} T; typedef struct { char a; char b; char c; - + // startOffset 24 bits, new storage from here - int d: 2; + int d: 2; int e: 2; int f: 4; int g: 25; int h: 3; - int i: 4; + int i: 4; int j: 3; int k: 8; int l: 14; // need to be a part of the new storage - // because (tail - startOffset) is 65 after 'l' field + // because (tail - startOffset) is 65 after 'l' field } U; // CHECK: !ty_22D22 = !cir.struct @@ -66,9 +66,9 @@ typedef struct { // CHECK: cir.func {{.*@store_field}} // CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, !cir.ptr -// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i // CHECK: [[TMP2:%.*]] = cir.get_member [[TMP0]][2] {name = "e"} : !cir.ptr -> !cir.ptr -// CHECK: cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) +// CHECK: cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) void store_field() { S s; s.e = 3; @@ -93,7 +93,7 @@ void unOp(S* s) { } // CHECK: cir.func {{.*@binOp}} -// CHECK: [[TMP0:%.*]] = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK: [[TMP0:%.*]] = cir.const #cir.int<42> : !s32i // CHECK: [[TMP1:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr> // CHECK: [[TMP2:%.*]] = cir.get_bitfield(#bfi_d, [[TMP1]] : !cir.ptr>) -> !s32i // CHECK: [[TMP3:%.*]] = cir.binop(or, [[TMP2]], [[TMP0]]) : !s32i @@ -109,7 +109,7 @@ unsigned load_non_bitfield(S *s) { return s->f; } -// just create a usage of T type +// just create a usage of T type // CHECK: cir.func {{.*@load_one_bitfield}} int load_one_bitfield(T* t) { return t->a; @@ -124,7 +124,7 @@ void createU() { // CHECK: cir.func {{.*@createD}} // CHECK: %0 = cir.alloca !ty_22D22, !cir.ptr, ["d"] {alignment = 4 : i64} // CHECK: %1 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr -// CHECK: %2 = cir.const(#cir.const_struct<{#cir.int<33> : !u8i, #cir.int<0> : !u8i, #cir.int<3> : !s32i}> : !ty_anon_struct) : !ty_anon_struct +// CHECK: %2 = cir.const #cir.const_struct<{#cir.int<33> : !u8i, #cir.int<0> : !u8i, #cir.int<3> : !s32i}> : !ty_anon_struct // CHECK: cir.store %2, %1 : !ty_anon_struct, !cir.ptr void createD() { D d = {1,2,3}; diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 4a7f16beff0a..be31118064bd 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -34,7 +34,7 @@ typedef struct { // CHECK: cir.func @_Z11store_field // CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, !cir.ptr -// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i // CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr // CHECK: cir.set_bitfield(#bfi_a, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) void store_field() { @@ -57,7 +57,7 @@ unsigned load_non_bitfield(S& s) { return s.f; } -// just create a usage of T type +// just create a usage of T type // CHECK: cir.func @_Z17load_one_bitfield int load_one_bitfield(T& t) { return t.a; diff --git a/clang/test/CIR/CodeGen/bitint.cpp b/clang/test/CIR/CodeGen/bitint.cpp index 32bda23e663a..09c133d0e1be 100644 --- a/clang/test/CIR/CodeGen/bitint.cpp +++ b/clang/test/CIR/CodeGen/bitint.cpp @@ -26,7 +26,7 @@ i10 test_init() { } // CHECK: cir.func @_Z9test_initv() -> !cir.int -// CHECK: %[[#LITERAL:]] = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK: %[[#LITERAL:]] = cir.const #cir.int<42> : !s32i // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#LITERAL]] : !s32i), !cir.int // CHECK: } @@ -35,7 +35,7 @@ void test_init_for_mem() { } // CHECK: cir.func @_Z17test_init_for_memv() -// CHECK: %[[#LITERAL:]] = cir.const(#cir.int<42> : !s32i) : !s32i +// CHECK: %[[#LITERAL:]] = cir.const #cir.int<42> : !s32i // CHECK-NEXT: %[[#INIT:]] = cir.cast(integral, %[[#LITERAL]] : !s32i), !cir.int // CHECK-NEXT: cir.store %[[#INIT]], %{{.+}} : !cir.int, !cir.ptr> // CHECK: } @@ -57,7 +57,7 @@ void Size1ExtIntParam(unsigned _BitInt(1) A) { // CHECK: cir.func @_Z16Size1ExtIntParamDU1_ // CHECK: %[[#A:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int -// CHECK-NEXT: %[[#IDX:]] = cir.const(#cir.int<2> : !s32i) : !s32i +// CHECK-NEXT: %[[#IDX:]] = cir.const #cir.int<2> : !s32i // CHECK-NEXT: %[[#ARRAY:]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr x 5>>), !cir.ptr> // CHECK-NEXT: %[[#PTR:]] = cir.ptr_stride(%[[#ARRAY]] : !cir.ptr>, %[[#IDX]] : !s32i), !cir.ptr> // CHECK-NEXT: cir.store %[[#A]], %[[#PTR]] : !cir.int, !cir.ptr> @@ -76,9 +76,9 @@ void OffsetOfTest(void) { } // CHECK: cir.func @_Z12OffsetOfTestv() -// CHECK: %{{.+}} = cir.const(#cir.int<0> : !u64i) : !u64i -// CHECK: %{{.+}} = cir.const(#cir.int<4> : !u64i) : !u64i -// CHECK: %{{.+}} = cir.const(#cir.int<8> : !u64i) : !u64i +// CHECK: %{{.+}} = cir.const #cir.int<0> : !u64i +// CHECK: %{{.+}} = cir.const #cir.int<4> : !u64i +// CHECK: %{{.+}} = cir.const #cir.int<8> : !u64i // CHECK: } _BitInt(2) ParamPassing(_BitInt(15) a, _BitInt(31) b) {} diff --git a/clang/test/CIR/CodeGen/bool.c b/clang/test/CIR/CodeGen/bool.c index d3f4f2748672..038de348797b 100644 --- a/clang/test/CIR/CodeGen/bool.c +++ b/clang/test/CIR/CodeGen/bool.c @@ -3,13 +3,13 @@ #include -typedef struct { +typedef struct { bool x; } S; // CHECK: cir.func @init_bool // CHECK: [[ALLOC:%.*]] = cir.alloca !ty_22S22, !cir.ptr -// CHECK: [[ZERO:%.*]] = cir.const(#cir.zero : !ty_22S22) : !ty_22S22 +// CHECK: [[ZERO:%.*]] = cir.const #cir.zero : !ty_22S22 // CHECK: cir.store [[ZERO]], [[ALLOC]] : !ty_22S22, !cir.ptr void init_bool(void) { S s = {0}; @@ -17,23 +17,23 @@ void init_bool(void) { // CHECK: cir.func @store_bool // CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr> -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> -// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.cast(int_to_bool, [[TMP1]] : !s32i), !cir.bool -// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.get_member [[TMP3]][0] {name = "x"} : !cir.ptr -> !cir.ptr -// CHECK: cir.store [[TMP2]], [[TMP4]] : !cir.bool, !cir.ptr +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP1:%.*]] = cir.const #cir.int<0> : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(int_to_bool, [[TMP1]] : !s32i), !cir.bool +// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.get_member [[TMP3]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: cir.store [[TMP2]], [[TMP4]] : !cir.bool, !cir.ptr void store_bool(S *s) { s->x = false; } // CHECK: cir.func @load_bool -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} -// CHECK: [[TMP1:%.*]] = cir.alloca !cir.bool, !cir.ptr, ["x", init] {alignment = 1 : i64} -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "x"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : !cir.ptr, !cir.bool +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.bool, !cir.ptr, ["x", init] {alignment = 1 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : !cir.ptr, !cir.bool void load_bool(S *s) { bool x = s->x; } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp index be633108597f..d09a60085f81 100644 --- a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp +++ b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp @@ -4,7 +4,7 @@ auto func() { return __builtin_strcmp("", ""); // CHECK: cir.func @_Z4funcv() // CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} loc(#loc2) - // CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc7) + // CHECK-NEXT: %1 = cir.const #cir.int<0> : !s32i loc(#loc7) // CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr loc(#loc8) // CHECK-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i loc(#loc8) // CHECK-NEXT: cir.return %2 : !s32i loc(#loc8) diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 1ed5e8ca57d3..8129288bbd68 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -46,8 +46,8 @@ void d(void) { // CHECK: } // CHECK: cir.func @d() // CHECK: call @a() : () -> () -// CHECK: %0 = cir.const(#cir.int<0> : !s32i) : !s32i -// CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %0 = cir.const #cir.int<0> : !s32i +// CHECK: %1 = cir.const #cir.int<1> : !s32i // CHECK: call @b(%0, %1) : (!s32i, !s32i) -> !s32i // CHECK: cir.return // CHECK: } @@ -84,8 +84,8 @@ void d(void) { // CXX-NEXT: } // CXX-NEXT: cir.func @_Z1dv() // CXX-NEXT: call @_Z1av() : () -> () -// CXX-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i -// CXX-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CXX-NEXT: %0 = cir.const #cir.int<0> : !s32i +// CXX-NEXT: %1 = cir.const #cir.int<1> : !s32i // CXX-NEXT: call @_Z1bii(%0, %1) : (!s32i, !s32i) -> !s32i // CXX-NEXT: cir.return // CXX-NEXT: } diff --git a/clang/test/CIR/CodeGen/call.cpp b/clang/test/CIR/CodeGen/call.cpp index ac2ff489db69..26db637fdb1d 100644 --- a/clang/test/CIR/CodeGen/call.cpp +++ b/clang/test/CIR/CodeGen/call.cpp @@ -10,5 +10,5 @@ int f() { // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK: %1 = cir.call @_Z1pv() : () -> !cir.ptr // CHECK: %2 = cir.load %1 : !cir.ptr, !s32i -// CHECK: %3 = cir.const(#cir.int<22> : !s32i) : !s32i +// CHECK: %3 = cir.const #cir.int<22> : !s32i // CHECK: %4 = cir.binop(sub, %2, %3) nsw : !s32i diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index b5e15ba784ca..15991a8f1fd3 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -111,13 +111,13 @@ void call_cptr(void *d) { // CHECK: %3 = cir.unary(not, %2) : !cir.bool, !cir.bool // CHECK: cir.if %3 { -void lvalue_cast(int x) { +void lvalue_cast(int x) { *(int *)&x = 42; -} +} // CHECK: cir.func @_Z11lvalue_cast -// CHECK: %1 = cir.const(#cir.int<42> : !s32i) : !s32i -// CHECK: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK: %1 = cir.const #cir.int<42> : !s32i +// CHECK: cir.store %1, %0 : !s32i, !cir.ptr struct A { int x; }; @@ -127,9 +127,9 @@ void null_cast(long ptr) { } // CHECK: cir.func @_Z9null_castl -// CHECK: %[[ADDR:[0-9]+]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %[[ADDR:[0-9]+]] = cir.const #cir.ptr : !cir.ptr // CHECK: cir.store %{{[0-9]+}}, %[[ADDR]] : !s32i, !cir.ptr -// CHECK: %[[BASE:[0-9]+]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %[[BASE:[0-9]+]] = cir.const #cir.ptr : !cir.ptr // CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr // CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c index 6f4ca8cc4ab5..36bb7d324768 100644 --- a/clang/test/CIR/CodeGen/compound-literal.c +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -40,9 +40,9 @@ int foo() { // CIR: [[RET_MEM:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CIR: [[COMPLITERAL_MEM:%.*]] = cir.alloca !ty_22anon2E122, !cir.ptr, [".compoundliteral"] {alignment = 4 : i64} // CIR: [[FIELD:%.*]] = cir.get_member [[COMPLITERAL_MEM]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CIR: [[ONE:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i // CIR: cir.store [[ONE]], [[FIELD]] : !s32i, !cir.ptr -// CIR: [[ONE:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i // CIR: cir.store [[ONE]], [[RET_MEM]] : !s32i, !cir.ptr // CIR: [[RET:%.*]] = cir.load [[RET_MEM]] : !cir.ptr, !s32i // CIR: cir.return [[RET]] : !s32i diff --git a/clang/test/CIR/CodeGen/const-array.c b/clang/test/CIR/CodeGen/const-array.c index 7ae6f852d7ca..91a77d113daf 100644 --- a/clang/test/CIR/CodeGen/const-array.c +++ b/clang/test/CIR/CodeGen/const-array.c @@ -14,5 +14,5 @@ void foo() { // CHECK: cir.func {{.*@foo}} // CHECK: %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} -// CHECK: %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array) : !cir.array +// CHECK: %1 = cir.const #cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array // CHECK: cir.store %1, %0 : !cir.array, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 43edbf451464..ba9c6cdf973e 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -158,8 +158,8 @@ VoidTask silly_task() { // Get coroutine id with __builtin_coro_id. -// CHECK: %[[#NullPtr:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr -// CHECK: %[[#Align:]] = cir.const(#cir.int<16> : !u32i) : !u32i +// CHECK: %[[#NullPtr:]] = cir.const #cir.ptr : !cir.ptr +// CHECK: %[[#Align:]] = cir.const #cir.int<16> : !u32i // CHECK: %[[#CoroId:]] = cir.call @__builtin_coro_id(%[[#Align]], %[[#NullPtr]], %[[#NullPtr]], %[[#NullPtr]]) // Perform allocation calling operator 'new' depending on __builtin_coro_alloc and @@ -261,8 +261,8 @@ VoidTask silly_task() { // Call builtin coro end and return -// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const(#cir.ptr : !cir.ptr) -// CHECK-NEXT: %[[#CoroEndArg1:]] = cir.const(#false) : !cir.bool +// CHECK-NEXT: %[[#CoroEndArg0:]] = cir.const #cir.ptr : !cir.ptr +// CHECK-NEXT: %[[#CoroEndArg1:]] = cir.const #false // CHECK-NEXT: = cir.call @__builtin_coro_end(%[[#CoroEndArg0]], %[[#CoroEndArg1]]) // CHECK: %[[#Tmp1:]] = cir.load %[[#VoidTaskAddr]] @@ -315,7 +315,7 @@ folly::coro::Task go1() { // The call to go(1) has its own scope due to full-expression rules. // CHECK: cir.scope { // CHECK: %[[#OneAddr:]] = cir.alloca !s32i, !cir.ptr, ["ref.tmp1", init] {alignment = 4 : i64} -// CHECK: %[[#One:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %[[#One:]] = cir.const #cir.int<1> : !s32i // CHECK: cir.store %[[#One]], %[[#OneAddr]] : !s32i, !cir.ptr // CHECK: %[[#IntTaskTmp:]] = cir.call @_Z2goRKi(%[[#OneAddr]]) : (!cir.ptr) -> ![[IntTask]] // CHECK: cir.store %[[#IntTaskTmp]], %[[#IntTaskAddr]] : ![[IntTask]], !cir.ptr @@ -367,7 +367,7 @@ folly::coro::Task go4() { // CHECK: cir.scope { // CHECK: %17 = cir.alloca !s32i, !cir.ptr, ["ref.tmp2", init] {alignment = 4 : i64} // CHECK: %18 = cir.load %3 : !cir.ptr)>>>, !cir.ptr)>> -// CHECK: %19 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: %19 = cir.const #cir.int<3> : !s32i // CHECK: cir.store %19, %17 : !s32i, !cir.ptr // Call invoker, which calls operator() indirectly. diff --git a/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp b/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp index f70d1f8428d4..2f55a395c4b1 100644 --- a/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp +++ b/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp @@ -4,5 +4,5 @@ struct e { e(int); }; e *g = new e(0); -//CHECK: {{%.*}} = cir.const(#cir.int<1> : !u64i) : !u64i loc(#loc11) +//CHECK: {{%.*}} = cir.const #cir.int<1> : !u64i loc(#loc11) //CHECK: {{%.*}} = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr loc(#loc6) diff --git a/clang/test/CIR/CodeGen/delete.cpp b/clang/test/CIR/CodeGen/delete.cpp index 0f0ddcbc2c84..b02641ff87b0 100644 --- a/clang/test/CIR/CodeGen/delete.cpp +++ b/clang/test/CIR/CodeGen/delete.cpp @@ -10,6 +10,6 @@ namespace test1 { } // CHECK: cir.func @_ZN5test11aEPNS_1AE - // CHECK: %[[CONST:.*]] = cir.const(#cir.int<4> : !u64i) : !u64i + // CHECK: %[[CONST:.*]] = cir.const #cir.int<4> : !u64i // CHECK: cir.call @_ZN5test11AdlEPvm({{.*}}, %[[CONST]]) } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 253ab3907aa4..4932e8d7a944 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -87,7 +87,7 @@ void C3::Layer::Initialize() { // CHECK: %2 = cir.base_class_addr(%1 : !cir.ptr) -> !cir.ptr // CHECK: %3 = cir.get_member %2[1] {name = "m_C1"} : !cir.ptr -> !cir.ptr> // CHECK: %4 = cir.load %3 : !cir.ptr>, !cir.ptr -// CHECK: %5 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %5 = cir.const #cir.ptr : !cir.ptr // CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool enumy C3::Initialize() { @@ -166,7 +166,7 @@ struct C : public A { }; // CHECK: cir.func @_Z8test_refv() -// CHECK: cir.get_member %2[1] {name = "ref"} +// CHECK: cir.get_member %2[1] {name = "ref"} int test_ref() { int x = 42; C c(x); diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index f4abee8de303..5c34604a623a 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -45,7 +45,7 @@ class B : public A // CHECK: cir.func @_Z4bluev() // CHECK: %0 = cir.alloca !ty_22PSEvent22, !cir.ptr, ["p", init] {alignment = 8 : i64} -// CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %1 = cir.const #cir.int<1> : !s32i // CHECK: %2 = cir.get_global @".str" : !cir.ptr> // CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr // CHECK: cir.call @_ZN7PSEventC1E6EFModePKc(%0, %1, %3) : (!cir.ptr, !s32i, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/dynamic-cast.cpp b/clang/test/CIR/CodeGen/dynamic-cast.cpp index 0c74504a7faa..536fcf844845 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast.cpp @@ -22,13 +22,13 @@ Derived *ptr_cast(Base *b) { // AFTER: cir.func @_Z8ptr_castP4Base // AFTER: %[[#SRC_IS_NULL:]] = cir.cast(ptr_to_bool, %{{.+}} : !cir.ptr), !cir.bool // AFTER-NEXT: %{{.+}} = cir.ternary(%[[#SRC_IS_NULL]], true { -// AFTER-NEXT: %[[#NULL:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// AFTER-NEXT: %[[#NULL:]] = cir.const #cir.ptr : !cir.ptr // AFTER-NEXT: cir.yield %[[#NULL]] : !cir.ptr // AFTER-NEXT: }, false { // AFTER-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr -// AFTER-NEXT: %[[#SRC_RTTI:]] = cir.const(#cir.global_view<@_ZTI4Base> : !cir.ptr) : !cir.ptr -// AFTER-NEXT: %[[#DEST_RTTI:]] = cir.const(#cir.global_view<@_ZTI7Derived> : !cir.ptr) : !cir.ptr -// AFTER-NEXT: %[[#OFFSET_HINT:]] = cir.const(#cir.int<0> : !s64i) : !s64i +// AFTER-NEXT: %[[#SRC_RTTI:]] = cir.const #cir.global_view<@_ZTI4Base> : !cir.ptr +// AFTER-NEXT: %[[#DEST_RTTI:]] = cir.const #cir.global_view<@_ZTI7Derived> : !cir.ptr +// AFTER-NEXT: %[[#OFFSET_HINT:]] = cir.const #cir.int<0> : !s64i // AFTER-NEXT: %[[#CASTED_PTR:]] = cir.call @__dynamic_cast(%[[#SRC_VOID_PTR]], %[[#SRC_RTTI]], %[[#DEST_RTTI]], %[[#OFFSET_HINT]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr // AFTER-NEXT: %[[#RESULT:]] = cir.cast(bitcast, %[[#CASTED_PTR]] : !cir.ptr), !cir.ptr // AFTER-NEXT: cir.yield %[[#RESULT]] : !cir.ptr @@ -45,9 +45,9 @@ Derived &ref_cast(Base &b) { // AFTER: cir.func @_Z8ref_castR4Base // AFTER: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr -// AFTER-NEXT: %[[#SRC_RTTI:]] = cir.const(#cir.global_view<@_ZTI4Base> : !cir.ptr) : !cir.ptr -// AFTER-NEXT: %[[#DEST_RTTI:]] = cir.const(#cir.global_view<@_ZTI7Derived> : !cir.ptr) : !cir.ptr -// AFTER-NEXT: %[[#OFFSET_HINT:]] = cir.const(#cir.int<0> : !s64i) : !s64i +// AFTER-NEXT: %[[#SRC_RTTI:]] = cir.const #cir.global_view<@_ZTI4Base> : !cir.ptr +// AFTER-NEXT: %[[#DEST_RTTI:]] = cir.const #cir.global_view<@_ZTI7Derived> : !cir.ptr +// AFTER-NEXT: %[[#OFFSET_HINT:]] = cir.const #cir.int<0> : !s64i // AFTER-NEXT: %[[#CASTED_PTR:]] = cir.call @__dynamic_cast(%[[#SRC_VOID_PTR]], %[[#SRC_RTTI]], %[[#DEST_RTTI]], %[[#OFFSET_HINT]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr // AFTER-NEXT: %[[#CASTED_PTR_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#CASTED_PTR]] : !cir.ptr), !cir.bool // AFTER-NEXT: %[[#CASTED_PTR_IS_NULL:]] = cir.unary(not, %[[#CASTED_PTR_IS_NOT_NULL]]) : !cir.bool, !cir.bool @@ -67,7 +67,7 @@ void *ptr_cast_to_complete(Base *ptr) { // BEFORE-NEXT: %[[#V20:]] = cir.cast(ptr_to_bool, %[[#V19]] : !cir.ptr), !cir.bool // BEFORE-NEXT: %[[#V21:]] = cir.unary(not, %[[#V20]]) : !cir.bool, !cir.bool // BEFORE-NEXT: %{{.+}} = cir.ternary(%[[#V21]], true { -// BEFORE-NEXT: %[[#V22:]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// BEFORE-NEXT: %[[#V22:]] = cir.const #cir.ptr : !cir.ptr // BEFORE-NEXT: cir.yield %[[#V22]] : !cir.ptr // BEFORE-NEXT: }, false { // BEFORE-NEXT: %[[#V23:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr> diff --git a/clang/test/CIR/CodeGen/evaluate-expr.c b/clang/test/CIR/CodeGen/evaluate-expr.c index 805fa5c01fd7..101f423c8e14 100644 --- a/clang/test/CIR/CodeGen/evaluate-expr.c +++ b/clang/test/CIR/CodeGen/evaluate-expr.c @@ -10,7 +10,7 @@ void foo() { } // CHECK: cir.func no_proto @foo() // CHECK: cir.scope { -// CHECK: [[ZERO:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i // CHECK: [[FALSE:%.*]] = cir.cast(int_to_bool, [[ZERO:%.*]] : !s32i), !cir.bool // CHECK: cir.if [[FALSE]] { // CHECK: cir.return @@ -26,7 +26,7 @@ void bar() { // CHECK: cir.func no_proto @bar() // CHECK: [[ALLOC:%.*]] = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK: {{%.*}} = cir.get_global @s : !cir.ptr -// CHECK: [[CONST:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: [[CONST:%.*]] = cir.const #cir.int<0> : !s32i // CHECK: cir.store [[CONST]], [[ALLOC]] : !s32i, !cir.ptr // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/expressions.cpp b/clang/test/CIR/CodeGen/expressions.cpp index 283acfca2d42..fb29394fbe2d 100644 --- a/clang/test/CIR/CodeGen/expressions.cpp +++ b/clang/test/CIR/CodeGen/expressions.cpp @@ -6,6 +6,6 @@ void test(int a) { // Should generate LValue parenthesis expression. (a) = 1; - // CHECK: %[[#C:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#C:]] = cir.const #cir.int<1> : !s32i // CHECK: cir.store %[[#C]], %{{.+}} : !s32i, !cir.ptr } diff --git a/clang/test/CIR/CodeGen/fullexpr.cpp b/clang/test/CIR/CodeGen/fullexpr.cpp index 2fd42b9b6db7..a83ce7d530cc 100644 --- a/clang/test/CIR/CodeGen/fullexpr.cpp +++ b/clang/test/CIR/CodeGen/fullexpr.cpp @@ -12,7 +12,7 @@ int go1() { // CHECK: %[[#XAddr:]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK: %[[#RVal:]] = cir.scope { // CHECK-NEXT: %[[#TmpAddr:]] = cir.alloca !s32i, !cir.ptr, ["ref.tmp0", init] {alignment = 4 : i64} -// CHECK-NEXT: %[[#One:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %[[#One:]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store %[[#One]], %[[#TmpAddr]] : !s32i, !cir.ptr // CHECK-NEXT: %[[#RValTmp:]] = cir.call @_Z2goRKi(%[[#TmpAddr]]) : (!cir.ptr) -> !s32i // CHECK-NEXT: cir.yield %[[#RValTmp]] : !s32i diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c index 69179ece2c9b..a9c5b38b8cf1 100644 --- a/clang/test/CIR/CodeGen/fun-ptr.c +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -14,7 +14,7 @@ struct A; typedef int (*fun_typ)(struct A*); typedef struct A { - fun_typ fun; + fun_typ fun; } A; // CIR: !ty_22A22 = !cir.struct>)>>} #cir.record.decl.ast> @@ -29,7 +29,7 @@ int extract_a(Data* d) { // CIR: [[TMP1:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] // CIR: [[TMP2:%.*]] = cir.alloca !cir.ptr)>>, !cir.ptr)>>>, ["f", init] // CIR: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> -// CIR: [[TMP3:%.*]] = cir.const(#cir.ptr : !cir.ptr)>>) : !cir.ptr)>> +// CIR: [[TMP3:%.*]] = cir.const #cir.ptr : !cir.ptr)>> // CIR: cir.store [[TMP3]], [[TMP2]] : !cir.ptr)>>, !cir.ptr)>>> // CIR: [[TMP4:%.*]] = cir.get_global {{@.*extract_a.*}} : !cir.ptr)>> // CIR: cir.store [[TMP4]], [[TMP2]] : !cir.ptr)>>, !cir.ptr)>>> diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index da9f9397164f..cd2c235db672 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -67,7 +67,7 @@ int use_func() { return func(); } // CHECK-NEXT: %0 = cir.alloca !u8i, !cir.ptr, ["c", init] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @s2 : !cir.ptr> // CHECK-NEXT: %2 = cir.load %1 : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr // CHECK-NEXT: %5 = cir.load %4 : !cir.ptr, !s8i // CHECK-NEXT: %6 = cir.cast(integral, %5 : !s8i), !u8i @@ -76,7 +76,7 @@ int use_func() { return func(); } // CHECK: cir.func linkonce_odr @_Z4funcIiET_v() -> !s32i // CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %1 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr // CHECK-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %2 : !s32i diff --git a/clang/test/CIR/CodeGen/gnu-extension.c b/clang/test/CIR/CodeGen/gnu-extension.c index 0eb7e750ddff..7386de78176f 100644 --- a/clang/test/CIR/CodeGen/gnu-extension.c +++ b/clang/test/CIR/CodeGen/gnu-extension.c @@ -5,7 +5,7 @@ int foo(void) { return __extension__ 0b101010; } //CHECK: cir.func @foo() //CHECK-NEXT: [[ADDR:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -//CHECK-NEXT: [[VAL:%.*]] = cir.const(#cir.int<42> : !s32i) : !s32i +//CHECK-NEXT: [[VAL:%.*]] = cir.const #cir.int<42> : !s32i //CHECK-NEXT: cir.store [[VAL]], [[ADDR]] : !s32i, !cir.ptr //CHECK-NEXT: [[LOAD_VAL:%.*]] = cir.load [[ADDR]] : !cir.ptr, !s32i //CHECK-NEXT: cir.return [[LOAD_VAL]] : !s32i diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index fd21360e399d..204b00303fca 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -18,13 +18,13 @@ void g0(int a) { // CHECK-NEXT cir.br ^bb2 // CHECK-NEXT ^bb1: // no predecessors // CHECK-NEXT %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT %4 = cir.const(1 : !s32i) : !s32i +// CHECK-NEXT %4 = cir.const 1 : !s32i // CHECK-NEXT %5 = cir.binop(add, %3, %4) : !s32i // CHECK-NEXT cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT cir.br ^bb2 // CHECK-NEXT ^bb2: // 2 preds: ^bb0, ^bb1 // CHECK-NEXT %6 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT %7 = cir.const(2 : !s32i) : !s32i +// CHECK-NEXT %7 = cir.const 2 : !s32i // CHECK-NEXT %8 = cir.binop(add, %6, %7) : !s32i // CHECK-NEXT cir.store %8, %1 : !s32i, !cir.ptr // CHECK-NEXT cir.return diff --git a/clang/test/CIR/CodeGen/hello.c b/clang/test/CIR/CodeGen/hello.c index 07ba213419fd..8aa29c05a211 100644 --- a/clang/test/CIR/CodeGen/hello.c +++ b/clang/test/CIR/CodeGen/hello.c @@ -15,7 +15,7 @@ int main (void) { // CHECK: %2 = cir.get_global @".str" : !cir.ptr> // CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr // CHECK: %4 = cir.call @printf(%3) : (!cir.ptr) -> !s32i -// CHECK: %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %5 = cir.const #cir.int<0> : !s32i // CHECK: cir.store %5, %0 : !s32i, !cir.ptr // CHECK: %6 = cir.load %0 : !cir.ptr, !s32i // CHECK: cir.return %6 : !s32i diff --git a/clang/test/CIR/CodeGen/if-constexpr.cpp b/clang/test/CIR/CodeGen/if-constexpr.cpp index 18b09de54758..1e487389cc62 100644 --- a/clang/test/CIR/CodeGen/if-constexpr.cpp +++ b/clang/test/CIR/CodeGen/if-constexpr.cpp @@ -43,18 +43,18 @@ void if0() { // CHECK: cir.store %1, %0 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: cir.scope { // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} -// CHECK-NEXT: %3 = cir.const(#cir.int<2> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %3 = cir.const #cir.int<2> : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %3, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} -// CHECK-NEXT: %3 = cir.const(#cir.int<5> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %3 = cir.const #cir.int<5> : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %3, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} // CHECK-NEXT: %3 = cir.alloca !s32i, !cir.ptr, ["y", init] {{.*}} -// CHECK-NEXT: %4 = cir.const(#cir.int<7> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %4 = cir.const #cir.int<7> : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %4, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: %5 = cir.load %2 : !cir.ptr, !s32i loc({{.*}}) // CHECK-NEXT: cir.store %5, %3 : !s32i, !cir.ptr loc({{.*}}) @@ -62,16 +62,16 @@ void if0() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} // CHECK-NEXT: %3 = cir.alloca !s32i, !cir.ptr, ["y", init] {{.*}} -// CHECK-NEXT: %4 = cir.const(#cir.int<9> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %4 = cir.const #cir.int<9> : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %4, %2 : !s32i, !cir.ptr loc({{.*}}) -// CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %5 = cir.const #cir.int<3> : !s32i loc({{.*}}) // CHECK-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i loc({{.*}}) // CHECK-NEXT: %7 = cir.binop(mul, %5, %6) : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %7, %3 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} -// CHECK-NEXT: %3 = cir.const(#cir.int<20> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %3 = cir.const #cir.int<20> : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %3, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { @@ -80,16 +80,16 @@ void if0() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} // CHECK-NEXT: %3 = cir.alloca !s32i, !cir.ptr, ["y", init] {{.*}} -// CHECK-NEXT: %4 = cir.const(#cir.int<70> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %4 = cir.const #cir.int<70> : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %4, %2 : !s32i, !cir.ptr loc({{.*}}) -// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %5 = cir.const #cir.int<10> : !s32i loc({{.*}}) // CHECK-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i loc({{.*}}) // CHECK-NEXT: %7 = cir.binop(mul, %5, %6) : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %7, %3 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} -// CHECK-NEXT: %3 = cir.const(#cir.int<90> : !s32i) : !s32i loc({{.*}}) +// CHECK-NEXT: %3 = cir.const #cir.int<90> : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %3, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.return loc({{.*}}) diff --git a/clang/test/CIR/CodeGen/if.cir b/clang/test/CIR/CodeGen/if.cir index b3104fd42d66..7ca069fe9399 100644 --- a/clang/test/CIR/CodeGen/if.cir +++ b/clang/test/CIR/CodeGen/if.cir @@ -6,10 +6,10 @@ module { cir.func @foo(%arg0: !s32i) -> !s32i { %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool cir.if %4 { - %5 = cir.const(#cir.int<1> : !s32i) : !s32i + %5 = cir.const #cir.int<1> : !s32i cir.return %5 : !s32i } else { - %5 = cir.const(#cir.int<0> : !s32i) : !s32i + %5 = cir.const #cir.int<0> : !s32i cir.return %5 : !s32i } cir.return %arg0 : !s32i @@ -18,10 +18,10 @@ module { // CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool // CHECK-NEXT: cir.brcond %0 ^bb2, ^bb1 // CHECK-NEXT: ^bb1: // pred: ^bb0 -// CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %1 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.return %1 : !s32i // CHECK-NEXT: ^bb2: // pred: ^bb0 -// CHECK-NEXT: %2 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %2 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.return %2 : !s32i // CHECK-NEXT: ^bb3: // no predecessors // CHECK-NEXT: cir.return %arg0 : !s32i @@ -30,7 +30,7 @@ module { cir.func @onlyIf(%arg0: !s32i) -> !s32i { %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool cir.if %4 { - %5 = cir.const(#cir.int<1> : !s32i) : !s32i + %5 = cir.const #cir.int<1> : !s32i cir.return %5 : !s32i } cir.return %arg0 : !s32i @@ -39,7 +39,7 @@ module { // CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool // CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // pred: ^bb0 -// CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.return %1 : !s32i // CHECK-NEXT: ^bb2: // pred: ^bb0 // CHECK-NEXT: cir.return %arg0 : !s32i diff --git a/clang/test/CIR/CodeGen/inc-bool.cpp b/clang/test/CIR/CodeGen/inc-bool.cpp index adeb39f73938..193d63314960 100644 --- a/clang/test/CIR/CodeGen/inc-bool.cpp +++ b/clang/test/CIR/CodeGen/inc-bool.cpp @@ -9,6 +9,6 @@ void foo(bool x) { // CHECK: [[ALLOC_X:%.*]] = cir.alloca !cir.bool, !cir.ptr, ["x", init] {alignment = 1 : i64} // CHECK: cir.store %arg0, [[ALLOC_X]] : !cir.bool, !cir.ptr // CHECK: {{.*}} = cir.load [[ALLOC_X]] : !cir.ptr, !cir.bool -// CHECK: [[TRUE:%.*]] = cir.const(#true) : !cir.bool +// CHECK: [[TRUE:%.*]] = cir.const #true // CHECK: cir.store [[TRUE]], [[ALLOC_X]] : !cir.bool, !cir.ptr // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/lalg.c b/clang/test/CIR/CodeGen/lalg.c index 5115f967d314..26b41591d7dd 100644 --- a/clang/test/CIR/CodeGen/lalg.c +++ b/clang/test/CIR/CodeGen/lalg.c @@ -10,9 +10,9 @@ double dot() { // CHECK: %1 = cir.alloca !cir.double, !cir.ptr, ["x", init] // CHECK-NEXT: %2 = cir.alloca !cir.double, !cir.ptr, ["y", init] // CHECK-NEXT: %3 = cir.alloca !cir.double, !cir.ptr, ["result", init] -// CHECK-NEXT: %4 = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double +// CHECK-NEXT: %4 = cir.const #cir.fp<0.000000e+00> : !cir.double // CHECK-NEXT: cir.store %4, %1 : !cir.double, !cir.ptr -// CHECK-NEXT: %5 = cir.const(#cir.fp<0.000000e+00> : !cir.float) : !cir.float +// CHECK-NEXT: %5 = cir.const #cir.fp<0.000000e+00> : !cir.float // CHECK-NEXT: %6 = cir.cast(floating, %5 : !cir.float), !cir.double // CHECK-NEXT: cir.store %6, %2 : !cir.double, !cir.ptr // CHECK-NEXT: %7 = cir.load %1 : !cir.ptr, !cir.double diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 60f7b98f0600..7ab80bf18aff 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -29,7 +29,7 @@ void l0() { // CHECK: %2 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %3 = cir.load %2 : !cir.ptr>, !cir.ptr // CHECK: %4 = cir.load %3 : !cir.ptr, !s32i -// CHECK: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %5 = cir.const #cir.int<1> : !s32i // CHECK: %6 = cir.binop(add, %4, %5) : !s32i // CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %8 = cir.load %7 : !cir.ptr>, !cir.ptr @@ -48,7 +48,7 @@ auto g() { // CHECK: cir.func @_Z1gv() -> !ty_22anon2E622 // CHECK: %0 = cir.alloca !ty_22anon2E622, !cir.ptr, ["__retval"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} -// CHECK: %2 = cir.const(#cir.int<12> : !s32i) : !s32i +// CHECK: %2 = cir.const #cir.int<12> : !s32i // CHECK: cir.store %2, %1 : !s32i, !cir.ptr // CHECK: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: cir.store %1, %3 : !cir.ptr, !cir.ptr> @@ -68,7 +68,7 @@ auto g2() { // CHECK: cir.func @_Z2g2v() -> !ty_22anon2E822 // CHECK-NEXT: %0 = cir.alloca !ty_22anon2E822, !cir.ptr, ["__retval", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.const(#cir.int<12> : !s32i) : !s32i +// CHECK-NEXT: %2 = cir.const #cir.int<12> : !s32i // CHECK-NEXT: cir.store %2, %1 : !s32i, !cir.ptr // CHECK-NEXT: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK-NEXT: cir.store %1, %3 : !cir.ptr, !cir.ptr> @@ -125,7 +125,7 @@ int g3() { // CHECK: %4 = cir.scope { // CHECK: %7 = cir.alloca !s32i, !cir.ptr, ["ref.tmp1", init] {alignment = 4 : i64} // CHECK: %8 = cir.load %1 : !cir.ptr)>>>, !cir.ptr)>> -// CHECK: %9 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: %9 = cir.const #cir.int<3> : !s32i // CHECK: cir.store %9, %7 : !s32i, !cir.ptr // 3. Call `__invoke()`, which effectively executes `operator()`. diff --git a/clang/test/CIR/CodeGen/literals.c b/clang/test/CIR/CodeGen/literals.c index 47665212c287..b8a33ad11559 100644 --- a/clang/test/CIR/CodeGen/literals.c +++ b/clang/test/CIR/CodeGen/literals.c @@ -2,7 +2,7 @@ int literals(void) { char a = 'a'; // char literals are int in C - // CHECK: %[[RES:[0-9]+]] = cir.const(#cir.int<97> : !s32i) : !s32i + // CHECK: %[[RES:[0-9]+]] = cir.const #cir.int<97> : !s32i // CHECK: %{{[0-9]+}} = cir.cast(integral, %[[RES]] : !s32i), !s8i return 0; diff --git a/clang/test/CIR/CodeGen/literals.cpp b/clang/test/CIR/CodeGen/literals.cpp index 537ebc8557e1..87290b888185 100644 --- a/clang/test/CIR/CodeGen/literals.cpp +++ b/clang/test/CIR/CodeGen/literals.cpp @@ -2,7 +2,7 @@ int literals() { char a = 'a'; // char literals have char type in C++ - // CHECK: %{{[0-9]+}} = cir.const(#cir.int<97> : !s8i) : !s8i + // CHECK: %{{[0-9]+}} = cir.const #cir.int<97> : !s8i return 0; } diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index 9f9e3058d1bf..fcc45a892e3d 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -13,14 +13,14 @@ void l0(void) { // CPPSCOPE-NEXT: cir.scope { // CPPSCOPE-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CPPSCOPE-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["j", init] {alignment = 4 : i64} -// CPPSCOPE-NEXT: %2 = cir.const(#cir.int<0> : !s32i) : !s32i +// CPPSCOPE-NEXT: %2 = cir.const #cir.int<0> : !s32i // CPPSCOPE-NEXT: cir.store %2, %0 : !s32i, !cir.ptr // CPPSCOPE-NEXT: cir.for : cond { // CSCOPE: cir.func @l0() // CSCOPE-NEXT: cir.scope { // CSCOPE-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} -// CSCOPE-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CSCOPE-NEXT: %1 = cir.const #cir.int<0> : !s32i // CSCOPE-NEXT: cir.store %1, %0 : !s32i, !cir.ptr // CSCOPE-NEXT: cir.for : cond { diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 67ba64cbb6f2..4cda3fba3410 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -8,7 +8,7 @@ void l0() { // CHECK: cir.func @_Z2l0v // CHECK: cir.for : cond { -// CHECK: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK: %[[#TRUE:]] = cir.const #true // CHECK: cir.condition(%[[#TRUE]]) void l1() { @@ -21,18 +21,18 @@ void l1() { // CHECK: cir.func @_Z2l1v // CHECK: cir.for : cond { // CHECK-NEXT: %4 = cir.load %2 : !cir.ptr, !s32i -// CHECK-NEXT: %5 = cir.const(#cir.int<10> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<10> : !s32i // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool // CHECK-NEXT: cir.condition(%6) // CHECK-NEXT: } body { // CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i -// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i // CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } step { // CHECK-NEXT: %4 = cir.load %2 : !cir.ptr, !s32i -// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i // CHECK-NEXT: cir.store %6, %2 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield @@ -58,7 +58,7 @@ void l2(bool cond) { // CHECK-NEXT: cir.condition(%3) // CHECK-NEXT: } do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield @@ -66,11 +66,11 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.while { -// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: %[[#TRUE:]] = cir.const #true // CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield @@ -78,12 +78,12 @@ void l2(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.while { -// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.condition(%4) // CHECK-NEXT: } do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield @@ -107,7 +107,7 @@ void l3(bool cond) { // CHECK: cir.scope { // CHECK-NEXT: cir.do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield @@ -119,24 +119,24 @@ void l3(bool cond) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { -// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: %[[#TRUE:]] = cir.const #true // CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { -// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.condition(%4) // CHECK-NEXT: } @@ -154,16 +154,16 @@ void l4() { // CHECK: cir.func @_Z2l4v // CHECK: cir.while { -// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: %[[#TRUE:]] = cir.const #true // CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } do { // CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i -// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i // CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: cir.scope { // CHECK-NEXT: %10 = cir.load %0 : !cir.ptr, !s32i -// CHECK-NEXT: %11 = cir.const(#cir.int<10> : !s32i) : !s32i +// CHECK-NEXT: %11 = cir.const #cir.int<10> : !s32i // CHECK-NEXT: %12 = cir.cmp(lt, %10, %11) : !s32i, !cir.bool // CHECK-NEXT: cir.if %12 { // CHECK-NEXT: cir.continue @@ -180,7 +180,7 @@ void l5() { // CHECK-NEXT: cir.do { // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { -// CHECK-NEXT: %0 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %0 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool // CHECK-NEXT: cir.condition(%1) // CHECK-NEXT: } @@ -197,7 +197,7 @@ void l6() { // CHECK: cir.func @_Z2l6v() // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.while { -// CHECK-NEXT: %[[#TRUE:]] = cir.const(#true) : !cir.bool +// CHECK-NEXT: %[[#TRUE:]] = cir.const #true // CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } do { // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index cfb5d52d4daf..4f7226376ddd 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -18,7 +18,7 @@ void m(int a, int b) { // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> // CHECK: cir.scope { -// CHECK: %4 = cir.const(#cir.int<1> : !u64i) : !u64i +// CHECK: %4 = cir.const #cir.int<1> : !u64i // CHECK: %5 = cir.call @_Znwm(%4) : (!u64i) -> !cir.ptr // CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr // CHECK: %7 = cir.load %0 : !cir.ptr>, !cir.ptr @@ -42,7 +42,7 @@ class B { // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> // CHECK: %2 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %3 = cir.const(#cir.int<1> : !u64i) : !u64i +// CHECK: %3 = cir.const #cir.int<1> : !u64i // CHECK: %4 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr // CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp index 527f064bfa4a..8edc47aa2c8a 100644 --- a/clang/test/CIR/CodeGen/nrvo.cpp +++ b/clang/test/CIR/CodeGen/nrvo.cpp @@ -14,7 +14,7 @@ std::vector test_nrvo() { // CHECK: cir.func @_Z9test_nrvov() -> ![[VEC]] // CHECK: %0 = cir.alloca ![[VEC]], !cir.ptr, ["__retval", init] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !cir.bool, !cir.ptr, ["nrvo"] {alignment = 1 : i64} -// CHECK: %2 = cir.const(#false) : !cir.bool +// CHECK: %2 = cir.const #false // CHECK: cir.store %2, %1 : !cir.bool, !cir.ptr // CHECK: cir.call @_ZNSt6vectorIPKcEC1Ev(%0) : (!cir.ptr) -> () // CHECK: cir.scope { @@ -24,7 +24,7 @@ std::vector test_nrvo() { // CHECK: cir.store %7, %5 : !cir.ptr, !cir.ptr> // CHECK: cir.call @_ZNSt6vectorIPKcE9push_backEOS1_(%0, %5) : (!cir.ptr, !cir.ptr>) -> () // CHECK: } -// CHECK: %3 = cir.const(#true) : !cir.bool +// CHECK: %3 = cir.const #true // CHECK: cir.store %3, %1 : !cir.bool, !cir.ptr // CHECK: %4 = cir.load %0 : !cir.ptr, ![[VEC]] // CHECK: cir.return %4 : ![[VEC]] diff --git a/clang/test/CIR/CodeGen/offsetof.c b/clang/test/CIR/CodeGen/offsetof.c index 5259e14d4915..5cd0d76ff46c 100644 --- a/clang/test/CIR/CodeGen/offsetof.c +++ b/clang/test/CIR/CodeGen/offsetof.c @@ -13,7 +13,7 @@ void foo() { } // CHECK: cir.func no_proto @foo() -// CHECK: {{.*}} = cir.const(#cir.int<0> : !u64i) : !u64i -// CHECK: {{.*}} = cir.const(#cir.int<4> : !u64i) : !u64i +// CHECK: {{.*}} = cir.const #cir.int<0> : !u64i +// CHECK: {{.*}} = cir.const #cir.int<4> : !u64i // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/pointer-to-data-member.cpp b/clang/test/CIR/CodeGen/pointer-to-data-member.cpp index 9ffa714e4d70..077506cec432 100644 --- a/clang/test/CIR/CodeGen/pointer-to-data-member.cpp +++ b/clang/test/CIR/CodeGen/pointer-to-data-member.cpp @@ -18,7 +18,7 @@ auto test1() -> int Point::* { return &Point::y; } // CHECK: cir.func @_Z5test1v() -> !cir.data_member -// CHECK: %{{.+}} = cir.const(#cir.data_member<1> : !cir.data_member) : !cir.data_member +// CHECK: %{{.+}} = cir.const #cir.data_member<1> : !cir.data_member // CHECK: } int test2(const Point &pt, int Point::*member) { @@ -51,12 +51,12 @@ auto test_null() -> int Point::* { return nullptr; } // CHECK: cir.func @_Z9test_nullv -// CHECK: %{{.+}} = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member +// CHECK: %{{.+}} = cir.const #cir.data_member : !cir.data_member // CHECK: } auto test_null_incomplete() -> int Incomplete::* { return nullptr; } // CHECK: cir.func @_Z20test_null_incompletev -// CHECK: %{{.+}} = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member +// CHECK: %{{.+}} = cir.const #cir.data_member : !cir.data_member // CHECK: } diff --git a/clang/test/CIR/CodeGen/pointers.cpp b/clang/test/CIR/CodeGen/pointers.cpp index 874e0984aad2..dfea22ebadd6 100644 --- a/clang/test/CIR/CodeGen/pointers.cpp +++ b/clang/test/CIR/CodeGen/pointers.cpp @@ -4,23 +4,23 @@ // Should generate basic pointer arithmetics. void foo(int *iptr, char *cptr, unsigned ustride) { iptr + 2; - // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: %[[#STRIDE:]] = cir.const #cir.int<2> : !s32i // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr cptr + 3; - // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<3> : !s32i) : !s32i + // CHECK: %[[#STRIDE:]] = cir.const #cir.int<3> : !s32i // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr iptr - 2; - // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: %[[#STRIDE:]] = cir.const #cir.int<2> : !s32i // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#STRIDE]]) : !s32i, !s32i // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr cptr - 3; - // CHECK: %[[#STRIDE:]] = cir.const(#cir.int<3> : !s32i) : !s32i + // CHECK: %[[#STRIDE:]] = cir.const #cir.int<3> : !s32i // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#STRIDE]]) : !s32i, !s32i // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr iptr + ustride; // CHECK: %[[#STRIDE:]] = cir.load %{{.+}} : !cir.ptr, !u32i // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#STRIDE]] : !u32i), !cir.ptr - + // Must convert unsigned stride to a signed one. iptr - ustride; // CHECK: %[[#STRIDE:]] = cir.load %{{.+}} : !cir.ptr, !u32i @@ -33,7 +33,7 @@ void testPointerSubscriptAccess(int *ptr) { // CHECK: testPointerSubscriptAccess ptr[1]; // CHECK: %[[#V1:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr - // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#V2:]] = cir.const #cir.int<1> : !s32i // CHECK: cir.ptr_stride(%[[#V1]] : !cir.ptr, %[[#V2]] : !s32i), !cir.ptr } @@ -41,9 +41,9 @@ void testPointerMultiDimSubscriptAccess(int **ptr) { // CHECK: testPointerMultiDimSubscriptAccess ptr[1][2]; // CHECK: %[[#V1:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> - // CHECK: %[[#V2:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#V2:]] = cir.const #cir.int<1> : !s32i // CHECK: %[[#V3:]] = cir.ptr_stride(%[[#V1]] : !cir.ptr>, %[[#V2]] : !s32i), !cir.ptr> // CHECK: %[[#V4:]] = cir.load %[[#V3]] : !cir.ptr>, !cir.ptr - // CHECK: %[[#V5:]] = cir.const(#cir.int<2> : !s32i) : !s32i + // CHECK: %[[#V5:]] = cir.const #cir.int<2> : !s32i // CHECK: cir.ptr_stride(%[[#V4]] : !cir.ptr, %[[#V5]] : !s32i), !cir.ptr } diff --git a/clang/test/CIR/CodeGen/predefined.cpp b/clang/test/CIR/CodeGen/predefined.cpp index 35bc4bff8e73..b5ec86d41aff 100644 --- a/clang/test/CIR/CodeGen/predefined.cpp +++ b/clang/test/CIR/CodeGen/predefined.cpp @@ -12,7 +12,7 @@ void m() { // CHECK: cir.func @_Z1mv() // CHECK: %0 = cir.get_global @".str" : !cir.ptr> // CHECK: %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr -// CHECK: %2 = cir.const(#cir.int<79> : !s32i) : !s32i +// CHECK: %2 = cir.const #cir.int<79> : !s32i // CHECK: %3 = cir.get_global @".str1" : !cir.ptr> // CHECK: %4 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr // CHECK: %5 = cir.get_global @".str2" : !cir.ptr> diff --git a/clang/test/CIR/CodeGen/ptrdiff.cpp b/clang/test/CIR/CodeGen/ptrdiff.cpp index 33149e2aee53..e322c9c6388a 100644 --- a/clang/test/CIR/CodeGen/ptrdiff.cpp +++ b/clang/test/CIR/CodeGen/ptrdiff.cpp @@ -18,7 +18,7 @@ long add(char *a, char *b) { // CHECK: cir.func @_Z3addPcS_(%arg0: !cir.ptr // %5 = cir.ptr_diff(%3, %4) : !cir.ptr -> !s64i -// %6 = cir.const(#cir.int<1> : !s32i) : !s32i +// %6 = cir.const #cir.int<1> : !s32i // %7 = cir.cast(integral, %6 : !s32i), !s64i // %8 = cir.binop(add, %5, %7) : !s64i diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index d56567b55cac..2f5932f51e94 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -52,10 +52,10 @@ void init(unsigned numImages) { // CHECK: cir.store %12, %7 : !cir.ptr, !cir.ptr> // CHECK: cir.scope { // CHECK: %13 = cir.alloca !ty_22triple22, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: %14 = cir.const(#cir.zero : !ty_22triple22) : !ty_22triple22 +// CHECK: %14 = cir.const #cir.zero : !ty_22triple22 // CHECK: cir.store %14, %13 : !ty_22triple22, !cir.ptr // CHECK: %15 = cir.get_member %13[0] {name = "type"} : !cir.ptr -> !cir.ptr -// CHECK: %16 = cir.const(#cir.int<1000024002> : !u32i) : !u32i +// CHECK: %16 = cir.const #cir.int<1000024002> : !u32i // CHECK: cir.store %16, %15 : !u32i, !cir.ptr // CHECK: %17 = cir.get_member %13[1] {name = "next"} : !cir.ptr -> !cir.ptr> // CHECK: %18 = cir.get_member %13[2] {name = "image"} : !cir.ptr -> !cir.ptr diff --git a/clang/test/CIR/CodeGen/scope.cir b/clang/test/CIR/CodeGen/scope.cir index 0c4a5df73fae..2d14784c33f8 100644 --- a/clang/test/CIR/CodeGen/scope.cir +++ b/clang/test/CIR/CodeGen/scope.cir @@ -6,7 +6,7 @@ module { cir.func @foo() { cir.scope { %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<4> : !u32i) : !u32i + %1 = cir.const #cir.int<4> : !u32i cir.store %1, %0 : !u32i, !cir.ptr } cir.return @@ -15,7 +15,7 @@ module { // CHECK: cir.br ^bb1 // CHECK: ^bb1: // pred: ^bb0 // CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} -// CHECK: %1 = cir.const(#cir.int<4> : !u32i) : !u32i +// CHECK: %1 = cir.const #cir.int<4> : !u32i // CHECK: cir.store %1, %0 : !u32i, !cir.ptr // CHECK: cir.br ^bb2 // CHECK: ^bb2: // pred: ^bb1 @@ -35,7 +35,7 @@ module { cir.func @scope_with_return() -> !u32i { %0 = cir.alloca !u32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} cir.scope { - %2 = cir.const(#cir.int<0> : !u32i) : !u32i + %2 = cir.const #cir.int<0> : !u32i cir.store %2, %0 : !u32i, !cir.ptr %3 = cir.load %0 : !cir.ptr, !u32i cir.return %3 : !u32i @@ -48,7 +48,7 @@ module { // CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK: cir.br ^bb1 // CHECK: ^bb1: // pred: ^bb0 -// CHECK: %1 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: %1 = cir.const #cir.int<0> : !u32i // CHECK: cir.store %1, %0 : !u32i, !cir.ptr // CHECK: %2 = cir.load %0 : !cir.ptr, !u32i // CHECK: cir.return %2 : !u32i diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index b8122b3acb7f..cc456e6cf58b 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -32,13 +32,13 @@ int s0(int a, int b) { // CIR: cir.store %6, %3 : !s32i, !cir.ptr loc(#loc23) // CIR: cir.scope { // CIR: %9 = cir.load %3 : !cir.ptr, !s32i loc(#loc13) -// CIR: %10 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc14) +// CIR: %10 = cir.const #cir.int<0> : !s32i loc(#loc14) // CIR: %11 = cir.cmp(gt, %9, %10) : !s32i, !cir.bool loc(#loc26) // CIR: cir.if %11 { -// CIR: %12 = cir.const(#cir.int<0> : !s32i) : !s32i loc(#loc16) +// CIR: %12 = cir.const #cir.int<0> : !s32i loc(#loc16) // CIR: cir.store %12, %3 : !s32i, !cir.ptr loc(#loc28) // CIR: } else { -// CIR: %12 = cir.const(#cir.int<1> : !s32i) : !s32i loc(#loc12) +// CIR: %12 = cir.const #cir.int<1> : !s32i loc(#loc12) // CIR: cir.store %12, %3 : !s32i, !cir.ptr loc(#loc29) // CIR: } loc(#loc27) // CIR: } loc(#loc25) diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 390dd8fb456a..2d51cb1514d9 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -21,7 +21,7 @@ static Init __ioinit2(false); // BEFORE-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) // BEFORE-NEXT: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { // BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr -// BEFORE-NEXT: %1 = cir.const(#true) : !cir.bool +// BEFORE-NEXT: %1 = cir.const #true // BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // BEFORE-NEXT: } dtor { // BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr @@ -29,7 +29,7 @@ static Init __ioinit2(false); // BEFORE-NEXT: } {ast = #cir.var.decl.ast} // BEFORE: cir.global "private" internal @_ZL9__ioinit2 = ctor : !ty_22Init22 { // BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr -// BEFORE-NEXT: %1 = cir.const(#false) : !cir.bool +// BEFORE-NEXT: %1 = cir.const #false // BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // BEFORE-NEXT: } dtor { // BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr @@ -46,7 +46,7 @@ static Init __ioinit2(false); // AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init() // AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr -// AFTER-NEXT: %1 = cir.const(#true) : !cir.bool +// AFTER-NEXT: %1 = cir.const #true // AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // AFTER-NEXT: %2 = cir.get_global @_ZL8__ioinit : !cir.ptr // AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> @@ -58,7 +58,7 @@ static Init __ioinit2(false); // AFTER: cir.global "private" internal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() // AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr -// AFTER-NEXT: %1 = cir.const(#false) : !cir.bool +// AFTER-NEXT: %1 = cir.const #false // AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // AFTER-NEXT: %2 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> diff --git a/clang/test/CIR/CodeGen/std-array.cpp b/clang/test/CIR/CodeGen/std-array.cpp index ac4b119bdeb0..7b7fe1f86782 100644 --- a/clang/test/CIR/CodeGen/std-array.cpp +++ b/clang/test/CIR/CodeGen/std-array.cpp @@ -12,6 +12,6 @@ void t() { // CHECK: {{.*}} = cir.get_member // CHECK: {{.*}} = cir.cast(array_to_ptrdecay -// CHECK: {{.*}} = cir.const(#cir.int<9> : !u32i) : !u32i +// CHECK: {{.*}} = cir.const #cir.int<9> : !u32i // CHECK: cir.call @_ZNSt5arrayIhLj9EE3endEv \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/store.c b/clang/test/CIR/CodeGen/store.c index 3e33641a2dd7..1bc215f75c3b 100644 --- a/clang/test/CIR/CodeGen/store.c +++ b/clang/test/CIR/CodeGen/store.c @@ -8,9 +8,9 @@ void foo(void) { // CHECK: cir.func @foo() // CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %1 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr -// CHECK-NEXT: %2 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %2 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store %2, %0 : !s32i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 88be38d9df3d..7f4b3d78f6a5 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -36,7 +36,7 @@ void shouldConstInitStructs(void) { // CHECK: cir.func @shouldConstInitStructs struct Foo f = {1, 2, {3, 4}}; // CHECK: %[[#V0:]] = cir.alloca !ty_22Foo22, !cir.ptr, ["f"] {alignment = 4 : i64} - // CHECK: %[[#V1:]] = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_22Bar22}> : !ty_22Foo22) : !ty_22Foo22 + // CHECK: %[[#V1:]] = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_22Bar22}> : !ty_22Foo22 // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22Foo22, !cir.ptr } @@ -80,13 +80,13 @@ struct Bar shouldGenerateAndAccessStructArrays(void) { return s[0]; } // CHECK-DAG: cir.func @shouldGenerateAndAccessStructArrays -// CHECK-DAG: %[[#STRIDE:]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-DAG: %[[#STRIDE:]] = cir.const #cir.int<0> : !s32i // CHECK-DAG: %[[#DARR:]] = cir.cast(array_to_ptrdecay, %{{.+}} : !cir.ptr>), !cir.ptr // CHECK-DAG: %[[#ELT:]] = cir.ptr_stride(%[[#DARR]] : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr // CHECK-DAG: cir.copy %[[#ELT]] to %{{.+}} : !cir.ptr // CHECK-DAG: cir.func @local_decl -// CHECK-DAG: {{%.}} = cir.alloca !ty_22Local22, !cir.ptr, ["a"] +// CHECK-DAG: {{%.}} = cir.alloca !ty_22Local22, !cir.ptr, ["a"] void local_decl(void) { struct Local { int i; diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 75e3e694d800..1a594f3756ab 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -68,9 +68,9 @@ void yoyo(incomplete *i) {} // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["result", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca !ty_22Foo22, !cir.ptr, ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () -// CHECK-NEXT: %3 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.const #cir.int<4> : !s32i // CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, !s32i) -> () -// CHECK-NEXT: %4 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<4> : !s32i // CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, !s32i) -> !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.return @@ -100,13 +100,13 @@ void m() { Adv C; } // CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %2 = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr // CHECK: %3 = cir.get_member %2[0] {name = "w"} : !cir.ptr -> !cir.ptr -// CHECK: %4 = cir.const(#cir.int<1000024001> : !u32i) : !u32i +// CHECK: %4 = cir.const #cir.int<1000024001> : !u32i // CHECK: cir.store %4, %3 : !u32i, !cir.ptr // CHECK: %5 = cir.get_member %2[1] {name = "n"} : !cir.ptr -> !cir.ptr> -// CHECK: %6 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %6 = cir.const #cir.ptr : !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, !cir.ptr> // CHECK: %7 = cir.get_member %2[2] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: %8 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %8 = cir.const #cir.int<0> : !s32i // CHECK: cir.store %8, %7 : !s32i, !cir.ptr // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/switch.cir b/clang/test/CIR/CodeGen/switch.cir index 39664bfa3957..1ea6dba49c98 100644 --- a/clang/test/CIR/CodeGen/switch.cir +++ b/clang/test/CIR/CodeGen/switch.cir @@ -33,7 +33,7 @@ module { case (equal, 1) { cir.break } - ] + ] cir.return } // CHECK: cir.func @shouldFlatSwitchWithoutDefault(%arg0: !s32i) { @@ -52,7 +52,7 @@ module { case (anyof, [1, 2] : !s64i) { cir.break } - ] + ] cir.return } // CHECK: cir.func @shouldFlatSwitchWithImplicitFallthrough(%arg0: !s64i) { @@ -72,11 +72,11 @@ module { cir.switch (%arg0 : !s64i) [ case (equal, 1 : !s64i) { // case 1 has its own region cir.yield // fallthrough to case 2 - }, + }, case (equal, 2 : !s64i) { cir.break - } - ] + } + ] cir.return } // CHECK: cir.func @shouldFlatSwitchWithExplicitFallthrough(%arg0: !s64i) { @@ -96,8 +96,8 @@ module { cir.switch (%arg0 : !s64i) [ case (equal, 1 : !s64i) { cir.yield // fallthrough to exit - } - ] + } + ] cir.return } // CHECK: cir.func @shouldFlatSwitchWithFallthroughToExit(%arg0: !s64i) { @@ -116,7 +116,7 @@ module { // CHECK-NOT: llvm.switch cir.return } -// CHECK: cir.func @shouldDropEmptySwitch(%arg0: !s64i) +// CHECK: cir.func @shouldDropEmptySwitch(%arg0: !s64i) // CHECK-NOT: cir.switch.flat @@ -131,7 +131,7 @@ module { ^bb1: // no predecessors cir.break } - ] + ] } cir.return } @@ -168,7 +168,7 @@ module { case (equal, 0) { cir.scope { %6 = cir.load %1 : !cir.ptr, !s32i - %7 = cir.const(#cir.int<0> : !s32i) : !s32i + %7 = cir.const #cir.int<0> : !s32i %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool cir.if %9 { @@ -177,12 +177,12 @@ module { } cir.break } - ] - } - %3 = cir.const(#cir.int<3> : !s32i) : !s32i + ] + } + %3 = cir.const #cir.int<3> : !s32i cir.store %3, %2 : !s32i, !cir.ptr %4 = cir.load %2 : !cir.ptr, !s32i - cir.return %4 : !s32i + cir.return %4 : !s32i } // CHECK: cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { // CHECK: cir.switch.flat %3 : !s32i, ^bb7 [ @@ -200,7 +200,7 @@ module { // CHECK: cir.br ^bb7 // CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 // CHECK: cir.br ^bb8 -// CHECK: ^bb8: // pred: ^bb7 +// CHECK: ^bb8: // pred: ^bb7 // CHECK: cir.return %9 : !s32i // CHECK: } diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 7ba1185c660c..a19a55348ff6 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -19,7 +19,7 @@ void sw1(int a) { // CHECK: cir.switch (%3 : !s32i) [ // CHECK-NEXT: case (equal, 0) { // CHECK-NEXT: %4 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i // CHECK-NEXT: cir.store %6, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.break @@ -31,10 +31,10 @@ void sw1(int a) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %4 = cir.alloca !s32i, !cir.ptr, ["yolo", init] // CHECK-NEXT: %5 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %6 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i // CHECK-NEXT: cir.store %7, %1 : !s32i, !cir.ptr -// CHECK-NEXT: %8 = cir.const(#cir.int<100> : !s32i) : !s32i +// CHECK-NEXT: %8 = cir.const #cir.int<100> : !s32i // CHECK-NEXT: cir.store %8, %4 : !s32i, !cir.ptr // CHECK-NEXT: cir.break // CHECK-NEXT: } @@ -57,7 +57,7 @@ void sw2(int a) { // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["fomo", init] // CHECK: cir.switch (%4 : !s32i) [ // CHECK-NEXT: case (equal, 3) { -// CHECK-NEXT: %5 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store %5, %2 : !s32i, !cir.ptr void sw3(int a) { @@ -91,7 +91,7 @@ int sw4(int a) { // CHECK: cir.switch (%4 : !s32i) [ // CHECK-NEXT: case (equal, 42) { // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %5 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<3> : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %6 : !s32i @@ -99,7 +99,7 @@ int sw4(int a) { // CHECK-NEXT: cir.yield // CHECK-NEXT: }, // CHECK-NEXT: case (default) { -// CHECK-NEXT: %5 = cir.const(#cir.int<2> : !s32i) : !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<2> : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %6 : !s32i @@ -180,13 +180,13 @@ void sw8(int a) { //CHECK-NEXT: } //CHECK-NEXT: case (default) { //CHECK-NEXT: cir.break -//CHECK-NEXT: } +//CHECK-NEXT: } void sw9(int a) { switch (a) { case 3: - break; + break; default: case 4: break; @@ -208,8 +208,8 @@ void sw10(int a) { switch (a) { case 3: - break; - case 4: + break; + case 4: default: case 5: break; @@ -234,9 +234,9 @@ void sw11(int a) { switch (a) { case 3: - break; + break; case 4: - case 5: + case 5: default: case 6: case 7: diff --git a/clang/test/CIR/CodeGen/ternary.cir b/clang/test/CIR/CodeGen/ternary.cir index 715061f15fb9..fedfbcbbc5ea 100644 --- a/clang/test/CIR/CodeGen/ternary.cir +++ b/clang/test/CIR/CodeGen/ternary.cir @@ -8,13 +8,13 @@ module { %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, !cir.ptr %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %3 = cir.const #cir.int<0> : !s32i %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool %5 = cir.ternary(%4, true { - %7 = cir.const(#cir.int<3> : !s32i) : !s32i + %7 = cir.const #cir.int<3> : !s32i cir.yield %7 : !s32i }, false { - %7 = cir.const(#cir.int<5> : !s32i) : !s32i + %7 = cir.const #cir.int<5> : !s32i cir.yield %7 : !s32i }) : (!cir.bool) -> !s32i cir.store %5, %1 : !s32i, !cir.ptr @@ -27,14 +27,14 @@ module { // CHECK: %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK: cir.store %arg0, %0 : !s32i, !cir.ptr // CHECK: %2 = cir.load %0 : !cir.ptr, !s32i -// CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %3 = cir.const #cir.int<0> : !s32i // CHECK: %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool // CHECK: cir.brcond %4 ^bb1, ^bb2 // CHECK: ^bb1: // pred: ^bb0 -// CHECK: %5 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: %5 = cir.const #cir.int<3> : !s32i // CHECK: cir.br ^bb3(%5 : !s32i) // CHECK: ^bb2: // pred: ^bb0 -// CHECK: %6 = cir.const(#cir.int<5> : !s32i) : !s32i +// CHECK: %6 = cir.const #cir.int<5> : !s32i // CHECK: cir.br ^bb3(%6 : !s32i) // CHECK: ^bb3(%7: !s32i): // 2 preds: ^bb1, ^bb2 // CHECK: cir.br ^bb4 @@ -43,5 +43,5 @@ module { // CHECK: %8 = cir.load %1 : !cir.ptr, !s32i // CHECK: cir.return %8 : !s32i // CHECK: } - + } diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp index 452745633d85..6475add8e2b4 100644 --- a/clang/test/CIR/CodeGen/ternary.cpp +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -10,13 +10,13 @@ int x(int y) { // CHECK: %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK: cir.store %arg0, %0 : !s32i, !cir.ptr // CHECK: %2 = cir.load %0 : !cir.ptr, !s32i -// CHECK: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %3 = cir.const #cir.int<0> : !s32i // CHECK: %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool // CHECK: %5 = cir.ternary(%4, true { -// CHECK: %7 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: %7 = cir.const #cir.int<3> : !s32i // CHECK: cir.yield %7 : !s32i // CHECK: }, false { -// CHECK: %7 = cir.const(#cir.int<5> : !s32i) : !s32i +// CHECK: %7 = cir.const #cir.int<5> : !s32i // CHECK: cir.yield %7 : !s32i // CHECK: }) : (!cir.bool) -> !s32i // CHECK: cir.store %5, %1 : !s32i, !cir.ptr @@ -40,11 +40,11 @@ void m(APIType api) { // CHECK: cir.store %arg0, %0 : !u32i, !cir.ptr // CHECK: %1 = cir.load %0 : !cir.ptr, !u32i // CHECK: %2 = cir.cast(integral, %1 : !u32i), !s32i -// CHECK: %3 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: %3 = cir.const #cir.int<0> : !u32i // CHECK: %4 = cir.cast(integral, %3 : !u32i), !s32i // CHECK: %5 = cir.cmp(eq, %2, %4) : !s32i, !cir.bool // CHECK: cir.ternary(%5, true { -// CHECK: %6 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: %6 = cir.const #cir.int<0> : !s32i // CHECK: cir.yield // CHECK: }, false { // CHECK: %6 = cir.get_global @".str" : !cir.ptr> @@ -66,7 +66,7 @@ int foo(int a, int b) { // CHECK: [[B0:%.*]] = cir.load {{.*}} : !cir.ptr, !s32i // CHECK: [[CMP:%.*]] = cir.cmp(lt, [[A0]], [[B0]]) : !s32i, !cir.bool // CHECK: [[RES:%.*]] = cir.ternary([[CMP]], true { -// CHECK: [[ZERO:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i // CHECK: cir.yield [[ZERO]] : !s32i // CHECK: }, false { // CHECK: [[A1:%.*]] = cir.load {{.*}} : !cir.ptr, !s32i diff --git a/clang/test/CIR/CodeGen/three-way-comparison.cpp b/clang/test/CIR/CodeGen/three-way-comparison.cpp index 074c5c70f4a1..28e094574de5 100644 --- a/clang/test/CIR/CodeGen/three-way-comparison.cpp +++ b/clang/test/CIR/CodeGen/three-way-comparison.cpp @@ -30,24 +30,24 @@ auto three_way_strong(int x, int y) { // NONCANONICAL-AFTER: #cmp3way_info_strong_ltn1eq0gt1_ = #cir.cmp3way_info // NONCANONICAL-AFTER: cir.func @_Z16three_way_strongii // NONCANONICAL-AFTER: %[[#CMP3WAY_RESULT:]] = cir.cmp3way(%{{.+}} : !s32i, %{{.+}}, #cmp3way_info_strong_ltn1eq0gt1_) : !s8i -// NONCANONICAL-AFTER-NEXT: %[[#NEGONE:]] = cir.const(#cir.int<-1> : !s8i) : !s8i -// NONCANONICAL-AFTER-NEXT: %[[#ONE:]] = cir.const(#cir.int<1> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#NEGONE:]] = cir.const #cir.int<-1> : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#ONE:]] = cir.const #cir.int<1> : !s8i // NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_NEGONE:]] = cir.cmp(eq, %[[#CMP3WAY_RESULT]], %[[#NEGONE]]) : !s8i, !cir.bool // NONCANONICAL-AFTER-NEXT: %[[#A:]] = cir.ternary(%[[#CMP_TO_NEGONE]], true { // NONCANONICAL-AFTER-NEXT: cir.yield %[[#ONE]] : !s8i // NONCANONICAL-AFTER-NEXT: }, false { // NONCANONICAL-AFTER-NEXT: cir.yield %[[#CMP3WAY_RESULT]] : !s8i // NONCANONICAL-AFTER-NEXT: }) : (!cir.bool) -> !s8i -// NONCANONICAL-AFTER-NEXT: %[[#ZERO:]] = cir.const(#cir.int<0> : !s8i) : !s8i -// NONCANONICAL-AFTER-NEXT: %[[#TWO:]] = cir.const(#cir.int<2> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#ZERO:]] = cir.const #cir.int<0> : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#TWO:]] = cir.const #cir.int<2> : !s8i // NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_ZERO:]] = cir.cmp(eq, %[[#A]], %[[#ZERO]]) : !s8i, !cir.bool // NONCANONICAL-AFTER-NEXT: %[[#B:]] = cir.ternary(%[[#CMP_TO_ZERO]], true { // NONCANONICAL-AFTER-NEXT: cir.yield %[[#TWO]] : !s8i // NONCANONICAL-AFTER-NEXT: }, false { // NONCANONICAL-AFTER-NEXT: cir.yield %[[#A]] : !s8i // NONCANONICAL-AFTER-NEXT: }) : (!cir.bool) -> !s8i -// NONCANONICAL-AFTER-NEXT: %[[#ONE2:]] = cir.const(#cir.int<1> : !s8i) : !s8i -// NONCANONICAL-AFTER-NEXT: %[[#THREE:]] = cir.const(#cir.int<3> : !s8i) : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#ONE2:]] = cir.const #cir.int<1> : !s8i +// NONCANONICAL-AFTER-NEXT: %[[#THREE:]] = cir.const #cir.int<3> : !s8i // NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_ONE:]] = cir.cmp(eq, %[[#B]], %[[#ONE2]]) : !s8i, !cir.bool // NONCANONICAL-AFTER-NEXT: %{{.+}} = cir.ternary(%[[#CMP_TO_ONE]], true { // NONCANONICAL-AFTER-NEXT: cir.yield %[[#THREE]] : !s8i @@ -67,10 +67,10 @@ auto three_way_weak(float x, float y) { // AFTER: cir.func @_Z14three_way_weakff // AFTER: %[[#LHS:]] = cir.load %0 : !cir.ptr, !cir.float // AFTER-NEXT: %[[#RHS:]] = cir.load %1 : !cir.ptr, !cir.float -// AFTER-NEXT: %[[#LT:]] = cir.const(#cir.int<-1> : !s8i) : !s8i -// AFTER-NEXT: %[[#EQ:]] = cir.const(#cir.int<0> : !s8i) : !s8i -// AFTER-NEXT: %[[#GT:]] = cir.const(#cir.int<1> : !s8i) : !s8i -// AFTER-NEXT: %[[#UNORDERED:]] = cir.const(#cir.int<-127> : !s8i) : !s8i +// AFTER-NEXT: %[[#LT:]] = cir.const #cir.int<-1> : !s8i +// AFTER-NEXT: %[[#EQ:]] = cir.const #cir.int<0> : !s8i +// AFTER-NEXT: %[[#GT:]] = cir.const #cir.int<1> : !s8i +// AFTER-NEXT: %[[#UNORDERED:]] = cir.const #cir.int<-127> : !s8i // AFTER-NEXT: %[[#CMP_LT:]] = cir.cmp(lt, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool // AFTER-NEXT: %[[#CMP_EQ:]] = cir.cmp(eq, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool // AFTER-NEXT: %[[#CMP_GT:]] = cir.cmp(gt, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 99ee97d76719..46d6315e53a2 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -62,7 +62,7 @@ unsigned long long tc2() { } catch (...) { // CHECK: type (#cir.all) // CHECK: cir.catch_param - // CHECK: cir.const(#cir.int<100> : !s32i) : !s32i + // CHECK: cir.const #cir.int<100> : !s32i z = 100; } @@ -79,7 +79,7 @@ unsigned long long tc3() { } catch (...) { // CHECK: type (#cir.all) // CHECK: cir.catch_param - // CHECK: cir.const(#cir.int<100> : !s32i) : !s32i + // CHECK: cir.const #cir.int<100> : !s32i z = 100; } diff --git a/clang/test/CIR/CodeGen/types-nullptr.cpp b/clang/test/CIR/CodeGen/types-nullptr.cpp index 55f42be785fb..00250f438940 100644 --- a/clang/test/CIR/CodeGen/types-nullptr.cpp +++ b/clang/test/CIR/CodeGen/types-nullptr.cpp @@ -5,5 +5,5 @@ typedef decltype(nullptr) nullptr_t; void f() { nullptr_t t = nullptr; } // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr> -// CHECK: %1 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr +// CHECK: %1 = cir.const #cir.ptr : !cir.ptr // CHECK: cir.store %1, %0 : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index f6d99af23b5a..5f6a451cee75 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -46,7 +46,7 @@ int inc0() { // CHECK: cir.func @_Z4inc0v() -> !s32i // CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] -// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %[[#ATMP:]] = cir.const #cir.int<1> : !s32i // CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#INPUT]]) @@ -65,7 +65,7 @@ int dec0() { // CHECK: cir.func @_Z4dec0v() -> !s32i // CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] -// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %[[#ATMP:]] = cir.const #cir.int<1> : !s32i // CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#INCREMENTED:]] = cir.unary(dec, %[[#INPUT]]) @@ -85,7 +85,7 @@ int inc1() { // CHECK: cir.func @_Z4inc1v() -> !s32i // CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] -// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %[[#ATMP:]] = cir.const #cir.int<1> : !s32i // CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#INPUT]]) @@ -104,7 +104,7 @@ int dec1() { // CHECK: cir.func @_Z4dec1v() -> !s32i // CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] -// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %[[#ATMP:]] = cir.const #cir.int<1> : !s32i // CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#INPUT:]] = cir.load %[[#A]] // CHECK: %[[#INCREMENTED:]] = cir.unary(dec, %[[#INPUT]]) @@ -125,7 +125,7 @@ int inc2() { // CHECK: %[[#RET:]] = cir.alloca !s32i, !cir.ptr, ["__retval"] // CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] // CHECK: %[[#B:]] = cir.alloca !s32i, !cir.ptr, ["b", init] -// CHECK: %[[#ATMP:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %[[#ATMP:]] = cir.const #cir.int<1> : !s32i // CHECK: cir.store %[[#ATMP]], %[[#A]] : !s32i // CHECK: %[[#ATOB:]] = cir.load %[[#A]] // CHECK: %[[#INCREMENTED:]] = cir.unary(inc, %[[#ATOB]]) @@ -146,11 +146,11 @@ int *inc_p(int *i) { // CHECK: %[[#i_addr:]] = cir.alloca !cir.ptr, !cir.ptr>, ["i", init] {alignment = 8 : i64} // CHECK: %[[#i_dec:]] = cir.load %[[#i_addr]] : !cir.ptr>, !cir.ptr -// CHECK: %[[#dec_const:]] = cir.const(#cir.int<-1> : !s32i) : !s32i +// CHECK: %[[#dec_const:]] = cir.const #cir.int<-1> : !s32i // CHECK: = cir.ptr_stride(%[[#i_dec]] : !cir.ptr, %[[#dec_const]] : !s32i), !cir.ptr // CHECK: %[[#i_inc:]] = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %[[#inc_const:]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %[[#inc_const:]] = cir.const #cir.int<1> : !s32i // CHECK: = cir.ptr_stride(%[[#i_inc]] : !cir.ptr, %[[#inc_const]] : !s32i), !cir.ptr void floats(float f) { @@ -189,19 +189,19 @@ void pointers(int *p) { // CHECK: cir.unary(plus, %{{.+}}) : !cir.ptr, !cir.ptr ++p; - // CHECK: %[[#INC:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#INC:]] = cir.const #cir.int<1> : !s32i // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#INC]] : !s32i), !cir.ptr // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, !cir.ptr> --p; - // CHECK: %[[#DEC:]] = cir.const(#cir.int<-1> : !s32i) : !s32i + // CHECK: %[[#DEC:]] = cir.const #cir.int<-1> : !s32i // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#DEC]] : !s32i), !cir.ptr // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, !cir.ptr> p++; - // CHECK: %[[#INC:]] = cir.const(#cir.int<1> : !s32i) : !s32i + // CHECK: %[[#INC:]] = cir.const #cir.int<1> : !s32i // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#INC]] : !s32i), !cir.ptr // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, !cir.ptr> p--; - // CHECK: %[[#DEC:]] = cir.const(#cir.int<-1> : !s32i) : !s32i + // CHECK: %[[#DEC:]] = cir.const #cir.int<-1> : !s32i // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#DEC]] : !s32i), !cir.ptr // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index b041bc5533c7..9cf96adcaf48 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -22,21 +22,21 @@ void foo(int x) { // CHECK: [[TMP4:%.*]] = cir.load [[TMP0]] : !cir.ptr, !s32i // CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP4]] : !s32i), !u32i // CHECK: [[TMP6:%.*]] = cir.load [[TMP3]] : !cir.ptr, !u32i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i +// CHECK: [[TMP7:%.*]] = cir.const #cir.int<65535> : !u32i // CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i -// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<4294901760> : !u32i) : !u32i +// CHECK: [[TMP9:%.*]] = cir.const #cir.int<4294901760> : !u32i // CHECK: [[TMP10:%.*]] = cir.binop(and, [[TMP6]], [[TMP9]]) : !u32i // CHECK: [[TMP11:%.*]] = cir.binop(or, [[TMP10]], [[TMP8]]) : !u32i // CHECK: cir.store [[TMP11]], [[TMP3]] : !u32i, !cir.ptr // CHECK: [[TMP12:%.*]] = cir.cast(bitcast, [[TMP2]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP13:%.*]] = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK: [[TMP13:%.*]] = cir.const #cir.int<0> : !s32i // CHECK: [[TMP14:%.*]] = cir.cast(integral, [[TMP13]] : !s32i), !u32i // CHECK: [[TMP15:%.*]] = cir.load [[TMP12]] : !cir.ptr, !u32i -// CHECK: [[TMP16:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i +// CHECK: [[TMP16:%.*]] = cir.const #cir.int<65535> : !u32i // CHECK: [[TMP17:%.*]] = cir.binop(and, [[TMP14]], [[TMP16]]) : !u32i -// CHECK: [[TMP18:%.*]] = cir.const(#cir.int<16> : !u32i) : !u32i +// CHECK: [[TMP18:%.*]] = cir.const #cir.int<16> : !u32i // CHECK: [[TMP19:%.*]] = cir.shift(left, [[TMP17]] : !u32i, [[TMP18]] : !u32i) -> !u32i -// CHECK: [[TMP20:%.*]] = cir.const(#cir.int<65535> : !u32i) : !u32i +// CHECK: [[TMP20:%.*]] = cir.const #cir.int<65535> : !u32i // CHECK: [[TMP21:%.*]] = cir.binop(and, [[TMP15]], [[TMP20]]) : !u32i // CHECK: [[TMP22:%.*]] = cir.binop(or, [[TMP21]], [[TMP19]]) : !u32i // CHECK: cir.store [[TMP22]], [[TMP12]] : !u32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index ef252f4fa3e5..d9f28057cd2e 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -81,11 +81,11 @@ typedef union { short a; int b; } A; - + void noCrushOnDifferentSizes() { A a = {0}; // CHECK: %[[#TMP0:]] = cir.alloca !ty_22A22, !cir.ptr, ["a"] {alignment = 4 : i64} // CHECK: %[[#TMP1:]] = cir.cast(bitcast, %[[#TMP0]] : !cir.ptr), !cir.ptr - // CHECK: %[[#TMP2:]] = cir.const(#cir.zero : !ty_anon_struct) : !ty_anon_struct + // CHECK: %[[#TMP2:]] = cir.const #cir.zero : !ty_anon_struct // CHECK: cir.store %[[#TMP2]], %[[#TMP1]] : !ty_anon_struct, !cir.ptr } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index f802e8d5fa57..0496e58097a1 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -19,13 +19,13 @@ void vector_int_test(int x) { // Incomplete vector initialization. vi4 bb = { x, x + 1 }; - // CHECK: %[[#zero:]] = cir.const(#cir.int<0> : !s32i) : !s32i + // CHECK: %[[#zero:]] = cir.const #cir.int<0> : !s32i // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %[[#zero]], %[[#zero]] : !s32i, !s32i, !s32i, !s32i) : !cir.vector // Scalar to vector conversion, a.k.a. vector splat. Only valid as an // operand of a binary operator, not as a regular conversion. bb = a + 7; - // CHECK: %[[#seven:]] = cir.const(#cir.int<7> : !s32i) : !s32i + // CHECK: %[[#seven:]] = cir.const #cir.int<7> : !s32i // CHECK: %{{[0-9]+}} = cir.vec.splat %[[#seven]] : !s32i, !cir.vector // Vector to vector conversion @@ -106,13 +106,13 @@ void vector_double_test(int x, double y) { // Incomplete vector initialization vd2 bb = { y }; - // CHECK: [[#dzero:]] = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double + // CHECK: [[#dzero:]] = cir.const #cir.fp<0.000000e+00> : !cir.double // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %[[#dzero]] : !cir.double, !cir.double) : !cir.vector // Scalar to vector conversion, a.k.a. vector splat. Only valid as an // operand of a binary operator, not as a regular conversion. bb = a + 2.5; - // CHECK: %[[#twohalf:]] = cir.const(#cir.fp<2.500000e+00> : !cir.double) : !cir.double + // CHECK: %[[#twohalf:]] = cir.const #cir.fp<2.500000e+00> : !cir.double // CHECK: %{{[0-9]+}} = cir.vec.splat %[[#twohalf]] : !cir.double, !cir.vector // Extract element diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 3aa8ee7826a4..5a92da4c87b2 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -56,7 +56,7 @@ class B : public A // CHECK: cir.func @_Z3foov() // CHECK: cir.scope { // CHECK: %0 = cir.alloca !ty_22B22, !cir.ptr, ["agg.tmp.ensured"] {alignment = 8 : i64} -// CHECK: %1 = cir.const(#cir.zero : ![[ClassB]]) : ![[ClassB]] +// CHECK: %1 = cir.const #cir.zero : ![[ClassB]] // CHECK: cir.store %1, %0 : ![[ClassB]], !cir.ptr // CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () // CHECK: } diff --git a/clang/test/CIR/IR/bit.cir b/clang/test/CIR/IR/bit.cir index 974f22606cdc..7a6d4d2e4bdb 100644 --- a/clang/test/CIR/IR/bit.cir +++ b/clang/test/CIR/IR/bit.cir @@ -11,14 +11,14 @@ module { cir.func @test() { - %s8 = cir.const(#cir.int<1> : !s8i) : !s8i - %s16 = cir.const(#cir.int<1> : !s16i) : !s16i - %s32 = cir.const(#cir.int<1> : !s32i) : !s32i - %s64 = cir.const(#cir.int<1> : !s64i) : !s64i - %u8 = cir.const(#cir.int<1> : !u8i) : !u8i - %u16 = cir.const(#cir.int<1> : !u16i) : !u16i - %u32 = cir.const(#cir.int<1> : !u32i) : !u32i - %u64 = cir.const(#cir.int<1> : !u64i) : !u64i + %s8 = cir.const #cir.int<1> : !s8i + %s16 = cir.const #cir.int<1> : !s16i + %s32 = cir.const #cir.int<1> : !s32i + %s64 = cir.const #cir.int<1> : !s64i + %u8 = cir.const #cir.int<1> : !u8i + %u16 = cir.const #cir.int<1> : !u16i + %u32 = cir.const #cir.int<1> : !u32i + %u64 = cir.const #cir.int<1> : !u64i %2 = cir.bit.clrsb(%s32 : !s32i) : !s32i %3 = cir.bit.clrsb(%s64 : !s64i) : !s32i @@ -47,14 +47,14 @@ module { // CHECK: module { // CHECK-NEXT: cir.func @test() { -// CHECK-NEXT: %0 = cir.const(#cir.int<1> : !s8i) : !s8i -// CHECK-NEXT: %1 = cir.const(#cir.int<1> : !s16i) : !s16i -// CHECK-NEXT: %2 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK-NEXT: %3 = cir.const(#cir.int<1> : !s64i) : !s64i -// CHECK-NEXT: %4 = cir.const(#cir.int<1> : !u8i) : !u8i -// CHECK-NEXT: %5 = cir.const(#cir.int<1> : !u16i) : !u16i -// CHECK-NEXT: %6 = cir.const(#cir.int<1> : !u32i) : !u32i -// CHECK-NEXT: %7 = cir.const(#cir.int<1> : !u64i) : !u64i +// CHECK-NEXT: %0 = cir.const #cir.int<1> : !s8i +// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s16i +// CHECK-NEXT: %2 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %3 = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !u8i +// CHECK-NEXT: %5 = cir.const #cir.int<1> : !u16i +// CHECK-NEXT: %6 = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: %7 = cir.const #cir.int<1> : !u64i // CHECK-NEXT: %8 = cir.bit.clrsb(%2 : !s32i) : !s32i // CHECK-NEXT: %9 = cir.bit.clrsb(%3 : !s64i) : !s32i // CHECK-NEXT: %10 = cir.bit.clz(%5 : !u16i) : !s32i diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index be4bffbe7fe1..986ed13a906e 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -7,7 +7,7 @@ module { %a = cir.cast (int_to_bool, %arg0 : !s32i), !cir.bool %3 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr - %4 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.const #cir.int<0> : !s32i cir.return } diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 24abc26e66d6..10fd9010cc1b 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -15,7 +15,7 @@ module { cir.func @f3() -> !s32i { %0 = cir.alloca !s32i, !cir.ptr, ["x", init] - %1 = cir.const(#cir.int<3> : !s32i) : !s32i + %1 = cir.const #cir.int<3> : !s32i cir.store %1, %0 : !s32i, !cir.ptr %2 = cir.load %0 : !cir.ptr, !s32i cir.return %2 : !s32i @@ -25,15 +25,15 @@ module { %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} cir.store %arg0, %1 : !s32i, !cir.ptr - %2 = cir.const(#cir.int<0> : !s32i) : !s32i + %2 = cir.const #cir.int<0> : !s32i cir.store %2, %0 : !s32i, !cir.ptr %3 = cir.load %1 : !cir.ptr, !s32i %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool cir.if %4 { - %6 = cir.const(#cir.int<3> : !s32i) : !s32i + %6 = cir.const #cir.int<3> : !s32i cir.store %6, %0 : !s32i, !cir.ptr } else { - %6 = cir.const(#cir.int<4> : !s32i) : !s32i + %6 = cir.const #cir.int<4> : !s32i cir.store %6, %0 : !s32i, !cir.ptr } %5 = cir.load %0 : !cir.ptr, !s32i @@ -68,7 +68,7 @@ module { // CHECK-NEXT: cir.func @f3() -> !s32i { // CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] -// CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: %1 = cir.const #cir.int<3> : !s32i // CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr // CHECK-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %2 : !s32i @@ -77,10 +77,10 @@ module { // CHECK: @if0(%arg0: !s32i) -> !s32i { // CHECK: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.if %4 { -// CHECK-NEXT: %6 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.const #cir.int<3> : !s32i // CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: } else { -// CHECK-NEXT: %6 = cir.const(#cir.int<4> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.const #cir.int<4> : !s32i // CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/data-member-ptr.cir b/clang/test/CIR/IR/data-member-ptr.cir index 6370877291a4..d8332514c07b 100644 --- a/clang/test/CIR/IR/data-member-ptr.cir +++ b/clang/test/CIR/IR/data-member-ptr.cir @@ -7,18 +7,18 @@ module { cir.func @null_member() { - %0 = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member + %0 = cir.const #cir.data_member : !cir.data_member cir.return } cir.func @get_runtime_member(%arg0: !cir.ptr) { - %0 = cir.const(#cir.data_member<0> : !cir.data_member) : !cir.data_member + %0 = cir.const #cir.data_member<0> : !cir.data_member %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr cir.return } cir.func @get_global_member(%arg0: !cir.ptr) { - %0 = cir.const(#global_ptr) : !cir.data_member + %0 = cir.const #global_ptr %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr cir.return } @@ -27,18 +27,18 @@ module { // CHECK: module { // CHECK-NEXT: cir.func @null_member() { -// CHECK-NEXT: %0 = cir.const(#cir.data_member : !cir.data_member) : !cir.data_member +// CHECK-NEXT: %0 = cir.const #cir.data_member : !cir.data_member // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: cir.func @get_runtime_member(%arg0: !cir.ptr) { -// CHECK-NEXT: %0 = cir.const(#cir.data_member<0> : !cir.data_member) : !cir.data_member +// CHECK-NEXT: %0 = cir.const #cir.data_member<0> : !cir.data_member // CHECK-NEXT: %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK-NEXT: cir.func @get_global_member(%arg0: !cir.ptr) { -// CHECK-NEXT: %0 = cir.const(#cir.data_member<0> : !cir.data_member) : !cir.data_member +// CHECK-NEXT: %0 = cir.const #cir.data_member<0> : !cir.data_member // CHECK-NEXT: %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/exceptions.cir b/clang/test/CIR/IR/exceptions.cir index f74a0a7ce1e4..c1a981d7a9f7 100644 --- a/clang/test/CIR/IR/exceptions.cir +++ b/clang/test/CIR/IR/exceptions.cir @@ -4,7 +4,7 @@ module { cir.func @div(%x : !s32i, %y : !s32i) -> !s32i { - %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %3 = cir.const #cir.int<0> : !s32i cir.return %3 : !s32i } diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 05fa0bca4a22..0db5f9f11b1a 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -36,7 +36,7 @@ module { cir.func private @_ZN4InitD1Ev(!cir.ptr) cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { %0 = cir.get_global @_ZL8__ioinit : !cir.ptr - %1 = cir.const(#cir.int<3> : !s8i) : !s8i + %1 = cir.const #cir.int<3> : !s8i cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () } dtor { %0 = cir.get_global @_ZL8__ioinit : !cir.ptr @@ -85,7 +85,7 @@ module { // CHECK: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { // CHECK-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr -// CHECK-NEXT: %1 = cir.const(#cir.int<3> : !s8i) : !s8i +// CHECK-NEXT: %1 = cir.const #cir.int<3> : !s8i // CHECK-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/int.cir b/clang/test/CIR/IR/int.cir index 3acaacd011f7..af972da9340b 100644 --- a/clang/test/CIR/IR/int.cir +++ b/clang/test/CIR/IR/int.cir @@ -15,22 +15,22 @@ cir.func @validIntTypesAndAttributes() -> () { - %1 = cir.const(#cir.int<-128> : !cir.int) : !s8i - %2 = cir.const(#cir.int<127> : !cir.int) : !s8i - %3 = cir.const(#cir.int<255> : !cir.int) : !u8i + %1 = cir.const #cir.int<-128> : !cir.int + %2 = cir.const #cir.int<127> : !cir.int + %3 = cir.const #cir.int<255> : !cir.int - %4 = cir.const(#cir.int<-32768> : !cir.int) : !s16i - %5 = cir.const(#cir.int<32767> : !cir.int) : !s16i - %6 = cir.const(#cir.int<65535> : !cir.int) : !u16i + %4 = cir.const #cir.int<-32768> : !cir.int + %5 = cir.const #cir.int<32767> : !cir.int + %6 = cir.const #cir.int<65535> : !cir.int - %7 = cir.const(#cir.int<-2147483648> : !cir.int) : !s32i - %8 = cir.const(#cir.int<2147483647> : !cir.int) : !s32i - %9 = cir.const(#cir.int<4294967295> : !cir.int) : !u32i + %7 = cir.const #cir.int<-2147483648> : !cir.int + %8 = cir.const #cir.int<2147483647> : !cir.int + %9 = cir.const #cir.int<4294967295> : !cir.int // FIXME: MLIR is emitting a "too large" error for this one. Not sure why. - // %10 = cir.const(#cir.int<-9223372036854775808> : !cir.int) : !s64i - %11 = cir.const(#cir.int<9223372036854775807> : !cir.int) : !s64i - %12 = cir.const(#cir.int<18446744073709551615> : !cir.int) : !u64i + // %10 = cir.const #cir.int<-9223372036854775808> : !cir.int + %11 = cir.const #cir.int<9223372036854775807> : !cir.int + %12 = cir.const #cir.int<18446744073709551615> : !cir.int cir.return } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 2a78139200e6..6128fa42b824 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -3,20 +3,18 @@ !u32i = !cir.int -// expected-error@+2 {{'cir.const' op nullptr expects pointer type}} cir.func @p0() { - %1 = cir.const(#cir.ptr : !cir.ptr) : !u32i + // expected-error @below {{invalid kind of type specified}} + %1 = cir.const #cir.ptr : !u32i cir.return } // ----- -#false = #cir.bool : !cir.bool -#true = #cir.bool : !cir.bool !u32i = !cir.int -// expected-error@+2 {{op result type ('!cir.int') must be '!cir.bool' for '#cir.bool : !cir.bool'}} +// expected-error@+2 {{invalid kind of type specified}} cir.func @b0() { - %1 = cir.const(#true) : !u32i + %1 = cir.const #cir.bool : !u32i cir.return } @@ -26,10 +24,10 @@ cir.func @b0() { #true = #cir.bool : !cir.bool !u32i = !cir.int cir.func @if0() { - %0 = cir.const(#true) : !cir.bool + %0 = cir.const #true // expected-error@+1 {{'cir.if' op region control flow edge from Region #0 to parent results: source has 1 operands, but target successor needs 0}} cir.if %0 { - %6 = cir.const(#cir.int<3> : !u32i) : !u32i + %6 = cir.const #cir.int<3> : !u32i cir.yield %6 : !u32i } cir.return @@ -40,7 +38,7 @@ cir.func @if0() { #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool cir.func @yield0() { - %0 = cir.const(#true) : !cir.bool + %0 = cir.const #true cir.if %0 { // expected-error {{custom op 'cir.if' multi-block region must not omit terminator}} cir.br ^a ^a: @@ -53,7 +51,7 @@ cir.func @yield0() { #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool cir.func @yieldbreak() { - %0 = cir.const(#true) : !cir.bool + %0 = cir.const #true cir.if %0 { cir.break // expected-error {{op must be within a loop or switch}} } @@ -65,7 +63,7 @@ cir.func @yieldbreak() { #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool cir.func @yieldcontinue() { - %0 = cir.const(#true) : !cir.bool + %0 = cir.const #true cir.if %0 { cir.continue // expected-error {{op must be within a loop}} } @@ -76,10 +74,10 @@ cir.func @yieldcontinue() { !s32i = !cir.int cir.func @s0() { - %1 = cir.const(#cir.int<2> : !s32i) : !s32i + %1 = cir.const #cir.int<2> : !s32i cir.switch (%1 : !s32i) [ case (equal, 5) { // expected-error {{custom op 'cir.switch' case regions must be explicitly terminated}} - %2 = cir.const(#cir.int<3> : !s32i) : !s32i + %2 = cir.const #cir.int<3> : !s32i } ] cir.return @@ -89,7 +87,7 @@ cir.func @s0() { !s32i = !cir.int cir.func @s1() { - %1 = cir.const(#cir.int<2> : !s32i) : !s32i + %1 = cir.const #cir.int<2> : !s32i cir.switch (%1 : !s32i) [ case (equal, 5) { } @@ -100,7 +98,7 @@ cir.func @s1() { // ----- cir.func @badstride(%x: !cir.ptr>) { - %idx = cir.const(#cir.int<2> : !cir.int) : !cir.int + %idx = cir.const #cir.int<2> : !cir.int %4 = cir.ptr_stride(%x : !cir.ptr>, %idx : !cir.int), !cir.ptr // expected-error {{requires the same type for first operand and result}} cir.return } @@ -365,7 +363,7 @@ module { !s32i = !cir.int cir.func @vec_op_size() { - %0 = cir.const(#cir.int<1> : !s32i) : !s32i + %0 = cir.const #cir.int<1> : !s32i %1 = cir.vec.create(%0 : !s32i) : !cir.vector // expected-error {{'cir.vec.create' op operand count of 1 doesn't match vector type '!cir.vector x 2>' element count of 2}} cir.return } @@ -375,8 +373,8 @@ cir.func @vec_op_size() { !s32i = !cir.int !u32i = !cir.int cir.func @vec_op_type() { - %0 = cir.const(#cir.int<1> : !s32i) : !s32i - %1 = cir.const(#cir.int<2> : !u32i) : !u32i + %0 = cir.const #cir.int<1> : !s32i + %1 = cir.const #cir.int<2> : !u32i %2 = cir.vec.create(%0, %1 : !s32i, !u32i) : !cir.vector // expected-error {{'cir.vec.create' op operand type '!cir.int' doesn't match vector element type '!cir.int'}} cir.return } @@ -385,8 +383,8 @@ cir.func @vec_op_type() { !s32i = !cir.int cir.func @vec_extract_non_int_idx() { - %0 = cir.const(1.5e+00 : f64) : f64 - %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %0 = cir.const 1.5e+00 : f64 + %1 = cir.const #cir.int<0> : !s32i %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : !cir.vector %3 = cir.vec.extract %2[%0 : f64] : !cir.vector // expected-error {{expected '<'}} cir.return @@ -398,7 +396,7 @@ cir.func @vec_extract_non_int_idx() { !u32i = !cir.int cir.func @vec_extract_bad_type() { %0 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.const #cir.int<0> : !s32i %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : !cir.vector %3 = cir.vec.extract %2[%1 : !s32i] : !cir.vector // expected-note {{prior use here}} cir.store %3, %0 : !u32i, !cir.ptr // expected-error {{use of value '%3' expects different type than prior uses: '!cir.int' vs '!cir.int'}} @@ -409,7 +407,7 @@ cir.func @vec_extract_bad_type() { !s32i = !cir.int cir.func @vec_extract_non_vector() { - %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %0 = cir.const #cir.int<0> : !s32i %1 = cir.vec.extract %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.extract' 'vec' must be CIR vector type, but got '!cir.int'}} cir.return } @@ -419,9 +417,9 @@ cir.func @vec_extract_non_vector() { !s32i = !cir.int !u32i = !cir.int cir.func @vec_insert_bad_type() { - %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %0 = cir.const #cir.int<0> : !s32i %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector - %2 = cir.const(#cir.int<0> : !u32i) : !u32i // expected-note {{prior use here}} + %2 = cir.const #cir.int<0> : !u32i // expected-note {{prior use here}} %3 = cir.vec.insert %2, %1[%0 : !s32i] : !cir.vector // expected-error {{use of value '%2' expects different type than prior uses: '!cir.int' vs '!cir.int'}} cir.return } @@ -430,7 +428,7 @@ cir.func @vec_insert_bad_type() { !s32i = !cir.int cir.func @vec_insert_non_vector() { - %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %0 = cir.const #cir.int<0> : !s32i %1 = cir.vec.insert %0, %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.insert' 'vec' must be CIR vector type, but got '!cir.int'}} cir.return } @@ -439,7 +437,7 @@ cir.func @vec_insert_non_vector() { !s32i = !cir.int cir.func @vec_ternary_non_vector1() { - %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %0 = cir.const #cir.int<0> : !s32i %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector %2 = cir.vec.ternary(%0, %1, %1) : !s32i, !cir.vector // expected-error {{'cir.vec.ternary' op operand #0 must be !cir.vector of !cir.int, but got '!cir.int'}} cir.return @@ -449,7 +447,7 @@ cir.func @vec_ternary_non_vector1() { !s32i = !cir.int cir.func @vec_ternary_non_vector2() { - %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %0 = cir.const #cir.int<0> : !s32i %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector %2 = cir.vec.ternary(%1, %0, %0) : !cir.vector, !s32i // expected-error {{'cir.vec.ternary' op operand #1 must be CIR vector type, but got '!cir.int'}} cir.return @@ -459,7 +457,7 @@ cir.func @vec_ternary_non_vector2() { !s32i = !cir.int cir.func @vec_ternary_different_size() { - %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %0 = cir.const #cir.int<0> : !s32i %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : !cir.vector %2 = cir.vec.create(%0, %0, %0, %0 : !s32i, !s32i, !s32i, !s32i) : !cir.vector %3 = cir.vec.ternary(%1, %2, %2) : !cir.vector, !cir.vector // expected-error {{'cir.vec.ternary' op : the number of elements in '!cir.vector x 2>' and '!cir.vector x 4>' don't match}} @@ -621,7 +619,7 @@ module { !s32i = !cir.int cir.func @test_br() -> !s32i { - %0 = cir.const(#cir.int<0>: !s32i) : !s32i + %0 = cir.const #cir.int<0>: !s32i // expected-error@below {{branch has 1 operands for successor #0, but target block has 0}} cir.br ^bb1(%0 : !s32i) ^bb1: @@ -829,16 +827,6 @@ module { // ----- -!s8i = !cir.int -!u8i = !cir.int -cir.func @const_type_mismatch() -> () { - // expected-error@+1 {{'cir.const' op result type ('!cir.int') does not match value type ('!cir.int')}} - %2 = cir.const(#cir.int<0> : !s8i) : !u8i - cir.return -} - -// ----- - !u16i = !cir.int // expected-error@+1 {{invalid kind of type specified}} @@ -1044,7 +1032,7 @@ cir.func @bad_fetch(%x: !cir.ptr, %y: !cir.float) -> () { cir.func @bad_operands_for_nowrap(%x: !cir.float, %y: !cir.float) { // expected-error@+1 {{only operations on integer values may have nsw/nuw flags}} - %0 = cir.binop(add, %x, %y) nsw : !cir.float + %0 = cir.binop(add, %x, %y) nsw : !cir.float } // ----- diff --git a/clang/test/CIR/IR/ptr_stride.cir b/clang/test/CIR/IR/ptr_stride.cir index 750e5764a72f..6791f830fd48 100644 --- a/clang/test/CIR/IR/ptr_stride.cir +++ b/clang/test/CIR/IR/ptr_stride.cir @@ -6,7 +6,7 @@ module { %0 = cir.alloca !cir.array, !cir.ptr>, ["x", init] %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr - %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %3 = cir.const #cir.int<0> : !s32i %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr cir.return } @@ -16,7 +16,7 @@ module { // CHECK-NEXT: %0 = cir.alloca !cir.array, !cir.ptr>, ["x", init] // CHECK-NEXT: %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %3 = cir.const(#cir.int<0> : !s32i) : !s32i +// CHECK-NEXT: %3 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index b44b0d3eeee3..abaaf8766e4b 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -34,8 +34,8 @@ module { // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["i", init] cir.func @shouldSuccessfullyParseConstStructAttrs() { - %0 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122) : !ty_22S122 - // CHECK: cir.const(#cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122) : !ty_22S122 + %0 = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122 + // CHECK: cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122 cir.return } } diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index db63a2928862..b5c0c9cafb6c 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -2,7 +2,7 @@ !s32i = !cir.int cir.func @s0() { - %1 = cir.const(#cir.int<2> : !s32i) : !s32i + %1 = cir.const #cir.int<2> : !s32i cir.switch (%1 : !s32i) [ case (default) { cir.return diff --git a/clang/test/CIR/IR/ternary.cir b/clang/test/CIR/IR/ternary.cir index 127d8ed8f2dc..3827dc77726d 100644 --- a/clang/test/CIR/IR/ternary.cir +++ b/clang/test/CIR/IR/ternary.cir @@ -4,10 +4,10 @@ module { cir.func @blue(%arg0: !cir.bool) -> !u32i { %0 = cir.ternary(%arg0, true { - %a = cir.const(#cir.int<0> : !u32i) : !u32i + %a = cir.const #cir.int<0> : !u32i cir.yield %a : !u32i }, false { - %b = cir.const(#cir.int<1> : !u32i) : !u32i + %b = cir.const #cir.int<1> : !u32i cir.yield %b : !u32i }) : (!cir.bool) -> !u32i cir.return %0 : !u32i @@ -18,10 +18,10 @@ module { // CHECK: cir.func @blue(%arg0: !cir.bool) -> !u32i { // CHECK: %0 = cir.ternary(%arg0, true { -// CHECK: %1 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: %1 = cir.const #cir.int<0> : !u32i // CHECK: cir.yield %1 : !u32i // CHECK: }, false { -// CHECK: %1 = cir.const(#cir.int<1> : !u32i) : !u32i +// CHECK: %1 = cir.const #cir.int<1> : !u32i // CHECK: cir.yield %1 : !u32i // CHECK: }) : (!cir.bool) -> !u32i // CHECK: cir.return %0 : !u32i diff --git a/clang/test/CIR/IR/try.cir b/clang/test/CIR/IR/try.cir index 2eb186e65f04..3bcb44e070bd 100644 --- a/clang/test/CIR/IR/try.cir +++ b/clang/test/CIR/IR/try.cir @@ -5,7 +5,7 @@ module { cir.func @div(%x : !s32i, %y : !s32i) -> !s32i { - %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %3 = cir.const #cir.int<0> : !s32i cir.return %3 : !s32i } diff --git a/clang/test/CIR/Lowering/OpenMP/parallel.cir b/clang/test/CIR/Lowering/OpenMP/parallel.cir index 2457d929781f..da98868eddb1 100644 --- a/clang/test/CIR/Lowering/OpenMP/parallel.cir +++ b/clang/test/CIR/Lowering/OpenMP/parallel.cir @@ -4,15 +4,15 @@ module { cir.func @omp_parallel() { %0 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.const #cir.int<0> : !s32i cir.store %1, %0 : !s32i, !cir.ptr omp.parallel { cir.scope { %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<1> : !s32i) : !s32i + %3 = cir.const #cir.int<1> : !s32i cir.store %3, %2 : !s32i, !cir.ptr %4 = cir.load %2 : !cir.ptr, !s32i - %5 = cir.const(#cir.int<1> : !s32i) : !s32i + %5 = cir.const #cir.int<1> : !s32i %6 = cir.binop(add, %4, %5) : !s32i cir.store %6, %0 : !s32i, !cir.ptr } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir index d9e3c36f80ef..eb5b747d8761 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir @@ -7,8 +7,8 @@ module { %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %2 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<2> : !u32i) : !u32i cir.store %3, %0 : !u32i, !cir.ptr - %4 = cir.const(#cir.int<1> : !u32i) : !u32i cir.store %4, %1 : !u32i, !cir.ptr + %3 = cir.const #cir.int<2> : !u32i cir.store %3, %0 : !u32i, !cir.ptr + %4 = cir.const #cir.int<1> : !u32i cir.store %4, %1 : !u32i, !cir.ptr %5 = cir.load %0 : !cir.ptr, !u32i %6 = cir.load %1 : !cir.ptr, !u32i %7 = cir.binop(mul, %5, %6) : !u32i diff --git a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir index 1b41d8b1d0f7..408cac97ee41 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir @@ -6,7 +6,7 @@ module { cir.func @foo() { %0 = cir.alloca !cir.bool, !cir.ptr, ["a", init] {alignment = 1 : i64} - %1 = cir.const(#true) : !cir.bool + %1 = cir.const #true cir.store %1, %0 : !cir.bool, !cir.ptr cir.return } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/branch.cir b/clang/test/CIR/Lowering/ThroughMLIR/branch.cir index 83c980838890..2b78484627d5 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/branch.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/branch.cir @@ -5,10 +5,10 @@ cir.func @foo(%arg0: !cir.bool) -> !s32i { cir.brcond %arg0 ^bb1, ^bb2 ^bb1: - %0 = cir.const(#cir.int<1>: !s32i) : !s32i + %0 = cir.const #cir.int<1>: !s32i cir.return %0 : !s32i ^bb2: - %1 = cir.const(#cir.int<0>: !s32i) : !s32i + %1 = cir.const #cir.int<0>: !s32i cir.return %1 : !s32i } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cos.cir b/clang/test/CIR/Lowering/ThroughMLIR/cos.cir index 0530d3cb19e8..93b102b7a854 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/cos.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/cos.cir @@ -3,10 +3,10 @@ module { cir.func @foo() { - %1 = cir.const(#cir.fp<1.0> : !cir.float) : !cir.float - %2 = cir.const(#cir.fp<1.0> : !cir.double) : !cir.double - %3 = cir.const(#cir.fp<1.0> : !cir.long_double) : !cir.long_double - %4 = cir.const(#cir.fp<1.0> : !cir.long_double) : !cir.long_double + %1 = cir.const #cir.fp<1.0> : !cir.float + %2 = cir.const #cir.fp<1.0> : !cir.double + %3 = cir.const #cir.fp<1.0> : !cir.long_double + %4 = cir.const #cir.fp<1.0> : !cir.long_double %5 = cir.cos %1 : !cir.float %6 = cir.cos %2 : !cir.double %7 = cir.cos %3 : !cir.long_double diff --git a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir index ff73eca3667f..27ab0e9c33f6 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir @@ -11,7 +11,7 @@ module { cir.store %arg0, %0 : !cir.ptr, !cir.ptr> %3 = cir.load %0 : !cir.ptr>, !cir.ptr cir.store %3, %2 : !cir.ptr, !cir.ptr> - %4 = cir.const(#cir.int<0> : !s32i) : !s32i + %4 = cir.const #cir.int<0> : !s32i %5 = cir.load %1 : !cir.ptr, !s32i cir.return %5 : !s32i } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir index 170366d55af9..6c1d5c66fffa 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir @@ -5,18 +5,18 @@ module { cir.func @foo() { %0 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<1> : !u32i) : !u32i + %1 = cir.const #cir.int<1> : !u32i cir.store %1, %0 : !u32i, !cir.ptr cir.br ^bb2 ^bb1: // no predecessors %2 = cir.load %0 : !cir.ptr, !u32i - %3 = cir.const(#cir.int<1> : !u32i) : !u32i + %3 = cir.const #cir.int<1> : !u32i %4 = cir.binop(add, %2, %3) : !u32i cir.store %4, %0 : !u32i, !cir.ptr cir.br ^bb2 ^bb2: // 2 preds: ^bb0, ^bb1 %5 = cir.load %0 : !cir.ptr, !u32i - %6 = cir.const(#cir.int<2> : !u32i) : !u32i + %6 = cir.const #cir.int<2> : !u32i %7 = cir.binop(add, %5, %6) : !u32i cir.store %7, %0 : !u32i, !cir.ptr cir.return diff --git a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir index 8aabe4cd0ffa..d51c4425d702 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/memref.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/memref.cir @@ -5,7 +5,7 @@ module { cir.func @foo() -> !u32i { %0 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<1> : !u32i) : !u32i + %1 = cir.const #cir.int<1> : !u32i cir.store %1, %0 : !u32i, !cir.ptr %2 = cir.load %0 : !cir.ptr, !u32i cir.return %2 : !u32i diff --git a/clang/test/CIR/Lowering/ThroughMLIR/scope.cir b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir index 779363e21b2f..cf5effc02ce6 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/scope.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/scope.cir @@ -8,7 +8,7 @@ module { cir.func @foo() { cir.scope { %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<4> : !u32i) : !u32i + %1 = cir.const #cir.int<4> : !u32i cir.store %1, %0 : !u32i, !cir.ptr } cir.return diff --git a/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir index e1cae6aa9bd4..bcac62912fa9 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir @@ -10,13 +10,13 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, !cir.ptr %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %3 = cir.const #cir.int<0> : !s32i %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool %5 = cir.ternary(%4, true { - %7 = cir.const(#cir.int<3> : !s32i) : !s32i + %7 = cir.const #cir.int<3> : !s32i cir.yield %7 : !s32i }, false { - %7 = cir.const(#cir.int<5> : !s32i) : !s32i + %7 = cir.const #cir.int<5> : !s32i cir.yield %7 : !s32i }) : (!cir.bool) -> !s32i cir.store %5, %1 : !s32i, !cir.ptr diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir index 13fef83e435a..1db339fe34fc 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-inc-dec.cir @@ -6,7 +6,7 @@ module { cir.func @foo() { %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} - %2 = cir.const(#cir.int<2> : !s32i) : !s32i + %2 = cir.const #cir.int<2> : !s32i cir.store %2, %0 : !s32i, !cir.ptr cir.store %2, %1 : !s32i, !cir.ptr diff --git a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir index c013be35dfe7..ecb7e7ef6734 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/unary-plus-minus.cir @@ -6,7 +6,7 @@ module { cir.func @foo() { %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} - %2 = cir.const(#cir.int<2> : !s32i) : !s32i + %2 = cir.const #cir.int<2> : !s32i cir.store %2, %0 : !s32i, !cir.ptr cir.store %2, %1 : !s32i, !cir.ptr diff --git a/clang/test/CIR/Lowering/binop-signed-int.cir b/clang/test/CIR/Lowering/binop-signed-int.cir index 8ce730ad5234..5f028a6c901b 100644 --- a/clang/test/CIR/Lowering/binop-signed-int.cir +++ b/clang/test/CIR/Lowering/binop-signed-int.cir @@ -7,8 +7,8 @@ module { %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<2> : !s32i) : !s32i cir.store %3, %0 : !s32i, !cir.ptr - %4 = cir.const(#cir.int<1> : !s32i) : !s32i cir.store %4, %1 : !s32i, !cir.ptr + %3 = cir.const #cir.int<2> : !s32i cir.store %3, %0 : !s32i, !cir.ptr + %4 = cir.const #cir.int<1> : !s32i cir.store %4, %1 : !s32i, !cir.ptr %5 = cir.load %0 : !cir.ptr, !s32i %6 = cir.load %1 : !cir.ptr, !s32i %7 = cir.binop(mul, %5, %6) : !s32i diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir index 53066225857a..9633a7f4d966 100644 --- a/clang/test/CIR/Lowering/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -7,8 +7,8 @@ module { %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %2 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<2> : !u32i) : !u32i cir.store %3, %0 : !u32i, !cir.ptr - %4 = cir.const(#cir.int<1> : !u32i) : !u32i cir.store %4, %1 : !u32i, !cir.ptr + %3 = cir.const #cir.int<2> : !u32i cir.store %3, %0 : !u32i, !cir.ptr + %4 = cir.const #cir.int<1> : !u32i cir.store %4, %1 : !u32i, !cir.ptr %5 = cir.load %0 : !cir.ptr, !u32i %6 = cir.load %1 : !cir.ptr, !u32i %7 = cir.binop(mul, %5, %6) : !u32i diff --git a/clang/test/CIR/Lowering/bool-to-int.cir b/clang/test/CIR/Lowering/bool-to-int.cir index d7e2e45686cc..1b4bb73f80f9 100644 --- a/clang/test/CIR/Lowering/bool-to-int.cir +++ b/clang/test/CIR/Lowering/bool-to-int.cir @@ -6,12 +6,12 @@ module { cir.func @foo(%arg0: !s32i, %arg1: !s32i) -> !s32i { - %1 = cir.const(#true) : !cir.bool + %1 = cir.const #true %2 = cir.cast(bool_to_int, %1 : !cir.bool), !s32i cir.return %2 : !s32i } cir.func @bar(%arg0: !s32i, %arg1: !s32i) -> !s32i { - %1 = cir.const(#false) : !cir.bool + %1 = cir.const #false %2 = cir.cast(bool_to_int, %1 : !cir.bool), !s32i cir.return %2 : !s32i } diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index fb94f8135a4b..ee743e8376fa 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -10,7 +10,7 @@ module { // LLVM: @g_bl = global i8 0 cir.func @foo() { - %1 = cir.const(#true) : !cir.bool + %1 = cir.const #true %0 = cir.alloca !cir.bool, !cir.ptr, ["a", init] {alignment = 1 : i64} cir.store %1, %0 : !cir.bool, !cir.ptr cir.return diff --git a/clang/test/CIR/Lowering/branch.cir b/clang/test/CIR/Lowering/branch.cir index 90e143913d50..bbfb61e582a0 100644 --- a/clang/test/CIR/Lowering/branch.cir +++ b/clang/test/CIR/Lowering/branch.cir @@ -5,10 +5,10 @@ cir.func @foo(%arg0: !cir.bool) -> !s32i { cir.brcond %arg0 ^bb1, ^bb2 ^bb1: - %0 = cir.const(#cir.int<1>: !s32i) : !s32i + %0 = cir.const #cir.int<1>: !s32i cir.return %0 : !s32i ^bb2: - %1 = cir.const(#cir.int<0>: !s32i) : !s32i + %1 = cir.const #cir.int<0>: !s32i cir.return %1 : !s32i } diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index f6ec6a94be91..e100e0c2f07e 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -24,7 +24,7 @@ module { %8 = cir.alloca !cir.ptr, !cir.ptr>, ["e", init] {alignment = 8 : i64} cir.store %arg0, %0 : !u32i, !cir.ptr cir.store %arg1, %1 : !s32i, !cir.ptr - + // Integer casts. %9 = cir.load %0 : !cir.ptr, !u32i %10 = cir.cast(integral, %9 : !u32i), !s8i @@ -73,7 +73,7 @@ module { // CHECK: %{{.+}} = llvm.fptosi %{{.+}} : f32 to i32 %28 = cir.cast(float_to_int, %arg2 : !cir.float), !u32i // CHECK: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 - %18 = cir.const(#cir.int<0> : !s32i) : !s32i + %18 = cir.const #cir.int<0> : !s32i // CHECK: %{{.+}} = llvm.fptrunc %{{.+}} : f64 to f32 %34 = cir.cast(floating, %arg3 : !cir.double), !cir.float diff --git a/clang/test/CIR/Lowering/class.cir b/clang/test/CIR/Lowering/class.cir index cc5046be57ac..75366182ad3c 100644 --- a/clang/test/CIR/Lowering/class.cir +++ b/clang/test/CIR/Lowering/class.cir @@ -24,7 +24,7 @@ module { cir.func @shouldConstInitLocalClassesWithConstStructAttr() { %0 = cir.alloca !ty_22S2A22, !cir.ptr, ["s"] {alignment = 4 : i64} - %1 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22) : !ty_22S2A22 + %1 = cir.const #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22 cir.store %1, %0 : !ty_22S2A22, !cir.ptr cir.return } diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 396006541a1c..7ac57b833fed 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -7,23 +7,23 @@ !ty_22anon2E122 = !cir.struct, !cir.int} #cir.record.decl.ast> module { cir.func @testConstArrInit() { - %0 = cir.const(#cir.const_array<"string\00" : !cir.array> : !cir.array) : !cir.array + %0 = cir.const #cir.const_array<"string\00" : !cir.array> : !cir.array // CHECK: llvm.mlir.constant(dense<[115, 116, 114, 105, 110, 103, 0]> : tensor<7xi8>) : !llvm.array<7 x i8> - %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array) : !cir.array + %1 = cir.const #cir.const_array<[#cir.int<1> : !s32i, #cir.int<2> : !s32i]> : !cir.array // CHECK: llvm.mlir.constant(dense<[1, 2]> : tensor<2xi32>) : !llvm.array<2 x i32> - %3 = cir.const(#cir.const_array<[#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float]> : !cir.array) : !cir.array + %3 = cir.const #cir.const_array<[#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float]> : !cir.array // CHECK: llvm.mlir.constant(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) : !llvm.array<2 x f32> - %4 = cir.const(#cir.zero : !cir.array) : !cir.array + %4 = cir.const #cir.zero : !cir.array // CHECK: cir.llvmir.zeroinit : !llvm.array<3 x i32> cir.return } cir.func @testConvertConstArrayToDenseConst() { - %0 = cir.const(#cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> - %1 = cir.const(#cir.const_array<[#cir.const_array<[#cir.int<1> : !s64i]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> - %2 = cir.const(#cir.const_array<[#cir.const_array<[#cir.fp<1.000000e+00> : !cir.float]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> - %3 = cir.const(#cir.const_array<[#cir.const_array<[#cir.fp<1.000000e+00> : !cir.double]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2>) : !cir.array x 2> - %4 = cir.const(#cir.const_array<[#cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array]> : !cir.array x 1>, #cir.zero : !cir.array x 1>]> : !cir.array x 1> x 2>) : !cir.array x 1> x 2> + %0 = cir.const #cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2> + %1 = cir.const #cir.const_array<[#cir.const_array<[#cir.int<1> : !s64i]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2> + %2 = cir.const #cir.const_array<[#cir.const_array<[#cir.fp<1.000000e+00> : !cir.float]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2> + %3 = cir.const #cir.const_array<[#cir.const_array<[#cir.fp<1.000000e+00> : !cir.double]> : !cir.array, #cir.zero : !cir.array]> : !cir.array x 2> + %4 = cir.const #cir.const_array<[#cir.const_array<[#cir.const_array<[#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i]> : !cir.array]> : !cir.array x 1>, #cir.zero : !cir.array x 1>]> : !cir.array x 1> x 2> cir.return } @@ -37,7 +37,7 @@ module { cir.func @testConstArrayOfStructs() { %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 4 : i64} - %1 = cir.const(#cir.const_array<[#cir.const_struct<{#cir.int<0> : !s32i, #cir.int<1> : !s32i}> : !ty_22anon2E122]> : !cir.array) : !cir.array + %1 = cir.const #cir.const_array<[#cir.const_struct<{#cir.int<0> : !s32i, #cir.int<1> : !s32i}> : !ty_22anon2E122]> : !cir.array cir.store %1, %0 : !cir.array, !cir.ptr> cir.return } @@ -56,7 +56,7 @@ module { cir.func @testArrWithTrailingZeros() { %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} - %1 = cir.const(#cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array) : !cir.array + %1 = cir.const #cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array cir.store %1, %0 : !cir.array, !cir.ptr> cir.return } @@ -68,7 +68,7 @@ module { // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.array<10 x i32> cir.func @testInitArrWithBool() { - %1 = cir.const(#cir.const_array<[#cir.bool : !cir.bool]> : !cir.array) : !cir.array + %1 = cir.const #cir.const_array<[#cir.bool : !cir.bool]> : !cir.array cir.return } diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 37958b0bfea3..d4823df783a7 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -12,11 +12,11 @@ module { cir.store %arg0, %0 : !cir.ptr, !cir.ptr> cir.store %arg1, %1 : !cir.ptr, !cir.ptr> cir.store %arg2, %2 : !s32i, !cir.ptr - %5 = cir.const(#cir.fp<0.000000e+00> : !cir.double) : !cir.double + %5 = cir.const #cir.fp<0.000000e+00> : !cir.double cir.store %5, %4 : !cir.double, !cir.ptr cir.scope { %8 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %9 = cir.const(#cir.int<0> : !s32i) : !s32i + %9 = cir.const #cir.int<0> : !s32i cir.store %9, %8 : !s32i, !cir.ptr cir.for : cond { %10 = cir.load %8 : !cir.ptr, !s32i diff --git a/clang/test/CIR/Lowering/expect.cir b/clang/test/CIR/Lowering/expect.cir index a221cca5f3dd..64c9c10e6277 100644 --- a/clang/test/CIR/Lowering/expect.cir +++ b/clang/test/CIR/Lowering/expect.cir @@ -4,7 +4,7 @@ !s64i = !cir.int module { cir.func @foo(%arg0: !s64i) { - %0 = cir.const(#cir.int<1> : !s64i) : !s64i + %0 = cir.const #cir.int<1> : !s64i %1 = cir.expect(%arg0, %0) : !s64i %2 = cir.cast(int_to_bool, %1 : !s64i), !cir.bool cir.if %2 { diff --git a/clang/test/CIR/Lowering/float.cir b/clang/test/CIR/Lowering/float.cir index 463768a35935..d4b66500b210 100644 --- a/clang/test/CIR/Lowering/float.cir +++ b/clang/test/CIR/Lowering/float.cir @@ -3,15 +3,15 @@ module { cir.func @test() { - // %0 = cir.const(1.0 : f16) : f16 + // %0 = cir.const 1.0 : f16 // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f16) : f16 - %1 = cir.const(#cir.fp<1.0> : !cir.float) : !cir.float + %1 = cir.const #cir.fp<1.0> : !cir.float // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f32) : f32 - %2 = cir.const(#cir.fp<1.0> : !cir.double) : !cir.double + %2 = cir.const #cir.fp<1.0> : !cir.double // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f64) : f64 - %3 = cir.const(#cir.fp<1.0> : !cir.long_double) : !cir.long_double + %3 = cir.const #cir.fp<1.0> : !cir.long_double // CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : f80) : f80 - // %5 = cir.const(1.0 : bf16) : bf16 + // %5 = cir.const 1.0 : bf16 // DISABLED-CHECK: %{{.+}} = llvm.mlir.constant(1.000000e+00 : bf16) : bf16 cir.return } diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 3bfbd8b21846..e536f02eddda 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -54,7 +54,7 @@ module { %0 = cir.alloca !u8i, !cir.ptr, ["c", init] {alignment = 1 : i64} %1 = cir.get_global @s2 : !cir.ptr> %2 = cir.load %1 : !cir.ptr>, !cir.ptr - %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %3 = cir.const #cir.int<0> : !s32i %4 = cir.ptr_stride(%2 : !cir.ptr, %3 : !s32i), !cir.ptr %5 = cir.load %4 : !cir.ptr, !s8i %6 = cir.cast(integral, %5 : !s8i), !u8i @@ -63,7 +63,7 @@ module { } cir.func linkonce_odr @_Z4funcIiET_v() -> !s32i { %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} - %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.const #cir.int<0> : !s32i cir.store %1, %0 : !s32i, !cir.ptr %2 = cir.load %0 : !cir.ptr, !s32i cir.return %2 : !s32i @@ -182,8 +182,8 @@ module { cir.global external @zero_array = #cir.zero : !cir.array cir.func @use_zero_array() { - %0 = cir.const(#cir.global_view<@zero_array> : !cir.ptr) : !cir.ptr - %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %0 = cir.const #cir.global_view<@zero_array> : !cir.ptr + %1 = cir.const #cir.int<0> : !s32i %2 = cir.ptr_stride(%0 : !cir.ptr, %1 : !s32i), !cir.ptr %3 = cir.load %2 : !cir.ptr, !s32i cir.return diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 784a8f473724..271666744d6c 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -6,18 +6,18 @@ module { cir.func @foo() { %0 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<1> : !u32i) : !u32i + %1 = cir.const #cir.int<1> : !u32i cir.store %1, %0 : !u32i, !cir.ptr cir.br ^bb2 ^bb1: // no predecessors %2 = cir.load %0 : !cir.ptr, !u32i - %3 = cir.const(#cir.int<1> : !u32i) : !u32i + %3 = cir.const #cir.int<1> : !u32i %4 = cir.binop(add, %2, %3) : !u32i cir.store %4, %0 : !u32i, !cir.ptr cir.br ^bb2 ^bb2: // 2 preds: ^bb0, ^bb1 %5 = cir.load %0 : !cir.ptr, !u32i - %6 = cir.const(#cir.int<2> : !u32i) : !u32i + %6 = cir.const #cir.int<2> : !u32i %7 = cir.binop(add, %5, %6) : !u32i cir.store %7, %0 : !u32i, !cir.ptr cir.return diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 7c808d31c2fb..5d35cd6e81bb 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -13,7 +13,7 @@ module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.sign %2 = cir.get_global @".str" : !cir.ptr> %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr %4 = cir.call @printf(%3) : (!cir.ptr) -> !s32i - %5 = cir.const(#cir.int<0> : !s32i) : !s32i + %5 = cir.const #cir.int<0> : !s32i cir.store %5, %0 : !s32i, !cir.ptr %6 = cir.load %0 : !cir.ptr, !s32i cir.return %6 : !s32i diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index eac0b5e4467e..cd42497983e4 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -6,10 +6,10 @@ module { cir.func @foo(%arg0: !s32i) -> !s32i { %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool cir.if %4 { - %5 = cir.const(#cir.int<1> : !s32i) : !s32i + %5 = cir.const #cir.int<1> : !s32i cir.return %5 : !s32i } else { - %5 = cir.const(#cir.int<0> : !s32i) : !s32i + %5 = cir.const #cir.int<0> : !s32i cir.return %5 : !s32i } cir.return %arg0 : !s32i @@ -46,7 +46,7 @@ module { cir.func @onlyIf(%arg0: !s32i) -> !s32i { %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool cir.if %4 { - %5 = cir.const(#cir.int<1> : !s32i) : !s32i + %5 = cir.const #cir.int<1> : !s32i cir.return %5 : !s32i } cir.return %arg0 : !s32i diff --git a/clang/test/CIR/Lowering/int-wrap.cir b/clang/test/CIR/Lowering/int-wrap.cir index ca4ee05b5354..b6b8bd385b89 100644 --- a/clang/test/CIR/Lowering/int-wrap.cir +++ b/clang/test/CIR/Lowering/int-wrap.cir @@ -7,7 +7,7 @@ module { %0 = cir.alloca !s32i, !cir.ptr, ["len", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, !cir.ptr %1 = cir.load %0 : !cir.ptr, !s32i - %2 = cir.const(#cir.int<42> : !s32i) : !s32i + %2 = cir.const #cir.int<42> : !s32i %3 = cir.binop(sub, %1, %2) nsw : !s32i %4 = cir.binop(sub, %1, %2) nuw : !s32i %5 = cir.binop(sub, %1, %2) : !s32i diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index e2fce2972385..f6d3a6eb521f 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -5,7 +5,7 @@ module { cir.func @foo() -> !u32i { %0 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<1> : !u32i) : !u32i + %1 = cir.const #cir.int<1> : !u32i cir.store %1, %0 : !u32i, !cir.ptr %2 = cir.load %0 : !cir.ptr, !u32i cir.return %2 : !u32i @@ -13,7 +13,7 @@ module { cir.func @test_volatile() -> !u32i { %0 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<1> : !u32i) : !u32i + %1 = cir.const #cir.int<1> : !u32i cir.store volatile %1, %0 : !u32i, !cir.ptr %2 = cir.load volatile %0 : !cir.ptr, !u32i cir.return %2 : !u32i diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index 02af716f703e..34b6bfd7618e 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -6,11 +6,11 @@ module { cir.func @testFor() { cir.scope { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<1> : !s32i) : !s32i + %1 = cir.const #cir.int<1> : !s32i cir.store %1, %0 : !s32i, !cir.ptr cir.for : cond { %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.const #cir.int<10> : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) @@ -18,7 +18,7 @@ module { cir.scope { cir.scope { %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<5> : !s32i) : !s32i + %3 = cir.const #cir.int<5> : !s32i %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.if %5 { @@ -61,18 +61,18 @@ module { // CHECK: ^bb[[#STEP]]: // [...] // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#EXIT]]: + // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } cir.func @testForNested() { cir.scope { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<1> : !s32i) : !s32i + %1 = cir.const #cir.int<1> : !s32i cir.store %1, %0 : !s32i, !cir.ptr cir.for : cond { %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.const #cir.int<10> : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) @@ -80,11 +80,11 @@ module { cir.scope { cir.scope { %2 = cir.alloca !s32i, !cir.ptr, ["j", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<1> : !s32i) : !s32i + %3 = cir.const #cir.int<1> : !s32i cir.store %3, %2 : !s32i, !cir.ptr cir.for : cond { %4 = cir.load %2 : !cir.ptr, !s32i - %5 = cir.const(#cir.int<10> : !s32i) : !s32i + %5 = cir.const #cir.int<10> : !s32i %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.condition(%7) @@ -92,7 +92,7 @@ module { cir.scope { cir.scope { %4 = cir.load %2 : !cir.ptr, !s32i - %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %5 = cir.const #cir.int<5> : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { @@ -120,60 +120,60 @@ module { cir.return } - // CHECK: llvm.func @testForNested() + // CHECK: llvm.func @testForNested() // [...] // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED1:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#preNESTED1]]: + // CHECK: ^bb[[#preNESTED1]]: // CHECK: llvm.br ^bb[[#preNESTED2:]] - // CHECK: ^bb[[#preNESTED2]]: + // CHECK: ^bb[[#preNESTED2]]: // CHECK: llvm.br ^bb[[#NESTED:]] - // CHECK: ^bb[[#NESTED]]: + // CHECK: ^bb[[#NESTED]]: // [...] // CHECK: llvm.br ^bb[[#COND_NESTED:]] // CHECK: ^bb[[#COND_NESTED]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK1:]], ^bb[[#EXIT_NESTED:]] - // CHECK: ^bb[[#preBREAK1]]: + // CHECK: ^bb[[#preBREAK1]]: // CHECK: llvm.br ^bb[[#preBREAK2:]] - // CHECK: ^bb[[#preBREAK2]]: + // CHECK: ^bb[[#preBREAK2]]: // CHECK: llvm.br ^bb[[#BREAK:]] - // CHECK: ^bb[[#BREAK]]: + // CHECK: ^bb[[#BREAK]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT2:]], ^bb[[#preBODY0:]] - // CHECK: ^bb[[#preEXIT2]]: + // CHECK: ^bb[[#preEXIT2]]: // CHECK: llvm.br ^bb[[#EXIT_NESTED:]] - // CHECK: ^bb[[#preBODY0]]: + // CHECK: ^bb[[#preBODY0]]: // CHECK: llvm.br ^bb[[#preBODY1:]] - // CHECK: ^bb[[#preBODY1]]: + // CHECK: ^bb[[#preBODY1]]: // CHECK: llvm.br ^bb[[#BODY_NESTED:]] - // CHECK: ^bb[[#BODY_NESTED]]: + // CHECK: ^bb[[#BODY_NESTED]]: // CHECK: llvm.br ^bb[[#STEP_NESTED:]] - // CHECK: ^bb[[#STEP_NESTED]]: + // CHECK: ^bb[[#STEP_NESTED]]: // [...] // CHECK: llvm.br ^bb[[#COND_NESTED:]] - // CHECK: ^bb[[#EXIT_NESTED]]: + // CHECK: ^bb[[#EXIT_NESTED]]: // [...] // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#BODY]]: + // CHECK: ^bb[[#BODY]]: // CHECK: llvm.br ^bb[[#STEP:]] - // CHECK: ^bb[[#STEP]]: + // CHECK: ^bb[[#STEP]]: // [...] // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#EXIT]]: + // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } cir.func @testWhile() { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.const #cir.int<0> : !s32i cir.store %1, %0 : !s32i, !cir.ptr cir.scope { cir.while { %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.const #cir.int<10> : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) @@ -183,7 +183,7 @@ module { cir.store %3, %0 : !s32i, !cir.ptr cir.scope { %4 = cir.load %0 : !cir.ptr, !s32i - %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %5 = cir.const #cir.int<5> : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { @@ -203,27 +203,27 @@ module { // CHECK: ^bb[[#COND]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#BODY]]: + // CHECK: ^bb[[#BODY]]: // [...] // CHECK: llvm.br ^bb[[#BREAK:]] - // CHECK: ^bb[[#BREAK]]: + // CHECK: ^bb[[#BREAK]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preCOND0:]] - // CHECK: ^bb[[#preEXIT1]]: + // CHECK: ^bb[[#preEXIT1]]: // CHECK: llvm.br ^bb[[#preEXIT2:]] - // CHECK: ^bb[[#preCOND0]]: + // CHECK: ^bb[[#preCOND0]]: // CHECK: llvm.br ^bb[[#preCOND1:]] - // CHECK: ^bb[[#preCOND1]]: + // CHECK: ^bb[[#preCOND1]]: // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#preEXIT2]]: + // CHECK: ^bb[[#preEXIT2]]: // CHECK: llvm.br ^bb[[#EXIT:]] - // CHECK: ^bb[[#EXIT]]: + // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } - + cir.func @testDoWhile() { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.const #cir.int<0> : !s32i cir.store %1, %0 : !s32i, !cir.ptr cir.scope { cir.do { @@ -232,7 +232,7 @@ cir.func @testDoWhile() { cir.store %3, %0 : !s32i, !cir.ptr cir.scope { %4 = cir.load %0 : !cir.ptr, !s32i - %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %5 = cir.const #cir.int<5> : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { @@ -242,7 +242,7 @@ cir.func @testDoWhile() { cir.yield } while { %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.const #cir.int<10> : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) @@ -257,21 +257,21 @@ cir.func @testDoWhile() { // CHECK: ^bb[[#COND]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#BODY]]: + // CHECK: ^bb[[#BODY]]: // [...] // CHECK: llvm.br ^bb[[#BREAK:]] - // CHECK: ^bb[[#BREAK]]: + // CHECK: ^bb[[#BREAK]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preCOND0:]] - // CHECK: ^bb[[#preEXIT1]]: + // CHECK: ^bb[[#preEXIT1]]: // CHECK: llvm.br ^bb[[#preEXIT2:]] - // CHECK: ^bb[[#preCOND0]]: + // CHECK: ^bb[[#preCOND0]]: // CHECK: llvm.br ^bb[[#preCOND1:]] - // CHECK: ^bb[[#preCOND1]]: + // CHECK: ^bb[[#preCOND1]]: // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#preEXIT2]]: + // CHECK: ^bb[[#preEXIT2]]: // CHECK: llvm.br ^bb[[#EXIT:]] - // CHECK: ^bb[[#EXIT]]: + // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir index 13db11549099..0371d416b61d 100644 --- a/clang/test/CIR/Lowering/loops-with-continue.cir +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -6,11 +6,11 @@ module { cir.func @testFor() { cir.scope { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<1> : !s32i) : !s32i + %1 = cir.const #cir.int<1> : !s32i cir.store %1, %0 : !s32i, !cir.ptr cir.for : cond { %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.const #cir.int<10> : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) @@ -18,7 +18,7 @@ module { cir.scope { cir.scope { %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<5> : !s32i) : !s32i + %3 = cir.const #cir.int<5> : !s32i %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.if %5 { @@ -43,25 +43,25 @@ module { // CHECK: ^bb[[#COND]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE1:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#preCONTINUE1]]: + // CHECK: ^bb[[#preCONTINUE1]]: // CHECK: llvm.br ^bb[[#preCONTINUE2:]] - // CHECK: ^bb[[#preCONTINUE2]]: + // CHECK: ^bb[[#preCONTINUE2]]: // CHECK: llvm.br ^bb[[#CONTINUE:]] - // CHECK: ^bb[[#CONTINUE]]: + // CHECK: ^bb[[#CONTINUE]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preSTEP:]], ^bb[[#preBODY0:]] - // CHECK: ^bb[[#preSTEP]]: + // CHECK: ^bb[[#preSTEP]]: // CHECK: llvm.br ^bb[[#STEP:]] - // CHECK: ^bb[[#preBODY0]]: + // CHECK: ^bb[[#preBODY0]]: // CHECK: llvm.br ^bb[[#preBODY1:]] - // CHECK: ^bb[[#preBODY1]]: + // CHECK: ^bb[[#preBODY1]]: // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#BODY]]: + // CHECK: ^bb[[#BODY]]: // CHECK: llvm.br ^bb[[#STEP:]] - // CHECK: ^bb[[#STEP]]: + // CHECK: ^bb[[#STEP]]: // [...] // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#EXIT]]: + // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } @@ -69,11 +69,11 @@ module { cir.func @testForNested() { cir.scope { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<1> : !s32i) : !s32i + %1 = cir.const #cir.int<1> : !s32i cir.store %1, %0 : !s32i, !cir.ptr cir.for : cond { %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.const #cir.int<10> : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) @@ -81,11 +81,11 @@ module { cir.scope { cir.scope { %2 = cir.alloca !s32i, !cir.ptr, ["j", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<1> : !s32i) : !s32i + %3 = cir.const #cir.int<1> : !s32i cir.store %3, %2 : !s32i, !cir.ptr cir.for : cond { %4 = cir.load %2 : !cir.ptr, !s32i - %5 = cir.const(#cir.int<10> : !s32i) : !s32i + %5 = cir.const #cir.int<10> : !s32i %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.condition(%7) @@ -93,7 +93,7 @@ module { cir.scope { cir.scope { %4 = cir.load %2 : !cir.ptr, !s32i - %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %5 = cir.const #cir.int<5> : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { @@ -127,53 +127,53 @@ module { // CHECK: ^bb[[#COND]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED1:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#preNESTED1]]: + // CHECK: ^bb[[#preNESTED1]]: // CHECK: llvm.br ^bb[[#preNESTED2:]] - // CHECK: ^bb[[#preNESTED2]]: + // CHECK: ^bb[[#preNESTED2]]: // CHECK: llvm.br ^bb[[#NESTED:]] - // CHECK: ^bb[[#NESTED]]: + // CHECK: ^bb[[#NESTED]]: // [...] // CHECK: llvm.br ^bb[[#COND_NESTED:]] // CHECK: ^bb[[#COND_NESTED]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE1:]], ^bb[[#EXIT_NESTED:]] - // CHECK: ^bb[[#preCONTINUE1]]: + // CHECK: ^bb[[#preCONTINUE1]]: // CHECK: llvm.br ^bb[[#preCONTINUE2:]] - // CHECK: ^bb[[#preCONTINUE2]]: + // CHECK: ^bb[[#preCONTINUE2]]: // CHECK: llvm.br ^bb[[#CONTINUE:]] - // CHECK: ^bb[[#CONTINUE]]: + // CHECK: ^bb[[#CONTINUE]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preSTEP0:]], ^bb[[#preBODY0:]] - // CHECK: ^bb[[#preSTEP0]]: + // CHECK: ^bb[[#preSTEP0]]: // CHECK: llvm.br ^bb[[#STEP_NESTED:]] - // CHECK: ^bb[[#preBODY0]]: + // CHECK: ^bb[[#preBODY0]]: // CHECK: llvm.br ^bb[[#preBODY1:]] - // CHECK: ^bb[[#preBODY1]]: + // CHECK: ^bb[[#preBODY1]]: // CHECK: llvm.br ^bb[[#BODY_NESTED:]] - // CHECK: ^bb[[#BODY_NESTED]]: + // CHECK: ^bb[[#BODY_NESTED]]: // CHECK: llvm.br ^bb[[#STEP_NESTED:]] - // CHECK: ^bb[[#STEP_NESTED]]: + // CHECK: ^bb[[#STEP_NESTED]]: // [...] // CHECK: llvm.br ^bb[[#COND_NESTED:]] - // CHECK: ^bb[[#EXIT_NESTED]]: + // CHECK: ^bb[[#EXIT_NESTED]]: // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#BODY]]: + // CHECK: ^bb[[#BODY]]: // CHECK: llvm.br ^bb[[#STEP:]] - // CHECK: ^bb[[#STEP]]: + // CHECK: ^bb[[#STEP]]: // [...] // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#EXIT]]: + // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } cir.func @testWhile() { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.const #cir.int<0> : !s32i cir.store %1, %0 : !s32i, !cir.ptr cir.scope { cir.while { %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.const #cir.int<10> : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) @@ -183,7 +183,7 @@ cir.func @testWhile() { cir.store %3, %0 : !s32i, !cir.ptr cir.scope { %4 = cir.load %0 : !cir.ptr, !s32i - %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %5 = cir.const #cir.int<5> : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { @@ -202,25 +202,25 @@ cir.func @testWhile() { // CHECK: ^bb[[#COND]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#BODY]]: + // CHECK: ^bb[[#BODY]]: // [...] // CHECK: llvm.br ^bb[[#CONTINUE:]] - // CHECK: ^bb[[#CONTINUE]]: + // CHECK: ^bb[[#CONTINUE]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCOND0:]], ^bb[[#preCOND1:]] - // CHECK: ^bb[[#preCOND0]]: + // CHECK: ^bb[[#preCOND0]]: // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#preCOND1]]: + // CHECK: ^bb[[#preCOND1]]: // CHECK: llvm.br ^bb[[#preCOND2:]] - // CHECK: ^bb[[#preCOND2]]: + // CHECK: ^bb[[#preCOND2]]: // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#EXIT]]: + // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } cir.func @testDoWhile() { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.const #cir.int<0> : !s32i cir.store %1, %0 : !s32i, !cir.ptr cir.scope { cir.do { @@ -229,7 +229,7 @@ cir.func @testWhile() { cir.store %3, %0 : !s32i, !cir.ptr cir.scope { %4 = cir.load %0 : !cir.ptr, !s32i - %5 = cir.const(#cir.int<5> : !s32i) : !s32i + %5 = cir.const #cir.int<5> : !s32i %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool cir.if %7 { @@ -239,7 +239,7 @@ cir.func @testWhile() { cir.yield } while { %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<10> : !s32i) : !s32i + %3 = cir.const #cir.int<10> : !s32i %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool cir.condition(%5) @@ -255,19 +255,19 @@ cir.func @testWhile() { // CHECK: ^bb[[#COND]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#BODY]]: + // CHECK: ^bb[[#BODY]]: // [...] // CHECK: llvm.br ^bb[[#CONTINUE:]] - // CHECK: ^bb[[#CONTINUE]]: + // CHECK: ^bb[[#CONTINUE]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCOND0:]], ^bb[[#preCOND1:]] - // CHECK: ^bb[[#preCOND0]]: + // CHECK: ^bb[[#preCOND0]]: // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#preCOND1]]: + // CHECK: ^bb[[#preCOND1]]: // CHECK: llvm.br ^bb[[#preCOND2:]] - // CHECK: ^bb[[#preCOND2]]: + // CHECK: ^bb[[#preCOND2]]: // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#EXIT]]: + // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index 2516791d68a7..1616ede8934a 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -7,7 +7,7 @@ module { %0 = cir.alloca !cir.ptr, !cir.ptr>, ["a", init] {alignment = 8 : i64} cir.store %arg0, %0 : !cir.ptr, !cir.ptr> %1 = cir.load %0 : !cir.ptr>, !cir.ptr - %2 = cir.const(#cir.int<1> : !s32i) : !s32i + %2 = cir.const #cir.int<1> : !s32i %3 = cir.ptr_stride(%1 : !cir.ptr, %2 : !s32i), !cir.ptr %4 = cir.load %3 : !cir.ptr, !s32i cir.return diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index 1c99823b0584..9da4910499e4 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -7,7 +7,7 @@ module { cir.func @foo() { cir.scope { %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} - %1 = cir.const(#cir.int<4> : !u32i) : !u32i + %1 = cir.const #cir.int<4> : !u32i cir.store %1, %0 : !u32i, !cir.ptr } cir.return @@ -52,7 +52,7 @@ module { cir.func @scope_with_return() -> !u32i { %0 = cir.alloca !u32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} cir.scope { - %2 = cir.const(#cir.int<0> : !u32i) : !u32i + %2 = cir.const #cir.int<0> : !u32i cir.store %2, %0 : !u32i, !cir.ptr %3 = cir.load %0 : !cir.ptr, !u32i cir.return %3 : !u32i diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index edf279755627..334ca781fd8e 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -24,7 +24,7 @@ module { cir.func @shouldConstInitLocalStructsWithConstStructAttr() { %0 = cir.alloca !ty_22S2A22, !cir.ptr, ["s"] {alignment = 4 : i64} - %1 = cir.const(#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22) : !ty_22S2A22 + %1 = cir.const #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22 cir.store %1, %0 : !ty_22S2A22, !cir.ptr cir.return } diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index 4737cba64b8b..dee8e98db858 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -117,7 +117,7 @@ module { ^bb1: // no predecessors cir.break } - ] + ] } cir.return } @@ -148,7 +148,7 @@ module { case (equal, 0) { cir.scope { %6 = cir.load %1 : !cir.ptr, !s32i - %7 = cir.const(#cir.int<0> : !s32i) : !s32i + %7 = cir.const #cir.int<0> : !s32i %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool cir.if %9 { @@ -157,12 +157,12 @@ module { } cir.break } - ] - } - %3 = cir.const(#cir.int<3> : !s32i) : !s32i + ] + } + %3 = cir.const #cir.int<3> : !s32i cir.store %3, %2 : !s32i, !cir.ptr %4 = cir.load %2 : !cir.ptr, !s32i - cir.return %4 : !s32i + cir.return %4 : !s32i } // CHECK: llvm.func @shouldLowerNestedBreak // CHECK: llvm.switch %6 : i32, ^bb7 [ diff --git a/clang/test/CIR/Lowering/ternary.cir b/clang/test/CIR/Lowering/ternary.cir index 6ccd9c4ed323..9fccea8043f8 100644 --- a/clang/test/CIR/Lowering/ternary.cir +++ b/clang/test/CIR/Lowering/ternary.cir @@ -9,13 +9,13 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, !cir.ptr %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.const(#cir.int<0> : !s32i) : !s32i + %3 = cir.const #cir.int<0> : !s32i %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool %5 = cir.ternary(%4, true { - %7 = cir.const(#cir.int<3> : !s32i) : !s32i + %7 = cir.const #cir.int<3> : !s32i cir.yield %7 : !s32i }, false { - %7 = cir.const(#cir.int<5> : !s32i) : !s32i + %7 = cir.const #cir.int<5> : !s32i cir.yield %7 : !s32i }) : (!cir.bool) -> !s32i cir.store %5, %1 : !s32i, !cir.ptr diff --git a/clang/test/CIR/Lowering/types.cir b/clang/test/CIR/Lowering/types.cir index 12bb892bd4c4..5e5be9192e8a 100644 --- a/clang/test/CIR/Lowering/types.cir +++ b/clang/test/CIR/Lowering/types.cir @@ -5,9 +5,9 @@ module { cir.func @testTypeLowering() { // Should lower void pointers as opaque pointers. - %0 = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr + %0 = cir.const #cir.ptr : !cir.ptr // CHECK: llvm.mlir.zero : !llvm.ptr - %1 = cir.const(#cir.ptr : !cir.ptr>) : !cir.ptr> + %1 = cir.const #cir.ptr : !cir.ptr> // CHECK: llvm.mlir.zero : !llvm.ptr cir.return } diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir index 1be10c992c7b..9e8856428c84 100644 --- a/clang/test/CIR/Lowering/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -5,7 +5,7 @@ module { cir.func @foo() { %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} - %2 = cir.const(#cir.int<2> : !s32i) : !s32i + %2 = cir.const #cir.int<2> : !s32i cir.store %2, %0 : !s32i, !cir.ptr cir.store %2, %1 : !s32i, !cir.ptr diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index 773b57181de9..48e2705e756d 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -5,7 +5,7 @@ module { cir.func @foo() -> !s32i { %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} - %2 = cir.const(#cir.int<1> : !s32i) : !s32i + %2 = cir.const #cir.int<1> : !s32i cir.store %2, %1 : !s32i, !cir.ptr %3 = cir.load %1 : !cir.ptr, !s32i %4 = cir.unary(not, %3) : !s32i, !s32i diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index d998f494855e..6734845ad03d 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -6,7 +6,7 @@ module { cir.func @foo() { %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} - %2 = cir.const(#cir.int<2> : !s32i) : !s32i + %2 = cir.const #cir.int<2> : !s32i cir.store %2, %0 : !s32i, !cir.ptr cir.store %2, %1 : !s32i, !cir.ptr diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir index 92beda0ee4b8..694c3ae17465 100644 --- a/clang/test/CIR/Lowering/unions.cir +++ b/clang/test/CIR/Lowering/unions.cir @@ -22,7 +22,7 @@ module { cir.func @test(%arg0: !cir.ptr) { // Should store directly to the union's base address. - %5 = cir.const(#true) : !cir.bool + %5 = cir.const #true %6 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr cir.store %5, %6 : !cir.bool, !cir.ptr // CHECK: %[[#VAL:]] = llvm.mlir.constant(1 : i8) : i8 diff --git a/clang/test/CIR/Lowering/variadics.cir b/clang/test/CIR/Lowering/variadics.cir index 6e8c8f89233e..b4a5a30c5e82 100644 --- a/clang/test/CIR/Lowering/variadics.cir +++ b/clang/test/CIR/Lowering/variadics.cir @@ -32,7 +32,7 @@ module { // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vaend %{{[0-9]+}} : !llvm.ptr - %8 = cir.const(#cir.int<0> : !s32i) : !s32i + %8 = cir.const #cir.int<0> : !s32i cir.store %8, %1 : !s32i, !cir.ptr %9 = cir.load %1 : !cir.ptr, !s32i cir.return %9 : !s32i diff --git a/clang/test/CIR/Transforms/lib-opt-find.cpp b/clang/test/CIR/Transforms/lib-opt-find.cpp index 14c464ff9448..c11daba10f28 100644 --- a/clang/test/CIR/Transforms/lib-opt-find.cpp +++ b/clang/test/CIR/Transforms/lib-opt-find.cpp @@ -19,11 +19,11 @@ int test1(unsigned char n = 3) // CHECK: %[[pattern:.*]] = cir.cast(integral, %[[load_pattern:.*]] : !u8i), !s32i // CHECK-NOT: {{.*}} cir.call @_ZSt4findIPhhET_S1_S1_RKT0_( - // CHECK: %[[array_size:.*]] = cir.const(#cir.int<9> : !u64i) : !u64i + // CHECK: %[[array_size:.*]] = cir.const #cir.int<9> : !u64i // CHECK: %[[result_cast:.*]] = cir.libc.memchr(%[[cast_to_void]], %[[pattern]], %[[array_size]]) // CHECK: %[[memchr_res:.*]] = cir.cast(bitcast, %[[result_cast]] : !cir.ptr), !cir.ptr - // CHECK: %[[nullptr:.*]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr + // CHECK: %[[nullptr:.*]] = cir.const #cir.ptr : !cir.ptr // CHECK: %[[cmp_res:.*]] = cir.cmp(eq, %[[nullptr]], %[[memchr_res]]) : !cir.ptr, !cir.bool // CHECK: cir.ternary(%[[cmp_res]], true { // CHECK: cir.yield %[[last]] : !cir.ptr @@ -56,7 +56,7 @@ unsigned char* test2(unsigned char* first, unsigned char* last, unsigned char v) // CHECK: %[[result_cast:.*]] = cir.libc.memchr(%[[cast_to_void]], %[[pattern]], %[[array_size]]) // CHECK: %[[memchr_res:.*]] = cir.cast(bitcast, %[[result_cast]] : !cir.ptr), !cir.ptr - // CHECK: %[[nullptr:.*]] = cir.const(#cir.ptr : !cir.ptr) : !cir.ptr + // CHECK: %[[nullptr:.*]] = cir.const #cir.ptr : !cir.ptr // CHECK: %[[cmp_res:.*]] = cir.cmp(eq, %[[nullptr]], %[[memchr_res]]) : !cir.ptr, !cir.bool // CHECK: cir.ternary(%[[cmp_res]], true { // CHECK: cir.yield %[[last]] : !cir.ptr diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 9306958f944e..5cb52d15c686 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -12,13 +12,13 @@ module { cir.store %arg1, %1 : !s32i, !cir.ptr cir.scope { %2 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} - %3 = cir.const(#cir.int<1> : !s32i) : !s32i + %3 = cir.const #cir.int<1> : !s32i cir.store %3, %2 : !s32i, !cir.ptr %4 = cir.load %0 : !cir.ptr, !s32i cir.switch (%4 : !s32i) [ case (equal, 0 : !s32i) { %5 = cir.load %2 : !cir.ptr, !s32i - %6 = cir.const(#cir.int<1> : !s32i) : !s32i + %6 = cir.const #cir.int<1> : !s32i %7 = cir.binop(add, %5, %6) : !s32i cir.store %7, %2 : !s32i, !cir.ptr cir.br ^bb1 @@ -29,7 +29,7 @@ module { cir.scope { cir.scope { %5 = cir.load %1 : !cir.ptr, !s32i - %6 = cir.const(#cir.int<3> : !s32i) : !s32i + %6 = cir.const #cir.int<3> : !s32i %7 = cir.cmp(eq, %5, %6) : !s32i, !cir.bool cir.if %7 { cir.br ^bb1 @@ -45,10 +45,10 @@ module { cir.scope { %5 = cir.alloca !s32i, !cir.ptr, ["yolo", init] {alignment = 4 : i64} %6 = cir.load %2 : !cir.ptr, !s32i - %7 = cir.const(#cir.int<1> : !s32i) : !s32i + %7 = cir.const #cir.int<1> : !s32i %8 = cir.binop(add, %6, %7) : !s32i cir.store %8, %2 : !s32i, !cir.ptr - %9 = cir.const(#cir.int<100> : !s32i) : !s32i + %9 = cir.const #cir.int<100> : !s32i cir.store %9, %5 : !s32i, !cir.ptr cir.br ^bb1 ^bb1: // pred: ^bb0 @@ -64,7 +64,7 @@ module { // CHECK: cir.switch (%4 : !s32i) [ // CHECK-NEXT: case (equal, 0) { // CHECK-NEXT: %5 = cir.load %2 : !cir.ptr, !s32i -// CHECK-NEXT: %6 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i // CHECK-NEXT: cir.store %7, %2 : !s32i, !cir.ptr // CHECK-NEXT: cir.return @@ -73,7 +73,7 @@ module { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %6 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK-NEXT: %6 = cir.const #cir.int<3> : !s32i // CHECK-NEXT: %7 = cir.cmp(eq, %5, %6) : !s32i, !cir.bool // CHECK-NEXT: cir.if %7 { // CHECK-NEXT: cir.return @@ -87,10 +87,10 @@ module { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.alloca !s32i, !cir.ptr, ["yolo", init] {alignment = 4 : i64} // CHECK-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i -// CHECK-NEXT: %7 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK-NEXT: %7 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %8 = cir.binop(add, %6, %7) : !s32i // CHECK-NEXT: cir.store %8, %2 : !s32i, !cir.ptr -// CHECK-NEXT: %9 = cir.const(#cir.int<100> : !s32i) : !s32i +// CHECK-NEXT: %9 = cir.const #cir.int<100> : !s32i // CHECK-NEXT: cir.store %9, %5 : !s32i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } From 6090fbd220b94d1cb0d6232dc28d52622ae60e4f Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Mon, 6 May 2024 07:53:43 +0800 Subject: [PATCH 1549/2301] [CIR][CIRGen][LLVMLowering] Add support for checked arithmetic builtins (#560) This patch adds support for checked arithmetic builtins, including: - `__builtin_add_overflow` and `__builtin_{s|u}add{|l|ll}_overflow`; - `__builtin_sub_overflow` and `__builtin_{s|u}sub{|l|ll}_overflow`; - `__builtin_mul_overflow` and `__builtin_{s|u}mul{|l|ll}_overflow`. This patch adds a new operation `cir.checked_arith` to represent these builtins. Unlike other CIR operations, this new operation has two result values. One for the possibly truncated result, and the other for a boolean flag that indicates whether the operation has overflowed. CIRGen and LLVMIR lowering support for the new operation is both included in this PR. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 13 + clang/include/clang/CIR/Dialect/IR/CIROps.td | 58 +++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 197 ++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 144 ++++++- clang/test/CIR/CodeGen/builtins-overflow.cpp | 364 ++++++++++++++++++ clang/test/CIR/Lowering/binop-overflow.cir | 67 ++++ 6 files changed, 836 insertions(+), 7 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtins-overflow.cpp create mode 100644 clang/test/CIR/Lowering/binop-overflow.cir diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 110bedf5e456..7102b2da91d0 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -205,6 +205,19 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createSub(lhs, rhs, false, true); } + struct BinOpOverflowResults { + mlir::Value result; + mlir::Value overflow; + }; + + BinOpOverflowResults createBinOpOverflowOp(mlir::Location loc, + mlir::cir::IntType resultTy, + mlir::cir::BinOpOverflowKind kind, + mlir::Value lhs, mlir::Value rhs) { + auto op = create(loc, resultTy, kind, lhs, rhs); + return {op.getResult(), op.getOverflow()}; + } + //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 6c5a1171b3d5..14642f4db55d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1077,6 +1077,64 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// BinOpOverflowOp +//===----------------------------------------------------------------------===// + +def BinOpOverflowKind : I32EnumAttr< + "BinOpOverflowKind", + "checked binary arithmetic operation kind", + [BinOpKind_Add, BinOpKind_Sub, BinOpKind_Mul]> { + let cppNamespace = "::mlir::cir"; +} + +def BinOpOverflowOp : CIR_Op<"binop.overflow", [Pure, SameTypeOperands]> { + let summary = "Perform binary integral arithmetic with overflow checking"; + let description = [{ + `cir.binop.overflow` performs binary arithmetic operations with overflow + checking on integral operands. + + The `kind` argument specifies the kind of arithmetic operation to perform. + It can be either `add`, `sub`, or `mul`. The `lhs` and `rhs` arguments + specify the input operands of the arithmetic operation. The types of `lhs` + and `rhs` must be the same. + + `cir.binop.overflow` produces two SSA values. `result` is the result of the + arithmetic operation truncated to its specified type. `overflow` is a + boolean value indicating whether overflow happens during the operation. + + The exact semantic of this operation is as follows: + + - `lhs` and `rhs` are promoted to an imaginary integral type that has + infinite precision. + - The arithmetic operation is performed on the promoted operands. + - The infinite-precision result is truncated to the type of `result`. The + truncated result is assigned to `result`. + - If the truncated result is equal to the un-truncated result, `overflow` + is assigned to false. Otherwise, `overflow` is assigned to true. + }]; + + let arguments = (ins Arg:$kind, + CIR_IntType:$lhs, CIR_IntType:$rhs); + let results = (outs CIR_IntType:$result, CIR_BoolType:$overflow); + + let assemblyFormat = [{ + `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,` + `(` type($result) `,` type($overflow) `)` + attr-dict + }]; + + let builders = [ + OpBuilder<(ins "mlir::cir::IntType":$resultTy, + "mlir::cir::BinOpOverflowKind":$kind, + "mlir::Value":$lhs, + "mlir::Value":$rhs), [{ + auto overflowTy = mlir::cir::BoolType::get($_builder.getContext()); + build($_builder, $_state, resultTy, overflowTy, kind, lhs, rhs); + }]> + ]; +} + //===----------------------------------------------------------------------===// // BitsOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 1567b78e7ff4..48575bb2e56b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -91,6 +91,52 @@ static void initializeAlloca(CIRGenFunction &CGF, } } +namespace { +struct WidthAndSignedness { + unsigned Width; + bool Signed; +}; +} // namespace + +static WidthAndSignedness +getIntegerWidthAndSignedness(const clang::ASTContext &context, + const clang::QualType Type) { + assert(Type->isIntegerType() && "Given type is not an integer."); + unsigned Width = Type->isBooleanType() ? 1 + : Type->isBitIntType() ? context.getIntWidth(Type) + : context.getTypeInfo(Type).Width; + bool Signed = Type->isSignedIntegerType(); + return {Width, Signed}; +} + +// Given one or more integer types, this function produces an integer type that +// encompasses them: any value in one of the given types could be expressed in +// the encompassing type. +static struct WidthAndSignedness +EncompassingIntegerType(ArrayRef Types) { + assert(Types.size() > 0 && "Empty list of types."); + + // If any of the given types is signed, we must return a signed type. + bool Signed = false; + for (const auto &Type : Types) { + Signed |= Type.Signed; + } + + // The encompassing type must have a width greater than or equal to the width + // of the specified types. Additionally, if the encompassing type is signed, + // its width must be strictly greater than the width of any unsigned types + // given. + unsigned Width = 0; + for (const auto &Type : Types) { + unsigned MinWidth = Type.Width + (Signed && !Type.Signed); + if (Width < MinWidth) { + Width = MinWidth; + } + } + + return {Width, Signed}; +} + RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue) { @@ -705,6 +751,157 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get( builder.createBitcast(AllocaAddr, builder.getVoidPtrTy())); } + + case Builtin::BI__builtin_add_overflow: + case Builtin::BI__builtin_sub_overflow: + case Builtin::BI__builtin_mul_overflow: { + const clang::Expr *LeftArg = E->getArg(0); + const clang::Expr *RightArg = E->getArg(1); + const clang::Expr *ResultArg = E->getArg(2); + + clang::QualType ResultQTy = + ResultArg->getType()->castAs()->getPointeeType(); + + WidthAndSignedness LeftInfo = + getIntegerWidthAndSignedness(CGM.getASTContext(), LeftArg->getType()); + WidthAndSignedness RightInfo = + getIntegerWidthAndSignedness(CGM.getASTContext(), RightArg->getType()); + WidthAndSignedness ResultInfo = + getIntegerWidthAndSignedness(CGM.getASTContext(), ResultQTy); + + // Note we compute the encompassing type with the consideration to the + // result type, so later in LLVM lowering we don't get redundant integral + // extension casts. + WidthAndSignedness EncompassingInfo = + EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo}); + + auto EncompassingCIRTy = mlir::cir::IntType::get( + builder.getContext(), EncompassingInfo.Width, EncompassingInfo.Signed); + auto ResultCIRTy = + CGM.getTypes().ConvertType(ResultQTy).cast(); + + mlir::Value Left = buildScalarExpr(LeftArg); + mlir::Value Right = buildScalarExpr(RightArg); + Address ResultPtr = buildPointerWithAlignment(ResultArg); + + // Extend each operand to the encompassing type, if necessary. + if (Left.getType() != EncompassingCIRTy) + Left = builder.createCast(mlir::cir::CastKind::integral, Left, + EncompassingCIRTy); + if (Right.getType() != EncompassingCIRTy) + Right = builder.createCast(mlir::cir::CastKind::integral, Right, + EncompassingCIRTy); + + // Perform the operation on the extended values. + mlir::cir::BinOpOverflowKind OpKind; + switch (BuiltinID) { + default: + llvm_unreachable("Unknown overflow builtin id."); + case Builtin::BI__builtin_add_overflow: + OpKind = mlir::cir::BinOpOverflowKind::Add; + break; + case Builtin::BI__builtin_sub_overflow: + OpKind = mlir::cir::BinOpOverflowKind::Sub; + break; + case Builtin::BI__builtin_mul_overflow: + OpKind = mlir::cir::BinOpOverflowKind::Mul; + break; + } + + auto Loc = getLoc(E->getSourceRange()); + auto ArithResult = + builder.createBinOpOverflowOp(Loc, ResultCIRTy, OpKind, Left, Right); + + // Here is a slight difference from the original clang CodeGen: + // - In the original clang CodeGen, the checked arithmetic result is + // first computed as a value of the encompassing type, and then it is + // truncated to the actual result type with a second overflow checking. + // - In CIRGen, the checked arithmetic operation directly produce the + // checked arithmetic result in its expected type. + // + // So we don't need a truncation and a second overflow checking here. + + // Finally, store the result using the pointer. + bool isVolatile = + ResultArg->getType()->getPointeeType().isVolatileQualified(); + builder.createStore(Loc, buildToMemory(ArithResult.result, ResultQTy), + ResultPtr, isVolatile); + + return RValue::get(ArithResult.overflow); + } + + case Builtin::BI__builtin_uadd_overflow: + case Builtin::BI__builtin_uaddl_overflow: + case Builtin::BI__builtin_uaddll_overflow: + case Builtin::BI__builtin_usub_overflow: + case Builtin::BI__builtin_usubl_overflow: + case Builtin::BI__builtin_usubll_overflow: + case Builtin::BI__builtin_umul_overflow: + case Builtin::BI__builtin_umull_overflow: + case Builtin::BI__builtin_umulll_overflow: + case Builtin::BI__builtin_sadd_overflow: + case Builtin::BI__builtin_saddl_overflow: + case Builtin::BI__builtin_saddll_overflow: + case Builtin::BI__builtin_ssub_overflow: + case Builtin::BI__builtin_ssubl_overflow: + case Builtin::BI__builtin_ssubll_overflow: + case Builtin::BI__builtin_smul_overflow: + case Builtin::BI__builtin_smull_overflow: + case Builtin::BI__builtin_smulll_overflow: { + // Scalarize our inputs. + mlir::Value X = buildScalarExpr(E->getArg(0)); + mlir::Value Y = buildScalarExpr(E->getArg(1)); + + const clang::Expr *ResultArg = E->getArg(2); + Address ResultPtr = buildPointerWithAlignment(ResultArg); + + // Decide which of the arithmetic operation we are lowering to: + mlir::cir::BinOpOverflowKind ArithKind; + switch (BuiltinID) { + default: + llvm_unreachable("Unknown overflow builtin id."); + case Builtin::BI__builtin_uadd_overflow: + case Builtin::BI__builtin_uaddl_overflow: + case Builtin::BI__builtin_uaddll_overflow: + case Builtin::BI__builtin_sadd_overflow: + case Builtin::BI__builtin_saddl_overflow: + case Builtin::BI__builtin_saddll_overflow: + ArithKind = mlir::cir::BinOpOverflowKind::Add; + break; + case Builtin::BI__builtin_usub_overflow: + case Builtin::BI__builtin_usubl_overflow: + case Builtin::BI__builtin_usubll_overflow: + case Builtin::BI__builtin_ssub_overflow: + case Builtin::BI__builtin_ssubl_overflow: + case Builtin::BI__builtin_ssubll_overflow: + ArithKind = mlir::cir::BinOpOverflowKind::Sub; + break; + case Builtin::BI__builtin_umul_overflow: + case Builtin::BI__builtin_umull_overflow: + case Builtin::BI__builtin_umulll_overflow: + case Builtin::BI__builtin_smul_overflow: + case Builtin::BI__builtin_smull_overflow: + case Builtin::BI__builtin_smulll_overflow: + ArithKind = mlir::cir::BinOpOverflowKind::Mul; + break; + } + + clang::QualType ResultQTy = + ResultArg->getType()->castAs()->getPointeeType(); + auto ResultCIRTy = + CGM.getTypes().ConvertType(ResultQTy).cast(); + + auto Loc = getLoc(E->getSourceRange()); + auto ArithResult = + builder.createBinOpOverflowOp(Loc, ResultCIRTy, ArithKind, X, Y); + + bool isVolatile = + ResultArg->getType()->getPointeeType().isVolatileQualified(); + builder.createStore(Loc, buildToMemory(ArithResult.result, ResultQTy), + ResultPtr, isVolatile); + + return RValue::get(ArithResult.overflow); + } } // If this is an alias for a lib function (e.g. __builtin_sin), emit diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3a1661dd2898..1e4e59d5d3eb 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1951,6 +1951,135 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { } }; +class CIRBinOpOverflowOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BinOpOverflowOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto loc = op.getLoc(); + auto arithKind = op.getKind(); + auto operandTy = op.getLhs().getType(); + auto resultTy = op.getResult().getType(); + + auto encompassedTyInfo = computeEncompassedTypeWidth(operandTy, resultTy); + auto encompassedLLVMTy = rewriter.getIntegerType(encompassedTyInfo.width); + + auto lhs = adaptor.getLhs(); + auto rhs = adaptor.getRhs(); + if (operandTy.getWidth() < encompassedTyInfo.width) { + if (operandTy.isSigned()) { + lhs = rewriter.create(loc, encompassedLLVMTy, lhs); + rhs = rewriter.create(loc, encompassedLLVMTy, rhs); + } else { + lhs = rewriter.create(loc, encompassedLLVMTy, lhs); + rhs = rewriter.create(loc, encompassedLLVMTy, rhs); + } + } + + auto intrinName = getLLVMIntrinName(arithKind, encompassedTyInfo.sign, + encompassedTyInfo.width); + auto intrinNameAttr = mlir::StringAttr::get(op.getContext(), intrinName); + + auto overflowLLVMTy = rewriter.getI1Type(); + auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral( + rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy}); + + auto callLLVMIntrinOp = rewriter.create( + loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs}); + auto intrinRet = callLLVMIntrinOp.getResult(0); + + auto result = rewriter + .create(loc, intrinRet, + ArrayRef{0}) + .getResult(); + auto overflow = rewriter + .create( + loc, intrinRet, ArrayRef{1}) + .getResult(); + + if (resultTy.getWidth() < encompassedTyInfo.width) { + auto resultLLVMTy = getTypeConverter()->convertType(resultTy); + auto truncResult = + rewriter.create(loc, resultLLVMTy, result); + + // Extend the truncated result back to the encompassing type to check for + // any overflows during the truncation. + mlir::Value truncResultExt; + if (resultTy.isSigned()) + truncResultExt = rewriter.create( + loc, encompassedLLVMTy, truncResult); + else + truncResultExt = rewriter.create( + loc, encompassedLLVMTy, truncResult); + auto truncOverflow = rewriter.create( + loc, mlir::LLVM::ICmpPredicate::ne, truncResultExt, result); + + result = truncResult; + overflow = + rewriter.create(loc, overflow, truncOverflow); + } + + auto boolLLVMTy = + getTypeConverter()->convertType(op.getOverflow().getType()); + if (boolLLVMTy != rewriter.getI1Type()) + overflow = rewriter.create(loc, boolLLVMTy, overflow); + + rewriter.replaceOp(op, mlir::ValueRange{result, overflow}); + + return mlir::success(); + } + +private: + static std::string getLLVMIntrinName(mlir::cir::BinOpOverflowKind opKind, + bool isSigned, unsigned width) { + // The intrinsic name is `@llvm.{s|u}{opKind}.with.overflow.i{width}` + + std::string name = "llvm."; + + if (isSigned) + name.push_back('s'); + else + name.push_back('u'); + + switch (opKind) { + case mlir::cir::BinOpOverflowKind::Add: + name.append("add."); + break; + case mlir::cir::BinOpOverflowKind::Sub: + name.append("sub."); + break; + case mlir::cir::BinOpOverflowKind::Mul: + name.append("mul."); + break; + default: + llvm_unreachable("unknown checked arith kind"); + } + + name.append("with.overflow.i"); + name.append(std::to_string(width)); + + return name; + } + + struct EncompassedTypeInfo { + bool sign; + unsigned width; + }; + + static EncompassedTypeInfo + computeEncompassedTypeWidth(mlir::cir::IntType operandTy, + mlir::cir::IntType resultTy) { + auto sign = operandTy.getIsSigned() || resultTy.getIsSigned(); + auto width = + std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()), + resultTy.getWidth() + (sign && resultTy.isUnsigned())); + return {sign, width}; + } +}; + class CIRShiftOpLowering : public mlir::OpConversionPattern { public: @@ -3004,13 +3133,14 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRBitPopcountOpLowering, CIRAtomicCmpXchgLowering, CIRAtomicXchgLowering, CIRAtomicFetchLowering, CIRByteswapOpLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, - CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, - CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, - CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, - CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, - CIRBrOpLowering, CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, - CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, - CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, + CIRBinOpLowering, CIRBinOpOverflowOpLowering, CIRShiftOpLowering, + CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, + CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, + CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, + CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, + CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, + CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, + CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, diff --git a/clang/test/CIR/CodeGen/builtins-overflow.cpp b/clang/test/CIR/CodeGen/builtins-overflow.cpp new file mode 100644 index 000000000000..d4652527cb56 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtins-overflow.cpp @@ -0,0 +1,364 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +bool test_add_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) { + return __builtin_add_overflow(x, y, res); +} + +// CHECK: cir.func @_Z32test_add_overflow_uint_uint_uintjjPj +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_add_overflow_int_int_int(int x, int y, int *res) { + return __builtin_add_overflow(x, y, res); +} + +// CHECK: cir.func @_Z29test_add_overflow_int_int_intiiPi +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_add_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) { + return __builtin_add_overflow(x, y, res); +} + +// CHECK: cir.func @_Z38test_add_overflow_xint31_xint31_xint31DB31_S_PS_ +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> +// CHECK: } + +bool test_sub_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) { + return __builtin_sub_overflow(x, y, res); +} + +// CHECK: cir.func @_Z32test_sub_overflow_uint_uint_uintjjPj +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_sub_overflow_int_int_int(int x, int y, int *res) { + return __builtin_sub_overflow(x, y, res); +} + +// CHECK: cir.func @_Z29test_sub_overflow_int_int_intiiPi +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_sub_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) { + return __builtin_sub_overflow(x, y, res); +} + +// CHECK: cir.func @_Z38test_sub_overflow_xint31_xint31_xint31DB31_S_PS_ +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> +// CHECK: } + +bool test_mul_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) { + return __builtin_mul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z32test_mul_overflow_uint_uint_uintjjPj +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_mul_overflow_int_int_int(int x, int y, int *res) { + return __builtin_mul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z29test_mul_overflow_int_int_intiiPi +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_mul_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) { + return __builtin_mul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z38test_mul_overflow_xint31_xint31_xint31DB31_S_PS_ +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> +// CHECK: } + +bool test_mul_overflow_ulong_ulong_long(unsigned long x, unsigned long y, unsigned long *res) { + return __builtin_mul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z34test_mul_overflow_ulong_ulong_longmmPm +// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_add_overflow_uint_int_int(unsigned x, int y, int *res) { + return __builtin_add_overflow(x, y, res); +} + +// CHECK: cir.func @_Z30test_add_overflow_uint_int_intjiPi +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[#PROM_X:]] = cir.cast(integral, %[[#X]] : !u32i), !cir.int +// CHECK-NEXT: %[[#PROM_Y:]] = cir.cast(integral, %[[#Y]] : !s32i), !cir.int +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#PROM_X]], %[[#PROM_Y]]) : , (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_add_overflow_volatile(int x, int y, volatile int *res) { + return __builtin_add_overflow(x, y, res); +} + +// CHECK: cir.func @_Z26test_add_overflow_volatileiiPVi +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store volatile %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_uadd_overflow(unsigned x, unsigned y, unsigned *res) { + return __builtin_uadd_overflow(x, y, res); +} + +// CHECK: cir.func @_Z18test_uadd_overflowjjPj +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_uaddl_overflow(unsigned long x, unsigned long y, unsigned long *res) { + return __builtin_uaddl_overflow(x, y, res); +} + +// CHECK: cir.func @_Z19test_uaddl_overflowmmPm +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_uaddll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) { + return __builtin_uaddll_overflow(x, y, res); +} + +// CHECK: cir.func @_Z20test_uaddll_overflowyyPy +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_usub_overflow(unsigned x, unsigned y, unsigned *res) { + return __builtin_usub_overflow(x, y, res); +} + +// CHECK: cir.func @_Z18test_usub_overflowjjPj +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_usubl_overflow(unsigned long x, unsigned long y, unsigned long *res) { + return __builtin_usubl_overflow(x, y, res); +} + +// CHECK: cir.func @_Z19test_usubl_overflowmmPm +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_usubll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) { + return __builtin_usubll_overflow(x, y, res); +} + +// CHECK: cir.func @_Z20test_usubll_overflowyyPy +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_umul_overflow(unsigned x, unsigned y, unsigned *res) { + return __builtin_umul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z18test_umul_overflowjjPj +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CHECK: } + +bool test_umull_overflow(unsigned long x, unsigned long y, unsigned long *res) { + return __builtin_umull_overflow(x, y, res); +} + +// CHECK: cir.func @_Z19test_umull_overflowmmPm +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_umulll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) { + return __builtin_umulll_overflow(x, y, res); +} + +// CHECK: cir.func @_Z20test_umulll_overflowyyPy +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CHECK: } + +bool test_sadd_overflow(int x, int y, int *res) { + return __builtin_sadd_overflow(x, y, res); +} + +// CHECK: cir.func @_Z18test_sadd_overflowiiPi +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_saddl_overflow(long x, long y, long *res) { + return __builtin_saddl_overflow(x, y, res); +} + +// CHECK: cir.func @_Z19test_saddl_overflowllPl +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CHECK: } + +bool test_saddll_overflow(long long x, long long y, long long *res) { + return __builtin_saddll_overflow(x, y, res); +} + +// CHECK: cir.func @_Z20test_saddll_overflowxxPx +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CHECK: } + +bool test_ssub_overflow(int x, int y, int *res) { + return __builtin_ssub_overflow(x, y, res); +} + +// CHECK: cir.func @_Z18test_ssub_overflowiiPi +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_ssubl_overflow(long x, long y, long *res) { + return __builtin_ssubl_overflow(x, y, res); +} + +// CHECK: cir.func @_Z19test_ssubl_overflowllPl +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CHECK: } + +bool test_ssubll_overflow(long long x, long long y, long long *res) { + return __builtin_ssubll_overflow(x, y, res); +} + +// CHECK: cir.func @_Z20test_ssubll_overflowxxPx +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CHECK: } + +bool test_smul_overflow(int x, int y, int *res) { + return __builtin_smul_overflow(x, y, res); +} + +// CHECK: cir.func @_Z18test_smul_overflowiiPi +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CHECK: } + +bool test_smull_overflow(long x, long y, long *res) { + return __builtin_smull_overflow(x, y, res); +} + +// CHECK: cir.func @_Z19test_smull_overflowllPl +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CHECK: } + +bool test_smulll_overflow(long long x, long long y, long long *res) { + return __builtin_smulll_overflow(x, y, res); +} + +// CHECK: cir.func @_Z20test_smulll_overflowxxPx +// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CHECK: } diff --git a/clang/test/CIR/Lowering/binop-overflow.cir b/clang/test/CIR/Lowering/binop-overflow.cir new file mode 100644 index 000000000000..c73e708e5320 --- /dev/null +++ b/clang/test/CIR/Lowering/binop-overflow.cir @@ -0,0 +1,67 @@ +// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM + +!u32i = !cir.int +!s32i = !cir.int + +module { + cir.func @test_add_u32_u32_u32(%lhs: !u32i, %rhs: !u32i, %res: !cir.ptr) -> !cir.bool { + %result, %overflow = cir.binop.overflow(add, %lhs, %rhs) : !u32i, (!u32i, !cir.bool) + cir.store %result, %res : !u32i, !cir.ptr + cir.return %overflow : !cir.bool + } + + // MLIR: llvm.func @test_add_u32_u32_u32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i8 + // MLIR-NEXT: %[[#INTRIN_RET:]] = llvm.call_intrinsic "llvm.uadd.with.overflow.i32"(%[[LHS]], %[[RHS]]) : (i32, i32) -> !llvm.struct<(i32, i1)> + // MLIR-NEXT: %[[#RES:]] = llvm.extractvalue %[[#INTRIN_RET]][0] : !llvm.struct<(i32, i1)> + // MLIR-NEXT: %[[#OVFL:]] = llvm.extractvalue %[[#INTRIN_RET]][1] : !llvm.struct<(i32, i1)> + // MLIR-NEXT: %[[#OVFL_EXT:]] = llvm.zext %[[#OVFL]] : i1 to i8 + // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] : i32, !llvm.ptr + // MLIR-NEXT: llvm.return %[[#OVFL_EXT]] : i8 + // MLIR-NEXT: } + + // LLVM: define i8 @test_add_u32_u32_u32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]]) + // LLVM-NEXT: %[[#INTRIN_RET:]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %[[#LHS]], i32 %[[#RHS]]) + // LLVM-NEXT: %[[#RES:]] = extractvalue { i32, i1 } %[[#INTRIN_RET]], 0 + // LLVM-NEXT: %[[#OVFL:]] = extractvalue { i32, i1 } %[[#INTRIN_RET]], 1 + // LLVM-NEXT: %[[#OVFL_EXT:]] = zext i1 %[[#OVFL]] to i8 + // LLVM-NEXT: store i32 %[[#RES]], ptr %[[#RES_PTR]], align 4 + // LLVM-NEXT: ret i8 %[[#OVFL_EXT]] + // LLVM-NEXT: } + + cir.func @test_add_u32_u32_i32(%lhs: !u32i, %rhs: !u32i, %res: !cir.ptr) -> !cir.bool { + %result, %overflow = cir.binop.overflow(add, %lhs, %rhs) : !u32i, (!s32i, !cir.bool) + cir.store %result, %res : !s32i, !cir.ptr + cir.return %overflow : !cir.bool + } + + // MLIR: llvm.func @test_add_u32_u32_i32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i8 + // MLIR-NEXT: %[[#LHS_EXT:]] = llvm.zext %[[LHS]] : i32 to i33 + // MLIR-NEXT: %[[#RHS_EXT:]] = llvm.zext %[[RHS]] : i32 to i33 + // MLIR-NEXT: %[[#INTRIN_RET:]] = llvm.call_intrinsic "llvm.sadd.with.overflow.i33"(%[[#LHS_EXT]], %[[#RHS_EXT]]) : (i33, i33) -> !llvm.struct<(i33, i1)> + // MLIR-NEXT: %[[#RES_EXT:]] = llvm.extractvalue %[[#INTRIN_RET]][0] : !llvm.struct<(i33, i1)> + // MLIR-NEXT: %[[#ARITH_OVFL:]] = llvm.extractvalue %[[#INTRIN_RET]][1] : !llvm.struct<(i33, i1)> + // MLIR-NEXT: %[[#RES:]] = llvm.trunc %[[#RES_EXT]] : i33 to i32 + // MLIR-NEXT: %[[#RES_EXT_2:]] = llvm.sext %[[#RES]] : i32 to i33 + // MLIR-NEXT: %[[#TRUNC_OVFL:]] = llvm.icmp "ne" %[[#RES_EXT_2]], %[[#RES_EXT]] : i33 + // MLIR-NEXT: %[[#OVFL:]] = llvm.or %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]] : i1 + // MLIR-NEXT: %[[#OVFL_EXT:]] = llvm.zext %[[#OVFL]] : i1 to i8 + // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] : i32, !llvm.ptr + // MLIR-NEXT: llvm.return %[[#OVFL_EXT]] : i8 + // MLIR-NEXT: } + + // LLVM: define i8 @test_add_u32_u32_i32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]]) + // LLVM-NEXT: %[[#LHS_EXT:]] = zext i32 %[[#LHS]] to i33 + // LLVM-NEXT: %[[#RHS_EXT:]] = zext i32 %[[#RHS]] to i33 + // LLVM-NEXT: %[[#INTRIN_RET:]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 %[[#LHS_EXT]], i33 %[[#RHS_EXT]]) + // LLVM-NEXT: %[[#RES_EXT:]] = extractvalue { i33, i1 } %[[#INTRIN_RET]], 0 + // LLVM-NEXT: %[[#ARITH_OVFL:]] = extractvalue { i33, i1 } %[[#INTRIN_RET]], 1 + // LLVM-NEXT: %[[#RES:]] = trunc i33 %[[#RES_EXT]] to i32 + // LLVM-NEXT: %[[#RES_EXT_2:]] = sext i32 %[[#RES]] to i33 + // LLVM-NEXT: %[[#TRUNC_OVFL:]] = icmp ne i33 %[[#RES_EXT_2]], %[[#RES_EXT]] + // LLVM-NEXT: %[[#OVFL:]] = or i1 %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]] + // LLVM-NEXT: %[[#OVFL_EXT:]] = zext i1 %[[#OVFL]] to i8 + // LLVM-NEXT: store i32 %[[#RES]], ptr %[[#RES_PTR]], align 4 + // LLVM-NEXT: ret i8 %[[#OVFL_EXT]] + // LLVM-NEXT: } +} From b6aec722737215fdc3bf31c35c3fa3fbcbd6beac Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 2 Nov 2024 19:49:40 -0700 Subject: [PATCH 1550/2301] Disable Wdeprecated-declarations --- clang/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt index b79e570667b2..b5a8d465e2d4 100644 --- a/clang/CMakeLists.txt +++ b/clang/CMakeLists.txt @@ -25,6 +25,8 @@ list(INSERT CMAKE_MODULE_PATH 0 include(GNUInstallDirs) include(GetDarwinLinkerVersion) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations") + if(CLANG_BUILT_STANDALONE) set(CMAKE_CXX_STANDARD 17 CACHE STRING "C++ standard to conform to") set(CMAKE_CXX_STANDARD_REQUIRED YES) From 8381f1e8d552262832ef477d2e79bcd427ca5f04 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Mon, 6 May 2024 07:55:59 +0800 Subject: [PATCH 1551/2301] [CIR][NFC] remove redundant test in CIR/IR/data-member-ptr.cir (#582) As suggested in #401, this PR removes the `get_global_member` test in `CIR/IR/data-member-ptr.cir` as it is redundant. The original comment: https://github.com/llvm/clangir/pull/401#discussion_r1589952990 --- clang/test/CIR/IR/data-member-ptr.cir | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/clang/test/CIR/IR/data-member-ptr.cir b/clang/test/CIR/IR/data-member-ptr.cir index d8332514c07b..a05193c21108 100644 --- a/clang/test/CIR/IR/data-member-ptr.cir +++ b/clang/test/CIR/IR/data-member-ptr.cir @@ -3,8 +3,6 @@ !s32i = !cir.int !ty_22Foo22 = !cir.struct -#global_ptr = #cir.data_member<0> : !cir.data_member - module { cir.func @null_member() { %0 = cir.const #cir.data_member : !cir.data_member @@ -16,12 +14,6 @@ module { %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr cir.return } - - cir.func @get_global_member(%arg0: !cir.ptr) { - %0 = cir.const #global_ptr - %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr - cir.return - } } // CHECK: module { @@ -37,10 +29,4 @@ module { // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK-NEXT: cir.func @get_global_member(%arg0: !cir.ptr) { -// CHECK-NEXT: %0 = cir.const #cir.data_member<0> : !cir.data_member -// CHECK-NEXT: %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr -// CHECK-NEXT: cir.return -// CHECK-NEXT: } - // CHECK: } From 2fb9b491c6560e268fec38219489eeb4e10b2cc4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 6 May 2024 11:11:11 -0700 Subject: [PATCH 1552/2301] [CIR][CIRGen][NFC] Builtins: update/modernize skeleton against upstream --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 99 ++++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 11 +-- 2 files changed, 102 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 48575bb2e56b..72a8a8d54a02 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -137,6 +137,10 @@ EncompassingIntegerType(ArrayRef Types) { return {Width, Signed}; } +RValue CIRGenFunction::buildRotate(const CallExpr *E, bool IsRotateRight) { + llvm_unreachable("NYI"); +} + RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue) { @@ -170,6 +174,24 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const unsigned BuiltinIDIfNoAsmLabel = FD->hasAttr() ? 0 : BuiltinID; + std::optional ErrnoOverriden; + // ErrnoOverriden is true if math-errno is overriden via the + // '#pragma float_control(precise, on)'. This pragma disables fast-math, + // which implies math-errno. + if (E->hasStoredFPFeatures()) { + llvm_unreachable("NYI"); + } + // True if 'atttibute__((optnone)) is used. This attibute overrides + // fast-math which implies math-errno. + bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr(); + + // True if we are compiling at -O2 and errno has been disabled + // using the '#pragma float_control(precise, off)', and + // attribute opt-none hasn't been seen. + [[maybe_unused]] bool ErrnoOverridenToFalseWithOpt = + ErrnoOverriden.has_value() && !ErrnoOverriden.value() && !OptNone && + CGM.getCodeGenOpts().OptimizationLevel != 0; + // There are LLVM math intrinsics/instructions corresponding to math library // functions except the LLVM op will never set errno while the math library // might. Also, math builtins have the same semantics as their math library @@ -177,13 +199,70 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // LLVM counterparts if the call is marked 'const' (known to never set errno). // In case FP exceptions are enabled, the experimental versions of the // intrinsics model those. + [[maybe_unused]] bool ConstAlways = + getContext().BuiltinInfo.isConst(BuiltinID); + + // There's a special case with the fma builtins where they are always const + // if the target environment is GNU or the target is OS is Windows and we're + // targeting the MSVCRT.dll environment. + // FIXME: This list can be become outdated. Need to find a way to get it some + // other way. + switch (BuiltinID) { + case Builtin::BI__builtin_fma: + case Builtin::BI__builtin_fmaf: + case Builtin::BI__builtin_fmal: + case Builtin::BIfma: + case Builtin::BIfmaf: + case Builtin::BIfmal: { + auto &Trip = CGM.getTriple(); + if (Trip.isGNUEnvironment() || Trip.isOSMSVCRT()) + ConstAlways = true; + break; + } + default: + break; + } + bool ConstWithoutErrnoAndExceptions = getContext().BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID); bool ConstWithoutExceptions = getContext().BuiltinInfo.isConstWithoutExceptions(BuiltinID); - if (FD->hasAttr() || - ((ConstWithoutErrnoAndExceptions || ConstWithoutExceptions) && - (!ConstWithoutErrnoAndExceptions || (!getLangOpts().MathErrno)))) { + + // ConstAttr is enabled in fast-math mode. In fast-math mode, math-errno is + // disabled. + // Math intrinsics are generated only when math-errno is disabled. Any pragmas + // or attributes that affect math-errno should prevent or allow math + // intrincs to be generated. Intrinsics are generated: + // 1- In fast math mode, unless math-errno is overriden + // via '#pragma float_control(precise, on)', or via an + // 'attribute__((optnone))'. + // 2- If math-errno was enabled on command line but overriden + // to false via '#pragma float_control(precise, off))' and + // 'attribute__((optnone))' hasn't been used. + // 3- If we are compiling with optimization and errno has been disabled + // via '#pragma float_control(precise, off)', and + // 'attribute__((optnone))' hasn't been used. + + bool ConstWithoutErrnoOrExceptions = + ConstWithoutErrnoAndExceptions || ConstWithoutExceptions; + bool GenerateIntrinsics = + (ConstAlways && !OptNone) || + (!getLangOpts().MathErrno && + !(ErrnoOverriden.has_value() && ErrnoOverriden.value()) && !OptNone); + if (!GenerateIntrinsics) { + GenerateIntrinsics = + ConstWithoutErrnoOrExceptions && !ConstWithoutErrnoAndExceptions; + if (!GenerateIntrinsics) + GenerateIntrinsics = + ConstWithoutErrnoOrExceptions && + (!getLangOpts().MathErrno && + !(ErrnoOverriden.has_value() && ErrnoOverriden.value()) && !OptNone); + if (!GenerateIntrinsics) + GenerateIntrinsics = + ConstWithoutErrnoOrExceptions && ErrnoOverridenToFalseWithOpt; + } + + if (GenerateIntrinsics) { switch (BuiltinIDIfNoAsmLabel) { case Builtin::BIceil: case Builtin::BIceilf: @@ -441,6 +520,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, switch (BuiltinIDIfNoAsmLabel) { default: break; + case Builtin::BI__builtin___CFStringMakeConstantString: + case Builtin::BI__builtin___NSStringMakeConstantString: + llvm_unreachable("NYI"); case Builtin::BIprintf: if (getTarget().getTriple().isNVPTX() || @@ -673,6 +755,17 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, getLoc(E->getSourceRange()), arg)); } + case Builtin::BI__builtin_rotateleft8: + case Builtin::BI__builtin_rotateleft16: + case Builtin::BI__builtin_rotateleft32: + case Builtin::BI__builtin_rotateleft64: + case Builtin::BI_rotl8: // Microsoft variants of rotate left + case Builtin::BI_rotl16: + case Builtin::BI_rotl: + case Builtin::BI_lrotl: + case Builtin::BI_rotl64: + return buildRotate(E, false); + case Builtin::BI__builtin_constant_p: { mlir::Type ResultType = ConvertType(E->getType()); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index f1bd21390540..902b51b0568b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -655,11 +655,6 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre); - // Target specific builtin emission - mlir::Value buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, - llvm::Triple::ArchType Arch); - mlir::Value buildX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E); - // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. struct PrototypeWrapper { @@ -1219,10 +1214,16 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildBuiltinExpr(const clang::GlobalDecl GD, unsigned BuiltinID, const clang::CallExpr *E, ReturnValueSlot ReturnValue); + RValue buildRotate(const CallExpr *E, bool IsRotateRight); mlir::Value buildTargetBuiltinExpr(unsigned BuiltinID, const clang::CallExpr *E, ReturnValueSlot ReturnValue); + // Target specific builtin emission + mlir::Value buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, + llvm::Triple::ArchType Arch); + mlir::Value buildX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E); + /// Given an expression with a pointer type, emit the value and compute our /// best estimate of the alignment of the pointee. /// From 016e4a2efe27a77bfc41fc487e1b9ce087e275bd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 6 May 2024 12:19:18 -0700 Subject: [PATCH 1553/2301] [CIR][CIRGen] Builtins: add rotate left/right --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 34 +++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 24 ++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 35 ++++++-- clang/test/CIR/CodeGen/builtin-rotate.c | 89 +++++++++++++++++++ 4 files changed, 174 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-rotate.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 14642f4db55d..f637ff085aa2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1330,6 +1330,40 @@ def ByteswapOp : CIR_Op<"bswap", [Pure, SameOperandsAndResultType]> { }]; } +//===----------------------------------------------------------------------===// +// RotateOp +//===----------------------------------------------------------------------===// + +def RotateOp : CIR_Op<"rotate", [Pure, SameOperandsAndResultType]> { + let summary = "Reverse the bytes that constitute the operand integer"; + let description = [{ + The `cir.rotate` rotates operand in `src` by the given bit amount `amt`. + Its widths must be either 8, 16, 32, or 64 and both `src`, `amt` and + `result` be of the same type. The rotate direction is specified by a + `left`/`right` keyword. + + This operation covers different C/C++ + builtins, some examples: `__builtin_rotateleft8`, `__builtin_rotateleft16`, + `__builtin_rotateleft32`, `__builtin_rotateleft64`, `_rotl8`, `_rotl16`, + `_rotl`, `_lrotl`, `_rotl64`, etc and their "right" variants. + + Example: + + ```mlir + %r = cir.rotate left %0, %1 -> !u32i + ``` + }]; + + let results = (outs CIR_IntType:$result); + let arguments = (ins PrimitiveInt:$src, PrimitiveInt:$amt, + UnitAttr:$left); + + let assemblyFormat = [{ + (`left` $left^) : (`right`)? + $src `,` $amt `->` type($result) attr-dict + }]; +} + //===----------------------------------------------------------------------===// // CmpThreeWayOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 72a8a8d54a02..cac650a4b6c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -138,7 +138,18 @@ EncompassingIntegerType(ArrayRef Types) { } RValue CIRGenFunction::buildRotate(const CallExpr *E, bool IsRotateRight) { - llvm_unreachable("NYI"); + auto src = buildScalarExpr(E->getArg(0)); + auto shiftAmt = buildScalarExpr(E->getArg(1)); + + // The builtin's shift arg may have a different type than the source arg and + // result, but the CIR ops uses the same type for all values. + auto ty = src.getType(); + shiftAmt = builder.createIntCast(shiftAmt, ty); + auto r = builder.create(getLoc(E->getSourceRange()), src, + shiftAmt); + if (!IsRotateRight) + r->setAttr("left", mlir::UnitAttr::get(src.getContext())); + return RValue::get(r); } RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, @@ -766,6 +777,17 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI_rotl64: return buildRotate(E, false); + case Builtin::BI__builtin_rotateright8: + case Builtin::BI__builtin_rotateright16: + case Builtin::BI__builtin_rotateright32: + case Builtin::BI__builtin_rotateright64: + case Builtin::BI_rotr8: // Microsoft variants of rotate right + case Builtin::BI_rotr16: + case Builtin::BI_rotr: + case Builtin::BI_lrotr: + case Builtin::BI_rotr64: + return buildRotate(E, true); + case Builtin::BI__builtin_constant_p: { mlir::Type ResultType = ConvertType(E->getType()); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 1e4e59d5d3eb..5276c512a6f9 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2609,6 +2609,27 @@ class CIRByteswapOpLowering } }; +class CIRRotateOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::RotateOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // Note that LLVM intrinsic calls to @llvm.fsh{r,l}.i* have the same type as + // the operand. + auto src = adaptor.getSrc(); + if (op.getLeft()) + rewriter.replaceOpWithNewOp(op, src, src, + adaptor.getAmt()); + else + rewriter.replaceOpWithNewOp(op, src, src, + adaptor.getAmt()); + return mlir::LogicalResult::success(); + } +}; + class CIRBrOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -3131,13 +3152,13 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, CIRBitPopcountOpLowering, CIRAtomicCmpXchgLowering, CIRAtomicXchgLowering, - CIRAtomicFetchLowering, CIRByteswapOpLowering, CIRBrCondOpLowering, - CIRPtrStrideOpLowering, CIRCallLowering, CIRUnaryOpLowering, - CIRBinOpLowering, CIRBinOpOverflowOpLowering, CIRShiftOpLowering, - CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, - CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, - CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, - CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, + CIRAtomicFetchLowering, CIRByteswapOpLowering, CIRRotateOpLowering, + CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, + CIRUnaryOpLowering, CIRBinOpLowering, CIRBinOpOverflowOpLowering, + CIRShiftOpLowering, CIRLoadLowering, CIRConstantLowering, + CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, CIRCastOpLowering, + CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, + CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, diff --git a/clang/test/CIR/CodeGen/builtin-rotate.c b/clang/test/CIR/CodeGen/builtin-rotate.c new file mode 100644 index 000000000000..bc0c93690658 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-rotate.c @@ -0,0 +1,89 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +void f() { +// CIR-LABEL: @f +// LLVM-LABEL: @f + unsigned int v[4]; + unsigned int h = __builtin_rotateleft32(v[0], 1); +// CIR: %[[CONST:.*]] = cir.const #cir.int<1> : !s32i +// CIR: %[[CAST:.*]] = cir.cast(integral, %[[CONST]] : !s32i), !u32i +// CIR: cir.rotate left {{.*}}, %[[CAST]] -> !u32i + +// LLVM: %[[SRC:.*]] = load i32, ptr +// LLVM: call i32 @llvm.fshl.i32(i32 %[[SRC]], i32 %[[SRC]], i32 1) +} + +unsigned char rotl8(unsigned char x, unsigned char y) { +// CIR-LABEL: rotl8 +// CIR: cir.rotate left {{.*}}, {{.*}} -> !u8i + +// LLVM-LABEL: rotl8 +// LLVM: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]]) + return __builtin_rotateleft8(x, y); +} + +short rotl16(short x, short y) { +// CIR-LABEL: rotl16 +// CIR: cir.rotate left {{.*}}, {{.*}} -> !u16i + +// LLVM-LABEL: rotl16 +// LLVM: [[F:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]]) + return __builtin_rotateleft16(x, y); +} + +int rotl32(int x, unsigned int y) { +// CIR-LABEL: rotl32 +// CIR: cir.rotate left {{.*}}, {{.*}} -> !u32i + +// LLVM-LABEL: rotl32 +// LLVM: [[F:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]]) + return __builtin_rotateleft32(x, y); +} + +unsigned long long rotl64(unsigned long long x, long long y) { +// CIR-LABEL: rotl64 +// CIR: cir.rotate left {{.*}}, {{.*}} -> !u64i + +// LLVM-LABEL: rotl64 +// LLVM: [[F:%.*]] = call i64 @llvm.fshl.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]]) + return __builtin_rotateleft64(x, y); +} + +char rotr8(char x, char y) { +// CIR-LABEL: rotr8 +// CIR: cir.rotate right {{.*}}, {{.*}} -> !u8i + +// LLVM-LABEL: rotr8 +// LLVM: [[F:%.*]] = call i8 @llvm.fshr.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]]) + return __builtin_rotateright8(x, y); +} + +unsigned short rotr16(unsigned short x, unsigned short y) { +// CIR-LABEL: rotr16 +// CIR: cir.rotate right {{.*}}, {{.*}} -> !u16i + +// LLVM-LABEL: rotr16 +// LLVM: [[F:%.*]] = call i16 @llvm.fshr.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]]) + return __builtin_rotateright16(x, y); +} + +unsigned int rotr32(unsigned int x, int y) { +// CIR-LABEL: rotr32 +// CIR: cir.rotate right {{.*}}, {{.*}} -> !u32i + +// LLVM-LABEL: rotr32 +// LLVM: [[F:%.*]] = call i32 @llvm.fshr.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]]) + return __builtin_rotateright32(x, y); +} + +long long rotr64(long long x, unsigned long long y) { +// CIR-LABEL: rotr64 +// CIR: cir.rotate right {{.*}}, {{.*}} -> !u64i + +// LLVM-LABEL: rotr64 +// LLVM: [[F:%.*]] = call i64 @llvm.fshr.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]]) + return __builtin_rotateright64(x, y); +} \ No newline at end of file From da3d23241a3cbae59fd2e0a0fadce1c849146884 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 6 May 2024 15:34:26 -0700 Subject: [PATCH 1554/2301] [CIR][LowerToLLVM] Fix bug in ptr_stride lowering --- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 ++++-- clang/test/CIR/CodeGen/pointer-arith-ext.c | 15 ++++++++++++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5276c512a6f9..cd9413aabdf1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -506,7 +506,9 @@ class CIRPtrStrideOpLowering auto sub = dyn_cast(index.getDefiningOp()); auto unary = dyn_cast(ptrStrideOp.getStride().getDefiningOp()); - if (unary && unary.getKind() == mlir::cir::UnaryOpKind::Minus && sub) + bool rewriteSub = + unary && unary.getKind() == mlir::cir::UnaryOpKind::Minus && sub; + if (rewriteSub) index = index.getDefiningOp()->getOperand(1); // Handle the cast @@ -516,7 +518,7 @@ class CIRPtrStrideOpLowering *layoutWidth); // Rewrite the sub in front of extensions/trunc - if (sub) { + if (rewriteSub) { index = rewriter.create( index.getLoc(), index.getType(), rewriter.create( diff --git a/clang/test/CIR/CodeGen/pointer-arith-ext.c b/clang/test/CIR/CodeGen/pointer-arith-ext.c index a580c7d49a06..5db612254d6e 100644 --- a/clang/test/CIR/CodeGen/pointer-arith-ext.c +++ b/clang/test/CIR/CodeGen/pointer-arith-ext.c @@ -56,4 +56,17 @@ void *f4_1(void *a, int b) { return (a -= b); } // FP f7(FP a, int b) { return a - b; } // FP f7_1(FP a, int b) { return (a -= b); } // void f8(void *a, int b) { return *(a + b); } -// void f8_1(void *a, int b) { return a[b]; } \ No newline at end of file +// void f8_1(void *a, int b) { return a[b]; } + +unsigned char *p(unsigned int x) { + unsigned char *p; + p += 16-x; + return p; +} + +// CIR-LABEL: @p +// CIR: %[[SUB:.*]] = cir.binop(sub +// CIR: cir.ptr_stride({{.*}} : !cir.ptr, %[[SUB]] : !u32i), !cir.ptr + +// LLVM-LABEL: @p +// LLVM: getelementptr i8, ptr {{.*}} \ No newline at end of file From cea8430bef95d129ec5037837fbdfbd99c467cfe Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 6 May 2024 15:40:13 -0700 Subject: [PATCH 1555/2301] [CIR][LowerToLLVM][NFC] Fix warning --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index cd9413aabdf1..ad3f026c2c28 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2056,8 +2056,6 @@ class CIRBinOpOverflowOpLowering case mlir::cir::BinOpOverflowKind::Mul: name.append("mul."); break; - default: - llvm_unreachable("unknown checked arith kind"); } name.append("with.overflow.i"); From d27db511c784af499f55da906310b9bdbb6f2aea Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 6 May 2024 16:50:35 -0700 Subject: [PATCH 1556/2301] [CIR][CIRGen][NFC] Atomics: add skeleton to support __c11_atomic_init --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 49 ++++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 7 +++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 1 - clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 ++ clang/test/CIR/CodeGen/atomic.cpp | 9 +++- 5 files changed, 65 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index aa88b4bedf17..f60607ff51bd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -1097,5 +1097,54 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { } // Long case, when Order isn't obviously constant. + llvm_unreachable("NYI"); +} + +void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue lvalue, + bool isInit) { + bool IsVolatile = lvalue.isVolatileQualified(); + mlir::cir::MemOrder MO; + if (lvalue.getType()->isAtomicType()) { + MO = mlir::cir::MemOrder::SequentiallyConsistent; + } else { + MO = mlir::cir::MemOrder::Release; + IsVolatile = true; + } + return buildAtomicStore(rvalue, lvalue, MO, IsVolatile, isInit); +} + +/// Emit a store to an l-value of atomic type. +/// +/// Note that the r-value is expected to be an r-value *of the atomic +/// type*; this means that for aggregate r-values, it should include +/// storage for any padding that was necessary. +void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, + mlir::cir::MemOrder AO, bool IsVolatile, + bool isInit) { + // If this is an aggregate r-value, it should agree in type except + // maybe for address-space qualification. + assert(!rvalue.isAggregate() || + rvalue.getAggregateAddress().getElementType() == + dest.getAddress().getElementType()); + + AtomicInfo atomics(*this, dest, dest.getPointer().getLoc()); + LValue LVal = atomics.getAtomicLValue(); + + // If this is an initialization, just put the value there normally. + if (LVal.isSimple()) { + if (isInit) { + llvm_unreachable("NYI"); + return; + } + + // Check whether we should use a library call. + if (atomics.shouldUseLibcall()) { + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); + return; + } + llvm_unreachable("NYI"); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 1daa31120573..dded51b6488a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -590,8 +590,11 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, Value = buildToMemory(Value, Ty); - if (Ty->isAtomicType()) { - llvm_unreachable("NYI"); + LValue AtomicLValue = LValue::makeAddr(Addr, Ty, getContext(), BaseInfo); + if (Ty->isAtomicType() || + (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) { + buildAtomicStore(RValue::get(Value), AtomicLValue, isInit); + return; } if (const auto *ClangVecTy = Ty->getAs()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 72330d214812..8d20e2a8c1a4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1454,7 +1454,6 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_AtomicToNonAtomic: llvm_unreachable("NYI"); case CK_NonAtomicToAtomic: - llvm_unreachable("NYI"); case CK_UserDefinedConversion: return Visit(const_cast(E)); case CK_NoOp: { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 902b51b0568b..0d823df5338b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1510,6 +1510,9 @@ class CIRGenFunction : public CIRGenTypeCache { void buildCXXThrowExpr(const CXXThrowExpr *E); RValue buildAtomicExpr(AtomicExpr *E); + void buildAtomicStore(RValue rvalue, LValue lvalue, bool isInit); + void buildAtomicStore(RValue rvalue, LValue lvalue, mlir::cir::MemOrder MO, + bool IsVolatile, bool isInit); /// Return the address of a local variable. Address GetAddrOfLocalVar(const clang::VarDecl *VD) { diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 501946ad2bf0..66db5b728aad 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -307,4 +307,11 @@ bool fsb(bool *c) { // CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u8i, seq_cst) : !u8i // LLVM-LABEL: @_Z3fsbPb -// LLVM: atomicrmw xchg ptr {{.*}}, i8 {{.*}} seq_cst, align 1 \ No newline at end of file +// LLVM: atomicrmw xchg ptr {{.*}}, i8 {{.*}} seq_cst, align 1 + +// FIXME: crashes +// void atomicinit(void) +// { +// _Atomic(unsigned int) j = 12; +// __c11_atomic_init(&j, 1); +// } \ No newline at end of file From 66d4cd0189ace4fbd51f4afb39e606dc4da920f3 Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Tue, 7 May 2024 11:50:39 +0800 Subject: [PATCH 1557/2301] [CIR][ThroughMLIR] Support lowering ptrStrideOp with loadOp or storeOp to memref (#585) This commit introduce CIRPtrStrideOpLowering to lower the following pattern to memref load or store. Rewrite %0 = cir.cast(array_to_ptrdecay, %base) %1 = cir.ptr_stride(%0, %index) cir.load %1 To memref.load %base[%index] --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 166 ++++++++++++++++-- .../CIR/Lowering/ThroughMLIR/ptrstride.cir | 78 ++++++++ 2 files changed, 234 insertions(+), 10 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/ptrstride.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 392f404b497e..b0120cf224a4 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -115,6 +115,44 @@ class CIRAllocaOpLowering } }; +// Find base and indices from memref.reinterpret_cast +// and put it into eraseList. +static bool findBaseAndIndices(mlir::Value addr, mlir::Value &base, + SmallVector &indices, + SmallVector &eraseList, + mlir::ConversionPatternRewriter &rewriter) { + while (mlir::Operation *addrOp = addr.getDefiningOp()) { + if (!isa(addrOp)) + break; + indices.push_back(addrOp->getOperand(1)); + addr = addrOp->getOperand(0); + eraseList.push_back(addrOp); + } + base = addr; + if (indices.size() == 0) + return false; + std::reverse(indices.begin(), indices.end()); + return true; +} + +// For memref.reinterpret_cast has multiple users, erasing the operation +// after the last load or store been generated. +static void eraseIfSafe(mlir::Value oldAddr, mlir::Value newAddr, + SmallVector &eraseList, + mlir::ConversionPatternRewriter &rewriter) { + unsigned oldUsedNum = + std::distance(oldAddr.getUses().begin(), oldAddr.getUses().end()); + unsigned newUsedNum = 0; + for (auto *user : newAddr.getUsers()) { + if (isa(*user) || isa(*user)) + ++newUsedNum; + } + if (oldUsedNum == newUsedNum) { + for (auto op : eraseList) + rewriter.eraseOp(op); + } +} + class CIRLoadOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -122,7 +160,15 @@ class CIRLoadOpLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::LoadOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, adaptor.getAddr()); + mlir::Value base; + SmallVector indices; + SmallVector eraseList; + if (findBaseAndIndices(adaptor.getAddr(), base, indices, eraseList, + rewriter)) { + rewriter.replaceOpWithNewOp(op, base, indices); + eraseIfSafe(op.getAddr(), base, eraseList, rewriter); + } else + rewriter.replaceOpWithNewOp(op, adaptor.getAddr()); return mlir::LogicalResult::success(); } }; @@ -135,8 +181,17 @@ class CIRStoreOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::StoreOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, adaptor.getValue(), - adaptor.getAddr()); + mlir::Value base; + SmallVector indices; + SmallVector eraseList; + if (findBaseAndIndices(adaptor.getAddr(), base, indices, eraseList, + rewriter)) { + rewriter.replaceOpWithNewOp(op, adaptor.getValue(), + base, indices); + eraseIfSafe(op.getAddr(), base, eraseList, rewriter); + } else + rewriter.replaceOpWithNewOp(op, adaptor.getValue(), + adaptor.getAddr()); return mlir::LogicalResult::success(); } }; @@ -747,6 +802,12 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto dstType = op.getResult().getType(); using CIR = mlir::cir::CastKind; switch (op.getKind()) { + case CIR::array_to_ptrdecay: { + auto newDstType = convertTy(dstType).cast(); + rewriter.replaceOpWithNewOp( + op, newDstType, src, 0, std::nullopt, std::nullopt); + return mlir::success(); + } case CIR::int_to_bool: { auto zero = rewriter.create( src.getLoc(), op.getSrc().getType(), @@ -838,17 +899,102 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } }; +class CIRPtrStrideOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + // Return true if PtrStrideOp is produced by cast with array_to_ptrdecay kind + // and they are in the same block. + inline bool isCastArrayToPtrConsumer(mlir::cir::PtrStrideOp op) const { + auto defOp = op->getOperand(0).getDefiningOp(); + if (!defOp) + return false; + auto castOp = dyn_cast(defOp); + if (!castOp) + return false; + if (castOp.getKind() != mlir::cir::CastKind::array_to_ptrdecay) + return false; + if (!castOp->hasOneUse()) + return false; + if (!castOp->isBeforeInBlock(op)) + return false; + return true; + } + + // Return true if all the PtrStrideOp users are load, store or cast + // with array_to_ptrdecay kind and they are in the same block. + inline bool + isLoadStoreOrCastArrayToPtrProduer(mlir::cir::PtrStrideOp op) const { + if (op.use_empty()) + return false; + for (auto *user : op->getUsers()) { + if (!op->isBeforeInBlock(user)) + return false; + if (isa(*user) || isa(*user)) + continue; + auto castOp = dyn_cast(*user); + if (castOp && + (castOp.getKind() == mlir::cir::CastKind::array_to_ptrdecay)) + continue; + return false; + } + return true; + } + + inline mlir::Type convertTy(mlir::Type ty) const { + return getTypeConverter()->convertType(ty); + } + + // Rewrite + // %0 = cir.cast(array_to_ptrdecay, %base) + // cir.ptr_stride(%0, %stride) + // to + // memref.reinterpret_cast (%base, %stride) + // + // MemRef Dialect doesn't have GEP-like operation. memref.reinterpret_cast + // only been used to propogate %base and %stride to memref.load/store and + // should be erased after the conversion. + mlir::LogicalResult + matchAndRewrite(mlir::cir::PtrStrideOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + if (!isCastArrayToPtrConsumer(op)) + return mlir::failure(); + if (!isLoadStoreOrCastArrayToPtrProduer(op)) + return mlir::failure(); + auto baseOp = adaptor.getBase().getDefiningOp(); + if (!baseOp) + return mlir::failure(); + if (!isa(baseOp)) + return mlir::failure(); + auto base = baseOp->getOperand(0); + auto dstType = op.getResult().getType(); + auto newDstType = convertTy(dstType).cast(); + auto stride = adaptor.getStride(); + auto indexType = rewriter.getIndexType(); + // Generate casting if the stride is not index type. + if (stride.getType() != indexType) + stride = rewriter.create(op.getLoc(), indexType, + stride); + rewriter.replaceOpWithNewOp( + op, newDstType, base, stride, std::nullopt, std::nullopt); + rewriter.eraseOp(baseOp); + return mlir::success(); + } +}; + void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns.add( - converter, patterns.getContext()); + patterns + .add( + converter, patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/ptrstride.cir b/clang/test/CIR/Lowering/ThroughMLIR/ptrstride.cir new file mode 100644 index 000000000000..19782c9d0ba7 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/ptrstride.cir @@ -0,0 +1,78 @@ +// RUN: cir-opt %s -cir-to-mlir | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +module { + cir.global "private" external @a : !cir.array + cir.global "private" external @aa : !cir.array x 100> + + // int get_1d_array_value() { return a[1]; } + // MLIR-LABEL: func.func @get_1d_array_value() -> i32 + // LLVM-LABEL: define i32 @get_1d_array_value() + cir.func @get_1d_array_value() -> !s32i { + // MLIR-NEXT: %[[BASE:.*]] = memref.get_global @a : memref<100xi32> + // MLIR-NEXT: %[[ONE:.*]] = arith.constant 1 : i32 + // MLIR-NEXT: %[[INDEX:.*]] = arith.index_cast %[[ONE]] : i32 to index + // MLIR-NEXT: %[[VALUE:.*]] = memref.load %[[BASE]][%[[INDEX]]] : memref<100xi32> + + // LLVM-NEXT: load i32, ptr getelementptr (i32, ptr @a, i64 1) + + %1 = cir.get_global @a : !cir.ptr> + %2 = cir.const #cir.int<1> : !s32i + %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr + %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : !s32i), !cir.ptr + %5 = cir.load %4 : !cir.ptr, !s32i + cir.return %5 : !s32i + } + + // int get_2d_array_value() { return aa[1][2]; } + // MLIR-LABEL: func.func @get_2d_array_value() -> i32 + // LLVM-LABEL: define i32 @get_2d_array_value() + cir.func @get_2d_array_value() -> !s32i { + // MLIR-NEXT: %[[BASE:.*]] = memref.get_global @aa : memref<100x100xi32> + // MLIR-NEXT: %[[ONE:.*]] = arith.constant 1 : i32 + // MLIR-NEXT: %[[INDEX1:.*]] = arith.index_cast %[[ONE]] : i32 to index + // MLIR-NEXT: %[[TWO:.*]] = arith.constant 2 : i32 + // MLIR-NEXT: %[[INDEX2:.*]] = arith.index_cast %[[TWO]] : i32 to index + // MLIR-NEXT: %[[VALUE:.*]] = memref.load %[[BASE]][%[[INDEX1]], %[[INDEX2]]] : memref<100x100xi32> + + // LLVM-NEXT: load i32, ptr getelementptr (i32, ptr @aa, i64 102) + + %1 = cir.get_global @aa : !cir.ptr x 100>> + %2 = cir.const #cir.int<1> : !s32i + %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr x 100>>), !cir.ptr> + %4 = cir.ptr_stride(%3 : !cir.ptr>, %2 : !s32i), !cir.ptr> + %5 = cir.const #cir.int<2> : !s32i + %6 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr + %7 = cir.ptr_stride(%6 : !cir.ptr, %5 : !s32i), !cir.ptr + %8 = cir.load %7 : !cir.ptr, !s32i + cir.return %8 : !s32i + } + + // void inc_1d_array_value() { a[1] += 2; } + // MLIR-LABEL: func.func @inc_1d_array_value() + // LLVM-LABEL: define void @inc_1d_array_value() + cir.func @inc_1d_array_value() { + // MLIR-NEXT: %[[TWO:.*]] = arith.constant 2 : i32 + // MLIR-NEXT: %[[BASE:.*]] = memref.get_global @a : memref<100xi32> + // MLIR-NEXT: %[[ONE:.*]] = arith.constant 1 : i32 + // MLIR-NEXT: %[[INDEX:.*]] = arith.index_cast %[[ONE]] : i32 to index + // MLIR-NEXT: %[[VALUE:.*]] = memref.load %[[BASE]][%[[INDEX]]] : memref<100xi32> + // MLIR-NEXT: %[[VALUE_INC:.*]] = arith.addi %[[VALUE]], %[[TWO]] : i32 + // MLIR-NEXT: memref.store %[[VALUE_INC]], %[[BASE]][%[[INDEX]]] : memref<100xi32> + + // LLVM-NEXT: %[[VALUE:.*]] = load i32, ptr getelementptr (i32, ptr @a, i64 1) + // LLVM-NEXT: %[[VALUE_INC:.*]] = add i32 %[[VALUE]], 2 + // LLVM-NEXT: store i32 %[[VALUE_INC]], ptr getelementptr (i32, ptr @a, i64 1) + + %0 = cir.const #cir.int<2> : !s32i + %1 = cir.get_global @a : !cir.ptr> + %2 = cir.const #cir.int<1> : !s32i + %3 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr + %4 = cir.ptr_stride(%3 : !cir.ptr, %2 : !s32i), !cir.ptr + %5 = cir.load %4 : !cir.ptr, !s32i + %6 = cir.binop(add, %5, %0) : !s32i + cir.store %6, %4 : !s32i, !cir.ptr + cir.return + } +} From 850c14b924be979eb357c6b5b35b376485211010 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 8 May 2024 02:03:43 +0800 Subject: [PATCH 1558/2301] [CIR] Generate `cir.dyn_cast` for dynamic casts to void ptr (#557) This patch update the CIRGen of `dynamic_cast` expressions and make it start to generate `cir.dyn_cast` operations for `dynamic_cast` expressions that cast to a void pointer. This patch also updates the lowering prepare pass so that it lowers such `cir.dyn_cast` operations to the code emitted before this patch. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 20 +++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 35 ++++++-- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 6 ++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 13 ++- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 12 ++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 27 +----- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 83 +++++++------------ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 13 +++ .../Dialect/Transforms/LoweringPrepare.cpp | 4 +- .../Transforms/LoweringPrepareCXXABI.h | 6 +- .../LoweringPrepareItaniumCXXABI.cpp | 68 +++++++++++++-- .../CodeGen/dynamic-cast-relative-layout.cpp | 34 ++++++++ clang/test/CIR/CodeGen/dynamic-cast.cpp | 61 +++++++------- 13 files changed, 249 insertions(+), 133 deletions(-) create mode 100644 clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 7102b2da91d0..07cebf4948e6 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -66,6 +66,14 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return ::mlir::cir::VoidType::get(getContext()); } + mlir::cir::IntType getUIntNTy(int N) { + return mlir::cir::IntType::get(getContext(), N, false); + } + + mlir::cir::IntType getSIntNTy(int N) { + return mlir::cir::IntType::get(getContext(), N, true); + } + mlir::cir::PointerType getPointerTo(mlir::Type ty, unsigned addressSpace = 0) { assert(!addressSpace && "address space is NYI"); @@ -76,6 +84,18 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return getPointerTo(::mlir::cir::VoidType::get(getContext()), addressSpace); } + mlir::Value createLoad(mlir::Location loc, mlir::Value ptr) { + return create(loc, ptr, /*isDeref=*/false, + /*is_volatile=*/false, + /*mem_order=*/mlir::cir::MemOrderAttr{}); + } + + mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr, + uint64_t alignment) { + // TODO(cir): implement aligned load in CIRBaseBuilder. + return createLoad(loc, ptr); + } + mlir::Value createNot(mlir::Value value) { return create(value.getLoc(), value.getType(), mlir::cir::UnaryOpKind::Not, value); diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index f637ff085aa2..13cf81b09f2e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -140,14 +140,15 @@ def DynamicCastOp : CIR_Op<"dyn_cast"> { let summary = "Perform dynamic cast on struct pointers"; let description = [{ The `cir.dyn_cast` operation models part of the semantics of the - `dynamic_cast` operator in C++. It can be used to perform 2 kinds of casts + `dynamic_cast` operator in C++. It can be used to perform 3 kinds of casts on struct pointers: - Down-cast, which casts a base class pointer to a derived class pointer; - - Side-cast, which casts a class pointer to a sibling class pointer. + - Side-cast, which casts a class pointer to a sibling class pointer; + - Cast-to-complete, which casts a class pointer to a void pointer. The input of the operation must be a struct pointer. The result of the - operation is also a struct pointer. + operation is either a struct pointer or a void pointer. The parameter `kind` specifies the semantics of this operation. If its value is `ptr`, then the operation models dynamic casts on pointers. Otherwise, if @@ -163,17 +164,27 @@ def DynamicCastOp : CIR_Op<"dyn_cast"> { - Otherwise, the operation will return a null pointer value as its result. The `info` argument gives detailed information about the requested dynamic - cast operation. + cast operation. It is an optional `#cir.dyn_cast_info` attribute that is + only present when the operation models a down-cast or a side-cast. + + The `relative_layout` argument specifies whether the Itanium C++ ABI vtable + uses relative layout. It is only meaningful when the operation models a + cast-to-complete operation. }]; let arguments = (ins DynamicCastKind:$kind, StructPtr:$src, - DynamicCastInfoAttr:$info); - let results = (outs StructPtr:$result); + OptionalAttr:$info, + UnitAttr:$relative_layout); + let results = (outs CIR_PointerType:$result); let assemblyFormat = [{ - `(` $kind `,` $src `:` type($src) `,` qualified($info) `)` - `->` type($result) attr-dict + `(` + $kind `,` $src `:` type($src) + (`,` qualified($info)^)? + (`relative_layout` $relative_layout^)? + `)` + `->` qualified(type($result)) attr-dict }]; let extraClassDeclaration = [{ @@ -181,7 +192,15 @@ def DynamicCastOp : CIR_Op<"dyn_cast"> { bool isRefcast() { return getKind() == ::mlir::cir::DynamicCastKind::ref; } + + /// Determine whether this operation represents a dynamic cast to a void + /// pointer. + bool isCastToVoid() { + return getType().isVoidPtr(); + } }]; + + let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 288cce02afce..ee8b266ed962 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -197,6 +197,12 @@ def CIR_PointerType : CIR_Type<"Pointer", "ptr", let parameters = (ins "mlir::Type":$pointee); let assemblyFormat = "`<` $pointee `>`"; + + let extraClassDeclaration = [{ + bool isVoidPtr() const { + return getPointee().isa(); + } + }]; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index d99790e68c38..1489c4fb9a5b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -637,7 +637,18 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::DynamicCastInfoAttr info) { auto castKind = isRefCast ? mlir::cir::DynamicCastKind::ref : mlir::cir::DynamicCastKind::ptr; - return create(loc, destType, castKind, src, info); + return create(loc, destType, castKind, src, info, + /*relative_layout=*/false); + } + + mlir::Value createDynCastToVoid(mlir::Location loc, mlir::Value src, + bool vtableUseRelativeLayout) { + // TODO(cir): consider address space here. + assert(!UnimplementedFeature::addressSpace()); + auto destTy = getVoidPtrTy(); + return create( + loc, destTy, mlir::cir::DynamicCastKind::ptr, src, + mlir::cir::DynamicCastInfoAttr{}, vtableUseRelativeLayout); } cir::Address createBaseClassAddr(mlir::Location loc, cir::Address addr, diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 0e1ffd53c79d..5f6ea8c1d852 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -305,13 +305,11 @@ class CIRGenCXXABI { virtual void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) = 0; - virtual mlir::cir::DynamicCastInfoAttr - buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc, - QualType SrcRecordTy, QualType DestRecordTy) = 0; - - virtual mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF, - mlir::Location Loc, Address Value, - QualType SrcRecordTy) = 0; + virtual mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, + QualType DestRecordTy, + mlir::cir::PointerType DestCIRTy, + bool isRefCast, mlir::Value Src) = 0; }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 7201cfa4fe8a..c7b528d35497 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -1136,29 +1136,8 @@ mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, if (DCE->isAlwaysNull()) return buildDynamicCastToNull(*this, loc, destTy); - if (isDynCastToVoid) { - auto srcIsNull = builder.createPtrIsNull(ThisAddr.getPointer()); - return builder - .create( - loc, srcIsNull, - [&](mlir::OpBuilder &, mlir::Location) { - auto nullPtr = - builder.getNullPtr(builder.getVoidPtrTy(), loc).getResult(); - builder.createYield(loc, nullPtr); - }, - [&](mlir::OpBuilder &, mlir::Location) { - auto castedPtr = CGM.getCXXABI().buildDynamicCastToVoid( - *this, loc, ThisAddr, srcRecordTy); - builder.createYield(loc, castedPtr); - }) - .getResult(); - } - - assert(destRecordTy->isRecordType() && "dest type must be a record type!"); - auto destCirTy = ConvertType(destTy).cast(); - auto castInfo = CGM.getCXXABI().buildDynamicCastInfo(*this, loc, srcRecordTy, - destRecordTy); - return builder.createDynCast(loc, ThisAddr.getPointer(), destCirTy, isRefCast, - castInfo); + return CGM.getCXXABI().buildDynamicCast(*this, loc, srcRecordTy, destRecordTy, + destCirTy, isRefCast, + ThisAddr.getPointer()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 87175d545ac0..7cbf253d0104 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -290,13 +290,10 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { // functions. So during CIRGen we don't need the `emitDynamicCastCall` // function that clang CodeGen has. - mlir::cir::DynamicCastInfoAttr - buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc, - QualType SrcRecordTy, QualType DestRecordTy) override; - - mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc, - Address Value, - QualType SrcRecordTy) override; + mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, QualType DestRecordTy, + mlir::cir::PointerType DestCIRTy, bool isRefCast, + mlir::Value Src) override; /**************************** RTTI Uniqueness ******************************/ protected: @@ -2284,9 +2281,19 @@ static mlir::cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { return CGF.CGM.createRuntimeFunction(FTy, "__dynamic_cast"); } -mlir::cir::DynamicCastInfoAttr CIRGenItaniumCXXABI::buildDynamicCastInfo( - CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, - QualType DestRecordTy) { +static mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF, + mlir::Location Loc, + QualType SrcRecordTy, + mlir::Value Src) { + auto vtableUsesRelativeLayout = + CGF.CGM.getItaniumVTableContext().isRelativeLayout(); + return CGF.getBuilder().createDynCastToVoid(Loc, Src, + vtableUsesRelativeLayout); +} + +static mlir::cir::DynamicCastInfoAttr +buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, QualType DestRecordTy) { auto srcRtti = CGF.CGM.getAddrOfRTTIDescriptor(Loc, SrcRecordTy) .cast(); auto destRtti = CGF.CGM.getAddrOfRTTIDescriptor(Loc, DestRecordTy) @@ -2309,51 +2316,17 @@ mlir::cir::DynamicCastInfoAttr CIRGenItaniumCXXABI::buildDynamicCastInfo( badCastFuncRef, offsetHintAttr); } -mlir::Value CIRGenItaniumCXXABI::buildDynamicCastToVoid(CIRGenFunction &CGF, - mlir::Location Loc, - Address Value, - QualType SrcRecordTy) { - auto *clsDecl = - cast(SrcRecordTy->castAs()->getDecl()); - - // TODO(cir): consider address space in this function. - assert(!UnimplementedFeature::addressSpace()); - - auto loadOffsetToTopFromVTable = - [&](mlir::Type vtableElemTy, CharUnits vtableElemAlign) -> mlir::Value { - mlir::Type vtablePtrTy = CGF.getBuilder().getPointerTo(vtableElemTy); - mlir::Value vtablePtr = CGF.getVTablePtr(Loc, Value, vtablePtrTy, clsDecl); - - // Get the address point in the vtable that contains offset-to-top. - mlir::Value offsetToTopSlotPtr = - CGF.getBuilder().create( - Loc, vtablePtrTy, mlir::FlatSymbolRefAttr{}, vtablePtr, - /*vtable_index=*/0, -2ULL); - return CGF.getBuilder().createAlignedLoad( - Loc, vtableElemTy, offsetToTopSlotPtr, vtableElemAlign); - }; +mlir::Value CIRGenItaniumCXXABI::buildDynamicCast( + CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, + QualType DestRecordTy, mlir::cir::PointerType DestCIRTy, bool isRefCast, + mlir::Value Src) { + bool isCastToVoid = DestRecordTy.isNull(); + assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference"); - // Calculate the offset from the given object to its containing complete - // object. - mlir::Value offsetToTop; - if (CGM.getItaniumVTableContext().isRelativeLayout()) { - offsetToTop = loadOffsetToTopFromVTable(CGF.getBuilder().getSInt32Ty(), - CharUnits::fromQuantity(4)); - } else { - offsetToTop = loadOffsetToTopFromVTable( - CGF.convertType(CGF.getContext().getPointerDiffType()), - CGF.getPointerAlign()); - } + if (isCastToVoid) + return buildDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src); - // Finally, add the offset to the given pointer. - // Cast the input pointer to a uint8_t* to allow pointer arithmetic. - auto u8PtrTy = CGF.getBuilder().getUInt8PtrTy(); - mlir::Value srcBytePtr = - CGF.getBuilder().createBitcast(Value.getPointer(), u8PtrTy); - // Do the pointer arithmetic. - mlir::Value dstBytePtr = CGF.getBuilder().create( - Loc, u8PtrTy, srcBytePtr, offsetToTop); - // Cast the result to a void*. - return CGF.getBuilder().createBitcast(dstBytePtr, - CGF.getBuilder().getVoidPtrTy()); + auto castInfo = buildDynamicCastInfo(CGF, Loc, SrcRecordTy, DestRecordTy); + return CGF.getBuilder().createDynCast(Loc, Src, DestCIRTy, isRefCast, + castInfo); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 030d41c6163d..e8660b59cc3e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -514,6 +514,19 @@ OpFoldResult CastOp::fold(FoldAdaptor adaptor) { return {}; } +//===----------------------------------------------------------------------===// +// DynamicCastOp +//===----------------------------------------------------------------------===// + +LogicalResult DynamicCastOp::verify() { + auto resultPointeeTy = getType().cast().getPointee(); + if (!resultPointeeTy.isa()) + return emitOpError() + << "cir.dyn_cast must produce a void ptr or struct ptr"; + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // VecCreateOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index e92e40b7ccd4..4e9ded77b441 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -473,7 +473,9 @@ void LoweringPreparePass::lowerDynamicCastOp(DynamicCastOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op); - auto loweredValue = cxxABI->lowerDynamicCast(builder, op); + assert(astCtx && "AST context is not available during lowering prepare"); + auto loweredValue = cxxABI->lowerDynamicCast(builder, *astCtx, op); + op.replaceAllUsesWith(loweredValue); op.erase(); } diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h index 2a094bad8702..549a93e07c37 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h @@ -16,6 +16,7 @@ #define LLVM_CLANG_LIB_CIR_LOWERING_PREPARE_CXX_ABI_H #include "mlir/IR/Value.h" +#include "clang/AST/ASTContext.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" @@ -25,10 +26,11 @@ class LoweringPrepareCXXABI { public: static LoweringPrepareCXXABI *createItaniumABI(); + virtual ~LoweringPrepareCXXABI() {} + virtual mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, mlir::cir::DynamicCastOp op) = 0; - - virtual ~LoweringPrepareCXXABI() {} }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp index 3619648056cc..dc997e604a96 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp @@ -16,6 +16,7 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" +#include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" @@ -27,6 +28,7 @@ namespace { class LoweringPrepareItaniumCXXABI : public LoweringPrepareCXXABI { public: mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, mlir::cir::DynamicCastOp op) override; }; @@ -50,7 +52,7 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, mlir::cir::DynamicCastOp op) { auto loc = op->getLoc(); auto srcValue = op.getSrc(); - auto castInfo = op.getInfo().cast(); + auto castInfo = op.getInfo().value(); // TODO(cir): consider address space assert(!MissingFeatures::addressSpace()); @@ -90,8 +92,57 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, return builder.createBitcast(castedPtr, op.getType()); } +static mlir::Value +buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, + mlir::cir::DynamicCastOp op) { + auto loc = op.getLoc(); + bool vtableUsesRelativeLayout = op.getRelativeLayout(); + + // TODO(cir): consider address space in this function. + assert(!MissingFeatures::addressSpace()); + + mlir::Type vtableElemTy; + uint64_t vtableElemAlign; + if (vtableUsesRelativeLayout) { + vtableElemTy = builder.getSIntNTy(32); + vtableElemAlign = 4; + } else { + const auto &targetInfo = astCtx.getTargetInfo(); + auto ptrdiffTy = targetInfo.getPtrDiffType(clang::LangAS::Default); + auto ptrdiffTyIsSigned = clang::TargetInfo::isTypeSigned(ptrdiffTy); + auto ptrdiffTyWidth = targetInfo.getTypeWidth(ptrdiffTy); + + vtableElemTy = mlir::cir::IntType::get(builder.getContext(), ptrdiffTyWidth, + ptrdiffTyIsSigned); + vtableElemAlign = targetInfo.getPointerAlign(clang::LangAS::Default); + } + + // Access vtable to get the offset from the given object to its containing + // complete object. + auto vtablePtrTy = builder.getPointerTo(vtableElemTy); + auto vtablePtrPtr = + builder.createBitcast(op.getSrc(), builder.getPointerTo(vtablePtrTy)); + auto vtablePtr = builder.createLoad(loc, vtablePtrPtr); + auto offsetToTopSlotPtr = builder.create( + loc, vtablePtrTy, mlir::FlatSymbolRefAttr{}, vtablePtr, + /*vtable_index=*/0, -2ULL); + auto offsetToTop = + builder.createAlignedLoad(loc, offsetToTopSlotPtr, vtableElemAlign); + + // Add the offset to the given pointer to get the cast result. + // Cast the input pointer to a uint8_t* to allow pointer arithmetic. + auto u8PtrTy = builder.getPointerTo(builder.getUIntNTy(8)); + auto srcBytePtr = builder.createBitcast(op.getSrc(), u8PtrTy); + auto dstBytePtr = builder.create( + loc, u8PtrTy, srcBytePtr, offsetToTop); + // Cast the result to a void*. + return builder.createBitcast(dstBytePtr, builder.getVoidPtrTy()); +} + mlir::Value LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, mlir::cir::DynamicCastOp op) { auto loc = op->getLoc(); auto srcValue = op.getSrc(); @@ -101,17 +152,20 @@ LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, if (op.isRefcast()) return buildDynamicCastAfterNullCheck(builder, op); - auto srcValueIsNull = builder.createPtrToBoolCast(srcValue); + auto srcValueIsNotNull = builder.createPtrToBoolCast(srcValue); return builder .create( - loc, srcValueIsNull, + loc, srcValueIsNotNull, [&](mlir::OpBuilder &, mlir::Location) { - builder.createYield( - loc, builder.getNullPtr(op.getType(), loc).getResult()); + mlir::Value castedValue = + op.isCastToVoid() + ? buildDynamicCastToVoidAfterNullCheck(builder, astCtx, op) + : buildDynamicCastAfterNullCheck(builder, op); + builder.createYield(loc, castedValue); }, [&](mlir::OpBuilder &, mlir::Location) { - builder.createYield(loc, - buildDynamicCastAfterNullCheck(builder, op)); + builder.createYield( + loc, builder.getNullPtr(op.getType(), loc).getResult()); }) .getResult(); } diff --git a/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp b/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp new file mode 100644 index 000000000000..f778592e9951 --- /dev/null +++ b/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp @@ -0,0 +1,34 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fexperimental-relative-c++-abi-vtables -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fexperimental-relative-c++-abi-vtables -std=c++20 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER + +struct Base { + virtual ~Base(); +}; + +// BEFORE: !ty_22Base22 = !cir.struct(ptr); +} + +// BEFORE: cir.func @_Z20ptr_cast_to_completeP4Base +// BEFORE: %{{.+}} = cir.dyn_cast(ptr, %{{.+}} : !cir.ptr relative_layout) -> !cir.ptr +// BEFORE: } + +// AFTER: cir.func @_Z20ptr_cast_to_completeP4Base +// AFTER: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// AFTER-NEXT: %[[#SRC_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#SRC]] : !cir.ptr), !cir.bool +// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#SRC_IS_NOT_NULL]], true { +// AFTER-NEXT: %[[#VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr> +// AFTER-NEXT: %[[#VPTR:]] = cir.load %[[#VPTR_PTR]] : !cir.ptr>, !cir.ptr +// AFTER-NEXT: %[[#OFFSET_TO_TOP_PTR:]] = cir.vtable.address_point( %[[#VPTR]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : !cir.ptr +// AFTER-NEXT: %[[#OFFSET_TO_TOP:]] = cir.load %[[#OFFSET_TO_TOP_PTR]] : !cir.ptr, !s32i +// AFTER-NEXT: %[[#SRC_BYTES_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: %[[#DST_BYTES_PTR:]] = cir.ptr_stride(%[[#SRC_BYTES_PTR]] : !cir.ptr, %[[#OFFSET_TO_TOP]] : !s32i), !cir.ptr +// AFTER-NEXT: %[[#DST:]] = cir.cast(bitcast, %[[#DST_BYTES_PTR]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.yield %[[#DST]] : !cir.ptr +// AFTER-NEXT: }, false { +// AFTER-NEXT: %[[#NULL:]] = cir.const #cir.ptr : !cir.ptr +// AFTER-NEXT: cir.yield %[[#NULL]] : !cir.ptr +// AFTER-NEXT: }) : (!cir.bool) -> !cir.ptr +// AFTER: } diff --git a/clang/test/CIR/CodeGen/dynamic-cast.cpp b/clang/test/CIR/CodeGen/dynamic-cast.cpp index 536fcf844845..19701eb9e28b 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast.cpp @@ -20,18 +20,19 @@ Derived *ptr_cast(Base *b) { // BEFORE: } // AFTER: cir.func @_Z8ptr_castP4Base -// AFTER: %[[#SRC_IS_NULL:]] = cir.cast(ptr_to_bool, %{{.+}} : !cir.ptr), !cir.bool -// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#SRC_IS_NULL]], true { -// AFTER-NEXT: %[[#NULL:]] = cir.const #cir.ptr : !cir.ptr -// AFTER-NEXT: cir.yield %[[#NULL]] : !cir.ptr +// AFTER: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// AFTER-NEXT: %[[#SRC_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#SRC]] : !cir.ptr), !cir.bool +// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#SRC_IS_NOT_NULL]], true { +// AFTER-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: %[[#BASE_RTTI:]] = cir.const #cir.global_view<@_ZTI4Base> : !cir.ptr +// AFTER-NEXT: %[[#DERIVED_RTTI:]] = cir.const #cir.global_view<@_ZTI7Derived> : !cir.ptr +// AFTER-NEXT: %[[#HINT:]] = cir.const #cir.int<0> : !s64i +// AFTER-NEXT: %[[#RT_CALL_RET:]] = cir.call @__dynamic_cast(%[[#SRC_VOID_PTR]], %[[#BASE_RTTI]], %[[#DERIVED_RTTI]], %[[#HINT]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr +// AFTER-NEXT: %[[#CASTED:]] = cir.cast(bitcast, %[[#RT_CALL_RET]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.yield %[[#CASTED]] : !cir.ptr // AFTER-NEXT: }, false { -// AFTER-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr -// AFTER-NEXT: %[[#SRC_RTTI:]] = cir.const #cir.global_view<@_ZTI4Base> : !cir.ptr -// AFTER-NEXT: %[[#DEST_RTTI:]] = cir.const #cir.global_view<@_ZTI7Derived> : !cir.ptr -// AFTER-NEXT: %[[#OFFSET_HINT:]] = cir.const #cir.int<0> : !s64i -// AFTER-NEXT: %[[#CASTED_PTR:]] = cir.call @__dynamic_cast(%[[#SRC_VOID_PTR]], %[[#SRC_RTTI]], %[[#DEST_RTTI]], %[[#OFFSET_HINT]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr -// AFTER-NEXT: %[[#RESULT:]] = cir.cast(bitcast, %[[#CASTED_PTR]] : !cir.ptr), !cir.ptr -// AFTER-NEXT: cir.yield %[[#RESULT]] : !cir.ptr +// AFTER-NEXT: %[[#NULL_PTR:]] = cir.const #cir.ptr : !cir.ptr +// AFTER-NEXT: cir.yield %[[#NULL_PTR]] : !cir.ptr // AFTER-NEXT: }) : (!cir.bool) -> !cir.ptr // AFTER: } @@ -62,20 +63,24 @@ void *ptr_cast_to_complete(Base *ptr) { return dynamic_cast(ptr); } -// BEFORE: cir.func @_Z20ptr_cast_to_completeP4Base -// BEFORE: %[[#V19:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// BEFORE-NEXT: %[[#V20:]] = cir.cast(ptr_to_bool, %[[#V19]] : !cir.ptr), !cir.bool -// BEFORE-NEXT: %[[#V21:]] = cir.unary(not, %[[#V20]]) : !cir.bool, !cir.bool -// BEFORE-NEXT: %{{.+}} = cir.ternary(%[[#V21]], true { -// BEFORE-NEXT: %[[#V22:]] = cir.const #cir.ptr : !cir.ptr -// BEFORE-NEXT: cir.yield %[[#V22]] : !cir.ptr -// BEFORE-NEXT: }, false { -// BEFORE-NEXT: %[[#V23:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr> -// BEFORE-NEXT: %[[#V24:]] = cir.load %[[#V23]] : !cir.ptr>, !cir.ptr -// BEFORE-NEXT: %[[#V25:]] = cir.vtable.address_point( %[[#V24]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : !cir.ptr -// BEFORE-NEXT: %[[#V26:]] = cir.load %[[#V25]] : !cir.ptr, !s64i -// BEFORE-NEXT: %[[#V27:]] = cir.cast(bitcast, %[[#V19]] : !cir.ptr), !cir.ptr -// BEFORE-NEXT: %[[#V28:]] = cir.ptr_stride(%[[#V27]] : !cir.ptr, %[[#V26]] : !s64i), !cir.ptr -// BEFORE-NEXT: %[[#V29:]] = cir.cast(bitcast, %[[#V28]] : !cir.ptr), !cir.ptr -// BEFORE-NEXT: cir.yield %[[#V29]] : !cir.ptr -// BEFORE-NEXT: }) : (!cir.bool) -> !cir.ptr +// BEFORE: cir.func @_Z20ptr_cast_to_completeP4Base +// BEFORE: %{{.+}} = cir.dyn_cast(ptr, %{{.+}} : !cir.ptr) -> !cir.ptr +// BEFORE: } + +// AFTER: cir.func @_Z20ptr_cast_to_completeP4Base +// AFTER: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// AFTER-NEXT: %[[#SRC_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#SRC]] : !cir.ptr), !cir.bool +// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#SRC_IS_NOT_NULL]], true { +// AFTER-NEXT: %[[#VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr> +// AFTER-NEXT: %[[#VPTR:]] = cir.load %[[#VPTR_PTR]] : !cir.ptr>, !cir.ptr +// AFTER-NEXT: %[[#BASE_OFFSET_PTR:]] = cir.vtable.address_point( %[[#VPTR]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : !cir.ptr +// AFTER-NEXT: %[[#BASE_OFFSET:]] = cir.load %[[#BASE_OFFSET_PTR]] : !cir.ptr, !s64i +// AFTER-NEXT: %[[#SRC_BYTES_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: %[[#DST_BYTES_PTR:]] = cir.ptr_stride(%[[#SRC_BYTES_PTR]] : !cir.ptr, %[[#BASE_OFFSET]] : !s64i), !cir.ptr +// AFTER-NEXT: %[[#CASTED_PTR:]] = cir.cast(bitcast, %[[#DST_BYTES_PTR]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.yield %[[#CASTED_PTR]] : !cir.ptr +// AFTER-NEXT: }, false { +// AFTER-NEXT: %[[#NULL_PTR:]] = cir.const #cir.ptr : !cir.ptr +// AFTER-NEXT: cir.yield %[[#NULL_PTR]] : !cir.ptr +// AFTER-NEXT: }) : (!cir.bool) -> !cir.ptr +// AFTER: } From d7fad858296e7ea2615f81ad3a6a0258d92cecc0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 7 May 2024 17:44:07 -0700 Subject: [PATCH 1559/2301] [CIR][CIRGen][NFCI] Atomics: more skeleton and helpers for c11 init Testcase introduced in previous commit still commented, with the work in this patch we just move the assertion further. --- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 2 + clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 182 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 9 + 4 files changed, 187 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index f5be28277e94..725cf6871083 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -176,6 +176,8 @@ class StructType void computeSizeAndAlignment(const DataLayout &dataLayout) const; }; +bool isAnyFloatingPointType(mlir::Type t); + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index f60607ff51bd..ad226068aa0d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -146,6 +146,8 @@ class AtomicInfo { mlir::Value getAtomicSizeValue() const { llvm_unreachable("NYI"); } + mlir::Value getScalarRValValueOrNull(RValue RVal) const; + /// Cast the given pointer to an integer pointer suitable for atomic /// operations if the source. Address castToAtomicIntPointer(Address Addr) const; @@ -160,7 +162,7 @@ class AtomicInfo { SourceLocation loc, bool AsValue) const; /// Converts a rvalue to integer value. - mlir::Value convertRValueToInt(RValue RVal) const; + mlir::Value convertRValueToInt(RValue RVal, bool CmpXchg = false) const; RValue ConvertIntToValueOrAtomic(mlir::Value IntVal, AggValueSlot ResultSlot, SourceLocation Loc, bool AsValue) const; @@ -218,7 +220,7 @@ class AtomicInfo { Address CreateTempAlloca() const; private: - bool requiresMemSetZero(llvm::Type *type) const; + bool requiresMemSetZero(mlir::Type ty) const; /// Emits atomic load as a libcall. void EmitAtomicLoadLibcall(mlir::Value AddForLoaded, llvm::AtomicOrdering AO, @@ -268,6 +270,36 @@ static Address buildValToTemp(CIRGenFunction &CGF, Expr *E) { return DeclPtr; } +/// Does a store of the given IR type modify the full expected width? +static bool isFullSizeType(CIRGenModule &CGM, mlir::Type ty, + uint64_t expectedSize) { + return (CGM.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize); +} + +/// Does the atomic type require memsetting to zero before initialization? +/// +/// The IR type is provided as a way of making certain queries faster. +bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const { + // If the atomic type has size padding, we definitely need a memset. + if (hasPadding()) + return true; + + // Otherwise, do some simple heuristics to try to avoid it: + switch (getEvaluationKind()) { + // For scalars and complexes, check whether the store size of the + // type uses the full size. + case TEK_Scalar: + return !isFullSizeType(CGF.CGM, ty, AtomicSizeInBits); + case TEK_Complex: + llvm_unreachable("NYI"); + + // Padding in structs has an undefined bit pattern. User beware. + case TEK_Aggregate: + return false; + } + llvm_unreachable("bad evaluation kind"); +} + Address AtomicInfo::castToAtomicIntPointer(Address addr) const { auto intTy = addr.getElementType().dyn_cast(); // Don't bother with int casts if the integer size is the same. @@ -682,7 +714,9 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { if (E->getOp() == AtomicExpr::AO__c11_atomic_init || E->getOp() == AtomicExpr::AO__opencl_atomic_init) { - llvm_unreachable("NYI"); + LValue lvalue = makeAddrLValue(Ptr, AtomicTy); + buildAtomicInit(E->getVal1(), lvalue); + return RValue::get(nullptr); } auto TInfo = getContext().getTypeInfoInChars(AtomicTy); @@ -1113,21 +1147,105 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue lvalue, return buildAtomicStore(rvalue, lvalue, MO, IsVolatile, isInit); } +/// Return true if \param ValTy is a type that should be casted to integer +/// around the atomic memory operation. If \param CmpXchg is true, then the +/// cast of a floating point type is made as that instruction can not have +/// floating point operands. TODO: Allow compare-and-exchange and FP - see +/// comment in CIRGenAtomicExpandPass.cpp. +static bool shouldCastToInt(mlir::Type ValTy, bool CmpXchg) { + if (mlir::cir::isAnyFloatingPointType(ValTy)) + return isa(ValTy) || CmpXchg; + return !isa(ValTy) && !isa(ValTy); +} + +mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue RVal) const { + if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) + return RVal.getScalarVal(); + return nullptr; +} + +/// Materialize an r-value into memory for the purposes of storing it +/// to an atomic type. +Address AtomicInfo::materializeRValue(RValue rvalue) const { + // Aggregate r-values are already in memory, and EmitAtomicStore + // requires them to be values of the atomic type. + if (rvalue.isAggregate()) + return rvalue.getAggregateAddress(); + + // Otherwise, make a temporary and materialize into it. + LValue TempLV = CGF.makeAddrLValue(CreateTempAlloca(), getAtomicType()); + AtomicInfo Atomics(CGF, TempLV, TempLV.getAddress().getPointer().getLoc()); + Atomics.emitCopyIntoMemory(rvalue); + return TempLV.getAddress(); +} + +bool AtomicInfo::emitMemSetZeroIfNecessary() const { + assert(LVal.isSimple()); + Address addr = LVal.getAddress(); + if (!requiresMemSetZero(addr.getElementType())) + return false; + + llvm_unreachable("NYI"); +} + +/// Copy an r-value into memory as part of storing to an atomic type. +/// This needs to create a bit-pattern suitable for atomic operations. +void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const { + assert(LVal.isSimple()); + // If we have an r-value, the rvalue should be of the atomic type, + // which means that the caller is responsible for having zeroed + // any padding. Just do an aggregate copy of that type. + if (rvalue.isAggregate()) { + llvm_unreachable("NYI"); + return; + } + + // Okay, otherwise we're copying stuff. + + // Zero out the buffer if necessary. + emitMemSetZeroIfNecessary(); + + // Drill past the padding if present. + llvm_unreachable("NYI"); + + // Okay, store the rvalue in. + if (rvalue.isScalar()) { + llvm_unreachable("NYI"); + } else { + llvm_unreachable("NYI"); + } +} + +mlir::Value AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const { + // If we've got a scalar value of the right size, try to avoid going + // through memory. Floats get casted if needed by AtomicExpandPass. + if (auto Value = getScalarRValValueOrNull(RVal)) { + if (!shouldCastToInt(Value.getType(), CmpXchg)) { + return CGF.buildToMemory(Value, ValueTy); + } else { + llvm_unreachable("NYI"); + } + } + + llvm_unreachable("NYI"); +} + /// Emit a store to an l-value of atomic type. /// /// Note that the r-value is expected to be an r-value *of the atomic /// type*; this means that for aggregate r-values, it should include /// storage for any padding that was necessary. void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, - mlir::cir::MemOrder AO, bool IsVolatile, + mlir::cir::MemOrder MO, bool IsVolatile, bool isInit) { // If this is an aggregate r-value, it should agree in type except // maybe for address-space qualification. + auto loc = dest.getPointer().getLoc(); assert(!rvalue.isAggregate() || rvalue.getAggregateAddress().getElementType() == dest.getAddress().getElementType()); - AtomicInfo atomics(*this, dest, dest.getPointer().getLoc()); + AtomicInfo atomics(*this, dest, loc); LValue LVal = atomics.getAtomicLValue(); // If this is an initialization, just put the value there normally. @@ -1142,9 +1260,59 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, llvm_unreachable("NYI"); } - llvm_unreachable("NYI"); + // Okay, we're doing this natively. + auto ValToStore = atomics.convertRValueToInt(rvalue); + + // Do the atomic store. + Address Addr = atomics.getAtomicAddress(); + if (auto Value = atomics.getScalarRValValueOrNull(rvalue)) + if (shouldCastToInt(Value.getType(), /*CmpXchg=*/false)) { + Addr = atomics.castToAtomicIntPointer(Addr); + ValToStore = builder.createIntCast(ValToStore, Addr.getElementType()); + } + auto store = builder.createStore(loc, ValToStore, Addr); + + if (MO == mlir::cir::MemOrder::Acquire) + MO = mlir::cir::MemOrder::Relaxed; // Monotonic + else if (MO == mlir::cir::MemOrder::AcquireRelease) + MO = mlir::cir::MemOrder::Release; + // Initializations don't need to be atomic. + if (!isInit) + store.setMemOrder(MO); + + // Other decoration. + if (IsVolatile) + store.setIsVolatile(true); + + // DecorateInstructionWithTBAA + assert(!UnimplementedFeature::tbaa()); return; } llvm_unreachable("NYI"); -} \ No newline at end of file +} + +void CIRGenFunction::buildAtomicInit(Expr *init, LValue dest) { + AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange())); + + switch (atomics.getEvaluationKind()) { + case TEK_Scalar: { + mlir::Value value = buildScalarExpr(init); + atomics.emitCopyIntoMemory(RValue::get(value)); + return; + } + + case TEK_Complex: { + llvm_unreachable("NYI"); + return; + } + + case TEK_Aggregate: { + // Fix up the destination if the initializer isn't an expression + // of atomic type. + llvm_unreachable("NYI"); + return; + } + } + llvm_unreachable("bad evaluation kind"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 0d823df5338b..2b68a13bc840 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1513,6 +1513,7 @@ class CIRGenFunction : public CIRGenTypeCache { void buildAtomicStore(RValue rvalue, LValue lvalue, bool isInit); void buildAtomicStore(RValue rvalue, LValue lvalue, mlir::cir::MemOrder MO, bool IsVolatile, bool isInit); + void buildAtomicInit(Expr *init, LValue dest); /// Return the address of a local variable. Address GetAddrOfLocalVar(const clang::VarDecl *VD) { diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index ec1b74caa0d2..3443e69a8fb3 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -752,6 +752,15 @@ LongDoubleType::verify(function_ref emitError, return success(); } +//===----------------------------------------------------------------------===// +// Floating-point type helpers +//===----------------------------------------------------------------------===// + +bool mlir::cir::isAnyFloatingPointType(mlir::Type t) { + return isa(t); +} + //===----------------------------------------------------------------------===// // FuncType Definitions //===----------------------------------------------------------------------===// From ad420a2f6c979b70aaa34a0021c4505ed518e407 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 8 May 2024 21:20:30 +0300 Subject: [PATCH 1560/2301] [CIR][CodeGen] Goto pass (#562) - Add new operations: `GotoOp` and `LabelOp` and inserts them in the codegen - Adds a pass that replaces `goto` operations with branches to the corresponded blocks (and erases `LabelOp` from CIR) - Update verifiers and tests --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 42 ++++ clang/include/clang/CIR/Dialect/Passes.h | 1 + clang/include/clang/CIR/Dialect/Passes.td | 10 + clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 16 -- clang/lib/CIR/CodeGen/CIRGenFunction.h | 7 - clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 30 +-- clang/lib/CIR/CodeGen/CIRPasses.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 31 +++ .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 8 +- .../lib/CIR/Dialect/Transforms/GotoSolver.cpp | 54 ++++ .../CIR/Dialect/Transforms/MergeCleanups.cpp | 3 + clang/test/CIR/CodeGen/goto.cpp | 232 +++++++++++++++++- clang/test/CIR/IR/invalid.cir | 11 + clang/test/CIR/Lowering/goto.cir | 80 +++--- clang/test/CIR/Lowering/region-simplify.cir | 38 +++ 16 files changed, 489 insertions(+), 77 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp create mode 100644 clang/test/CIR/Lowering/region-simplify.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 13cf81b09f2e..fe548ea7782c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3751,6 +3751,48 @@ def SwitchFlatOp : CIR_Op<"switch.flat", [AttrSizedOperandSegments, Terminator]> ]; } +//===----------------------------------------------------------------------===// +// GotoOp +//===----------------------------------------------------------------------===// + +def GotoOp : CIR_Op<"goto", [Terminator]> { + let description = [{ Transfers control to the specified label. + + Example: + ```C++ + void foo() { + goto exit; + + exit: + return; + } + ``` + + ```mlir + cir.func @foo() { + cir.goto "exit" + ^bb1: + cir.label "exit" + cir.return + } + ``` + }]; + let arguments = (ins StrAttr:$label); + let assemblyFormat = [{ $label attr-dict }]; +} + +//===----------------------------------------------------------------------===// +// LabelOp +//===----------------------------------------------------------------------===// + +// The LabelOp has AlwaysSpeculatable trait in order to not to be swept by canonicalizer +def LabelOp : CIR_Op<"label", [AlwaysSpeculatable]> { + let description = [{ An identifier which may be referred by cir.goto operation }]; + let arguments = (ins StrAttr:$label); + let assemblyFormat = [{ $label attr-dict }]; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // Atomic operations //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 2f713240944f..30ec06114476 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -35,6 +35,7 @@ std::unique_ptr createIdiomRecognizerPass(clang::ASTContext *astCtx); std::unique_ptr createLibOptPass(); std::unique_ptr createLibOptPass(clang::ASTContext *astCtx); std::unique_ptr createFlattenCFGPass(); +std::unique_ptr createGotoSolverPass(); void populateCIRPreLoweringPasses(mlir::OpPassManager &pm); diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index e63b97469980..1253bccf77b8 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -89,6 +89,16 @@ def FlattenCFG : Pass<"cir-flatten-cfg"> { let dependentDialects = ["cir::CIRDialect"]; } +def GotoSolver : Pass<"cir-goto-solver"> { + let summary = "Replaces goto operatations with branches"; + let description = [{ + This pass transforms CIR and replaces goto-s with branch + operations to the proper blocks. + }]; + let constructor = "mlir::createGotoSolverPass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + def IdiomRecognizer : Pass<"cir-idiom-recognizer"> { let summary = "Raise calls to C/C++ libraries to CIR operations"; let description = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 8256bd6db71c..1f95be2f542d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -323,22 +323,6 @@ void CIRGenFunction::LexicalScope::cleanup() { auto &builder = CGF.builder; auto *localScope = CGF.currLexScope; - // Handle pending gotos and the solved labels in this scope. - while (!localScope->PendingGotos.empty()) { - auto gotoInfo = localScope->PendingGotos.back(); - // FIXME: Currently only support resolving goto labels inside the - // same lexical ecope. - assert(localScope->SolvedLabels.count(gotoInfo.second) && - "goto across scopes not yet supported"); - - // The goto in this lexical context actually maps to a basic - // block. - auto g = cast(gotoInfo.first); - g.setSuccessor(CGF.LabelMap[gotoInfo.second].getBlock()); - localScope->PendingGotos.pop_back(); - } - localScope->SolvedLabels.clear(); - auto applyCleanup = [&]() { if (PerformCleanup) { // ApplyDebugLocation diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 2b68a13bc840..75a9f4333c9d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1962,13 +1962,6 @@ class CIRGenFunction : public CIRGenTypeCache { return CleanupBlock; } - // Goto's introduced in this scope but didn't get fixed. - llvm::SmallVector, 4> - PendingGotos; - - // Labels solved inside this scope. - llvm::SmallPtrSet SolvedLabels; - // --- // Exception handling // --- diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index b83b8795f841..019df15e1ce4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -551,14 +551,19 @@ mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { // info support just yet, look at this again once we have it. assert(builder.getInsertionBlock() && "not yet implemented"); + mlir::Block *currBlock = builder.getBlock(); + mlir::Block *gotoBlock = currBlock; + if (!currBlock->empty() && + currBlock->back().hasTrait()) { + gotoBlock = builder.createBlock(builder.getBlock()->getParent()); + builder.setInsertionPointToEnd(gotoBlock); + } + // A goto marks the end of a block, create a new one for codegen after // buildGotoStmt can resume building in that block. - // Build a cir.br to the target label. - auto &JD = LabelMap[S.getLabel()]; - auto brOp = buildBranchThroughCleanup(getLoc(S.getSourceRange()), JD); - if (!JD.isValid()) - currLexScope->PendingGotos.push_back(std::make_pair(brOp, S.getLabel())); + builder.create(getLoc(S.getSourceRange()), + S.getLabel()->getName()); // Insert the new block to continue codegen after goto. builder.createBlock(builder.getBlock()->getParent()); @@ -568,31 +573,22 @@ mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { } mlir::LogicalResult CIRGenFunction::buildLabel(const LabelDecl *D) { - JumpDest &Dest = LabelMap[D]; - // Create a new block to tag with a label and add a branch from // the current one to it. If the block is empty just call attach it // to this label. mlir::Block *currBlock = builder.getBlock(); mlir::Block *labelBlock = currBlock; if (!currBlock->empty()) { - { mlir::OpBuilder::InsertionGuard guard(builder); labelBlock = builder.createBlock(builder.getBlock()->getParent()); } - builder.create(getLoc(D->getSourceRange()), labelBlock); - builder.setInsertionPointToEnd(labelBlock); } - if (!Dest.isValid()) { - Dest.Block = labelBlock; - currLexScope->SolvedLabels.insert(D); - // FIXME: add a label attribute to block... - } else { - assert(0 && "unimplemented"); - } + builder.setInsertionPointToEnd(labelBlock); + builder.create(getLoc(D->getSourceRange()), D->getName()); + builder.setInsertionPointToEnd(labelBlock); // FIXME: emit debug info for labels, incrementProfileCounter return mlir::success(); diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 7819d6db21ea..edffd66a9357 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -82,7 +82,7 @@ namespace mlir { void populateCIRPreLoweringPasses(OpPassManager &pm) { pm.addPass(createFlattenCFGPass()); - // add other passes here + pm.addPass(createGotoSolverPass()); } } // namespace mlir diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index e8660b59cc3e..51f81e1b8611 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -19,6 +19,7 @@ #include "llvm/Support/ErrorHandling.h" #include #include +#include #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" @@ -2174,6 +2175,24 @@ LogicalResult cir::FuncOp::verify() { << "' must have empty body"; } + std::set labels; + std::set gotos; + + getOperation()->walk([&](mlir::Operation *op) { + if (auto lab = dyn_cast(op)) { + labels.emplace(lab.getLabel()); + } else if (auto goTo = dyn_cast(op)) { + gotos.emplace(goTo.getLabel()); + } + }); + + std::vector mismatched; + std::set_difference(gotos.begin(), gotos.end(), labels.begin(), labels.end(), + std::back_inserter(mismatched)); + + if (!mismatched.empty()) + return emitOpError() << "goto/label mismatch"; + return success(); } @@ -3083,6 +3102,18 @@ LogicalResult BinOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// LabelOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult LabelOp::verify() { + auto *op = getOperation(); + auto *blk = op->getBlock(); + if (&blk->front() != op) + return emitError() << "must be the first operation in a block"; + return mlir::success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 8bd6a06b7c4e..647d15aea8dc 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -8,6 +8,7 @@ add_clang_library(MLIRCIRTransforms LibOpt.cpp StdHelpers.cpp FlattenCFG.cpp + GotoSolver.cpp DEPENDS MLIRCIRPassIncGen diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index ea1b413fc685..ce643e6735fa 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -224,9 +224,11 @@ class CIRLoopOpInterfaceFlattening }); // Lower optional body region yield. - auto bodyYield = dyn_cast(body->getTerminator()); - if (bodyYield) - lowerTerminator(bodyYield, (step ? step : cond), rewriter); + for (auto &blk : op.getBody().getBlocks()) { + auto bodyYield = dyn_cast(blk.getTerminator()); + if (bodyYield) + lowerTerminator(bodyYield, (step ? step : cond), rewriter); + } // Lower mandatory step region yield. if (step) diff --git a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp new file mode 100644 index 000000000000..34eb488b732c --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp @@ -0,0 +1,54 @@ +#include "PassDetail.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/DialectConversion.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" + +using namespace mlir; +using namespace mlir::cir; + +namespace { + +struct GotoSolverPass : public GotoSolverBase { + + GotoSolverPass() = default; + void runOnOperation() override; +}; + +static void process(mlir::cir::FuncOp func) { + + mlir::OpBuilder rewriter(func.getContext()); + std::map labels; + std::vector gotos; + + func.getBody().walk([&](mlir::Operation *op) { + if (auto lab = dyn_cast(op)) { + labels.emplace(lab.getLabel().str(), lab->getBlock()); + lab.erase(); + } else if (auto goTo = dyn_cast(op)) { + gotos.push_back(goTo); + } + }); + + for (auto goTo : gotos) { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(goTo); + auto dest = labels[goTo.getLabel().str()]; + rewriter.create(goTo.getLoc(), dest); + goTo.erase(); + } +} + +void GotoSolverPass::runOnOperation() { + SmallVector ops; + getOperation()->walk([&](mlir::cir::FuncOp op) { process(op); }); +} + +} // namespace + +std::unique_ptr mlir::createGotoSolverPass() { + return std::make_unique(); +} \ No newline at end of file diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index e4848a21d0bd..05b951c01ad2 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -42,6 +42,9 @@ struct RemoveRedudantBranches : public OpRewritePattern { Block *block = op.getOperation()->getBlock(); Block *dest = op.getDest(); + if (isa(dest->front())) + return failure(); + // Single edge between blocks: merge it. if (block->getNumSuccessors() == 1 && dest->getSinglePredecessor() == block) { diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 204b00303fca..dc36517863ef 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -1,5 +1,8 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat %s -o %t1.cir +// RUN: FileCheck --input-file=%t1.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t2.cir +// RUN: FileCheck --input-file=%t2.cir %s -check-prefix=NOFLAT + void g0(int a) { int b = a; @@ -64,3 +67,228 @@ int g2() { // CHECK: [[R:%[0-9]+]] = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: [[R]] : !s32i // CHECK-NEXT: } + + +int shouldNotGenBranchRet(int x) { + if (x > 5) + goto err; + return 0; +err: + return -1; +} +// NOFLAT: cir.func @_Z21shouldNotGenBranchReti +// NOFLAT: cir.if %8 { +// NOFLAT: cir.goto "err" +// NOFLAT: } +// NOFLAT: ^bb1: +// NOFLAT: %3 = cir.load %1 : !cir.ptr, !s32i +// NOFLAT: cir.return %3 : !s32i +// NOFLAT: ^bb2: // no predecessors +// NOFLAT: cir.label "err" + +int shouldGenBranch(int x) { + if (x > 5) + goto err; + x++; +err: + return -1; +} +// NOFLAT: cir.func @_Z15shouldGenBranchi +// NOFLAT: cir.if %9 { +// NOFLAT: cir.goto "err" +// NOFLAT: } +// NOFLAT: cir.br ^bb1 +// NOFLAT: ^bb1: +// NOFLAT: cir.label "err" + +int shouldCreateBlkForGoto(int a) { + switch (a) { + case(42): + break; + goto exit; + default: + return 0; + }; + +exit: + return -1; + +} +// NOFLAT: cir.func @_Z22shouldCreateBlkForGotoi +// NOFLAT: case (equal, 42) { +// NOFLAT: cir.break +// NOFLAT: ^bb1: // no predecessors +// NOFLAT: cir.goto "exit" +// NOFLAT: } + +void severalLabelsInARow(int a) { + int b = a; + goto end1; + b = b + 1; + goto end2; +end1: +end2: + b = b + 2; +} +// NOFLAT: cir.func @_Z19severalLabelsInARowi +// NOFLAT: ^bb[[#BLK1:]]: +// NOFLAT: cir.label "end1" +// NOFLAT: cir.br ^bb[[#BLK2:]] +// NOFLAT: ^bb[[#BLK2]]: +// NOFLAT: cir.label "end2" + +void severalGotosInARow(int a) { + int b = a; + goto end; + goto end; +end: + b = b + 2; +} +// NOFLAT: cir.func @_Z18severalGotosInARowi +// NOFLAT: cir.goto "end" +// NOFLAT: ^bb[[#BLK1:]]: +// NOFLAT: cir.goto "end" +// NOFLAT: ^bb[[#BLK2:]]: +// NOFLAT: cir.label "end" + + +void labelWithoutMatch() { +end: + return; +} +// NOFLAT: cir.func @_Z17labelWithoutMatchv() +// NOFLAT: cir.label "end" +// NOFLAT: cir.return +// NOFLAT: } + + +int jumpIntoLoop(int* ar) { + + if (ar) + goto label; + return -1; + + while (ar) { + label: + ++ar; + } + + return 0; +} + +// CHECK: cir.func @_Z12jumpIntoLoopPi +// CHECK: cir.brcond {{.*}} ^bb[[#BLK2:]], ^bb[[#BLK3:]] +// CHECK: ^bb[[#BLK2]]: +// CHECK: cir.br ^bb[[#BODY:]] +// CHECK: ^bb[[#BLK3]]: +// CHECK: cir.br ^bb[[#BLK4:]] +// CHECK: ^bb[[#BLK4]]: +// CHECK: cir.br ^bb[[#RETURN:]] +// CHECK: ^bb[[#RETURN]]: +// CHECK: cir.return +// CHECK: ^bb[[#BLK5:]]: +// CHECK: cir.br ^bb[[#BLK6:]] +// CHECK: ^bb[[#BLK6]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BODY]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.br ^bb[[#BLK7:]] +// CHECK: ^bb[[#BLK7]]: +// CHECK: cir.br ^bb[[#RETURN]] + + + +int jumpFromLoop(int* ar) { + + if (!ar) { +err: + return -1; +} + + while (ar) { + if (*ar == 42) + goto err; + ++ar; + } + + return 0; +} +// CHECK: cir.func @_Z12jumpFromLoopPi +// CHECK: cir.brcond {{.*}} ^bb[[#RETURN1:]], ^bb[[#BLK3:]] +// CHECK: ^bb[[#RETURN1]]: +// CHECK: cir.return +// CHECK: ^bb[[#BLK3]]: +// CHECK: cir.br ^bb[[#BLK4:]] +// CHECK: ^bb[[#BLK4]]: +// CHECK: cir.br ^bb[[#BLK5:]] +// CHECK: ^bb[[#BLK5]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#IF42:]] +// CHECK: ^bb[[#IF42]]: +// CHECK: cir.brcond {{.*}} ^bb[[#IF42TRUE:]], ^bb[[#IF42FALSE:]] +// CHECK: ^bb[[#IF42TRUE]]: +// CHECK: cir.br ^bb[[#RETURN1]] +// CHECK: ^bb[[#IF42FALSE]]: +// CHECK: cir.br ^bb[[#BLK11:]] +// CHECK: ^bb[[#BLK11]]: +// CHECK: cir.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.br ^bb[[#RETURN2:]] +// CHECK: ^bb[[#RETURN2]]: +// CHECK: cir.return + + +void flatLoopWithNoTerminatorInFront(int* ptr) { + + if (ptr) + goto loop; + + do { + if (!ptr) + goto end; + loop: + ptr++; + } while(ptr); + + end: + ; +} + +// CHECK: cir.func @_Z31flatLoopWithNoTerminatorInFrontPi +// CHECK: cir.brcond {{.*}} ^bb[[#BLK2:]], ^bb[[#BLK3:]] +// CHECK: ^bb[[#BLK2]]: +// CHECK: cir.br ^bb[[#LABEL_LOOP:]] +// CHECK: ^bb[[#BLK3]]: +// CHECK: cir.br ^bb[[#BLK4:]] +// CHECK: ^bb[[#BLK4]]: +// CHECK: cir.br ^bb[[#BLK5:]] +// CHECK: ^bb[[#BLK5]]: +// CHECK: cir.br ^bb[[#BODY:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BODY]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#BLK8:]] +// CHECK: ^bb[[#BLK8]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BLK9:]], ^bb[[#BLK10:]] +// CHECK: ^bb[[#BLK9]]: +// CHECK: cir.br ^bb[[#RETURN:]] +// CHECK: ^bb[[#BLK10]]: +// CHECK: cir.br ^bb[[#BLK11:]] +// CHECK: ^bb[[#BLK11]]: +// CHECK: cir.br ^bb[[#LABEL_LOOP]] +// CHECK: ^bb[[#LABEL_LOOP]]: +// CHECK: cir.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.br ^bb[[#BLK14:]] +// CHECK: ^bb[[#BLK14]]: +// CHECK: cir.br ^bb[[#RETURN]] +// CHECK: ^bb[[#RETURN]]: +// CHECK: cir.return +// CHECK: } +// CHECK:} \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 6128fa42b824..2ed718d6176f 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1107,3 +1107,14 @@ module { %0 = cir.dyn_cast(ptr, %arg0 : !cir.ptr, #cir.dyn_cast_info<#cir.global_view<@_ZTI4Base> : !cir.ptr, #cir.global_view<@_ZTI7Derived> : !cir.ptr, @__dynamic_cast, @__cxa_bad_cast, #cir.int<0> : !s64i>) -> !cir.ptr } } + + +// ----- + +// expected-error@+1 {{goto/label mismatch}} +cir.func @bad_goto() -> () { + cir.goto "somewhere" +^bb1: + cir.label "label" + cir.return +} diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 271666744d6c..a7b02c0fe875 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -1,36 +1,54 @@ -// RUN: cir-opt %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-opt %s -canonicalize -o - | cir-translate -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s --pass-pipeline='builtin.module(cir-to-llvm,canonicalize{region-simplify=false})' -o - | FileCheck %s -check-prefix=MLIR +// XFAIL: * -!u32i = !cir.int +!s32i = !cir.int module { - cir.func @foo() { - %0 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} - %1 = cir.const #cir.int<1> : !u32i - cir.store %1, %0 : !u32i, !cir.ptr - cir.br ^bb2 - ^bb1: // no predecessors - %2 = cir.load %0 : !cir.ptr, !u32i - %3 = cir.const #cir.int<1> : !u32i - %4 = cir.binop(add, %2, %3) : !u32i - cir.store %4, %0 : !u32i, !cir.ptr - cir.br ^bb2 - ^bb2: // 2 preds: ^bb0, ^bb1 - %5 = cir.load %0 : !cir.ptr, !u32i - %6 = cir.const #cir.int<2> : !u32i - %7 = cir.binop(add, %5, %6) : !u32i - cir.store %7, %0 : !u32i, !cir.ptr - cir.return - } -} -// MLIR: module { -// MLIR-NEXT: llvm.func @foo -// MLIR: llvm.br ^bb1 -// MLIR: ^bb1: -// MLIR: return + cir.func @gotoFromIf(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.scope { + %6 = cir.load %0 : !cir.ptr, !s32i + %7 = cir.const #cir.int<5> : !s32i + %8 = cir.cmp(gt, %6, %7) : !s32i, !s32i + %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool + cir.if %9 { + cir.goto "err" + } + } + %2 = cir.const #cir.int<0> : !s32i + cir.store %2, %1 : !s32i, !cir.ptr + cir.br ^bb1 + ^bb1: + %3 = cir.load %1 : !cir.ptr, !s32i + cir.return %3 : !s32i + ^bb2: + cir.label "err" + %4 = cir.const #cir.int<1> : !s32i + %5 = cir.unary(minus, %4) : !s32i, !s32i + cir.store %5, %1 : !s32i, !cir.ptr + cir.br ^bb1 + } -// LLVM: br label %[[Value:[0-9]+]] -// LLVM-EMPTY: -// LLVM-NEXT: [[Value]]: ; preds = -// LLVM: ret void +// MLIR: llvm.func @gotoFromIf +// MLIR: %[[#One:]] = llvm.mlir.constant(1 : i32) : i32 +// MLIR: %[[#Zero:]] = llvm.mlir.constant(0 : i32) : i32 +// MLIR: llvm.cond_br {{.*}}, ^bb[[#COND_YES:]], ^bb[[#COND_NO:]] +// MLIR: ^bb[[#COND_YES]]: +// MLIR: llvm.br ^bb[[#GOTO_BLK:]] +// MLIR: ^bb[[#COND_NO]]: +// MLIR: llvm.br ^bb[[#BLK:]] +// MLIR: ^bb[[#BLK]]: +// MLIR: llvm.store %[[#Zero]], %[[#Ret_val_addr:]] : i32, !llvm.ptr +// MLIR: llvm.br ^bb[[#RETURN:]] +// MLIR: ^bb[[#RETURN]]: +// MLIR: %[[#Ret_val:]] = llvm.load %[[#Ret_val_addr]] : !llvm.ptr -> i32 +// MLIR: llvm.return %[[#Ret_val]] : i32 +// MLIR: ^bb[[#GOTO_BLK]]: +// MLIR: %[[#Neg_one:]] = llvm.sub %[[#Zero]], %[[#One]] : i32 +// MLIR: llvm.store %[[#Neg_one]], %[[#Ret_val_addr]] : i32, !llvm.ptr +// MLIR: llvm.br ^bb[[#RETURN]] +// MLIR: } +} diff --git a/clang/test/CIR/Lowering/region-simplify.cir b/clang/test/CIR/Lowering/region-simplify.cir new file mode 100644 index 000000000000..5f32205cb032 --- /dev/null +++ b/clang/test/CIR/Lowering/region-simplify.cir @@ -0,0 +1,38 @@ +// RUN: cir-opt %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -canonicalize -o - | cir-translate -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!u32i = !cir.int + +module { + cir.func @foo() { + %0 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} + %1 = cir.const #cir.int<1> : !u32i + cir.store %1, %0 : !u32i, !cir.ptr + cir.br ^bb2 + ^bb1: // no predecessors + %2 = cir.load %0 : !cir.ptr, !u32i + %3 = cir.const #cir.int<1> : !u32i + %4 = cir.binop(add, %2, %3) : !u32i + cir.store %4, %0 : !u32i, !cir.ptr + cir.br ^bb2 + ^bb2: // 2 preds: ^bb0, ^bb1 + %5 = cir.load %0 : !cir.ptr, !u32i + %6 = cir.const #cir.int<2> : !u32i + %7 = cir.binop(add, %5, %6) : !u32i + cir.store %7, %0 : !u32i, !cir.ptr + cir.return + } + + // MLIR: module { +// MLIR-NEXT: llvm.func @foo +// MLIR: llvm.br ^bb1 +// MLIR: ^bb1: +// MLIR: return + +// LLVM: br label %[[Value:[0-9]+]] +// LLVM-EMPTY: +// LLVM-NEXT: [[Value]]: ; preds = +// LLVM: ret void + + +} \ No newline at end of file From 00f42e2ac472265dba2f84db78fcec3dd11c1634 Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 9 May 2024 02:41:46 +0800 Subject: [PATCH 1561/2301] [CIR][Transforms] Simplify redundant bitcasts (#591) Fix #479 . There are three available stages to place the simplification in. * A straightforward method is to extend `fold` method for CastOp. But CIR does not use CanonicalizerPass, so it does not work. * As for somehow equivalent to it, append a pattern to `MergeCleanupsPass`. But now it is mainly for CFG-related simplifications like block merging. I don't know if this is the proper way. Shall we rename it to a broader definition? * Add a new pass for this issue. This is definitely not very reasonable XD. We won't consider it unless we're really out of options. This PR includes the second option. What do you think @bcardosolopes ? --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 25 ++++++++++------ .../CIR/Dialect/Transforms/MergeCleanups.cpp | 8 +++-- clang/test/CIR/CodeGen/no-proto-fun-ptr.c | 3 +- clang/test/CIR/CodeGen/no-prototype.c | 3 +- clang/test/CIR/CodeGen/unary.c | 30 ++++++++++++++----- clang/test/CIR/Transforms/merge-cleanups.cir | 9 ++++++ 6 files changed, 55 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 51f81e1b8611..37393f1e5350 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -502,17 +502,24 @@ LogicalResult CastOp::verify() { } OpFoldResult CastOp::fold(FoldAdaptor adaptor) { - if (getKind() != mlir::cir::CastKind::integral) - return {}; if (getSrc().getType() != getResult().getType()) return {}; - // TODO: for sign differences, it's possible in certain conditions to - // create a new attributes that's capable or representing the source. - SmallVector foldResults; - auto foldOrder = getSrc().getDefiningOp()->fold(foldResults); - if (foldOrder.succeeded() && foldResults[0].is()) - return foldResults[0].get(); - return {}; + switch (getKind()) { + case mlir::cir::CastKind::integral: { + // TODO: for sign differences, it's possible in certain conditions to + // create a new attribute that's capable of representing the source. + SmallVector foldResults; + auto foldOrder = getSrc().getDefiningOp()->fold(foldResults); + if (foldOrder.succeeded() && foldResults[0].is()) + return foldResults[0].get(); + return {}; + } + case mlir::cir::CastKind::bitcast: { + return getSrc(); + } + default: + return {}; + } } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index 05b951c01ad2..106065c6b6e3 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -34,7 +34,7 @@ namespace { /// To: /// ^bb0: /// cir.return -struct RemoveRedudantBranches : public OpRewritePattern { +struct RemoveRedundantBranches : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(BrOp op, @@ -104,7 +104,7 @@ struct MergeCleanupsPass : public MergeCleanupsBase { void populateMergeCleanupPatterns(RewritePatternSet &patterns) { // clang-format off patterns.add< - RemoveRedudantBranches, + RemoveRedundantBranches, RemoveEmptyScope, RemoveEmptySwitch >(patterns.getContext()); @@ -119,7 +119,9 @@ void MergeCleanupsPass::runOnOperation() { // Collect operations to apply patterns. SmallVector ops; getOperation()->walk([&](Operation *op) { - if (isa(op)) + // CastOp here is to perform a manual `fold` in + // applyOpPatternsAndFold + if (isa(op)) ops.push_back(op); }); diff --git a/clang/test/CIR/CodeGen/no-proto-fun-ptr.c b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c index a399f9b1f9c8..b4d92db11963 100644 --- a/clang/test/CIR/CodeGen/no-proto-fun-ptr.c +++ b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c @@ -9,8 +9,7 @@ void check_noproto_ptr() { // CHECK: cir.func no_proto @check_noproto_ptr() // CHECK: [[ALLOC:%.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["fun", init] {alignment = 8 : i64} // CHECK: [[GGO:%.*]] = cir.get_global @empty : !cir.ptr> -// CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> -// CHECK: cir.store [[CAST]], [[ALLOC]] : !cir.ptr>, !cir.ptr>> +// CHECK: cir.store [[GGO]], [[ALLOC]] : !cir.ptr>, !cir.ptr>> // CHECK: cir.return void empty(void) {} diff --git a/clang/test/CIR/CodeGen/no-prototype.c b/clang/test/CIR/CodeGen/no-prototype.c index f13b3d7a9676..c119304ce54d 100644 --- a/clang/test/CIR/CodeGen/no-prototype.c +++ b/clang/test/CIR/CodeGen/no-prototype.c @@ -36,8 +36,7 @@ int noProto2(); int test2(int x) { return noProto2(x); // CHECK: [[GGO:%.*]] = cir.get_global @noProto2 : !cir.ptr> - // CHECK: [[CAST:%.*]] = cir.cast(bitcast, %3 : !cir.ptr>), !cir.ptr> - // CHECK: {{.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr>, !s32i) -> !s32i + // CHECK: {{.*}} = cir.call [[GGO]](%{{[0-9]+}}) : (!cir.ptr>, !s32i) -> !s32i } int noProto2(int x) { return x; } // CHECK: cir.func no_proto @noProto2(%arg0: !s32i {{.+}}) -> !s32i diff --git a/clang/test/CIR/CodeGen/unary.c b/clang/test/CIR/CodeGen/unary.c index d0ff62201343..63f355c09f45 100644 --- a/clang/test/CIR/CodeGen/unary.c +++ b/clang/test/CIR/CodeGen/unary.c @@ -1,26 +1,42 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -void valueNegation(int i, short s, long l, float f, double d) { -// CHECK: cir.func @valueNegation( - !i; +int valueNegationInt(int i) { +// CHECK: cir.func @valueNegationInt( + return !i; // CHECK: %[[#INT:]] = cir.load %{{[0-9]+}} : !cir.ptr, !s32i // CHECK: %[[#INT_TO_BOOL:]] = cir.cast(int_to_bool, %[[#INT]] : !s32i), !cir.bool // CHECK: = cir.unary(not, %[[#INT_TO_BOOL]]) : !cir.bool, !cir.bool - !s; +} + +short valueNegationShort(short s) { +// CHECK: cir.func @valueNegationShort( + return !s; // CHECK: %[[#SHORT:]] = cir.load %{{[0-9]+}} : !cir.ptr, !s16i // CHECK: %[[#SHORT_TO_BOOL:]] = cir.cast(int_to_bool, %[[#SHORT]] : !s16i), !cir.bool // CHECK: = cir.unary(not, %[[#SHORT_TO_BOOL]]) : !cir.bool, !cir.bool - !l; +} + +long valueNegationLong(long l) { +// CHECK: cir.func @valueNegationLong( + return !l; // CHECK: %[[#LONG:]] = cir.load %{{[0-9]+}} : !cir.ptr, !s64i // CHECK: %[[#LONG_TO_BOOL:]] = cir.cast(int_to_bool, %[[#LONG]] : !s64i), !cir.bool // CHECK: = cir.unary(not, %[[#LONG_TO_BOOL]]) : !cir.bool, !cir.bool - !f; +} + +float valueNegationFloat(float f) { +// CHECK: cir.func @valueNegationFloat( + return !f; // CHECK: %[[#FLOAT:]] = cir.load %{{[0-9]+}} : !cir.ptr, !cir.float // CHECK: %[[#FLOAT_TO_BOOL:]] = cir.cast(float_to_bool, %[[#FLOAT]] : !cir.float), !cir.bool // CHECK: %[[#FLOAT_NOT:]] = cir.unary(not, %[[#FLOAT_TO_BOOL]]) : !cir.bool, !cir.bool // CHECK: = cir.cast(bool_to_int, %[[#FLOAT_NOT]] : !cir.bool), !s32i - !d; +} + +double valueNegationDouble(double d) { +// CHECK: cir.func @valueNegationDouble( + return !d; // CHECK: %[[#DOUBLE:]] = cir.load %{{[0-9]+}} : !cir.ptr, !cir.double // CHECK: %[[#DOUBLE_TO_BOOL:]] = cir.cast(float_to_bool, %[[#DOUBLE]] : !cir.double), !cir.bool // CHECK: %[[#DOUBLE_NOT:]] = cir.unary(not, %[[#DOUBLE_TO_BOOL]]) : !cir.bool, !cir.bool diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 5cb52d15c686..f6def4d34107 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -117,4 +117,13 @@ module { // CHECK: cir.return } + // Should remove redundant bitcasts. + // CHECK-LABEL: @ptrbitcastfold + // CHECK: %[[ARG0:.+]]: !cir.ptr + // CHECK: cir.return %[[ARG0]] : !cir.ptr + cir.func @ptrbitcastfold(%arg0: !cir.ptr) -> !cir.ptr { + %0 = cir.cast(bitcast, %arg0: !cir.ptr), !cir.ptr + cir.return %0 : !cir.ptr + } + } From 75ae14debeab041df8cf71c0b079cfeb54351f20 Mon Sep 17 00:00:00 2001 From: GaoXiangYa <168072492+GaoXiangYa@users.noreply.github.com> Date: Thu, 9 May 2024 03:01:27 +0800 Subject: [PATCH 1562/2301] [CIR][Lowering] Add MLIR lowering support for CIR sin operations (#586) This PR add cir.sin lowering to MLIR math dialect. In the future, I will submit a PR to lowering cir.floor, cir.fabs and other operations to MLIR. --------- Co-authored-by: Gao Xiang --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 29 +++++++++++++----- clang/test/CIR/Lowering/ThroughMLIR/sin.cir | 30 +++++++++++++++++++ 2 files changed, 51 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/sin.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index b0120cf224a4..e5797c0789ef 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -32,6 +32,7 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" +#include "mlir/Support/LogicalResult.h" #include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" @@ -208,6 +209,18 @@ class CIRCosOpLowering : public mlir::OpConversionPattern { } }; +class CIRSinOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::SinOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + class CIRConstantOpLowering : public mlir::OpConversionPattern { public: @@ -987,14 +1000,14 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns - .add( - converter, patterns.getContext()); + patterns.add(converter, + patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/sin.cir b/clang/test/CIR/Lowering/ThroughMLIR/sin.cir new file mode 100644 index 000000000000..c433b52e105c --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/sin.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %1 = cir.const #cir.fp<1.0> : !cir.float + %2 = cir.const #cir.fp<1.0> : !cir.double + %3 = cir.const #cir.fp<1.0> : !cir.long_double + %4 = cir.const #cir.fp<1.0> : !cir.long_double + %5 = cir.sin %1 : !cir.float + %6 = cir.sin %2 : !cir.double + %7 = cir.sin %3 : !cir.long_double + %8 = cir.sin %4 : !cir.long_double + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant 1.000000e+00 : f32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant 1.000000e+00 : f64 +// CHECK-NEXT: %[[C2:.+]] = arith.constant 1.000000e+00 : f80 +// CHECK-NEXT: %[[C3:.+]] = arith.constant 1.000000e+00 : f64 +// CHECK-NEXT: %{{.+}} = math.sin %[[C0]] : f32 +// CHECK-NEXT: %{{.+}} = math.sin %[[C1]] : f64 +// CHECK-NEXT: %{{.+}} = math.sin %[[C2]] : f80 +// CHECK-NEXT: %{{.+}} = math.sin %[[C3]] : f64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } From c59ff97f8378aa4ff7db120ce506a65916638c12 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 8 May 2024 12:44:06 -0700 Subject: [PATCH 1563/2301] [CIR][CIRGen] Atomics: add initial support for c11 init --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 6 +++--- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 19 ++++++++++++----- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 +++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 +- .../CodeGen/UnimplementedFeatureGuarding.h | 2 ++ clang/test/CIR/CodeGen/atomic.cpp | 21 +++++++++++++------ 6 files changed, 38 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index ad226068aa0d..59ad4177d785 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -1206,11 +1206,11 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const { emitMemSetZeroIfNecessary(); // Drill past the padding if present. - llvm_unreachable("NYI"); + LValue TempLVal = projectValue(); // Okay, store the rvalue in. if (rvalue.isScalar()) { - llvm_unreachable("NYI"); + CGF.buildStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true); } else { llvm_unreachable("NYI"); } @@ -1251,7 +1251,7 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, // If this is an initialization, just put the value there normally. if (LVal.isSimple()) { if (isInit) { - llvm_unreachable("NYI"); + atomics.emitCopyIntoMemory(rvalue); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 7200d8949f4c..d1abfeeeb8c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -677,11 +677,20 @@ void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS, void CIRGenFunction::buildScalarInit(const Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit) { - // TODO: this is where a lot of ObjC lifetime stuff would be done. - SourceLocRAIIObject Loc{*this, loc}; - mlir::Value value = buildScalarExpr(init); - buildStoreThroughLValue(RValue::get(value), lvalue); - return; + Qualifiers::ObjCLifetime lifetime = Qualifiers::ObjCLifetime::OCL_None; + assert(!UnimplementedFeature::objCLifetime()); + + if (!lifetime) { + SourceLocRAIIObject Loc{*this, loc}; + mlir::Value value = buildScalarExpr(init); + if (capturedByInit) + llvm_unreachable("NYI"); + assert(!UnimplementedFeature::emitNullabilityCheck()); + buildStoreThroughLValue(RValue::get(value), lvalue, true); + return; + } + + llvm_unreachable("NYI"); } void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index dded51b6488a..9a0661493be9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -668,7 +668,8 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, return RValue::get(field); } -void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { +void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst, + bool isInit) { if (!Dst.isSimple()) { if (Dst.isVectorElt()) { // Read/modify/write the vector, inserting the new element @@ -699,7 +700,7 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { } assert(Src.isScalar() && "Can't emit an agg store with this method"); - buildStoreOfScalar(Src.getScalarVal(), Dst); + buildStoreOfScalar(Src.getScalarVal(), Dst, isInit); } void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 75a9f4333c9d..cf029bad1181 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1181,7 +1181,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// Store the specified rvalue into the specified /// lvalue, where both are guaranteed to the have the same type, and that type /// is 'Ty'. - void buildStoreThroughLValue(RValue Src, LValue Dst); + void buildStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false); void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, mlir::Value &Result); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 2bb5f13a3d3a..3c4bcec93ec6 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -22,6 +22,7 @@ struct UnimplementedFeature { static bool buildTypeCheck() { return false; } static bool tbaa() { return false; } static bool cleanups() { return false; } + static bool emitNullabilityCheck() { return false; } // GNU vectors are done, but other kinds of vectors haven't been implemented. static bool scalableVectors() { return false; } @@ -59,6 +60,7 @@ struct UnimplementedFeature { // ObjC static bool setObjCGCLValueClass() { return false; } + static bool objCLifetime() { return false; } // Debug info static bool generateDebugInfo() { return false; } diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 66db5b728aad..9d935d978fe7 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -309,9 +309,18 @@ bool fsb(bool *c) { // LLVM-LABEL: @_Z3fsbPb // LLVM: atomicrmw xchg ptr {{.*}}, i8 {{.*}} seq_cst, align 1 -// FIXME: crashes -// void atomicinit(void) -// { -// _Atomic(unsigned int) j = 12; -// __c11_atomic_init(&j, 1); -// } \ No newline at end of file +void atomicinit(void) +{ + _Atomic(unsigned int) j = 12; + __c11_atomic_init(&j, 1); +} + +// CHECK-LABEL: @_Z10atomicinitv +// CHECK: %[[ADDR:.*]] = cir.alloca !u32i, !cir.ptr, ["j" +// CHECK: cir.store {{.*}}, %[[ADDR]] : !u32i, !cir.ptr +// CHECK: cir.store {{.*}}, %[[ADDR]] : !u32i, !cir.ptr + +// LLVM-LABEL: @_Z10atomicinitv +// LLVM: %[[ADDR:.*]] = alloca i32, i64 1, align 4 +// LLVM: store i32 12, ptr %[[ADDR]], align 4 +// LLVM: store i32 1, ptr %[[ADDR]], align 4 \ No newline at end of file From 0b54a3f7cf38c1d1aac487714abf9cbd6fcb7591 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 8 May 2024 14:33:34 -0700 Subject: [PATCH 1564/2301] [CIR][CIRGen] Add most simple form of __c11_atomic_fetch_{add,sub} --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 4 +++- clang/test/CIR/CodeGen/atomic.cpp | 16 +++++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 59ad4177d785..95d3b28682b4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -801,7 +801,9 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__hip_atomic_fetch_sub: case AtomicExpr::AO__opencl_atomic_fetch_add: case AtomicExpr::AO__opencl_atomic_fetch_sub: - llvm_unreachable("NYI"); + if (MemTy->isPointerType()) { + llvm_unreachable("NYI"); + } [[fallthrough]]; case AtomicExpr::AO__atomic_fetch_add: case AtomicExpr::AO__atomic_fetch_max: diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 9d935d978fe7..3a9b60d8facf 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -323,4 +323,18 @@ void atomicinit(void) // LLVM-LABEL: @_Z10atomicinitv // LLVM: %[[ADDR:.*]] = alloca i32, i64 1, align 4 // LLVM: store i32 12, ptr %[[ADDR]], align 4 -// LLVM: store i32 1, ptr %[[ADDR]], align 4 \ No newline at end of file +// LLVM: store i32 1, ptr %[[ADDR]], align 4 + +void incdec() { + _Atomic(unsigned int) j = 12; + __c11_atomic_fetch_add(&j, 1, 0); + __c11_atomic_fetch_sub(&j, 1, 0); +} + +// CHECK-LABEL: @_Z6incdecv +// CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !u32i, relaxed) fetch_first +// CHECK: cir.atomic.fetch(sub, {{.*}} : !cir.ptr, {{.*}} : !u32i, relaxed) fetch_first + +// LLVM-LABEL: @_Z6incdecv +// LLVM: atomicrmw add ptr {{.*}}, i32 {{.*}} monotonic, align 4 +// LLVM: atomicrmw sub ptr {{.*}}, i32 {{.*}} monotonic, align 4 \ No newline at end of file From a84dc84a55a0336819c5e7053c48dc2af75014b6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 8 May 2024 19:00:25 -0700 Subject: [PATCH 1565/2301] [CIR][CIRGen][NFC] Cleanup buildIfOnBoolExpr --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 63 ++++++++++++---------- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 10 ++-- 3 files changed, 42 insertions(+), 33 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 9a0661493be9..01dfa5abe2b4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2113,8 +2113,7 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, auto *trueExpr = E->getTrueExpr(); auto *falseExpr = E->getFalseExpr(); - mlir::Value condV = - CGF.buildOpOnBoolExpr(E->getCond(), loc, trueExpr, falseExpr); + mlir::Value condV = CGF.buildOpOnBoolExpr(loc, E->getCond()); SmallVector insertPoints{}; mlir::Type yieldTy{}; @@ -2354,54 +2353,64 @@ bool CIRGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) { mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, const Stmt *thenS, const Stmt *elseS) { + // Attempt to be more accurate as possible with IfOp location, generate + // one fused location that has either 2 or 4 total locations, depending + // on else's availability. auto getStmtLoc = [this](const Stmt &s) { return mlir::FusedLoc::get(builder.getContext(), {getLoc(s.getSourceRange().getBegin()), getLoc(s.getSourceRange().getEnd())}); }; - auto thenLoc = getStmtLoc(*thenS); std::optional elseLoc; - SmallVector ifLocs{thenLoc}; - - if (elseS) { + if (elseS) elseLoc = getStmtLoc(*elseS); - ifLocs.push_back(*elseLoc); - } - // Attempt to be more accurate as possible with IfOp location, generate - // one fused location that has either 2 or 4 total locations, depending - // on else's availability. - auto loc = mlir::FusedLoc::get(builder.getContext(), ifLocs); - - // Emit the code with the fully general case. - mlir::Value condV = buildOpOnBoolExpr(cond, loc, thenS, elseS); mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); - - builder.create( - loc, condV, elseS, - /*thenBuilder=*/ + buildIfOnBoolExpr( + cond, /*thenBuilder=*/ [&](mlir::OpBuilder &, mlir::Location) { LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()}; resThen = buildStmt(thenS, /*useCurrentScope=*/true); }, + thenLoc, /*elseBuilder=*/ [&](mlir::OpBuilder &, mlir::Location) { assert(elseLoc && "Invalid location for elseS."); LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()}; resElse = buildStmt(elseS, /*useCurrentScope=*/true); - }); + }, + elseLoc); return mlir::LogicalResult::success(resThen.succeeded() && resElse.succeeded()); } +/// Emit an `if` on a boolean condition, filling `then` and `else` into +/// appropriated regions. +mlir::cir::IfOp CIRGenFunction::buildIfOnBoolExpr( + const clang::Expr *cond, + llvm::function_ref thenBuilder, + mlir::Location thenLoc, + llvm::function_ref elseBuilder, + std::optional elseLoc) { + + SmallVector ifLocs{thenLoc}; + if (elseLoc) + ifLocs.push_back(*elseLoc); + auto loc = mlir::FusedLoc::get(builder.getContext(), ifLocs); + + // Emit the code with the fully general case. + mlir::Value condV = buildOpOnBoolExpr(loc, cond); + return builder.create(loc, condV, elseLoc.has_value(), + /*thenBuilder=*/thenBuilder, + /*elseBuilder=*/elseBuilder); +} + /// TODO(cir): PGO data /// TODO(cir): see EmitBranchOnBoolExpr for extra ideas). -mlir::Value CIRGenFunction::buildOpOnBoolExpr(const Expr *cond, - mlir::Location loc, - const Stmt *thenS, - const Stmt *elseS) { +mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, + const Expr *cond) { // TODO(CIR): scoped ApplyDebugLocation DL(*this, Cond); // TODO(CIR): __builtin_unpredictable and profile counts? cond = cond->IgnoreParens(); @@ -2415,17 +2424,13 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(const Expr *cond, // This should be done in CIR prior to LLVM lowering, if we do now // we can make CIR based diagnostics misleading. // cir.ternary(!x, t, f) -> cir.ternary(x, f, t) - // if (CondUOp->getOpcode() == UO_LNot) { - // buildOpOnBoolExpr(CondUOp->getSubExpr(), loc, elseS, thenS); - // } assert(!UnimplementedFeature::shouldReverseUnaryCondOnBoolExpr()); } if (const ConditionalOperator *CondOp = dyn_cast(cond)) { auto *trueExpr = CondOp->getTrueExpr(); auto *falseExpr = CondOp->getFalseExpr(); - mlir::Value condV = - buildOpOnBoolExpr(CondOp->getCond(), loc, trueExpr, falseExpr); + mlir::Value condV = buildOpOnBoolExpr(loc, CondOp->getCond()); auto ternaryOpRes = builder diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 8d20e2a8c1a4..b3b42abc026e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -2139,7 +2139,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( .getResult(); } - mlir::Value condV = CGF.buildOpOnBoolExpr(condExpr, loc, lhsExpr, rhsExpr); + mlir::Value condV = CGF.buildOpOnBoolExpr(loc, condExpr); CIRGenFunction::ConditionalEvaluation eval(CGF); SmallVector insertPoints{}; mlir::Type yieldTy{}; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index cf029bad1181..115c9226a360 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1031,13 +1031,17 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS); + mlir::cir::IfOp buildIfOnBoolExpr( + const clang::Expr *cond, + llvm::function_ref thenBuilder, + mlir::Location thenLoc, + llvm::function_ref elseBuilder, + std::optional elseLoc = {}); mlir::Value buildTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS); - mlir::Value buildOpOnBoolExpr(const clang::Expr *cond, mlir::Location loc, - const clang::Stmt *thenS, - const clang::Stmt *elseS); + mlir::Value buildOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond); class ConstantEmission { // Cannot use mlir::TypedAttr directly here because of bit availability. From 22763b72e2b98d9c406d91ecc0ee172d81683d85 Mon Sep 17 00:00:00 2001 From: GaoXiangYa <168072492+GaoXiangYa@users.noreply.github.com> Date: Fri, 10 May 2024 01:20:12 +0800 Subject: [PATCH 1566/2301] [CIR][Lowering] Add MLIR lowering support for CIR math operations (#592) This pr adds `cir.ceil` `cir.exp2` `cir.exp` `cir.fabs` `cir.floor` `cir.log` `cir.log10` `cir.log2` `cir.round` `cir.sqrt` lowering to MLIR passes and test files. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 143 +++++++++++++++++- clang/test/CIR/Lowering/ThroughMLIR/ceil.cir | 30 ++++ clang/test/CIR/Lowering/ThroughMLIR/exp.cir | 30 ++++ clang/test/CIR/Lowering/ThroughMLIR/fabs.cir | 30 ++++ clang/test/CIR/Lowering/ThroughMLIR/floor.cir | 30 ++++ clang/test/CIR/Lowering/ThroughMLIR/log.cir | 30 ++++ clang/test/CIR/Lowering/ThroughMLIR/round.cir | 30 ++++ clang/test/CIR/Lowering/ThroughMLIR/sqrt.cir | 30 ++++ 8 files changed, 345 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/ceil.cir create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/exp.cir create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/fabs.cir create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/floor.cir create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/log.cir create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/round.cir create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/sqrt.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index e5797c0789ef..24f55b4e79fd 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -209,6 +209,129 @@ class CIRCosOpLowering : public mlir::OpConversionPattern { } }; +class CIRSqrtOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::SqrtOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + +class CIRFAbsOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::FAbsOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + +class CIRFloorOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::FloorOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + +class CIRCeilOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CeilOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + +class CIRLog10OpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::Log10Op op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + +class CIRLogOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::LogOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + +class CIRLog2OpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::Log2Op op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + +class CIRRoundOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::RoundOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + +class CIRExpOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ExpOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + +class CIRExp2OpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::Exp2Op op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; + class CIRSinOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -1000,14 +1123,18 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns.add(converter, - patterns.getContext()); + patterns + .add( + converter, patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/ceil.cir b/clang/test/CIR/Lowering/ThroughMLIR/ceil.cir new file mode 100644 index 000000000000..dce0012a451b --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/ceil.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %0 = cir.const #cir.fp<1.31> : !cir.float + %1 = cir.const #cir.fp<3.0> : !cir.long_double + %2 = cir.const #cir.fp<2.73> : !cir.double + %3 = cir.const #cir.fp<4.67> : !cir.long_double + %4 = cir.ceil %0 : !cir.float + %5 = cir.ceil %1 : !cir.long_double + %6 = cir.ceil %2 : !cir.double + %7 = cir.ceil %3 : !cir.long_double + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant 1.310000e+00 : f32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant 3.000000e+00 : f80 +// CHECK-NEXT: %[[C2:.+]] = arith.constant 2.730000e+00 : f64 +// CHECK-NEXT: %[[C3:.+]] = arith.constant 4.670000e+00 : f64 +// CHECK-NEXT: %{{.+}} = math.ceil %[[C0]] : f32 +// CHECK-NEXT: %{{.+}} = math.ceil %[[C1]] : f80 +// CHECK-NEXT: %{{.+}} = math.ceil %[[C2]] : f64 +// CHECK-NEXT: %{{.+}} = math.ceil %[[C3]] : f64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/exp.cir b/clang/test/CIR/Lowering/ThroughMLIR/exp.cir new file mode 100644 index 000000000000..13294b7532dc --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/exp.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %0 = cir.const #cir.fp<1.0> : !cir.float + %1 = cir.const #cir.fp<3.0> : !cir.long_double + %2 = cir.const #cir.fp<2.0> : !cir.double + %3 = cir.const #cir.fp<4.00> : !cir.long_double + %4 = cir.exp %0 : !cir.float + %5 = cir.exp %1 : !cir.long_double + %6 = cir.exp2 %2 : !cir.double + %7 = cir.exp2 %3 : !cir.long_double + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant 1.000000e+00 : f32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant 3.000000e+00 : f80 +// CHECK-NEXT: %[[C2:.+]] = arith.constant 2.000000e+00 : f64 +// CHECK-NEXT: %[[C3:.+]] = arith.constant 4.000000e+00 : f64 +// CHECK-NEXT: %{{.+}} = math.exp %[[C0]] : f32 +// CHECK-NEXT: %{{.+}} = math.exp %[[C1]] : f80 +// CHECK-NEXT: %{{.+}} = math.exp2 %[[C2]] : f64 +// CHECK-NEXT: %{{.+}} = math.exp2 %[[C3]] : f64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/fabs.cir b/clang/test/CIR/Lowering/ThroughMLIR/fabs.cir new file mode 100644 index 000000000000..9a6c33fd8ab6 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/fabs.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %0 = cir.const #cir.fp<-1.0> : !cir.float + %1 = cir.const #cir.fp<-3.0> : !cir.long_double + %2 = cir.const #cir.fp<-2.0> : !cir.double + %3 = cir.const #cir.fp<-4.00> : !cir.long_double + %4 = cir.fabs %0 : !cir.float + %5 = cir.fabs %1 : !cir.long_double + %6 = cir.fabs %2 : !cir.double + %7 = cir.fabs %3 : !cir.long_double + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant -1.000000e+00 : f32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant -3.000000e+00 : f80 +// CHECK-NEXT: %[[C2:.+]] = arith.constant -2.000000e+00 : f64 +// CHECK-NEXT: %[[C3:.+]] = arith.constant -4.000000e+00 : f64 +// CHECK-NEXT: %{{.+}} = math.absf %[[C0]] : f32 +// CHECK-NEXT: %{{.+}} = math.absf %[[C1]] : f80 +// CHECK-NEXT: %{{.+}} = math.absf %[[C2]] : f64 +// CHECK-NEXT: %{{.+}} = math.absf %[[C3]] : f64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/floor.cir b/clang/test/CIR/Lowering/ThroughMLIR/floor.cir new file mode 100644 index 000000000000..e4718468966c --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/floor.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %0 = cir.const #cir.fp<1.51> : !cir.float + %1 = cir.const #cir.fp<3.0> : !cir.long_double + %2 = cir.const #cir.fp<2.73> : !cir.double + %3 = cir.const #cir.fp<4.67> : !cir.long_double + %4 = cir.floor %0 : !cir.float + %5 = cir.floor %1 : !cir.long_double + %6 = cir.floor %2 : !cir.double + %7 = cir.floor %3 : !cir.long_double + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant 1.510000e+00 : f32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant 3.000000e+00 : f80 +// CHECK-NEXT: %[[C2:.+]] = arith.constant 2.730000e+00 : f64 +// CHECK-NEXT: %[[C3:.+]] = arith.constant 4.670000e+00 : f64 +// CHECK-NEXT: %{{.+}} = math.floor %[[C0]] : f32 +// CHECK-NEXT: %{{.+}} = math.floor %[[C1]] : f80 +// CHECK-NEXT: %{{.+}} = math.floor %[[C2]] : f64 +// CHECK-NEXT: %{{.+}} = math.floor %[[C3]] : f64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/log.cir b/clang/test/CIR/Lowering/ThroughMLIR/log.cir new file mode 100644 index 000000000000..e9af7c88ca8a --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/log.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %0 = cir.const #cir.fp<1.0> : !cir.float + %1 = cir.const #cir.fp<3.0> : !cir.long_double + %2 = cir.const #cir.fp<2.0> : !cir.double + %3 = cir.const #cir.fp<4.0> : !cir.long_double + %4 = cir.log %0 : !cir.float + %5 = cir.log %1 : !cir.long_double + %6 = cir.log2 %2 : !cir.double + %7 = cir.log10 %3 : !cir.long_double + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant 1.000000e+00 : f32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant 3.000000e+00 : f80 +// CHECK-NEXT: %[[C2:.+]] = arith.constant 2.000000e+00 : f64 +// CHECK-NEXT: %[[C3:.+]] = arith.constant 4.000000e+00 : f64 +// CHECK-NEXT: %{{.+}} = math.log %[[C0]] : f32 +// CHECK-NEXT: %{{.+}} = math.log %[[C1]] : f80 +// CHECK-NEXT: %{{.+}} = math.log2 %[[C2]] : f64 +// CHECK-NEXT: %{{.+}} = math.log10 %[[C3]] : f64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/round.cir b/clang/test/CIR/Lowering/ThroughMLIR/round.cir new file mode 100644 index 000000000000..117a93bcba9b --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/round.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %0 = cir.const #cir.fp<1.31> : !cir.float + %1 = cir.const #cir.fp<3.0> : !cir.long_double + %2 = cir.const #cir.fp<2.73> : !cir.double + %3 = cir.const #cir.fp<4.67> : !cir.long_double + %4 = cir.round %0 : !cir.float + %5 = cir.round %1 : !cir.long_double + %6 = cir.round %2 : !cir.double + %7 = cir.round %3 : !cir.long_double + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant 1.310000e+00 : f32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant 3.000000e+00 : f80 +// CHECK-NEXT: %[[C2:.+]] = arith.constant 2.730000e+00 : f64 +// CHECK-NEXT: %[[C3:.+]] = arith.constant 4.670000e+00 : f64 +// CHECK-NEXT: %{{.+}} = math.round %[[C0]] : f32 +// CHECK-NEXT: %{{.+}} = math.round %[[C1]] : f80 +// CHECK-NEXT: %{{.+}} = math.round %[[C2]] : f64 +// CHECK-NEXT: %{{.+}} = math.round %[[C3]] : f64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/sqrt.cir b/clang/test/CIR/Lowering/ThroughMLIR/sqrt.cir new file mode 100644 index 000000000000..a9b8c1a7efa6 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/sqrt.cir @@ -0,0 +1,30 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %0 = cir.const #cir.fp<9.0> : !cir.float + %1 = cir.const #cir.fp<100.0> : !cir.long_double + %2 = cir.const #cir.fp<1.0> : !cir.double + %3 = cir.const #cir.fp<2.56> : !cir.long_double + %4 = cir.sqrt %0 : !cir.float + %5 = cir.sqrt %1 : !cir.long_double + %6 = cir.sqrt %2 : !cir.double + %7 = cir.sqrt %3 : !cir.long_double + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant 9.000000e+00 : f32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant 1.000000e+02 : f80 +// CHECK-NEXT: %[[C2:.+]] = arith.constant 1.000000e+00 : f64 +// CHECK-NEXT: %[[C3:.+]] = arith.constant 2.560000e+00 : f64 +// CHECK-NEXT: %{{.+}} = math.sqrt %[[C0]] : f32 +// CHECK-NEXT: %{{.+}} = math.sqrt %[[C1]] : f80 +// CHECK-NEXT: %{{.+}} = math.sqrt %[[C2]] : f64 +// CHECK-NEXT: %{{.+}} = math.sqrt %[[C3]] : f64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } From f2b90018e0ec19cd4f74a97fcf71746ffa96bd33 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 8 May 2024 19:22:40 -0700 Subject: [PATCH 1567/2301] [CIR][CIRGen] Add VisitAbstractConditionalOperator aggregate edition --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 69 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 + .../CodeGen/UnimplementedFeatureGuarding.h | 1 + clang/test/CIR/CodeGen/abstract-cond.c | 37 ++++++++++ 5 files changed, 110 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/abstract-cond.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 01dfa5abe2b4..9e27d77ede6a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2193,7 +2193,10 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, LValue CIRGenFunction::buildConditionalOperatorLValue( const AbstractConditionalOperator *expr) { if (!expr->isGLValue()) { - llvm_unreachable("NYI"); + // ?: here should be an aggregate. + assert(hasAggregateEvaluationKind(expr->getType()) && + "Unexpected conditional operator!"); + return buildAggExprToLValue(expr); } OpaqueValueMapping binding(*this, expr); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index aee7280881be..c22f69971f35 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -271,9 +271,7 @@ class AggExprEmitter : public StmtVisitor { void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) { llvm_unreachable("NYI"); } - void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { - llvm_unreachable("NYI"); - } + void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E); void VisitChooseExpr(const ChooseExpr *E) { llvm_unreachable("NYI"); } void VisitInitListExpr(InitListExpr *E); void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef Args, @@ -1299,6 +1297,60 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { CGF.buildCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress()); } +void AggExprEmitter::VisitAbstractConditionalOperator( + const AbstractConditionalOperator *E) { + auto &builder = CGF.getBuilder(); + auto loc = CGF.getLoc(E->getSourceRange()); + + // Bind the common expression if necessary. + CIRGenFunction::OpaqueValueMapping binding(CGF, E); + CIRGenFunction::ConditionalEvaluation eval(CGF); + assert(!UnimplementedFeature::getProfileCount()); + + // Save whether the destination's lifetime is externally managed. + bool isExternallyDestructed = Dest.isExternallyDestructed(); + bool destructNonTrivialCStruct = + !isExternallyDestructed && + E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct; + isExternallyDestructed |= destructNonTrivialCStruct; + + CGF.buildIfOnBoolExpr( + E->getCond(), /*thenBuilder=*/ + [&](mlir::OpBuilder &, mlir::Location) { + eval.begin(CGF); + { + CIRGenFunction::LexicalScope lexScope{CGF, loc, + builder.getInsertionBlock()}; + Dest.setExternallyDestructed(isExternallyDestructed); + assert(!UnimplementedFeature::incrementProfileCounter()); + Visit(E->getTrueExpr()); + } + eval.end(CGF); + }, + loc, + /*elseBuilder=*/ + [&](mlir::OpBuilder &, mlir::Location) { + eval.begin(CGF); + { + CIRGenFunction::LexicalScope lexScope{CGF, loc, + builder.getInsertionBlock()}; + // If the result of an agg expression is unused, then the emission + // of the LHS might need to create a destination slot. That's fine + // with us, and we can safely emit the RHS into the same slot, but + // we shouldn't claim that it's already being destructed. + Dest.setExternallyDestructed(isExternallyDestructed); + assert(!UnimplementedFeature::incrementProfileCounter()); + Visit(E->getFalseExpr()); + } + eval.end(CGF); + }, + loc); + + if (destructNonTrivialCStruct) + llvm_unreachable("NYI"); + assert(!UnimplementedFeature::incrementProfileCounter()); +} + //===----------------------------------------------------------------------===// // Helpers and dispatcher //===----------------------------------------------------------------------===// @@ -1568,3 +1620,14 @@ CIRGenFunction::getOverlapForFieldInit(const FieldDecl *FD) { // The tail padding may contain values we need to preserve. return AggValueSlot::MayOverlap; } + +LValue CIRGenFunction::buildAggExprToLValue(const Expr *E) { + assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!"); + Address Temp = CreateMemTemp(E->getType(), getLoc(E->getSourceRange())); + LValue LV = makeAddrLValue(Temp, E->getType()); + buildAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, + AggValueSlot::DoesNotOverlap)); + return LV; +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 115c9226a360..5cfae69b4202 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1597,6 +1597,8 @@ class CIRGenFunction : public CIRGenTypeCache { buildAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile); } + LValue buildAggExprToLValue(const Expr *E); + /// Emit an aggregate copy. /// /// \param isVolatile \c true iff either the source or the destination is diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 3c4bcec93ec6..e2e9f0a4d91f 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -124,6 +124,7 @@ struct UnimplementedFeature { static bool tryEmitAsConstant() { return false; } static bool incrementProfileCounter() { return false; } static bool createProfileWeightsForLoop() { return false; } + static bool getProfileCount() { return false; } static bool emitCondLikelihoodViaExpectIntrinsic() { return false; } static bool requiresReturnValueCheck() { return false; } static bool shouldEmitLifetimeMarkers() { return false; } diff --git a/clang/test/CIR/CodeGen/abstract-cond.c b/clang/test/CIR/CodeGen/abstract-cond.c new file mode 100644 index 000000000000..426adf7337a6 --- /dev/null +++ b/clang/test/CIR/CodeGen/abstract-cond.c @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// ?: in "lvalue" +struct s6 { int f0; }; +int f6(int a0, struct s6 a1, struct s6 a2) { + return (a0 ? a1 : a2).f0; +} + +// CIR-LABEL: @f6 +// CIR: %[[A0:.*]] = cir.alloca !s32i, !cir.ptr, ["a0" +// CIR: %[[A1:.*]] = cir.alloca !ty_22s622, !cir.ptr, ["a1" +// CIR: %[[A2:.*]] = cir.alloca !ty_22s622, !cir.ptr, ["a2" +// CIR: %[[TMP:.*]] = cir.alloca !ty_22s622, !cir.ptr, ["tmp"] {alignment = 4 : i64} +// CIR: %[[LOAD_A0:.*]] = cir.load %[[A0]] : !cir.ptr, !s32i +// CIR: %[[COND:.*]] = cir.cast(int_to_bool, %[[LOAD_A0]] : !s32i), !cir.bool +// CIR: cir.if %[[COND]] { +// CIR: cir.copy %[[A1]] to %[[TMP]] : !cir.ptr +// CIR: } else { +// CIR: cir.copy %[[A2]] to %[[TMP]] : !cir.ptr +// CIR: } +// CIR: cir.get_member %[[TMP]][0] {name = "f0"} : !cir.ptr -> !cir.ptr + +// LLVM-LABEL: @f6 +// LLVM: %[[LOAD_A0:.*]] = load i32, ptr {{.*}} +// LLVM: %[[COND:.*]] = icmp ne i32 %[[LOAD_A0]], 0 +// LLVM: br i1 %[[COND]], label %[[A1_PATH:.*]], label %[[A2_PATH:.*]], +// LLVM: [[A2_PATH]]: +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[TMP:.*]], ptr {{.*}}, i32 4, i1 false) +// LLVM: br label %[[EXIT:[a-z0-9]+]] +// LLVM: [[A1_PATH]]: +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[TMP]], ptr {{.*}}, i32 4, i1 false) +// LLVM: br label %[[EXIT]] +// LLVM: [[EXIT]]: +// LLVM: getelementptr {{.*}}, ptr %[[TMP]], i32 0, i32 0 \ No newline at end of file From 4a7c6fc90f6431b99caf00d3bf13ace863c3b3ef Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 9 May 2024 12:11:03 -0700 Subject: [PATCH 1568/2301] [CIR][Tests] Update test to newer approach and remove XFAIL --- clang/test/CIR/CodeGen/union-init.c | 44 ++++++++++------------------- 1 file changed, 15 insertions(+), 29 deletions(-) diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index 9cf96adcaf48..d7a06ee19651 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -1,5 +1,4 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s -// XFAIL: * typedef union { int value; @@ -13,31 +12,18 @@ void foo(int x) { A a = {.x = x}; } -// CHECK: cir.func @foo(%arg0: !s32i loc({{.*}})) -// CHECK: [[TMP0:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} -// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, !cir.ptr, ["a", init] {alignment = 4 : i64} -// CHECK: cir.store %arg0, [[TMP0]] : !s32i, !cir.ptr -// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = ""} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.cast(bitcast, [[TMP2]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.load [[TMP0]] : !cir.ptr, !s32i -// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP4]] : !s32i), !u32i -// CHECK: [[TMP6:%.*]] = cir.load [[TMP3]] : !cir.ptr, !u32i -// CHECK: [[TMP7:%.*]] = cir.const #cir.int<65535> : !u32i -// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i -// CHECK: [[TMP9:%.*]] = cir.const #cir.int<4294901760> : !u32i -// CHECK: [[TMP10:%.*]] = cir.binop(and, [[TMP6]], [[TMP9]]) : !u32i -// CHECK: [[TMP11:%.*]] = cir.binop(or, [[TMP10]], [[TMP8]]) : !u32i -// CHECK: cir.store [[TMP11]], [[TMP3]] : !u32i, !cir.ptr -// CHECK: [[TMP12:%.*]] = cir.cast(bitcast, [[TMP2]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP13:%.*]] = cir.const #cir.int<0> : !s32i -// CHECK: [[TMP14:%.*]] = cir.cast(integral, [[TMP13]] : !s32i), !u32i -// CHECK: [[TMP15:%.*]] = cir.load [[TMP12]] : !cir.ptr, !u32i -// CHECK: [[TMP16:%.*]] = cir.const #cir.int<65535> : !u32i -// CHECK: [[TMP17:%.*]] = cir.binop(and, [[TMP14]], [[TMP16]]) : !u32i -// CHECK: [[TMP18:%.*]] = cir.const #cir.int<16> : !u32i -// CHECK: [[TMP19:%.*]] = cir.shift(left, [[TMP17]] : !u32i, [[TMP18]] : !u32i) -> !u32i -// CHECK: [[TMP20:%.*]] = cir.const #cir.int<65535> : !u32i -// CHECK: [[TMP21:%.*]] = cir.binop(and, [[TMP15]], [[TMP20]]) : !u32i -// CHECK: [[TMP22:%.*]] = cir.binop(or, [[TMP21]], [[TMP19]]) : !u32i -// CHECK: cir.store [[TMP22]], [[TMP12]] : !u32i, !cir.ptr -// CHECK: cir.return +// CHECK: #[[bfi_x:.*]] = #cir.bitfield_info +// CHECK: #[[bfi_y:.*]] = #cir.bitfield_info + +// CHECK-LABEL: cir.func @foo( +// CHECK: %[[VAL_1:.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK: %[[VAL_2:.*]] = cir.alloca !ty_22A22, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK: cir.store {{.*}}, %[[VAL_1]] : !s32i, !cir.ptr +// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = ""} : !cir.ptr -> !cir.ptr +// CHECK: %[[VAL_4:.*]] = cir.cast(bitcast, %[[VAL_3]] : !cir.ptr), !cir.ptr +// CHECK: %[[VAL_5:.*]] = cir.load %[[VAL_1]] : !cir.ptr, !s32i +// CHECK: %[[VAL_6:.*]] = cir.set_bitfield(#[[bfi_x]], %[[VAL_4]] : !cir.ptr, %[[VAL_5]] : !s32i) -> !s32i +// CHECK: %[[VAL_7:.*]] = cir.cast(bitcast, %[[VAL_3]] : !cir.ptr), !cir.ptr +// CHECK: %[[VAL_8:.*]] = cir.const #cir.int<0> : !s32i +// CHECK: %[[VAL_9:.*]] = cir.set_bitfield(#[[bfi_y]], %[[VAL_7]] : !cir.ptr, %[[VAL_8]] : !s32i) -> !s32i +// CHECK: cir.return From cd1bfd9e68bcbeb5685df97f87bb4a4276028637 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 9 May 2024 12:26:34 -0700 Subject: [PATCH 1569/2301] [CIR][CIRGen] Add a missing union init scenario --- clang/lib/CIR/CodeGen/CIRGenCstEmitter.h | 6 +++++- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 12 +++++++++++- clang/test/CIR/CodeGen/union-init.c | 4 ++++ 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h index 086c68baec9c..28b8a925b1da 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h @@ -95,7 +95,11 @@ class ConstantEmitter { return emitForMemory(CGM, C, T); } - // static llvm::Constant *emitNullForMemory(CodeGenModule &CGM, QualType T); + mlir::Attribute emitNullForMemory(mlir::Location loc, QualType T) { + return emitNullForMemory(loc, CGM, T); + } + static mlir::Attribute emitNullForMemory(mlir::Location loc, + CIRGenModule &CGM, QualType T); static mlir::Attribute emitForMemory(CIRGenModule &CGM, mlir::Attribute C, clang::QualType T); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 08644dc163d0..ab40081c42e7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -662,7 +662,8 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) { if (Init) EltInit = Emitter.tryEmitPrivateForMemory(Init, Field->getType()); else - llvm_unreachable("NYI"); + EltInit = Emitter.emitNullForMemory(CGM.getLoc(ILE->getSourceRange()), + Field->getType()); if (!EltInit) return false; @@ -1866,3 +1867,12 @@ mlir::Attribute ConstantEmitter::emitAbstract(SourceLocation loc, } return C; } + +mlir::Attribute ConstantEmitter::emitNullForMemory(mlir::Location loc, + CIRGenModule &CGM, + QualType T) { + auto cstOp = dyn_cast( + CGM.buildNullConstant(T, loc).getDefiningOp()); + assert(cstOp && "expected cir.const op"); + return emitForMemory(CGM, cstOp.getValue(), T); +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index d7a06ee19651..7e756392ee62 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -12,6 +12,7 @@ void foo(int x) { A a = {.x = x}; } +// CHECK: ![[anon:.*]] = !cir.struct // CHECK: #[[bfi_x:.*]] = #cir.bitfield_info // CHECK: #[[bfi_y:.*]] = #cir.bitfield_info @@ -27,3 +28,6 @@ void foo(int x) { // CHECK: %[[VAL_8:.*]] = cir.const #cir.int<0> : !s32i // CHECK: %[[VAL_9:.*]] = cir.set_bitfield(#[[bfi_y]], %[[VAL_7]] : !cir.ptr, %[[VAL_8]] : !s32i) -> !s32i // CHECK: cir.return + +union { int i; float f; } u = { }; +// CHECK: cir.global external @u = #cir.zero : ![[anon]] \ No newline at end of file From 0ce2263d0044cc5059d839add9f2d97e87cf342b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 9 May 2024 15:33:47 -0700 Subject: [PATCH 1570/2301] [CIR][CIRGen] Implement VisitBinComma for aggregates --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 7 ++++++- clang/test/CIR/CodeGen/struct-comma.c | 14 ++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/struct-comma.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index c22f69971f35..7afdbd9ce77f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -259,7 +259,7 @@ class AggExprEmitter : public StmtVisitor { E->getType()); } - void VisitBinComma(const BinaryOperator *E) { llvm_unreachable("NYI"); } + void VisitBinComma(const BinaryOperator *E); void VisitBinCmp(const BinaryOperator *E); void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { llvm_unreachable("NYI"); @@ -1351,6 +1351,11 @@ void AggExprEmitter::VisitAbstractConditionalOperator( assert(!UnimplementedFeature::incrementProfileCounter()); } +void AggExprEmitter::VisitBinComma(const BinaryOperator *E) { + CGF.buildIgnoredExpr(E->getLHS()); + Visit(E->getRHS()); +} + //===----------------------------------------------------------------------===// // Helpers and dispatcher //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/struct-comma.c b/clang/test/CIR/CodeGen/struct-comma.c new file mode 100644 index 000000000000..ffd0544fda43 --- /dev/null +++ b/clang/test/CIR/CodeGen/struct-comma.c @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct AA {int a, b;} x; +extern int r(void); +void a(struct AA* b) {*b = (r(), x);} + +// CHECK-LABEL: @a +// CHECK: %[[ADDR:.*]] = cir.alloca {{.*}} ["b" +// CHECK: cir.store {{.*}}, %[[ADDR]] +// CHECK: %[[LOAD:.*]] = cir.load deref %[[ADDR]] +// CHECK: cir.call @r +// CHECK: %[[GADDR:.*]] = cir.get_global @x +// CHECK: cir.copy %[[GADDR]] to %[[LOAD]] \ No newline at end of file From b2ec421927bc71ac61a90010904d6dfcd9e28428 Mon Sep 17 00:00:00 2001 From: Twice Date: Fri, 10 May 2024 07:36:52 +0900 Subject: [PATCH 1571/2301] [CIR][LowerToLLVM] Support pointer arithmetic for function types (#594) Same as void pointers `void *`, we treat function pointer arithmetic as `GEP i8`, according to the original behavior of clang ([godbolt](https://godbolt.org/z/EMdvfdTe7)). --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 ++-- clang/test/CIR/CodeGen/pointer-arith-ext.c | 42 +++++++++++++++---- 2 files changed, 40 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ad3f026c2c28..ada50bc9a879 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -485,10 +485,12 @@ class CIRPtrStrideOpLowering auto *tc = getTypeConverter(); const auto resultTy = tc->convertType(ptrStrideOp.getType()); auto elementTy = tc->convertType(ptrStrideOp.getElementTy()); - auto ctx = elementTy.getContext(); + auto *ctx = elementTy.getContext(); - // void doesn't really have a layout to use in GEPs, make it i8 instead. - if (elementTy.isa()) + // void and function types doesn't really have a layout to use in GEPs, + // make it i8 instead. + if (elementTy.isa() || + elementTy.isa()) elementTy = mlir::IntegerType::get(elementTy.getContext(), 8, mlir::IntegerType::Signless); diff --git a/clang/test/CIR/CodeGen/pointer-arith-ext.c b/clang/test/CIR/CodeGen/pointer-arith-ext.c index 5db612254d6e..f64915b0569c 100644 --- a/clang/test/CIR/CodeGen/pointer-arith-ext.c +++ b/clang/test/CIR/CodeGen/pointer-arith-ext.c @@ -48,13 +48,41 @@ void *f4(void *a, int b) { return a - b; } // Similar to f4, just make sure it does not crash. void *f4_1(void *a, int b) { return (a -= b); } +FP f5(FP a, int b) { return a + b; } +// CIR-LABEL: f5 +// CIR: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>>, !cir.ptr> +// CIR: %[[STRIDE:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CIR: cir.ptr_stride(%[[PTR]] : !cir.ptr>, %[[STRIDE]] : !s32i) + +// LLVM-LABEL: f5 +// LLVM: %[[PTR:.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: %[[TOEXT:.*]] = load i32, ptr {{.*}}, align 4 +// LLVM: %[[STRIDE:.*]] = sext i32 %[[TOEXT]] to i64 +// LLVM: getelementptr i8, ptr %[[PTR]], i64 %[[STRIDE]] + +// These test the same paths above, just make sure it does not crash. +FP f5_1(FP a, int b) { return (a += b); } +FP f6(int a, FP b) { return a + b; } +FP f6_1(int a, FP b) { return (a += b); } + +FP f7(FP a, int b) { return a - b; } +// CIR-LABEL: f7 +// CIR: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>>, !cir.ptr> +// CIR: %[[STRIDE:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CIR: %[[SUB:.*]] = cir.unary(minus, %[[STRIDE]]) : !s32i, !s32i +// CIR: cir.ptr_stride(%[[PTR]] : !cir.ptr>, %[[SUB]] : !s32i) + +// LLVM-LABEL: f7 +// LLVM: %[[PTR:.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: %[[TOEXT:.*]] = load i32, ptr {{.*}}, align 4 +// LLVM: %[[STRIDE:.*]] = sext i32 %[[TOEXT]] to i64 +// LLVM: %[[SUB:.*]] = sub i64 0, %[[STRIDE]] +// LLVM: getelementptr i8, ptr %[[PTR]], i64 %[[SUB]] + +// Similar to f7, just make sure it does not crash. +FP f7_1(FP a, int b) { return (a -= b); } + // FIXME: add support for the remaining ones. -// FP f5(FP a, int b) { return a + b; } -// FP f5_1(FP a, int b) { return (a += b); } -// FP f6(int a, FP b) { return a + b; } -// FP f6_1(int a, FP b) { return (a += b); } -// FP f7(FP a, int b) { return a - b; } -// FP f7_1(FP a, int b) { return (a -= b); } // void f8(void *a, int b) { return *(a + b); } // void f8_1(void *a, int b) { return a[b]; } @@ -69,4 +97,4 @@ unsigned char *p(unsigned int x) { // CIR: cir.ptr_stride({{.*}} : !cir.ptr, %[[SUB]] : !u32i), !cir.ptr // LLVM-LABEL: @p -// LLVM: getelementptr i8, ptr {{.*}} \ No newline at end of file +// LLVM: getelementptr i8, ptr {{.*}} From 9a891d6a1c216b580aa6396a69e35a6c963122e6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 9 May 2024 16:01:41 -0700 Subject: [PATCH 1572/2301] [CIR][CIRGen] Handle one more case of VisitCompoundLiteralExpr for aggregates --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 2 +- clang/test/CIR/CodeGen/compound-literal.c | 47 +++++++++++++++++++++-- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 7afdbd9ce77f..87f770999b59 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -822,7 +822,7 @@ void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { bool Destruct = !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed(); if (Destruct) - llvm_unreachable("NYI"); + Slot.setExternallyDestructed(); CGF.buildAggExpr(E->getInitializer(), Slot); diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c index 36bb7d324768..62f15826dd64 100644 --- a/clang/test/CIR/CodeGen/compound-literal.c +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -S -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM // XFAIL: * @@ -53,7 +53,7 @@ struct G g(int x, int y, int z) { } // CIR: cir.func @g -// CIR: %[[RETVAL:.*]] = cir.alloca !ty_22G22, !cir.ptr, ["__retval"] {alignment = 2 : i64} loc(#loc18) +// CIR: %[[RETVAL:.*]] = cir.alloca !ty_22G22, !cir.ptr, ["__retval"] {alignment = 2 : i64} // CIR: %[[X:.*]] = cir.get_member %[[RETVAL]][0] {name = "x"} // CIR: cir.store {{.*}}, %[[X]] : !s16i // CIR: %[[Y:.*]] = cir.get_member %[[RETVAL]][1] {name = "y"} @@ -65,4 +65,43 @@ struct G g(int x, int y, int z) { // Nothing meaningful to test for LLVM codegen here. // FIXME: ABI note, LLVM lowering differs from traditional LLVM codegen here, -// because the former does a memcopy + i48 load. \ No newline at end of file +// because the former does a memcopy + i48 load. + +typedef struct { unsigned long pgprot; } pgprot_t; +void split_large_page(unsigned long addr, pgprot_t prot) +{ + (addr ? prot : ((pgprot_t) { 0x001 } )).pgprot; +} + +// CIR-LABEL: @split_large_page +// CIR: %[[VAL_2:.*]] = cir.alloca !u64i, !cir.ptr, ["addr", init] {alignment = 8 : i64} +// CIR: %[[VAL_3:.*]] = cir.alloca !ty_22pgprot_t22, !cir.ptr, ["prot", init] {alignment = 8 : i64} +// CIR: %[[VAL_4:.*]] = cir.alloca !ty_22pgprot_t22, !cir.ptr, ["tmp"] {alignment = 8 : i64} +// CIR: cir.store {{.*}}, %[[VAL_2]] : !u64i, !cir.ptr +// CIR: cir.store {{.*}}, %[[VAL_3]] : !ty_22pgprot_t22, !cir.ptr +// CIR: %[[VAL_5:.*]] = cir.load %[[VAL_2]] : !cir.ptr, !u64i +// CIR: %[[VAL_6:.*]] = cir.cast(int_to_bool, %[[VAL_5]] : !u64i), !cir.bool +// CIR: cir.if %[[VAL_6]] { +// CIR: cir.copy %[[VAL_3]] to %[[VAL_4]] : !cir.ptr +// CIR: } else { +// CIR: %[[VAL_7:.*]] = cir.get_member %[[VAL_4]][0] {name = "pgprot"} : !cir.ptr -> !cir.ptr +// CIR: %[[VAL_8:.*]] = cir.const #cir.int<1> : !s32i +// CIR: %[[VAL_9:.*]] = cir.cast(integral, %[[VAL_8]] : !s32i), !u64i +// CIR: cir.store %[[VAL_9]], %[[VAL_7]] : !u64i, !cir.ptr +// CIR: } +// CIR: %[[VAL_10:.*]] = cir.get_member %[[VAL_4]][0] {name = "pgprot"} : !cir.ptr -> !cir.ptr +// CIR: %[[VAL_11:.*]] = cir.load %[[VAL_10]] : !cir.ptr, !u64i +// CIR: cir.return +// CIR: } + +// CHECK-LABEL: @split_large_page +// CHECK: br i1 {{.*}}, label %[[TRUE:[a-z0-9]+]], label %[[FALSE:[a-z0-9]+]] +// CHECK: [[FALSE]]: +// CHECK: %[[GEP:.*]] = getelementptr {{.*}}, ptr %[[ADDR:.*]], i32 0, i32 0 +// CHECK: store i64 1, ptr %[[GEP]], align 8 +// CHECK: br label %[[EXIT:[a-z0-9]+]] +// CHECK: [[TRUE]]: +// CHECK: call void @llvm.memcpy.p0.p0.i32(ptr %[[ADDR]], ptr {{.*}}, i32 8, i1 false) +// CHECK: br label %[[EXIT]] +// CHECK: [[EXIT]]: +// CHECK: ret void \ No newline at end of file From c31ff2adfa389714fdb46e14d2b901a14935a6f0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 9 May 2024 16:40:52 -0700 Subject: [PATCH 1573/2301] [CIR][NFC] Move tests to more appropriate subdir --- clang/test/CIR/{CodeGen => Transforms}/if.cir | 0 clang/test/CIR/{CodeGen => Transforms}/loop.cir | 0 clang/test/CIR/{CodeGen => Transforms}/scope.cir | 0 clang/test/CIR/{CodeGen => Transforms}/switch.cir | 0 clang/test/CIR/{CodeGen => Transforms}/ternary.cir | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename clang/test/CIR/{CodeGen => Transforms}/if.cir (100%) rename clang/test/CIR/{CodeGen => Transforms}/loop.cir (100%) rename clang/test/CIR/{CodeGen => Transforms}/scope.cir (100%) rename clang/test/CIR/{CodeGen => Transforms}/switch.cir (100%) rename clang/test/CIR/{CodeGen => Transforms}/ternary.cir (100%) diff --git a/clang/test/CIR/CodeGen/if.cir b/clang/test/CIR/Transforms/if.cir similarity index 100% rename from clang/test/CIR/CodeGen/if.cir rename to clang/test/CIR/Transforms/if.cir diff --git a/clang/test/CIR/CodeGen/loop.cir b/clang/test/CIR/Transforms/loop.cir similarity index 100% rename from clang/test/CIR/CodeGen/loop.cir rename to clang/test/CIR/Transforms/loop.cir diff --git a/clang/test/CIR/CodeGen/scope.cir b/clang/test/CIR/Transforms/scope.cir similarity index 100% rename from clang/test/CIR/CodeGen/scope.cir rename to clang/test/CIR/Transforms/scope.cir diff --git a/clang/test/CIR/CodeGen/switch.cir b/clang/test/CIR/Transforms/switch.cir similarity index 100% rename from clang/test/CIR/CodeGen/switch.cir rename to clang/test/CIR/Transforms/switch.cir diff --git a/clang/test/CIR/CodeGen/ternary.cir b/clang/test/CIR/Transforms/ternary.cir similarity index 100% rename from clang/test/CIR/CodeGen/ternary.cir rename to clang/test/CIR/Transforms/ternary.cir From 211f3959f232a25733fd4ab96656d1e61a8c456c Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 15 May 2024 02:46:25 +0800 Subject: [PATCH 1574/2301] [CIR][CIRGen] Add CIRGen support for float16 and bfloat (#571) This PR adds two new CIR floating-point types, namely `!cir.f16` and `!cir.bf16`, to represent the float16 format and bfloat format, respectively. This PR converts the clang extension type `_Float16` to `!cir.f16`, and converts the clang extension type `__bf16` type to `!cir.bf16`. The type conversion for clang extension type `__fp16` is not included in this PR since it requires additional work during CIRGen. Only CIRGen is implemented here, LLVMIR lowering / MLIR lowering should come next. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 18 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 + clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 175 +++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 + clang/lib/CIR/CodeGen/CIRGenModule.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 7 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 4 +- .../CodeGen/UnimplementedFeatureGuarding.h | 2 + clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 42 + clang/test/CIR/CodeGen/bf16-ops.c | 986 ++++++++++++++++++ clang/test/CIR/CodeGen/fp16-ops.c | 986 ++++++++++++++++++ 11 files changed, 2191 insertions(+), 42 deletions(-) create mode 100644 clang/test/CIR/CodeGen/bf16-ops.c create mode 100644 clang/test/CIR/CodeGen/fp16-ops.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index ee8b266ed962..2babdee1d289 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -150,6 +150,20 @@ def CIR_Double : CIR_FloatType<"Double", "double"> { }]; } +def CIR_FP16 : CIR_FloatType<"FP16", "f16"> { + let summary = "CIR type that represents IEEE-754 binary16 format"; + let description = [{ + Floating-point type that represents the IEEE-754 binary16 format. + }]; +} + +def CIR_BFloat16 : CIR_FloatType<"BF16", "bf16"> { + let summary = "CIR type that represents"; + let description = [{ + Floating-point type that represents the bfloat16 format. + }]; +} + def CIR_FP80 : CIR_FloatType<"FP80", "f80"> { let summary = "CIR type that represents x87 80-bit floating-point format"; let description = [{ @@ -179,7 +193,7 @@ def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { // Constraints -def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_LongDouble]>; +def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_FP80, CIR_LongDouble]>; def CIR_AnyIntOrFloat: AnyTypeOf<[CIR_AnyFloat, CIR_IntType]>; //===----------------------------------------------------------------------===// @@ -475,7 +489,7 @@ def CIR_StructType : Type()">, def CIR_AnyType : AnyTypeOf<[ CIR_IntType, CIR_PointerType, CIR_DataMemberType, CIR_BoolType, CIR_ArrayType, CIR_VectorType, CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionInfo, - CIR_AnyFloat, + CIR_AnyFloat, CIR_FP16, CIR_BFloat16 ]>; #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 1489c4fb9a5b..3584f21eef4d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -250,6 +250,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::FPAttr::getZero(fltType); if (auto fltType = ty.dyn_cast()) return mlir::cir::FPAttr::getZero(fltType); + if (auto fltType = ty.dyn_cast()) + return mlir::cir::FPAttr::getZero(fltType); + if (auto fltType = ty.dyn_cast()) + return mlir::cir::FPAttr::getZero(fltType); if (auto arrTy = ty.dyn_cast()) return getZeroAttr(arrTy); if (auto ptrTy = ty.dyn_cast()) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index b3b42abc026e..6522a756effc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -115,6 +115,16 @@ class ScalarExprEmitter : public StmtVisitor { /// Emit a value that corresponds to null for the given type. mlir::Value buildNullValue(QualType Ty, mlir::Location loc); + mlir::Value buildPromotedValue(mlir::Value result, QualType PromotionType) { + return Builder.createFloatingCast(result, ConvertType(PromotionType)); + } + + mlir::Value buildUnPromotedValue(mlir::Value result, QualType ExprType) { + return Builder.createFloatingCast(result, ConvertType(ExprType)); + } + + mlir::Value buildPromoted(const Expr *E, QualType PromotionType); + //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// @@ -478,14 +488,45 @@ class ScalarExprEmitter : public StmtVisitor { } else if (type->isVectorType()) { llvm_unreachable("no vector inc/dec yet"); } else if (type->isRealFloatingType()) { - auto isFloatOrDouble = type->isSpecificBuiltinType(BuiltinType::Float) || - type->isSpecificBuiltinType(BuiltinType::Double); - assert(isFloatOrDouble && "Non-float/double NYI"); + // TODO(cir): CGFPOptionsRAII + assert(!UnimplementedFeature::CGFPOptionsRAII()); - // Create the inc/dec operation. - auto kind = - (isInc ? mlir::cir::UnaryOpKind::Inc : mlir::cir::UnaryOpKind::Dec); - value = buildUnaryOp(E, kind, input); + if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) + llvm_unreachable("__fp16 type NYI"); + + if (value.getType().isa()) { + // Create the inc/dec operation. + // NOTE(CIR): clang calls CreateAdd but folds this to a unary op + auto kind = + (isInc ? mlir::cir::UnaryOpKind::Inc : mlir::cir::UnaryOpKind::Dec); + value = buildUnaryOp(E, kind, input); + } else { + // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or + // __float128. Convert from float. + + llvm::APFloat F(static_cast(amount)); + bool ignored; + const llvm::fltSemantics *FS; + // Don't use getFloatTypeSemantics because Half isn't + // necessarily represented using the "half" LLVM type. + if (value.getType().isa()) + FS = &CGF.getTarget().getLongDoubleFormat(); + else if (value.getType().isa()) + FS = &CGF.getTarget().getHalfFormat(); + else if (value.getType().isa()) + FS = &CGF.getTarget().getBFloat16Format(); + else + llvm_unreachable("fp128 / ppc_fp128 NYI"); + F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored); + + auto loc = CGF.getLoc(E->getExprLoc()); + auto amt = Builder.getConstant( + loc, mlir::cir::FPAttr::get(value.getType(), F)); + value = Builder.createBinop(value, mlir::cir::BinOpKind::Add, amt); + } + + if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) + llvm_unreachable("NYI"); } else if (type->isFixedPointType()) { llvm_unreachable("no fixed point inc/dec yet"); @@ -549,13 +590,14 @@ class ScalarExprEmitter : public StmtVisitor { return Visit(E->getSubExpr()); // the actual value should be unused return buildLoadOfLValue(E); } - mlir::Value VisitUnaryPlus(const UnaryOperator *E) { - // NOTE(cir): QualType function parameter still not used, so don´t replicate - // it here yet. - QualType promotionTy = getPromotionType(E->getSubExpr()->getType()); + mlir::Value VisitUnaryPlus(const UnaryOperator *E, + QualType PromotionType = QualType()) { + QualType promotionTy = PromotionType.isNull() + ? getPromotionType(E->getSubExpr()->getType()) + : PromotionType; auto result = VisitPlus(E, promotionTy); if (result && !promotionTy.isNull()) - assert(0 && "not implemented yet"); + result = buildUnPromotedValue(result, E->getType()); return buildUnaryOp(E, mlir::cir::UnaryOpKind::Plus, result); } @@ -563,7 +605,7 @@ class ScalarExprEmitter : public StmtVisitor { // This differs from gcc, though, most likely due to a bug in gcc. TestAndClearIgnoreResultAssign(); if (!PromotionType.isNull()) - assert(0 && "scalar promotion not implemented yet"); + return CGF.buildPromotedScalarExpr(E->getSubExpr(), PromotionType); return Visit(E->getSubExpr()); } @@ -573,14 +615,14 @@ class ScalarExprEmitter : public StmtVisitor { QualType promotionTy = getPromotionType(E->getSubExpr()->getType()); auto result = VisitMinus(E, promotionTy); if (result && !promotionTy.isNull()) - assert(0 && "not implemented yet"); + result = buildUnPromotedValue(result, E->getType()); return buildUnaryOp(E, mlir::cir::UnaryOpKind::Minus, result); } mlir::Value VisitMinus(const UnaryOperator *E, QualType PromotionType) { TestAndClearIgnoreResultAssign(); if (!PromotionType.isNull()) - assert(0 && "scalar promotion not implemented yet"); + return CGF.buildPromotedScalarExpr(E->getSubExpr(), PromotionType); // NOTE: LLVM codegen will lower this directly to either a FNeg // or a Sub instruction. In CIR this will be handled later in LowerToLLVM. @@ -752,18 +794,23 @@ class ScalarExprEmitter : public StmtVisitor { QualType DstType, mlir::Type SrcTy, mlir::Type DstTy, ScalarConversionOpts Opts); - BinOpInfo buildBinOps(const BinaryOperator *E) { + BinOpInfo buildBinOps(const BinaryOperator *E, + QualType PromotionType = QualType()) { BinOpInfo Result; - Result.LHS = Visit(E->getLHS()); - Result.RHS = Visit(E->getRHS()); - Result.FullType = E->getType(); - Result.CompType = E->getType(); - if (auto VecType = dyn_cast_or_null(E->getType())) { + Result.LHS = CGF.buildPromotedScalarExpr(E->getLHS(), PromotionType); + Result.RHS = CGF.buildPromotedScalarExpr(E->getRHS(), PromotionType); + if (!PromotionType.isNull()) + Result.FullType = PromotionType; + else + Result.FullType = E->getType(); + Result.CompType = Result.FullType; + if (const auto *VecType = dyn_cast_or_null(Result.FullType)) { Result.CompType = VecType->getElementType(); } Result.Opcode = E->getOpcode(); Result.Loc = E->getSourceRange(); // TODO: Result.FPFeatures + assert(!UnimplementedFeature::getFPFeaturesInEffect()); Result.E = E; return Result; } @@ -793,15 +840,22 @@ class ScalarExprEmitter : public StmtVisitor { if (auto *CT = Ty->getAs()) { llvm_unreachable("NYI"); } - if (Ty.UseExcessPrecision(CGF.getContext())) - llvm_unreachable("NYI"); + if (Ty.UseExcessPrecision(CGF.getContext())) { + if (auto *VT = Ty->getAs()) + llvm_unreachable("NYI"); + return CGF.getContext().FloatTy; + } return QualType(); } // Binary operators and binary compound assignment operators. #define HANDLEBINOP(OP) \ mlir::Value VisitBin##OP(const BinaryOperator *E) { \ - return build##OP(buildBinOps(E)); \ + QualType promotionTy = getPromotionType(E->getType()); \ + auto result = build##OP(buildBinOps(E, promotionTy)); \ + if (result && !promotionTy.isNull()) \ + result = buildUnPromotedValue(result, E->getType()); \ + return result; \ } \ mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *E) { \ return buildCompoundAssign(E, &ScalarExprEmitter::build##OP); \ @@ -1053,6 +1107,13 @@ mlir::Value CIRGenFunction::buildScalarExpr(const Expr *E) { return ScalarExprEmitter(*this, builder).Visit(const_cast(E)); } +mlir::Value CIRGenFunction::buildPromotedScalarExpr(const Expr *E, + QualType PromotionType) { + if (!PromotionType.isNull()) + return ScalarExprEmitter(*this, builder).buildPromoted(E, PromotionType); + return ScalarExprEmitter(*this, builder).Visit(const_cast(E)); +} + [[maybe_unused]] static bool MustVisitNullValue(const Expr *E) { // If a null pointer expression's type is the C++0x nullptr_t, then // it's not necessarily a simple constant and it must be evaluated @@ -1885,8 +1946,20 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( // Emit the RHS first. __block variables need to have the rhs evaluated // first, plus this should improve codegen a little. - OpInfo.RHS = Visit(E->getRHS()); - OpInfo.FullType = E->getComputationResultType(); + + QualType PromotionTypeCR = getPromotionType(E->getComputationResultType()); + if (PromotionTypeCR.isNull()) + PromotionTypeCR = E->getComputationResultType(); + + QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType()); + QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType()); + + if (!PromotionTypeRHS.isNull()) + OpInfo.RHS = CGF.buildPromotedScalarExpr(E->getRHS(), PromotionTypeRHS); + else + OpInfo.RHS = Visit(E->getRHS()); + + OpInfo.FullType = PromotionTypeCR; OpInfo.CompType = OpInfo.FullType; if (auto VecType = dyn_cast_or_null(OpInfo.FullType)) { OpInfo.CompType = VecType->getElementType(); @@ -1908,16 +1981,20 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( CIRGenFunction::SourceLocRAIIObject sourceloc{ CGF, CGF.getLoc(E->getSourceRange())}; SourceLocation Loc = E->getExprLoc(); - OpInfo.LHS = - buildScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc); + if (!PromotionTypeLHS.isNull()) + OpInfo.LHS = buildScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS, + E->getExprLoc()); + else + OpInfo.LHS = buildScalarConversion(OpInfo.LHS, LHSTy, + E->getComputationLHSType(), Loc); // Expand the binary operator. Result = (this->*Func)(OpInfo); // Convert the result back to the LHS type, // potentially with Implicit Conversion sanitizer check. - Result = buildScalarConversion(Result, E->getComputationResultType(), LHSTy, - Loc, ScalarConversionOpts(CGF.SanOpts)); + Result = buildScalarConversion(Result, PromotionTypeCR, LHSTy, Loc, + ScalarConversionOpts(CGF.SanOpts)); // Store the result value into the LHS lvalue. Bit-fields are handled // specially because the result is altered by the store, i.e., [C99 6.5.16p1] @@ -1938,6 +2015,44 @@ mlir::Value ScalarExprEmitter::buildNullValue(QualType Ty, mlir::Location loc) { return CGF.buildFromMemory(CGF.CGM.buildNullConstant(Ty, loc), Ty); } +mlir::Value ScalarExprEmitter::buildPromoted(const Expr *E, + QualType PromotionType) { + E = E->IgnoreParens(); + if (const auto *BO = dyn_cast(E)) { + switch (BO->getOpcode()) { +#define HANDLE_BINOP(OP) \ + case BO_##OP: \ + return build##OP(buildBinOps(BO, PromotionType)); + HANDLE_BINOP(Add) + HANDLE_BINOP(Sub) + HANDLE_BINOP(Mul) + HANDLE_BINOP(Div) +#undef HANDLE_BINOP + default: + break; + } + } else if (const auto *UO = dyn_cast(E)) { + switch (UO->getOpcode()) { + case UO_Imag: + case UO_Real: + llvm_unreachable("NYI"); + case UO_Minus: + return VisitMinus(UO, PromotionType); + case UO_Plus: + return VisitPlus(UO, PromotionType); + default: + break; + } + } + auto result = Visit(const_cast(E)); + if (result) { + if (!PromotionType.isNull()) + return buildPromotedValue(result, PromotionType); + return buildUnPromotedValue(result, E->getType()); + } + return result; +} + mlir::Value ScalarExprEmitter::buildCompoundAssign( const CompoundAssignOperator *E, mlir::Value (ScalarExprEmitter::*Func)(const BinOpInfo &)) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 5cfae69b4202..92f29b088050 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1086,6 +1086,9 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value buildScalarExpr(const clang::Expr *E); mlir::Value buildScalarConstant(const ConstantEmission &Constant, Expr *E); + mlir::Value buildPromotedScalarExpr(const clang::Expr *E, + QualType PromotionType); + mlir::Type getCIRType(const clang::QualType &type); const CaseStmt *foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index a10c3937de64..292a9c22da15 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -131,13 +131,11 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // Initialize CIR pointer types cache. VoidPtrTy = ::mlir::cir::PointerType::get(builder.getContext(), VoidTy); - // TODO: HalfTy - // TODO: BFloatTy + FP16Ty = ::mlir::cir::FP16Type::get(builder.getContext()); + BFloat16Ty = ::mlir::cir::BF16Type::get(builder.getContext()); FloatTy = ::mlir::cir::SingleType::get(builder.getContext()); DoubleTy = ::mlir::cir::DoubleType::get(builder.getContext()); FP80Ty = ::mlir::cir::FP80Type::get(builder.getContext()); - // TODO(cir): perhaps we should abstract long double variations into a custom - // cir.long_double type. Said type would also hold the semantics for lowering. // TODO: PointerWidthInBits PointerAlignInBytes = diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index 96d3ed851e8a..fd0f26e47af6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -34,10 +34,9 @@ struct CIRGenTypeCache { mlir::cir::IntType SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty; // usigned char, unsigned, unsigned short, unsigned long mlir::cir::IntType UInt8Ty, UInt16Ty, UInt32Ty, UInt64Ty; - /// half, bfloat, float, double - // mlir::Type HalfTy, BFloatTy; - // TODO(cir): perhaps we should abstract long double variations into a custom - // cir.long_double type. Said type would also hold the semantics for lowering. + /// half, bfloat, float, double, fp80 + mlir::cir::FP16Type FP16Ty; + mlir::cir::BF16Type BFloat16Ty; mlir::cir::SingleType FloatTy; mlir::cir::DoubleType DoubleTy; mlir::cir::FP80Type FP80Ty; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 157d68435571..6b3b0c2268fa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -464,14 +464,14 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; case BuiltinType::Float16: - ResultType = Builder.getF16Type(); + ResultType = CGM.FP16Ty; break; case BuiltinType::Half: // Should be the same as above? assert(0 && "not implemented"); break; case BuiltinType::BFloat16: - ResultType = Builder.getBF16Type(); + ResultType = CGM.BFloat16Ty; break; case BuiltinType::Float: ResultType = CGM.FloatTy; diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index e2e9f0a4d91f..0dcfe19107e0 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -130,6 +130,8 @@ struct UnimplementedFeature { static bool shouldEmitLifetimeMarkers() { return false; } static bool peepholeProtection() { return false; } static bool CGCapturedStmtInfo() { return false; } + static bool CGFPOptionsRAII() { return false; } + static bool getFPFeaturesInEffect() { return false; } static bool cxxABI() { return false; } static bool openCL() { return false; } static bool CUDA() { return false; } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 3443e69a8fb3..b3101f6b73ce 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -691,6 +691,48 @@ DoubleType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, return (uint64_t)(getWidth() / 8); } +const llvm::fltSemantics &FP16Type::getFloatSemantics() const { + return llvm::APFloat::IEEEhalf(); +} + +llvm::TypeSize +FP16Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return llvm::TypeSize::getFixed(getWidth()); +} + +uint64_t FP16Type::getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return (uint64_t)(getWidth() / 8); +} + +uint64_t +FP16Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return (uint64_t)(getWidth() / 8); +} + +const llvm::fltSemantics &BF16Type::getFloatSemantics() const { + return llvm::APFloat::BFloat(); +} + +llvm::TypeSize +BF16Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return llvm::TypeSize::getFixed(getWidth()); +} + +uint64_t BF16Type::getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return (uint64_t)(getWidth() / 8); +} + +uint64_t +BF16Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return (uint64_t)(getWidth() / 8); +} + const llvm::fltSemantics &FP80Type::getFloatSemantics() const { return llvm::APFloat::x87DoubleExtended(); } diff --git a/clang/test/CIR/CodeGen/bf16-ops.c b/clang/test/CIR/CodeGen/bf16-ops.c new file mode 100644 index 000000000000..6a55e9acfe09 --- /dev/null +++ b/clang/test/CIR/CodeGen/bf16-ops.c @@ -0,0 +1,986 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -o - %s | FileCheck --check-prefix=NONATIVE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -target-feature +fullbf16 -fclangir -emit-cir -o - %s | FileCheck --check-prefix=NATIVE %s + +volatile unsigned test; +volatile int i0; +volatile __bf16 h0 = 0.0, h1 = 1.0, h2; +volatile float f0, f1, f2; +volatile double d0; +short s0; + +void foo(void) { + test = (h0); + // NONATIVE: %{{.+}} = cir.cast(float_to_int, %{{.+}} : !cir.bf16), !u32i + // NATIVE: %{{.+}} = cir.cast(float_to_int, %{{.+}} : !cir.bf16), !u32i + + h0 = (test); + // NONATIVE: %{{.+}} = cir.cast(int_to_float, %{{.+}} : !u32i), !cir.bf16 + // NATIVE: %{{.+}} = cir.cast(int_to_float, %{{.+}} : !u32i), !cir.bf16 + + test = (!h1); + // NONATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.bf16), !cir.bool + // NONATIVE-NEXT: %[[#B:]] = cir.unary(not, %[[#A]]) : !cir.bool, !cir.bool + // NONATIVE-NEXT: %[[#C:]] = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.bf16), !cir.bool + // NATIVE-NEXT: %[[#B:]] = cir.unary(not, %[[#A]]) : !cir.bool, !cir.bool + // NATIVE-NEXT: %[[#C:]] = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + h1 = -h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: %{{.+}} = cir.unary(minus, %[[#B]]) : !cir.bf16, !cir.bf16 + + // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.bf16 + // NATIVE: %{{.+}} = cir.unary(minus, %{{.+}}) : !cir.bf16, !cir.bf16 + + h1 = +h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: %{{.+}} = cir.unary(plus, %[[#B]]) : !cir.bf16, !cir.bf16 + + // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.bf16 + // NATIVE: %{{.+}} = cir.unary(plus, %{{.+}}) : !cir.bf16, !cir.bf16 + + h1++; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.bf16 + // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.bf16 + // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + + ++h1; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.bf16 + // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.bf16 + // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + + --h1; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.bf16 + // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.bf16 + // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + + h1--; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.bf16 + // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.bf16 + // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + + h1 = h0 * h2; + // NONATIVE: %[[#LHS:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#RHS:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHS]], %[[#RHS]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#A]] : !cir.float), !cir.bf16 + + // NATIVE: %{{.+}} = cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.bf16 + + h1 = h0 * (__bf16) -2.0f; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.unary(minus, %[[#B]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.binop(mul, %[[#A]], %[[#E]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#F]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NATIVE-NEXT: %{{.+}} = cir.binop(mul, %{{.+}}, %[[#C]]) : !cir.bf16 + + h1 = h0 * f2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + h1 = f0 * h2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + h1 = h0 * i0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(mul, %[[#A]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#D]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %{{.+}} = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.bf16 + + h1 = (h0 / h2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(div, %[[#A]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + + // NATIVE: %{{.+}} = cir.binop(div, %{{.+}}, %{{.+}}) : !cir.bf16 + + h1 = (h0 / (__bf16) -2.0f); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.unary(minus, %[[#B]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.binop(div, %[[#A]], %[[#E]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#F]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NATIVE-NEXT: %{{.+}} = cir.binop(div, %{{.+}}, %[[#C]]) : !cir.bf16 + + h1 = (h0 / f2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + h1 = (f0 / h2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + h1 = (h0 / i0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(div, %[[#A]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#D]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %{{.+}} = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.bf16 + + h1 = (h2 + h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(add, %[[#A]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + + // NATIVE: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.bf16 + + h1 = ((__bf16)-2.0 + h0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.bf16 + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.bf16), !cir.float + // NONATIVE: %[[#E:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.binop(add, %[[#D]], %[[#E]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#F]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.bf16 + // NATIVE: %{{.+}} = cir.binop(add, %[[#C]], %{{.+}}) : !cir.bf16 + + h1 = (h2 + f0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + h1 = (f2 + h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + h1 = (h0 + i0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(add, %[[#A]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#D]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + + h1 = (h2 - h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(sub, %[[#A]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + + // NATIVE: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.bf16 + + h1 = ((__bf16)-2.0f - h0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.bf16), !cir.float + // NONATIVE: %[[#E:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.binop(sub, %[[#D]], %[[#E]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#F]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NATIVE: %{{.+}} = cir.binop(sub, %[[#C]], %{{.+}}) : !cir.bf16 + + h1 = (h2 - f0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + h1 = (f2 - h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + + h1 = (h0 - i0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(sub, %[[#A]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#D]] : !cir.float), !cir.bf16 + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %{{.+}} = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.bf16 + + test = (h2 < h0); + // NONATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = (h2 < (__bf16)42.0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + test = (h2 < f0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f2 < h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 < h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 < i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 > h2); + // NONATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = ((__bf16)42.0 > h2); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NONATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + test = (h0 > f2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f0 > h2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 > h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 > i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h2 <= h0); + // NONATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = (h2 <= (__bf16)42.0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + test = (h2 <= f0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f2 <= h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 <= h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 <= i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 >= h2); + // NONATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-NEXT: %{{.+}} = cir.get_global @test : !cir.ptr + + // NATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = (h0 >= (__bf16)-2.0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.bf16 + // NONATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.bf16 + // NATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i + + test = (h0 >= f2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f0 >= h2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 >= h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 >= i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h1 == h2); + // NONATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = (h1 == (__bf16)1.0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + test = (h1 == f1); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f1 == h1); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 == h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 == i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h1 != h2); + // NONATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = (h1 != (__bf16)1.0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + test = (h1 != f1); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f1 != h1); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 != h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 != i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + h1 = (h1 ? h2 : h0); + // NONATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.bf16), !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.ternary(%[[#A]], true { + // NONATIVE: cir.yield %{{.+}} : !cir.bf16 + // NONATIVE-NEXT: }, false { + // NONATIVE: cir.yield %{{.+}} : !cir.bf16 + // NONATIVE-NEXT: }) : (!cir.bool) -> !cir.bf16 + // NONATIVE: %{{.+}} = cir.get_global @h1 : !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.bf16), !cir.bool + // NATIVE-NEXT: %[[#B:]] = cir.ternary(%[[#A]], true { + // NATIVE: cir.yield %{{.+}} : !cir.bf16 + // NATIVE-NEXT: }, false { + // NATIVE: cir.yield %{{.+}} : !cir.bf16 + // NATIVE-NEXT: }) : (!cir.bool) -> !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.get_global @h1 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.bf16, !cir.ptr + + h0 = h1; + // NONATIVE: %[[#A:]] = cir.get_global @h1 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @h1 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.bf16, !cir.ptr + + h0 = (__bf16)-2.0f; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + + h0 = f0; + // NONATIVE: %[[#A:]] = cir.get_global @f0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @f0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + + h0 = i0; + // NONATIVE: %[[#A:]] = cir.get_global @i0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !s32i + // NONATIVE-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @i0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !s32i + // NATIVE-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + + i0 = h0; + // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.bf16), !s32i + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @i0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !s32i, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.bf16), !s32i + // NATIVE-NEXT: %[[#D:]] = cir.get_global @i0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !s32i, !cir.ptr + + h0 += h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(add, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 += (__bf16)1.0f; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.bf16), !cir.float + // NONATIVE: %[[#D:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.binop(add, %[[#D]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.cast(floating, %[[#E]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#F]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.bf16 + // NATIVE: %[[#C:]] = cir.binop(add, %{{.+}}, %[[#B]]) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 += f2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + i0 += h0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(add, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(float_to_int, %[[#C]] : !cir.float), !s32i + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !s32i, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.bf16), !s32i + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + h0 += i0; + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(add, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 -= h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(sub, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 -= (__bf16)1.0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(sub, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NATIVE: %[[#C:]] = cir.binop(sub, %{{.+}}, %[[#B]]) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 -= f2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + i0 -= h0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(sub, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(float_to_int, %[[#C]] : !cir.float), !s32i + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !s32i, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.bf16), !s32i + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + h0 -= i0; + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(sub, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 *= h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(mul, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 *= (__bf16)1.0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(mul, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NATIVE: %[[#C:]] = cir.binop(mul, %{{.+}}, %[[#B]]) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 *= f2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + i0 *= h0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(mul, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(float_to_int, %[[#C]] : !cir.float), !s32i + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !s32i, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.bf16), !s32i + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + h0 *= i0; + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(mul, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 /= h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(div, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.binop(div, %{{.+}}, %{{.+}}) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 /= (__bf16)1.0; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.bf16), !cir.float + // NONATIVE: %[[#D:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.binop(div, %[[#D]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.cast(floating, %[[#E]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#F]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 + // NATIVE: %[[#C:]] = cir.binop(div, %{{.+}}, %[[#B]]) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 /= f2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + + i0 /= h0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(div, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(float_to_int, %[[#C]] : !cir.float), !s32i + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !s32i, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE-NEXT: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.bf16), !s32i + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + h0 /= i0; + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(div, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 + // NATIVE: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.bf16 + // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.bf16, !cir.ptr + + h0 = d0; + // NONATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.bf16 + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.bf16 + // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + + h0 = (float)d0; + // NONATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + // NONATIVE-NEXT: %[[#E:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.float + // NATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.bf16 + // NATIVE-NEXT: %[[#E:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.bf16, !cir.ptr + + d0 = h0; + // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.bf16), !cir.double + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @d0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.double, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.bf16), !cir.double + // NATIVE-NEXT: %[[#D:]] = cir.get_global @d0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.double, !cir.ptr + + d0 = (float)h0; + // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.bf16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.double + // NONATIVE-NEXT: %[[#E:]] = cir.get_global @d0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.double, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.bf16), !cir.float + // NATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.double + // NATIVE-NEXT: %[[#E:]] = cir.get_global @d0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.double, !cir.ptr + + h0 = s0; + // NONATIVE: %[[#A:]] = cir.get_global @s0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr, !s16i + // NONATIVE-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s16i), !cir.bf16 + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @s0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr, !s16i + // NATIVE-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s16i), !cir.bf16 + // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr +} diff --git a/clang/test/CIR/CodeGen/fp16-ops.c b/clang/test/CIR/CodeGen/fp16-ops.c new file mode 100644 index 000000000000..e39b4fd4e9a9 --- /dev/null +++ b/clang/test/CIR/CodeGen/fp16-ops.c @@ -0,0 +1,986 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -o - %s | FileCheck --check-prefix=NONATIVE %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fnative-half-type -fclangir -emit-cir -o - %s | FileCheck --check-prefix=NATIVE %s + +volatile unsigned test; +volatile int i0; +volatile _Float16 h0 = 0.0, h1 = 1.0, h2; +volatile float f0, f1, f2; +volatile double d0; +short s0; + +void foo(void) { + test = (h0); + // NONATIVE: %{{.+}} = cir.cast(float_to_int, %{{.+}} : !cir.f16), !u32i + // NATIVE: %{{.+}} = cir.cast(float_to_int, %{{.+}} : !cir.f16), !u32i + + h0 = (test); + // NONATIVE: %{{.+}} = cir.cast(int_to_float, %{{.+}} : !u32i), !cir.f16 + // NATIVE: %{{.+}} = cir.cast(int_to_float, %{{.+}} : !u32i), !cir.f16 + + test = (!h1); + // NONATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.f16), !cir.bool + // NONATIVE-NEXT: %[[#B:]] = cir.unary(not, %[[#A]]) : !cir.bool, !cir.bool + // NONATIVE-NEXT: %[[#C:]] = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.f16), !cir.bool + // NATIVE-NEXT: %[[#B:]] = cir.unary(not, %[[#A]]) : !cir.bool, !cir.bool + // NATIVE-NEXT: %[[#C:]] = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + h1 = -h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: %{{.+}} = cir.unary(minus, %[[#B]]) : !cir.f16, !cir.f16 + + // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.f16 + // NATIVE: %{{.+}} = cir.unary(minus, %{{.+}}) : !cir.f16, !cir.f16 + + h1 = +h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: %{{.+}} = cir.unary(plus, %[[#B]]) : !cir.f16, !cir.f16 + + // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.f16 + // NATIVE: %{{.+}} = cir.unary(plus, %{{.+}}) : !cir.f16, !cir.f16 + + h1++; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.f16 + // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.f16 + // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + ++h1; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.f16 + // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.f16 + // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + --h1; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.f16 + // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.f16 + // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + h1--; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.f16 + // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.f16 + // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + h1 = h0 * h2; + // NONATIVE: %[[#LHS:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#RHS:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHS]], %[[#RHS]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#A]] : !cir.float), !cir.f16 + + // NATIVE: %{{.+}} = cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.f16 + + h1 = h0 * (_Float16) -2.0f; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.unary(minus, %[[#B]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.binop(mul, %[[#A]], %[[#E]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#F]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NATIVE-NEXT: %{{.+}} = cir.binop(mul, %{{.+}}, %[[#C]]) : !cir.f16 + + h1 = h0 * f2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + h1 = f0 * h2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + h1 = h0 * i0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(mul, %[[#A]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#D]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %{{.+}} = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.f16 + + h1 = (h0 / h2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(div, %[[#A]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + + // NATIVE: %{{.+}} = cir.binop(div, %{{.+}}, %{{.+}}) : !cir.f16 + + h1 = (h0 / (_Float16) -2.0f); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.unary(minus, %[[#B]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.binop(div, %[[#A]], %[[#E]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#F]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NATIVE-NEXT: %{{.+}} = cir.binop(div, %{{.+}}, %[[#C]]) : !cir.f16 + + h1 = (h0 / f2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + h1 = (f0 / h2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + h1 = (h0 / i0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(div, %[[#A]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#D]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %{{.+}} = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.f16 + + h1 = (h2 + h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(add, %[[#A]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + + // NATIVE: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.f16 + + h1 = ((_Float16)-2.0 + h0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.f16), !cir.float + // NONATIVE: %[[#E:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.binop(add, %[[#D]], %[[#E]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#F]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 + // NATIVE: %{{.+}} = cir.binop(add, %[[#C]], %{{.+}}) : !cir.f16 + + h1 = (h2 + f0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + h1 = (f2 + h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + h1 = (h0 + i0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(add, %[[#A]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#D]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + h1 = (h2 - h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(sub, %[[#A]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + + // NATIVE: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.f16 + + h1 = ((_Float16)-2.0f - h0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.f16), !cir.float + // NONATIVE: %[[#E:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.binop(sub, %[[#D]], %[[#E]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#F]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NATIVE: %{{.+}} = cir.binop(sub, %[[#C]], %{{.+}}) : !cir.f16 + + h1 = (h2 - f0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + h1 = (f2 - h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.float + // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + h1 = (h0 - i0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(sub, %[[#A]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#D]] : !cir.float), !cir.f16 + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %{{.+}} = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.f16 + + test = (h2 < h0); + // NONATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = (h2 < (_Float16)42.0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + test = (h2 < f0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f2 < h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 < h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 < i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 > h2); + // NONATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = ((_Float16)42.0 > h2); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NONATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + test = (h0 > f2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f0 > h2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 > h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 > i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h2 <= h0); + // NONATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = (h2 <= (_Float16)42.0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + test = (h2 <= f0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f2 <= h0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 <= h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 <= i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 >= h2); + // NONATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-NEXT: %{{.+}} = cir.get_global @test : !cir.ptr + + // NATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = (h0 >= (_Float16)-2.0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 + // NONATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 + // NATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i + + test = (h0 >= f2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f0 >= h2); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 >= h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 >= i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h1 == h2); + // NONATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = (h1 == (_Float16)1.0); + // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + test = (h1 == f1); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f1 == h1); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 == h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 == i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h1 != h2); + // NONATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + test = (h1 != (_Float16)1.0); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + test = (h1 != f1); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (f1 != h1); + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (i0 != h0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + test = (h0 != i0); + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + h1 = (h1 ? h2 : h0); + // NONATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.f16), !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.ternary(%[[#A]], true { + // NONATIVE: cir.yield %{{.+}} : !cir.f16 + // NONATIVE-NEXT: }, false { + // NONATIVE: cir.yield %{{.+}} : !cir.f16 + // NONATIVE-NEXT: }) : (!cir.bool) -> !cir.f16 + // NONATIVE: %{{.+}} = cir.get_global @h1 : !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.f16), !cir.bool + // NATIVE-NEXT: %[[#B:]] = cir.ternary(%[[#A]], true { + // NATIVE: cir.yield %{{.+}} : !cir.f16 + // NATIVE-NEXT: }, false { + // NATIVE: cir.yield %{{.+}} : !cir.f16 + // NATIVE-NEXT: }) : (!cir.bool) -> !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.get_global @h1 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.f16, !cir.ptr + + h0 = h1; + // NONATIVE: %[[#A:]] = cir.get_global @h1 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @h1 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.f16, !cir.ptr + + h0 = (_Float16)-2.0f; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + h0 = f0; + // NONATIVE: %[[#A:]] = cir.get_global @f0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @f0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + h0 = i0; + // NONATIVE: %[[#A:]] = cir.get_global @i0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !s32i + // NONATIVE-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @i0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !s32i + // NATIVE-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + i0 = h0; + // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @i0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !s32i, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // NATIVE-NEXT: %[[#D:]] = cir.get_global @i0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !s32i, !cir.ptr + + h0 += h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(add, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + + h0 += (_Float16)1.0f; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.float + // NONATIVE: %[[#D:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.binop(add, %[[#D]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.cast(floating, %[[#E]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#F]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.f16 + // NATIVE: %[[#C:]] = cir.binop(add, %{{.+}}, %[[#B]]) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + h0 += f2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + i0 += h0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(add, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(float_to_int, %[[#C]] : !cir.float), !s32i + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !s32i, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + h0 += i0; + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(add, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + + h0 -= h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(sub, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + + h0 -= (_Float16)1.0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(sub, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NATIVE: %[[#C:]] = cir.binop(sub, %{{.+}}, %[[#B]]) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + h0 -= f2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + i0 -= h0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(sub, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(float_to_int, %[[#C]] : !cir.float), !s32i + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !s32i, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + h0 -= i0; + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(sub, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + + h0 *= h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(mul, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + + h0 *= (_Float16)1.0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(mul, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NATIVE: %[[#C:]] = cir.binop(mul, %{{.+}}, %[[#B]]) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + h0 *= f2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + i0 *= h0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(mul, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(float_to_int, %[[#C]] : !cir.float), !s32i + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !s32i, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + h0 *= i0; + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(mul, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + + h0 /= h1; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(div, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.binop(div, %{{.+}}, %{{.+}}) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + + h0 /= (_Float16)1.0; + // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.float + // NONATIVE: %[[#D:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.binop(div, %[[#D]], %[[#C]]) : !cir.float + // NONATIVE-NEXT: %[[#F:]] = cir.cast(floating, %[[#E]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#F]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // NATIVE: %[[#C:]] = cir.binop(div, %{{.+}}, %[[#B]]) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + h0 /= f2; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + i0 /= h0; + // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float + // NONATIVE-NEXT: %[[#C:]] = cir.binop(div, %[[#B]], %[[#A]]) : !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(float_to_int, %[[#C]] : !cir.float), !s32i + // NONATIVE-NEXT: cir.store volatile %[[#D]], %{{.+}} : !s32i, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE-NEXT: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + h0 /= i0; + // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float + // NONATIVE: %[[#C:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.binop(div, %[[#C]], %[[#B]]) : !cir.float + // NONATIVE-NEXT: %[[#E:]] = cir.cast(floating, %[[#D]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: cir.store volatile %[[#E]], %{{.+}} : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // NATIVE: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.f16 + // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + + h0 = d0; + // NONATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 + // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + h0 = (float)d0; + // NONATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + // NONATIVE-NEXT: %[[#E:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.float + // NATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + // NATIVE-NEXT: %[[#E:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.f16, !cir.ptr + + d0 = h0; + // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.double + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @d0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.double, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.double + // NATIVE-NEXT: %[[#D:]] = cir.get_global @d0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.double, !cir.ptr + + d0 = (float)h0; + // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.float + // NONATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.double + // NONATIVE-NEXT: %[[#E:]] = cir.get_global @d0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.double, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.float + // NATIVE-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.double + // NATIVE-NEXT: %[[#E:]] = cir.get_global @d0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.double, !cir.ptr + + h0 = s0; + // NONATIVE: %[[#A:]] = cir.get_global @s0 : !cir.ptr + // NONATIVE-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr, !s16i + // NONATIVE-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s16i), !cir.f16 + // NONATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NONATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // NATIVE: %[[#A:]] = cir.get_global @s0 : !cir.ptr + // NATIVE-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr, !s16i + // NATIVE-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s16i), !cir.f16 + // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr +} From 06845cd507bc53a14751215e3832819ceb5dfb9b Mon Sep 17 00:00:00 2001 From: Twice Date: Sat, 18 May 2024 11:30:40 +0900 Subject: [PATCH 1575/2301] [CIR][CIRGen] Support for dereferencing void pointers (#595) In this PR, we support for dereferencing void pointers as a GNU C extension. This include two modification: - In CIRGen, we support to build ReturnStmt with void return type. - In LowerToLLVM, we support to lower CIR load with void result type to LLVM. It's a part of https://github.com/llvm/clangir/issues/579, since I would like to split it to two tasks: - support pointer arithmetic for function types (#594) - **support to dereference void pointer (this PR)** --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 15 ++++++++--- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 2 +- clang/test/CIR/CodeGen/pointer-arith-ext.c | 30 +++++++++++++++++++--- 3 files changed, 40 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 9e27d77ede6a..e91d32462c11 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2557,9 +2557,18 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, llvm_unreachable("NYI: Special treatment of 3-element vector load"); } - mlir::cir::LoadOp Load = builder.create( - Loc, Addr.getElementType(), Addr.getPointer(), /* deref */ false, - Volatile, ::mlir::cir::MemOrderAttr{}); + auto Ptr = Addr.getPointer(); + auto ElemTy = Addr.getElementType(); + if (ElemTy.isa()) { + ElemTy = mlir::cir::IntType::get(builder.getContext(), 8, true); + auto ElemPtrTy = mlir::cir::PointerType::get(builder.getContext(), ElemTy); + Ptr = builder.create(Loc, ElemPtrTy, + mlir::cir::CastKind::bitcast, Ptr); + } + + mlir::cir::LoadOp Load = + builder.create(Loc, ElemTy, Ptr, /* deref */ false, + Volatile, ::mlir::cir::MemOrderAttr{}); if (isNontemporal) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 019df15e1ce4..9ee61498e182 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -487,7 +487,7 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { // Make sure not to return anything, but evaluate the expression // for side effects. if (RV) { - assert(0 && "not implemented"); + buildAnyExpr(RV); } } else if (!RV) { // Do nothing (return value is left uninitialized) diff --git a/clang/test/CIR/CodeGen/pointer-arith-ext.c b/clang/test/CIR/CodeGen/pointer-arith-ext.c index f64915b0569c..558ad823cae4 100644 --- a/clang/test/CIR/CodeGen/pointer-arith-ext.c +++ b/clang/test/CIR/CodeGen/pointer-arith-ext.c @@ -82,9 +82,33 @@ FP f7(FP a, int b) { return a - b; } // Similar to f7, just make sure it does not crash. FP f7_1(FP a, int b) { return (a -= b); } -// FIXME: add support for the remaining ones. -// void f8(void *a, int b) { return *(a + b); } -// void f8_1(void *a, int b) { return a[b]; } +void f8(void *a, int b) { return *(a + b); } +// CIR-LABEL: f8 +// CIR: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CIR: %[[STRIDE:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CIR: cir.ptr_stride(%[[PTR]] : !cir.ptr, %[[STRIDE]] : !s32i) +// CIR: cir.return + +// LLVM-LABEL: f8 +// LLVM: %[[PTR:.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: %[[TOEXT:.*]] = load i32, ptr {{.*}}, align 4 +// LLVM: %[[STRIDE:.*]] = sext i32 %[[TOEXT]] to i64 +// LLVM: getelementptr i8, ptr %[[PTR]], i64 %[[STRIDE]] +// LLVM: ret void + +void f8_1(void *a, int b) { return a[b]; } +// CIR-LABEL: f8_1 +// CIR: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CIR: %[[STRIDE:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CIR: cir.ptr_stride(%[[PTR]] : !cir.ptr, %[[STRIDE]] : !s32i) +// CIR: cir.return + +// LLVM-LABEL: f8_1 +// LLVM: %[[PTR:.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: %[[TOEXT:.*]] = load i32, ptr {{.*}}, align 4 +// LLVM: %[[STRIDE:.*]] = sext i32 %[[TOEXT]] to i64 +// LLVM: getelementptr i8, ptr %[[PTR]], i64 %[[STRIDE]] +// LLVM: ret void unsigned char *p(unsigned int x) { unsigned char *p; From fdb40558981488e171999944450677264c83a27b Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Fri, 17 May 2024 23:32:01 -0300 Subject: [PATCH 1576/2301] [CIR][IR] Fix FuncOp duplicate attr printing (#609) This patch ensures that only the pretty-print version of function param and result attributes is printed. The tailing dictionary attributes are no longer printed. It also ensures some FuncOp tests are properly validating both parsing and printing. --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 2 ++ clang/test/CIR/IR/func.cir | 21 ++++++++++++++++----- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 37393f1e5350..5c1d310a9fdd 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2089,6 +2089,8 @@ void cir::FuncOp::print(OpAsmPrinter &p) { getLinkageAttrName(), getNoProtoAttrName(), getSymVisibilityAttrName(), + getArgAttrsAttrName(), + getResAttrsAttrName(), }); if (auto aliaseeName = getAliasee()) { diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir index 1ebee93ba2b1..a1468e6462f4 100644 --- a/clang/test/CIR/IR/func.cir +++ b/clang/test/CIR/IR/func.cir @@ -6,8 +6,12 @@ module { cir.return } + // Should print/parse function aliases. + // CHECK: cir.func @l1() alias(@l0) cir.func @l1() alias(@l0) + // Should print/parse variadic function types. + // CHECK: cir.func private @variadic(!s32i, ...) -> !s32i cir.func private @variadic(!s32i, ...) -> !s32i // Should accept call with only the required parameters. @@ -28,18 +32,25 @@ module { cir.return } - // Should parse void return types. + // Should drop void return types. + // CHECK: cir.func @parse_explicit_void_func() { cir.func @parse_explicit_void_func() -> !cir.void { cir.return } - // Should parse omitted void return type. + // Should print/parse omitted void return type. + // CHECK: cir.func @parse_func_type_with_omitted_void() { cir.func @parse_func_type_with_omitted_void() { cir.return } - // Should parse variadic no-proto functions. + // Should print/parse variadic no-proto functions. + // CHECK: cir.func no_proto private @no_proto(...) -> !s32i cir.func no_proto private @no_proto(...) -> !s32i -} -// CHECK: cir.func @l0() + // Should print/parse argument and result attributes. + // CHECK: cir.func @parse_arg_res_attrs(%arg0: !u8i {cir.zeroext}) -> (!u8i {cir.zeroext}) { + cir.func @parse_arg_res_attrs(%0: !u8i {cir.zeroext}) -> (!u8i {cir.zeroext}) { + cir.return %0 : !u8i + } +} From 0e8f3f33afcca0d9abab8f762988dd23aa23041e Mon Sep 17 00:00:00 2001 From: Twice Date: Sat, 18 May 2024 13:09:27 +0900 Subject: [PATCH 1577/2301] [NFC][CIR] Complete CIR check in test case ctor-member-lvalue-to-rvalue (#608) Seems the FIXME has been solved since I've confirmed that these CHECK can pass now. Remove the FIXME and recover these CHECK. --- .../CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index a777b1ef3dd4..70fff5a81dfe 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -25,13 +25,11 @@ struct String { void foo() { String s; String s1{s}; - // FIXME: s1 shouldn't be uninitialized. - - // cir.func @_Z3foov() { - // %0 = cir.alloca !ty_22String22, !cir.ptr, ["s"] {alignment = 8 : i64} - // %1 = cir.alloca !ty_22String22, !cir.ptr, ["s1"] {alignment = 8 : i64} - // cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () - // cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () - // cir.return - // } } +// CHECK: cir.func @_Z3foov() {{.*}} { +// CHECK: %0 = cir.alloca !ty_22String22, !cir.ptr, ["s", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_22String22, !cir.ptr, ["s1", init] {alignment = 8 : i64} +// CHECK: cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.return +// } From 1acdf099c5ab126fdc60b67144d2cc8323a37e48 Mon Sep 17 00:00:00 2001 From: 7mile Date: Sat, 18 May 2024 12:11:06 +0800 Subject: [PATCH 1578/2301] [CIR][CIRGen] Fix compound assignment for vector types (#610) There is [a code path](https://github.com/llvm/clangir/blob/3da10fafac66ff125fb59c602e41ad4b4f5cb382/clang/lib/CodeGen/CGExpr.cpp#L2190) missing the counterpart in CIRGen of vector types. When using compound assignments like `a[0] += a[1]`, this code path is activated and end up with NYI. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 7 +++++++ clang/test/CIR/CodeGen/vectype.cpp | 12 ++++++++++++ clang/test/CIR/Lowering/vectype.cpp | 12 ++++++++++++ 3 files changed, 31 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index e91d32462c11..45c0a813bcd6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -647,6 +647,13 @@ RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { if (LV.isSimple()) return RValue::get(buildLoadOfScalar(LV, Loc)); + + if (LV.isVectorElt()) { + auto load = builder.createLoad(getLoc(Loc), LV.getVectorAddress()); + return RValue::get(builder.create( + getLoc(Loc), load, LV.getVectorIdx())); + } + llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index 0496e58097a1..fddfba552619 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -43,6 +43,18 @@ void vector_int_test(int x) { // CHECK: %[[#UPDATEDVI:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVI]][%{{[0-9]+}} : !s32i] : !cir.vector // CHECK: cir.store %[[#UPDATEDVI]], %[[#STORAGEVI]] : !cir.vector, !cir.ptr> + // Compound assignment + a[x] += a[0]; + // CHECK: %[[#LOADCA1:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector + // CHECK: %[[#RHSCA:]] = cir.vec.extract %[[#LOADCA1]][%{{[0-9]+}} : !s32i] : !cir.vector + // CHECK: %[[#LOADCAIDX2:]] = cir.load %{{[0-9]+}} : !cir.ptr, !s32i + // CHECK: %[[#LOADCAVEC3:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector + // CHECK: %[[#LHSCA:]] = cir.vec.extract %[[#LOADCAVEC3]][%[[#LOADCAIDX2]] : !s32i] : !cir.vector + // CHECK: %[[#SUMCA:]] = cir.binop(add, %[[#LHSCA]], %[[#RHSCA]]) : !s32i + // CHECK: %[[#LOADCAVEC4:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector + // CHECK: %[[#RESULTCAVEC:]] = cir.vec.insert %[[#SUMCA]], %[[#LOADCAVEC4]][%[[#LOADCAIDX2]] : !s32i] : !cir.vector + // CHECK: cir.store %[[#RESULTCAVEC]], %{{[0-9]+}} : !cir.vector, !cir.ptr> + // Binary arithmetic operations vi4 d = a + b; // CHECK: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index 8e4a758543b5..3679c4b9d802 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -75,6 +75,18 @@ void vector_int_test(int x) { // CHECK: %[[#T64:]] = llvm.insertelement %[[#T61]], %[[#T63]][%[[#T62]] : i32] : vector<4xi32> // CHECK: llvm.store %[[#T64]], %[[#T3]] : vector<4xi32>, !llvm.ptr + // Compound assignment + a[x] += a[0]; + // CHECK: %[[#LOADCA1:]] = llvm.load %{{[0-9]+}} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#RHSCA:]] = llvm.extractelement %[[#LOADCA1:]][%{{[0-9]+}} : i32] : vector<4xi32> + // CHECK: %[[#LOADCAIDX2:]] = llvm.load %{{[0-9]+}} : !llvm.ptr -> i32 + // CHECK: %[[#LOADCAVEC3:]] = llvm.load %{{[0-9]+}} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#LHSCA:]] = llvm.extractelement %[[#LOADCAVEC3:]][%[[#LOADCAIDX2:]] : i32] : vector<4xi32> + // CHECK: %[[#SUMCA:]] = llvm.add %[[#LHSCA:]], %[[#RHSCA:]] : i32 + // CHECK: %[[#LOADCAVEC4:]] = llvm.load %{{[0-9]+}} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#RESULTCAVEC:]] = llvm.insertelement %[[#SUMCA:]], %[[#LOADCAVEC4:]][%[[#LOADCAIDX2:]] : i32] : vector<4xi32> + // CHECK: llvm.store %[[#RESULTCAVEC:]], %{{[0-9]+}} : vector<4xi32>, !llvm.ptr + // Binary arithmetic operators. vi4 d = a + b; // CHECK: %[[#T65:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> From 602fe1f29c8a0269ed7e84220bdb698011a60f21 Mon Sep 17 00:00:00 2001 From: Jing Zhang <625740704@qq.com> Date: Sat, 18 May 2024 12:27:23 +0800 Subject: [PATCH 1579/2301] [CIR][CIRGen] Support for signed #cir.ptr (#598) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The constant initialization isn't related to the pointee. We should be able to write #cir.ptr<-1 : i64> : !cir.ptr --- .../clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 6 ++++-- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 8 ++++---- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 ++-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 +--- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 5 ++--- clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 4 +++- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 15 ++++++++------- clang/lib/CIR/Dialect/Transforms/LibOpt.cpp | 3 +-- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 ++++++-- clang/test/CIR/CodeGen/constptr.c | 2 +- clang/test/CIR/CodeGen/vbase.cpp | 2 +- clang/test/CIR/IR/constptrattr.cir | 6 ++++-- clang/test/CIR/Lowering/types.cir | 6 +++++- 13 files changed, 42 insertions(+), 31 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 07cebf4948e6..dc61fe08e96f 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -360,9 +360,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return create(loc, condBuilder, bodyBuilder, stepBuilder); } - mlir::TypedAttr getConstPtrAttr(mlir::Type t, uint64_t v) { + mlir::TypedAttr getConstPtrAttr(mlir::Type t, int64_t v) { + auto val = + mlir::IntegerAttr::get(mlir::IntegerType::get(t.getContext(), 64), v); return mlir::cir::ConstPtrAttr::get(getContext(), - t.cast(), v); + t.cast(), val); } // Creates constant nullptr for pointer type ty. diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 6c015d45ec70..8c68c54a4159 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -250,22 +250,22 @@ def ConstPtrAttr : CIR_Attr<"ConstPtr", "ptr", [TypedAttrInterface]> { let summary = "Holds a constant pointer value"; let parameters = (ins AttributeSelfTypeParameter<"", "::mlir::cir::PointerType">:$type, - "uint64_t":$value); + "mlir::IntegerAttr":$value); let description = [{ A pointer attribute is a literal attribute that represents an integral value of a pointer type. }]; let builders = [ - AttrBuilderWithInferredContext<(ins "Type":$type, "uint64_t":$value), [{ + AttrBuilderWithInferredContext<(ins "Type":$type, "mlir::IntegerAttr":$value), [{ return $_get(type.getContext(), type.cast(), value); }]>, AttrBuilder<(ins "Type":$type, - "uint64_t":$value), [{ + "mlir::IntegerAttr":$value), [{ return $_get($_ctxt, type.cast(), value); }]>, ]; let extraClassDeclaration = [{ - bool isNullValue() const { return getValue() == 0; } + bool isNullValue() const { return getValue().getInt() == 0; } }]; let assemblyFormat = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 3584f21eef4d..505fb275511c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -146,7 +146,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::TypedAttr getConstNullPtrAttr(mlir::Type t) { assert(t.isa() && "expected cir.ptr"); - return mlir::cir::ConstPtrAttr::get(getContext(), t, 0); + return getConstPtrAttr(t, 0); } mlir::Attribute getString(llvm::StringRef str, mlir::Type eltTy, @@ -257,7 +257,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { if (auto arrTy = ty.dyn_cast()) return getZeroAttr(arrTy); if (auto ptrTy = ty.dyn_cast()) - return getConstPtrAttr(ptrTy, 0); + return getConstNullPtrAttr(ptrTy); if (auto structTy = ty.dyn_cast()) return getZeroAttr(structTy); if (ty.isa()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 6522a756effc..d1284d268a2c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1551,9 +1551,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // Note that DestTy is used as the MLIR type instead of a custom // nullptr type. mlir::Type Ty = CGF.getCIRType(DestTy); - return Builder.create( - CGF.getLoc(E->getExprLoc()), Ty, - mlir::cir::ConstPtrAttr::get(Builder.getContext(), Ty, 0)); + return Builder.getNullPtr(Ty, CGF.getLoc(E->getExprLoc())); } case CK_NullToMemberPointer: { diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index fb5a7ac876b9..319adf4619a8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -162,9 +162,8 @@ void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) { static void AddPointerLayoutOffset(CIRGenModule &CGM, ConstantArrayBuilder &builder, CharUnits offset) { - builder.add(mlir::cir::ConstPtrAttr::get(CGM.getBuilder().getContext(), - CGM.getBuilder().getUInt8PtrTy(), - offset.getQuantity())); + builder.add(CGM.getBuilder().getConstPtrAttr(CGM.getBuilder().getUInt8PtrTy(), + offset.getQuantity())); } static void AddRelativeLayoutOffset(CIRGenModule &CGM, diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h index 7a32aa591182..1bddd1323473 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -197,7 +197,9 @@ class ConstantAggregateBuilderBase { /// Add a pointer of a specific type. void addPointer(mlir::cir::PointerType ptrTy, uint64_t value) { - add(mlir::cir::ConstPtrAttr::get(ptrTy.getContext(), ptrTy, value)); + auto val = mlir::IntegerAttr::get( + mlir::IntegerType::get(ptrTy.getContext(), 64), value); + add(mlir::cir::ConstPtrAttr::get(ptrTy.getContext(), ptrTy, val)); } /// Add a bitcast of a value to a specific type. diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 21f2f7396c75..8e44cb1cf2b7 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -44,9 +44,9 @@ parseFloatLiteral(mlir::AsmParser &parser, mlir::FailureOr &value, mlir::Type ty); static mlir::ParseResult parseConstPtr(mlir::AsmParser &parser, - uint64_t &value); + mlir::IntegerAttr &value); -static void printConstPtr(mlir::AsmPrinter &p, uint64_t value); +static void printConstPtr(mlir::AsmPrinter &p, mlir::IntegerAttr value); #define GET_ATTRDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" @@ -220,18 +220,19 @@ void LangAttr::print(AsmPrinter &printer) const { // TODO: Consider encoding the null value differently and use conditional // assembly format instead of custom parsing/printing. -static ParseResult parseConstPtr(AsmParser &parser, uint64_t &value) { +static ParseResult parseConstPtr(AsmParser &parser, mlir::IntegerAttr &value) { if (parser.parseOptionalKeyword("null").succeeded()) { - value = 0; + value = mlir::IntegerAttr::get( + mlir::IntegerType::get(parser.getContext(), 64), 0); return success(); } - return parser.parseInteger(value); + return parser.parseAttribute(value); } -static void printConstPtr(AsmPrinter &p, uint64_t value) { - if (!value) +static void printConstPtr(AsmPrinter &p, mlir::IntegerAttr value) { + if (!value.getInt()) p << "null"; else p << value; diff --git a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp index 762ee961bcba..08c2586bc3cf 100644 --- a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp @@ -203,8 +203,7 @@ void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { // return result; // else // return last; - auto NullPtr = builder.create( - findOp.getLoc(), first.getType(), ConstPtrAttr::get(first.getType(), 0)); + auto NullPtr = builder.getNullPtr(first.getType(), findOp.getLoc()); auto CmpResult = builder.create( findOp.getLoc(), BoolType::get(builder.getContext()), CmpOpKind::eq, NullPtr.getRes(), MemChrResult); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ada50bc9a879..dba92e0a23f4 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -188,8 +188,10 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ConstPtrAttr ptrAttr, return rewriter.create( loc, converter->convertType(ptrAttr.getType())); } + mlir::DataLayout layout(parentOp->getParentOfType()); mlir::Value ptrVal = rewriter.create( - loc, rewriter.getI64Type(), ptrAttr.getValue()); + loc, rewriter.getIntegerType(layout.getTypeSizeInBits(ptrAttr.getType())), + ptrAttr.getValue().getInt()); return rewriter.create( loc, converter->convertType(ptrAttr.getType()), ptrVal); } @@ -740,10 +742,12 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case mlir::cir::CastKind::ptr_to_bool: { + auto zero = + mlir::IntegerAttr::get(mlir::IntegerType::get(getContext(), 64), 0); auto null = rewriter.create( src.getLoc(), castOp.getSrc().getType(), mlir::cir::ConstPtrAttr::get(getContext(), castOp.getSrc().getType(), - 0)); + zero)); rewriter.replaceOpWithNewOp( castOp, mlir::cir::BoolType::get(getContext()), mlir::cir::CmpOpKind::ne, castOp.getSrc(), null); diff --git a/clang/test/CIR/CodeGen/constptr.c b/clang/test/CIR/CodeGen/constptr.c index b400cb8c444f..e19f7574566b 100644 --- a/clang/test/CIR/CodeGen/constptr.c +++ b/clang/test/CIR/CodeGen/constptr.c @@ -4,5 +4,5 @@ int *p = (int*)0x1234; -// CIR: cir.global external @p = #cir.ptr<4660> : !cir.ptr +// CIR: cir.global external @p = #cir.ptr<4660 : i64> : !cir.ptr // LLVM: @p = global ptr inttoptr (i64 4660 to ptr) diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp index a966f82069c6..b32fb0eb59ab 100644 --- a/clang/test/CIR/CodeGen/vbase.cpp +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -15,7 +15,7 @@ void ppp() { B b; } // Vtable definition for B -// CIR: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr<12> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr]> : !cir.array x 3>}> +// CIR: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr<12 : i64> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr]> : !cir.array x 3>}> // VTT for B. // CIR: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 1> diff --git a/clang/test/CIR/IR/constptrattr.cir b/clang/test/CIR/IR/constptrattr.cir index 30b79a882ac1..98b215caacf7 100644 --- a/clang/test/CIR/IR/constptrattr.cir +++ b/clang/test/CIR/IR/constptrattr.cir @@ -2,7 +2,9 @@ !s32i = !cir.int -cir.global external @const_ptr = #cir.ptr<4660> : !cir.ptr -// CHECK: cir.global external @const_ptr = #cir.ptr<4660> : !cir.ptr +cir.global external @const_ptr = #cir.ptr<4660 : i64> : !cir.ptr +// CHECK: cir.global external @const_ptr = #cir.ptr<4660 : i64> : !cir.ptr +cir.global external @signed_ptr = #cir.ptr<-1> : !cir.ptr +// CHECK: cir.global external @signed_ptr = #cir.ptr<-1 : i64> : !cir.ptr cir.global external @null_ptr = #cir.ptr : !cir.ptr // CHECK: cir.global external @null_ptr = #cir.ptr : !cir.ptr diff --git a/clang/test/CIR/Lowering/types.cir b/clang/test/CIR/Lowering/types.cir index 5e5be9192e8a..f91f25cb5e41 100644 --- a/clang/test/CIR/Lowering/types.cir +++ b/clang/test/CIR/Lowering/types.cir @@ -2,7 +2,11 @@ // RUN: FileCheck --input-file=%t.mlir %s !void = !cir.void +!u8i = !cir.int module { + cir.global external @testVTable = #cir.vtable<{#cir.const_array<[#cir.ptr<-8> : !cir.ptr]> : !cir.array x 1>}> : !cir.struct x 1>}> + // CHECK: llvm.mlir.constant(-8 : i64) : i64 + // CHECK: llvm.inttoptr %{{[0-9]+}} : i64 to !llvm.ptr cir.func @testTypeLowering() { // Should lower void pointers as opaque pointers. %0 = cir.const #cir.ptr : !cir.ptr @@ -11,4 +15,4 @@ module { // CHECK: llvm.mlir.zero : !llvm.ptr cir.return } -} +} \ No newline at end of file From ef0e60caaf96482503a599bacb80980de47d144a Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 21 May 2024 03:12:27 +0800 Subject: [PATCH 1580/2301] [CIR][CIRGen][LowerToLLVM] Add address space attribute for pointer type (#606) This is the prelude of address space support. Linked issue: #418 . - Add the attribute and implement asm format & type conversion. - Make ops like `cir.global` and `cir.get_global` aware of address space, and solve the latter flag. - Relax the restriction of default alloca address space. Then we can use correct address spaces for languages like OpenCL in future. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 20 +++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 12 +++++------ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 ++- clang/test/CIR/CodeGen/address-space.c | 11 ++++++++++ clang/test/CIR/IR/address-space.cir | 11 ++++++++++ clang/test/CIR/Lowering/address-space.cir | 20 +++++++++++++++++++ 6 files changed, 68 insertions(+), 9 deletions(-) create mode 100644 clang/test/CIR/CodeGen/address-space.c create mode 100644 clang/test/CIR/IR/address-space.cir create mode 100644 clang/test/CIR/Lowering/address-space.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 2babdee1d289..ca7ed7730220 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -208,9 +208,25 @@ def CIR_PointerType : CIR_Type<"Pointer", "ptr", `CIR.ptr` is a type returned by any op generating a pointer in C++. }]; - let parameters = (ins "mlir::Type":$pointee); + let parameters = (ins "mlir::Type":$pointee, + DefaultValuedParameter<"unsigned", "0">:$addrSpace); - let assemblyFormat = "`<` $pointee `>`"; + let builders = [ + TypeBuilderWithInferredContext<(ins + "mlir::Type":$pointee, CArg<"unsigned", "0">:$addrSpace), [{ + return Base::get(pointee.getContext(), pointee, addrSpace); + }]>, + TypeBuilder<(ins + "mlir::Type":$pointee, CArg<"unsigned", "0">:$addrSpace), [{ + return Base::get($_ctxt, pointee, addrSpace); + }]>, + ]; + + let assemblyFormat = [{ + `<` $pointee ( `,` `addrspace` `(` $addrSpace^ `)` )? `>` + }]; + + let skipDefaultBuilders = 1; let extraClassDeclaration = [{ bool isVoidPtr() const { diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 6b3b0c2268fa..fd398c1128fe 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -598,9 +598,9 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { const ReferenceType *RTy = cast(Ty); QualType ETy = RTy->getPointeeType(); auto PointeeType = convertTypeForMem(ETy); - // TODO(cir): use Context.getTargetAddressSpace(ETy) on pointer - ResultType = - ::mlir::cir::PointerType::get(Builder.getContext(), PointeeType); + ResultType = ::mlir::cir::PointerType::get( + Builder.getContext(), PointeeType, + Context.getTargetAddressSpace(ETy.getAddressSpace())); assert(ResultType && "Cannot get pointer type?"); break; } @@ -615,9 +615,9 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { // if (PointeeType->isVoidTy()) // PointeeType = Builder.getI8Type(); - // FIXME: add address specifier to cir::PointerType? - ResultType = - ::mlir::cir::PointerType::get(Builder.getContext(), PointeeType); + ResultType = ::mlir::cir::PointerType::get( + Builder.getContext(), PointeeType, + Context.getTargetAddressSpace(ETy.getAddressSpace())); assert(ResultType && "Cannot get pointer type?"); break; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index dba92e0a23f4..a86459f62677 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3183,7 +3183,8 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, mlir::DataLayout &dataLayout) { converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { // Drop pointee type since LLVM dialect only allows opaque pointers. - return mlir::LLVM::LLVMPointerType::get(type.getContext()); + return mlir::LLVM::LLVMPointerType::get(type.getContext(), + type.getAddrSpace()); }); converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { auto ty = converter.convertType(type.getEltType()); diff --git a/clang/test/CIR/CodeGen/address-space.c b/clang/test/CIR/CodeGen/address-space.c new file mode 100644 index 000000000000..047aa25bbcc7 --- /dev/null +++ b/clang/test/CIR/CodeGen/address-space.c @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// XFAIL: * + +// CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr +// LLVM: define void @foo(ptr addrspace(1) %0) +void foo(int __attribute__((address_space(1))) *arg) { + return; +} diff --git a/clang/test/CIR/IR/address-space.cir b/clang/test/CIR/IR/address-space.cir new file mode 100644 index 000000000000..dde39bdd4d73 --- /dev/null +++ b/clang/test/CIR/IR/address-space.cir @@ -0,0 +1,11 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +!s32i = !cir.int + +module { + // CHECK: @test_addrspace_assembly_format(%arg0: !cir.ptr) + cir.func @test_addrspace_assembly_format(%arg0: !cir.ptr) { + cir.return + } +} diff --git a/clang/test/CIR/Lowering/address-space.cir b/clang/test/CIR/Lowering/address-space.cir new file mode 100644 index 000000000000..c7d5a84829c4 --- /dev/null +++ b/clang/test/CIR/Lowering/address-space.cir @@ -0,0 +1,20 @@ +// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +!s32i = !cir.int + +module { + // LLVM: define void @foo(ptr %0) + cir.func @foo(%arg0: !cir.ptr) { + // LLVM-NEXT: alloca ptr, + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["arg", init] {alignment = 8 : i64} + cir.return + } + + // LLVM: define void @bar(ptr addrspace(1) %0) + cir.func @bar(%arg0: !cir.ptr) { + // LLVM-NEXT: alloca ptr addrspace(1) + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["arg", init] {alignment = 8 : i64} + cir.return + } +} From c838592526991a2bdc0ce09e7718aa8ed7ae41fa Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 22 May 2024 06:06:18 +0800 Subject: [PATCH 1581/2301] [CIR][CIRGen][LLVMLowering] Vtable support for simple multiple inhertance without thunk (#569) This PR adds Vtable support for C++ multiple inheritance without thunk. This change contains the CIR codegen and lowering work: 1. `VTableAttr` should allow adding multiple `ArrayAttr` for multi-inheritance. 3. `VTableAddrPointOpLowering` has been fixed for the multi-vtable during the MLIR lowering phase. Example: ```c++ class Mother { virtual void MotherFoo() {} virtual void MotherFoo2() {} } class Father { virtual void FatherFoo() {} } class Child : public Mother, public Father { void MotherFoo() override {} } ``` ```mlir cir.global linkonce_odr @_ZTV5Child = #cir.vtable< {#cir.const_array<[ #cir.ptr : #!cir.ptr, #cir.global_view<@_ZTI5Child> : !cir.ptr, #cir.global_view<@_ZN5Child9MotherFooEv> : !cir.ptr, #cir.global_view<@_ZN6Mother10MotherFoo2Ev> : !cir.ptr]> : !cir.array x 4>, #cir.const_array<[ #cir.ptr<-8> : !cir.ptr, #cir.global_view<@_ZTI5Child> : !cir.ptr, #cir.global_view<@_ZN6Father9FatherFooEv> : !cir.ptr] > : !cir.array x 3>}> : !ty_anon_struct3 ``` --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 57 ++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 8 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 48 ++++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 23 +++- clang/test/CIR/CodeGen/multi-vtable.cpp | 122 ++++++++++++++++++ clang/test/CIR/IR/constptrattr.cir | 2 +- .../test/CIR/Lowering/ThroughMLIR/vtable.cir | 73 +++++++++++ 8 files changed, 294 insertions(+), 42 deletions(-) create mode 100644 clang/test/CIR/CodeGen/multi-vtable.cpp create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/vtable.cir diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index cd2b3c4e331d..829b2063a503 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -656,11 +656,49 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, } static Address ApplyNonVirtualAndVirtualOffset( - CIRGenFunction &CGF, Address addr, CharUnits nonVirtualOffset, - mlir::Value virtualOffset, const CXXRecordDecl *derivedClass, - const CXXRecordDecl *nearestVBase) { - llvm_unreachable("NYI"); - return Address::invalid(); + mlir::Location loc, CIRGenFunction &CGF, Address addr, + CharUnits nonVirtualOffset, mlir::Value virtualOffset, + const CXXRecordDecl *derivedClass, const CXXRecordDecl *nearestVBase) { + // Assert that we have something to do. + assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr); + + // Compute the offset from the static and dynamic components. + mlir::Value baseOffset; + if (!nonVirtualOffset.isZero()) { + mlir::Type OffsetType = + (CGF.CGM.getTarget().getCXXABI().isItaniumFamily() && + CGF.CGM.getItaniumVTableContext().isRelativeLayout()) + ? CGF.SInt32Ty + : CGF.PtrDiffTy; + baseOffset = CGF.getBuilder().getConstInt(loc, OffsetType, + nonVirtualOffset.getQuantity()); + if (virtualOffset) { + baseOffset = CGF.getBuilder().createBinop( + virtualOffset, mlir::cir::BinOpKind::Add, baseOffset); + } + } else { + baseOffset = virtualOffset; + } + + // Apply the base offset. + mlir::Value ptr = addr.getPointer(); + ptr = CGF.getBuilder().create(loc, ptr.getType(), ptr, + baseOffset); + + // If we have a virtual component, the alignment of the result will + // be relative only to the known alignment of that vbase. + CharUnits alignment; + if (virtualOffset) { + assert(nearestVBase && "virtual offset without vbase?"); + llvm_unreachable("NYI"); + // alignment = CGF.CGM.getVBaseAlignment(addr.getAlignment(), + // derivedClass, nearestVBase); + } else { + alignment = addr.getAlignment(); + } + alignment = alignment.alignmentAtOffset(nonVirtualOffset); + + return Address(ptr, alignment); } void CIRGenFunction::initializeVTablePointer(mlir::Location loc, @@ -687,8 +725,8 @@ void CIRGenFunction::initializeVTablePointer(mlir::Location loc, Address VTableField = LoadCXXThisAddress(); if (!NonVirtualOffset.isZero() || VirtualOffset) { VTableField = ApplyNonVirtualAndVirtualOffset( - *this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass, - Vptr.NearestVBase); + loc, *this, VTableField, NonVirtualOffset, VirtualOffset, + Vptr.VTableClass, Vptr.NearestVBase); } // Finally, store the address point. Use the same CIR types as the field. @@ -1415,8 +1453,9 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, } // Apply both offsets. - Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset, - VirtualOffset, Derived, VBase); + Value = ApplyNonVirtualAndVirtualOffset(getLoc(Loc), *this, Value, + NonVirtualOffset, VirtualOffset, + Derived, VBase); // Cast to the destination type. Value = builder.createElementBitCast(Value.getPointer().getLoc(), Value, BaseValueTy); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 1f95be2f542d..9f82686cb20b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -645,13 +645,12 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, builder.setInsertionPointToStart(EntryBB); { + // Initialize lexical scope information. LexicalScope lexScope{*this, fusedLoc, EntryBB}; // Emit the standard function prologue. StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin()); - // Initialize lexical scope information. - // Save parameters for coroutine function. if (Body && isa_and_nonnull(Body)) llvm::append_range(FnArgs, FD->parameters()); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 7cbf253d0104..4a9b168e578b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1758,9 +1758,11 @@ void CIRGenItaniumRTTIBuilder::BuildVMIClassTypeInfo(mlir::Location loc, if (Base.isVirtual()) Offset = CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset( RD, BaseDecl); - else - llvm_unreachable("Multi-inheritence NYI"); - + else { + const ASTRecordLayout &Layout = + CGM.getASTContext().getASTRecordLayout(RD); + Offset = Layout.getBaseClassOffset(BaseDecl); + } OffsetFlags = uint64_t(Offset.getQuantity()) << 8; // The low-order byte of __offset_flags contains flags, as given by the diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 5c1d310a9fdd..c0b6f2ef6910 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2790,32 +2790,38 @@ VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, emitError() << "expected !cir.struct type result"; return failure(); } - if (sTy.getMembers().size() != 1 || vtableData.size() != 1) { - emitError() << "expected struct type with only one subtype"; + if (sTy.getMembers().empty() || vtableData.empty()) { + emitError() << "expected struct type with one or more subtype"; return failure(); } - auto arrayTy = sTy.getMembers()[0].dyn_cast(); - auto constArrayAttr = vtableData[0].dyn_cast(); - if (!arrayTy || !constArrayAttr) { - emitError() << "expected struct type with one array element"; - return failure(); - } + for (size_t i = 0; i < sTy.getMembers().size(); ++i) { - if (mlir::cir::ConstStructAttr::verify(emitError, type, vtableData).failed()) - return failure(); + auto arrayTy = sTy.getMembers()[i].dyn_cast(); + auto constArrayAttr = vtableData[i].dyn_cast(); + if (!arrayTy || !constArrayAttr) { + emitError() << "expected struct type with one array element"; + return failure(); + } - LogicalResult eltTypeCheck = success(); - if (auto arrayElts = constArrayAttr.getElts().dyn_cast()) { - arrayElts.walkImmediateSubElements( - [&](Attribute attr) { - if (attr.isa() || attr.isa()) - return; - emitError() << "expected GlobalViewAttr attribute"; - eltTypeCheck = failure(); - }, - [&](Type type) {}); - return eltTypeCheck; + if (mlir::cir::ConstStructAttr::verify(emitError, type, vtableData) + .failed()) + return failure(); + + LogicalResult eltTypeCheck = success(); + if (auto arrayElts = constArrayAttr.getElts().dyn_cast()) { + arrayElts.walkImmediateSubElements( + [&](Attribute attr) { + if (attr.isa() || attr.isa()) + return; + emitError() << "expected GlobalViewAttr attribute"; + eltTypeCheck = failure(); + }, + [&](Type type) {}); + if (eltTypeCheck.failed()) { + return eltTypeCheck; + } + } } return success(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index a86459f62677..c41c7165fd5a 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2774,20 +2774,31 @@ class CIRVTableAddrPointOpLowering const auto *converter = getTypeConverter(); auto targetType = converter->convertType(op.getType()); mlir::Value symAddr = op.getSymAddr(); - + llvm::SmallVector offsets; mlir::Type eltType; if (!symAddr) { + // Get the vtable address point from a global variable auto module = op->getParentOfType(); - auto symbol = dyn_cast( - mlir::SymbolTable::lookupSymbolIn(module, op.getNameAttr())); + auto *symbol = + mlir::SymbolTable::lookupSymbolIn(module, op.getNameAttr()); + if (auto llvmSymbol = dyn_cast(symbol)) { + eltType = llvmSymbol.getType(); + } else if (auto cirSymbol = dyn_cast(symbol)) { + eltType = converter->convertType(cirSymbol.getSymType()); + } symAddr = rewriter.create( op.getLoc(), mlir::LLVM::LLVMPointerType::get(getContext()), *op.getName()); - eltType = converter->convertType(symbol.getType()); + offsets = llvm::SmallVector{ + 0, op.getVtableIndex(), op.getAddressPointIndex()}; + } else { + // Get indirect vtable address point retrieval + symAddr = adaptor.getSymAddr(); + eltType = converter->convertType(symAddr.getType()); + offsets = + llvm::SmallVector{op.getAddressPointIndex()}; } - auto offsets = llvm::SmallVector{ - 0, op.getVtableIndex(), op.getAddressPointIndex()}; if (eltType) rewriter.replaceOpWithNewOp(op, targetType, eltType, symAddr, offsets, true); diff --git a/clang/test/CIR/CodeGen/multi-vtable.cpp b/clang/test/CIR/CodeGen/multi-vtable.cpp new file mode 100644 index 000000000000..ad9e51500b6a --- /dev/null +++ b/clang/test/CIR/CodeGen/multi-vtable.cpp @@ -0,0 +1,122 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +class Mother { +public: + virtual void MotherFoo() {} + void simple() { } + virtual void MotherFoo2() {} +}; + +class Father { +public: + virtual void FatherFoo() {} +}; + +class Child : public Mother, public Father { +public: + void MotherFoo() override {} +}; + +int main() { + Mother *b = new Mother(); + b->MotherFoo(); + b->simple(); + Child *c = new Child(); + c->MotherFoo(); + return 0; +} + +// CIR: ![[VTypeInfoA:ty_.*]] = !cir.struct, !cir.ptr}> +// CIR: ![[VTypeInfoB:ty_.*]] = !cir.struct, !cir.ptr, !u32i, !u32i, !cir.ptr, !s64i, !cir.ptr, !s64i}> +// CIR: ![[VTableTypeMother:ty_.*]] = !cir.struct x 4>}> +// CIR: ![[VTableTypeFather:ty_.*]] = !cir.struct x 3>}> +// CIR: ![[VTableTypeChild:ty_.*]] = !cir.struct x 4>, !cir.array x 3>}> +// CIR: !ty_22Father22 = !cir.struct>>} #cir.record.decl.ast> +// CIR: !ty_22Mother22 = !cir.struct>>} #cir.record.decl.ast> +// CIR: !ty_22Child22 = !cir.struct + +// CIR: cir.func linkonce_odr @_ZN6MotherC2Ev(%arg0: !cir.ptr +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV6Mother, vtable_index = 0, address_point_index = 2) : !cir.ptr>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: cir.store %2, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: cir.return +// CIR: } + +// LLVM-DAG: define linkonce_odr void @_ZN6MotherC2Ev(ptr %0) +// LLVM-DAG: store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV6Mother, i32 0, i32 0, i32 2), ptr %{{[0-9]+}}, align 8 +// LLVM-DAG: ret void +// LLVM-DAG: } + +// CIR: cir.func linkonce_odr @_ZN5ChildC2Ev(%arg0: !cir.ptr +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV5Child, vtable_index = 0, address_point_index = 2) : !cir.ptr>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV5Child, vtable_index = 1, address_point_index = 2) : !cir.ptr>> +// CIR: %{{[0-9]+}} = cir.const #cir.int<8> : !s64i +// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr +// CIR: %11 = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: cir.return +// CIR: } + +// LLVM-DAG: define linkonce_odr void @_ZN5ChildC2Ev(ptr %0) +// LLVM-DAG: store ptr getelementptr inbounds ({ [4 x ptr], [3 x ptr] }, ptr @_ZTV5Child, i32 0, i32 0, i32 2), ptr %{{[0-9]+}}, align 8 +// LLVM-DAG: %{{[0-9]+}} = getelementptr %class.Child, ptr %3, i64 8 +// LLVM-DAG: store ptr getelementptr inbounds ({ [4 x ptr], [3 x ptr] }, ptr @_ZTV5Child, i32 0, i32 1, i32 2), ptr %{{[0-9]+}}, align 8 +// LLVM-DAG: ret void +// } + +// CIR: cir.func @main() -> !s32i extra(#fn_attr) { + +// CIR: %{{[0-9]+}} = cir.vtable.address_point( %{{[0-9]+}} : !cir.ptr)>>>, vtable_index = 0, address_point_index = 0) : !cir.ptr)>>> + +// CIR: %{{[0-9]+}} = cir.vtable.address_point( %{{[0-9]+}} : !cir.ptr)>>>, vtable_index = 0, address_point_index = 0) : !cir.ptr)>>> + +// CIR: } + +// vtable for Mother +// CIR: cir.global linkonce_odr @_ZTV6Mother = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI6Mother> : !cir.ptr, #cir.global_view<@_ZN6Mother9MotherFooEv> : !cir.ptr, #cir.global_view<@_ZN6Mother10MotherFoo2Ev> : !cir.ptr]> : !cir.array x 4>}> : ![[VTableTypeMother]] {alignment = 8 : i64} +// LLVM-DAG: @_ZTV6Mother = linkonce_odr global { [4 x ptr] } { [4 x ptr] [ptr null, ptr @_ZTI6Mother, ptr @_ZN6Mother9MotherFooEv, ptr @_ZN6Mother10MotherFoo2Ev] } + +// vtable for __cxxabiv1::__class_type_info +// CIR: cir.global "private" external @_ZTVN10__cxxabiv117__class_type_infoE : !cir.ptr> +// LLVM-DAG: @_ZTVN10__cxxabiv117__class_type_infoE = external global ptr + +// typeinfo name for Mother +// CIR: cir.global linkonce_odr @_ZTS6Mother = #cir.const_array<"6Mother" : !cir.array> : !cir.array {alignment = 1 : i64} +// LLVM-DAG: @_ZTS6Mother = linkonce_odr global [7 x i8] c"6Mother" + +// typeinfo for Mother +// CIR: cir.global constant external @_ZTI6Mother = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS6Mother> : !cir.ptr}> : ![[VTypeInfoA]] {alignment = 8 : i64} +// LLVM-DAG: @_ZTI6Mother = constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 2), ptr @_ZTS6Mother } + +// vtable for Father +// CIR: cir.global linkonce_odr @_ZTV6Father = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI6Father> : !cir.ptr, #cir.global_view<@_ZN6Father9FatherFooEv> : !cir.ptr]> : !cir.array x 3>}> : ![[VTableTypeFather]] {alignment = 8 : i64} +// LLVM-DAG: @_ZTV6Father = linkonce_odr global { [3 x ptr] } { [3 x ptr] [ptr null, ptr @_ZTI6Father, ptr @_ZN6Father9FatherFooEv] } + +// vtable for Child +// CIR: cir.global linkonce_odr @_ZTV5Child = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI5Child> : !cir.ptr, #cir.global_view<@_ZN5Child9MotherFooEv> : !cir.ptr, #cir.global_view<@_ZN6Mother10MotherFoo2Ev> : !cir.ptr]> : !cir.array x 4>, #cir.const_array<[#cir.ptr<-8 : i64> : !cir.ptr, #cir.global_view<@_ZTI5Child> : !cir.ptr, #cir.global_view<@_ZN6Father9FatherFooEv> : !cir.ptr]> : !cir.array x 3>}> : ![[VTableTypeChild]] {alignment = 8 : i64} +// LLVM-DAG: @_ZTV5Child = linkonce_odr global { [4 x ptr], [3 x ptr] } { [4 x ptr] [ptr null, ptr @_ZTI5Child, ptr @_ZN5Child9MotherFooEv, ptr @_ZN6Mother10MotherFoo2Ev], [3 x ptr] [ptr inttoptr (i64 -8 to ptr), ptr @_ZTI5Child, ptr @_ZN6Father9FatherFooEv] } + +// vtable for __cxxabiv1::__vmi_class_type_info +// CIR: cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE : !cir.ptr> +// LLVM-DAG: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global ptr + +// typeinfo name for Child +// CIR: cir.global linkonce_odr @_ZTS5Child = #cir.const_array<"5Child" : !cir.array> : !cir.array {alignment = 1 : i64} +// LLVM-DAG: @_ZTS5Child = linkonce_odr global [6 x i8] c"5Child" + +// typeinfo name for Father +// CIR: cir.global linkonce_odr @_ZTS6Father = #cir.const_array<"6Father" : !cir.array> : !cir.array {alignment = 1 : i64} +// LLVM-DAG: @_ZTS6Father = linkonce_odr global [7 x i8] c"6Father" + +// typeinfo for Father +// CIR: cir.global constant external @_ZTI6Father = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS6Father> : !cir.ptr}> : !ty_anon_struct {alignment = 8 : i64} +// LLVM-DAG: @_ZTI6Father = constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 2), ptr @_ZTS6Father } + +// typeinfo for Child +// CIR: cir.global constant external @_ZTI5Child = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS5Child> : !cir.ptr, #cir.int<0> : !u32i, #cir.int<2> : !u32i, #cir.global_view<@_ZTI6Mother> : !cir.ptr, #cir.int<2> : !s64i, #cir.global_view<@_ZTI6Father> : !cir.ptr, #cir.int<2050> : !s64i}> : ![[VTypeInfoB]] {alignment = 8 : i64} +// LLVM-DAG: @_ZTI5Child = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64 } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i32 2), ptr @_ZTS5Child, i32 0, i32 2, ptr @_ZTI6Mother, i64 2, ptr @_ZTI6Father, i64 2050 } diff --git a/clang/test/CIR/IR/constptrattr.cir b/clang/test/CIR/IR/constptrattr.cir index 98b215caacf7..21e14283b320 100644 --- a/clang/test/CIR/IR/constptrattr.cir +++ b/clang/test/CIR/IR/constptrattr.cir @@ -4,7 +4,7 @@ cir.global external @const_ptr = #cir.ptr<4660 : i64> : !cir.ptr // CHECK: cir.global external @const_ptr = #cir.ptr<4660 : i64> : !cir.ptr -cir.global external @signed_ptr = #cir.ptr<-1> : !cir.ptr +cir.global external @signed_ptr = #cir.ptr<-1 : i64> : !cir.ptr // CHECK: cir.global external @signed_ptr = #cir.ptr<-1 : i64> : !cir.ptr cir.global external @null_ptr = #cir.ptr : !cir.ptr // CHECK: cir.global external @null_ptr = #cir.ptr : !cir.ptr diff --git a/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir b/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir new file mode 100644 index 000000000000..acd0117925b9 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir @@ -0,0 +1,73 @@ +// RUN: cir-opt %s --cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR + +!s32i = !cir.int +!s64i = !cir.int +!s8i = !cir.int +!u32i = !cir.int +!u64i = !cir.int +!u8i = !cir.int +!void = !cir.void + +!ty_anon_struct = !cir.struct>, !cir.ptr>}> +!ty_anon_struct1 = !cir.struct>, !cir.ptr>, !cir.int, !cir.int, !cir.ptr>, !cir.int, !cir.ptr>, !cir.int}> +!ty_anon_struct2 = !cir.struct> x 4>}> +!ty_anon_struct3 = !cir.struct> x 3>}> +!ty_anon_struct4 = !cir.struct> x 4>, !cir.array> x 3>}> +!ty_22Father22 = !cir.struct ()>>>} #cir.record.decl.ast> +!ty_22Mother22 = !cir.struct ()>>>} #cir.record.decl.ast> +!ty_22Child22 = !cir.struct ()>>>} #cir.record.decl.ast>, !cir.struct ()>>>} #cir.record.decl.ast>} #cir.record.decl.ast> + +module { + cir.func linkonce_odr @_ZN6Mother6simpleEv(%arg0: !cir.ptr) { + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, !cir.ptr> + %1 = cir.load %0 : !cir.ptr>, !cir.ptr + cir.return + } + cir.func private @_ZN5ChildC2Ev(%arg0: !cir.ptr) { cir.return } + cir.global linkonce_odr @_ZTV6Mother = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI6Mother> : !cir.ptr, #cir.global_view<@_ZN6Mother9MotherFooEv> : !cir.ptr, #cir.global_view<@_ZN6Mother10MotherFoo2Ev> : !cir.ptr]> : !cir.array x 4>}> : !ty_anon_struct2 {alignment = 8 : i64} + cir.global "private" external @_ZTVN10__cxxabiv117__class_type_infoE : !cir.ptr> + cir.global linkonce_odr @_ZTS6Mother = #cir.const_array<"6Mother" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global constant external @_ZTI6Mother = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS6Mother> : !cir.ptr}> : !ty_anon_struct {alignment = 8 : i64} + cir.func linkonce_odr @_ZN6Mother9MotherFooEv(%arg0: !cir.ptr ) { cir.return } + cir.func linkonce_odr @_ZN6Mother10MotherFoo2Ev(%arg0: !cir.ptr ) { cir.return } + cir.global linkonce_odr @_ZTV6Father = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI6Father> : !cir.ptr, #cir.global_view<@_ZN6Father9FatherFooEv> : !cir.ptr]> : !cir.array x 3>}> : !ty_anon_struct3 {alignment = 8 : i64} + cir.func linkonce_odr @_ZN6FatherC2Ev(%arg0: !cir.ptr ) { cir.return } + cir.global linkonce_odr @_ZTV5Child = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI5Child> : !cir.ptr, #cir.global_view<@_ZN5Child9MotherFooEv> : !cir.ptr, #cir.global_view<@_ZN6Mother10MotherFoo2Ev> : !cir.ptr]> : !cir.array x 4>, #cir.const_array<[#cir.ptr<-8 : i64> : !cir.ptr, #cir.global_view<@_ZTI5Child> : !cir.ptr, #cir.global_view<@_ZN6Father9FatherFooEv> : !cir.ptr]> : !cir.array x 3>}> : !ty_anon_struct4 {alignment = 8 : i64} + cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE : !cir.ptr> + cir.global linkonce_odr @_ZTS5Child = #cir.const_array<"5Child" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global linkonce_odr @_ZTS6Father = #cir.const_array<"6Father" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global constant external @_ZTI6Father = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS6Father> : !cir.ptr}> : !ty_anon_struct {alignment = 8 : i64} + cir.global constant external @_ZTI5Child = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS5Child> : !cir.ptr, #cir.int<0> : !u32i, #cir.int<2> : !u32i, #cir.global_view<@_ZTI6Mother> : !cir.ptr, #cir.int<2> : !s64i, #cir.global_view<@_ZTI6Father> : !cir.ptr, #cir.int<2050> : !s64i}> : !ty_anon_struct1 {alignment = 8 : i64} + cir.func linkonce_odr @_ZN5Child9MotherFooEv(%arg0: !cir.ptr ) { cir.return } + cir.func linkonce_odr @_ZN6Father9FatherFooEv(%arg0: !cir.ptr ) { cir.return } +} + +// MLIR: llvm.mlir.global linkonce_odr @_ZTV5Child() {addr_space = 0 : i32} : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> { +// MLIR: %{{[0-9]+}} = llvm.mlir.undef : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> +// MLIR: %{{[0-9]+}} = llvm.mlir.undef : !llvm.array<4 x ptr> +// MLIR: %{{[0-9]+}} = llvm.mlir.zero : !llvm.ptr +// MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[0] : !llvm.array<4 x ptr> +// MLIR: %{{[0-9]+}} = llvm.mlir.addressof @_ZTI5Child : !llvm.ptr + +// MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[1] : !llvm.array<4 x ptr> +// MLIR: %{{[0-9]+}} = llvm.mlir.addressof @_ZN5Child9MotherFooEv : !llvm.ptr + +// MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[2] : !llvm.array<4 x ptr> +// MLIR: %{{[0-9]+}} = llvm.mlir.addressof @_ZN6Mother10MotherFoo2Ev : !llvm.ptr + +// MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[3] : !llvm.array<4 x ptr> +// MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[0] : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> +// MLIR: %{{[0-9]+}} = llvm.mlir.undef : !llvm.array<3 x ptr> +// MLIR: %{{[0-9]+}} = llvm.mlir.constant(-8 : i64) : i64 +// MLIR: %{{[0-9]+}} = llvm.inttoptr %{{[0-9]+}} : i64 to !llvm.ptr +// MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[0] : !llvm.array<3 x ptr> +// MLIR: %{{[0-9]+}} = llvm.mlir.addressof @_ZTI5Child : !llvm.ptr + +// MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[1] : !llvm.array<3 x ptr> +// MLIR: %{{[0-9]+}} = llvm.mlir.addressof @_ZN6Father9FatherFooEv : !llvm.ptr + +// MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[2] : !llvm.array<3 x ptr> +// MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[1] : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> +// MLIR: llvm.return %{{[0-9]+}} : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> +// MLIR: } \ No newline at end of file From f094c3320dd3017001aeba823f44ebe45c93b275 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 1 Oct 2024 10:48:48 -0700 Subject: [PATCH 1582/2301] [CIR][Build] Fix missing deps --- clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt | 2 ++ clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt | 1 + 2 files changed, 3 insertions(+) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index edabbaabec13..c87f7531d996 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -29,8 +29,10 @@ add_clang_library(clangCIRLoweringDirectToLLVM ${dialect_libs} MLIRCIR MLIRAnalysis + MLIRBuiltinToLLVMIRTranslation MLIRCIRTransforms MLIRIR + MLIRLLVMToLLVMIRTranslation MLIRParser MLIRSideEffectInterfaces MLIRTransforms diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index c5f3d21e363d..788cd4e396db 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -31,6 +31,7 @@ add_clang_library(clangCIRLoweringThroughMLIR MLIRAnalysis MLIRBuiltinToLLVMIRTranslation MLIRIR + MLIRLLVMToLLVMIRTranslation MLIRParser MLIRSideEffectInterfaces MLIRTransforms From 3e4d5d15f3189c05068483ce775a14085eeeb130 Mon Sep 17 00:00:00 2001 From: axp Date: Wed, 22 May 2024 06:33:50 +0800 Subject: [PATCH 1583/2301] [CIR][CIRGen] Add support for gnu range on switch stmts (#599) Fix #596 --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 17 +- clang/test/CIR/CodeGen/switch-gnurange.cpp | 205 +++++++++++++++++++++ 2 files changed, 217 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/switch-gnurange.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 9ee61498e182..9766b5df78fe 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -615,11 +615,21 @@ CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, // Fold cascading cases whenever possible to simplify codegen a bit. while (caseStmt) { lastCase = caseStmt; - auto intVal = caseStmt->getLHS()->EvaluateKnownConstInt(getContext()); - caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(condType, intVal)); + + auto startVal = caseStmt->getLHS()->EvaluateKnownConstInt(getContext()); + auto endVal = startVal; + if (auto *rhs = caseStmt->getRHS()) { + endVal = rhs->EvaluateKnownConstInt(getContext()); + } + for (auto intVal = startVal; intVal <= endVal; ++intVal) { + caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(condType, intVal)); + } + caseStmt = dyn_cast_or_null(caseStmt->getSubStmt()); } + assert(!caseEltValueListAttr.empty() && "empty case value NYI"); + auto *ctxt = builder.getContext(); auto caseAttr = mlir::cir::CaseAttr::get( @@ -668,9 +678,6 @@ mlir::LogicalResult CIRGenFunction::buildCaseDefaultCascade( mlir::LogicalResult CIRGenFunction::buildCaseStmt(const CaseStmt &S, mlir::Type condType, SmallVector &caseAttrs) { - assert((!S.getRHS() || !S.caseStmtIsGNURange()) && - "case ranges not implemented"); - auto *caseStmt = foldCaseStmt(S, condType, caseAttrs); return buildCaseDefaultCascade(caseStmt, condType, caseAttrs); } diff --git a/clang/test/CIR/CodeGen/switch-gnurange.cpp b/clang/test/CIR/CodeGen/switch-gnurange.cpp new file mode 100644 index 000000000000..7fbd49ad704c --- /dev/null +++ b/clang/test/CIR/CodeGen/switch-gnurange.cpp @@ -0,0 +1,205 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +enum letter { + A, B, C, D, E, F, G, H, I, J, L +}; + +int sw1(enum letter c) { + switch (c) { + case A ... C: + case D: + case E ... F: + case G ... L: + return 1; + default: + return 0; + } +} + +// CIR: cir.func @_Z3sw16letter +// CIR: cir.scope { +// CIR: cir.switch +// CIR-NEXT: case (anyof, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] : !s32i) { +// CIR: cir.return +// CIR-NEXT: }, +// CIR-NEXT: case (default) { +// CIR: cir.return +// CIR-NEXT: } +// CIR-NEXT: ] +// CIR-NEXT: } + +// LLVM: @_Z3sw16letter +// LLVM: switch i32 %[[C:[0-9]+]], label %[[DEFAULT:[0-9]+]] [ +// LLVM-NEXT: i32 0, label %[[CASE:[0-9]+]] +// LLVM-NEXT: i32 1, label %[[CASE]] +// LLVM-NEXT: i32 2, label %[[CASE]] +// LLVM-NEXT: i32 3, label %[[CASE]] +// LLVM-NEXT: i32 4, label %[[CASE]] +// LLVM-NEXT: i32 5, label %[[CASE]] +// LLVM-NEXT: i32 6, label %[[CASE]] +// LLVM-NEXT: i32 7, label %[[CASE]] +// LLVM-NEXT: i32 8, label %[[CASE]] +// LLVM-NEXT: i32 9, label %[[CASE]] +// LLVM-NEXT: i32 10, label %[[CASE]] +// LLVM-NEXT: ] +// LLVM: [[CASE]]: +// LLVM: store i32 1 +// LLVM: ret +// LLVM: [[DEFAULT]]: +// LLVM: store i32 0 +// LLVM: ret + + +int sw2(enum letter c) { + switch (c) { + case A ... C: + case L ... A: + return 1; + default: + return 0; + } +} + +// CIR: cir.func @_Z3sw26letter +// CIR: cir.scope { +// CIR: cir.switch +// CIR-NEXT: case (anyof, [0, 1, 2] : !s32i) { +// CIR: cir.return +// CIR-NEXT: }, +// CIR-NEXT: case (default) { +// CIR: cir.return +// CIR-NEXT: } +// CIR-NEXT: ] +// CIR-NEXT: } + +// LLVM: @_Z3sw26letter +// LLVM: switch i32 %[[C:[0-9]+]], label %[[DEFAULT:[0-9]+]] [ +// LLVM-NEXT: i32 0, label %[[CASE:[0-9]+]] +// LLVM-NEXT: i32 1, label %[[CASE]] +// LLVM-NEXT: i32 2, label %[[CASE]] +// LLVM-NEXT: ] +// LLVM: [[CASE]]: +// LLVM: store i32 1 +// LLVM: ret +// LLVM: [[DEFAULT]]: +// LLVM: store i32 0 +// LLVM: ret + +void sw3(enum letter c) { + int x = 0; + switch (c) { + case A ... C: + x = 1; + break; + case D ... F: + x = 2; + break; + case G ... I: + x = 3; + break; + case J ... L: + x = 4; + break; + } +} + +// CIR: cir.func @_Z3sw36letter +// CIR: cir.scope { +// CIR: cir.switch +// CIR-NEXT: case (anyof, [0, 1, 2] : !s32i) { +// CIR-NEXT: cir.int<1> +// CIR: cir.break +// CIR-NEXT: }, +// CIR-NEXT: case (anyof, [3, 4, 5] : !s32i) { +// CIR-NEXT: cir.int<2> +// CIR: cir.break +// CIR-NEXT: }, +// CIR-NEXT: case (anyof, [6, 7, 8] : !s32i) { +// CIR-NEXT: cir.int<3> +// CIR: cir.break +// CIR-NEXT: }, +// CIR-NEXT: case (anyof, [9, 10] : !s32i) { +// CIR-NEXT: cir.int<4> +// CIR: cir.break +// CIR-NEXT: } +// CIR-NEXT: ] +// CIR-NEXT: } + +// LLVM: @_Z3sw36letter +// LLVM: switch i32 %[[C:[0-9]+]], label %[[DEFAULT:[0-9]+]] [ +// LLVM-NEXT: i32 0, label %[[CASE_AC:[0-9]+]] +// LLVM-NEXT: i32 1, label %[[CASE_AC]] +// LLVM-NEXT: i32 2, label %[[CASE_AC]] +// LLVM-NEXT: i32 3, label %[[CASE_DF:[0-9]+]] +// LLVM-NEXT: i32 4, label %[[CASE_DF]] +// LLVM-NEXT: i32 5, label %[[CASE_DF]] +// LLVM-NEXT: i32 6, label %[[CASE_GI:[0-9]+]] +// LLVM-NEXT: i32 7, label %[[CASE_GI]] +// LLVM-NEXT: i32 8, label %[[CASE_GI]] +// LLVM-NEXT: i32 9, label %[[CASE_JL:[0-9]+]] +// LLVM-NEXT: i32 10, label %[[CASE_JL]] +// LLVM-NEXT: ] +// LLVM: [[CASE_AC]]: +// LLVM: store i32 1, ptr %[[X:[0-9]+]] +// LLVM: br label %[[EPILOG:[0-9]+]] +// LLVM: [[CASE_DF]]: +// LLVM: store i32 2, ptr %[[X]] +// LLVM: br label %[[EPILOG]] +// LLVM: [[CASE_GI]]: +// LLVM: store i32 3, ptr %[[X]] +// LLVM: br label %[[EPILOG]] +// LLVM: [[CASE_JL]]: +// LLVM: store i32 4, ptr %[[X]] +// LLVM: br label %[[EPILOG]] +// LLVM: [[EPILOG]]: +// LLVM: ret void + +void sw4(int x) { + switch (x) { + case 66 ... 233: + break; + case -50 ... 50: + break; + } +} + +// CIR: cir.func @_Z3sw4i +// CIR: cir.scope { +// CIR: cir.switch +// CIR-NEXT: case (anyof, [66, 67, 68, 69, {{[0-9, ]+}}, 230, 231, 232, 233] : !s32i) { +// CIR-NEXT: cir.break +// CIR-NEXT: }, +// CIR-NEXT: case (anyof, [-50, -49, -48, -47, {{[0-9, -]+}}, -1, 0, 1, {{[0-9, ]+}}, 47, 48, 49, 50] : !s32i) { +// CIR-NEXT: cir.break +// CIR-NEXT: } +// CIR-NEXT: ] +// CIR-NEXT: } + +// LLVM: @_Z3sw4i +// LLVM: switch i32 %[[X:[0-9]+]], label %[[DEFAULT:[0-9]+]] [ +// LLVM-NEXT: i32 66, label %[[CASE_66_233:[0-9]+]] +// LLVM-NEXT: i32 67, label %[[CASE_66_233]] +// ... +// LLVM: i32 232, label %[[CASE_66_233]] +// LLVM-NEXT: i32 233, label %[[CASE_66_233]] +// LLVM-NEXT: i32 -50, label %[[CASE_NEG50_50:[0-9]+]] +// LLVM-NEXT: i32 -49, label %[[CASE_NEG50_50]] +// ... +// LLVM: i32 -1, label %[[CASE_NEG50_50]] +// LLVM-NEXT: i32 0, label %[[CASE_NEG50_50]] +// LLVM-NEXT: i32 1, label %[[CASE_NEG50_50]] +// ... +// LLVM: i32 49, label %[[CASE_NEG50_50]] +// LLVM-NEXT: i32 50, label %[[CASE_NEG50_50]] +// LLVM-NEXT: ] +// LLVM: [[CASE_66_233]]: +// LLVM: br label %[[EPILOG:[0-9]+]] +// LLVM: [[CASE_NEG50_50]]: +// LLVM: br label %[[EPILOG]] +// LLVM: [[EPILOG]]: +// LLVM: ret void + + From cbb9dc5518c538e5aaa3b13f065bb600136a529d Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Tue, 21 May 2024 20:40:47 -0400 Subject: [PATCH 1584/2301] [CIR] Move CIRDataLayout.h into include/clang/CIR/Dialect/IR (#621) Move it up for visibility, just like the other dialect headers. --- .../clang/CIR/Dialect/IR}/CIRDataLayout.h | 7 +++---- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 3 ++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenException.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 10 +++++----- clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 2 +- 9 files changed, 19 insertions(+), 19 deletions(-) rename clang/{lib/CIR/CodeGen => include/clang/CIR/Dialect/IR}/CIRDataLayout.h (96%) diff --git a/clang/lib/CIR/CodeGen/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h similarity index 96% rename from clang/lib/CIR/CodeGen/CIRDataLayout.h rename to clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h index ef9f737a5620..88e030fb424a 100644 --- a/clang/lib/CIR/CodeGen/CIRDataLayout.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h @@ -9,10 +9,9 @@ // it easier to port some of LLVM codegen layout logic to CIR. //===----------------------------------------------------------------------===// -#ifndef LLVM_CLANG_LIB_CIR_CIRDATALAYOUT_H -#define LLVM_CLANG_LIB_CIR_CIRDATALAYOUT_H +#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRDATALAYOUT_H +#define LLVM_CLANG_CIR_DIALECT_IR_CIRDATALAYOUT_H -#include "UnimplementedFeatureGuarding.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/IR/BuiltinOps.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" @@ -92,4 +91,4 @@ class CIRDataLayout { } // namespace cir -#endif \ No newline at end of file +#endif diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 95d3b28682b4..6f6b8c91d84e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -11,7 +11,7 @@ //===----------------------------------------------------------------------===// #include "Address.h" -#include "CIRDataLayout.h" + #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" @@ -20,6 +20,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 505fb275511c..41248e8f9caf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -10,7 +10,6 @@ #define LLVM_CLANG_LIB_CIR_CIRGENBUILDER_H #include "Address.h" -#include "CIRDataLayout.h" #include "CIRGenRecordLayout.h" #include "CIRGenTypeCache.h" #include "UnimplementedFeatureGuarding.h" @@ -19,6 +18,7 @@ #include "clang/AST/Type.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index d1abfeeeb8c5..79623f4f362f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -10,7 +10,6 @@ // //===----------------------------------------------------------------------===// -#include "CIRDataLayout.h" #include "CIRGenBuilder.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" @@ -24,6 +23,7 @@ #include "clang/AST/Decl.h" #include "clang/AST/ExprCXX.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/ErrorHandling.h" @@ -824,7 +824,7 @@ void CIRGenFunction::buildDecl(const Decl &D) { return; case Decl::NamespaceAlias: - case Decl::Using: // using X; [C++] + case Decl::Using: // using X; [C++] case Decl::UsingEnum: // using enum X; [C++] case Decl::UsingDirective: // using namespace X; [C++] assert(!UnimplementedFeature::generateDebugInfo()); diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 1c0b686154f4..da34c762ac96 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -10,7 +10,6 @@ // //===----------------------------------------------------------------------===// -#include "CIRDataLayout.h" #include "CIRGenCXXABI.h" #include "CIRGenCleanup.h" #include "CIRGenFunction.h" @@ -19,6 +18,7 @@ #include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" @@ -886,4 +886,4 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl() { mlir::Operation *CIRGenFunction::getTerminateLandingPad() { llvm_unreachable("NYI"); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index ab40081c42e7..bad23ad72560 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -10,7 +10,6 @@ // //===----------------------------------------------------------------------===// #include "Address.h" -#include "CIRDataLayout.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" @@ -25,6 +24,7 @@ #include "clang/AST/StmtVisitor.h" #include "clang/Basic/Builtins.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Sequence.h" @@ -1875,4 +1875,4 @@ mlir::Attribute ConstantEmitter::emitNullForMemory(mlir::Location loc, CGM.buildNullConstant(T, loc).getDefiningOp()); assert(cstOp && "expected cir.const op"); return emitForMemory(CGM, cstOp.getValue(), T); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index d1284d268a2c..d0f2181044fe 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -11,7 +11,6 @@ //===----------------------------------------------------------------------===// #include "Address.h" -#include "CIRDataLayout.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" @@ -19,6 +18,7 @@ #include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 946d3d2e5a7b..205dcaf62393 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -13,7 +13,6 @@ #ifndef LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H #define LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H -#include "CIRDataLayout.h" #include "CIRGenBuilder.h" #include "CIRGenCall.h" #include "CIRGenTypeCache.h" @@ -27,6 +26,7 @@ #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" @@ -222,10 +222,10 @@ class CIRGenModule : public CIRGenTypeCache { mlir::cir::GlobalOp getOrInsertGlobal(mlir::Location loc, StringRef Name, mlir::Type Ty); - static mlir::cir::GlobalOp createGlobalOp(CIRGenModule &CGM, - mlir::Location loc, StringRef name, - mlir::Type t, bool isCst = false, - mlir::Operation *insertPoint = nullptr); + static mlir::cir::GlobalOp + createGlobalOp(CIRGenModule &CGM, mlir::Location loc, StringRef name, + mlir::Type t, bool isCst = false, + mlir::Operation *insertPoint = nullptr); // FIXME: Hardcoding priority here is gross. void AddGlobalCtor(mlir::cir::FuncOp Ctor, int Priority = 65535); diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index ef4ad098f4e6..390a6ccc1d62 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -1,5 +1,4 @@ -#include "CIRDataLayout.h" #include "CIRGenBuilder.h" #include "CIRGenModule.h" #include "CIRGenTypes.h" @@ -10,6 +9,7 @@ #include "clang/AST/DeclCXX.h" #include "clang/AST/RecordLayout.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "llvm/IR/DataLayout.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" From 71489a8db2147496e807293ec6fe7d77bd726452 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 21 May 2024 17:20:06 -0700 Subject: [PATCH 1585/2301] [CIR][CIRGen][NFC] More AArch64 builtins skeleton Just mimic the table approach from OG codegen, there are thousands of these, it's massive! This doesn't add any new feature yet, continues asserting as before. Coming next: the plan is to reuse the tablegen generated LLVM intrinsics, and pass that down to LLVM lowering. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 2 +- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 2110 ++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 12 + .../CodeGen/UnimplementedFeatureGuarding.h | 6 - 4 files changed, 2108 insertions(+), 22 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index cac650a4b6c5..26d911030d77 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1119,7 +1119,7 @@ static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, case llvm::Triple::aarch64: case llvm::Triple::aarch64_32: case llvm::Triple::aarch64_be: - return CGF->buildAArch64BuiltinExpr(BuiltinID, E, Arch); + return CGF->buildAArch64BuiltinExpr(BuiltinID, E, ReturnValue, Arch); case llvm::Triple::bpfeb: case llvm::Triple::bpfel: llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 232da07983c7..fde7145ab61c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -18,9 +18,12 @@ #include "TargetInfo.h" #include "UnimplementedFeatureGuarding.h" -// TODO(cir): we shouldn't need this but we currently reuse intrinsic IDs for -// convenience. +// TODO(cir): once all builtins are covered, decide whether we still +// need to use LLVM intrinsics or if there's a better approach to follow. Right +// now the intrinsics are reused to make it convenient to encode all thousands +// of them and passing down to LLVM lowering. #include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicsAArch64.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Value.h" @@ -34,17 +37,1522 @@ using namespace cir; using namespace clang; using namespace mlir::cir; +using namespace llvm; + +enum { + AddRetType = (1 << 0), + Add1ArgType = (1 << 1), + Add2ArgTypes = (1 << 2), + + VectorizeRetType = (1 << 3), + VectorizeArgTypes = (1 << 4), + + InventFloatType = (1 << 5), + UnsignedAlts = (1 << 6), + + Use64BitVectors = (1 << 7), + Use128BitVectors = (1 << 8), + + Vectorize1ArgType = Add1ArgType | VectorizeArgTypes, + VectorRet = AddRetType | VectorizeRetType, + VectorRetGetArgs01 = + AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes, + FpCmpzModifiers = + AddRetType | VectorizeRetType | Add1ArgType | InventFloatType +}; + +namespace { +struct ARMVectorIntrinsicInfo { + const char *NameHint; + unsigned BuiltinID; + unsigned LLVMIntrinsic; + unsigned AltLLVMIntrinsic; + uint64_t TypeModifier; + + bool operator<(unsigned RHSBuiltinID) const { + return BuiltinID < RHSBuiltinID; + } + bool operator<(const ARMVectorIntrinsicInfo &TE) const { + return BuiltinID < TE.BuiltinID; + } +}; +} // end anonymous namespace + +#define NEONMAP0(NameBase) \ + { #NameBase, NEON::BI__builtin_neon_##NameBase, 0, 0, 0 } + +#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ + { \ + #NameBase, NEON::BI__builtin_neon_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ + TypeModifier \ + } + +#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \ + { \ + #NameBase, NEON::BI__builtin_neon_##NameBase, Intrinsic::LLVMIntrinsic, \ + Intrinsic::AltLLVMIntrinsic, TypeModifier \ + } + +static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = { + NEONMAP0(splat_lane_v), + NEONMAP0(splat_laneq_v), + NEONMAP0(splatq_lane_v), + NEONMAP0(splatq_laneq_v), + NEONMAP1(vabs_v, aarch64_neon_abs, 0), + NEONMAP1(vabsq_v, aarch64_neon_abs, 0), + NEONMAP0(vadd_v), + NEONMAP0(vaddhn_v), + NEONMAP0(vaddq_p128), + NEONMAP0(vaddq_v), + NEONMAP1(vaesdq_u8, aarch64_crypto_aesd, 0), + NEONMAP1(vaeseq_u8, aarch64_crypto_aese, 0), + NEONMAP1(vaesimcq_u8, aarch64_crypto_aesimc, 0), + NEONMAP1(vaesmcq_u8, aarch64_crypto_aesmc, 0), + NEONMAP2(vbcaxq_s16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, + Add1ArgType | UnsignedAlts), + NEONMAP2(vbcaxq_s32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, + Add1ArgType | UnsignedAlts), + NEONMAP2(vbcaxq_s64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, + Add1ArgType | UnsignedAlts), + NEONMAP2(vbcaxq_s8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, + Add1ArgType | UnsignedAlts), + NEONMAP2(vbcaxq_u16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, + Add1ArgType | UnsignedAlts), + NEONMAP2(vbcaxq_u32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, + Add1ArgType | UnsignedAlts), + NEONMAP2(vbcaxq_u64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, + Add1ArgType | UnsignedAlts), + NEONMAP2(vbcaxq_u8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, + Add1ArgType | UnsignedAlts), + NEONMAP1(vbfdot_f32, aarch64_neon_bfdot, 0), + NEONMAP1(vbfdotq_f32, aarch64_neon_bfdot, 0), + NEONMAP1(vbfmlalbq_f32, aarch64_neon_bfmlalb, 0), + NEONMAP1(vbfmlaltq_f32, aarch64_neon_bfmlalt, 0), + NEONMAP1(vbfmmlaq_f32, aarch64_neon_bfmmla, 0), + NEONMAP1(vcadd_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType), + NEONMAP1(vcadd_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType), + NEONMAP1(vcadd_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType), + NEONMAP1(vcadd_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType), + NEONMAP1(vcaddq_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType), + NEONMAP1(vcaddq_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType), + NEONMAP1(vcaddq_rot270_f64, aarch64_neon_vcadd_rot270, Add1ArgType), + NEONMAP1(vcaddq_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType), + NEONMAP1(vcaddq_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType), + NEONMAP1(vcaddq_rot90_f64, aarch64_neon_vcadd_rot90, Add1ArgType), + NEONMAP1(vcage_v, aarch64_neon_facge, 0), + NEONMAP1(vcageq_v, aarch64_neon_facge, 0), + NEONMAP1(vcagt_v, aarch64_neon_facgt, 0), + NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0), + NEONMAP1(vcale_v, aarch64_neon_facge, 0), + NEONMAP1(vcaleq_v, aarch64_neon_facge, 0), + NEONMAP1(vcalt_v, aarch64_neon_facgt, 0), + NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0), + NEONMAP0(vceqz_v), + NEONMAP0(vceqzq_v), + NEONMAP0(vcgez_v), + NEONMAP0(vcgezq_v), + NEONMAP0(vcgtz_v), + NEONMAP0(vcgtzq_v), + NEONMAP0(vclez_v), + NEONMAP0(vclezq_v), + NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType), + NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType), + NEONMAP0(vcltz_v), + NEONMAP0(vcltzq_v), + NEONMAP1(vclz_v, ctlz, Add1ArgType), + NEONMAP1(vclzq_v, ctlz, Add1ArgType), + NEONMAP1(vcmla_f16, aarch64_neon_vcmla_rot0, Add1ArgType), + NEONMAP1(vcmla_f32, aarch64_neon_vcmla_rot0, Add1ArgType), + NEONMAP1(vcmla_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType), + NEONMAP1(vcmla_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType), + NEONMAP1(vcmla_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType), + NEONMAP1(vcmla_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType), + NEONMAP1(vcmla_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType), + NEONMAP1(vcmla_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType), + NEONMAP1(vcmlaq_f16, aarch64_neon_vcmla_rot0, Add1ArgType), + NEONMAP1(vcmlaq_f32, aarch64_neon_vcmla_rot0, Add1ArgType), + NEONMAP1(vcmlaq_f64, aarch64_neon_vcmla_rot0, Add1ArgType), + NEONMAP1(vcmlaq_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType), + NEONMAP1(vcmlaq_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType), + NEONMAP1(vcmlaq_rot180_f64, aarch64_neon_vcmla_rot180, Add1ArgType), + NEONMAP1(vcmlaq_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType), + NEONMAP1(vcmlaq_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType), + NEONMAP1(vcmlaq_rot270_f64, aarch64_neon_vcmla_rot270, Add1ArgType), + NEONMAP1(vcmlaq_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType), + NEONMAP1(vcmlaq_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType), + NEONMAP1(vcmlaq_rot90_f64, aarch64_neon_vcmla_rot90, Add1ArgType), + NEONMAP1(vcnt_v, ctpop, Add1ArgType), + NEONMAP1(vcntq_v, ctpop, Add1ArgType), + NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0), + NEONMAP0(vcvt_f16_s16), + NEONMAP0(vcvt_f16_u16), + NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0), + NEONMAP0(vcvt_f32_v), + NEONMAP1(vcvt_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP1(vcvt_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0), + NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP1(vcvt_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP0(vcvtq_f16_s16), + NEONMAP0(vcvtq_f16_u16), + NEONMAP0(vcvtq_f32_v), + NEONMAP0(vcvtq_high_bf16_f32), + NEONMAP0(vcvtq_low_bf16_f32), + NEONMAP1(vcvtq_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP1(vcvtq_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0), + NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, + 0), + NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, + 0), + NEONMAP1(vcvtq_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType), + NEONMAP1(vdot_s32, aarch64_neon_sdot, 0), + NEONMAP1(vdot_u32, aarch64_neon_udot, 0), + NEONMAP1(vdotq_s32, aarch64_neon_sdot, 0), + NEONMAP1(vdotq_u32, aarch64_neon_udot, 0), + NEONMAP2(veor3q_s16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, + Add1ArgType | UnsignedAlts), + NEONMAP2(veor3q_s32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, + Add1ArgType | UnsignedAlts), + NEONMAP2(veor3q_s64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, + Add1ArgType | UnsignedAlts), + NEONMAP2(veor3q_s8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, + Add1ArgType | UnsignedAlts), + NEONMAP2(veor3q_u16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, + Add1ArgType | UnsignedAlts), + NEONMAP2(veor3q_u32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, + Add1ArgType | UnsignedAlts), + NEONMAP2(veor3q_u64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, + Add1ArgType | UnsignedAlts), + NEONMAP2(veor3q_u8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, + Add1ArgType | UnsignedAlts), + NEONMAP0(vext_v), + NEONMAP0(vextq_v), + NEONMAP0(vfma_v), + NEONMAP0(vfmaq_v), + NEONMAP1(vfmlal_high_f16, aarch64_neon_fmlal2, 0), + NEONMAP1(vfmlal_low_f16, aarch64_neon_fmlal, 0), + NEONMAP1(vfmlalq_high_f16, aarch64_neon_fmlal2, 0), + NEONMAP1(vfmlalq_low_f16, aarch64_neon_fmlal, 0), + NEONMAP1(vfmlsl_high_f16, aarch64_neon_fmlsl2, 0), + NEONMAP1(vfmlsl_low_f16, aarch64_neon_fmlsl, 0), + NEONMAP1(vfmlslq_high_f16, aarch64_neon_fmlsl2, 0), + NEONMAP1(vfmlslq_low_f16, aarch64_neon_fmlsl, 0), + NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, + Add1ArgType | UnsignedAlts), + NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, + Add1ArgType | UnsignedAlts), + NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, + Add1ArgType | UnsignedAlts), + NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, + Add1ArgType | UnsignedAlts), + NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0), + NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0), + NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0), + NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0), + NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0), + NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0), + NEONMAP1(vmmlaq_s32, aarch64_neon_smmla, 0), + NEONMAP1(vmmlaq_u32, aarch64_neon_ummla, 0), + NEONMAP0(vmovl_v), + NEONMAP0(vmovn_v), + NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType), + NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType), + NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType), + NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), + NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), + NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType), + NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType), + NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType), + NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0), + NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0), + NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0), + NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0), + NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType), + NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0), + NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0), + NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType), + NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType), + NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, + Add1ArgType | UnsignedAlts), + NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType), + NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqrdmlah_s16, aarch64_neon_sqrdmlah, Add1ArgType), + NEONMAP1(vqrdmlah_s32, aarch64_neon_sqrdmlah, Add1ArgType), + NEONMAP1(vqrdmlahq_s16, aarch64_neon_sqrdmlah, Add1ArgType), + NEONMAP1(vqrdmlahq_s32, aarch64_neon_sqrdmlah, Add1ArgType), + NEONMAP1(vqrdmlsh_s16, aarch64_neon_sqrdmlsh, Add1ArgType), + NEONMAP1(vqrdmlsh_s32, aarch64_neon_sqrdmlsh, Add1ArgType), + NEONMAP1(vqrdmlshq_s16, aarch64_neon_sqrdmlsh, Add1ArgType), + NEONMAP1(vqrdmlshq_s32, aarch64_neon_sqrdmlsh, Add1ArgType), + NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0), + NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), + NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType), + NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0), + NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), + NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType), + NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), + NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), + NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, + Add1ArgType | UnsignedAlts), + NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0), + NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0), + NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, + Add1ArgType | UnsignedAlts), + NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType), + NEONMAP1(vrax1q_u64, aarch64_crypto_rax1, 0), + NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), + NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), + NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType), + NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType), + NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, + Add1ArgType | UnsignedAlts), + NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, + Add1ArgType | UnsignedAlts), + NEONMAP1(vrnd32x_f32, aarch64_neon_frint32x, Add1ArgType), + NEONMAP1(vrnd32x_f64, aarch64_neon_frint32x, Add1ArgType), + NEONMAP1(vrnd32xq_f32, aarch64_neon_frint32x, Add1ArgType), + NEONMAP1(vrnd32xq_f64, aarch64_neon_frint32x, Add1ArgType), + NEONMAP1(vrnd32z_f32, aarch64_neon_frint32z, Add1ArgType), + NEONMAP1(vrnd32z_f64, aarch64_neon_frint32z, Add1ArgType), + NEONMAP1(vrnd32zq_f32, aarch64_neon_frint32z, Add1ArgType), + NEONMAP1(vrnd32zq_f64, aarch64_neon_frint32z, Add1ArgType), + NEONMAP1(vrnd64x_f32, aarch64_neon_frint64x, Add1ArgType), + NEONMAP1(vrnd64x_f64, aarch64_neon_frint64x, Add1ArgType), + NEONMAP1(vrnd64xq_f32, aarch64_neon_frint64x, Add1ArgType), + NEONMAP1(vrnd64xq_f64, aarch64_neon_frint64x, Add1ArgType), + NEONMAP1(vrnd64z_f32, aarch64_neon_frint64z, Add1ArgType), + NEONMAP1(vrnd64z_f64, aarch64_neon_frint64z, Add1ArgType), + NEONMAP1(vrnd64zq_f32, aarch64_neon_frint64z, Add1ArgType), + NEONMAP1(vrnd64zq_f64, aarch64_neon_frint64z, Add1ArgType), + NEONMAP0(vrndi_v), + NEONMAP0(vrndiq_v), + NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, + Add1ArgType | UnsignedAlts), + NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, + Add1ArgType | UnsignedAlts), + NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), + NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), + NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), + NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), + NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType), + NEONMAP1(vsha1su0q_u32, aarch64_crypto_sha1su0, 0), + NEONMAP1(vsha1su1q_u32, aarch64_crypto_sha1su1, 0), + NEONMAP1(vsha256h2q_u32, aarch64_crypto_sha256h2, 0), + NEONMAP1(vsha256hq_u32, aarch64_crypto_sha256h, 0), + NEONMAP1(vsha256su0q_u32, aarch64_crypto_sha256su0, 0), + NEONMAP1(vsha256su1q_u32, aarch64_crypto_sha256su1, 0), + NEONMAP1(vsha512h2q_u64, aarch64_crypto_sha512h2, 0), + NEONMAP1(vsha512hq_u64, aarch64_crypto_sha512h, 0), + NEONMAP1(vsha512su0q_u64, aarch64_crypto_sha512su0, 0), + NEONMAP1(vsha512su1q_u64, aarch64_crypto_sha512su1, 0), + NEONMAP0(vshl_n_v), + NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, + Add1ArgType | UnsignedAlts), + NEONMAP0(vshll_n_v), + NEONMAP0(vshlq_n_v), + NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, + Add1ArgType | UnsignedAlts), + NEONMAP0(vshr_n_v), + NEONMAP0(vshrn_n_v), + NEONMAP0(vshrq_n_v), + NEONMAP1(vsm3partw1q_u32, aarch64_crypto_sm3partw1, 0), + NEONMAP1(vsm3partw2q_u32, aarch64_crypto_sm3partw2, 0), + NEONMAP1(vsm3ss1q_u32, aarch64_crypto_sm3ss1, 0), + NEONMAP1(vsm3tt1aq_u32, aarch64_crypto_sm3tt1a, 0), + NEONMAP1(vsm3tt1bq_u32, aarch64_crypto_sm3tt1b, 0), + NEONMAP1(vsm3tt2aq_u32, aarch64_crypto_sm3tt2a, 0), + NEONMAP1(vsm3tt2bq_u32, aarch64_crypto_sm3tt2b, 0), + NEONMAP1(vsm4ekeyq_u32, aarch64_crypto_sm4ekey, 0), + NEONMAP1(vsm4eq_u32, aarch64_crypto_sm4e, 0), + NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0), + NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0), + NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0), + NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0), + NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0), + NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0), + NEONMAP0(vsubhn_v), + NEONMAP0(vtst_v), + NEONMAP0(vtstq_v), + NEONMAP1(vusdot_s32, aarch64_neon_usdot, 0), + NEONMAP1(vusdotq_s32, aarch64_neon_usdot, 0), + NEONMAP1(vusmmlaq_s32, aarch64_neon_usmmla, 0), + NEONMAP1(vxarq_u64, aarch64_crypto_xar, 0), +}; + +static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = { + NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType), + NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType), + NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType), + NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), + NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), + NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType), + NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType), + NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType), + NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, + AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, + AddRetType | Add1ArgType), + NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), + NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), + NEONMAP0(vcvth_bf16_f32), + NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType), + NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), + NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType), + NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), + NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType), + NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), + NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType), + NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), + NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType), + NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), + NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType), + NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, + AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, + AddRetType | Add1ArgType), + NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), + NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), + NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0), + NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), + NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), + NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), + NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0), + NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType), + NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType), + NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType), + NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType), + NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType), + NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType), + NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType), + NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType), + NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType), + NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors), + NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0), + NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType), + NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType), + NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, + AddRetType | Add1ArgType), + NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), + NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), + NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType), + NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType), + NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType), + NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType), + NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType), + NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType), + NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType), + NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType), + NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType), + NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType), + NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, + VectorRet | Use64BitVectors), + NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, + VectorRet | Use64BitVectors), + NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType), + NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType), + NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType), + NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType), + NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType), + NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType), + NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType), + NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType), + NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType), + NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType), + NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), + NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), + NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType), + NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType), + NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType), + NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType), + NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType), + NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType), + NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType), + NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType), + NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType), + NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType), + NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType), + NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType), + NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0), + NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0), + NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0), + NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0), + NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType), + NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType), + NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType), + NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType), + NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType), + NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType), + NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType), + NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType), + NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType), + NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType), + // FP16 scalar intrinisics go here. + NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType), + NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), + NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), + NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), + NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), + NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), + NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), + NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), + NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), + NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), + NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), + NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), + NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), + NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), + NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), + NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), + NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), + NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), + NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), + NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), + NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType), + NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType), + NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType), + NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType), + NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType), +}; + +// Some intrinsics are equivalent for codegen. +static const std::pair NEONEquivalentIntrinsicMap[] = { + { + NEON::BI__builtin_neon_splat_lane_bf16, + NEON::BI__builtin_neon_splat_lane_v, + }, + { + NEON::BI__builtin_neon_splat_laneq_bf16, + NEON::BI__builtin_neon_splat_laneq_v, + }, + { + NEON::BI__builtin_neon_splatq_lane_bf16, + NEON::BI__builtin_neon_splatq_lane_v, + }, + { + NEON::BI__builtin_neon_splatq_laneq_bf16, + NEON::BI__builtin_neon_splatq_laneq_v, + }, + { + NEON::BI__builtin_neon_vabd_f16, + NEON::BI__builtin_neon_vabd_v, + }, + { + NEON::BI__builtin_neon_vabdq_f16, + NEON::BI__builtin_neon_vabdq_v, + }, + { + NEON::BI__builtin_neon_vabs_f16, + NEON::BI__builtin_neon_vabs_v, + }, + { + NEON::BI__builtin_neon_vabsq_f16, + NEON::BI__builtin_neon_vabsq_v, + }, + { + NEON::BI__builtin_neon_vcage_f16, + NEON::BI__builtin_neon_vcage_v, + }, + { + NEON::BI__builtin_neon_vcageq_f16, + NEON::BI__builtin_neon_vcageq_v, + }, + { + NEON::BI__builtin_neon_vcagt_f16, + NEON::BI__builtin_neon_vcagt_v, + }, + { + NEON::BI__builtin_neon_vcagtq_f16, + NEON::BI__builtin_neon_vcagtq_v, + }, + { + NEON::BI__builtin_neon_vcale_f16, + NEON::BI__builtin_neon_vcale_v, + }, + { + NEON::BI__builtin_neon_vcaleq_f16, + NEON::BI__builtin_neon_vcaleq_v, + }, + { + NEON::BI__builtin_neon_vcalt_f16, + NEON::BI__builtin_neon_vcalt_v, + }, + { + NEON::BI__builtin_neon_vcaltq_f16, + NEON::BI__builtin_neon_vcaltq_v, + }, + { + NEON::BI__builtin_neon_vceqz_f16, + NEON::BI__builtin_neon_vceqz_v, + }, + { + NEON::BI__builtin_neon_vceqzq_f16, + NEON::BI__builtin_neon_vceqzq_v, + }, + { + NEON::BI__builtin_neon_vcgez_f16, + NEON::BI__builtin_neon_vcgez_v, + }, + { + NEON::BI__builtin_neon_vcgezq_f16, + NEON::BI__builtin_neon_vcgezq_v, + }, + { + NEON::BI__builtin_neon_vcgtz_f16, + NEON::BI__builtin_neon_vcgtz_v, + }, + { + NEON::BI__builtin_neon_vcgtzq_f16, + NEON::BI__builtin_neon_vcgtzq_v, + }, + { + NEON::BI__builtin_neon_vclez_f16, + NEON::BI__builtin_neon_vclez_v, + }, + { + NEON::BI__builtin_neon_vclezq_f16, + NEON::BI__builtin_neon_vclezq_v, + }, + { + NEON::BI__builtin_neon_vcltz_f16, + NEON::BI__builtin_neon_vcltz_v, + }, + { + NEON::BI__builtin_neon_vcltzq_f16, + NEON::BI__builtin_neon_vcltzq_v, + }, + { + NEON::BI__builtin_neon_vfma_f16, + NEON::BI__builtin_neon_vfma_v, + }, + { + NEON::BI__builtin_neon_vfma_lane_f16, + NEON::BI__builtin_neon_vfma_lane_v, + }, + { + NEON::BI__builtin_neon_vfma_laneq_f16, + NEON::BI__builtin_neon_vfma_laneq_v, + }, + { + NEON::BI__builtin_neon_vfmaq_f16, + NEON::BI__builtin_neon_vfmaq_v, + }, + { + NEON::BI__builtin_neon_vfmaq_lane_f16, + NEON::BI__builtin_neon_vfmaq_lane_v, + }, + { + NEON::BI__builtin_neon_vfmaq_laneq_f16, + NEON::BI__builtin_neon_vfmaq_laneq_v, + }, + {NEON::BI__builtin_neon_vld1_bf16_x2, NEON::BI__builtin_neon_vld1_x2_v}, + {NEON::BI__builtin_neon_vld1_bf16_x3, NEON::BI__builtin_neon_vld1_x3_v}, + {NEON::BI__builtin_neon_vld1_bf16_x4, NEON::BI__builtin_neon_vld1_x4_v}, + {NEON::BI__builtin_neon_vld1_bf16, NEON::BI__builtin_neon_vld1_v}, + {NEON::BI__builtin_neon_vld1_dup_bf16, NEON::BI__builtin_neon_vld1_dup_v}, + {NEON::BI__builtin_neon_vld1_lane_bf16, NEON::BI__builtin_neon_vld1_lane_v}, + {NEON::BI__builtin_neon_vld1q_bf16_x2, NEON::BI__builtin_neon_vld1q_x2_v}, + {NEON::BI__builtin_neon_vld1q_bf16_x3, NEON::BI__builtin_neon_vld1q_x3_v}, + {NEON::BI__builtin_neon_vld1q_bf16_x4, NEON::BI__builtin_neon_vld1q_x4_v}, + {NEON::BI__builtin_neon_vld1q_bf16, NEON::BI__builtin_neon_vld1q_v}, + {NEON::BI__builtin_neon_vld1q_dup_bf16, NEON::BI__builtin_neon_vld1q_dup_v}, + {NEON::BI__builtin_neon_vld1q_lane_bf16, + NEON::BI__builtin_neon_vld1q_lane_v}, + {NEON::BI__builtin_neon_vld2_bf16, NEON::BI__builtin_neon_vld2_v}, + {NEON::BI__builtin_neon_vld2_dup_bf16, NEON::BI__builtin_neon_vld2_dup_v}, + {NEON::BI__builtin_neon_vld2_lane_bf16, NEON::BI__builtin_neon_vld2_lane_v}, + {NEON::BI__builtin_neon_vld2q_bf16, NEON::BI__builtin_neon_vld2q_v}, + {NEON::BI__builtin_neon_vld2q_dup_bf16, NEON::BI__builtin_neon_vld2q_dup_v}, + {NEON::BI__builtin_neon_vld2q_lane_bf16, + NEON::BI__builtin_neon_vld2q_lane_v}, + {NEON::BI__builtin_neon_vld3_bf16, NEON::BI__builtin_neon_vld3_v}, + {NEON::BI__builtin_neon_vld3_dup_bf16, NEON::BI__builtin_neon_vld3_dup_v}, + {NEON::BI__builtin_neon_vld3_lane_bf16, NEON::BI__builtin_neon_vld3_lane_v}, + {NEON::BI__builtin_neon_vld3q_bf16, NEON::BI__builtin_neon_vld3q_v}, + {NEON::BI__builtin_neon_vld3q_dup_bf16, NEON::BI__builtin_neon_vld3q_dup_v}, + {NEON::BI__builtin_neon_vld3q_lane_bf16, + NEON::BI__builtin_neon_vld3q_lane_v}, + {NEON::BI__builtin_neon_vld4_bf16, NEON::BI__builtin_neon_vld4_v}, + {NEON::BI__builtin_neon_vld4_dup_bf16, NEON::BI__builtin_neon_vld4_dup_v}, + {NEON::BI__builtin_neon_vld4_lane_bf16, NEON::BI__builtin_neon_vld4_lane_v}, + {NEON::BI__builtin_neon_vld4q_bf16, NEON::BI__builtin_neon_vld4q_v}, + {NEON::BI__builtin_neon_vld4q_dup_bf16, NEON::BI__builtin_neon_vld4q_dup_v}, + {NEON::BI__builtin_neon_vld4q_lane_bf16, + NEON::BI__builtin_neon_vld4q_lane_v}, + { + NEON::BI__builtin_neon_vmax_f16, + NEON::BI__builtin_neon_vmax_v, + }, + { + NEON::BI__builtin_neon_vmaxnm_f16, + NEON::BI__builtin_neon_vmaxnm_v, + }, + { + NEON::BI__builtin_neon_vmaxnmq_f16, + NEON::BI__builtin_neon_vmaxnmq_v, + }, + { + NEON::BI__builtin_neon_vmaxq_f16, + NEON::BI__builtin_neon_vmaxq_v, + }, + { + NEON::BI__builtin_neon_vmin_f16, + NEON::BI__builtin_neon_vmin_v, + }, + { + NEON::BI__builtin_neon_vminnm_f16, + NEON::BI__builtin_neon_vminnm_v, + }, + { + NEON::BI__builtin_neon_vminnmq_f16, + NEON::BI__builtin_neon_vminnmq_v, + }, + { + NEON::BI__builtin_neon_vminq_f16, + NEON::BI__builtin_neon_vminq_v, + }, + { + NEON::BI__builtin_neon_vmulx_f16, + NEON::BI__builtin_neon_vmulx_v, + }, + { + NEON::BI__builtin_neon_vmulxq_f16, + NEON::BI__builtin_neon_vmulxq_v, + }, + { + NEON::BI__builtin_neon_vpadd_f16, + NEON::BI__builtin_neon_vpadd_v, + }, + { + NEON::BI__builtin_neon_vpaddq_f16, + NEON::BI__builtin_neon_vpaddq_v, + }, + { + NEON::BI__builtin_neon_vpmax_f16, + NEON::BI__builtin_neon_vpmax_v, + }, + { + NEON::BI__builtin_neon_vpmaxnm_f16, + NEON::BI__builtin_neon_vpmaxnm_v, + }, + { + NEON::BI__builtin_neon_vpmaxnmq_f16, + NEON::BI__builtin_neon_vpmaxnmq_v, + }, + { + NEON::BI__builtin_neon_vpmaxq_f16, + NEON::BI__builtin_neon_vpmaxq_v, + }, + { + NEON::BI__builtin_neon_vpmin_f16, + NEON::BI__builtin_neon_vpmin_v, + }, + { + NEON::BI__builtin_neon_vpminnm_f16, + NEON::BI__builtin_neon_vpminnm_v, + }, + { + NEON::BI__builtin_neon_vpminnmq_f16, + NEON::BI__builtin_neon_vpminnmq_v, + }, + { + NEON::BI__builtin_neon_vpminq_f16, + NEON::BI__builtin_neon_vpminq_v, + }, + { + NEON::BI__builtin_neon_vrecpe_f16, + NEON::BI__builtin_neon_vrecpe_v, + }, + { + NEON::BI__builtin_neon_vrecpeq_f16, + NEON::BI__builtin_neon_vrecpeq_v, + }, + { + NEON::BI__builtin_neon_vrecps_f16, + NEON::BI__builtin_neon_vrecps_v, + }, + { + NEON::BI__builtin_neon_vrecpsq_f16, + NEON::BI__builtin_neon_vrecpsq_v, + }, + { + NEON::BI__builtin_neon_vrnd_f16, + NEON::BI__builtin_neon_vrnd_v, + }, + { + NEON::BI__builtin_neon_vrnda_f16, + NEON::BI__builtin_neon_vrnda_v, + }, + { + NEON::BI__builtin_neon_vrndaq_f16, + NEON::BI__builtin_neon_vrndaq_v, + }, + { + NEON::BI__builtin_neon_vrndi_f16, + NEON::BI__builtin_neon_vrndi_v, + }, + { + NEON::BI__builtin_neon_vrndiq_f16, + NEON::BI__builtin_neon_vrndiq_v, + }, + { + NEON::BI__builtin_neon_vrndm_f16, + NEON::BI__builtin_neon_vrndm_v, + }, + { + NEON::BI__builtin_neon_vrndmq_f16, + NEON::BI__builtin_neon_vrndmq_v, + }, + { + NEON::BI__builtin_neon_vrndn_f16, + NEON::BI__builtin_neon_vrndn_v, + }, + { + NEON::BI__builtin_neon_vrndnq_f16, + NEON::BI__builtin_neon_vrndnq_v, + }, + { + NEON::BI__builtin_neon_vrndp_f16, + NEON::BI__builtin_neon_vrndp_v, + }, + { + NEON::BI__builtin_neon_vrndpq_f16, + NEON::BI__builtin_neon_vrndpq_v, + }, + { + NEON::BI__builtin_neon_vrndq_f16, + NEON::BI__builtin_neon_vrndq_v, + }, + { + NEON::BI__builtin_neon_vrndx_f16, + NEON::BI__builtin_neon_vrndx_v, + }, + { + NEON::BI__builtin_neon_vrndxq_f16, + NEON::BI__builtin_neon_vrndxq_v, + }, + { + NEON::BI__builtin_neon_vrsqrte_f16, + NEON::BI__builtin_neon_vrsqrte_v, + }, + { + NEON::BI__builtin_neon_vrsqrteq_f16, + NEON::BI__builtin_neon_vrsqrteq_v, + }, + { + NEON::BI__builtin_neon_vrsqrts_f16, + NEON::BI__builtin_neon_vrsqrts_v, + }, + { + NEON::BI__builtin_neon_vrsqrtsq_f16, + NEON::BI__builtin_neon_vrsqrtsq_v, + }, + { + NEON::BI__builtin_neon_vsqrt_f16, + NEON::BI__builtin_neon_vsqrt_v, + }, + { + NEON::BI__builtin_neon_vsqrtq_f16, + NEON::BI__builtin_neon_vsqrtq_v, + }, + {NEON::BI__builtin_neon_vst1_bf16_x2, NEON::BI__builtin_neon_vst1_x2_v}, + {NEON::BI__builtin_neon_vst1_bf16_x3, NEON::BI__builtin_neon_vst1_x3_v}, + {NEON::BI__builtin_neon_vst1_bf16_x4, NEON::BI__builtin_neon_vst1_x4_v}, + {NEON::BI__builtin_neon_vst1_bf16, NEON::BI__builtin_neon_vst1_v}, + {NEON::BI__builtin_neon_vst1_lane_bf16, NEON::BI__builtin_neon_vst1_lane_v}, + {NEON::BI__builtin_neon_vst1q_bf16_x2, NEON::BI__builtin_neon_vst1q_x2_v}, + {NEON::BI__builtin_neon_vst1q_bf16_x3, NEON::BI__builtin_neon_vst1q_x3_v}, + {NEON::BI__builtin_neon_vst1q_bf16_x4, NEON::BI__builtin_neon_vst1q_x4_v}, + {NEON::BI__builtin_neon_vst1q_bf16, NEON::BI__builtin_neon_vst1q_v}, + {NEON::BI__builtin_neon_vst1q_lane_bf16, + NEON::BI__builtin_neon_vst1q_lane_v}, + {NEON::BI__builtin_neon_vst2_bf16, NEON::BI__builtin_neon_vst2_v}, + {NEON::BI__builtin_neon_vst2_lane_bf16, NEON::BI__builtin_neon_vst2_lane_v}, + {NEON::BI__builtin_neon_vst2q_bf16, NEON::BI__builtin_neon_vst2q_v}, + {NEON::BI__builtin_neon_vst2q_lane_bf16, + NEON::BI__builtin_neon_vst2q_lane_v}, + {NEON::BI__builtin_neon_vst3_bf16, NEON::BI__builtin_neon_vst3_v}, + {NEON::BI__builtin_neon_vst3_lane_bf16, NEON::BI__builtin_neon_vst3_lane_v}, + {NEON::BI__builtin_neon_vst3q_bf16, NEON::BI__builtin_neon_vst3q_v}, + {NEON::BI__builtin_neon_vst3q_lane_bf16, + NEON::BI__builtin_neon_vst3q_lane_v}, + {NEON::BI__builtin_neon_vst4_bf16, NEON::BI__builtin_neon_vst4_v}, + {NEON::BI__builtin_neon_vst4_lane_bf16, NEON::BI__builtin_neon_vst4_lane_v}, + {NEON::BI__builtin_neon_vst4q_bf16, NEON::BI__builtin_neon_vst4q_v}, + {NEON::BI__builtin_neon_vst4q_lane_bf16, + NEON::BI__builtin_neon_vst4q_lane_v}, + // The mangling rules cause us to have one ID for each type for + // vldap1(q)_lane and vstl1(q)_lane, but codegen is equivalent for all of + // them. Choose an arbitrary one to be handled as tha canonical variation. + {NEON::BI__builtin_neon_vldap1_lane_u64, + NEON::BI__builtin_neon_vldap1_lane_s64}, + {NEON::BI__builtin_neon_vldap1_lane_f64, + NEON::BI__builtin_neon_vldap1_lane_s64}, + {NEON::BI__builtin_neon_vldap1_lane_p64, + NEON::BI__builtin_neon_vldap1_lane_s64}, + {NEON::BI__builtin_neon_vldap1q_lane_u64, + NEON::BI__builtin_neon_vldap1q_lane_s64}, + {NEON::BI__builtin_neon_vldap1q_lane_f64, + NEON::BI__builtin_neon_vldap1q_lane_s64}, + {NEON::BI__builtin_neon_vldap1q_lane_p64, + NEON::BI__builtin_neon_vldap1q_lane_s64}, + {NEON::BI__builtin_neon_vstl1_lane_u64, + NEON::BI__builtin_neon_vstl1_lane_s64}, + {NEON::BI__builtin_neon_vstl1_lane_f64, + NEON::BI__builtin_neon_vstl1_lane_s64}, + {NEON::BI__builtin_neon_vstl1_lane_p64, + NEON::BI__builtin_neon_vstl1_lane_s64}, + {NEON::BI__builtin_neon_vstl1q_lane_u64, + NEON::BI__builtin_neon_vstl1q_lane_s64}, + {NEON::BI__builtin_neon_vstl1q_lane_f64, + NEON::BI__builtin_neon_vstl1q_lane_s64}, + {NEON::BI__builtin_neon_vstl1q_lane_p64, + NEON::BI__builtin_neon_vstl1q_lane_s64}, +}; + +#undef NEONMAP0 +#undef NEONMAP1 +#undef NEONMAP2 + +#define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ + { \ + #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ + TypeModifier \ + } + +#define SVEMAP2(NameBase, TypeModifier) \ + { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier } +static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = { +#define GET_SVE_LLVM_INTRINSIC_MAP +#include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def" +#include "clang/Basic/arm_sve_builtin_cg.inc" +#undef GET_SVE_LLVM_INTRINSIC_MAP +}; + +#undef SVEMAP1 +#undef SVEMAP2 + +#define SMEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ + { \ + #NameBase, SME::BI__builtin_sme_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ + TypeModifier \ + } + +#define SMEMAP2(NameBase, TypeModifier) \ + { #NameBase, SME::BI__builtin_sme_##NameBase, 0, 0, TypeModifier } +static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = { +#define GET_SME_LLVM_INTRINSIC_MAP +#include "clang/Basic/arm_sme_builtin_cg.inc" +#undef GET_SME_LLVM_INTRINSIC_MAP +}; + +#undef SMEMAP1 +#undef SMEMAP2 + +// Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code, +// we handle them here. +enum class CIRGenFunction::MSVCIntrin { + _BitScanForward, + _BitScanReverse, + _InterlockedAnd, + _InterlockedDecrement, + _InterlockedExchange, + _InterlockedExchangeAdd, + _InterlockedExchangeSub, + _InterlockedIncrement, + _InterlockedOr, + _InterlockedXor, + _InterlockedExchangeAdd_acq, + _InterlockedExchangeAdd_rel, + _InterlockedExchangeAdd_nf, + _InterlockedExchange_acq, + _InterlockedExchange_rel, + _InterlockedExchange_nf, + _InterlockedCompareExchange_acq, + _InterlockedCompareExchange_rel, + _InterlockedCompareExchange_nf, + _InterlockedCompareExchange128, + _InterlockedCompareExchange128_acq, + _InterlockedCompareExchange128_rel, + _InterlockedCompareExchange128_nf, + _InterlockedOr_acq, + _InterlockedOr_rel, + _InterlockedOr_nf, + _InterlockedXor_acq, + _InterlockedXor_rel, + _InterlockedXor_nf, + _InterlockedAnd_acq, + _InterlockedAnd_rel, + _InterlockedAnd_nf, + _InterlockedIncrement_acq, + _InterlockedIncrement_rel, + _InterlockedIncrement_nf, + _InterlockedDecrement_acq, + _InterlockedDecrement_rel, + _InterlockedDecrement_nf, + __fastfail, +}; + +static std::optional +translateAarch64ToMsvcIntrin(unsigned BuiltinID) { + using MSVCIntrin = CIRGenFunction::MSVCIntrin; + switch (BuiltinID) { + default: + return std::nullopt; + case clang::AArch64::BI_BitScanForward: + case clang::AArch64::BI_BitScanForward64: + return MSVCIntrin::_BitScanForward; + case clang::AArch64::BI_BitScanReverse: + case clang::AArch64::BI_BitScanReverse64: + return MSVCIntrin::_BitScanReverse; + case clang::AArch64::BI_InterlockedAnd64: + return MSVCIntrin::_InterlockedAnd; + case clang::AArch64::BI_InterlockedExchange64: + return MSVCIntrin::_InterlockedExchange; + case clang::AArch64::BI_InterlockedExchangeAdd64: + return MSVCIntrin::_InterlockedExchangeAdd; + case clang::AArch64::BI_InterlockedExchangeSub64: + return MSVCIntrin::_InterlockedExchangeSub; + case clang::AArch64::BI_InterlockedOr64: + return MSVCIntrin::_InterlockedOr; + case clang::AArch64::BI_InterlockedXor64: + return MSVCIntrin::_InterlockedXor; + case clang::AArch64::BI_InterlockedDecrement64: + return MSVCIntrin::_InterlockedDecrement; + case clang::AArch64::BI_InterlockedIncrement64: + return MSVCIntrin::_InterlockedIncrement; + case clang::AArch64::BI_InterlockedExchangeAdd8_acq: + case clang::AArch64::BI_InterlockedExchangeAdd16_acq: + case clang::AArch64::BI_InterlockedExchangeAdd_acq: + case clang::AArch64::BI_InterlockedExchangeAdd64_acq: + return MSVCIntrin::_InterlockedExchangeAdd_acq; + case clang::AArch64::BI_InterlockedExchangeAdd8_rel: + case clang::AArch64::BI_InterlockedExchangeAdd16_rel: + case clang::AArch64::BI_InterlockedExchangeAdd_rel: + case clang::AArch64::BI_InterlockedExchangeAdd64_rel: + return MSVCIntrin::_InterlockedExchangeAdd_rel; + case clang::AArch64::BI_InterlockedExchangeAdd8_nf: + case clang::AArch64::BI_InterlockedExchangeAdd16_nf: + case clang::AArch64::BI_InterlockedExchangeAdd_nf: + case clang::AArch64::BI_InterlockedExchangeAdd64_nf: + return MSVCIntrin::_InterlockedExchangeAdd_nf; + case clang::AArch64::BI_InterlockedExchange8_acq: + case clang::AArch64::BI_InterlockedExchange16_acq: + case clang::AArch64::BI_InterlockedExchange_acq: + case clang::AArch64::BI_InterlockedExchange64_acq: + return MSVCIntrin::_InterlockedExchange_acq; + case clang::AArch64::BI_InterlockedExchange8_rel: + case clang::AArch64::BI_InterlockedExchange16_rel: + case clang::AArch64::BI_InterlockedExchange_rel: + case clang::AArch64::BI_InterlockedExchange64_rel: + return MSVCIntrin::_InterlockedExchange_rel; + case clang::AArch64::BI_InterlockedExchange8_nf: + case clang::AArch64::BI_InterlockedExchange16_nf: + case clang::AArch64::BI_InterlockedExchange_nf: + case clang::AArch64::BI_InterlockedExchange64_nf: + return MSVCIntrin::_InterlockedExchange_nf; + case clang::AArch64::BI_InterlockedCompareExchange8_acq: + case clang::AArch64::BI_InterlockedCompareExchange16_acq: + case clang::AArch64::BI_InterlockedCompareExchange_acq: + case clang::AArch64::BI_InterlockedCompareExchange64_acq: + return MSVCIntrin::_InterlockedCompareExchange_acq; + case clang::AArch64::BI_InterlockedCompareExchange8_rel: + case clang::AArch64::BI_InterlockedCompareExchange16_rel: + case clang::AArch64::BI_InterlockedCompareExchange_rel: + case clang::AArch64::BI_InterlockedCompareExchange64_rel: + return MSVCIntrin::_InterlockedCompareExchange_rel; + case clang::AArch64::BI_InterlockedCompareExchange8_nf: + case clang::AArch64::BI_InterlockedCompareExchange16_nf: + case clang::AArch64::BI_InterlockedCompareExchange_nf: + case clang::AArch64::BI_InterlockedCompareExchange64_nf: + return MSVCIntrin::_InterlockedCompareExchange_nf; + case clang::AArch64::BI_InterlockedCompareExchange128: + return MSVCIntrin::_InterlockedCompareExchange128; + case clang::AArch64::BI_InterlockedCompareExchange128_acq: + return MSVCIntrin::_InterlockedCompareExchange128_acq; + case clang::AArch64::BI_InterlockedCompareExchange128_nf: + return MSVCIntrin::_InterlockedCompareExchange128_nf; + case clang::AArch64::BI_InterlockedCompareExchange128_rel: + return MSVCIntrin::_InterlockedCompareExchange128_rel; + case clang::AArch64::BI_InterlockedOr8_acq: + case clang::AArch64::BI_InterlockedOr16_acq: + case clang::AArch64::BI_InterlockedOr_acq: + case clang::AArch64::BI_InterlockedOr64_acq: + return MSVCIntrin::_InterlockedOr_acq; + case clang::AArch64::BI_InterlockedOr8_rel: + case clang::AArch64::BI_InterlockedOr16_rel: + case clang::AArch64::BI_InterlockedOr_rel: + case clang::AArch64::BI_InterlockedOr64_rel: + return MSVCIntrin::_InterlockedOr_rel; + case clang::AArch64::BI_InterlockedOr8_nf: + case clang::AArch64::BI_InterlockedOr16_nf: + case clang::AArch64::BI_InterlockedOr_nf: + case clang::AArch64::BI_InterlockedOr64_nf: + return MSVCIntrin::_InterlockedOr_nf; + case clang::AArch64::BI_InterlockedXor8_acq: + case clang::AArch64::BI_InterlockedXor16_acq: + case clang::AArch64::BI_InterlockedXor_acq: + case clang::AArch64::BI_InterlockedXor64_acq: + return MSVCIntrin::_InterlockedXor_acq; + case clang::AArch64::BI_InterlockedXor8_rel: + case clang::AArch64::BI_InterlockedXor16_rel: + case clang::AArch64::BI_InterlockedXor_rel: + case clang::AArch64::BI_InterlockedXor64_rel: + return MSVCIntrin::_InterlockedXor_rel; + case clang::AArch64::BI_InterlockedXor8_nf: + case clang::AArch64::BI_InterlockedXor16_nf: + case clang::AArch64::BI_InterlockedXor_nf: + case clang::AArch64::BI_InterlockedXor64_nf: + return MSVCIntrin::_InterlockedXor_nf; + case clang::AArch64::BI_InterlockedAnd8_acq: + case clang::AArch64::BI_InterlockedAnd16_acq: + case clang::AArch64::BI_InterlockedAnd_acq: + case clang::AArch64::BI_InterlockedAnd64_acq: + return MSVCIntrin::_InterlockedAnd_acq; + case clang::AArch64::BI_InterlockedAnd8_rel: + case clang::AArch64::BI_InterlockedAnd16_rel: + case clang::AArch64::BI_InterlockedAnd_rel: + case clang::AArch64::BI_InterlockedAnd64_rel: + return MSVCIntrin::_InterlockedAnd_rel; + case clang::AArch64::BI_InterlockedAnd8_nf: + case clang::AArch64::BI_InterlockedAnd16_nf: + case clang::AArch64::BI_InterlockedAnd_nf: + case clang::AArch64::BI_InterlockedAnd64_nf: + return MSVCIntrin::_InterlockedAnd_nf; + case clang::AArch64::BI_InterlockedIncrement16_acq: + case clang::AArch64::BI_InterlockedIncrement_acq: + case clang::AArch64::BI_InterlockedIncrement64_acq: + return MSVCIntrin::_InterlockedIncrement_acq; + case clang::AArch64::BI_InterlockedIncrement16_rel: + case clang::AArch64::BI_InterlockedIncrement_rel: + case clang::AArch64::BI_InterlockedIncrement64_rel: + return MSVCIntrin::_InterlockedIncrement_rel; + case clang::AArch64::BI_InterlockedIncrement16_nf: + case clang::AArch64::BI_InterlockedIncrement_nf: + case clang::AArch64::BI_InterlockedIncrement64_nf: + return MSVCIntrin::_InterlockedIncrement_nf; + case clang::AArch64::BI_InterlockedDecrement16_acq: + case clang::AArch64::BI_InterlockedDecrement_acq: + case clang::AArch64::BI_InterlockedDecrement64_acq: + return MSVCIntrin::_InterlockedDecrement_acq; + case clang::AArch64::BI_InterlockedDecrement16_rel: + case clang::AArch64::BI_InterlockedDecrement_rel: + case clang::AArch64::BI_InterlockedDecrement64_rel: + return MSVCIntrin::_InterlockedDecrement_rel; + case clang::AArch64::BI_InterlockedDecrement16_nf: + case clang::AArch64::BI_InterlockedDecrement_nf: + case clang::AArch64::BI_InterlockedDecrement64_nf: + return MSVCIntrin::_InterlockedDecrement_nf; + } + llvm_unreachable("must return from switch"); +} + +static bool AArch64SIMDIntrinsicsProvenSorted = false; +static bool AArch64SISDIntrinsicsProvenSorted = false; +static bool AArch64SVEIntrinsicsProvenSorted = false; +static bool AArch64SMEIntrinsicsProvenSorted = false; + +static const ARMVectorIntrinsicInfo * +findARMVectorIntrinsicInMap(ArrayRef IntrinsicMap, + unsigned BuiltinID, bool &MapProvenSorted) { + +#ifndef NDEBUG + if (!MapProvenSorted) { + assert(llvm::is_sorted(IntrinsicMap)); + MapProvenSorted = true; + } +#endif + + const ARMVectorIntrinsicInfo *Builtin = + llvm::lower_bound(IntrinsicMap, BuiltinID); + + if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID) + return Builtin; + + return nullptr; +} + +static mlir::Type GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, + bool HasLegalHalfType = true, bool V1Ty = false, + bool AllowBFloatArgsAndRet = true) { + int IsQuad = TypeFlags.isQuad(); + switch (TypeFlags.getEltType()) { + case NeonTypeFlags::Int8: + case NeonTypeFlags::Poly8: + return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), + CGF->UInt8PtrTy, + V1Ty ? 1 : (8 << IsQuad)); + case NeonTypeFlags::Int16: + case NeonTypeFlags::Poly16: + llvm_unreachable("NYI"); + case NeonTypeFlags::BFloat16: + if (AllowBFloatArgsAndRet) + llvm_unreachable("NYI"); + else + llvm_unreachable("NYI"); + case NeonTypeFlags::Float16: + if (HasLegalHalfType) + llvm_unreachable("NYI"); + else + llvm_unreachable("NYI"); + case NeonTypeFlags::Int32: + llvm_unreachable("NYI"); + case NeonTypeFlags::Int64: + case NeonTypeFlags::Poly64: + llvm_unreachable("NYI"); + case NeonTypeFlags::Poly128: + // FIXME: i128 and f128 doesn't get fully support in Clang and llvm. + // There is a lot of i128 and f128 API missing. + // so we use v16i8 to represent poly128 and get pattern matched. + llvm_unreachable("NYI"); + case NeonTypeFlags::Float32: + llvm_unreachable("NYI"); + case NeonTypeFlags::Float64: + llvm_unreachable("NYI"); + } + llvm_unreachable("Unknown vector element type!"); +} + +static mlir::Value buildAArch64TblBuiltinExpr(CIRGenFunction &CGF, + unsigned BuiltinID, + const CallExpr *E, + SmallVectorImpl &Ops, + llvm::Triple::ArchType Arch) { + unsigned int Int = 0; + [[maybe_unused]] const char *s = nullptr; + + switch (BuiltinID) { + default: + return {}; + case NEON::BI__builtin_neon_vtbl1_v: + case NEON::BI__builtin_neon_vqtbl1_v: + case NEON::BI__builtin_neon_vqtbl1q_v: + case NEON::BI__builtin_neon_vtbl2_v: + case NEON::BI__builtin_neon_vqtbl2_v: + case NEON::BI__builtin_neon_vqtbl2q_v: + case NEON::BI__builtin_neon_vtbl3_v: + case NEON::BI__builtin_neon_vqtbl3_v: + case NEON::BI__builtin_neon_vqtbl3q_v: + case NEON::BI__builtin_neon_vtbl4_v: + case NEON::BI__builtin_neon_vqtbl4_v: + case NEON::BI__builtin_neon_vqtbl4q_v: + break; + case NEON::BI__builtin_neon_vtbx1_v: + case NEON::BI__builtin_neon_vqtbx1_v: + case NEON::BI__builtin_neon_vqtbx1q_v: + case NEON::BI__builtin_neon_vtbx2_v: + case NEON::BI__builtin_neon_vqtbx2_v: + case NEON::BI__builtin_neon_vqtbx2q_v: + case NEON::BI__builtin_neon_vtbx3_v: + case NEON::BI__builtin_neon_vqtbx3_v: + case NEON::BI__builtin_neon_vqtbx3q_v: + case NEON::BI__builtin_neon_vtbx4_v: + case NEON::BI__builtin_neon_vqtbx4_v: + case NEON::BI__builtin_neon_vqtbx4q_v: + break; + } + + assert(E->getNumArgs() >= 3); + + // Get the last argument, which specifies the vector type. + const Expr *Arg = E->getArg(E->getNumArgs() - 1); + std::optional Result = + Arg->getIntegerConstantExpr(CGF.getContext()); + if (!Result) + return nullptr; + + // Determine the type of this overloaded NEON intrinsic. + NeonTypeFlags Type = Result->getZExtValue(); + auto Ty = GetNeonType(&CGF, Type); + if (!Ty) + return nullptr; + + // AArch64 scalar builtins are not overloaded, they do not have an extra + // argument that specifies the vector type, need to handle each case. + switch (BuiltinID) { + case NEON::BI__builtin_neon_vtbl1_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vtbl2_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vtbl3_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vtbl4_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vtbx1_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vtbx2_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vtbx3_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqtbl1_v: + case NEON::BI__builtin_neon_vqtbl1q_v: + Int = Intrinsic::aarch64_neon_tbl1; + s = "vtbl1"; + break; + case NEON::BI__builtin_neon_vqtbl2_v: + case NEON::BI__builtin_neon_vqtbl2q_v: { + Int = Intrinsic::aarch64_neon_tbl2; + s = "vtbl2"; + break; + case NEON::BI__builtin_neon_vqtbl3_v: + case NEON::BI__builtin_neon_vqtbl3q_v: + Int = Intrinsic::aarch64_neon_tbl3; + s = "vtbl3"; + break; + case NEON::BI__builtin_neon_vqtbl4_v: + case NEON::BI__builtin_neon_vqtbl4q_v: + Int = Intrinsic::aarch64_neon_tbl4; + s = "vtbl4"; + break; + case NEON::BI__builtin_neon_vqtbx1_v: + case NEON::BI__builtin_neon_vqtbx1q_v: + Int = Intrinsic::aarch64_neon_tbx1; + s = "vtbx1"; + break; + case NEON::BI__builtin_neon_vqtbx2_v: + case NEON::BI__builtin_neon_vqtbx2q_v: + Int = Intrinsic::aarch64_neon_tbx2; + s = "vtbx2"; + break; + case NEON::BI__builtin_neon_vqtbx3_v: + case NEON::BI__builtin_neon_vqtbx3q_v: + Int = Intrinsic::aarch64_neon_tbx3; + s = "vtbx3"; + break; + case NEON::BI__builtin_neon_vqtbx4_v: + case NEON::BI__builtin_neon_vqtbx4q_v: + Int = Intrinsic::aarch64_neon_tbx4; + s = "vtbx4"; + break; + } + } + + if (!Int) + return nullptr; + + llvm_unreachable("NYI"); +} + +mlir::Value CIRGenFunction::buildAArch64SMEBuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { + auto *Builtin = findARMVectorIntrinsicInMap(AArch64SMEIntrinsicMap, BuiltinID, + AArch64SMEIntrinsicsProvenSorted); + (void)Builtin; + llvm_unreachable("NYI"); +} + +mlir::Value CIRGenFunction::buildAArch64SVEBuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { + if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 && + BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64_x4) { + llvm_unreachable("NYI"); + } + auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID, + AArch64SVEIntrinsicsProvenSorted); + (void)Builtin; + llvm_unreachable("NYI"); +} mlir::Value CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch) { if (BuiltinID >= clang::AArch64::FirstSVEBuiltin && BuiltinID <= clang::AArch64::LastSVEBuiltin) - llvm_unreachable("NYI"); + return buildAArch64SVEBuiltinExpr(BuiltinID, E); if (BuiltinID >= clang::AArch64::FirstSMEBuiltin && BuiltinID <= clang::AArch64::LastSMEBuiltin) - llvm_unreachable("NYI"); + return buildAArch64SMEBuiltinExpr(BuiltinID, E); if (BuiltinID == Builtin::BI__builtin_cpu_supports) llvm_unreachable("NYI"); @@ -281,16 +1789,16 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } - if (BuiltinID == clang::AArch64::BI_ReadStatusReg || - BuiltinID == clang::AArch64::BI_WriteStatusReg) { + if (BuiltinID == clang::AArch64::BI__builtin_sponentry) { llvm_unreachable("NYI"); } - if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) { + if (BuiltinID == clang::AArch64::BI_ReadStatusReg || + BuiltinID == clang::AArch64::BI_WriteStatusReg) { llvm_unreachable("NYI"); } - if (BuiltinID == clang::AArch64::BI__builtin_sponentry) { + if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) { llvm_unreachable("NYI"); } @@ -348,12 +1856,21 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } + if (BuiltinID == NEON::BI__builtin_neon_vcvth_bf16_f32) + llvm_unreachable("NYI"); + // Handle MSVC intrinsics before argument evaluation to prevent double // evaluation. - assert(!UnimplementedFeature::translateAarch64ToMsvcIntrin()); + if (std::optional MsvcIntId = + translateAarch64ToMsvcIntrin(BuiltinID)) + llvm_unreachable("NYI"); // Some intrinsics are equivalent - if they are use the base intrinsic ID. - assert(!UnimplementedFeature::neonEquivalentIntrinsicMap()); + auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) { + return P.first == BuiltinID; + }); + if (It != end(NEONEquivalentIntrinsicMap)) + BuiltinID = It->second; // Find out if any arguments are required to be integer constant // expressions. @@ -363,6 +1880,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, assert(Error == ASTContext::GE_None && "Should not codegen an error"); llvm::SmallVector Ops; + Address PtrOp0 = Address::invalid(); for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { if (i == 0) { switch (BuiltinID) { @@ -382,13 +1900,21 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vstl1q_lane_s64: // Get the alignment for the argument in addition to the value; // we'll use it later. - llvm_unreachable("NYI"); + PtrOp0 = buildPointerWithAlignment(E->getArg(0)); + Ops.push_back(PtrOp0.emitRawPointer()); + continue; } } llvm_unreachable("NYI"); } - assert(!UnimplementedFeature::arm64SISDIntrinsicMap()); + auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap); + const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( + SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted); + + if (Builtin) { + llvm_unreachable("NYI"); + } const Expr *Arg = E->getArg(E->getNumArgs() - 1); NeonTypeFlags Type(0); @@ -676,6 +2202,10 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vduph_laneq_f16: { llvm_unreachable("NYI"); } + case NEON::BI__builtin_neon_vcvt_bf16_f32: + case NEON::BI__builtin_neon_vcvtq_low_bf16_f32: + case NEON::BI__builtin_neon_vcvtq_high_bf16_f32: + llvm_unreachable("NYI"); case clang::AArch64::BI_InterlockedAdd: case clang::AArch64::BI_InterlockedAdd64: { @@ -683,7 +2213,557 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } } - // From here on it's pure NEON based - assert(UnimplementedFeature::getNeonType() && "NYI"); - return {}; + auto Ty = GetNeonType(this, Type); + if (!Ty) + return nullptr; + + // Not all intrinsics handled by the common case work for AArch64 yet, so only + // defer to common code if it's been added to our special map. + Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, + AArch64SIMDIntrinsicsProvenSorted); + if (Builtin) { + llvm_unreachable("NYI"); + } + + if (mlir::Value V = + buildAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) + return V; + + switch (BuiltinID) { + default: + return nullptr; + case NEON::BI__builtin_neon_vbsl_v: + case NEON::BI__builtin_neon_vbslq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vfma_lane_v: + case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types + // The ARM builtins (and instructions) have the addend as the first + // operand, but the 'fma' intrinsics have it last. Swap it around here. + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vfma_laneq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vfmaq_laneq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vfmah_lane_f16: + case NEON::BI__builtin_neon_vfmas_lane_f32: + case NEON::BI__builtin_neon_vfmah_laneq_f16: + case NEON::BI__builtin_neon_vfmas_laneq_f32: + case NEON::BI__builtin_neon_vfmad_lane_f64: + case NEON::BI__builtin_neon_vfmad_laneq_f64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmull_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vmax_v: + case NEON::BI__builtin_neon_vmaxq_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vmaxh_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmin_v: + case NEON::BI__builtin_neon_vminq_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vminh_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vabd_v: + case NEON::BI__builtin_neon_vabdq_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vpadal_v: + case NEON::BI__builtin_neon_vpadalq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vpmin_v: + case NEON::BI__builtin_neon_vpminq_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vpmax_v: + case NEON::BI__builtin_neon_vpmaxq_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vminnm_v: + case NEON::BI__builtin_neon_vminnmq_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vminnmh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vmaxnm_v: + case NEON::BI__builtin_neon_vmaxnmq_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vmaxnmh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vrecpss_f32: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrecpsd_f64: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vrecpsh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vqshrun_n_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vqrshrun_n_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vqshrn_n_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vrshrn_n_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vqrshrn_n_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vrndah_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrnda_v: + case NEON::BI__builtin_neon_vrndaq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndih_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndmh_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndm_v: + case NEON::BI__builtin_neon_vrndmq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndnh_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndn_v: + case NEON::BI__builtin_neon_vrndnq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndns_f32: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndph_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndp_v: + case NEON::BI__builtin_neon_vrndpq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndxh_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndx_v: + case NEON::BI__builtin_neon_vrndxq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrndh_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrnd32x_f32: + case NEON::BI__builtin_neon_vrnd32xq_f32: + case NEON::BI__builtin_neon_vrnd32x_f64: + case NEON::BI__builtin_neon_vrnd32xq_f64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrnd32z_f32: + case NEON::BI__builtin_neon_vrnd32zq_f32: + case NEON::BI__builtin_neon_vrnd32z_f64: + case NEON::BI__builtin_neon_vrnd32zq_f64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrnd64x_f32: + case NEON::BI__builtin_neon_vrnd64xq_f32: + case NEON::BI__builtin_neon_vrnd64x_f64: + case NEON::BI__builtin_neon_vrnd64xq_f64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrnd64z_f32: + case NEON::BI__builtin_neon_vrnd64zq_f32: + case NEON::BI__builtin_neon_vrnd64z_f64: + case NEON::BI__builtin_neon_vrnd64zq_f64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrnd_v: + case NEON::BI__builtin_neon_vrndq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvt_f64_v: + case NEON::BI__builtin_neon_vcvtq_f64_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vcvt_f64_f32: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvt_f32_f64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvt_s32_v: + case NEON::BI__builtin_neon_vcvt_u32_v: + case NEON::BI__builtin_neon_vcvt_s64_v: + case NEON::BI__builtin_neon_vcvt_u64_v: + case NEON::BI__builtin_neon_vcvt_s16_f16: + case NEON::BI__builtin_neon_vcvt_u16_f16: + case NEON::BI__builtin_neon_vcvtq_s32_v: + case NEON::BI__builtin_neon_vcvtq_u32_v: + case NEON::BI__builtin_neon_vcvtq_s64_v: + case NEON::BI__builtin_neon_vcvtq_u64_v: + case NEON::BI__builtin_neon_vcvtq_s16_f16: + case NEON::BI__builtin_neon_vcvtq_u16_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvta_s16_f16: + case NEON::BI__builtin_neon_vcvta_u16_f16: + case NEON::BI__builtin_neon_vcvta_s32_v: + case NEON::BI__builtin_neon_vcvtaq_s16_f16: + case NEON::BI__builtin_neon_vcvtaq_s32_v: + case NEON::BI__builtin_neon_vcvta_u32_v: + case NEON::BI__builtin_neon_vcvtaq_u16_f16: + case NEON::BI__builtin_neon_vcvtaq_u32_v: + case NEON::BI__builtin_neon_vcvta_s64_v: + case NEON::BI__builtin_neon_vcvtaq_s64_v: + case NEON::BI__builtin_neon_vcvta_u64_v: + case NEON::BI__builtin_neon_vcvtaq_u64_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvtm_s16_f16: + case NEON::BI__builtin_neon_vcvtm_s32_v: + case NEON::BI__builtin_neon_vcvtmq_s16_f16: + case NEON::BI__builtin_neon_vcvtmq_s32_v: + case NEON::BI__builtin_neon_vcvtm_u16_f16: + case NEON::BI__builtin_neon_vcvtm_u32_v: + case NEON::BI__builtin_neon_vcvtmq_u16_f16: + case NEON::BI__builtin_neon_vcvtmq_u32_v: + case NEON::BI__builtin_neon_vcvtm_s64_v: + case NEON::BI__builtin_neon_vcvtmq_s64_v: + case NEON::BI__builtin_neon_vcvtm_u64_v: + case NEON::BI__builtin_neon_vcvtmq_u64_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvtn_s16_f16: + case NEON::BI__builtin_neon_vcvtn_s32_v: + case NEON::BI__builtin_neon_vcvtnq_s16_f16: + case NEON::BI__builtin_neon_vcvtnq_s32_v: + case NEON::BI__builtin_neon_vcvtn_u16_f16: + case NEON::BI__builtin_neon_vcvtn_u32_v: + case NEON::BI__builtin_neon_vcvtnq_u16_f16: + case NEON::BI__builtin_neon_vcvtnq_u32_v: + case NEON::BI__builtin_neon_vcvtn_s64_v: + case NEON::BI__builtin_neon_vcvtnq_s64_v: + case NEON::BI__builtin_neon_vcvtn_u64_v: + case NEON::BI__builtin_neon_vcvtnq_u64_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vcvtp_s16_f16: + case NEON::BI__builtin_neon_vcvtp_s32_v: + case NEON::BI__builtin_neon_vcvtpq_s16_f16: + case NEON::BI__builtin_neon_vcvtpq_s32_v: + case NEON::BI__builtin_neon_vcvtp_u16_f16: + case NEON::BI__builtin_neon_vcvtp_u32_v: + case NEON::BI__builtin_neon_vcvtpq_u16_f16: + case NEON::BI__builtin_neon_vcvtpq_u32_v: + case NEON::BI__builtin_neon_vcvtp_s64_v: + case NEON::BI__builtin_neon_vcvtpq_s64_v: + case NEON::BI__builtin_neon_vcvtp_u64_v: + case NEON::BI__builtin_neon_vcvtpq_u64_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmulx_v: + case NEON::BI__builtin_neon_vmulxq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmulxh_lane_f16: + case NEON::BI__builtin_neon_vmulxh_laneq_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmul_lane_v: + case NEON::BI__builtin_neon_vmul_laneq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vnegd_s64: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vnegh_f16: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vpmaxnm_v: + case NEON::BI__builtin_neon_vpmaxnmq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vpminnm_v: + case NEON::BI__builtin_neon_vpminnmq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vsqrth_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vsqrt_v: + case NEON::BI__builtin_neon_vsqrtq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vrbit_v: + case NEON::BI__builtin_neon_vrbitq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddv_u8: + // FIXME: These are handled by the AArch64 scalar code. + llvm_unreachable("NYI"); + [[fallthrough]]; + case NEON::BI__builtin_neon_vaddv_s8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddv_u16: + llvm_unreachable("NYI"); + [[fallthrough]]; + case NEON::BI__builtin_neon_vaddv_s16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddvq_u8: + llvm_unreachable("NYI"); + [[fallthrough]]; + case NEON::BI__builtin_neon_vaddvq_s8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddvq_u16: + llvm_unreachable("NYI"); + [[fallthrough]]; + case NEON::BI__builtin_neon_vaddvq_s16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxv_u8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxv_u16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxvq_u8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxvq_u16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxv_s8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxv_s16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxvq_s8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxvq_s16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxv_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxvq_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminv_u8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminv_u16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminvq_u8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminvq_u16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminv_s8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminv_s16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminvq_s8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminvq_s16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminv_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminvq_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxnmv_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmaxnmvq_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminnmv_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vminnmvq_f16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vmul_n_f64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddlv_u8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddlv_u16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddlvq_u8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddlvq_u16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddlv_s8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddlv_s16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddlvq_s8: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vaddlvq_s16: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vsri_n_v: + case NEON::BI__builtin_neon_vsriq_n_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vsli_n_v: + case NEON::BI__builtin_neon_vsliq_n_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vsra_n_v: + case NEON::BI__builtin_neon_vsraq_n_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vrsra_n_v: + case NEON::BI__builtin_neon_vrsraq_n_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld1_v: + case NEON::BI__builtin_neon_vld1q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vst1_v: + case NEON::BI__builtin_neon_vst1q_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vld1_lane_v: + case NEON::BI__builtin_neon_vld1q_lane_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vldap1_lane_s64: + case NEON::BI__builtin_neon_vldap1q_lane_s64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld1_dup_v: + case NEON::BI__builtin_neon_vld1q_dup_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vst1_lane_v: + case NEON::BI__builtin_neon_vst1q_lane_v: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vstl1_lane_s64: + case NEON::BI__builtin_neon_vstl1q_lane_s64: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld2_v: + case NEON::BI__builtin_neon_vld2q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld3_v: + case NEON::BI__builtin_neon_vld3q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld4_v: + case NEON::BI__builtin_neon_vld4q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld2_dup_v: + case NEON::BI__builtin_neon_vld2q_dup_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld3_dup_v: + case NEON::BI__builtin_neon_vld3q_dup_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld4_dup_v: + case NEON::BI__builtin_neon_vld4q_dup_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld2_lane_v: + case NEON::BI__builtin_neon_vld2q_lane_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld3_lane_v: + case NEON::BI__builtin_neon_vld3q_lane_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vld4_lane_v: + case NEON::BI__builtin_neon_vld4q_lane_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vst2_v: + case NEON::BI__builtin_neon_vst2q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vst2_lane_v: + case NEON::BI__builtin_neon_vst2q_lane_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vst3_v: + case NEON::BI__builtin_neon_vst3q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vst3_lane_v: + case NEON::BI__builtin_neon_vst3q_lane_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vst4_v: + case NEON::BI__builtin_neon_vst4q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vst4_lane_v: + case NEON::BI__builtin_neon_vst4q_lane_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vtrn_v: + case NEON::BI__builtin_neon_vtrnq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vuzp_v: + case NEON::BI__builtin_neon_vuzpq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vzip_v: + case NEON::BI__builtin_neon_vzipq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqtbl1q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqtbl2q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqtbl3q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqtbl4q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqtbx1q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqtbx2q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqtbx3q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vqtbx4q_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vsqadd_v: + case NEON::BI__builtin_neon_vsqaddq_v: { + llvm_unreachable("NYI"); + } + case NEON::BI__builtin_neon_vuqadd_v: + case NEON::BI__builtin_neon_vuqaddq_v: { + llvm_unreachable("NYI"); + } + } } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 92f29b088050..b9032842ecab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -885,6 +885,15 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildCoroutineIntrinsic(const CallExpr *E, unsigned int IID); RValue buildCoroutineFrame(); + enum class MSVCIntrin; + + mlir::Value buildARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch); + mlir::Value buildARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch); + /// Build a debug stoppoint if we are emitting debug info. void buildStopPoint(const Stmt *S); @@ -1228,7 +1237,10 @@ class CIRGenFunction : public CIRGenTypeCache { // Target specific builtin emission mlir::Value buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch); + mlir::Value buildAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E); + mlir::Value buildAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E); mlir::Value buildX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E); /// Given an expression with a pointer type, emit the value and compute our diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 0dcfe19107e0..0ad929405984 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -175,12 +175,6 @@ struct UnimplementedFeature { static bool asm_memory_effects() { return false; } static bool asm_vector_type() { return false; } static bool asm_llvm_assume() { return false; } - - // ARM builtins - static bool translateAarch64ToMsvcIntrin() { return false; } - static bool neonEquivalentIntrinsicMap() { return false; } - static bool arm64SISDIntrinsicMap() { return false; } - static bool getNeonType() { return false; } }; } // namespace cir From 756fe01263e0db7c039b60c2a44fd38da254978b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 23 May 2024 11:42:24 -0700 Subject: [PATCH 1586/2301] [CIR][CIRGen][NFC] Update ReturnValueSlot according to upstream --- clang/lib/CIR/CodeGen/CIRGenCall.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index a192c6e1db80..866ba9af7a3b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -267,8 +267,11 @@ class ReturnValueSlot { Address Addr = Address::invalid(); // Return value slot flags + LLVM_PREFERRED_TYPE(bool) unsigned IsVolatile : 1; + LLVM_PREFERRED_TYPE(bool) unsigned IsUnused : 1; + LLVM_PREFERRED_TYPE(bool) unsigned IsExternallyDestructed : 1; public: @@ -284,6 +287,7 @@ class ReturnValueSlot { Address getValue() const { return Addr; } bool isUnused() const { return IsUnused; } bool isExternallyDestructed() const { return IsExternallyDestructed; } + Address getAddress() const { return Addr; } }; enum class FnInfoOpts { From 83db593795cba58ea2052dabb57f31f7a611193f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 23 May 2024 11:43:25 -0700 Subject: [PATCH 1587/2301] [CIR][CIRGen] AArch64 builtins: add support for neon vld1/vst1 The alignment is still super conversative but proper support should come next. The added test file also contains a huge pile of builtins we need to support and should allow for incremental support here. Next steps: fix alignement and enable testing for other vld1/vst1 variants. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 3 + clang/lib/CIR/CodeGen/CIRGenBuilder.h | 17 + clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 17 +- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 34 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 + .../CodeGen/UnimplementedFeatureGuarding.h | 1 + .../CIR/CodeGen/aarch64-neon-intrinsics.c | 17433 ++++++++++++++++ 7 files changed, 17497 insertions(+), 10 deletions(-) create mode 100644 clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index dc61fe08e96f..a46d90d4b8af 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -177,6 +177,9 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, mlir::Value dst, bool _volatile = false, ::mlir::cir::MemOrderAttr order = {}) { + if (dst.getType().cast().getPointee() != + val.getType()) + dst = createPtrBitcast(dst, val.getType()); return create(loc, val, dst, _volatile, order); } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 41248e8f9caf..9f2c34dd84be 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -754,6 +754,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { [[maybe_unused]] bool isVolatile) { assert(!UnimplementedFeature::volatileLoadOrStore()); assert(!UnimplementedFeature::alignedLoad()); + // FIXME: create a more generic version of createLoad and rewrite this and + // others in terms of that. Ideally there should only be one call to + // create in all helpers. + if (ty != ptr.getType().cast().getPointee()) + ptr = createPtrBitcast(ptr, ty); return create(loc, ty, ptr); } @@ -781,6 +786,18 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return CIRBaseBuilderTy::createStore(loc, flag, dst); } + mlir::cir::StoreOp createAlignedStore(mlir::Location loc, mlir::Value val, + mlir::Value dst, + [[maybe_unused]] clang::CharUnits align, + bool _volatile = false, + ::mlir::cir::MemOrderAttr order = {}) { + // TODO: add alignment for LoadOp/StoreOp, right now LowerToLLVM knows + // how to figure out for most part, but it's possible the client might want + // to enforce a different alignment. + assert(!UnimplementedFeature::alignedStore()); + return CIRBaseBuilderTy::createStore(loc, val, dst, _volatile, order); + } + // Convert byte offset to sequence of high-level indices suitable for // GlobalViewAttr. Ideally we shouldn't deal with low-level offsets at all // but currently some parts of Clang AST, which we don't want to touch just diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 26d911030d77..cb4cae9c9c26 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1071,12 +1071,21 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, } // Now see if we can emit a target-specific builtin. - if (auto v = buildTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { - llvm_unreachable("NYI"); + if (auto V = buildTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { + switch (EvalKind) { + case TEK_Scalar: + if (V.getType().isa()) + return RValue::get(nullptr); + return RValue::get(V); + case TEK_Aggregate: + llvm_unreachable("NYI"); + case TEK_Complex: + llvm_unreachable("No current target builtin returns complex"); + } + llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr"); } - llvm_unreachable("NYI"); - // ErrorUnsupported(E, "builtin function"); + CGM.ErrorUnsupported(E, "builtin function"); // Unknown builtin, for now just dump it out and return undef. return GetUndefRValue(E->getType()); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index fde7145ab61c..a3e494574a12 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -1362,8 +1362,7 @@ static mlir::Type GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, case NeonTypeFlags::Int8: case NeonTypeFlags::Poly8: return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), - CGF->UInt8PtrTy, - V1Ty ? 1 : (8 << IsQuad)); + CGF->UInt8Ty, V1Ty ? 1 : (8 << IsQuad)); case NeonTypeFlags::Int16: case NeonTypeFlags::Poly16: llvm_unreachable("NYI"); @@ -1542,6 +1541,23 @@ mlir::Value CIRGenFunction::buildAArch64SVEBuiltinExpr(unsigned BuiltinID, llvm_unreachable("NYI"); } +mlir::Value CIRGenFunction::buildScalarOrConstFoldImmArg(unsigned ICEArguments, + unsigned Idx, + const CallExpr *E) { + mlir::Value Arg = {}; + if ((ICEArguments & (1 << Idx)) == 0) { + Arg = buildScalarExpr(E->getArg(Idx)); + } else { + // If this is required to be a constant, constant fold it so that we + // know that the generated intrinsic gets a ConstantInt. + std::optional Result = + E->getArg(Idx)->getIntegerConstantExpr(getContext()); + assert(Result && "Expected argument to be a constant"); + Arg = builder.getConstInt(getLoc(E->getSourceRange()), *Result); + } + return Arg; +} + mlir::Value CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, @@ -1905,7 +1921,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, continue; } } - llvm_unreachable("NYI"); + Ops.push_back(buildScalarOrConstFoldImmArg(ICEArguments, i, E)); } auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap); @@ -2229,6 +2245,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, buildAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) return V; + mlir::Type VTy = Ty; switch (BuiltinID) { default: return nullptr; @@ -2637,11 +2654,16 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vld1_v: case NEON::BI__builtin_neon_vld1q_v: { - llvm_unreachable("NYI"); + return builder.createAlignedLoad(Ops[0].getLoc(), VTy, Ops[0], + PtrOp0.getAlignment()); } case NEON::BI__builtin_neon_vst1_v: - case NEON::BI__builtin_neon_vst1q_v: - llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vst1q_v: { + Ops[1] = builder.createBitcast(Ops[1], VTy); + (void)builder.createAlignedStore(Ops[1].getLoc(), Ops[1], Ops[0], + PtrOp0.getAlignment()); + return Ops[1]; + } case NEON::BI__builtin_neon_vld1_lane_v: case NEON::BI__builtin_neon_vld1q_lane_v: { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index b9032842ecab..e8e192768a4d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1236,6 +1236,8 @@ class CIRGenFunction : public CIRGenTypeCache { ReturnValueSlot ReturnValue); // Target specific builtin emission + mlir::Value buildScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, + const CallExpr *E); mlir::Value buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 0ad929405984..19ea03daf7a7 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -152,6 +152,7 @@ struct UnimplementedFeature { static bool requiresCleanups() { return false; } static bool constantFoldsToSimpleInteger() { return false; } static bool alignedLoad() { return false; } + static bool alignedStore() { return false; } static bool checkFunctionCallABI() { return false; } static bool zeroInitializer() { return false; } static bool targetCodeGenInfoIsProtoCallVariadic() { return false; } diff --git a/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c b/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c new file mode 100644 index 000000000000..56af730ba81e --- /dev/null +++ b/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c @@ -0,0 +1,17433 @@ +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-llvm -o - %s \ +// RUN: | opt -S -passes=mem2reg,simplifycfg \ +// RUN: | FileCheck --check-prefix=LLVM %s + +// REQUIRES: aarch64-registered-target || arm-registered-target + +// This test mimics clang/test/CodeGen/aarch64-neon-intrinsics.c, which eventually +// CIR shall be able to support fully. Since this is going to take some time to converge, +// the unsupported/NYI code is commented out, so that we can incrementally improve this. +// The NYI filecheck used contains the LLVM output from OG codegen that should guide the +// correct result when implementing this into the CIR pipeline. + +#include + +// NYI-LABEL: @test_vadd_s8( +// NYI: [[ADD_I:%.*]] = add <8 x i8> %v1, %v2 +// NYI: ret <8 x i8> [[ADD_I]] +// int8x8_t test_vadd_s8(int8x8_t v1, int8x8_t v2) { +// return vadd_s8(v1, v2); +// } + +// NYI-LABEL: @test_vadd_s16( +// NYI: [[ADD_I:%.*]] = add <4 x i16> %v1, %v2 +// NYI: ret <4 x i16> [[ADD_I]] +// int16x4_t test_vadd_s16(int16x4_t v1, int16x4_t v2) { +// return vadd_s16(v1, v2); +// } + +// NYI-LABEL: @test_vadd_s32( +// NYI: [[ADD_I:%.*]] = add <2 x i32> %v1, %v2 +// NYI: ret <2 x i32> [[ADD_I]] +// int32x2_t test_vadd_s32(int32x2_t v1, int32x2_t v2) { +// return vadd_s32(v1, v2); +// } + +// NYI-LABEL: @test_vadd_s64( +// NYI: [[ADD_I:%.*]] = add <1 x i64> %v1, %v2 +// NYI: ret <1 x i64> [[ADD_I]] +// int64x1_t test_vadd_s64(int64x1_t v1, int64x1_t v2) { +// return vadd_s64(v1, v2); +// } + +// NYI-LABEL: @test_vadd_f32( +// NYI: [[ADD_I:%.*]] = fadd <2 x float> %v1, %v2 +// NYI: ret <2 x float> [[ADD_I]] +// float32x2_t test_vadd_f32(float32x2_t v1, float32x2_t v2) { +// return vadd_f32(v1, v2); +// } + +// NYI-LABEL: @test_vadd_u8( +// NYI: [[ADD_I:%.*]] = add <8 x i8> %v1, %v2 +// NYI: ret <8 x i8> [[ADD_I]] +// uint8x8_t test_vadd_u8(uint8x8_t v1, uint8x8_t v2) { +// return vadd_u8(v1, v2); +// } + +// NYI-LABEL: @test_vadd_u16( +// NYI: [[ADD_I:%.*]] = add <4 x i16> %v1, %v2 +// NYI: ret <4 x i16> [[ADD_I]] +// uint16x4_t test_vadd_u16(uint16x4_t v1, uint16x4_t v2) { +// return vadd_u16(v1, v2); +// } + +// NYI-LABEL: @test_vadd_u32( +// NYI: [[ADD_I:%.*]] = add <2 x i32> %v1, %v2 +// NYI: ret <2 x i32> [[ADD_I]] +// uint32x2_t test_vadd_u32(uint32x2_t v1, uint32x2_t v2) { +// return vadd_u32(v1, v2); +// } + +// NYI-LABEL: @test_vadd_u64( +// NYI: [[ADD_I:%.*]] = add <1 x i64> %v1, %v2 +// NYI: ret <1 x i64> [[ADD_I]] +// uint64x1_t test_vadd_u64(uint64x1_t v1, uint64x1_t v2) { +// return vadd_u64(v1, v2); +// } + +// NYI-LABEL: @test_vaddq_s8( +// NYI: [[ADD_I:%.*]] = add <16 x i8> %v1, %v2 +// NYI: ret <16 x i8> [[ADD_I]] +// int8x16_t test_vaddq_s8(int8x16_t v1, int8x16_t v2) { +// return vaddq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vaddq_s16( +// NYI: [[ADD_I:%.*]] = add <8 x i16> %v1, %v2 +// NYI: ret <8 x i16> [[ADD_I]] +// int16x8_t test_vaddq_s16(int16x8_t v1, int16x8_t v2) { +// return vaddq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vaddq_s32( +// NYI: [[ADD_I:%.*]] = add <4 x i32> %v1, %v2 +// NYI: ret <4 x i32> [[ADD_I]] +// int32x4_t test_vaddq_s32(int32x4_t v1, int32x4_t v2) { +// return vaddq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vaddq_s64( +// NYI: [[ADD_I:%.*]] = add <2 x i64> %v1, %v2 +// NYI: ret <2 x i64> [[ADD_I]] +// int64x2_t test_vaddq_s64(int64x2_t v1, int64x2_t v2) { +// return vaddq_s64(v1, v2); +// } + +// NYI-LABEL: @test_vaddq_f32( +// NYI: [[ADD_I:%.*]] = fadd <4 x float> %v1, %v2 +// NYI: ret <4 x float> [[ADD_I]] +// float32x4_t test_vaddq_f32(float32x4_t v1, float32x4_t v2) { +// return vaddq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vaddq_f64( +// NYI: [[ADD_I:%.*]] = fadd <2 x double> %v1, %v2 +// NYI: ret <2 x double> [[ADD_I]] +// float64x2_t test_vaddq_f64(float64x2_t v1, float64x2_t v2) { +// return vaddq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vaddq_u8( +// NYI: [[ADD_I:%.*]] = add <16 x i8> %v1, %v2 +// NYI: ret <16 x i8> [[ADD_I]] +// uint8x16_t test_vaddq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vaddq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vaddq_u16( +// NYI: [[ADD_I:%.*]] = add <8 x i16> %v1, %v2 +// NYI: ret <8 x i16> [[ADD_I]] +// uint16x8_t test_vaddq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vaddq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vaddq_u32( +// NYI: [[ADD_I:%.*]] = add <4 x i32> %v1, %v2 +// NYI: ret <4 x i32> [[ADD_I]] +// uint32x4_t test_vaddq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vaddq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vaddq_u64( +// NYI: [[ADD_I:%.*]] = add <2 x i64> %v1, %v2 +// NYI: ret <2 x i64> [[ADD_I]] +// uint64x2_t test_vaddq_u64(uint64x2_t v1, uint64x2_t v2) { +// return vaddq_u64(v1, v2); +// } + +// NYI-LABEL: @test_vsub_s8( +// NYI: [[SUB_I:%.*]] = sub <8 x i8> %v1, %v2 +// NYI: ret <8 x i8> [[SUB_I]] +// int8x8_t test_vsub_s8(int8x8_t v1, int8x8_t v2) { +// return vsub_s8(v1, v2); +// } + +// NYI-LABEL: @test_vsub_s16( +// NYI: [[SUB_I:%.*]] = sub <4 x i16> %v1, %v2 +// NYI: ret <4 x i16> [[SUB_I]] +// int16x4_t test_vsub_s16(int16x4_t v1, int16x4_t v2) { +// return vsub_s16(v1, v2); +// } + +// NYI-LABEL: @test_vsub_s32( +// NYI: [[SUB_I:%.*]] = sub <2 x i32> %v1, %v2 +// NYI: ret <2 x i32> [[SUB_I]] +// int32x2_t test_vsub_s32(int32x2_t v1, int32x2_t v2) { +// return vsub_s32(v1, v2); +// } + +// NYI-LABEL: @test_vsub_s64( +// NYI: [[SUB_I:%.*]] = sub <1 x i64> %v1, %v2 +// NYI: ret <1 x i64> [[SUB_I]] +// int64x1_t test_vsub_s64(int64x1_t v1, int64x1_t v2) { +// return vsub_s64(v1, v2); +// } + +// NYI-LABEL: @test_vsub_f32( +// NYI: [[SUB_I:%.*]] = fsub <2 x float> %v1, %v2 +// NYI: ret <2 x float> [[SUB_I]] +// float32x2_t test_vsub_f32(float32x2_t v1, float32x2_t v2) { +// return vsub_f32(v1, v2); +// } + +// NYI-LABEL: @test_vsub_u8( +// NYI: [[SUB_I:%.*]] = sub <8 x i8> %v1, %v2 +// NYI: ret <8 x i8> [[SUB_I]] +// uint8x8_t test_vsub_u8(uint8x8_t v1, uint8x8_t v2) { +// return vsub_u8(v1, v2); +// } + +// NYI-LABEL: @test_vsub_u16( +// NYI: [[SUB_I:%.*]] = sub <4 x i16> %v1, %v2 +// NYI: ret <4 x i16> [[SUB_I]] +// uint16x4_t test_vsub_u16(uint16x4_t v1, uint16x4_t v2) { +// return vsub_u16(v1, v2); +// } + +// NYI-LABEL: @test_vsub_u32( +// NYI: [[SUB_I:%.*]] = sub <2 x i32> %v1, %v2 +// NYI: ret <2 x i32> [[SUB_I]] +// uint32x2_t test_vsub_u32(uint32x2_t v1, uint32x2_t v2) { +// return vsub_u32(v1, v2); +// } + +// NYI-LABEL: @test_vsub_u64( +// NYI: [[SUB_I:%.*]] = sub <1 x i64> %v1, %v2 +// NYI: ret <1 x i64> [[SUB_I]] +// uint64x1_t test_vsub_u64(uint64x1_t v1, uint64x1_t v2) { +// return vsub_u64(v1, v2); +// } + +// NYI-LABEL: @test_vsubq_s8( +// NYI: [[SUB_I:%.*]] = sub <16 x i8> %v1, %v2 +// NYI: ret <16 x i8> [[SUB_I]] +// int8x16_t test_vsubq_s8(int8x16_t v1, int8x16_t v2) { +// return vsubq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vsubq_s16( +// NYI: [[SUB_I:%.*]] = sub <8 x i16> %v1, %v2 +// NYI: ret <8 x i16> [[SUB_I]] +// int16x8_t test_vsubq_s16(int16x8_t v1, int16x8_t v2) { +// return vsubq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vsubq_s32( +// NYI: [[SUB_I:%.*]] = sub <4 x i32> %v1, %v2 +// NYI: ret <4 x i32> [[SUB_I]] +// int32x4_t test_vsubq_s32(int32x4_t v1, int32x4_t v2) { +// return vsubq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vsubq_s64( +// NYI: [[SUB_I:%.*]] = sub <2 x i64> %v1, %v2 +// NYI: ret <2 x i64> [[SUB_I]] +// int64x2_t test_vsubq_s64(int64x2_t v1, int64x2_t v2) { +// return vsubq_s64(v1, v2); +// } + +// NYI-LABEL: @test_vsubq_f32( +// NYI: [[SUB_I:%.*]] = fsub <4 x float> %v1, %v2 +// NYI: ret <4 x float> [[SUB_I]] +// float32x4_t test_vsubq_f32(float32x4_t v1, float32x4_t v2) { +// return vsubq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vsubq_f64( +// NYI: [[SUB_I:%.*]] = fsub <2 x double> %v1, %v2 +// NYI: ret <2 x double> [[SUB_I]] +// float64x2_t test_vsubq_f64(float64x2_t v1, float64x2_t v2) { +// return vsubq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vsubq_u8( +// NYI: [[SUB_I:%.*]] = sub <16 x i8> %v1, %v2 +// NYI: ret <16 x i8> [[SUB_I]] +// uint8x16_t test_vsubq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vsubq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vsubq_u16( +// NYI: [[SUB_I:%.*]] = sub <8 x i16> %v1, %v2 +// NYI: ret <8 x i16> [[SUB_I]] +// uint16x8_t test_vsubq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vsubq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vsubq_u32( +// NYI: [[SUB_I:%.*]] = sub <4 x i32> %v1, %v2 +// NYI: ret <4 x i32> [[SUB_I]] +// uint32x4_t test_vsubq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vsubq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vsubq_u64( +// NYI: [[SUB_I:%.*]] = sub <2 x i64> %v1, %v2 +// NYI: ret <2 x i64> [[SUB_I]] +// uint64x2_t test_vsubq_u64(uint64x2_t v1, uint64x2_t v2) { +// return vsubq_u64(v1, v2); +// } + +// NYI-LABEL: @test_vmul_s8( +// NYI: [[MUL_I:%.*]] = mul <8 x i8> %v1, %v2 +// NYI: ret <8 x i8> [[MUL_I]] +// int8x8_t test_vmul_s8(int8x8_t v1, int8x8_t v2) { +// return vmul_s8(v1, v2); +// } + +// NYI-LABEL: @test_vmul_s16( +// NYI: [[MUL_I:%.*]] = mul <4 x i16> %v1, %v2 +// NYI: ret <4 x i16> [[MUL_I]] +// int16x4_t test_vmul_s16(int16x4_t v1, int16x4_t v2) { +// return vmul_s16(v1, v2); +// } + +// NYI-LABEL: @test_vmul_s32( +// NYI: [[MUL_I:%.*]] = mul <2 x i32> %v1, %v2 +// NYI: ret <2 x i32> [[MUL_I]] +// int32x2_t test_vmul_s32(int32x2_t v1, int32x2_t v2) { +// return vmul_s32(v1, v2); +// } + +// NYI-LABEL: @test_vmul_f32( +// NYI: [[MUL_I:%.*]] = fmul <2 x float> %v1, %v2 +// NYI: ret <2 x float> [[MUL_I]] +// float32x2_t test_vmul_f32(float32x2_t v1, float32x2_t v2) { +// return vmul_f32(v1, v2); +// } + +// NYI-LABEL: @test_vmul_u8( +// NYI: [[MUL_I:%.*]] = mul <8 x i8> %v1, %v2 +// NYI: ret <8 x i8> [[MUL_I]] +// uint8x8_t test_vmul_u8(uint8x8_t v1, uint8x8_t v2) { +// return vmul_u8(v1, v2); +// } + +// NYI-LABEL: @test_vmul_u16( +// NYI: [[MUL_I:%.*]] = mul <4 x i16> %v1, %v2 +// NYI: ret <4 x i16> [[MUL_I]] +// uint16x4_t test_vmul_u16(uint16x4_t v1, uint16x4_t v2) { +// return vmul_u16(v1, v2); +// } + +// NYI-LABEL: @test_vmul_u32( +// NYI: [[MUL_I:%.*]] = mul <2 x i32> %v1, %v2 +// NYI: ret <2 x i32> [[MUL_I]] +// uint32x2_t test_vmul_u32(uint32x2_t v1, uint32x2_t v2) { +// return vmul_u32(v1, v2); +// } + +// NYI-LABEL: @test_vmulq_s8( +// NYI: [[MUL_I:%.*]] = mul <16 x i8> %v1, %v2 +// NYI: ret <16 x i8> [[MUL_I]] +// int8x16_t test_vmulq_s8(int8x16_t v1, int8x16_t v2) { +// return vmulq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vmulq_s16( +// NYI: [[MUL_I:%.*]] = mul <8 x i16> %v1, %v2 +// NYI: ret <8 x i16> [[MUL_I]] +// int16x8_t test_vmulq_s16(int16x8_t v1, int16x8_t v2) { +// return vmulq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vmulq_s32( +// NYI: [[MUL_I:%.*]] = mul <4 x i32> %v1, %v2 +// NYI: ret <4 x i32> [[MUL_I]] +// int32x4_t test_vmulq_s32(int32x4_t v1, int32x4_t v2) { +// return vmulq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vmulq_u8( +// NYI: [[MUL_I:%.*]] = mul <16 x i8> %v1, %v2 +// NYI: ret <16 x i8> [[MUL_I]] +// uint8x16_t test_vmulq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vmulq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vmulq_u16( +// NYI: [[MUL_I:%.*]] = mul <8 x i16> %v1, %v2 +// NYI: ret <8 x i16> [[MUL_I]] +// uint16x8_t test_vmulq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vmulq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vmulq_u32( +// NYI: [[MUL_I:%.*]] = mul <4 x i32> %v1, %v2 +// NYI: ret <4 x i32> [[MUL_I]] +// uint32x4_t test_vmulq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vmulq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vmulq_f32( +// NYI: [[MUL_I:%.*]] = fmul <4 x float> %v1, %v2 +// NYI: ret <4 x float> [[MUL_I]] +// float32x4_t test_vmulq_f32(float32x4_t v1, float32x4_t v2) { +// return vmulq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vmulq_f64( +// NYI: [[MUL_I:%.*]] = fmul <2 x double> %v1, %v2 +// NYI: ret <2 x double> [[MUL_I]] +// float64x2_t test_vmulq_f64(float64x2_t v1, float64x2_t v2) { +// return vmulq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vmul_p8( +// NYI: [[VMUL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.pmul.v8i8(<8 x i8> %v1, <8 x i8> %v2) +// NYI: ret <8 x i8> [[VMUL_V_I]] +// poly8x8_t test_vmul_p8(poly8x8_t v1, poly8x8_t v2) { +// return vmul_p8(v1, v2); +// } + +// NYI-LABEL: @test_vmulq_p8( +// NYI: [[VMULQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %v1, <16 x i8> %v2) +// NYI: ret <16 x i8> [[VMULQ_V_I]] +// poly8x16_t test_vmulq_p8(poly8x16_t v1, poly8x16_t v2) { +// return vmulq_p8(v1, v2); +// } + +// NYI-LABEL: @test_vmla_s8( +// NYI: [[MUL_I:%.*]] = mul <8 x i8> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <8 x i8> %v1, [[MUL_I]] +// NYI: ret <8 x i8> [[ADD_I]] +// int8x8_t test_vmla_s8(int8x8_t v1, int8x8_t v2, int8x8_t v3) { +// return vmla_s8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmla_s16( +// NYI: [[MUL_I:%.*]] = mul <4 x i16> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <4 x i16> %v1, [[MUL_I]] +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[ADD_I]] to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vmla_s16(int16x4_t v1, int16x4_t v2, int16x4_t v3) { +// return (int8x8_t)vmla_s16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmla_s32( +// NYI: [[MUL_I:%.*]] = mul <2 x i32> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <2 x i32> %v1, [[MUL_I]] +// NYI: ret <2 x i32> [[ADD_I]] +// int32x2_t test_vmla_s32(int32x2_t v1, int32x2_t v2, int32x2_t v3) { +// return vmla_s32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmla_f32( +// NYI: [[MUL_I:%.*]] = fmul <2 x float> %v2, %v3 +// NYI: [[ADD_I:%.*]] = fadd <2 x float> %v1, [[MUL_I]] +// NYI: ret <2 x float> [[ADD_I]] +// float32x2_t test_vmla_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) { +// return vmla_f32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmla_u8( +// NYI: [[MUL_I:%.*]] = mul <8 x i8> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <8 x i8> %v1, [[MUL_I]] +// NYI: ret <8 x i8> [[ADD_I]] +// uint8x8_t test_vmla_u8(uint8x8_t v1, uint8x8_t v2, uint8x8_t v3) { +// return vmla_u8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmla_u16( +// NYI: [[MUL_I:%.*]] = mul <4 x i16> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <4 x i16> %v1, [[MUL_I]] +// NYI: ret <4 x i16> [[ADD_I]] +// uint16x4_t test_vmla_u16(uint16x4_t v1, uint16x4_t v2, uint16x4_t v3) { +// return vmla_u16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmla_u32( +// NYI: [[MUL_I:%.*]] = mul <2 x i32> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <2 x i32> %v1, [[MUL_I]] +// NYI: ret <2 x i32> [[ADD_I]] +// uint32x2_t test_vmla_u32(uint32x2_t v1, uint32x2_t v2, uint32x2_t v3) { +// return vmla_u32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlaq_s8( +// NYI: [[MUL_I:%.*]] = mul <16 x i8> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <16 x i8> %v1, [[MUL_I]] +// NYI: ret <16 x i8> [[ADD_I]] +// int8x16_t test_vmlaq_s8(int8x16_t v1, int8x16_t v2, int8x16_t v3) { +// return vmlaq_s8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlaq_s16( +// NYI: [[MUL_I:%.*]] = mul <8 x i16> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <8 x i16> %v1, [[MUL_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// int16x8_t test_vmlaq_s16(int16x8_t v1, int16x8_t v2, int16x8_t v3) { +// return vmlaq_s16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlaq_s32( +// NYI: [[MUL_I:%.*]] = mul <4 x i32> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <4 x i32> %v1, [[MUL_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// int32x4_t test_vmlaq_s32(int32x4_t v1, int32x4_t v2, int32x4_t v3) { +// return vmlaq_s32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlaq_f32( +// NYI: [[MUL_I:%.*]] = fmul <4 x float> %v2, %v3 +// NYI: [[ADD_I:%.*]] = fadd <4 x float> %v1, [[MUL_I]] +// NYI: ret <4 x float> [[ADD_I]] +// float32x4_t test_vmlaq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) { +// return vmlaq_f32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlaq_u8( +// NYI: [[MUL_I:%.*]] = mul <16 x i8> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <16 x i8> %v1, [[MUL_I]] +// NYI: ret <16 x i8> [[ADD_I]] +// uint8x16_t test_vmlaq_u8(uint8x16_t v1, uint8x16_t v2, uint8x16_t v3) { +// return vmlaq_u8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlaq_u16( +// NYI: [[MUL_I:%.*]] = mul <8 x i16> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <8 x i16> %v1, [[MUL_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// uint16x8_t test_vmlaq_u16(uint16x8_t v1, uint16x8_t v2, uint16x8_t v3) { +// return vmlaq_u16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlaq_u32( +// NYI: [[MUL_I:%.*]] = mul <4 x i32> %v2, %v3 +// NYI: [[ADD_I:%.*]] = add <4 x i32> %v1, [[MUL_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// uint32x4_t test_vmlaq_u32(uint32x4_t v1, uint32x4_t v2, uint32x4_t v3) { +// return vmlaq_u32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlaq_f64( +// NYI: [[MUL_I:%.*]] = fmul <2 x double> %v2, %v3 +// NYI: [[ADD_I:%.*]] = fadd <2 x double> %v1, [[MUL_I]] +// NYI: ret <2 x double> [[ADD_I]] +// float64x2_t test_vmlaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { +// return vmlaq_f64(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmls_s8( +// NYI: [[MUL_I:%.*]] = mul <8 x i8> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <8 x i8> %v1, [[MUL_I]] +// NYI: ret <8 x i8> [[SUB_I]] +// int8x8_t test_vmls_s8(int8x8_t v1, int8x8_t v2, int8x8_t v3) { +// return vmls_s8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmls_s16( +// NYI: [[MUL_I:%.*]] = mul <4 x i16> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <4 x i16> %v1, [[MUL_I]] +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SUB_I]] to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vmls_s16(int16x4_t v1, int16x4_t v2, int16x4_t v3) { +// return (int8x8_t)vmls_s16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmls_s32( +// NYI: [[MUL_I:%.*]] = mul <2 x i32> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <2 x i32> %v1, [[MUL_I]] +// NYI: ret <2 x i32> [[SUB_I]] +// int32x2_t test_vmls_s32(int32x2_t v1, int32x2_t v2, int32x2_t v3) { +// return vmls_s32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmls_f32( +// NYI: [[MUL_I:%.*]] = fmul <2 x float> %v2, %v3 +// NYI: [[SUB_I:%.*]] = fsub <2 x float> %v1, [[MUL_I]] +// NYI: ret <2 x float> [[SUB_I]] +// float32x2_t test_vmls_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) { +// return vmls_f32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmls_u8( +// NYI: [[MUL_I:%.*]] = mul <8 x i8> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <8 x i8> %v1, [[MUL_I]] +// NYI: ret <8 x i8> [[SUB_I]] +// uint8x8_t test_vmls_u8(uint8x8_t v1, uint8x8_t v2, uint8x8_t v3) { +// return vmls_u8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmls_u16( +// NYI: [[MUL_I:%.*]] = mul <4 x i16> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <4 x i16> %v1, [[MUL_I]] +// NYI: ret <4 x i16> [[SUB_I]] +// uint16x4_t test_vmls_u16(uint16x4_t v1, uint16x4_t v2, uint16x4_t v3) { +// return vmls_u16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmls_u32( +// NYI: [[MUL_I:%.*]] = mul <2 x i32> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <2 x i32> %v1, [[MUL_I]] +// NYI: ret <2 x i32> [[SUB_I]] +// uint32x2_t test_vmls_u32(uint32x2_t v1, uint32x2_t v2, uint32x2_t v3) { +// return vmls_u32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlsq_s8( +// NYI: [[MUL_I:%.*]] = mul <16 x i8> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <16 x i8> %v1, [[MUL_I]] +// NYI: ret <16 x i8> [[SUB_I]] +// int8x16_t test_vmlsq_s8(int8x16_t v1, int8x16_t v2, int8x16_t v3) { +// return vmlsq_s8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlsq_s16( +// NYI: [[MUL_I:%.*]] = mul <8 x i16> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <8 x i16> %v1, [[MUL_I]] +// NYI: ret <8 x i16> [[SUB_I]] +// int16x8_t test_vmlsq_s16(int16x8_t v1, int16x8_t v2, int16x8_t v3) { +// return vmlsq_s16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlsq_s32( +// NYI: [[MUL_I:%.*]] = mul <4 x i32> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <4 x i32> %v1, [[MUL_I]] +// NYI: ret <4 x i32> [[SUB_I]] +// int32x4_t test_vmlsq_s32(int32x4_t v1, int32x4_t v2, int32x4_t v3) { +// return vmlsq_s32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlsq_f32( +// NYI: [[MUL_I:%.*]] = fmul <4 x float> %v2, %v3 +// NYI: [[SUB_I:%.*]] = fsub <4 x float> %v1, [[MUL_I]] +// NYI: ret <4 x float> [[SUB_I]] +// float32x4_t test_vmlsq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) { +// return vmlsq_f32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlsq_u8( +// NYI: [[MUL_I:%.*]] = mul <16 x i8> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <16 x i8> %v1, [[MUL_I]] +// NYI: ret <16 x i8> [[SUB_I]] +// uint8x16_t test_vmlsq_u8(uint8x16_t v1, uint8x16_t v2, uint8x16_t v3) { +// return vmlsq_u8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlsq_u16( +// NYI: [[MUL_I:%.*]] = mul <8 x i16> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <8 x i16> %v1, [[MUL_I]] +// NYI: ret <8 x i16> [[SUB_I]] +// uint16x8_t test_vmlsq_u16(uint16x8_t v1, uint16x8_t v2, uint16x8_t v3) { +// return vmlsq_u16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlsq_u32( +// NYI: [[MUL_I:%.*]] = mul <4 x i32> %v2, %v3 +// NYI: [[SUB_I:%.*]] = sub <4 x i32> %v1, [[MUL_I]] +// NYI: ret <4 x i32> [[SUB_I]] +// uint32x4_t test_vmlsq_u32(uint32x4_t v1, uint32x4_t v2, uint32x4_t v3) { +// return vmlsq_u32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vmlsq_f64( +// NYI: [[MUL_I:%.*]] = fmul <2 x double> %v2, %v3 +// NYI: [[SUB_I:%.*]] = fsub <2 x double> %v1, [[MUL_I]] +// NYI: ret <2 x double> [[SUB_I]] +// float64x2_t test_vmlsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { +// return vmlsq_f64(v1, v2, v3); +// } + +// NYI-LABEL: @test_vfma_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x float> %v3 to <8 x i8> +// NYI: [[TMP3:%.*]] = call <2 x float> @llvm.fma.v2f32(<2 x float> %v2, <2 x float> %v3, <2 x float> %v1) +// NYI: ret <2 x float> [[TMP3]] +// float32x2_t test_vfma_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) { +// return vfma_f32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vfmaq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x float> %v3 to <16 x i8> +// NYI: [[TMP3:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %v2, <4 x float> %v3, <4 x float> %v1) +// NYI: ret <4 x float> [[TMP3]] +// float32x4_t test_vfmaq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) { +// return vfmaq_f32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vfmaq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x double> %v3 to <16 x i8> +// NYI: [[TMP3:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %v2, <2 x double> %v3, <2 x double> %v1) +// NYI: ret <2 x double> [[TMP3]] +// float64x2_t test_vfmaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { +// return vfmaq_f64(v1, v2, v3); +// } + +// NYI-LABEL: @test_vfms_f32( +// NYI: [[SUB_I:%.*]] = fneg <2 x float> %v2 +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> [[SUB_I]] to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x float> %v3 to <8 x i8> +// NYI: [[TMP3:%.*]] = call <2 x float> @llvm.fma.v2f32(<2 x float> [[SUB_I]], <2 x float> %v3, <2 x float> %v1) +// NYI: ret <2 x float> [[TMP3]] +// float32x2_t test_vfms_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) { +// return vfms_f32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vfmsq_f32( +// NYI: [[SUB_I:%.*]] = fneg <4 x float> %v2 +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> [[SUB_I]] to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x float> %v3 to <16 x i8> +// NYI: [[TMP3:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[SUB_I]], <4 x float> %v3, <4 x float> %v1) +// NYI: ret <4 x float> [[TMP3]] +// float32x4_t test_vfmsq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) { +// return vfmsq_f32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vfmsq_f64( +// NYI: [[SUB_I:%.*]] = fneg <2 x double> %v2 +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> [[SUB_I]] to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x double> %v3 to <16 x i8> +// NYI: [[TMP3:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> [[SUB_I]], <2 x double> %v3, <2 x double> %v1) +// NYI: ret <2 x double> [[TMP3]] +// float64x2_t test_vfmsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { +// return vfmsq_f64(v1, v2, v3); +// } + +// NYI-LABEL: @test_vdivq_f64( +// NYI: [[DIV_I:%.*]] = fdiv <2 x double> %v1, %v2 +// NYI: ret <2 x double> [[DIV_I]] +// float64x2_t test_vdivq_f64(float64x2_t v1, float64x2_t v2) { +// return vdivq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vdivq_f32( +// NYI: [[DIV_I:%.*]] = fdiv <4 x float> %v1, %v2 +// NYI: ret <4 x float> [[DIV_I]] +// float32x4_t test_vdivq_f32(float32x4_t v1, float32x4_t v2) { +// return vdivq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vdiv_f32( +// NYI: [[DIV_I:%.*]] = fdiv <2 x float> %v1, %v2 +// NYI: ret <2 x float> [[DIV_I]] +// float32x2_t test_vdiv_f32(float32x2_t v1, float32x2_t v2) { +// return vdiv_f32(v1, v2); +// } + +// NYI-LABEL: @test_vaba_s8( +// NYI: [[VABD_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %v2, <8 x i8> %v3) +// NYI: [[ADD_I:%.*]] = add <8 x i8> %v1, [[VABD_I_I]] +// NYI: ret <8 x i8> [[ADD_I]] +// int8x8_t test_vaba_s8(int8x8_t v1, int8x8_t v2, int8x8_t v3) { +// return vaba_s8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vaba_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v3 to <8 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %v2, <4 x i16> %v3) +// NYI: [[ADD_I:%.*]] = add <4 x i16> %v1, [[VABD2_I_I]] +// NYI: ret <4 x i16> [[ADD_I]] +// int16x4_t test_vaba_s16(int16x4_t v1, int16x4_t v2, int16x4_t v3) { +// return vaba_s16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vaba_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v3 to <8 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %v2, <2 x i32> %v3) +// NYI: [[ADD_I:%.*]] = add <2 x i32> %v1, [[VABD2_I_I]] +// NYI: ret <2 x i32> [[ADD_I]] +// int32x2_t test_vaba_s32(int32x2_t v1, int32x2_t v2, int32x2_t v3) { +// return vaba_s32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vaba_u8( +// NYI: [[VABD_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %v2, <8 x i8> %v3) +// NYI: [[ADD_I:%.*]] = add <8 x i8> %v1, [[VABD_I_I]] +// NYI: ret <8 x i8> [[ADD_I]] +// uint8x8_t test_vaba_u8(uint8x8_t v1, uint8x8_t v2, uint8x8_t v3) { +// return vaba_u8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vaba_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v3 to <8 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %v2, <4 x i16> %v3) +// NYI: [[ADD_I:%.*]] = add <4 x i16> %v1, [[VABD2_I_I]] +// NYI: ret <4 x i16> [[ADD_I]] +// uint16x4_t test_vaba_u16(uint16x4_t v1, uint16x4_t v2, uint16x4_t v3) { +// return vaba_u16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vaba_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v3 to <8 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %v2, <2 x i32> %v3) +// NYI: [[ADD_I:%.*]] = add <2 x i32> %v1, [[VABD2_I_I]] +// NYI: ret <2 x i32> [[ADD_I]] +// uint32x2_t test_vaba_u32(uint32x2_t v1, uint32x2_t v2, uint32x2_t v3) { +// return vaba_u32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vabaq_s8( +// NYI: [[VABD_I_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %v2, <16 x i8> %v3) +// NYI: [[ADD_I:%.*]] = add <16 x i8> %v1, [[VABD_I_I]] +// NYI: ret <16 x i8> [[ADD_I]] +// int8x16_t test_vabaq_s8(int8x16_t v1, int8x16_t v2, int8x16_t v3) { +// return vabaq_s8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vabaq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v3 to <16 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %v2, <8 x i16> %v3) +// NYI: [[ADD_I:%.*]] = add <8 x i16> %v1, [[VABD2_I_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// int16x8_t test_vabaq_s16(int16x8_t v1, int16x8_t v2, int16x8_t v3) { +// return vabaq_s16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vabaq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v3 to <16 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %v2, <4 x i32> %v3) +// NYI: [[ADD_I:%.*]] = add <4 x i32> %v1, [[VABD2_I_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// int32x4_t test_vabaq_s32(int32x4_t v1, int32x4_t v2, int32x4_t v3) { +// return vabaq_s32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vabaq_u8( +// NYI: [[VABD_I_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %v2, <16 x i8> %v3) +// NYI: [[ADD_I:%.*]] = add <16 x i8> %v1, [[VABD_I_I]] +// NYI: ret <16 x i8> [[ADD_I]] +// uint8x16_t test_vabaq_u8(uint8x16_t v1, uint8x16_t v2, uint8x16_t v3) { +// return vabaq_u8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vabaq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v3 to <16 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %v2, <8 x i16> %v3) +// NYI: [[ADD_I:%.*]] = add <8 x i16> %v1, [[VABD2_I_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// uint16x8_t test_vabaq_u16(uint16x8_t v1, uint16x8_t v2, uint16x8_t v3) { +// return vabaq_u16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vabaq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v3 to <16 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %v2, <4 x i32> %v3) +// NYI: [[ADD_I:%.*]] = add <4 x i32> %v1, [[VABD2_I_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// uint32x4_t test_vabaq_u32(uint32x4_t v1, uint32x4_t v2, uint32x4_t v3) { +// return vabaq_u32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vabd_s8( +// NYI: [[VABD_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %v1, <8 x i8> %v2) +// NYI: ret <8 x i8> [[VABD_I]] +// int8x8_t test_vabd_s8(int8x8_t v1, int8x8_t v2) { +// return vabd_s8(v1, v2); +// } + +// NYI-LABEL: @test_vabd_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[VABD2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %v1, <4 x i16> %v2) +// NYI: ret <4 x i16> [[VABD2_I]] +// int16x4_t test_vabd_s16(int16x4_t v1, int16x4_t v2) { +// return vabd_s16(v1, v2); +// } + +// NYI-LABEL: @test_vabd_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[VABD2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %v1, <2 x i32> %v2) +// NYI: ret <2 x i32> [[VABD2_I]] +// int32x2_t test_vabd_s32(int32x2_t v1, int32x2_t v2) { +// return vabd_s32(v1, v2); +// } + +// NYI-LABEL: @test_vabd_u8( +// NYI: [[VABD_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %v1, <8 x i8> %v2) +// NYI: ret <8 x i8> [[VABD_I]] +// uint8x8_t test_vabd_u8(uint8x8_t v1, uint8x8_t v2) { +// return vabd_u8(v1, v2); +// } + +// NYI-LABEL: @test_vabd_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[VABD2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %v1, <4 x i16> %v2) +// NYI: ret <4 x i16> [[VABD2_I]] +// uint16x4_t test_vabd_u16(uint16x4_t v1, uint16x4_t v2) { +// return vabd_u16(v1, v2); +// } + +// NYI-LABEL: @test_vabd_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[VABD2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %v1, <2 x i32> %v2) +// NYI: ret <2 x i32> [[VABD2_I]] +// uint32x2_t test_vabd_u32(uint32x2_t v1, uint32x2_t v2) { +// return vabd_u32(v1, v2); +// } + +// NYI-LABEL: @test_vabd_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8> +// NYI: [[VABD2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fabd.v2f32(<2 x float> %v1, <2 x float> %v2) +// NYI: ret <2 x float> [[VABD2_I]] +// float32x2_t test_vabd_f32(float32x2_t v1, float32x2_t v2) { +// return vabd_f32(v1, v2); +// } + +// NYI-LABEL: @test_vabdq_s8( +// NYI: [[VABD_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %v1, <16 x i8> %v2) +// NYI: ret <16 x i8> [[VABD_I]] +// int8x16_t test_vabdq_s8(int8x16_t v1, int8x16_t v2) { +// return vabdq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vabdq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[VABD2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %v1, <8 x i16> %v2) +// NYI: ret <8 x i16> [[VABD2_I]] +// int16x8_t test_vabdq_s16(int16x8_t v1, int16x8_t v2) { +// return vabdq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vabdq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[VABD2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %v1, <4 x i32> %v2) +// NYI: ret <4 x i32> [[VABD2_I]] +// int32x4_t test_vabdq_s32(int32x4_t v1, int32x4_t v2) { +// return vabdq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vabdq_u8( +// NYI: [[VABD_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %v1, <16 x i8> %v2) +// NYI: ret <16 x i8> [[VABD_I]] +// uint8x16_t test_vabdq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vabdq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vabdq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[VABD2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %v1, <8 x i16> %v2) +// NYI: ret <8 x i16> [[VABD2_I]] +// uint16x8_t test_vabdq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vabdq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vabdq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[VABD2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %v1, <4 x i32> %v2) +// NYI: ret <4 x i32> [[VABD2_I]] +// uint32x4_t test_vabdq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vabdq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vabdq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> +// NYI: [[VABD2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fabd.v4f32(<4 x float> %v1, <4 x float> %v2) +// NYI: ret <4 x float> [[VABD2_I]] +// float32x4_t test_vabdq_f32(float32x4_t v1, float32x4_t v2) { +// return vabdq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vabdq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> +// NYI: [[VABD2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fabd.v2f64(<2 x double> %v1, <2 x double> %v2) +// NYI: ret <2 x double> [[VABD2_I]] +// float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) { +// return vabdq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vbsl_s8( +// NYI: [[VBSL_I:%.*]] = and <8 x i8> %v1, %v2 +// NYI: [[TMP0:%.*]] = xor <8 x i8> %v1, +// NYI: [[VBSL1_I:%.*]] = and <8 x i8> [[TMP0]], %v3 +// NYI: [[VBSL2_I:%.*]] = or <8 x i8> [[VBSL_I]], [[VBSL1_I]] +// NYI: ret <8 x i8> [[VBSL2_I]] +// int8x8_t test_vbsl_s8(uint8x8_t v1, int8x8_t v2, int8x8_t v3) { +// return vbsl_s8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> %v3 to <8 x i8> +// NYI: [[VBSL3_I:%.*]] = and <4 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <4 x i16> %v1, +// NYI: [[VBSL4_I:%.*]] = and <4 x i16> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <4 x i16> [[VBSL3_I]], [[VBSL4_I]] +// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[VBSL5_I]] to <8 x i8> +// NYI: ret <8 x i8> [[TMP4]] +// int8x8_t test_vbsl_s16(uint16x4_t v1, int16x4_t v2, int16x4_t v3) { +// return (int8x8_t)vbsl_s16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> %v3 to <8 x i8> +// NYI: [[VBSL3_I:%.*]] = and <2 x i32> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <2 x i32> %v1, +// NYI: [[VBSL4_I:%.*]] = and <2 x i32> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <2 x i32> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <2 x i32> [[VBSL5_I]] +// int32x2_t test_vbsl_s32(uint32x2_t v1, int32x2_t v2, int32x2_t v3) { +// return vbsl_s32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <1 x i64> %v3 to <8 x i8> +// NYI: [[VBSL3_I:%.*]] = and <1 x i64> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <1 x i64> %v1, +// NYI: [[VBSL4_I:%.*]] = and <1 x i64> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <1 x i64> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <1 x i64> [[VBSL5_I]] +// int64x1_t test_vbsl_s64(uint64x1_t v1, int64x1_t v2, int64x1_t v3) { +// return vbsl_s64(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_u8( +// NYI: [[VBSL_I:%.*]] = and <8 x i8> %v1, %v2 +// NYI: [[TMP0:%.*]] = xor <8 x i8> %v1, +// NYI: [[VBSL1_I:%.*]] = and <8 x i8> [[TMP0]], %v3 +// NYI: [[VBSL2_I:%.*]] = or <8 x i8> [[VBSL_I]], [[VBSL1_I]] +// NYI: ret <8 x i8> [[VBSL2_I]] +// uint8x8_t test_vbsl_u8(uint8x8_t v1, uint8x8_t v2, uint8x8_t v3) { +// return vbsl_u8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> %v3 to <8 x i8> +// NYI: [[VBSL3_I:%.*]] = and <4 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <4 x i16> %v1, +// NYI: [[VBSL4_I:%.*]] = and <4 x i16> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <4 x i16> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <4 x i16> [[VBSL5_I]] +// uint16x4_t test_vbsl_u16(uint16x4_t v1, uint16x4_t v2, uint16x4_t v3) { +// return vbsl_u16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> %v3 to <8 x i8> +// NYI: [[VBSL3_I:%.*]] = and <2 x i32> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <2 x i32> %v1, +// NYI: [[VBSL4_I:%.*]] = and <2 x i32> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <2 x i32> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <2 x i32> [[VBSL5_I]] +// uint32x2_t test_vbsl_u32(uint32x2_t v1, uint32x2_t v2, uint32x2_t v3) { +// return vbsl_u32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <1 x i64> %v3 to <8 x i8> +// NYI: [[VBSL3_I:%.*]] = and <1 x i64> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <1 x i64> %v1, +// NYI: [[VBSL4_I:%.*]] = and <1 x i64> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <1 x i64> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <1 x i64> [[VBSL5_I]] +// uint64x1_t test_vbsl_u64(uint64x1_t v1, uint64x1_t v2, uint64x1_t v3) { +// return vbsl_u64(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_f32( +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x float> %v2 to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <2 x float> %v3 to <8 x i8> +// NYI: [[VBSL1_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x i32> +// NYI: [[VBSL2_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x i32> +// NYI: [[VBSL3_I:%.*]] = and <2 x i32> %v1, [[VBSL1_I]] +// NYI: [[TMP4:%.*]] = xor <2 x i32> %v1, +// NYI: [[VBSL4_I:%.*]] = and <2 x i32> [[TMP4]], [[VBSL2_I]] +// NYI: [[VBSL5_I:%.*]] = or <2 x i32> [[VBSL3_I]], [[VBSL4_I]] +// NYI: [[TMP5:%.*]] = bitcast <2 x i32> [[VBSL5_I]] to <2 x float> +// NYI: ret <2 x float> [[TMP5]] +// float32x2_t test_vbsl_f32(uint32x2_t v1, float32x2_t v2, float32x2_t v3) { +// return vbsl_f32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <1 x double> %v3 to <8 x i8> +// NYI: [[VBSL1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: [[VBSL2_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x i64> +// NYI: [[VBSL3_I:%.*]] = and <1 x i64> %v1, [[VBSL1_I]] +// NYI: [[TMP3:%.*]] = xor <1 x i64> %v1, +// NYI: [[VBSL4_I:%.*]] = and <1 x i64> [[TMP3]], [[VBSL2_I]] +// NYI: [[VBSL5_I:%.*]] = or <1 x i64> [[VBSL3_I]], [[VBSL4_I]] +// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[VBSL5_I]] to <1 x double> +// NYI: ret <1 x double> [[TMP4]] +// float64x1_t test_vbsl_f64(uint64x1_t v1, float64x1_t v2, float64x1_t v3) { +// return vbsl_f64(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_p8( +// NYI: [[VBSL_I:%.*]] = and <8 x i8> %v1, %v2 +// NYI: [[TMP0:%.*]] = xor <8 x i8> %v1, +// NYI: [[VBSL1_I:%.*]] = and <8 x i8> [[TMP0]], %v3 +// NYI: [[VBSL2_I:%.*]] = or <8 x i8> [[VBSL_I]], [[VBSL1_I]] +// NYI: ret <8 x i8> [[VBSL2_I]] +// poly8x8_t test_vbsl_p8(uint8x8_t v1, poly8x8_t v2, poly8x8_t v3) { +// return vbsl_p8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbsl_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> %v3 to <8 x i8> +// NYI: [[VBSL3_I:%.*]] = and <4 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <4 x i16> %v1, +// NYI: [[VBSL4_I:%.*]] = and <4 x i16> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <4 x i16> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <4 x i16> [[VBSL5_I]] +// poly16x4_t test_vbsl_p16(uint16x4_t v1, poly16x4_t v2, poly16x4_t v3) { +// return vbsl_p16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_s8( +// NYI: [[VBSL_I:%.*]] = and <16 x i8> %v1, %v2 +// NYI: [[TMP0:%.*]] = xor <16 x i8> %v1, +// NYI: [[VBSL1_I:%.*]] = and <16 x i8> [[TMP0]], %v3 +// NYI: [[VBSL2_I:%.*]] = or <16 x i8> [[VBSL_I]], [[VBSL1_I]] +// NYI: ret <16 x i8> [[VBSL2_I]] +// int8x16_t test_vbslq_s8(uint8x16_t v1, int8x16_t v2, int8x16_t v3) { +// return vbslq_s8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <8 x i16> %v3 to <16 x i8> +// NYI: [[VBSL3_I:%.*]] = and <8 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <8 x i16> %v1, +// NYI: [[VBSL4_I:%.*]] = and <8 x i16> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <8 x i16> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <8 x i16> [[VBSL5_I]] +// int16x8_t test_vbslq_s16(uint16x8_t v1, int16x8_t v2, int16x8_t v3) { +// return vbslq_s16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x i32> %v3 to <16 x i8> +// NYI: [[VBSL3_I:%.*]] = and <4 x i32> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <4 x i32> %v1, +// NYI: [[VBSL4_I:%.*]] = and <4 x i32> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <4 x i32> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <4 x i32> [[VBSL5_I]] +// int32x4_t test_vbslq_s32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) { +// return vbslq_s32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x i64> %v3 to <16 x i8> +// NYI: [[VBSL3_I:%.*]] = and <2 x i64> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <2 x i64> %v1, +// NYI: [[VBSL4_I:%.*]] = and <2 x i64> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <2 x i64> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <2 x i64> [[VBSL5_I]] +// int64x2_t test_vbslq_s64(uint64x2_t v1, int64x2_t v2, int64x2_t v3) { +// return vbslq_s64(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_u8( +// NYI: [[VBSL_I:%.*]] = and <16 x i8> %v1, %v2 +// NYI: [[TMP0:%.*]] = xor <16 x i8> %v1, +// NYI: [[VBSL1_I:%.*]] = and <16 x i8> [[TMP0]], %v3 +// NYI: [[VBSL2_I:%.*]] = or <16 x i8> [[VBSL_I]], [[VBSL1_I]] +// NYI: ret <16 x i8> [[VBSL2_I]] +// uint8x16_t test_vbslq_u8(uint8x16_t v1, uint8x16_t v2, uint8x16_t v3) { +// return vbslq_u8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <8 x i16> %v3 to <16 x i8> +// NYI: [[VBSL3_I:%.*]] = and <8 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <8 x i16> %v1, +// NYI: [[VBSL4_I:%.*]] = and <8 x i16> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <8 x i16> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <8 x i16> [[VBSL5_I]] +// uint16x8_t test_vbslq_u16(uint16x8_t v1, uint16x8_t v2, uint16x8_t v3) { +// return vbslq_u16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x i32> %v3 to <16 x i8> +// NYI: [[VBSL3_I:%.*]] = and <4 x i32> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <4 x i32> %v1, +// NYI: [[VBSL4_I:%.*]] = and <4 x i32> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <4 x i32> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <4 x i32> [[VBSL5_I]] +// int32x4_t test_vbslq_u32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) { +// return vbslq_s32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x i64> %v3 to <16 x i8> +// NYI: [[VBSL3_I:%.*]] = and <2 x i64> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <2 x i64> %v1, +// NYI: [[VBSL4_I:%.*]] = and <2 x i64> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <2 x i64> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <2 x i64> [[VBSL5_I]] +// uint64x2_t test_vbslq_u64(uint64x2_t v1, uint64x2_t v2, uint64x2_t v3) { +// return vbslq_u64(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x float> %v3 to <16 x i8> +// NYI: [[VBSL1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: [[VBSL2_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32> +// NYI: [[VBSL3_I:%.*]] = and <4 x i32> %v1, [[VBSL1_I]] +// NYI: [[TMP3:%.*]] = xor <4 x i32> %v1, +// NYI: [[VBSL4_I:%.*]] = and <4 x i32> [[TMP3]], [[VBSL2_I]] +// NYI: [[VBSL5_I:%.*]] = or <4 x i32> [[VBSL3_I]], [[VBSL4_I]] +// NYI: [[TMP4:%.*]] = bitcast <4 x i32> [[VBSL5_I]] to <4 x float> +// NYI: ret <4 x float> [[TMP4]] +// float32x4_t test_vbslq_f32(uint32x4_t v1, float32x4_t v2, float32x4_t v3) { +// return vbslq_f32(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_p8( +// NYI: [[VBSL_I:%.*]] = and <16 x i8> %v1, %v2 +// NYI: [[TMP0:%.*]] = xor <16 x i8> %v1, +// NYI: [[VBSL1_I:%.*]] = and <16 x i8> [[TMP0]], %v3 +// NYI: [[VBSL2_I:%.*]] = or <16 x i8> [[VBSL_I]], [[VBSL1_I]] +// NYI: ret <16 x i8> [[VBSL2_I]] +// poly8x16_t test_vbslq_p8(uint8x16_t v1, poly8x16_t v2, poly8x16_t v3) { +// return vbslq_p8(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <8 x i16> %v3 to <16 x i8> +// NYI: [[VBSL3_I:%.*]] = and <8 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = xor <8 x i16> %v1, +// NYI: [[VBSL4_I:%.*]] = and <8 x i16> [[TMP3]], %v3 +// NYI: [[VBSL5_I:%.*]] = or <8 x i16> [[VBSL3_I]], [[VBSL4_I]] +// NYI: ret <8 x i16> [[VBSL5_I]] +// poly16x8_t test_vbslq_p16(uint16x8_t v1, poly16x8_t v2, poly16x8_t v3) { +// return vbslq_p16(v1, v2, v3); +// } + +// NYI-LABEL: @test_vbslq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x double> %v3 to <16 x i8> +// NYI: [[VBSL1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: [[VBSL2_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64> +// NYI: [[VBSL3_I:%.*]] = and <2 x i64> %v1, [[VBSL1_I]] +// NYI: [[TMP3:%.*]] = xor <2 x i64> %v1, +// NYI: [[VBSL4_I:%.*]] = and <2 x i64> [[TMP3]], [[VBSL2_I]] +// NYI: [[VBSL5_I:%.*]] = or <2 x i64> [[VBSL3_I]], [[VBSL4_I]] +// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[VBSL5_I]] to <2 x double> +// NYI: ret <2 x double> [[TMP4]] +// float64x2_t test_vbslq_f64(uint64x2_t v1, float64x2_t v2, float64x2_t v3) { +// return vbslq_f64(v1, v2, v3); +// } + +// NYI-LABEL: @test_vrecps_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8> +// NYI: [[VRECPS_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float> %v1, <2 x float> %v2) +// NYI: ret <2 x float> [[VRECPS_V2_I]] +// float32x2_t test_vrecps_f32(float32x2_t v1, float32x2_t v2) { +// return vrecps_f32(v1, v2); +// } + +// NYI-LABEL: @test_vrecpsq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> +// NYI: [[VRECPSQ_V2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float> %v1, <4 x float> %v2) +// NYI: [[VRECPSQ_V3_I:%.*]] = bitcast <4 x float> [[VRECPSQ_V2_I]] to <16 x i8> +// NYI: ret <4 x float> [[VRECPSQ_V2_I]] +// float32x4_t test_vrecpsq_f32(float32x4_t v1, float32x4_t v2) { +// return vrecpsq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vrecpsq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> +// NYI: [[VRECPSQ_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double> %v1, <2 x double> %v2) +// NYI: [[VRECPSQ_V3_I:%.*]] = bitcast <2 x double> [[VRECPSQ_V2_I]] to <16 x i8> +// NYI: ret <2 x double> [[VRECPSQ_V2_I]] +// float64x2_t test_vrecpsq_f64(float64x2_t v1, float64x2_t v2) { +// return vrecpsq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vrsqrts_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8> +// NYI: [[VRSQRTS_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float> %v1, <2 x float> %v2) +// NYI: [[VRSQRTS_V3_I:%.*]] = bitcast <2 x float> [[VRSQRTS_V2_I]] to <8 x i8> +// NYI: ret <2 x float> [[VRSQRTS_V2_I]] +// float32x2_t test_vrsqrts_f32(float32x2_t v1, float32x2_t v2) { +// return vrsqrts_f32(v1, v2); +// } + +// NYI-LABEL: @test_vrsqrtsq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> +// NYI: [[VRSQRTSQ_V2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float> %v1, <4 x float> %v2) +// NYI: [[VRSQRTSQ_V3_I:%.*]] = bitcast <4 x float> [[VRSQRTSQ_V2_I]] to <16 x i8> +// NYI: ret <4 x float> [[VRSQRTSQ_V2_I]] +// float32x4_t test_vrsqrtsq_f32(float32x4_t v1, float32x4_t v2) { +// return vrsqrtsq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vrsqrtsq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> +// NYI: [[VRSQRTSQ_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.frsqrts.v2f64(<2 x double> %v1, <2 x double> %v2) +// NYI: [[VRSQRTSQ_V3_I:%.*]] = bitcast <2 x double> [[VRSQRTSQ_V2_I]] to <16 x i8> +// NYI: ret <2 x double> [[VRSQRTSQ_V2_I]] +// float64x2_t test_vrsqrtsq_f64(float64x2_t v1, float64x2_t v2) { +// return vrsqrtsq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vcage_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8> +// NYI: [[VCAGE_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.facge.v2i32.v2f32(<2 x float> %v1, <2 x float> %v2) +// NYI: ret <2 x i32> [[VCAGE_V2_I]] +// uint32x2_t test_vcage_f32(float32x2_t v1, float32x2_t v2) { +// return vcage_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcage_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VCAGE_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.facge.v1i64.v1f64(<1 x double> %a, <1 x double> %b) +// NYI: ret <1 x i64> [[VCAGE_V2_I]] +// uint64x1_t test_vcage_f64(float64x1_t a, float64x1_t b) { +// return vcage_f64(a, b); +// } + +// NYI-LABEL: @test_vcageq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> +// NYI: [[VCAGEQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.facge.v4i32.v4f32(<4 x float> %v1, <4 x float> %v2) +// NYI: ret <4 x i32> [[VCAGEQ_V2_I]] +// uint32x4_t test_vcageq_f32(float32x4_t v1, float32x4_t v2) { +// return vcageq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcageq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> +// NYI: [[VCAGEQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.facge.v2i64.v2f64(<2 x double> %v1, <2 x double> %v2) +// NYI: ret <2 x i64> [[VCAGEQ_V2_I]] +// uint64x2_t test_vcageq_f64(float64x2_t v1, float64x2_t v2) { +// return vcageq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vcagt_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8> +// NYI: [[VCAGT_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.facgt.v2i32.v2f32(<2 x float> %v1, <2 x float> %v2) +// NYI: ret <2 x i32> [[VCAGT_V2_I]] +// uint32x2_t test_vcagt_f32(float32x2_t v1, float32x2_t v2) { +// return vcagt_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcagt_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VCAGT_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.facgt.v1i64.v1f64(<1 x double> %a, <1 x double> %b) +// NYI: ret <1 x i64> [[VCAGT_V2_I]] +// uint64x1_t test_vcagt_f64(float64x1_t a, float64x1_t b) { +// return vcagt_f64(a, b); +// } + +// NYI-LABEL: @test_vcagtq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> +// NYI: [[VCAGTQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.facgt.v4i32.v4f32(<4 x float> %v1, <4 x float> %v2) +// NYI: ret <4 x i32> [[VCAGTQ_V2_I]] +// uint32x4_t test_vcagtq_f32(float32x4_t v1, float32x4_t v2) { +// return vcagtq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcagtq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> +// NYI: [[VCAGTQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.facgt.v2i64.v2f64(<2 x double> %v1, <2 x double> %v2) +// NYI: ret <2 x i64> [[VCAGTQ_V2_I]] +// uint64x2_t test_vcagtq_f64(float64x2_t v1, float64x2_t v2) { +// return vcagtq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vcale_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8> +// NYI: [[VCALE_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.facge.v2i32.v2f32(<2 x float> %v2, <2 x float> %v1) +// NYI: ret <2 x i32> [[VCALE_V2_I]] +// uint32x2_t test_vcale_f32(float32x2_t v1, float32x2_t v2) { +// return vcale_f32(v1, v2); +// // Using registers other than v0, v1 are possible, but would be odd. +// } + +// NYI-LABEL: @test_vcale_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VCALE_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.facge.v1i64.v1f64(<1 x double> %b, <1 x double> %a) +// NYI: ret <1 x i64> [[VCALE_V2_I]] +// uint64x1_t test_vcale_f64(float64x1_t a, float64x1_t b) { +// return vcale_f64(a, b); +// } + +// NYI-LABEL: @test_vcaleq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> +// NYI: [[VCALEQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.facge.v4i32.v4f32(<4 x float> %v2, <4 x float> %v1) +// NYI: ret <4 x i32> [[VCALEQ_V2_I]] +// uint32x4_t test_vcaleq_f32(float32x4_t v1, float32x4_t v2) { +// return vcaleq_f32(v1, v2); +// // Using registers other than v0, v1 are possible, but would be odd. +// } + +// NYI-LABEL: @test_vcaleq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> +// NYI: [[VCALEQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.facge.v2i64.v2f64(<2 x double> %v2, <2 x double> %v1) +// NYI: ret <2 x i64> [[VCALEQ_V2_I]] +// uint64x2_t test_vcaleq_f64(float64x2_t v1, float64x2_t v2) { +// return vcaleq_f64(v1, v2); +// // Using registers other than v0, v1 are possible, but would be odd. +// } + +// NYI-LABEL: @test_vcalt_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8> +// NYI: [[VCALT_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.facgt.v2i32.v2f32(<2 x float> %v2, <2 x float> %v1) +// NYI: ret <2 x i32> [[VCALT_V2_I]] +// uint32x2_t test_vcalt_f32(float32x2_t v1, float32x2_t v2) { +// return vcalt_f32(v1, v2); +// // Using registers other than v0, v1 are possible, but would be odd. +// } + +// NYI-LABEL: @test_vcalt_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VCALT_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.facgt.v1i64.v1f64(<1 x double> %b, <1 x double> %a) +// NYI: ret <1 x i64> [[VCALT_V2_I]] +// uint64x1_t test_vcalt_f64(float64x1_t a, float64x1_t b) { +// return vcalt_f64(a, b); +// } + +// NYI-LABEL: @test_vcaltq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> +// NYI: [[VCALTQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.facgt.v4i32.v4f32(<4 x float> %v2, <4 x float> %v1) +// NYI: ret <4 x i32> [[VCALTQ_V2_I]] +// uint32x4_t test_vcaltq_f32(float32x4_t v1, float32x4_t v2) { +// return vcaltq_f32(v1, v2); +// // Using registers other than v0, v1 are possible, but would be odd. +// } + +// NYI-LABEL: @test_vcaltq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> +// NYI: [[VCALTQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.facgt.v2i64.v2f64(<2 x double> %v2, <2 x double> %v1) +// NYI: ret <2 x i64> [[VCALTQ_V2_I]] +// uint64x2_t test_vcaltq_f64(float64x2_t v1, float64x2_t v2) { +// return vcaltq_f64(v1, v2); +// // Using registers other than v0, v1 are possible, but would be odd. +// } + +// NYI-LABEL: @test_vtst_s8( +// NYI: [[TMP0:%.*]] = and <8 x i8> %v1, %v2 +// NYI: [[TMP1:%.*]] = icmp ne <8 x i8> [[TMP0]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i8> +// NYI: ret <8 x i8> [[VTST_I]] +// uint8x8_t test_vtst_s8(int8x8_t v1, int8x8_t v2) { +// return vtst_s8(v1, v2); +// } + +// NYI-LABEL: @test_vtst_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = and <4 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <4 x i16> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i16> +// NYI: ret <4 x i16> [[VTST_I]] +// uint16x4_t test_vtst_s16(int16x4_t v1, int16x4_t v2) { +// return vtst_s16(v1, v2); +// } + +// NYI-LABEL: @test_vtst_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = and <2 x i32> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i32> +// NYI: ret <2 x i32> [[VTST_I]] +// uint32x2_t test_vtst_s32(int32x2_t v1, int32x2_t v2) { +// return vtst_s32(v1, v2); +// } + +// NYI-LABEL: @test_vtst_u8( +// NYI: [[TMP0:%.*]] = and <8 x i8> %v1, %v2 +// NYI: [[TMP1:%.*]] = icmp ne <8 x i8> [[TMP0]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i8> +// NYI: ret <8 x i8> [[VTST_I]] +// uint8x8_t test_vtst_u8(uint8x8_t v1, uint8x8_t v2) { +// return vtst_u8(v1, v2); +// } + +// NYI-LABEL: @test_vtst_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = and <4 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <4 x i16> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i16> +// NYI: ret <4 x i16> [[VTST_I]] +// uint16x4_t test_vtst_u16(uint16x4_t v1, uint16x4_t v2) { +// return vtst_u16(v1, v2); +// } + +// NYI-LABEL: @test_vtst_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = and <2 x i32> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i32> +// NYI: ret <2 x i32> [[VTST_I]] +// uint32x2_t test_vtst_u32(uint32x2_t v1, uint32x2_t v2) { +// return vtst_u32(v1, v2); +// } + +// NYI-LABEL: @test_vtstq_s8( +// NYI: [[TMP0:%.*]] = and <16 x i8> %v1, %v2 +// NYI: [[TMP1:%.*]] = icmp ne <16 x i8> [[TMP0]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <16 x i1> [[TMP1]] to <16 x i8> +// NYI: ret <16 x i8> [[VTST_I]] +// uint8x16_t test_vtstq_s8(int8x16_t v1, int8x16_t v2) { +// return vtstq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vtstq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = and <8 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16> +// NYI: ret <8 x i16> [[VTST_I]] +// uint16x8_t test_vtstq_s16(int16x8_t v1, int16x8_t v2) { +// return vtstq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vtstq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = and <4 x i32> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32> +// NYI: ret <4 x i32> [[VTST_I]] +// uint32x4_t test_vtstq_s32(int32x4_t v1, int32x4_t v2) { +// return vtstq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vtstq_u8( +// NYI: [[TMP0:%.*]] = and <16 x i8> %v1, %v2 +// NYI: [[TMP1:%.*]] = icmp ne <16 x i8> [[TMP0]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <16 x i1> [[TMP1]] to <16 x i8> +// NYI: ret <16 x i8> [[VTST_I]] +// uint8x16_t test_vtstq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vtstq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vtstq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = and <8 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16> +// NYI: ret <8 x i16> [[VTST_I]] +// uint16x8_t test_vtstq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vtstq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vtstq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = and <4 x i32> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32> +// NYI: ret <4 x i32> [[VTST_I]] +// uint32x4_t test_vtstq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vtstq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vtstq_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = and <2 x i64> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64> +// NYI: ret <2 x i64> [[VTST_I]] +// uint64x2_t test_vtstq_s64(int64x2_t v1, int64x2_t v2) { +// return vtstq_s64(v1, v2); +// } + +// NYI-LABEL: @test_vtstq_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = and <2 x i64> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64> +// NYI: ret <2 x i64> [[VTST_I]] +// uint64x2_t test_vtstq_u64(uint64x2_t v1, uint64x2_t v2) { +// return vtstq_u64(v1, v2); +// } + +// NYI-LABEL: @test_vtst_p8( +// NYI: [[TMP0:%.*]] = and <8 x i8> %v1, %v2 +// NYI: [[TMP1:%.*]] = icmp ne <8 x i8> [[TMP0]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i8> +// NYI: ret <8 x i8> [[VTST_I]] +// uint8x8_t test_vtst_p8(poly8x8_t v1, poly8x8_t v2) { +// return vtst_p8(v1, v2); +// } + +// NYI-LABEL: @test_vtst_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[TMP2:%.*]] = and <4 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <4 x i16> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i16> +// NYI: ret <4 x i16> [[VTST_I]] +// uint16x4_t test_vtst_p16(poly16x4_t v1, poly16x4_t v2) { +// return vtst_p16(v1, v2); +// } + +// NYI-LABEL: @test_vtstq_p8( +// NYI: [[TMP0:%.*]] = and <16 x i8> %v1, %v2 +// NYI: [[TMP1:%.*]] = icmp ne <16 x i8> [[TMP0]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <16 x i1> [[TMP1]] to <16 x i8> +// NYI: ret <16 x i8> [[VTST_I]] +// uint8x16_t test_vtstq_p8(poly8x16_t v1, poly8x16_t v2) { +// return vtstq_p8(v1, v2); +// } + +// NYI-LABEL: @test_vtstq_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[TMP2:%.*]] = and <8 x i16> %v1, %v2 +// NYI: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16> +// NYI: ret <8 x i16> [[VTST_I]] +// uint16x8_t test_vtstq_p16(poly16x8_t v1, poly16x8_t v2) { +// return vtstq_p16(v1, v2); +// } + +// NYI-LABEL: @test_vtst_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = and <1 x i64> %a, %b +// NYI: [[TMP3:%.*]] = icmp ne <1 x i64> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64> +// NYI: ret <1 x i64> [[VTST_I]] +// uint64x1_t test_vtst_s64(int64x1_t a, int64x1_t b) { +// return vtst_s64(a, b); +// } + +// NYI-LABEL: @test_vtst_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = and <1 x i64> %a, %b +// NYI: [[TMP3:%.*]] = icmp ne <1 x i64> [[TMP2]], zeroinitializer +// NYI: [[VTST_I:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64> +// NYI: ret <1 x i64> [[VTST_I]] +// uint64x1_t test_vtst_u64(uint64x1_t a, uint64x1_t b) { +// return vtst_u64(a, b); +// } + +// NYI-LABEL: @test_vceq_s8( +// NYI: [[CMP_I:%.*]] = icmp eq <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// uint8x8_t test_vceq_s8(int8x8_t v1, int8x8_t v2) { +// return vceq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vceq_s16( +// NYI: [[CMP_I:%.*]] = icmp eq <4 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i16> +// NYI: ret <4 x i16> [[SEXT_I]] +// uint16x4_t test_vceq_s16(int16x4_t v1, int16x4_t v2) { +// return vceq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vceq_s32( +// NYI: [[CMP_I:%.*]] = icmp eq <2 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vceq_s32(int32x2_t v1, int32x2_t v2) { +// return vceq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vceq_s64( +// NYI: [[CMP_I:%.*]] = icmp eq <1 x i64> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vceq_s64(int64x1_t a, int64x1_t b) { +// return vceq_s64(a, b); +// } + +// NYI-LABEL: @test_vceq_u64( +// NYI: [[CMP_I:%.*]] = icmp eq <1 x i64> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vceq_u64(uint64x1_t a, uint64x1_t b) { +// return vceq_u64(a, b); +// } + +// NYI-LABEL: @test_vceq_f32( +// NYI: [[CMP_I:%.*]] = fcmp oeq <2 x float> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vceq_f32(float32x2_t v1, float32x2_t v2) { +// return vceq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vceq_f64( +// NYI: [[CMP_I:%.*]] = fcmp oeq <1 x double> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vceq_f64(float64x1_t a, float64x1_t b) { +// return vceq_f64(a, b); +// } + +// NYI-LABEL: @test_vceq_u8( +// NYI: [[CMP_I:%.*]] = icmp eq <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// uint8x8_t test_vceq_u8(uint8x8_t v1, uint8x8_t v2) { +// return vceq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vceq_u16( +// NYI: [[CMP_I:%.*]] = icmp eq <4 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i16> +// NYI: ret <4 x i16> [[SEXT_I]] +// uint16x4_t test_vceq_u16(uint16x4_t v1, uint16x4_t v2) { +// return vceq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vceq_u32( +// NYI: [[CMP_I:%.*]] = icmp eq <2 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vceq_u32(uint32x2_t v1, uint32x2_t v2) { +// return vceq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vceq_p8( +// NYI: [[CMP_I:%.*]] = icmp eq <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// uint8x8_t test_vceq_p8(poly8x8_t v1, poly8x8_t v2) { +// return vceq_p8(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_s8( +// NYI: [[CMP_I:%.*]] = icmp eq <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vceqq_s8(int8x16_t v1, int8x16_t v2) { +// return vceqq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_s16( +// NYI: [[CMP_I:%.*]] = icmp eq <8 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i16> +// NYI: ret <8 x i16> [[SEXT_I]] +// uint16x8_t test_vceqq_s16(int16x8_t v1, int16x8_t v2) { +// return vceqq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_s32( +// NYI: [[CMP_I:%.*]] = icmp eq <4 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vceqq_s32(int32x4_t v1, int32x4_t v2) { +// return vceqq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_f32( +// NYI: [[CMP_I:%.*]] = fcmp oeq <4 x float> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vceqq_f32(float32x4_t v1, float32x4_t v2) { +// return vceqq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_u8( +// NYI: [[CMP_I:%.*]] = icmp eq <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vceqq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vceqq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_u16( +// NYI: [[CMP_I:%.*]] = icmp eq <8 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i16> +// NYI: ret <8 x i16> [[SEXT_I]] +// uint16x8_t test_vceqq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vceqq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_u32( +// NYI: [[CMP_I:%.*]] = icmp eq <4 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vceqq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vceqq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_p8( +// NYI: [[CMP_I:%.*]] = icmp eq <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vceqq_p8(poly8x16_t v1, poly8x16_t v2) { +// return vceqq_p8(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_s64( +// NYI: [[CMP_I:%.*]] = icmp eq <2 x i64> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vceqq_s64(int64x2_t v1, int64x2_t v2) { +// return vceqq_s64(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_u64( +// NYI: [[CMP_I:%.*]] = icmp eq <2 x i64> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vceqq_u64(uint64x2_t v1, uint64x2_t v2) { +// return vceqq_u64(v1, v2); +// } + +// NYI-LABEL: @test_vceqq_f64( +// NYI: [[CMP_I:%.*]] = fcmp oeq <2 x double> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vceqq_f64(float64x2_t v1, float64x2_t v2) { +// return vceqq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vcge_s8( +// NYI: [[CMP_I:%.*]] = icmp sge <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// uint8x8_t test_vcge_s8(int8x8_t v1, int8x8_t v2) { +// return vcge_s8(v1, v2); +// } + +// NYI-LABEL: @test_vcge_s16( +// NYI: [[CMP_I:%.*]] = icmp sge <4 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i16> +// NYI: ret <4 x i16> [[SEXT_I]] +// uint16x4_t test_vcge_s16(int16x4_t v1, int16x4_t v2) { +// return vcge_s16(v1, v2); +// } + +// NYI-LABEL: @test_vcge_s32( +// NYI: [[CMP_I:%.*]] = icmp sge <2 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vcge_s32(int32x2_t v1, int32x2_t v2) { +// return vcge_s32(v1, v2); +// } + +// NYI-LABEL: @test_vcge_s64( +// NYI: [[CMP_I:%.*]] = icmp sge <1 x i64> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vcge_s64(int64x1_t a, int64x1_t b) { +// return vcge_s64(a, b); +// } + +// NYI-LABEL: @test_vcge_u64( +// NYI: [[CMP_I:%.*]] = icmp uge <1 x i64> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vcge_u64(uint64x1_t a, uint64x1_t b) { +// return vcge_u64(a, b); +// } + +// NYI-LABEL: @test_vcge_f32( +// NYI: [[CMP_I:%.*]] = fcmp oge <2 x float> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vcge_f32(float32x2_t v1, float32x2_t v2) { +// return vcge_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcge_f64( +// NYI: [[CMP_I:%.*]] = fcmp oge <1 x double> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vcge_f64(float64x1_t a, float64x1_t b) { +// return vcge_f64(a, b); +// } + +// NYI-LABEL: @test_vcge_u8( +// NYI: [[CMP_I:%.*]] = icmp uge <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// uint8x8_t test_vcge_u8(uint8x8_t v1, uint8x8_t v2) { +// return vcge_u8(v1, v2); +// } + +// NYI-LABEL: @test_vcge_u16( +// NYI: [[CMP_I:%.*]] = icmp uge <4 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i16> +// NYI: ret <4 x i16> [[SEXT_I]] +// uint16x4_t test_vcge_u16(uint16x4_t v1, uint16x4_t v2) { +// return vcge_u16(v1, v2); +// } + +// NYI-LABEL: @test_vcge_u32( +// NYI: [[CMP_I:%.*]] = icmp uge <2 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vcge_u32(uint32x2_t v1, uint32x2_t v2) { +// return vcge_u32(v1, v2); +// } + +// NYI-LABEL: @test_vcgeq_s8( +// NYI: [[CMP_I:%.*]] = icmp sge <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vcgeq_s8(int8x16_t v1, int8x16_t v2) { +// return vcgeq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vcgeq_s16( +// NYI: [[CMP_I:%.*]] = icmp sge <8 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i16> +// NYI: ret <8 x i16> [[SEXT_I]] +// uint16x8_t test_vcgeq_s16(int16x8_t v1, int16x8_t v2) { +// return vcgeq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vcgeq_s32( +// NYI: [[CMP_I:%.*]] = icmp sge <4 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcgeq_s32(int32x4_t v1, int32x4_t v2) { +// return vcgeq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vcgeq_f32( +// NYI: [[CMP_I:%.*]] = fcmp oge <4 x float> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcgeq_f32(float32x4_t v1, float32x4_t v2) { +// return vcgeq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcgeq_u8( +// NYI: [[CMP_I:%.*]] = icmp uge <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vcgeq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vcgeq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vcgeq_u16( +// NYI: [[CMP_I:%.*]] = icmp uge <8 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i16> +// NYI: ret <8 x i16> [[SEXT_I]] +// uint16x8_t test_vcgeq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vcgeq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vcgeq_u32( +// NYI: [[CMP_I:%.*]] = icmp uge <4 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcgeq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vcgeq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vcgeq_s64( +// NYI: [[CMP_I:%.*]] = icmp sge <2 x i64> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcgeq_s64(int64x2_t v1, int64x2_t v2) { +// return vcgeq_s64(v1, v2); +// } + +// NYI-LABEL: @test_vcgeq_u64( +// NYI: [[CMP_I:%.*]] = icmp uge <2 x i64> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcgeq_u64(uint64x2_t v1, uint64x2_t v2) { +// return vcgeq_u64(v1, v2); +// } + +// NYI-LABEL: @test_vcgeq_f64( +// NYI: [[CMP_I:%.*]] = fcmp oge <2 x double> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcgeq_f64(float64x2_t v1, float64x2_t v2) { +// return vcgeq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vcle_s8( +// NYI: [[CMP_I:%.*]] = icmp sle <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// Notes about vcle: +// LE condition predicate implemented as GE, so check reversed operands. +// Using registers other than v0, v1 are possible, but would be odd. +// uint8x8_t test_vcle_s8(int8x8_t v1, int8x8_t v2) { +// return vcle_s8(v1, v2); +// } + +// NYI-LABEL: @test_vcle_s16( +// NYI: [[CMP_I:%.*]] = icmp sle <4 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i16> +// NYI: ret <4 x i16> [[SEXT_I]] +// uint16x4_t test_vcle_s16(int16x4_t v1, int16x4_t v2) { +// return vcle_s16(v1, v2); +// } + +// NYI-LABEL: @test_vcle_s32( +// NYI: [[CMP_I:%.*]] = icmp sle <2 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vcle_s32(int32x2_t v1, int32x2_t v2) { +// return vcle_s32(v1, v2); +// } + +// NYI-LABEL: @test_vcle_s64( +// NYI: [[CMP_I:%.*]] = icmp sle <1 x i64> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vcle_s64(int64x1_t a, int64x1_t b) { +// return vcle_s64(a, b); +// } + +// NYI-LABEL: @test_vcle_u64( +// NYI: [[CMP_I:%.*]] = icmp ule <1 x i64> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vcle_u64(uint64x1_t a, uint64x1_t b) { +// return vcle_u64(a, b); +// } + +// NYI-LABEL: @test_vcle_f32( +// NYI: [[CMP_I:%.*]] = fcmp ole <2 x float> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vcle_f32(float32x2_t v1, float32x2_t v2) { +// return vcle_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcle_f64( +// NYI: [[CMP_I:%.*]] = fcmp ole <1 x double> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vcle_f64(float64x1_t a, float64x1_t b) { +// return vcle_f64(a, b); +// } + +// NYI-LABEL: @test_vcle_u8( +// NYI: [[CMP_I:%.*]] = icmp ule <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// uint8x8_t test_vcle_u8(uint8x8_t v1, uint8x8_t v2) { +// return vcle_u8(v1, v2); +// } + +// NYI-LABEL: @test_vcle_u16( +// NYI: [[CMP_I:%.*]] = icmp ule <4 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i16> +// NYI: ret <4 x i16> [[SEXT_I]] +// uint16x4_t test_vcle_u16(uint16x4_t v1, uint16x4_t v2) { +// return vcle_u16(v1, v2); +// } + +// NYI-LABEL: @test_vcle_u32( +// NYI: [[CMP_I:%.*]] = icmp ule <2 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vcle_u32(uint32x2_t v1, uint32x2_t v2) { +// return vcle_u32(v1, v2); +// } + +// NYI-LABEL: @test_vcleq_s8( +// NYI: [[CMP_I:%.*]] = icmp sle <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vcleq_s8(int8x16_t v1, int8x16_t v2) { +// return vcleq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vcleq_s16( +// NYI: [[CMP_I:%.*]] = icmp sle <8 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i16> +// NYI: ret <8 x i16> [[SEXT_I]] +// uint16x8_t test_vcleq_s16(int16x8_t v1, int16x8_t v2) { +// return vcleq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vcleq_s32( +// NYI: [[CMP_I:%.*]] = icmp sle <4 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcleq_s32(int32x4_t v1, int32x4_t v2) { +// return vcleq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vcleq_f32( +// NYI: [[CMP_I:%.*]] = fcmp ole <4 x float> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcleq_f32(float32x4_t v1, float32x4_t v2) { +// return vcleq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcleq_u8( +// NYI: [[CMP_I:%.*]] = icmp ule <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vcleq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vcleq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vcleq_u16( +// NYI: [[CMP_I:%.*]] = icmp ule <8 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i16> +// NYI: ret <8 x i16> [[SEXT_I]] +// uint16x8_t test_vcleq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vcleq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vcleq_u32( +// NYI: [[CMP_I:%.*]] = icmp ule <4 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcleq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vcleq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vcleq_s64( +// NYI: [[CMP_I:%.*]] = icmp sle <2 x i64> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcleq_s64(int64x2_t v1, int64x2_t v2) { +// return vcleq_s64(v1, v2); +// } + +// NYI-LABEL: @test_vcleq_u64( +// NYI: [[CMP_I:%.*]] = icmp ule <2 x i64> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcleq_u64(uint64x2_t v1, uint64x2_t v2) { +// return vcleq_u64(v1, v2); +// } + +// NYI-LABEL: @test_vcleq_f64( +// NYI: [[CMP_I:%.*]] = fcmp ole <2 x double> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcleq_f64(float64x2_t v1, float64x2_t v2) { +// return vcleq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vcgt_s8( +// NYI: [[CMP_I:%.*]] = icmp sgt <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// uint8x8_t test_vcgt_s8(int8x8_t v1, int8x8_t v2) { +// return vcgt_s8(v1, v2); +// } + +// NYI-LABEL: @test_vcgt_s16( +// NYI: [[CMP_I:%.*]] = icmp sgt <4 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i16> +// NYI: ret <4 x i16> [[SEXT_I]] +// uint16x4_t test_vcgt_s16(int16x4_t v1, int16x4_t v2) { +// return vcgt_s16(v1, v2); +// } + +// NYI-LABEL: @test_vcgt_s32( +// NYI: [[CMP_I:%.*]] = icmp sgt <2 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vcgt_s32(int32x2_t v1, int32x2_t v2) { +// return vcgt_s32(v1, v2); +// } + +// NYI-LABEL: @test_vcgt_s64( +// NYI: [[CMP_I:%.*]] = icmp sgt <1 x i64> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vcgt_s64(int64x1_t a, int64x1_t b) { +// return vcgt_s64(a, b); +// } + +// NYI-LABEL: @test_vcgt_u64( +// NYI: [[CMP_I:%.*]] = icmp ugt <1 x i64> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vcgt_u64(uint64x1_t a, uint64x1_t b) { +// return vcgt_u64(a, b); +// } + +// NYI-LABEL: @test_vcgt_f32( +// NYI: [[CMP_I:%.*]] = fcmp ogt <2 x float> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vcgt_f32(float32x2_t v1, float32x2_t v2) { +// return vcgt_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcgt_f64( +// NYI: [[CMP_I:%.*]] = fcmp ogt <1 x double> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vcgt_f64(float64x1_t a, float64x1_t b) { +// return vcgt_f64(a, b); +// } + +// NYI-LABEL: @test_vcgt_u8( +// NYI: [[CMP_I:%.*]] = icmp ugt <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// uint8x8_t test_vcgt_u8(uint8x8_t v1, uint8x8_t v2) { +// return vcgt_u8(v1, v2); +// } + +// NYI-LABEL: @test_vcgt_u16( +// NYI: [[CMP_I:%.*]] = icmp ugt <4 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i16> +// NYI: ret <4 x i16> [[SEXT_I]] +// uint16x4_t test_vcgt_u16(uint16x4_t v1, uint16x4_t v2) { +// return vcgt_u16(v1, v2); +// } + +// NYI-LABEL: @test_vcgt_u32( +// NYI: [[CMP_I:%.*]] = icmp ugt <2 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vcgt_u32(uint32x2_t v1, uint32x2_t v2) { +// return vcgt_u32(v1, v2); +// } + +// NYI-LABEL: @test_vcgtq_s8( +// NYI: [[CMP_I:%.*]] = icmp sgt <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vcgtq_s8(int8x16_t v1, int8x16_t v2) { +// return vcgtq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vcgtq_s16( +// NYI: [[CMP_I:%.*]] = icmp sgt <8 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i16> +// NYI: ret <8 x i16> [[SEXT_I]] +// uint16x8_t test_vcgtq_s16(int16x8_t v1, int16x8_t v2) { +// return vcgtq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vcgtq_s32( +// NYI: [[CMP_I:%.*]] = icmp sgt <4 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcgtq_s32(int32x4_t v1, int32x4_t v2) { +// return vcgtq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vcgtq_f32( +// NYI: [[CMP_I:%.*]] = fcmp ogt <4 x float> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcgtq_f32(float32x4_t v1, float32x4_t v2) { +// return vcgtq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcgtq_u8( +// NYI: [[CMP_I:%.*]] = icmp ugt <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vcgtq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vcgtq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vcgtq_u16( +// NYI: [[CMP_I:%.*]] = icmp ugt <8 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i16> +// NYI: ret <8 x i16> [[SEXT_I]] +// uint16x8_t test_vcgtq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vcgtq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vcgtq_u32( +// NYI: [[CMP_I:%.*]] = icmp ugt <4 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcgtq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vcgtq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vcgtq_s64( +// NYI: [[CMP_I:%.*]] = icmp sgt <2 x i64> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcgtq_s64(int64x2_t v1, int64x2_t v2) { +// return vcgtq_s64(v1, v2); +// } + +// NYI-LABEL: @test_vcgtq_u64( +// NYI: [[CMP_I:%.*]] = icmp ugt <2 x i64> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcgtq_u64(uint64x2_t v1, uint64x2_t v2) { +// return vcgtq_u64(v1, v2); +// } + +// NYI-LABEL: @test_vcgtq_f64( +// NYI: [[CMP_I:%.*]] = fcmp ogt <2 x double> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcgtq_f64(float64x2_t v1, float64x2_t v2) { +// return vcgtq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vclt_s8( +// NYI: [[CMP_I:%.*]] = icmp slt <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// Notes about vclt: +// LT condition predicate implemented as GT, so check reversed operands. +// Using registers other than v0, v1 are possible, but would be odd. +// uint8x8_t test_vclt_s8(int8x8_t v1, int8x8_t v2) { +// return vclt_s8(v1, v2); +// } + +// NYI-LABEL: @test_vclt_s16( +// NYI: [[CMP_I:%.*]] = icmp slt <4 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i16> +// NYI: ret <4 x i16> [[SEXT_I]] +// uint16x4_t test_vclt_s16(int16x4_t v1, int16x4_t v2) { +// return vclt_s16(v1, v2); +// } + +// NYI-LABEL: @test_vclt_s32( +// NYI: [[CMP_I:%.*]] = icmp slt <2 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vclt_s32(int32x2_t v1, int32x2_t v2) { +// return vclt_s32(v1, v2); +// } + +// NYI-LABEL: @test_vclt_s64( +// NYI: [[CMP_I:%.*]] = icmp slt <1 x i64> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vclt_s64(int64x1_t a, int64x1_t b) { +// return vclt_s64(a, b); +// } + +// NYI-LABEL: @test_vclt_u64( +// NYI: [[CMP_I:%.*]] = icmp ult <1 x i64> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vclt_u64(uint64x1_t a, uint64x1_t b) { +// return vclt_u64(a, b); +// } + +// NYI-LABEL: @test_vclt_f32( +// NYI: [[CMP_I:%.*]] = fcmp olt <2 x float> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vclt_f32(float32x2_t v1, float32x2_t v2) { +// return vclt_f32(v1, v2); +// } + +// NYI-LABEL: @test_vclt_f64( +// NYI: [[CMP_I:%.*]] = fcmp olt <1 x double> %a, %b +// NYI: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> +// NYI: ret <1 x i64> [[SEXT_I]] +// uint64x1_t test_vclt_f64(float64x1_t a, float64x1_t b) { +// return vclt_f64(a, b); +// } + +// NYI-LABEL: @test_vclt_u8( +// NYI: [[CMP_I:%.*]] = icmp ult <8 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i8> +// NYI: ret <8 x i8> [[SEXT_I]] +// uint8x8_t test_vclt_u8(uint8x8_t v1, uint8x8_t v2) { +// return vclt_u8(v1, v2); +// } + +// NYI-LABEL: @test_vclt_u16( +// NYI: [[CMP_I:%.*]] = icmp ult <4 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i16> +// NYI: ret <4 x i16> [[SEXT_I]] +// uint16x4_t test_vclt_u16(uint16x4_t v1, uint16x4_t v2) { +// return vclt_u16(v1, v2); +// } + +// NYI-LABEL: @test_vclt_u32( +// NYI: [[CMP_I:%.*]] = icmp ult <2 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> +// NYI: ret <2 x i32> [[SEXT_I]] +// uint32x2_t test_vclt_u32(uint32x2_t v1, uint32x2_t v2) { +// return vclt_u32(v1, v2); +// } + +// NYI-LABEL: @test_vcltq_s8( +// NYI: [[CMP_I:%.*]] = icmp slt <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vcltq_s8(int8x16_t v1, int8x16_t v2) { +// return vcltq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vcltq_s16( +// NYI: [[CMP_I:%.*]] = icmp slt <8 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i16> +// NYI: ret <8 x i16> [[SEXT_I]] +// uint16x8_t test_vcltq_s16(int16x8_t v1, int16x8_t v2) { +// return vcltq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vcltq_s32( +// NYI: [[CMP_I:%.*]] = icmp slt <4 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcltq_s32(int32x4_t v1, int32x4_t v2) { +// return vcltq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vcltq_f32( +// NYI: [[CMP_I:%.*]] = fcmp olt <4 x float> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcltq_f32(float32x4_t v1, float32x4_t v2) { +// return vcltq_f32(v1, v2); +// } + +// NYI-LABEL: @test_vcltq_u8( +// NYI: [[CMP_I:%.*]] = icmp ult <16 x i8> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <16 x i1> [[CMP_I]] to <16 x i8> +// NYI: ret <16 x i8> [[SEXT_I]] +// uint8x16_t test_vcltq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vcltq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vcltq_u16( +// NYI: [[CMP_I:%.*]] = icmp ult <8 x i16> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <8 x i1> [[CMP_I]] to <8 x i16> +// NYI: ret <8 x i16> [[SEXT_I]] +// uint16x8_t test_vcltq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vcltq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vcltq_u32( +// NYI: [[CMP_I:%.*]] = icmp ult <4 x i32> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> +// NYI: ret <4 x i32> [[SEXT_I]] +// uint32x4_t test_vcltq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vcltq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vcltq_s64( +// NYI: [[CMP_I:%.*]] = icmp slt <2 x i64> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcltq_s64(int64x2_t v1, int64x2_t v2) { +// return vcltq_s64(v1, v2); +// } + +// NYI-LABEL: @test_vcltq_u64( +// NYI: [[CMP_I:%.*]] = icmp ult <2 x i64> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcltq_u64(uint64x2_t v1, uint64x2_t v2) { +// return vcltq_u64(v1, v2); +// } + +// NYI-LABEL: @test_vcltq_f64( +// NYI: [[CMP_I:%.*]] = fcmp olt <2 x double> %v1, %v2 +// NYI: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> +// NYI: ret <2 x i64> [[SEXT_I]] +// uint64x2_t test_vcltq_f64(float64x2_t v1, float64x2_t v2) { +// return vcltq_f64(v1, v2); +// } + +// NYI-LABEL: @test_vhadd_s8( +// NYI: [[VHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %v1, <8 x i8> %v2) +// NYI: ret <8 x i8> [[VHADD_V_I]] +// int8x8_t test_vhadd_s8(int8x8_t v1, int8x8_t v2) { +// return vhadd_s8(v1, v2); +// } + +// NYI-LABEL: @test_vhadd_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[VHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %v1, <4 x i16> %v2) +// NYI: [[VHADD_V3_I:%.*]] = bitcast <4 x i16> [[VHADD_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VHADD_V2_I]] +// int16x4_t test_vhadd_s16(int16x4_t v1, int16x4_t v2) { +// return vhadd_s16(v1, v2); +// } + +// NYI-LABEL: @test_vhadd_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[VHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %v1, <2 x i32> %v2) +// NYI: [[VHADD_V3_I:%.*]] = bitcast <2 x i32> [[VHADD_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VHADD_V2_I]] +// int32x2_t test_vhadd_s32(int32x2_t v1, int32x2_t v2) { +// return vhadd_s32(v1, v2); +// } + +// NYI-LABEL: @test_vhadd_u8( +// NYI: [[VHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %v1, <8 x i8> %v2) +// NYI: ret <8 x i8> [[VHADD_V_I]] +// uint8x8_t test_vhadd_u8(uint8x8_t v1, uint8x8_t v2) { +// return vhadd_u8(v1, v2); +// } + +// NYI-LABEL: @test_vhadd_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[VHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %v1, <4 x i16> %v2) +// NYI: [[VHADD_V3_I:%.*]] = bitcast <4 x i16> [[VHADD_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VHADD_V2_I]] +// uint16x4_t test_vhadd_u16(uint16x4_t v1, uint16x4_t v2) { +// return vhadd_u16(v1, v2); +// } + +// NYI-LABEL: @test_vhadd_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[VHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> %v1, <2 x i32> %v2) +// NYI: [[VHADD_V3_I:%.*]] = bitcast <2 x i32> [[VHADD_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VHADD_V2_I]] +// uint32x2_t test_vhadd_u32(uint32x2_t v1, uint32x2_t v2) { +// return vhadd_u32(v1, v2); +// } + +// NYI-LABEL: @test_vhaddq_s8( +// NYI: [[VHADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %v1, <16 x i8> %v2) +// NYI: ret <16 x i8> [[VHADDQ_V_I]] +// int8x16_t test_vhaddq_s8(int8x16_t v1, int8x16_t v2) { +// return vhaddq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vhaddq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[VHADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %v1, <8 x i16> %v2) +// NYI: [[VHADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VHADDQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VHADDQ_V2_I]] +// int16x8_t test_vhaddq_s16(int16x8_t v1, int16x8_t v2) { +// return vhaddq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vhaddq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[VHADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %v1, <4 x i32> %v2) +// NYI: [[VHADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VHADDQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VHADDQ_V2_I]] +// int32x4_t test_vhaddq_s32(int32x4_t v1, int32x4_t v2) { +// return vhaddq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vhaddq_u8( +// NYI: [[VHADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %v1, <16 x i8> %v2) +// NYI: ret <16 x i8> [[VHADDQ_V_I]] +// uint8x16_t test_vhaddq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vhaddq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vhaddq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[VHADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %v1, <8 x i16> %v2) +// NYI: [[VHADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VHADDQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VHADDQ_V2_I]] +// uint16x8_t test_vhaddq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vhaddq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vhaddq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[VHADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> %v1, <4 x i32> %v2) +// NYI: [[VHADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VHADDQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VHADDQ_V2_I]] +// uint32x4_t test_vhaddq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vhaddq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vhsub_s8( +// NYI: [[VHSUB_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8> %v1, <8 x i8> %v2) +// NYI: ret <8 x i8> [[VHSUB_V_I]] +// int8x8_t test_vhsub_s8(int8x8_t v1, int8x8_t v2) { +// return vhsub_s8(v1, v2); +// } + +// NYI-LABEL: @test_vhsub_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[VHSUB_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.shsub.v4i16(<4 x i16> %v1, <4 x i16> %v2) +// NYI: [[VHSUB_V3_I:%.*]] = bitcast <4 x i16> [[VHSUB_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VHSUB_V2_I]] +// int16x4_t test_vhsub_s16(int16x4_t v1, int16x4_t v2) { +// return vhsub_s16(v1, v2); +// } + +// NYI-LABEL: @test_vhsub_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[VHSUB_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.shsub.v2i32(<2 x i32> %v1, <2 x i32> %v2) +// NYI: [[VHSUB_V3_I:%.*]] = bitcast <2 x i32> [[VHSUB_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VHSUB_V2_I]] +// int32x2_t test_vhsub_s32(int32x2_t v1, int32x2_t v2) { +// return vhsub_s32(v1, v2); +// } + +// NYI-LABEL: @test_vhsub_u8( +// NYI: [[VHSUB_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uhsub.v8i8(<8 x i8> %v1, <8 x i8> %v2) +// NYI: ret <8 x i8> [[VHSUB_V_I]] +// uint8x8_t test_vhsub_u8(uint8x8_t v1, uint8x8_t v2) { +// return vhsub_u8(v1, v2); +// } + +// NYI-LABEL: @test_vhsub_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[VHSUB_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uhsub.v4i16(<4 x i16> %v1, <4 x i16> %v2) +// NYI: [[VHSUB_V3_I:%.*]] = bitcast <4 x i16> [[VHSUB_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VHSUB_V2_I]] +// uint16x4_t test_vhsub_u16(uint16x4_t v1, uint16x4_t v2) { +// return vhsub_u16(v1, v2); +// } + +// NYI-LABEL: @test_vhsub_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[VHSUB_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uhsub.v2i32(<2 x i32> %v1, <2 x i32> %v2) +// NYI: [[VHSUB_V3_I:%.*]] = bitcast <2 x i32> [[VHSUB_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VHSUB_V2_I]] +// uint32x2_t test_vhsub_u32(uint32x2_t v1, uint32x2_t v2) { +// return vhsub_u32(v1, v2); +// } + +// NYI-LABEL: @test_vhsubq_s8( +// NYI: [[VHSUBQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.shsub.v16i8(<16 x i8> %v1, <16 x i8> %v2) +// NYI: ret <16 x i8> [[VHSUBQ_V_I]] +// int8x16_t test_vhsubq_s8(int8x16_t v1, int8x16_t v2) { +// return vhsubq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vhsubq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[VHSUBQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.shsub.v8i16(<8 x i16> %v1, <8 x i16> %v2) +// NYI: [[VHSUBQ_V3_I:%.*]] = bitcast <8 x i16> [[VHSUBQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VHSUBQ_V2_I]] +// int16x8_t test_vhsubq_s16(int16x8_t v1, int16x8_t v2) { +// return vhsubq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vhsubq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[VHSUBQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.shsub.v4i32(<4 x i32> %v1, <4 x i32> %v2) +// NYI: [[VHSUBQ_V3_I:%.*]] = bitcast <4 x i32> [[VHSUBQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VHSUBQ_V2_I]] +// int32x4_t test_vhsubq_s32(int32x4_t v1, int32x4_t v2) { +// return vhsubq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vhsubq_u8( +// NYI: [[VHSUBQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uhsub.v16i8(<16 x i8> %v1, <16 x i8> %v2) +// NYI: ret <16 x i8> [[VHSUBQ_V_I]] +// uint8x16_t test_vhsubq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vhsubq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vhsubq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[VHSUBQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uhsub.v8i16(<8 x i16> %v1, <8 x i16> %v2) +// NYI: [[VHSUBQ_V3_I:%.*]] = bitcast <8 x i16> [[VHSUBQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VHSUBQ_V2_I]] +// uint16x8_t test_vhsubq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vhsubq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vhsubq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[VHSUBQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uhsub.v4i32(<4 x i32> %v1, <4 x i32> %v2) +// NYI: [[VHSUBQ_V3_I:%.*]] = bitcast <4 x i32> [[VHSUBQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VHSUBQ_V2_I]] +// uint32x4_t test_vhsubq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vhsubq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vrhadd_s8( +// NYI: [[VRHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %v1, <8 x i8> %v2) +// NYI: ret <8 x i8> [[VRHADD_V_I]] +// int8x8_t test_vrhadd_s8(int8x8_t v1, int8x8_t v2) { +// return vrhadd_s8(v1, v2); +// } + +// NYI-LABEL: @test_vrhadd_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[VRHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %v1, <4 x i16> %v2) +// NYI: [[VRHADD_V3_I:%.*]] = bitcast <4 x i16> [[VRHADD_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VRHADD_V2_I]] +// int16x4_t test_vrhadd_s16(int16x4_t v1, int16x4_t v2) { +// return vrhadd_s16(v1, v2); +// } + +// NYI-LABEL: @test_vrhadd_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[VRHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %v1, <2 x i32> %v2) +// NYI: [[VRHADD_V3_I:%.*]] = bitcast <2 x i32> [[VRHADD_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VRHADD_V2_I]] +// int32x2_t test_vrhadd_s32(int32x2_t v1, int32x2_t v2) { +// return vrhadd_s32(v1, v2); +// } + +// NYI-LABEL: @test_vrhadd_u8( +// NYI: [[VRHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %v1, <8 x i8> %v2) +// NYI: ret <8 x i8> [[VRHADD_V_I]] +// uint8x8_t test_vrhadd_u8(uint8x8_t v1, uint8x8_t v2) { +// return vrhadd_u8(v1, v2); +// } + +// NYI-LABEL: @test_vrhadd_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> +// NYI: [[VRHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> %v1, <4 x i16> %v2) +// NYI: [[VRHADD_V3_I:%.*]] = bitcast <4 x i16> [[VRHADD_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VRHADD_V2_I]] +// uint16x4_t test_vrhadd_u16(uint16x4_t v1, uint16x4_t v2) { +// return vrhadd_u16(v1, v2); +// } + +// NYI-LABEL: @test_vrhadd_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> +// NYI: [[VRHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> %v1, <2 x i32> %v2) +// NYI: [[VRHADD_V3_I:%.*]] = bitcast <2 x i32> [[VRHADD_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VRHADD_V2_I]] +// uint32x2_t test_vrhadd_u32(uint32x2_t v1, uint32x2_t v2) { +// return vrhadd_u32(v1, v2); +// } + +// NYI-LABEL: @test_vrhaddq_s8( +// NYI: [[VRHADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %v1, <16 x i8> %v2) +// NYI: ret <16 x i8> [[VRHADDQ_V_I]] +// int8x16_t test_vrhaddq_s8(int8x16_t v1, int8x16_t v2) { +// return vrhaddq_s8(v1, v2); +// } + +// NYI-LABEL: @test_vrhaddq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[VRHADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %v1, <8 x i16> %v2) +// NYI: [[VRHADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VRHADDQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VRHADDQ_V2_I]] +// int16x8_t test_vrhaddq_s16(int16x8_t v1, int16x8_t v2) { +// return vrhaddq_s16(v1, v2); +// } + +// NYI-LABEL: @test_vrhaddq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[VRHADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %v1, <4 x i32> %v2) +// NYI: [[VRHADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VRHADDQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VRHADDQ_V2_I]] +// int32x4_t test_vrhaddq_s32(int32x4_t v1, int32x4_t v2) { +// return vrhaddq_s32(v1, v2); +// } + +// NYI-LABEL: @test_vrhaddq_u8( +// NYI: [[VRHADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %v1, <16 x i8> %v2) +// NYI: ret <16 x i8> [[VRHADDQ_V_I]] +// uint8x16_t test_vrhaddq_u8(uint8x16_t v1, uint8x16_t v2) { +// return vrhaddq_u8(v1, v2); +// } + +// NYI-LABEL: @test_vrhaddq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> +// NYI: [[VRHADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %v1, <8 x i16> %v2) +// NYI: [[VRHADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VRHADDQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VRHADDQ_V2_I]] +// uint16x8_t test_vrhaddq_u16(uint16x8_t v1, uint16x8_t v2) { +// return vrhaddq_u16(v1, v2); +// } + +// NYI-LABEL: @test_vrhaddq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> +// NYI: [[VRHADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> %v1, <4 x i32> %v2) +// NYI: [[VRHADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VRHADDQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VRHADDQ_V2_I]] +// uint32x4_t test_vrhaddq_u32(uint32x4_t v1, uint32x4_t v2) { +// return vrhaddq_u32(v1, v2); +// } + +// NYI-LABEL: @test_vqadd_s8( +// NYI: [[VQADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VQADD_V_I]] +// int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { +// return vqadd_s8(a, b); +// } + +// NYI-LABEL: @test_vqadd_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQADD_V3_I:%.*]] = bitcast <4 x i16> [[VQADD_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VQADD_V2_I]] +// int16x4_t test_vqadd_s16(int16x4_t a, int16x4_t b) { +// return vqadd_s16(a, b); +// } + +// NYI-LABEL: @test_vqadd_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQADD_V3_I:%.*]] = bitcast <2 x i32> [[VQADD_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VQADD_V2_I]] +// int32x2_t test_vqadd_s32(int32x2_t a, int32x2_t b) { +// return vqadd_s32(a, b); +// } + +// NYI-LABEL: @test_vqadd_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VQADD_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqadd.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VQADD_V3_I:%.*]] = bitcast <1 x i64> [[VQADD_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VQADD_V2_I]] +// int64x1_t test_vqadd_s64(int64x1_t a, int64x1_t b) { +// return vqadd_s64(a, b); +// } + +// NYI-LABEL: @test_vqadd_u8( +// NYI: [[VQADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VQADD_V_I]] +// uint8x8_t test_vqadd_u8(uint8x8_t a, uint8x8_t b) { +// return vqadd_u8(a, b); +// } + +// NYI-LABEL: @test_vqadd_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQADD_V3_I:%.*]] = bitcast <4 x i16> [[VQADD_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VQADD_V2_I]] +// uint16x4_t test_vqadd_u16(uint16x4_t a, uint16x4_t b) { +// return vqadd_u16(a, b); +// } + +// NYI-LABEL: @test_vqadd_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQADD_V3_I:%.*]] = bitcast <2 x i32> [[VQADD_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VQADD_V2_I]] +// uint32x2_t test_vqadd_u32(uint32x2_t a, uint32x2_t b) { +// return vqadd_u32(a, b); +// } + +// NYI-LABEL: @test_vqadd_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VQADD_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uqadd.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VQADD_V3_I:%.*]] = bitcast <1 x i64> [[VQADD_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VQADD_V2_I]] +// uint64x1_t test_vqadd_u64(uint64x1_t a, uint64x1_t b) { +// return vqadd_u64(a, b); +// } + +// NYI-LABEL: @test_vqaddq_s8( +// NYI: [[VQADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VQADDQ_V_I]] +// int8x16_t test_vqaddq_s8(int8x16_t a, int8x16_t b) { +// return vqaddq_s8(a, b); +// } + +// NYI-LABEL: @test_vqaddq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VQADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VQADDQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VQADDQ_V2_I]] +// int16x8_t test_vqaddq_s16(int16x8_t a, int16x8_t b) { +// return vqaddq_s16(a, b); +// } + +// NYI-LABEL: @test_vqaddq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VQADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VQADDQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQADDQ_V2_I]] +// int32x4_t test_vqaddq_s32(int32x4_t a, int32x4_t b) { +// return vqaddq_s32(a, b); +// } + +// NYI-LABEL: @test_vqaddq_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQADDQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VQADDQ_V3_I:%.*]] = bitcast <2 x i64> [[VQADDQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VQADDQ_V2_I]] +// int64x2_t test_vqaddq_s64(int64x2_t a, int64x2_t b) { +// return vqaddq_s64(a, b); +// } + +// NYI-LABEL: @test_vqaddq_u8( +// NYI: [[VQADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uqadd.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VQADDQ_V_I]] +// uint8x16_t test_vqaddq_u8(uint8x16_t a, uint8x16_t b) { +// return vqaddq_u8(a, b); +// } + +// NYI-LABEL: @test_vqaddq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uqadd.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VQADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VQADDQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VQADDQ_V2_I]] +// uint16x8_t test_vqaddq_u16(uint16x8_t a, uint16x8_t b) { +// return vqaddq_u16(a, b); +// } + +// NYI-LABEL: @test_vqaddq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uqadd.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VQADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VQADDQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQADDQ_V2_I]] +// uint32x4_t test_vqaddq_u32(uint32x4_t a, uint32x4_t b) { +// return vqaddq_u32(a, b); +// } + +// NYI-LABEL: @test_vqaddq_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQADDQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VQADDQ_V3_I:%.*]] = bitcast <2 x i64> [[VQADDQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VQADDQ_V2_I]] +// uint64x2_t test_vqaddq_u64(uint64x2_t a, uint64x2_t b) { +// return vqaddq_u64(a, b); +// } + +// NYI-LABEL: @test_vqsub_s8( +// NYI: [[VQSUB_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VQSUB_V_I]] +// int8x8_t test_vqsub_s8(int8x8_t a, int8x8_t b) { +// return vqsub_s8(a, b); +// } + +// NYI-LABEL: @test_vqsub_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQSUB_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQSUB_V3_I:%.*]] = bitcast <4 x i16> [[VQSUB_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VQSUB_V2_I]] +// int16x4_t test_vqsub_s16(int16x4_t a, int16x4_t b) { +// return vqsub_s16(a, b); +// } + +// NYI-LABEL: @test_vqsub_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQSUB_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQSUB_V3_I:%.*]] = bitcast <2 x i32> [[VQSUB_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VQSUB_V2_I]] +// int32x2_t test_vqsub_s32(int32x2_t a, int32x2_t b) { +// return vqsub_s32(a, b); +// } + +// NYI-LABEL: @test_vqsub_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VQSUB_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqsub.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VQSUB_V3_I:%.*]] = bitcast <1 x i64> [[VQSUB_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VQSUB_V2_I]] +// int64x1_t test_vqsub_s64(int64x1_t a, int64x1_t b) { +// return vqsub_s64(a, b); +// } + +// NYI-LABEL: @test_vqsub_u8( +// NYI: [[VQSUB_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VQSUB_V_I]] +// uint8x8_t test_vqsub_u8(uint8x8_t a, uint8x8_t b) { +// return vqsub_u8(a, b); +// } + +// NYI-LABEL: @test_vqsub_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQSUB_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQSUB_V3_I:%.*]] = bitcast <4 x i16> [[VQSUB_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VQSUB_V2_I]] +// uint16x4_t test_vqsub_u16(uint16x4_t a, uint16x4_t b) { +// return vqsub_u16(a, b); +// } + +// NYI-LABEL: @test_vqsub_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQSUB_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQSUB_V3_I:%.*]] = bitcast <2 x i32> [[VQSUB_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VQSUB_V2_I]] +// uint32x2_t test_vqsub_u32(uint32x2_t a, uint32x2_t b) { +// return vqsub_u32(a, b); +// } + +// NYI-LABEL: @test_vqsub_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VQSUB_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uqsub.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VQSUB_V3_I:%.*]] = bitcast <1 x i64> [[VQSUB_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VQSUB_V2_I]] +// uint64x1_t test_vqsub_u64(uint64x1_t a, uint64x1_t b) { +// return vqsub_u64(a, b); +// } + +// NYI-LABEL: @test_vqsubq_s8( +// NYI: [[VQSUBQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VQSUBQ_V_I]] +// int8x16_t test_vqsubq_s8(int8x16_t a, int8x16_t b) { +// return vqsubq_s8(a, b); +// } + +// NYI-LABEL: @test_vqsubq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQSUBQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VQSUBQ_V3_I:%.*]] = bitcast <8 x i16> [[VQSUBQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VQSUBQ_V2_I]] +// int16x8_t test_vqsubq_s16(int16x8_t a, int16x8_t b) { +// return vqsubq_s16(a, b); +// } + +// NYI-LABEL: @test_vqsubq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQSUBQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VQSUBQ_V3_I:%.*]] = bitcast <4 x i32> [[VQSUBQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQSUBQ_V2_I]] +// int32x4_t test_vqsubq_s32(int32x4_t a, int32x4_t b) { +// return vqsubq_s32(a, b); +// } + +// NYI-LABEL: @test_vqsubq_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQSUBQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VQSUBQ_V3_I:%.*]] = bitcast <2 x i64> [[VQSUBQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VQSUBQ_V2_I]] +// int64x2_t test_vqsubq_s64(int64x2_t a, int64x2_t b) { +// return vqsubq_s64(a, b); +// } + +// NYI-LABEL: @test_vqsubq_u8( +// NYI: [[VQSUBQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VQSUBQ_V_I]] +// uint8x16_t test_vqsubq_u8(uint8x16_t a, uint8x16_t b) { +// return vqsubq_u8(a, b); +// } + +// NYI-LABEL: @test_vqsubq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQSUBQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VQSUBQ_V3_I:%.*]] = bitcast <8 x i16> [[VQSUBQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VQSUBQ_V2_I]] +// uint16x8_t test_vqsubq_u16(uint16x8_t a, uint16x8_t b) { +// return vqsubq_u16(a, b); +// } + +// NYI-LABEL: @test_vqsubq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQSUBQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uqsub.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VQSUBQ_V3_I:%.*]] = bitcast <4 x i32> [[VQSUBQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQSUBQ_V2_I]] +// uint32x4_t test_vqsubq_u32(uint32x4_t a, uint32x4_t b) { +// return vqsubq_u32(a, b); +// } + +// NYI-LABEL: @test_vqsubq_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQSUBQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VQSUBQ_V3_I:%.*]] = bitcast <2 x i64> [[VQSUBQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VQSUBQ_V2_I]] +// uint64x2_t test_vqsubq_u64(uint64x2_t a, uint64x2_t b) { +// return vqsubq_u64(a, b); +// } + +// NYI-LABEL: @test_vshl_s8( +// NYI: [[VSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sshl.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VSHL_V_I]] +// int8x8_t test_vshl_s8(int8x8_t a, int8x8_t b) { +// return vshl_s8(a, b); +// } + +// NYI-LABEL: @test_vshl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sshl.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VSHL_V3_I:%.*]] = bitcast <4 x i16> [[VSHL_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VSHL_V2_I]] +// int16x4_t test_vshl_s16(int16x4_t a, int16x4_t b) { +// return vshl_s16(a, b); +// } + +// NYI-LABEL: @test_vshl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sshl.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VSHL_V3_I:%.*]] = bitcast <2 x i32> [[VSHL_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VSHL_V2_I]] +// int32x2_t test_vshl_s32(int32x2_t a, int32x2_t b) { +// return vshl_s32(a, b); +// } + +// NYI-LABEL: @test_vshl_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sshl.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VSHL_V3_I:%.*]] = bitcast <1 x i64> [[VSHL_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VSHL_V2_I]] +// int64x1_t test_vshl_s64(int64x1_t a, int64x1_t b) { +// return vshl_s64(a, b); +// } + +// NYI-LABEL: @test_vshl_u8( +// NYI: [[VSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.ushl.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VSHL_V_I]] +// uint8x8_t test_vshl_u8(uint8x8_t a, int8x8_t b) { +// return vshl_u8(a, b); +// } + +// NYI-LABEL: @test_vshl_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.ushl.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VSHL_V3_I:%.*]] = bitcast <4 x i16> [[VSHL_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VSHL_V2_I]] +// uint16x4_t test_vshl_u16(uint16x4_t a, int16x4_t b) { +// return vshl_u16(a, b); +// } + +// NYI-LABEL: @test_vshl_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.ushl.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VSHL_V3_I:%.*]] = bitcast <2 x i32> [[VSHL_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VSHL_V2_I]] +// uint32x2_t test_vshl_u32(uint32x2_t a, int32x2_t b) { +// return vshl_u32(a, b); +// } + +// NYI-LABEL: @test_vshl_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.ushl.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VSHL_V3_I:%.*]] = bitcast <1 x i64> [[VSHL_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VSHL_V2_I]] +// uint64x1_t test_vshl_u64(uint64x1_t a, int64x1_t b) { +// return vshl_u64(a, b); +// } + +// NYI-LABEL: @test_vshlq_s8( +// NYI: [[VSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VSHLQ_V_I]] +// int8x16_t test_vshlq_s8(int8x16_t a, int8x16_t b) { +// return vshlq_s8(a, b); +// } + +// NYI-LABEL: @test_vshlq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sshl.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VSHLQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VSHLQ_V2_I]] +// int16x8_t test_vshlq_s16(int16x8_t a, int16x8_t b) { +// return vshlq_s16(a, b); +// } + +// NYI-LABEL: @test_vshlq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VSHLQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VSHLQ_V2_I]] +// int32x4_t test_vshlq_s32(int32x4_t a, int32x4_t b) { +// return vshlq_s32(a, b); +// } + +// NYI-LABEL: @test_vshlq_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VSHLQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VSHLQ_V2_I]] +// int64x2_t test_vshlq_s64(int64x2_t a, int64x2_t b) { +// return vshlq_s64(a, b); +// } + +// NYI-LABEL: @test_vshlq_u8( +// NYI: [[VSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.ushl.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VSHLQ_V_I]] +// uint8x16_t test_vshlq_u8(uint8x16_t a, int8x16_t b) { +// return vshlq_u8(a, b); +// } + +// NYI-LABEL: @test_vshlq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VSHLQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VSHLQ_V2_I]] +// uint16x8_t test_vshlq_u16(uint16x8_t a, int16x8_t b) { +// return vshlq_u16(a, b); +// } + +// NYI-LABEL: @test_vshlq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VSHLQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VSHLQ_V2_I]] +// uint32x4_t test_vshlq_u32(uint32x4_t a, int32x4_t b) { +// return vshlq_u32(a, b); +// } + +// NYI-LABEL: @test_vshlq_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.ushl.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VSHLQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VSHLQ_V2_I]] +// uint64x2_t test_vshlq_u64(uint64x2_t a, int64x2_t b) { +// return vshlq_u64(a, b); +// } + +// NYI-LABEL: @test_vqshl_s8( +// NYI: [[VQSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VQSHL_V_I]] +// int8x8_t test_vqshl_s8(int8x8_t a, int8x8_t b) { +// return vqshl_s8(a, b); +// } + +// NYI-LABEL: @test_vqshl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQSHL_V3_I:%.*]] = bitcast <4 x i16> [[VQSHL_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VQSHL_V2_I]] +// int16x4_t test_vqshl_s16(int16x4_t a, int16x4_t b) { +// return vqshl_s16(a, b); +// } + +// NYI-LABEL: @test_vqshl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQSHL_V3_I:%.*]] = bitcast <2 x i32> [[VQSHL_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VQSHL_V2_I]] +// int32x2_t test_vqshl_s32(int32x2_t a, int32x2_t b) { +// return vqshl_s32(a, b); +// } + +// NYI-LABEL: @test_vqshl_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VQSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VQSHL_V3_I:%.*]] = bitcast <1 x i64> [[VQSHL_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VQSHL_V2_I]] +// int64x1_t test_vqshl_s64(int64x1_t a, int64x1_t b) { +// return vqshl_s64(a, b); +// } + +// NYI-LABEL: @test_vqshl_u8( +// NYI: [[VQSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VQSHL_V_I]] +// uint8x8_t test_vqshl_u8(uint8x8_t a, int8x8_t b) { +// return vqshl_u8(a, b); +// } + +// NYI-LABEL: @test_vqshl_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQSHL_V3_I:%.*]] = bitcast <4 x i16> [[VQSHL_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VQSHL_V2_I]] +// uint16x4_t test_vqshl_u16(uint16x4_t a, int16x4_t b) { +// return vqshl_u16(a, b); +// } + +// NYI-LABEL: @test_vqshl_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQSHL_V3_I:%.*]] = bitcast <2 x i32> [[VQSHL_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VQSHL_V2_I]] +// uint32x2_t test_vqshl_u32(uint32x2_t a, int32x2_t b) { +// return vqshl_u32(a, b); +// } + +// NYI-LABEL: @test_vqshl_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VQSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VQSHL_V3_I:%.*]] = bitcast <1 x i64> [[VQSHL_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VQSHL_V2_I]] +// uint64x1_t test_vqshl_u64(uint64x1_t a, int64x1_t b) { +// return vqshl_u64(a, b); +// } + +// NYI-LABEL: @test_vqshlq_s8( +// NYI: [[VQSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VQSHLQ_V_I]] +// int8x16_t test_vqshlq_s8(int8x16_t a, int8x16_t b) { +// return vqshlq_s8(a, b); +// } + +// NYI-LABEL: @test_vqshlq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VQSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VQSHLQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VQSHLQ_V2_I]] +// int16x8_t test_vqshlq_s16(int16x8_t a, int16x8_t b) { +// return vqshlq_s16(a, b); +// } + +// NYI-LABEL: @test_vqshlq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VQSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VQSHLQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQSHLQ_V2_I]] +// int32x4_t test_vqshlq_s32(int32x4_t a, int32x4_t b) { +// return vqshlq_s32(a, b); +// } + +// NYI-LABEL: @test_vqshlq_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VQSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VQSHLQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VQSHLQ_V2_I]] +// int64x2_t test_vqshlq_s64(int64x2_t a, int64x2_t b) { +// return vqshlq_s64(a, b); +// } + +// NYI-LABEL: @test_vqshlq_u8( +// NYI: [[VQSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VQSHLQ_V_I]] +// uint8x16_t test_vqshlq_u8(uint8x16_t a, int8x16_t b) { +// return vqshlq_u8(a, b); +// } + +// NYI-LABEL: @test_vqshlq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VQSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VQSHLQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VQSHLQ_V2_I]] +// uint16x8_t test_vqshlq_u16(uint16x8_t a, int16x8_t b) { +// return vqshlq_u16(a, b); +// } + +// NYI-LABEL: @test_vqshlq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VQSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VQSHLQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQSHLQ_V2_I]] +// uint32x4_t test_vqshlq_u32(uint32x4_t a, int32x4_t b) { +// return vqshlq_u32(a, b); +// } + +// NYI-LABEL: @test_vqshlq_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VQSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VQSHLQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VQSHLQ_V2_I]] +// uint64x2_t test_vqshlq_u64(uint64x2_t a, int64x2_t b) { +// return vqshlq_u64(a, b); +// } + +// NYI-LABEL: @test_vrshl_s8( +// NYI: [[VRSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VRSHL_V_I]] +// int8x8_t test_vrshl_s8(int8x8_t a, int8x8_t b) { +// return vrshl_s8(a, b); +// } + +// NYI-LABEL: @test_vrshl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VRSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VRSHL_V3_I:%.*]] = bitcast <4 x i16> [[VRSHL_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VRSHL_V2_I]] +// int16x4_t test_vrshl_s16(int16x4_t a, int16x4_t b) { +// return vrshl_s16(a, b); +// } + +// NYI-LABEL: @test_vrshl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VRSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VRSHL_V3_I:%.*]] = bitcast <2 x i32> [[VRSHL_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VRSHL_V2_I]] +// int32x2_t test_vrshl_s32(int32x2_t a, int32x2_t b) { +// return vrshl_s32(a, b); +// } + +// NYI-LABEL: @test_vrshl_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VRSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VRSHL_V3_I:%.*]] = bitcast <1 x i64> [[VRSHL_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VRSHL_V2_I]] +// int64x1_t test_vrshl_s64(int64x1_t a, int64x1_t b) { +// return vrshl_s64(a, b); +// } + +// NYI-LABEL: @test_vrshl_u8( +// NYI: [[VRSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VRSHL_V_I]] +// uint8x8_t test_vrshl_u8(uint8x8_t a, int8x8_t b) { +// return vrshl_u8(a, b); +// } + +// NYI-LABEL: @test_vrshl_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VRSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VRSHL_V3_I:%.*]] = bitcast <4 x i16> [[VRSHL_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VRSHL_V2_I]] +// uint16x4_t test_vrshl_u16(uint16x4_t a, int16x4_t b) { +// return vrshl_u16(a, b); +// } + +// NYI-LABEL: @test_vrshl_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VRSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VRSHL_V3_I:%.*]] = bitcast <2 x i32> [[VRSHL_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VRSHL_V2_I]] +// uint32x2_t test_vrshl_u32(uint32x2_t a, int32x2_t b) { +// return vrshl_u32(a, b); +// } + +// NYI-LABEL: @test_vrshl_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VRSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VRSHL_V3_I:%.*]] = bitcast <1 x i64> [[VRSHL_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VRSHL_V2_I]] +// uint64x1_t test_vrshl_u64(uint64x1_t a, int64x1_t b) { +// return vrshl_u64(a, b); +// } + +// NYI-LABEL: @test_vrshlq_s8( +// NYI: [[VRSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VRSHLQ_V_I]] +// int8x16_t test_vrshlq_s8(int8x16_t a, int8x16_t b) { +// return vrshlq_s8(a, b); +// } + +// NYI-LABEL: @test_vrshlq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VRSHLQ_V2_I]] +// int16x8_t test_vrshlq_s16(int16x8_t a, int16x8_t b) { +// return vrshlq_s16(a, b); +// } + +// NYI-LABEL: @test_vrshlq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VRSHLQ_V2_I]] +// int32x4_t test_vrshlq_s32(int32x4_t a, int32x4_t b) { +// return vrshlq_s32(a, b); +// } + +// NYI-LABEL: @test_vrshlq_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VRSHLQ_V2_I]] +// int64x2_t test_vrshlq_s64(int64x2_t a, int64x2_t b) { +// return vrshlq_s64(a, b); +// } + +// NYI-LABEL: @test_vrshlq_u8( +// NYI: [[VRSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VRSHLQ_V_I]] +// uint8x16_t test_vrshlq_u8(uint8x16_t a, int8x16_t b) { +// return vrshlq_u8(a, b); +// } + +// NYI-LABEL: @test_vrshlq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VRSHLQ_V2_I]] +// uint16x8_t test_vrshlq_u16(uint16x8_t a, int16x8_t b) { +// return vrshlq_u16(a, b); +// } + +// NYI-LABEL: @test_vrshlq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VRSHLQ_V2_I]] +// uint32x4_t test_vrshlq_u32(uint32x4_t a, int32x4_t b) { +// return vrshlq_u32(a, b); +// } + +// NYI-LABEL: @test_vrshlq_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VRSHLQ_V2_I]] +// uint64x2_t test_vrshlq_u64(uint64x2_t a, int64x2_t b) { +// return vrshlq_u64(a, b); +// } + +// NYI-LABEL: @test_vqrshl_s8( +// NYI: [[VQRSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshl.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VQRSHL_V_I]] +// int8x8_t test_vqrshl_s8(int8x8_t a, int8x8_t b) { +// return vqrshl_s8(a, b); +// } + +// NYI-LABEL: @test_vqrshl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQRSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshl.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQRSHL_V3_I:%.*]] = bitcast <4 x i16> [[VQRSHL_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VQRSHL_V2_I]] +// int16x4_t test_vqrshl_s16(int16x4_t a, int16x4_t b) { +// return vqrshl_s16(a, b); +// } + +// NYI-LABEL: @test_vqrshl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQRSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrshl.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQRSHL_V3_I:%.*]] = bitcast <2 x i32> [[VQRSHL_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VQRSHL_V2_I]] +// int32x2_t test_vqrshl_s32(int32x2_t a, int32x2_t b) { +// return vqrshl_s32(a, b); +// } + +// NYI-LABEL: @test_vqrshl_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VQRSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqrshl.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VQRSHL_V3_I:%.*]] = bitcast <1 x i64> [[VQRSHL_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VQRSHL_V2_I]] +// int64x1_t test_vqrshl_s64(int64x1_t a, int64x1_t b) { +// return vqrshl_s64(a, b); +// } + +// NYI-LABEL: @test_vqrshl_u8( +// NYI: [[VQRSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqrshl.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VQRSHL_V_I]] +// uint8x8_t test_vqrshl_u8(uint8x8_t a, int8x8_t b) { +// return vqrshl_u8(a, b); +// } + +// NYI-LABEL: @test_vqrshl_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQRSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqrshl.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQRSHL_V3_I:%.*]] = bitcast <4 x i16> [[VQRSHL_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VQRSHL_V2_I]] +// uint16x4_t test_vqrshl_u16(uint16x4_t a, int16x4_t b) { +// return vqrshl_u16(a, b); +// } + +// NYI-LABEL: @test_vqrshl_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQRSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqrshl.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQRSHL_V3_I:%.*]] = bitcast <2 x i32> [[VQRSHL_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VQRSHL_V2_I]] +// uint32x2_t test_vqrshl_u32(uint32x2_t a, int32x2_t b) { +// return vqrshl_u32(a, b); +// } + +// NYI-LABEL: @test_vqrshl_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VQRSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uqrshl.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: [[VQRSHL_V3_I:%.*]] = bitcast <1 x i64> [[VQRSHL_V2_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VQRSHL_V2_I]] +// uint64x1_t test_vqrshl_u64(uint64x1_t a, int64x1_t b) { +// return vqrshl_u64(a, b); +// } + +// NYI-LABEL: @test_vqrshlq_s8( +// NYI: [[VQRSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqrshl.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VQRSHLQ_V_I]] +// int8x16_t test_vqrshlq_s8(int8x16_t a, int8x16_t b) { +// return vqrshlq_s8(a, b); +// } + +// NYI-LABEL: @test_vqrshlq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQRSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqrshl.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VQRSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VQRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VQRSHLQ_V2_I]] +// int16x8_t test_vqrshlq_s16(int16x8_t a, int16x8_t b) { +// return vqrshlq_s16(a, b); +// } + +// NYI-LABEL: @test_vqrshlq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQRSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqrshl.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VQRSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VQRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQRSHLQ_V2_I]] +// int32x4_t test_vqrshlq_s32(int32x4_t a, int32x4_t b) { +// return vqrshlq_s32(a, b); +// } + +// NYI-LABEL: @test_vqrshlq_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQRSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqrshl.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VQRSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VQRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VQRSHLQ_V2_I]] +// int64x2_t test_vqrshlq_s64(int64x2_t a, int64x2_t b) { +// return vqrshlq_s64(a, b); +// } + +// NYI-LABEL: @test_vqrshlq_u8( +// NYI: [[VQRSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uqrshl.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VQRSHLQ_V_I]] +// uint8x16_t test_vqrshlq_u8(uint8x16_t a, int8x16_t b) { +// return vqrshlq_u8(a, b); +// } + +// NYI-LABEL: @test_vqrshlq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQRSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uqrshl.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VQRSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VQRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VQRSHLQ_V2_I]] +// uint16x8_t test_vqrshlq_u16(uint16x8_t a, int16x8_t b) { +// return vqrshlq_u16(a, b); +// } + +// NYI-LABEL: @test_vqrshlq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQRSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uqrshl.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VQRSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VQRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQRSHLQ_V2_I]] +// uint32x4_t test_vqrshlq_u32(uint32x4_t a, int32x4_t b) { +// return vqrshlq_u32(a, b); +// } + +// NYI-LABEL: @test_vqrshlq_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQRSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uqrshl.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VQRSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VQRSHLQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VQRSHLQ_V2_I]] +// uint64x2_t test_vqrshlq_u64(uint64x2_t a, int64x2_t b) { +// return vqrshlq_u64(a, b); +// } + +// NYI-LABEL: @test_vsli_n_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VSLI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: [[VSLI_N2:%.*]] = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> [[VSLI_N]], <1 x i64> [[VSLI_N1]], i32 0) +// NYI: ret <1 x i64> [[VSLI_N2]] +// poly64x1_t test_vsli_n_p64(poly64x1_t a, poly64x1_t b) { +// return vsli_n_p64(a, b, 0); +// } + +// NYI-LABEL: @test_vsliq_n_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VSLI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: [[VSLI_N2:%.*]] = call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> [[VSLI_N]], <2 x i64> [[VSLI_N1]], i32 0) +// NYI: ret <2 x i64> [[VSLI_N2]] +// poly64x2_t test_vsliq_n_p64(poly64x2_t a, poly64x2_t b) { +// return vsliq_n_p64(a, b, 0); +// } + +// NYI-LABEL: @test_vmax_s8( +// NYI: [[VMAX_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VMAX_I]] +// int8x8_t test_vmax_s8(int8x8_t a, int8x8_t b) { +// return vmax_s8(a, b); +// } + +// NYI-LABEL: @test_vmax_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMAX2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i16> [[VMAX2_I]] +// int16x4_t test_vmax_s16(int16x4_t a, int16x4_t b) { +// return vmax_s16(a, b); +// } + +// NYI-LABEL: @test_vmax_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMAX2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i32> [[VMAX2_I]] +// int32x2_t test_vmax_s32(int32x2_t a, int32x2_t b) { +// return vmax_s32(a, b); +// } + +// NYI-LABEL: @test_vmax_u8( +// NYI: [[VMAX_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umax.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VMAX_I]] +// uint8x8_t test_vmax_u8(uint8x8_t a, uint8x8_t b) { +// return vmax_u8(a, b); +// } + +// NYI-LABEL: @test_vmax_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMAX2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umax.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i16> [[VMAX2_I]] +// uint16x4_t test_vmax_u16(uint16x4_t a, uint16x4_t b) { +// return vmax_u16(a, b); +// } + +// NYI-LABEL: @test_vmax_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMAX2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umax.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i32> [[VMAX2_I]] +// uint32x2_t test_vmax_u32(uint32x2_t a, uint32x2_t b) { +// return vmax_u32(a, b); +// } + +// NYI-LABEL: @test_vmax_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[VMAX2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmax.v2f32(<2 x float> %a, <2 x float> %b) +// NYI: ret <2 x float> [[VMAX2_I]] +// float32x2_t test_vmax_f32(float32x2_t a, float32x2_t b) { +// return vmax_f32(a, b); +// } + +// NYI-LABEL: @test_vmaxq_s8( +// NYI: [[VMAX_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VMAX_I]] +// int8x16_t test_vmaxq_s8(int8x16_t a, int8x16_t b) { +// return vmaxq_s8(a, b); +// } + +// NYI-LABEL: @test_vmaxq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VMAX2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i16> [[VMAX2_I]] +// int16x8_t test_vmaxq_s16(int16x8_t a, int16x8_t b) { +// return vmaxq_s16(a, b); +// } + +// NYI-LABEL: @test_vmaxq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VMAX2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: ret <4 x i32> [[VMAX2_I]] +// int32x4_t test_vmaxq_s32(int32x4_t a, int32x4_t b) { +// return vmaxq_s32(a, b); +// } + +// NYI-LABEL: @test_vmaxq_u8( +// NYI: [[VMAX_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umax.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VMAX_I]] +// uint8x16_t test_vmaxq_u8(uint8x16_t a, uint8x16_t b) { +// return vmaxq_u8(a, b); +// } + +// NYI-LABEL: @test_vmaxq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VMAX2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umax.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i16> [[VMAX2_I]] +// uint16x8_t test_vmaxq_u16(uint16x8_t a, uint16x8_t b) { +// return vmaxq_u16(a, b); +// } + +// NYI-LABEL: @test_vmaxq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VMAX2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umax.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: ret <4 x i32> [[VMAX2_I]] +// uint32x4_t test_vmaxq_u32(uint32x4_t a, uint32x4_t b) { +// return vmaxq_u32(a, b); +// } + +// NYI-LABEL: @test_vmaxq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[VMAX2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmax.v4f32(<4 x float> %a, <4 x float> %b) +// NYI: ret <4 x float> [[VMAX2_I]] +// float32x4_t test_vmaxq_f32(float32x4_t a, float32x4_t b) { +// return vmaxq_f32(a, b); +// } + +// NYI-LABEL: @test_vmaxq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[VMAX2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmax.v2f64(<2 x double> %a, <2 x double> %b) +// NYI: ret <2 x double> [[VMAX2_I]] +// float64x2_t test_vmaxq_f64(float64x2_t a, float64x2_t b) { +// return vmaxq_f64(a, b); +// } + +// NYI-LABEL: @test_vmin_s8( +// NYI: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VMIN_I]] +// int8x8_t test_vmin_s8(int8x8_t a, int8x8_t b) { +// return vmin_s8(a, b); +// } + +// NYI-LABEL: @test_vmin_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i16> [[VMIN2_I]] +// int16x4_t test_vmin_s16(int16x4_t a, int16x4_t b) { +// return vmin_s16(a, b); +// } + +// NYI-LABEL: @test_vmin_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i32> [[VMIN2_I]] +// int32x2_t test_vmin_s32(int32x2_t a, int32x2_t b) { +// return vmin_s32(a, b); +// } + +// NYI-LABEL: @test_vmin_u8( +// NYI: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VMIN_I]] +// uint8x8_t test_vmin_u8(uint8x8_t a, uint8x8_t b) { +// return vmin_u8(a, b); +// } + +// NYI-LABEL: @test_vmin_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i16> [[VMIN2_I]] +// uint16x4_t test_vmin_u16(uint16x4_t a, uint16x4_t b) { +// return vmin_u16(a, b); +// } + +// NYI-LABEL: @test_vmin_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i32> [[VMIN2_I]] +// uint32x2_t test_vmin_u32(uint32x2_t a, uint32x2_t b) { +// return vmin_u32(a, b); +// } + +// NYI-LABEL: @test_vmin_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> %a, <2 x float> %b) +// NYI: ret <2 x float> [[VMIN2_I]] +// float32x2_t test_vmin_f32(float32x2_t a, float32x2_t b) { +// return vmin_f32(a, b); +// } + +// NYI-LABEL: @test_vminq_s8( +// NYI: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VMIN_I]] +// int8x16_t test_vminq_s8(int8x16_t a, int8x16_t b) { +// return vminq_s8(a, b); +// } + +// NYI-LABEL: @test_vminq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i16> [[VMIN2_I]] +// int16x8_t test_vminq_s16(int16x8_t a, int16x8_t b) { +// return vminq_s16(a, b); +// } + +// NYI-LABEL: @test_vminq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: ret <4 x i32> [[VMIN2_I]] +// int32x4_t test_vminq_s32(int32x4_t a, int32x4_t b) { +// return vminq_s32(a, b); +// } + +// NYI-LABEL: @test_vminq_u8( +// NYI: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VMIN_I]] +// uint8x16_t test_vminq_u8(uint8x16_t a, uint8x16_t b) { +// return vminq_u8(a, b); +// } + +// NYI-LABEL: @test_vminq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i16> [[VMIN2_I]] +// uint16x8_t test_vminq_u16(uint16x8_t a, uint16x8_t b) { +// return vminq_u16(a, b); +// } + +// NYI-LABEL: @test_vminq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: ret <4 x i32> [[VMIN2_I]] +// uint32x4_t test_vminq_u32(uint32x4_t a, uint32x4_t b) { +// return vminq_u32(a, b); +// } + +// NYI-LABEL: @test_vminq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> %a, <4 x float> %b) +// NYI: ret <4 x float> [[VMIN2_I]] +// float32x4_t test_vminq_f32(float32x4_t a, float32x4_t b) { +// return vminq_f32(a, b); +// } + +// NYI-LABEL: @test_vminq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> %a, <2 x double> %b) +// NYI: ret <2 x double> [[VMIN2_I]] +// float64x2_t test_vminq_f64(float64x2_t a, float64x2_t b) { +// return vminq_f64(a, b); +// } + +// NYI-LABEL: @test_vmaxnm_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[VMAXNM2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmaxnm.v2f32(<2 x float> %a, <2 x float> %b) +// NYI: ret <2 x float> [[VMAXNM2_I]] +// float32x2_t test_vmaxnm_f32(float32x2_t a, float32x2_t b) { +// return vmaxnm_f32(a, b); +// } + +// NYI-LABEL: @test_vmaxnmq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[VMAXNM2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmaxnm.v4f32(<4 x float> %a, <4 x float> %b) +// NYI: ret <4 x float> [[VMAXNM2_I]] +// float32x4_t test_vmaxnmq_f32(float32x4_t a, float32x4_t b) { +// return vmaxnmq_f32(a, b); +// } + +// NYI-LABEL: @test_vmaxnmq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[VMAXNM2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmaxnm.v2f64(<2 x double> %a, <2 x double> %b) +// NYI: ret <2 x double> [[VMAXNM2_I]] +// float64x2_t test_vmaxnmq_f64(float64x2_t a, float64x2_t b) { +// return vmaxnmq_f64(a, b); +// } + +// NYI-LABEL: @test_vminnm_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[VMINNM2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> %a, <2 x float> %b) +// NYI: ret <2 x float> [[VMINNM2_I]] +// float32x2_t test_vminnm_f32(float32x2_t a, float32x2_t b) { +// return vminnm_f32(a, b); +// } + +// NYI-LABEL: @test_vminnmq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[VMINNM2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> %a, <4 x float> %b) +// NYI: ret <4 x float> [[VMINNM2_I]] +// float32x4_t test_vminnmq_f32(float32x4_t a, float32x4_t b) { +// return vminnmq_f32(a, b); +// } + +// NYI-LABEL: @test_vminnmq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[VMINNM2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> %a, <2 x double> %b) +// NYI: ret <2 x double> [[VMINNM2_I]] +// float64x2_t test_vminnmq_f64(float64x2_t a, float64x2_t b) { +// return vminnmq_f64(a, b); +// } + +// NYI-LABEL: @test_vpmax_s8( +// NYI: [[VPMAX_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smaxp.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VPMAX_I]] +// int8x8_t test_vpmax_s8(int8x8_t a, int8x8_t b) { +// return vpmax_s8(a, b); +// } + +// NYI-LABEL: @test_vpmax_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smaxp.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i16> [[VPMAX2_I]] +// int16x4_t test_vpmax_s16(int16x4_t a, int16x4_t b) { +// return vpmax_s16(a, b); +// } + +// NYI-LABEL: @test_vpmax_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smaxp.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i32> [[VPMAX2_I]] +// int32x2_t test_vpmax_s32(int32x2_t a, int32x2_t b) { +// return vpmax_s32(a, b); +// } + +// NYI-LABEL: @test_vpmax_u8( +// NYI: [[VPMAX_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umaxp.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VPMAX_I]] +// uint8x8_t test_vpmax_u8(uint8x8_t a, uint8x8_t b) { +// return vpmax_u8(a, b); +// } + +// NYI-LABEL: @test_vpmax_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umaxp.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i16> [[VPMAX2_I]] +// uint16x4_t test_vpmax_u16(uint16x4_t a, uint16x4_t b) { +// return vpmax_u16(a, b); +// } + +// NYI-LABEL: @test_vpmax_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umaxp.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i32> [[VPMAX2_I]] +// uint32x2_t test_vpmax_u32(uint32x2_t a, uint32x2_t b) { +// return vpmax_u32(a, b); +// } + +// NYI-LABEL: @test_vpmax_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmaxp.v2f32(<2 x float> %a, <2 x float> %b) +// NYI: ret <2 x float> [[VPMAX2_I]] +// float32x2_t test_vpmax_f32(float32x2_t a, float32x2_t b) { +// return vpmax_f32(a, b); +// } + +// NYI-LABEL: @test_vpmaxq_s8( +// NYI: [[VPMAX_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smaxp.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VPMAX_I]] +// int8x16_t test_vpmaxq_s8(int8x16_t a, int8x16_t b) { +// return vpmaxq_s8(a, b); +// } + +// NYI-LABEL: @test_vpmaxq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smaxp.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i16> [[VPMAX2_I]] +// int16x8_t test_vpmaxq_s16(int16x8_t a, int16x8_t b) { +// return vpmaxq_s16(a, b); +// } + +// NYI-LABEL: @test_vpmaxq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smaxp.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: ret <4 x i32> [[VPMAX2_I]] +// int32x4_t test_vpmaxq_s32(int32x4_t a, int32x4_t b) { +// return vpmaxq_s32(a, b); +// } + +// NYI-LABEL: @test_vpmaxq_u8( +// NYI: [[VPMAX_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umaxp.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VPMAX_I]] +// uint8x16_t test_vpmaxq_u8(uint8x16_t a, uint8x16_t b) { +// return vpmaxq_u8(a, b); +// } + +// NYI-LABEL: @test_vpmaxq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umaxp.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i16> [[VPMAX2_I]] +// uint16x8_t test_vpmaxq_u16(uint16x8_t a, uint16x8_t b) { +// return vpmaxq_u16(a, b); +// } + +// NYI-LABEL: @test_vpmaxq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umaxp.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: ret <4 x i32> [[VPMAX2_I]] +// uint32x4_t test_vpmaxq_u32(uint32x4_t a, uint32x4_t b) { +// return vpmaxq_u32(a, b); +// } + +// NYI-LABEL: @test_vpmaxq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmaxp.v4f32(<4 x float> %a, <4 x float> %b) +// NYI: ret <4 x float> [[VPMAX2_I]] +// float32x4_t test_vpmaxq_f32(float32x4_t a, float32x4_t b) { +// return vpmaxq_f32(a, b); +// } + +// NYI-LABEL: @test_vpmaxq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[VPMAX2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmaxp.v2f64(<2 x double> %a, <2 x double> %b) +// NYI: ret <2 x double> [[VPMAX2_I]] +// float64x2_t test_vpmaxq_f64(float64x2_t a, float64x2_t b) { +// return vpmaxq_f64(a, b); +// } + +// NYI-LABEL: @test_vpmin_s8( +// NYI: [[VPMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sminp.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VPMIN_I]] +// int8x8_t test_vpmin_s8(int8x8_t a, int8x8_t b) { +// return vpmin_s8(a, b); +// } + +// NYI-LABEL: @test_vpmin_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sminp.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i16> [[VPMIN2_I]] +// int16x4_t test_vpmin_s16(int16x4_t a, int16x4_t b) { +// return vpmin_s16(a, b); +// } + +// NYI-LABEL: @test_vpmin_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sminp.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i32> [[VPMIN2_I]] +// int32x2_t test_vpmin_s32(int32x2_t a, int32x2_t b) { +// return vpmin_s32(a, b); +// } + +// NYI-LABEL: @test_vpmin_u8( +// NYI: [[VPMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uminp.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VPMIN_I]] +// uint8x8_t test_vpmin_u8(uint8x8_t a, uint8x8_t b) { +// return vpmin_u8(a, b); +// } + +// NYI-LABEL: @test_vpmin_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uminp.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i16> [[VPMIN2_I]] +// uint16x4_t test_vpmin_u16(uint16x4_t a, uint16x4_t b) { +// return vpmin_u16(a, b); +// } + +// NYI-LABEL: @test_vpmin_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uminp.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i32> [[VPMIN2_I]] +// uint32x2_t test_vpmin_u32(uint32x2_t a, uint32x2_t b) { +// return vpmin_u32(a, b); +// } + +// NYI-LABEL: @test_vpmin_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminp.v2f32(<2 x float> %a, <2 x float> %b) +// NYI: ret <2 x float> [[VPMIN2_I]] +// float32x2_t test_vpmin_f32(float32x2_t a, float32x2_t b) { +// return vpmin_f32(a, b); +// } + +// NYI-LABEL: @test_vpminq_s8( +// NYI: [[VPMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sminp.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VPMIN_I]] +// int8x16_t test_vpminq_s8(int8x16_t a, int8x16_t b) { +// return vpminq_s8(a, b); +// } + +// NYI-LABEL: @test_vpminq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sminp.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i16> [[VPMIN2_I]] +// int16x8_t test_vpminq_s16(int16x8_t a, int16x8_t b) { +// return vpminq_s16(a, b); +// } + +// NYI-LABEL: @test_vpminq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sminp.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: ret <4 x i32> [[VPMIN2_I]] +// int32x4_t test_vpminq_s32(int32x4_t a, int32x4_t b) { +// return vpminq_s32(a, b); +// } + +// NYI-LABEL: @test_vpminq_u8( +// NYI: [[VPMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uminp.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VPMIN_I]] +// uint8x16_t test_vpminq_u8(uint8x16_t a, uint8x16_t b) { +// return vpminq_u8(a, b); +// } + +// NYI-LABEL: @test_vpminq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uminp.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i16> [[VPMIN2_I]] +// uint16x8_t test_vpminq_u16(uint16x8_t a, uint16x8_t b) { +// return vpminq_u16(a, b); +// } + +// NYI-LABEL: @test_vpminq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uminp.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: ret <4 x i32> [[VPMIN2_I]] +// uint32x4_t test_vpminq_u32(uint32x4_t a, uint32x4_t b) { +// return vpminq_u32(a, b); +// } + +// NYI-LABEL: @test_vpminq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminp.v4f32(<4 x float> %a, <4 x float> %b) +// NYI: ret <4 x float> [[VPMIN2_I]] +// float32x4_t test_vpminq_f32(float32x4_t a, float32x4_t b) { +// return vpminq_f32(a, b); +// } + +// NYI-LABEL: @test_vpminq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[VPMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminp.v2f64(<2 x double> %a, <2 x double> %b) +// NYI: ret <2 x double> [[VPMIN2_I]] +// float64x2_t test_vpminq_f64(float64x2_t a, float64x2_t b) { +// return vpminq_f64(a, b); +// } + +// NYI-LABEL: @test_vpmaxnm_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[VPMAXNM2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmaxnmp.v2f32(<2 x float> %a, <2 x float> %b) +// NYI: ret <2 x float> [[VPMAXNM2_I]] +// float32x2_t test_vpmaxnm_f32(float32x2_t a, float32x2_t b) { +// return vpmaxnm_f32(a, b); +// } + +// NYI-LABEL: @test_vpmaxnmq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[VPMAXNM2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmaxnmp.v4f32(<4 x float> %a, <4 x float> %b) +// NYI: ret <4 x float> [[VPMAXNM2_I]] +// float32x4_t test_vpmaxnmq_f32(float32x4_t a, float32x4_t b) { +// return vpmaxnmq_f32(a, b); +// } + +// NYI-LABEL: @test_vpmaxnmq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[VPMAXNM2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmaxnmp.v2f64(<2 x double> %a, <2 x double> %b) +// NYI: ret <2 x double> [[VPMAXNM2_I]] +// float64x2_t test_vpmaxnmq_f64(float64x2_t a, float64x2_t b) { +// return vpmaxnmq_f64(a, b); +// } + +// NYI-LABEL: @test_vpminnm_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[VPMINNM2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnmp.v2f32(<2 x float> %a, <2 x float> %b) +// NYI: ret <2 x float> [[VPMINNM2_I]] +// float32x2_t test_vpminnm_f32(float32x2_t a, float32x2_t b) { +// return vpminnm_f32(a, b); +// } + +// NYI-LABEL: @test_vpminnmq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[VPMINNM2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnmp.v4f32(<4 x float> %a, <4 x float> %b) +// NYI: ret <4 x float> [[VPMINNM2_I]] +// float32x4_t test_vpminnmq_f32(float32x4_t a, float32x4_t b) { +// return vpminnmq_f32(a, b); +// } + +// NYI-LABEL: @test_vpminnmq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[VPMINNM2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnmp.v2f64(<2 x double> %a, <2 x double> %b) +// NYI: ret <2 x double> [[VPMINNM2_I]] +// float64x2_t test_vpminnmq_f64(float64x2_t a, float64x2_t b) { +// return vpminnmq_f64(a, b); +// } + +// NYI-LABEL: @test_vpadd_s8( +// NYI: [[VPADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VPADD_V_I]] +// int8x8_t test_vpadd_s8(int8x8_t a, int8x8_t b) { +// return vpadd_s8(a, b); +// } + +// NYI-LABEL: @test_vpadd_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VPADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VPADD_V3_I:%.*]] = bitcast <4 x i16> [[VPADD_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VPADD_V2_I]] +// int16x4_t test_vpadd_s16(int16x4_t a, int16x4_t b) { +// return vpadd_s16(a, b); +// } + +// NYI-LABEL: @test_vpadd_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VPADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VPADD_V3_I:%.*]] = bitcast <2 x i32> [[VPADD_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VPADD_V2_I]] +// int32x2_t test_vpadd_s32(int32x2_t a, int32x2_t b) { +// return vpadd_s32(a, b); +// } + +// NYI-LABEL: @test_vpadd_u8( +// NYI: [[VPADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VPADD_V_I]] +// uint8x8_t test_vpadd_u8(uint8x8_t a, uint8x8_t b) { +// return vpadd_u8(a, b); +// } + +// NYI-LABEL: @test_vpadd_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VPADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VPADD_V3_I:%.*]] = bitcast <4 x i16> [[VPADD_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VPADD_V2_I]] +// uint16x4_t test_vpadd_u16(uint16x4_t a, uint16x4_t b) { +// return vpadd_u16(a, b); +// } + +// NYI-LABEL: @test_vpadd_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VPADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VPADD_V3_I:%.*]] = bitcast <2 x i32> [[VPADD_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VPADD_V2_I]] +// uint32x2_t test_vpadd_u32(uint32x2_t a, uint32x2_t b) { +// return vpadd_u32(a, b); +// } + +// NYI-LABEL: @test_vpadd_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[VPADD_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.faddp.v2f32(<2 x float> %a, <2 x float> %b) +// NYI: [[VPADD_V3_I:%.*]] = bitcast <2 x float> [[VPADD_V2_I]] to <8 x i8> +// NYI: ret <2 x float> [[VPADD_V2_I]] +// float32x2_t test_vpadd_f32(float32x2_t a, float32x2_t b) { +// return vpadd_f32(a, b); +// } + +// NYI-LABEL: @test_vpaddq_s8( +// NYI: [[VPADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VPADDQ_V_I]] +// int8x16_t test_vpaddq_s8(int8x16_t a, int8x16_t b) { +// return vpaddq_s8(a, b); +// } + +// NYI-LABEL: @test_vpaddq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VPADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VPADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VPADDQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VPADDQ_V2_I]] +// int16x8_t test_vpaddq_s16(int16x8_t a, int16x8_t b) { +// return vpaddq_s16(a, b); +// } + +// NYI-LABEL: @test_vpaddq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VPADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VPADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VPADDQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VPADDQ_V2_I]] +// int32x4_t test_vpaddq_s32(int32x4_t a, int32x4_t b) { +// return vpaddq_s32(a, b); +// } + +// NYI-LABEL: @test_vpaddq_u8( +// NYI: [[VPADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VPADDQ_V_I]] +// uint8x16_t test_vpaddq_u8(uint8x16_t a, uint8x16_t b) { +// return vpaddq_u8(a, b); +// } + +// NYI-LABEL: @test_vpaddq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VPADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VPADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VPADDQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VPADDQ_V2_I]] +// uint16x8_t test_vpaddq_u16(uint16x8_t a, uint16x8_t b) { +// return vpaddq_u16(a, b); +// } + +// NYI-LABEL: @test_vpaddq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VPADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VPADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VPADDQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VPADDQ_V2_I]] +// uint32x4_t test_vpaddq_u32(uint32x4_t a, uint32x4_t b) { +// return vpaddq_u32(a, b); +// } + +// NYI-LABEL: @test_vpaddq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[VPADDQ_V2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.faddp.v4f32(<4 x float> %a, <4 x float> %b) +// NYI: [[VPADDQ_V3_I:%.*]] = bitcast <4 x float> [[VPADDQ_V2_I]] to <16 x i8> +// NYI: ret <4 x float> [[VPADDQ_V2_I]] +// float32x4_t test_vpaddq_f32(float32x4_t a, float32x4_t b) { +// return vpaddq_f32(a, b); +// } + +// NYI-LABEL: @test_vpaddq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[VPADDQ_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.faddp.v2f64(<2 x double> %a, <2 x double> %b) +// NYI: [[VPADDQ_V3_I:%.*]] = bitcast <2 x double> [[VPADDQ_V2_I]] to <16 x i8> +// NYI: ret <2 x double> [[VPADDQ_V2_I]] +// float64x2_t test_vpaddq_f64(float64x2_t a, float64x2_t b) { +// return vpaddq_f64(a, b); +// } + +// NYI-LABEL: @test_vqdmulh_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQDMULH_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQDMULH_V3_I:%.*]] = bitcast <4 x i16> [[VQDMULH_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VQDMULH_V2_I]] +// int16x4_t test_vqdmulh_s16(int16x4_t a, int16x4_t b) { +// return vqdmulh_s16(a, b); +// } + +// NYI-LABEL: @test_vqdmulh_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQDMULH_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQDMULH_V3_I:%.*]] = bitcast <2 x i32> [[VQDMULH_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VQDMULH_V2_I]] +// int32x2_t test_vqdmulh_s32(int32x2_t a, int32x2_t b) { +// return vqdmulh_s32(a, b); +// } + +// NYI-LABEL: @test_vqdmulhq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQDMULHQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VQDMULHQ_V3_I:%.*]] = bitcast <8 x i16> [[VQDMULHQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VQDMULHQ_V2_I]] +// int16x8_t test_vqdmulhq_s16(int16x8_t a, int16x8_t b) { +// return vqdmulhq_s16(a, b); +// } + +// NYI-LABEL: @test_vqdmulhq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQDMULHQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VQDMULHQ_V3_I:%.*]] = bitcast <4 x i32> [[VQDMULHQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQDMULHQ_V2_I]] +// int32x4_t test_vqdmulhq_s32(int32x4_t a, int32x4_t b) { +// return vqdmulhq_s32(a, b); +// } + +// NYI-LABEL: @test_vqrdmulh_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQRDMULH_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQRDMULH_V3_I:%.*]] = bitcast <4 x i16> [[VQRDMULH_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VQRDMULH_V2_I]] +// int16x4_t test_vqrdmulh_s16(int16x4_t a, int16x4_t b) { +// return vqrdmulh_s16(a, b); +// } + +// NYI-LABEL: @test_vqrdmulh_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQRDMULH_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQRDMULH_V3_I:%.*]] = bitcast <2 x i32> [[VQRDMULH_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VQRDMULH_V2_I]] +// int32x2_t test_vqrdmulh_s32(int32x2_t a, int32x2_t b) { +// return vqrdmulh_s32(a, b); +// } + +// NYI-LABEL: @test_vqrdmulhq_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQRDMULHQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: [[VQRDMULHQ_V3_I:%.*]] = bitcast <8 x i16> [[VQRDMULHQ_V2_I]] to <16 x i8> +// NYI: ret <8 x i16> [[VQRDMULHQ_V2_I]] +// int16x8_t test_vqrdmulhq_s16(int16x8_t a, int16x8_t b) { +// return vqrdmulhq_s16(a, b); +// } + +// NYI-LABEL: @test_vqrdmulhq_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQRDMULHQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VQRDMULHQ_V3_I:%.*]] = bitcast <4 x i32> [[VQRDMULHQ_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQRDMULHQ_V2_I]] +// int32x4_t test_vqrdmulhq_s32(int32x4_t a, int32x4_t b) { +// return vqrdmulhq_s32(a, b); +// } + +// NYI-LABEL: @test_vmulx_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[VMULX2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %a, <2 x float> %b) +// NYI: ret <2 x float> [[VMULX2_I]] +// float32x2_t test_vmulx_f32(float32x2_t a, float32x2_t b) { +// return vmulx_f32(a, b); +// } + +// NYI-LABEL: @test_vmulxq_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[VMULX2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %a, <4 x float> %b) +// NYI: ret <4 x float> [[VMULX2_I]] +// float32x4_t test_vmulxq_f32(float32x4_t a, float32x4_t b) { +// return vmulxq_f32(a, b); +// } + +// NYI-LABEL: @test_vmulxq_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[VMULX2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %a, <2 x double> %b) +// NYI: ret <2 x double> [[VMULX2_I]] +// float64x2_t test_vmulxq_f64(float64x2_t a, float64x2_t b) { +// return vmulxq_f64(a, b); +// } + +// NYI-LABEL: @test_vshl_n_s8( +// NYI: [[VSHL_N:%.*]] = shl <8 x i8> %a, +// NYI: ret <8 x i8> [[VSHL_N]] +// int8x8_t test_vshl_n_s8(int8x8_t a) { +// return vshl_n_s8(a, 3); +// } + +// NYI-LABEL: @test_vshl_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VSHL_N:%.*]] = shl <4 x i16> [[TMP1]], +// NYI: ret <4 x i16> [[VSHL_N]] +// int16x4_t test_vshl_n_s16(int16x4_t a) { +// return vshl_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vshl_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VSHL_N:%.*]] = shl <2 x i32> [[TMP1]], +// NYI: ret <2 x i32> [[VSHL_N]] +// int32x2_t test_vshl_n_s32(int32x2_t a) { +// return vshl_n_s32(a, 3); +// } + +// NYI-LABEL: @test_vshlq_n_s8( +// NYI: [[VSHL_N:%.*]] = shl <16 x i8> %a, +// NYI: ret <16 x i8> [[VSHL_N]] +// int8x16_t test_vshlq_n_s8(int8x16_t a) { +// return vshlq_n_s8(a, 3); +// } + +// NYI-LABEL: @test_vshlq_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VSHL_N:%.*]] = shl <8 x i16> [[TMP1]], +// NYI: ret <8 x i16> [[VSHL_N]] +// int16x8_t test_vshlq_n_s16(int16x8_t a) { +// return vshlq_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vshlq_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VSHL_N:%.*]] = shl <4 x i32> [[TMP1]], +// NYI: ret <4 x i32> [[VSHL_N]] +// int32x4_t test_vshlq_n_s32(int32x4_t a) { +// return vshlq_n_s32(a, 3); +// } + +// NYI-LABEL: @test_vshlq_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VSHL_N:%.*]] = shl <2 x i64> [[TMP1]], +// NYI: ret <2 x i64> [[VSHL_N]] +// int64x2_t test_vshlq_n_s64(int64x2_t a) { +// return vshlq_n_s64(a, 3); +// } + +// NYI-LABEL: @test_vshl_n_u8( +// NYI: [[VSHL_N:%.*]] = shl <8 x i8> %a, +// NYI: ret <8 x i8> [[VSHL_N]] +// uint8x8_t test_vshl_n_u8(uint8x8_t a) { +// return vshl_n_u8(a, 3); +// } + +// NYI-LABEL: @test_vshl_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VSHL_N:%.*]] = shl <4 x i16> [[TMP1]], +// NYI: ret <4 x i16> [[VSHL_N]] +// uint16x4_t test_vshl_n_u16(uint16x4_t a) { +// return vshl_n_u16(a, 3); +// } + +// NYI-LABEL: @test_vshl_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VSHL_N:%.*]] = shl <2 x i32> [[TMP1]], +// NYI: ret <2 x i32> [[VSHL_N]] +// uint32x2_t test_vshl_n_u32(uint32x2_t a) { +// return vshl_n_u32(a, 3); +// } + +// NYI-LABEL: @test_vshlq_n_u8( +// NYI: [[VSHL_N:%.*]] = shl <16 x i8> %a, +// NYI: ret <16 x i8> [[VSHL_N]] +// uint8x16_t test_vshlq_n_u8(uint8x16_t a) { +// return vshlq_n_u8(a, 3); +// } + +// NYI-LABEL: @test_vshlq_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VSHL_N:%.*]] = shl <8 x i16> [[TMP1]], +// NYI: ret <8 x i16> [[VSHL_N]] +// uint16x8_t test_vshlq_n_u16(uint16x8_t a) { +// return vshlq_n_u16(a, 3); +// } + +// NYI-LABEL: @test_vshlq_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VSHL_N:%.*]] = shl <4 x i32> [[TMP1]], +// NYI: ret <4 x i32> [[VSHL_N]] +// uint32x4_t test_vshlq_n_u32(uint32x4_t a) { +// return vshlq_n_u32(a, 3); +// } + +// NYI-LABEL: @test_vshlq_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VSHL_N:%.*]] = shl <2 x i64> [[TMP1]], +// NYI: ret <2 x i64> [[VSHL_N]] +// uint64x2_t test_vshlq_n_u64(uint64x2_t a) { +// return vshlq_n_u64(a, 3); +// } + +// NYI-LABEL: @test_vshr_n_s8( +// NYI: [[VSHR_N:%.*]] = ashr <8 x i8> %a, +// NYI: ret <8 x i8> [[VSHR_N]] +// int8x8_t test_vshr_n_s8(int8x8_t a) { +// return vshr_n_s8(a, 3); +// } + +// NYI-LABEL: @test_vshr_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VSHR_N:%.*]] = ashr <4 x i16> [[TMP1]], +// NYI: ret <4 x i16> [[VSHR_N]] +// int16x4_t test_vshr_n_s16(int16x4_t a) { +// return vshr_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vshr_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VSHR_N:%.*]] = ashr <2 x i32> [[TMP1]], +// NYI: ret <2 x i32> [[VSHR_N]] +// int32x2_t test_vshr_n_s32(int32x2_t a) { +// return vshr_n_s32(a, 3); +// } + +// NYI-LABEL: @test_vshrq_n_s8( +// NYI: [[VSHR_N:%.*]] = ashr <16 x i8> %a, +// NYI: ret <16 x i8> [[VSHR_N]] +// int8x16_t test_vshrq_n_s8(int8x16_t a) { +// return vshrq_n_s8(a, 3); +// } + +// NYI-LABEL: @test_vshrq_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VSHR_N:%.*]] = ashr <8 x i16> [[TMP1]], +// NYI: ret <8 x i16> [[VSHR_N]] +// int16x8_t test_vshrq_n_s16(int16x8_t a) { +// return vshrq_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vshrq_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VSHR_N:%.*]] = ashr <4 x i32> [[TMP1]], +// NYI: ret <4 x i32> [[VSHR_N]] +// int32x4_t test_vshrq_n_s32(int32x4_t a) { +// return vshrq_n_s32(a, 3); +// } + +// NYI-LABEL: @test_vshrq_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VSHR_N:%.*]] = ashr <2 x i64> [[TMP1]], +// NYI: ret <2 x i64> [[VSHR_N]] +// int64x2_t test_vshrq_n_s64(int64x2_t a) { +// return vshrq_n_s64(a, 3); +// } + +// NYI-LABEL: @test_vshr_n_u8( +// NYI: [[VSHR_N:%.*]] = lshr <8 x i8> %a, +// NYI: ret <8 x i8> [[VSHR_N]] +// uint8x8_t test_vshr_n_u8(uint8x8_t a) { +// return vshr_n_u8(a, 3); +// } + +// NYI-LABEL: @test_vshr_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VSHR_N:%.*]] = lshr <4 x i16> [[TMP1]], +// NYI: ret <4 x i16> [[VSHR_N]] +// uint16x4_t test_vshr_n_u16(uint16x4_t a) { +// return vshr_n_u16(a, 3); +// } + +// NYI-LABEL: @test_vshr_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VSHR_N:%.*]] = lshr <2 x i32> [[TMP1]], +// NYI: ret <2 x i32> [[VSHR_N]] +// uint32x2_t test_vshr_n_u32(uint32x2_t a) { +// return vshr_n_u32(a, 3); +// } + +// NYI-LABEL: @test_vshrq_n_u8( +// NYI: [[VSHR_N:%.*]] = lshr <16 x i8> %a, +// NYI: ret <16 x i8> [[VSHR_N]] +// uint8x16_t test_vshrq_n_u8(uint8x16_t a) { +// return vshrq_n_u8(a, 3); +// } + +// NYI-LABEL: @test_vshrq_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VSHR_N:%.*]] = lshr <8 x i16> [[TMP1]], +// NYI: ret <8 x i16> [[VSHR_N]] +// uint16x8_t test_vshrq_n_u16(uint16x8_t a) { +// return vshrq_n_u16(a, 3); +// } + +// NYI-LABEL: @test_vshrq_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VSHR_N:%.*]] = lshr <4 x i32> [[TMP1]], +// NYI: ret <4 x i32> [[VSHR_N]] +// uint32x4_t test_vshrq_n_u32(uint32x4_t a) { +// return vshrq_n_u32(a, 3); +// } + +// NYI-LABEL: @test_vshrq_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VSHR_N:%.*]] = lshr <2 x i64> [[TMP1]], +// NYI: ret <2 x i64> [[VSHR_N]] +// uint64x2_t test_vshrq_n_u64(uint64x2_t a) { +// return vshrq_n_u64(a, 3); +// } + +// NYI-LABEL: @test_vsra_n_s8( +// NYI: [[VSRA_N:%.*]] = ashr <8 x i8> %b, +// NYI: [[TMP0:%.*]] = add <8 x i8> %a, [[VSRA_N]] +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vsra_n_s8(int8x8_t a, int8x8_t b) { +// return vsra_n_s8(a, b, 3); +// } + +// NYI-LABEL: @test_vsra_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: [[VSRA_N:%.*]] = ashr <4 x i16> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <4 x i16> [[TMP2]], [[VSRA_N]] +// NYI: ret <4 x i16> [[TMP4]] +// int16x4_t test_vsra_n_s16(int16x4_t a, int16x4_t b) { +// return vsra_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vsra_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> +// NYI: [[VSRA_N:%.*]] = ashr <2 x i32> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <2 x i32> [[TMP2]], [[VSRA_N]] +// NYI: ret <2 x i32> [[TMP4]] +// int32x2_t test_vsra_n_s32(int32x2_t a, int32x2_t b) { +// return vsra_n_s32(a, b, 3); +// } + +// NYI-LABEL: @test_vsraq_n_s8( +// NYI: [[VSRA_N:%.*]] = ashr <16 x i8> %b, +// NYI: [[TMP0:%.*]] = add <16 x i8> %a, [[VSRA_N]] +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vsraq_n_s8(int8x16_t a, int8x16_t b) { +// return vsraq_n_s8(a, b, 3); +// } + +// NYI-LABEL: @test_vsraq_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: [[VSRA_N:%.*]] = ashr <8 x i16> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <8 x i16> [[TMP2]], [[VSRA_N]] +// NYI: ret <8 x i16> [[TMP4]] +// int16x8_t test_vsraq_n_s16(int16x8_t a, int16x8_t b) { +// return vsraq_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vsraq_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: [[VSRA_N:%.*]] = ashr <4 x i32> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <4 x i32> [[TMP2]], [[VSRA_N]] +// NYI: ret <4 x i32> [[TMP4]] +// int32x4_t test_vsraq_n_s32(int32x4_t a, int32x4_t b) { +// return vsraq_n_s32(a, b, 3); +// } + +// NYI-LABEL: @test_vsraq_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: [[VSRA_N:%.*]] = ashr <2 x i64> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <2 x i64> [[TMP2]], [[VSRA_N]] +// NYI: ret <2 x i64> [[TMP4]] +// int64x2_t test_vsraq_n_s64(int64x2_t a, int64x2_t b) { +// return vsraq_n_s64(a, b, 3); +// } + +// NYI-LABEL: @test_vsra_n_u8( +// NYI: [[VSRA_N:%.*]] = lshr <8 x i8> %b, +// NYI: [[TMP0:%.*]] = add <8 x i8> %a, [[VSRA_N]] +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vsra_n_u8(uint8x8_t a, uint8x8_t b) { +// return vsra_n_u8(a, b, 3); +// } + +// NYI-LABEL: @test_vsra_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: [[VSRA_N:%.*]] = lshr <4 x i16> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <4 x i16> [[TMP2]], [[VSRA_N]] +// NYI: ret <4 x i16> [[TMP4]] +// uint16x4_t test_vsra_n_u16(uint16x4_t a, uint16x4_t b) { +// return vsra_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vsra_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> +// NYI: [[VSRA_N:%.*]] = lshr <2 x i32> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <2 x i32> [[TMP2]], [[VSRA_N]] +// NYI: ret <2 x i32> [[TMP4]] +// uint32x2_t test_vsra_n_u32(uint32x2_t a, uint32x2_t b) { +// return vsra_n_u32(a, b, 3); +// } + +// NYI-LABEL: @test_vsraq_n_u8( +// NYI: [[VSRA_N:%.*]] = lshr <16 x i8> %b, +// NYI: [[TMP0:%.*]] = add <16 x i8> %a, [[VSRA_N]] +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vsraq_n_u8(uint8x16_t a, uint8x16_t b) { +// return vsraq_n_u8(a, b, 3); +// } + +// NYI-LABEL: @test_vsraq_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: [[VSRA_N:%.*]] = lshr <8 x i16> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <8 x i16> [[TMP2]], [[VSRA_N]] +// NYI: ret <8 x i16> [[TMP4]] +// uint16x8_t test_vsraq_n_u16(uint16x8_t a, uint16x8_t b) { +// return vsraq_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vsraq_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: [[VSRA_N:%.*]] = lshr <4 x i32> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <4 x i32> [[TMP2]], [[VSRA_N]] +// NYI: ret <4 x i32> [[TMP4]] +// uint32x4_t test_vsraq_n_u32(uint32x4_t a, uint32x4_t b) { +// return vsraq_n_u32(a, b, 3); +// } + +// NYI-LABEL: @test_vsraq_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: [[VSRA_N:%.*]] = lshr <2 x i64> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <2 x i64> [[TMP2]], [[VSRA_N]] +// NYI: ret <2 x i64> [[TMP4]] +// uint64x2_t test_vsraq_n_u64(uint64x2_t a, uint64x2_t b) { +// return vsraq_n_u64(a, b, 3); +// } + +// NYI-LABEL: @test_vrshr_n_s8( +// NYI: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %a, <8 x i8> ) +// NYI: ret <8 x i8> [[VRSHR_N]] +// int8x8_t test_vrshr_n_s8(int8x8_t a) { +// return vrshr_n_s8(a, 3); +// } + +// NYI-LABEL: @test_vrshr_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> ) +// NYI: ret <4 x i16> [[VRSHR_N1]] +// int16x4_t test_vrshr_n_s16(int16x4_t a) { +// return vrshr_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vrshr_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> ) +// NYI: ret <2 x i32> [[VRSHR_N1]] +// int32x2_t test_vrshr_n_s32(int32x2_t a) { +// return vrshr_n_s32(a, 3); +// } + +// NYI-LABEL: @test_vrshrq_n_s8( +// NYI: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %a, <16 x i8> ) +// NYI: ret <16 x i8> [[VRSHR_N]] +// int8x16_t test_vrshrq_n_s8(int8x16_t a) { +// return vrshrq_n_s8(a, 3); +// } + +// NYI-LABEL: @test_vrshrq_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> ) +// NYI: ret <8 x i16> [[VRSHR_N1]] +// int16x8_t test_vrshrq_n_s16(int16x8_t a) { +// return vrshrq_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vrshrq_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> ) +// NYI: ret <4 x i32> [[VRSHR_N1]] +// int32x4_t test_vrshrq_n_s32(int32x4_t a) { +// return vrshrq_n_s32(a, 3); +// } + +// NYI-LABEL: @test_vrshrq_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> ) +// NYI: ret <2 x i64> [[VRSHR_N1]] +// int64x2_t test_vrshrq_n_s64(int64x2_t a) { +// return vrshrq_n_s64(a, 3); +// } + +// NYI-LABEL: @test_vrshr_n_u8( +// NYI: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %a, <8 x i8> ) +// NYI: ret <8 x i8> [[VRSHR_N]] +// uint8x8_t test_vrshr_n_u8(uint8x8_t a) { +// return vrshr_n_u8(a, 3); +// } + +// NYI-LABEL: @test_vrshr_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> ) +// NYI: ret <4 x i16> [[VRSHR_N1]] +// uint16x4_t test_vrshr_n_u16(uint16x4_t a) { +// return vrshr_n_u16(a, 3); +// } + +// NYI-LABEL: @test_vrshr_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> ) +// NYI: ret <2 x i32> [[VRSHR_N1]] +// uint32x2_t test_vrshr_n_u32(uint32x2_t a) { +// return vrshr_n_u32(a, 3); +// } + +// NYI-LABEL: @test_vrshrq_n_u8( +// NYI: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %a, <16 x i8> ) +// NYI: ret <16 x i8> [[VRSHR_N]] +// uint8x16_t test_vrshrq_n_u8(uint8x16_t a) { +// return vrshrq_n_u8(a, 3); +// } + +// NYI-LABEL: @test_vrshrq_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> ) +// NYI: ret <8 x i16> [[VRSHR_N1]] +// uint16x8_t test_vrshrq_n_u16(uint16x8_t a) { +// return vrshrq_n_u16(a, 3); +// } + +// NYI-LABEL: @test_vrshrq_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> ) +// NYI: ret <4 x i32> [[VRSHR_N1]] +// uint32x4_t test_vrshrq_n_u32(uint32x4_t a) { +// return vrshrq_n_u32(a, 3); +// } + +// NYI-LABEL: @test_vrshrq_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> ) +// NYI: ret <2 x i64> [[VRSHR_N1]] +// uint64x2_t test_vrshrq_n_u64(uint64x2_t a) { +// return vrshrq_n_u64(a, 3); +// } + +// NYI-LABEL: @test_vrsra_n_s8( +// NYI: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %b, <8 x i8> ) +// NYI: [[TMP0:%.*]] = add <8 x i8> %a, [[VRSHR_N]] +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vrsra_n_s8(int8x8_t a, int8x8_t b) { +// return vrsra_n_s8(a, b, 3); +// } + +// NYI-LABEL: @test_vrsra_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> ) +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[TMP3:%.*]] = add <4 x i16> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <4 x i16> [[TMP3]] +// int16x4_t test_vrsra_n_s16(int16x4_t a, int16x4_t b) { +// return vrsra_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vrsra_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> +// NYI: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> ) +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <2 x i32> [[TMP3]] +// int32x2_t test_vrsra_n_s32(int32x2_t a, int32x2_t b) { +// return vrsra_n_s32(a, b, 3); +// } + +// NYI-LABEL: @test_vrsraq_n_s8( +// NYI: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %b, <16 x i8> ) +// NYI: [[TMP0:%.*]] = add <16 x i8> %a, [[VRSHR_N]] +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vrsraq_n_s8(int8x16_t a, int8x16_t b) { +// return vrsraq_n_s8(a, b, 3); +// } + +// NYI-LABEL: @test_vrsraq_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> ) +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[TMP3:%.*]] = add <8 x i16> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <8 x i16> [[TMP3]] +// int16x8_t test_vrsraq_n_s16(int16x8_t a, int16x8_t b) { +// return vrsraq_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vrsraq_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> ) +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[TMP3:%.*]] = add <4 x i32> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <4 x i32> [[TMP3]] +// int32x4_t test_vrsraq_n_s32(int32x4_t a, int32x4_t b) { +// return vrsraq_n_s32(a, b, 3); +// } + +// NYI-LABEL: @test_vrsraq_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> ) +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[TMP3:%.*]] = add <2 x i64> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <2 x i64> [[TMP3]] +// int64x2_t test_vrsraq_n_s64(int64x2_t a, int64x2_t b) { +// return vrsraq_n_s64(a, b, 3); +// } + +// NYI-LABEL: @test_vrsra_n_u8( +// NYI: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %b, <8 x i8> ) +// NYI: [[TMP0:%.*]] = add <8 x i8> %a, [[VRSHR_N]] +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vrsra_n_u8(uint8x8_t a, uint8x8_t b) { +// return vrsra_n_u8(a, b, 3); +// } + +// NYI-LABEL: @test_vrsra_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> ) +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[TMP3:%.*]] = add <4 x i16> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <4 x i16> [[TMP3]] +// uint16x4_t test_vrsra_n_u16(uint16x4_t a, uint16x4_t b) { +// return vrsra_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vrsra_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> +// NYI: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> ) +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <2 x i32> [[TMP3]] +// uint32x2_t test_vrsra_n_u32(uint32x2_t a, uint32x2_t b) { +// return vrsra_n_u32(a, b, 3); +// } + +// NYI-LABEL: @test_vrsraq_n_u8( +// NYI: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %b, <16 x i8> ) +// NYI: [[TMP0:%.*]] = add <16 x i8> %a, [[VRSHR_N]] +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vrsraq_n_u8(uint8x16_t a, uint8x16_t b) { +// return vrsraq_n_u8(a, b, 3); +// } + +// NYI-LABEL: @test_vrsraq_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> ) +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[TMP3:%.*]] = add <8 x i16> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <8 x i16> [[TMP3]] +// uint16x8_t test_vrsraq_n_u16(uint16x8_t a, uint16x8_t b) { +// return vrsraq_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vrsraq_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> ) +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[TMP3:%.*]] = add <4 x i32> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <4 x i32> [[TMP3]] +// uint32x4_t test_vrsraq_n_u32(uint32x4_t a, uint32x4_t b) { +// return vrsraq_n_u32(a, b, 3); +// } + +// NYI-LABEL: @test_vrsraq_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> ) +// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[TMP3:%.*]] = add <2 x i64> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <2 x i64> [[TMP3]] +// uint64x2_t test_vrsraq_n_u64(uint64x2_t a, uint64x2_t b) { +// return vrsraq_n_u64(a, b, 3); +// } + +// NYI-LABEL: @test_vsri_n_s8( +// NYI: [[VSRI_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3) +// NYI: ret <8 x i8> [[VSRI_N]] +// int8x8_t test_vsri_n_s8(int8x8_t a, int8x8_t b) { +// return vsri_n_s8(a, b, 3); +// } + +// NYI-LABEL: @test_vsri_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VSRI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: [[VSRI_N2:%.*]] = call <4 x i16> @llvm.aarch64.neon.vsri.v4i16(<4 x i16> [[VSRI_N]], <4 x i16> [[VSRI_N1]], i32 3) +// NYI: ret <4 x i16> [[VSRI_N2]] +// int16x4_t test_vsri_n_s16(int16x4_t a, int16x4_t b) { +// return vsri_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vsri_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VSRI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> +// NYI: [[VSRI_N2:%.*]] = call <2 x i32> @llvm.aarch64.neon.vsri.v2i32(<2 x i32> [[VSRI_N]], <2 x i32> [[VSRI_N1]], i32 3) +// NYI: ret <2 x i32> [[VSRI_N2]] +// int32x2_t test_vsri_n_s32(int32x2_t a, int32x2_t b) { +// return vsri_n_s32(a, b, 3); +// } + +// NYI-LABEL: @test_vsriq_n_s8( +// NYI: [[VSRI_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.vsri.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3) +// NYI: ret <16 x i8> [[VSRI_N]] +// int8x16_t test_vsriq_n_s8(int8x16_t a, int8x16_t b) { +// return vsriq_n_s8(a, b, 3); +// } + +// NYI-LABEL: @test_vsriq_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VSRI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: [[VSRI_N2:%.*]] = call <8 x i16> @llvm.aarch64.neon.vsri.v8i16(<8 x i16> [[VSRI_N]], <8 x i16> [[VSRI_N1]], i32 3) +// NYI: ret <8 x i16> [[VSRI_N2]] +// int16x8_t test_vsriq_n_s16(int16x8_t a, int16x8_t b) { +// return vsriq_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vsriq_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VSRI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: [[VSRI_N2:%.*]] = call <4 x i32> @llvm.aarch64.neon.vsri.v4i32(<4 x i32> [[VSRI_N]], <4 x i32> [[VSRI_N1]], i32 3) +// NYI: ret <4 x i32> [[VSRI_N2]] +// int32x4_t test_vsriq_n_s32(int32x4_t a, int32x4_t b) { +// return vsriq_n_s32(a, b, 3); +// } + +// NYI-LABEL: @test_vsriq_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VSRI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: [[VSRI_N2:%.*]] = call <2 x i64> @llvm.aarch64.neon.vsri.v2i64(<2 x i64> [[VSRI_N]], <2 x i64> [[VSRI_N1]], i32 3) +// NYI: ret <2 x i64> [[VSRI_N2]] +// int64x2_t test_vsriq_n_s64(int64x2_t a, int64x2_t b) { +// return vsriq_n_s64(a, b, 3); +// } + +// NYI-LABEL: @test_vsri_n_u8( +// NYI: [[VSRI_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3) +// NYI: ret <8 x i8> [[VSRI_N]] +// uint8x8_t test_vsri_n_u8(uint8x8_t a, uint8x8_t b) { +// return vsri_n_u8(a, b, 3); +// } + +// NYI-LABEL: @test_vsri_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VSRI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: [[VSRI_N2:%.*]] = call <4 x i16> @llvm.aarch64.neon.vsri.v4i16(<4 x i16> [[VSRI_N]], <4 x i16> [[VSRI_N1]], i32 3) +// NYI: ret <4 x i16> [[VSRI_N2]] +// uint16x4_t test_vsri_n_u16(uint16x4_t a, uint16x4_t b) { +// return vsri_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vsri_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VSRI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> +// NYI: [[VSRI_N2:%.*]] = call <2 x i32> @llvm.aarch64.neon.vsri.v2i32(<2 x i32> [[VSRI_N]], <2 x i32> [[VSRI_N1]], i32 3) +// NYI: ret <2 x i32> [[VSRI_N2]] +// uint32x2_t test_vsri_n_u32(uint32x2_t a, uint32x2_t b) { +// return vsri_n_u32(a, b, 3); +// } + +// NYI-LABEL: @test_vsriq_n_u8( +// NYI: [[VSRI_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.vsri.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3) +// NYI: ret <16 x i8> [[VSRI_N]] +// uint8x16_t test_vsriq_n_u8(uint8x16_t a, uint8x16_t b) { +// return vsriq_n_u8(a, b, 3); +// } + +// NYI-LABEL: @test_vsriq_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VSRI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: [[VSRI_N2:%.*]] = call <8 x i16> @llvm.aarch64.neon.vsri.v8i16(<8 x i16> [[VSRI_N]], <8 x i16> [[VSRI_N1]], i32 3) +// NYI: ret <8 x i16> [[VSRI_N2]] +// uint16x8_t test_vsriq_n_u16(uint16x8_t a, uint16x8_t b) { +// return vsriq_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vsriq_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VSRI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: [[VSRI_N2:%.*]] = call <4 x i32> @llvm.aarch64.neon.vsri.v4i32(<4 x i32> [[VSRI_N]], <4 x i32> [[VSRI_N1]], i32 3) +// NYI: ret <4 x i32> [[VSRI_N2]] +// uint32x4_t test_vsriq_n_u32(uint32x4_t a, uint32x4_t b) { +// return vsriq_n_u32(a, b, 3); +// } + +// NYI-LABEL: @test_vsriq_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VSRI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: [[VSRI_N2:%.*]] = call <2 x i64> @llvm.aarch64.neon.vsri.v2i64(<2 x i64> [[VSRI_N]], <2 x i64> [[VSRI_N1]], i32 3) +// NYI: ret <2 x i64> [[VSRI_N2]] +// uint64x2_t test_vsriq_n_u64(uint64x2_t a, uint64x2_t b) { +// return vsriq_n_u64(a, b, 3); +// } + +// NYI-LABEL: @test_vsri_n_p8( +// NYI: [[VSRI_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3) +// NYI: ret <8 x i8> [[VSRI_N]] +// poly8x8_t test_vsri_n_p8(poly8x8_t a, poly8x8_t b) { +// return vsri_n_p8(a, b, 3); +// } + +// NYI-LABEL: @test_vsri_n_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VSRI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: [[VSRI_N2:%.*]] = call <4 x i16> @llvm.aarch64.neon.vsri.v4i16(<4 x i16> [[VSRI_N]], <4 x i16> [[VSRI_N1]], i32 15) +// NYI: ret <4 x i16> [[VSRI_N2]] +// poly16x4_t test_vsri_n_p16(poly16x4_t a, poly16x4_t b) { +// return vsri_n_p16(a, b, 15); +// } + +// NYI-LABEL: @test_vsriq_n_p8( +// NYI: [[VSRI_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.vsri.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3) +// NYI: ret <16 x i8> [[VSRI_N]] +// poly8x16_t test_vsriq_n_p8(poly8x16_t a, poly8x16_t b) { +// return vsriq_n_p8(a, b, 3); +// } + +// NYI-LABEL: @test_vsriq_n_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VSRI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: [[VSRI_N2:%.*]] = call <8 x i16> @llvm.aarch64.neon.vsri.v8i16(<8 x i16> [[VSRI_N]], <8 x i16> [[VSRI_N1]], i32 15) +// NYI: ret <8 x i16> [[VSRI_N2]] +// poly16x8_t test_vsriq_n_p16(poly16x8_t a, poly16x8_t b) { +// return vsriq_n_p16(a, b, 15); +// } + +// NYI-LABEL: @test_vsli_n_s8( +// NYI: [[VSLI_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3) +// NYI: ret <8 x i8> [[VSLI_N]] +// int8x8_t test_vsli_n_s8(int8x8_t a, int8x8_t b) { +// return vsli_n_s8(a, b, 3); +// } + +// NYI-LABEL: @test_vsli_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VSLI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: [[VSLI_N2:%.*]] = call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> [[VSLI_N]], <4 x i16> [[VSLI_N1]], i32 3) +// NYI: ret <4 x i16> [[VSLI_N2]] +// int16x4_t test_vsli_n_s16(int16x4_t a, int16x4_t b) { +// return vsli_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vsli_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VSLI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> +// NYI: [[VSLI_N2:%.*]] = call <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32> [[VSLI_N]], <2 x i32> [[VSLI_N1]], i32 3) +// NYI: ret <2 x i32> [[VSLI_N2]] +// int32x2_t test_vsli_n_s32(int32x2_t a, int32x2_t b) { +// return vsli_n_s32(a, b, 3); +// } + +// NYI-LABEL: @test_vsliq_n_s8( +// NYI: [[VSLI_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3) +// NYI: ret <16 x i8> [[VSLI_N]] +// int8x16_t test_vsliq_n_s8(int8x16_t a, int8x16_t b) { +// return vsliq_n_s8(a, b, 3); +// } + +// NYI-LABEL: @test_vsliq_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VSLI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: [[VSLI_N2:%.*]] = call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> [[VSLI_N]], <8 x i16> [[VSLI_N1]], i32 3) +// NYI: ret <8 x i16> [[VSLI_N2]] +// int16x8_t test_vsliq_n_s16(int16x8_t a, int16x8_t b) { +// return vsliq_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vsliq_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VSLI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: [[VSLI_N2:%.*]] = call <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32> [[VSLI_N]], <4 x i32> [[VSLI_N1]], i32 3) +// NYI: ret <4 x i32> [[VSLI_N2]] +// int32x4_t test_vsliq_n_s32(int32x4_t a, int32x4_t b) { +// return vsliq_n_s32(a, b, 3); +// } + +// NYI-LABEL: @test_vsliq_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VSLI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: [[VSLI_N2:%.*]] = call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> [[VSLI_N]], <2 x i64> [[VSLI_N1]], i32 3) +// NYI: ret <2 x i64> [[VSLI_N2]] +// int64x2_t test_vsliq_n_s64(int64x2_t a, int64x2_t b) { +// return vsliq_n_s64(a, b, 3); +// } + +// NYI-LABEL: @test_vsli_n_u8( +// NYI: [[VSLI_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3) +// NYI: ret <8 x i8> [[VSLI_N]] +// uint8x8_t test_vsli_n_u8(uint8x8_t a, uint8x8_t b) { +// return vsli_n_u8(a, b, 3); +// } + +// NYI-LABEL: @test_vsli_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VSLI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: [[VSLI_N2:%.*]] = call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> [[VSLI_N]], <4 x i16> [[VSLI_N1]], i32 3) +// NYI: ret <4 x i16> [[VSLI_N2]] +// uint16x4_t test_vsli_n_u16(uint16x4_t a, uint16x4_t b) { +// return vsli_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vsli_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VSLI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> +// NYI: [[VSLI_N2:%.*]] = call <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32> [[VSLI_N]], <2 x i32> [[VSLI_N1]], i32 3) +// NYI: ret <2 x i32> [[VSLI_N2]] +// uint32x2_t test_vsli_n_u32(uint32x2_t a, uint32x2_t b) { +// return vsli_n_u32(a, b, 3); +// } + +// NYI-LABEL: @test_vsliq_n_u8( +// NYI: [[VSLI_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3) +// NYI: ret <16 x i8> [[VSLI_N]] +// uint8x16_t test_vsliq_n_u8(uint8x16_t a, uint8x16_t b) { +// return vsliq_n_u8(a, b, 3); +// } + +// NYI-LABEL: @test_vsliq_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VSLI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: [[VSLI_N2:%.*]] = call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> [[VSLI_N]], <8 x i16> [[VSLI_N1]], i32 3) +// NYI: ret <8 x i16> [[VSLI_N2]] +// uint16x8_t test_vsliq_n_u16(uint16x8_t a, uint16x8_t b) { +// return vsliq_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vsliq_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VSLI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: [[VSLI_N2:%.*]] = call <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32> [[VSLI_N]], <4 x i32> [[VSLI_N1]], i32 3) +// NYI: ret <4 x i32> [[VSLI_N2]] +// uint32x4_t test_vsliq_n_u32(uint32x4_t a, uint32x4_t b) { +// return vsliq_n_u32(a, b, 3); +// } + +// NYI-LABEL: @test_vsliq_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VSLI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: [[VSLI_N2:%.*]] = call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> [[VSLI_N]], <2 x i64> [[VSLI_N1]], i32 3) +// NYI: ret <2 x i64> [[VSLI_N2]] +// uint64x2_t test_vsliq_n_u64(uint64x2_t a, uint64x2_t b) { +// return vsliq_n_u64(a, b, 3); +// } + +// NYI-LABEL: @test_vsli_n_p8( +// NYI: [[VSLI_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3) +// NYI: ret <8 x i8> [[VSLI_N]] +// poly8x8_t test_vsli_n_p8(poly8x8_t a, poly8x8_t b) { +// return vsli_n_p8(a, b, 3); +// } + +// NYI-LABEL: @test_vsli_n_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VSLI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: [[VSLI_N2:%.*]] = call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> [[VSLI_N]], <4 x i16> [[VSLI_N1]], i32 15) +// NYI: ret <4 x i16> [[VSLI_N2]] +// poly16x4_t test_vsli_n_p16(poly16x4_t a, poly16x4_t b) { +// return vsli_n_p16(a, b, 15); +// } + +// NYI-LABEL: @test_vsliq_n_p8( +// NYI: [[VSLI_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3) +// NYI: ret <16 x i8> [[VSLI_N]] +// poly8x16_t test_vsliq_n_p8(poly8x16_t a, poly8x16_t b) { +// return vsliq_n_p8(a, b, 3); +// } + +// NYI-LABEL: @test_vsliq_n_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VSLI_N1:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: [[VSLI_N2:%.*]] = call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> [[VSLI_N]], <8 x i16> [[VSLI_N1]], i32 15) +// NYI: ret <8 x i16> [[VSLI_N2]] +// poly16x8_t test_vsliq_n_p16(poly16x8_t a, poly16x8_t b) { +// return vsliq_n_p16(a, b, 15); +// } + +// NYI-LABEL: @test_vqshlu_n_s8( +// NYI: [[VQSHLU_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> %a, <8 x i8> ) +// NYI: ret <8 x i8> [[VQSHLU_N]] +// uint8x8_t test_vqshlu_n_s8(int8x8_t a) { +// return vqshlu_n_s8(a, 3); +// } + +// NYI-LABEL: @test_vqshlu_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VQSHLU_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VQSHLU_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> [[VQSHLU_N]], <4 x i16> ) +// NYI: ret <4 x i16> [[VQSHLU_N1]] +// uint16x4_t test_vqshlu_n_s16(int16x4_t a) { +// return vqshlu_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vqshlu_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VQSHLU_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VQSHLU_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32> [[VQSHLU_N]], <2 x i32> ) +// NYI: ret <2 x i32> [[VQSHLU_N1]] +// uint32x2_t test_vqshlu_n_s32(int32x2_t a) { +// return vqshlu_n_s32(a, 3); +// } + +// NYI-LABEL: @test_vqshluq_n_s8( +// NYI: [[VQSHLU_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8> %a, <16 x i8> ) +// NYI: ret <16 x i8> [[VQSHLU_N]] +// uint8x16_t test_vqshluq_n_s8(int8x16_t a) { +// return vqshluq_n_s8(a, 3); +// } + +// NYI-LABEL: @test_vqshluq_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VQSHLU_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQSHLU_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16> [[VQSHLU_N]], <8 x i16> ) +// NYI: ret <8 x i16> [[VQSHLU_N1]] +// uint16x8_t test_vqshluq_n_s16(int16x8_t a) { +// return vqshluq_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vqshluq_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VQSHLU_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQSHLU_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32> [[VQSHLU_N]], <4 x i32> ) +// NYI: ret <4 x i32> [[VQSHLU_N1]] +// uint32x4_t test_vqshluq_n_s32(int32x4_t a) { +// return vqshluq_n_s32(a, 3); +// } + +// NYI-LABEL: @test_vqshluq_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VQSHLU_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQSHLU_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64> [[VQSHLU_N]], <2 x i64> ) +// NYI: ret <2 x i64> [[VQSHLU_N1]] +// uint64x2_t test_vqshluq_n_s64(int64x2_t a) { +// return vqshluq_n_s64(a, 3); +// } + +// NYI-LABEL: @test_vshrn_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[TMP2:%.*]] = ashr <8 x i16> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <8 x i16> [[TMP2]] to <8 x i8> +// NYI: ret <8 x i8> [[VSHRN_N]] +// int8x8_t test_vshrn_n_s16(int16x8_t a) { +// return vshrn_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vshrn_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[TMP2:%.*]] = ashr <4 x i32> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16> +// NYI: ret <4 x i16> [[VSHRN_N]] +// int16x4_t test_vshrn_n_s32(int32x4_t a) { +// return vshrn_n_s32(a, 9); +// } + +// NYI-LABEL: @test_vshrn_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[TMP2:%.*]] = ashr <2 x i64> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <2 x i64> [[TMP2]] to <2 x i32> +// NYI: ret <2 x i32> [[VSHRN_N]] +// int32x2_t test_vshrn_n_s64(int64x2_t a) { +// return vshrn_n_s64(a, 19); +// } + +// NYI-LABEL: @test_vshrn_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[TMP2:%.*]] = lshr <8 x i16> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <8 x i16> [[TMP2]] to <8 x i8> +// NYI: ret <8 x i8> [[VSHRN_N]] +// uint8x8_t test_vshrn_n_u16(uint16x8_t a) { +// return vshrn_n_u16(a, 3); +// } + +// NYI-LABEL: @test_vshrn_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[TMP2:%.*]] = lshr <4 x i32> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16> +// NYI: ret <4 x i16> [[VSHRN_N]] +// uint16x4_t test_vshrn_n_u32(uint32x4_t a) { +// return vshrn_n_u32(a, 9); +// } + +// NYI-LABEL: @test_vshrn_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[TMP2:%.*]] = lshr <2 x i64> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <2 x i64> [[TMP2]] to <2 x i32> +// NYI: ret <2 x i32> [[VSHRN_N]] +// uint32x2_t test_vshrn_n_u64(uint64x2_t a) { +// return vshrn_n_u64(a, 19); +// } + +// NYI-LABEL: @test_vshrn_high_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[TMP2:%.*]] = ashr <8 x i16> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <8 x i16> [[TMP2]] to <8 x i8> +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VSHRN_N]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I]] +// int8x16_t test_vshrn_high_n_s16(int8x8_t a, int16x8_t b) { +// return vshrn_high_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vshrn_high_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[TMP2:%.*]] = ashr <4 x i32> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16> +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VSHRN_N]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I]] +// int16x8_t test_vshrn_high_n_s32(int16x4_t a, int32x4_t b) { +// return vshrn_high_n_s32(a, b, 9); +// } + +// NYI-LABEL: @test_vshrn_high_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[TMP2:%.*]] = ashr <2 x i64> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <2 x i64> [[TMP2]] to <2 x i32> +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VSHRN_N]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I]] +// int32x4_t test_vshrn_high_n_s64(int32x2_t a, int64x2_t b) { +// return vshrn_high_n_s64(a, b, 19); +// } + +// NYI-LABEL: @test_vshrn_high_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[TMP2:%.*]] = lshr <8 x i16> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <8 x i16> [[TMP2]] to <8 x i8> +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VSHRN_N]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I]] +// uint8x16_t test_vshrn_high_n_u16(uint8x8_t a, uint16x8_t b) { +// return vshrn_high_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vshrn_high_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[TMP2:%.*]] = lshr <4 x i32> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16> +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VSHRN_N]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I]] +// uint16x8_t test_vshrn_high_n_u32(uint16x4_t a, uint32x4_t b) { +// return vshrn_high_n_u32(a, b, 9); +// } + +// NYI-LABEL: @test_vshrn_high_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[TMP2:%.*]] = lshr <2 x i64> [[TMP1]], +// NYI: [[VSHRN_N:%.*]] = trunc <2 x i64> [[TMP2]] to <2 x i32> +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VSHRN_N]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I]] +// uint32x4_t test_vshrn_high_n_u64(uint32x2_t a, uint64x2_t b) { +// return vshrn_high_n_u64(a, b, 19); +// } + +// NYI-LABEL: @test_vqshrun_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VQSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQSHRUN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> [[VQSHRUN_N]], i32 3) +// NYI: ret <8 x i8> [[VQSHRUN_N1]] +// uint8x8_t test_vqshrun_n_s16(int16x8_t a) { +// return vqshrun_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vqshrun_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VQSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQSHRUN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> [[VQSHRUN_N]], i32 9) +// NYI: ret <4 x i16> [[VQSHRUN_N1]] +// uint16x4_t test_vqshrun_n_s32(int32x4_t a) { +// return vqshrun_n_s32(a, 9); +// } + +// NYI-LABEL: @test_vqshrun_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VQSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQSHRUN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> [[VQSHRUN_N]], i32 19) +// NYI: ret <2 x i32> [[VQSHRUN_N1]] +// uint32x2_t test_vqshrun_n_s64(int64x2_t a) { +// return vqshrun_n_s64(a, 19); +// } + +// NYI-LABEL: @test_vqshrun_high_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQSHRUN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> [[VQSHRUN_N]], i32 3) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VQSHRUN_N1]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I]] +// int8x16_t test_vqshrun_high_n_s16(int8x8_t a, int16x8_t b) { +// return vqshrun_high_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vqshrun_high_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQSHRUN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> [[VQSHRUN_N]], i32 9) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VQSHRUN_N1]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I]] +// int16x8_t test_vqshrun_high_n_s32(int16x4_t a, int32x4_t b) { +// return vqshrun_high_n_s32(a, b, 9); +// } + +// NYI-LABEL: @test_vqshrun_high_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQSHRUN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> [[VQSHRUN_N]], i32 19) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VQSHRUN_N1]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I]] +// int32x4_t test_vqshrun_high_n_s64(int32x2_t a, int64x2_t b) { +// return vqshrun_high_n_s64(a, b, 19); +// } + +// NYI-LABEL: @test_vrshrn_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> [[VRSHRN_N]], i32 3) +// NYI: ret <8 x i8> [[VRSHRN_N1]] +// int8x8_t test_vrshrn_n_s16(int16x8_t a) { +// return vrshrn_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vrshrn_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> [[VRSHRN_N]], i32 9) +// NYI: ret <4 x i16> [[VRSHRN_N1]] +// int16x4_t test_vrshrn_n_s32(int32x4_t a) { +// return vrshrn_n_s32(a, 9); +// } + +// NYI-LABEL: @test_vrshrn_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> [[VRSHRN_N]], i32 19) +// NYI: ret <2 x i32> [[VRSHRN_N1]] +// int32x2_t test_vrshrn_n_s64(int64x2_t a) { +// return vrshrn_n_s64(a, 19); +// } + +// NYI-LABEL: @test_vrshrn_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> [[VRSHRN_N]], i32 3) +// NYI: ret <8 x i8> [[VRSHRN_N1]] +// uint8x8_t test_vrshrn_n_u16(uint16x8_t a) { +// return vrshrn_n_u16(a, 3); +// } + +// NYI-LABEL: @test_vrshrn_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> [[VRSHRN_N]], i32 9) +// NYI: ret <4 x i16> [[VRSHRN_N1]] +// uint16x4_t test_vrshrn_n_u32(uint32x4_t a) { +// return vrshrn_n_u32(a, 9); +// } + +// NYI-LABEL: @test_vrshrn_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> [[VRSHRN_N]], i32 19) +// NYI: ret <2 x i32> [[VRSHRN_N1]] +// uint32x2_t test_vrshrn_n_u64(uint64x2_t a) { +// return vrshrn_n_u64(a, 19); +// } + +// NYI-LABEL: @test_vrshrn_high_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> [[VRSHRN_N]], i32 3) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VRSHRN_N1]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I]] +// int8x16_t test_vrshrn_high_n_s16(int8x8_t a, int16x8_t b) { +// return vrshrn_high_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vrshrn_high_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> [[VRSHRN_N]], i32 9) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VRSHRN_N1]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I]] +// int16x8_t test_vrshrn_high_n_s32(int16x4_t a, int32x4_t b) { +// return vrshrn_high_n_s32(a, b, 9); +// } + +// NYI-LABEL: @test_vrshrn_high_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> [[VRSHRN_N]], i32 19) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VRSHRN_N1]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I]] +// int32x4_t test_vrshrn_high_n_s64(int32x2_t a, int64x2_t b) { +// return vrshrn_high_n_s64(a, b, 19); +// } + +// NYI-LABEL: @test_vrshrn_high_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> [[VRSHRN_N]], i32 3) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VRSHRN_N1]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I]] +// uint8x16_t test_vrshrn_high_n_u16(uint8x8_t a, uint16x8_t b) { +// return vrshrn_high_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vrshrn_high_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> [[VRSHRN_N]], i32 9) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VRSHRN_N1]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I]] +// uint16x8_t test_vrshrn_high_n_u32(uint16x4_t a, uint32x4_t b) { +// return vrshrn_high_n_u32(a, b, 9); +// } + +// NYI-LABEL: @test_vrshrn_high_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> [[VRSHRN_N]], i32 19) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VRSHRN_N1]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I]] +// uint32x4_t test_vrshrn_high_n_u64(uint32x2_t a, uint64x2_t b) { +// return vrshrn_high_n_u64(a, b, 19); +// } + +// NYI-LABEL: @test_vqrshrun_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQRSHRUN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> [[VQRSHRUN_N]], i32 3) +// NYI: ret <8 x i8> [[VQRSHRUN_N1]] +// uint8x8_t test_vqrshrun_n_s16(int16x8_t a) { +// return vqrshrun_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vqrshrun_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQRSHRUN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> [[VQRSHRUN_N]], i32 9) +// NYI: ret <4 x i16> [[VQRSHRUN_N1]] +// uint16x4_t test_vqrshrun_n_s32(int32x4_t a) { +// return vqrshrun_n_s32(a, 9); +// } + +// NYI-LABEL: @test_vqrshrun_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQRSHRUN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> [[VQRSHRUN_N]], i32 19) +// NYI: ret <2 x i32> [[VQRSHRUN_N1]] +// uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { +// return vqrshrun_n_s64(a, 19); +// } + +// NYI-LABEL: @test_vqrshrun_high_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQRSHRUN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> [[VQRSHRUN_N]], i32 3) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VQRSHRUN_N1]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I]] +// int8x16_t test_vqrshrun_high_n_s16(int8x8_t a, int16x8_t b) { +// return vqrshrun_high_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vqrshrun_high_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQRSHRUN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> [[VQRSHRUN_N]], i32 9) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VQRSHRUN_N1]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I]] +// int16x8_t test_vqrshrun_high_n_s32(int16x4_t a, int32x4_t b) { +// return vqrshrun_high_n_s32(a, b, 9); +// } + +// NYI-LABEL: @test_vqrshrun_high_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQRSHRUN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> [[VQRSHRUN_N]], i32 19) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VQRSHRUN_N1]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I]] +// int32x4_t test_vqrshrun_high_n_s64(int32x2_t a, int64x2_t b) { +// return vqrshrun_high_n_s64(a, b, 19); +// } + +// NYI-LABEL: @test_vqshrn_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> [[VQSHRN_N]], i32 3) +// NYI: ret <8 x i8> [[VQSHRN_N1]] +// int8x8_t test_vqshrn_n_s16(int16x8_t a) { +// return vqshrn_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vqshrn_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> [[VQSHRN_N]], i32 9) +// NYI: ret <4 x i16> [[VQSHRN_N1]] +// int16x4_t test_vqshrn_n_s32(int32x4_t a) { +// return vqshrn_n_s32(a, 9); +// } + +// NYI-LABEL: @test_vqshrn_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> [[VQSHRN_N]], i32 19) +// NYI: ret <2 x i32> [[VQSHRN_N1]] +// int32x2_t test_vqshrn_n_s64(int64x2_t a) { +// return vqshrn_n_s64(a, 19); +// } + +// NYI-LABEL: @test_vqshrn_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> [[VQSHRN_N]], i32 3) +// NYI: ret <8 x i8> [[VQSHRN_N1]] +// uint8x8_t test_vqshrn_n_u16(uint16x8_t a) { +// return vqshrn_n_u16(a, 3); +// } + +// NYI-LABEL: @test_vqshrn_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> [[VQSHRN_N]], i32 9) +// NYI: ret <4 x i16> [[VQSHRN_N1]] +// uint16x4_t test_vqshrn_n_u32(uint32x4_t a) { +// return vqshrn_n_u32(a, 9); +// } + +// NYI-LABEL: @test_vqshrn_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> [[VQSHRN_N]], i32 19) +// NYI: ret <2 x i32> [[VQSHRN_N1]] +// uint32x2_t test_vqshrn_n_u64(uint64x2_t a) { +// return vqshrn_n_u64(a, 19); +// } + +// NYI-LABEL: @test_vqshrn_high_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> [[VQSHRN_N]], i32 3) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VQSHRN_N1]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I]] +// int8x16_t test_vqshrn_high_n_s16(int8x8_t a, int16x8_t b) { +// return vqshrn_high_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vqshrn_high_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> [[VQSHRN_N]], i32 9) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VQSHRN_N1]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I]] +// int16x8_t test_vqshrn_high_n_s32(int16x4_t a, int32x4_t b) { +// return vqshrn_high_n_s32(a, b, 9); +// } + +// NYI-LABEL: @test_vqshrn_high_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> [[VQSHRN_N]], i32 19) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VQSHRN_N1]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I]] +// int32x4_t test_vqshrn_high_n_s64(int32x2_t a, int64x2_t b) { +// return vqshrn_high_n_s64(a, b, 19); +// } + +// NYI-LABEL: @test_vqshrn_high_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> [[VQSHRN_N]], i32 3) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VQSHRN_N1]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I]] +// uint8x16_t test_vqshrn_high_n_u16(uint8x8_t a, uint16x8_t b) { +// return vqshrn_high_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vqshrn_high_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> [[VQSHRN_N]], i32 9) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VQSHRN_N1]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I]] +// uint16x8_t test_vqshrn_high_n_u32(uint16x4_t a, uint32x4_t b) { +// return vqshrn_high_n_u32(a, b, 9); +// } + +// NYI-LABEL: @test_vqshrn_high_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> [[VQSHRN_N]], i32 19) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VQSHRN_N1]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I]] +// uint32x4_t test_vqshrn_high_n_u64(uint32x2_t a, uint64x2_t b) { +// return vqshrn_high_n_u64(a, b, 19); +// } + +// NYI-LABEL: @test_vqrshrn_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> [[VQRSHRN_N]], i32 3) +// NYI: ret <8 x i8> [[VQRSHRN_N1]] +// int8x8_t test_vqrshrn_n_s16(int16x8_t a) { +// return vqrshrn_n_s16(a, 3); +// } + +// NYI-LABEL: @test_vqrshrn_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> [[VQRSHRN_N]], i32 9) +// NYI: ret <4 x i16> [[VQRSHRN_N1]] +// int16x4_t test_vqrshrn_n_s32(int32x4_t a) { +// return vqrshrn_n_s32(a, 9); +// } + +// NYI-LABEL: @test_vqrshrn_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> [[VQRSHRN_N]], i32 19) +// NYI: ret <2 x i32> [[VQRSHRN_N1]] +// int32x2_t test_vqrshrn_n_s64(int64x2_t a) { +// return vqrshrn_n_s64(a, 19); +// } + +// NYI-LABEL: @test_vqrshrn_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> [[VQRSHRN_N]], i32 3) +// NYI: ret <8 x i8> [[VQRSHRN_N1]] +// uint8x8_t test_vqrshrn_n_u16(uint16x8_t a) { +// return vqrshrn_n_u16(a, 3); +// } + +// NYI-LABEL: @test_vqrshrn_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> [[VQRSHRN_N]], i32 9) +// NYI: ret <4 x i16> [[VQRSHRN_N1]] +// uint16x4_t test_vqrshrn_n_u32(uint32x4_t a) { +// return vqrshrn_n_u32(a, 9); +// } + +// NYI-LABEL: @test_vqrshrn_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> [[VQRSHRN_N]], i32 19) +// NYI: ret <2 x i32> [[VQRSHRN_N1]] +// uint32x2_t test_vqrshrn_n_u64(uint64x2_t a) { +// return vqrshrn_n_u64(a, 19); +// } + +// NYI-LABEL: @test_vqrshrn_high_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> [[VQRSHRN_N]], i32 3) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VQRSHRN_N1]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I]] +// int8x16_t test_vqrshrn_high_n_s16(int8x8_t a, int16x8_t b) { +// return vqrshrn_high_n_s16(a, b, 3); +// } + +// NYI-LABEL: @test_vqrshrn_high_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> [[VQRSHRN_N]], i32 9) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VQRSHRN_N1]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I]] +// int16x8_t test_vqrshrn_high_n_s32(int16x4_t a, int32x4_t b) { +// return vqrshrn_high_n_s32(a, b, 9); +// } + +// NYI-LABEL: @test_vqrshrn_high_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> [[VQRSHRN_N]], i32 19) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VQRSHRN_N1]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I]] +// int32x4_t test_vqrshrn_high_n_s64(int32x2_t a, int64x2_t b) { +// return vqrshrn_high_n_s64(a, b, 19); +// } + +// NYI-LABEL: @test_vqrshrn_high_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> [[VQRSHRN_N]], i32 3) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> [[VQRSHRN_N1]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I]] +// uint8x16_t test_vqrshrn_high_n_u16(uint8x8_t a, uint16x8_t b) { +// return vqrshrn_high_n_u16(a, b, 3); +// } + +// NYI-LABEL: @test_vqrshrn_high_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> [[VQRSHRN_N]], i32 9) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> [[VQRSHRN_N1]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I]] +// uint16x8_t test_vqrshrn_high_n_u32(uint16x4_t a, uint32x4_t b) { +// return vqrshrn_high_n_u32(a, b, 9); +// } + +// NYI-LABEL: @test_vqrshrn_high_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> [[VQRSHRN_N]], i32 19) +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> [[VQRSHRN_N1]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I]] +// uint32x4_t test_vqrshrn_high_n_u64(uint32x2_t a, uint64x2_t b) { +// return vqrshrn_high_n_u64(a, b, 19); +// } + +// NYI-LABEL: @test_vshll_n_s8( +// NYI: [[TMP0:%.*]] = sext <8 x i8> %a to <8 x i16> +// NYI: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], +// NYI: ret <8 x i16> [[VSHLL_N]] +// int16x8_t test_vshll_n_s8(int8x8_t a) { +// return vshll_n_s8(a, 3); +// } + +// NYI-LABEL: @test_vshll_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32> +// NYI: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], +// NYI: ret <4 x i32> [[VSHLL_N]] +// int32x4_t test_vshll_n_s16(int16x4_t a) { +// return vshll_n_s16(a, 9); +// } + +// NYI-LABEL: @test_vshll_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[TMP2:%.*]] = sext <2 x i32> [[TMP1]] to <2 x i64> +// NYI: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], +// NYI: ret <2 x i64> [[VSHLL_N]] +// int64x2_t test_vshll_n_s32(int32x2_t a) { +// return vshll_n_s32(a, 19); +// } + +// NYI-LABEL: @test_vshll_n_u8( +// NYI: [[TMP0:%.*]] = zext <8 x i8> %a to <8 x i16> +// NYI: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], +// NYI: ret <8 x i16> [[VSHLL_N]] +// uint16x8_t test_vshll_n_u8(uint8x8_t a) { +// return vshll_n_u8(a, 3); +// } + +// NYI-LABEL: @test_vshll_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32> +// NYI: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], +// NYI: ret <4 x i32> [[VSHLL_N]] +// uint32x4_t test_vshll_n_u16(uint16x4_t a) { +// return vshll_n_u16(a, 9); +// } + +// NYI-LABEL: @test_vshll_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64> +// NYI: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], +// NYI: ret <2 x i64> [[VSHLL_N]] +// uint64x2_t test_vshll_n_u32(uint32x2_t a) { +// return vshll_n_u32(a, 19); +// } + +// NYI-LABEL: @test_vshll_high_n_s8( +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[TMP0:%.*]] = sext <8 x i8> [[SHUFFLE_I]] to <8 x i16> +// NYI: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], +// NYI: ret <8 x i16> [[VSHLL_N]] +// int16x8_t test_vshll_high_n_s8(int8x16_t a) { +// return vshll_high_n_s8(a, 3); +// } + +// NYI-LABEL: @test_vshll_high_n_s16( +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32> +// NYI: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], +// NYI: ret <4 x i32> [[VSHLL_N]] +// int32x4_t test_vshll_high_n_s16(int16x8_t a) { +// return vshll_high_n_s16(a, 9); +// } + +// NYI-LABEL: @test_vshll_high_n_s32( +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[TMP2:%.*]] = sext <2 x i32> [[TMP1]] to <2 x i64> +// NYI: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], +// NYI: ret <2 x i64> [[VSHLL_N]] +// int64x2_t test_vshll_high_n_s32(int32x4_t a) { +// return vshll_high_n_s32(a, 19); +// } + +// NYI-LABEL: @test_vshll_high_n_u8( +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[TMP0:%.*]] = zext <8 x i8> [[SHUFFLE_I]] to <8 x i16> +// NYI: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], +// NYI: ret <8 x i16> [[VSHLL_N]] +// uint16x8_t test_vshll_high_n_u8(uint8x16_t a) { +// return vshll_high_n_u8(a, 3); +// } + +// NYI-LABEL: @test_vshll_high_n_u16( +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32> +// NYI: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], +// NYI: ret <4 x i32> [[VSHLL_N]] +// uint32x4_t test_vshll_high_n_u16(uint16x8_t a) { +// return vshll_high_n_u16(a, 9); +// } + +// NYI-LABEL: @test_vshll_high_n_u32( +// NYI: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64> +// NYI: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], +// NYI: ret <2 x i64> [[VSHLL_N]] +// uint64x2_t test_vshll_high_n_u32(uint32x4_t a) { +// return vshll_high_n_u32(a, 19); +// } + +// NYI-LABEL: @test_vmovl_s8( +// NYI: [[VMOVL_I:%.*]] = sext <8 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[VMOVL_I]] +// int16x8_t test_vmovl_s8(int8x8_t a) { +// return vmovl_s8(a); +// } + +// NYI-LABEL: @test_vmovl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VMOVL_I:%.*]] = sext <4 x i16> %a to <4 x i32> +// NYI: ret <4 x i32> [[VMOVL_I]] +// int32x4_t test_vmovl_s16(int16x4_t a) { +// return vmovl_s16(a); +// } + +// NYI-LABEL: @test_vmovl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VMOVL_I:%.*]] = sext <2 x i32> %a to <2 x i64> +// NYI: ret <2 x i64> [[VMOVL_I]] +// int64x2_t test_vmovl_s32(int32x2_t a) { +// return vmovl_s32(a); +// } + +// NYI-LABEL: @test_vmovl_u8( +// NYI: [[VMOVL_I:%.*]] = zext <8 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[VMOVL_I]] +// uint16x8_t test_vmovl_u8(uint8x8_t a) { +// return vmovl_u8(a); +// } + +// NYI-LABEL: @test_vmovl_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VMOVL_I:%.*]] = zext <4 x i16> %a to <4 x i32> +// NYI: ret <4 x i32> [[VMOVL_I]] +// uint32x4_t test_vmovl_u16(uint16x4_t a) { +// return vmovl_u16(a); +// } + +// NYI-LABEL: @test_vmovl_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VMOVL_I:%.*]] = zext <2 x i32> %a to <2 x i64> +// NYI: ret <2 x i64> [[VMOVL_I]] +// uint64x2_t test_vmovl_u32(uint32x2_t a) { +// return vmovl_u32(a); +// } + +// NYI-LABEL: @test_vmovl_high_s8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[TMP0:%.*]] = sext <8 x i8> [[SHUFFLE_I_I]] to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vmovl_high_s8(int8x16_t a) { +// return vmovl_high_s8(a); +// } + +// NYI-LABEL: @test_vmovl_high_s16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = sext <4 x i16> [[SHUFFLE_I_I]] to <4 x i32> +// NYI: ret <4 x i32> [[TMP1]] +// int32x4_t test_vmovl_high_s16(int16x8_t a) { +// return vmovl_high_s16(a); +// } + +// NYI-LABEL: @test_vmovl_high_s32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = sext <2 x i32> [[SHUFFLE_I_I]] to <2 x i64> +// NYI: ret <2 x i64> [[TMP1]] +// int64x2_t test_vmovl_high_s32(int32x4_t a) { +// return vmovl_high_s32(a); +// } + +// NYI-LABEL: @test_vmovl_high_u8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[TMP0:%.*]] = zext <8 x i8> [[SHUFFLE_I_I]] to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vmovl_high_u8(uint8x16_t a) { +// return vmovl_high_u8(a); +// } + +// NYI-LABEL: @test_vmovl_high_u16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = zext <4 x i16> [[SHUFFLE_I_I]] to <4 x i32> +// NYI: ret <4 x i32> [[TMP1]] +// uint32x4_t test_vmovl_high_u16(uint16x8_t a) { +// return vmovl_high_u16(a); +// } + +// NYI-LABEL: @test_vmovl_high_u32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = zext <2 x i32> [[SHUFFLE_I_I]] to <2 x i64> +// NYI: ret <2 x i64> [[TMP1]] +// uint64x2_t test_vmovl_high_u32(uint32x4_t a) { +// return vmovl_high_u32(a); +// } + +// NYI-LABEL: @test_vcvt_n_f32_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VCVT_N1:%.*]] = call <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> [[VCVT_N]], i32 31) +// NYI: ret <2 x float> [[VCVT_N1]] +// float32x2_t test_vcvt_n_f32_s32(int32x2_t a) { +// return vcvt_n_f32_s32(a, 31); +// } + +// NYI-LABEL: @test_vcvtq_n_f32_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VCVT_N1:%.*]] = call <4 x float> @llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> [[VCVT_N]], i32 31) +// NYI: ret <4 x float> [[VCVT_N1]] +// float32x4_t test_vcvtq_n_f32_s32(int32x4_t a) { +// return vcvtq_n_f32_s32(a, 31); +// } + +// NYI-LABEL: @test_vcvtq_n_f64_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VCVT_N1:%.*]] = call <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> [[VCVT_N]], i32 50) +// NYI: ret <2 x double> [[VCVT_N1]] +// float64x2_t test_vcvtq_n_f64_s64(int64x2_t a) { +// return vcvtq_n_f64_s64(a, 50); +// } + +// NYI-LABEL: @test_vcvt_n_f32_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VCVT_N1:%.*]] = call <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> [[VCVT_N]], i32 31) +// NYI: ret <2 x float> [[VCVT_N1]] +// float32x2_t test_vcvt_n_f32_u32(uint32x2_t a) { +// return vcvt_n_f32_u32(a, 31); +// } + +// NYI-LABEL: @test_vcvtq_n_f32_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VCVT_N1:%.*]] = call <4 x float> @llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> [[VCVT_N]], i32 31) +// NYI: ret <4 x float> [[VCVT_N1]] +// float32x4_t test_vcvtq_n_f32_u32(uint32x4_t a) { +// return vcvtq_n_f32_u32(a, 31); +// } + +// NYI-LABEL: @test_vcvtq_n_f64_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VCVT_N1:%.*]] = call <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> [[VCVT_N]], i32 50) +// NYI: ret <2 x double> [[VCVT_N1]] +// float64x2_t test_vcvtq_n_f64_u64(uint64x2_t a) { +// return vcvtq_n_f64_u64(a, 50); +// } + +// NYI-LABEL: @test_vcvt_n_s32_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> +// NYI: [[VCVT_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float> [[VCVT_N]], i32 31) +// NYI: ret <2 x i32> [[VCVT_N1]] +// int32x2_t test_vcvt_n_s32_f32(float32x2_t a) { +// return vcvt_n_s32_f32(a, 31); +// } + +// NYI-LABEL: @test_vcvtq_n_s32_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> +// NYI: [[VCVT_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float> [[VCVT_N]], i32 31) +// NYI: ret <4 x i32> [[VCVT_N1]] +// int32x4_t test_vcvtq_n_s32_f32(float32x4_t a) { +// return vcvtq_n_s32_f32(a, 31); +// } + +// NYI-LABEL: @test_vcvtq_n_s64_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double> +// NYI: [[VCVT_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64(<2 x double> [[VCVT_N]], i32 50) +// NYI: ret <2 x i64> [[VCVT_N1]] +// int64x2_t test_vcvtq_n_s64_f64(float64x2_t a) { +// return vcvtq_n_s64_f64(a, 50); +// } + +// NYI-LABEL: @test_vcvt_n_u32_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> +// NYI: [[VCVT_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float> [[VCVT_N]], i32 31) +// NYI: ret <2 x i32> [[VCVT_N1]] +// uint32x2_t test_vcvt_n_u32_f32(float32x2_t a) { +// return vcvt_n_u32_f32(a, 31); +// } + +// NYI-LABEL: @test_vcvtq_n_u32_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> +// NYI: [[VCVT_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float> [[VCVT_N]], i32 31) +// NYI: ret <4 x i32> [[VCVT_N1]] +// uint32x4_t test_vcvtq_n_u32_f32(float32x4_t a) { +// return vcvtq_n_u32_f32(a, 31); +// } + +// NYI-LABEL: @test_vcvtq_n_u64_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double> +// NYI: [[VCVT_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64(<2 x double> [[VCVT_N]], i32 50) +// NYI: ret <2 x i64> [[VCVT_N1]] +// uint64x2_t test_vcvtq_n_u64_f64(float64x2_t a) { +// return vcvtq_n_u64_f64(a, 50); +// } + +// NYI-LABEL: @test_vaddl_s8( +// NYI: [[VMOVL_I_I:%.*]] = sext <8 x i8> %a to <8 x i16> +// NYI: [[VMOVL_I4_I:%.*]] = sext <8 x i8> %b to <8 x i16> +// NYI: [[ADD_I:%.*]] = add <8 x i16> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// int16x8_t test_vaddl_s8(int8x8_t a, int8x8_t b) { +// return vaddl_s8(a, b); +// } + +// NYI-LABEL: @test_vaddl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = sext <4 x i16> %a to <4 x i32> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMOVL_I4_I:%.*]] = sext <4 x i16> %b to <4 x i32> +// NYI: [[ADD_I:%.*]] = add <4 x i32> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// int32x4_t test_vaddl_s16(int16x4_t a, int16x4_t b) { +// return vaddl_s16(a, b); +// } + +// NYI-LABEL: @test_vaddl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = sext <2 x i32> %a to <2 x i64> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMOVL_I4_I:%.*]] = sext <2 x i32> %b to <2 x i64> +// NYI: [[ADD_I:%.*]] = add <2 x i64> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <2 x i64> [[ADD_I]] +// int64x2_t test_vaddl_s32(int32x2_t a, int32x2_t b) { +// return vaddl_s32(a, b); +// } + +// NYI-LABEL: @test_vaddl_u8( +// NYI: [[VMOVL_I_I:%.*]] = zext <8 x i8> %a to <8 x i16> +// NYI: [[VMOVL_I4_I:%.*]] = zext <8 x i8> %b to <8 x i16> +// NYI: [[ADD_I:%.*]] = add <8 x i16> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// uint16x8_t test_vaddl_u8(uint8x8_t a, uint8x8_t b) { +// return vaddl_u8(a, b); +// } + +// NYI-LABEL: @test_vaddl_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <4 x i16> %a to <4 x i32> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMOVL_I4_I:%.*]] = zext <4 x i16> %b to <4 x i32> +// NYI: [[ADD_I:%.*]] = add <4 x i32> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// uint32x4_t test_vaddl_u16(uint16x4_t a, uint16x4_t b) { +// return vaddl_u16(a, b); +// } + +// NYI-LABEL: @test_vaddl_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <2 x i32> %a to <2 x i64> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMOVL_I4_I:%.*]] = zext <2 x i32> %b to <2 x i64> +// NYI: [[ADD_I:%.*]] = add <2 x i64> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <2 x i64> [[ADD_I]] +// uint64x2_t test_vaddl_u32(uint32x2_t a, uint32x2_t b) { +// return vaddl_u32(a, b); +// } + +// NYI-LABEL: @test_vaddl_high_s8( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[TMP0:%.*]] = sext <8 x i8> [[SHUFFLE_I_I_I]] to <8 x i16> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[TMP1:%.*]] = sext <8 x i8> [[SHUFFLE_I_I10_I]] to <8 x i16> +// NYI: [[ADD_I:%.*]] = add <8 x i16> [[TMP0]], [[TMP1]] +// NYI: ret <8 x i16> [[ADD_I]] +// int16x8_t test_vaddl_high_s8(int8x16_t a, int8x16_t b) { +// return vaddl_high_s8(a, b); +// } + +// NYI-LABEL: @test_vaddl_high_s16( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = sext <4 x i16> [[SHUFFLE_I_I_I]] to <4 x i32> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I10_I]] to <8 x i8> +// NYI: [[TMP3:%.*]] = sext <4 x i16> [[SHUFFLE_I_I10_I]] to <4 x i32> +// NYI: [[ADD_I:%.*]] = add <4 x i32> [[TMP1]], [[TMP3]] +// NYI: ret <4 x i32> [[ADD_I]] +// int32x4_t test_vaddl_high_s16(int16x8_t a, int16x8_t b) { +// return vaddl_high_s16(a, b); +// } + +// NYI-LABEL: @test_vaddl_high_s32( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = sext <2 x i32> [[SHUFFLE_I_I_I]] to <2 x i64> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I10_I]] to <8 x i8> +// NYI: [[TMP3:%.*]] = sext <2 x i32> [[SHUFFLE_I_I10_I]] to <2 x i64> +// NYI: [[ADD_I:%.*]] = add <2 x i64> [[TMP1]], [[TMP3]] +// NYI: ret <2 x i64> [[ADD_I]] +// int64x2_t test_vaddl_high_s32(int32x4_t a, int32x4_t b) { +// return vaddl_high_s32(a, b); +// } + +// NYI-LABEL: @test_vaddl_high_u8( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[TMP0:%.*]] = zext <8 x i8> [[SHUFFLE_I_I_I]] to <8 x i16> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[TMP1:%.*]] = zext <8 x i8> [[SHUFFLE_I_I10_I]] to <8 x i16> +// NYI: [[ADD_I:%.*]] = add <8 x i16> [[TMP0]], [[TMP1]] +// NYI: ret <8 x i16> [[ADD_I]] +// uint16x8_t test_vaddl_high_u8(uint8x16_t a, uint8x16_t b) { +// return vaddl_high_u8(a, b); +// } + +// NYI-LABEL: @test_vaddl_high_u16( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = zext <4 x i16> [[SHUFFLE_I_I_I]] to <4 x i32> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I10_I]] to <8 x i8> +// NYI: [[TMP3:%.*]] = zext <4 x i16> [[SHUFFLE_I_I10_I]] to <4 x i32> +// NYI: [[ADD_I:%.*]] = add <4 x i32> [[TMP1]], [[TMP3]] +// NYI: ret <4 x i32> [[ADD_I]] +// uint32x4_t test_vaddl_high_u16(uint16x8_t a, uint16x8_t b) { +// return vaddl_high_u16(a, b); +// } + +// NYI-LABEL: @test_vaddl_high_u32( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = zext <2 x i32> [[SHUFFLE_I_I_I]] to <2 x i64> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I10_I]] to <8 x i8> +// NYI: [[TMP3:%.*]] = zext <2 x i32> [[SHUFFLE_I_I10_I]] to <2 x i64> +// NYI: [[ADD_I:%.*]] = add <2 x i64> [[TMP1]], [[TMP3]] +// NYI: ret <2 x i64> [[ADD_I]] +// uint64x2_t test_vaddl_high_u32(uint32x4_t a, uint32x4_t b) { +// return vaddl_high_u32(a, b); +// } + +// NYI-LABEL: @test_vaddw_s8( +// NYI: [[VMOVL_I_I:%.*]] = sext <8 x i8> %b to <8 x i16> +// NYI: [[ADD_I:%.*]] = add <8 x i16> %a, [[VMOVL_I_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// int16x8_t test_vaddw_s8(int16x8_t a, int8x8_t b) { +// return vaddw_s8(a, b); +// } + +// NYI-LABEL: @test_vaddw_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = sext <4 x i16> %b to <4 x i32> +// NYI: [[ADD_I:%.*]] = add <4 x i32> %a, [[VMOVL_I_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// int32x4_t test_vaddw_s16(int32x4_t a, int16x4_t b) { +// return vaddw_s16(a, b); +// } + +// NYI-LABEL: @test_vaddw_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = sext <2 x i32> %b to <2 x i64> +// NYI: [[ADD_I:%.*]] = add <2 x i64> %a, [[VMOVL_I_I]] +// NYI: ret <2 x i64> [[ADD_I]] +// int64x2_t test_vaddw_s32(int64x2_t a, int32x2_t b) { +// return vaddw_s32(a, b); +// } + +// NYI-LABEL: @test_vaddw_u8( +// NYI: [[VMOVL_I_I:%.*]] = zext <8 x i8> %b to <8 x i16> +// NYI: [[ADD_I:%.*]] = add <8 x i16> %a, [[VMOVL_I_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// uint16x8_t test_vaddw_u8(uint16x8_t a, uint8x8_t b) { +// return vaddw_u8(a, b); +// } + +// NYI-LABEL: @test_vaddw_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <4 x i16> %b to <4 x i32> +// NYI: [[ADD_I:%.*]] = add <4 x i32> %a, [[VMOVL_I_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// uint32x4_t test_vaddw_u16(uint32x4_t a, uint16x4_t b) { +// return vaddw_u16(a, b); +// } + +// NYI-LABEL: @test_vaddw_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <2 x i32> %b to <2 x i64> +// NYI: [[ADD_I:%.*]] = add <2 x i64> %a, [[VMOVL_I_I]] +// NYI: ret <2 x i64> [[ADD_I]] +// uint64x2_t test_vaddw_u32(uint64x2_t a, uint32x2_t b) { +// return vaddw_u32(a, b); +// } + +// NYI-LABEL: @test_vaddw_high_s8( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[TMP0:%.*]] = sext <8 x i8> [[SHUFFLE_I_I_I]] to <8 x i16> +// NYI: [[ADD_I:%.*]] = add <8 x i16> %a, [[TMP0]] +// NYI: ret <8 x i16> [[ADD_I]] +// int16x8_t test_vaddw_high_s8(int16x8_t a, int8x16_t b) { +// return vaddw_high_s8(a, b); +// } + +// NYI-LABEL: @test_vaddw_high_s16( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = sext <4 x i16> [[SHUFFLE_I_I_I]] to <4 x i32> +// NYI: [[ADD_I:%.*]] = add <4 x i32> %a, [[TMP1]] +// NYI: ret <4 x i32> [[ADD_I]] +// int32x4_t test_vaddw_high_s16(int32x4_t a, int16x8_t b) { +// return vaddw_high_s16(a, b); +// } + +// NYI-LABEL: @test_vaddw_high_s32( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = sext <2 x i32> [[SHUFFLE_I_I_I]] to <2 x i64> +// NYI: [[ADD_I:%.*]] = add <2 x i64> %a, [[TMP1]] +// NYI: ret <2 x i64> [[ADD_I]] +// int64x2_t test_vaddw_high_s32(int64x2_t a, int32x4_t b) { +// return vaddw_high_s32(a, b); +// } + +// NYI-LABEL: @test_vaddw_high_u8( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[TMP0:%.*]] = zext <8 x i8> [[SHUFFLE_I_I_I]] to <8 x i16> +// NYI: [[ADD_I:%.*]] = add <8 x i16> %a, [[TMP0]] +// NYI: ret <8 x i16> [[ADD_I]] +// uint16x8_t test_vaddw_high_u8(uint16x8_t a, uint8x16_t b) { +// return vaddw_high_u8(a, b); +// } + +// NYI-LABEL: @test_vaddw_high_u16( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = zext <4 x i16> [[SHUFFLE_I_I_I]] to <4 x i32> +// NYI: [[ADD_I:%.*]] = add <4 x i32> %a, [[TMP1]] +// NYI: ret <4 x i32> [[ADD_I]] +// uint32x4_t test_vaddw_high_u16(uint32x4_t a, uint16x8_t b) { +// return vaddw_high_u16(a, b); +// } + +// NYI-LABEL: @test_vaddw_high_u32( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = zext <2 x i32> [[SHUFFLE_I_I_I]] to <2 x i64> +// NYI: [[ADD_I:%.*]] = add <2 x i64> %a, [[TMP1]] +// NYI: ret <2 x i64> [[ADD_I]] +// uint64x2_t test_vaddw_high_u32(uint64x2_t a, uint32x4_t b) { +// return vaddw_high_u32(a, b); +// } + +// NYI-LABEL: @test_vsubl_s8( +// NYI: [[VMOVL_I_I:%.*]] = sext <8 x i8> %a to <8 x i16> +// NYI: [[VMOVL_I4_I:%.*]] = sext <8 x i8> %b to <8 x i16> +// NYI: [[SUB_I:%.*]] = sub <8 x i16> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <8 x i16> [[SUB_I]] +// int16x8_t test_vsubl_s8(int8x8_t a, int8x8_t b) { +// return vsubl_s8(a, b); +// } + +// NYI-LABEL: @test_vsubl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = sext <4 x i16> %a to <4 x i32> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMOVL_I4_I:%.*]] = sext <4 x i16> %b to <4 x i32> +// NYI: [[SUB_I:%.*]] = sub <4 x i32> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <4 x i32> [[SUB_I]] +// int32x4_t test_vsubl_s16(int16x4_t a, int16x4_t b) { +// return vsubl_s16(a, b); +// } + +// NYI-LABEL: @test_vsubl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = sext <2 x i32> %a to <2 x i64> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMOVL_I4_I:%.*]] = sext <2 x i32> %b to <2 x i64> +// NYI: [[SUB_I:%.*]] = sub <2 x i64> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <2 x i64> [[SUB_I]] +// int64x2_t test_vsubl_s32(int32x2_t a, int32x2_t b) { +// return vsubl_s32(a, b); +// } + +// NYI-LABEL: @test_vsubl_u8( +// NYI: [[VMOVL_I_I:%.*]] = zext <8 x i8> %a to <8 x i16> +// NYI: [[VMOVL_I4_I:%.*]] = zext <8 x i8> %b to <8 x i16> +// NYI: [[SUB_I:%.*]] = sub <8 x i16> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <8 x i16> [[SUB_I]] +// uint16x8_t test_vsubl_u8(uint8x8_t a, uint8x8_t b) { +// return vsubl_u8(a, b); +// } + +// NYI-LABEL: @test_vsubl_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <4 x i16> %a to <4 x i32> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMOVL_I4_I:%.*]] = zext <4 x i16> %b to <4 x i32> +// NYI: [[SUB_I:%.*]] = sub <4 x i32> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <4 x i32> [[SUB_I]] +// uint32x4_t test_vsubl_u16(uint16x4_t a, uint16x4_t b) { +// return vsubl_u16(a, b); +// } + +// NYI-LABEL: @test_vsubl_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <2 x i32> %a to <2 x i64> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMOVL_I4_I:%.*]] = zext <2 x i32> %b to <2 x i64> +// NYI: [[SUB_I:%.*]] = sub <2 x i64> [[VMOVL_I_I]], [[VMOVL_I4_I]] +// NYI: ret <2 x i64> [[SUB_I]] +// uint64x2_t test_vsubl_u32(uint32x2_t a, uint32x2_t b) { +// return vsubl_u32(a, b); +// } + +// NYI-LABEL: @test_vsubl_high_s8( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[TMP0:%.*]] = sext <8 x i8> [[SHUFFLE_I_I_I]] to <8 x i16> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[TMP1:%.*]] = sext <8 x i8> [[SHUFFLE_I_I10_I]] to <8 x i16> +// NYI: [[SUB_I:%.*]] = sub <8 x i16> [[TMP0]], [[TMP1]] +// NYI: ret <8 x i16> [[SUB_I]] +// int16x8_t test_vsubl_high_s8(int8x16_t a, int8x16_t b) { +// return vsubl_high_s8(a, b); +// } + +// NYI-LABEL: @test_vsubl_high_s16( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = sext <4 x i16> [[SHUFFLE_I_I_I]] to <4 x i32> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I10_I]] to <8 x i8> +// NYI: [[TMP3:%.*]] = sext <4 x i16> [[SHUFFLE_I_I10_I]] to <4 x i32> +// NYI: [[SUB_I:%.*]] = sub <4 x i32> [[TMP1]], [[TMP3]] +// NYI: ret <4 x i32> [[SUB_I]] +// int32x4_t test_vsubl_high_s16(int16x8_t a, int16x8_t b) { +// return vsubl_high_s16(a, b); +// } + +// NYI-LABEL: @test_vsubl_high_s32( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = sext <2 x i32> [[SHUFFLE_I_I_I]] to <2 x i64> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I10_I]] to <8 x i8> +// NYI: [[TMP3:%.*]] = sext <2 x i32> [[SHUFFLE_I_I10_I]] to <2 x i64> +// NYI: [[SUB_I:%.*]] = sub <2 x i64> [[TMP1]], [[TMP3]] +// NYI: ret <2 x i64> [[SUB_I]] +// int64x2_t test_vsubl_high_s32(int32x4_t a, int32x4_t b) { +// return vsubl_high_s32(a, b); +// } + +// NYI-LABEL: @test_vsubl_high_u8( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[TMP0:%.*]] = zext <8 x i8> [[SHUFFLE_I_I_I]] to <8 x i16> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[TMP1:%.*]] = zext <8 x i8> [[SHUFFLE_I_I10_I]] to <8 x i16> +// NYI: [[SUB_I:%.*]] = sub <8 x i16> [[TMP0]], [[TMP1]] +// NYI: ret <8 x i16> [[SUB_I]] +// uint16x8_t test_vsubl_high_u8(uint8x16_t a, uint8x16_t b) { +// return vsubl_high_u8(a, b); +// } + +// NYI-LABEL: @test_vsubl_high_u16( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = zext <4 x i16> [[SHUFFLE_I_I_I]] to <4 x i32> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I10_I]] to <8 x i8> +// NYI: [[TMP3:%.*]] = zext <4 x i16> [[SHUFFLE_I_I10_I]] to <4 x i32> +// NYI: [[SUB_I:%.*]] = sub <4 x i32> [[TMP1]], [[TMP3]] +// NYI: ret <4 x i32> [[SUB_I]] +// uint32x4_t test_vsubl_high_u16(uint16x8_t a, uint16x8_t b) { +// return vsubl_high_u16(a, b); +// } + +// NYI-LABEL: @test_vsubl_high_u32( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = zext <2 x i32> [[SHUFFLE_I_I_I]] to <2 x i64> +// NYI: [[SHUFFLE_I_I10_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I10_I]] to <8 x i8> +// NYI: [[TMP3:%.*]] = zext <2 x i32> [[SHUFFLE_I_I10_I]] to <2 x i64> +// NYI: [[SUB_I:%.*]] = sub <2 x i64> [[TMP1]], [[TMP3]] +// NYI: ret <2 x i64> [[SUB_I]] +// uint64x2_t test_vsubl_high_u32(uint32x4_t a, uint32x4_t b) { +// return vsubl_high_u32(a, b); +// } + +// NYI-LABEL: @test_vsubw_s8( +// NYI: [[VMOVL_I_I:%.*]] = sext <8 x i8> %b to <8 x i16> +// NYI: [[SUB_I:%.*]] = sub <8 x i16> %a, [[VMOVL_I_I]] +// NYI: ret <8 x i16> [[SUB_I]] +// int16x8_t test_vsubw_s8(int16x8_t a, int8x8_t b) { +// return vsubw_s8(a, b); +// } + +// NYI-LABEL: @test_vsubw_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = sext <4 x i16> %b to <4 x i32> +// NYI: [[SUB_I:%.*]] = sub <4 x i32> %a, [[VMOVL_I_I]] +// NYI: ret <4 x i32> [[SUB_I]] +// int32x4_t test_vsubw_s16(int32x4_t a, int16x4_t b) { +// return vsubw_s16(a, b); +// } + +// NYI-LABEL: @test_vsubw_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = sext <2 x i32> %b to <2 x i64> +// NYI: [[SUB_I:%.*]] = sub <2 x i64> %a, [[VMOVL_I_I]] +// NYI: ret <2 x i64> [[SUB_I]] +// int64x2_t test_vsubw_s32(int64x2_t a, int32x2_t b) { +// return vsubw_s32(a, b); +// } + +// NYI-LABEL: @test_vsubw_u8( +// NYI: [[VMOVL_I_I:%.*]] = zext <8 x i8> %b to <8 x i16> +// NYI: [[SUB_I:%.*]] = sub <8 x i16> %a, [[VMOVL_I_I]] +// NYI: ret <8 x i16> [[SUB_I]] +// uint16x8_t test_vsubw_u8(uint16x8_t a, uint8x8_t b) { +// return vsubw_u8(a, b); +// } + +// NYI-LABEL: @test_vsubw_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <4 x i16> %b to <4 x i32> +// NYI: [[SUB_I:%.*]] = sub <4 x i32> %a, [[VMOVL_I_I]] +// NYI: ret <4 x i32> [[SUB_I]] +// uint32x4_t test_vsubw_u16(uint32x4_t a, uint16x4_t b) { +// return vsubw_u16(a, b); +// } + +// NYI-LABEL: @test_vsubw_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <2 x i32> %b to <2 x i64> +// NYI: [[SUB_I:%.*]] = sub <2 x i64> %a, [[VMOVL_I_I]] +// NYI: ret <2 x i64> [[SUB_I]] +// uint64x2_t test_vsubw_u32(uint64x2_t a, uint32x2_t b) { +// return vsubw_u32(a, b); +// } + +// NYI-LABEL: @test_vsubw_high_s8( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[TMP0:%.*]] = sext <8 x i8> [[SHUFFLE_I_I_I]] to <8 x i16> +// NYI: [[SUB_I:%.*]] = sub <8 x i16> %a, [[TMP0]] +// NYI: ret <8 x i16> [[SUB_I]] +// int16x8_t test_vsubw_high_s8(int16x8_t a, int8x16_t b) { +// return vsubw_high_s8(a, b); +// } + +// NYI-LABEL: @test_vsubw_high_s16( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = sext <4 x i16> [[SHUFFLE_I_I_I]] to <4 x i32> +// NYI: [[SUB_I:%.*]] = sub <4 x i32> %a, [[TMP1]] +// NYI: ret <4 x i32> [[SUB_I]] +// int32x4_t test_vsubw_high_s16(int32x4_t a, int16x8_t b) { +// return vsubw_high_s16(a, b); +// } + +// NYI-LABEL: @test_vsubw_high_s32( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = sext <2 x i32> [[SHUFFLE_I_I_I]] to <2 x i64> +// NYI: [[SUB_I:%.*]] = sub <2 x i64> %a, [[TMP1]] +// NYI: ret <2 x i64> [[SUB_I]] +// int64x2_t test_vsubw_high_s32(int64x2_t a, int32x4_t b) { +// return vsubw_high_s32(a, b); +// } + +// NYI-LABEL: @test_vsubw_high_u8( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[TMP0:%.*]] = zext <8 x i8> [[SHUFFLE_I_I_I]] to <8 x i16> +// NYI: [[SUB_I:%.*]] = sub <8 x i16> %a, [[TMP0]] +// NYI: ret <8 x i16> [[SUB_I]] +// uint16x8_t test_vsubw_high_u8(uint16x8_t a, uint8x16_t b) { +// return vsubw_high_u8(a, b); +// } + +// NYI-LABEL: @test_vsubw_high_u16( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = zext <4 x i16> [[SHUFFLE_I_I_I]] to <4 x i32> +// NYI: [[SUB_I:%.*]] = sub <4 x i32> %a, [[TMP1]] +// NYI: ret <4 x i32> [[SUB_I]] +// uint32x4_t test_vsubw_high_u16(uint32x4_t a, uint16x8_t b) { +// return vsubw_high_u16(a, b); +// } + +// NYI-LABEL: @test_vsubw_high_u32( +// NYI: [[SHUFFLE_I_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = zext <2 x i32> [[SHUFFLE_I_I_I]] to <2 x i64> +// NYI: [[SUB_I:%.*]] = sub <2 x i64> %a, [[TMP1]] +// NYI: ret <2 x i64> [[SUB_I]] +// uint64x2_t test_vsubw_high_u32(uint64x2_t a, uint32x4_t b) { +// return vsubw_high_u32(a, b); +// } + +// NYI-LABEL: @test_vaddhn_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VADDHN_I:%.*]] = add <8 x i16> %a, %b +// NYI: [[VADDHN1_I:%.*]] = lshr <8 x i16> [[VADDHN_I]], +// NYI: [[VADDHN2_I:%.*]] = trunc <8 x i16> [[VADDHN1_I]] to <8 x i8> +// NYI: ret <8 x i8> [[VADDHN2_I]] +// int8x8_t test_vaddhn_s16(int16x8_t a, int16x8_t b) { +// return vaddhn_s16(a, b); +// } + +// NYI-LABEL: @test_vaddhn_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VADDHN_I:%.*]] = add <4 x i32> %a, %b +// NYI: [[VADDHN1_I:%.*]] = lshr <4 x i32> [[VADDHN_I]], +// NYI: [[VADDHN2_I:%.*]] = trunc <4 x i32> [[VADDHN1_I]] to <4 x i16> +// NYI: ret <4 x i16> [[VADDHN2_I]] +// int16x4_t test_vaddhn_s32(int32x4_t a, int32x4_t b) { +// return vaddhn_s32(a, b); +// } + +// NYI-LABEL: @test_vaddhn_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VADDHN_I:%.*]] = add <2 x i64> %a, %b +// NYI: [[VADDHN1_I:%.*]] = lshr <2 x i64> [[VADDHN_I]], +// NYI: [[VADDHN2_I:%.*]] = trunc <2 x i64> [[VADDHN1_I]] to <2 x i32> +// NYI: ret <2 x i32> [[VADDHN2_I]] +// int32x2_t test_vaddhn_s64(int64x2_t a, int64x2_t b) { +// return vaddhn_s64(a, b); +// } + +// NYI-LABEL: @test_vaddhn_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VADDHN_I:%.*]] = add <8 x i16> %a, %b +// NYI: [[VADDHN1_I:%.*]] = lshr <8 x i16> [[VADDHN_I]], +// NYI: [[VADDHN2_I:%.*]] = trunc <8 x i16> [[VADDHN1_I]] to <8 x i8> +// NYI: ret <8 x i8> [[VADDHN2_I]] +// uint8x8_t test_vaddhn_u16(uint16x8_t a, uint16x8_t b) { +// return vaddhn_u16(a, b); +// } + +// NYI-LABEL: @test_vaddhn_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VADDHN_I:%.*]] = add <4 x i32> %a, %b +// NYI: [[VADDHN1_I:%.*]] = lshr <4 x i32> [[VADDHN_I]], +// NYI: [[VADDHN2_I:%.*]] = trunc <4 x i32> [[VADDHN1_I]] to <4 x i16> +// NYI: ret <4 x i16> [[VADDHN2_I]] +// uint16x4_t test_vaddhn_u32(uint32x4_t a, uint32x4_t b) { +// return vaddhn_u32(a, b); +// } + +// NYI-LABEL: @test_vaddhn_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VADDHN_I:%.*]] = add <2 x i64> %a, %b +// NYI: [[VADDHN1_I:%.*]] = lshr <2 x i64> [[VADDHN_I]], +// NYI: [[VADDHN2_I:%.*]] = trunc <2 x i64> [[VADDHN1_I]] to <2 x i32> +// NYI: ret <2 x i32> [[VADDHN2_I]] +// uint32x2_t test_vaddhn_u64(uint64x2_t a, uint64x2_t b) { +// return vaddhn_u64(a, b); +// } + +// NYI-LABEL: @test_vaddhn_high_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VADDHN_I_I:%.*]] = add <8 x i16> %a, %b +// NYI: [[VADDHN1_I_I:%.*]] = lshr <8 x i16> [[VADDHN_I_I]], +// NYI: [[VADDHN2_I_I:%.*]] = trunc <8 x i16> [[VADDHN1_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %r, <8 x i8> [[VADDHN2_I_I]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I_I]] +// int8x16_t test_vaddhn_high_s16(int8x8_t r, int16x8_t a, int16x8_t b) { +// return vaddhn_high_s16(r, a, b); +// } + +// NYI-LABEL: @test_vaddhn_high_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VADDHN_I_I:%.*]] = add <4 x i32> %a, %b +// NYI: [[VADDHN1_I_I:%.*]] = lshr <4 x i32> [[VADDHN_I_I]], +// NYI: [[VADDHN2_I_I:%.*]] = trunc <4 x i32> [[VADDHN1_I_I]] to <4 x i16> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %r, <4 x i16> [[VADDHN2_I_I]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I_I]] +// int16x8_t test_vaddhn_high_s32(int16x4_t r, int32x4_t a, int32x4_t b) { +// return vaddhn_high_s32(r, a, b); +// } + +// NYI-LABEL: @test_vaddhn_high_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VADDHN_I_I:%.*]] = add <2 x i64> %a, %b +// NYI: [[VADDHN1_I_I:%.*]] = lshr <2 x i64> [[VADDHN_I_I]], +// NYI: [[VADDHN2_I_I:%.*]] = trunc <2 x i64> [[VADDHN1_I_I]] to <2 x i32> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %r, <2 x i32> [[VADDHN2_I_I]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I_I]] +// int32x4_t test_vaddhn_high_s64(int32x2_t r, int64x2_t a, int64x2_t b) { +// return vaddhn_high_s64(r, a, b); +// } + +// NYI-LABEL: @test_vaddhn_high_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VADDHN_I_I:%.*]] = add <8 x i16> %a, %b +// NYI: [[VADDHN1_I_I:%.*]] = lshr <8 x i16> [[VADDHN_I_I]], +// NYI: [[VADDHN2_I_I:%.*]] = trunc <8 x i16> [[VADDHN1_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %r, <8 x i8> [[VADDHN2_I_I]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I_I]] +// uint8x16_t test_vaddhn_high_u16(uint8x8_t r, uint16x8_t a, uint16x8_t b) { +// return vaddhn_high_u16(r, a, b); +// } + +// NYI-LABEL: @test_vaddhn_high_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VADDHN_I_I:%.*]] = add <4 x i32> %a, %b +// NYI: [[VADDHN1_I_I:%.*]] = lshr <4 x i32> [[VADDHN_I_I]], +// NYI: [[VADDHN2_I_I:%.*]] = trunc <4 x i32> [[VADDHN1_I_I]] to <4 x i16> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %r, <4 x i16> [[VADDHN2_I_I]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I_I]] +// uint16x8_t test_vaddhn_high_u32(uint16x4_t r, uint32x4_t a, uint32x4_t b) { +// return vaddhn_high_u32(r, a, b); +// } + +// NYI-LABEL: @test_vaddhn_high_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VADDHN_I_I:%.*]] = add <2 x i64> %a, %b +// NYI: [[VADDHN1_I_I:%.*]] = lshr <2 x i64> [[VADDHN_I_I]], +// NYI: [[VADDHN2_I_I:%.*]] = trunc <2 x i64> [[VADDHN1_I_I]] to <2 x i32> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %r, <2 x i32> [[VADDHN2_I_I]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I_I]] +// uint32x4_t test_vaddhn_high_u64(uint32x2_t r, uint64x2_t a, uint64x2_t b) { +// return vaddhn_high_u64(r, a, b); +// } + +// NYI-LABEL: @test_vraddhn_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i8> [[VRADDHN_V2_I]] +// int8x8_t test_vraddhn_s16(int16x8_t a, int16x8_t b) { +// return vraddhn_s16(a, b); +// } + +// NYI-LABEL: @test_vraddhn_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VRADDHN_V3_I:%.*]] = bitcast <4 x i16> [[VRADDHN_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VRADDHN_V2_I]] +// int16x4_t test_vraddhn_s32(int32x4_t a, int32x4_t b) { +// return vraddhn_s32(a, b); +// } + +// NYI-LABEL: @test_vraddhn_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VRADDHN_V3_I:%.*]] = bitcast <2 x i32> [[VRADDHN_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VRADDHN_V2_I]] +// int32x2_t test_vraddhn_s64(int64x2_t a, int64x2_t b) { +// return vraddhn_s64(a, b); +// } + +// NYI-LABEL: @test_vraddhn_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i8> [[VRADDHN_V2_I]] +// uint8x8_t test_vraddhn_u16(uint16x8_t a, uint16x8_t b) { +// return vraddhn_u16(a, b); +// } + +// NYI-LABEL: @test_vraddhn_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VRADDHN_V3_I:%.*]] = bitcast <4 x i16> [[VRADDHN_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VRADDHN_V2_I]] +// uint16x4_t test_vraddhn_u32(uint32x4_t a, uint32x4_t b) { +// return vraddhn_u32(a, b); +// } + +// NYI-LABEL: @test_vraddhn_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VRADDHN_V3_I:%.*]] = bitcast <2 x i32> [[VRADDHN_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VRADDHN_V2_I]] +// uint32x2_t test_vraddhn_u64(uint64x2_t a, uint64x2_t b) { +// return vraddhn_u64(a, b); +// } + +// NYI-LABEL: @test_vraddhn_high_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b) +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %r, <8 x i8> [[VRADDHN_V2_I_I]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I_I]] +// int8x16_t test_vraddhn_high_s16(int8x8_t r, int16x8_t a, int16x8_t b) { +// return vraddhn_high_s16(r, a, b); +// } + +// NYI-LABEL: @test_vraddhn_high_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VRADDHN_V3_I_I:%.*]] = bitcast <4 x i16> [[VRADDHN_V2_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %r, <4 x i16> [[VRADDHN_V2_I_I]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I_I]] +// int16x8_t test_vraddhn_high_s32(int16x4_t r, int32x4_t a, int32x4_t b) { +// return vraddhn_high_s32(r, a, b); +// } + +// NYI-LABEL: @test_vraddhn_high_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VRADDHN_V3_I_I:%.*]] = bitcast <2 x i32> [[VRADDHN_V2_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %r, <2 x i32> [[VRADDHN_V2_I_I]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I_I]] +// int32x4_t test_vraddhn_high_s64(int32x2_t r, int64x2_t a, int64x2_t b) { +// return vraddhn_high_s64(r, a, b); +// } + +// NYI-LABEL: @test_vraddhn_high_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b) +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %r, <8 x i8> [[VRADDHN_V2_I_I]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I_I]] +// uint8x16_t test_vraddhn_high_u16(uint8x8_t r, uint16x8_t a, uint16x8_t b) { +// return vraddhn_high_u16(r, a, b); +// } + +// NYI-LABEL: @test_vraddhn_high_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VRADDHN_V3_I_I:%.*]] = bitcast <4 x i16> [[VRADDHN_V2_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %r, <4 x i16> [[VRADDHN_V2_I_I]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I_I]] +// uint16x8_t test_vraddhn_high_u32(uint16x4_t r, uint32x4_t a, uint32x4_t b) { +// return vraddhn_high_u32(r, a, b); +// } + +// NYI-LABEL: @test_vraddhn_high_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRADDHN_V2_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VRADDHN_V3_I_I:%.*]] = bitcast <2 x i32> [[VRADDHN_V2_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %r, <2 x i32> [[VRADDHN_V2_I_I]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I_I]] +// uint32x4_t test_vraddhn_high_u64(uint32x2_t r, uint64x2_t a, uint64x2_t b) { +// return vraddhn_high_u64(r, a, b); +// } + +// NYI-LABEL: @test_vsubhn_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSUBHN_I:%.*]] = sub <8 x i16> %a, %b +// NYI: [[VSUBHN1_I:%.*]] = lshr <8 x i16> [[VSUBHN_I]], +// NYI: [[VSUBHN2_I:%.*]] = trunc <8 x i16> [[VSUBHN1_I]] to <8 x i8> +// NYI: ret <8 x i8> [[VSUBHN2_I]] +// int8x8_t test_vsubhn_s16(int16x8_t a, int16x8_t b) { +// return vsubhn_s16(a, b); +// } + +// NYI-LABEL: @test_vsubhn_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSUBHN_I:%.*]] = sub <4 x i32> %a, %b +// NYI: [[VSUBHN1_I:%.*]] = lshr <4 x i32> [[VSUBHN_I]], +// NYI: [[VSUBHN2_I:%.*]] = trunc <4 x i32> [[VSUBHN1_I]] to <4 x i16> +// NYI: ret <4 x i16> [[VSUBHN2_I]] +// int16x4_t test_vsubhn_s32(int32x4_t a, int32x4_t b) { +// return vsubhn_s32(a, b); +// } + +// NYI-LABEL: @test_vsubhn_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSUBHN_I:%.*]] = sub <2 x i64> %a, %b +// NYI: [[VSUBHN1_I:%.*]] = lshr <2 x i64> [[VSUBHN_I]], +// NYI: [[VSUBHN2_I:%.*]] = trunc <2 x i64> [[VSUBHN1_I]] to <2 x i32> +// NYI: ret <2 x i32> [[VSUBHN2_I]] +// int32x2_t test_vsubhn_s64(int64x2_t a, int64x2_t b) { +// return vsubhn_s64(a, b); +// } + +// NYI-LABEL: @test_vsubhn_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSUBHN_I:%.*]] = sub <8 x i16> %a, %b +// NYI: [[VSUBHN1_I:%.*]] = lshr <8 x i16> [[VSUBHN_I]], +// NYI: [[VSUBHN2_I:%.*]] = trunc <8 x i16> [[VSUBHN1_I]] to <8 x i8> +// NYI: ret <8 x i8> [[VSUBHN2_I]] +// uint8x8_t test_vsubhn_u16(uint16x8_t a, uint16x8_t b) { +// return vsubhn_u16(a, b); +// } + +// NYI-LABEL: @test_vsubhn_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSUBHN_I:%.*]] = sub <4 x i32> %a, %b +// NYI: [[VSUBHN1_I:%.*]] = lshr <4 x i32> [[VSUBHN_I]], +// NYI: [[VSUBHN2_I:%.*]] = trunc <4 x i32> [[VSUBHN1_I]] to <4 x i16> +// NYI: ret <4 x i16> [[VSUBHN2_I]] +// uint16x4_t test_vsubhn_u32(uint32x4_t a, uint32x4_t b) { +// return vsubhn_u32(a, b); +// } + +// NYI-LABEL: @test_vsubhn_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSUBHN_I:%.*]] = sub <2 x i64> %a, %b +// NYI: [[VSUBHN1_I:%.*]] = lshr <2 x i64> [[VSUBHN_I]], +// NYI: [[VSUBHN2_I:%.*]] = trunc <2 x i64> [[VSUBHN1_I]] to <2 x i32> +// NYI: ret <2 x i32> [[VSUBHN2_I]] +// uint32x2_t test_vsubhn_u64(uint64x2_t a, uint64x2_t b) { +// return vsubhn_u64(a, b); +// } + +// NYI-LABEL: @test_vsubhn_high_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSUBHN_I_I:%.*]] = sub <8 x i16> %a, %b +// NYI: [[VSUBHN1_I_I:%.*]] = lshr <8 x i16> [[VSUBHN_I_I]], +// NYI: [[VSUBHN2_I_I:%.*]] = trunc <8 x i16> [[VSUBHN1_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %r, <8 x i8> [[VSUBHN2_I_I]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I_I]] +// int8x16_t test_vsubhn_high_s16(int8x8_t r, int16x8_t a, int16x8_t b) { +// return vsubhn_high_s16(r, a, b); +// } + +// NYI-LABEL: @test_vsubhn_high_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSUBHN_I_I:%.*]] = sub <4 x i32> %a, %b +// NYI: [[VSUBHN1_I_I:%.*]] = lshr <4 x i32> [[VSUBHN_I_I]], +// NYI: [[VSUBHN2_I_I:%.*]] = trunc <4 x i32> [[VSUBHN1_I_I]] to <4 x i16> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %r, <4 x i16> [[VSUBHN2_I_I]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I_I]] +// int16x8_t test_vsubhn_high_s32(int16x4_t r, int32x4_t a, int32x4_t b) { +// return vsubhn_high_s32(r, a, b); +// } + +// NYI-LABEL: @test_vsubhn_high_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSUBHN_I_I:%.*]] = sub <2 x i64> %a, %b +// NYI: [[VSUBHN1_I_I:%.*]] = lshr <2 x i64> [[VSUBHN_I_I]], +// NYI: [[VSUBHN2_I_I:%.*]] = trunc <2 x i64> [[VSUBHN1_I_I]] to <2 x i32> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %r, <2 x i32> [[VSUBHN2_I_I]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I_I]] +// int32x4_t test_vsubhn_high_s64(int32x2_t r, int64x2_t a, int64x2_t b) { +// return vsubhn_high_s64(r, a, b); +// } + +// NYI-LABEL: @test_vsubhn_high_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSUBHN_I_I:%.*]] = sub <8 x i16> %a, %b +// NYI: [[VSUBHN1_I_I:%.*]] = lshr <8 x i16> [[VSUBHN_I_I]], +// NYI: [[VSUBHN2_I_I:%.*]] = trunc <8 x i16> [[VSUBHN1_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %r, <8 x i8> [[VSUBHN2_I_I]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I_I]] +// uint8x16_t test_vsubhn_high_u16(uint8x8_t r, uint16x8_t a, uint16x8_t b) { +// return vsubhn_high_u16(r, a, b); +// } + +// NYI-LABEL: @test_vsubhn_high_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSUBHN_I_I:%.*]] = sub <4 x i32> %a, %b +// NYI: [[VSUBHN1_I_I:%.*]] = lshr <4 x i32> [[VSUBHN_I_I]], +// NYI: [[VSUBHN2_I_I:%.*]] = trunc <4 x i32> [[VSUBHN1_I_I]] to <4 x i16> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %r, <4 x i16> [[VSUBHN2_I_I]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I_I]] +// uint16x8_t test_vsubhn_high_u32(uint16x4_t r, uint32x4_t a, uint32x4_t b) { +// return vsubhn_high_u32(r, a, b); +// } + +// NYI-LABEL: @test_vsubhn_high_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSUBHN_I_I:%.*]] = sub <2 x i64> %a, %b +// NYI: [[VSUBHN1_I_I:%.*]] = lshr <2 x i64> [[VSUBHN_I_I]], +// NYI: [[VSUBHN2_I_I:%.*]] = trunc <2 x i64> [[VSUBHN1_I_I]] to <2 x i32> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %r, <2 x i32> [[VSUBHN2_I_I]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I_I]] +// uint32x4_t test_vsubhn_high_u64(uint32x2_t r, uint64x2_t a, uint64x2_t b) { +// return vsubhn_high_u64(r, a, b); +// } + +// NYI-LABEL: @test_vrsubhn_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i8> [[VRSUBHN_V2_I]] +// int8x8_t test_vrsubhn_s16(int16x8_t a, int16x8_t b) { +// return vrsubhn_s16(a, b); +// } + +// NYI-LABEL: @test_vrsubhn_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VRSUBHN_V3_I:%.*]] = bitcast <4 x i16> [[VRSUBHN_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VRSUBHN_V2_I]] +// int16x4_t test_vrsubhn_s32(int32x4_t a, int32x4_t b) { +// return vrsubhn_s32(a, b); +// } + +// NYI-LABEL: @test_vrsubhn_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VRSUBHN_V3_I:%.*]] = bitcast <2 x i32> [[VRSUBHN_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VRSUBHN_V2_I]] +// int32x2_t test_vrsubhn_s64(int64x2_t a, int64x2_t b) { +// return vrsubhn_s64(a, b); +// } + +// NYI-LABEL: @test_vrsubhn_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i8> [[VRSUBHN_V2_I]] +// uint8x8_t test_vrsubhn_u16(uint16x8_t a, uint16x8_t b) { +// return vrsubhn_u16(a, b); +// } + +// NYI-LABEL: @test_vrsubhn_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VRSUBHN_V3_I:%.*]] = bitcast <4 x i16> [[VRSUBHN_V2_I]] to <8 x i8> +// NYI: ret <4 x i16> [[VRSUBHN_V2_I]] +// uint16x4_t test_vrsubhn_u32(uint32x4_t a, uint32x4_t b) { +// return vrsubhn_u32(a, b); +// } + +// NYI-LABEL: @test_vrsubhn_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VRSUBHN_V3_I:%.*]] = bitcast <2 x i32> [[VRSUBHN_V2_I]] to <8 x i8> +// NYI: ret <2 x i32> [[VRSUBHN_V2_I]] +// uint32x2_t test_vrsubhn_u64(uint64x2_t a, uint64x2_t b) { +// return vrsubhn_u64(a, b); +// } + +// NYI-LABEL: @test_vrsubhn_high_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b) +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %r, <8 x i8> [[VRSUBHN_V2_I_I]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I_I]] +// int8x16_t test_vrsubhn_high_s16(int8x8_t r, int16x8_t a, int16x8_t b) { +// return vrsubhn_high_s16(r, a, b); +// } + +// NYI-LABEL: @test_vrsubhn_high_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VRSUBHN_V3_I_I:%.*]] = bitcast <4 x i16> [[VRSUBHN_V2_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %r, <4 x i16> [[VRSUBHN_V2_I_I]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I_I]] +// int16x8_t test_vrsubhn_high_s32(int16x4_t r, int32x4_t a, int32x4_t b) { +// return vrsubhn_high_s32(r, a, b); +// } + +// NYI-LABEL: @test_vrsubhn_high_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VRSUBHN_V3_I_I:%.*]] = bitcast <2 x i32> [[VRSUBHN_V2_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %r, <2 x i32> [[VRSUBHN_V2_I_I]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I_I]] +// int32x4_t test_vrsubhn_high_s64(int32x2_t r, int64x2_t a, int64x2_t b) { +// return vrsubhn_high_s64(r, a, b); +// } + +// NYI-LABEL: @test_vrsubhn_high_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b) +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i8> %r, <8 x i8> [[VRSUBHN_V2_I_I]], <16 x i32> +// NYI: ret <16 x i8> [[SHUFFLE_I_I]] +// uint8x16_t test_vrsubhn_high_u16(uint8x8_t r, uint16x8_t a, uint16x8_t b) { +// return vrsubhn_high_u16(r, a, b); +// } + +// NYI-LABEL: @test_vrsubhn_high_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b) +// NYI: [[VRSUBHN_V3_I_I:%.*]] = bitcast <4 x i16> [[VRSUBHN_V2_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i16> %r, <4 x i16> [[VRSUBHN_V2_I_I]], <8 x i32> +// NYI: ret <8 x i16> [[SHUFFLE_I_I]] +// uint16x8_t test_vrsubhn_high_u32(uint16x4_t r, uint32x4_t a, uint32x4_t b) { +// return vrsubhn_high_u32(r, a, b); +// } + +// NYI-LABEL: @test_vrsubhn_high_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VRSUBHN_V2_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VRSUBHN_V3_I_I:%.*]] = bitcast <2 x i32> [[VRSUBHN_V2_I_I]] to <8 x i8> +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i32> %r, <2 x i32> [[VRSUBHN_V2_I_I]], <4 x i32> +// NYI: ret <4 x i32> [[SHUFFLE_I_I]] +// uint32x4_t test_vrsubhn_high_u64(uint32x2_t r, uint64x2_t a, uint64x2_t b) { +// return vrsubhn_high_u64(r, a, b); +// } + +// NYI-LABEL: @test_vabdl_s8( +// NYI: [[VABD_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: [[VMOVL_I_I:%.*]] = zext <8 x i8> [[VABD_I_I]] to <8 x i16> +// NYI: ret <8 x i16> [[VMOVL_I_I]] +// int16x8_t test_vabdl_s8(int8x8_t a, int8x8_t b) { +// return vabdl_s8(a, b); +// } + +// NYI-LABEL: @test_vabdl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[VABD2_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <4 x i16> [[VABD2_I_I]] to <4 x i32> +// NYI: ret <4 x i32> [[VMOVL_I_I]] +// int32x4_t test_vabdl_s16(int16x4_t a, int16x4_t b) { +// return vabdl_s16(a, b); +// } + +// NYI-LABEL: @test_vabdl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[VABD2_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <2 x i32> [[VABD2_I_I]] to <2 x i64> +// NYI: ret <2 x i64> [[VMOVL_I_I]] +// int64x2_t test_vabdl_s32(int32x2_t a, int32x2_t b) { +// return vabdl_s32(a, b); +// } + +// NYI-LABEL: @test_vabdl_u8( +// NYI: [[VABD_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: [[VMOVL_I_I:%.*]] = zext <8 x i8> [[VABD_I_I]] to <8 x i16> +// NYI: ret <8 x i16> [[VMOVL_I_I]] +// uint16x8_t test_vabdl_u8(uint8x8_t a, uint8x8_t b) { +// return vabdl_u8(a, b); +// } + +// NYI-LABEL: @test_vabdl_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[VABD2_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <4 x i16> [[VABD2_I_I]] to <4 x i32> +// NYI: ret <4 x i32> [[VMOVL_I_I]] +// uint32x4_t test_vabdl_u16(uint16x4_t a, uint16x4_t b) { +// return vabdl_u16(a, b); +// } + +// NYI-LABEL: @test_vabdl_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VABD2_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[VABD2_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I:%.*]] = zext <2 x i32> [[VABD2_I_I]] to <2 x i64> +// NYI: ret <2 x i64> [[VMOVL_I_I]] +// uint64x2_t test_vabdl_u32(uint32x2_t a, uint32x2_t b) { +// return vabdl_u32(a, b); +// } + +// NYI-LABEL: @test_vabal_s8( +// NYI: [[VABD_I_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %b, <8 x i8> %c) +// NYI: [[VMOVL_I_I_I:%.*]] = zext <8 x i8> [[VABD_I_I_I]] to <8 x i16> +// NYI: [[ADD_I:%.*]] = add <8 x i16> %a, [[VMOVL_I_I_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// int16x8_t test_vabal_s8(int16x8_t a, int8x8_t b, int8x8_t c) { +// return vabal_s8(a, b, c); +// } + +// NYI-LABEL: @test_vabal_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %c to <8 x i8> +// NYI: [[VABD2_I_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %b, <4 x i16> %c) +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[VABD2_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I:%.*]] = zext <4 x i16> [[VABD2_I_I_I]] to <4 x i32> +// NYI: [[ADD_I:%.*]] = add <4 x i32> %a, [[VMOVL_I_I_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// int32x4_t test_vabal_s16(int32x4_t a, int16x4_t b, int16x4_t c) { +// return vabal_s16(a, b, c); +// } + +// NYI-LABEL: @test_vabal_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %c to <8 x i8> +// NYI: [[VABD2_I_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %b, <2 x i32> %c) +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[VABD2_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I:%.*]] = zext <2 x i32> [[VABD2_I_I_I]] to <2 x i64> +// NYI: [[ADD_I:%.*]] = add <2 x i64> %a, [[VMOVL_I_I_I]] +// NYI: ret <2 x i64> [[ADD_I]] +// int64x2_t test_vabal_s32(int64x2_t a, int32x2_t b, int32x2_t c) { +// return vabal_s32(a, b, c); +// } + +// NYI-LABEL: @test_vabal_u8( +// NYI: [[VABD_I_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %b, <8 x i8> %c) +// NYI: [[VMOVL_I_I_I:%.*]] = zext <8 x i8> [[VABD_I_I_I]] to <8 x i16> +// NYI: [[ADD_I:%.*]] = add <8 x i16> %a, [[VMOVL_I_I_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// uint16x8_t test_vabal_u8(uint16x8_t a, uint8x8_t b, uint8x8_t c) { +// return vabal_u8(a, b, c); +// } + +// NYI-LABEL: @test_vabal_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %c to <8 x i8> +// NYI: [[VABD2_I_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %b, <4 x i16> %c) +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[VABD2_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I:%.*]] = zext <4 x i16> [[VABD2_I_I_I]] to <4 x i32> +// NYI: [[ADD_I:%.*]] = add <4 x i32> %a, [[VMOVL_I_I_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// uint32x4_t test_vabal_u16(uint32x4_t a, uint16x4_t b, uint16x4_t c) { +// return vabal_u16(a, b, c); +// } + +// NYI-LABEL: @test_vabal_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %c to <8 x i8> +// NYI: [[VABD2_I_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %b, <2 x i32> %c) +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[VABD2_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I:%.*]] = zext <2 x i32> [[VABD2_I_I_I]] to <2 x i64> +// NYI: [[ADD_I:%.*]] = add <2 x i64> %a, [[VMOVL_I_I_I]] +// NYI: ret <2 x i64> [[ADD_I]] +// uint64x2_t test_vabal_u32(uint64x2_t a, uint32x2_t b, uint32x2_t c) { +// return vabal_u32(a, b, c); +// } + +// NYI-LABEL: @test_vabdl_high_s8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[VABD_I_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: [[VMOVL_I_I_I:%.*]] = zext <8 x i8> [[VABD_I_I_I]] to <8 x i16> +// NYI: ret <8 x i16> [[VMOVL_I_I_I]] +// int16x8_t test_vabdl_high_s8(int8x16_t a, int8x16_t b) { +// return vabdl_high_s8(a, b); +// } + +// NYI-LABEL: @test_vabdl_high_s16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VABD2_I_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[VABD2_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I:%.*]] = zext <4 x i16> [[VABD2_I_I_I]] to <4 x i32> +// NYI: ret <4 x i32> [[VMOVL_I_I_I]] +// int32x4_t test_vabdl_high_s16(int16x8_t a, int16x8_t b) { +// return vabdl_high_s16(a, b); +// } + +// NYI-LABEL: @test_vabdl_high_s32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VABD2_I_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[VABD2_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I:%.*]] = zext <2 x i32> [[VABD2_I_I_I]] to <2 x i64> +// NYI: ret <2 x i64> [[VMOVL_I_I_I]] +// int64x2_t test_vabdl_high_s32(int32x4_t a, int32x4_t b) { +// return vabdl_high_s32(a, b); +// } + +// NYI-LABEL: @test_vabdl_high_u8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[VABD_I_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: [[VMOVL_I_I_I:%.*]] = zext <8 x i8> [[VABD_I_I_I]] to <8 x i16> +// NYI: ret <8 x i16> [[VMOVL_I_I_I]] +// uint16x8_t test_vabdl_high_u8(uint8x16_t a, uint8x16_t b) { +// return vabdl_high_u8(a, b); +// } + +// NYI-LABEL: @test_vabdl_high_u16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VABD2_I_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[VABD2_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I:%.*]] = zext <4 x i16> [[VABD2_I_I_I]] to <4 x i32> +// NYI: ret <4 x i32> [[VMOVL_I_I_I]] +// uint32x4_t test_vabdl_high_u16(uint16x8_t a, uint16x8_t b) { +// return vabdl_high_u16(a, b); +// } + +// NYI-LABEL: @test_vabdl_high_u32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VABD2_I_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[VABD2_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I:%.*]] = zext <2 x i32> [[VABD2_I_I_I]] to <2 x i64> +// NYI: ret <2 x i64> [[VMOVL_I_I_I]] +// uint64x2_t test_vabdl_high_u32(uint32x4_t a, uint32x4_t b) { +// return vabdl_high_u32(a, b); +// } + +// NYI-LABEL: @test_vabal_high_s8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %c, <16 x i8> %c, <8 x i32> +// NYI: [[VABD_I_I_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: [[VMOVL_I_I_I_I:%.*]] = zext <8 x i8> [[VABD_I_I_I_I]] to <8 x i16> +// NYI: [[ADD_I_I:%.*]] = add <8 x i16> %a, [[VMOVL_I_I_I_I]] +// NYI: ret <8 x i16> [[ADD_I_I]] +// int16x8_t test_vabal_high_s8(int16x8_t a, int8x16_t b, int8x16_t c) { +// return vabal_high_s8(a, b, c); +// } + +// NYI-LABEL: @test_vabal_high_s16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %c, <8 x i16> %c, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VABD2_I_I_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[VABD2_I_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I_I:%.*]] = zext <4 x i16> [[VABD2_I_I_I_I]] to <4 x i32> +// NYI: [[ADD_I_I:%.*]] = add <4 x i32> %a, [[VMOVL_I_I_I_I]] +// NYI: ret <4 x i32> [[ADD_I_I]] +// int32x4_t test_vabal_high_s16(int32x4_t a, int16x8_t b, int16x8_t c) { +// return vabal_high_s16(a, b, c); +// } + +// NYI-LABEL: @test_vabal_high_s32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %c, <4 x i32> %c, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VABD2_I_I_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[VABD2_I_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I_I:%.*]] = zext <2 x i32> [[VABD2_I_I_I_I]] to <2 x i64> +// NYI: [[ADD_I_I:%.*]] = add <2 x i64> %a, [[VMOVL_I_I_I_I]] +// NYI: ret <2 x i64> [[ADD_I_I]] +// int64x2_t test_vabal_high_s32(int64x2_t a, int32x4_t b, int32x4_t c) { +// return vabal_high_s32(a, b, c); +// } + +// NYI-LABEL: @test_vabal_high_u8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %c, <16 x i8> %c, <8 x i32> +// NYI: [[VABD_I_I_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: [[VMOVL_I_I_I_I:%.*]] = zext <8 x i8> [[VABD_I_I_I_I]] to <8 x i16> +// NYI: [[ADD_I_I:%.*]] = add <8 x i16> %a, [[VMOVL_I_I_I_I]] +// NYI: ret <8 x i16> [[ADD_I_I]] +// uint16x8_t test_vabal_high_u8(uint16x8_t a, uint8x16_t b, uint8x16_t c) { +// return vabal_high_u8(a, b, c); +// } + +// NYI-LABEL: @test_vabal_high_u16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %c, <8 x i16> %c, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VABD2_I_I_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[VABD2_I_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I_I:%.*]] = zext <4 x i16> [[VABD2_I_I_I_I]] to <4 x i32> +// NYI: [[ADD_I_I:%.*]] = add <4 x i32> %a, [[VMOVL_I_I_I_I]] +// NYI: ret <4 x i32> [[ADD_I_I]] +// uint32x4_t test_vabal_high_u16(uint32x4_t a, uint16x8_t b, uint16x8_t c) { +// return vabal_high_u16(a, b, c); +// } + +// NYI-LABEL: @test_vabal_high_u32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %c, <4 x i32> %c, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VABD2_I_I_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[VABD2_I_I_I_I]] to <8 x i8> +// NYI: [[VMOVL_I_I_I_I:%.*]] = zext <2 x i32> [[VABD2_I_I_I_I]] to <2 x i64> +// NYI: [[ADD_I_I:%.*]] = add <2 x i64> %a, [[VMOVL_I_I_I_I]] +// NYI: ret <2 x i64> [[ADD_I_I]] +// uint64x2_t test_vabal_high_u32(uint64x2_t a, uint32x4_t b, uint32x4_t c) { +// return vabal_high_u32(a, b, c); +// } + +// NYI-LABEL: @test_vmull_s8( +// NYI: [[VMULL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i16> [[VMULL_I]] +// int16x8_t test_vmull_s8(int8x8_t a, int8x8_t b) { +// return vmull_s8(a, b); +// } + +// NYI-LABEL: @test_vmull_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMULL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i32> [[VMULL2_I]] +// int32x4_t test_vmull_s16(int16x4_t a, int16x4_t b) { +// return vmull_s16(a, b); +// } + +// NYI-LABEL: @test_vmull_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMULL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i64> [[VMULL2_I]] +// int64x2_t test_vmull_s32(int32x2_t a, int32x2_t b) { +// return vmull_s32(a, b); +// } + +// NYI-LABEL: @test_vmull_u8( +// NYI: [[VMULL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i16> [[VMULL_I]] +// uint16x8_t test_vmull_u8(uint8x8_t a, uint8x8_t b) { +// return vmull_u8(a, b); +// } + +// NYI-LABEL: @test_vmull_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VMULL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i32> [[VMULL2_I]] +// uint32x4_t test_vmull_u16(uint16x4_t a, uint16x4_t b) { +// return vmull_u16(a, b); +// } + +// NYI-LABEL: @test_vmull_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VMULL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i64> [[VMULL2_I]] +// uint64x2_t test_vmull_u32(uint32x2_t a, uint32x2_t b) { +// return vmull_u32(a, b); +// } + +// NYI-LABEL: @test_vmull_high_s8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[VMULL_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: ret <8 x i16> [[VMULL_I_I]] +// int16x8_t test_vmull_high_s8(int8x16_t a, int8x16_t b) { +// return vmull_high_s8(a, b); +// } + +// NYI-LABEL: @test_vmull_high_s16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: ret <4 x i32> [[VMULL2_I_I]] +// int32x4_t test_vmull_high_s16(int16x8_t a, int16x8_t b) { +// return vmull_high_s16(a, b); +// } + +// NYI-LABEL: @test_vmull_high_s32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: ret <2 x i64> [[VMULL2_I_I]] +// int64x2_t test_vmull_high_s32(int32x4_t a, int32x4_t b) { +// return vmull_high_s32(a, b); +// } + +// NYI-LABEL: @test_vmull_high_u8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[VMULL_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: ret <8 x i16> [[VMULL_I_I]] +// uint16x8_t test_vmull_high_u8(uint8x16_t a, uint8x16_t b) { +// return vmull_high_u8(a, b); +// } + +// NYI-LABEL: @test_vmull_high_u16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: ret <4 x i32> [[VMULL2_I_I]] +// uint32x4_t test_vmull_high_u16(uint16x8_t a, uint16x8_t b) { +// return vmull_high_u16(a, b); +// } + +// NYI-LABEL: @test_vmull_high_u32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: ret <2 x i64> [[VMULL2_I_I]] +// uint64x2_t test_vmull_high_u32(uint32x4_t a, uint32x4_t b) { +// return vmull_high_u32(a, b); +// } + +// NYI-LABEL: @test_vmlal_s8( +// NYI: [[VMULL_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %b, <8 x i8> %c) +// NYI: [[ADD_I:%.*]] = add <8 x i16> %a, [[VMULL_I_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// int16x8_t test_vmlal_s8(int16x8_t a, int8x8_t b, int8x8_t c) { +// return vmlal_s8(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %c to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %c) +// NYI: [[ADD_I:%.*]] = add <4 x i32> %a, [[VMULL2_I_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// int32x4_t test_vmlal_s16(int32x4_t a, int16x4_t b, int16x4_t c) { +// return vmlal_s16(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %c to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %c) +// NYI: [[ADD_I:%.*]] = add <2 x i64> %a, [[VMULL2_I_I]] +// NYI: ret <2 x i64> [[ADD_I]] +// int64x2_t test_vmlal_s32(int64x2_t a, int32x2_t b, int32x2_t c) { +// return vmlal_s32(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_u8( +// NYI: [[VMULL_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %b, <8 x i8> %c) +// NYI: [[ADD_I:%.*]] = add <8 x i16> %a, [[VMULL_I_I]] +// NYI: ret <8 x i16> [[ADD_I]] +// uint16x8_t test_vmlal_u8(uint16x8_t a, uint8x8_t b, uint8x8_t c) { +// return vmlal_u8(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %c to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %c) +// NYI: [[ADD_I:%.*]] = add <4 x i32> %a, [[VMULL2_I_I]] +// NYI: ret <4 x i32> [[ADD_I]] +// uint32x4_t test_vmlal_u16(uint32x4_t a, uint16x4_t b, uint16x4_t c) { +// return vmlal_u16(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %c to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %c) +// NYI: [[ADD_I:%.*]] = add <2 x i64> %a, [[VMULL2_I_I]] +// NYI: ret <2 x i64> [[ADD_I]] +// uint64x2_t test_vmlal_u32(uint64x2_t a, uint32x2_t b, uint32x2_t c) { +// return vmlal_u32(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_high_s8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %c, <16 x i8> %c, <8 x i32> +// NYI: [[VMULL_I_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: [[ADD_I_I:%.*]] = add <8 x i16> %a, [[VMULL_I_I_I]] +// NYI: ret <8 x i16> [[ADD_I_I]] +// int16x8_t test_vmlal_high_s8(int16x8_t a, int8x16_t b, int8x16_t c) { +// return vmlal_high_s8(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_high_s16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %c, <8 x i16> %c, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[ADD_I_I:%.*]] = add <4 x i32> %a, [[VMULL2_I_I_I]] +// NYI: ret <4 x i32> [[ADD_I_I]] +// int32x4_t test_vmlal_high_s16(int32x4_t a, int16x8_t b, int16x8_t c) { +// return vmlal_high_s16(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_high_s32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %c, <4 x i32> %c, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[ADD_I_I:%.*]] = add <2 x i64> %a, [[VMULL2_I_I_I]] +// NYI: ret <2 x i64> [[ADD_I_I]] +// int64x2_t test_vmlal_high_s32(int64x2_t a, int32x4_t b, int32x4_t c) { +// return vmlal_high_s32(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_high_u8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %c, <16 x i8> %c, <8 x i32> +// NYI: [[VMULL_I_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: [[ADD_I_I:%.*]] = add <8 x i16> %a, [[VMULL_I_I_I]] +// NYI: ret <8 x i16> [[ADD_I_I]] +// uint16x8_t test_vmlal_high_u8(uint16x8_t a, uint8x16_t b, uint8x16_t c) { +// return vmlal_high_u8(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_high_u16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %c, <8 x i16> %c, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[ADD_I_I:%.*]] = add <4 x i32> %a, [[VMULL2_I_I_I]] +// NYI: ret <4 x i32> [[ADD_I_I]] +// uint32x4_t test_vmlal_high_u16(uint32x4_t a, uint16x8_t b, uint16x8_t c) { +// return vmlal_high_u16(a, b, c); +// } + +// NYI-LABEL: @test_vmlal_high_u32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %c, <4 x i32> %c, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[ADD_I_I:%.*]] = add <2 x i64> %a, [[VMULL2_I_I_I]] +// NYI: ret <2 x i64> [[ADD_I_I]] +// uint64x2_t test_vmlal_high_u32(uint64x2_t a, uint32x4_t b, uint32x4_t c) { +// return vmlal_high_u32(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_s8( +// NYI: [[VMULL_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %b, <8 x i8> %c) +// NYI: [[SUB_I:%.*]] = sub <8 x i16> %a, [[VMULL_I_I]] +// NYI: ret <8 x i16> [[SUB_I]] +// int16x8_t test_vmlsl_s8(int16x8_t a, int8x8_t b, int8x8_t c) { +// return vmlsl_s8(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %c to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %c) +// NYI: [[SUB_I:%.*]] = sub <4 x i32> %a, [[VMULL2_I_I]] +// NYI: ret <4 x i32> [[SUB_I]] +// int32x4_t test_vmlsl_s16(int32x4_t a, int16x4_t b, int16x4_t c) { +// return vmlsl_s16(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %c to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %c) +// NYI: [[SUB_I:%.*]] = sub <2 x i64> %a, [[VMULL2_I_I]] +// NYI: ret <2 x i64> [[SUB_I]] +// int64x2_t test_vmlsl_s32(int64x2_t a, int32x2_t b, int32x2_t c) { +// return vmlsl_s32(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_u8( +// NYI: [[VMULL_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %b, <8 x i8> %c) +// NYI: [[SUB_I:%.*]] = sub <8 x i16> %a, [[VMULL_I_I]] +// NYI: ret <8 x i16> [[SUB_I]] +// uint16x8_t test_vmlsl_u8(uint16x8_t a, uint8x8_t b, uint8x8_t c) { +// return vmlsl_u8(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %c to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %c) +// NYI: [[SUB_I:%.*]] = sub <4 x i32> %a, [[VMULL2_I_I]] +// NYI: ret <4 x i32> [[SUB_I]] +// uint32x4_t test_vmlsl_u16(uint32x4_t a, uint16x4_t b, uint16x4_t c) { +// return vmlsl_u16(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %c to <8 x i8> +// NYI: [[VMULL2_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %c) +// NYI: [[SUB_I:%.*]] = sub <2 x i64> %a, [[VMULL2_I_I]] +// NYI: ret <2 x i64> [[SUB_I]] +// uint64x2_t test_vmlsl_u32(uint64x2_t a, uint32x2_t b, uint32x2_t c) { +// return vmlsl_u32(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_high_s8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %c, <16 x i8> %c, <8 x i32> +// NYI: [[VMULL_I_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: [[SUB_I_I:%.*]] = sub <8 x i16> %a, [[VMULL_I_I_I]] +// NYI: ret <8 x i16> [[SUB_I_I]] +// int16x8_t test_vmlsl_high_s8(int16x8_t a, int8x16_t b, int8x16_t c) { +// return vmlsl_high_s8(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_high_s16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %c, <8 x i16> %c, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[SUB_I_I:%.*]] = sub <4 x i32> %a, [[VMULL2_I_I_I]] +// NYI: ret <4 x i32> [[SUB_I_I]] +// int32x4_t test_vmlsl_high_s16(int32x4_t a, int16x8_t b, int16x8_t c) { +// return vmlsl_high_s16(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_high_s32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %c, <4 x i32> %c, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[SUB_I_I:%.*]] = sub <2 x i64> %a, [[VMULL2_I_I_I]] +// NYI: ret <2 x i64> [[SUB_I_I]] +// int64x2_t test_vmlsl_high_s32(int64x2_t a, int32x4_t b, int32x4_t c) { +// return vmlsl_high_s32(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_high_u8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %c, <16 x i8> %c, <8 x i32> +// NYI: [[VMULL_I_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: [[SUB_I_I:%.*]] = sub <8 x i16> %a, [[VMULL_I_I_I]] +// NYI: ret <8 x i16> [[SUB_I_I]] +// uint16x8_t test_vmlsl_high_u8(uint16x8_t a, uint8x16_t b, uint8x16_t c) { +// return vmlsl_high_u8(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_high_u16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %c, <8 x i16> %c, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[SUB_I_I:%.*]] = sub <4 x i32> %a, [[VMULL2_I_I_I]] +// NYI: ret <4 x i32> [[SUB_I_I]] +// uint32x4_t test_vmlsl_high_u16(uint32x4_t a, uint16x8_t b, uint16x8_t c) { +// return vmlsl_high_u16(a, b, c); +// } + +// NYI-LABEL: @test_vmlsl_high_u32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %c, <4 x i32> %c, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VMULL2_I_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[SUB_I_I:%.*]] = sub <2 x i64> %a, [[VMULL2_I_I_I]] +// NYI: ret <2 x i64> [[SUB_I_I]] +// uint64x2_t test_vmlsl_high_u32(uint64x2_t a, uint32x4_t b, uint32x4_t c) { +// return vmlsl_high_u32(a, b, c); +// } + +// NYI-LABEL: @test_vqdmull_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VQDMULL_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %b) +// NYI: [[VQDMULL_V3_I:%.*]] = bitcast <4 x i32> [[VQDMULL_V2_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQDMULL_V2_I]] +// int32x4_t test_vqdmull_s16(int16x4_t a, int16x4_t b) { +// return vqdmull_s16(a, b); +// } + +// NYI-LABEL: @test_vqdmull_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VQDMULL_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %b) +// NYI: [[VQDMULL_V3_I:%.*]] = bitcast <2 x i64> [[VQDMULL_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VQDMULL_V2_I]] +// int64x2_t test_vqdmull_s32(int32x2_t a, int32x2_t b) { +// return vqdmull_s32(a, b); +// } + +// NYI-LABEL: @test_vqdmlal_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> %c to <8 x i8> +// NYI: [[VQDMLAL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %c) +// NYI: [[VQDMLAL_V3_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> [[VQDMLAL2_I]]) +// NYI: ret <4 x i32> [[VQDMLAL_V3_I]] +// int32x4_t test_vqdmlal_s16(int32x4_t a, int16x4_t b, int16x4_t c) { +// return vqdmlal_s16(a, b, c); +// } + +// NYI-LABEL: @test_vqdmlal_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> %c to <8 x i8> +// NYI: [[VQDMLAL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %c) +// NYI: [[VQDMLAL_V3_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> [[VQDMLAL2_I]]) +// NYI: ret <2 x i64> [[VQDMLAL_V3_I]] +// int64x2_t test_vqdmlal_s32(int64x2_t a, int32x2_t b, int32x2_t c) { +// return vqdmlal_s32(a, b, c); +// } + +// NYI-LABEL: @test_vqdmlsl_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> %c to <8 x i8> +// NYI: [[VQDMLAL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %c) +// NYI: [[VQDMLSL_V3_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> [[VQDMLAL2_I]]) +// NYI: ret <4 x i32> [[VQDMLSL_V3_I]] +// int32x4_t test_vqdmlsl_s16(int32x4_t a, int16x4_t b, int16x4_t c) { +// return vqdmlsl_s16(a, b, c); +// } + +// NYI-LABEL: @test_vqdmlsl_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> %c to <8 x i8> +// NYI: [[VQDMLAL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %c) +// NYI: [[VQDMLSL_V3_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> [[VQDMLAL2_I]]) +// NYI: ret <2 x i64> [[VQDMLSL_V3_I]] +// int64x2_t test_vqdmlsl_s32(int64x2_t a, int32x2_t b, int32x2_t c) { +// return vqdmlsl_s32(a, b, c); +// } + +// NYI-LABEL: @test_vqdmull_high_s16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VQDMULL_V2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[VQDMULL_V3_I_I:%.*]] = bitcast <4 x i32> [[VQDMULL_V2_I_I]] to <16 x i8> +// NYI: ret <4 x i32> [[VQDMULL_V2_I_I]] +// int32x4_t test_vqdmull_high_s16(int16x8_t a, int16x8_t b) { +// return vqdmull_high_s16(a, b); +// } + +// NYI-LABEL: @test_vqdmull_high_s32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VQDMULL_V2_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[VQDMULL_V3_I_I:%.*]] = bitcast <2 x i64> [[VQDMULL_V2_I_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VQDMULL_V2_I_I]] +// int64x2_t test_vqdmull_high_s32(int32x4_t a, int32x4_t b) { +// return vqdmull_high_s32(a, b); +// } + +// NYI-LABEL: @test_vqdmlal_high_s16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %c, <8 x i16> %c, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VQDMLAL2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[VQDMLAL_V3_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> [[VQDMLAL2_I_I]]) +// NYI: ret <4 x i32> [[VQDMLAL_V3_I_I]] +// int32x4_t test_vqdmlal_high_s16(int32x4_t a, int16x8_t b, int16x8_t c) { +// return vqdmlal_high_s16(a, b, c); +// } + +// NYI-LABEL: @test_vqdmlal_high_s32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %c, <4 x i32> %c, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VQDMLAL2_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[VQDMLAL_V3_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> [[VQDMLAL2_I_I]]) +// NYI: ret <2 x i64> [[VQDMLAL_V3_I_I]] +// int64x2_t test_vqdmlal_high_s32(int64x2_t a, int32x4_t b, int32x4_t c) { +// return vqdmlal_high_s32(a, b, c); +// } + +// NYI-LABEL: @test_vqdmlsl_high_s16( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %b, <8 x i16> %b, <4 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <8 x i16> %c, <8 x i16> %c, <4 x i32> +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <4 x i16> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VQDMLAL2_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[SHUFFLE_I_I]], <4 x i16> [[SHUFFLE_I7_I]]) +// NYI: [[VQDMLSL_V3_I_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> [[VQDMLAL2_I_I]]) +// NYI: ret <4 x i32> [[VQDMLSL_V3_I_I]] +// int32x4_t test_vqdmlsl_high_s16(int32x4_t a, int16x8_t b, int16x8_t c) { +// return vqdmlsl_high_s16(a, b, c); +// } + +// NYI-LABEL: @test_vqdmlsl_high_s32( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <4 x i32> %b, <4 x i32> %b, <2 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <4 x i32> %c, <4 x i32> %c, <2 x i32> +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> [[SHUFFLE_I_I]] to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <2 x i32> [[SHUFFLE_I7_I]] to <8 x i8> +// NYI: [[VQDMLAL2_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> [[SHUFFLE_I_I]], <2 x i32> [[SHUFFLE_I7_I]]) +// NYI: [[VQDMLSL_V3_I_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> [[VQDMLAL2_I_I]]) +// NYI: ret <2 x i64> [[VQDMLSL_V3_I_I]] +// int64x2_t test_vqdmlsl_high_s32(int64x2_t a, int32x4_t b, int32x4_t c) { +// return vqdmlsl_high_s32(a, b, c); +// } + +// NYI-LABEL: @test_vmull_p8( +// NYI: [[VMULL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i16> [[VMULL_I]] +// poly16x8_t test_vmull_p8(poly8x8_t a, poly8x8_t b) { +// return vmull_p8(a, b); +// } + +// NYI-LABEL: @test_vmull_high_p8( +// NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> +// NYI: [[SHUFFLE_I7_I:%.*]] = shufflevector <16 x i8> %b, <16 x i8> %b, <8 x i32> +// NYI: [[VMULL_I_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> [[SHUFFLE_I_I]], <8 x i8> [[SHUFFLE_I7_I]]) +// NYI: ret <8 x i16> [[VMULL_I_I]] +// poly16x8_t test_vmull_high_p8(poly8x16_t a, poly8x16_t b) { +// return vmull_high_p8(a, b); +// } + +// NYI-LABEL: @test_vaddd_s64( +// NYI: [[VADDD_I:%.*]] = add i64 %a, %b +// NYI: ret i64 [[VADDD_I]] +// int64_t test_vaddd_s64(int64_t a, int64_t b) { +// return vaddd_s64(a, b); +// } + +// NYI-LABEL: @test_vaddd_u64( +// NYI: [[VADDD_I:%.*]] = add i64 %a, %b +// NYI: ret i64 [[VADDD_I]] +// uint64_t test_vaddd_u64(uint64_t a, uint64_t b) { +// return vaddd_u64(a, b); +// } + +// NYI-LABEL: @test_vsubd_s64( +// NYI: [[VSUBD_I:%.*]] = sub i64 %a, %b +// NYI: ret i64 [[VSUBD_I]] +// int64_t test_vsubd_s64(int64_t a, int64_t b) { +// return vsubd_s64(a, b); +// } + +// NYI-LABEL: @test_vsubd_u64( +// NYI: [[VSUBD_I:%.*]] = sub i64 %a, %b +// NYI: ret i64 [[VSUBD_I]] +// uint64_t test_vsubd_u64(uint64_t a, uint64_t b) { +// return vsubd_u64(a, b); +// } + +// NYI-LABEL: @test_vqaddb_s8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 %b, i64 0 +// NYI: [[VQADDB_S8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <8 x i8> [[VQADDB_S8_I]], i64 0 +// NYI: ret i8 [[TMP2]] +// int8_t test_vqaddb_s8(int8_t a, int8_t b) { +// return vqaddb_s8(a, b); +// } + +// NYI-LABEL: @test_vqaddh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQADDH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VQADDH_S16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// int16_t test_vqaddh_s16(int16_t a, int16_t b) { +// return vqaddh_s16(a, b); +// } + +// NYI-LABEL: @test_vqadds_s32( +// NYI: [[VQADDS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqadd.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VQADDS_S32_I]] +// int32_t test_vqadds_s32(int32_t a, int32_t b) { +// return vqadds_s32(a, b); +// } + +// NYI-LABEL: @test_vqaddd_s64( +// NYI: [[VQADDD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VQADDD_S64_I]] +// int64_t test_vqaddd_s64(int64_t a, int64_t b) { +// return vqaddd_s64(a, b); +// } + +// NYI-LABEL: @test_vqaddb_u8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 %b, i64 0 +// NYI: [[VQADDB_U8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <8 x i8> [[VQADDB_U8_I]], i64 0 +// NYI: ret i8 [[TMP2]] +// uint8_t test_vqaddb_u8(uint8_t a, uint8_t b) { +// return vqaddb_u8(a, b); +// } + +// NYI-LABEL: @test_vqaddh_u16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQADDH_U16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VQADDH_U16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vqaddh_u16(uint16_t a, uint16_t b) { +// return vqaddh_u16(a, b); +// } + +// NYI-LABEL: @test_vqadds_u32( +// NYI: [[VQADDS_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uqadd.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VQADDS_U32_I]] +// uint32_t test_vqadds_u32(uint32_t a, uint32_t b) { +// return vqadds_u32(a, b); +// } + +// NYI-LABEL: @test_vqaddd_u64( +// NYI: [[VQADDD_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uqadd.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VQADDD_U64_I]] +// uint64_t test_vqaddd_u64(uint64_t a, uint64_t b) { +// return vqaddd_u64(a, b); +// } + +// NYI-LABEL: @test_vqsubb_s8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 %b, i64 0 +// NYI: [[VQSUBB_S8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <8 x i8> [[VQSUBB_S8_I]], i64 0 +// NYI: ret i8 [[TMP2]] +// int8_t test_vqsubb_s8(int8_t a, int8_t b) { +// return vqsubb_s8(a, b); +// } + +// NYI-LABEL: @test_vqsubh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQSUBH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VQSUBH_S16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// int16_t test_vqsubh_s16(int16_t a, int16_t b) { +// return vqsubh_s16(a, b); +// } + +// NYI-LABEL: @test_vqsubs_s32( +// NYI: [[VQSUBS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqsub.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VQSUBS_S32_I]] +// int32_t test_vqsubs_s32(int32_t a, int32_t b) { +// return vqsubs_s32(a, b); +// } + +// NYI-LABEL: @test_vqsubd_s64( +// NYI: [[VQSUBD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VQSUBD_S64_I]] +// int64_t test_vqsubd_s64(int64_t a, int64_t b) { +// return vqsubd_s64(a, b); +// } + +// NYI-LABEL: @test_vqsubb_u8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 %b, i64 0 +// NYI: [[VQSUBB_U8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <8 x i8> [[VQSUBB_U8_I]], i64 0 +// NYI: ret i8 [[TMP2]] +// uint8_t test_vqsubb_u8(uint8_t a, uint8_t b) { +// return vqsubb_u8(a, b); +// } + +// NYI-LABEL: @test_vqsubh_u16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQSUBH_U16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VQSUBH_U16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vqsubh_u16(uint16_t a, uint16_t b) { +// return vqsubh_u16(a, b); +// } + +// NYI-LABEL: @test_vqsubs_u32( +// NYI: [[VQSUBS_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uqsub.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VQSUBS_U32_I]] +// uint32_t test_vqsubs_u32(uint32_t a, uint32_t b) { +// return vqsubs_u32(a, b); +// } + +// NYI-LABEL: @test_vqsubd_u64( +// NYI: [[VQSUBD_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uqsub.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VQSUBD_U64_I]] +// uint64_t test_vqsubd_u64(uint64_t a, uint64_t b) { +// return vqsubd_u64(a, b); +// } + +// NYI-LABEL: @test_vshld_s64( +// NYI: [[VSHLD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.sshl.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VSHLD_S64_I]] +// int64_t test_vshld_s64(int64_t a, int64_t b) { +// return vshld_s64(a, b); +// } + +// NYI-LABEL: @test_vshld_u64( +// NYI: [[VSHLD_U64_I:%.*]] = call i64 @llvm.aarch64.neon.ushl.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VSHLD_U64_I]] +// uint64_t test_vshld_u64(uint64_t a, int64_t b) { +// return vshld_u64(a, b); +// } + +// NYI-LABEL: @test_vqshlb_s8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 %b, i64 0 +// NYI: [[VQSHLB_S8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <8 x i8> [[VQSHLB_S8_I]], i64 0 +// NYI: ret i8 [[TMP2]] +// int8_t test_vqshlb_s8(int8_t a, int8_t b) { +// return vqshlb_s8(a, b); +// } + +// NYI-LABEL: @test_vqshlh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQSHLH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VQSHLH_S16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// int16_t test_vqshlh_s16(int16_t a, int16_t b) { +// return vqshlh_s16(a, b); +// } + +// NYI-LABEL: @test_vqshls_s32( +// NYI: [[VQSHLS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqshl.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VQSHLS_S32_I]] +// int32_t test_vqshls_s32(int32_t a, int32_t b) { +// return vqshls_s32(a, b); +// } + +// NYI-LABEL: @test_vqshld_s64( +// NYI: [[VQSHLD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VQSHLD_S64_I]] +// int64_t test_vqshld_s64(int64_t a, int64_t b) { +// return vqshld_s64(a, b); +// } + +// NYI-LABEL: @test_vqshlb_u8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 %b, i64 0 +// NYI: [[VQSHLB_U8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <8 x i8> [[VQSHLB_U8_I]], i64 0 +// NYI: ret i8 [[TMP2]] +// uint8_t test_vqshlb_u8(uint8_t a, int8_t b) { +// return vqshlb_u8(a, b); +// } + +// NYI-LABEL: @test_vqshlh_u16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQSHLH_U16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VQSHLH_U16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vqshlh_u16(uint16_t a, int16_t b) { +// return vqshlh_u16(a, b); +// } + +// NYI-LABEL: @test_vqshls_u32( +// NYI: [[VQSHLS_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uqshl.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VQSHLS_U32_I]] +// uint32_t test_vqshls_u32(uint32_t a, int32_t b) { +// return vqshls_u32(a, b); +// } + +// NYI-LABEL: @test_vqshld_u64( +// NYI: [[VQSHLD_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VQSHLD_U64_I]] +// uint64_t test_vqshld_u64(uint64_t a, int64_t b) { +// return vqshld_u64(a, b); +// } + +// NYI-LABEL: @test_vrshld_s64( +// NYI: [[VRSHLD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VRSHLD_S64_I]] +// int64_t test_vrshld_s64(int64_t a, int64_t b) { +// return vrshld_s64(a, b); +// } + +// NYI-LABEL: @test_vrshld_u64( +// NYI: [[VRSHLD_U64_I:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VRSHLD_U64_I]] +// uint64_t test_vrshld_u64(uint64_t a, int64_t b) { +// return vrshld_u64(a, b); +// } + +// NYI-LABEL: @test_vqrshlb_s8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 %b, i64 0 +// NYI: [[VQRSHLB_S8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshl.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <8 x i8> [[VQRSHLB_S8_I]], i64 0 +// NYI: ret i8 [[TMP2]] +// int8_t test_vqrshlb_s8(int8_t a, int8_t b) { +// return vqrshlb_s8(a, b); +// } + +// NYI-LABEL: @test_vqrshlh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQRSHLH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshl.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VQRSHLH_S16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// int16_t test_vqrshlh_s16(int16_t a, int16_t b) { +// return vqrshlh_s16(a, b); +// } + +// NYI-LABEL: @test_vqrshls_s32( +// NYI: [[VQRSHLS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrshl.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VQRSHLS_S32_I]] +// int32_t test_vqrshls_s32(int32_t a, int32_t b) { +// return vqrshls_s32(a, b); +// } + +// NYI-LABEL: @test_vqrshld_s64( +// NYI: [[VQRSHLD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.sqrshl.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VQRSHLD_S64_I]] +// int64_t test_vqrshld_s64(int64_t a, int64_t b) { +// return vqrshld_s64(a, b); +// } + +// NYI-LABEL: @test_vqrshlb_u8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 %b, i64 0 +// NYI: [[VQRSHLB_U8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqrshl.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <8 x i8> [[VQRSHLB_U8_I]], i64 0 +// NYI: ret i8 [[TMP2]] +// uint8_t test_vqrshlb_u8(uint8_t a, int8_t b) { +// return vqrshlb_u8(a, b); +// } + +// NYI-LABEL: @test_vqrshlh_u16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQRSHLH_U16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqrshl.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VQRSHLH_U16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vqrshlh_u16(uint16_t a, int16_t b) { +// return vqrshlh_u16(a, b); +// } + +// NYI-LABEL: @test_vqrshls_u32( +// NYI: [[VQRSHLS_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uqrshl.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VQRSHLS_U32_I]] +// uint32_t test_vqrshls_u32(uint32_t a, int32_t b) { +// return vqrshls_u32(a, b); +// } + +// NYI-LABEL: @test_vqrshld_u64( +// NYI: [[VQRSHLD_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uqrshl.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VQRSHLD_U64_I]] +// uint64_t test_vqrshld_u64(uint64_t a, int64_t b) { +// return vqrshld_u64(a, b); +// } + +// NYI-LABEL: @test_vpaddd_s64( +// NYI: [[VPADDD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a) +// NYI: ret i64 [[VPADDD_S64_I]] +// int64_t test_vpaddd_s64(int64x2_t a) { +// return vpaddd_s64(a); +// } + +// NYI-LABEL: @test_vpadds_f32( +// NYI: [[LANE0_I:%.*]] = extractelement <2 x float> %a, i64 0 +// NYI: [[LANE1_I:%.*]] = extractelement <2 x float> %a, i64 1 +// NYI: [[VPADDD_I:%.*]] = fadd float [[LANE0_I]], [[LANE1_I]] +// NYI: ret float [[VPADDD_I]] +// float32_t test_vpadds_f32(float32x2_t a) { +// return vpadds_f32(a); +// } + +// NYI-LABEL: @test_vpaddd_f64( +// NYI: [[LANE0_I:%.*]] = extractelement <2 x double> %a, i64 0 +// NYI: [[LANE1_I:%.*]] = extractelement <2 x double> %a, i64 1 +// NYI: [[VPADDD_I:%.*]] = fadd double [[LANE0_I]], [[LANE1_I]] +// NYI: ret double [[VPADDD_I]] +// float64_t test_vpaddd_f64(float64x2_t a) { +// return vpaddd_f64(a); +// } + +// NYI-LABEL: @test_vpmaxnms_f32( +// NYI: [[VPMAXNMS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxnmv.f32.v2f32(<2 x float> %a) +// NYI: ret float [[VPMAXNMS_F32_I]] +// float32_t test_vpmaxnms_f32(float32x2_t a) { +// return vpmaxnms_f32(a); +// } + +// NYI-LABEL: @test_vpmaxnmqd_f64( +// NYI: [[VPMAXNMQD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %a) +// NYI: ret double [[VPMAXNMQD_F64_I]] +// float64_t test_vpmaxnmqd_f64(float64x2_t a) { +// return vpmaxnmqd_f64(a); +// } + +// NYI-LABEL: @test_vpmaxs_f32( +// NYI: [[VPMAXS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float> %a) +// NYI: ret float [[VPMAXS_F32_I]] +// float32_t test_vpmaxs_f32(float32x2_t a) { +// return vpmaxs_f32(a); +// } + +// NYI-LABEL: @test_vpmaxqd_f64( +// NYI: [[VPMAXQD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double> %a) +// NYI: ret double [[VPMAXQD_F64_I]] +// float64_t test_vpmaxqd_f64(float64x2_t a) { +// return vpmaxqd_f64(a); +// } + +// NYI-LABEL: @test_vpminnms_f32( +// NYI: [[VPMINNMS_F32_I:%.*]] = call float @llvm.aarch64.neon.fminnmv.f32.v2f32(<2 x float> %a) +// NYI: ret float [[VPMINNMS_F32_I]] +// float32_t test_vpminnms_f32(float32x2_t a) { +// return vpminnms_f32(a); +// } + +// NYI-LABEL: @test_vpminnmqd_f64( +// NYI: [[VPMINNMQD_F64_I:%.*]] = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %a) +// NYI: ret double [[VPMINNMQD_F64_I]] +// float64_t test_vpminnmqd_f64(float64x2_t a) { +// return vpminnmqd_f64(a); +// } + +// NYI-LABEL: @test_vpmins_f32( +// NYI: [[VPMINS_F32_I:%.*]] = call float @llvm.aarch64.neon.fminv.f32.v2f32(<2 x float> %a) +// NYI: ret float [[VPMINS_F32_I]] +// float32_t test_vpmins_f32(float32x2_t a) { +// return vpmins_f32(a); +// } + +// NYI-LABEL: @test_vpminqd_f64( +// NYI: [[VPMINQD_F64_I:%.*]] = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %a) +// NYI: ret double [[VPMINQD_F64_I]] +// float64_t test_vpminqd_f64(float64x2_t a) { +// return vpminqd_f64(a); +// } + +// NYI-LABEL: @test_vqdmulhh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQDMULHH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VQDMULHH_S16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// int16_t test_vqdmulhh_s16(int16_t a, int16_t b) { +// return vqdmulhh_s16(a, b); +// } + +// NYI-LABEL: @test_vqdmulhs_s32( +// NYI: [[VQDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VQDMULHS_S32_I]] +// int32_t test_vqdmulhs_s32(int32_t a, int32_t b) { +// return vqdmulhs_s32(a, b); +// } + +// NYI-LABEL: @test_vqrdmulhh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQRDMULHH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VQRDMULHH_S16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// int16_t test_vqrdmulhh_s16(int16_t a, int16_t b) { +// return vqrdmulhh_s16(a, b); +// } + +// NYI-LABEL: @test_vqrdmulhs_s32( +// NYI: [[VQRDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VQRDMULHS_S32_I]] +// int32_t test_vqrdmulhs_s32(int32_t a, int32_t b) { +// return vqrdmulhs_s32(a, b); +// } + +// NYI-LABEL: @test_vmulxs_f32( +// NYI: [[VMULXS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmulx.f32(float %a, float %b) +// NYI: ret float [[VMULXS_F32_I]] +// float32_t test_vmulxs_f32(float32_t a, float32_t b) { +// return vmulxs_f32(a, b); +// } + +// NYI-LABEL: @test_vmulxd_f64( +// NYI: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double %a, double %b) +// NYI: ret double [[VMULXD_F64_I]] +// float64_t test_vmulxd_f64(float64_t a, float64_t b) { +// return vmulxd_f64(a, b); +// } + +// NYI-LABEL: @test_vmulx_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VMULX2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmulx.v1f64(<1 x double> %a, <1 x double> %b) +// NYI: ret <1 x double> [[VMULX2_I]] +// float64x1_t test_vmulx_f64(float64x1_t a, float64x1_t b) { +// return vmulx_f64(a, b); +// } + +// NYI-LABEL: @test_vrecpss_f32( +// NYI: [[VRECPS_I:%.*]] = call float @llvm.aarch64.neon.frecps.f32(float %a, float %b) +// NYI: ret float [[VRECPS_I]] +// float32_t test_vrecpss_f32(float32_t a, float32_t b) { +// return vrecpss_f32(a, b); +// } + +// NYI-LABEL: @test_vrecpsd_f64( +// NYI: [[VRECPS_I:%.*]] = call double @llvm.aarch64.neon.frecps.f64(double %a, double %b) +// NYI: ret double [[VRECPS_I]] +// float64_t test_vrecpsd_f64(float64_t a, float64_t b) { +// return vrecpsd_f64(a, b); +// } + +// NYI-LABEL: @test_vrsqrtss_f32( +// NYI: [[VRSQRTSS_F32_I:%.*]] = call float @llvm.aarch64.neon.frsqrts.f32(float %a, float %b) +// NYI: ret float [[VRSQRTSS_F32_I]] +// float32_t test_vrsqrtss_f32(float32_t a, float32_t b) { +// return vrsqrtss_f32(a, b); +// } + +// NYI-LABEL: @test_vrsqrtsd_f64( +// NYI: [[VRSQRTSD_F64_I:%.*]] = call double @llvm.aarch64.neon.frsqrts.f64(double %a, double %b) +// NYI: ret double [[VRSQRTSD_F64_I]] +// float64_t test_vrsqrtsd_f64(float64_t a, float64_t b) { +// return vrsqrtsd_f64(a, b); +// } + +// NYI-LABEL: @test_vcvts_f32_s32( +// NYI: [[TMP0:%.*]] = sitofp i32 %a to float +// NYI: ret float [[TMP0]] +// float32_t test_vcvts_f32_s32(int32_t a) { +// return vcvts_f32_s32(a); +// } + +// NYI-LABEL: @test_vcvtd_f64_s64( +// NYI: [[TMP0:%.*]] = sitofp i64 %a to double +// NYI: ret double [[TMP0]] +// float64_t test_vcvtd_f64_s64(int64_t a) { +// return vcvtd_f64_s64(a); +// } + +// NYI-LABEL: @test_vcvts_f32_u32( +// NYI: [[TMP0:%.*]] = uitofp i32 %a to float +// NYI: ret float [[TMP0]] +// float32_t test_vcvts_f32_u32(uint32_t a) { +// return vcvts_f32_u32(a); +// } + +// NYI-LABEL: @test_vcvtd_f64_u64( +// NYI: [[TMP0:%.*]] = uitofp i64 %a to double +// NYI: ret double [[TMP0]] +// float64_t test_vcvtd_f64_u64(uint64_t a) { +// return vcvtd_f64_u64(a); +// } + +// NYI-LABEL: @test_vrecpes_f32( +// NYI: [[VRECPES_F32_I:%.*]] = call float @llvm.aarch64.neon.frecpe.f32(float %a) +// NYI: ret float [[VRECPES_F32_I]] +// float32_t test_vrecpes_f32(float32_t a) { +// return vrecpes_f32(a); +// } + +// NYI-LABEL: @test_vrecped_f64( +// NYI: [[VRECPED_F64_I:%.*]] = call double @llvm.aarch64.neon.frecpe.f64(double %a) +// NYI: ret double [[VRECPED_F64_I]] +// float64_t test_vrecped_f64(float64_t a) { +// return vrecped_f64(a); +// } + +// NYI-LABEL: @test_vrecpxs_f32( +// NYI: [[VRECPXS_F32_I:%.*]] = call float @llvm.aarch64.neon.frecpx.f32(float %a) +// NYI: ret float [[VRECPXS_F32_I]] +// float32_t test_vrecpxs_f32(float32_t a) { +// return vrecpxs_f32(a); +// } + +// NYI-LABEL: @test_vrecpxd_f64( +// NYI: [[VRECPXD_F64_I:%.*]] = call double @llvm.aarch64.neon.frecpx.f64(double %a) +// NYI: ret double [[VRECPXD_F64_I]] +// float64_t test_vrecpxd_f64(float64_t a) { +// return vrecpxd_f64(a); +// } + +// NYI-LABEL: @test_vrsqrte_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VRSQRTE_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.ursqrte.v2i32(<2 x i32> %a) +// NYI: ret <2 x i32> [[VRSQRTE_V1_I]] +// uint32x2_t test_vrsqrte_u32(uint32x2_t a) { +// return vrsqrte_u32(a); +// } + +// NYI-LABEL: @test_vrsqrteq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VRSQRTEQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.ursqrte.v4i32(<4 x i32> %a) +// NYI: ret <4 x i32> [[VRSQRTEQ_V1_I]] +// uint32x4_t test_vrsqrteq_u32(uint32x4_t a) { +// return vrsqrteq_u32(a); +// } + +// NYI-LABEL: @test_vrsqrtes_f32( +// NYI: [[VRSQRTES_F32_I:%.*]] = call float @llvm.aarch64.neon.frsqrte.f32(float %a) +// NYI: ret float [[VRSQRTES_F32_I]] +// float32_t test_vrsqrtes_f32(float32_t a) { +// return vrsqrtes_f32(a); +// } + +// NYI-LABEL: @test_vrsqrted_f64( +// NYI: [[VRSQRTED_F64_I:%.*]] = call double @llvm.aarch64.neon.frsqrte.f64(double %a) +// NYI: ret double [[VRSQRTED_F64_I]] +// float64_t test_vrsqrted_f64(float64_t a) { +// return vrsqrted_f64(a); +// } + +// FIXME: alignment should be 1. +uint8x16_t test_vld1q_u8(uint8_t const *a) { + return vld1q_u8(a); + // LLVM-LABEL: @test_vld1q_u8 + // LLVM: [[TMP1:%.*]] = load <16 x i8>, ptr %0, align 16 + + // CIR-LABEL: @test_vld1q_u8 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.load %[[CAST]] : !cir.ptr>, !cir.vector +} + +// NYI-LABEL: @test_vld1q_u16( +// NYI: [[TMP2:%.*]] = load <8 x i16>, ptr %a, align 2 +// NYI: ret <8 x i16> [[TMP2]] +// uint16x8_t test_vld1q_u16(uint16_t const *a) { +// return vld1q_u16(a); +// } + +// NYI-LABEL: @test_vld1q_u32( +// NYI: [[TMP2:%.*]] = load <4 x i32>, ptr %a, align 4 +// NYI: ret <4 x i32> [[TMP2]] +// uint32x4_t test_vld1q_u32(uint32_t const *a) { +// return vld1q_u32(a); +// } + +// NYI-LABEL: @test_vld1q_u64( +// NYI: [[TMP2:%.*]] = load <2 x i64>, ptr %a, align 8 +// NYI: ret <2 x i64> [[TMP2]] +// uint64x2_t test_vld1q_u64(uint64_t const *a) { +// return vld1q_u64(a); +// } + +// NYI-LABEL: @test_vld1q_s8( +// NYI: [[TMP1:%.*]] = load <16 x i8>, ptr %a, align 1 +// NYI: ret <16 x i8> [[TMP1]] +// int8x16_t test_vld1q_s8(int8_t const *a) { +// return vld1q_s8(a); +// } + +// NYI-LABEL: @test_vld1q_s16( +// NYI: [[TMP2:%.*]] = load <8 x i16>, ptr %a, align 2 +// NYI: ret <8 x i16> [[TMP2]] +// int16x8_t test_vld1q_s16(int16_t const *a) { +// return vld1q_s16(a); +// } + +// NYI-LABEL: @test_vld1q_s32( +// NYI: [[TMP2:%.*]] = load <4 x i32>, ptr %a, align 4 +// NYI: ret <4 x i32> [[TMP2]] +// int32x4_t test_vld1q_s32(int32_t const *a) { +// return vld1q_s32(a); +// } + +// NYI-LABEL: @test_vld1q_s64( +// NYI: [[TMP2:%.*]] = load <2 x i64>, ptr %a, align 8 +// NYI: ret <2 x i64> [[TMP2]] +// int64x2_t test_vld1q_s64(int64_t const *a) { +// return vld1q_s64(a); +// } + +// NYI-LABEL: @test_vld1q_f16( +// NYI: [[TMP2:%.*]] = load <8 x half>, ptr %a, align 2 +// NYI: ret <8 x half> [[TMP2]] +// float16x8_t test_vld1q_f16(float16_t const *a) { +// return vld1q_f16(a); +// } + +// NYI-LABEL: @test_vld1q_f32( +// NYI: [[TMP2:%.*]] = load <4 x float>, ptr %a, align 4 +// NYI: ret <4 x float> [[TMP2]] +// float32x4_t test_vld1q_f32(float32_t const *a) { +// return vld1q_f32(a); +// } + +// NYI-LABEL: @test_vld1q_f64( +// NYI: [[TMP2:%.*]] = load <2 x double>, ptr %a, align 8 +// NYI: ret <2 x double> [[TMP2]] +// float64x2_t test_vld1q_f64(float64_t const *a) { +// return vld1q_f64(a); +// } + +// NYI-LABEL: @test_vld1q_p8( +// NYI: [[TMP1:%.*]] = load <16 x i8>, ptr %a, align 1 +// NYI: ret <16 x i8> [[TMP1]] +// poly8x16_t test_vld1q_p8(poly8_t const *a) { +// return vld1q_p8(a); +// } + +// NYI-LABEL: @test_vld1q_p16( +// NYI: [[TMP2:%.*]] = load <8 x i16>, ptr %a, align 2 +// NYI: ret <8 x i16> [[TMP2]] +// poly16x8_t test_vld1q_p16(poly16_t const *a) { +// return vld1q_p16(a); +// } + +// NYI-LABEL: @test_vld1_u8( +// NYI: [[TMP1:%.*]] = load <8 x i8>, ptr %a, align 1 +// NYI: ret <8 x i8> [[TMP1]] +// uint8x8_t test_vld1_u8(uint8_t const *a) { +// return vld1_u8(a); +// } + +// NYI-LABEL: @test_vld1_u16( +// NYI: [[TMP2:%.*]] = load <4 x i16>, ptr %a, align 2 +// NYI: ret <4 x i16> [[TMP2]] +// uint16x4_t test_vld1_u16(uint16_t const *a) { +// return vld1_u16(a); +// } + +// NYI-LABEL: @test_vld1_u32( +// NYI: [[TMP2:%.*]] = load <2 x i32>, ptr %a, align 4 +// NYI: ret <2 x i32> [[TMP2]] +// uint32x2_t test_vld1_u32(uint32_t const *a) { +// return vld1_u32(a); +// } + +// NYI-LABEL: @test_vld1_u64( +// NYI: [[TMP2:%.*]] = load <1 x i64>, ptr %a, align 8 +// NYI: ret <1 x i64> [[TMP2]] +// uint64x1_t test_vld1_u64(uint64_t const *a) { +// return vld1_u64(a); +// } + +// NYI-LABEL: @test_vld1_s8( +// NYI: [[TMP1:%.*]] = load <8 x i8>, ptr %a, align 1 +// NYI: ret <8 x i8> [[TMP1]] +// int8x8_t test_vld1_s8(int8_t const *a) { +// return vld1_s8(a); +// } + +// NYI-LABEL: @test_vld1_s16( +// NYI: [[TMP2:%.*]] = load <4 x i16>, ptr %a, align 2 +// NYI: ret <4 x i16> [[TMP2]] +// int16x4_t test_vld1_s16(int16_t const *a) { +// return vld1_s16(a); +// } + +// NYI-LABEL: @test_vld1_s32( +// NYI: [[TMP2:%.*]] = load <2 x i32>, ptr %a, align 4 +// NYI: ret <2 x i32> [[TMP2]] +// int32x2_t test_vld1_s32(int32_t const *a) { +// return vld1_s32(a); +// } + +// NYI-LABEL: @test_vld1_s64( +// NYI: [[TMP2:%.*]] = load <1 x i64>, ptr %a, align 8 +// NYI: ret <1 x i64> [[TMP2]] +// int64x1_t test_vld1_s64(int64_t const *a) { +// return vld1_s64(a); +// } + +// NYI-LABEL: @test_vld1_f16( +// NYI: [[TMP2:%.*]] = load <4 x half>, ptr %a, align 2 +// NYI: ret <4 x half> [[TMP2]] +// float16x4_t test_vld1_f16(float16_t const *a) { +// return vld1_f16(a); +// } + +// NYI-LABEL: @test_vld1_f32( +// NYI: [[TMP2:%.*]] = load <2 x float>, ptr %a, align 4 +// NYI: ret <2 x float> [[TMP2]] +// float32x2_t test_vld1_f32(float32_t const *a) { +// return vld1_f32(a); +// } + +// NYI-LABEL: @test_vld1_f64( +// NYI: [[TMP2:%.*]] = load <1 x double>, ptr %a, align 8 +// NYI: ret <1 x double> [[TMP2]] +// float64x1_t test_vld1_f64(float64_t const *a) { +// return vld1_f64(a); +// } + +// NYI-LABEL: @test_vld1_p8( +// NYI: [[TMP1:%.*]] = load <8 x i8>, ptr %a, align 1 +// NYI: ret <8 x i8> [[TMP1]] +// poly8x8_t test_vld1_p8(poly8_t const *a) { +// return vld1_p8(a); +// } + +// NYI-LABEL: @test_vld1_p16( +// NYI: [[TMP2:%.*]] = load <4 x i16>, ptr %a, align 2 +// NYI: ret <4 x i16> [[TMP2]] +// poly16x4_t test_vld1_p16(poly16_t const *a) { +// return vld1_p16(a); +// } + +// NYI-LABEL: @test_vld1_u8_void( +// NYI: [[TMP1:%.*]] = load <8 x i8>, ptr %a, align 1 +// NYI: ret <8 x i8> [[TMP1]] +// uint8x8_t test_vld1_u8_void(void *a) { +// return vld1_u8(a); +// } + +// NYI-LABEL: @test_vld1_u16_void( +// NYI: [[TMP1:%.*]] = load <4 x i16>, ptr %a, align 1 +// NYI: ret <4 x i16> [[TMP1]] +// uint16x4_t test_vld1_u16_void(void *a) { +// return vld1_u16(a); +// } + +// NYI-LABEL: @test_vld1_u32_void( +// NYI: [[TMP1:%.*]] = load <2 x i32>, ptr %a, align 1 +// NYI: ret <2 x i32> [[TMP1]] +// uint32x2_t test_vld1_u32_void(void *a) { +// return vld1_u32(a); +// } + +// NYI-LABEL: @test_vld1_u64_void( +// NYI: [[TMP1:%.*]] = load <1 x i64>, ptr %a, align 1 +// NYI: ret <1 x i64> [[TMP1]] +// uint64x1_t test_vld1_u64_void(void *a) { +// return vld1_u64(a); +// } + +// NYI-LABEL: @test_vld1_s8_void( +// NYI: [[TMP1:%.*]] = load <8 x i8>, ptr %a, align 1 +// NYI: ret <8 x i8> [[TMP1]] +// int8x8_t test_vld1_s8_void(void *a) { +// return vld1_s8(a); +// } + +// NYI-LABEL: @test_vld1_s16_void( +// NYI: [[TMP1:%.*]] = load <4 x i16>, ptr %a, align 1 +// NYI: ret <4 x i16> [[TMP1]] +// int16x4_t test_vld1_s16_void(void *a) { +// return vld1_s16(a); +// } + +// NYI-LABEL: @test_vld1_s32_void( +// NYI: [[TMP1:%.*]] = load <2 x i32>, ptr %a, align 1 +// NYI: ret <2 x i32> [[TMP1]] +// int32x2_t test_vld1_s32_void(void *a) { +// return vld1_s32(a); +// } + +// NYI-LABEL: @test_vld1_s64_void( +// NYI: [[TMP1:%.*]] = load <1 x i64>, ptr %a, align 1 +// NYI: ret <1 x i64> [[TMP1]] +// int64x1_t test_vld1_s64_void(void *a) { +// return vld1_s64(a); +// } + +// NYI-LABEL: @test_vld1_f16_void( +// NYI: [[TMP1:%.*]] = load <4 x half>, ptr %a, align 1 +// NYI: ret <4 x half> [[TMP1]] +// float16x4_t test_vld1_f16_void(void *a) { +// return vld1_f16(a); +// } + +// NYI-LABEL: @test_vld1_f32_void( +// NYI: [[TMP1:%.*]] = load <2 x float>, ptr %a, align 1 +// NYI: ret <2 x float> [[TMP1]] +// float32x2_t test_vld1_f32_void(void *a) { +// return vld1_f32(a); +// } + +// NYI-LABEL: @test_vld1_f64_void( +// NYI: [[TMP1:%.*]] = load <1 x double>, ptr %a, align 1 +// NYI: ret <1 x double> [[TMP1]] +// float64x1_t test_vld1_f64_void(void *a) { +// return vld1_f64(a); +// } + +// NYI-LABEL: @test_vld1_p8_void( +// NYI: [[TMP1:%.*]] = load <8 x i8>, ptr %a, align 1 +// NYI: ret <8 x i8> [[TMP1]] +// poly8x8_t test_vld1_p8_void(void *a) { +// return vld1_p8(a); +// } + +// NYI-LABEL: @test_vld1_p16_void( +// NYI: [[TMP1:%.*]] = load <4 x i16>, ptr %a, align 1 +// NYI: ret <4 x i16> [[TMP1]] +// poly16x4_t test_vld1_p16_void(void *a) { +// return vld1_p16(a); +// } + +// NYI-LABEL: @test_vld2q_u8( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint8x16x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint8x16x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr %a) +// NYI: store { <16 x i8>, <16 x i8> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.uint8x16x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint8x16x2_t [[TMP5]] +// uint8x16x2_t test_vld2q_u8(uint8_t const *a) { +// return vld2q_u8(a); +// } + +// NYI-LABEL: @test_vld2q_u16( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint16x8x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint16x8x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr %a) +// NYI: store { <8 x i16>, <8 x i16> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint16x8x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint16x8x2_t [[TMP6]] +// uint16x8x2_t test_vld2q_u16(uint16_t const *a) { +// return vld2q_u16(a); +// } + +// NYI-LABEL: @test_vld2q_u32( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint32x4x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint32x4x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %a) +// NYI: store { <4 x i32>, <4 x i32> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint32x4x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint32x4x2_t [[TMP6]] +// uint32x4x2_t test_vld2q_u32(uint32_t const *a) { +// return vld2q_u32(a); +// } + +// NYI-LABEL: @test_vld2q_u64( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint64x2x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint64x2x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0(ptr %a) +// NYI: store { <2 x i64>, <2 x i64> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint64x2x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint64x2x2_t [[TMP6]] +// uint64x2x2_t test_vld2q_u64(uint64_t const *a) { +// return vld2q_u64(a); +// } + +// NYI-LABEL: @test_vld2q_s8( +// NYI: [[RETVAL:%.*]] = alloca %struct.int8x16x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int8x16x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr %a) +// NYI: store { <16 x i8>, <16 x i8> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.int8x16x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int8x16x2_t [[TMP5]] +// int8x16x2_t test_vld2q_s8(int8_t const *a) { +// return vld2q_s8(a); +// } + +// NYI-LABEL: @test_vld2q_s16( +// NYI: [[RETVAL:%.*]] = alloca %struct.int16x8x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int16x8x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr %a) +// NYI: store { <8 x i16>, <8 x i16> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int16x8x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int16x8x2_t [[TMP6]] +// int16x8x2_t test_vld2q_s16(int16_t const *a) { +// return vld2q_s16(a); +// } + +// NYI-LABEL: @test_vld2q_s32( +// NYI: [[RETVAL:%.*]] = alloca %struct.int32x4x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int32x4x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %a) +// NYI: store { <4 x i32>, <4 x i32> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int32x4x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int32x4x2_t [[TMP6]] +// int32x4x2_t test_vld2q_s32(int32_t const *a) { +// return vld2q_s32(a); +// } + +// NYI-LABEL: @test_vld2q_s64( +// NYI: [[RETVAL:%.*]] = alloca %struct.int64x2x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int64x2x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0(ptr %a) +// NYI: store { <2 x i64>, <2 x i64> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int64x2x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int64x2x2_t [[TMP6]] +// int64x2x2_t test_vld2q_s64(int64_t const *a) { +// return vld2q_s64(a); +// } + +// NYI-LABEL: @test_vld2q_f16( +// NYI: [[RETVAL:%.*]] = alloca %struct.float16x8x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0(ptr %a) +// NYI: store { <8 x half>, <8 x half> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float16x8x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float16x8x2_t [[TMP6]] +// float16x8x2_t test_vld2q_f16(float16_t const *a) { +// return vld2q_f16(a); +// } + +// NYI-LABEL: @test_vld2q_f32( +// NYI: [[RETVAL:%.*]] = alloca %struct.float32x4x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float32x4x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr %a) +// NYI: store { <4 x float>, <4 x float> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float32x4x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float32x4x2_t [[TMP6]] +// float32x4x2_t test_vld2q_f32(float32_t const *a) { +// return vld2q_f32(a); +// } + +// NYI-LABEL: @test_vld2q_f64( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x2x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float64x2x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0(ptr %a) +// NYI: store { <2 x double>, <2 x double> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x2x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float64x2x2_t [[TMP6]] +// float64x2x2_t test_vld2q_f64(float64_t const *a) { +// return vld2q_f64(a); +// } + +// NYI-LABEL: @test_vld2q_p8( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly8x16x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.poly8x16x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr %a) +// NYI: store { <16 x i8>, <16 x i8> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.poly8x16x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.poly8x16x2_t [[TMP5]] +// poly8x16x2_t test_vld2q_p8(poly8_t const *a) { +// return vld2q_p8(a); +// } + +// NYI-LABEL: @test_vld2q_p16( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly16x8x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.poly16x8x2_t, align 16 +// NYI: [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr %a) +// NYI: store { <8 x i16>, <8 x i16> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly16x8x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.poly16x8x2_t [[TMP6]] +// poly16x8x2_t test_vld2q_p16(poly16_t const *a) { +// return vld2q_p16(a); +// } + +// NYI-LABEL: @test_vld2_u8( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint8x8x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint8x8x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %a) +// NYI: store { <8 x i8>, <8 x i8> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.uint8x8x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint8x8x2_t [[TMP5]] +// uint8x8x2_t test_vld2_u8(uint8_t const *a) { +// return vld2_u8(a); +// } + +// NYI-LABEL: @test_vld2_u16( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint16x4x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint16x4x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr %a) +// NYI: store { <4 x i16>, <4 x i16> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint16x4x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint16x4x2_t [[TMP6]] +// uint16x4x2_t test_vld2_u16(uint16_t const *a) { +// return vld2_u16(a); +// } + +// NYI-LABEL: @test_vld2_u32( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint32x2x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint32x2x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr %a) +// NYI: store { <2 x i32>, <2 x i32> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint32x2x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint32x2x2_t [[TMP6]] +// uint32x2x2_t test_vld2_u32(uint32_t const *a) { +// return vld2_u32(a); +// } + +// NYI-LABEL: @test_vld2_u64( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint64x1x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint64x1x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0(ptr %a) +// NYI: store { <1 x i64>, <1 x i64> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint64x1x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint64x1x2_t [[TMP6]] +// uint64x1x2_t test_vld2_u64(uint64_t const *a) { +// return vld2_u64(a); +// } + +// NYI-LABEL: @test_vld2_s8( +// NYI: [[RETVAL:%.*]] = alloca %struct.int8x8x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int8x8x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %a) +// NYI: store { <8 x i8>, <8 x i8> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.int8x8x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int8x8x2_t [[TMP5]] +// int8x8x2_t test_vld2_s8(int8_t const *a) { +// return vld2_s8(a); +// } + +// NYI-LABEL: @test_vld2_s16( +// NYI: [[RETVAL:%.*]] = alloca %struct.int16x4x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int16x4x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr %a) +// NYI: store { <4 x i16>, <4 x i16> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int16x4x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int16x4x2_t [[TMP6]] +// int16x4x2_t test_vld2_s16(int16_t const *a) { +// return vld2_s16(a); +// } + +// NYI-LABEL: @test_vld2_s32( +// NYI: [[RETVAL:%.*]] = alloca %struct.int32x2x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int32x2x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr %a) +// NYI: store { <2 x i32>, <2 x i32> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int32x2x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int32x2x2_t [[TMP6]] +// int32x2x2_t test_vld2_s32(int32_t const *a) { +// return vld2_s32(a); +// } + +// NYI-LABEL: @test_vld2_s64( +// NYI: [[RETVAL:%.*]] = alloca %struct.int64x1x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int64x1x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0(ptr %a) +// NYI: store { <1 x i64>, <1 x i64> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int64x1x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int64x1x2_t [[TMP6]] +// int64x1x2_t test_vld2_s64(int64_t const *a) { +// return vld2_s64(a); +// } + +// NYI-LABEL: @test_vld2_f16( +// NYI: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0(ptr %a) +// NYI: store { <4 x half>, <4 x half> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float16x4x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float16x4x2_t [[TMP6]] +// float16x4x2_t test_vld2_f16(float16_t const *a) { +// return vld2_f16(a); +// } + +// NYI-LABEL: @test_vld2_f32( +// NYI: [[RETVAL:%.*]] = alloca %struct.float32x2x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float32x2x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0(ptr %a) +// NYI: store { <2 x float>, <2 x float> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float32x2x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float32x2x2_t [[TMP6]] +// float32x2x2_t test_vld2_f32(float32_t const *a) { +// return vld2_f32(a); +// } + +// NYI-LABEL: @test_vld2_f64( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x1x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float64x1x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0(ptr %a) +// NYI: store { <1 x double>, <1 x double> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x1x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float64x1x2_t [[TMP6]] +// float64x1x2_t test_vld2_f64(float64_t const *a) { +// return vld2_f64(a); +// } + +// NYI-LABEL: @test_vld2_p8( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly8x8x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.poly8x8x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %a) +// NYI: store { <8 x i8>, <8 x i8> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.poly8x8x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.poly8x8x2_t [[TMP5]] +// poly8x8x2_t test_vld2_p8(poly8_t const *a) { +// return vld2_p8(a); +// } + +// NYI-LABEL: @test_vld2_p16( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly16x4x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.poly16x4x2_t, align 8 +// NYI: [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr %a) +// NYI: store { <4 x i16>, <4 x i16> } [[VLD2]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly16x4x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.poly16x4x2_t [[TMP6]] +// poly16x4x2_t test_vld2_p16(poly16_t const *a) { +// return vld2_p16(a); +// } + +// NYI-LABEL: @test_vld3q_u8( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint8x16x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint8x16x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr %a) +// NYI: store { <16 x i8>, <16 x i8>, <16 x i8> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.uint8x16x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint8x16x3_t [[TMP5]] +// uint8x16x3_t test_vld3q_u8(uint8_t const *a) { +// return vld3q_u8(a); +// } + +// NYI-LABEL: @test_vld3q_u16( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint16x8x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint16x8x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr %a) +// NYI: store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint16x8x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint16x8x3_t [[TMP6]] +// uint16x8x3_t test_vld3q_u16(uint16_t const *a) { +// return vld3q_u16(a); +// } + +// NYI-LABEL: @test_vld3q_u32( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint32x4x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint32x4x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr %a) +// NYI: store { <4 x i32>, <4 x i32>, <4 x i32> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint32x4x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint32x4x3_t [[TMP6]] +// uint32x4x3_t test_vld3q_u32(uint32_t const *a) { +// return vld3q_u32(a); +// } + +// NYI-LABEL: @test_vld3q_u64( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint64x2x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint64x2x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0(ptr %a) +// NYI: store { <2 x i64>, <2 x i64>, <2 x i64> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint64x2x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint64x2x3_t [[TMP6]] +// uint64x2x3_t test_vld3q_u64(uint64_t const *a) { +// return vld3q_u64(a); +// } + +// NYI-LABEL: @test_vld3q_s8( +// NYI: [[RETVAL:%.*]] = alloca %struct.int8x16x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int8x16x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr %a) +// NYI: store { <16 x i8>, <16 x i8>, <16 x i8> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.int8x16x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int8x16x3_t [[TMP5]] +// int8x16x3_t test_vld3q_s8(int8_t const *a) { +// return vld3q_s8(a); +// } + +// NYI-LABEL: @test_vld3q_s16( +// NYI: [[RETVAL:%.*]] = alloca %struct.int16x8x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int16x8x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr %a) +// NYI: store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int16x8x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int16x8x3_t [[TMP6]] +// int16x8x3_t test_vld3q_s16(int16_t const *a) { +// return vld3q_s16(a); +// } + +// NYI-LABEL: @test_vld3q_s32( +// NYI: [[RETVAL:%.*]] = alloca %struct.int32x4x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int32x4x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr %a) +// NYI: store { <4 x i32>, <4 x i32>, <4 x i32> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int32x4x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int32x4x3_t [[TMP6]] +// int32x4x3_t test_vld3q_s32(int32_t const *a) { +// return vld3q_s32(a); +// } + +// NYI-LABEL: @test_vld3q_s64( +// NYI: [[RETVAL:%.*]] = alloca %struct.int64x2x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int64x2x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0(ptr %a) +// NYI: store { <2 x i64>, <2 x i64>, <2 x i64> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int64x2x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int64x2x3_t [[TMP6]] +// int64x2x3_t test_vld3q_s64(int64_t const *a) { +// return vld3q_s64(a); +// } + +// NYI-LABEL: @test_vld3q_f16( +// NYI: [[RETVAL:%.*]] = alloca %struct.float16x8x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0(ptr %a) +// NYI: store { <8 x half>, <8 x half>, <8 x half> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float16x8x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float16x8x3_t [[TMP6]] +// float16x8x3_t test_vld3q_f16(float16_t const *a) { +// return vld3q_f16(a); +// } + +// NYI-LABEL: @test_vld3q_f32( +// NYI: [[RETVAL:%.*]] = alloca %struct.float32x4x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float32x4x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr %a) +// NYI: store { <4 x float>, <4 x float>, <4 x float> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float32x4x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float32x4x3_t [[TMP6]] +// float32x4x3_t test_vld3q_f32(float32_t const *a) { +// return vld3q_f32(a); +// } + +// NYI-LABEL: @test_vld3q_f64( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x2x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float64x2x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0(ptr %a) +// NYI: store { <2 x double>, <2 x double>, <2 x double> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x2x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float64x2x3_t [[TMP6]] +// float64x2x3_t test_vld3q_f64(float64_t const *a) { +// return vld3q_f64(a); +// } + +// NYI-LABEL: @test_vld3q_p8( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly8x16x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.poly8x16x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr %a) +// NYI: store { <16 x i8>, <16 x i8>, <16 x i8> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.poly8x16x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.poly8x16x3_t [[TMP5]] +// poly8x16x3_t test_vld3q_p8(poly8_t const *a) { +// return vld3q_p8(a); +// } + +// NYI-LABEL: @test_vld3q_p16( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly16x8x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.poly16x8x3_t, align 16 +// NYI: [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr %a) +// NYI: store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly16x8x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.poly16x8x3_t [[TMP6]] +// poly16x8x3_t test_vld3q_p16(poly16_t const *a) { +// return vld3q_p16(a); +// } + +// NYI-LABEL: @test_vld3_u8( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint8x8x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint8x8x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr %a) +// NYI: store { <8 x i8>, <8 x i8>, <8 x i8> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.uint8x8x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint8x8x3_t [[TMP5]] +// uint8x8x3_t test_vld3_u8(uint8_t const *a) { +// return vld3_u8(a); +// } + +// NYI-LABEL: @test_vld3_u16( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint16x4x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint16x4x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr %a) +// NYI: store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint16x4x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint16x4x3_t [[TMP6]] +// uint16x4x3_t test_vld3_u16(uint16_t const *a) { +// return vld3_u16(a); +// } + +// NYI-LABEL: @test_vld3_u32( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint32x2x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint32x2x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0(ptr %a) +// NYI: store { <2 x i32>, <2 x i32>, <2 x i32> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint32x2x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint32x2x3_t [[TMP6]] +// uint32x2x3_t test_vld3_u32(uint32_t const *a) { +// return vld3_u32(a); +// } + +// NYI-LABEL: @test_vld3_u64( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint64x1x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint64x1x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0(ptr %a) +// NYI: store { <1 x i64>, <1 x i64>, <1 x i64> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint64x1x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint64x1x3_t [[TMP6]] +// uint64x1x3_t test_vld3_u64(uint64_t const *a) { +// return vld3_u64(a); +// } + +// NYI-LABEL: @test_vld3_s8( +// NYI: [[RETVAL:%.*]] = alloca %struct.int8x8x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int8x8x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr %a) +// NYI: store { <8 x i8>, <8 x i8>, <8 x i8> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.int8x8x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int8x8x3_t [[TMP5]] +// int8x8x3_t test_vld3_s8(int8_t const *a) { +// return vld3_s8(a); +// } + +// NYI-LABEL: @test_vld3_s16( +// NYI: [[RETVAL:%.*]] = alloca %struct.int16x4x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int16x4x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr %a) +// NYI: store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int16x4x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int16x4x3_t [[TMP6]] +// int16x4x3_t test_vld3_s16(int16_t const *a) { +// return vld3_s16(a); +// } + +// NYI-LABEL: @test_vld3_s32( +// NYI: [[RETVAL:%.*]] = alloca %struct.int32x2x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int32x2x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0(ptr %a) +// NYI: store { <2 x i32>, <2 x i32>, <2 x i32> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int32x2x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int32x2x3_t [[TMP6]] +// int32x2x3_t test_vld3_s32(int32_t const *a) { +// return vld3_s32(a); +// } + +// NYI-LABEL: @test_vld3_s64( +// NYI: [[RETVAL:%.*]] = alloca %struct.int64x1x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int64x1x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0(ptr %a) +// NYI: store { <1 x i64>, <1 x i64>, <1 x i64> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int64x1x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int64x1x3_t [[TMP6]] +// int64x1x3_t test_vld3_s64(int64_t const *a) { +// return vld3_s64(a); +// } + +// NYI-LABEL: @test_vld3_f16( +// NYI: [[RETVAL:%.*]] = alloca %struct.float16x4x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0(ptr %a) +// NYI: store { <4 x half>, <4 x half>, <4 x half> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float16x4x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float16x4x3_t [[TMP6]] +// float16x4x3_t test_vld3_f16(float16_t const *a) { +// return vld3_f16(a); +// } + +// NYI-LABEL: @test_vld3_f32( +// NYI: [[RETVAL:%.*]] = alloca %struct.float32x2x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float32x2x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0(ptr %a) +// NYI: store { <2 x float>, <2 x float>, <2 x float> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float32x2x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float32x2x3_t [[TMP6]] +// float32x2x3_t test_vld3_f32(float32_t const *a) { +// return vld3_f32(a); +// } + +// NYI-LABEL: @test_vld3_f64( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x1x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float64x1x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0(ptr %a) +// NYI: store { <1 x double>, <1 x double>, <1 x double> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x1x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float64x1x3_t [[TMP6]] +// float64x1x3_t test_vld3_f64(float64_t const *a) { +// return vld3_f64(a); +// } + +// NYI-LABEL: @test_vld3_p8( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly8x8x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.poly8x8x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr %a) +// NYI: store { <8 x i8>, <8 x i8>, <8 x i8> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.poly8x8x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.poly8x8x3_t [[TMP5]] +// poly8x8x3_t test_vld3_p8(poly8_t const *a) { +// return vld3_p8(a); +// } + +// NYI-LABEL: @test_vld3_p16( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly16x4x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.poly16x4x3_t, align 8 +// NYI: [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr %a) +// NYI: store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly16x4x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.poly16x4x3_t [[TMP6]] +// poly16x4x3_t test_vld3_p16(poly16_t const *a) { +// return vld3_p16(a); +// } + +// NYI-LABEL: @test_vld4q_u8( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint8x16x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint8x16x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr %a) +// NYI: store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.uint8x16x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint8x16x4_t [[TMP5]] +// uint8x16x4_t test_vld4q_u8(uint8_t const *a) { +// return vld4q_u8(a); +// } + +// NYI-LABEL: @test_vld4q_u16( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint16x8x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint16x8x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr %a) +// NYI: store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint16x8x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint16x8x4_t [[TMP6]] +// uint16x8x4_t test_vld4q_u16(uint16_t const *a) { +// return vld4q_u16(a); +// } + +// NYI-LABEL: @test_vld4q_u32( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint32x4x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint32x4x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0(ptr %a) +// NYI: store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint32x4x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint32x4x4_t [[TMP6]] +// uint32x4x4_t test_vld4q_u32(uint32_t const *a) { +// return vld4q_u32(a); +// } + +// NYI-LABEL: @test_vld4q_u64( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint64x2x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.uint64x2x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr %a) +// NYI: store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint64x2x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.uint64x2x4_t [[TMP6]] +// uint64x2x4_t test_vld4q_u64(uint64_t const *a) { +// return vld4q_u64(a); +// } + +// NYI-LABEL: @test_vld4q_s8( +// NYI: [[RETVAL:%.*]] = alloca %struct.int8x16x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int8x16x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr %a) +// NYI: store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.int8x16x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int8x16x4_t [[TMP5]] +// int8x16x4_t test_vld4q_s8(int8_t const *a) { +// return vld4q_s8(a); +// } + +// NYI-LABEL: @test_vld4q_s16( +// NYI: [[RETVAL:%.*]] = alloca %struct.int16x8x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int16x8x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr %a) +// NYI: store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int16x8x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int16x8x4_t [[TMP6]] +// int16x8x4_t test_vld4q_s16(int16_t const *a) { +// return vld4q_s16(a); +// } + +// NYI-LABEL: @test_vld4q_s32( +// NYI: [[RETVAL:%.*]] = alloca %struct.int32x4x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int32x4x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0(ptr %a) +// NYI: store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int32x4x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int32x4x4_t [[TMP6]] +// int32x4x4_t test_vld4q_s32(int32_t const *a) { +// return vld4q_s32(a); +// } + +// NYI-LABEL: @test_vld4q_s64( +// NYI: [[RETVAL:%.*]] = alloca %struct.int64x2x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.int64x2x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr %a) +// NYI: store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int64x2x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.int64x2x4_t [[TMP6]] +// int64x2x4_t test_vld4q_s64(int64_t const *a) { +// return vld4q_s64(a); +// } + +// NYI-LABEL: @test_vld4q_f16( +// NYI: [[RETVAL:%.*]] = alloca %struct.float16x8x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0(ptr %a) +// NYI: store { <8 x half>, <8 x half>, <8 x half>, <8 x half> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float16x8x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float16x8x4_t [[TMP6]] +// float16x8x4_t test_vld4q_f16(float16_t const *a) { +// return vld4q_f16(a); +// } + +// NYI-LABEL: @test_vld4q_f32( +// NYI: [[RETVAL:%.*]] = alloca %struct.float32x4x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float32x4x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0(ptr %a) +// NYI: store { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float32x4x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float32x4x4_t [[TMP6]] +// float32x4x4_t test_vld4q_f32(float32_t const *a) { +// return vld4q_f32(a); +// } + +// NYI-LABEL: @test_vld4q_f64( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x2x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float64x2x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0(ptr %a) +// NYI: store { <2 x double>, <2 x double>, <2 x double>, <2 x double> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x2x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float64x2x4_t [[TMP6]] +// float64x2x4_t test_vld4q_f64(float64_t const *a) { +// return vld4q_f64(a); +// } + +// NYI-LABEL: @test_vld4q_p8( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly8x16x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.poly8x16x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr %a) +// NYI: store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.poly8x16x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.poly8x16x4_t [[TMP5]] +// poly8x16x4_t test_vld4q_p8(poly8_t const *a) { +// return vld4q_p8(a); +// } + +// NYI-LABEL: @test_vld4q_p16( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly16x8x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.poly16x8x4_t, align 16 +// NYI: [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr %a) +// NYI: store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly16x8x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.poly16x8x4_t [[TMP6]] +// poly16x8x4_t test_vld4q_p16(poly16_t const *a) { +// return vld4q_p16(a); +// } + +// NYI-LABEL: @test_vld4_u8( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint8x8x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint8x8x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr %a) +// NYI: store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.uint8x8x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint8x8x4_t [[TMP5]] +// uint8x8x4_t test_vld4_u8(uint8_t const *a) { +// return vld4_u8(a); +// } + +// NYI-LABEL: @test_vld4_u16( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint16x4x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint16x4x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr %a) +// NYI: store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint16x4x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint16x4x4_t [[TMP6]] +// uint16x4x4_t test_vld4_u16(uint16_t const *a) { +// return vld4_u16(a); +// } + +// NYI-LABEL: @test_vld4_u32( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint32x2x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint32x2x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0(ptr %a) +// NYI: store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint32x2x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint32x2x4_t [[TMP6]] +// uint32x2x4_t test_vld4_u32(uint32_t const *a) { +// return vld4_u32(a); +// } + +// NYI-LABEL: @test_vld4_u64( +// NYI: [[RETVAL:%.*]] = alloca %struct.uint64x1x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.uint64x1x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0(ptr %a) +// NYI: store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.uint64x1x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.uint64x1x4_t [[TMP6]] +// uint64x1x4_t test_vld4_u64(uint64_t const *a) { +// return vld4_u64(a); +// } + +// NYI-LABEL: @test_vld4_s8( +// NYI: [[RETVAL:%.*]] = alloca %struct.int8x8x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int8x8x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr %a) +// NYI: store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.int8x8x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int8x8x4_t [[TMP5]] +// int8x8x4_t test_vld4_s8(int8_t const *a) { +// return vld4_s8(a); +// } + +// NYI-LABEL: @test_vld4_s16( +// NYI: [[RETVAL:%.*]] = alloca %struct.int16x4x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int16x4x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr %a) +// NYI: store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int16x4x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int16x4x4_t [[TMP6]] +// int16x4x4_t test_vld4_s16(int16_t const *a) { +// return vld4_s16(a); +// } + +// NYI-LABEL: @test_vld4_s32( +// NYI: [[RETVAL:%.*]] = alloca %struct.int32x2x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int32x2x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0(ptr %a) +// NYI: store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int32x2x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int32x2x4_t [[TMP6]] +// int32x2x4_t test_vld4_s32(int32_t const *a) { +// return vld4_s32(a); +// } + +// NYI-LABEL: @test_vld4_s64( +// NYI: [[RETVAL:%.*]] = alloca %struct.int64x1x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.int64x1x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0(ptr %a) +// NYI: store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.int64x1x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.int64x1x4_t [[TMP6]] +// int64x1x4_t test_vld4_s64(int64_t const *a) { +// return vld4_s64(a); +// } + +// NYI-LABEL: @test_vld4_f16( +// NYI: [[RETVAL:%.*]] = alloca %struct.float16x4x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0(ptr %a) +// NYI: store { <4 x half>, <4 x half>, <4 x half>, <4 x half> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float16x4x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float16x4x4_t [[TMP6]] +// float16x4x4_t test_vld4_f16(float16_t const *a) { +// return vld4_f16(a); +// } + +// NYI-LABEL: @test_vld4_f32( +// NYI: [[RETVAL:%.*]] = alloca %struct.float32x2x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float32x2x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0(ptr %a) +// NYI: store { <2 x float>, <2 x float>, <2 x float>, <2 x float> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float32x2x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float32x2x4_t [[TMP6]] +// float32x2x4_t test_vld4_f32(float32_t const *a) { +// return vld4_f32(a); +// } + +// NYI-LABEL: @test_vld4_f64( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x1x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float64x1x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0(ptr %a) +// NYI: store { <1 x double>, <1 x double>, <1 x double>, <1 x double> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x1x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float64x1x4_t [[TMP6]] +// float64x1x4_t test_vld4_f64(float64_t const *a) { +// return vld4_f64(a); +// } + +// NYI-LABEL: @test_vld4_p8( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly8x8x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.poly8x8x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr %a) +// NYI: store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP5:%.*]] = load %struct.poly8x8x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.poly8x8x4_t [[TMP5]] +// poly8x8x4_t test_vld4_p8(poly8_t const *a) { +// return vld4_p8(a); +// } + +// NYI-LABEL: @test_vld4_p16( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly16x4x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.poly16x4x4_t, align 8 +// NYI: [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr %a) +// NYI: store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly16x4x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.poly16x4x4_t [[TMP6]] +// poly16x4x4_t test_vld4_p16(poly16_t const *a) { +// return vld4_p16(a); +// } + +// NYI-LABEL: @test_vst1q_u8( +// NYI: store <16 x i8> %b, ptr %a +// NYI: ret void +// void test_vst1q_u8(uint8_t *a, uint8x16_t b) { +// vst1q_u8(a, b); +// } + +// NYI-LABEL: @test_vst1q_u16( +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: store <8 x i16> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1q_u16(uint16_t *a, uint16x8_t b) { +// vst1q_u16(a, b); +// } + +// NYI-LABEL: @test_vst1q_u32( +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: store <4 x i32> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1q_u32(uint32_t *a, uint32x4_t b) { +// vst1q_u32(a, b); +// } + +// NYI-LABEL: @test_vst1q_u64( +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: store <2 x i64> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1q_u64(uint64_t *a, uint64x2_t b) { +// vst1q_u64(a, b); +// } + +// NYI-LABEL: @test_vst1q_s8( +// NYI: store <16 x i8> %b, ptr %a +// NYI: ret void +// void test_vst1q_s8(int8_t *a, int8x16_t b) { +// vst1q_s8(a, b); +// } + +// NYI-LABEL: @test_vst1q_s16( +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: store <8 x i16> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1q_s16(int16_t *a, int16x8_t b) { +// vst1q_s16(a, b); +// } + +// NYI-LABEL: @test_vst1q_s32( +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> +// NYI: store <4 x i32> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1q_s32(int32_t *a, int32x4_t b) { +// vst1q_s32(a, b); +// } + +// NYI-LABEL: @test_vst1q_s64( +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> +// NYI: store <2 x i64> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1q_s64(int64_t *a, int64x2_t b) { +// vst1q_s64(a, b); +// } + +// NYI-LABEL: @test_vst1q_f16( +// NYI: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half> +// NYI: store <8 x half> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1q_f16(float16_t *a, float16x8_t b) { +// vst1q_f16(a, b); +// } + +// NYI-LABEL: @test_vst1q_f32( +// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float> +// NYI: store <4 x float> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1q_f32(float32_t *a, float32x4_t b) { +// vst1q_f32(a, b); +// } + +// NYI-LABEL: @test_vst1q_f64( +// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double> +// NYI: store <2 x double> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1q_f64(float64_t *a, float64x2_t b) { +// vst1q_f64(a, b); +// } + +// NYI-LABEL: @test_vst1q_p8( +// NYI: store <16 x i8> %b, ptr %a +// NYI: ret void +// void test_vst1q_p8(poly8_t *a, poly8x16_t b) { +// vst1q_p8(a, b); +// } + +// NYI-LABEL: @test_vst1q_p16( +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// NYI: store <8 x i16> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1q_p16(poly16_t *a, poly16x8_t b) { +// vst1q_p16(a, b); +// } + +// NYI-LABEL: @test_vst1_u8( +// NYI: store <8 x i8> %b, ptr %a +// NYI: ret void +// void test_vst1_u8(uint8_t *a, uint8x8_t b) { +// vst1_u8(a, b); +// } + +// NYI-LABEL: @test_vst1_u16( +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: store <4 x i16> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1_u16(uint16_t *a, uint16x4_t b) { +// vst1_u16(a, b); +// } + +// NYI-LABEL: @test_vst1_u32( +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> +// NYI: store <2 x i32> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1_u32(uint32_t *a, uint32x2_t b) { +// vst1_u32(a, b); +// } + +// NYI-LABEL: @test_vst1_u64( +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: store <1 x i64> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1_u64(uint64_t *a, uint64x1_t b) { +// vst1_u64(a, b); +// } + +// NYI-LABEL: @test_vst1_s8( +// NYI: store <8 x i8> %b, ptr %a +// NYI: ret void +// void test_vst1_s8(int8_t *a, int8x8_t b) { +// vst1_s8(a, b); +// } + +// NYI-LABEL: @test_vst1_s16( +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: store <4 x i16> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1_s16(int16_t *a, int16x4_t b) { +// vst1_s16(a, b); +// } + +// NYI-LABEL: @test_vst1_s32( +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> +// NYI: store <2 x i32> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1_s32(int32_t *a, int32x2_t b) { +// vst1_s32(a, b); +// } + +// NYI-LABEL: @test_vst1_s64( +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: store <1 x i64> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1_s64(int64_t *a, int64x1_t b) { +// vst1_s64(a, b); +// } + +// NYI-LABEL: @test_vst1_f16( +// NYI: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half> +// NYI: store <4 x half> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1_f16(float16_t *a, float16x4_t b) { +// vst1_f16(a, b); +// } + +// NYI-LABEL: @test_vst1_f32( +// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float> +// NYI: store <2 x float> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1_f32(float32_t *a, float32x2_t b) { +// vst1_f32(a, b); +// } + +// NYI-LABEL: @test_vst1_f64( +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> +// NYI: store <1 x double> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1_f64(float64_t *a, float64x1_t b) { +// vst1_f64(a, b); +// } + +// NYI-LABEL: @test_vst1_p8( +// NYI: store <8 x i8> %b, ptr %a +// NYI: ret void +// void test_vst1_p8(poly8_t *a, poly8x8_t b) { +// vst1_p8(a, b); +// } + +// NYI-LABEL: @test_vst1_p16( +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// NYI: store <4 x i16> [[TMP3]], ptr %a +// NYI: ret void +// void test_vst1_p16(poly16_t *a, poly16x4_t b) { +// vst1_p16(a, b); +// } + +// NYI-LABEL: @test_vst2q_u8( +// NYI: [[B:%.*]] = alloca %struct.uint8x16x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint8x16x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <16 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16 +// NYI: call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], ptr %a) +// NYI: ret void +// void test_vst2q_u8(uint8_t *a, uint8x16x2_t b) { +// vst2q_u8(a, b); +// } + +// NYI-LABEL: @test_vst2q_u16( +// NYI: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <8 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16> +// NYI: call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP7]], <8 x i16> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2q_u16(uint16_t *a, uint16x8x2_t b) { +// vst2q_u16(a, b); +// } + +// NYI-LABEL: @test_vst2q_u32( +// NYI: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <4 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x i32> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32> +// NYI: call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[TMP7]], <4 x i32> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2q_u32(uint32_t *a, uint32x4x2_t b) { +// vst2q_u32(a, b); +// } + +// NYI-LABEL: @test_vst2q_u64( +// NYI: [[B:%.*]] = alloca %struct.uint64x2x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint64x2x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <2 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint64x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint64x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x i64> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x i64> +// NYI: call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[TMP7]], <2 x i64> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2q_u64(uint64_t *a, uint64x2x2_t b) { +// vst2q_u64(a, b); +// } + +// NYI-LABEL: @test_vst2q_s8( +// NYI: [[B:%.*]] = alloca %struct.int8x16x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int8x16x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <16 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int8x16x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int8x16x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16 +// NYI: call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], ptr %a) +// NYI: ret void +// void test_vst2q_s8(int8_t *a, int8x16x2_t b) { +// vst2q_s8(a, b); +// } + +// NYI-LABEL: @test_vst2q_s16( +// NYI: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <8 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16> +// NYI: call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP7]], <8 x i16> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2q_s16(int16_t *a, int16x8x2_t b) { +// vst2q_s16(a, b); +// } + +// NYI-LABEL: @test_vst2q_s32( +// NYI: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <4 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x i32> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32> +// NYI: call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[TMP7]], <4 x i32> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2q_s32(int32_t *a, int32x4x2_t b) { +// vst2q_s32(a, b); +// } + +// NYI-LABEL: @test_vst2q_s64( +// NYI: [[B:%.*]] = alloca %struct.int64x2x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int64x2x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <2 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int64x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int64x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x i64> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x i64> +// NYI: call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[TMP7]], <2 x i64> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2q_s64(int64_t *a, int64x2x2_t b) { +// vst2q_s64(a, b); +// } + +// NYI-LABEL: @test_vst2q_f16( +// NYI: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <8 x half>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x half>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x half>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x half> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x half>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x half> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half> +// NYI: call void @llvm.aarch64.neon.st2.v8f16.p0(<8 x half> [[TMP7]], <8 x half> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2q_f16(float16_t *a, float16x8x2_t b) { +// vst2q_f16(a, b); +// } + +// NYI-LABEL: @test_vst2q_f32( +// NYI: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <4 x float>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x float>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <4 x float> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x float>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <4 x float> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x float> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x float> +// NYI: call void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float> [[TMP7]], <4 x float> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2q_f32(float32_t *a, float32x4x2_t b) { +// vst2q_f32(a, b); +// } + +// NYI-LABEL: @test_vst2q_f64( +// NYI: [[B:%.*]] = alloca %struct.float64x2x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float64x2x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <2 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x double> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x double> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x double> +// NYI: call void @llvm.aarch64.neon.st2.v2f64.p0(<2 x double> [[TMP7]], <2 x double> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2q_f64(float64_t *a, float64x2x2_t b) { +// vst2q_f64(a, b); +// } + +// NYI-LABEL: @test_vst2q_p8( +// NYI: [[B:%.*]] = alloca %struct.poly8x16x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.poly8x16x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <16 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16 +// NYI: call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], ptr %a) +// NYI: ret void +// void test_vst2q_p8(poly8_t *a, poly8x16x2_t b) { +// vst2q_p8(a, b); +// } + +// NYI-LABEL: @test_vst2q_p16( +// NYI: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <8 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16> +// NYI: call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP7]], <8 x i16> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2q_p16(poly16_t *a, poly16x8x2_t b) { +// vst2q_p16(a, b); +// } + +// NYI-LABEL: @test_vst2_u8( +// NYI: [[B:%.*]] = alloca %struct.uint8x8x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint8x8x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <8 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint8x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint8x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8 +// NYI: call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], ptr %a) +// NYI: ret void +// void test_vst2_u8(uint8_t *a, uint8x8x2_t b) { +// vst2_u8(a, b); +// } + +// NYI-LABEL: @test_vst2_u16( +// NYI: [[B:%.*]] = alloca %struct.uint16x4x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint16x4x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <4 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> +// NYI: call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP7]], <4 x i16> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2_u16(uint16_t *a, uint16x4x2_t b) { +// vst2_u16(a, b); +// } + +// NYI-LABEL: @test_vst2_u32( +// NYI: [[B:%.*]] = alloca %struct.uint32x2x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint32x2x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <2 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x i32> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x i32> +// NYI: call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[TMP7]], <2 x i32> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2_u32(uint32_t *a, uint32x2x2_t b) { +// vst2_u32(a, b); +// } + +// NYI-LABEL: @test_vst2_u64( +// NYI: [[B:%.*]] = alloca %struct.uint64x1x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint64x1x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <1 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint64x1x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint64x1x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> +// NYI: call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[TMP7]], <1 x i64> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2_u64(uint64_t *a, uint64x1x2_t b) { +// vst2_u64(a, b); +// } + +// NYI-LABEL: @test_vst2_s8( +// NYI: [[B:%.*]] = alloca %struct.int8x8x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int8x8x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <8 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int8x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int8x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8 +// NYI: call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], ptr %a) +// NYI: ret void +// void test_vst2_s8(int8_t *a, int8x8x2_t b) { +// vst2_s8(a, b); +// } + +// NYI-LABEL: @test_vst2_s16( +// NYI: [[B:%.*]] = alloca %struct.int16x4x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int16x4x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <4 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int16x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> +// NYI: call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP7]], <4 x i16> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2_s16(int16_t *a, int16x4x2_t b) { +// vst2_s16(a, b); +// } + +// NYI-LABEL: @test_vst2_s32( +// NYI: [[B:%.*]] = alloca %struct.int32x2x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int32x2x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <2 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int32x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x i32> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x i32> +// NYI: call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[TMP7]], <2 x i32> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2_s32(int32_t *a, int32x2x2_t b) { +// vst2_s32(a, b); +// } + +// NYI-LABEL: @test_vst2_s64( +// NYI: [[B:%.*]] = alloca %struct.int64x1x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int64x1x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <1 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int64x1x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int64x1x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> +// NYI: call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[TMP7]], <1 x i64> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2_s64(int64_t *a, int64x1x2_t b) { +// vst2_s64(a, b); +// } + +// NYI-LABEL: @test_vst2_f16( +// NYI: [[B:%.*]] = alloca %struct.float16x4x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float16x4x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <4 x half>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float16x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x half>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x half>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x half> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x half>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x half> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half> +// NYI: call void @llvm.aarch64.neon.st2.v4f16.p0(<4 x half> [[TMP7]], <4 x half> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2_f16(float16_t *a, float16x4x2_t b) { +// vst2_f16(a, b); +// } + +// NYI-LABEL: @test_vst2_f32( +// NYI: [[B:%.*]] = alloca %struct.float32x2x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float32x2x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <2 x float>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float32x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x float>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x float>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <2 x float> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x float>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x float>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <2 x float> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x float> +// NYI: call void @llvm.aarch64.neon.st2.v2f32.p0(<2 x float> [[TMP7]], <2 x float> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2_f32(float32_t *a, float32x2x2_t b) { +// vst2_f32(a, b); +// } + +// NYI-LABEL: @test_vst2_f64( +// NYI: [[B:%.*]] = alloca %struct.float64x1x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float64x1x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <1 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x1x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x double> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x1x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x double> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x double> +// NYI: call void @llvm.aarch64.neon.st2.v1f64.p0(<1 x double> [[TMP7]], <1 x double> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2_f64(float64_t *a, float64x1x2_t b) { +// vst2_f64(a, b); +// } + +// NYI-LABEL: @test_vst2_p8( +// NYI: [[B:%.*]] = alloca %struct.poly8x8x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.poly8x8x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <8 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly8x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly8x8x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8 +// NYI: call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], ptr %a) +// NYI: ret void +// void test_vst2_p8(poly8_t *a, poly8x8x2_t b) { +// vst2_p8(a, b); +// } + +// NYI-LABEL: @test_vst2_p16( +// NYI: [[B:%.*]] = alloca %struct.poly16x4x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.poly16x4x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <4 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x4x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> +// NYI: call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP7]], <4 x i16> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst2_p16(poly16_t *a, poly16x4x2_t b) { +// vst2_p16(a, b); +// } + +// NYI-LABEL: @test_vst3q_u8( +// NYI: [[B:%.*]] = alloca %struct.uint8x16x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint8x16x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <16 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16 +// NYI: call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], ptr %a) +// NYI: ret void +// void test_vst3q_u8(uint8_t *a, uint8x16x3_t b) { +// vst3q_u8(a, b); +// } + +// NYI-LABEL: @test_vst3q_u16( +// NYI: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <8 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16> +// NYI: call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP9]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3q_u16(uint16_t *a, uint16x8x3_t b) { +// vst3q_u16(a, b); +// } + +// NYI-LABEL: @test_vst3q_u32( +// NYI: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <4 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x i32> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x i32> +// NYI: call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[TMP9]], <4 x i32> [[TMP10]], <4 x i32> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3q_u32(uint32_t *a, uint32x4x3_t b) { +// vst3q_u32(a, b); +// } + +// NYI-LABEL: @test_vst3q_u64( +// NYI: [[B:%.*]] = alloca %struct.uint64x2x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint64x2x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <2 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <2 x i64> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x i64> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x i64> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x i64> +// NYI: call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[TMP9]], <2 x i64> [[TMP10]], <2 x i64> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3q_u64(uint64_t *a, uint64x2x3_t b) { +// vst3q_u64(a, b); +// } + +// NYI-LABEL: @test_vst3q_s8( +// NYI: [[B:%.*]] = alloca %struct.int8x16x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int8x16x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <16 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int8x16x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int8x16x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int8x16x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16 +// NYI: call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], ptr %a) +// NYI: ret void +// void test_vst3q_s8(int8_t *a, int8x16x3_t b) { +// vst3q_s8(a, b); +// } + +// NYI-LABEL: @test_vst3q_s16( +// NYI: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <8 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16> +// NYI: call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP9]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3q_s16(int16_t *a, int16x8x3_t b) { +// vst3q_s16(a, b); +// } + +// NYI-LABEL: @test_vst3q_s32( +// NYI: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <4 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int32x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x i32> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x i32> +// NYI: call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[TMP9]], <4 x i32> [[TMP10]], <4 x i32> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3q_s32(int32_t *a, int32x4x3_t b) { +// vst3q_s32(a, b); +// } + +// NYI-LABEL: @test_vst3q_s64( +// NYI: [[B:%.*]] = alloca %struct.int64x2x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int64x2x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <2 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <2 x i64> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x i64> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x i64> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x i64> +// NYI: call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[TMP9]], <2 x i64> [[TMP10]], <2 x i64> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3q_s64(int64_t *a, int64x2x3_t b) { +// vst3q_s64(a, b); +// } + +// NYI-LABEL: @test_vst3q_f16( +// NYI: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <8 x half>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x half>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x half>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x half> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x half>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x half>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x half> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <8 x half>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <8 x half> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half> +// NYI: call void @llvm.aarch64.neon.st3.v8f16.p0(<8 x half> [[TMP9]], <8 x half> [[TMP10]], <8 x half> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3q_f16(float16_t *a, float16x8x3_t b) { +// vst3q_f16(a, b); +// } + +// NYI-LABEL: @test_vst3q_f32( +// NYI: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <4 x float>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x float>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <4 x float> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x float>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <4 x float> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float32x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x float>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <4 x float> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x float> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x float> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x float> +// NYI: call void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float> [[TMP9]], <4 x float> [[TMP10]], <4 x float> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3q_f32(float32_t *a, float32x4x3_t b) { +// vst3q_f32(a, b); +// } + +// NYI-LABEL: @test_vst3q_f64( +// NYI: [[B:%.*]] = alloca %struct.float64x2x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float64x2x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <2 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x double> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x double> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <2 x double> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x double> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x double> +// NYI: call void @llvm.aarch64.neon.st3.v2f64.p0(<2 x double> [[TMP9]], <2 x double> [[TMP10]], <2 x double> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3q_f64(float64_t *a, float64x2x3_t b) { +// vst3q_f64(a, b); +// } + +// NYI-LABEL: @test_vst3q_p8( +// NYI: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.poly8x16x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <16 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16 +// NYI: call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], ptr %a) +// NYI: ret void +// void test_vst3q_p8(poly8_t *a, poly8x16x3_t b) { +// vst3q_p8(a, b); +// } + +// NYI-LABEL: @test_vst3q_p16( +// NYI: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <8 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16> +// NYI: call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP9]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3q_p16(poly16_t *a, poly16x8x3_t b) { +// vst3q_p16(a, b); +// } + +// NYI-LABEL: @test_vst3_u8( +// NYI: [[B:%.*]] = alloca %struct.uint8x8x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint8x8x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <8 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint8x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint8x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint8x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8 +// NYI: call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], ptr %a) +// NYI: ret void +// void test_vst3_u8(uint8_t *a, uint8x8x3_t b) { +// vst3_u8(a, b); +// } + +// NYI-LABEL: @test_vst3_u16( +// NYI: [[B:%.*]] = alloca %struct.uint16x4x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint16x4x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <4 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <4 x i16> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16> +// NYI: call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3_u16(uint16_t *a, uint16x4x3_t b) { +// vst3_u16(a, b); +// } + +// NYI-LABEL: @test_vst3_u32( +// NYI: [[B:%.*]] = alloca %struct.uint32x2x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint32x2x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <2 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint32x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <2 x i32> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x i32> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x i32> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <2 x i32> +// NYI: call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[TMP9]], <2 x i32> [[TMP10]], <2 x i32> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3_u32(uint32_t *a, uint32x2x3_t b) { +// vst3_u32(a, b); +// } + +// NYI-LABEL: @test_vst3_u64( +// NYI: [[B:%.*]] = alloca %struct.uint64x1x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint64x1x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <1 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <1 x i64> +// NYI: call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[TMP9]], <1 x i64> [[TMP10]], <1 x i64> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3_u64(uint64_t *a, uint64x1x3_t b) { +// vst3_u64(a, b); +// } + +// NYI-LABEL: @test_vst3_s8( +// NYI: [[B:%.*]] = alloca %struct.int8x8x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int8x8x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <8 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int8x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int8x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int8x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8 +// NYI: call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], ptr %a) +// NYI: ret void +// void test_vst3_s8(int8_t *a, int8x8x3_t b) { +// vst3_s8(a, b); +// } + +// NYI-LABEL: @test_vst3_s16( +// NYI: [[B:%.*]] = alloca %struct.int16x4x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int16x4x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <4 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <4 x i16> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16> +// NYI: call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3_s16(int16_t *a, int16x4x3_t b) { +// vst3_s16(a, b); +// } + +// NYI-LABEL: @test_vst3_s32( +// NYI: [[B:%.*]] = alloca %struct.int32x2x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int32x2x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <2 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int32x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int32x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <2 x i32> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x i32> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x i32> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <2 x i32> +// NYI: call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[TMP9]], <2 x i32> [[TMP10]], <2 x i32> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3_s32(int32_t *a, int32x2x3_t b) { +// vst3_s32(a, b); +// } + +// NYI-LABEL: @test_vst3_s64( +// NYI: [[B:%.*]] = alloca %struct.int64x1x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int64x1x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <1 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <1 x i64> +// NYI: call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[TMP9]], <1 x i64> [[TMP10]], <1 x i64> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3_s64(int64_t *a, int64x1x3_t b) { +// vst3_s64(a, b); +// } + +// NYI-LABEL: @test_vst3_f16( +// NYI: [[B:%.*]] = alloca %struct.float16x4x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float16x4x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <4 x half>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x half>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x half>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x half> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x half>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x half>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x half> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x half>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <4 x half> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half> +// NYI: call void @llvm.aarch64.neon.st3.v4f16.p0(<4 x half> [[TMP9]], <4 x half> [[TMP10]], <4 x half> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3_f16(float16_t *a, float16x4x3_t b) { +// vst3_f16(a, b); +// } + +// NYI-LABEL: @test_vst3_f32( +// NYI: [[B:%.*]] = alloca %struct.float32x2x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float32x2x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <2 x float>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float32x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x float>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x float>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <2 x float> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x float>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x float>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <2 x float> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float32x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x float>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x float>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <2 x float> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x float> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <2 x float> +// NYI: call void @llvm.aarch64.neon.st3.v2f32.p0(<2 x float> [[TMP9]], <2 x float> [[TMP10]], <2 x float> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3_f32(float32_t *a, float32x2x3_t b) { +// vst3_f32(a, b); +// } + +// NYI-LABEL: @test_vst3_f64( +// NYI: [[B:%.*]] = alloca %struct.float64x1x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float64x1x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <1 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x double> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x double> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <1 x double> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x double> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <1 x double> +// NYI: call void @llvm.aarch64.neon.st3.v1f64.p0(<1 x double> [[TMP9]], <1 x double> [[TMP10]], <1 x double> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3_f64(float64_t *a, float64x1x3_t b) { +// vst3_f64(a, b); +// } + +// NYI-LABEL: @test_vst3_p8( +// NYI: [[B:%.*]] = alloca %struct.poly8x8x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.poly8x8x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <8 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly8x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly8x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly8x8x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8 +// NYI: call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], ptr %a) +// NYI: ret void +// void test_vst3_p8(poly8_t *a, poly8x8x3_t b) { +// vst3_p8(a, b); +// } + +// NYI-LABEL: @test_vst3_p16( +// NYI: [[B:%.*]] = alloca %struct.poly16x4x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.poly16x4x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <4 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly16x4x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <4 x i16> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16> +// NYI: call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst3_p16(poly16_t *a, poly16x4x3_t b) { +// vst3_p16(a, b); +// } + +// NYI-LABEL: @test_vst4q_u8( +// NYI: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint8x16x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <16 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP5:%.*]] = load <16 x i8>, ptr [[ARRAYIDX6]], align 16 +// NYI: call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], ptr %a) +// NYI: ret void +// void test_vst4q_u8(uint8_t *a, uint8x16x4_t b) { +// vst4q_u8(a, b); +// } + +// NYI-LABEL: @test_vst4q_u16( +// NYI: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <8 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <8 x i16>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x i16> +// NYI: call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP11]], <8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4q_u16(uint16_t *a, uint16x8x4_t b) { +// vst4q_u16(a, b); +// } + +// NYI-LABEL: @test_vst4q_u32( +// NYI: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <4 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <4 x i32>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <4 x i32> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x i32> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x i32> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <4 x i32> +// NYI: call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[TMP11]], <4 x i32> [[TMP12]], <4 x i32> [[TMP13]], <4 x i32> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4q_u32(uint32_t *a, uint32x4x4_t b) { +// vst4q_u32(a, b); +// } + +// NYI-LABEL: @test_vst4q_u64( +// NYI: [[B:%.*]] = alloca %struct.uint64x2x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.uint64x2x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <2 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <2 x i64> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.uint64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <2 x i64>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <2 x i64> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x i64> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x i64> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x i64> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <2 x i64> +// NYI: call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[TMP11]], <2 x i64> [[TMP12]], <2 x i64> [[TMP13]], <2 x i64> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4q_u64(uint64_t *a, uint64x2x4_t b) { +// vst4q_u64(a, b); +// } + +// NYI-LABEL: @test_vst4q_s8( +// NYI: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int8x16x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <16 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.int8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP5:%.*]] = load <16 x i8>, ptr [[ARRAYIDX6]], align 16 +// NYI: call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], ptr %a) +// NYI: ret void +// void test_vst4q_s8(int8_t *a, int8x16x4_t b) { +// vst4q_s8(a, b); +// } + +// NYI-LABEL: @test_vst4q_s16( +// NYI: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <8 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.int16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <8 x i16>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x i16> +// NYI: call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP11]], <8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4q_s16(int16_t *a, int16x8x4_t b) { +// vst4q_s16(a, b); +// } + +// NYI-LABEL: @test_vst4q_s32( +// NYI: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <4 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.int32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <4 x i32>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <4 x i32> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x i32> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x i32> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x i32> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <4 x i32> +// NYI: call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[TMP11]], <4 x i32> [[TMP12]], <4 x i32> [[TMP13]], <4 x i32> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4q_s32(int32_t *a, int32x4x4_t b) { +// vst4q_s32(a, b); +// } + +// NYI-LABEL: @test_vst4q_s64( +// NYI: [[B:%.*]] = alloca %struct.int64x2x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.int64x2x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <2 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <2 x i64> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.int64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <2 x i64>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <2 x i64> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x i64> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x i64> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x i64> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <2 x i64> +// NYI: call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[TMP11]], <2 x i64> [[TMP12]], <2 x i64> [[TMP13]], <2 x i64> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4q_s64(int64_t *a, int64x2x4_t b) { +// vst4q_s64(a, b); +// } + +// NYI-LABEL: @test_vst4q_f16( +// NYI: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <8 x half>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x half>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x half> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x half>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x half> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <8 x half>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <8 x half> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.float16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <8 x half>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <8 x half> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x half> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x half> +// NYI: call void @llvm.aarch64.neon.st4.v8f16.p0(<8 x half> [[TMP11]], <8 x half> [[TMP12]], <8 x half> [[TMP13]], <8 x half> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4q_f16(float16_t *a, float16x8x4_t b) { +// vst4q_f16(a, b); +// } + +// NYI-LABEL: @test_vst4q_f32( +// NYI: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <4 x float>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <4 x float> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <4 x float> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <4 x float> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.float32x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <4 x float>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <4 x float> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x float> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x float> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <4 x float> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <4 x float> +// NYI: call void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float> [[TMP11]], <4 x float> [[TMP12]], <4 x float> [[TMP13]], <4 x float> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4q_f32(float32_t *a, float32x4x4_t b) { +// vst4q_f32(a, b); +// } + +// NYI-LABEL: @test_vst4q_f64( +// NYI: [[B:%.*]] = alloca %struct.float64x2x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float64x2x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <2 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x double> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x double> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <2 x double> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.float64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <2 x double>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <2 x double> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x double> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x double> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <2 x double> +// NYI: call void @llvm.aarch64.neon.st4.v2f64.p0(<2 x double> [[TMP11]], <2 x double> [[TMP12]], <2 x double> [[TMP13]], <2 x double> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4q_f64(float64_t *a, float64x2x4_t b) { +// vst4q_f64(a, b); +// } + +// NYI-LABEL: @test_vst4q_p8( +// NYI: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.poly8x16x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <16 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP5:%.*]] = load <16 x i8>, ptr [[ARRAYIDX6]], align 16 +// NYI: call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], ptr %a) +// NYI: ret void +// void test_vst4q_p8(poly8_t *a, poly8x16x4_t b) { +// vst4q_p8(a, b); +// } + +// NYI-LABEL: @test_vst4q_p16( +// NYI: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <8 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <8 x i16>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x i16> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x i16> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <8 x i16> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <8 x i16> +// NYI: call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP11]], <8 x i16> [[TMP12]], <8 x i16> [[TMP13]], <8 x i16> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4q_p16(poly16_t *a, poly16x8x4_t b) { +// vst4q_p16(a, b); +// } + +// NYI-LABEL: @test_vst4_u8( +// NYI: [[B:%.*]] = alloca %struct.uint8x8x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint8x8x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <8 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP5:%.*]] = load <8 x i8>, ptr [[ARRAYIDX6]], align 8 +// NYI: call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], <8 x i8> [[TMP5]], ptr %a) +// NYI: ret void +// void test_vst4_u8(uint8_t *a, uint8x8x4_t b) { +// vst4_u8(a, b); +// } + +// NYI-LABEL: @test_vst4_u16( +// NYI: [[B:%.*]] = alloca %struct.uint16x4x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint16x4x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <4 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <4 x i16> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.uint16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <4 x i16>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <4 x i16> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16> +// NYI: call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP11]], <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4_u16(uint16_t *a, uint16x4x4_t b) { +// vst4_u16(a, b); +// } + +// NYI-LABEL: @test_vst4_u32( +// NYI: [[B:%.*]] = alloca %struct.uint32x2x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint32x2x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <2 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <2 x i32> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.uint32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <2 x i32>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <2 x i32> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x i32> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x i32> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <2 x i32> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <2 x i32> +// NYI: call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[TMP11]], <2 x i32> [[TMP12]], <2 x i32> [[TMP13]], <2 x i32> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4_u32(uint32_t *a, uint32x2x4_t b) { +// vst4_u32(a, b); +// } + +// NYI-LABEL: @test_vst4_u64( +// NYI: [[B:%.*]] = alloca %struct.uint64x1x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.uint64x1x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <1 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.uint64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.uint64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.uint64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.uint64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <1 x i64>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <1 x i64> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <1 x i64> +// NYI: call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[TMP11]], <1 x i64> [[TMP12]], <1 x i64> [[TMP13]], <1 x i64> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4_u64(uint64_t *a, uint64x1x4_t b) { +// vst4_u64(a, b); +// } + +// NYI-LABEL: @test_vst4_s8( +// NYI: [[B:%.*]] = alloca %struct.int8x8x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int8x8x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <8 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.int8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP5:%.*]] = load <8 x i8>, ptr [[ARRAYIDX6]], align 8 +// NYI: call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], <8 x i8> [[TMP5]], ptr %a) +// NYI: ret void +// void test_vst4_s8(int8_t *a, int8x8x4_t b) { +// vst4_s8(a, b); +// } + +// NYI-LABEL: @test_vst4_s16( +// NYI: [[B:%.*]] = alloca %struct.int16x4x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int16x4x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <4 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <4 x i16> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.int16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <4 x i16>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <4 x i16> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16> +// NYI: call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP11]], <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4_s16(int16_t *a, int16x4x4_t b) { +// vst4_s16(a, b); +// } + +// NYI-LABEL: @test_vst4_s32( +// NYI: [[B:%.*]] = alloca %struct.int32x2x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int32x2x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <2 x i32>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <2 x i32> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.int32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <2 x i32>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <2 x i32> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x i32> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x i32> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <2 x i32> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <2 x i32> +// NYI: call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[TMP11]], <2 x i32> [[TMP12]], <2 x i32> [[TMP13]], <2 x i32> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4_s32(int32_t *a, int32x2x4_t b) { +// vst4_s32(a, b); +// } + +// NYI-LABEL: @test_vst4_s64( +// NYI: [[B:%.*]] = alloca %struct.int64x1x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.int64x1x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <1 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.int64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.int64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.int64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.int64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <1 x i64>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <1 x i64> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <1 x i64> +// NYI: call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[TMP11]], <1 x i64> [[TMP12]], <1 x i64> [[TMP13]], <1 x i64> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4_s64(int64_t *a, int64x1x4_t b) { +// vst4_s64(a, b); +// } + +// NYI-LABEL: @test_vst4_f16( +// NYI: [[B:%.*]] = alloca %struct.float16x4x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float16x4x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <4 x half>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x half>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x half> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x half>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x half> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x half>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <4 x half> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.float16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <4 x half>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <4 x half> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x half> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x half> +// NYI: call void @llvm.aarch64.neon.st4.v4f16.p0(<4 x half> [[TMP11]], <4 x half> [[TMP12]], <4 x half> [[TMP13]], <4 x half> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4_f16(float16_t *a, float16x4x4_t b) { +// vst4_f16(a, b); +// } + +// NYI-LABEL: @test_vst4_f32( +// NYI: [[B:%.*]] = alloca %struct.float32x2x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float32x2x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <2 x float>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x float>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <2 x float> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x float>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <2 x float> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x float>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <2 x float> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.float32x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <2 x float>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <2 x float> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x float> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <2 x float> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <2 x float> +// NYI: call void @llvm.aarch64.neon.st4.v2f32.p0(<2 x float> [[TMP11]], <2 x float> [[TMP12]], <2 x float> [[TMP13]], <2 x float> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4_f32(float32_t *a, float32x2x4_t b) { +// vst4_f32(a, b); +// } + +// NYI-LABEL: @test_vst4_f64( +// NYI: [[B:%.*]] = alloca %struct.float64x1x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float64x1x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <1 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x double> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x double> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <1 x double> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.float64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <1 x double>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <1 x double> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x double> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <1 x double> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <1 x double> +// NYI: call void @llvm.aarch64.neon.st4.v1f64.p0(<1 x double> [[TMP11]], <1 x double> [[TMP12]], <1 x double> [[TMP13]], <1 x double> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4_f64(float64_t *a, float64x1x4_t b) { +// vst4_f64(a, b); +// } + +// NYI-LABEL: @test_vst4_p8( +// NYI: [[B:%.*]] = alloca %struct.poly8x8x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.poly8x8x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <8 x i8>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP2:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8 +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP3:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP4:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP5:%.*]] = load <8 x i8>, ptr [[ARRAYIDX6]], align 8 +// NYI: call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], <8 x i8> [[TMP5]], ptr %a) +// NYI: ret void +// void test_vst4_p8(poly8_t *a, poly8x8x4_t b) { +// vst4_p8(a, b); +// } + +// NYI-LABEL: @test_vst4_p16( +// NYI: [[B:%.*]] = alloca %struct.poly16x4x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.poly16x4x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <4 x i16>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <4 x i16> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.poly16x4x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <4 x i16>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <4 x i16> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <4 x i16> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <4 x i16> +// NYI: call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP11]], <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i16> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst4_p16(poly16_t *a, poly16x4x4_t b) { +// vst4_p16(a, b); +// } + +// NYI-LABEL: @test_vld1q_f64_x2( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x2x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float64x2x2_t, align 16 +// NYI: [[VLD1XN:%.*]] = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0(ptr %a) +// NYI: store { <2 x double>, <2 x double> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x2x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float64x2x2_t [[TMP6]] +// float64x2x2_t test_vld1q_f64_x2(float64_t const *a) { +// return vld1q_f64_x2(a); +// } + +// NYI-LABEL: @test_vld1q_p64_x2( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly64x2x2_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.poly64x2x2_t, align 16 +// NYI: [[VLD1XN:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0(ptr %a) +// NYI: store { <2 x i64>, <2 x i64> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly64x2x2_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.poly64x2x2_t [[TMP6]] +// poly64x2x2_t test_vld1q_p64_x2(poly64_t const *a) { +// return vld1q_p64_x2(a); +// } + +// NYI-LABEL: @test_vld1_f64_x2( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x1x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float64x1x2_t, align 8 +// NYI: [[VLD1XN:%.*]] = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0(ptr %a) +// NYI: store { <1 x double>, <1 x double> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x1x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float64x1x2_t [[TMP6]] +// float64x1x2_t test_vld1_f64_x2(float64_t const *a) { +// return vld1_f64_x2(a); +// } + +// NYI-LABEL: @test_vld1_p64_x2( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.poly64x1x2_t, align 8 +// NYI: [[VLD1XN:%.*]] = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0(ptr %a) +// NYI: store { <1 x i64>, <1 x i64> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 16, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly64x1x2_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.poly64x1x2_t [[TMP6]] +// poly64x1x2_t test_vld1_p64_x2(poly64_t const *a) { +// return vld1_p64_x2(a); +// } + +// NYI-LABEL: @test_vld1q_f64_x3( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x2x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float64x2x3_t, align 16 +// NYI: [[VLD1XN:%.*]] = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0(ptr %a) +// NYI: store { <2 x double>, <2 x double>, <2 x double> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x2x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float64x2x3_t [[TMP6]] +// float64x2x3_t test_vld1q_f64_x3(float64_t const *a) { +// return vld1q_f64_x3(a); +// } + +// NYI-LABEL: @test_vld1q_p64_x3( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.poly64x2x3_t, align 16 +// NYI: [[VLD1XN:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0(ptr %a) +// NYI: store { <2 x i64>, <2 x i64>, <2 x i64> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 48, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly64x2x3_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.poly64x2x3_t [[TMP6]] +// poly64x2x3_t test_vld1q_p64_x3(poly64_t const *a) { +// return vld1q_p64_x3(a); +// } + +// NYI-LABEL: @test_vld1_f64_x3( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x1x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float64x1x3_t, align 8 +// NYI: [[VLD1XN:%.*]] = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0(ptr %a) +// NYI: store { <1 x double>, <1 x double>, <1 x double> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x1x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float64x1x3_t [[TMP6]] +// float64x1x3_t test_vld1_f64_x3(float64_t const *a) { +// return vld1_f64_x3(a); +// } + +// NYI-LABEL: @test_vld1_p64_x3( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly64x1x3_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.poly64x1x3_t, align 8 +// NYI: [[VLD1XN:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0(ptr %a) +// NYI: store { <1 x i64>, <1 x i64>, <1 x i64> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 24, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly64x1x3_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.poly64x1x3_t [[TMP6]] +// poly64x1x3_t test_vld1_p64_x3(poly64_t const *a) { +// return vld1_p64_x3(a); +// } + +// NYI-LABEL: @test_vld1q_f64_x4( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x2x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.float64x2x4_t, align 16 +// NYI: [[VLD1XN:%.*]] = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0(ptr %a) +// NYI: store { <2 x double>, <2 x double>, <2 x double>, <2 x double> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x2x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.float64x2x4_t [[TMP6]] +// float64x2x4_t test_vld1q_f64_x4(float64_t const *a) { +// return vld1q_f64_x4(a); +// } + +// NYI-LABEL: @test_vld1q_p64_x4( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 +// NYI: [[__RET:%.*]] = alloca %struct.poly64x2x4_t, align 16 +// NYI: [[VLD1XN:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0(ptr %a) +// NYI: store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[__RET]], i64 64, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly64x2x4_t, ptr [[RETVAL]], align 16 +// NYI: ret %struct.poly64x2x4_t [[TMP6]] +// poly64x2x4_t test_vld1q_p64_x4(poly64_t const *a) { +// return vld1q_p64_x4(a); +// } + +// NYI-LABEL: @test_vld1_f64_x4( +// NYI: [[RETVAL:%.*]] = alloca %struct.float64x1x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.float64x1x4_t, align 8 +// NYI: [[VLD1XN:%.*]] = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0(ptr %a) +// NYI: store { <1 x double>, <1 x double>, <1 x double>, <1 x double> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.float64x1x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.float64x1x4_t [[TMP6]] +// float64x1x4_t test_vld1_f64_x4(float64_t const *a) { +// return vld1_f64_x4(a); +// } + +// NYI-LABEL: @test_vld1_p64_x4( +// NYI: [[RETVAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 +// NYI: [[__RET:%.*]] = alloca %struct.poly64x1x4_t, align 8 +// NYI: [[VLD1XN:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0(ptr %a) +// NYI: store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } [[VLD1XN]], ptr [[__RET]] +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[__RET]], i64 32, i1 false) +// NYI: [[TMP6:%.*]] = load %struct.poly64x1x4_t, ptr [[RETVAL]], align 8 +// NYI: ret %struct.poly64x1x4_t [[TMP6]] +// poly64x1x4_t test_vld1_p64_x4(poly64_t const *a) { +// return vld1_p64_x4(a); +// } + +// NYI-LABEL: @test_vst1q_f64_x2( +// NYI: [[B:%.*]] = alloca %struct.float64x2x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float64x2x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <2 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x double> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x double> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x double> +// NYI: call void @llvm.aarch64.neon.st1x2.v2f64.p0(<2 x double> [[TMP7]], <2 x double> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst1q_f64_x2(float64_t *a, float64x2x2_t b) { +// vst1q_f64_x2(a, b); +// } + +// NYI-LABEL: @test_vst1q_p64_x2( +// NYI: [[B:%.*]] = alloca %struct.poly64x2x2_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.poly64x2x2_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <2 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly64x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly64x2x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP5]] to <16 x i8> +// NYI: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x i64> +// NYI: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x i64> +// NYI: call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> [[TMP7]], <2 x i64> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst1q_p64_x2(poly64_t *a, poly64x2x2_t b) { +// vst1q_p64_x2(a, b); +// } + +// NYI-LABEL: @test_vst1_f64_x2( +// NYI: [[B:%.*]] = alloca %struct.float64x1x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float64x1x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <1 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x1x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x double> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x1x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x double> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x double> +// NYI: call void @llvm.aarch64.neon.st1x2.v1f64.p0(<1 x double> [[TMP7]], <1 x double> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst1_f64_x2(float64_t *a, float64x1x2_t b) { +// vst1_f64_x2(a, b); +// } + +// NYI-LABEL: @test_vst1_p64_x2( +// NYI: [[B:%.*]] = alloca %struct.poly64x1x2_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.poly64x1x2_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x2_t, ptr [[B]], i32 0, i32 0 +// NYI: store [2 x <1 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 16, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly64x1x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly64x1x2_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8> +// NYI: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> +// NYI: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> +// NYI: call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> [[TMP7]], <1 x i64> [[TMP8]], ptr %a) +// NYI: ret void +// void test_vst1_p64_x2(poly64_t *a, poly64x1x2_t b) { +// vst1_p64_x2(a, b); +// } + +// NYI-LABEL: @test_vst1q_f64_x3( +// NYI: [[B:%.*]] = alloca %struct.float64x2x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float64x2x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <2 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x double> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x double> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <2 x double> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x double> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x double> +// NYI: call void @llvm.aarch64.neon.st1x3.v2f64.p0(<2 x double> [[TMP9]], <2 x double> [[TMP10]], <2 x double> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst1q_f64_x3(float64_t *a, float64x2x3_t b) { +// vst1q_f64_x3(a, b); +// } + +// NYI-LABEL: @test_vst1q_p64_x3( +// NYI: [[B:%.*]] = alloca %struct.poly64x2x3_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.poly64x2x3_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <2 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 48, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly64x2x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <2 x i64> [[TMP7]] to <16 x i8> +// NYI: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x i64> +// NYI: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x i64> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x i64> +// NYI: call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> [[TMP9]], <2 x i64> [[TMP10]], <2 x i64> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst1q_p64_x3(poly64_t *a, poly64x2x3_t b) { +// vst1q_p64_x3(a, b); +// } + +// NYI-LABEL: @test_vst1_f64_x3( +// NYI: [[B:%.*]] = alloca %struct.float64x1x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float64x1x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <1 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x double> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x double> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <1 x double> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x double> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <1 x double> +// NYI: call void @llvm.aarch64.neon.st1x3.v1f64.p0(<1 x double> [[TMP9]], <1 x double> [[TMP10]], <1 x double> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst1_f64_x3(float64_t *a, float64x1x3_t b) { +// vst1_f64_x3(a, b); +// } + +// NYI-LABEL: @test_vst1_p64_x3( +// NYI: [[B:%.*]] = alloca %struct.poly64x1x3_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.poly64x1x3_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x3_t, ptr [[B]], i32 0, i32 0 +// NYI: store [3 x <1 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 24, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly64x1x3_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <8 x i8> +// NYI: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> +// NYI: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP8]] to <1 x i64> +// NYI: call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> [[TMP9]], <1 x i64> [[TMP10]], <1 x i64> [[TMP11]], ptr %a) +// NYI: ret void +// void test_vst1_p64_x3(poly64_t *a, poly64x1x3_t b) { +// vst1_p64_x3(a, b); +// } + +// NYI-LABEL: @test_vst1q_f64_x4( +// NYI: [[B:%.*]] = alloca %struct.float64x2x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.float64x2x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <2 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x double> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x double> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <2 x double> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.float64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <2 x double>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <2 x double> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x double> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x double> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <2 x double> +// NYI: call void @llvm.aarch64.neon.st1x4.v2f64.p0(<2 x double> [[TMP11]], <2 x double> [[TMP12]], <2 x double> [[TMP13]], <2 x double> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst1q_f64_x4(float64_t *a, float64x2x4_t b) { +// vst1q_f64_x4(a, b); +// } + +// NYI-LABEL: @test_vst1q_p64_x4( +// NYI: [[B:%.*]] = alloca %struct.poly64x2x4_t, align 16 +// NYI: [[__S1:%.*]] = alloca %struct.poly64x2x4_t, align 16 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <2 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 16 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[__S1]], ptr align 16 [[B]], i64 64, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16 +// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP3]] to <16 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16 +// NYI: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP5]] to <16 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16 +// NYI: [[TMP8:%.*]] = bitcast <2 x i64> [[TMP7]] to <16 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <2 x i64>, ptr [[ARRAYIDX6]], align 16 +// NYI: [[TMP10:%.*]] = bitcast <2 x i64> [[TMP9]] to <16 x i8> +// NYI: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x i64> +// NYI: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP6]] to <2 x i64> +// NYI: [[TMP13:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x i64> +// NYI: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP10]] to <2 x i64> +// NYI: call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> [[TMP11]], <2 x i64> [[TMP12]], <2 x i64> [[TMP13]], <2 x i64> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst1q_p64_x4(poly64_t *a, poly64x2x4_t b) { +// vst1q_p64_x4(a, b); +// } + +// NYI-LABEL: @test_vst1_f64_x4( +// NYI: [[B:%.*]] = alloca %struct.float64x1x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.float64x1x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <1 x double>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.float64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x double> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.float64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x double> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.float64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <1 x double> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.float64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <1 x double>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <1 x double> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x double> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <1 x double> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <1 x double> +// NYI: call void @llvm.aarch64.neon.st1x4.v1f64.p0(<1 x double> [[TMP11]], <1 x double> [[TMP12]], <1 x double> [[TMP13]], <1 x double> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst1_f64_x4(float64_t *a, float64x1x4_t b) { +// vst1_f64_x4(a, b); +// } + +// NYI-LABEL: @test_vst1_p64_x4( +// NYI: [[B:%.*]] = alloca %struct.poly64x1x4_t, align 8 +// NYI: [[__S1:%.*]] = alloca %struct.poly64x1x4_t, align 8 +// NYI: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, ptr [[B]], i32 0, i32 0 +// NYI: store [4 x <1 x i64>] [[B]].coerce, ptr [[COERCE_DIVE]], align 8 +// NYI: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[__S1]], ptr align 8 [[B]], i64 32, i1 false) +// NYI: [[VAL:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL]], i64 0, i64 0 +// NYI: [[TMP3:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8 +// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> +// NYI: [[VAL1:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1 +// NYI: [[TMP5:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8 +// NYI: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8> +// NYI: [[VAL3:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2 +// NYI: [[TMP7:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8 +// NYI: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <8 x i8> +// NYI: [[VAL5:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, ptr [[__S1]], i32 0, i32 0 +// NYI: [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL5]], i64 0, i64 3 +// NYI: [[TMP9:%.*]] = load <1 x i64>, ptr [[ARRAYIDX6]], align 8 +// NYI: [[TMP10:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8> +// NYI: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> +// NYI: [[TMP12:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> +// NYI: [[TMP13:%.*]] = bitcast <8 x i8> [[TMP8]] to <1 x i64> +// NYI: [[TMP14:%.*]] = bitcast <8 x i8> [[TMP10]] to <1 x i64> +// NYI: call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> [[TMP11]], <1 x i64> [[TMP12]], <1 x i64> [[TMP13]], <1 x i64> [[TMP14]], ptr %a) +// NYI: ret void +// void test_vst1_p64_x4(poly64_t *a, poly64x1x4_t b) { +// vst1_p64_x4(a, b); +// } + +// NYI-LABEL: @test_vceqd_s64( +// NYI: [[TMP0:%.*]] = icmp eq i64 %a, %b +// NYI: [[VCEQD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQD_I]] +// uint64_t test_vceqd_s64(int64_t a, int64_t b) { +// return (uint64_t)vceqd_s64(a, b); +// } + +// NYI-LABEL: @test_vceqd_u64( +// NYI: [[TMP0:%.*]] = icmp eq i64 %a, %b +// NYI: [[VCEQD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQD_I]] +// uint64_t test_vceqd_u64(uint64_t a, uint64_t b) { +// return (int64_t)vceqd_u64(a, b); +// } + +// NYI-LABEL: @test_vceqzd_s64( +// NYI: [[TMP0:%.*]] = icmp eq i64 %a, 0 +// NYI: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQZ_I]] +// uint64_t test_vceqzd_s64(int64_t a) { +// return (uint64_t)vceqzd_s64(a); +// } + +// NYI-LABEL: @test_vceqzd_u64( +// NYI: [[TMP0:%.*]] = icmp eq i64 %a, 0 +// NYI: [[VCEQZD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQZD_I]] +// int64_t test_vceqzd_u64(int64_t a) { +// return (int64_t)vceqzd_u64(a); +// } + +// NYI-LABEL: @test_vcged_s64( +// NYI: [[TMP0:%.*]] = icmp sge i64 %a, %b +// NYI: [[VCEQD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQD_I]] +// uint64_t test_vcged_s64(int64_t a, int64_t b) { +// return (uint64_t)vcged_s64(a, b); +// } + +// NYI-LABEL: @test_vcged_u64( +// NYI: [[TMP0:%.*]] = icmp uge i64 %a, %b +// NYI: [[VCEQD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQD_I]] +// uint64_t test_vcged_u64(uint64_t a, uint64_t b) { +// return (uint64_t)vcged_u64(a, b); +// } + +// NYI-LABEL: @test_vcgezd_s64( +// NYI: [[TMP0:%.*]] = icmp sge i64 %a, 0 +// NYI: [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCGEZ_I]] +// uint64_t test_vcgezd_s64(int64_t a) { +// return (uint64_t)vcgezd_s64(a); +// } + +// NYI-LABEL: @test_vcgtd_s64( +// NYI: [[TMP0:%.*]] = icmp sgt i64 %a, %b +// NYI: [[VCEQD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQD_I]] +// uint64_t test_vcgtd_s64(int64_t a, int64_t b) { +// return (uint64_t)vcgtd_s64(a, b); +// } + +// NYI-LABEL: @test_vcgtd_u64( +// NYI: [[TMP0:%.*]] = icmp ugt i64 %a, %b +// NYI: [[VCEQD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQD_I]] +// uint64_t test_vcgtd_u64(uint64_t a, uint64_t b) { +// return (uint64_t)vcgtd_u64(a, b); +// } + +// NYI-LABEL: @test_vcgtzd_s64( +// NYI: [[TMP0:%.*]] = icmp sgt i64 %a, 0 +// NYI: [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCGTZ_I]] +// uint64_t test_vcgtzd_s64(int64_t a) { +// return (uint64_t)vcgtzd_s64(a); +// } + +// NYI-LABEL: @test_vcled_s64( +// NYI: [[TMP0:%.*]] = icmp sle i64 %a, %b +// NYI: [[VCEQD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQD_I]] +// uint64_t test_vcled_s64(int64_t a, int64_t b) { +// return (uint64_t)vcled_s64(a, b); +// } + +// NYI-LABEL: @test_vcled_u64( +// NYI: [[TMP0:%.*]] = icmp ule i64 %a, %b +// NYI: [[VCEQD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQD_I]] +// uint64_t test_vcled_u64(uint64_t a, uint64_t b) { +// return (uint64_t)vcled_u64(a, b); +// } + +// NYI-LABEL: @test_vclezd_s64( +// NYI: [[TMP0:%.*]] = icmp sle i64 %a, 0 +// NYI: [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCLEZ_I]] +// uint64_t test_vclezd_s64(int64_t a) { +// return (uint64_t)vclezd_s64(a); +// } + +// NYI-LABEL: @test_vcltd_s64( +// NYI: [[TMP0:%.*]] = icmp slt i64 %a, %b +// NYI: [[VCEQD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQD_I]] +// uint64_t test_vcltd_s64(int64_t a, int64_t b) { +// return (uint64_t)vcltd_s64(a, b); +// } + +// NYI-LABEL: @test_vcltd_u64( +// NYI: [[TMP0:%.*]] = icmp ult i64 %a, %b +// NYI: [[VCEQD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQD_I]] +// uint64_t test_vcltd_u64(uint64_t a, uint64_t b) { +// return (uint64_t)vcltd_u64(a, b); +// } + +// NYI-LABEL: @test_vcltzd_s64( +// NYI: [[TMP0:%.*]] = icmp slt i64 %a, 0 +// NYI: [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCLTZ_I]] +// uint64_t test_vcltzd_s64(int64_t a) { +// return (uint64_t)vcltzd_s64(a); +// } + +// NYI-LABEL: @test_vtstd_s64( +// NYI: [[TMP0:%.*]] = and i64 %a, %b +// NYI: [[TMP1:%.*]] = icmp ne i64 [[TMP0]], 0 +// NYI: [[VTSTD_I:%.*]] = sext i1 [[TMP1]] to i64 +// NYI: ret i64 [[VTSTD_I]] +// uint64_t test_vtstd_s64(int64_t a, int64_t b) { +// return (uint64_t)vtstd_s64(a, b); +// } + +// NYI-LABEL: @test_vtstd_u64( +// NYI: [[TMP0:%.*]] = and i64 %a, %b +// NYI: [[TMP1:%.*]] = icmp ne i64 [[TMP0]], 0 +// NYI: [[VTSTD_I:%.*]] = sext i1 [[TMP1]] to i64 +// NYI: ret i64 [[VTSTD_I]] +// uint64_t test_vtstd_u64(uint64_t a, uint64_t b) { +// return (uint64_t)vtstd_u64(a, b); +// } + +// NYI-LABEL: @test_vabsd_s64( +// NYI: [[VABSD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.abs.i64(i64 %a) +// NYI: ret i64 [[VABSD_S64_I]] +// int64_t test_vabsd_s64(int64_t a) { +// return (int64_t)vabsd_s64(a); +// } + +// NYI-LABEL: @test_vqabsb_s8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[VQABSB_S8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqabs.v8i8(<8 x i8> [[TMP0]]) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQABSB_S8_I]], i64 0 +// NYI: ret i8 [[TMP1]] +// int8_t test_vqabsb_s8(int8_t a) { +// return (int8_t)vqabsb_s8(a); +// } + +// NYI-LABEL: @test_vqabsh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[VQABSH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqabs.v4i16(<4 x i16> [[TMP0]]) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQABSH_S16_I]], i64 0 +// NYI: ret i16 [[TMP1]] +// int16_t test_vqabsh_s16(int16_t a) { +// return (int16_t)vqabsh_s16(a); +// } + +// NYI-LABEL: @test_vqabss_s32( +// NYI: [[VQABSS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqabs.i32(i32 %a) +// NYI: ret i32 [[VQABSS_S32_I]] +// int32_t test_vqabss_s32(int32_t a) { +// return (int32_t)vqabss_s32(a); +// } + +// NYI-LABEL: @test_vqabsd_s64( +// NYI: [[VQABSD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.sqabs.i64(i64 %a) +// NYI: ret i64 [[VQABSD_S64_I]] +// int64_t test_vqabsd_s64(int64_t a) { +// return (int64_t)vqabsd_s64(a); +// } + +// NYI-LABEL: @test_vnegd_s64( +// NYI: [[VNEGD_I:%.*]] = sub i64 0, %a +// NYI: ret i64 [[VNEGD_I]] +// int64_t test_vnegd_s64(int64_t a) { +// return (int64_t)vnegd_s64(a); +// } + +// NYI-LABEL: @test_vqnegb_s8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[VQNEGB_S8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqneg.v8i8(<8 x i8> [[TMP0]]) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQNEGB_S8_I]], i64 0 +// NYI: ret i8 [[TMP1]] +// int8_t test_vqnegb_s8(int8_t a) { +// return (int8_t)vqnegb_s8(a); +// } + +// NYI-LABEL: @test_vqnegh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[VQNEGH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqneg.v4i16(<4 x i16> [[TMP0]]) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQNEGH_S16_I]], i64 0 +// NYI: ret i16 [[TMP1]] +// int16_t test_vqnegh_s16(int16_t a) { +// return (int16_t)vqnegh_s16(a); +// } + +// NYI-LABEL: @test_vqnegs_s32( +// NYI: [[VQNEGS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqneg.i32(i32 %a) +// NYI: ret i32 [[VQNEGS_S32_I]] +// int32_t test_vqnegs_s32(int32_t a) { +// return (int32_t)vqnegs_s32(a); +// } + +// NYI-LABEL: @test_vqnegd_s64( +// NYI: [[VQNEGD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.sqneg.i64(i64 %a) +// NYI: ret i64 [[VQNEGD_S64_I]] +// int64_t test_vqnegd_s64(int64_t a) { +// return (int64_t)vqnegd_s64(a); +// } + +// NYI-LABEL: @test_vuqaddb_s8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 %b, i64 0 +// NYI: [[VUQADDB_S8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <8 x i8> [[VUQADDB_S8_I]], i64 0 +// NYI: ret i8 [[TMP2]] +// int8_t test_vuqaddb_s8(int8_t a, uint8_t b) { +// return (int8_t)vuqaddb_s8(a, b); +// } + +// NYI-LABEL: @test_vuqaddh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VUQADDH_S16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VUQADDH_S16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// int16_t test_vuqaddh_s16(int16_t a, uint16_t b) { +// return (int16_t)vuqaddh_s16(a, b); +// } + +// NYI-LABEL: @test_vuqadds_s32( +// NYI: [[VUQADDS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.suqadd.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VUQADDS_S32_I]] +// int32_t test_vuqadds_s32(int32_t a, uint32_t b) { +// return (int32_t)vuqadds_s32(a, b); +// } + +// NYI-LABEL: @test_vuqaddd_s64( +// NYI: [[VUQADDD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.suqadd.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VUQADDD_S64_I]] +// int64_t test_vuqaddd_s64(int64_t a, uint64_t b) { +// return (int64_t)vuqaddd_s64(a, b); +// } + +// NYI-LABEL: @test_vsqaddb_u8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 %b, i64 0 +// NYI: [[VSQADDB_U8_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.usqadd.v8i8(<8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <8 x i8> [[VSQADDB_U8_I]], i64 0 +// NYI: ret i8 [[TMP2]] +// uint8_t test_vsqaddb_u8(uint8_t a, int8_t b) { +// return (uint8_t)vsqaddb_u8(a, b); +// } + +// NYI-LABEL: @test_vsqaddh_u16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VSQADDH_U16_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.usqadd.v4i16(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i16> [[VSQADDH_U16_I]], i64 0 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vsqaddh_u16(uint16_t a, int16_t b) { +// return (uint16_t)vsqaddh_u16(a, b); +// } + +// NYI-LABEL: @test_vsqadds_u32( +// NYI: [[VSQADDS_U32_I:%.*]] = call i32 @llvm.aarch64.neon.usqadd.i32(i32 %a, i32 %b) +// NYI: ret i32 [[VSQADDS_U32_I]] +// uint32_t test_vsqadds_u32(uint32_t a, int32_t b) { +// return (uint32_t)vsqadds_u32(a, b); +// } + +// NYI-LABEL: @test_vsqaddd_u64( +// NYI: [[VSQADDD_U64_I:%.*]] = call i64 @llvm.aarch64.neon.usqadd.i64(i64 %a, i64 %b) +// NYI: ret i64 [[VSQADDD_U64_I]] +// uint64_t test_vsqaddd_u64(uint64_t a, int64_t b) { +// return (uint64_t)vsqaddd_u64(a, b); +// } + +// NYI-LABEL: @test_vqdmlalh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %c, i64 0 +// NYI: [[VQDMLXL_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[LANE0_I:%.*]] = extractelement <4 x i32> [[VQDMLXL_I]], i64 0 +// NYI: [[VQDMLXL1_I:%.*]] = call i32 @llvm.aarch64.neon.sqadd.i32(i32 %a, i32 [[LANE0_I]]) +// NYI: ret i32 [[VQDMLXL1_I]] +// int32_t test_vqdmlalh_s16(int32_t a, int16_t b, int16_t c) { +// return (int32_t)vqdmlalh_s16(a, b, c); +// } + +// NYI-LABEL: @test_vqdmlals_s32( +// NYI: [[VQDMLXL_I:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %b, i32 %c) +// NYI: [[VQDMLXL1_I:%.*]] = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %a, i64 [[VQDMLXL_I]]) +// NYI: ret i64 [[VQDMLXL1_I]] +// int64_t test_vqdmlals_s32(int64_t a, int32_t b, int32_t c) { +// return (int64_t)vqdmlals_s32(a, b, c); +// } + +// NYI-LABEL: @test_vqdmlslh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %c, i64 0 +// NYI: [[VQDMLXL_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[LANE0_I:%.*]] = extractelement <4 x i32> [[VQDMLXL_I]], i64 0 +// NYI: [[VQDMLXL1_I:%.*]] = call i32 @llvm.aarch64.neon.sqsub.i32(i32 %a, i32 [[LANE0_I]]) +// NYI: ret i32 [[VQDMLXL1_I]] +// int32_t test_vqdmlslh_s16(int32_t a, int16_t b, int16_t c) { +// return (int32_t)vqdmlslh_s16(a, b, c); +// } + +// NYI-LABEL: @test_vqdmlsls_s32( +// NYI: [[VQDMLXL_I:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %b, i32 %c) +// NYI: [[VQDMLXL1_I:%.*]] = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %a, i64 [[VQDMLXL_I]]) +// NYI: ret i64 [[VQDMLXL1_I]] +// int64_t test_vqdmlsls_s32(int64_t a, int32_t b, int32_t c) { +// return (int64_t)vqdmlsls_s32(a, b, c); +// } + +// NYI-LABEL: @test_vqdmullh_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 +// NYI: [[VQDMULLH_S16_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[TMP0]], <4 x i16> [[TMP1]]) +// NYI: [[TMP2:%.*]] = extractelement <4 x i32> [[VQDMULLH_S16_I]], i64 0 +// NYI: ret i32 [[TMP2]] +// int32_t test_vqdmullh_s16(int16_t a, int16_t b) { +// return (int32_t)vqdmullh_s16(a, b); +// } + +// NYI-LABEL: @test_vqdmulls_s32( +// NYI: [[VQDMULLS_S32_I:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %a, i32 %b) +// NYI: ret i64 [[VQDMULLS_S32_I]] +// int64_t test_vqdmulls_s32(int32_t a, int32_t b) { +// return (int64_t)vqdmulls_s32(a, b); +// } + +// NYI-LABEL: @test_vqmovunh_s16( +// NYI: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 %a, i64 0 +// NYI: [[VQMOVUNH_S16_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> [[TMP0]]) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQMOVUNH_S16_I]], i64 0 +// NYI: ret i8 [[TMP1]] +// uint8_t test_vqmovunh_s16(int16_t a) { +// return (uint8_t)vqmovunh_s16(a); +// } + +// NYI-LABEL: @test_vqmovuns_s32( +// NYI: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 %a, i64 0 +// NYI: [[VQMOVUNS_S32_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> [[TMP0]]) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQMOVUNS_S32_I]], i64 0 +// NYI: ret i16 [[TMP1]] +// uint16_t test_vqmovuns_s32(int32_t a) { +// return (uint16_t)vqmovuns_s32(a); +// } + +// NYI-LABEL: @test_vqmovund_s64( +// NYI: [[VQMOVUND_S64_I:%.*]] = call i32 @llvm.aarch64.neon.scalar.sqxtun.i32.i64(i64 %a) +// NYI: ret i32 [[VQMOVUND_S64_I]] +// uint32_t test_vqmovund_s64(int64_t a) { +// return (uint32_t)vqmovund_s64(a); +// } + +// NYI-LABEL: @test_vqmovnh_s16( +// NYI: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 %a, i64 0 +// NYI: [[VQMOVNH_S16_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> [[TMP0]]) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQMOVNH_S16_I]], i64 0 +// NYI: ret i8 [[TMP1]] +// int8_t test_vqmovnh_s16(int16_t a) { +// return (int8_t)vqmovnh_s16(a); +// } + +// NYI-LABEL: @test_vqmovns_s32( +// NYI: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 %a, i64 0 +// NYI: [[VQMOVNS_S32_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> [[TMP0]]) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQMOVNS_S32_I]], i64 0 +// NYI: ret i16 [[TMP1]] +// int16_t test_vqmovns_s32(int32_t a) { +// return (int16_t)vqmovns_s32(a); +// } + +// NYI-LABEL: @test_vqmovnd_s64( +// NYI: [[VQMOVND_S64_I:%.*]] = call i32 @llvm.aarch64.neon.scalar.sqxtn.i32.i64(i64 %a) +// NYI: ret i32 [[VQMOVND_S64_I]] +// int32_t test_vqmovnd_s64(int64_t a) { +// return (int32_t)vqmovnd_s64(a); +// } + +// NYI-LABEL: @test_vqmovnh_u16( +// NYI: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 %a, i64 0 +// NYI: [[VQMOVNH_U16_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> [[TMP0]]) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQMOVNH_U16_I]], i64 0 +// NYI: ret i8 [[TMP1]] +// int8_t test_vqmovnh_u16(int16_t a) { +// return (int8_t)vqmovnh_u16(a); +// } + +// NYI-LABEL: @test_vqmovns_u32( +// NYI: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 %a, i64 0 +// NYI: [[VQMOVNS_U32_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> [[TMP0]]) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQMOVNS_U32_I]], i64 0 +// NYI: ret i16 [[TMP1]] +// int16_t test_vqmovns_u32(int32_t a) { +// return (int16_t)vqmovns_u32(a); +// } + +// NYI-LABEL: @test_vqmovnd_u64( +// NYI: [[VQMOVND_U64_I:%.*]] = call i32 @llvm.aarch64.neon.scalar.uqxtn.i32.i64(i64 %a) +// NYI: ret i32 [[VQMOVND_U64_I]] +// int32_t test_vqmovnd_u64(int64_t a) { +// return (int32_t)vqmovnd_u64(a); +// } + +// NYI-LABEL: @test_vceqs_f32( +// NYI: [[TMP0:%.*]] = fcmp oeq float %a, %b +// NYI: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 +// NYI: ret i32 [[VCMPD_I]] +// uint32_t test_vceqs_f32(float32_t a, float32_t b) { +// return (uint32_t)vceqs_f32(a, b); +// } + +// NYI-LABEL: @test_vceqd_f64( +// NYI: [[TMP0:%.*]] = fcmp oeq double %a, %b +// NYI: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCMPD_I]] +// uint64_t test_vceqd_f64(float64_t a, float64_t b) { +// return (uint64_t)vceqd_f64(a, b); +// } + +// NYI-LABEL: @test_vceqzs_f32( +// NYI: [[TMP0:%.*]] = fcmp oeq float %a, 0.000000e+00 +// NYI: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i32 +// NYI: ret i32 [[VCEQZ_I]] +// uint32_t test_vceqzs_f32(float32_t a) { +// return (uint32_t)vceqzs_f32(a); +// } + +// NYI-LABEL: @test_vceqzd_f64( +// NYI: [[TMP0:%.*]] = fcmp oeq double %a, 0.000000e+00 +// NYI: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCEQZ_I]] +// uint64_t test_vceqzd_f64(float64_t a) { +// return (uint64_t)vceqzd_f64(a); +// } + +// NYI-LABEL: @test_vcges_f32( +// NYI: [[TMP0:%.*]] = fcmp oge float %a, %b +// NYI: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 +// NYI: ret i32 [[VCMPD_I]] +// uint32_t test_vcges_f32(float32_t a, float32_t b) { +// return (uint32_t)vcges_f32(a, b); +// } + +// NYI-LABEL: @test_vcged_f64( +// NYI: [[TMP0:%.*]] = fcmp oge double %a, %b +// NYI: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCMPD_I]] +// uint64_t test_vcged_f64(float64_t a, float64_t b) { +// return (uint64_t)vcged_f64(a, b); +// } + +// NYI-LABEL: @test_vcgezs_f32( +// NYI: [[TMP0:%.*]] = fcmp oge float %a, 0.000000e+00 +// NYI: [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i32 +// NYI: ret i32 [[VCGEZ_I]] +// uint32_t test_vcgezs_f32(float32_t a) { +// return (uint32_t)vcgezs_f32(a); +// } + +// NYI-LABEL: @test_vcgezd_f64( +// NYI: [[TMP0:%.*]] = fcmp oge double %a, 0.000000e+00 +// NYI: [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCGEZ_I]] +// uint64_t test_vcgezd_f64(float64_t a) { +// return (uint64_t)vcgezd_f64(a); +// } + +// NYI-LABEL: @test_vcgts_f32( +// NYI: [[TMP0:%.*]] = fcmp ogt float %a, %b +// NYI: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 +// NYI: ret i32 [[VCMPD_I]] +// uint32_t test_vcgts_f32(float32_t a, float32_t b) { +// return (uint32_t)vcgts_f32(a, b); +// } + +// NYI-LABEL: @test_vcgtd_f64( +// NYI: [[TMP0:%.*]] = fcmp ogt double %a, %b +// NYI: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCMPD_I]] +// uint64_t test_vcgtd_f64(float64_t a, float64_t b) { +// return (uint64_t)vcgtd_f64(a, b); +// } + +// NYI-LABEL: @test_vcgtzs_f32( +// NYI: [[TMP0:%.*]] = fcmp ogt float %a, 0.000000e+00 +// NYI: [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i32 +// NYI: ret i32 [[VCGTZ_I]] +// uint32_t test_vcgtzs_f32(float32_t a) { +// return (uint32_t)vcgtzs_f32(a); +// } + +// NYI-LABEL: @test_vcgtzd_f64( +// NYI: [[TMP0:%.*]] = fcmp ogt double %a, 0.000000e+00 +// NYI: [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCGTZ_I]] +// uint64_t test_vcgtzd_f64(float64_t a) { +// return (uint64_t)vcgtzd_f64(a); +// } + +// NYI-LABEL: @test_vcles_f32( +// NYI: [[TMP0:%.*]] = fcmp ole float %a, %b +// NYI: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 +// NYI: ret i32 [[VCMPD_I]] +// uint32_t test_vcles_f32(float32_t a, float32_t b) { +// return (uint32_t)vcles_f32(a, b); +// } + +// NYI-LABEL: @test_vcled_f64( +// NYI: [[TMP0:%.*]] = fcmp ole double %a, %b +// NYI: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCMPD_I]] +// uint64_t test_vcled_f64(float64_t a, float64_t b) { +// return (uint64_t)vcled_f64(a, b); +// } + +// NYI-LABEL: @test_vclezs_f32( +// NYI: [[TMP0:%.*]] = fcmp ole float %a, 0.000000e+00 +// NYI: [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i32 +// NYI: ret i32 [[VCLEZ_I]] +// uint32_t test_vclezs_f32(float32_t a) { +// return (uint32_t)vclezs_f32(a); +// } + +// NYI-LABEL: @test_vclezd_f64( +// NYI: [[TMP0:%.*]] = fcmp ole double %a, 0.000000e+00 +// NYI: [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCLEZ_I]] +// uint64_t test_vclezd_f64(float64_t a) { +// return (uint64_t)vclezd_f64(a); +// } + +// NYI-LABEL: @test_vclts_f32( +// NYI: [[TMP0:%.*]] = fcmp olt float %a, %b +// NYI: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 +// NYI: ret i32 [[VCMPD_I]] +// uint32_t test_vclts_f32(float32_t a, float32_t b) { +// return (uint32_t)vclts_f32(a, b); +// } + +// NYI-LABEL: @test_vcltd_f64( +// NYI: [[TMP0:%.*]] = fcmp olt double %a, %b +// NYI: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCMPD_I]] +// uint64_t test_vcltd_f64(float64_t a, float64_t b) { +// return (uint64_t)vcltd_f64(a, b); +// } + +// NYI-LABEL: @test_vcltzs_f32( +// NYI: [[TMP0:%.*]] = fcmp olt float %a, 0.000000e+00 +// NYI: [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i32 +// NYI: ret i32 [[VCLTZ_I]] +// uint32_t test_vcltzs_f32(float32_t a) { +// return (uint32_t)vcltzs_f32(a); +// } + +// NYI-LABEL: @test_vcltzd_f64( +// NYI: [[TMP0:%.*]] = fcmp olt double %a, 0.000000e+00 +// NYI: [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// NYI: ret i64 [[VCLTZ_I]] +// uint64_t test_vcltzd_f64(float64_t a) { +// return (uint64_t)vcltzd_f64(a); +// } + +// NYI-LABEL: @test_vcages_f32( +// NYI: [[VCAGES_F32_I:%.*]] = call i32 @llvm.aarch64.neon.facge.i32.f32(float %a, float %b) +// NYI: ret i32 [[VCAGES_F32_I]] +// uint32_t test_vcages_f32(float32_t a, float32_t b) { +// return (uint32_t)vcages_f32(a, b); +// } + +// NYI-LABEL: @test_vcaged_f64( +// NYI: [[VCAGED_F64_I:%.*]] = call i64 @llvm.aarch64.neon.facge.i64.f64(double %a, double %b) +// NYI: ret i64 [[VCAGED_F64_I]] +// uint64_t test_vcaged_f64(float64_t a, float64_t b) { +// return (uint64_t)vcaged_f64(a, b); +// } + +// NYI-LABEL: @test_vcagts_f32( +// NYI: [[VCAGTS_F32_I:%.*]] = call i32 @llvm.aarch64.neon.facgt.i32.f32(float %a, float %b) +// NYI: ret i32 [[VCAGTS_F32_I]] +// uint32_t test_vcagts_f32(float32_t a, float32_t b) { +// return (uint32_t)vcagts_f32(a, b); +// } + +// NYI-LABEL: @test_vcagtd_f64( +// NYI: [[VCAGTD_F64_I:%.*]] = call i64 @llvm.aarch64.neon.facgt.i64.f64(double %a, double %b) +// NYI: ret i64 [[VCAGTD_F64_I]] +// uint64_t test_vcagtd_f64(float64_t a, float64_t b) { +// return (uint64_t)vcagtd_f64(a, b); +// } + +// NYI-LABEL: @test_vcales_f32( +// NYI: [[VCALES_F32_I:%.*]] = call i32 @llvm.aarch64.neon.facge.i32.f32(float %b, float %a) +// NYI: ret i32 [[VCALES_F32_I]] +// uint32_t test_vcales_f32(float32_t a, float32_t b) { +// return (uint32_t)vcales_f32(a, b); +// } + +// NYI-LABEL: @test_vcaled_f64( +// NYI: [[VCALED_F64_I:%.*]] = call i64 @llvm.aarch64.neon.facge.i64.f64(double %b, double %a) +// NYI: ret i64 [[VCALED_F64_I]] +// uint64_t test_vcaled_f64(float64_t a, float64_t b) { +// return (uint64_t)vcaled_f64(a, b); +// } + +// NYI-LABEL: @test_vcalts_f32( +// NYI: [[VCALTS_F32_I:%.*]] = call i32 @llvm.aarch64.neon.facgt.i32.f32(float %b, float %a) +// NYI: ret i32 [[VCALTS_F32_I]] +// uint32_t test_vcalts_f32(float32_t a, float32_t b) { +// return (uint32_t)vcalts_f32(a, b); +// } + +// NYI-LABEL: @test_vcaltd_f64( +// NYI: [[VCALTD_F64_I:%.*]] = call i64 @llvm.aarch64.neon.facgt.i64.f64(double %b, double %a) +// NYI: ret i64 [[VCALTD_F64_I]] +// uint64_t test_vcaltd_f64(float64_t a, float64_t b) { +// return (uint64_t)vcaltd_f64(a, b); +// } + +// NYI-LABEL: @test_vshrd_n_s64( +// NYI: [[SHRD_N:%.*]] = ashr i64 %a, 1 +// NYI: ret i64 [[SHRD_N]] +// int64_t test_vshrd_n_s64(int64_t a) { +// return (int64_t)vshrd_n_s64(a, 1); +// } + +// NYI-LABEL: @test_vshr_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VSHR_N:%.*]] = ashr <1 x i64> [[TMP1]], +// NYI: ret <1 x i64> [[VSHR_N]] +// int64x1_t test_vshr_n_s64(int64x1_t a) { +// return vshr_n_s64(a, 1); +// } + +// NYI-LABEL: @test_vshrd_n_u64( +// NYI: ret i64 0 +// uint64_t test_vshrd_n_u64(uint64_t a) { +// return (uint64_t)vshrd_n_u64(a, 64); +// } + +// NYI-LABEL: @test_vshrd_n_u64_2( +// NYI: ret i64 0 +// uint64_t test_vshrd_n_u64_2() { +// uint64_t a = UINT64_C(0xf000000000000000); +// return vshrd_n_u64(a, 64); +// } + +// NYI-LABEL: @test_vshr_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VSHR_N:%.*]] = lshr <1 x i64> [[TMP1]], +// NYI: ret <1 x i64> [[VSHR_N]] +// uint64x1_t test_vshr_n_u64(uint64x1_t a) { +// return vshr_n_u64(a, 1); +// } + +// NYI-LABEL: @test_vrshrd_n_s64( +// NYI: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 %a, i64 -63) +// NYI: ret i64 [[VRSHR_N]] +// int64_t test_vrshrd_n_s64(int64_t a) { +// return (int64_t)vrshrd_n_s64(a, 63); +// } + +// NYI-LABEL: @test_vrshr_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> ) +// NYI: ret <1 x i64> [[VRSHR_N1]] +// int64x1_t test_vrshr_n_s64(int64x1_t a) { +// return vrshr_n_s64(a, 1); +// } + +// NYI-LABEL: @test_vrshrd_n_u64( +// NYI: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 %a, i64 -63) +// NYI: ret i64 [[VRSHR_N]] +// uint64_t test_vrshrd_n_u64(uint64_t a) { +// return (uint64_t)vrshrd_n_u64(a, 63); +// } + +// NYI-LABEL: @test_vrshr_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> ) +// NYI: ret <1 x i64> [[VRSHR_N1]] +// uint64x1_t test_vrshr_n_u64(uint64x1_t a) { +// return vrshr_n_u64(a, 1); +// } + +// NYI-LABEL: @test_vsrad_n_s64( +// NYI: [[SHRD_N:%.*]] = ashr i64 %b, 63 +// NYI: [[TMP0:%.*]] = add i64 %a, [[SHRD_N]] +// NYI: ret i64 [[TMP0]] +// int64_t test_vsrad_n_s64(int64_t a, int64_t b) { +// return (int64_t)vsrad_n_s64(a, b, 63); +// } + +// NYI-LABEL: @test_vsra_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: [[VSRA_N:%.*]] = ashr <1 x i64> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <1 x i64> [[TMP2]], [[VSRA_N]] +// NYI: ret <1 x i64> [[TMP4]] +// int64x1_t test_vsra_n_s64(int64x1_t a, int64x1_t b) { +// return vsra_n_s64(a, b, 1); +// } + +// NYI-LABEL: @test_vsrad_n_u64( +// NYI: [[SHRD_N:%.*]] = lshr i64 %b, 63 +// NYI: [[TMP0:%.*]] = add i64 %a, [[SHRD_N]] +// NYI: ret i64 [[TMP0]] +// uint64_t test_vsrad_n_u64(uint64_t a, uint64_t b) { +// return (uint64_t)vsrad_n_u64(a, b, 63); +// } + +// NYI-LABEL: @test_vsrad_n_u64_2( +// NYI: ret i64 %a +// uint64_t test_vsrad_n_u64_2(uint64_t a, uint64_t b) { +// return (uint64_t)vsrad_n_u64(a, b, 64); +// } + +// NYI-LABEL: @test_vsra_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: [[VSRA_N:%.*]] = lshr <1 x i64> [[TMP3]], +// NYI: [[TMP4:%.*]] = add <1 x i64> [[TMP2]], [[VSRA_N]] +// NYI: ret <1 x i64> [[TMP4]] +// uint64x1_t test_vsra_n_u64(uint64x1_t a, uint64x1_t b) { +// return vsra_n_u64(a, b, 1); +// } + +// NYI-LABEL: @test_vrsrad_n_s64( +// NYI: [[TMP0:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 %b, i64 -63) +// NYI: [[TMP1:%.*]] = add i64 %a, [[TMP0]] +// NYI: ret i64 [[TMP1]] +// int64_t test_vrsrad_n_s64(int64_t a, int64_t b) { +// return (int64_t)vrsrad_n_s64(a, b, 63); +// } + +// NYI-LABEL: @test_vrsra_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> ) +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[TMP3:%.*]] = add <1 x i64> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <1 x i64> [[TMP3]] +// int64x1_t test_vrsra_n_s64(int64x1_t a, int64x1_t b) { +// return vrsra_n_s64(a, b, 1); +// } + +// NYI-LABEL: @test_vrsrad_n_u64( +// NYI: [[TMP0:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 %b, i64 -63) +// NYI: [[TMP1:%.*]] = add i64 %a, [[TMP0]] +// NYI: ret i64 [[TMP1]] +// uint64_t test_vrsrad_n_u64(uint64_t a, uint64_t b) { +// return (uint64_t)vrsrad_n_u64(a, b, 63); +// } + +// NYI-LABEL: @test_vrsra_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> ) +// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[TMP3:%.*]] = add <1 x i64> [[TMP2]], [[VRSHR_N1]] +// NYI: ret <1 x i64> [[TMP3]] +// uint64x1_t test_vrsra_n_u64(uint64x1_t a, uint64x1_t b) { +// return vrsra_n_u64(a, b, 1); +// } + +// NYI-LABEL: @test_vshld_n_s64( +// NYI: [[SHLD_N:%.*]] = shl i64 %a, 1 +// NYI: ret i64 [[SHLD_N]] +// int64_t test_vshld_n_s64(int64_t a) { +// return (int64_t)vshld_n_s64(a, 1); +// } + +// NYI-LABEL: @test_vshl_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VSHL_N:%.*]] = shl <1 x i64> [[TMP1]], +// NYI: ret <1 x i64> [[VSHL_N]] +// int64x1_t test_vshl_n_s64(int64x1_t a) { +// return vshl_n_s64(a, 1); +// } + +// NYI-LABEL: @test_vshld_n_u64( +// NYI: [[SHLD_N:%.*]] = shl i64 %a, 63 +// NYI: ret i64 [[SHLD_N]] +// uint64_t test_vshld_n_u64(uint64_t a) { +// return (uint64_t)vshld_n_u64(a, 63); +// } + +// NYI-LABEL: @test_vshl_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VSHL_N:%.*]] = shl <1 x i64> [[TMP1]], +// NYI: ret <1 x i64> [[VSHL_N]] +// uint64x1_t test_vshl_n_u64(uint64x1_t a) { +// return vshl_n_u64(a, 1); +// } + +// NYI-LABEL: @test_vqshlb_n_s8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[VQSHLB_N_S8:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> [[TMP0]], <8 x i8> ) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQSHLB_N_S8]], i64 0 +// NYI: ret i8 [[TMP1]] +// int8_t test_vqshlb_n_s8(int8_t a) { +// return (int8_t)vqshlb_n_s8(a, 7); +// } + +// NYI-LABEL: @test_vqshlh_n_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[VQSHLH_N_S16:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> [[TMP0]], <4 x i16> ) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQSHLH_N_S16]], i64 0 +// NYI: ret i16 [[TMP1]] +// int16_t test_vqshlh_n_s16(int16_t a) { +// return (int16_t)vqshlh_n_s16(a, 15); +// } + +// NYI-LABEL: @test_vqshls_n_s32( +// NYI: [[VQSHLS_N_S32:%.*]] = call i32 @llvm.aarch64.neon.sqshl.i32(i32 %a, i32 31) +// NYI: ret i32 [[VQSHLS_N_S32]] +// int32_t test_vqshls_n_s32(int32_t a) { +// return (int32_t)vqshls_n_s32(a, 31); +// } + +// NYI-LABEL: @test_vqshld_n_s64( +// NYI: [[VQSHL_N:%.*]] = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %a, i64 63) +// NYI: ret i64 [[VQSHL_N]] +// int64_t test_vqshld_n_s64(int64_t a) { +// return (int64_t)vqshld_n_s64(a, 63); +// } + +// NYI-LABEL: @test_vqshl_n_s8( +// NYI: [[VQSHL_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %a, <8 x i8> zeroinitializer) +// NYI: ret <8 x i8> [[VQSHL_N]] +// int8x8_t test_vqshl_n_s8(int8x8_t a) { +// return vqshl_n_s8(a, 0); +// } + +// NYI-LABEL: @test_vqshlq_n_s8( +// NYI: [[VQSHL_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %a, <16 x i8> zeroinitializer) +// NYI: ret <16 x i8> [[VQSHL_N]] +// int8x16_t test_vqshlq_n_s8(int8x16_t a) { +// return vqshlq_n_s8(a, 0); +// } + +// NYI-LABEL: @test_vqshl_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VQSHL_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> [[VQSHL_N]], <4 x i16> zeroinitializer) +// NYI: ret <4 x i16> [[VQSHL_N1]] +// int16x4_t test_vqshl_n_s16(int16x4_t a) { +// return vqshl_n_s16(a, 0); +// } + +// NYI-LABEL: @test_vqshlq_n_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQSHL_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> [[VQSHL_N]], <8 x i16> zeroinitializer) +// NYI: ret <8 x i16> [[VQSHL_N1]] +// int16x8_t test_vqshlq_n_s16(int16x8_t a) { +// return vqshlq_n_s16(a, 0); +// } + +// NYI-LABEL: @test_vqshl_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VQSHL_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> [[VQSHL_N]], <2 x i32> zeroinitializer) +// NYI: ret <2 x i32> [[VQSHL_N1]] +// int32x2_t test_vqshl_n_s32(int32x2_t a) { +// return vqshl_n_s32(a, 0); +// } + +// NYI-LABEL: @test_vqshlq_n_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQSHL_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> [[VQSHL_N]], <4 x i32> zeroinitializer) +// NYI: ret <4 x i32> [[VQSHL_N1]] +// int32x4_t test_vqshlq_n_s32(int32x4_t a) { +// return vqshlq_n_s32(a, 0); +// } + +// NYI-LABEL: @test_vqshlq_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQSHL_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> [[VQSHL_N]], <2 x i64> zeroinitializer) +// NYI: ret <2 x i64> [[VQSHL_N1]] +// int64x2_t test_vqshlq_n_s64(int64x2_t a) { +// return vqshlq_n_s64(a, 0); +// } + +// NYI-LABEL: @test_vqshl_n_u8( +// NYI: [[VQSHL_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %a, <8 x i8> zeroinitializer) +// NYI: ret <8 x i8> [[VQSHL_N]] +// uint8x8_t test_vqshl_n_u8(uint8x8_t a) { +// return vqshl_n_u8(a, 0); +// } + +// NYI-LABEL: @test_vqshlq_n_u8( +// NYI: [[VQSHL_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %a, <16 x i8> zeroinitializer) +// NYI: ret <16 x i8> [[VQSHL_N]] +// uint8x16_t test_vqshlq_n_u8(uint8x16_t a) { +// return vqshlq_n_u8(a, 0); +// } + +// NYI-LABEL: @test_vqshl_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> +// NYI: [[VQSHL_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> [[VQSHL_N]], <4 x i16> zeroinitializer) +// NYI: ret <4 x i16> [[VQSHL_N1]] +// uint16x4_t test_vqshl_n_u16(uint16x4_t a) { +// return vqshl_n_u16(a, 0); +// } + +// NYI-LABEL: @test_vqshlq_n_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> +// NYI: [[VQSHL_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> [[VQSHL_N]], <8 x i16> zeroinitializer) +// NYI: ret <8 x i16> [[VQSHL_N1]] +// uint16x8_t test_vqshlq_n_u16(uint16x8_t a) { +// return vqshlq_n_u16(a, 0); +// } + +// NYI-LABEL: @test_vqshl_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> +// NYI: [[VQSHL_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> [[VQSHL_N]], <2 x i32> zeroinitializer) +// NYI: ret <2 x i32> [[VQSHL_N1]] +// uint32x2_t test_vqshl_n_u32(uint32x2_t a) { +// return vqshl_n_u32(a, 0); +// } + +// NYI-LABEL: @test_vqshlq_n_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> +// NYI: [[VQSHL_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> [[VQSHL_N]], <4 x i32> zeroinitializer) +// NYI: ret <4 x i32> [[VQSHL_N1]] +// uint32x4_t test_vqshlq_n_u32(uint32x4_t a) { +// return vqshlq_n_u32(a, 0); +// } + +// NYI-LABEL: @test_vqshlq_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// NYI: [[VQSHL_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> [[VQSHL_N]], <2 x i64> zeroinitializer) +// NYI: ret <2 x i64> [[VQSHL_N1]] +// uint64x2_t test_vqshlq_n_u64(uint64x2_t a) { +// return vqshlq_n_u64(a, 0); +// } + +// NYI-LABEL: @test_vqshl_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VQSHL_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64> [[VQSHL_N]], <1 x i64> ) +// NYI: ret <1 x i64> [[VQSHL_N1]] +// int64x1_t test_vqshl_n_s64(int64x1_t a) { +// return vqshl_n_s64(a, 1); +// } + +// NYI-LABEL: @test_vqshlb_n_u8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[VQSHLB_N_U8:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> [[TMP0]], <8 x i8> ) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQSHLB_N_U8]], i64 0 +// NYI: ret i8 [[TMP1]] +// uint8_t test_vqshlb_n_u8(uint8_t a) { +// return (uint8_t)vqshlb_n_u8(a, 7); +// } + +// NYI-LABEL: @test_vqshlh_n_u16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[VQSHLH_N_U16:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> [[TMP0]], <4 x i16> ) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQSHLH_N_U16]], i64 0 +// NYI: ret i16 [[TMP1]] +// uint16_t test_vqshlh_n_u16(uint16_t a) { +// return (uint16_t)vqshlh_n_u16(a, 15); +// } + +// NYI-LABEL: @test_vqshls_n_u32( +// NYI: [[VQSHLS_N_U32:%.*]] = call i32 @llvm.aarch64.neon.uqshl.i32(i32 %a, i32 31) +// NYI: ret i32 [[VQSHLS_N_U32]] +// uint32_t test_vqshls_n_u32(uint32_t a) { +// return (uint32_t)vqshls_n_u32(a, 31); +// } + +// NYI-LABEL: @test_vqshld_n_u64( +// NYI: [[VQSHL_N:%.*]] = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %a, i64 63) +// NYI: ret i64 [[VQSHL_N]] +// uint64_t test_vqshld_n_u64(uint64_t a) { +// return (uint64_t)vqshld_n_u64(a, 63); +// } + +// NYI-LABEL: @test_vqshl_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VQSHL_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VQSHL_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64> [[VQSHL_N]], <1 x i64> ) +// NYI: ret <1 x i64> [[VQSHL_N1]] +// uint64x1_t test_vqshl_n_u64(uint64x1_t a) { +// return vqshl_n_u64(a, 1); +// } + +// NYI-LABEL: @test_vqshlub_n_s8( +// NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 +// NYI: [[VQSHLUB_N_S8:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> [[TMP0]], <8 x i8> ) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQSHLUB_N_S8]], i64 0 +// NYI: ret i8 [[TMP1]] +// int8_t test_vqshlub_n_s8(int8_t a) { +// return (int8_t)vqshlub_n_s8(a, 7); +// } + +// NYI-LABEL: @test_vqshluh_n_s16( +// NYI: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 %a, i64 0 +// NYI: [[VQSHLUH_N_S16:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> [[TMP0]], <4 x i16> ) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQSHLUH_N_S16]], i64 0 +// NYI: ret i16 [[TMP1]] +// int16_t test_vqshluh_n_s16(int16_t a) { +// return (int16_t)vqshluh_n_s16(a, 15); +// } + +// NYI-LABEL: @test_vqshlus_n_s32( +// NYI: [[VQSHLUS_N_S32:%.*]] = call i32 @llvm.aarch64.neon.sqshlu.i32(i32 %a, i32 31) +// NYI: ret i32 [[VQSHLUS_N_S32]] +// int32_t test_vqshlus_n_s32(int32_t a) { +// return (int32_t)vqshlus_n_s32(a, 31); +// } + +// NYI-LABEL: @test_vqshlud_n_s64( +// NYI: [[VQSHLU_N:%.*]] = call i64 @llvm.aarch64.neon.sqshlu.i64(i64 %a, i64 63) +// NYI: ret i64 [[VQSHLU_N]] +// int64_t test_vqshlud_n_s64(int64_t a) { +// return (int64_t)vqshlud_n_s64(a, 63); +// } + +// NYI-LABEL: @test_vqshlu_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VQSHLU_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VQSHLU_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqshlu.v1i64(<1 x i64> [[VQSHLU_N]], <1 x i64> ) +// NYI: ret <1 x i64> [[VQSHLU_N1]] +// uint64x1_t test_vqshlu_n_s64(int64x1_t a) { +// return vqshlu_n_s64(a, 1); +// } + +// NYI-LABEL: @test_vsrid_n_s64( +// NYI: [[VSRID_N_S64:%.*]] = bitcast i64 %a to <1 x i64> +// NYI: [[VSRID_N_S641:%.*]] = bitcast i64 %b to <1 x i64> +// NYI: [[VSRID_N_S642:%.*]] = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> [[VSRID_N_S64]], <1 x i64> [[VSRID_N_S641]], i32 63) +// NYI: [[VSRID_N_S643:%.*]] = bitcast <1 x i64> [[VSRID_N_S642]] to i64 +// NYI: ret i64 [[VSRID_N_S643]] +// int64_t test_vsrid_n_s64(int64_t a, int64_t b) { +// return (int64_t)vsrid_n_s64(a, b, 63); +// } + +// NYI-LABEL: @test_vsri_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VSRI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: [[VSRI_N2:%.*]] = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> [[VSRI_N]], <1 x i64> [[VSRI_N1]], i32 1) +// NYI: ret <1 x i64> [[VSRI_N2]] +// int64x1_t test_vsri_n_s64(int64x1_t a, int64x1_t b) { +// return vsri_n_s64(a, b, 1); +// } + +// NYI-LABEL: @test_vsrid_n_u64( +// NYI: [[VSRID_N_U64:%.*]] = bitcast i64 %a to <1 x i64> +// NYI: [[VSRID_N_U641:%.*]] = bitcast i64 %b to <1 x i64> +// NYI: [[VSRID_N_U642:%.*]] = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> [[VSRID_N_U64]], <1 x i64> [[VSRID_N_U641]], i32 63) +// NYI: [[VSRID_N_U643:%.*]] = bitcast <1 x i64> [[VSRID_N_U642]] to i64 +// NYI: ret i64 [[VSRID_N_U643]] +// uint64_t test_vsrid_n_u64(uint64_t a, uint64_t b) { +// return (uint64_t)vsrid_n_u64(a, b, 63); +// } + +// NYI-LABEL: @test_vsri_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VSRI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VSRI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: [[VSRI_N2:%.*]] = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> [[VSRI_N]], <1 x i64> [[VSRI_N1]], i32 1) +// NYI: ret <1 x i64> [[VSRI_N2]] +// uint64x1_t test_vsri_n_u64(uint64x1_t a, uint64x1_t b) { +// return vsri_n_u64(a, b, 1); +// } + +// NYI-LABEL: @test_vslid_n_s64( +// NYI: [[VSLID_N_S64:%.*]] = bitcast i64 %a to <1 x i64> +// NYI: [[VSLID_N_S641:%.*]] = bitcast i64 %b to <1 x i64> +// NYI: [[VSLID_N_S642:%.*]] = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> [[VSLID_N_S64]], <1 x i64> [[VSLID_N_S641]], i32 63) +// NYI: [[VSLID_N_S643:%.*]] = bitcast <1 x i64> [[VSLID_N_S642]] to i64 +// NYI: ret i64 [[VSLID_N_S643]] +// int64_t test_vslid_n_s64(int64_t a, int64_t b) { +// return (int64_t)vslid_n_s64(a, b, 63); +// } + +// NYI-LABEL: @test_vsli_n_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VSLI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: [[VSLI_N2:%.*]] = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> [[VSLI_N]], <1 x i64> [[VSLI_N1]], i32 1) +// NYI: ret <1 x i64> [[VSLI_N2]] +// int64x1_t test_vsli_n_s64(int64x1_t a, int64x1_t b) { +// return vsli_n_s64(a, b, 1); +// } + +// NYI-LABEL: @test_vslid_n_u64( +// NYI: [[VSLID_N_U64:%.*]] = bitcast i64 %a to <1 x i64> +// NYI: [[VSLID_N_U641:%.*]] = bitcast i64 %b to <1 x i64> +// NYI: [[VSLID_N_U642:%.*]] = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> [[VSLID_N_U64]], <1 x i64> [[VSLID_N_U641]], i32 63) +// NYI: [[VSLID_N_U643:%.*]] = bitcast <1 x i64> [[VSLID_N_U642]] to i64 +// NYI: ret i64 [[VSLID_N_U643]] +// uint64_t test_vslid_n_u64(uint64_t a, uint64_t b) { +// return (uint64_t)vslid_n_u64(a, b, 63); +// } + +// NYI-LABEL: @test_vsli_n_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VSLI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VSLI_N1:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> +// NYI: [[VSLI_N2:%.*]] = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> [[VSLI_N]], <1 x i64> [[VSLI_N1]], i32 1) +// NYI: ret <1 x i64> [[VSLI_N2]] +// uint64x1_t test_vsli_n_u64(uint64x1_t a, uint64x1_t b) { +// return vsli_n_u64(a, b, 1); +// } + +// NYI-LABEL: @test_vqshrnh_n_s16( +// NYI: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 %a, i64 0 +// NYI: [[VQSHRNH_N_S16:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> [[TMP0]], i32 8) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQSHRNH_N_S16]], i64 0 +// NYI: ret i8 [[TMP1]] +// int8_t test_vqshrnh_n_s16(int16_t a) { +// return (int8_t)vqshrnh_n_s16(a, 8); +// } + +// NYI-LABEL: @test_vqshrns_n_s32( +// NYI: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 %a, i64 0 +// NYI: [[VQSHRNS_N_S32:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> [[TMP0]], i32 16) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQSHRNS_N_S32]], i64 0 +// NYI: ret i16 [[TMP1]] +// int16_t test_vqshrns_n_s32(int32_t a) { +// return (int16_t)vqshrns_n_s32(a, 16); +// } + +// NYI-LABEL: @test_vqshrnd_n_s64( +// NYI: [[VQSHRND_N_S64:%.*]] = call i32 @llvm.aarch64.neon.sqshrn.i32(i64 %a, i32 32) +// NYI: ret i32 [[VQSHRND_N_S64]] +// int32_t test_vqshrnd_n_s64(int64_t a) { +// return (int32_t)vqshrnd_n_s64(a, 32); +// } + +// NYI-LABEL: @test_vqshrnh_n_u16( +// NYI: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 %a, i64 0 +// NYI: [[VQSHRNH_N_U16:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> [[TMP0]], i32 8) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQSHRNH_N_U16]], i64 0 +// NYI: ret i8 [[TMP1]] +// uint8_t test_vqshrnh_n_u16(uint16_t a) { +// return (uint8_t)vqshrnh_n_u16(a, 8); +// } + +// NYI-LABEL: @test_vqshrns_n_u32( +// NYI: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 %a, i64 0 +// NYI: [[VQSHRNS_N_U32:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> [[TMP0]], i32 16) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQSHRNS_N_U32]], i64 0 +// NYI: ret i16 [[TMP1]] +// uint16_t test_vqshrns_n_u32(uint32_t a) { +// return (uint16_t)vqshrns_n_u32(a, 16); +// } + +// NYI-LABEL: @test_vqshrnd_n_u64( +// NYI: [[VQSHRND_N_U64:%.*]] = call i32 @llvm.aarch64.neon.uqshrn.i32(i64 %a, i32 32) +// NYI: ret i32 [[VQSHRND_N_U64]] +// uint32_t test_vqshrnd_n_u64(uint64_t a) { +// return (uint32_t)vqshrnd_n_u64(a, 32); +// } + +// NYI-LABEL: @test_vqrshrnh_n_s16( +// NYI: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 %a, i64 0 +// NYI: [[VQRSHRNH_N_S16:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> [[TMP0]], i32 8) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQRSHRNH_N_S16]], i64 0 +// NYI: ret i8 [[TMP1]] +// int8_t test_vqrshrnh_n_s16(int16_t a) { +// return (int8_t)vqrshrnh_n_s16(a, 8); +// } + +// NYI-LABEL: @test_vqrshrns_n_s32( +// NYI: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 %a, i64 0 +// NYI: [[VQRSHRNS_N_S32:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> [[TMP0]], i32 16) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQRSHRNS_N_S32]], i64 0 +// NYI: ret i16 [[TMP1]] +// int16_t test_vqrshrns_n_s32(int32_t a) { +// return (int16_t)vqrshrns_n_s32(a, 16); +// } + +// NYI-LABEL: @test_vqrshrnd_n_s64( +// NYI: [[VQRSHRND_N_S64:%.*]] = call i32 @llvm.aarch64.neon.sqrshrn.i32(i64 %a, i32 32) +// NYI: ret i32 [[VQRSHRND_N_S64]] +// int32_t test_vqrshrnd_n_s64(int64_t a) { +// return (int32_t)vqrshrnd_n_s64(a, 32); +// } + +// NYI-LABEL: @test_vqrshrnh_n_u16( +// NYI: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 %a, i64 0 +// NYI: [[VQRSHRNH_N_U16:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> [[TMP0]], i32 8) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQRSHRNH_N_U16]], i64 0 +// NYI: ret i8 [[TMP1]] +// uint8_t test_vqrshrnh_n_u16(uint16_t a) { +// return (uint8_t)vqrshrnh_n_u16(a, 8); +// } + +// NYI-LABEL: @test_vqrshrns_n_u32( +// NYI: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 %a, i64 0 +// NYI: [[VQRSHRNS_N_U32:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> [[TMP0]], i32 16) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQRSHRNS_N_U32]], i64 0 +// NYI: ret i16 [[TMP1]] +// uint16_t test_vqrshrns_n_u32(uint32_t a) { +// return (uint16_t)vqrshrns_n_u32(a, 16); +// } + +// NYI-LABEL: @test_vqrshrnd_n_u64( +// NYI: [[VQRSHRND_N_U64:%.*]] = call i32 @llvm.aarch64.neon.uqrshrn.i32(i64 %a, i32 32) +// NYI: ret i32 [[VQRSHRND_N_U64]] +// uint32_t test_vqrshrnd_n_u64(uint64_t a) { +// return (uint32_t)vqrshrnd_n_u64(a, 32); +// } + +// NYI-LABEL: @test_vqshrunh_n_s16( +// NYI: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 %a, i64 0 +// NYI: [[VQSHRUNH_N_S16:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> [[TMP0]], i32 8) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQSHRUNH_N_S16]], i64 0 +// NYI: ret i8 [[TMP1]] +// int8_t test_vqshrunh_n_s16(int16_t a) { +// return (int8_t)vqshrunh_n_s16(a, 8); +// } + +// NYI-LABEL: @test_vqshruns_n_s32( +// NYI: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 %a, i64 0 +// NYI: [[VQSHRUNS_N_S32:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> [[TMP0]], i32 16) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQSHRUNS_N_S32]], i64 0 +// NYI: ret i16 [[TMP1]] +// int16_t test_vqshruns_n_s32(int32_t a) { +// return (int16_t)vqshruns_n_s32(a, 16); +// } + +// NYI-LABEL: @test_vqshrund_n_s64( +// NYI: [[VQSHRUND_N_S64:%.*]] = call i32 @llvm.aarch64.neon.sqshrun.i32(i64 %a, i32 32) +// NYI: ret i32 [[VQSHRUND_N_S64]] +// int32_t test_vqshrund_n_s64(int64_t a) { +// return (int32_t)vqshrund_n_s64(a, 32); +// } + +// NYI-LABEL: @test_vqrshrunh_n_s16( +// NYI: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 %a, i64 0 +// NYI: [[VQRSHRUNH_N_S16:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> [[TMP0]], i32 8) +// NYI: [[TMP1:%.*]] = extractelement <8 x i8> [[VQRSHRUNH_N_S16]], i64 0 +// NYI: ret i8 [[TMP1]] +// uint8_t test_vqrshrunh_n_s16(int16_t a) { +// return (uint8_t)vqrshrunh_n_s16(a, 8); +// } + +// NYI-LABEL: @test_vqrshruns_n_s32( +// NYI: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 %a, i64 0 +// NYI: [[VQRSHRUNS_N_S32:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> [[TMP0]], i32 16) +// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQRSHRUNS_N_S32]], i64 0 +// NYI: ret i16 [[TMP1]] +// uint16_t test_vqrshruns_n_s32(int32_t a) { +// return (uint16_t)vqrshruns_n_s32(a, 16); +// } + +// NYI-LABEL: @test_vqrshrund_n_s64( +// NYI: [[VQRSHRUND_N_S64:%.*]] = call i32 @llvm.aarch64.neon.sqrshrun.i32(i64 %a, i32 32) +// NYI: ret i32 [[VQRSHRUND_N_S64]] +// uint32_t test_vqrshrund_n_s64(int64_t a) { +// return (uint32_t)vqrshrund_n_s64(a, 32); +// } + +// NYI-LABEL: @test_vcvts_n_f32_s32( +// NYI: [[VCVTS_N_F32_S32:%.*]] = call float @llvm.aarch64.neon.vcvtfxs2fp.f32.i32(i32 %a, i32 1) +// NYI: ret float [[VCVTS_N_F32_S32]] +// float32_t test_vcvts_n_f32_s32(int32_t a) { +// return vcvts_n_f32_s32(a, 1); +// } + +// NYI-LABEL: @test_vcvtd_n_f64_s64( +// NYI: [[VCVTD_N_F64_S64:%.*]] = call double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64 %a, i32 1) +// NYI: ret double [[VCVTD_N_F64_S64]] +// float64_t test_vcvtd_n_f64_s64(int64_t a) { +// return vcvtd_n_f64_s64(a, 1); +// } + +// NYI-LABEL: @test_vcvts_n_f32_u32( +// NYI: [[VCVTS_N_F32_U32:%.*]] = call float @llvm.aarch64.neon.vcvtfxu2fp.f32.i32(i32 %a, i32 32) +// NYI: ret float [[VCVTS_N_F32_U32]] +// float32_t test_vcvts_n_f32_u32(uint32_t a) { +// return vcvts_n_f32_u32(a, 32); +// } + +// NYI-LABEL: @test_vcvtd_n_f64_u64( +// NYI: [[VCVTD_N_F64_U64:%.*]] = call double @llvm.aarch64.neon.vcvtfxu2fp.f64.i64(i64 %a, i32 64) +// NYI: ret double [[VCVTD_N_F64_U64]] +// float64_t test_vcvtd_n_f64_u64(uint64_t a) { +// return vcvtd_n_f64_u64(a, 64); +// } + +// NYI-LABEL: @test_vcvts_n_s32_f32( +// NYI: [[VCVTS_N_S32_F32:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f32(float %a, i32 1) +// NYI: ret i32 [[VCVTS_N_S32_F32]] +// int32_t test_vcvts_n_s32_f32(float32_t a) { +// return (int32_t)vcvts_n_s32_f32(a, 1); +// } + +// NYI-LABEL: @test_vcvtd_n_s64_f64( +// NYI: [[VCVTD_N_S64_F64:%.*]] = call i64 @llvm.aarch64.neon.vcvtfp2fxs.i64.f64(double %a, i32 1) +// NYI: ret i64 [[VCVTD_N_S64_F64]] +// int64_t test_vcvtd_n_s64_f64(float64_t a) { +// return (int64_t)vcvtd_n_s64_f64(a, 1); +// } + +// NYI-LABEL: @test_vcvts_n_u32_f32( +// NYI: [[VCVTS_N_U32_F32:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f32(float %a, i32 32) +// NYI: ret i32 [[VCVTS_N_U32_F32]] +// uint32_t test_vcvts_n_u32_f32(float32_t a) { +// return (uint32_t)vcvts_n_u32_f32(a, 32); +// } + +// NYI-LABEL: @test_vcvtd_n_u64_f64( +// NYI: [[VCVTD_N_U64_F64:%.*]] = call i64 @llvm.aarch64.neon.vcvtfp2fxu.i64.f64(double %a, i32 64) +// NYI: ret i64 [[VCVTD_N_U64_F64]] +// uint64_t test_vcvtd_n_u64_f64(float64_t a) { +// return (uint64_t)vcvtd_n_u64_f64(a, 64); +// } + +// NYI-LABEL: @test_vreinterpret_s8_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_s16(int16x4_t a) { +// return vreinterpret_s8_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_s32(int32x2_t a) { +// return vreinterpret_s8_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_s64(int64x1_t a) { +// return vreinterpret_s8_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_u8( +// NYI: ret <8 x i8> %a +// int8x8_t test_vreinterpret_s8_u8(uint8x8_t a) { +// return vreinterpret_s8_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_u16(uint16x4_t a) { +// return vreinterpret_s8_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_u32(uint32x2_t a) { +// return vreinterpret_s8_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_u64(uint64x1_t a) { +// return vreinterpret_s8_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_f16(float16x4_t a) { +// return vreinterpret_s8_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_f32(float32x2_t a) { +// return vreinterpret_s8_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_f64(float64x1_t a) { +// return vreinterpret_s8_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_p8( +// NYI: ret <8 x i8> %a +// int8x8_t test_vreinterpret_s8_p8(poly8x8_t a) { +// return vreinterpret_s8_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_p16(poly16x4_t a) { +// return vreinterpret_s8_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s8_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// int8x8_t test_vreinterpret_s8_p64(poly64x1_t a) { +// return vreinterpret_s8_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_s8(int8x8_t a) { +// return vreinterpret_s16_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_s32(int32x2_t a) { +// return vreinterpret_s16_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_s64(int64x1_t a) { +// return vreinterpret_s16_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_u8(uint8x8_t a) { +// return vreinterpret_s16_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_u16( +// NYI: ret <4 x i16> %a +// int16x4_t test_vreinterpret_s16_u16(uint16x4_t a) { +// return vreinterpret_s16_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_u32(uint32x2_t a) { +// return vreinterpret_s16_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_u64(uint64x1_t a) { +// return vreinterpret_s16_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_f16(float16x4_t a) { +// return vreinterpret_s16_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_f32(float32x2_t a) { +// return vreinterpret_s16_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_f64(float64x1_t a) { +// return vreinterpret_s16_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_p8(poly8x8_t a) { +// return vreinterpret_s16_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_p16( +// NYI: ret <4 x i16> %a +// int16x4_t test_vreinterpret_s16_p16(poly16x4_t a) { +// return vreinterpret_s16_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s16_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// int16x4_t test_vreinterpret_s16_p64(poly64x1_t a) { +// return vreinterpret_s16_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_s8(int8x8_t a) { +// return vreinterpret_s32_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_s16(int16x4_t a) { +// return vreinterpret_s32_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_s64(int64x1_t a) { +// return vreinterpret_s32_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_u8(uint8x8_t a) { +// return vreinterpret_s32_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_u16(uint16x4_t a) { +// return vreinterpret_s32_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_u32( +// NYI: ret <2 x i32> %a +// int32x2_t test_vreinterpret_s32_u32(uint32x2_t a) { +// return vreinterpret_s32_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_u64(uint64x1_t a) { +// return vreinterpret_s32_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_f16(float16x4_t a) { +// return vreinterpret_s32_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_f32(float32x2_t a) { +// return vreinterpret_s32_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_f64(float64x1_t a) { +// return vreinterpret_s32_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_p8(poly8x8_t a) { +// return vreinterpret_s32_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_p16(poly16x4_t a) { +// return vreinterpret_s32_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s32_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// int32x2_t test_vreinterpret_s32_p64(poly64x1_t a) { +// return vreinterpret_s32_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_s8(int8x8_t a) { +// return vreinterpret_s64_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_s16(int16x4_t a) { +// return vreinterpret_s64_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_s32(int32x2_t a) { +// return vreinterpret_s64_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_u8(uint8x8_t a) { +// return vreinterpret_s64_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_u16(uint16x4_t a) { +// return vreinterpret_s64_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_u32(uint32x2_t a) { +// return vreinterpret_s64_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_u64( +// NYI: ret <1 x i64> %a +// int64x1_t test_vreinterpret_s64_u64(uint64x1_t a) { +// return vreinterpret_s64_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_f16(float16x4_t a) { +// return vreinterpret_s64_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_f32(float32x2_t a) { +// return vreinterpret_s64_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_f64(float64x1_t a) { +// return vreinterpret_s64_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_p8(poly8x8_t a) { +// return vreinterpret_s64_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// int64x1_t test_vreinterpret_s64_p16(poly16x4_t a) { +// return vreinterpret_s64_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_s64_p64( +// NYI: ret <1 x i64> %a +// int64x1_t test_vreinterpret_s64_p64(poly64x1_t a) { +// return vreinterpret_s64_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_s8( +// NYI: ret <8 x i8> %a +// uint8x8_t test_vreinterpret_u8_s8(int8x8_t a) { +// return vreinterpret_u8_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_s16(int16x4_t a) { +// return vreinterpret_u8_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_s32(int32x2_t a) { +// return vreinterpret_u8_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_s64(int64x1_t a) { +// return vreinterpret_u8_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_u16(uint16x4_t a) { +// return vreinterpret_u8_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_u32(uint32x2_t a) { +// return vreinterpret_u8_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_u64(uint64x1_t a) { +// return vreinterpret_u8_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_f16(float16x4_t a) { +// return vreinterpret_u8_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_f32(float32x2_t a) { +// return vreinterpret_u8_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_f64(float64x1_t a) { +// return vreinterpret_u8_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_p8( +// NYI: ret <8 x i8> %a +// uint8x8_t test_vreinterpret_u8_p8(poly8x8_t a) { +// return vreinterpret_u8_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_p16(poly16x4_t a) { +// return vreinterpret_u8_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u8_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// uint8x8_t test_vreinterpret_u8_p64(poly64x1_t a) { +// return vreinterpret_u8_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_s8(int8x8_t a) { +// return vreinterpret_u16_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_s16( +// NYI: ret <4 x i16> %a +// uint16x4_t test_vreinterpret_u16_s16(int16x4_t a) { +// return vreinterpret_u16_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_s32(int32x2_t a) { +// return vreinterpret_u16_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_s64(int64x1_t a) { +// return vreinterpret_u16_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_u8(uint8x8_t a) { +// return vreinterpret_u16_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_u32(uint32x2_t a) { +// return vreinterpret_u16_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_u64(uint64x1_t a) { +// return vreinterpret_u16_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_f16(float16x4_t a) { +// return vreinterpret_u16_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_f32(float32x2_t a) { +// return vreinterpret_u16_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_f64(float64x1_t a) { +// return vreinterpret_u16_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_p8(poly8x8_t a) { +// return vreinterpret_u16_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_p16( +// NYI: ret <4 x i16> %a +// uint16x4_t test_vreinterpret_u16_p16(poly16x4_t a) { +// return vreinterpret_u16_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u16_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// uint16x4_t test_vreinterpret_u16_p64(poly64x1_t a) { +// return vreinterpret_u16_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_s8(int8x8_t a) { +// return vreinterpret_u32_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_s16(int16x4_t a) { +// return vreinterpret_u32_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_s32( +// NYI: ret <2 x i32> %a +// uint32x2_t test_vreinterpret_u32_s32(int32x2_t a) { +// return vreinterpret_u32_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_s64(int64x1_t a) { +// return vreinterpret_u32_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_u8(uint8x8_t a) { +// return vreinterpret_u32_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_u16(uint16x4_t a) { +// return vreinterpret_u32_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_u64(uint64x1_t a) { +// return vreinterpret_u32_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_f16(float16x4_t a) { +// return vreinterpret_u32_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_f32(float32x2_t a) { +// return vreinterpret_u32_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_f64(float64x1_t a) { +// return vreinterpret_u32_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_p8(poly8x8_t a) { +// return vreinterpret_u32_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_p16(poly16x4_t a) { +// return vreinterpret_u32_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u32_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <2 x i32> +// NYI: ret <2 x i32> [[TMP0]] +// uint32x2_t test_vreinterpret_u32_p64(poly64x1_t a) { +// return vreinterpret_u32_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_s8(int8x8_t a) { +// return vreinterpret_u64_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_s16(int16x4_t a) { +// return vreinterpret_u64_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_s32(int32x2_t a) { +// return vreinterpret_u64_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_s64( +// NYI: ret <1 x i64> %a +// uint64x1_t test_vreinterpret_u64_s64(int64x1_t a) { +// return vreinterpret_u64_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_u8(uint8x8_t a) { +// return vreinterpret_u64_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_u16(uint16x4_t a) { +// return vreinterpret_u64_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_u32(uint32x2_t a) { +// return vreinterpret_u64_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_f16(float16x4_t a) { +// return vreinterpret_u64_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_f32(float32x2_t a) { +// return vreinterpret_u64_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_f64(float64x1_t a) { +// return vreinterpret_u64_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_p8(poly8x8_t a) { +// return vreinterpret_u64_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// uint64x1_t test_vreinterpret_u64_p16(poly16x4_t a) { +// return vreinterpret_u64_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_u64_p64( +// NYI: ret <1 x i64> %a +// uint64x1_t test_vreinterpret_u64_p64(poly64x1_t a) { +// return vreinterpret_u64_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_s8(int8x8_t a) { +// return vreinterpret_f16_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_s16(int16x4_t a) { +// return vreinterpret_f16_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_s32(int32x2_t a) { +// return vreinterpret_f16_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_s64(int64x1_t a) { +// return vreinterpret_f16_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_u8(uint8x8_t a) { +// return vreinterpret_f16_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_u16(uint16x4_t a) { +// return vreinterpret_f16_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_u32(uint32x2_t a) { +// return vreinterpret_f16_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_u64(uint64x1_t a) { +// return vreinterpret_f16_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_f32(float32x2_t a) { +// return vreinterpret_f16_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_f64(float64x1_t a) { +// return vreinterpret_f16_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_p8(poly8x8_t a) { +// return vreinterpret_f16_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_p16(poly16x4_t a) { +// return vreinterpret_f16_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f16_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x half> +// NYI: ret <4 x half> [[TMP0]] +// float16x4_t test_vreinterpret_f16_p64(poly64x1_t a) { +// return vreinterpret_f16_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_s8(int8x8_t a) { +// return vreinterpret_f32_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_s16(int16x4_t a) { +// return vreinterpret_f32_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_s32(int32x2_t a) { +// return vreinterpret_f32_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_s64(int64x1_t a) { +// return vreinterpret_f32_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_u8(uint8x8_t a) { +// return vreinterpret_f32_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_u16(uint16x4_t a) { +// return vreinterpret_f32_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_u32(uint32x2_t a) { +// return vreinterpret_f32_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_u64(uint64x1_t a) { +// return vreinterpret_f32_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_f16(float16x4_t a) { +// return vreinterpret_f32_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_f64(float64x1_t a) { +// return vreinterpret_f32_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_p8(poly8x8_t a) { +// return vreinterpret_f32_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_p16(poly16x4_t a) { +// return vreinterpret_f32_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f32_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <2 x float> +// NYI: ret <2 x float> [[TMP0]] +// float32x2_t test_vreinterpret_f32_p64(poly64x1_t a) { +// return vreinterpret_f32_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_s8(int8x8_t a) { +// return vreinterpret_f64_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_s16(int16x4_t a) { +// return vreinterpret_f64_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_s32(int32x2_t a) { +// return vreinterpret_f64_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_s64(int64x1_t a) { +// return vreinterpret_f64_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_u8(uint8x8_t a) { +// return vreinterpret_f64_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_u16(uint16x4_t a) { +// return vreinterpret_f64_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_u32(uint32x2_t a) { +// return vreinterpret_f64_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_u64(uint64x1_t a) { +// return vreinterpret_f64_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_f16(float16x4_t a) { +// return vreinterpret_f64_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_f32(float32x2_t a) { +// return vreinterpret_f64_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_p8(poly8x8_t a) { +// return vreinterpret_f64_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_p16(poly16x4_t a) { +// return vreinterpret_f64_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_f64_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <1 x double> +// NYI: ret <1 x double> [[TMP0]] +// float64x1_t test_vreinterpret_f64_p64(poly64x1_t a) { +// return vreinterpret_f64_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_s8( +// NYI: ret <8 x i8> %a +// poly8x8_t test_vreinterpret_p8_s8(int8x8_t a) { +// return vreinterpret_p8_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_s16(int16x4_t a) { +// return vreinterpret_p8_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_s32(int32x2_t a) { +// return vreinterpret_p8_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_s64(int64x1_t a) { +// return vreinterpret_p8_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_u8( +// NYI: ret <8 x i8> %a +// poly8x8_t test_vreinterpret_p8_u8(uint8x8_t a) { +// return vreinterpret_p8_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_u16(uint16x4_t a) { +// return vreinterpret_p8_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_u32(uint32x2_t a) { +// return vreinterpret_p8_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_u64(uint64x1_t a) { +// return vreinterpret_p8_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_f16(float16x4_t a) { +// return vreinterpret_p8_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_f32(float32x2_t a) { +// return vreinterpret_p8_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_f64(float64x1_t a) { +// return vreinterpret_p8_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_p16(poly16x4_t a) { +// return vreinterpret_p8_p16(a); +// } + +// NYI-LABEL: @test_vreinterpret_p8_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: ret <8 x i8> [[TMP0]] +// poly8x8_t test_vreinterpret_p8_p64(poly64x1_t a) { +// return vreinterpret_p8_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_s8(int8x8_t a) { +// return vreinterpret_p16_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_s16( +// NYI: ret <4 x i16> %a +// poly16x4_t test_vreinterpret_p16_s16(int16x4_t a) { +// return vreinterpret_p16_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_s32(int32x2_t a) { +// return vreinterpret_p16_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_s64(int64x1_t a) { +// return vreinterpret_p16_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_u8(uint8x8_t a) { +// return vreinterpret_p16_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_u16( +// NYI: ret <4 x i16> %a +// poly16x4_t test_vreinterpret_p16_u16(uint16x4_t a) { +// return vreinterpret_p16_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_u32(uint32x2_t a) { +// return vreinterpret_p16_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_u64(uint64x1_t a) { +// return vreinterpret_p16_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_f16(float16x4_t a) { +// return vreinterpret_p16_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_f32(float32x2_t a) { +// return vreinterpret_p16_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_f64(float64x1_t a) { +// return vreinterpret_p16_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_p8(poly8x8_t a) { +// return vreinterpret_p16_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_p16_p64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <4 x i16> +// NYI: ret <4 x i16> [[TMP0]] +// poly16x4_t test_vreinterpret_p16_p64(poly64x1_t a) { +// return vreinterpret_p16_p64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_s8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_s8(int8x8_t a) { +// return vreinterpret_p64_s8(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_s16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_s16(int16x4_t a) { +// return vreinterpret_p64_s16(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_s32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_s32(int32x2_t a) { +// return vreinterpret_p64_s32(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_s64( +// NYI: ret <1 x i64> %a +// poly64x1_t test_vreinterpret_p64_s64(int64x1_t a) { +// return vreinterpret_p64_s64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_u8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_u8(uint8x8_t a) { +// return vreinterpret_p64_u8(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_u16(uint16x4_t a) { +// return vreinterpret_p64_u16(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_u32(uint32x2_t a) { +// return vreinterpret_p64_u32(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_u64( +// NYI: ret <1 x i64> %a +// poly64x1_t test_vreinterpret_p64_u64(uint64x1_t a) { +// return vreinterpret_p64_u64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_f16( +// NYI: [[TMP0:%.*]] = bitcast <4 x half> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_f16(float16x4_t a) { +// return vreinterpret_p64_f16(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_f32( +// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_f32(float32x2_t a) { +// return vreinterpret_p64_f32(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_f64(float64x1_t a) { +// return vreinterpret_p64_f64(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_p8( +// NYI: [[TMP0:%.*]] = bitcast <8 x i8> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_p8(poly8x8_t a) { +// return vreinterpret_p64_p8(a); +// } + +// NYI-LABEL: @test_vreinterpret_p64_p16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <1 x i64> +// NYI: ret <1 x i64> [[TMP0]] +// poly64x1_t test_vreinterpret_p64_p16(poly16x4_t a) { +// return vreinterpret_p64_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_s16(int16x8_t a) { +// return vreinterpretq_s8_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_s32(int32x4_t a) { +// return vreinterpretq_s8_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_s64(int64x2_t a) { +// return vreinterpretq_s8_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_u8( +// NYI: ret <16 x i8> %a +// int8x16_t test_vreinterpretq_s8_u8(uint8x16_t a) { +// return vreinterpretq_s8_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_u16(uint16x8_t a) { +// return vreinterpretq_s8_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_u32(uint32x4_t a) { +// return vreinterpretq_s8_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_u64(uint64x2_t a) { +// return vreinterpretq_s8_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_f16(float16x8_t a) { +// return vreinterpretq_s8_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_f32(float32x4_t a) { +// return vreinterpretq_s8_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_f64(float64x2_t a) { +// return vreinterpretq_s8_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_p8( +// NYI: ret <16 x i8> %a +// int8x16_t test_vreinterpretq_s8_p8(poly8x16_t a) { +// return vreinterpretq_s8_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_p16(poly16x8_t a) { +// return vreinterpretq_s8_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s8_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// int8x16_t test_vreinterpretq_s8_p64(poly64x2_t a) { +// return vreinterpretq_s8_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_s8(int8x16_t a) { +// return vreinterpretq_s16_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_s32(int32x4_t a) { +// return vreinterpretq_s16_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_s64(int64x2_t a) { +// return vreinterpretq_s16_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_u8(uint8x16_t a) { +// return vreinterpretq_s16_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_u16( +// NYI: ret <8 x i16> %a +// int16x8_t test_vreinterpretq_s16_u16(uint16x8_t a) { +// return vreinterpretq_s16_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_u32(uint32x4_t a) { +// return vreinterpretq_s16_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_u64(uint64x2_t a) { +// return vreinterpretq_s16_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_f16(float16x8_t a) { +// return vreinterpretq_s16_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_f32(float32x4_t a) { +// return vreinterpretq_s16_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_f64(float64x2_t a) { +// return vreinterpretq_s16_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_p8(poly8x16_t a) { +// return vreinterpretq_s16_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_p16( +// NYI: ret <8 x i16> %a +// int16x8_t test_vreinterpretq_s16_p16(poly16x8_t a) { +// return vreinterpretq_s16_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s16_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// int16x8_t test_vreinterpretq_s16_p64(poly64x2_t a) { +// return vreinterpretq_s16_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_s8(int8x16_t a) { +// return vreinterpretq_s32_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_s16(int16x8_t a) { +// return vreinterpretq_s32_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_s64(int64x2_t a) { +// return vreinterpretq_s32_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_u8(uint8x16_t a) { +// return vreinterpretq_s32_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_u16(uint16x8_t a) { +// return vreinterpretq_s32_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_u32( +// NYI: ret <4 x i32> %a +// int32x4_t test_vreinterpretq_s32_u32(uint32x4_t a) { +// return vreinterpretq_s32_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_u64(uint64x2_t a) { +// return vreinterpretq_s32_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_f16(float16x8_t a) { +// return vreinterpretq_s32_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_f32(float32x4_t a) { +// return vreinterpretq_s32_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_f64(float64x2_t a) { +// return vreinterpretq_s32_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_p8(poly8x16_t a) { +// return vreinterpretq_s32_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_p16(poly16x8_t a) { +// return vreinterpretq_s32_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s32_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// int32x4_t test_vreinterpretq_s32_p64(poly64x2_t a) { +// return vreinterpretq_s32_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_s8(int8x16_t a) { +// return vreinterpretq_s64_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_s16(int16x8_t a) { +// return vreinterpretq_s64_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_s32(int32x4_t a) { +// return vreinterpretq_s64_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_u8(uint8x16_t a) { +// return vreinterpretq_s64_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_u16(uint16x8_t a) { +// return vreinterpretq_s64_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_u32(uint32x4_t a) { +// return vreinterpretq_s64_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_u64( +// NYI: ret <2 x i64> %a +// int64x2_t test_vreinterpretq_s64_u64(uint64x2_t a) { +// return vreinterpretq_s64_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_f16(float16x8_t a) { +// return vreinterpretq_s64_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_f32(float32x4_t a) { +// return vreinterpretq_s64_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_f64(float64x2_t a) { +// return vreinterpretq_s64_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_p8(poly8x16_t a) { +// return vreinterpretq_s64_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// int64x2_t test_vreinterpretq_s64_p16(poly16x8_t a) { +// return vreinterpretq_s64_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_s64_p64( +// NYI: ret <2 x i64> %a +// int64x2_t test_vreinterpretq_s64_p64(poly64x2_t a) { +// return vreinterpretq_s64_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_s8( +// NYI: ret <16 x i8> %a +// uint8x16_t test_vreinterpretq_u8_s8(int8x16_t a) { +// return vreinterpretq_u8_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_s16(int16x8_t a) { +// return vreinterpretq_u8_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_s32(int32x4_t a) { +// return vreinterpretq_u8_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_s64(int64x2_t a) { +// return vreinterpretq_u8_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_u16(uint16x8_t a) { +// return vreinterpretq_u8_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_u32(uint32x4_t a) { +// return vreinterpretq_u8_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_u64(uint64x2_t a) { +// return vreinterpretq_u8_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_f16(float16x8_t a) { +// return vreinterpretq_u8_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_f32(float32x4_t a) { +// return vreinterpretq_u8_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_f64(float64x2_t a) { +// return vreinterpretq_u8_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_p8( +// NYI: ret <16 x i8> %a +// uint8x16_t test_vreinterpretq_u8_p8(poly8x16_t a) { +// return vreinterpretq_u8_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_p16(poly16x8_t a) { +// return vreinterpretq_u8_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u8_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// uint8x16_t test_vreinterpretq_u8_p64(poly64x2_t a) { +// return vreinterpretq_u8_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_s8(int8x16_t a) { +// return vreinterpretq_u16_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_s16( +// NYI: ret <8 x i16> %a +// uint16x8_t test_vreinterpretq_u16_s16(int16x8_t a) { +// return vreinterpretq_u16_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_s32(int32x4_t a) { +// return vreinterpretq_u16_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_s64(int64x2_t a) { +// return vreinterpretq_u16_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_u8(uint8x16_t a) { +// return vreinterpretq_u16_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_u32(uint32x4_t a) { +// return vreinterpretq_u16_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_u64(uint64x2_t a) { +// return vreinterpretq_u16_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_f16(float16x8_t a) { +// return vreinterpretq_u16_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_f32(float32x4_t a) { +// return vreinterpretq_u16_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_f64(float64x2_t a) { +// return vreinterpretq_u16_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_p8(poly8x16_t a) { +// return vreinterpretq_u16_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_p16( +// NYI: ret <8 x i16> %a +// uint16x8_t test_vreinterpretq_u16_p16(poly16x8_t a) { +// return vreinterpretq_u16_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u16_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// uint16x8_t test_vreinterpretq_u16_p64(poly64x2_t a) { +// return vreinterpretq_u16_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_s8(int8x16_t a) { +// return vreinterpretq_u32_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_s16(int16x8_t a) { +// return vreinterpretq_u32_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_s32( +// NYI: ret <4 x i32> %a +// uint32x4_t test_vreinterpretq_u32_s32(int32x4_t a) { +// return vreinterpretq_u32_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_s64(int64x2_t a) { +// return vreinterpretq_u32_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_u8(uint8x16_t a) { +// return vreinterpretq_u32_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_u16(uint16x8_t a) { +// return vreinterpretq_u32_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_u64(uint64x2_t a) { +// return vreinterpretq_u32_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_f16(float16x8_t a) { +// return vreinterpretq_u32_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_f32(float32x4_t a) { +// return vreinterpretq_u32_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_f64(float64x2_t a) { +// return vreinterpretq_u32_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_p8(poly8x16_t a) { +// return vreinterpretq_u32_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_p16(poly16x8_t a) { +// return vreinterpretq_u32_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u32_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <4 x i32> +// NYI: ret <4 x i32> [[TMP0]] +// uint32x4_t test_vreinterpretq_u32_p64(poly64x2_t a) { +// return vreinterpretq_u32_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_s8(int8x16_t a) { +// return vreinterpretq_u64_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_s16(int16x8_t a) { +// return vreinterpretq_u64_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_s32(int32x4_t a) { +// return vreinterpretq_u64_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_s64( +// NYI: ret <2 x i64> %a +// uint64x2_t test_vreinterpretq_u64_s64(int64x2_t a) { +// return vreinterpretq_u64_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_u8(uint8x16_t a) { +// return vreinterpretq_u64_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_u16(uint16x8_t a) { +// return vreinterpretq_u64_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_u32(uint32x4_t a) { +// return vreinterpretq_u64_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_f16(float16x8_t a) { +// return vreinterpretq_u64_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_f32(float32x4_t a) { +// return vreinterpretq_u64_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_f64(float64x2_t a) { +// return vreinterpretq_u64_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_p8(poly8x16_t a) { +// return vreinterpretq_u64_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// uint64x2_t test_vreinterpretq_u64_p16(poly16x8_t a) { +// return vreinterpretq_u64_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_u64_p64( +// NYI: ret <2 x i64> %a +// uint64x2_t test_vreinterpretq_u64_p64(poly64x2_t a) { +// return vreinterpretq_u64_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_s8(int8x16_t a) { +// return vreinterpretq_f16_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_s16(int16x8_t a) { +// return vreinterpretq_f16_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_s32(int32x4_t a) { +// return vreinterpretq_f16_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_s64(int64x2_t a) { +// return vreinterpretq_f16_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_u8(uint8x16_t a) { +// return vreinterpretq_f16_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_u16(uint16x8_t a) { +// return vreinterpretq_f16_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_u32(uint32x4_t a) { +// return vreinterpretq_f16_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_u64(uint64x2_t a) { +// return vreinterpretq_f16_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_f32(float32x4_t a) { +// return vreinterpretq_f16_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_f64(float64x2_t a) { +// return vreinterpretq_f16_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_p8(poly8x16_t a) { +// return vreinterpretq_f16_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_p16(poly16x8_t a) { +// return vreinterpretq_f16_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f16_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x half> +// NYI: ret <8 x half> [[TMP0]] +// float16x8_t test_vreinterpretq_f16_p64(poly64x2_t a) { +// return vreinterpretq_f16_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_s8(int8x16_t a) { +// return vreinterpretq_f32_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_s16(int16x8_t a) { +// return vreinterpretq_f32_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_s32(int32x4_t a) { +// return vreinterpretq_f32_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_s64(int64x2_t a) { +// return vreinterpretq_f32_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_u8(uint8x16_t a) { +// return vreinterpretq_f32_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_u16(uint16x8_t a) { +// return vreinterpretq_f32_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_u32(uint32x4_t a) { +// return vreinterpretq_f32_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_u64(uint64x2_t a) { +// return vreinterpretq_f32_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_f16(float16x8_t a) { +// return vreinterpretq_f32_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_f64(float64x2_t a) { +// return vreinterpretq_f32_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_p8(poly8x16_t a) { +// return vreinterpretq_f32_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_p16(poly16x8_t a) { +// return vreinterpretq_f32_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f32_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <4 x float> +// NYI: ret <4 x float> [[TMP0]] +// float32x4_t test_vreinterpretq_f32_p64(poly64x2_t a) { +// return vreinterpretq_f32_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_s8(int8x16_t a) { +// return vreinterpretq_f64_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_s16(int16x8_t a) { +// return vreinterpretq_f64_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_s32(int32x4_t a) { +// return vreinterpretq_f64_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_s64(int64x2_t a) { +// return vreinterpretq_f64_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_u8(uint8x16_t a) { +// return vreinterpretq_f64_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_u16(uint16x8_t a) { +// return vreinterpretq_f64_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_u32(uint32x4_t a) { +// return vreinterpretq_f64_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_u64(uint64x2_t a) { +// return vreinterpretq_f64_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_f16(float16x8_t a) { +// return vreinterpretq_f64_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_f32(float32x4_t a) { +// return vreinterpretq_f64_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_p8(poly8x16_t a) { +// return vreinterpretq_f64_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_p16(poly16x8_t a) { +// return vreinterpretq_f64_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_f64_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <2 x double> +// NYI: ret <2 x double> [[TMP0]] +// float64x2_t test_vreinterpretq_f64_p64(poly64x2_t a) { +// return vreinterpretq_f64_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_s8( +// NYI: ret <16 x i8> %a +// poly8x16_t test_vreinterpretq_p8_s8(int8x16_t a) { +// return vreinterpretq_p8_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_s16(int16x8_t a) { +// return vreinterpretq_p8_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_s32(int32x4_t a) { +// return vreinterpretq_p8_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_s64(int64x2_t a) { +// return vreinterpretq_p8_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_u8( +// NYI: ret <16 x i8> %a +// poly8x16_t test_vreinterpretq_p8_u8(uint8x16_t a) { +// return vreinterpretq_p8_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_u16(uint16x8_t a) { +// return vreinterpretq_p8_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_u32(uint32x4_t a) { +// return vreinterpretq_p8_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_u64(uint64x2_t a) { +// return vreinterpretq_p8_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_f16(float16x8_t a) { +// return vreinterpretq_p8_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_f32(float32x4_t a) { +// return vreinterpretq_p8_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_f64(float64x2_t a) { +// return vreinterpretq_p8_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_p16(poly16x8_t a) { +// return vreinterpretq_p8_p16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p8_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: ret <16 x i8> [[TMP0]] +// poly8x16_t test_vreinterpretq_p8_p64(poly64x2_t a) { +// return vreinterpretq_p8_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_s8(int8x16_t a) { +// return vreinterpretq_p16_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_s16( +// NYI: ret <8 x i16> %a +// poly16x8_t test_vreinterpretq_p16_s16(int16x8_t a) { +// return vreinterpretq_p16_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_s32(int32x4_t a) { +// return vreinterpretq_p16_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_s64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_s64(int64x2_t a) { +// return vreinterpretq_p16_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_u8(uint8x16_t a) { +// return vreinterpretq_p16_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_u16( +// NYI: ret <8 x i16> %a +// poly16x8_t test_vreinterpretq_p16_u16(uint16x8_t a) { +// return vreinterpretq_p16_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_u32(uint32x4_t a) { +// return vreinterpretq_p16_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_u64(uint64x2_t a) { +// return vreinterpretq_p16_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_f16(float16x8_t a) { +// return vreinterpretq_p16_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_f32(float32x4_t a) { +// return vreinterpretq_p16_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_f64(float64x2_t a) { +// return vreinterpretq_p16_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_p8(poly8x16_t a) { +// return vreinterpretq_p16_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p16_p64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <8 x i16> +// NYI: ret <8 x i16> [[TMP0]] +// poly16x8_t test_vreinterpretq_p16_p64(poly64x2_t a) { +// return vreinterpretq_p16_p64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_s8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_s8(int8x16_t a) { +// return vreinterpretq_p64_s8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_s16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_s16(int16x8_t a) { +// return vreinterpretq_p64_s16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_s32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_s32(int32x4_t a) { +// return vreinterpretq_p64_s32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_s64( +// NYI: ret <2 x i64> %a +// poly64x2_t test_vreinterpretq_p64_s64(int64x2_t a) { +// return vreinterpretq_p64_s64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_u8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_u8(uint8x16_t a) { +// return vreinterpretq_p64_u8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_u16(uint16x8_t a) { +// return vreinterpretq_p64_u16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_u32(uint32x4_t a) { +// return vreinterpretq_p64_u32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_u64( +// NYI: ret <2 x i64> %a +// poly64x2_t test_vreinterpretq_p64_u64(uint64x2_t a) { +// return vreinterpretq_p64_u64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_f16( +// NYI: [[TMP0:%.*]] = bitcast <8 x half> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_f16(float16x8_t a) { +// return vreinterpretq_p64_f16(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_f32( +// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_f32(float32x4_t a) { +// return vreinterpretq_p64_f32(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_f64( +// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_f64(float64x2_t a) { +// return vreinterpretq_p64_f64(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_p8( +// NYI: [[TMP0:%.*]] = bitcast <16 x i8> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_p8(poly8x16_t a) { +// return vreinterpretq_p64_p8(a); +// } + +// NYI-LABEL: @test_vreinterpretq_p64_p16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <2 x i64> +// NYI: ret <2 x i64> [[TMP0]] +// poly64x2_t test_vreinterpretq_p64_p16(poly16x8_t a) { +// return vreinterpretq_p64_p16(a); +// } + +// NYI-LABEL: @test_vabds_f32( +// NYI: [[VABDS_F32_I:%.*]] = call float @llvm.aarch64.sisd.fabd.f32(float %a, float %b) +// NYI: ret float [[VABDS_F32_I]] +// float32_t test_vabds_f32(float32_t a, float32_t b) { +// return vabds_f32(a, b); +// } + +// NYI-LABEL: @test_vabdd_f64( +// NYI: [[VABDD_F64_I:%.*]] = call double @llvm.aarch64.sisd.fabd.f64(double %a, double %b) +// NYI: ret double [[VABDD_F64_I]] +// float64_t test_vabdd_f64(float64_t a, float64_t b) { +// return vabdd_f64(a, b); +// } + +// NYI-LABEL: @test_vuqaddq_s8( +// NYI: entry: +// NYI-NEXT: [[V:%.*]] = call <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI-NEXT: ret <16 x i8> [[V]] +// int8x16_t test_vuqaddq_s8(int8x16_t a, uint8x16_t b) { +// return vuqaddq_s8(a, b); +// } + +// NYI-LABEL: @test_vuqaddq_s32( +// NYI: [[V:%.*]] = call <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI-NEXT: ret <4 x i32> [[V]] +// int32x4_t test_vuqaddq_s32(int32x4_t a, uint32x4_t b) { +// return vuqaddq_s32(a, b); +// } + +// NYI-LABEL: @test_vuqaddq_s64( +// NYI: [[V:%.*]] = call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI-NEXT: ret <2 x i64> [[V]] +// int64x2_t test_vuqaddq_s64(int64x2_t a, uint64x2_t b) { +// return vuqaddq_s64(a, b); +// } + +// NYI-LABEL: @test_vuqaddq_s16( +// NYI: [[V:%.*]] = call <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI-NEXT: ret <8 x i16> [[V]] +// int16x8_t test_vuqaddq_s16(int16x8_t a, uint16x8_t b) { +// return vuqaddq_s16(a, b); +// } + +// NYI-LABEL: @test_vuqadd_s8( +// NYI: entry: +// NYI-NEXT: [[V:%.*]] = call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI-NEXT: ret <8 x i8> [[V]] +// int8x8_t test_vuqadd_s8(int8x8_t a, uint8x8_t b) { +// return vuqadd_s8(a, b); +// } + +// NYI-LABEL: @test_vuqadd_s32( +// NYI: [[V:%.*]] = call <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI-NEXT: ret <2 x i32> [[V]] +// int32x2_t test_vuqadd_s32(int32x2_t a, uint32x2_t b) { +// return vuqadd_s32(a, b); +// } + +// NYI-LABEL: @test_vuqadd_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VUQADD2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: ret <1 x i64> [[VUQADD2_I]] +// int64x1_t test_vuqadd_s64(int64x1_t a, uint64x1_t b) { +// return vuqadd_s64(a, b); +// } + +// NYI-LABEL: @test_vuqadd_s16( +// NYI: [[V:%.*]] = call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI-NEXT: ret <4 x i16> [[V]] +// int16x4_t test_vuqadd_s16(int16x4_t a, uint16x4_t b) { +// return vuqadd_s16(a, b); +// } + +// NYI-LABEL: @test_vsqadd_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> +// NYI: [[VSQADD2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> %a, <1 x i64> %b) +// NYI: ret <1 x i64> [[VSQADD2_I]] +// uint64x1_t test_vsqadd_u64(uint64x1_t a, int64x1_t b) { +// return vsqadd_u64(a, b); +// } + +// NYI-LABEL: @test_vsqadd_u8( +// NYI: [[VSQADD_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.usqadd.v8i8(<8 x i8> %a, <8 x i8> %b) +// NYI: ret <8 x i8> [[VSQADD_I]] +// uint8x8_t test_vsqadd_u8(uint8x8_t a, int8x8_t b) { +// return vsqadd_u8(a, b); +// } + +// NYI-LABEL: @test_vsqaddq_u8( +// NYI: [[VSQADD_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.usqadd.v16i8(<16 x i8> %a, <16 x i8> %b) +// NYI: ret <16 x i8> [[VSQADD_I]] +// uint8x16_t test_vsqaddq_u8(uint8x16_t a, int8x16_t b) { +// return vsqaddq_u8(a, b); +// } + +// NYI-LABEL: @test_vsqadd_u16( +// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> +// NYI: [[VSQADD2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.usqadd.v4i16(<4 x i16> %a, <4 x i16> %b) +// NYI: ret <4 x i16> [[VSQADD2_I]] +// uint16x4_t test_vsqadd_u16(uint16x4_t a, int16x4_t b) { +// return vsqadd_u16(a, b); +// } + +// NYI-LABEL: @test_vsqaddq_u16( +// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> +// NYI: [[VSQADD2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.usqadd.v8i16(<8 x i16> %a, <8 x i16> %b) +// NYI: ret <8 x i16> [[VSQADD2_I]] +// uint16x8_t test_vsqaddq_u16(uint16x8_t a, int16x8_t b) { +// return vsqaddq_u16(a, b); +// } + +// NYI-LABEL: @test_vsqadd_u32( +// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> +// NYI: [[VSQADD2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.usqadd.v2i32(<2 x i32> %a, <2 x i32> %b) +// NYI: ret <2 x i32> [[VSQADD2_I]] +// uint32x2_t test_vsqadd_u32(uint32x2_t a, int32x2_t b) { +// return vsqadd_u32(a, b); +// } + +// NYI-LABEL: @test_vsqaddq_u32( +// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> +// NYI: [[VSQADD2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.usqadd.v4i32(<4 x i32> %a, <4 x i32> %b) +// NYI: ret <4 x i32> [[VSQADD2_I]] +// uint32x4_t test_vsqaddq_u32(uint32x4_t a, int32x4_t b) { +// return vsqaddq_u32(a, b); +// } + +// NYI-LABEL: @test_vsqaddq_u64( +// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> +// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> +// NYI: [[VSQADD2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: ret <2 x i64> [[VSQADD2_I]] +// uint64x2_t test_vsqaddq_u64(uint64x2_t a, int64x2_t b) { +// return vsqaddq_u64(a, b); +// } + +// NYI-LABEL: @test_vabs_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VABS1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.abs.v1i64(<1 x i64> %a) +// NYI: ret <1 x i64> [[VABS1_I]] +// int64x1_t test_vabs_s64(int64x1_t a) { +// return vabs_s64(a); +// } + +// NYI-LABEL: @test_vqabs_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VQABS_V1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqabs.v1i64(<1 x i64> %a) +// NYI: [[VQABS_V2_I:%.*]] = bitcast <1 x i64> [[VQABS_V1_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VQABS_V1_I]] +// int64x1_t test_vqabs_s64(int64x1_t a) { +// return vqabs_s64(a); +// } + +// NYI-LABEL: @test_vqneg_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VQNEG_V1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqneg.v1i64(<1 x i64> %a) +// NYI: [[VQNEG_V2_I:%.*]] = bitcast <1 x i64> [[VQNEG_V1_I]] to <8 x i8> +// NYI: ret <1 x i64> [[VQNEG_V1_I]] +// int64x1_t test_vqneg_s64(int64x1_t a) { +// return vqneg_s64(a); +// } + +// NYI-LABEL: @test_vneg_s64( +// NYI: [[SUB_I:%.*]] = sub <1 x i64> zeroinitializer, %a +// NYI: ret <1 x i64> [[SUB_I]] +// int64x1_t test_vneg_s64(int64x1_t a) { +// return vneg_s64(a); +// } + +// NYI-LABEL: @test_vaddv_f32( +// NYI: [[VADDV_F32_I:%.*]] = call float @llvm.aarch64.neon.faddv.f32.v2f32(<2 x float> %a) +// NYI: ret float [[VADDV_F32_I]] +// float32_t test_vaddv_f32(float32x2_t a) { +// return vaddv_f32(a); +// } + +// NYI-LABEL: @test_vaddvq_f32( +// NYI: [[VADDVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.faddv.f32.v4f32(<4 x float> %a) +// NYI: ret float [[VADDVQ_F32_I]] +// float32_t test_vaddvq_f32(float32x4_t a) { +// return vaddvq_f32(a); +// } + +// NYI-LABEL: @test_vaddvq_f64( +// NYI: [[VADDVQ_F64_I:%.*]] = call double @llvm.aarch64.neon.faddv.f64.v2f64(<2 x double> %a) +// NYI: ret double [[VADDVQ_F64_I]] +// float64_t test_vaddvq_f64(float64x2_t a) { +// return vaddvq_f64(a); +// } + +// NYI-LABEL: @test_vmaxv_f32( +// NYI: [[VMAXV_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float> %a) +// NYI: ret float [[VMAXV_F32_I]] +// float32_t test_vmaxv_f32(float32x2_t a) { +// return vmaxv_f32(a); +// } + +// NYI-LABEL: @test_vmaxvq_f64( +// NYI: [[VMAXVQ_F64_I:%.*]] = call double @llvm.aarch64.neon.fmaxv.f64.v2f64(<2 x double> %a) +// NYI: ret double [[VMAXVQ_F64_I]] +// float64_t test_vmaxvq_f64(float64x2_t a) { +// return vmaxvq_f64(a); +// } + +// NYI-LABEL: @test_vminv_f32( +// NYI: [[VMINV_F32_I:%.*]] = call float @llvm.aarch64.neon.fminv.f32.v2f32(<2 x float> %a) +// NYI: ret float [[VMINV_F32_I]] +// float32_t test_vminv_f32(float32x2_t a) { +// return vminv_f32(a); +// } + +// NYI-LABEL: @test_vminvq_f64( +// NYI: [[VMINVQ_F64_I:%.*]] = call double @llvm.aarch64.neon.fminv.f64.v2f64(<2 x double> %a) +// NYI: ret double [[VMINVQ_F64_I]] +// float64_t test_vminvq_f64(float64x2_t a) { +// return vminvq_f64(a); +// } + +// NYI-LABEL: @test_vmaxnmvq_f64( +// NYI: [[VMAXNMVQ_F64_I:%.*]] = call double @llvm.aarch64.neon.fmaxnmv.f64.v2f64(<2 x double> %a) +// NYI: ret double [[VMAXNMVQ_F64_I]] +// float64_t test_vmaxnmvq_f64(float64x2_t a) { +// return vmaxnmvq_f64(a); +// } + +// NYI-LABEL: @test_vmaxnmv_f32( +// NYI: [[VMAXNMV_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxnmv.f32.v2f32(<2 x float> %a) +// NYI: ret float [[VMAXNMV_F32_I]] +// float32_t test_vmaxnmv_f32(float32x2_t a) { +// return vmaxnmv_f32(a); +// } + +// NYI-LABEL: @test_vminnmvq_f64( +// NYI: [[VMINNMVQ_F64_I:%.*]] = call double @llvm.aarch64.neon.fminnmv.f64.v2f64(<2 x double> %a) +// NYI: ret double [[VMINNMVQ_F64_I]] +// float64_t test_vminnmvq_f64(float64x2_t a) { +// return vminnmvq_f64(a); +// } + +// NYI-LABEL: @test_vminnmv_f32( +// NYI: [[VMINNMV_F32_I:%.*]] = call float @llvm.aarch64.neon.fminnmv.f32.v2f32(<2 x float> %a) +// NYI: ret float [[VMINNMV_F32_I]] +// float32_t test_vminnmv_f32(float32x2_t a) { +// return vminnmv_f32(a); +// } + +// NYI-LABEL: @test_vpaddq_s64( +// NYI: [[VPADDQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VPADDQ_V3_I:%.*]] = bitcast <2 x i64> [[VPADDQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VPADDQ_V2_I]] +// int64x2_t test_vpaddq_s64(int64x2_t a, int64x2_t b) { +// return vpaddq_s64(a, b); +// } + +// NYI-LABEL: @test_vpaddq_u64( +// NYI: [[VPADDQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> %a, <2 x i64> %b) +// NYI: [[VPADDQ_V3_I:%.*]] = bitcast <2 x i64> [[VPADDQ_V2_I]] to <16 x i8> +// NYI: ret <2 x i64> [[VPADDQ_V2_I]] +// uint64x2_t test_vpaddq_u64(uint64x2_t a, uint64x2_t b) { +// return vpaddq_u64(a, b); +// } + +// NYI-LABEL: @test_vpaddd_u64( +// NYI: [[VPADDD_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a) +// NYI: ret i64 [[VPADDD_U64_I]] +// uint64_t test_vpaddd_u64(uint64x2_t a) { +// return vpaddd_u64(a); +// } + +// NYI-LABEL: @test_vaddvq_s64( +// NYI: [[VADDVQ_S64_I:%.*]] = call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> %a) +// NYI: ret i64 [[VADDVQ_S64_I]] +// int64_t test_vaddvq_s64(int64x2_t a) { +// return vaddvq_s64(a); +// } + +// NYI-LABEL: @test_vaddvq_u64( +// NYI: [[VADDVQ_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a) +// NYI: ret i64 [[VADDVQ_U64_I]] +// uint64_t test_vaddvq_u64(uint64x2_t a) { +// return vaddvq_u64(a); +// } + +// NYI-LABEL: @test_vadd_f64( +// NYI: [[ADD_I:%.*]] = fadd <1 x double> %a, %b +// NYI: ret <1 x double> [[ADD_I]] +// float64x1_t test_vadd_f64(float64x1_t a, float64x1_t b) { +// return vadd_f64(a, b); +// } + +// NYI-LABEL: @test_vmul_f64( +// NYI: [[MUL_I:%.*]] = fmul <1 x double> %a, %b +// NYI: ret <1 x double> [[MUL_I]] +// float64x1_t test_vmul_f64(float64x1_t a, float64x1_t b) { +// return vmul_f64(a, b); +// } + +// NYI-LABEL: @test_vdiv_f64( +// NYI: [[DIV_I:%.*]] = fdiv <1 x double> %a, %b +// NYI: ret <1 x double> [[DIV_I]] +// float64x1_t test_vdiv_f64(float64x1_t a, float64x1_t b) { +// return vdiv_f64(a, b); +// } + +// NYI-LABEL: @test_vmla_f64( +// NYI: [[MUL_I:%.*]] = fmul <1 x double> %b, %c +// NYI: [[ADD_I:%.*]] = fadd <1 x double> %a, [[MUL_I]] +// NYI: ret <1 x double> [[ADD_I]] +// float64x1_t test_vmla_f64(float64x1_t a, float64x1_t b, float64x1_t c) { +// return vmla_f64(a, b, c); +// } + +// NYI-LABEL: @test_vmls_f64( +// NYI: [[MUL_I:%.*]] = fmul <1 x double> %b, %c +// NYI: [[SUB_I:%.*]] = fsub <1 x double> %a, [[MUL_I]] +// NYI: ret <1 x double> [[SUB_I]] +// float64x1_t test_vmls_f64(float64x1_t a, float64x1_t b, float64x1_t c) { +// return vmls_f64(a, b, c); +// } + +// NYI-LABEL: @test_vfma_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <1 x double> %c to <8 x i8> +// NYI: [[TMP3:%.*]] = call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a) +// NYI: ret <1 x double> [[TMP3]] +// float64x1_t test_vfma_f64(float64x1_t a, float64x1_t b, float64x1_t c) { +// return vfma_f64(a, b, c); +// } + +// NYI-LABEL: @test_vfms_f64( +// NYI: [[SUB_I:%.*]] = fneg <1 x double> %b +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> [[SUB_I]] to <8 x i8> +// NYI: [[TMP2:%.*]] = bitcast <1 x double> %c to <8 x i8> +// NYI: [[TMP3:%.*]] = call <1 x double> @llvm.fma.v1f64(<1 x double> [[SUB_I]], <1 x double> %c, <1 x double> %a) +// NYI: ret <1 x double> [[TMP3]] +// float64x1_t test_vfms_f64(float64x1_t a, float64x1_t b, float64x1_t c) { +// return vfms_f64(a, b, c); +// } + +// NYI-LABEL: @test_vsub_f64( +// NYI: [[SUB_I:%.*]] = fsub <1 x double> %a, %b +// NYI: ret <1 x double> [[SUB_I]] +// float64x1_t test_vsub_f64(float64x1_t a, float64x1_t b) { +// return vsub_f64(a, b); +// } + +// NYI-LABEL: @test_vabd_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VABD2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fabd.v1f64(<1 x double> %a, <1 x double> %b) +// NYI: ret <1 x double> [[VABD2_I]] +// float64x1_t test_vabd_f64(float64x1_t a, float64x1_t b) { +// return vabd_f64(a, b); +// } + +// NYI-LABEL: @test_vmax_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VMAX2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmax.v1f64(<1 x double> %a, <1 x double> %b) +// NYI: ret <1 x double> [[VMAX2_I]] +// float64x1_t test_vmax_f64(float64x1_t a, float64x1_t b) { +// return vmax_f64(a, b); +// } + +// NYI-LABEL: @test_vmin_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> %a, <1 x double> %b) +// NYI: ret <1 x double> [[VMIN2_I]] +// float64x1_t test_vmin_f64(float64x1_t a, float64x1_t b) { +// return vmin_f64(a, b); +// } + +// NYI-LABEL: @test_vmaxnm_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VMAXNM2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmaxnm.v1f64(<1 x double> %a, <1 x double> %b) +// NYI: ret <1 x double> [[VMAXNM2_I]] +// float64x1_t test_vmaxnm_f64(float64x1_t a, float64x1_t b) { +// return vmaxnm_f64(a, b); +// } + +// NYI-LABEL: @test_vminnm_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VMINNM2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> %a, <1 x double> %b) +// NYI: ret <1 x double> [[VMINNM2_I]] +// float64x1_t test_vminnm_f64(float64x1_t a, float64x1_t b) { +// return vminnm_f64(a, b); +// } + +// NYI-LABEL: @test_vabs_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VABS1_I:%.*]] = call <1 x double> @llvm.fabs.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VABS1_I]] +// float64x1_t test_vabs_f64(float64x1_t a) { +// return vabs_f64(a); +// } + +// NYI-LABEL: @test_vneg_f64( +// NYI: [[SUB_I:%.*]] = fneg <1 x double> %a +// NYI: ret <1 x double> [[SUB_I]] +// float64x1_t test_vneg_f64(float64x1_t a) { +// return vneg_f64(a); +// } + +// NYI-LABEL: @test_vcvt_s64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double> %a) +// NYI: ret <1 x i64> [[TMP1]] +// int64x1_t test_vcvt_s64_f64(float64x1_t a) { +// return vcvt_s64_f64(a); +// } + +// NYI-LABEL: @test_vcvt_u64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double> %a) +// NYI: ret <1 x i64> [[TMP1]] +// uint64x1_t test_vcvt_u64_f64(float64x1_t a) { +// return vcvt_u64_f64(a); +// } + +// NYI-LABEL: @test_vcvtn_s64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VCVTN1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double> %a) +// NYI: ret <1 x i64> [[VCVTN1_I]] +// int64x1_t test_vcvtn_s64_f64(float64x1_t a) { +// return vcvtn_s64_f64(a); +// } + +// NYI-LABEL: @test_vcvtn_u64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VCVTN1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double> %a) +// NYI: ret <1 x i64> [[VCVTN1_I]] +// uint64x1_t test_vcvtn_u64_f64(float64x1_t a) { +// return vcvtn_u64_f64(a); +// } + +// NYI-LABEL: @test_vcvtp_s64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VCVTP1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double> %a) +// NYI: ret <1 x i64> [[VCVTP1_I]] +// int64x1_t test_vcvtp_s64_f64(float64x1_t a) { +// return vcvtp_s64_f64(a); +// } + +// NYI-LABEL: @test_vcvtp_u64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VCVTP1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double> %a) +// NYI: ret <1 x i64> [[VCVTP1_I]] +// uint64x1_t test_vcvtp_u64_f64(float64x1_t a) { +// return vcvtp_u64_f64(a); +// } + +// NYI-LABEL: @test_vcvtm_s64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VCVTM1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double> %a) +// NYI: ret <1 x i64> [[VCVTM1_I]] +// int64x1_t test_vcvtm_s64_f64(float64x1_t a) { +// return vcvtm_s64_f64(a); +// } + +// NYI-LABEL: @test_vcvtm_u64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VCVTM1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double> %a) +// NYI: ret <1 x i64> [[VCVTM1_I]] +// uint64x1_t test_vcvtm_u64_f64(float64x1_t a) { +// return vcvtm_u64_f64(a); +// } + +// NYI-LABEL: @test_vcvta_s64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VCVTA1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double> %a) +// NYI: ret <1 x i64> [[VCVTA1_I]] +// int64x1_t test_vcvta_s64_f64(float64x1_t a) { +// return vcvta_s64_f64(a); +// } + +// NYI-LABEL: @test_vcvta_u64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VCVTA1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double> %a) +// NYI: ret <1 x i64> [[VCVTA1_I]] +// uint64x1_t test_vcvta_u64_f64(float64x1_t a) { +// return vcvta_u64_f64(a); +// } + +// NYI-LABEL: @test_vcvt_f64_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VCVT_I:%.*]] = sitofp <1 x i64> %a to <1 x double> +// NYI: ret <1 x double> [[VCVT_I]] +// float64x1_t test_vcvt_f64_s64(int64x1_t a) { +// return vcvt_f64_s64(a); +// } + +// NYI-LABEL: @test_vcvt_f64_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VCVT_I:%.*]] = uitofp <1 x i64> %a to <1 x double> +// NYI: ret <1 x double> [[VCVT_I]] +// float64x1_t test_vcvt_f64_u64(uint64x1_t a) { +// return vcvt_f64_u64(a); +// } + +// NYI-LABEL: @test_vcvt_n_s64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double> +// NYI: [[VCVT_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64(<1 x double> [[VCVT_N]], i32 64) +// NYI: ret <1 x i64> [[VCVT_N1]] +// int64x1_t test_vcvt_n_s64_f64(float64x1_t a) { +// return vcvt_n_s64_f64(a, 64); +// } + +// NYI-LABEL: @test_vcvt_n_u64_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double> +// NYI: [[VCVT_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64(<1 x double> [[VCVT_N]], i32 64) +// NYI: ret <1 x i64> [[VCVT_N1]] +// uint64x1_t test_vcvt_n_u64_f64(float64x1_t a) { +// return vcvt_n_u64_f64(a, 64); +// } + +// NYI-LABEL: @test_vcvt_n_f64_s64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VCVT_N1:%.*]] = call <1 x double> @llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64(<1 x i64> [[VCVT_N]], i32 64) +// NYI: ret <1 x double> [[VCVT_N1]] +// float64x1_t test_vcvt_n_f64_s64(int64x1_t a) { +// return vcvt_n_f64_s64(a, 64); +// } + +// NYI-LABEL: @test_vcvt_n_f64_u64( +// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> +// NYI: [[VCVT_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// NYI: [[VCVT_N1:%.*]] = call <1 x double> @llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64(<1 x i64> [[VCVT_N]], i32 64) +// NYI: ret <1 x double> [[VCVT_N1]] +// float64x1_t test_vcvt_n_f64_u64(uint64x1_t a) { +// return vcvt_n_f64_u64(a, 64); +// } + +// NYI-LABEL: @test_vrndn_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VRNDN1_I:%.*]] = call <1 x double> @llvm.roundeven.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VRNDN1_I]] +// float64x1_t test_vrndn_f64(float64x1_t a) { +// return vrndn_f64(a); +// } + +// NYI-LABEL: @test_vrnda_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VRNDA1_I:%.*]] = call <1 x double> @llvm.round.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VRNDA1_I]] +// float64x1_t test_vrnda_f64(float64x1_t a) { +// return vrnda_f64(a); +// } + +// NYI-LABEL: @test_vrndp_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VRNDP1_I:%.*]] = call <1 x double> @llvm.ceil.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VRNDP1_I]] +// float64x1_t test_vrndp_f64(float64x1_t a) { +// return vrndp_f64(a); +// } + +// NYI-LABEL: @test_vrndm_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VRNDM1_I:%.*]] = call <1 x double> @llvm.floor.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VRNDM1_I]] +// float64x1_t test_vrndm_f64(float64x1_t a) { +// return vrndm_f64(a); +// } + +// NYI-LABEL: @test_vrndx_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VRNDX1_I:%.*]] = call <1 x double> @llvm.rint.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VRNDX1_I]] +// float64x1_t test_vrndx_f64(float64x1_t a) { +// return vrndx_f64(a); +// } + +// NYI-LABEL: @test_vrnd_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VRNDZ1_I]] +// float64x1_t test_vrnd_f64(float64x1_t a) { +// return vrnd_f64(a); +// } + +// NYI-LABEL: @test_vrndi_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VRNDI1_I:%.*]] = call <1 x double> @llvm.nearbyint.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VRNDI1_I]] +// float64x1_t test_vrndi_f64(float64x1_t a) { +// return vrndi_f64(a); +// } + +// NYI-LABEL: @test_vrsqrte_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VRSQRTE_V1_I:%.*]] = call <1 x double> @llvm.aarch64.neon.frsqrte.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VRSQRTE_V1_I]] +// float64x1_t test_vrsqrte_f64(float64x1_t a) { +// return vrsqrte_f64(a); +// } + +// NYI-LABEL: @test_vrecpe_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VRECPE_V1_I:%.*]] = call <1 x double> @llvm.aarch64.neon.frecpe.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VRECPE_V1_I]] +// float64x1_t test_vrecpe_f64(float64x1_t a) { +// return vrecpe_f64(a); +// } + +// NYI-LABEL: @test_vsqrt_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[VSQRT_I:%.*]] = call <1 x double> @llvm.sqrt.v1f64(<1 x double> %a) +// NYI: ret <1 x double> [[VSQRT_I]] +// float64x1_t test_vsqrt_f64(float64x1_t a) { +// return vsqrt_f64(a); +// } + +// NYI-LABEL: @test_vrecps_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VRECPS_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.frecps.v1f64(<1 x double> %a, <1 x double> %b) +// NYI: ret <1 x double> [[VRECPS_V2_I]] +// float64x1_t test_vrecps_f64(float64x1_t a, float64x1_t b) { +// return vrecps_f64(a, b); +// } + +// NYI-LABEL: @test_vrsqrts_f64( +// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> +// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> +// NYI: [[VRSQRTS_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.frsqrts.v1f64(<1 x double> %a, <1 x double> %b) +// NYI: [[VRSQRTS_V3_I:%.*]] = bitcast <1 x double> [[VRSQRTS_V2_I]] to <8 x i8> +// NYI: ret <1 x double> [[VRSQRTS_V2_I]] +// float64x1_t test_vrsqrts_f64(float64x1_t a, float64x1_t b) { +// return vrsqrts_f64(a, b); +// } + +// NYI-LABEL: @test_vminv_s32( +// NYI: [[VMINV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> %a) +// NYI: ret i32 [[VMINV_S32_I]] +// int32_t test_vminv_s32(int32x2_t a) { +// return vminv_s32(a); +// } + +// NYI-LABEL: @test_vminv_u32( +// NYI: [[VMINV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32> %a) +// NYI: ret i32 [[VMINV_U32_I]] +// uint32_t test_vminv_u32(uint32x2_t a) { +// return vminv_u32(a); +// } + +// NYI-LABEL: @test_vmaxv_s32( +// NYI: [[VMAXV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> %a) +// NYI: ret i32 [[VMAXV_S32_I]] +// int32_t test_vmaxv_s32(int32x2_t a) { +// return vmaxv_s32(a); +// } + +// NYI-LABEL: @test_vmaxv_u32( +// NYI: [[VMAXV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32> %a) +// NYI: ret i32 [[VMAXV_U32_I]] +// uint32_t test_vmaxv_u32(uint32x2_t a) { +// return vmaxv_u32(a); +// } + +// NYI-LABEL: @test_vaddv_s32( +// NYI: [[VADDV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> %a) +// NYI: ret i32 [[VADDV_S32_I]] +// int32_t test_vaddv_s32(int32x2_t a) { +// return vaddv_s32(a); +// } + +// NYI-LABEL: @test_vaddv_u32( +// NYI: [[VADDV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32> %a) +// NYI: ret i32 [[VADDV_U32_I]] +// uint32_t test_vaddv_u32(uint32x2_t a) { +// return vaddv_u32(a); +// } + +// NYI-LABEL: @test_vaddlv_s32( +// NYI: [[VADDLV_S32_I:%.*]] = call i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32> %a) +// NYI: ret i64 [[VADDLV_S32_I]] +// int64_t test_vaddlv_s32(int32x2_t a) { +// return vaddlv_s32(a); +// } + +// NYI-LABEL: @test_vaddlv_u32( +// NYI: [[VADDLV_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32> %a) +// NYI: ret i64 [[VADDLV_U32_I]] +// uint64_t test_vaddlv_u32(uint32x2_t a) { +// return vaddlv_u32(a); +// } From 25b37b8023671c9fa6a59da658f8269ad9103592 Mon Sep 17 00:00:00 2001 From: Twice Date: Fri, 24 May 2024 08:58:26 +0900 Subject: [PATCH 1588/2301] [CIR][CIRGen] Support CodeGen for structural bindings (#618) In this PR I added the support for structural bindings in CIR codegen, to reason `DecompositionDecl` and `BindDecl` properly. Note that since `ArrayInitLoopExpr` is not implemented so binding to arrays is not supported yet. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 6 +- clang/test/CIR/CodeGen/structural-binding.cpp | 112 ++++++++++++++++++ 3 files changed, 120 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/structural-binding.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 79623f4f362f..77d80a244e11 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -839,13 +839,9 @@ void CIRGenFunction::buildDecl(const Decl &D) { "Should not see file-scope variables inside a function!"); buildVarDecl(VD); if (auto *DD = dyn_cast(&VD)) - assert(0 && "Not implemented"); - - // FIXME: add this - // if (auto *DD = dyn_cast(&VD)) - // for (auto *B : DD->bindings()) - // if (auto *HD = B->getHoldingVar()) - // EmitVarDecl(*HD); + for (auto *B : DD->bindings()) + if (auto *HD = B->getHoldingVar()) + buildVarDecl(*HD); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 45c0a813bcd6..891c8d698878 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -928,7 +928,11 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { // DeclRefExprs we see should be implicitly treated as if they also refer to // an enclosing scope. if (const auto *BD = dyn_cast(ND)) { - llvm_unreachable("NYI"); + if (E->refersToEnclosingVariableOrCapture()) { + auto *FD = LambdaCaptureFields.lookup(BD); + return buildCapturedFieldLValue(*this, FD, CXXABIThisValue); + } + return buildLValue(BD->getBinding()); } // We can form DeclRefExprs naming GUID declarations when reconstituting diff --git a/clang/test/CIR/CodeGen/structural-binding.cpp b/clang/test/CIR/CodeGen/structural-binding.cpp new file mode 100644 index 000000000000..c7250d39a1e2 --- /dev/null +++ b/clang/test/CIR/CodeGen/structural-binding.cpp @@ -0,0 +1,112 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +struct B { B(); }; + +struct A { + B a; + int b; + char c; +}; + +struct C { + C(int a, int b): a(a), b(b) {} + template + friend const int &get(const C&); + private: + int a; + int b; +}; + +template <> +const int &get<0>(const C& c) { return c.a; } +template <> +const int &get<1>(const C& c) { return c.b; } + +namespace std { + +template +struct tuple_size; + +template <> +struct tuple_size { constexpr inline static unsigned value = 2; }; + +template +struct tuple_element; + +template +struct tuple_element { using type = const int; }; + +} + + +// binding to data members +void f(A &a) { + // CIR: @_Z1fR1A + // LLVM: @_Z1fR1A + + auto &[x, y, z] = a; + (x, y, z); + // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr + // CIR: {{.*}} = cir.get_member %[[a]][0] {name = "a"} : !cir.ptr -> !cir.ptr + // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr + // CIR: {{.*}} = cir.get_member %[[a]][1] {name = "b"} : !cir.ptr -> !cir.ptr + // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr + // CIR: {{.*}} = cir.get_member %[[a]][2] {name = "c"} : !cir.ptr -> !cir.ptr + // LLVM: {{.*}} = getelementptr %struct.A, ptr {{.*}}, i32 0, i32 0 + // LLVM: {{.*}} = getelementptr %struct.A, ptr {{.*}}, i32 0, i32 1 + // LLVM: {{.*}} = getelementptr %struct.A, ptr {{.*}}, i32 0, i32 2 + + auto [x2, y2, z2] = a; + (x2, y2, z2); + // CIR: cir.call @_ZN1AC1ERKS_(%2, {{.*}}) : (!cir.ptr, !cir.ptr) -> () + // CIR: {{.*}} = cir.get_member %2[0] {name = "a"} : !cir.ptr -> !cir.ptr + // CIR: {{.*}} = cir.get_member %2[1] {name = "b"} : !cir.ptr -> !cir.ptr + // CIR: {{.*}} = cir.get_member %2[2] {name = "c"} : !cir.ptr -> !cir.ptr + + // for the rest, just expect the codegen does't crash + auto &&[x3, y3, z3] = a; + (x3, y3, z3); + + const auto &[x4, y4, z4] = a; + (x4, y4, z4); + + const auto [x5, y5, z5] = a; + (x5, y5, z5); +} + +// binding to a tuple-like type +void g(C &c) { + // CIR: @_Z1gR1C + // LLVM: @_Z1gR1C + + auto [x8, y8] = c; + (x8, y8); + // CIR: cir.call @_ZN1CC1ERKS_(%[[c:.*]], %7) : (!cir.ptr, !cir.ptr) -> () + // CIR: %[[x8:.*]] = cir.call @_Z3getILj0EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr + // CIR: cir.store %[[x8]], %[[x8p:.*]] : !cir.ptr, !cir.ptr> + // CIR: %[[x9:.*]] = cir.call @_Z3getILj1EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr + // CIR: cir.store %[[x9]], %[[x9p:.*]] : !cir.ptr, !cir.ptr> + // CIR: {{.*}} = cir.load %[[x8p]] : !cir.ptr>, !cir.ptr + // CIR: {{.*}} = cir.load %[[x9p]] : !cir.ptr>, !cir.ptr + // LLVM: call void @_ZN1CC1ERKS_(ptr {{.*}}, ptr {{.*}}) + // LLVM: {{.*}} = call ptr @_Z3getILj0EERKiRK1C(ptr {{.*}}) + // LLVM: {{.*}} = call ptr @_Z3getILj1EERKiRK1C(ptr {{.*}}) + + auto &[x9, y9] = c; + (x9, y9); + // CIR: cir.store %12, %[[cp:.*]] : !cir.ptr, !cir.ptr> + // CIR: %[[c:.*]] = cir.load %[[cp]] : !cir.ptr>, !cir.ptr + // CIR: %[[x8:.*]] = cir.call @_Z3getILj0EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr + // CIR: cir.store %[[x8]], %[[x8p:.*]] : !cir.ptr, !cir.ptr> + // CIR: %[[c:.*]] = cir.load %[[cp]] : !cir.ptr>, !cir.ptr + // CIR: %[[x9:.*]] = cir.call @_Z3getILj1EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr + // CIR: cir.store %[[x9]], %[[x9p:.*]] : !cir.ptr, !cir.ptr> + // CIR: {{.*}} = cir.load %[[x8p]] : !cir.ptr>, !cir.ptr + // CIR: {{.*}} = cir.load %[[x9p]] : !cir.ptr>, !cir.ptr +} + +// TODO: add test case for binding to an array type +// after ArrayInitLoopExpr is supported From bd960cb4f256bdc666f2aeeda170dff73a74a208 Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Fri, 24 May 2024 08:03:05 +0800 Subject: [PATCH 1589/2301] [CIR][ThroughMLIR] Support lowering ForOp to scf (#605) This commit introduces CIRForOpLowering for lowering to scf. The initial commit only support increment loop with lt or le comparison. --- clang/include/clang/CIR/LowerToMLIR.h | 21 ++ .../CIR/Lowering/ThroughMLIR/CMakeLists.txt | 1 + .../ThroughMLIR/LowerCIRLoopToSCF.cpp | 256 ++++++++++++++++++ .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 4 +- clang/test/CIR/Lowering/ThroughMLIR/for.cir | 222 +++++++++++++++ 5 files changed, 503 insertions(+), 1 deletion(-) create mode 100644 clang/include/clang/CIR/LowerToMLIR.h create mode 100644 clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/for.cir diff --git a/clang/include/clang/CIR/LowerToMLIR.h b/clang/include/clang/CIR/LowerToMLIR.h new file mode 100644 index 000000000000..567deb7abc7d --- /dev/null +++ b/clang/include/clang/CIR/LowerToMLIR.h @@ -0,0 +1,21 @@ +//====- LowerToMLIR.h- Lowering from CIR to MLIR --------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares functions for lowering CIR modules to MLIR. +// +//===----------------------------------------------------------------------===// +#ifndef CLANG_CIR_LOWERTOMLIR_H +#define CLANG_CIR_LOWERTOMLIR_H + +namespace cir { + +void populateCIRLoopToSCFConversionPatterns(mlir::RewritePatternSet &patterns, + mlir::TypeConverter &converter); +} // namespace cir + +#endif // CLANG_CIR_LOWERTOMLIR_H_ diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index 788cd4e396db..167c8b791ea6 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -6,6 +6,7 @@ set(LLVM_LINK_COMPONENTS get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIRLoweringThroughMLIR + LowerCIRLoopToSCF.cpp LowerCIRToMLIR.cpp LowerMLIRToLLVM.cpp diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp new file mode 100644 index 000000000000..055f97c63b3e --- /dev/null +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp @@ -0,0 +1,256 @@ +//====- LowerCIRLoopToSCF.cpp - Lowering from CIR Loop to SCF -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements lowering of CIR loop operations to SCF. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/SCF/IR/SCF.h" +#include "mlir/Dialect/SCF/Transforms/Passes.h" +#include "mlir/IR/BuiltinDialect.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/LowerToMLIR.h" +#include "clang/CIR/Passes.h" + +using namespace cir; +using namespace llvm; + +namespace cir { + +class SCFLoop { +public: + SCFLoop(mlir::cir::ForOp op, mlir::ConversionPatternRewriter *rewriter) + : forOp(op), rewriter(rewriter) {} + + int64_t getStep() { return step; } + mlir::Value getLowerBound() { return lowerBound; } + mlir::Value getUpperBound() { return upperBound; } + + int64_t findStepAndIV(mlir::Value &addr); + mlir::cir::CmpOp findCmpOp(); + mlir::Value findIVInitValue(); + void analysis(); + + mlir::Value plusConstant(mlir::Value V, mlir::Location loc, int addend); + void transferToSCFForOp(); + +private: + mlir::cir::ForOp forOp; + mlir::cir::CmpOp cmpOp; + mlir::Value IVAddr, lowerBound = nullptr, upperBound = nullptr; + mlir::ConversionPatternRewriter *rewriter; + int64_t step = 0; +}; + +static int64_t getConstant(mlir::cir::ConstantOp op) { + auto attr = op->getAttrs().front().getValue(); + const auto IntAttr = attr.dyn_cast(); + return IntAttr.getValue().getSExtValue(); +} + +int64_t SCFLoop::findStepAndIV(mlir::Value &addr) { + auto *stepBlock = + (forOp.maybeGetStep() ? &forOp.maybeGetStep()->front() : nullptr); + assert(stepBlock && "Can not find step block"); + + int64_t step = 0; + mlir::Value IV = nullptr; + // Try to match "IV load addr; ++IV; store IV, addr" to find step. + for (mlir::Operation &op : *stepBlock) + if (auto loadOp = dyn_cast(op)) { + addr = loadOp.getAddr(); + IV = loadOp.getResult(); + } else if (auto cop = dyn_cast(op)) { + if (step) + llvm_unreachable( + "Not support multiple constant in step calculation yet"); + step = getConstant(cop); + } else if (auto bop = dyn_cast(op)) { + if (bop.getLhs() != IV) + llvm_unreachable("Find BinOp not operate on IV"); + if (bop.getKind() != mlir::cir::BinOpKind::Add) + llvm_unreachable( + "Not support BinOp other than Add in step calculation yet"); + } else if (auto uop = dyn_cast(op)) { + if (uop.getInput() != IV) + llvm_unreachable("Find UnaryOp not operate on IV"); + if (uop.getKind() == mlir::cir::UnaryOpKind::Inc) + step = 1; + else if (uop.getKind() == mlir::cir::UnaryOpKind::Dec) + llvm_unreachable("Not support decrement step yet"); + } else if (auto storeOp = dyn_cast(op)) { + assert(storeOp.getAddr() == addr && "Can't find IV when lowering ForOp"); + } + assert(step && "Can't find step when lowering ForOp"); + + return step; +} + +static bool isIVLoad(mlir::Operation *op, mlir::Value IVAddr) { + if (!op) + return false; + if (isa(op)) { + if (!op->getOperand(0)) + return false; + if (op->getOperand(0) == IVAddr) + return true; + } + return false; +} + +mlir::cir::CmpOp SCFLoop::findCmpOp() { + cmpOp = nullptr; + for (auto *user : IVAddr.getUsers()) { + if (user->getParentRegion() != &forOp.getCond()) + continue; + if (auto loadOp = dyn_cast(*user)) { + if (!loadOp->hasOneUse()) + continue; + if (auto op = dyn_cast(*loadOp->user_begin())) { + cmpOp = op; + break; + } + } + } + if (!cmpOp) + llvm_unreachable("Can't find loop CmpOp"); + + auto type = cmpOp.getLhs().getType(); + if (!type.isa()) + llvm_unreachable("Non-integer type IV is not supported"); + + auto lhsDefOp = cmpOp.getLhs().getDefiningOp(); + if (!lhsDefOp) + llvm_unreachable("Can't find IV load"); + if (!isIVLoad(lhsDefOp, IVAddr)) + llvm_unreachable("cmpOp LHS is not IV"); + + if (cmpOp.getKind() != mlir::cir::CmpOpKind::le && + cmpOp.getKind() != mlir::cir::CmpOpKind::lt) + llvm_unreachable("Not support lowering other than le or lt comparison"); + + return cmpOp; +} + +mlir::Value SCFLoop::plusConstant(mlir::Value V, mlir::Location loc, + int addend) { + auto type = V.getType(); + auto c1 = rewriter->create( + loc, type, mlir::IntegerAttr::get(type, addend)); + return rewriter->create(loc, V, c1); +} + +// Return IV initial value by searching the store before the loop. +// The operations before the loop have been transferred to MLIR. +// So we need to go through getRemappedValue to find the value. +mlir::Value SCFLoop::findIVInitValue() { + auto remapAddr = rewriter->getRemappedValue(IVAddr); + if (!remapAddr) + return nullptr; + if (!remapAddr.hasOneUse()) + return nullptr; + auto memrefStore = dyn_cast(*remapAddr.user_begin()); + if (!memrefStore) + return nullptr; + return memrefStore->getOperand(0); +} + +void SCFLoop::analysis() { + step = findStepAndIV(IVAddr); + cmpOp = findCmpOp(); + auto IVInit = findIVInitValue(); + // The loop end value should be hoisted out of loop by -cir-mlir-scf-prepare. + // So we could get the value by getRemappedValue. + auto IVEndBound = rewriter->getRemappedValue(cmpOp.getRhs()); + // If the loop end bound is not loop invariant and can't be hoisted. + // The following assertion will be triggerred. + assert(IVEndBound && "can't find IV end boundary"); + + if (step > 0) { + lowerBound = IVInit; + if (cmpOp.getKind() == mlir::cir::CmpOpKind::lt) + upperBound = IVEndBound; + else if (cmpOp.getKind() == mlir::cir::CmpOpKind::le) + upperBound = plusConstant(IVEndBound, cmpOp.getLoc(), 1); + } + assert(lowerBound && "can't find loop lower bound"); + assert(upperBound && "can't find loop upper bound"); +} + +// Return true if op operation is in the loop body. +static bool isInLoopBody(mlir::Operation *op) { + mlir::Operation *parentOp = op->getParentOp(); + if (!parentOp) + return false; + if (isa(parentOp)) + return true; + auto forOp = dyn_cast(parentOp); + if (forOp && (&forOp.getBody() == op->getParentRegion())) + return true; + return false; +} + +void SCFLoop::transferToSCFForOp() { + auto ub = getUpperBound(); + auto lb = getLowerBound(); + auto loc = forOp.getLoc(); + auto type = lb.getType(); + auto step = rewriter->create( + loc, type, mlir::IntegerAttr::get(type, getStep())); + auto scfForOp = rewriter->create(loc, lb, ub, step); + SmallVector bbArg; + rewriter->eraseOp(&scfForOp.getBody()->back()); + rewriter->inlineBlockBefore(&forOp.getBody().front(), scfForOp.getBody(), + scfForOp.getBody()->end(), bbArg); + scfForOp->walk([&](mlir::Operation *op) { + if (isa(op) || isa(op) || + isa(op)) + llvm_unreachable( + "Not support lowering loop with break, continue or if yet"); + // Replace the IV usage to scf loop induction variable. + if (isIVLoad(op, IVAddr)) { + auto newIV = scfForOp.getInductionVar(); + op->getResult(0).replaceAllUsesWith(newIV); + // Only erase the IV load in the loop body because all the operations + // in loop step and condition regions will be erased. + if (isInLoopBody(op)) + rewriter->eraseOp(op); + } + return mlir::WalkResult::advance(); + }); +} + +class CIRForOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ForOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + SCFLoop loop(op, &rewriter); + loop.analysis(); + loop.transferToSCFForOp(); + rewriter.eraseOp(op); + return mlir::success(); + } +}; + +void populateCIRLoopToSCFConversionPatterns(mlir::RewritePatternSet &patterns, + mlir::TypeConverter &converter) { + patterns.add(converter, patterns.getContext()); +} + +} // namespace cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 24f55b4e79fd..52ec83c3edc2 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -40,6 +40,7 @@ #include "mlir/Transforms/DialectConversion.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/LowerToMLIR.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/TypeSwitch.h" @@ -802,7 +803,7 @@ class CIRYieldOpLowering mlir::ConversionPatternRewriter &rewriter) const override { auto *parentOp = op->getParentOp(); return llvm::TypeSwitch(parentOp) - .Case([&](auto) { + .Case([&](auto) { rewriter.replaceOpWithNewOp( op, adaptor.getOperands()); return mlir::success(); @@ -1199,6 +1200,7 @@ void ConvertCIRToMLIRPass::runOnOperation() { mlir::RewritePatternSet patterns(&getContext()); + populateCIRLoopToSCFConversionPatterns(patterns, converter); populateCIRToMLIRConversionPatterns(patterns, converter); mlir::ConversionTarget target(getContext()); diff --git a/clang/test/CIR/Lowering/ThroughMLIR/for.cir b/clang/test/CIR/Lowering/ThroughMLIR/for.cir new file mode 100644 index 000000000000..9ec345577255 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/for.cir @@ -0,0 +1,222 @@ +// RUN: cir-opt %s -cir-to-mlir --canonicalize | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-mlir --canonicalize -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +module { + cir.global external @a = #cir.zero : !cir.array + + // MLIR-LABEL: func.func @constantLoopBound() + // LLVM-LABEL: define void @constantLoopBound() + cir.func @constantLoopBound() { + // MLIR: %[[C3:.*]] = arith.constant 3 : i32 + // MLIR: %[[C1:.*]] = arith.constant 1 : i32 + // MLIR: %[[C100:.*]] = arith.constant 100 : i32 + // MLIR: %[[C0:.*]] = arith.constant 0 : i32 + // MLIR: scf.for %[[I:.*]] = %[[C0]] to %[[C100]] step %[[C1]] : i32 { + // MLIR: %[[BASE:.*]] = memref.get_global @a : memref<100xi32> + // MLIR: %[[INDEX:.*]] = arith.index_cast %[[I]] : i32 to index + // MLIR: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<100xi32> + // MLIR: } + + // LLVM: %[[I:.*]] = phi i32 [ %[[I_INC:.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[PREHEADER:.*]] ] + // LLVM: %[[COND:.*]] = icmp slt i32 %[[I]], 100 + // LLVM: br i1 %[[COND]], label %[[LOOP_LATCH]], label %[[LOOP_EXIT:.*]] + // LLVM: %[[I64:.*]] = sext i32 %[[I]] to i64 + // LLVM: %[[ADDR:.*]] = getelementptr i32, ptr @a, i64 %[[I64]] + // LLVM: store i32 3, ptr %[[ADDR]], align 4 + // LLVM: %[[I_INC]] = add i32 %[[I]], 1 + // LLVM: br label %[[LOOP_HEADER:.*]] + + cir.scope { + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %1 = cir.const #cir.int<0> : !s32i + cir.store %1, %0 : !s32i, !cir.ptr + %2 = cir.const #cir.int<100> : !s32i + cir.for : cond { + %3 = cir.load %0 : !cir.ptr, !s32i + %4 = cir.cmp(lt, %3, %2) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } body { + %3 = cir.const #cir.int<3> : !s32i + %4 = cir.get_global @a : !cir.ptr> + %5 = cir.load %0 : !cir.ptr, !s32i + %6 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr + %7 = cir.ptr_stride(%6 : !cir.ptr, %5 : !s32i), !cir.ptr + cir.store %3, %7 : !s32i, !cir.ptr + cir.yield + } step { + %3 = cir.load %0 : !cir.ptr, !s32i + %4 = cir.unary(inc, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, !cir.ptr + cir.yield + } + } + cir.return + } + + // MLIR-LABEL: func.func @constantLoopBound_LE() + // LLVM-LABEL: define void @constantLoopBound_LE() + cir.func @constantLoopBound_LE() { + // MLIR: %[[C3:.*]] = arith.constant 3 : i32 + // MLIR: %[[C1:.*]] = arith.constant 1 : i32 + // MLIR: %[[C0:.*]] = arith.constant 0 : i32 + // MLIR: %[[C101:.*]] = arith.constant 101 : i32 + // MLIR: scf.for %[[I:.*]] = %[[C0]] to %[[C101]] step %[[C1]] : i32 { + // MLIR: %[[BASE:.*]] = memref.get_global @a : memref<100xi32> + // MLIR: %[[INDEX:.*]] = arith.index_cast %[[I]] : i32 to index + // MLIR: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<100xi32> + // MLIR: } + + // LLVM: %[[I:.*]] = phi i32 [ %[[I_INC:.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[PREHEADER:.*]] ] + // LLVM: %[[COND:.*]] = icmp slt i32 %[[I]], 101 + // LLVM: br i1 %[[COND]], label %[[LOOP_LATCH]], label %[[LOOP_EXIT:.*]] + // LLVM: %[[I64:.*]] = sext i32 %[[I]] to i64 + // LLVM: %[[ADDR:.*]] = getelementptr i32, ptr @a, i64 %[[I64]] + // LLVM: store i32 3, ptr %[[ADDR]], align 4 + // LLVM: %[[I_INC]] = add i32 %[[I]], 1 + // LLVM: br label %[[LOOP_HEADER:.*]] + + cir.scope { + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %1 = cir.const #cir.int<0> : !s32i + cir.store %1, %0 : !s32i, !cir.ptr + %2 = cir.const #cir.int<100> : !s32i + cir.for : cond { + %3 = cir.load %0 : !cir.ptr, !s32i + %4 = cir.cmp(le, %3, %2) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } body { + %3 = cir.const #cir.int<3> : !s32i + %4 = cir.get_global @a : !cir.ptr> + %5 = cir.load %0 : !cir.ptr, !s32i + %6 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr + %7 = cir.ptr_stride(%6 : !cir.ptr, %5 : !s32i), !cir.ptr + cir.store %3, %7 : !s32i, !cir.ptr + cir.yield + } step { + %3 = cir.load %0 : !cir.ptr, !s32i + %4 = cir.unary(inc, %3) : !s32i, !s32i + cir.store %4, %0 : !s32i, !cir.ptr + cir.yield + } + } + cir.return + } + + // MLIR-LABEL: func.func @variableLoopBound(%arg0: i32, %arg1: i32) + // LLVM-LABEL: define void @variableLoopBound(i32 %0, i32 %1) + cir.func @variableLoopBound(%arg0: !s32i, %arg1: !s32i) { + // MLIR: %[[C3:.*]] = arith.constant 3 : i32 + // MLIR: %[[C1:.*]] = arith.constant 1 : i32 + // MLIR: memref.store %arg0, %alloca[] : memref + // MLIR: memref.store %arg1, %alloca_0[] : memref + // MLIR: %[[LOWER:.*]] = memref.load %alloca[] : memref + // MLIR: %[[UPPER:.*]] = memref.load %alloca_0[] : memref + // MLIR: scf.for %[[I:.*]] = %[[LOWER]] to %[[UPPER]] step %[[C1]] : i32 { + // MLIR: %[[BASE:.*]] = memref.get_global @a : memref<100xi32> + // MLIR: %[[INDEX:.*]] = arith.index_cast %[[I]] : i32 to index + // MLIR: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<100xi32> + // MLIR: } + + // LLVM: %[[I:.*]] = phi i32 [ %[[I_INC:.*]], %[[LOOP_LATCH:.*]] ], [ %[[LOWER:.*]], %[[PREHEADER:.*]] ] + // LLVM: %[[COND:.*]] = icmp slt i32 %[[I]], %[[UPPER:.*]] + // LLVM: br i1 %[[COND]], label %[[LOOP_LATCH]], label %[[LOOP_EXIT:.*]] + // LLVM: %[[I64:.*]] = sext i32 %[[I]] to i64 + // LLVM: %[[ADDR:.*]] = getelementptr i32, ptr @a, i64 %[[I64]] + // LLVM: store i32 3, ptr %[[ADDR]], align 4 + // LLVM: %[[I_INC]] = add i32 %[[I]], 1 + // LLVM: br label %[[LOOP_HEADER:.*]] + + %0 = cir.alloca !s32i, !cir.ptr, ["l", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["u", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.store %arg1, %1 : !s32i, !cir.ptr + cir.scope { + %2 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %3 = cir.load %0 : !cir.ptr, !s32i + cir.store %3, %2 : !s32i, !cir.ptr + %4 = cir.load %1 : !cir.ptr, !s32i + cir.for : cond { + %5 = cir.load %2 : !cir.ptr, !s32i + %6 = cir.cmp(lt, %5, %4) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.condition(%7) + } body { + %5 = cir.const #cir.int<3> : !s32i + %6 = cir.get_global @a : !cir.ptr> + %7 = cir.load %2 : !cir.ptr, !s32i + %8 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr + %9 = cir.ptr_stride(%8 : !cir.ptr, %7 : !s32i), !cir.ptr + cir.store %5, %9 : !s32i, !cir.ptr + cir.yield + } step { + %5 = cir.load %2 : !cir.ptr, !s32i + %6 = cir.unary(inc, %5) : !s32i, !s32i + cir.store %6, %2 : !s32i, !cir.ptr + cir.yield + } + } + cir.return + } + + // MLIR-LABEL: func.func @variableLoopBound_LE(%arg0: i32, %arg1: i32) + // LLVM-LABEL: define void @variableLoopBound_LE(i32 %0, i32 %1) + cir.func @variableLoopBound_LE(%arg0: !s32i, %arg1: !s32i) { + // MLIR: %[[C3:.*]] = arith.constant 3 : i32 + // MLIR: %[[C4:.*]] = arith.constant 4 : i32 + // MLIR: %[[C1:.*]] = arith.constant 1 : i32 + // MLIR: memref.store %arg0, %alloca[] : memref + // MLIR: memref.store %arg1, %alloca_0[] : memref + // MLIR: %[[LOWER:.*]] = memref.load %alloca[] : memref + // MLIR: %[[UPPER_DEC_1:.*]] = memref.load %alloca_0[] : memref + // MLIR: %[[UPPER:.*]] = arith.addi %[[UPPER_DEC_1]], %[[C1]] : i32 + // MLIR: scf.for %[[I:.*]] = %[[LOWER]] to %[[UPPER]] step %[[C4]] : i32 { + // MLIR: %[[BASE:.*]] = memref.get_global @a : memref<100xi32> + // MLIR: %[[INDEX:.*]] = arith.index_cast %[[I]] : i32 to index + // MLIR: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<100xi32> + // MLIR: } + + // LLVM: %[[I:.*]] = phi i32 [ %[[I_INC:.*]], %[[LOOP_LATCH:.*]] ], [ %[[LOWER:.*]], %[[PREHEADER:.*]] ] + // LLVM: %[[COND:.*]] = icmp slt i32 %[[I]], %[[UPPER:.*]] + // LLVM: br i1 %[[COND]], label %[[LOOP_LATCH]], label %[[LOOP_EXIT:.*]] + // LLVM: %[[I64:.*]] = sext i32 %[[I]] to i64 + // LLVM: %[[ADDR:.*]] = getelementptr i32, ptr @a, i64 %[[I64]] + // LLVM: store i32 3, ptr %[[ADDR]], align 4 + // LLVM: %[[I_INC]] = add i32 %[[I]], 4 + // LLVM: br label %[[LOOP_HEADER:.*]] + + %0 = cir.alloca !s32i, !cir.ptr, ["l", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["u", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.store %arg1, %1 : !s32i, !cir.ptr + cir.scope { + %2 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %3 = cir.load %0 : !cir.ptr, !s32i + cir.store %3, %2 : !s32i, !cir.ptr + %4 = cir.load %1 : !cir.ptr, !s32i + cir.for : cond { + %5 = cir.load %2 : !cir.ptr, !s32i + %6 = cir.cmp(le, %5, %4) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.condition(%7) + } body { + %5 = cir.const #cir.int<3> : !s32i + %6 = cir.get_global @a : !cir.ptr> + %7 = cir.load %2 : !cir.ptr, !s32i + %8 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr + %9 = cir.ptr_stride(%8 : !cir.ptr, %7 : !s32i), !cir.ptr + cir.store %5, %9 : !s32i, !cir.ptr + cir.yield + } step { + %5 = cir.const #cir.int<4> : !s32i + %6 = cir.load %2 : !cir.ptr, !s32i + %7 = cir.binop(add, %6, %5) : !s32i + cir.store %7, %2 : !s32i, !cir.ptr + cir.yield + } + } + cir.return + } +} From 629cf2fa09ce7c063b75f11025b1b572b2334dc5 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 24 May 2024 03:16:53 +0300 Subject: [PATCH 1590/2301] [CIR][CodeGen][Bugfix] fixes volatile structs copy (#623) This PR fixes a fail on `llvm_unreachable` for the next case: ``` volatile A vol_a; A foo7() { return vol_a; } ``` Basically, it's just a copy-pasta from the original `code-gen`. Also, I added the `isVolatile` attribute for the `cit.copy` operation --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 ++++-- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 +++-- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 21 +++++++++++++------ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- clang/test/CIR/CodeGen/agg-copy.c | 11 +++++++++- 5 files changed, 33 insertions(+), 12 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index fe548ea7782c..487b12d93b3e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3100,7 +3100,8 @@ def CatchParamOp : CIR_Op<"catch_param"> { def CopyOp : CIR_Op<"copy", [SameTypeOperands]> { let arguments = (ins Arg:$dst, - Arg:$src); + Arg:$src, + UnitAttr:$is_volatile); let summary = "Copies contents from a CIR pointer to another"; let description = [{ Given two CIR pointers, `src` and `dst`, `cir.copy` will copy the memory @@ -3118,7 +3119,8 @@ def CopyOp : CIR_Op<"copy", [SameTypeOperands]> { ``` }]; - let assemblyFormat = "$src `to` $dst attr-dict `:` qualified(type($dst))"; + let assemblyFormat = [{$src `to` $dst (`volatile` $is_volatile^)? + attr-dict `:` qualified(type($dst)) }]; let hasVerifier = 1; let extraClassDeclaration = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 9f2c34dd84be..5461438b06d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -584,8 +584,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { // /// Create a copy with inferred length. - mlir::cir::CopyOp createCopy(mlir::Value dst, mlir::Value src) { - return create(dst.getLoc(), dst, src); + mlir::cir::CopyOp createCopy(mlir::Value dst, mlir::Value src, + bool isVolatile = false) { + return create(dst.getLoc(), dst, src, isVolatile); } /// Create a break operation. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 87f770999b59..8eaaf02be27b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -389,10 +389,8 @@ void AggExprEmitter::buildCopy(QualType type, const AggValueSlot &dest, // the two sides. LValue DestLV = CGF.makeAddrLValue(dest.getAddress(), type); LValue SrcLV = CGF.makeAddrLValue(src.getAddress(), type); - if (dest.isVolatile() || src.isVolatile() || - UnimplementedFeature::volatileTypes()) - llvm_unreachable("volatile is NYI"); - CGF.buildAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(), false); + CGF.buildAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(), + dest.isVolatile() || src.isVolatile()); } // FIXME(cir): This function could be shared with traditional LLVM codegen @@ -912,7 +910,18 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { // into existence. if (E->getSubExpr()->getType().isVolatileQualified() || UnimplementedFeature::volatileTypes()) { - llvm_unreachable("volatile is NYI"); + bool Destruct = + !Dest.isExternallyDestructed() && + E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct; + if (Destruct) + Dest.setExternallyDestructed(); + Visit(E->getSubExpr()); + + if (Destruct) + CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(), + E->getType()); + + return; } [[fallthrough]]; @@ -1598,7 +1607,7 @@ void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty, } } - builder.createCopy(DestPtr.getPointer(), SrcPtr.getPointer()); + builder.createCopy(DestPtr.getPointer(), SrcPtr.getPointer(), isVolatile); // Determine the metadata to describe the position of any padding in this // memcpy, as well as the TBAA tags for the members of the struct, in case diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index c41c7165fd5a..c59e916e8f7e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -437,7 +437,7 @@ class CIRCopyOpLowering : public mlir::OpConversionPattern { const mlir::Value length = rewriter.create( op.getLoc(), rewriter.getI32Type(), op.getLength()); rewriter.replaceOpWithNewOp( - op, adaptor.getDst(), adaptor.getSrc(), length, /*isVolatile=*/false); + op, adaptor.getDst(), adaptor.getSrc(), length, op.getIsVolatile()); return mlir::success(); } }; diff --git a/clang/test/CIR/CodeGen/agg-copy.c b/clang/test/CIR/CodeGen/agg-copy.c index 52d292d30b1a..f33d29fd1d11 100644 --- a/clang/test/CIR/CodeGen/agg-copy.c +++ b/clang/test/CIR/CodeGen/agg-copy.c @@ -82,4 +82,13 @@ void foo6(A* a1) { // CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> // CHECK: [[TMP2:%.*]] = cir.load deref [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr -} \ No newline at end of file +} + +volatile A vol_a; +A foo7() { + return vol_a; +} +// CHECK: cir.func {{.*@foo7}} +// CHECK: %0 = cir.alloca +// CHECK: %1 = cir.get_global @vol_a +// CHECK: cir.copy %1 to %0 volatile \ No newline at end of file From c4715a320149a4ffd96291206fee1e1233830f99 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 24 May 2024 03:17:34 +0300 Subject: [PATCH 1591/2301] [CIR][CodeGen][Bugfix] store fptr of a function with no args (#622) This PR fixes the next bug showed in the example below: ``` typedef int (*fn_t)(); int get42() { return 42; } void foo() { fn_t f = get42; } ``` The function type `fn_t` is generated as the variadic one due to no arg types listed, this is the `codegen` feature. And once we store the function pointer to a real function - a pointer to `get42` here has the expected `i32 ()*` type - we get a verification error, so `bitcast` is needed. The original `codegen` doesn't have it because of opaque pointers used, and had the `bitcast` earlier, long time ago: ``` %f = alloca i32 (...)* store i32 (...)* bitcast (i32 ()* @get42 to i32 (...)*), i32 (...)** %f ``` --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 14 ++++++++++++-- clang/test/CIR/CodeGen/store.c | 14 ++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 891c8d698878..98add6fde3e7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -790,10 +790,20 @@ static LValue buildFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E, auto loc = CGF.getLoc(E->getSourceRange()); CharUnits align = CGF.getContext().getDeclAlign(FD); - auto fnTy = funcOp.getFunctionType(); + mlir::Type fnTy = funcOp.getFunctionType(); auto ptrTy = mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fnTy); - auto addr = CGF.getBuilder().create( + mlir::Value addr = CGF.getBuilder().create( loc, ptrTy, funcOp.getSymName()); + + if (funcOp.getFunctionType() != + CGF.CGM.getTypes().ConvertType(FD->getType())) { + fnTy = CGF.CGM.getTypes().ConvertType(FD->getType()); + ptrTy = mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fnTy); + + addr = CGF.getBuilder().create( + addr.getLoc(), ptrTy, mlir::cir::CastKind::bitcast, addr); + } + return CGF.makeAddrLValue(Address(addr, fnTy, align), E->getType(), AlignmentSource::Decl); } diff --git a/clang/test/CIR/CodeGen/store.c b/clang/test/CIR/CodeGen/store.c index 1bc215f75c3b..9a94e6578129 100644 --- a/clang/test/CIR/CodeGen/store.c +++ b/clang/test/CIR/CodeGen/store.c @@ -14,3 +14,17 @@ void foo(void) { // CHECK-NEXT: cir.store %2, %0 : !s32i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } + +typedef int (*fn_t)(); +int get42() { return 42; } + +void storeNoArgsFn() { + fn_t f = get42; +} + +// CHECK: cir.func {{.*@storeNoArgsFn}} +// CHECK: %0 = cir.alloca +// CHECK: %1 = cir.get_global @get42 : !cir.ptr> +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr>), !cir.ptr> +// CHECK: cir.store %2, %0 : !cir.ptr>, !cir.ptr>> + From 187ad05f73dfaa18cbc2129273d206b11ad95e47 Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Fri, 24 May 2024 10:34:19 +0800 Subject: [PATCH 1592/2301] [CIR] Add -cir-mlir-scf-prepare to simplify lowering to SCF (#604) This commit introduces SCFPreparePass to 1) Canonicalize IV to LHS of loop comparison For example, transfer `cir.cmp(gt, %bound, %IV)` to `cir.cmp(lt, %IV, %bound)`. So we could use RHS as boundary and use `lt` to determine it's an upper bound. 2) Hoist loop invariant operations in condition block out of loop. The condition block may be generated as following which contains the operations produced upper bound. SCF for loop required loop boundary as input operands. So we might need to hoist the boundary operations out of loop. ``` cir.for : cond { %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.const #cir.int<100> : !s32i <- upper bound %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.boo cir.condition(%7 } body { ``` --- clang/include/clang/CIR/CIRToCIRPasses.h | 15 +- clang/include/clang/CIR/Dialect/Passes.h | 1 + clang/include/clang/CIR/Dialect/Passes.td | 11 + clang/lib/CIR/CodeGen/CIRPasses.cpp | 18 +- .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../lib/CIR/Dialect/Transforms/SCFPrepare.cpp | 226 ++++++++++++++++++ clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 3 +- clang/test/CIR/Transforms/scf-prepare.cir | 140 +++++++++++ clang/test/CIR/mlirprint.c | 6 + clang/tools/cir-opt/cir-opt.cpp | 3 + 10 files changed, 407 insertions(+), 17 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp create mode 100644 clang/test/CIR/Transforms/scf-prepare.cir diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index ed089cd966f4..dfea5e6d004b 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -28,14 +28,13 @@ class ModuleOp; namespace cir { // Run set of cleanup/prepare/etc passes CIR <-> CIR. -mlir::LogicalResult -runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, bool enableVerifier, - bool enableLifetime, llvm::StringRef lifetimeOpts, - bool enableIdiomRecognizer, - llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, - llvm::StringRef libOptOpts, - std::string &passOptParsingFailure, bool flattenCIR); +mlir::LogicalResult runCIRToCIRPasses( + mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, + llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, + llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, + llvm::StringRef libOptOpts, std::string &passOptParsingFailure, + bool flattenCIR, bool emitMLIR); } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 30ec06114476..b4bff1d5082c 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -28,6 +28,7 @@ std::unique_ptr createLifetimeCheckPass(ArrayRef remark, clang::ASTContext *astCtx); std::unique_ptr createMergeCleanupsPass(); std::unique_ptr createDropASTPass(); +std::unique_ptr createSCFPreparePass(); std::unique_ptr createLoweringPreparePass(); std::unique_ptr createLoweringPreparePass(clang::ASTContext *astCtx); std::unique_ptr createIdiomRecognizerPass(); diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 1253bccf77b8..8038a627ad83 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -75,6 +75,17 @@ def LoweringPrepare : Pass<"cir-lowering-prepare"> { let dependentDialects = ["cir::CIRDialect"]; } +def SCFPrepare : Pass<"cir-mlir-scf-prepare"> { + let summary = "Preparation work before lowering to SCF dialect"; + let description = [{ + This pass does preparation work for SCF lowering. For example, it may + hoist the loop invariant or canonicalize the loop comparison. Currently, + the pass only be enabled for through MLIR pipeline. + }]; + let constructor = "mlir::createSCFPreparePass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + def FlattenCFG : Pass<"cir-flatten-cfg"> { let summary = "Produces flatten cfg"; let description = [{ diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index edffd66a9357..a8716d11759e 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -19,14 +19,13 @@ #include "mlir/Support/LogicalResult.h" namespace cir { -mlir::LogicalResult -runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, bool enableVerifier, - bool enableLifetime, llvm::StringRef lifetimeOpts, - bool enableIdiomRecognizer, - llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, - llvm::StringRef libOptOpts, - std::string &passOptParsingFailure, bool flattenCIR) { +mlir::LogicalResult runCIRToCIRPasses( + mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, + clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, + llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, + llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, + llvm::StringRef libOptOpts, std::string &passOptParsingFailure, + bool flattenCIR, bool emitMLIR) { mlir::PassManager pm(mlirCtx); pm.addPass(mlir::createMergeCleanupsPass()); @@ -68,6 +67,9 @@ runCIRToCIRPasses(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, if (flattenCIR) mlir::populateCIRPreLoweringPasses(pm); + if (emitMLIR) + pm.addPass(mlir::createSCFPreparePass()); + // FIXME: once CIRCodenAction fixes emission other than CIR we // need to run this right before dialect emission. pm.addPass(mlir::createDropASTPass()); diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 647d15aea8dc..9a0806bde8d4 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -9,6 +9,7 @@ add_clang_library(MLIRCIRTransforms StdHelpers.cpp FlattenCFG.cpp GotoSolver.cpp + SCFPrepare.cpp DEPENDS MLIRCIRPassIncGen diff --git a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp new file mode 100644 index 000000000000..ac7e9f2c5b9e --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp @@ -0,0 +1,226 @@ +//===- SCFPrepare.cpp - pareparation work for SCF lowering ----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "PassDetail.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" + +using namespace mlir; +using namespace cir; + +//===----------------------------------------------------------------------===// +// Rewrite patterns +//===----------------------------------------------------------------------===// + +namespace { + +static Value findIVAddr(Block *step) { + Value IVAddr = nullptr; + for (Operation &op : *step) { + if (auto loadOp = dyn_cast(op)) + IVAddr = loadOp.getAddr(); + else if (auto storeOp = dyn_cast(op)) + if (IVAddr != storeOp.getAddr()) + return nullptr; + } + return IVAddr; +} + +static CmpOp findLoopCmpAndIV(Block *cond, Value IVAddr, Value &IV) { + Operation *IVLoadOp = nullptr; + for (Operation &op : *cond) { + if (auto loadOp = dyn_cast(op)) + if (loadOp.getAddr() == IVAddr) { + IVLoadOp = &op; + break; + } + } + if (!IVLoadOp) + return nullptr; + if (!IVLoadOp->hasOneUse()) + return nullptr; + IV = IVLoadOp->getResult(0); + return dyn_cast(*IVLoadOp->user_begin()); +} + +// Canonicalize IV to LHS of loop comparison +// For example, transfer cir.cmp(gt, %bound, %IV) to cir.cmp(lt, %IV, %bound). +// So we could use RHS as boundary and use lt to determine it's an upper bound. +struct canonicalizeIVtoCmpLHS : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + CmpOpKind swapCmpKind(CmpOpKind kind) const { + switch (kind) { + case CmpOpKind::gt: + return CmpOpKind::lt; + case CmpOpKind::ge: + return CmpOpKind::le; + case CmpOpKind::lt: + return CmpOpKind::gt; + case CmpOpKind::le: + return CmpOpKind::ge; + default: + break; + } + return kind; + } + + void replaceWithNewCmpOp(CmpOp oldCmp, CmpOpKind newKind, Value lhs, + Value rhs, PatternRewriter &rewriter) const { + rewriter.setInsertionPointAfter(oldCmp.getOperation()); + auto newCmp = rewriter.create( + oldCmp.getLoc(), oldCmp.getType(), newKind, lhs, rhs); + oldCmp->replaceAllUsesWith(newCmp); + oldCmp->erase(); + } + + LogicalResult matchAndRewrite(ForOp op, + PatternRewriter &rewriter) const final { + auto *cond = &op.getCond().front(); + auto *step = (op.maybeGetStep() ? &op.maybeGetStep()->front() : nullptr); + if (!step) + return failure(); + Value IVAddr = findIVAddr(step); + if (!IVAddr) + return failure(); + Value IV = nullptr; + auto loopCmp = findLoopCmpAndIV(cond, IVAddr, IV); + if (!loopCmp || !IV) + return failure(); + + CmpOpKind cmpKind = loopCmp.getKind(); + Value cmpRhs = loopCmp.getRhs(); + // Canonicalize IV to LHS of loop Cmp. + if (loopCmp.getLhs() != IV) { + cmpKind = swapCmpKind(cmpKind); + cmpRhs = loopCmp.getLhs(); + replaceWithNewCmpOp(loopCmp, cmpKind, IV, cmpRhs, rewriter); + return success(); + } + + return failure(); + } +}; + +// Hoist loop invariant operations in condition block out of loop +// The condition block may be generated as following which contains the +// operations produced upper bound. +// SCF for loop required loop boundary as input operands. So we need to +// hoist the boundary operations out of loop. +// +// cir.for : cond { +// %4 = cir.load %2 : !cir.ptr, !s32i +// %5 = cir.const #cir.int<100> : !s32i <- upper bound +// %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i +// %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool +// cir.condition(%7 +// } body { +struct hoistLoopInvariantInCondBlock : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + bool isLoopInvariantLoad(Operation *op, ForOp forOp) const { + auto load = dyn_cast(op); + if (!load) + return false; + + auto loadAddr = load.getAddr(); + auto result = + forOp->walk([&](mlir::Operation *op) { + if (auto store = dyn_cast(op)) { + if (store.getAddr() == loadAddr) + return mlir::WalkResult::interrupt(); + } + return mlir::WalkResult::advance(); + }); + + if (result.wasInterrupted()) + return false; + + return true; + } + + LogicalResult matchAndRewrite(ForOp forOp, + PatternRewriter &rewriter) const final { + auto *cond = &forOp.getCond().front(); + auto *step = + (forOp.maybeGetStep() ? &forOp.maybeGetStep()->front() : nullptr); + if (!step) + return failure(); + Value IVAddr = findIVAddr(step); + if (!IVAddr) + return failure(); + Value IV = nullptr; + auto loopCmp = findLoopCmpAndIV(cond, IVAddr, IV); + if (!loopCmp || !IV) + return failure(); + + Value cmpRhs = loopCmp.getRhs(); + auto defOp = cmpRhs.getDefiningOp(); + SmallVector ops; + // Go through the cast if exist. + if (defOp && isa(defOp)) { + ops.push_back(defOp); + defOp = defOp->getOperand(0).getDefiningOp(); + } + if (defOp && + (isa(defOp) || isLoopInvariantLoad(defOp, forOp))) { + ops.push_back(defOp); + for (auto op : reverse(ops)) + op->moveBefore(forOp); + return success(); + } + + return failure(); + } +}; + +//===----------------------------------------------------------------------===// +// SCFPreparePass +//===----------------------------------------------------------------------===// + +struct SCFPreparePass : public SCFPrepareBase { + using SCFPrepareBase::SCFPrepareBase; + void runOnOperation() override; +}; + +void populateSCFPreparePatterns(RewritePatternSet &patterns) { + // clang-format off + patterns.add< + canonicalizeIVtoCmpLHS, + hoistLoopInvariantInCondBlock + >(patterns.getContext()); + // clang-format on +} + +void SCFPreparePass::runOnOperation() { + // Collect rewrite patterns. + RewritePatternSet patterns(&getContext()); + populateSCFPreparePatterns(patterns); + + // Collect operations to apply patterns. + SmallVector ops; + getOperation()->walk([&](Operation *op) { + // CastOp here is to perform a manual `fold` in + // applyOpPatternsAndFold + if (isa(op)) + ops.push_back(op); + }); + + // Apply patterns. + if (applyOpPatternsAndFold(ops, std::move(patterns)).failed()) + signalPassFailure(); +} + +} // namespace + +std::unique_ptr mlir::createSCFPreparePass() { + return std::make_unique(); +} diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 5f10d256d7aa..56051d3c81ec 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -189,7 +189,8 @@ class CIRGenConsumer : public clang::ASTConsumer { feOptions.ClangIRLifetimeCheck, lifetimeOpts, feOptions.ClangIRIdiomRecognizer, idiomRecognizerOpts, feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, - action == CIRGenAction::OutputType::EmitCIRFlat) + action == CIRGenAction::OutputType::EmitCIRFlat, + action == CIRGenAction::OutputType::EmitMLIR) .failed()) { if (!passOptParsingFailure.empty()) diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) diff --git a/clang/test/CIR/Transforms/scf-prepare.cir b/clang/test/CIR/Transforms/scf-prepare.cir new file mode 100644 index 000000000000..91bbdab5a538 --- /dev/null +++ b/clang/test/CIR/Transforms/scf-prepare.cir @@ -0,0 +1,140 @@ +// RUN: cir-opt %s -cir-mlir-scf-prepare -o - | FileCheck %s + +!s32i = !cir.int + +module { + cir.global "private" external @a : !cir.array + + // for (int i = l; u > i; ++i) + // a[i] = 3; + // + // Check that the loop boundary been hoisted out of loop and the comparison + // been transferred from gt to lt. + cir.func @variableLoopBound(%arg0: !s32i, %arg1: !s32i) { + // CHECK: %[[BOUND:.*]] = cir.load %[[BOUND_ADDR:.*]] : !cir.ptr, !s32i + // CHECK: cir.for : cond { + // CHECK: %[[IV:.*]] = cir.load %[[IV_ADDR:.*]] : !cir.ptr, !s32i + // CHECK: %[[COND:.*]] = cir.cmp(lt, %[[IV]], %4) : !s32i, !s32i + + %0 = cir.alloca !s32i, !cir.ptr, ["l", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["u", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.store %arg1, %1 : !s32i, !cir.ptr + cir.scope { + %2 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %3 = cir.load %0 : !cir.ptr, !s32i + cir.store %3, %2 : !s32i, !cir.ptr + cir.for : cond { + %4 = cir.load %1 : !cir.ptr, !s32i + %5 = cir.load %2 : !cir.ptr, !s32i + %6 = cir.cmp(gt, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.condition(%7) + } body { + %4 = cir.const #cir.int<3> : !s32i + %5 = cir.get_global @a : !cir.ptr> + %6 = cir.load %2 : !cir.ptr, !s32i + %7 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr + %8 = cir.ptr_stride(%7 : !cir.ptr, %6 : !s32i), !cir.ptr + cir.store %4, %8 : !s32i, !cir.ptr + cir.yield + } step { + %4 = cir.load %2 : !cir.ptr, !s32i + %5 = cir.unary(inc, %4) : !s32i, !s32i + cir.store %5, %2 : !s32i, !cir.ptr + cir.yield + } + } + cir.return + } + + // for (int i = 0; 50 >= i; ++i) + // a[i] = 3; + // + // Check that the loop boundary been hoisted out of loop and the comparison + // been transferred from ge to le. + cir.func @constantLoopBound() { + // CHECK: %[[BOUND:.*]] = cir.const #cir.int<50> : !s32i + // CHECK: cir.for : cond { + // CHECK: %[[IV:.*]] = cir.load %[[IV_ADDR:.*]] : !cir.ptr, !s32i + // CHECK: %[[COND:.*]] = cir.cmp(le, %[[IV]], %[[BOUND]]) : !s32i, !s32i + + cir.scope { + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %1 = cir.const #cir.int<0> : !s32i + cir.store %1, %0 : !s32i, !cir.ptr + cir.for : cond { + %2 = cir.const #cir.int<50> : !s32i + %3 = cir.load %0 : !cir.ptr, !s32i + %4 = cir.cmp(ge, %2, %3) : !s32i, !s32i + %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool + cir.condition(%5) + } body { + %2 = cir.const #cir.int<3> : !s32i + %3 = cir.get_global @a : !cir.ptr> + %4 = cir.load %0 : !cir.ptr, !s32i + %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr + %6 = cir.ptr_stride(%5 : !cir.ptr, %4 : !s32i), !cir.ptr + cir.store %2, %6 : !s32i, !cir.ptr + cir.yield + } step { + %2 = cir.load %0 : !cir.ptr, !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, !cir.ptr + cir.yield + } + } + cir.return + } + + // for (int i = l; u > i; ++i) { + // --u; + // a[i] = 3; + // } + // + // Check that the loop boundary not been hoisted because it's not loop + // invariant and the loop comparison been transferred from gt to lt. + cir.func @variableLoopBoundNotLoopInvariant(%arg0: !s32i, %arg1: !s32i) { + // CHECK: cir.store %[[IV_INIT:.*]], %[[IV_ADDR:.*]] : !s32i, !cir.ptr + // CHECK: cir.for : cond { + // CHECK: %[[BOUND:.*]] = cir.load %[[BOUND_ADDR:.*]] : !cir.ptr, !s32i + // CHECK: %[[IV:.*]] = cir.load %[[IV_ADDR:.*]] : !cir.ptr, !s32i + // CHECK: %[[COND:.*]] = cir.cmp(lt, %[[IV]], %[[BOUND]]) : !s32i, !s32i + + %0 = cir.alloca !s32i, !cir.ptr, ["l", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["u", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.store %arg1, %1 : !s32i, !cir.ptr + cir.scope { + %2 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %3 = cir.load %0 : !cir.ptr, !s32i + cir.store %3, %2 : !s32i, !cir.ptr + cir.for : cond { + %4 = cir.load %1 : !cir.ptr, !s32i + %5 = cir.load %2 : !cir.ptr, !s32i + %6 = cir.cmp(gt, %4, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.condition(%7) + } body { + cir.scope { + %4 = cir.load %1 : !cir.ptr, !s32i + %5 = cir.unary(dec, %4) : !s32i, !s32i + cir.store %5, %1 : !s32i, !cir.ptr + %6 = cir.const #cir.int<3> : !s32i + %7 = cir.get_global @a : !cir.ptr> + %8 = cir.load %2 : !cir.ptr, !s32i + %9 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr + %10 = cir.ptr_stride(%9 : !cir.ptr, %8 : !s32i), !cir.ptr + cir.store %6, %10 : !s32i, !cir.ptr + } + cir.yield + } step { + %4 = cir.load %2 : !cir.ptr, !s32i + %5 = cir.unary(inc, %4) : !s32i, !s32i + cir.store %5, %2 : !s32i, !cir.ptr + cir.yield + } + } + cir.return + } +} diff --git a/clang/test/CIR/mlirprint.c b/clang/test/CIR/mlirprint.c index 2f6fe5651f60..8c947ecb37f9 100644 --- a/clang/test/CIR/mlirprint.c +++ b/clang/test/CIR/mlirprint.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIRFLAT +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fno-clangir-direct-lowering -emit-mlir -mmlir --mlir-print-ir-after-all %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIRMLIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -mmlir --mlir-print-ir-after-all -mllvm -print-after-all %s -o %t.ll 2>&1 | FileCheck %s -check-prefix=CIR -check-prefix=LLVM // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-drop-ast %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIRPASS // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -mmlir --mlir-print-ir-before=cir-flatten-cfg %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CFGPASS @@ -15,6 +16,7 @@ int foo(void) { // CIR: IR Dump After LoweringPrepare (cir-lowering-prepare) // CIR: cir.func @foo() -> !s32i // CIR-NOT: IR Dump After FlattenCFG +// CIR-NOT: IR Dump After SCFPrepare // CIR: IR Dump After DropAST (cir-drop-ast) // CIR: cir.func @foo() -> !s32i // CIRFLAT: IR Dump After MergeCleanups (cir-merge-cleanups) @@ -24,6 +26,10 @@ int foo(void) { // CIRFLAT: IR Dump After FlattenCFG (cir-flatten-cfg) // CIRFLAT: IR Dump After DropAST (cir-drop-ast) // CIRFLAT: cir.func @foo() -> !s32i +// CIRMLIR: IR Dump After MergeCleanups (cir-merge-cleanups) +// CIRMLIR: IR Dump After LoweringPrepare (cir-lowering-prepare) +// CIRMLIR: IR Dump After SCFPrepare (cir-mlir-scf-prepare +// CIRMLIR: IR Dump After DropAST (cir-drop-ast) // LLVM: IR Dump After cir::direct::ConvertCIRToLLVMPass (cir-flat-to-llvm) // LLVM: llvm.func @foo() -> i32 // LLVM: IR Dump After diff --git a/clang/tools/cir-opt/cir-opt.cpp b/clang/tools/cir-opt/cir-opt.cpp index 064aa7241c8e..ea58a3bdb3b3 100644 --- a/clang/tools/cir-opt/cir-opt.cpp +++ b/clang/tools/cir-opt/cir-opt.cpp @@ -42,6 +42,9 @@ int main(int argc, char **argv) { return mlir::createMergeCleanupsPass(); }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return mlir::createSCFPreparePass(); + }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertCIRToMLIRPass(); }); From 1b642b1bb901527fe607ff99e817e1de264909a0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 23 May 2024 20:19:42 -0700 Subject: [PATCH 1593/2301] [CIR][CIRGen][NFC] Add optional alignment to cir.load and cir.store Don't hook this up with CIRGen just yet. While here update parsing tests to include `atomic(seq_cst)`. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 10 ++++++---- clang/include/clang/CIR/Dialect/IR/CIROps.td | 19 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 3 ++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 7 +++++-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 +--- clang/test/CIR/IR/cir-ops.cir | 4 ++++ 6 files changed, 36 insertions(+), 11 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index a46d90d4b8af..29fda2decf56 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -84,9 +84,10 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return getPointerTo(::mlir::cir::VoidType::get(getContext()), addressSpace); } - mlir::Value createLoad(mlir::Location loc, mlir::Value ptr) { - return create(loc, ptr, /*isDeref=*/false, - /*is_volatile=*/false, + mlir::Value createLoad(mlir::Location loc, mlir::Value ptr, + bool isVolatile = false) { + return create(loc, ptr, /*isDeref=*/false, isVolatile, + /*alignment=*/mlir::IntegerAttr{}, /*mem_order=*/mlir::cir::MemOrderAttr{}); } @@ -176,11 +177,12 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, mlir::Value dst, bool _volatile = false, + ::mlir::IntegerAttr align = {}, ::mlir::cir::MemOrderAttr order = {}) { if (dst.getType().cast().getPointee() != val.getType()) dst = createPtrBitcast(dst, val.getType()); - return create(loc, val, dst, _volatile, order); + return create(loc, val, dst, _volatile, align, order); } mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 487b12d93b3e..b5f809cc9692 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -480,6 +480,9 @@ def LoadOp : CIR_Op<"load", [ a pointer. A unit attribute `volatile` can be used to indicate a volatile loading. Load can be marked atomic by using `atomic()`. + `align` can be used to specify an alignment that's different from the + default, which is computed from `result`'s type ABI data layout. + Example: ```mlir @@ -493,18 +496,24 @@ def LoadOp : CIR_Op<"load", [ // Perform a volatile load from address in %0. %4 = cir.load volatile %0 : !cir.ptr, i32 + + // Others + %x = cir.load align(16) atomic(seq_cst) %0 : !cir.ptr, i32 ``` }]; let arguments = (ins Arg:$addr, UnitAttr:$isDeref, UnitAttr:$is_volatile, - OptionalAttr:$mem_order); + OptionalAttr:$alignment, + OptionalAttr:$mem_order + ); let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ (`deref` $isDeref^)? (`volatile` $is_volatile^)? + (`align` `(` $alignment^ `)`)? (`atomic` `(` $mem_order^ `)`)? $addr `:` qualified(type($addr)) `,` type($result) attr-dict }]; @@ -528,6 +537,9 @@ def StoreOp : CIR_Op<"store", [ a volatile store. Store's can be marked atomic by using `atomic()`. + `align` can be used to specify an alignment that's different from the + default, which is computed from `result`'s type ABI data layout. + Example: ```mlir @@ -536,6 +548,9 @@ def StoreOp : CIR_Op<"store", [ // Perform a volatile store into memory location at the address in %0. cir.store volatile %arg0, %0 : i32, !cir.ptr + + // Others + cir.store align(16) atomic(seq_cst) %x, %addr : i32, !cir.ptr ``` }]; @@ -543,10 +558,12 @@ def StoreOp : CIR_Op<"store", [ Arg:$addr, UnitAttr:$is_volatile, + OptionalAttr:$alignment, OptionalAttr:$mem_order); let assemblyFormat = [{ (`volatile` $is_volatile^)? + (`align` `(` $alignment^ `)`)? (`atomic` `(` $mem_order^ `)`)? $value `,` $addr attr-dict `:` type($value) `,` qualified(type($addr)) }]; diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 6f6b8c91d84e..9364aa393597 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -513,7 +513,8 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, auto loadVal1 = builder.createLoad(loc, Val1); // FIXME(cir): add scope information. assert(!UnimplementedFeature::syncScopeID()); - builder.createStore(loc, loadVal1, Ptr, E->isVolatile(), orderAttr); + builder.createStore(loc, loadVal1, Ptr, E->isVolatile(), + /*alignment=*/mlir::IntegerAttr{}, orderAttr); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 5461438b06d0..ad6e4fbfec4c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -776,9 +776,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst, bool _volatile = false, + ::mlir::IntegerAttr align = {}, ::mlir::cir::MemOrderAttr order = {}) { return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), _volatile, - order); + align, order); } mlir::cir::StoreOp createFlagStore(mlir::Location loc, bool val, @@ -795,8 +796,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { // TODO: add alignment for LoadOp/StoreOp, right now LowerToLLVM knows // how to figure out for most part, but it's possible the client might want // to enforce a different alignment. + mlir::IntegerAttr alignAttr; assert(!UnimplementedFeature::alignedStore()); - return CIRBaseBuilderTy::createStore(loc, val, dst, _volatile, order); + return CIRBaseBuilderTy::createStore(loc, val, dst, _volatile, alignAttr, + order); } // Convert byte offset to sequence of high-level indices suitable for diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 98add6fde3e7..03fcb7e1f79b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2587,9 +2587,7 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, mlir::cir::CastKind::bitcast, Ptr); } - mlir::cir::LoadOp Load = - builder.create(Loc, ElemTy, Ptr, /* deref */ false, - Volatile, ::mlir::cir::MemOrderAttr{}); + mlir::Value Load = builder.CIRBaseBuilderTy::createLoad(Loc, Ptr, Volatile); if (isNontemporal) { llvm_unreachable("NYI"); diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 10fd9010cc1b..73a8de8c40cd 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -9,7 +9,9 @@ module { cir.func @foo(%arg0: !s32i) -> !s32i { %0 = cir.alloca !s32i, !cir.ptr, ["x", init] cir.store %arg0, %0 : !s32i, !cir.ptr + cir.store align(1) atomic(seq_cst) %arg0, %0 : !s32i, !cir.ptr %1 = cir.load %0 : !cir.ptr, !s32i + %2 = cir.load align(1) atomic(seq_cst) %0 : !cir.ptr, !s32i cir.return %1 : !s32i } @@ -62,7 +64,9 @@ module { // CHECK-NEXT: cir.func @foo(%arg0: !s32i) -> !s32i { // CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] // CHECK-NEXT: cir.store %arg0, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.store align(1) atomic(seq_cst) %arg0, %0 : !s32i, !cir.ptr // CHECK-NEXT: %1 = cir.load %0 : !cir.ptr, !s32i +// CHECK-NEXT: %2 = cir.load align(1) atomic(seq_cst) %0 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %1 : !s32i // CHECK-NEXT: } From 4097f13a865a0515bc8d17ab299ab4075a80519a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 23 May 2024 21:39:03 -0700 Subject: [PATCH 1594/2301] [CIR][LowerToLLVM] Forward or compute alignment for every store Load coming next. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 10 +-- clang/test/CIR/Lowering/binop-overflow.cir | 4 +- clang/test/CIR/Lowering/bool.cir | 2 +- clang/test/CIR/Lowering/class.cir | 2 +- clang/test/CIR/Lowering/const.cir | 2 +- clang/test/CIR/Lowering/dot.cir | 16 ++-- clang/test/CIR/Lowering/goto.cir | 4 +- clang/test/CIR/Lowering/hello.cir | 2 +- clang/test/CIR/Lowering/loadstorealloca.cir | 4 +- clang/test/CIR/Lowering/ptrstride.cir | 2 +- clang/test/CIR/Lowering/scope.cir | 4 +- clang/test/CIR/Lowering/struct.cir | 2 +- clang/test/CIR/Lowering/ternary.cir | 4 +- clang/test/CIR/Lowering/unary-plus-minus.cir | 2 +- clang/test/CIR/Lowering/unions.cir | 2 +- clang/test/CIR/Lowering/vectype.cpp | 80 +++++++++---------- 16 files changed, 71 insertions(+), 71 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index c59e916e8f7e..e99c071b8602 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -899,17 +899,17 @@ class CIRStoreLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::StoreOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - unsigned alignment = 0; auto memorder = op.getMemOrder(); auto ordering = getLLVMMemOrder(memorder); - - // FIXME: right now we only pass in the alignment when the memory access - // is atomic, we should always pass it instead. - if (ordering != mlir::LLVM::AtomicOrdering::not_atomic) { + auto alignOpt = op.getAlignment(); + unsigned alignment = 0; + if (!alignOpt) { const auto llvmTy = getTypeConverter()->convertType(op.getValue().getType()); mlir::DataLayout layout(op->getParentOfType()); alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); + } else { + alignment = *alignOpt; } // TODO: nontemporal, syncscope. diff --git a/clang/test/CIR/Lowering/binop-overflow.cir b/clang/test/CIR/Lowering/binop-overflow.cir index c73e708e5320..5cdd9d82ae7b 100644 --- a/clang/test/CIR/Lowering/binop-overflow.cir +++ b/clang/test/CIR/Lowering/binop-overflow.cir @@ -16,7 +16,7 @@ module { // MLIR-NEXT: %[[#RES:]] = llvm.extractvalue %[[#INTRIN_RET]][0] : !llvm.struct<(i32, i1)> // MLIR-NEXT: %[[#OVFL:]] = llvm.extractvalue %[[#INTRIN_RET]][1] : !llvm.struct<(i32, i1)> // MLIR-NEXT: %[[#OVFL_EXT:]] = llvm.zext %[[#OVFL]] : i1 to i8 - // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] : i32, !llvm.ptr + // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] {{.*}} : i32, !llvm.ptr // MLIR-NEXT: llvm.return %[[#OVFL_EXT]] : i8 // MLIR-NEXT: } @@ -46,7 +46,7 @@ module { // MLIR-NEXT: %[[#TRUNC_OVFL:]] = llvm.icmp "ne" %[[#RES_EXT_2]], %[[#RES_EXT]] : i33 // MLIR-NEXT: %[[#OVFL:]] = llvm.or %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]] : i1 // MLIR-NEXT: %[[#OVFL_EXT:]] = llvm.zext %[[#OVFL]] : i1 to i8 - // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] : i32, !llvm.ptr + // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] {{.*}} : i32, !llvm.ptr // MLIR-NEXT: llvm.return %[[#OVFL_EXT]] : i8 // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index ee743e8376fa..9b424355aa18 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -19,7 +19,7 @@ module { // MLIR-DAG: = llvm.mlir.constant(1 : i8) : i8 // MLIR-DAG: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64 // MLIR-DAG: = llvm.alloca [[Value]] x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr -// MLIR-DAG: llvm.store %0, %2 : i8, !llvm.ptr +// MLIR-DAG: llvm.store %0, %2 {{.*}} : i8, !llvm.ptr // MLIR-NEXT: llvm.return // LLVM: define void @foo() diff --git a/clang/test/CIR/Lowering/class.cir b/clang/test/CIR/Lowering/class.cir index 75366182ad3c..03ef0568d1d1 100644 --- a/clang/test/CIR/Lowering/class.cir +++ b/clang/test/CIR/Lowering/class.cir @@ -34,7 +34,7 @@ module { // CHECK: %2 = llvm.mlir.undef : !llvm.struct<"class.S2A", (i32)> // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32 // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.struct<"class.S2A", (i32)> - // CHECK: llvm.store %4, %1 : !llvm.struct<"class.S2A", (i32)>, !llvm.ptr + // CHECK: llvm.store %4, %1 {{.*}}: !llvm.struct<"class.S2A", (i32)>, !llvm.ptr // CHECK: llvm.return // CHECK: } diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 7ac57b833fed..2058a6cbd8b0 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -51,7 +51,7 @@ module { // CHECK: %6 = llvm.mlir.constant(1 : i32) : i32 // CHECK: %7 = llvm.insertvalue %6, %5[1] : !llvm.struct<"struct.anon.1", (i32, i32)> // CHECK: %8 = llvm.insertvalue %7, %2[0] : !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>> - // CHECK: llvm.store %8, %1 : !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>>, !llvm.ptr + // CHECK: llvm.store %8, %1 {{.*}}: !llvm.array<1 x struct<"struct.anon.1", (i32, i32)>>, !llvm.ptr // CHECK: llvm.return cir.func @testArrWithTrailingZeros() { diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index d4823df783a7..8ea980afa99f 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -63,17 +63,17 @@ module { // MLIR: %[[VAL_10:.*]] = llvm.alloca %[[VAL_9]] x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr // MLIR: %[[VAL_11:.*]] = llvm.mlir.constant(1 : index) : i64 // MLIR: %[[VAL_12:.*]] = llvm.alloca %[[VAL_11]] x f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr -// MLIR: llvm.store {{.*}}, %[[VAL_4]] : !llvm.ptr, !llvm.ptr -// MLIR: llvm.store {{.*}}, %[[VAL_6]] : !llvm.ptr, !llvm.ptr -// MLIR: llvm.store {{.*}}, %[[VAL_8]] : i32, !llvm.ptr +// MLIR: llvm.store {{.*}}, %[[VAL_4]] {{.*}}: !llvm.ptr, !llvm.ptr +// MLIR: llvm.store {{.*}}, %[[VAL_6]] {{.*}}: !llvm.ptr, !llvm.ptr +// MLIR: llvm.store {{.*}}, %[[VAL_8]] {{.*}}: i32, !llvm.ptr // MLIR: %[[VAL_13:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 -// MLIR: llvm.store %[[VAL_13]], %[[VAL_12]] : f64, !llvm.ptr +// MLIR: llvm.store %[[VAL_13]], %[[VAL_12]] {{.*}}: f64, !llvm.ptr // MLIR: llvm.br ^bb1 // MLIR: ^bb1: // MLIR: %[[VAL_14:.*]] = llvm.mlir.constant(1 : index) : i64 // MLIR: %[[VAL_15:.*]] = llvm.alloca %[[VAL_14]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR: %[[VAL_16:.*]] = llvm.mlir.constant(0 : i32) : i32 -// MLIR: llvm.store %[[VAL_16]], %[[VAL_15]] : i32, !llvm.ptr +// MLIR: llvm.store %[[VAL_16]], %[[VAL_15]] {{.*}}: i32, !llvm.ptr // MLIR: llvm.br ^bb2 // MLIR: ^bb2: // MLIR: %[[VAL_17:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i32 @@ -97,19 +97,19 @@ module { // MLIR: %[[VAL_33:.*]] = llvm.fmul %[[VAL_27]], %[[VAL_32]] : f64 // MLIR: %[[VAL_34:.*]] = llvm.load %[[VAL_12]] : !llvm.ptr -> f64 // MLIR: %[[VAL_35:.*]] = llvm.fadd %[[VAL_34]], %[[VAL_33]] : f64 -// MLIR: llvm.store %[[VAL_35]], %[[VAL_12]] : f64, !llvm.ptr +// MLIR: llvm.store %[[VAL_35]], %[[VAL_12]] {{.*}}: f64, !llvm.ptr // MLIR: llvm.br ^bb4 // MLIR: ^bb4: // MLIR: %[[VAL_36:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i32 // MLIR: %[[VAL_37:.*]] = llvm.mlir.constant(1 : i32) : i32 // MLIR: %[[VAL_38:.*]] = llvm.add %[[VAL_36]], %[[VAL_37]] : i32 -// MLIR: llvm.store %[[VAL_38]], %[[VAL_15]] : i32, !llvm.ptr +// MLIR: llvm.store %[[VAL_38]], %[[VAL_15]] {{.*}}: i32, !llvm.ptr // MLIR: llvm.br ^bb2 // MLIR: ^bb5: // MLIR: llvm.br ^bb6 // MLIR: ^bb6: // MLIR: %[[VAL_39:.*]] = llvm.load %[[VAL_12]] : !llvm.ptr -> f64 -// MLIR: llvm.store %[[VAL_39]], %[[VAL_10]] : f64, !llvm.ptr +// MLIR: llvm.store %[[VAL_39]], %[[VAL_10]] {{.*}}: f64, !llvm.ptr // MLIR: %[[VAL_40:.*]] = llvm.load %[[VAL_10]] : !llvm.ptr -> f64 // MLIR: llvm.return %[[VAL_40]] : f64 // MLIR: } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index a7b02c0fe875..9a60987745f1 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -41,14 +41,14 @@ module { // MLIR: ^bb[[#COND_NO]]: // MLIR: llvm.br ^bb[[#BLK:]] // MLIR: ^bb[[#BLK]]: -// MLIR: llvm.store %[[#Zero]], %[[#Ret_val_addr:]] : i32, !llvm.ptr +// MLIR: llvm.store %[[#Zero]], %[[#Ret_val_addr:]] {{.*}}: i32, !llvm.ptr // MLIR: llvm.br ^bb[[#RETURN:]] // MLIR: ^bb[[#RETURN]]: // MLIR: %[[#Ret_val:]] = llvm.load %[[#Ret_val_addr]] : !llvm.ptr -> i32 // MLIR: llvm.return %[[#Ret_val]] : i32 // MLIR: ^bb[[#GOTO_BLK]]: // MLIR: %[[#Neg_one:]] = llvm.sub %[[#Zero]], %[[#One]] : i32 -// MLIR: llvm.store %[[#Neg_one]], %[[#Ret_val_addr]] : i32, !llvm.ptr +// MLIR: llvm.store %[[#Neg_one]], %[[#Ret_val_addr]] {{.*}}: i32, !llvm.ptr // MLIR: llvm.br ^bb[[#RETURN]] // MLIR: } } diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 5d35cd6e81bb..90f649ce9f82 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -29,7 +29,7 @@ module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.sign // CHECK: %3 = llvm.getelementptr %2[0] : (!llvm.ptr) -> !llvm.ptr, i8 // CHECK: %4 = llvm.call @printf(%3) : (!llvm.ptr) -> i32 // CHECK: %5 = llvm.mlir.constant(0 : i32) : i32 -// CHECK: llvm.store %5, %1 : i32, !llvm.ptr +// CHECK: llvm.store %5, %1 {{.*}} : i32, !llvm.ptr // CHECK: %6 = llvm.load %1 : !llvm.ptr -> i32 // CHECK: llvm.return %6 : i32 // CHECK: } diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index f6d3a6eb521f..d14e89cb17ee 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -25,7 +25,7 @@ module { // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: llvm.store %2, %1 : i32, !llvm.ptr +// MLIR-NEXT: llvm.store %2, %1 {{.*}}: i32, !llvm.ptr // MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: return %3 : i32 @@ -34,6 +34,6 @@ module { // MLIR-NEXT: %0 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: llvm.store volatile %2, %1 : i32, !llvm.ptr +// MLIR-NEXT: llvm.store volatile %2, %1 {{.*}}: i32, !llvm.ptr // MLIR-NEXT: %3 = llvm.load volatile %1 : !llvm.ptr -> i32 // MLIR-NEXT: return %3 : i32 diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index 1616ede8934a..f919c294d683 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -17,7 +17,7 @@ module { // MLIR-LABEL: @f // MLIR: %[[VAL_1:.*]] = llvm.mlir.constant(1 : index) : i64 // MLIR: %[[VAL_2:.*]] = llvm.alloca %[[VAL_1]] x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr -// MLIR: llvm.store {{.*}}, %[[VAL_2]] : !llvm.ptr, !llvm.ptr +// MLIR: llvm.store {{.*}}, %[[VAL_2]] {{.*}}: !llvm.ptr, !llvm.ptr // MLIR: %[[VAL_3:.*]] = llvm.load %[[VAL_2]] : !llvm.ptr -> !llvm.ptr // MLIR: %[[VAL_4:.*]] = llvm.mlir.constant(1 : i32) : i32 // MLIR: %[[VAL_5:.*]] = llvm.sext %[[VAL_4]] : i32 to i64 diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index 9da4910499e4..7e8e06418299 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -19,7 +19,7 @@ module { // MLIR-DAG: [[v1:%[0-9]]] = llvm.mlir.constant(4 : i32) : i32 // MLIR-DAG: [[v2:%[0-9]]] = llvm.mlir.constant(1 : index) : i64 // MLIR-DAG: [[v3:%[0-9]]] = llvm.alloca [[v2]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: llvm.store [[v1]], [[v3]] : i32, !llvm.ptr +// MLIR-NEXT: llvm.store [[v1]], [[v3]] {{.*}}: i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 // MLIR-NEXT: ^bb2: // MLIR-NEXT: llvm.return @@ -67,7 +67,7 @@ module { // MLIR-NEXT: llvm.br ^bb1 // MLIR-NEXT: ^bb1: // pred: ^bb0 // MLIR-NEXT: [[v2:%.*]] = llvm.mlir.constant(0 : i32) : i32 - // MLIR-NEXT: llvm.store [[v2]], [[v1]] : i32, !llvm.ptr + // MLIR-NEXT: llvm.store [[v2]], [[v1]] {{.*}}: i32, !llvm.ptr // MLIR-NEXT: [[v3:%.*]] = llvm.load [[v1]] : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return [[v3]] : i32 // MLIR-NEXT: ^bb2: // no predecessors diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index 334ca781fd8e..7ae152e52713 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -34,7 +34,7 @@ module { // CHECK: %2 = llvm.mlir.undef : !llvm.struct<"struct.S2A", (i32)> // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32 // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.struct<"struct.S2A", (i32)> - // CHECK: llvm.store %4, %1 : !llvm.struct<"struct.S2A", (i32)>, !llvm.ptr + // CHECK: llvm.store %4, %1 {{.*}}: !llvm.struct<"struct.S2A", (i32)>, !llvm.ptr // CHECK: llvm.return // CHECK: } diff --git a/clang/test/CIR/Lowering/ternary.cir b/clang/test/CIR/Lowering/ternary.cir index 9fccea8043f8..1de77af4ac01 100644 --- a/clang/test/CIR/Lowering/ternary.cir +++ b/clang/test/CIR/Lowering/ternary.cir @@ -29,7 +29,7 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %3 = llvm.alloca %2 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: llvm.store %arg0, %1 : i32, !llvm.ptr +// MLIR-NEXT: llvm.store %arg0, %1 {{.*}}: i32, !llvm.ptr // MLIR-NEXT: %4 = llvm.load %1 : !llvm.ptr -> i32 // MLIR-NEXT: %5 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %6 = llvm.icmp "sgt" %4, %5 : i32 @@ -43,7 +43,7 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: ^bb3(%9: i32): // 2 preds: ^bb1, ^bb2 // MLIR-NEXT: llvm.br ^bb4 // MLIR-NEXT: ^bb4: // pred: ^bb3 -// MLIR-NEXT: llvm.store %9, %3 : i32, !llvm.ptr +// MLIR-NEXT: llvm.store %9, %3 {{.*}}: i32, !llvm.ptr // MLIR-NEXT: %10 = llvm.load %3 : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return %10 : i32 // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index 6734845ad03d..37f0b4195ced 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -32,7 +32,7 @@ module { cir.store %arg0, %0 : !cir.double, !cir.ptr %1 = cir.load %0 : !cir.ptr, !cir.double %2 = cir.unary(plus, %1) : !cir.double, !cir.double - // MLIR: llvm.store %arg0, %[[#F_PLUS:]] : f64, !llvm.ptr + // MLIR: llvm.store %arg0, %[[#F_PLUS:]] {{.*}}: f64, !llvm.ptr // MLIR: %{{[0-9]}} = llvm.load %[[#F_PLUS]] : !llvm.ptr -> f64 %3 = cir.load %0 : !cir.ptr, !cir.double %4 = cir.unary(minus, %3) : !cir.double, !cir.double diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir index 694c3ae17465..87493aa2aa46 100644 --- a/clang/test/CIR/Lowering/unions.cir +++ b/clang/test/CIR/Lowering/unions.cir @@ -28,7 +28,7 @@ module { // CHECK: %[[#VAL:]] = llvm.mlir.constant(1 : i8) : i8 // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. // CHECK: %[[#ADDR:]] = llvm.bitcast %{{.+}} : !llvm.ptr - // CHECK: llvm.store %[[#VAL]], %[[#ADDR]] : i8, !llvm.ptr + // CHECK: llvm.store %[[#VAL]], %[[#ADDR]] {{.*}}: i8, !llvm.ptr // Should load direclty from the union's base address. %7 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index 3679c4b9d802..6d5e995bb9f0 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -26,7 +26,7 @@ void vector_int_test(int x) { // CHECK: %[[#T40:]] = llvm.insertelement %[[#T32]], %[[#T38]][%[[#T39]] : i64] : vector<4xi32> // CHECK: %[[#T41:]] = llvm.mlir.constant(3 : i64) : i64 // CHECK: %[[#T42:]] = llvm.insertelement %[[#T33]], %[[#T40]][%[[#T41]] : i64] : vector<4xi32> - // CHECK: llvm.store %[[#T42]], %[[#T3:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T42]], %[[#T3:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Non-const vector initialization. vi4 b = { x, 5, 6, x + 1 }; @@ -45,13 +45,13 @@ void vector_int_test(int x) { // CHECK: %[[#T55:]] = llvm.insertelement %[[#T45]], %[[#T53]][%[[#T54]] : i64] : vector<4xi32> // CHECK: %[[#T56:]] = llvm.mlir.constant(3 : i64) : i64 // CHECK: %[[#T57:]] = llvm.insertelement %[[#T48]], %[[#T55]][%[[#T56]] : i64] : vector<4xi32> - // CHECK: llvm.store %[[#T57]], %[[#T5:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T57]], %[[#T5:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Vector to vector conversion vd2 bb = (vd2)b; // CHECK: %[[#bval:]] = llvm.load %[[#bmem:]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#bbval:]] = llvm.bitcast %[[#bval]] : vector<4xi32> to vector<2xf64> - // CHECK: llvm.store %[[#bbval]], %[[#bbmem:]] : vector<2xf64>, !llvm.ptr + // CHECK: llvm.store %[[#bbval]], %[[#bbmem:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr // Scalar to vector conversion, a.k.a. vector splat. b = a + 7; @@ -65,7 +65,7 @@ void vector_int_test(int x) { // CHECK: %[[#T58:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T59:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 // CHECK: %[[#T60:]] = llvm.extractelement %[[#T58]][%[[#T59]] : i32] : vector<4xi32> - // CHECK: llvm.store %[[#T60]], %[[#T7:]] : i32, !llvm.ptr + // CHECK: llvm.store %[[#T60]], %[[#T7:]] {alignment = 4 : i64} : i32, !llvm.ptr // Insert element. a[x] = x; @@ -73,7 +73,7 @@ void vector_int_test(int x) { // CHECK: %[[#T62:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 // CHECK: %[[#T63:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T64:]] = llvm.insertelement %[[#T61]], %[[#T63]][%[[#T62]] : i32] : vector<4xi32> - // CHECK: llvm.store %[[#T64]], %[[#T3]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T64]], %[[#T3]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Compound assignment a[x] += a[0]; @@ -85,59 +85,59 @@ void vector_int_test(int x) { // CHECK: %[[#SUMCA:]] = llvm.add %[[#LHSCA:]], %[[#RHSCA:]] : i32 // CHECK: %[[#LOADCAVEC4:]] = llvm.load %{{[0-9]+}} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#RESULTCAVEC:]] = llvm.insertelement %[[#SUMCA:]], %[[#LOADCAVEC4:]][%[[#LOADCAIDX2:]] : i32] : vector<4xi32> - // CHECK: llvm.store %[[#RESULTCAVEC:]], %{{[0-9]+}} : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#RESULTCAVEC:]], %{{[0-9]+}} {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Binary arithmetic operators. vi4 d = a + b; // CHECK: %[[#T65:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T66:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T67:]] = llvm.add %[[#T65]], %[[#T66]] : vector<4xi32> - // CHECK: llvm.store %[[#T67]], %[[#T9:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T67]], %[[#T9:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 e = a - b; // CHECK: %[[#T68:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T69:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T70:]] = llvm.sub %[[#T68]], %[[#T69]] : vector<4xi32> - // CHECK: llvm.store %[[#T70]], %[[#T11:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T70]], %[[#T11:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 f = a * b; // CHECK: %[[#T71:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T72:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T73:]] = llvm.mul %[[#T71]], %[[#T72]] : vector<4xi32> - // CHECK: llvm.store %[[#T73]], %[[#T13:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T73]], %[[#T13:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 g = a / b; // CHECK: %[[#T74:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T75:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T76:]] = llvm.sdiv %[[#T74]], %[[#T75]] : vector<4xi32> - // CHECK: llvm.store %[[#T76]], %[[#T15:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T76]], %[[#T15:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 h = a % b; // CHECK: %[[#T77:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T78:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T79:]] = llvm.srem %[[#T77]], %[[#T78]] : vector<4xi32> - // CHECK: llvm.store %[[#T79]], %[[#T17:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T79]], %[[#T17:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 i = a & b; // CHECK: %[[#T80:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T81:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T82:]] = llvm.and %[[#T80]], %[[#T81]] : vector<4xi32> - // CHECK: llvm.store %[[#T82]], %[[#T19:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T82]], %[[#T19:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 j = a | b; // CHECK: %[[#T83:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T84:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T85:]] = llvm.or %[[#T83]], %[[#T84]] : vector<4xi32> - // CHECK: llvm.store %[[#T85]], %[[#T21:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T85]], %[[#T21:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 k = a ^ b; // CHECK: %[[#T86:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T87:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T88:]] = llvm.xor %[[#T86]], %[[#T87]] : vector<4xi32> - // CHECK: llvm.store %[[#T88]], %[[#T23:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T88]], %[[#T23:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Unary arithmetic operators. vi4 l = +a; // CHECK: %[[#T89:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: llvm.store %[[#T89]], %[[#T25:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T89]], %[[#T25:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 m = -a; // CHECK: %[[#T90:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T91:]] = llvm.mlir.zero : vector<4xi32> // CHECK: %[[#T92:]] = llvm.sub %[[#T91]], %[[#T90]] : vector<4xi32> - // CHECK: llvm.store %[[#T92]], %[[#T27:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T92]], %[[#T27:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 n = ~a; // CHECK: %[[#T93:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T94:]] = llvm.mlir.constant(-1 : i32) : i32 @@ -151,7 +151,7 @@ void vector_int_test(int x) { // CHECK: %[[#T102:]] = llvm.mlir.constant(3 : i64) : i64 // CHECK: %[[#T103:]] = llvm.insertelement %[[#T94]], %[[#T101]][%[[#T102]] : i64] : vector<4xi32> // CHECK: %[[#T104:]] = llvm.xor %[[#T103]], %[[#T93]] : vector<4xi32> - // CHECK: llvm.store %[[#T104]], %[[#T29:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T104]], %[[#T29:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Ternary conditional operator vi4 tc = a ? b : d; @@ -165,37 +165,37 @@ void vector_int_test(int x) { // CHECK: %[[#T106:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T107:]] = llvm.icmp "eq" %[[#T105]], %[[#T106]] : vector<4xi32> // CHECK: %[[#T108:]] = llvm.sext %[[#T107]] : vector<4xi1> to vector<4xi32> - // CHECK: llvm.store %[[#T108]], %[[#To:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T108]], %[[#To:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 p = a != b; // CHECK: %[[#T109:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T110:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T111:]] = llvm.icmp "ne" %[[#T109]], %[[#T110]] : vector<4xi32> // CHECK: %[[#T112:]] = llvm.sext %[[#T111]] : vector<4xi1> to vector<4xi32> - // CHECK: llvm.store %[[#T112]], %[[#Tp:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T112]], %[[#Tp:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 q = a < b; // CHECK: %[[#T113:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T114:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T115:]] = llvm.icmp "slt" %[[#T113]], %[[#T114]] : vector<4xi32> // CHECK: %[[#T116:]] = llvm.sext %[[#T115]] : vector<4xi1> to vector<4xi32> - // CHECK: llvm.store %[[#T116]], %[[#Tq:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T116]], %[[#Tq:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 r = a > b; // CHECK: %[[#T117:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T118:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T119:]] = llvm.icmp "sgt" %[[#T117]], %[[#T118]] : vector<4xi32> // CHECK: %[[#T120:]] = llvm.sext %[[#T119]] : vector<4xi1> to vector<4xi32> - // CHECK: llvm.store %[[#T120]], %[[#Tr:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T120]], %[[#Tr:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 s = a <= b; // CHECK: %[[#T121:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T122:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T123:]] = llvm.icmp "sle" %[[#T121]], %[[#T122]] : vector<4xi32> // CHECK: %[[#T124:]] = llvm.sext %[[#T123]] : vector<4xi1> to vector<4xi32> - // CHECK: llvm.store %[[#T124]], %[[#Ts:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T124]], %[[#Ts:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 t = a >= b; // CHECK: %[[#T125:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T126:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T127:]] = llvm.icmp "sge" %[[#T125]], %[[#T126]] : vector<4xi32> // CHECK: %[[#T128:]] = llvm.sext %[[#T127]] : vector<4xi1> to vector<4xi32> - // CHECK: llvm.store %[[#T128]], %[[#Tt:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#T128]], %[[#Tt:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // __builtin_shufflevector vi4 u = __builtin_shufflevector(a, b, 7, 5, 3, 1); @@ -231,7 +231,7 @@ void vector_int_test(int x) { // CHECK: %[[#svP:]] = llvm.extractelement %[[#svA]][%[[#svO]] : i64] : vector<4xi32> // CHECK: %[[#svQ:]] = llvm.extractelement %[[#sv_a]][%[[#svP:]] : i32] : vector<4xi32> // CHECK: %[[#svR:]] = llvm.insertelement %[[#svQ]], %[[#svN]][%[[#svO]] : i64] : vector<4xi32> - // CHECK: llvm.store %[[#svR]], %[[#sv_v:]] : vector<4xi32>, !llvm.ptr + // CHECK: llvm.store %[[#svR]], %[[#sv_v:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr } void vector_double_test(int x, double y) { @@ -246,7 +246,7 @@ void vector_double_test(int x, double y) { // CHECK: %[[#T26:]] = llvm.insertelement %[[#T22]], %[[#T24]][%[[#T25]] : i64] : vector<2xf64> // CHECK: %[[#T27:]] = llvm.mlir.constant(1 : i64) : i64 // CHECK: %[[#T28:]] = llvm.insertelement %[[#T23]], %[[#T26]][%[[#T27]] : i64] : vector<2xf64> - // CHECK: llvm.store %[[#T28]], %[[#T5:]] : vector<2xf64>, !llvm.ptr + // CHECK: llvm.store %[[#T28]], %[[#T5:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr // Non-const vector initialization. vd2 b = { y, y + 1.0 }; @@ -259,14 +259,14 @@ void vector_double_test(int x, double y) { // CHECK: %[[#T35:]] = llvm.insertelement %[[#T29]], %[[#T33]][%[[#T34]] : i64] : vector<2xf64> // CHECK: %[[#T36:]] = llvm.mlir.constant(1 : i64) : i64 // CHECK: %[[#T37:]] = llvm.insertelement %[[#T32]], %[[#T35]][%[[#T36]] : i64] : vector<2xf64> - // CHECK: llvm.store %[[#T37]], %[[#T7:]] : vector<2xf64>, !llvm.ptr + // CHECK: llvm.store %[[#T37]], %[[#T7:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr // Extract element. double c = a[x]; // CHECK: %[[#T38:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T39:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 // CHECK: %[[#T40:]] = llvm.extractelement %[[#T38]][%[[#T39]] : i32] : vector<2xf64> - // CHECK: llvm.store %[[#T40]], %[[#T9:]] : f64, !llvm.ptr + // CHECK: llvm.store %[[#T40]], %[[#T9:]] {alignment = 8 : i64} : f64, !llvm.ptr // Insert element. a[x] = y; @@ -274,38 +274,38 @@ void vector_double_test(int x, double y) { // CHECK: %[[#T42:]] = llvm.load %[[#T1:]] : !llvm.ptr -> i32 // CHECK: %[[#T43:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T44:]] = llvm.insertelement %[[#T41]], %[[#T43]][%[[#T42]] : i32] : vector<2xf64> - // CHECK: llvm.store %[[#T44]], %[[#T5]] : vector<2xf64>, !llvm.ptr + // CHECK: llvm.store %[[#T44]], %[[#T5]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr // Binary arithmetic operators. vd2 d = a + b; // CHECK: %[[#T45:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T46:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T47:]] = llvm.fadd %[[#T45]], %[[#T46]] : vector<2xf64> - // CHECK: llvm.store %[[#T47]], %[[#T11:]] : vector<2xf64>, !llvm.ptr + // CHECK: llvm.store %[[#T47]], %[[#T11:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr vd2 e = a - b; // CHECK: %[[#T48:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T49:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T50:]] = llvm.fsub %[[#T48]], %[[#T49]] : vector<2xf64> - // CHECK: llvm.store %[[#T50]], %[[#T13:]] : vector<2xf64>, !llvm.ptr + // CHECK: llvm.store %[[#T50]], %[[#T13:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr vd2 f = a * b; // CHECK: %[[#T51:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T52:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T53:]] = llvm.fmul %[[#T51]], %[[#T52]] : vector<2xf64> - // CHECK: llvm.store %[[#T53]], %[[#T15:]] : vector<2xf64>, !llvm.ptr + // CHECK: llvm.store %[[#T53]], %[[#T15:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr vd2 g = a / b; // CHECK: %[[#T54:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T55:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T56:]] = llvm.fdiv %[[#T54]], %[[#T55]] : vector<2xf64> - // CHECK: llvm.store %[[#T56]], %[[#T17:]] : vector<2xf64>, !llvm.ptr + // CHECK: llvm.store %[[#T56]], %[[#T17:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr // Unary arithmetic operators. vd2 l = +a; // CHECK: %[[#T57:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: llvm.store %[[#T57]], %[[#T19:]] : vector<2xf64>, !llvm.ptr + // CHECK: llvm.store %[[#T57]], %[[#T19:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr vd2 m = -a; // CHECK: %[[#T58:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T59:]] = llvm.fneg %[[#T58]] : vector<2xf64> - // CHECK: llvm.store %[[#T59]], %[[#T21:]] : vector<2xf64>, !llvm.ptr + // CHECK: llvm.store %[[#T59]], %[[#T21:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr // Comparisons vll2 o = a == b; @@ -313,37 +313,37 @@ void vector_double_test(int x, double y) { // CHECK: %[[#T61:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T62:]] = llvm.fcmp "oeq" %[[#T60]], %[[#T61]] : vector<2xf64> // CHECK: %[[#T63:]] = llvm.sext %[[#T62]] : vector<2xi1> to vector<2xi64> - // CHECK: llvm.store %[[#T63]], %[[#To:]] : vector<2xi64>, !llvm.ptr + // CHECK: llvm.store %[[#T63]], %[[#To:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr vll2 p = a != b; // CHECK: %[[#T64:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T65:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T66:]] = llvm.fcmp "une" %[[#T64]], %[[#T65]] : vector<2xf64> // CHECK: %[[#T67:]] = llvm.sext %[[#T66]] : vector<2xi1> to vector<2xi64> - // CHECK: llvm.store %[[#T67]], %[[#Tp:]] : vector<2xi64>, !llvm.ptr + // CHECK: llvm.store %[[#T67]], %[[#Tp:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr vll2 q = a < b; // CHECK: %[[#T68:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T69:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T70:]] = llvm.fcmp "olt" %[[#T68]], %[[#T69]] : vector<2xf64> // CHECK: %[[#T71:]] = llvm.sext %[[#T70]] : vector<2xi1> to vector<2xi64> - // CHECK: llvm.store %[[#T71]], %[[#Tq:]] : vector<2xi64>, !llvm.ptr + // CHECK: llvm.store %[[#T71]], %[[#Tq:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr vll2 r = a > b; // CHECK: %[[#T72:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T73:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T74:]] = llvm.fcmp "ogt" %[[#T72]], %[[#T73]] : vector<2xf64> // CHECK: %[[#T75:]] = llvm.sext %[[#T74]] : vector<2xi1> to vector<2xi64> - // CHECK: llvm.store %[[#T75]], %[[#Tr:]] : vector<2xi64>, !llvm.ptr + // CHECK: llvm.store %[[#T75]], %[[#Tr:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr vll2 s = a <= b; // CHECK: %[[#T76:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T77:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T78:]] = llvm.fcmp "ole" %[[#T76]], %[[#T77]] : vector<2xf64> // CHECK: %[[#T79:]] = llvm.sext %[[#T78]] : vector<2xi1> to vector<2xi64> - // CHECK: llvm.store %[[#T79]], %[[#Ts:]] : vector<2xi64>, !llvm.ptr + // CHECK: llvm.store %[[#T79]], %[[#Ts:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr vll2 t = a >= b; // CHECK: %[[#T80:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T81:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T82:]] = llvm.fcmp "oge" %[[#T80]], %[[#T81]] : vector<2xf64> // CHECK: %[[#T83:]] = llvm.sext %[[#T82]] : vector<2xi1> to vector<2xi64> - // CHECK: llvm.store %[[#T83]], %[[#Tt:]] : vector<2xi64>, !llvm.ptr + // CHECK: llvm.store %[[#T83]], %[[#Tt:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr // __builtin_convertvector vus2 w = __builtin_convertvector(a, vus2); From b6387f1de007b914e8b683665428c9bb48303adb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 23 May 2024 21:50:24 -0700 Subject: [PATCH 1595/2301] [CIR][LowerToLLVM] Forward or compute alignment for loads --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 10 +- clang/test/CIR/Lowering/dot.cir | 24 +-- clang/test/CIR/Lowering/globals.cir | 4 +- clang/test/CIR/Lowering/goto.cir | 2 +- clang/test/CIR/Lowering/hello.cir | 2 +- clang/test/CIR/Lowering/loadstorealloca.cir | 4 +- clang/test/CIR/Lowering/ptrstride.cir | 4 +- clang/test/CIR/Lowering/scope.cir | 4 +- clang/test/CIR/Lowering/ternary.cir | 4 +- clang/test/CIR/Lowering/unary-plus-minus.cir | 4 +- clang/test/CIR/Lowering/unions.cir | 2 +- clang/test/CIR/Lowering/vectype.cpp | 148 +++++++++--------- 12 files changed, 106 insertions(+), 106 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e99c071b8602..4fde702e519d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -872,15 +872,15 @@ class CIRLoadLowering : public mlir::OpConversionPattern { mlir::ConversionPatternRewriter &rewriter) const override { const auto llvmTy = getTypeConverter()->convertType(op.getResult().getType()); - unsigned alignment = 0; auto memorder = op.getMemOrder(); auto ordering = getLLVMMemOrder(memorder); - - // FIXME: right now we only pass in the alignment when the memory access - // is atomic, we should always pass it instead. - if (ordering != mlir::LLVM::AtomicOrdering::not_atomic) { + auto alignOpt = op.getAlignment(); + unsigned alignment = 0; + if (!alignOpt) { mlir::DataLayout layout(op->getParentOfType()); alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); + } else { + alignment = *alignOpt; } // TODO: nontemporal, invariant, syncscope. diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 8ea980afa99f..5c5ed4736f7a 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -76,31 +76,31 @@ module { // MLIR: llvm.store %[[VAL_16]], %[[VAL_15]] {{.*}}: i32, !llvm.ptr // MLIR: llvm.br ^bb2 // MLIR: ^bb2: -// MLIR: %[[VAL_17:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i32 -// MLIR: %[[VAL_18:.*]] = llvm.load %[[VAL_8]] : !llvm.ptr -> i32 +// MLIR: %[[VAL_17:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 +// MLIR: %[[VAL_18:.*]] = llvm.load %[[VAL_8]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_19:.*]] = llvm.icmp "slt" %[[VAL_17]], %[[VAL_18]] : i32 // MLIR: %[[VAL_20:.*]] = llvm.zext %[[VAL_19]] : i1 to i32 // MLIR: %[[VAL_21:.*]] = llvm.mlir.constant(0 : i32) : i32 // MLIR: %[[VAL_22:.*]] = llvm.icmp "ne" %[[VAL_20]], %[[VAL_21]] : i32 // MLIR: llvm.cond_br %[[VAL_22]], ^bb3, ^bb5 // MLIR: ^bb3: -// MLIR: %[[VAL_23:.*]] = llvm.load %[[VAL_4]] : !llvm.ptr -> !llvm.ptr -// MLIR: %[[VAL_24:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i32 +// MLIR: %[[VAL_23:.*]] = llvm.load %[[VAL_4]] {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr +// MLIR: %[[VAL_24:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_25:.*]] = llvm.sext %[[VAL_24]] : i32 to i64 // MLIR: %[[VAL_26:.*]] = llvm.getelementptr %[[VAL_23]]{{\[}}%[[VAL_25]]] : (!llvm.ptr, i64) -> !llvm.ptr, f64 -// MLIR: %[[VAL_27:.*]] = llvm.load %[[VAL_26]] : !llvm.ptr -> f64 -// MLIR: %[[VAL_28:.*]] = llvm.load %[[VAL_6]] : !llvm.ptr -> !llvm.ptr -// MLIR: %[[VAL_29:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i32 +// MLIR: %[[VAL_27:.*]] = llvm.load %[[VAL_26]] {alignment = 8 : i64} : !llvm.ptr -> f64 +// MLIR: %[[VAL_28:.*]] = llvm.load %[[VAL_6]] {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr +// MLIR: %[[VAL_29:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_30:.*]] = llvm.sext %[[VAL_29]] : i32 to i64 // MLIR: %[[VAL_31:.*]] = llvm.getelementptr %[[VAL_28]]{{\[}}%[[VAL_30]]] : (!llvm.ptr, i64) -> !llvm.ptr, f64 -// MLIR: %[[VAL_32:.*]] = llvm.load %[[VAL_31]] : !llvm.ptr -> f64 +// MLIR: %[[VAL_32:.*]] = llvm.load %[[VAL_31]] {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: %[[VAL_33:.*]] = llvm.fmul %[[VAL_27]], %[[VAL_32]] : f64 -// MLIR: %[[VAL_34:.*]] = llvm.load %[[VAL_12]] : !llvm.ptr -> f64 +// MLIR: %[[VAL_34:.*]] = llvm.load %[[VAL_12]] {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: %[[VAL_35:.*]] = llvm.fadd %[[VAL_34]], %[[VAL_33]] : f64 // MLIR: llvm.store %[[VAL_35]], %[[VAL_12]] {{.*}}: f64, !llvm.ptr // MLIR: llvm.br ^bb4 // MLIR: ^bb4: -// MLIR: %[[VAL_36:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i32 +// MLIR: %[[VAL_36:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_37:.*]] = llvm.mlir.constant(1 : i32) : i32 // MLIR: %[[VAL_38:.*]] = llvm.add %[[VAL_36]], %[[VAL_37]] : i32 // MLIR: llvm.store %[[VAL_38]], %[[VAL_15]] {{.*}}: i32, !llvm.ptr @@ -108,8 +108,8 @@ module { // MLIR: ^bb5: // MLIR: llvm.br ^bb6 // MLIR: ^bb6: -// MLIR: %[[VAL_39:.*]] = llvm.load %[[VAL_12]] : !llvm.ptr -> f64 +// MLIR: %[[VAL_39:.*]] = llvm.load %[[VAL_12]] {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: llvm.store %[[VAL_39]], %[[VAL_10]] {{.*}}: f64, !llvm.ptr -// MLIR: %[[VAL_40:.*]] = llvm.load %[[VAL_10]] : !llvm.ptr -> f64 +// MLIR: %[[VAL_40:.*]] = llvm.load %[[VAL_10]] {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: llvm.return %[[VAL_40]] : f64 // MLIR: } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index e536f02eddda..7ad94e6faa9f 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -172,12 +172,12 @@ module { } //MLIR-LABEL: @foo //MLIR: %[[RES4:.*]] = llvm.mlir.addressof @Handlers : !llvm.ptr - //MLIR: %[[LOAD:.*]] = llvm.load {{.*}} : !llvm.ptr -> i32 + //MLIR: %[[LOAD:.*]] = llvm.load {{.*}} {alignment = 4 : i64} : !llvm.ptr -> i32 //MLIR: %[[RES6:.*]] = llvm.getelementptr %[[RES4]][0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> //MLIR: %[[RES5:.*]] = llvm.sext %[[LOAD]] : i32 to i64 //MLIR: %[[RES7:.*]] = llvm.getelementptr %[[RES6]][%[[RES5]]] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> //MLIR: %[[RES8:.*]] = llvm.getelementptr %[[RES7]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.anon.1", (ptr)> - //MLIR: %[[RES9:.*]] = llvm.load %[[RES8]] : !llvm.ptr -> !llvm.ptr + //MLIR: %[[RES9:.*]] = llvm.load %[[RES8]] {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr //MLIR: llvm.call %[[RES9]]({{.*}}) : !llvm.ptr, (i32) -> () cir.global external @zero_array = #cir.zero : !cir.array diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 9a60987745f1..a98dceda4c17 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -44,7 +44,7 @@ module { // MLIR: llvm.store %[[#Zero]], %[[#Ret_val_addr:]] {{.*}}: i32, !llvm.ptr // MLIR: llvm.br ^bb[[#RETURN:]] // MLIR: ^bb[[#RETURN]]: -// MLIR: %[[#Ret_val:]] = llvm.load %[[#Ret_val_addr]] : !llvm.ptr -> i32 +// MLIR: %[[#Ret_val:]] = llvm.load %[[#Ret_val_addr]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: llvm.return %[[#Ret_val]] : i32 // MLIR: ^bb[[#GOTO_BLK]]: // MLIR: %[[#Neg_one:]] = llvm.sub %[[#Zero]], %[[#One]] : i32 diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 90f649ce9f82..04017b6876b2 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -30,6 +30,6 @@ module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.sign // CHECK: %4 = llvm.call @printf(%3) : (!llvm.ptr) -> i32 // CHECK: %5 = llvm.mlir.constant(0 : i32) : i32 // CHECK: llvm.store %5, %1 {{.*}} : i32, !llvm.ptr -// CHECK: %6 = llvm.load %1 : !llvm.ptr -> i32 +// CHECK: %6 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32 // CHECK: llvm.return %6 : i32 // CHECK: } diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index d14e89cb17ee..5764d5afc8f5 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -26,7 +26,7 @@ module { // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: llvm.store %2, %1 {{.*}}: i32, !llvm.ptr -// MLIR-NEXT: %3 = llvm.load %1 : !llvm.ptr -> i32 +// MLIR-NEXT: %3 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: return %3 : i32 @@ -35,5 +35,5 @@ module { // MLIR-NEXT: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: llvm.store volatile %2, %1 {{.*}}: i32, !llvm.ptr -// MLIR-NEXT: %3 = llvm.load volatile %1 : !llvm.ptr -> i32 +// MLIR-NEXT: %3 = llvm.load volatile %1 {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: return %3 : i32 diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index f919c294d683..b3d74c657d82 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -18,9 +18,9 @@ module { // MLIR: %[[VAL_1:.*]] = llvm.mlir.constant(1 : index) : i64 // MLIR: %[[VAL_2:.*]] = llvm.alloca %[[VAL_1]] x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr // MLIR: llvm.store {{.*}}, %[[VAL_2]] {{.*}}: !llvm.ptr, !llvm.ptr -// MLIR: %[[VAL_3:.*]] = llvm.load %[[VAL_2]] : !llvm.ptr -> !llvm.ptr +// MLIR: %[[VAL_3:.*]] = llvm.load %[[VAL_2]] {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr // MLIR: %[[VAL_4:.*]] = llvm.mlir.constant(1 : i32) : i32 // MLIR: %[[VAL_5:.*]] = llvm.sext %[[VAL_4]] : i32 to i64 // MLIR: %[[VAL_6:.*]] = llvm.getelementptr %[[VAL_3]]{{\[}}%[[VAL_5]]] : (!llvm.ptr, i64) -> !llvm.ptr, i32 -// MLIR: %[[VAL_7:.*]] = llvm.load %[[VAL_6]] : !llvm.ptr -> i32 +// MLIR: %[[VAL_7:.*]] = llvm.load %[[VAL_6]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: llvm.return diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index 7e8e06418299..add46429cba2 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -68,10 +68,10 @@ module { // MLIR-NEXT: ^bb1: // pred: ^bb0 // MLIR-NEXT: [[v2:%.*]] = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: llvm.store [[v2]], [[v1]] {{.*}}: i32, !llvm.ptr - // MLIR-NEXT: [[v3:%.*]] = llvm.load [[v1]] : !llvm.ptr -> i32 + // MLIR-NEXT: [[v3:%.*]] = llvm.load [[v1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return [[v3]] : i32 // MLIR-NEXT: ^bb2: // no predecessors - // MLIR-NEXT: [[v4:%.*]] = llvm.load [[v1]] : !llvm.ptr -> i32 + // MLIR-NEXT: [[v4:%.*]] = llvm.load [[v1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return [[v4]] : i32 // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/ternary.cir b/clang/test/CIR/Lowering/ternary.cir index 1de77af4ac01..6e469f388d79 100644 --- a/clang/test/CIR/Lowering/ternary.cir +++ b/clang/test/CIR/Lowering/ternary.cir @@ -30,7 +30,7 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: %2 = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: %3 = llvm.alloca %2 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.store %arg0, %1 {{.*}}: i32, !llvm.ptr -// MLIR-NEXT: %4 = llvm.load %1 : !llvm.ptr -> i32 +// MLIR-NEXT: %4 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: %5 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %6 = llvm.icmp "sgt" %4, %5 : i32 // MLIR-NEXT: llvm.cond_br %6, ^bb1, ^bb2 @@ -44,6 +44,6 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: llvm.br ^bb4 // MLIR-NEXT: ^bb4: // pred: ^bb3 // MLIR-NEXT: llvm.store %9, %3 {{.*}}: i32, !llvm.ptr -// MLIR-NEXT: %10 = llvm.load %3 : !llvm.ptr -> i32 +// MLIR-NEXT: %10 = llvm.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return %10 : i32 // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index 37f0b4195ced..cdb4d90fa854 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -33,10 +33,10 @@ module { %1 = cir.load %0 : !cir.ptr, !cir.double %2 = cir.unary(plus, %1) : !cir.double, !cir.double // MLIR: llvm.store %arg0, %[[#F_PLUS:]] {{.*}}: f64, !llvm.ptr - // MLIR: %{{[0-9]}} = llvm.load %[[#F_PLUS]] : !llvm.ptr -> f64 + // MLIR: %{{[0-9]}} = llvm.load %[[#F_PLUS]] {alignment = 8 : i64} : !llvm.ptr -> f64 %3 = cir.load %0 : !cir.ptr, !cir.double %4 = cir.unary(minus, %3) : !cir.double, !cir.double - // MLIR: %[[#F_MINUS:]] = llvm.load %{{[0-9]}} : !llvm.ptr -> f64 + // MLIR: %[[#F_MINUS:]] = llvm.load %{{[0-9]}} {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: %{{[0-9]}} = llvm.fneg %[[#F_MINUS]] : f64 cir.return } diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir index 87493aa2aa46..6fbcd89b9a97 100644 --- a/clang/test/CIR/Lowering/unions.cir +++ b/clang/test/CIR/Lowering/unions.cir @@ -35,7 +35,7 @@ module { %8 = cir.load %7 : !cir.ptr, !cir.bool // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. // CHECK: %[[#BASE:]] = llvm.bitcast %{{.+}} : !llvm.ptr - // CHECK: %{{.+}} = llvm.load %[[#BASE]] : !llvm.ptr -> i8 + // CHECK: %{{.+}} = llvm.load %[[#BASE]] {alignment = 1 : i64} : !llvm.ptr -> i8 cir.return } diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index 6d5e995bb9f0..cc7331ef2ba9 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -30,10 +30,10 @@ void vector_int_test(int x) { // Non-const vector initialization. vi4 b = { x, 5, 6, x + 1 }; - // CHECK: %[[#T43:]] = llvm.load %[[#T1:]] : !llvm.ptr -> i32 + // CHECK: %[[#T43:]] = llvm.load %[[#T1:]] {alignment = 4 : i64} : !llvm.ptr -> i32 // CHECK: %[[#T44:]] = llvm.mlir.constant(5 : i32) : i32 // CHECK: %[[#T45:]] = llvm.mlir.constant(6 : i32) : i32 - // CHECK: %[[#T46:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T46:]] = llvm.load %[[#T1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // CHECK: %[[#T47:]] = llvm.mlir.constant(1 : i32) : i32 // CHECK: %[[#T48:]] = llvm.add %[[#T46]], %[[#T47]] : i32 // CHECK: %[[#T49:]] = llvm.mlir.undef : vector<4xi32> @@ -49,7 +49,7 @@ void vector_int_test(int x) { // Vector to vector conversion vd2 bb = (vd2)b; - // CHECK: %[[#bval:]] = llvm.load %[[#bmem:]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#bval:]] = llvm.load %[[#bmem:]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#bbval:]] = llvm.bitcast %[[#bval]] : vector<4xi32> to vector<2xf64> // CHECK: llvm.store %[[#bbval]], %[[#bbmem:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr @@ -62,84 +62,84 @@ void vector_int_test(int x) { // Extract element. int c = a[x]; - // CHECK: %[[#T58:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T59:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T58:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T59:]] = llvm.load %[[#T1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // CHECK: %[[#T60:]] = llvm.extractelement %[[#T58]][%[[#T59]] : i32] : vector<4xi32> // CHECK: llvm.store %[[#T60]], %[[#T7:]] {alignment = 4 : i64} : i32, !llvm.ptr // Insert element. a[x] = x; - // CHECK: %[[#T61:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 - // CHECK: %[[#T62:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 - // CHECK: %[[#T63:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T61:]] = llvm.load %[[#T1]] {alignment = 4 : i64} : !llvm.ptr -> i32 + // CHECK: %[[#T62:]] = llvm.load %[[#T1]] {alignment = 4 : i64} : !llvm.ptr -> i32 + // CHECK: %[[#T63:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T64:]] = llvm.insertelement %[[#T61]], %[[#T63]][%[[#T62]] : i32] : vector<4xi32> // CHECK: llvm.store %[[#T64]], %[[#T3]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Compound assignment a[x] += a[0]; - // CHECK: %[[#LOADCA1:]] = llvm.load %{{[0-9]+}} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#LOADCA1:]] = llvm.load %{{[0-9]+}} {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#RHSCA:]] = llvm.extractelement %[[#LOADCA1:]][%{{[0-9]+}} : i32] : vector<4xi32> - // CHECK: %[[#LOADCAIDX2:]] = llvm.load %{{[0-9]+}} : !llvm.ptr -> i32 - // CHECK: %[[#LOADCAVEC3:]] = llvm.load %{{[0-9]+}} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#LOADCAIDX2:]] = llvm.load %{{[0-9]+}} {alignment = 4 : i64} : !llvm.ptr -> i32 + // CHECK: %[[#LOADCAVEC3:]] = llvm.load %{{[0-9]+}} {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#LHSCA:]] = llvm.extractelement %[[#LOADCAVEC3:]][%[[#LOADCAIDX2:]] : i32] : vector<4xi32> // CHECK: %[[#SUMCA:]] = llvm.add %[[#LHSCA:]], %[[#RHSCA:]] : i32 - // CHECK: %[[#LOADCAVEC4:]] = llvm.load %{{[0-9]+}} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#LOADCAVEC4:]] = llvm.load %{{[0-9]+}} {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#RESULTCAVEC:]] = llvm.insertelement %[[#SUMCA:]], %[[#LOADCAVEC4:]][%[[#LOADCAIDX2:]] : i32] : vector<4xi32> // CHECK: llvm.store %[[#RESULTCAVEC:]], %{{[0-9]+}} {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Binary arithmetic operators. vi4 d = a + b; - // CHECK: %[[#T65:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T66:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T65:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T66:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T67:]] = llvm.add %[[#T65]], %[[#T66]] : vector<4xi32> // CHECK: llvm.store %[[#T67]], %[[#T9:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 e = a - b; - // CHECK: %[[#T68:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T69:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T68:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T69:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T70:]] = llvm.sub %[[#T68]], %[[#T69]] : vector<4xi32> // CHECK: llvm.store %[[#T70]], %[[#T11:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 f = a * b; - // CHECK: %[[#T71:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T72:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T71:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T72:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T73:]] = llvm.mul %[[#T71]], %[[#T72]] : vector<4xi32> // CHECK: llvm.store %[[#T73]], %[[#T13:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 g = a / b; - // CHECK: %[[#T74:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T75:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T74:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T75:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T76:]] = llvm.sdiv %[[#T74]], %[[#T75]] : vector<4xi32> // CHECK: llvm.store %[[#T76]], %[[#T15:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 h = a % b; - // CHECK: %[[#T77:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T78:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T77:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T78:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T79:]] = llvm.srem %[[#T77]], %[[#T78]] : vector<4xi32> // CHECK: llvm.store %[[#T79]], %[[#T17:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 i = a & b; - // CHECK: %[[#T80:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T81:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T80:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T81:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T82:]] = llvm.and %[[#T80]], %[[#T81]] : vector<4xi32> // CHECK: llvm.store %[[#T82]], %[[#T19:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 j = a | b; - // CHECK: %[[#T83:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T84:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T83:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T84:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T85:]] = llvm.or %[[#T83]], %[[#T84]] : vector<4xi32> // CHECK: llvm.store %[[#T85]], %[[#T21:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 k = a ^ b; - // CHECK: %[[#T86:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T87:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T86:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T87:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T88:]] = llvm.xor %[[#T86]], %[[#T87]] : vector<4xi32> // CHECK: llvm.store %[[#T88]], %[[#T23:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Unary arithmetic operators. vi4 l = +a; - // CHECK: %[[#T89:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T89:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: llvm.store %[[#T89]], %[[#T25:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 m = -a; - // CHECK: %[[#T90:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T90:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T91:]] = llvm.mlir.zero : vector<4xi32> // CHECK: %[[#T92:]] = llvm.sub %[[#T91]], %[[#T90]] : vector<4xi32> // CHECK: llvm.store %[[#T92]], %[[#T27:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 n = ~a; - // CHECK: %[[#T93:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T93:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T94:]] = llvm.mlir.constant(-1 : i32) : i32 // CHECK: %[[#T95:]] = llvm.mlir.undef : vector<4xi32> // CHECK: %[[#T96:]] = llvm.mlir.constant(0 : i64) : i64 @@ -161,38 +161,38 @@ void vector_int_test(int x) { // Comparisons vi4 o = a == b; - // CHECK: %[[#T105:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T106:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T105:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T106:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T107:]] = llvm.icmp "eq" %[[#T105]], %[[#T106]] : vector<4xi32> // CHECK: %[[#T108:]] = llvm.sext %[[#T107]] : vector<4xi1> to vector<4xi32> // CHECK: llvm.store %[[#T108]], %[[#To:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 p = a != b; - // CHECK: %[[#T109:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T110:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T109:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T110:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T111:]] = llvm.icmp "ne" %[[#T109]], %[[#T110]] : vector<4xi32> // CHECK: %[[#T112:]] = llvm.sext %[[#T111]] : vector<4xi1> to vector<4xi32> // CHECK: llvm.store %[[#T112]], %[[#Tp:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 q = a < b; - // CHECK: %[[#T113:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T114:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T113:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T114:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T115:]] = llvm.icmp "slt" %[[#T113]], %[[#T114]] : vector<4xi32> // CHECK: %[[#T116:]] = llvm.sext %[[#T115]] : vector<4xi1> to vector<4xi32> // CHECK: llvm.store %[[#T116]], %[[#Tq:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 r = a > b; - // CHECK: %[[#T117:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T118:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T117:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T118:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T119:]] = llvm.icmp "sgt" %[[#T117]], %[[#T118]] : vector<4xi32> // CHECK: %[[#T120:]] = llvm.sext %[[#T119]] : vector<4xi1> to vector<4xi32> // CHECK: llvm.store %[[#T120]], %[[#Tr:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 s = a <= b; - // CHECK: %[[#T121:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T122:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T121:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T122:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T123:]] = llvm.icmp "sle" %[[#T121]], %[[#T122]] : vector<4xi32> // CHECK: %[[#T124:]] = llvm.sext %[[#T123]] : vector<4xi1> to vector<4xi32> // CHECK: llvm.store %[[#T124]], %[[#Ts:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr vi4 t = a >= b; - // CHECK: %[[#T125:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#T126:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T125:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T126:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#T127:]] = llvm.icmp "sge" %[[#T125]], %[[#T126]] : vector<4xi32> // CHECK: %[[#T128:]] = llvm.sext %[[#T127]] : vector<4xi1> to vector<4xi32> // CHECK: llvm.store %[[#T128]], %[[#Tt:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr @@ -201,8 +201,8 @@ void vector_int_test(int x) { vi4 u = __builtin_shufflevector(a, b, 7, 5, 3, 1); // CHECK: %[[#Tu:]] = llvm.shufflevector %[[#bsva:]], %[[#bsvb:]] [7, 5, 3, 1] : vector<4xi32> vi4 v = __builtin_shufflevector(a, b); - // CHECK: %[[#sv_a:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> - // CHECK: %[[#sv_b:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#sv_a:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#sv_b:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#sv0:]] = llvm.mlir.constant(3 : i32) : i32 // CHECK: %[[#sv1:]] = llvm.mlir.undef : vector<4xi32> // CHECK: %[[#sv2:]] = llvm.mlir.constant(0 : i64) : i64 @@ -250,8 +250,8 @@ void vector_double_test(int x, double y) { // Non-const vector initialization. vd2 b = { y, y + 1.0 }; - // CHECK: %[[#T29:]] = llvm.load %[[#T3:]] : !llvm.ptr -> f64 - // CHECK: %[[#T30:]] = llvm.load %[[#T3]] : !llvm.ptr -> f64 + // CHECK: %[[#T29:]] = llvm.load %[[#T3:]] {alignment = 8 : i64} : !llvm.ptr -> f64 + // CHECK: %[[#T30:]] = llvm.load %[[#T3]] {alignment = 8 : i64} : !llvm.ptr -> f64 // CHECK: %[[#T31:]] = llvm.mlir.constant(1.000000e+00 : f64) : f64 // CHECK: %[[#T32:]] = llvm.fadd %[[#T30]], %[[#T31]] : f64 // CHECK: %[[#T33:]] = llvm.mlir.undef : vector<2xf64> @@ -263,84 +263,84 @@ void vector_double_test(int x, double y) { // Extract element. double c = a[x]; - // CHECK: %[[#T38:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T39:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T38:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T39:]] = llvm.load %[[#T1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // CHECK: %[[#T40:]] = llvm.extractelement %[[#T38]][%[[#T39]] : i32] : vector<2xf64> // CHECK: llvm.store %[[#T40]], %[[#T9:]] {alignment = 8 : i64} : f64, !llvm.ptr // Insert element. a[x] = y; - // CHECK: %[[#T41:]] = llvm.load %[[#T3]] : !llvm.ptr -> f64 - // CHECK: %[[#T42:]] = llvm.load %[[#T1:]] : !llvm.ptr -> i32 - // CHECK: %[[#T43:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T41:]] = llvm.load %[[#T3]] {alignment = 8 : i64} : !llvm.ptr -> f64 + // CHECK: %[[#T42:]] = llvm.load %[[#T1:]] {alignment = 4 : i64} : !llvm.ptr -> i32 + // CHECK: %[[#T43:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T44:]] = llvm.insertelement %[[#T41]], %[[#T43]][%[[#T42]] : i32] : vector<2xf64> // CHECK: llvm.store %[[#T44]], %[[#T5]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr // Binary arithmetic operators. vd2 d = a + b; - // CHECK: %[[#T45:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T46:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T45:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T46:]] = llvm.load %[[#T7]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T47:]] = llvm.fadd %[[#T45]], %[[#T46]] : vector<2xf64> // CHECK: llvm.store %[[#T47]], %[[#T11:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr vd2 e = a - b; - // CHECK: %[[#T48:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T49:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T48:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T49:]] = llvm.load %[[#T7]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T50:]] = llvm.fsub %[[#T48]], %[[#T49]] : vector<2xf64> // CHECK: llvm.store %[[#T50]], %[[#T13:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr vd2 f = a * b; - // CHECK: %[[#T51:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T52:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T51:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T52:]] = llvm.load %[[#T7]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T53:]] = llvm.fmul %[[#T51]], %[[#T52]] : vector<2xf64> // CHECK: llvm.store %[[#T53]], %[[#T15:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr vd2 g = a / b; - // CHECK: %[[#T54:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T55:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T54:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T55:]] = llvm.load %[[#T7]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T56:]] = llvm.fdiv %[[#T54]], %[[#T55]] : vector<2xf64> // CHECK: llvm.store %[[#T56]], %[[#T17:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr // Unary arithmetic operators. vd2 l = +a; - // CHECK: %[[#T57:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T57:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: llvm.store %[[#T57]], %[[#T19:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr vd2 m = -a; - // CHECK: %[[#T58:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T58:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T59:]] = llvm.fneg %[[#T58]] : vector<2xf64> // CHECK: llvm.store %[[#T59]], %[[#T21:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr // Comparisons vll2 o = a == b; - // CHECK: %[[#T60:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T61:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T60:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T61:]] = llvm.load %[[#T7]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T62:]] = llvm.fcmp "oeq" %[[#T60]], %[[#T61]] : vector<2xf64> // CHECK: %[[#T63:]] = llvm.sext %[[#T62]] : vector<2xi1> to vector<2xi64> // CHECK: llvm.store %[[#T63]], %[[#To:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr vll2 p = a != b; - // CHECK: %[[#T64:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T65:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T64:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T65:]] = llvm.load %[[#T7]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T66:]] = llvm.fcmp "une" %[[#T64]], %[[#T65]] : vector<2xf64> // CHECK: %[[#T67:]] = llvm.sext %[[#T66]] : vector<2xi1> to vector<2xi64> // CHECK: llvm.store %[[#T67]], %[[#Tp:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr vll2 q = a < b; - // CHECK: %[[#T68:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T69:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T68:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T69:]] = llvm.load %[[#T7]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T70:]] = llvm.fcmp "olt" %[[#T68]], %[[#T69]] : vector<2xf64> // CHECK: %[[#T71:]] = llvm.sext %[[#T70]] : vector<2xi1> to vector<2xi64> // CHECK: llvm.store %[[#T71]], %[[#Tq:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr vll2 r = a > b; - // CHECK: %[[#T72:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T73:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T72:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T73:]] = llvm.load %[[#T7]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T74:]] = llvm.fcmp "ogt" %[[#T72]], %[[#T73]] : vector<2xf64> // CHECK: %[[#T75:]] = llvm.sext %[[#T74]] : vector<2xi1> to vector<2xi64> // CHECK: llvm.store %[[#T75]], %[[#Tr:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr vll2 s = a <= b; - // CHECK: %[[#T76:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T77:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T76:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T77:]] = llvm.load %[[#T7]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T78:]] = llvm.fcmp "ole" %[[#T76]], %[[#T77]] : vector<2xf64> // CHECK: %[[#T79:]] = llvm.sext %[[#T78]] : vector<2xi1> to vector<2xi64> // CHECK: llvm.store %[[#T79]], %[[#Ts:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr vll2 t = a >= b; - // CHECK: %[[#T80:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> - // CHECK: %[[#T81:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T80:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T81:]] = llvm.load %[[#T7]] {alignment = 16 : i64} : !llvm.ptr -> vector<2xf64> // CHECK: %[[#T82:]] = llvm.fcmp "oge" %[[#T80]], %[[#T81]] : vector<2xf64> // CHECK: %[[#T83:]] = llvm.sext %[[#T82]] : vector<2xi1> to vector<2xi64> // CHECK: llvm.store %[[#T83]], %[[#Tt:]] {alignment = 16 : i64} : vector<2xi64>, !llvm.ptr From 70f52758cadd4ab3f645e069038008fa1650d6d8 Mon Sep 17 00:00:00 2001 From: Ivan Murashko Date: Fri, 24 May 2024 20:06:08 +0100 Subject: [PATCH 1596/2301] [CIR][CodeGen] Support trailing_zeros for constant string literals (#617) The patch resolves [issue #248](https://github.com/llvm/clangir/issues/248). It can be considered a subsequent patch to [#373](https://github.com/llvm/clangir/pull/373), where the case of empty strings was processed. The new patch adds processing for non-empty strings that may contain trailing zeros, such as: ``` char big_string[100000] = "123"; ``` That is converted to ``` @big_string = #cir.const_array<"123" : !cir.array, trailing_zeros> : !cir.array ``` --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 19 +++++++++++++++---- clang/test/CIR/CodeGen/globals.c | 4 +++- clang/test/CIR/CodeGen/string-literals.c | 24 ++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/string-literals.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index ad6e4fbfec4c..e7686e4c3531 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -153,15 +153,26 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { unsigned size = 0) { unsigned finalSize = size ? size : str.size(); + size_t lastNonZeroPos = str.find_last_not_of('\0'); // If the string is full of null bytes, emit a #cir.zero rather than // a #cir.const_array. - if (str.count('\0') == str.size()) { + if (lastNonZeroPos == llvm::StringRef::npos) { auto arrayTy = mlir::cir::ArrayType::get(getContext(), eltTy, finalSize); return getZeroAttr(arrayTy); } - - auto arrayTy = mlir::cir::ArrayType::get(getContext(), eltTy, finalSize); - return getConstArray(mlir::StringAttr::get(str, arrayTy), arrayTy); + // We will use trailing zeros only if there are more than one zero + // at the end + int trailingZerosNum = + finalSize > lastNonZeroPos + 2 ? finalSize - lastNonZeroPos - 1 : 0; + auto truncatedArrayTy = mlir::cir::ArrayType::get( + getContext(), eltTy, finalSize - trailingZerosNum); + auto fullArrayTy = + mlir::cir::ArrayType::get(getContext(), eltTy, finalSize); + return mlir::cir::ConstArrayAttr::get( + getContext(), fullArrayTy, + mlir::StringAttr::get(str.drop_back(trailingZerosNum), + truncatedArrayTy), + trailingZerosNum); } mlir::cir::ConstArrayAttr getConstArray(mlir::Attribute attrs, diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 3c58d8bbfd29..6548bb161cdb 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -8,6 +8,8 @@ char string[] = "whatnow"; // CHECK: cir.global external @string = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array +char big_string[100000] = "123"; +// CHECK: cir.global external @big_string = #cir.const_array<"123" : !cir.array, trailing_zeros> : !cir.array int sint[] = {123, 456, 789}; // CHECK: cir.global external @sint = #cir.const_array<[#cir.int<123> : !s32i, #cir.int<456> : !s32i, #cir.int<789> : !s32i]> : !cir.array int filler_sint[4] = {1, 2}; // Ensure missing elements are zero-initialized. @@ -41,7 +43,7 @@ struct { char y[3]; char z[3]; } nestedString = {"1", "", "\0"}; -// CHECK: cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.zero : !cir.array, #cir.zero : !cir.array}> +// CHECK: cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1" : !cir.array, trailing_zeros> : !cir.array, #cir.zero : !cir.array, #cir.zero : !cir.array}> struct { char *name; diff --git a/clang/test/CIR/CodeGen/string-literals.c b/clang/test/CIR/CodeGen/string-literals.c new file mode 100644 index 000000000000..23728b4f4c4c --- /dev/null +++ b/clang/test/CIR/CodeGen/string-literals.c @@ -0,0 +1,24 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +struct { + char x[10]; + char y[10]; + char z[10]; +} literals = {"1", "", "\00"}; + +// CIR-LABEL: @literals +// CIR: #cir.const_struct<{ +// CIR: #cir.const_array<"1" : !cir.array, trailing_zeros> : !cir.array, +// CIR: #cir.zero : !cir.array, +// CIR: #cir.zero : !cir.array +// CIR: }> + +// LLVM-LABEL: @literals +// LLVM: global %struct.anon.1 { +// LLVM: [10 x i8] c"1\00\00\00\00\00\00\00\00\00", +// LLVM: [10 x i8] zeroinitializer, +// LLVM: [10 x i8] zeroinitializer +// LLVM: } From 629dcf7ee5f6bb1bd5d27273977b2c685816348a Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 28 May 2024 19:23:26 -0300 Subject: [PATCH 1597/2301] [CIR][IR][NFC] Fix CallOp builder with void return (#629) One of the builders was adding a retun value to the CallOp when given a void return type. The expected behavior is to not add a return value. Two other minor fixes were added to the return value: its constraint was replaced from variadic to optional and it was assigned a name. This prevents function calls with multiple returns and facilitates access to the single return value, respectively. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 8 +++++--- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 8 ++++---- clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp | 8 ++++---- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 2 +- .../Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp | 2 +- 5 files changed, 15 insertions(+), 13 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b5f809cc9692..73ce72dc7468 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2816,7 +2816,7 @@ def CallOp : CIR_CallOp<"call"> { }]; let arguments = commonArgs; - let results = (outs Variadic); + let results = (outs Optional:$result); let builders = [ OpBuilder<(ins "FuncOp":$callee, CArg<"ValueRange", "{}">:$operands), [{ @@ -2837,13 +2837,15 @@ def CallOp : CIR_CallOp<"call"> { CArg<"ValueRange", "{}">:$operands), [{ $_state.addOperands(operands); $_state.addAttribute("callee", callee); - $_state.addTypes(resType); + if (resType && !resType.isa()) + $_state.addTypes(resType); }]>, OpBuilder<(ins "SymbolRefAttr":$callee, CArg<"ValueRange", "{}">:$operands), [{ $_state.addOperands(operands); $_state.addAttribute("callee", callee); - }]>]; + }]> + ]; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 2a23fd1a73ed..16f4b8b4f646 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -203,7 +203,7 @@ CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { fnOp = cast(builtin); return builder.create( - loc, fnOp, mlir::ValueRange{CurCoro.Data->CoroId.getResult(0)}); + loc, fnOp, mlir::ValueRange{CurCoro.Data->CoroId.getResult()}); } mlir::cir::CallOp @@ -225,7 +225,7 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, return builder.create( loc, fnOp, - mlir::ValueRange{CurCoro.Data->CoroId.getResult(0), coroframeAddr}); + mlir::ValueRange{CurCoro.Data->CoroId.getResult(), coroframeAddr}); } mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, @@ -273,7 +273,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { auto storeAddr = coroFrame.getPointer(); builder.CIRBaseBuilderTy::createStore(openCurlyLoc, nullPtrCst, storeAddr); - builder.create(openCurlyLoc, coroAlloc.getResult(0), + builder.create(openCurlyLoc, coroAlloc.getResult(), /*withElseRegion=*/false, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -287,7 +287,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { buildCoroBeginBuiltinCall( openCurlyLoc, builder.create(openCurlyLoc, allocaTy, storeAddr)) - .getResult(0); + .getResult(); // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided. if (auto *RetOnAllocFailure = S.getReturnStmtOnAllocFailure()) diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index 7b1218ad7c27..bd5097d3f323 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -104,7 +104,7 @@ bool IdiomRecognizerPass::raiseStdFind(CallOp call) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(call.getOperation()); auto findOp = builder.create( - call.getLoc(), call.getResult(0).getType(), call.getCalleeAttr(), + call.getLoc(), call.getResult().getType(), call.getCalleeAttr(), call.getOperand(0), call.getOperand(1), call.getOperand(2)); call.replaceAllUsesWith(findOp); @@ -140,7 +140,7 @@ bool IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) { if (!callExprAttr) return false; - if (!isIteratorLikeType(call.getResult(0).getType())) + if (!isIteratorLikeType(call.getResult().getType())) return false; // First argument is the container "this" pointer. @@ -154,13 +154,13 @@ bool IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) { if (opts.emitRemarkFoundCalls()) emitRemark(call.getLoc()) << "found call to begin() iterator"; iterOp = builder.create( - call.getLoc(), call.getResult(0).getType(), call.getCalleeAttr(), + call.getLoc(), call.getResult().getType(), call.getCalleeAttr(), call.getOperand(0)); } else if (callExprAttr.isIteratorEndCall()) { if (opts.emitRemarkFoundCalls()) emitRemark(call.getLoc()) << "found call to end() iterator"; iterOp = builder.create( - call.getLoc(), call.getResult(0).getType(), call.getCalleeAttr(), + call.getLoc(), call.getResult().getType(), call.getCalleeAttr(), call.getOperand(0)); } else { return false; diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index e77a6bdf14b8..bf65b038d35c 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -1265,7 +1265,7 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, if (auto callOp = dyn_cast(dataSrcOp)) { // iter = vector::begin() getPmap()[addr].clear(); - getPmap()[addr].insert(State::getLocalValue(callOp.getResult(0))); + getPmap()[addr].insert(State::getLocalValue(callOp.getResult())); } if (auto loadOp = dyn_cast(dataSrcOp)) { diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp index dc997e604a96..f08574ff49ea 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp @@ -71,7 +71,7 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, builder .create(loc, dynCastFuncRef, builder.getVoidPtrTy(), dynCastFuncArgs) - .getResult(0); + .getResult(); assert(castedPtr.getType().isa() && "the return value of __dynamic_cast should be a ptr"); From 9ee9441ed511b242368dfda3644f86e650bf6273 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Tue, 28 May 2024 18:25:52 -0400 Subject: [PATCH 1598/2301] [CIR][LowerToLLVM][CXXABI] Lower cir.va.arg (#573) lowering var_arg op for ARM64 architecture. This is CIR lowering. This PR modified LoweringPrepare CXXABI code to make LoweringPrepareArm64CXXABI class inherit more generic LoweringPrepareItaniumCXXABI, this way lowering var_arg would be only meaningful for arm64 targets and for other arch its no op for now. The ABI doc and detailed algorithm description can be found in this official doc. [](https://github.com/ARM-software/abi-aa/blob/617079d8a0d45bec83d351974849483cf0cc66d5/aapcs64/aapcs64.rst#appendix-variable-argument-lists) --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 13 + clang/lib/CIR/Dialect/IR/MissingFeatures.h | 9 + .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../Dialect/Transforms/LoweringPrepare.cpp | 40 +- .../LoweringPrepareAArch64CXXABI.cpp | 350 ++++++++++++++++++ .../Transforms/LoweringPrepareCXXABI.h | 13 + .../LoweringPrepareItaniumCXXABI.cpp | 37 +- .../Transforms/LoweringPrepareItaniumCXXABI.h | 24 ++ clang/test/CIR/CodeGen/var-arg-float.c | 121 ++++++ clang/test/CIR/CodeGen/var-arg.c | 122 ++++++ clang/test/CIR/CodeGen/variadics.c | 11 +- 11 files changed, 709 insertions(+), 32 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h create mode 100644 clang/test/CIR/CodeGen/var-arg-float.c create mode 100644 clang/test/CIR/CodeGen/var-arg.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 29fda2decf56..e7c63dd1f5b3 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -269,6 +269,19 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createCast(mlir::cir::CastKind::int_to_ptr, src, newTy); } + mlir::Value createGetMemberOp(mlir::Location &loc, mlir::Value structPtr, + const char *fldName, unsigned idx) { + + assert(structPtr.getType().isa()); + auto structBaseTy = + structPtr.getType().cast().getPointee(); + assert(structBaseTy.isa()); + auto fldTy = structBaseTy.cast().getMembers()[idx]; + auto fldPtrTy = ::mlir::cir::PointerType::get(getContext(), fldTy); + return create(loc, fldPtrTy, structPtr, fldName, + idx); + } + mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy) { return createCast(mlir::cir::CastKind::ptr_to_int, src, newTy); } diff --git a/clang/lib/CIR/Dialect/IR/MissingFeatures.h b/clang/lib/CIR/Dialect/IR/MissingFeatures.h index e21fc0e0b191..2e4e9c8ad9e6 100644 --- a/clang/lib/CIR/Dialect/IR/MissingFeatures.h +++ b/clang/lib/CIR/Dialect/IR/MissingFeatures.h @@ -21,6 +21,15 @@ struct MissingFeatures { // C++ ABI support static bool cxxABI() { return false; } static bool setCallingConv() { return false; } + static bool handleBigEndian() { return false; } + static bool handleAArch64Indirect() { return false; } + static bool classifyArgumentTypeForAArch64() { return false; } + static bool supportgetCoerceToTypeForAArch64() { return false; } + static bool supportTySizeQueryForAArch64() { return false; } + static bool supportTyAlignQueryForAArch64() { return false; } + static bool supportisHomogeneousAggregateQueryForAArch64() { return false; } + static bool supportisEndianQueryForAArch64() { return false; } + static bool supportisAggregateTypeForABIAArch64() { return false; } // Address space related static bool addressSpace() { return false; } diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 9a0806bde8d4..babe408bcde6 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -2,6 +2,7 @@ add_clang_library(MLIRCIRTransforms LifetimeCheck.cpp LoweringPrepare.cpp LoweringPrepareItaniumCXXABI.cpp + LoweringPrepareAArch64CXXABI.cpp MergeCleanups.cpp DropAST.cpp IdiomRecognizer.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 4e9ded77b441..bb8c452b46ba 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -15,7 +15,9 @@ #include "clang/AST/CharUnits.h" #include "clang/AST/Mangle.h" #include "clang/Basic/Module.h" +#include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" @@ -70,6 +72,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void runOnOp(Operation *op); void lowerThreeWayCmpOp(CmpThreeWayOp op); + void lowerVAArgOp(VAArgOp op); void lowerGlobalOp(GlobalOp op); void lowerDynamicCastOp(DynamicCastOp op); void lowerStdFindOp(StdFindOp op); @@ -108,15 +111,24 @@ struct LoweringPreparePass : public LoweringPrepareBase { void setASTContext(clang::ASTContext *c) { astCtx = c; + auto abiStr = c->getTargetInfo().getABI(); switch (c->getCXXABIKind()) { case clang::TargetCXXABI::GenericItanium: + cxxABI.reset(::cir::LoweringPrepareCXXABI::createItaniumABI()); + break; case clang::TargetCXXABI::GenericAArch64: case clang::TargetCXXABI::AppleARM64: - // TODO: this isn't quite right, clang uses AppleARM64CXXABI which - // inherits from ARMCXXABI. We'll have to follow suit. - cxxABI.reset(::cir::LoweringPrepareCXXABI::createItaniumABI()); + // TODO: This is temporary solution. ABIKind info should be + // propagated from the targetInfo managed by ABI lowering + // query system. + assert(abiStr == "aapcs" || abiStr == "darwinpcs" || + abiStr == "aapcs-soft"); + cxxABI.reset(::cir::LoweringPrepareCXXABI::createAArch64ABI( + abiStr == "aapcs" + ? ::cir::AArch64ABIKind::AAPCS + : (abiStr == "darwinpccs" ? ::cir::AArch64ABIKind::DarwinPCS + : ::cir::AArch64ABIKind::AAPCSSoft))); break; - default: llvm_unreachable("NYI"); } @@ -320,6 +332,19 @@ static void canonicalizeIntrinsicThreeWayCmp(CIRBaseBuilderTy &builder, op.erase(); } +void LoweringPreparePass::lowerVAArgOp(VAArgOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPoint(op); + ::cir::CIRDataLayout datalayout(theModule); + + auto res = cxxABI->lowerVAArg(builder, op, datalayout); + if (res) { + op.replaceAllUsesWith(res); + op.erase(); + } + return; +} + void LoweringPreparePass::lowerThreeWayCmpOp(CmpThreeWayOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op); @@ -603,6 +628,8 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { void LoweringPreparePass::runOnOp(Operation *op) { if (auto threeWayCmp = dyn_cast(op)) { lowerThreeWayCmpOp(threeWayCmp); + } else if (auto vaArgOp = dyn_cast(op)) { + lowerVAArgOp(vaArgOp); } else if (auto getGlobal = dyn_cast(op)) { lowerGlobalOp(getGlobal); } else if (auto dynamicCast = dyn_cast(op)) { @@ -635,8 +662,9 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { - if (isa(op)) + if (isa( + op)) opsToTransform.push_back(op); }); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp new file mode 100644 index 000000000000..460266161ead --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp @@ -0,0 +1,350 @@ +//====- LoweringPrepareArm64CXXABI.cpp - Arm64 ABI specific code -----====// +// +// Part of the LLVM Project, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===------------------------------------------------------------------===// +// +// This file provides ARM64 C++ ABI specific code that is used during LLVMIR +// lowering prepare. +// +//===------------------------------------------------------------------===// + +#include "../IR/MissingFeatures.h" +#include "LoweringPrepareItaniumCXXABI.h" +#include "clang/AST/CharUnits.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" + +#include + +using cir::AArch64ABIKind; + +namespace { +class LoweringPrepareAArch64CXXABI : public LoweringPrepareItaniumCXXABI { +public: + LoweringPrepareAArch64CXXABI(AArch64ABIKind k) : Kind(k) {} + mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) override; + +private: + AArch64ABIKind Kind; + mlir::Value lowerAAPCSVAArg(cir::CIRBaseBuilderTy &builder, + mlir::cir::VAArgOp op, + const cir::CIRDataLayout &datalayout); + bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; } + mlir::Value lowerMSVAArg(cir::CIRBaseBuilderTy &builder, + mlir::cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) { + llvm_unreachable("MSVC ABI not supported yet"); + } + mlir::Value lowerDarwinVAArg(cir::CIRBaseBuilderTy &builder, + mlir::cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) { + llvm_unreachable("Darwin ABI not supported yet"); + } +}; +} // namespace + +cir::LoweringPrepareCXXABI * +cir::LoweringPrepareCXXABI::createAArch64ABI(AArch64ABIKind k) { + return new LoweringPrepareAArch64CXXABI(k); +} + +mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( + cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) { + auto loc = op->getLoc(); + auto valist = op->getOperand(0); + auto opResTy = op.getType(); + // front end should not produce non-scalar type of VAArgOp + bool isSupportedType = + opResTy.isa(); + + // Homogenous Aggregate type not supported and indirect arg + // passing not supported yet. And for these supported types, + // we should not have alignment greater than 8 problem. + assert(isSupportedType); + assert(!cir::MissingFeatures::classifyArgumentTypeForAArch64()); + // indirect arg passing would expect one more level of pointer dereference. + assert(!cir::MissingFeatures::handleAArch64Indirect()); + // false as a place holder for now, as we don't have a way to query + bool isIndirect = false; + assert(!cir::MissingFeatures::supportgetCoerceToTypeForAArch64()); + // we don't convert to LLVM Type here as we are lowering to CIR here. + // so baseTy is the just type of the result of va_arg. + // but it depends on arg type indirectness and coercion defined by ABI. + auto baseTy = opResTy; + + if (baseTy.isa()) { + llvm_unreachable("ArrayType VAArg loweing NYI"); + } + // numRegs may not be 1 if ArrayType is supported. + unsigned numRegs = 1; + + if (Kind == AArch64ABIKind::AAPCSSoft) { + llvm_unreachable("AAPCSSoft cir.var_arg lowering NYI"); + } + bool IsFPR = mlir::cir::isAnyFloatingPointType(baseTy); + + // The AArch64 va_list type and handling is specified in the Procedure Call + // Standard, section B.4: + // + // struct { + // void *__stack; + // void *__gr_top; + // void *__vr_top; + // int __gr_offs; + // int __vr_offs; + // }; + auto curInsertionP = builder.saveInsertionPoint(); + auto currentBlock = builder.getInsertionBlock(); + auto boolTy = builder.getBoolTy(); + + auto maybeRegBlock = builder.createBlock(builder.getBlock()->getParent()); + auto inRegBlock = builder.createBlock(builder.getBlock()->getParent()); + auto onStackBlock = builder.createBlock(builder.getBlock()->getParent()); + + //======================================= + // Find out where argument was passed + //======================================= + + // If v/gr_offs >= 0 we're already using the stack for this type of + // argument. We don't want to keep updating regOffs (in case it overflows, + // though anyone passing 2GB of arguments, each at most 16 bytes, deserves + // whatever they get). + + assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + // One is just place holder for now, as we don't have a way to query + // type size and alignment. + clang::CharUnits tySize = + clang::CharUnits::fromQuantity(datalayout.getTypeStoreSize(opResTy)); + clang::CharUnits tyAlign = + clang::CharUnits::fromQuantity(datalayout.getAlignment(opResTy, true)); + + // indirectness, type size and type alignment all + // decide regSize, but they are all ABI defined + // thus need ABI lowering query system. + assert(!cir::MissingFeatures::handleAArch64Indirect()); + int regSize = isIndirect ? 8 : tySize.getQuantity(); + int regTopIndex; + mlir::Value regOffsP; + mlir::cir::LoadOp regOffs; + + builder.restoreInsertionPoint(curInsertionP); + // 3 is the field number of __gr_offs, 4 is the field number of __vr_offs + if (!IsFPR) { + regOffsP = builder.createGetMemberOp(loc, valist, "gr_offs", 3); + regOffs = builder.create(loc, regOffsP); + regTopIndex = 1; + regSize = llvm::alignTo(regSize, 8); + } else { + regOffsP = builder.createGetMemberOp(loc, valist, "vr_offs", 4); + regOffs = builder.create(loc, regOffsP); + regTopIndex = 2; + regSize = 16 * numRegs; + } + + //======================================= + // Find out where argument was passed + //======================================= + + // If regOffs >= 0 we're already using the stack for this type of + // argument. We don't want to keep updating regOffs (in case it overflows, + // though anyone passing 2GB of arguments, each at most 16 bytes, deserves + // whatever they get). + auto zeroValue = builder.create( + loc, regOffs.getType(), mlir::cir::IntAttr::get(regOffs.getType(), 0)); + auto usingStack = builder.create( + loc, boolTy, mlir::cir::CmpOpKind::ge, regOffs, zeroValue); + builder.create(loc, usingStack, onStackBlock, + maybeRegBlock); + + auto contBlock = currentBlock->splitBlock(op); + + // Otherwise, at least some kind of argument could go in these registers, the + // question is whether this particular type is too big. + builder.setInsertionPointToEnd(maybeRegBlock); + + // Integer arguments may need to correct register alignment (for example a + // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we + // align __gr_offs to calculate the potential address. + if (!IsFPR && !isIndirect && tyAlign.getQuantity() > 8) { + assert(!cir::MissingFeatures::handleAArch64Indirect()); + assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + llvm_unreachable("register alignment correction NYI"); + } + + // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. + // The fact that this is done unconditionally reflects the fact that + // allocating an argument to the stack also uses up all the remaining + // registers of the appropriate kind. + auto regSizeValue = builder.create( + loc, regOffs.getType(), + mlir::cir::IntAttr::get(regOffs.getType(), regSize)); + auto newOffset = builder.create( + loc, regOffs.getType(), mlir::cir::BinOpKind::Add, regOffs, regSizeValue); + builder.createStore(loc, newOffset, regOffsP); + // Now we're in a position to decide whether this argument really was in + // registers or not. + auto inRegs = builder.create( + loc, boolTy, mlir::cir::CmpOpKind::le, newOffset, zeroValue); + builder.create(loc, inRegs, inRegBlock, onStackBlock); + + //======================================= + // Argument was in registers + //======================================= + // Now we emit the code for if the argument was originally passed in + // registers. First start the appropriate block: + builder.setInsertionPointToEnd(inRegBlock); + auto regTopP = builder.createGetMemberOp( + loc, valist, IsFPR ? "vr_top" : "gr_top", regTopIndex); + auto regTop = builder.create(loc, regTopP); + auto i8Ty = mlir::IntegerType::get(builder.getContext(), 8); + auto i8PtrTy = mlir::cir::PointerType::get(builder.getContext(), i8Ty); + auto castRegTop = builder.createBitcast(regTop, i8PtrTy); + auto resAsInt8P = builder.create( + loc, castRegTop.getType(), castRegTop, regOffs); + + if (isIndirect) { + assert(!cir::MissingFeatures::handleAArch64Indirect()); + llvm_unreachable("indirect arg passing NYI"); + } + + // TODO: isHFA, numMembers and base should be query result from query + uint64_t numMembers = 0; + assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); + bool isHFA = false; + // though endianess can be known from datalayout, it might need an unified + // ABI lowering query system to answer the question. + assert(!cir::MissingFeatures::supportisEndianQueryForAArch64()); + bool isBigEndian = datalayout.isBigEndian(); + assert(!cir::MissingFeatures::supportisAggregateTypeForABIAArch64()); + // TODO: isAggregateTypeForABI should be query result from ABI info + bool isAggregateTypeForABI = false; + if (isHFA && numMembers > 1) { + // Homogeneous aggregates passed in registers will have their elements split + // and stored 16-bytes apart regardless of size (they're notionally in qN, + // qN+1, ...). We reload and store into a temporary local variable + // contiguously. + assert(!isIndirect && "Homogeneous aggregates should be passed directly"); + llvm_unreachable("Homogeneous aggregates NYI"); + } else { + assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + // TODO: slotSize should be query result about alignment. + clang::CharUnits slotSize = clang::CharUnits::fromQuantity(8); + if (isBigEndian && !isIndirect && (isHFA || isAggregateTypeForABI) && + tySize < slotSize) { + clang::CharUnits offset = slotSize - tySize; + auto offsetConst = builder.create( + loc, regOffs.getType(), + mlir::cir::IntAttr::get(regOffs.getType(), offset.getQuantity())); + + resAsInt8P = builder.create( + loc, castRegTop.getType(), resAsInt8P, offsetConst); + } + } + + auto resAsVoidP = builder.createBitcast(resAsInt8P, regTop.getType()); + + // On big-endian platforms, the value will be right-aligned in its stack slot. + // and we also need to think about other ABI lowering concerns listed below. + assert(!cir::MissingFeatures::handleBigEndian()); + assert(!cir::MissingFeatures::handleAArch64Indirect()); + assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); + assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + + builder.create(loc, mlir::ValueRange{resAsVoidP}, contBlock); + + //======================================= + // Argument was on the stack + //======================================= + builder.setInsertionPointToEnd(onStackBlock); + auto stackP = builder.createGetMemberOp(loc, valist, "stack", 0); + + auto onStackPtr = builder.create(loc, stackP); + auto ptrDiffTy = + mlir::cir::IntType::get(builder.getContext(), 64, /*signed=*/false); + + assert(!cir::MissingFeatures::handleAArch64Indirect()); + assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + // Again, stack arguments may need realignment. In this case both integer and + // floating-point ones might be affected. + if (!isIndirect && tyAlign.getQuantity() > 8) { + // TODO: this algorithm requres casting from ptr type to int type, then + // back to ptr type thus needs careful handling. NYI now. + llvm_unreachable("alignment greater than 8 NYI"); + } + + // All stack slots are multiples of 8 bytes. + clang::CharUnits stackSlotSize = clang::CharUnits::fromQuantity(8); + clang::CharUnits stackSize; + if (isIndirect) + stackSize = stackSlotSize; + else + stackSize = tySize.alignTo(stackSlotSize); + + // On big-endian platforms, the value will be right-aligned in its stack slot + // Also, the consideration involves type size and alignment, arg indirectness + // which are all ABI defined thus need ABI lowering query system. + // The implementation we have now supports most common cases which assumes + // no indirectness, no alignment greater than 8, and little endian. + assert(!cir::MissingFeatures::handleBigEndian()); + assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + + auto stackSizeC = builder.create( + loc, ptrDiffTy, + mlir::cir::IntAttr::get(ptrDiffTy, stackSize.getQuantity())); + auto castStack = builder.createBitcast(onStackPtr, i8PtrTy); + // Write the new value of __stack for the next call to va_arg + auto newStackAsi8Ptr = builder.create( + loc, castStack.getType(), castStack, stackSizeC); + auto newStack = builder.createBitcast(newStackAsi8Ptr, onStackPtr.getType()); + builder.createStore(loc, newStack, stackP); + + if (isBigEndian && !isAggregateTypeForABI && tySize < stackSlotSize) { + clang::CharUnits offset = stackSlotSize - tySize; + auto offsetConst = builder.create( + loc, ptrDiffTy, + mlir::cir::IntAttr::get(ptrDiffTy, offset.getQuantity())); + auto offsetStackAsi8Ptr = builder.create( + loc, castStack.getType(), castStack, offsetConst); + auto onStackPtrBE = + builder.createBitcast(offsetStackAsi8Ptr, onStackPtr.getType()); + builder.create(loc, mlir::ValueRange{onStackPtrBE}, + contBlock); + } else { + builder.create(loc, mlir::ValueRange{onStackPtr}, + contBlock); + } + + // generate additional instructions for end block + builder.setInsertionPoint(op); + contBlock->addArgument(onStackPtr.getType(), loc); + auto resP = contBlock->getArgument(0); + assert(resP.getType().isa()); + auto opResPTy = mlir::cir::PointerType::get(builder.getContext(), opResTy); + auto castResP = builder.createBitcast(resP, opResPTy); + auto res = builder.create(loc, castResP); + // there would be another level of ptr dereference if indirect arg passing + assert(!cir::MissingFeatures::handleAArch64Indirect()); + if (isIndirect) { + res = builder.create(loc, res.getResult()); + } + return res.getResult(); +} + +mlir::Value +LoweringPrepareAArch64CXXABI::lowerVAArg(cir::CIRBaseBuilderTy &builder, + mlir::cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) { + return Kind == AArch64ABIKind::Win64 ? lowerMSVAArg(builder, op, datalayout) + : isDarwinPCS() ? lowerDarwinVAArg(builder, op, datalayout) + : lowerAAPCSVAArg(builder, op, datalayout); +} diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h index 549a93e07c37..717516d09664 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h @@ -18,14 +18,27 @@ #include "mlir/IR/Value.h" #include "clang/AST/ASTContext.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" namespace cir { +// TODO: This is a temporary solution to know AArch64 ABI Kind +// This should be removed once we have a proper ABI info query +enum class AArch64ABIKind { + AAPCS = 0, + DarwinPCS, + Win64, + AAPCSSoft, +}; class LoweringPrepareCXXABI { public: static LoweringPrepareCXXABI *createItaniumABI(); + static LoweringPrepareCXXABI *createAArch64ABI(AArch64ABIKind k); + virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, + mlir::cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) = 0; virtual ~LoweringPrepareCXXABI() {} virtual mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp index f08574ff49ea..c7984f86c451 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp @@ -1,40 +1,31 @@ -//====- LoweringPrepareItaniumCXXABI.h - Itanium ABI specific code --------===// +//====- LoweringPrepareItaniumCXXABI.cpp - Itanium ABI specific code-----===// // -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// Part of the LLVM Project, under the Apache License v2.0 with +// LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // -//===----------------------------------------------------------------------===// +//===--------------------------------------------------------------------===// // -// This file provides Itanium C++ ABI specific code that is used during LLVMIR -// lowering prepare. +// This file provides Itanium C++ ABI specific code +// that is used during LLVMIR lowering prepare. // -//===----------------------------------------------------------------------===// +//===--------------------------------------------------------------------===// +#include "LoweringPrepareItaniumCXXABI.h" #include "../IR/MissingFeatures.h" -#include "LoweringPrepareCXXABI.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" using namespace cir; -namespace { - -class LoweringPrepareItaniumCXXABI : public LoweringPrepareCXXABI { -public: - mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, - clang::ASTContext &astCtx, - mlir::cir::DynamicCastOp op) override; -}; - -} // namespace - -LoweringPrepareCXXABI *LoweringPrepareCXXABI::createItaniumABI() { +cir::LoweringPrepareCXXABI *cir::LoweringPrepareCXXABI::createItaniumABI() { return new LoweringPrepareItaniumCXXABI(); } @@ -169,3 +160,11 @@ LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, }) .getResult(); } + +mlir::Value LoweringPrepareItaniumCXXABI::lowerVAArg( + CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, + const ::cir::CIRDataLayout &datalayout) { + // There is no generic cir lowering for var_arg, here we fail + // so to prevent attempt of calling lowerVAArg for ItaniumCXXABI + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h new file mode 100644 index 000000000000..1dbef0d24ddd --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h @@ -0,0 +1,24 @@ +//====- LoweringPrepareItaniumCXXABI.h - Itanium ABI specific code --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides Itanium C++ ABI specific code that is used during LLVMIR +// lowering prepare. +// +//===----------------------------------------------------------------------===// + +#include "LoweringPrepareCXXABI.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" + +class LoweringPrepareItaniumCXXABI : public cir::LoweringPrepareCXXABI { +public: + mlir::Value lowerDynamicCast(cir::CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, + mlir::cir::DynamicCastOp op) override; + mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) override; +}; diff --git a/clang/test/CIR/CodeGen/var-arg-float.c b/clang/test/CIR/CodeGen/var-arg-float.c new file mode 100644 index 000000000000..2b1daedec54a --- /dev/null +++ b/clang/test/CIR/CodeGen/var-arg-float.c @@ -0,0 +1,121 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// XFAIL: * + +#include + +double f1(int n, ...) { + va_list valist; + va_start(valist, n); + double res = va_arg(valist, double); + va_end(valist); + return res; +} + +// BEFORE: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !cir.int, !cir.int} +// BEFORE: cir.func @f1(%arg0: !s32i, ...) -> !cir.double +// BEFORE: [[RETP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["__retval"] +// BEFORE: [[RESP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["res", init] +// BEFORE: cir.va.start [[VARLIST:%.*]] : !cir.ptr +// BEFORE: [[TMP0:%.*]] = cir.va.arg [[VARLIST]] : (!cir.ptr) -> !cir.double +// BEFORE: cir.store [[TMP0]], [[RESP]] : !cir.double, !cir.ptr +// BEFORE: cir.va.end [[VARLIST]] : !cir.ptr +// BEFORE: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !cir.double +// BEFORE: cir.store [[RES]], [[RETP]] : !cir.double, !cir.ptr +// BEFORE: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !cir.double +// BEFORE: cir.return [[RETV]] : !cir.double + +// beginning block cir code +// AFTER: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !cir.int, !cir.int} +// AFTER: cir.func @f1(%arg0: !s32i, ...) -> !cir.double +// AFTER: [[RETP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["__retval"] +// AFTER: [[RESP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["res", init] +// AFTER: cir.va.start [[VARLIST:%.*]] : !cir.ptr +// AFTER: [[VR_OFFS_P:%.*]] = cir.get_member [[VARLIST]][4] {name = "vr_offs"} : !cir.ptr -> !cir.ptr +// AFTER: [[VR_OFFS:%.*]] = cir.load [[VR_OFFS_P]] : !cir.ptr, !s32i +// AFTER: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i +// AFTER: [[CMP0:%.*]] = cir.cmp(ge, [[VR_OFFS]], [[ZERO]]) : !s32i, !cir.bool +// AFTER-NEXT: cir.brcond [[CMP0]] [[BB_ON_STACK:\^bb.*]], [[BB_MAY_REG:\^bb.*]] + + +// AFTER-NEXT: [[BB_END:\^bb.*]]([[BLK_ARG:%.*]]: !cir.ptr): // 2 preds: [[BB_IN_REG:\^bb.*]], [[BB_ON_STACK]] +// AFTER-NEXT: [[TMP0:%.*]] = cir.cast(bitcast, [[BLK_ARG]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr, !cir.double +// AFTER: cir.store [[TMP1]], [[RESP]] : !cir.double, !cir.ptr +// AFTER: cir.va.end [[VARLIST]] : !cir.ptr +// AFTER: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !cir.double +// AFTER: cir.store [[RES]], [[RETP]] : !cir.double, !cir.ptr +// AFTER: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !cir.double +// AFTER: cir.return [[RETV]] : !cir.double + + +// AFTER: [[BB_MAY_REG]]: +// AFTER-NEXT: [[SIXTEEN:%.*]] = cir.const #cir.int<16> : !s32i +// AFTER-NEXT: [[NEW_REG_OFFS:%.*]] = cir.binop(add, [[VR_OFFS]], [[SIXTEEN]]) : !s32i +// AFTER-NEXT: cir.store [[NEW_REG_OFFS]], [[VR_OFFS_P]] : !s32i, !cir.ptr +// AFTER-NEXT: [[CMP1:%.*]] = cir.cmp(le, [[NEW_REG_OFFS]], [[ZERO]]) : !s32i, !cir.bool +// AFTER-NEXT: cir.brcond [[CMP1]] [[BB_IN_REG]], [[BB_ON_STACK]] + + +// AFTER: [[BB_IN_REG]]: +// AFTER-NEXT: [[VR_TOP_P:%.*]] = cir.get_member [[VARLIST]][2] {name = "vr_top"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[VR_TOP:%.*]] = cir.load [[VR_TOP_P]] : !cir.ptr>, !cir.ptr +// AFTER-NEXT: [[TMP2:%.*]] = cir.cast(bitcast, [[VR_TOP]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: [[TMP3:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[VR_OFFS]] : !s32i), !cir.ptr +// AFTER-NEXT: [[IN_REG_OUTPUT:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.br [[BB_END]]([[IN_REG_OUTPUT]] : !cir.ptr) + + +// AFTER: [[BB_ON_STACK]]: +// AFTER-NEXT: [[STACK_P:%.*]] = cir.get_member [[VARLIST]][0] {name = "stack"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[STACK_V:%.*]] = cir.load [[STACK_P]] : !cir.ptr>, !cir.ptr +// AFTER-NEXT: [[EIGHT_IN_PTR_ARITH:%.*]] = cir.const #cir.int<8> : !u64i +// AFTER-NEXT: [[TMP4:%.*]] = cir.cast(bitcast, [[STACK_V]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: [[TMP5:%.*]] = cir.ptr_stride([[TMP4]] : !cir.ptr, [[EIGHT_IN_PTR_ARITH]] : !u64i), !cir.ptr +// AFTER-NEXT: [[NEW_STACK_V:%.*]] = cir.cast(bitcast, [[TMP5]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.store [[NEW_STACK_V]], [[STACK_P]] : !cir.ptr, !cir.ptr> +// AFTER-NEXT: cir.br [[BB_END]]([[STACK_V]] : !cir.ptr) + +// beginning block llvm code +// LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } +// LLVM: define double @f1(i32 %0, ...) +// LLVM: [[ARGN:%.*]] = alloca i32, i64 1, align 4, +// LLVM: [[RETP:%.*]] = alloca double, i64 1, align 8, +// LLVM: [[RESP:%.*]] = alloca double, i64 1, align 8, +// LLVM: call void @llvm.va_start.p0(ptr [[VARLIST:%.*]]), +// LLVM: [[VR_OFFS_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 4 +// LLVM: [[VR_OFFS:%.*]] = load i32, ptr [[VR_OFFS_P]], align 4, +// LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[VR_OFFS]], 0, +// LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]], + +// LLVM: [[BB_END:.*]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG:.*]] +// LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT:%.*]], %[[BB_IN_REG]] ], [ [[STACK_V:%.*]], %[[BB_ON_STACK]] ] +// LLVM-NEXT: [[PHIV:%.*]] = load double, ptr [[PHIP]], align 8, +// LLVM-NEXT: store double [[PHIV]], ptr [[RESP]], align 8, +// LLVM: call void @llvm.va_end.p0(ptr [[VARLIST]]), +// LLVM: [[RES:%.*]] = load double, ptr [[RESP]], align 8, +// LLVM: store double [[RES]], ptr [[RETP]], align 8, +// LLVM: [[RETV:%.*]] = load double, ptr [[RETP]], align 8, +// LLVM-NEXT: ret double [[RETV]], + +// LLVM: [[BB_MAY_REG]]: ; +// LLVM-NEXT: [[NEW_REG_OFFS:%.*]] = add i32 [[VR_OFFS]], 16, +// LLVM-NEXT: store i32 [[NEW_REG_OFFS]], ptr [[VR_OFFS_P]], align 4, +// LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0, +// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG]], label %[[BB_ON_STACK]], + +// LLVM: [[BB_IN_REG]]: ; +// LLVM-NEXT: [[VR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 2, +// LLVM-NEXT: [[VR_TOP:%.*]] = load ptr, ptr [[VR_TOP_P]], align 8, +// LLVM-NEXT: [[EXT64_VR_OFFS:%.*]] = sext i32 [[VR_OFFS]] to i64, +// LLVM-NEXT: [[IN_REG_OUTPUT]] = getelementptr i8, ptr [[VR_TOP]], i64 [[EXT64_VR_OFFS]], +// LLVM-NEXT: br label %[[BB_END]], + +// LLVM: [[BB_ON_STACK]]: ; +// LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, +// LLVM-NEXT: [[STACK_V]] = load ptr, ptr [[STACK_P]], align 8, +// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i32 8, +// LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, +// LLVM-NEXT: br label %[[BB_END]], diff --git a/clang/test/CIR/CodeGen/var-arg.c b/clang/test/CIR/CodeGen/var-arg.c new file mode 100644 index 000000000000..20478da5281f --- /dev/null +++ b/clang/test/CIR/CodeGen/var-arg.c @@ -0,0 +1,122 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// XFAIL: * + +#include + +int f1(int n, ...) { + va_list valist; + va_start(valist, n); + int res = va_arg(valist, int); + va_end(valist); + return res; +} + +// BEFORE: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !cir.int, !cir.int} +// BEFORE: cir.func @f1(%arg0: !s32i, ...) -> !s32i +// BEFORE: [[RETP:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] +// BEFORE: [[RESP:%.*]] = cir.alloca !s32i, !cir.ptr, ["res", init] +// BEFORE: cir.va.start [[VARLIST:%.*]] : !cir.ptr +// BEFORE: [[TMP0:%.*]] = cir.va.arg [[VARLIST]] : (!cir.ptr) -> !s32i +// BEFORE: cir.store [[TMP0]], [[RESP]] : !s32i, !cir.ptr +// BEFORE: cir.va.end [[VARLIST]] : !cir.ptr +// BEFORE: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !s32i +// BEFORE: cir.store [[RES]], [[RETP]] : !s32i, !cir.ptr +// BEFORE: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !s32i +// BEFORE: cir.return [[RETV]] : !s32i + +// AFTER: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !cir.int, !cir.int} +// AFTER: cir.func @f1(%arg0: !s32i, ...) -> !s32i +// AFTER: [[RETP:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] +// AFTER: [[RESP:%.*]] = cir.alloca !s32i, !cir.ptr, ["res", init] +// AFTER: cir.va.start [[VARLIST:%.*]] : !cir.ptr +// AFTER: [[GR_OFFS_P:%.*]] = cir.get_member [[VARLIST]][3] {name = "gr_offs"} : !cir.ptr -> !cir.ptr +// AFTER: [[GR_OFFS:%.*]] = cir.load [[GR_OFFS_P]] : !cir.ptr, !s32i +// AFTER: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i +// AFTER: [[CMP0:%.*]] = cir.cmp(ge, [[GR_OFFS]], [[ZERO]]) : !s32i, !cir.bool +// AFTER-NEXT: cir.brcond [[CMP0]] [[BB_ON_STACK:\^bb.*]], [[BB_MAY_REG:\^bb.*]] + +// This BB is where different path converges. BLK_ARG is the arg addr which +// could come from IN_REG block where arg is passed in register, and saved in callee +// stack's argument saving area. +// Or from ON_STACK block which means arg is passed in from caller's stack area. +// AFTER-NEXT: [[BB_END:\^bb.*]]([[BLK_ARG:%.*]]: !cir.ptr): // 2 preds: [[BB_IN_REG:\^bb.*]], [[BB_ON_STACK]] +// AFTER-NEXT: [[TMP0:%.*]] = cir.cast(bitcast, [[BLK_ARG]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr, !s32i +// AFTER: cir.store [[TMP1]], [[RESP]] : !s32i, !cir.ptr +// AFTER: cir.va.end [[VARLIST]] : !cir.ptr +// AFTER: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !s32i +// AFTER: cir.store [[RES]], [[RETP]] : !s32i, !cir.ptr +// AFTER: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !s32i +// AFTER: cir.return [[RETV]] : !s32i + +// This BB calculates to see if it is possible to pass arg in register. +// AFTER: [[BB_MAY_REG]]: +// AFTER-NEXT: [[EIGHT:%.*]] = cir.const #cir.int<8> : !s32i +// AFTER-NEXT: [[NEW_REG_OFFS:%.*]] = cir.binop(add, [[GR_OFFS]], [[EIGHT]]) : !s32i +// AFTER-NEXT: cir.store [[NEW_REG_OFFS]], [[GR_OFFS_P]] : !s32i, !cir.ptr +// AFTER-NEXT: [[CMP1:%.*]] = cir.cmp(le, [[NEW_REG_OFFS]], [[ZERO]]) : !s32i, !cir.bool +// AFTER-NEXT: cir.brcond [[CMP1]] [[BB_IN_REG]], [[BB_ON_STACK]] + +// arg is passed in register. +// AFTER: [[BB_IN_REG]]: +// AFTER-NEXT: [[GR_TOP_P:%.*]] = cir.get_member [[VARLIST]][1] {name = "gr_top"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[GR_TOP:%.*]] = cir.load [[GR_TOP_P]] : !cir.ptr>, !cir.ptr +// AFTER-NEXT: [[TMP2:%.*]] = cir.cast(bitcast, [[GR_TOP]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: [[TMP3:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[GR_OFFS]] : !s32i), !cir.ptr +// AFTER-NEXT: [[IN_REG_OUTPUT:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.br [[BB_END]]([[IN_REG_OUTPUT]] : !cir.ptr) + +// arg is passed in stack. +// AFTER: [[BB_ON_STACK]]: +// AFTER-NEXT: [[STACK_P:%.*]] = cir.get_member [[VARLIST]][0] {name = "stack"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[STACK_V:%.*]] = cir.load [[STACK_P]] : !cir.ptr>, !cir.ptr +// AFTER-NEXT: [[EIGHT_IN_PTR_ARITH:%.*]] = cir.const #cir.int<8> : !u64i +// AFTER-NEXT: [[TMP4:%.*]] = cir.cast(bitcast, [[STACK_V]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: [[TMP5:%.*]] = cir.ptr_stride([[TMP4]] : !cir.ptr, [[EIGHT_IN_PTR_ARITH]] : !u64i), !cir.ptr +// AFTER-NEXT: [[NEW_STACK_V:%.*]] = cir.cast(bitcast, [[TMP5]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.store [[NEW_STACK_V]], [[STACK_P]] : !cir.ptr, !cir.ptr> +// AFTER-NEXT: cir.br [[BB_END]]([[STACK_V]] : !cir.ptr) + +// LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } +// LLVM: define i32 @f1(i32 %0, ...) +// LLVM: [[ARGN:%.*]] = alloca i32, i64 1, align 4, +// LLVM: [[RETP:%.*]] = alloca i32, i64 1, align 4, +// LLVM: [[RESP:%.*]] = alloca i32, i64 1, align 4, +// LLVM: call void @llvm.va_start.p0(ptr [[VARLIST:%.*]]), +// LLVM: [[GR_OFFS_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 3 +// LLVM: [[GR_OFFS:%.*]] = load i32, ptr [[GR_OFFS_P]], align 4, +// LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[GR_OFFS]], 0, +// LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]], + +// LLVM: [[BB_END:.*]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG:.*]] +// LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT:%.*]], %[[BB_IN_REG]] ], [ [[STACK_V:%.*]], %[[BB_ON_STACK]] ] +// LLVM-NEXT: [[PHIV:%.*]] = load i32, ptr [[PHIP]], align 4, +// LLVM-NEXT: store i32 [[PHIV]], ptr [[RESP]], align 4, +// LLVM: call void @llvm.va_end.p0(ptr [[VARLIST]]), +// LLVM: [[RES:%.*]] = load i32, ptr [[RESP]], align 4, +// LLVM: store i32 [[RES]], ptr [[RETP]], align 4, +// LLVM: [[RETV:%.*]] = load i32, ptr [[RETP]], align 4, +// LLVM-NEXT: ret i32 [[RETV]], + +// LLVM: [[BB_MAY_REG]]: ; +// LLVM: [[NEW_REG_OFFS:%.*]] = add i32 [[GR_OFFS]], 8, +// LLVM: store i32 [[NEW_REG_OFFS]], ptr [[GR_OFFS_P]], align 4, +// LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0, +// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG]], label %[[BB_ON_STACK]], + +// LLVM: [[BB_IN_REG]]: ; +// LLVM-NEXT: [[GR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 1, +// LLVM-NEXT: [[GR_TOP:%.*]] = load ptr, ptr [[GR_TOP_P]], align 8, +// LLVM-NEXT: [[EXT64_GR_OFFS:%.*]] = sext i32 [[GR_OFFS]] to i64, +// LLVM-NEXT: [[IN_REG_OUTPUT]] = getelementptr i8, ptr [[GR_TOP]], i64 [[EXT64_GR_OFFS]], +// LLVM-NEXT: br label %[[BB_END]], + +// LLVM: [[BB_ON_STACK]]: ; +// LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, +// LLVM-NEXT: [[STACK_V]] = load ptr, ptr [[STACK_P]], align 8, +// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i32 8, +// LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, +// LLVM-NEXT: br label %[[BB_END]], diff --git a/clang/test/CIR/CodeGen/variadics.c b/clang/test/CIR/CodeGen/variadics.c index 90ab27cc8ae5..dd79ceedd93a 100644 --- a/clang/test/CIR/CodeGen/variadics.c +++ b/clang/test/CIR/CodeGen/variadics.c @@ -1,9 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s -// RUN: %clang_cc1 -x c++ -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s -// RUN: %clang_cc1 -x c++ -std=c++20 -triple aarch64-none-linux-android24 -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s +// RUN: %clang_cc1 -x c++ -std=c++20 -triple aarch64-none-linux-android24 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s typedef __builtin_va_list va_list; @@ -15,7 +11,8 @@ typedef __builtin_va_list va_list; // CHECK: [[VALISTTYPE:!.+va_list.*]] = !cir.struct !s32i +// CHECK: cir.func @{{.*}}average{{.*}}(%arg0: !s32i, ...) -> !s32i +// AMR64_CHECK: cir.func @{{.*}}average{{.*}}(%arg0: !s32i loc({{.+}}), ...) -> !s32i va_list args, args_copy; va_start(args, count); // CHECK: cir.va.start %{{[0-9]+}} : !cir.ptr<[[VALISTTYPE]]> From 54ca0114a8f7b859cc844047137d9009c7c117e9 Mon Sep 17 00:00:00 2001 From: Krito Date: Wed, 29 May 2024 14:36:39 +0800 Subject: [PATCH 1599/2301] [CIR][Lowering] Add MLIR lowering support for CIR shift operations (#630) This pr adds cir.shift lowering to MLIR passes and test files. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 100 ++++++++++++------ clang/test/CIR/Lowering/ThroughMLIR/shift.cir | 31 ++++++ 2 files changed, 98 insertions(+), 33 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/shift.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 52ec83c3edc2..50edb7a40f5b 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -321,6 +321,61 @@ class CIRExpOpLowering : public mlir::OpConversionPattern { } }; +static mlir::Value createIntCast(mlir::ConversionPatternRewriter &rewriter, + mlir::Value src, mlir::Type dstTy, + bool isSigned = false) { + auto srcTy = src.getType(); + assert(isa(srcTy)); + assert(isa(dstTy)); + + auto srcWidth = srcTy.cast().getWidth(); + auto dstWidth = dstTy.cast().getWidth(); + auto loc = src.getLoc(); + + if (dstWidth > srcWidth && isSigned) + return rewriter.create(loc, dstTy, src); + else if (dstWidth > srcWidth) + return rewriter.create(loc, dstTy, src); + else if (dstWidth < srcWidth) + return rewriter.create(loc, dstTy, src); + else + return rewriter.create(loc, dstTy, src); +} + +class CIRShiftOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + mlir::LogicalResult + matchAndRewrite(mlir::cir::ShiftOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto cirAmtTy = op.getAmount().getType().dyn_cast(); + auto cirValTy = op.getValue().getType().dyn_cast(); + auto mlirTy = getTypeConverter()->convertType(op.getType()); + mlir::Value amt = adaptor.getAmount(); + mlir::Value val = adaptor.getValue(); + + assert(cirValTy && cirAmtTy && "non-integer shift is NYI"); + assert(cirValTy == op.getType() && "inconsistent operands' types NYI"); + + // Ensure shift amount is the same type as the value. Some undefined + // behavior might occur in the casts below as per [C99 6.5.7.3]. + amt = createIntCast(rewriter, amt, mlirTy, cirAmtTy.isSigned()); + + // Lower to the proper arith shift operation. + if (op.getIsShiftleft()) + rewriter.replaceOpWithNewOp(op, mlirTy, val, amt); + else { + if (cirValTy.isUnsigned()) + rewriter.replaceOpWithNewOp(op, mlirTy, val, amt); + else + rewriter.replaceOpWithNewOp(op, mlirTy, val, amt); + } + + return mlir::success(); + } +}; + class CIRExp2OpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -901,27 +956,6 @@ class CIRGetGlobalOpLowering } }; -static mlir::Value createIntCast(mlir::ConversionPatternRewriter &rewriter, - mlir::Value src, mlir::Type dstTy, - bool isSigned = false) { - auto srcTy = src.getType(); - assert(isa(srcTy)); - assert(isa(dstTy)); - - auto srcWidth = srcTy.cast().getWidth(); - auto dstWidth = dstTy.cast().getWidth(); - auto loc = src.getLoc(); - - if (dstWidth > srcWidth && isSigned) - return rewriter.create(loc, dstTy, src); - else if (dstWidth > srcWidth) - return rewriter.create(loc, dstTy, src); - else if (dstWidth < srcWidth) - return rewriter.create(loc, dstTy, src); - else - return rewriter.create(loc, dstTy, src); -} - class CIRCastOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -1124,18 +1158,18 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns - .add( - converter, patterns.getContext()); + patterns.add(converter, + patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/shift.cir b/clang/test/CIR/Lowering/ThroughMLIR/shift.cir new file mode 100644 index 000000000000..aecbc3f45940 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/shift.cir @@ -0,0 +1,31 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!u16i = !cir.int +module { + cir.func @testShiftWithDifferentValueAndAmountTypes(%arg0: !s16i, %arg1: !s32i, %arg2: !s64i, %arg3: !u16i) { + %1 = cir.shift(left, %arg1: !s32i, %arg2 : !s64i) -> !s32i + %2 = cir.shift(left, %arg1 : !s32i, %arg0 : !s16i) -> !s32i + %3 = cir.shift(left, %arg1 : !s32i, %arg3 : !u16i) -> !s32i + %4 = cir.shift(left, %arg1 : !s32i, %arg1 : !s32i) -> !s32i + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @testShiftWithDifferentValueAndAmountTypes(%arg0: i16, %arg1: i32, %arg2: i64, %arg3: i16) { +// CHECK-NEXT: %[[TRUNC:.+]] = arith.trunci %arg2 : i64 to i32 +// CHECK-NEXT: %[[SHIFT_TRUNC:.+]] = arith.shli %arg1, %[[TRUNC]] : i32 +// CHECK-NEXT: %[[EXTS:.+]] = arith.extsi %arg0 : i16 to i32 +// CHECK-NEXT: %[[SHIFT_EXTS:.+]] = arith.shli %arg1, %[[EXTS]] : i32 +// CHECK-NEXT: %[[EXTU:.+]] = arith.extui %arg3 : i16 to i32 +// CHECK-NEXT: %[[SHIFT_EXTU:.+]] = arith.shli %arg1, %[[EXTU]] : i32 +// CHECK-NEXT: %[[BITCAST:.+]] = arith.bitcast %arg1 : i32 to i32 +// CHECK-NEXT: %[[SHIFT_BITCAST:.+]] = arith.shli %arg1, %[[BITCAST]] : i32 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } + From cd2a32b96bb17d1d3b75c8fd1f87950f5c466850 Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Wed, 29 May 2024 14:43:24 +0800 Subject: [PATCH 1600/2301] [CIR] Add C source code as comments in for.cir unit test (#625) With C source code, we would able to update the CIR tests when needed. --- clang/test/CIR/Lowering/ThroughMLIR/for.cir | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/clang/test/CIR/Lowering/ThroughMLIR/for.cir b/clang/test/CIR/Lowering/ThroughMLIR/for.cir index 9ec345577255..48d5f2d709f8 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/for.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/for.cir @@ -1,10 +1,16 @@ // RUN: cir-opt %s -cir-to-mlir --canonicalize | FileCheck %s -check-prefix=MLIR // RUN: cir-opt %s -cir-to-mlir --canonicalize -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM +// Note that the following CIR been produced after -cir-mlir-scf-prepare. +// So the loop invariant in the condition block have been hoisted out of loop. + !s32i = !cir.int module { cir.global external @a = #cir.zero : !cir.array + // for (int i = 0; i < 100; ++i) + // a[i] = 3; + // // MLIR-LABEL: func.func @constantLoopBound() // LLVM-LABEL: define void @constantLoopBound() cir.func @constantLoopBound() { @@ -55,6 +61,9 @@ module { cir.return } + // for (int i = 0; i <= 100; ++i) + // a[i] = 3; + // // MLIR-LABEL: func.func @constantLoopBound_LE() // LLVM-LABEL: define void @constantLoopBound_LE() cir.func @constantLoopBound_LE() { @@ -105,6 +114,9 @@ module { cir.return } + // for (int i = l; i < u; ++i) + // a[i] = 3; + // // MLIR-LABEL: func.func @variableLoopBound(%arg0: i32, %arg1: i32) // LLVM-LABEL: define void @variableLoopBound(i32 %0, i32 %1) cir.func @variableLoopBound(%arg0: !s32i, %arg1: !s32i) { @@ -161,6 +173,9 @@ module { cir.return } + // for (int i = l; i <= u; i+=4) + // a[i] = 3; + // // MLIR-LABEL: func.func @variableLoopBound_LE(%arg0: i32, %arg1: i32) // LLVM-LABEL: define void @variableLoopBound_LE(i32 %0, i32 %1) cir.func @variableLoopBound_LE(%arg0: !s32i, %arg1: !s32i) { From 60f428b8156b569977c0c0da415918410a3bb313 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 23 May 2024 22:16:31 -0700 Subject: [PATCH 1601/2301] [CIR][CIRGen] Honor alignment on createAlignedLoad One more step into fixing overall alignment requirements. --- .../clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 12 ++++++++---- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 15 ++++++--------- .../Transforms/LoweringPrepareItaniumCXXABI.cpp | 3 ++- clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c | 13 ++++++------- clang/test/CIR/CodeGen/derived-to-base.cpp | 2 +- .../CIR/CodeGen/dynamic-cast-relative-layout.cpp | 2 +- clang/test/CIR/CodeGen/dynamic-cast.cpp | 2 +- 7 files changed, 25 insertions(+), 24 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index e7c63dd1f5b3..9c2449f88189 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -85,16 +85,20 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } mlir::Value createLoad(mlir::Location loc, mlir::Value ptr, - bool isVolatile = false) { + bool isVolatile = false, uint64_t alignment = 0) { + mlir::IntegerAttr intAttr; + if (alignment) + intAttr = mlir::IntegerAttr::get( + mlir::IntegerType::get(ptr.getContext(), 64), alignment); + return create(loc, ptr, /*isDeref=*/false, isVolatile, - /*alignment=*/mlir::IntegerAttr{}, + /*alignment=*/intAttr, /*mem_order=*/mlir::cir::MemOrderAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr, uint64_t alignment) { - // TODO(cir): implement aligned load in CIRBaseBuilder. - return createLoad(loc, ptr); + return createLoad(loc, ptr, /*isVolatile=*/false, alignment); } mlir::Value createNot(mlir::Value value) { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index e7686e4c3531..51c6846a2828 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -761,21 +761,18 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, - mlir::Value ptr, - [[maybe_unused]] llvm::MaybeAlign align, - [[maybe_unused]] bool isVolatile) { - assert(!UnimplementedFeature::volatileLoadOrStore()); - assert(!UnimplementedFeature::alignedLoad()); - // FIXME: create a more generic version of createLoad and rewrite this and - // others in terms of that. Ideally there should only be one call to - // create in all helpers. + mlir::Value ptr, llvm::MaybeAlign align, + bool isVolatile) { if (ty != ptr.getType().cast().getPointee()) ptr = createPtrBitcast(ptr, ty); - return create(loc, ty, ptr); + uint64_t alignment = align ? align->value() : 0; + return CIRBaseBuilderTy::createLoad(loc, ptr, isVolatile, alignment); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value ptr, llvm::MaybeAlign align) { + // TODO: make sure callsites shouldn't be really passing volatile. + assert(!UnimplementedFeature::volatileLoadOrStore()); return createAlignedLoad(loc, ty, ptr, align, /*isVolatile=*/false); } diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp index c7984f86c451..191ee837341c 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp @@ -106,7 +106,8 @@ buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, vtableElemTy = mlir::cir::IntType::get(builder.getContext(), ptrdiffTyWidth, ptrdiffTyIsSigned); - vtableElemAlign = targetInfo.getPointerAlign(clang::LangAS::Default); + vtableElemAlign = + llvm::divideCeil(targetInfo.getPointerAlign(clang::LangAS::Default), 8); } // Access vtable to get the offset from the given object to its containing diff --git a/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c b/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c index 56af730ba81e..68f2b11bf426 100644 --- a/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c +++ b/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c @@ -6,8 +6,8 @@ // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ // RUN: -fclangir -disable-O0-optnone \ // RUN: -flax-vector-conversions=none -emit-llvm -o - %s \ -// RUN: | opt -S -passes=mem2reg,simplifycfg \ -// RUN: | FileCheck --check-prefix=LLVM %s +// RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // REQUIRES: aarch64-registered-target || arm-registered-target @@ -8965,15 +8965,14 @@ // return vrsqrted_f64(a); // } -// FIXME: alignment should be 1. uint8x16_t test_vld1q_u8(uint8_t const *a) { return vld1q_u8(a); - // LLVM-LABEL: @test_vld1q_u8 - // LLVM: [[TMP1:%.*]] = load <16 x i8>, ptr %0, align 16 - // CIR-LABEL: @test_vld1q_u8 // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> - // CIR: cir.load %[[CAST]] : !cir.ptr>, !cir.vector + // CIR: cir.load align(1) %[[CAST]] : !cir.ptr>, !cir.vector + + // LLVM-LABEL: @test_vld1q_u8 + // LLVM: [[TMP1:%.*]] = load <16 x i8>, ptr %0, align 1, } // NYI-LABEL: @test_vld1q_u16( diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 4932e8d7a944..2de30990d9f4 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -121,7 +121,7 @@ void vcall(C1 &c1) { // CHECK: %7 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr, !s32i, !ty_22buffy22)>>>> // CHECK: %8 = cir.load %7 : !cir.ptr, !s32i, !ty_22buffy22)>>>>, !cir.ptr, !s32i, !ty_22buffy22)>>> // CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_22buffy22)>>>, vtable_index = 0, address_point_index = 2) : !cir.ptr, !s32i, !ty_22buffy22)>>> -// CHECK: %10 = cir.load %9 : !cir.ptr, !s32i, !ty_22buffy22)>>>, !cir.ptr, !s32i, !ty_22buffy22)>> +// CHECK: %10 = cir.load align(8) %9 : !cir.ptr, !s32i, !ty_22buffy22)>>>, !cir.ptr, !s32i, !ty_22buffy22)>> // CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_22buffy22)>>, !cir.ptr, !s32i, !ty_22buffy22) -> !s32i // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp b/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp index f778592e9951..58da33f0e60e 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp @@ -22,7 +22,7 @@ void *ptr_cast_to_complete(Base *ptr) { // AFTER-NEXT: %[[#VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr> // AFTER-NEXT: %[[#VPTR:]] = cir.load %[[#VPTR_PTR]] : !cir.ptr>, !cir.ptr // AFTER-NEXT: %[[#OFFSET_TO_TOP_PTR:]] = cir.vtable.address_point( %[[#VPTR]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : !cir.ptr -// AFTER-NEXT: %[[#OFFSET_TO_TOP:]] = cir.load %[[#OFFSET_TO_TOP_PTR]] : !cir.ptr, !s32i +// AFTER-NEXT: %[[#OFFSET_TO_TOP:]] = cir.load align(4) %[[#OFFSET_TO_TOP_PTR]] : !cir.ptr, !s32i // AFTER-NEXT: %[[#SRC_BYTES_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr // AFTER-NEXT: %[[#DST_BYTES_PTR:]] = cir.ptr_stride(%[[#SRC_BYTES_PTR]] : !cir.ptr, %[[#OFFSET_TO_TOP]] : !s32i), !cir.ptr // AFTER-NEXT: %[[#DST:]] = cir.cast(bitcast, %[[#DST_BYTES_PTR]] : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/CodeGen/dynamic-cast.cpp b/clang/test/CIR/CodeGen/dynamic-cast.cpp index 19701eb9e28b..622702104707 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast.cpp @@ -74,7 +74,7 @@ void *ptr_cast_to_complete(Base *ptr) { // AFTER-NEXT: %[[#VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr> // AFTER-NEXT: %[[#VPTR:]] = cir.load %[[#VPTR_PTR]] : !cir.ptr>, !cir.ptr // AFTER-NEXT: %[[#BASE_OFFSET_PTR:]] = cir.vtable.address_point( %[[#VPTR]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : !cir.ptr -// AFTER-NEXT: %[[#BASE_OFFSET:]] = cir.load %[[#BASE_OFFSET_PTR]] : !cir.ptr, !s64i +// AFTER-NEXT: %[[#BASE_OFFSET:]] = cir.load align(8) %[[#BASE_OFFSET_PTR]] : !cir.ptr, !s64i // AFTER-NEXT: %[[#SRC_BYTES_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr // AFTER-NEXT: %[[#DST_BYTES_PTR:]] = cir.ptr_stride(%[[#SRC_BYTES_PTR]] : !cir.ptr, %[[#BASE_OFFSET]] : !s64i), !cir.ptr // AFTER-NEXT: %[[#CASTED_PTR:]] = cir.cast(bitcast, %[[#DST_BYTES_PTR]] : !cir.ptr), !cir.ptr From c2fa77e352dd9f881c929624c0a9ba2286122ee7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 29 May 2024 14:52:53 -0700 Subject: [PATCH 1602/2301] [CIR][CIRGen] Honor alignment for createAlignedStore --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 20 ++++++++++--------- .../CodeGen/UnimplementedFeatureGuarding.h | 2 -- .../CIR/CodeGen/aarch64-neon-intrinsics.c | 15 ++++++++------ 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 51c6846a2828..14eed26e1019 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -796,16 +796,18 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return CIRBaseBuilderTy::createStore(loc, flag, dst); } - mlir::cir::StoreOp createAlignedStore(mlir::Location loc, mlir::Value val, - mlir::Value dst, - [[maybe_unused]] clang::CharUnits align, - bool _volatile = false, - ::mlir::cir::MemOrderAttr order = {}) { - // TODO: add alignment for LoadOp/StoreOp, right now LowerToLLVM knows - // how to figure out for most part, but it's possible the client might want - // to enforce a different alignment. + mlir::cir::StoreOp + createAlignedStore(mlir::Location loc, mlir::Value val, mlir::Value dst, + clang::CharUnits align = clang::CharUnits::One(), + bool _volatile = false, + ::mlir::cir::MemOrderAttr order = {}) { + llvm::MaybeAlign mayAlign = align.getAsAlign(); mlir::IntegerAttr alignAttr; - assert(!UnimplementedFeature::alignedStore()); + if (mayAlign) { + uint64_t alignment = mayAlign ? mayAlign->value() : 0; + alignAttr = mlir::IntegerAttr::get( + mlir::IntegerType::get(dst.getContext(), 64), alignment); + } return CIRBaseBuilderTy::createStore(loc, val, dst, _volatile, alignAttr, order); } diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index 19ea03daf7a7..09ab264be1ad 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -151,8 +151,6 @@ struct UnimplementedFeature { static bool loopInfoStack() { return false; } static bool requiresCleanups() { return false; } static bool constantFoldsToSimpleInteger() { return false; } - static bool alignedLoad() { return false; } - static bool alignedStore() { return false; } static bool checkFunctionCallABI() { return false; } static bool zeroInitializer() { return false; } static bool targetCodeGenInfoIsProtoCallVariadic() { return false; } diff --git a/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c b/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c index 68f2b11bf426..f55b9cd5772d 100644 --- a/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c +++ b/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c @@ -10177,12 +10177,15 @@ uint8x16_t test_vld1q_u8(uint8_t const *a) { // return vld4_p16(a); // } -// NYI-LABEL: @test_vst1q_u8( -// NYI: store <16 x i8> %b, ptr %a -// NYI: ret void -// void test_vst1q_u8(uint8_t *a, uint8x16_t b) { -// vst1q_u8(a, b); -// } +void test_vst1q_u8(uint8_t *a, uint8x16_t b) { + vst1q_u8(a, b); + // CIR-LABEL: @test_vst1q_u8 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.store align(1) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> + + // LLVM-LABEL: @test_vst1q_u8 + // LLVM: store <16 x i8> %{{.*}}, ptr %0, align 1, +} // NYI-LABEL: @test_vst1q_u16( // NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> From a986910b2a6cc1f400aac2b69082efd475f68dde Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Thu, 30 May 2024 20:08:41 -0300 Subject: [PATCH 1603/2301] [CIR][Passes] Add CallConvLowering pass skeleton (#642) This patch adds a new CallConvLowering pass that aims to lower the calling conventions of the functions in the module. It also includes a new Clang command line option to enable it. Also, it is considered a part of the lowering prepare set of passes, as it is unlikely to be used elsewhere in the pipeline. Since this will be dealing with ABI/Target-specific information, it requires AST info. For this reason, it can only be executed through the clang driver or cc1 tool for now as CIR does not encode AST info. This pass is disabled by default and can be enabled by passing the flag `-fclangir-call-conv-lowering`. Once this pass is more mature, it should be enabled by default as a required step to lower to LLVM Dialect. --- clang/include/clang/CIR/CIRToCIRPasses.h | 2 +- clang/include/clang/CIR/Dialect/Passes.h | 3 + clang/include/clang/CIR/Dialect/Passes.td | 11 +++ clang/include/clang/Driver/Options.td | 4 + .../include/clang/Frontend/FrontendOptions.h | 3 + clang/lib/CIR/CodeGen/CIRPasses.cpp | 8 +- .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../Dialect/Transforms/CallConvLowering.cpp | 76 +++++++++++++++++++ clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 3 +- clang/lib/Driver/ToolChains/Clang.cpp | 3 + clang/lib/Frontend/CompilerInvocation.cpp | 3 + .../x86/x86-call-conv-lowering-pass.cpp | 5 ++ 12 files changed, 119 insertions(+), 3 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp create mode 100644 clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index dfea5e6d004b..b70806ddb77e 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -34,7 +34,7 @@ mlir::LogicalResult runCIRToCIRPasses( llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, llvm::StringRef libOptOpts, std::string &passOptParsingFailure, - bool flattenCIR, bool emitMLIR); + bool flattenCIR, bool emitMLIR, bool enableCallConvLowering); } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index b4bff1d5082c..de9621fc8bb6 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -38,6 +38,9 @@ std::unique_ptr createLibOptPass(clang::ASTContext *astCtx); std::unique_ptr createFlattenCFGPass(); std::unique_ptr createGotoSolverPass(); +/// Create a pass to lower ABI-independent function definitions/calls. +std::unique_ptr createCallConvLoweringPass(); + void populateCIRPreLoweringPasses(mlir::OpPassManager &pm); //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 8038a627ad83..c43812ff1032 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -149,4 +149,15 @@ def LibOpt : Pass<"cir-lib-opt"> { ]; } +def CallConvLowering : Pass<"cir-call-conv-lowering"> { + let summary = "Handle calling conventions for CIR functions"; + let description = [{ + This pass lowers CIR function definitions and calls according to the + calling conventions for the target architecture. This pass is necessary + to properly lower CIR functions to LLVM IR. + }]; + let constructor = "mlir::createCallConvLoweringPass()"; + let dependentDialects = ["mlir::cir::CIRDialect"]; +} + #endif // MLIR_DIALECT_CIR_PASSES diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 5015af3a9b82..a8651fcffb34 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3079,6 +3079,10 @@ def fclangir_lib_opt : Flag<["-"], "fclangir-lib-opt">, Visibility<[ClangOption, CC1Option]>, Group, Alias, HelpText<"Enable C/C++ library based optimizations">; +def fclangir_call_conv_lowering : Flag<["-"], "fclangir-call-conv-lowering">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Enable ClangIR calling convention lowering">, + MarshallingInfoFlag>; def clangir_disable_passes : Flag<["-"], "clangir-disable-passes">, Visibility<[ClangOption, CC1Option]>, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index f0ee2c1b816d..5bff487c2068 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -448,6 +448,9 @@ class FrontendOptions { // Enable Clang IR library optimizations unsigned ClangIRLibOpt : 1; + // Enable Clang IR call conv lowering pass. + unsigned ClangIREnableCallConvLowering : 1; + CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index a8716d11759e..dcc613a89925 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -25,7 +25,7 @@ mlir::LogicalResult runCIRToCIRPasses( llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, llvm::StringRef libOptOpts, std::string &passOptParsingFailure, - bool flattenCIR, bool emitMLIR) { + bool flattenCIR, bool emitMLIR, bool enableCallConvLowering) { mlir::PassManager pm(mlirCtx); pm.addPass(mlir::createMergeCleanupsPass()); @@ -64,6 +64,12 @@ mlir::LogicalResult runCIRToCIRPasses( } pm.addPass(mlir::createLoweringPreparePass(&astCtx)); + + // FIXME(cir): This pass should run by default, but it is lacking support for + // several code bits. Once it's more mature, we should fix this. + if (enableCallConvLowering) + pm.addPass(mlir::createCallConvLoweringPass()); + if (flattenCIR) mlir::populateCIRPreLoweringPasses(pm); diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index babe408bcde6..bed968da6a81 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -11,6 +11,7 @@ add_clang_library(MLIRCIRTransforms FlattenCFG.cpp GotoSolver.cpp SCFPrepare.cpp + CallConvLowering.cpp DEPENDS MLIRCIRPassIncGen diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp new file mode 100644 index 000000000000..58e4d3200705 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -0,0 +1,76 @@ +//===- CallConvLowering.cpp - Rewrites functions according to call convs --===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/IR/PatternMatch.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +#define GEN_PASS_DEF_CALLCONVLOWERING +#include "clang/CIR/Dialect/Passes.h.inc" + +namespace mlir { +namespace cir { + +//===----------------------------------------------------------------------===// +// Rewrite Patterns +//===----------------------------------------------------------------------===// + +struct CallConvLoweringPattern : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult matchAndRewrite(FuncOp op, + PatternRewriter &rewriter) const final { + if (!op.getAst()) + return op.emitError("function has no AST information"); + return success(); + } +}; + +//===----------------------------------------------------------------------===// +// Pass +//===----------------------------------------------------------------------===// + +struct CallConvLoweringPass + : ::impl::CallConvLoweringBase { + using CallConvLoweringBase::CallConvLoweringBase; + + void runOnOperation() override; + StringRef getArgument() const override { return "cir-call-conv-lowering"; }; +}; + +void populateCallConvLoweringPassPatterns(RewritePatternSet &patterns) { + patterns.add(patterns.getContext()); +} + +void CallConvLoweringPass::runOnOperation() { + + // Collect rewrite patterns. + RewritePatternSet patterns(&getContext()); + populateCallConvLoweringPassPatterns(patterns); + + // Collect operations to be considered by the pass. + SmallVector ops; + getOperation()->walk([&](FuncOp op) { ops.push_back(op); }); + + // Configure rewrite to ignore new ops created during the pass. + GreedyRewriteConfig config; + config.strictMode = GreedyRewriteStrictness::ExistingOps; + + // Apply patterns. + if (failed(applyOpPatternsAndFold(ops, std::move(patterns), config))) + signalPassFailure(); +} + +} // namespace cir + +std::unique_ptr createCallConvLoweringPass() { + return std::make_unique(); +} + +} // namespace mlir diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 56051d3c81ec..0a570a3c783c 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -190,7 +190,8 @@ class CIRGenConsumer : public clang::ASTConsumer { feOptions.ClangIRIdiomRecognizer, idiomRecognizerOpts, feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, action == CIRGenAction::OutputType::EmitCIRFlat, - action == CIRGenAction::OutputType::EmitMLIR) + action == CIRGenAction::OutputType::EmitMLIR, + feOptions.ClangIREnableCallConvLowering) .failed()) { if (!passOptParsingFailure.empty()) diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 3a97f27c66e0..c883d88b51ba 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5252,6 +5252,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_clangir_disable_passes)) CmdArgs.push_back("-clangir-disable-passes"); + if (Args.hasArg(options::OPT_fclangir_call_conv_lowering)) + CmdArgs.push_back("-fclangir-call-conv-lowering"); + // ClangIR lib opt requires idiom recognizer. if (Args.hasArg(options::OPT_fclangir_lib_opt, options::OPT_fclangir_lib_opt_EQ)) { diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 7ef1f905cf43..a9307703916e 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3128,6 +3128,9 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Args.hasArg(OPT_clangir_verify_diagnostics)) Opts.ClangIRVerifyDiags = true; + if (Args.hasArg(OPT_fclangir_call_conv_lowering)) + Opts.ClangIREnableCallConvLowering = true; + if (const Arg *A = Args.getLastArg(OPT_fclangir_lifetime_check, OPT_fclangir_lifetime_check_EQ)) { Opts.ClangIRLifetimeCheck = true; diff --git a/clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp new file mode 100644 index 000000000000..e8772b24c3b8 --- /dev/null +++ b/clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp @@ -0,0 +1,5 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Just check if the pass is called for now. +// CHECK: module From fa660fb96e69cfbe962b07643379029eb2aca3ed Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 31 May 2024 07:11:55 +0800 Subject: [PATCH 1604/2301] [CIR][CIRGen] Add CIRGen for binary fp2fp builtin operations (#616) This PR adds the following operations for the builtin binary fp2fp functions: - `cir.copysign` for `__builtin_copysign`; - `cir.fmax` for `__builtin_fmax`; - `cir.fmin` for `__builtin_fmin`; - `cir.fmod` for `__builtin_fmod`; - `cir.pow` for `__builtin_pow`. This PR also includes CIRGen support for these new operations. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 20 ++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 56 ++++- .../Dialect/Transforms/LoweringPrepare.cpp | 53 ++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 28 ++- .../test/CIR/CodeGen/builtin-floating-point.c | 224 +++++++++++++++++- .../test/CIR/Lowering/builtin-binary-fp2fp.c | 132 +++++++++++ 6 files changed, 501 insertions(+), 12 deletions(-) create mode 100644 clang/test/CIR/Lowering/builtin-binary-fp2fp.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 73ce72dc7468..3855b41bd274 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3324,6 +3324,26 @@ def SinOp : UnaryFPToFPBuiltinOp<"sin">; def SqrtOp : UnaryFPToFPBuiltinOp<"sqrt">; def TruncOp : UnaryFPToFPBuiltinOp<"trunc">; +class BinaryFPToFPBuiltinOp + : CIR_Op { + let summary = [{ + libc builtin equivalent ignoring floating-point exceptions and errno. + }]; + + let arguments = (ins CIR_AnyFloat:$lhs, CIR_AnyFloat:$rhs); + let results = (outs CIR_AnyFloat:$result); + + let assemblyFormat = [{ + $lhs `,` $rhs `:` qualified(type($lhs)) attr-dict + }]; +} + +def CopysignOp : BinaryFPToFPBuiltinOp<"copysign">; +def FMaxOp : BinaryFPToFPBuiltinOp<"fmax">; +def FMinOp : BinaryFPToFPBuiltinOp<"fmin">; +def FModOp : BinaryFPToFPBuiltinOp<"fmod">; +def PowOp : BinaryFPToFPBuiltinOp<"pow">; + //===----------------------------------------------------------------------===// // Branch Probability Operations //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index cb4cae9c9c26..0e801d05e41e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -57,6 +57,36 @@ static RValue buildUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { return RValue::get(Call->getResult(0)); } +template +static RValue buildBinaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { + auto Arg0 = CGF.buildScalarExpr(E.getArg(0)); + auto Arg1 = CGF.buildScalarExpr(E.getArg(1)); + + auto Loc = CGF.getLoc(E.getExprLoc()); + auto Ty = CGF.ConvertType(E.getType()); + auto Call = CGF.getBuilder().create(Loc, Ty, Arg0, Arg1); + + return RValue::get(Call->getResult(0)); +} + +template +static mlir::Value buildBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &CGF, + const CallExpr &E) { + auto Arg0 = CGF.buildScalarExpr(E.getArg(0)); + auto Arg1 = CGF.buildScalarExpr(E.getArg(1)); + + auto Loc = CGF.getLoc(E.getExprLoc()); + auto Ty = CGF.ConvertType(E.getType()); + + if (CGF.getBuilder().getIsFPConstrained()) { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, &E); + llvm_unreachable("constrained FP operations are NYI"); + } else { + auto Call = CGF.getBuilder().create(Loc, Ty, Arg0, Arg1); + return Call->getResult(0); + } +} + template static RValue buildBuiltinBitOp(CIRGenFunction &CGF, const CallExpr *E, @@ -290,8 +320,10 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIcopysignl: case Builtin::BI__builtin_copysign: case Builtin::BI__builtin_copysignf: - case Builtin::BI__builtin_copysignf16: case Builtin::BI__builtin_copysignl: + return buildBinaryFPBuiltin(*this, *E); + + case Builtin::BI__builtin_copysignf16: case Builtin::BI__builtin_copysignf128: llvm_unreachable("NYI"); @@ -360,8 +392,11 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIfmaxl: case Builtin::BI__builtin_fmax: case Builtin::BI__builtin_fmaxf: - case Builtin::BI__builtin_fmaxf16: case Builtin::BI__builtin_fmaxl: + return RValue::get( + buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); + + case Builtin::BI__builtin_fmaxf16: case Builtin::BI__builtin_fmaxf128: llvm_unreachable("NYI"); @@ -370,8 +405,11 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIfminl: case Builtin::BI__builtin_fmin: case Builtin::BI__builtin_fminf: - case Builtin::BI__builtin_fminf16: case Builtin::BI__builtin_fminl: + return RValue::get( + buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); + + case Builtin::BI__builtin_fminf16: case Builtin::BI__builtin_fminf128: llvm_unreachable("NYI"); @@ -382,11 +420,12 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIfmodl: case Builtin::BI__builtin_fmod: case Builtin::BI__builtin_fmodf: - case Builtin::BI__builtin_fmodf16: case Builtin::BI__builtin_fmodl: - case Builtin::BI__builtin_fmodf128: { + return buildBinaryFPBuiltin(*this, *E); + + case Builtin::BI__builtin_fmodf16: + case Builtin::BI__builtin_fmodf128: llvm_unreachable("NYI"); - } case Builtin::BIlog: case Builtin::BIlogf: @@ -432,8 +471,11 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIpowl: case Builtin::BI__builtin_pow: case Builtin::BI__builtin_powf: - case Builtin::BI__builtin_powf16: case Builtin::BI__builtin_powl: + return RValue::get( + buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); + + case Builtin::BI__builtin_powf16: case Builtin::BI__builtin_powf128: llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index bb8c452b46ba..bbda97e45c80 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -80,6 +80,8 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerIterEndOp(IterEndOp op); void lowerArrayDtor(ArrayDtor op); void lowerArrayCtor(ArrayCtor op); + void lowerFModOp(FModOp op); + void lowerPowOp(PowOp op); /// Build the function that initializes the specified global FuncOp buildCXXGlobalVarDeclInitFunc(GlobalOp op); @@ -625,6 +627,49 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { op.erase(); } +static void lowerBinaryFPToFPBuiltinOp(LoweringPreparePass &pass, + mlir::Operation *op, + llvm::StringRef floatRtFuncName, + llvm::StringRef doubleRtFuncName, + llvm::StringRef longDoubleRtFuncName) { + mlir::Type ty = op->getResult(0).getType(); + + llvm::StringRef rtFuncName; + if (ty.isa()) + rtFuncName = floatRtFuncName; + else if (ty.isa()) + rtFuncName = doubleRtFuncName; + else if (ty.isa()) + rtFuncName = longDoubleRtFuncName; + else + llvm_unreachable("unknown binary fp2fp builtin operand type"); + + CIRBaseBuilderTy builder(*pass.theModule.getContext()); + builder.setInsertionPointToStart(pass.theModule.getBody()); + + auto rtFuncTy = mlir::cir::FuncType::get({ty, ty}, ty); + FuncOp rtFunc = + pass.buildRuntimeFunction(builder, rtFuncName, op->getLoc(), rtFuncTy); + + auto lhs = op->getOperand(0); + auto rhs = op->getOperand(1); + + builder.setInsertionPointAfter(op); + auto call = builder.create(op->getLoc(), rtFunc, + mlir::ValueRange{lhs, rhs}); + + op->replaceAllUsesWith(call); + op->erase(); +} + +void LoweringPreparePass::lowerFModOp(FModOp op) { + lowerBinaryFPToFPBuiltinOp(*this, op, "fmodf", "fmod", "fmodl"); +} + +void LoweringPreparePass::lowerPowOp(PowOp op) { + lowerBinaryFPToFPBuiltinOp(*this, op, "powf", "pow", "powl"); +} + void LoweringPreparePass::runOnOp(Operation *op) { if (auto threeWayCmp = dyn_cast(op)) { lowerThreeWayCmpOp(threeWayCmp); @@ -650,6 +695,10 @@ void LoweringPreparePass::runOnOp(Operation *op) { } else if (auto globalDtor = fnOp.getGlobalDtorAttr()) { globalDtorList.push_back(globalDtor); } + } else if (auto fmodOp = dyn_cast(op)) { + lowerFModOp(fmodOp); + } else if (auto powOp = dyn_cast(op)) { + lowerPowOp(powOp); } } @@ -663,8 +712,8 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { if (isa( - op)) + IterEndOp, IterBeginOp, ArrayCtor, ArrayDtor, mlir::cir::FuncOp, + FModOp, PowOp>(op)) opsToTransform.push_back(op); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 4fde702e519d..ab8298a02b41 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3162,6 +3162,31 @@ class CIRCmpThreeWayOpLowering } }; +template +class CIRBinaryFPToFPBuiltinOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(CIROp op, + typename mlir::OpConversionPattern::OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = this->getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, resTy, adaptor.getLhs(), + adaptor.getRhs()); + return mlir::success(); + } +}; + +using CIRCopysignOpLowering = + CIRBinaryFPToFPBuiltinOpLowering; +using CIRFMaxOpLowering = + CIRBinaryFPToFPBuiltinOpLowering; +using CIRFMinOpLowering = + CIRBinaryFPToFPBuiltinOpLowering; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -3186,7 +3211,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, - CIRCmpThreeWayOpLowering>(converter, patterns.getContext()); + CIRCmpThreeWayOpLowering, CIRCopysignOpLowering, CIRFMaxOpLowering, + CIRFMinOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c index 82099f666f45..c47f390b8eac 100644 --- a/clang/test/CIR/CodeGen/builtin-floating-point.c +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-cir %s -o - | FileCheck %s -// RUN: %clang_cc1 -triple aarch64-apple-darwin-macho -ffast-math -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=AARCH64 +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t1.cir 2>&1 | FileCheck %s +// RUN: %clang_cc1 -triple aarch64-apple-darwin-macho -ffast-math -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t1.cir 2>&1 | FileCheck %s --check-prefix=AARCH64 // ceil @@ -616,3 +616,223 @@ long double call_truncl(long double f) { // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.trunc {{.+}} : !cir.long_double } + +// copysign + +float my_copysignf(float x, float y) { + return __builtin_copysignf(x, y); + // CHECK: cir.func @my_copysignf + // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.float +} + +double my_copysign(double x, double y) { + return __builtin_copysign(x, y); + // CHECK: cir.func @my_copysign + // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.double +} + +long double my_copysignl(long double x, long double y) { + return __builtin_copysignl(x, y); + // CHECK: cir.func @my_copysignl + // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double +} + +float copysignf(float, float); +double copysign(double, double); +long double copysignl(long double, long double); + +float call_copysignf(float x, float y) { + return copysignf(x, y); + // CHECK: cir.func @call_copysignf + // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.float +} + +double call_copysign(double x, double y) { + return copysign(x, y); + // CHECK: cir.func @call_copysign + // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.double +} + +long double call_copysignl(long double x, long double y) { + return copysignl(x, y); + // CHECK: cir.func @call_copysignl + // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double +} + +// fmax + +float my_fmaxf(float x, float y) { + return __builtin_fmaxf(x, y); + // CHECK: cir.func @my_fmaxf + // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.float +} + +double my_fmax(double x, double y) { + return __builtin_fmax(x, y); + // CHECK: cir.func @my_fmax + // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.double +} + +long double my_fmaxl(long double x, long double y) { + return __builtin_fmaxl(x, y); + // CHECK: cir.func @my_fmaxl + // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double +} + +float fmaxf(float, float); +double fmax(double, double); +long double fmaxl(long double, long double); + +float call_fmaxf(float x, float y) { + return fmaxf(x, y); + // CHECK: cir.func @call_fmaxf + // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.float +} + +double call_fmax(double x, double y) { + return fmax(x, y); + // CHECK: cir.func @call_fmax + // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.double +} + +long double call_fmaxl(long double x, long double y) { + return fmaxl(x, y); + // CHECK: cir.func @call_fmaxl + // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double +} + +// fmin + +float my_fminf(float x, float y) { + return __builtin_fminf(x, y); + // CHECK: cir.func @my_fminf + // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.float +} + +double my_fmin(double x, double y) { + return __builtin_fmin(x, y); + // CHECK: cir.func @my_fmin + // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.double +} + +long double my_fminl(long double x, long double y) { + return __builtin_fminl(x, y); + // CHECK: cir.func @my_fminl + // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double +} + +float fminf(float, float); +double fmin(double, double); +long double fminl(long double, long double); + +float call_fminf(float x, float y) { + return fminf(x, y); + // CHECK: cir.func @call_fminf + // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.float +} + +double call_fmin(double x, double y) { + return fmin(x, y); + // CHECK: cir.func @call_fmin + // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.double +} + +long double call_fminl(long double x, long double y) { + return fminl(x, y); + // CHECK: cir.func @call_fminl + // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double +} + +// fmod + +float my_fmodf(float x, float y) { + return __builtin_fmodf(x, y); + // CHECK: cir.func @my_fmodf + // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.float +} + +double my_fmod(double x, double y) { + return __builtin_fmod(x, y); + // CHECK: cir.func @my_fmod + // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.double +} + +long double my_fmodl(long double x, long double y) { + return __builtin_fmodl(x, y); + // CHECK: cir.func @my_fmodl + // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double +} + +float fmodf(float, float); +double fmod(double, double); +long double fmodl(long double, long double); + +float call_fmodf(float x, float y) { + return fmodf(x, y); + // CHECK: cir.func @call_fmodf + // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.float +} + +double call_fmod(double x, double y) { + return fmod(x, y); + // CHECK: cir.func @call_fmod + // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.double +} + +long double call_fmodl(long double x, long double y) { + return fmodl(x, y); + // CHECK: cir.func @call_fmodl + // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double +} + +// pow + +float my_powf(float x, float y) { + return __builtin_powf(x, y); + // CHECK: cir.func @my_powf + // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.float +} + +double my_pow(double x, double y) { + return __builtin_pow(x, y); + // CHECK: cir.func @my_pow + // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.double +} + +long double my_powl(long double x, long double y) { + return __builtin_powl(x, y); + // CHECK: cir.func @my_powl + // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double +} + +float powf(float, float); +double pow(double, double); +long double powl(long double, long double); + +float call_powf(float x, float y) { + return powf(x, y); + // CHECK: cir.func @call_powf + // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.float +} + +double call_pow(double x, double y) { + return pow(x, y); + // CHECK: cir.func @call_pow + // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.double +} + +long double call_powl(long double x, long double y) { + return powl(x, y); + // CHECK: cir.func @call_powl + // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double +} diff --git a/clang/test/CIR/Lowering/builtin-binary-fp2fp.c b/clang/test/CIR/Lowering/builtin-binary-fp2fp.c new file mode 100644 index 000000000000..acde798fdf11 --- /dev/null +++ b/clang/test/CIR/Lowering/builtin-binary-fp2fp.c @@ -0,0 +1,132 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +// copysign + +float my_copysignf(float x, float y) { + return __builtin_copysignf(x, y); +} + +// LLVM: define float @my_copysignf +// LLVM: %{{.+}} = call float @llvm.copysign.f32(float %{{.+}}, float %{{.+}}) +// LLVM: } + +double my_copysign(double x, double y) { + return __builtin_copysign(x, y); +} + +// LLVM: define double @my_copysign +// LLVM: %{{.+}} = call double @llvm.copysign.f64(double %{{.+}}, double %{{.+}}) +// LLVM: } + +long double my_copysignl(long double x, long double y) { + return __builtin_copysignl(x, y); +} + +// LLVM: define x86_fp80 @my_copysignl +// LLVM: %{{.+}} = call x86_fp80 @llvm.copysign.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) +// LLVM: } + +// fmax + +float my_fmaxf(float x, float y) { + return __builtin_fmaxf(x, y); +} + +// LLVM: define float @my_fmaxf +// LLVM: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) +// LLVM: } + +double my_fmax(double x, double y) { + return __builtin_fmax(x, y); +} + +// LLVM: define double @my_fmax +// LLVM: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) +// LLVM: } + +long double my_fmaxl(long double x, long double y) { + return __builtin_fmaxl(x, y); +} + +// LLVM: define x86_fp80 @my_fmaxl +// LLVM: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) +// LLVM: } + +// fmin + +float my_fminf(float x, float y) { + return __builtin_fminf(x, y); +} + +// LLVM: define float @my_fminf +// LLVM: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) +// LLVM: } + +double my_fmin(double x, double y) { + return __builtin_fmin(x, y); +} + +// LLVM: define double @my_fmin +// LLVM: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) +// LLVM: } + +long double my_fminl(long double x, long double y) { + return __builtin_fminl(x, y); +} + +// LLVM: define x86_fp80 @my_fminl +// LLVM: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) +// LLVM: } + +// fmod + +float my_fmodf(float x, float y) { + return __builtin_fmodf(x, y); +} + +// LLVM: define float @my_fmodf +// LLVM: %{{.+}} = call float @fmodf(float %{{.+}}, float %{{.+}}) +// LLVM: } + +double my_fmod(double x, double y) { + return __builtin_fmod(x, y); +} + +// LLVM: define double @my_fmod +// LLVM: %{{.+}} = call double @fmod(double %{{.+}}, double %{{.+}}) +// LLVM: } + +long double my_fmodl(long double x, long double y) { + return __builtin_fmodl(x, y); +} + +// LLVM: define x86_fp80 @my_fmodl +// LLVM: %{{.+}} = call x86_fp80 @fmodl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) +// LLVM: } + +// pow + +float my_powf(float x, float y) { + return __builtin_powf(x, y); +} + +// LLVM: define float @my_powf +// LLVM: %{{.+}} = call float @powf(float %{{.+}}, float %{{.+}}) +// LLVM: } + +double my_pow(double x, double y) { + return __builtin_pow(x, y); +} + +// LLVM: define double @my_pow +// LLVM: %{{.+}} = call double @pow(double %{{.+}}, double %{{.+}}) +// LLVM: } + +long double my_powl(long double x, long double y) { + return __builtin_powl(x, y); +} + +// LLVM: define x86_fp80 @my_powl +// LLVM: %{{.+}} = call x86_fp80 @powl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) +// LLVM: } From 8330b0c89cd9774e053739cdb5bcdc55f90b11a4 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 31 May 2024 14:17:21 -0400 Subject: [PATCH 1605/2301] [CIR][Interface] introduce CIRGlobalValueInterface for GlobalOp and FuncOp (#641) CIRGlobalValueInterface inherits from mlir::Symbol as it should, and GlobalOp and FuncOp now has interface mlir::Symbol through CIRGlobalValueInterface and this PR basically make function isDeclarationForLinker into the CIRGlobalValueInterface interface. We also change some call sites of isDeclaration to use CIRGlobalValueInterface when its appropriate. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 23 +++++------- .../clang/CIR/Interfaces/CIROpInterfaces.td | 35 ++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 11 +++--- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 17 ++++++--- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 26 +++++++------- 5 files changed, 69 insertions(+), 43 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 3855b41bd274..1d09eb64e129 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1872,7 +1872,10 @@ def TLSModel : I32EnumAttr< let cppNamespace = "::mlir::cir"; } -def GlobalOp : CIR_Op<"global", [Symbol, DeclareOpInterfaceMethods, NoRegionArguments]> { +def GlobalOp : CIR_Op<"global", + [DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, + NoRegionArguments]> { let summary = "Declares or defines a global variable"; let description = [{ The `cir.global` operation declares or defines a named global variable. @@ -1933,13 +1936,6 @@ def GlobalOp : CIR_Op<"global", [Symbol, DeclareOpInterfaceMethods { def FuncOp : CIR_Op<"func", [ AutomaticAllocationScope, CallableOpInterface, FunctionOpInterface, - IsolatedFromAbove, Symbol + DeclareOpInterfaceMethods, + IsolatedFromAbove ]> { let summary = "Declare or define a function"; let description = [{ @@ -2727,12 +2724,8 @@ def FuncOp : CIR_Op<"func", [ bool isDeclaration(); - // FIXME: should be shared with GlobalOp extra declaration. - bool isDeclarationForLinker() { - if (mlir::cir::isAvailableExternallyLinkage(getLinkage())) - return true; - - return isDeclaration(); + bool hasAvailableExternallyLinkage() { + return mlir::cir::isAvailableExternallyLinkage(getLinkage()); } }]; diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td index b08e07a63d67..8f1c63e1b024 100644 --- a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td @@ -10,24 +10,49 @@ #define MLIR_CIR_OP_INTERFACES include "mlir/IR/OpBase.td" +include "mlir/IR/SymbolInterfaces.td" include "mlir/Interfaces/CallInterfaces.td" let cppNamespace = "::mlir::cir" in { // The CIRCallOpInterface must be used instead of CallOpInterface when looking // at arguments and other bits of CallOp. This creates a level of abstraction // that's useful for handling indirect calls and other details. - def CIRCallOpInterface : OpInterface<"CIRCallOpInterface", [CallOpInterface]> { + def CIRCallOpInterface + : OpInterface<"CIRCallOpInterface", [CallOpInterface]> { let methods = [ InterfaceMethod<"", "mlir::Operation::operand_iterator", "arg_operand_begin", (ins)>, InterfaceMethod<"", "mlir::Operation::operand_iterator", "arg_operand_end", (ins)>, InterfaceMethod< - "Return the operand at index 'i', accounts for indirect call or " - "exception info", "mlir::Value", "getArgOperand", (ins "unsigned":$i)>, + "Return the operand at index 'i', accounts for indirect call or " + "exception info", + "mlir::Value", "getArgOperand", + (ins "unsigned" + : $i)>, InterfaceMethod< - "Return the number of operands, accounts for indirect call or " - "exception info", "unsigned", "getNumArgOperands", (ins)>, + "Return the number of operands, accounts for indirect call or " + "exception info", + "unsigned", "getNumArgOperands", (ins)>, + ]; + } + + def CIRGlobalValueInterface + : OpInterface<"CIRGlobalValueInterface", [Symbol]> { + + let methods = [ + InterfaceMethod<"", + "bool", "hasAvailableExternallyLinkage", (ins), [{}], + /*defaultImplementation=*/[{ return false; }] + >, + InterfaceMethod<"", + "bool", "isDeclarationForLinker", (ins), [{}], + /*defaultImplementation=*/[{ + if ($_op.hasAvailableExternallyLinkage()) + return true; + return $_op.isDeclaration(); + }] + >, ]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 31dba6be75eb..568628c5bc9e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -122,18 +122,19 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // Check if we have it already. StringRef MangledName = getMangledName(AliasDecl); auto Entry = getGlobalValue(MangledName); - auto fnOp = dyn_cast_or_null(Entry); - if (Entry && fnOp && !fnOp.isDeclaration()) + auto globalValue = dyn_cast(Entry); + if (Entry && globalValue && !globalValue.isDeclaration()) return false; if (Replacements.count(MangledName)) return false; - assert(fnOp && "only knows how to handle FuncOp"); + assert(globalValue && "only knows how to handle GlobalValue"); [[maybe_unused]] auto AliasValueType = getTypes().GetFunctionType(AliasDecl); // Find the referent. auto Aliasee = cast(GetAddrOfGlobal(TargetDecl)); - + auto AliaseeGV = dyn_cast_or_null( + GetAddrOfGlobal(TargetDecl)); // Instead of creating as alias to a linkonce_odr, replace all of the uses // of the aliasee. if (mlir::cir::isDiscardableIfUnused(Linkage) && @@ -161,7 +162,7 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // is // avaialable_externally, don't emit an alias. We can't emit aliases to // declarations; that's just not how aliases work. - if (Aliasee.isDeclarationForLinker()) + if (AliaseeGV && AliaseeGV.isDeclarationForLinker()) return true; // Don't create an alias to a linker weak symbol. This avoids producing diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 4a9b168e578b..105b64c49a6c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -444,10 +444,14 @@ static void emitConstructorDestructorAlias(CIRGenModule &CGM, // Does this function alias already exists? StringRef MangledName = CGM.getMangledName(AliasDecl); + auto globalValue = dyn_cast_or_null( + CGM.getGlobalValue(MangledName)); + if (globalValue && !globalValue.isDeclaration()) { + return; + } + auto Entry = dyn_cast_or_null(CGM.getGlobalValue(MangledName)); - if (Entry && !Entry.isDeclaration()) - return; // Retrieve aliasee info. auto Aliasee = @@ -2047,19 +2051,22 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, // EmitFundamentalRTTIDescriptors(RD); } + auto VTableAsGlobalValue = + dyn_cast(*VTable); + assert(VTableAsGlobalValue && "VTable must support CIRGlobalValueInterface"); + bool isDeclarationForLinker = VTableAsGlobalValue.isDeclarationForLinker(); // Always emit type metadata on non-available_externally definitions, and on // available_externally definitions if we are performing whole program // devirtualization. For WPD we need the type metadata on all vtable // definitions to ensure we associate derived classes with base classes // defined in headers but with a strong definition only in a shared // library. - if (!VTable.isDeclarationForLinker() || - CGM.getCodeGenOpts().WholeProgramVTables) { + if (!isDeclarationForLinker || CGM.getCodeGenOpts().WholeProgramVTables) { CGM.buildVTableTypeMetadata(RD, VTable, VTLayout); // For available_externally definitions, add the vtable to // @llvm.compiler.used so that it isn't deleted before whole program // analysis. - if (VTable.isDeclarationForLinker()) { + if (isDeclarationForLinker) { llvm_unreachable("NYI"); assert(CGM.getCodeGenOpts().WholeProgramVTables); assert(!UnimplementedFeature::addCompilerUsedGlobal()); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 292a9c22da15..16018fa80bbf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -470,11 +470,12 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, Op = GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/true, ForDefinition); - auto Fn = cast(Op); - // Already emitted. - if (!Fn.isDeclaration()) + auto globalVal = dyn_cast_or_null(Op); + if (globalVal && !globalVal.isDeclaration()) { + // Already emitted. return; - + } + auto Fn = cast(Op); setFunctionLinkage(GD, Fn); setGVProperties(Op, D); // TODO(cir): MaubeHandleStaticInExternC @@ -2435,18 +2436,17 @@ void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { // ways (e.g. by an extern inline function acquiring a strong function // redefinition). Just ignore those cases. // TODO: Not sure what to map this to for MLIR - if (auto Fn = dyn_cast(Op)) - if (!Fn.isDeclaration()) - return; - - // TODO(cir): create a global value trait that allow us to uniformly handle - // global variables and functions. + auto globalValueOp = Op; if (auto Gv = dyn_cast(Op)) { auto *result = mlir::SymbolTable::lookupSymbolIn(getModule(), Gv.getNameAttr()); - if (auto globalOp = dyn_cast(result)) - if (!globalOp.isDeclaration()) - return; + globalValueOp = result; + } + + if (auto cirGlobalValue = + dyn_cast(globalValueOp)) { + if (!cirGlobalValue.isDeclaration()) + return; } // If this is OpenMP, check if it is legal to emit this global normally. From 237022b18f78f0e75c34def4dc8a0b8405363fe3 Mon Sep 17 00:00:00 2001 From: Krito Date: Sat, 1 Jun 2024 06:40:25 +0800 Subject: [PATCH 1606/2301] [CIR][ThroughMLIR] lowering cir.bit.clz and cir.bit.ctz to MLIR (#645) This pr adds cir.bit.clz and cir.bit.ctz lowering to MLIR passes and test files. I will complete the lowering of other `cir.bit` operations in subsequent PRs. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 50 +++++++--- clang/test/CIR/Lowering/ThroughMLIR/bit.cir | 94 +++++++++++++++++++ 2 files changed, 132 insertions(+), 12 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/bit.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 50edb7a40f5b..f86038369010 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -400,6 +400,31 @@ class CIRSinOpLowering : public mlir::OpConversionPattern { } }; +template +class CIRBitOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(CIROp op, + typename mlir::OpConversionPattern::OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resultIntTy = this->getTypeConverter() + ->convertType(op.getType()) + .template cast(); + auto res = rewriter.create(op->getLoc(), adaptor.getInput()); + auto newOp = createIntCast(rewriter, res->getResult(0), resultIntTy, + /*isSigned=*/false); + rewriter.replaceOp(op, newOp); + return mlir::LogicalResult::success(); + } +}; + +using CIRBitClzOpLowering = + CIRBitOpLowering; +using CIRBitCtzOpLowering = + CIRBitOpLowering; + class CIRConstantOpLowering : public mlir::OpConversionPattern { public: @@ -1158,18 +1183,19 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns.add(converter, - patterns.getContext()); + patterns + .add( + converter, patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/bit.cir b/clang/test/CIR/Lowering/ThroughMLIR/bit.cir new file mode 100644 index 000000000000..da74ac48f518 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/bit.cir @@ -0,0 +1,94 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +!s16i = !cir.int +!s32i = !cir.int +!s64i = !cir.int +!u16i = !cir.int +!u32i = !cir.int +!u64i = !cir.int + + +// int clz_u16(unsigned short x) { +// return __builtin_clzs(x); +// } +cir.func @clz_u16(%arg : !u16i) { + %0 = cir.bit.clz(%arg : !u16i) : !s32i + cir.return +} + +// CHECK: func.func @clz_u16(%arg0: i16) { +// CHECK-NEXT: %[[CLZ_U16:.+]] = math.ctlz %arg0 : i16 +// CHECK-NEXT: %[[EXTUI_U16:.+]] = arith.extui %[[CLZ_U16]] : i16 to i32 +// CHECK-NEXT: return +// CHECK-NEXT: } + +// int clz_u32(unsigned x) { +// return __builtin_clz(x); +// } +cir.func @clz_u32(%arg : !u32i) { + %0 = cir.bit.clz(%arg : !u32i) : !s32i + cir.return +} + +// CHECK: func.func @clz_u32(%arg0: i32) { +// CHECK-NEXT: %[[CLZ_U32:.+]] = math.ctlz %arg0 : i32 +// CHECK-NEXT: %[[BITCAST_U32:.+]] = arith.bitcast %[[CLZ_U32]] : i32 to i32 +// CHECK-NEXT: return +// CHECK-NEXT: } + +// int clz_u64(unsigned long x) { +// return __builtin_clzl(x); +// } +cir.func @clz_u64(%arg : !u64i) { + %0 = cir.bit.clz(%arg : !u64i) : !s32i + cir.return +} + +// CHECK: func.func @clz_u64(%arg0: i64) { +// CHECK-NEXT: %[[CLZ_U64:.+]] = math.ctlz %arg0 : i64 +// CHECK-NEXT: %[[TRUNCI_U64:.+]] = arith.trunci %[[CLZ_U64]] : i64 to i32 +// CHECK-NEXT: return +// CHECK-NEXT: } + +// int ctz_u16(unsigned short x) { +// return __builtin_ctzs(x); +// } +cir.func @ctz_u16(%arg : !u16i) { + %0 = cir.bit.ctz(%arg : !u16i) : !s32i + cir.return +} + +// CHECK: func.func @ctz_u16(%arg0: i16) { +// CHECK-NEXT: %[[CTZ_U16:.+]] = math.cttz %arg0 : i16 +// CHECK-NEXT: %[[EXTUI_U16:.+]] = arith.extui %[[CTZ_U16]] : i16 to i32 +// CHECK-NEXT: return +// CHECK-NEXT: } + +// int ctz_u32(unsigned x) { +// return __builtin_ctz(x); +// } +cir.func @ctz_u32(%arg : !u32i) { + %0 = cir.bit.ctz(%arg : !u32i) : !s32i + cir.return +} + +// CHECK: func.func @ctz_u32(%arg0: i32) { +// CHECK-NEXT: %[[CTZ_U32:.+]] = math.cttz %arg0 : i32 +// CHECK-NEXT: %[[BITCAST_U32:.+]] = arith.bitcast %[[CTZ_U32]] : i32 to i32 +// CHECK-NEXT: return +// CHECK-NEXT: } + +// int ctz_u64(unsigned long x) { +// return __builtin_ctzl(x); +// } +cir.func @ctz_u64(%arg : !u64i) { + %0 = cir.bit.ctz(%arg : !u64i) : !s32i + cir.return +} + +// CHECK: func.func @ctz_u64(%arg0: i64) { +// CHECK-NEXT: %[[CTZ_U64:.+]] = math.cttz %arg0 : i64 +// CHECK-NEXT: %[[TRUNCI_U64:.+]] = arith.trunci %[[CTZ_U64]] : i64 to i32 +// CHECK-NEXT: return +// CHECK-NEXT: } \ No newline at end of file From 93bd4a97cf423c56b8de6ad1a92fcb2cae34e823 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 29 May 2024 15:02:23 -0700 Subject: [PATCH 1607/2301] [CIR][CIRGen] Aarch64 Builtins: add more load/store variants Now that alignment computation is correct for neon, add more neon types for load/store. --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 19 +- .../CIR/CodeGen/aarch64-neon-intrinsics.c | 222 ++++++++++-------- 2 files changed, 141 insertions(+), 100 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index a3e494574a12..783f40c7e589 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -1362,10 +1362,15 @@ static mlir::Type GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, case NeonTypeFlags::Int8: case NeonTypeFlags::Poly8: return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), - CGF->UInt8Ty, V1Ty ? 1 : (8 << IsQuad)); + TypeFlags.isUnsigned() ? CGF->UInt8Ty + : CGF->SInt8Ty, + V1Ty ? 1 : (8 << IsQuad)); case NeonTypeFlags::Int16: case NeonTypeFlags::Poly16: - llvm_unreachable("NYI"); + return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), + TypeFlags.isUnsigned() ? CGF->UInt16Ty + : CGF->SInt16Ty, + V1Ty ? 1 : (4 << IsQuad)); case NeonTypeFlags::BFloat16: if (AllowBFloatArgsAndRet) llvm_unreachable("NYI"); @@ -1377,10 +1382,16 @@ static mlir::Type GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, else llvm_unreachable("NYI"); case NeonTypeFlags::Int32: - llvm_unreachable("NYI"); + return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), + TypeFlags.isUnsigned() ? CGF->UInt32Ty + : CGF->SInt32Ty, + V1Ty ? 1 : (2 << IsQuad)); case NeonTypeFlags::Int64: case NeonTypeFlags::Poly64: - llvm_unreachable("NYI"); + return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), + TypeFlags.isUnsigned() ? CGF->UInt64Ty + : CGF->SInt64Ty, + V1Ty ? 1 : (1 << IsQuad)); case NeonTypeFlags::Poly128: // FIXME: i128 and f128 doesn't get fully support in Clang and llvm. // There is a lot of i128 and f128 API missing. diff --git a/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c b/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c index f55b9cd5772d..02aa70a4d628 100644 --- a/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c +++ b/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c @@ -8975,54 +8975,75 @@ uint8x16_t test_vld1q_u8(uint8_t const *a) { // LLVM: [[TMP1:%.*]] = load <16 x i8>, ptr %0, align 1, } -// NYI-LABEL: @test_vld1q_u16( -// NYI: [[TMP2:%.*]] = load <8 x i16>, ptr %a, align 2 -// NYI: ret <8 x i16> [[TMP2]] -// uint16x8_t test_vld1q_u16(uint16_t const *a) { -// return vld1q_u16(a); -// } +uint16x8_t test_vld1q_u16(uint16_t const *a) { + return vld1q_u16(a); + // CIR-LABEL: @test_vld1q_u16 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.load align(2) %[[CAST]] : !cir.ptr>, !cir.vector + + // LLVM-LABEL: @test_vld1q_u16 + // LLVM: [[TMP1:%.*]] = load <8 x i16>, ptr %0, align 2, +} -// NYI-LABEL: @test_vld1q_u32( -// NYI: [[TMP2:%.*]] = load <4 x i32>, ptr %a, align 4 -// NYI: ret <4 x i32> [[TMP2]] -// uint32x4_t test_vld1q_u32(uint32_t const *a) { -// return vld1q_u32(a); -// } +uint32x4_t test_vld1q_u32(uint32_t const *a) { + return vld1q_u32(a); + // CIR-LABEL: @test_vld1q_u32 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.load align(4) %[[CAST]] : !cir.ptr>, !cir.vector -// NYI-LABEL: @test_vld1q_u64( -// NYI: [[TMP2:%.*]] = load <2 x i64>, ptr %a, align 8 -// NYI: ret <2 x i64> [[TMP2]] -// uint64x2_t test_vld1q_u64(uint64_t const *a) { -// return vld1q_u64(a); -// } + // LLVM-LABEL: @test_vld1q_u32 + // LLVM: [[TMP1:%.*]] = load <4 x i32>, ptr %0, align 4, +} -// NYI-LABEL: @test_vld1q_s8( -// NYI: [[TMP1:%.*]] = load <16 x i8>, ptr %a, align 1 -// NYI: ret <16 x i8> [[TMP1]] -// int8x16_t test_vld1q_s8(int8_t const *a) { -// return vld1q_s8(a); -// } +uint64x2_t test_vld1q_u64(uint64_t const *a) { + return vld1q_u64(a); + // CIR-LABEL: @test_vld1q_u64 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.load align(8) %[[CAST]] : !cir.ptr>, !cir.vector -// NYI-LABEL: @test_vld1q_s16( -// NYI: [[TMP2:%.*]] = load <8 x i16>, ptr %a, align 2 -// NYI: ret <8 x i16> [[TMP2]] -// int16x8_t test_vld1q_s16(int16_t const *a) { -// return vld1q_s16(a); -// } + // LLVM-LABEL: @test_vld1q_u64 + // LLVM: [[TMP1:%.*]] = load <2 x i64>, ptr %0, align 8, +} -// NYI-LABEL: @test_vld1q_s32( -// NYI: [[TMP2:%.*]] = load <4 x i32>, ptr %a, align 4 -// NYI: ret <4 x i32> [[TMP2]] -// int32x4_t test_vld1q_s32(int32_t const *a) { -// return vld1q_s32(a); -// } +int8x16_t test_vld1q_s8(int8_t const *a) { + return vld1q_s8(a); + // CIR-LABEL: @test_vld1q_s8 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.load align(1) %[[CAST]] : !cir.ptr>, !cir.vector -// NYI-LABEL: @test_vld1q_s64( -// NYI: [[TMP2:%.*]] = load <2 x i64>, ptr %a, align 8 -// NYI: ret <2 x i64> [[TMP2]] -// int64x2_t test_vld1q_s64(int64_t const *a) { -// return vld1q_s64(a); -// } + // LLVM-LABEL: @test_vld1q_s8 + // LLVM: [[TMP1:%.*]] = load <16 x i8>, ptr %0, align 1, +} + +int16x8_t test_vld1q_s16(int16_t const *a) { + return vld1q_s16(a); + // CIR-LABEL: @test_vld1q_s16 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.load align(2) %[[CAST]] : !cir.ptr>, !cir.vector + + // LLVM-LABEL: @test_vld1q_s16 + // LLVM: [[TMP1:%.*]] = load <8 x i16>, ptr %0, align 2, +} + +int32x4_t test_vld1q_s32(int32_t const *a) { + return vld1q_s32(a); + // CIR-LABEL: @test_vld1q_s32 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.load align(4) %[[CAST]] : !cir.ptr>, !cir.vector + + // LLVM-LABEL: @test_vld1q_s32 + // LLVM: [[TMP1:%.*]] = load <4 x i32>, ptr %0, align 4, +} + +int64x2_t test_vld1q_s64(int64_t const *a) { + return vld1q_s64(a); + // CIR-LABEL: @test_vld1q_s64 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.load align(8) %[[CAST]] : !cir.ptr>, !cir.vector + + // LLVM-LABEL: @test_vld1q_s64 + // LLVM: [[TMP1:%.*]] = load <2 x i64>, ptr %0, align 8, +} // NYI-LABEL: @test_vld1q_f16( // NYI: [[TMP2:%.*]] = load <8 x half>, ptr %a, align 2 @@ -10187,66 +10208,75 @@ void test_vst1q_u8(uint8_t *a, uint8x16_t b) { // LLVM: store <16 x i8> %{{.*}}, ptr %0, align 1, } -// NYI-LABEL: @test_vst1q_u16( -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> -// NYI: store <8 x i16> [[TMP3]], ptr %a -// NYI: ret void -// void test_vst1q_u16(uint16_t *a, uint16x8_t b) { -// vst1q_u16(a, b); -// } +void test_vst1q_u16(uint16_t *a, uint16x8_t b) { + vst1q_u16(a, b); + // CIR-LABEL: @test_vst1q_u16 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.store align(2) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> -// NYI-LABEL: @test_vst1q_u32( -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> -// NYI: store <4 x i32> [[TMP3]], ptr %a -// NYI: ret void -// void test_vst1q_u32(uint32_t *a, uint32x4_t b) { -// vst1q_u32(a, b); -// } + // LLVM-LABEL: @test_vst1q_u16 + // LLVM: store <8 x i16> %{{.*}}, ptr %0, align 2, +} -// NYI-LABEL: @test_vst1q_u64( -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> -// NYI: store <2 x i64> [[TMP3]], ptr %a -// NYI: ret void -// void test_vst1q_u64(uint64_t *a, uint64x2_t b) { -// vst1q_u64(a, b); -// } +void test_vst1q_u32(uint32_t *a, uint32x4_t b) { + vst1q_u32(a, b); + // CIR-LABEL: @test_vst1q_u32 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.store align(4) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> -// NYI-LABEL: @test_vst1q_s8( -// NYI: store <16 x i8> %b, ptr %a -// NYI: ret void -// void test_vst1q_s8(int8_t *a, int8x16_t b) { -// vst1q_s8(a, b); -// } + // LLVM-LABEL: @test_vst1q_u32 + // LLVM: store <4 x i32> %{{.*}}, ptr %0, align 4, +} -// NYI-LABEL: @test_vst1q_s16( -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> -// NYI: store <8 x i16> [[TMP3]], ptr %a -// NYI: ret void -// void test_vst1q_s16(int16_t *a, int16x8_t b) { -// vst1q_s16(a, b); -// } +void test_vst1q_u64(uint64_t *a, uint64x2_t b) { + vst1q_u64(a, b); + // CIR-LABEL: @test_vst1q_u64 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.store align(8) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> -// NYI-LABEL: @test_vst1q_s32( -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> -// NYI: store <4 x i32> [[TMP3]], ptr %a -// NYI: ret void -// void test_vst1q_s32(int32_t *a, int32x4_t b) { -// vst1q_s32(a, b); -// } + // LLVM-LABEL: @test_vst1q_u64 + // LLVM: store <2 x i64> %{{.*}}, ptr %0, align 8, +} -// NYI-LABEL: @test_vst1q_s64( -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> -// NYI: store <2 x i64> [[TMP3]], ptr %a -// NYI: ret void -// void test_vst1q_s64(int64_t *a, int64x2_t b) { -// vst1q_s64(a, b); -// } +void test_vst1q_s8(int8_t *a, int8x16_t b) { + vst1q_s8(a, b); + // CIR-LABEL: @test_vst1q_s8 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.store align(1) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> + + // LLVM-LABEL: @test_vst1q_s8 + // LLVM: store <16 x i8> %{{.*}}, ptr %0, align 1, +} + +void test_vst1q_s16(int16_t *a, int16x8_t b) { + vst1q_s16(a, b); + // CIR-LABEL: @test_vst1q_s16 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.store align(2) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> + + // LLVM-LABEL: @test_vst1q_s16 + // LLVM: store <8 x i16> %{{.*}}, ptr %0, align 2, +} + +void test_vst1q_s32(int32_t *a, int32x4_t b) { + vst1q_s32(a, b); + // CIR-LABEL: @test_vst1q_s32 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.store align(4) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> + + // LLVM-LABEL: @test_vst1q_s32 + // LLVM: store <4 x i32> %{{.*}}, ptr %0, align 4, +} + +void test_vst1q_s64(int64_t *a, int64x2_t b) { + vst1q_s64(a, b); + // CIR-LABEL: @test_vst1q_s64 + // CIR: %[[CAST:.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr> + // CIR: cir.store align(8) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> + + // LLVM-LABEL: @test_vst1q_s64 + // LLVM: store <2 x i64> %{{.*}}, ptr %0, align 8, +} // NYI-LABEL: @test_vst1q_f16( // NYI: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8> From 142793006bc3cde2713a4c5889decb26ddc0e621 Mon Sep 17 00:00:00 2001 From: Ivan Murashko Date: Mon, 3 Jun 2024 21:32:19 +0100 Subject: [PATCH 1608/2301] [CIR][CodeGen] Get rid of ZeroInitConstOp (#646) mlir::cir::ZeroInitConstOp was replaced with llvm.mlir.zero resolves [#627](https://github.com/llvm/clangir/issues/627) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 18 ------------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 ++--- .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 2 +- clang/test/CIR/Lowering/const.cir | 4 +-- clang/test/CIR/Lowering/globals.cir | 2 +- .../test/CIR/Translation/zeroinitializer.cir | 27 ------------------- 6 files changed, 7 insertions(+), 52 deletions(-) delete mode 100644 clang/test/CIR/Translation/zeroinitializer.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1d09eb64e129..1c10924ace37 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3961,22 +3961,4 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", let hasVerifier = 0; } -//===----------------------------------------------------------------------===// -// Operations Lowered Directly to LLVM IR -// -// These operations are hacks to get around missing features in LLVM's dialect. -// Use it sparingly and remove it once the features are added. -//===----------------------------------------------------------------------===// - -def ZeroInitConstOp : CIR_Op<"llvmir.zeroinit", [Pure]>, - Results<(outs AnyType:$result)> { - let summary = "Zero initializes a constant value of a given type"; - let description = [{ - This operation circumvents the lack of a zeroinitializer operation in LLVM - Dialect. It can zeroinitialize any LLVM type. - }]; - let assemblyFormat = "attr-dict `:` type($result)"; - let hasVerifier = 0; -} - #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ab8298a02b41..36049d27cbfd 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -212,7 +212,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ZeroAttr zeroAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto loc = parentOp->getLoc(); - return rewriter.create( + return rewriter.create( loc, converter->convertType(zeroAttr.getType())); } @@ -279,7 +279,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, if (auto zeros = constArr.getTrailingZerosNum()) { auto arrayTy = constArr.getType(); - result = rewriter.create( + result = rewriter.create( loc, converter->convertType(arrayTy)); } else { result = rewriter.create(loc, llvmTy); @@ -3458,7 +3458,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { mlir::func::FuncDialect>(); // Allow operations that will be lowered directly to LLVM IR. - target.addLegalOp(); + target.addLegalOp(); getOperation()->removeAttr("cir.sob"); getOperation()->removeAttr("cir.lang"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index dac44ca4d8d0..1da4d7665f92 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -76,7 +76,7 @@ class CIRDialectLLVMIRTranslationInterface mlir::Operation *op, llvm::IRBuilderBase &builder, mlir::LLVM::ModuleTranslation &moduleTranslation) const final { - if (auto cirOp = llvm::dyn_cast(op)) + if (auto cirOp = llvm::dyn_cast(op)) moduleTranslation.mapValue(cirOp.getResult()) = llvm::Constant::getNullValue( moduleTranslation.convertType(cirOp.getType())); diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 2058a6cbd8b0..764089c1d6cb 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -14,7 +14,7 @@ module { %3 = cir.const #cir.const_array<[#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float]> : !cir.array // CHECK: llvm.mlir.constant(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) : !llvm.array<2 x f32> %4 = cir.const #cir.zero : !cir.array - // CHECK: cir.llvmir.zeroinit : !llvm.array<3 x i32> + // CHECK: llvm.mlir.zero : !llvm.array<3 x i32> cir.return } @@ -63,7 +63,7 @@ module { // CHECK: llvm.func @testArrWithTrailingZeros() // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 // CHECK: %1 = llvm.alloca %0 x !llvm.array<10 x i32> {alignment = 16 : i64} : (i64) -> !llvm.ptr - // CHECK: %2 = cir.llvmir.zeroinit : !llvm.array<10 x i32> + // CHECK: %2 = llvm.mlir.zero : !llvm.array<10 x i32> // CHECK: %3 = llvm.mlir.constant(1 : i32) : i32 // CHECK: %4 = llvm.insertvalue %3, %2[0] : !llvm.array<10 x i32> diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 7ad94e6faa9f..d8193b075d2a 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -143,7 +143,7 @@ module { // MLIR: } cir.global external @zeroStruct = #cir.zero : !ty_22Bar22 // MLIR: llvm.mlir.global external @zeroStruct() - // MLIR: %0 = cir.llvmir.zeroinit : !llvm.struct<"struct.Bar", (i32, i8)> + // MLIR: %0 = llvm.mlir.zero : !llvm.struct<"struct.Bar", (i32, i8)> // MLIR: llvm.return %0 : !llvm.struct<"struct.Bar", (i32, i8)> // MLIR: } cir.global common @comm = #cir.int<0> : !s32i diff --git a/clang/test/CIR/Translation/zeroinitializer.cir b/clang/test/CIR/Translation/zeroinitializer.cir deleted file mode 100644 index c6b92be604d5..000000000000 --- a/clang/test/CIR/Translation/zeroinitializer.cir +++ /dev/null @@ -1,27 +0,0 @@ -// RUN: cir-translate %s -cir-to-llvmir -o %t.ll -// RUN: FileCheck --input-file=%t.ll %s - -module { - // Should zero-initialize global structs initialized with cir.llvmir.zeroinit. - llvm.mlir.global external @bar() {addr_space = 0 : i32} : !llvm.struct<"struct.S", (i8, i32)> { - %0 = cir.llvmir.zeroinit : !llvm.struct<"struct.S", (i8, i32)> - llvm.return %0 : !llvm.struct<"struct.S", (i8, i32)> - } - // CHECK: @bar = global %struct.S zeroinitializer - - // Should null-initialize global pointer initialized with cir.llvmir.zeroinit. - llvm.mlir.global external @ptr() {addr_space = 0 : i32} : !llvm.ptr { - %0 = cir.llvmir.zeroinit : !llvm.ptr - llvm.return %0 : !llvm.ptr - } - // CHECK: @ptr = global ptr null - - // Should lower aggregates types with elements initialized with cir.llvmir.zeroinit. - llvm.mlir.global external @arr() {addr_space = 0 : i32} : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> { - %0 = llvm.mlir.undef : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> - %1 = cir.llvmir.zeroinit : !llvm.struct<"struct.S", (i8, i32)> - %2 = llvm.insertvalue %1, %0[0] : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> - llvm.return %2 : !llvm.array<1 x !llvm.struct<"struct.S", (i8, i32)>> - } - // CHECK: @arr = global [1 x %struct.S] zeroinitializer -} From 70f66551db3349a02f0956c6a6da10da1a5f68db Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Tue, 4 Jun 2024 05:35:39 +0800 Subject: [PATCH 1609/2301] [CIR][Pipeline] Support -fclangir-analysis-only (#638) Close https://github.com/llvm/clangir/issues/633. This patch introduces `-fclangir-analysis-only` option to allow the users to consume the AST to the CIR (and potential analysis passes, this can be done by specifying `-Xclang -fclangir-lifetime-check=""` now or some default value in following patches) and also generating the LLVM IR by the traditional code gen path. This will be helpful to use CIR with real world projects without worrying the correctness and completeness of CIR CodeGen part. --- .../clang/CIRFrontendAction/CIRGenAction.h | 3 +++ .../clang/CIRFrontendAction/CIRGenConsumer.h | 0 clang/include/clang/Driver/Options.td | 5 ++++ .../include/clang/Frontend/FrontendOptions.h | 7 +++++- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 8 ++++++ clang/lib/CodeGen/CMakeLists.txt | 8 ++++++ clang/lib/CodeGen/CodeGenAction.cpp | 25 +++++++++++++++---- clang/lib/Driver/ToolChains/Clang.cpp | 9 +++++++ clang/lib/Frontend/CompilerInvocation.cpp | 4 +-- clang/test/CIR/CodeGen/analysis-only.cpp | 8 ++++++ .../CIR/Transforms/lifetime-check-agg.cpp | 1 + clang/test/CIR/analysis-only.cpp | 2 ++ 12 files changed, 72 insertions(+), 8 deletions(-) create mode 100644 clang/include/clang/CIRFrontendAction/CIRGenConsumer.h create mode 100644 clang/test/CIR/CodeGen/analysis-only.cpp create mode 100644 clang/test/CIR/analysis-only.cpp diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h index 74d5e5e32611..bcfca9bfcd89 100644 --- a/clang/include/clang/CIRFrontendAction/CIRGenAction.h +++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h @@ -120,6 +120,9 @@ class EmitObjAction : public CIRGenAction { EmitObjAction(mlir::MLIRContext *mlirCtx = nullptr); }; +std::unique_ptr +createCIRAnalysisOnlyConsumer(clang::CompilerInstance &); + } // namespace cir #endif diff --git a/clang/include/clang/CIRFrontendAction/CIRGenConsumer.h b/clang/include/clang/CIRFrontendAction/CIRGenConsumer.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index a8651fcffb34..3a8e02d7dfef 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3104,6 +3104,11 @@ defm clangir_direct_lowering : BoolFOption<"clangir-direct-lowering", FrontendOpts<"ClangIRDirectLowering">, DefaultTrue, PosFlag, NegFlag>; +defm clangir_analysis_only : BoolFOption<"clangir-analysis-only", + FrontendOpts<"ClangIRAnalysisOnly">, DefaultFalse, + PosFlag, + NegFlag>; def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, Group, HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 5bff487c2068..5bcf873ff98f 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -451,6 +451,10 @@ class FrontendOptions { // Enable Clang IR call conv lowering pass. unsigned ClangIREnableCallConvLowering : 1; + // Enable Clang IR analysis only pipeline that uses tranditional code gen + // pipeline. + unsigned ClangIRAnalysisOnly : 1; + CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. @@ -650,7 +654,8 @@ class FrontendOptions { ClangIRDisablePasses(false), ClangIRDisableCIRVerifier(false), ClangIRDisableEmitCXXDefault(false), ClangIRLifetimeCheck(false), ClangIRIdiomRecognizer(false), ClangIRLibOpt(false), - TimeTraceGranularity(500), TimeTraceVerbose(false) {} + ClangIRAnalysisOnly(false), TimeTraceGranularity(500), + TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file /// extension. For example, "c" would return Language::C. diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 0a570a3c783c..0f6963c11fda 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -471,3 +471,11 @@ EmitLLVMAction::EmitLLVMAction(mlir::MLIRContext *_MLIRContext) void EmitObjAction::anchor() {} EmitObjAction::EmitObjAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitObj, _MLIRContext) {} + +std::unique_ptr +cir::createCIRAnalysisOnlyConsumer(clang::CompilerInstance &ci) { + return std::make_unique( + CIRGenAction::OutputType::None, ci, ci.getDiagnostics(), + &ci.getVirtualFileSystem(), ci.getHeaderSearchOpts(), ci.getCodeGenOpts(), + ci.getTargetOpts(), ci.getLangOpts(), ci.getFrontendOpts(), nullptr); +} diff --git a/clang/lib/CodeGen/CMakeLists.txt b/clang/lib/CodeGen/CMakeLists.txt index 868ec847b963..2327420ec673 100644 --- a/clang/lib/CodeGen/CMakeLists.txt +++ b/clang/lib/CodeGen/CMakeLists.txt @@ -54,6 +54,13 @@ if(MSVC AND NOT CMAKE_CXX_COMPILER_ID MATCHES Clang endif() endif() +set(conditional_link_libs) +if(CLANG_ENABLE_CIR) +list(APPEND conditional_link_libs + clangCIRFrontendAction + ) +endif() + add_clang_library(clangCodeGen ABIInfo.cpp ABIInfoImpl.cpp @@ -158,4 +165,5 @@ add_clang_library(clangCodeGen clangFrontend clangLex clangSerialization + ${conditional_link_libs} ) diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp index 7aa3639cabf3..0ff8ba3b7eda 100644 --- a/clang/lib/CodeGen/CodeGenAction.cpp +++ b/clang/lib/CodeGen/CodeGenAction.cpp @@ -21,6 +21,10 @@ #include "clang/Basic/LangStandard.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" +#include "clang/Config/config.h" +#if CLANG_ENABLE_CIR +#include "clang/CIRFrontendAction/CIRGenAction.h" +#endif #include "clang/CodeGen/BackendUtil.h" #include "clang/CodeGen/ModuleBuilder.h" #include "clang/Driver/DriverDiagnostic.h" @@ -989,14 +993,25 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { CI.getPreprocessor().addPPCallbacks(std::move(Callbacks)); } + std::vector> AdditionalConsumers; + AdditionalConsumers.reserve(2); + if (CI.getFrontendOpts().GenReducedBMI && !CI.getFrontendOpts().ModuleOutputPath.empty()) { - std::vector> Consumers(2); - Consumers[0] = std::make_unique( + + AdditionalConsumers.push_back(std::make_unique( CI.getPreprocessor(), CI.getModuleCache(), - CI.getFrontendOpts().ModuleOutputPath); - Consumers[1] = std::move(Result); - return std::make_unique(std::move(Consumers)); + CI.getFrontendOpts().ModuleOutputPath)); + } + +#if CLANG_ENABLE_CIR + if (CI.getFrontendOpts().ClangIRAnalysisOnly) + AdditionalConsumers.push_back(cir::createCIRAnalysisOnlyConsumer(CI)); +#endif + + if (!AdditionalConsumers.empty()) { + AdditionalConsumers.push_back(std::move(Result)); + return std::make_unique(std::move(AdditionalConsumers)); } return std::move(Result); diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index c883d88b51ba..f81689103e4e 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5263,6 +5263,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-fclangir-idiom-recognizer"); } + if (Args.hasArg(options::OPT_fclangir_analysis_only)) { + CmdArgs.push_back("-fclangir-analysis-only"); + + // TODO: We should pass some default analysis configuration here. + + // TODO2: Should we emit some diagnostics if the configurations conflict + // with each other? + } + if (IsOpenMPDevice) { // We have to pass the triple of the host if compiling for an OpenMP device. std::string NormalizedTriple = diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index a9307703916e..8e88e8cd79b6 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3128,8 +3128,8 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Args.hasArg(OPT_clangir_verify_diagnostics)) Opts.ClangIRVerifyDiags = true; - if (Args.hasArg(OPT_fclangir_call_conv_lowering)) - Opts.ClangIREnableCallConvLowering = true; + if (Args.hasArg(OPT_fclangir_analysis_only)) + Opts.ClangIRAnalysisOnly = true; if (const Arg *A = Args.getLastArg(OPT_fclangir_lifetime_check, OPT_fclangir_lifetime_check_EQ)) { diff --git a/clang/test/CIR/CodeGen/analysis-only.cpp b/clang/test/CIR/CodeGen/analysis-only.cpp new file mode 100644 index 000000000000..7f427f0de92f --- /dev/null +++ b/clang/test/CIR/CodeGen/analysis-only.cpp @@ -0,0 +1,8 @@ +// Check `-fclangir-analysis-only` would generate code correctly. +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir-analysis-only -std=c++20 \ +// RUN: -O2 -emit-llvm %s -o - | FileCheck %s + +extern "C" void foo() {} + +// CHECK: define{{.*}} @foo( + diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp index fb89c0e6fd8f..ebfe00c2ad56 100644 --- a/clang/test/CIR/Transforms/lifetime-check-agg.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir-analysis-only -fclangir-lifetime-check="history=all;remarks=all" %s -clangir-verify-diagnostics -emit-obj -o /dev/null typedef enum SType { INFO_ENUM_0 = 9, diff --git a/clang/test/CIR/analysis-only.cpp b/clang/test/CIR/analysis-only.cpp new file mode 100644 index 000000000000..7dc58250b91b --- /dev/null +++ b/clang/test/CIR/analysis-only.cpp @@ -0,0 +1,2 @@ +// RUN: %clang %s -fclangir-analysis-only -### -c %s 2>&1 | FileCheck %s +// CHECK: "-fclangir-analysis-only" From e7c5bb0cf092142fbd8e2ff3f544bd7e677fe8c3 Mon Sep 17 00:00:00 2001 From: Krito Date: Tue, 4 Jun 2024 05:37:07 +0800 Subject: [PATCH 1610/2301] [CIR][ThroughMLIR] lowering cir.bit.* to MLIR (#654) This pr adds cir.bit.ffs cir.bit.parity cir.bit.clrsb cir.bit.popcount lowering to MLIR passes and test files. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 144 +++++++++++++---- .../Lowering/ThroughMLIR/LowerToMLIRHelpers.h | 40 +++++ clang/test/CIR/Lowering/ThroughMLIR/bit.c | 153 ++++++++++++++++++ clang/test/CIR/Lowering/ThroughMLIR/bit.cir | 94 ----------- 4 files changed, 302 insertions(+), 129 deletions(-) create mode 100644 clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/bit.c delete mode 100644 clang/test/CIR/Lowering/ThroughMLIR/bit.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index f86038369010..201c8c65f717 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "LowerToMLIRHelpers.h" #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" #include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h" #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" @@ -321,27 +322,6 @@ class CIRExpOpLowering : public mlir::OpConversionPattern { } }; -static mlir::Value createIntCast(mlir::ConversionPatternRewriter &rewriter, - mlir::Value src, mlir::Type dstTy, - bool isSigned = false) { - auto srcTy = src.getType(); - assert(isa(srcTy)); - assert(isa(dstTy)); - - auto srcWidth = srcTy.cast().getWidth(); - auto dstWidth = dstTy.cast().getWidth(); - auto loc = src.getLoc(); - - if (dstWidth > srcWidth && isSigned) - return rewriter.create(loc, dstTy, src); - else if (dstWidth > srcWidth) - return rewriter.create(loc, dstTy, src); - else if (dstWidth < srcWidth) - return rewriter.create(loc, dstTy, src); - else - return rewriter.create(loc, dstTy, src); -} - class CIRShiftOpLowering : public mlir::OpConversionPattern { public: @@ -424,6 +404,99 @@ using CIRBitClzOpLowering = CIRBitOpLowering; using CIRBitCtzOpLowering = CIRBitOpLowering; +using CIRBitPopcountOpLowering = + CIRBitOpLowering; + +class CIRBitClrsbOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitClrsbOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto inputTy = adaptor.getInput().getType(); + auto zero = getConst(rewriter, op.getLoc(), inputTy, 0); + auto isNeg = rewriter.create( + op.getLoc(), + mlir::arith::CmpIPredicateAttr::get(rewriter.getContext(), + mlir::arith::CmpIPredicate::slt), + adaptor.getInput(), zero); + + auto negOne = getConst(rewriter, op.getLoc(), inputTy, -1); + auto flipped = rewriter.create( + op.getLoc(), adaptor.getInput(), negOne); + + auto select = rewriter.create( + op.getLoc(), isNeg, flipped, adaptor.getInput()); + + auto resTy = + getTypeConverter()->convertType(op.getType()).cast(); + auto clz = + rewriter.create(op->getLoc(), select); + auto newClz = createIntCast(rewriter, clz, resTy); + + auto one = getConst(rewriter, op.getLoc(), resTy, 1); + auto res = rewriter.create(op.getLoc(), newClz, one); + rewriter.replaceOp(op, res); + + return mlir::LogicalResult::success(); + } +}; + +class CIRBitFfsOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitFfsOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto inputTy = adaptor.getInput().getType(); + auto ctz = rewriter.create( + op.getLoc(), adaptor.getInput()); + auto newCtz = createIntCast(rewriter, ctz, resTy); + + auto one = getConst(rewriter, op.getLoc(), resTy, 1); + auto ctzAddOne = + rewriter.create(op.getLoc(), newCtz, one); + + auto zeroInputTy = getConst(rewriter, op.getLoc(), inputTy, 0); + auto isZero = rewriter.create( + op.getLoc(), + mlir::arith::CmpIPredicateAttr::get(rewriter.getContext(), + mlir::arith::CmpIPredicate::eq), + adaptor.getInput(), zeroInputTy); + + auto zeroResTy = getConst(rewriter, op.getLoc(), resTy, 0); + auto res = rewriter.create(op.getLoc(), isZero, + zeroResTy, ctzAddOne); + rewriter.replaceOp(op, res); + + return mlir::LogicalResult::success(); + } +}; + +class CIRBitParityOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BitParityOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto count = + rewriter.create(op.getLoc(), adaptor.getInput()); + auto countMod2 = rewriter.create( + op.getLoc(), count, + getConst(rewriter, op.getLoc(), count.getType(), 1)); + auto res = createIntCast(rewriter, countMod2, resTy); + rewriter.replaceOp(op, res); + return mlir::LogicalResult::success(); + } +}; class CIRConstantOpLowering : public mlir::OpConversionPattern { @@ -1183,19 +1256,20 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns - .add( - converter, patterns.getContext()); + patterns.add< + CIRCmpOpLowering, CIRCallOpLowering, CIRUnaryOpLowering, CIRBinOpLowering, + CIRLoadOpLowering, CIRConstantOpLowering, CIRStoreOpLowering, + CIRAllocaOpLowering, CIRFuncOpLowering, CIRScopeOpLowering, + CIRBrCondOpLowering, CIRTernaryOpLowering, CIRYieldOpLowering, + CIRCosOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, + CIRCastOpLowering, CIRPtrStrideOpLowering, CIRSqrtOpLowering, + CIRCeilOpLowering, CIRExp2OpLowering, CIRExpOpLowering, CIRFAbsOpLowering, + CIRFloorOpLowering, CIRLog10OpLowering, CIRLog2OpLowering, + CIRLogOpLowering, CIRRoundOpLowering, CIRPtrStrideOpLowering, + CIRSinOpLowering, CIRShiftOpLowering, CIRBitClzOpLowering, + CIRBitCtzOpLowering, CIRBitPopcountOpLowering, CIRBitClrsbOpLowering, + CIRBitFfsOpLowering, CIRBitParityOpLowering>(converter, + patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { @@ -1327,4 +1401,4 @@ mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, return theModule; } -} // namespace cir +} // namespace cir \ No newline at end of file diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h b/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h new file mode 100644 index 000000000000..753e6b7d0528 --- /dev/null +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h @@ -0,0 +1,40 @@ +#ifndef LLVM_CLANG_LIB_LOWERTOMLIRHELPERS_H +#define LLVM_CLANG_LIB_LOWERTOMLIRHELPERS_H +#include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/Transforms/DialectConversion.h" + +template +mlir::Value getConst(mlir::ConversionPatternRewriter &rewriter, + mlir::Location loc, mlir::Type ty, T value) { + assert(mlir::isa(ty) || mlir::isa(ty)); + if (mlir::isa(ty)) + return rewriter.create( + loc, ty, mlir::IntegerAttr::get(ty, value)); + return rewriter.create( + loc, ty, mlir::FloatAttr::get(ty, value)); +} + +mlir::Value createIntCast(mlir::ConversionPatternRewriter &rewriter, + mlir::Value src, mlir::Type dstTy, + bool isSigned = false) { + auto srcTy = src.getType(); + assert(mlir::isa(srcTy)); + assert(mlir::isa(dstTy)); + + auto srcWidth = srcTy.cast().getWidth(); + auto dstWidth = dstTy.cast().getWidth(); + auto loc = src.getLoc(); + + if (dstWidth > srcWidth && isSigned) + return rewriter.create(loc, dstTy, src); + else if (dstWidth > srcWidth) + return rewriter.create(loc, dstTy, src); + else if (dstWidth < srcWidth) + return rewriter.create(loc, dstTy, src); + else + return rewriter.create(loc, dstTy, src); +} + +#endif \ No newline at end of file diff --git a/clang/test/CIR/Lowering/ThroughMLIR/bit.c b/clang/test/CIR/Lowering/ThroughMLIR/bit.c new file mode 100644 index 000000000000..7d21f991215a --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/bit.c @@ -0,0 +1,153 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +int clz_u16(unsigned short x) { + return __builtin_clzs(x); +} +// CHECK: func.func @clz_u16(%arg0: i16{{.*}}) -> i32 { +// CHECK: %[[CTLZ:.+]] = math.ctlz %[[INPUT:.+]] : i16 +// CHECK: %[[EXTUI:.+]] = arith.extui %[[CTLZ]] : i16 to i32 +// CHECK: } + +int clz_u32(unsigned x) { + return __builtin_clz(x); +} +// CHECK: func.func @clz_u32(%arg0: i32{{.*}}) -> i32 { +// CHECK: %[[CTLZ:.+]] = math.ctlz %[[INPUT:.+]] : i32 +// CHECK: %[[BITCAST:.+]] = arith.bitcast %[[CTLZ]] : i32 to i32 +// CHECK: } + +int clz_u64(unsigned long x) { + return __builtin_clzl(x); +} +// CHECK: func.func @clz_u64(%arg0: i64{{.*}}) -> i32 { +// CHECK: %[[CTLZ:.+]] = math.ctlz %[[INPUT:.+]] : i64 +// CHECK: %[[TRUNCI:.+]] = arith.trunci %[[CTLZ]] : i64 to i32 +// CHECK: } + +int ctz_u16(unsigned short x) { + return __builtin_ctzs(x); +} +// CHECK: func.func @ctz_u16(%arg0: i16{{.*}}) -> i32 { +// CHECK: %[[CTTZ:.+]] = math.cttz %[[INPUT:.+]] : i16 +// CHECK: %[[EXTUI:.+]] = arith.extui %[[CTTZ]] : i16 to i32 +// CHECK: } + +int ctz_u32(unsigned x) { + return __builtin_ctz(x); +} +// CHECK: func.func @ctz_u32(%arg0: i32{{.*}}) -> i32 { +// CHECK: %[[CTTZ:.+]] = math.cttz %[[INPUT:.+]] : i32 +// CHECK: %[[BITCAST:.+]] = arith.bitcast %[[CTTZ]] : i32 to i32 +// CHECK: } + +int ctz_u64(unsigned long x) { + return __builtin_ctzl(x); +} +// CHECK: func.func @ctz_u64(%arg0: i64{{.*}}) -> i32 { +// CHECK: %[[CTTZ:.+]] = math.cttz %[[INPUT:.+]] : i64 +// CHECK: %[[TRUNCI:.+]] = arith.trunci %[[CTTZ]] : i64 to i32 +// CHECK: } + +int popcount_u16(unsigned short x) { + return __builtin_popcount(x); +} +// CHECK: func.func @popcount_u16(%arg0: i16{{.*}}) -> i32 { +// CHECK: %[[EXTUI:.+]] = arith.extui %[[INPUT:.+]] : i16 to i32 +// CHECK: %[[CTPOP:.+]] = math.ctpop %[[EXTUI]] : i32 +// CHECK: %[[BITCAST:.+]] = arith.bitcast %[[CTPOP]] : i32 to i32 +// CHECK: } + +int popcount_u32(unsigned x) { + return __builtin_popcount(x); +} +// CHECK: func.func @popcount_u32(%arg0: i32{{.*}}) -> i32 { +// CHECK: %[[CTPOP:.+]] = math.ctpop %[[INPUT:.+]] : i32 +// CHECK: %[[BITCAST:.+]] = arith.bitcast %[[CTPOP]] : i32 to i32 +// CHECK: } + +int popcount_u64(unsigned long x) { + return __builtin_popcountl(x); +} +// CHECK: func.func @popcount_u64(%arg0: i64{{.*}}) -> i32 { +// CHECK: %[[CTPOP:.+]] = math.ctpop %[[INPUT:.+]] : i64 +// CHECK: %[[TRUNCI:.+]] = arith.trunci %[[CTPOP]] : i64 to i32 +// CHECK: } + +int clrsb_s32(int x) { + return __builtin_clrsb(x); +} +// CHECK: func.func @clrsb_s32(%arg0: i32{{.*}}) -> i32 { +// CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +// CHECK: %[[CMP:.+]] = arith.cmpi slt, %[[INPUT:.+]], %[[C0_I32]] : i32 +// CHECK: %[[C_MINUS1_I32:.+]] = arith.constant -1 : i32 +// CHECK: %[[XORI:.+]] = arith.xori %[[INPUT]], %[[C_MINUS1_I32]] : i32 +// CHECK: %[[SELECT:.+]] = arith.select %[[CMP]], %[[XORI]], %[[INPUT]] : i32 +// CHECK: %[[CTLZ:.+]] = math.ctlz %[[SELECT]] : i32 +// CHECK: %[[BITCAST:.+]] = arith.bitcast %[[CTLZ]] : i32 to i32 +// CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +// CHECK: %[[SUBI:.+]] = arith.subi %[[BITCAST]], %[[C1_I32]] : i32 +// CHECK: } + +int clrsb_s64(long x) { + return __builtin_clrsbl(x); +} +// CHECK: func.func @clrsb_s64(%arg0: i64{{.*}}) -> i32 { +// CHECK: %[[C0_I64:.+]] = arith.constant 0 : i64 +// CHECK: %[[CMP:.+]] = arith.cmpi slt, %[[INPUT:.+]], %[[C0_I64]] : i64 +// CHECK: %[[C_MINUS1_I64:.+]] = arith.constant -1 : i64 +// CHECK: %[[XORI:.+]] = arith.xori %[[INPUT]], %[[C_MINUS1_I64]] : i64 +// CHECK: %[[SELECT:.+]] = arith.select %[[CMP]], %[[XORI]], %[[INPUT]] : i64 +// CHECK: %[[CTLZ:.+]] = math.ctlz %[[SELECT]] : i64 +// CHECK: %[[TRUNCI:.+]] = arith.trunci %[[CTLZ]] : i64 to i32 +// CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +// CHECK: %[[SUBI:.+]] = arith.subi %[[TRUNCI]], %[[C1_I32]] : i32 +// CHECK: } + +int ffs_s32(int x) { + return __builtin_ffs(x); +} +// CHECK: func.func @ffs_s32(%arg0: i32{{.*}}) -> i32 { +// CHECK: %[[CTTZ:.+]] = math.cttz %[[INPUT:.+]] : i32 +// CHECK-NEXT: %[[BITCAST:.+]] = arith.bitcast %[[CTTZ]] : i32 to i32 +// CHECK-NEXT: %[[C1_I32:.+]] = arith.constant 1 : i32 +// CHECK-NEXT: %[[ADDI:.+]] = arith.addi %[[BITCAST]], %[[C1_I32]] : i32 +// CHECK-NEXT: %[[C0_I32:.+]] = arith.constant 0 : i32 +// CHECK-NEXT: %[[CMPI:.+]] = arith.cmpi eq, %[[INPUT]], %[[C0_I32]] : i32 +// CHECK-NEXT: %[[C0_I32_1:.+]] = arith.constant 0 : i32 +// CHECK-NEXT: %[[SELECT:.+]] = arith.select %[[CMPI]], %[[C0_I32_1]], %[[ADDI]] : i32 +// CHECK: } + +int ffs_s64(long x) { + return __builtin_ffsl(x); +} +// CHECK: func.func @ffs_s64(%arg0: i64{{.*}}) -> i32 { +// CHECK: %[[CTTZ:.+]] = math.cttz %[[INPUT:.+]] : i64 +// CHECK-NEXT: %[[TRUNCI:.+]] = arith.trunci %[[CTTZ]] : i64 to i32 +// CHECK-NEXT: %[[C1_I32:.+]] = arith.constant 1 : i32 +// CHECK-NEXT: %[[ADDI:.+]] = arith.addi %[[TRUNCI]], %[[C1_I32]] : i32 +// CHECK-NEXT: %[[C0_I64:.+]] = arith.constant 0 : i64 +// CHECK-NEXT: %[[CMPI:.+]] = arith.cmpi eq, %[[INPUT]], %[[C0_I64]] : i64 +// CHECK-NEXT: %[[C0_I32:.+]] = arith.constant 0 : i32 +// CHECK-NEXT: %[[SELECT:.+]] = arith.select %[[CMPI]], %[[C0_I32]], %[[ADDI]] : i32 +// CHECK: } + +int parity_u32(unsigned x) { + return __builtin_parity(x); +} +// CHECK: func.func @parity_u32(%arg0: i32{{.*}}) -> i32 { +// CHECK: %[[CTPOP:.+]] = math.ctpop %[[INPUT:.+]] : i32 +// CHECK-NEXT: %[[C1_I32:.+]] = arith.constant 1 : i32 +// CHECK-NEXT: %[[ANDI:.+]] = arith.andi %[[CTPOP]], %[[C1_I32]] : i32 +// CHECK-NEXT: %[[BITCAST:.+]] = arith.bitcast %[[ANDI]] : i32 to i32 +// CHECK: } + +int parity_u64(unsigned long x) { + return __builtin_parityl(x); +} +// CHECK: func.func @parity_u64(%arg0: i64{{.*}}) -> i32 { +// CHECK: %[[CTPOP:.+]] = math.ctpop %[[INPUT:.+]] : i64 +// CHECK-NEXT: %[[C1_I64:.+]] = arith.constant 1 : i64 +// CHECK-NEXT: %[[ANDI:.+]] = arith.andi %[[CTPOP]], %[[C1_I64]] : i64 +// CHECK-NEXT: %[[TRUNCI:.+]] = arith.trunci %[[ANDI]] : i64 to i32 +// CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/ThroughMLIR/bit.cir b/clang/test/CIR/Lowering/ThroughMLIR/bit.cir deleted file mode 100644 index da74ac48f518..000000000000 --- a/clang/test/CIR/Lowering/ThroughMLIR/bit.cir +++ /dev/null @@ -1,94 +0,0 @@ -// RUN: cir-opt %s -cir-to-mlir -o %t.mlir -// RUN: FileCheck %s --input-file %t.mlir - -!s16i = !cir.int -!s32i = !cir.int -!s64i = !cir.int -!u16i = !cir.int -!u32i = !cir.int -!u64i = !cir.int - - -// int clz_u16(unsigned short x) { -// return __builtin_clzs(x); -// } -cir.func @clz_u16(%arg : !u16i) { - %0 = cir.bit.clz(%arg : !u16i) : !s32i - cir.return -} - -// CHECK: func.func @clz_u16(%arg0: i16) { -// CHECK-NEXT: %[[CLZ_U16:.+]] = math.ctlz %arg0 : i16 -// CHECK-NEXT: %[[EXTUI_U16:.+]] = arith.extui %[[CLZ_U16]] : i16 to i32 -// CHECK-NEXT: return -// CHECK-NEXT: } - -// int clz_u32(unsigned x) { -// return __builtin_clz(x); -// } -cir.func @clz_u32(%arg : !u32i) { - %0 = cir.bit.clz(%arg : !u32i) : !s32i - cir.return -} - -// CHECK: func.func @clz_u32(%arg0: i32) { -// CHECK-NEXT: %[[CLZ_U32:.+]] = math.ctlz %arg0 : i32 -// CHECK-NEXT: %[[BITCAST_U32:.+]] = arith.bitcast %[[CLZ_U32]] : i32 to i32 -// CHECK-NEXT: return -// CHECK-NEXT: } - -// int clz_u64(unsigned long x) { -// return __builtin_clzl(x); -// } -cir.func @clz_u64(%arg : !u64i) { - %0 = cir.bit.clz(%arg : !u64i) : !s32i - cir.return -} - -// CHECK: func.func @clz_u64(%arg0: i64) { -// CHECK-NEXT: %[[CLZ_U64:.+]] = math.ctlz %arg0 : i64 -// CHECK-NEXT: %[[TRUNCI_U64:.+]] = arith.trunci %[[CLZ_U64]] : i64 to i32 -// CHECK-NEXT: return -// CHECK-NEXT: } - -// int ctz_u16(unsigned short x) { -// return __builtin_ctzs(x); -// } -cir.func @ctz_u16(%arg : !u16i) { - %0 = cir.bit.ctz(%arg : !u16i) : !s32i - cir.return -} - -// CHECK: func.func @ctz_u16(%arg0: i16) { -// CHECK-NEXT: %[[CTZ_U16:.+]] = math.cttz %arg0 : i16 -// CHECK-NEXT: %[[EXTUI_U16:.+]] = arith.extui %[[CTZ_U16]] : i16 to i32 -// CHECK-NEXT: return -// CHECK-NEXT: } - -// int ctz_u32(unsigned x) { -// return __builtin_ctz(x); -// } -cir.func @ctz_u32(%arg : !u32i) { - %0 = cir.bit.ctz(%arg : !u32i) : !s32i - cir.return -} - -// CHECK: func.func @ctz_u32(%arg0: i32) { -// CHECK-NEXT: %[[CTZ_U32:.+]] = math.cttz %arg0 : i32 -// CHECK-NEXT: %[[BITCAST_U32:.+]] = arith.bitcast %[[CTZ_U32]] : i32 to i32 -// CHECK-NEXT: return -// CHECK-NEXT: } - -// int ctz_u64(unsigned long x) { -// return __builtin_ctzl(x); -// } -cir.func @ctz_u64(%arg : !u64i) { - %0 = cir.bit.ctz(%arg : !u64i) : !s32i - cir.return -} - -// CHECK: func.func @ctz_u64(%arg0: i64) { -// CHECK-NEXT: %[[CTZ_U64:.+]] = math.cttz %arg0 : i64 -// CHECK-NEXT: %[[TRUNCI_U64:.+]] = arith.trunci %[[CTZ_U64]] : i64 to i32 -// CHECK-NEXT: return -// CHECK-NEXT: } \ No newline at end of file From 4f8f5bc620df9c7d45ca9f26752a7ffad603fef8 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 4 Jun 2024 00:49:11 +0300 Subject: [PATCH 1611/2301] [CIR][CIRGen] Builtins: add __sync_fetch_and_add (#631) This PR adds support for atomic `__sync_fetch_and_add`. Basically it's a copy-pasta from the original `codegen`. The only thing that I doubt about is what exact operation I need to create in CIR. The first approach I used was to create `AtomicRMW` operation in CIR. But as far as I see I can use the existing `AtomicFetch` instead. Is it correct? or it's better to add a new op here? --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 91 +++++++++++++++++++++++++ clang/test/CIR/CodeGen/atomic.cpp | 43 +++++++++++- 2 files changed, 133 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 0e801d05e41e..02218a691b7a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -26,6 +26,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/TargetBuiltins.h" +#include "clang/Frontend/FrontendDiagnostic.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Value.h" @@ -167,6 +168,86 @@ EncompassingIntegerType(ArrayRef Types) { return {Width, Signed}; } +/// Emit the conversions required to turn the given value into an +/// integer of the given size. +static mlir::Value buildToInt(CIRGenFunction &CGF, mlir::Value v, QualType t, + mlir::cir::IntType intType) { + v = CGF.buildToMemory(v, t); + + if (isa(v.getType())) + return CGF.getBuilder().createPtrToInt(v, intType); + + assert(v.getType() == intType); + return v; +} + +static mlir::Value buildFromInt(CIRGenFunction &CGF, mlir::Value v, QualType t, + mlir::Type resultType) { + v = CGF.buildFromMemory(v, t); + + if (isa(resultType)) + return CGF.getBuilder().createIntToPtr(v, resultType); + + assert(v.getType() == resultType); + return v; +} + +static Address checkAtomicAlignment(CIRGenFunction &CGF, const CallExpr *E) { + ASTContext &ctx = CGF.getContext(); + Address ptr = CGF.buildPointerWithAlignment(E->getArg(0)); + unsigned bytes = + isa(ptr.getElementType()) + ? ctx.getTypeSizeInChars(ctx.VoidPtrTy).getQuantity() + : CGF.CGM.getDataLayout().getTypeSizeInBits(ptr.getElementType()) / 8; + unsigned align = ptr.getAlignment().getQuantity(); + if (align % bytes != 0) { + DiagnosticsEngine &diags = CGF.CGM.getDiags(); + diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned); + // Force address to be at least naturally-aligned. + return ptr.withAlignment(CharUnits::fromQuantity(bytes)); + } + return ptr; +} + +/// Utility to insert an atomic instruction based on Intrinsic::ID +/// and the expression node. +static mlir::Value +makeBinaryAtomicValue(CIRGenFunction &cgf, mlir::cir::AtomicFetchKind kind, + const CallExpr *expr, + mlir::cir::MemOrder ordering = + mlir::cir::MemOrder::SequentiallyConsistent) { + + QualType typ = expr->getType(); + + assert(expr->getArg(0)->getType()->isPointerType()); + assert(cgf.getContext().hasSameUnqualifiedType( + typ, expr->getArg(0)->getType()->getPointeeType())); + assert( + cgf.getContext().hasSameUnqualifiedType(typ, expr->getArg(1)->getType())); + + Address destAddr = checkAtomicAlignment(cgf, expr); + auto &builder = cgf.getBuilder(); + auto *ctxt = builder.getContext(); + auto intType = builder.getSIntNTy(cgf.getContext().getTypeSize(typ)); + mlir::Value val = cgf.buildScalarExpr(expr->getArg(1)); + mlir::Type valueType = val.getType(); + val = buildToInt(cgf, val, typ, intType); + + auto fetchAttr = + mlir::cir::AtomicFetchKindAttr::get(builder.getContext(), kind); + auto rmwi = builder.create( + cgf.getLoc(expr->getSourceRange()), destAddr.emitRawPointer(), val, kind, + ordering, false, /* is volatile */ + true); /* fetch first */ + return buildFromInt(cgf, rmwi->getResult(0), typ, valueType); +} + +static RValue buildBinaryAtomic(CIRGenFunction &CGF, + mlir::cir::AtomicFetchKind kind, + const CallExpr *E) { + return RValue::get(makeBinaryAtomicValue(CGF, kind, E)); +} + RValue CIRGenFunction::buildRotate(const CallExpr *E, bool IsRotateRight) { auto src = buildScalarExpr(E->getArg(0)); auto shiftAmt = buildScalarExpr(E->getArg(1)); @@ -909,6 +990,16 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, builder.createBitcast(AllocaAddr, builder.getVoidPtrTy())); } + case Builtin::BI__sync_fetch_and_add: + llvm_unreachable("Shouldn't make it through sema"); + case Builtin::BI__sync_fetch_and_add_1: + case Builtin::BI__sync_fetch_and_add_2: + case Builtin::BI__sync_fetch_and_add_4: + case Builtin::BI__sync_fetch_and_add_8: + case Builtin::BI__sync_fetch_and_add_16: { + return buildBinaryAtomic(*this, mlir::cir::AtomicFetchKind::Add, E); + } + case Builtin::BI__builtin_add_overflow: case Builtin::BI__builtin_sub_overflow: case Builtin::BI__builtin_mul_overflow: { diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 3a9b60d8facf..9afda8ba5120 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -337,4 +337,45 @@ void incdec() { // LLVM-LABEL: @_Z6incdecv // LLVM: atomicrmw add ptr {{.*}}, i32 {{.*}} monotonic, align 4 -// LLVM: atomicrmw sub ptr {{.*}}, i32 {{.*}} monotonic, align 4 \ No newline at end of file +// LLVM: atomicrmw sub ptr {{.*}}, i32 {{.*}} monotonic, align 4 + +void inc_int(int* a, int b) { + int c = __sync_fetch_and_add(a, b); +} +// CHECK-LABEL: @_Z7inc_int +// CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CHECK: %[[VAL:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[RES:.*]] = cir.atomic.fetch(add, %[[PTR]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) fetch_first : !s32i +// CHECK: cir.store %[[RES]], {{.*}} : !s32i, !cir.ptr + +// LLVM-LABEL: @_Z7inc_int +// LLVM: atomicrmw add ptr {{.*}}, i32 {{.*}} seq_cst, align 4 + + +// CHECK-LABEL: @_Z8inc_long +// CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !s64i, seq_cst) fetch_first : !s64i + +// LLVM-LABEL: @_Z8inc_long +// LLVM: atomicrmw add ptr {{.*}}, i64 {{.*}} seq_cst, align 8 + +void inc_long(long* a, long b) { + long c = __sync_fetch_and_add(a, 2); +} + +// CHECK-LABEL: @_Z9inc_short +// CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !s16i, seq_cst) fetch_first : !s16i + +// LLVM-LABEL: @_Z9inc_short +// LLVM: atomicrmw add ptr {{.*}}, i16 {{.*}} seq_cst, align 2 +void inc_short(short* a, short b) { + short c = __sync_fetch_and_add(a, 2); +} + +// CHECK-LABEL: @_Z8inc_byte +// CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !s8i, seq_cst) fetch_first : !s8i + +// LLVM-LABEL: @_Z8inc_byte +// LLVM: atomicrmw add ptr {{.*}}, i8 {{.*}} seq_cst, align 1 +void inc_byte(char* a, char b) { + char c = __sync_fetch_and_add(a, b); +} \ No newline at end of file From 1324cd70385afd06647ad3fea274b1f30d6ee201 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Mon, 3 Jun 2024 19:50:03 -0300 Subject: [PATCH 1612/2301] [CIR] Centralize feature guarding (#649) Moves all feature guarding static methods into a to a single header file, centralizing the tracking of missing features in a common place regardless of where it impacts the compilation pipeline. It also moves the feature guarding logic into CIR's root include folder so that any CIR library may use it. --- .../clang/CIR/MissingFeatures.h} | 35 ++-- clang/lib/CIR/CodeGen/CIRAsm.cpp | 18 +- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 14 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 14 +- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 8 +- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 16 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 38 ++-- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 42 ++-- clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 14 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 182 +++++++++--------- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 40 ++-- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 18 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 49 +++-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 62 +++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 8 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 42 ++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 68 ++++--- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp | 18 +- clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h | 2 +- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 44 ++--- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 4 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 4 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 3 +- clang/lib/CIR/Dialect/IR/MissingFeatures.h | 43 ----- .../LoweringPrepareAArch64CXXABI.cpp | 2 +- .../LoweringPrepareItaniumCXXABI.cpp | 2 +- 36 files changed, 392 insertions(+), 426 deletions(-) rename clang/{lib/CIR/CodeGen/UnimplementedFeatureGuarding.h => include/clang/CIR/MissingFeatures.h} (86%) delete mode 100644 clang/lib/CIR/Dialect/IR/MissingFeatures.h diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/include/clang/CIR/MissingFeatures.h similarity index 86% rename from clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h rename to clang/include/clang/CIR/MissingFeatures.h index 09ab264be1ad..439c57afb6a0 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -1,4 +1,4 @@ -//===---- UnimplementedFeatureGuarding.h - Checks against NYI ---*- C++ -*-===// +//===---- MissingFeatures.h - Checks for unimplemented features -*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -7,16 +7,17 @@ //===----------------------------------------------------------------------===// // // This file introduces some helper classes to guard against features that -// CodeGen supports that we do not have and also do not have great ways to +// CIR dialect supports that we do not have and also do not have great ways to // assert against. // //===----------------------------------------------------------------------===// -#ifndef LLVM_CLANG_LIB_CIR_UFG -#define LLVM_CLANG_LIB_CIR_UFG +#ifndef CLANG_CIR_MISSINGFEATURES_H +#define CLANG_CIR_MISSINGFEATURES_H namespace cir { -struct UnimplementedFeature { + +struct MissingFeatures { // TODO(CIR): Implement the CIRGenFunction::buildTypeCheck method that handles // sanitizer related type check features static bool buildTypeCheck() { return false; } @@ -169,12 +170,24 @@ struct UnimplementedFeature { static bool shouldInstrumentFunction() { return false; } // Inline assembly - static bool asm_goto() { return false; } - static bool asm_unwind_clobber() { return false; } - static bool asm_memory_effects() { return false; } - static bool asm_vector_type() { return false; } - static bool asm_llvm_assume() { return false; } + static bool asmGoto() { return false; } + static bool asmUnwindClobber() { return false; } + static bool asmMemoryEffects() { return false; } + static bool asmVectorType() { return false; } + static bool asmLLVMAssume() { return false; } + + // C++ ABI support + static bool handleBigEndian() { return false; } + static bool handleAArch64Indirect() { return false; } + static bool classifyArgumentTypeForAArch64() { return false; } + static bool supportgetCoerceToTypeForAArch64() { return false; } + static bool supportTySizeQueryForAArch64() { return false; } + static bool supportTyAlignQueryForAArch64() { return false; } + static bool supportisHomogeneousAggregateQueryForAArch64() { return false; } + static bool supportisEndianQueryForAArch64() { return false; } + static bool supportisAggregateTypeForABIAArch64() { return false; } }; + } // namespace cir -#endif +#endif // CLANG_CIR_MISSINGFEATURES_H diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 27193f718ece..ee76be657c23 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -3,7 +3,7 @@ #include "CIRGenFunction.h" #include "TargetInfo.h" -#include "UnimplementedFeatureGuarding.h" +#include "clang/CIR/MissingFeatures.h" using namespace cir; using namespace clang; @@ -285,7 +285,7 @@ static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S, mlir::Type TruncTy = ResultTruncRegTypes[i]; if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) { - assert(!UnimplementedFeature::asm_llvm_assume()); + assert(!MissingFeatures::asmLLVMAssume()); } // If the result type of the LLVM IR asm doesn't match the result type of @@ -311,7 +311,7 @@ static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S, } else if (isa(TruncTy)) { Tmp = Builder.createIntCast(Tmp, TruncTy); } else if (false /*TruncTy->isVectorTy()*/) { - assert(!UnimplementedFeature::asm_vector_type()); + assert(!MissingFeatures::asmVectorType()); } } @@ -468,7 +468,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { } // Update largest vector width for any vector types. - assert(!UnimplementedFeature::asm_vector_type()); + assert(!MissingFeatures::asmVectorType()); } else { Address DestAddr = Dest.getAddress(); @@ -504,7 +504,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { Arg = builder.createBitcast(Arg, AdjTy); // Update largest vector width for any vector types. - assert(!UnimplementedFeature::asm_vector_type()); + assert(!MissingFeatures::asmVectorType()); // Only tie earlyclobber physregs. if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber())) @@ -593,7 +593,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { << InputExpr->getType() << InputConstraint; // Update largest vector width for any vector types. - assert(!UnimplementedFeature::asm_vector_type()); + assert(!MissingFeatures::asmVectorType()); ArgTypes.push_back(Arg.getType()); ArgElemTypes.push_back(ArgElemType); @@ -636,11 +636,11 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { HasSideEffect, inferFlavor(CGM, S), mlir::ArrayAttr()); if (false /*IsGCCAsmGoto*/) { - assert(!UnimplementedFeature::asm_goto()); + assert(!MissingFeatures::asmGoto()); } else if (HasUnwindClobber) { - assert(!UnimplementedFeature::asm_unwind_clobber()); + assert(!MissingFeatures::asmUnwindClobber()); } else { - assert(!UnimplementedFeature::asm_memory_effects()); + assert(!MissingFeatures::asmMemoryEffects()); mlir::Value result; if (IA.getNumResults()) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 9364aa393597..1b1ca28ea9b5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -16,7 +16,6 @@ #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" #include "TargetInfo.h" -#include "UnimplementedFeatureGuarding.h" #include "clang/AST/ASTContext.h" #include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" @@ -24,6 +23,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "clang/Frontend/FrontendDiagnostic.h" #include "llvm/Support/ErrorHandling.h" @@ -440,7 +440,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, mlir::Value IsWeak, mlir::Value FailureOrder, uint64_t Size, mlir::cir::MemOrder Order, uint8_t Scope) { - assert(!UnimplementedFeature::syncScopeID()); + assert(!MissingFeatures::syncScopeID()); StringRef Op; auto &builder = CGF.getBuilder(); @@ -487,7 +487,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__scoped_atomic_load: { auto *load = builder.createLoad(loc, Ptr).getDefiningOp(); // FIXME(cir): add scope information. - assert(!UnimplementedFeature::syncScopeID()); + assert(!MissingFeatures::syncScopeID()); load->setAttr("mem_order", orderAttr); if (E->isVolatile()) load->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); @@ -512,7 +512,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__scoped_atomic_store_n: { auto loadVal1 = builder.createLoad(loc, Val1); // FIXME(cir): add scope information. - assert(!UnimplementedFeature::syncScopeID()); + assert(!MissingFeatures::syncScopeID()); builder.createStore(loc, loadVal1, Ptr, E->isVolatile(), /*alignment=*/mlir::IntegerAttr{}, orderAttr); return; @@ -685,7 +685,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, // LLVM atomic instructions always have synch scope. If clang atomic // expression has no scope operand, use default LLVM synch scope. if (!ScopeModel) { - assert(!UnimplementedFeature::syncScopeID()); + assert(!MissingFeatures::syncScopeID()); buildAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, Order, /*FIXME(cir): LLVM default scope*/ 1); return; @@ -693,7 +693,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, // Handle constant scope. if (getConstOpIntAttr(Scope)) { - assert(!UnimplementedFeature::syncScopeID()); + assert(!MissingFeatures::syncScopeID()); llvm_unreachable("NYI"); return; } @@ -1289,7 +1289,7 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, store.setIsVolatile(true); // DecorateInstructionWithTBAA - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 14eed26e1019..705a057e35b0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -12,7 +12,7 @@ #include "Address.h" #include "CIRGenRecordLayout.h" #include "CIRGenTypeCache.h" -#include "UnimplementedFeatureGuarding.h" +#include "clang/CIR/MissingFeatures.h" #include "clang/AST/Decl.h" #include "clang/AST/Type.h" @@ -406,7 +406,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special // type so it's a bit more clear and C++ idiomatic. auto fnTy = mlir::cir::FuncType::get({}, getUInt32Ty(), isVarArg); - assert(!UnimplementedFeature::isVarArg()); + assert(!MissingFeatures::isVarArg()); return getPointerTo(getPointerTo(fnTy)); } @@ -639,11 +639,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } mlir::Value createFSub(mlir::Value lhs, mlir::Value rhs) { - assert(!UnimplementedFeature::metaDataNode()); + assert(!MissingFeatures::metaDataNode()); if (IsFPConstrained) llvm_unreachable("Constrained FP NYI"); - assert(!UnimplementedFeature::foldBinOpFMF()); + assert(!MissingFeatures::foldBinOpFMF()); return create(lhs.getLoc(), mlir::cir::BinOpKind::Sub, lhs, rhs); } @@ -660,7 +660,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value createDynCastToVoid(mlir::Location loc, mlir::Value src, bool vtableUseRelativeLayout) { // TODO(cir): consider address space here. - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); auto destTy = getVoidPtrTy(); return create( loc, destTy, mlir::cir::DynamicCastKind::ptr, src, @@ -772,7 +772,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value ptr, llvm::MaybeAlign align) { // TODO: make sure callsites shouldn't be really passing volatile. - assert(!UnimplementedFeature::volatileLoadOrStore()); + assert(!MissingFeatures::volatileLoadOrStore()); return createAlignedLoad(loc, ty, ptr, align, /*isVolatile=*/false); } @@ -913,7 +913,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { auto memberPtrTy = memberPtr.getType().cast(); // TODO(cir): consider address space. - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); auto resultTy = getPointerTo(memberPtrTy.getMemberTy()); return create(loc, resultTy, objectPtr, diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 02218a691b7a..a8edf2524335 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -16,7 +16,7 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "TargetInfo.h" -#include "UnimplementedFeatureGuarding.h" +#include "clang/CIR/MissingFeatures.h" // TODO(cir): we shouldn't need this but we currently reuse intrinsic IDs for // convenience. @@ -717,7 +717,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, } case Builtin::BI__builtin_unpredictable: { if (CGM.getCodeGenOpts().OptimizationLevel != 0) - assert(!UnimplementedFeature::insertBuiltinUnpredictable()); + assert(!MissingFeatures::insertBuiltinUnpredictable()); return RValue::get(buildScalarExpr(E->getArg(0))); } @@ -978,7 +978,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // default (e.g. in C / C++ auto vars are in the generic address space). At // the AST level this is handled within CreateTempAlloca et al., but for the // builtin / dynamic alloca we have to handle it here. - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); LangAS AAS = getASTAllocaAddressSpace(); LangAS EAS = E->getType()->getPointeeType().getAddressSpace(); if (EAS != AAS) { @@ -1233,7 +1233,7 @@ mlir::Value CIRGenFunction::buildCheckedArgForBuiltin(const Expr *E, if (!SanOpts.has(SanitizerKind::Builtin)) return value; - assert(!UnimplementedFeature::sanitizerBuiltin()); + assert(!MissingFeatures::sanitizerBuiltin()); llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 783f40c7e589..ed87e71dac8e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -16,7 +16,7 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "TargetInfo.h" -#include "UnimplementedFeatureGuarding.h" +#include "clang/CIR/MissingFeatures.h" // TODO(cir): once all builtins are covered, decide whether we still // need to use LLVM intrinsics or if there's a better approach to follow. Right diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp index d26b73c0dfe1..d6e23a9f0a25 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp @@ -16,7 +16,7 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "TargetInfo.h" -#include "UnimplementedFeatureGuarding.h" +#include "clang/CIR/MissingFeatures.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Value.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 568628c5bc9e..27a8be9e6d11 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -38,7 +38,7 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // an alias, unless this class owns no members. if (getCodeGenOpts().SanitizeMemoryUseAfterDtor && !D->getParent()->field_empty()) - assert(!UnimplementedFeature::sanitizeDtor()); + assert(!MissingFeatures::sanitizeDtor()); // If the destructor doesn't have a trivial body, we have to emit it // separately. diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 00aa6511169e..346cce788d91 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -26,13 +26,13 @@ #include "llvm/Support/ErrorHandling.h" #include -#include "UnimplementedFeatureGuarding.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/SymbolTable.h" #include "mlir/IR/Types.h" +#include "clang/CIR/MissingFeatures.h" using namespace cir; using namespace clang; @@ -164,7 +164,7 @@ void ClangToCIRArgMapping::construct(const ASTContext &Context, // // TODO(cir): a LLVM lowering prepare pass should break this down into // the appropriated pieces. - assert(!UnimplementedFeature::constructABIArgDirectExtend()); + assert(!MissingFeatures::constructABIArgDirectExtend()); CIRArgs.NumberOfArgs = 1; break; } @@ -428,12 +428,12 @@ void CIRGenModule::ConstructAttributeList(StringRef Name, } if (TargetDecl->hasAttr()) { - assert(!UnimplementedFeature::openCL()); + assert(!MissingFeatures::openCL()); } if (TargetDecl->hasAttr() && getLangOpts().OffloadUniformBlock) - assert(!UnimplementedFeature::CUDA()); + assert(!MissingFeatures::CUDA()); if (TargetDecl->hasAttr()) ; @@ -503,7 +503,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // Some architectures (such as x86-64) have the ABI changed based on // attribute-target/features. Give them a chance to diagnose. - assert(!UnimplementedFeature::checkFunctionCallABI()); + assert(!MissingFeatures::checkFunctionCallABI()); } // TODO: add DNEBUG code @@ -526,7 +526,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // When passing arguments using temporary allocas, we need to add the // appropriate lifetime markers. This vector keeps track of all the lifetime // markers that need to be ended right after the call. - assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); + assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); // Translate all of the arguments as necessary to match the CIR lowering. assert(CallInfo.arg_size() == CallArgs.size() && @@ -841,7 +841,7 @@ mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, mlir::cir::FuncOp callee, ArrayRef args) { // TODO(cir): set the calling convention to this runtime call. - assert(!UnimplementedFeature::setCallingConv()); + assert(!MissingFeatures::setCallingConv()); auto call = builder.create(loc, callee, args); assert(call->getNumResults() <= 1 && @@ -1324,7 +1324,7 @@ arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, args.size()); } else if (llvm::isa(fnType)) { - assert(!UnimplementedFeature::targetCodeGenInfoIsProtoCallVariadic()); + assert(!MissingFeatures::targetCodeGenInfoIsProtoCallVariadic()); required = RequiredArgs(args.size()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 829b2063a503..c892d1ebc9a8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -12,12 +12,12 @@ #include "CIRGenCXXABI.h" #include "CIRGenFunction.h" -#include "UnimplementedFeatureGuarding.h" #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/RecordLayout.h" #include "clang/Basic/NoSanitizeList.h" #include "clang/Basic/TargetBuiltins.h" +#include "clang/CIR/MissingFeatures.h" using namespace clang; using namespace cir; @@ -272,7 +272,7 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { if (!MemcpyableCtor) return false; - assert(!UnimplementedFeature::fieldMemcpyizerBuildMemcpy()); + assert(!MissingFeatures::fieldMemcpyizerBuildMemcpy()); return false; } @@ -733,11 +733,11 @@ void CIRGenFunction::initializeVTablePointer(mlir::Location loc, // // vtable field is derived from `this` pointer, therefore they should be in // the same addr space. - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); VTableField = builder.createElementBitCast(loc, VTableField, VTableAddressPoint.getType()); builder.createStore(loc, VTableAddressPoint, VTableField); - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); } void CIRGenFunction::initializeVTablePointers(mlir::Location loc, @@ -861,7 +861,7 @@ void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, // constructor. QualType::DestructionKind dtorKind = FieldType.isDestructedType(); (void)dtorKind; - if (UnimplementedFeature::cleanups()) + if (MissingFeatures::cleanups()) llvm_unreachable("NYI"); } @@ -908,7 +908,7 @@ void CIRGenFunction::buildImplicitAssignmentOperatorBody( // LexicalScope Scope(*this, RootCS->getSourceRange()); // FIXME(cir): add all of the below under a new scope. - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); AssignmentMemcpyizer AM(*this, AssignOp, Args); for (auto *I : RootCS->body()) AM.emitAssignment(I); @@ -1114,7 +1114,7 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { Stmt *Body = Dtor->getBody(); if (Body) - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); // The call to operator delete in a deleting destructor happens // outside of the function-try-block, which means it's always @@ -1139,7 +1139,7 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { llvm_unreachable("NYI"); // EnterCXXTryStmt(*cast(Body), true); } - if (UnimplementedFeature::emitAsanPrologueOrEpilogue()) + if (MissingFeatures::emitAsanPrologueOrEpilogue()) llvm_unreachable("NYI"); // Enter the epilogue cleanups. @@ -1277,7 +1277,7 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && SanOpts.has(SanitizerKind::Memory) && ClassDecl->getNumVBases() && ClassDecl->isPolymorphic()) - assert(!UnimplementedFeature::sanitizeDtor()); + assert(!MissingFeatures::sanitizeDtor()); // We push them in the forward order so that they'll be popped in // the reverse order. @@ -1289,7 +1289,7 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, // Under SanitizeMemoryUseAfterDtor, poison the trivial base class // memory. For non-trival base classes the same is done in the class // destructor. - assert(!UnimplementedFeature::sanitizeDtor()); + assert(!MissingFeatures::sanitizeDtor()); } else { EHStack.pushCleanup(NormalAndEHCleanup, BaseClassDecl, /*BaseIsVirtual*/ true); @@ -1305,7 +1305,7 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && SanOpts.has(SanitizerKind::Memory) && !ClassDecl->getNumVBases() && ClassDecl->isPolymorphic()) - assert(!UnimplementedFeature::sanitizeDtor()); + assert(!MissingFeatures::sanitizeDtor()); // Destroy non-virtual bases. for (const auto &Base : ClassDecl->bases()) { @@ -1318,7 +1318,7 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, if (BaseClassDecl->hasTrivialDestructor()) { if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && SanOpts.has(SanitizerKind::Memory) && !BaseClassDecl->isEmpty()) - assert(!UnimplementedFeature::sanitizeDtor()); + assert(!MissingFeatures::sanitizeDtor()); } else { EHStack.pushCleanup(NormalAndEHCleanup, BaseClassDecl, /*BaseIsVirtual*/ false); @@ -1329,12 +1329,12 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, // invoked, and before the base class destructor runs, is invalid. bool SanitizeFields = CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && SanOpts.has(SanitizerKind::Memory); - assert(!UnimplementedFeature::sanitizeDtor()); + assert(!MissingFeatures::sanitizeDtor()); // Destroy direct fields. for (const auto *Field : ClassDecl->fields()) { if (SanitizeFields) - assert(!UnimplementedFeature::sanitizeDtor()); + assert(!MissingFeatures::sanitizeDtor()); QualType type = Field->getType(); QualType::DestructionKind dtorKind = type.isDestructedType(); @@ -1351,7 +1351,7 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, } if (SanitizeFields) - assert(!UnimplementedFeature::sanitizeDtor()); + assert(!MissingFeatures::sanitizeDtor()); } void CIRGenFunction::buildCXXDestructorCall(const CXXDestructorDecl *DD, @@ -1422,7 +1422,7 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, // Get the base pointer type. auto BaseValueTy = convertType((PathEnd[-1])->getType()); - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); // auto BasePtrTy = builder.getPointerTo(BaseValueTy); // QualType DerivedTy = getContext().getRecordType(Derived); // CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived); @@ -1505,11 +1505,11 @@ mlir::Value CIRGenFunction::getVTablePtr(mlir::Location Loc, Address This, const CXXRecordDecl *RD) { Address VTablePtrSrc = builder.createElementBitCast(Loc, This, VTableTy); auto VTable = builder.createLoad(Loc, VTablePtrSrc); - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); if (CGM.getCodeGenOpts().OptimizationLevel > 0 && CGM.getCodeGenOpts().StrictVTablePointers) { - assert(!UnimplementedFeature::createInvariantGroup()); + assert(!MissingFeatures::createInvariantGroup()); } return VTable; @@ -1518,7 +1518,7 @@ mlir::Value CIRGenFunction::getVTablePtr(mlir::Location Loc, Address This, Address CIRGenFunction::buildCXXMemberDataPointerAddress( const Expr *E, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo) { - assert(!UnimplementedFeature::cxxABI()); + assert(!MissingFeatures::cxxABI()); auto op = builder.createGetIndirectMember(getLoc(E->getSourceRange()), base.getPointer(), memberPtr); diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 7dc94348368b..bdf6242a8e2b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -38,7 +38,7 @@ mlir::cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc, // Remove this once we go for making sure unreachable code is // well modeled (or not). assert(builder.getInsertionBlock() && "not yet implemented"); - assert(!UnimplementedFeature::ehStack()); + assert(!MissingFeatures::ehStack()); // Insert a branch: to the cleanup block (unsolved) or to the already // materialized label. Keep track of unsolved goto's. @@ -316,7 +316,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // FIXME(cir): LLVM traditional codegen tries to simplify some of the // codegen here. Once we are further down with EH support revisit whether we // need to this during lowering. - assert(!UnimplementedFeature::simplifyCleanupEntry()); + assert(!MissingFeatures::simplifyCleanupEntry()); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 16f4b8b4f646..560335765ef6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -297,7 +297,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { // FIXME(cir): create a new scope to copy out the params? // LLVM create scope cleanups here, but might be due to the use // of many basic blocks? - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); ParamReferenceReplacerRAII ParamReplacer(LocalDeclMap); // Create mapping between parameters and copy-params for coroutine @@ -307,7 +307,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { "ParamMoves and FnArgs should be the same size for coroutine " "function"); // For zipping the arg map into debug info. - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); // Create parameter copies. We do it before creating a promise, since an // evolution of coroutine TS may allow promise constructor to observe @@ -348,7 +348,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { // FIXME(cir): wrap buildBodyAndFallthrough with try/catch bits. if (S.getExceptionHandler()) - assert(!UnimplementedFeature::unhandledException() && "NYI"); + assert(!MissingFeatures::unhandledException() && "NYI"); if (buildBodyAndFallthrough(*this, S, S.getBody(), currLexScope).failed()) return mlir::failure(); diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 77d80a244e11..d1a1768d83c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -15,7 +15,6 @@ #include "CIRGenFunction.h" #include "CIRGenOpenMPRuntime.h" #include "EHScopeStack.h" -#include "UnimplementedFeatureGuarding.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinOps.h" @@ -26,6 +25,7 @@ #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" #include @@ -38,7 +38,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, QualType Ty = D.getType(); // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private && // getLangOpts().OpenCL)) - assert(!UnimplementedFeature::openCL()); + assert(!MissingFeatures::openCL()); assert(Ty.getAddressSpace() == LangAS::Default); assert(!D.hasAttr() && "not implemented"); @@ -55,8 +55,8 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, if (Ty->isVariablyModifiedType()) buildVariablyModifiedType(Ty); - assert(!UnimplementedFeature::generateDebugInfo()); - assert(!UnimplementedFeature::cxxABI()); + assert(!MissingFeatures::generateDebugInfo()); + assert(!MissingFeatures::cxxABI()); Address address = Address::invalid(); Address allocaAddr = Address::invalid(); @@ -153,7 +153,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, } // TODO: what about emitting lifetime markers for MSVC catch parameters? // TODO: something like @llvm.lifetime.start/end here? revisit this later. - assert(!UnimplementedFeature::shouldEmitLifetimeMarkers()); + assert(!MissingFeatures::shouldEmitLifetimeMarkers()); } } else { // not openmp nor constant sized type bool VarAllocated = false; @@ -190,7 +190,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, // If we have debug info enabled, properly describe the VLA dimensions for // this type by registering the vla size expression for each of the // dimensions. - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); } emission.Addr = address; @@ -222,12 +222,12 @@ static void emitStoresForConstant(CIRGenModule &CGM, const VarDecl &D, uint64_t ConstantSize = layout.getTypeAllocSize(Ty); if (!ConstantSize) return; - assert(!UnimplementedFeature::addAutoInitAnnotation()); - assert(!UnimplementedFeature::vectorConstants()); - assert(!UnimplementedFeature::shouldUseBZeroPlusStoresToInitialize()); - assert(!UnimplementedFeature::shouldUseMemSetToInitialize()); - assert(!UnimplementedFeature::shouldSplitConstantStore()); - assert(!UnimplementedFeature::shouldCreateMemCpyFromGlobal()); + assert(!MissingFeatures::addAutoInitAnnotation()); + assert(!MissingFeatures::vectorConstants()); + assert(!MissingFeatures::shouldUseBZeroPlusStoresToInitialize()); + assert(!MissingFeatures::shouldUseMemSetToInitialize()); + assert(!MissingFeatures::shouldSplitConstantStore()); + assert(!MissingFeatures::shouldCreateMemCpyFromGlobal()); // In CIR we want to emit a store for the whole thing, later lowering // prepare to LLVM should unwrap this into the best policy (see asserts // above). @@ -275,7 +275,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { // Check whether this is a byref variable that's potentially // captured and moved by its own initializer. If so, we'll need to // emit the initializer first, then copy into the variable. - assert(!UnimplementedFeature::capturedByInit() && "NYI"); + assert(!MissingFeatures::capturedByInit() && "NYI"); // Note: constexpr already initializes everything correctly. LangOptions::TrivialAutoVarInitKind trivialAutoVarInit = @@ -464,7 +464,7 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, Name = getStaticDeclName(*this, D); mlir::Type LTy = getTypes().convertTypeForMem(Ty); - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); // OpenCL variables in local address space and CUDA shared // variables cannot have an initializer. @@ -491,7 +491,7 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, setGVProperties(GV, &D); // Make sure the result is of the correct type. - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); // Ensure that the static local gets initialized by making sure the parent // function gets emitted eventually. @@ -518,7 +518,7 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, // never defer them. assert(isa(DC) && "unexpected parent code decl"); } - if (GD.getDecl() && UnimplementedFeature::openMP()) { + if (GD.getDecl() && MissingFeatures::openMP()) { // Disable emission of the parent function for the OpenMP device codegen. llvm_unreachable("OpenMP is NYI"); } @@ -658,7 +658,7 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, "static var init type mismatch is NYI"); CGM.setStaticLocalDeclAddress(&D, var); - assert(!UnimplementedFeature::reportGlobalToASan()); + assert(!MissingFeatures::reportGlobalToASan()); // Emit global variable debug descriptor for static vars. auto *DI = getDebugInfo(); @@ -678,14 +678,14 @@ void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS, void CIRGenFunction::buildScalarInit(const Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit) { Qualifiers::ObjCLifetime lifetime = Qualifiers::ObjCLifetime::OCL_None; - assert(!UnimplementedFeature::objCLifetime()); + assert(!MissingFeatures::objCLifetime()); if (!lifetime) { SourceLocRAIIObject Loc{*this, loc}; mlir::Value value = buildScalarExpr(init); if (capturedByInit) llvm_unreachable("NYI"); - assert(!UnimplementedFeature::emitNullabilityCheck()); + assert(!MissingFeatures::emitNullabilityCheck()); buildStoreThroughLValue(RValue::get(value), lvalue, true); return; } @@ -827,7 +827,7 @@ void CIRGenFunction::buildDecl(const Decl &D) { case Decl::Using: // using X; [C++] case Decl::UsingEnum: // using enum X; [C++] case Decl::UsingDirective: // using namespace X; [C++] - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); return; case Decl::UsingPack: assert(0 && "Not implemented"); @@ -853,7 +853,7 @@ void CIRGenFunction::buildDecl(const Decl &D) { case Decl::TypeAlias: { // using X = int; [C++0x] QualType Ty = cast(D).getUnderlyingType(); if (auto *DI = getDebugInfo()) - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); if (Ty->isVariablyModifiedType()) buildVariablyModifiedType(Ty); return; diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp index 3d8c72dd7f5e..ed1cd708e6b5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp @@ -70,7 +70,7 @@ void CIRGenModule::buildGlobalVarDeclInit(const VarDecl *D, // For example, in the above CUDA code, the static local variable s has a // "shared" address space qualifier, but the constructor of StructWithCtor // expects "this" in the "generic" address space. - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); if (!T->isReferenceType()) { bool NeedsDtor = diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index da34c762ac96..7366c1ba42ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -14,7 +14,6 @@ #include "CIRGenCleanup.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" -#include "UnimplementedFeatureGuarding.h" #include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" @@ -22,6 +21,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" #include @@ -439,7 +439,7 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, assert(typeValue && "fell into catch-all case!"); // Check for address space mismatch: if (typeValue->getType() != // argTy) - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); bool nextIsEnd = false; // If this is the last handler, we're at the end, and the next @@ -559,7 +559,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { CGM.getCXXABI().emitBeginCatch(*this, C); // Emit the PGO counter increment. - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); // Perform the body of the catch. (void)buildStmt(C->getHandlerBlock(), /*useCurrentScope=*/true); @@ -594,7 +594,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { llvm_unreachable("NYI"); } - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); } /// Check whether this is a non-EH scope, i.e. a scope which doesn't @@ -672,7 +672,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { { // Save the current CIR generation state. mlir::OpBuilder::InsertionGuard guard(builder); - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); // Traditional LLVM codegen creates the lpad basic block, extract // values, landing pad instructions, etc. @@ -749,7 +749,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { // Otherwise, signal that we at least have cleanups. } else if (hasCleanup) { // FIXME(cir): figure out whether and how we need this in CIR. - assert(!UnimplementedFeature::setLandingPadCleanup()); + assert(!MissingFeatures::setLandingPadCleanup()); } assert((clauses.size() > 0 || hasCleanup) && "CatchOp has no clauses!"); @@ -814,7 +814,7 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) { } case EHScope::Cleanup: - assert(!UnimplementedFeature::setLandingPadCleanup()); + assert(!MissingFeatures::setLandingPadCleanup()); dispatchBlock = currLexScope->getOrCreateCleanupBlock(builder); break; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 03fcb7e1f79b..505c045f7692 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -17,7 +17,6 @@ #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" #include "CIRGenValue.h" -#include "UnimplementedFeatureGuarding.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/GlobalDecl.h" @@ -25,6 +24,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/Casting.h" @@ -137,7 +137,7 @@ static Address buildPointerWithAlignment(const Expr *E, CE->getSubExpr()->getType()->getAs()) { if (PtrTy->getPointeeType()->isVoidType()) break; - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); LValueBaseInfo InnerBaseInfo; Address Addr = CGF.buildPointerWithAlignment( @@ -146,7 +146,7 @@ static Address buildPointerWithAlignment(const Expr *E, *BaseInfo = InnerBaseInfo; if (isa(CE)) { - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); LValueBaseInfo TargetTypeBaseInfo; CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( @@ -173,7 +173,7 @@ static Address buildPointerWithAlignment(const Expr *E, Addr = CGF.getBuilder().createElementBitCast( CGF.getLoc(E->getSourceRange()), Addr, ElemTy); if (CE->getCastKind() == CK_AddressSpaceConversion) { - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); llvm_unreachable("NYI"); } return Addr; @@ -195,7 +195,7 @@ static Address buildPointerWithAlignment(const Expr *E, // TODO: Support accesses to members of base classes in TBAA. For now, we // conservatively pretend that the complete object is of the base class // type. - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); Address Addr = CGF.buildPointerWithAlignment(CE->getSubExpr(), BaseInfo); auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); return CGF.getAddressOfBaseClass( @@ -212,7 +212,7 @@ static Address buildPointerWithAlignment(const Expr *E, LValue LV = CGF.buildLValue(UO->getSubExpr()); if (BaseInfo) *BaseInfo = LV.getBaseInfo(); - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); return LV.getAddress(); } } @@ -291,7 +291,7 @@ LValue CIRGenFunction::buildLValueForBitField(LValue base, QualType fieldType = field->getType().withCVRQualifiers(base.getVRQualifiers()); - assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields"); + assert(!MissingFeatures::tbaa() && "NYI TBAA for bit fields"); LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo); } @@ -309,15 +309,15 @@ LValue CIRGenFunction::buildLValueForField(LValue base, const RecordDecl *rec = field->getParent(); AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource)); - if (UnimplementedFeature::tbaa() || rec->hasAttr() || + if (MissingFeatures::tbaa() || rec->hasAttr() || FieldType->isVectorType()) { - assert(!UnimplementedFeature::tbaa() && "NYI"); + assert(!MissingFeatures::tbaa() && "NYI"); } else if (rec->isUnion()) { - assert(!UnimplementedFeature::tbaa() && "NYI"); + assert(!MissingFeatures::tbaa() && "NYI"); } else { // If no base type been assigned for the base access, then try to generate // one for this base lvalue. - assert(!UnimplementedFeature::tbaa() && "NYI"); + assert(!MissingFeatures::tbaa() && "NYI"); } Address addr = base.getAddress(); @@ -342,11 +342,11 @@ LValue CIRGenFunction::buildLValueForField(LValue base, hasAnyVptr(FieldType, getContext())) // Because unions can easily skip invariant.barriers, we need to add // a barrier every time CXXRecord field with vptr is referenced. - assert(!UnimplementedFeature::createInvariantGroup()); + assert(!MissingFeatures::createInvariantGroup()); if (IsInPreservedAIRegion || (getDebugInfo() && rec->hasAttr())) { - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); } if (FieldType->isReferenceType()) @@ -368,7 +368,7 @@ LValue CIRGenFunction::buildLValueForField(LValue base, // If this is a reference field, load the reference right now. if (FieldType->isReferenceType()) { - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); LValue RefLVal = makeAddrLValue(addr, FieldType, FieldBaseInfo); if (RecordCVR & Qualifiers::Volatile) RefLVal.getQuals().addVolatile(); @@ -390,7 +390,7 @@ LValue CIRGenFunction::buildLValueForField(LValue base, if (field->hasAttr()) llvm_unreachable("NYI"); - if (UnimplementedFeature::tbaa()) + if (MissingFeatures::tbaa()) // Next line should take a TBAA object llvm_unreachable("NYI"); LValue LV = makeAddrLValue(addr, FieldType, FieldBaseInfo); @@ -426,7 +426,7 @@ LValue CIRGenFunction::buildLValueForFieldInitialization( LValueBaseInfo BaseInfo = Base.getBaseInfo(); AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource)); - assert(!UnimplementedFeature::tbaa() && "NYI"); + assert(!MissingFeatures::tbaa() && "NYI"); return makeAddrLValue(V, FieldType, FieldBaseInfo); } @@ -482,7 +482,7 @@ static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { bool IsPredefinedLibFunction = CGM.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID); bool HasAttributeNoBuiltin = false; - assert(!UnimplementedFeature::attributeNoBuiltin() && "NYI"); + assert(!MissingFeatures::attributeNoBuiltin() && "NYI"); // bool HasAttributeNoBuiltin = // CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) || // CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins); @@ -620,7 +620,7 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, llvm_unreachable("NYI"); } - if (UnimplementedFeature::tbaa()) + if (MissingFeatures::tbaa()) llvm_unreachable("NYI"); } @@ -671,7 +671,7 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, auto field = builder.createGetBitfield(getLoc(Loc), resLTy, ptr.getPointer(), ptr.getElementType(), info, LV.isVolatile(), useVolatile); - assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); + assert(!MissingFeatures::emitScalarRangeCheck() && "NYI"); return RValue::get(field); } @@ -772,7 +772,7 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, assert(0 && "NYI"); else LV = CGF.makeAddrLValue(Addr, T, AlignmentSource::Decl); - assert(!UnimplementedFeature::setObjCGCLValueClass() && "NYI"); + assert(!MissingFeatures::setObjCGCLValueClass() && "NYI"); return LV; } @@ -828,12 +828,12 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { VD = VD->getCanonicalDecl(); if (auto *FD = LambdaCaptureFields.lookup(VD)) return buildCapturedFieldLValue(*this, FD, CXXABIThisValue); - assert(!UnimplementedFeature::CGCapturedStmtInfo() && "NYI"); + assert(!MissingFeatures::CGCapturedStmtInfo() && "NYI"); // TODO[OpenMP]: Find the appropiate captured variable value and return // it. // TODO[OpenMP]: Set non-temporal information in the captured LVal. // LLVM codegen: - assert(!UnimplementedFeature::openMP()); + assert(!MissingFeatures::openMP()); // Address addr = GetAddrOfBlockDecl(VD); // return MakeAddrLValue(addr, T, AlignmentSource::Decl); } @@ -906,15 +906,15 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { bool NonGCable = isLocalStorage && !VD->getType()->isReferenceType() && !isBlockByref; - if (NonGCable && UnimplementedFeature::setNonGC()) { + if (NonGCable && MissingFeatures::setNonGC()) { llvm_unreachable("garbage collection is NYI"); } bool isImpreciseLifetime = (isLocalStorage && !VD->hasAttr()); - if (isImpreciseLifetime && UnimplementedFeature::ARC()) + if (isImpreciseLifetime && MissingFeatures::ARC()) llvm_unreachable("imprecise lifetime is NYI"); - assert(!UnimplementedFeature::setObjCGCLValueClass()); + assert(!MissingFeatures::setObjCGCLValueClass()); // Statics are defined as globals, so they are not include in the function's // symbol table. @@ -929,7 +929,7 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { // Emit debuginfo for the function declaration if the target wants to. if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); return LV; } @@ -973,7 +973,7 @@ CIRGenFunction::buildPointerToDataMemberBinaryExpr(const BinaryOperator *E) { LValueBaseInfo baseInfo; // TODO(cir): add TBAA - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); auto memberAddr = buildCXXMemberDataPointerAddress(E, baseAddr, memberPtr, memberPtrTy, &baseInfo); @@ -1233,8 +1233,8 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, // Chain calls use the same code path to add the inviisble chain parameter to // the function type. if (isa(FnType) || Chain) { - assert(!UnimplementedFeature::chainCalls()); - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::chainCalls()); + assert(!MissingFeatures::addressSpace()); auto CalleeTy = getTypes().GetFunctionType(FnInfo); // get non-variadic function type CalleeTy = mlir::cir::FuncType::get(CalleeTy.getInputs(), @@ -1332,7 +1332,7 @@ Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); if (BaseInfo) *BaseInfo = LV.getBaseInfo(); - assert(!UnimplementedFeature::tbaa() && "NYI"); + assert(!MissingFeatures::tbaa() && "NYI"); mlir::Value ptr = maybeBuildArrayDecay( CGM.getBuilder(), CGM.getLoc(E->getSourceRange()), Addr.getPointer(), @@ -1442,7 +1442,7 @@ buildArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, // TODO(cir): LLVM codegen emits in bound gep check here, is there anything // that would enhance tracking this later in CIR? if (inbounds) - assert(!UnimplementedFeature::emitCheckedInBoundsGEP() && "NYI"); + assert(!MissingFeatures::emitCheckedInBoundsGEP() && "NYI"); return buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, ptr, eltTy, idx, shouldDecay); } @@ -1541,7 +1541,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, llvm_unreachable("extvector subscript is NYI"); } - assert(!UnimplementedFeature::tbaa() && "TBAA is NYI"); + assert(!MissingFeatures::tbaa() && "TBAA is NYI"); LValueBaseInfo EltBaseInfo; Address Addr = Address::invalid(); if (const VariableArrayType *vla = @@ -1593,11 +1593,11 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, E->getBase()); EltBaseInfo = ArrayLV.getBaseInfo(); // TODO(cir): EltTBAAInfo - assert(!UnimplementedFeature::tbaa() && "TBAA is NYI"); + assert(!MissingFeatures::tbaa() && "TBAA is NYI"); } else { // The base must be a pointer; emit it with an estimate of its alignment. // TODO(cir): EltTBAAInfo - assert(!UnimplementedFeature::tbaa() && "TBAA is NYI"); + assert(!MissingFeatures::tbaa() && "TBAA is NYI"); Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo); auto Idx = EmitIdxAfterBase(/*Promote*/ true); QualType ptrType = E->getBase()->getType(); @@ -1753,7 +1753,7 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { // TODO: Support accesses to members of base classes in TBAA. For now, we // conservatively pretend that the complete object is of the base class // type. - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); return makeAddrLValue(Base, E->getType(), LV.getBaseInfo()); } case CK_ToUnion: @@ -1863,7 +1863,7 @@ LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { NamedDecl *ND = E->getMemberDecl(); if (auto *Field = dyn_cast(ND)) { LValue LV = buildLValueForField(BaseLV, Field); - assert(!UnimplementedFeature::setObjCGCLValueClass() && "NYI"); + assert(!MissingFeatures::setObjCGCLValueClass() && "NYI"); if (getLangOpts().OpenMP) { // If the member was explicitly marked as nontemporal, mark it as // nontemporal. If the base lvalue is marked as nontemporal, mark access @@ -2094,7 +2094,7 @@ std::optional HandleConditionalOperatorLValueSimpleCase( if (!CGF.ContainsLabel(Dead)) { // If the true case is live, we need to track its region. if (CondExprBool) { - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); } // If a throw expression we emit it and return an undefined lvalue // because it can't be used. @@ -2160,54 +2160,54 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, } }; - Info.Result = - builder - .create( - loc, condV, /*trueBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScope lexScope{*this, loc, - b.getInsertionBlock()}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - Info.LHS = BranchGenFunc(CGF, trueExpr); - auto lhs = Info.LHS->getPointer(); - eval.end(CGF); - - if (lhs) { - yieldTy = lhs.getType(); - b.create(loc, lhs); - return; - } - // If LHS or RHS is a throw or void expression we need to patch - // arms as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - }, - /*falseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScope lexScope{*this, loc, - b.getInsertionBlock()}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - Info.RHS = BranchGenFunc(CGF, falseExpr); - auto rhs = Info.RHS->getPointer(); - eval.end(CGF); - - if (rhs) { - yieldTy = rhs.getType(); - b.create(loc, rhs); - } else { - // If LHS or RHS is a throw or void expression we need to - // patch arms as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - } - - patchVoidOrThrowSites(); - }) - .getResult(); + Info.Result = builder + .create( + loc, condV, /*trueBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScope lexScope{ + *this, loc, b.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + + assert(!MissingFeatures::incrementProfileCounter()); + eval.begin(CGF); + Info.LHS = BranchGenFunc(CGF, trueExpr); + auto lhs = Info.LHS->getPointer(); + eval.end(CGF); + + if (lhs) { + yieldTy = lhs.getType(); + b.create(loc, lhs); + return; + } + // If LHS or RHS is a throw or void expression we need + // to patch arms as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + }, + /*falseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScope lexScope{ + *this, loc, b.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + + assert(!MissingFeatures::incrementProfileCounter()); + eval.begin(CGF); + Info.RHS = BranchGenFunc(CGF, falseExpr); + auto rhs = Info.RHS->getPointer(); + eval.end(CGF); + + if (rhs) { + yieldTy = rhs.getType(); + b.create(loc, rhs); + } else { + // If LHS or RHS is a throw or void expression we + // need to patch arms as to properly match yield + // types. + insertPoints.push_back(b.saveInsertionPoint()); + } + + patchVoidOrThrowSites(); + }) + .getResult(); return Info; } @@ -2242,7 +2242,7 @@ LValue CIRGenFunction::buildConditionalOperatorLValue( AlignmentSource alignSource = std::max(Info.LHS->getBaseInfo().getAlignmentSource(), Info.RHS->getBaseInfo().getAlignmentSource()); - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource)); } else { llvm_unreachable("NYI"); @@ -2448,7 +2448,7 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, // This should be done in CIR prior to LLVM lowering, if we do now // we can make CIR based diagnostics misleading. // cir.ternary(!x, t, f) -> cir.ternary(x, f, t) - assert(!UnimplementedFeature::shouldReverseUnaryCondOnBoolExpr()); + assert(!MissingFeatures::shouldReverseUnaryCondOnBoolExpr()); } if (const ConditionalOperator *CondOp = dyn_cast(cond)) { @@ -2484,7 +2484,7 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, // Don't bother if not optimizing because that metadata would not be used. auto *Call = dyn_cast(cond->IgnoreImpCasts()); if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { - assert(!UnimplementedFeature::insertBuiltinUnpredictable()); + assert(!MissingFeatures::insertBuiltinUnpredictable()); } // Emit the code with the fully general case. @@ -2593,8 +2593,8 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, llvm_unreachable("NYI"); } - assert(!UnimplementedFeature::tbaa() && "NYI"); - assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); + assert(!MissingFeatures::tbaa() && "NYI"); + assert(!MissingFeatures::emitScalarRangeCheck() && "NYI"); return buildFromMemory(Load, Ty); } @@ -2646,7 +2646,7 @@ Address CIRGenFunction::buildLoadOfReference(LValue RefLVal, mlir::Location Loc, RefLVal.getAddress().getPointer()); // TODO(cir): DecorateInstructionWithTBAA relevant for us? - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); QualType PointeeType = RefLVal.getType()->getPointeeType(); CharUnits Align = CGM.getNaturalTypeAlignment(PointeeType, PointeeBaseInfo, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 8eaaf02be27b..a06bbc7e3453 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -14,7 +14,6 @@ #include "CIRGenModule.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" -#include "UnimplementedFeatureGuarding.h" #include "mlir/IR/Attributes.h" #include "clang/AST/Decl.h" @@ -23,6 +22,7 @@ #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" @@ -208,7 +208,7 @@ class AggExprEmitter : public StmtVisitor { void VisitCallExpr(const CallExpr *E); void VisitStmtExpr(const StmtExpr *E) { - assert(!UnimplementedFeature::stmtExprEvaluation() && "NYI"); + assert(!MissingFeatures::stmtExprEvaluation() && "NYI"); CGF.buildCompoundStmt(*E->getSubStmt(), /*getLast=*/true, Dest); } @@ -235,7 +235,7 @@ class AggExprEmitter : public StmtVisitor { // do an atomic copy. if (lhs.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(lhs)) { - assert(!UnimplementedFeature::atomicTypes()); + assert(!MissingFeatures::atomicTypes()); return; } @@ -246,7 +246,7 @@ class AggExprEmitter : public StmtVisitor { // A non-volatile aggregate destination might have volatile member. if (!lhsSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType())) - assert(!UnimplementedFeature::atomicTypes()); + assert(!MissingFeatures::atomicTypes()); CGF.buildAggExpr(E->getRHS(), lhsSlot); @@ -333,7 +333,7 @@ void AggExprEmitter::buildAggLoadOfLValue(const Expr *E) { // If the type of the l-value is atomic, then do an atomic load. if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV) || - UnimplementedFeature::atomicTypes()) + MissingFeatures::atomicTypes()) llvm_unreachable("atomic load is NYI"); buildFinalDestCopy(E->getType(), LV); @@ -358,7 +358,7 @@ void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src, // Copy non-trivial C structs here. if (Dest.isVolatile()) - assert(!UnimplementedFeature::volatileTypes()); + assert(!MissingFeatures::volatileTypes()); if (SrcValueKind == EVK_RValue) { if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) { @@ -537,7 +537,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, [&](mlir::OpBuilder &b, mlir::Location loc) { auto currentElement = builder.createLoad(loc, tmpAddr); - if (UnimplementedFeature::cleanups()) + if (MissingFeatures::cleanups()) llvm_unreachable("NYI"); // Emit the actual filler expression. @@ -830,7 +830,7 @@ void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { } void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { - if (UnimplementedFeature::cleanups()) + if (MissingFeatures::cleanups()) llvm_unreachable("NYI"); auto &builder = CGF.getBuilder(); @@ -852,7 +852,7 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { // We'll need to enter cleanup scopes in case any of the element initializers // throws an exception. - if (UnimplementedFeature::cleanups()) + if (MissingFeatures::cleanups()) llvm_unreachable("NYI"); mlir::Operation *CleanupDominator = nullptr; @@ -892,7 +892,7 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { // Deactivate all the partial cleanups in reverse order, which generally means // popping them. - if (UnimplementedFeature::cleanups()) + if (MissingFeatures::cleanups()) llvm_unreachable("NYI"); // Destroy the placeholder if we made one. @@ -909,7 +909,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { // If we're loading from a volatile type, force the destination // into existence. if (E->getSubExpr()->getType().isVolatileQualified() || - UnimplementedFeature::volatileTypes()) { + MissingFeatures::volatileTypes()) { bool Destruct = !Dest.isExternallyDestructed() && E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct; @@ -1026,14 +1026,14 @@ void AggExprEmitter::withReturnValueSlot( (RequiresDestruction && !Dest.getAddress().isValid()); Address RetAddr = Address::invalid(); - assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); + assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); if (!UseTemp) { RetAddr = Dest.getAddress(); } else { RetAddr = CGF.CreateMemTemp(RetTy, CGF.getLoc(E->getSourceRange()), "tmp", &RetAddr); - assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); + assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); } RValue Src = @@ -1050,7 +1050,7 @@ void AggExprEmitter::withReturnValueSlot( // If there's no dtor to run, the copy was the last use of our temporary. // Since we're not guaranteed to be in an ExprWithCleanups, clean up // eagerly. - assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); + assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); } } @@ -1251,7 +1251,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( LValue LV = CGF.buildLValueForFieldInitialization(DestLV, field, field->getName()); // We never generate write-barries for initialized fields. - assert(!UnimplementedFeature::setNonGC()); + assert(!MissingFeatures::setNonGC()); if (curInitIndex < NumInitElements) { // Store the initializer into the field. @@ -1314,7 +1314,7 @@ void AggExprEmitter::VisitAbstractConditionalOperator( // Bind the common expression if necessary. CIRGenFunction::OpaqueValueMapping binding(CGF, E); CIRGenFunction::ConditionalEvaluation eval(CGF); - assert(!UnimplementedFeature::getProfileCount()); + assert(!MissingFeatures::getProfileCount()); // Save whether the destination's lifetime is externally managed. bool isExternallyDestructed = Dest.isExternallyDestructed(); @@ -1331,7 +1331,7 @@ void AggExprEmitter::VisitAbstractConditionalOperator( CIRGenFunction::LexicalScope lexScope{CGF, loc, builder.getInsertionBlock()}; Dest.setExternallyDestructed(isExternallyDestructed); - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); Visit(E->getTrueExpr()); } eval.end(CGF); @@ -1348,7 +1348,7 @@ void AggExprEmitter::VisitAbstractConditionalOperator( // with us, and we can safely emit the RHS into the same slot, but // we shouldn't claim that it's already being destructed. Dest.setExternallyDestructed(isExternallyDestructed); - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); Visit(E->getFalseExpr()); } eval.end(CGF); @@ -1357,7 +1357,7 @@ void AggExprEmitter::VisitAbstractConditionalOperator( if (destructNonTrivialCStruct) llvm_unreachable("NYI"); - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); } void AggExprEmitter::VisitBinComma(const BinaryOperator *E) { @@ -1612,7 +1612,7 @@ void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty, // Determine the metadata to describe the position of any padding in this // memcpy, as well as the TBAA tags for the members of the struct, in case // the optimizer wishes to expand it in to scalar memory operations. - if (CGM.getCodeGenOpts().NewStructPathTBAA || UnimplementedFeature::tbaa()) + if (CGM.getCodeGenOpts().NewStructPathTBAA || MissingFeatures::tbaa()) llvm_unreachable("TBAA is NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index c7b528d35497..9c6529d87633 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -11,11 +11,11 @@ //===----------------------------------------------------------------------===// #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/MissingFeatures.h" #include #include #include #include -#include #include @@ -138,7 +138,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( // one or the one of the full expression, we would have to build // a derived-to-base cast to compute the correct this pointer, but // we don't have support for that yet, so do a virtual call. - assert(!UnimplementedFeature::buildDerivedToBaseCastForDevirt()); + assert(!MissingFeatures::buildDerivedToBaseCastForDevirt()); DevirtualizedMethod = nullptr; } } @@ -173,7 +173,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( LValue This; if (IsArrow) { LValueBaseInfo BaseInfo; - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); Address ThisValue = buildPointerWithAlignment(Base, &BaseInfo); This = makeAddrLValue(ThisValue, Base->getType(), BaseInfo); } else { @@ -238,7 +238,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( SkippedChecks.set(SanitizerKind::Null, true); } - if (UnimplementedFeature::buildTypeCheck()) + if (MissingFeatures::buildTypeCheck()) llvm_unreachable("NYI"); // C++ [class.virtual]p12: @@ -613,7 +613,7 @@ static void buildNewInitializer(CIRGenFunction &CGF, const CXXNewExpr *E, QualType ElementType, mlir::Type ElementTy, Address NewPtr, mlir::Value NumElements, mlir::Value AllocSizeWithoutCookie) { - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); if (E->isArray()) { llvm_unreachable("NYI"); } else if (const Expr *Init = E->getInitializer()) { @@ -735,7 +735,7 @@ static bool EmitObjectDelete(CIRGenFunction &CGF, const CXXDeleteExpr *DE, // In traditional LLVM codegen null checks are emitted to save a delete call. // In CIR we optimize for size by default, the null check should be added into // this function callers. - assert(!UnimplementedFeature::emitNullCheckForDeleteCalls()); + assert(!MissingFeatures::emitNullCheckForDeleteCalls()); CGF.PopCleanupBlock(); return false; @@ -755,7 +755,7 @@ void CIRGenFunction::buildCXXDeleteExpr(const CXXDeleteExpr *E) { // // CIR note: emit the code size friendly by default for now, such as mentioned // in `EmitObjectDelete`. - assert(!UnimplementedFeature::emitNullCheckForDeleteCalls()); + assert(!MissingFeatures::emitNullCheckForDeleteCalls()); QualType DeleteTy = E->getDestroyedType(); // A destroying operator delete overrides the entire operation of the @@ -861,7 +861,7 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { buildNewDeleteCall(*this, allocator, allocatorType, allocatorArgs); // Set !heapallocsite metadata on the call to operator new. - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); // If this was a call to a global replaceable allocation function that does // not take an alignment argument, the allocator is known to produce storage @@ -1010,7 +1010,7 @@ static RValue buildNewDeleteCall(CIRGenFunction &CGF, /// to a replaceable global allocation function. /// /// We model such elidable calls with the 'builtin' attribute. - assert(!UnimplementedFeature::attributeBuiltin()); + assert(!MissingFeatures::attributeBuiltin()); return RV; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index bad23ad72560..1e5453a11691 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1771,7 +1771,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, Elts, typedFiller); } case APValue::MemberPointer: { - assert(!UnimplementedFeature::cxxABI()); + assert(!MissingFeatures::cxxABI()); const ValueDecl *memberDecl = Value.getMemberPointerDecl(); assert(!Value.isMemberPointerToDerivedMember() && "NYI"); @@ -1824,7 +1824,7 @@ mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) { } mlir::Value CIRGenModule::buildMemberPointerConstant(const UnaryOperator *E) { - assert(!UnimplementedFeature::cxxABI()); + assert(!MissingFeatures::cxxABI()); auto loc = getLoc(E->getSourceRange()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index d0f2181044fe..8e57367a35bb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -14,7 +14,7 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" -#include "UnimplementedFeatureGuarding.h" +#include "clang/CIR/MissingFeatures.h" #include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" @@ -59,7 +59,7 @@ struct BinOpInfo { return true; llvm::APInt Result; - assert(!UnimplementedFeature::mayHaveIntegerOverflow()); + assert(!MissingFeatures::mayHaveIntegerOverflow()); llvm_unreachable("NYI"); return false; } @@ -278,7 +278,7 @@ class ScalarExprEmitter : public StmtVisitor { // Do we need anything like TestAndClearIgnoreResultAssign()? if (E->getBase()->getType()->isVectorType()) { - assert(!UnimplementedFeature::scalableVectors() && + assert(!MissingFeatures::scalableVectors() && "NYI: index into scalable vector"); // Subscript of vector type. This is handled differently, with a custom // operation. @@ -350,7 +350,7 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitCallExpr(const CallExpr *E); mlir::Value VisitStmtExpr(StmtExpr *E) { - assert(!UnimplementedFeature::stmtExprEvaluation() && "NYI"); + assert(!MissingFeatures::stmtExprEvaluation() && "NYI"); Address retAlloca = CGF.buildCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType()); if (!retAlloca.isValid()) @@ -482,14 +482,14 @@ class ScalarExprEmitter : public StmtVisitor { } else { value = builder.create(loc, value.getType(), value, amt); - assert(!UnimplementedFeature::emitCheckedInBoundsGEP()); + assert(!MissingFeatures::emitCheckedInBoundsGEP()); } } } else if (type->isVectorType()) { llvm_unreachable("no vector inc/dec yet"); } else if (type->isRealFloatingType()) { // TODO(cir): CGFPOptionsRAII - assert(!UnimplementedFeature::CGFPOptionsRAII()); + assert(!MissingFeatures::CGFPOptionsRAII()); if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) llvm_unreachable("__fp16 type NYI"); @@ -810,7 +810,7 @@ class ScalarExprEmitter : public StmtVisitor { Result.Opcode = E->getOpcode(); Result.Loc = E->getSourceRange(); // TODO: Result.FPFeatures - assert(!UnimplementedFeature::getFPFeaturesInEffect()); + assert(!MissingFeatures::getFPFeaturesInEffect()); Result.E = E; return Result; } @@ -1365,7 +1365,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { // LLVM we shall take VLA's, division by element size, etc. // // See more in `EmitSub` in CGExprScalar.cpp. - assert(!UnimplementedFeature::llvmLoweringPtrDiffConsidersPointee()); + assert(!MissingFeatures::llvmLoweringPtrDiffConsidersPointee()); return Builder.create(CGF.getLoc(Ops.Loc), CGF.PtrDiffTy, Ops.LHS, Ops.RHS); } @@ -1477,7 +1477,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { auto Src = Visit(const_cast(E)); mlir::Type DstTy = CGF.convertType(DestTy); - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { llvm_unreachable("NYI"); } @@ -1487,17 +1487,17 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } // Update heapallocsite metadata when there is an explicit pointer cast. - assert(!UnimplementedFeature::addHeapAllocSiteMetadata()); + assert(!MissingFeatures::addHeapAllocSiteMetadata()); // If Src is a fixed vector and Dst is a scalable vector, and both have the // same element type, use the llvm.vector.insert intrinsic to perform the // bitcast. - assert(!UnimplementedFeature::scalableVectors()); + assert(!MissingFeatures::scalableVectors()); // If Src is a scalable vector and Dst is a fixed vector, and both have the // same element type, use the llvm.vector.extract intrinsic to perform the // bitcast. - assert(!UnimplementedFeature::scalableVectors()); + assert(!MissingFeatures::scalableVectors()); // Perform VLAT <-> VLST bitcast through memory. // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics @@ -1505,7 +1505,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // need to keep this around for bitcasts between VLAT <-> VLST where // the element types of the vectors are not the same, until we figure // out a better way of doing these casts. - assert(!UnimplementedFeature::scalableVectors()); + assert(!MissingFeatures::scalableVectors()); return CGF.getBuilder().createBitcast(CGF.getLoc(E->getSourceRange()), Src, DstTy); @@ -1558,7 +1558,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { if (MustVisitNullValue(E)) CGF.buildIgnoredExpr(E); - assert(!UnimplementedFeature::cxxABI()); + assert(!MissingFeatures::cxxABI()); const MemberPointerType *MPT = CE->getType()->getAs(); assert(!MPT->isMemberFunctionPointerType() && "NYI"); @@ -1707,14 +1707,14 @@ mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *E) { return buildLoadOfLValue(E); auto V = CGF.buildCallExpr(E).getScalarVal(); - assert(!UnimplementedFeature::buildLValueAlignmentAssumption()); + assert(!MissingFeatures::buildLValueAlignmentAssumption()); return V; } mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { // TODO(cir): Folding all this constants sound like work for MLIR optimizers, // keep assertion for now. - assert(!UnimplementedFeature::tryEmitAsConstant()); + assert(!MissingFeatures::tryEmitAsConstant()); Expr::EvalResult Result; if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) { llvm::APSInt Value = Result.Val.getInt(); @@ -1761,9 +1761,8 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { llvm_unreachable("NYI"); if (E->getType()->isVectorType()) { - assert(!UnimplementedFeature::scalableVectors() && - "NYI: scalable vector init"); - assert(!UnimplementedFeature::vectorConstants() && "NYI: vector constants"); + assert(!MissingFeatures::scalableVectors() && "NYI: scalable vector init"); + assert(!MissingFeatures::vectorConstants() && "NYI: vector constants"); auto VectorType = CGF.getCIRType(E->getType()).dyn_cast(); SmallVector Elements; @@ -2189,7 +2188,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( // If the dead side doesn't have labels we need, just emit the Live part. if (!CGF.ContainsLabel(dead)) { if (CondExprBool) - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); auto Result = Visit(live); // If the live part is a throw expression, it acts like it has a void @@ -2227,7 +2226,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) { bool lhsIsVoid = false; auto condV = CGF.evaluateExprAsBool(condExpr); - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); return builder .create( @@ -2287,7 +2286,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); eval.begin(CGF); auto lhs = Visit(lhsExpr); eval.end(CGF); @@ -2307,7 +2306,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::incrementProfileCounter()); eval.begin(CGF); auto rhs = Visit(rhsExpr); eval.end(CGF); @@ -2508,7 +2507,7 @@ mlir::Value ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { QualType Ty = VE->getType(); if (Ty->isVariablyModifiedType()) - assert(!UnimplementedFeature::variablyModifiedTypeEmission() && "NYI"); + assert(!MissingFeatures::variablyModifiedTypeEmission() && "NYI"); Address ArgValue = Address::invalid(); mlir::Value Val = CGF.buildVAArg(VE, ArgValue); @@ -2568,6 +2567,6 @@ mlir::Value CIRGenFunction::buildCheckedInBoundsGEP( // TODO(cir): the unreachable code below hides a substantial amount of code // from the original codegen related with pointer overflow sanitizer. - assert(UnimplementedFeature::pointerOverflowSanitizer()); + assert(MissingFeatures::pointerOverflowSanitizer()); llvm_unreachable("pointer overflow sanitizer NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 9f82686cb20b..f090fc71b86c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -14,7 +14,7 @@ #include "CIRGenCXXABI.h" #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" -#include "UnimplementedFeatureGuarding.h" +#include "clang/CIR/MissingFeatures.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/ExprObjC.h" @@ -326,7 +326,7 @@ void CIRGenFunction::LexicalScope::cleanup() { auto applyCleanup = [&]() { if (PerformCleanup) { // ApplyDebugLocation - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); ForceCleanup(); } }; @@ -444,7 +444,7 @@ void CIRGenFunction::LexicalScope::buildImplicitReturn() { FD->getASTContext(), FD->getReturnType()); if (CGF.SanOpts.has(SanitizerKind::Return)) { - assert(!UnimplementedFeature::sanitizerReturn()); + assert(!MissingFeatures::sanitizerReturn()); llvm_unreachable("NYI"); } else if (shouldEmitUnreachable) { if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { @@ -480,7 +480,7 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // instructions will get the location of the return statements and // all will be fine. if (auto *DI = getDebugInfo()) - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); // Pop any cleanups that might have been associated with the // parameters. Do this in whatever block we're currently in; it's @@ -491,7 +491,7 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // Make sure the line table doesn't jump back into the body for // the ret after it's been at EndLoc. if (auto *DI = getDebugInfo()) - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); // FIXME(cir): vla.c test currently crashes here. // PopCleanupBlocks(PrologueCleanupDepth); } @@ -502,41 +502,41 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // this as part of LexicalScope instead, given CIR might have multiple // blocks with `cir.return`. if (ShouldInstrumentFunction()) { - assert(!UnimplementedFeature::shouldInstrumentFunction() && "NYI"); + assert(!MissingFeatures::shouldInstrumentFunction() && "NYI"); } // Emit debug descriptor for function end. if (auto *DI = getDebugInfo()) - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); // Reset the debug location to that of the simple 'return' expression, if any // rather than that of the end of the function's scope '}'. - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); - assert(!UnimplementedFeature::emitFunctionEpilog() && "NYI"); - assert(!UnimplementedFeature::emitEndEHSpec() && "NYI"); + assert(!MissingFeatures::emitFunctionEpilog() && "NYI"); + assert(!MissingFeatures::emitEndEHSpec() && "NYI"); // FIXME(cir): vla.c test currently crashes here. // assert(EHStack.empty() && "did not remove all scopes from cleanup stack!"); // If someone did an indirect goto, emit the indirect goto block at the end of // the function. - assert(!UnimplementedFeature::indirectBranch() && "NYI"); + assert(!MissingFeatures::indirectBranch() && "NYI"); // If some of our locals escaped, insert a call to llvm.localescape in the // entry block. - assert(!UnimplementedFeature::escapedLocals() && "NYI"); + assert(!MissingFeatures::escapedLocals() && "NYI"); // If someone took the address of a label but never did an indirect goto, we // made a zero entry PHI node, which is illegal, zap it now. - assert(!UnimplementedFeature::indirectBranch() && "NYI"); + assert(!MissingFeatures::indirectBranch() && "NYI"); // CIRGen doesn't need to emit EHResumeBlock, TerminateLandingPad, // TerminateHandler, UnreachableBlock, TerminateFunclets, NormalCleanupDest // here because the basic blocks aren't shared. - assert(!UnimplementedFeature::emitDeclMetadata() && "NYI"); - assert(!UnimplementedFeature::deferredReplacements() && "NYI"); + assert(!MissingFeatures::emitDeclMetadata() && "NYI"); + assert(!MissingFeatures::deferredReplacements() && "NYI"); // Add the min-legal-vector-width attribute. This contains the max width from: // 1. min-vector-width attribute used in the source program. @@ -545,10 +545,10 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // 4. Width of vector arguments and return types for this function. // 5. Width of vector arguments and return types for functions called by // this function. - assert(!UnimplementedFeature::minLegalVectorWidthAttr() && "NYI"); + assert(!MissingFeatures::minLegalVectorWidthAttr() && "NYI"); // Add vscale_range attribute if appropriate. - assert(!UnimplementedFeature::vscaleRangeAttr() && "NYI"); + assert(!MissingFeatures::vscaleRangeAttr() && "NYI"); // In traditional LLVM codegen, if clang generated an unreachable return // block, it'd be deleted now. Same for unused ret allocas from ReturnValue @@ -619,12 +619,12 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // tests when the time comes, but CIR should be intrinsically scope // accurate, so no need to tie coroutines to such markers. if (isa(Body)) - assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); + assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); // Initialize helper which will detect jumps which can cause invalid // lifetime markers. if (ShouldEmitLifetimeMarkers) - assert(!UnimplementedFeature::shouldEmitLifetimeMarkers() && "NYI"); + assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); } // Create a scope in the symbol table to hold variable declarations. @@ -695,7 +695,7 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // If we haven't marked the function nothrow through other means, do a quick // pass now to see if we can. - assert(!UnimplementedFeature::tryMarkNoThrow()); + assert(!MissingFeatures::tryMarkNoThrow()); return Fn; } @@ -743,7 +743,7 @@ void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, // If this is a union copy constructor, we must emit a memcpy, because the AST // does not model that copy. if (isMemcpyEquivalentSpecialMember(D)) { - assert(!UnimplementedFeature::isMemcpyEquivalentSpecialMember()); + assert(!MissingFeatures::isMemcpyEquivalentSpecialMember()); } const FunctionProtoType *FPT = D->getType()->castAs(); @@ -775,10 +775,10 @@ void CIRGenFunction::buildCXXConstructorCall( // In LLVM: do nothing. // In CIR: emit as a regular call, other later passes should lower the // ctor call into trivial initialization. - assert(!UnimplementedFeature::isTrivialAndisDefaultConstructor()); + assert(!MissingFeatures::isTrivialAndisDefaultConstructor()); if (isMemcpyEquivalentSpecialMember(D)) { - assert(!UnimplementedFeature::isMemcpyEquivalentSpecialMember()); + assert(!MissingFeatures::isMemcpyEquivalentSpecialMember()); } bool PassPrototypeArgs = true; @@ -874,7 +874,7 @@ LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Value V, LValue CIRGenFunction::MakeNaturalAlignAddrLValue(mlir::Value V, QualType T) { LValueBaseInfo BaseInfo; - assert(!UnimplementedFeature::tbaa()); + assert(!MissingFeatures::tbaa()); CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo); Address Addr(V, getTypes().convertTypeForMem(T), Alignment); return LValue::makeAddr(Addr, T, getContext(), BaseInfo); @@ -1069,7 +1069,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, llvm_unreachable("NYI"); } - assert(!UnimplementedFeature::emitStartEHSpec() && "NYI"); + assert(!MissingFeatures::emitStartEHSpec() && "NYI"); // FIXME(cir): vla.c test currently crashes here. // PrologueCleanupDepth = EHStack.stable_begin(); @@ -1160,7 +1160,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, MD->getParent()->getLambdaCaptureDefault() == LCD_None) SkippedChecks.set(SanitizerKind::Null, true); - assert(!UnimplementedFeature::buildTypeCheck() && "NYI"); + assert(!MissingFeatures::buildTypeCheck() && "NYI"); } } @@ -1354,7 +1354,7 @@ void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper( return; // TODO(cir): create guard to restore fast math configurations. - assert(!UnimplementedFeature::fastMathGuard()); + assert(!MissingFeatures::fastMathGuard()); llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode(); // TODO(cir): override rounding behaviour once FM configs are guarded. @@ -1364,7 +1364,7 @@ void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper( // TODO(cir): override exception behaviour once FM configs are guarded. // TODO(cir): override FP flags once FM configs are guarded. - assert(!UnimplementedFeature::fastMathFlags()); + assert(!MissingFeatures::fastMathFlags()); assert((CGF.CurFuncDecl == nullptr || CGF.builder.getIsFPConstrained() || isa(CGF.CurFuncDecl) || @@ -1374,7 +1374,7 @@ void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper( "FPConstrained should be enabled on entire function"); // TODO(cir): mark CIR function with fast math attributes. - assert(!UnimplementedFeature::fastMathFuncAttributes()); + assert(!MissingFeatures::fastMathFuncAttributes()); } CIRGenFunction::CIRGenFPOptionsRAII::~CIRGenFPOptionsRAII() { @@ -1406,7 +1406,7 @@ bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *CE) { void CIRGenFunction::buildDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init) { - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); } Address CIRGenFunction::buildVAListRef(const Expr *E) { @@ -1617,7 +1617,7 @@ void CIRGenFunction::buildVariablyModifiedType(QualType type) { mlir::Value &entry = VLASizeMap[sizeExpr]; if (!entry) { mlir::Value size = buildScalarExpr(sizeExpr); - assert(!UnimplementedFeature::sanitizeVLABound()); + assert(!MissingFeatures::sanitizeVLABound()); // Always zexting here would be wrong if it weren't // undefined behavior to have a negative bound. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index e8e192768a4d..696df531dfb7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -153,7 +153,7 @@ class CIRGenFunction : public CIRGenTypeCache { // Work around an extremely aggressive peephole optimization in // EmitScalarConversion which assumes that all other uses of a // value are extant. - assert(!UnimplementedFeature::peepholeProtection() && "NYI"); + assert(!MissingFeatures::peepholeProtection() && "NYI"); return data; } @@ -167,7 +167,7 @@ class CIRGenFunction : public CIRGenTypeCache { CGF.OpaqueLValues.erase(OpaqueValue); } else { CGF.OpaqueRValues.erase(OpaqueValue); - assert(!UnimplementedFeature::peepholeProtection() && "NYI"); + assert(!MissingFeatures::peepholeProtection() && "NYI"); } } }; @@ -930,7 +930,7 @@ class CIRGenFunction : public CIRGenTypeCache { bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; } /// Returns true inside SEH __try blocks. - bool isSEHTryScope() const { return UnimplementedFeature::isSEHTryScope(); } + bool isSEHTryScope() const { return MissingFeatures::isSEHTryScope(); } mlir::Operation *CurrentFuncletPad = nullptr; @@ -1931,7 +1931,7 @@ class CIRGenFunction : public CIRGenTypeCache { ~LexicalScope() { // EmitLexicalBlockEnd - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); // If we should perform a cleanup, force them now. Note that // this ends the cleanup scope before rescoping any labels. cleanup(); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 105b64c49a6c..363d970f4c87 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -691,7 +691,7 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, case Qualifiers::OCL_Strong: llvm_unreachable("NYI"); // arc retain non block: - assert(!UnimplementedFeature::ARC()); + assert(!MissingFeatures::ARC()); [[fallthrough]]; case Qualifiers::OCL_None: @@ -703,7 +703,7 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, case Qualifiers::OCL_Weak: llvm_unreachable("NYI"); // arc init weak: - assert(!UnimplementedFeature::ARC()); + assert(!MissingFeatures::ARC()); return; } llvm_unreachable("bad ownership qualifier!"); @@ -828,7 +828,7 @@ CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, mlir::cir::GlobalLinkageKind::ExternalLinkage, getContext().toCharUnitsFromBits(PAlign)); // LLVM codegen handles unnamedAddr - assert(!UnimplementedFeature::unnamedAddr()); + assert(!MissingFeatures::unnamedAddr()); // In MS C++ if you have a class with virtual functions in which you are using // selective member import/export, then all virtual functions must be exported @@ -1432,8 +1432,8 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(mlir::Location loc, // Give the type_info object and name the formal visibility of the // type itself. - assert(!UnimplementedFeature::hiddenVisibility()); - assert(!UnimplementedFeature::protectedVisibility()); + assert(!MissingFeatures::hiddenVisibility()); + assert(!MissingFeatures::protectedVisibility()); mlir::SymbolTable::Visibility symVisibility; if (mlir::cir::isLocalLinkage(Linkage)) // If the linkage is local, only default visibility makes sense. @@ -1444,7 +1444,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(mlir::Location loc, else symVisibility = CIRGenModule::getCIRVisibility(Ty->getVisibility()); - assert(!UnimplementedFeature::setDLLStorageClass()); + assert(!MissingFeatures::setDLLStorageClass()); return BuildTypeInfo(loc, Ty, Linkage, symVisibility); } @@ -1576,7 +1576,7 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, CGM.getBuilder().getUInt8PtrTy()); } - if (UnimplementedFeature::setDSOLocal()) + if (MissingFeatures::setDSOLocal()) llvm_unreachable("NYI"); // The vtable address point is 2. @@ -1816,7 +1816,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( mlir::Location loc, QualType Ty, mlir::cir::GlobalLinkageKind Linkage, mlir::SymbolTable::Visibility Visibility) { auto &builder = CGM.getBuilder(); - assert(!UnimplementedFeature::setDLLStorageClass()); + assert(!MissingFeatures::setDLLStorageClass()); // Add the vtable pointer. BuildVTablePointer(loc, cast(Ty)); @@ -1932,7 +1932,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( break; } - assert(!UnimplementedFeature::setDLLImportDLLExport()); + assert(!MissingFeatures::setDLLImportDLLExport()); auto init = builder.getTypeInfo(builder.getArrayAttr(Fields)); SmallString<256> Name; @@ -1963,7 +1963,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( } if (CGM.supportsCOMDAT() && mlir::cir::isWeakForLinker(GV.getLinkage())) { - assert(!UnimplementedFeature::setComdat()); + assert(!MissingFeatures::setComdat()); llvm_unreachable("NYI"); } @@ -1987,16 +1987,16 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( // object and the type_info name be uniqued when weakly emitted. // TODO(cir): setup other bits for TypeName - assert(!UnimplementedFeature::setDLLStorageClass()); - assert(!UnimplementedFeature::setPartition()); - assert(!UnimplementedFeature::setDSOLocal()); + assert(!MissingFeatures::setDLLStorageClass()); + assert(!MissingFeatures::setPartition()); + assert(!MissingFeatures::setDSOLocal()); mlir::SymbolTable::setSymbolVisibility( TypeName, CIRGenModule::getMLIRVisibility(TypeName)); // TODO(cir): setup other bits for GV - assert(!UnimplementedFeature::setDLLStorageClass()); - assert(!UnimplementedFeature::setPartition()); - assert(!UnimplementedFeature::setDSOLocal()); + assert(!MissingFeatures::setDLLStorageClass()); + assert(!MissingFeatures::setPartition()); + assert(!MissingFeatures::setDSOLocal()); CIRGenModule::setInitializer(GV, init); return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), GV); @@ -2032,7 +2032,7 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, VTable.setLinkage(Linkage); if (CGM.supportsCOMDAT() && mlir::cir::isWeakForLinker(Linkage)) { - assert(!UnimplementedFeature::setComdat()); + assert(!MissingFeatures::setComdat()); } // Set the right visibility. @@ -2069,7 +2069,7 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, if (isDeclarationForLinker) { llvm_unreachable("NYI"); assert(CGM.getCodeGenOpts().WholeProgramVTables); - assert(!UnimplementedFeature::addCompilerUsedGlobal()); + assert(!MissingFeatures::addCompilerUsedGlobal()); } } @@ -2202,7 +2202,7 @@ static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { // Prototype: void __cxa_bad_cast(); // TODO(cir): set the calling convention of the runtime function. - assert(!UnimplementedFeature::setCallingConv()); + assert(!MissingFeatures::setCallingConv()); mlir::cir::FuncType FTy = CGF.getBuilder().getFuncType({}, CGF.getBuilder().getVoidTy()); @@ -2212,7 +2212,7 @@ static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { void CIRGenItaniumCXXABI::buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) { // TODO(cir): set the calling convention to the runtime function. - assert(!UnimplementedFeature::setCallingConv()); + assert(!MissingFeatures::setCallingConv()); CGF.buildRuntimeCall(loc, getBadCastFn(CGF)); CGF.getBuilder().create(loc); @@ -2283,7 +2283,7 @@ static mlir::cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { // TODO(cir): mark the function as nowind readonly. // TODO(cir): set the calling convention of the runtime function. - assert(!UnimplementedFeature::setCallingConv()); + assert(!MissingFeatures::setCallingConv()); mlir::cir::FuncType FTy = CGF.getBuilder().getFuncType( {VoidPtrTy, RTTIPtrTy, RTTIPtrTy, PtrDiffTy}, VoidPtrTy); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 16018fa80bbf..fda516141de3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -20,7 +20,6 @@ #include "CIRGenValue.h" #include "TargetInfo.h" -#include "UnimplementedFeatureGuarding.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/Attributes.h" @@ -32,6 +31,7 @@ #include "mlir/IR/OperationSupport.h" #include "mlir/IR/SymbolTable.h" #include "mlir/IR/Verifier.h" +#include "clang/CIR/MissingFeatures.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/DeclCXX.h" @@ -371,15 +371,15 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { if (langOpts.OpenMP) { // If this is OpenMP, check if it is legal to emit this global normally. if (openMPRuntime && openMPRuntime->emitTargetGlobal(GD)) { - assert(!UnimplementedFeature::openMPRuntime()); + assert(!MissingFeatures::openMPRuntime()); return; } if (auto *DRD = dyn_cast(Global)) { - assert(!UnimplementedFeature::openMP()); + assert(!MissingFeatures::openMP()); return; } if (auto *DMD = dyn_cast(Global)) { - assert(!UnimplementedFeature::openMP()); + assert(!MissingFeatures::openMP()); return; } } @@ -578,7 +578,7 @@ mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM, } void CIRGenModule::setCommonAttributes(GlobalDecl GD, mlir::Operation *GV) { - assert(!UnimplementedFeature::setCommonAttributes()); + assert(!MissingFeatures::setCommonAttributes()); } void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, @@ -678,14 +678,13 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // Handle dropped DLL attributes. if (D && !D->hasAttr() && !D->hasAttr()) - assert(!UnimplementedFeature::setDLLStorageClass() && "NYI"); + assert(!MissingFeatures::setDLLStorageClass() && "NYI"); if (langOpts.OpenMP && !langOpts.OpenMPSimd && D) getOpenMPRuntime().registerTargetGlobalVariable(D, Entry); // TODO(cir): check TargetAS matches Entry address space - if (Entry.getSymType() == Ty && - !UnimplementedFeature::addressSpaceInGlobalVar()) + if (Entry.getSymType() == Ty && !MissingFeatures::addressSpaceInGlobalVar()) return Entry; // If there are two attempts to define the same mangled name, issue an @@ -1114,11 +1113,11 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, GV.setVisibility(getMLIRVisibilityFromCIRLinkage(Linkage)); // TODO(cir): handle DLL storage classes in CIR? if (D->hasAttr()) - assert(!UnimplementedFeature::setDLLStorageClass()); + assert(!MissingFeatures::setDLLStorageClass()); else if (D->hasAttr()) - assert(!UnimplementedFeature::setDLLStorageClass()); + assert(!MissingFeatures::setDLLStorageClass()); else - assert(!UnimplementedFeature::setDLLStorageClass()); + assert(!MissingFeatures::setDLLStorageClass()); if (Linkage == mlir::cir::GlobalLinkageKind::CommonLinkage) { // common vars aren't constant even if declared const. @@ -1257,8 +1256,7 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, StringRef GlobalName, CharUnits Alignment) { unsigned AddrSpace = CGM.getASTContext().getTargetAddressSpace( CGM.getGlobalConstantAddressSpace()); - assert((AddrSpace == 0 && - !cir::UnimplementedFeature::addressSpaceInGlobalVar()) && + assert((AddrSpace == 0 && !cir::MissingFeatures::addressSpaceInGlobalVar()) && "NYI"); // Create a global variable for this string @@ -1273,10 +1271,10 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, CIRGenModule::setInitializer(GV, C); // TODO(cir) - assert(!cir::UnimplementedFeature::threadLocal() && "NYI"); - assert(!cir::UnimplementedFeature::unnamedAddr() && "NYI"); + assert(!cir::MissingFeatures::threadLocal() && "NYI"); + assert(!cir::MissingFeatures::unnamedAddr() && "NYI"); assert(!mlir::cir::isWeakForLinker(LT) && "NYI"); - assert(!cir::UnimplementedFeature::setDSOLocal() && "NYI"); + assert(!cir::MissingFeatures::setDSOLocal() && "NYI"); return GV; } @@ -1327,7 +1325,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, Alignment); ConstantStringMap[C] = GV; - assert(!cir::UnimplementedFeature::reportGlobalToASan() && "NYI"); + assert(!cir::MissingFeatures::reportGlobalToASan() && "NYI"); } auto ArrayTy = GV.getSymType().dyn_cast(); @@ -1411,7 +1409,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { break; case Decl::ClassTemplateSpecialization: { // const auto *Spec = cast(decl); - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); } [[fallthrough]]; case Decl::CXXRecord: { @@ -1438,7 +1436,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { case Decl::UsingEnum: // using enum X; [C++] case Decl::NamespaceAlias: case Decl::UsingDirective: // using namespace X; [C++] - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); break; case Decl::CXXConstructor: getCXXABI().buildCXXConstructors(cast(decl)); @@ -1459,7 +1457,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { case Decl::TypeAlias: // using foo = bar; [C++11] case Decl::Record: case Decl::Enum: - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); break; } } @@ -1699,9 +1697,9 @@ void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( return; // TODO(cir): this RAUW ignores the features below. - assert(!UnimplementedFeature::exceptions() && "Call vs Invoke NYI"); - assert(!UnimplementedFeature::parameterAttributes()); - assert(!UnimplementedFeature::operandBundles()); + assert(!MissingFeatures::exceptions() && "Call vs Invoke NYI"); + assert(!MissingFeatures::parameterAttributes()); + assert(!MissingFeatures::operandBundles()); assert(OldFn->getAttrs().size() > 1 && "Attribute forwarding NYI"); // Mark new function as originated from a no-proto declaration. @@ -1776,7 +1774,7 @@ void CIRGenModule::buildAliasForGlobal(StringRef mangledName, alias, getMLIRVisibilityFromCIRLinkage(linkage)); // Alias constructors and destructors are always unnamed_addr. - assert(!UnimplementedFeature::unnamedAddr()); + assert(!MissingFeatures::unnamedAddr()); // Switch any previous uses to the alias. if (op) { @@ -1973,16 +1971,16 @@ void CIRGenModule::buildTentativeDefinition(const VarDecl *D) { void CIRGenModule::setGlobalVisibility(mlir::Operation *GV, const NamedDecl *D) const { - assert(!UnimplementedFeature::setGlobalVisibility()); + assert(!MissingFeatures::setGlobalVisibility()); } void CIRGenModule::setDSOLocal(mlir::Operation *Op) const { - assert(!UnimplementedFeature::setDSOLocal()); + assert(!MissingFeatures::setDSOLocal()); } void CIRGenModule::setGVProperties(mlir::Operation *Op, const NamedDecl *D) const { - assert(!UnimplementedFeature::setDLLImportDLLExport()); + assert(!MissingFeatures::setDLLImportDLLExport()); setGVPropertiesAux(Op, D); } @@ -1990,7 +1988,7 @@ void CIRGenModule::setGVPropertiesAux(mlir::Operation *Op, const NamedDecl *D) const { setGlobalVisibility(Op, D); setDSOLocal(Op); - assert(!UnimplementedFeature::setPartition()); + assert(!MissingFeatures::setPartition()); } bool CIRGenModule::lookupRepresentativeDecl(StringRef MangledName, @@ -2194,7 +2192,7 @@ void CIRGenModule::setExtraAttributesForFunc(FuncOp f, void CIRGenModule::setFunctionAttributes(GlobalDecl GD, mlir::cir::FuncOp F, bool IsIncompleteFunction, bool IsThunk) { - assert(!UnimplementedFeature::setFunctionAttributes()); + assert(!MissingFeatures::setFunctionAttributes()); } /// If the specified mangled name is not in the module, @@ -2379,7 +2377,7 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( } // TODO(cir): Might need bitcast to different address space. - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); return F; } @@ -2615,7 +2613,7 @@ void CIRGenModule::maybeSetTrivialComdat(const Decl &D, mlir::Operation *Op) { return; // TODO: Op.setComdat - assert(!UnimplementedFeature::setComdat() && "NYI"); + assert(!MissingFeatures::setComdat() && "NYI"); } bool CIRGenModule::isInNoSanitizeList(SanitizerMask Kind, mlir::cir::FuncOp Fn, @@ -2687,7 +2685,7 @@ void CIRGenModule::buildExplicitCastExprType(const ExplicitCastExpr *E, if (CGF && E->getType()->isVariablyModifiedType()) llvm_unreachable("NYI"); - assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); + assert(!MissingFeatures::generateDebugInfo() && "NYI"); } void CIRGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { @@ -2742,10 +2740,10 @@ mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( OldGV->erase(); } - assert(!UnimplementedFeature::setComdat()); + assert(!MissingFeatures::setComdat()); if (supportsCOMDAT() && mlir::cir::isWeakForLinker(Linkage) && !GV.hasAvailableExternallyLinkage()) - assert(!UnimplementedFeature::setComdat()); + assert(!MissingFeatures::setComdat()); GV.setAlignmentAttr(getSize(Alignment)); return GV; @@ -2802,7 +2800,7 @@ mlir::cir::GlobalOp CIRGenModule::getOrInsertGlobal( // If the variable exists but has the wrong type, return a bitcast to the // right type. auto GVTy = GV.getSymType(); - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); auto PTy = builder.getPointerTo(Ty); if (GVTy != PTy) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 205dcaf62393..d75109428a84 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -19,7 +19,7 @@ #include "CIRGenTypes.h" #include "CIRGenVTables.h" #include "CIRGenValue.h" -#include "UnimplementedFeatureGuarding.h" +#include "clang/CIR/MissingFeatures.h" #include "clang/AST/ASTContext.h" #include "clang/AST/StmtVisitor.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp index a42d84b12cb0..fa2cc5a174b3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp @@ -21,35 +21,35 @@ CIRGenOpenMPRuntime::CIRGenOpenMPRuntime(CIRGenModule &CGM) : CGM(CGM) {} Address CIRGenOpenMPRuntime::getAddressOfLocalVariable(CIRGenFunction &CGF, const VarDecl *VD) { - assert(!UnimplementedFeature::openMPRuntime()); + assert(!MissingFeatures::openMPRuntime()); return Address::invalid(); } void CIRGenOpenMPRuntime::checkAndEmitLastprivateConditional( CIRGenFunction &CGF, const Expr *LHS) { - assert(!UnimplementedFeature::openMPRuntime()); + assert(!MissingFeatures::openMPRuntime()); return; } void CIRGenOpenMPRuntime::registerTargetGlobalVariable( const clang::VarDecl *VD, mlir::cir::GlobalOp globalOp) { - assert(!UnimplementedFeature::openMPRuntime()); + assert(!MissingFeatures::openMPRuntime()); return; } void CIRGenOpenMPRuntime::emitDeferredTargetDecls() const { - assert(!UnimplementedFeature::openMPRuntime()); + assert(!MissingFeatures::openMPRuntime()); return; } void CIRGenOpenMPRuntime::emitFunctionProlog(CIRGenFunction &CGF, const clang::Decl *D) { - assert(!UnimplementedFeature::openMPRuntime()); + assert(!MissingFeatures::openMPRuntime()); return; } bool CIRGenOpenMPRuntime::emitTargetGlobal(clang::GlobalDecl &GD) { - assert(!UnimplementedFeature::openMPRuntime()); + assert(!MissingFeatures::openMPRuntime()); return false; } @@ -70,14 +70,14 @@ void CIRGenOpenMPRuntime::emitTaskWaitCall(CIRGenBuilderTy &builder, } else { llvm_unreachable("NYI"); } - assert(!UnimplementedFeature::openMPRegionInfo()); + assert(!MissingFeatures::openMPRegionInfo()); } void CIRGenOpenMPRuntime::emitBarrierCall(CIRGenBuilderTy &builder, CIRGenFunction &CGF, mlir::Location Loc) { - assert(!UnimplementedFeature::openMPRegionInfo()); + assert(!MissingFeatures::openMPRegionInfo()); if (CGF.CGM.getLangOpts().OpenMPIRBuilder) { builder.create(Loc); @@ -103,5 +103,5 @@ void CIRGenOpenMPRuntime::emitTaskyieldCall(CIRGenBuilderTy &builder, llvm_unreachable("NYI"); } - assert(!UnimplementedFeature::openMPRegionInfo()); + assert(!MissingFeatures::openMPRegionInfo()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h index a27b04a4866b..15a47eddd58c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h +++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h @@ -26,7 +26,7 @@ #include "mlir/IR/Dialect.h" #include "mlir/IR/Location.h" -#include "UnimplementedFeatureGuarding.h" +#include "clang/CIR/MissingFeatures.h" namespace clang { class Decl; diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 9766b5df78fe..5df3b3586be7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -83,7 +83,7 @@ Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast, } void CIRGenFunction::buildStopPoint(const Stmt *S) { - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); } // Build CIR for a statement. useCurrentScope should be true if no @@ -420,11 +420,11 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { // TODO(cir): there is still an empty cir.scope generated by the caller. return mlir::success(); } - assert(!UnimplementedFeature::constantFoldsToSimpleInteger()); + assert(!MissingFeatures::constantFoldsToSimpleInteger()); } - assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); - assert(!UnimplementedFeature::incrementProfileCounter()); + assert(!MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); + assert(!MissingFeatures::incrementProfileCounter()); return buildIfOnBoolExpr(S.getCond(), S.getThen(), S.getElse()); }; @@ -456,7 +456,7 @@ mlir::LogicalResult CIRGenFunction::buildDeclStmt(const DeclStmt &S) { } mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { - assert(!UnimplementedFeature::requiresReturnValueCheck()); + assert(!MissingFeatures::requiresReturnValueCheck()); auto loc = getLoc(S.getSourceRange()); // Emit the result value, even if unused, to evaluate the side effects. @@ -474,7 +474,7 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { auto handleReturnVal = [&]() { if (getContext().getLangOpts().ElideConstructors && S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) { - assert(!UnimplementedFeature::openMP()); + assert(!MissingFeatures::openMP()); // Apply the named return value optimization for this return statement, // which means doing nothing: the appropriate result has already been // constructed into the NRVO variable. @@ -726,19 +726,19 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, if (buildStmt(S.getEndStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - assert(!UnimplementedFeature::loopInfoStack()); + assert(!MissingFeatures::loopInfoStack()); // From LLVM: if there are any cleanups between here and the loop-exit // scope, create a block to stage a loop exit along. // We probably already do the right thing because of ScopeOp, but make // sure we handle all cases. - assert(!UnimplementedFeature::requiresCleanups()); + assert(!MissingFeatures::requiresCleanups()); forOp = builder.createFor( getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - assert(!UnimplementedFeature::createProfileWeightsForLoop()); - assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); + assert(!MissingFeatures::createProfileWeightsForLoop()); + assert(!MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal = evaluateExprAsBool(S.getCond()); builder.createCondition(condVal); }, @@ -793,19 +793,19 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { if (S.getInit()) if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - assert(!UnimplementedFeature::loopInfoStack()); + assert(!MissingFeatures::loopInfoStack()); // From LLVM: if there are any cleanups between here and the loop-exit // scope, create a block to stage a loop exit along. // We probably already do the right thing because of ScopeOp, but make // sure we handle all cases. - assert(!UnimplementedFeature::requiresCleanups()); + assert(!MissingFeatures::requiresCleanups()); forOp = builder.createFor( getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - assert(!UnimplementedFeature::createProfileWeightsForLoop()); - assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); + assert(!MissingFeatures::createProfileWeightsForLoop()); + assert(!MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal; if (S.getCond()) { // If the for statement has a condition scope, @@ -868,19 +868,19 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { // TODO: pass in array of attributes. auto doStmtBuilder = [&]() -> mlir::LogicalResult { auto loopRes = mlir::success(); - assert(!UnimplementedFeature::loopInfoStack()); + assert(!MissingFeatures::loopInfoStack()); // From LLVM: if there are any cleanups between here and the loop-exit // scope, create a block to stage a loop exit along. // We probably already do the right thing because of ScopeOp, but make // sure we handle all cases. - assert(!UnimplementedFeature::requiresCleanups()); + assert(!MissingFeatures::requiresCleanups()); doWhileOp = builder.createDoWhile( getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - assert(!UnimplementedFeature::createProfileWeightsForLoop()); - assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); + assert(!MissingFeatures::createProfileWeightsForLoop()); + assert(!MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); // C99 6.8.5p2/p4: The first substatement is executed if the // expression compares unequal to 0. The condition must be a // scalar type. @@ -918,19 +918,19 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // TODO: pass in array of attributes. auto whileStmtBuilder = [&]() -> mlir::LogicalResult { auto loopRes = mlir::success(); - assert(!UnimplementedFeature::loopInfoStack()); + assert(!MissingFeatures::loopInfoStack()); // From LLVM: if there are any cleanups between here and the loop-exit // scope, create a block to stage a loop exit along. // We probably already do the right thing because of ScopeOp, but make // sure we handle all cases. - assert(!UnimplementedFeature::requiresCleanups()); + assert(!MissingFeatures::requiresCleanups()); whileOp = builder.createWhile( getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - assert(!UnimplementedFeature::createProfileWeightsForLoop()); - assert(!UnimplementedFeature::emitCondLikelihoodViaExpectIntrinsic()); + assert(!MissingFeatures::createProfileWeightsForLoop()); + assert(!MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal; // If the for statement has a condition scope, // emit the local variable declaration. diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index fd0f26e47af6..0930b17d55e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -13,12 +13,12 @@ #ifndef LLVM_CLANG_LIB_CIR_CODEGENTYPECACHE_H #define LLVM_CLANG_LIB_CIR_CODEGENTYPECACHE_H -#include "UnimplementedFeatureGuarding.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Types.h" #include "clang/AST/CharUnits.h" #include "clang/Basic/AddressSpaces.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" namespace cir { @@ -127,7 +127,7 @@ struct CIRGenTypeCache { // Address spaces are not yet fully supported, but the usage of the default // alloca address space can be used for now only for comparison with the // default address space. - assert(!UnimplementedFeature::addressSpace()); + assert(!MissingFeatures::addressSpace()); assert(ASTAllocaAddressSpace == clang::LangAS::Default); return ASTAllocaAddressSpace; } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index fd398c1128fe..ca4d18461154 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -828,7 +828,7 @@ void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { } // If necessary, provide the full definition of a type only used with a // declaration so far. - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 319adf4619a8..bceb1c943764 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -151,7 +151,7 @@ void CIRGenModule::buildDeferredVTables() { } void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) { - assert(!UnimplementedFeature::generateDebugInfo()); + assert(!MissingFeatures::generateDebugInfo()); if (RD->getNumVBases()) CGM.getCXXABI().emitVirtualInheritanceTables(RD); @@ -524,7 +524,7 @@ void CIRGenVTables::buildVTTDefinition(mlir::cir::GlobalOp VTT, CIRGenModule::getMLIRVisibility(VTT)); if (CGM.supportsCOMDAT() && VTT.isWeakForLinker()) { - assert(!UnimplementedFeature::setComdat()); + assert(!MissingFeatures::setComdat()); } } diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 390a6ccc1d62..8af007621ba4 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -398,7 +398,7 @@ void CIRRecordLowering::computeVolatileBitfields() { return; for ([[maybe_unused]] auto &I : bitFields) { - assert(!UnimplementedFeature::armComputeVolatileBitfields()); + assert(!MissingFeatures::armComputeVolatileBitfields()); } } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index b3101f6b73ce..3ab52cc73358 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -10,12 +10,11 @@ // //===----------------------------------------------------------------------===// -#include "MissingFeatures.h" - #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypesDetails.h" +#include "clang/CIR/MissingFeatures.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributes.h" diff --git a/clang/lib/CIR/Dialect/IR/MissingFeatures.h b/clang/lib/CIR/Dialect/IR/MissingFeatures.h deleted file mode 100644 index 2e4e9c8ad9e6..000000000000 --- a/clang/lib/CIR/Dialect/IR/MissingFeatures.h +++ /dev/null @@ -1,43 +0,0 @@ -//===---- UnimplementedFeatureGuarding.h - Checks against NYI ---*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file introduces some helper classes to guard against features that -// CIR dialect supports that we do not have and also do not have great ways to -// assert against. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_LIB_CIR_DIALECT_IR_UFG -#define LLVM_CLANG_LIB_CIR_DIALECT_IR_UFG - -namespace cir { - -struct MissingFeatures { - // C++ ABI support - static bool cxxABI() { return false; } - static bool setCallingConv() { return false; } - static bool handleBigEndian() { return false; } - static bool handleAArch64Indirect() { return false; } - static bool classifyArgumentTypeForAArch64() { return false; } - static bool supportgetCoerceToTypeForAArch64() { return false; } - static bool supportTySizeQueryForAArch64() { return false; } - static bool supportTyAlignQueryForAArch64() { return false; } - static bool supportisHomogeneousAggregateQueryForAArch64() { return false; } - static bool supportisEndianQueryForAArch64() { return false; } - static bool supportisAggregateTypeForABIAArch64() { return false; } - - // Address space related - static bool addressSpace() { return false; } - - // Sanitizers - static bool buildTypeCheck() { return false; } -}; - -} // namespace cir - -#endif // LLVM_CLANG_LIB_CIR_DIALECT_IR_UFG diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp index 460266161ead..b5a1a6fa5a77 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp @@ -12,11 +12,11 @@ // //===------------------------------------------------------------------===// -#include "../IR/MissingFeatures.h" #include "LoweringPrepareItaniumCXXABI.h" #include "clang/AST/CharUnits.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" #include diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp index 191ee837341c..b33bcfa94989 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp @@ -13,7 +13,6 @@ //===--------------------------------------------------------------------===// #include "LoweringPrepareItaniumCXXABI.h" -#include "../IR/MissingFeatures.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" @@ -22,6 +21,7 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/MissingFeatures.h" using namespace cir; From 4bde380b8e7a454bcc4fbe3f56d237471d047aa1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Jun 2024 12:43:05 -0700 Subject: [PATCH 1613/2301] [CIR][CIRGen] Add support for static decl initializer type mismatch --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 14 +++++--- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 42 ++++++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 1 + clang/test/CIR/CodeGen/stmtexpr-init.c | 47 ++++++++++++++++++++++++++ 5 files changed, 94 insertions(+), 15 deletions(-) create mode 100644 clang/test/CIR/CodeGen/stmtexpr-init.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 705a057e35b0..4c0109382eb7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -684,6 +684,15 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { // FIXME(cir): Track a list of globals, or at least the last one inserted, so // that we can insert globals in the same order they are defined by CIRGen. + [[nodiscard]] mlir::cir::GlobalOp + createGlobal(mlir::ModuleOp module, mlir::Location loc, mlir::StringRef name, + mlir::Type type, bool isConst, + mlir::cir::GlobalLinkageKind linkage) { + mlir::OpBuilder::InsertionGuard guard(*this); + setInsertionPointToStart(module.getBody()); + return create(loc, name, type, isConst, linkage); + } + /// Creates a versioned global variable. If the symbol is already taken, an ID /// will be appended to the symbol. The returned global must always be queried /// for its name so it can be referenced correctly. @@ -691,9 +700,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { createVersionedGlobal(mlir::ModuleOp module, mlir::Location loc, mlir::StringRef name, mlir::Type type, bool isConst, mlir::cir::GlobalLinkageKind linkage) { - mlir::OpBuilder::InsertionGuard guard(*this); - setInsertionPointToStart(module.getBody()); - // Create a unique name if the given name is already taken. std::string uniqueName; if (unsigned version = GlobalsVersioning[name.str()]++) @@ -701,7 +707,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { else uniqueName = name.str(); - return create(loc, uniqueName, type, isConst, linkage); + return createGlobal(module, loc, uniqueName, type, isConst, linkage); } mlir::Value createGetGlobal(mlir::cir::GlobalOp global, diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index d1a1768d83c9..1f678b47f57f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -529,9 +529,8 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, /// Add the initializer for 'D' to the global variable that has already been /// created for it. If the initializer has a different type than GV does, this /// may free GV and return a different one. Otherwise it just returns GV. -mlir::cir::GlobalOp -CIRGenFunction::addInitializerToStaticVarDecl(const VarDecl &D, - mlir::cir::GlobalOp GV) { +mlir::cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( + const VarDecl &D, mlir::cir::GlobalOp GV, mlir::cir::GetGlobalOp GVAddr) { ConstantEmitter emitter(*this); mlir::TypedAttr Init = emitter.tryEmitForInitializer(D).dyn_cast(); @@ -566,7 +565,27 @@ CIRGenFunction::addInitializerToStaticVarDecl(const VarDecl &D, // because some types, like unions, can't be completely represented // in the LLVM type system.) if (GV.getSymType() != Init.getType()) { - llvm_unreachable("static decl initializer type mismatch is NYI"); + mlir::cir::GlobalOp OldGV = GV; + GV = builder.createGlobal(CGM.getModule(), getLoc(D.getSourceRange()), + OldGV.getName(), Init.getType(), + OldGV.getConstant(), GV.getLinkage()); + // FIXME(cir): OG codegen inserts new GV before old one, we probably don't + // need that? + GV.setVisibility(OldGV.getVisibility()); + GV.setInitialValueAttr(Init); + GV.setTlsModelAttr(OldGV.getTlsModelAttr()); + assert(!MissingFeatures::setDSOLocal()); + assert(!MissingFeatures::setComdat()); + assert(!MissingFeatures::addressSpaceInGlobalVar()); + + // Normally this should be done with a call to CGM.replaceGlobal(OldGV, GV), + // but since at this point the current block hasn't been really attached, + // there's no visibility into the GetGlobalOp corresponding to this Global. + // Given those constraints, thread in the GetGlobalOp and update it + // directly. + GVAddr.getAddr().setType( + mlir::cir::PointerType::get(builder.getContext(), Init.getType())); + OldGV->erase(); } bool NeedsDtor = @@ -597,6 +616,8 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, // TODO(cir): we should have a way to represent global ops as values without // having to emit a get global op. Sometimes these emissions are not used. auto addr = getBuilder().createGetGlobal(globalOp); + auto getAddrOp = mlir::cast(addr.getDefiningOp()); + CharUnits alignment = getContext().getDeclAlign(&D); // Store into LocalDeclMap before generating initializer to handle @@ -623,7 +644,7 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, D.hasAttr(); // If this value has an initializer, emit it. if (D.getInit() && !isCudaSharedVar) - var = addInitializerToStaticVarDecl(D, var); + var = addInitializerToStaticVarDecl(D, var, getAddrOp); var.setAlignment(alignment.getAsAlign().value()); @@ -647,15 +668,18 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, else if (D.hasAttr()) llvm_unreachable("llvm.compiler.used metadata is NYI"); + // From traditional codegen: // We may have to cast the constant because of the initializer // mismatch above. // // FIXME: It is really dangerous to store this in the map; if anyone // RAUW's the GV uses of this constant will be invalid. - // TODO(cir): its suppose to be possible that the initializer does not match - // the static var type. When this happens, there should be a cast here. - assert(var.getSymType() != expectedType && - "static var init type mismatch is NYI"); + // + // Since in CIR the address materialization is done over cir.get_global + // and that's already updated, update the map directly instead of using + // casts. + LocalDeclMap.find(&D)->second = + Address(getAddrOp.getAddr(), elemTy, alignment); CGM.setStaticLocalDeclAddress(&D, var); assert(!MissingFeatures::reportGlobalToASan()); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 696df531dfb7..7c1fd6e005e1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1291,8 +1291,9 @@ class CIRGenFunction : public CIRGenTypeCache { /// inside a function, including static vars etc. void buildVarDecl(const clang::VarDecl &D); - mlir::cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &D, - mlir::cir::GlobalOp GV); + mlir::cir::GlobalOp + addInitializerToStaticVarDecl(const VarDecl &D, mlir::cir::GlobalOp GV, + mlir::cir::GetGlobalOp GVAddr); void buildStaticVarDecl(const VarDecl &D, mlir::cir::GlobalLinkageKind Linkage); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index fda516141de3..c28a34f23a75 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2735,6 +2735,7 @@ mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( // Replace occurrences of the old variable if needed. GV.setName(OldGV.getName()); if (!OldGV->use_empty()) { + // TODO(cir): remove erase call above and use replaceGlobal here. llvm_unreachable("NYI"); } OldGV->erase(); diff --git a/clang/test/CIR/CodeGen/stmtexpr-init.c b/clang/test/CIR/CodeGen/stmtexpr-init.c new file mode 100644 index 000000000000..d073335d7dcd --- /dev/null +++ b/clang/test/CIR/CodeGen/stmtexpr-init.c @@ -0,0 +1,47 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// CIR: ![[sized_array:.*]] = !cir.struct} +// CIR: ![[annon_struct:.*]] = !cir.struct}> + +void escape(const void *); + +// CIR-DAG: cir.global "private" internal @T1._x = #cir.int<99> : !s8i +// LLVM-DAG: internal global i8 99 + +void T1(void) { + const char *x[1] = {({static char _x = 99; &_x; })}; + escape(x); +} + +struct sized_array { + int count; + int entries[]; +}; + +#define N_ARGS(...) (sizeof((int[]){__VA_ARGS__}) / sizeof(int)) + +#define ARRAY_PTR(...) ({ \ + static const struct sized_array _a = {N_ARGS(__VA_ARGS__), {__VA_ARGS__}}; \ + &_a; \ +}) + +struct outer { + const struct sized_array *a; +}; + +void T2(void) { + // CIR-DAG: cir.global "private" constant internal @T2._a = #cir.const_struct<{#cir.int<2> : !s32i, #cir.const_array<[#cir.int<50> : !s32i, #cir.int<60> : !s32i]> : !cir.array}> + // LLVM-DAG: internal constant { i32, [2 x i32] } { i32 2, [2 x i32] [i32 50, i32 60] } + const struct sized_array *A = ARRAY_PTR(50, 60); + + // CIR-DAG: cir.global "private" constant internal @T2._a.1 = #cir.const_struct<{#cir.int<3> : !s32i, #cir.const_array<[#cir.int<10> : !s32i, #cir.int<20> : !s32i, #cir.int<30> : !s32i]> : !cir.array}> + // LLVM-DAG: internal constant { i32, [3 x i32] } { i32 3, [3 x i32] [i32 10, i32 20, i32 30] } + struct outer X = {ARRAY_PTR(10, 20, 30)}; + + // CIR-DAG: cir.cast(bitcast, %1 : !cir.ptr>), !cir.ptr> + escape(A); + escape(&X); +} From f6f6448584b01a78d4bbf611cd84461f5d4a5aa7 Mon Sep 17 00:00:00 2001 From: axp Date: Thu, 6 Jun 2024 05:26:26 +0800 Subject: [PATCH 1614/2301] [CIR] Add Case Op Kind Range (#650) Make lowering result of case range smart. Resolve #632 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 8 +- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 46 ++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 +- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 87 ++++++- clang/test/CIR/CodeGen/switch-gnurange.cpp | 227 +++++++++++++++--- clang/test/CIR/Transforms/switch.cir | 65 ++++- 6 files changed, 385 insertions(+), 62 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1c10924ace37..634265085535 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1468,11 +1468,12 @@ def CmpThreeWayOp : CIR_Op<"cmp3way", [Pure, SameTypeOperands]> { def CaseOpKind_DT : I32EnumAttrCase<"Default", 1, "default">; def CaseOpKind_EQ : I32EnumAttrCase<"Equal", 2, "equal">; def CaseOpKind_AO : I32EnumAttrCase<"Anyof", 3, "anyof">; +def CaseOpKind_RG : I32EnumAttrCase<"Range", 4, "range">; def CaseOpKind : I32EnumAttr< "CaseOpKind", "case kind", - [CaseOpKind_DT, CaseOpKind_EQ, CaseOpKind_AO]> { + [CaseOpKind_DT, CaseOpKind_EQ, CaseOpKind_AO, CaseOpKind_RG]> { let cppNamespace = "::mlir::cir"; } @@ -1510,6 +1511,7 @@ def SwitchOp : CIR_Op<"switch", condition. - `anyof, [constant-list]`: equals to any of the values in a subsequent following list. + - `range, [lower-bound, upper-bound]`: the condition is within the closed interval. - `default`: any other value. Each case region must be explicitly terminated. @@ -1526,6 +1528,10 @@ def SwitchOp : CIR_Op<"switch", ... cir.return ... } + case (range, [10, 15]) { + ... + cir.yield break + }, case (default) { ... cir.yield fallthrough diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 5df3b3586be7..93ab3ee06dea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -608,37 +608,55 @@ mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) { const CaseStmt * CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, SmallVector &caseAttrs) { + auto *ctxt = builder.getContext(); + const CaseStmt *caseStmt = &S; const CaseStmt *lastCase = &S; SmallVector caseEltValueListAttr; + int caseAttrCount = 0; + // Fold cascading cases whenever possible to simplify codegen a bit. while (caseStmt) { lastCase = caseStmt; - auto startVal = caseStmt->getLHS()->EvaluateKnownConstInt(getContext()); - auto endVal = startVal; + auto intVal = caseStmt->getLHS()->EvaluateKnownConstInt(getContext()); + if (auto *rhs = caseStmt->getRHS()) { - endVal = rhs->EvaluateKnownConstInt(getContext()); - } - for (auto intVal = startVal; intVal <= endVal; ++intVal) { + auto endVal = rhs->EvaluateKnownConstInt(getContext()); + SmallVector rangeCaseAttr = { + mlir::cir::IntAttr::get(condType, intVal), + mlir::cir::IntAttr::get(condType, endVal)}; + auto caseAttr = mlir::cir::CaseAttr::get( + ctxt, builder.getArrayAttr(rangeCaseAttr), + CaseOpKindAttr::get(ctxt, mlir::cir::CaseOpKind::Range)); + caseAttrs.push_back(caseAttr); + ++caseAttrCount; + } else { caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(condType, intVal)); } caseStmt = dyn_cast_or_null(caseStmt->getSubStmt()); } - assert(!caseEltValueListAttr.empty() && "empty case value NYI"); - - auto *ctxt = builder.getContext(); + if (!caseEltValueListAttr.empty()) { + auto caseOpKind = caseEltValueListAttr.size() > 1 + ? mlir::cir::CaseOpKind::Anyof + : mlir::cir::CaseOpKind::Equal; + auto caseAttr = mlir::cir::CaseAttr::get( + ctxt, builder.getArrayAttr(caseEltValueListAttr), + CaseOpKindAttr::get(ctxt, caseOpKind)); + caseAttrs.push_back(caseAttr); + ++caseAttrCount; + } - auto caseAttr = mlir::cir::CaseAttr::get( - ctxt, builder.getArrayAttr(caseEltValueListAttr), - CaseOpKindAttr::get(ctxt, caseEltValueListAttr.size() > 1 - ? mlir::cir::CaseOpKind::Anyof - : mlir::cir::CaseOpKind::Equal)); + assert(caseAttrCount > 0 && "there should be at least one valid case attr"); - caseAttrs.push_back(caseAttr); + for (int i = 1; i < caseAttrCount; ++i) { + // If there are multiple case attributes, we need to create a new region + auto *region = currLexScope->createSwitchRegion(); + auto *block = builder.createBlock(region); + } return lastCase; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index c0b6f2ef6910..0916c7233e33 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1003,7 +1003,8 @@ parseSwitchOp(OpAsmParser &parser, // 2. Get the value (next in list) // These needs to be in sync with CIROps.td - if (parser.parseOptionalKeyword(&attrStr, {"default", "equal", "anyof"})) { + if (parser.parseOptionalKeyword(&attrStr, + {"default", "equal", "anyof", "range"})) { ::mlir::StringAttr attrVal; ::mlir::OptionalParseResult parseResult = parser.parseOptionalAttribute( attrVal, parser.getBuilder().getNoneType(), "kind", attrStorage); @@ -1016,8 +1017,9 @@ parseSwitchOp(OpAsmParser &parser, if (attrStr.empty()) { return parser.emitError( - loc, "expected string or keyword containing one of the following " - "enum values for attribute 'kind' [default, equal, anyof]"); + loc, + "expected string or keyword containing one of the following " + "enum values for attribute 'kind' [default, equal, anyof, range]"); } auto attrOptional = ::mlir::cir::symbolizeCaseOpKind(attrStr.str()); @@ -1042,6 +1044,7 @@ parseSwitchOp(OpAsmParser &parser, caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(intCondType, val)); break; } + case cir::CaseOpKind::Range: case cir::CaseOpKind::Anyof: { if (parser.parseComma().failed()) return mlir::failure(); @@ -1129,7 +1132,7 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, auto attr = casesAttr[idx].cast(); auto kind = attr.getKind().getValue(); assert((kind == CaseOpKind::Default || kind == CaseOpKind::Equal || - kind == CaseOpKind::Anyof) && + kind == CaseOpKind::Anyof || kind == CaseOpKind::Range) && "unknown case"); // Case kind @@ -1144,6 +1147,9 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, (intAttrTy.isSigned() ? p << intAttr.getSInt() : p << intAttr.getUInt()); break; } + case cir::CaseOpKind::Range: + assert(attr.getValue().size() == 2 && "range must have two values"); + // The print format of the range is the same as anyof case cir::CaseOpKind::Anyof: { p << ", ["; llvm::interleaveComma(attr.getValue(), p, [&](const Attribute &a) { diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index ce643e6735fa..470ff1dbff3f 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -259,6 +259,43 @@ class CIRSwitchOpFlattening destination); } + // Return the new defaultDestination block. + Block *condBrToRangeDestination(mlir::cir::SwitchOp op, + mlir::PatternRewriter &rewriter, + mlir::Block *rangeDestination, + mlir::Block *defaultDestination, + APInt lowerBound, APInt upperBound) const { + assert(lowerBound.sle(upperBound) && "Invalid range"); + auto resBlock = rewriter.createBlock(defaultDestination); + auto sIntType = mlir::cir::IntType::get(op.getContext(), 32, true); + auto uIntType = mlir::cir::IntType::get(op.getContext(), 32, false); + + auto rangeLength = rewriter.create( + op.getLoc(), sIntType, + mlir::cir::IntAttr::get(op.getContext(), sIntType, + upperBound - lowerBound)); + + auto lowerBoundValue = rewriter.create( + op.getLoc(), sIntType, + mlir::cir::IntAttr::get(op.getContext(), sIntType, lowerBound)); + auto diffValue = rewriter.create( + op.getLoc(), sIntType, mlir::cir::BinOpKind::Sub, op.getCondition(), + lowerBoundValue); + + // Use unsigned comparison to check if the condition is in the range. + auto uDiffValue = rewriter.create( + op.getLoc(), uIntType, CastKind::integral, diffValue); + auto uRangeLength = rewriter.create( + op.getLoc(), uIntType, CastKind::integral, rangeLength); + + auto cmpResult = rewriter.create( + op.getLoc(), mlir::cir::BoolType::get(op.getContext()), + mlir::cir::CmpOpKind::le, uDiffValue, uRangeLength); + rewriter.create(op.getLoc(), cmpResult, + rangeDestination, defaultDestination); + return resBlock; + } + mlir::LogicalResult matchAndRewrite(mlir::cir::SwitchOp op, mlir::PatternRewriter &rewriter) const override { @@ -279,6 +316,10 @@ class CIRSwitchOpFlattening llvm::SmallVector caseDestinations; llvm::SmallVector caseOperands; + llvm::SmallVector> rangeValues; + llvm::SmallVector rangeDestinations; + llvm::SmallVector rangeOperands; + // Initialize default case as optional. mlir::Block *defaultDestination = exitBlock; mlir::ValueRange defaultOperands = exitBlock->getArguments(); @@ -292,16 +333,31 @@ class CIRSwitchOpFlattening auto caseAttr = op.getCases()->getValue()[i].cast(); // Found default case: save destination and operands. - if (caseAttr.getKind().getValue() == mlir::cir::CaseOpKind::Default) { + switch (caseAttr.getKind().getValue()) { + case mlir::cir::CaseOpKind::Default: defaultDestination = ®ion.front(); defaultOperands = region.getArguments(); - } else { + break; + case mlir::cir::CaseOpKind::Range: + assert(caseAttr.getValue().size() == 2 && + "Case range should have 2 case value"); + rangeValues.push_back( + {caseAttr.getValue()[0].cast().getValue(), + caseAttr.getValue()[1].cast().getValue()}); + rangeDestinations.push_back(®ion.front()); + rangeOperands.push_back(region.getArguments()); + break; + case mlir::cir::CaseOpKind::Anyof: + case mlir::cir::CaseOpKind::Equal: // AnyOf cases kind can have multiple values, hence the loop below. for (auto &value : caseAttr.getValue()) { caseValues.push_back(value.cast().getValue()); caseOperands.push_back(region.getArguments()); caseDestinations.push_back(®ion.front()); } + break; + default: + llvm_unreachable("unsupported case kind"); } // Previous case is a fallthrough: branch it to this case. @@ -336,6 +392,33 @@ class CIRSwitchOpFlattening fallthroughYieldOp = nullptr; } + for (size_t index = 0; index < rangeValues.size(); ++index) { + auto lowerBound = rangeValues[index].first; + auto upperBound = rangeValues[index].second; + + // The case range is unreachable, skip it. + if (lowerBound.sgt(upperBound)) + continue; + + // If range is small, add multiple switch instruction cases. + // This magical number is from the original CGStmt code. + constexpr int kSmallRangeThreshold = 64; + if ((upperBound - lowerBound) + .ult(llvm::APInt(32, kSmallRangeThreshold))) { + for (auto iValue = lowerBound; iValue.sle(upperBound); iValue++) { + caseValues.push_back(iValue); + caseOperands.push_back(rangeOperands[index]); + caseDestinations.push_back(rangeDestinations[index]); + } + continue; + } + + defaultDestination = + condBrToRangeDestination(op, rewriter, rangeDestinations[index], + defaultDestination, lowerBound, upperBound); + defaultOperands = rangeOperands[index]; + } + // Set switch op to branch to the newly created blocks. rewriter.setInsertionPoint(op); rewriter.replaceOpWithNewOp( diff --git a/clang/test/CIR/CodeGen/switch-gnurange.cpp b/clang/test/CIR/CodeGen/switch-gnurange.cpp index 7fbd49ad704c..f48a32506252 100644 --- a/clang/test/CIR/CodeGen/switch-gnurange.cpp +++ b/clang/test/CIR/CodeGen/switch-gnurange.cpp @@ -22,10 +22,21 @@ int sw1(enum letter c) { // CIR: cir.func @_Z3sw16letter // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (anyof, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] : !s32i) { +// CIR-NEXT: case (range, [0, 2] : !s32i) { +// CIR-NEXT: cir.yield +// CIR-NEXT: }, +// CIR-NEXT: case (range, [4, 5] : !s32i) { +// CIR-NEXT: cir.yield +// CIR-NEXT: }, +// CIR-NEXT: case (range, [6, 10] : !s32i) { +// CIR-NEXT: cir.yield +// CIR-NEXT: }, +// CIR-NEXT: case (equal, 3) { +// CIR-NEXT: cir.int<1> // CIR: cir.return // CIR-NEXT: }, // CIR-NEXT: case (default) { +// CIR-NEXT: cir.int<0> // CIR: cir.return // CIR-NEXT: } // CIR-NEXT: ] @@ -33,19 +44,25 @@ int sw1(enum letter c) { // LLVM: @_Z3sw16letter // LLVM: switch i32 %[[C:[0-9]+]], label %[[DEFAULT:[0-9]+]] [ -// LLVM-NEXT: i32 0, label %[[CASE:[0-9]+]] -// LLVM-NEXT: i32 1, label %[[CASE]] -// LLVM-NEXT: i32 2, label %[[CASE]] -// LLVM-NEXT: i32 3, label %[[CASE]] -// LLVM-NEXT: i32 4, label %[[CASE]] -// LLVM-NEXT: i32 5, label %[[CASE]] -// LLVM-NEXT: i32 6, label %[[CASE]] -// LLVM-NEXT: i32 7, label %[[CASE]] -// LLVM-NEXT: i32 8, label %[[CASE]] -// LLVM-NEXT: i32 9, label %[[CASE]] -// LLVM-NEXT: i32 10, label %[[CASE]] +// LLVM-NEXT: i32 3, label %[[CASE_3:[0-9]+]] +// LLVM-NEXT: i32 0, label %[[CASE_0_2:[0-9]+]] +// LLVM-NEXT: i32 1, label %[[CASE_0_2]] +// LLVM-NEXT: i32 2, label %[[CASE_0_2]] +// LLVM-NEXT: i32 4, label %[[CASE_4_5:[0-9]+]] +// LLVM-NEXT: i32 5, label %[[CASE_4_5]] +// LLVM-NEXT: i32 6, label %[[CASE_6_10:[0-9]+]] +// LLVM-NEXT: i32 7, label %[[CASE_6_10]] +// LLVM-NEXT: i32 8, label %[[CASE_6_10]] +// LLVM-NEXT: i32 9, label %[[CASE_6_10]] +// LLVM-NEXT: i32 10, label %[[CASE_6_10]] // LLVM-NEXT: ] -// LLVM: [[CASE]]: +// LLVM: [[CASE_0_2]]: +// LLVM: br label %[[CASE_4_5]] +// LLVM: [[CASE_4_5]]: +// LLVM: br label %[[CASE_6_10]] +// LLVM: [[CASE_6_10]]: +// LLVM: br label %[[CASE_3]] +// LLVM: [[CASE_3]]: // LLVM: store i32 1 // LLVM: ret // LLVM: [[DEFAULT]]: @@ -66,7 +83,7 @@ int sw2(enum letter c) { // CIR: cir.func @_Z3sw26letter // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (anyof, [0, 1, 2] : !s32i) { +// CIR-NEXT: case (range, [0, 2] : !s32i) { // CIR: cir.return // CIR-NEXT: }, // CIR-NEXT: case (default) { @@ -109,19 +126,19 @@ void sw3(enum letter c) { // CIR: cir.func @_Z3sw36letter // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (anyof, [0, 1, 2] : !s32i) { +// CIR-NEXT: case (range, [0, 2] : !s32i) { // CIR-NEXT: cir.int<1> // CIR: cir.break // CIR-NEXT: }, -// CIR-NEXT: case (anyof, [3, 4, 5] : !s32i) { +// CIR-NEXT: case (range, [3, 5] : !s32i) { // CIR-NEXT: cir.int<2> // CIR: cir.break // CIR-NEXT: }, -// CIR-NEXT: case (anyof, [6, 7, 8] : !s32i) { +// CIR-NEXT: case (range, [6, 8] : !s32i) { // CIR-NEXT: cir.int<3> // CIR: cir.break // CIR-NEXT: }, -// CIR-NEXT: case (anyof, [9, 10] : !s32i) { +// CIR-NEXT: case (range, [9, 10] : !s32i) { // CIR-NEXT: cir.int<4> // CIR: cir.break // CIR-NEXT: } @@ -155,7 +172,9 @@ void sw3(enum letter c) { // LLVM: store i32 4, ptr %[[X]] // LLVM: br label %[[EPILOG]] // LLVM: [[EPILOG]]: -// LLVM: ret void +// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] +// LLVM: [[EPILOG_END]]: +// LLVM-NEXT: ret void void sw4(int x) { switch (x) { @@ -169,37 +188,165 @@ void sw4(int x) { // CIR: cir.func @_Z3sw4i // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (anyof, [66, 67, 68, 69, {{[0-9, ]+}}, 230, 231, 232, 233] : !s32i) { +// CIR-NEXT: case (range, [66, 233] : !s32i) { // CIR-NEXT: cir.break // CIR-NEXT: }, -// CIR-NEXT: case (anyof, [-50, -49, -48, -47, {{[0-9, -]+}}, -1, 0, 1, {{[0-9, ]+}}, 47, 48, 49, 50] : !s32i) { +// CIR-NEXT: case (range, [-50, 50] : !s32i) { // CIR-NEXT: cir.break // CIR-NEXT: } // CIR-NEXT: ] // CIR-NEXT: } // LLVM: @_Z3sw4i +// LLVM: switch i32 %[[X:[0-9]+]], label %[[JUDGE_NEG50_50:[0-9]+]] [ +// LLVM-NEXT: ] +// LLVM: [[CASE_66_233:[0-9]+]]: +// LLVM-NEXT: br label %[[EPILOG:[0-9]+]] +// LLVM: [[CASE_NEG50_50:[0-9]+]]: +// LLVM-NEXT: br label %[[EPILOG]] +// LLVM: [[JUDGE_NEG50_50]]: +// LLVM-NEXT: %[[DIFF:[0-9]+]] = sub i32 %[[X]], -50 +// LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], 100 +// LLVM-NEXT: br i1 %[[DIFF_CMP]], label %[[CASE_NEG50_50]], label %[[JUDGE_66_233:[0-9]+]] +// LLVM: [[JUDGE_66_233]]: +// LLVM-NEXT: %[[DIFF:[0-9]+]] = sub i32 %[[X]], 66 +// LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], 167 +// LLVM: br i1 %[[DIFF_CMP]], label %[[CASE_66_233]], label %[[EPILOG]] +// LLVM: [[EPILOG]]: +// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] +// LLVM: [[EPILOG_END]]: +// LLVM-NEXT: ret void + +void sw5(int x) { + int y = 0; + switch (x) { + case 100 ... -100: + y = 1; + } +} + +// CIR: cir.func @_Z3sw5i +// CIR: cir.scope { +// CIR: cir.switch +// CIR-NEXT: case (range, [100, -100] : !s32i) { +// CIR-NEXT: cir.int<1> +// CIR: cir.yield +// CIR-NEXT: } +// CIR-NEXT: ] + +// LLVM: @_Z3sw5i +// LLVM: switch i32 %[[X:[0-9]+]], label %[[EPILOG:[0-9]+]] [ +// LLVM-NEXT: ] +// LLVM: [[CASE_100_NEG100:[0-9]+]]: +// LLVM-NEXT: store i32 1, ptr %[[Y:[0-9]+]] +// LLVM-NEXT: br label %[[EPILOG]] +// LLVM: [[EPILOG]]: +// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] +// LLVM: [[EPILOG_END]]: +// LLVM-NEXT: ret void + +void sw6(int x) { + int y = 0; + switch (x) { + case -2147483648 ... 2147483647: + y = 1; + } +} + +// CIR: cir.func @_Z3sw6i +// CIR: cir.scope { +// CIR: cir.switch +// CIR-NEXT: case (range, [-2147483648, 2147483647] : !s32i) { +// CIR-NEXT: cir.int<1> +// CIR: cir.yield +// CIR-NEXT: } +// CIR-NEXT: ] + +// LLVM: @_Z3sw6i // LLVM: switch i32 %[[X:[0-9]+]], label %[[DEFAULT:[0-9]+]] [ -// LLVM-NEXT: i32 66, label %[[CASE_66_233:[0-9]+]] -// LLVM-NEXT: i32 67, label %[[CASE_66_233]] -// ... -// LLVM: i32 232, label %[[CASE_66_233]] -// LLVM-NEXT: i32 233, label %[[CASE_66_233]] -// LLVM-NEXT: i32 -50, label %[[CASE_NEG50_50:[0-9]+]] -// LLVM-NEXT: i32 -49, label %[[CASE_NEG50_50]] -// ... -// LLVM: i32 -1, label %[[CASE_NEG50_50]] -// LLVM-NEXT: i32 0, label %[[CASE_NEG50_50]] -// LLVM-NEXT: i32 1, label %[[CASE_NEG50_50]] -// ... -// LLVM: i32 49, label %[[CASE_NEG50_50]] -// LLVM-NEXT: i32 50, label %[[CASE_NEG50_50]] // LLVM-NEXT: ] -// LLVM: [[CASE_66_233]]: -// LLVM: br label %[[EPILOG:[0-9]+]] -// LLVM: [[CASE_NEG50_50]]: -// LLVM: br label %[[EPILOG]] +// LLVM: [[CASE_MIN_MAX:[0-9]+]]: +// LLVM-NEXT: store i32 1, ptr %[[Y:[0-9]+]] +// LLVM-NEXT: br label %[[EPILOG:[0-9]+]] +// LLVM: [[DEFAULT]]: +// LLVM-NEXT: %[[DIFF:[0-9]+]] = sub i32 %[[X]], -2147483648 +// LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], -1 +// LLVM-NEXT: br i1 %[[DIFF_CMP]], label %[[CASE_MIN_MAX]], label %[[EPILOG]] // LLVM: [[EPILOG]]: -// LLVM: ret void +// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] +// LLVM: [[EPILOG_END]]: +// LLVM-NEXT: ret void + +void sw7(int x) { + switch(x) { + case 0: + break; + case 100 ... 200: + break; + case 1: + break; + case 300 ... 400: + break; + default: + break; + case 500 ... 600: + break; + } +} + +// CIR: cir.func @_Z3sw7i +// CIR: cir.scope { +// CIR: cir.switch +// CIR-NEXT: case (equal, 0) { +// CIR-NEXT: cir.break +// CIR-NEXT: }, +// CIR-NEXT: case (range, [100, 200] : !s32i) { +// CIR-NEXT: cir.break +// CIR-NEXT: }, +// CIR-NEXT: case (equal, 1) { +// CIR-NEXT: cir.break +// CIR-NEXT: }, +// CIR-NEXT: case (range, [300, 400] : !s32i) { +// CIR-NEXT: cir.break +// CIR-NEXT: }, +// CIR-NEXT: case (default) { +// CIR-NEXT: cir.break +// CIR-NEXT: }, +// CIR-NEXT: case (range, [500, 600] : !s32i) { +// CIR-NEXT: cir.break +// CIR-NEXT: } +// LLVM: @_Z3sw7i +// LLVM: switch i32 %[[X:[0-9]+]], label %[[JUDGE_RANGE_500_600:[0-9]+]] [ +// LLVM-NEXT: i32 0, label %[[CASE_0:[0-9]+]] +// LLVM-NEXT: i32 1, label %[[CASE_1:[0-9]+]] +// LLVM-NEXT: ] +// LLVM: [[CASE_0]]: +// LLVM-NEXT: br label %[[EPILOG:[0-9]+]] +// LLVM: [[CASE_100_200:[0-9]+]]: +// LLVM-NEXT: br label %[[EPILOG]] +// LLVM: [[CASE_1]]: +// LLVM-NEXT: br label %[[EPILOG]] +// LLVM: [[CASE_300_400:[0-9]+]]: +// LLVM-NEXT: br label %[[EPILOG]] +// LLVM: [[JUDGE_RANGE_500_600]]: +// LLVM-NEXT: %[[DIFF:[0-9]+]] = sub i32 %[[X]], 500 +// LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], 100 +// LLVM-NEXT: br i1 %[[DIFF_CMP]], label %[[CASE_500_600:[0-9]+]], label %[[JUDGE_RANGE_300_400:[0-9]+]] +// LLVM: [[JUDGE_RANGE_300_400]]: +// LLVM-NEXT: %[[DIFF:[0-9]+]] = sub i32 %[[X]], 300 +// LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], 100 +// LLVM-NEXT: br i1 %[[DIFF_CMP]], label %[[CASE_300_400]], label %[[JUDGE_RANGE_100_200:[0-9]+]] +// LLVM: [[JUDGE_RANGE_100_200]]: +// LLVM-NEXT: %[[DIFF:[0-9]+]] = sub i32 %[[X]], 100 +// LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], 100 +// LLVM-NEXT: br i1 %[[DIFF_CMP]], label %[[CASE_100_200]], label %[[DEFAULT:[0-9]+]] +// LLVM: [[DEFAULT]]: +// LLVM-NEXT: br label %[[EPILOG]] +// LLVM: [[CASE_500_600]]: +// LLVM-NEXT: br label %[[EPILOG]] +// LLVM: [[EPILOG]]: +// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] +// LLVM: [[EPILOG_END]]: +// LLVM-NEXT: ret void diff --git a/clang/test/CIR/Transforms/switch.cir b/clang/test/CIR/Transforms/switch.cir index 1ea6dba49c98..177dfc98c8af 100644 --- a/clang/test/CIR/Transforms/switch.cir +++ b/clang/test/CIR/Transforms/switch.cir @@ -156,7 +156,7 @@ module { // CHECK: } - cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { + cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} @@ -204,4 +204,67 @@ module { // CHECK: cir.return %9 : !s32i // CHECK: } + + cir.func @flatCaseRange(%arg0: !s32i) -> !s32i { + %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + %3 = cir.const #cir.int<0> : !s32i + cir.store %3, %2 : !s32i, !cir.ptr + cir.scope { + %6 = cir.load %0 : !cir.ptr, !s32i + cir.switch (%6 : !s32i) [ + case (equal, -100) { + %7 = cir.const #cir.int<1> : !s32i + cir.store %7, %2 : !s32i, !cir.ptr + cir.break + }, + case (range, [1, 100] : !s32i) { + %7 = cir.const #cir.int<2> : !s32i + cir.store %7, %2 : !s32i, !cir.ptr + cir.break + }, + case (default) { + %7 = cir.const #cir.int<3> : !s32i + cir.store %7, %2 : !s32i, !cir.ptr + cir.break + } + ] + } + %4 = cir.load %2 : !cir.ptr, !s32i + cir.store %4, %1 : !s32i, !cir.ptr + %5 = cir.load %1 : !cir.ptr, !s32i + cir.return %5 : !s32i + } +// CHECK: cir.func @flatCaseRange(%arg0: !s32i) -> !s32i { +// CHECK: cir.switch.flat %[[X:[0-9]+]] : !s32i, ^[[JUDGE_RANGE:bb[0-9]+]] [ +// CHECK-NEXT: -100: ^[[CASE_EQUAL:bb[0-9]+]] +// CHECK-NEXT: ] +// CHECK-NEXT: ^[[CASE_EQUAL]]: +// CHECK-NEXT: cir.int<1> +// CHECK-NEXT: cir.store +// CHECK-NEXT: cir.br ^[[EPILOG:bb[0-9]+]] +// CHECK-NEXT: ^[[CASE_RANGE:bb[0-9]+]]: +// CHECK-NEXT: cir.int<2> +// CHECK-NEXT: cir.store +// CHECK-NEXT: cir.br ^[[EPILOG]] +// CHECK-NEXT: ^[[JUDGE_RANGE]]: +// CHECK-NEXT: %[[RANGE:[0-9]+]] = cir.const #cir.int<99> +// CHECK-NEXT: %[[LOWER_BOUND:[0-9]+]] = cir.const #cir.int<1> +// CHECK-NEXT: %[[DIFF:[0-9]+]] = cir.binop(sub, %[[X]], %[[LOWER_BOUND]]) +// CHECK-NEXT: %[[U_DIFF:[0-9]+]] = cir.cast(integral, %[[DIFF]] : !s32i), !u32i +// CHECK-NEXT: %[[U_RANGE:[0-9]+]] = cir.cast(integral, %[[RANGE]] : !s32i), !u32i +// CHECK-NEXT: %[[CMP_RESULT:[0-9]+]] = cir.cmp(le, %[[U_DIFF]], %[[U_RANGE]]) +// CHECK-NEXT: cir.brcond %[[CMP_RESULT]] ^[[CASE_RANGE]], ^[[CASE_DEFAULT:bb[0-9]+]] +// CHECK-NEXT: ^[[CASE_DEFAULT]]: +// CHECK-NEXT: cir.int<3> +// CHECK-NEXT: cir.store +// CHECK-NEXT: cir.br ^[[EPILOG]] +// CHECK-NEXT: ^[[EPILOG]]: +// CHECK-NEXT: cir.br ^[[EPILOG_END:bb[0-9]+]] +// CHECK-NEXT: ^[[EPILOG_END]]: +// CHECK: cir.return +// CHECK: } + } From 74d270fbf201cae495dbfc1c3a886c2650dbfa64 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 6 Jun 2024 05:27:51 +0800 Subject: [PATCH 1615/2301] [CIR][LLVMLowering] Add LLVM lowering for unary fp2fp builtins (#651) This patch adds LLVM lowering support for unary fp2fp builtins. Those builtins that should be lowered to runtime function calls are lowered to such calls during lowering prepare. Other builtins are lowered to LLVM intrinsic calls during LLVM lowering. --- .../Dialect/Transforms/LoweringPrepare.cpp | 126 +++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 38 +- .../test/CIR/CodeGen/builtin-floating-point.c | 458 ++++++++++++++++++ 3 files changed, 569 insertions(+), 53 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index bbda97e45c80..02b54c5a9962 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -25,6 +25,7 @@ #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" +#include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Path.h" @@ -71,6 +72,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void runOnOperation() override; void runOnOp(Operation *op); + void runOnMathOp(Operation *op); void lowerThreeWayCmpOp(CmpThreeWayOp op); void lowerVAArgOp(VAArgOp op); void lowerGlobalOp(GlobalOp op); @@ -80,8 +82,6 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerIterEndOp(IterEndOp op); void lowerArrayDtor(ArrayDtor op); void lowerArrayCtor(ArrayCtor op); - void lowerFModOp(FModOp op); - void lowerPowOp(PowOp op); /// Build the function that initializes the specified global FuncOp buildCXXGlobalVarDeclInitFunc(GlobalOp op); @@ -627,49 +627,6 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { op.erase(); } -static void lowerBinaryFPToFPBuiltinOp(LoweringPreparePass &pass, - mlir::Operation *op, - llvm::StringRef floatRtFuncName, - llvm::StringRef doubleRtFuncName, - llvm::StringRef longDoubleRtFuncName) { - mlir::Type ty = op->getResult(0).getType(); - - llvm::StringRef rtFuncName; - if (ty.isa()) - rtFuncName = floatRtFuncName; - else if (ty.isa()) - rtFuncName = doubleRtFuncName; - else if (ty.isa()) - rtFuncName = longDoubleRtFuncName; - else - llvm_unreachable("unknown binary fp2fp builtin operand type"); - - CIRBaseBuilderTy builder(*pass.theModule.getContext()); - builder.setInsertionPointToStart(pass.theModule.getBody()); - - auto rtFuncTy = mlir::cir::FuncType::get({ty, ty}, ty); - FuncOp rtFunc = - pass.buildRuntimeFunction(builder, rtFuncName, op->getLoc(), rtFuncTy); - - auto lhs = op->getOperand(0); - auto rhs = op->getOperand(1); - - builder.setInsertionPointAfter(op); - auto call = builder.create(op->getLoc(), rtFunc, - mlir::ValueRange{lhs, rhs}); - - op->replaceAllUsesWith(call); - op->erase(); -} - -void LoweringPreparePass::lowerFModOp(FModOp op) { - lowerBinaryFPToFPBuiltinOp(*this, op, "fmodf", "fmod", "fmodl"); -} - -void LoweringPreparePass::lowerPowOp(PowOp op) { - lowerBinaryFPToFPBuiltinOp(*this, op, "powf", "pow", "powl"); -} - void LoweringPreparePass::runOnOp(Operation *op) { if (auto threeWayCmp = dyn_cast(op)) { lowerThreeWayCmpOp(threeWayCmp); @@ -695,13 +652,73 @@ void LoweringPreparePass::runOnOp(Operation *op) { } else if (auto globalDtor = fnOp.getGlobalDtorAttr()) { globalDtorList.push_back(globalDtor); } - } else if (auto fmodOp = dyn_cast(op)) { - lowerFModOp(fmodOp); - } else if (auto powOp = dyn_cast(op)) { - lowerPowOp(powOp); } } +void LoweringPreparePass::runOnMathOp(Operation *op) { + struct MathOpFunctionNames { + llvm::StringRef floatVer; + llvm::StringRef doubleVer; + llvm::StringRef longDoubleVer; + }; + + mlir::Type ty = op->getResult(0).getType(); + + MathOpFunctionNames rtFuncNames = + llvm::TypeSwitch(op) + .Case([](auto) { + return MathOpFunctionNames{"fmodf", "fmod", "fmodl"}; + }) + .Case( + [](auto) { return MathOpFunctionNames{"powf", "pow", "powl"}; }) + .Case( + [](auto) { return MathOpFunctionNames{"cosf", "cos", "cosl"}; }) + .Case( + [](auto) { return MathOpFunctionNames{"expf", "exp", "expl"}; }) + .Case([](auto) { + return MathOpFunctionNames{"exp2f", "exp2", "exp2l"}; + }) + .Case( + [](auto) { return MathOpFunctionNames{"logf", "log", "logl"}; }) + .Case([](auto) { + return MathOpFunctionNames{"log10f", "log10", "log10l"}; + }) + .Case([](auto) { + return MathOpFunctionNames{"log2f", "log2", "log2l"}; + }) + .Case( + [](auto) { return MathOpFunctionNames{"sinf", "sin", "sinl"}; }) + .Case([](auto) { + return MathOpFunctionNames{"sqrtf", "sqrt", "sqrtl"}; + }); + llvm::StringRef rtFuncName = llvm::TypeSwitch(ty) + .Case([&](auto) { + return rtFuncNames.floatVer; + }) + .Case([&](auto) { + return rtFuncNames.doubleVer; + }) + .Case([&](auto) { + return rtFuncNames.longDoubleVer; + }); + + CIRBaseBuilderTy builder(*theModule.getContext()); + builder.setInsertionPointToStart(theModule.getBody()); + + llvm::SmallVector operandTypes(op->getNumOperands(), ty); + auto rtFuncTy = + mlir::cir::FuncType::get(operandTypes, op->getResult(0).getType()); + FuncOp rtFunc = + buildRuntimeFunction(builder, rtFuncName, op->getLoc(), rtFuncTy); + + builder.setInsertionPointAfter(op); + auto call = builder.create(op->getLoc(), rtFunc, + op->getOperands()); + + op->replaceAllUsesWith(call); + op->erase(); +} + void LoweringPreparePass::runOnOperation() { assert(astCtx && "Missing ASTContext, please construct with the right ctor"); auto *op = getOperation(); @@ -710,15 +727,22 @@ void LoweringPreparePass::runOnOperation() { } SmallVector opsToTransform; + SmallVector mathOpsToTransform; + op->walk([&](Operation *op) { if (isa(op)) + IterEndOp, IterBeginOp, ArrayCtor, ArrayDtor, mlir::cir::FuncOp>( + op)) opsToTransform.push_back(op); + else if (isa(op)) + mathOpsToTransform.push_back(op); }); for (auto *o : opsToTransform) runOnOp(o); + for (auto *o : mathOpsToTransform) + runOnMathOp(o); buildCXXGlobalInitFunc(); buildGlobalCtorDtorList(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 36049d27cbfd..5663014641ce 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3162,6 +3162,38 @@ class CIRCmpThreeWayOpLowering } }; +template +class CIRUnaryFPToFPBuiltinOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(CIROp op, + typename mlir::OpConversionPattern::OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = this->getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, resTy, adaptor.getSrc()); + return mlir::success(); + } +}; + +using CIRCeilOpLowering = + CIRUnaryFPToFPBuiltinOpLowering; +using CIRFloorOpLowering = + CIRUnaryFPToFPBuiltinOpLowering; +using CIRFabsOpLowering = + CIRUnaryFPToFPBuiltinOpLowering; +using CIRNearbyintOpLowering = + CIRUnaryFPToFPBuiltinOpLowering; +using CIRRintOpLowering = + CIRUnaryFPToFPBuiltinOpLowering; +using CIRRoundOpLowering = + CIRUnaryFPToFPBuiltinOpLowering; +using CIRTruncOpLowering = + CIRUnaryFPToFPBuiltinOpLowering; + template class CIRBinaryFPToFPBuiltinOpLowering : public mlir::OpConversionPattern { @@ -3211,8 +3243,10 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, - CIRCmpThreeWayOpLowering, CIRCopysignOpLowering, CIRFMaxOpLowering, - CIRFMinOpLowering>(converter, patterns.getContext()); + CIRCmpThreeWayOpLowering, CIRCeilOpLowering, CIRFloorOpLowering, + CIRFAbsOpLowering, CIRNearbyintOpLowering, CIRRintOpLowering, + CIRRoundOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, + CIRFMaxOpLowering, CIRFMinOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c index c47f390b8eac..329cbea8fc7c 100644 --- a/clang/test/CIR/CodeGen/builtin-floating-point.c +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t1.cir 2>&1 | FileCheck %s // RUN: %clang_cc1 -triple aarch64-apple-darwin-macho -ffast-math -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t1.cir 2>&1 | FileCheck %s --check-prefix=AARCH64 +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM // ceil @@ -7,12 +9,20 @@ float my_ceilf(float f) { return __builtin_ceilf(f); // CHECK: cir.func @my_ceilf // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.float + + // LLVM: define float @my_ceilf(float %0) + // LLVM: %{{.+}} = call float @llvm.ceil.f32(float %{{.+}}) + // LLVM: } } double my_ceil(double f) { return __builtin_ceil(f); // CHECK: cir.func @my_ceil // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.double + + // LLVM: define double @my_ceil(double %0) + // LLVM: %{{.+}} = call double @llvm.ceil.f64(double %{{.+}}) + // LLVM: } } long double my_ceill(long double f) { @@ -20,6 +30,10 @@ long double my_ceill(long double f) { // CHECK: cir.func @my_ceill // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.ceil {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_ceill(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.ceil.f80(x86_fp80 %{{.+}}) + // LLVM: } } float ceilf(float); @@ -30,12 +44,20 @@ float call_ceilf(float f) { return ceilf(f); // CHECK: cir.func @call_ceilf // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.float + + // LLVM: define float @call_ceilf(float %0) + // LLVM: %{{.+}} = call float @llvm.ceil.f32(float %{{.+}}) + // LLVM: } } double call_ceil(double f) { return ceil(f); // CHECK: cir.func @call_ceil // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.double + + // LLVM: define double @call_ceil(double %0) + // LLVM: %{{.+}} = call double @llvm.ceil.f64(double %{{.+}}) + // LLVM: } } long double call_ceill(long double f) { @@ -43,6 +65,10 @@ long double call_ceill(long double f) { // CHECK: cir.func @call_ceill // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.ceil {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_ceill(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.ceil.f80(x86_fp80 %{{.+}}) + // LLVM: } } // cos @@ -51,12 +77,20 @@ float my_cosf(float f) { return __builtin_cosf(f); // CHECK: cir.func @my_cosf // CHECK: {{.+}} = cir.cos {{.+}} : !cir.float + + // LLVM: define float @my_cosf(float %0) + // LLVM: %{{.+}} = call float @cosf(float %{{.+}}) + // LLVM: } } double my_cos(double f) { return __builtin_cos(f); // CHECK: cir.func @my_cos // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double + + // LLVM: define double @my_cos(double %0) + // LLVM: %{{.+}} = call double @cos(double %{{.+}}) + // LLVM: } } long double my_cosl(long double f) { @@ -64,6 +98,10 @@ long double my_cosl(long double f) { // CHECK: cir.func @my_cosl // CHECK: {{.+}} = cir.cos {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.cos {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_cosl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @cosl(x86_fp80 %{{.+}}) + // LLVM: } } float cosf(float); @@ -74,12 +112,20 @@ float call_cosf(float f) { return cosf(f); // CHECK: cir.func @call_cosf // CHECK: {{.+}} = cir.cos {{.+}} : !cir.float + + // LLVM: define float @call_cosf(float %0) + // LLVM: %{{.+}} = call float @cosf(float %{{.+}}) + // LLVM: } } double call_cos(double f) { return cos(f); // CHECK: cir.func @call_cos // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double + + // LLVM: define double @call_cos(double %0) + // LLVM: %{{.+}} = call double @cos(double %{{.+}}) + // LLVM: } } long double call_cosl(long double f) { @@ -87,6 +133,10 @@ long double call_cosl(long double f) { // CHECK: cir.func @call_cosl // CHECK: {{.+}} = cir.cos {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.cos {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_cosl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @cosl(x86_fp80 %{{.+}}) + // LLVM: } } // exp @@ -95,12 +145,20 @@ float my_expf(float f) { return __builtin_expf(f); // CHECK: cir.func @my_expf // CHECK: {{.+}} = cir.exp {{.+}} : !cir.float + + // LLVM: define float @my_expf(float %0) + // LLVM: %{{.+}} = call float @expf(float %{{.+}}) + // LLVM: } } double my_exp(double f) { return __builtin_exp(f); // CHECK: cir.func @my_exp // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double + + // LLVM: define double @my_exp(double %0) + // LLVM: %{{.+}} = call double @exp(double %{{.+}}) + // LLVM: } } long double my_expl(long double f) { @@ -108,6 +166,10 @@ long double my_expl(long double f) { // CHECK: cir.func @my_expl // CHECK: {{.+}} = cir.exp {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.exp {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_expl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @expl(x86_fp80 %{{.+}}) + // LLVM: } } float expf(float); @@ -118,12 +180,20 @@ float call_expf(float f) { return expf(f); // CHECK: cir.func @call_expf // CHECK: {{.+}} = cir.exp {{.+}} : !cir.float + + // LLVM: define float @call_expf(float %0) + // LLVM: %{{.+}} = call float @expf(float %{{.+}}) + // LLVM: } } double call_exp(double f) { return exp(f); // CHECK: cir.func @call_exp // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double + + // LLVM: define double @call_exp(double %0) + // LLVM: %{{.+}} = call double @exp(double %{{.+}}) + // LLVM: } } long double call_expl(long double f) { @@ -131,6 +201,10 @@ long double call_expl(long double f) { // CHECK: cir.func @call_expl // CHECK: {{.+}} = cir.exp {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.exp {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_expl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @expl(x86_fp80 %{{.+}}) + // LLVM: } } // exp2 @@ -139,12 +213,20 @@ float my_exp2f(float f) { return __builtin_exp2f(f); // CHECK: cir.func @my_exp2f // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.float + + // LLVM: define float @my_exp2f(float %0) + // LLVM: %{{.+}} = call float @exp2f(float %{{.+}}) + // LLVM: } } double my_exp2(double f) { return __builtin_exp2(f); // CHECK: cir.func @my_exp2 // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double + + // LLVM: define double @my_exp2(double %0) + // LLVM: %{{.+}} = call double @exp2(double %{{.+}}) + // LLVM: } } long double my_exp2l(long double f) { @@ -152,6 +234,10 @@ long double my_exp2l(long double f) { // CHECK: cir.func @my_exp2l // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.exp2 {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_exp2l(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @exp2l(x86_fp80 %{{.+}}) + // LLVM: } } float exp2f(float); @@ -162,12 +248,20 @@ float call_exp2f(float f) { return exp2f(f); // CHECK: cir.func @call_exp2f // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.float + + // LLVM: define float @call_exp2f(float %0) + // LLVM: %{{.+}} = call float @exp2f(float %{{.+}}) + // LLVM: } } double call_exp2(double f) { return exp2(f); // CHECK: cir.func @call_exp2 // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double + + // LLVM: define double @call_exp2(double %0) + // LLVM: %{{.+}} = call double @exp2(double %{{.+}}) + // LLVM: } } long double call_exp2l(long double f) { @@ -175,6 +269,10 @@ long double call_exp2l(long double f) { // CHECK: cir.func @call_exp2l // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.exp2 {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_exp2l(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @exp2l(x86_fp80 %{{.+}}) + // LLVM: } } // floor @@ -183,12 +281,20 @@ float my_floorf(float f) { return __builtin_floorf(f); // CHECK: cir.func @my_floorf // CHECK: {{.+}} = cir.floor {{.+}} : !cir.float + + // LLVM: define float @my_floorf(float %0) + // LLVM: %{{.+}} = call float @llvm.floor.f32(float %{{.+}}) + // LLVM: } } double my_floor(double f) { return __builtin_floor(f); // CHECK: cir.func @my_floor // CHECK: {{.+}} = cir.floor {{.+}} : !cir.double + + // LLVM: define double @my_floor(double %0) + // LLVM: %{{.+}} = call double @llvm.floor.f64(double %{{.+}}) + // LLVM: } } long double my_floorl(long double f) { @@ -196,6 +302,10 @@ long double my_floorl(long double f) { // CHECK: cir.func @my_floorl // CHECK: {{.+}} = cir.floor {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.floor {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_floorl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.floor.f80(x86_fp80 %{{.+}}) + // LLVM: } } float floorf(float); @@ -206,12 +316,20 @@ float call_floorf(float f) { return floorf(f); // CHECK: cir.func @call_floorf // CHECK: {{.+}} = cir.floor {{.+}} : !cir.float + + // LLVM: define float @call_floorf(float %0) + // LLVM: %{{.+}} = call float @llvm.floor.f32(float %{{.+}}) + // LLVM: } } double call_floor(double f) { return floor(f); // CHECK: cir.func @call_floor // CHECK: {{.+}} = cir.floor {{.+}} : !cir.double + + // LLVM: define double @call_floor(double %0) + // LLVM: %{{.+}} = call double @llvm.floor.f64(double %{{.+}}) + // LLVM: } } long double call_floorl(long double f) { @@ -219,6 +337,10 @@ long double call_floorl(long double f) { // CHECK: cir.func @call_floorl // CHECK: {{.+}} = cir.floor {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.floor {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_floorl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.floor.f80(x86_fp80 %{{.+}}) + // LLVM: } } // log @@ -227,12 +349,20 @@ float my_logf(float f) { return __builtin_logf(f); // CHECK: cir.func @my_logf // CHECK: {{.+}} = cir.log {{.+}} : !cir.float + + // LLVM: define float @my_logf(float %0) + // LLVM: %{{.+}} = call float @logf(float %{{.+}}) + // LLVM: } } double my_log(double f) { return __builtin_log(f); // CHECK: cir.func @my_log // CHECK: {{.+}} = cir.log {{.+}} : !cir.double + + // LLVM: define double @my_log(double %0) + // LLVM: %{{.+}} = call double @log(double %{{.+}}) + // LLVM: } } long double my_logl(long double f) { @@ -240,6 +370,10 @@ long double my_logl(long double f) { // CHECK: cir.func @my_logl // CHECK: {{.+}} = cir.log {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_logl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @logl(x86_fp80 %{{.+}}) + // LLVM: } } float logf(float); @@ -250,12 +384,20 @@ float call_logf(float f) { return logf(f); // CHECK: cir.func @call_logf // CHECK: {{.+}} = cir.log {{.+}} : !cir.float + + // LLVM: define float @call_logf(float %0) + // LLVM: %{{.+}} = call float @logf(float %{{.+}}) + // LLVM: } } double call_log(double f) { return log(f); // CHECK: cir.func @call_log // CHECK: {{.+}} = cir.log {{.+}} : !cir.double + + // LLVM: define double @call_log(double %0) + // LLVM: %{{.+}} = call double @log(double %{{.+}}) + // LLVM: } } long double call_logl(long double f) { @@ -263,6 +405,10 @@ long double call_logl(long double f) { // CHECK: cir.func @call_logl // CHECK: {{.+}} = cir.log {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_logl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @logl(x86_fp80 %{{.+}}) + // LLVM: } } // log10 @@ -271,12 +417,20 @@ float my_log10f(float f) { return __builtin_log10f(f); // CHECK: cir.func @my_log10f // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.float + + // LLVM: define float @my_log10f(float %0) + // LLVM: %{{.+}} = call float @log10f(float %{{.+}}) + // LLVM: } } double my_log10(double f) { return __builtin_log10(f); // CHECK: cir.func @my_log10 // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double + + // LLVM: define double @my_log10(double %0) + // LLVM: %{{.+}} = call double @log10(double %{{.+}}) + // LLVM: } } long double my_log10l(long double f) { @@ -284,6 +438,10 @@ long double my_log10l(long double f) { // CHECK: cir.func @my_log10l // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log10 {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_log10l(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @log10l(x86_fp80 %{{.+}}) + // LLVM: } } float log10f(float); @@ -294,12 +452,20 @@ float call_log10f(float f) { return log10f(f); // CHECK: cir.func @call_log10f // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.float + + // LLVM: define float @call_log10f(float %0) + // LLVM: %{{.+}} = call float @log10f(float %{{.+}}) + // LLVM: } } double call_log10(double f) { return log10(f); // CHECK: cir.func @call_log10 // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double + + // LLVM: define double @call_log10(double %0) + // LLVM: %{{.+}} = call double @log10(double %{{.+}}) + // LLVM: } } long double call_log10l(long double f) { @@ -307,6 +473,10 @@ long double call_log10l(long double f) { // CHECK: cir.func @call_log10l // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log10 {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_log10l(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @log10l(x86_fp80 %{{.+}}) + // LLVM: } } // log2 @@ -315,12 +485,20 @@ float my_log2f(float f) { return __builtin_log2f(f); // CHECK: cir.func @my_log2f // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.float + + // LLVM: define float @my_log2f(float %0) + // LLVM: %{{.+}} = call float @log2f(float %{{.+}}) + // LLVM: } } double my_log2(double f) { return __builtin_log2(f); // CHECK: cir.func @my_log2 // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double + + // LLVM: define double @my_log2(double %0) + // LLVM: %{{.+}} = call double @log2(double %{{.+}}) + // LLVM: } } long double my_log2l(long double f) { @@ -328,6 +506,10 @@ long double my_log2l(long double f) { // CHECK: cir.func @my_log2l // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log2 {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_log2l(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @log2l(x86_fp80 %{{.+}}) + // LLVM: } } float log2f(float); @@ -338,12 +520,20 @@ float call_log2f(float f) { return log2f(f); // CHECK: cir.func @call_log2f // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.float + + // LLVM: define float @call_log2f(float %0) + // LLVM: %{{.+}} = call float @log2f(float %{{.+}}) + // LLVM: } } double call_log2(double f) { return log2(f); // CHECK: cir.func @call_log2 // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double + + // LLVM: define double @call_log2(double %0) + // LLVM: %{{.+}} = call double @log2(double %{{.+}}) + // LLVM: } } long double call_log2l(long double f) { @@ -351,6 +541,10 @@ long double call_log2l(long double f) { // CHECK: cir.func @call_log2l // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log2 {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_log2l(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @log2l(x86_fp80 %{{.+}}) + // LLVM: } } // nearbyint @@ -359,12 +553,20 @@ float my_nearbyintf(float f) { return __builtin_nearbyintf(f); // CHECK: cir.func @my_nearbyintf // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.float + + // LLVM: define float @my_nearbyintf(float %0) + // LLVM: %{{.+}} = call float @llvm.nearbyint.f32(float %{{.+}}) + // LLVM: } } double my_nearbyint(double f) { return __builtin_nearbyint(f); // CHECK: cir.func @my_nearbyint // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.double + + // LLVM: define double @my_nearbyint(double %0) + // LLVM: %{{.+}} = call double @llvm.nearbyint.f64(double %{{.+}}) + // LLVM: } } long double my_nearbyintl(long double f) { @@ -372,6 +574,10 @@ long double my_nearbyintl(long double f) { // CHECK: cir.func @my_nearbyintl // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_nearbyintl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.nearbyint.f80(x86_fp80 %{{.+}}) + // LLVM: } } float nearbyintf(float); @@ -382,12 +588,20 @@ float call_nearbyintf(float f) { return nearbyintf(f); // CHECK: cir.func @call_nearbyintf // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.float + + // LLVM: define float @call_nearbyintf(float %0) + // LLVM: %{{.+}} = call float @llvm.nearbyint.f32(float %{{.+}}) + // LLVM: } } double call_nearbyint(double f) { return nearbyint(f); // CHECK: cir.func @call_nearbyint // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.double + + // LLVM: define double @call_nearbyint(double %0) + // LLVM: %{{.+}} = call double @llvm.nearbyint.f64(double %{{.+}}) + // LLVM: } } long double call_nearbyintl(long double f) { @@ -395,6 +609,10 @@ long double call_nearbyintl(long double f) { // CHECK: cir.func @call_nearbyintl // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_nearbyintl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.nearbyint.f80(x86_fp80 %{{.+}}) + // LLVM: } } // rint @@ -403,12 +621,20 @@ float my_rintf(float f) { return __builtin_rintf(f); // CHECK: cir.func @my_rintf // CHECK: {{.+}} = cir.rint {{.+}} : !cir.float + + // LLVM: define float @my_rintf(float %0) + // LLVM: %{{.+}} = call float @llvm.rint.f32(float %{{.+}}) + // LLVM: } } double my_rint(double f) { return __builtin_rint(f); // CHECK: cir.func @my_rint // CHECK: {{.+}} = cir.rint {{.+}} : !cir.double + + // LLVM: define double @my_rint(double %0) + // LLVM: %{{.+}} = call double @llvm.rint.f64(double %{{.+}}) + // LLVM: } } long double my_rintl(long double f) { @@ -416,6 +642,10 @@ long double my_rintl(long double f) { // CHECK: cir.func @my_rintl // CHECK: {{.+}} = cir.rint {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.rint {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_rintl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.rint.f80(x86_fp80 %{{.+}}) + // LLVM: } } float rintf(float); @@ -426,12 +656,20 @@ float call_rintf(float f) { return rintf(f); // CHECK: cir.func @call_rintf // CHECK: {{.+}} = cir.rint {{.+}} : !cir.float + + // LLVM: define float @call_rintf(float %0) + // LLVM: %{{.+}} = call float @llvm.rint.f32(float %{{.+}}) + // LLVM: } } double call_rint(double f) { return rint(f); // CHECK: cir.func @call_rint // CHECK: {{.+}} = cir.rint {{.+}} : !cir.double + + // LLVM: define double @call_rint(double %0) + // LLVM: %{{.+}} = call double @llvm.rint.f64(double %{{.+}}) + // LLVM: } } long double call_rintl(long double f) { @@ -439,6 +677,10 @@ long double call_rintl(long double f) { // CHECK: cir.func @call_rintl // CHECK: {{.+}} = cir.rint {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.rint {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_rintl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.rint.f80(x86_fp80 %{{.+}}) + // LLVM: } } // round @@ -447,12 +689,20 @@ float my_roundf(float f) { return __builtin_roundf(f); // CHECK: cir.func @my_roundf // CHECK: {{.+}} = cir.round {{.+}} : !cir.float + + // LLVM: define float @my_roundf(float %0) + // LLVM: %{{.+}} = call float @llvm.round.f32(float %{{.+}}) + // LLVM: } } double my_round(double f) { return __builtin_round(f); // CHECK: cir.func @my_round // CHECK: {{.+}} = cir.round {{.+}} : !cir.double + + // LLVM: define double @my_round(double %0) + // LLVM: %{{.+}} = call double @llvm.round.f64(double %{{.+}}) + // LLVM: } } long double my_roundl(long double f) { @@ -460,6 +710,10 @@ long double my_roundl(long double f) { // CHECK: cir.func @my_roundl // CHECK: {{.+}} = cir.round {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.round {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_roundl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.round.f80(x86_fp80 %{{.+}}) + // LLVM: } } float roundf(float); @@ -470,12 +724,20 @@ float call_roundf(float f) { return roundf(f); // CHECK: cir.func @call_roundf // CHECK: {{.+}} = cir.round {{.+}} : !cir.float + + // LLVM: define float @call_roundf(float %0) + // LLVM: %{{.+}} = call float @llvm.round.f32(float %{{.+}}) + // LLVM: } } double call_round(double f) { return round(f); // CHECK: cir.func @call_round // CHECK: {{.+}} = cir.round {{.+}} : !cir.double + + // LLVM: define double @call_round(double %0) + // LLVM: %{{.+}} = call double @llvm.round.f64(double %{{.+}}) + // LLVM: } } long double call_roundl(long double f) { @@ -483,6 +745,10 @@ long double call_roundl(long double f) { // CHECK: cir.func @call_roundl // CHECK: {{.+}} = cir.round {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.round {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_roundl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.round.f80(x86_fp80 %{{.+}}) + // LLVM: } } // sin @@ -491,12 +757,20 @@ float my_sinf(float f) { return __builtin_sinf(f); // CHECK: cir.func @my_sinf // CHECK: {{.+}} = cir.sin {{.+}} : !cir.float + + // LLVM: define float @my_sinf(float %0) + // LLVM: %{{.+}} = call float @sinf(float %{{.+}}) + // LLVM: } } double my_sin(double f) { return __builtin_sin(f); // CHECK: cir.func @my_sin // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double + + // LLVM: define double @my_sin(double %0) + // LLVM: %{{.+}} = call double @sin(double %{{.+}}) + // LLVM: } } long double my_sinl(long double f) { @@ -504,6 +778,10 @@ long double my_sinl(long double f) { // CHECK: cir.func @my_sinl // CHECK: {{.+}} = cir.sin {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.sin {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_sinl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @sinl(x86_fp80 %{{.+}}) + // LLVM: } } float sinf(float); @@ -514,12 +792,20 @@ float call_sinf(float f) { return sinf(f); // CHECK: cir.func @call_sinf // CHECK: {{.+}} = cir.sin {{.+}} : !cir.float + + // LLVM: define float @call_sinf(float %0) + // LLVM: %{{.+}} = call float @sinf(float %{{.+}}) + // LLVM: } } double call_sin(double f) { return sin(f); // CHECK: cir.func @call_sin // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double + + // LLVM: define double @call_sin(double %0) + // LLVM: %{{.+}} = call double @sin(double %{{.+}}) + // LLVM: } } long double call_sinl(long double f) { @@ -527,6 +813,10 @@ long double call_sinl(long double f) { // CHECK: cir.func @call_sinl // CHECK: {{.+}} = cir.sin {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.sin {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_sinl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @sinl(x86_fp80 %{{.+}}) + // LLVM: } } // sqrt @@ -535,12 +825,20 @@ float my_sqrtf(float f) { return __builtin_sqrtf(f); // CHECK: cir.func @my_sqrtf // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.float + + // LLVM: define float @my_sqrtf(float %0) + // LLVM: %{{.+}} = call float @sqrtf(float %{{.+}}) + // LLVM: } } double my_sqrt(double f) { return __builtin_sqrt(f); // CHECK: cir.func @my_sqrt // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double + + // LLVM: define double @my_sqrt(double %0) + // LLVM: %{{.+}} = call double @sqrt(double %{{.+}}) + // LLVM: } } long double my_sqrtl(long double f) { @@ -548,6 +846,10 @@ long double my_sqrtl(long double f) { // CHECK: cir.func @my_sqrtl // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.sqrt {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_sqrtl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @sqrtl(x86_fp80 %{{.+}}) + // LLVM: } } float sqrtf(float); @@ -558,12 +860,20 @@ float call_sqrtf(float f) { return sqrtf(f); // CHECK: cir.func @call_sqrtf // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.float + + // LLVM: define float @call_sqrtf(float %0) + // LLVM: %{{.+}} = call float @sqrtf(float %{{.+}}) + // LLVM: } } double call_sqrt(double f) { return sqrt(f); // CHECK: cir.func @call_sqrt // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double + + // LLVM: define double @call_sqrt(double %0) + // LLVM: %{{.+}} = call double @sqrt(double %{{.+}}) + // LLVM: } } long double call_sqrtl(long double f) { @@ -571,6 +881,10 @@ long double call_sqrtl(long double f) { // CHECK: cir.func @call_sqrtl // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.sqrt {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_sqrtl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @sqrtl(x86_fp80 %{{.+}}) + // LLVM: } } // trunc @@ -579,12 +893,20 @@ float my_truncf(float f) { return __builtin_truncf(f); // CHECK: cir.func @my_truncf // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.float + + // LLVM: define float @my_truncf(float %0) + // LLVM: %{{.+}} = call float @llvm.trunc.f32(float %{{.+}}) + // LLVM: } } double my_trunc(double f) { return __builtin_trunc(f); // CHECK: cir.func @my_trunc // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.double + + // LLVM: define double @my_trunc(double %0) + // LLVM: %{{.+}} = call double @llvm.trunc.f64(double %{{.+}}) + // LLVM: } } long double my_truncl(long double f) { @@ -592,6 +914,10 @@ long double my_truncl(long double f) { // CHECK: cir.func @my_truncl // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.trunc {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_truncl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.trunc.f80(x86_fp80 %{{.+}}) + // LLVM: } } float truncf(float); @@ -602,12 +928,20 @@ float call_truncf(float f) { return truncf(f); // CHECK: cir.func @call_truncf // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.float + + // LLVM: define float @call_truncf(float %0) + // LLVM: %{{.+}} = call float @llvm.trunc.f32(float %{{.+}}) + // LLVM: } } double call_trunc(double f) { return trunc(f); // CHECK: cir.func @call_trunc // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.double + + // LLVM: define double @call_trunc(double %0) + // LLVM: %{{.+}} = call double @llvm.trunc.f64(double %{{.+}}) + // LLVM: } } long double call_truncl(long double f) { @@ -615,6 +949,10 @@ long double call_truncl(long double f) { // CHECK: cir.func @call_truncl // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.trunc {{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_truncl(x86_fp80 %0) + // LLVM: %{{.+}} = call x86_fp80 @llvm.trunc.f80(x86_fp80 %{{.+}}) + // LLVM: } } // copysign @@ -623,12 +961,20 @@ float my_copysignf(float x, float y) { return __builtin_copysignf(x, y); // CHECK: cir.func @my_copysignf // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.float + + // LLVM: define float @my_copysignf + // LLVM: %{{.+}} = call float @llvm.copysign.f32(float %{{.+}}, float %{{.+}}) + // LLVM: } } double my_copysign(double x, double y) { return __builtin_copysign(x, y); // CHECK: cir.func @my_copysign // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.double + + // LLVM: define double @my_copysign + // LLVM: %{{.+}} = call double @llvm.copysign.f64(double %{{.+}}, double %{{.+}}) + // LLVM: } } long double my_copysignl(long double x, long double y) { @@ -636,6 +982,10 @@ long double my_copysignl(long double x, long double y) { // CHECK: cir.func @my_copysignl // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_copysignl + // LLVM: %{{.+}} = call x86_fp80 @llvm.copysign.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: } } float copysignf(float, float); @@ -646,12 +996,20 @@ float call_copysignf(float x, float y) { return copysignf(x, y); // CHECK: cir.func @call_copysignf // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.float + + // LLVM: define float @call_copysignf + // LLVM: %{{.+}} = call float @llvm.copysign.f32(float %{{.+}}, float %{{.+}}) + // LLVM: } } double call_copysign(double x, double y) { return copysign(x, y); // CHECK: cir.func @call_copysign // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.double + + // LLVM: define double @call_copysign + // LLVM: %{{.+}} = call double @llvm.copysign.f64(double %{{.+}}, double %{{.+}}) + // LLVM: } } long double call_copysignl(long double x, long double y) { @@ -659,6 +1017,10 @@ long double call_copysignl(long double x, long double y) { // CHECK: cir.func @call_copysignl // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_copysignl + // LLVM: %{{.+}} = call x86_fp80 @llvm.copysign.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: } } // fmax @@ -667,12 +1029,20 @@ float my_fmaxf(float x, float y) { return __builtin_fmaxf(x, y); // CHECK: cir.func @my_fmaxf // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.float + + // LLVM: define float @my_fmaxf + // LLVM: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) + // LLVM: } } double my_fmax(double x, double y) { return __builtin_fmax(x, y); // CHECK: cir.func @my_fmax // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.double + + // LLVM: define double @my_fmax + // LLVM: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) + // LLVM: } } long double my_fmaxl(long double x, long double y) { @@ -680,6 +1050,10 @@ long double my_fmaxl(long double x, long double y) { // CHECK: cir.func @my_fmaxl // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_fmaxl + // LLVM: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: } } float fmaxf(float, float); @@ -690,12 +1064,20 @@ float call_fmaxf(float x, float y) { return fmaxf(x, y); // CHECK: cir.func @call_fmaxf // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.float + + // LLVM: define float @call_fmaxf + // LLVM: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) + // LLVM: } } double call_fmax(double x, double y) { return fmax(x, y); // CHECK: cir.func @call_fmax // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.double + + // LLVM: define double @call_fmax + // LLVM: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) + // LLVM: } } long double call_fmaxl(long double x, long double y) { @@ -703,6 +1085,10 @@ long double call_fmaxl(long double x, long double y) { // CHECK: cir.func @call_fmaxl // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_fmaxl + // LLVM: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: } } // fmin @@ -711,12 +1097,20 @@ float my_fminf(float x, float y) { return __builtin_fminf(x, y); // CHECK: cir.func @my_fminf // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.float + + // LLVM: define float @my_fminf + // LLVM: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) + // LLVM: } } double my_fmin(double x, double y) { return __builtin_fmin(x, y); // CHECK: cir.func @my_fmin // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.double + + // LLVM: define double @my_fmin + // LLVM: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) + // LLVM: } } long double my_fminl(long double x, long double y) { @@ -724,6 +1118,10 @@ long double my_fminl(long double x, long double y) { // CHECK: cir.func @my_fminl // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_fminl + // LLVM: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: } } float fminf(float, float); @@ -734,12 +1132,20 @@ float call_fminf(float x, float y) { return fminf(x, y); // CHECK: cir.func @call_fminf // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.float + + // LLVM: define float @call_fminf + // LLVM: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) + // LLVM: } } double call_fmin(double x, double y) { return fmin(x, y); // CHECK: cir.func @call_fmin // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.double + + // LLVM: define double @call_fmin + // LLVM: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) + // LLVM: } } long double call_fminl(long double x, long double y) { @@ -747,6 +1153,10 @@ long double call_fminl(long double x, long double y) { // CHECK: cir.func @call_fminl // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_fminl + // LLVM: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: } } // fmod @@ -755,12 +1165,20 @@ float my_fmodf(float x, float y) { return __builtin_fmodf(x, y); // CHECK: cir.func @my_fmodf // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.float + + // LLVM: define float @my_fmodf + // LLVM: %{{.+}} = call float @fmodf(float %{{.+}}, float %{{.+}}) + // LLVM: } } double my_fmod(double x, double y) { return __builtin_fmod(x, y); // CHECK: cir.func @my_fmod // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.double + + // LLVM: define double @my_fmod + // LLVM: %{{.+}} = call double @fmod(double %{{.+}}, double %{{.+}}) + // LLVM: } } long double my_fmodl(long double x, long double y) { @@ -768,6 +1186,10 @@ long double my_fmodl(long double x, long double y) { // CHECK: cir.func @my_fmodl // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_fmodl + // LLVM: %{{.+}} = call x86_fp80 @fmodl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: } } float fmodf(float, float); @@ -778,12 +1200,20 @@ float call_fmodf(float x, float y) { return fmodf(x, y); // CHECK: cir.func @call_fmodf // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.float + + // LLVM: define float @call_fmodf + // LLVM: %{{.+}} = call float @fmodf(float %{{.+}}, float %{{.+}}) + // LLVM: } } double call_fmod(double x, double y) { return fmod(x, y); // CHECK: cir.func @call_fmod // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.double + + // LLVM: define double @call_fmod + // LLVM: %{{.+}} = call double @fmod(double %{{.+}}, double %{{.+}}) + // LLVM: } } long double call_fmodl(long double x, long double y) { @@ -791,6 +1221,10 @@ long double call_fmodl(long double x, long double y) { // CHECK: cir.func @call_fmodl // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_fmodl + // LLVM: %{{.+}} = call x86_fp80 @fmodl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: } } // pow @@ -799,12 +1233,20 @@ float my_powf(float x, float y) { return __builtin_powf(x, y); // CHECK: cir.func @my_powf // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.float + + // LLVM: define float @my_powf + // LLVM: %{{.+}} = call float @powf(float %{{.+}}, float %{{.+}}) + // LLVM: } } double my_pow(double x, double y) { return __builtin_pow(x, y); // CHECK: cir.func @my_pow // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.double + + // LLVM: define double @my_pow + // LLVM: %{{.+}} = call double @pow(double %{{.+}}, double %{{.+}}) + // LLVM: } } long double my_powl(long double x, long double y) { @@ -812,6 +1254,10 @@ long double my_powl(long double x, long double y) { // CHECK: cir.func @my_powl // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @my_powl + // LLVM: %{{.+}} = call x86_fp80 @powl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: } } float powf(float, float); @@ -822,12 +1268,20 @@ float call_powf(float x, float y) { return powf(x, y); // CHECK: cir.func @call_powf // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.float + + // LLVM: define float @call_powf + // LLVM: %{{.+}} = call float @powf(float %{{.+}}, float %{{.+}}) + // LLVM: } } double call_pow(double x, double y) { return pow(x, y); // CHECK: cir.func @call_pow // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.double + + // LLVM: define double @call_pow + // LLVM: %{{.+}} = call double @pow(double %{{.+}}, double %{{.+}}) + // LLVM: } } long double call_powl(long double x, long double y) { @@ -835,4 +1289,8 @@ long double call_powl(long double x, long double y) { // CHECK: cir.func @call_powl // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double + + // LLVM: define x86_fp80 @call_powl + // LLVM: %{{.+}} = call x86_fp80 @powl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: } } From db277b84d78f6612417014c61e594a69dc58a8cc Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Wed, 5 Jun 2024 20:21:25 -0300 Subject: [PATCH 1616/2301] [CIR][ABI] Create target lowering library skeleton (#643) This patch adds a new TargetLowering library that intends to add supoort for lowering CIR code to target specific CIR code. It is largely based on the original codegen library used to lower AST nodes to ABI/Target -specific LLVM IR instructions. Because of this, each file has a comment specifying the original codegen file that inspired the new file. The idea is that anyone who wishes to expand this library can look at the original codegen file to understand how to implement the new feature. In some cases, CIRGen defers the handling of ABI/target-specific details for a later stage in the pipeline. One reason for this is to keep the intermediate representation on a higher-level, which makes it easier to reason about and to perform optimizations. However, we still need to lower such representation to a target-specific format at some point. Some examples are ctor/dtors and calling conventions, which are not fully handled by CIRGen. The new library will be responsible for these lowerings. Some files are empty but will eventually be used and a few getters and methods where added to avoid unused warnings. Missing features in this library are tracked in a dedicated MissingFeature.h header. --- clang/include/clang/CIR/ABIArgInfo.h | 226 ++++++++++++++++++ clang/include/clang/CIR/MissingFeatures.h | 21 ++ clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h | 200 +--------------- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 + .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 5 +- .../Dialect/Transforms/CallConvLowering.cpp | 41 ++++ .../Transforms/TargetLowering/ABIInfo.cpp | 23 ++ .../Transforms/TargetLowering/ABIInfo.h | 40 ++++ .../Transforms/TargetLowering/ABIInfoImpl.cpp | 12 + .../Transforms/TargetLowering/ABIInfoImpl.h | 21 ++ .../Transforms/TargetLowering/CIRCXXABI.cpp | 22 ++ .../Transforms/TargetLowering/CIRCXXABI.h | 72 ++++++ .../TargetLowering/CIRLowerContext.cpp | 62 +++++ .../TargetLowering/CIRLowerContext.h | 74 ++++++ .../TargetLowering/CIRRecordLayout.cpp | 22 ++ .../TargetLowering/CIRRecordLayout.h | 33 +++ .../TargetLowering/CIRToCIRArgMapping.h | 60 +++++ .../Transforms/TargetLowering/CMakeLists.txt | 28 +++ .../TargetLowering/ItaniumCXXABI.cpp | 64 +++++ .../Transforms/TargetLowering/LowerCall.cpp | 0 .../Transforms/TargetLowering/LowerCall.h | 50 ++++ .../TargetLowering/LowerFunction.cpp | 36 +++ .../Transforms/TargetLowering/LowerFunction.h | 58 +++++ .../TargetLowering/LowerFunctionInfo.h | 134 +++++++++++ .../Transforms/TargetLowering/LowerModule.cpp | 91 +++++++ .../Transforms/TargetLowering/LowerModule.h | 77 ++++++ .../Transforms/TargetLowering/LowerTypes.cpp | 24 ++ .../Transforms/TargetLowering/LowerTypes.h | 59 +++++ .../TargetLowering/RecordLayoutBuilder.cpp | 12 + .../Transforms/TargetLowering/TargetInfo.cpp | 12 + .../Transforms/TargetLowering/TargetInfo.h | 36 +++ .../TargetLowering/TargetLoweringInfo.cpp | 12 + .../TargetLowering/TargetLoweringInfo.h | 38 +++ .../Targets}/LoweringPrepareAArch64CXXABI.cpp | 4 +- .../Targets}/LoweringPrepareItaniumCXXABI.cpp | 4 +- .../Transforms/TargetLowering/Targets/X86.cpp | 31 +++ 36 files changed, 1505 insertions(+), 203 deletions(-) create mode 100644 clang/include/clang/CIR/ABIArgInfo.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h rename clang/lib/CIR/Dialect/Transforms/{ => TargetLowering/Targets}/LoweringPrepareAArch64CXXABI.cpp (99%) rename clang/lib/CIR/Dialect/Transforms/{ => TargetLowering/Targets}/LoweringPrepareItaniumCXXABI.cpp (98%) create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp diff --git a/clang/include/clang/CIR/ABIArgInfo.h b/clang/include/clang/CIR/ABIArgInfo.h new file mode 100644 index 000000000000..08317d62297f --- /dev/null +++ b/clang/include/clang/CIR/ABIArgInfo.h @@ -0,0 +1,226 @@ +//==-- ABIArgInfo.h - Abstract info regarding ABI-specific arguments -------==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Defines ABIArgInfo and associated types used by CIR to track information +// regarding ABI-coerced types for function arguments and return values. This +// was moved to the common library as it might be used by both CIRGen and +// passes. +// +//===----------------------------------------------------------------------===// + +#ifndef CIR_COMMON_ABIARGINFO_H +#define CIR_COMMON_ABIARGINFO_H + +#include "mlir/IR/Types.h" +#include "clang/AST/Type.h" +#include + +namespace cir { + +/// Helper class to encapsulate information about how a specific C +/// type should be passed to or returned from a function. +class ABIArgInfo { +public: + enum Kind : uint8_t { + /// Pass the argument directly using the normal converted CIR type, + /// or by coercing to another specified type stored in 'CoerceToType'). If + /// an offset is specified (in UIntData), then the argument passed is offset + /// by some number of bytes in the memory representation. A dummy argument + /// is emitted before the real argument if the specified type stored in + /// "PaddingType" is not zero. + Direct, + + /// Valid only for integer argument types. Same as 'direct' but + /// also emit a zer/sign extension attribute. + Extend, + + /// Pass the argument indirectly via a hidden pointer with the + /// specified alignment (0 indicates default alignment) and address space. + Indirect, + + /// Similar to Indirect, but the pointer may be to an + /// object that is otherwise referenced. The object is known to not be + /// modified through any other references for the duration of the call, and + /// the callee must not itself modify the object. Because C allows parameter + /// variables to be modified and guarantees that they have unique addresses, + /// the callee must defensively copy the object into a local variable if it + /// might be modified or its address might be compared. Since those are + /// uncommon, in principle this convention allows programs to avoid copies + /// in more situations. However, it may introduce *extra* copies if the + /// callee fails to prove that a copy is unnecessary and the caller + /// naturally produces an unaliased object for the argument. + IndirectAliased, + + /// Ignore the argument (treat as void). Useful for void and empty + /// structs. + Ignore, + + /// Only valid for aggregate argument types. The structure should + /// be expanded into consecutive arguments for its constituent fields. + /// Currently expand is only allowed on structures whose fields are all + /// scalar types or are themselves expandable types. + Expand, + + /// Only valid for aggregate argument types. The structure + /// should be expanded into consecutive arguments corresponding to the + /// non-array elements of the type stored in CoerceToType. + /// Array elements in the type are assumed to be padding and skipped. + CoerceAndExpand, + + // TODO: translate this idea to CIR! Define it for now just to ensure that + // we can assert it not being used + InAlloca, + KindFirst = Direct, + KindLast = InAlloca + }; + +private: + mlir::Type TypeData; // canHaveCoerceToType(); + union { + mlir::Type PaddingType; // canHavePaddingType() + mlir::Type UnpaddedCoerceAndExpandType; // isCoerceAndExpand() + }; + struct DirectAttrInfo { + unsigned Offset; + unsigned Align; + }; + struct IndirectAttrInfo { + unsigned Align; + unsigned AddrSpace; + }; + union { + DirectAttrInfo DirectAttr; // isDirect() || isExtend() + IndirectAttrInfo IndirectAttr; // isIndirect() + unsigned AllocaFieldIndex; // isInAlloca() + }; + Kind TheKind; + bool CanBeFlattened : 1; // isDirect() + bool SignExt : 1; // isExtend() + + bool canHavePaddingType() const { + return isDirect() || isExtend() || isIndirect() || isIndirectAliased() || + isExpand(); + } + + void setPaddingType(mlir::Type T) { + assert(canHavePaddingType()); + PaddingType = T; + } + +public: + ABIArgInfo(Kind K = Direct) + : TypeData(nullptr), PaddingType(nullptr), DirectAttr{0, 0}, TheKind(K), + CanBeFlattened(false) {} + + static ABIArgInfo getDirect(mlir::Type T = nullptr, unsigned Offset = 0, + mlir::Type Padding = nullptr, + bool CanBeFlattened = true, unsigned Align = 0) { + auto AI = ABIArgInfo(Direct); + AI.setCoerceToType(T); + AI.setPaddingType(Padding); + AI.setDirectOffset(Offset); + AI.setDirectAlign(Align); + AI.setCanBeFlattened(CanBeFlattened); + return AI; + } + + static ABIArgInfo getSignExtend(clang::QualType Ty, mlir::Type T = nullptr) { + assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); + auto AI = ABIArgInfo(Extend); + AI.setCoerceToType(T); + AI.setPaddingType(nullptr); + AI.setDirectOffset(0); + AI.setDirectAlign(0); + AI.setSignExt(true); + return AI; + } + + static ABIArgInfo getZeroExtend(clang::QualType Ty, mlir::Type T = nullptr) { + assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); + auto AI = ABIArgInfo(Extend); + AI.setCoerceToType(T); + AI.setPaddingType(nullptr); + AI.setDirectOffset(0); + AI.setDirectAlign(0); + AI.setSignExt(false); + return AI; + } + + // ABIArgInfo will record the argument as being extended based on the sign of + // it's type. + static ABIArgInfo getExtend(clang::QualType Ty, mlir::Type T = nullptr) { + assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); + if (Ty->hasSignedIntegerRepresentation()) + return getSignExtend(Ty, T); + return getZeroExtend(Ty, T); + } + + static ABIArgInfo getIgnore() { return ABIArgInfo(Ignore); } + + Kind getKind() const { return TheKind; } + bool isDirect() const { return TheKind == Direct; } + bool isInAlloca() const { return TheKind == InAlloca; } + bool isExtend() const { return TheKind == Extend; } + bool isIndirect() const { return TheKind == Indirect; } + bool isIndirectAliased() const { return TheKind == IndirectAliased; } + bool isExpand() const { return TheKind == Expand; } + bool isCoerceAndExpand() const { return TheKind == CoerceAndExpand; } + + bool canHaveCoerceToType() const { + return isDirect() || isExtend() || isCoerceAndExpand(); + } + + // Direct/Extend accessors + unsigned getDirectOffset() const { + assert((isDirect() || isExtend()) && "Not a direct or extend kind"); + return DirectAttr.Offset; + } + + void setDirectOffset(unsigned Offset) { + assert((isDirect() || isExtend()) && "Not a direct or extend kind"); + DirectAttr.Offset = Offset; + } + + void setDirectAlign(unsigned Align) { + assert((isDirect() || isExtend()) && "Not a direct or extend kind"); + DirectAttr.Align = Align; + } + + void setSignExt(bool SExt) { + assert(isExtend() && "Invalid kind!"); + SignExt = SExt; + } + + void setCanBeFlattened(bool Flatten) { + assert(isDirect() && "Invalid kind!"); + CanBeFlattened = Flatten; + } + + bool getCanBeFlattened() const { + assert(isDirect() && "Invalid kind!"); + return CanBeFlattened; + } + + mlir::Type getPaddingType() const { + return (canHavePaddingType() ? PaddingType : nullptr); + } + + mlir::Type getCoerceToType() const { + assert(canHaveCoerceToType() && "Invalid kind!"); + return TypeData; + } + + void setCoerceToType(mlir::Type T) { + assert(canHaveCoerceToType() && "Invalid kind!"); + TypeData = T; + } +}; + +} // namespace cir + +#endif // CIR_COMMON_ABIARGINFO_H diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 439c57afb6a0..5b271bcc4d37 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -186,6 +186,27 @@ struct MissingFeatures { static bool supportisHomogeneousAggregateQueryForAArch64() { return false; } static bool supportisEndianQueryForAArch64() { return false; } static bool supportisAggregateTypeForABIAArch64() { return false; } + + //===--- ABI lowering --===// + + // Parameters may have additional attributes (e.g. [[noescape]]) that affect + // the compiler. This is not yet supported in CIR. + static bool extParamInfo() { return true; } + + // LangOpts may affect lowering, but we do not carry this information into CIR + // just yet. Right now, it only instantiates the default lang options. + static bool langOpts() { return true; } + + // Several type qualifiers are not yet supported in CIR, but important when + // evaluating ABI-specific lowering. + static bool qualifiedTypes() { return true; } + + // We're ignoring several details regarding ABI-halding for Swift. + static bool swift() { return true; } + + // Despite carrying some information about variadics, we are currently + // ignoring this to focus only on the code necessary to lower non-variadics. + static bool variadicFunctions() { return true; } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h index 36425beb9fb5..c443ea5f8d7a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h @@ -16,6 +16,7 @@ #define LLVM_CLANG_CIR_CIRGENFUNCTIONINFO_H #include "clang/AST/CanonicalType.h" +#include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/FoldingSet.h" @@ -23,205 +24,6 @@ namespace cir { -/// ABIArgInfo - Helper class to encapsulate information about how a specific C -/// type should be passed to or returned from a function. -class ABIArgInfo { -public: - enum Kind : uint8_t { - /// Direct - Pass the argument directly using the normal converted CIR type, - /// or by coercing to another specified type stored in 'CoerceToType'). If - /// an offset is specified (in UIntData), then the argument passed is offset - /// by some number of bytes in the memory representation. A dummy argument - /// is emitted before the real argument if the specified type stored in - /// "PaddingType" is not zero. - Direct, - - /// Extend - Valid only for integer argument types. Same as 'direct' but - /// also emit a zer/sign extension attribute. - Extend, - - /// Indirect - Pass the argument indirectly via a hidden pointer with the - /// specified alignment (0 indicates default alignment) and address space. - Indirect, - - /// IndirectAliased - Similar to Indirect, but the pointer may be to an - /// object that is otherwise referenced. The object is known to not be - /// modified through any other references for the duration of the call, and - /// the callee must not itself modify the object. Because C allows parameter - /// variables to be modified and guarantees that they have unique addresses, - /// the callee must defensively copy the object into a local variable if it - /// might be modified or its address might be compared. Since those are - /// uncommon, in principle this convention allows programs to avoid copies - /// in more situations. However, it may introduce *extra* copies if the - /// callee fails to prove that a copy is unnecessary and the caller - /// naturally produces an unaliased object for the argument. - IndirectAliased, - - /// Ignore - Ignore the argument (treat as void). Useful for void and empty - /// structs. - Ignore, - - /// Expand - Only valid for aggregate argument types. The structure should - /// be expanded into consecutive arguments for its constituent fields. - /// Currently expand is only allowed on structures whose fields are all - /// scalar types or are themselves expandable types. - Expand, - - /// CoerceAndExpand - Only valid for aggregate argument types. The structure - /// should be expanded into consecutive arguments corresponding to the - /// non-array elements of the type stored in CoerceToType. - /// Array elements in the type are assumed to be padding and skipped. - CoerceAndExpand, - - // TODO: translate this idea to CIR! Define it for now just to ensure that - // we can assert it not being used - InAlloca, - KindFirst = Direct, - KindLast = InAlloca - }; - -private: - mlir::Type TypeData; // canHaveCoerceToType(); - union { - mlir::Type PaddingType; // canHavePaddingType() - mlir::Type UnpaddedCoerceAndExpandType; // isCoerceAndExpand() - }; - struct DirectAttrInfo { - unsigned Offset; - unsigned Align; - }; - struct IndirectAttrInfo { - unsigned Align; - unsigned AddrSpace; - }; - union { - DirectAttrInfo DirectAttr; // isDirect() || isExtend() - IndirectAttrInfo IndirectAttr; // isIndirect() - unsigned AllocaFieldIndex; // isInAlloca() - }; - Kind TheKind; - bool CanBeFlattened : 1; // isDirect() - bool SignExt : 1; // isExtend() - - bool canHavePaddingType() const { - return isDirect() || isExtend() || isIndirect() || isIndirectAliased() || - isExpand(); - } - - void setPaddingType(mlir::Type T) { - assert(canHavePaddingType()); - PaddingType = T; - } - -public: - ABIArgInfo(Kind K = Direct) - : TypeData(nullptr), PaddingType(nullptr), DirectAttr{0, 0}, TheKind(K), - CanBeFlattened(false) {} - - static ABIArgInfo getDirect(mlir::Type T = nullptr, unsigned Offset = 0, - mlir::Type Padding = nullptr, - bool CanBeFlattened = true, unsigned Align = 0) { - auto AI = ABIArgInfo(Direct); - AI.setCoerceToType(T); - AI.setPaddingType(Padding); - AI.setDirectOffset(Offset); - AI.setDirectAlign(Align); - AI.setCanBeFlattened(CanBeFlattened); - return AI; - } - - static ABIArgInfo getSignExtend(clang::QualType Ty, mlir::Type T = nullptr) { - assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); - auto AI = ABIArgInfo(Extend); - AI.setCoerceToType(T); - AI.setPaddingType(nullptr); - AI.setDirectOffset(0); - AI.setDirectAlign(0); - AI.setSignExt(true); - return AI; - } - - static ABIArgInfo getZeroExtend(clang::QualType Ty, mlir::Type T = nullptr) { - assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); - auto AI = ABIArgInfo(Extend); - AI.setCoerceToType(T); - AI.setPaddingType(nullptr); - AI.setDirectOffset(0); - AI.setDirectAlign(0); - AI.setSignExt(false); - return AI; - } - - // ABIArgInfo will record the argument as being extended based on the sign of - // it's type. - static ABIArgInfo getExtend(clang::QualType Ty, mlir::Type T = nullptr) { - assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); - if (Ty->hasSignedIntegerRepresentation()) - return getSignExtend(Ty, T); - return getZeroExtend(Ty, T); - } - - static ABIArgInfo getIgnore() { return ABIArgInfo(Ignore); } - - Kind getKind() const { return TheKind; } - bool isDirect() const { return TheKind == Direct; } - bool isInAlloca() const { return TheKind == InAlloca; } - bool isExtend() const { return TheKind == Extend; } - bool isIndirect() const { return TheKind == Indirect; } - bool isIndirectAliased() const { return TheKind == IndirectAliased; } - bool isExpand() const { return TheKind == Expand; } - bool isCoerceAndExpand() const { return TheKind == CoerceAndExpand; } - - bool canHaveCoerceToType() const { - return isDirect() || isExtend() || isCoerceAndExpand(); - } - - // Direct/Extend accessors - unsigned getDirectOffset() const { - assert((isDirect() || isExtend()) && "Not a direct or extend kind"); - return DirectAttr.Offset; - } - - void setDirectOffset(unsigned Offset) { - assert((isDirect() || isExtend()) && "Not a direct or extend kind"); - DirectAttr.Offset = Offset; - } - - void setDirectAlign(unsigned Align) { - assert((isDirect() || isExtend()) && "Not a direct or extend kind"); - DirectAttr.Align = Align; - } - - void setSignExt(bool SExt) { - assert(isExtend() && "Invalid kind!"); - SignExt = SExt; - } - - void setCanBeFlattened(bool Flatten) { - assert(isDirect() && "Invalid kind!"); - CanBeFlattened = Flatten; - } - - bool getCanBeFlattened() const { - assert(isDirect() && "Invalid kind!"); - return CanBeFlattened; - } - - mlir::Type getPaddingType() const { - return (canHavePaddingType() ? PaddingType : nullptr); - } - - mlir::Type getCoerceToType() const { - assert(canHaveCoerceToType() && "Invalid kind!"); - return TypeData; - } - - void setCoerceToType(mlir::Type T) { - assert(canHaveCoerceToType() && "Invalid kind!"); - TypeData = T; - } -}; - struct CIRGenFunctionInfoArgInfo { clang::CanQualType type; ABIArgInfo info; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c28a34f23a75..9465ba3f3b05 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -177,11 +177,15 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, sob = sob::SignedOverflowBehavior::trapping; break; } + + // FIXME(cir): Implement a custom CIR Module Op and attributes to leverage + // MLIR features. theModule->setAttr("cir.sob", mlir::cir::SignedOverflowBehaviorAttr::get(&context, sob)); auto lang = SourceLanguageAttr::get(&context, getCIRSourceLanguage()); theModule->setAttr( "cir.lang", mlir::cir::LangAttr::get(&context, lang)); + theModule->setAttr("cir.triple", builder.getStringAttr(getTriple().str())); // Set the module name to be the name of the main file. TranslationUnitDecl // often contains invalid source locations and isn't a reliable source for the // module location. diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index bed968da6a81..93d9bb83edfa 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -1,8 +1,8 @@ +add_subdirectory(TargetLowering) + add_clang_library(MLIRCIRTransforms LifetimeCheck.cpp LoweringPrepare.cpp - LoweringPrepareItaniumCXXABI.cpp - LoweringPrepareAArch64CXXABI.cpp MergeCleanups.cpp DropAST.cpp IdiomRecognizer.cpp @@ -19,6 +19,7 @@ add_clang_library(MLIRCIRTransforms LINK_LIBS PUBLIC clangAST clangBasic + TargetLowering MLIRAnalysis MLIRIR diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 58e4d3200705..6130367d91a4 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -6,9 +6,17 @@ // //===----------------------------------------------------------------------===// +// FIXME(cir): This header file is not exposed to the public API, but can be +// reused by CIR ABI lowering since it holds target-specific information. +#include "../../../Basic/Targets.h" + +#include "TargetLowering/LowerModule.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/IR/BuiltinOps.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Pass/Pass.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "clang/Basic/TargetOptions.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #define GEN_PASS_DEF_CALLCONVLOWERING @@ -17,6 +25,36 @@ namespace mlir { namespace cir { +namespace { + +LowerModule createLowerModule(FuncOp op, PatternRewriter &rewriter) { + auto module = op->getParentOfType(); + + // Fetch the LLVM data layout string. + auto dataLayoutStr = + module->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName()) + .cast(); + + // Fetch target information. + llvm::Triple triple( + module->getAttr("cir.triple").cast().getValue()); + clang::TargetOptions targetOptions; + targetOptions.Triple = triple.str(); + auto targetInfo = clang::targets::AllocateTarget(triple, targetOptions); + + // FIXME(cir): This just uses the default language options. We need to account + // for custom options. + // Create context. + assert(::cir::MissingFeatures::langOpts()); + clang::LangOptions langOpts; + auto context = CIRLowerContext(module.getContext(), langOpts); + context.initBuiltinTypes(*targetInfo); + + return LowerModule(context, module, dataLayoutStr, *targetInfo, rewriter); +} + +} // namespace + //===----------------------------------------------------------------------===// // Rewrite Patterns //===----------------------------------------------------------------------===// @@ -28,6 +66,9 @@ struct CallConvLoweringPattern : public OpRewritePattern { PatternRewriter &rewriter) const final { if (!op.getAst()) return op.emitError("function has no AST information"); + + LowerModule lowerModule = createLowerModule(op, rewriter); + return success(); } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp new file mode 100644 index 000000000000..46a865da0670 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -0,0 +1,23 @@ +//===- ABIInfo.cpp --------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/ABIInfo.cpp. The queries are +// adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#include "ABIInfo.h" + +namespace mlir { +namespace cir { + +// Pin the vtable to this file. +ABIInfo::~ABIInfo() = default; + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h new file mode 100644 index 000000000000..3fad01f3d7a8 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h @@ -0,0 +1,40 @@ +//===----- ABIInfo.h - CIR's ABI information --------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics the CodeGen/ABIInfo.h class. The main difference +// is that this is adapted to operate on the CIR dialect. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H + +#include "llvm/IR/CallingConv.h" + +namespace mlir { +namespace cir { + +// Forward declarations. +class LowerTypes; + +/// Target specific hooks for defining how a type should be passed or returned +/// from functions. +class ABIInfo { +protected: + LowerTypes < + llvm::CallingConv::ID RuntimeCC; + +public: + ABIInfo(LowerTypes <) : LT(LT), RuntimeCC(llvm::CallingConv::C) {} + virtual ~ABIInfo(); +}; + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp new file mode 100644 index 000000000000..c51176a99b95 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -0,0 +1,12 @@ +//===--- ABIInfoImpl.cpp - Encapsulate calling convention details ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/ABIInfoImpl.cpp. The queries are +// adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h new file mode 100644 index 000000000000..f34d7fb07226 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h @@ -0,0 +1,21 @@ +//===- ABIInfoImpl.h --------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/ABIInfoImpl.h. The queries are +// adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H + +namespace mlir { +namespace cir {} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.cpp new file mode 100644 index 000000000000..8c483469f1ce --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.cpp @@ -0,0 +1,22 @@ +//===- CIRCXXABI.cpp ------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/CGCXXABI.cpp. The queries are +// adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#include "CIRCXXABI.h" + +namespace mlir { +namespace cir { + +CIRCXXABI::~CIRCXXABI() {} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h new file mode 100644 index 000000000000..bf5131a074b8 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -0,0 +1,72 @@ +//===----- CIRCXXABI.h - Interface to C++ ABIs for CIR Dialect --*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics the CodeGen/CGCXXABI.h class. The main difference +// is that this is adapted to operate on the CIR dialect. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H + +#include "mlir/IR/Value.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" + +namespace mlir { +namespace cir { + +// Forward declarations. +class LowerModule; + +class CIRCXXABI { + friend class LowerModule; + +protected: + LowerModule &LM; + + CIRCXXABI(LowerModule &LM) : LM(LM) {} + +public: + virtual ~CIRCXXABI(); +}; + +/// Creates an Itanium-family ABI. +CIRCXXABI *CreateItaniumCXXABI(LowerModule &CGM); + +} // namespace cir +} // namespace mlir + +// FIXME(cir): Merge this into the CIRCXXABI class above. To do so, this code +// should be updated to follow some level of codegen parity. +namespace cir { + +enum class AArch64ABIKind { + AAPCS = 0, + DarwinPCS, + Win64, + AAPCSSoft, +}; + +class LoweringPrepareCXXABI { +public: + static LoweringPrepareCXXABI *createItaniumABI(); + static LoweringPrepareCXXABI *createAArch64ABI(AArch64ABIKind k); + + virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, + mlir::cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) = 0; + virtual ~LoweringPrepareCXXABI() {} + + virtual mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, + mlir::cir::DynamicCastOp op) = 0; +}; +} // namespace cir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp new file mode 100644 index 000000000000..b75893bfb33f --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -0,0 +1,62 @@ +//===- CIRLowerContext.cpp ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/AST/ASTContext.cpp. The queries are +// adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#include "CIRLowerContext.h" +#include "mlir/IR/MLIRContext.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" +#include + +namespace mlir { +namespace cir { + +CIRLowerContext::CIRLowerContext(MLIRContext *MLIRCtx, + clang::LangOptions &LOpts) + : MLIRCtx(MLIRCtx), LangOpts(LOpts) {} + +CIRLowerContext::~CIRLowerContext() {} + +Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { + Type Ty; + + // NOTE(cir): Clang does more stuff here. Not sure if we need to do the same. + assert(::cir::MissingFeatures::qualifiedTypes()); + switch (K) { + case clang::BuiltinType::Char_S: + Ty = IntType::get(getMLIRContext(), 8, true); + break; + default: + llvm_unreachable("NYI"); + } + + Types.push_back(Ty); + return Ty; +} + +void CIRLowerContext::initBuiltinTypes(const clang::TargetInfo &Target, + const clang::TargetInfo *AuxTarget) { + assert((!this->Target || this->Target == &Target) && + "Incorrect target reinitialization"); + this->Target = &Target; + this->AuxTarget = AuxTarget; + + // C99 6.2.5p3. + if (LangOpts.CharIsSigned) + CharTy = initBuiltinType(clang::BuiltinType::Char_S); + else + llvm_unreachable("NYI"); +} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h new file mode 100644 index 000000000000..3745d146a5e7 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h @@ -0,0 +1,74 @@ +//===- CIRLowerContext.h - Context to lower CIR -----------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Partially mimics AST/ASTContext.h. The main difference is that this is +// adapted to operate on the CIR dialect. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRLowerContext_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRLowerContext_H + +#include "mlir/IR/MLIRContext.h" +#include "mlir/IR/Types.h" +#include "clang/AST/Type.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/ADT/IntrusiveRefCntPtr.h" + +namespace mlir { +namespace cir { + +// FIXME(cir): Most of this is type-related information that should already be +// embedded into CIR. Maybe we can move this to an MLIR interface. +class CIRLowerContext : public llvm::RefCountedBase { + +private: + mutable SmallVector Types; + + const clang::TargetInfo *Target = nullptr; + const clang::TargetInfo *AuxTarget = nullptr; + + /// MLIR context to be used when creating types. + MLIRContext *MLIRCtx; + + /// The language options used to create the AST associated with + /// this ASTContext object. + clang::LangOptions &LangOpts; + + //===--------------------------------------------------------------------===// + // Built-in Types + //===--------------------------------------------------------------------===// + + Type CharTy; + +public: + CIRLowerContext(MLIRContext *MLIRCtx, clang::LangOptions &LOpts); + CIRLowerContext(const CIRLowerContext &) = delete; + CIRLowerContext &operator=(const CIRLowerContext &) = delete; + ~CIRLowerContext(); + + /// Initialize built-in types. + /// + /// This routine may only be invoked once for a given ASTContext object. + /// It is normally invoked after ASTContext construction. + /// + /// \param Target The target + void initBuiltinTypes(const clang::TargetInfo &Target, + const clang::TargetInfo *AuxTarget = nullptr); + +private: + Type initBuiltinType(clang::BuiltinType::Kind K); + +public: + MLIRContext *getMLIRContext() const { return MLIRCtx; } +}; + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRLowerContext_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp new file mode 100644 index 000000000000..370ada5411a0 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp @@ -0,0 +1,22 @@ +//===- CIRRecordLayout.cpp ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/AST/RecordLayout.cpp. The queries are +// adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#include "CIRRecordLayout.h" + +namespace mlir { +namespace cir { + +CIRRecordLayout::CIRRecordLayout() {} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h new file mode 100644 index 000000000000..4ba672da9b43 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h @@ -0,0 +1,33 @@ +//===--- CGRecordLayout.h - LLVM Record Layout Information ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/CGRecordLayout.h. The queries +// are adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRRECORDLAYOUT_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRRECORDLAYOUT_H + +namespace mlir { +namespace cir { + +class CIRLowerContext; + +/// This class contains layout information for one RecordDecl, which is a +/// struct/union/class. The decl represented must be a definition, not a +/// forward declaration. This class is also used to contain layout information +/// for one ObjCInterfaceDecl. +class CIRRecordLayout { + CIRRecordLayout(); +}; + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRRECORDLAYOUT_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h new file mode 100644 index 000000000000..9c1dae1f3dbf --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -0,0 +1,60 @@ +//===--- CIRToCIRArgMapping.cpp - Maps to ABI-specific arguments ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics the ClangToLLVMArgMapping class in +// clang/lib/CodeGen/CGCall.cpp. The queries are adapted to operate on the CIR +// dialect, however. This class was extracted into a separate file to resolve +// build issues. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRTOCIRARGMAPPING_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRTOCIRARGMAPPING_H + +#include "CIRLowerContext.h" +#include "LoweringFunctionInfo.h" +#include "llvm/ADT/SmallVector.h" + +namespace mlir { +namespace cir { + +/// Encapsulates information about the way function arguments from +/// LoweringFunctionInfo should be passed to actual CIR function. +class CIRToCIRArgMapping { + static const unsigned InvalidIndex = ~0U; + unsigned TotalIRArgs; + + /// Arguments of CIR function corresponding to single CIR argument. + /// NOTE(cir): We add an MLIR block argument here indicating the actual + /// argument in the IR. + struct IRArgs { + unsigned PaddingArgIndex; + // Argument is expanded to IR arguments at positions + // [FirstArgIndex, FirstArgIndex + NumberOfArgs). + unsigned FirstArgIndex; + unsigned NumberOfArgs; + + IRArgs() + : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), + NumberOfArgs(0) {} + }; + + llvm::SmallVector ArgInfo; + +public: + CIRToCIRArgMapping(const CIRLowerContext &context, + const LowerFunctionInfo &FI, bool onlyRequiredArgs = false) + : ArgInfo(onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {}; + + unsigned totalIRArgs() const { return TotalIRArgs; } +}; + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRTOCIRARGMAPPING_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt new file mode 100644 index 000000000000..8e0afdc4367b --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt @@ -0,0 +1,28 @@ +add_clang_library(TargetLowering + ABIInfo.cpp + ABIInfoImpl.cpp + CIRCXXABI.cpp + CIRLowerContext.cpp + CIRRecordLayout.cpp + ItaniumCXXABI.cpp + LowerCall.cpp + LowerFunction.cpp + LowerModule.cpp + LowerTypes.cpp + RecordLayoutBuilder.cpp + TargetInfo.cpp + TargetLoweringInfo.cpp + Targets/X86.cpp + Targets/LoweringPrepareAArch64CXXABI.cpp + Targets/LoweringPrepareItaniumCXXABI.cpp + + DEPENDS + + LINK_LIBS PUBLIC + + MLIRIR + MLIRPass + + MLIRCIR + MLIRCIRInterfaces +) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp new file mode 100644 index 000000000000..aee2620496e6 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -0,0 +1,64 @@ +//===------- ItaniumCXXABI.cpp - Emit CIR code Itanium-specific code -----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides CIR lowering logic targeting the Itanium C++ ABI. The class in +// this file generates structures that follow the Itanium C++ ABI, which is +// documented at: +// https://itanium-cxx-abi.github.io/cxx-abi/abi.html +// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html +// +// It also supports the closely-related ARM ABI, documented at: +// https://developer.arm.com/documentation/ihi0041/g/ +// +// This file partially mimics clang/lib/CodeGen/ItaniumCXXABI.cpp. The queries +// are adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#include "CIRCXXABI.h" +#include "LowerModule.h" + +namespace mlir { +namespace cir { + +namespace { + +class ItaniumCXXABI : public CIRCXXABI { + +public: + ItaniumCXXABI(LowerModule &LM) : CIRCXXABI(LM) {} +}; + +} // namespace + +CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { + switch (LM.getCXXABIKind()) { + case clang::TargetCXXABI::GenericItanium: + return new ItaniumCXXABI(LM); + + case clang::TargetCXXABI::Microsoft: + llvm_unreachable("Microsoft ABI is not Itanium-based"); + default: + llvm_unreachable("NYI"); + } + + llvm_unreachable("bad ABI kind"); +} + +} // namespace cir +} // namespace mlir + +// FIXME(cir): Merge this into the CIRCXXABI class above. +class LoweringPrepareItaniumCXXABI : public cir::LoweringPrepareCXXABI { +public: + mlir::Value lowerDynamicCast(cir::CIRBaseBuilderTy &builder, + clang::ASTContext &astCtx, + mlir::cir::DynamicCastOp op) override; + mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) override; +}; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h new file mode 100644 index 000000000000..ac54490c578f --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h @@ -0,0 +1,50 @@ +//===----- LowerCall.h - Encapsulate calling convention details -*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/CGCall.h. The queries are +// adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERCALL_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERCALL_H + +#include "mlir/IR/Value.h" + +namespace mlir { +namespace cir { + +/// Contains the address where the return value of a function can be stored, and +/// whether the address is volatile or not. +class ReturnValueSlot { + Value Addr = {}; + + // Return value slot flags + unsigned IsVolatile : 1; + unsigned IsUnused : 1; + unsigned IsExternallyDestructed : 1; + +public: + ReturnValueSlot() + : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {} + ReturnValueSlot(Value Addr, bool IsVolatile, bool IsUnused = false, + bool IsExternallyDestructed = false) + : Addr(Addr), IsVolatile(IsVolatile), IsUnused(IsUnused), + IsExternallyDestructed(IsExternallyDestructed) {} + + bool isNull() const { return !Addr; } + bool isVolatile() const { return IsVolatile; } + Value getValue() const { return Addr; } + bool isUnused() const { return IsUnused; } + bool isExternallyDestructed() const { return IsExternallyDestructed; } +}; + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERCALL_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp new file mode 100644 index 000000000000..6215b6149786 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -0,0 +1,36 @@ +//===--- LowerFunction.cpp - Lower CIR Function Code ----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/CodeGenFunction.cpp. The queries +// are adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#include "LowerFunction.h" +#include "LowerCall.h" +#include "LowerModule.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Support/LogicalResult.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +namespace mlir { +namespace cir { + +// FIXME(cir): Pass SrcFn and NewFn around instead of having then as attributes. +LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, + FuncOp srcFn, FuncOp newFn) + : Target(LM.getTarget()), rewriter(rewriter), SrcFn(srcFn), NewFn(newFn), + LM(LM) {} + +LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, + FuncOp srcFn, CallOp callOp) + : Target(LM.getTarget()), rewriter(rewriter), SrcFn(srcFn), callOp(callOp), + LM(LM) {} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h new file mode 100644 index 000000000000..319751790915 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h @@ -0,0 +1,58 @@ +//===-- LowerFunction.h - Per-Function state for CIR lowering ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This class partially mimics clang/lib/CodeGen/CGFunctionInfo.h. The queries +// are adapted to operate on the CIR dialect, however. And we only copy code +// related to ABI-specific codegen. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERFUNCTION_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERFUNCTION_H + +#include "CIRCXXABI.h" +#include "LowerCall.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Support/LogicalResult.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +namespace mlir { +namespace cir { + +class LowerFunction { + LowerFunction(const LowerFunction &) = delete; + void operator=(const LowerFunction &) = delete; + + friend class CIRCXXABI; + + const clang::TargetInfo &Target; + + PatternRewriter &rewriter; + FuncOp SrcFn; // Original ABI-agnostic function. + FuncOp NewFn; // New ABI-aware function. + CallOp callOp; // Call operation to be lowered. + +public: + /// Builder for lowering calling convention of a function definition. + LowerFunction(LowerModule &LM, PatternRewriter &rewriter, FuncOp srcFn, + FuncOp newFn); + + /// Builder for lowering calling convention of a call operation. + LowerFunction(LowerModule &LM, PatternRewriter &rewriter, FuncOp srcFn, + CallOp callOp); + + ~LowerFunction() = default; + + LowerModule &LM; // Per-module state. +}; + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERFUNCTION_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h new file mode 100644 index 000000000000..4344745f2478 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -0,0 +1,134 @@ +//==-- LowerFunctionInfo.h - Represents of function argument/return types --==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/inlcude/CodeGen/LowerFunctionInfo.h. The +// queries are adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERFUNCTIONINFO_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERFUNCTIONINFO_H + +#include "mlir/IR/Types.h" +#include "clang/CIR/ABIArgInfo.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/TrailingObjects.h" + +namespace mlir { +namespace cir { + +/// A class for recording the number of arguments that a function +/// signature requires. +class RequiredArgs { + /// The number of required arguments, or ~0 if the signature does + /// not permit optional arguments. + unsigned NumRequired; + +public: + enum All_t { All }; + + RequiredArgs(All_t _) : NumRequired(~0U) {} + explicit RequiredArgs(unsigned n) : NumRequired(n) { assert(n != ~0U); } + + bool allowsOptionalArgs() const { return NumRequired != ~0U; } +}; + +// Implementation detail of LowerFunctionInfo, factored out so it can be +// named in the TrailingObjects base class of CGFunctionInfo. +struct LowerFunctionInfoArgInfo { + mlir::Type type; // Original ABI-agnostic type. + ::cir::ABIArgInfo info; // ABI-specific information. +}; + +class LowerFunctionInfo final + : private llvm::TrailingObjects { + typedef LowerFunctionInfoArgInfo ArgInfo; + + /// The LLVM::CallingConv to use for this function (as specified by the + /// user). + unsigned CallingConvention : 8; + + /// The LLVM::CallingConv to actually use for this function, which may + /// depend on the ABI. + unsigned EffectiveCallingConvention : 8; + + /// Whether this is an instance method. + unsigned InstanceMethod : 1; + + /// Whether this is a chain call. + unsigned ChainCall : 1; + + /// Whether this function is called by forwarding arguments. + /// This doesn't support inalloca or varargs. + unsigned DelegateCall : 1; + + RequiredArgs Required; + + /// The struct representing all arguments passed in memory. Only used when + /// passing non-trivial types with inalloca. Not part of the profile. + StructType ArgStruct; + + unsigned NumArgs; + + const ArgInfo *getArgsBuffer() const { return getTrailingObjects(); } + ArgInfo *getArgsBuffer() { return getTrailingObjects(); } + + LowerFunctionInfo() : Required(RequiredArgs::All) {} + +public: + static LowerFunctionInfo *create(unsigned llvmCC, bool instanceMethod, + bool chainCall, bool delegateCall, + Type resultType, + ArrayRef argTypes, + RequiredArgs required) { + // TODO(cir): Add assertions? + assert(::cir::MissingFeatures::extParamInfo()); + void *buffer = operator new(totalSizeToAlloc(argTypes.size() + 1)); + + LowerFunctionInfo *FI = new (buffer) LowerFunctionInfo(); + FI->CallingConvention = llvmCC; + FI->EffectiveCallingConvention = llvmCC; + FI->InstanceMethod = instanceMethod; + FI->ChainCall = chainCall; + FI->DelegateCall = delegateCall; + FI->Required = required; + FI->ArgStruct = nullptr; + FI->NumArgs = argTypes.size(); + FI->getArgsBuffer()[0].type = resultType; + for (unsigned i = 0, e = argTypes.size(); i != e; ++i) + FI->getArgsBuffer()[i + 1].type = argTypes[i]; + + return FI; + }; + + // Friending class TrailingObjects is apparently not good enough for MSVC, + // so these have to be public. + friend class TrailingObjects; + size_t numTrailingObjects(OverloadToken) const { + return NumArgs + 1; + } + + unsigned arg_size() const { return NumArgs; } + + bool isVariadic() const { + assert(::cir::MissingFeatures::variadicFunctions()); + return false; + } + unsigned getNumRequiredArgs() const { + if (isVariadic()) + llvm_unreachable("NYI"); + return arg_size(); + } +}; + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERFUNCTIONINFO_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp new file mode 100644 index 000000000000..44ef32a5ddfa --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -0,0 +1,91 @@ +//===--- LowerModule.cpp - Lower CIR Module to a Target -------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/CodeGenModule.cpp. The queries +// are adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#include "LowerModule.h" +#include "CIRLowerContext.h" +#include "TargetInfo.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Support/LogicalResult.h" +#include "llvm/Support/ErrorHandling.h" + +namespace mlir { +namespace cir { + +static CIRCXXABI *createCXXABI(LowerModule &CGM) { + switch (CGM.getCXXABIKind()) { + case clang::TargetCXXABI::AppleARM64: + case clang::TargetCXXABI::Fuchsia: + case clang::TargetCXXABI::GenericAArch64: + case clang::TargetCXXABI::GenericARM: + case clang::TargetCXXABI::iOS: + case clang::TargetCXXABI::WatchOS: + case clang::TargetCXXABI::GenericMIPS: + case clang::TargetCXXABI::GenericItanium: + case clang::TargetCXXABI::WebAssembly: + case clang::TargetCXXABI::XL: + return CreateItaniumCXXABI(CGM); + case clang::TargetCXXABI::Microsoft: + llvm_unreachable("Windows ABI NYI"); + } + + llvm_unreachable("invalid C++ ABI kind"); +} + +static std::unique_ptr +createTargetLoweringInfo(LowerModule &LM) { + const clang::TargetInfo &Target = LM.getTarget(); + const llvm::Triple &Triple = Target.getTriple(); + + switch (Triple.getArch()) { + case llvm::Triple::x86_64: { + switch (Triple.getOS()) { + case llvm::Triple::Win32: + llvm_unreachable("Windows ABI NYI"); + default: + return createX86_64TargetLoweringInfo(LM, X86AVXABILevel::None); + } + } + default: + llvm_unreachable("ABI NYI"); + } +} + +LowerModule::LowerModule(CIRLowerContext &C, ModuleOp &module, StringAttr DL, + const clang::TargetInfo &target, + PatternRewriter &rewriter) + : context(C), module(module), Target(target), ABI(createCXXABI(*this)), + types(*this, DL.getValue()), rewriter(rewriter) {} + +const TargetLoweringInfo &LowerModule::getTargetLoweringInfo() { + if (!TheTargetCodeGenInfo) + TheTargetCodeGenInfo = createTargetLoweringInfo(*this); + return *TheTargetCodeGenInfo; +} + +LogicalResult LowerModule::rewriteGlobalFunctionDefinition(FuncOp op, + LowerModule &state) { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + return failure(); +} + +LogicalResult LowerModule::rewriteFunctionCall(CallOp caller, FuncOp callee) { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(caller); + return failure(); +} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h new file mode 100644 index 000000000000..d99d40f90554 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -0,0 +1,77 @@ +//===--- LowerModule.h - Abstracts CIR's module lowering --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/CodeGenModule.h. The queries are +// adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERMODULE_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERMODULE_H + +#include "CIRLowerContext.h" +#include "LowerTypes.h" +#include "TargetLoweringInfo.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/MissingFeatures.h" + +namespace mlir { +namespace cir { + +class LowerModule { + CIRLowerContext &context; + ModuleOp module; + const clang::TargetInfo &Target; + mutable std::unique_ptr TheTargetCodeGenInfo; + std::unique_ptr ABI; + + LowerTypes types; + + PatternRewriter &rewriter; + +public: + LowerModule(CIRLowerContext &C, ModuleOp &module, StringAttr DL, + const clang::TargetInfo &target, PatternRewriter &rewriter); + ~LowerModule() = default; + + // Trivial getters. + LowerTypes &getTypes() { return types; } + CIRLowerContext &getContext() { return context; } + CIRCXXABI &getCXXABI() const { return *ABI; } + const clang::TargetInfo &getTarget() const { return Target; } + MLIRContext *getMLIRContext() { return module.getContext(); } + ModuleOp &getModule() { return module; } + + const TargetLoweringInfo &getTargetLoweringInfo(); + + // FIXME(cir): This would be in ASTContext, not CodeGenModule. + const clang::TargetInfo &getTargetInfo() const { return Target; } + + // FIXME(cir): This would be in ASTContext, not CodeGenModule. + clang::TargetCXXABI::Kind getCXXABIKind() const { + auto kind = getTarget().getCXXABI().getKind(); + assert(::cir::MissingFeatures::langOpts()); + return kind; + } + + // Rewrite CIR FuncOp to match the target ABI. + LogicalResult rewriteGlobalFunctionDefinition(FuncOp op, LowerModule &state); + + // Rewrite CIR CallOp to match the target ABI. + LogicalResult rewriteFunctionCall(CallOp caller, FuncOp callee); +}; + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERMODULE_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp new file mode 100644 index 000000000000..1186da9df1e7 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -0,0 +1,24 @@ +//===--- LowerTypes.cpp - Type translation to target-specific types -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/CodeGenTypes.cpp. The queries +// are adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#include "LowerTypes.h" +#include "LowerModule.h" +#include "mlir/Support/LLVM.h" + +using namespace ::mlir::cir; + +LowerTypes::LowerTypes(LowerModule &LM, StringRef DLString) + : LM(LM), queries(LM.getContext()), Target(LM.getTarget()), + CXXABI(LM.getCXXABI()), + TheABIInfo(LM.getTargetLoweringInfo().getABIInfo()), + mlirContext(LM.getMLIRContext()), DL(LM.getModule()) {} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h new file mode 100644 index 000000000000..395665d47f16 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -0,0 +1,59 @@ +//===--- LowerTypes.cpp - Type lowering for CIR dialect -------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/CodeGenTypes.cpp. The queries +// are adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERTYPES_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERTYPES_H + +#include "ABIInfo.h" +#include "CIRCXXABI.h" +#include "CIRLowerContext.h" +#include "mlir/IR/MLIRContext.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" + +namespace mlir { +namespace cir { + +// Forward declarations. +class LowerModule; + +/// This class organizes lowering to ABI-specific types in CIR. +class LowerTypes { + // FIXME(cir): This abstraction could likely be replaced by a MLIR interface + // or direct queries to CIR types. It here mostly for code parity. + +private: + LowerModule &LM; + CIRLowerContext &queries; + const clang::TargetInfo &Target; + CIRCXXABI &CXXABI; + + // This should not be moved earlier, since its initialization depends on some + // of the previous reference members being already initialized + const ABIInfo &TheABIInfo; + + // Used to build types and other MLIR operations. + MLIRContext *mlirContext; + + ::cir::CIRDataLayout DL; + +public: + LowerTypes(LowerModule &LM, StringRef DLString); + ~LowerTypes() = default; + + LowerModule &getLM() const { return LM; } +}; + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERTYPES_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp new file mode 100644 index 000000000000..8f606940702f --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -0,0 +1,12 @@ +//=== RecordLayoutBuilder.cpp - Helper class for building record layouts ---==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/AST/CGRecordLayoutBuilder.cpp. The +// queries are adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.cpp new file mode 100644 index 000000000000..2502f8f0dfcb --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.cpp @@ -0,0 +1,12 @@ +#include "TargetLoweringInfo.h" + +namespace mlir { +namespace cir { + +TargetLoweringInfo::TargetLoweringInfo(std::unique_ptr Info) + : Info(std::move(Info)) {} + +TargetLoweringInfo::~TargetLoweringInfo() = default; + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h new file mode 100644 index 000000000000..d01b222411e4 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h @@ -0,0 +1,36 @@ +//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics clang/lib/CodeGen/TargetInfo.h. The queries are +// adapted to operate on the CIR dialect, however. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_TARGETINFO_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_TARGETINFO_H + +#include "LowerModule.h" +#include "TargetLoweringInfo.h" + +namespace mlir { +namespace cir { + +/// The AVX ABI level for X86 targets. +enum class X86AVXABILevel { + None, + AVX, + AVX512, +}; + +std::unique_ptr +createX86_64TargetLoweringInfo(LowerModule &CGM, X86AVXABILevel AVXLevel); + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_TARGETINFO_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.cpp new file mode 100644 index 000000000000..ee670e1add4f --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.cpp @@ -0,0 +1,12 @@ +//===---- TargetLoweringInfo.cpp - Encapsulate target details ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics the TargetCodeGenInfo class from the file +// clang/lib/CodeGen/TargetInfo.cpp. +// +//===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h new file mode 100644 index 000000000000..b264e9ae7b89 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h @@ -0,0 +1,38 @@ +//===---- TargetLoweringInfo.h - Encapsulate target details -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file partially mimics the TargetCodeGenInfo class from the file +// clang/lib/CodeGen/TargetInfo.h. This particular class was isolated in this +// file due to build errors when trying to include the entire TargetInfo.h file. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_TARGETLOWERINGINFO_H +#define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_TARGETLOWERINGINFO_H + +#include "ABIInfo.h" +#include + +namespace mlir { +namespace cir { + +class TargetLoweringInfo { +private: + std::unique_ptr Info; + +public: + TargetLoweringInfo(std::unique_ptr Info); + virtual ~TargetLoweringInfo(); + + const ABIInfo &getABIInfo() const { return *Info; } +}; + +} // namespace cir +} // namespace mlir + +#endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_TARGETLOWERINGINFO_H diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp similarity index 99% rename from clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp rename to clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index b5a1a6fa5a77..a561bc7f2ba5 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -12,7 +12,9 @@ // //===------------------------------------------------------------------===// -#include "LoweringPrepareItaniumCXXABI.h" +// TODO(cir): Refactor this to follow some level of codegen parity. + +#include "../LoweringPrepareItaniumCXXABI.h" #include "clang/AST/CharUnits.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp similarity index 98% rename from clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp rename to clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index b33bcfa94989..83235f50ffee 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -12,7 +12,9 @@ // //===--------------------------------------------------------------------===// -#include "LoweringPrepareItaniumCXXABI.h" +// TODO(cir): Refactor this to follow some level of codegen parity. + +#include "../LoweringPrepareItaniumCXXABI.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp new file mode 100644 index 000000000000..6d2a329e6d2a --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -0,0 +1,31 @@ +#include "ABIInfo.h" +#include "LowerModule.h" +#include "LowerTypes.h" +#include "TargetInfo.h" +#include "clang/CIR/MissingFeatures.h" +#include + +namespace mlir { +namespace cir { + +class X86_64ABIInfo : public ABIInfo { + +public: + X86_64ABIInfo(LowerTypes &CGT, X86AVXABILevel AVXLevel) : ABIInfo(CGT) {} +}; + +class X86_64TargetLoweringInfo : public TargetLoweringInfo { +public: + X86_64TargetLoweringInfo(LowerTypes &LM, X86AVXABILevel AVXLevel) + : TargetLoweringInfo(std::make_unique(LM, AVXLevel)) { + assert(::cir::MissingFeatures::swift()); + } +}; + +std::unique_ptr +createX86_64TargetLoweringInfo(LowerModule &LM, X86AVXABILevel AVXLevel) { + return std::make_unique(LM.getTypes(), AVXLevel); +} + +} // namespace cir +} // namespace mlir From d7706dc37341e9e9840ed3e410b086e2065ed1b8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 6 Jun 2024 12:21:45 -0700 Subject: [PATCH 1617/2301] [CIR][CIRGen] Handle empty structs on aggregates --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 5 ++++- clang/test/CIR/CodeGen/struct-empty.c | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/struct-empty.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index a06bbc7e3453..2f3c2b3c384f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -810,7 +810,10 @@ void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { if (Dest.isPotentiallyAliased() && E->getType().isPODType(CGF.getContext())) { - llvm_unreachable("NYI"); + // For a POD type, just emit a load of the lvalue + a copy, because our + // compound literal might alias the destination. + buildAggLoadOfLValue(E); + return; } AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType()); diff --git a/clang/test/CIR/CodeGen/struct-empty.c b/clang/test/CIR/CodeGen/struct-empty.c new file mode 100644 index 000000000000..07f04e75d767 --- /dev/null +++ b/clang/test/CIR/CodeGen/struct-empty.c @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// CIR: ![[lock:.*]] = !cir.struct +// CIR: ![[fs_struct:.*]] = !cir.structlock = (rwlock_t) { }; } + +// CIR-LABEL: __copy_fs_struct +// CIR: %[[VAL_1:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["fs", init] {alignment = 8 : i64} +// CIR: %[[VAL_2:.*]] = cir.alloca ![[lock]], !cir.ptr, [".compoundliteral"] {alignment = 1 : i64} +// CIR: cir.store {{.*}}, %[[VAL_1]] : !cir.ptr, !cir.ptr> +// CIR: %[[VAL_3:.*]] = cir.load %[[VAL_1]] : !cir.ptr>, !cir.ptr +// CIR: %[[VAL_4:.*]] = cir.get_member %[[VAL_3]][0] {name = "lock"} : !cir.ptr -> !cir.ptr +// CIR: cir.copy %[[VAL_2]] to %[[VAL_4]] : !cir.ptr + +// LLVM-LABEL: __copy_fs_struct +// LLVM: %[[VAL_5:.*]] = getelementptr {{.*}}, {{.*}}, i32 0, i32 0 +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[VAL_5]], ptr {{.*}}, i32 0, i1 false) From 3f1c511b005b9517df50be6c4a11b9ff92c979ad Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 6 Jun 2024 12:29:20 -0700 Subject: [PATCH 1618/2301] [CIR][NFC] Fix multiple warnings from latest PRs --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 3 --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 3 --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 1 + clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 4 +--- 5 files changed, 3 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index a8edf2524335..0c1196a4cb8d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -227,14 +227,11 @@ makeBinaryAtomicValue(CIRGenFunction &cgf, mlir::cir::AtomicFetchKind kind, Address destAddr = checkAtomicAlignment(cgf, expr); auto &builder = cgf.getBuilder(); - auto *ctxt = builder.getContext(); auto intType = builder.getSIntNTy(cgf.getContext().getTypeSize(typ)); mlir::Value val = cgf.buildScalarExpr(expr->getArg(1)); mlir::Type valueType = val.getType(); val = buildToInt(cgf, val, typ, intType); - auto fetchAttr = - mlir::cir::AtomicFetchKindAttr::get(builder.getContext(), kind); auto rmwi = builder.create( cgf.getLoc(expr->getSourceRange()), destAddr.emitRawPointer(), val, kind, ordering, false, /* is volatile */ diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 1f678b47f57f..335d289cbcf5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -631,9 +631,6 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, if (D.getType()->isVariablyModifiedType()) llvm_unreachable("VLAs are NYI"); - // Save the type in case adding the initializer forces a type change. - mlir::Type expectedType = addr.getType(); - auto var = globalOp; // CUDA's local and local static __shared__ variables should not diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 93ab3ee06dea..007dd769ed2f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -655,7 +655,7 @@ CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, for (int i = 1; i < caseAttrCount; ++i) { // If there are multiple case attributes, we need to create a new region auto *region = currLexScope->createSwitchRegion(); - auto *block = builder.createBlock(region); + builder.createBlock(region); } return lastCase; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 0916c7233e33..af6edc6704da 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1150,6 +1150,7 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, case cir::CaseOpKind::Range: assert(attr.getValue().size() == 2 && "range must have two values"); // The print format of the range is the same as anyof + LLVM_FALLTHROUGH; case cir::CaseOpKind::Anyof: { p << ", ["; llvm::interleaveComma(attr.getValue(), p, [&](const Attribute &a) { diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 470ff1dbff3f..285f1a9bfe30 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -356,8 +356,6 @@ class CIRSwitchOpFlattening caseDestinations.push_back(®ion.front()); } break; - default: - llvm_unreachable("unsupported case kind"); } // Previous case is a fallthrough: branch it to this case. @@ -405,7 +403,7 @@ class CIRSwitchOpFlattening constexpr int kSmallRangeThreshold = 64; if ((upperBound - lowerBound) .ult(llvm::APInt(32, kSmallRangeThreshold))) { - for (auto iValue = lowerBound; iValue.sle(upperBound); iValue++) { + for (auto iValue = lowerBound; iValue.sle(upperBound); (void)iValue++) { caseValues.push_back(iValue); caseOperands.push_back(rangeOperands[index]); caseDestinations.push_back(rangeDestinations[index]); From 2afca22121411e8100ae7799334607443f7db6e2 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 7 Jun 2024 06:19:09 +0800 Subject: [PATCH 1619/2301] [CIR][CIRGen] Support OpenCL Vector Types (#613) Resolve #532 . Support CIRGen of `ExtVectorElementExpr` that includes swizzle `v.xyx` and subscription `v.s0`. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 28 ++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 191 +++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 7 + clang/lib/CIR/CodeGen/CIRGenValue.h | 30 +- clang/test/CIR/CodeGen/vectype-ext.cpp | 455 +++++++++++++++++++++ 6 files changed, 715 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/vectype-ext.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 4c0109382eb7..0d25bdf195a6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -802,6 +802,34 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return CIRBaseBuilderTy::createStore(loc, flag, dst); } + mlir::cir::VecShuffleOp + createVecShuffle(mlir::Location loc, mlir::Value vec1, mlir::Value vec2, + llvm::ArrayRef maskAttrs) { + auto vecType = mlir::cast(vec1.getType()); + auto resultTy = mlir::cir::VectorType::get( + getContext(), vecType.getEltType(), maskAttrs.size()); + return CIRBaseBuilderTy::create( + loc, resultTy, vec1, vec2, getArrayAttr(maskAttrs)); + } + + mlir::cir::VecShuffleOp createVecShuffle(mlir::Location loc, mlir::Value vec1, + mlir::Value vec2, + llvm::ArrayRef mask) { + llvm::SmallVector maskAttrs; + for (int32_t idx : mask) { + maskAttrs.push_back(mlir::cir::IntAttr::get(getSInt32Ty(), idx)); + } + + return createVecShuffle(loc, vec1, vec2, maskAttrs); + } + + mlir::cir::VecShuffleOp createVecShuffle(mlir::Location loc, mlir::Value vec1, + llvm::ArrayRef mask) { + // FIXME(cir): Support use cir.vec.shuffle with single vec + // Workaround: pass Vec as both vec1 and vec2 + return createVecShuffle(loc, vec1, vec1, mask); + } + mlir::cir::StoreOp createAlignedStore(mlir::Location loc, mlir::Value val, mlir::Value dst, clang::CharUnits align = clang::CharUnits::One(), diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 505c045f7692..5e597e620059 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -654,9 +654,55 @@ RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { getLoc(Loc), load, LV.getVectorIdx())); } + if (LV.isExtVectorElt()) { + return buildLoadOfExtVectorElementLValue(LV); + } + llvm_unreachable("NYI"); } +int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx, + const mlir::ArrayAttr elts) { + auto elt = mlir::dyn_cast(elts[idx]); + assert(elt && "The indices should be integer attributes"); + return elt.getInt(); +} + +// If this is a reference to a subset of the elements of a vector, create an +// appropriate shufflevector. +RValue CIRGenFunction::buildLoadOfExtVectorElementLValue(LValue LV) { + mlir::Location loc = LV.getExtVectorPointer().getLoc(); + mlir::Value Vec = builder.createLoad(loc, LV.getExtVectorAddress()); + + // HLSL allows treating scalars as one-element vectors. Converting the scalar + // IR value to a vector here allows the rest of codegen to behave as normal. + if (getLangOpts().HLSL && !mlir::isa(Vec.getType())) { + llvm_unreachable("HLSL NYI"); + } + + const mlir::ArrayAttr Elts = LV.getExtVectorElts(); + + // If the result of the expression is a non-vector type, we must be extracting + // a single element. Just codegen as an extractelement. + const auto *ExprVT = LV.getType()->getAs(); + if (!ExprVT) { + int64_t InIdx = getAccessedFieldNo(0, Elts); + mlir::cir::ConstantOp Elt = + builder.getConstInt(loc, builder.getSInt64Ty(), InIdx); + return RValue::get(builder.create(loc, Vec, Elt)); + } + + // Always use shuffle vector to try to retain the original program structure + unsigned NumResultElts = ExprVT->getNumElements(); + + SmallVector Mask; + for (unsigned i = 0; i != NumResultElts; ++i) + Mask.push_back(getAccessedFieldNo(i, Elts)); + + Vec = builder.createVecShuffle(loc, Vec, Mask); + return RValue::get(Vec); +} + RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc) { const CIRGenBitFieldInfo &info = LV.getBitFieldInfo(); @@ -675,6 +721,80 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, return RValue::get(field); } +void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src, + LValue Dst) { + mlir::Location loc = Dst.getExtVectorPointer().getLoc(); + + // HLSL allows storing to scalar values through ExtVector component LValues. + // To support this we need to handle the case where the destination address is + // a scalar. + Address DstAddr = Dst.getExtVectorAddress(); + if (!mlir::isa(DstAddr.getElementType())) { + llvm_unreachable("HLSL NYI"); + } + + // This access turns into a read/modify/write of the vector. Load the input + // value now. + mlir::Value Vec = builder.createLoad(loc, DstAddr); + const mlir::ArrayAttr Elts = Dst.getExtVectorElts(); + + mlir::Value SrcVal = Src.getScalarVal(); + + if (const clang::VectorType *VTy = + Dst.getType()->getAs()) { + unsigned NumSrcElts = VTy->getNumElements(); + unsigned NumDstElts = cast(Vec.getType()).getSize(); + if (NumDstElts == NumSrcElts) { + // Use shuffle vector is the src and destination are the same number of + // elements and restore the vector mask since it is on the side it will be + // stored. + SmallVector Mask(NumDstElts); + for (unsigned i = 0; i != NumSrcElts; ++i) + Mask[getAccessedFieldNo(i, Elts)] = i; + + Vec = builder.createVecShuffle(loc, SrcVal, Mask); + } else if (NumDstElts > NumSrcElts) { + // Extended the source vector to the same length and then shuffle it + // into the destination. + // FIXME: since we're shuffling with undef, can we just use the indices + // into that? This could be simpler. + SmallVector ExtMask; + for (unsigned i = 0; i != NumSrcElts; ++i) + ExtMask.push_back(i); + ExtMask.resize(NumDstElts, -1); + mlir::Value ExtSrcVal = builder.createVecShuffle(loc, SrcVal, ExtMask); + // build identity + SmallVector Mask; + for (unsigned i = 0; i != NumDstElts; ++i) + Mask.push_back(i); + + // When the vector size is odd and .odd or .hi is used, the last element + // of the Elts constant array will be one past the size of the vector. + // Ignore the last element here, if it is greater than the mask size. + if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size()) + llvm_unreachable("NYI"); + + // modify when what gets shuffled in + for (unsigned i = 0; i != NumSrcElts; ++i) + Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts; + Vec = builder.createVecShuffle(loc, Vec, ExtSrcVal, Mask); + } else { + // We should never shorten the vector + llvm_unreachable("unexpected shorten vector length"); + } + } else { + // If the Src is a scalar (not a vector), and the target is a vector it must + // be updating one element. + unsigned InIdx = getAccessedFieldNo(0, Elts); + auto Elt = builder.getSInt64(InIdx, loc); + + Vec = builder.create(loc, Vec, SrcVal, Elt); + } + + builder.createStore(loc, Vec, Dst.getExtVectorAddress(), + Dst.isVolatileQualified()); +} + void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst, bool isInit) { if (!Dst.isSimple()) { @@ -687,6 +807,10 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst, builder.createStore(loc, Vector, Dst.getVectorAddress()); return; } + + if (Dst.isExtVectorElt()) + return buildStoreThroughExtVectorComponentLValue(Src, Dst); + assert(Dst.isBitField() && "NIY LValue type"); mlir::Value result; return buildStoreThroughBitfieldLValue(Src, Dst, result); @@ -980,6 +1104,71 @@ CIRGenFunction::buildPointerToDataMemberBinaryExpr(const BinaryOperator *E) { return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo); } +LValue +CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { + // Emit the base vector as an l-value. + LValue Base; + + // ExtVectorElementExpr's base can either be a vector or pointer to vector. + if (E->isArrow()) { + // If it is a pointer to a vector, emit the address and form an lvalue with + // it. + LValueBaseInfo BaseInfo; + // TODO(cir): Support TBAA + assert(!MissingFeatures::tbaa()); + Address Ptr = buildPointerWithAlignment(E->getBase(), &BaseInfo); + const auto *PT = E->getBase()->getType()->castAs(); + Base = makeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo); + Base.getQuals().removeObjCGCAttr(); + } else if (E->getBase()->isGLValue()) { + // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), + // emit the base as an lvalue. + assert(E->getBase()->getType()->isVectorType()); + Base = buildLValue(E->getBase()); + } else { + // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. + assert(E->getBase()->getType()->isVectorType() && + "Result must be a vector"); + mlir::Value Vec = buildScalarExpr(E->getBase()); + + // Store the vector to memory (because LValue wants an address). + QualType BaseTy = E->getBase()->getType(); + Address VecMem = CreateMemTemp(BaseTy, Vec.getLoc(), "tmp"); + builder.createStore(Vec.getLoc(), Vec, VecMem); + Base = makeAddrLValue(VecMem, BaseTy, AlignmentSource::Decl); + } + + QualType type = + E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); + + // Encode the element access list into a vector of unsigned indices. + SmallVector indices; + E->getEncodedElementAccess(indices); + + if (Base.isSimple()) { + SmallVector attrElts; + for (uint32_t i : indices) { + attrElts.push_back(static_cast(i)); + } + auto elts = builder.getI64ArrayAttr(attrElts); + return LValue::MakeExtVectorElt(Base.getAddress(), elts, type, + Base.getBaseInfo()); + } + assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); + + mlir::ArrayAttr baseElts = Base.getExtVectorElts(); + + // Composite the two indices + SmallVector attrElts; + for (uint32_t i : indices) { + attrElts.push_back(getAccessedFieldNo(i, baseElts)); + } + auto elts = builder.getI64ArrayAttr(attrElts); + + return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), elts, type, + Base.getBaseInfo()); +} + LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { // Comma expressions just emit their LHS then their RHS as an l-value. if (E->getOpcode() == BO_Comma) { @@ -2264,6 +2453,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return buildConditionalOperatorLValue(cast(E)); case Expr::ArraySubscriptExprClass: return buildArraySubscriptExpr(cast(E)); + case Expr::ExtVectorElementExprClass: + return buildExtVectorElementExpr(cast(E)); case Expr::BinaryOperatorClass: return buildBinaryOperatorLValue(cast(E)); case Expr::CompoundAssignOperatorClass: { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 8e57367a35bb..d15a6a6a272a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -328,8 +328,12 @@ class ScalarExprEmitter : public StmtVisitor { E->getSrcExpr()->getType(), E->getType(), E->getSourceRange().getBegin()); } + + mlir::Value VisitExtVectorElementExpr(Expr *E) { + return buildLoadOfLValue(E); + } + mlir::Value VisitMemberExpr(MemberExpr *E); - mlir::Value VisitExtVectorelementExpr(Expr *E) { llvm_unreachable("NYI"); } mlir::Value VisitCompoundLiteralEpxr(CompoundLiteralExpr *E) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 7c1fd6e005e1..9d599985b67b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -763,6 +763,12 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Location Loc, LValueBaseInfo BaseInfo, bool isNontemporal = false); + int64_t getAccessedFieldNo(unsigned idx, const mlir::ArrayAttr elts); + + RValue buildLoadOfExtVectorElementLValue(LValue LV); + + void buildStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst); + RValue buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc); /// Load a scalar value from an address, taking care to appropriately convert @@ -1223,6 +1229,7 @@ class CIRGenFunction : public CIRGenTypeCache { LValue lvalue, bool capturedByInit = false); LValue buildDeclRefLValue(const clang::DeclRefExpr *E); + LValue buildExtVectorElementExpr(const ExtVectorElementExpr *E); LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E); LValue buildCompoundAssignmentLValue(const clang::CompoundAssignOperator *E); LValue buildUnaryOpLValue(const clang::UnaryOperator *E); diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 4862d32df245..408dcdcd605d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -207,7 +207,8 @@ class LValue { unsigned Alignment; mlir::Value V; mlir::Type ElementType; - mlir::Value VectorIdx; // Index for vector subscript + mlir::Value VectorIdx; // Index for vector subscript + mlir::Attribute VectorElts; // ExtVector element subset: V.xyx LValueBaseInfo BaseInfo; const CIRGenBitFieldInfo *BitFieldInfo{0}; @@ -316,6 +317,20 @@ class LValue { return VectorIdx; } + // extended vector elements. + Address getExtVectorAddress() const { + assert(isExtVectorElt()); + return Address(getExtVectorPointer(), ElementType, getAlignment()); + } + mlir::Value getExtVectorPointer() const { + assert(isExtVectorElt()); + return V; + } + mlir::ArrayAttr getExtVectorElts() const { + assert(isExtVectorElt()); + return mlir::cast(VectorElts); + } + static LValue MakeVectorElt(Address vecAddress, mlir::Value Index, clang::QualType type, LValueBaseInfo BaseInfo) { LValue R; @@ -328,6 +343,19 @@ class LValue { return R; } + static LValue MakeExtVectorElt(Address vecAddress, mlir::ArrayAttr elts, + clang::QualType type, + LValueBaseInfo baseInfo) { + LValue R; + R.LVType = ExtVectorElt; + R.V = vecAddress.getPointer(); + R.ElementType = vecAddress.getElementType(); + R.VectorElts = elts; + R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(), + baseInfo); + return R; + } + // bitfield lvalue Address getBitFieldAddress() const { return Address(getBitFieldPointer(), ElementType, getAlignment()); diff --git a/clang/test/CIR/CodeGen/vectype-ext.cpp b/clang/test/CIR/CodeGen/vectype-ext.cpp new file mode 100644 index 000000000000..a68ecdd78f8e --- /dev/null +++ b/clang/test/CIR/CodeGen/vectype-ext.cpp @@ -0,0 +1,455 @@ +// RUN: %clang_cc1 -std=c++17 -fclangir -emit-cir -triple x86_64-unknown-linux-gnu %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -std=c++17 -fclangir -emit-llvm -triple x86_64-unknown-linux-gnu %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// XFAIL: * + +typedef int vi4 __attribute__((ext_vector_type(4))); +typedef int vi2 __attribute__((ext_vector_type(2))); +typedef double vd2 __attribute__((ext_vector_type(2))); +typedef long vl2 __attribute__((ext_vector_type(2))); +typedef unsigned short vus2 __attribute__((ext_vector_type(2))); + +// CIR: cir.func {{@.*vector_int_test.*}} +// LLVM: define void {{@.*vector_int_test.*}} +void vector_int_test(int x) { + + // Vector constant. Not yet implemented. Expected results will change from + // cir.vec.create to cir.const. + vi4 a = { 1, 2, 3, 4 }; + // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : !cir.vector + // LLVM: store <4 x i32> , ptr %{{[0-9]+}}, align 16 + + // Non-const vector initialization. + vi4 b = { x, 5, 6, x + 1 }; + // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : !cir.vector + // LLVM: %[[#X1:]] = load i32, ptr %{{[0-9]+}}, align 4 + // LLVM-NEXT: %[[#X2:]] = load i32, ptr %{{[0-9]+}}, align 4 + // LLVM-NEXT: %[[#SUM:]] = add i32 %[[#X2]], 1 + // LLVM-NEXT: %[[#VEC1:]] = insertelement <4 x i32> undef, i32 %[[#X1]], i64 0 + // LLVM-NEXT: %[[#VEC2:]] = insertelement <4 x i32> %[[#VEC1]], i32 5, i64 1 + // LLVM-NEXT: %[[#VEC3:]] = insertelement <4 x i32> %[[#VEC2]], i32 6, i64 2 + // LLVM-NEXT: %[[#VEC4:]] = insertelement <4 x i32> %[[#VEC3]], i32 %[[#SUM]], i64 3 + // LLVM-NEXT: store <4 x i32> %[[#VEC4]], ptr %{{[0-9]+}}, align 16 + + // Incomplete vector initialization. + vi4 bb = { x, x + 1 }; + // CIR: %[[#zero:]] = cir.const #cir.int<0> : !s32i + // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %[[#zero]], %[[#zero]] : !s32i, !s32i, !s32i, !s32i) : !cir.vector + // LLVM: %[[#X1:]] = load i32, ptr %{{[0-9]+}}, align 4 + // LLVM-NEXT: %[[#X2:]] = load i32, ptr %{{[0-9]+}}, align 4 + // LLVM-NEXT: %[[#SUM:]] = add i32 %[[#X2]], 1 + // LLVM-NEXT: %[[#VEC1:]] = insertelement <4 x i32> undef, i32 %[[#X1]], i64 0 + // LLVM-NEXT: %[[#VEC2:]] = insertelement <4 x i32> %[[#VEC1]], i32 %[[#SUM]], i64 1 + // LLVM-NEXT: %[[#VEC3:]] = insertelement <4 x i32> %[[#VEC2]], i32 0, i64 2 + // LLVM-NEXT: %[[#VEC4:]] = insertelement <4 x i32> %[[#VEC3]], i32 0, i64 3 + // LLVM-NEXT: store <4 x i32> %[[#VEC4]], ptr %{{[0-9]+}}, align 16 + + + // Scalar to vector conversion, a.k.a. vector splat. Only valid as an + // operand of a binary operator, not as a regular conversion. + bb = a + 7; + // CIR: %[[#seven:]] = cir.const #cir.int<7> : !s32i + // CIR: %{{[0-9]+}} = cir.vec.splat %[[#seven]] : !s32i, !cir.vector + // LLVM: %[[#A:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#BB:]] = add <4 x i32> %[[#A]], splat (i32 7) + // LLVM-NEXT: store <4 x i32> %[[#BB]], ptr %{{[0-9]+}}, align 16 + + // Vector to vector conversion + vd2 bbb = { }; + bb = (vi4)bbb; + // CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.vector), !cir.vector + // LLVM: %{{[0-9]+}} = bitcast <2 x double> %{{[0-9]+}} to <4 x i32> + + // Extract element + int c = a[x]; + // CIR: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector + // LLVM: %[[#A:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#X:]] = load i32, ptr %{{[0-9]+}}, align 4 + // LLVM-NEXT: %[[#EXT:]] = extractelement <4 x i32> %[[#A]], i32 %[[#X]] + // LLVM-NEXT: store i32 %[[#EXT]], ptr %{{[0-9]+}}, align 4 + + // Insert element + a[x] = x; + // CIR: %[[#LOADEDVI:]] = cir.load %[[#STORAGEVI:]] : !cir.ptr>, !cir.vector + // CIR: %[[#UPDATEDVI:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVI]][%{{[0-9]+}} : !s32i] : !cir.vector + // CIR: cir.store %[[#UPDATEDVI]], %[[#STORAGEVI]] : !cir.vector, !cir.ptr> + // LLVM: %[[#X1:]] = load i32, ptr %{{[0-9]+}}, align 4 + // LLVM-NEXT: %[[#X2:]] = load i32, ptr %{{[0-9]+}}, align 4 + // LLVM-NEXT: %[[#A:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#INS:]] = insertelement <4 x i32> %[[#A]], i32 %[[#X1]], i32 %[[#X2]] + // LLVM-NEXT: store <4 x i32> %[[#INS]], ptr %{{[0-9]+}}, align 16 + + // Compound assignment + a[x] += a[0]; + // CIR: %[[#RHSCA:]] = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector + // CIR: %[[#LHSCA:]] = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector + // CIR: %[[#SUMCA:]] = cir.binop(add, %[[#LHSCA]], %[[#RHSCA]]) : !s32i + // CIR: cir.vec.insert %[[#SUMCA]], %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector + // LLVM: %[[#A1:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#RHSCA:]] = extractelement <4 x i32> %[[#A1]], i32 0 + // LLVM-NEXT: %[[#X:]] = load i32, ptr %{{[0-9]+}}, align 4 + // LLVM-NEXT: %[[#A2:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#LHSCA:]] = extractelement <4 x i32> %[[#A2]], i32 %[[#X]] + // LLVM-NEXT: %[[#SUMCA:]] = add i32 %[[#LHSCA]], %[[#RHSCA]] + // LLVM-NEXT: %[[#A3:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#RES:]] = insertelement <4 x i32> %[[#A3]], i32 %[[#SUMCA]], i32 %[[#X]] + // LLVM-NEXT: store <4 x i32> %[[#RES]], ptr %{{[0-9]+}}, align 16 + + // Binary arithmetic operations + vi4 d = a + b; + // CIR: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = add <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + vi4 e = a - b; + // CIR: %{{[0-9]+}} = cir.binop(sub, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = sub <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + vi4 f = a * b; + // CIR: %{{[0-9]+}} = cir.binop(mul, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = mul <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + vi4 g = a / b; + // CIR: %{{[0-9]+}} = cir.binop(div, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = sdiv <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + vi4 h = a % b; + // CIR: %{{[0-9]+}} = cir.binop(rem, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = srem <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + vi4 i = a & b; + // CIR: %{{[0-9]+}} = cir.binop(and, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = and <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + vi4 j = a | b; + // CIR: %{{[0-9]+}} = cir.binop(or, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = or <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + vi4 k = a ^ b; + // CIR: %{{[0-9]+}} = cir.binop(xor, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = xor <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + + // Unary arithmetic operations + vi4 l = +a; + // CIR: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#VAL:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: store <4 x i32> %[[#VAL]], ptr %{{[0-9]+}}, align 16 + vi4 m = -a; + // CIR: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#VAL:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#RES:]] = sub <4 x i32> zeroinitializer, %[[#VAL]] + // LLVM-NEXT: store <4 x i32> %[[#RES]], ptr %{{[0-9]+}}, align 16 + vi4 n = ~a; + // CIR: %{{[0-9]+}} = cir.unary(not, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#VAL:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#RES:]] = xor <4 x i32> splat (i32 -1), %[[#VAL]] + // LLVM-NEXT: store <4 x i32> %[[#RES]], ptr %{{[0-9]+}}, align 16 + + // TODO: Ternary conditional operator + + // Comparisons + vi4 o = a == b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = icmp eq <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: %[[#EXT:]] = sext <4 x i1> %[[#RES]] to <4 x i32> + vi4 p = a != b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = icmp ne <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: %[[#EXT:]] = sext <4 x i1> %[[#RES]] to <4 x i32> + vi4 q = a < b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = icmp slt <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: %[[#EXT:]] = sext <4 x i1> %[[#RES]] to <4 x i32> + vi4 r = a > b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = icmp sgt <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: %[[#EXT:]] = sext <4 x i1> %[[#RES]] to <4 x i32> + vi4 s = a <= b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = icmp sle <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: %[[#EXT:]] = sext <4 x i1> %[[#RES]] to <4 x i32> + vi4 t = a >= b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = icmp sge <4 x i32> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: %[[#EXT:]] = sext <4 x i1> %[[#RES]] to <4 x i32> + + // __builtin_shufflevector + vi4 u = __builtin_shufflevector(a, b, 7, 5, 3, 1); + // CIR: %{{[0-9]+}} = cir.vec.shuffle(%{{[0-9]+}}, %{{[0-9]+}} : !cir.vector) [#cir.int<7> : !s64i, #cir.int<5> : !s64i, #cir.int<3> : !s64i, #cir.int<1> : !s64i] : !cir.vector + + // LLVM: %[[#A:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#B:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#SHFL:]] = shufflevector <4 x i32> %[[#A]], <4 x i32> %[[#B]], <4 x i32> + // LLVM-NEXT: store <4 x i32> %[[#SHFL]], ptr %{{[0-9]+}}, align 16 + + vi4 v = __builtin_shufflevector(a, b); + // CIR: %{{[0-9]+}} = cir.vec.shuffle.dynamic %{{[0-9]+}} : !cir.vector, %{{[0-9]+}} : !cir.vector + + // LLVM: %[[#A:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#B:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#IDXMOD:]] = and <4 x i32> %[[#B]], splat (i32 3) + // LLVM-NEXT: %[[#IDX0:]] = extractelement <4 x i32> %[[#IDXMOD]], i64 0 + // LLVM-NEXT: %[[#EXT1:]] = extractelement <4 x i32> %[[#A]], i32 %[[#IDX0]] + // LLVM-NEXT: %[[#INS1:]] = insertelement <4 x i32> undef, i32 %[[#EXT1]], i64 0 + // LLVM-NEXT: %[[#IDX1:]] = extractelement <4 x i32> %[[#IDXMOD]], i64 1 + // LLVM-NEXT: %[[#EXT2:]] = extractelement <4 x i32> %[[#A]], i32 %[[#IDX1]] + // LLVM-NEXT: %[[#INS2:]] = insertelement <4 x i32> %[[#INS1]], i32 %[[#EXT2]], i64 1 + // LLVM-NEXT: %[[#IDX2:]] = extractelement <4 x i32> %[[#IDXMOD]], i64 2 + // LLVM-NEXT: %[[#EXT3:]] = extractelement <4 x i32> %[[#A]], i32 %[[#IDX2]] + // LLVM-NEXT: %[[#INS3:]] = insertelement <4 x i32> %[[#INS2]], i32 %[[#EXT3]], i64 2 + // LLVM-NEXT: %[[#IDX3:]] = extractelement <4 x i32> %[[#IDXMOD]], i64 3 + // LLVM-NEXT: %[[#EXT4:]] = extractelement <4 x i32> %[[#A]], i32 %[[#IDX3]] + // LLVM-NEXT: %[[#INS4:]] = insertelement <4 x i32> %[[#INS3]], i32 %[[#EXT4]], i64 3 + // LLVM-NEXT: store <4 x i32> %[[#INS4]], ptr %{{[0-9]+}}, align 16 +} + +// CIR: cir.func {{@.*vector_double_test.*}} +// LLVM: define void {{@.*vector_double_test.*}} +void vector_double_test(int x, double y) { + // Vector constant. Not yet implemented. Expected results will change from + // cir.vec.create to cir.const. + vd2 a = { 1.5, 2.5 }; + // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : !cir.vector + + // LLVM: store <2 x double> , ptr %{{[0-9]+}}, align 16 + + // Non-const vector initialization. + vd2 b = { y, y + 1.0 }; + // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : !cir.vector + + // LLVM: %[[#Y1:]] = load double, ptr %{{[0-9]+}}, align 8 + // LLVM-NEXT: %[[#Y2:]] = load double, ptr %{{[0-9]+}}, align 8 + // LLVM-NEXT: %[[#SUM:]] = fadd double %[[#Y2]], 1.000000e+00 + // LLVM-NEXT: %[[#VEC1:]] = insertelement <2 x double> undef, double %[[#Y1]], i64 0 + // LLVM-NEXT: %[[#VEC2:]] = insertelement <2 x double> %[[#VEC1]], double %[[#SUM]], i64 1 + // LLVM-NEXT: store <2 x double> %[[#VEC2]], ptr %{{[0-9]+}}, align 16 + + // Incomplete vector initialization + vd2 bb = { y }; + // CIR: [[#dzero:]] = cir.const #cir.fp<0.000000e+00> : !cir.double + // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %[[#dzero]] : !cir.double, !cir.double) : !cir.vector + + // LLVM: %[[#Y1:]] = load double, ptr %{{[0-9]+}}, align 8 + // LLVM-NEXT: %[[#VEC1:]] = insertelement <2 x double> undef, double %[[#Y1]], i64 0 + // LLVM-NEXT: %[[#VEC2:]] = insertelement <2 x double> %[[#VEC1]], double 0.000000e+00, i64 1 + // LLVM-NEXT: store <2 x double> %[[#VEC2]], ptr %{{[0-9]+}}, align 16 + + // Scalar to vector conversion, a.k.a. vector splat. Only valid as an + // operand of a binary operator, not as a regular conversion. + bb = a + 2.5; + // CIR: %[[#twohalf:]] = cir.const #cir.fp<2.500000e+00> : !cir.double + // CIR: %{{[0-9]+}} = cir.vec.splat %[[#twohalf]] : !cir.double, !cir.vector + + // LLVM: %[[#A:]] = load <2 x double>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#BB:]] = fadd <2 x double> %[[#A]], splat (double 2.500000e+00) + // LLVM-NEXT: store <2 x double> %[[#BB]], ptr %{{[0-9]+}}, align 16 + + // Extract element + double c = a[x]; + // CIR: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector + // LLVM: %{{[0-9]+}} = extractelement <2 x double> %{{[0-9]+}}, i32 %{{[0-9]+}} + + // Insert element + a[x] = y; + // CIR: %[[#LOADEDVF:]] = cir.load %[[#STORAGEVF:]] : !cir.ptr>, !cir.vector + // CIR: %[[#UPDATEDVF:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVF]][%{{[0-9]+}} : !s32i] : !cir.vector + // CIR: cir.store %[[#UPDATEDVF]], %[[#STORAGEVF]] : !cir.vector, !cir.ptr> + + // LLVM: %[[#Y:]] = load double, ptr %{{[0-9]+}}, align 8 + // LLVM-NEXT: %[[#X:]] = load i32, ptr %{{[0-9]+}}, align 4 + // LLVM-NEXT: %[[#A:]] = load <2 x double>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#INS:]] = insertelement <2 x double> %[[#A]], double %[[#Y]], i32 %[[#X]] + // LLVM-NEXT: store <2 x double> %[[#INS]], ptr %{{[0-9]+}}, align 16 + + // Binary arithmetic operations + vd2 d = a + b; + // CIR: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = fadd <2 x double> %{{[0-9]+}}, %{{[0-9]+}} + vd2 e = a - b; + // CIR: %{{[0-9]+}} = cir.binop(sub, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}} + vd2 f = a * b; + // CIR: %{{[0-9]+}} = cir.binop(mul, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}} + vd2 g = a / b; + // CIR: %{{[0-9]+}} = cir.binop(div, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + // LLVM: %{{[0-9]+}} = fdiv <2 x double> %{{[0-9]+}}, %{{[0-9]+}} + + // Unary arithmetic operations + vd2 l = +a; + // CIR: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#VAL:]] = load <2 x double>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: store <2 x double> %[[#VAL]], ptr %{{[0-9]+}}, align 16 + vd2 m = -a; + // CIR: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#VAL:]] = load <2 x double>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#RES:]] = fneg <2 x double> %[[#VAL]] + // LLVM-NEXT: store <2 x double> %[[#RES]], ptr %{{[0-9]+}}, align 16 + + // Comparisons + vl2 o = a == b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(eq, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = fcmp oeq <2 x double> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: sext <2 x i1> %[[#RES:]] to <2 x i64> + vl2 p = a != b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(ne, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = fcmp une <2 x double> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: sext <2 x i1> %[[#RES:]] to <2 x i64> + vl2 q = a < b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(lt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = fcmp olt <2 x double> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: sext <2 x i1> %[[#RES:]] to <2 x i64> + vl2 r = a > b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(gt, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = fcmp ogt <2 x double> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: sext <2 x i1> %[[#RES:]] to <2 x i64> + vl2 s = a <= b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(le, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = fcmp ole <2 x double> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: sext <2 x i1> %[[#RES:]] to <2 x i64> + vl2 t = a >= b; + // CIR: %{{[0-9]+}} = cir.vec.cmp(ge, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector, !cir.vector + // LLVM: %[[#RES:]] = fcmp oge <2 x double> %{{[0-9]+}}, %{{[0-9]+}} + // LLVM-NEXT: sext <2 x i1> %[[#RES:]] to <2 x i64> + + // __builtin_convertvector + vus2 w = __builtin_convertvector(a, vus2); + // CIR: %{{[0-9]+}} = cir.cast(float_to_int, %{{[0-9]+}} : !cir.vector), !cir.vector + // LLVM: %{{[0-9]+}} = fptoui <2 x double> %{{[0-9]+}} to <2 x i16> +} + +// CIR: cir.func {{@.*test_load.*}} +// LLVM: define void {{@.*test_load.*}} +void test_load() { + vi4 a = { 1, 2, 3, 4 }; + + vi2 b; + + b = a.wz; + // CIR: %[[#LOAD1:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#SHUFFLE1:]] = cir.vec.shuffle(%[[#LOAD1]], %[[#LOAD1]] : !cir.vector) [#cir.int<3> : !s32i, #cir.int<2> : !s32i] : !cir.vector + // CIR-NEXT: cir.store %[[#SHUFFLE1]], %{{[0-9]+}} : !cir.vector, !cir.ptr> + + // LLVM: %[[#LOAD1:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#SHUFFLE1:]] = shufflevector <4 x i32> %[[#LOAD1]], <4 x i32> %[[#LOAD1]], <2 x i32> + // LLVM-NEXT: store <2 x i32> %[[#SHUFFLE1]], ptr %{{[0-9]+}}, align 8 + + int one_elem_load = a.s2; + // CIR-NEXT: %[[#LOAD8:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#EXTRACT_INDEX:]] = cir.const #cir.int<2> : !s64i + // CIR-NEXT: %[[#EXTRACT1:]] = cir.vec.extract %[[#LOAD8]][%[[#EXTRACT_INDEX]] : !s64i] : !cir.vector + // CIR-NEXT: cir.store %[[#EXTRACT1]], %{{[0-9]+}} : !s32i, !cir.ptr + + // LLVM-NEXT: %[[#LOAD8:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#EXTRACT1:]] = extractelement <4 x i32> %[[#LOAD8]], i64 2 + // LLVM-NEXT: store i32 %[[#EXTRACT1]], ptr %{{[0-9]+}}, align 4 + +} + +// CIR: cir.func {{@.*test_store.*}} +// LLVM: define void {{@.*test_store.*}} +void test_store() { + vi4 a; + // CIR: %[[#PVECA:]] = cir.alloca !cir.vector + // LLVM: %[[#PVECA:]] = alloca <4 x i32> + + vi2 b = {1, 2}; + // CIR-NEXT: %[[#PVECB:]] = cir.alloca !cir.vector + // LLVM-NEXT: %[[#PVECB:]] = alloca <2 x i32> + + a.xy = b; + // CIR: %[[#LOAD4RHS:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#LOAD5LHS:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#SHUFFLE5:]] = cir.vec.shuffle(%[[#LOAD4RHS]], %[[#LOAD4RHS]] : !cir.vector) [#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i] : !cir.vector + // CIR-NEXT: %[[#SHUFFLE6:]] = cir.vec.shuffle(%[[#LOAD5LHS]], %[[#SHUFFLE5]] : !cir.vector) [#cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i] : !cir.vector + // CIR-NEXT: cir.store %[[#SHUFFLE6]], %{{[0-9]+}} : !cir.vector, !cir.ptr> + + // LLVM: %[[#LOAD4RHS:]] = load <2 x i32>, ptr %{{[0-9]+}}, align 8 + // LLVM-NEXT: %[[#LOAD5LHS:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#SHUFFLE5:]] = shufflevector <2 x i32> %[[#LOAD4RHS]], <2 x i32> %[[#LOAD4RHS]], <4 x i32> + // LLVM-NEXT: %[[#SHUFFLE6:]] = shufflevector <4 x i32> %[[#LOAD5LHS]], <4 x i32> %[[#SHUFFLE5]], <4 x i32> + // LLVM-NEXT: store <4 x i32> %[[#SHUFFLE6]], ptr %{{[0-9]+}}, align 16 + + // load single element + a.s0 = 1; + // CIR-NEXT: cir.const #cir.int<1> + // CIR-NEXT: %[[#LOAD7:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#INSERT_INDEX:]] = cir.const #cir.int<0> : !s64i + // CIR-NEXT: %[[#INSERT1:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOAD7]][%[[#INSERT_INDEX]] : !s64i] : !cir.vector + // CIR-NEXT: cir.store %[[#INSERT1]], %{{[0-9]+}} : !cir.vector, !cir.ptr> + + // LLVM-NEXT: %[[#LOAD7:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#INSERT1:]] = insertelement <4 x i32> %[[#LOAD7]], i32 1, i64 0 + // LLVM-NEXT: store <4 x i32> %[[#INSERT1]], ptr %{{[0-9]+}}, align 16 + + // extend length from 2 to 4, then merge two vectors + a.lo = b; + // CIR: %[[#VECB:]] = cir.load %[[#PVECB]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#VECA:]] = cir.load %[[#PVECA]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#EXTVECB:]] = cir.vec.shuffle(%[[#VECB]], %[[#VECB]] : !cir.vector) [#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<-1> : !s32i, #cir.int<-1> : !s32i] : !cir.vector + // CIR-NEXT: %[[#RESULT:]] = cir.vec.shuffle(%[[#VECA]], %[[#EXTVECB]] : !cir.vector) [#cir.int<4> : !s32i, #cir.int<5> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i] : !cir.vector + // CIR-NEXT: cir.store %[[#RESULT]], %[[#PVECA]] : !cir.vector, !cir.ptr> + + // LLVM: %[[#VECB:]] = load <2 x i32>, ptr %[[#PVECB]], align 8 + // LLVM-NEXT: %[[#VECA:]] = load <4 x i32>, ptr %[[#PVECA]], align 16 + // LLVM-NEXT: %[[#EXTVECB:]] = shufflevector <2 x i32> %[[#VECB]], <2 x i32> %[[#VECB]], <4 x i32> + // LLVM-NEXT: %[[#RESULT:]] = shufflevector <4 x i32> %[[#VECA]], <4 x i32> %[[#EXTVECB]], <4 x i32> + // LLVM-NEXT: store <4 x i32> %[[#RESULT]], ptr %[[#PVECA]], align 16 + +} + +// CIR: cir.func {{@.*test_build_lvalue.*}} +// LLVM: define void {{@.*test_build_lvalue.*}} +void test_build_lvalue() { + // special cases only + + vi4 *pv, v; + + // CIR-NEXT: %[[#ALLOCAPV:]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["pv"] {alignment = 8 : i64} + // CIR-NEXT: %[[#ALLOCAV:]] = cir.alloca !cir.vector, !cir.ptr>, ["v"] {alignment = 16 : i64} + // CIR-NEXT: %[[#ALLOCAS:]] = cir.alloca !s32i, !cir.ptr, ["s", init] {alignment = 4 : i64} + // CIR-NEXT: %[[#ALLOCATMP:]] = cir.alloca !cir.vector, !cir.ptr>, ["tmp"] {alignment = 16 : i64} + // CIR-NEXT: %[[#ALLOCAR:]] = cir.alloca !s32i, !cir.ptr, ["r", init] {alignment = 4 : i64} + + // LLVM-NEXT: %[[#ALLOCAPV:]] = alloca ptr, i64 1, align 8 + // LLVM-NEXT: %[[#ALLOCAV:]] = alloca <4 x i32>, i64 1, align 16 + // LLVM-NEXT: %[[#ALLOCAS:]] = alloca i32, i64 1, align 4 + // LLVM-NEXT: %[[#ALLOCATMP:]] = alloca <4 x i32>, i64 1, align 16 + // LLVM-NEXT: %[[#ALLOCAR:]] = alloca i32, i64 1, align 4 + + pv->x = 99; + // CIR-NEXT: %[[#VAL:]] = cir.const #cir.int<99> : !s32i + // CIR-NEXT: %[[#PV:]] = cir.load %[[#ALLOCAPV]] : !cir.ptr>>, !cir.ptr> + // CIR-NEXT: %[[#V:]] = cir.load %[[#PV]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#IDX:]] = cir.const #cir.int<0> : !s64i + // CIR-NEXT: %[[#RESULT:]] = cir.vec.insert %[[#VAL]], %[[#V]][%[[#IDX]] : !s64i] : !cir.vector + // CIR-NEXT: cir.store %[[#RESULT]], %[[#PV]] : !cir.vector, !cir.ptr> + + // LLVM-NEXT: %[[#PV:]] = load ptr, ptr %[[#ALLOCAPV]], align 8 + // LLVM-NEXT: %[[#V:]] = load <4 x i32>, ptr %[[#PV]], align 16 + // LLVM-NEXT: %[[#RESULT:]] = insertelement <4 x i32> %[[#V]], i32 99, i64 0 + // LLVM-NEXT: store <4 x i32> %[[#RESULT]], ptr %[[#PV]], align 16 + + int s = (v+v).x; + + // CIR-NEXT: %[[#LOAD1:]] = cir.load %[[#ALLOCAV]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#LOAD2:]] = cir.load %[[#ALLOCAV]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#SUM:]] = cir.binop(add, %[[#LOAD1]], %[[#LOAD2]]) : !cir.vector + // CIR-NEXT: cir.store %[[#SUM]], %[[#ALLOCATMP]] : !cir.vector, !cir.ptr> + // CIR-NEXT: %[[#TMP:]] = cir.load %[[#ALLOCATMP]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#IDX:]] = cir.const #cir.int<0> : !s64i + // CIR-NEXT: %[[#RESULT:]] = cir.vec.extract %[[#TMP]][%[[#IDX]] : !s64i] : !cir.vector + // CIR-NEXT: cir.store %[[#RESULT]], %[[#ALLOCAS]] : !s32i, !cir.ptr + + // LLVM-NEXT: %[[#LOAD1:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#LOAD2:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 + // LLVM-NEXT: %[[#SUM:]] = add <4 x i32> %[[#LOAD1]], %[[#LOAD2]] + // LLVM-NEXT: store <4 x i32> %[[#SUM]], ptr %[[#ALLOCATMP]], align 16 + // LLVM-NEXT: %[[#TMP:]] = load <4 x i32>, ptr %[[#ALLOCATMP]], align 16 + // LLVM-NEXT: %[[#RESULT:]] = extractelement <4 x i32> %[[#TMP]], i64 0 + // LLVM-NEXT: store i32 %[[#RESULT]], ptr %[[#ALLOCAS]], align 4 + + int r = v.xy.x; + // CIR-NEXT: %[[#V:]] = cir.load %[[#ALLOCAV]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#IDX:]] = cir.const #cir.int<0> : !s64i + // CIR-NEXT: %[[#RESULT:]] = cir.vec.extract %[[#V]][%[[#IDX]] : !s64i] : !cir.vector + // CIR-NEXT: cir.store %[[#RESULT]], %[[#ALLOCAR]] : !s32i, !cir.ptr + + // LLVM-NEXT: %[[#V:]] = load <4 x i32>, ptr %[[#ALLOCAV]], align 16 + // LLVM-NEXT: %[[#RESULT:]] = extractelement <4 x i32> %[[#V]], i64 0 + // LLVM-NEXT: store i32 %[[#RESULT]], ptr %[[#ALLOCAR]], align 4 + +} From d42d45312b2018a952453d4998cc74ff06c32398 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 6 Jun 2024 15:21:38 -0700 Subject: [PATCH 1620/2301] Revert "[CIR][Pipeline] Support -fclangir-analysis-only (#638)" This reverts commit f4d538f4245e3d18187dee48e86a214391bc54cd. It's causing a circular dependency in shared lib builds. See https://github.com/llvm/clangir/issues/655 --- .../clang/CIRFrontendAction/CIRGenAction.h | 3 --- .../clang/CIRFrontendAction/CIRGenConsumer.h | 0 clang/include/clang/Driver/Options.td | 5 ---- .../include/clang/Frontend/FrontendOptions.h | 7 +----- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 8 ------ clang/lib/CodeGen/CMakeLists.txt | 8 ------ clang/lib/CodeGen/CodeGenAction.cpp | 25 ++++--------------- clang/lib/Driver/ToolChains/Clang.cpp | 9 ------- clang/lib/Frontend/CompilerInvocation.cpp | 4 +-- clang/test/CIR/CodeGen/analysis-only.cpp | 8 ------ .../CIR/Transforms/lifetime-check-agg.cpp | 1 - clang/test/CIR/analysis-only.cpp | 2 -- 12 files changed, 8 insertions(+), 72 deletions(-) delete mode 100644 clang/include/clang/CIRFrontendAction/CIRGenConsumer.h delete mode 100644 clang/test/CIR/CodeGen/analysis-only.cpp delete mode 100644 clang/test/CIR/analysis-only.cpp diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h index bcfca9bfcd89..74d5e5e32611 100644 --- a/clang/include/clang/CIRFrontendAction/CIRGenAction.h +++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h @@ -120,9 +120,6 @@ class EmitObjAction : public CIRGenAction { EmitObjAction(mlir::MLIRContext *mlirCtx = nullptr); }; -std::unique_ptr -createCIRAnalysisOnlyConsumer(clang::CompilerInstance &); - } // namespace cir #endif diff --git a/clang/include/clang/CIRFrontendAction/CIRGenConsumer.h b/clang/include/clang/CIRFrontendAction/CIRGenConsumer.h deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 3a8e02d7dfef..a8651fcffb34 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3104,11 +3104,6 @@ defm clangir_direct_lowering : BoolFOption<"clangir-direct-lowering", FrontendOpts<"ClangIRDirectLowering">, DefaultTrue, PosFlag, NegFlag>; -defm clangir_analysis_only : BoolFOption<"clangir-analysis-only", - FrontendOpts<"ClangIRAnalysisOnly">, DefaultFalse, - PosFlag, - NegFlag>; def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, Group, HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 5bcf873ff98f..5bff487c2068 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -451,10 +451,6 @@ class FrontendOptions { // Enable Clang IR call conv lowering pass. unsigned ClangIREnableCallConvLowering : 1; - // Enable Clang IR analysis only pipeline that uses tranditional code gen - // pipeline. - unsigned ClangIRAnalysisOnly : 1; - CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. @@ -654,8 +650,7 @@ class FrontendOptions { ClangIRDisablePasses(false), ClangIRDisableCIRVerifier(false), ClangIRDisableEmitCXXDefault(false), ClangIRLifetimeCheck(false), ClangIRIdiomRecognizer(false), ClangIRLibOpt(false), - ClangIRAnalysisOnly(false), TimeTraceGranularity(500), - TimeTraceVerbose(false) {} + TimeTraceGranularity(500), TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file /// extension. For example, "c" would return Language::C. diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 0f6963c11fda..0a570a3c783c 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -471,11 +471,3 @@ EmitLLVMAction::EmitLLVMAction(mlir::MLIRContext *_MLIRContext) void EmitObjAction::anchor() {} EmitObjAction::EmitObjAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitObj, _MLIRContext) {} - -std::unique_ptr -cir::createCIRAnalysisOnlyConsumer(clang::CompilerInstance &ci) { - return std::make_unique( - CIRGenAction::OutputType::None, ci, ci.getDiagnostics(), - &ci.getVirtualFileSystem(), ci.getHeaderSearchOpts(), ci.getCodeGenOpts(), - ci.getTargetOpts(), ci.getLangOpts(), ci.getFrontendOpts(), nullptr); -} diff --git a/clang/lib/CodeGen/CMakeLists.txt b/clang/lib/CodeGen/CMakeLists.txt index 2327420ec673..868ec847b963 100644 --- a/clang/lib/CodeGen/CMakeLists.txt +++ b/clang/lib/CodeGen/CMakeLists.txt @@ -54,13 +54,6 @@ if(MSVC AND NOT CMAKE_CXX_COMPILER_ID MATCHES Clang endif() endif() -set(conditional_link_libs) -if(CLANG_ENABLE_CIR) -list(APPEND conditional_link_libs - clangCIRFrontendAction - ) -endif() - add_clang_library(clangCodeGen ABIInfo.cpp ABIInfoImpl.cpp @@ -165,5 +158,4 @@ add_clang_library(clangCodeGen clangFrontend clangLex clangSerialization - ${conditional_link_libs} ) diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp index 0ff8ba3b7eda..7aa3639cabf3 100644 --- a/clang/lib/CodeGen/CodeGenAction.cpp +++ b/clang/lib/CodeGen/CodeGenAction.cpp @@ -21,10 +21,6 @@ #include "clang/Basic/LangStandard.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" -#include "clang/Config/config.h" -#if CLANG_ENABLE_CIR -#include "clang/CIRFrontendAction/CIRGenAction.h" -#endif #include "clang/CodeGen/BackendUtil.h" #include "clang/CodeGen/ModuleBuilder.h" #include "clang/Driver/DriverDiagnostic.h" @@ -993,25 +989,14 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { CI.getPreprocessor().addPPCallbacks(std::move(Callbacks)); } - std::vector> AdditionalConsumers; - AdditionalConsumers.reserve(2); - if (CI.getFrontendOpts().GenReducedBMI && !CI.getFrontendOpts().ModuleOutputPath.empty()) { - - AdditionalConsumers.push_back(std::make_unique( + std::vector> Consumers(2); + Consumers[0] = std::make_unique( CI.getPreprocessor(), CI.getModuleCache(), - CI.getFrontendOpts().ModuleOutputPath)); - } - -#if CLANG_ENABLE_CIR - if (CI.getFrontendOpts().ClangIRAnalysisOnly) - AdditionalConsumers.push_back(cir::createCIRAnalysisOnlyConsumer(CI)); -#endif - - if (!AdditionalConsumers.empty()) { - AdditionalConsumers.push_back(std::move(Result)); - return std::make_unique(std::move(AdditionalConsumers)); + CI.getFrontendOpts().ModuleOutputPath); + Consumers[1] = std::move(Result); + return std::make_unique(std::move(Consumers)); } return std::move(Result); diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index f81689103e4e..c883d88b51ba 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5263,15 +5263,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-fclangir-idiom-recognizer"); } - if (Args.hasArg(options::OPT_fclangir_analysis_only)) { - CmdArgs.push_back("-fclangir-analysis-only"); - - // TODO: We should pass some default analysis configuration here. - - // TODO2: Should we emit some diagnostics if the configurations conflict - // with each other? - } - if (IsOpenMPDevice) { // We have to pass the triple of the host if compiling for an OpenMP device. std::string NormalizedTriple = diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 8e88e8cd79b6..a9307703916e 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3128,8 +3128,8 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Args.hasArg(OPT_clangir_verify_diagnostics)) Opts.ClangIRVerifyDiags = true; - if (Args.hasArg(OPT_fclangir_analysis_only)) - Opts.ClangIRAnalysisOnly = true; + if (Args.hasArg(OPT_fclangir_call_conv_lowering)) + Opts.ClangIREnableCallConvLowering = true; if (const Arg *A = Args.getLastArg(OPT_fclangir_lifetime_check, OPT_fclangir_lifetime_check_EQ)) { diff --git a/clang/test/CIR/CodeGen/analysis-only.cpp b/clang/test/CIR/CodeGen/analysis-only.cpp deleted file mode 100644 index 7f427f0de92f..000000000000 --- a/clang/test/CIR/CodeGen/analysis-only.cpp +++ /dev/null @@ -1,8 +0,0 @@ -// Check `-fclangir-analysis-only` would generate code correctly. -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir-analysis-only -std=c++20 \ -// RUN: -O2 -emit-llvm %s -o - | FileCheck %s - -extern "C" void foo() {} - -// CHECK: define{{.*}} @foo( - diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp index ebfe00c2ad56..fb89c0e6fd8f 100644 --- a/clang/test/CIR/Transforms/lifetime-check-agg.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -1,5 +1,4 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir-analysis-only -fclangir-lifetime-check="history=all;remarks=all" %s -clangir-verify-diagnostics -emit-obj -o /dev/null typedef enum SType { INFO_ENUM_0 = 9, diff --git a/clang/test/CIR/analysis-only.cpp b/clang/test/CIR/analysis-only.cpp deleted file mode 100644 index 7dc58250b91b..000000000000 --- a/clang/test/CIR/analysis-only.cpp +++ /dev/null @@ -1,2 +0,0 @@ -// RUN: %clang %s -fclangir-analysis-only -### -c %s 2>&1 | FileCheck %s -// CHECK: "-fclangir-analysis-only" From cb0a8db2a88ec032c5cee22735b7323d22ef0ad9 Mon Sep 17 00:00:00 2001 From: Piggy Date: Fri, 7 Jun 2024 06:54:32 +0800 Subject: [PATCH 1621/2301] [CIR][CIRGen] Create a new block after break and continue (#611) Without this patch, CIR CodeGen continue to generate in the same block after `cir.break` and `cir.continue`, which would cause verification error because `cir.break` and `cir.continue` should appear at the end of blocks. This patch creates a new dangling block after generating `cir.break` and `cir.continue` to fix the issue. This will fix #323. --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 21 ++++---- clang/test/CIR/CodeGen/goto.cpp | 20 ------- clang/test/CIR/CodeGen/loop.cpp | 52 +++++++++++++++++++ clang/test/CIR/CodeGen/return.cpp | 19 +++++++ .../switch-unreachable-after-break.cpp | 22 ++++++++ clang/test/CIR/CodeGen/switch.cpp | 20 +++++++ 6 files changed, 123 insertions(+), 31 deletions(-) create mode 100644 clang/test/CIR/CodeGen/switch-unreachable-after-break.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 007dd769ed2f..ba7aa4435ab0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -551,20 +551,11 @@ mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { // info support just yet, look at this again once we have it. assert(builder.getInsertionBlock() && "not yet implemented"); - mlir::Block *currBlock = builder.getBlock(); - mlir::Block *gotoBlock = currBlock; - if (!currBlock->empty() && - currBlock->back().hasTrait()) { - gotoBlock = builder.createBlock(builder.getBlock()->getParent()); - builder.setInsertionPointToEnd(gotoBlock); - } - - // A goto marks the end of a block, create a new one for codegen after - // buildGotoStmt can resume building in that block. - builder.create(getLoc(S.getSourceRange()), S.getLabel()->getName()); + // A goto marks the end of a block, create a new one for codegen after + // buildGotoStmt can resume building in that block. // Insert the new block to continue codegen after goto. builder.createBlock(builder.getBlock()->getParent()); @@ -597,11 +588,19 @@ mlir::LogicalResult CIRGenFunction::buildLabel(const LabelDecl *D) { mlir::LogicalResult CIRGenFunction::buildContinueStmt(const clang::ContinueStmt &S) { builder.createContinue(getLoc(S.getContinueLoc())); + + // Insert the new block to continue codegen after the continue statement. + builder.createBlock(builder.getBlock()->getParent()); + return mlir::success(); } mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) { builder.createBreak(getLoc(S.getBreakLoc())); + + // Insert the new block to continue codegen after the break statement. + builder.createBlock(builder.getBlock()->getParent()); + return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index dc36517863ef..06870feba910 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -101,26 +101,6 @@ int shouldGenBranch(int x) { // NOFLAT: ^bb1: // NOFLAT: cir.label "err" -int shouldCreateBlkForGoto(int a) { - switch (a) { - case(42): - break; - goto exit; - default: - return 0; - }; - -exit: - return -1; - -} -// NOFLAT: cir.func @_Z22shouldCreateBlkForGotoi -// NOFLAT: case (equal, 42) { -// NOFLAT: cir.break -// NOFLAT: ^bb1: // no predecessors -// NOFLAT: cir.goto "exit" -// NOFLAT: } - void severalLabelsInARow(int a) { int b = a; goto end1; diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 4cda3fba3410..092c8b952472 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -205,3 +205,55 @@ void l6() { // CHECK-NEXT: } // CHECK-NEXT: cir.return // CHECK-NEXT: } + +void unreachable_after_break() { + for (;;) { + break; + int x = 1; + } +} + +// CHECK-NEXT: cir.func @_Z23unreachable_after_breakv() +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %1 = cir.const #true +// CHECK-NEXT: cir.condition(%1) +// CHECK-NEXT: } body { +// CHECK-NEXT: cir.break +// CHECK-NEXT: ^bb1: // no predecessors +// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +void unreachable_after_continue() { + for (;;) { + continue; + int x = 1; + } +} + +// CHECK-NEXT: cir.func @_Z26unreachable_after_continuev() +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %1 = cir.const #true +// CHECK-NEXT: cir.condition(%1) +// CHECK-NEXT: } body { +// CHECK-NEXT: cir.continue +// CHECK-NEXT: ^bb1: // no predecessors +// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/return.cpp b/clang/test/CIR/CodeGen/return.cpp index 54855b003d06..8391e647d46b 100644 --- a/clang/test/CIR/CodeGen/return.cpp +++ b/clang/test/CIR/CodeGen/return.cpp @@ -12,3 +12,22 @@ int &ret0(int &x) { // CHECK: cir.store %2, %1 : !cir.ptr, !cir.ptr> // CHECK: %3 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK: cir.return %3 : !cir.ptr + +int unreachable_after_return() { + return 0; + return 1; +} + +// CHECK: cir.func @_Z24unreachable_after_returnv +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.br ^bb1 +// CHECK-NEXT: ^bb1: // 2 preds: ^bb0, ^bb2 +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i +// CHECK-NEXT: cir.return %2 : !s32i +// CHECK-NEXT: ^bb2: // no predecessors +// CHECK-NEXT: %3 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store %3, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.br ^bb1 +// CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/switch-unreachable-after-break.cpp b/clang/test/CIR/CodeGen/switch-unreachable-after-break.cpp new file mode 100644 index 000000000000..762bf98c6adb --- /dev/null +++ b/clang/test/CIR/CodeGen/switch-unreachable-after-break.cpp @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +void unreachable_after_break(int a) { + switch(a) { + case 0: + break; + break; + int x = 1; + } +} + +int unreachable_after_return(int a) { + switch (a) { + case 0: + return 0; + return 1; + int x = 1; + } + return 2; +} diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index a19a55348ff6..367656d1965a 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -327,3 +327,23 @@ void fallthrough(int x) { // CHECK-NEXT: } // CHECK-NEXT: ] // CHECK-NEXT: } + +int unreachable_after_break_1(int a) { + switch (a) { + case(42): + break; + goto exit; + default: + return 0; + }; + +exit: + return -1; + +} +// CHECK: cir.func @_Z25unreachable_after_break_1i +// CHECK: case (equal, 42) { +// CHECK: cir.break +// CHECK: ^bb1: // no predecessors +// CHECK: cir.goto "exit" +// CHECK: } From 01e26827248e7150c8bd26eb446e4891d558fe7c Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 7 Jun 2024 07:43:30 +0800 Subject: [PATCH 1622/2301] [CIR][CIRGen][LowerToLLVM] Support address space casting (#652) * New `CastKind::addrspace_cast` for `cir.cast` * `TargetCIRGenInfo::performAddrSpaceCast` helper for non-constant values only * CIRGen for address space casting of pointers and references * Lowering to LLVM --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 11 ++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 +- clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 11 +++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 21 ++++++- clang/lib/CIR/CodeGen/TargetInfo.cpp | 11 ++++ clang/lib/CIR/CodeGen/TargetInfo.h | 13 +++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 12 +++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 +++ .../CIR/CodeGen/address-space-conversion.cpp | 57 +++++++++++++++++++ clang/test/CIR/IR/cast.cir | 9 +++ clang/test/CIR/IR/invalid.cir | 25 ++++++++ clang/test/CIR/Transforms/merge-cleanups.cir | 9 +++ 13 files changed, 188 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/address-space-conversion.cpp diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 9c2449f88189..9420b24e42e0 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -16,6 +16,7 @@ #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/IR/FPEnv.h" +#include "clang/CIR/MissingFeatures.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" @@ -328,6 +329,15 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createBitcast(src, getPointerTo(newPointeeTy)); } + mlir::Value createAddrSpaceCast(mlir::Location loc, mlir::Value src, + mlir::Type newTy) { + return createCast(loc, mlir::cir::CastKind::address_space, src, newTy); + } + + mlir::Value createAddrSpaceCast(mlir::Value src, mlir::Type newTy) { + return createAddrSpaceCast(src.getLoc(), src, newTy); + } + mlir::Value createPtrIsNull(mlir::Value ptr) { return createNot(createPtrToBoolCast(ptr)); } @@ -391,6 +401,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { // Creates constant nullptr for pointer type ty. mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { + assert(!MissingFeatures::targetCodeGenInfoGetNullPointer()); return create(loc, ty, getConstPtrAttr(ty, 0)); } diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 634265085535..30f623d4f931 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -69,6 +69,7 @@ def CK_FloatToBoolean : I32EnumAttrCase<"float_to_bool", 10>; def CK_BooleanToIntegral : I32EnumAttrCase<"bool_to_int", 11>; def CK_IntegralToFloat : I32EnumAttrCase<"int_to_float", 12>; def CK_BooleanToFloat : I32EnumAttrCase<"bool_to_float", 13>; +def CK_AddressSpaceConversion : I32EnumAttrCase<"address_space", 14>; def CastKind : I32EnumAttr< "CastKind", @@ -76,7 +77,8 @@ def CastKind : I32EnumAttr< [CK_IntegralToBoolean, CK_ArrayToPointerDecay, CK_IntegralCast, CK_BitCast, CK_FloatingCast, CK_PtrToBoolean, CK_FloatToIntegral, CK_IntegralToPointer, CK_PointerToIntegral, CK_FloatToBoolean, - CK_BooleanToIntegral, CK_IntegralToFloat, CK_BooleanToFloat]> { + CK_BooleanToIntegral, CK_IntegralToFloat, CK_BooleanToFloat, + CK_AddressSpaceConversion]> { let cppNamespace = "::mlir::cir"; } @@ -98,6 +100,7 @@ def CastOp : CIR_Op<"cast", [Pure]> { - `ptr_to_bool` - `bool_to_int` - `bool_to_float` + - `address_space` This is effectively a subset of the rules from `llvm-project/clang/include/clang/AST/OperationKinds.def`; but note that some diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 5b271bcc4d37..12255d409a75 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -155,6 +155,7 @@ struct MissingFeatures { static bool checkFunctionCallABI() { return false; } static bool zeroInitializer() { return false; } static bool targetCodeGenInfoIsProtoCallVariadic() { return false; } + static bool targetCodeGenInfoGetNullPointer() { return false; } static bool chainCalls() { return false; } static bool operandBundles() { return false; } static bool exceptions() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 5e597e620059..62640392d971 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -17,6 +17,7 @@ #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" #include "CIRGenValue.h" +#include "TargetInfo.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/GlobalDecl.h" @@ -1954,7 +1955,15 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { assert(0 && "NYI"); } case CK_AddressSpaceConversion: { - assert(0 && "NYI"); + LValue LV = buildLValue(E->getSubExpr()); + QualType DestTy = getContext().getPointerType(E->getType()); + mlir::Value V = getTargetHooks().performAddrSpaceCast( + *this, LV.getPointer(), E->getSubExpr()->getType().getAddressSpace(), + E->getType().getAddressSpace(), ConvertType(DestTy)); + assert(!MissingFeatures::tbaa()); + return makeAddrLValue(Address(V, getTypes().convertTypeForMem(E->getType()), + LV.getAddress().getAlignment()), + E->getType(), LV.getBaseInfo()); } case CK_ObjCObjectLValueCast: { assert(0 && "NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index d15a6a6a272a..85e795d79537 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -14,6 +14,7 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" +#include "TargetInfo.h" #include "clang/CIR/MissingFeatures.h" #include "clang/AST/StmtVisitor.h" @@ -1514,8 +1515,24 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { return CGF.getBuilder().createBitcast(CGF.getLoc(E->getSourceRange()), Src, DstTy); } - case CK_AddressSpaceConversion: - llvm_unreachable("NYI"); + case CK_AddressSpaceConversion: { + Expr::EvalResult Result; + if (E->EvaluateAsRValue(Result, CGF.getContext()) && + Result.Val.isNullPointer()) { + // If E has side effect, it is emitted even if its final result is a + // null pointer. In that case, a DCE pass should be able to + // eliminate the useless instructions emitted during translating E. + if (Result.HasSideEffects) { + llvm_unreachable("NYI"); + } + return CGF.CGM.buildNullConstant(DestTy, CGF.getLoc(E->getExprLoc())); + } + // Since target may map different address spaces in AST to the same address + // space, an address space conversion may end up as a bitcast. + return CGF.CGM.getTargetCIRGenInfo().performAddrSpaceCast( + CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(), + DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy)); + } case CK_AtomicToNonAtomic: llvm_unreachable("NYI"); case CK_NonAtomicToAtomic: diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index dc5ab92b4121..121bc2f023c1 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -419,6 +419,17 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { return ABIArgInfo::getDirect(ResType); } +mlir::Value TargetCIRGenInfo::performAddrSpaceCast( + CIRGenFunction &CGF, mlir::Value Src, clang::LangAS SrcAddr, + clang::LangAS DestAddr, mlir::Type DestTy, bool IsNonNull) const { + // Since target may map different address spaces in AST to the same address + // space, an address space conversion may end up as a bitcast. + if (auto globalOp = Src.getDefiningOp()) + llvm_unreachable("Global ops addrspace cast NYI"); + // Try to preserve the source's name to make IR more readable. + return CGF.getBuilder().createAddrSpaceCast(Src, DestTy); +} + const TargetCIRGenInfo &CIRGenModule::getTargetCIRGenInfo() { if (TheTargetCIRGenInfo) return *TheTargetCIRGenInfo; diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index e4fee4f2c330..ca84cac72ed8 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -23,6 +23,7 @@ namespace cir { class CIRGenFunction; +class CIRGenModule; /// This class organizes various target-specific codegeneration issues, like /// target-specific attributes, builtins and so on. @@ -65,6 +66,18 @@ class TargetCIRGenInfo { return clang::LangAS::Default; } + /// Perform address space cast of an expression of pointer type. + /// \param V is the value to be casted to another address space. + /// \param SrcAddr is the language address space of \p V. + /// \param DestAddr is the targeted language address space. + /// \param DestTy is the destination pointer type. + /// \param IsNonNull is the flag indicating \p V is known to be non null. + virtual mlir::Value performAddrSpaceCast(CIRGenFunction &CGF, mlir::Value V, + clang::LangAS SrcAddr, + clang::LangAS DestAddr, + mlir::Type DestTy, + bool IsNonNull = false) const; + virtual ~TargetCIRGenInfo() {} }; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index af6edc6704da..6249f3f5ffd0 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -496,6 +496,15 @@ LogicalResult CastOp::verify() { return emitOpError() << "requires !cir.float type for result"; return success(); } + case cir::CastKind::address_space: { + auto srcPtrTy = srcType.dyn_cast(); + auto resPtrTy = resType.dyn_cast(); + if (!srcPtrTy || !resPtrTy) + return emitOpError() << "requires !cir.ptr type for source and result"; + if (srcPtrTy.getPointee() != resPtrTy.getPointee()) + return emitOpError() << "requires two types differ in addrspace only"; + return success(); + } } llvm_unreachable("Unknown CastOp kind?"); @@ -514,7 +523,8 @@ OpFoldResult CastOp::fold(FoldAdaptor adaptor) { return foldResults[0].get(); return {}; } - case mlir::cir::CastKind::bitcast: { + case mlir::cir::CastKind::bitcast: + case mlir::cir::CastKind::address_space: { return getSrc(); } default: diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5663014641ce..214837ec4722 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -753,6 +753,14 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { mlir::cir::CmpOpKind::ne, castOp.getSrc(), null); break; } + case mlir::cir::CastKind::address_space: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp( + castOp, llvmDstTy, llvmSrcVal); + break; + } } return mlir::success(); diff --git a/clang/test/CIR/CodeGen/address-space-conversion.cpp b/clang/test/CIR/CodeGen/address-space-conversion.cpp new file mode 100644 index 000000000000..84adaa59ac51 --- /dev/null +++ b/clang/test/CIR/CodeGen/address-space-conversion.cpp @@ -0,0 +1,57 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +using pi1_t = int __attribute__((address_space(1))) *; +using pi2_t = int __attribute__((address_space(2))) *; + +using ri1_t = int __attribute__((address_space(1))) &; +using ri2_t = int __attribute__((address_space(2))) &; + +// CIR: cir.func @{{.*test_ptr.*}} +// LLVM: define void @{{.*test_ptr.*}} +void test_ptr() { + pi1_t ptr1; + pi2_t ptr2 = (pi2_t)ptr1; + // CIR: %[[#PTR1:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.ptr + // CIR-NEXT: %[[#CAST:]] = cir.cast(address_space, %[[#PTR1]] : !cir.ptr), !cir.ptr + // CIR-NEXT: cir.store %[[#CAST]], %{{[0-9]+}} : !cir.ptr, !cir.ptr> + + // LLVM: %[[#PTR1:]] = load ptr addrspace(1), ptr %{{[0-9]+}}, align 8 + // LLVM-NEXT: %[[#CAST:]] = addrspacecast ptr addrspace(1) %[[#PTR1]] to ptr addrspace(2) + // LLVM-NEXT: store ptr addrspace(2) %[[#CAST]], ptr %{{[0-9]+}}, align 8 +} + +// CIR: cir.func @{{.*test_ref.*}} +// LLVM: define void @{{.*test_ref.*}} +void test_ref() { + pi1_t ptr; + ri1_t ref1 = *ptr; + ri2_t ref2 = (ri2_t)ref1; + // CIR: %[[#DEREF:]] = cir.load deref %{{[0-9]+}} : !cir.ptr>, !cir.ptr + // CIR-NEXT: cir.store %[[#DEREF]], %[[#ALLOCAREF1:]] : !cir.ptr, !cir.ptr> + // CIR-NEXT: %[[#REF1:]] = cir.load %[[#ALLOCAREF1]] : !cir.ptr>, !cir.ptr + // CIR-NEXT: %[[#CAST:]] = cir.cast(address_space, %[[#REF1]] : !cir.ptr), !cir.ptr + // CIR-NEXT: cir.store %[[#CAST]], %{{[0-9]+}} : !cir.ptr, !cir.ptr> + + // LLVM: %[[#DEREF:]] = load ptr addrspace(1), ptr %{{[0-9]+}}, align 8 + // LLVM-NEXT: store ptr addrspace(1) %[[#DEREF]], ptr %[[#ALLOCAREF1:]], align 8 + // LLVM-NEXT: %[[#REF1:]] = load ptr addrspace(1), ptr %[[#ALLOCAREF1]], align 8 + // LLVM-NEXT: %[[#CAST:]] = addrspacecast ptr addrspace(1) %[[#REF1]] to ptr addrspace(2) + // LLVM-NEXT: store ptr addrspace(2) %[[#CAST]], ptr %{{[0-9]+}}, align 8 +} + +// CIR: cir.func @{{.*test_nullptr.*}} +// LLVM: define void @{{.*test_nullptr.*}} +void test_nullptr() { + constexpr pi1_t null1 = nullptr; + pi2_t ptr = (pi2_t)null1; + // CIR: %[[#NULL1:]] = cir.const #cir.ptr : !cir.ptr + // CIR-NEXT: cir.store %[[#NULL1]], %{{[0-9]+}} : !cir.ptr, !cir.ptr> + // CIR-NEXT: %[[#NULL2:]] = cir.const #cir.ptr : !cir.ptr + // CIR-NEXT: cir.store %[[#NULL2]], %{{[0-9]+}} : !cir.ptr, !cir.ptr> + + // LLVM: store ptr addrspace(1) null, ptr %{{[0-9]+}}, align 8 + // LLVM-NEXT: store ptr addrspace(2) null, ptr %{{[0-9]+}}, align 8 +} diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index 986ed13a906e..6511529b32ee 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -15,10 +15,19 @@ module { %2 = cir.cast(bitcast, %p : !cir.ptr), !cir.ptr cir.return } + + cir.func @addrspace_cast(%arg0: !cir.ptr) { + %0 = cir.cast(address_space, %arg0 : !cir.ptr), !cir.ptr + cir.return + } } // CHECK: cir.func @yolo(%arg0: !s32i) // CHECK: %1 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool // CHECK: %2 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr + // CHECK: cir.func @bitcast // CHECK: %0 = cir.cast(bitcast, %arg0 : !cir.ptr), !cir.ptr + +// CHECK: cir.func @addrspace_cast +// CHECK: %0 = cir.cast(address_space, %arg0 : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 2ed718d6176f..63a6fd97b4c9 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -300,6 +300,31 @@ cir.func @cast24(%p : !u32i) { // ----- +!u32i = !cir.int +!u64i = !cir.int +cir.func @cast25(%p : !cir.ptr) { + %0 = cir.cast(address_space, %p : !cir.ptr), !cir.ptr // expected-error {{requires two types differ in addrspace only}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @cast26(%p : !cir.ptr) { + %0 = cir.cast(address_space, %p : !cir.ptr), !u64i // expected-error {{requires !cir.ptr type for source and result}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @cast27(%p : !u64i) { + %0 = cir.cast(address_space, %p : !u64i), !cir.ptr // expected-error {{requires !cir.ptr type for source and result}} + cir.return +} + +// ----- + !u32i = !cir.int !u8i = !cir.int module { diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index f6def4d34107..9a87ad4b0968 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -126,4 +126,13 @@ module { cir.return %0 : !cir.ptr } + // Should remove redundant address space casts. + // CHECK-LABEL: @addrspacecastfold + // CHECK: %[[ARG0:.+]]: !cir.ptr + // CHECK: cir.return %[[ARG0]] : !cir.ptr + cir.func @addrspacecastfold(%arg0: !cir.ptr) -> !cir.ptr { + %0 = cir.cast(address_space, %arg0: !cir.ptr), !cir.ptr + cir.return %0 : !cir.ptr + } + } From e4663dfdc0f3b29d06770d12973338ab29c6dec5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 6 Jun 2024 18:05:27 -0700 Subject: [PATCH 1623/2301] [CIR][CIRGen] Fix static init for unions --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 11 +++++------ clang/test/CIR/CodeGen/stmtexpr-init.c | 3 ++- clang/test/CIR/CodeGen/union-init.c | 23 +++++++++++++++++++---- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 335d289cbcf5..87878ad1c723 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -631,6 +631,9 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, if (D.getType()->isVariablyModifiedType()) llvm_unreachable("VLAs are NYI"); + // Save the type in case adding the initializer forces a type change. + auto expectedType = addr.getType(); + auto var = globalOp; // CUDA's local and local static __shared__ variables should not @@ -671,12 +674,8 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, // // FIXME: It is really dangerous to store this in the map; if anyone // RAUW's the GV uses of this constant will be invalid. - // - // Since in CIR the address materialization is done over cir.get_global - // and that's already updated, update the map directly instead of using - // casts. - LocalDeclMap.find(&D)->second = - Address(getAddrOp.getAddr(), elemTy, alignment); + auto castedAddr = builder.createBitcast(getAddrOp.getAddr(), expectedType); + LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment); CGM.setStaticLocalDeclAddress(&D, var); assert(!MissingFeatures::reportGlobalToASan()); diff --git a/clang/test/CIR/CodeGen/stmtexpr-init.c b/clang/test/CIR/CodeGen/stmtexpr-init.c index d073335d7dcd..7a38f3ac4b8c 100644 --- a/clang/test/CIR/CodeGen/stmtexpr-init.c +++ b/clang/test/CIR/CodeGen/stmtexpr-init.c @@ -41,7 +41,8 @@ void T2(void) { // LLVM-DAG: internal constant { i32, [3 x i32] } { i32 3, [3 x i32] [i32 10, i32 20, i32 30] } struct outer X = {ARRAY_PTR(10, 20, 30)}; - // CIR-DAG: cir.cast(bitcast, %1 : !cir.ptr>), !cir.ptr> + // CIR-DAG: %[[T2A:.*]] = cir.get_global @T2._a : !cir.ptr + // CIR-DAG: cir.cast(bitcast, %[[T2A]] : !cir.ptr), !cir.ptr escape(A); escape(&X); } diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index 7e756392ee62..4918fcdf71ed 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -12,9 +12,11 @@ void foo(int x) { A a = {.x = x}; } -// CHECK: ![[anon:.*]] = !cir.struct -// CHECK: #[[bfi_x:.*]] = #cir.bitfield_info -// CHECK: #[[bfi_y:.*]] = #cir.bitfield_info +// CHECK-DAG: ![[anon0:.*]] = !cir.struct +// CHECK-DAG: ![[anon:.*]] = !cir.struct +// CHECK-DAG: #[[bfi_x:.*]] = #cir.bitfield_info +// CHECK-DAG: #[[bfi_y:.*]] = #cir.bitfield_info +// CHECK-DAG: ![[anon1:.*]] = !cir.struct} // CHECK-LABEL: cir.func @foo( // CHECK: %[[VAL_1:.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} @@ -30,4 +32,17 @@ void foo(int x) { // CHECK: cir.return union { int i; float f; } u = { }; -// CHECK: cir.global external @u = #cir.zero : ![[anon]] \ No newline at end of file +// CHECK: cir.global external @u = #cir.zero : ![[anon]] + +unsigned is_little(void) { + const union { + unsigned int u; + unsigned char c[4]; + } one = {1}; + return one.c[0]; +} + +// CHECK: cir.func @is_little +// CHECK: %[[VAL_1:.*]] = cir.get_global @is_little.one : !cir.ptr +// CHECK: %[[VAL_2:.*]] = cir.cast(bitcast, %[[VAL_1]] : !cir.ptr), !cir.ptr +// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = "c"} : !cir.ptr -> !cir.ptr> \ No newline at end of file From 49203df2d87278725cd7c899a0857195621f13d3 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 7 Jun 2024 20:18:39 +0300 Subject: [PATCH 1624/2301] [CIR][CodeGen] builtins: adds __sync_bool/val_compare_and_swap (#656) This PR adds support for ` __sync_bool_compare_and_swap` and ` __sync_val_compare_and_swap`. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 37 ++++++++++ clang/test/CIR/CodeGen/atomic.cpp | 92 +++++++++++++++++++++++++ 2 files changed, 129 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 0c1196a4cb8d..2bdc7ed40633 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -245,6 +245,29 @@ static RValue buildBinaryAtomic(CIRGenFunction &CGF, return RValue::get(makeBinaryAtomicValue(CGF, kind, E)); } +static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, + const CallExpr *expr, + bool returnBool) { + QualType typ = returnBool ? expr->getArg(1)->getType() : expr->getType(); + Address destAddr = checkAtomicAlignment(cgf, expr); + auto &builder = cgf.getBuilder(); + + auto intType = builder.getSIntNTy(cgf.getContext().getTypeSize(typ)); + auto cmpVal = cgf.buildScalarExpr(expr->getArg(1)); + auto valueType = cmpVal.getType(); + cmpVal = buildToInt(cgf, cmpVal, typ, intType); + auto newVal = + buildToInt(cgf, cgf.buildScalarExpr(expr->getArg(2)), typ, intType); + + auto op = builder.create( + cgf.getLoc(expr->getSourceRange()), cmpVal.getType(), builder.getBoolTy(), + destAddr.getPointer(), cmpVal, newVal, + mlir::cir::MemOrder::SequentiallyConsistent, + mlir::cir::MemOrder::SequentiallyConsistent); + + return returnBool ? op.getResult(1) : op.getResult(0); +} + RValue CIRGenFunction::buildRotate(const CallExpr *E, bool IsRotateRight) { auto src = buildScalarExpr(E->getArg(0)); auto shiftAmt = buildScalarExpr(E->getArg(1)); @@ -997,6 +1020,20 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return buildBinaryAtomic(*this, mlir::cir::AtomicFetchKind::Add, E); } + case Builtin::BI__sync_val_compare_and_swap_1: + case Builtin::BI__sync_val_compare_and_swap_2: + case Builtin::BI__sync_val_compare_and_swap_4: + case Builtin::BI__sync_val_compare_and_swap_8: + case Builtin::BI__sync_val_compare_and_swap_16: + return RValue::get(MakeAtomicCmpXchgValue(*this, E, false)); + + case Builtin::BI__sync_bool_compare_and_swap_1: + case Builtin::BI__sync_bool_compare_and_swap_2: + case Builtin::BI__sync_bool_compare_and_swap_4: + case Builtin::BI__sync_bool_compare_and_swap_8: + case Builtin::BI__sync_bool_compare_and_swap_16: + return RValue::get(MakeAtomicCmpXchgValue(*this, E, true)); + case Builtin::BI__builtin_add_overflow: case Builtin::BI__builtin_sub_overflow: case Builtin::BI__builtin_mul_overflow: { diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 9afda8ba5120..0b64cda23446 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -378,4 +378,96 @@ void inc_short(short* a, short b) { // LLVM: atomicrmw add ptr {{.*}}, i8 {{.*}} seq_cst, align 1 void inc_byte(char* a, char b) { char c = __sync_fetch_and_add(a, b); +} + + +// CHECK-LABEL: @_Z12cmp_bool_int +// CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CHECK: %[[CMP:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP]] : !s32i, %[[UPD]] : !s32i, success = seq_cst, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: cir.store %[[RES]], {{.*}} : !cir.bool, !cir.ptr + +// LLVM-LABEL: @_Z12cmp_bool_int +// LLVM: %[[PTR:.*]] = load ptr +// LLVM: %[[CMP:.*]] = load i32 +// LLVM: %[[UPD:.*]] = load i32 +// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst +// LLVM: %[[TMP:.*]] = extractvalue { i32, i1 } %[[RES]], 1 +// LLVM: %[[EXT:.*]] = zext i1 %[[TMP]] to i8 +// LLVM: store i8 %[[EXT]], ptr {{.*}} +void cmp_bool_int(int* p, int x, int u) { + bool r = __sync_bool_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z13cmp_bool_long +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) : (!s64i, !cir.bool) + +// LLVM-LABEL: @_Z13cmp_bool_long +// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst +void cmp_bool_long(long* p, long x, long u) { + bool r = __sync_bool_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z14cmp_bool_short +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s16i, {{.*}} : !s16i, success = seq_cst, failure = seq_cst) : (!s16i, !cir.bool) + +// LLVM-LABEL: @_Z14cmp_bool_short +// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst +void cmp_bool_short(short* p, short x, short u) { + bool r = __sync_bool_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z13cmp_bool_byte +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) : (!s8i, !cir.bool) + +// LLVM-LABEL: @_Z13cmp_bool_byte +// LLVM: cmpxchg ptr {{.*}}, i8 {{.*}}, i8 {{.*}} seq_cst seq_cst +void cmp_bool_byte(char* p, char x, char u) { + bool r = __sync_bool_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z11cmp_val_int +// CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CHECK: %[[CMP:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP]] : !s32i, %[[UPD]] : !s32i, success = seq_cst, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: cir.store %[[OLD]], {{.*}} : !s32i, !cir.ptr + +// LLVM-LABEL: @_Z11cmp_val_int +// LLVM: %[[PTR:.*]] = load ptr +// LLVM: %[[CMP:.*]] = load i32 +// LLVM: %[[UPD:.*]] = load i32 +// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst +// LLVM: %[[TMP:.*]] = extractvalue { i32, i1 } %[[RES]], 0 +// LLVM: store i32 %[[TMP]], ptr {{.*}} +void cmp_val_int(int* p, int x, int u) { + int r = __sync_val_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z12cmp_val_long +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) : (!s64i, !cir.bool) + +// LLVM-LABEL: @_Z12cmp_val_long +// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst +void cmp_val_long(long* p, long x, long u) { + long r = __sync_val_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z13cmp_val_short +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s16i, {{.*}} : !s16i, success = seq_cst, failure = seq_cst) : (!s16i, !cir.bool) + +// LLVM-LABEL: @_Z13cmp_val_short +// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst +void cmp_val_short(short* p, short x, short u) { + short r = __sync_val_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z12cmp_val_byte +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) : (!s8i, !cir.bool) + +// LLVM-LABEL: @_Z12cmp_val_byte +// LLVM: cmpxchg ptr {{.*}}, i8 {{.*}}, i8 {{.*}} seq_cst seq_cst +void cmp_val_byte(char* p, char x, char u) { + char r = __sync_val_compare_and_swap(p, x, u); } \ No newline at end of file From ed45ed24f0fe1b0a25fbbd9948106b2abe13bdce Mon Sep 17 00:00:00 2001 From: GaoXiangYa <168072492+GaoXiangYa@users.noreply.github.com> Date: Sat, 8 Jun 2024 06:36:25 +0800 Subject: [PATCH 1625/2301] [CIR][ThroughMLIR] Support lowering cir.condition and cir.while to scf.condition, scf.while (#636) This pr intruduces CIRConditionLowering and CIRWhileLowering for lowering to scf. --- .../ThroughMLIR/LowerCIRLoopToSCF.cpp | 69 ++++++++++++++++++- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 10 ++- clang/test/CIR/Lowering/ThroughMLIR/while.c | 35 ++++++++++ 3 files changed, 109 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/while.c diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp index 055f97c63b3e..41311c1408e4 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp @@ -24,6 +24,7 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/LowerToMLIR.h" #include "clang/CIR/Passes.h" +#include "llvm/ADT/TypeSwitch.h" using namespace cir; using namespace llvm; @@ -55,6 +56,19 @@ class SCFLoop { int64_t step = 0; }; +class SCFWhileLoop { +public: + SCFWhileLoop(mlir::cir::WhileOp op, mlir::cir::WhileOp::Adaptor adaptor, + mlir::ConversionPatternRewriter *rewriter) + : whileOp(op), adaptor(adaptor), rewriter(rewriter) {} + void transferToSCFWhileOp(); + +private: + mlir::cir::WhileOp whileOp; + mlir::cir::WhileOp::Adaptor adaptor; + mlir::ConversionPatternRewriter *rewriter; +}; + static int64_t getConstant(mlir::cir::ConstantOp op) { auto attr = op->getAttrs().front().getValue(); const auto IntAttr = attr.dyn_cast(); @@ -233,6 +247,20 @@ void SCFLoop::transferToSCFForOp() { }); } +void SCFWhileLoop::transferToSCFWhileOp() { + auto scfWhileOp = rewriter->create( + whileOp->getLoc(), whileOp->getResultTypes(), adaptor.getOperands()); + rewriter->createBlock(&scfWhileOp.getBefore()); + rewriter->createBlock(&scfWhileOp.getAfter()); + + rewriter->cloneRegionBefore(whileOp.getCond(), + &scfWhileOp.getBefore().back()); + rewriter->eraseBlock(&scfWhileOp.getBefore().back()); + + rewriter->cloneRegionBefore(whileOp.getBody(), &scfWhileOp.getAfter().back()); + rewriter->eraseBlock(&scfWhileOp.getAfter().back()); +} + class CIRForOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -248,9 +276,46 @@ class CIRForOpLowering : public mlir::OpConversionPattern { } }; +class CIRWhileOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::WhileOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + SCFWhileLoop loop(op, adaptor, &rewriter); + loop.transferToSCFWhileOp(); + rewriter.eraseOp(op); + return mlir::success(); + } +}; + +class CIRConditionOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + mlir::LogicalResult + matchAndRewrite(mlir::cir::ConditionOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto *parentOp = op->getParentOp(); + return llvm::TypeSwitch(parentOp) + .Case([&](auto) { + auto condition = adaptor.getCondition(); + auto i1Condition = rewriter.create( + op.getLoc(), rewriter.getI1Type(), condition); + rewriter.replaceOpWithNewOp( + op, i1Condition, parentOp->getOperands()); + return mlir::success(); + }) + .Default([](auto) { return mlir::failure(); }); + } +}; + void populateCIRLoopToSCFConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add(converter, patterns.getContext()); + patterns.add( + converter, patterns.getContext()); } -} // namespace cir +} // namespace cir \ No newline at end of file diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 201c8c65f717..f5395dc984a2 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -31,6 +31,10 @@ #include "mlir/Dialect/SCF/Transforms/Passes.h" #include "mlir/IR/BuiltinDialect.h" #include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Operation.h" +#include "mlir/IR/Region.h" +#include "mlir/IR/TypeRange.h" +#include "mlir/IR/ValueRange.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Support/LogicalResult.h" @@ -43,7 +47,9 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/LowerToMLIR.h" #include "clang/CIR/Passes.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Sequence.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TypeSwitch.h" using namespace cir; @@ -558,7 +564,6 @@ class CIRFuncOpLowering : public mlir::OpConversionPattern { return mlir::failure(); rewriter.eraseOp(op); - return mlir::LogicalResult::success(); } }; @@ -883,7 +888,6 @@ class CIRScopeOpLowering if (mlir::failed(getTypeConverter()->convertTypes(scopeOp->getResultTypes(), mlirResultTypes))) return mlir::LogicalResult::failure(); - rewriter.setInsertionPoint(scopeOp); auto newScopeOp = rewriter.create( scopeOp.getLoc(), mlirResultTypes); @@ -956,7 +960,7 @@ class CIRYieldOpLowering mlir::ConversionPatternRewriter &rewriter) const override { auto *parentOp = op->getParentOp(); return llvm::TypeSwitch(parentOp) - .Case([&](auto) { + .Case([&](auto) { rewriter.replaceOpWithNewOp( op, adaptor.getOperands()); return mlir::success(); diff --git a/clang/test/CIR/Lowering/ThroughMLIR/while.c b/clang/test/CIR/Lowering/ThroughMLIR/while.c new file mode 100644 index 000000000000..df459fd2c27a --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/while.c @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +void foo() { + int a = 0; + while(a < 2) { + a++; + } +} + +//CHECK: func.func @foo() { +//CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref +//CHECK: memref.alloca_scope { +//CHECK: scf.while : () -> () { +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi ult, %[[ZERO:.+]], %[[C2_I32]] : i32 +//CHECK: %[[TWO:.+]] = arith.extui %[[ONE:.+]] : i1 to i32 +//CHECK: %[[C0_I32_0:.+]] = arith.constant 0 : i32 +//CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO:.+]], %[[C0_I32_0]] : i32 +//CHECK: %[[FOUR:.+]] = arith.extui %[[THREE:.+]] : i1 to i8 +//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR:.+]] : i8 to i1 +//CHECK: scf.condition(%[[FIVE]]) +//CHECK: } do { +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[ONE:.+]] = arith.addi %0, %[[C1_I32:.+]] : i32 +//CHECK: memref.store %[[ONE:.+]], %[[alloca]][] : memref +//CHECK: scf.yield +//CHECK: } +//CHECK: } +//CHECK: return +//CHECK: } \ No newline at end of file From 6404ff3c8a0b74f784e4244b7dfacd80ad366e9c Mon Sep 17 00:00:00 2001 From: GaoXiangYa <168072492+GaoXiangYa@users.noreply.github.com> Date: Sat, 8 Jun 2024 06:39:40 +0800 Subject: [PATCH 1626/2301] [CIR][ThroughMLIR] Support lowering cir.if to scf.if (#640) This pr introduces CIRIfOpLowering for lowering cir.if to scf.if --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 29 +++- clang/test/CIR/Lowering/ThroughMLIR/if.c | 137 ++++++++++++++++++ 2 files changed, 164 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/if.c diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index f5395dc984a2..07d3db7b47ce 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -969,6 +969,31 @@ class CIRYieldOpLowering } }; +class CIRIfOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::IfOp ifop, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto condition = adaptor.getCondition(); + auto i1Condition = rewriter.create( + ifop->getLoc(), rewriter.getI1Type(), condition); + auto newIfOp = rewriter.create( + ifop->getLoc(), ifop->getResultTypes(), i1Condition); + auto *thenBlock = rewriter.createBlock(&newIfOp.getThenRegion()); + rewriter.inlineBlockBefore(&ifop.getThenRegion().front(), thenBlock, + thenBlock->end()); + if (!ifop.getElseRegion().empty()) { + auto *elseBlock = rewriter.createBlock(&newIfOp.getElseRegion()); + rewriter.inlineBlockBefore(&ifop.getElseRegion().front(), elseBlock, + elseBlock->end()); + } + rewriter.replaceOp(ifop, newIfOp); + return mlir::success(); + } +}; + class CIRGlobalOpLowering : public mlir::OpConversionPattern { public: @@ -1272,8 +1297,8 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, CIRLogOpLowering, CIRRoundOpLowering, CIRPtrStrideOpLowering, CIRSinOpLowering, CIRShiftOpLowering, CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitPopcountOpLowering, CIRBitClrsbOpLowering, - CIRBitFfsOpLowering, CIRBitParityOpLowering>(converter, - patterns.getContext()); + CIRBitFfsOpLowering, CIRBitParityOpLowering, CIRIfOpLowering>( + converter, patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/if.c b/clang/test/CIR/Lowering/ThroughMLIR/if.c new file mode 100644 index 000000000000..5783cec9fe1d --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/if.c @@ -0,0 +1,137 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +void foo() { + int a = 2; + int b = 0; + if (a > 0) { + b++; + } else { + b--; + } +} + +//CHECK: func.func @foo() { +//CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +//CHECK: memref.store %[[C2_I32]], %[[alloca]][] : memref +//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32]], %[[alloca_0]][] : memref +//CHECK: memref.alloca_scope { +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi ugt, %[[ZERO]], %[[C0_I32_1]] : i32 +//CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 +//CHECK: %[[C0_I32_2:.+]] = arith.constant 0 : i32 +//CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO]], %[[C0_I32_2]] : i32 +//CHECK: %[[FOUR:.+]] = arith.extui %[[THREE]] : i1 to i8 +//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR]] : i8 to i1 +//CHECK: scf.if %[[FIVE]] { +//CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[SEVEN:.+]] = arith.addi %[[SIX]], %[[C1_I32]] : i32 +//CHECK: memref.store %[[SEVEN]], %[[alloca_0]][] : memref +//CHECK: } else { +//CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[SEVEN:.+]] = arith.subi %[[SIX]], %[[C1_I32]] : i32 +//CHECK: memref.store %[[SEVEN]], %[[alloca_0]][] : memref +//CHECK: } +//CHECK: } +//CHECK: return +//CHECK: } + +void foo2() { + int a = 2; + int b = 0; + if (a < 3) { + b++; + } +} + +//CHECK: func.func @foo2() { +//CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +//CHECK: memref.store %[[C2_I32]], %[[alloca]][] : memref +//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32]], %[[alloca_0]][] : memref +//CHECK: memref.alloca_scope { +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C3_I32:.+]] = arith.constant 3 : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi ult, %[[ZERO]], %[[C3_I32]] : i32 +//CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 +//CHECK: %[[C0_I32_1]] = arith.constant 0 : i32 +//CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO]], %[[C0_I32_1]] : i32 +//CHECK: %[[FOUR:.+]] = arith.extui %[[THREE]] : i1 to i8 +//CHECK: %[[FIVE]] = arith.trunci %[[FOUR]] : i8 to i1 +//CHECK: scf.if %[[FIVE]] { +//CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[SEVEN:.+]] = arith.addi %[[SIX]], %[[C1_I32]] : i32 +//CHECK: memref.store %[[SEVEN]], %[[alloca_0]][] : memref +//CHECK: } +//CHECK: } +//CHECK: return +//CHECK: } + +void foo3() { + int a = 2; + int b = 0; + if (a < 3) { + int c = 1; + if (c > 2) { + b++; + } else { + b--; + } + } +} + + +//CHECK: func.func @foo3() { +//CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +//CHECK: memref.store %[[C2_I32]], %[[alloca]][] : memref +//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32]], %[[alloca_0]][] : memref +//CHECK: memref.alloca_scope { +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C3_I32:.+]] = arith.constant 3 : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi ult, %[[ZERO]], %[[C3_I32]] : i32 +//CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 +//CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 +//CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO:.+]], %[[C0_I32_1]] : i32 +//CHECK: %[[FOUR:.+]] = arith.extui %[[THREE]] : i1 to i8 +//CHECK: %[[FIVE]] = arith.trunci %[[FOUR]] : i8 to i1 +//CHECK: scf.if %[[FIVE]] { +//CHECK: %[[alloca_2:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: memref.store %[[C1_I32]], %[[alloca_2]][] : memref +//CHECK: memref.alloca_scope { +//CHECK: %[[SIX:.+]] = memref.load %[[alloca_2]][] : memref +//CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 +//CHECK: %[[SEVEN:.+]] = arith.cmpi ugt, %[[SIX]], %[[C2_I32_3]] : i32 +//CHECK: %[[EIGHT:.+]] = arith.extui %[[SEVEN]] : i1 to i32 +//CHECK: %[[C0_I32_4:.+]] = arith.constant 0 : i32 +//CHECK: %[[NINE:.+]] = arith.cmpi ne, %[[EIGHT]], %[[C0_I32_4]] : i32 +//CHECK: %[[TEN:.+]] = arith.extui %[[NINE]] : i1 to i8 +//CHECK: %[[ELEVEN:.+]] = arith.trunci %[[TEN]] : i8 to i1 +//CHECK: scf.if %[[ELEVEN]] { +//CHECK: %[[TWELVE:.+]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32_5:.+]] = arith.constant 1 : i32 +//CHECK: %[[THIRTEEN:.+]] = arith.addi %[[TWELVE]], %[[C1_I32_5]] : i32 +//CHECK: memref.store %[[THIRTEEN]], %[[alloca_0]][] : memref +//CHECK: } else { +//CHECK: %[[TWELVE:.+]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32_5:.+]] = arith.constant 1 : i32 +//CHECK: %[[THIRTEEN:.+]] = arith.subi %[[TWELVE]], %[[C1_I32_5]] : i32 +//CHECK: memref.store %[[THIRTEEN]], %[[alloca_0]][] : memref +//CHECK: } +//CHECK: } +//CHECK: } +//CHECK: } +//CHECK: return +//CHECK: } From c468b8673f38b18f2377f34a1a002e0a9a8d6d2b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 7 Jun 2024 16:54:42 -0700 Subject: [PATCH 1627/2301] [CIR][Transforms] Fix flattening for TernaryOp to properly handle locations --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 10 ++++++--- clang/test/CIR/Transforms/ternary.cir | 21 +++++++++++++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 285f1a9bfe30..ba489d3076ee 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -438,9 +438,13 @@ class CIRTernaryOpFlattening auto *condBlock = rewriter.getInsertionBlock(); auto opPosition = rewriter.getInsertionPoint(); auto *remainingOpsBlock = rewriter.splitBlock(condBlock, opPosition); - auto *continueBlock = rewriter.createBlock( - remainingOpsBlock, op->getResultTypes(), - SmallVector(/* result number always 1 */ 1, loc)); + SmallVector locs; + // Ternary result is optional, make sure to populate the location only + // when relevant. + if (op->getResultTypes().size()) + locs.push_back(loc); + auto *continueBlock = + rewriter.createBlock(remainingOpsBlock, op->getResultTypes(), locs); rewriter.create(loc, remainingOpsBlock); auto &trueRegion = op.getTrueRegion(); diff --git a/clang/test/CIR/Transforms/ternary.cir b/clang/test/CIR/Transforms/ternary.cir index fedfbcbbc5ea..67ef7f95a6b5 100644 --- a/clang/test/CIR/Transforms/ternary.cir +++ b/clang/test/CIR/Transforms/ternary.cir @@ -44,4 +44,25 @@ module { // CHECK: cir.return %8 : !s32i // CHECK: } + cir.func @foo2(%arg0: !cir.bool) { + cir.ternary(%arg0, true { + cir.yield + }, false { + cir.yield + }) : (!cir.bool) -> () + cir.return + } + +// CHECK: cir.func @foo2(%arg0: !cir.bool) { +// CHECK: cir.brcond %arg0 ^bb1, ^bb2 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: cir.br ^bb3 +// CHECK: ^bb2: // pred: ^bb0 +// CHECK: cir.br ^bb3 +// CHECK: ^bb3: // 2 preds: ^bb1, ^bb2 +// CHECK: cir.br ^bb4 +// CHECK: ^bb4: // pred: ^bb3 +// CHECK: cir.return +// CHECK: } + } From 85c10adc27413ec785081de5e988ed31b2997258 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 7 Jun 2024 17:39:21 -0700 Subject: [PATCH 1628/2301] [CIR][LowerToLLVM] Fix crash in PtrStrideOp lowering Assumptions about values having a defining op can be misleading when block arguments are involved. --- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 14 +++++++------- clang/test/CIR/Lowering/ptrstride.cir | 9 +++++++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 214837ec4722..7377820a29fd 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -499,21 +499,21 @@ class CIRPtrStrideOpLowering // Zero-extend, sign-extend or trunc the pointer value. auto index = adaptor.getStride(); auto width = index.getType().cast().getWidth(); - mlir::DataLayout LLVMLayout( - index.getDefiningOp()->getParentOfType()); + mlir::DataLayout LLVMLayout(ptrStrideOp->getParentOfType()); auto layoutWidth = LLVMLayout.getTypeIndexBitwidth(adaptor.getBase().getType()); - if (layoutWidth && width != *layoutWidth) { + auto indexOp = index.getDefiningOp(); + if (indexOp && layoutWidth && width != *layoutWidth) { // If the index comes from a subtraction, make sure the extension happens // before it. To achieve that, look at unary minus, which already got // lowered to "sub 0, x". - auto sub = dyn_cast(index.getDefiningOp()); - auto unary = - dyn_cast(ptrStrideOp.getStride().getDefiningOp()); + auto sub = dyn_cast(indexOp); + auto unary = dyn_cast_if_present( + ptrStrideOp.getStride().getDefiningOp()); bool rewriteSub = unary && unary.getKind() == mlir::cir::UnaryOpKind::Minus && sub; if (rewriteSub) - index = index.getDefiningOp()->getOperand(1); + index = indexOp->getOperand(1); // Handle the cast auto llvmDstType = mlir::IntegerType::get(ctx, *layoutWidth); diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index b3d74c657d82..84e5b0aff6a7 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -1,5 +1,6 @@ // RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck %s --input-file=%t.mlir -check-prefix=MLIR +// XFAIL: * !s32i = !cir.int module { @@ -12,6 +13,10 @@ module { %4 = cir.load %3 : !cir.ptr, !s32i cir.return } + cir.func @g(%arg0: !cir.ptr, %2 : !s32i) { + %3 = cir.ptr_stride(%arg0 : !cir.ptr, %2 : !s32i), !cir.ptr + cir.return + } } // MLIR-LABEL: @f @@ -24,3 +29,7 @@ module { // MLIR: %[[VAL_6:.*]] = llvm.getelementptr %[[VAL_3]]{{\[}}%[[VAL_5]]] : (!llvm.ptr, i64) -> !llvm.ptr, i32 // MLIR: %[[VAL_7:.*]] = llvm.load %[[VAL_6]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: llvm.return + +// MLIR-LABEL: @g +// MLIR: %0 = llvm.sext %arg1 : i32 to i64 +// MLIR-NEXT: llvm.getelementptr %arg0[%0] : (!llvm.ptr, i64) -> !llvm.ptr, i32 From c8bbcef5ac3e26d870bca95d9a1e878c91e8f177 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Mon, 10 Jun 2024 15:29:51 -0400 Subject: [PATCH 1629/2301] [CIR][LowerToLLVM][CXXABI] Fix basic block ordering issue. (#676) When loweringPrepare cg.var_arg for AArch64, we create multiple basic blocks, but didn't really get ordering of the blocks in the blocklist of the parent region right. That is, we didn't make sure the last of the block list is the naturally last block (exit) of the region. This PR fixes this problem. If we don't fix this problem, FlattenCFGPass will fail verification because CIRScopeOpFlattening in this pass is onlyy expecting to see cir.yield op in the last block of the region's block list. --- .../Targets/LoweringPrepareAArch64CXXABI.cpp | 10 ++ clang/test/CIR/CodeGen/var-arg-float.c | 54 +++++---- clang/test/CIR/CodeGen/var-arg-scope.c | 106 ++++++++++++++++++ clang/test/CIR/CodeGen/var-arg.c | 60 +++++----- 4 files changed, 172 insertions(+), 58 deletions(-) create mode 100644 clang/test/CIR/CodeGen/var-arg-scope.c diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index a561bc7f2ba5..6fe71b13ff92 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -168,6 +168,16 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( maybeRegBlock); auto contBlock = currentBlock->splitBlock(op); + // now contBlock should be the block after onStackBlock in CFG. + // This is essential, considering the case where originally currentBlock + // was the only block in the region. By splitting the block, and added + // above blocks, really the rear block in the region should be contBlock, + // not onStackBlock, but splitBlock would just insert contBlock after + // currentBlock, so we need to move it. + auto contBlockIter = contBlock->getIterator(); + contBlock->getParent()->getBlocks().remove(contBlockIter); + onStackBlock->getParent()->getBlocks().insertAfter( + mlir::Region::iterator(onStackBlock), contBlock); // Otherwise, at least some kind of argument could go in these registers, the // question is whether this particular type is too big. diff --git a/clang/test/CIR/CodeGen/var-arg-float.c b/clang/test/CIR/CodeGen/var-arg-float.c index 2b1daedec54a..10c950e86226 100644 --- a/clang/test/CIR/CodeGen/var-arg-float.c +++ b/clang/test/CIR/CodeGen/var-arg-float.c @@ -39,24 +39,12 @@ double f1(int n, ...) { // AFTER: [[CMP0:%.*]] = cir.cmp(ge, [[VR_OFFS]], [[ZERO]]) : !s32i, !cir.bool // AFTER-NEXT: cir.brcond [[CMP0]] [[BB_ON_STACK:\^bb.*]], [[BB_MAY_REG:\^bb.*]] - -// AFTER-NEXT: [[BB_END:\^bb.*]]([[BLK_ARG:%.*]]: !cir.ptr): // 2 preds: [[BB_IN_REG:\^bb.*]], [[BB_ON_STACK]] -// AFTER-NEXT: [[TMP0:%.*]] = cir.cast(bitcast, [[BLK_ARG]] : !cir.ptr), !cir.ptr -// AFTER-NEXT: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr, !cir.double -// AFTER: cir.store [[TMP1]], [[RESP]] : !cir.double, !cir.ptr -// AFTER: cir.va.end [[VARLIST]] : !cir.ptr -// AFTER: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !cir.double -// AFTER: cir.store [[RES]], [[RETP]] : !cir.double, !cir.ptr -// AFTER: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !cir.double -// AFTER: cir.return [[RETV]] : !cir.double - - // AFTER: [[BB_MAY_REG]]: // AFTER-NEXT: [[SIXTEEN:%.*]] = cir.const #cir.int<16> : !s32i // AFTER-NEXT: [[NEW_REG_OFFS:%.*]] = cir.binop(add, [[VR_OFFS]], [[SIXTEEN]]) : !s32i // AFTER-NEXT: cir.store [[NEW_REG_OFFS]], [[VR_OFFS_P]] : !s32i, !cir.ptr // AFTER-NEXT: [[CMP1:%.*]] = cir.cmp(le, [[NEW_REG_OFFS]], [[ZERO]]) : !s32i, !cir.bool -// AFTER-NEXT: cir.brcond [[CMP1]] [[BB_IN_REG]], [[BB_ON_STACK]] +// AFTER-NEXT: cir.brcond [[CMP1]] [[BB_IN_REG:\^bb.*]], [[BB_ON_STACK]] // AFTER: [[BB_IN_REG]]: @@ -65,7 +53,7 @@ double f1(int n, ...) { // AFTER-NEXT: [[TMP2:%.*]] = cir.cast(bitcast, [[VR_TOP]] : !cir.ptr), !cir.ptr // AFTER-NEXT: [[TMP3:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[VR_OFFS]] : !s32i), !cir.ptr // AFTER-NEXT: [[IN_REG_OUTPUT:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// AFTER-NEXT: cir.br [[BB_END]]([[IN_REG_OUTPUT]] : !cir.ptr) +// AFTER-NEXT: cir.br [[BB_END:\^bb.*]]([[IN_REG_OUTPUT]] : !cir.ptr) // AFTER: [[BB_ON_STACK]]: @@ -78,6 +66,16 @@ double f1(int n, ...) { // AFTER-NEXT: cir.store [[NEW_STACK_V]], [[STACK_P]] : !cir.ptr, !cir.ptr> // AFTER-NEXT: cir.br [[BB_END]]([[STACK_V]] : !cir.ptr) +// AFTER-NEXT: [[BB_END]]([[BLK_ARG:%.*]]: !cir.ptr): // 2 preds: [[BB_IN_REG]], [[BB_ON_STACK]] +// AFTER-NEXT: [[TMP0:%.*]] = cir.cast(bitcast, [[BLK_ARG]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr, !cir.double +// AFTER: cir.store [[TMP1]], [[RESP]] : !cir.double, !cir.ptr +// AFTER: cir.va.end [[VARLIST]] : !cir.ptr +// AFTER: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !cir.double +// AFTER: cir.store [[RES]], [[RETP]] : !cir.double, !cir.ptr +// AFTER: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !cir.double +// AFTER: cir.return [[RETV]] : !cir.double + // beginning block llvm code // LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } // LLVM: define double @f1(i32 %0, ...) @@ -90,32 +88,32 @@ double f1(int n, ...) { // LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[VR_OFFS]], 0, // LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]], -// LLVM: [[BB_END:.*]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG:.*]] -// LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT:%.*]], %[[BB_IN_REG]] ], [ [[STACK_V:%.*]], %[[BB_ON_STACK]] ] -// LLVM-NEXT: [[PHIV:%.*]] = load double, ptr [[PHIP]], align 8, -// LLVM-NEXT: store double [[PHIV]], ptr [[RESP]], align 8, -// LLVM: call void @llvm.va_end.p0(ptr [[VARLIST]]), -// LLVM: [[RES:%.*]] = load double, ptr [[RESP]], align 8, -// LLVM: store double [[RES]], ptr [[RETP]], align 8, -// LLVM: [[RETV:%.*]] = load double, ptr [[RETP]], align 8, -// LLVM-NEXT: ret double [[RETV]], - // LLVM: [[BB_MAY_REG]]: ; // LLVM-NEXT: [[NEW_REG_OFFS:%.*]] = add i32 [[VR_OFFS]], 16, // LLVM-NEXT: store i32 [[NEW_REG_OFFS]], ptr [[VR_OFFS_P]], align 4, // LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0, -// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG]], label %[[BB_ON_STACK]], +// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG:.*]], label %[[BB_ON_STACK]], // LLVM: [[BB_IN_REG]]: ; // LLVM-NEXT: [[VR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 2, // LLVM-NEXT: [[VR_TOP:%.*]] = load ptr, ptr [[VR_TOP_P]], align 8, // LLVM-NEXT: [[EXT64_VR_OFFS:%.*]] = sext i32 [[VR_OFFS]] to i64, -// LLVM-NEXT: [[IN_REG_OUTPUT]] = getelementptr i8, ptr [[VR_TOP]], i64 [[EXT64_VR_OFFS]], -// LLVM-NEXT: br label %[[BB_END]], +// LLVM-NEXT: [[IN_REG_OUTPUT:%.*]] = getelementptr i8, ptr [[VR_TOP]], i64 [[EXT64_VR_OFFS]], +// LLVM-NEXT: br label %[[BB_END:.*]], // LLVM: [[BB_ON_STACK]]: ; // LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, -// LLVM-NEXT: [[STACK_V]] = load ptr, ptr [[STACK_P]], align 8, +// LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8, // LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i32 8, // LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, // LLVM-NEXT: br label %[[BB_END]], + +// LLVM: [[BB_END]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG]] +// LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT]], %[[BB_IN_REG]] ], [ [[STACK_V]], %[[BB_ON_STACK]] ] +// LLVM-NEXT: [[PHIV:%.*]] = load double, ptr [[PHIP]], align 8, +// LLVM-NEXT: store double [[PHIV]], ptr [[RESP]], align 8, +// LLVM: call void @llvm.va_end.p0(ptr [[VARLIST]]), +// LLVM: [[RES:%.*]] = load double, ptr [[RESP]], align 8, +// LLVM: store double [[RES]], ptr [[RETP]], align 8, +// LLVM: [[RETV:%.*]] = load double, ptr [[RETP]], align 8, +// LLVM-NEXT: ret double [[RETV]], diff --git a/clang/test/CIR/CodeGen/var-arg-scope.c b/clang/test/CIR/CodeGen/var-arg-scope.c new file mode 100644 index 000000000000..b4087d30048f --- /dev/null +++ b/clang/test/CIR/CodeGen/var-arg-scope.c @@ -0,0 +1,106 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// XFAIL: * + +void f1(__builtin_va_list c) { + { __builtin_va_arg(c, void *); } +} + +// BEFORE: cir.func @f1(%arg0: !ty_22__va_list22) attributes +// BEFORE: [[VAR_LIST:%.*]] = cir.alloca !ty_22__va_list22, !cir.ptr, ["c", init] {alignment = 8 : i64} +// BEFORE: cir.store %arg0, [[VAR_LIST]] : !ty_22__va_list22, !cir.ptr +// BEFORE: cir.scope { +// BEFORE-NEXT: [[TMP:%.*]] = cir.va.arg [[VAR_LIST]] : (!cir.ptr) -> !cir.ptr +// BEFORE-NEXT: } +// BEFORE-NEXT: cir.return + +// AFTER: cir.func @f1(%arg0: !ty_22__va_list22) attributes +// AFTER: [[VARLIST:%.*]] = cir.alloca !ty_22__va_list22, !cir.ptr, ["c", init] {alignment = 8 : i64} +// AFTER: cir.store %arg0, [[VARLIST]] : !ty_22__va_list22, !cir.ptr +// AFTER: cir.scope { +// +// AFTER-NEXT: [[GR_OFFS_P:%.*]] = cir.get_member [[VARLIST]][3] {name = "gr_offs"} : !cir.ptr -> !cir.ptr +// AFTER-NEXT: [[GR_OFFS:%.*]] = cir.load [[GR_OFFS_P]] : !cir.ptr +// AFTER: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i +// AFTER: [[CMP0:%.*]] = cir.cmp(ge, [[GR_OFFS]], [[ZERO]]) : !s32i, !cir.bool +// AFTER-NEXT: cir.brcond [[CMP0]] [[BB_ON_STACK:\^bb.*]], [[BB_MAY_REG:\^bb.*]] + +// This BB calculates to see if it is possible to pass arg in register. +// AFTER: [[BB_MAY_REG]]: +// AFTER-NEXT: [[EIGHT:%.*]] = cir.const #cir.int<8> : !s32i +// AFTER-NEXT: [[NEW_REG_OFFS:%.*]] = cir.binop(add, [[GR_OFFS]], [[EIGHT]]) : !s32i +// AFTER-NEXT: cir.store [[NEW_REG_OFFS]], [[GR_OFFS_P]] : !s32i, !cir.ptr +// AFTER-NEXT: [[CMP1:%.*]] = cir.cmp(le, [[NEW_REG_OFFS]], [[ZERO]]) : !s32i, !cir.bool +// AFTER-NEXT: cir.brcond [[CMP1]] [[BB_IN_REG:\^bb.*]], [[BB_ON_STACK]] + +// arg is passed in register. +// AFTER: [[BB_IN_REG]]: +// AFTER-NEXT: [[GR_TOP_P:%.*]] = cir.get_member [[VARLIST]][1] {name = "gr_top"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[GR_TOP:%.*]] = cir.load [[GR_TOP_P]] : !cir.ptr>, !cir.ptr +// AFTER-NEXT: [[TMP2:%.*]] = cir.cast(bitcast, [[GR_TOP]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: [[TMP3:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[GR_OFFS]] : !s32i), !cir.ptr +// AFTER-NEXT: [[IN_REG_OUTPUT:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.br [[BB_END:\^bb.*]]([[IN_REG_OUTPUT]] : !cir.ptr) + +// arg is passed in stack. +// AFTER: [[BB_ON_STACK]]: +// AFTER-NEXT: [[STACK_P:%.*]] = cir.get_member [[VARLIST]][0] {name = "stack"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[STACK_V:%.*]] = cir.load [[STACK_P]] : !cir.ptr>, !cir.ptr +// AFTER-NEXT: [[EIGHT_IN_PTR_ARITH:%.*]] = cir.const #cir.int<8> : !u64i +// AFTER-NEXT: [[TMP4:%.*]] = cir.cast(bitcast, [[STACK_V]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: [[TMP5:%.*]] = cir.ptr_stride([[TMP4]] : !cir.ptr, [[EIGHT_IN_PTR_ARITH]] : !u64i), !cir.ptr +// AFTER-NEXT: [[NEW_STACK_V:%.*]] = cir.cast(bitcast, [[TMP5]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.store [[NEW_STACK_V]], [[STACK_P]] : !cir.ptr, !cir.ptr> +// AFTER-NEXT: cir.br [[BB_END]]([[STACK_V]] : !cir.ptr) + +// This BB is where different path converges. BLK_ARG is the arg addr which +// could come from IN_REG block where arg is passed in register, and saved in callee +// stack's argument saving area. +// Or from ON_STACK block which means arg is passed in from caller's stack area. +// AFTER-NEXT: [[BB_END]]([[BLK_ARG:%.*]]: !cir.ptr): // 2 preds: [[BB_IN_REG]], [[BB_ON_STACK]] +// AFTER-NEXT: [[TMP0:%.*]] = cir.cast(bitcast, [[BLK_ARG]] : !cir.ptr), !cir.ptr> +// AFTER-NEXT: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr +// AFTER-NEXT: cir.yield +// AFTER-NEXT: } +// AFTER-NEXT: cir.return + +// LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } +// LLVM: define void @f1(%struct.__va_list %0) +// LLVM: [[VARLIST:%.*]] = alloca %struct.__va_list, i64 1, align 8, +// LLVM: br label %[[SCOPE_FRONT:.*]], + +// LLVM: [[SCOPE_FRONT]]: ; preds = %1 +// LLVM: [[GR_OFFS_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 3 +// LLVM: [[GR_OFFS:%.*]] = load i32, ptr [[GR_OFFS_P]], align 4, +// LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[GR_OFFS]], 0, +// LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]], + +// LLVM: [[BB_MAY_REG]]: ; +// LLVM: [[NEW_REG_OFFS:%.*]] = add i32 [[GR_OFFS]], 8, +// LLVM: store i32 [[NEW_REG_OFFS]], ptr [[GR_OFFS_P]], align 4, +// LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0, +// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG:.*]], label %[[BB_ON_STACK]], + +// LLVM: [[BB_IN_REG]]: ; +// LLVM-NEXT: [[GR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 1, +// LLVM-NEXT: [[GR_TOP:%.*]] = load ptr, ptr [[GR_TOP_P]], align 8, +// LLVM-NEXT: [[EXT64_GR_OFFS:%.*]] = sext i32 [[GR_OFFS]] to i64, +// LLVM-NEXT: [[IN_REG_OUTPUT:%.*]] = getelementptr i8, ptr [[GR_TOP]], i64 [[EXT64_GR_OFFS]], +// LLVM-NEXT: br label %[[BB_END:.*]], + +// LLVM: [[BB_ON_STACK]]: ; +// LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, +// LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8, +// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i32 8, +// LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, +// LLVM-NEXT: br label %[[BB_END]], + +// LLVM: [[BB_END]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG]] +// LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT]], %[[BB_IN_REG]] ], [ [[STACK_V]], %[[BB_ON_STACK]] ] +// LLVM-NEXT: [[PHIV:%.*]] = load ptr, ptr [[PHIP]], align 8, +// LLVM-NEXT: br label %[[OUT_SCOPE:.*]], + +// LLVM: [[OUT_SCOPE]]: ; preds = %[[BB_END]] +// LLVM-NEXT: ret void, diff --git a/clang/test/CIR/CodeGen/var-arg.c b/clang/test/CIR/CodeGen/var-arg.c index 20478da5281f..7987237025a6 100644 --- a/clang/test/CIR/CodeGen/var-arg.c +++ b/clang/test/CIR/CodeGen/var-arg.c @@ -38,27 +38,13 @@ int f1(int n, ...) { // AFTER: [[CMP0:%.*]] = cir.cmp(ge, [[GR_OFFS]], [[ZERO]]) : !s32i, !cir.bool // AFTER-NEXT: cir.brcond [[CMP0]] [[BB_ON_STACK:\^bb.*]], [[BB_MAY_REG:\^bb.*]] -// This BB is where different path converges. BLK_ARG is the arg addr which -// could come from IN_REG block where arg is passed in register, and saved in callee -// stack's argument saving area. -// Or from ON_STACK block which means arg is passed in from caller's stack area. -// AFTER-NEXT: [[BB_END:\^bb.*]]([[BLK_ARG:%.*]]: !cir.ptr): // 2 preds: [[BB_IN_REG:\^bb.*]], [[BB_ON_STACK]] -// AFTER-NEXT: [[TMP0:%.*]] = cir.cast(bitcast, [[BLK_ARG]] : !cir.ptr), !cir.ptr -// AFTER-NEXT: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr, !s32i -// AFTER: cir.store [[TMP1]], [[RESP]] : !s32i, !cir.ptr -// AFTER: cir.va.end [[VARLIST]] : !cir.ptr -// AFTER: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !s32i -// AFTER: cir.store [[RES]], [[RETP]] : !s32i, !cir.ptr -// AFTER: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !s32i -// AFTER: cir.return [[RETV]] : !s32i - // This BB calculates to see if it is possible to pass arg in register. // AFTER: [[BB_MAY_REG]]: // AFTER-NEXT: [[EIGHT:%.*]] = cir.const #cir.int<8> : !s32i // AFTER-NEXT: [[NEW_REG_OFFS:%.*]] = cir.binop(add, [[GR_OFFS]], [[EIGHT]]) : !s32i // AFTER-NEXT: cir.store [[NEW_REG_OFFS]], [[GR_OFFS_P]] : !s32i, !cir.ptr // AFTER-NEXT: [[CMP1:%.*]] = cir.cmp(le, [[NEW_REG_OFFS]], [[ZERO]]) : !s32i, !cir.bool -// AFTER-NEXT: cir.brcond [[CMP1]] [[BB_IN_REG]], [[BB_ON_STACK]] +// AFTER-NEXT: cir.brcond [[CMP1]] [[BB_IN_REG:\^bb.*]], [[BB_ON_STACK]] // arg is passed in register. // AFTER: [[BB_IN_REG]]: @@ -67,7 +53,7 @@ int f1(int n, ...) { // AFTER-NEXT: [[TMP2:%.*]] = cir.cast(bitcast, [[GR_TOP]] : !cir.ptr), !cir.ptr // AFTER-NEXT: [[TMP3:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[GR_OFFS]] : !s32i), !cir.ptr // AFTER-NEXT: [[IN_REG_OUTPUT:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// AFTER-NEXT: cir.br [[BB_END]]([[IN_REG_OUTPUT]] : !cir.ptr) +// AFTER-NEXT: cir.br [[BB_END:\^bb.*]]([[IN_REG_OUTPUT]] : !cir.ptr) // arg is passed in stack. // AFTER: [[BB_ON_STACK]]: @@ -80,6 +66,20 @@ int f1(int n, ...) { // AFTER-NEXT: cir.store [[NEW_STACK_V]], [[STACK_P]] : !cir.ptr, !cir.ptr> // AFTER-NEXT: cir.br [[BB_END]]([[STACK_V]] : !cir.ptr) +// This BB is where different path converges. BLK_ARG is the arg addr which +// could come from IN_REG block where arg is passed in register, and saved in callee +// stack's argument saving area. +// Or from ON_STACK block which means arg is passed in from caller's stack area. +// AFTER-NEXT: [[BB_END]]([[BLK_ARG:%.*]]: !cir.ptr): // 2 preds: [[BB_IN_REG]], [[BB_ON_STACK]] +// AFTER-NEXT: [[TMP0:%.*]] = cir.cast(bitcast, [[BLK_ARG]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr, !s32i +// AFTER: cir.store [[TMP1]], [[RESP]] : !s32i, !cir.ptr +// AFTER: cir.va.end [[VARLIST]] : !cir.ptr +// AFTER: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !s32i +// AFTER: cir.store [[RES]], [[RETP]] : !s32i, !cir.ptr +// AFTER: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !s32i +// AFTER: cir.return [[RETV]] : !s32i + // LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } // LLVM: define i32 @f1(i32 %0, ...) // LLVM: [[ARGN:%.*]] = alloca i32, i64 1, align 4, @@ -91,32 +91,32 @@ int f1(int n, ...) { // LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[GR_OFFS]], 0, // LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]], -// LLVM: [[BB_END:.*]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG:.*]] -// LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT:%.*]], %[[BB_IN_REG]] ], [ [[STACK_V:%.*]], %[[BB_ON_STACK]] ] -// LLVM-NEXT: [[PHIV:%.*]] = load i32, ptr [[PHIP]], align 4, -// LLVM-NEXT: store i32 [[PHIV]], ptr [[RESP]], align 4, -// LLVM: call void @llvm.va_end.p0(ptr [[VARLIST]]), -// LLVM: [[RES:%.*]] = load i32, ptr [[RESP]], align 4, -// LLVM: store i32 [[RES]], ptr [[RETP]], align 4, -// LLVM: [[RETV:%.*]] = load i32, ptr [[RETP]], align 4, -// LLVM-NEXT: ret i32 [[RETV]], - // LLVM: [[BB_MAY_REG]]: ; // LLVM: [[NEW_REG_OFFS:%.*]] = add i32 [[GR_OFFS]], 8, // LLVM: store i32 [[NEW_REG_OFFS]], ptr [[GR_OFFS_P]], align 4, // LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0, -// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG]], label %[[BB_ON_STACK]], +// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG:.*]], label %[[BB_ON_STACK]], // LLVM: [[BB_IN_REG]]: ; // LLVM-NEXT: [[GR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 1, // LLVM-NEXT: [[GR_TOP:%.*]] = load ptr, ptr [[GR_TOP_P]], align 8, // LLVM-NEXT: [[EXT64_GR_OFFS:%.*]] = sext i32 [[GR_OFFS]] to i64, -// LLVM-NEXT: [[IN_REG_OUTPUT]] = getelementptr i8, ptr [[GR_TOP]], i64 [[EXT64_GR_OFFS]], -// LLVM-NEXT: br label %[[BB_END]], +// LLVM-NEXT: [[IN_REG_OUTPUT:%.*]] = getelementptr i8, ptr [[GR_TOP]], i64 [[EXT64_GR_OFFS]], +// LLVM-NEXT: br label %[[BB_END:.*]], // LLVM: [[BB_ON_STACK]]: ; // LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, -// LLVM-NEXT: [[STACK_V]] = load ptr, ptr [[STACK_P]], align 8, +// LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8, // LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i32 8, // LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, // LLVM-NEXT: br label %[[BB_END]], + +// LLVM: [[BB_END]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG]] +// LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT]], %[[BB_IN_REG]] ], [ [[STACK_V]], %[[BB_ON_STACK]] ] +// LLVM-NEXT: [[PHIV:%.*]] = load i32, ptr [[PHIP]], align 4, +// LLVM-NEXT: store i32 [[PHIV]], ptr [[RESP]], align 4, +// LLVM: call void @llvm.va_end.p0(ptr [[VARLIST]]), +// LLVM: [[RES:%.*]] = load i32, ptr [[RESP]], align 4, +// LLVM: store i32 [[RES]], ptr [[RETP]], align 4, +// LLVM: [[RETV:%.*]] = load i32, ptr [[RETP]], align 4, +// LLVM-NEXT: ret i32 [[RETV]], From 3671e214fba4ee97c6530949354554a271276fde Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 11 Jun 2024 10:17:46 +0800 Subject: [PATCH 1630/2301] [CIR][CodeGen] Special treatment of 3-element extended vector load and store (#674) Continue the work of #613 . Original CodeGen treat vec3 as vec4 to get aligned memory access. This PR enable these paths. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 39 +++++++++++++++-- clang/test/CIR/CodeGen/vectype-ext.cpp | 58 ++++++++++++++++++++++++++ 2 files changed, 94 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 62640392d971..2604fdd9fdae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -598,9 +598,25 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, return; } + mlir::Type SrcTy = Value.getType(); if (const auto *ClangVecTy = Ty->getAs()) { // TODO(CIR): this has fallen out of date with codegen llvm_unreachable("NYI: Special treatment of 3-element vector store"); + // auto VecTy = dyn_cast(SrcTy); + // if (!CGM.getCodeGenOpts().PreserveVec3Type && + // ClangVecTy->getNumElements() == 3) { + // // Handle vec3 special. + // if (VecTy && VecTy.getSize() == 3) { + // // Our source is a vec3, do a shuffle vector to make it a vec4. + // Value = builder.createVecShuffle(Value.getLoc(), Value, + // ArrayRef{0, 1, 2, -1}); + // SrcTy = mlir::cir::VectorType::get(VecTy.getContext(), + // VecTy.getEltType(), 4); + // } + // if (Addr.getElementType() != SrcTy) { + // Addr = Addr.withElementType(SrcTy); + // } + // } } // Update the alloca with more info on initialization. @@ -773,7 +789,7 @@ void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src, // of the Elts constant array will be one past the size of the vector. // Ignore the last element here, if it is greater than the mask size. if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size()) - llvm_unreachable("NYI"); + NumSrcElts--; // modify when what gets shuffled in for (unsigned i = 0; i != NumSrcElts; ++i) @@ -2773,13 +2789,30 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, llvm_unreachable("NYI"); } + auto ElemTy = Addr.getElementType(); + if (const auto *ClangVecTy = Ty->getAs()) { + // Handle vectors of size 3 like size 4 for better performance. + const auto VTy = cast(ElemTy); + // TODO(CIR): this has fallen out of sync with codegen - llvm_unreachable("NYI: Special treatment of 3-element vector load"); + llvm_unreachable("NYI: Special treatment of 3-element vector store"); + // if (!CGM.getCodeGenOpts().PreserveVec3Type && + // ClangVecTy->getNumElements() == 3) { + // auto loc = Addr.getPointer().getLoc(); + // auto vec4Ty = + // mlir::cir::VectorType::get(VTy.getContext(), VTy.getEltType(), 4); + // Address Cast = Addr.withElementType(vec4Ty); + // // Now load value. + // mlir::Value V = builder.createLoad(loc, Cast); + + // // Shuffle vector to get vec3. + // V = builder.createVecShuffle(loc, V, ArrayRef{0, 1, 2}); + // return buildFromMemory(V, Ty); + // } } auto Ptr = Addr.getPointer(); - auto ElemTy = Addr.getElementType(); if (ElemTy.isa()) { ElemTy = mlir::cir::IntType::get(builder.getContext(), 8, true); auto ElemPtrTy = mlir::cir::PointerType::get(builder.getContext(), ElemTy); diff --git a/clang/test/CIR/CodeGen/vectype-ext.cpp b/clang/test/CIR/CodeGen/vectype-ext.cpp index a68ecdd78f8e..1ab3369c8c46 100644 --- a/clang/test/CIR/CodeGen/vectype-ext.cpp +++ b/clang/test/CIR/CodeGen/vectype-ext.cpp @@ -5,6 +5,7 @@ // XFAIL: * typedef int vi4 __attribute__((ext_vector_type(4))); +typedef int vi3 __attribute__((ext_vector_type(3))); typedef int vi2 __attribute__((ext_vector_type(2))); typedef double vd2 __attribute__((ext_vector_type(2))); typedef long vl2 __attribute__((ext_vector_type(2))); @@ -350,6 +351,10 @@ void test_store() { // CIR-NEXT: %[[#PVECB:]] = cir.alloca !cir.vector // LLVM-NEXT: %[[#PVECB:]] = alloca <2 x i32> + vi3 c = {}; + // CIR-NEXT: %[[#PVECC:]] = cir.alloca !cir.vector + // LLVM-NEXT: %[[#PVECC:]] = alloca <3 x i32> + a.xy = b; // CIR: %[[#LOAD4RHS:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector // CIR-NEXT: %[[#LOAD5LHS:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector @@ -389,6 +394,35 @@ void test_store() { // LLVM-NEXT: %[[#RESULT:]] = shufflevector <4 x i32> %[[#VECA]], <4 x i32> %[[#EXTVECB]], <4 x i32> // LLVM-NEXT: store <4 x i32> %[[#RESULT]], ptr %[[#PVECA]], align 16 + // OpenCL C Specification 6.3.7. Vector Components + // The suffixes .lo (or .even) and .hi (or .odd) for a 3-component vector type + // operate as if the 3-component vector type is a 4-component vector type with + // the value in the w component undefined. + b = c.hi; + + // CIR-NEXT: %[[#VECC:]] = cir.load %[[#PVECC]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#HIPART:]] = cir.vec.shuffle(%[[#VECC]], %[[#VECC]] : !cir.vector) [#cir.int<2> : !s32i, #cir.int<3> : !s32i] : !cir.vector + // CIR-NEXT: cir.store %[[#HIPART]], %[[#PVECB]] : !cir.vector, !cir.ptr> + + // LLVM-NEXT: %[[#VECC:]] = load <3 x i32>, ptr %[[#PVECC]], align 16 + // LLVM-NEXT: %[[#HIPART:]] = shufflevector <3 x i32> %[[#VECC]], <3 x i32> %[[#VECC]], <2 x i32> + // LLVM-NEXT: store <2 x i32> %[[#HIPART]], ptr %[[#PVECB]], align 8 + + // c.hi is c[2, 3], in which 3 should be ignored in CIRGen for store + c.hi = b; + + // CIR-NEXT: %[[#VECB:]] = cir.load %[[#PVECB]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#VECC:]] = cir.load %[[#PVECC]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#EXTVECB:]] = cir.vec.shuffle(%[[#VECB]], %[[#VECB]] : !cir.vector) [#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<-1> : !s32i] : !cir.vector + // CIR-NEXT: %[[#RESULT:]] = cir.vec.shuffle(%[[#VECC]], %[[#EXTVECB]] : !cir.vector) [#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<3> : !s32i] : !cir.vector + // CIR-NEXT: cir.store %[[#RESULT]], %[[#PVECC]] : !cir.vector, !cir.ptr> + + // LLVM-NEXT: %[[#VECB:]] = load <2 x i32>, ptr %[[#PVECB]], align 8 + // LLVM-NEXT: %[[#VECC:]] = load <3 x i32>, ptr %[[#PVECC]], align 16 + // LLVM-NEXT: %[[#EXTVECB:]] = shufflevector <2 x i32> %[[#VECB]], <2 x i32> %[[#VECB]], <3 x i32> + // LLVM-NEXT: %[[#RESULT:]] = shufflevector <3 x i32> %[[#VECC]], <3 x i32> %[[#EXTVECB]], <3 x i32> + // LLVM-NEXT: store <3 x i32> %[[#RESULT]], ptr %[[#PVECC]], align 16 + } // CIR: cir.func {{@.*test_build_lvalue.*}} @@ -453,3 +487,27 @@ void test_build_lvalue() { // LLVM-NEXT: store i32 %[[#RESULT]], ptr %[[#ALLOCAR]], align 4 } + +// CIR: cir.func {{@.*test_vec3.*}} +// LLVM: define void {{@.*test_vec3.*}} +void test_vec3() { + vi3 v = {}; + // CIR-NEXT: %[[#PV:]] = cir.alloca !cir.vector, !cir.ptr>, ["v", init] {alignment = 16 : i64} + // CIR: %[[#VEC4:]] = cir.vec.shuffle(%{{[0-9]+}}, %{{[0-9]+}} : !cir.vector) [#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<-1> : !s32i] : !cir.vector + // CIR-NEXT: %[[#PV4:]] = cir.cast(bitcast, %[[#PV]] : !cir.ptr>), !cir.ptr> + // CIR-NEXT: cir.store %[[#VEC4]], %[[#PV4]] : !cir.vector, !cir.ptr> + + // LLVM-NEXT: %[[#PV:]] = alloca <3 x i32>, i64 1, align 16 + // LLVM-NEXT: store <4 x i32> , ptr %[[#PV]], align 16 + + v + 1; + // CIR-NEXT: %[[#PV4:]] = cir.cast(bitcast, %[[#PV]] : !cir.ptr>), !cir.ptr> + // CIR-NEXT: %[[#V4:]] = cir.load %[[#PV4]] : !cir.ptr>, !cir.vector + // CIR-NEXT: %[[#V3:]] = cir.vec.shuffle(%[[#V4]], %[[#V4]] : !cir.vector) [#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i] : !cir.vector + // CIR: %[[#RES:]] = cir.binop(add, %[[#V3]], %{{[0-9]+}}) : !cir.vector + + // LLVM-NEXT: %[[#V4:]] = load <4 x i32>, ptr %[[#PV:]], align 16 + // LLVM-NEXT: %[[#V3:]] = shufflevector <4 x i32> %[[#V4]], <4 x i32> %[[#V4]], <3 x i32> + // LLVM-NEXT: %[[#RES:]] = add <3 x i32> %[[#V3]], splat (i32 1) + +} From 791f2c2dd15d654addd9b4bf3a8651d9dd40a039 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 11 Jun 2024 05:20:38 -0300 Subject: [PATCH 1631/2301] [CIR][ABI][NFC] Add CC lowering for void CallOps (#668) This patch implements the lowering of function calls that receive and return void. In practice, nothing has to be done (at least for the x86 ABI), so this case is used as a primer for the target lowering library since it helps populate the base logic for handling calling convention lowering of function calls. --- .../clang/CIR/Dialect/IR/CIRDataLayout.h | 13 + clang/include/clang/CIR/FnInfoOpts.h | 37 +++ clang/include/clang/CIR/MissingFeatures.h | 37 ++- clang/include/clang/CIR/Target/x86.h | 32 +++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenCall.h | 7 - clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenTypes.h | 1 + .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 4 + clang/lib/CIR/CodeGen/TargetInfo.cpp | 12 +- .../Dialect/Transforms/CallConvLowering.cpp | 18 +- .../Transforms/TargetLowering/ABIInfo.cpp | 4 + .../Transforms/TargetLowering/ABIInfo.h | 6 + .../Transforms/TargetLowering/ABIInfoImpl.cpp | 22 ++ .../Transforms/TargetLowering/ABIInfoImpl.h | 11 +- .../Transforms/TargetLowering/CIRCXXABI.h | 5 + .../TargetLowering/CIRLowerContext.cpp | 2 +- .../TargetLowering/CIRToCIRArgMapping.h | 33 ++- .../TargetLowering/ItaniumCXXABI.cpp | 14 ++ .../Transforms/TargetLowering/LowerCall.cpp | 107 +++++++++ .../Transforms/TargetLowering/LowerCall.h | 2 + .../TargetLowering/LowerFunction.cpp | 222 ++++++++++++++++++ .../Transforms/TargetLowering/LowerFunction.h | 15 ++ .../TargetLowering/LowerFunctionInfo.h | 33 ++- .../Transforms/TargetLowering/LowerModule.cpp | 16 +- .../Transforms/TargetLowering/LowerModule.h | 4 +- .../Transforms/TargetLowering/LowerTypes.cpp | 50 +++- .../Transforms/TargetLowering/LowerTypes.h | 35 ++- .../Transforms/TargetLowering/Targets/X86.cpp | 139 ++++++++++- .../x86_64-call-conv-lowering-pass.cpp} | 7 +- 31 files changed, 850 insertions(+), 42 deletions(-) create mode 100644 clang/include/clang/CIR/FnInfoOpts.h create mode 100644 clang/include/clang/CIR/Target/x86.h rename clang/test/CIR/Transforms/Target/{x86/x86-call-conv-lowering-pass.cpp => x86_64/x86_64-call-conv-lowering-pass.cpp} (69%) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h index 88e030fb424a..e820d11340ae 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h @@ -15,6 +15,7 @@ #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/IR/BuiltinOps.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/ADT/StringRef.h" namespace cir { @@ -24,6 +25,18 @@ class CIRDataLayout { public: mlir::DataLayout layout; + /// Constructs a DataLayout from a specification string. See reset(). + explicit CIRDataLayout(llvm::StringRef dataLayout, mlir::ModuleOp module) + : layout(module) { + reset(dataLayout); + } + + /// Parse a data layout string (with fallback to default values). + void reset(llvm::StringRef dataLayout); + + // Free all internal data structures. + void clear(); + CIRDataLayout(mlir::ModuleOp modOp); bool isBigEndian() const { return bigEndian; } diff --git a/clang/include/clang/CIR/FnInfoOpts.h b/clang/include/clang/CIR/FnInfoOpts.h new file mode 100644 index 000000000000..cea4d89f4c14 --- /dev/null +++ b/clang/include/clang/CIR/FnInfoOpts.h @@ -0,0 +1,37 @@ +#ifndef CIR_FNINFOOPTS_H +#define CIR_FNINFOOPTS_H + +#include "llvm/ADT/STLForwardCompat.h" + +namespace cir { + +enum class FnInfoOpts { + None = 0, + IsInstanceMethod = 1 << 0, + IsChainCall = 1 << 1, + IsDelegateCall = 1 << 2, +}; + +inline FnInfoOpts operator|(FnInfoOpts A, FnInfoOpts B) { + return static_cast(llvm::to_underlying(A) | + llvm::to_underlying(B)); +} + +inline FnInfoOpts operator&(FnInfoOpts A, FnInfoOpts B) { + return static_cast(llvm::to_underlying(A) & + llvm::to_underlying(B)); +} + +inline FnInfoOpts operator|=(FnInfoOpts A, FnInfoOpts B) { + A = A | B; + return A; +} + +inline FnInfoOpts operator&=(FnInfoOpts A, FnInfoOpts B) { + A = A & B; + return A; +} + +} // namespace cir + +#endif // CIR_FNINFOOPTS_H diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 12255d409a75..e2f554019422 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -156,7 +156,6 @@ struct MissingFeatures { static bool zeroInitializer() { return false; } static bool targetCodeGenInfoIsProtoCallVariadic() { return false; } static bool targetCodeGenInfoGetNullPointer() { return false; } - static bool chainCalls() { return false; } static bool operandBundles() { return false; } static bool exceptions() { return false; } static bool metaDataNode() { return false; } @@ -190,24 +189,50 @@ struct MissingFeatures { //===--- ABI lowering --===// + //-- Missing AST queries + + static bool recordDeclCanPassInRegisters() { return false; } + + //-- Missing types + + static bool vectorType() { return false; } + + //-- Other missing features + + // Calls with a static chain pointer argument may be optimized (p.e. freeing + // up argument registers), but we do not yet track such cases. + static bool chainCall() { return false; } + + // ABI-lowering has special handling for regcall calling convention (tries to + // pass every argument in regs). We don't support it just yet. + static bool regCall() { return false; } + + // Some ABIs (e.g. x86) require special handling for returning large structs + // by value. The sret argument parameter aids in this, but it is current NYI. + static bool sretArgs() { return false; } + + // Inalloca parameter attributes are mostly used for Windows x86_32 ABI. We + // do not yet support this yet. + static bool inallocaArgs() { return false; } + // Parameters may have additional attributes (e.g. [[noescape]]) that affect // the compiler. This is not yet supported in CIR. - static bool extParamInfo() { return true; } + static bool extParamInfo() { return false; } // LangOpts may affect lowering, but we do not carry this information into CIR // just yet. Right now, it only instantiates the default lang options. - static bool langOpts() { return true; } + static bool langOpts() { return false; } // Several type qualifiers are not yet supported in CIR, but important when // evaluating ABI-specific lowering. - static bool qualifiedTypes() { return true; } + static bool qualifiedTypes() { return false; } // We're ignoring several details regarding ABI-halding for Swift. - static bool swift() { return true; } + static bool swift() { return false; } // Despite carrying some information about variadics, we are currently // ignoring this to focus only on the code necessary to lower non-variadics. - static bool variadicFunctions() { return true; } + static bool variadicFunctions() { return false; } }; } // namespace cir diff --git a/clang/include/clang/CIR/Target/x86.h b/clang/include/clang/CIR/Target/x86.h new file mode 100644 index 000000000000..2aa2d0493aac --- /dev/null +++ b/clang/include/clang/CIR/Target/x86.h @@ -0,0 +1,32 @@ +//==-- x86.h - Definitions common to all x86 ABI variants ------------------==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Definitions common to any X86 ABI implementation. +// +//===----------------------------------------------------------------------===// + +#ifndef CIR_X86_H +#define CIR_X86_H + +namespace cir { + +// Possible argument classifications according to the x86 ABI documentation. +enum X86ArgClass { + Integer = 0, + SSE, + SSEUp, + X87, + X87Up, + ComplexX87, + NoClass, + Memory +}; + +} // namespace cir + +#endif // CIR_X86_H diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 346cce788d91..60268b72d9dd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -23,6 +23,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/FnInfoOpts.h" #include "llvm/Support/ErrorHandling.h" #include diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 866ba9af7a3b..ea8e9e546352 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -290,13 +290,6 @@ class ReturnValueSlot { Address getAddress() const { return Addr; } }; -enum class FnInfoOpts { - None = 0, - IsInstanceMethod = 1 << 0, - IsChainCall = 1 << 1, - IsDelegateCall = 1 << 2, -}; - } // namespace cir #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 2604fdd9fdae..8db9ce53f547 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1439,7 +1439,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, // Chain calls use the same code path to add the inviisble chain parameter to // the function type. if (isa(FnType) || Chain) { - assert(!MissingFeatures::chainCalls()); + assert(!MissingFeatures::chainCall()); assert(!MissingFeatures::addressSpace()); auto CalleeTy = getTypes().GetFunctionType(FnInfo); // get non-variadic function type diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index ca4d18461154..3eb2b1b455e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -16,6 +16,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/AST/RecordLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/FnInfoOpts.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 51350c9ea70e..d1d547f24a9a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -22,6 +22,7 @@ #include "clang/AST/Type.h" #include "clang/Basic/ABI.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/FnInfoOpts.h" #include "llvm/ADT/SmallPtrSet.h" diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 8af007621ba4..da1851a2b00f 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -748,3 +748,7 @@ CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { } } } + +void CIRDataLayout::reset(StringRef Desc) { clear(); } + +void CIRDataLayout::clear() {} diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 121bc2f023c1..2973a6ce70d3 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -6,6 +6,7 @@ #include "CallingConv.h" #include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Target/x86.h" using namespace cir; using namespace clang; @@ -79,16 +80,7 @@ namespace { enum class X86AVXABILevel { None, AVX, AVX512 }; class X86_64ABIInfo : public ABIInfo { - enum Class { - Integer = 0, - SSE, - SSEUp, - X87, - X87Up, - ComplexX87, - NoClass, - Memory - }; + using Class = X86ArgClass; // X86AVXABILevel AVXLevel; // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 64-bit diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 6130367d91a4..ad35d7835255 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -45,7 +45,7 @@ LowerModule createLowerModule(FuncOp op, PatternRewriter &rewriter) { // FIXME(cir): This just uses the default language options. We need to account // for custom options. // Create context. - assert(::cir::MissingFeatures::langOpts()); + assert(!::cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; auto context = CIRLowerContext(module.getContext(), langOpts); context.initBuiltinTypes(*targetInfo); @@ -64,11 +64,27 @@ struct CallConvLoweringPattern : public OpRewritePattern { LogicalResult matchAndRewrite(FuncOp op, PatternRewriter &rewriter) const final { + const auto module = op->getParentOfType(); + if (!op.getAst()) return op.emitError("function has no AST information"); LowerModule lowerModule = createLowerModule(op, rewriter); + // Rewrite function calls before definitions. This should be done before + // lowering the definition. + auto calls = op.getSymbolUses(module); + if (calls.has_value()) { + for (auto call : calls.value()) { + auto callOp = cast(call.getUser()); + if (lowerModule.rewriteFunctionCall(callOp, op).failed()) + return failure(); + } + } + + // Rewrite function definition. + // FIXME(cir): This is a workaround to avoid an infinite loop in the driver. + rewriter.replaceOp(op, rewriter.clone(*op)); return success(); } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 46a865da0670..6160174191dc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -12,6 +12,8 @@ //===----------------------------------------------------------------------===// #include "ABIInfo.h" +#include "CIRCXXABI.h" +#include "LowerTypes.h" namespace mlir { namespace cir { @@ -19,5 +21,7 @@ namespace cir { // Pin the vtable to this file. ABIInfo::~ABIInfo() = default; +CIRCXXABI &ABIInfo::getCXXABI() const { return LT.getCXXABI(); } + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h index 3fad01f3d7a8..ef5bae6d13fa 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h @@ -14,6 +14,8 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H +#include "CIRCXXABI.h" +#include "LowerFunctionInfo.h" #include "llvm/IR/CallingConv.h" namespace mlir { @@ -32,6 +34,10 @@ class ABIInfo { public: ABIInfo(LowerTypes <) : LT(LT), RuntimeCC(llvm::CallingConv::C) {} virtual ~ABIInfo(); + + CIRCXXABI &getCXXABI() const; + + virtual void computeInfo(LowerFunctionInfo &FI) const = 0; }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index c51176a99b95..ef90698054e8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -10,3 +10,25 @@ // adapted to operate on the CIR dialect, however. // //===----------------------------------------------------------------------===// + +#include "ABIInfo.h" +#include "CIRCXXABI.h" +#include "LowerFunctionInfo.h" +#include "llvm/Support/ErrorHandling.h" + +namespace mlir { +namespace cir { + +bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, + const ABIInfo &Info) { + Type Ty = FI.getReturnType(); + + if (const auto RT = Ty.dyn_cast()) { + llvm_unreachable("NYI"); + } + + return CXXABI.classifyReturnType(FI); +} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h index f34d7fb07226..d3ee18f0467b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h @@ -14,8 +14,17 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H +#include "ABIInfo.h" +#include "CIRCXXABI.h" +#include "LowerFunctionInfo.h" + namespace mlir { -namespace cir {} // namespace cir +namespace cir { + +bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, + const ABIInfo &Info); + +} // namespace cir } // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index bf5131a074b8..5496dbbf2327 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -14,6 +14,7 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H +#include "LowerFunctionInfo.h" #include "mlir/IR/Value.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" @@ -34,6 +35,10 @@ class CIRCXXABI { public: virtual ~CIRCXXABI(); + + /// If the C++ ABI requires the given type be returned in a particular way, + /// this method sets RetAI and returns true. + virtual bool classifyReturnType(LowerFunctionInfo &FI) const = 0; }; /// Creates an Itanium-family ABI. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index b75893bfb33f..7152ab081ec5 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -31,7 +31,7 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { Type Ty; // NOTE(cir): Clang does more stuff here. Not sure if we need to do the same. - assert(::cir::MissingFeatures::qualifiedTypes()); + assert(!::cir::MissingFeatures::qualifiedTypes()); switch (K) { case clang::BuiltinType::Char_S: Ty = IntType::get(getMLIRContext(), 8, true); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index 9c1dae1f3dbf..6481874bf3ab 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -17,8 +17,10 @@ #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRTOCIRARGMAPPING_H #include "CIRLowerContext.h" -#include "LoweringFunctionInfo.h" +#include "LowerFunctionInfo.h" +#include "clang/CIR/ABIArgInfo.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/Support/ErrorHandling.h" namespace mlir { namespace cir { @@ -49,9 +51,36 @@ class CIRToCIRArgMapping { public: CIRToCIRArgMapping(const CIRLowerContext &context, const LowerFunctionInfo &FI, bool onlyRequiredArgs = false) - : ArgInfo(onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {}; + : ArgInfo(onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { + construct(context, FI, onlyRequiredArgs); + }; unsigned totalIRArgs() const { return TotalIRArgs; } + + void construct(const CIRLowerContext &context, const LowerFunctionInfo &FI, + bool onlyRequiredArgs = false) { + unsigned IRArgNo = 0; + const ::cir::ABIArgInfo &RetAI = FI.getReturnInfo(); + + if (RetAI.getKind() == ::cir::ABIArgInfo::Indirect) { + llvm_unreachable("NYI"); + } + + unsigned ArgNo = 0; + unsigned NumArgs = + onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); + for (LowerFunctionInfo::const_arg_iterator _ = FI.arg_begin(); + ArgNo < NumArgs; ++_, ++ArgNo) { + llvm_unreachable("NYI"); + } + assert(ArgNo == ArgInfo.size()); + + if (::cir::MissingFeatures::inallocaArgs()) { + llvm_unreachable("NYI"); + } + + TotalIRArgs = IRArgNo; + } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index aee2620496e6..6e0fecfa44d5 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -32,10 +32,24 @@ class ItaniumCXXABI : public CIRCXXABI { public: ItaniumCXXABI(LowerModule &LM) : CIRCXXABI(LM) {} + + bool classifyReturnType(LowerFunctionInfo &FI) const override; }; } // namespace +bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { + const StructType RD = FI.getReturnType().dyn_cast(); + if (!RD) + return false; + + // If C++ prohibits us from making a copy, return by address. + if (::cir::MissingFeatures::recordDeclCanPassInRegisters()) + llvm_unreachable("NYI"); + + return false; +} + CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { switch (LM.getCXXABIKind()) { case clang::TargetCXXABI::GenericItanium: diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index e69de29bb2d1..59d736c0574f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -0,0 +1,107 @@ +#include "LowerCall.h" +#include "LowerFunctionInfo.h" +#include "LowerTypes.h" +#include "clang/CIR/FnInfoOpts.h" + +using namespace mlir; +using namespace mlir::cir; + +using FnInfoOpts = ::cir::FnInfoOpts; + +namespace { + +/// Arrange a call as unto a free function, except possibly with an +/// additional number of formal parameters considered required. +const LowerFunctionInfo & +arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, + const OperandRange &args, const FuncType fnType, + unsigned numExtraRequiredArgs, bool chainCall) { + assert(args.size() >= numExtraRequiredArgs); + + assert(!::cir::MissingFeatures::extParamInfo()); + + // In most cases, there are no optional arguments. + RequiredArgs required = RequiredArgs::All; + + // If we have a variadic prototype, the required arguments are the + // extra prefix plus the arguments in the prototype. + // FIXME(cir): Properly check if function is no-proto. + if (/*IsPrototypedFunction=*/true) { + if (fnType.isVarArg()) + llvm_unreachable("NYI"); + + if (::cir::MissingFeatures::extParamInfo()) + llvm_unreachable("NYI"); + } + + // TODO(cir): There's some CC stuff related to no-proto functions here, but + // I'm skipping it since it requires CodeGen info. Maybe we can embbed this + // information in the FuncOp during CIRGen. + + assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); + FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; + return LT.arrangeLLVMFunctionInfo(fnType.getReturnType(), opts, + fnType.getInputs(), required); +} + +} // namespace + +/// Figure out the rules for calling a function with the given formal +/// type using the given arguments. The arguments are necessary +/// because the function might be unprototyped, in which case it's +/// target-dependent in crazy ways. +const LowerFunctionInfo & +LowerTypes::arrangeFreeFunctionCall(const OperandRange args, + const FuncType fnType, bool chainCall) { + return arrangeFreeFunctionLikeCall(*this, LM, args, fnType, chainCall ? 1 : 0, + chainCall); +} + +/// Arrange the argument and result information for an abstract value +/// of a given function type. This is the method which all of the +/// above functions ultimately defer to. +/// +/// \param resultType - ABI-agnostic CIR result type. +/// \param opts - Options to control the arrangement. +/// \param argTypes - ABI-agnostic CIR argument types. +/// \param required - Information about required/optional arguments. +const LowerFunctionInfo & +LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, + ArrayRef argTypes, + RequiredArgs required) { + assert(!::cir::MissingFeatures::qualifiedTypes()); + + LowerFunctionInfo *FI = nullptr; + + // FIXME(cir): Allow user-defined CCs (e.g. __attribute__((vectorcall))). + assert(!::cir::MissingFeatures::extParamInfo()); + unsigned CC = clangCallConvToLLVMCallConv(clang::CallingConv::CC_C); + + // Construct the function info. We co-allocate the ArgInfos. + // NOTE(cir): This initial function info might hold incorrect data. + FI = LowerFunctionInfo::create( + CC, /*isInstanceMethod=*/false, /*isChainCall=*/false, + /*isDelegateCall=*/false, resultType, argTypes, required); + + // Compute ABI information. + if (CC == llvm::CallingConv::SPIR_KERNEL) { + llvm_unreachable("NYI"); + } else if (::cir::MissingFeatures::extParamInfo()) { + llvm_unreachable("NYI"); + } else { + // NOTE(cir): This corects the initial function info data. + getABIInfo().computeInfo(*FI); // FIXME(cir): Args should be set to null. + } + + // Loop over all of the computed argument and return value info. If any of + // them are direct or extend without a specified coerce type, specify the + // default now. + ::cir::ABIArgInfo &retInfo = FI->getReturnInfo(); + if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) + llvm_unreachable("NYI"); + + for (auto &_ : FI->arguments()) + llvm_unreachable("NYI"); + + return *FI; +} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h index ac54490c578f..b579f96fb436 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h @@ -22,6 +22,8 @@ namespace cir { /// Contains the address where the return value of a function can be stored, and /// whether the address is volatile or not. class ReturnValueSlot { + // FIXME(cir): We should be able to query this directly from CIR at some + // point. This class can then be removed. Value Addr = {}; // Return value slot flags diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 6215b6149786..ae341dd03f53 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -12,11 +12,16 @@ //===----------------------------------------------------------------------===// #include "LowerFunction.h" +#include "CIRToCIRArgMapping.h" #include "LowerCall.h" +#include "LowerFunctionInfo.h" #include "LowerModule.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" +#include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" namespace mlir { namespace cir { @@ -32,5 +37,222 @@ LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, : Target(LM.getTarget()), rewriter(rewriter), SrcFn(srcFn), callOp(callOp), LM(LM) {} +/// Rewrite a call operation to abide to the ABI calling convention. +/// +/// FIXME(cir): This method has partial parity to CodeGenFunction's +/// EmitCallEpxr method defined in CGExpr.cpp. This could likely be +/// removed in favor of a more direct approach. +LogicalResult LowerFunction::rewriteCallOp(CallOp op, + ReturnValueSlot retValSlot) { + + // TODO(cir): Check if BlockCall, CXXMemberCall, CUDAKernelCall, or + // CXXOperatorMember require special handling here. These should be handled in + // CIRGen, unless there is call conv or ABI-specific stuff to be handled, them + // we should do it here. + + // TODO(cir): Also check if Builtin and CXXPeseudoDtor need special handling + // here. These should be handled in CIRGen, unless there is call conv or + // ABI-specific stuff to be handled, them we should do it here. + + // NOTE(cir): There is no direct way to fetch the function type from the + // CallOp, so we fetch it from the source function. This assumes the function + // definition has not yet been lowered. + assert(SrcFn && "No source function"); + auto fnType = SrcFn.getFunctionType(); + + // Rewrite the call operation to abide to the ABI calling convention. + auto Ret = rewriteCallOp(fnType, SrcFn, op, retValSlot); + + // Replace the original call result with the new one. + if (Ret) + rewriter.replaceAllUsesWith(op.getResult(), Ret); + + return success(); +} + +/// Rewrite a call operation to abide to the ABI calling convention. +/// +/// FIXME(cir): This method has partial parity to CodeGenFunction's EmitCall +/// method defined in CGExpr.cpp. This could likely be removed in favor of a +/// more direct approach since most of the code here is exclusively CodeGen. +Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, + CallOp callOp, ReturnValueSlot retValSlot, + Value Chain) { + // NOTE(cir): Skip a bunch of function pointer stuff and AST declaration + // asserts. Also skip sanitizers, as these should likely be handled at + // CIRGen. + CallArgList Args; + if (Chain) + llvm_unreachable("NYI"); + + // NOTE(cir): Call args were already emitted in CIRGen. Skip the evaluation + // order done in CIRGen and just fetch the exiting arguments here. + Args = callOp.getArgOperands(); + + const LowerFunctionInfo &FnInfo = LM.getTypes().arrangeFreeFunctionCall( + callOp.getArgOperands(), calleeTy, /*chainCall=*/false); + + // C99 6.5.2.2p6: + // If the expression that denotes the called function has a type + // that does not include a prototype, [the default argument + // promotions are performed]. If the number of arguments does not + // equal the number of parameters, the behavior is undefined. If + // the function is defined with a type that includes a prototype, + // and either the prototype ends with an ellipsis (, ...) or the + // types of the arguments after promotion are not compatible with + // the types of the parameters, the behavior is undefined. If the + // function is defined with a type that does not include a + // prototype, and the types of the arguments after promotion are + // not compatible with those of the parameters after promotion, + // the behavior is undefined [except in some trivial cases]. + // That is, in the general case, we should assume that a call + // through an unprototyped function type works like a *non-variadic* + // call. The way we make this work is to cast to the exact type + // of the promoted arguments. + // + // Chain calls use this same code path to add the invisible chain parameter + // to the function type. + if (origCallee.getNoProto() || Chain) { + llvm_unreachable("NYI"); + } + + assert(!::cir::MissingFeatures::CUDA()); + + // TODO(cir): LLVM IR has the concept of "CallBase", which is a base class for + // all types of calls. Perhaps we should have a CIR interface to mimic this + // class. + CallOp CallOrInvoke = {}; + Value CallResult = {}; + rewriteCallOp(FnInfo, origCallee, callOp, retValSlot, Args, CallOrInvoke, + /*isMustTail=*/false, callOp.getLoc()); + + // NOTE(cir): Skipping debug stuff here. + + return CallResult; +} + +// NOTE(cir): This method has partial parity to CodeGenFunction's EmitCall +// method in CGCall.cpp. When incrementing it, use the original codegen as a +// reference: add ABI-specific stuff and skip codegen stuff. +Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, + FuncOp Callee, CallOp Caller, + ReturnValueSlot ReturnValue, + CallArgList &CallArgs, CallOp CallOrInvoke, + bool isMustTail, Location loc) { + // FIXME: We no longer need the types from CallArgs; lift up and simplify. + + // Handle struct-return functions by passing a pointer to the + // location that we would like to return into. + Type RetTy = CallInfo.getReturnType(); // ABI-agnostic type. + const ::cir::ABIArgInfo &RetAI = CallInfo.getReturnInfo(); + + FuncType IRFuncTy = LM.getTypes().getFunctionType(CallInfo); + + // NOTE(cir): Some target/ABI related checks happen here. I'm skipping them + // under the assumption that they are handled in CIRGen. + + // 1. Set up the arguments. + + // If we're using inalloca, insert the allocation after the stack save. + // FIXME: Do this earlier rather than hacking it in here! + if (StructType ArgStruct = CallInfo.getArgStruct()) { + llvm_unreachable("NYI"); + } + + CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), CallInfo); + SmallVector IRCallArgs(IRFunctionArgs.totalIRArgs()); + + // If the call returns a temporary with struct return, create a temporary + // alloca to hold the result, unless one is given to us. + if (RetAI.isIndirect() || RetAI.isCoerceAndExpand() || RetAI.isInAlloca()) { + llvm_unreachable("NYI"); + } + + assert(!::cir::MissingFeatures::swift()); + + // NOTE(cir): Skipping lifetime markers here. + + // Translate all of the arguments as necessary to match the IR lowering. + assert(CallInfo.arg_size() == CallArgs.size() && + "Mismatch between function signature & arguments."); + unsigned ArgNo = 0; + LowerFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); + for (auto I = CallArgs.begin(), E = CallArgs.end(); I != E; + ++I, ++info_it, ++ArgNo) { + llvm_unreachable("NYI"); + } + + // 2. Prepare the function pointer. + // NOTE(cir): This is not needed for CIR. + + // 3. Perform the actual call. + + // NOTE(cir): CIRGen handle when to "deactive" cleanups. We also skip some + // debugging stuff here. + + // Update the largest vector width if any arguments have vector types. + assert(!::cir::MissingFeatures::vectorType()); + + // Compute the calling convention and attributes. + + // FIXME(cir): Skipping call attributes for now. Not sure if we have to do + // this at all since we already do it for the function definition. + + // FIXME(cir): Implement the required procedures for strictfp function and + // fast-math. + + // FIXME(cir): Add missing call-site attributes here if they are + // ABI/target-specific, otherwise, do it in CIRGen. + + // NOTE(cir): Deciding whether to use Call or Invoke is done in CIRGen. + + // Rewrite the actual call operation. + // TODO(cir): Handle other types of CIR calls (e.g. cir.try_call). + // NOTE(cir): We don't know if the callee was already lowered, so we only + // fetch the name from the callee, while the return type is fetch from the + // lowering types manager. + CallOp _ = rewriter.create(loc, Caller.getCalleeAttr(), + IRFuncTy.getReturnType(), IRCallArgs); + + assert(!::cir::MissingFeatures::vectorType()); + + // NOTE(cir): Skipping some ObjC, tail-call, debug, and attribute stuff here. + + // 4. Finish the call. + + // NOTE(cir): Skipping no-return, isMustTail, swift error handling, and + // writebacks here. These should be handled in CIRGen, I think. + + // Convert return value from ABI-agnostic to ABI-aware. + Value Ret = [&] { + // NOTE(cir): CIRGen already handled the emission of the return value. We + // need only to handle the ABI-specific to ABI-agnostic cast here. + switch (RetAI.getKind()) { + case ::cir::ABIArgInfo::Ignore: + // If we are ignoring an argument that had a result, make sure to + // construct the appropriate return value for our caller. + return getUndefRValue(RetTy); + default: + llvm::errs() << "Unhandled ABIArgInfo kind: " << RetAI.getKind() << "\n"; + llvm_unreachable("NYI"); + } + }(); + + // NOTE(cir): Skipping Emissions, lifetime markers, and dtors here that should + // be handled in CIRGen. + + return Ret; +} + +// NOTE(cir): This method has partial parity to CodeGenFunction's GetUndefRValue +// defined in CGExpr.cpp. +Value LowerFunction::getUndefRValue(Type Ty) { + if (Ty.isa()) + return nullptr; + + llvm::outs() << "Missing undef handler for value type: " << Ty << "\n"; + llvm_unreachable("NYI"); +} + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h index 319751790915..6498bd705288 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h @@ -25,6 +25,8 @@ namespace mlir { namespace cir { +using CallArgList = SmallVector; + class LowerFunction { LowerFunction(const LowerFunction &) = delete; void operator=(const LowerFunction &) = delete; @@ -50,6 +52,19 @@ class LowerFunction { ~LowerFunction() = default; LowerModule &LM; // Per-module state. + + /// Rewrite a call operation to abide to the ABI calling convention. + LogicalResult rewriteCallOp(CallOp op, + ReturnValueSlot retValSlot = ReturnValueSlot()); + Value rewriteCallOp(FuncType calleeTy, FuncOp origCallee, CallOp callOp, + ReturnValueSlot retValSlot, Value Chain = nullptr); + Value rewriteCallOp(const LowerFunctionInfo &CallInfo, FuncOp Callee, + CallOp Caller, ReturnValueSlot ReturnValue, + CallArgList &CallArgs, CallOp CallOrInvoke, + bool isMustTail, Location loc); + + /// Get an appropriate 'undef' value for the given type. + Value getUndefRValue(Type Ty); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index 4344745f2478..ea7174caf6b9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -18,6 +18,7 @@ #include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/MissingFeatures.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/Support/TrailingObjects.h" namespace mlir { @@ -46,6 +47,8 @@ struct LowerFunctionInfoArgInfo { ::cir::ABIArgInfo info; // ABI-specific information. }; +// FIXME(cir): We could likely encode this information within CIR/MLIR, allowing +// us to eliminate this class. class LowerFunctionInfo final : private llvm::TrailingObjects { @@ -89,7 +92,7 @@ class LowerFunctionInfo final ArrayRef argTypes, RequiredArgs required) { // TODO(cir): Add assertions? - assert(::cir::MissingFeatures::extParamInfo()); + assert(!::cir::MissingFeatures::extParamInfo()); void *buffer = operator new(totalSizeToAlloc(argTypes.size() + 1)); LowerFunctionInfo *FI = new (buffer) LowerFunctionInfo(); @@ -115,10 +118,22 @@ class LowerFunctionInfo final return NumArgs + 1; } + typedef const ArgInfo *const_arg_iterator; + typedef ArgInfo *arg_iterator; + + MutableArrayRef arguments() { + return MutableArrayRef(arg_begin(), NumArgs); + } + + const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; } + const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; } + arg_iterator arg_begin() { return getArgsBuffer() + 1; } + arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; } + unsigned arg_size() const { return NumArgs; } bool isVariadic() const { - assert(::cir::MissingFeatures::variadicFunctions()); + assert(!::cir::MissingFeatures::variadicFunctions()); return false; } unsigned getNumRequiredArgs() const { @@ -126,6 +141,20 @@ class LowerFunctionInfo final llvm_unreachable("NYI"); return arg_size(); } + + Type getReturnType() const { return getArgsBuffer()[0].type; } + + ::cir::ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; } + const ::cir::ABIArgInfo &getReturnInfo() const { + return getArgsBuffer()[0].info; + } + + /// Return the user specified callingconvention, which has been translated + /// into an LLVM CC. + unsigned getCallingConvention() const { return CallingConvention; } + + /// Get the struct type used to represent all the arguments in memory. + StructType getArgStruct() const { return ArgStruct; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 44ef32a5ddfa..28760fea585d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -13,6 +13,7 @@ #include "LowerModule.h" #include "CIRLowerContext.h" +#include "LowerFunction.h" #include "TargetInfo.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributes.h" @@ -81,10 +82,19 @@ LogicalResult LowerModule::rewriteGlobalFunctionDefinition(FuncOp op, return failure(); } -LogicalResult LowerModule::rewriteFunctionCall(CallOp caller, FuncOp callee) { +LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint(caller); - return failure(); + rewriter.setInsertionPoint(callOp); + + // Create a new function with the ABI-specific calling convention. + if (LowerFunction(*this, rewriter, funcOp, callOp) + .rewriteCallOp(callOp) + .failed()) + return failure(); + + // Erase original ABI-agnostic call. + rewriter.eraseOp(callOp); + return success(); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index d99d40f90554..bb56cb5fef92 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -60,7 +60,7 @@ class LowerModule { // FIXME(cir): This would be in ASTContext, not CodeGenModule. clang::TargetCXXABI::Kind getCXXABIKind() const { auto kind = getTarget().getCXXABI().getKind(); - assert(::cir::MissingFeatures::langOpts()); + assert(!::cir::MissingFeatures::langOpts()); return kind; } @@ -68,7 +68,7 @@ class LowerModule { LogicalResult rewriteGlobalFunctionDefinition(FuncOp op, LowerModule &state); // Rewrite CIR CallOp to match the target ABI. - LogicalResult rewriteFunctionCall(CallOp caller, FuncOp callee); + LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index 1186da9df1e7..3d8ca6cfe61f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -12,13 +12,59 @@ //===----------------------------------------------------------------------===// #include "LowerTypes.h" +#include "CIRToCIRArgMapping.h" #include "LowerModule.h" #include "mlir/Support/LLVM.h" +#include "clang/CIR/ABIArgInfo.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" using namespace ::mlir::cir; +unsigned LowerTypes::clangCallConvToLLVMCallConv(clang::CallingConv CC) { + switch (CC) { + case clang::CC_C: + return llvm::CallingConv::C; + default: + llvm_unreachable("calling convention NYI"); + } +} + LowerTypes::LowerTypes(LowerModule &LM, StringRef DLString) - : LM(LM), queries(LM.getContext()), Target(LM.getTarget()), + : LM(LM), context(LM.getContext()), Target(LM.getTarget()), CXXABI(LM.getCXXABI()), TheABIInfo(LM.getTargetLoweringInfo().getABIInfo()), - mlirContext(LM.getMLIRContext()), DL(LM.getModule()) {} + mlirContext(LM.getMLIRContext()), DL(DLString, LM.getModule()) {} + +/// Return the ABI-specific function type for a CIR function type. +FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { + + mlir::Type resultType = {}; + const ::cir::ABIArgInfo &retAI = FI.getReturnInfo(); + switch (retAI.getKind()) { + case ::cir::ABIArgInfo::Ignore: + resultType = VoidType::get(getMLIRContext()); + break; + default: + llvm_unreachable("Missing ABIArgInfo::Kind"); + } + + CIRToCIRArgMapping IRFunctionArgs(getContext(), FI, true); + SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); + + // Add type for sret argument. + assert(!::cir::MissingFeatures::sretArgs()); + + // Add type for inalloca argument. + assert(!::cir::MissingFeatures::inallocaArgs()); + + // Add in all of the required arguments. + unsigned ArgNo = 0; + LowerFunctionInfo::const_arg_iterator it = FI.arg_begin(), + ie = it + FI.getNumRequiredArgs(); + for (; it != ie; ++it, ++ArgNo) { + llvm_unreachable("NYI"); + } + + return FuncType::get(getMLIRContext(), ArgTypes, resultType, FI.isVariadic()); +} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h index 395665d47f16..941b3d7aeab7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -17,8 +17,11 @@ #include "ABIInfo.h" #include "CIRCXXABI.h" #include "CIRLowerContext.h" +#include "LowerCall.h" #include "mlir/IR/MLIRContext.h" +#include "clang/Basic/Specifiers.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" +#include "clang/CIR/FnInfoOpts.h" namespace mlir { namespace cir { @@ -33,7 +36,7 @@ class LowerTypes { private: LowerModule &LM; - CIRLowerContext &queries; + CIRLowerContext &context; const clang::TargetInfo &Target; CIRCXXABI &CXXABI; @@ -46,11 +49,41 @@ class LowerTypes { ::cir::CIRDataLayout DL; + const ABIInfo &getABIInfo() const { return TheABIInfo; } + public: LowerTypes(LowerModule &LM, StringRef DLString); ~LowerTypes() = default; LowerModule &getLM() const { return LM; } + CIRCXXABI &getCXXABI() const { return CXXABI; } + CIRLowerContext &getContext() { return context; } + MLIRContext *getMLIRContext() { return mlirContext; } + + /// Convert clang calling convention to LLVM callilng convention. + unsigned clangCallConvToLLVMCallConv(clang::CallingConv CC); + + /// Free functions are functions that are compatible with an ordinary + /// C function pointer type. + const LowerFunctionInfo &arrangeFreeFunctionCall(const OperandRange args, + const FuncType fnType, + bool chainCall); + + /// Arrange the argument and result information for an abstract value + /// of a given function type. This is the method which all of the + /// above functions ultimately defer to. + /// + /// \param resultType - ABI-agnostic CIR result type. + /// \param opts - Options to control the arrangement. + /// \param argTypes - ABI-agnostic CIR argument types. + /// \param required - Information about required/optional arguments. + const LowerFunctionInfo &arrangeLLVMFunctionInfo(Type resultType, + ::cir::FnInfoOpts opts, + ArrayRef argTypes, + RequiredArgs required); + + /// Return the ABI-specific function type for a CIR function type. + FuncType getFunctionType(const LowerFunctionInfo &FI); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 6d2a329e6d2a..736f3a7ea301 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -1,27 +1,164 @@ + +#include "clang/CIR/Target/x86.h" #include "ABIInfo.h" +#include "ABIInfoImpl.h" #include "LowerModule.h" #include "LowerTypes.h" #include "TargetInfo.h" +#include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" #include namespace mlir { namespace cir { class X86_64ABIInfo : public ABIInfo { + using Class = ::cir::X86ArgClass; + + /// Determine the x86_64 register classes in which the given type T should be + /// passed. + /// + /// \param Lo - The classification for the parts of the type + /// residing in the low word of the containing object. + /// + /// \param Hi - The classification for the parts of the type + /// residing in the high word of the containing object. + /// + /// \param OffsetBase - The bit offset of this type in the + /// containing object. Some parameters are classified different + /// depending on whether they straddle an eightbyte boundary. + /// + /// \param isNamedArg - Whether the argument in question is a "named" + /// argument, as used in AMD64-ABI 3.5.7. + /// + /// \param IsRegCall - Whether the calling conversion is regcall. + /// + /// If a word is unused its result will be NoClass; if a type should + /// be passed in Memory then at least the classification of \arg Lo + /// will be Memory. + /// + /// The \arg Lo class will be NoClass iff the argument is ignored. + /// + /// If the \arg Lo class is ComplexX87, then the \arg Hi class will + /// also be ComplexX87. + void classify(Type T, uint64_t OffsetBase, Class &Lo, Class &Hi, + bool isNamedArg, bool IsRegCall = false) const; public: X86_64ABIInfo(LowerTypes &CGT, X86AVXABILevel AVXLevel) : ABIInfo(CGT) {} + + ::cir::ABIArgInfo classifyReturnType(Type RetTy) const; + + void computeInfo(LowerFunctionInfo &FI) const override; }; class X86_64TargetLoweringInfo : public TargetLoweringInfo { public: X86_64TargetLoweringInfo(LowerTypes &LM, X86AVXABILevel AVXLevel) : TargetLoweringInfo(std::make_unique(LM, AVXLevel)) { - assert(::cir::MissingFeatures::swift()); + assert(!::cir::MissingFeatures::swift()); } }; +void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, + bool isNamedArg, bool IsRegCall) const { + // FIXME: This code can be simplified by introducing a simple value class + // for Class pairs with appropriate constructor methods for the various + // situations. + + // FIXME: Some of the split computations are wrong; unaligned vectors + // shouldn't be passed in registers for example, so there is no chance they + // can straddle an eightbyte. Verify & simplify. + + Lo = Hi = Class::NoClass; + + Class &Current = OffsetBase < 64 ? Lo : Hi; + Current = Class::Memory; + + // FIXME(cir): There's currently no direct way to identify if a type is a + // builtin. + if (/*isBuitinType=*/true) { + if (Ty.isa()) { + Current = Class::NoClass; + } else { + llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; + llvm_unreachable("NYI"); + } + // FIXME: _Decimal32 and _Decimal64 are SSE. + // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). + return; + } + + llvm::outs() << "Missing X86 classification for non-builtin types\n"; + llvm_unreachable("NYI"); +} + +::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { + // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the + // classification algorithm. + X86_64ABIInfo::Class Lo, Hi; + classify(RetTy, 0, Lo, Hi, true); + + // Check some invariants. + assert((Hi != Class::Memory || Lo == Class::Memory) && + "Invalid memory classification."); + assert((Hi != Class::SSEUp || Lo == Class::SSE) && + "Invalid SSEUp classification."); + + switch (Lo) { + case Class::NoClass: + if (Hi == Class::NoClass) + return ::cir::ABIArgInfo::getIgnore(); + break; + default: + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); +} + +void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { + const unsigned CallingConv = FI.getCallingConvention(); + // It is possible to force Win64 calling convention on any x86_64 target by + // using __attribute__((ms_abi)). In such case to correctly emit Win64 + // compatible code delegate this call to WinX86_64ABIInfo::computeInfo. + if (CallingConv == llvm::CallingConv::Win64) { + llvm_unreachable("Win64 CC is NYI"); + } + + bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; + + // Keep track of the number of assigned registers. + unsigned NeededSSE = 0, MaxVectorWidth = 0; + + if (!::mlir::cir::classifyReturnType(getCXXABI(), FI, *this)) { + if (IsRegCall || ::cir::MissingFeatures::regCall()) { + llvm_unreachable("RegCall is NYI"); + } else + FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); + } + + // If the return value is indirect, then the hidden argument is consuming + // one integer register. + if (FI.getReturnInfo().isIndirect()) + llvm_unreachable("NYI"); + else if (NeededSSE && MaxVectorWidth) + llvm_unreachable("NYI"); + + // The chain argument effectively gives us another free register. + if (::cir::MissingFeatures::chainCall()) + llvm_unreachable("NYI"); + + // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers + // get assigned (in left-to-right order) for passing as follows... + unsigned ArgNo = 0; + for (LowerFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it, ++ArgNo) { + llvm_unreachable("NYI"); + } +} + std::unique_ptr createX86_64TargetLoweringInfo(LowerModule &LM, X86AVXABILevel AVXLevel) { return std::make_unique(LM.getTypes(), AVXLevel); diff --git a/clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp similarity index 69% rename from clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp rename to clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index e8772b24c3b8..6bb4d71d4877 100644 --- a/clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -1,5 +1,8 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// Just check if the pass is called for now. -// CHECK: module +// CHECK: @_Z4Voidv() +void Void(void) { +// CHECK: cir.call @_Z4Voidv() : () -> () + Void(); +} From fee69ed7b37e121c6ebca6e8b21f8a65bd5266ac Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 11 Jun 2024 16:27:44 -0300 Subject: [PATCH 1632/2301] [CIR][ABI][NFC] Add CC lowering for void FuncOps (#678) This patch implements the lowering of function definitions with no arguments and returns. In pratice, nothing has to be done (at least for the x86 ABI), so this case is used as a primer for the target lowering library since it helps populate the base logic for handling calling convention lowering of function definitions. --- clang/include/clang/CIR/MissingFeatures.h | 11 ++ .../Dialect/Transforms/CallConvLowering.cpp | 8 +- .../Transforms/TargetLowering/LowerCall.cpp | 166 ++++++++++++++++++ .../TargetLowering/LowerFunction.cpp | 106 +++++++++++ .../Transforms/TargetLowering/LowerFunction.h | 15 ++ .../TargetLowering/LowerFunctionInfo.h | 13 ++ .../Transforms/TargetLowering/LowerModule.cpp | 105 ++++++++++- .../Transforms/TargetLowering/LowerModule.h | 22 ++- .../Transforms/TargetLowering/LowerTypes.h | 6 + 9 files changed, 445 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index e2f554019422..d61c5e618605 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -192,11 +192,22 @@ struct MissingFeatures { //-- Missing AST queries static bool recordDeclCanPassInRegisters() { return false; } + static bool funcDeclIsCXXConstructorDecl() { return false; } + static bool funcDeclIsCXXDestructorDecl() { return false; } + static bool funcDeclIsCXXMethodDecl() { return false; } + static bool funcDeclIsInlineBuiltinDeclaration() { return false; } + static bool funcDeclIsReplaceableGlobalAllocationFunction() { return false; } + static bool qualTypeIsReferenceType() { return false; } //-- Missing types static bool vectorType() { return false; } + //-- Missing LLVM attributes + + static bool noReturn() { return false; } + static bool csmeCall() { return false; } + //-- Other missing features // Calls with a static chain pointer argument may be optimized (p.e. freeing diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index ad35d7835255..9363c7349519 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -82,9 +82,11 @@ struct CallConvLoweringPattern : public OpRewritePattern { } } - // Rewrite function definition. - // FIXME(cir): This is a workaround to avoid an infinite loop in the driver. - rewriter.replaceOp(op, rewriter.clone(*op)); + // TODO(cir): Instead of re-emmiting every load and store, bitcast arguments + // and return values to their ABI-specific counterparts when possible. + if (lowerModule.rewriteFunctionDefinition(op).failed()) + return failure(); + return success(); } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 59d736c0574f..85890532e4f9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -1,12 +1,18 @@ #include "LowerCall.h" +#include "CIRToCIRArgMapping.h" #include "LowerFunctionInfo.h" +#include "LowerModule.h" #include "LowerTypes.h" #include "clang/CIR/FnInfoOpts.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" using namespace mlir; using namespace mlir::cir; +using ABIArgInfo = ::cir::ABIArgInfo; using FnInfoOpts = ::cir::FnInfoOpts; +using MissingFeatures = ::cir::MissingFeatures; namespace { @@ -44,8 +50,150 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, fnType.getInputs(), required); } +/// Adds the formal parameters in FPT to the given prefix. If any parameter in +/// FPT has pass_object_size attrs, then we'll add parameters for those, too. +static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { + // Fast path: don't touch param info if we don't need to. + if (/*!fnTy->hasExtParameterInfos()=*/true) { + prefix.append(fnTy.getInputs().begin(), fnTy.getInputs().end()); + return; + } + + assert(MissingFeatures::extParamInfo()); + llvm_unreachable("NYI"); +} + +/// Arrange the LLVM function layout for a value of the given function +/// type, on top of any implicit parameters already stored. +/// +/// \param CGT - Abstraction for lowering CIR types. +/// \param instanceMethod - Whether the function is an instance method. +/// \param prefix - List of implicit parameters to be prepended (e.g. 'this'). +/// \param FTP - ABI-agnostic function type. +static const LowerFunctionInfo & +arrangeCIRFunctionInfo(LowerTypes &CGT, bool instanceMethod, + SmallVectorImpl &prefix, FuncType fnTy) { + assert(!MissingFeatures::extParamInfo()); + RequiredArgs Required = RequiredArgs::forPrototypePlus(fnTy, prefix.size()); + // FIXME: Kill copy. + appendParameterTypes(prefix, fnTy); + assert(!MissingFeatures::qualifiedTypes()); + Type resultType = fnTy.getReturnType(); + + FnInfoOpts opts = + instanceMethod ? FnInfoOpts::IsInstanceMethod : FnInfoOpts::None; + return CGT.arrangeLLVMFunctionInfo(resultType, opts, prefix, Required); +} + } // namespace +/// Update function with ABI-specific attributes. +/// +/// NOTE(cir): Partially copies CodeGenModule::ConstructAttributeList, but +/// focuses on ABI/Target-related attributes. +void LowerModule::constructAttributeList(StringRef Name, + const LowerFunctionInfo &FI, + FuncOp CalleeInfo, FuncOp newFn, + unsigned &CallingConv, + bool AttrOnCallSite, bool IsThunk) { + // Collect function IR attributes from the CC lowering. + // We'll collect the paramete and result attributes later. + // FIXME(cir): Codegen differentiates between CallConv and EffectiveCallConv, + // but I don't think we need to do this here. + CallingConv = FI.getCallingConvention(); + // FIXME(cir): No-return should probably be set in CIRGen (ABI-agnostic). + if (MissingFeatures::noReturn()) + llvm_unreachable("NYI"); + if (MissingFeatures::csmeCall()) + llvm_unreachable("NYI"); + + // TODO(cir): Implement AddAttributesFromFunctionProtoType here. + // TODO(cir): Implement AddAttributesFromOMPAssumes here. + assert(!MissingFeatures::openMP()); + + // TODO(cir): Skipping a bunch of AST queries here. We will need to partially + // implement some of them as this section sets target-specific attributes + // too. + // if (TargetDecl) { + // [...] + // } + + // NOTE(cir): The original code adds default and no-builtin attributes here as + // well. AFAIK, these are ABI/Target-agnostic, so it would be better handled + // in CIRGen. Regardless, I'm leaving this comment here as a heads up. + + // Override some default IR attributes based on declaration-specific + // information. + // NOTE(cir): Skipping another set of AST queries here. + + // Collect attributes from arguments and return values. + CIRToCIRArgMapping IRFunctionArgs(getContext(), FI); + + const ABIArgInfo &RetAI = FI.getReturnInfo(); + + // TODO(cir): No-undef attribute for return values partially depends on + // ABI-specific information. Maybe we should include it here. + + switch (RetAI.getKind()) { + case ABIArgInfo::Ignore: + break; + default: + llvm_unreachable("Missing ABIArgInfo::Kind"); + } + + if (!IsThunk) { + if (MissingFeatures::qualTypeIsReferenceType()) { + llvm_unreachable("NYI"); + } + } + + // Attach attributes to sret. + if (MissingFeatures::sretArgs()) { + llvm_unreachable("sret is NYI"); + } + + // Attach attributes to inalloca arguments. + if (MissingFeatures::inallocaArgs()) { + llvm_unreachable("inalloca is NYI"); + } + + // Apply `nonnull`, `dereferencable(N)` and `align N` to the `this` argument, + // unless this is a thunk function. + // FIXME: fix this properly, https://reviews.llvm.org/D100388 + if (MissingFeatures::funcDeclIsCXXMethodDecl() || + MissingFeatures::inallocaArgs()) { + llvm_unreachable("`this` argument attributes are NYI"); + } + + unsigned ArgNo = 0; + for (LowerFunctionInfo::const_arg_iterator I = FI.arg_begin(), + E = FI.arg_end(); + I != E; ++I, ++ArgNo) { + llvm_unreachable("NYI"); + } + assert(ArgNo == FI.arg_size()); +} + +/// Arrange the argument and result information for the declaration or +/// definition of the given function. +const LowerFunctionInfo &LowerTypes::arrangeFunctionDeclaration(FuncOp fnOp) { + if (MissingFeatures::funcDeclIsCXXMethodDecl()) + llvm_unreachable("NYI"); + + assert(!MissingFeatures::qualifiedTypes()); + FuncType FTy = fnOp.getFunctionType(); + + assert(!MissingFeatures::CUDA()); + + // When declaring a function without a prototype, always use a + // non-variadic type. + if (fnOp.getNoProto()) { + llvm_unreachable("NYI"); + } + + return arrangeFreeFunctionType(FTy); +} + /// Figure out the rules for calling a function with the given formal /// type using the given arguments. The arguments are necessary /// because the function might be unprototyped, in which case it's @@ -57,6 +205,24 @@ LowerTypes::arrangeFreeFunctionCall(const OperandRange args, chainCall); } +/// Arrange the argument and result information for the declaration or +/// definition of the given function. +const LowerFunctionInfo &LowerTypes::arrangeFreeFunctionType(FuncType FTy) { + SmallVector argTypes; + return ::arrangeCIRFunctionInfo(*this, /*instanceMethod=*/false, argTypes, + FTy); +} + +/// Arrange the argument and result information for the declaration or +/// definition of the given function. +const LowerFunctionInfo &LowerTypes::arrangeGlobalDeclaration(FuncOp fnOp) { + if (MissingFeatures::funcDeclIsCXXConstructorDecl() || + MissingFeatures::funcDeclIsCXXDestructorDecl()) + llvm_unreachable("NYI"); + + return arrangeFunctionDeclaration(fnOp); +} + /// Arrange the argument and result information for an abstract value /// of a given function type. This is the method which all of the /// above functions ultimately defer to. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index ae341dd03f53..edf4dfc7d4b3 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -23,6 +23,8 @@ #include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" +using ABIArgInfo = ::cir::ABIArgInfo; + namespace mlir { namespace cir { @@ -37,6 +39,110 @@ LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, : Target(LM.getTarget()), rewriter(rewriter), SrcFn(srcFn), callOp(callOp), LM(LM) {} +/// This method has partial parity with CodeGenFunction::EmitFunctionProlog from +/// the original codegen. However, it focuses on the ABI-specific details. On +/// top of that, it is also responsible for rewriting the original function. +LogicalResult +LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, + MutableArrayRef Args) { + // NOTE(cir): Skipping naked and implicit-return-zero functions here. These + // are dealt with in CIRGen. + + CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), FI); + assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); + + // If we're using inalloca, all the memory arguments are GEPs off of the last + // parameter, which is a pointer to the complete memory area. + assert(!::cir::MissingFeatures::inallocaArgs()); + + // Name the struct return parameter. + assert(!::cir::MissingFeatures::sretArgs()); + + // Track if we received the parameter as a pointer (indirect, byval, or + // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it + // into a local alloca for us. + SmallVector ArgVals; + ArgVals.reserve(Args.size()); + + // Create a pointer value for every parameter declaration. This usually + // entails copying one or more LLVM IR arguments into an alloca. Don't push + // any cleanups or do anything that might unwind. We do that separately, so + // we can push the cleanups in the correct order for the ABI. + assert(FI.arg_size() == Args.size()); + unsigned ArgNo = 0; + LowerFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); + for (MutableArrayRef::const_iterator i = Args.begin(), + e = Args.end(); + i != e; ++i, ++info_it, ++ArgNo) { + llvm_unreachable("NYI"); + } + + if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { + llvm_unreachable("NYI"); + } else { + // FIXME(cir): In the original codegen, EmitParamDecl is called here. It is + // likely that said function considers ABI details during emission, so we + // migth have to add a counter part here. Currently, it is not needed. + } + + return success(); +} + +LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { + const ABIArgInfo &RetAI = FI.getReturnInfo(); + + switch (RetAI.getKind()) { + + case ABIArgInfo::Ignore: + break; + + default: + llvm_unreachable("Unhandled ABIArgInfo::Kind"); + } + + return success(); +} + +/// Generate code for a function based on the ABI-specific information. +/// +/// This method has partial parity with CodeGenFunction::GenerateCode, but it +/// focuses on the ABI-specific details. So a lot of codegen stuff is removed. +LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, + const LowerFunctionInfo &FnInfo) { + assert(newFn && "generating code for null Function"); + auto Args = oldFn.getArguments(); + + // Emit the ABI-specific function prologue. + assert(newFn.empty() && "Function already has a body"); + rewriter.setInsertionPointToEnd(newFn.addEntryBlock()); + if (buildFunctionProlog(FnInfo, newFn, oldFn.getArguments()).failed()) + return failure(); + + // Ensure that old ABI-agnostic arguments uses were replaced. + const auto hasNoUses = [](Value val) { return val.getUses().empty(); }; + assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && "Missing RAUW?"); + + // Migrate function body to new ABI-aware function. + assert(oldFn.getBody().hasOneBlock() && + "Multiple blocks in original function not supported"); + + // Move old function body to new function. + // FIXME(cir): The merge below is not very good: will not work if SrcFn has + // multiple blocks and it mixes the new and old prologues. + rewriter.mergeBlocks(&oldFn.getBody().front(), &newFn.getBody().front(), + newFn.getArguments()); + + // FIXME(cir): What about saving parameters for corotines? Should we do + // something about it in this pass? If the change with the calling + // convention, we might have to handle this here. + + // Emit the standard function epilogue. + if (buildFunctionEpilog(FnInfo).failed()) + return failure(); + + return success(); +} + /// Rewrite a call operation to abide to the ABI calling convention. /// /// FIXME(cir): This method has partial parity to CodeGenFunction's diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h index 6498bd705288..40cdd39463e6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h @@ -53,6 +53,21 @@ class LowerFunction { LowerModule &LM; // Per-module state. + const clang::TargetInfo &getTarget() const { return Target; } + + // Build ABI/Target-specific function prologue. + LogicalResult buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, + MutableArrayRef Args); + + // Build ABI/Target-specific function epilogue. + LogicalResult buildFunctionEpilog(const LowerFunctionInfo &FI); + + // Parity with CodeGenFunction::GenerateCode. Keep in mind that several + // sections in the original function are focused on codegen unrelated to the + // ABI. Such sections are handled in CIR's codegen, not here. + LogicalResult generateCode(FuncOp oldFn, FuncOp newFn, + const LowerFunctionInfo &FnInfo); + /// Rewrite a call operation to abide to the ABI calling convention. LogicalResult rewriteCallOp(CallOp op, ReturnValueSlot retValSlot = ReturnValueSlot()); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index ea7174caf6b9..c81335c9985a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -37,6 +37,19 @@ class RequiredArgs { RequiredArgs(All_t _) : NumRequired(~0U) {} explicit RequiredArgs(unsigned n) : NumRequired(n) { assert(n != ~0U); } + /// Compute the arguments required by the given formal prototype, + /// given that there may be some additional, non-formal arguments + /// in play. + /// + /// If FD is not null, this will consider pass_object_size params in FD. + static RequiredArgs forPrototypePlus(const FuncType prototype, + unsigned additional) { + if (!prototype.isVarArg()) + return All; + + llvm_unreachable("Variadic function is NYI"); + } + bool allowsOptionalArgs() const { return NumRequired != ~0U; } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 28760fea585d..0ec4b589bb41 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -21,6 +21,8 @@ #include "mlir/Support/LogicalResult.h" #include "llvm/Support/ErrorHandling.h" +using MissingFeatures = ::cir::MissingFeatures; + namespace mlir { namespace cir { @@ -75,11 +77,108 @@ const TargetLoweringInfo &LowerModule::getTargetLoweringInfo() { return *TheTargetCodeGenInfo; } -LogicalResult LowerModule::rewriteGlobalFunctionDefinition(FuncOp op, - LowerModule &state) { +void LowerModule::setCIRFunctionAttributes(FuncOp GD, + const LowerFunctionInfo &Info, + FuncOp F, bool IsThunk) { + unsigned CallingConv; + // NOTE(cir): The method below will update the F function in-place with the + // proper attributes. + constructAttributeList(GD.getName(), Info, GD, F, CallingConv, + /*AttrOnCallSite=*/false, IsThunk); + // TODO(cir): Set Function's calling convention. +} + +/// Set function attributes for a function declaration. +/// +/// This method is based on CodeGenModule::SetFunctionAttributes but it +/// altered to consider only the ABI/Target-related bits. +void LowerModule::setFunctionAttributes(FuncOp oldFn, FuncOp newFn, + bool IsIncompleteFunction, + bool IsThunk) { + + // TODO(cir): There's some special handling from attributes related to LLVM + // intrinsics. Should we do that here as well? + + // Setup target-specific attributes. + if (!IsIncompleteFunction) + setCIRFunctionAttributes(oldFn, getTypes().arrangeGlobalDeclaration(oldFn), + newFn, IsThunk); + + // TODO(cir): Handle attributes for returned "this" objects. + + // NOTE(cir): Skipping some linkage and other global value attributes here as + // it might be better for CIRGen to handle them. + + // TODO(cir): Skipping section attributes here. + + // TODO(cir): Skipping error attributes here. + + // If we plan on emitting this inline builtin, we can't treat it as a builtin. + if (MissingFeatures::funcDeclIsInlineBuiltinDeclaration()) { + llvm_unreachable("NYI"); + } + + if (MissingFeatures::funcDeclIsReplaceableGlobalAllocationFunction()) { + llvm_unreachable("NYI"); + } + + if (MissingFeatures::funcDeclIsCXXConstructorDecl() || + MissingFeatures::funcDeclIsCXXDestructorDecl()) + llvm_unreachable("NYI"); + else if (MissingFeatures::funcDeclIsCXXMethodDecl()) + llvm_unreachable("NYI"); + + // NOTE(cir) Skipping emissions that depend on codegen options, as well as + // sanitizers handling here. Do this in CIRGen. + + if (MissingFeatures::langOpts() && MissingFeatures::openMP()) + llvm_unreachable("NYI"); + + // NOTE(cir): Skipping more things here that depend on codegen options. + + if (MissingFeatures::extParamInfo()) { + llvm_unreachable("NYI"); + } +} + +/// Rewrites an existing function to conform to the ABI. +/// +/// This method is based on CodeGenModule::EmitGlobalFunctionDefinition but it +/// considerably simplified as it tries to remove any CodeGen related code. +LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint(op); - return failure(); + + // Get ABI/target-specific function information. + const LowerFunctionInfo &FI = this->getTypes().arrangeGlobalDeclaration(op); + + // Get ABI/target-specific function type. + FuncType Ty = this->getTypes().getFunctionType(FI); + + // NOTE(cir): Skipping getAddrOfFunction and getOrCreateCIRFunction methods + // here, as they are mostly codegen logic. + + // Create a new function with the ABI-specific types. + FuncOp newFn = cast(rewriter.cloneWithoutRegions(op)); + newFn.setType(Ty); + + // NOTE(cir): The clone above will preserve any existing attributes. If there + // are high-level attributes that ought to be dropped, do it here. + + // Set up ABI-specific function attributes. + setFunctionAttributes(op, newFn, false, /*IsThunk=*/false); + if (MissingFeatures::extParamInfo()) { + llvm_unreachable("ExtraAttrs are NYI"); + } + + if (LowerFunction(*this, rewriter, op, newFn) + .generateCode(op, newFn, FI) + .failed()) + return failure(); + + // Erase original ABI-agnostic function. + rewriter.eraseOp(op); + return success(); } LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index bb56cb5fef92..74f7ed0bb5ac 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -64,8 +64,28 @@ class LowerModule { return kind; } + void + constructAttributeList(StringRef Name, const LowerFunctionInfo &FI, + FuncOp CalleeInfo, // TODO(cir): Implement CalleeInfo? + FuncOp newFn, unsigned &CallingConv, + bool AttrOnCallSite, bool IsThunk); + + void setCIRFunctionAttributes(FuncOp GD, const LowerFunctionInfo &Info, + FuncOp F, bool IsThunk); + + /// Set function attributes for a function declaration. + void setFunctionAttributes(FuncOp oldFn, FuncOp newFn, + bool IsIncompleteFunction, bool IsThunk); + + // Create a CIR FuncOp with with the given signature. + FuncOp createCIRFunction( + StringRef MangledName, FuncType Ty, FuncOp D, bool ForVTable, + bool DontDefer = false, bool IsThunk = false, + ArrayRef = {}, // TODO(cir): __attribute__(()) stuff. + bool IsForDefinition = false); + // Rewrite CIR FuncOp to match the target ABI. - LogicalResult rewriteGlobalFunctionDefinition(FuncOp op, LowerModule &state); + LogicalResult rewriteFunctionDefinition(FuncOp op); // Rewrite CIR CallOp to match the target ABI. LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h index 941b3d7aeab7..44f0d16b1bd8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -21,6 +21,7 @@ #include "mlir/IR/MLIRContext.h" #include "clang/Basic/Specifiers.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/FnInfoOpts.h" namespace mlir { @@ -65,9 +66,14 @@ class LowerTypes { /// Free functions are functions that are compatible with an ordinary /// C function pointer type. + /// FIXME(cir): Does the "free function" concept makes sense here? + const LowerFunctionInfo &arrangeFunctionDeclaration(FuncOp fnOp); const LowerFunctionInfo &arrangeFreeFunctionCall(const OperandRange args, const FuncType fnType, bool chainCall); + const LowerFunctionInfo &arrangeFreeFunctionType(FuncType FTy); + + const LowerFunctionInfo &arrangeGlobalDeclaration(FuncOp fnOp); /// Arrange the argument and result information for an abstract value /// of a given function type. This is the method which all of the From 4dd36fbcb0df48451929e9fcfaeed4e9da228d61 Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 13 Jun 2024 02:47:26 +0800 Subject: [PATCH 1633/2301] [CIR][CodeGen] Support side effects in address space casting (#673) Continue the work of #652 . Test the branch of null pointer expressions with side effects. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5 ++--- clang/test/CIR/CodeGen/address-space-conversion.cpp | 11 +++++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 85e795d79537..b969bd9ffdbb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1522,9 +1522,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // If E has side effect, it is emitted even if its final result is a // null pointer. In that case, a DCE pass should be able to // eliminate the useless instructions emitted during translating E. - if (Result.HasSideEffects) { - llvm_unreachable("NYI"); - } + if (Result.HasSideEffects) + Visit(E); return CGF.CGM.buildNullConstant(DestTy, CGF.getLoc(E->getExprLoc())); } // Since target may map different address spaces in AST to the same address diff --git a/clang/test/CIR/CodeGen/address-space-conversion.cpp b/clang/test/CIR/CodeGen/address-space-conversion.cpp index 84adaa59ac51..1490a174892a 100644 --- a/clang/test/CIR/CodeGen/address-space-conversion.cpp +++ b/clang/test/CIR/CodeGen/address-space-conversion.cpp @@ -55,3 +55,14 @@ void test_nullptr() { // LLVM: store ptr addrspace(1) null, ptr %{{[0-9]+}}, align 8 // LLVM-NEXT: store ptr addrspace(2) null, ptr %{{[0-9]+}}, align 8 } + +void test_side_effect(pi1_t b) { + pi2_t p = (pi2_t)(*b++, (int*)0); + // CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s32i), !cir.ptr + // CIR: %[[#CAST:]] = cir.const #cir.ptr : !cir.ptr + // CIR-NEXT: cir.store %[[#CAST]], %{{[0-9]+}} : !cir.ptr, !cir.ptr> + + // LLVM: %{{[0-9]+}} = getelementptr i32, ptr addrspace(1) %{{[0-9]+}}, i64 1 + // LLVM: store ptr addrspace(2) null, ptr %{{[0-9]+}}, align 8 + +} From 47e981f7c66a588ca9ef392110a03f2b7c1629a8 Mon Sep 17 00:00:00 2001 From: Krito Date: Fri, 14 Jun 2024 02:22:29 +0800 Subject: [PATCH 1634/2301] [CIR][Transforms] Move RemoveRedundantBranches logic into BrOp::fold method (#663) This pr is a part of #593 . Move RemoveRedundantBranches logic into BrOp::fold method and modify tests. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 + clang/lib/CIR/CodeGen/CIRPasses.cpp | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 36 +++++ .../CIR/Dialect/Transforms/MergeCleanups.cpp | 35 ----- clang/test/CIR/CodeGen/goto.cpp | 106 ++++++--------- clang/test/CIR/CodeGen/switch-gnurange.cpp | 10 -- clang/test/CIR/CodeGen/var-arg-scope.c | 8 +- clang/test/CIR/Lowering/ThroughMLIR/goto.cir | 14 +- clang/test/CIR/Lowering/cast.cir | 90 ++++++++----- clang/test/CIR/Lowering/dot.cir | 18 +-- clang/test/CIR/Lowering/goto.cir | 14 +- clang/test/CIR/Lowering/loop.cir | 12 +- clang/test/CIR/Lowering/loops-with-break.cir | 55 +------- .../test/CIR/Lowering/loops-with-continue.cir | 127 ++++++------------ clang/test/CIR/Lowering/region-simplify.cir | 7 +- clang/test/CIR/Lowering/scope.cir | 18 +-- clang/test/CIR/Lowering/switch.cir | 37 ++--- clang/test/CIR/Lowering/ternary.cir | 2 - clang/test/CIR/Lowering/unary-not.cir | 67 ++++++--- clang/test/CIR/Transforms/loop.cir | 16 +-- clang/test/CIR/Transforms/scope.cir | 8 +- clang/test/CIR/Transforms/switch.cir | 40 ++---- clang/test/CIR/Transforms/ternary.cir | 6 - 23 files changed, 287 insertions(+), 442 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 30f623d4f931..0baa53347656 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1599,6 +1599,8 @@ def BrOp : CIR_Op<"br", let assemblyFormat = [{ $dest (`(` $destOperands^ `:` type($destOperands) `)`)? attr-dict }]; + + let hasFolder = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index dcc613a89925..4c4982d7f599 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -91,6 +91,7 @@ namespace mlir { void populateCIRPreLoweringPasses(OpPassManager &pm) { pm.addPass(createFlattenCFGPass()); pm.addPass(createGotoSolverPass()); + pm.addPass(createMergeCleanupsPass()); } } // namespace mlir diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 6249f3f5ffd0..20531fc49a2c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -944,6 +944,42 @@ mlir::SuccessorOperands BrOp::getSuccessorOperands(unsigned index) { Block *BrOp::getSuccessorForOperands(ArrayRef) { return getDest(); } +/// Removes branches between two blocks if it is the only branch. +/// +/// From: +/// ^bb0: +/// cir.br ^bb1 +/// ^bb1: // pred: ^bb0 +/// cir.return +/// +/// To: +/// ^bb0: +/// cir.return +LogicalResult BrOp::fold(FoldAdaptor adaptor, + SmallVectorImpl &results) { + Block *block = getOperation()->getBlock(); + Block *dest = getDest(); + + if (isa(dest->front())) { + return failure(); + } + + if (block->getNumSuccessors() == 1 && dest->getSinglePredecessor() == block) { + getOperation()->erase(); + block->getOperations().splice(block->end(), dest->getOperations()); + auto eraseBlock = [](Block *block) { + for (auto &op : llvm::make_early_inc_range(*block)) + op.erase(); + block->erase(); + }; + eraseBlock(dest); + + return success(); + } + + return failure(); +} + //===----------------------------------------------------------------------===// // BrCondOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index 106065c6b6e3..d9d87a94635b 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -23,40 +23,6 @@ using namespace cir; namespace { -/// Removes branches between two blocks if it is the only branch. -/// -/// From: -/// ^bb0: -/// cir.br ^bb1 -/// ^bb1: // pred: ^bb0 -/// cir.return -/// -/// To: -/// ^bb0: -/// cir.return -struct RemoveRedundantBranches : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - LogicalResult matchAndRewrite(BrOp op, - PatternRewriter &rewriter) const final { - Block *block = op.getOperation()->getBlock(); - Block *dest = op.getDest(); - - if (isa(dest->front())) - return failure(); - - // Single edge between blocks: merge it. - if (block->getNumSuccessors() == 1 && - dest->getSinglePredecessor() == block) { - rewriter.eraseOp(op); - rewriter.mergeBlocks(dest, block); - return success(); - } - - return failure(); - } -}; - struct RemoveEmptyScope : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; @@ -104,7 +70,6 @@ struct MergeCleanupsPass : public MergeCleanupsBase { void populateMergeCleanupPatterns(RewritePatternSet &patterns) { // clang-format off patterns.add< - RemoveRedundantBranches, RemoveEmptyScope, RemoveEmptySwitch >(patterns.getContext()); diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 06870feba910..f064d1b215ad 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -159,25 +159,19 @@ int jumpIntoLoop(int* ar) { // CHECK: cir.func @_Z12jumpIntoLoopPi // CHECK: cir.brcond {{.*}} ^bb[[#BLK2:]], ^bb[[#BLK3:]] // CHECK: ^bb[[#BLK2]]: -// CHECK: cir.br ^bb[[#BODY:]] +// CHECK: cir.br ^bb[[#BLK7:]] // CHECK: ^bb[[#BLK3]]: // CHECK: cir.br ^bb[[#BLK4:]] // CHECK: ^bb[[#BLK4]]: -// CHECK: cir.br ^bb[[#RETURN:]] -// CHECK: ^bb[[#RETURN]]: // CHECK: cir.return // CHECK: ^bb[[#BLK5:]]: // CHECK: cir.br ^bb[[#BLK6:]] -// CHECK: ^bb[[#BLK6]]: -// CHECK: cir.br ^bb[[#COND:]] -// CHECK: ^bb[[#COND]]: -// CHECK: cir.brcond {{.*}} ^bb[[#BODY]], ^bb[[#EXIT:]] -// CHECK: ^bb[[#BODY]]: -// CHECK: cir.br ^bb[[#COND]] -// CHECK: ^bb[[#EXIT]]: -// CHECK: cir.br ^bb[[#BLK7:]] +// CHECK: ^bb[[#BLK6]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BLK7:]], ^bb[[#BLK8:]] // CHECK: ^bb[[#BLK7]]: -// CHECK: cir.br ^bb[[#RETURN]] +// CHECK: cir.br ^bb[[#BLK6]] +// CHECK: ^bb[[#BLK8]]: +// CHECK: cir.br ^bb[[#BLK4]] @@ -197,31 +191,21 @@ int jumpFromLoop(int* ar) { return 0; } // CHECK: cir.func @_Z12jumpFromLoopPi -// CHECK: cir.brcond {{.*}} ^bb[[#RETURN1:]], ^bb[[#BLK3:]] -// CHECK: ^bb[[#RETURN1]]: -// CHECK: cir.return -// CHECK: ^bb[[#BLK3]]: -// CHECK: cir.br ^bb[[#BLK4:]] -// CHECK: ^bb[[#BLK4]]: -// CHECK: cir.br ^bb[[#BLK5:]] -// CHECK: ^bb[[#BLK5]]: -// CHECK: cir.br ^bb[[#COND:]] -// CHECK: ^bb[[#COND]]: -// CHECK: cir.brcond {{.*}} ^bb[[#BODY:]], ^bb[[#EXIT:]] -// CHECK: ^bb[[#BODY]]: -// CHECK: cir.br ^bb[[#IF42:]] -// CHECK: ^bb[[#IF42]]: -// CHECK: cir.brcond {{.*}} ^bb[[#IF42TRUE:]], ^bb[[#IF42FALSE:]] -// CHECK: ^bb[[#IF42TRUE]]: -// CHECK: cir.br ^bb[[#RETURN1]] -// CHECK: ^bb[[#IF42FALSE]]: -// CHECK: cir.br ^bb[[#BLK11:]] -// CHECK: ^bb[[#BLK11]]: -// CHECK: cir.br ^bb[[#COND]] -// CHECK: ^bb[[#EXIT]]: -// CHECK: cir.br ^bb[[#RETURN2:]] -// CHECK: ^bb[[#RETURN2]]: -// CHECK: cir.return +// CHECK: cir.brcond {{.*}} ^bb[[#BLK1:]], ^bb[[#BLK2:]] +// CHECK: ^bb[[#BLK1:]]: +// CHECK: cir.return {{.*}} +// CHECK: ^bb[[#BLK2:]]: +// CHECK: cir.br ^bb[[#BLK3:]] +// CHECK: ^bb[[#BLK3:]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BLK4:]], ^bb[[#BLK7:]] +// CHECK: ^bb[[#BLK4:]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BLK5:]], ^bb[[#BLK6:]] +// CHECK: ^bb[[#BLK5:]]: +// CHECK: cir.br ^bb[[#BLK1:]] +// CHECK: ^bb[[#BLK6:]]: +// CHECK: cir.br ^bb[[#BLK3:]] +// CHECK: ^bb[[#BLK7:]]: +// CHECK: cir.return {{.*}} void flatLoopWithNoTerminatorInFront(int* ptr) { @@ -240,35 +224,21 @@ void flatLoopWithNoTerminatorInFront(int* ptr) { ; } -// CHECK: cir.func @_Z31flatLoopWithNoTerminatorInFrontPi -// CHECK: cir.brcond {{.*}} ^bb[[#BLK2:]], ^bb[[#BLK3:]] -// CHECK: ^bb[[#BLK2]]: -// CHECK: cir.br ^bb[[#LABEL_LOOP:]] -// CHECK: ^bb[[#BLK3]]: -// CHECK: cir.br ^bb[[#BLK4:]] -// CHECK: ^bb[[#BLK4]]: -// CHECK: cir.br ^bb[[#BLK5:]] -// CHECK: ^bb[[#BLK5]]: -// CHECK: cir.br ^bb[[#BODY:]] -// CHECK: ^bb[[#COND]]: -// CHECK: cir.brcond {{.*}} ^bb[[#BODY]], ^bb[[#EXIT:]] -// CHECK: ^bb[[#BODY]]: +// CHECK-LABEL: cir.func @_Z31flatLoopWithNoTerminatorInFrontPi +// CHECK: cir.brcond {{.*}} ^bb[[#BLK1:]], ^bb[[#BLK2:]] +// CHECK: ^bb[[#BLK1:]]: +// CHECK: cir.br ^bb[[#BLK6:]] +// CHECK: ^bb[[#BLK2:]]: +// CHECK: cir.br ^bb[[#BLK3:]] +// CHECK: ^bb[[#BLK3:]]: // 2 preds: ^bb[[#BLK2:]], ^bb[[#BLK6:]] +// CHECK: cir.brcond {{.*}} ^bb[[#BLK4:]], ^bb[[#BLK5:]] +// CHECK: ^bb[[#BLK4:]]: // CHECK: cir.br ^bb[[#BLK8:]] -// CHECK: ^bb[[#BLK8]]: -// CHECK: cir.brcond {{.*}} ^bb[[#BLK9:]], ^bb[[#BLK10:]] -// CHECK: ^bb[[#BLK9]]: -// CHECK: cir.br ^bb[[#RETURN:]] -// CHECK: ^bb[[#BLK10]]: -// CHECK: cir.br ^bb[[#BLK11:]] -// CHECK: ^bb[[#BLK11]]: -// CHECK: cir.br ^bb[[#LABEL_LOOP]] -// CHECK: ^bb[[#LABEL_LOOP]]: -// CHECK: cir.br ^bb[[#COND]] -// CHECK: ^bb[[#EXIT]]: -// CHECK: cir.br ^bb[[#BLK14:]] -// CHECK: ^bb[[#BLK14]]: -// CHECK: cir.br ^bb[[#RETURN]] -// CHECK: ^bb[[#RETURN]]: -// CHECK: cir.return -// CHECK: } -// CHECK:} \ No newline at end of file +// CHECK: ^bb[[#BLK5:]]: +// CHECK: cir.br ^bb[[#BLK6:]] +// CHECK: ^bb[[#BLK6:]]: // 2 preds: ^bb[[#BLK1:]], ^bb[[#BLK5:]] +// CHECK: cir.brcond {{.*}} ^bb[[#BLK3:]], ^bb[[#BLK7:]] +// CHECK: ^bb[[#BLK7:]]: +// CHECK: cir.br ^bb[[#BLK8:]] +// CHECK: ^bb[[#BLK8:]]: // 2 preds: ^bb[[#BLK4:]], ^bb[[#BLK7:]] +// CHECK: cir.return \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/switch-gnurange.cpp b/clang/test/CIR/CodeGen/switch-gnurange.cpp index f48a32506252..99f558f3070d 100644 --- a/clang/test/CIR/CodeGen/switch-gnurange.cpp +++ b/clang/test/CIR/CodeGen/switch-gnurange.cpp @@ -172,8 +172,6 @@ void sw3(enum letter c) { // LLVM: store i32 4, ptr %[[X]] // LLVM: br label %[[EPILOG]] // LLVM: [[EPILOG]]: -// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] -// LLVM: [[EPILOG_END]]: // LLVM-NEXT: ret void void sw4(int x) { @@ -213,8 +211,6 @@ void sw4(int x) { // LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], 167 // LLVM: br i1 %[[DIFF_CMP]], label %[[CASE_66_233]], label %[[EPILOG]] // LLVM: [[EPILOG]]: -// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] -// LLVM: [[EPILOG_END]]: // LLVM-NEXT: ret void void sw5(int x) { @@ -241,8 +237,6 @@ void sw5(int x) { // LLVM-NEXT: store i32 1, ptr %[[Y:[0-9]+]] // LLVM-NEXT: br label %[[EPILOG]] // LLVM: [[EPILOG]]: -// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] -// LLVM: [[EPILOG_END]]: // LLVM-NEXT: ret void void sw6(int x) { @@ -273,8 +267,6 @@ void sw6(int x) { // LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], -1 // LLVM-NEXT: br i1 %[[DIFF_CMP]], label %[[CASE_MIN_MAX]], label %[[EPILOG]] // LLVM: [[EPILOG]]: -// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] -// LLVM: [[EPILOG_END]]: // LLVM-NEXT: ret void void sw7(int x) { @@ -346,7 +338,5 @@ void sw7(int x) { // LLVM: [[CASE_500_600]]: // LLVM-NEXT: br label %[[EPILOG]] // LLVM: [[EPILOG]]: -// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] -// LLVM: [[EPILOG_END]]: // LLVM-NEXT: ret void diff --git a/clang/test/CIR/CodeGen/var-arg-scope.c b/clang/test/CIR/CodeGen/var-arg-scope.c index b4087d30048f..9fa92b77b276 100644 --- a/clang/test/CIR/CodeGen/var-arg-scope.c +++ b/clang/test/CIR/CodeGen/var-arg-scope.c @@ -69,9 +69,6 @@ void f1(__builtin_va_list c) { // LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } // LLVM: define void @f1(%struct.__va_list %0) // LLVM: [[VARLIST:%.*]] = alloca %struct.__va_list, i64 1, align 8, -// LLVM: br label %[[SCOPE_FRONT:.*]], - -// LLVM: [[SCOPE_FRONT]]: ; preds = %1 // LLVM: [[GR_OFFS_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 3 // LLVM: [[GR_OFFS:%.*]] = load i32, ptr [[GR_OFFS_P]], align 4, // LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[GR_OFFS]], 0, @@ -100,7 +97,4 @@ void f1(__builtin_va_list c) { // LLVM: [[BB_END]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG]] // LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT]], %[[BB_IN_REG]] ], [ [[STACK_V]], %[[BB_ON_STACK]] ] // LLVM-NEXT: [[PHIV:%.*]] = load ptr, ptr [[PHIP]], align 8, -// LLVM-NEXT: br label %[[OUT_SCOPE:.*]], - -// LLVM: [[OUT_SCOPE]]: ; preds = %[[BB_END]] -// LLVM-NEXT: ret void, +// LLVM: ret void, diff --git a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir index 6c1d5c66fffa..fd85a142d7e0 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir @@ -7,13 +7,17 @@ module { %0 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %1 = cir.const #cir.int<1> : !u32i cir.store %1, %0 : !u32i, !cir.ptr - cir.br ^bb2 - ^bb1: // no predecessors + %c = cir.const #cir.int<0> : !u32i + %cond = cir.cast(int_to_bool, %c : !u32i), !cir.bool + cir.brcond %cond ^bb1, ^bb2 + + ^bb1: %2 = cir.load %0 : !cir.ptr, !u32i %3 = cir.const #cir.int<1> : !u32i %4 = cir.binop(add, %2, %3) : !u32i cir.store %4, %0 : !u32i, !cir.ptr cir.br ^bb2 + ^bb2: // 2 preds: ^bb0, ^bb1 %5 = cir.load %0 : !cir.ptr, !u32i %6 = cir.const #cir.int<2> : !u32i @@ -25,8 +29,10 @@ module { // MLIR: module { // MLIR-NEXT: func @foo -// MLIR: cf.br ^bb1 -// MLIR: ^bb1: +// MLIR: cf.cond_br %{{.+}}, ^bb[[#BLK1:]], ^bb[[#BLK2:]] +// MLIR: ^bb[[#BLK1:]]: +// MLIR: cf.br ^bb[[#BLK2:]] +// MLIR: ^bb[[#BLK2:]]: // MLIR: return // LLVM: br label %[[Value:[0-9]+]] diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index e100e0c2f07e..a3586f6c156f 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -11,10 +11,10 @@ module { cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i, %arg2: !cir.float, %arg3: !cir.double) -> !s32i { - // CHECK: llvm.func @cStyleCasts + // CHECK: llvm.func @cStyleCasts %0 = cir.alloca !u32i, !cir.ptr, ["x1", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["x2", init] {alignment = 4 : i64} - %20 = cir.alloca !s16i, !cir.ptr, ["x4", init] {alignment = 2 : i64} + %44 = cir.alloca !s16i, !cir.ptr, ["x4", init] {alignment = 2 : i64} %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} %3 = cir.alloca !s8i, !cir.ptr, ["a", init] {alignment = 1 : i64} %4 = cir.alloca !s16i, !cir.ptr, ["b", init] {alignment = 2 : i64} @@ -22,64 +22,86 @@ module { %6 = cir.alloca !s64i, !cir.ptr, ["d", init] {alignment = 8 : i64} %7 = cir.alloca !cir.array, !cir.ptr>, ["arr"] {alignment = 4 : i64} %8 = cir.alloca !cir.ptr, !cir.ptr>, ["e", init] {alignment = 8 : i64} + %9 = cir.alloca !s8i, !cir.ptr, ["tmp1"] {alignment = 1 : i64} + %10 = cir.alloca !s16i, !cir.ptr, ["tmp2"] {alignment = 2 : i64} + %11 = cir.alloca !s64i, !cir.ptr, ["tmp3"] {alignment = 8 : i64} + %12 = cir.alloca !u64i, !cir.ptr, ["tmp4"] {alignment = 8 : i64} + %13 = cir.alloca !cir.ptr, !cir.ptr>, ["tmp5"] {alignment = 8 : i64} + %14 = cir.alloca !s32i, !cir.ptr, ["tmp6"] {alignment = 4 : i64} + %15 = cir.alloca !cir.bool, !cir.ptr, ["tmp7"] {alignment = 1 : i64} + %16 = cir.alloca !cir.float, !cir.ptr, ["tmp8"] {alignment = 4 : i64} + %17 = cir.alloca !cir.float, !cir.ptr, ["tmp9"] {alignment = 4 : i64} + %18 = cir.alloca !u32i, !cir.ptr, ["tmp10"] {alignment = 4 : i64} + %19 = cir.alloca !s32i, !cir.ptr, ["tmp11"] {alignment = 4 : i64} cir.store %arg0, %0 : !u32i, !cir.ptr cir.store %arg1, %1 : !s32i, !cir.ptr // Integer casts. - %9 = cir.load %0 : !cir.ptr, !u32i - %10 = cir.cast(integral, %9 : !u32i), !s8i + %20 = cir.load %0 : !cir.ptr, !u32i + %21 = cir.cast(integral, %20 : !u32i), !s8i // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8 - cir.store %10, %3 : !s8i, !cir.ptr - %11 = cir.load %1 : !cir.ptr, !s32i - %12 = cir.cast(integral, %11 : !s32i), !s16i + cir.store %21, %3 : !s8i, !cir.ptr + %22 = cir.load %1 : !cir.ptr, !s32i + %23 = cir.cast(integral, %22 : !s32i), !s16i // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i16 - cir.store %12, %4 : !s16i, !cir.ptr - %13 = cir.load %0 : !cir.ptr, !u32i - %14 = cir.cast(integral, %13 : !u32i), !s64i + cir.store %23, %4 : !s16i, !cir.ptr + %24 = cir.load %0 : !cir.ptr, !u32i + %25 = cir.cast(integral, %24 : !u32i), !s64i // CHECK: %{{[0-9]+}} = llvm.zext %{{[0-9]+}} : i32 to i64 - cir.store %14, %5 : !s64i, !cir.ptr - %15 = cir.load %1 : !cir.ptr, !s32i - %16 = cir.cast(integral, %15 : !s32i), !s64i + cir.store %25, %5 : !s64i, !cir.ptr + %26 = cir.load %1 : !cir.ptr, !s32i + %27 = cir.cast(integral, %26 : !s32i), !s64i // CHECK: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 - %30 = cir.cast(integral, %arg1 : !s32i), !u32i + cir.store %27, %6 : !s64i, !cir.ptr + %28 = cir.cast(integral, %arg1 : !s32i), !u32i + cir.store %28, %18 : !u32i, !cir.ptr // Should not produce a cast. - %32 = cir.cast(integral, %arg0 : !u32i), !s32i + %29 = cir.cast(integral, %arg0 : !u32i), !s32i + cir.store %29, %19 : !s32i, !cir.ptr // Should not produce a cast. - %21 = cir.load %20 : !cir.ptr, !s16i - %22 = cir.cast(integral, %21 : !s16i), !u64i + %30 = cir.load %44 : !cir.ptr, !s16i + %31 = cir.cast(integral, %30 : !s16i), !u64i // CHECK: %[[TMP:[0-9]+]] = llvm.sext %{{[0-9]+}} : i16 to i64 - %33 = cir.cast(int_to_bool, %arg1 : !s32i), !cir.bool + cir.store %31, %12 : !u64i, !cir.ptr + %32 = cir.cast(int_to_bool, %arg1 : !s32i), !cir.bool + cir.store %32, %15 : !cir.bool, !cir.ptr // CHECK: %[[#ZERO:]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %[[#CMP:]] = llvm.icmp "ne" %arg1, %[[#ZERO]] : i32 // CHECK: %{{.+}} = llvm.zext %[[#CMP]] : i1 to i8 // Pointer casts. - cir.store %16, %6 : !s64i, !cir.ptr - %17 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr - cir.store %17, %8 : !cir.ptr, !cir.ptr> + %33 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr + cir.store %33, %8 : !cir.ptr, !cir.ptr> // CHECK: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, i32 - %23 = cir.cast(int_to_ptr, %22 : !u64i), !cir.ptr + %34 = cir.cast(int_to_ptr, %31 : !u64i), !cir.ptr + cir.store %34, %13 : !cir.ptr, !cir.ptr> // CHECK: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr - %24 = cir.cast(ptr_to_int, %23 : !cir.ptr), !s32i + %35 = cir.cast(ptr_to_int, %34 : !cir.ptr), !s32i + cir.store %35, %14 : !s32i, !cir.ptr // CHECK: %{{[0-9]+}} = llvm.ptrtoint %[[TMP2]] : !llvm.ptr to i32 - %29 = cir.cast(ptr_to_bool, %23 : !cir.ptr), !cir.bool + %36 = cir.cast(ptr_to_bool, %34 : !cir.ptr), !cir.bool + cir.store %36, %15 : !cir.bool, !cir.ptr // Floating point casts. - %25 = cir.cast(int_to_float, %arg1 : !s32i), !cir.float + %37 = cir.cast(int_to_float, %arg1 : !s32i), !cir.float + cir.store %37, %16 : !cir.float, !cir.ptr // CHECK: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32 - %26 = cir.cast(int_to_float, %arg0 : !u32i), !cir.float + %38 = cir.cast(int_to_float, %arg0 : !u32i), !cir.float + cir.store %38, %16 : !cir.float, !cir.ptr // CHECK: %{{.+}} = llvm.uitofp %{{.+}} : i32 to f32 - %27 = cir.cast(float_to_int, %arg2 : !cir.float), !s32i + %39 = cir.cast(float_to_int, %arg2 : !cir.float), !s32i + cir.store %39, %14 : !s32i, !cir.ptr // CHECK: %{{.+}} = llvm.fptosi %{{.+}} : f32 to i32 - %28 = cir.cast(float_to_int, %arg2 : !cir.float), !u32i + %40 = cir.cast(float_to_int, %arg2 : !cir.float), !u32i + cir.store %40, %18 : !u32i, !cir.ptr // CHECK: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 - %18 = cir.const #cir.int<0> : !s32i - // CHECK: %{{.+}} = llvm.fptrunc %{{.+}} : f64 to f32 - %34 = cir.cast(floating, %arg3 : !cir.double), !cir.float + %41 = cir.cast(floating, %arg3 : !cir.double), !cir.float + cir.store %41, %17 : !cir.float, !cir.ptr - cir.store %18, %2 : !s32i, !cir.ptr - %19 = cir.load %2 : !cir.ptr, !s32i - cir.return %19 : !s32i + %42 = cir.const #cir.int<0> : !s32i + cir.store %42, %2 : !s32i, !cir.ptr + %43 = cir.load %2 : !cir.ptr, !s32i + cir.return %43 : !s32i } cir.func @testBoolToIntCast(%arg0: !cir.bool) { diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 5c5ed4736f7a..b0df3eecdf85 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -68,22 +68,20 @@ module { // MLIR: llvm.store {{.*}}, %[[VAL_8]] {{.*}}: i32, !llvm.ptr // MLIR: %[[VAL_13:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 // MLIR: llvm.store %[[VAL_13]], %[[VAL_12]] {{.*}}: f64, !llvm.ptr -// MLIR: llvm.br ^bb1 -// MLIR: ^bb1: // MLIR: %[[VAL_14:.*]] = llvm.mlir.constant(1 : index) : i64 // MLIR: %[[VAL_15:.*]] = llvm.alloca %[[VAL_14]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR: %[[VAL_16:.*]] = llvm.mlir.constant(0 : i32) : i32 // MLIR: llvm.store %[[VAL_16]], %[[VAL_15]] {{.*}}: i32, !llvm.ptr -// MLIR: llvm.br ^bb2 -// MLIR: ^bb2: +// MLIR: llvm.br ^bb1 +// MLIR: ^bb1: // MLIR: %[[VAL_17:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_18:.*]] = llvm.load %[[VAL_8]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_19:.*]] = llvm.icmp "slt" %[[VAL_17]], %[[VAL_18]] : i32 // MLIR: %[[VAL_20:.*]] = llvm.zext %[[VAL_19]] : i1 to i32 // MLIR: %[[VAL_21:.*]] = llvm.mlir.constant(0 : i32) : i32 // MLIR: %[[VAL_22:.*]] = llvm.icmp "ne" %[[VAL_20]], %[[VAL_21]] : i32 -// MLIR: llvm.cond_br %[[VAL_22]], ^bb3, ^bb5 -// MLIR: ^bb3: +// MLIR: llvm.cond_br %[[VAL_22]], ^bb2, ^bb3 +// MLIR: ^bb2: // MLIR: %[[VAL_23:.*]] = llvm.load %[[VAL_4]] {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr // MLIR: %[[VAL_24:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_25:.*]] = llvm.sext %[[VAL_24]] : i32 to i64 @@ -98,16 +96,12 @@ module { // MLIR: %[[VAL_34:.*]] = llvm.load %[[VAL_12]] {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: %[[VAL_35:.*]] = llvm.fadd %[[VAL_34]], %[[VAL_33]] : f64 // MLIR: llvm.store %[[VAL_35]], %[[VAL_12]] {{.*}}: f64, !llvm.ptr -// MLIR: llvm.br ^bb4 -// MLIR: ^bb4: // MLIR: %[[VAL_36:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_37:.*]] = llvm.mlir.constant(1 : i32) : i32 // MLIR: %[[VAL_38:.*]] = llvm.add %[[VAL_36]], %[[VAL_37]] : i32 // MLIR: llvm.store %[[VAL_38]], %[[VAL_15]] {{.*}}: i32, !llvm.ptr -// MLIR: llvm.br ^bb2 -// MLIR: ^bb5: -// MLIR: llvm.br ^bb6 -// MLIR: ^bb6: +// MLIR: llvm.br ^bb1 +// MLIR: ^bb3: // MLIR: %[[VAL_39:.*]] = llvm.load %[[VAL_12]] {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: llvm.store %[[VAL_39]], %[[VAL_10]] {{.*}}: f64, !llvm.ptr // MLIR: %[[VAL_40:.*]] = llvm.load %[[VAL_10]] {alignment = 8 : i64} : !llvm.ptr -> f64 diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index a98dceda4c17..0050e5393ed3 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -37,18 +37,14 @@ module { // MLIR: %[[#Zero:]] = llvm.mlir.constant(0 : i32) : i32 // MLIR: llvm.cond_br {{.*}}, ^bb[[#COND_YES:]], ^bb[[#COND_NO:]] // MLIR: ^bb[[#COND_YES]]: -// MLIR: llvm.br ^bb[[#GOTO_BLK:]] -// MLIR: ^bb[[#COND_NO]]: -// MLIR: llvm.br ^bb[[#BLK:]] -// MLIR: ^bb[[#BLK]]: -// MLIR: llvm.store %[[#Zero]], %[[#Ret_val_addr:]] {{.*}}: i32, !llvm.ptr +// MLIR: %[[#Neg_one:]] = llvm.sub %[[#Zero]], %[[#One]] : i32 +// MLIR: llvm.store %[[#Neg_one]], %[[#Ret_val_addr:]] {{.*}}: i32, !llvm.ptr // MLIR: llvm.br ^bb[[#RETURN:]] +// MLIR: ^bb[[#COND_NO]]: +// MLIR: llvm.store %[[#Zero]], %[[#Ret_val_addr]] {{.*}}: i32, !llvm.ptr +// MLIR: llvm.br ^bb[[#RETURN]] // MLIR: ^bb[[#RETURN]]: // MLIR: %[[#Ret_val:]] = llvm.load %[[#Ret_val_addr]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: llvm.return %[[#Ret_val]] : i32 -// MLIR: ^bb[[#GOTO_BLK]]: -// MLIR: %[[#Neg_one:]] = llvm.sub %[[#Zero]], %[[#One]] : i32 -// MLIR: llvm.store %[[#Neg_one]], %[[#Ret_val_addr]] {{.*}}: i32, !llvm.ptr -// MLIR: llvm.br ^bb[[#RETURN]] // MLIR: } } diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index d15479a76a0d..3e9a47e80f8f 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -22,8 +22,6 @@ module { // CHECK: ^bb[[#COND]]: // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: -// CHECK: llvm.br ^bb[[#STEP:]] -// CHECK: ^bb[[#STEP]]: // CHECK: llvm.br ^bb[[#COND]] // CHECK: ^bb[[#EXIT]]: @@ -60,11 +58,9 @@ module { } // CHECK: @testDoWhile -// CHECK: llvm.br ^bb[[#BODY:]] +// CHECK: llvm.br ^bb[[#COND]] // CHECK: ^bb[[#COND:]]: // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] -// CHECK: ^bb[[#BODY]]: -// CHECK: llvm.br ^bb[[#COND]] // CHECK: ^bb[[#EXIT]]: @@ -83,8 +79,6 @@ module { } // CHECK: @testWhileWithBreakTerminatedBody -// CHECK: llvm.br ^bb[[#COND:]] -// CHECK: ^bb[[#COND]]: // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // CHECK: llvm.br ^bb[[#EXIT]] @@ -115,12 +109,8 @@ module { // CHECK: ^bb[[#COND:]]: // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: -// CHECK: llvm.br ^bb[[#SCOPE_IN:]] -// CHECK: ^bb[[#SCOPE_IN]]: // CHECK: llvm.br ^bb[[#EXIT]] // CHECK: ^bb[[#SCOPE_EXIT:]]: -// CHECK: llvm.br ^bb[[#STEP:]] -// CHECK: ^bb[[#STEP]]: // CHECK: llvm.br ^bb[[#COND]] // CHECK: ^bb[[#EXIT]]: } diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index 34b6bfd7618e..9b9090c12900 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -44,21 +44,11 @@ module { // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK1:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#preBREAK1]]: - // CHECK: llvm.br ^bb[[#preBREAK2:]] - // CHECK: ^bb[[#preBREAK2]]: - // CHECK: llvm.br ^bb[[#BREAK:]] - // CHECK: ^bb[[#BREAK]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preBODY0:]] // CHECK: ^bb[[#preEXIT1]]: // CHECK: llvm.br ^bb[[#EXIT:]] // CHECK: ^bb[[#preBODY0]]: - // CHECK: llvm.br ^bb[[#preBODY1:]] - // CHECK: ^bb[[#preBODY1]]: - // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#BODY]]: - // CHECK: llvm.br ^bb[[#STEP:]] - // CHECK: ^bb[[#STEP]]: // [...] // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: @@ -127,40 +117,21 @@ module { // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED1:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#preNESTED1]]: - // CHECK: llvm.br ^bb[[#preNESTED2:]] - // CHECK: ^bb[[#preNESTED2]]: - // CHECK: llvm.br ^bb[[#NESTED:]] - // CHECK: ^bb[[#NESTED]]: // [...] // CHECK: llvm.br ^bb[[#COND_NESTED:]] // CHECK: ^bb[[#COND_NESTED]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK1:]], ^bb[[#EXIT_NESTED:]] // CHECK: ^bb[[#preBREAK1]]: - // CHECK: llvm.br ^bb[[#preBREAK2:]] - // CHECK: ^bb[[#preBREAK2]]: - // CHECK: llvm.br ^bb[[#BREAK:]] - // CHECK: ^bb[[#BREAK]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT2:]], ^bb[[#preBODY0:]] // CHECK: ^bb[[#preEXIT2]]: // CHECK: llvm.br ^bb[[#EXIT_NESTED:]] // CHECK: ^bb[[#preBODY0]]: - // CHECK: llvm.br ^bb[[#preBODY1:]] - // CHECK: ^bb[[#preBODY1]]: - // CHECK: llvm.br ^bb[[#BODY_NESTED:]] - // CHECK: ^bb[[#BODY_NESTED]]: - // CHECK: llvm.br ^bb[[#STEP_NESTED:]] - // CHECK: ^bb[[#STEP_NESTED]]: // [...] // CHECK: llvm.br ^bb[[#COND_NESTED:]] // CHECK: ^bb[[#EXIT_NESTED]]: // [...] - // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#BODY]]: - // CHECK: llvm.br ^bb[[#STEP:]] - // CHECK: ^bb[[#STEP]]: - // [...] // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: // [...] @@ -205,18 +176,11 @@ module { // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] - // CHECK: llvm.br ^bb[[#BREAK:]] - // CHECK: ^bb[[#BREAK]]: - // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preCOND0:]] // CHECK: ^bb[[#preEXIT1]]: - // CHECK: llvm.br ^bb[[#preEXIT2:]] + // CHECK: llvm.br ^bb[[#EXIT:]] // CHECK: ^bb[[#preCOND0]]: - // CHECK: llvm.br ^bb[[#preCOND1:]] - // CHECK: ^bb[[#preCOND1]]: // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#preEXIT2]]: - // CHECK: llvm.br ^bb[[#EXIT:]] // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } @@ -256,21 +220,12 @@ cir.func @testDoWhile() { // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#BODY]]: - // [...] - // CHECK: llvm.br ^bb[[#BREAK:]] - // CHECK: ^bb[[#BREAK]]: - // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preCOND0:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#BODY:]] // CHECK: ^bb[[#preEXIT1]]: - // CHECK: llvm.br ^bb[[#preEXIT2:]] - // CHECK: ^bb[[#preCOND0]]: - // CHECK: llvm.br ^bb[[#preCOND1:]] - // CHECK: ^bb[[#preCOND1]]: - // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#preEXIT2]]: // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: ^bb[[#BODY]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#COND]], ^bb[[#EXIT]] // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir index 0371d416b61d..c3fb6406bd6b 100644 --- a/clang/test/CIR/Lowering/loops-with-continue.cir +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -37,33 +37,21 @@ module { cir.return } - // CHECK: llvm.func @testFor() - // [...] - // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#COND]]: - // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE1:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#preCONTINUE1]]: - // CHECK: llvm.br ^bb[[#preCONTINUE2:]] - // CHECK: ^bb[[#preCONTINUE2]]: - // CHECK: llvm.br ^bb[[#CONTINUE:]] - // CHECK: ^bb[[#CONTINUE]]: - // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preSTEP:]], ^bb[[#preBODY0:]] - // CHECK: ^bb[[#preSTEP]]: - // CHECK: llvm.br ^bb[[#STEP:]] - // CHECK: ^bb[[#preBODY0]]: - // CHECK: llvm.br ^bb[[#preBODY1:]] - // CHECK: ^bb[[#preBODY1]]: - // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#BODY]]: - // CHECK: llvm.br ^bb[[#STEP:]] - // CHECK: ^bb[[#STEP]]: - // [...] - // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#EXIT]]: - // [...] - // CHECK: } +// CHECK: llvm.func @testFor() +// CHECK: llvm.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: llvm.cond_br %8, ^bb[[#CONTINUE:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#CONTINUE]]: +// CHECK: llvm.cond_br %14, ^bb[[#preSTEP:]], ^bb[[#BODY:]] +// CHECK: ^bb[[#preSTEP]]: +// CHECK: llvm.br ^bb[[#STEP:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: llvm.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: llvm.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: llvm.return +// CHECK: } cir.func @testForNested() { @@ -121,50 +109,27 @@ module { cir.return } - // CHECK: llvm.func @testForNested() - // [...] - // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#COND]]: - // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED1:]], ^bb[[#EXIT:]] - // CHECK: ^bb[[#preNESTED1]]: - // CHECK: llvm.br ^bb[[#preNESTED2:]] - // CHECK: ^bb[[#preNESTED2]]: - // CHECK: llvm.br ^bb[[#NESTED:]] - // CHECK: ^bb[[#NESTED]]: - // [...] - // CHECK: llvm.br ^bb[[#COND_NESTED:]] - // CHECK: ^bb[[#COND_NESTED]]: - // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE1:]], ^bb[[#EXIT_NESTED:]] - // CHECK: ^bb[[#preCONTINUE1]]: - // CHECK: llvm.br ^bb[[#preCONTINUE2:]] - // CHECK: ^bb[[#preCONTINUE2]]: - // CHECK: llvm.br ^bb[[#CONTINUE:]] - // CHECK: ^bb[[#CONTINUE]]: - // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preSTEP0:]], ^bb[[#preBODY0:]] - // CHECK: ^bb[[#preSTEP0]]: - // CHECK: llvm.br ^bb[[#STEP_NESTED:]] - // CHECK: ^bb[[#preBODY0]]: - // CHECK: llvm.br ^bb[[#preBODY1:]] - // CHECK: ^bb[[#preBODY1]]: - // CHECK: llvm.br ^bb[[#BODY_NESTED:]] - // CHECK: ^bb[[#BODY_NESTED]]: - // CHECK: llvm.br ^bb[[#STEP_NESTED:]] - // CHECK: ^bb[[#STEP_NESTED]]: - // [...] - // CHECK: llvm.br ^bb[[#COND_NESTED:]] - // CHECK: ^bb[[#EXIT_NESTED]]: - // CHECK: llvm.br ^bb[[#BODY:]] - // CHECK: ^bb[[#BODY]]: - // CHECK: llvm.br ^bb[[#STEP:]] - // CHECK: ^bb[[#STEP]]: - // [...] - // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#EXIT]]: - // [...] - // CHECK: } +// CHECK: llvm.func @testForNested() +// CHECK: llvm.br ^bb[[#COND1:]] +// CHECK: ^bb[[#COND1]]: +// CHECK: llvm.cond_br %{{.+}}, ^bb[[#LOOP1BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#LOOP1BODY]]: +// CHECK: llvm.br ^bb[[#COND2:]] +// CHECK: ^bb[[#COND2]]: +// CHECK: llvm.cond_br %{{.+}}, ^bb[[#LOOP2BODY:]], ^bb[[#LOOP1CONTINUE:]] +// CHECK: ^bb[[#LOOP2BODY]]: +// CHECK: llvm.cond_br %{{.+}}, ^bb[[#IFBODY1:]], ^bb[[#IFBODY2:]] +// CHECK: ^bb[[#IFBODY1]]: +// CHECK: llvm.br ^bb[[#STEP2:]] +// CHECK: ^bb[[#IFBODY2]]: +// CHECK: llvm.br ^bb[[#STEP2:]] +// CHECK: ^bb[[#STEP2]]: +// CHECK: llvm.br ^bb[[#COND2]] +// CHECK: ^bb[[#LOOP1CONTINUE]]: +// CHECK: llvm.br ^bb[[#COND1]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: llvm.return +// CHECK: } cir.func @testWhile() { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} @@ -198,22 +163,14 @@ cir.func @testWhile() { // CHECK: llvm.func @testWhile() // [...] - // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#COND]]: - // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] - // CHECK: llvm.br ^bb[[#CONTINUE:]] - // CHECK: ^bb[[#CONTINUE]]: - // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCOND0:]], ^bb[[#preCOND1:]] // CHECK: ^bb[[#preCOND0]]: - // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: llvm.br ^bb[[#COND]] // CHECK: ^bb[[#preCOND1]]: - // CHECK: llvm.br ^bb[[#preCOND2:]] - // CHECK: ^bb[[#preCOND2]]: - // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: llvm.br ^bb[[#COND]] // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } @@ -251,21 +208,13 @@ cir.func @testWhile() { // CHECK: llvm.func @testDoWhile() // [...] - // CHECK: llvm.br ^bb[[#COND:]] - // CHECK: ^bb[[#COND]]: - // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] - // CHECK: llvm.br ^bb[[#CONTINUE:]] - // CHECK: ^bb[[#CONTINUE]]: - // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCOND0:]], ^bb[[#preCOND1:]] // CHECK: ^bb[[#preCOND0]]: // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#preCOND1]]: - // CHECK: llvm.br ^bb[[#preCOND2:]] - // CHECK: ^bb[[#preCOND2]]: // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: // [...] diff --git a/clang/test/CIR/Lowering/region-simplify.cir b/clang/test/CIR/Lowering/region-simplify.cir index 5f32205cb032..0ebedfc6eb62 100644 --- a/clang/test/CIR/Lowering/region-simplify.cir +++ b/clang/test/CIR/Lowering/region-simplify.cir @@ -1,5 +1,5 @@ -// RUN: cir-opt %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-opt %s -canonicalize -o - | cir-translate -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -cir-to-llvm -canonicalize -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -cir-to-llvm -canonicalize -o - | cir-translate -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !u32i = !cir.int @@ -8,7 +8,7 @@ module { %0 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %1 = cir.const #cir.int<1> : !u32i cir.store %1, %0 : !u32i, !cir.ptr - cir.br ^bb2 + cir.goto "err" ^bb1: // no predecessors %2 = cir.load %0 : !cir.ptr, !u32i %3 = cir.const #cir.int<1> : !u32i @@ -16,6 +16,7 @@ module { cir.store %4, %0 : !u32i, !cir.ptr cir.br ^bb2 ^bb2: // 2 preds: ^bb0, ^bb1 + cir.label "err" %5 = cir.load %0 : !cir.ptr, !u32i %6 = cir.const #cir.int<2> : !u32i %7 = cir.binop(add, %5, %6) : !u32i diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index add46429cba2..e04272cbec12 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -14,26 +14,16 @@ module { } // MLIR: llvm.func @foo() -// MLIR-NEXT: llvm.br ^bb1 -// MLIR-NEXT: ^bb1: // MLIR-DAG: [[v1:%[0-9]]] = llvm.mlir.constant(4 : i32) : i32 // MLIR-DAG: [[v2:%[0-9]]] = llvm.mlir.constant(1 : index) : i64 // MLIR-DAG: [[v3:%[0-9]]] = llvm.alloca [[v2]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.store [[v1]], [[v3]] {{.*}}: i32, !llvm.ptr -// MLIR-NEXT: llvm.br ^bb2 -// MLIR-NEXT: ^bb2: // MLIR-NEXT: llvm.return // LLVM: define void @foo() -// LLVM-NEXT: br label %1 -// LLVM-EMPTY: -// LLVM-NEXT: 1: -// LLVM-NEXT: %2 = alloca i32, i64 1, align 4 -// LLVM-NEXT: store i32 4, ptr %2, align 4 -// LLVM-NEXT: br label %3 -// LLVM-EMPTY: -// LLVM-NEXT: 3: +// LLVM: %1 = alloca i32, i64 1, align 4 +// LLVM-NEXT: store i32 4, ptr %1, align 4 // LLVM-NEXT: ret void // LLVM-NEXT: } @@ -64,13 +54,11 @@ module { // MLIR: llvm.func @scope_with_return() // MLIR-NEXT: [[v0:%.*]] = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: [[v1:%.*]] = llvm.alloca [[v0]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr - // MLIR-NEXT: llvm.br ^bb1 - // MLIR-NEXT: ^bb1: // pred: ^bb0 // MLIR-NEXT: [[v2:%.*]] = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: llvm.store [[v2]], [[v1]] {{.*}}: i32, !llvm.ptr // MLIR-NEXT: [[v3:%.*]] = llvm.load [[v1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return [[v3]] : i32 - // MLIR-NEXT: ^bb2: // no predecessors + // MLIR-NEXT: ^bb1: // no predecessors // MLIR-NEXT: [[v4:%.*]] = llvm.load [[v1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return [[v4]] : i32 // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index dee8e98db858..b362753145ce 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -122,17 +122,14 @@ module { cir.return } // CHECK: llvm.func @shouldLowerMultiBlockCase - // CHECK: ^bb1: // pred: ^bb0 - // CHECK: llvm.switch {{.*}} : i32, ^bb4 [ - // CHECK: 3: ^bb2 + // CHECK: llvm.switch {{.*}} : i32, ^bb3 [ + // CHECK: 3: ^bb1 // CHECK: ] - // CHECK: ^bb2: // pred: ^bb1 + // CHECK: ^bb1: // pred: ^bb0 // CHECK: llvm.return - // CHECK: ^bb3: // no predecessors - // CHECK: llvm.br ^bb4 - // CHECK: ^bb4: // 2 preds: ^bb1, ^bb3 - // CHECK: llvm.br ^bb5 - // CHECK: ^bb5: // pred: ^bb4 + // CHECK: ^bb2: // no predecessors + // CHECK: llvm.br ^bb3 + // CHECK: ^bb3: // 2 preds: ^bb0, ^bb2 // CHECK: llvm.return // CHECK: } @@ -165,21 +162,15 @@ module { cir.return %4 : !s32i } // CHECK: llvm.func @shouldLowerNestedBreak - // CHECK: llvm.switch %6 : i32, ^bb7 [ - // CHECK: 0: ^bb2 + // CHECK: llvm.switch %6 : i32, ^bb4 [ + // CHECK: 0: ^bb1 // CHECK: ] + // CHECK: ^bb1: // pred: ^bb0 + // CHECK: llvm.cond_br {{%.*}}, ^bb2, ^bb3 // CHECK: ^bb2: // pred: ^bb1 - // CHECK: llvm.br ^bb3 - // CHECK: ^bb3: // pred: ^bb2 - // CHECK: llvm.cond_br {{%.*}}, ^bb4, ^bb5 - // CHECK: ^bb4: // pred: ^bb3 - // CHECK: llvm.br ^bb7 - // CHECK: ^bb5: // pred: ^bb3 - // CHECK: llvm.br ^bb6 - // CHECK: ^bb6: // pred: ^bb5 - // CHECK: llvm.br ^bb7 - // CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 - // CHECK: llvm.br ^bb8 - // CHECK: ^bb8: // pred: ^bb7 + // CHECK: llvm.br ^bb4 + // CHECK: ^bb3: // pred: ^bb1 + // CHECK: llvm.br ^bb4 + // CHECK: ^bb4: // 3 preds: ^bb0, ^bb2, ^bb3 // CHECK: llvm.return } diff --git a/clang/test/CIR/Lowering/ternary.cir b/clang/test/CIR/Lowering/ternary.cir index 6e469f388d79..b79d7eac726f 100644 --- a/clang/test/CIR/Lowering/ternary.cir +++ b/clang/test/CIR/Lowering/ternary.cir @@ -41,8 +41,6 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: %8 = llvm.mlir.constant(5 : i32) : i32 // MLIR-NEXT: llvm.br ^bb3(%8 : i32) // MLIR-NEXT: ^bb3(%9: i32): // 2 preds: ^bb1, ^bb2 -// MLIR-NEXT: llvm.br ^bb4 -// MLIR-NEXT: ^bb4: // pred: ^bb3 // MLIR-NEXT: llvm.store %9, %3 {{.*}}: i32, !llvm.ptr // MLIR-NEXT: %10 = llvm.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return %10 : i32 diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index 48e2705e756d..b1eac868133e 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -22,41 +22,67 @@ module { cir.func @floatingPoint(%arg0: !cir.float, %arg1: !cir.double) { - // MLIR: llvm.func @floatingPoint + // MLIR: llvm.func @floatingPoint %0 = cir.alloca !cir.float, !cir.ptr, ["f", init] {alignment = 4 : i64} %1 = cir.alloca !cir.double, !cir.ptr, ["d", init] {alignment = 8 : i64} + %2 = cir.alloca !cir.bool, !cir.ptr, ["tmp1"] {alignment = 1 : i64} + %3 = cir.alloca !cir.bool, !cir.ptr, ["tmp2"] {alignment = 1 : i64} + %4 = cir.alloca !cir.bool, !cir.ptr, ["tmp3"] {alignment = 1 : i64} + %5 = cir.alloca !cir.bool, !cir.ptr, ["tmp4"] {alignment = 1 : i64} + cir.store %arg0, %0 : !cir.float, !cir.ptr cir.store %arg1, %1 : !cir.double, !cir.ptr - %2 = cir.load %0 : !cir.ptr, !cir.float - %3 = cir.cast(float_to_bool, %2 : !cir.float), !cir.bool + + %6 = cir.load %0 : !cir.ptr, !cir.float + %7 = cir.cast(float_to_bool, %6 : !cir.float), !cir.bool + cir.store %7, %2 : !cir.bool, !cir.ptr // MLIR: %[[#F_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 // MLIR: %[[#F_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#F_ZERO]] : f32 // MLIR: %[[#F_ZEXT:]] = llvm.zext %[[#F_BOOL]] : i1 to i8 - %4 = cir.unary(not, %3) : !cir.bool, !cir.bool + + %8 = cir.unary(not, %7) : !cir.bool, !cir.bool + cir.store %8, %3 : !cir.bool, !cir.ptr // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1 : i8) : i8 // MLIR: = llvm.xor %[[#F_ZEXT]], %[[#F_ONE]] : i8 - %5 = cir.load %1 : !cir.ptr, !cir.double - %6 = cir.cast(float_to_bool, %5 : !cir.double), !cir.bool + + %9 = cir.load %1 : !cir.ptr, !cir.double + %10 = cir.cast(float_to_bool, %9 : !cir.double), !cir.bool + cir.store %10, %4 : !cir.bool, !cir.ptr // MLIR: %[[#D_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 // MLIR: %[[#D_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#D_ZERO]] : f64 // MLIR: %[[#D_ZEXT:]] = llvm.zext %[[#D_BOOL]] : i1 to i8 - %7 = cir.unary(not, %6) : !cir.bool, !cir.bool + + %11 = cir.unary(not, %10) : !cir.bool, !cir.bool + cir.store %11, %5 : !cir.bool, !cir.ptr // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(1 : i8) : i8 // MLIR: = llvm.xor %[[#D_ZEXT]], %[[#D_ONE]] : i8 + cir.return } cir.func @CStyleValueNegation(%arg0: !s32i, %arg1: !cir.float) { - // MLIR: llvm.func @CStyleValueNegation + // MLIR: llvm.func @CStyleValueNegation %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %3 = cir.alloca !cir.float, !cir.ptr, ["f", init] {alignment = 4 : i64} + %1 = cir.alloca !cir.float, !cir.ptr, ["f", init] {alignment = 4 : i64} + %2 = cir.alloca !cir.bool, !cir.ptr, ["tmp1"] {alignment = 1 : i64} + %3 = cir.alloca !cir.bool, !cir.ptr, ["tmp2"] {alignment = 1 : i64} + %4 = cir.alloca !s32i, !cir.ptr, ["tmp3"] {alignment = 4 : i64} + %5 = cir.alloca !cir.bool, !cir.ptr, ["tmp4"] {alignment = 1 : i64} + %6 = cir.alloca !cir.bool, !cir.ptr, ["tmp5"] {alignment = 1 : i64} + %7 = cir.alloca !s32i, !cir.ptr, ["tmp6"] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr - cir.store %arg1, %3 : !cir.float, !cir.ptr + cir.store %arg1, %1 : !cir.float, !cir.ptr - %5 = cir.load %0 : !cir.ptr, !s32i - %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool - %7 = cir.unary(not, %6) : !cir.bool, !cir.bool - %8 = cir.cast(bool_to_int, %7 : !cir.bool), !s32i + %8 = cir.load %0 : !cir.ptr, !s32i + %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool + cir.store %9, %2 : !cir.bool, !cir.ptr + + %10 = cir.unary(not, %9) : !cir.bool, !cir.bool + cir.store %10, %3 : !cir.bool, !cir.ptr + + %11 = cir.cast(bool_to_int, %10 : !cir.bool), !s32i + cir.store %11, %4 : !s32i, !cir.ptr // MLIR: %[[#INT:]] = llvm.load %{{.+}} : !llvm.ptr // MLIR: %[[#IZERO:]] = llvm.mlir.constant(0 : i32) : i32 // MLIR: %[[#ICMP:]] = llvm.icmp "ne" %[[#INT]], %[[#IZERO]] : i32 @@ -65,10 +91,15 @@ module { // MLIR: %[[#IXOR:]] = llvm.xor %[[#IEXT]], %[[#IONE]] : i8 // MLIR: = llvm.zext %[[#IXOR]] : i8 to i32 - %17 = cir.load %3 : !cir.ptr, !cir.float - %18 = cir.cast(float_to_bool, %17 : !cir.float), !cir.bool - %19 = cir.unary(not, %18) : !cir.bool, !cir.bool - %20 = cir.cast(bool_to_int, %19 : !cir.bool), !s32i + %12 = cir.load %1 : !cir.ptr, !cir.float + %13 = cir.cast(float_to_bool, %12 : !cir.float), !cir.bool + cir.store %13, %5 : !cir.bool, !cir.ptr + + %14 = cir.unary(not, %13) : !cir.bool, !cir.bool + cir.store %14, %6 : !cir.bool, !cir.ptr + + %15 = cir.cast(bool_to_int, %14 : !cir.bool), !s32i + cir.store %15, %7 : !s32i, !cir.ptr // MLIR: %[[#FLOAT:]] = llvm.load %{{.+}} : !llvm.ptr // MLIR: %[[#FZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 // MLIR: %[[#FCMP:]] = llvm.fcmp "une" %[[#FLOAT]], %[[#FZERO]] : f32 diff --git a/clang/test/CIR/Transforms/loop.cir b/clang/test/CIR/Transforms/loop.cir index 8204216b6f52..ff1caf9dae53 100644 --- a/clang/test/CIR/Transforms/loop.cir +++ b/clang/test/CIR/Transforms/loop.cir @@ -19,8 +19,6 @@ module { // CHECK: ^bb[[#COND]]: // CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: -// CHECK: cir.br ^bb[[#STEP:]] -// CHECK: ^bb[[#STEP]]: // CHECK: cir.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: // CHECK: cir.return @@ -56,10 +54,8 @@ module { } // CHECK: cir.func @testDoWhile(%arg0: !cir.bool) { // CHECK: cir.br ^bb[[#BODY:]] -// CHECK: ^bb[[#COND]]: -// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: -// CHECK: cir.br ^bb[[#COND:]] +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#EXIT]]: // CHECK: cir.return // CHECK: } @@ -77,8 +73,6 @@ module { cir.return } // CHECK: cir.func @testWhileWithBreakTerminatedBody(%arg0: !cir.bool) { -// CHECK: cir.br ^bb[[#COND:]] -// CHECK: ^bb[[#COND]]: // CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // CHECK: cir.br ^bb[[#EXIT]] @@ -108,13 +102,9 @@ module { // CHECK: ^bb[[#COND]]: // CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: -// CHECK: cir.br ^bb[[#EX_SCOPE_IN:]] -// CHECK: ^bb[[#EX_SCOPE_IN]]: -// CHECK: cir.br ^bb[[#EXIT:]] +// CHECK: cir.br ^bb[[#EXIT]] // CHECK: ^bb[[#EX_SCOPE_EXIT:]]: -// CHECK: cir.br ^bb[[#STEP:]] -// CHECK: ^bb[[#STEP]]: -// CHECK: cir.br ^bb[[#COND:]] +// CHECK: cir.br ^bb[[#COND]] // CHECK: ^bb[[#EXIT]]: // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/Transforms/scope.cir b/clang/test/CIR/Transforms/scope.cir index 2d14784c33f8..b5ba0f7aac2e 100644 --- a/clang/test/CIR/Transforms/scope.cir +++ b/clang/test/CIR/Transforms/scope.cir @@ -12,13 +12,9 @@ module { cir.return } // CHECK: cir.func @foo() { -// CHECK: cir.br ^bb1 -// CHECK: ^bb1: // pred: ^bb0 // CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK: %1 = cir.const #cir.int<4> : !u32i // CHECK: cir.store %1, %0 : !u32i, !cir.ptr -// CHECK: cir.br ^bb2 -// CHECK: ^bb2: // pred: ^bb1 // CHECK: cir.return // CHECK: } @@ -46,13 +42,11 @@ module { // CHECK: cir.func @scope_with_return() -> !u32i { // CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CHECK: cir.br ^bb1 -// CHECK: ^bb1: // pred: ^bb0 // CHECK: %1 = cir.const #cir.int<0> : !u32i // CHECK: cir.store %1, %0 : !u32i, !cir.ptr // CHECK: %2 = cir.load %0 : !cir.ptr, !u32i // CHECK: cir.return %2 : !u32i -// CHECK: ^bb2: // no predecessors +// CHECK: ^bb1: // no predecessors // CHECK: %3 = cir.load %0 : !cir.ptr, !u32i // CHECK: cir.return %3 : !u32i // CHECK: } diff --git a/clang/test/CIR/Transforms/switch.cir b/clang/test/CIR/Transforms/switch.cir index 177dfc98c8af..6d0422bbf5eb 100644 --- a/clang/test/CIR/Transforms/switch.cir +++ b/clang/test/CIR/Transforms/switch.cir @@ -139,19 +139,15 @@ module { // CHECK: cir.func @shouldFlatMultiBlockCase(%arg0: !s32i) { // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK: cir.store %arg0, %0 : !s32i, !cir.ptr -// CHECK: cir.br ^bb1 -// CHECK: ^bb1: // pred: ^bb0 // CHECK: %1 = cir.load %0 : !cir.ptr, !s32i -// CHECK: cir.switch.flat %1 : !s32i, ^bb4 [ -// CHECK: 3: ^bb2 +// CHECK: cir.switch.flat %1 : !s32i, ^bb3 [ +// CHECK: 3: ^bb1 // CHECK: ] -// CHECK: ^bb2: // pred: ^bb1 +// CHECK: ^bb1: // pred: ^bb0 // CHECK: cir.return -// CHECK: ^bb3: // no predecessors -// CHECK: cir.br ^bb4 -// CHECK: ^bb4: // 2 preds: ^bb1, ^bb3 -// CHECK: cir.br ^bb5 -// CHECK: ^bb5: // pred: ^bb4 +// CHECK: ^bb2: // no predecessors +// CHECK: cir.br ^bb3 +// CHECK: ^bb3: // 2 preds: ^bb0, ^bb2 // CHECK: cir.return // CHECK: } @@ -185,22 +181,16 @@ module { cir.return %4 : !s32i } // CHECK: cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { -// CHECK: cir.switch.flat %3 : !s32i, ^bb7 [ -// CHECK: 0: ^bb2 +// CHECK: cir.switch.flat %3 : !s32i, ^bb4 [ +// CHECK: 0: ^bb1 // CHECK: ] +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: cir.brcond {{%.*}} ^bb2, ^bb3 // CHECK: ^bb2: // pred: ^bb1 -// CHECK: cir.br ^bb3 -// CHECK: ^bb3: // pred: ^bb2 -// CHECK: cir.brcond {{%.*}} ^bb4, ^bb5 -// CHECK: ^bb4: // pred: ^bb3 -// CHECK: cir.br ^bb7 -// CHECK: ^bb5: // pred: ^bb3 -// CHECK: cir.br ^bb6 -// CHECK: ^bb6: // pred: ^bb5 -// CHECK: cir.br ^bb7 -// CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 -// CHECK: cir.br ^bb8 -// CHECK: ^bb8: // pred: ^bb7 +// CHECK: cir.br ^bb4 +// CHECK: ^bb3: // pred: ^bb1 +// CHECK: cir.br ^bb4 +// CHECK: ^bb4: // 3 preds: ^bb0, ^bb2, ^bb3 // CHECK: cir.return %9 : !s32i // CHECK: } @@ -260,8 +250,6 @@ module { // CHECK-NEXT: ^[[CASE_DEFAULT]]: // CHECK-NEXT: cir.int<3> // CHECK-NEXT: cir.store -// CHECK-NEXT: cir.br ^[[EPILOG]] -// CHECK-NEXT: ^[[EPILOG]]: // CHECK-NEXT: cir.br ^[[EPILOG_END:bb[0-9]+]] // CHECK-NEXT: ^[[EPILOG_END]]: // CHECK: cir.return diff --git a/clang/test/CIR/Transforms/ternary.cir b/clang/test/CIR/Transforms/ternary.cir index 67ef7f95a6b5..833084602338 100644 --- a/clang/test/CIR/Transforms/ternary.cir +++ b/clang/test/CIR/Transforms/ternary.cir @@ -35,10 +35,6 @@ module { // CHECK: cir.br ^bb3(%5 : !s32i) // CHECK: ^bb2: // pred: ^bb0 // CHECK: %6 = cir.const #cir.int<5> : !s32i -// CHECK: cir.br ^bb3(%6 : !s32i) -// CHECK: ^bb3(%7: !s32i): // 2 preds: ^bb1, ^bb2 -// CHECK: cir.br ^bb4 -// CHECK: ^bb4: // pred: ^bb3 // CHECK: cir.store %7, %1 : !s32i, !cir.ptr // CHECK: %8 = cir.load %1 : !cir.ptr, !s32i // CHECK: cir.return %8 : !s32i @@ -60,8 +56,6 @@ module { // CHECK: ^bb2: // pred: ^bb0 // CHECK: cir.br ^bb3 // CHECK: ^bb3: // 2 preds: ^bb1, ^bb2 -// CHECK: cir.br ^bb4 -// CHECK: ^bb4: // pred: ^bb3 // CHECK: cir.return // CHECK: } From c34e6a45f39b1f272fe8637c9dfa21656bb63c2b Mon Sep 17 00:00:00 2001 From: Krito Date: Fri, 14 Jun 2024 02:38:49 +0800 Subject: [PATCH 1635/2301] [CIR][ThroughMLIR] Fix FuncOp for functions with pointer arguments. (#684) This PR is to fix the issue #658 . Now we can get the correct result using the following command. ``` echo "void test(int *){}" | ./build/Debug/bin/clang -cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir -o - ``` result: ``` module attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior, cir.triple = "x86_64-unknown-linux-gnu", dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>, #dlti.dl_entry<"dlti.endianness", "little">>, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"} { func.func @test(%arg0: memref loc(fused[#loc3, #loc4])) { %alloca = memref.alloca() {alignment = 8 : i64} : memref> loc(#loc7) memref.store %arg0, %alloca[] : memref> loc(#loc5) return loc(#loc2) } loc(#loc6) } loc(#loc) ``` And the test/CIR/Lowering/ThroughMLIR/dot.cir now passes the test, so I have removed the XFAIL flag. --- clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 4 ++-- clang/test/CIR/Lowering/ThroughMLIR/dot.cir | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 07d3db7b47ce..e0f2511dd6f6 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -558,10 +558,10 @@ class CIRFuncOpLowering : public mlir::OpConversionPattern { resultType ? mlir::TypeRange(resultType) : mlir::TypeRange())); - rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); - if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, + if (failed(rewriter.convertRegionTypes(&op.getBody(), *typeConverter, &signatureConversion))) return mlir::failure(); + rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); rewriter.eraseOp(op); return mlir::LogicalResult::success(); diff --git a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir index 27ab0e9c33f6..5fc5311a65a9 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/dot.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/dot.cir @@ -1,6 +1,5 @@ // RUN: cir-opt %s -cir-to-mlir -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -// XFAIL: * !s32i = !cir.int module { From 1d18e56ae90796194e8d6866375fd25e17e3e187 Mon Sep 17 00:00:00 2001 From: roro47 <40341016+roro47@users.noreply.github.com> Date: Thu, 13 Jun 2024 19:39:54 +0100 Subject: [PATCH 1636/2301] [CIR] Add FuncAttrs to cir.calls (#637) Some function attributes are also callsite attributes, for instance, nothrow. This means they are going to show up in both. We don't support that just yet, hence the PR. CIR has an attribute `ExtraFuncAttr` that we current use as part of `FuncOp`, see CIROps.td. This attribute also needs to be added to `CallOp` and `TryCalOp`. Right now, In `CIRGenCall.cpp: AddAttributesFromFunctionProtoType` fills in `FuncAttrs`, but doesn't use it for anything. We should use the `FuncAttrs` result to populate constructing a `ExtraFuncAttr` and add it to the aforementioned call operations. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 51 +++++++++++++++++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 25 ++++----- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 32 +++++++----- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 13 +++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 39 ++++++++++++-- .../Dialect/Transforms/LoweringPrepare.cpp | 25 +++++---- .../TargetLowering/LowerFunction.cpp | 5 +- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 6 +-- clang/test/CIR/CodeGen/call-extra-attrs.cpp | 34 +++++++++++++ 10 files changed, 174 insertions(+), 60 deletions(-) create mode 100644 clang/test/CIR/CodeGen/call-extra-attrs.cpp diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 9420b24e42e0..cb7bb85617f5 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -415,6 +415,57 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::ValueRange value = {}) { return create(loc, value); } + + mlir::cir::CallOp + createCallOp(mlir::Location loc, + mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), + mlir::Type returnType = mlir::cir::VoidType(), + mlir::ValueRange operands = mlir::ValueRange(), + mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + + mlir::cir::CallOp callOp = + create(loc, callee, returnType, operands); + + if (extraFnAttr) { + callOp->setAttr("extra_attrs", extraFnAttr); + } else { + mlir::NamedAttrList empty; + callOp->setAttr("extra_attrs", + mlir::cir::ExtraFuncAttributesAttr::get( + getContext(), empty.getDictionary(getContext()))); + } + return callOp; + } + + mlir::cir::CallOp + createCallOp(mlir::Location loc, mlir::cir::FuncOp callee, + mlir::ValueRange operands = mlir::ValueRange(), + mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + return createCallOp(loc, mlir::SymbolRefAttr::get(callee), + callee.getFunctionType().getReturnType(), operands, + extraFnAttr); + } + + mlir::cir::CallOp + createIndirectCallOp(mlir::Location loc, mlir::Value ind_target, + mlir::cir::FuncType fn_type, + mlir::ValueRange operands = mlir::ValueRange(), + mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + + llvm::SmallVector resOperands({ind_target}); + resOperands.append(operands.begin(), operands.end()); + + return createCallOp(loc, mlir::SymbolRefAttr(), fn_type.getReturnType(), + resOperands, extraFnAttr); + } + + mlir::cir::CallOp + createCallOp(mlir::Location loc, mlir::SymbolRefAttr callee, + mlir::ValueRange operands = mlir::ValueRange(), + mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + return createCallOp(loc, callee, mlir::cir::VoidType(), operands, + extraFnAttr); + } }; } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0baa53347656..03fc98619226 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2787,6 +2787,7 @@ class CIR_CallOp extra_traits = []> : dag commonArgs = (ins OptionalAttr:$callee, Variadic:$arg_ops, + ExtraFuncAttr:$extra_attrs, OptionalAttr:$ast ); } @@ -2822,12 +2823,16 @@ def CallOp : CIR_CallOp<"call"> { let arguments = commonArgs; let results = (outs Optional:$result); + let skipDefaultBuilders = 1; + let builders = [ - OpBuilder<(ins "FuncOp":$callee, CArg<"ValueRange", "{}">:$operands), [{ + OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType, + CArg<"ValueRange", "{}">:$operands), [{ $_state.addOperands(operands); - $_state.addAttribute("callee", SymbolRefAttr::get(callee)); - if (!callee.getFunctionType().isVoid()) - $_state.addTypes(callee.getFunctionType().getReturnType()); + if (callee) + $_state.addAttribute("callee", callee); + if (resType && !resType.isa()) + $_state.addTypes(resType); }]>, OpBuilder<(ins "Value":$ind_target, "FuncType":$fn_type, @@ -2836,18 +2841,6 @@ def CallOp : CIR_CallOp<"call"> { $_state.addOperands(operands); if (!fn_type.isVoid()) $_state.addTypes(fn_type.getReturnType()); - }]>, - OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType, - CArg<"ValueRange", "{}">:$operands), [{ - $_state.addOperands(operands); - $_state.addAttribute("callee", callee); - if (resType && !resType.isa()) - $_state.addTypes(resType); - }]>, - OpBuilder<(ins "SymbolRefAttr":$callee, - CArg<"ValueRange", "{}">:$operands), [{ - $_state.addOperands(operands); - $_state.addAttribute("callee", callee); }]> ]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 60268b72d9dd..bf44ddb263b7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -447,22 +447,29 @@ static mlir::cir::CIRCallOpInterface buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, mlir::cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal, mlir::cir::FuncOp directFuncOp, - SmallVectorImpl &CIRCallArgs, bool InvokeDest) { + SmallVectorImpl &CIRCallArgs, bool InvokeDest, + mlir::cir::ExtraFuncAttributesAttr extraFnAttrs) { auto &builder = CGF.getBuilder(); if (InvokeDest) { auto addr = CGF.currLexScope->getExceptionInfo().addr; - if (indirectFuncTy) - return builder.create( + + mlir::cir::TryCallOp tryCallOp; + if (indirectFuncTy) { + tryCallOp = builder.create( callLoc, addr, indirectFuncVal, indirectFuncTy, CIRCallArgs); - return builder.create(callLoc, directFuncOp, addr, - CIRCallArgs); + } else { + tryCallOp = builder.create(callLoc, directFuncOp, + addr, CIRCallArgs); + } + tryCallOp->setAttr("extra_attrs", extraFnAttrs); + return tryCallOp; } if (indirectFuncTy) - return builder.create(callLoc, indirectFuncVal, - indirectFuncTy, CIRCallArgs); - return builder.create(callLoc, directFuncOp, CIRCallArgs); + return builder.createIndirectCallOp( + callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs, extraFnAttrs); + return builder.createCallOp(callLoc, directFuncOp, CIRCallArgs, extraFnAttrs); } RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, @@ -735,9 +742,10 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, indirectFuncVal = CalleePtr->getResult(0); } - mlir::cir::CIRCallOpInterface callLikeOp = - buildCallLikeOp(*this, callLoc, indirectFuncTy, indirectFuncVal, - directFuncOp, CIRCallArgs, InvokeDest); + mlir::cir::CIRCallOpInterface callLikeOp = buildCallLikeOp( + *this, callLoc, indirectFuncTy, indirectFuncVal, directFuncOp, + CIRCallArgs, InvokeDest, + mlir::cir::ExtraFuncAttributesAttr::get(builder.getContext(), Attrs)); if (E) callLikeOp->setAttr( @@ -844,7 +852,7 @@ mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, // TODO(cir): set the calling convention to this runtime call. assert(!MissingFeatures::setCallingConv()); - auto call = builder.create(loc, callee, args); + auto call = builder.createCallOp(loc, callee, args); assert(call->getNumResults() <= 1 && "runtime functions have at most 1 result"); diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 560335765ef6..362f1ee5fcb3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -179,10 +179,9 @@ mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, } else fnOp = cast(builtin); - return builder.create( - loc, fnOp, - mlir::ValueRange{builder.getUInt32(NewAlign, loc), nullPtr, nullPtr, - nullPtr}); + return builder.createCallOp(loc, fnOp, + mlir::ValueRange{builder.getUInt32(NewAlign, loc), + nullPtr, nullPtr, nullPtr}); } mlir::cir::CallOp @@ -202,7 +201,7 @@ CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { } else fnOp = cast(builtin); - return builder.create( + return builder.createCallOp( loc, fnOp, mlir::ValueRange{CurCoro.Data->CoroId.getResult()}); } @@ -223,7 +222,7 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, } else fnOp = cast(builtin); - return builder.create( + return builder.createCallOp( loc, fnOp, mlir::ValueRange{CurCoro.Data->CoroId.getResult(), coroframeAddr}); } @@ -244,7 +243,7 @@ mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, } else fnOp = cast(builtin); - return builder.create( + return builder.createCallOp( loc, fnOp, mlir::ValueRange{nullPtr, builder.getBool(false, loc)}); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 9465ba3f3b05..df777b95062c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1718,8 +1718,8 @@ void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( builder.setInsertionPoint(noProtoCallOp); // Patch call type with the real function type. - auto realCallOp = builder.create( - noProtoCallOp.getLoc(), NewFn, noProtoCallOp.getOperands()); + auto realCallOp = builder.createCallOp(noProtoCallOp.getLoc(), NewFn, + noProtoCallOp.getOperands()); // Replace old no proto call with fixed call. noProtoCallOp.replaceAllUsesWith(realCallOp); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 20531fc49a2c..d62fd3207803 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2346,6 +2346,7 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { static ::mlir::ParseResult parseCallCommon( ::mlir::OpAsmParser &parser, ::mlir::OperationState &result, + llvm::StringRef extraAttrsAttrName, llvm::function_ref<::mlir::ParseResult(::mlir::OpAsmParser &, ::mlir::OperationState &)> customOpHandler = @@ -2380,6 +2381,23 @@ static ::mlir::ParseResult parseCallCommon( return ::mlir::failure(); if (parser.parseRParen()) return ::mlir::failure(); + + auto &builder = parser.getBuilder(); + Attribute extraAttrs; + if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) { + if (parser.parseLParen().failed()) + return failure(); + if (parser.parseAttribute(extraAttrs).failed()) + return failure(); + if (parser.parseRParen().failed()) + return failure(); + } else { + NamedAttrList empty; + extraAttrs = mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), empty.getDictionary(builder.getContext())); + } + result.addAttribute(extraAttrsAttrName, extraAttrs); + if (parser.parseOptionalAttrDict(result.attributes)) return ::mlir::failure(); if (parser.parseColon()) @@ -2400,6 +2418,7 @@ static ::mlir::ParseResult parseCallCommon( void printCallCommon( Operation *op, mlir::Value indirectCallee, mlir::FlatSymbolRefAttr flatSym, ::mlir::OpAsmPrinter &state, + ::mlir::cir::ExtraFuncAttributesAttr extraAttrs, llvm::function_ref customOpHandler = []() {}) { state << ' '; @@ -2415,13 +2434,20 @@ void printCallCommon( state << "("; state << ops; state << ")"; - llvm::SmallVector<::llvm::StringRef, 2> elidedAttrs; + + llvm::SmallVector<::llvm::StringRef, 4> elidedAttrs; elidedAttrs.push_back("callee"); elidedAttrs.push_back("ast"); + elidedAttrs.push_back("extra_attrs"); state.printOptionalAttrDict(op->getAttrs(), elidedAttrs); state << ' ' << ":"; state << ' '; state.printFunctionalType(op->getOperands().getTypes(), op->getResultTypes()); + if (!extraAttrs.getElements().empty()) { + state << " extra("; + state.printAttributeWithoutType(extraAttrs); + state << ")"; + } } LogicalResult @@ -2431,12 +2457,14 @@ cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { ::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result) { - return parseCallCommon(parser, result); + + return parseCallCommon(parser, result, getExtraAttrsAttrName(result.name)); } void CallOp::print(::mlir::OpAsmPrinter &state) { mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; - printCallCommon(*this, indirectCallee, getCalleeAttr(), state); + printCallCommon(*this, indirectCallee, getCalleeAttr(), state, + getExtraAttrs()); } //===----------------------------------------------------------------------===// @@ -2493,7 +2521,7 @@ LogicalResult cir::TryCallOp::verify() { return mlir::success(); } ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result) { return parseCallCommon( - parser, result, + parser, result, getExtraAttrsAttrName(result.name), [](::mlir::OpAsmParser &parser, ::mlir::OperationState &result) -> ::mlir::ParseResult { ::mlir::OpAsmParser::UnresolvedOperand exceptionRawOperands[1]; @@ -2535,7 +2563,8 @@ void TryCallOp::print(::mlir::OpAsmPrinter &state) { state << getExceptionInfo(); state << ")"; mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; - printCallCommon(*this, indirectCallee, getCalleeAttr(), state); + printCallCommon(*this, indirectCallee, getCalleeAttr(), state, + getExtraAttrs()); } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 02b54c5a9962..f7643c9b8016 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -195,7 +195,7 @@ FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { } // Create a variable initialization function. - mlir::OpBuilder builder(&getContext()); + CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op); auto voidTy = ::mlir::cir::VoidType::get(builder.getContext()); auto fnType = mlir::cir::FuncType::get({}, voidTy); @@ -264,7 +264,7 @@ FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { dtorCall.getArgOperand(0)); args[2] = builder.create( Handle.getLoc(), HandlePtrTy, Handle.getSymName()); - builder.create(dtorCall.getLoc(), fnAtExit, args); + builder.createCallOp(dtorCall.getLoc(), fnAtExit, args); dtorCall->erase(); entryBB->getOperations().splice(entryBB->end(), dtorBlock.getOperations(), dtorBlock.begin(), @@ -481,7 +481,7 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { fnName += getTransformedFileName(theModule); } - mlir::OpBuilder builder(&getContext()); + CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointToEnd(&theModule.getBodyRegion().back()); auto fnType = mlir::cir::FuncType::get( {}, mlir::cir::VoidType::get(builder.getContext())); @@ -490,7 +490,7 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { mlir::cir::GlobalLinkageKind::ExternalLinkage); builder.setInsertionPointToStart(f.addEntryBlock()); for (auto &f : dynamicInitializers) { - builder.create(f.getLoc(), f); + builder.createCallOp(f.getLoc(), f); } builder.create(f.getLoc()); @@ -597,7 +597,7 @@ void LoweringPreparePass::lowerArrayCtor(ArrayCtor op) { void LoweringPreparePass::lowerStdFindOp(StdFindOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op.getOperation()); - auto call = builder.create( + auto call = builder.createCallOp( op.getLoc(), op.getOriginalFnAttr(), op.getResult().getType(), mlir::ValueRange{op.getOperand(0), op.getOperand(1), op.getOperand(2)}); @@ -608,9 +608,9 @@ void LoweringPreparePass::lowerStdFindOp(StdFindOp op) { void LoweringPreparePass::lowerIterBeginOp(IterBeginOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op.getOperation()); - auto call = builder.create( - op.getLoc(), op.getOriginalFnAttr(), op.getResult().getType(), - mlir::ValueRange{op.getOperand()}); + auto call = builder.createCallOp(op.getLoc(), op.getOriginalFnAttr(), + op.getResult().getType(), + mlir::ValueRange{op.getOperand()}); op.replaceAllUsesWith(call); op.erase(); @@ -619,9 +619,9 @@ void LoweringPreparePass::lowerIterBeginOp(IterBeginOp op) { void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op.getOperation()); - auto call = builder.create( - op.getLoc(), op.getOriginalFnAttr(), op.getResult().getType(), - mlir::ValueRange{op.getOperand()}); + auto call = builder.createCallOp(op.getLoc(), op.getOriginalFnAttr(), + op.getResult().getType(), + mlir::ValueRange{op.getOperand()}); op.replaceAllUsesWith(call); op.erase(); @@ -712,8 +712,7 @@ void LoweringPreparePass::runOnMathOp(Operation *op) { buildRuntimeFunction(builder, rtFuncName, op->getLoc(), rtFuncTy); builder.setInsertionPointAfter(op); - auto call = builder.create(op->getLoc(), rtFunc, - op->getOperands()); + auto call = builder.createCallOp(op->getLoc(), rtFunc, op->getOperands()); op->replaceAllUsesWith(call); op->erase(); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index edf4dfc7d4b3..4d4e2d4f3ba6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -317,8 +317,9 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // NOTE(cir): We don't know if the callee was already lowered, so we only // fetch the name from the callee, while the return type is fetch from the // lowering types manager. - CallOp _ = rewriter.create(loc, Caller.getCalleeAttr(), - IRFuncTy.getReturnType(), IRCallArgs); + CallOp callOp = rewriter.create(loc, Caller.getCalleeAttr(), + IRFuncTy.getReturnType(), IRCallArgs); + callOp.setExtraAttrsAttr(Caller.getExtraAttrs()); assert(!::cir::MissingFeatures::vectorType()); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index 83235f50ffee..ebbcfab0f573 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -36,7 +36,7 @@ static void buildBadCastCall(CIRBaseBuilderTy &builder, mlir::Location loc, // TODO(cir): set the calling convention to __cxa_bad_cast. assert(!MissingFeatures::setCallingConv()); - builder.create(loc, badCastFuncRef, mlir::ValueRange{}); + builder.createCallOp(loc, badCastFuncRef, mlir::ValueRange{}); builder.create(loc); builder.clearInsertionPoint(); } @@ -62,8 +62,8 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, assert(!MissingFeatures::setCallingConv()); mlir::Value castedPtr = builder - .create(loc, dynCastFuncRef, - builder.getVoidPtrTy(), dynCastFuncArgs) + .createCallOp(loc, dynCastFuncRef, builder.getVoidPtrTy(), + dynCastFuncArgs) .getResult(); assert(castedPtr.getType().isa() && diff --git a/clang/test/CIR/CodeGen/call-extra-attrs.cpp b/clang/test/CIR/CodeGen/call-extra-attrs.cpp new file mode 100644 index 000000000000..a17246ddb1b5 --- /dev/null +++ b/clang/test/CIR/CodeGen/call-extra-attrs.cpp @@ -0,0 +1,34 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +__attribute__((nothrow)) +int s0(int a, int b) { + int x = a + b; + return x; +} + +__attribute__((noinline)) +int s1(int a, int b) { + return s0(a,b); +} + +int s2(int a, int b) { + return s1(a, b); +} + +// CIR: #fn_attr = #cir, nothrow = #cir.nothrow, optnone = #cir.optnone})> +// CIR: #fn_attr1 = #cir + +// CIR: cir.func @_Z2s0ii(%{{.*}}, %{{.*}}) -> {{.*}} extra(#fn_attr) +// CIR: cir.func @_Z2s1ii(%{{.*}}, %{{.*}}) -> {{.*}} extra(#fn_attr) +// CIR: cir.call @_Z2s0ii(%{{.*}}, %{{.*}}) : ({{.*}}, {{.*}}) -> {{.*}} extra(#fn_attr1) +// CIR: cir.func @_Z2s2ii(%{{.*}}, %{{.*}}) -> {{.*}} extra(#fn_attr) +// CHECK-NOT: cir.call @_Z2s1ii(%{{.*}}, %{{.*}}) : ({{.*}}, {{.*}}) -> {{.*}} extra(#fn_attr{{.*}}) + +// LLVM: define i32 @_Z2s0ii(i32 %0, i32 %1) #[[#ATTR1:]] +// LLVM: define i32 @_Z2s1ii(i32 %0, i32 %1) #[[#ATTR1:]] +// LLVM: define i32 @_Z2s2ii(i32 %0, i32 %1) #[[#ATTR1:]] + +// LLVM: attributes #[[#ATTR1]] = {{.*}} noinline nounwind optnone From 3954ddd14ea4ad8fb0909d8951d292287e42c10c Mon Sep 17 00:00:00 2001 From: Krito Date: Sat, 15 Jun 2024 02:53:02 +0800 Subject: [PATCH 1637/2301] [CIR][ThroughMLIR] lowering cir.vec.create, extract, insert op to MLIR vector Dialect (#681) This pr adds cir.vec.create, extract, insert op lowering to MLIR passes and test files. Can we lower the vector-related operations in CIR to the vector dialect? This is feasible, although the vector dialect hasn't been used in the CIRToMLIR conversion before. If this lowering is acceptable, I will complete the remaining operations' lowering in the next PR. If there is a more suitable dialect, feel free to discuss it. --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 105 ++++++++++++++---- .../test/CIR/Lowering/ThroughMLIR/vectype.cpp | 67 +++++++++++ 2 files changed, 153 insertions(+), 19 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index e0f2511dd6f6..e8c62e3a097d 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -29,6 +29,7 @@ #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" +#include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/IR/BuiltinDialect.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Operation.h" @@ -78,7 +79,8 @@ struct ConvertCIRToMLIRPass registry.insert(); + mlir::scf::SCFDialect, mlir::math::MathDialect, + mlir::vector::VectorDialect>(); } void runOnOperation() final; @@ -1083,6 +1085,64 @@ class CIRGetGlobalOpLowering } }; +class CIRVectorCreateLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecCreateOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto vecTy = op.getType().dyn_cast(); + assert(vecTy && "result type of cir.vec.create op is not VectorType"); + auto elementTy = typeConverter->convertType(vecTy.getEltType()); + auto loc = op.getLoc(); + auto zeroElement = rewriter.getZeroAttr(elementTy); + mlir::Value result = rewriter.create( + loc, + mlir::DenseElementsAttr::get( + mlir::VectorType::get(vecTy.getSize(), elementTy), zeroElement)); + assert(vecTy.getSize() == op.getElements().size() && + "cir.vec.create op count doesn't match vector type elements count"); + for (uint64_t i = 0; i < vecTy.getSize(); ++i) { + mlir::Value indexValue = + getConst(rewriter, loc, rewriter.getI64Type(), i); + result = rewriter.create( + loc, adaptor.getElements()[i], result, indexValue); + } + rewriter.replaceOp(op, result); + return mlir::success(); + } +}; + +class CIRVectorInsertLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecInsertOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getValue(), adaptor.getVec(), adaptor.getIndex()); + return mlir::success(); + } +}; + +class CIRVectorExtractLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecExtractOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getVec(), adaptor.getIndex()); + return mlir::success(); + } +}; + class CIRCastOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -1285,20 +1345,22 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns.add< - CIRCmpOpLowering, CIRCallOpLowering, CIRUnaryOpLowering, CIRBinOpLowering, - CIRLoadOpLowering, CIRConstantOpLowering, CIRStoreOpLowering, - CIRAllocaOpLowering, CIRFuncOpLowering, CIRScopeOpLowering, - CIRBrCondOpLowering, CIRTernaryOpLowering, CIRYieldOpLowering, - CIRCosOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, - CIRCastOpLowering, CIRPtrStrideOpLowering, CIRSqrtOpLowering, - CIRCeilOpLowering, CIRExp2OpLowering, CIRExpOpLowering, CIRFAbsOpLowering, - CIRFloorOpLowering, CIRLog10OpLowering, CIRLog2OpLowering, - CIRLogOpLowering, CIRRoundOpLowering, CIRPtrStrideOpLowering, - CIRSinOpLowering, CIRShiftOpLowering, CIRBitClzOpLowering, - CIRBitCtzOpLowering, CIRBitPopcountOpLowering, CIRBitClrsbOpLowering, - CIRBitFfsOpLowering, CIRBitParityOpLowering, CIRIfOpLowering>( - converter, patterns.getContext()); + patterns + .add( + converter, patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { @@ -1352,6 +1414,10 @@ static mlir::TypeConverter prepareTypeConverter() { return nullptr; return mlir::MemRefType::get(shape, elementType); }); + converter.addConversion([&](mlir::cir::VectorType type) -> mlir::Type { + auto ty = converter.convertType(type.getEltType()); + return mlir::VectorType::get(type.getSize(), ty); + }); return converter; } @@ -1368,10 +1434,11 @@ void ConvertCIRToMLIRPass::runOnOperation() { mlir::ConversionTarget target(getContext()); target.addLegalOp(); - target.addLegalDialect(); + target + .addLegalDialect(); target.addIllegalDialect(); if (failed(applyPartialConversion(module, target, std::move(patterns)))) diff --git a/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp b/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp new file mode 100644 index 000000000000..75484a1fc7ae --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp @@ -0,0 +1,67 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s +// XFAIL: * + +typedef int vi4 __attribute__((vector_size(16))); + +void vector_int_test(int x) { + + // CHECK: %[[ALLOC1:.*]] = memref.alloca() {alignment = 4 : i64} : memref + // CHECK: %[[ALLOC2:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC3:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC4:.*]] = memref.alloca() {alignment = 4 : i64} : memref + // CHECK: memref.store %arg0, %[[ALLOC1]][] : memref + + vi4 a = { 1, 2, 3, 4 }; + + // CHECK: %[[C1:.*]] = arith.constant 1 : i32 + // CHECK: %[[C2:.*]] = arith.constant 2 : i32 + // CHECK: %[[C3:.*]] = arith.constant 3 : i32 + // CHECK: %[[C4:.*]] = arith.constant 4 : i32 + // CHECK: %[[CST:.*]] = arith.constant dense<0> : vector<4xi32> + // CHECK: %[[C0_I64:.*]] = arith.constant 0 : i64 + // CHECK: %[[VEC0:.*]] = vector.insertelement %[[C1]], %[[CST]][%[[C0_I64]] : i64] : vector<4xi32> + // CHECK: %[[C1_I64:.*]] = arith.constant 1 : i64 + // CHECK: %[[VEC1:.*]] = vector.insertelement %[[C2]], %[[VEC0]][%[[C1_I64]] : i64] : vector<4xi32> + // CHECK: %[[C2_I64:.*]] = arith.constant 2 : i64 + // CHECK: %[[VEC2:.*]] = vector.insertelement %[[C3]], %[[VEC1]][%[[C2_I64]] : i64] : vector<4xi32> + // CHECK: %[[C3_I64:.*]] = arith.constant 3 : i64 + // CHECK: %[[VEC3:.*]] = vector.insertelement %[[C4]], %[[VEC2]][%[[C3_I64]] : i64] : vector<4xi32> + // CHECK: memref.store %[[VEC3]], %[[ALLOC2]][] : memref> + + vi4 b = {x, 5, 6, x + 1}; + + // CHECK: %[[VAL1:.*]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[C5:.*]] = arith.constant 5 : i32 + // CHECK: %[[C6:.*]] = arith.constant 6 : i32 + // CHECK: %[[VAL2:.*]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[C1_I32_2:.*]] = arith.constant 1 : i32 + // CHECK: %[[SUM:.*]] = arith.addi %[[VAL2]], %[[C1_I32_2]] : i32 + // CHECK: %[[CST2:.*]] = arith.constant dense<0> : vector<4xi32> + // CHECK: %[[C0_I64_2:.*]] = arith.constant 0 : i64 + // CHECK: %[[VEC4:.*]] = vector.insertelement %[[VAL1]], %[[CST2]][%[[C0_I64_2]] : i64] : vector<4xi32> + // CHECK: %[[C1_I64_2:.*]] = arith.constant 1 : i64 + // CHECK: %[[VEC5:.*]] = vector.insertelement %[[C5]], %[[VEC4]][%[[C1_I64_2]] : i64] : vector<4xi32> + // CHECK: %[[C2_I64_2:.*]] = arith.constant 2 : i64 + // CHECK: %[[VEC6:.*]] = vector.insertelement %[[C6]], %[[VEC5]][%[[C2_I64_2]] : i64] : vector<4xi32> + // CHECK: %[[C3_I64_2:.*]] = arith.constant 3 : i64 + // CHECK: %[[VEC7:.*]] = vector.insertelement %[[SUM]], %[[VEC6]][%[[C3_I64_2]] : i64] : vector<4xi32> + // CHECK: memref.store %[[VEC7]], %[[ALLOC3]][] : memref> + + a[x] = x; + + // CHECK: %[[VAL3:.*]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[VAL4:.*]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[VEC8:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[VEC9:.*]] = vector.insertelement %[[VAL3]], %[[VEC8]][%[[VAL4]] : i32] : vector<4xi32> + // CHECK: memref.store %[[VEC9]], %[[ALLOC2]][] : memref> + + int c = a[x]; + + // CHECK: %[[VEC10:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[VAL5:.*]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[EXTRACT:.*]] = vector.extractelement %[[VEC10]][%[[VAL5]] : i32] : vector<4xi32> + // CHECK: memref.store %[[EXTRACT]], %[[ALLOC4]][] : memref + + // CHECK: return +} From 10a27e4befe5b2c63d79e5daacd53d4e4ada45c5 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 18 Jun 2024 02:30:29 +0800 Subject: [PATCH 1638/2301] [CIR][LLVMLowering] Add LLVM lowering for data member pointers (#612) This PR adds LLVM lowering support for data member pointers. It includes the following changes: - ~~The `#cir.data_member` attribute now has a new parameter named `memberOffset`. When the data member pointer is not null, this parameter gives the offset of the pointed-to member within its containing object. This offset is calculated by target ABI.~~ - ~~A new attribute `#cir.data_member_ptr_layout` is added. It contains ABI-specific layout information about a data member pointer that is required to lower it to LLVM IR. This attribute is attached to the module op, and it is queried during LLVMIR lowering to obtain the lowering information in it.~~ - Some CIRGen of the data member pointers is refactored to follow the upstream CodeGen skeleton. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 12 ++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 82 +++++++++++++++---- clang/test/CIR/Lowering/data-member.cir | 52 ++++++++++++ 5 files changed, 131 insertions(+), 19 deletions(-) create mode 100644 clang/test/CIR/Lowering/data-member.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 8c68c54a4159..9285cade6953 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -345,12 +345,12 @@ def DataMemberAttr : CIR_Attr<"DataMember", "data_member", let parameters = (ins AttributeSelfTypeParameter< "", "mlir::cir::DataMemberType">:$type, OptionalParameter< - "std::optional">:$memberIndex); + "std::optional">:$member_index); let description = [{ A data member attribute is a literal attribute that represents a constant pointer-to-data-member value. - The `memberIndex` parameter represents the index of the pointed-to member + The `member_index` parameter represents the index of the pointed-to member within its containing struct. It is an optional parameter; lack of this parameter indicates a null pointer-to-data-member value. @@ -365,7 +365,13 @@ def DataMemberAttr : CIR_Attr<"DataMember", "data_member", let genVerifyDecl = 1; let assemblyFormat = [{ - `<` ($memberIndex^):(`null`)? `>` + `<` ($member_index^):(`null`)? `>` + }]; + + let extraClassDeclaration = [{ + bool isNullPtr() const { + return !getMemberIndex().has_value(); + } }]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 0d25bdf195a6..4d5676c691fd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -245,7 +245,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } mlir::cir::DataMemberAttr getDataMemberAttr(mlir::cir::DataMemberType ty, - size_t memberIndex) { + unsigned memberIndex) { return mlir::cir::DataMemberAttr::get(getContext(), ty, memberIndex); } diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 8e44cb1cf2b7..192e7b3a8d18 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -423,7 +423,7 @@ CmpThreeWayInfoAttr::verify(function_ref emitError, LogicalResult DataMemberAttr::verify(function_ref emitError, mlir::cir::DataMemberType ty, - std::optional memberIndex) { + std::optional memberIndex) { if (!memberIndex.has_value()) { // DataMemberAttr without a given index represents a null value. return success(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7377820a29fd..572065a00222 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -51,6 +51,7 @@ #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/MissingFeatures.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" @@ -1065,6 +1066,28 @@ bool hasTrailingZeros(mlir::cir::ConstArrayAttr attr) { })); } +static mlir::Attribute +lowerDataMemberAttr(mlir::ModuleOp moduleOp, mlir::cir::DataMemberAttr attr, + const mlir::TypeConverter &typeConverter) { + mlir::DataLayout layout{moduleOp}; + + uint64_t memberOffset; + if (attr.isNullPtr()) { + // TODO(cir): the numerical value of a null data member pointer is + // ABI-specific and should be queried through ABI. + assert(!MissingFeatures::targetCodeGenInfoGetNullPointer()); + memberOffset = -1ull; + } else { + auto memberIndex = attr.getMemberIndex().value(); + memberOffset = + attr.getType().getClsTy().getElementOffset(layout, memberIndex); + } + + auto underlyingIntTy = mlir::IntegerType::get( + moduleOp->getContext(), layout.getTypeSizeInBits(attr.getType())); + return mlir::IntegerAttr::get(underlyingIntTy, memberOffset); +} + class CIRConstantLowering : public mlir::OpConversionPattern { public: @@ -1106,6 +1129,10 @@ class CIRConstantLowering return mlir::success(); } attr = op.getValue(); + } else if (op.getType().isa()) { + auto dataMember = op.getValue().cast(); + attr = lowerDataMemberAttr(op->getParentOfType(), + dataMember, *typeConverter); } // TODO(cir): constant arrays are currently just pushed into the stack using // the store instruction, instead of being stored as global variables and @@ -1689,6 +1716,10 @@ class CIRGlobalOpLowering lowerCirAttrAsValue(op, init.value(), rewriter, typeConverter); rewriter.create(loc, value); return mlir::success(); + } else if (auto dataMemberAttr = + init.value().dyn_cast()) { + init = lowerDataMemberAttr(op->getParentOfType(), + dataMemberAttr, *typeConverter); } else if (const auto structAttr = init.value().dyn_cast()) { setupRegionInitializedLLVMGlobalOp(op, rewriter); @@ -2691,6 +2722,24 @@ class CIRGetMemberOpLowering } }; +class CIRGetRuntimeMemberOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern< + mlir::cir::GetRuntimeMemberOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::GetRuntimeMemberOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto llvmResTy = getTypeConverter()->convertType(op.getType()); + auto llvmElementTy = mlir::IntegerType::get(op.getContext(), 8); + + rewriter.replaceOpWithNewOp( + op, llvmResTy, llvmElementTy, adaptor.getAddr(), adaptor.getMember()); + return mlir::success(); + } +}; + class CIRPtrDiffOpLowering : public mlir::OpConversionPattern { public: @@ -3241,20 +3290,21 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, - CIRGetMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, - CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, - CIRExpectOpLowering, CIRVTableAddrPointOpLowering, - CIRVectorCreateLowering, CIRVectorInsertLowering, - CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, - CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, - CIRVectorShuffleVecLowering, CIRStackSaveLowering, - CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, - CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, - CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, - CIRCmpThreeWayOpLowering, CIRCeilOpLowering, CIRFloorOpLowering, - CIRFAbsOpLowering, CIRNearbyintOpLowering, CIRRintOpLowering, - CIRRoundOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, - CIRFMaxOpLowering, CIRFMinOpLowering>(converter, patterns.getContext()); + CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, + CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, + CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, + CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, + CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, + CIRVectorSplatLowering, CIRVectorTernaryLowering, + CIRVectorShuffleIntsLowering, CIRVectorShuffleVecLowering, + CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, + CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, + CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, + CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, CIRCeilOpLowering, + CIRFloorOpLowering, CIRFAbsOpLowering, CIRNearbyintOpLowering, + CIRRintOpLowering, CIRRoundOpLowering, CIRTruncOpLowering, + CIRCopysignOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering>( + converter, patterns.getContext()); } namespace { @@ -3265,6 +3315,10 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, return mlir::LLVM::LLVMPointerType::get(type.getContext(), type.getAddrSpace()); }); + converter.addConversion([&](mlir::cir::DataMemberType type) -> mlir::Type { + return mlir::IntegerType::get(type.getContext(), + dataLayout.getTypeSizeInBits(type)); + }); converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { auto ty = converter.convertType(type.getEltType()); return mlir::LLVM::LLVMArrayType::get(ty, type.getSize()); diff --git a/clang/test/CIR/Lowering/data-member.cir b/clang/test/CIR/Lowering/data-member.cir new file mode 100644 index 000000000000..40846c53f920 --- /dev/null +++ b/clang/test/CIR/Lowering/data-member.cir @@ -0,0 +1,52 @@ +// RUN: cir-opt -cir-to-llvm -o - %s | FileCheck -check-prefix=MLIR %s +// RUN: cir-translate -cir-to-llvmir -o - %s | FileCheck -check-prefix=LLVM %s + +!s32i = !cir.int +!s64i = !cir.int +!structT = !cir.struct, !cir.int, !cir.int}> + +module @test { + cir.global external @pt_member = #cir.data_member<1> : !cir.data_member + // MLIR: llvm.mlir.global external @pt_member(4 : i64) {addr_space = 0 : i32} : i64 + // LLVM: @pt_member = global i64 4 + + cir.func @constant() -> !cir.data_member { + %0 = cir.const #cir.data_member<1> : !cir.data_member + cir.return %0 : !cir.data_member + } + // MLIR: llvm.func @constant() -> i64 + // MLIR-NEXT: %0 = llvm.mlir.constant(4 : i64) : i64 + // MLIR-NEXT: llvm.return %0 : i64 + // MLIR-NEXT: } + + // LLVM: define i64 @constant() + // LLVM-NEXT: ret i64 4 + // LLVM-NEXT: } + + cir.func @null_constant() -> !cir.data_member { + %0 = cir.const #cir.data_member : !cir.data_member + cir.return %0 : !cir.data_member + } + // MLIR: llvm.func @null_constant() -> i64 + // MLIR-NEXT: %0 = llvm.mlir.constant(-1 : i64) : i64 + // MLIR-NEXT: llvm.return %0 : i64 + // MLIR-NEXT: } + + // LLVM: define i64 @null_constant() !dbg !7 { + // LLVM-NEXT: ret i64 -1 + // LLVM-NEXT: } + + cir.func @get_runtime_member(%arg0: !cir.ptr, %arg1: !cir.data_member) -> !cir.ptr { + %0 = cir.get_runtime_member %arg0[%arg1 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.return %0 : !cir.ptr + } + // MLIR: llvm.func @get_runtime_member(%arg0: !llvm.ptr, %arg1: i64) -> !llvm.ptr + // MLIR-NEXT: %0 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, i8 + // MLIR-NEXT: llvm.return %0 : !llvm.ptr + // MLIR-NEXT: } + + // LLVM: define ptr @get_runtime_member(ptr %0, i64 %1) + // LLVM-NEXT: %3 = getelementptr i8, ptr %0, i64 %1 + // LLVM-NEXT: ret ptr %3 + // LLVM-NEXT: } +} \ No newline at end of file From 3d65fefb0b14dc80583614bc8c4e3f3af0923b91 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Jun 2024 10:28:18 -0700 Subject: [PATCH 1639/2301] Revert "[CIR][ABI][NFC] Add CC lowering for void FuncOps (#678)" This reverts commit 901f532a6fe4008e48681987818b6816d4cb7d56. Revert as a prereq to revert #668 This broke the build: ``` /usr/bin/ld: lib/libMLIRCIRTransforms.a(LoweringPrepare.cpp.o): in function `(anonymous namespace)::LoweringPreparePass::lowerVAArgOp(mlir::cir::VAArgOp)': /local/home/dolsen/clangir/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp:340: undefined reference to `cir::CIRDataLayout::CIRDataLayout(mlir::ModuleOp)' /usr/bin/ld: lib/libTargetLowering.a(LowerTypes.cpp.o): in function `cir::CIRDataLayout::CIRDataLayout(llvm::StringRef, mlir::ModuleOp)': /local/home/dolsen/clangir/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h:31: undefined reference to `cir::CIRDataLayout::reset(llvm::StringRef)'`' ``` Steps to reproduce: ``` $ cmake -GNinja -DCMAKE_INSTALL_PREFIX=//clangir-install "-DLLVM_ENABLE_PROJECTS=clang;mlir" -DCLANG_ENABLE_CIR=ON -DLLVM_TARGETS_TO_BUILD=host -DCMAKE_BUILD_TYPE=Debug ../clangir/llvm $ ninja install ``` --- clang/include/clang/CIR/MissingFeatures.h | 11 -- .../Dialect/Transforms/CallConvLowering.cpp | 8 +- .../Transforms/TargetLowering/LowerCall.cpp | 166 ------------------ .../TargetLowering/LowerFunction.cpp | 106 ----------- .../Transforms/TargetLowering/LowerFunction.h | 15 -- .../TargetLowering/LowerFunctionInfo.h | 13 -- .../Transforms/TargetLowering/LowerModule.cpp | 105 +---------- .../Transforms/TargetLowering/LowerModule.h | 22 +-- .../Transforms/TargetLowering/LowerTypes.h | 6 - 9 files changed, 7 insertions(+), 445 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index d61c5e618605..e2f554019422 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -192,22 +192,11 @@ struct MissingFeatures { //-- Missing AST queries static bool recordDeclCanPassInRegisters() { return false; } - static bool funcDeclIsCXXConstructorDecl() { return false; } - static bool funcDeclIsCXXDestructorDecl() { return false; } - static bool funcDeclIsCXXMethodDecl() { return false; } - static bool funcDeclIsInlineBuiltinDeclaration() { return false; } - static bool funcDeclIsReplaceableGlobalAllocationFunction() { return false; } - static bool qualTypeIsReferenceType() { return false; } //-- Missing types static bool vectorType() { return false; } - //-- Missing LLVM attributes - - static bool noReturn() { return false; } - static bool csmeCall() { return false; } - //-- Other missing features // Calls with a static chain pointer argument may be optimized (p.e. freeing diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 9363c7349519..ad35d7835255 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -82,11 +82,9 @@ struct CallConvLoweringPattern : public OpRewritePattern { } } - // TODO(cir): Instead of re-emmiting every load and store, bitcast arguments - // and return values to their ABI-specific counterparts when possible. - if (lowerModule.rewriteFunctionDefinition(op).failed()) - return failure(); - + // Rewrite function definition. + // FIXME(cir): This is a workaround to avoid an infinite loop in the driver. + rewriter.replaceOp(op, rewriter.clone(*op)); return success(); } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 85890532e4f9..59d736c0574f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -1,18 +1,12 @@ #include "LowerCall.h" -#include "CIRToCIRArgMapping.h" #include "LowerFunctionInfo.h" -#include "LowerModule.h" #include "LowerTypes.h" #include "clang/CIR/FnInfoOpts.h" -#include "clang/CIR/MissingFeatures.h" -#include "llvm/Support/ErrorHandling.h" using namespace mlir; using namespace mlir::cir; -using ABIArgInfo = ::cir::ABIArgInfo; using FnInfoOpts = ::cir::FnInfoOpts; -using MissingFeatures = ::cir::MissingFeatures; namespace { @@ -50,150 +44,8 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, fnType.getInputs(), required); } -/// Adds the formal parameters in FPT to the given prefix. If any parameter in -/// FPT has pass_object_size attrs, then we'll add parameters for those, too. -static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { - // Fast path: don't touch param info if we don't need to. - if (/*!fnTy->hasExtParameterInfos()=*/true) { - prefix.append(fnTy.getInputs().begin(), fnTy.getInputs().end()); - return; - } - - assert(MissingFeatures::extParamInfo()); - llvm_unreachable("NYI"); -} - -/// Arrange the LLVM function layout for a value of the given function -/// type, on top of any implicit parameters already stored. -/// -/// \param CGT - Abstraction for lowering CIR types. -/// \param instanceMethod - Whether the function is an instance method. -/// \param prefix - List of implicit parameters to be prepended (e.g. 'this'). -/// \param FTP - ABI-agnostic function type. -static const LowerFunctionInfo & -arrangeCIRFunctionInfo(LowerTypes &CGT, bool instanceMethod, - SmallVectorImpl &prefix, FuncType fnTy) { - assert(!MissingFeatures::extParamInfo()); - RequiredArgs Required = RequiredArgs::forPrototypePlus(fnTy, prefix.size()); - // FIXME: Kill copy. - appendParameterTypes(prefix, fnTy); - assert(!MissingFeatures::qualifiedTypes()); - Type resultType = fnTy.getReturnType(); - - FnInfoOpts opts = - instanceMethod ? FnInfoOpts::IsInstanceMethod : FnInfoOpts::None; - return CGT.arrangeLLVMFunctionInfo(resultType, opts, prefix, Required); -} - } // namespace -/// Update function with ABI-specific attributes. -/// -/// NOTE(cir): Partially copies CodeGenModule::ConstructAttributeList, but -/// focuses on ABI/Target-related attributes. -void LowerModule::constructAttributeList(StringRef Name, - const LowerFunctionInfo &FI, - FuncOp CalleeInfo, FuncOp newFn, - unsigned &CallingConv, - bool AttrOnCallSite, bool IsThunk) { - // Collect function IR attributes from the CC lowering. - // We'll collect the paramete and result attributes later. - // FIXME(cir): Codegen differentiates between CallConv and EffectiveCallConv, - // but I don't think we need to do this here. - CallingConv = FI.getCallingConvention(); - // FIXME(cir): No-return should probably be set in CIRGen (ABI-agnostic). - if (MissingFeatures::noReturn()) - llvm_unreachable("NYI"); - if (MissingFeatures::csmeCall()) - llvm_unreachable("NYI"); - - // TODO(cir): Implement AddAttributesFromFunctionProtoType here. - // TODO(cir): Implement AddAttributesFromOMPAssumes here. - assert(!MissingFeatures::openMP()); - - // TODO(cir): Skipping a bunch of AST queries here. We will need to partially - // implement some of them as this section sets target-specific attributes - // too. - // if (TargetDecl) { - // [...] - // } - - // NOTE(cir): The original code adds default and no-builtin attributes here as - // well. AFAIK, these are ABI/Target-agnostic, so it would be better handled - // in CIRGen. Regardless, I'm leaving this comment here as a heads up. - - // Override some default IR attributes based on declaration-specific - // information. - // NOTE(cir): Skipping another set of AST queries here. - - // Collect attributes from arguments and return values. - CIRToCIRArgMapping IRFunctionArgs(getContext(), FI); - - const ABIArgInfo &RetAI = FI.getReturnInfo(); - - // TODO(cir): No-undef attribute for return values partially depends on - // ABI-specific information. Maybe we should include it here. - - switch (RetAI.getKind()) { - case ABIArgInfo::Ignore: - break; - default: - llvm_unreachable("Missing ABIArgInfo::Kind"); - } - - if (!IsThunk) { - if (MissingFeatures::qualTypeIsReferenceType()) { - llvm_unreachable("NYI"); - } - } - - // Attach attributes to sret. - if (MissingFeatures::sretArgs()) { - llvm_unreachable("sret is NYI"); - } - - // Attach attributes to inalloca arguments. - if (MissingFeatures::inallocaArgs()) { - llvm_unreachable("inalloca is NYI"); - } - - // Apply `nonnull`, `dereferencable(N)` and `align N` to the `this` argument, - // unless this is a thunk function. - // FIXME: fix this properly, https://reviews.llvm.org/D100388 - if (MissingFeatures::funcDeclIsCXXMethodDecl() || - MissingFeatures::inallocaArgs()) { - llvm_unreachable("`this` argument attributes are NYI"); - } - - unsigned ArgNo = 0; - for (LowerFunctionInfo::const_arg_iterator I = FI.arg_begin(), - E = FI.arg_end(); - I != E; ++I, ++ArgNo) { - llvm_unreachable("NYI"); - } - assert(ArgNo == FI.arg_size()); -} - -/// Arrange the argument and result information for the declaration or -/// definition of the given function. -const LowerFunctionInfo &LowerTypes::arrangeFunctionDeclaration(FuncOp fnOp) { - if (MissingFeatures::funcDeclIsCXXMethodDecl()) - llvm_unreachable("NYI"); - - assert(!MissingFeatures::qualifiedTypes()); - FuncType FTy = fnOp.getFunctionType(); - - assert(!MissingFeatures::CUDA()); - - // When declaring a function without a prototype, always use a - // non-variadic type. - if (fnOp.getNoProto()) { - llvm_unreachable("NYI"); - } - - return arrangeFreeFunctionType(FTy); -} - /// Figure out the rules for calling a function with the given formal /// type using the given arguments. The arguments are necessary /// because the function might be unprototyped, in which case it's @@ -205,24 +57,6 @@ LowerTypes::arrangeFreeFunctionCall(const OperandRange args, chainCall); } -/// Arrange the argument and result information for the declaration or -/// definition of the given function. -const LowerFunctionInfo &LowerTypes::arrangeFreeFunctionType(FuncType FTy) { - SmallVector argTypes; - return ::arrangeCIRFunctionInfo(*this, /*instanceMethod=*/false, argTypes, - FTy); -} - -/// Arrange the argument and result information for the declaration or -/// definition of the given function. -const LowerFunctionInfo &LowerTypes::arrangeGlobalDeclaration(FuncOp fnOp) { - if (MissingFeatures::funcDeclIsCXXConstructorDecl() || - MissingFeatures::funcDeclIsCXXDestructorDecl()) - llvm_unreachable("NYI"); - - return arrangeFunctionDeclaration(fnOp); -} - /// Arrange the argument and result information for an abstract value /// of a given function type. This is the method which all of the /// above functions ultimately defer to. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 4d4e2d4f3ba6..58ddfb6e553e 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -23,8 +23,6 @@ #include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" -using ABIArgInfo = ::cir::ABIArgInfo; - namespace mlir { namespace cir { @@ -39,110 +37,6 @@ LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, : Target(LM.getTarget()), rewriter(rewriter), SrcFn(srcFn), callOp(callOp), LM(LM) {} -/// This method has partial parity with CodeGenFunction::EmitFunctionProlog from -/// the original codegen. However, it focuses on the ABI-specific details. On -/// top of that, it is also responsible for rewriting the original function. -LogicalResult -LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, - MutableArrayRef Args) { - // NOTE(cir): Skipping naked and implicit-return-zero functions here. These - // are dealt with in CIRGen. - - CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), FI); - assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); - - // If we're using inalloca, all the memory arguments are GEPs off of the last - // parameter, which is a pointer to the complete memory area. - assert(!::cir::MissingFeatures::inallocaArgs()); - - // Name the struct return parameter. - assert(!::cir::MissingFeatures::sretArgs()); - - // Track if we received the parameter as a pointer (indirect, byval, or - // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it - // into a local alloca for us. - SmallVector ArgVals; - ArgVals.reserve(Args.size()); - - // Create a pointer value for every parameter declaration. This usually - // entails copying one or more LLVM IR arguments into an alloca. Don't push - // any cleanups or do anything that might unwind. We do that separately, so - // we can push the cleanups in the correct order for the ABI. - assert(FI.arg_size() == Args.size()); - unsigned ArgNo = 0; - LowerFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); - for (MutableArrayRef::const_iterator i = Args.begin(), - e = Args.end(); - i != e; ++i, ++info_it, ++ArgNo) { - llvm_unreachable("NYI"); - } - - if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { - llvm_unreachable("NYI"); - } else { - // FIXME(cir): In the original codegen, EmitParamDecl is called here. It is - // likely that said function considers ABI details during emission, so we - // migth have to add a counter part here. Currently, it is not needed. - } - - return success(); -} - -LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { - const ABIArgInfo &RetAI = FI.getReturnInfo(); - - switch (RetAI.getKind()) { - - case ABIArgInfo::Ignore: - break; - - default: - llvm_unreachable("Unhandled ABIArgInfo::Kind"); - } - - return success(); -} - -/// Generate code for a function based on the ABI-specific information. -/// -/// This method has partial parity with CodeGenFunction::GenerateCode, but it -/// focuses on the ABI-specific details. So a lot of codegen stuff is removed. -LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, - const LowerFunctionInfo &FnInfo) { - assert(newFn && "generating code for null Function"); - auto Args = oldFn.getArguments(); - - // Emit the ABI-specific function prologue. - assert(newFn.empty() && "Function already has a body"); - rewriter.setInsertionPointToEnd(newFn.addEntryBlock()); - if (buildFunctionProlog(FnInfo, newFn, oldFn.getArguments()).failed()) - return failure(); - - // Ensure that old ABI-agnostic arguments uses were replaced. - const auto hasNoUses = [](Value val) { return val.getUses().empty(); }; - assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && "Missing RAUW?"); - - // Migrate function body to new ABI-aware function. - assert(oldFn.getBody().hasOneBlock() && - "Multiple blocks in original function not supported"); - - // Move old function body to new function. - // FIXME(cir): The merge below is not very good: will not work if SrcFn has - // multiple blocks and it mixes the new and old prologues. - rewriter.mergeBlocks(&oldFn.getBody().front(), &newFn.getBody().front(), - newFn.getArguments()); - - // FIXME(cir): What about saving parameters for corotines? Should we do - // something about it in this pass? If the change with the calling - // convention, we might have to handle this here. - - // Emit the standard function epilogue. - if (buildFunctionEpilog(FnInfo).failed()) - return failure(); - - return success(); -} - /// Rewrite a call operation to abide to the ABI calling convention. /// /// FIXME(cir): This method has partial parity to CodeGenFunction's diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h index 40cdd39463e6..6498bd705288 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h @@ -53,21 +53,6 @@ class LowerFunction { LowerModule &LM; // Per-module state. - const clang::TargetInfo &getTarget() const { return Target; } - - // Build ABI/Target-specific function prologue. - LogicalResult buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, - MutableArrayRef Args); - - // Build ABI/Target-specific function epilogue. - LogicalResult buildFunctionEpilog(const LowerFunctionInfo &FI); - - // Parity with CodeGenFunction::GenerateCode. Keep in mind that several - // sections in the original function are focused on codegen unrelated to the - // ABI. Such sections are handled in CIR's codegen, not here. - LogicalResult generateCode(FuncOp oldFn, FuncOp newFn, - const LowerFunctionInfo &FnInfo); - /// Rewrite a call operation to abide to the ABI calling convention. LogicalResult rewriteCallOp(CallOp op, ReturnValueSlot retValSlot = ReturnValueSlot()); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index c81335c9985a..ea7174caf6b9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -37,19 +37,6 @@ class RequiredArgs { RequiredArgs(All_t _) : NumRequired(~0U) {} explicit RequiredArgs(unsigned n) : NumRequired(n) { assert(n != ~0U); } - /// Compute the arguments required by the given formal prototype, - /// given that there may be some additional, non-formal arguments - /// in play. - /// - /// If FD is not null, this will consider pass_object_size params in FD. - static RequiredArgs forPrototypePlus(const FuncType prototype, - unsigned additional) { - if (!prototype.isVarArg()) - return All; - - llvm_unreachable("Variadic function is NYI"); - } - bool allowsOptionalArgs() const { return NumRequired != ~0U; } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 0ec4b589bb41..28760fea585d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -21,8 +21,6 @@ #include "mlir/Support/LogicalResult.h" #include "llvm/Support/ErrorHandling.h" -using MissingFeatures = ::cir::MissingFeatures; - namespace mlir { namespace cir { @@ -77,108 +75,11 @@ const TargetLoweringInfo &LowerModule::getTargetLoweringInfo() { return *TheTargetCodeGenInfo; } -void LowerModule::setCIRFunctionAttributes(FuncOp GD, - const LowerFunctionInfo &Info, - FuncOp F, bool IsThunk) { - unsigned CallingConv; - // NOTE(cir): The method below will update the F function in-place with the - // proper attributes. - constructAttributeList(GD.getName(), Info, GD, F, CallingConv, - /*AttrOnCallSite=*/false, IsThunk); - // TODO(cir): Set Function's calling convention. -} - -/// Set function attributes for a function declaration. -/// -/// This method is based on CodeGenModule::SetFunctionAttributes but it -/// altered to consider only the ABI/Target-related bits. -void LowerModule::setFunctionAttributes(FuncOp oldFn, FuncOp newFn, - bool IsIncompleteFunction, - bool IsThunk) { - - // TODO(cir): There's some special handling from attributes related to LLVM - // intrinsics. Should we do that here as well? - - // Setup target-specific attributes. - if (!IsIncompleteFunction) - setCIRFunctionAttributes(oldFn, getTypes().arrangeGlobalDeclaration(oldFn), - newFn, IsThunk); - - // TODO(cir): Handle attributes for returned "this" objects. - - // NOTE(cir): Skipping some linkage and other global value attributes here as - // it might be better for CIRGen to handle them. - - // TODO(cir): Skipping section attributes here. - - // TODO(cir): Skipping error attributes here. - - // If we plan on emitting this inline builtin, we can't treat it as a builtin. - if (MissingFeatures::funcDeclIsInlineBuiltinDeclaration()) { - llvm_unreachable("NYI"); - } - - if (MissingFeatures::funcDeclIsReplaceableGlobalAllocationFunction()) { - llvm_unreachable("NYI"); - } - - if (MissingFeatures::funcDeclIsCXXConstructorDecl() || - MissingFeatures::funcDeclIsCXXDestructorDecl()) - llvm_unreachable("NYI"); - else if (MissingFeatures::funcDeclIsCXXMethodDecl()) - llvm_unreachable("NYI"); - - // NOTE(cir) Skipping emissions that depend on codegen options, as well as - // sanitizers handling here. Do this in CIRGen. - - if (MissingFeatures::langOpts() && MissingFeatures::openMP()) - llvm_unreachable("NYI"); - - // NOTE(cir): Skipping more things here that depend on codegen options. - - if (MissingFeatures::extParamInfo()) { - llvm_unreachable("NYI"); - } -} - -/// Rewrites an existing function to conform to the ABI. -/// -/// This method is based on CodeGenModule::EmitGlobalFunctionDefinition but it -/// considerably simplified as it tries to remove any CodeGen related code. -LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { +LogicalResult LowerModule::rewriteGlobalFunctionDefinition(FuncOp op, + LowerModule &state) { mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint(op); - - // Get ABI/target-specific function information. - const LowerFunctionInfo &FI = this->getTypes().arrangeGlobalDeclaration(op); - - // Get ABI/target-specific function type. - FuncType Ty = this->getTypes().getFunctionType(FI); - - // NOTE(cir): Skipping getAddrOfFunction and getOrCreateCIRFunction methods - // here, as they are mostly codegen logic. - - // Create a new function with the ABI-specific types. - FuncOp newFn = cast(rewriter.cloneWithoutRegions(op)); - newFn.setType(Ty); - - // NOTE(cir): The clone above will preserve any existing attributes. If there - // are high-level attributes that ought to be dropped, do it here. - - // Set up ABI-specific function attributes. - setFunctionAttributes(op, newFn, false, /*IsThunk=*/false); - if (MissingFeatures::extParamInfo()) { - llvm_unreachable("ExtraAttrs are NYI"); - } - - if (LowerFunction(*this, rewriter, op, newFn) - .generateCode(op, newFn, FI) - .failed()) - return failure(); - - // Erase original ABI-agnostic function. - rewriter.eraseOp(op); - return success(); + return failure(); } LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index 74f7ed0bb5ac..bb56cb5fef92 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -64,28 +64,8 @@ class LowerModule { return kind; } - void - constructAttributeList(StringRef Name, const LowerFunctionInfo &FI, - FuncOp CalleeInfo, // TODO(cir): Implement CalleeInfo? - FuncOp newFn, unsigned &CallingConv, - bool AttrOnCallSite, bool IsThunk); - - void setCIRFunctionAttributes(FuncOp GD, const LowerFunctionInfo &Info, - FuncOp F, bool IsThunk); - - /// Set function attributes for a function declaration. - void setFunctionAttributes(FuncOp oldFn, FuncOp newFn, - bool IsIncompleteFunction, bool IsThunk); - - // Create a CIR FuncOp with with the given signature. - FuncOp createCIRFunction( - StringRef MangledName, FuncType Ty, FuncOp D, bool ForVTable, - bool DontDefer = false, bool IsThunk = false, - ArrayRef = {}, // TODO(cir): __attribute__(()) stuff. - bool IsForDefinition = false); - // Rewrite CIR FuncOp to match the target ABI. - LogicalResult rewriteFunctionDefinition(FuncOp op); + LogicalResult rewriteGlobalFunctionDefinition(FuncOp op, LowerModule &state); // Rewrite CIR CallOp to match the target ABI. LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h index 44f0d16b1bd8..941b3d7aeab7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -21,7 +21,6 @@ #include "mlir/IR/MLIRContext.h" #include "clang/Basic/Specifiers.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" -#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/FnInfoOpts.h" namespace mlir { @@ -66,14 +65,9 @@ class LowerTypes { /// Free functions are functions that are compatible with an ordinary /// C function pointer type. - /// FIXME(cir): Does the "free function" concept makes sense here? - const LowerFunctionInfo &arrangeFunctionDeclaration(FuncOp fnOp); const LowerFunctionInfo &arrangeFreeFunctionCall(const OperandRange args, const FuncType fnType, bool chainCall); - const LowerFunctionInfo &arrangeFreeFunctionType(FuncType FTy); - - const LowerFunctionInfo &arrangeGlobalDeclaration(FuncOp fnOp); /// Arrange the argument and result information for an abstract value /// of a given function type. This is the method which all of the From 5c603cc341ec6de4a50dce111342a66880f56ee1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 17 Jun 2024 12:05:29 -0700 Subject: [PATCH 1640/2301] Revert "[CIR][ABI][NFC] Add CC lowering for void CallOps (#668)" This reverts commit 66bb15b007e9b0a2487b84cb536306b8bcd9f028. This broke the build: ``` /usr/bin/ld: lib/libMLIRCIRTransforms.a(LoweringPrepare.cpp.o): in function `(anonymous namespace)::LoweringPreparePass::lowerVAArgOp(mlir::cir::VAArgOp)': /local/home/dolsen/clangir/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp:340: undefined reference to `cir::CIRDataLayout::CIRDataLayout(mlir::ModuleOp)' /usr/bin/ld: lib/libTargetLowering.a(LowerTypes.cpp.o): in function `cir::CIRDataLayout::CIRDataLayout(llvm::StringRef, mlir::ModuleOp)': /local/home/dolsen/clangir/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h:31: undefined reference to `cir::CIRDataLayout::reset(llvm::StringRef)'`' ``` Steps to reproduce: ``` $ cmake -GNinja -DCMAKE_INSTALL_PREFIX=//clangir-install "-DLLVM_ENABLE_PROJECTS=clang;mlir" -DCLANG_ENABLE_CIR=ON -DLLVM_TARGETS_TO_BUILD=host -DCMAKE_BUILD_TYPE=Debug ../clangir/llvm $ ninja install ``` --- .../clang/CIR/Dialect/IR/CIRDataLayout.h | 13 - clang/include/clang/CIR/FnInfoOpts.h | 37 --- clang/include/clang/CIR/MissingFeatures.h | 37 +-- clang/include/clang/CIR/Target/x86.h | 32 --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 1 - clang/lib/CIR/CodeGen/CIRGenCall.h | 7 + clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 1 - clang/lib/CIR/CodeGen/CIRGenTypes.h | 1 - .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 4 - clang/lib/CIR/CodeGen/TargetInfo.cpp | 12 +- .../Dialect/Transforms/CallConvLowering.cpp | 18 +- .../Transforms/TargetLowering/ABIInfo.cpp | 4 - .../Transforms/TargetLowering/ABIInfo.h | 6 - .../Transforms/TargetLowering/ABIInfoImpl.cpp | 22 -- .../Transforms/TargetLowering/ABIInfoImpl.h | 11 +- .../Transforms/TargetLowering/CIRCXXABI.h | 5 - .../TargetLowering/CIRLowerContext.cpp | 2 +- .../TargetLowering/CIRToCIRArgMapping.h | 33 +-- .../TargetLowering/ItaniumCXXABI.cpp | 14 -- .../Transforms/TargetLowering/LowerCall.cpp | 107 --------- .../Transforms/TargetLowering/LowerCall.h | 2 - .../TargetLowering/LowerFunction.cpp | 223 ------------------ .../Transforms/TargetLowering/LowerFunction.h | 15 -- .../TargetLowering/LowerFunctionInfo.h | 33 +-- .../Transforms/TargetLowering/LowerModule.cpp | 16 +- .../Transforms/TargetLowering/LowerModule.h | 4 +- .../Transforms/TargetLowering/LowerTypes.cpp | 50 +--- .../Transforms/TargetLowering/LowerTypes.h | 35 +-- .../Transforms/TargetLowering/Targets/X86.cpp | 139 +---------- .../x86-call-conv-lowering-pass.cpp} | 7 +- 31 files changed, 42 insertions(+), 851 deletions(-) delete mode 100644 clang/include/clang/CIR/FnInfoOpts.h delete mode 100644 clang/include/clang/CIR/Target/x86.h rename clang/test/CIR/Transforms/Target/{x86_64/x86_64-call-conv-lowering-pass.cpp => x86/x86-call-conv-lowering-pass.cpp} (69%) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h index e820d11340ae..88e030fb424a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h @@ -15,7 +15,6 @@ #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/IR/BuiltinOps.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "llvm/ADT/StringRef.h" namespace cir { @@ -25,18 +24,6 @@ class CIRDataLayout { public: mlir::DataLayout layout; - /// Constructs a DataLayout from a specification string. See reset(). - explicit CIRDataLayout(llvm::StringRef dataLayout, mlir::ModuleOp module) - : layout(module) { - reset(dataLayout); - } - - /// Parse a data layout string (with fallback to default values). - void reset(llvm::StringRef dataLayout); - - // Free all internal data structures. - void clear(); - CIRDataLayout(mlir::ModuleOp modOp); bool isBigEndian() const { return bigEndian; } diff --git a/clang/include/clang/CIR/FnInfoOpts.h b/clang/include/clang/CIR/FnInfoOpts.h deleted file mode 100644 index cea4d89f4c14..000000000000 --- a/clang/include/clang/CIR/FnInfoOpts.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef CIR_FNINFOOPTS_H -#define CIR_FNINFOOPTS_H - -#include "llvm/ADT/STLForwardCompat.h" - -namespace cir { - -enum class FnInfoOpts { - None = 0, - IsInstanceMethod = 1 << 0, - IsChainCall = 1 << 1, - IsDelegateCall = 1 << 2, -}; - -inline FnInfoOpts operator|(FnInfoOpts A, FnInfoOpts B) { - return static_cast(llvm::to_underlying(A) | - llvm::to_underlying(B)); -} - -inline FnInfoOpts operator&(FnInfoOpts A, FnInfoOpts B) { - return static_cast(llvm::to_underlying(A) & - llvm::to_underlying(B)); -} - -inline FnInfoOpts operator|=(FnInfoOpts A, FnInfoOpts B) { - A = A | B; - return A; -} - -inline FnInfoOpts operator&=(FnInfoOpts A, FnInfoOpts B) { - A = A & B; - return A; -} - -} // namespace cir - -#endif // CIR_FNINFOOPTS_H diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index e2f554019422..12255d409a75 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -156,6 +156,7 @@ struct MissingFeatures { static bool zeroInitializer() { return false; } static bool targetCodeGenInfoIsProtoCallVariadic() { return false; } static bool targetCodeGenInfoGetNullPointer() { return false; } + static bool chainCalls() { return false; } static bool operandBundles() { return false; } static bool exceptions() { return false; } static bool metaDataNode() { return false; } @@ -189,50 +190,24 @@ struct MissingFeatures { //===--- ABI lowering --===// - //-- Missing AST queries - - static bool recordDeclCanPassInRegisters() { return false; } - - //-- Missing types - - static bool vectorType() { return false; } - - //-- Other missing features - - // Calls with a static chain pointer argument may be optimized (p.e. freeing - // up argument registers), but we do not yet track such cases. - static bool chainCall() { return false; } - - // ABI-lowering has special handling for regcall calling convention (tries to - // pass every argument in regs). We don't support it just yet. - static bool regCall() { return false; } - - // Some ABIs (e.g. x86) require special handling for returning large structs - // by value. The sret argument parameter aids in this, but it is current NYI. - static bool sretArgs() { return false; } - - // Inalloca parameter attributes are mostly used for Windows x86_32 ABI. We - // do not yet support this yet. - static bool inallocaArgs() { return false; } - // Parameters may have additional attributes (e.g. [[noescape]]) that affect // the compiler. This is not yet supported in CIR. - static bool extParamInfo() { return false; } + static bool extParamInfo() { return true; } // LangOpts may affect lowering, but we do not carry this information into CIR // just yet. Right now, it only instantiates the default lang options. - static bool langOpts() { return false; } + static bool langOpts() { return true; } // Several type qualifiers are not yet supported in CIR, but important when // evaluating ABI-specific lowering. - static bool qualifiedTypes() { return false; } + static bool qualifiedTypes() { return true; } // We're ignoring several details regarding ABI-halding for Swift. - static bool swift() { return false; } + static bool swift() { return true; } // Despite carrying some information about variadics, we are currently // ignoring this to focus only on the code necessary to lower non-variadics. - static bool variadicFunctions() { return false; } + static bool variadicFunctions() { return true; } }; } // namespace cir diff --git a/clang/include/clang/CIR/Target/x86.h b/clang/include/clang/CIR/Target/x86.h deleted file mode 100644 index 2aa2d0493aac..000000000000 --- a/clang/include/clang/CIR/Target/x86.h +++ /dev/null @@ -1,32 +0,0 @@ -//==-- x86.h - Definitions common to all x86 ABI variants ------------------==// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// Definitions common to any X86 ABI implementation. -// -//===----------------------------------------------------------------------===// - -#ifndef CIR_X86_H -#define CIR_X86_H - -namespace cir { - -// Possible argument classifications according to the x86 ABI documentation. -enum X86ArgClass { - Integer = 0, - SSE, - SSEUp, - X87, - X87Up, - ComplexX87, - NoClass, - Memory -}; - -} // namespace cir - -#endif // CIR_X86_H diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index bf44ddb263b7..3128cf1e4d3a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -23,7 +23,6 @@ #include "clang/AST/GlobalDecl.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "clang/CIR/FnInfoOpts.h" #include "llvm/Support/ErrorHandling.h" #include diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index ea8e9e546352..866ba9af7a3b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -290,6 +290,13 @@ class ReturnValueSlot { Address getAddress() const { return Addr; } }; +enum class FnInfoOpts { + None = 0, + IsInstanceMethod = 1 << 0, + IsChainCall = 1 << 1, + IsDelegateCall = 1 << 2, +}; + } // namespace cir #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 8db9ce53f547..2604fdd9fdae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1439,7 +1439,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, // Chain calls use the same code path to add the inviisble chain parameter to // the function type. if (isa(FnType) || Chain) { - assert(!MissingFeatures::chainCall()); + assert(!MissingFeatures::chainCalls()); assert(!MissingFeatures::addressSpace()); auto CalleeTy = getTypes().GetFunctionType(FnInfo); // get non-variadic function type diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 3eb2b1b455e3..ca4d18461154 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -16,7 +16,6 @@ #include "clang/AST/GlobalDecl.h" #include "clang/AST/RecordLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "clang/CIR/FnInfoOpts.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index d1d547f24a9a..51350c9ea70e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -22,7 +22,6 @@ #include "clang/AST/Type.h" #include "clang/Basic/ABI.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "clang/CIR/FnInfoOpts.h" #include "llvm/ADT/SmallPtrSet.h" diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index da1851a2b00f..8af007621ba4 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -748,7 +748,3 @@ CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { } } } - -void CIRDataLayout::reset(StringRef Desc) { clear(); } - -void CIRDataLayout::clear() {} diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 2973a6ce70d3..121bc2f023c1 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -6,7 +6,6 @@ #include "CallingConv.h" #include "clang/Basic/TargetInfo.h" -#include "clang/CIR/Target/x86.h" using namespace cir; using namespace clang; @@ -80,7 +79,16 @@ namespace { enum class X86AVXABILevel { None, AVX, AVX512 }; class X86_64ABIInfo : public ABIInfo { - using Class = X86ArgClass; + enum Class { + Integer = 0, + SSE, + SSEUp, + X87, + X87Up, + ComplexX87, + NoClass, + Memory + }; // X86AVXABILevel AVXLevel; // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 64-bit diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index ad35d7835255..6130367d91a4 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -45,7 +45,7 @@ LowerModule createLowerModule(FuncOp op, PatternRewriter &rewriter) { // FIXME(cir): This just uses the default language options. We need to account // for custom options. // Create context. - assert(!::cir::MissingFeatures::langOpts()); + assert(::cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; auto context = CIRLowerContext(module.getContext(), langOpts); context.initBuiltinTypes(*targetInfo); @@ -64,27 +64,11 @@ struct CallConvLoweringPattern : public OpRewritePattern { LogicalResult matchAndRewrite(FuncOp op, PatternRewriter &rewriter) const final { - const auto module = op->getParentOfType(); - if (!op.getAst()) return op.emitError("function has no AST information"); LowerModule lowerModule = createLowerModule(op, rewriter); - // Rewrite function calls before definitions. This should be done before - // lowering the definition. - auto calls = op.getSymbolUses(module); - if (calls.has_value()) { - for (auto call : calls.value()) { - auto callOp = cast(call.getUser()); - if (lowerModule.rewriteFunctionCall(callOp, op).failed()) - return failure(); - } - } - - // Rewrite function definition. - // FIXME(cir): This is a workaround to avoid an infinite loop in the driver. - rewriter.replaceOp(op, rewriter.clone(*op)); return success(); } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 6160174191dc..46a865da0670 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -12,8 +12,6 @@ //===----------------------------------------------------------------------===// #include "ABIInfo.h" -#include "CIRCXXABI.h" -#include "LowerTypes.h" namespace mlir { namespace cir { @@ -21,7 +19,5 @@ namespace cir { // Pin the vtable to this file. ABIInfo::~ABIInfo() = default; -CIRCXXABI &ABIInfo::getCXXABI() const { return LT.getCXXABI(); } - } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h index ef5bae6d13fa..3fad01f3d7a8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h @@ -14,8 +14,6 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H -#include "CIRCXXABI.h" -#include "LowerFunctionInfo.h" #include "llvm/IR/CallingConv.h" namespace mlir { @@ -34,10 +32,6 @@ class ABIInfo { public: ABIInfo(LowerTypes <) : LT(LT), RuntimeCC(llvm::CallingConv::C) {} virtual ~ABIInfo(); - - CIRCXXABI &getCXXABI() const; - - virtual void computeInfo(LowerFunctionInfo &FI) const = 0; }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index ef90698054e8..c51176a99b95 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -10,25 +10,3 @@ // adapted to operate on the CIR dialect, however. // //===----------------------------------------------------------------------===// - -#include "ABIInfo.h" -#include "CIRCXXABI.h" -#include "LowerFunctionInfo.h" -#include "llvm/Support/ErrorHandling.h" - -namespace mlir { -namespace cir { - -bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, - const ABIInfo &Info) { - Type Ty = FI.getReturnType(); - - if (const auto RT = Ty.dyn_cast()) { - llvm_unreachable("NYI"); - } - - return CXXABI.classifyReturnType(FI); -} - -} // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h index d3ee18f0467b..f34d7fb07226 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h @@ -14,17 +14,8 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H -#include "ABIInfo.h" -#include "CIRCXXABI.h" -#include "LowerFunctionInfo.h" - namespace mlir { -namespace cir { - -bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, - const ABIInfo &Info); - -} // namespace cir +namespace cir {} // namespace cir } // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index 5496dbbf2327..bf5131a074b8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -14,7 +14,6 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H -#include "LowerFunctionInfo.h" #include "mlir/IR/Value.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" @@ -35,10 +34,6 @@ class CIRCXXABI { public: virtual ~CIRCXXABI(); - - /// If the C++ ABI requires the given type be returned in a particular way, - /// this method sets RetAI and returns true. - virtual bool classifyReturnType(LowerFunctionInfo &FI) const = 0; }; /// Creates an Itanium-family ABI. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index 7152ab081ec5..b75893bfb33f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -31,7 +31,7 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { Type Ty; // NOTE(cir): Clang does more stuff here. Not sure if we need to do the same. - assert(!::cir::MissingFeatures::qualifiedTypes()); + assert(::cir::MissingFeatures::qualifiedTypes()); switch (K) { case clang::BuiltinType::Char_S: Ty = IntType::get(getMLIRContext(), 8, true); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index 6481874bf3ab..9c1dae1f3dbf 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -17,10 +17,8 @@ #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRTOCIRARGMAPPING_H #include "CIRLowerContext.h" -#include "LowerFunctionInfo.h" -#include "clang/CIR/ABIArgInfo.h" +#include "LoweringFunctionInfo.h" #include "llvm/ADT/SmallVector.h" -#include "llvm/Support/ErrorHandling.h" namespace mlir { namespace cir { @@ -51,36 +49,9 @@ class CIRToCIRArgMapping { public: CIRToCIRArgMapping(const CIRLowerContext &context, const LowerFunctionInfo &FI, bool onlyRequiredArgs = false) - : ArgInfo(onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { - construct(context, FI, onlyRequiredArgs); - }; + : ArgInfo(onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {}; unsigned totalIRArgs() const { return TotalIRArgs; } - - void construct(const CIRLowerContext &context, const LowerFunctionInfo &FI, - bool onlyRequiredArgs = false) { - unsigned IRArgNo = 0; - const ::cir::ABIArgInfo &RetAI = FI.getReturnInfo(); - - if (RetAI.getKind() == ::cir::ABIArgInfo::Indirect) { - llvm_unreachable("NYI"); - } - - unsigned ArgNo = 0; - unsigned NumArgs = - onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); - for (LowerFunctionInfo::const_arg_iterator _ = FI.arg_begin(); - ArgNo < NumArgs; ++_, ++ArgNo) { - llvm_unreachable("NYI"); - } - assert(ArgNo == ArgInfo.size()); - - if (::cir::MissingFeatures::inallocaArgs()) { - llvm_unreachable("NYI"); - } - - TotalIRArgs = IRArgNo; - } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index 6e0fecfa44d5..aee2620496e6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -32,24 +32,10 @@ class ItaniumCXXABI : public CIRCXXABI { public: ItaniumCXXABI(LowerModule &LM) : CIRCXXABI(LM) {} - - bool classifyReturnType(LowerFunctionInfo &FI) const override; }; } // namespace -bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { - const StructType RD = FI.getReturnType().dyn_cast(); - if (!RD) - return false; - - // If C++ prohibits us from making a copy, return by address. - if (::cir::MissingFeatures::recordDeclCanPassInRegisters()) - llvm_unreachable("NYI"); - - return false; -} - CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { switch (LM.getCXXABIKind()) { case clang::TargetCXXABI::GenericItanium: diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 59d736c0574f..e69de29bb2d1 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -1,107 +0,0 @@ -#include "LowerCall.h" -#include "LowerFunctionInfo.h" -#include "LowerTypes.h" -#include "clang/CIR/FnInfoOpts.h" - -using namespace mlir; -using namespace mlir::cir; - -using FnInfoOpts = ::cir::FnInfoOpts; - -namespace { - -/// Arrange a call as unto a free function, except possibly with an -/// additional number of formal parameters considered required. -const LowerFunctionInfo & -arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, - const OperandRange &args, const FuncType fnType, - unsigned numExtraRequiredArgs, bool chainCall) { - assert(args.size() >= numExtraRequiredArgs); - - assert(!::cir::MissingFeatures::extParamInfo()); - - // In most cases, there are no optional arguments. - RequiredArgs required = RequiredArgs::All; - - // If we have a variadic prototype, the required arguments are the - // extra prefix plus the arguments in the prototype. - // FIXME(cir): Properly check if function is no-proto. - if (/*IsPrototypedFunction=*/true) { - if (fnType.isVarArg()) - llvm_unreachable("NYI"); - - if (::cir::MissingFeatures::extParamInfo()) - llvm_unreachable("NYI"); - } - - // TODO(cir): There's some CC stuff related to no-proto functions here, but - // I'm skipping it since it requires CodeGen info. Maybe we can embbed this - // information in the FuncOp during CIRGen. - - assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); - FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; - return LT.arrangeLLVMFunctionInfo(fnType.getReturnType(), opts, - fnType.getInputs(), required); -} - -} // namespace - -/// Figure out the rules for calling a function with the given formal -/// type using the given arguments. The arguments are necessary -/// because the function might be unprototyped, in which case it's -/// target-dependent in crazy ways. -const LowerFunctionInfo & -LowerTypes::arrangeFreeFunctionCall(const OperandRange args, - const FuncType fnType, bool chainCall) { - return arrangeFreeFunctionLikeCall(*this, LM, args, fnType, chainCall ? 1 : 0, - chainCall); -} - -/// Arrange the argument and result information for an abstract value -/// of a given function type. This is the method which all of the -/// above functions ultimately defer to. -/// -/// \param resultType - ABI-agnostic CIR result type. -/// \param opts - Options to control the arrangement. -/// \param argTypes - ABI-agnostic CIR argument types. -/// \param required - Information about required/optional arguments. -const LowerFunctionInfo & -LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, - ArrayRef argTypes, - RequiredArgs required) { - assert(!::cir::MissingFeatures::qualifiedTypes()); - - LowerFunctionInfo *FI = nullptr; - - // FIXME(cir): Allow user-defined CCs (e.g. __attribute__((vectorcall))). - assert(!::cir::MissingFeatures::extParamInfo()); - unsigned CC = clangCallConvToLLVMCallConv(clang::CallingConv::CC_C); - - // Construct the function info. We co-allocate the ArgInfos. - // NOTE(cir): This initial function info might hold incorrect data. - FI = LowerFunctionInfo::create( - CC, /*isInstanceMethod=*/false, /*isChainCall=*/false, - /*isDelegateCall=*/false, resultType, argTypes, required); - - // Compute ABI information. - if (CC == llvm::CallingConv::SPIR_KERNEL) { - llvm_unreachable("NYI"); - } else if (::cir::MissingFeatures::extParamInfo()) { - llvm_unreachable("NYI"); - } else { - // NOTE(cir): This corects the initial function info data. - getABIInfo().computeInfo(*FI); // FIXME(cir): Args should be set to null. - } - - // Loop over all of the computed argument and return value info. If any of - // them are direct or extend without a specified coerce type, specify the - // default now. - ::cir::ABIArgInfo &retInfo = FI->getReturnInfo(); - if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) - llvm_unreachable("NYI"); - - for (auto &_ : FI->arguments()) - llvm_unreachable("NYI"); - - return *FI; -} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h index b579f96fb436..ac54490c578f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h @@ -22,8 +22,6 @@ namespace cir { /// Contains the address where the return value of a function can be stored, and /// whether the address is volatile or not. class ReturnValueSlot { - // FIXME(cir): We should be able to query this directly from CIR at some - // point. This class can then be removed. Value Addr = {}; // Return value slot flags diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 58ddfb6e553e..6215b6149786 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -12,16 +12,11 @@ //===----------------------------------------------------------------------===// #include "LowerFunction.h" -#include "CIRToCIRArgMapping.h" #include "LowerCall.h" -#include "LowerFunctionInfo.h" #include "LowerModule.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" -#include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "clang/CIR/MissingFeatures.h" -#include "llvm/Support/ErrorHandling.h" namespace mlir { namespace cir { @@ -37,223 +32,5 @@ LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, : Target(LM.getTarget()), rewriter(rewriter), SrcFn(srcFn), callOp(callOp), LM(LM) {} -/// Rewrite a call operation to abide to the ABI calling convention. -/// -/// FIXME(cir): This method has partial parity to CodeGenFunction's -/// EmitCallEpxr method defined in CGExpr.cpp. This could likely be -/// removed in favor of a more direct approach. -LogicalResult LowerFunction::rewriteCallOp(CallOp op, - ReturnValueSlot retValSlot) { - - // TODO(cir): Check if BlockCall, CXXMemberCall, CUDAKernelCall, or - // CXXOperatorMember require special handling here. These should be handled in - // CIRGen, unless there is call conv or ABI-specific stuff to be handled, them - // we should do it here. - - // TODO(cir): Also check if Builtin and CXXPeseudoDtor need special handling - // here. These should be handled in CIRGen, unless there is call conv or - // ABI-specific stuff to be handled, them we should do it here. - - // NOTE(cir): There is no direct way to fetch the function type from the - // CallOp, so we fetch it from the source function. This assumes the function - // definition has not yet been lowered. - assert(SrcFn && "No source function"); - auto fnType = SrcFn.getFunctionType(); - - // Rewrite the call operation to abide to the ABI calling convention. - auto Ret = rewriteCallOp(fnType, SrcFn, op, retValSlot); - - // Replace the original call result with the new one. - if (Ret) - rewriter.replaceAllUsesWith(op.getResult(), Ret); - - return success(); -} - -/// Rewrite a call operation to abide to the ABI calling convention. -/// -/// FIXME(cir): This method has partial parity to CodeGenFunction's EmitCall -/// method defined in CGExpr.cpp. This could likely be removed in favor of a -/// more direct approach since most of the code here is exclusively CodeGen. -Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, - CallOp callOp, ReturnValueSlot retValSlot, - Value Chain) { - // NOTE(cir): Skip a bunch of function pointer stuff and AST declaration - // asserts. Also skip sanitizers, as these should likely be handled at - // CIRGen. - CallArgList Args; - if (Chain) - llvm_unreachable("NYI"); - - // NOTE(cir): Call args were already emitted in CIRGen. Skip the evaluation - // order done in CIRGen and just fetch the exiting arguments here. - Args = callOp.getArgOperands(); - - const LowerFunctionInfo &FnInfo = LM.getTypes().arrangeFreeFunctionCall( - callOp.getArgOperands(), calleeTy, /*chainCall=*/false); - - // C99 6.5.2.2p6: - // If the expression that denotes the called function has a type - // that does not include a prototype, [the default argument - // promotions are performed]. If the number of arguments does not - // equal the number of parameters, the behavior is undefined. If - // the function is defined with a type that includes a prototype, - // and either the prototype ends with an ellipsis (, ...) or the - // types of the arguments after promotion are not compatible with - // the types of the parameters, the behavior is undefined. If the - // function is defined with a type that does not include a - // prototype, and the types of the arguments after promotion are - // not compatible with those of the parameters after promotion, - // the behavior is undefined [except in some trivial cases]. - // That is, in the general case, we should assume that a call - // through an unprototyped function type works like a *non-variadic* - // call. The way we make this work is to cast to the exact type - // of the promoted arguments. - // - // Chain calls use this same code path to add the invisible chain parameter - // to the function type. - if (origCallee.getNoProto() || Chain) { - llvm_unreachable("NYI"); - } - - assert(!::cir::MissingFeatures::CUDA()); - - // TODO(cir): LLVM IR has the concept of "CallBase", which is a base class for - // all types of calls. Perhaps we should have a CIR interface to mimic this - // class. - CallOp CallOrInvoke = {}; - Value CallResult = {}; - rewriteCallOp(FnInfo, origCallee, callOp, retValSlot, Args, CallOrInvoke, - /*isMustTail=*/false, callOp.getLoc()); - - // NOTE(cir): Skipping debug stuff here. - - return CallResult; -} - -// NOTE(cir): This method has partial parity to CodeGenFunction's EmitCall -// method in CGCall.cpp. When incrementing it, use the original codegen as a -// reference: add ABI-specific stuff and skip codegen stuff. -Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, - FuncOp Callee, CallOp Caller, - ReturnValueSlot ReturnValue, - CallArgList &CallArgs, CallOp CallOrInvoke, - bool isMustTail, Location loc) { - // FIXME: We no longer need the types from CallArgs; lift up and simplify. - - // Handle struct-return functions by passing a pointer to the - // location that we would like to return into. - Type RetTy = CallInfo.getReturnType(); // ABI-agnostic type. - const ::cir::ABIArgInfo &RetAI = CallInfo.getReturnInfo(); - - FuncType IRFuncTy = LM.getTypes().getFunctionType(CallInfo); - - // NOTE(cir): Some target/ABI related checks happen here. I'm skipping them - // under the assumption that they are handled in CIRGen. - - // 1. Set up the arguments. - - // If we're using inalloca, insert the allocation after the stack save. - // FIXME: Do this earlier rather than hacking it in here! - if (StructType ArgStruct = CallInfo.getArgStruct()) { - llvm_unreachable("NYI"); - } - - CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), CallInfo); - SmallVector IRCallArgs(IRFunctionArgs.totalIRArgs()); - - // If the call returns a temporary with struct return, create a temporary - // alloca to hold the result, unless one is given to us. - if (RetAI.isIndirect() || RetAI.isCoerceAndExpand() || RetAI.isInAlloca()) { - llvm_unreachable("NYI"); - } - - assert(!::cir::MissingFeatures::swift()); - - // NOTE(cir): Skipping lifetime markers here. - - // Translate all of the arguments as necessary to match the IR lowering. - assert(CallInfo.arg_size() == CallArgs.size() && - "Mismatch between function signature & arguments."); - unsigned ArgNo = 0; - LowerFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); - for (auto I = CallArgs.begin(), E = CallArgs.end(); I != E; - ++I, ++info_it, ++ArgNo) { - llvm_unreachable("NYI"); - } - - // 2. Prepare the function pointer. - // NOTE(cir): This is not needed for CIR. - - // 3. Perform the actual call. - - // NOTE(cir): CIRGen handle when to "deactive" cleanups. We also skip some - // debugging stuff here. - - // Update the largest vector width if any arguments have vector types. - assert(!::cir::MissingFeatures::vectorType()); - - // Compute the calling convention and attributes. - - // FIXME(cir): Skipping call attributes for now. Not sure if we have to do - // this at all since we already do it for the function definition. - - // FIXME(cir): Implement the required procedures for strictfp function and - // fast-math. - - // FIXME(cir): Add missing call-site attributes here if they are - // ABI/target-specific, otherwise, do it in CIRGen. - - // NOTE(cir): Deciding whether to use Call or Invoke is done in CIRGen. - - // Rewrite the actual call operation. - // TODO(cir): Handle other types of CIR calls (e.g. cir.try_call). - // NOTE(cir): We don't know if the callee was already lowered, so we only - // fetch the name from the callee, while the return type is fetch from the - // lowering types manager. - CallOp callOp = rewriter.create(loc, Caller.getCalleeAttr(), - IRFuncTy.getReturnType(), IRCallArgs); - callOp.setExtraAttrsAttr(Caller.getExtraAttrs()); - - assert(!::cir::MissingFeatures::vectorType()); - - // NOTE(cir): Skipping some ObjC, tail-call, debug, and attribute stuff here. - - // 4. Finish the call. - - // NOTE(cir): Skipping no-return, isMustTail, swift error handling, and - // writebacks here. These should be handled in CIRGen, I think. - - // Convert return value from ABI-agnostic to ABI-aware. - Value Ret = [&] { - // NOTE(cir): CIRGen already handled the emission of the return value. We - // need only to handle the ABI-specific to ABI-agnostic cast here. - switch (RetAI.getKind()) { - case ::cir::ABIArgInfo::Ignore: - // If we are ignoring an argument that had a result, make sure to - // construct the appropriate return value for our caller. - return getUndefRValue(RetTy); - default: - llvm::errs() << "Unhandled ABIArgInfo kind: " << RetAI.getKind() << "\n"; - llvm_unreachable("NYI"); - } - }(); - - // NOTE(cir): Skipping Emissions, lifetime markers, and dtors here that should - // be handled in CIRGen. - - return Ret; -} - -// NOTE(cir): This method has partial parity to CodeGenFunction's GetUndefRValue -// defined in CGExpr.cpp. -Value LowerFunction::getUndefRValue(Type Ty) { - if (Ty.isa()) - return nullptr; - - llvm::outs() << "Missing undef handler for value type: " << Ty << "\n"; - llvm_unreachable("NYI"); -} - } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h index 6498bd705288..319751790915 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h @@ -25,8 +25,6 @@ namespace mlir { namespace cir { -using CallArgList = SmallVector; - class LowerFunction { LowerFunction(const LowerFunction &) = delete; void operator=(const LowerFunction &) = delete; @@ -52,19 +50,6 @@ class LowerFunction { ~LowerFunction() = default; LowerModule &LM; // Per-module state. - - /// Rewrite a call operation to abide to the ABI calling convention. - LogicalResult rewriteCallOp(CallOp op, - ReturnValueSlot retValSlot = ReturnValueSlot()); - Value rewriteCallOp(FuncType calleeTy, FuncOp origCallee, CallOp callOp, - ReturnValueSlot retValSlot, Value Chain = nullptr); - Value rewriteCallOp(const LowerFunctionInfo &CallInfo, FuncOp Callee, - CallOp Caller, ReturnValueSlot ReturnValue, - CallArgList &CallArgs, CallOp CallOrInvoke, - bool isMustTail, Location loc); - - /// Get an appropriate 'undef' value for the given type. - Value getUndefRValue(Type Ty); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index ea7174caf6b9..4344745f2478 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -18,7 +18,6 @@ #include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/MissingFeatures.h" -#include "llvm/ADT/ArrayRef.h" #include "llvm/Support/TrailingObjects.h" namespace mlir { @@ -47,8 +46,6 @@ struct LowerFunctionInfoArgInfo { ::cir::ABIArgInfo info; // ABI-specific information. }; -// FIXME(cir): We could likely encode this information within CIR/MLIR, allowing -// us to eliminate this class. class LowerFunctionInfo final : private llvm::TrailingObjects { @@ -92,7 +89,7 @@ class LowerFunctionInfo final ArrayRef argTypes, RequiredArgs required) { // TODO(cir): Add assertions? - assert(!::cir::MissingFeatures::extParamInfo()); + assert(::cir::MissingFeatures::extParamInfo()); void *buffer = operator new(totalSizeToAlloc(argTypes.size() + 1)); LowerFunctionInfo *FI = new (buffer) LowerFunctionInfo(); @@ -118,22 +115,10 @@ class LowerFunctionInfo final return NumArgs + 1; } - typedef const ArgInfo *const_arg_iterator; - typedef ArgInfo *arg_iterator; - - MutableArrayRef arguments() { - return MutableArrayRef(arg_begin(), NumArgs); - } - - const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; } - const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; } - arg_iterator arg_begin() { return getArgsBuffer() + 1; } - arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; } - unsigned arg_size() const { return NumArgs; } bool isVariadic() const { - assert(!::cir::MissingFeatures::variadicFunctions()); + assert(::cir::MissingFeatures::variadicFunctions()); return false; } unsigned getNumRequiredArgs() const { @@ -141,20 +126,6 @@ class LowerFunctionInfo final llvm_unreachable("NYI"); return arg_size(); } - - Type getReturnType() const { return getArgsBuffer()[0].type; } - - ::cir::ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; } - const ::cir::ABIArgInfo &getReturnInfo() const { - return getArgsBuffer()[0].info; - } - - /// Return the user specified callingconvention, which has been translated - /// into an LLVM CC. - unsigned getCallingConvention() const { return CallingConvention; } - - /// Get the struct type used to represent all the arguments in memory. - StructType getArgStruct() const { return ArgStruct; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 28760fea585d..44ef32a5ddfa 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -13,7 +13,6 @@ #include "LowerModule.h" #include "CIRLowerContext.h" -#include "LowerFunction.h" #include "TargetInfo.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributes.h" @@ -82,19 +81,10 @@ LogicalResult LowerModule::rewriteGlobalFunctionDefinition(FuncOp op, return failure(); } -LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { +LogicalResult LowerModule::rewriteFunctionCall(CallOp caller, FuncOp callee) { mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint(callOp); - - // Create a new function with the ABI-specific calling convention. - if (LowerFunction(*this, rewriter, funcOp, callOp) - .rewriteCallOp(callOp) - .failed()) - return failure(); - - // Erase original ABI-agnostic call. - rewriter.eraseOp(callOp); - return success(); + rewriter.setInsertionPoint(caller); + return failure(); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index bb56cb5fef92..d99d40f90554 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -60,7 +60,7 @@ class LowerModule { // FIXME(cir): This would be in ASTContext, not CodeGenModule. clang::TargetCXXABI::Kind getCXXABIKind() const { auto kind = getTarget().getCXXABI().getKind(); - assert(!::cir::MissingFeatures::langOpts()); + assert(::cir::MissingFeatures::langOpts()); return kind; } @@ -68,7 +68,7 @@ class LowerModule { LogicalResult rewriteGlobalFunctionDefinition(FuncOp op, LowerModule &state); // Rewrite CIR CallOp to match the target ABI. - LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp); + LogicalResult rewriteFunctionCall(CallOp caller, FuncOp callee); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index 3d8ca6cfe61f..1186da9df1e7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -12,59 +12,13 @@ //===----------------------------------------------------------------------===// #include "LowerTypes.h" -#include "CIRToCIRArgMapping.h" #include "LowerModule.h" #include "mlir/Support/LLVM.h" -#include "clang/CIR/ABIArgInfo.h" -#include "clang/CIR/MissingFeatures.h" -#include "llvm/Support/ErrorHandling.h" using namespace ::mlir::cir; -unsigned LowerTypes::clangCallConvToLLVMCallConv(clang::CallingConv CC) { - switch (CC) { - case clang::CC_C: - return llvm::CallingConv::C; - default: - llvm_unreachable("calling convention NYI"); - } -} - LowerTypes::LowerTypes(LowerModule &LM, StringRef DLString) - : LM(LM), context(LM.getContext()), Target(LM.getTarget()), + : LM(LM), queries(LM.getContext()), Target(LM.getTarget()), CXXABI(LM.getCXXABI()), TheABIInfo(LM.getTargetLoweringInfo().getABIInfo()), - mlirContext(LM.getMLIRContext()), DL(DLString, LM.getModule()) {} - -/// Return the ABI-specific function type for a CIR function type. -FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { - - mlir::Type resultType = {}; - const ::cir::ABIArgInfo &retAI = FI.getReturnInfo(); - switch (retAI.getKind()) { - case ::cir::ABIArgInfo::Ignore: - resultType = VoidType::get(getMLIRContext()); - break; - default: - llvm_unreachable("Missing ABIArgInfo::Kind"); - } - - CIRToCIRArgMapping IRFunctionArgs(getContext(), FI, true); - SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); - - // Add type for sret argument. - assert(!::cir::MissingFeatures::sretArgs()); - - // Add type for inalloca argument. - assert(!::cir::MissingFeatures::inallocaArgs()); - - // Add in all of the required arguments. - unsigned ArgNo = 0; - LowerFunctionInfo::const_arg_iterator it = FI.arg_begin(), - ie = it + FI.getNumRequiredArgs(); - for (; it != ie; ++it, ++ArgNo) { - llvm_unreachable("NYI"); - } - - return FuncType::get(getMLIRContext(), ArgTypes, resultType, FI.isVariadic()); -} + mlirContext(LM.getMLIRContext()), DL(LM.getModule()) {} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h index 941b3d7aeab7..395665d47f16 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -17,11 +17,8 @@ #include "ABIInfo.h" #include "CIRCXXABI.h" #include "CIRLowerContext.h" -#include "LowerCall.h" #include "mlir/IR/MLIRContext.h" -#include "clang/Basic/Specifiers.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" -#include "clang/CIR/FnInfoOpts.h" namespace mlir { namespace cir { @@ -36,7 +33,7 @@ class LowerTypes { private: LowerModule &LM; - CIRLowerContext &context; + CIRLowerContext &queries; const clang::TargetInfo &Target; CIRCXXABI &CXXABI; @@ -49,41 +46,11 @@ class LowerTypes { ::cir::CIRDataLayout DL; - const ABIInfo &getABIInfo() const { return TheABIInfo; } - public: LowerTypes(LowerModule &LM, StringRef DLString); ~LowerTypes() = default; LowerModule &getLM() const { return LM; } - CIRCXXABI &getCXXABI() const { return CXXABI; } - CIRLowerContext &getContext() { return context; } - MLIRContext *getMLIRContext() { return mlirContext; } - - /// Convert clang calling convention to LLVM callilng convention. - unsigned clangCallConvToLLVMCallConv(clang::CallingConv CC); - - /// Free functions are functions that are compatible with an ordinary - /// C function pointer type. - const LowerFunctionInfo &arrangeFreeFunctionCall(const OperandRange args, - const FuncType fnType, - bool chainCall); - - /// Arrange the argument and result information for an abstract value - /// of a given function type. This is the method which all of the - /// above functions ultimately defer to. - /// - /// \param resultType - ABI-agnostic CIR result type. - /// \param opts - Options to control the arrangement. - /// \param argTypes - ABI-agnostic CIR argument types. - /// \param required - Information about required/optional arguments. - const LowerFunctionInfo &arrangeLLVMFunctionInfo(Type resultType, - ::cir::FnInfoOpts opts, - ArrayRef argTypes, - RequiredArgs required); - - /// Return the ABI-specific function type for a CIR function type. - FuncType getFunctionType(const LowerFunctionInfo &FI); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 736f3a7ea301..6d2a329e6d2a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -1,164 +1,27 @@ - -#include "clang/CIR/Target/x86.h" #include "ABIInfo.h" -#include "ABIInfoImpl.h" #include "LowerModule.h" #include "LowerTypes.h" #include "TargetInfo.h" -#include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/MissingFeatures.h" -#include "llvm/Support/ErrorHandling.h" #include namespace mlir { namespace cir { class X86_64ABIInfo : public ABIInfo { - using Class = ::cir::X86ArgClass; - - /// Determine the x86_64 register classes in which the given type T should be - /// passed. - /// - /// \param Lo - The classification for the parts of the type - /// residing in the low word of the containing object. - /// - /// \param Hi - The classification for the parts of the type - /// residing in the high word of the containing object. - /// - /// \param OffsetBase - The bit offset of this type in the - /// containing object. Some parameters are classified different - /// depending on whether they straddle an eightbyte boundary. - /// - /// \param isNamedArg - Whether the argument in question is a "named" - /// argument, as used in AMD64-ABI 3.5.7. - /// - /// \param IsRegCall - Whether the calling conversion is regcall. - /// - /// If a word is unused its result will be NoClass; if a type should - /// be passed in Memory then at least the classification of \arg Lo - /// will be Memory. - /// - /// The \arg Lo class will be NoClass iff the argument is ignored. - /// - /// If the \arg Lo class is ComplexX87, then the \arg Hi class will - /// also be ComplexX87. - void classify(Type T, uint64_t OffsetBase, Class &Lo, Class &Hi, - bool isNamedArg, bool IsRegCall = false) const; public: X86_64ABIInfo(LowerTypes &CGT, X86AVXABILevel AVXLevel) : ABIInfo(CGT) {} - - ::cir::ABIArgInfo classifyReturnType(Type RetTy) const; - - void computeInfo(LowerFunctionInfo &FI) const override; }; class X86_64TargetLoweringInfo : public TargetLoweringInfo { public: X86_64TargetLoweringInfo(LowerTypes &LM, X86AVXABILevel AVXLevel) : TargetLoweringInfo(std::make_unique(LM, AVXLevel)) { - assert(!::cir::MissingFeatures::swift()); + assert(::cir::MissingFeatures::swift()); } }; -void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, - bool isNamedArg, bool IsRegCall) const { - // FIXME: This code can be simplified by introducing a simple value class - // for Class pairs with appropriate constructor methods for the various - // situations. - - // FIXME: Some of the split computations are wrong; unaligned vectors - // shouldn't be passed in registers for example, so there is no chance they - // can straddle an eightbyte. Verify & simplify. - - Lo = Hi = Class::NoClass; - - Class &Current = OffsetBase < 64 ? Lo : Hi; - Current = Class::Memory; - - // FIXME(cir): There's currently no direct way to identify if a type is a - // builtin. - if (/*isBuitinType=*/true) { - if (Ty.isa()) { - Current = Class::NoClass; - } else { - llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; - llvm_unreachable("NYI"); - } - // FIXME: _Decimal32 and _Decimal64 are SSE. - // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). - return; - } - - llvm::outs() << "Missing X86 classification for non-builtin types\n"; - llvm_unreachable("NYI"); -} - -::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { - // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the - // classification algorithm. - X86_64ABIInfo::Class Lo, Hi; - classify(RetTy, 0, Lo, Hi, true); - - // Check some invariants. - assert((Hi != Class::Memory || Lo == Class::Memory) && - "Invalid memory classification."); - assert((Hi != Class::SSEUp || Lo == Class::SSE) && - "Invalid SSEUp classification."); - - switch (Lo) { - case Class::NoClass: - if (Hi == Class::NoClass) - return ::cir::ABIArgInfo::getIgnore(); - break; - default: - llvm_unreachable("NYI"); - } - - llvm_unreachable("NYI"); -} - -void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { - const unsigned CallingConv = FI.getCallingConvention(); - // It is possible to force Win64 calling convention on any x86_64 target by - // using __attribute__((ms_abi)). In such case to correctly emit Win64 - // compatible code delegate this call to WinX86_64ABIInfo::computeInfo. - if (CallingConv == llvm::CallingConv::Win64) { - llvm_unreachable("Win64 CC is NYI"); - } - - bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; - - // Keep track of the number of assigned registers. - unsigned NeededSSE = 0, MaxVectorWidth = 0; - - if (!::mlir::cir::classifyReturnType(getCXXABI(), FI, *this)) { - if (IsRegCall || ::cir::MissingFeatures::regCall()) { - llvm_unreachable("RegCall is NYI"); - } else - FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); - } - - // If the return value is indirect, then the hidden argument is consuming - // one integer register. - if (FI.getReturnInfo().isIndirect()) - llvm_unreachable("NYI"); - else if (NeededSSE && MaxVectorWidth) - llvm_unreachable("NYI"); - - // The chain argument effectively gives us another free register. - if (::cir::MissingFeatures::chainCall()) - llvm_unreachable("NYI"); - - // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers - // get assigned (in left-to-right order) for passing as follows... - unsigned ArgNo = 0; - for (LowerFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); - it != ie; ++it, ++ArgNo) { - llvm_unreachable("NYI"); - } -} - std::unique_ptr createX86_64TargetLoweringInfo(LowerModule &LM, X86AVXABILevel AVXLevel) { return std::make_unique(LM.getTypes(), AVXLevel); diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp similarity index 69% rename from clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp rename to clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp index 6bb4d71d4877..e8772b24c3b8 100644 --- a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp @@ -1,8 +1,5 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// CHECK: @_Z4Voidv() -void Void(void) { -// CHECK: cir.call @_Z4Voidv() : () -> () - Void(); -} +// Just check if the pass is called for now. +// CHECK: module From fdde871b5a7d8d361261525e7c9e8a38ece919ee Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Mon, 17 Jun 2024 23:27:55 -0400 Subject: [PATCH 1641/2301] [CIR][CIRDataLayout]moving CIRDataLayout to MLIRCIR (#693) fix build failure undefined reference to `cir::CIRDataLayout::CIRDataLayout(mlir::ModuleOp)' by breaking circular dependency caused by the fact CIRDataLayout was in CIR Codegen --- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 26 ----------- clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 43 +++++++++++++++++++ clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + 3 files changed, 44 insertions(+), 26 deletions(-) create mode 100644 clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 8af007621ba4..958b23a8950f 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -722,29 +722,3 @@ CIRGenBitFieldInfo CIRGenBitFieldInfo::MakeInfo(CIRGenTypes &Types, CharUnits StorageOffset) { llvm_unreachable("NYI"); } - -CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { - auto dlSpec = modOp->getAttr(mlir::DLTIDialect::kDataLayoutAttrName) - .dyn_cast(); - assert(dlSpec && "expected dl_spec in the module"); - auto entries = dlSpec.getEntries(); - - for (auto entry : entries) { - auto entryKey = entry.getKey(); - auto strKey = entryKey.dyn_cast(); - if (!strKey) - continue; - auto entryName = strKey.strref(); - if (entryName == mlir::DLTIDialect::kDataLayoutEndiannessKey) { - auto value = entry.getValue().dyn_cast(); - assert(value && "expected string attribute"); - auto endian = value.getValue(); - if (endian == mlir::DLTIDialect::kDataLayoutEndiannessBig) - bigEndian = true; - else if (endian == mlir::DLTIDialect::kDataLayoutEndiannessLittle) - bigEndian = false; - else - llvm_unreachable("unknown endianess"); - } - } -} diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp new file mode 100644 index 000000000000..fe05d25a5cac --- /dev/null +++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp @@ -0,0 +1,43 @@ +//===- CIRDialect.cpp - MLIR CIR ops implementation -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the CIR DataLayout class and its functions. +// +//===----------------------------------------------------------------------===//a +// + +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" + +namespace cir { +CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { + auto dlSpec = modOp->getAttr(mlir::DLTIDialect::kDataLayoutAttrName) + .dyn_cast(); + assert(dlSpec && "expected dl_spec in the module"); + auto entries = dlSpec.getEntries(); + + for (auto entry : entries) { + auto entryKey = entry.getKey(); + auto strKey = entryKey.dyn_cast(); + if (!strKey) + continue; + auto entryName = strKey.strref(); + if (entryName == mlir::DLTIDialect::kDataLayoutEndiannessKey) { + auto value = entry.getValue().dyn_cast(); + assert(value && "expected string attribute"); + auto endian = value.getValue(); + if (endian == mlir::DLTIDialect::kDataLayoutEndiannessBig) + bigEndian = true; + else if (endian == mlir::DLTIDialect::kDataLayoutEndiannessLittle) + bigEndian = false; + else + llvm_unreachable("unknown endianess"); + } + } +} + +} // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 27d826e84489..208b7b4586d8 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -1,5 +1,6 @@ add_clang_library(MLIRCIR CIRAttrs.cpp + CIRDataLayout.cpp CIRDialect.cpp CIRTypes.cpp FPEnv.cpp From 43e498245c545a0eed4fc42086042625a115fbea Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Thu, 20 Jun 2024 15:01:34 -0300 Subject: [PATCH 1642/2301] [CIR][ABI] Replay TargetLowering library reverted commits (#697) Essentially re-applies #668 and #678, but also includes #687 which patched build introduced by the other two PRs. Closes #691 --- .../clang/CIR/Dialect/IR/CIRDataLayout.h | 14 + clang/include/clang/CIR/FnInfoOpts.h | 37 ++ clang/include/clang/CIR/MissingFeatures.h | 48 ++- clang/include/clang/CIR/Target/x86.h | 32 ++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenCall.h | 7 - clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenTypes.h | 1 + clang/lib/CIR/CodeGen/TargetInfo.cpp | 12 +- clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 19 +- clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + .../Dialect/Transforms/CallConvLowering.cpp | 20 +- .../Transforms/TargetLowering/ABIInfo.cpp | 4 + .../Transforms/TargetLowering/ABIInfo.h | 6 + .../Transforms/TargetLowering/ABIInfoImpl.cpp | 22 ++ .../Transforms/TargetLowering/ABIInfoImpl.h | 11 +- .../Transforms/TargetLowering/CIRCXXABI.h | 5 + .../TargetLowering/CIRLowerContext.cpp | 2 +- .../TargetLowering/CIRToCIRArgMapping.h | 33 +- .../Transforms/TargetLowering/CMakeLists.txt | 5 +- .../TargetLowering/ItaniumCXXABI.cpp | 14 + .../Transforms/TargetLowering/LowerCall.cpp | 273 ++++++++++++++ .../Transforms/TargetLowering/LowerCall.h | 2 + .../TargetLowering/LowerFunction.cpp | 333 ++++++++++++++++++ .../Transforms/TargetLowering/LowerFunction.h | 30 ++ .../TargetLowering/LowerFunctionInfo.h | 46 ++- .../Transforms/TargetLowering/LowerModule.cpp | 121 ++++++- .../Transforms/TargetLowering/LowerModule.h | 26 +- .../Transforms/TargetLowering/LowerTypes.cpp | 50 ++- .../Transforms/TargetLowering/LowerTypes.h | 41 ++- .../Transforms/TargetLowering/Targets/X86.cpp | 139 +++++++- .../x86_64-call-conv-lowering-pass.cpp} | 7 +- 33 files changed, 1305 insertions(+), 60 deletions(-) create mode 100644 clang/include/clang/CIR/FnInfoOpts.h create mode 100644 clang/include/clang/CIR/Target/x86.h rename clang/test/CIR/Transforms/Target/{x86/x86-call-conv-lowering-pass.cpp => x86_64/x86_64-call-conv-lowering-pass.cpp} (69%) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h index 88e030fb424a..9f8be20f5e5f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h @@ -15,6 +15,7 @@ #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/IR/BuiltinOps.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/ADT/StringRef.h" namespace cir { @@ -24,7 +25,20 @@ class CIRDataLayout { public: mlir::DataLayout layout; + /// Constructs a DataLayout from a specification string. See reset(). + explicit CIRDataLayout(llvm::StringRef dataLayout, mlir::ModuleOp module) + : layout(module) { + reset(dataLayout); + } + + /// Parse a data layout string (with fallback to default values). + void reset(llvm::StringRef dataLayout); + + // Free all internal data structures. + void clear(); + CIRDataLayout(mlir::ModuleOp modOp); + bool isBigEndian() const { return bigEndian; } // `useABI` is `true` if not using prefered alignment. diff --git a/clang/include/clang/CIR/FnInfoOpts.h b/clang/include/clang/CIR/FnInfoOpts.h new file mode 100644 index 000000000000..cea4d89f4c14 --- /dev/null +++ b/clang/include/clang/CIR/FnInfoOpts.h @@ -0,0 +1,37 @@ +#ifndef CIR_FNINFOOPTS_H +#define CIR_FNINFOOPTS_H + +#include "llvm/ADT/STLForwardCompat.h" + +namespace cir { + +enum class FnInfoOpts { + None = 0, + IsInstanceMethod = 1 << 0, + IsChainCall = 1 << 1, + IsDelegateCall = 1 << 2, +}; + +inline FnInfoOpts operator|(FnInfoOpts A, FnInfoOpts B) { + return static_cast(llvm::to_underlying(A) | + llvm::to_underlying(B)); +} + +inline FnInfoOpts operator&(FnInfoOpts A, FnInfoOpts B) { + return static_cast(llvm::to_underlying(A) & + llvm::to_underlying(B)); +} + +inline FnInfoOpts operator|=(FnInfoOpts A, FnInfoOpts B) { + A = A | B; + return A; +} + +inline FnInfoOpts operator&=(FnInfoOpts A, FnInfoOpts B) { + A = A & B; + return A; +} + +} // namespace cir + +#endif // CIR_FNINFOOPTS_H diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 12255d409a75..d61c5e618605 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -156,7 +156,6 @@ struct MissingFeatures { static bool zeroInitializer() { return false; } static bool targetCodeGenInfoIsProtoCallVariadic() { return false; } static bool targetCodeGenInfoGetNullPointer() { return false; } - static bool chainCalls() { return false; } static bool operandBundles() { return false; } static bool exceptions() { return false; } static bool metaDataNode() { return false; } @@ -190,24 +189,61 @@ struct MissingFeatures { //===--- ABI lowering --===// + //-- Missing AST queries + + static bool recordDeclCanPassInRegisters() { return false; } + static bool funcDeclIsCXXConstructorDecl() { return false; } + static bool funcDeclIsCXXDestructorDecl() { return false; } + static bool funcDeclIsCXXMethodDecl() { return false; } + static bool funcDeclIsInlineBuiltinDeclaration() { return false; } + static bool funcDeclIsReplaceableGlobalAllocationFunction() { return false; } + static bool qualTypeIsReferenceType() { return false; } + + //-- Missing types + + static bool vectorType() { return false; } + + //-- Missing LLVM attributes + + static bool noReturn() { return false; } + static bool csmeCall() { return false; } + + //-- Other missing features + + // Calls with a static chain pointer argument may be optimized (p.e. freeing + // up argument registers), but we do not yet track such cases. + static bool chainCall() { return false; } + + // ABI-lowering has special handling for regcall calling convention (tries to + // pass every argument in regs). We don't support it just yet. + static bool regCall() { return false; } + + // Some ABIs (e.g. x86) require special handling for returning large structs + // by value. The sret argument parameter aids in this, but it is current NYI. + static bool sretArgs() { return false; } + + // Inalloca parameter attributes are mostly used for Windows x86_32 ABI. We + // do not yet support this yet. + static bool inallocaArgs() { return false; } + // Parameters may have additional attributes (e.g. [[noescape]]) that affect // the compiler. This is not yet supported in CIR. - static bool extParamInfo() { return true; } + static bool extParamInfo() { return false; } // LangOpts may affect lowering, but we do not carry this information into CIR // just yet. Right now, it only instantiates the default lang options. - static bool langOpts() { return true; } + static bool langOpts() { return false; } // Several type qualifiers are not yet supported in CIR, but important when // evaluating ABI-specific lowering. - static bool qualifiedTypes() { return true; } + static bool qualifiedTypes() { return false; } // We're ignoring several details regarding ABI-halding for Swift. - static bool swift() { return true; } + static bool swift() { return false; } // Despite carrying some information about variadics, we are currently // ignoring this to focus only on the code necessary to lower non-variadics. - static bool variadicFunctions() { return true; } + static bool variadicFunctions() { return false; } }; } // namespace cir diff --git a/clang/include/clang/CIR/Target/x86.h b/clang/include/clang/CIR/Target/x86.h new file mode 100644 index 000000000000..2aa2d0493aac --- /dev/null +++ b/clang/include/clang/CIR/Target/x86.h @@ -0,0 +1,32 @@ +//==-- x86.h - Definitions common to all x86 ABI variants ------------------==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Definitions common to any X86 ABI implementation. +// +//===----------------------------------------------------------------------===// + +#ifndef CIR_X86_H +#define CIR_X86_H + +namespace cir { + +// Possible argument classifications according to the x86 ABI documentation. +enum X86ArgClass { + Integer = 0, + SSE, + SSEUp, + X87, + X87Up, + ComplexX87, + NoClass, + Memory +}; + +} // namespace cir + +#endif // CIR_X86_H diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 3128cf1e4d3a..bf44ddb263b7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -23,6 +23,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/FnInfoOpts.h" #include "llvm/Support/ErrorHandling.h" #include diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 866ba9af7a3b..ea8e9e546352 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -290,13 +290,6 @@ class ReturnValueSlot { Address getAddress() const { return Addr; } }; -enum class FnInfoOpts { - None = 0, - IsInstanceMethod = 1 << 0, - IsChainCall = 1 << 1, - IsDelegateCall = 1 << 2, -}; - } // namespace cir #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 2604fdd9fdae..8db9ce53f547 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1439,7 +1439,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, // Chain calls use the same code path to add the inviisble chain parameter to // the function type. if (isa(FnType) || Chain) { - assert(!MissingFeatures::chainCalls()); + assert(!MissingFeatures::chainCall()); assert(!MissingFeatures::addressSpace()); auto CalleeTy = getTypes().GetFunctionType(FnInfo); // get non-variadic function type diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index ca4d18461154..3eb2b1b455e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -16,6 +16,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/AST/RecordLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/FnInfoOpts.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 51350c9ea70e..d1d547f24a9a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -22,6 +22,7 @@ #include "clang/AST/Type.h" #include "clang/Basic/ABI.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/FnInfoOpts.h" #include "llvm/ADT/SmallPtrSet.h" diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 121bc2f023c1..2973a6ce70d3 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -6,6 +6,7 @@ #include "CallingConv.h" #include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Target/x86.h" using namespace cir; using namespace clang; @@ -79,16 +80,7 @@ namespace { enum class X86AVXABILevel { None, AVX, AVX512 }; class X86_64ABIInfo : public ABIInfo { - enum Class { - Integer = 0, - SSE, - SSEUp, - X87, - X87Up, - ComplexX87, - NoClass, - Memory - }; + using Class = X86ArgClass; // X86AVXABILevel AVXLevel; // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 64-bit diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp index fe05d25a5cac..147bff99e0bd 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp @@ -1,19 +1,8 @@ -//===- CIRDialect.cpp - MLIR CIR ops implementation -----------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file defines the CIR DataLayout class and its functions. -// -//===----------------------------------------------------------------------===//a -// - #include "clang/CIR/Dialect/IR/CIRDataLayout.h" +#include "llvm/ADT/StringRef.h" namespace cir { + CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { auto dlSpec = modOp->getAttr(mlir::DLTIDialect::kDataLayoutAttrName) .dyn_cast(); @@ -40,4 +29,8 @@ CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { } } +void CIRDataLayout::reset(llvm::StringRef Desc) { clear(); } + +void CIRDataLayout::clear() {} + } // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 208b7b4586d8..6eb3f295286c 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -17,6 +17,7 @@ add_clang_library(MLIRCIR LINK_LIBS PUBLIC MLIRIR MLIRCIRInterfaces + MLIRDLTIDialect MLIRDataLayoutInterfaces MLIRFuncDialect MLIRLoopLikeInterface diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 6130367d91a4..9363c7349519 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -45,7 +45,7 @@ LowerModule createLowerModule(FuncOp op, PatternRewriter &rewriter) { // FIXME(cir): This just uses the default language options. We need to account // for custom options. // Create context. - assert(::cir::MissingFeatures::langOpts()); + assert(!::cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; auto context = CIRLowerContext(module.getContext(), langOpts); context.initBuiltinTypes(*targetInfo); @@ -64,11 +64,29 @@ struct CallConvLoweringPattern : public OpRewritePattern { LogicalResult matchAndRewrite(FuncOp op, PatternRewriter &rewriter) const final { + const auto module = op->getParentOfType(); + if (!op.getAst()) return op.emitError("function has no AST information"); LowerModule lowerModule = createLowerModule(op, rewriter); + // Rewrite function calls before definitions. This should be done before + // lowering the definition. + auto calls = op.getSymbolUses(module); + if (calls.has_value()) { + for (auto call : calls.value()) { + auto callOp = cast(call.getUser()); + if (lowerModule.rewriteFunctionCall(callOp, op).failed()) + return failure(); + } + } + + // TODO(cir): Instead of re-emmiting every load and store, bitcast arguments + // and return values to their ABI-specific counterparts when possible. + if (lowerModule.rewriteFunctionDefinition(op).failed()) + return failure(); + return success(); } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 46a865da0670..6160174191dc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -12,6 +12,8 @@ //===----------------------------------------------------------------------===// #include "ABIInfo.h" +#include "CIRCXXABI.h" +#include "LowerTypes.h" namespace mlir { namespace cir { @@ -19,5 +21,7 @@ namespace cir { // Pin the vtable to this file. ABIInfo::~ABIInfo() = default; +CIRCXXABI &ABIInfo::getCXXABI() const { return LT.getCXXABI(); } + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h index 3fad01f3d7a8..ef5bae6d13fa 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h @@ -14,6 +14,8 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H +#include "CIRCXXABI.h" +#include "LowerFunctionInfo.h" #include "llvm/IR/CallingConv.h" namespace mlir { @@ -32,6 +34,10 @@ class ABIInfo { public: ABIInfo(LowerTypes <) : LT(LT), RuntimeCC(llvm::CallingConv::C) {} virtual ~ABIInfo(); + + CIRCXXABI &getCXXABI() const; + + virtual void computeInfo(LowerFunctionInfo &FI) const = 0; }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index c51176a99b95..ef90698054e8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -10,3 +10,25 @@ // adapted to operate on the CIR dialect, however. // //===----------------------------------------------------------------------===// + +#include "ABIInfo.h" +#include "CIRCXXABI.h" +#include "LowerFunctionInfo.h" +#include "llvm/Support/ErrorHandling.h" + +namespace mlir { +namespace cir { + +bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, + const ABIInfo &Info) { + Type Ty = FI.getReturnType(); + + if (const auto RT = Ty.dyn_cast()) { + llvm_unreachable("NYI"); + } + + return CXXABI.classifyReturnType(FI); +} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h index f34d7fb07226..d3ee18f0467b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h @@ -14,8 +14,17 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H +#include "ABIInfo.h" +#include "CIRCXXABI.h" +#include "LowerFunctionInfo.h" + namespace mlir { -namespace cir {} // namespace cir +namespace cir { + +bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, + const ABIInfo &Info); + +} // namespace cir } // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index bf5131a074b8..5496dbbf2327 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -14,6 +14,7 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H +#include "LowerFunctionInfo.h" #include "mlir/IR/Value.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" @@ -34,6 +35,10 @@ class CIRCXXABI { public: virtual ~CIRCXXABI(); + + /// If the C++ ABI requires the given type be returned in a particular way, + /// this method sets RetAI and returns true. + virtual bool classifyReturnType(LowerFunctionInfo &FI) const = 0; }; /// Creates an Itanium-family ABI. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index b75893bfb33f..7152ab081ec5 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -31,7 +31,7 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { Type Ty; // NOTE(cir): Clang does more stuff here. Not sure if we need to do the same. - assert(::cir::MissingFeatures::qualifiedTypes()); + assert(!::cir::MissingFeatures::qualifiedTypes()); switch (K) { case clang::BuiltinType::Char_S: Ty = IntType::get(getMLIRContext(), 8, true); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index 9c1dae1f3dbf..6481874bf3ab 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -17,8 +17,10 @@ #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRTOCIRARGMAPPING_H #include "CIRLowerContext.h" -#include "LoweringFunctionInfo.h" +#include "LowerFunctionInfo.h" +#include "clang/CIR/ABIArgInfo.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/Support/ErrorHandling.h" namespace mlir { namespace cir { @@ -49,9 +51,36 @@ class CIRToCIRArgMapping { public: CIRToCIRArgMapping(const CIRLowerContext &context, const LowerFunctionInfo &FI, bool onlyRequiredArgs = false) - : ArgInfo(onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {}; + : ArgInfo(onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { + construct(context, FI, onlyRequiredArgs); + }; unsigned totalIRArgs() const { return TotalIRArgs; } + + void construct(const CIRLowerContext &context, const LowerFunctionInfo &FI, + bool onlyRequiredArgs = false) { + unsigned IRArgNo = 0; + const ::cir::ABIArgInfo &RetAI = FI.getReturnInfo(); + + if (RetAI.getKind() == ::cir::ABIArgInfo::Indirect) { + llvm_unreachable("NYI"); + } + + unsigned ArgNo = 0; + unsigned NumArgs = + onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); + for (LowerFunctionInfo::const_arg_iterator _ = FI.arg_begin(); + ArgNo < NumArgs; ++_, ++ArgNo) { + llvm_unreachable("NYI"); + } + assert(ArgNo == ArgInfo.size()); + + if (::cir::MissingFeatures::inallocaArgs()) { + llvm_unreachable("NYI"); + } + + TotalIRArgs = IRArgNo; + } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt index 8e0afdc4367b..e90beeb13a43 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt @@ -17,12 +17,15 @@ add_clang_library(TargetLowering Targets/LoweringPrepareItaniumCXXABI.cpp DEPENDS + clangBasic LINK_LIBS PUBLIC + clangBasic + LLVMTargetParser MLIRIR MLIRPass - + MLIRDLTIDialect MLIRCIR MLIRCIRInterfaces ) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index aee2620496e6..6e0fecfa44d5 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -32,10 +32,24 @@ class ItaniumCXXABI : public CIRCXXABI { public: ItaniumCXXABI(LowerModule &LM) : CIRCXXABI(LM) {} + + bool classifyReturnType(LowerFunctionInfo &FI) const override; }; } // namespace +bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { + const StructType RD = FI.getReturnType().dyn_cast(); + if (!RD) + return false; + + // If C++ prohibits us from making a copy, return by address. + if (::cir::MissingFeatures::recordDeclCanPassInRegisters()) + llvm_unreachable("NYI"); + + return false; +} + CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { switch (LM.getCXXABIKind()) { case clang::TargetCXXABI::GenericItanium: diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index e69de29bb2d1..85890532e4f9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -0,0 +1,273 @@ +#include "LowerCall.h" +#include "CIRToCIRArgMapping.h" +#include "LowerFunctionInfo.h" +#include "LowerModule.h" +#include "LowerTypes.h" +#include "clang/CIR/FnInfoOpts.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace mlir; +using namespace mlir::cir; + +using ABIArgInfo = ::cir::ABIArgInfo; +using FnInfoOpts = ::cir::FnInfoOpts; +using MissingFeatures = ::cir::MissingFeatures; + +namespace { + +/// Arrange a call as unto a free function, except possibly with an +/// additional number of formal parameters considered required. +const LowerFunctionInfo & +arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, + const OperandRange &args, const FuncType fnType, + unsigned numExtraRequiredArgs, bool chainCall) { + assert(args.size() >= numExtraRequiredArgs); + + assert(!::cir::MissingFeatures::extParamInfo()); + + // In most cases, there are no optional arguments. + RequiredArgs required = RequiredArgs::All; + + // If we have a variadic prototype, the required arguments are the + // extra prefix plus the arguments in the prototype. + // FIXME(cir): Properly check if function is no-proto. + if (/*IsPrototypedFunction=*/true) { + if (fnType.isVarArg()) + llvm_unreachable("NYI"); + + if (::cir::MissingFeatures::extParamInfo()) + llvm_unreachable("NYI"); + } + + // TODO(cir): There's some CC stuff related to no-proto functions here, but + // I'm skipping it since it requires CodeGen info. Maybe we can embbed this + // information in the FuncOp during CIRGen. + + assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); + FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; + return LT.arrangeLLVMFunctionInfo(fnType.getReturnType(), opts, + fnType.getInputs(), required); +} + +/// Adds the formal parameters in FPT to the given prefix. If any parameter in +/// FPT has pass_object_size attrs, then we'll add parameters for those, too. +static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { + // Fast path: don't touch param info if we don't need to. + if (/*!fnTy->hasExtParameterInfos()=*/true) { + prefix.append(fnTy.getInputs().begin(), fnTy.getInputs().end()); + return; + } + + assert(MissingFeatures::extParamInfo()); + llvm_unreachable("NYI"); +} + +/// Arrange the LLVM function layout for a value of the given function +/// type, on top of any implicit parameters already stored. +/// +/// \param CGT - Abstraction for lowering CIR types. +/// \param instanceMethod - Whether the function is an instance method. +/// \param prefix - List of implicit parameters to be prepended (e.g. 'this'). +/// \param FTP - ABI-agnostic function type. +static const LowerFunctionInfo & +arrangeCIRFunctionInfo(LowerTypes &CGT, bool instanceMethod, + SmallVectorImpl &prefix, FuncType fnTy) { + assert(!MissingFeatures::extParamInfo()); + RequiredArgs Required = RequiredArgs::forPrototypePlus(fnTy, prefix.size()); + // FIXME: Kill copy. + appendParameterTypes(prefix, fnTy); + assert(!MissingFeatures::qualifiedTypes()); + Type resultType = fnTy.getReturnType(); + + FnInfoOpts opts = + instanceMethod ? FnInfoOpts::IsInstanceMethod : FnInfoOpts::None; + return CGT.arrangeLLVMFunctionInfo(resultType, opts, prefix, Required); +} + +} // namespace + +/// Update function with ABI-specific attributes. +/// +/// NOTE(cir): Partially copies CodeGenModule::ConstructAttributeList, but +/// focuses on ABI/Target-related attributes. +void LowerModule::constructAttributeList(StringRef Name, + const LowerFunctionInfo &FI, + FuncOp CalleeInfo, FuncOp newFn, + unsigned &CallingConv, + bool AttrOnCallSite, bool IsThunk) { + // Collect function IR attributes from the CC lowering. + // We'll collect the paramete and result attributes later. + // FIXME(cir): Codegen differentiates between CallConv and EffectiveCallConv, + // but I don't think we need to do this here. + CallingConv = FI.getCallingConvention(); + // FIXME(cir): No-return should probably be set in CIRGen (ABI-agnostic). + if (MissingFeatures::noReturn()) + llvm_unreachable("NYI"); + if (MissingFeatures::csmeCall()) + llvm_unreachable("NYI"); + + // TODO(cir): Implement AddAttributesFromFunctionProtoType here. + // TODO(cir): Implement AddAttributesFromOMPAssumes here. + assert(!MissingFeatures::openMP()); + + // TODO(cir): Skipping a bunch of AST queries here. We will need to partially + // implement some of them as this section sets target-specific attributes + // too. + // if (TargetDecl) { + // [...] + // } + + // NOTE(cir): The original code adds default and no-builtin attributes here as + // well. AFAIK, these are ABI/Target-agnostic, so it would be better handled + // in CIRGen. Regardless, I'm leaving this comment here as a heads up. + + // Override some default IR attributes based on declaration-specific + // information. + // NOTE(cir): Skipping another set of AST queries here. + + // Collect attributes from arguments and return values. + CIRToCIRArgMapping IRFunctionArgs(getContext(), FI); + + const ABIArgInfo &RetAI = FI.getReturnInfo(); + + // TODO(cir): No-undef attribute for return values partially depends on + // ABI-specific information. Maybe we should include it here. + + switch (RetAI.getKind()) { + case ABIArgInfo::Ignore: + break; + default: + llvm_unreachable("Missing ABIArgInfo::Kind"); + } + + if (!IsThunk) { + if (MissingFeatures::qualTypeIsReferenceType()) { + llvm_unreachable("NYI"); + } + } + + // Attach attributes to sret. + if (MissingFeatures::sretArgs()) { + llvm_unreachable("sret is NYI"); + } + + // Attach attributes to inalloca arguments. + if (MissingFeatures::inallocaArgs()) { + llvm_unreachable("inalloca is NYI"); + } + + // Apply `nonnull`, `dereferencable(N)` and `align N` to the `this` argument, + // unless this is a thunk function. + // FIXME: fix this properly, https://reviews.llvm.org/D100388 + if (MissingFeatures::funcDeclIsCXXMethodDecl() || + MissingFeatures::inallocaArgs()) { + llvm_unreachable("`this` argument attributes are NYI"); + } + + unsigned ArgNo = 0; + for (LowerFunctionInfo::const_arg_iterator I = FI.arg_begin(), + E = FI.arg_end(); + I != E; ++I, ++ArgNo) { + llvm_unreachable("NYI"); + } + assert(ArgNo == FI.arg_size()); +} + +/// Arrange the argument and result information for the declaration or +/// definition of the given function. +const LowerFunctionInfo &LowerTypes::arrangeFunctionDeclaration(FuncOp fnOp) { + if (MissingFeatures::funcDeclIsCXXMethodDecl()) + llvm_unreachable("NYI"); + + assert(!MissingFeatures::qualifiedTypes()); + FuncType FTy = fnOp.getFunctionType(); + + assert(!MissingFeatures::CUDA()); + + // When declaring a function without a prototype, always use a + // non-variadic type. + if (fnOp.getNoProto()) { + llvm_unreachable("NYI"); + } + + return arrangeFreeFunctionType(FTy); +} + +/// Figure out the rules for calling a function with the given formal +/// type using the given arguments. The arguments are necessary +/// because the function might be unprototyped, in which case it's +/// target-dependent in crazy ways. +const LowerFunctionInfo & +LowerTypes::arrangeFreeFunctionCall(const OperandRange args, + const FuncType fnType, bool chainCall) { + return arrangeFreeFunctionLikeCall(*this, LM, args, fnType, chainCall ? 1 : 0, + chainCall); +} + +/// Arrange the argument and result information for the declaration or +/// definition of the given function. +const LowerFunctionInfo &LowerTypes::arrangeFreeFunctionType(FuncType FTy) { + SmallVector argTypes; + return ::arrangeCIRFunctionInfo(*this, /*instanceMethod=*/false, argTypes, + FTy); +} + +/// Arrange the argument and result information for the declaration or +/// definition of the given function. +const LowerFunctionInfo &LowerTypes::arrangeGlobalDeclaration(FuncOp fnOp) { + if (MissingFeatures::funcDeclIsCXXConstructorDecl() || + MissingFeatures::funcDeclIsCXXDestructorDecl()) + llvm_unreachable("NYI"); + + return arrangeFunctionDeclaration(fnOp); +} + +/// Arrange the argument and result information for an abstract value +/// of a given function type. This is the method which all of the +/// above functions ultimately defer to. +/// +/// \param resultType - ABI-agnostic CIR result type. +/// \param opts - Options to control the arrangement. +/// \param argTypes - ABI-agnostic CIR argument types. +/// \param required - Information about required/optional arguments. +const LowerFunctionInfo & +LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, + ArrayRef argTypes, + RequiredArgs required) { + assert(!::cir::MissingFeatures::qualifiedTypes()); + + LowerFunctionInfo *FI = nullptr; + + // FIXME(cir): Allow user-defined CCs (e.g. __attribute__((vectorcall))). + assert(!::cir::MissingFeatures::extParamInfo()); + unsigned CC = clangCallConvToLLVMCallConv(clang::CallingConv::CC_C); + + // Construct the function info. We co-allocate the ArgInfos. + // NOTE(cir): This initial function info might hold incorrect data. + FI = LowerFunctionInfo::create( + CC, /*isInstanceMethod=*/false, /*isChainCall=*/false, + /*isDelegateCall=*/false, resultType, argTypes, required); + + // Compute ABI information. + if (CC == llvm::CallingConv::SPIR_KERNEL) { + llvm_unreachable("NYI"); + } else if (::cir::MissingFeatures::extParamInfo()) { + llvm_unreachable("NYI"); + } else { + // NOTE(cir): This corects the initial function info data. + getABIInfo().computeInfo(*FI); // FIXME(cir): Args should be set to null. + } + + // Loop over all of the computed argument and return value info. If any of + // them are direct or extend without a specified coerce type, specify the + // default now. + ::cir::ABIArgInfo &retInfo = FI->getReturnInfo(); + if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) + llvm_unreachable("NYI"); + + for (auto &_ : FI->arguments()) + llvm_unreachable("NYI"); + + return *FI; +} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h index ac54490c578f..b579f96fb436 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h @@ -22,6 +22,8 @@ namespace cir { /// Contains the address where the return value of a function can be stored, and /// whether the address is volatile or not. class ReturnValueSlot { + // FIXME(cir): We should be able to query this directly from CIR at some + // point. This class can then be removed. Value Addr = {}; // Return value slot flags diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 6215b6149786..7a71fffb2fbf 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -12,11 +12,20 @@ //===----------------------------------------------------------------------===// #include "LowerFunction.h" +#include "CIRToCIRArgMapping.h" #include "LowerCall.h" +#include "LowerFunctionInfo.h" #include "LowerModule.h" +#include "mlir/IR/MLIRContext.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" +#include "clang/CIR/ABIArgInfo.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" + +using ABIArgInfo = ::cir::ABIArgInfo; namespace mlir { namespace cir { @@ -32,5 +41,329 @@ LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, : Target(LM.getTarget()), rewriter(rewriter), SrcFn(srcFn), callOp(callOp), LM(LM) {} +/// This method has partial parity with CodeGenFunction::EmitFunctionProlog from +/// the original codegen. However, it focuses on the ABI-specific details. On +/// top of that, it is also responsible for rewriting the original function. +LogicalResult +LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, + MutableArrayRef Args) { + // NOTE(cir): Skipping naked and implicit-return-zero functions here. These + // are dealt with in CIRGen. + + CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), FI); + assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); + + // If we're using inalloca, all the memory arguments are GEPs off of the last + // parameter, which is a pointer to the complete memory area. + assert(!::cir::MissingFeatures::inallocaArgs()); + + // Name the struct return parameter. + assert(!::cir::MissingFeatures::sretArgs()); + + // Track if we received the parameter as a pointer (indirect, byval, or + // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it + // into a local alloca for us. + SmallVector ArgVals; + ArgVals.reserve(Args.size()); + + // Create a pointer value for every parameter declaration. This usually + // entails copying one or more LLVM IR arguments into an alloca. Don't push + // any cleanups or do anything that might unwind. We do that separately, so + // we can push the cleanups in the correct order for the ABI. + assert(FI.arg_size() == Args.size()); + unsigned ArgNo = 0; + LowerFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); + for (MutableArrayRef::const_iterator i = Args.begin(), + e = Args.end(); + i != e; ++i, ++info_it, ++ArgNo) { + llvm_unreachable("NYI"); + } + + if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { + llvm_unreachable("NYI"); + } else { + // FIXME(cir): In the original codegen, EmitParamDecl is called here. It is + // likely that said function considers ABI details during emission, so we + // migth have to add a counter part here. Currently, it is not needed. + } + + return success(); +} + +LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { + const ABIArgInfo &RetAI = FI.getReturnInfo(); + + switch (RetAI.getKind()) { + + case ABIArgInfo::Ignore: + break; + + default: + llvm_unreachable("Unhandled ABIArgInfo::Kind"); + } + + return success(); +} + +/// Generate code for a function based on the ABI-specific information. +/// +/// This method has partial parity with CodeGenFunction::GenerateCode, but it +/// focuses on the ABI-specific details. So a lot of codegen stuff is removed. +LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, + const LowerFunctionInfo &FnInfo) { + assert(newFn && "generating code for null Function"); + auto Args = oldFn.getArguments(); + + // Emit the ABI-specific function prologue. + assert(newFn.empty() && "Function already has a body"); + rewriter.setInsertionPointToEnd(newFn.addEntryBlock()); + if (buildFunctionProlog(FnInfo, newFn, oldFn.getArguments()).failed()) + return failure(); + + // Ensure that old ABI-agnostic arguments uses were replaced. + const auto hasNoUses = [](Value val) { return val.getUses().empty(); }; + assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && "Missing RAUW?"); + + // Migrate function body to new ABI-aware function. + assert(oldFn.getBody().hasOneBlock() && + "Multiple blocks in original function not supported"); + + // Move old function body to new function. + // FIXME(cir): The merge below is not very good: will not work if SrcFn has + // multiple blocks and it mixes the new and old prologues. + rewriter.mergeBlocks(&oldFn.getBody().front(), &newFn.getBody().front(), + newFn.getArguments()); + + // FIXME(cir): What about saving parameters for corotines? Should we do + // something about it in this pass? If the change with the calling + // convention, we might have to handle this here. + + // Emit the standard function epilogue. + if (buildFunctionEpilog(FnInfo).failed()) + return failure(); + + return success(); +} + +/// Rewrite a call operation to abide to the ABI calling convention. +/// +/// FIXME(cir): This method has partial parity to CodeGenFunction's +/// EmitCallEpxr method defined in CGExpr.cpp. This could likely be +/// removed in favor of a more direct approach. +LogicalResult LowerFunction::rewriteCallOp(CallOp op, + ReturnValueSlot retValSlot) { + + // TODO(cir): Check if BlockCall, CXXMemberCall, CUDAKernelCall, or + // CXXOperatorMember require special handling here. These should be handled in + // CIRGen, unless there is call conv or ABI-specific stuff to be handled, them + // we should do it here. + + // TODO(cir): Also check if Builtin and CXXPeseudoDtor need special handling + // here. These should be handled in CIRGen, unless there is call conv or + // ABI-specific stuff to be handled, them we should do it here. + + // NOTE(cir): There is no direct way to fetch the function type from the + // CallOp, so we fetch it from the source function. This assumes the function + // definition has not yet been lowered. + assert(SrcFn && "No source function"); + auto fnType = SrcFn.getFunctionType(); + + // Rewrite the call operation to abide to the ABI calling convention. + auto Ret = rewriteCallOp(fnType, SrcFn, op, retValSlot); + + // Replace the original call result with the new one. + if (Ret) + rewriter.replaceAllUsesWith(op.getResult(), Ret); + + return success(); +} + +/// Rewrite a call operation to abide to the ABI calling convention. +/// +/// FIXME(cir): This method has partial parity to CodeGenFunction's EmitCall +/// method defined in CGExpr.cpp. This could likely be removed in favor of a +/// more direct approach since most of the code here is exclusively CodeGen. +Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, + CallOp callOp, ReturnValueSlot retValSlot, + Value Chain) { + // NOTE(cir): Skip a bunch of function pointer stuff and AST declaration + // asserts. Also skip sanitizers, as these should likely be handled at + // CIRGen. + CallArgList Args; + if (Chain) + llvm_unreachable("NYI"); + + // NOTE(cir): Call args were already emitted in CIRGen. Skip the evaluation + // order done in CIRGen and just fetch the exiting arguments here. + Args = callOp.getArgOperands(); + + const LowerFunctionInfo &FnInfo = LM.getTypes().arrangeFreeFunctionCall( + callOp.getArgOperands(), calleeTy, /*chainCall=*/false); + + // C99 6.5.2.2p6: + // If the expression that denotes the called function has a type + // that does not include a prototype, [the default argument + // promotions are performed]. If the number of arguments does not + // equal the number of parameters, the behavior is undefined. If + // the function is defined with a type that includes a prototype, + // and either the prototype ends with an ellipsis (, ...) or the + // types of the arguments after promotion are not compatible with + // the types of the parameters, the behavior is undefined. If the + // function is defined with a type that does not include a + // prototype, and the types of the arguments after promotion are + // not compatible with those of the parameters after promotion, + // the behavior is undefined [except in some trivial cases]. + // That is, in the general case, we should assume that a call + // through an unprototyped function type works like a *non-variadic* + // call. The way we make this work is to cast to the exact type + // of the promoted arguments. + // + // Chain calls use this same code path to add the invisible chain parameter + // to the function type. + if (origCallee.getNoProto() || Chain) { + llvm_unreachable("NYI"); + } + + assert(!::cir::MissingFeatures::CUDA()); + + // TODO(cir): LLVM IR has the concept of "CallBase", which is a base class for + // all types of calls. Perhaps we should have a CIR interface to mimic this + // class. + CallOp CallOrInvoke = {}; + Value CallResult = {}; + rewriteCallOp(FnInfo, origCallee, callOp, retValSlot, Args, CallOrInvoke, + /*isMustTail=*/false, callOp.getLoc()); + + // NOTE(cir): Skipping debug stuff here. + + return CallResult; +} + +// NOTE(cir): This method has partial parity to CodeGenFunction's EmitCall +// method in CGCall.cpp. When incrementing it, use the original codegen as a +// reference: add ABI-specific stuff and skip codegen stuff. +Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, + FuncOp Callee, CallOp Caller, + ReturnValueSlot ReturnValue, + CallArgList &CallArgs, CallOp CallOrInvoke, + bool isMustTail, Location loc) { + // FIXME: We no longer need the types from CallArgs; lift up and simplify. + + // Handle struct-return functions by passing a pointer to the + // location that we would like to return into. + Type RetTy = CallInfo.getReturnType(); // ABI-agnostic type. + const ::cir::ABIArgInfo &RetAI = CallInfo.getReturnInfo(); + + FuncType IRFuncTy = LM.getTypes().getFunctionType(CallInfo); + + // NOTE(cir): Some target/ABI related checks happen here. I'm skipping them + // under the assumption that they are handled in CIRGen. + + // 1. Set up the arguments. + + // If we're using inalloca, insert the allocation after the stack save. + // FIXME: Do this earlier rather than hacking it in here! + if (StructType ArgStruct = CallInfo.getArgStruct()) { + llvm_unreachable("NYI"); + } + + CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), CallInfo); + SmallVector IRCallArgs(IRFunctionArgs.totalIRArgs()); + + // If the call returns a temporary with struct return, create a temporary + // alloca to hold the result, unless one is given to us. + if (RetAI.isIndirect() || RetAI.isCoerceAndExpand() || RetAI.isInAlloca()) { + llvm_unreachable("NYI"); + } + + assert(!::cir::MissingFeatures::swift()); + + // NOTE(cir): Skipping lifetime markers here. + + // Translate all of the arguments as necessary to match the IR lowering. + assert(CallInfo.arg_size() == CallArgs.size() && + "Mismatch between function signature & arguments."); + unsigned ArgNo = 0; + LowerFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); + for (auto I = CallArgs.begin(), E = CallArgs.end(); I != E; + ++I, ++info_it, ++ArgNo) { + llvm_unreachable("NYI"); + } + + // 2. Prepare the function pointer. + // NOTE(cir): This is not needed for CIR. + + // 3. Perform the actual call. + + // NOTE(cir): CIRGen handle when to "deactive" cleanups. We also skip some + // debugging stuff here. + + // Update the largest vector width if any arguments have vector types. + assert(!::cir::MissingFeatures::vectorType()); + + // Compute the calling convention and attributes. + + // FIXME(cir): Skipping call attributes for now. Not sure if we have to do + // this at all since we already do it for the function definition. + + // FIXME(cir): Implement the required procedures for strictfp function and + // fast-math. + + // FIXME(cir): Add missing call-site attributes here if they are + // ABI/target-specific, otherwise, do it in CIRGen. + + // NOTE(cir): Deciding whether to use Call or Invoke is done in CIRGen. + + // Rewrite the actual call operation. + // TODO(cir): Handle other types of CIR calls (e.g. cir.try_call). + // NOTE(cir): We don't know if the callee was already lowered, so we only + // fetch the name from the callee, while the return type is fetch from the + // lowering types manager. + CallOp newCallOp = rewriter.create( + loc, Caller.getCalleeAttr(), IRFuncTy.getReturnType(), IRCallArgs); + auto extraAttrs = + rewriter.getAttr(rewriter.getDictionaryAttr({})); + newCallOp->setAttr("extra_attrs", extraAttrs); + + assert(!::cir::MissingFeatures::vectorType()); + + // NOTE(cir): Skipping some ObjC, tail-call, debug, and attribute stuff here. + + // 4. Finish the call. + + // NOTE(cir): Skipping no-return, isMustTail, swift error handling, and + // writebacks here. These should be handled in CIRGen, I think. + + // Convert return value from ABI-agnostic to ABI-aware. + Value Ret = [&] { + // NOTE(cir): CIRGen already handled the emission of the return value. We + // need only to handle the ABI-specific to ABI-agnostic cast here. + switch (RetAI.getKind()) { + case ::cir::ABIArgInfo::Ignore: + // If we are ignoring an argument that had a result, make sure to + // construct the appropriate return value for our caller. + return getUndefRValue(RetTy); + default: + llvm::errs() << "Unhandled ABIArgInfo kind: " << RetAI.getKind() << "\n"; + llvm_unreachable("NYI"); + } + }(); + + // NOTE(cir): Skipping Emissions, lifetime markers, and dtors here that should + // be handled in CIRGen. + + return Ret; +} + +// NOTE(cir): This method has partial parity to CodeGenFunction's GetUndefRValue +// defined in CGExpr.cpp. +Value LowerFunction::getUndefRValue(Type Ty) { + if (Ty.isa()) + return nullptr; + + llvm::outs() << "Missing undef handler for value type: " << Ty << "\n"; + llvm_unreachable("NYI"); +} + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h index 319751790915..40cdd39463e6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h @@ -25,6 +25,8 @@ namespace mlir { namespace cir { +using CallArgList = SmallVector; + class LowerFunction { LowerFunction(const LowerFunction &) = delete; void operator=(const LowerFunction &) = delete; @@ -50,6 +52,34 @@ class LowerFunction { ~LowerFunction() = default; LowerModule &LM; // Per-module state. + + const clang::TargetInfo &getTarget() const { return Target; } + + // Build ABI/Target-specific function prologue. + LogicalResult buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, + MutableArrayRef Args); + + // Build ABI/Target-specific function epilogue. + LogicalResult buildFunctionEpilog(const LowerFunctionInfo &FI); + + // Parity with CodeGenFunction::GenerateCode. Keep in mind that several + // sections in the original function are focused on codegen unrelated to the + // ABI. Such sections are handled in CIR's codegen, not here. + LogicalResult generateCode(FuncOp oldFn, FuncOp newFn, + const LowerFunctionInfo &FnInfo); + + /// Rewrite a call operation to abide to the ABI calling convention. + LogicalResult rewriteCallOp(CallOp op, + ReturnValueSlot retValSlot = ReturnValueSlot()); + Value rewriteCallOp(FuncType calleeTy, FuncOp origCallee, CallOp callOp, + ReturnValueSlot retValSlot, Value Chain = nullptr); + Value rewriteCallOp(const LowerFunctionInfo &CallInfo, FuncOp Callee, + CallOp Caller, ReturnValueSlot ReturnValue, + CallArgList &CallArgs, CallOp CallOrInvoke, + bool isMustTail, Location loc); + + /// Get an appropriate 'undef' value for the given type. + Value getUndefRValue(Type Ty); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index 4344745f2478..c81335c9985a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -18,6 +18,7 @@ #include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/MissingFeatures.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/Support/TrailingObjects.h" namespace mlir { @@ -36,6 +37,19 @@ class RequiredArgs { RequiredArgs(All_t _) : NumRequired(~0U) {} explicit RequiredArgs(unsigned n) : NumRequired(n) { assert(n != ~0U); } + /// Compute the arguments required by the given formal prototype, + /// given that there may be some additional, non-formal arguments + /// in play. + /// + /// If FD is not null, this will consider pass_object_size params in FD. + static RequiredArgs forPrototypePlus(const FuncType prototype, + unsigned additional) { + if (!prototype.isVarArg()) + return All; + + llvm_unreachable("Variadic function is NYI"); + } + bool allowsOptionalArgs() const { return NumRequired != ~0U; } }; @@ -46,6 +60,8 @@ struct LowerFunctionInfoArgInfo { ::cir::ABIArgInfo info; // ABI-specific information. }; +// FIXME(cir): We could likely encode this information within CIR/MLIR, allowing +// us to eliminate this class. class LowerFunctionInfo final : private llvm::TrailingObjects { @@ -89,7 +105,7 @@ class LowerFunctionInfo final ArrayRef argTypes, RequiredArgs required) { // TODO(cir): Add assertions? - assert(::cir::MissingFeatures::extParamInfo()); + assert(!::cir::MissingFeatures::extParamInfo()); void *buffer = operator new(totalSizeToAlloc(argTypes.size() + 1)); LowerFunctionInfo *FI = new (buffer) LowerFunctionInfo(); @@ -115,10 +131,22 @@ class LowerFunctionInfo final return NumArgs + 1; } + typedef const ArgInfo *const_arg_iterator; + typedef ArgInfo *arg_iterator; + + MutableArrayRef arguments() { + return MutableArrayRef(arg_begin(), NumArgs); + } + + const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; } + const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; } + arg_iterator arg_begin() { return getArgsBuffer() + 1; } + arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; } + unsigned arg_size() const { return NumArgs; } bool isVariadic() const { - assert(::cir::MissingFeatures::variadicFunctions()); + assert(!::cir::MissingFeatures::variadicFunctions()); return false; } unsigned getNumRequiredArgs() const { @@ -126,6 +154,20 @@ class LowerFunctionInfo final llvm_unreachable("NYI"); return arg_size(); } + + Type getReturnType() const { return getArgsBuffer()[0].type; } + + ::cir::ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; } + const ::cir::ABIArgInfo &getReturnInfo() const { + return getArgsBuffer()[0].info; + } + + /// Return the user specified callingconvention, which has been translated + /// into an LLVM CC. + unsigned getCallingConvention() const { return CallingConvention; } + + /// Get the struct type used to represent all the arguments in memory. + StructType getArgStruct() const { return ArgStruct; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 44ef32a5ddfa..0ec4b589bb41 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -13,6 +13,7 @@ #include "LowerModule.h" #include "CIRLowerContext.h" +#include "LowerFunction.h" #include "TargetInfo.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributes.h" @@ -20,6 +21,8 @@ #include "mlir/Support/LogicalResult.h" #include "llvm/Support/ErrorHandling.h" +using MissingFeatures = ::cir::MissingFeatures; + namespace mlir { namespace cir { @@ -74,17 +77,123 @@ const TargetLoweringInfo &LowerModule::getTargetLoweringInfo() { return *TheTargetCodeGenInfo; } -LogicalResult LowerModule::rewriteGlobalFunctionDefinition(FuncOp op, - LowerModule &state) { +void LowerModule::setCIRFunctionAttributes(FuncOp GD, + const LowerFunctionInfo &Info, + FuncOp F, bool IsThunk) { + unsigned CallingConv; + // NOTE(cir): The method below will update the F function in-place with the + // proper attributes. + constructAttributeList(GD.getName(), Info, GD, F, CallingConv, + /*AttrOnCallSite=*/false, IsThunk); + // TODO(cir): Set Function's calling convention. +} + +/// Set function attributes for a function declaration. +/// +/// This method is based on CodeGenModule::SetFunctionAttributes but it +/// altered to consider only the ABI/Target-related bits. +void LowerModule::setFunctionAttributes(FuncOp oldFn, FuncOp newFn, + bool IsIncompleteFunction, + bool IsThunk) { + + // TODO(cir): There's some special handling from attributes related to LLVM + // intrinsics. Should we do that here as well? + + // Setup target-specific attributes. + if (!IsIncompleteFunction) + setCIRFunctionAttributes(oldFn, getTypes().arrangeGlobalDeclaration(oldFn), + newFn, IsThunk); + + // TODO(cir): Handle attributes for returned "this" objects. + + // NOTE(cir): Skipping some linkage and other global value attributes here as + // it might be better for CIRGen to handle them. + + // TODO(cir): Skipping section attributes here. + + // TODO(cir): Skipping error attributes here. + + // If we plan on emitting this inline builtin, we can't treat it as a builtin. + if (MissingFeatures::funcDeclIsInlineBuiltinDeclaration()) { + llvm_unreachable("NYI"); + } + + if (MissingFeatures::funcDeclIsReplaceableGlobalAllocationFunction()) { + llvm_unreachable("NYI"); + } + + if (MissingFeatures::funcDeclIsCXXConstructorDecl() || + MissingFeatures::funcDeclIsCXXDestructorDecl()) + llvm_unreachable("NYI"); + else if (MissingFeatures::funcDeclIsCXXMethodDecl()) + llvm_unreachable("NYI"); + + // NOTE(cir) Skipping emissions that depend on codegen options, as well as + // sanitizers handling here. Do this in CIRGen. + + if (MissingFeatures::langOpts() && MissingFeatures::openMP()) + llvm_unreachable("NYI"); + + // NOTE(cir): Skipping more things here that depend on codegen options. + + if (MissingFeatures::extParamInfo()) { + llvm_unreachable("NYI"); + } +} + +/// Rewrites an existing function to conform to the ABI. +/// +/// This method is based on CodeGenModule::EmitGlobalFunctionDefinition but it +/// considerably simplified as it tries to remove any CodeGen related code. +LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint(op); - return failure(); + + // Get ABI/target-specific function information. + const LowerFunctionInfo &FI = this->getTypes().arrangeGlobalDeclaration(op); + + // Get ABI/target-specific function type. + FuncType Ty = this->getTypes().getFunctionType(FI); + + // NOTE(cir): Skipping getAddrOfFunction and getOrCreateCIRFunction methods + // here, as they are mostly codegen logic. + + // Create a new function with the ABI-specific types. + FuncOp newFn = cast(rewriter.cloneWithoutRegions(op)); + newFn.setType(Ty); + + // NOTE(cir): The clone above will preserve any existing attributes. If there + // are high-level attributes that ought to be dropped, do it here. + + // Set up ABI-specific function attributes. + setFunctionAttributes(op, newFn, false, /*IsThunk=*/false); + if (MissingFeatures::extParamInfo()) { + llvm_unreachable("ExtraAttrs are NYI"); + } + + if (LowerFunction(*this, rewriter, op, newFn) + .generateCode(op, newFn, FI) + .failed()) + return failure(); + + // Erase original ABI-agnostic function. + rewriter.eraseOp(op); + return success(); } -LogicalResult LowerModule::rewriteFunctionCall(CallOp caller, FuncOp callee) { +LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint(caller); - return failure(); + rewriter.setInsertionPoint(callOp); + + // Create a new function with the ABI-specific calling convention. + if (LowerFunction(*this, rewriter, funcOp, callOp) + .rewriteCallOp(callOp) + .failed()) + return failure(); + + // Erase original ABI-agnostic call. + rewriter.eraseOp(callOp); + return success(); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index d99d40f90554..74f7ed0bb5ac 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -60,15 +60,35 @@ class LowerModule { // FIXME(cir): This would be in ASTContext, not CodeGenModule. clang::TargetCXXABI::Kind getCXXABIKind() const { auto kind = getTarget().getCXXABI().getKind(); - assert(::cir::MissingFeatures::langOpts()); + assert(!::cir::MissingFeatures::langOpts()); return kind; } + void + constructAttributeList(StringRef Name, const LowerFunctionInfo &FI, + FuncOp CalleeInfo, // TODO(cir): Implement CalleeInfo? + FuncOp newFn, unsigned &CallingConv, + bool AttrOnCallSite, bool IsThunk); + + void setCIRFunctionAttributes(FuncOp GD, const LowerFunctionInfo &Info, + FuncOp F, bool IsThunk); + + /// Set function attributes for a function declaration. + void setFunctionAttributes(FuncOp oldFn, FuncOp newFn, + bool IsIncompleteFunction, bool IsThunk); + + // Create a CIR FuncOp with with the given signature. + FuncOp createCIRFunction( + StringRef MangledName, FuncType Ty, FuncOp D, bool ForVTable, + bool DontDefer = false, bool IsThunk = false, + ArrayRef = {}, // TODO(cir): __attribute__(()) stuff. + bool IsForDefinition = false); + // Rewrite CIR FuncOp to match the target ABI. - LogicalResult rewriteGlobalFunctionDefinition(FuncOp op, LowerModule &state); + LogicalResult rewriteFunctionDefinition(FuncOp op); // Rewrite CIR CallOp to match the target ABI. - LogicalResult rewriteFunctionCall(CallOp caller, FuncOp callee); + LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index 1186da9df1e7..3d8ca6cfe61f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -12,13 +12,59 @@ //===----------------------------------------------------------------------===// #include "LowerTypes.h" +#include "CIRToCIRArgMapping.h" #include "LowerModule.h" #include "mlir/Support/LLVM.h" +#include "clang/CIR/ABIArgInfo.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" using namespace ::mlir::cir; +unsigned LowerTypes::clangCallConvToLLVMCallConv(clang::CallingConv CC) { + switch (CC) { + case clang::CC_C: + return llvm::CallingConv::C; + default: + llvm_unreachable("calling convention NYI"); + } +} + LowerTypes::LowerTypes(LowerModule &LM, StringRef DLString) - : LM(LM), queries(LM.getContext()), Target(LM.getTarget()), + : LM(LM), context(LM.getContext()), Target(LM.getTarget()), CXXABI(LM.getCXXABI()), TheABIInfo(LM.getTargetLoweringInfo().getABIInfo()), - mlirContext(LM.getMLIRContext()), DL(LM.getModule()) {} + mlirContext(LM.getMLIRContext()), DL(DLString, LM.getModule()) {} + +/// Return the ABI-specific function type for a CIR function type. +FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { + + mlir::Type resultType = {}; + const ::cir::ABIArgInfo &retAI = FI.getReturnInfo(); + switch (retAI.getKind()) { + case ::cir::ABIArgInfo::Ignore: + resultType = VoidType::get(getMLIRContext()); + break; + default: + llvm_unreachable("Missing ABIArgInfo::Kind"); + } + + CIRToCIRArgMapping IRFunctionArgs(getContext(), FI, true); + SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); + + // Add type for sret argument. + assert(!::cir::MissingFeatures::sretArgs()); + + // Add type for inalloca argument. + assert(!::cir::MissingFeatures::inallocaArgs()); + + // Add in all of the required arguments. + unsigned ArgNo = 0; + LowerFunctionInfo::const_arg_iterator it = FI.arg_begin(), + ie = it + FI.getNumRequiredArgs(); + for (; it != ie; ++it, ++ArgNo) { + llvm_unreachable("NYI"); + } + + return FuncType::get(getMLIRContext(), ArgTypes, resultType, FI.isVariadic()); +} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h index 395665d47f16..44f0d16b1bd8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -17,8 +17,12 @@ #include "ABIInfo.h" #include "CIRCXXABI.h" #include "CIRLowerContext.h" +#include "LowerCall.h" #include "mlir/IR/MLIRContext.h" +#include "clang/Basic/Specifiers.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/FnInfoOpts.h" namespace mlir { namespace cir { @@ -33,7 +37,7 @@ class LowerTypes { private: LowerModule &LM; - CIRLowerContext &queries; + CIRLowerContext &context; const clang::TargetInfo &Target; CIRCXXABI &CXXABI; @@ -46,11 +50,46 @@ class LowerTypes { ::cir::CIRDataLayout DL; + const ABIInfo &getABIInfo() const { return TheABIInfo; } + public: LowerTypes(LowerModule &LM, StringRef DLString); ~LowerTypes() = default; LowerModule &getLM() const { return LM; } + CIRCXXABI &getCXXABI() const { return CXXABI; } + CIRLowerContext &getContext() { return context; } + MLIRContext *getMLIRContext() { return mlirContext; } + + /// Convert clang calling convention to LLVM callilng convention. + unsigned clangCallConvToLLVMCallConv(clang::CallingConv CC); + + /// Free functions are functions that are compatible with an ordinary + /// C function pointer type. + /// FIXME(cir): Does the "free function" concept makes sense here? + const LowerFunctionInfo &arrangeFunctionDeclaration(FuncOp fnOp); + const LowerFunctionInfo &arrangeFreeFunctionCall(const OperandRange args, + const FuncType fnType, + bool chainCall); + const LowerFunctionInfo &arrangeFreeFunctionType(FuncType FTy); + + const LowerFunctionInfo &arrangeGlobalDeclaration(FuncOp fnOp); + + /// Arrange the argument and result information for an abstract value + /// of a given function type. This is the method which all of the + /// above functions ultimately defer to. + /// + /// \param resultType - ABI-agnostic CIR result type. + /// \param opts - Options to control the arrangement. + /// \param argTypes - ABI-agnostic CIR argument types. + /// \param required - Information about required/optional arguments. + const LowerFunctionInfo &arrangeLLVMFunctionInfo(Type resultType, + ::cir::FnInfoOpts opts, + ArrayRef argTypes, + RequiredArgs required); + + /// Return the ABI-specific function type for a CIR function type. + FuncType getFunctionType(const LowerFunctionInfo &FI); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 6d2a329e6d2a..736f3a7ea301 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -1,27 +1,164 @@ + +#include "clang/CIR/Target/x86.h" #include "ABIInfo.h" +#include "ABIInfoImpl.h" #include "LowerModule.h" #include "LowerTypes.h" #include "TargetInfo.h" +#include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" #include namespace mlir { namespace cir { class X86_64ABIInfo : public ABIInfo { + using Class = ::cir::X86ArgClass; + + /// Determine the x86_64 register classes in which the given type T should be + /// passed. + /// + /// \param Lo - The classification for the parts of the type + /// residing in the low word of the containing object. + /// + /// \param Hi - The classification for the parts of the type + /// residing in the high word of the containing object. + /// + /// \param OffsetBase - The bit offset of this type in the + /// containing object. Some parameters are classified different + /// depending on whether they straddle an eightbyte boundary. + /// + /// \param isNamedArg - Whether the argument in question is a "named" + /// argument, as used in AMD64-ABI 3.5.7. + /// + /// \param IsRegCall - Whether the calling conversion is regcall. + /// + /// If a word is unused its result will be NoClass; if a type should + /// be passed in Memory then at least the classification of \arg Lo + /// will be Memory. + /// + /// The \arg Lo class will be NoClass iff the argument is ignored. + /// + /// If the \arg Lo class is ComplexX87, then the \arg Hi class will + /// also be ComplexX87. + void classify(Type T, uint64_t OffsetBase, Class &Lo, Class &Hi, + bool isNamedArg, bool IsRegCall = false) const; public: X86_64ABIInfo(LowerTypes &CGT, X86AVXABILevel AVXLevel) : ABIInfo(CGT) {} + + ::cir::ABIArgInfo classifyReturnType(Type RetTy) const; + + void computeInfo(LowerFunctionInfo &FI) const override; }; class X86_64TargetLoweringInfo : public TargetLoweringInfo { public: X86_64TargetLoweringInfo(LowerTypes &LM, X86AVXABILevel AVXLevel) : TargetLoweringInfo(std::make_unique(LM, AVXLevel)) { - assert(::cir::MissingFeatures::swift()); + assert(!::cir::MissingFeatures::swift()); } }; +void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, + bool isNamedArg, bool IsRegCall) const { + // FIXME: This code can be simplified by introducing a simple value class + // for Class pairs with appropriate constructor methods for the various + // situations. + + // FIXME: Some of the split computations are wrong; unaligned vectors + // shouldn't be passed in registers for example, so there is no chance they + // can straddle an eightbyte. Verify & simplify. + + Lo = Hi = Class::NoClass; + + Class &Current = OffsetBase < 64 ? Lo : Hi; + Current = Class::Memory; + + // FIXME(cir): There's currently no direct way to identify if a type is a + // builtin. + if (/*isBuitinType=*/true) { + if (Ty.isa()) { + Current = Class::NoClass; + } else { + llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; + llvm_unreachable("NYI"); + } + // FIXME: _Decimal32 and _Decimal64 are SSE. + // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). + return; + } + + llvm::outs() << "Missing X86 classification for non-builtin types\n"; + llvm_unreachable("NYI"); +} + +::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { + // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the + // classification algorithm. + X86_64ABIInfo::Class Lo, Hi; + classify(RetTy, 0, Lo, Hi, true); + + // Check some invariants. + assert((Hi != Class::Memory || Lo == Class::Memory) && + "Invalid memory classification."); + assert((Hi != Class::SSEUp || Lo == Class::SSE) && + "Invalid SSEUp classification."); + + switch (Lo) { + case Class::NoClass: + if (Hi == Class::NoClass) + return ::cir::ABIArgInfo::getIgnore(); + break; + default: + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); +} + +void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { + const unsigned CallingConv = FI.getCallingConvention(); + // It is possible to force Win64 calling convention on any x86_64 target by + // using __attribute__((ms_abi)). In such case to correctly emit Win64 + // compatible code delegate this call to WinX86_64ABIInfo::computeInfo. + if (CallingConv == llvm::CallingConv::Win64) { + llvm_unreachable("Win64 CC is NYI"); + } + + bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; + + // Keep track of the number of assigned registers. + unsigned NeededSSE = 0, MaxVectorWidth = 0; + + if (!::mlir::cir::classifyReturnType(getCXXABI(), FI, *this)) { + if (IsRegCall || ::cir::MissingFeatures::regCall()) { + llvm_unreachable("RegCall is NYI"); + } else + FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); + } + + // If the return value is indirect, then the hidden argument is consuming + // one integer register. + if (FI.getReturnInfo().isIndirect()) + llvm_unreachable("NYI"); + else if (NeededSSE && MaxVectorWidth) + llvm_unreachable("NYI"); + + // The chain argument effectively gives us another free register. + if (::cir::MissingFeatures::chainCall()) + llvm_unreachable("NYI"); + + // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers + // get assigned (in left-to-right order) for passing as follows... + unsigned ArgNo = 0; + for (LowerFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it, ++ArgNo) { + llvm_unreachable("NYI"); + } +} + std::unique_ptr createX86_64TargetLoweringInfo(LowerModule &LM, X86AVXABILevel AVXLevel) { return std::make_unique(LM.getTypes(), AVXLevel); diff --git a/clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp similarity index 69% rename from clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp rename to clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index e8772b24c3b8..6bb4d71d4877 100644 --- a/clang/test/CIR/Transforms/Target/x86/x86-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -1,5 +1,8 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// Just check if the pass is called for now. -// CHECK: module +// CHECK: @_Z4Voidv() +void Void(void) { +// CHECK: cir.call @_Z4Voidv() : () -> () + Void(); +} From 38ccd14146ab2b3ee2e27295ad362189e7f9e206 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 21 Jun 2024 02:07:57 +0800 Subject: [PATCH 1643/2301] [CIR] Add support for unary fp2int builtins (#669) This PR adds new ops, CIRGen, and LLVM lowering support for the following unary fp2int builtins and libc functions: - `__builtin_lround` family of builtins and `lround` family of libc functions; - `__builtin_llround` family of builtins and `llround` family of libc functions; - `__builtin_lrint` family of builtins and `lrint` family of libc functions; - `__builtin_llrint` family of builtins and `llrint` family of libc functions. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 19 ++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 25 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 39 ++- .../test/CIR/CodeGen/builtin-floating-point.c | 272 ++++++++++++++++++ 4 files changed, 340 insertions(+), 15 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 03fc98619226..44724c079d09 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3296,6 +3296,25 @@ def IterEndOp : CIR_Op<"iterator_end"> { // Floating Point Ops //===----------------------------------------------------------------------===// +class UnaryFPToIntBuiltinOp : CIR_Op { + let arguments = (ins CIR_AnyFloat:$src); + let results = (outs CIR_IntType:$result); + + let summary = [{ + Builtin function that takes a floating-point value as input and produces an + integral value as output. + }]; + + let assemblyFormat = [{ + $src `:` type($src) `->` type($result) attr-dict + }]; +} + +def LroundOp : UnaryFPToIntBuiltinOp<"lround">; +def LLroundOp : UnaryFPToIntBuiltinOp<"llround">; +def LrintOp : UnaryFPToIntBuiltinOp<"lrint">; +def LLrintOp : UnaryFPToIntBuiltinOp<"llrint">; + class UnaryFPToFPBuiltinOp : CIR_Op { let arguments = (ins CIR_AnyFloat:$src); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 2bdc7ed40633..058fa54ecb04 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -58,6 +58,19 @@ static RValue buildUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { return RValue::get(Call->getResult(0)); } +template +static RValue buildUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &CGF, + const CallExpr &E) { + auto ResultType = CGF.ConvertType(E.getType()); + auto Src = CGF.buildScalarExpr(E.getArg(0)); + + if (CGF.getBuilder().getIsFPConstrained()) + llvm_unreachable("constraint FP operations are NYI"); + + auto Call = CGF.getBuilder().create(Src.getLoc(), ResultType, Src); + return RValue::get(Call->getResult(0)); +} + template static RValue buildBinaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { auto Arg0 = CGF.buildScalarExpr(E.getArg(0)); @@ -636,6 +649,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_lround: case Builtin::BI__builtin_lroundf: case Builtin::BI__builtin_lroundl: + return buildUnaryMaybeConstrainedFPToIntBuiltin( + *this, *E); + case Builtin::BI__builtin_lroundf128: llvm_unreachable("NYI"); @@ -645,6 +661,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_llround: case Builtin::BI__builtin_llroundf: case Builtin::BI__builtin_llroundl: + return buildUnaryMaybeConstrainedFPToIntBuiltin( + *this, *E); + case Builtin::BI__builtin_llroundf128: llvm_unreachable("NYI"); @@ -654,6 +673,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_lrint: case Builtin::BI__builtin_lrintf: case Builtin::BI__builtin_lrintl: + return buildUnaryMaybeConstrainedFPToIntBuiltin(*this, + *E); + case Builtin::BI__builtin_lrintf128: llvm_unreachable("NYI"); @@ -663,6 +685,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_llrint: case Builtin::BI__builtin_llrintf: case Builtin::BI__builtin_llrintl: + return buildUnaryMaybeConstrainedFPToIntBuiltin( + *this, *E); + case Builtin::BI__builtin_llrintf128: llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 572065a00222..4a5b63569f03 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3220,8 +3220,7 @@ class CIRCmpThreeWayOpLowering }; template -class CIRUnaryFPToFPBuiltinOpLowering - : public mlir::OpConversionPattern { +class CIRUnaryFPBuiltinOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -3236,20 +3235,29 @@ class CIRUnaryFPToFPBuiltinOpLowering }; using CIRCeilOpLowering = - CIRUnaryFPToFPBuiltinOpLowering; + CIRUnaryFPBuiltinOpLowering; using CIRFloorOpLowering = - CIRUnaryFPToFPBuiltinOpLowering; + CIRUnaryFPBuiltinOpLowering; using CIRFabsOpLowering = - CIRUnaryFPToFPBuiltinOpLowering; + CIRUnaryFPBuiltinOpLowering; using CIRNearbyintOpLowering = - CIRUnaryFPToFPBuiltinOpLowering; + CIRUnaryFPBuiltinOpLowering; using CIRRintOpLowering = - CIRUnaryFPToFPBuiltinOpLowering; + CIRUnaryFPBuiltinOpLowering; using CIRRoundOpLowering = - CIRUnaryFPToFPBuiltinOpLowering; + CIRUnaryFPBuiltinOpLowering; using CIRTruncOpLowering = - CIRUnaryFPToFPBuiltinOpLowering; + CIRUnaryFPBuiltinOpLowering; + +using CIRLroundOpLowering = + CIRUnaryFPBuiltinOpLowering; +using CIRLLroundOpLowering = + CIRUnaryFPBuiltinOpLowering; +using CIRLrintOpLowering = + CIRUnaryFPBuiltinOpLowering; +using CIRLLrintOpLowering = + CIRUnaryFPBuiltinOpLowering; template class CIRBinaryFPToFPBuiltinOpLowering @@ -3300,11 +3308,12 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, - CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, CIRCeilOpLowering, - CIRFloorOpLowering, CIRFAbsOpLowering, CIRNearbyintOpLowering, - CIRRintOpLowering, CIRRoundOpLowering, CIRTruncOpLowering, - CIRCopysignOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering>( - converter, patterns.getContext()); + CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, CIRLroundOpLowering, + CIRLLroundOpLowering, CIRLrintOpLowering, CIRLLrintOpLowering, + CIRCeilOpLowering, CIRFloorOpLowering, CIRFAbsOpLowering, + CIRNearbyintOpLowering, CIRRintOpLowering, CIRRoundOpLowering, + CIRTruncOpLowering, CIRCopysignOpLowering, CIRFMaxOpLowering, + CIRFMinOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c index 329cbea8fc7c..51e7a1a6a6ef 100644 --- a/clang/test/CIR/CodeGen/builtin-floating-point.c +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -3,6 +3,278 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-llvm -o %t.ll %s // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM +// lround + +long my_lroundf(float f) { + return __builtin_lroundf(f); + // CHECK: cir.func @my_lroundf + // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.float -> !s64i + + // LLVM: define i64 @my_lroundf + // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f32(float %{{.+}}) + // LLVM: } +} + +long my_lround(double f) { + return __builtin_lround(f); + // CHECK: cir.func @my_lround + // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.double -> !s64i + + // LLVM: define i64 @my_lround + // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f64(double %{{.+}}) + // LLVM: } +} + +long my_lroundl(long double f) { + return __builtin_lroundl(f); + // CHECK: cir.func @my_lroundl + // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.long_double -> !s64i + // AARCH64: %{{.+}} = cir.lround %{{.+}} : !cir.long_double -> !s64i + + // LLVM: define i64 @my_lroundl + // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f80(x86_fp80 %{{.+}}) + // LLVM: } +} + +long lroundf(float); +long lround(double); +long lroundl(long double); + +long call_lroundf(float f) { + return lroundf(f); + // CHECK: cir.func @call_lroundf + // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.float -> !s64i + + // LLVM: define i64 @call_lroundf + // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f32(float %{{.+}}) + // LLVM: } +} + +long call_lround(double f) { + return lround(f); + // CHECK: cir.func @call_lround + // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.double -> !s64i + + // LLVM: define i64 @call_lround + // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f64(double %{{.+}}) + // LLVM: } +} + +long call_lroundl(long double f) { + return lroundl(f); + // CHECK: cir.func @call_lroundl + // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.long_double -> !s64i + // AARCH64: %{{.+}} = cir.lround %{{.+}} : !cir.long_double -> !s64i + + // LLVM: define i64 @call_lroundl + // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f80(x86_fp80 %{{.+}}) + // LLVM: } +} + +// llround + +long long my_llroundf(float f) { + return __builtin_llroundf(f); + // CHECK: cir.func @my_llroundf + // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.float -> !s64i + + // LLVM: define i64 @my_llroundf + // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f32(float %{{.+}}) + // LLVM: } +} + +long long my_llround(double f) { + return __builtin_llround(f); + // CHECK: cir.func @my_llround + // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.double -> !s64i + + // LLVM: define i64 @my_llround + // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f64(double %{{.+}}) + // LLVM: } +} + +long long my_llroundl(long double f) { + return __builtin_llroundl(f); + // CHECK: cir.func @my_llroundl + // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.long_double -> !s64i + // AARCH64: %{{.+}} = cir.llround %{{.+}} : !cir.long_double -> !s64i + + // LLVM: define i64 @my_llroundl + // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f80(x86_fp80 %{{.+}}) + // LLVM: } +} + +long long llroundf(float); +long long llround(double); +long long llroundl(long double); + +long long call_llroundf(float f) { + return llroundf(f); + // CHECK: cir.func @call_llroundf + // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.float -> !s64i + + // LLVM: define i64 @call_llroundf + // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f32(float %{{.+}}) + // LLVM: } +} + +long long call_llround(double f) { + return llround(f); + // CHECK: cir.func @call_llround + // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.double -> !s64i + + // LLVM: define i64 @call_llround + // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f64(double %{{.+}}) + // LLVM: } +} + +long long call_llroundl(long double f) { + return llroundl(f); + // CHECK: cir.func @call_llroundl + // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.long_double -> !s64i + // AARCH64: %{{.+}} = cir.llround %{{.+}} : !cir.long_double -> !s64i + + // LLVM: define i64 @call_llroundl + // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f80(x86_fp80 %{{.+}}) + // LLVM: } +} + +// lrint + +long my_lrintf(float f) { + return __builtin_lrintf(f); + // CHECK: cir.func @my_lrintf + // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.float -> !s64i + + // LLVM: define i64 @my_lrintf + // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f32(float %{{.+}}) + // LLVM: } +} + +long my_lrint(double f) { + return __builtin_lrint(f); + // CHECK: cir.func @my_lrint + // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.double -> !s64i + + // LLVM: define i64 @my_lrint + // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f64(double %{{.+}}) + // LLVM: } +} + +long my_lrintl(long double f) { + return __builtin_lrintl(f); + // CHECK: cir.func @my_lrintl + // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.long_double -> !s64i + // AARCH64: %{{.+}} = cir.lrint %{{.+}} : !cir.long_double -> !s64i + + // LLVM: define i64 @my_lrintl + // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f80(x86_fp80 %{{.+}}) + // LLVM: } +} + +long lrintf(float); +long lrint(double); +long lrintl(long double); + +long call_lrintf(float f) { + return lrintf(f); + // CHECK: cir.func @call_lrintf + // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.float -> !s64i + + // LLVM: define i64 @call_lrintf + // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f32(float %{{.+}}) + // LLVM: } +} + +long call_lrint(double f) { + return lrint(f); + // CHECK: cir.func @call_lrint + // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.double -> !s64i + + // LLVM: define i64 @call_lrint + // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f64(double %{{.+}}) + // LLVM: } +} + +long call_lrintl(long double f) { + return lrintl(f); + // CHECK: cir.func @call_lrintl + // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.long_double -> !s64i + // AARCH64: %{{.+}} = cir.lrint %{{.+}} : !cir.long_double -> !s64i + + // LLVM: define i64 @call_lrintl + // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f80(x86_fp80 %{{.+}}) + // LLVM: } +} + +// llrint + +long long my_llrintf(float f) { + return __builtin_llrintf(f); + // CHECK: cir.func @my_llrintf + // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.float -> !s64i + + // LLVM: define i64 @my_llrintf + // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f32(float %{{.+}}) + // LLVM: } +} + +long long my_llrint(double f) { + return __builtin_llrint(f); + // CHECK: cir.func @my_llrint + // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.double -> !s64i + + // LLVM: define i64 @my_llrint + // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f64(double %{{.+}}) + // LLVM: } +} + +long long my_llrintl(long double f) { + return __builtin_llrintl(f); + // CHECK: cir.func @my_llrintl + // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.long_double -> !s64i + // AARCH64: %{{.+}} = cir.llrint %{{.+}} : !cir.long_double -> !s64i + + // LLVM: define i64 @my_llrintl + // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f80(x86_fp80 %{{.+}}) + // LLVM: } +} + +long long llrintf(float); +long long llrint(double); +long long llrintl(long double); + +long long call_llrintf(float f) { + return llrintf(f); + // CHECK: cir.func @call_llrintf + // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.float -> !s64i + + // LLVM: define i64 @call_llrintf + // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f32(float %{{.+}}) + // LLVM: } +} + +long long call_llrint(double f) { + return llrint(f); + // CHECK: cir.func @call_llrint + // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.double -> !s64i + + // LLVM: define i64 @call_llrint + // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f64(double %{{.+}}) + // LLVM: } +} + +long long call_llrintl(long double f) { + return llrintl(f); + // CHECK: cir.func @call_llrintl + // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.long_double -> !s64i + // AARCH64: %{{.+}} = cir.llrint %{{.+}} : !cir.long_double -> !s64i + + // LLVM: define i64 @call_llrintl + // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f80(x86_fp80 %{{.+}}) + // LLVM: } +} + // ceil float my_ceilf(float f) { From 96bd84e91cbf16c5d3087b1839465a4750cb62f6 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 21 Jun 2024 05:37:14 +0800 Subject: [PATCH 1644/2301] [CIR][CodeGen] Basic skeleton of SPIRV64 target support (#671) * SPIRV64 TargetInfo * Calling convention `SPIR_KERNEL` * Minimal ABI with Direct/Extend for arguments and Ignore for return --------- Co-authored-by: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 10 +- clang/lib/CIR/CodeGen/CallingConv.h | 6 + clang/lib/CIR/CodeGen/TargetInfo.cpp | 133 +++++++++++++++++- clang/lib/CIR/CodeGen/TargetInfo.h | 15 ++ .../CIR/CodeGen/OpenCL/addrspace-alloca.cl | 25 ++++ clang/test/CIR/CodeGen/OpenCL/spirv-target.cl | 29 ++++ 8 files changed, 214 insertions(+), 13 deletions(-) create mode 100644 clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl create mode 100644 clang/test/CIR/CodeGen/OpenCL/spirv-target.cl diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 87878ad1c723..b19fd67fcee0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -36,10 +36,10 @@ CIRGenFunction::AutoVarEmission CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, mlir::OpBuilder::InsertPoint ip) { QualType Ty = D.getType(); - // TODO: (|| Ty.getAddressSpace() == LangAS::opencl_private && - // getLangOpts().OpenCL)) assert(!MissingFeatures::openCL()); - assert(Ty.getAddressSpace() == LangAS::Default); + assert( + Ty.getAddressSpace() == LangAS::Default || + (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL)); assert(!D.hasAttr() && "not implemented"); auto loc = getLoc(D.getSourceRange()); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index f090fc71b86c..84b95fc3767b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -993,7 +993,8 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, llvm_unreachable("NYI"); if (FD && getLangOpts().OpenCL) { - llvm_unreachable("NYI"); + // TODO(cir): Emit OpenCL kernel metadata + assert(!MissingFeatures::openCL()); } // If we are checking function types, emit a function type signature as diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 3eb2b1b455e3..d470e2080fcc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -25,8 +25,14 @@ using namespace clang; using namespace cir; unsigned CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { - assert(CC == CC_C && "No other calling conventions implemented."); - return cir::CallingConv::C; + switch (CC) { + case CC_C: + return cir::CallingConv::C; + case CC_OpenCLKernel: + return CGM.getTargetCIRGenInfo().getOpenCLKernelCallingConv(); + default: + llvm_unreachable("No other calling conventions implemented."); + } } CIRGenTypes::CIRGenTypes(CIRGenModule &cgm) diff --git a/clang/lib/CIR/CodeGen/CallingConv.h b/clang/lib/CIR/CodeGen/CallingConv.h index e6b41cdb550c..2f7a5d270c24 100644 --- a/clang/lib/CIR/CodeGen/CallingConv.h +++ b/clang/lib/CIR/CodeGen/CallingConv.h @@ -34,6 +34,12 @@ enum { /// with typical C calling conventions, the callee/caller have to tolerate /// certain amounts of prototype mismatch. C = 0, + + /// Used for SPIR kernel functions. Inherits the restrictions of SPIR_FUNC, + /// except it cannot have non-void return values, it cannot have variable + /// arguments, it can also be called by the host or it is externally + /// visible. + SPIR_KERNEL = 76, }; } // namespace CallingConv diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 2973a6ce70d3..617073bd0230 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -20,6 +20,68 @@ static bool testIfIsVoidTy(QualType Ty) { return k == BuiltinType::Void; } +static bool isAggregateTypeForABI(QualType T) { + return !CIRGenFunction::hasScalarEvaluationKind(T) || + T->isMemberFunctionPointerType(); +} + +/// Pass transparent unions as if they were the type of the first element. Sema +/// should ensure that all elements of the union have the same "machine type". +static QualType useFirstFieldIfTransparentUnion(QualType Ty) { + assert(!Ty->getAsUnionType() && "NYI"); + return Ty; +} + +namespace { + +/// The default implementation for ABI specific +/// details. This implementation provides information which results in +/// self-consistent and sensible LLVM IR generation, but does not +/// conform to any particular ABI. +class DefaultABIInfo : public ABIInfo { +public: + DefaultABIInfo(CIRGenTypes &CGT) : ABIInfo(CGT) {} + + virtual ~DefaultABIInfo() = default; + + ABIArgInfo classifyReturnType(QualType RetTy) const { + if (RetTy->isVoidType()) + return ABIArgInfo::getIgnore(); + + llvm_unreachable("Non-void return type NYI"); + } + + ABIArgInfo classifyArgumentType(QualType Ty) const { + Ty = useFirstFieldIfTransparentUnion(Ty); + + if (isAggregateTypeForABI(Ty)) { + llvm_unreachable("NYI"); + } + + // Treat an enum type as its underlying type. + if (const EnumType *EnumTy = Ty->getAs()) + llvm_unreachable("NYI"); + + ASTContext &Context = getContext(); + if (const auto *EIT = Ty->getAs()) + llvm_unreachable("NYI"); + + if (isPromotableIntegerTypeForABI(Ty)) { + llvm_unreachable("ArgInfo integer extend NYI"); + } else { + return ABIArgInfo::getDirect(); + } + } + + void computeInfo(CIRGenFunctionInfo &FI) const override { + if (!getCXXABI().classifyReturnType(FI)) + FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); + for (auto &I : FI.arguments()) + I.info = classifyArgumentType(I.type); + } +}; +} // namespace + //===----------------------------------------------------------------------===// // AArch64 ABI Implementation //===----------------------------------------------------------------------===// @@ -151,6 +213,66 @@ class X86_64TargetCIRGenInfo : public TargetCIRGenInfo { }; } // namespace +//===----------------------------------------------------------------------===// +// Base ABI and target codegen info implementation common between SPIR and +// SPIR-V. +//===----------------------------------------------------------------------===// + +namespace { +class CommonSPIRABIInfo : public DefaultABIInfo { +public: + CommonSPIRABIInfo(CIRGenTypes &CGT) : DefaultABIInfo(CGT) {} +}; + +class SPIRVABIInfo : public CommonSPIRABIInfo { +public: + SPIRVABIInfo(CIRGenTypes &CGT) : CommonSPIRABIInfo(CGT) {} + void computeInfo(CIRGenFunctionInfo &FI) const override { + // The logic is same as in DefaultABIInfo with an exception on the kernel + // arguments handling. + llvm::CallingConv::ID CC = FI.getCallingConvention(); + + bool cxxabiHit = getCXXABI().classifyReturnType(FI); + assert(!cxxabiHit && "C++ ABI not considered"); + + FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); + + for (auto &I : FI.arguments()) { + if (CC == llvm::CallingConv::SPIR_KERNEL) { + I.info = classifyKernelArgumentType(I.type); + } else { + I.info = classifyArgumentType(I.type); + } + } + } + +private: + ABIArgInfo classifyKernelArgumentType(QualType Ty) const { + assert(!getContext().getLangOpts().CUDAIsDevice && "NYI"); + return classifyArgumentType(Ty); + } +}; +} // namespace +namespace { + +class CommonSPIRTargetCIRGenInfo : public TargetCIRGenInfo { +public: + CommonSPIRTargetCIRGenInfo(std::unique_ptr ABIInfo) + : TargetCIRGenInfo(std::move(ABIInfo)) {} + + unsigned getOpenCLKernelCallingConv() const override { + return llvm::CallingConv::SPIR_KERNEL; + } +}; + +class SPIRVTargetCIRGenInfo : public CommonSPIRTargetCIRGenInfo { +public: + SPIRVTargetCIRGenInfo(CIRGenTypes &CGT) + : CommonSPIRTargetCIRGenInfo(std::make_unique(CGT)) {} +}; + +} // namespace + // TODO(cir): remove the attribute once this gets used. LLVM_ATTRIBUTE_UNUSED static bool classifyReturnType(const CIRGenCXXABI &CXXABI, @@ -189,13 +311,6 @@ void X86_64ABIInfo::computeInfo(CIRGenFunctionInfo &FI) const { FI.getReturnInfo() = ABIArgInfo::getDirect(CGT.ConvertType(RetTy)); } -/// Pass transparent unions as if they were the type of the first element. Sema -/// should ensure that all elements of the union have the same "machine type". -static QualType useFirstFieldIfTransparentUnion(QualType Ty) { - assert(!Ty->getAsUnionType() && "NYI"); - return Ty; -} - /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in /// an 8-byte GPR. This means that we either have a scalar or we are talking /// about the high or low part of an up-to-16-byte struct. This routine picks @@ -459,5 +574,9 @@ const TargetCIRGenInfo &CIRGenModule::getTargetCIRGenInfo() { return SetCIRGenInfo(new X86_64TargetCIRGenInfo(genTypes, AVXLevel)); } } + + case llvm::Triple::spirv64: { + return SetCIRGenInfo(new SPIRVTargetCIRGenInfo(genTypes)); + } } } diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index ca84cac72ed8..395ef52ab4c8 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -78,6 +78,21 @@ class TargetCIRGenInfo { mlir::Type DestTy, bool IsNonNull = false) const; + /// Get LLVM calling convention for OpenCL kernel. + virtual unsigned getOpenCLKernelCallingConv() const { + // OpenCL kernels are called via an explicit runtime API with arguments + // set with clSetKernelArg(), not as normal sub-functions. + // Return SPIR_KERNEL by default as the kernel calling convention to + // ensure the fingerprint is fixed such way that each OpenCL argument + // gets one matching argument in the produced kernel function argument + // list to enable feasible implementation of clSetKernelArg() with + // aggregates etc. In case we would use the default C calling conv here, + // clSetKernelArg() might break depending on the target-specific + // conventions; different targets might split structs passed as values + // to multiple function arguments etc. + return llvm::CallingConv::SPIR_KERNEL; + } + virtual ~TargetCIRGenInfo() {} }; diff --git a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl new file mode 100644 index 000000000000..baa14874765b --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +// CIR: cir.func @func(%arg0: !cir.ptr +// LLVM: @func(ptr addrspace(3) +kernel void func(local int *p) { + // CIR-NEXT: %[[#ALLOCA_P:]] = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} + // LLVM-NEXT: %[[#ALLOCA_P:]] = alloca ptr addrspace(3), i64 1, align 8 + + int x; + // CIR-NEXT: %[[#ALLOCA_X:]] = cir.alloca !s32i, !cir.ptr, ["x"] {alignment = 4 : i64} + // LLVM-NEXT: %[[#ALLOCA_X:]] = alloca i32, i64 1, align 4 + + global char *b; + // CIR-NEXT: %[[#ALLOCA_B:]] = cir.alloca !cir.ptr, !cir.ptr>, ["b"] {alignment = 8 : i64} + // LLVM-NEXT: %[[#ALLOCA_B:]] = alloca ptr addrspace(1), i64 1, align 8 + + // Store of the argument `p` + // CIR-NEXT: cir.store %arg0, %[[#ALLOCA_P]] : !cir.ptr, !cir.ptr> + // LLVM-NEXT: store ptr addrspace(3) %{{[0-9]+}}, ptr %[[#ALLOCA_P]], align 8 + + return; +} diff --git a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl new file mode 100644 index 000000000000..44c59fc3534c --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl @@ -0,0 +1,29 @@ +// See also: clang/test/CodeGenOpenCL/spirv_target.cl +// RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t_64.cir +// RUN: FileCheck --input-file=%t_64.cir %s --check-prefix=CIR-SPIRV64 +// RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t_64.ll +// RUN: FileCheck --input-file=%t_64.ll %s --check-prefix=LLVM-SPIRV64 + +// CIR-SPIRV64: cir.triple = "spirv64-unknown-unknown" +// LLVM-SPIRV64: target triple = "spirv64-unknown-unknown" + +typedef struct { + char c; + void *v; + void *v2; +} my_st; + +// CIR-SPIRV64: cir.func @func( +// LLVM-SPIRV64: @func( +kernel void func(global long *arg) { + int res1[sizeof(my_st) == 24 ? 1 : -1]; // expected-no-diagnostics + int res2[sizeof(void *) == 8 ? 1 : -1]; // expected-no-diagnostics + int res3[sizeof(arg) == 8 ? 1 : -1]; // expected-no-diagnostics + + my_st *tmp = 0; + + // LLVM-SPIRV64: store i64 8, ptr addrspace(1) + arg[0] = (long)(&tmp->v); + // LLVM-SPIRV64: store i64 16, ptr addrspace(1) + arg[1] = (long)(&tmp->v2); +} From e30a2055deea1901525cb35a0b92fedbeb64c182 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 21 Jun 2024 10:47:38 -0700 Subject: [PATCH 1645/2301] [CIR] Update region-simplify usage to enum value --- clang/test/CIR/Lowering/goto.cir | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 0050e5393ed3..3263359714aa 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -1,10 +1,9 @@ -// RUN: cir-opt %s --pass-pipeline='builtin.module(cir-to-llvm,canonicalize{region-simplify=false})' -o - | FileCheck %s -check-prefix=MLIR -// XFAIL: * +// RUN: cir-opt %s --pass-pipeline='builtin.module(cir-to-llvm,canonicalize{region-simplify=disabled})' -o - | FileCheck %s -check-prefix=MLIR !s32i = !cir.int module { - + cir.func @gotoFromIf(%arg0: !s32i) -> !s32i { %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} From 352c73e9b2606cbad0c44d5d28b08720ac1824b4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 21 Jun 2024 10:48:56 -0700 Subject: [PATCH 1646/2301] [CIR] Fix erroneous flag passed to cc1 in a test invocation --- clang/test/CIR/CodeGen/address-space.c | 3 +-- clang/test/CIR/CodeGen/attributes.c | 3 +-- clang/test/CIR/CodeGen/compound-literal.c | 5 ++--- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/clang/test/CIR/CodeGen/address-space.c b/clang/test/CIR/CodeGen/address-space.c index 047aa25bbcc7..c743b2b723d6 100644 --- a/clang/test/CIR/CodeGen/address-space.c +++ b/clang/test/CIR/CodeGen/address-space.c @@ -1,8 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// XFAIL: * // CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr // LLVM: define void @foo(ptr addrspace(1) %0) diff --git a/clang/test/CIR/CodeGen/attributes.c b/clang/test/CIR/CodeGen/attributes.c index 67b625c11520..71c018b081ed 100644 --- a/clang/test/CIR/CodeGen/attributes.c +++ b/clang/test/CIR/CodeGen/attributes.c @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM -// XFAIL: * +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM extern int __attribute__((section(".shared"))) ext; int getExt() { diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c index 62f15826dd64..f0ee805dda44 100644 --- a/clang/test/CIR/CodeGen/compound-literal.c +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -1,8 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -S -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// XFAIL: * typedef struct { @@ -104,4 +103,4 @@ void split_large_page(unsigned long addr, pgprot_t prot) // CHECK: call void @llvm.memcpy.p0.p0.i32(ptr %[[ADDR]], ptr {{.*}}, i32 8, i1 false) // CHECK: br label %[[EXIT]] // CHECK: [[EXIT]]: -// CHECK: ret void \ No newline at end of file +// CHECK: ret void From e6741a93dd0e6c6d39ec77f0b0f5b9f54b9c7727 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 21 Jun 2024 12:46:20 -0700 Subject: [PATCH 1647/2301] [CIR] Account for changes to gep lowering that now uses i64 --- clang/test/CIR/CodeGen/var-arg-float.c | 7 +++---- clang/test/CIR/CodeGen/var-arg-scope.c | 3 +-- clang/test/CIR/CodeGen/var-arg.c | 7 +++---- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/clang/test/CIR/CodeGen/var-arg-float.c b/clang/test/CIR/CodeGen/var-arg-float.c index 10c950e86226..5dfbcd9ce12a 100644 --- a/clang/test/CIR/CodeGen/var-arg-float.c +++ b/clang/test/CIR/CodeGen/var-arg-float.c @@ -2,7 +2,6 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// XFAIL: * #include @@ -14,7 +13,7 @@ double f1(int n, ...) { return res; } -// BEFORE: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !cir.int, !cir.int} +// BEFORE: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} // BEFORE: cir.func @f1(%arg0: !s32i, ...) -> !cir.double // BEFORE: [[RETP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["__retval"] // BEFORE: [[RESP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["res", init] @@ -28,7 +27,7 @@ double f1(int n, ...) { // BEFORE: cir.return [[RETV]] : !cir.double // beginning block cir code -// AFTER: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !cir.int, !cir.int} +// AFTER: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} // AFTER: cir.func @f1(%arg0: !s32i, ...) -> !cir.double // AFTER: [[RETP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["__retval"] // AFTER: [[RESP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["res", init] @@ -104,7 +103,7 @@ double f1(int n, ...) { // LLVM: [[BB_ON_STACK]]: ; // LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, // LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8, -// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i32 8, +// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i64 8, // LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, // LLVM-NEXT: br label %[[BB_END]], diff --git a/clang/test/CIR/CodeGen/var-arg-scope.c b/clang/test/CIR/CodeGen/var-arg-scope.c index 9fa92b77b276..8a993a9bc1ce 100644 --- a/clang/test/CIR/CodeGen/var-arg-scope.c +++ b/clang/test/CIR/CodeGen/var-arg-scope.c @@ -2,7 +2,6 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// XFAIL: * void f1(__builtin_va_list c) { { __builtin_va_arg(c, void *); } @@ -90,7 +89,7 @@ void f1(__builtin_va_list c) { // LLVM: [[BB_ON_STACK]]: ; // LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, // LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8, -// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i32 8, +// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i64 8, // LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, // LLVM-NEXT: br label %[[BB_END]], diff --git a/clang/test/CIR/CodeGen/var-arg.c b/clang/test/CIR/CodeGen/var-arg.c index 7987237025a6..34b3705f111c 100644 --- a/clang/test/CIR/CodeGen/var-arg.c +++ b/clang/test/CIR/CodeGen/var-arg.c @@ -2,7 +2,6 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// XFAIL: * #include @@ -14,7 +13,7 @@ int f1(int n, ...) { return res; } -// BEFORE: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !cir.int, !cir.int} +// BEFORE: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} // BEFORE: cir.func @f1(%arg0: !s32i, ...) -> !s32i // BEFORE: [[RETP:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] // BEFORE: [[RESP:%.*]] = cir.alloca !s32i, !cir.ptr, ["res", init] @@ -27,7 +26,7 @@ int f1(int n, ...) { // BEFORE: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !s32i // BEFORE: cir.return [[RETV]] : !s32i -// AFTER: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !cir.int, !cir.int} +// AFTER: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} // AFTER: cir.func @f1(%arg0: !s32i, ...) -> !s32i // AFTER: [[RETP:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] // AFTER: [[RESP:%.*]] = cir.alloca !s32i, !cir.ptr, ["res", init] @@ -107,7 +106,7 @@ int f1(int n, ...) { // LLVM: [[BB_ON_STACK]]: ; // LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, // LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8, -// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i32 8, +// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i64 8, // LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, // LLVM-NEXT: br label %[[BB_END]], From 3aff342455b418be16926e2f0bcb4088710e6b72 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 21 Jun 2024 12:46:47 -0700 Subject: [PATCH 1648/2301] [CIR] Account for change to clang behavior to emit ZdlPvm instead of ZdlPV clang's choice of delete here changes from just hte pointer version to the sized version. Change the test to do the same --- clang/test/CIR/CodeGen/dtors.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index 5c34604a623a..b0db1d1cdcda 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -1,7 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// XFAIL: * - enum class EFMode { Always, Verbose }; @@ -38,10 +36,10 @@ class B : public A }; // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.record.decl.ast> // Class B -// CHECK: ![[ClassB:ty_.*]] = !cir.struct ()>>>} #cir.record.decl.ast>}> +// CHECK: ![[ClassB:ty_.*]] = !cir.struct // CHECK: cir.func @_Z4bluev() // CHECK: %0 = cir.alloca !ty_22PSEvent22, !cir.ptr, ["p", init] {alignment = 8 : i64} @@ -63,7 +61,7 @@ class B : public A // CHECK: cir.call @_ZN1BD2Ev(%0) : (!cir.ptr) -> () // operator delete(void*) declaration -// CHECK: cir.func private @_ZdlPv(!cir.ptr) +// CHECK: cir.func private @_ZdlPvm(!cir.ptr, !u64i) // B dtor => @B::~B() #2 // Calls dtor #1 @@ -75,7 +73,7 @@ class B : public A // CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: cir.call @_ZN1BD2Ev(%1) : (!cir.ptr) -> () // CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr -// CHECK: cir.call @_ZdlPv(%2) : (!cir.ptr) -> () +// CHECK: cir.call @_ZdlPvm(%2, %3) : (!cir.ptr, !u64i) -> () // CHECK: cir.return // CHECK: } From d7bc73c15ec67f89bf7e60d96e0ce004e3dcd247 Mon Sep 17 00:00:00 2001 From: Krito Date: Fri, 21 Jun 2024 05:49:35 +0800 Subject: [PATCH 1649/2301] [CIR][ThroughMLIR] fix BinOp, CmpOp Lowering to MLIR and lowering cir.vec.cmp to MLIR (#694) This PR does Three things: 1. Fixes the BinOp lowering to MLIR issue where signed numbers were not handled correctly, and adds support for vector types. The corresponding test files have been modified. 2. Fixes the CmpOp lowering to MLIR issue where signed numbers were not handled correctly And modified test files. 3. Adds cir.vec.cmp lowering to MLIR along with the corresponding test files. I originally planned to complete the remaining cir.vec.* lowerings in this PR, but it seems there's quite a lot to do, so I'll split it into multiple PRs. --------- Co-authored-by: Kritoooo --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 243 ++++++------------ .../Lowering/ThroughMLIR/LowerToMLIRHelpers.h | 43 ++++ .../CIR/Lowering/ThroughMLIR/binop-fp.cir | 68 ----- .../ThroughMLIR/binop-unsigned-int.cir | 78 ------ clang/test/CIR/Lowering/ThroughMLIR/binop.cpp | 77 ++++++ clang/test/CIR/Lowering/ThroughMLIR/cmp.cir | 76 ------ clang/test/CIR/Lowering/ThroughMLIR/cmp.cpp | 185 +++++++++++++ clang/test/CIR/Lowering/ThroughMLIR/if.c | 8 +- .../test/CIR/Lowering/ThroughMLIR/tenary.cir | 6 +- .../test/CIR/Lowering/ThroughMLIR/vectype.cpp | 124 +++++++++ clang/test/CIR/Lowering/ThroughMLIR/while.c | 2 +- 11 files changed, 518 insertions(+), 392 deletions(-) delete mode 100644 clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir delete mode 100644 clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/binop.cpp delete mode 100644 clang/test/CIR/Lowering/ThroughMLIR/cmp.cir create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/cmp.cpp diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index e8c62e3a097d..e823b428ab11 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -628,12 +628,18 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { "inconsistent operands' types not supported yet"); mlir::Type mlirType = getTypeConverter()->convertType(op.getType()); assert((mlirType.isa() || - mlirType.isa()) && + mlirType.isa() || + mlirType.isa()) && "operand type not supported yet"); + auto type = op.getLhs().getType(); + if (auto VecType = type.dyn_cast()) { + type = VecType.getEltType(); + } + switch (op.getKind()) { case mlir::cir::BinOpKind::Add: - if (mlirType.isa()) + if (type.isa()) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else @@ -641,7 +647,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Sub: - if (mlirType.isa()) + if (type.isa()) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else @@ -649,7 +655,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Mul: - if (mlirType.isa()) + if (type.isa()) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else @@ -657,23 +663,25 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Div: - if (mlirType.isa()) { - if (mlirType.isSignlessInteger()) + if (auto ty = type.dyn_cast()) { + if (ty.isUnsigned()) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else - llvm_unreachable("integer mlirType not supported in CIR yet"); + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); } else rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Rem: - if (mlirType.isa()) { - if (mlirType.isSignlessInteger()) + if (auto ty = type.dyn_cast()) { + if (ty.isUnsigned()) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else - llvm_unreachable("integer mlirType not supported in CIR yet"); + rewriter.replaceOpWithNewOp( + op, mlirType, adaptor.getLhs(), adaptor.getRhs()); } else rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); @@ -703,144 +711,22 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::CmpOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto type = adaptor.getLhs().getType(); - auto integerType = - mlir::IntegerType::get(getContext(), 1, mlir::IntegerType::Signless); + auto type = op.getLhs().getType(); mlir::Value mlirResult; - switch (op.getKind()) { - case mlir::cir::CmpOpKind::gt: { - if (type.isa()) { - mlir::arith::CmpIPredicate cmpIType; - if (!type.isSignlessInteger()) - llvm_unreachable("integer type not supported in CIR yet"); - cmpIType = mlir::arith::CmpIPredicate::ugt; - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - adaptor.getLhs(), adaptor.getRhs()); - } else if (type.isa()) { - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpFPredicateAttr::get( - getContext(), mlir::arith::CmpFPredicate::UGT), - adaptor.getLhs(), adaptor.getRhs(), - mlir::arith::FastMathFlagsAttr::get( - getContext(), mlir::arith::FastMathFlags::none)); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } - case mlir::cir::CmpOpKind::ge: { - if (type.isa()) { - mlir::arith::CmpIPredicate cmpIType; - if (!type.isSignlessInteger()) - llvm_unreachable("integer type not supported in CIR yet"); - cmpIType = mlir::arith::CmpIPredicate::uge; - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - adaptor.getLhs(), adaptor.getRhs()); - } else if (type.isa()) { - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpFPredicateAttr::get( - getContext(), mlir::arith::CmpFPredicate::UGE), - adaptor.getLhs(), adaptor.getRhs(), - mlir::arith::FastMathFlagsAttr::get( - getContext(), mlir::arith::FastMathFlags::none)); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } - case mlir::cir::CmpOpKind::lt: { - if (type.isa()) { - mlir::arith::CmpIPredicate cmpIType; - if (!type.isSignlessInteger()) - llvm_unreachable("integer type not supported in CIR yet"); - cmpIType = mlir::arith::CmpIPredicate::ult; - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - adaptor.getLhs(), adaptor.getRhs()); - } else if (type.isa()) { - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpFPredicateAttr::get( - getContext(), mlir::arith::CmpFPredicate::ULT), - adaptor.getLhs(), adaptor.getRhs(), - mlir::arith::FastMathFlagsAttr::get( - getContext(), mlir::arith::FastMathFlags::none)); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } - case mlir::cir::CmpOpKind::le: { - if (type.isa()) { - mlir::arith::CmpIPredicate cmpIType; - if (!type.isSignlessInteger()) - llvm_unreachable("integer type not supported in CIR yet"); - cmpIType = mlir::arith::CmpIPredicate::ule; - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpIPredicateAttr::get(getContext(), cmpIType), - adaptor.getLhs(), adaptor.getRhs()); - } else if (type.isa()) { - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpFPredicateAttr::get( - getContext(), mlir::arith::CmpFPredicate::ULE), - adaptor.getLhs(), adaptor.getRhs(), - mlir::arith::FastMathFlagsAttr::get( - getContext(), mlir::arith::FastMathFlags::none)); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } - case mlir::cir::CmpOpKind::eq: { - if (type.isa()) { - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpIPredicateAttr::get(getContext(), - mlir::arith::CmpIPredicate::eq), - adaptor.getLhs(), adaptor.getRhs()); - } else if (type.isa()) { - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpFPredicateAttr::get( - getContext(), mlir::arith::CmpFPredicate::UEQ), - adaptor.getLhs(), adaptor.getRhs(), - mlir::arith::FastMathFlagsAttr::get( - getContext(), mlir::arith::FastMathFlags::none)); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } - case mlir::cir::CmpOpKind::ne: { - if (type.isa()) { - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpIPredicateAttr::get(getContext(), - mlir::arith::CmpIPredicate::ne), - adaptor.getLhs(), adaptor.getRhs()); - } else if (type.isa()) { - mlirResult = rewriter.create( - op.getLoc(), integerType, - mlir::arith::CmpFPredicateAttr::get( - getContext(), mlir::arith::CmpFPredicate::UNE), - adaptor.getLhs(), adaptor.getRhs(), - mlir::arith::FastMathFlagsAttr::get( - getContext(), mlir::arith::FastMathFlags::none)); - } else { - llvm_unreachable("Unknown Operand Type"); - } - break; - } + + if (auto ty = type.dyn_cast()) { + auto kind = convertCmpKindToCmpIPredicate(op.getKind(), ty.isSigned()); + mlirResult = rewriter.create( + op.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else if (auto ty = type.dyn_cast()) { + auto kind = convertCmpKindToCmpFPredicate(op.getKind()); + mlirResult = rewriter.create( + op.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else if (auto ty = type.dyn_cast()) { + llvm_unreachable("pointer comparison not supported yet"); + } else { + return op.emitError() << "unsupported type for CmpOp: " << type; } // MLIR comparison ops return i1, but cir::CmpOp returns the same type as @@ -1143,6 +1029,39 @@ class CIRVectorExtractLowering } }; +class CIRVectorCmpOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecCmpOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert(op.getType().isa() && + op.getLhs().getType().isa() && + op.getRhs().getType().isa() && + "Vector compare with non-vector type"); + auto elementType = + op.getLhs().getType().cast().getEltType(); + mlir::Value bitResult; + if (auto intType = elementType.dyn_cast()) { + bitResult = rewriter.create( + op.getLoc(), + convertCmpKindToCmpIPredicate(op.getKind(), intType.isSigned()), + adaptor.getLhs(), adaptor.getRhs()); + } else if (elementType.isa()) { + bitResult = rewriter.create( + op.getLoc(), convertCmpKindToCmpFPredicate(op.getKind()), + adaptor.getLhs(), adaptor.getRhs()); + } else { + return op.emitError() << "unsupported type for VecCmpOp: " << elementType; + } + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType()), bitResult); + return mlir::success(); + } +}; + class CIRCastOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -1345,22 +1264,22 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); - patterns - .add( - converter, patterns.getContext()); + patterns.add< + CIRCmpOpLowering, CIRCallOpLowering, CIRUnaryOpLowering, CIRBinOpLowering, + CIRLoadOpLowering, CIRConstantOpLowering, CIRStoreOpLowering, + CIRAllocaOpLowering, CIRFuncOpLowering, CIRScopeOpLowering, + CIRBrCondOpLowering, CIRTernaryOpLowering, CIRYieldOpLowering, + CIRCosOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, + CIRCastOpLowering, CIRPtrStrideOpLowering, CIRSqrtOpLowering, + CIRCeilOpLowering, CIRExp2OpLowering, CIRExpOpLowering, CIRFAbsOpLowering, + CIRFloorOpLowering, CIRLog10OpLowering, CIRLog2OpLowering, + CIRLogOpLowering, CIRRoundOpLowering, CIRPtrStrideOpLowering, + CIRSinOpLowering, CIRShiftOpLowering, CIRBitClzOpLowering, + CIRBitCtzOpLowering, CIRBitPopcountOpLowering, CIRBitClrsbOpLowering, + CIRBitFfsOpLowering, CIRBitParityOpLowering, CIRIfOpLowering, + CIRVectorCreateLowering, CIRVectorInsertLowering, + CIRVectorExtractLowering, CIRVectorCmpOpLowering>(converter, + patterns.getContext()); } static mlir::TypeConverter prepareTypeConverter() { diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h b/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h index 753e6b7d0528..c8b3b4a5bc12 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h @@ -4,6 +4,7 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" template mlir::Value getConst(mlir::ConversionPatternRewriter &rewriter, @@ -37,4 +38,46 @@ mlir::Value createIntCast(mlir::ConversionPatternRewriter &rewriter, return rewriter.create(loc, dstTy, src); } +mlir::arith::CmpIPredicate +convertCmpKindToCmpIPredicate(mlir::cir::CmpOpKind kind, bool isSigned) { + using CIR = mlir::cir::CmpOpKind; + using arithCmpI = mlir::arith::CmpIPredicate; + switch (kind) { + case CIR::eq: + return arithCmpI::eq; + case CIR::ne: + return arithCmpI::ne; + case CIR::lt: + return (isSigned ? arithCmpI::slt : arithCmpI::ult); + case CIR::le: + return (isSigned ? arithCmpI::sle : arithCmpI::ule); + case CIR::gt: + return (isSigned ? arithCmpI::sgt : arithCmpI::ugt); + case CIR::ge: + return (isSigned ? arithCmpI::sge : arithCmpI::uge); + } + llvm_unreachable("Unknown CmpOpKind"); +} + +mlir::arith::CmpFPredicate +convertCmpKindToCmpFPredicate(mlir::cir::CmpOpKind kind) { + using CIR = mlir::cir::CmpOpKind; + using arithCmpF = mlir::arith::CmpFPredicate; + switch (kind) { + case CIR::eq: + return arithCmpF::OEQ; + case CIR::ne: + return arithCmpF::UNE; + case CIR::lt: + return arithCmpF::OLT; + case CIR::le: + return arithCmpF::OLE; + case CIR::gt: + return arithCmpF::OGT; + case CIR::ge: + return arithCmpF::OGE; + } + llvm_unreachable("Unknown CmpOpKind"); +} + #endif \ No newline at end of file diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir deleted file mode 100644 index 59db7ccb7959..000000000000 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-fp.cir +++ /dev/null @@ -1,68 +0,0 @@ -// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM - -module { - cir.func @foo() { - %0 = cir.alloca !cir.float, !cir.ptr, ["c"] {alignment = 4 : i64} - %1 = cir.alloca !cir.float, !cir.ptr, ["d"] {alignment = 4 : i64} - %2 = cir.alloca !cir.float, !cir.ptr, ["y", init] {alignment = 4 : i64} - %3 = cir.alloca !cir.double, !cir.ptr, ["e"] {alignment = 8 : i64} - %4 = cir.alloca !cir.double, !cir.ptr, ["f"] {alignment = 8 : i64} - %5 = cir.alloca !cir.double, !cir.ptr, ["g", init] {alignment = 8 : i64} - %6 = cir.load %0 : !cir.ptr, !cir.float - %7 = cir.load %1 : !cir.ptr, !cir.float - %8 = cir.binop(mul, %6, %7) : !cir.float - cir.store %8, %2 : !cir.float, !cir.ptr - %9 = cir.load %2 : !cir.ptr, !cir.float - %10 = cir.load %1 : !cir.ptr, !cir.float - %11 = cir.binop(div, %9, %10) : !cir.float - cir.store %11, %2 : !cir.float, !cir.ptr - %12 = cir.load %2 : !cir.ptr, !cir.float - %13 = cir.load %1 : !cir.ptr, !cir.float - %14 = cir.binop(add, %12, %13) : !cir.float - cir.store %14, %2 : !cir.float, !cir.ptr - %15 = cir.load %2 : !cir.ptr, !cir.float - %16 = cir.load %1 : !cir.ptr, !cir.float - %17 = cir.binop(sub, %15, %16) : !cir.float - cir.store %17, %2 : !cir.float, !cir.ptr - %18 = cir.load %3 : !cir.ptr, !cir.double - %19 = cir.load %4 : !cir.ptr, !cir.double - %20 = cir.binop(add, %18, %19) : !cir.double - cir.store %20, %5 : !cir.double, !cir.ptr - %21 = cir.load %3 : !cir.ptr, !cir.double - %22 = cir.load %4 : !cir.ptr, !cir.double - %23 = cir.binop(sub, %21, %22) : !cir.double - cir.store %23, %5 : !cir.double, !cir.ptr - %24 = cir.load %3 : !cir.ptr, !cir.double - %25 = cir.load %4 : !cir.ptr, !cir.double - %26 = cir.binop(mul, %24, %25) : !cir.double - cir.store %26, %5 : !cir.double, !cir.ptr - %27 = cir.load %3 : !cir.ptr, !cir.double - %28 = cir.load %4 : !cir.ptr, !cir.double - %29 = cir.binop(div, %27, %28) : !cir.double - cir.store %29, %5 : !cir.double, !cir.ptr - cir.return - } -} - -// MLIR: = memref.alloca() {alignment = 4 : i64} : memref -// MLIR: = memref.alloca() {alignment = 8 : i64} : memref -// MLIR: = arith.mulf {{.*}} : f32 -// MLIR: = arith.divf -// MLIR: = arith.addf -// MLIR: = arith.subf -// MLIR: = arith.addf {{.*}} : f64 -// MLIR: = arith.subf -// MLIR: = arith.mulf -// MLIR: = arith.divf - -// LLVM: = alloca float, i64 -// LLVM: = alloca double, i64 -// LLVM: = fmul float -// LLVM: = fdiv float -// LLVM: = fadd float -// LLVM: = fsub float -// LLVM: = fadd double -// LLVM: = fsub double -// LLVM: = fmul double -// LLVM: = fdiv double diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir b/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir deleted file mode 100644 index eb5b747d8761..000000000000 --- a/clang/test/CIR/Lowering/ThroughMLIR/binop-unsigned-int.cir +++ /dev/null @@ -1,78 +0,0 @@ -// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM -!u32i = !cir.int - -module { - cir.func @foo() { - %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} - %1 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} - %2 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} - %3 = cir.const #cir.int<2> : !u32i cir.store %3, %0 : !u32i, !cir.ptr - %4 = cir.const #cir.int<1> : !u32i cir.store %4, %1 : !u32i, !cir.ptr - %5 = cir.load %0 : !cir.ptr, !u32i - %6 = cir.load %1 : !cir.ptr, !u32i - %7 = cir.binop(mul, %5, %6) : !u32i - cir.store %7, %2 : !u32i, !cir.ptr - %8 = cir.load %2 : !cir.ptr, !u32i - %9 = cir.load %1 : !cir.ptr, !u32i - %10 = cir.binop(div, %8, %9) : !u32i - cir.store %10, %2 : !u32i, !cir.ptr - %11 = cir.load %2 : !cir.ptr, !u32i - %12 = cir.load %1 : !cir.ptr, !u32i - %13 = cir.binop(rem, %11, %12) : !u32i - cir.store %13, %2 : !u32i, !cir.ptr - %14 = cir.load %2 : !cir.ptr, !u32i - %15 = cir.load %1 : !cir.ptr, !u32i - %16 = cir.binop(add, %14, %15) : !u32i - cir.store %16, %2 : !u32i, !cir.ptr - %17 = cir.load %2 : !cir.ptr, !u32i - %18 = cir.load %1 : !cir.ptr, !u32i - %19 = cir.binop(sub, %17, %18) : !u32i - cir.store %19, %2 : !u32i, !cir.ptr - // should move to cir.shift, which only accepts - // CIR types. - // %20 = cir.load %2 : !cir.ptr, !u32i - // %21 = cir.load %1 : !cir.ptr, !u32i - // %22 = cir.binop(shr, %20, %21) : !u32i - // cir.store %22, %2 : !u32i, !cir.ptr - // %23 = cir.load %2 : !cir.ptr, !u32i - // %24 = cir.load %1 : !cir.ptr, !u32i - // %25 = cir.binop(shl, %23, %24) : !u32i - // cir.store %25, %2 : !u32i, !cir.ptr - %26 = cir.load %2 : !cir.ptr, !u32i - %27 = cir.load %1 : !cir.ptr, !u32i - %28 = cir.binop(and, %26, %27) : !u32i - cir.store %28, %2 : !u32i, !cir.ptr - %29 = cir.load %2 : !cir.ptr, !u32i - %30 = cir.load %1 : !cir.ptr, !u32i - %31 = cir.binop(xor, %29, %30) : !u32i - cir.store %31, %2 : !u32i, !cir.ptr - %32 = cir.load %2 : !cir.ptr, !u32i - %33 = cir.load %1 : !cir.ptr, !u32i - %34 = cir.binop(or, %32, %33) : !u32i - cir.store %34, %2 : !u32i, !cir.ptr - cir.return - } -} - -// MLIR: = arith.muli -// MLIR: = arith.divui -// MLIR: = arith.remui -// MLIR: = arith.addi -// MLIR: = arith.subi -// arith.shrui -// arith.shli -// MLIR: = arith.andi -// MLIR: = arith.xori -// MLIR: = arith.ori - -// LLVM: = mul i32 -// LLVM: = udiv i32 -// LLVM: = urem i32 -// LLVM: = add i32 -// LLVM: = sub i32 -// = lshr i32 -// = shl i32 -// LLVM: = and i32 -// LLVM: = xor i32 -// LLVM: = or i32 diff --git a/clang/test/CIR/Lowering/ThroughMLIR/binop.cpp b/clang/test/CIR/Lowering/ThroughMLIR/binop.cpp new file mode 100644 index 000000000000..7f3c8b98cfd1 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/binop.cpp @@ -0,0 +1,77 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +void testSignedIntBinOps(int a, int b) { + int x = a * b; + x = x / b; + x = x % b; + x = x + b; + x = x - b; + x = x >> b; + x = x << b; + x = x & b; + x = x ^ b; + x = x | b; +} + +// CHECK: func.func @_Z19testSignedIntBinOpsii +// CHECK: %[[VAR2:.*]] = arith.muli %[[VAR0:.*]], %[[VAR1:.*]] : i32 +// CHECK: %[[VAR5:.*]] = arith.divsi %[[VAR3:.*]], %[[VAR4:.*]] : i32 +// CHECK: %[[VAR8:.*]] = arith.remsi %[[VAR6:.*]], %[[VAR7:.*]] : i32 +// CHECK: %[[VAR11:.*]] = arith.addi %[[VAR9:.*]], %[[VAR10:.*]] : i32 +// CHECK: %[[VAR14:.*]] = arith.subi %[[VAR12:.*]], %[[VAR13:.*]] : i32 +// CHECK: %[[VAR18:.*]] = arith.shrsi %[[VAR15:.*]], %[[VAR16:.*]] : i32 +// CHECK: %[[VAR22:.*]] = arith.shli %[[VAR19:.*]], %[[VAR20:.*]] : i32 +// CHECK: %[[VAR25:.*]] = arith.andi %[[VAR23:.*]], %[[VAR24:.*]] : i32 +// CHECK: %[[VAR28:.*]] = arith.xori %[[VAR26:.*]], %[[VAR27:.*]] : i32 +// CHECK: %[[VAR31:.*]] = arith.ori %[[VAR29:.*]], %[[VAR30:.*]] : i32 +// CHECK: } + +void testUnSignedIntBinOps(unsigned a, unsigned b) { + unsigned x = a * b; + x = x / b; + x = x % b; + x = x + b; + x = x - b; + x = x >> b; + x = x << b; + x = x & b; + x = x ^ b; + x = x | b; +} + +// CHECK: func.func @_Z21testUnSignedIntBinOpsjj +// CHECK: %[[VAR2:.*]] = arith.muli %[[VAR0:.*]], %[[VAR1:.*]] : i32 +// CHECK: %[[VAR5:.*]] = arith.divui %[[VAR3:.*]], %[[VAR4:.*]] : i32 +// CHECK: %[[VAR8:.*]] = arith.remui %[[VAR6:.*]], %[[VAR7:.*]] : i32 +// CHECK: %[[VAR11:.*]] = arith.addi %[[VAR9:.*]], %[[VAR10:.*]] : i32 +// CHECK: %[[VAR14:.*]] = arith.subi %[[VAR12:.*]], %[[VAR13:.*]] : i32 +// CHECK: %[[VAR18:.*]] = arith.shrui %[[VAR15:.*]], %[[VAR16:.*]] : i32 +// CHECK: %[[VAR22:.*]] = arith.shli %[[VAR19:.*]], %[[VAR20:.*]] : i32 +// CHECK: %[[VAR25:.*]] = arith.andi %[[VAR23:.*]], %[[VAR24:.*]] : i32 +// CHECK: %[[VAR28:.*]] = arith.xori %[[VAR26:.*]], %[[VAR27:.*]] : i32 +// CHECK: %[[VAR31:.*]] = arith.ori %[[VAR29:.*]], %[[VAR30:.*]] : i32 +// CHECK: } + +void testFloatingPointBinOps(float a, float b, double c, double d) { + float e = a * b; + e = a / b; + e = a + b; + e = a - b; + + double f = a * b; + f = c * d; + f = c / d; + f = c + d; + f = c - d; +} + +// CHECK: func.func @_Z23testFloatingPointBinOpsffdd +// CHECK: %[[VAR2:.*]] = arith.mulf %[[VAR0:.*]], %[[VAR1:.*]] : f32 +// CHECK: %[[VAR5:.*]] = arith.divf %[[VAR3:.*]], %[[VAR4:.*]] : f32 +// CHECK: %[[VAR8:.*]] = arith.addf %[[VAR6:.*]], %[[VAR7:.*]] : f32 +// CHECK: %[[VAR11:.*]] = arith.subf %[[VAR9:.*]], %[[VAR10:.*]] : f32 +// CHECK: %[[VAR14:.*]] = arith.mulf %[[VAR12:.*]], %[[VAR13:.*]] : f64 +// CHECK: %[[VAR18:.*]] = arith.divf %[[VAR16:.*]], %[[VAR17:.*]] : f64 +// CHECK: %[[VAR22:.*]] = arith.addf %[[VAR20:.*]], %[[VAR21:.*]] : f64 +// CHECK: %[[VAR26:.*]] = arith.subf %[[VAR24:.*]], %[[VAR25:.*]] : f64 diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir deleted file mode 100644 index 0efd41de816c..000000000000 --- a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cir +++ /dev/null @@ -1,76 +0,0 @@ -// RUN: cir-opt %s -cir-to-mlir -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-opt %s -cir-to-mlir -cir-mlir-to-llvm -o - | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM - -!s32i = !cir.int -module { - cir.func @foo() { - %0 = cir.alloca !s32i, !cir.ptr, ["a"] {alignment = 4 : i64} - %1 = cir.alloca !s32i, !cir.ptr, ["b"] {alignment = 4 : i64} - %2 = cir.alloca !cir.float, !cir.ptr, ["c"] {alignment = 4 : i64} - %3 = cir.alloca !cir.float, !cir.ptr, ["d"] {alignment = 4 : i64} - %4 = cir.alloca !cir.bool, !cir.ptr, ["e"] {alignment = 1 : i64} - %5 = cir.load %0 : !cir.ptr, !s32i - %6 = cir.load %1 : !cir.ptr, !s32i - %7 = cir.cmp(gt, %5, %6) : !s32i, !cir.bool - %8 = cir.load %0 : !cir.ptr, !s32i - %9 = cir.load %1 : !cir.ptr, !s32i - %10 = cir.cmp(eq, %8, %9) : !s32i, !cir.bool - %11 = cir.load %0 : !cir.ptr, !s32i - %12 = cir.load %1 : !cir.ptr, !s32i - %13 = cir.cmp(lt, %11, %12) : !s32i, !cir.bool - %14 = cir.load %0 : !cir.ptr, !s32i - %15 = cir.load %1 : !cir.ptr, !s32i - %16 = cir.cmp(ge, %14, %15) : !s32i, !cir.bool - %17 = cir.load %0 : !cir.ptr, !s32i - %18 = cir.load %1 : !cir.ptr, !s32i - %19 = cir.cmp(ne, %17, %18) : !s32i, !cir.bool - %20 = cir.load %0 : !cir.ptr, !s32i - %21 = cir.load %1 : !cir.ptr, !s32i - %22 = cir.cmp(le, %20, %21) : !s32i, !cir.bool - %23 = cir.load %2 : !cir.ptr, !cir.float - %24 = cir.load %3 : !cir.ptr, !cir.float - %25 = cir.cmp(gt, %23, %24) : !cir.float, !cir.bool - %26 = cir.load %2 : !cir.ptr, !cir.float - %27 = cir.load %3 : !cir.ptr, !cir.float - %28 = cir.cmp(eq, %26, %27) : !cir.float, !cir.bool - %29 = cir.load %2 : !cir.ptr, !cir.float - %30 = cir.load %3 : !cir.ptr, !cir.float - %31 = cir.cmp(lt, %29, %30) : !cir.float, !cir.bool - %32 = cir.load %2 : !cir.ptr, !cir.float - %33 = cir.load %3 : !cir.ptr, !cir.float - %34 = cir.cmp(ge, %32, %33) : !cir.float, !cir.bool - %35 = cir.load %2 : !cir.ptr, !cir.float - %36 = cir.load %3 : !cir.ptr, !cir.float - %37 = cir.cmp(ne, %35, %36) : !cir.float, !cir.bool - %38 = cir.load %2 : !cir.ptr, !cir.float - %39 = cir.load %3 : !cir.ptr, !cir.float - %40 = cir.cmp(le, %38, %39) : !cir.float, !cir.bool - cir.return - } -} - -// MLIR: = arith.cmpi ugt -// MLIR: = arith.cmpi eq, -// MLIR: = arith.cmpi ult, -// MLIR: = arith.cmpi uge, -// MLIR: = arith.cmpi ne, -// MLIR: = arith.cmpi ule, -// MLIR: = arith.cmpf ugt -// MLIR: = arith.cmpf ueq, -// MLIR: = arith.cmpf ult, -// MLIR: = arith.cmpf uge, -// MLIR: = arith.cmpf une, -// MLIR: = arith.cmpf ule, - -// LLVM: icmp ugt i32 -// LLVM: icmp eq i32 -// LLVM: icmp ult i32 -// LLVM: icmp uge i32 -// LLVM: icmp ne i32 -// LLVM: icmp ule i32 -// LLVM: fcmp ugt float -// LLVM: fcmp ueq float -// LLVM: fcmp ult float -// LLVM: fcmp uge float -// LLVM: fcmp une float -// LLVM: fcmp ule float diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cpp b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cpp new file mode 100644 index 000000000000..fcb9247bfb8f --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cpp @@ -0,0 +1,185 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +bool testSignedIntCmpOps(int a, int b) { + // CHECK: %[[ALLOC1:.+]] = memref.alloca() {alignment = 4 : i64} : memref + // CHECK: %[[ALLOC2:.+]] = memref.alloca() {alignment = 4 : i64} : memref + // CHECK: %[[ALLOC3:.+]] = memref.alloca() {alignment = 1 : i64} : memref + // CHECK: %[[ALLOC4:.+]] = memref.alloca() {alignment = 1 : i64} : memref + // CHECK: memref.store %arg0, %[[ALLOC1]][] : memref + // CHECK: memref.store %arg1, %[[ALLOC2]][] : memref + + bool x = a == b; + + // CHECK: %[[LOAD0:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD1:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP0:.+]] = arith.cmpi eq, %[[LOAD0]], %[[LOAD1]] : i32 + // CHECK: %[[EXT0:.+]] = arith.extui %[[CMP0]] : i1 to i8 + // CHECK: memref.store %[[EXT0]], %[[ALLOC4]][] : memref + + x = a != b; + + // CHECK: %[[LOAD2:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD3:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP1:.+]] = arith.cmpi ne, %[[LOAD2]], %[[LOAD3]] : i32 + // CHECK: %[[EXT1:.+]] = arith.extui %[[CMP1]] : i1 to i8 + // CHECK: memref.store %[[EXT1]], %[[ALLOC4]][] : memref + + x = a < b; + + // CHECK: %[[LOAD4:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD5:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP2:.+]] = arith.cmpi slt, %[[LOAD4]], %[[LOAD5]] : i32 + // CHECK: %[[EXT2:.+]] = arith.extui %[[CMP2]] : i1 to i8 + // CHECK: memref.store %[[EXT2]], %[[ALLOC4]][] : memref + + x = a <= b; + + // CHECK: %[[LOAD6:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD7:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP3:.+]] = arith.cmpi sle, %[[LOAD6]], %[[LOAD7]] : i32 + // CHECK: %[[EXT3:.+]] = arith.extui %[[CMP3]] : i1 to i8 + // CHECK: memref.store %[[EXT3]], %[[ALLOC4]][] : memref + + x = a > b; + + // CHECK: %[[LOAD8:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD9:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP4:.+]] = arith.cmpi sgt, %[[LOAD8]], %[[LOAD9]] : i32 + // CHECK: %[[EXT4:.+]] = arith.extui %[[CMP4]] : i1 to i8 + // CHECK: memref.store %[[EXT4]], %[[ALLOC4]][] : memref + + x = a >= b; + + // CHECK: %[[LOAD10:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD11:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP5:.+]] = arith.cmpi sge, %[[LOAD10]], %[[LOAD11]] : i32 + // CHECK: %[[EXT5:.+]] = arith.extui %[[CMP5]] : i1 to i8 + // CHECK: memref.store %[[EXT5]], %[[ALLOC4]][] : memref + + // CHECK: %[[LOAD12:.+]] = memref.load %[[ALLOC4]][] : memref + // CHECK: memref.store %[[LOAD12]], %[[ALLOC3]][] : memref + // CHECK: %[[LOAD13:.+]] = memref.load %[[ALLOC3]][] : memref + // CHECK: return %[[LOAD13]] : i8 + return x; +} + +bool testUnSignedIntBinOps(unsigned a, unsigned b) { + // CHECK: %[[ALLOC1:.+]] = memref.alloca() {alignment = 4 : i64} : memref + // CHECK: %[[ALLOC2:.+]] = memref.alloca() {alignment = 4 : i64} : memref + // CHECK: %[[ALLOC3:.+]] = memref.alloca() {alignment = 1 : i64} : memref + // CHECK: %[[ALLOC4:.+]] = memref.alloca() {alignment = 1 : i64} : memref + // CHECK: memref.store %arg0, %[[ALLOC1]][] : memref + // CHECK: memref.store %arg1, %[[ALLOC2]][] : memref + + bool x = a == b; + + // CHECK: %[[LOAD0:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD1:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP0:.+]] = arith.cmpi eq, %[[LOAD0]], %[[LOAD1]] : i32 + // CHECK: %[[EXT0:.+]] = arith.extui %[[CMP0]] : i1 to i8 + // CHECK: memref.store %[[EXT0]], %[[ALLOC4]][] : memref + + x = a != b; + + // CHECK: %[[LOAD2:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD3:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP1:.+]] = arith.cmpi ne, %[[LOAD2]], %[[LOAD3]] : i32 + // CHECK: %[[EXT1:.+]] = arith.extui %[[CMP1]] : i1 to i8 + // CHECK: memref.store %[[EXT1]], %[[ALLOC4]][] : memref + + x = a < b; + + // CHECK: %[[LOAD4:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD5:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP2:.+]] = arith.cmpi ult, %[[LOAD4]], %[[LOAD5]] : i32 + // CHECK: %[[EXT2:.+]] = arith.extui %[[CMP2]] : i1 to i8 + // CHECK: memref.store %[[EXT2]], %[[ALLOC4]][] : memref + + x = a <= b; + + // CHECK: %[[LOAD6:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD7:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP3:.+]] = arith.cmpi ule, %[[LOAD6]], %[[LOAD7]] : i32 + // CHECK: %[[EXT3:.+]] = arith.extui %[[CMP3]] : i1 to i8 + // CHECK: memref.store %[[EXT3]], %[[ALLOC4]][] : memref + + x = a > b; + + // CHECK: %[[LOAD8:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD9:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP4:.+]] = arith.cmpi ugt, %[[LOAD8]], %[[LOAD9]] : i32 + // CHECK: %[[EXT4:.+]] = arith.extui %[[CMP4]] : i1 to i8 + // CHECK: memref.store %[[EXT4]], %[[ALLOC4]][] : memref + + x = a >= b; + + // CHECK: %[[LOAD10:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD11:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP5:.+]] = arith.cmpi uge, %[[LOAD10]], %[[LOAD11]] : i32 + // CHECK: %[[EXT5:.+]] = arith.extui %[[CMP5]] : i1 to i8 + // CHECK: memref.store %[[EXT5]], %[[ALLOC4]][] : memref + + return x; + // CHECK: return +} + +bool testFloatingPointCmpOps(float a, float b) { + // CHECK: %[[ALLOC1:.+]] = memref.alloca() {alignment = 4 : i64} : memref + // CHECK: %[[ALLOC2:.+]] = memref.alloca() {alignment = 4 : i64} : memref + // CHECK: %[[ALLOC3:.+]] = memref.alloca() {alignment = 1 : i64} : memref + // CHECK: %[[ALLOC4:.+]] = memref.alloca() {alignment = 1 : i64} : memref + // CHECK: memref.store %arg0, %[[ALLOC1]][] : memref + // CHECK: memref.store %arg1, %[[ALLOC2]][] : memref + + bool x = a == b; + + // CHECK: %[[LOAD0:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD1:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP0:.+]] = arith.cmpf oeq, %[[LOAD0]], %[[LOAD1]] : f32 + // CHECK: %[[EXT0:.+]] = arith.extui %[[CMP0]] : i1 to i8 + // CHECK: memref.store %[[EXT0]], %[[ALLOC4]][] : memref + + x = a != b; + + // CHECK: %[[LOAD2:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD3:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP1:.+]] = arith.cmpf une, %[[LOAD2]], %[[LOAD3]] : f32 + // CHECK: %[[EXT1:.+]] = arith.extui %[[CMP1]] : i1 to i8 + // CHECK: memref.store %[[EXT1]], %[[ALLOC4]][] : memref + + x = a < b; + + // CHECK: %[[LOAD4:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD5:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP2:.+]] = arith.cmpf olt, %[[LOAD4]], %[[LOAD5]] : f32 + // CHECK: %[[EXT2:.+]] = arith.extui %[[CMP2]] : i1 to i8 + // CHECK: memref.store %[[EXT2]], %[[ALLOC4]][] : memref + + x = a <= b; + + // CHECK: %[[LOAD6:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD7:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP3:.+]] = arith.cmpf ole, %[[LOAD6]], %[[LOAD7]] : f32 + // CHECK: %[[EXT3:.+]] = arith.extui %[[CMP3]] : i1 to i8 + // CHECK: memref.store %[[EXT3]], %[[ALLOC4]][] : memref + + x = a > b; + + // CHECK: %[[LOAD8:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD9:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP4:.+]] = arith.cmpf ogt, %[[LOAD8]], %[[LOAD9]] : f32 + // CHECK: %[[EXT4:.+]] = arith.extui %[[CMP4]] : i1 to i8 + // CHECK: memref.store %[[EXT4]], %[[ALLOC4]][] : memref + + x = a >= b; + + // CHECK: %[[LOAD10:.+]] = memref.load %[[ALLOC1]][] : memref + // CHECK: %[[LOAD11:.+]] = memref.load %[[ALLOC2]][] : memref + // CHECK: %[[CMP5:.+]] = arith.cmpf oge, %[[LOAD10]], %[[LOAD11]] : f32 + // CHECK: %[[EXT5:.+]] = arith.extui %[[CMP5]] : i1 to i8 + // CHECK: memref.store %[[EXT5]], %[[ALLOC4]][] : memref + + return x; + // CHECK: return +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/ThroughMLIR/if.c b/clang/test/CIR/Lowering/ThroughMLIR/if.c index 5783cec9fe1d..4ff228514cd6 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/if.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/if.c @@ -21,7 +21,7 @@ void foo() { //CHECK: memref.alloca_scope { //CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref //CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 -//CHECK: %[[ONE:.+]] = arith.cmpi ugt, %[[ZERO]], %[[C0_I32_1]] : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi sgt, %[[ZERO]], %[[C0_I32_1]] : i32 //CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 //CHECK: %[[C0_I32_2:.+]] = arith.constant 0 : i32 //CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO]], %[[C0_I32_2]] : i32 @@ -60,7 +60,7 @@ void foo2() { //CHECK: memref.alloca_scope { //CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref //CHECK: %[[C3_I32:.+]] = arith.constant 3 : i32 -//CHECK: %[[ONE:.+]] = arith.cmpi ult, %[[ZERO]], %[[C3_I32]] : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C3_I32]] : i32 //CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 //CHECK: %[[C0_I32_1]] = arith.constant 0 : i32 //CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO]], %[[C0_I32_1]] : i32 @@ -100,7 +100,7 @@ void foo3() { //CHECK: memref.alloca_scope { //CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref //CHECK: %[[C3_I32:.+]] = arith.constant 3 : i32 -//CHECK: %[[ONE:.+]] = arith.cmpi ult, %[[ZERO]], %[[C3_I32]] : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C3_I32]] : i32 //CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 //CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 //CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO:.+]], %[[C0_I32_1]] : i32 @@ -113,7 +113,7 @@ void foo3() { //CHECK: memref.alloca_scope { //CHECK: %[[SIX:.+]] = memref.load %[[alloca_2]][] : memref //CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 -//CHECK: %[[SEVEN:.+]] = arith.cmpi ugt, %[[SIX]], %[[C2_I32_3]] : i32 +//CHECK: %[[SEVEN:.+]] = arith.cmpi sgt, %[[SIX]], %[[C2_I32_3]] : i32 //CHECK: %[[EIGHT:.+]] = arith.extui %[[SEVEN]] : i1 to i32 //CHECK: %[[C0_I32_4:.+]] = arith.constant 0 : i32 //CHECK: %[[NINE:.+]] = arith.cmpi ne, %[[EIGHT]], %[[C0_I32_4]] : i32 diff --git a/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir index bcac62912fa9..ce6f466aebc9 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir @@ -25,7 +25,7 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { } } -// MLIR: %1 = arith.cmpi ugt, %0, %c0_i32 : i32 +// MLIR: %1 = arith.cmpi sgt, %0, %c0_i32 : i32 // MLIR-NEXT: %2 = arith.extui %1 : i1 to i8 // MLIR-NEXT: %3 = arith.trunci %2 : i8 to i1 // MLIR-NEXT: %4 = scf.if %3 -> (i32) { @@ -37,8 +37,8 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: } // MLIR-NEXT: memref.store %4, %alloca_0[] : memref -// MLIR-CANONICALIZE: %[[CMP:.*]] = arith.cmpi ugt +// MLIR-CANONICALIZE: %[[CMP:.*]] = arith.cmpi sgt // MLIR-CANONICALIZE: arith.select %[[CMP]] -// LLVM: %[[CMP:.*]] = icmp ugt +// LLVM: %[[CMP:.*]] = icmp sgt // LLVM: select i1 %[[CMP]] diff --git a/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp b/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp index 75484a1fc7ae..7b1bc1047ece 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp +++ b/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp @@ -10,6 +10,21 @@ void vector_int_test(int x) { // CHECK: %[[ALLOC2:.*]] = memref.alloca() {alignment = 16 : i64} : memref> // CHECK: %[[ALLOC3:.*]] = memref.alloca() {alignment = 16 : i64} : memref> // CHECK: %[[ALLOC4:.*]] = memref.alloca() {alignment = 4 : i64} : memref + // CHECK: %[[ALLOC5:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC6:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC7:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC8:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC9:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC10:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC11:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC12:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC13:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC14:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC15:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC16:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC17:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: %[[ALLOC18:.*]] = memref.alloca() {alignment = 16 : i64} : memref> + // CHECK: memref.store %arg0, %[[ALLOC1]][] : memref vi4 a = { 1, 2, 3, 4 }; @@ -63,5 +78,114 @@ void vector_int_test(int x) { // CHECK: %[[EXTRACT:.*]] = vector.extractelement %[[VEC10]][%[[VAL5]] : i32] : vector<4xi32> // CHECK: memref.store %[[EXTRACT]], %[[ALLOC4]][] : memref + vi4 d = a + b; + + // CHECK: %[[ALLOC0_1:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[ALLOC1_1:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[ALLOC2_1:.*]] = arith.addi %[[ALLOC0_1]], %[[ALLOC1_1]] : vector<4xi32> + // CHECK: memref.store %[[ALLOC2_1]], %[[ALLOC5]][] : memref> + + vi4 e = a - b; + + // CHECK: %[[ALLOC0_2:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[ALLOC1_2:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[ALLOC3_2:.*]] = arith.subi %[[ALLOC0_2]], %[[ALLOC1_2]] : vector<4xi32> + // CHECK: memref.store %[[ALLOC3_2]], %[[ALLOC6]][] : memref> + + vi4 f = a * b; + + // CHECK: %[[ALLOC0_3:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[ALLOC1_3:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[ALLOC4_1:.*]] = arith.muli %[[ALLOC0_3]], %[[ALLOC1_3]] : vector<4xi32> + // CHECK: memref.store %[[ALLOC4_1]], %[[ALLOC7]][] : memref> + + vi4 g = a / b; + + // CHECK: %[[ALLOC0_4:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[ALLOC1_4:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[ALLOC5_1:.*]] = arith.divsi %[[ALLOC0_4]], %[[ALLOC1_4]] : vector<4xi32> + // CHECK: memref.store %[[ALLOC5_1]], %[[ALLOC8]][] : memref> + + vi4 h = a % b; + + // CHECK: %[[ALLOC0_5:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[ALLOC1_5:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[ALLOC6_1:.*]] = arith.remsi %[[ALLOC0_5]], %[[ALLOC1_5]] : vector<4xi32> + // CHECK: memref.store %[[ALLOC6_1]], %[[ALLOC9]][] : memref> + + vi4 i = a & b; + + // CHECK: %[[ALLOC0_6:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[ALLOC1_6:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[ALLOC7_1:.*]] = arith.andi %[[ALLOC0_6]], %[[ALLOC1_6]] : vector<4xi32> + // CHECK: memref.store %[[ALLOC7_1]], %[[ALLOC10]][] : memref> + + vi4 j = a | b; + + // CHECK: %[[ALLOC0_7:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[ALLOC1_7:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[ALLOC8_1:.*]] = arith.ori %[[ALLOC0_7]], %[[ALLOC1_7]] : vector<4xi32> + // CHECK: memref.store %[[ALLOC8_1]], %[[ALLOC11]][] : memref> + + vi4 k = a ^ b; + + // CHECK: %[[ALLOC0_8:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[ALLOC1_8:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[ALLOC9_1:.*]] = arith.xori %[[ALLOC0_8]], %[[ALLOC1_8]] : vector<4xi32> + // CHECK: memref.store %[[ALLOC9_1]], %[[ALLOC12]][] : memref> + + // TODO(cir) : Fix the lowering of unary operators + // vi4 l = +a; + // vi4 m = -a; + // vi4 n = ~a; + + vi4 o = a == b; + + // CHECK: %[[VAL11:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[VAL12:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[CMP_EQ:.*]] = arith.cmpi eq, %[[VAL11]], %[[VAL12]] : vector<4xi32> + // CHECK: %[[EXT_EQ:.*]] = arith.extsi %[[CMP_EQ]] : vector<4xi1> to vector<4xi32> + // CHECK: memref.store %[[EXT_EQ]], %[[ALLOC13]][] : memref> + + vi4 p = a != b; + + // CHECK: %[[VAL13:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[VAL14:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[CMP_NE:.*]] = arith.cmpi ne, %[[VAL13]], %[[VAL14]] : vector<4xi32> + // CHECK: %[[EXT_NE:.*]] = arith.extsi %[[CMP_NE]] : vector<4xi1> to vector<4xi32> + // CHECK: memref.store %[[EXT_NE]], %[[ALLOC14]][] : memref> + + vi4 q = a < b; + + // CHECK: %[[VAL15:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[VAL16:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[CMP_SLT:.*]] = arith.cmpi slt, %[[VAL15]], %[[VAL16]] : vector<4xi32> + // CHECK: %[[EXT_SLT:.*]] = arith.extsi %[[CMP_SLT]] : vector<4xi1> to vector<4xi32> + // CHECK: memref.store %[[EXT_SLT]], %[[ALLOC15]][] : memref> + + vi4 r = a > b; + + // CHECK: %[[VAL17:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[VAL18:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[CMP_SGT:.*]] = arith.cmpi sgt, %[[VAL17]], %[[VAL18]] : vector<4xi32> + // CHECK: %[[EXT_SGT:.*]] = arith.extsi %[[CMP_SGT]] : vector<4xi1> to vector<4xi32> + // CHECK: memref.store %[[EXT_SGT]], %[[ALLOC16]][] : memref> + + vi4 s = a <= b; + + // CHECK: %[[VAL19:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[VAL20:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[CMP_SLE:.*]] = arith.cmpi sle, %[[VAL19]], %[[VAL20]] : vector<4xi32> + // CHECK: %[[EXT_SLE:.*]] = arith.extsi %[[CMP_SLE]] : vector<4xi1> to vector<4xi32> + // CHECK: memref.store %[[EXT_SLE]], %[[ALLOC17]][] : memref> + + vi4 t = a >= b; + + // CHECK: %[[VAL21:.*]] = memref.load %[[ALLOC2]][] : memref> + // CHECK: %[[VAL22:.*]] = memref.load %[[ALLOC3]][] : memref> + // CHECK: %[[CMP_SGE:.*]] = arith.cmpi sge, %[[VAL21]], %[[VAL22]] : vector<4xi32> + // CHECK: %[[EXT_SGE:.*]] = arith.extsi %[[CMP_SGE]] : vector<4xi1> to vector<4xi32> + // CHECK: memref.store %[[EXT_SGE]], %[[ALLOC18]][] : memref> + // CHECK: return } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/while.c b/clang/test/CIR/Lowering/ThroughMLIR/while.c index df459fd2c27a..bdf5f04b2181 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/while.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/while.c @@ -16,7 +16,7 @@ void foo() { //CHECK: scf.while : () -> () { //CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref //CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 -//CHECK: %[[ONE:.+]] = arith.cmpi ult, %[[ZERO:.+]], %[[C2_I32]] : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO:.+]], %[[C2_I32]] : i32 //CHECK: %[[TWO:.+]] = arith.extui %[[ONE:.+]] : i1 to i32 //CHECK: %[[C0_I32_0:.+]] = arith.constant 0 : i32 //CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO:.+]], %[[C0_I32_0]] : i32 From d151f0cf145feda51032185cc470cbc57b666c08 Mon Sep 17 00:00:00 2001 From: akashi Date: Fri, 21 Jun 2024 00:52:18 +0300 Subject: [PATCH 1650/2301] [CIR][CodeGen] Fix missing 'nsw' flag in add, sub, and mul in binop operator (#677) This PR is to fix the missing **nsw** flag in issue #664 regarding add, mul arithmetic operations. there is also a problem with unary operations such as **Inc ,Dec,Plus,Minus and Not** . which should also have 'nsw' flag [example](https://godbolt.org/z/q3o3jsbe1). This part should need to be fixed through lowering. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 39 ++++++++++- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 2 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 19 ++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 67 ++++++++++++++++++- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 13 ++++ clang/test/CIR/CodeGen/binop.cpp | 4 +- clang/test/CIR/CodeGen/bitint.cpp | 2 +- clang/test/CIR/CodeGen/call.c | 4 +- clang/test/CIR/CodeGen/comma.cpp | 2 +- clang/test/CIR/CodeGen/if-constexpr.cpp | 4 +- clang/test/CIR/CodeGen/lambda.cpp | 2 +- clang/test/CIR/CodeGen/loop.cpp | 18 ++--- clang/test/CIR/CodeGen/sourcelocation.cpp | 31 +++++---- clang/test/CIR/CodeGen/switch.cpp | 4 +- clang/test/CIR/CodeGen/vectype-ext.cpp | 8 +-- clang/test/CIR/CodeGen/vectype.cpp | 2 +- clang/test/CIR/Lowering/vectype.cpp | 4 +- 17 files changed, 178 insertions(+), 47 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index cb7bb85617f5..9201f0859be0 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -171,8 +171,21 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); } - mlir::Value createMul(mlir::Value lhs, mlir::Value rhs) { - return createBinop(lhs, mlir::cir::BinOpKind::Mul, rhs); + mlir::Value createMul(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, + bool hasNSW = false) { + auto op = create(lhs.getLoc(), lhs.getType(), + mlir::cir::BinOpKind::Mul, lhs, rhs); + if (hasNUW) + op.setNoUnsignedWrap(true); + if (hasNSW) + op.setNoSignedWrap(true); + return op; + } + mlir::Value createNSWMul(mlir::Value lhs, mlir::Value rhs) { + return createMul(lhs, rhs, false, true); + } + mlir::Value createNUWAMul(mlir::Value lhs, mlir::Value rhs) { + return createMul(lhs, rhs, true, false); } mlir::Value createMul(mlir::Value lhs, llvm::APInt rhs) { @@ -235,6 +248,28 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createSub(lhs, rhs, false, true); } + mlir::Value createNUWSub(mlir::Value lhs, mlir::Value rhs) { + return createSub(lhs, rhs, true, false); + } + + mlir::Value createAdd(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, + bool hasNSW = false) { + auto op = create(lhs.getLoc(), lhs.getType(), + mlir::cir::BinOpKind::Add, lhs, rhs); + if (hasNUW) + op.setNoUnsignedWrap(true); + if (hasNSW) + op.setNoSignedWrap(true); + return op; + } + + mlir::Value createNSWAdd(mlir::Value lhs, mlir::Value rhs) { + return createAdd(lhs, rhs, false, true); + } + mlir::Value createNUWAdd(mlir::Value lhs, mlir::Value rhs) { + return createAdd(lhs, rhs, true, false); + } + struct BinOpOverflowResults { mlir::Value result; mlir::Value overflow; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 725cf6871083..d2d9973c7e9b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -177,7 +177,7 @@ class StructType }; bool isAnyFloatingPointType(mlir::Type t); - +bool isFPOrFPVectorTy(mlir::Type); } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 4d5676c691fd..ad57fa91a70a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -648,6 +648,25 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { lhs, rhs); } + mlir::Value createFAdd(mlir::Value lhs, mlir::Value rhs) { + assert(!MissingFeatures::metaDataNode()); + if (IsFPConstrained) + llvm_unreachable("Constrained FP NYI"); + + assert(!MissingFeatures::foldBinOpFMF()); + return create(lhs.getLoc(), mlir::cir::BinOpKind::Add, + lhs, rhs); + } + mlir::Value createFMul(mlir::Value lhs, mlir::Value rhs) { + assert(!MissingFeatures::metaDataNode()); + if (IsFPConstrained) + llvm_unreachable("Constrained FP NYI"); + + assert(!MissingFeatures::foldBinOpFMF()); + return create(lhs.getLoc(), mlir::cir::BinOpKind::Mul, + lhs, rhs); + } + mlir::Value createDynCast(mlir::Location loc, mlir::Value src, mlir::cir::PointerType destType, bool isRefCast, mlir::cir::DynamicCastInfoAttr info) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index b969bd9ffdbb..35ec7f0dc5ba 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1289,6 +1289,38 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, } mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) { + if (Ops.CompType->isSignedIntegerOrEnumerationType()) { + switch (CGF.getLangOpts().getSignedOverflowBehavior()) { + case LangOptions::SOB_Defined: + if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return Builder.createMul(Ops.LHS, Ops.RHS); + [[fallthrough]]; + case LangOptions::SOB_Undefined: + if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return Builder.createNSWMul(Ops.LHS, Ops.RHS); + [[fallthrough]]; + case LangOptions::SOB_Trapping: + if (CanElideOverflowCheck(CGF.getContext(), Ops)) + return Builder.createNSWMul(Ops.LHS, Ops.RHS); + llvm_unreachable("NYI"); + } + } + if (Ops.FullType->isConstantMatrixType()) { + llvm_unreachable("NYI"); + } + if (Ops.CompType->isUnsignedIntegerType() && + CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && + !CanElideOverflowCheck(CGF.getContext(), Ops)) + llvm_unreachable("NYI"); + + if (mlir::cir::isFPOrFPVectorTy(Ops.LHS.getType())) { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); + return Builder.createFMul(Ops.LHS, Ops.RHS); + } + + if (Ops.isFixedPointOp()) + llvm_unreachable("NYI"); + return Builder.create( CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); @@ -1308,6 +1340,39 @@ mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { if (Ops.LHS.getType().isa() || Ops.RHS.getType().isa()) return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/false); + if (Ops.CompType->isSignedIntegerOrEnumerationType()) { + switch (CGF.getLangOpts().getSignedOverflowBehavior()) { + case LangOptions::SOB_Defined: + if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return Builder.createAdd(Ops.LHS, Ops.RHS); + [[fallthrough]]; + case LangOptions::SOB_Undefined: + if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) + return Builder.createNSWAdd(Ops.LHS, Ops.RHS); + [[fallthrough]]; + case LangOptions::SOB_Trapping: + if (CanElideOverflowCheck(CGF.getContext(), Ops)) + return Builder.createNSWAdd(Ops.LHS, Ops.RHS); + + llvm_unreachable("NYI"); + } + } + if (Ops.FullType->isConstantMatrixType()) { + llvm_unreachable("NYI"); + } + + if (Ops.CompType->isUnsignedIntegerType() && + CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && + !CanElideOverflowCheck(CGF.getContext(), Ops)) + llvm_unreachable("NYI"); + + if (mlir::cir::isFPOrFPVectorTy(Ops.LHS.getType())) { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); + return Builder.createFAdd(Ops.LHS, Ops.RHS); + } + + if (Ops.isFixedPointOp()) + llvm_unreachable("NYI"); return Builder.create( CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), @@ -1344,7 +1409,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { !CanElideOverflowCheck(CGF.getContext(), Ops)) llvm_unreachable("NYI"); - if (Ops.CompType->isFloatingType()) { + if (mlir::cir::isFPOrFPVectorTy(Ops.LHS.getType())) { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); return Builder.createFSub(Ops.LHS, Ops.RHS); } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 3ab52cc73358..9c8993bcc30f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -802,6 +802,19 @@ bool mlir::cir::isAnyFloatingPointType(mlir::Type t) { mlir::cir::LongDoubleType, mlir::cir::FP80Type>(t); } +//===----------------------------------------------------------------------===// +// Floating-point and Float-point Vecotr type helpers +//===----------------------------------------------------------------------===// + +bool mlir::cir::isFPOrFPVectorTy(mlir::Type t) { + + if (isa(t)) { + return isAnyFloatingPointType( + t.dyn_cast().getEltType()); + } + return isAnyFloatingPointType(t); +} + //===----------------------------------------------------------------------===// // FuncType Definitions //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index 30b54beab761..29f6e89282b0 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -14,10 +14,10 @@ void b0(int a, int b) { x = x | b; } -// CHECK: = cir.binop(mul, %3, %4) : !s32i +// CHECK: = cir.binop(mul, %3, %4) nsw : !s32i // CHECK: = cir.binop(div, %6, %7) : !s32i // CHECK: = cir.binop(rem, %9, %10) : !s32i -// CHECK: = cir.binop(add, %12, %13) : !s32i +// CHECK: = cir.binop(add, %12, %13) nsw : !s32i // CHECK: = cir.binop(sub, %15, %16) nsw : !s32i // CHECK: = cir.shift( right, %18 : !s32i, %19 : !s32i) -> !s32i // CHECK: = cir.shift(left, %21 : !s32i, %22 : !s32i) -> !s32i diff --git a/clang/test/CIR/CodeGen/bitint.cpp b/clang/test/CIR/CodeGen/bitint.cpp index 09c133d0e1be..7f7c85ed268c 100644 --- a/clang/test/CIR/CodeGen/bitint.cpp +++ b/clang/test/CIR/CodeGen/bitint.cpp @@ -47,7 +47,7 @@ i10 test_arith(i10 lhs, i10 rhs) { // CHECK: cir.func @_Z10test_arithDB10_S_(%arg0: !cir.int loc({{.+}}), %arg1: !cir.int loc({{.+}})) -> !cir.int // CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int // CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int -// CHECK-NEXT: %{{.+}} = cir.binop(add, %[[#LHS]], %[[#RHS]]) : !cir.int +// CHECK-NEXT: %{{.+}} = cir.binop(add, %[[#LHS]], %[[#RHS]]) nsw : !cir.int // CHECK: } void Size1ExtIntParam(unsigned _BitInt(1) A) { diff --git a/clang/test/CIR/CodeGen/call.c b/clang/test/CIR/CodeGen/call.c index 8129288bbd68..2c3d5cfa151e 100644 --- a/clang/test/CIR/CodeGen/call.c +++ b/clang/test/CIR/CodeGen/call.c @@ -26,7 +26,7 @@ void d(void) { // CHECK: cir.store %arg1, %1 : !s32i, !cir.ptr // CHECK: %3 = cir.load %0 : !cir.ptr, !s32i // CHECK: %4 = cir.load %1 : !cir.ptr, !s32i -// CHECK: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK: %5 = cir.binop(add, %3, %4) nsw : !s32i // CHECK: cir.store %5, %2 : !s32i, !cir.ptr // CHECK: %6 = cir.load %2 : !cir.ptr, !s32i // CHECK: cir.return %6 @@ -64,7 +64,7 @@ void d(void) { // CXX-NEXT: cir.store %arg1, %1 : !s32i, !cir.ptr // CXX-NEXT: %3 = cir.load %0 : !cir.ptr, !s32i // CXX-NEXT: %4 = cir.load %1 : !cir.ptr, !s32i -// CXX-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CXX-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i // CXX-NEXT: cir.store %5, %2 : !s32i, !cir.ptr // CXX-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i // CXX-NEXT: cir.return %6 diff --git a/clang/test/CIR/CodeGen/comma.cpp b/clang/test/CIR/CodeGen/comma.cpp index fd3f11f81d02..368b0e1bd18d 100644 --- a/clang/test/CIR/CodeGen/comma.cpp +++ b/clang/test/CIR/CodeGen/comma.cpp @@ -12,7 +12,7 @@ int c0() { // CHECK: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] // CHECK: %[[#B:]] = cir.alloca !s32i, !cir.ptr, ["b", init] // CHECK: %[[#LOADED_B:]] = cir.load %[[#B]] : !cir.ptr, !s32i -// CHECK: %[[#]] = cir.binop(add, %[[#LOADED_B]], %[[#]]) : !s32i +// CHECK: %[[#]] = cir.binop(add, %[[#LOADED_B]], %[[#]]) nsw : !s32i // CHECK: %[[#LOADED_A:]] = cir.load %[[#A]] : !cir.ptr, !s32i // CHECK: cir.store %[[#LOADED_A]], %[[#RET]] : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/if-constexpr.cpp b/clang/test/CIR/CodeGen/if-constexpr.cpp index 1e487389cc62..f980f3100841 100644 --- a/clang/test/CIR/CodeGen/if-constexpr.cpp +++ b/clang/test/CIR/CodeGen/if-constexpr.cpp @@ -66,7 +66,7 @@ void if0() { // CHECK-NEXT: cir.store %4, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: %5 = cir.const #cir.int<3> : !s32i loc({{.*}}) // CHECK-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i loc({{.*}}) -// CHECK-NEXT: %7 = cir.binop(mul, %5, %6) : !s32i loc({{.*}}) +// CHECK-NEXT: %7 = cir.binop(mul, %5, %6) nsw : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %7, %3 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { @@ -84,7 +84,7 @@ void if0() { // CHECK-NEXT: cir.store %4, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: %5 = cir.const #cir.int<10> : !s32i loc({{.*}}) // CHECK-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i loc({{.*}}) -// CHECK-NEXT: %7 = cir.binop(mul, %5, %6) : !s32i loc({{.*}}) +// CHECK-NEXT: %7 = cir.binop(mul, %5, %6) nsw : !s32i loc({{.*}}) // CHECK-NEXT: cir.store %7, %3 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 7ab80bf18aff..91639e6b3b6d 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -30,7 +30,7 @@ void l0() { // CHECK: %3 = cir.load %2 : !cir.ptr>, !cir.ptr // CHECK: %4 = cir.load %3 : !cir.ptr, !s32i // CHECK: %5 = cir.const #cir.int<1> : !s32i -// CHECK: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK: %6 = cir.binop(add, %4, %5) nsw : !s32i // CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %8 = cir.load %7 : !cir.ptr>, !cir.ptr // CHECK: cir.store %6, %8 : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 092c8b952472..64909759fd25 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -27,13 +27,13 @@ void l1() { // CHECK-NEXT: } body { // CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) nsw : !s32i // CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } step { // CHECK-NEXT: %4 = cir.load %2 : !cir.ptr, !s32i // CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) nsw : !s32i // CHECK-NEXT: cir.store %6, %2 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } @@ -59,7 +59,7 @@ void l2(bool cond) { // CHECK-NEXT: } do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } @@ -71,7 +71,7 @@ void l2(bool cond) { // CHECK-NEXT: } do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } @@ -84,7 +84,7 @@ void l2(bool cond) { // CHECK-NEXT: } do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } @@ -108,7 +108,7 @@ void l3(bool cond) { // CHECK-NEXT: cir.do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { @@ -120,7 +120,7 @@ void l3(bool cond) { // CHECK-NEXT: cir.do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { @@ -132,7 +132,7 @@ void l3(bool cond) { // CHECK-NEXT: cir.do { // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { @@ -159,7 +159,7 @@ void l4() { // CHECK-NEXT: } do { // CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i // CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) nsw : !s32i // CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: cir.scope { // CHECK-NEXT: %10 = cir.load %0 : !cir.ptr, !s32i diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index cc456e6cf58b..85dd678cf52f 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -28,24 +28,24 @@ int s0(int a, int b) { // CIR: cir.store %arg1, %1 : !s32i, !cir.ptr loc(#loc9) // CIR: %4 = cir.load %0 : !cir.ptr, !s32i loc(#loc10) // CIR: %5 = cir.load %1 : !cir.ptr, !s32i loc(#loc8) -// CIR: %6 = cir.binop(add, %4, %5) : !s32i loc(#loc24) +// CIR: %6 = cir.binop(add, %4, %5) nsw : !s32i loc(#loc10) // CIR: cir.store %6, %3 : !s32i, !cir.ptr loc(#loc23) // CIR: cir.scope { // CIR: %9 = cir.load %3 : !cir.ptr, !s32i loc(#loc13) // CIR: %10 = cir.const #cir.int<0> : !s32i loc(#loc14) -// CIR: %11 = cir.cmp(gt, %9, %10) : !s32i, !cir.bool loc(#loc26) +// CIR: %11 = cir.cmp(gt, %9, %10) : !s32i, !cir.bool loc(#loc25) // CIR: cir.if %11 { // CIR: %12 = cir.const #cir.int<0> : !s32i loc(#loc16) -// CIR: cir.store %12, %3 : !s32i, !cir.ptr loc(#loc28) +// CIR: cir.store %12, %3 : !s32i, !cir.ptr loc(#loc27) // CIR: } else { // CIR: %12 = cir.const #cir.int<1> : !s32i loc(#loc12) -// CIR: cir.store %12, %3 : !s32i, !cir.ptr loc(#loc29) -// CIR: } loc(#loc27) -// CIR: } loc(#loc25) +// CIR: cir.store %12, %3 : !s32i, !cir.ptr loc(#loc28) +// CIR: } loc(#loc26) +// CIR: } loc(#loc24) // CIR: %7 = cir.load %3 : !cir.ptr, !s32i loc(#loc18) -// CIR: cir.store %7, %2 : !s32i, !cir.ptr loc(#loc30) -// CIR: %8 = cir.load %2 : !cir.ptr, !s32i loc(#loc30) -// CIR: cir.return %8 : !s32i loc(#loc30) +// CIR: cir.store %7, %2 : !s32i, !cir.ptr loc(#loc29) +// CIR: %8 = cir.load %2 : !cir.ptr, !s32i loc(#loc29) +// CIR: cir.return %8 : !s32i loc(#loc29) // CIR: } loc(#loc20) // CIR: } loc(#loc) // CIR: #loc = loc("{{.*}}sourcelocation.cpp":0:0) @@ -66,13 +66,12 @@ int s0(int a, int b) { // CIR: #loc19 = loc("{{.*}}sourcelocation.cpp":12:3) // CIR: #loc20 = loc(fused[#loc1, #loc2]) // CIR: #loc23 = loc(fused[#loc7, #loc8]) -// CIR: #loc24 = loc(fused[#loc10, #loc8]) -// CIR: #loc25 = loc(fused[#loc11, #loc12]) -// CIR: #loc26 = loc(fused[#loc13, #loc14]) -// CIR: #loc27 = loc(fused[#loc15, #loc16, #loc17, #loc12]) -// CIR: #loc28 = loc(fused[#loc15, #loc16]) -// CIR: #loc29 = loc(fused[#loc17, #loc12]) -// CIR: #loc30 = loc(fused[#loc19, #loc18]) +// CIR: #loc24 = loc(fused[#loc11, #loc12]) +// CIR: #loc25 = loc(fused[#loc13, #loc14]) +// CIR: #loc26 = loc(fused[#loc15, #loc16, #loc17, #loc12]) +// CIR: #loc27 = loc(fused[#loc15, #loc16]) +// CIR: #loc28 = loc(fused[#loc17, #loc12]) +// CIR: #loc29 = loc(fused[#loc19, #loc18]) // LLVM: ModuleID = '{{.*}}sourcelocation.cpp' diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 367656d1965a..74b1312fd229 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -20,7 +20,7 @@ void sw1(int a) { // CHECK-NEXT: case (equal, 0) { // CHECK-NEXT: %4 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) nsw : !s32i // CHECK-NEXT: cir.store %6, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.break // CHECK-NEXT: }, @@ -32,7 +32,7 @@ void sw1(int a) { // CHECK-NEXT: %4 = cir.alloca !s32i, !cir.ptr, ["yolo", init] // CHECK-NEXT: %5 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %6 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i +// CHECK-NEXT: %7 = cir.binop(add, %5, %6) nsw : !s32i // CHECK-NEXT: cir.store %7, %1 : !s32i, !cir.ptr // CHECK-NEXT: %8 = cir.const #cir.int<100> : !s32i // CHECK-NEXT: cir.store %8, %4 : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/vectype-ext.cpp b/clang/test/CIR/CodeGen/vectype-ext.cpp index 1ab3369c8c46..915fb231177c 100644 --- a/clang/test/CIR/CodeGen/vectype-ext.cpp +++ b/clang/test/CIR/CodeGen/vectype-ext.cpp @@ -26,7 +26,7 @@ void vector_int_test(int x) { // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : !cir.vector // LLVM: %[[#X1:]] = load i32, ptr %{{[0-9]+}}, align 4 // LLVM-NEXT: %[[#X2:]] = load i32, ptr %{{[0-9]+}}, align 4 - // LLVM-NEXT: %[[#SUM:]] = add i32 %[[#X2]], 1 + // LLVM-NEXT: %[[#SUM:]] = add nsw i32 %[[#X2]], 1 // LLVM-NEXT: %[[#VEC1:]] = insertelement <4 x i32> undef, i32 %[[#X1]], i64 0 // LLVM-NEXT: %[[#VEC2:]] = insertelement <4 x i32> %[[#VEC1]], i32 5, i64 1 // LLVM-NEXT: %[[#VEC3:]] = insertelement <4 x i32> %[[#VEC2]], i32 6, i64 2 @@ -39,7 +39,7 @@ void vector_int_test(int x) { // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %[[#zero]], %[[#zero]] : !s32i, !s32i, !s32i, !s32i) : !cir.vector // LLVM: %[[#X1:]] = load i32, ptr %{{[0-9]+}}, align 4 // LLVM-NEXT: %[[#X2:]] = load i32, ptr %{{[0-9]+}}, align 4 - // LLVM-NEXT: %[[#SUM:]] = add i32 %[[#X2]], 1 + // LLVM-NEXT: %[[#SUM:]] = add nsw i32 %[[#X2]], 1 // LLVM-NEXT: %[[#VEC1:]] = insertelement <4 x i32> undef, i32 %[[#X1]], i64 0 // LLVM-NEXT: %[[#VEC2:]] = insertelement <4 x i32> %[[#VEC1]], i32 %[[#SUM]], i64 1 // LLVM-NEXT: %[[#VEC3:]] = insertelement <4 x i32> %[[#VEC2]], i32 0, i64 2 @@ -85,14 +85,14 @@ void vector_int_test(int x) { a[x] += a[0]; // CIR: %[[#RHSCA:]] = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector // CIR: %[[#LHSCA:]] = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector - // CIR: %[[#SUMCA:]] = cir.binop(add, %[[#LHSCA]], %[[#RHSCA]]) : !s32i + // CIR: %[[#SUMCA:]] = cir.binop(add, %[[#LHSCA]], %[[#RHSCA]]) nsw : !s32i // CIR: cir.vec.insert %[[#SUMCA]], %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : !cir.vector // LLVM: %[[#A1:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 // LLVM-NEXT: %[[#RHSCA:]] = extractelement <4 x i32> %[[#A1]], i32 0 // LLVM-NEXT: %[[#X:]] = load i32, ptr %{{[0-9]+}}, align 4 // LLVM-NEXT: %[[#A2:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 // LLVM-NEXT: %[[#LHSCA:]] = extractelement <4 x i32> %[[#A2]], i32 %[[#X]] - // LLVM-NEXT: %[[#SUMCA:]] = add i32 %[[#LHSCA]], %[[#RHSCA]] + // LLVM-NEXT: %[[#SUMCA:]] = add nsw i32 %[[#LHSCA]], %[[#RHSCA]] // LLVM-NEXT: %[[#A3:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 // LLVM-NEXT: %[[#RES:]] = insertelement <4 x i32> %[[#A3]], i32 %[[#SUMCA]], i32 %[[#X]] // LLVM-NEXT: store <4 x i32> %[[#RES]], ptr %{{[0-9]+}}, align 16 diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index fddfba552619..745f863e6766 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -50,7 +50,7 @@ void vector_int_test(int x) { // CHECK: %[[#LOADCAIDX2:]] = cir.load %{{[0-9]+}} : !cir.ptr, !s32i // CHECK: %[[#LOADCAVEC3:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector // CHECK: %[[#LHSCA:]] = cir.vec.extract %[[#LOADCAVEC3]][%[[#LOADCAIDX2]] : !s32i] : !cir.vector - // CHECK: %[[#SUMCA:]] = cir.binop(add, %[[#LHSCA]], %[[#RHSCA]]) : !s32i + // CHECK: %[[#SUMCA:]] = cir.binop(add, %[[#LHSCA]], %[[#RHSCA]]) nsw : !s32i // CHECK: %[[#LOADCAVEC4:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.vector // CHECK: %[[#RESULTCAVEC:]] = cir.vec.insert %[[#SUMCA]], %[[#LOADCAVEC4]][%[[#LOADCAIDX2]] : !s32i] : !cir.vector // CHECK: cir.store %[[#RESULTCAVEC]], %{{[0-9]+}} : !cir.vector, !cir.ptr> diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index cc7331ef2ba9..d7c5f4c32b02 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -35,7 +35,7 @@ void vector_int_test(int x) { // CHECK: %[[#T45:]] = llvm.mlir.constant(6 : i32) : i32 // CHECK: %[[#T46:]] = llvm.load %[[#T1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // CHECK: %[[#T47:]] = llvm.mlir.constant(1 : i32) : i32 - // CHECK: %[[#T48:]] = llvm.add %[[#T46]], %[[#T47]] : i32 + // CHECK: %[[#T48:]] = llvm.add %[[#T46]], %[[#T47]] overflow : i32 // CHECK: %[[#T49:]] = llvm.mlir.undef : vector<4xi32> // CHECK: %[[#T50:]] = llvm.mlir.constant(0 : i64) : i64 // CHECK: %[[#T51:]] = llvm.insertelement %[[#T43]], %[[#T49]][%[[#T50]] : i64] : vector<4xi32> @@ -82,7 +82,7 @@ void vector_int_test(int x) { // CHECK: %[[#LOADCAIDX2:]] = llvm.load %{{[0-9]+}} {alignment = 4 : i64} : !llvm.ptr -> i32 // CHECK: %[[#LOADCAVEC3:]] = llvm.load %{{[0-9]+}} {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#LHSCA:]] = llvm.extractelement %[[#LOADCAVEC3:]][%[[#LOADCAIDX2:]] : i32] : vector<4xi32> - // CHECK: %[[#SUMCA:]] = llvm.add %[[#LHSCA:]], %[[#RHSCA:]] : i32 + // CHECK: %[[#SUMCA:]] = llvm.add %[[#LHSCA:]], %[[#RHSCA:]] overflow : i32 // CHECK: %[[#LOADCAVEC4:]] = llvm.load %{{[0-9]+}} {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> // CHECK: %[[#RESULTCAVEC:]] = llvm.insertelement %[[#SUMCA:]], %[[#LOADCAVEC4:]][%[[#LOADCAIDX2:]] : i32] : vector<4xi32> // CHECK: llvm.store %[[#RESULTCAVEC:]], %{{[0-9]+}} {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr From c91ef121e2f6f92690734a48d7fd766840fc1f8f Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Sat, 22 Jun 2024 11:19:16 -0300 Subject: [PATCH 1651/2301] [CIR][ABI][NFC] Prime AArch64 CC lowering (#679) This patch is a preparation for the AArch64 calling convention lowering. It adds the basic infrastructure to initialize the AArch64 ABI details and validates it against a trivial void return and argument call conv lowering. --- clang/include/clang/CIR/Target/AArch64.h | 17 ++++ clang/include/clang/CIR/Target/x86.h | 7 ++ clang/lib/CIR/CodeGen/TargetInfo.cpp | 7 +- .../Dialect/Transforms/LoweringPrepare.cpp | 1 - .../Transforms/LoweringPrepareCXXABI.h | 11 +-- .../Transforms/TargetLowering/CIRCXXABI.h | 10 +-- .../Transforms/TargetLowering/CMakeLists.txt | 1 + .../TargetLowering/ItaniumCXXABI.cpp | 18 ++++- .../Transforms/TargetLowering/LowerModule.cpp | 14 ++++ .../Transforms/TargetLowering/TargetInfo.h | 14 ++-- .../TargetLowering/Targets/AArch64.cpp | 79 +++++++++++++++++++ .../Transforms/TargetLowering/Targets/X86.cpp | 2 + .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 1 + .../CIR/Lowering/ThroughMLIR/CMakeLists.txt | 1 + .../aarch64-call-conv-lowering-pass.cpp | 8 ++ 15 files changed, 163 insertions(+), 28 deletions(-) create mode 100644 clang/include/clang/CIR/Target/AArch64.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp create mode 100644 clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp diff --git a/clang/include/clang/CIR/Target/AArch64.h b/clang/include/clang/CIR/Target/AArch64.h new file mode 100644 index 000000000000..0788cab1fa71 --- /dev/null +++ b/clang/include/clang/CIR/Target/AArch64.h @@ -0,0 +1,17 @@ + +#ifndef CIR_AAARCH64_H +#define CIR_AAARCH64_H + +namespace cir { + +/// The ABI kind for AArch64 targets. +enum class AArch64ABIKind { + AAPCS = 0, + DarwinPCS, + Win64, + AAPCSSoft, +}; + +} // namespace cir + +#endif // CIR_AAARCH64_H diff --git a/clang/include/clang/CIR/Target/x86.h b/clang/include/clang/CIR/Target/x86.h index 2aa2d0493aac..08c6cae7b94f 100644 --- a/clang/include/clang/CIR/Target/x86.h +++ b/clang/include/clang/CIR/Target/x86.h @@ -15,6 +15,13 @@ namespace cir { +/// The AVX ABI level for X86 targets. +enum class X86AVXABILevel { + None, + AVX, + AVX512, +}; + // Possible argument classifications according to the x86 ABI documentation. enum X86ArgClass { Integer = 0, diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 617073bd0230..43bc2e33f7c8 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -3,7 +3,6 @@ #include "CIRGenCXXABI.h" #include "CIRGenFunctionInfo.h" #include "CIRGenTypes.h" -#include "CallingConv.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Target/x86.h" @@ -136,10 +135,14 @@ class AArch64TargetCIRGenInfo : public TargetCIRGenInfo { } // namespace +//===----------------------------------------------------------------------===// +// X86 ABI Implementation +//===----------------------------------------------------------------------===// + namespace { /// The AVX ABI leel for X86 targets. -enum class X86AVXABILevel { None, AVX, AVX512 }; +using X86AVXABILevel = ::cir::X86AVXABILevel; class X86_64ABIInfo : public ABIInfo { using Class = X86ArgClass; diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index f7643c9b8016..843c35f4a1bd 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -8,7 +8,6 @@ #include "LoweringPrepareCXXABI.h" #include "PassDetail.h" -#include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" #include "clang/AST/ASTContext.h" diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h index 717516d09664..42e8917b43b6 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h @@ -20,21 +20,14 @@ #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Target/AArch64.h" namespace cir { -// TODO: This is a temporary solution to know AArch64 ABI Kind -// This should be removed once we have a proper ABI info query -enum class AArch64ABIKind { - AAPCS = 0, - DarwinPCS, - Win64, - AAPCSSoft, -}; class LoweringPrepareCXXABI { public: static LoweringPrepareCXXABI *createItaniumABI(); - static LoweringPrepareCXXABI *createAArch64ABI(AArch64ABIKind k); + static LoweringPrepareCXXABI *createAArch64ABI(::cir::AArch64ABIKind k); virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index 5496dbbf2327..3cc1bde1f763 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -18,6 +18,7 @@ #include "mlir/IR/Value.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" +#include "clang/CIR/Target/AArch64.h" namespace mlir { namespace cir { @@ -51,17 +52,10 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &CGM); // should be updated to follow some level of codegen parity. namespace cir { -enum class AArch64ABIKind { - AAPCS = 0, - DarwinPCS, - Win64, - AAPCSSoft, -}; - class LoweringPrepareCXXABI { public: static LoweringPrepareCXXABI *createItaniumABI(); - static LoweringPrepareCXXABI *createAArch64ABI(AArch64ABIKind k); + static LoweringPrepareCXXABI *createAArch64ABI(::cir::AArch64ABIKind k); virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt index e90beeb13a43..6b3eea6032cd 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt @@ -12,6 +12,7 @@ add_clang_library(TargetLowering RecordLayoutBuilder.cpp TargetInfo.cpp TargetLoweringInfo.cpp + Targets/AArch64.cpp Targets/X86.cpp Targets/LoweringPrepareAArch64CXXABI.cpp Targets/LoweringPrepareItaniumCXXABI.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index 6e0fecfa44d5..4a11ebb6758a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -22,6 +22,7 @@ #include "CIRCXXABI.h" #include "LowerModule.h" +#include "llvm/Support/ErrorHandling.h" namespace mlir { namespace cir { @@ -30,8 +31,16 @@ namespace { class ItaniumCXXABI : public CIRCXXABI { +protected: + bool UseARMMethodPtrABI; + bool UseARMGuardVarABI; + bool Use32BitVTableOffsetABI; + public: - ItaniumCXXABI(LowerModule &LM) : CIRCXXABI(LM) {} + ItaniumCXXABI(LowerModule &LM, bool UseARMMethodPtrABI = false, + bool UseARMGuardVarABI = false) + : CIRCXXABI(LM), UseARMMethodPtrABI(UseARMMethodPtrABI), + UseARMGuardVarABI(UseARMGuardVarABI), Use32BitVTableOffsetABI(false) {} bool classifyReturnType(LowerFunctionInfo &FI) const override; }; @@ -52,6 +61,13 @@ bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { switch (LM.getCXXABIKind()) { + // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't + // include the other 32-bit ARM oddities: constructor/destructor return values + // and array cookies. + case clang::TargetCXXABI::GenericAArch64: + return new ItaniumCXXABI(LM, /*UseARMMethodPtrABI=*/true, + /*UseARMGuardVarABI=*/true); + case clang::TargetCXXABI::GenericItanium: return new ItaniumCXXABI(LM); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 0ec4b589bb41..d6a0b3488b2a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -19,9 +19,12 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" +#include "clang/CIR/Target/AArch64.h" #include "llvm/Support/ErrorHandling.h" using MissingFeatures = ::cir::MissingFeatures; +using AArch64ABIKind = ::cir::AArch64ABIKind; +using X86AVXABILevel = ::cir::X86AVXABILevel; namespace mlir { namespace cir { @@ -52,6 +55,17 @@ createTargetLoweringInfo(LowerModule &LM) { const llvm::Triple &Triple = Target.getTriple(); switch (Triple.getArch()) { + case llvm::Triple::aarch64: { + AArch64ABIKind Kind = AArch64ABIKind::AAPCS; + if (Target.getABI() == "darwinpcs") + llvm_unreachable("DarwinPCS ABI NYI"); + else if (Triple.isOSWindows()) + llvm_unreachable("Windows ABI NYI"); + else if (Target.getABI() == "aapcs-soft") + llvm_unreachable("AAPCS-soft ABI NYI"); + + return createAArch64TargetLoweringInfo(LM, Kind); + } case llvm::Triple::x86_64: { switch (Triple.getOS()) { case llvm::Triple::Win32: diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h index d01b222411e4..e0e984fcbc70 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h @@ -16,19 +16,19 @@ #include "LowerModule.h" #include "TargetLoweringInfo.h" +#include "clang/CIR/Target/AArch64.h" +#include "clang/CIR/Target/x86.h" namespace mlir { namespace cir { -/// The AVX ABI level for X86 targets. -enum class X86AVXABILevel { - None, - AVX, - AVX512, -}; +std::unique_ptr +createX86_64TargetLoweringInfo(LowerModule &CGM, + ::cir::X86AVXABILevel AVXLevel); std::unique_ptr -createX86_64TargetLoweringInfo(LowerModule &CGM, X86AVXABILevel AVXLevel); +createAArch64TargetLoweringInfo(LowerModule &CGM, + ::cir::AArch64ABIKind AVXLevel); } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp new file mode 100644 index 000000000000..1d8666684fad --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -0,0 +1,79 @@ +//===- AArch64.cpp --------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "clang/CIR/Target/AArch64.h" +#include "ABIInfoImpl.h" +#include "LowerFunctionInfo.h" +#include "LowerTypes.h" +#include "TargetInfo.h" +#include "TargetLoweringInfo.h" +#include "clang/CIR/ABIArgInfo.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" + +using AArch64ABIKind = ::cir::AArch64ABIKind; +using ABIArgInfo = ::cir::ABIArgInfo; +using MissingFeature = ::cir::MissingFeatures; + +namespace mlir { +namespace cir { + +//===----------------------------------------------------------------------===// +// AArch64 ABI Implementation +//===----------------------------------------------------------------------===// + +namespace { + +class AArch64ABIInfo : public ABIInfo { + AArch64ABIKind Kind; + +public: + AArch64ABIInfo(LowerTypes &CGT, AArch64ABIKind Kind) + : ABIInfo(CGT), Kind(Kind) {} + +private: + AArch64ABIKind getABIKind() const { return Kind; } + + ABIArgInfo classifyReturnType(Type RetTy, bool IsVariadic) const; + + void computeInfo(LowerFunctionInfo &FI) const override { + if (!::mlir::cir::classifyReturnType(getCXXABI(), FI, *this)) + FI.getReturnInfo() = + classifyReturnType(FI.getReturnType(), FI.isVariadic()); + + for (auto &_ : FI.arguments()) + llvm_unreachable("NYI"); + } +}; + +class AArch64TargetLoweringInfo : public TargetLoweringInfo { +public: + AArch64TargetLoweringInfo(LowerTypes <, AArch64ABIKind Kind) + : TargetLoweringInfo(std::make_unique(LT, Kind)) { + assert(!MissingFeature::swift()); + } +}; + +} // namespace + +ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, + bool IsVariadic) const { + if (RetTy.isa()) + return ABIArgInfo::getIgnore(); + + llvm_unreachable("NYI"); +} + +std::unique_ptr +createAArch64TargetLoweringInfo(LowerModule &CGM, AArch64ABIKind Kind) { + return std::make_unique(CGM.getTypes(), Kind); +} + +} // namespace cir +} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 736f3a7ea301..63fccc7e9cd6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -10,6 +10,8 @@ #include "llvm/Support/ErrorHandling.h" #include +using X86AVXABILevel = ::cir::X86AVXABILevel; + namespace mlir { namespace cir { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index c87f7531d996..6205c9047c0c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -30,6 +30,7 @@ add_clang_library(clangCIRLoweringDirectToLLVM MLIRCIR MLIRAnalysis MLIRBuiltinToLLVMIRTranslation + MLIRLLVMToLLVMIRTranslation MLIRCIRTransforms MLIRIR MLIRLLVMToLLVMIRTranslation diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index 167c8b791ea6..d3ecb0764071 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -31,6 +31,7 @@ add_clang_library(clangCIRLoweringThroughMLIR MLIRCIR MLIRAnalysis MLIRBuiltinToLLVMIRTranslation + MLIRLLVMToLLVMIRTranslation MLIRIR MLIRLLVMToLLVMIRTranslation MLIRParser diff --git a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp new file mode 100644 index 000000000000..145e8ab83ded --- /dev/null +++ b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -std=c++20 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// CHECK: @_Z4Voidv() +void Void(void) { +// CHECK: cir.call @_Z4Voidv() : () -> () + Void(); +} From 80f9c15d62d6ba157607df44195d79667b9509f9 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Tue, 25 Jun 2024 12:10:44 -0400 Subject: [PATCH 1652/2301] [CIR][CIRGen] Add dsolocal attribute to GlobalOp and FuncOp (#686) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit as title. In this PR 1. make setDSOLocal an interface function. 2. implemented shouldAssumeDSOLocal function in CIRGenModule, using the same skeleton as shouldAssumeDSOLocal in OG's CodeGenModule.cpp. 3. added call sites of setDSOLocal within CIRGenModule, like what's in OG's CodeGenModule. 4. fixed printing format 5. LLVM lowering 6. keep CIRGenModule::setDSOLocal(mlir::Operation *Op) wrapper at call sites, so if we make changes to interface, we don't have to touch call sites since there are many. We don't have LLVM test for this PR yet, and it will be addressed by the next PR,: **TODO in the next PR:** 1. Implement setNonAliasAttributes in CIRGenModule.cpp, which should be called by CIRGenModule::buildGlobalFunctionDefinition. That way, we will set dso_local correctly for all func ops who have defs in the module. That way we should have LLVM test case in this next PR. detailed explanation below: Since LLVM asm printer omits dso_local in [isImplicitDSOLocal](https://github.com/llvm/clangir/blob/main/llvm/lib/IR/AsmWriter.cpp#L3689)(), and all we cover so far in CIR all fall into this category, we're not able to have a LLVM test. However, the case [isDeclarationForLinker()](https://github.com/llvm/clangir/blob/c28908396a3ba7bda6345907233e4f5c4e53a33e/clang/lib/CodeGen/CodeGenModule.cpp#L1655) should have a lot of test examples as all func defs should have dso_local, We don't have it CIR is because A to-do in our CG. When OG is building func def, after code is generated, it will call setDSOLocal again via setNonAliasAttributes—>SetCommonAttributes—>setGVProperties. The key difference is now GV is not declaration anymore. so satisfies the test if (!GV->isDeclarationForLinker()) return true; https://github.com/llvm/clangir/blob/f78f9a55e7cd6b9e350556e35097616676cf1f3e/clang/lib/CodeGen/CodeGenModule.cpp#L5864 But our CG missed this step of calling setNonAliasAttributes so it won’t give setDSOLocal another chance to get it right https://github.com/llvm/clangir/blob/c28908396a3ba7bda6345907233e4f5c4e53a33e/clang/lib/CIR/CodeGen/CIRGenModule.cpp#L496 **TODO in the next next PR** 2. add call to setDSOLocal in other parts of CG other than CIRGenModule. 3. implement DefaultVisibility check, didn't do in this PR as LLVM::DefaultVisibility has no direct counterpart in [MLIR::](mlir::SymbolTable::Visibility). Therefore, it takes care examination of cases to see what is the best emulation of hasDefaultVisibility in MLIR/CIR context as far as dsolocal is concerned. **TODO in future** other than DefaultVisibility check, we didn't implement canBenefitFromLocalAlias as it depends on other missing features like setComDat. There is a lot of cases we need to cover, so this is just the first step! --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 + .../clang/CIR/Interfaces/CIROpInterfaces.h | 1 + .../clang/CIR/Interfaces/CIROpInterfaces.td | 21 ++++ clang/include/clang/CIR/MissingFeatures.h | 2 + clang/lib/CIR/CodeGen/CIRGenModule.cpp | 103 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 + clang/lib/CIR/Interfaces/CIROpInterfaces.cpp | 9 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 +- clang/test/CIR/CodeGen/array.cpp | 2 +- clang/test/CIR/CodeGen/const-array.c | 2 +- clang/test/CIR/CodeGen/coro-task.cpp | 2 +- clang/test/CIR/CodeGen/globals.cpp | 4 +- clang/test/CIR/CodeGen/hello.c | 2 +- clang/test/CIR/CodeGen/lambda.cpp | 10 +- clang/test/CIR/CodeGen/libcall.cpp | 4 +- clang/test/CIR/CodeGen/linkage.c | 2 +- clang/test/CIR/CodeGen/static-vars.c | 16 +-- clang/test/CIR/CodeGen/static-vars.cpp | 12 +- clang/test/CIR/CodeGen/stmtexpr-init.c | 2 +- clang/test/CIR/CodeGen/wide-string.cpp | 8 +- 21 files changed, 182 insertions(+), 36 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 44724c079d09..eb281891e8c5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1924,6 +1924,7 @@ def GlobalOp : CIR_Op<"global", // Note this can also be a FlatSymbolRefAttr OptionalAttr:$initial_value, UnitAttr:$constant, + UnitAttr:$dsolocal, OptionalAttr:$alignment, OptionalAttr:$ast, OptionalAttr:$section @@ -1934,6 +1935,7 @@ def GlobalOp : CIR_Op<"global", (`constant` $constant^)? $linkage ($tls_model^)? + (`dsolocal` $dsolocal^)? $sym_name custom($sym_type, $initial_value, $ctorRegion, $dtorRegion) attr-dict @@ -2671,6 +2673,7 @@ def FuncOp : CIR_Op<"func", [ UnitAttr:$coroutine, UnitAttr:$lambda, UnitAttr:$no_proto, + UnitAttr:$dsolocal, DefaultValuedAttr:$linkage, ExtraFuncAttr:$extra_attrs, diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h index fcef7a33eb20..2cd4d9e42524 100644 --- a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h @@ -17,6 +17,7 @@ #include "clang/AST/Attr.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Mangle.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" namespace mlir { namespace cir {} // namespace cir diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td index 8f1c63e1b024..cec43646d0e8 100644 --- a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td @@ -46,6 +46,18 @@ let cppNamespace = "::mlir::cir" in { /*defaultImplementation=*/[{ return false; }] >, InterfaceMethod<"", + "bool", "hasLocalLinkage", (ins), [{}], + /*defaultImplementation=*/[{ + return mlir::cir::isLocalLinkage($_op.getLinkage()); + }] + >, + InterfaceMethod<"", + "bool", "hasExternalWeakLinkage", (ins), [{}], + /*defaultImplementation=*/[{ + return mlir::cir::isExternalWeakLinkage($_op.getLinkage()); + }] + >, + InterfaceMethod<"", "bool", "isDeclarationForLinker", (ins), [{}], /*defaultImplementation=*/[{ if ($_op.hasAvailableExternallyLinkage()) @@ -53,7 +65,16 @@ let cppNamespace = "::mlir::cir" in { return $_op.isDeclaration(); }] >, + InterfaceMethod<"", + "void", "setDSOLocal", (ins "bool":$val), [{}], + /*defaultImplementation=*/[{ + $_op.setDsolocal(val); + }] + >, ]; + let extraClassDeclaration = [{ + bool canBenefitFromLocalAlias() const; + }]; } } // namespace mlir::cir diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index d61c5e618605..1de2d2d1de53 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -48,6 +48,8 @@ struct MissingFeatures { static bool hiddenVisibility() { return false; } static bool protectedVisibility() { return false; } static bool addCompilerUsedGlobal() { return false; } + static bool supportIFuncAttr() { return false; } + static bool setDefaultVisibility() { return false; } // Sanitizers static bool reportGlobalToASan() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index df777b95062c..c12f79a9aeb1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -365,6 +365,103 @@ bool CIRGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { return true; } +static bool hasDefaultVisibility(CIRGlobalValueInterface GV) { + // TODO: we need to have a precise definition of what is a default visibility. + // in the context of MILR and CIR, now we default to + assert(!MissingFeatures::setDefaultVisibility()); + return true; +} + +static bool shouldAssumeDSOLocal(const CIRGenModule &CGM, + CIRGlobalValueInterface GV) { + if (GV.hasLocalLinkage()) + return true; + + if (!hasDefaultVisibility(GV) && !GV.hasExternalWeakLinkage()) { + return true; + } + + // DLLImport explicitly marks the GV as external. + // so it shouldn't be dso_local + // But we don't have the info set now + assert(!MissingFeatures::setDLLImportDLLExport()); + + const llvm::Triple &TT = CGM.getTriple(); + const auto &CGOpts = CGM.getCodeGenOpts(); + if (TT.isWindowsGNUEnvironment()) { + // In MinGW, variables without DLLImport can still be automatically + // imported from a DLL by the linker; don't mark variables that + // potentially could come from another DLL as DSO local. + + // With EmulatedTLS, TLS variables can be autoimported from other DLLs + // (and this actually happens in the public interface of libstdc++), so + // such variables can't be marked as DSO local. (Native TLS variables + // can't be dllimported at all, though.) + llvm_unreachable("MinGW not supported here"); + } + + // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols + // remain unresolved in the link, they can be resolved to zero, which is + // outside the current DSO. + if (TT.isOSBinFormatCOFF() && GV.hasExternalWeakLinkage()) + return false; + + // Every other GV is local on COFF. + // Make an exception for windows OS in the triple: Some firmware builds use + // *-win32-macho triples. This (accidentally?) produced windows relocations + // without GOT tables in older clang versions; Keep this behaviour. + // FIXME: even thread local variables? + if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO())) + return true; + + // Only handle COFF and ELF for now. + if (!TT.isOSBinFormatELF()) + return false; + + llvm::Reloc::Model RM = CGOpts.RelocationModel; + const auto &LOpts = CGM.getLangOpts(); + if (RM != llvm::Reloc::Static && !LOpts.PIE) { + // On ELF, if -fno-semantic-interposition is specified and the target + // supports local aliases, there will be neither CC1 + // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set + // dso_local on the function if using a local alias is preferable (can avoid + // PLT indirection). + if (!(isa(GV) && GV.canBenefitFromLocalAlias())) { + return false; + } + return !(CGM.getLangOpts().SemanticInterposition || + CGM.getLangOpts().HalfNoSemanticInterposition); + } + + // A definition cannot be preempted from an executable. + if (!GV.isDeclarationForLinker()) + return true; + + // Most PIC code sequences that assume that a symbol is local cannot produce a + // 0 if it turns out the symbol is undefined. While this is ABI and relocation + // depended, it seems worth it to handle it here. + if (RM == llvm::Reloc::PIC_ && GV.hasExternalWeakLinkage()) + return false; + + // PowerPC64 prefers TOC indirection to avoid copy relocations. + if (TT.isPPC64()) + return false; + + if (CGOpts.DirectAccessExternalData) { + llvm_unreachable("-fdirect-access-external-data not supported"); + } + + // If we can use copy relocations we can assume it is local. + + // Otherwise don't assume it is local. + + return false; +} + +void CIRGenModule::setDSOLocal(CIRGlobalValueInterface GV) const { + GV.setDSOLocal(shouldAssumeDSOLocal(*this, GV)); +} + void CIRGenModule::buildGlobal(GlobalDecl GD) { const auto *Global = cast(GD.getDecl()); @@ -1273,7 +1370,6 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, GV.setLinkageAttr( mlir::cir::GlobalLinkageKindAttr::get(CGM.getBuilder().getContext(), LT)); CIRGenModule::setInitializer(GV, C); - // TODO(cir) assert(!cir::MissingFeatures::threadLocal() && "NYI"); assert(!cir::MissingFeatures::unnamedAddr() && "NYI"); @@ -1327,6 +1423,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, llvm_unreachable("this should never be untyped at this point"); GV = generateStringLiteral(loc, typedC, LT, *this, GlobalVariableName, Alignment); + setDSOLocal(static_cast(GV)); ConstantStringMap[C] = GV; assert(!cir::MissingFeatures::reportGlobalToASan() && "NYI"); @@ -1980,6 +2077,9 @@ void CIRGenModule::setGlobalVisibility(mlir::Operation *GV, void CIRGenModule::setDSOLocal(mlir::Operation *Op) const { assert(!MissingFeatures::setDSOLocal()); + if (auto globalValue = dyn_cast(Op)) { + setDSOLocal(globalValue); + } } void CIRGenModule::setGVProperties(mlir::Operation *Op, @@ -2751,6 +2851,7 @@ mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( assert(!MissingFeatures::setComdat()); GV.setAlignmentAttr(getSize(Alignment)); + setDSOLocal(static_cast(GV)); return GV; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index d75109428a84..acb8e9188c43 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -30,6 +30,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Interfaces/CIROpInterfaces.h" #include "llvm/ADT/ScopedHashTable.h" #include "llvm/ADT/SmallPtrSet.h" @@ -281,6 +282,8 @@ class CIRGenModule : public CIRGenTypeCache { void buildDeferredVTables(); bool shouldOpportunisticallyEmitVTables(); + void setDSOLocal(mlir::cir::CIRGlobalValueInterface GV) const; + /// Return the appropriate linkage for the vtable, VTT, and type information /// of the given class. mlir::cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *RD); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d62fd3207803..a2a06308b03e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2116,6 +2116,9 @@ void cir::FuncOp::print(OpAsmPrinter &p) { if (vis != mlir::SymbolTable::Visibility::Public) p << vis << " "; + if (getDsolocal()) + p << "dsolocal "; + // Print function name, signature, and control. p.printSymbolName(getSymName()); auto fnType = getFunctionType(); @@ -2134,6 +2137,7 @@ void cir::FuncOp::print(OpAsmPrinter &p) { getAliaseeAttrName(), getBuiltinAttrName(), getCoroutineAttrName(), + getDsolocalAttrName(), getExtraAttrsAttrName(), getFunctionTypeAttrName(), getGlobalCtorAttrName(), diff --git a/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp b/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp index 38211effb79c..46e472c312be 100644 --- a/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp +++ b/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp @@ -7,9 +7,18 @@ //===----------------------------------------------------------------------===// #include "clang/CIR/Interfaces/CIROpInterfaces.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "llvm/ADT/SmallVector.h" using namespace mlir::cir; /// Include the generated type qualifiers interfaces. #include "clang/CIR/Interfaces/CIROpInterfaces.cpp.inc" + +#include "clang/CIR/MissingFeatures.h" + +bool CIRGlobalValueInterface::canBenefitFromLocalAlias() const { + assert(!::cir::MissingFeatures::supportIFuncAttr()); + assert(!::cir::MissingFeatures::setComdat()); + return false; +} diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 4a5b63569f03..515c3254d875 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1513,6 +1513,7 @@ class CIRFuncLowering : public mlir::OpConversionPattern { mlir::ConversionPatternRewriter &rewriter) const override { auto fnType = op.getFunctionType(); + auto isDsoLocal = op.getDsolocal(); mlir::TypeConverter::SignatureConversion signatureConversion( fnType.getNumInputs()); @@ -1546,7 +1547,7 @@ class CIRFuncLowering : public mlir::OpConversionPattern { filterFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes); auto fn = rewriter.create( - Loc, op.getName(), llvmFnTy, linkage, false, mlir::LLVM::CConv::C, + Loc, op.getName(), llvmFnTy, linkage, isDsoLocal, mlir::LLVM::CConv::C, mlir::SymbolRefAttr(), attributes); rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); @@ -1655,6 +1656,7 @@ class CIRGlobalOpLowering // Fetch required values to create LLVM op. const auto llvmType = getTypeConverter()->convertType(op.getSymType()); const auto isConst = op.getConstant(); + const auto isDsoLocal = op.getDsolocal(); const auto linkage = convertLinkage(op.getLinkage()); const auto symbol = op.getSymName(); const auto loc = op.getLoc(); @@ -1671,7 +1673,7 @@ class CIRGlobalOpLowering rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, mlir::Attribute(), /*alignment*/ 0, /*addrSpace*/ 0, - /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), + /*dsoLocal*/ isDsoLocal, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 31649406fac1..1fc6989058ae 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -40,7 +40,7 @@ void local_stringlit() { const char *s = "whatnow"; } -// CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" constant internal dsolocal @".str" = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.func @_Z15local_stringlitv() // CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : !cir.ptr> diff --git a/clang/test/CIR/CodeGen/const-array.c b/clang/test/CIR/CodeGen/const-array.c index 91a77d113daf..0020d47d9fc3 100644 --- a/clang/test/CIR/CodeGen/const-array.c +++ b/clang/test/CIR/CodeGen/const-array.c @@ -4,7 +4,7 @@ void bar() { const int arr[1] = {1}; } -// CHECK: cir.global "private" constant internal @bar.arr = #cir.const_array<[#cir.int<1> : !s32i]> : !cir.array {alignment = 4 : i64} +// CHECK: cir.global "private" constant internal dsolocal @bar.arr = #cir.const_array<[#cir.int<1> : !s32i]> : !cir.array {alignment = 4 : i64} // CHECK: cir.func no_proto @bar() // CHECK: {{.*}} = cir.get_global @bar.arr : !cir.ptr> diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index ba9c6cdf973e..fddb9ef8d735 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -338,7 +338,7 @@ folly::coro::Task go1_lambda() { co_return co_await task; } -// CHECK: cir.func coroutine lambda internal private @_ZZ10go1_lambdavENK3$_0clEv{{.*}}22 extra{{.*}}{ +// CHECK: cir.func coroutine lambda internal private dsolocal @_ZZ10go1_lambdavENK3$_0clEv{{.*}}22 extra{{.*}}{ // CHECK: cir.func coroutine @_Z10go1_lambdav() {{.*}}22 extra{{.*}}{ folly::coro::Task go4() { diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index cd2c235db672..4df6dface2c6 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -49,10 +49,10 @@ int use_func() { return func(); } // CHECK-NEXT: cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array // CHECK-NEXT: cir.global external @alpha = #cir.const_array<"abc\00" : !cir.array> : !cir.array -// CHECK-NEXT: cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global "private" constant internal dsolocal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global external @s = #cir.global_view<@".str"> : !cir.ptr -// CHECK-NEXT: cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global "private" constant internal dsolocal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global external @s1 = #cir.global_view<@".str1"> : !cir.ptr // CHECK-NEXT: cir.global external @s2 = #cir.global_view<@".str"> : !cir.ptr diff --git a/clang/test/CIR/CodeGen/hello.c b/clang/test/CIR/CodeGen/hello.c index 8aa29c05a211..3eff7227943c 100644 --- a/clang/test/CIR/CodeGen/hello.c +++ b/clang/test/CIR/CodeGen/hello.c @@ -8,7 +8,7 @@ int main (void) { } // CHECK: cir.func private @printf(!cir.ptr, ...) -> !s32i -// CHECK: cir.global "private" constant internal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" constant internal dsolocal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.func @main() -> !s32i // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK: %1 = cir.get_global @printf : !cir.ptr, ...)>> diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 91639e6b3b6d..a54e420ba2fd 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -9,7 +9,7 @@ void fn() { // CHECK: !ty_22anon2E222 = !cir.struct // CHECK-DAG: module -// CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv{{.*}}) extra +// CHECK: cir.func lambda internal private dsolocal @_ZZ2fnvENK3$_0clEv{{.*}}) extra // CHECK: cir.func @_Z2fnv() // CHECK-NEXT: %0 = cir.alloca !ty_22anon2E222, !cir.ptr, ["a"] @@ -21,7 +21,7 @@ void l0() { a(); } -// CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv({{.*}}) extra +// CHECK: cir.func lambda internal private dsolocal @_ZZ2l0vENK3$_0clEv({{.*}}) extra // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> @@ -99,13 +99,13 @@ int g3() { } // lambda operator() -// CHECK: cir.func lambda internal private @_ZZ2g3vENK3$_0clERKi{{.*}}!s32i extra +// CHECK: cir.func lambda internal private dsolocal @_ZZ2g3vENK3$_0clERKi{{.*}}!s32i extra // lambda __invoke() -// CHECK: cir.func internal private @_ZZ2g3vEN3$_08__invokeERKi +// CHECK: cir.func internal private dsolocal @_ZZ2g3vEN3$_08__invokeERKi // lambda operator int (*)(int const&)() -// CHECK: cir.func internal private @_ZZ2g3vENK3$_0cvPFiRKiEEv +// CHECK: cir.func internal private dsolocal @_ZZ2g3vENK3$_0cvPFiRKiEEv // CHECK: cir.func @_Z2g3v() -> !s32i // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/libcall.cpp b/clang/test/CIR/CodeGen/libcall.cpp index 62944de443eb..96537b392d59 100644 --- a/clang/test/CIR/CodeGen/libcall.cpp +++ b/clang/test/CIR/CodeGen/libcall.cpp @@ -47,7 +47,7 @@ void t(const char* fmt, ...) { // CHECK: %5 = cir.call @_ZL6strlenPKcU17pass_object_size0(%3, %4) : (!cir.ptr, !u64i) -> !u64i // CHECK: cir.func private @__vsnprintf_chk -// CHECK: cir.func internal private @_ZL9vsnprintfPcU17pass_object_size1iPKcP13__va_list_tag +// CHECK: cir.func internal private dsolocal @_ZL9vsnprintfPcU17pass_object_size1iPKcP13__va_list_tag // Implicit size parameter in arg %1 // @@ -60,4 +60,4 @@ void t(const char* fmt, ...) { // CHECK: %10 = cir.load %1 : !cir.ptr, !u64i // CHECK: %11 = cir.load %3 : !cir.ptr>, !cir.ptr // CHECK: %12 = cir.load %4 : !cir.ptr>, !cir.ptr -// CHECK: %13 = cir.call @__vsnprintf_chk(%6, %8, %9, %10, %11, %12) \ No newline at end of file +// CHECK: %13 = cir.call @__vsnprintf_chk(%6, %8, %9, %10, %11, %12) diff --git a/clang/test/CIR/CodeGen/linkage.c b/clang/test/CIR/CodeGen/linkage.c index aff2c6ccafad..84b1413f559a 100644 --- a/clang/test/CIR/CodeGen/linkage.c +++ b/clang/test/CIR/CodeGen/linkage.c @@ -12,7 +12,7 @@ int foo(void) { return bar(5); } -// CIR: cir.func internal private @bar( +// CIR: cir.func internal private dsolocal @bar( // CIR: cir.func @foo( // LLVM: define internal i32 @bar( diff --git a/clang/test/CIR/CodeGen/static-vars.c b/clang/test/CIR/CodeGen/static-vars.c index cec8544fc967..140f4e6052f6 100644 --- a/clang/test/CIR/CodeGen/static-vars.c +++ b/clang/test/CIR/CodeGen/static-vars.c @@ -4,20 +4,20 @@ void func1(void) { // Should lower default-initialized static vars. static int i; - // CHECK-DAG: cir.global "private" internal @func1.i = #cir.int<0> : !s32i + // CHECK-DAG: cir.global "private" internal dsolocal @func1.i = #cir.int<0> : !s32i // Should lower constant-initialized static vars. static int j = 1; - // CHECK-DAG: cir.global "private" internal @func1.j = #cir.int<1> : !s32i + // CHECK-DAG: cir.global "private" internal dsolocal @func1.j = #cir.int<1> : !s32i // Should properly shadow static vars in nested scopes. { static int j = 2; - // CHECK-DAG: cir.global "private" internal @func1.j.1 = #cir.int<2> : !s32i + // CHECK-DAG: cir.global "private" internal dsolocal @func1.j.1 = #cir.int<2> : !s32i } { static int j = 3; - // CHECK-DAG: cir.global "private" internal @func1.j.2 = #cir.int<3> : !s32i + // CHECK-DAG: cir.global "private" internal dsolocal @func1.j.2 = #cir.int<3> : !s32i } // Should lower basic static vars arithmetics. @@ -31,20 +31,20 @@ void func1(void) { // Should shadow static vars on different functions. void func2(void) { static char i; - // CHECK-DAG: cir.global "private" internal @func2.i = #cir.int<0> : !s8i + // CHECK-DAG: cir.global "private" internal dsolocal @func2.i = #cir.int<0> : !s8i static float j; - // CHECK-DAG: cir.global "private" internal @func2.j = #cir.fp<0.000000e+00> : !cir.float + // CHECK-DAG: cir.global "private" internal dsolocal @func2.j = #cir.fp<0.000000e+00> : !cir.float } // Should const initialize static vars with constant addresses. void func3(void) { static int var; static int *constAddr = &var; - // CHECK-DAG: cir.global "private" internal @func3.constAddr = #cir.global_view<@func3.var> : !cir.ptr + // CHECK-DAG: cir.global "private" internal dsolocal @func3.constAddr = #cir.global_view<@func3.var> : !cir.ptr } // Should match type size in bytes between var and initializer. void func4(void) { static char string[] = "Hello"; - // CHECK-DAG: cir.global "private" internal @func4.string = #cir.const_array<"Hello\00" : !cir.array> : !cir.array + // CHECK-DAG: cir.global "private" internal dsolocal @func4.string = #cir.const_array<"Hello\00" : !cir.array> : !cir.array } diff --git a/clang/test/CIR/CodeGen/static-vars.cpp b/clang/test/CIR/CodeGen/static-vars.cpp index bc971d3d9cee..c1c65bea0748 100644 --- a/clang/test/CIR/CodeGen/static-vars.cpp +++ b/clang/test/CIR/CodeGen/static-vars.cpp @@ -4,20 +4,20 @@ void func1(void) { // Should lower default-initialized static vars. static int i; - // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1i = #cir.int<0> : !s32i + // CHECK-DAG: cir.global "private" internal dsolocal @_ZZ5func1vE1i = #cir.int<0> : !s32i // Should lower constant-initialized static vars. static int j = 1; - // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1j = #cir.int<1> : !s32i + // CHECK-DAG: cir.global "private" internal dsolocal @_ZZ5func1vE1j = #cir.int<1> : !s32i // Should properly shadow static vars in nested scopes. { static int j = 2; - // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1j_0 = #cir.int<2> : !s32i + // CHECK-DAG: cir.global "private" internal dsolocal @_ZZ5func1vE1j_0 = #cir.int<2> : !s32i } { static int j = 3; - // CHECK-DAG: cir.global "private" internal @_ZZ5func1vE1j_1 = #cir.int<3> : !s32i + // CHECK-DAG: cir.global "private" internal dsolocal @_ZZ5func1vE1j_1 = #cir.int<3> : !s32i } // Should lower basic static vars arithmetics. @@ -31,7 +31,7 @@ void func1(void) { // Should shadow static vars on different functions. void func2(void) { static char i; - // CHECK-DAG: cir.global "private" internal @_ZZ5func2vE1i = #cir.int<0> : !s8i + // CHECK-DAG: cir.global "private" internal dsolocal @_ZZ5func2vE1i = #cir.int<0> : !s8i static float j; - // CHECK-DAG: cir.global "private" internal @_ZZ5func2vE1j = #cir.fp<0.000000e+00> : !cir.float + // CHECK-DAG: cir.global "private" internal dsolocal @_ZZ5func2vE1j = #cir.fp<0.000000e+00> : !cir.float } diff --git a/clang/test/CIR/CodeGen/stmtexpr-init.c b/clang/test/CIR/CodeGen/stmtexpr-init.c index 7a38f3ac4b8c..7fd44aebc991 100644 --- a/clang/test/CIR/CodeGen/stmtexpr-init.c +++ b/clang/test/CIR/CodeGen/stmtexpr-init.c @@ -8,7 +8,7 @@ void escape(const void *); -// CIR-DAG: cir.global "private" internal @T1._x = #cir.int<99> : !s8i +// CIR-DAG: cir.global "private" internal dsolocal @T1._x = #cir.int<99> : !s8i // LLVM-DAG: internal global i8 99 void T1(void) { diff --git a/clang/test/CIR/CodeGen/wide-string.cpp b/clang/test/CIR/CodeGen/wide-string.cpp index 1b3cacc4dd49..b02380041ce1 100644 --- a/clang/test/CIR/CodeGen/wide-string.cpp +++ b/clang/test/CIR/CodeGen/wide-string.cpp @@ -5,22 +5,22 @@ const char16_t *test_utf16() { return u"你好世界"; } -// CHECK: cir.global "private" constant internal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u16i, #cir.int<22909> : !u16i, #cir.int<19990> : !u16i, #cir.int<30028> : !u16i, #cir.int<0> : !u16i]> : !cir.array +// CHECK: cir.global "private" constant internal dsolocal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u16i, #cir.int<22909> : !u16i, #cir.int<19990> : !u16i, #cir.int<30028> : !u16i, #cir.int<0> : !u16i]> : !cir.array const char32_t *test_utf32() { return U"你好世界"; } -// CHECK: cir.global "private" constant internal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u32i, #cir.int<22909> : !u32i, #cir.int<19990> : !u32i, #cir.int<30028> : !u32i, #cir.int<0> : !u32i]> : !cir.array +// CHECK: cir.global "private" constant internal dsolocal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u32i, #cir.int<22909> : !u32i, #cir.int<19990> : !u32i, #cir.int<30028> : !u32i, #cir.int<0> : !u32i]> : !cir.array const char16_t *test_zero16() { return u"\0\0\0\0"; } -// CHECK: cir.global "private" constant internal @{{.+}} = #cir.zero : !cir.array +// CHECK: cir.global "private" constant internal dsolocal @{{.+}} = #cir.zero : !cir.array const char32_t *test_zero32() { return U"\0\0\0\0"; } -// CHECK: cir.global "private" constant internal @{{.+}} = #cir.zero : !cir.array +// CHECK: cir.global "private" constant internal dsolocal @{{.+}} = #cir.zero : !cir.array From 1b6d6462b1cc3da42df3b2785f8da64dbbfada3f Mon Sep 17 00:00:00 2001 From: Julian Oppermann Date: Wed, 26 Jun 2024 18:18:33 +0200 Subject: [PATCH 1653/2301] [CIR][NFC] Replace uses of isa/dyn_cast/cast/... member functions (#703) Mechanical rewrite to use the corresponding free functions; fixes #702. I used a slightly modified version of the `clang-tidy` check provided in https://discourse.llvm.org/t/psa-deprecating-cast-isa-methods-in-some-classes/70909 to rewrite the C++ source files, regular expressions for the TableGen files, and manual cleanups where needed (e.g. chains like `x.foo().cast().bar().cast()`) I applied the following heuristic to determine which namespace prefix to use: - If the target type is not qualified, and the TU has `using namespace mlir` or the code is inside the `mlir` namespace -> use a plain `isa`/`cast`/... - Exception: Always qualify inside custom types and attributes, because their base classes define the very members we want to get rid of. - Else. i.e. the target type is qualified as `::mlir::` or `mlir::`, use that prefix. The `clang-tidy` check also rewrote `dyn_cast_or_null` to `dyn_cast_if_present`. I think that's useful because the former variant is going to be deprecated as well in the future. I'm using `-Werror=deprecated-declarations` to test the change (see 6b7420a93278ee01d37d95882dec39358378cfb3); this required also changing two occurrences of `StringRef::equals` to `==`. I could also just drop the commit here; maybe we want to enable `-Werror` in general (there aren't too many other warnings left in the codebase). --------- Signed-off-by: Julian Oppermann --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 23 +- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 14 +- .../clang/CIR/Dialect/IR/CIRDataLayout.h | 9 +- clang/include/clang/CIR/Dialect/IR/CIROps.td | 26 +- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 71 ++-- clang/lib/CIR/CMakeLists.txt | 7 + clang/lib/CIR/CodeGen/Address.h | 11 +- clang/lib/CIR/CodeGen/CIRAsm.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 14 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 85 ++--- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 15 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 9 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 25 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 52 +-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 85 ++--- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 14 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 24 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenValue.h | 2 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 4 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 2 +- clang/lib/CIR/CodeGen/TargetInfo.cpp | 4 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 30 +- clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 8 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 211 +++++------ clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 42 +-- .../Dialect/Transforms/CallConvLowering.cpp | 9 +- clang/lib/CIR/Dialect/Transforms/DropAST.cpp | 2 +- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 10 +- .../Dialect/Transforms/IdiomRecognizer.cpp | 6 +- clang/lib/CIR/Dialect/Transforms/LibOpt.cpp | 10 +- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 38 +- .../Dialect/Transforms/LoweringPrepare.cpp | 16 +- .../CIR/Dialect/Transforms/MergeCleanups.cpp | 2 +- .../lib/CIR/Dialect/Transforms/SCFPrepare.cpp | 2 +- .../lib/CIR/Dialect/Transforms/StdHelpers.cpp | 2 +- .../Transforms/TargetLowering/ABIInfoImpl.cpp | 2 +- .../TargetLowering/ItaniumCXXABI.cpp | 2 +- .../TargetLowering/LowerFunction.cpp | 2 +- .../TargetLowering/Targets/AArch64.cpp | 2 +- .../Targets/LoweringPrepareAArch64CXXABI.cpp | 10 +- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 2 +- .../Transforms/TargetLowering/Targets/X86.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 343 +++++++++--------- .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 10 +- .../Lowering/DirectToLLVM/LoweringHelpers.h | 4 +- .../ThroughMLIR/LowerCIRLoopToSCF.cpp | 4 +- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 89 ++--- .../Lowering/ThroughMLIR/LowerToMLIRHelpers.h | 4 +- 56 files changed, 729 insertions(+), 683 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 9201f0859be0..b9ad798d27d0 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -133,7 +133,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) { - auto width = lhs.getType().dyn_cast().getWidth(); + auto width = mlir::dyn_cast(lhs.getType()).getWidth(); auto shift = llvm::APInt(width, bits); return createShift(lhs, shift, isShiftLeft); } @@ -197,7 +197,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Value dst, bool _volatile = false, ::mlir::IntegerAttr align = {}, ::mlir::cir::MemOrderAttr order = {}) { - if (dst.getType().cast().getPointee() != + if (mlir::cast(dst.getType()).getPointee() != val.getType()) dst = createPtrBitcast(dst, val.getType()); return create(loc, val, dst, _volatile, align, order); @@ -312,11 +312,12 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Value createGetMemberOp(mlir::Location &loc, mlir::Value structPtr, const char *fldName, unsigned idx) { - assert(structPtr.getType().isa()); + assert(mlir::isa(structPtr.getType())); auto structBaseTy = - structPtr.getType().cast().getPointee(); - assert(structBaseTy.isa()); - auto fldTy = structBaseTy.cast().getMembers()[idx]; + mlir::cast(structPtr.getType()).getPointee(); + assert(mlir::isa(structBaseTy)); + auto fldTy = + mlir::cast(structBaseTy).getMembers()[idx]; auto fldPtrTy = ::mlir::cir::PointerType::get(getContext(), fldTy); return create(loc, fldPtrTy, structPtr, fldName, idx); @@ -340,7 +341,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { if (srcTy == newTy) return src; - if (srcTy.isa() && newTy.isa()) + if (mlir::isa(srcTy) && + mlir::isa(newTy)) return createBoolToInt(src, newTy); llvm_unreachable("unhandled extension cast"); @@ -360,7 +362,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } mlir::Value createPtrBitcast(mlir::Value src, mlir::Type newPointeeTy) { - assert(src.getType().isa() && "expected ptr src"); + assert(mlir::isa(src.getType()) && + "expected ptr src"); return createBitcast(src, getPointerTo(newPointeeTy)); } @@ -430,8 +433,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::TypedAttr getConstPtrAttr(mlir::Type t, int64_t v) { auto val = mlir::IntegerAttr::get(mlir::IntegerType::get(t.getContext(), 64), v); - return mlir::cir::ConstPtrAttr::get(getContext(), - t.cast(), val); + return mlir::cir::ConstPtrAttr::get( + getContext(), mlir::cast(t), val); } // Creates constant nullptr for pointer type ty. diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 9285cade6953..2cd912e42f6f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -125,11 +125,11 @@ def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> AttrBuilderWithInferredContext<(ins "mlir::cir::ArrayType":$type, "Attribute":$elts), [{ int zeros = 0; - auto typeSize = type.cast().getSize(); - if (auto str = elts.dyn_cast()) + auto typeSize = mlir::cast(type).getSize(); + if (auto str = mlir::dyn_cast(elts)) zeros = typeSize - str.size(); else - zeros = typeSize - elts.cast().size(); + zeros = typeSize - mlir::cast(elts).size(); return $_get(type.getContext(), type, elts, zeros); }]> @@ -200,7 +200,7 @@ def IntAttr : CIR_Attr<"Int", "int", [TypedAttrInterface]> { return $_get(type.getContext(), type, value); }]>, AttrBuilderWithInferredContext<(ins "Type":$type, "int64_t":$value), [{ - IntType intType = type.cast(); + IntType intType = mlir::cast(type); mlir::APInt apValue(intType.getWidth(), value, intType.isSigned()); return $_get(intType.getContext(), intType, apValue); }]>, @@ -209,7 +209,7 @@ def IntAttr : CIR_Attr<"Int", "int", [TypedAttrInterface]> { int64_t getSInt() const { return getValue().getSExtValue(); } uint64_t getUInt() const { return getValue().getZExtValue(); } bool isNullValue() const { return getValue() == 0; } - uint64_t getBitWidth() const { return getType().cast().getWidth(); } + uint64_t getBitWidth() const { return mlir::cast(getType()).getWidth(); } }]; let genVerifyDecl = 1; let hasCustomAssemblyFormat = 1; @@ -257,11 +257,11 @@ def ConstPtrAttr : CIR_Attr<"ConstPtr", "ptr", [TypedAttrInterface]> { }]; let builders = [ AttrBuilderWithInferredContext<(ins "Type":$type, "mlir::IntegerAttr":$value), [{ - return $_get(type.getContext(), type.cast(), value); + return $_get(type.getContext(), mlir::cast(type), value); }]>, AttrBuilder<(ins "Type":$type, "mlir::IntegerAttr":$value), [{ - return $_get($_ctxt, type.cast(), value); + return $_get($_ctxt, mlir::cast(type), value); }]>, ]; let extraClassDeclaration = [{ diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h index 9f8be20f5e5f..e2fd966e3cb2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h @@ -44,11 +44,12 @@ class CIRDataLayout { // `useABI` is `true` if not using prefered alignment. unsigned getAlignment(mlir::Type ty, bool useABI) const { if (llvm::isa(ty)) { - auto sTy = ty.cast(); + auto sTy = mlir::cast(ty); if (sTy.getPacked() && useABI) return 1; } else if (llvm::isa(ty)) { - return getAlignment(ty.cast().getEltType(), useABI); + return getAlignment(mlir::cast(ty).getEltType(), + useABI); } return useABI ? layout.getTypeABIAlignment(ty) @@ -86,7 +87,7 @@ class CIRDataLayout { } unsigned getPointerTypeSizeInBits(mlir::Type Ty) const { - assert(Ty.isa() && + assert(mlir::isa(Ty) && "This should only be called with a pointer type"); return layout.getTypeSizeInBits(Ty); } @@ -96,7 +97,7 @@ class CIRDataLayout { } mlir::Type getIntPtrType(mlir::Type Ty) const { - assert(Ty.isa() && "Expected pointer type"); + assert(mlir::isa(Ty) && "Expected pointer type"); auto IntTy = mlir::cir::IntType::get(Ty.getContext(), getPointerTypeSizeInBits(Ty), false); return IntTy; diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index eb281891e8c5..6a76ec2f4bc4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -302,7 +302,7 @@ def PtrStrideOp : CIR_Op<"ptr_stride", let extraClassDeclaration = [{ // Get type pointed by the base pointer. mlir::Type getElementTy() { - return getBase().getType().cast().getPointee(); + return mlir::cast(getBase().getType()).getPointee(); } }]; @@ -343,7 +343,7 @@ def ConstantOp : CIR_Op<"const", let extraClassDeclaration = [{ bool isNullPtr() { - if (const auto ptrAttr = getValue().dyn_cast()) + if (const auto ptrAttr = mlir::dyn_cast(getValue())) return ptrAttr.isNullValue(); return false; } @@ -389,7 +389,7 @@ class AllocaTypesMatchWith().getPointee()">]> { + "cast($_self).getPointee()">]> { let summary = "Defines a scope-local variable"; let description = [{ The `cir.alloca` operation defines a scope-local variable. @@ -449,7 +449,7 @@ def AllocaOp : CIR_Op<"alloca", [ let extraClassDeclaration = [{ // Whether the alloca input type is a pointer. - bool isPointerType() { return getAllocaType().isa<::mlir::cir::PointerType>(); } + bool isPointerType() { return ::mlir::isa<::mlir::cir::PointerType>(getAllocaType()); } bool isDynamic() { return (bool)getDynAllocSize(); } }]; @@ -473,7 +473,7 @@ def AllocaOp : CIR_Op<"alloca", [ def LoadOp : CIR_Op<"load", [ TypesMatchWith<"type of 'result' matches pointee type of 'addr'", "addr", "result", - "$_self.cast().getPointee()">]> { + "cast($_self).getPointee()">]> { let summary = "Load value from memory adddress"; let description = [{ @@ -531,7 +531,7 @@ def LoadOp : CIR_Op<"load", [ def StoreOp : CIR_Op<"store", [ TypesMatchWith<"type of 'value' matches pointee type of 'addr'", "addr", "value", - "$_self.cast().getPointee()">]> { + "cast($_self).getPointee()">]> { let summary = "Store value to memory address"; let description = [{ @@ -1459,7 +1459,7 @@ def CmpThreeWayOp : CIR_Op<"cmp3way", [Pure, SameTypeOperands]> { /// Determine whether this three-way comparison compares integral operands. bool isIntegralComparison() { - return getLhs().getType().isa(); + return mlir::isa(getLhs().getType()); } }]; } @@ -2269,7 +2269,7 @@ def GetMemberOp : CIR_Op<"get_member"> { /// Return the result type. mlir::cir::PointerType getResultTy() { - return getResult().getType().cast(); + return mlir::cast(getResult().getType()); } }]; @@ -2339,7 +2339,7 @@ def GetRuntimeMemberOp : CIR_Op<"get_runtime_member"> { def VecInsertOp : CIR_Op<"vec.insert", [Pure, TypesMatchWith<"argument type matches vector element type", "vec", "value", - "$_self.cast().getEltType()">, + "cast($_self).getEltType()">, AllTypesMatch<["result", "vec"]>]> { let summary = "Insert one element into a vector object"; @@ -2366,7 +2366,7 @@ def VecInsertOp : CIR_Op<"vec.insert", [Pure, def VecExtractOp : CIR_Op<"vec.extract", [Pure, TypesMatchWith<"type of 'result' matches element type of 'vec'", "vec", - "result", "$_self.cast().getEltType()">]> { + "result", "cast($_self).getEltType()">]> { let summary = "Extract one element from a vector object"; let description = [{ @@ -2419,7 +2419,7 @@ def VecCreateOp : CIR_Op<"vec.create", [Pure]> { def VecSplatOp : CIR_Op<"vec.splat", [Pure, TypesMatchWith<"type of 'value' matches element type of 'result'", "result", - "value", "$_self.cast().getEltType()">]> { + "value", "cast($_self).getEltType()">]> { let summary = "Convert a scalar into a vector"; let description = [{ @@ -2834,7 +2834,7 @@ def CallOp : CIR_CallOp<"call"> { $_state.addOperands(operands); if (callee) $_state.addAttribute("callee", callee); - if (resType && !resType.isa()) + if (resType && !isa(resType)) $_state.addTypes(resType); }]>, OpBuilder<(ins "Value":$ind_target, @@ -3427,7 +3427,7 @@ def VAArgOp : CIR_Op<"va.arg">, def AllocException : CIR_Op<"alloc_exception", [ AllocaTypesMatchWith<"'allocType' matches pointee type of 'addr'", "addr", "allocType", - "$_self.cast().getPointee()">]> { + "cast($_self).getPointee()">]> { let summary = "Defines a scope-local variable"; let description = [{ Implements a slightly higher level __cxa_allocate_exception: diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index ca7ed7730220..99267a88dc69 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -78,9 +78,9 @@ def CIR_IntType : CIR_Type<"Int", "int", // Unsigned integer type of a specific width. class UInt : Type()">, - CPred<"$_self.cast<::mlir::cir::IntType>().isUnsigned()">, - CPred<"$_self.cast<::mlir::cir::IntType>().getWidth() == " # width> + CPred<"::mlir::isa<::mlir::cir::IntType>($_self)">, + CPred<"::mlir::cast<::mlir::cir::IntType>($_self).isUnsigned()">, + CPred<"::mlir::cast<::mlir::cir::IntType>($_self).getWidth() == " # width> ]>, width # "-bit unsigned integer", "::mlir::cir::IntType">, BuildableType< "mlir::cir::IntType::get($_builder.getContext(), " @@ -97,9 +97,9 @@ def UInt64 : UInt<64>; // Signed integer type of a specific width. class SInt : Type()">, - CPred<"$_self.cast<::mlir::cir::IntType>().isSigned()">, - CPred<"$_self.cast<::mlir::cir::IntType>().getWidth() == " # width> + CPred<"::mlir::isa<::mlir::cir::IntType>($_self)">, + CPred<"::mlir::cast<::mlir::cir::IntType>($_self).isSigned()">, + CPred<"::mlir::cast<::mlir::cir::IntType>($_self).getWidth() == " # width> ]>, width # "-bit signed integer", "::mlir::cir::IntType">, BuildableType< "mlir::cir::IntType::get($_builder.getContext(), " @@ -230,7 +230,7 @@ def CIR_PointerType : CIR_Type<"Pointer", "ptr", let extraClassDeclaration = [{ bool isVoidPtr() const { - return getPointee().isa(); + return mlir::isa(getPointee()); } }]; } @@ -412,9 +412,9 @@ def CIR_VoidType : CIR_Type<"Void", "void"> { // Pointer to void def VoidPtr : Type< And<[ - CPred<"$_self.isa<::mlir::cir::PointerType>()">, - CPred<"$_self.cast<::mlir::cir::PointerType>()" - ".getPointee().isa<::mlir::cir::VoidType>()">, + CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::mlir::cir::VoidType>(" + "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())">, ]>, "void*">, BuildableType< "mlir::cir::PointerType::get($_builder.getContext()," @@ -424,28 +424,28 @@ def VoidPtr : Type< // Pointer to a primitive int, float or double def PrimitiveIntOrFPPtr : Type< And<[ - CPred<"$_self.isa<::mlir::cir::PointerType>()">, - CPred<"$_self.cast<::mlir::cir::PointerType>()" - ".getPointee().isa<::mlir::cir::IntType," - "::mlir::cir::SingleType, ::mlir::cir::DoubleType>()">, + CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::mlir::cir::IntType, ::mlir::cir::SingleType," + "::mlir::cir::DoubleType>(" + "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())">, ]>, "{int,void}*"> { } // Pointer to struct def StructPtr : Type< And<[ - CPred<"$_self.isa<::mlir::cir::PointerType>()">, - CPred<"$_self.cast<::mlir::cir::PointerType>()" - ".getPointee().isa<::mlir::cir::StructType>()">, + CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::mlir::cir::StructType>(" + "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())"> ]>, "!cir.struct*"> { } // Pointers to exception info def ExceptionInfoPtr : Type< And<[ - CPred<"$_self.isa<::mlir::cir::PointerType>()">, - CPred<"$_self.cast<::mlir::cir::PointerType>()" - ".getPointee().isa<::mlir::cir::ExceptionInfoType>()">, + CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::mlir::cir::ExceptionInfoType>(" + "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())"> ]>, "!cir.eh_info*">, BuildableType< "mlir::cir::PointerType::get($_builder.getContext()," @@ -455,13 +455,14 @@ def ExceptionInfoPtr : Type< // Pooint to pointers to exception info def ExceptionInfoPtrPtr : Type< And<[ - CPred<"$_self.isa<::mlir::cir::PointerType>()">, + CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, And<[ - CPred<"$_self.cast<::mlir::cir::PointerType>()" - ".getPointee().isa<::mlir::cir::PointerType>()">, - CPred<"$_self.cast<::mlir::cir::PointerType>()" - ".getPointee().cast<::mlir::cir::PointerType>()" - ".getPointee().isa<::mlir::cir::ExceptionInfoType>()">, + CPred<"::mlir::isa<::mlir::cir::PointerType>(" + "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee()">, + CPred<"::mlir::isa<::mlir::cir::ExceptionInfoType>(" + "::mlir::cast<::mlir::cir::PointerType>(" + "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())" + ".getPointee()))"> ]> ]>, "!cir.eh_info**">, BuildableType< @@ -473,11 +474,11 @@ def ExceptionInfoPtrPtr : Type< // Vector of integral type def IntegerVector : Type< And<[ - CPred<"$_self.isa<::mlir::cir::VectorType>()">, - CPred<"$_self.cast<::mlir::cir::VectorType>()" - ".getEltType().isa<::mlir::cir::IntType>()">, - CPred<"$_self.cast<::mlir::cir::VectorType>()" - ".getEltType().cast<::mlir::cir::IntType>()" + CPred<"::mlir::isa<::mlir::cir::VectorType>($_self)">, + CPred<"::mlir::isa<::mlir::cir::IntType>(" + "::mlir::cast<::mlir::cir::VectorType>($_self).getEltType())">, + CPred<"::mlir::cast<::mlir::cir::IntType>(" + "::mlir::cast<::mlir::cir::VectorType>($_self).getEltType())" ".isPrimitive()"> ]>, "!cir.vector of !cir.int"> { } @@ -485,9 +486,9 @@ def IntegerVector : Type< // Pointer to Arrays def ArrayPtr : Type< And<[ - CPred<"$_self.isa<::mlir::cir::PointerType>()">, - CPred<"$_self.cast<::mlir::cir::PointerType>()" - ".getPointee().isa<::mlir::cir::ArrayType>()">, + CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::mlir::cir::ArrayType>(" + "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())">, ]>, "!cir.ptr"> { } @@ -495,7 +496,7 @@ def ArrayPtr : Type< // StructType (defined in cpp files) //===----------------------------------------------------------------------===// -def CIR_StructType : Type()">, +def CIR_StructType : Type($_self)">, "CIR struct type">; //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 093420b4fee3..8843b9847074 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -1,6 +1,13 @@ include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) +# Report use of deprecated APIs as errors +if (MSVC) + add_compile_options("/we4996") +else() + add_compile_options("-Werror=deprecated-declarations") +endif(MSVC) + add_subdirectory(Dialect) add_subdirectory(CodeGen) add_subdirectory(FrontendAction) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index ac3afd779919..0d1ca5cd2944 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -40,7 +40,7 @@ class Address { KnownNonNull_t IsKnownNonNull = NotKnownNonNull) : PointerAndKnownNonNull(pointer, IsKnownNonNull), ElementType(elementType), Alignment(alignment) { - assert(pointer.getType().isa() && + assert(mlir::isa(pointer.getType()) && "Expected cir.ptr type"); assert(pointer && "Pointer cannot be null"); @@ -48,9 +48,10 @@ class Address { assert(!alignment.isZero() && "Alignment cannot be zero"); } Address(mlir::Value pointer, clang::CharUnits alignment) - : Address(pointer, - pointer.getType().cast().getPointee(), - alignment) { + : Address( + pointer, + mlir::cast(pointer.getType()).getPointee(), + alignment) { assert((!alignment.isZero() || pointer == nullptr) && "creating valid address with invalid alignment"); @@ -104,7 +105,7 @@ class Address { /// Return the type of the pointer value. mlir::cir::PointerType getType() const { - return getPointer().getType().cast(); + return mlir::cast(getPointer().getType()); } mlir::Type getElementType() const { diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index ee76be657c23..e88eb1da098f 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -294,7 +294,7 @@ static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S, // Truncate the integer result to the right size, note that TruncTy can be // a pointer. - if (TruncTy.isa()) + if (mlir::isa(TruncTy)) Tmp = Builder.createFloatingCast(Tmp, TruncTy); else if (isa(TruncTy) && isa(Tmp.getType())) { @@ -652,7 +652,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { for (auto typ : ArgElemTypes) { if (typ) { auto op = Args[i++]; - assert(op.getType().isa() && + assert(mlir::isa(op.getType()) && "pointer type expected"); assert(cast(op.getType()).getPointee() == typ && "element type differs from pointee type!"); diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 1b1ca28ea9b5..36c2e33de195 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -302,7 +302,7 @@ bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const { } Address AtomicInfo::castToAtomicIntPointer(Address addr) const { - auto intTy = addr.getElementType().dyn_cast(); + auto intTy = mlir::dyn_cast(addr.getElementType()); // Don't bother with int casts if the integer size is the same. if (intTy && intTy.getWidth() == AtomicSizeInBits) return addr; @@ -342,8 +342,8 @@ static mlir::cir::IntAttr getConstOpIntAttr(mlir::Value v) { while (auto c = dyn_cast(op)) op = c.getOperand().getDefiningOp(); if (auto c = dyn_cast(op)) { - if (c.getType().isa()) - constVal = c.getValue().cast(); + if (mlir::isa(c.getType())) + constVal = mlir::cast(c.getValue()); } return constVal; } @@ -376,7 +376,8 @@ static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, auto cmp = builder.createNot(cmpxchg.getCmp()); builder.create( loc, cmp, false, [&](mlir::OpBuilder &, mlir::Location) { - auto ptrTy = Val1.getPointer().getType().cast(); + auto ptrTy = + mlir::cast(Val1.getPointer().getType()); if (Val1.getElementType() != ptrTy.getPointee()) { Val1 = Val1.withPointer(builder.createPtrBitcast( Val1.getPointer(), Val1.getElementType())); @@ -494,7 +495,8 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, // TODO(cir): this logic should be part of createStore, but doing so // currently breaks CodeGen/union.cpp and CodeGen/union.cpp. - auto ptrTy = Dest.getPointer().getType().cast(); + auto ptrTy = + mlir::cast(Dest.getPointer().getType()); if (Dest.getElementType() != ptrTy.getPointee()) { Dest = Dest.withPointer( builder.createPtrBitcast(Dest.getPointer(), Dest.getElementType())); @@ -659,7 +661,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, // TODO(cir): this logic should be part of createStore, but doing so currently // breaks CodeGen/union.cpp and CodeGen/union.cpp. - auto ptrTy = Dest.getPointer().getType().cast(); + auto ptrTy = mlir::cast(Dest.getPointer().getType()); if (Dest.getElementType() != ptrTy.getPointee()) { Dest = Dest.withPointer( builder.createPtrBitcast(Dest.getPointer(), Dest.getElementType())); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index ad57fa91a70a..8a36dd4d1d26 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -145,7 +145,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } mlir::TypedAttr getConstNullPtrAttr(mlir::Type t) { - assert(t.isa() && "expected cir.ptr"); + assert(mlir::isa(t) && "expected cir.ptr"); return getConstPtrAttr(t, 0); } @@ -184,13 +184,13 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { bool packed = false, mlir::Type type = {}) { llvm::SmallVector members; - auto structTy = type.dyn_cast(); + auto structTy = mlir::dyn_cast(type); assert(structTy && "expected cir.struct"); // Collect members and check if they are all zero. bool isZero = true; for (auto &attr : arrayAttr) { - const auto typedAttr = attr.dyn_cast(); + const auto typedAttr = mlir::dyn_cast(attr); members.push_back(typedAttr.getType()); isZero &= isNullValue(typedAttr); } @@ -212,7 +212,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Type ty = {}) { llvm::SmallVector members; for (auto &f : arrayAttr) { - auto ta = f.dyn_cast(); + auto ta = mlir::dyn_cast(f); assert(ta && "expected typed attribute member"); members.push_back(ta.getType()); } @@ -220,7 +220,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { if (!ty) ty = getAnonStructTy(members, packed); - auto sTy = ty.dyn_cast(); + auto sTy = mlir::dyn_cast(ty); assert(sTy && "expected struct type"); return mlir::cir::ConstStructAttr::get(sTy, arrayAttr); } @@ -255,23 +255,23 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } mlir::TypedAttr getZeroInitAttr(mlir::Type ty) { - if (ty.isa()) + if (mlir::isa(ty)) return mlir::cir::IntAttr::get(ty, 0); - if (auto fltType = ty.dyn_cast()) + if (auto fltType = mlir::dyn_cast(ty)) return mlir::cir::FPAttr::getZero(fltType); - if (auto fltType = ty.dyn_cast()) + if (auto fltType = mlir::dyn_cast(ty)) return mlir::cir::FPAttr::getZero(fltType); - if (auto fltType = ty.dyn_cast()) + if (auto fltType = mlir::dyn_cast(ty)) return mlir::cir::FPAttr::getZero(fltType); - if (auto fltType = ty.dyn_cast()) + if (auto fltType = mlir::dyn_cast(ty)) return mlir::cir::FPAttr::getZero(fltType); - if (auto arrTy = ty.dyn_cast()) + if (auto arrTy = mlir::dyn_cast(ty)) return getZeroAttr(arrTy); - if (auto ptrTy = ty.dyn_cast()) + if (auto ptrTy = mlir::dyn_cast(ty)) return getConstNullPtrAttr(ptrTy); - if (auto structTy = ty.dyn_cast()) + if (auto structTy = mlir::dyn_cast(ty)) return getZeroAttr(structTy); - if (ty.isa()) { + if (mlir::isa(ty)) { return getCIRBoolAttr(false); } llvm_unreachable("Zero initializer for given type is NYI"); @@ -280,22 +280,22 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { // TODO(cir): Once we have CIR float types, replace this by something like a // NullableValueInterface to allow for type-independent queries. bool isNullValue(mlir::Attribute attr) const { - if (attr.isa()) + if (mlir::isa(attr)) return true; - if (const auto ptrVal = attr.dyn_cast()) + if (const auto ptrVal = mlir::dyn_cast(attr)) return ptrVal.isNullValue(); - if (attr.isa()) + if (mlir::isa(attr)) return false; // TODO(cir): introduce char type in CIR and check for that instead. - if (const auto intVal = attr.dyn_cast()) + if (const auto intVal = mlir::dyn_cast(attr)) return intVal.isNullValue(); - if (const auto boolVal = attr.dyn_cast()) + if (const auto boolVal = mlir::dyn_cast(attr)) return !boolVal.getValue(); - if (auto fpAttr = attr.dyn_cast()) { + if (auto fpAttr = mlir::dyn_cast(attr)) { auto fpVal = fpAttr.getValue(); bool ignored; llvm::APFloat FV(+0.0); @@ -304,10 +304,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return FV.bitwiseIsEqual(fpVal); } - if (const auto structVal = attr.dyn_cast()) { + if (const auto structVal = + mlir::dyn_cast(attr)) { for (const auto elt : structVal.getMembers()) { // FIXME(cir): the struct's ID should not be considered a member. - if (elt.isa()) + if (mlir::isa(elt)) continue; if (!isNullValue(elt)) return false; @@ -315,10 +316,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return true; } - if (const auto arrayVal = attr.dyn_cast()) { - if (arrayVal.getElts().isa()) + if (const auto arrayVal = mlir::dyn_cast(attr)) { + if (mlir::isa(arrayVal.getElts())) return false; - for (const auto elt : arrayVal.getElts().cast()) { + for (const auto elt : mlir::cast(arrayVal.getElts())) { if (!isNullValue(elt)) return false; } @@ -386,7 +387,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { bool isInt64Ty(mlir::Type i) { return i == typeCache.UInt64Ty || i == typeCache.SInt64Ty; } - bool isInt(mlir::Type i) { return i.isa(); } + bool isInt(mlir::Type i) { return mlir::isa(i); } mlir::cir::LongDoubleType getLongDoubleTy(const llvm::fltSemantics &format) const { @@ -495,7 +496,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { const clang::RecordDecl *ast = nullptr) { llvm::SmallVector members; for (auto &attr : fields) { - const auto typedAttr = attr.dyn_cast(); + const auto typedAttr = mlir::dyn_cast(attr); members.push_back(typedAttr.getType()); } @@ -510,9 +511,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } bool isSized(mlir::Type ty) { - if (ty.isa()) + if (mlir::isa(ty)) return true; assert(0 && "Unimplemented size for type"); return false; @@ -553,7 +554,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::Type t, uint64_t C) { - auto intTy = t.dyn_cast(); + auto intTy = mlir::dyn_cast(t); assert(intTy && "expected mlir::cir::IntType"); return create(loc, intTy, mlir::cir::IntAttr::get(t, C)); @@ -583,9 +584,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { // TODO: dispatch creation for primitive types. - assert( - (ty.isa() || ty.isa()) && - "NYI for other types"); + assert((mlir::isa(ty) || + mlir::isa(ty)) && + "NYI for other types"); return create(loc, ty, getZeroAttr(ty)); } @@ -617,7 +618,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value createNeg(mlir::Value value) { - if (auto intTy = value.getType().dyn_cast()) { + if (auto intTy = mlir::dyn_cast(value.getType())) { // Source is a unsigned integer: first cast it to signed. if (intTy.isUnsigned()) value = createIntCast(value, getSIntNTy(intTy.getWidth())); @@ -776,7 +777,8 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } mlir::Value createLoad(mlir::Location loc, Address addr) { - auto ptrTy = addr.getPointer().getType().dyn_cast(); + auto ptrTy = + mlir::dyn_cast(addr.getPointer().getType()); if (addr.getElementType() != ptrTy.getPointee()) addr = addr.withPointer( createPtrBitcast(addr.getPointer(), addr.getElementType())); @@ -788,7 +790,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value ptr, llvm::MaybeAlign align, bool isVolatile) { - if (ty != ptr.getType().cast().getPointee()) + if (ty != mlir::cast(ptr.getType()).getPointee()) ptr = createPtrBitcast(ptr, ty); uint64_t alignment = align ? align->value() : 0; return CIRBaseBuilderTy::createLoad(loc, ptr, isVolatile, alignment); @@ -877,17 +879,17 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Type SubType; - if (auto ArrayTy = Ty.dyn_cast()) { + if (auto ArrayTy = mlir::dyn_cast(Ty)) { auto EltSize = Layout.getTypeAllocSize(ArrayTy.getEltType()); Indices.push_back(Offset / EltSize); SubType = ArrayTy.getEltType(); Offset %= EltSize; - } else if (auto PtrTy = Ty.dyn_cast()) { + } else if (auto PtrTy = mlir::dyn_cast(Ty)) { auto EltSize = Layout.getTypeAllocSize(PtrTy.getPointee()); Indices.push_back(Offset / EltSize); SubType = PtrTy.getPointee(); Offset %= EltSize; - } else if (auto StructTy = Ty.dyn_cast()) { + } else if (auto StructTy = mlir::dyn_cast(Ty)) { auto Elts = StructTy.getMembers(); unsigned Pos = 0; for (size_t I = 0; I < Elts.size(); ++I) { @@ -963,7 +965,8 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::GetRuntimeMemberOp createGetIndirectMember(mlir::Location loc, mlir::Value objectPtr, mlir::Value memberPtr) { - auto memberPtrTy = memberPtr.getType().cast(); + auto memberPtrTy = + mlir::cast(memberPtr.getType()); // TODO(cir): consider address space. assert(!MissingFeatures::addressSpace()); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 058fa54ecb04..47aac08600cc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -838,7 +838,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_object_size: { unsigned Type = E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); - auto ResType = ConvertType(E->getType()).dyn_cast(); + auto ResType = + mlir::dyn_cast(ConvertType(E->getType())); assert(ResType && "not sure what to do?"); // We pass this builtin onto the optimizer so that it can figure out the @@ -969,14 +970,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // inlining. return RValue::get( builder.getConstInt(getLoc(E->getSourceRange()), - ResultType.cast(), 0)); + mlir::cast(ResultType), 0)); if (Arg->HasSideEffects(getContext())) // The argument is unevaluated, so be conservative if it might have // side-effects. return RValue::get( builder.getConstInt(getLoc(E->getSourceRange()), - ResultType.cast(), 0)); + mlir::cast(ResultType), 0)); mlir::Value ArgValue = buildScalarExpr(Arg); if (ArgType->isObjCObjectPointerType()) @@ -1085,7 +1086,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, auto EncompassingCIRTy = mlir::cir::IntType::get( builder.getContext(), EncompassingInfo.Width, EncompassingInfo.Signed); auto ResultCIRTy = - CGM.getTypes().ConvertType(ResultQTy).cast(); + mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); mlir::Value Left = buildScalarExpr(LeftArg); mlir::Value Right = buildScalarExpr(RightArg); @@ -1196,7 +1197,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, clang::QualType ResultQTy = ResultArg->getType()->castAs()->getPointeeType(); auto ResultCIRTy = - CGM.getTypes().ConvertType(ResultQTy).cast(); + mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); auto Loc = getLoc(E->getSourceRange()); auto ArithResult = @@ -1266,7 +1267,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, if (auto V = buildTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { switch (EvalKind) { case TEK_Scalar: - if (V.getType().isa()) + if (mlir::isa(V.getType())) return RValue::get(nullptr); return RValue::get(V); case TEK_Aggregate: @@ -1424,7 +1425,7 @@ mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, llvm_unreachable("NYI"); auto Ptr = EmittedE ? EmittedE : buildScalarExpr(E); - assert(Ptr.getType().isa() && + assert(mlir::isa(Ptr.getType()) && "Non-pointer passed to __builtin_object_size?"); // LLVM intrinsics (which CIR lowers to at some point, only supports 0 diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index bf44ddb263b7..c453de137bdd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -553,7 +553,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, switch (ArgInfo.getKind()) { case ABIArgInfo::Direct: { - if (!ArgInfo.getCoerceToType().isa() && + if (!mlir::isa(ArgInfo.getCoerceToType()) && ArgInfo.getCoerceToType() == convertType(info_it->type) && ArgInfo.getDirectOffset() == 0) { assert(NumCIRArgs == 1); @@ -567,7 +567,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // We might have to widen integers, but we should never truncate. if (ArgInfo.getCoerceToType() != V.getType() && - V.getType().isa()) + mlir::isa(V.getType())) llvm_unreachable("NYI"); // If the argument doesn't match, perform a bitcast to coerce it. This @@ -733,9 +733,9 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, } else { [[maybe_unused]] auto resultTypes = CalleePtr->getResultTypes(); [[maybe_unused]] auto FuncPtrTy = - resultTypes.front().dyn_cast(); + mlir::dyn_cast(resultTypes.front()); assert((resultTypes.size() == 1) && FuncPtrTy && - FuncPtrTy.getPointee().isa() && + mlir::isa(FuncPtrTy.getPointee()) && "expected pointer to function"); indirectFuncTy = CIRFuncTy; @@ -946,7 +946,7 @@ void CIRGenFunction::buildCallArgs( // First, if a prototype was provided, use those argument types. bool IsVariadic = false; if (Prototype.P) { - const auto *MD = Prototype.P.dyn_cast(); + const auto *MD = mlir::dyn_cast(Prototype.P); assert(!MD && "ObjCMethodDecl NYI"); const auto *FPT = Prototype.P.get(); diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index c892d1ebc9a8..bf18b1dcbba6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1614,7 +1614,8 @@ void CIRGenFunction::buildCXXAggrConstructorCall( auto constantCount = dyn_cast(numElements.getDefiningOp()); if (constantCount) { - auto constIntAttr = constantCount.getValue().dyn_cast(); + auto constIntAttr = + mlir::dyn_cast(constantCount.getValue()); // Just skip out if the constant count is zero. if (constIntAttr && constIntAttr.getUInt() == 0) return; @@ -1623,7 +1624,8 @@ void CIRGenFunction::buildCXXAggrConstructorCall( llvm_unreachable("NYI"); } - auto arrayTy = arrayBase.getElementType().dyn_cast(); + auto arrayTy = + mlir::dyn_cast(arrayBase.getElementType()); assert(arrayTy && "expected array type"); auto elementType = arrayTy.getEltType(); auto ptrToElmType = builder.getPointerTo(elementType); diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index b19fd67fcee0..1c43655ced6b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -305,7 +305,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { // frequently return an empty Attribute, to signal we want to codegen // some trivial ctor calls and whatnots. constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D); - if (constant && !constant.isa() && + if (constant && !mlir::isa(constant) && (trivialAutoVarInit != LangOptions::TrivialAutoVarInitKind::Uninitialized)) { llvm_unreachable("NYI"); @@ -333,7 +333,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { } // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. - auto typedConstant = constant.dyn_cast(); + auto typedConstant = mlir::dyn_cast(constant); assert(typedConstant && "expected typed attribute"); if (!emission.IsConstantAggregate) { // For simple scalar/complex initialization, store the value directly. @@ -533,7 +533,7 @@ mlir::cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( const VarDecl &D, mlir::cir::GlobalOp GV, mlir::cir::GetGlobalOp GVAddr) { ConstantEmitter emitter(*this); mlir::TypedAttr Init = - emitter.tryEmitForInitializer(D).dyn_cast(); + mlir::dyn_cast(emitter.tryEmitForInitializer(D)); assert(Init && "Expected typed attribute"); // If constant emission failed, then this should be a C++ static @@ -1141,7 +1141,8 @@ void CIRGenFunction::emitDestroy(Address addr, QualType type, // But if the array length is constant, we can suppress that. auto constantCount = dyn_cast(length.getDefiningOp()); if (constantCount) { - auto constIntAttr = constantCount.getValue().dyn_cast(); + auto constIntAttr = + mlir::dyn_cast(constantCount.getValue()); // ...and if it's constant zero, we can just skip the entire thing. if (constIntAttr && constIntAttr.getUInt() == 0) return; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 8db9ce53f547..a0edb6ff81ef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1489,9 +1489,11 @@ static mlir::Value maybeBuildArrayDecay(mlir::OpBuilder &builder, mlir::Location loc, mlir::Value arrayPtr, mlir::Type eltTy) { - auto arrayPtrTy = arrayPtr.getType().dyn_cast<::mlir::cir::PointerType>(); + auto arrayPtrTy = + ::mlir::dyn_cast<::mlir::cir::PointerType>(arrayPtr.getType()); assert(arrayPtrTy && "expected pointer type"); - auto arrayTy = arrayPtrTy.getPointee().dyn_cast<::mlir::cir::ArrayType>(); + auto arrayTy = + ::mlir::dyn_cast<::mlir::cir::ArrayType>(arrayPtrTy.getPointee()); if (arrayTy) { mlir::cir::PointerType flatPtrTy = @@ -1517,17 +1519,18 @@ Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, // If the array type was an incomplete type, we need to make sure // the decay ends up being the right type. auto lvalueAddrTy = - Addr.getPointer().getType().dyn_cast(); + mlir::dyn_cast(Addr.getPointer().getType()); assert(lvalueAddrTy && "expected pointer"); if (E->getType()->isVariableArrayType()) return Addr; - auto pointeeTy = lvalueAddrTy.getPointee().dyn_cast(); + auto pointeeTy = + mlir::dyn_cast(lvalueAddrTy.getPointee()); assert(pointeeTy && "expected array"); mlir::Type arrayTy = convertType(E->getType()); - assert(arrayTy.isa() && "expected array"); + assert(mlir::isa(arrayTy) && "expected array"); assert(pointeeTy == arrayTy); // The result of this decay conversion points to an array element within the @@ -1604,7 +1607,7 @@ static bool isPreserveAIArrayBase(CIRGenFunction &CGF, const Expr *ArrayBase) { static mlir::IntegerAttr getConstantIndexOrNull(mlir::Value idx) { // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr? if (auto constantOp = dyn_cast(idx.getDefiningOp())) - return constantOp.getValue().dyn_cast(); + return mlir::dyn_cast(constantOp.getValue()); return {}; } @@ -1722,8 +1725,8 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, llvm_unreachable("array bounds sanitizer is NYI"); // Extend or truncate the index type to 32 or 64-bits. - auto ptrTy = Idx.getType().dyn_cast(); - if (Promote && ptrTy && ptrTy.getPointee().isa()) + auto ptrTy = mlir::dyn_cast(Idx.getType()); + if (Promote && ptrTy && mlir::isa(ptrTy.getPointee())) llvm_unreachable("index type cast is NYI"); return Idx; @@ -2365,7 +2368,7 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, builder.restoreInsertionPoint(toInsert); // Block does not return: build empty yield. - if (yieldTy.isa()) { + if (mlir::isa(yieldTy)) { builder.create(loc); } else { // Block returns: set null yield value. mlir::Value op0 = builder.getNullValue(yieldTy, loc); @@ -2813,7 +2816,7 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, } auto Ptr = Addr.getPointer(); - if (ElemTy.isa()) { + if (mlir::isa(ElemTy)) { ElemTy = mlir::cir::IntType::get(builder.getContext(), 8, true); auto ElemPtrTy = mlir::cir::PointerType::get(builder.getContext(), ElemTy); Ptr = builder.create(Loc, ElemPtrTy, @@ -3112,7 +3115,7 @@ CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) { // somewhat heavy refactoring...) auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(), result.Val, resultType); - mlir::TypedAttr cstToEmit = C.dyn_cast_or_null(); + mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present(C); assert(cstToEmit && "expect a typed attribute"); // Make sure we emit a debug reference to the global variable. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 2f3c2b3c384f..a0830a9f2108 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -467,7 +467,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, for (uint64_t i = 0; i != NumInitElements; ++i) { if (i == 1) one = CGF.getBuilder().getConstInt( - loc, CGF.PtrDiffTy.cast(), 1); + loc, mlir::cast(CGF.PtrDiffTy), 1); // Advance to the next element. if (i > 0) { @@ -502,8 +502,8 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, // Advance to the start of the rest of the array. if (NumInitElements) { - auto one = - builder.getConstInt(loc, CGF.PtrDiffTy.cast(), 1); + auto one = builder.getConstInt( + loc, mlir::cast(CGF.PtrDiffTy), 1); element = builder.create(loc, cirElementPtrType, element, one); @@ -519,7 +519,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, // Compute the end of array auto numArrayElementsConst = builder.getConstInt( - loc, CGF.PtrDiffTy.cast(), NumArrayElements); + loc, mlir::cast(CGF.PtrDiffTy), NumArrayElements); mlir::Value end = builder.create( loc, cirElementPtrType, begin, numArrayElementsConst); @@ -554,7 +554,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, // Advance pointer and store them to temporary variable auto one = builder.getConstInt( - loc, CGF.PtrDiffTy.cast(), 1); + loc, mlir::cast(CGF.PtrDiffTy), 1); auto nextElement = builder.create( loc, cirElementPtrType, currentElement, one); CGF.buildStoreThroughLValue(RValue::get(nextElement), tmpLV); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 9c6529d87633..cf18593ff711 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -1088,7 +1088,7 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF, mlir::Location Loc, QualType DestTy) { mlir::Type DestCIRTy = CGF.ConvertType(DestTy); - assert(DestCIRTy.isa() && + assert(mlir::isa(DestCIRTy) && "result of dynamic_cast should be a ptr"); mlir::Value NullPtrValue = CGF.getBuilder().getNullPtr(DestCIRTy, Loc); @@ -1136,7 +1136,7 @@ mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, if (DCE->isAlwaysNull()) return buildDynamicCastToNull(*this, loc, destTy); - auto destCirTy = ConvertType(destTy).cast(); + auto destCirTy = mlir::cast(ConvertType(destTy)); return CGM.getCXXABI().buildDynamicCast(*this, loc, srcRecordTy, destRecordTy, destCirTy, isRefCast, ThisAddr.getPointer()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 1e5453a11691..993dbf52d1ee 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -151,7 +151,7 @@ static void replace(Container &C, size_t BeginOff, size_t EndOff, Range Vals) { bool ConstantAggregateBuilder::add(mlir::Attribute A, CharUnits Offset, bool AllowOverwrite) { // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. - mlir::TypedAttr C = A.dyn_cast(); + mlir::TypedAttr C = mlir::dyn_cast(A); assert(C && "expected typed attribute"); // Common case: appending to a layout. if (Offset >= Size) { @@ -319,7 +319,7 @@ std::optional ConstantAggregateBuilder::splitAt(CharUnits Pos) { // We found an element starting before Pos. Check for overlap. // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. mlir::TypedAttr C = - Elems[LastAtOrBeforePosIndex].dyn_cast(); + mlir::dyn_cast(Elems[LastAtOrBeforePosIndex]); assert(C && "expected typed attribute"); if (Offsets[LastAtOrBeforePosIndex] + getSize(C) <= Pos) return LastAtOrBeforePosIndex + 1; @@ -349,7 +349,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( // If we want an array type, see if all the elements are the same type and // appropriately spaced. - if (auto aty = DesiredTy.dyn_cast()) { + if (auto aty = mlir::dyn_cast(DesiredTy)) { llvm_unreachable("NYI"); } @@ -366,7 +366,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( CharUnits Align = CharUnits::One(); for (auto e : Elems) { // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. - auto C = e.dyn_cast(); + auto C = mlir::dyn_cast(e); assert(C && "expected typed attribute"); Align = std::max(Align, Utils.getAlignment(C)); } @@ -395,7 +395,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( if (!NaturalLayout) { CharUnits SizeSoFar = CharUnits::Zero(); for (size_t I = 0; I != Elems.size(); ++I) { - mlir::TypedAttr C = Elems[I].dyn_cast(); + mlir::TypedAttr C = mlir::dyn_cast(Elems[I]); assert(C && "expected typed attribute"); CharUnits Align = Utils.getAlignment(C); @@ -452,7 +452,7 @@ void ConstantAggregateBuilder::condense(CharUnits Offset, return; // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. - mlir::TypedAttr C = Elems[First].dyn_cast(); + mlir::TypedAttr C = mlir::dyn_cast(Elems[First]); assert(C && "expected typed attribute"); if (Length == 1 && Offsets[First] == Offset && getSize(C) == Size) { // Re-wrap single element structs if necessary. Otherwise, leave any single @@ -1263,9 +1263,10 @@ class ConstantLValueEmitter ConstantLValue applyOffset(ConstantLValue &C) { // Handle attribute constant LValues. - if (auto Attr = C.Value.dyn_cast()) { - if (auto GV = Attr.dyn_cast()) { - auto baseTy = GV.getType().cast().getPointee(); + if (auto Attr = mlir::dyn_cast(C.Value)) { + if (auto GV = mlir::dyn_cast(Attr)) { + auto baseTy = + mlir::cast(GV.getType()).getPointee(); auto destTy = CGM.getTypes().convertTypeForMem(DestType); assert(!GV.getIndices() && "Global view is already indexed"); return mlir::cir::GlobalViewAttr::get(destTy, GV.getSymbol(), @@ -1292,7 +1293,7 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { // non-zero null pointer and addrspace casts that aren't trivially // represented in LLVM IR. auto destTy = CGM.getTypes().convertTypeForMem(DestType); - assert(destTy.isa()); + assert(mlir::isa(destTy)); // If there's no base at all, this is a null or absolute pointer, // possibly cast back to an integer type. @@ -1315,7 +1316,7 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { // Convert to the appropriate type; this could be an lvalue for // an integer. FIXME: performAddrSpaceCast - if (destTy.isa()) { + if (mlir::isa(destTy)) { if (value.is()) return value.get(); llvm_unreachable("NYI"); @@ -1328,7 +1329,7 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { /// bitcast to pointer type. mlir::Attribute ConstantLValueEmitter::tryEmitAbsolute(mlir::Type destTy) { // If we're producing a pointer, this is easy. - auto destPtrTy = destTy.dyn_cast(); + auto destPtrTy = mlir::dyn_cast(destTy); assert(destPtrTy && "expected !cir.ptr type"); return CGM.getBuilder().getConstPtrAttr( destPtrTy, Value.getLValueOffset().getQuantity()); @@ -1652,8 +1653,8 @@ mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, } // Zero-extend bool. - auto typed = C.dyn_cast(); - if (typed && typed.getType().isa()) { + auto typed = mlir::dyn_cast(C); + if (typed && mlir::isa(typed.getType())) { // Already taken care given that bool values coming from // integers only carry true/false. } @@ -1666,7 +1667,7 @@ mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *E, assert(!destType->isVoidType() && "can't emit a void constant"); if (auto C = ConstExprEmitter(*this).Visit(const_cast(E), destType)) { - if (auto TypedC = C.dyn_cast_or_null()) + if (auto TypedC = mlir::dyn_cast_if_present(C)) return TypedC; llvm_unreachable("this should always be typed"); } @@ -1683,7 +1684,7 @@ mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *E, if (Success && !Result.hasSideEffects()) { auto C = tryEmitPrivate(Result.Val, destType); - if (auto TypedC = C.dyn_cast_or_null()) + if (auto TypedC = mlir::dyn_cast_if_present(C)) return TypedC; llvm_unreachable("this should always be typed"); } @@ -1702,9 +1703,9 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, assert(0 && "not implemented"); case APValue::Int: { mlir::Type ty = CGM.getCIRType(DestType); - if (ty.isa()) + if (mlir::isa(ty)) return builder.getCIRBoolAttr(Value.getInt().getZExtValue()); - assert(ty.isa() && "expected integral type"); + assert(mlir::isa(ty) && "expected integral type"); return CGM.getBuilder().getAttr(ty, Value.getInt()); } case APValue::Float: { @@ -1715,7 +1716,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, assert(0 && "not implemented"); else { mlir::Type ty = CGM.getCIRType(DestType); - assert(ty.isa() && + assert(mlir::isa(ty) && "expected floating-point type"); return CGM.getBuilder().getAttr(ty, Init); } @@ -1748,8 +1749,9 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, if (!C) return {}; - assert(C.isa() && "This should always be a TypedAttr."); - auto CTyped = C.cast(); + assert(mlir::isa(C) && + "This should always be a TypedAttr."); + auto CTyped = mlir::cast(C); if (I == 0) CommonElementType = CTyped.getType(); @@ -1779,8 +1781,8 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, if (const auto *memberFuncDecl = dyn_cast(memberDecl)) assert(0 && "not implemented"); - auto cirTy = - CGM.getTypes().ConvertType(DestType).cast(); + auto cirTy = mlir::cast( + CGM.getTypes().ConvertType(DestType)); const auto *fieldDecl = cast(memberDecl); return builder.getDataMemberAttr(cirTy, fieldDecl->getFieldIndex()); @@ -1835,7 +1837,7 @@ mlir::Value CIRGenModule::buildMemberPointerConstant(const UnaryOperator *E) { if (const auto *methodDecl = dyn_cast(decl)) assert(0 && "not implemented"); - auto ty = getCIRType(E->getType()).cast(); + auto ty = mlir::cast(getCIRType(E->getType())); // Otherwise, a member data pointer. const auto *fieldDecl = cast(decl); @@ -1846,7 +1848,7 @@ mlir::Value CIRGenModule::buildMemberPointerConstant(const UnaryOperator *E) { mlir::Attribute ConstantEmitter::emitAbstract(const Expr *E, QualType destType) { auto state = pushAbstract(); - auto C = tryEmitPrivate(E, destType).cast(); + auto C = mlir::cast(tryEmitPrivate(E, destType)); C = validateAndPopAbstract(C, state); if (!C) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 35ec7f0dc5ba..019705c247d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -178,7 +178,7 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitFloatingLiteral(const FloatingLiteral *E) { mlir::Type Ty = CGF.getCIRType(E->getType()); - assert(Ty.isa() && + assert(mlir::isa(Ty) && "expect floating-point type"); return Builder.create( CGF.getLoc(E->getExprLoc()), Ty, @@ -435,8 +435,9 @@ class ScalarExprEmitter : public StmtVisitor { // TODO(cir): Currently, we store bitwidths in CIR types only for // integers. This might also be required for other types. - auto srcCirTy = ConvertType(type).dyn_cast(); - auto promotedCirTy = ConvertType(type).dyn_cast(); + auto srcCirTy = mlir::dyn_cast(ConvertType(type)); + auto promotedCirTy = + mlir::dyn_cast(ConvertType(type)); assert(srcCirTy && promotedCirTy && "Expected integer type"); assert( @@ -499,7 +500,8 @@ class ScalarExprEmitter : public StmtVisitor { if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) llvm_unreachable("__fp16 type NYI"); - if (value.getType().isa()) { + if (mlir::isa( + value.getType())) { // Create the inc/dec operation. // NOTE(CIR): clang calls CreateAdd but folds this to a unary op auto kind = @@ -514,11 +516,11 @@ class ScalarExprEmitter : public StmtVisitor { const llvm::fltSemantics *FS; // Don't use getFloatTypeSemantics because Half isn't // necessarily represented using the "half" LLVM type. - if (value.getType().isa()) + if (mlir::isa(value.getType())) FS = &CGF.getTarget().getLongDoubleFormat(); - else if (value.getType().isa()) + else if (mlir::isa(value.getType())) FS = &CGF.getTarget().getHalfFormat(); - else if (value.getType().isa()) + else if (mlir::isa(value.getType())) FS = &CGF.getTarget().getBFloat16Format(); else llvm_unreachable("fp128 / ppc_fp128 NYI"); @@ -932,8 +934,8 @@ class ScalarExprEmitter : public StmtVisitor { // Unsigned integers and pointers. if (CGF.CGM.getCodeGenOpts().StrictVTablePointers && - LHS.getType().isa() && - RHS.getType().isa()) { + mlir::isa(LHS.getType()) && + mlir::isa(RHS.getType())) { llvm_unreachable("NYI"); } @@ -982,7 +984,7 @@ class ScalarExprEmitter : public StmtVisitor { if (SrcType->isIntegerType()) return buildIntToBoolConversion(Src, loc); - assert(Src.getType().isa<::mlir::cir::PointerType>()); + assert(::mlir::isa<::mlir::cir::PointerType>(Src.getType())); return buildPointerToBoolConversion(Src, SrcType); } @@ -1203,7 +1205,7 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, Expr *indexOperand = expr->getRHS(); // In a subtraction, the LHS is always the pointer. - if (!isSubtraction && !pointer.getType().isa()) { + if (!isSubtraction && !mlir::isa(pointer.getType())) { std::swap(pointer, index); std::swap(pointerOperand, indexOperand); } @@ -1337,8 +1339,8 @@ mlir::Value ScalarExprEmitter::buildRem(const BinOpInfo &Ops) { } mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { - if (Ops.LHS.getType().isa() || - Ops.RHS.getType().isa()) + if (mlir::isa(Ops.LHS.getType()) || + mlir::isa(Ops.RHS.getType())) return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/false); if (Ops.CompType->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { @@ -1381,7 +1383,7 @@ mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { // The LHS is always a pointer if either side is. - if (!Ops.LHS.getType().isa()) { + if (!mlir::isa(Ops.LHS.getType())) { if (Ops.CompType->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: { @@ -1424,7 +1426,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { // If the RHS is not a pointer, then we have normal pointer // arithmetic. - if (!Ops.RHS.getType().isa()) + if (!mlir::isa(Ops.RHS.getType())) return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/true); // Otherwise, this is a pointer subtraction @@ -1463,7 +1465,7 @@ mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { if (CGF.getLangOpts().OpenCL) llvm_unreachable("NYI"); else if ((SanitizeBase || SanitizeExponent) && - Ops.LHS.getType().isa()) { + mlir::isa(Ops.LHS.getType())) { llvm_unreachable("NYI"); } @@ -1485,7 +1487,7 @@ mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) { if (CGF.getLangOpts().OpenCL) llvm_unreachable("NYI"); else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && - Ops.LHS.getType().isa()) { + mlir::isa(Ops.LHS.getType())) { llvm_unreachable("NYI"); } @@ -1648,7 +1650,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { const MemberPointerType *MPT = CE->getType()->getAs(); assert(!MPT->isMemberFunctionPointerType() && "NYI"); - auto Ty = CGF.getCIRType(DestTy).cast(); + auto Ty = mlir::cast(CGF.getCIRType(DestTy)); return Builder.getNullDataMemberPtr(Ty, CGF.getLoc(E->getExprLoc())); } case CK_ReinterpretMemberPointer: @@ -1849,7 +1851,7 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { assert(!MissingFeatures::scalableVectors() && "NYI: scalable vector init"); assert(!MissingFeatures::vectorConstants() && "NYI: vector constants"); auto VectorType = - CGF.getCIRType(E->getType()).dyn_cast(); + mlir::dyn_cast(CGF.getCIRType(E->getType())); SmallVector Elements; for (Expr *init : E->inits()) { Elements.push_back(Visit(init)); @@ -1891,9 +1893,9 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { // ZExt result to the expr type. auto dstTy = ConvertType(E->getType()); - if (dstTy.isa()) + if (mlir::isa(dstTy)) return Builder.createBoolToInt(boolVal, dstTy); - if (dstTy.isa()) + if (mlir::isa(dstTy)) return boolVal; llvm_unreachable("destination type for logical-not unary operator is NYI"); @@ -1908,28 +1910,29 @@ mlir::Value ScalarExprEmitter::buildScalarCast( mlir::Type DstTy, ScalarConversionOpts Opts) { assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && "Internal error: matrix types not handled by this function."); - if (SrcTy.isa() || DstTy.isa()) + if (mlir::isa(SrcTy) || + mlir::isa(DstTy)) llvm_unreachable("Obsolete code. Don't use mlir::IntegerType with CIR."); mlir::Type FullDstTy = DstTy; - if (SrcTy.isa() && - DstTy.isa()) { + if (mlir::isa(SrcTy) && + mlir::isa(DstTy)) { // Use the element types of the vectors to figure out the CastKind. - SrcTy = SrcTy.dyn_cast().getEltType(); - DstTy = DstTy.dyn_cast().getEltType(); + SrcTy = mlir::dyn_cast(SrcTy).getEltType(); + DstTy = mlir::dyn_cast(DstTy).getEltType(); } - assert(!SrcTy.isa() && - !DstTy.isa() && + assert(!mlir::isa(SrcTy) && + !mlir::isa(DstTy) && "buildScalarCast given a vector type and a non-vector type"); std::optional CastKind; - if (SrcTy.isa()) { + if (mlir::isa(SrcTy)) { if (Opts.TreatBooleanAsSigned) llvm_unreachable("NYI: signed bool"); if (CGF.getBuilder().isInt(DstTy)) { CastKind = mlir::cir::CastKind::bool_to_int; - } else if (DstTy.isa()) { + } else if (mlir::isa(DstTy)) { CastKind = mlir::cir::CastKind::bool_to_float; } else { llvm_unreachable("Internal error: Cast to unexpected type"); @@ -1937,12 +1940,12 @@ mlir::Value ScalarExprEmitter::buildScalarCast( } else if (CGF.getBuilder().isInt(SrcTy)) { if (CGF.getBuilder().isInt(DstTy)) { CastKind = mlir::cir::CastKind::integral; - } else if (DstTy.isa()) { + } else if (mlir::isa(DstTy)) { CastKind = mlir::cir::CastKind::int_to_float; } else { llvm_unreachable("Internal error: Cast to unexpected type"); } - } else if (SrcTy.isa()) { + } else if (mlir::isa(SrcTy)) { if (CGF.getBuilder().isInt(DstTy)) { // If we can't recognize overflow as undefined behavior, assume that // overflow saturates. This protects against normal optimizations if we @@ -1952,7 +1955,7 @@ mlir::Value ScalarExprEmitter::buildScalarCast( if (Builder.getIsFPConstrained()) llvm_unreachable("NYI"); CastKind = mlir::cir::CastKind::float_to_int; - } else if (DstTy.isa()) { + } else if (mlir::isa(DstTy)) { // TODO: split this to createFPExt/createFPTrunc return Builder.createFloatingCast(Src, FullDstTy); } else { @@ -2354,7 +2357,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( builder.restoreInsertionPoint(toInsert); // Block does not return: build empty yield. - if (yieldTy.isa()) { + if (mlir::isa(yieldTy)) { builder.create(loc); } else { // Block returns: set null yield value. mlir::Value op0 = builder.getNullValue(yieldTy, loc); @@ -2516,7 +2519,7 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { } // 1 || RHS: If it is safe, just elide the RHS, and return 1/true. if (!CGF.ContainsLabel(E->getRHS())) { - if (auto intTy = ResTy.dyn_cast()) + if (auto intTy = mlir::dyn_cast(ResTy)) return Builder.getConstInt(Loc, intTy, 1); else return Builder.getBool(true, Loc); @@ -2545,11 +2548,11 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { Loc, RHSCondV, /*trueBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { SmallVector Locs; - if (Loc.isa()) { + if (mlir::isa(Loc)) { Locs.push_back(Loc); Locs.push_back(Loc); - } else if (Loc.isa()) { - auto fusedLoc = Loc.cast(); + } else if (mlir::isa(Loc)) { + auto fusedLoc = mlir::cast(Loc); Locs.push_back(fusedLoc.getLocations()[0]); Locs.push_back(fusedLoc.getLocations()[1]); } @@ -2565,11 +2568,11 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { /*falseBuilder*/ [&](mlir::OpBuilder &b, mlir::Location Loc) { SmallVector Locs; - if (Loc.isa()) { + if (mlir::isa(Loc)) { Locs.push_back(Loc); Locs.push_back(Loc); - } else if (Loc.isa()) { - auto fusedLoc = Loc.cast(); + } else if (mlir::isa(Loc)) { + auto fusedLoc = mlir::cast(Loc); Locs.push_back(fusedLoc.getLocations()[0]); Locs.push_back(fusedLoc.getLocations()[1]); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 84b95fc3767b..f5efa14796ab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1688,14 +1688,16 @@ CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, // llvm::ArrayType *llvmArrayType = // dyn_cast(addr.getElementType()); - auto cirArrayType = addr.getElementType().dyn_cast(); + auto cirArrayType = + mlir::dyn_cast(addr.getElementType()); while (cirArrayType) { assert(isa(arrayType)); countFromCLAs *= cirArrayType.getSize(); eltType = arrayType->getElementType(); - cirArrayType = cirArrayType.getEltType().dyn_cast(); + cirArrayType = + mlir::dyn_cast(cirArrayType.getEltType()); arrayType = getContext().getAsArrayType(arrayType->getElementType()); assert((!cirArrayType || arrayType) && diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9d599985b67b..c14913ddd8f1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1089,7 +1089,7 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::TypedAttr getValue() const { assert(!isReference()); - return ValueAndIsReference.getPointer().cast(); + return mlir::cast(ValueAndIsReference.getPointer()); } }; @@ -1923,7 +1923,7 @@ class CIRGenFunction : public CIRGenTypeCache { Depth++; // Has multiple locations: overwrite with separate start and end locs. - if (const auto fusedLoc = loc.dyn_cast()) { + if (const auto fusedLoc = mlir::dyn_cast(loc)) { assert(fusedLoc.getLocations().size() == 2 && "too many locations"); BeginLoc = fusedLoc.getLocations()[0]; EndLoc = fusedLoc.getLocations()[1]; diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 363d970f4c87..b72ec9b6c34a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2165,9 +2165,9 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, CGF.buildAnyExprToExn(E->getSubExpr(), Address(exceptionPtr, exnAlign)); // Get the RTTI symbol address. - auto typeInfo = CGM.getAddrOfRTTIDescriptor(subExprLoc, clangThrowType, - /*ForEH=*/true) - .dyn_cast_or_null(); + auto typeInfo = mlir::dyn_cast_if_present( + CGM.getAddrOfRTTIDescriptor(subExprLoc, clangThrowType, + /*ForEH=*/true)); assert(typeInfo && "expected GlobalViewAttr typeinfo"); assert(!typeInfo.getIndices() && "expected no indirection"); @@ -2303,10 +2303,10 @@ static mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF, static mlir::cir::DynamicCastInfoAttr buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, QualType DestRecordTy) { - auto srcRtti = CGF.CGM.getAddrOfRTTIDescriptor(Loc, SrcRecordTy) - .cast(); - auto destRtti = CGF.CGM.getAddrOfRTTIDescriptor(Loc, DestRecordTy) - .cast(); + auto srcRtti = mlir::cast( + CGF.CGM.getAddrOfRTTIDescriptor(Loc, SrcRecordTy)); + auto destRtti = mlir::cast( + CGF.CGM.getAddrOfRTTIDescriptor(Loc, DestRecordTy)); auto runtimeFuncOp = getItaniumDynamicCastFn(CGF); auto badCastFuncOp = getBadCastFn(CGF); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c12f79a9aeb1..1e250192c72f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -970,7 +970,7 @@ mlir::Operation *CIRGenModule::getWeakRefReference(const ValueDecl *VD) { } mlir::Type DeclTy = getTypes().convertTypeForMem(VD->getType()); - if (DeclTy.isa()) { + if (mlir::isa(DeclTy)) { auto F = GetOrCreateCIRFunction(AA->getAliasee(), DeclTy, GlobalDecl(cast(VD)), /*ForVtable=*/false); @@ -1111,23 +1111,23 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // // TODO(cir): create another attribute to contain the final type and abstract // away SymbolRefAttr. - if (auto symAttr = Init.dyn_cast()) { + if (auto symAttr = mlir::dyn_cast(Init)) { auto cstGlobal = mlir::SymbolTable::lookupSymbolIn(theModule, symAttr); assert(isa(cstGlobal) && "unaware of other symbol providers"); auto g = cast(cstGlobal); - auto arrayTy = g.getSymType().dyn_cast(); + auto arrayTy = mlir::dyn_cast(g.getSymType()); // TODO(cir): pointer to array decay. Should this be modeled explicitly in // CIR? if (arrayTy) InitType = mlir::cir::PointerType::get(builder.getContext(), arrayTy.getEltType()); } else { - assert(Init.isa() && "This should have a type"); - auto TypedInitAttr = Init.cast(); + assert(mlir::isa(Init) && "This should have a type"); + auto TypedInitAttr = mlir::cast(Init); InitType = TypedInitAttr.getType(); } - assert(!InitType.isa() && "Should have a type by now"); + assert(!mlir::isa(InitType) && "Should have a type by now"); auto Entry = buildGlobal(D, InitType, ForDefinition_t(!IsTentative)); // TODO(cir): Strip off pointer casts from Entry if we get them? @@ -1306,11 +1306,11 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { return builder.getString(Str, eltTy, finalSize); } - auto arrayTy = - getTypes().ConvertType(E->getType()).dyn_cast(); + auto arrayTy = mlir::dyn_cast( + getTypes().ConvertType(E->getType())); assert(arrayTy && "string literals must be emitted as an array type"); - auto arrayEltTy = arrayTy.getEltType().dyn_cast(); + auto arrayEltTy = mlir::dyn_cast(arrayTy.getEltType()); assert(arrayEltTy && "string literal elements must be emitted as integral type"); @@ -1429,7 +1429,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, assert(!cir::MissingFeatures::reportGlobalToASan() && "NYI"); } - auto ArrayTy = GV.getSymType().dyn_cast(); + auto ArrayTy = mlir::dyn_cast(GV.getSymType()); assert(ArrayTy && "String literal must be array"); auto PtrTy = mlir::cir::PointerType::get(builder.getContext(), ArrayTy.getEltType()); @@ -2377,8 +2377,8 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( bool IsIncompleteFunction = false; mlir::cir::FuncType FTy; - if (Ty.isa()) { - FTy = Ty.cast(); + if (mlir::isa(Ty)) { + FTy = mlir::cast(Ty); } else { assert(false && "NYI"); // FTy = mlir::FunctionType::get(VoidTy, false); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index d470e2080fcc..8135fd0b7a95 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -714,8 +714,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { assert(MPT->isMemberDataPointer() && "ptr-to-member-function is NYI"); auto memberTy = ConvertType(MPT->getPointeeType()); - auto clsTy = - ConvertType(QualType(MPT->getClass(), 0)).cast(); + auto clsTy = mlir::cast( + ConvertType(QualType(MPT->getClass(), 0))); ResultType = mlir::cir::DataMemberType::get(Builder.getContext(), memberTy, clsTy); break; diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index bceb1c943764..b10a12d9e0da 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -202,8 +202,8 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, // vtableHasLocalLinkage, // /*isCompleteDtor=*/false); } else { - assert((rtti.isa() || - rtti.isa()) && + assert((mlir::isa(rtti) || + mlir::isa(rtti)) && "expected GlobalViewAttr or ConstPtrAttr"); return builder.add(rtti); } diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 408dcdcd605d..53280e765e20 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -293,7 +293,7 @@ class LValue { LValue R; R.LVType = Simple; - assert(address.getPointer().getType().cast()); + assert(mlir::cast(address.getPointer().getType())); R.V = address.getPointer(); R.ElementType = address.getElementType(); R.Initialize(type, qs, address.getAlignment(), diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp index 89852f29e648..522f59adff60 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -24,7 +24,7 @@ ConstantInitBuilderBase::ConstantInitBuilderBase(CIRGenModule &CGM) mlir::Type ConstantInitFuture::getType() const { assert(Data && "dereferencing null future"); if (Data.is()) { - auto attr = Data.get().dyn_cast(); + auto attr = mlir::dyn_cast(Data.get()); assert(attr && "expected typed attribute"); return attr.getType(); } else { @@ -34,7 +34,7 @@ mlir::Type ConstantInitFuture::getType() const { void ConstantInitFuture::abandon() { assert(Data && "abandoning null future"); - if (auto builder = Data.dyn_cast()) { + if (auto builder = mlir::dyn_cast(Data)) { builder->abandon(0); } Data = nullptr; diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h index 1bddd1323473..d78584f42e71 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -405,7 +405,7 @@ class ConstantAggregateBuilderTemplateBase bool forVTable = false) { assert(!this->Parent && "finishing non-root builder"); mlir::Attribute init = asImpl().finishImpl(global.getContext()); - auto initCSA = init.dyn_cast(); + auto initCSA = mlir::dyn_cast(init); assert(initCSA && "expected #cir.const_struct attribute to represent vtable data"); return this->Builder.setGlobalInitializer( diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 43bc2e33f7c8..47f5a57e3d9b 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -370,7 +370,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, // If we have a sign or zero extended integer, make sure to return Extend so // that the parameter gets the right LLVM IR attributes. - if (Hi == NoClass && ResType.isa()) { + if (Hi == NoClass && mlir::isa(ResType)) { assert(!Ty->getAs() && "NYI"); if (Ty->isSignedIntegerOrEnumerationType() && isPromotableIntegerTypeForABI(Ty)) @@ -499,7 +499,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { // If we have a sign or zero extended integer, make sure to return Extend so // that the parameter gets the right LLVM IR attributes. // TODO: extend the above consideration to MLIR - if (Hi == NoClass && ResType.isa()) { + if (Hi == NoClass && mlir::isa(ResType)) { // Treat an enum type as its underlying type. if (const auto *EnumTy = RetTy->getAs()) RetTy = EnumTy->getDecl()->getIntegerType(); diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 192e7b3a8d18..159382ba2fb7 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -140,7 +140,7 @@ static ParseResult parseStructMembers(mlir::AsmParser &parser, LogicalResult ConstStructAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, mlir::Type type, ArrayAttr members) { - auto sTy = type.dyn_cast_or_null(); + auto sTy = mlir::dyn_cast_if_present(type); if (!sTy) { emitError() << "expected !cir.struct type"; return failure(); @@ -153,7 +153,7 @@ LogicalResult ConstStructAttr::verify( unsigned attrIdx = 0; for (auto &member : sTy.getMembers()) { - auto m = members[attrIdx].dyn_cast_or_null(); + auto m = dyn_cast_if_present(members[attrIdx]); if (!m) { emitError() << "expected mlir::TypedAttr attribute"; return failure(); @@ -175,7 +175,7 @@ LogicalResult StructLayoutAttr::verify( unsigned alignment, bool padded, mlir::Type largest_member, mlir::ArrayAttr offsets) { if (not std::all_of(offsets.begin(), offsets.end(), [](mlir::Attribute attr) { - return attr.isa(); + return mlir::isa(attr); })) { return emitError() << "all index values must be integers"; } @@ -245,9 +245,9 @@ static void printConstPtr(AsmPrinter &p, mlir::IntegerAttr value) { Attribute IntAttr::parse(AsmParser &parser, Type odsType) { mlir::APInt APValue; - if (!odsType.isa()) + if (!mlir::isa(odsType)) return {}; - auto type = odsType.cast(); + auto type = mlir::cast(odsType); // Consume the '<' symbol. if (parser.parseLess()) @@ -282,7 +282,7 @@ Attribute IntAttr::parse(AsmParser &parser, Type odsType) { } void IntAttr::print(AsmPrinter &printer) const { - auto type = getType().cast(); + auto type = mlir::cast(getType()); printer << '<'; if (type.isSigned()) printer << getSInt(); @@ -293,12 +293,12 @@ void IntAttr::print(AsmPrinter &printer) const { LogicalResult IntAttr::verify(function_ref emitError, Type type, APInt value) { - if (!type.isa()) { + if (!mlir::isa(type)) { emitError() << "expected 'simple.int' type"; return failure(); } - auto intType = type.cast(); + auto intType = mlir::cast(type); if (value.getBitWidth() != intType.getWidth()) { emitError() << "type and value bitwidth mismatch: " << intType.getWidth() << " != " << value.getBitWidth(); @@ -329,7 +329,7 @@ parseFloatLiteral(mlir::AsmParser &parser, auto losesInfo = false; value.emplace(rawValue); - auto tyFpInterface = ty.dyn_cast(); + auto tyFpInterface = dyn_cast(ty); if (!tyFpInterface) { // Parsing of the current floating-point literal has succeeded, but the // given attribute type is invalid. This error will be reported later when @@ -343,14 +343,14 @@ parseFloatLiteral(mlir::AsmParser &parser, } cir::FPAttr cir::FPAttr::getZero(mlir::Type type) { - return get(type, - APFloat::getZero( - type.cast().getFloatSemantics())); + return get( + type, APFloat::getZero( + mlir::cast(type).getFloatSemantics())); } LogicalResult cir::FPAttr::verify(function_ref emitError, Type type, APFloat value) { - auto fltTypeInterface = type.dyn_cast(); + auto fltTypeInterface = mlir::dyn_cast(type); if (!fltTypeInterface) { emitError() << "expected floating-point type"; return failure(); @@ -477,11 +477,11 @@ LogicalResult DynamicCastInfoAttr::verify( auto isRttiPtr = [](mlir::Type ty) { // RTTI pointers are !cir.ptr. - auto ptrTy = ty.dyn_cast(); + auto ptrTy = mlir::dyn_cast(ty); if (!ptrTy) return false; - auto pointeeIntTy = ptrTy.getPointee().dyn_cast(); + auto pointeeIntTy = mlir::dyn_cast(ptrTy.getPointee()); if (!pointeeIntTy) return false; diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp index 147bff99e0bd..26d055b69351 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp @@ -4,19 +4,19 @@ namespace cir { CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { - auto dlSpec = modOp->getAttr(mlir::DLTIDialect::kDataLayoutAttrName) - .dyn_cast(); + auto dlSpec = mlir::dyn_cast( + modOp->getAttr(mlir::DLTIDialect::kDataLayoutAttrName)); assert(dlSpec && "expected dl_spec in the module"); auto entries = dlSpec.getEntries(); for (auto entry : entries) { auto entryKey = entry.getKey(); - auto strKey = entryKey.dyn_cast(); + auto strKey = mlir::dyn_cast(entryKey); if (!strKey) continue; auto entryName = strKey.strref(); if (entryName == mlir::DLTIDialect::kDataLayoutEndiannessKey) { - auto value = entry.getValue().dyn_cast(); + auto value = mlir::dyn_cast(entry.getValue()); assert(value && "expected string attribute"); auto endian = value.getValue(); if (endian == mlir::DLTIDialect::kDataLayoutEndiannessBig) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a2a06308b03e..03f9ca7c0926 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -58,7 +58,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { using OpAsmDialectInterface::OpAsmDialectInterface; AliasResult getAlias(Type type, raw_ostream &os) const final { - if (auto structType = type.dyn_cast()) { + if (auto structType = dyn_cast(type)) { if (!structType.getName()) { os << "ty_anon_" << structType.getKindAsStr(); return AliasResult::OverridableAlias; @@ -66,7 +66,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << "ty_" << structType.getName(); return AliasResult::OverridableAlias; } - if (auto intType = type.dyn_cast()) { + if (auto intType = dyn_cast(type)) { // We only provide alias for standard integer types (i.e. integer types // whose width is divisible by 8). if (intType.getWidth() % 8 != 0) @@ -74,7 +74,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << intType.getAlias(); return AliasResult::OverridableAlias; } - if (auto voidType = type.dyn_cast()) { + if (auto voidType = dyn_cast(type)) { os << voidType.getAlias(); return AliasResult::OverridableAlias; } @@ -83,26 +83,26 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { } AliasResult getAlias(Attribute attr, raw_ostream &os) const final { - if (auto boolAttr = attr.dyn_cast()) { + if (auto boolAttr = mlir::dyn_cast(attr)) { os << (boolAttr.getValue() ? "true" : "false"); return AliasResult::FinalAlias; } - if (auto bitfield = attr.dyn_cast()) { + if (auto bitfield = mlir::dyn_cast(attr)) { os << "bfi_" << bitfield.getName().str(); return AliasResult::FinalAlias; } if (auto extraFuncAttr = - attr.dyn_cast()) { + mlir::dyn_cast(attr)) { os << "fn_attr"; return AliasResult::FinalAlias; } if (auto cmpThreeWayInfoAttr = - attr.dyn_cast()) { + mlir::dyn_cast(attr)) { os << cmpThreeWayInfoAttr.getAlias(); return AliasResult::FinalAlias; } if (auto dynCastInfoAttr = - attr.dyn_cast()) { + mlir::dyn_cast(attr)) { os << dynCastInfoAttr.getAlias(); return AliasResult::FinalAlias; } @@ -303,33 +303,33 @@ LogicalResult ConditionOp::verify() { static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, mlir::Attribute attrType) { - if (attrType.isa()) { - if (opType.isa<::mlir::cir::PointerType>()) + if (isa(attrType)) { + if (::mlir::isa<::mlir::cir::PointerType>(opType)) return success(); return op->emitOpError("nullptr expects pointer type"); } - if (attrType.isa()) { + if (isa(attrType)) { // More detailed type verifications are already done in // DataMemberAttr::verify. Don't need to repeat here. return success(); } - if (attrType.isa()) { - if (opType.isa<::mlir::cir::StructType, ::mlir::cir::ArrayType>()) + if (isa(attrType)) { + if (::mlir::isa<::mlir::cir::StructType, ::mlir::cir::ArrayType>(opType)) return success(); return op->emitOpError("zero expects struct or array type"); } - if (attrType.isa()) { - if (!opType.isa()) + if (mlir::isa(attrType)) { + if (!mlir::isa(opType)) return op->emitOpError("result type (") << opType << ") must be '!cir.bool' for '" << attrType << "'"; return success(); } - if (attrType.isa()) { - auto at = attrType.cast(); + if (mlir::isa(attrType)) { + auto at = cast(attrType); if (at.getType() != opType) { return op->emitOpError("result type (") << opType << ") does not match value type (" << at.getType() @@ -338,24 +338,24 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return success(); } - if (attrType.isa()) { - if (opType.isa<::mlir::cir::PointerType>()) + if (isa(attrType)) { + if (::mlir::isa<::mlir::cir::PointerType>(opType)) return success(); return op->emitOpError("symbolref expects pointer type"); } - if (attrType.isa() || - attrType.isa() || - attrType.isa() || - attrType.isa() || - attrType.isa()) + if (mlir::isa(attrType) || + mlir::isa(attrType) || + mlir::isa(attrType) || + mlir::isa(attrType) || + mlir::isa(attrType)) return success(); - if (attrType.isa()) + if (mlir::isa(attrType)) return success(); - assert(attrType.isa() && "What else could we be looking at here?"); + assert(isa(attrType) && "What else could we be looking at here?"); return op->emitOpError("global with type ") - << attrType.cast().getType() << " not supported"; + << cast(attrType).getType() << " not supported"; } LogicalResult ConstantOp::verify() { @@ -385,43 +385,44 @@ LogicalResult CastOp::verify() { auto resType = getResult().getType(); auto srcType = getSrc().getType(); - if (srcType.isa() && - resType.isa()) { + if (mlir::isa(srcType) && + mlir::isa(resType)) { // Use the element type of the vector to verify the cast kind. (Except for // bitcast, see below.) - srcType = srcType.dyn_cast().getEltType(); - resType = resType.dyn_cast().getEltType(); + srcType = mlir::dyn_cast(srcType).getEltType(); + resType = mlir::dyn_cast(resType).getEltType(); } switch (getKind()) { case cir::CastKind::int_to_bool: { - if (!resType.isa()) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; - if (!srcType.isa()) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; return success(); } case cir::CastKind::ptr_to_bool: { - if (!resType.isa()) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; - if (!srcType.isa()) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.ptr type for source"; return success(); } case cir::CastKind::integral: { - if (!resType.isa()) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.int type for result"; - if (!srcType.isa()) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; return success(); } case cir::CastKind::array_to_ptrdecay: { - auto arrayPtrTy = srcType.dyn_cast(); - auto flatPtrTy = resType.dyn_cast(); + auto arrayPtrTy = mlir::dyn_cast(srcType); + auto flatPtrTy = mlir::dyn_cast(resType); if (!arrayPtrTy || !flatPtrTy) return emitOpError() << "requires !cir.ptr type for source and result"; - auto arrayTy = arrayPtrTy.getPointee().dyn_cast(); + auto arrayTy = + mlir::dyn_cast(arrayPtrTy.getPointee()); if (!arrayTy) return emitOpError() << "requires !cir.array pointee"; @@ -433,72 +434,72 @@ LogicalResult CastOp::verify() { case cir::CastKind::bitcast: { // This is the only cast kind where we don't want vector types to decay // into the element type. - if ((!getSrc().getType().isa() || - !getResult().getType().isa()) && - (!getSrc().getType().isa() || - !getResult().getType().isa())) + if ((!mlir::isa(getSrc().getType()) || + !mlir::isa(getResult().getType())) && + (!mlir::isa(getSrc().getType()) || + !mlir::isa(getResult().getType()))) return emitOpError() << "requires !cir.ptr or !cir.vector type for source and result"; return success(); } case cir::CastKind::floating: { - if (!srcType.isa() || - !resType.isa()) + if (!mlir::isa(srcType) || + !mlir::isa(resType)) return emitOpError() << "requires !cir.float type for source and result"; return success(); } case cir::CastKind::float_to_int: { - if (!srcType.isa()) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.float type for source"; - if (!resType.dyn_cast()) + if (!mlir::dyn_cast(resType)) return emitOpError() << "requires !cir.int type for result"; return success(); } case cir::CastKind::int_to_ptr: { - if (!srcType.dyn_cast()) + if (!mlir::dyn_cast(srcType)) return emitOpError() << "requires !cir.int type for source"; - if (!resType.dyn_cast()) + if (!mlir::dyn_cast(resType)) return emitOpError() << "requires !cir.ptr type for result"; return success(); } case cir::CastKind::ptr_to_int: { - if (!srcType.dyn_cast()) + if (!mlir::dyn_cast(srcType)) return emitOpError() << "requires !cir.ptr type for source"; - if (!resType.dyn_cast()) + if (!mlir::dyn_cast(resType)) return emitOpError() << "requires !cir.int type for result"; return success(); } case cir::CastKind::float_to_bool: { - if (!srcType.isa()) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.float type for source"; - if (!resType.isa()) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; return success(); } case cir::CastKind::bool_to_int: { - if (!srcType.isa()) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.bool type for source"; - if (!resType.isa()) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.int type for result"; return success(); } case cir::CastKind::int_to_float: { - if (!srcType.isa()) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; - if (!resType.isa()) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.float type for result"; return success(); } case cir::CastKind::bool_to_float: { - if (!srcType.isa()) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.bool type for source"; - if (!resType.isa()) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.float type for result"; return success(); } case cir::CastKind::address_space: { - auto srcPtrTy = srcType.dyn_cast(); - auto resPtrTy = resType.dyn_cast(); + auto srcPtrTy = mlir::dyn_cast(srcType); + auto resPtrTy = mlir::dyn_cast(resType); if (!srcPtrTy || !resPtrTy) return emitOpError() << "requires !cir.ptr type for source and result"; if (srcPtrTy.getPointee() != resPtrTy.getPointee()) @@ -537,8 +538,9 @@ OpFoldResult CastOp::fold(FoldAdaptor adaptor) { //===----------------------------------------------------------------------===// LogicalResult DynamicCastOp::verify() { - auto resultPointeeTy = getType().cast().getPointee(); - if (!resultPointeeTy.isa()) + auto resultPointeeTy = + mlir::cast(getType()).getPointee(); + if (!mlir::isa(resultPointeeTy)) return emitOpError() << "cir.dyn_cast must produce a void ptr or struct ptr"; @@ -579,7 +581,7 @@ LogicalResult VecTernaryOp::verify() { // other operands. (The automatic verification already checked that all // operands are vector types and that the second and third operands are the // same type.) - if (getCond().getType().cast().getSize() != + if (mlir::cast(getCond().getType()).getSize() != getVec1().getType().getSize()) { return emitOpError() << ": the number of elements in " << getCond().getType() << " and " @@ -608,7 +610,7 @@ LogicalResult VecShuffleOp::verify() { // The indices must all be integer constants if (not std::all_of(getIndices().begin(), getIndices().end(), [](mlir::Attribute attr) { - return attr.isa(); + return mlir::isa(attr); })) { return emitOpError() << "all index values must be integers"; } @@ -622,7 +624,7 @@ LogicalResult VecShuffleOp::verify() { LogicalResult VecShuffleDynamicOp::verify() { // The number of elements in the two input vectors must match. if (getVec().getType().getSize() != - getIndices().getType().cast().getSize()) { + mlir::cast(getIndices().getType()).getSize()) { return emitOpError() << ": the number of elements in " << getVec().getType() << " and " << getIndices().getType() << " don't match"; } @@ -991,7 +993,7 @@ mlir::SuccessorOperands BrCondOp::getSuccessorOperands(unsigned index) { } Block *BrCondOp::getSuccessorForOperands(ArrayRef operands) { - if (IntegerAttr condAttr = operands.front().dyn_cast_or_null()) + if (IntegerAttr condAttr = dyn_cast_if_present(operands.front())) return condAttr.getValue().isOne() ? getDestTrue() : getDestFalse(); return nullptr; } @@ -1175,7 +1177,7 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, for (auto &r : regions) { p << "case ("; - auto attr = casesAttr[idx].cast(); + auto attr = cast(casesAttr[idx]); auto kind = attr.getKind().getValue(); assert((kind == CaseOpKind::Default || kind == CaseOpKind::Equal || kind == CaseOpKind::Anyof || kind == CaseOpKind::Range) && @@ -1188,8 +1190,8 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, switch (kind) { case cir::CaseOpKind::Equal: { p << ", "; - auto intAttr = attr.getValue()[0].cast(); - auto intAttrTy = intAttr.getType().cast(); + auto intAttr = cast(attr.getValue()[0]); + auto intAttrTy = cast(intAttr.getType()); (intAttrTy.isSigned() ? p << intAttr.getSInt() : p << intAttr.getUInt()); break; } @@ -1200,13 +1202,13 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, case cir::CaseOpKind::Anyof: { p << ", ["; llvm::interleaveComma(attr.getValue(), p, [&](const Attribute &a) { - auto intAttr = a.cast(); - auto intAttrTy = intAttr.getType().cast(); + auto intAttr = cast(a); + auto intAttrTy = cast(intAttr.getType()); (intAttrTy.isSigned() ? p << intAttr.getSInt() : p << intAttr.getUInt()); }); p << "] : "; - auto typedAttr = attr.getValue()[0].dyn_cast(); + auto typedAttr = dyn_cast(attr.getValue()[0]); assert(typedAttr && "this should never not have a type!"); p.printType(typedAttr.getType()); break; @@ -1357,7 +1359,7 @@ static void printSwitchFlatOpCases(OpAsmPrinter &p, SwitchFlatOp op, [&](auto i) { p << " "; mlir::Attribute a = std::get<0>(i); - p << a.cast().getValue(); + p << mlir::cast(a).getValue(); p << ": "; p.printSuccessorAndUseList(std::get<1>(i), caseOperands[index++]); }, @@ -1457,7 +1459,7 @@ void printCatchOp(OpAsmPrinter &p, CatchOp op, p.increaseIndent(); auto exRtti = a; - if (a.isa()) { + if (mlir::isa(a)) { p.printAttribute(a); } else if (!exRtti) { p << "all"; @@ -1622,9 +1624,9 @@ static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, if (parseConstantValue(parser, initialValueAttr).failed()) return failure(); - assert(initialValueAttr.isa() && + assert(mlir::isa(initialValueAttr) && "Non-typed attrs shouldn't appear here."); - auto typedAttr = initialValueAttr.cast(); + auto typedAttr = mlir::cast(initialValueAttr); opTy = typedAttr.getType(); } @@ -1810,7 +1812,7 @@ GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { else llvm_unreachable("shall not get here"); - auto resultType = getAddr().getType().dyn_cast(); + auto resultType = dyn_cast(getAddr().getType()); if (!resultType || symTy != resultType.getPointee()) return emitOpError("result type pointee type '") << resultType.getPointee() << "' does not match type " << symTy @@ -2188,7 +2190,7 @@ void cir::FuncOp::print(OpAsmPrinter &p) { // getNumArguments hook not failing. LogicalResult cir::FuncOp::verifyType() { auto type = getFunctionType(); - if (!type.isa()) + if (!isa(type)) return emitOpError("requires '" + getFunctionTypeAttrName().str() + "' attribute of function type"); if (!getNoProto() && type.isVarArg() && type.getNumInputs() == 0) @@ -2706,12 +2708,12 @@ LogicalResult mlir::cir::ConstArrayAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::Type type, Attribute attr, int trailingZerosNum) { - if (!(attr.isa() || attr.isa())) + if (!(mlir::isa(attr) || mlir::isa(attr))) return emitError() << "constant array expects ArrayAttr or StringAttr"; - if (auto strAttr = attr.dyn_cast()) { - mlir::cir::ArrayType at = type.cast(); - auto intTy = at.getEltType().dyn_cast(); + if (auto strAttr = mlir::dyn_cast(attr)) { + mlir::cir::ArrayType at = mlir::cast(type); + auto intTy = mlir::dyn_cast(at.getEltType()); // TODO: add CIR type for char. if (!intTy || intTy.getWidth() != 8) { @@ -2722,9 +2724,9 @@ LogicalResult mlir::cir::ConstArrayAttr::verify( return success(); } - assert(attr.isa()); - auto arrayAttr = attr.cast(); - auto at = type.cast(); + assert(mlir::isa(attr)); + auto arrayAttr = mlir::cast(attr); + auto at = mlir::cast(type); // Make sure both number of elements and subelement types match type. if (at.getSize() != arrayAttr.size() + trailingZerosNum) @@ -2735,7 +2737,7 @@ LogicalResult mlir::cir::ConstArrayAttr::verify( // Once we find a mismatch, stop there. if (eltTypeCheck.failed()) return; - auto typedAttr = attr.dyn_cast(); + auto typedAttr = mlir::dyn_cast(attr); if (!typedAttr || typedAttr.getType() != at.getEltType()) { eltTypeCheck = failure(); emitError() @@ -2767,7 +2769,7 @@ ::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser, } // ArrayAttrrs have per-element type, not the type of the array... - if (resultVal->dyn_cast()) { + if (mlir::dyn_cast(*resultVal)) { // Array has implicit type: infer from const array type. if (parser.parseOptionalColon().failed()) { resultTy = type; @@ -2782,10 +2784,10 @@ ::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser, } } } else { - assert(resultVal->isa() && "IDK"); - auto ta = resultVal->cast(); + assert(mlir::isa(*resultVal) && "IDK"); + auto ta = mlir::cast(*resultVal); resultTy = ta.getType(); - if (resultTy->isa()) { + if (mlir::isa(*resultTy)) { parser.emitError(parser.getCurrentLocation(), "expected type declaration for string literal"); return {}; @@ -2795,12 +2797,13 @@ ::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser, auto zeros = 0; if (parser.parseOptionalComma().succeeded()) { if (parser.parseOptionalKeyword("trailing_zeros").succeeded()) { - auto typeSize = resultTy.value().cast().getSize(); + auto typeSize = + mlir::cast(resultTy.value()).getSize(); auto elts = resultVal.value(); - if (auto str = elts.dyn_cast()) + if (auto str = mlir::dyn_cast(elts)) zeros = typeSize - str.size(); else - zeros = typeSize - elts.cast().size(); + zeros = typeSize - mlir::cast(elts).size(); } else { return {}; } @@ -2871,7 +2874,7 @@ LogicalResult TypeInfoAttr::verify( LogicalResult VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::Type type, ::mlir::ArrayAttr vtableData) { - auto sTy = type.dyn_cast_or_null(); + auto sTy = mlir::dyn_cast_if_present(type); if (!sTy) { emitError() << "expected !cir.struct type result"; return failure(); @@ -2883,8 +2886,9 @@ VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, for (size_t i = 0; i < sTy.getMembers().size(); ++i) { - auto arrayTy = sTy.getMembers()[i].dyn_cast(); - auto constArrayAttr = vtableData[i].dyn_cast(); + auto arrayTy = mlir::dyn_cast(sTy.getMembers()[i]); + auto constArrayAttr = + mlir::dyn_cast(vtableData[i]); if (!arrayTy || !constArrayAttr) { emitError() << "expected struct type with one array element"; return failure(); @@ -2895,10 +2899,11 @@ VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, return failure(); LogicalResult eltTypeCheck = success(); - if (auto arrayElts = constArrayAttr.getElts().dyn_cast()) { + if (auto arrayElts = mlir::dyn_cast(constArrayAttr.getElts())) { arrayElts.walkImmediateSubElements( [&](Attribute attr) { - if (attr.isa() || attr.isa()) + if (mlir::isa(attr) || + mlir::isa(attr)) return; emitError() << "expected GlobalViewAttr attribute"; eltTypeCheck = failure(); @@ -2952,7 +2957,7 @@ LogicalResult MemCpyOp::verify() { LogicalResult GetMemberOp::verify() { - const auto recordTy = getAddrTy().getPointee().dyn_cast(); + const auto recordTy = dyn_cast(getAddrTy().getPointee()); if (!recordTy) return emitError() << "expected pointer to a record type"; @@ -2974,7 +2979,7 @@ LogicalResult GetMemberOp::verify() { LogicalResult GetRuntimeMemberOp::verify() { auto recordTy = - getAddr().getType().cast().getPointee().cast(); + cast(cast(getAddr().getType()).getPointee()); auto memberPtrTy = getMember().getType(); if (recordTy != memberPtrTy.getClsTy()) { @@ -3179,7 +3184,7 @@ LogicalResult AtomicFetch::verify() { getBinop() == mlir::cir::AtomicFetchKind::Sub) return mlir::success(); - if (!getVal().getType().isa()) + if (!mlir::isa(getVal().getType())) return emitError() << "only operates on integer values"; return mlir::success(); diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 9c8993bcc30f..597538419854 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -111,7 +111,7 @@ void BoolType::print(mlir::AsmPrinter &printer) const {} Type StructType::getLargestMember(const ::mlir::DataLayout &dataLayout) const { if (!layoutInfo) computeSizeAndAlignment(dataLayout); - return layoutInfo.cast().getLargestMember(); + return mlir::cast(layoutInfo).getLargestMember(); } Type StructType::parse(mlir::AsmParser &parser) { @@ -194,8 +194,8 @@ Type StructType::parse(mlir::AsmParser &parser) { type = getChecked(eLoc, context, membersRef, name, packed, kind); // If the record has a self-reference, its type already exists in a // incomplete state. In this case, we must complete it. - if (type.cast().isIncomplete()) - type.cast().complete(membersRef, packed, ast); + if (mlir::cast(type).isIncomplete()) + mlir::cast(type).complete(membersRef, packed, ast); } else if (!name && !incomplete) { // anonymous & complete type = getChecked(eLoc, context, membersRef, packed, kind); } else { // anonymous & incomplete @@ -456,7 +456,7 @@ StructType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, if (!layoutInfo) computeSizeAndAlignment(dataLayout); return llvm::TypeSize::getFixed( - layoutInfo.cast().getSize() * 8); + mlir::cast(layoutInfo).getSize() * 8); } uint64_t @@ -464,7 +464,7 @@ StructType::getABIAlignment(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { if (!layoutInfo) computeSizeAndAlignment(dataLayout); - return layoutInfo.cast().getAlignment(); + return mlir::cast(layoutInfo).getAlignment(); } uint64_t @@ -476,7 +476,7 @@ StructType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, bool StructType::isPadded(const ::mlir::DataLayout &dataLayout) const { if (!layoutInfo) computeSizeAndAlignment(dataLayout); - return layoutInfo.cast().getPadded(); + return mlir::cast(layoutInfo).getPadded(); } uint64_t StructType::getElementOffset(const ::mlir::DataLayout &dataLayout, @@ -484,8 +484,9 @@ uint64_t StructType::getElementOffset(const ::mlir::DataLayout &dataLayout, assert(idx < getMembers().size() && "access not valid"); if (!layoutInfo) computeSizeAndAlignment(dataLayout); - auto offsets = layoutInfo.cast().getOffsets(); - auto intAttr = offsets[idx].cast(); + auto offsets = + mlir::cast(layoutInfo).getOffsets(); + auto intAttr = mlir::cast(offsets[idx]); return intAttr.getInt(); } @@ -512,7 +513,7 @@ void StructType::computeSizeAndAlignment( auto ty = members[i]; // Found a nested union: recurse into it to fetch its largest member. - auto structMember = ty.dyn_cast(); + auto structMember = mlir::dyn_cast(ty); if (structMember && structMember.isUnion()) { auto candidate = structMember.getLargestMember(dataLayout); if (dataLayout.getTypeSize(candidate) > largestMemberSize) { @@ -581,9 +582,9 @@ Type IntType::parse(mlir::AsmParser &parser) { llvm::StringRef sign; if (parser.parseKeyword(&sign)) return {}; - if (sign.equals_insensitive("s")) + if (sign == "s") isSigned = true; - else if (sign.equals_insensitive("u")) + else if (sign == "u") isSigned = false; else { parser.emitError(loc, "expected 's' or 'u'"); @@ -754,38 +755,35 @@ FP80Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, } const llvm::fltSemantics &LongDoubleType::getFloatSemantics() const { - return getUnderlying() - .cast() + return mlir::cast(getUnderlying()) .getFloatSemantics(); } llvm::TypeSize LongDoubleType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, mlir::DataLayoutEntryListRef params) const { - return getUnderlying() - .cast() + return mlir::cast(getUnderlying()) .getTypeSizeInBits(dataLayout, params); } uint64_t LongDoubleType::getABIAlignment(const mlir::DataLayout &dataLayout, mlir::DataLayoutEntryListRef params) const { - return getUnderlying().cast().getABIAlignment( - dataLayout, params); + return mlir::cast(getUnderlying()) + .getABIAlignment(dataLayout, params); } uint64_t LongDoubleType::getPreferredAlignment( const ::mlir::DataLayout &dataLayout, mlir::DataLayoutEntryListRef params) const { - return getUnderlying() - .cast() + return mlir::cast(getUnderlying()) .getPreferredAlignment(dataLayout, params); } LogicalResult LongDoubleType::verify(function_ref emitError, mlir::Type underlying) { - if (!underlying.isa()) { + if (!mlir::isa(underlying)) { emitError() << "invalid underlying type for long double"; return failure(); } @@ -810,7 +808,7 @@ bool mlir::cir::isFPOrFPVectorTy(mlir::Type t) { if (isa(t)) { return isAnyFloatingPointType( - t.dyn_cast().getEltType()); + mlir::dyn_cast(t).getEltType()); } return isAnyFloatingPointType(t); } @@ -872,7 +870,7 @@ llvm::ArrayRef FuncType::getReturnTypes() const { return static_cast(getImpl())->returnType; } -bool FuncType::isVoid() const { return getReturnType().isa(); } +bool FuncType::isVoid() const { return mlir::isa(getReturnType()); } //===----------------------------------------------------------------------===// // CIR Dialect diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 9363c7349519..c2c3e6a19531 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -31,13 +31,12 @@ LowerModule createLowerModule(FuncOp op, PatternRewriter &rewriter) { auto module = op->getParentOfType(); // Fetch the LLVM data layout string. - auto dataLayoutStr = - module->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName()) - .cast(); + auto dataLayoutStr = cast( + module->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName())); // Fetch target information. llvm::Triple triple( - module->getAttr("cir.triple").cast().getValue()); + cast(module->getAttr("cir.triple")).getValue()); clang::TargetOptions targetOptions; targetOptions.Triple = triple.str(); auto targetInfo = clang::targets::AllocateTarget(triple, targetOptions); @@ -122,7 +121,7 @@ void CallConvLoweringPass::runOnOperation() { config.strictMode = GreedyRewriteStrictness::ExistingOps; // Apply patterns. - if (failed(applyOpPatternsAndFold(ops, std::move(patterns), config))) + if (failed(applyOpPatternsGreedily(ops, std::move(patterns), config))) signalPassFailure(); } diff --git a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp index b72e7a686788..b8745cdf0c2f 100644 --- a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp +++ b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp @@ -33,7 +33,7 @@ void DropASTPass::runOnOperation() { op->walk([&](Operation *op) { if (auto alloca = dyn_cast(op)) { alloca.removeAstAttr(); - auto ty = alloca.getAllocaType().dyn_cast(); + auto ty = mlir::dyn_cast(alloca.getAllocaType()); if (!ty) return; ty.dropAst(); diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index ba489d3076ee..4edd74babf24 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -330,7 +330,7 @@ class CIRSwitchOpFlattening // Digest the case statements values and bodies. for (size_t i = 0; i < op.getCases()->size(); ++i) { auto ®ion = op.getRegion(i); - auto caseAttr = op.getCases()->getValue()[i].cast(); + auto caseAttr = cast(op.getCases()->getValue()[i]); // Found default case: save destination and operands. switch (caseAttr.getKind().getValue()) { @@ -342,8 +342,8 @@ class CIRSwitchOpFlattening assert(caseAttr.getValue().size() == 2 && "Case range should have 2 case value"); rangeValues.push_back( - {caseAttr.getValue()[0].cast().getValue(), - caseAttr.getValue()[1].cast().getValue()}); + {cast(caseAttr.getValue()[0]).getValue(), + cast(caseAttr.getValue()[1]).getValue()}); rangeDestinations.push_back(®ion.front()); rangeOperands.push_back(region.getArguments()); break; @@ -351,7 +351,7 @@ class CIRSwitchOpFlattening case mlir::cir::CaseOpKind::Equal: // AnyOf cases kind can have multiple values, hence the loop below. for (auto &value : caseAttr.getValue()) { - caseValues.push_back(value.cast().getValue()); + caseValues.push_back(cast(value).getValue()); caseOperands.push_back(region.getArguments()); caseDestinations.push_back(®ion.front()); } @@ -498,7 +498,7 @@ void FlattenCFGPass::runOnOperation() { }); // Apply patterns. - if (applyOpPatternsAndFold(ops, std::move(patterns)).failed()) + if (applyOpPatternsGreedily(ops, std::move(patterns)).failed()) signalPassFailure(); } diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index bd5097d3f323..f160239d460d 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -116,8 +116,8 @@ static bool isIteratorLikeType(mlir::Type t) { // TODO: some iterators are going to be represented with structs, // in which case we could look at ASTRecordDeclInterface for more // information. - auto pTy = t.dyn_cast(); - if (!pTy || !pTy.getPointee().isa()) + auto pTy = dyn_cast(t); + if (!pTy || !mlir::isa(pTy.getPointee())) return false; return true; } @@ -144,7 +144,7 @@ bool IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) { return false; // First argument is the container "this" pointer. - auto thisPtr = call.getOperand(0).getType().dyn_cast(); + auto thisPtr = dyn_cast(call.getOperand(0).getType()); if (!thisPtr || !isIteratorInStdContainter(thisPtr.getPointee())) return false; diff --git a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp index 08c2586bc3cf..b936157a1e9f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp @@ -131,7 +131,7 @@ void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { auto first = findOp.getOperand(0); auto last = findOp.getOperand(1); auto value = findOp->getOperand(2); - if (!first.getType().isa() || !last.getType().isa()) + if (!isa(first.getType()) || !isa(last.getType())) return; // Transformation: @@ -139,9 +139,9 @@ void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { // - Assert the Iterator is a pointer to primitive type. // - Check IterBeginOp is char sized. TODO: add other types that map to // char size. - auto iterResTy = findOp.getType().dyn_cast(); + auto iterResTy = dyn_cast(findOp.getType()); assert(iterResTy && "expected pointer type for iterator"); - auto underlyingDataTy = iterResTy.getPointee().dyn_cast(); + auto underlyingDataTy = dyn_cast(iterResTy.getPointee()); if (!underlyingDataTy || underlyingDataTy.getWidth() != 8) return; @@ -149,7 +149,7 @@ void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { // - Check it's a pointer type. // - Load the pattern from memory // - cast it to `int`. - auto patternAddrTy = value.getType().dyn_cast(); + auto patternAddrTy = dyn_cast(value.getType()); if (!patternAddrTy || patternAddrTy.getPointee() != underlyingDataTy) return; @@ -178,7 +178,7 @@ void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { // Look at this pointer to retrieve container information. auto thisPtr = - iterBegin.getOperand().getType().cast().getPointee(); + cast(iterBegin.getOperand().getType()).getPointee(); auto containerTy = dyn_cast(thisPtr); unsigned staticSize = 0; diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index bf65b038d35c..99398bba908f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -498,7 +498,7 @@ static std::string getVarNameFromValue(mlir::Value v) { } static Location getEndLoc(Location loc, int idx = 1) { - auto fusedLoc = loc.dyn_cast(); + auto fusedLoc = dyn_cast(loc); if (!fusedLoc) return loc; return fusedLoc.getLocations()[idx]; @@ -867,9 +867,9 @@ void LifetimeCheckPass::checkIf(IfOp ifOp) { } template bool isStructAndHasAttr(mlir::Type ty) { - if (!ty.isa()) + if (!mlir::isa(ty)) return false; - return hasAttr(ty.cast().getAst()); + return hasAttr(mlir::cast(ty).getAst()); } static bool isOwnerType(mlir::Type ty) { @@ -901,12 +901,12 @@ static bool isOwnerType(mlir::Type ty) { static bool containsPointerElts(mlir::cir::StructType s) { auto members = s.getMembers(); return std::any_of(members.begin(), members.end(), [](mlir::Type t) { - return t.isa(); + return mlir::isa(t); }); } static bool isAggregateType(LifetimeCheckPass *pass, mlir::Type agg) { - auto t = agg.dyn_cast(); + auto t = mlir::dyn_cast(agg); if (!t) return false; // Lambdas have their special handling, and shall not be considered as @@ -956,7 +956,7 @@ static bool isPointerType(mlir::Type t) { // library headers, the following well- known standard types are treated as-if // annotated as Pointers, in addition to raw pointers and references: ref- // erence_wrapper, and vector::reference. - if (t.isa()) + if (mlir::isa(t)) return true; return isStructAndHasAttr(t); } @@ -1017,7 +1017,7 @@ void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr, break; // Map values for members to it's index in the aggregate. - auto members = t.cast().getMembers(); + auto members = mlir::cast(t).getMembers(); SmallVector fieldVals; fieldVals.assign(members.size(), {}); @@ -1035,7 +1035,7 @@ void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr, return; auto eltTy = - eltAddr.getType().cast().getPointee(); + mlir::cast(eltAddr.getType()).getPointee(); // Classify exploded types. Keep alloca original location. classifyAndInitTypeCategories(eltAddr, eltTy, loc, ++nestLevel); @@ -1139,12 +1139,12 @@ void LifetimeCheckPass::updatePointsToForConstStruct( assert(aggregates.count(addr) && "expected association with aggregate"); int memberIdx = 0; for (auto &attr : value.getMembers()) { - auto ta = attr.dyn_cast(); + auto ta = mlir::dyn_cast(attr); assert(ta && "expected typed attribute"); auto fieldAddr = aggregates[addr][memberIdx]; // Unseen fields are not tracked. - if (fieldAddr && ta.getType().isa()) { - assert(ta.isa() && + if (fieldAddr && mlir::isa(ta.getType())) { + assert(mlir::isa(ta) && "other than null not implemented"); markPsetNull(fieldAddr, loc); } @@ -1160,7 +1160,7 @@ void LifetimeCheckPass::updatePointsToForZeroStruct(mlir::Value addr, for (auto &t : sTy.getMembers()) { auto fieldAddr = aggregates[addr][memberIdx]; // Unseen fields are not tracked. - if (fieldAddr && t.isa()) { + if (fieldAddr && mlir::isa(t)) { markPsetNull(fieldAddr, loc); } memberIdx++; @@ -1217,13 +1217,13 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, // individual exploded fields. if (aggregates.count(addr)) { if (auto constStruct = - cstOp.getValue().dyn_cast()) { + mlir::dyn_cast(cstOp.getValue())) { updatePointsToForConstStruct(addr, constStruct, loc); return; } - if (auto zero = cstOp.getValue().dyn_cast()) { - if (auto zeroStructTy = zero.getType().dyn_cast()) { + if (auto zero = mlir::dyn_cast(cstOp.getValue())) { + if (auto zeroStructTy = dyn_cast(zero.getType())) { updatePointsToForZeroStruct(addr, zeroStructTy, loc); return; } @@ -1682,11 +1682,11 @@ void LifetimeCheckPass::checkForOwnerAndPointerArguments(CallOp callOp, if (aggregates.count(arg)) { int memberIdx = 0; auto sTy = - arg.getType().cast().getPointee().dyn_cast(); + dyn_cast(cast(arg.getType()).getPointee()); assert(sTy && "expected struct type"); for (auto m : sTy.getMembers()) { auto ptrMemberAddr = aggregates[arg][memberIdx]; - if (m.isa() && ptrMemberAddr) { + if (isa(m) && ptrMemberAddr) { ptrsToDeref.insert(ptrMemberAddr); } memberIdx++; @@ -1732,7 +1732,7 @@ bool LifetimeCheckPass::isLambdaType(mlir::Type ty) { return IsLambdaTyCache[ty]; IsLambdaTyCache[ty] = false; - auto taskTy = ty.dyn_cast(); + auto taskTy = mlir::dyn_cast(ty); if (!taskTy) return false; if (taskTy.getAst().isLambda()) @@ -1747,7 +1747,7 @@ bool LifetimeCheckPass::isTaskType(mlir::Value taskVal) { return IsTaskTyCache[ty]; bool result = [&] { - auto taskTy = taskVal.getType().dyn_cast(); + auto taskTy = mlir::dyn_cast(taskVal.getType()); if (!taskTy) return false; return taskTy.getAst().hasPromiseType(); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 843c35f4a1bd..49f05d4187f3 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -570,11 +570,9 @@ void LoweringPreparePass::lowerArrayDtor(ArrayDtor op) { builder.setInsertionPointAfter(op.getOperation()); auto eltTy = op->getRegion(0).getArgument(0).getType(); - auto arrayLen = op.getAddr() - .getType() - .cast() - .getPointee() - .cast() + auto arrayLen = mlir::cast( + mlir::cast(op.getAddr().getType()) + .getPointee()) .getSize(); lowerArrayDtorCtorIntoLoop(builder, op, eltTy, op.getAddr(), arrayLen); } @@ -584,11 +582,9 @@ void LoweringPreparePass::lowerArrayCtor(ArrayCtor op) { builder.setInsertionPointAfter(op.getOperation()); auto eltTy = op->getRegion(0).getArgument(0).getType(); - auto arrayLen = op.getAddr() - .getType() - .cast() - .getPointee() - .cast() + auto arrayLen = mlir::cast( + mlir::cast(op.getAddr().getType()) + .getPointee()) .getSize(); lowerArrayDtorCtorIntoLoop(builder, op, eltTy, op.getAddr(), arrayLen); } diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index d9d87a94635b..3168d5e19384 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -91,7 +91,7 @@ void MergeCleanupsPass::runOnOperation() { }); // Apply patterns. - if (applyOpPatternsAndFold(ops, std::move(patterns)).failed()) + if (applyOpPatternsGreedily(ops, std::move(patterns)).failed()) signalPassFailure(); } diff --git a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp index ac7e9f2c5b9e..de46433dc9a7 100644 --- a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp @@ -215,7 +215,7 @@ void SCFPreparePass::runOnOperation() { }); // Apply patterns. - if (applyOpPatternsAndFold(ops, std::move(patterns)).failed()) + if (applyOpPatternsGreedily(ops, std::move(patterns)).failed()) signalPassFailure(); } diff --git a/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp b/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp index 93e19294feec..2fbccfc7946a 100644 --- a/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp +++ b/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp @@ -12,7 +12,7 @@ namespace mlir { namespace cir { bool isStdArrayType(mlir::Type t) { - auto sTy = t.dyn_cast(); + auto sTy = dyn_cast(t); if (!sTy) return false; auto recordDecl = sTy.getAst(); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index ef90698054e8..7687a271c1f0 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -23,7 +23,7 @@ bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, const ABIInfo &Info) { Type Ty = FI.getReturnType(); - if (const auto RT = Ty.dyn_cast()) { + if (const auto RT = dyn_cast(Ty)) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index 4a11ebb6758a..ce12263b73d8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -48,7 +48,7 @@ class ItaniumCXXABI : public CIRCXXABI { } // namespace bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { - const StructType RD = FI.getReturnType().dyn_cast(); + const StructType RD = dyn_cast(FI.getReturnType()); if (!RD) return false; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 7a71fffb2fbf..91634c93c5f2 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -358,7 +358,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // NOTE(cir): This method has partial parity to CodeGenFunction's GetUndefRValue // defined in CGExpr.cpp. Value LowerFunction::getUndefRValue(Type Ty) { - if (Ty.isa()) + if (isa(Ty)) return nullptr; llvm::outs() << "Missing undef handler for value type: " << Ty << "\n"; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index 1d8666684fad..b452995813fd 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -64,7 +64,7 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, bool IsVariadic) const { - if (RetTy.isa()) + if (isa(RetTy)) return ABIArgInfo::getIgnore(); llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index 6fe71b13ff92..7d43000877b7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -63,9 +63,9 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( auto opResTy = op.getType(); // front end should not produce non-scalar type of VAArgOp bool isSupportedType = - opResTy.isa(); + mlir::isa(opResTy); // Homogenous Aggregate type not supported and indirect arg // passing not supported yet. And for these supported types, @@ -82,7 +82,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // but it depends on arg type indirectness and coercion defined by ABI. auto baseTy = opResTy; - if (baseTy.isa()) { + if (mlir::isa(baseTy)) { llvm_unreachable("ArrayType VAArg loweing NYI"); } // numRegs may not be 1 if ArrayType is supported. @@ -340,7 +340,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( builder.setInsertionPoint(op); contBlock->addArgument(onStackPtr.getType(), loc); auto resP = contBlock->getArgument(0); - assert(resP.getType().isa()); + assert(mlir::isa(resP.getType())); auto opResPTy = mlir::cir::PointerType::get(builder.getContext(), opResTy); auto castResP = builder.createBitcast(resP, opResPTy); auto res = builder.create(loc, castResP); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index ebbcfab0f573..9d79fb7ccb43 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -66,7 +66,7 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, dynCastFuncArgs) .getResult(); - assert(castedPtr.getType().isa() && + assert(mlir::isa(castedPtr.getType()) && "the return value of __dynamic_cast should be a ptr"); /// C++ [expr.dynamic.cast]p9: diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 63fccc7e9cd6..d921a39a9e42 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -81,7 +81,7 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, // FIXME(cir): There's currently no direct way to identify if a type is a // builtin. if (/*isBuitinType=*/true) { - if (Ty.isa()) { + if (isa(Ty)) { Current = Class::NoClass; } else { llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 515c3254d875..95b6d8bb8e9b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -141,7 +141,7 @@ convertCmpKindToFCmpPredicate(mlir::cir::CmpOpKind kind) { /// If the given type is a vector type, return the vector's element type. /// Otherwise return the given type unchanged. mlir::Type elementTypeIfVector(mlir::Type type) { - if (auto VecType = type.dyn_cast()) { + if (auto VecType = mlir::dyn_cast(type)) { return VecType.getEltType(); } return type; @@ -287,7 +287,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } // Iteratively lower each constant element of the array. - if (auto arrayAttr = constArr.getElts().dyn_cast()) { + if (auto arrayAttr = mlir::dyn_cast(constArr.getElts())) { for (auto [idx, elt] : llvm::enumerate(arrayAttr)) { mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); @@ -297,8 +297,9 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } // TODO(cir): this diverges from traditional lowering. Normally the string // would be a global constant that is memcopied. - else if (auto strAttr = constArr.getElts().dyn_cast()) { - auto arrayTy = strAttr.getType().dyn_cast(); + else if (auto strAttr = + mlir::dyn_cast(constArr.getElts())) { + auto arrayTy = mlir::dyn_cast(strAttr.getType()); assert(arrayTy && "String attribute must have an array type"); auto eltTy = arrayTy.getEltType(); for (auto [idx, elt] : llvm::enumerate(strAttr)) { @@ -357,7 +358,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, indices, true); } - auto ptrTy = globalAttr.getType().dyn_cast(); + auto ptrTy = mlir::dyn_cast(globalAttr.getType()); assert(ptrTy && "Expecting pointer type in GlobalViewAttr"); auto llvmEltTy = converter->convertType(ptrTy.getPointee()); @@ -374,25 +375,25 @@ inline mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { - if (const auto intAttr = attr.dyn_cast()) + if (const auto intAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, intAttr, rewriter, converter); - if (const auto fltAttr = attr.dyn_cast()) + if (const auto fltAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, fltAttr, rewriter, converter); - if (const auto ptrAttr = attr.dyn_cast()) + if (const auto ptrAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, ptrAttr, rewriter, converter); - if (const auto constStruct = attr.dyn_cast()) + if (const auto constStruct = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, constStruct, rewriter, converter); - if (const auto constArr = attr.dyn_cast()) + if (const auto constArr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, constArr, rewriter, converter); - if (const auto boolAttr = attr.dyn_cast()) + if (const auto boolAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, boolAttr, rewriter, converter); - if (const auto zeroAttr = attr.dyn_cast()) + if (const auto zeroAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, zeroAttr, rewriter, converter); - if (const auto globalAttr = attr.dyn_cast()) + if (const auto globalAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, globalAttr, rewriter, converter); - if (const auto vtableAttr = attr.dyn_cast()) + if (const auto vtableAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, vtableAttr, rewriter, converter); - if (const auto typeinfoAttr = attr.dyn_cast()) + if (const auto typeinfoAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, typeinfoAttr, rewriter, converter); llvm_unreachable("unhandled attribute type"); @@ -462,7 +463,8 @@ static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, mlir::Value llvmSrc, mlir::IntegerType llvmDstIntTy, bool isUnsigned, uint64_t cirDstIntWidth) { - auto cirSrcWidth = llvmSrc.getType().cast().getWidth(); + auto cirSrcWidth = + mlir::cast(llvmSrc.getType()).getWidth(); if (cirSrcWidth == cirDstIntWidth) return llvmSrc; @@ -492,14 +494,14 @@ class CIRPtrStrideOpLowering // void and function types doesn't really have a layout to use in GEPs, // make it i8 instead. - if (elementTy.isa() || - elementTy.isa()) + if (mlir::isa(elementTy) || + mlir::isa(elementTy)) elementTy = mlir::IntegerType::get(elementTy.getContext(), 8, mlir::IntegerType::Signless); // Zero-extend, sign-extend or trunc the pointer value. auto index = adaptor.getStride(); - auto width = index.getType().cast().getWidth(); + auto width = mlir::cast(index.getType()).getWidth(); mlir::DataLayout LLVMLayout(ptrStrideOp->getParentOfType()); auto layoutWidth = LLVMLayout.getTypeIndexBitwidth(adaptor.getBase().getType()); @@ -591,7 +593,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { switch (castOp.getKind()) { case mlir::cir::CastKind::array_to_ptrdecay: { - const auto ptrTy = castOp.getType().cast(); + const auto ptrTy = mlir::cast(castOp.getType()); auto sourceValue = adaptor.getOperands().front(); auto targetType = convertTy(ptrTy); auto elementTy = convertTy(ptrTy.getPointee()); @@ -615,14 +617,14 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstType = getTypeConverter()->convertType(dstType); mlir::cir::IntType srcIntType = - elementTypeIfVector(srcType).cast(); + mlir::cast(elementTypeIfVector(srcType)); mlir::cir::IntType dstIntType = - elementTypeIfVector(dstType).cast(); - rewriter.replaceOp(castOp, - getLLVMIntCast(rewriter, llvmSrcVal, - llvmDstType.cast(), - srcIntType.isUnsigned(), - dstIntType.getWidth())); + mlir::cast(elementTypeIfVector(dstType)); + rewriter.replaceOp( + castOp, + getLLVMIntCast(rewriter, llvmSrcVal, + mlir::cast(llvmDstType), + srcIntType.isUnsigned(), dstIntType.getWidth())); break; } case mlir::cir::CastKind::floating: { @@ -633,13 +635,13 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto srcTy = elementTypeIfVector(castOp.getSrc().getType()); auto dstTy = elementTypeIfVector(castOp.getResult().getType()); - if (!dstTy.isa() || - !srcTy.isa()) + if (!mlir::isa(dstTy) || + !mlir::isa(srcTy)) return castOp.emitError() << "NYI cast from " << srcTy << " to " << dstTy; auto getFloatWidth = [](mlir::Type ty) -> unsigned { - return ty.cast().getWidth(); + return mlir::cast(ty).getWidth(); }; if (getFloatWidth(srcTy) > getFloatWidth(dstTy)) @@ -651,7 +653,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case mlir::cir::CastKind::int_to_ptr: { - auto dstTy = castOp.getType().cast(); + auto dstTy = mlir::cast(castOp.getType()); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); rewriter.replaceOpWithNewOp(castOp, llvmDstTy, @@ -659,7 +661,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case mlir::cir::CastKind::ptr_to_int: { - auto dstTy = castOp.getType().cast(); + auto dstTy = mlir::cast(castOp.getType()); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); rewriter.replaceOpWithNewOp(castOp, llvmDstTy, @@ -667,7 +669,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case mlir::cir::CastKind::float_to_bool: { - auto dstTy = castOp.getType().cast(); + auto dstTy = mlir::cast(castOp.getType()); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); auto kind = mlir::LLVM::FCmpPredicate::une; @@ -685,11 +687,11 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case mlir::cir::CastKind::bool_to_int: { - auto dstTy = castOp.getType().cast(); + auto dstTy = mlir::cast(castOp.getType()); auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmSrcTy = llvmSrcVal.getType().cast(); + auto llvmSrcTy = mlir::cast(llvmSrcVal.getType()); auto llvmDstTy = - getTypeConverter()->convertType(dstTy).cast(); + mlir::cast(getTypeConverter()->convertType(dstTy)); if (llvmSrcTy.getWidth() == llvmDstTy.getWidth()) rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); @@ -710,8 +712,8 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto dstTy = castOp.getType(); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); - if (elementTypeIfVector(castOp.getSrc().getType()) - .cast() + if (mlir::cast( + elementTypeIfVector(castOp.getSrc().getType())) .isSigned()) rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); @@ -724,8 +726,8 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto dstTy = castOp.getType(); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); - if (elementTypeIfVector(castOp.getResult().getType()) - .cast() + if (mlir::cast( + elementTypeIfVector(castOp.getResult().getType())) .isSigned()) rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); @@ -934,7 +936,7 @@ mlir::DenseElementsAttr convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, mlir::Type type) { auto values = llvm::SmallVector{}; - auto stringAttr = attr.getElts().dyn_cast(); + auto stringAttr = mlir::dyn_cast(attr.getElts()); assert(stringAttr && "expected string attribute here"); for (auto element : stringAttr) values.push_back({8, (uint64_t)element}); @@ -946,17 +948,17 @@ convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, template StorageTy getZeroInitFromType(mlir::Type Ty); template <> mlir::APInt getZeroInitFromType(mlir::Type Ty) { - assert(Ty.isa() && "expected int type"); - auto IntTy = Ty.cast(); + assert(mlir::isa(Ty) && "expected int type"); + auto IntTy = mlir::cast(Ty); return mlir::APInt::getZero(IntTy.getWidth()); } template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { - assert((Ty.isa()) && + assert((mlir::isa(Ty)) && "only float and double supported"); - if (Ty.isF32() || Ty.isa()) + if (Ty.isF32() || mlir::isa(Ty)) return mlir::APFloat(0.f); - if (Ty.isF64() || Ty.isa()) + if (Ty.isF64() || mlir::isa(Ty)) return mlir::APFloat(0.0); llvm_unreachable("NYI"); } @@ -965,11 +967,11 @@ template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { // e.g: for !cir.array x 1> // it returns !s32i as return value and stores 3 to elemQuantity. mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity) { - assert(Ty.isa() && "expected ArrayType"); + assert(mlir::isa(Ty) && "expected ArrayType"); elemQuantity = 1; mlir::Type nestTy = Ty; - while (auto ArrTy = nestTy.dyn_cast()) { + while (auto ArrTy = mlir::dyn_cast(nestTy)) { nestTy = ArrTy.getEltType(); elemQuantity *= ArrTy.getSize(); } @@ -980,14 +982,14 @@ mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity) { template void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, llvm::SmallVectorImpl &values) { - auto arrayAttr = attr.getElts().cast(); + auto arrayAttr = mlir::cast(attr.getElts()); for (auto eltAttr : arrayAttr) { - if (auto valueAttr = eltAttr.dyn_cast()) { + if (auto valueAttr = mlir::dyn_cast(eltAttr)) { values.push_back(valueAttr.getValue()); } else if (auto subArrayAttr = - eltAttr.dyn_cast()) { + mlir::dyn_cast(eltAttr)) { convertToDenseElementsAttrImpl(subArrayAttr, values); - } else if (auto zeroAttr = eltAttr.dyn_cast()) { + } else if (auto zeroAttr = mlir::dyn_cast(eltAttr)) { unsigned numStoredZeros = 0; auto nestTy = getNestedTypeAndElemQuantity(zeroAttr.getType(), numStoredZeros); @@ -1002,11 +1004,11 @@ void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, // type isn't another array (for the mult-dim case). auto numTrailingZeros = attr.getTrailingZerosNum(); if (numTrailingZeros) { - auto localArrayTy = attr.getType().dyn_cast(); + auto localArrayTy = mlir::dyn_cast(attr.getType()); assert(localArrayTy && "expected !cir.array"); auto nestTy = localArrayTy.getEltType(); - if (!nestTy.isa()) + if (!mlir::isa(nestTy)) values.insert(values.end(), localArrayTy.getSize() - numTrailingZeros, getZeroInitFromType(nestTy)); } @@ -1028,29 +1030,30 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, const mlir::TypeConverter *converter) { // Ensure ConstArrayAttr has a type. - auto typedConstArr = constArr.dyn_cast(); + auto typedConstArr = mlir::dyn_cast(constArr); assert(typedConstArr && "cir::ConstArrayAttr is not a mlir::TypedAttr"); // Ensure ConstArrayAttr type is a ArrayType. - auto cirArrayType = typedConstArr.getType().dyn_cast(); + auto cirArrayType = + mlir::dyn_cast(typedConstArr.getType()); assert(cirArrayType && "cir::ConstArrayAttr is not a cir::ArrayType"); // Is a ConstArrayAttr with an cir::ArrayType: fetch element type. mlir::Type type = cirArrayType; auto dims = llvm::SmallVector{}; - while (auto arrayType = type.dyn_cast()) { + while (auto arrayType = mlir::dyn_cast(type)) { dims.push_back(arrayType.getSize()); type = arrayType.getEltType(); } // Convert array attr to LLVM compatible dense elements attr. - if (constArr.getElts().isa()) + if (mlir::isa(constArr.getElts())) return convertStringAttrToDenseElementsAttr(constArr, converter->convertType(type)); - if (type.isa()) + if (mlir::isa(type)) return convertToDenseElementsAttr( constArr, dims, converter->convertType(type)); - if (type.isa()) + if (mlir::isa(type)) return convertToDenseElementsAttr( constArr, dims, converter->convertType(type)); @@ -1058,7 +1061,7 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, } bool hasTrailingZeros(mlir::cir::ConstArrayAttr attr) { - auto array = attr.getElts().dyn_cast(); + auto array = mlir::dyn_cast(attr.getElts()); return attr.hasTrailingZeros() || (array && std::count_if(array.begin(), array.end(), [](auto elt) { auto ar = dyn_cast(elt); @@ -1098,49 +1101,49 @@ class CIRConstantLowering mlir::ConversionPatternRewriter &rewriter) const override { mlir::Attribute attr = op.getValue(); - if (op.getType().isa()) { + if (mlir::isa(op.getType())) { int value = (op.getValue() == mlir::cir::BoolAttr::get( getContext(), ::mlir::cir::BoolType::get(getContext()), true)); attr = rewriter.getIntegerAttr(typeConverter->convertType(op.getType()), value); - } else if (op.getType().isa()) { + } else if (mlir::isa(op.getType())) { attr = rewriter.getIntegerAttr( typeConverter->convertType(op.getType()), - op.getValue().cast().getValue()); - } else if (op.getType().isa()) { + mlir::cast(op.getValue()).getValue()); + } else if (mlir::isa(op.getType())) { attr = rewriter.getFloatAttr( typeConverter->convertType(op.getType()), - op.getValue().cast().getValue()); - } else if (op.getType().isa()) { + mlir::cast(op.getValue()).getValue()); + } else if (mlir::isa(op.getType())) { // Optimize with dedicated LLVM op for null pointers. - if (op.getValue().isa()) { - if (op.getValue().cast().isNullValue()) { + if (mlir::isa(op.getValue())) { + if (mlir::cast(op.getValue()).isNullValue()) { rewriter.replaceOpWithNewOp( op, typeConverter->convertType(op.getType())); return mlir::success(); } } // Lower GlobalViewAttr to llvm.mlir.addressof - if (auto gv = op.getValue().dyn_cast()) { + if (auto gv = mlir::dyn_cast(op.getValue())) { auto newOp = lowerCirAttrAsValue(op, gv, rewriter, getTypeConverter()); rewriter.replaceOp(op, newOp); return mlir::success(); } attr = op.getValue(); - } else if (op.getType().isa()) { - auto dataMember = op.getValue().cast(); + } else if (mlir::isa(op.getType())) { + auto dataMember = mlir::cast(op.getValue()); attr = lowerDataMemberAttr(op->getParentOfType(), dataMember, *typeConverter); } // TODO(cir): constant arrays are currently just pushed into the stack using // the store instruction, instead of being stored as global variables and // then memcopyied into the stack (as done in Clang). - else if (auto arrTy = op.getType().dyn_cast()) { + else if (auto arrTy = mlir::dyn_cast(op.getType())) { // Fetch operation constant array initializer. - auto constArr = op.getValue().dyn_cast(); + auto constArr = mlir::dyn_cast(op.getValue()); if (!constArr && !isa(op.getValue())) return op.emitError() << "array does not have a constant initializer"; @@ -1161,7 +1164,7 @@ class CIRConstantLowering return mlir::success(); } } else if (const auto structAttr = - op.getValue().dyn_cast()) { + mlir::dyn_cast(op.getValue())) { // TODO(cir): this diverges from traditional lowering. Normally the // initializer would be a global constant that is memcopied. Here we just // define a local constant with llvm.undef that will be stored into the @@ -1171,8 +1174,9 @@ class CIRConstantLowering rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); - } else if (auto strTy = op.getType().dyn_cast()) { - if (auto zero = op.getValue().dyn_cast()) { + } else if (auto strTy = + mlir::dyn_cast(op.getType())) { + if (auto zero = mlir::dyn_cast(op.getValue())) { auto initVal = lowerCirAttrAsValue(op, zero, rewriter, typeConverter); rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); @@ -1201,7 +1205,7 @@ class CIRVectorCreateLowering mlir::ConversionPatternRewriter &rewriter) const override { // Start with an 'undef' value for the vector. Then 'insertelement' for // each of the vector elements. - auto vecTy = op.getType().dyn_cast(); + auto vecTy = mlir::dyn_cast(op.getType()); assert(vecTy && "result type of cir.vec.create op is not VectorType"); auto llvmTy = typeConverter->convertType(vecTy); auto loc = op.getLoc(); @@ -1255,20 +1259,20 @@ class CIRVectorCmpOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::VecCmpOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - assert(op.getType().isa() && - op.getLhs().getType().isa() && - op.getRhs().getType().isa() && + assert(mlir::isa(op.getType()) && + mlir::isa(op.getLhs().getType()) && + mlir::isa(op.getRhs().getType()) && "Vector compare with non-vector type"); // LLVM IR vector comparison returns a vector of i1. This one-bit vector // must be sign-extended to the correct result type. auto elementType = elementTypeIfVector(op.getLhs().getType()); mlir::Value bitResult; - if (auto intType = elementType.dyn_cast()) { + if (auto intType = mlir::dyn_cast(elementType)) { bitResult = rewriter.create( op.getLoc(), convertCmpKindToICmpPredicate(op.getKind(), intType.isSigned()), adaptor.getLhs(), adaptor.getRhs()); - } else if (elementType.isa()) { + } else if (mlir::isa(elementType)) { bitResult = rewriter.create( op.getLoc(), convertCmpKindToFCmpPredicate(op.getKind()), adaptor.getLhs(), adaptor.getRhs()); @@ -1294,7 +1298,7 @@ class CIRVectorSplatLowering // element in the vector. Start with an undef vector. Insert the value into // the first element. Then use a `shufflevector` with a mask of all 0 to // fill out the entire vector with that value. - auto vecTy = op.getType().dyn_cast(); + auto vecTy = mlir::dyn_cast(op.getType()); assert(vecTy && "result type of cir.vec.splat op is not VectorType"); auto llvmTy = typeConverter->convertType(vecTy); auto loc = op.getLoc(); @@ -1320,10 +1324,10 @@ class CIRVectorTernaryLowering mlir::LogicalResult matchAndRewrite(mlir::cir::VecTernaryOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - assert(op.getType().isa() && - op.getCond().getType().isa() && - op.getVec1().getType().isa() && - op.getVec2().getType().isa() && + assert(mlir::isa(op.getType()) && + mlir::isa(op.getCond().getType()) && + mlir::isa(op.getVec1().getType()) && + mlir::isa(op.getVec2().getType()) && "Vector ternary op with non-vector type"); // Convert `cond` into a vector of i1, then use that in a `select` op. mlir::Value bitVec = rewriter.create( @@ -1349,11 +1353,12 @@ class CIRVectorShuffleIntsLowering // Convert the ClangIR ArrayAttr of IntAttr constants into a // SmallVector. SmallVector indices; - std::transform( - op.getIndices().begin(), op.getIndices().end(), - std::back_inserter(indices), [](mlir::Attribute intAttr) { - return intAttr.cast().getValue().getSExtValue(); - }); + std::transform(op.getIndices().begin(), op.getIndices().end(), + std::back_inserter(indices), [](mlir::Attribute intAttr) { + return mlir::cast(intAttr) + .getValue() + .getSExtValue(); + }); rewriter.replaceOpWithNewOp( op, adaptor.getVec1(), adaptor.getVec2(), indices); return mlir::success(); @@ -1384,7 +1389,7 @@ class CIRVectorShuffleVecLowering mlir::Type llvmIndexType = getTypeConverter()->convertType( elementTypeIfVector(op.getIndices().getType())); uint64_t numElements = - op.getVec().getType().cast().getSize(); + mlir::cast(op.getVec().getType()).getSize(); mlir::Value maskValue = rewriter.create( loc, llvmIndexType, mlir::IntegerAttr::get(llvmIndexType, numElements - 1)); @@ -1535,11 +1540,12 @@ class CIRFuncLowering : public mlir::OpConversionPattern { // LLVMFuncOp expects a single FileLine Location instead of a fused // location. auto Loc = op.getLoc(); - if (Loc.isa()) { - auto FusedLoc = Loc.cast(); + if (mlir::isa(Loc)) { + auto FusedLoc = mlir::cast(Loc); Loc = FusedLoc.getLocations()[0]; } - assert((Loc.isa() || Loc.isa()) && + assert((mlir::isa(Loc) || + mlir::isa(Loc)) && "expected single location or unknown location here"); auto linkage = convertLinkage(op.getLinkage()); @@ -1679,10 +1685,12 @@ class CIRGlobalOpLowering } // Initializer is a constant array: convert it to a compatible llvm init. - if (auto constArr = init.value().dyn_cast()) { - if (auto attr = constArr.getElts().dyn_cast()) { + if (auto constArr = + mlir::dyn_cast(init.value())) { + if (auto attr = mlir::dyn_cast(constArr.getElts())) { init = rewriter.getStringAttr(attr.getValue()); - } else if (auto attr = constArr.getElts().dyn_cast()) { + } else if (auto attr = + mlir::dyn_cast(constArr.getElts())) { // Failed to use a compact attribute as an initializer: // initialize elements individually. if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { @@ -1698,15 +1706,16 @@ class CIRGlobalOpLowering << constArr.getElts(); return mlir::failure(); } - } else if (auto fltAttr = init.value().dyn_cast()) { + } else if (auto fltAttr = mlir::dyn_cast(init.value())) { // Initializer is a constant floating-point number: convert to MLIR // builtin constant. init = rewriter.getFloatAttr(llvmType, fltAttr.getValue()); } // Initializer is a constant integer: convert to MLIR builtin constant. - else if (auto intAttr = init.value().dyn_cast()) { + else if (auto intAttr = mlir::dyn_cast(init.value())) { init = rewriter.getIntegerAttr(llvmType, intAttr.getValue()); - } else if (auto boolAttr = init.value().dyn_cast()) { + } else if (auto boolAttr = + mlir::dyn_cast(init.value())) { init = rewriter.getBoolAttr(boolAttr.getValue()); } else if (isa( init.value())) { @@ -1719,30 +1728,31 @@ class CIRGlobalOpLowering rewriter.create(loc, value); return mlir::success(); } else if (auto dataMemberAttr = - init.value().dyn_cast()) { + mlir::dyn_cast(init.value())) { init = lowerDataMemberAttr(op->getParentOfType(), dataMemberAttr, *typeConverter); } else if (const auto structAttr = - init.value().dyn_cast()) { + mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( op->getLoc(), lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter)); return mlir::success(); - } else if (auto attr = init.value().dyn_cast()) { + } else if (auto attr = + mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( loc, lowerCirAttrAsValue(op, attr, rewriter, typeConverter)); return mlir::success(); } else if (const auto vtableAttr = - init.value().dyn_cast()) { + mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( op->getLoc(), lowerCirAttrAsValue(op, vtableAttr, rewriter, typeConverter)); return mlir::success(); } else if (const auto typeinfoAttr = - init.value().dyn_cast()) { + mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( op->getLoc(), @@ -1775,12 +1785,12 @@ class CIRUnaryOpLowering "Unary operation's operand type and result type are different"); mlir::Type type = op.getType(); mlir::Type elementType = elementTypeIfVector(type); - bool IsVector = type.isa(); + bool IsVector = mlir::isa(type); auto llvmType = getTypeConverter()->convertType(type); auto loc = op.getLoc(); // Integer unary operations: + - ~ ++ -- - if (elementType.isa()) { + if (mlir::isa(elementType)) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Inc: { assert(!IsVector && "++ not allowed on vector types"); @@ -1825,7 +1835,8 @@ class CIRUnaryOpLowering loc, llvmElementType, mlir::IntegerAttr::get(llvmElementType, -1)); MinusOne = rewriter.create(loc, llvmType); - auto NumElements = type.dyn_cast().getSize(); + auto NumElements = + mlir::dyn_cast(type).getSize(); for (uint64_t i = 0; i < NumElements; ++i) { mlir::Value indexValue = rewriter.create( loc, rewriter.getI64Type(), i); @@ -1844,7 +1855,7 @@ class CIRUnaryOpLowering } // Floating point unary operations: + - ++ -- - if (elementType.isa()) { + if (mlir::isa(elementType)) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Inc: { assert(!IsVector && "++ not allowed on vector types"); @@ -1880,7 +1891,7 @@ class CIRUnaryOpLowering // Boolean unary operations: ! only. (For all others, the operand has // already been promoted to int.) - if (elementType.isa()) { + if (mlir::isa(elementType)) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Not: assert(!IsVector && "NYI: op! on vector mask"); @@ -1897,7 +1908,7 @@ class CIRUnaryOpLowering // Pointer unary operations: + only. (++ and -- of pointers are implemented // with cir.ptr_stride, not cir.unary.) - if (elementType.isa()) { + if (mlir::isa(elementType)) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Plus: rewriter.replaceOp(op, adaptor.getInput()); @@ -1935,8 +1946,8 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { assert((op.getLhs().getType() == op.getRhs().getType()) && "inconsistent operands' types not supported yet"); mlir::Type type = op.getRhs().getType(); - assert((type.isa()) && + assert((mlir::isa(type)) && "operand type not supported yet"); auto llvmTy = getTypeConverter()->convertType(op.getType()); @@ -1947,28 +1958,28 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { switch (op.getKind()) { case mlir::cir::BinOpKind::Add: - if (type.isa()) + if (mlir::isa(type)) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Sub: - if (type.isa()) + if (mlir::isa(type)) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Mul: - if (type.isa()) + if (mlir::isa(type)) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Div: - if (auto ty = type.dyn_cast()) { + if (auto ty = mlir::dyn_cast(type)) { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else @@ -1977,7 +1988,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case mlir::cir::BinOpKind::Rem: - if (auto ty = type.dyn_cast()) { + if (auto ty = mlir::dyn_cast(type)) { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else @@ -2135,8 +2146,9 @@ class CIRShiftOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::ShiftOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto cirAmtTy = op.getAmount().getType().dyn_cast(); - auto cirValTy = op.getValue().getType().dyn_cast(); + auto cirAmtTy = + mlir::dyn_cast(op.getAmount().getType()); + auto cirValTy = mlir::dyn_cast(op.getValue().getType()); auto llvmTy = getTypeConverter()->convertType(op.getType()); mlir::Value amt = adaptor.getAmount(); mlir::Value val = adaptor.getValue(); @@ -2146,7 +2158,7 @@ class CIRShiftOpLowering // Ensure shift amount is the same type as the value. Some undefined // behavior might occur in the casts below as per [C99 6.5.7.3]. - amt = getLLVMIntCast(rewriter, amt, llvmTy.cast(), + amt = getLLVMIntCast(rewriter, amt, mlir::cast(llvmTy), !cirAmtTy.isSigned(), cirValTy.getWidth()); // Lower to the proper LLVM shift operation. @@ -2174,17 +2186,17 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::Value llResult; // Lower to LLVM comparison op. - if (auto intTy = type.dyn_cast()) { + if (auto intTy = mlir::dyn_cast(type)) { auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), intTy.isSigned()); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (auto ptrTy = type.dyn_cast()) { + } else if (auto ptrTy = mlir::dyn_cast(type)) { auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), /* isSigned=*/false); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (type.isa()) { + } else if (mlir::isa(type)) { auto kind = convertCmpKindToFCmpPredicate(cmpOp.getKind()); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); @@ -2228,8 +2240,8 @@ static mlir::Value createLLVMBitOp(mlir::Location loc, mlir::Type resultTy, mlir::Value operand, std::optional poisonZeroInputFlag, mlir::ConversionPatternRewriter &rewriter) { - auto operandIntTy = operand.getType().cast(); - auto resultIntTy = resultTy.cast(); + auto operandIntTy = mlir::cast(operand.getType()); + auto resultIntTy = mlir::cast(resultTy); std::string llvmIntrinName = llvmIntrinBaseName.concat(".i") @@ -2251,7 +2263,7 @@ static mlir::Value createLLVMBitOp(mlir::Location loc, } return getLLVMIntCast(rewriter, op->getResult(0), - resultTy.cast(), + mlir::cast(resultTy), /*isUnsigned=*/true, resultIntTy.getWidth()); } @@ -2592,12 +2604,12 @@ class CIRAtomicFetchLowering mlir::ConversionPatternRewriter &rewriter) const override { bool isInt, isSignedInt = false; // otherwise it's float. - if (auto intTy = op.getVal().getType().dyn_cast()) { + if (auto intTy = + mlir::dyn_cast(op.getVal().getType())) { isInt = true; isSignedInt = intTy.isSigned(); - } else if (op.getVal() - .getType() - .isa()) + } else if (mlir::isa( + op.getVal().getType())) isInt = false; else { return op.emitError() @@ -2644,8 +2656,8 @@ class CIRByteswapOpLowering // Note that LLVM intrinsic calls to @llvm.bswap.i* have the same type as // the operand. - auto resTy = - getTypeConverter()->convertType(op.getType()).cast(); + auto resTy = mlir::cast( + getTypeConverter()->convertType(op.getType())); std::string llvmIntrinName = "llvm.bswap.i"; llvmIntrinName.append(std::to_string(resTy.getWidth())); @@ -2700,7 +2712,7 @@ class CIRGetMemberOpLowering mlir::ConversionPatternRewriter &rewriter) const override { auto llResTy = getTypeConverter()->convertType(op.getType()); const auto structTy = - op.getAddrTy().getPointee().cast(); + mlir::cast(op.getAddrTy().getPointee()); assert(structTy && "expected struct type"); switch (structTy.getKind()) { @@ -2758,7 +2770,7 @@ class CIRPtrDiffOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::PtrDiffOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto dstTy = op.getType().cast(); + auto dstTy = mlir::cast(op.getType()); auto llvmDstTy = getTypeConverter()->convertType(dstTy); auto lhs = rewriter.create(op.getLoc(), llvmDstTy, @@ -2769,7 +2781,7 @@ class CIRPtrDiffOpLowering auto diff = rewriter.create(op.getLoc(), llvmDstTy, lhs, rhs); - auto ptrTy = op.getLhs().getType().cast(); + auto ptrTy = mlir::cast(op.getLhs().getType()); auto typeSize = getTypeSize(ptrTy.getPointee(), *op); // Avoid silly division by 1. @@ -3029,9 +3041,9 @@ class CIRSetBitfieldLowering unsigned storageSize = 0; - if (auto arTy = storageType.dyn_cast()) + if (auto arTy = mlir::dyn_cast(storageType)) storageSize = arTy.getSize() * 8; - else if (auto intTy = storageType.dyn_cast()) + else if (auto intTy = mlir::dyn_cast(storageType)) storageSize = intTy.getWidth(); else llvm_unreachable( @@ -3068,8 +3080,8 @@ class CIRSetBitfieldLowering auto resultTy = getTypeConverter()->convertType(op.getType()); - resultVal = - createIntCast(rewriter, resultVal, resultTy.cast()); + resultVal = createIntCast(rewriter, resultVal, + mlir::cast(resultTy)); if (info.getIsSigned()) { assert(size <= storageSize); @@ -3105,9 +3117,9 @@ class CIRGetBitfieldLowering auto context = storageType.getContext(); unsigned storageSize = 0; - if (auto arTy = storageType.dyn_cast()) + if (auto arTy = mlir::dyn_cast(storageType)) storageSize = arTy.getSize() * 8; - else if (auto intTy = storageType.dyn_cast()) + else if (auto intTy = mlir::dyn_cast(storageType)) storageSize = intTy.getWidth(); else llvm_unreachable( @@ -3133,8 +3145,9 @@ class CIRGetBitfieldLowering } auto resTy = getTypeConverter()->convertType(op.getType()); - auto newOp = createIntCast(rewriter, val, resTy.cast(), - info.getIsSigned()); + auto newOp = + createIntCast(rewriter, val, mlir::cast(resTy), + info.getIsSigned()); rewriter.replaceOp(op, newOp); return mlir::success(); } @@ -3177,7 +3190,7 @@ class CIRCmpThreeWayOpLowering assert(cmpInfo.getLt() == -1 && cmpInfo.getEq() == 0 && cmpInfo.getGt() == 1); - auto operandTy = op.getLhs().getType().cast(); + auto operandTy = mlir::cast(op.getLhs().getType()); auto resultTy = op.getType(); auto llvmIntrinsicName = getLLVMIntrinsicName( operandTy.isSigned(), operandTy.getWidth(), resultTy.getWidth()); @@ -3413,7 +3426,7 @@ static void buildCtorDtorList( llvm::SmallVector, 2> globalXtors; for (auto namedAttr : module->getAttrs()) { if (namedAttr.getName() == globalXtorName) { - for (auto attr : namedAttr.getValue().cast()) + for (auto attr : mlir::cast(namedAttr.getValue())) globalXtors.emplace_back(createXtor(attr)); break; } @@ -3578,23 +3591,23 @@ void ConvertCIRToLLVMPass::runOnOperation() { signalPassFailure(); // Emit the llvm.global_ctors array. - buildCtorDtorList(module, "cir.global_ctors", "llvm.global_ctors", - [](mlir::Attribute attr) { - assert(attr.isa() && - "must be a GlobalCtorAttr"); - auto ctorAttr = attr.cast(); - return std::make_pair(ctorAttr.getName(), - ctorAttr.getPriority()); - }); + buildCtorDtorList( + module, "cir.global_ctors", "llvm.global_ctors", + [](mlir::Attribute attr) { + assert(mlir::isa(attr) && + "must be a GlobalCtorAttr"); + auto ctorAttr = mlir::cast(attr); + return std::make_pair(ctorAttr.getName(), ctorAttr.getPriority()); + }); // Emit the llvm.global_dtors array. - buildCtorDtorList(module, "cir.global_dtors", "llvm.global_dtors", - [](mlir::Attribute attr) { - assert(attr.isa() && - "must be a GlobalDtorAttr"); - auto dtorAttr = attr.cast(); - return std::make_pair(dtorAttr.getName(), - dtorAttr.getPriority()); - }); + buildCtorDtorList( + module, "cir.global_dtors", "llvm.global_dtors", + [](mlir::Attribute attr) { + assert(mlir::isa(attr) && + "must be a GlobalDtorAttr"); + auto dtorAttr = mlir::cast(attr); + return std::make_pair(dtorAttr.getName(), dtorAttr.getPriority()); + }); } std::unique_ptr createConvertCIRToLLVMPass() { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index 1da4d7665f92..ea1c04d76fc9 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -44,11 +44,11 @@ class CIRDialectLLVMIRTranslationInterface if (!func) return mlir::success(); llvm::Function *llvmFunc = moduleTranslation.lookupFunction(func.getName()); - if (auto extraAttr = attribute.getValue() - .dyn_cast()) { + if (auto extraAttr = mlir::dyn_cast( + attribute.getValue())) { for (auto attr : extraAttr.getElements()) { if (auto inlineAttr = - attr.getValue().dyn_cast()) { + mlir::dyn_cast(attr.getValue())) { if (inlineAttr.isNoInline()) llvmFunc->addFnAttr(llvm::Attribute::NoInline); else if (inlineAttr.isAlwaysInline()) @@ -57,9 +57,9 @@ class CIRDialectLLVMIRTranslationInterface llvmFunc->addFnAttr(llvm::Attribute::InlineHint); else llvm_unreachable("Unknown inline kind"); - } else if (attr.getValue().dyn_cast()) { + } else if (mlir::dyn_cast(attr.getValue())) { llvmFunc->addFnAttr(llvm::Attribute::OptimizeNone); - } else if (attr.getValue().dyn_cast()) { + } else if (mlir::dyn_cast(attr.getValue())) { llvmFunc->addFnAttr(llvm::Attribute::NoUnwind); } } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h b/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h index c9ee75a06352..b42f7c263b0f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h @@ -20,8 +20,8 @@ mlir::Value createIntCast(mlir::OpBuilder &bld, mlir::Value src, auto srcTy = src.getType(); assert(isa(srcTy)); - auto srcWidth = srcTy.cast().getWidth(); - auto dstWidth = dstTy.cast().getWidth(); + auto srcWidth = mlir::cast(srcTy).getWidth(); + auto dstWidth = mlir::cast(dstTy).getWidth(); auto loc = src.getLoc(); if (dstWidth > srcWidth && isSigned) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp index 41311c1408e4..f308076ef62c 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp @@ -71,7 +71,7 @@ class SCFWhileLoop { static int64_t getConstant(mlir::cir::ConstantOp op) { auto attr = op->getAttrs().front().getValue(); - const auto IntAttr = attr.dyn_cast(); + const auto IntAttr = mlir::dyn_cast(attr); return IntAttr.getValue().getSExtValue(); } @@ -143,7 +143,7 @@ mlir::cir::CmpOp SCFLoop::findCmpOp() { llvm_unreachable("Can't find loop CmpOp"); auto type = cmpOp.getLhs().getType(); - if (!type.isa()) + if (!mlir::isa(type)) llvm_unreachable("Non-integer type IV is not supported"); auto lhsDefOp = cmpOp.getLhs().getDefiningOp(); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index e823b428ab11..083bf0d69a12 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -337,8 +337,9 @@ class CIRShiftOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::ShiftOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto cirAmtTy = op.getAmount().getType().dyn_cast(); - auto cirValTy = op.getValue().getType().dyn_cast(); + auto cirAmtTy = + mlir::dyn_cast(op.getAmount().getType()); + auto cirValTy = mlir::dyn_cast(op.getValue().getType()); auto mlirTy = getTypeConverter()->convertType(op.getType()); mlir::Value amt = adaptor.getAmount(); mlir::Value val = adaptor.getValue(); @@ -397,9 +398,8 @@ class CIRBitOpLowering : public mlir::OpConversionPattern { matchAndRewrite(CIROp op, typename mlir::OpConversionPattern::OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto resultIntTy = this->getTypeConverter() - ->convertType(op.getType()) - .template cast(); + auto resultIntTy = mlir::cast( + this->getTypeConverter()->convertType(op.getType())); auto res = rewriter.create(op->getLoc(), adaptor.getInput()); auto newOp = createIntCast(rewriter, res->getResult(0), resultIntTy, /*isSigned=*/false); @@ -438,8 +438,8 @@ class CIRBitClrsbOpLowering auto select = rewriter.create( op.getLoc(), isNeg, flipped, adaptor.getInput()); - auto resTy = - getTypeConverter()->convertType(op.getType()).cast(); + auto resTy = mlir::cast( + getTypeConverter()->convertType(op.getType())); auto clz = rewriter.create(op->getLoc(), select); auto newClz = createIntCast(rewriter, clz, resTy); @@ -519,9 +519,9 @@ class CIRConstantOpLowering if (mlir::isa(op.getType())) { auto boolValue = mlir::cast(op.getValue()); value = rewriter.getIntegerAttr(ty, boolValue.getValue()); - } else if (op.getType().isa()) { + } else if (mlir::isa(op.getType())) { value = rewriter.getFloatAttr( - ty, op.getValue().cast().getValue()); + ty, mlir::cast(op.getValue()).getValue()); } else { auto cirIntAttr = mlir::dyn_cast(op.getValue()); assert(cirIntAttr && "NYI non cir.int attr"); @@ -627,19 +627,19 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { assert((adaptor.getLhs().getType() == adaptor.getRhs().getType()) && "inconsistent operands' types not supported yet"); mlir::Type mlirType = getTypeConverter()->convertType(op.getType()); - assert((mlirType.isa() || - mlirType.isa() || - mlirType.isa()) && + assert((mlir::isa(mlirType) || + mlir::isa(mlirType) || + mlir::isa(mlirType)) && "operand type not supported yet"); auto type = op.getLhs().getType(); - if (auto VecType = type.dyn_cast()) { + if (auto VecType = mlir::dyn_cast(type)) { type = VecType.getEltType(); } switch (op.getKind()) { case mlir::cir::BinOpKind::Add: - if (type.isa()) + if (mlir::isa(type)) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else @@ -647,7 +647,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Sub: - if (type.isa()) + if (mlir::isa(type)) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else @@ -655,7 +655,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Mul: - if (type.isa()) + if (mlir::isa(type)) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else @@ -663,7 +663,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Div: - if (auto ty = type.dyn_cast()) { + if (auto ty = mlir::dyn_cast(type)) { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); @@ -675,7 +675,7 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; case mlir::cir::BinOpKind::Rem: - if (auto ty = type.dyn_cast()) { + if (auto ty = mlir::dyn_cast(type)) { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); @@ -715,15 +715,15 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::Value mlirResult; - if (auto ty = type.dyn_cast()) { + if (auto ty = mlir::dyn_cast(type)) { auto kind = convertCmpKindToCmpIPredicate(op.getKind(), ty.isSigned()); mlirResult = rewriter.create( op.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (auto ty = type.dyn_cast()) { + } else if (auto ty = mlir::dyn_cast(type)) { auto kind = convertCmpKindToCmpFPredicate(op.getKind()); mlirResult = rewriter.create( op.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (auto ty = type.dyn_cast()) { + } else if (auto ty = mlir::dyn_cast(type)) { llvm_unreachable("pointer comparison not supported yet"); } else { return op.emitError() << "unsupported type for CmpOp: " << type; @@ -911,7 +911,7 @@ class CIRGlobalOpLowering mlir::Attribute initialValue = mlir::Attribute(); std::optional init = op.getInitialValue(); if (init.has_value()) { - if (auto constArr = init.value().dyn_cast()) { + if (auto constArr = mlir::dyn_cast(init.value())) { if (memrefType.getShape().size()) { auto rtt = mlir::RankedTensorType::get(memrefType.getShape(), memrefType.getElementType()); @@ -920,13 +920,16 @@ class CIRGlobalOpLowering auto rtt = mlir::RankedTensorType::get({}, convertedType); initialValue = mlir::DenseIntElementsAttr::get(rtt, 0); } - } else if (auto intAttr = init.value().dyn_cast()) { + } else if (auto intAttr = + mlir::dyn_cast(init.value())) { auto rtt = mlir::RankedTensorType::get({}, convertedType); initialValue = mlir::DenseIntElementsAttr::get(rtt, intAttr.getValue()); - } else if (auto fltAttr = init.value().dyn_cast()) { + } else if (auto fltAttr = + mlir::dyn_cast(init.value())) { auto rtt = mlir::RankedTensorType::get({}, convertedType); initialValue = mlir::DenseFPElementsAttr::get(rtt, fltAttr.getValue()); - } else if (auto boolAttr = init.value().dyn_cast()) { + } else if (auto boolAttr = + mlir::dyn_cast(init.value())) { auto rtt = mlir::RankedTensorType::get({}, convertedType); initialValue = mlir::DenseIntElementsAttr::get(rtt, (char)boolAttr.getValue()); @@ -979,7 +982,7 @@ class CIRVectorCreateLowering mlir::LogicalResult matchAndRewrite(mlir::cir::VecCreateOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto vecTy = op.getType().dyn_cast(); + auto vecTy = mlir::dyn_cast(op.getType()); assert(vecTy && "result type of cir.vec.create op is not VectorType"); auto elementTy = typeConverter->convertType(vecTy.getEltType()); auto loc = op.getLoc(); @@ -1037,19 +1040,19 @@ class CIRVectorCmpOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::VecCmpOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - assert(op.getType().isa() && - op.getLhs().getType().isa() && - op.getRhs().getType().isa() && + assert(mlir::isa(op.getType()) && + mlir::isa(op.getLhs().getType()) && + mlir::isa(op.getRhs().getType()) && "Vector compare with non-vector type"); auto elementType = - op.getLhs().getType().cast().getEltType(); + mlir::cast(op.getLhs().getType()).getEltType(); mlir::Value bitResult; - if (auto intType = elementType.dyn_cast()) { + if (auto intType = mlir::dyn_cast(elementType)) { bitResult = rewriter.create( op.getLoc(), convertCmpKindToCmpIPredicate(op.getKind(), intType.isSigned()), adaptor.getLhs(), adaptor.getRhs()); - } else if (elementType.isa()) { + } else if (mlir::isa(elementType)) { bitResult = rewriter.create( op.getLoc(), convertCmpKindToCmpFPredicate(op.getKind()), adaptor.getLhs(), adaptor.getRhs()); @@ -1080,7 +1083,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { using CIR = mlir::cir::CastKind; switch (op.getKind()) { case CIR::array_to_ptrdecay: { - auto newDstType = convertTy(dstType).cast(); + auto newDstType = mlir::cast(convertTy(dstType)); rewriter.replaceOpWithNewOp( op, newDstType, src, 0, std::nullopt, std::nullopt); return mlir::success(); @@ -1097,7 +1100,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { case CIR::integral: { auto newDstType = convertTy(dstType); auto srcType = op.getSrc().getType(); - mlir::cir::IntType srcIntType = srcType.cast(); + mlir::cir::IntType srcIntType = mlir::cast(srcType); auto newOp = createIntCast(rewriter, src, newDstType, srcIntType.isSigned()); rewriter.replaceOp(op, newOp); @@ -1108,12 +1111,12 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto srcTy = op.getSrc().getType(); auto dstTy = op.getResult().getType(); - if (!dstTy.isa() || - !srcTy.isa()) + if (!mlir::isa(dstTy) || + !mlir::isa(srcTy)) return op.emitError() << "NYI cast from " << srcTy << " to " << dstTy; auto getFloatWidth = [](mlir::Type ty) -> unsigned { - return ty.cast().getWidth(); + return mlir::cast(ty).getWidth(); }; if (getFloatWidth(srcTy) > getFloatWidth(dstTy)) @@ -1123,7 +1126,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case CIR::float_to_bool: { - auto dstTy = op.getType().cast(); + auto dstTy = mlir::cast(op.getType()); auto newDstType = convertTy(dstTy); auto kind = mlir::arith::CmpFPredicate::UNE; @@ -1139,8 +1142,8 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case CIR::bool_to_int: { - auto dstTy = op.getType().cast(); - auto newDstType = convertTy(dstTy).cast(); + auto dstTy = mlir::cast(op.getType()); + auto newDstType = mlir::cast(convertTy(dstTy)); auto newOp = createIntCast(rewriter, src, newDstType); rewriter.replaceOp(op, newOp); return mlir::success(); @@ -1154,7 +1157,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { case CIR::int_to_float: { auto dstTy = op.getType(); auto newDstType = convertTy(dstTy); - if (op.getSrc().getType().cast().isSigned()) + if (mlir::cast(op.getSrc().getType()).isSigned()) rewriter.replaceOpWithNewOp(op, newDstType, src); else rewriter.replaceOpWithNewOp(op, newDstType, src); @@ -1163,7 +1166,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { case CIR::float_to_int: { auto dstTy = op.getType(); auto newDstType = convertTy(dstTy); - if (op.getResult().getType().cast().isSigned()) + if (mlir::cast(op.getResult().getType()).isSigned()) rewriter.replaceOpWithNewOp(op, newDstType, src); else rewriter.replaceOpWithNewOp(op, newDstType, src); @@ -1246,7 +1249,7 @@ class CIRPtrStrideOpLowering return mlir::failure(); auto base = baseOp->getOperand(0); auto dstType = op.getResult().getType(); - auto newDstType = convertTy(dstType).cast(); + auto newDstType = mlir::cast(convertTy(dstType)); auto stride = adaptor.getStride(); auto indexType = rewriter.getIndexType(); // Generate casting if the stride is not index type. diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h b/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h index c8b3b4a5bc12..46d2bd7fc2a1 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h @@ -24,8 +24,8 @@ mlir::Value createIntCast(mlir::ConversionPatternRewriter &rewriter, assert(mlir::isa(srcTy)); assert(mlir::isa(dstTy)); - auto srcWidth = srcTy.cast().getWidth(); - auto dstWidth = dstTy.cast().getWidth(); + auto srcWidth = mlir::cast(srcTy).getWidth(); + auto dstWidth = mlir::cast(dstTy).getWidth(); auto loc = src.getLoc(); if (dstWidth > srcWidth && isSigned) From b93a6feb2eb87f97ecc5476796f69f519d701923 Mon Sep 17 00:00:00 2001 From: roro47 <40341016+roro47@users.noreply.github.com> Date: Thu, 27 Jun 2024 17:59:34 +0100 Subject: [PATCH 1654/2301] [CIR] Refactor TryCallOp creation (#704) --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 39 +++++++++++++++++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 14 ++----- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 6 +-- 3 files changed, 46 insertions(+), 13 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index b9ad798d27d0..ff125149c865 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -504,6 +504,45 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createCallOp(loc, callee, mlir::cir::VoidType(), operands, extraFnAttr); } + + mlir::cir::TryCallOp + createTryCallOp(mlir::Location loc, mlir::Value exception, + mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), + mlir::Type returnType = mlir::cir::VoidType(), + mlir::ValueRange operands = mlir::ValueRange(), + mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + mlir::cir::TryCallOp tryCallOp = create( + loc, callee, exception, returnType, operands); + if (extraFnAttr) { + tryCallOp->setAttr("extra_attrs", extraFnAttr); + } else { + mlir::NamedAttrList empty; + tryCallOp->setAttr("extra_attrs", + mlir::cir::ExtraFuncAttributesAttr::get( + getContext(), empty.getDictionary(getContext()))); + } + return tryCallOp; + } + + mlir::cir::TryCallOp + createTryCallOp(mlir::Location loc, mlir::cir::FuncOp callee, + mlir::Value exception, mlir::ValueRange operands, + mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + return createTryCallOp(loc, exception, mlir::SymbolRefAttr::get(callee), + callee.getFunctionType().getReturnType(), operands, + extraFnAttr); + } + + mlir::cir::TryCallOp createIndirectTryCallOp(mlir::Location loc, + mlir::Value ind_target, + mlir::Value exception, + mlir::cir::FuncType fn_type, + mlir::ValueRange operands) { + llvm::SmallVector resOperands({ind_target}); + resOperands.append(operands.begin(), operands.end()); + return createTryCallOp(loc, exception, mlir::SymbolRefAttr(), + fn_type.getReturnType(), resOperands); + } }; } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 6a76ec2f4bc4..b8af2c76115b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2881,14 +2881,6 @@ def TryCallOp : CIR_CallOp<"try_call"> { let results = (outs Variadic); let builders = [ - OpBuilder<(ins "FuncOp":$callee, "mlir::Value":$exception, - CArg<"ValueRange", "{}">:$operands), [{ - $_state.addOperands(ValueRange{exception}); - $_state.addOperands(operands); - $_state.addAttribute("callee", SymbolRefAttr::get(callee)); - if (!callee.getFunctionType().isVoid()) - $_state.addTypes(callee.getFunctionType().getReturnType()); - }]>, OpBuilder<(ins "Value":$ind_target, "mlir::Value":$exception, "FuncType":$fn_type, CArg<"ValueRange", "{}">:$operands), [{ @@ -2903,8 +2895,10 @@ def TryCallOp : CIR_CallOp<"try_call"> { [{ $_state.addOperands(ValueRange{exception}); $_state.addOperands(operands); - $_state.addAttribute("callee", callee); - $_state.addTypes(resType); + if (callee) + $_state.addAttribute("callee", callee); + if (resType && !isa(resType)) + $_state.addTypes(resType); }]>]; let hasVerifier = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index c453de137bdd..5966a5d3f7e6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -456,11 +456,11 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, mlir::cir::TryCallOp tryCallOp; if (indirectFuncTy) { - tryCallOp = builder.create( + tryCallOp = builder.createIndirectTryCallOp( callLoc, addr, indirectFuncVal, indirectFuncTy, CIRCallArgs); } else { - tryCallOp = builder.create(callLoc, directFuncOp, - addr, CIRCallArgs); + tryCallOp = + builder.createTryCallOp(callLoc, directFuncOp, addr, CIRCallArgs); } tryCallOp->setAttr("extra_attrs", extraFnAttrs); return tryCallOp; From 55d2486d39629493dcbe994b3fb9dfaa4ddfe951 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 28 Jun 2024 03:52:53 +0800 Subject: [PATCH 1655/2301] [CIR][Dialect] Make addrspace in pointer types to model LangAS (#692) This PR implements the solution B as discussed in #682. * Use the syntax `cir.ptr` `cir.ptr)` `cir.ptr` * Add a new `AddressSpaceAttr`, which is used as the new type of addrspace parameter in `PointerType` * `AddressSpaceAttr` itself takes one single `int64_t $value` as the parameter * TableGen templates to generate the conversion between `clang::LangAS -> int64_t $value <-> text-form CIR` --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 16 +- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 137 ++++++++++++++++++ clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 2 + .../include/clang/CIR/Dialect/IR/CIRTypes.td | 26 +++- clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 8 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 64 ++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 14 +- .../CIR/CodeGen/OpenCL/addrspace-alloca.cl | 11 +- clang/test/CIR/CodeGen/OpenCL/spirv-target.cl | 3 + .../CIR/CodeGen/address-space-conversion.cpp | 30 ++-- clang/test/CIR/CodeGen/address-space.c | 14 +- clang/test/CIR/IR/address-space.cir | 9 +- clang/test/CIR/IR/cast.cir | 4 +- clang/test/CIR/IR/invalid.cir | 38 ++++- clang/test/CIR/Lowering/address-space.cir | 11 +- clang/test/CIR/Transforms/merge-cleanups.cir | 10 +- 17 files changed, 341 insertions(+), 57 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index ff125149c865..df2231c389a3 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -75,14 +75,18 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return mlir::cir::IntType::get(getContext(), N, true); } - mlir::cir::PointerType getPointerTo(mlir::Type ty, - unsigned addressSpace = 0) { - assert(!addressSpace && "address space is NYI"); - return mlir::cir::PointerType::get(getContext(), ty); + mlir::cir::PointerType + getPointerTo(mlir::Type ty, clang::LangAS langAS = clang::LangAS::Default) { + mlir::cir::AddressSpaceAttr addrSpaceAttr; + if (langAS != clang::LangAS::Default) + addrSpaceAttr = mlir::cir::AddressSpaceAttr::get(getContext(), langAS); + + return mlir::cir::PointerType::get(getContext(), ty, addrSpaceAttr); } - mlir::cir::PointerType getVoidPtrTy(unsigned addressSpace = 0) { - return getPointerTo(::mlir::cir::VoidType::get(getContext()), addressSpace); + mlir::cir::PointerType + getVoidPtrTy(clang::LangAS langAS = clang::LangAS::Default) { + return getPointerTo(::mlir::cir::VoidType::get(getContext()), langAS); } mlir::Value createLoad(mlir::Location loc, mlir::Value ptr, diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 2cd912e42f6f..0d270a4b59ce 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -617,6 +617,143 @@ def DynamicCastInfoAttr }]; } +//===----------------------------------------------------------------------===// +// AddressSpaceAttr +//===----------------------------------------------------------------------===// + +// TODO: other CIR AS cases +def AS_Target : I32EnumAttrCase<"target", 21>; + +def AddressSpaceAttr : CIR_Attr<"AddressSpace", "addrspace"> { + + let summary = "Address space attribute for pointer types"; + let description = [{ + The address space attribute models `clang::LangAS` rather than the LLVM + address space, which means it's not yet converted by the address space map + to carry target-specific semantics. + + The representation is one-to-one except for `LangAS::Default`, which + corresponds to a null attribute instead. + }]; + + let parameters = (ins "int32_t":$value); + + let assemblyFormat = [{ + `<` $value `>` + }]; + + let builders = [ + AttrBuilder<(ins "clang::LangAS":$langAS), [{ + assert(langAS != clang::LangAS::Default && + "Default address space is encoded as null attribute"); + return $_get($_ctxt, getValueFromLangAS(langAS).value()); + }]> + ]; + + let cppNamespace = "::mlir::cir"; + + // The following codes implement these conversions: + // clang::LangAS -> int32_t <-> text-form CIR + + // CIR_PointerType manipulates the parse- and stringify- methods to provide + // simplified assembly format `custom`. + + list langASCases = [ + // TODO: includes all non-target CIR AS cases here + ]; + + I32EnumAttrCase targetASCase = AS_Target; + + let extraClassDeclaration = [{ + static constexpr char kTargetKeyword[] = "}]#targetASCase.symbol#[{"; + static constexpr int32_t kFirstTargetASValue = }]#targetASCase.value#[{; + + bool isLang() const; + bool isTarget() const; + unsigned getTargetValue() const; + + static std::optional parseValueFromString(llvm::StringRef s); + static std::optional getValueFromLangAS(clang::LangAS v); + static std::optional stringifyValue(int32_t v); + }]; + + let extraClassDefinition = [{ + bool $cppClass::isLang() const { + return !isTarget(); + } + + bool $cppClass::isTarget() const { + return getValue() >= kFirstTargetASValue; + } + + unsigned $cppClass::getTargetValue() const { + assert(isTarget() && "Not a target address space"); + return getValue() - kFirstTargetASValue; + } + + std::optional + $cppClass::parseValueFromString(llvm::StringRef str) { + return llvm::StringSwitch<::std::optional>(str) + }] + # + !interleave( + !foreach(case, langASCases, + ".Case(\""#case.symbol# "\", "#case.value # ")\n" + ), + "\n" + ) + # + [{ + // Target address spaces are not parsed here + .Default(std::nullopt); + } + + std::optional + $cppClass::stringifyValue(int32_t value) { + switch (value) { + }] + # + !interleave( + !foreach(case, langASCases, + "case "#case.value + # ": return \""#case.symbol # "\";" ), + "\n" + ) + # + [{ + default: + // Target address spaces are not processed here + return std::nullopt; + } + } + + std::optional + $cppClass::getValueFromLangAS(clang::LangAS langAS) { + assert((langAS == clang::LangAS::Default || + clang::isTargetAddressSpace(langAS)) && + "Language-specific address spaces are not supported"); + switch (langAS) { + }] + # + !interleave( + !foreach(case, langASCases, + "case clang::LangAS::"#case.symbol + # [{: llvm_unreachable("Not Yet Supported");}] ), + "\n" + ) + # + [{ + case clang::LangAS::Default: + // Default address space should be encoded as a null attribute. + return std::nullopt; + default: + // Target address space offset arithmetics + return clang::toTargetAddressSpace(langAS) + kFirstTargetASValue; + } + } + }]; +} + //===----------------------------------------------------------------------===// // AST Wrappers //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index d2d9973c7e9b..ff2a98a9ff5a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -20,6 +20,8 @@ #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" + //===----------------------------------------------------------------------===// // CIR StructType // diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 99267a88dc69..47de7623ffce 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -208,24 +208,34 @@ def CIR_PointerType : CIR_Type<"Pointer", "ptr", `CIR.ptr` is a type returned by any op generating a pointer in C++. }]; - let parameters = (ins "mlir::Type":$pointee, - DefaultValuedParameter<"unsigned", "0">:$addrSpace); + let parameters = (ins + "mlir::Type":$pointee, + // FIXME(cir): Currently unable to directly use AddressSpaceAttr because of + // cyclic dep. Workaround with the top type and verifier. + OptionalParameter<"mlir::Attribute">:$addrSpace + ); let builders = [ TypeBuilderWithInferredContext<(ins - "mlir::Type":$pointee, CArg<"unsigned", "0">:$addrSpace), [{ - return Base::get(pointee.getContext(), pointee, addrSpace); + "mlir::Type":$pointee, + CArg<"mlir::Attribute", "{}">:$addrSpace), [{ + return $_get(pointee.getContext(), pointee, addrSpace); }]>, TypeBuilder<(ins - "mlir::Type":$pointee, CArg<"unsigned", "0">:$addrSpace), [{ - return Base::get($_ctxt, pointee, addrSpace); - }]>, + "mlir::Type":$pointee, + CArg<"mlir::Attribute", "{}">:$addrSpace), [{ + return $_get($_ctxt, pointee, addrSpace); + }]> ]; let assemblyFormat = [{ - `<` $pointee ( `,` `addrspace` `(` $addrSpace^ `)` )? `>` + `<` $pointee ( `,` `addrspace` `(` + custom($addrSpace)^ + `)` )? `>` }]; + let genVerifyDecl = 1; + let skipDefaultBuilders = 1; let extraClassDeclaration = [{ diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 1de2d2d1de53..a88c7acb93dc 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -156,6 +156,7 @@ struct MissingFeatures { static bool constantFoldsToSimpleInteger() { return false; } static bool checkFunctionCallABI() { return false; } static bool zeroInitializer() { return false; } + static bool targetLoweringInfoAddressSpaceMap() { return false; } static bool targetCodeGenInfoIsProtoCallVariadic() { return false; } static bool targetCodeGenInfoGetNullPointer() { return false; } static bool operandBundles() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 8135fd0b7a95..d4c38430e2e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -605,9 +605,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { const ReferenceType *RTy = cast(Ty); QualType ETy = RTy->getPointeeType(); auto PointeeType = convertTypeForMem(ETy); - ResultType = ::mlir::cir::PointerType::get( - Builder.getContext(), PointeeType, - Context.getTargetAddressSpace(ETy.getAddressSpace())); + ResultType = Builder.getPointerTo(PointeeType, ETy.getAddressSpace()); assert(ResultType && "Cannot get pointer type?"); break; } @@ -622,9 +620,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { // if (PointeeType->isVoidTy()) // PointeeType = Builder.getI8Type(); - ResultType = ::mlir::cir::PointerType::get( - Builder.getContext(), PointeeType, - Context.getTargetAddressSpace(ETy.getAddressSpace())); + ResultType = Builder.getPointerTo(PointeeType, ETy.getAddressSpace()); assert(ResultType && "Cannot get pointer type?"); break; } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 597538419854..07a7339374dc 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -45,6 +45,11 @@ parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, static void printFuncTypeArgs(mlir::AsmPrinter &p, mlir::ArrayRef params, bool isVarArg); +static mlir::ParseResult parsePointerAddrSpace(mlir::AsmParser &p, + mlir::Attribute &addrSpaceAttr); +static void printPointerAddrSpace(mlir::AsmPrinter &p, + mlir::Attribute addrSpaceAttr); + //===----------------------------------------------------------------------===// // Get autogenerated stuff //===----------------------------------------------------------------------===// @@ -872,6 +877,65 @@ llvm::ArrayRef FuncType::getReturnTypes() const { bool FuncType::isVoid() const { return mlir::isa(getReturnType()); } +//===----------------------------------------------------------------------===// +// PointerType Definitions +//===----------------------------------------------------------------------===// + +mlir::LogicalResult +PointerType::verify(llvm::function_ref emitError, + mlir::Type pointee, mlir::Attribute addrSpace) { + if (addrSpace && !mlir::isa(addrSpace)) { + emitError() << "unexpected addrspace attribute type"; + return mlir::failure(); + } + return mlir::success(); +} + +mlir::ParseResult parsePointerAddrSpace(mlir::AsmParser &p, + mlir::Attribute &addrSpaceAttr) { + using mlir::cir::AddressSpaceAttr; + auto attrLoc = p.getCurrentLocation(); + + llvm::StringRef addrSpaceKind; + if (mlir::failed(p.parseOptionalKeyword(&addrSpaceKind))) { + p.emitError(attrLoc, "expected keyword for addrspace kind"); + return mlir::failure(); + } + + if (addrSpaceKind == AddressSpaceAttr::kTargetKeyword) { + int64_t targetValue = -1; + if (p.parseLess() || p.parseInteger(targetValue) || p.parseGreater()) { + return mlir::failure(); + } + addrSpaceAttr = AddressSpaceAttr::get( + p.getContext(), AddressSpaceAttr::kFirstTargetASValue + targetValue); + } else { + std::optional value = + AddressSpaceAttr::parseValueFromString(addrSpaceKind); + // not target AS, must be wrong keyword if no value + if (!value.has_value()) { + p.emitError(attrLoc, "invalid addrspace kind keyword: " + addrSpaceKind); + return mlir::failure(); + } + + addrSpaceAttr = AddressSpaceAttr::get(p.getContext(), *value); + } + + return mlir::success(); +} + +void printPointerAddrSpace(mlir::AsmPrinter &p, + mlir::Attribute rawAddrSpaceAttr) { + using mlir::cir::AddressSpaceAttr; + auto addrSpaceAttr = mlir::cast(rawAddrSpaceAttr); + if (addrSpaceAttr.isTarget()) { + p << AddressSpaceAttr::kTargetKeyword << "<" + << addrSpaceAttr.getTargetValue() << ">"; + } else { + p << AddressSpaceAttr::stringifyValue(addrSpaceAttr.getValue()); + } +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 95b6d8bb8e9b..3bde0f97e0f8 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3336,8 +3336,20 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, mlir::DataLayout &dataLayout) { converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { // Drop pointee type since LLVM dialect only allows opaque pointers. + + auto addrSpace = + mlir::cast_if_present(type.getAddrSpace()); + // null addrspace attribute indicates the default addrspace + if (!addrSpace) + return mlir::LLVM::LLVMPointerType::get(type.getContext()); + + // TODO(cir): Query the target-specific address space map to lower other ASs + // like `opencl_private`. + assert(!MissingFeatures::targetLoweringInfoAddressSpaceMap()); + assert(addrSpace.isTarget() && "NYI"); + return mlir::LLVM::LLVMPointerType::get(type.getContext(), - type.getAddrSpace()); + addrSpace.getTargetValue()); }); converter.addConversion([&](mlir::cir::DataMemberType type) -> mlir::Type { return mlir::IntegerType::get(type.getContext(), diff --git a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl index baa14874765b..7650a08968e4 100644 --- a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl +++ b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl @@ -3,10 +3,13 @@ // RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM -// CIR: cir.func @func(%arg0: !cir.ptr +// Lowering of language-specific AS not supported +// XFAIL: * + +// CIR: cir.func @func(%arg0: !cir.ptr)> // LLVM: @func(ptr addrspace(3) kernel void func(local int *p) { - // CIR-NEXT: %[[#ALLOCA_P:]] = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} + // CIR-NEXT: %[[#ALLOCA_P:]] = cir.alloca !cir.ptr)>, !cir.ptr)>>, ["p", init] {alignment = 8 : i64} // LLVM-NEXT: %[[#ALLOCA_P:]] = alloca ptr addrspace(3), i64 1, align 8 int x; @@ -14,11 +17,11 @@ kernel void func(local int *p) { // LLVM-NEXT: %[[#ALLOCA_X:]] = alloca i32, i64 1, align 4 global char *b; - // CIR-NEXT: %[[#ALLOCA_B:]] = cir.alloca !cir.ptr, !cir.ptr>, ["b"] {alignment = 8 : i64} + // CIR-NEXT: %[[#ALLOCA_B:]] = cir.alloca !cir.ptr)>, !cir.ptr)>>, ["b"] {alignment = 8 : i64} // LLVM-NEXT: %[[#ALLOCA_B:]] = alloca ptr addrspace(1), i64 1, align 8 // Store of the argument `p` - // CIR-NEXT: cir.store %arg0, %[[#ALLOCA_P]] : !cir.ptr, !cir.ptr> + // CIR-NEXT: cir.store %arg0, %[[#ALLOCA_P]] : !cir.ptr)>, !cir.ptr)>> // LLVM-NEXT: store ptr addrspace(3) %{{[0-9]+}}, ptr %[[#ALLOCA_P]], align 8 return; diff --git a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl index 44c59fc3534c..eb6d2028d1ba 100644 --- a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl +++ b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl @@ -4,6 +4,9 @@ // RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t_64.ll // RUN: FileCheck --input-file=%t_64.ll %s --check-prefix=LLVM-SPIRV64 +// Lowering of language-specific AS not supported +// XFAIL: * + // CIR-SPIRV64: cir.triple = "spirv64-unknown-unknown" // LLVM-SPIRV64: target triple = "spirv64-unknown-unknown" diff --git a/clang/test/CIR/CodeGen/address-space-conversion.cpp b/clang/test/CIR/CodeGen/address-space-conversion.cpp index 1490a174892a..e618e9ac21fe 100644 --- a/clang/test/CIR/CodeGen/address-space-conversion.cpp +++ b/clang/test/CIR/CodeGen/address-space-conversion.cpp @@ -14,9 +14,9 @@ using ri2_t = int __attribute__((address_space(2))) &; void test_ptr() { pi1_t ptr1; pi2_t ptr2 = (pi2_t)ptr1; - // CIR: %[[#PTR1:]] = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.ptr - // CIR-NEXT: %[[#CAST:]] = cir.cast(address_space, %[[#PTR1]] : !cir.ptr), !cir.ptr - // CIR-NEXT: cir.store %[[#CAST]], %{{[0-9]+}} : !cir.ptr, !cir.ptr> + // CIR: %[[#PTR1:]] = cir.load %{{[0-9]+}} : !cir.ptr)>>, !cir.ptr)> + // CIR-NEXT: %[[#CAST:]] = cir.cast(address_space, %[[#PTR1]] : !cir.ptr)>), !cir.ptr)> + // CIR-NEXT: cir.store %[[#CAST]], %{{[0-9]+}} : !cir.ptr)>, !cir.ptr)>> // LLVM: %[[#PTR1:]] = load ptr addrspace(1), ptr %{{[0-9]+}}, align 8 // LLVM-NEXT: %[[#CAST:]] = addrspacecast ptr addrspace(1) %[[#PTR1]] to ptr addrspace(2) @@ -29,11 +29,11 @@ void test_ref() { pi1_t ptr; ri1_t ref1 = *ptr; ri2_t ref2 = (ri2_t)ref1; - // CIR: %[[#DEREF:]] = cir.load deref %{{[0-9]+}} : !cir.ptr>, !cir.ptr - // CIR-NEXT: cir.store %[[#DEREF]], %[[#ALLOCAREF1:]] : !cir.ptr, !cir.ptr> - // CIR-NEXT: %[[#REF1:]] = cir.load %[[#ALLOCAREF1]] : !cir.ptr>, !cir.ptr - // CIR-NEXT: %[[#CAST:]] = cir.cast(address_space, %[[#REF1]] : !cir.ptr), !cir.ptr - // CIR-NEXT: cir.store %[[#CAST]], %{{[0-9]+}} : !cir.ptr, !cir.ptr> + // CIR: %[[#DEREF:]] = cir.load deref %{{[0-9]+}} : !cir.ptr)>>, !cir.ptr)> + // CIR-NEXT: cir.store %[[#DEREF]], %[[#ALLOCAREF1:]] : !cir.ptr)>, !cir.ptr)>> + // CIR-NEXT: %[[#REF1:]] = cir.load %[[#ALLOCAREF1]] : !cir.ptr)>>, !cir.ptr)> + // CIR-NEXT: %[[#CAST:]] = cir.cast(address_space, %[[#REF1]] : !cir.ptr)>), !cir.ptr)> + // CIR-NEXT: cir.store %[[#CAST]], %{{[0-9]+}} : !cir.ptr)>, !cir.ptr)>> // LLVM: %[[#DEREF:]] = load ptr addrspace(1), ptr %{{[0-9]+}}, align 8 // LLVM-NEXT: store ptr addrspace(1) %[[#DEREF]], ptr %[[#ALLOCAREF1:]], align 8 @@ -47,10 +47,10 @@ void test_ref() { void test_nullptr() { constexpr pi1_t null1 = nullptr; pi2_t ptr = (pi2_t)null1; - // CIR: %[[#NULL1:]] = cir.const #cir.ptr : !cir.ptr - // CIR-NEXT: cir.store %[[#NULL1]], %{{[0-9]+}} : !cir.ptr, !cir.ptr> - // CIR-NEXT: %[[#NULL2:]] = cir.const #cir.ptr : !cir.ptr - // CIR-NEXT: cir.store %[[#NULL2]], %{{[0-9]+}} : !cir.ptr, !cir.ptr> + // CIR: %[[#NULL1:]] = cir.const #cir.ptr : !cir.ptr)> + // CIR-NEXT: cir.store %[[#NULL1]], %{{[0-9]+}} : !cir.ptr)>, !cir.ptr)>> + // CIR-NEXT: %[[#NULL2:]] = cir.const #cir.ptr : !cir.ptr)> + // CIR-NEXT: cir.store %[[#NULL2]], %{{[0-9]+}} : !cir.ptr)>, !cir.ptr)>> // LLVM: store ptr addrspace(1) null, ptr %{{[0-9]+}}, align 8 // LLVM-NEXT: store ptr addrspace(2) null, ptr %{{[0-9]+}}, align 8 @@ -58,9 +58,9 @@ void test_nullptr() { void test_side_effect(pi1_t b) { pi2_t p = (pi2_t)(*b++, (int*)0); - // CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s32i), !cir.ptr - // CIR: %[[#CAST:]] = cir.const #cir.ptr : !cir.ptr - // CIR-NEXT: cir.store %[[#CAST]], %{{[0-9]+}} : !cir.ptr, !cir.ptr> + // CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr)>, %{{[0-9]+}} : !s32i), !cir.ptr)> + // CIR: %[[#CAST:]] = cir.const #cir.ptr : !cir.ptr)> + // CIR-NEXT: cir.store %[[#CAST]], %{{[0-9]+}} : !cir.ptr)>, !cir.ptr)>> // LLVM: %{{[0-9]+}} = getelementptr i32, ptr addrspace(1) %{{[0-9]+}}, i64 1 // LLVM: store ptr addrspace(2) null, ptr %{{[0-9]+}}, align 8 diff --git a/clang/test/CIR/CodeGen/address-space.c b/clang/test/CIR/CodeGen/address-space.c index c743b2b723d6..100cdaaaa753 100644 --- a/clang/test/CIR/CodeGen/address-space.c +++ b/clang/test/CIR/CodeGen/address-space.c @@ -3,8 +3,20 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr +// CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr)> // LLVM: define void @foo(ptr addrspace(1) %0) void foo(int __attribute__((address_space(1))) *arg) { return; } + +// CIR: cir.func {{@.*bar.*}}(%arg0: !cir.ptr)> +// LLVM: define void @bar(ptr %0) +void bar(int __attribute__((address_space(0))) *arg) { + return; +} + +// CIR: cir.func {{@.*baz.*}}(%arg0: !cir.ptr +// LLVM: define void @baz(ptr %0) +void baz(int *arg) { + return; +} diff --git a/clang/test/CIR/IR/address-space.cir b/clang/test/CIR/IR/address-space.cir index dde39bdd4d73..b7472876f56d 100644 --- a/clang/test/CIR/IR/address-space.cir +++ b/clang/test/CIR/IR/address-space.cir @@ -4,8 +4,13 @@ !s32i = !cir.int module { - // CHECK: @test_addrspace_assembly_format(%arg0: !cir.ptr) - cir.func @test_addrspace_assembly_format(%arg0: !cir.ptr) { + // CHECK: @test_format1(%arg0: !cir.ptr)>) + cir.func @test_format1(%arg0: !cir.ptr)>) { + cir.return + } + + // CHECK: @test_format2(%arg0: !cir.ptr) + cir.func @test_format2(%arg0: !cir.ptr) { cir.return } } diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index 6511529b32ee..8523439a7c2c 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -17,7 +17,7 @@ module { } cir.func @addrspace_cast(%arg0: !cir.ptr) { - %0 = cir.cast(address_space, %arg0 : !cir.ptr), !cir.ptr + %0 = cir.cast(address_space, %arg0 : !cir.ptr), !cir.ptr)> cir.return } } @@ -30,4 +30,4 @@ module { // CHECK: %0 = cir.cast(bitcast, %arg0 : !cir.ptr), !cir.ptr // CHECK: cir.func @addrspace_cast -// CHECK: %0 = cir.cast(address_space, %arg0 : !cir.ptr), !cir.ptr +// CHECK: %0 = cir.cast(address_space, %arg0 : !cir.ptr), !cir.ptr)> diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 63a6fd97b4c9..50ee1d3fed34 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -302,16 +302,16 @@ cir.func @cast24(%p : !u32i) { !u32i = !cir.int !u64i = !cir.int -cir.func @cast25(%p : !cir.ptr) { - %0 = cir.cast(address_space, %p : !cir.ptr), !cir.ptr // expected-error {{requires two types differ in addrspace only}} +cir.func @cast25(%p : !cir.ptr)>) { + %0 = cir.cast(address_space, %p : !cir.ptr)>), !cir.ptr)> // expected-error {{requires two types differ in addrspace only}} cir.return } // ----- !u64i = !cir.int -cir.func @cast26(%p : !cir.ptr) { - %0 = cir.cast(address_space, %p : !cir.ptr), !u64i // expected-error {{requires !cir.ptr type for source and result}} +cir.func @cast26(%p : !cir.ptr)>) { + %0 = cir.cast(address_space, %p : !cir.ptr)>), !u64i // expected-error {{requires !cir.ptr type for source and result}} cir.return } @@ -319,7 +319,7 @@ cir.func @cast26(%p : !cir.ptr) { !u64i = !cir.int cir.func @cast27(%p : !u64i) { - %0 = cir.cast(address_space, %p : !u64i), !cir.ptr // expected-error {{requires !cir.ptr type for source and result}} + %0 = cir.cast(address_space, %p : !u64i), !cir.ptr)> // expected-error {{requires !cir.ptr type for source and result}} cir.return } @@ -1143,3 +1143,31 @@ cir.func @bad_goto() -> () { cir.label "label" cir.return } + +// ----- + +!u64i = !cir.int +cir.func @address_space1(%p : !cir.ptr) { // expected-error {{expected keyword for addrspace kind}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @address_space2(%p : !cir.ptr)>) { // expected-error {{expected integer value}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @address_space3(%p : !cir.ptr) { // expected-error {{expected '<'}} + cir.return +} + +// ----- + +!u64i = !cir.int +cir.func @address_space4(%p : !cir.ptr) { // expected-error {{invalid addrspace kind keyword: foobar}} + cir.return +} diff --git a/clang/test/CIR/Lowering/address-space.cir b/clang/test/CIR/Lowering/address-space.cir index c7d5a84829c4..b7328713e9b9 100644 --- a/clang/test/CIR/Lowering/address-space.cir +++ b/clang/test/CIR/Lowering/address-space.cir @@ -12,9 +12,16 @@ module { } // LLVM: define void @bar(ptr addrspace(1) %0) - cir.func @bar(%arg0: !cir.ptr) { + cir.func @bar(%arg0: !cir.ptr)>) { // LLVM-NEXT: alloca ptr addrspace(1) - %0 = cir.alloca !cir.ptr, !cir.ptr>, ["arg", init] {alignment = 8 : i64} + %0 = cir.alloca !cir.ptr)>, !cir.ptr)>>, ["arg", init] {alignment = 8 : i64} + cir.return + } + + // LLVM: define void @baz(ptr %0) + cir.func @baz(%arg0: !cir.ptr)>) { + // LLVM-NEXT: alloca ptr, + %0 = cir.alloca !cir.ptr)>, !cir.ptr)>>, ["arg", init] {alignment = 8 : i64} cir.return } } diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 9a87ad4b0968..92cf22b1abef 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -128,11 +128,11 @@ module { // Should remove redundant address space casts. // CHECK-LABEL: @addrspacecastfold - // CHECK: %[[ARG0:.+]]: !cir.ptr - // CHECK: cir.return %[[ARG0]] : !cir.ptr - cir.func @addrspacecastfold(%arg0: !cir.ptr) -> !cir.ptr { - %0 = cir.cast(address_space, %arg0: !cir.ptr), !cir.ptr - cir.return %0 : !cir.ptr + // CHECK: %[[ARG0:.+]]: !cir.ptr)> + // CHECK: cir.return %[[ARG0]] : !cir.ptr)> + cir.func @addrspacecastfold(%arg0: !cir.ptr)>) -> !cir.ptr)> { + %0 = cir.cast(address_space, %arg0: !cir.ptr)>), !cir.ptr)> + cir.return %0 : !cir.ptr)> } } From a84438101f026cafaeeca21ece09b8b70dfd472f Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Sun, 30 Jun 2024 07:37:01 -0300 Subject: [PATCH 1656/2301] [CIR][ABI] Add unsigned int CC lowering for x86_64 (#701) Adds the necessary bits to lower arguments and return values of type unsigned int for the x86_64 target. This includes adding logic for extend and direct argument-passing kinds. --- clang/include/clang/CIR/ABIArgInfo.h | 57 +++- .../clang/CIR/Dialect/IR/CIRDialect.td | 5 + clang/include/clang/CIR/MissingFeatures.h | 18 ++ clang/include/clang/CIR/TypeEvaluationKind.h | 12 + clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 1 - clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 +- .../Dialect/Transforms/CallConvLowering.cpp | 2 +- .../Transforms/TargetLowering/ABIInfo.cpp | 12 + .../Transforms/TargetLowering/ABIInfo.h | 7 + .../Transforms/TargetLowering/ABIInfoImpl.cpp | 8 + .../Transforms/TargetLowering/ABIInfoImpl.h | 4 + .../TargetLowering/CIRLowerContext.cpp | 104 +++++++- .../TargetLowering/CIRLowerContext.h | 38 ++- .../TargetLowering/CIRToCIRArgMapping.h | 55 +++- .../Transforms/TargetLowering/LowerCall.cpp | 74 +++++- .../TargetLowering/LowerFunction.cpp | 246 ++++++++++++++++-- .../Transforms/TargetLowering/LowerFunction.h | 6 + .../Transforms/TargetLowering/LowerModule.cpp | 2 - .../Transforms/TargetLowering/LowerTypes.cpp | 53 +++- .../Transforms/TargetLowering/LowerTypes.h | 3 + .../Transforms/TargetLowering/Targets/X86.cpp | 230 +++++++++++++++- .../x86_64/x86_64-call-conv-lowering-pass.cpp | 28 ++ 22 files changed, 918 insertions(+), 51 deletions(-) create mode 100644 clang/include/clang/CIR/TypeEvaluationKind.h diff --git a/clang/include/clang/CIR/ABIArgInfo.h b/clang/include/clang/CIR/ABIArgInfo.h index 08317d62297f..78127230a7ce 100644 --- a/clang/include/clang/CIR/ABIArgInfo.h +++ b/clang/include/clang/CIR/ABIArgInfo.h @@ -18,6 +18,7 @@ #include "mlir/IR/Types.h" #include "clang/AST/Type.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include namespace cir { @@ -99,6 +100,7 @@ class ABIArgInfo { unsigned AllocaFieldIndex; // isInAlloca() }; Kind TheKind; + bool InReg : 1; // isDirect() || isExtend() || isIndirect() bool CanBeFlattened : 1; // isDirect() bool SignExt : 1; // isExtend() @@ -115,7 +117,7 @@ class ABIArgInfo { public: ABIArgInfo(Kind K = Direct) : TypeData(nullptr), PaddingType(nullptr), DirectAttr{0, 0}, TheKind(K), - CanBeFlattened(false) {} + InReg(false), CanBeFlattened(false), SignExt(false) {} static ABIArgInfo getDirect(mlir::Type T = nullptr, unsigned Offset = 0, mlir::Type Padding = nullptr, @@ -139,6 +141,16 @@ class ABIArgInfo { AI.setSignExt(true); return AI; } + static ABIArgInfo getSignExtend(mlir::Type Ty, mlir::Type T = nullptr) { + // NOTE(cir): Enumerations are IntTypes in CIR. + auto AI = ABIArgInfo(Extend); + AI.setCoerceToType(T); + AI.setPaddingType(nullptr); + AI.setDirectOffset(0); + AI.setDirectAlign(0); + AI.setSignExt(true); + return AI; + } static ABIArgInfo getZeroExtend(clang::QualType Ty, mlir::Type T = nullptr) { assert(Ty->isIntegralOrEnumerationType() && "Unexpected QualType"); @@ -150,6 +162,18 @@ class ABIArgInfo { AI.setSignExt(false); return AI; } + static ABIArgInfo getZeroExtend(mlir::Type Ty, mlir::Type T = nullptr) { + // NOTE(cir): Enumerations are IntTypes in CIR. + assert(mlir::isa(Ty) || + mlir::isa(Ty)); + auto AI = ABIArgInfo(Extend); + AI.setCoerceToType(T); + AI.setPaddingType(nullptr); + AI.setDirectOffset(0); + AI.setDirectAlign(0); + AI.setSignExt(false); + return AI; + } // ABIArgInfo will record the argument as being extended based on the sign of // it's type. @@ -159,6 +183,14 @@ class ABIArgInfo { return getSignExtend(Ty, T); return getZeroExtend(Ty, T); } + static ABIArgInfo getExtend(mlir::Type Ty, mlir::Type T = nullptr) { + // NOTE(cir): The original can apply this method on both integers and + // enumerations, but in CIR, these two types are one and the same. + if (mlir::isa(Ty) && + mlir::cast(Ty).isSigned()) + return getSignExtend(mlir::cast(Ty), T); + return getZeroExtend(Ty, T); + } static ABIArgInfo getIgnore() { return ABIArgInfo(Ignore); } @@ -171,6 +203,24 @@ class ABIArgInfo { bool isExpand() const { return TheKind == Expand; } bool isCoerceAndExpand() const { return TheKind == CoerceAndExpand; } + bool isSignExt() const { + assert(isExtend() && "Invalid kind!"); + return SignExt; + } + void setSignExt(bool SExt) { + assert(isExtend() && "Invalid kind!"); + SignExt = SExt; + } + + bool getInReg() const { + assert((isDirect() || isExtend() || isIndirect()) && "Invalid kind!"); + return InReg; + } + void setInReg(bool IR) { + assert((isDirect() || isExtend() || isIndirect()) && "Invalid kind!"); + InReg = IR; + } + bool canHaveCoerceToType() const { return isDirect() || isExtend() || isCoerceAndExpand(); } @@ -191,11 +241,6 @@ class ABIArgInfo { DirectAttr.Align = Align; } - void setSignExt(bool SExt) { - assert(isExtend() && "Invalid kind!"); - SignExt = SExt; - } - void setCanBeFlattened(bool Flatten) { assert(isDirect() && "Invalid kind!"); CanBeFlattened = Flatten; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index 69d6e9774942..df5dbe9872a6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -28,6 +28,11 @@ def CIR_Dialect : Dialect { let useDefaultTypePrinterParser = 0; let extraClassDeclaration = [{ + + // Names of CIR parameter attributes. + static StringRef getSExtAttrName() { return "cir.signext"; } + static StringRef getZExtAttrName() { return "cir.zeroext"; } + void registerAttributes(); void registerTypes(); diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index a88c7acb93dc..f7cdafe5b1bb 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -201,18 +201,32 @@ struct MissingFeatures { static bool funcDeclIsInlineBuiltinDeclaration() { return false; } static bool funcDeclIsReplaceableGlobalAllocationFunction() { return false; } static bool qualTypeIsReferenceType() { return false; } + static bool typeGetAsEnumType() { return false; } + static bool typeGetAsBuiltinType() { return false; } + static bool varDeclIsKNRPromoted() { return false; } //-- Missing types + static bool fixedWidthIntegers() { return false; } static bool vectorType() { return false; } //-- Missing LLVM attributes static bool noReturn() { return false; } static bool csmeCall() { return false; } + static bool undef() { return false; } + static bool noFPClass() { return false; } //-- Other missing features + // Empty values might be passed as arguments to serve as padding, ensuring + // alignment and compliance (e.g. MIPS). We do not yet support this. + static bool argumentPadding() { return false; } + + // Clang has evaluation kinds which determines how code is emitted for certain + // group of type classes. We don't have a way to identify type classes. + static bool evaluationKind() { return false; } + // Calls with a static chain pointer argument may be optimized (p.e. freeing // up argument registers), but we do not yet track such cases. static bool chainCall() { return false; } @@ -247,6 +261,10 @@ struct MissingFeatures { // Despite carrying some information about variadics, we are currently // ignoring this to focus only on the code necessary to lower non-variadics. static bool variadicFunctions() { return false; } + + // If a store op is guaranteed to execute before the retun value load op, we + // can optimize away the store and load ops. Seems like an early optimization. + static bool returnValueDominatingStoreOptmiization() { return false; } }; } // namespace cir diff --git a/clang/include/clang/CIR/TypeEvaluationKind.h b/clang/include/clang/CIR/TypeEvaluationKind.h new file mode 100644 index 000000000000..4926727dae40 --- /dev/null +++ b/clang/include/clang/CIR/TypeEvaluationKind.h @@ -0,0 +1,12 @@ +#ifndef CLANG_CIR_TYPEEVALUATIONKIND_H +#define CLANG_CIR_TYPEEVALUATIONKIND_H + +namespace cir { + +// FIXME: for now we are reusing this from lib/Clang/CIRGenFunction.h, which +// isn't available in the include dir. Same for getEvaluationKind below. +enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; + +} // namespace cir + +#endif // CLANG_CIR_TYPEEVALUATIONKIND_H diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 47aac08600cc..5c5e444a1fe1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -267,7 +267,6 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, auto intType = builder.getSIntNTy(cgf.getContext().getTypeSize(typ)); auto cmpVal = cgf.buildScalarExpr(expr->getArg(1)); - auto valueType = cmpVal.getType(); cmpVal = buildToInt(cgf, cmpVal, typ, intType); auto newVal = buildToInt(cgf, cgf.buildScalarExpr(expr->getArg(2)), typ, intType); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index c14913ddd8f1..79cd01967ded 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -27,6 +27,7 @@ #include "clang/AST/Type.h" #include "clang/Basic/ABI.h" #include "clang/Basic/TargetInfo.h" +#include "clang/CIR/TypeEvaluationKind.h" #include "mlir/IR/TypeRange.h" #include "mlir/IR/Value.h" @@ -49,9 +50,6 @@ class AggExprEmitter; namespace cir { -// FIXME: for now we are reusing this from lib/Clang/CIRGenFunction.h, which -// isn't available in the include dir. Same for getEvaluationKind below. -enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; struct CGCoroData; class CIRGenFunction : public CIRGenTypeCache { diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index c2c3e6a19531..6ba681d0cc35 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -46,7 +46,7 @@ LowerModule createLowerModule(FuncOp op, PatternRewriter &rewriter) { // Create context. assert(!::cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; - auto context = CIRLowerContext(module.getContext(), langOpts); + auto context = CIRLowerContext(module, langOpts); context.initBuiltinTypes(*targetInfo); return LowerModule(context, module, dataLayoutStr, *targetInfo, rewriter); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 6160174191dc..42c1c9cc2c11 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -13,6 +13,7 @@ #include "ABIInfo.h" #include "CIRCXXABI.h" +#include "CIRLowerContext.h" #include "LowerTypes.h" namespace mlir { @@ -23,5 +24,16 @@ ABIInfo::~ABIInfo() = default; CIRCXXABI &ABIInfo::getCXXABI() const { return LT.getCXXABI(); } +CIRLowerContext &ABIInfo::getContext() const { return LT.getContext(); } + +bool ABIInfo::isPromotableIntegerTypeForABI(Type Ty) const { + if (getContext().isPromotableIntegerType(Ty)) + return true; + + assert(!::cir::MissingFeatures::fixedWidthIntegers()); + + return false; +} + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h index ef5bae6d13fa..d69fee2f26b8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h @@ -15,6 +15,7 @@ #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H #include "CIRCXXABI.h" +#include "CIRLowerContext.h" #include "LowerFunctionInfo.h" #include "llvm/IR/CallingConv.h" @@ -37,7 +38,13 @@ class ABIInfo { CIRCXXABI &getCXXABI() const; + CIRLowerContext &getContext() const; + virtual void computeInfo(LowerFunctionInfo &FI) const = 0; + + // Implement the Type::IsPromotableIntegerType for ABI specific needs. The + // only difference is that this considers bit-precise integer types as well. + bool isPromotableIntegerTypeForABI(Type Ty) const; }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index 7687a271c1f0..04ee613ec0cc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -30,5 +30,13 @@ bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, return CXXABI.classifyReturnType(FI); } +Type useFirstFieldIfTransparentUnion(Type Ty) { + if (auto RT = dyn_cast(Ty)) { + if (RT.isUnion()) + llvm_unreachable("NYI"); + } + return Ty; +} + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h index d3ee18f0467b..84904434bffd 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h @@ -24,6 +24,10 @@ namespace cir { bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, const ABIInfo &Info); +/// Pass transparent unions as if they were the type of the first element. Sema +/// should ensure that all elements of the union have the same "machine type". +Type useFirstFieldIfTransparentUnion(Type Ty); + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index 7152ab081ec5..37cd8f825baa 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -12,7 +12,9 @@ //===----------------------------------------------------------------------===// #include "CIRLowerContext.h" -#include "mlir/IR/MLIRContext.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "clang/AST/ASTContext.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" @@ -21,12 +23,68 @@ namespace mlir { namespace cir { -CIRLowerContext::CIRLowerContext(MLIRContext *MLIRCtx, - clang::LangOptions &LOpts) - : MLIRCtx(MLIRCtx), LangOpts(LOpts) {} +CIRLowerContext::CIRLowerContext(ModuleOp module, clang::LangOptions &LOpts) + : MLIRCtx(module.getContext()), LangOpts(LOpts) {} CIRLowerContext::~CIRLowerContext() {} +clang::TypeInfo CIRLowerContext::getTypeInfo(Type T) const { + // TODO(cir): Memoize type info. + + clang::TypeInfo TI = getTypeInfoImpl(T); + return TI; +} + +/// getTypeInfoImpl - Return the size of the specified type, in bits. This +/// method does not work on incomplete types. +/// +/// FIXME: Pointers into different addr spaces could have different sizes and +/// alignment requirements: getPointerInfo should take an AddrSpace, this +/// should take a QualType, &c. +clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { + uint64_t Width = 0; + unsigned Align = 8; + clang::AlignRequirementKind AlignRequirement = + clang::AlignRequirementKind::None; + + // TODO(cir): We should implement a better way to identify type kinds and use + // builting data layout interface for this. + auto typeKind = clang::Type::Builtin; + if (isa(T)) { + typeKind = clang::Type::Builtin; + } else { + llvm_unreachable("Unhandled type class"); + } + + // FIXME(cir): Here we fetch the width and alignment of a type considering the + // current target. We can likely improve this using MLIR's data layout, or + // some other interface, to abstract this away (e.g. type.getWidth() & + // type.getAlign()). I'm not sure if data layoot suffices because this would + // involve some other types such as vectors and complex numbers. + // FIXME(cir): In the original codegen, this receives an AST type, meaning it + // differs chars from integers, something that is not possible with the + // current level of CIR. + switch (typeKind) { + case clang::Type::Builtin: { + if (auto intTy = dyn_cast(T)) { + // NOTE(cir): This assumes int types are already ABI-specific. + // FIXME(cir): Use data layout interface here instead. + Width = intTy.getWidth(); + // FIXME(cir): Use the proper getABIAlignment method here. + Align = std::ceil((float)Width / 8) * 8; + break; + } + llvm_unreachable("Unknown builtin type!"); + break; + } + default: + llvm_unreachable("Unhandled type class"); + } + + assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); + return clang::TypeInfo(Width, Align, AlignRequirement); +} + Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { Type Ty; @@ -58,5 +116,43 @@ void CIRLowerContext::initBuiltinTypes(const clang::TargetInfo &Target, llvm_unreachable("NYI"); } +/// Convert a size in bits to a size in characters. +clang::CharUnits CIRLowerContext::toCharUnitsFromBits(int64_t BitSize) const { + return clang::CharUnits::fromQuantity(BitSize / getCharWidth()); +} + +clang::TypeInfoChars CIRLowerContext::getTypeInfoInChars(Type T) const { + if (auto arrTy = dyn_cast(T)) + llvm_unreachable("NYI"); + clang::TypeInfo Info = getTypeInfo(T); + return clang::TypeInfoChars(toCharUnitsFromBits(Info.Width), + toCharUnitsFromBits(Info.Align), + Info.AlignRequirement); +} + +bool CIRLowerContext::isPromotableIntegerType(Type T) const { + // HLSL doesn't promote all small integer types to int, it + // just uses the rank-based promotion rules for all types. + if (::cir::MissingFeatures::langOpts()) + llvm_unreachable("NYI"); + + // FIXME(cir): CIR does not distinguish between char, short, etc. So we just + // assume it is promotable if smaller than 32 bits. This is wrong since, for + // example, Char32 is promotable. Improve CIR or add an AST query here. + if (auto intTy = dyn_cast(T)) { + return cast(T).getWidth() < 32; + } + + // Enumerated types are promotable to their compatible integer types + // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). + // TODO(cir): CIR doesn't know if a integer originated from an enum. Improve + // CIR or add an AST query here. + if (::cir::MissingFeatures::typeGetAsEnumType()) { + llvm_unreachable("NYI"); + } + + return false; +} + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h index 3745d146a5e7..a803fb992e74 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h @@ -16,6 +16,8 @@ #include "mlir/IR/MLIRContext.h" #include "mlir/IR/Types.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "clang/AST/ASTContext.h" #include "clang/AST/Type.h" #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" @@ -30,6 +32,8 @@ class CIRLowerContext : public llvm::RefCountedBase { private: mutable SmallVector Types; + clang::TypeInfo getTypeInfoImpl(const Type T) const; + const clang::TargetInfo *Target = nullptr; const clang::TargetInfo *AuxTarget = nullptr; @@ -47,7 +51,7 @@ class CIRLowerContext : public llvm::RefCountedBase { Type CharTy; public: - CIRLowerContext(MLIRContext *MLIRCtx, clang::LangOptions &LOpts); + CIRLowerContext(ModuleOp module, clang::LangOptions &LOpts); CIRLowerContext(const CIRLowerContext &) = delete; CIRLowerContext &operator=(const CIRLowerContext &) = delete; ~CIRLowerContext(); @@ -66,6 +70,38 @@ class CIRLowerContext : public llvm::RefCountedBase { public: MLIRContext *getMLIRContext() const { return MLIRCtx; } + + //===--------------------------------------------------------------------===// + // Type Sizing and Analysis + //===--------------------------------------------------------------------===// + + /// Get the size and alignment of the specified complete type in bits. + clang::TypeInfo getTypeInfo(Type T) const; + + /// Return the size of the specified (complete) type \p T, in bits. + uint64_t getTypeSize(Type T) const { return getTypeInfo(T).Width; } + + /// Return the size of the character type, in bits. + // FIXME(cir): Refactor types and properly implement DataLayout interface in + // CIR so that this can be queried from the module. + uint64_t getCharWidth() const { return 8; } + + /// Convert a size in bits to a size in characters. + clang::CharUnits toCharUnitsFromBits(int64_t BitSize) const; + + clang::CharUnits getTypeSizeInChars(Type T) const { + // FIXME(cir): We should query MLIR's Datalayout here instead. + return getTypeInfoInChars(T).Width; + } + + /// Return the ABI-specified alignment of a (complete) type \p T, in + /// bits. + unsigned getTypeAlign(Type T) const { return getTypeInfo(T).Align; } + + clang::TypeInfoChars getTypeInfoInChars(Type T) const; + + /// More type predicates useful for type checking/promotion + bool isPromotableIntegerType(Type T) const; // C99 6.3.1.1p2 }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index 6481874bf3ab..dd09122b94d9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -57,9 +57,15 @@ class CIRToCIRArgMapping { unsigned totalIRArgs() const { return TotalIRArgs; } + bool hasPaddingArg(unsigned ArgNo) const { + assert(ArgNo < ArgInfo.size()); + return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; + } + void construct(const CIRLowerContext &context, const LowerFunctionInfo &FI, bool onlyRequiredArgs = false) { unsigned IRArgNo = 0; + bool SwapThisWithSRet = false; const ::cir::ABIArgInfo &RetAI = FI.getReturnInfo(); if (RetAI.getKind() == ::cir::ABIArgInfo::Indirect) { @@ -69,9 +75,44 @@ class CIRToCIRArgMapping { unsigned ArgNo = 0; unsigned NumArgs = onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); - for (LowerFunctionInfo::const_arg_iterator _ = FI.arg_begin(); - ArgNo < NumArgs; ++_, ++ArgNo) { - llvm_unreachable("NYI"); + for (LowerFunctionInfo::const_arg_iterator I = FI.arg_begin(); + ArgNo < NumArgs; ++I, ++ArgNo) { + assert(I != FI.arg_end()); + // Type ArgType = I->type; + const ::cir::ABIArgInfo &AI = I->info; + // Collect data about IR arguments corresponding to Clang argument ArgNo. + auto &IRArgs = ArgInfo[ArgNo]; + + if (::cir::MissingFeatures::argumentPadding()) { + llvm_unreachable("NYI"); + } + + switch (AI.getKind()) { + case ::cir::ABIArgInfo::Extend: + case ::cir::ABIArgInfo::Direct: { + // FIXME(cir): handle sseregparm someday... + assert(AI.getCoerceToType() && "Missing coerced type!!"); + StructType STy = dyn_cast(AI.getCoerceToType()); + if (AI.isDirect() && AI.getCanBeFlattened() && STy) { + llvm_unreachable("NYI"); + } else { + IRArgs.NumberOfArgs = 1; + } + break; + } + default: + llvm_unreachable("Missing ABIArgInfo::Kind"); + } + + if (IRArgs.NumberOfArgs > 0) { + IRArgs.FirstArgIndex = IRArgNo; + IRArgNo += IRArgs.NumberOfArgs; + } + + // Skip over the sret parameter when it comes second. We already handled + // it above. + if (IRArgNo == 1 && SwapThisWithSRet) + IRArgNo++; } assert(ArgNo == ArgInfo.size()); @@ -81,6 +122,14 @@ class CIRToCIRArgMapping { TotalIRArgs = IRArgNo; } + + /// Returns index of first IR argument corresponding to ArgNo, and their + /// quantity. + std::pair getIRArgs(unsigned ArgNo) const { + assert(ArgNo < ArgInfo.size()); + return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, + ArgInfo[ArgNo].NumberOfArgs); + } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 85890532e4f9..553c639cf28e 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -3,6 +3,7 @@ #include "LowerFunctionInfo.h" #include "LowerModule.h" #include "LowerTypes.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/FnInfoOpts.h" #include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" @@ -135,6 +136,20 @@ void LowerModule::constructAttributeList(StringRef Name, // ABI-specific information. Maybe we should include it here. switch (RetAI.getKind()) { + case ABIArgInfo::Extend: + if (RetAI.isSignExt()) + newFn.setResultAttr(0, CIRDialect::getSExtAttrName(), + rewriter.getUnitAttr()); + else + // FIXME(cir): Add a proper abstraction to create attributes. + newFn.setResultAttr(0, CIRDialect::getZExtAttrName(), + rewriter.getUnitAttr()); + [[fallthrough]]; + case ABIArgInfo::Direct: + if (RetAI.getInReg()) + llvm_unreachable("InReg attribute is NYI"); + assert(!::cir::MissingFeatures::noFPClass()); + break; case ABIArgInfo::Ignore: break; default: @@ -169,7 +184,57 @@ void LowerModule::constructAttributeList(StringRef Name, for (LowerFunctionInfo::const_arg_iterator I = FI.arg_begin(), E = FI.arg_end(); I != E; ++I, ++ArgNo) { - llvm_unreachable("NYI"); + // Type ParamType = I->type; + const ABIArgInfo &AI = I->info; + SmallVector Attrs; + + // Add attribute for padding argument, if necessary. + if (IRFunctionArgs.hasPaddingArg(ArgNo)) { + llvm_unreachable("Padding argument is NYI"); + } + + // TODO(cir): Mark noundef arguments and return values. Although this + // attribute is not a part of the call conve, it uses it to determine if a + // value is noundef (e.g. if an argument is passed direct, indirectly, etc). + + // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we + // have the corresponding parameter variable. It doesn't make + // sense to do it here because parameters are so messed up. + switch (AI.getKind()) { + case ABIArgInfo::Extend: + if (AI.isSignExt()) + Attrs.push_back( + rewriter.getNamedAttr("cir.signext", rewriter.getUnitAttr())); + else + // FIXME(cir): Add a proper abstraction to create attributes. + Attrs.push_back( + rewriter.getNamedAttr("cir.zeroext", rewriter.getUnitAttr())); + [[fallthrough]]; + case ABIArgInfo::Direct: + if (ArgNo == 0 && ::cir::MissingFeatures::chainCall()) + llvm_unreachable("ChainCall is NYI"); + else if (AI.getInReg()) + llvm_unreachable("InReg attribute is NYI"); + // Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); + assert(!::cir::MissingFeatures::noFPClass()); + break; + default: + llvm_unreachable("Missing ABIArgInfo::Kind"); + } + + if (::cir::MissingFeatures::qualTypeIsReferenceType()) { + llvm_unreachable("Reference handling is NYI"); + } + + // TODO(cir): Missing some swift and nocapture stuff here. + assert(!::cir::MissingFeatures::extParamInfo()); + + if (!Attrs.empty()) { + unsigned FirstIRArg, NumIRArgs; + std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); + for (unsigned i = 0; i < NumIRArgs; i++) + newFn.setArgAttrs(FirstIRArg + i, Attrs); + } } assert(ArgNo == FI.arg_size()); } @@ -264,10 +329,11 @@ LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, // default now. ::cir::ABIArgInfo &retInfo = FI->getReturnInfo(); if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) - llvm_unreachable("NYI"); + retInfo.setCoerceToType(convertType(FI->getReturnType())); - for (auto &_ : FI->arguments()) - llvm_unreachable("NYI"); + for (auto &I : FI->arguments()) + if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) + I.info.setCoerceToType(convertType(I.type)); return *FI; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 91634c93c5f2..b1d5e24092cb 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -23,6 +23,7 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/MissingFeatures.h" +#include "clang/CIR/TypeEvaluationKind.h" #include "llvm/Support/ErrorHandling.h" using ABIArgInfo = ::cir::ABIArgInfo; @@ -30,6 +31,16 @@ using ABIArgInfo = ::cir::ABIArgInfo; namespace mlir { namespace cir { +namespace { + +// FIXME(cir): Create a custom rewriter class to abstract this away. +Value createBitcast(Value Src, Type Ty, LowerFunction &LF) { + return LF.getRewriter().create(Src.getLoc(), Ty, CastKind::bitcast, + Src); +} + +} // namespace + // FIXME(cir): Pass SrcFn and NewFn around instead of having then as attributes. LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, FuncOp srcFn, FuncOp newFn) @@ -76,21 +87,91 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, for (MutableArrayRef::const_iterator i = Args.begin(), e = Args.end(); i != e; ++i, ++info_it, ++ArgNo) { - llvm_unreachable("NYI"); + const Value Arg = *i; + const ABIArgInfo &ArgI = info_it->info; + + bool isPromoted = ::cir::MissingFeatures::varDeclIsKNRPromoted(); + // We are converting from ABIArgInfo type to VarDecl type directly, unless + // the parameter is promoted. In this case we convert to + // CGFunctionInfo::ArgInfo type with subsequent argument demotion. + Type Ty = {}; + if (isPromoted) + llvm_unreachable("NYI"); + else + Ty = Arg.getType(); + assert(!::cir::MissingFeatures::evaluationKind()); + + unsigned FirstIRArg, NumIRArgs; + std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); + + switch (ArgI.getKind()) { + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: { + auto AI = Fn.getArgument(FirstIRArg); + Type LTy = Arg.getType(); + + // Prepare parameter attributes. So far, only attributes for pointer + // parameters are prepared. See + // http://llvm.org/docs/LangRef.html#paramattrs. + if (ArgI.getDirectOffset() == 0 && isa(LTy) && + isa(ArgI.getCoerceToType())) { + llvm_unreachable("NYI"); + } + + // Prepare the argument value. If we have the trivial case, handle it + // with no muss and fuss. + if (!isa(ArgI.getCoerceToType()) && + ArgI.getCoerceToType() == Ty && ArgI.getDirectOffset() == 0) { + assert(NumIRArgs == 1); + + // LLVM expects swifterror parameters to be used in very restricted + // ways. Copy the value into a less-restricted temporary. + Value V = AI; + if (::cir::MissingFeatures::extParamInfo()) { + llvm_unreachable("NYI"); + } + + // Ensure the argument is the correct type. + if (V.getType() != ArgI.getCoerceToType()) + llvm_unreachable("NYI"); + + if (isPromoted) + llvm_unreachable("NYI"); + + ArgVals.push_back(V); + + // NOTE(cir): Here we have a trivial case, which means we can just + // replace all uses of the original argument with the new one. + Value oldArg = SrcFn.getArgument(ArgNo); + Value newArg = Fn.getArgument(FirstIRArg); + rewriter.replaceAllUsesWith(oldArg, newArg); + + break; + } + + llvm_unreachable("NYI"); + } + default: + llvm_unreachable("Unhandled ABIArgInfo::Kind"); + } } if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { llvm_unreachable("NYI"); } else { - // FIXME(cir): In the original codegen, EmitParamDecl is called here. It is - // likely that said function considers ABI details during emission, so we - // migth have to add a counter part here. Currently, it is not needed. + // FIXME(cir): In the original codegen, EmitParamDecl is called here. It + // is likely that said function considers ABI details during emission, so + // we migth have to add a counter part here. Currently, it is not needed. } return success(); } LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { + // NOTE(cir): no-return, naked, and no result functions should be handled in + // CIRGen. + + Type RetTy = FI.getReturnType(); const ABIArgInfo &RetAI = FI.getReturnInfo(); switch (RetAI.getKind()) { @@ -98,6 +179,35 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { case ABIArgInfo::Ignore: break; + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: + // FIXME(cir): Should we call ConvertType(RetTy) here? + if (RetAI.getCoerceToType() == RetTy && RetAI.getDirectOffset() == 0) { + // The internal return value temp always will have pointer-to-return-type + // type, just do a load. + + // If there is a dominating store to ReturnValue, we can elide + // the load, zap the store, and usually zap the alloca. + // NOTE(cir): This seems like a premature optimization case, so I'm + // skipping it. + if (::cir::MissingFeatures::returnValueDominatingStoreOptmiization()) { + llvm_unreachable("NYI"); + } + // Otherwise, we have to do a simple load. + else { + // NOTE(cir): Nothing to do here. The codegen already emitted this load + // for us and there is no casting necessary to conform to the ABI. The + // zero-extension is enforced by the return value's attribute. Just + // early exit. + return success(); + } + } else { + llvm_unreachable("NYI"); + } + + // TODO(cir): Should AutoreleaseResult be handled here? + break; + default: llvm_unreachable("Unhandled ABIArgInfo::Kind"); } @@ -154,17 +264,17 @@ LogicalResult LowerFunction::rewriteCallOp(CallOp op, ReturnValueSlot retValSlot) { // TODO(cir): Check if BlockCall, CXXMemberCall, CUDAKernelCall, or - // CXXOperatorMember require special handling here. These should be handled in - // CIRGen, unless there is call conv or ABI-specific stuff to be handled, them - // we should do it here. + // CXXOperatorMember require special handling here. These should be handled + // in CIRGen, unless there is call conv or ABI-specific stuff to be handled, + // them we should do it here. // TODO(cir): Also check if Builtin and CXXPeseudoDtor need special handling // here. These should be handled in CIRGen, unless there is call conv or // ABI-specific stuff to be handled, them we should do it here. // NOTE(cir): There is no direct way to fetch the function type from the - // CallOp, so we fetch it from the source function. This assumes the function - // definition has not yet been lowered. + // CallOp, so we fetch it from the source function. This assumes the + // function definition has not yet been lowered. assert(SrcFn && "No source function"); auto fnType = SrcFn.getFunctionType(); @@ -175,6 +285,8 @@ LogicalResult LowerFunction::rewriteCallOp(CallOp op, if (Ret) rewriter.replaceAllUsesWith(op.getResult(), Ret); + // Erase original ABI-agnostic call. + rewriter.eraseOp(op); return success(); } @@ -226,13 +338,13 @@ Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, assert(!::cir::MissingFeatures::CUDA()); - // TODO(cir): LLVM IR has the concept of "CallBase", which is a base class for - // all types of calls. Perhaps we should have a CIR interface to mimic this - // class. + // TODO(cir): LLVM IR has the concept of "CallBase", which is a base class + // for all types of calls. Perhaps we should have a CIR interface to mimic + // this class. CallOp CallOrInvoke = {}; - Value CallResult = {}; - rewriteCallOp(FnInfo, origCallee, callOp, retValSlot, Args, CallOrInvoke, - /*isMustTail=*/false, callOp.getLoc()); + Value CallResult = + rewriteCallOp(FnInfo, origCallee, callOp, retValSlot, Args, CallOrInvoke, + /*isMustTail=*/false, callOp.getLoc()); // NOTE(cir): Skipping debug stuff here. @@ -287,7 +399,61 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, LowerFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); for (auto I = CallArgs.begin(), E = CallArgs.end(); I != E; ++I, ++info_it, ++ArgNo) { - llvm_unreachable("NYI"); + const ABIArgInfo &ArgInfo = info_it->info; + + if (IRFunctionArgs.hasPaddingArg(ArgNo)) + llvm_unreachable("NYI"); + + unsigned FirstIRArg, NumIRArgs; + std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); + + switch (ArgInfo.getKind()) { + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: { + // NOTE(cir): While booleans are lowered directly as `i1`s in the + // original codegen, in CIR they require a trivial bitcast. This is + // handled here. + if (isa(info_it->type)) { + IRCallArgs[FirstIRArg] = + createBitcast(*I, ArgInfo.getCoerceToType(), *this); + break; + } + + if (!isa(ArgInfo.getCoerceToType()) && + ArgInfo.getCoerceToType() == info_it->type && + ArgInfo.getDirectOffset() == 0) { + assert(NumIRArgs == 1); + Value V; + if (!isa(I->getType())) { + V = *I; + } else { + llvm_unreachable("NYI"); + } + + if (::cir::MissingFeatures::extParamInfo()) { + llvm_unreachable("NYI"); + } + + if (ArgInfo.getCoerceToType() != V.getType() && + isa(V.getType())) + llvm_unreachable("NYI"); + + if (FirstIRArg < IRFuncTy.getNumInputs() && + V.getType() != IRFuncTy.getInput(FirstIRArg)) + llvm_unreachable("NYI"); + + if (::cir::MissingFeatures::undef()) + llvm_unreachable("NYI"); + IRCallArgs[FirstIRArg] = V; + break; + } + + llvm_unreachable("NYI"); + } + default: + llvm::outs() << "Missing ABIArgInfo::Kind: " << ArgInfo.getKind() << "\n"; + llvm_unreachable("NYI"); + } } // 2. Prepare the function pointer. @@ -327,7 +493,8 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, assert(!::cir::MissingFeatures::vectorType()); - // NOTE(cir): Skipping some ObjC, tail-call, debug, and attribute stuff here. + // NOTE(cir): Skipping some ObjC, tail-call, debug, and attribute stuff + // here. // 4. Finish the call. @@ -339,24 +506,52 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // NOTE(cir): CIRGen already handled the emission of the return value. We // need only to handle the ABI-specific to ABI-agnostic cast here. switch (RetAI.getKind()) { + case ::cir::ABIArgInfo::Ignore: // If we are ignoring an argument that had a result, make sure to // construct the appropriate return value for our caller. return getUndefRValue(RetTy); + + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: { + // NOTE(cir): While booleans are lowered directly as `i1`s in the + // original codegen, in CIR they require a trivial bitcast. This is + // handled here. + assert(!isa(RetTy)); + + Type RetIRTy = RetTy; + if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { + switch (getEvaluationKind(RetTy)) { + case ::cir::TypeEvaluationKind::TEK_Scalar: { + // If the argument doesn't match, perform a bitcast to coerce it. + // This can happen due to trivial type mismatches. NOTE(cir): + // Perhaps this section should handle CIR's boolean case. + Value V = newCallOp.getResult(); + if (V.getType() != RetIRTy) + llvm_unreachable("NYI"); + return V; + } + default: + llvm_unreachable("NYI"); + } + } + + llvm_unreachable("NYI"); + } default: llvm::errs() << "Unhandled ABIArgInfo kind: " << RetAI.getKind() << "\n"; llvm_unreachable("NYI"); } }(); - // NOTE(cir): Skipping Emissions, lifetime markers, and dtors here that should - // be handled in CIRGen. + // NOTE(cir): Skipping Emissions, lifetime markers, and dtors here that + // should be handled in CIRGen. return Ret; } -// NOTE(cir): This method has partial parity to CodeGenFunction's GetUndefRValue -// defined in CGExpr.cpp. +// NOTE(cir): This method has partial parity to CodeGenFunction's +// GetUndefRValue defined in CGExpr.cpp. Value LowerFunction::getUndefRValue(Type Ty) { if (isa(Ty)) return nullptr; @@ -365,5 +560,14 @@ Value LowerFunction::getUndefRValue(Type Ty) { llvm_unreachable("NYI"); } +::cir::TypeEvaluationKind LowerFunction::getEvaluationKind(Type type) { + // FIXME(cir): Implement type classes for CIR types. + if (isa(type)) + return ::cir::TypeEvaluationKind::TEK_Aggregate; + if (isa(type)) + return ::cir::TypeEvaluationKind::TEK_Scalar; + llvm_unreachable("NYI"); +} + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h index 40cdd39463e6..fe54b98939ec 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h @@ -21,6 +21,7 @@ #include "mlir/Support/LogicalResult.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/TypeEvaluationKind.h" namespace mlir { namespace cir { @@ -53,6 +54,8 @@ class LowerFunction { LowerModule &LM; // Per-module state. + PatternRewriter &getRewriter() const { return rewriter; } + const clang::TargetInfo &getTarget() const { return Target; } // Build ABI/Target-specific function prologue. @@ -80,6 +83,9 @@ class LowerFunction { /// Get an appropriate 'undef' value for the given type. Value getUndefRValue(Type Ty); + + /// Return the TypeEvaluationKind of Type \c T. + static ::cir::TypeEvaluationKind getEvaluationKind(Type T); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index d6a0b3488b2a..894a8581c9d2 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -205,8 +205,6 @@ LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { .failed()) return failure(); - // Erase original ABI-agnostic call. - rewriter.eraseOp(callOp); return success(); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index 3d8ca6cfe61f..ecee0b23ce75 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -14,6 +14,7 @@ #include "LowerTypes.h" #include "CIRToCIRArgMapping.h" #include "LowerModule.h" +#include "mlir/IR/Types.h" #include "mlir/Support/LLVM.h" #include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/MissingFeatures.h" @@ -21,6 +22,8 @@ using namespace ::mlir::cir; +using ABIArgInfo = ::cir::ABIArgInfo; + unsigned LowerTypes::clangCallConvToLLVMCallConv(clang::CallingConv CC) { switch (CC) { case clang::CC_C: @@ -42,6 +45,10 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { mlir::Type resultType = {}; const ::cir::ABIArgInfo &retAI = FI.getReturnInfo(); switch (retAI.getKind()) { + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: + resultType = retAI.getCoerceToType(); + break; case ::cir::ABIArgInfo::Ignore: resultType = VoidType::get(getMLIRContext()); break; @@ -63,8 +70,52 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { LowerFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie = it + FI.getNumRequiredArgs(); for (; it != ie; ++it, ++ArgNo) { - llvm_unreachable("NYI"); + const ABIArgInfo &ArgInfo = it->info; + + assert(!::cir::MissingFeatures::argumentPadding()); + + unsigned FirstIRArg, NumIRArgs; + std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); + + switch (ArgInfo.getKind()) { + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: { + // Fast-isel and the optimizer generally like scalar values better than + // FCAs, so we flatten them if this is safe to do for this argument. + Type argType = ArgInfo.getCoerceToType(); + StructType st = dyn_cast(argType); + if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { + assert(NumIRArgs == st.getNumElements()); + for (unsigned i = 0, e = st.getNumElements(); i != e; ++i) + ArgTypes[FirstIRArg + i] = st.getMembers()[i]; + } else { + assert(NumIRArgs == 1); + ArgTypes[FirstIRArg] = argType; + } + break; + } + default: + llvm_unreachable("Missing ABIArgInfo::Kind"); + } } return FuncType::get(getMLIRContext(), ArgTypes, resultType, FI.isVariadic()); } + +/// Convert a CIR type to its ABI-specific default form. +mlir::Type LowerTypes::convertType(Type T) { + /// NOTE(cir): It the original codegen this method is used to get the default + /// LLVM IR representation for a given AST type. When a the ABI-specific + /// function info sets a nullptr for a return or argument type, the default + /// type given by this method is used. In CIR's case, its types are already + /// supposed to be ABI-specific, so this method is not really useful here. I'm + /// keeping it here for parity's sake. + + // Certain CIR types are already ABI-specific, so we just return them. + if (isa(T)) { + return T; + } + + llvm::outs() << "Missing default ABI-specific type for " << T << "\n"; + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h index 44f0d16b1bd8..9ab1cdf335d5 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -90,6 +90,9 @@ class LowerTypes { /// Return the ABI-specific function type for a CIR function type. FuncType getFunctionType(const LowerFunctionInfo &FI); + + /// Convert a CIR type to its ABI-specific default form. + Type convertType(Type T); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index d921a39a9e42..b05a46070638 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -11,10 +11,34 @@ #include using X86AVXABILevel = ::cir::X86AVXABILevel; +using ABIArgInfo = ::cir::ABIArgInfo; namespace mlir { namespace cir { +namespace { + +/// Return true if the specified [start,end) bit range is known to either be +/// off the end of the specified type or being in alignment padding. The user +/// type specified is known to be at most 128 bits in size, and have passed +/// through X86_64ABIInfo::classify with a successful classification that put +/// one of the two halves in the INTEGER class. +/// +/// It is conservatively correct to return false. +static bool BitsContainNoUserData(Type Ty, unsigned StartBit, unsigned EndBit, + CIRLowerContext &Context) { + // If the bytes being queried are off the end of the type, there is no user + // data hiding here. This handles analysis of builtins, vectors and other + // types that don't contain interesting padding. + unsigned TySize = (unsigned)Context.getTypeSize(Ty); + if (TySize <= StartBit) + return true; + + llvm_unreachable("NYI"); +} + +} // namespace + class X86_64ABIInfo : public ABIInfo { using Class = ::cir::X86ArgClass; @@ -47,11 +71,18 @@ class X86_64ABIInfo : public ABIInfo { void classify(Type T, uint64_t OffsetBase, Class &Lo, Class &Hi, bool isNamedArg, bool IsRegCall = false) const; + Type GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, Type SourceTy, + unsigned SourceOffset) const; + public: X86_64ABIInfo(LowerTypes &CGT, X86AVXABILevel AVXLevel) : ABIInfo(CGT) {} ::cir::ABIArgInfo classifyReturnType(Type RetTy) const; + ABIArgInfo classifyArgumentType(Type Ty, unsigned freeIntRegs, + unsigned &neededInt, unsigned &neededSSE, + bool isNamedArg, bool IsRegCall) const; + void computeInfo(LowerFunctionInfo &FI) const override; }; @@ -83,6 +114,17 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, if (/*isBuitinType=*/true) { if (isa(Ty)) { Current = Class::NoClass; + } else if (isa(Ty)) { + + // FIXME(cir): Clang's BuiltinType::Kind allow comparisons (GT, LT, etc). + // We should implement this in CIR to simplify the conditions below. BTW, + // I'm not sure if the comparisons below are truly equivalent to the ones + // in Clang. + if (isa(Ty)) { + Current = Class::Integer; + } + return; + } else { llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; llvm_unreachable("NYI"); @@ -96,6 +138,70 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, llvm_unreachable("NYI"); } +/// The ABI specifies that a value should be passed in an 8-byte GPR. This +/// means that we either have a scalar or we are talking about the high or low +/// part of an up-to-16-byte struct. This routine picks the best CIR type +/// to represent this, which may be i64 or may be anything else that the +/// backend will pass in a GPR that works better (e.g. i8, %foo*, etc). +/// +/// PrefType is an CIR type that corresponds to (part of) the IR type for +/// the source type. IROffset is an offset in bytes into the CIR type that +/// the 8-byte value references. PrefType may be null. +/// +/// SourceTy is the source-level type for the entire argument. SourceOffset +/// is an offset into this that we're processing (which is always either 0 or +/// 8). +/// +Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, + Type SourceTy, + unsigned SourceOffset) const { + // If we're dealing with an un-offset CIR type, then it means that we're + // returning an 8-byte unit starting with it. See if we can safely use it. + if (IROffset == 0) { + // Pointers and int64's always fill the 8-byte unit. + assert(!isa(DestTy) && "Ptrs are NYI"); + + // If we have a 1/2/4-byte integer, we can use it only if the rest of the + // goodness in the source type is just tail padding. This is allowed to + // kick in for struct {double,int} on the int, but not on + // struct{double,int,int} because we wouldn't return the second int. We + // have to do this analysis on the source type because we can't depend on + // unions being lowered a specific way etc. + if (auto intTy = dyn_cast(DestTy)) { + if (intTy.getWidth() == 8 || intTy.getWidth() == 16 || + intTy.getWidth() == 32) { + unsigned BitWidth = intTy.getWidth(); + if (BitsContainNoUserData(SourceTy, SourceOffset * 8 + BitWidth, + SourceOffset * 8 + 64, getContext())) + return DestTy; + } + } + } + + if (auto RT = dyn_cast(DestTy)) { + llvm_unreachable("NYI"); + } + + // Okay, we don't have any better idea of what to pass, so we pass this in + // an integer register that isn't too big to fit the rest of the struct. + unsigned TySizeInBytes = + (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); + + assert(TySizeInBytes != SourceOffset && "Empty field?"); + + // It is always safe to classify this as an integer type up to i64 that + // isn't larger than the structure. + // FIXME(cir): Perhaps we should have the concept of singless integers in + // CIR, mostly because coerced types should carry sign. On the other hand, + // this might not make a difference in practice. For now, we just preserve the + // sign as is to avoid unecessary bitcasts. + bool isSigned = false; + if (auto intTy = dyn_cast(SourceTy)) + isSigned = intTy.isSigned(); + return IntType::get(LT.getMLIRContext(), + std::min(TySizeInBytes - SourceOffset, 8U) * 8, isSigned); +} + ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the // classification algorithm. @@ -108,16 +214,110 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { assert((Hi != Class::SSEUp || Lo == Class::SSE) && "Invalid SSEUp classification."); + Type resType = {}; switch (Lo) { case Class::NoClass: if (Hi == Class::NoClass) - return ::cir::ABIArgInfo::getIgnore(); + return ABIArgInfo::getIgnore(); break; + + case Class::Integer: + resType = GetINTEGERTypeAtOffset(RetTy, 0, RetTy, 0); + + // If we have a sign or zero extended integer, make sure to return Extend + // so that the parameter gets the right LLVM IR attributes. + if (Hi == Class::NoClass && isa(resType)) { + // NOTE(cir): We skip enum types handling here since CIR represents + // enums directly as their unerlying integer types. NOTE(cir): For some + // reason, Clang does not set the coerce type here and delays it to + // arrangeLLVMFunctionInfo. We do the same to keep parity. + if (isa(RetTy) && isPromotableIntegerTypeForABI(RetTy)) + return ABIArgInfo::getExtend(RetTy); + } + break; + default: llvm_unreachable("NYI"); } - llvm_unreachable("NYI"); + Type HighPart = {}; + switch (Hi) { + + case Class::NoClass: + break; + + default: + llvm_unreachable("NYI"); + } + + // If a high part was specified, merge it together with the low part. It is + // known to pass in the high eightbyte of the result. We do this by forming + // a first class struct aggregate with the high and low part: {low, high} + if (HighPart) + llvm_unreachable("NYI"); + + return ABIArgInfo::getDirect(RetTy); +} + +ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, + unsigned &neededInt, + unsigned &neededSSE, + bool isNamedArg, + bool IsRegCall = false) const { + Ty = useFirstFieldIfTransparentUnion(Ty); + + X86_64ABIInfo::Class Lo, Hi; + classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall); + + // Check some invariants. + // FIXME: Enforce these by construction. + assert((Hi != Class::Memory || Lo == Class::Memory) && + "Invalid memory classification."); + assert((Hi != Class::SSEUp || Lo == Class::SSE) && + "Invalid SSEUp classification."); + + neededInt = 0; + neededSSE = 0; + Type ResType = {}; + switch (Lo) { + // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next + // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 + // and %r9 is used. + case Class::Integer: + ++neededInt; + + // Pick an 8-byte type based on the preferred type. + ResType = GetINTEGERTypeAtOffset(Ty, 0, Ty, 0); + + // If we have a sign or zero extended integer, make sure to return Extend + // so that the parameter gets the right LLVM IR attributes. + if (Hi == Class::NoClass && isa(ResType)) { + // NOTE(cir): We skip enum types handling here since CIR represents + // enums directly as their unerlying integer types. NOTE(cir): For some + // reason, Clang does not set the coerce type here and delays it to + // arrangeLLVMFunctionInfo. We do the same to keep parity. + if (isa(Ty) && isPromotableIntegerTypeForABI(Ty)) + return ABIArgInfo::getExtend(Ty); + } + + break; + + default: + llvm_unreachable("NYI"); + } + + Type HighPart = {}; + switch (Hi) { + case Class::NoClass: + break; + default: + llvm_unreachable("NYI"); + } + + if (HighPart) + llvm_unreachable("NYI"); + + return ABIArgInfo::getDirect(ResType); } void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { @@ -132,7 +332,9 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; // Keep track of the number of assigned registers. - unsigned NeededSSE = 0, MaxVectorWidth = 0; + unsigned FreeIntRegs = IsRegCall ? 11 : 6; + unsigned FreeSSERegs = IsRegCall ? 16 : 8; + unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0; if (!::mlir::cir::classifyReturnType(getCXXABI(), FI, *this)) { if (IsRegCall || ::cir::MissingFeatures::regCall()) { @@ -152,12 +354,32 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { if (::cir::MissingFeatures::chainCall()) llvm_unreachable("NYI"); + unsigned NumRequiredArgs = FI.getNumRequiredArgs(); // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers // get assigned (in left-to-right order) for passing as follows... unsigned ArgNo = 0; for (LowerFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); it != ie; ++it, ++ArgNo) { - llvm_unreachable("NYI"); + bool IsNamedArg = ArgNo < NumRequiredArgs; + + if (IsRegCall && ::cir::MissingFeatures::regCall()) + llvm_unreachable("NYI"); + else + it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, + NeededSSE, IsNamedArg); + + // AMD64-ABI 3.2.3p3: If there are no registers available for any + // eightbyte of an argument, the whole argument is passed on the + // stack. If registers have already been assigned for some + // eightbytes of such an argument, the assignments get reverted. + if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { + FreeIntRegs -= NeededInt; + FreeSSERegs -= NeededSSE; + if (::cir::MissingFeatures::vectorType()) + llvm_unreachable("NYI"); + } else { + llvm_unreachable("Indirect results are NYI"); + } } } diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index 6bb4d71d4877..d431f9ed1db9 100644 --- a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -6,3 +6,31 @@ void Void(void) { // CHECK: cir.call @_Z4Voidv() : () -> () Void(); } + +// Test call conv lowering for trivial zeroext cases. + +// CHECK: cir.func @_Z5UCharh(%arg0: !u8i {cir.zeroext} loc({{.+}})) -> (!u8i {cir.zeroext}) +unsigned char UChar(unsigned char c) { + // CHECK: cir.call @_Z5UCharh(%2) : (!u8i) -> !u8i + return UChar(c); +} +// CHECK: cir.func @_Z6UShortt(%arg0: !u16i {cir.zeroext} loc({{.+}})) -> (!u16i {cir.zeroext}) +unsigned short UShort(unsigned short s) { + // CHECK: cir.call @_Z6UShortt(%2) : (!u16i) -> !u16i + return UShort(s); +} +// CHECK: cir.func @_Z4UIntj(%arg0: !u32i loc({{.+}})) -> !u32i +unsigned int UInt(unsigned int i) { + // CHECK: cir.call @_Z4UIntj(%2) : (!u32i) -> !u32i + return UInt(i); +} +// CHECK: cir.func @_Z5ULongm(%arg0: !u64i loc({{.+}})) -> !u64i +unsigned long ULong(unsigned long l) { + // CHECK: cir.call @_Z5ULongm(%2) : (!u64i) -> !u64i + return ULong(l); +} +// CHECK: cir.func @_Z9ULongLongy(%arg0: !u64i loc({{.+}})) -> !u64i +unsigned long long ULongLong(unsigned long long l) { + // CHECK: cir.call @_Z9ULongLongy(%2) : (!u64i) -> !u64i + return ULongLong(l); +} From 0426bba83e023cdb2cf01e5dcc03451d4f349f15 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Mon, 1 Jul 2024 12:20:29 -0700 Subject: [PATCH 1657/2301] [CIR] Vector constants (#700) Implement vector constants in ClangIR. Resolves issue #498 - Add a `cir.const_vec`, simlar to `cir.const_array` and `cir.const_struct` Create a new kind of attribute, `cir::ConstVectorAttr` in the code or `#cir.const_vector` in the assembly, which represents a compile-time value of a `cir::VectorType`. The values for the elements within the vector are stored as attributes within an `mlir::ArrayAttr`. When doing CodeGen for a prvalue of vector type, try to represent it as `cir.const #cir.const_vector` first. If that fails, most likely because some of the elements are not compile-time values, fall back to the existing code that uses a `cir.vec.create` operation. When lowering directly to LLVM IR, lower `cir.const #cir.const_vector` as `llvm.mlir.constant(dense<[...]> : _type_) : _type_`. When lowering through other MLIR dialects, lower `cir.const #cir.const_vector` as `arith.constant dense<[...]> : _type_`. No new tests were added, but the expected results of the existing tests that use vector constants were updated. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 31 +++++++ clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 7 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 44 +++++++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 81 +++++++++++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 36 +++++++++ .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 57 +++++++++---- clang/test/CIR/CodeGen/vectype-ext.cpp | 19 ++--- clang/test/CIR/CodeGen/vectype.cpp | 10 +-- .../test/CIR/Lowering/ThroughMLIR/vectype.cpp | 16 +--- clang/test/CIR/Lowering/vectype.cpp | 28 +------ 10 files changed, 254 insertions(+), 75 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 0d270a4b59ce..b5cff38517e2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -146,6 +146,37 @@ def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> }]; } +//===----------------------------------------------------------------------===// +// ConstVectorAttr +//===----------------------------------------------------------------------===// + +def ConstVectorAttr : CIR_Attr<"ConstVector", "const_vector", + [TypedAttrInterface]> { + let summary = "A constant vector from ArrayAttr"; + let description = [{ + A CIR vector attribute is an array of literals of the specified attribute + types. + }]; + + let parameters = (ins AttributeSelfTypeParameter<"">:$type, + "ArrayAttr":$elts); + + // Define a custom builder for the type; that removes the need to pass in an + // MLIRContext instance, as it can be inferred from the `type`. + let builders = [ + AttrBuilderWithInferredContext<(ins "mlir::cir::VectorType":$type, + "ArrayAttr":$elts), [{ + return $_get(type.getContext(), type, elts); + }]> + ]; + + // Printing and parsing available in CIRDialect.cpp + let hasCustomAssemblyFormat = 1; + + // Enable verifier. + let genVerifyDecl = 1; +} + //===----------------------------------------------------------------------===// // ConstStructAttr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 1c43655ced6b..52b36c1fa7ac 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -66,15 +66,16 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, if (getLangOpts().OpenMP && openMPLocalAddr.isValid()) { llvm_unreachable("NYI"); } else if (Ty->isConstantSizeType()) { - // If this value is an array or struct with a statically determinable - // constant initializer, there are optimizations we can do. + // If this value is an array, struct, or vector with a statically + // determinable constant initializer, there are optimizations we can do. // // TODO: We should constant-evaluate the initializer of any variable, // as long as it is initialized by a constant expression. Currently, // isConstantInitializer produces wrong answers for structs with // reference or bitfield members, and a few other cases, and checking // for POD-ness protects us from some of these. - if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) && + if (D.getInit() && + (Ty->isArrayType() || Ty->isRecordType() || Ty->isVectorType()) && (D.isConstexpr() || ((Ty.isPODType(getContext()) || getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) && diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 993dbf52d1ee..4a5a9b698695 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1039,6 +1039,29 @@ class ConstExprEmitter return ConstStructBuilder::BuildStruct(Emitter, ILE, T); } + mlir::Attribute EmitVectorInitialization(InitListExpr *ILE, QualType T) { + mlir::cir::VectorType VecTy = + mlir::cast(CGM.getTypes().ConvertType(T)); + unsigned NumElements = VecTy.getSize(); + unsigned NumInits = ILE->getNumInits(); + assert(NumElements >= NumInits && "Too many initializers for a vector"); + QualType EltTy = T->castAs()->getElementType(); + SmallVector Elts; + // Process the explicit initializers + for (unsigned i = 0; i < NumInits; ++i) { + auto Value = Emitter.tryEmitPrivateForMemory(ILE->getInit(i), EltTy); + if (!Value) + return {}; + Elts.push_back(std::move(Value)); + } + // Zero-fill the rest of the vector + for (unsigned i = NumInits; i < NumElements; ++i) { + Elts.push_back(CGM.getBuilder().getZeroInitAttr(VecTy.getEltType())); + } + return mlir::cir::ConstVectorAttr::get( + VecTy, mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Elts)); + } + mlir::Attribute VisitImplicitValueInitExpr(ImplicitValueInitExpr *E, QualType T) { return CGM.getBuilder().getZeroInitAttr(CGM.getCIRType(T)); @@ -1054,6 +1077,9 @@ class ConstExprEmitter if (ILE->getType()->isRecordType()) return EmitRecordInitialization(ILE, T); + if (ILE->getType()->isVectorType()) + return EmitVectorInitialization(ILE, T); + return nullptr; } @@ -1772,6 +1798,23 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, return buildArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts, typedFiller); } + case APValue::Vector: { + const QualType ElementType = + DestType->castAs()->getElementType(); + unsigned NumElements = Value.getVectorLength(); + SmallVector Elts; + Elts.reserve(NumElements); + for (int i = 0; i < NumElements; ++i) { + auto C = tryEmitPrivateForMemory(Value.getVectorElt(i), ElementType); + if (!C) + return {}; + Elts.push_back(C); + } + auto Desired = + mlir::cast(CGM.getTypes().ConvertType(DestType)); + return mlir::cir::ConstVectorAttr::get( + Desired, mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Elts)); + } case APValue::MemberPointer: { assert(!MissingFeatures::cxxABI()); @@ -1795,7 +1838,6 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, case APValue::FixedPoint: case APValue::ComplexInt: case APValue::ComplexFloat: - case APValue::Vector: case APValue::AddrLabelDiff: assert(0 && "not implemented"); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 03f9ca7c0926..9a827e572fe1 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -347,6 +347,7 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, if (mlir::isa(attrType) || mlir::isa(attrType) || mlir::isa(attrType) || + mlir::isa(attrType) || mlir::isa(attrType) || mlir::isa(attrType)) return success(); @@ -2825,6 +2826,86 @@ void ConstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << ">"; } +LogicalResult mlir::cir::ConstVectorAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::mlir::Type type, mlir::ArrayAttr arrayAttr) { + + if (!mlir::isa(type)) { + return emitError() + << "type of cir::ConstVectorAttr is not a cir::VectorType: " << type; + } + auto vecType = mlir::cast(type); + + // Do the number of elements match? + if (vecType.getSize() != arrayAttr.size()) { + return emitError() + << "number of constant elements should match vector size"; + } + // Do the types of the elements match? + LogicalResult elementTypeCheck = success(); + arrayAttr.walkImmediateSubElements( + [&](Attribute element) { + if (elementTypeCheck.failed()) { + // An earlier element didn't match + return; + } + auto typedElement = mlir::dyn_cast(element); + if (!typedElement || typedElement.getType() != vecType.getEltType()) { + elementTypeCheck = failure(); + emitError() << "constant type should match vector element type"; + } + }, + [&](Type) {}); + return elementTypeCheck; +} + +::mlir::Attribute ConstVectorAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { + ::mlir::FailureOr<::mlir::Type> resultType; + ::mlir::FailureOr resultValue; + ::llvm::SMLoc loc = parser.getCurrentLocation(); + + // Parse literal '<' + if (parser.parseLess()) { + return {}; + } + + // Parse variable 'value' + resultValue = ::mlir::FieldParser::parse(parser); + if (failed(resultValue)) { + parser.emitError(parser.getCurrentLocation(), + "failed to parse ConstVectorAttr parameter 'value' as " + "an attribute"); + return {}; + } + + if (parser.parseOptionalColon().failed()) { + resultType = type; + } else { + resultType = ::mlir::FieldParser<::mlir::Type>::parse(parser); + if (failed(resultType)) { + parser.emitError(parser.getCurrentLocation(), + "failed to parse ConstVectorAttr parameter 'type' as " + "an MLIR type"); + return {}; + } + } + + // Parse literal '>' + if (parser.parseGreater()) { + return {}; + } + + return parser.getChecked( + loc, parser.getContext(), resultType.value(), resultValue.value()); +} + +void ConstVectorAttr::print(::mlir::AsmPrinter &printer) const { + printer << "<"; + printer.printStrippedAttrOrType(getElts()); + printer << ">"; +} + ::mlir::Attribute SignedOverflowBehaviorAttr::parse(::mlir::AsmParser &parser, ::mlir::Type type) { if (parser.parseLess()) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3bde0f97e0f8..7fa50766f737 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -315,6 +315,35 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, return result; } +// ConstVectorAttr visitor. +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + mlir::cir::ConstVectorAttr constVec, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto llvmTy = converter->convertType(constVec.getType()); + auto loc = parentOp->getLoc(); + SmallVector mlirValues; + for (auto elementAttr : constVec.getElts()) { + mlir::Attribute mlirAttr; + if (auto intAttr = mlir::dyn_cast(elementAttr)) { + mlirAttr = rewriter.getIntegerAttr( + converter->convertType(intAttr.getType()), intAttr.getValue()); + } else if (auto floatAttr = + mlir::dyn_cast(elementAttr)) { + mlirAttr = rewriter.getFloatAttr( + converter->convertType(floatAttr.getType()), floatAttr.getValue()); + } else { + llvm_unreachable( + "vector constant with an element that is neither an int nor a float"); + } + mlirValues.push_back(mlirAttr); + } + return rewriter.create( + loc, llvmTy, + mlir::DenseElementsAttr::get(mlir::cast(llvmTy), + mlirValues)); +} + // GlobalViewAttr visitor. mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::GlobalViewAttr globalAttr, @@ -385,6 +414,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, return lowerCirAttrAsValue(parentOp, constStruct, rewriter, converter); if (const auto constArr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, constArr, rewriter, converter); + if (const auto constVec = mlir::dyn_cast(attr)) + return lowerCirAttrAsValue(parentOp, constVec, rewriter, converter); if (const auto boolAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, boolAttr, rewriter, converter); if (const auto zeroAttr = mlir::dyn_cast(attr)) @@ -1185,6 +1216,11 @@ class CIRConstantLowering return op.emitError() << "unsupported lowering for struct constant type " << op.getType(); + } else if (const auto vecTy = + mlir::dyn_cast(op.getType())) { + rewriter.replaceOp(op, lowerCirAttrAsValue(op, op.getValue(), rewriter, + getTypeConverter())); + return mlir::success(); } else return op.emitError() << "unsupported constant type " << op.getType(); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 083bf0d69a12..8cf7f226570e 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -511,24 +511,49 @@ class CIRConstantOpLowering public: using OpConversionPattern::OpConversionPattern; +private: + // This code is in a separate function rather than part of matchAndRewrite + // because it is recursive. There is currently only one level of recursion; + // when lowing a vector attribute the attributes for the elements also need + // to be lowered. + mlir::TypedAttr + lowerCirAttrToMlirAttr(mlir::Attribute cirAttr, + mlir::ConversionPatternRewriter &rewriter) const { + assert(mlir::isa(cirAttr) && + "Can't lower a non-typed attribute"); + auto mlirType = getTypeConverter()->convertType( + mlir::cast(cirAttr).getType()); + if (auto vecAttr = mlir::dyn_cast(cirAttr)) { + assert(mlir::isa(mlirType) && + "MLIR type for CIR vector attribute is not mlir::VectorType"); + assert(mlir::isa(mlirType) && + "mlir::VectorType is not a mlir::ShapedType ??"); + SmallVector mlirValues; + for (auto elementAttr : vecAttr.getElts()) { + mlirValues.push_back( + this->lowerCirAttrToMlirAttr(elementAttr, rewriter)); + } + return mlir::DenseElementsAttr::get( + mlir::cast(mlirType), mlirValues); + } else if (auto boolAttr = mlir::dyn_cast(cirAttr)) { + return rewriter.getIntegerAttr(mlirType, boolAttr.getValue()); + } else if (auto floatAttr = mlir::dyn_cast(cirAttr)) { + return rewriter.getFloatAttr(mlirType, floatAttr.getValue()); + } else if (auto intAttr = mlir::dyn_cast(cirAttr)) { + return rewriter.getIntegerAttr(mlirType, intAttr.getValue()); + } else { + llvm_unreachable("NYI: unsupported attribute kind lowering to MLIR"); + return {}; + } + } + +public: mlir::LogicalResult matchAndRewrite(mlir::cir::ConstantOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto ty = getTypeConverter()->convertType(op.getType()); - mlir::TypedAttr value; - if (mlir::isa(op.getType())) { - auto boolValue = mlir::cast(op.getValue()); - value = rewriter.getIntegerAttr(ty, boolValue.getValue()); - } else if (mlir::isa(op.getType())) { - value = rewriter.getFloatAttr( - ty, mlir::cast(op.getValue()).getValue()); - } else { - auto cirIntAttr = mlir::dyn_cast(op.getValue()); - assert(cirIntAttr && "NYI non cir.int attr"); - value = rewriter.getIntegerAttr( - ty, cast(op.getValue()).getValue()); - } - rewriter.replaceOpWithNewOp(op, ty, value); + rewriter.replaceOpWithNewOp( + op, getTypeConverter()->convertType(op.getType()), + this->lowerCirAttrToMlirAttr(op.getValue(), rewriter)); return mlir::LogicalResult::success(); } }; @@ -1419,4 +1444,4 @@ mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, return theModule; } -} // namespace cir \ No newline at end of file +} // namespace cir diff --git a/clang/test/CIR/CodeGen/vectype-ext.cpp b/clang/test/CIR/CodeGen/vectype-ext.cpp index 915fb231177c..5c9533b723e8 100644 --- a/clang/test/CIR/CodeGen/vectype-ext.cpp +++ b/clang/test/CIR/CodeGen/vectype-ext.cpp @@ -15,10 +15,9 @@ typedef unsigned short vus2 __attribute__((ext_vector_type(2))); // LLVM: define void {{@.*vector_int_test.*}} void vector_int_test(int x) { - // Vector constant. Not yet implemented. Expected results will change from - // cir.vec.create to cir.const. + // Vector constant. vi4 a = { 1, 2, 3, 4 }; - // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : !cir.vector + // CIR: %{{[0-9]+}} = cir.const #cir.const_vector<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.vector // LLVM: store <4 x i32> , ptr %{{[0-9]+}}, align 16 // Non-const vector initialization. @@ -200,10 +199,9 @@ void vector_int_test(int x) { // CIR: cir.func {{@.*vector_double_test.*}} // LLVM: define void {{@.*vector_double_test.*}} void vector_double_test(int x, double y) { - // Vector constant. Not yet implemented. Expected results will change from - // cir.vec.create to cir.const. + // Vector constant. vd2 a = { 1.5, 2.5 }; - // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : !cir.vector + // CIR: %{{[0-9]+}} = cir.const #cir.const_vector<[#cir.fp<1.500000e+00> : !cir.double, #cir.fp<2.500000e+00> : !cir.double]> : !cir.vector // LLVM: store <2 x double> , ptr %{{[0-9]+}}, align 16 @@ -492,13 +490,12 @@ void test_build_lvalue() { // LLVM: define void {{@.*test_vec3.*}} void test_vec3() { vi3 v = {}; - // CIR-NEXT: %[[#PV:]] = cir.alloca !cir.vector, !cir.ptr>, ["v", init] {alignment = 16 : i64} - // CIR: %[[#VEC4:]] = cir.vec.shuffle(%{{[0-9]+}}, %{{[0-9]+}} : !cir.vector) [#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<-1> : !s32i] : !cir.vector - // CIR-NEXT: %[[#PV4:]] = cir.cast(bitcast, %[[#PV]] : !cir.ptr>), !cir.ptr> - // CIR-NEXT: cir.store %[[#VEC4]], %[[#PV4]] : !cir.vector, !cir.ptr> + // CIR-NEXT: %[[#PV:]] = cir.alloca !cir.vector, !cir.ptr>, ["v"] {alignment = 16 : i64} + // CIR-NEXT: %[[#VVAL:]] = cir.const #cir.const_vector<[#cir.int<0> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i]> : !cir.vector + // CIR-NEXT: cir.store %[[#VVAL]], %[[#PV]] : !cir.vector, !cir.ptr> // LLVM-NEXT: %[[#PV:]] = alloca <3 x i32>, i64 1, align 16 - // LLVM-NEXT: store <4 x i32> , ptr %[[#PV]], align 16 + // LLVM-NEXT: store <3 x i32> zeroinitializer, ptr %[[#PV]], align 16 v + 1; // CIR-NEXT: %[[#PV4:]] = cir.cast(bitcast, %[[#PV]] : !cir.ptr>), !cir.ptr> diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index 745f863e6766..312a1dcba47f 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -8,10 +8,9 @@ typedef unsigned short vus2 __attribute__((vector_size(4))); void vector_int_test(int x) { - // Vector constant. Not yet implemented. Expected results will change from - // cir.vec.create to cir.const. + // Vector constant. vi4 a = { 1, 2, 3, 4 }; - // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : !cir.vector + // CHECK: %{{[0-9]+}} = cir.const #cir.const_vector<[#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.vector // Non-const vector initialization. vi4 b = { x, 5, 6, x + 1 }; @@ -107,10 +106,9 @@ void vector_int_test(int x) { } void vector_double_test(int x, double y) { - // Vector constant. Not yet implemented. Expected results will change from - // cir.vec.create to cir.const. + // Vector constant. vd2 a = { 1.5, 2.5 }; - // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !cir.double, !cir.double) : !cir.vector + // CHECK: %{{[0-9]+}} = cir.const #cir.const_vector<[#cir.fp<1.500000e+00> : !cir.double, #cir.fp<2.500000e+00> : !cir.double]> : !cir.vector // Non-const vector initialization. vd2 b = { y, y + 1.0 }; diff --git a/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp b/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp index 7b1bc1047ece..57c18c67d44a 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp +++ b/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp @@ -29,20 +29,8 @@ void vector_int_test(int x) { vi4 a = { 1, 2, 3, 4 }; - // CHECK: %[[C1:.*]] = arith.constant 1 : i32 - // CHECK: %[[C2:.*]] = arith.constant 2 : i32 - // CHECK: %[[C3:.*]] = arith.constant 3 : i32 - // CHECK: %[[C4:.*]] = arith.constant 4 : i32 - // CHECK: %[[CST:.*]] = arith.constant dense<0> : vector<4xi32> - // CHECK: %[[C0_I64:.*]] = arith.constant 0 : i64 - // CHECK: %[[VEC0:.*]] = vector.insertelement %[[C1]], %[[CST]][%[[C0_I64]] : i64] : vector<4xi32> - // CHECK: %[[C1_I64:.*]] = arith.constant 1 : i64 - // CHECK: %[[VEC1:.*]] = vector.insertelement %[[C2]], %[[VEC0]][%[[C1_I64]] : i64] : vector<4xi32> - // CHECK: %[[C2_I64:.*]] = arith.constant 2 : i64 - // CHECK: %[[VEC2:.*]] = vector.insertelement %[[C3]], %[[VEC1]][%[[C2_I64]] : i64] : vector<4xi32> - // CHECK: %[[C3_I64:.*]] = arith.constant 3 : i64 - // CHECK: %[[VEC3:.*]] = vector.insertelement %[[C4]], %[[VEC2]][%[[C3_I64]] : i64] : vector<4xi32> - // CHECK: memref.store %[[VEC3]], %[[ALLOC2]][] : memref> + // CHECK: %[[CST:.*]] = arith.constant dense<[1, 2, 3, 4]> : vector<4xi32> + // CHECK: memref.store %[[CST]], %[[ALLOC2]][] : memref> vi4 b = {x, 5, 6, x + 1}; diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index d7c5f4c32b02..5c436798209d 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -10,22 +10,9 @@ typedef unsigned short vus2 __attribute__((vector_size(4))); void vector_int_test(int x) { - // Vector constant. Not yet implemented. Expected results will change when - // fully implemented. + // Vector constant. vi4 a = { 1, 2, 3, 4 }; - // CHECK: %[[#T30:]] = llvm.mlir.constant(1 : i32) : i32 - // CHECK: %[[#T31:]] = llvm.mlir.constant(2 : i32) : i32 - // CHECK: %[[#T32:]] = llvm.mlir.constant(3 : i32) : i32 - // CHECK: %[[#T33:]] = llvm.mlir.constant(4 : i32) : i32 - // CHECK: %[[#T34:]] = llvm.mlir.undef : vector<4xi32> - // CHECK: %[[#T35:]] = llvm.mlir.constant(0 : i64) : i64 - // CHECK: %[[#T36:]] = llvm.insertelement %[[#T30]], %[[#T34]][%[[#T35]] : i64] : vector<4xi32> - // CHECK: %[[#T37:]] = llvm.mlir.constant(1 : i64) : i64 - // CHECK: %[[#T38:]] = llvm.insertelement %[[#T31]], %[[#T36]][%[[#T37]] : i64] : vector<4xi32> - // CHECK: %[[#T39:]] = llvm.mlir.constant(2 : i64) : i64 - // CHECK: %[[#T40:]] = llvm.insertelement %[[#T32]], %[[#T38]][%[[#T39]] : i64] : vector<4xi32> - // CHECK: %[[#T41:]] = llvm.mlir.constant(3 : i64) : i64 - // CHECK: %[[#T42:]] = llvm.insertelement %[[#T33]], %[[#T40]][%[[#T41]] : i64] : vector<4xi32> + // CHECK: %[[#T42:]] = llvm.mlir.constant(dense<[1, 2, 3, 4]> : vector<4xi32>) : vector<4xi32> // CHECK: llvm.store %[[#T42]], %[[#T3:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Non-const vector initialization. @@ -236,16 +223,9 @@ void vector_int_test(int x) { void vector_double_test(int x, double y) { - // Vector constant. Not yet implemented. Expected results will change when - // fully implemented. + // Vector constant. vd2 a = { 1.5, 2.5 }; - // CHECK: %[[#T22:]] = llvm.mlir.constant(1.500000e+00 : f64) : f64 - // CHECK: %[[#T23:]] = llvm.mlir.constant(2.500000e+00 : f64) : f64 - // CHECK: %[[#T24:]] = llvm.mlir.undef : vector<2xf64> - // CHECK: %[[#T25:]] = llvm.mlir.constant(0 : i64) : i64 - // CHECK: %[[#T26:]] = llvm.insertelement %[[#T22]], %[[#T24]][%[[#T25]] : i64] : vector<2xf64> - // CHECK: %[[#T27:]] = llvm.mlir.constant(1 : i64) : i64 - // CHECK: %[[#T28:]] = llvm.insertelement %[[#T23]], %[[#T26]][%[[#T27]] : i64] : vector<2xf64> + // CHECK: %[[#T28:]] = llvm.mlir.constant(dense<[1.500000e+00, 2.500000e+00]> : vector<2xf64>) : vector<2xf64> // CHECK: llvm.store %[[#T28]], %[[#T5:]] {alignment = 16 : i64} : vector<2xf64>, !llvm.ptr // Non-const vector initialization. From c844a53f73de4d1d7fdde014ade81603a7099cd7 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 2 Jul 2024 03:23:23 +0800 Subject: [PATCH 1658/2301] [CIR][NFC] Fix bug during fp16 unary op CIRGen (#706) This PR fixes a bug during the CIRGen of fp16 unary operations. Before this patch, for the expression `-x` where `x` is a fp16 value, CIRGen emits the code like the following: ```mlir %0 = cir.cast float_to_float %x : !cir.f16 -> !cir.float %1 = cir.cast float_to_float %0 : !cir.float -> !cir.f16 %2 = cir.unary minus %1 : !cir.fp16 ``` The expected CIRGen should instead be: ```mlir %0 = cir.cast float_to_float %x : !cir.f16 -> !cir.float %1 = cir.unary minus %0 : !cir.float %2 = cir.cast float_to_float %1 : !cir.float -> !cir.f16 ``` This PR fixes this issue. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 42 +++++++++++++--------- clang/test/CIR/CodeGen/bf16-ops.c | 8 ++--- clang/test/CIR/CodeGen/fp16-ops.c | 8 ++--- 3 files changed, 34 insertions(+), 24 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 019705c247d0..c7289d62aa14 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -604,37 +604,47 @@ class ScalarExprEmitter : public StmtVisitor { : PromotionType; auto result = VisitPlus(E, promotionTy); if (result && !promotionTy.isNull()) - result = buildUnPromotedValue(result, E->getType()); - return buildUnaryOp(E, mlir::cir::UnaryOpKind::Plus, result); + return buildUnPromotedValue(result, E->getType()); + return result; } - mlir::Value VisitPlus(const UnaryOperator *E, QualType PromotionType) { + mlir::Value VisitPlus(const UnaryOperator *E, + QualType PromotionType = QualType()) { // This differs from gcc, though, most likely due to a bug in gcc. TestAndClearIgnoreResultAssign(); + + mlir::Value operand; if (!PromotionType.isNull()) - return CGF.buildPromotedScalarExpr(E->getSubExpr(), PromotionType); - return Visit(E->getSubExpr()); + operand = CGF.buildPromotedScalarExpr(E->getSubExpr(), PromotionType); + else + operand = Visit(E->getSubExpr()); + + return buildUnaryOp(E, mlir::cir::UnaryOpKind::Plus, operand); } - mlir::Value VisitUnaryMinus(const UnaryOperator *E) { - // NOTE(cir): QualType function parameter still not used, so don´t replicate - // it here yet. - QualType promotionTy = getPromotionType(E->getSubExpr()->getType()); + mlir::Value VisitUnaryMinus(const UnaryOperator *E, + QualType PromotionType = QualType()) { + QualType promotionTy = PromotionType.isNull() + ? getPromotionType(E->getSubExpr()->getType()) + : PromotionType; auto result = VisitMinus(E, promotionTy); if (result && !promotionTy.isNull()) - result = buildUnPromotedValue(result, E->getType()); - return buildUnaryOp(E, mlir::cir::UnaryOpKind::Minus, result); + return buildUnPromotedValue(result, E->getType()); + return result; } mlir::Value VisitMinus(const UnaryOperator *E, QualType PromotionType) { TestAndClearIgnoreResultAssign(); + + mlir::Value operand; if (!PromotionType.isNull()) - return CGF.buildPromotedScalarExpr(E->getSubExpr(), PromotionType); + operand = CGF.buildPromotedScalarExpr(E->getSubExpr(), PromotionType); + else + operand = Visit(E->getSubExpr()); // NOTE: LLVM codegen will lower this directly to either a FNeg // or a Sub instruction. In CIR this will be handled later in LowerToLLVM. - - return Visit(E->getSubExpr()); + return buildUnaryOp(E, mlir::cir::UnaryOpKind::Minus, operand); } mlir::Value VisitUnaryNot(const UnaryOperator *E) { @@ -660,8 +670,8 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value buildUnaryOp(const UnaryOperator *E, mlir::cir::UnaryOpKind kind, mlir::Value input) { return Builder.create( - CGF.getLoc(E->getSourceRange().getBegin()), - CGF.getCIRType(E->getType()), kind, input); + CGF.getLoc(E->getSourceRange().getBegin()), input.getType(), kind, + input); } // C++ diff --git a/clang/test/CIR/CodeGen/bf16-ops.c b/clang/test/CIR/CodeGen/bf16-ops.c index 6a55e9acfe09..7812e03b129b 100644 --- a/clang/test/CIR/CodeGen/bf16-ops.c +++ b/clang/test/CIR/CodeGen/bf16-ops.c @@ -30,8 +30,8 @@ void foo(void) { h1 = -h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.bf16 - // NONATIVE-NEXT: %{{.+}} = cir.unary(minus, %[[#B]]) : !cir.bf16, !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.bf16 @@ -39,8 +39,8 @@ void foo(void) { h1 = +h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.bf16 - // NONATIVE-NEXT: %{{.+}} = cir.unary(plus, %[[#B]]) : !cir.bf16, !cir.bf16 + // NONATIVE-NEXT: %[[#B:]] = cir.unary(plus, %[[#A]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.bf16 diff --git a/clang/test/CIR/CodeGen/fp16-ops.c b/clang/test/CIR/CodeGen/fp16-ops.c index e39b4fd4e9a9..46a410793a0e 100644 --- a/clang/test/CIR/CodeGen/fp16-ops.c +++ b/clang/test/CIR/CodeGen/fp16-ops.c @@ -30,8 +30,8 @@ void foo(void) { h1 = -h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.f16 - // NONATIVE-NEXT: %{{.+}} = cir.unary(minus, %[[#B]]) : !cir.f16, !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.f16 @@ -39,8 +39,8 @@ void foo(void) { h1 = +h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.f16 - // NONATIVE-NEXT: %{{.+}} = cir.unary(plus, %[[#B]]) : !cir.f16, !cir.f16 + // NONATIVE-NEXT: %[[#B:]] = cir.unary(plus, %[[#A]]) : !cir.float, !cir.float + // NONATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.f16 From 9a28bb058915df373cccbbe3783ecf3bcbae4992 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 2 Jul 2024 19:37:24 -0300 Subject: [PATCH 1659/2301] [CIR][ABI] Add AArch64 unsigned int CC lowering (#708) Adds the necessary bits to lower arguments and return values of type unsigned int for the x86_64 target. --- clang/include/clang/CIR/MissingFeatures.h | 2 + .../Transforms/TargetLowering/ABIInfoImpl.cpp | 7 +++ .../Transforms/TargetLowering/ABIInfoImpl.h | 2 + .../Transforms/TargetLowering/LowerFunction.h | 4 ++ .../TargetLowering/Targets/AArch64.cpp | 50 ++++++++++++++++++- .../aarch64-call-conv-lowering-pass.cpp | 28 +++++++++++ 6 files changed, 91 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index f7cdafe5b1bb..dd8e3deaa00c 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -209,6 +209,8 @@ struct MissingFeatures { static bool fixedWidthIntegers() { return false; } static bool vectorType() { return false; } + static bool functionMemberPointerType() { return false; } + static bool fixedSizeIntType() { return false; } //-- Missing LLVM attributes diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index 04ee613ec0cc..e5ddcff6b5e7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -13,7 +13,9 @@ #include "ABIInfo.h" #include "CIRCXXABI.h" +#include "LowerFunction.h" #include "LowerFunctionInfo.h" +#include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" namespace mlir { @@ -30,6 +32,11 @@ bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, return CXXABI.classifyReturnType(FI); } +bool isAggregateTypeForABI(Type T) { + assert(!::cir::MissingFeatures::functionMemberPointerType()); + return !LowerFunction::hasScalarEvaluationKind(T); +} + Type useFirstFieldIfTransparentUnion(Type Ty) { if (auto RT = dyn_cast(Ty)) { if (RT.isUnion()) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h index 84904434bffd..80f43d9a5e9f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h @@ -24,6 +24,8 @@ namespace cir { bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, const ABIInfo &Info); +bool isAggregateTypeForABI(Type T); + /// Pass transparent unions as if they were the type of the first element. Sema /// should ensure that all elements of the union have the same "machine type". Type useFirstFieldIfTransparentUnion(Type Ty); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h index fe54b98939ec..6a892ef79d9f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h @@ -86,6 +86,10 @@ class LowerFunction { /// Return the TypeEvaluationKind of Type \c T. static ::cir::TypeEvaluationKind getEvaluationKind(Type T); + + static bool hasScalarEvaluationKind(Type T) { + return getEvaluationKind(T) == ::cir::TypeEvaluationKind::TEK_Scalar; + } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index b452995813fd..1490a3babc96 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -39,16 +39,20 @@ class AArch64ABIInfo : public ABIInfo { private: AArch64ABIKind getABIKind() const { return Kind; } + bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; } ABIArgInfo classifyReturnType(Type RetTy, bool IsVariadic) const; + ABIArgInfo classifyArgumentType(Type RetTy, bool IsVariadic, + unsigned CallingConvention) const; void computeInfo(LowerFunctionInfo &FI) const override { if (!::mlir::cir::classifyReturnType(getCXXABI(), FI, *this)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic()); - for (auto &_ : FI.arguments()) - llvm_unreachable("NYI"); + for (auto &it : FI.arguments()) + it.info = classifyArgumentType(it.type, FI.isVariadic(), + FI.getCallingConvention()); } }; @@ -67,6 +71,48 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, if (isa(RetTy)) return ABIArgInfo::getIgnore(); + if (const auto _ = dyn_cast(RetTy)) { + llvm_unreachable("NYI"); + } + + // Large vector types should be returned via memory. + if (isa(RetTy) && getContext().getTypeSize(RetTy) > 128) + llvm_unreachable("NYI"); + + if (!isAggregateTypeForABI(RetTy)) { + // NOTE(cir): Skip enum handling. + + if (MissingFeature::fixedSizeIntType()) + llvm_unreachable("NYI"); + + return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS() + ? ABIArgInfo::getExtend(RetTy) + : ABIArgInfo::getDirect()); + } + + llvm_unreachable("NYI"); +} + +ABIArgInfo +AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, + unsigned CallingConvention) const { + Ty = useFirstFieldIfTransparentUnion(Ty); + + // TODO(cir): check for illegal vector types. + if (MissingFeature::vectorType()) + llvm_unreachable("NYI"); + + if (!isAggregateTypeForABI(Ty)) { + // NOTE(cir): Enum is IntType in CIR. Skip enum handling here. + + if (MissingFeature::fixedSizeIntType()) + llvm_unreachable("NYI"); + + return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS() + ? ABIArgInfo::getExtend(Ty) + : ABIArgInfo::getDirect()); + } + llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp index 145e8ab83ded..1d1671bbad1a 100644 --- a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp @@ -6,3 +6,31 @@ void Void(void) { // CHECK: cir.call @_Z4Voidv() : () -> () Void(); } + +// Test call conv lowering for trivial usinged integer cases. + +// CHECK: cir.func @_Z5UCharh(%arg0: !u8i loc({{.+}})) -> !u8i +unsigned char UChar(unsigned char c) { + // CHECK: cir.call @_Z5UCharh(%2) : (!u8i) -> !u8i + return UChar(c); +} +// CHECK: cir.func @_Z6UShortt(%arg0: !u16i loc({{.+}})) -> !u16i +unsigned short UShort(unsigned short s) { + // CHECK: cir.call @_Z6UShortt(%2) : (!u16i) -> !u16i + return UShort(s); +} +// CHECK: cir.func @_Z4UIntj(%arg0: !u32i loc({{.+}})) -> !u32i +unsigned int UInt(unsigned int i) { + // CHECK: cir.call @_Z4UIntj(%2) : (!u32i) -> !u32i + return UInt(i); +} +// CHECK: cir.func @_Z5ULongm(%arg0: !u64i loc({{.+}})) -> !u64i +unsigned long ULong(unsigned long l) { + // CHECK: cir.call @_Z5ULongm(%2) : (!u64i) -> !u64i + return ULong(l); +} +// CHECK: cir.func @_Z9ULongLongy(%arg0: !u64i loc({{.+}})) -> !u64i +unsigned long long ULongLong(unsigned long long l) { + // CHECK: cir.call @_Z9ULongLongy(%2) : (!u64i) -> !u64i + return ULongLong(l); +} From baa2fbb2a81d700a42950d09a2d94a51d8a5a094 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Tue, 2 Jul 2024 18:43:06 -0400 Subject: [PATCH 1660/2301] [CIR][CIRGen] Add setNonAliasAttributes for GlobalOp and FuncOp (#707) In this PR: as title we added setNonAliasAttributes in the skeleton of OG's setNonAliasAttributes, and call this function in buildGlobalFunctionDefinition after code for FuncOP is generated. This is needed for CIR OG to know FuncOP is not declaration anymore, thus giving shouldAssumeDsoLocal another run to make dso_local right. A couple of notes about test; 1. having to changed driver.c, because in terms of dso_local for func, masOS is different from other targets as even in OG, as [macOS is !isOSBinFormatELF()](https://github.com/llvm/clangir/blob/f78f9a55e7cd6b9e350556e35097616676cf1f3e/clang/lib/CodeGen/CodeGenModule.cpp#L1599), thus even OG doesn't set dso_local for its functions. 3. most of functions in existing tests still not getting dso_local in LLVM yet because they fall into case of [(RM != llvm::Reloc::Static && !LOpts.PIE) ](https://github.com/llvm/clangir/blob/f78f9a55e7cd6b9e350556e35097616676cf1f3e/clang/lib/CodeGen/CodeGenModule.cpp#L1605C6-L1605C47), which is more complicated to implement as we need to get canBenefitFromLocalAlias right. So I treated it as a missing feature and default it to false. We gonna leave it to another PR to address. In this PR, I just added additional test with -fpie option to my test so we get dso_local for functions without having to deal with this case. Next 2 PRs: PR1. call setNonAliasAttributes in buildGlobalVarDefinition, after initialization for GlobalOP is found, similar to FuncOp. didn't to it in this PR as there are many more test cases needed to be fixed/added for this case. PR2: try to implement canBenefitFromLocalAlias. --- clang/include/clang/CIR/MissingFeatures.h | 8 ++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 64 +++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 1 + clang/test/CIR/CodeGen/func_dsolocal_pie.c | 34 ++++++++++++ clang/test/CIR/driver.c | 18 +++++- 5 files changed, 119 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/CodeGen/func_dsolocal_pie.c diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index dd8e3deaa00c..3c096884a04b 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -50,6 +50,11 @@ struct MissingFeatures { static bool addCompilerUsedGlobal() { return false; } static bool supportIFuncAttr() { return false; } static bool setDefaultVisibility() { return false; } + static bool addUsedOrCompilerUsedGlobal() { return false; } + static bool addUsedGlobal() { return false; } + static bool addSectionAttributes() { return false; } + static bool setSectionForFuncOp() { return false; } + static bool updateCPUAndFeaturesAttributes() { return false; } // Sanitizers static bool reportGlobalToASan() { return false; } @@ -146,7 +151,6 @@ struct MissingFeatures { static bool setNonGC() { return false; } static bool volatileLoadOrStore() { return false; } static bool armComputeVolatileBitfields() { return false; } - static bool setCommonAttributes() { return false; } static bool insertBuiltinUnpredictable() { return false; } static bool createInvariantGroup() { return false; } static bool addAutoInitAnnotation() { return false; } @@ -267,6 +271,8 @@ struct MissingFeatures { // If a store op is guaranteed to execute before the retun value load op, we // can optimize away the store and load ops. Seems like an early optimization. static bool returnValueDominatingStoreOptmiization() { return false; } + // Globals (vars and functions) may have attributes that are target depedent. + static bool setTargetAttributes() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 1e250192c72f..3f093f560477 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -591,7 +591,7 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, } CurCGF = nullptr; - // TODO: setNonAliasAttributes + setNonAliasAttributes(GD, Op); // TODO: SetLLVMFunctionAttributesForDeclaration if (const ConstructorAttr *CA = D->getAttr()) @@ -679,7 +679,67 @@ mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM, } void CIRGenModule::setCommonAttributes(GlobalDecl GD, mlir::Operation *GV) { - assert(!MissingFeatures::setCommonAttributes()); + const Decl *D = GD.getDecl(); + if (isa_and_nonnull(D)) + setGVProperties(GV, dyn_cast(D)); + else + assert(!MissingFeatures::setDefaultVisibility()); + + if (D && D->hasAttr()) + assert(!MissingFeatures::addUsedOrCompilerUsedGlobal()); + + if (const auto *VD = dyn_cast_if_present(D); + VD && + ((codeGenOpts.KeepPersistentStorageVariables && + (VD->getStorageDuration() == SD_Static || + VD->getStorageDuration() == SD_Thread)) || + (codeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static && + VD->getType().isConstQualified()))) + assert(!MissingFeatures::addUsedOrCompilerUsedGlobal()); +} + +void CIRGenModule::setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GO) { + const Decl *D = GD.getDecl(); + setCommonAttributes(GD, GO); + + if (D) { + auto GV = llvm::dyn_cast_or_null(GO); + if (GV) { + if (D->hasAttr()) + assert(!MissingFeatures::addUsedGlobal()); + if (auto *SA = D->getAttr()) + assert(!MissingFeatures::addSectionAttributes()); + if (auto *SA = D->getAttr()) + assert(!MissingFeatures::addSectionAttributes()); + if (auto *SA = D->getAttr()) + assert(!MissingFeatures::addSectionAttributes()); + if (auto *SA = D->getAttr()) + assert(!MissingFeatures::addSectionAttributes()); + } + auto F = llvm::dyn_cast_or_null(GO); + if (F) { + if (D->hasAttr()) + assert(!MissingFeatures::addUsedGlobal()); + if (auto *SA = D->getAttr()) + if (!D->getAttr()) + assert(!MissingFeatures::setSectionForFuncOp()); + + assert(!MissingFeatures::updateCPUAndFeaturesAttributes()); + } + + if (const auto *CSA = D->getAttr()) { + assert(!MissingFeatures::setSectionForFuncOp()); + if (GV) + GV.setSection(CSA->getName()); + if (F) + assert(!MissingFeatures::setSectionForFuncOp()); + } else if (const auto *SA = D->getAttr()) + if (GV) + GV.setSection(SA->getName()); + if (F) + assert(!MissingFeatures::setSectionForFuncOp()); + } + assert(!MissingFeatures::setTargetAttributes()); } void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index acb8e9188c43..52704261110d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -693,6 +693,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Call replaceAllUsesWith on all pairs in Replacements. void applyReplacements(); + void setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GV); /// Map source language used to a CIR attribute. mlir::cir::SourceLanguage getCIRSourceLanguage(); }; diff --git a/clang/test/CIR/CodeGen/func_dsolocal_pie.c b/clang/test/CIR/CodeGen/func_dsolocal_pie.c new file mode 100644 index 000000000000..acbdcda63aee --- /dev/null +++ b/clang/test/CIR/CodeGen/func_dsolocal_pie.c @@ -0,0 +1,34 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -pic-is-pie -pic-level 1 %s -o %t1.cir +// RUN: FileCheck --input-file=%t1.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -pic-is-pie -pic-level 1 %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +void foo(int i) { + +} + +int main() { + foo(2); + return 0; +} + +// CIR: cir.func dsolocal @foo(%arg0: !s32i +// CIR-NEXT: [[TMP0:%.*]] = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} +// CIR-NEXT: cir.store %arg0, [[TMP0]] : !s32i, !cir.ptr +// CIR-NEXT: cir.return + +// CIR: cir.func no_proto dsolocal @main() -> !s32i +// CIR: [[TMP1:%.*]] = cir.const #cir.int<2> : !s32i +// CIR: cir.call @foo([[TMP1]]) : (!s32i) -> () + +// LLVM: define dso_local void @foo(i32 [[TMP3:%.*]]) +// LLVM: [[ARG_STACK:%.*]] = alloca i32, i64 1, align 4, +// LLVM: store i32 [[TMP3]], ptr [[ARG_STACK]], align 4 +// LLVM: ret void, + +// LLVM: define dso_local i32 @main() +// LLVM: [[TMP4:%.*]] = alloca i32, i64 1, align 4, +// LLVM: call void @foo(i32 2), +// LLVM: store i32 0, ptr [[TMP4]], align 4 +// LLVM: [[RET_VAL:%.*]] = load i32, ptr [[TMP4]], align 4 +// LLVM: ret i32 [[RET_VAL]], diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index d1e0d7614489..a02d73b99a67 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -11,18 +11,30 @@ // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-passes -S -Xclang -emit-cir %s -o %t.cir // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-verifier -S -Xclang -emit-cir %s -o %t.cir // RUN: %clang -target arm64-apple-macosx12.0.0 -fclangir -S -Xclang -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR_MACOS +// RUN: %clang -target arm64-apple-macosx12.0.0 -fclangir -S -emit-llvm %s -o %t3.ll +// RUN: FileCheck --input-file=%t3.ll %s -check-prefix=LLVM_MACOS void foo(void) {} // CIR: module {{.*}} { -// CIR-NEXT: cir.func @foo() +// CIR-NEXT: cir.func dsolocal @foo() // CIR-NEXT: cir.return // CIR-NEXT: } // CIR-NEXT: } -// LLVM: define void @foo() +// CIR_MACOS: module {{.*}} { +// CIR_MACOS-NEXT: cir.func @foo() +// CIR_MACOS-NEXT: cir.return +// CIR_MACOS-NEXT: } +// CIR_MACOS-NEXT: } + +// LLVM: define dso_local void @foo() // LLVM-NEXT: ret void // LLVM-NEXT: } +// LLVM_MACOS: define void @foo() +// LLVM_MACOS-NEXT: ret void +// LLVM_MACOS-NEXT: } + // OBJ: 0: c3 retq From ce97be8844b455abd4dabb17f7998b8578e87a34 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Tue, 2 Jul 2024 18:47:09 -0400 Subject: [PATCH 1661/2301] [CIR][CIRGen] Resolve more calls to member functions (#715) OG's counterpart is [here](https://github.com/llvm/clangir/blob/b41d427cc72057adadbdbd2f5e54fdb46592a52a/clang/lib/CodeGen/CGExpr.cpp#L5607) --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 6 +- .../CodeGen/call-via-class-member-funcptr.cpp | 57 +++++++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index a0edb6ff81ef..fe0f9a655f4d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -542,9 +542,13 @@ CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { assert(FD && "DeclRef referring to FunctionDecl only thing supported so far"); return buildDirectCallee(CGM, FD); + } else if (auto ME = dyn_cast(E)) { + if (auto FD = dyn_cast(ME->getMemberDecl())) { + buildIgnoredExpr(ME->getBase()); + return buildDirectCallee(CGM, FD); + } } - assert(!dyn_cast(E) && "NYI"); assert(!dyn_cast(E) && "NYI"); assert(!dyn_cast(E) && "NYI"); diff --git a/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp new file mode 100644 index 000000000000..5a9031503958 --- /dev/null +++ b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp @@ -0,0 +1,57 @@ +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +class a { +public: + static char *b(int); +}; +int h=0; +class f { +public: + const char *b(); + a g; +}; +const char *f::b() { return g.b(h); } +void fn1() { f f1; } + +// CIR: ty_22a22 = !cir.struct +// CIR: ty_22f22 = !cir.struct + +// CIR: cir.global external @h = #cir.int<0> +// CIR: cir.func private @_ZN1a1bEi(!s32i) -> !cir.ptr + +// CIR: cir.func @_ZN1f1bEv(%arg0: !cir.ptr loc{{.*}}) -> !cir.ptr +// CIR: [[H_PTR:%.*]] = cir.get_global @h : !cir.ptr loc(#loc18) +// CIR: [[H_VAL:%.*]] = cir.load [[H_PTR]] : !cir.ptr, !s32i +// CIR: [[RET1_VAL:%.*]] = cir.call @_ZN1a1bEi([[H_VAL]]) : (!s32i) -> !cir.ptr +// CIR: cir.store [[RET1_VAL]], [[RET1_P:%.*]] : !cir.ptr, !cir.ptr> +// CIR: [[RET1_VAL2:%.*]] = cir.load [[RET1_P]] : !cir.ptr>, !cir.ptr +// %7 = cir.load %1 : !cir.ptr>, !cir.ptr +// CIR: cir.return [[RET1_VAL2]] : !cir.ptr + +// CIR: cir.func @_Z3fn1v() +// CIR: [[CLS_F:%.*]] = cir.alloca !ty_22f22, !cir.ptr, ["f1"] {alignment = 1 : i64} +// CIR: cir.return + +// LLVM: %class.f = type { %class.a } +// LLVM: %class.a = type { i8 } +// LLVM: @h = global i32 0 +// LLVM: declare {{.*}} ptr @_ZN1a1bEi(i32) + +// LLVM: define ptr @_ZN1f1bEv(ptr [[ARG0:%.*]]) +// LLVM: [[ARG0_SAVE:%.*]] = alloca ptr, i64 1, align 8 +// LLVM: [[RET_SAVE:%.*]] = alloca ptr, i64 1, align 8 +// LLVM: store ptr [[ARG0]], ptr [[ARG0_SAVE]], align 8, +// LLVM: [[ARG0_LOAD:%.*]] = load ptr, ptr [[ARG0_SAVE]], align 8 +// LLVM: [[FUNC_PTR:%.*]] = getelementptr %class.f, ptr [[ARG0_LOAD]], i32 0, i32 0, +// LLVM: [[VAR_H:%.*]] = load i32, ptr @h, align 4 +// LLVM: [[RET_VAL:%.*]] = call ptr @_ZN1a1bEi(i32 [[VAR_H]]), +// LLVM: store ptr [[RET_VAL]], ptr [[RET_SAVE]], align 8, +// LLVM: [[RET_VAL2:%.*]] = load ptr, ptr [[RET_SAVE]], align 8 +// LLVM: ret ptr [[RET_VAL2]] + +// LLVM: define void @_Z3fn1v() +// LLVM: [[FUNC_PTR:%.*]] = alloca %class.f, i64 1, align 1 +// LLVM: ret void From 706388f9176b6cce2bd1646f5b25a4047b0f37f5 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 3 Jul 2024 07:03:45 +0800 Subject: [PATCH 1662/2301] [CIR][CIRGen] Add support for exact dynamic cast (#709) This PR implements the last piece to make CIR catch up upstream CodeGen on `dynamic_cast` support. It ports an upstream optimization "exact cast" to CIR. The basic idea of exact cast is when `dynamic_cast` to a final class, we don't have to call into the runtime -- we could just check if the dynamic type of the source object is exactly the destination type by quickly comparing the vtable pointers. To give a concrete example of this optimization: ```cpp struct Base { virtual ~Base(); }; struct Derived final : Base {}; Derived *test(Base *src) { return dynamic_cast(src); } ``` Without the optimization, we have to call the runtime function `__dynamic_cast` to do the heavy and slow type check. After enabling the optimization, we could quickly carry out the runtime type check by inline checking whether the vtable ptr of `src` points to the vtable of `Derived`. This PR also fixes a bug in existing dynamic_cast CIRGen code. The bug mistakenly removes the insertion point after emitting a call to bad_cast, causing the CIRGen of any follow up statements to crash. --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 2 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 8 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 169 ++++++++++++++++-- clang/test/CIR/CodeGen/dynamic-cast-exact.cpp | 87 +++++++++ 4 files changed, 250 insertions(+), 16 deletions(-) create mode 100644 clang/test/CIR/CodeGen/dynamic-cast-exact.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 5f6ea8c1d852..e29f843d235f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -309,7 +309,7 @@ class CIRGenCXXABI { QualType SrcRecordTy, QualType DestRecordTy, mlir::cir::PointerType DestCIRTy, - bool isRefCast, mlir::Value Src) = 0; + bool isRefCast, Address Src) = 0; }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index cf18593ff711..e3ce7adb0be7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -1094,9 +1094,14 @@ static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF, mlir::Value NullPtrValue = CGF.getBuilder().getNullPtr(DestCIRTy, Loc); if (!DestTy->isPointerType()) { + auto *CurrentRegion = CGF.getBuilder().getBlock()->getParent(); /// C++ [expr.dynamic.cast]p9: /// A failed cast to reference type throws std::bad_cast CGF.CGM.getCXXABI().buildBadCastCall(CGF, Loc); + + // The call to bad_cast will terminate the current block. Create a new block + // to hold any follow up code. + CGF.getBuilder().createBlock(CurrentRegion, CurrentRegion->end()); } return NullPtrValue; @@ -1138,6 +1143,5 @@ mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, auto destCirTy = mlir::cast(ConvertType(destTy)); return CGM.getCXXABI().buildDynamicCast(*this, loc, srcRecordTy, destRecordTy, - destCirTy, isRefCast, - ThisAddr.getPointer()); + destCirTy, isRefCast, ThisAddr); } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index b72ec9b6c34a..106117f8f7da 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -293,7 +293,7 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, QualType DestRecordTy, mlir::cir::PointerType DestCIRTy, bool isRefCast, - mlir::Value Src) override; + Address Src) override; /**************************** RTTI Uniqueness ******************************/ protected: @@ -2209,8 +2209,7 @@ static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { return CGF.CGM.createRuntimeFunction(FTy, "__cxa_bad_cast"); } -void CIRGenItaniumCXXABI::buildBadCastCall(CIRGenFunction &CGF, - mlir::Location loc) { +static void buildCallToBadCast(CIRGenFunction &CGF, mlir::Location loc) { // TODO(cir): set the calling convention to the runtime function. assert(!MissingFeatures::setCallingConv()); @@ -2219,6 +2218,11 @@ void CIRGenItaniumCXXABI::buildBadCastCall(CIRGenFunction &CGF, CGF.getBuilder().clearInsertionPoint(); } +void CIRGenItaniumCXXABI::buildBadCastCall(CIRGenFunction &CGF, + mlir::Location loc) { + buildCallToBadCast(CGF, loc); +} + static CharUnits computeOffsetHint(ASTContext &Context, const CXXRecordDecl *Src, const CXXRecordDecl *Dst) { @@ -2290,14 +2294,146 @@ static mlir::cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { return CGF.CGM.createRuntimeFunction(FTy, "__dynamic_cast"); } -static mlir::Value buildDynamicCastToVoid(CIRGenFunction &CGF, - mlir::Location Loc, - QualType SrcRecordTy, - mlir::Value Src) { +static Address buildDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, Address Src) { auto vtableUsesRelativeLayout = CGF.CGM.getItaniumVTableContext().isRelativeLayout(); - return CGF.getBuilder().createDynCastToVoid(Loc, Src, - vtableUsesRelativeLayout); + auto ptr = CGF.getBuilder().createDynCastToVoid(Loc, Src.getPointer(), + vtableUsesRelativeLayout); + return Address{ptr, Src.getAlignment()}; +} + +static mlir::Value +buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, + mlir::Location Loc, QualType SrcRecordTy, + QualType DestRecordTy, mlir::cir::PointerType DestCIRTy, + bool IsRefCast, Address Src) { + // Find all the inheritance paths from SrcRecordTy to DestRecordTy. + const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); + const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl(); + CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, + /*DetectVirtual=*/false); + (void)DestDecl->isDerivedFrom(SrcDecl, Paths); + + // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr + // might appear. + std::optional Offset; + for (const CXXBasePath &Path : Paths) { + // dynamic_cast only finds public inheritance paths. + if (Path.Access != AS_public) + continue; + + CharUnits PathOffset; + for (const CXXBasePathElement &PathElement : Path) { + // Find the offset along this inheritance step. + const CXXRecordDecl *Base = + PathElement.Base->getType()->getAsCXXRecordDecl(); + if (PathElement.Base->isVirtual()) { + // For a virtual base class, we know that the derived class is exactly + // DestDecl, so we can use the vbase offset from its layout. + const ASTRecordLayout &L = + CGF.getContext().getASTRecordLayout(DestDecl); + PathOffset = L.getVBaseClassOffset(Base); + } else { + const ASTRecordLayout &L = + CGF.getContext().getASTRecordLayout(PathElement.Class); + PathOffset += L.getBaseClassOffset(Base); + } + } + + if (!Offset) + Offset = PathOffset; + else if (Offset != PathOffset) { + // Base appears in at least two different places. Find the most-derived + // object and see if it's a DestDecl. Note that the most-derived object + // must be at least as aligned as this base class subobject, and must + // have a vptr at offset 0. + Src = buildDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src); + SrcDecl = DestDecl; + Offset = CharUnits::Zero(); + break; + } + } + + if (!Offset) { + // If there are no public inheritance paths, the cast always fails. + mlir::Value NullPtrValue = CGF.getBuilder().getNullPtr(DestCIRTy, Loc); + if (IsRefCast) { + auto *CurrentRegion = CGF.getBuilder().getBlock()->getParent(); + buildCallToBadCast(CGF, Loc); + + // The call to bad_cast will terminate the block. Create a new block to + // hold any follow up code. + CGF.getBuilder().createBlock(CurrentRegion, CurrentRegion->end()); + } + + return NullPtrValue; + } + + // Compare the vptr against the expected vptr for the destination type at + // this offset. Note that we do not know what type Src points to in the case + // where the derived class multiply inherits from the base class so we can't + // use GetVTablePtr, so we load the vptr directly instead. + + mlir::Value ExpectedVPtr = + ABI.getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl); + + // TODO(cir): handle address space here. + assert(!MissingFeatures::addressSpace()); + mlir::Type VPtrTy = ExpectedVPtr.getType(); + mlir::Type VPtrPtrTy = CGF.getBuilder().getPointerTo(VPtrTy); + Address SrcVPtrPtr( + CGF.getBuilder().createBitcast(Src.getPointer(), VPtrPtrTy), + Src.getAlignment()); + mlir::Value SrcVPtr = CGF.getBuilder().createLoad(Loc, SrcVPtrPtr); + + // TODO(cir): decorate SrcVPtr with TBAA info. + assert(!MissingFeatures::tbaa()); + + mlir::Value Success = CGF.getBuilder().createCompare( + Loc, mlir::cir::CmpOpKind::eq, SrcVPtr, ExpectedVPtr); + + auto buildCastResult = [&] { + if (Offset->isZero()) + return CGF.getBuilder().createBitcast(Src.getPointer(), DestCIRTy); + + // TODO(cir): handle address space here. + assert(!MissingFeatures::addressSpace()); + mlir::Type U8PtrTy = + CGF.getBuilder().getPointerTo(CGF.getBuilder().getUInt8Ty()); + + mlir::Value StrideToApply = CGF.getBuilder().getConstInt( + Loc, CGF.getBuilder().getUInt64Ty(), Offset->getQuantity()); + mlir::Value SrcU8Ptr = + CGF.getBuilder().createBitcast(Src.getPointer(), U8PtrTy); + mlir::Value ResultU8Ptr = CGF.getBuilder().create( + Loc, U8PtrTy, SrcU8Ptr, StrideToApply); + return CGF.getBuilder().createBitcast(ResultU8Ptr, DestCIRTy); + }; + + if (IsRefCast) { + mlir::Value Failed = CGF.getBuilder().createNot(Success); + CGF.getBuilder().create( + Loc, Failed, /*withElseRegion=*/false, + [&](mlir::OpBuilder &, mlir::Location) { + buildCallToBadCast(CGF, Loc); + }); + return buildCastResult(); + } + + return CGF.getBuilder() + .create( + Loc, Success, + [&](mlir::OpBuilder &, mlir::Location) { + auto Result = buildCastResult(); + CGF.getBuilder().createYield(Loc, Result); + }, + [&](mlir::OpBuilder &, mlir::Location) { + mlir::Value NullPtrValue = + CGF.getBuilder().getNullPtr(DestCIRTy, Loc); + CGF.getBuilder().createYield(Loc, NullPtrValue); + }) + .getResult(); } static mlir::cir::DynamicCastInfoAttr @@ -2328,14 +2464,21 @@ buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc, mlir::Value CIRGenItaniumCXXABI::buildDynamicCast( CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, QualType DestRecordTy, mlir::cir::PointerType DestCIRTy, bool isRefCast, - mlir::Value Src) { + Address Src) { bool isCastToVoid = DestRecordTy.isNull(); assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference"); if (isCastToVoid) - return buildDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src); + return buildDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src).getPointer(); + + // If the destination is effectively final, the cast succeeds if and only + // if the dynamic type of the pointer is exactly the destination type. + if (DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() && + CGF.CGM.getCodeGenOpts().OptimizationLevel > 0) + return buildExactDynamicCast(*this, CGF, Loc, SrcRecordTy, DestRecordTy, + DestCIRTy, isRefCast, Src); auto castInfo = buildDynamicCastInfo(CGF, Loc, SrcRecordTy, DestRecordTy); - return CGF.getBuilder().createDynCast(Loc, Src, DestCIRTy, isRefCast, - castInfo); + return CGF.getBuilder().createDynCast(Loc, Src.getPointer(), DestCIRTy, + isRefCast, castInfo); } diff --git a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp new file mode 100644 index 000000000000..8ce0344780ec --- /dev/null +++ b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp @@ -0,0 +1,87 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s + +struct Base1 { + virtual ~Base1(); +}; + +struct Base2 { + virtual ~Base2(); +}; + +struct Derived final : Base1 {}; + +Derived *ptr_cast(Base1 *ptr) { + return dynamic_cast(ptr); + // CHECK: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr + // CHECK-NEXT: %[[#EXPECTED_VPTR:]] = cir.vtable.address_point(@_ZTV7Derived, vtable_index = 0, address_point_index = 2) : !cir.ptr>> + // CHECK-NEXT: %[[#SRC_VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr>>> + // CHECK-NEXT: %[[#SRC_VPTR:]] = cir.load %[[#SRC_VPTR_PTR]] : !cir.ptr>>>, !cir.ptr>> + // CHECK-NEXT: %[[#SUCCESS:]] = cir.cmp(eq, %[[#SRC_VPTR]], %[[#EXPECTED_VPTR]]) : !cir.ptr>>, !cir.bool + // CHECK-NEXT: %{{.+}} = cir.ternary(%[[#SUCCESS]], true { + // CHECK-NEXT: %[[#RES:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr + // CHECK-NEXT: cir.yield %[[#RES]] : !cir.ptr + // CHECK-NEXT: }, false { + // CHECK-NEXT: %[[#NULL:]] = cir.const #cir.ptr : !cir.ptr + // CHECK-NEXT: cir.yield %[[#NULL]] : !cir.ptr + // CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr +} + +// LLVM: define ptr @_Z8ptr_castP5Base1(ptr readonly %[[#SRC:]]) +// LLVM-NEXT: %[[#VPTR:]] = load ptr, ptr %[[#SRC]], align 8 +// LLVM-NEXT: %[[#SUCCESS:]] = icmp eq ptr %[[#VPTR]], getelementptr inbounds nuw (i8, ptr @_ZTV7Derived, i64 16) +// LLVM-NEXT: %[[RESULT:.+]] = select i1 %[[#SUCCESS]], ptr %[[#SRC]], ptr null +// LLVM-NEXT: ret ptr %[[RESULT]] +// LLVM-NEXT: } + +Derived &ref_cast(Base1 &ref) { + return dynamic_cast(ref); + // CHECK: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr + // CHECK-NEXT: %[[#EXPECTED_VPTR:]] = cir.vtable.address_point(@_ZTV7Derived, vtable_index = 0, address_point_index = 2) : !cir.ptr>> + // CHECK-NEXT: %[[#SRC_VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr>>> + // CHECK-NEXT: %[[#SRC_VPTR:]] = cir.load %[[#SRC_VPTR_PTR]] : !cir.ptr>>>, !cir.ptr>> + // CHECK-NEXT: %[[#SUCCESS:]] = cir.cmp(eq, %[[#SRC_VPTR]], %[[#EXPECTED_VPTR]]) : !cir.ptr>>, !cir.bool + // CHECK-NEXT: %[[#FAILED:]] = cir.unary(not, %[[#SUCCESS]]) : !cir.bool, !cir.bool + // CHECK-NEXT: cir.if %[[#FAILED]] { + // CHECK-NEXT: cir.call @__cxa_bad_cast() : () -> () + // CHECK-NEXT: cir.unreachable + // CHECK-NEXT: } + // CHECK-NEXT: %{{.+}} = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr +} + +// LLVM: define noundef ptr @_Z8ref_castR5Base1(ptr readonly returned %[[#SRC:]]) +// LLVM-NEXT: %[[#VPTR:]] = load ptr, ptr %[[#SRC]], align 8 +// LLVM-NEXT: %[[OK:.+]] = icmp eq ptr %[[#VPTR]], getelementptr inbounds nuw (i8, ptr @_ZTV7Derived, i64 16) +// LLVM-NEXT: br i1 %[[OK]], label %[[#LABEL_OK:]], label %[[#LABEL_FAIL:]] +// LLVM: [[#LABEL_FAIL]]: +// LLVM-NEXT: tail call void @__cxa_bad_cast() +// LLVM-NEXT: unreachable +// LLVM: [[#LABEL_OK]]: +// LLVM-NEXT: ret ptr %[[#SRC]] +// LLVM-NEXT: } + +Derived *ptr_cast_always_fail(Base2 *ptr) { + return dynamic_cast(ptr); + // CHECK: %{{.+}} = cir.load %{{.+}} : !cir.ptr>, !cir.ptr + // CHECK-NEXT: %[[#RESULT:]] = cir.const #cir.ptr : !cir.ptr + // CHECK-NEXT: cir.store %[[#RESULT]], %{{.+}} : !cir.ptr, !cir.ptr> +} + +// LLVM: define noalias noundef ptr @_Z20ptr_cast_always_failP5Base2(ptr nocapture readnone %{{.+}}) +// LLVM-NEXT: ret ptr null +// LLVM-NEXT: } + +Derived &ref_cast_always_fail(Base2 &ref) { + return dynamic_cast(ref); + // CHECK: %{{.+}} = cir.load %{{.+}} : !cir.ptr>, !cir.ptr + // CHECK-NEXT: %{{.+}} = cir.const #cir.ptr : !cir.ptr + // CHECK-NEXT: cir.call @__cxa_bad_cast() : () -> () + // CHECK-NEXT: cir.unreachable +} + +// LLVM: define noalias noundef nonnull ptr @_Z20ref_cast_always_failR5Base2(ptr nocapture readnone %{{.+}}) +// LLVM-NEXT: tail call void @__cxa_bad_cast() +// LLVM-NEXT: unreachable +// LLVM-NEXT: } From ba387cd06412e556e8f196be1a0f7e643c92faa8 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 3 Jul 2024 07:05:32 +0800 Subject: [PATCH 1663/2301] [CIR][LLVMLowering] Add LLVM lowering support for _Float16 (#716) This PR adds LLVM lowering support for `_Float16` type. The only change we need to make here is adding a new type converter to the LLVM lowering pass. The majority of this PR is tests that check the generated LLVM IR. Later I'll add another separate PR that adds LLVM lowering support for the `__bf16` type. This PR is already big enough. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 + .../CIR/CodeGen/{fp16-ops.c => float16-ops.c} | 654 +++++++++++++++++- 2 files changed, 655 insertions(+), 2 deletions(-) rename clang/test/CIR/CodeGen/{fp16-ops.c => float16-ops.c} (65%) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7fa50766f737..6818e1bf5c2e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3419,6 +3419,9 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, converter.addConversion([&](mlir::cir::LongDoubleType type) -> mlir::Type { return converter.convertType(type.getUnderlying()); }); + converter.addConversion([&](mlir::cir::FP16Type type) -> mlir::Type { + return mlir::Float16Type::get(type.getContext()); + }); converter.addConversion([&](mlir::cir::FuncType type) -> mlir::Type { auto result = converter.convertType(type.getReturnType()); llvm::SmallVector arguments; diff --git a/clang/test/CIR/CodeGen/fp16-ops.c b/clang/test/CIR/CodeGen/float16-ops.c similarity index 65% rename from clang/test/CIR/CodeGen/fp16-ops.c rename to clang/test/CIR/CodeGen/float16-ops.c index 46a410793a0e..5b3b7127476b 100644 --- a/clang/test/CIR/CodeGen/fp16-ops.c +++ b/clang/test/CIR/CodeGen/float16-ops.c @@ -1,5 +1,11 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -o - %s | FileCheck --check-prefix=NONATIVE %s -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fnative-half-type -fclangir -emit-cir -o - %s | FileCheck --check-prefix=NATIVE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -o %t.cir %s +// FileCheck --input-file=%t.cir --check-prefix=NONATIVE %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fnative-half-type -fclangir -emit-cir -o %t.cir %s +// FileCheck --input-file=%t.cir --check-prefix=NATIVE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o %t.ll %s +// FileCheck --input-file=%t.ll --check-prefix=NONATIVE-LLVM %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fnative-half-type -fclangir -emit-llvm -o %t.ll %s +// FileCheck --input-file=%t.ll --check-prefix=NATIVE-LLVM %s volatile unsigned test; volatile int i0; @@ -13,10 +19,16 @@ void foo(void) { // NONATIVE: %{{.+}} = cir.cast(float_to_int, %{{.+}} : !cir.f16), !u32i // NATIVE: %{{.+}} = cir.cast(float_to_int, %{{.+}} : !cir.f16), !u32i + // NONATIVE-LLVM: %{{.+}} = fptoui half %{{.+}} to i32 + // NATIVE-LLVM: %{{.+}} = fptoui half %{{.+}} to i32 + h0 = (test); // NONATIVE: %{{.+}} = cir.cast(int_to_float, %{{.+}} : !u32i), !cir.f16 // NATIVE: %{{.+}} = cir.cast(int_to_float, %{{.+}} : !u32i), !cir.f16 + // NONATIVE-LLVM: %{{.+}} = uitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = uitofp i32 %{{.+}} to half + test = (!h1); // NONATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.f16), !cir.bool // NONATIVE-NEXT: %[[#B:]] = cir.unary(not, %[[#A]]) : !cir.bool, !cir.bool @@ -28,6 +40,16 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#A:]] = fcmp une half %{{.+}}, 0xH0000 + // NONATIVE-LLVM-NEXT: %[[#B:]] = zext i1 %[[#A]] to i8 + // NONATIVE-LLVM-NEXT: %[[#C:]] = xor i8 %[[#B]], 1 + // NONATIVE-LLVM-NEXT: %{{.+}} = zext i8 %[[#C]] to i32 + + // NATIVE-LLVM: %[[#A:]] = fcmp une half %{{.+}}, 0xH0000 + // NATIVE-LLVM-NEXT: %[[#B:]] = zext i1 %[[#A]] to i8 + // NATIVE-LLVM-NEXT: %[[#C:]] = xor i8 %[[#B]], 1 + // NATIVE-LLVM-NEXT: %{{.+}} = zext i8 %[[#C]] to i32 + h1 = -h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float @@ -37,6 +59,12 @@ void foo(void) { // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.f16 // NATIVE: %{{.+}} = cir.unary(minus, %{{.+}}) : !cir.f16, !cir.f16 + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fneg float %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to half + + // NATIVE-LLVM: %{{.+}} = fneg half %{{.+}} + h1 = +h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.unary(plus, %[[#A]]) : !cir.float, !cir.float @@ -46,6 +74,12 @@ void foo(void) { // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.f16 // NATIVE: %{{.+}} = cir.unary(plus, %{{.+}}) : !cir.f16, !cir.f16 + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fptrunc float %[[#A]] to half + + // NATIVE-LLVM: %[[#A:]] = load volatile half, ptr @h1, align 2 + // NATIVE-LLVM-NEXT: store volatile half %[[#A]], ptr @h1, align 2 + h1++; // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.f16 // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 @@ -53,6 +87,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.f16 // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + // NONATIVE-LLVM: %{.+} = fadd half %{.+}, 0xH3C00 + + // NATIVE-LLVM: %{.+} = fadd half %{.+}, 0xH3C00 + ++h1; // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.f16 // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 @@ -60,6 +98,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.f16 // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + // NONATIVE-LLVM: %{.+} = fadd half %{.+}, 0xH3C00 + + // NATIVE-LLVM: %{.+} = fadd half %{.+}, 0xH3C00 + --h1; // NONATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.f16 // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 @@ -67,6 +109,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.f16 // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + // NONATIVE-LLVM: %{.+} = fadd half %{.+}, 0xHBC00 + + // NATIVE-LLVM: %{.+} = fadd half %{.+}, 0xHBC00 + h1--; // NONATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.f16 // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 @@ -74,6 +120,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.f16 // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + // NONATIVE-LLVM: %{.+} = fadd half %{.+}, 0xHBC00 + + // NATIVE-LLVM: %{.+} = fadd half %{.+}, 0xHBC00 + h1 = h0 * h2; // NONATIVE: %[[#LHS:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#RHS:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float @@ -82,6 +132,13 @@ void foo(void) { // NATIVE: %{{.+}} = cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#SUM:]] = fmul float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#SUM]] to half + + // NATIVE-LLVM: %{{.+}} = fmul half %{{.+}}, %{{.+}} + h1 = h0 * (_Float16) -2.0f; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.const #cir.fp<2.000000e+00> : !cir.float @@ -96,6 +153,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 // NATIVE-NEXT: %{{.+}} = cir.binop(mul, %{{.+}}, %[[#C]]) : !cir.f16 + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fmul float %[[#A]], -2.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to half + + // NATIVE-LLVM: %{{.+}} = fmul half %{{.+}}, 0xHC000 + h1 = h0 * f2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float @@ -105,6 +168,14 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RES:]] = fmul float %[[#LHS]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM: %[[#RES:]] = fmul float %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + h1 = f0 * h2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.float @@ -114,6 +185,14 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %{{.+}}, %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %{{.+}}, %[[#RHS]] + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + h1 = h0 * i0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 @@ -124,6 +203,15 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NATIVE-NEXT: %{{.+}} = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %[[#A:]] = fpext half %[[#RHS]] to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %[[#LHS]], %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %{{.+}} = fmul half %{{.+}}, %[[#A]] + h1 = (h0 / h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float @@ -132,6 +220,13 @@ void foo(void) { // NATIVE: %{{.+}} = cir.binop(div, %{{.+}}, %{{.+}}) : !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %{{.+}} = fdiv half %{{.+}}, %{{.+}} + h1 = (h0 / (_Float16) -2.0f); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.const #cir.fp<2.000000e+00> : !cir.float @@ -146,6 +241,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 // NATIVE-NEXT: %{{.+}} = cir.binop(div, %{{.+}}, %[[#C]]) : !cir.f16 + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fdiv float %[[#A]], -2.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to half + + // NATIVE-LLVM: %{{.+}} = fdiv half %{{.+}}, 0xHC000 + h1 = (h0 / f2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float @@ -155,6 +256,14 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RES:]] = fdiv float %[[#LHS]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM: %[[#RES:]] = fdiv float %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + h1 = (f0 / h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.float @@ -164,6 +273,14 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %{{.+}}, %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %{{.+}}, %[[#RHS]] + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + h1 = (h0 / i0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 @@ -174,6 +291,15 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NATIVE-NEXT: %{{.+}} = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %[[#A:]] = fpext half %[[#RHS]] to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %{{.+}} = fdiv half %{{.+}}, %[[#A]] + h1 = (h2 + h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float @@ -182,6 +308,13 @@ void foo(void) { // NATIVE: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %{{.+}} = fadd half %{{.+}}, %{{.+}} + h1 = ((_Float16)-2.0 + h0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double @@ -196,6 +329,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 // NATIVE: %{{.+}} = cir.binop(add, %[[#C]], %{{.+}}) : !cir.f16 + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fadd float -2.000000e+00, %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to half + + // NATIVE-LLVM: %{{.+}} = fadd half 0xHC000, %{{.+}} + h1 = (h2 + f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float @@ -205,6 +344,14 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RES:]] = fadd float %[[#LHS]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM: %[[#RES:]] = fadd float %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + h1 = (f2 + h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.float @@ -214,6 +361,14 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fadd float %{{.+}}, %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to half + + // NATIVE-LLVM: %[[#RHS:]] = fpext half %{{.=}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %{{.+}}, %[[#RHS]] + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + h1 = (h0 + i0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 @@ -224,6 +379,15 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %[[#A:]] = fpext half %[[#RHS]] to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.=}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %{{.+}} = fadd half %{{.+}}, %[[#A]] + h1 = (h2 - h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float @@ -232,6 +396,13 @@ void foo(void) { // NATIVE: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %{{.+}} = fsub half %{{.+}}, %{{.+}} + h1 = ((_Float16)-2.0f - h0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float @@ -246,6 +417,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 // NATIVE: %{{.+}} = cir.binop(sub, %[[#C]], %{{.+}}) : !cir.f16 + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fsub float -2.000000e+00, %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to half + + // NATIVE-LLVM: %{{.+}} = fsub half 0xHC000, %{{.+}} + h1 = (h2 - f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float @@ -255,6 +432,14 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RES:]] = fsub float %[[#LHS]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM: %[[#RES:]] = fsub float %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + h1 = (f2 - h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.float @@ -264,6 +449,14 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %{{.+}}, %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#RHS:]] = fpext half %{{.=}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %{{.+}}, %[[#RHS]] + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + h1 = (h0 - i0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 @@ -274,6 +467,15 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NATIVE-NEXT: %{{.+}} = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.f16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %[[#A:]] = fpext half %[[#RHS]] to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %{{.+}} = fsub half %{{.+}}, %[[#A]] + test = (h2 < h0); // NONATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -281,6 +483,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp olt half %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp olt half %{{.+}}, %{{.+}} + test = (h2 < (_Float16)42.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 @@ -292,6 +498,10 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp olt half %{{.+}}, 0xH5140 + + // NATIVE-LLVM: %{{.+}} = fcmp olt half %{{.+}}, 0xH5140 + test = (h2 < f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -301,6 +511,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp olt float %[[#A]], %{{.+}} + + // NATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp olt float %[[#A]], %{{.+}} + test = (f2 < h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -310,6 +526,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.=}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp olt float %{{.+}}, %[[#A]] + + // NATIVE-LLVM: %[[#A:]] = fpext half %{{.=}} to float + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp olt float %{{.+}}, %[[#A]] + test = (i0 < h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !s32i @@ -319,6 +541,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM: %{{.+}} = fcmp olt half %[[#A]], %{{.+}} + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = fcmp olt half %[[#A]], %{{.+}} + test = (h0 < i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i @@ -328,6 +556,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp olt half %{{.+}}, %[[#A]] + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp olt half %{{.+}}, %[[#A]] + test = (h0 > h2); // NONATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -335,6 +569,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp ogt half %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp ogt half %{{.+}}, %{{.+}} + test = ((_Float16)42.0 > h2); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 @@ -346,6 +584,10 @@ void foo(void) { // NATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp ogt half 0xH5140, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp ogt half 0xH5140, %{{.+}} + test = (h0 > f2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -355,6 +597,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.=}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp ogt float %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.=}} to float + // NATIVE-LLVM: %{{.+}} = fcmp ogt float %[[#LHS]], %{{.+}} + test = (f0 > h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -364,6 +612,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp ogt float %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ogt float %{{.+}}, %[[#RHS]] + test = (i0 > h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !s32i @@ -373,6 +627,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM: %{{.+}} = fcmp ogt half %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = fcmp ogt half %[[#LHS]], %{{.+}} + test = (h0 > i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i @@ -382,6 +642,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp ogt half %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ogt half %{{.+}}, %[[#RHS]] + test = (h2 <= h0); // NONATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -389,6 +655,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp ole half %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp ole half %{{.+}}, %{{.+}} + test = (h2 <= (_Float16)42.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 @@ -400,6 +670,10 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp ole half %{{.+}}, 0xH5140 + + // NATIVE-LLVM: %{{.+}} = fcmp ole half %{{.+}}, 0xH5140 + test = (h2 <= f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -409,6 +683,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp ole float %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp ole float %[[#LHS]], %{{.+}} + test = (f2 <= h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -418,6 +698,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp ole float %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ole float %{{.+}}, %[[#RHS]] + test = (i0 <= h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !s32i @@ -427,6 +713,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM: %{{.+}} = fcmp ole half %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = fcmp ole half %[[#LHS]], %{{.+}} + test = (h0 <= i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !s32i @@ -436,6 +728,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp ole half %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ole half %{{.+}}, %[[#RHS]] + test = (h0 >= h2); // NONATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -444,6 +742,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp oge half %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp oge half %{{.+}}, %{{.+}} + test = (h0 >= (_Float16)-2.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double @@ -457,6 +759,10 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp oge half %{{.+}}, 0xHC000 + + // NATIVE-LLVM: %{{.+}} = fcmp oge half %{{.+}}, 0xHC000 + test = (h0 >= f2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -466,6 +772,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp oge float %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp oge float %[[#LHS]], %{{.+}} + test = (f0 >= h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -475,6 +787,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp oge float %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp oge float %{{.+}}, %[[#RHS]] + test = (i0 >= h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !s32i @@ -484,6 +802,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM: %{{.+}} = fcmp oge half %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = fcmp oge half %[[#LHS]], %{{.+}} + test = (h0 >= i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !s32i @@ -493,6 +817,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp oge half %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp oge half %{{.+}}, %[[#RHS]] + test = (h1 == h2); // NONATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -500,6 +830,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp oeq half %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp oeq half %{{.+}}, %{{.+}} + test = (h1 == (_Float16)1.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 @@ -511,6 +845,10 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp oeq half %{{.+}}, 0xH3C00 + + // NATIVE-LLVM: %{{.+}} = fcmp oeq half %{{.+}}, 0xH3C00 + test = (h1 == f1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -520,6 +858,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp oeq float %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp oeq float %[[#LHS]], %{{.+}} + test = (f1 == h1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -529,6 +873,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp oeq float %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp oeq float %{{.+}}, %[[#RHS]] + test = (i0 == h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !s32i @@ -538,6 +888,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM: %{{.+}} = fcmp oeq half %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = fcmp oeq half %[[#LHS]], %{{.+}} + test = (h0 == i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !s32i @@ -547,6 +903,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %{{.=}} = fcmp oeq half %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %{{.=}} = fcmp oeq half %{{.+}}, %[[#RHS]] + test = (h1 != h2); // NONATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -554,6 +916,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp une half %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp une half %{{.+}}, %{{.+}} + test = (h1 != (_Float16)1.0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i @@ -564,6 +930,10 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp une half %{{.+}}, 0xH3C00 + + // NATIVE-LLVM: %{{.+}} = fcmp une half %{{.+}}, 0xH3C00 + test = (h1 != f1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -573,6 +943,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp une float %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.=}} to float + // NATIVE-LLVM: %{{.+}} = fcmp une float %[[#LHS]], %{{.+}} + test = (f1 != h1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -582,6 +958,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp une float %{{.+}}, %[[#A]] + + // NATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp une float %{{.+}}, %[[#A]] + test = (i0 != h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !s32i @@ -591,6 +973,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM: %{{.+}} = fcmp une half %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = fcmp une half %[[#LHS]], %{{.+}} + test = (h0 != i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i @@ -600,6 +988,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp une half %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp une half %{{.+}}, %[[#RHS]] + h1 = (h1 ? h2 : h0); // NONATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.f16), !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.ternary(%[[#A]], true { @@ -618,6 +1012,28 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.get_global @h1 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fcmp une half %{{.+}}, 0xH0000 + // NONATIVE-LLVM-NEXT: br i1 %[[#A]], label %[[#LABEL_A:]], label %[[#LABEL_B:]] + // NONATIVE-LLVM: [[#LABEL_A]]: + // NONATIVE-LLVM-NEXT: %[[#B:]] = load volatile half, ptr @h2, align 2 + // NONATIVE-LLVM-NEXT: br label %[[#LABEL_C:]] + // NONATIVE-LLVM: [[#LABEL_B]]: + // NONATIVE-LLVM-NEXT: %[[#C:]] = load volatile half, ptr @h0, align 2 + // NONATIVE-LLVM-NEXT: br label %[[#LABEL_C]] + // NONATIVE-LLVM: [[#LABEL_C]]: + // NONATIVE-LLVM-NEXT: %8 = phi half [ %[[#C]], %[[#LABEL_B]] ], [ %[[#B]], %[[#LABEL_A]] ] + + // NATIVE-LLVM: %[[#A:]] = fcmp une half %{{.+}}, 0xH0000 + // NATIVE-LLVM-NEXT: br i1 %[[#A]], label %[[#LABEL_A:]], label %[[#LABEL_B:]] + // NATIVE-LLVM: [[#LABEL_A]]: + // NATIVE-LLVM-NEXT: %[[#B:]] = load volatile half, ptr @h2, align 2 + // NATIVE-LLVM-NEXT: br label %[[#LABEL_C:]] + // NATIVE-LLVM: [[#LABEL_B]]: + // NATIVE-LLVM-NEXT: %[[#C:]] = load volatile half, ptr @h0, align 2 + // NATIVE-LLVM-NEXT: br label %[[#LABEL_C]] + // NATIVE-LLVM: [[#LABEL_C]]: + // NATIVE-LLVM-NEXT: %8 = phi half [ %[[#C]], %[[#LABEL_B]] ], [ %[[#B]], %[[#LABEL_A]] ] + h0 = h1; // NONATIVE: %[[#A:]] = cir.get_global @h1 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 @@ -629,6 +1045,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile half, ptr @h1, align 2 + // NONATIVE-LLVM-NEXT: store volatile half %[[#A]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load volatile half, ptr @h1, align 2 + // NATIVE-LLVM-NEXT: store volatile half %[[#A]], ptr @h0, align 2 + h0 = (_Float16)-2.0f; // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float @@ -642,6 +1064,10 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + // NONATIVE-LLVM: store volatile half 0xHC000, ptr @h0, align 2 + + // NATIVE-LLVM: store volatile half 0xHC000, ptr @h0, align 2 + h0 = f0; // NONATIVE: %[[#A:]] = cir.get_global @f0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.float @@ -655,6 +1081,14 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile float, ptr @f0, align 4 + // NONATIVE-LLVM-NEXT: %[[#B:]] = fptrunc float %[[#A]] to half + // NONATIVE-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load volatile float, ptr @f0, align 4 + // NATIVE-LLVM-NEXT: %[[#B:]] = fptrunc float %[[#A]] to half + // NATIVE-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 + h0 = i0; // NONATIVE: %[[#A:]] = cir.get_global @i0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !s32i @@ -668,6 +1102,14 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile i32, ptr @i0, align 4 + // NONATIVE-LLVM-NEXT: %[[#B:]] = sitofp i32 %[[#A]] to half + // NONATIVE-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load volatile i32, ptr @i0, align 4 + // NATIVE-LLVM-NEXT: %[[#B:]] = sitofp i32 %[[#A]] to half + // NATIVE-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 + i0 = h0; // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 @@ -681,6 +1123,14 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @i0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !s32i, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile half, ptr @h0, align 2 + // NONATIVE-LLVM-NEXT: %[[#B:]] = fptosi half %[[#A]] to i32 + // NONATIVE-LLVM-NEXT: store volatile i32 %[[#B]], ptr @i0, align 4 + + // NATIVE-LLVM: %[[#A:]] = load volatile half, ptr @h0, align 2 + // NATIVE-LLVM-NEXT: %[[#B:]] = fptosi half %[[#A]] to i32 + // NATIVE-LLVM-NEXT: store volatile i32 %[[#B]], ptr @i0, align 4 + h0 += h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float @@ -691,6 +1141,13 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %{{.+}} = fadd half %{{.+}}, %{{.+}} + h0 += (_Float16)1.0f; // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.f16 @@ -705,6 +1162,12 @@ void foo(void) { // NATIVE: %[[#C:]] = cir.binop(add, %{{.+}}, %[[#B]]) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fadd float %[[#A]], 1.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to half + + // NATIVE-LLVM: %{{.+}} = fadd half %{{.+}}, 0xH3C00 + h0 += f2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float @@ -716,6 +1179,14 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + i0 += h0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float @@ -728,6 +1199,15 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + // NONATVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATVE-LLVM: %[[#LHS:]] = sitofp i32 %3 to float + // NONATVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %[[#RHS]] + // NONATVE-LLVM-NEXT: %{{.+}} = fptosi float %[[#RES]] to i32 + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %[[#B:]] = fadd half %[[#A]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptosi half %[[#B]] to i32 + h0 += i0; // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float @@ -740,6 +1220,15 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %[[#B:]] = fpext half %[[#A]] to float + // NONATIVE-LLVM: %[[#C:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#D:]] = fadd float %[[#C]], %[[#B]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#D]] to half + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = fadd half %{{.+}}, %[[#A]] + h0 -= h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float @@ -750,6 +1239,13 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %{{.+}} = fsub half %{{.+}}, %{{.+}} + h0 -= (_Float16)1.0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float @@ -763,6 +1259,12 @@ void foo(void) { // NATIVE: %[[#C:]] = cir.binop(sub, %{{.+}}, %[[#B]]) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fsub float %[[#A]], 1.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to half + + // NATIVE-LLVM: %{{.+}} = fsub half %{{.+}}, 0xH3C00 + h0 -= f2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float @@ -774,6 +1276,14 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + i0 -= h0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float @@ -786,6 +1296,15 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + // NONATVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATVE-LLVM: %[[#LHS:]] = sitofp i32 %3 to float + // NONATVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %[[#RHS]] + // NONATVE-LLVM-NEXT: %{{.+}} = fptosi float %[[#RES]] to i32 + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %[[#B:]] = fsub half %[[#A]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptosi half %[[#B]] to i32 + h0 -= i0; // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float @@ -798,6 +1317,15 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %[[#B:]] = fpext half %[[#A]] to float + // NONATIVE-LLVM: %[[#C:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#D:]] = fsub float %[[#C]], %[[#B]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#D]] to half + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = fsub half %{{.+}}, %[[#A]] + h0 *= h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float @@ -808,6 +1336,13 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %{{.+}} = fmul half %{{.+}}, %{{.+}} + h0 *= (_Float16)1.0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float @@ -821,6 +1356,12 @@ void foo(void) { // NATIVE: %[[#C:]] = cir.binop(mul, %{{.+}}, %[[#B]]) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fmul float %[[#A]], 1.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to half + + // NATIVE-LLVM: %{{.+}} = fmul half %{{.+}}, 0xH3C00 + h0 *= f2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float @@ -832,6 +1373,14 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %[[#LHS]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + i0 *= h0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float @@ -844,6 +1393,15 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + // NONATVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATVE-LLVM: %[[#LHS:]] = sitofp i32 %3 to float + // NONATVE-LLVM-NEXT: %[[#RES:]] = fmul float %[[#LHS]], %[[#RHS]] + // NONATVE-LLVM-NEXT: %{{.+}} = fptosi float %[[#RES]] to i32 + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %[[#B:]] = fmul half %[[#A]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptosi half %[[#B]] to i32 + h0 *= i0; // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float @@ -856,6 +1414,15 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %[[#B:]] = fpext half %[[#A]] to float + // NONATIVE-LLVM: %[[#C:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#D:]] = fmul float %[[#C]], %[[#B]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#D]] to half + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = fmul half %{{.+}}, %[[#A]] + h0 /= h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float @@ -866,6 +1433,13 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.binop(div, %{{.+}}, %{{.+}}) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %{{.+}} = fdiv half %{{.+}}, %{{.+}} + h0 /= (_Float16)1.0; // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 @@ -880,6 +1454,12 @@ void foo(void) { // NATIVE: %[[#C:]] = cir.binop(div, %{{.+}}, %[[#B]]) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fdiv float %[[#A]], 1.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to half + + // NATIVE-LLVM: %{{.+}} = fdiv half %{{.+}}, 0xH3C00 + h0 /= f2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float @@ -891,6 +1471,14 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + // NATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + i0 /= h0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float @@ -903,6 +1491,15 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + // NONATVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // NONATVE-LLVM: %[[#LHS:]] = sitofp i32 %3 to float + // NONATVE-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %[[#RHS]] + // NONATVE-LLVM-NEXT: %{{.+}} = fptosi float %[[#RES]] to i32 + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM-NEXT: %[[#B:]] = fdiv half %[[#A]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptosi half %[[#B]] to i32 + h0 /= i0; // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.f16), !cir.float @@ -915,6 +1512,15 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.f16 // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NONATIVE-LLVM-NEXT: %[[#B:]] = fpext half %[[#A]] to float + // NONATIVE-LLVM: %[[#C:]] = fpext half %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#D:]] = fdiv float %[[#C]], %[[#B]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#D]] to half + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // NATIVE-LLVM: %{{.+}} = fdiv half %{{.+}}, %[[#A]] + h0 = d0; // NONATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double @@ -928,6 +1534,14 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile double, ptr @d0, align 8 + // NONATIVE-LLVM-NEXT: %[[#B:]] = fptrunc double %[[#A]] to half + // NONATIVE-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load volatile double, ptr @d0, align 8 + // NATIVE-LLVM-NEXT: %[[#B:]] = fptrunc double %[[#A]] to half + // NATIVE-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 + h0 = (float)d0; // NONATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double @@ -943,6 +1557,16 @@ void foo(void) { // NATIVE-NEXT: %[[#E:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.f16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile double, ptr @d0, align 8 + // NONATIVE-LLVM-NEXT: %[[#B:]] = fptrunc double %[[#A]] to float + // NONATIVE-LLVM-NEXT: %[[#C:]] = fptrunc float %[[#B]] to half + // NONATIVE-LLVM-NEXT: store volatile half %[[#C]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load volatile double, ptr @d0, align 8 + // NATIVE-LLVM-NEXT: %[[#B:]] = fptrunc double %[[#A]] to float + // NATIVE-LLVM-NEXT: %[[#C:]] = fptrunc float %[[#B]] to half + // NATIVE-LLVM-NEXT: store volatile half %[[#C]], ptr @h0, align 2 + d0 = h0; // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 @@ -956,6 +1580,14 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @d0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.double, !cir.ptr + // NONATVE-LLVM: %[[#A:]] = load volatile half, ptr @h0, align 2 + // NONATVE-LLVM-NEXT: %[[#B:]] = fpext half %[[#A]] to double + // NONATVE-LLVM-NEXT: store volatile double %[[#B]], ptr @d0, align 8 + + // NATIVE-LLVM: %[[#A:]] = load volatile half, ptr @h0, align 2 + // NATIVE-LLVM-NEXT: %[[#B:]] = fpext half %[[#A]] to double + // NATIVE-LLVM-NEXT: store volatile double %[[#B]], ptr @d0, align 8 + d0 = (float)h0; // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 @@ -971,6 +1603,16 @@ void foo(void) { // NATIVE-NEXT: %[[#E:]] = cir.get_global @d0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.double, !cir.ptr + // NONATVE-LLVM: %[[#A:]] = load volatile half, ptr @h0, align 2 + // NONATVE-LLVM-NEXT: %[[#B:]] = fpext half %[[#A]] to float + // NONATVE-LLVM-NEXT: %[[#C:]] = fpext float %[[#B]] to double + // NONATVE-LLVM-NEXT: store volatile double %[[#C]], ptr @d0, align 8 + + // NATIVE-LLVM: %[[#A:]] = load volatile half, ptr @h0, align 2 + // NATIVE-LLVM-NEXT: %[[#B:]] = fpext half %[[#A]] to float + // NATIVE-LLVM-NEXT: %[[#C:]] = fpext float %[[#B]] to double + // NATIVE-LLVM-NEXT: store volatile double %[[#C]], ptr @d0, align 8 + h0 = s0; // NONATIVE: %[[#A:]] = cir.get_global @s0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr, !s16i @@ -983,4 +1625,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s16i), !cir.f16 // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // NONATIVE-LLVM: %[[#A:]] = load i16, ptr @s0, align 2 + // NONATIVE-LLVM-NEXT: %[[#B:]] = sitofp i16 %[[#A]] to half + // NONATIVE-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load i16, ptr @s0, align 2 + // NATIVE-LLVM-NEXT: %[[#B:]] = sitofp i16 %[[#A]] to half + // NATIVE-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 } From 1c09533db5142b5716b2a173600e03080dea70c1 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 2 Jul 2024 20:09:05 -0300 Subject: [PATCH 1664/2301] [CIR][ABI][NFC] Add missing x86-64 signed int CC lowering tests (#713) --- .../x86_64/x86_64-call-conv-lowering-pass.cpp | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index d431f9ed1db9..2ead4bbba761 100644 --- a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -34,3 +34,31 @@ unsigned long long ULongLong(unsigned long long l) { // CHECK: cir.call @_Z9ULongLongy(%2) : (!u64i) -> !u64i return ULongLong(l); } + +/// Test call conv lowering for trivial signext cases. /// + +// CHECK: cir.func @_Z4Chara(%arg0: !s8i {cir.signext} loc({{.+}})) -> (!s8i {cir.signext}) +char Char(signed char c) { + // CHECK: cir.call @_Z4Chara(%{{.+}}) : (!s8i) -> !s8i + return Char(c); +} +// CHECK: cir.func @_Z5Shorts(%arg0: !s16i {cir.signext} loc({{.+}})) -> (!s16i {cir.signext}) +short Short(short s) { + // CHECK: cir.call @_Z5Shorts(%{{.+}}) : (!s16i) -> !s16i + return Short(s); +} +// CHECK: cir.func @_Z3Inti(%arg0: !s32i loc({{.+}})) -> !s32i +int Int(int i) { + // CHECK: cir.call @_Z3Inti(%{{.+}}) : (!s32i) -> !s32i + return Int(i); +} +// CHECK: cir.func @_Z4Longl(%arg0: !s64i loc({{.+}})) -> !s64i +long Long(long l) { + // CHECK: cir.call @_Z4Longl(%{{.+}}) : (!s64i) -> !s64i + return Long(l); +} +// CHECK: cir.func @_Z8LongLongx(%arg0: !s64i loc({{.+}})) -> !s64i +long long LongLong(long long l) { + // CHECK: cir.call @_Z8LongLongx(%{{.+}}) : (!s64i) -> !s64i + return LongLong(l); +} From 9fe5f443c9ab631ea98c8417efb28180a9bd2c86 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 4 Jul 2024 02:44:00 +0800 Subject: [PATCH 1665/2301] [CIR][LLVMLowering] Add LLVM lowering for __bf16 (#717) This PR adds LLVM lowering support for the `__bf16` type. To support its LLVM lowering, we just need to add a new type conversion rule to the LLVM lowering pass. The majority of this PR are the new LLVM IR checks in the tests. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 + clang/test/CIR/CodeGen/bf16-ops.c | 659 +++++++++++++++++- 2 files changed, 660 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6818e1bf5c2e..999754c84f57 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3422,6 +3422,9 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, converter.addConversion([&](mlir::cir::FP16Type type) -> mlir::Type { return mlir::Float16Type::get(type.getContext()); }); + converter.addConversion([&](mlir::cir::BF16Type type) -> mlir::Type { + return mlir::Float16Type::get(type.getContext()); + }); converter.addConversion([&](mlir::cir::FuncType type) -> mlir::Type { auto result = converter.convertType(type.getReturnType()); llvm::SmallVector arguments; diff --git a/clang/test/CIR/CodeGen/bf16-ops.c b/clang/test/CIR/CodeGen/bf16-ops.c index 7812e03b129b..08086eefc874 100644 --- a/clang/test/CIR/CodeGen/bf16-ops.c +++ b/clang/test/CIR/CodeGen/bf16-ops.c @@ -1,5 +1,12 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -o - %s | FileCheck --check-prefix=NONATIVE %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -target-feature +fullbf16 -fclangir -emit-cir -o - %s | FileCheck --check-prefix=NATIVE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefix=NONATIVE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -target-feature +fullbf16 -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefix=NATIVE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefix=NONATIVE-LLVM %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -target-feature +fullbf16 -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefix=NATIVE-LLVM %s +// XFAIL: * volatile unsigned test; volatile int i0; @@ -13,10 +20,16 @@ void foo(void) { // NONATIVE: %{{.+}} = cir.cast(float_to_int, %{{.+}} : !cir.bf16), !u32i // NATIVE: %{{.+}} = cir.cast(float_to_int, %{{.+}} : !cir.bf16), !u32i + // NONATIVE-LLVM: %{{.+}} = fptoui bfloat %{{.+}} to i32 + // NATIVE-LLVM: %{{.+}} = fptoui bfloat %{{.+}} to i32 + h0 = (test); // NONATIVE: %{{.+}} = cir.cast(int_to_float, %{{.+}} : !u32i), !cir.bf16 // NATIVE: %{{.+}} = cir.cast(int_to_float, %{{.+}} : !u32i), !cir.bf16 + // NONATIVE-LLVM: %{{.+}} = uitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = uitofp i32 %{{.+}} to bfloat + test = (!h1); // NONATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.bf16), !cir.bool // NONATIVE-NEXT: %[[#B:]] = cir.unary(not, %[[#A]]) : !cir.bool, !cir.bool @@ -28,6 +41,16 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#A:]] = fcmp une bfloat %{{.+}}, 0xR0000 + // NONATIVE-LLVM-NEXT: %[[#B:]] = zext i1 %[[#A]] to i8 + // NONATIVE-LLVM-NEXT: %[[#C:]] = xor i8 %[[#B]], 1 + // NONATIVE-LLVM-NEXT: %{{.+}} = zext i8 %[[#C]] to i32 + + // NATIVE-LLVM: %[[#A:]] = fcmp une bfloat %{{.+}}, 0xR0000 + // NATIVE-LLVM-NEXT: %[[#B:]] = zext i1 %[[#A]] to i8 + // NATIVE-LLVM-NEXT: %[[#C:]] = xor i8 %[[#B]], 1 + // NATIVE-LLVM-NEXT: %{{.+}} = zext i8 %[[#C]] to i32 + h1 = -h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float @@ -37,6 +60,12 @@ void foo(void) { // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.bf16 // NATIVE: %{{.+}} = cir.unary(minus, %{{.+}}) : !cir.bf16, !cir.bf16 + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fneg float %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fneg bfloat %{{.+}} + h1 = +h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.unary(plus, %[[#A]]) : !cir.float, !cir.float @@ -46,6 +75,12 @@ void foo(void) { // NATIVE-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.bf16 // NATIVE: %{{.+}} = cir.unary(plus, %{{.+}}) : !cir.bf16, !cir.bf16 + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#A]] to bfloat + + // NATIVE-LLVM: %[[#A:]] = load volatile bfloat, ptr @h1, align 2 + // NATIVE-LLVM-NEXT: store volatile bfloat %[[#A]], ptr @h1, align 2 + h1++; // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.bf16 // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 @@ -53,6 +88,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.bf16 // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + // NONATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, 0xR3F80 + + // NATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, 0xR3F80 + ++h1; // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.bf16 // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 @@ -60,6 +99,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.bf16 // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + // NONATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, 0xR3F80 + + // NATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, 0xR3F80 + --h1; // NONATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.bf16 // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 @@ -67,6 +110,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.bf16 // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + // NONATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, 0xRBF80 + + // NATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, 0xRBF80 + h1--; // NONATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.bf16 // NONATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 @@ -74,6 +121,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.bf16 // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + // NONATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, 0xRBF80 + + // NATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, 0xRBF80 + h1 = h0 * h2; // NONATIVE: %[[#LHS:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#RHS:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float @@ -82,6 +133,13 @@ void foo(void) { // NATIVE: %{{.+}} = cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fmul bfloat %{{.+}}, %{{.+}} + h1 = h0 * (__bf16) -2.0f; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.const #cir.fp<2.000000e+00> : !cir.float @@ -96,6 +154,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 // NATIVE-NEXT: %{{.+}} = cir.binop(mul, %{{.+}}, %[[#C]]) : !cir.bf16 + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fmul float %[[#A]], -2.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fmul bfloat %{{.+}}, 0xRC000 + h1 = h0 * f2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float @@ -105,6 +169,14 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#RES:]] = fmul float %[[#LHS]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM: %[[#RES:]] = fmul float %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + h1 = f0 * h2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.float @@ -114,6 +186,14 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %{{.+}}, %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %{{.+}}, %[[#RHS]] + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + h1 = h0 * i0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 @@ -124,6 +204,15 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NATIVE-NEXT: %{{.+}} = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %[[#A:]] = fpext bfloat %[[#RHS]] to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %[[#LHS]], %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %{{.+}} = fmul bfloat %{{.+}}, %[[#A]] + h1 = (h0 / h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float @@ -132,6 +221,13 @@ void foo(void) { // NATIVE: %{{.+}} = cir.binop(div, %{{.+}}, %{{.+}}) : !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fdiv bfloat %{{.+}}, %{{.+}} + h1 = (h0 / (__bf16) -2.0f); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.const #cir.fp<2.000000e+00> : !cir.float @@ -146,6 +242,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 // NATIVE-NEXT: %{{.+}} = cir.binop(div, %{{.+}}, %[[#C]]) : !cir.bf16 + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fdiv float %[[#A]], -2.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fdiv bfloat %{{.+}}, 0xRC000 + h1 = (h0 / f2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float @@ -155,6 +257,14 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#RES:]] = fdiv float %[[#LHS]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM: %[[#RES:]] = fdiv float %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + h1 = (f0 / h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.float @@ -164,6 +274,14 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %{{.+}}, %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %{{.+}}, %[[#RHS]] + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + h1 = (h0 / i0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 @@ -174,6 +292,15 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NATIVE-NEXT: %{{.+}} = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %[[#A:]] = fpext bfloat %[[#RHS]] to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %{{.+}} = fdiv bfloat %{{.+}}, %[[#A]] + h1 = (h2 + h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float @@ -182,6 +309,13 @@ void foo(void) { // NATIVE: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, %{{.+}} + h1 = ((__bf16)-2.0 + h0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double @@ -196,6 +330,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.bf16 // NATIVE: %{{.+}} = cir.binop(add, %[[#C]], %{{.+}}) : !cir.bf16 + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fadd float -2.000000e+00, %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fadd bfloat 0xRC000, %{{.+}} + h1 = (h2 + f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float @@ -205,6 +345,16 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RHS:]] = load volatile float, ptr @f0, align 4 + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RHS:]] = load volatile float, ptr @f0, align 4 + // NATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %[[#RHS]] + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + h1 = (f2 + h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.float @@ -214,6 +364,14 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %{{.+}}, %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %{{.+}}, %[[#RHS]] + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + h1 = (h0 + i0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 @@ -224,6 +382,15 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NATIVE-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS_INT:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %[[#RHS:]] = fpext bfloat %[[#RHS_INT]] to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %{{.+}} = fadd bfloat %{{.+}}, %[[#A]] + h1 = (h2 - h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float @@ -232,6 +399,13 @@ void foo(void) { // NATIVE: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fsub bfloat %{{.+}}, %{{.+}} + h1 = ((__bf16)-2.0f - h0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float @@ -246,6 +420,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 // NATIVE: %{{.+}} = cir.binop(sub, %[[#C]], %{{.+}}) : !cir.bf16 + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fsub float -2.000000e+00, %[[#A]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fsub bfloat 0xRC000, %{{.+}} + h1 = (h2 - f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float @@ -255,6 +435,16 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RHS:]] = load volatile float, ptr @f0, align 4 + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RHS:]] = load volatile float, ptr @f0, align 4 + // NATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %[[#RHS]] + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + h1 = (f2 - h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.float @@ -264,6 +454,14 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.float // NATIVE-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %{{.+}}, %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %{{.+}}, %[[#RHS]] + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + h1 = (h0 - i0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 @@ -274,6 +472,15 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NATIVE-NEXT: %{{.+}} = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.bf16 + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#RHS_INT:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %[[#RHS:]] = fpext bfloat %[[#RHS_INT]] to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %{{.+}} = fsub bfloat %{{.+}}, %[[#A]] + test = (h2 < h0); // NONATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -281,6 +488,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp olt bfloat %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp olt bfloat %{{.+}}, %{{.+}} + test = (h2 < (__bf16)42.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 @@ -292,6 +503,10 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp olt bfloat %{{.+}}, 0xR4228 + + // NATIVE-LLVM: %{{.+}} = fcmp olt bfloat %{{.+}}, 0xR4228 + test = (h2 < f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -301,6 +516,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp olt float %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp olt float %[[#LHS]], %{{.+}} + test = (f2 < h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -310,6 +531,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp olt float %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp olt float %{{.+}}, %[[#RHS]] + test = (i0 < h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i @@ -319,6 +546,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM: %{{.+}} = fcmp olt bfloat %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = fcmp olt bfloat %[[#LHS]], %{{.+}} + test = (h0 < i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i @@ -328,6 +561,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp olt bfloat %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp olt bfloat %{{.+}}, %[[#RHS]] + test = (h0 > h2); // NONATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -335,6 +574,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp ogt bfloat %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp ogt bfloat %{{.+}}, %{{.+}} + test = ((__bf16)42.0 > h2); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 @@ -346,6 +589,10 @@ void foo(void) { // NATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp ogt bfloat 0xR4228, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp ogt bfloat 0xR4228, %{{.+}} + test = (h0 > f2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -355,6 +602,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp ogt float %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp ogt float %[[#LHS]], %{{.+}} + test = (f0 > h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -364,6 +617,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp ogt float %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp ogt float %{{.+}}, %[[#RHS]] + test = (i0 > h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i @@ -373,6 +632,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM: %{{.+}} = fcmp ogt bfloat %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = fcmp ogt bfloat %[[#LHS]], %{{.+}} + test = (h0 > i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i @@ -382,6 +647,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp ogt bfloat %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ogt bfloat %{{.+}}, %[[#RHS]] + test = (h2 <= h0); // NONATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -389,6 +660,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp ole bfloat %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp ole bfloat %{{.+}}, %{{.+}} + test = (h2 <= (__bf16)42.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 @@ -400,6 +675,10 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp ole bfloat %{{.+}}, 0xR4228 + + // NATIVE-LLVM: %{{.+}} = fcmp ole bfloat %{{.+}}, 0xR4228 + test = (h2 <= f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -409,6 +688,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp ole float %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp ole float %[[#LHS]], %{{.+}} + test = (f2 <= h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -418,6 +703,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp ole float %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ole float %{{.+}}, %[[#RHS]] + test = (i0 <= h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.bf16, !s32i @@ -427,6 +718,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM: %{{.+}} = fcmp ole bfloat %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = fcmp ole bfloat %[[#LHS]], %{{.+}} + test = (h0 <= i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i @@ -436,6 +733,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp ole bfloat %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ole bfloat %{{.+}}, %[[#RHS]] + test = (h0 >= h2); // NONATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -444,6 +747,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp oge bfloat %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp oge bfloat %{{.+}}, %{{.+}} + test = (h0 >= (__bf16)-2.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double @@ -457,6 +764,10 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp oge bfloat %{{.+}}, 0xRC000 + + // NATIVE-LLVM: %{{.+}} = fcmp oge bfloat %{{.+}}, 0xRC000 + test = (h0 >= f2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -466,6 +777,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp oge float %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp oge float %[[#LHS]], %{{.+}} + test = (f0 >= h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -475,6 +792,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp oge float %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp oge float %{{.+}}, %[[#RHS]] + test = (i0 >= h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.bf16, !s32i @@ -484,6 +807,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM: %{{.+}} = fcmp oge bfloat %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = fcmp oge bfloat %[[#LHS]], %{{.+}} + test = (h0 >= i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i @@ -493,6 +822,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp oge bfloat %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp oge bfloat %{{.+}}, %[[#RHS]] + test = (h1 == h2); // NONATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -500,6 +835,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp oeq bfloat %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp oeq bfloat %{{.+}}, %{{.+}} + test = (h1 == (__bf16)1.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 @@ -511,6 +850,10 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp oeq bfloat %{{.+}}, 0xR3F80 + + // NATIVE-LLVM: %{{.+}} = fcmp oeq bfloat %{{.+}}, 0xR3F80 + test = (h1 == f1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -520,6 +863,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp oeq float %[[#A]], %{{.+}} + + // NATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp oeq float %[[#A]], %{{.+}} + test = (f1 == h1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -529,6 +878,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp oeq float %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp oeq float %{{.+}}, %[[#RHS]] + test = (i0 == h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.bf16, !s32i @@ -538,6 +893,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM: %{{.+}} = fcmp oeq bfloat %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = fcmp oeq bfloat %[[#LHS]], %{{.+}} + test = (h0 == i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i @@ -547,6 +908,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp oeq bfloat %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp oeq bfloat %{{.+}}, %[[#RHS]] + test = (h1 != h2); // NONATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i @@ -554,6 +921,10 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp une bfloat %{{.+}}, %{{.+}} + + // NATIVE-LLVM: %{{.+}} = fcmp une bfloat %{{.+}}, %{{.+}} + test = (h1 != (__bf16)1.0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i @@ -564,6 +935,10 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-LLVM: %{{.+}} = fcmp une bfloat %{{.+}}, 0xR3F80 + + // NATIVE-LLVM: %{{.+}} = fcmp une bfloat %{{.+}}, 0xR3F80 + test = (h1 != f1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i @@ -573,6 +948,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %{{.+}} = fcmp une float %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM: %{{.+}} = fcmp une float %[[#LHS]], %{{.+}} + test = (f1 != h1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i @@ -582,6 +963,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp une float %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp une float %{{.+}}, %[[#RHS]] + test = (i0 != h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.bf16, !s32i @@ -591,6 +978,12 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM: %{{.+}} = fcmp une bfloat %[[#LHS]], %{{.+}} + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = fcmp une bfloat %[[#LHS]], %{{.+}} + test = (h0 != i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i @@ -600,6 +993,12 @@ void foo(void) { // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp une bfloat %{{.+}}, %[[#RHS]] + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %{{.+}} = fcmp une bfloat %{{.+}}, %[[#RHS]] + h1 = (h1 ? h2 : h0); // NONATIVE: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.bf16), !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.ternary(%[[#A]], true { @@ -618,6 +1017,28 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.get_global @h1 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fcmp une bfloat %{{.+}}, 0xR0000 + // NONATIVE-LLVM-NEXT: br i1 %[[#A]], label %[[#LABEL_A:]], label %[[#LABEL_B:]] + // NONATIVE-LLVM: [[#LABEL_A]]: + // NONATIVE-LLVM-NEXT: %[[#B:]] = load volatile bfloat, ptr @h2, align 2 + // NONATIVE-LLVM-NEXT: br label %[[#LABEL_C:]] + // NONATIVE-LLVM: [[#LABEL_B]]: + // NONATIVE-LLVM-NEXT: %[[#C:]] = load volatile bfloat, ptr @h0, align 2 + // NONATIVE-LLVM-NEXT: br label %[[#LABEL_C]] + // NONATIVE-LLVM: [[#LABEL_C]]: + // NONATIVE-LLVM-NEXT: %{{.+}} = phi bfloat [ %[[#C]], %[[#LABEL_B]] ], [ %[[#B]], %[[#LABEL_A]] ] + + // NATIVE-LLVM: %[[#A:]] = fcmp une bfloat %{{.+}}, 0xR0000 + // NATIVE-LLVM-NEXT: br i1 %[[#A]], label %[[#LABEL_A:]], label %[[#LABEL_B:]] + // NATIVE-LLVM: [[#LABEL_A]]: + // NATIVE-LLVM-NEXT: %[[#B:]] = load volatile bfloat, ptr @h2, align 2 + // NATIVE-LLVM-NEXT: br label %[[#LABEL_C:]] + // NATIVE-LLVM: [[#LABEL_B]]: + // NATIVE-LLVM-NEXT: %[[#C:]] = load volatile bfloat, ptr @h0, align 2 + // NATIVE-LLVM-NEXT: br label %[[#LABEL_C]] + // NATIVE-LLVM: [[#LABEL_C]]: + // NATIVE-LLVM-NEXT: %{{.+}} = phi bfloat [ %[[#C]], %[[#LABEL_B]] ], [ %[[#B]], %[[#LABEL_A]] ] + h0 = h1; // NONATIVE: %[[#A:]] = cir.get_global @h1 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 @@ -629,6 +1050,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile bfloat, ptr @h1, align 2 + // NONATIVE-LLVM-NEXT: store volatile bfloat %[[#A]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load volatile bfloat, ptr @h1, align 2 + // NATIVE-LLVM-NEXT: store volatile bfloat %[[#A]], ptr @h0, align 2 + h0 = (__bf16)-2.0f; // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float @@ -642,6 +1069,10 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: store volatile bfloat 0xRC000, ptr @h0, align 2 + + // NATIVE-LLVM: store volatile bfloat 0xRC000, ptr @h0, align 2 + h0 = f0; // NONATIVE: %[[#A:]] = cir.get_global @f0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.float @@ -655,6 +1086,14 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile float, ptr @f0, align 4 + // NONATIVE-LLVM-NEXT: %[[#B:]] = fptrunc float %[[#A]] to bfloat + // NONATIVE-LLVM-NEXT: store volatile bfloat %[[#B]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load volatile float, ptr @f0, align 4 + // NATIVE-LLVM-NEXT: %[[#B:]] = fptrunc float %[[#A]] to bfloat + // NATIVE-LLVM-NEXT: store volatile bfloat %[[#B]], ptr @h0, align 2 + h0 = i0; // NONATIVE: %[[#A:]] = cir.get_global @i0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !s32i @@ -668,6 +1107,14 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile i32, ptr @i0, align 4 + // NONATIVE-LLVM-NEXT: %[[#B:]] = sitofp i32 %[[#A]] to bfloat + // NONATIVE-LLVM-NEXT: store volatile bfloat %[[#B]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load volatile i32, ptr @i0, align 4 + // NATIVE-LLVM-NEXT: %[[#B:]] = sitofp i32 %[[#A]] to bfloat + // NATIVE-LLVM-NEXT: store volatile bfloat %[[#B]], ptr @h0, align 2 + i0 = h0; // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 @@ -681,6 +1128,14 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @i0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !s32i, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile bfloat, ptr @h0, align 2 + // NONATIVE-LLVM-NEXT: %[[#B:]] = fptosi bfloat %[[#A]] to i32 + // NONATIVE-LLVM-NEXT: store volatile i32 %[[#B]], ptr @i0, align 4 + + // NATIVE-LLVM: %[[#A:]] = load volatile bfloat, ptr @h0, align 2 + // NATIVE-LLVM-NEXT: %[[#B:]] = fptosi bfloat %[[#A]] to i32 + // NATIVE-LLVM-NEXT: store volatile i32 %[[#B]], ptr @i0, align 4 + h0 += h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float @@ -691,6 +1146,13 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#A:]] = fadd float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#A]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, %{{.+}} + h0 += (__bf16)1.0f; // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.bf16 @@ -705,6 +1167,12 @@ void foo(void) { // NATIVE: %[[#C:]] = cir.binop(add, %{{.+}}, %[[#B]]) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fadd float %[[#A]], 1.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, 0xR3F80 + h0 += f2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float @@ -716,6 +1184,14 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fadd float %[[#A]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#B:]] = fadd float %[[#A]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + i0 += h0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float @@ -728,6 +1204,15 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.bf16), !s32i // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptosi float %[[#RES]] to i32 + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %[[#A:]] = fadd bfloat %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptosi bfloat %[[#A]] to i32 + h0 += i0; // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float @@ -740,6 +1225,15 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %[[#RHS:]] = fpext bfloat %[[#A]] to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = fadd bfloat %{{.+}}, %[[#RHS]] + h0 -= h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float @@ -750,6 +1244,13 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#A:]] = fsub float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#A]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fsub bfloat %{{.+}}, %{{.+}} + h0 -= (__bf16)1.0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float @@ -763,6 +1264,12 @@ void foo(void) { // NATIVE: %[[#C:]] = cir.binop(sub, %{{.+}}, %[[#B]]) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fsub float %[[#A]], 1.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fsub bfloat %{{.+}}, 0xR3F80 + h0 -= f2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float @@ -774,6 +1281,14 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fsub float %[[#A]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#B:]] = fsub float %[[#A]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + i0 -= h0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float @@ -786,6 +1301,15 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.bf16), !s32i // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptosi float %[[#RES]] to i32 + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %[[#A:]] = fsub bfloat %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptosi bfloat %[[#A]] to i32 + h0 -= i0; // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float @@ -798,6 +1322,15 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %[[#RHS:]] = fpext bfloat %[[#A]] to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = fsub bfloat %{{.+}}, %[[#RHS]] + h0 *= h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float @@ -808,6 +1341,13 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#A:]] = fmul float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#A]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fmul bfloat %{{.+}}, %{{.+}} + h0 *= (__bf16)1.0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float @@ -821,6 +1361,12 @@ void foo(void) { // NATIVE: %[[#C:]] = cir.binop(mul, %{{.+}}, %[[#B]]) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fmul float %[[#A]], 1.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fmul bfloat %{{.+}}, 0xR3F80 + h0 *= f2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float @@ -832,6 +1378,14 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fmul float %[[#A]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#B:]] = fmul float %[[#A]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + i0 *= h0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float @@ -844,6 +1398,15 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.bf16), !s32i // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptosi float %[[#RES]] to i32 + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %[[#A:]] = fmul bfloat %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptosi bfloat %[[#A]] to i32 + h0 *= i0; // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float @@ -856,6 +1419,15 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %[[#RHS:]] = fpext bfloat %[[#A]] to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fmul float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = fmul bfloat %{{.+}}, %[[#RHS]] + h0 /= h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float @@ -866,6 +1438,13 @@ void foo(void) { // NATIVE: %[[#A:]] = cir.binop(div, %{{.+}}, %{{.+}}) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#A:]] = fdiv float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#A]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fdiv bfloat %{{.+}}, %{{.+}} + h0 /= (__bf16)1.0; // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 @@ -880,6 +1459,12 @@ void foo(void) { // NATIVE: %[[#C:]] = cir.binop(div, %{{.+}}, %[[#B]]) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fdiv float %[[#A]], 1.000000e+00 + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %{{.+}} = fdiv bfloat %{{.+}}, 0xR3F80 + h0 /= f2; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE-NEXT: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float @@ -891,6 +1476,14 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#B:]] = fdiv float %[[#A]], %{{.+}} + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + + // NATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float + // NATIVE-LLVM-NEXT: %[[#B:]] = fdiv float %[[#A]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#B]] to bfloat + i0 /= h0; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float // NONATIVE: %[[#B:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.float @@ -903,6 +1496,15 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.bf16), !s32i // NATIVE-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptosi float %[[#RES]] to i32 + + // NATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM-NEXT: %[[#A:]] = fdiv bfloat %[[#LHS]], %{{.+}} + // NATIVE-LLVM-NEXT: %{{.+}} = fptosi bfloat %[[#A]] to i32 + h0 /= i0; // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.bf16), !cir.float @@ -915,6 +1517,15 @@ void foo(void) { // NATIVE: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.bf16 // NATIVE-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to bfloat + // NONATIVE-LLVM-NEXT: %[[#RHS:]] = fpext bfloat %[[#A]] to float + // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float + // NONATIVE-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %[[#RHS]] + // NONATIVE-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to bfloat + + // NATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat + // NATIVE-LLVM: %{{.+}} = fdiv bfloat %{{.+}}, %[[#RHS]] + h0 = d0; // NONATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double @@ -928,6 +1539,14 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile double, ptr @d0, align 8 + // NONATIVE-LLVM-NEXT: %[[#B:]] = fptrunc double %[[#A]] to bfloat + // NONATIVE-LLVM-NEXT: store volatile bfloat %[[#B]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load volatile double, ptr @d0, align 8 + // NATIVE-LLVM-NEXT: %[[#B:]] = fptrunc double %[[#A]] to bfloat + // NATIVE-LLVM-NEXT: store volatile bfloat %[[#B]], ptr @h0, align 2 + h0 = (float)d0; // NONATIVE: %[[#A:]] = cir.get_global @d0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double @@ -943,6 +1562,16 @@ void foo(void) { // NATIVE-NEXT: %[[#E:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.bf16, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile double, ptr @d0, align 8 + // NONATIVE-LLVM-NEXT: %[[#B:]] = fptrunc double %[[#A]] to float + // NONATIVE-LLVM-NEXT: %[[#C:]] = fptrunc float %[[#B]] to bfloat + // NONATIVE-LLVM-NEXT: store volatile bfloat %[[#C]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load volatile double, ptr @d0, align 8 + // NATIVE-LLVM-NEXT: %[[#B:]] = fptrunc double %[[#A]] to float + // NATIVE-LLVM-NEXT: %[[#C:]] = fptrunc float %[[#B]] to bfloat + // NATIVE-LLVM-NEXT: store volatile bfloat %[[#C]], ptr @h0, align 2 + d0 = h0; // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 @@ -956,6 +1585,14 @@ void foo(void) { // NATIVE-NEXT: %[[#D:]] = cir.get_global @d0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.double, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile bfloat, ptr @h0, align 2 + // NONATIVE-LLVM-NEXT: %[[#B:]] = fpext bfloat %[[#A]] to double + // NONATIVE-LLVM-NEXT: store volatile double %[[#B]], ptr @d0, align 8 + + // NATIVE-LLVM: %[[#A:]] = load volatile bfloat, ptr @h0, align 2 + // NATIVE-LLVM-NEXT: %[[#B:]] = fpext bfloat %[[#A]] to double + // NATIVE-LLVM-NEXT: store volatile double %[[#B]], ptr @d0, align 8 + d0 = (float)h0; // NONATIVE: %[[#A:]] = cir.get_global @h0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.bf16 @@ -971,6 +1608,16 @@ void foo(void) { // NATIVE-NEXT: %[[#E:]] = cir.get_global @d0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.double, !cir.ptr + // NONATIVE-LLVM: %[[#A:]] = load volatile bfloat, ptr @h0, align 2 + // NONATIVE-LLVM-NEXT: %[[#B:]] = fpext bfloat %[[#A]] to float + // NONATIVE-LLVM-NEXT: %[[#C:]] = fpext float %[[#B]] to double + // NONATIVE-LLVM-NEXT: store volatile double %[[#C]], ptr @d0, align 8 + + // NATIVE-LLVM: %[[#A:]] = load volatile bfloat, ptr @h0, align 2 + // NATIVE-LLVM-NEXT: %[[#B:]] = fpext bfloat %[[#A]] to float + // NATIVE-LLVM-NEXT: %[[#C:]] = fpext float %[[#B]] to double + // NATIVE-LLVM-NEXT: store volatile double %[[#C]], ptr @d0, align 8 + h0 = s0; // NONATIVE: %[[#A:]] = cir.get_global @s0 : !cir.ptr // NONATIVE-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr, !s16i @@ -983,4 +1630,12 @@ void foo(void) { // NATIVE-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s16i), !cir.bf16 // NATIVE-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr // NATIVE-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.bf16, !cir.ptr + + // NONATIVE-LLVM: %[[#A:]] = load i16, ptr @s0, align 2 + // NONATIVE-LLVM-NEXT: %[[#B:]] = sitofp i16 %[[#A]] to bfloat + // NONATIVE-LLVM-NEXT: store volatile bfloat %[[#B]], ptr @h0, align 2 + + // NATIVE-LLVM: %[[#A:]] = load i16, ptr @s0, align 2 + // NATIVE-LLVM-NEXT: %[[#B:]] = sitofp i16 %[[#A]] to bfloat + // NATIVE-LLVM-NEXT: store volatile bfloat %[[#B]], ptr @h0, align 2 } From b90bf6137ea79d3c972fbaf3d1a49271f8d3cf48 Mon Sep 17 00:00:00 2001 From: Julian Oppermann Date: Wed, 3 Jul 2024 20:44:28 +0200 Subject: [PATCH 1666/2301] [CIR][NFC] Don't enforce deprecated API check on MSVC (#718) Implements approach 2 in https://github.com/llvm/clangir/pull/703#issuecomment-2194665478, as discussed in the community call. Signed-off-by: Julian Oppermann --- clang/lib/CIR/CMakeLists.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt index 8843b9847074..1812b6669e19 100644 --- a/clang/lib/CIR/CMakeLists.txt +++ b/clang/lib/CIR/CMakeLists.txt @@ -1,12 +1,12 @@ include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) -# Report use of deprecated APIs as errors -if (MSVC) - add_compile_options("/we4996") -else() +# Report use of deprecated APIs as errors. +# TODO: Consider adding `/we4996` for MSVC when upstream MLIR resolves +# https://github.com/llvm/llvm-project/issues/65255. +if (NOT MSVC) add_compile_options("-Werror=deprecated-declarations") -endif(MSVC) +endif() add_subdirectory(Dialect) add_subdirectory(CodeGen) From 2e49e8bf42879db64eb36f7cbd4280d6eea5d850 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 4 Jul 2024 02:49:15 +0800 Subject: [PATCH 1667/2301] [CIR][CIRGen] Add complex type and its CIRGen support (#513) This PR adds `!cir.complex` type to model the `_Complex` type in C. It also contains support for its CIRGen. In detail, this patch adds the following CIR types, ops, and attributes: - The `!cir.complex` type is added to model the `_Complex` type in C. This type is parameterized with the type of the components of the complex number, which must be either an integer type or a floating-point type. - ~The `#cir.complex` attribute is added to represent a literal value of `_Complex` type. It is a struct-like attribute that provides the real and imaginary part of the literal `_Complex` value.~ - ~The `#cir.imag` attribute is added to represent a purely imaginary number.~ - The `cir.complex.create` op is added to create a complex value from its real and imaginary parts. - ~The `cir.complex.real` and `cir.complex.imag` op is added to extract the real and imaginary part of a value of `!cir.complex` type, respectively.~ - The `cir.complex.real_ptr` and `cir.complex.imag_ptr` op is added to derive a pointer to the real and imaginary part of a value of `!cir.complex` type, respectively. CIRGen support for some of the fundamental complex number operations is also included. ~Note the implementation diverges from the original clang CodeGen, where expressions of complex types are handled differently from scalars and aggregates. Instead, this patch treats expressions of complex types as scalars, as such expressions can be simply lowered to a CIR value of `!cir.complex` type.~ This PR addresses #445 . --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 29 ++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 84 +++ .../include/clang/CIR/Dialect/IR/CIRTypes.td | 36 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 55 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 29 +- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 478 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 47 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 16 + clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenValue.h | 13 +- clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 55 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 54 ++ clang/test/CIR/CodeGen/complex.c | 153 ++++++ 16 files changed, 1032 insertions(+), 32 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp create mode 100644 clang/test/CIR/CodeGen/complex.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index df2231c389a3..e636d59c6539 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -89,6 +89,35 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return getPointerTo(::mlir::cir::VoidType::get(getContext()), langAS); } + mlir::cir::BoolAttr getCIRBoolAttr(bool state) { + return mlir::cir::BoolAttr::get(getContext(), getBoolTy(), state); + } + + mlir::TypedAttr getZeroAttr(mlir::Type t) { + return mlir::cir::ZeroAttr::get(getContext(), t); + } + + mlir::TypedAttr getZeroInitAttr(mlir::Type ty) { + if (mlir::isa(ty)) + return mlir::cir::IntAttr::get(ty, 0); + if (auto fltType = mlir::dyn_cast(ty)) + return mlir::cir::FPAttr::getZero(fltType); + if (auto fltType = mlir::dyn_cast(ty)) + return mlir::cir::FPAttr::getZero(fltType); + if (auto complexType = mlir::dyn_cast(ty)) + return getZeroAttr(complexType); + if (auto arrTy = mlir::dyn_cast(ty)) + return getZeroAttr(arrTy); + if (auto ptrTy = mlir::dyn_cast(ty)) + return getConstPtrAttr(ptrTy, 0); + if (auto structTy = mlir::dyn_cast(ty)) + return getZeroAttr(structTy); + if (mlir::isa(ty)) { + return getCIRBoolAttr(false); + } + llvm_unreachable("Zero initializer for given type is NYI"); + } + mlir::Value createLoad(mlir::Location loc, mlir::Value ptr, bool isVolatile = false, uint64_t alignment = 0) { mlir::IntegerAttr intAttr; diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b8af2c76115b..8aedb481eab2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1174,6 +1174,90 @@ def BinOpOverflowOp : CIR_Op<"binop.overflow", [Pure, SameTypeOperands]> { ]; } +//===----------------------------------------------------------------------===// +// ComplexCreateOp +//===----------------------------------------------------------------------===// + +def ComplexCreateOp : CIR_Op<"complex.create", [Pure, SameTypeOperands]> { + let summary = "Create a complex value from its real and imaginary parts"; + let description = [{ + `cir.complex.create` operation takes two operands that represent the real + and imaginary part of a complex number, and yields the complex number. + + Example: + + ```mlir + %0 = cir.const #cir.fp<1.000000e+00> : !cir.double + %1 = cir.const #cir.fp<2.000000e+00> : !cir.double + %2 = cir.complex.create %0, %1 : !cir.complex + ``` + }]; + + let results = (outs CIR_ComplexType:$result); + let arguments = (ins CIR_AnyIntOrFloat:$real, CIR_AnyIntOrFloat:$imag); + + let assemblyFormat = [{ + $real `,` $imag + `:` qualified(type($real)) `->` qualified(type($result)) attr-dict + }]; + + let hasVerifier = 1; +} + +//===----------------------------------------------------------------------===// +// ComplexRealPtrOp and ComplexImagPtrOp +//===----------------------------------------------------------------------===// + +def ComplexRealPtrOp : CIR_Op<"complex.real_ptr", [Pure]> { + let summary = "Extract the real part of a complex value"; + let description = [{ + `cir.complex.real_ptr` operation takes a pointer operand that points to a + complex value of type `!cir.complex` and yields a pointer to the real part + of the operand. + + Example: + + ```mlir + %1 = cir.complex.real_ptr %0 : !cir.ptr> -> !cir.ptr + ``` + }]; + + let results = (outs PrimitiveIntOrFPPtr:$result); + let arguments = (ins ComplexPtr:$operand); + + let assemblyFormat = [{ + $operand `:` + qualified(type($operand)) `->` qualified(type($result)) attr-dict + }]; + + let hasVerifier = 1; +} + +def ComplexImagPtrOp : CIR_Op<"complex.imag_ptr", [Pure]> { + let summary = "Extract the imaginary part of a complex value"; + let description = [{ + `cir.complex.imag_ptr` operation takes a pointer operand that points to a + complex value of type `!cir.complex` and yields a pointer to the imaginary + part of the operand. + + Example: + + ```mlir + %1 = cir.complex.imag_ptr %0 : !cir.ptr> -> !cir.ptr + ``` + }]; + + let results = (outs PrimitiveIntOrFPPtr:$result); + let arguments = (ins ComplexPtr:$operand); + + let assemblyFormat = [{ + $operand `:` + qualified(type($operand)) `->` qualified(type($result)) attr-dict + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // BitsOp //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 47de7623ffce..0ffd22ee9620 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -196,6 +196,32 @@ def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_FP80, CIR_LongDouble]>; def CIR_AnyIntOrFloat: AnyTypeOf<[CIR_AnyFloat, CIR_IntType]>; +//===----------------------------------------------------------------------===// +// ComplexType +//===----------------------------------------------------------------------===// + +def CIR_ComplexType : CIR_Type<"Complex", "complex", + [DeclareTypeInterfaceMethods]> { + + let summary = "CIR complex type"; + let description = [{ + CIR type that represents a C complex number. `cir.complex` models the C type + `T _Complex`. + + The parameter `elementTy` gives the type of the real and imaginary part of + the complex number. `elementTy` must be either a CIR integer type or a CIR + floating-point type. + }]; + + let parameters = (ins "mlir::Type":$elementTy); + + let assemblyFormat = [{ + `<` $elementTy `>` + }]; + + let genVerifyDecl = 1; +} + //===----------------------------------------------------------------------===// // PointerType //===----------------------------------------------------------------------===// @@ -441,6 +467,14 @@ def PrimitiveIntOrFPPtr : Type< ]>, "{int,void}*"> { } +def ComplexPtr : Type< + And<[ + CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::mlir::cir::ComplexType>(" + "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())">, + ]>, "!cir.complex*"> { +} + // Pointer to struct def StructPtr : Type< And<[ @@ -516,7 +550,7 @@ def CIR_StructType : Type($_self)">, def CIR_AnyType : AnyTypeOf<[ CIR_IntType, CIR_PointerType, CIR_DataMemberType, CIR_BoolType, CIR_ArrayType, CIR_VectorType, CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionInfo, - CIR_AnyFloat, CIR_FP16, CIR_BFloat16 + CIR_AnyFloat, CIR_FP16, CIR_BFloat16, CIR_ComplexType ]>; #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 8a36dd4d1d26..4cb51ed2a112 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -136,14 +136,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::GlobalViewAttr::get(type, symbol, indices); } - mlir::TypedAttr getZeroAttr(mlir::Type t) { - return mlir::cir::ZeroAttr::get(getContext(), t); - } - - mlir::cir::BoolAttr getCIRBoolAttr(bool state) { - return mlir::cir::BoolAttr::get(getContext(), getBoolTy(), state); - } - mlir::TypedAttr getConstNullPtrAttr(mlir::Type t) { assert(mlir::isa(t) && "expected cir.ptr"); return getConstPtrAttr(t, 0); @@ -265,6 +257,8 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::FPAttr::getZero(fltType); if (auto fltType = mlir::dyn_cast(ty)) return mlir::cir::FPAttr::getZero(fltType); + if (auto complexType = mlir::dyn_cast(ty)) + return getZeroAttr(complexType); if (auto arrTy = mlir::dyn_cast(ty)) return getZeroAttr(arrTy); if (auto ptrTy = mlir::dyn_cast(ty)) @@ -764,6 +758,42 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, result, base, name, index); } + mlir::Value createComplexCreate(mlir::Location loc, mlir::Value real, + mlir::Value imag) { + auto resultComplexTy = + mlir::cir::ComplexType::get(getContext(), real.getType()); + return create(loc, resultComplexTy, real, imag); + } + + /// Create a cir.complex.real_ptr operation that derives a pointer to the real + /// part of the complex value pointed to by the specified pointer value. + mlir::Value createRealPtr(mlir::Location loc, mlir::Value value) { + auto srcPtrTy = mlir::cast(value.getType()); + auto srcComplexTy = + mlir::cast(srcPtrTy.getPointee()); + return create( + loc, getPointerTo(srcComplexTy.getElementTy()), value); + } + + Address createRealPtr(mlir::Location loc, Address addr) { + return Address{createRealPtr(loc, addr.getPointer()), addr.getAlignment()}; + } + + /// Create a cir.complex.imag_ptr operation that derives a pointer to the + /// imaginary part of the complex value pointed to by the specified pointer + /// value. + mlir::Value createImagPtr(mlir::Location loc, mlir::Value value) { + auto srcPtrTy = mlir::cast(value.getType()); + auto srcComplexTy = + mlir::cast(srcPtrTy.getPointee()); + return create( + loc, getPointerTo(srcComplexTy.getElementTy()), value); + } + + Address createImagPtr(mlir::Location loc, Address addr) { + return Address{createImagPtr(loc, addr.getPointer()), addr.getAlignment()}; + } + /// Cast the element type of the given address to a different type, /// preserving information like the alignment. cir::Address createElementBitCast(mlir::Location loc, cir::Address addr, @@ -776,15 +806,18 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { addr.getAlignment()); } - mlir::Value createLoad(mlir::Location loc, Address addr) { + mlir::Value createLoad(mlir::Location loc, Address addr, + bool isVolatile = false) { auto ptrTy = mlir::dyn_cast(addr.getPointer().getType()); if (addr.getElementType() != ptrTy.getPointee()) addr = addr.withPointer( createPtrBitcast(addr.getPointer(), addr.getElementType())); - return create(loc, addr.getElementType(), - addr.getPointer()); + return create( + loc, addr.getElementType(), addr.getPointer(), /*isDeref=*/false, + /*is_volatile=*/isVolatile, /*alignment=*/mlir::IntegerAttr{}, + /*mem_order=*/mlir::cir::MemOrderAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 52b36c1fa7ac..d6e11e4516e2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -734,7 +734,11 @@ void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, buildScalarInit(init, getLoc(D->getSourceRange()), lvalue); return; case TEK_Complex: { - assert(0 && "not implemented"); + mlir::Value complex = buildComplexExpr(init); + if (capturedByInit) + llvm_unreachable("NYI"); + buildStoreOfComplex(getLoc(init->getExprLoc()), complex, lvalue, + /*init*/ true); return; } case TEK_Aggregate: diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index fe0f9a655f4d..92566d067e23 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1228,7 +1228,7 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { } case TEK_Complex: - assert(0 && "not implemented"); + return buildComplexAssignmentLValue(E); case TEK_Aggregate: assert(0 && "not implemented"); } @@ -1268,6 +1268,7 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { if (E->getOpcode() == UO_Extension) return buildLValue(E->getSubExpr()); + QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); switch (E->getOpcode()) { default: llvm_unreachable("Unknown unary operator lvalue!"); @@ -1292,7 +1293,29 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { } case UO_Real: case UO_Imag: { - assert(0 && "not implemented"); + LValue LV = buildLValue(E->getSubExpr()); + assert(LV.isSimple() && "real/imag on non-ordinary l-value"); + + // __real is valid on scalars. This is a faster way of testing that. + // __imag can only produce an rvalue on scalars. + if (E->getOpcode() == UO_Real && + !mlir::isa(LV.getAddress().getElementType())) { + assert(E->getSubExpr()->getType()->isArithmeticType()); + return LV; + } + + QualType T = ExprTy->castAs()->getElementType(); + + auto Loc = getLoc(E->getExprLoc()); + Address Component = + (E->getOpcode() == UO_Real + ? buildAddrOfRealComponent(Loc, LV.getAddress(), LV.getType()) + : buildAddrOfImagComponent(Loc, LV.getAddress(), LV.getType())); + // TODO(cir): TBAA info. + assert(!MissingFeatures::tbaa()); + LValue ElemLV = makeAddrLValue(Component, T, LV.getBaseInfo()); + ElemLV.getQuals().addQualifiers(LV.getQuals()); + return ElemLV; } case UO_PreInc: case UO_PreDec: { @@ -1319,7 +1342,7 @@ RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot, case TEK_Scalar: return RValue::get(buildScalarExpr(E)); case TEK_Complex: - assert(0 && "not implemented"); + return RValue::getComplex(buildComplexExpr(E)); case TEK_Aggregate: { if (!ignoreResult && aggSlot.isIgnored()) aggSlot = CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp new file mode 100644 index 000000000000..8809a2b1d631 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -0,0 +1,478 @@ +#include "CIRGenBuilder.h" +#include "CIRGenCstEmitter.h" +#include "CIRGenFunction.h" +#include "clang/CIR/MissingFeatures.h" + +#include "mlir/IR/Value.h" +#include "clang/AST/StmtVisitor.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace cir; +using namespace clang; + +namespace { + +class ComplexExprEmitter : public StmtVisitor { + CIRGenFunction &CGF; + CIRGenBuilderTy &Builder; + bool FPHasBeenPromoted; + +public: + explicit ComplexExprEmitter(CIRGenFunction &cgf) + : CGF(cgf), Builder(cgf.getBuilder()), FPHasBeenPromoted(false) {} + + //===--------------------------------------------------------------------===// + // Utilities + //===--------------------------------------------------------------------===// + + /// Given an expression with complex type that represents a value l-value, + /// this method emits the address of the l-value, then loads and returns the + /// result. + mlir::Value buildLoadOfLValue(const Expr *E) { + return buildLoadOfLValue(CGF.buildLValue(E), E->getExprLoc()); + } + + mlir::Value buildLoadOfLValue(LValue LV, SourceLocation Loc); + + /// EmitStoreOfComplex - Store the specified real/imag parts into the + /// specified value pointer. + void buildStoreOfComplex(mlir::Location Loc, mlir::Value Val, LValue LV, + bool isInit); + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + mlir::Value Visit(Expr *E) { + assert(!MissingFeatures::generateDebugInfo()); + return StmtVisitor::Visit(E); + } + + mlir::Value VisitStmt(Stmt *S) { + S->dump(llvm::errs(), CGF.getContext()); + llvm_unreachable("Stmt can't have complex result type!"); + } + + mlir::Value VisitExpr(Expr *S) { llvm_unreachable("not supported"); } + mlir::Value VisitConstantExpr(ConstantExpr *E) { + if (auto Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) + return Builder.getConstant(CGF.getLoc(E->getSourceRange()), + mlir::cast(Result)); + return Visit(E->getSubExpr()); + } + mlir::Value VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); } + mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *GE) { + return Visit(GE->getResultExpr()); + } + mlir::Value VisitImaginaryLiteral(const ImaginaryLiteral *IL); + mlir::Value + VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) { + return Visit(PE->getReplacement()); + } + mlir::Value VisitCoawaitExpr(CoawaitExpr *S) { llvm_unreachable("NYI"); } + mlir::Value VisitCoyieldExpr(CoyieldExpr *S) { llvm_unreachable("NYI"); } + mlir::Value VisitUnaryCoawait(const UnaryOperator *E) { + return Visit(E->getSubExpr()); + } + + mlir::Value emitConstant(const CIRGenFunction::ConstantEmission &Constant, + Expr *E) { + assert(Constant && "not a constant"); + if (Constant.isReference()) + return buildLoadOfLValue(Constant.getReferenceLValue(CGF, E), + E->getExprLoc()); + + auto valueAttr = Constant.getValue(); + return Builder.getConstant(CGF.getLoc(E->getSourceRange()), valueAttr); + } + + // l-values. + mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { + if (CIRGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) + return emitConstant(Constant, E); + return buildLoadOfLValue(E); + } + mlir::Value VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitObjCMessageExpr(ObjCMessageExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitArraySubscriptExpr(Expr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitMemberExpr(MemberExpr *ME) { llvm_unreachable("NYI"); } + mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *E) { + llvm_unreachable("NYI"); + } + + mlir::Value VisitPseudoObjectExpr(PseudoObjectExpr *E) { + llvm_unreachable("NYI"); + } + + // FIXME: CompoundLiteralExpr + + mlir::Value buildCast(CastKind CK, Expr *Op, QualType DestTy); + mlir::Value VisitImplicitCastExpr(ImplicitCastExpr *E) { + // Unlike for scalars, we don't have to worry about function->ptr demotion + // here. + if (E->changesVolatileQualification()) + return buildLoadOfLValue(E); + return buildCast(E->getCastKind(), E->getSubExpr(), E->getType()); + } + mlir::Value VisitCastExpr(CastExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitCallExpr(const CallExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitStmtExpr(const StmtExpr *E) { llvm_unreachable("NYI"); } + + // Operators. + mlir::Value VisitPrePostIncDec(const UnaryOperator *E, bool isInc, + bool isPre) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryPostDec(const UnaryOperator *E) { + return VisitPrePostIncDec(E, false, false); + } + mlir::Value VisitUnaryPostInc(const UnaryOperator *E) { + return VisitPrePostIncDec(E, true, false); + } + mlir::Value VisitUnaryPreDec(const UnaryOperator *E) { + return VisitPrePostIncDec(E, false, true); + } + mlir::Value VisitUnaryPreInc(const UnaryOperator *E) { + return VisitPrePostIncDec(E, true, true); + } + mlir::Value VisitUnaryDeref(const Expr *E) { llvm_unreachable("NYI"); } + + mlir::Value VisitUnaryPlus(const UnaryOperator *E, + QualType PromotionType = QualType()) { + llvm_unreachable("NYI"); + } + mlir::Value VisitPlus(const UnaryOperator *E, QualType PromotionType) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryMinus(const UnaryOperator *E, + QualType PromotionType = QualType()) { + llvm_unreachable("NYI"); + } + mlir::Value VisitMinus(const UnaryOperator *E, QualType PromotionType) { + llvm_unreachable("NYI"); + } + mlir::Value VisitUnaryNot(const UnaryOperator *E) { llvm_unreachable("NYI"); } + // LNot,Real,Imag never return complex. + mlir::Value VisitUnaryExtension(const UnaryOperator *E) { + return Visit(E->getSubExpr()); + } + mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { + llvm_unreachable("NYI"); + } + mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) { + llvm_unreachable("NYI"); + } + mlir::Value VisitExprWithCleanups(ExprWithCleanups *E) { + CIRGenFunction::RunCleanupsScope Scope(CGF); + mlir::Value V = Visit(E->getSubExpr()); + // Defend against dominance problems caused by jumps out of expression + // evaluation through the shared cleanup block. + Scope.ForceCleanup({&V}); + return V; + } + mlir::Value VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { + llvm_unreachable("NYI"); + } + +#define HANDLEBINOP(OP) \ + mlir::Value VisitBin##OP(const BinaryOperator *E) { llvm_unreachable("NYI"); } + + HANDLEBINOP(Mul) + HANDLEBINOP(Div) + HANDLEBINOP(Add) + HANDLEBINOP(Sub) +#undef HANDLEBINOP + + mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { + llvm_unreachable("NYI"); + } + + // Compound assignments. + mlir::Value VisitBinAddAssign(const CompoundAssignOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitBinSubAssign(const CompoundAssignOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitBinMulAssign(const CompoundAssignOperator *E) { + llvm_unreachable("NYI"); + } + mlir::Value VisitBinDivAssign(const CompoundAssignOperator *E) { + llvm_unreachable("NYI"); + } + + // GCC rejects rem/and/or/xor for integer complex. + // Logical and/or always return int, never complex. + + // No comparisons produce a complex result. + + LValue buildBinAssignLValue(const BinaryOperator *E, mlir::Value &Val); + mlir::Value VisitBinAssign(const BinaryOperator *E) { + mlir::Value Val; + LValue LV = buildBinAssignLValue(E, Val); + + // The result of an assignment in C is the assigned r-value. + if (!CGF.getLangOpts().CPlusPlus) + return Val; + + // If the lvalue is non-volatile, return the computed value of the + // assignment. + if (!LV.isVolatileQualified()) + return Val; + + return buildLoadOfLValue(LV, E->getExprLoc()); + }; + mlir::Value VisitBinComma(const BinaryOperator *E) { + llvm_unreachable("NYI"); + } + + mlir::Value + VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) { + llvm_unreachable("NYI"); + } + mlir::Value VisitChooseExpr(ChooseExpr *CE) { llvm_unreachable("NYI"); } + + mlir::Value VisitInitListExpr(InitListExpr *E); + + mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { + llvm_unreachable("NYI"); + } + + mlir::Value VisitVAArgExpr(VAArgExpr *E) { llvm_unreachable("NYI"); } + + mlir::Value VisitAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); } + + mlir::Value VisitPackIndexingExpr(PackIndexingExpr *E) { + llvm_unreachable("NYI"); + } +}; + +} // namespace + +static const ComplexType *getComplexType(QualType type) { + type = type.getCanonicalType(); + if (const ComplexType *comp = dyn_cast(type)) + return comp; + return cast(cast(type)->getValueType()); +} + +mlir::Value ComplexExprEmitter::buildLoadOfLValue(LValue LV, + SourceLocation Loc) { + assert(LV.isSimple() && "non-simple complex l-value?"); + if (LV.getType()->isAtomicType()) + llvm_unreachable("NYI"); + + Address SrcPtr = LV.getAddress(); + return Builder.createLoad(CGF.getLoc(Loc), SrcPtr, LV.isVolatileQualified()); +} + +void ComplexExprEmitter::buildStoreOfComplex(mlir::Location Loc, + mlir::Value Val, LValue LV, + bool isInit) { + if (LV.getType()->isAtomicType() || + (!isInit && CGF.LValueIsSuitableForInlineAtomic(LV))) + llvm_unreachable("NYI"); + + Address DestAddr = LV.getAddress(); + Builder.createStore(Loc, Val, DestAddr, LV.isVolatileQualified()); +} + +mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, + QualType DestTy) { + switch (CK) { + case CK_Dependent: + llvm_unreachable("dependent cast kind in IR gen!"); + + // Atomic to non-atomic casts may be more than a no-op for some platforms and + // for some types. + case CK_LValueToRValue: + return Visit(Op); + + case CK_AtomicToNonAtomic: + case CK_NonAtomicToAtomic: + case CK_NoOp: + case CK_UserDefinedConversion: + llvm_unreachable("NYI"); + + case CK_LValueBitCast: + llvm_unreachable("NYI"); + + case CK_LValueToRValueBitCast: + llvm_unreachable("NYI"); + + case CK_BitCast: + case CK_BaseToDerived: + case CK_DerivedToBase: + case CK_UncheckedDerivedToBase: + case CK_Dynamic: + case CK_ToUnion: + case CK_ArrayToPointerDecay: + case CK_FunctionToPointerDecay: + case CK_NullToPointer: + case CK_NullToMemberPointer: + case CK_BaseToDerivedMemberPointer: + case CK_DerivedToBaseMemberPointer: + case CK_MemberPointerToBoolean: + case CK_ReinterpretMemberPointer: + case CK_ConstructorConversion: + case CK_IntegralToPointer: + case CK_PointerToIntegral: + case CK_PointerToBoolean: + case CK_ToVoid: + case CK_VectorSplat: + case CK_IntegralCast: + case CK_BooleanToSignedIntegral: + case CK_IntegralToBoolean: + case CK_IntegralToFloating: + case CK_FloatingToIntegral: + case CK_FloatingToBoolean: + case CK_FloatingCast: + case CK_CPointerToObjCPointerCast: + case CK_BlockPointerToObjCPointerCast: + case CK_AnyPointerToBlockPointerCast: + case CK_ObjCObjectLValueCast: + case CK_FloatingComplexToReal: + case CK_FloatingComplexToBoolean: + case CK_IntegralComplexToReal: + case CK_IntegralComplexToBoolean: + case CK_ARCProduceObject: + case CK_ARCConsumeObject: + case CK_ARCReclaimReturnedObject: + case CK_ARCExtendBlockObject: + case CK_CopyAndAutoreleaseBlockObject: + case CK_BuiltinFnToFnPtr: + case CK_ZeroToOCLOpaqueType: + case CK_AddressSpaceConversion: + case CK_IntToOCLSampler: + case CK_FloatingToFixedPoint: + case CK_FixedPointToFloating: + case CK_FixedPointCast: + case CK_FixedPointToBoolean: + case CK_FixedPointToIntegral: + case CK_IntegralToFixedPoint: + case CK_MatrixCast: + case CK_HLSLVectorTruncation: + case CK_HLSLArrayRValue: + llvm_unreachable("invalid cast kind for complex value"); + + case CK_FloatingRealToComplex: + case CK_IntegralRealToComplex: + llvm_unreachable("NYI"); + + case CK_FloatingComplexCast: + case CK_FloatingComplexToIntegralComplex: + case CK_IntegralComplexCast: + case CK_IntegralComplexToFloatingComplex: + llvm_unreachable("NYI"); + } + + llvm_unreachable("unknown cast resulting in complex value"); +} + +LValue ComplexExprEmitter::buildBinAssignLValue(const BinaryOperator *E, + mlir::Value &Val) { + assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), + E->getRHS()->getType()) && + "Invalid assignment"); + + // Emit the RHS. __block variables need the RHS evaluated first. + Val = Visit(E->getRHS()); + + // Compute the address to store into. + LValue LHS = CGF.buildLValue(E->getLHS()); + + // Store the result value into the LHS lvalue. + buildStoreOfComplex(CGF.getLoc(E->getExprLoc()), Val, LHS, /*isInit*/ false); + + return LHS; +} + +mlir::Value +ComplexExprEmitter::VisitImaginaryLiteral(const ImaginaryLiteral *IL) { + auto Loc = CGF.getLoc(IL->getExprLoc()); + auto Ty = mlir::cast(CGF.getCIRType(IL->getType())); + auto ElementTy = Ty.getElementTy(); + + mlir::TypedAttr RealValueAttr; + mlir::TypedAttr ImagValueAttr; + if (mlir::isa(ElementTy)) { + auto ImagValue = cast(IL->getSubExpr())->getValue(); + RealValueAttr = mlir::cir::IntAttr::get(ElementTy, 0); + ImagValueAttr = mlir::cir::IntAttr::get(ElementTy, ImagValue); + } else if (mlir::isa(ElementTy)) { + auto ImagValue = cast(IL->getSubExpr())->getValue(); + RealValueAttr = mlir::cir::FPAttr::get( + ElementTy, llvm::APFloat::getZero(ImagValue.getSemantics())); + ImagValueAttr = mlir::cir::FPAttr::get(ElementTy, ImagValue); + } else + llvm_unreachable("unexpected complex element type"); + + auto RealValue = Builder.getConstant(Loc, RealValueAttr); + auto ImagValue = Builder.getConstant(Loc, ImagValueAttr); + return Builder.createComplexCreate(Loc, RealValue, ImagValue); +} + +mlir::Value ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) { + if (E->getNumInits() == 2) { + mlir::Value Real = CGF.buildScalarExpr(E->getInit(0)); + mlir::Value Imag = CGF.buildScalarExpr(E->getInit(1)); + return Builder.createComplexCreate(CGF.getLoc(E->getExprLoc()), Real, Imag); + } + + if (E->getNumInits() == 1) + return Visit(E->getInit(0)); + + // Empty init list initializes to null + assert(E->getNumInits() == 0 && "Unexpected number of inits"); + QualType Ty = E->getType()->castAs()->getElementType(); + return Builder.getZero(CGF.getLoc(E->getExprLoc()), CGF.ConvertType(Ty)); +} + +mlir::Value CIRGenFunction::buildComplexExpr(const Expr *E) { + assert(E && getComplexType(E->getType()) && + "Invalid complex expression to emit"); + + return ComplexExprEmitter(*this).Visit(const_cast(E)); +} + +void CIRGenFunction::buildComplexExprIntoLValue(const Expr *E, LValue dest, + bool isInit) { + assert(E && getComplexType(E->getType()) && + "Invalid complex expression to emit"); + ComplexExprEmitter Emitter(*this); + mlir::Value Val = Emitter.Visit(const_cast(E)); + Emitter.buildStoreOfComplex(getLoc(E->getExprLoc()), Val, dest, isInit); +} + +void CIRGenFunction::buildStoreOfComplex(mlir::Location Loc, mlir::Value V, + LValue dest, bool isInit) { + ComplexExprEmitter(*this).buildStoreOfComplex(Loc, V, dest, isInit); +} + +Address CIRGenFunction::buildAddrOfRealComponent(mlir::Location loc, + Address addr, + QualType complexType) { + return builder.createRealPtr(loc, addr); +} + +Address CIRGenFunction::buildAddrOfImagComponent(mlir::Location loc, + Address addr, + QualType complexType) { + return builder.createImagPtr(loc, addr); +} + +LValue CIRGenFunction::buildComplexAssignmentLValue(const BinaryOperator *E) { + assert(E->getOpcode() == BO_Assign); + mlir::Value Val; // ignored + LValue LVal = ComplexExprEmitter(*this).buildBinAssignLValue(E, Val); + if (getLangOpts().OpenMP) + llvm_unreachable("NYI"); + return LVal; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index c7289d62aa14..962d74acd498 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -654,12 +654,11 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitUnaryLNot(const UnaryOperator *E); - mlir::Value VisitUnaryReal(const UnaryOperator *E) { - llvm_unreachable("NYI"); - } - mlir::Value VisitUnaryImag(const UnaryOperator *E) { - llvm_unreachable("NYI"); - } + mlir::Value VisitUnaryReal(const UnaryOperator *E) { return VisitReal(E); } + mlir::Value VisitUnaryImag(const UnaryOperator *E) { return VisitImag(E); } + + mlir::Value VisitReal(const UnaryOperator *E); + mlir::Value VisitImag(const UnaryOperator *E); mlir::Value VisitUnaryExtension(const UnaryOperator *E) { // __extension__ doesn't requred any codegen @@ -1911,6 +1910,42 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { llvm_unreachable("destination type for logical-not unary operator is NYI"); } +mlir::Value ScalarExprEmitter::VisitReal(const UnaryOperator *E) { + // TODO(cir): handle scalar promotion. + + Expr *Op = E->getSubExpr(); + if (Op->getType()->isAnyComplexType()) { + // If it's an l-value, load through the appropriate subobject l-value. + // Note that we have to ask E because Op might be an l-value that + // this won't work for, e.g. an Obj-C property. + if (E->isGLValue()) + return CGF.buildLoadOfLValue(CGF.buildLValue(E), E->getExprLoc()) + .getScalarVal(); + // Otherwise, calculate and project. + llvm_unreachable("NYI"); + } + + return Visit(Op); +} + +mlir::Value ScalarExprEmitter::VisitImag(const UnaryOperator *E) { + // TODO(cir): handle scalar promotion. + + Expr *Op = E->getSubExpr(); + if (Op->getType()->isAnyComplexType()) { + // If it's an l-value, load through the appropriate subobject l-value. + // Note that we have to ask E because Op might be an l-value that + // this won't work for, e.g. an Obj-C property. + if (E->isGLValue()) + return CGF.buildLoadOfLValue(CGF.buildLValue(E), E->getExprLoc()) + .getScalarVal(); + // Otherwise, calculate and project. + llvm_unreachable("NYI"); + } + + return Visit(Op); +} + // Conversion from bool, integral, or floating-point to integral or // floating-point. Conversions involving other types are handled elsewhere. // Conversion to bool is handled elsewhere because that's a comparison against diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 79cd01967ded..8d40f29439ce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -580,6 +580,22 @@ class CIRGenFunction : public CIRGenTypeCache { void buildAggExpr(const clang::Expr *E, AggValueSlot Slot); + /// Emit the computation of the specified expression of complex type, + /// returning the result. + mlir::Value buildComplexExpr(const Expr *E); + + void buildComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit); + + void buildStoreOfComplex(mlir::Location Loc, mlir::Value V, LValue dest, + bool isInit); + + Address buildAddrOfRealComponent(mlir::Location loc, Address complex, + QualType complexType); + Address buildAddrOfImagComponent(mlir::Location loc, Address complex, + QualType complexType); + + LValue buildComplexAssignmentLValue(const BinaryOperator *E); + /// Emits a reference binding to the passed in expression. RValue buildReferenceBindingToExpr(const Expr *E); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index ba7aa4435ab0..2b6f4c49c655 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -504,7 +504,9 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { builder.CIRBaseBuilderTy::createStore(loc, V, *FnRetAlloca); break; case TEK_Complex: - llvm_unreachable("NYI"); + buildComplexExprIntoLValue(RV, + makeAddrLValue(ReturnValue, RV->getType()), + /*isInit*/ true); break; case TEK_Aggregate: buildAggExpr( diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index d4c38430e2e0..0010a83fe6b6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -597,7 +597,9 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case Type::DeducedTemplateSpecialization: llvm_unreachable("Unexpected undeduced type!"); case Type::Complex: { - assert(0 && "not implemented"); + const ComplexType *CT = cast(Ty); + auto ElementTy = ConvertType(CT->getElementType()); + ResultType = ::mlir::cir::ComplexType::get(Builder.getContext(), ElementTy); break; } case Type::LValueReference: diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 53280e765e20..c31ed30a183a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -92,13 +92,12 @@ class RValue { ER.V2.setInt(false); return ER; } - static RValue getComplex(mlir::Value V1, mlir::Value V2) { - assert(0 && "not implemented"); - return RValue{}; - } - static RValue getComplex(const std::pair &C) { - assert(0 && "not implemented"); - return RValue{}; + static RValue getComplex(mlir::Value V) { + RValue ER; + ER.V1.setPointer(V); + ER.V1.setInt(Complex); + ER.V2.setInt(false); + return ER; } // FIXME: Aggregate rvalues need to retain information about whether they are // volatile or not. Remove default to find all places that probably get this diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 40134a0cb113..e1d96bdbf65e 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -22,6 +22,7 @@ add_clang_library(clangCIR CIRGenDeclCXX.cpp CIRGenException.cpp CIRGenExpr.cpp + CIRGenExprComplex.cpp CIRGenExprConst.cpp CIRGenExprAgg.cpp CIRGenExprCXX.cpp diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9a827e572fe1..a90eee0e934e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -316,7 +316,8 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, } if (isa(attrType)) { - if (::mlir::isa<::mlir::cir::StructType, ::mlir::cir::ArrayType>(opType)) + if (::mlir::isa<::mlir::cir::StructType, ::mlir::cir::ArrayType, + ::mlir::cir::ComplexType>(opType)) return success(); return op->emitOpError("zero expects struct or array type"); } @@ -548,6 +549,58 @@ LogicalResult DynamicCastOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// ComplexCreateOp +//===----------------------------------------------------------------------===// + +LogicalResult ComplexCreateOp::verify() { + if (getType().getElementTy() != getReal().getType()) { + emitOpError() + << "operand type of cir.complex.create does not match its result type"; + return failure(); + } + + return success(); +} + +//===----------------------------------------------------------------------===// +// ComplexRealPtrOp and ComplexImagPtrOp +//===----------------------------------------------------------------------===// + +LogicalResult ComplexRealPtrOp::verify() { + auto resultPointeeTy = + mlir::cast(getType()).getPointee(); + auto operandPtrTy = + mlir::cast(getOperand().getType()); + auto operandPointeeTy = + mlir::cast(operandPtrTy.getPointee()); + + if (resultPointeeTy != operandPointeeTy.getElementTy()) { + emitOpError() + << "cir.complex.real_ptr result type does not match operand type"; + return failure(); + } + + return success(); +} + +LogicalResult ComplexImagPtrOp::verify() { + auto resultPointeeTy = + mlir::cast(getType()).getPointee(); + auto operandPtrTy = + mlir::cast(getOperand().getType()); + auto operandPointeeTy = + mlir::cast(operandPtrTy.getPointee()); + + if (resultPointeeTy != operandPointeeTy.getElementTy()) { + emitOpError() + << "cir.complex.imag_ptr result type does not match operand type"; + return failure(); + } + + return success(); +} + //===----------------------------------------------------------------------===// // VecCreateOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 07a7339374dc..10d581e826b4 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -26,6 +26,7 @@ #include "mlir/Support/LogicalResult.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "clang/CIR/Interfaces/CIRFPTypeInterface.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" @@ -818,6 +819,59 @@ bool mlir::cir::isFPOrFPVectorTy(mlir::Type t) { return isAnyFloatingPointType(t); } +//===----------------------------------------------------------------------===// +// ComplexType Definitions +//===----------------------------------------------------------------------===// + +mlir::LogicalResult mlir::cir::ComplexType::verify( + llvm::function_ref emitError, + mlir::Type elementTy) { + if (!mlir::isa( + elementTy)) { + emitError() << "element type of !cir.complex must be either a " + "floating-point type or an integer type"; + return failure(); + } + + return success(); +} + +llvm::TypeSize mlir::cir::ComplexType::getTypeSizeInBits( + const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + // C17 6.2.5p13: + // Each complex type has the same representation and alignment requirements + // as an array type containing exactly two elements of the corresponding + // real type. + + auto elementTy = getElementTy(); + return dataLayout.getTypeSizeInBits(elementTy) * 2; +} + +uint64_t mlir::cir::ComplexType::getABIAlignment( + const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + // C17 6.2.5p13: + // Each complex type has the same representation and alignment requirements + // as an array type containing exactly two elements of the corresponding + // real type. + + auto elementTy = getElementTy(); + return dataLayout.getTypeABIAlignment(elementTy); +} + +uint64_t mlir::cir::ComplexType::getPreferredAlignment( + const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + // C17 6.2.5p13: + // Each complex type has the same representation and alignment requirements + // as an array type containing exactly two elements of the corresponding + // real type. + + auto elementTy = getElementTy(); + return dataLayout.getTypePreferredAlignment(elementTy); +} + //===----------------------------------------------------------------------===// // FuncType Definitions //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/complex.c b/clang/test/CIR/CodeGen/complex.c new file mode 100644 index 000000000000..43fcdb76156f --- /dev/null +++ b/clang/test/CIR/CodeGen/complex.c @@ -0,0 +1,153 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=C,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CPP,CHECK %s + +double _Complex c, c2; +int _Complex ci, ci2; + +volatile double _Complex vc, vc2; +volatile int _Complex vci, vci2; + +void list_init() { + double _Complex c1 = {1.0, 2.0}; + int _Complex c2 = {1, 2}; +} + +// C: cir.func no_proto @list_init() +// CPP: cir.func @_Z9list_initv() +// CHECK: %[[#REAL:]] = cir.const #cir.fp<1.000000e+00> : !cir.double +// CHECK-NEXT: %[[#IMAG:]] = cir.const #cir.fp<2.000000e+00> : !cir.double +// CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !cir.double -> !cir.complex +// CHECK: %[[#REAL:]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %[[#IMAG:]] = cir.const #cir.int<2> : !s32i +// CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex +// CHECK: } + +void list_init_2(double r, double i) { + double _Complex c1 = {r, i}; +} + +// C: cir.func @list_init_2 +// CPP: cir.func @_Z11list_init_2dd +// CHECK: %[[#R:]] = cir.load %{{.+}} : !cir.ptr, !cir.double +// CHECK-NEXT: %[[#I:]] = cir.load %{{.+}} : !cir.ptr, !cir.double +// CHECK-NEXT: %[[#C:]] = cir.complex.create %[[#R]], %[[#I]] : !cir.double -> !cir.complex +// CHECK-NEXT: cir.store %[[#C]], %{{.+}} : !cir.complex, !cir.ptr> +// CHECK: } + +void imag_literal() { + c = 3.0i; + ci = 3i; +} + +// C: cir.func no_proto @imag_literal() +// CPP: cir.func @_Z12imag_literalv() +// CHECK: %[[#REAL:]] = cir.const #cir.fp<0.000000e+00> : !cir.double +// CHECK-NEXT: %[[#IMAG:]] = cir.const #cir.fp<3.000000e+00> : !cir.double +// CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !cir.double -> !cir.complex +// CHECK: %[[#REAL:]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: %[[#IMAG:]] = cir.const #cir.int<3> : !s32i +// CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex +// CHECK: } + +void load_store() { + c = c2; + ci = ci2; +} + +// C: cir.func no_proto @load_store() +// CPP: cir.func @_Z10load_storev() +// CHECK-NEXT: %[[#C2_PTR:]] = cir.get_global @c2 : !cir.ptr> +// CHECK-NEXT: %[[#C2:]] = cir.load %[[#C2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-NEXT: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-NEXT: cir.store %[[#C2]], %[[#C_PTR]] : !cir.complex, !cir.ptr> +// CHECK-NEXT: %[[#CI2_PTR:]] = cir.get_global @ci2 : !cir.ptr> +// CHECK-NEXT: %[[#CI2:]] = cir.load %[[#CI2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-NEXT: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-NEXT: cir.store %[[#CI2]], %[[#CI_PTR]] : !cir.complex, !cir.ptr> +// CHECK: } + +void load_store_volatile() { + vc = vc2; + vci = vci2; +} + +// C: cir.func no_proto @load_store_volatile() +// CPP: cir.func @_Z19load_store_volatilev() +// CHECK-NEXT: %[[#VC2_PTR:]] = cir.get_global @vc2 : !cir.ptr> +// CHECK-NEXT: %[[#VC2:]] = cir.load volatile %[[#VC2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-NEXT: %[[#VC_PTR:]] = cir.get_global @vc : !cir.ptr> +// CHECK-NEXT: cir.store volatile %[[#VC2]], %[[#VC_PTR]] : !cir.complex, !cir.ptr> +// CHECK-NEXT: %[[#VCI2_PTR:]] = cir.get_global @vci2 : !cir.ptr> +// CHECK-NEXT: %[[#VCI2:]] = cir.load volatile %[[#VCI2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-NEXT: %[[#VCI_PTR:]] = cir.get_global @vci : !cir.ptr> +// CHECK-NEXT: cir.store volatile %[[#VCI2]], %[[#VCI_PTR]] : !cir.complex, !cir.ptr> +// CHECK: } + +void real_ptr() { + double *r1 = &__real__ c; + int *r2 = &__real__ ci; +} + +// C: cir.func no_proto @real_ptr() +// CPP: cir.func @_Z8real_ptrv() +// CHECK: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-NEXT: %{{.+}} = cir.complex.real_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-NEXT: %{{.+}} = cir.complex.real_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK: } + +void real_ptr_local() { + double _Complex c1 = {1.0, 2.0}; + double *r3 = &__real__ c1; +} + +// C: cir.func no_proto @real_ptr_local() +// CPP: cir.func @_Z14real_ptr_localv() +// CHECK: %[[#C:]] = cir.alloca !cir.complex, !cir.ptr> +// CHECK: %{{.+}} = cir.complex.real_ptr %[[#C]] : !cir.ptr> -> !cir.ptr +// CHECK: } + +void extract_real() { + double r1 = __real__ c; + int r2 = __real__ ci; +} + +// C: cir.func no_proto @extract_real() +// CPP: cir.func @_Z12extract_realv() +// CHECK: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-NEXT: %[[#REAL_PTR:]] = cir.complex.real_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %{{.+}} = cir.load %[[#REAL_PTR]] : !cir.ptr, !cir.double +// CHECK: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-NEXT: %[[#REAL_PTR:]] = cir.complex.real_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %{{.+}} = cir.load %[[#REAL_PTR]] : !cir.ptr, !s32i +// CHECK: } + +void imag_ptr() { + double *i1 = &__imag__ c; + int *i2 = &__imag__ ci; +} + +// C: cir.func no_proto @imag_ptr() +// CPP: cir.func @_Z8imag_ptrv() +// CHECK: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-NEXT: %{{.+}} = cir.complex.imag_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-NEXT: %{{.+}} = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK: } + +void extract_imag() { + double i1 = __imag__ c; + int i2 = __imag__ ci; +} + +// C: cir.func no_proto @extract_imag() +// CPP: cir.func @_Z12extract_imagv() +// CHECK: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-NEXT: %[[#IMAG_PTR:]] = cir.complex.imag_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %{{.+}} = cir.load %[[#IMAG_PTR]] : !cir.ptr, !cir.double +// CHECK: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-NEXT: %[[#IMAG_PTR:]] = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %{{.+}} = cir.load %[[#IMAG_PTR]] : !cir.ptr, !s32i +// CHECK: } From f82702107543309e5094e21ef6a91adc6d816969 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 3 Jul 2024 15:17:34 -0700 Subject: [PATCH 1668/2301] [CIR][CIRGen] Defer template printing to existing machinery There's no good reason to add our own switch here, given there's existing machinery to compute these names. Fallback to that instead. --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 12 ++---------- clang/test/CIR/CodeGen/std-array.cpp | 2 +- clang/test/CIR/CodeGen/std-find.cpp | 2 +- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 0010a83fe6b6..0498bd902829 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -70,16 +70,8 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, outStream << '<'; const auto args = templateSpecialization->getTemplateArgs().asArray(); const auto printer = [&policy, &outStream](const TemplateArgument &arg) { - switch (arg.getKind()) { - case TemplateArgument::Integral: - outStream << arg.getAsIntegral(); - break; - case TemplateArgument::Type: - arg.getAsType().print(outStream, policy); - break; - default: - llvm_unreachable("NYI"); - } + /// Print this template argument to the given output stream. + arg.print(policy, outStream, /*IncludeType=*/true); }; llvm::interleaveComma(args, outStream, printer); outStream << '>'; diff --git a/clang/test/CIR/CodeGen/std-array.cpp b/clang/test/CIR/CodeGen/std-array.cpp index 7b7fe1f86782..a360a0a37d44 100644 --- a/clang/test/CIR/CodeGen/std-array.cpp +++ b/clang/test/CIR/CodeGen/std-array.cpp @@ -8,7 +8,7 @@ void t() { (void)v.end(); } -// CHECK: ![[array:.*]] = !cir.struct" +// CHECK: ![[array:.*]] = !cir.struct" // CHECK: {{.*}} = cir.get_member // CHECK: {{.*}} = cir.cast(array_to_ptrdecay diff --git a/clang/test/CIR/CodeGen/std-find.cpp b/clang/test/CIR/CodeGen/std-find.cpp index ec3ac05eb23a..73494ba8b308 100644 --- a/clang/test/CIR/CodeGen/std-find.cpp +++ b/clang/test/CIR/CodeGen/std-find.cpp @@ -3,7 +3,7 @@ #include "std-cxx.h" -// CHECK: ![[array:.*]] = !cir.struct" +// CHECK: ![[array:.*]] = !cir.struct" int test_find(unsigned char n = 3) { From e1cdb9d34011387b70f94f2c4efc0a878d5aed66 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 3 Jul 2024 17:40:58 -0700 Subject: [PATCH 1669/2301] [CIR][CIRGen] Add support for operator new with null checks --- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 30 +++++++++- clang/test/CIR/CodeGen/new-null.cpp | 79 +++++++++++++++++++++++++ 2 files changed, 107 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/new-null.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index e3ce7adb0be7..07a6c01672e1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -891,11 +891,30 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { // The null-check means that the initializer is conditionally // evaluated. ConditionalEvaluation conditional(*this); + mlir::OpBuilder::InsertPoint ifBody, postIfBody; + mlir::Location loc = getLoc(E->getSourceRange()); if (nullCheck) { - llvm_unreachable("NYI"); + conditional.begin(*this); + mlir::Value nullPtr = + builder.getNullPtr(allocation.getPointer().getType(), loc); + mlir::Value nullCmpResult = builder.createCompare( + loc, mlir::cir::CmpOpKind::ne, allocation.getPointer(), nullPtr); + + // mlir::Value Failed = CGF.getBuilder().createNot(Success); + builder.create(loc, nullCmpResult, + /*withElseRegion=*/false, + [&](mlir::OpBuilder &, mlir::Location) { + ifBody = builder.saveInsertionPoint(); + }); + postIfBody = builder.saveInsertionPoint(); } + // All the actual work to be done should be placed inside the IfOp above, + // so change the insertion point over there. + if (ifBody.isSet()) + builder.restoreInsertionPoint(ifBody); + // If there's an operator delete, enter a cleanup to call it if an // exception is thrown. EHScopeStack::stable_iterator operatorDeleteCleanup; @@ -957,7 +976,14 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { } if (nullCheck) { - llvm_unreachable("NYI"); + conditional.end(*this); + // resultPtr is already updated in the first null check phase. + + // Reset insertion point to resume back to post ifOp. + if (postIfBody.isSet()) { + builder.create(loc); + builder.restoreInsertionPoint(postIfBody); + } } return resultPtr; diff --git a/clang/test/CIR/CodeGen/new-null.cpp b/clang/test/CIR/CodeGen/new-null.cpp new file mode 100644 index 000000000000..001423966b12 --- /dev/null +++ b/clang/test/CIR/CodeGen/new-null.cpp @@ -0,0 +1,79 @@ +// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-linux-gnu %s -fclangir -emit-cir -o %t.cir +// RUN: FileCheck --input-file=%t.cir -check-prefix=CIR %s +// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-linux-gnu %s -fclangir -emit-llvm -o %t.ll +// RUN: FileCheck --input-file=%t.ll -check-prefix=LLVM %s + +// TODO: This file is inspired by clang/test/CodeGenCXX/new.cpp, add all tests from it. + +typedef __typeof__(sizeof(0)) size_t; + +// Declare an 'operator new' template to tickle a bug in __builtin_operator_new. +template void *operator new(size_t, int (*)(T)); + +// Ensure that this declaration doesn't cause operator new to lose its +// 'noalias' attribute. +void *operator new[](size_t); + +namespace std { + struct nothrow_t {}; +} +std::nothrow_t nothrow; + +// Declare the reserved placement operators. +void *operator new(size_t, void*) throw(); +void operator delete(void*, void*) throw(); +void *operator new[](size_t, void*) throw(); +void operator delete[](void*, void*) throw(); + +// Declare the replaceable global allocation operators. +void *operator new(size_t, const std::nothrow_t &) throw(); +void *operator new[](size_t, const std::nothrow_t &) throw(); +void operator delete(void *, const std::nothrow_t &) throw(); +void operator delete[](void *, const std::nothrow_t &) throw(); + +// Declare some other placemenet operators. +void *operator new(size_t, void*, bool) throw(); +void *operator new[](size_t, void*, bool) throw(); + +namespace test15 { + struct A { A(); ~A(); }; + // CIR-DAG: ![[TEST15A:.*]] = !cir.struct + // CIR: %[[VAL_1:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} + // CIR: cir.store %[[VAL_0]], %[[VAL_1]] : !cir.ptr, !cir.ptr> + // CIR: %[[VAL_2:.*]] = cir.const #cir.int<1> : !u64i + // CIR: %[[VAL_3:.*]] = cir.load %[[VAL_1]] : !cir.ptr>, !cir.ptr + // CIR: %[[VAL_4:.*]] = cir.const #true + // CIR: %[[VAL_5:.*]] = cir.call @_ZnwmPvb(%[[VAL_2]], %[[VAL_3]], %[[VAL_4]]) + // CIR: %[[VAL_6:.*]] = cir.const #cir.ptr : !cir.ptr + // CIR: %[[VAL_7:.*]] = cir.cmp(ne, %[[VAL_5]], %[[VAL_6]]) : !cir.ptr, !cir.bool + // CIR: cir.if %[[VAL_7]] { + // CIR: %[[VAL_8:.*]] = cir.cast(bitcast, %[[VAL_5]] : !cir.ptr), !cir.ptr + // CIR: cir.call @_ZN6test151AC1Ev(%[[VAL_8]]) : (!cir.ptr) -> () + // CIR: } + // CIR: cir.return + // CIR: } + + // LLVM-LABEL: _ZN6test156test0bEPv + // LLVM: %[[VAL_0:.*]] = alloca ptr, i64 1, align 8 + // LLVM: store ptr %[[VAL_1:.*]], ptr %[[VAL_0]], align 8 + // LLVM: %[[VAL_2:.*]] = load ptr, ptr %[[VAL_0]], align 8 + // LLVM: %[[VAL_3:.*]] = call ptr @_ZnwmPvb(i64 1, ptr %[[VAL_2]], i8 1) + // LLVM: %[[VAL_4:.*]] = icmp ne ptr %[[VAL_3]], null + // LLVM: br i1 %[[VAL_4]], label %[[VAL_5:.*]], label %[[VAL_6:.*]], + // LLVM: [[VAL_5]]: ; preds = %[[VAL_7:.*]] + // LLVM: call void @_ZN6test151AC1Ev(ptr %[[VAL_3]]) + // LLVM: br label %[[VAL_6]], + // LLVM: [[VAL_6]]: ; preds = %[[VAL_5]], %[[VAL_7]] + // LLVM: ret void + + void test0b(void *p) { + new (p, true) A(); + } +} \ No newline at end of file From be9934c9d155727b79c2fdfd60e7a5afd00c1791 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 8 Jul 2024 11:43:45 -0700 Subject: [PATCH 1670/2301] [CIR][CIRGen] Builtins: Lower __builtin___clear_cache to CIR LLVM support coming next. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 18 +++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 11 +++++++++ clang/test/CIR/CodeGen/clear_cache.c | 24 ++++++++++++++++++++ 3 files changed, 53 insertions(+) create mode 100644 clang/test/CIR/CodeGen/clear_cache.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8aedb481eab2..6da55ba8fa43 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3790,6 +3790,24 @@ def PrefetchOp : CIR_Op<"prefetch"> { }]; } +//===----------------------------------------------------------------------===// +// ClearCacheOp +//===----------------------------------------------------------------------===// + +def ClearCacheOp : CIR_Op<"clear_cache", [AllTypesMatch<["begin", "end"]>]> { + let summary = "clear cache operation"; + let description = [{ + CIR representation for `__builtin___clear_cache`. + }]; + + let arguments = (ins VoidPtr:$begin, VoidPtr:$end); + let assemblyFormat = [{ + $begin `:` qualified(type($begin)) `,` + $end `,` + attr-dict + }]; +} + //===----------------------------------------------------------------------===// // ArrayCtor & ArrayDtor //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 5c5e444a1fe1..97a0c08ef3c6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -788,6 +788,17 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(nullptr); } + case Builtin::BI__builtin___clear_cache: { + mlir::Type voidTy = mlir::cir::VoidType::get(builder.getContext()); + mlir::Value begin = + builder.createPtrBitcast(buildScalarExpr(E->getArg(0)), voidTy); + mlir::Value end = + builder.createPtrBitcast(buildScalarExpr(E->getArg(1)), voidTy); + builder.create(getLoc(E->getSourceRange()), begin, + end); + return RValue::get(nullptr); + } + // C++ std:: builtins. case Builtin::BImove: case Builtin::BImove_if_noexcept: diff --git a/clang/test/CIR/CodeGen/clear_cache.c b/clang/test/CIR/CodeGen/clear_cache.c new file mode 100644 index 000000000000..c4c02858becf --- /dev/null +++ b/clang/test/CIR/CodeGen/clear_cache.c @@ -0,0 +1,24 @@ +// RUNAA: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu %s -fclangir -emit-cir -o %t.cir +// RUN: FileCheck --input-file=%t.cir -check-prefix=CIR %s + +char buffer[32] = "This is a largely unused buffer"; + +// __builtin___clear_cache always maps to @llvm.clear_cache, but what +// each back-end produces is different, and this is tested in LLVM + +// CIR-LABEL: main +// CIR: %[[VAL_1:.*]] = cir.get_global @buffer : !cir.ptr> +// CIR: %[[VAL_2:.*]] = cir.cast(array_to_ptrdecay, %[[VAL_1]] : !cir.ptr>), !cir.ptr +// CIR: %[[VAL_3:.*]] = cir.cast(bitcast, %[[VAL_2]] : !cir.ptr), !cir.ptr +// CIR: %[[VAL_4:.*]] = cir.get_global @buffer : !cir.ptr> +// CIR: %[[VAL_5:.*]] = cir.cast(array_to_ptrdecay, %[[VAL_4]] : !cir.ptr>), !cir.ptr +// CIR: %[[VAL_6:.*]] = cir.const #cir.int<32> : !s32i +// CIR: %[[VAL_7:.*]] = cir.ptr_stride(%[[VAL_5]] : !cir.ptr, %[[VAL_6]] : !s32i), !cir.ptr +// CIR: %[[VAL_8:.*]] = cir.cast(bitcast, %[[VAL_7]] : !cir.ptr), !cir.ptr +// CIR: cir.clear_cache %[[VAL_3]] : !cir.ptr, %[[VAL_8]], + +int main(void) { + __builtin___clear_cache(buffer, buffer+32); + return 0; +} From 22fe5b3e1a40271075bf64c31dad4e45aead61d1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 8 Jul 2024 12:32:01 -0700 Subject: [PATCH 1671/2301] [CIR][LowerToLLVM] Builtins: Lower cir.clear_cache to LLVM --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 22 ++++++++++++++++++- clang/test/CIR/CodeGen/clear_cache.c | 6 ++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 999754c84f57..b6cc6433d43d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3335,6 +3335,25 @@ using CIRFMaxOpLowering = using CIRFMinOpLowering = CIRBinaryFPToFPBuiltinOpLowering; +class CIRClearCacheOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ClearCacheOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto begin = adaptor.getBegin(); + auto end = adaptor.getEnd(); + auto intrinNameAttr = + mlir::StringAttr::get(op.getContext(), "llvm.clear_cache"); + rewriter.replaceOpWithNewOp( + op, mlir::Type{}, intrinNameAttr, mlir::ValueRange{begin, end}); + + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -3364,7 +3383,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRCeilOpLowering, CIRFloorOpLowering, CIRFAbsOpLowering, CIRNearbyintOpLowering, CIRRintOpLowering, CIRRoundOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, CIRFMaxOpLowering, - CIRFMinOpLowering>(converter, patterns.getContext()); + CIRFMinOpLowering, CIRClearCacheOpLowering>(converter, + patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/clear_cache.c b/clang/test/CIR/CodeGen/clear_cache.c index c4c02858becf..7b649e068a19 100644 --- a/clang/test/CIR/CodeGen/clear_cache.c +++ b/clang/test/CIR/CodeGen/clear_cache.c @@ -1,6 +1,7 @@ -// RUNAA: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu %s -fclangir -emit-cir -o %t.cir // RUN: FileCheck --input-file=%t.cir -check-prefix=CIR %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu %s -fclangir -emit-llvm -o %t.ll +// RUN: FileCheck --input-file=%t.ll -check-prefix=LLVM %s char buffer[32] = "This is a largely unused buffer"; @@ -18,6 +19,9 @@ char buffer[32] = "This is a largely unused buffer"; // CIR: %[[VAL_8:.*]] = cir.cast(bitcast, %[[VAL_7]] : !cir.ptr), !cir.ptr // CIR: cir.clear_cache %[[VAL_3]] : !cir.ptr, %[[VAL_8]], +// LLVM-LABEL: main +// LLVM: call void @llvm.clear_cache(ptr @buffer, ptr getelementptr (i8, ptr @buffer, i64 32)), + int main(void) { __builtin___clear_cache(buffer, buffer+32); return 0; From 8b2641087368fb875c3cef84250702dc983e5408 Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Tue, 9 Jul 2024 03:49:29 +0800 Subject: [PATCH 1672/2301] [CIR][ThroughMLIR] Fix floating GlobalOp lowering without initialized value (#719) This commit fixes GlobalOp lowering for floating without initial value. It implies to be initialized with zeros. E.g. float f[100]; double d; --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 22 +++++++++++++++---- .../test/CIR/Lowering/ThroughMLIR/global.cpp | 8 +++++++ 2 files changed, 26 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/global.cpp diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 8cf7f226570e..4693e58ebf61 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -938,12 +938,26 @@ class CIRGlobalOpLowering if (init.has_value()) { if (auto constArr = mlir::dyn_cast(init.value())) { if (memrefType.getShape().size()) { - auto rtt = mlir::RankedTensorType::get(memrefType.getShape(), - memrefType.getElementType()); - initialValue = mlir::DenseIntElementsAttr::get(rtt, 0); + auto elementType = memrefType.getElementType(); + auto rtt = + mlir::RankedTensorType::get(memrefType.getShape(), elementType); + if (mlir::isa(elementType)) + initialValue = mlir::DenseIntElementsAttr::get(rtt, 0); + else if (mlir::isa(elementType)) { + auto floatZero = mlir::FloatAttr::get(elementType, 0.0).getValue(); + initialValue = mlir::DenseFPElementsAttr::get(rtt, floatZero); + } else + llvm_unreachable("GlobalOp lowering unsuppored element type"); } else { auto rtt = mlir::RankedTensorType::get({}, convertedType); - initialValue = mlir::DenseIntElementsAttr::get(rtt, 0); + if (mlir::isa(convertedType)) + initialValue = mlir::DenseIntElementsAttr::get(rtt, 0); + else if (mlir::isa(convertedType)) { + auto floatZero = + mlir::FloatAttr::get(convertedType, 0.0).getValue(); + initialValue = mlir::DenseFPElementsAttr::get(rtt, floatZero); + } else + llvm_unreachable("GlobalOp lowering unsuppored type"); } } else if (auto intAttr = mlir::dyn_cast(init.value())) { diff --git a/clang/test/CIR/Lowering/ThroughMLIR/global.cpp b/clang/test/CIR/Lowering/ThroughMLIR/global.cpp new file mode 100644 index 000000000000..412ffd6b8d89 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/global.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +float f[32000]; +double d; + +// CHECK: memref.global "public" @f : memref<32000xf32> = dense<0.000000e+00> +// CHECK: memref.global "public" @d : memref = dense<0.000000e+00> From 851a3ba5dce6adcc4f3bd427ecebdee9128d3042 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 8 Jul 2024 16:09:03 -0700 Subject: [PATCH 1673/2301] Revert "[CIR][Transforms] Move RemoveRedundantBranches logic into BrOp::fold method (#663)" This reverts commit 3b9f69879d1776d424b7d7c92df7644712cff853. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 - clang/lib/CIR/CodeGen/CIRPasses.cpp | 1 - clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 36 ----- .../CIR/Dialect/Transforms/MergeCleanups.cpp | 35 +++++ clang/test/CIR/CodeGen/goto.cpp | 106 +++++++++------ clang/test/CIR/CodeGen/switch-gnurange.cpp | 10 ++ clang/test/CIR/CodeGen/var-arg-scope.c | 8 +- clang/test/CIR/Lowering/ThroughMLIR/goto.cir | 14 +- clang/test/CIR/Lowering/cast.cir | 90 +++++-------- clang/test/CIR/Lowering/dot.cir | 18 ++- clang/test/CIR/Lowering/goto.cir | 14 +- clang/test/CIR/Lowering/loop.cir | 12 +- clang/test/CIR/Lowering/loops-with-break.cir | 55 +++++++- .../test/CIR/Lowering/loops-with-continue.cir | 127 ++++++++++++------ clang/test/CIR/Lowering/region-simplify.cir | 7 +- clang/test/CIR/Lowering/scope.cir | 18 ++- clang/test/CIR/Lowering/switch.cir | 37 +++-- clang/test/CIR/Lowering/ternary.cir | 2 + clang/test/CIR/Lowering/unary-not.cir | 67 +++------ clang/test/CIR/Transforms/loop.cir | 16 ++- clang/test/CIR/Transforms/scope.cir | 8 +- clang/test/CIR/Transforms/switch.cir | 40 ++++-- clang/test/CIR/Transforms/ternary.cir | 6 + 23 files changed, 442 insertions(+), 287 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 6da55ba8fa43..bb1252c9c62c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1683,8 +1683,6 @@ def BrOp : CIR_Op<"br", let assemblyFormat = [{ $dest (`(` $destOperands^ `:` type($destOperands) `)`)? attr-dict }]; - - let hasFolder = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 4c4982d7f599..dcc613a89925 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -91,7 +91,6 @@ namespace mlir { void populateCIRPreLoweringPasses(OpPassManager &pm) { pm.addPass(createFlattenCFGPass()); pm.addPass(createGotoSolverPass()); - pm.addPass(createMergeCleanupsPass()); } } // namespace mlir diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a90eee0e934e..166fa7a2df09 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1000,42 +1000,6 @@ mlir::SuccessorOperands BrOp::getSuccessorOperands(unsigned index) { Block *BrOp::getSuccessorForOperands(ArrayRef) { return getDest(); } -/// Removes branches between two blocks if it is the only branch. -/// -/// From: -/// ^bb0: -/// cir.br ^bb1 -/// ^bb1: // pred: ^bb0 -/// cir.return -/// -/// To: -/// ^bb0: -/// cir.return -LogicalResult BrOp::fold(FoldAdaptor adaptor, - SmallVectorImpl &results) { - Block *block = getOperation()->getBlock(); - Block *dest = getDest(); - - if (isa(dest->front())) { - return failure(); - } - - if (block->getNumSuccessors() == 1 && dest->getSinglePredecessor() == block) { - getOperation()->erase(); - block->getOperations().splice(block->end(), dest->getOperations()); - auto eraseBlock = [](Block *block) { - for (auto &op : llvm::make_early_inc_range(*block)) - op.erase(); - block->erase(); - }; - eraseBlock(dest); - - return success(); - } - - return failure(); -} - //===----------------------------------------------------------------------===// // BrCondOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index 3168d5e19384..2bddd9c46135 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -23,6 +23,40 @@ using namespace cir; namespace { +/// Removes branches between two blocks if it is the only branch. +/// +/// From: +/// ^bb0: +/// cir.br ^bb1 +/// ^bb1: // pred: ^bb0 +/// cir.return +/// +/// To: +/// ^bb0: +/// cir.return +struct RemoveRedundantBranches : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult matchAndRewrite(BrOp op, + PatternRewriter &rewriter) const final { + Block *block = op.getOperation()->getBlock(); + Block *dest = op.getDest(); + + if (isa(dest->front())) + return failure(); + + // Single edge between blocks: merge it. + if (block->getNumSuccessors() == 1 && + dest->getSinglePredecessor() == block) { + rewriter.eraseOp(op); + rewriter.mergeBlocks(dest, block); + return success(); + } + + return failure(); + } +}; + struct RemoveEmptyScope : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; @@ -70,6 +104,7 @@ struct MergeCleanupsPass : public MergeCleanupsBase { void populateMergeCleanupPatterns(RewritePatternSet &patterns) { // clang-format off patterns.add< + RemoveRedundantBranches, RemoveEmptyScope, RemoveEmptySwitch >(patterns.getContext()); diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index f064d1b215ad..06870feba910 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -159,19 +159,25 @@ int jumpIntoLoop(int* ar) { // CHECK: cir.func @_Z12jumpIntoLoopPi // CHECK: cir.brcond {{.*}} ^bb[[#BLK2:]], ^bb[[#BLK3:]] // CHECK: ^bb[[#BLK2]]: -// CHECK: cir.br ^bb[[#BLK7:]] +// CHECK: cir.br ^bb[[#BODY:]] // CHECK: ^bb[[#BLK3]]: // CHECK: cir.br ^bb[[#BLK4:]] // CHECK: ^bb[[#BLK4]]: +// CHECK: cir.br ^bb[[#RETURN:]] +// CHECK: ^bb[[#RETURN]]: // CHECK: cir.return // CHECK: ^bb[[#BLK5:]]: // CHECK: cir.br ^bb[[#BLK6:]] -// CHECK: ^bb[[#BLK6]]: -// CHECK: cir.brcond {{.*}} ^bb[[#BLK7:]], ^bb[[#BLK8:]] +// CHECK: ^bb[[#BLK6]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BODY]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.br ^bb[[#BLK7:]] // CHECK: ^bb[[#BLK7]]: -// CHECK: cir.br ^bb[[#BLK6]] -// CHECK: ^bb[[#BLK8]]: -// CHECK: cir.br ^bb[[#BLK4]] +// CHECK: cir.br ^bb[[#RETURN]] @@ -191,21 +197,31 @@ int jumpFromLoop(int* ar) { return 0; } // CHECK: cir.func @_Z12jumpFromLoopPi -// CHECK: cir.brcond {{.*}} ^bb[[#BLK1:]], ^bb[[#BLK2:]] -// CHECK: ^bb[[#BLK1:]]: -// CHECK: cir.return {{.*}} -// CHECK: ^bb[[#BLK2:]]: -// CHECK: cir.br ^bb[[#BLK3:]] -// CHECK: ^bb[[#BLK3:]]: -// CHECK: cir.brcond {{.*}} ^bb[[#BLK4:]], ^bb[[#BLK7:]] -// CHECK: ^bb[[#BLK4:]]: -// CHECK: cir.brcond {{.*}} ^bb[[#BLK5:]], ^bb[[#BLK6:]] -// CHECK: ^bb[[#BLK5:]]: -// CHECK: cir.br ^bb[[#BLK1:]] -// CHECK: ^bb[[#BLK6:]]: -// CHECK: cir.br ^bb[[#BLK3:]] -// CHECK: ^bb[[#BLK7:]]: -// CHECK: cir.return {{.*}} +// CHECK: cir.brcond {{.*}} ^bb[[#RETURN1:]], ^bb[[#BLK3:]] +// CHECK: ^bb[[#RETURN1]]: +// CHECK: cir.return +// CHECK: ^bb[[#BLK3]]: +// CHECK: cir.br ^bb[[#BLK4:]] +// CHECK: ^bb[[#BLK4]]: +// CHECK: cir.br ^bb[[#BLK5:]] +// CHECK: ^bb[[#BLK5]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#IF42:]] +// CHECK: ^bb[[#IF42]]: +// CHECK: cir.brcond {{.*}} ^bb[[#IF42TRUE:]], ^bb[[#IF42FALSE:]] +// CHECK: ^bb[[#IF42TRUE]]: +// CHECK: cir.br ^bb[[#RETURN1]] +// CHECK: ^bb[[#IF42FALSE]]: +// CHECK: cir.br ^bb[[#BLK11:]] +// CHECK: ^bb[[#BLK11]]: +// CHECK: cir.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.br ^bb[[#RETURN2:]] +// CHECK: ^bb[[#RETURN2]]: +// CHECK: cir.return void flatLoopWithNoTerminatorInFront(int* ptr) { @@ -224,21 +240,35 @@ void flatLoopWithNoTerminatorInFront(int* ptr) { ; } -// CHECK-LABEL: cir.func @_Z31flatLoopWithNoTerminatorInFrontPi -// CHECK: cir.brcond {{.*}} ^bb[[#BLK1:]], ^bb[[#BLK2:]] -// CHECK: ^bb[[#BLK1:]]: -// CHECK: cir.br ^bb[[#BLK6:]] -// CHECK: ^bb[[#BLK2:]]: -// CHECK: cir.br ^bb[[#BLK3:]] -// CHECK: ^bb[[#BLK3:]]: // 2 preds: ^bb[[#BLK2:]], ^bb[[#BLK6:]] -// CHECK: cir.brcond {{.*}} ^bb[[#BLK4:]], ^bb[[#BLK5:]] -// CHECK: ^bb[[#BLK4:]]: -// CHECK: cir.br ^bb[[#BLK8:]] -// CHECK: ^bb[[#BLK5:]]: -// CHECK: cir.br ^bb[[#BLK6:]] -// CHECK: ^bb[[#BLK6:]]: // 2 preds: ^bb[[#BLK1:]], ^bb[[#BLK5:]] -// CHECK: cir.brcond {{.*}} ^bb[[#BLK3:]], ^bb[[#BLK7:]] -// CHECK: ^bb[[#BLK7:]]: +// CHECK: cir.func @_Z31flatLoopWithNoTerminatorInFrontPi +// CHECK: cir.brcond {{.*}} ^bb[[#BLK2:]], ^bb[[#BLK3:]] +// CHECK: ^bb[[#BLK2]]: +// CHECK: cir.br ^bb[[#LABEL_LOOP:]] +// CHECK: ^bb[[#BLK3]]: +// CHECK: cir.br ^bb[[#BLK4:]] +// CHECK: ^bb[[#BLK4]]: +// CHECK: cir.br ^bb[[#BLK5:]] +// CHECK: ^bb[[#BLK5]]: +// CHECK: cir.br ^bb[[#BODY:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BODY]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: // CHECK: cir.br ^bb[[#BLK8:]] -// CHECK: ^bb[[#BLK8:]]: // 2 preds: ^bb[[#BLK4:]], ^bb[[#BLK7:]] -// CHECK: cir.return \ No newline at end of file +// CHECK: ^bb[[#BLK8]]: +// CHECK: cir.brcond {{.*}} ^bb[[#BLK9:]], ^bb[[#BLK10:]] +// CHECK: ^bb[[#BLK9]]: +// CHECK: cir.br ^bb[[#RETURN:]] +// CHECK: ^bb[[#BLK10]]: +// CHECK: cir.br ^bb[[#BLK11:]] +// CHECK: ^bb[[#BLK11]]: +// CHECK: cir.br ^bb[[#LABEL_LOOP]] +// CHECK: ^bb[[#LABEL_LOOP]]: +// CHECK: cir.br ^bb[[#COND]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.br ^bb[[#BLK14:]] +// CHECK: ^bb[[#BLK14]]: +// CHECK: cir.br ^bb[[#RETURN]] +// CHECK: ^bb[[#RETURN]]: +// CHECK: cir.return +// CHECK: } +// CHECK:} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/switch-gnurange.cpp b/clang/test/CIR/CodeGen/switch-gnurange.cpp index 99f558f3070d..f48a32506252 100644 --- a/clang/test/CIR/CodeGen/switch-gnurange.cpp +++ b/clang/test/CIR/CodeGen/switch-gnurange.cpp @@ -172,6 +172,8 @@ void sw3(enum letter c) { // LLVM: store i32 4, ptr %[[X]] // LLVM: br label %[[EPILOG]] // LLVM: [[EPILOG]]: +// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] +// LLVM: [[EPILOG_END]]: // LLVM-NEXT: ret void void sw4(int x) { @@ -211,6 +213,8 @@ void sw4(int x) { // LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], 167 // LLVM: br i1 %[[DIFF_CMP]], label %[[CASE_66_233]], label %[[EPILOG]] // LLVM: [[EPILOG]]: +// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] +// LLVM: [[EPILOG_END]]: // LLVM-NEXT: ret void void sw5(int x) { @@ -237,6 +241,8 @@ void sw5(int x) { // LLVM-NEXT: store i32 1, ptr %[[Y:[0-9]+]] // LLVM-NEXT: br label %[[EPILOG]] // LLVM: [[EPILOG]]: +// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] +// LLVM: [[EPILOG_END]]: // LLVM-NEXT: ret void void sw6(int x) { @@ -267,6 +273,8 @@ void sw6(int x) { // LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], -1 // LLVM-NEXT: br i1 %[[DIFF_CMP]], label %[[CASE_MIN_MAX]], label %[[EPILOG]] // LLVM: [[EPILOG]]: +// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] +// LLVM: [[EPILOG_END]]: // LLVM-NEXT: ret void void sw7(int x) { @@ -338,5 +346,7 @@ void sw7(int x) { // LLVM: [[CASE_500_600]]: // LLVM-NEXT: br label %[[EPILOG]] // LLVM: [[EPILOG]]: +// LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] +// LLVM: [[EPILOG_END]]: // LLVM-NEXT: ret void diff --git a/clang/test/CIR/CodeGen/var-arg-scope.c b/clang/test/CIR/CodeGen/var-arg-scope.c index 8a993a9bc1ce..e28fb83698c4 100644 --- a/clang/test/CIR/CodeGen/var-arg-scope.c +++ b/clang/test/CIR/CodeGen/var-arg-scope.c @@ -68,6 +68,9 @@ void f1(__builtin_va_list c) { // LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } // LLVM: define void @f1(%struct.__va_list %0) // LLVM: [[VARLIST:%.*]] = alloca %struct.__va_list, i64 1, align 8, +// LLVM: br label %[[SCOPE_FRONT:.*]], + +// LLVM: [[SCOPE_FRONT]]: ; preds = %1 // LLVM: [[GR_OFFS_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 3 // LLVM: [[GR_OFFS:%.*]] = load i32, ptr [[GR_OFFS_P]], align 4, // LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[GR_OFFS]], 0, @@ -96,4 +99,7 @@ void f1(__builtin_va_list c) { // LLVM: [[BB_END]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG]] // LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT]], %[[BB_IN_REG]] ], [ [[STACK_V]], %[[BB_ON_STACK]] ] // LLVM-NEXT: [[PHIV:%.*]] = load ptr, ptr [[PHIP]], align 8, -// LLVM: ret void, +// LLVM-NEXT: br label %[[OUT_SCOPE:.*]], + +// LLVM: [[OUT_SCOPE]]: ; preds = %[[BB_END]] +// LLVM-NEXT: ret void, diff --git a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir index fd85a142d7e0..6c1d5c66fffa 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/goto.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/goto.cir @@ -7,17 +7,13 @@ module { %0 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %1 = cir.const #cir.int<1> : !u32i cir.store %1, %0 : !u32i, !cir.ptr - %c = cir.const #cir.int<0> : !u32i - %cond = cir.cast(int_to_bool, %c : !u32i), !cir.bool - cir.brcond %cond ^bb1, ^bb2 - - ^bb1: + cir.br ^bb2 + ^bb1: // no predecessors %2 = cir.load %0 : !cir.ptr, !u32i %3 = cir.const #cir.int<1> : !u32i %4 = cir.binop(add, %2, %3) : !u32i cir.store %4, %0 : !u32i, !cir.ptr cir.br ^bb2 - ^bb2: // 2 preds: ^bb0, ^bb1 %5 = cir.load %0 : !cir.ptr, !u32i %6 = cir.const #cir.int<2> : !u32i @@ -29,10 +25,8 @@ module { // MLIR: module { // MLIR-NEXT: func @foo -// MLIR: cf.cond_br %{{.+}}, ^bb[[#BLK1:]], ^bb[[#BLK2:]] -// MLIR: ^bb[[#BLK1:]]: -// MLIR: cf.br ^bb[[#BLK2:]] -// MLIR: ^bb[[#BLK2:]]: +// MLIR: cf.br ^bb1 +// MLIR: ^bb1: // MLIR: return // LLVM: br label %[[Value:[0-9]+]] diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index a3586f6c156f..e100e0c2f07e 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -11,10 +11,10 @@ module { cir.func @cStyleCasts(%arg0: !u32i, %arg1: !s32i, %arg2: !cir.float, %arg3: !cir.double) -> !s32i { - // CHECK: llvm.func @cStyleCasts + // CHECK: llvm.func @cStyleCasts %0 = cir.alloca !u32i, !cir.ptr, ["x1", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["x2", init] {alignment = 4 : i64} - %44 = cir.alloca !s16i, !cir.ptr, ["x4", init] {alignment = 2 : i64} + %20 = cir.alloca !s16i, !cir.ptr, ["x4", init] {alignment = 2 : i64} %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} %3 = cir.alloca !s8i, !cir.ptr, ["a", init] {alignment = 1 : i64} %4 = cir.alloca !s16i, !cir.ptr, ["b", init] {alignment = 2 : i64} @@ -22,86 +22,64 @@ module { %6 = cir.alloca !s64i, !cir.ptr, ["d", init] {alignment = 8 : i64} %7 = cir.alloca !cir.array, !cir.ptr>, ["arr"] {alignment = 4 : i64} %8 = cir.alloca !cir.ptr, !cir.ptr>, ["e", init] {alignment = 8 : i64} - %9 = cir.alloca !s8i, !cir.ptr, ["tmp1"] {alignment = 1 : i64} - %10 = cir.alloca !s16i, !cir.ptr, ["tmp2"] {alignment = 2 : i64} - %11 = cir.alloca !s64i, !cir.ptr, ["tmp3"] {alignment = 8 : i64} - %12 = cir.alloca !u64i, !cir.ptr, ["tmp4"] {alignment = 8 : i64} - %13 = cir.alloca !cir.ptr, !cir.ptr>, ["tmp5"] {alignment = 8 : i64} - %14 = cir.alloca !s32i, !cir.ptr, ["tmp6"] {alignment = 4 : i64} - %15 = cir.alloca !cir.bool, !cir.ptr, ["tmp7"] {alignment = 1 : i64} - %16 = cir.alloca !cir.float, !cir.ptr, ["tmp8"] {alignment = 4 : i64} - %17 = cir.alloca !cir.float, !cir.ptr, ["tmp9"] {alignment = 4 : i64} - %18 = cir.alloca !u32i, !cir.ptr, ["tmp10"] {alignment = 4 : i64} - %19 = cir.alloca !s32i, !cir.ptr, ["tmp11"] {alignment = 4 : i64} cir.store %arg0, %0 : !u32i, !cir.ptr cir.store %arg1, %1 : !s32i, !cir.ptr // Integer casts. - %20 = cir.load %0 : !cir.ptr, !u32i - %21 = cir.cast(integral, %20 : !u32i), !s8i + %9 = cir.load %0 : !cir.ptr, !u32i + %10 = cir.cast(integral, %9 : !u32i), !s8i // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8 - cir.store %21, %3 : !s8i, !cir.ptr - %22 = cir.load %1 : !cir.ptr, !s32i - %23 = cir.cast(integral, %22 : !s32i), !s16i + cir.store %10, %3 : !s8i, !cir.ptr + %11 = cir.load %1 : !cir.ptr, !s32i + %12 = cir.cast(integral, %11 : !s32i), !s16i // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i16 - cir.store %23, %4 : !s16i, !cir.ptr - %24 = cir.load %0 : !cir.ptr, !u32i - %25 = cir.cast(integral, %24 : !u32i), !s64i + cir.store %12, %4 : !s16i, !cir.ptr + %13 = cir.load %0 : !cir.ptr, !u32i + %14 = cir.cast(integral, %13 : !u32i), !s64i // CHECK: %{{[0-9]+}} = llvm.zext %{{[0-9]+}} : i32 to i64 - cir.store %25, %5 : !s64i, !cir.ptr - %26 = cir.load %1 : !cir.ptr, !s32i - %27 = cir.cast(integral, %26 : !s32i), !s64i + cir.store %14, %5 : !s64i, !cir.ptr + %15 = cir.load %1 : !cir.ptr, !s32i + %16 = cir.cast(integral, %15 : !s32i), !s64i // CHECK: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 - cir.store %27, %6 : !s64i, !cir.ptr - %28 = cir.cast(integral, %arg1 : !s32i), !u32i - cir.store %28, %18 : !u32i, !cir.ptr + %30 = cir.cast(integral, %arg1 : !s32i), !u32i // Should not produce a cast. - %29 = cir.cast(integral, %arg0 : !u32i), !s32i - cir.store %29, %19 : !s32i, !cir.ptr + %32 = cir.cast(integral, %arg0 : !u32i), !s32i // Should not produce a cast. - %30 = cir.load %44 : !cir.ptr, !s16i - %31 = cir.cast(integral, %30 : !s16i), !u64i + %21 = cir.load %20 : !cir.ptr, !s16i + %22 = cir.cast(integral, %21 : !s16i), !u64i // CHECK: %[[TMP:[0-9]+]] = llvm.sext %{{[0-9]+}} : i16 to i64 - cir.store %31, %12 : !u64i, !cir.ptr - %32 = cir.cast(int_to_bool, %arg1 : !s32i), !cir.bool - cir.store %32, %15 : !cir.bool, !cir.ptr + %33 = cir.cast(int_to_bool, %arg1 : !s32i), !cir.bool // CHECK: %[[#ZERO:]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %[[#CMP:]] = llvm.icmp "ne" %arg1, %[[#ZERO]] : i32 // CHECK: %{{.+}} = llvm.zext %[[#CMP]] : i1 to i8 // Pointer casts. - %33 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr - cir.store %33, %8 : !cir.ptr, !cir.ptr> + cir.store %16, %6 : !s64i, !cir.ptr + %17 = cir.cast(array_to_ptrdecay, %7 : !cir.ptr>), !cir.ptr + cir.store %17, %8 : !cir.ptr, !cir.ptr> // CHECK: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, i32 - %34 = cir.cast(int_to_ptr, %31 : !u64i), !cir.ptr - cir.store %34, %13 : !cir.ptr, !cir.ptr> + %23 = cir.cast(int_to_ptr, %22 : !u64i), !cir.ptr // CHECK: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr - %35 = cir.cast(ptr_to_int, %34 : !cir.ptr), !s32i - cir.store %35, %14 : !s32i, !cir.ptr + %24 = cir.cast(ptr_to_int, %23 : !cir.ptr), !s32i // CHECK: %{{[0-9]+}} = llvm.ptrtoint %[[TMP2]] : !llvm.ptr to i32 - %36 = cir.cast(ptr_to_bool, %34 : !cir.ptr), !cir.bool - cir.store %36, %15 : !cir.bool, !cir.ptr + %29 = cir.cast(ptr_to_bool, %23 : !cir.ptr), !cir.bool // Floating point casts. - %37 = cir.cast(int_to_float, %arg1 : !s32i), !cir.float - cir.store %37, %16 : !cir.float, !cir.ptr + %25 = cir.cast(int_to_float, %arg1 : !s32i), !cir.float // CHECK: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32 - %38 = cir.cast(int_to_float, %arg0 : !u32i), !cir.float - cir.store %38, %16 : !cir.float, !cir.ptr + %26 = cir.cast(int_to_float, %arg0 : !u32i), !cir.float // CHECK: %{{.+}} = llvm.uitofp %{{.+}} : i32 to f32 - %39 = cir.cast(float_to_int, %arg2 : !cir.float), !s32i - cir.store %39, %14 : !s32i, !cir.ptr + %27 = cir.cast(float_to_int, %arg2 : !cir.float), !s32i // CHECK: %{{.+}} = llvm.fptosi %{{.+}} : f32 to i32 - %40 = cir.cast(float_to_int, %arg2 : !cir.float), !u32i - cir.store %40, %18 : !u32i, !cir.ptr + %28 = cir.cast(float_to_int, %arg2 : !cir.float), !u32i // CHECK: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 - %41 = cir.cast(floating, %arg3 : !cir.double), !cir.float - cir.store %41, %17 : !cir.float, !cir.ptr + %18 = cir.const #cir.int<0> : !s32i + // CHECK: %{{.+}} = llvm.fptrunc %{{.+}} : f64 to f32 + %34 = cir.cast(floating, %arg3 : !cir.double), !cir.float - %42 = cir.const #cir.int<0> : !s32i - cir.store %42, %2 : !s32i, !cir.ptr - %43 = cir.load %2 : !cir.ptr, !s32i - cir.return %43 : !s32i + cir.store %18, %2 : !s32i, !cir.ptr + %19 = cir.load %2 : !cir.ptr, !s32i + cir.return %19 : !s32i } cir.func @testBoolToIntCast(%arg0: !cir.bool) { diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index b0df3eecdf85..5c5ed4736f7a 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -68,20 +68,22 @@ module { // MLIR: llvm.store {{.*}}, %[[VAL_8]] {{.*}}: i32, !llvm.ptr // MLIR: %[[VAL_13:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 // MLIR: llvm.store %[[VAL_13]], %[[VAL_12]] {{.*}}: f64, !llvm.ptr +// MLIR: llvm.br ^bb1 +// MLIR: ^bb1: // MLIR: %[[VAL_14:.*]] = llvm.mlir.constant(1 : index) : i64 // MLIR: %[[VAL_15:.*]] = llvm.alloca %[[VAL_14]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR: %[[VAL_16:.*]] = llvm.mlir.constant(0 : i32) : i32 // MLIR: llvm.store %[[VAL_16]], %[[VAL_15]] {{.*}}: i32, !llvm.ptr -// MLIR: llvm.br ^bb1 -// MLIR: ^bb1: +// MLIR: llvm.br ^bb2 +// MLIR: ^bb2: // MLIR: %[[VAL_17:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_18:.*]] = llvm.load %[[VAL_8]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_19:.*]] = llvm.icmp "slt" %[[VAL_17]], %[[VAL_18]] : i32 // MLIR: %[[VAL_20:.*]] = llvm.zext %[[VAL_19]] : i1 to i32 // MLIR: %[[VAL_21:.*]] = llvm.mlir.constant(0 : i32) : i32 // MLIR: %[[VAL_22:.*]] = llvm.icmp "ne" %[[VAL_20]], %[[VAL_21]] : i32 -// MLIR: llvm.cond_br %[[VAL_22]], ^bb2, ^bb3 -// MLIR: ^bb2: +// MLIR: llvm.cond_br %[[VAL_22]], ^bb3, ^bb5 +// MLIR: ^bb3: // MLIR: %[[VAL_23:.*]] = llvm.load %[[VAL_4]] {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr // MLIR: %[[VAL_24:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_25:.*]] = llvm.sext %[[VAL_24]] : i32 to i64 @@ -96,12 +98,16 @@ module { // MLIR: %[[VAL_34:.*]] = llvm.load %[[VAL_12]] {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: %[[VAL_35:.*]] = llvm.fadd %[[VAL_34]], %[[VAL_33]] : f64 // MLIR: llvm.store %[[VAL_35]], %[[VAL_12]] {{.*}}: f64, !llvm.ptr +// MLIR: llvm.br ^bb4 +// MLIR: ^bb4: // MLIR: %[[VAL_36:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_37:.*]] = llvm.mlir.constant(1 : i32) : i32 // MLIR: %[[VAL_38:.*]] = llvm.add %[[VAL_36]], %[[VAL_37]] : i32 // MLIR: llvm.store %[[VAL_38]], %[[VAL_15]] {{.*}}: i32, !llvm.ptr -// MLIR: llvm.br ^bb1 -// MLIR: ^bb3: +// MLIR: llvm.br ^bb2 +// MLIR: ^bb5: +// MLIR: llvm.br ^bb6 +// MLIR: ^bb6: // MLIR: %[[VAL_39:.*]] = llvm.load %[[VAL_12]] {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: llvm.store %[[VAL_39]], %[[VAL_10]] {{.*}}: f64, !llvm.ptr // MLIR: %[[VAL_40:.*]] = llvm.load %[[VAL_10]] {alignment = 8 : i64} : !llvm.ptr -> f64 diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index 3263359714aa..f09626ec122f 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -36,14 +36,18 @@ module { // MLIR: %[[#Zero:]] = llvm.mlir.constant(0 : i32) : i32 // MLIR: llvm.cond_br {{.*}}, ^bb[[#COND_YES:]], ^bb[[#COND_NO:]] // MLIR: ^bb[[#COND_YES]]: -// MLIR: %[[#Neg_one:]] = llvm.sub %[[#Zero]], %[[#One]] : i32 -// MLIR: llvm.store %[[#Neg_one]], %[[#Ret_val_addr:]] {{.*}}: i32, !llvm.ptr +// MLIR: llvm.br ^bb[[#GOTO_BLK:]] +// MLIR: ^bb[[#COND_NO]]: +// MLIR: llvm.br ^bb[[#BLK:]] +// MLIR: ^bb[[#BLK]]: +// MLIR: llvm.store %[[#Zero]], %[[#Ret_val_addr:]] {{.*}}: i32, !llvm.ptr // MLIR: llvm.br ^bb[[#RETURN:]] -// MLIR: ^bb[[#COND_NO]]: -// MLIR: llvm.store %[[#Zero]], %[[#Ret_val_addr]] {{.*}}: i32, !llvm.ptr -// MLIR: llvm.br ^bb[[#RETURN]] // MLIR: ^bb[[#RETURN]]: // MLIR: %[[#Ret_val:]] = llvm.load %[[#Ret_val_addr]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: llvm.return %[[#Ret_val]] : i32 +// MLIR: ^bb[[#GOTO_BLK]]: +// MLIR: %[[#Neg_one:]] = llvm.sub %[[#Zero]], %[[#One]] : i32 +// MLIR: llvm.store %[[#Neg_one]], %[[#Ret_val_addr]] {{.*}}: i32, !llvm.ptr +// MLIR: llvm.br ^bb[[#RETURN]] // MLIR: } } diff --git a/clang/test/CIR/Lowering/loop.cir b/clang/test/CIR/Lowering/loop.cir index 3e9a47e80f8f..d15479a76a0d 100644 --- a/clang/test/CIR/Lowering/loop.cir +++ b/clang/test/CIR/Lowering/loop.cir @@ -22,6 +22,8 @@ module { // CHECK: ^bb[[#COND]]: // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: +// CHECK: llvm.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: // CHECK: llvm.br ^bb[[#COND]] // CHECK: ^bb[[#EXIT]]: @@ -58,9 +60,11 @@ module { } // CHECK: @testDoWhile -// CHECK: llvm.br ^bb[[#COND]] +// CHECK: llvm.br ^bb[[#BODY:]] // CHECK: ^bb[[#COND:]]: // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: llvm.br ^bb[[#COND]] // CHECK: ^bb[[#EXIT]]: @@ -79,6 +83,8 @@ module { } // CHECK: @testWhileWithBreakTerminatedBody +// CHECK: llvm.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // CHECK: llvm.br ^bb[[#EXIT]] @@ -109,8 +115,12 @@ module { // CHECK: ^bb[[#COND:]]: // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: +// CHECK: llvm.br ^bb[[#SCOPE_IN:]] +// CHECK: ^bb[[#SCOPE_IN]]: // CHECK: llvm.br ^bb[[#EXIT]] // CHECK: ^bb[[#SCOPE_EXIT:]]: +// CHECK: llvm.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: // CHECK: llvm.br ^bb[[#COND]] // CHECK: ^bb[[#EXIT]]: } diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index 9b9090c12900..34b6bfd7618e 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -44,11 +44,21 @@ module { // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK1:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#preBREAK1]]: + // CHECK: llvm.br ^bb[[#preBREAK2:]] + // CHECK: ^bb[[#preBREAK2]]: + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preBODY0:]] // CHECK: ^bb[[#preEXIT1]]: // CHECK: llvm.br ^bb[[#EXIT:]] // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: // [...] // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: @@ -117,21 +127,40 @@ module { // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED1:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#preNESTED1]]: + // CHECK: llvm.br ^bb[[#preNESTED2:]] + // CHECK: ^bb[[#preNESTED2]]: + // CHECK: llvm.br ^bb[[#NESTED:]] + // CHECK: ^bb[[#NESTED]]: // [...] // CHECK: llvm.br ^bb[[#COND_NESTED:]] // CHECK: ^bb[[#COND_NESTED]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preBREAK1:]], ^bb[[#EXIT_NESTED:]] // CHECK: ^bb[[#preBREAK1]]: + // CHECK: llvm.br ^bb[[#preBREAK2:]] + // CHECK: ^bb[[#preBREAK2]]: + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT2:]], ^bb[[#preBODY0:]] // CHECK: ^bb[[#preEXIT2]]: // CHECK: llvm.br ^bb[[#EXIT_NESTED:]] // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY_NESTED:]] + // CHECK: ^bb[[#BODY_NESTED]]: + // CHECK: llvm.br ^bb[[#STEP_NESTED:]] + // CHECK: ^bb[[#STEP_NESTED]]: // [...] // CHECK: llvm.br ^bb[[#COND_NESTED:]] // CHECK: ^bb[[#EXIT_NESTED]]: // [...] + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: // [...] @@ -176,11 +205,18 @@ module { // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: + // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preCOND0:]] // CHECK: ^bb[[#preEXIT1]]: - // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: llvm.br ^bb[[#preEXIT2:]] // CHECK: ^bb[[#preCOND0]]: + // CHECK: llvm.br ^bb[[#preCOND1:]] + // CHECK: ^bb[[#preCOND1]]: // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#preEXIT2]]: + // CHECK: llvm.br ^bb[[#EXIT:]] // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } @@ -220,12 +256,21 @@ cir.func @testDoWhile() { // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#BODY:]] - // CHECK: ^bb[[#preEXIT1]]: - // CHECK: llvm.br ^bb[[#EXIT:]] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] - // CHECK: llvm.cond_br %{{.+}}, ^bb[[#COND]], ^bb[[#EXIT]] + // CHECK: llvm.br ^bb[[#BREAK:]] + // CHECK: ^bb[[#BREAK]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preEXIT1:]], ^bb[[#preCOND0:]] + // CHECK: ^bb[[#preEXIT1]]: + // CHECK: llvm.br ^bb[[#preEXIT2:]] + // CHECK: ^bb[[#preCOND0]]: + // CHECK: llvm.br ^bb[[#preCOND1:]] + // CHECK: ^bb[[#preCOND1]]: + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#preEXIT2]]: + // CHECK: llvm.br ^bb[[#EXIT:]] // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir index c3fb6406bd6b..0371d416b61d 100644 --- a/clang/test/CIR/Lowering/loops-with-continue.cir +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -37,21 +37,33 @@ module { cir.return } -// CHECK: llvm.func @testFor() -// CHECK: llvm.br ^bb[[#COND:]] -// CHECK: ^bb[[#COND]]: -// CHECK: llvm.cond_br %8, ^bb[[#CONTINUE:]], ^bb[[#EXIT:]] -// CHECK: ^bb[[#CONTINUE]]: -// CHECK: llvm.cond_br %14, ^bb[[#preSTEP:]], ^bb[[#BODY:]] -// CHECK: ^bb[[#preSTEP]]: -// CHECK: llvm.br ^bb[[#STEP:]] -// CHECK: ^bb[[#BODY]]: -// CHECK: llvm.br ^bb[[#STEP:]] -// CHECK: ^bb[[#STEP]]: -// CHECK: llvm.br ^bb[[#COND:]] -// CHECK: ^bb[[#EXIT]]: -// CHECK: llvm.return -// CHECK: } + // CHECK: llvm.func @testFor() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE1:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#preCONTINUE1]]: + // CHECK: llvm.br ^bb[[#preCONTINUE2:]] + // CHECK: ^bb[[#preCONTINUE2]]: + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preSTEP:]], ^bb[[#preBODY0:]] + // CHECK: ^bb[[#preSTEP]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } cir.func @testForNested() { @@ -109,27 +121,50 @@ module { cir.return } -// CHECK: llvm.func @testForNested() -// CHECK: llvm.br ^bb[[#COND1:]] -// CHECK: ^bb[[#COND1]]: -// CHECK: llvm.cond_br %{{.+}}, ^bb[[#LOOP1BODY:]], ^bb[[#EXIT:]] -// CHECK: ^bb[[#LOOP1BODY]]: -// CHECK: llvm.br ^bb[[#COND2:]] -// CHECK: ^bb[[#COND2]]: -// CHECK: llvm.cond_br %{{.+}}, ^bb[[#LOOP2BODY:]], ^bb[[#LOOP1CONTINUE:]] -// CHECK: ^bb[[#LOOP2BODY]]: -// CHECK: llvm.cond_br %{{.+}}, ^bb[[#IFBODY1:]], ^bb[[#IFBODY2:]] -// CHECK: ^bb[[#IFBODY1]]: -// CHECK: llvm.br ^bb[[#STEP2:]] -// CHECK: ^bb[[#IFBODY2]]: -// CHECK: llvm.br ^bb[[#STEP2:]] -// CHECK: ^bb[[#STEP2]]: -// CHECK: llvm.br ^bb[[#COND2]] -// CHECK: ^bb[[#LOOP1CONTINUE]]: -// CHECK: llvm.br ^bb[[#COND1]] -// CHECK: ^bb[[#EXIT]]: -// CHECK: llvm.return -// CHECK: } + // CHECK: llvm.func @testForNested() + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preNESTED1:]], ^bb[[#EXIT:]] + // CHECK: ^bb[[#preNESTED1]]: + // CHECK: llvm.br ^bb[[#preNESTED2:]] + // CHECK: ^bb[[#preNESTED2]]: + // CHECK: llvm.br ^bb[[#NESTED:]] + // CHECK: ^bb[[#NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#COND_NESTED:]] + // CHECK: ^bb[[#COND_NESTED]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCONTINUE1:]], ^bb[[#EXIT_NESTED:]] + // CHECK: ^bb[[#preCONTINUE1]]: + // CHECK: llvm.br ^bb[[#preCONTINUE2:]] + // CHECK: ^bb[[#preCONTINUE2]]: + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] + // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preSTEP0:]], ^bb[[#preBODY0:]] + // CHECK: ^bb[[#preSTEP0]]: + // CHECK: llvm.br ^bb[[#STEP_NESTED:]] + // CHECK: ^bb[[#preBODY0]]: + // CHECK: llvm.br ^bb[[#preBODY1:]] + // CHECK: ^bb[[#preBODY1]]: + // CHECK: llvm.br ^bb[[#BODY_NESTED:]] + // CHECK: ^bb[[#BODY_NESTED]]: + // CHECK: llvm.br ^bb[[#STEP_NESTED:]] + // CHECK: ^bb[[#STEP_NESTED]]: + // [...] + // CHECK: llvm.br ^bb[[#COND_NESTED:]] + // CHECK: ^bb[[#EXIT_NESTED]]: + // CHECK: llvm.br ^bb[[#BODY:]] + // CHECK: ^bb[[#BODY]]: + // CHECK: llvm.br ^bb[[#STEP:]] + // CHECK: ^bb[[#STEP]]: + // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#EXIT]]: + // [...] + // CHECK: } cir.func @testWhile() { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} @@ -163,14 +198,22 @@ cir.func @testWhile() { // CHECK: llvm.func @testWhile() // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCOND0:]], ^bb[[#preCOND1:]] // CHECK: ^bb[[#preCOND0]]: - // CHECK: llvm.br ^bb[[#COND]] + // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#preCOND1]]: - // CHECK: llvm.br ^bb[[#COND]] + // CHECK: llvm.br ^bb[[#preCOND2:]] + // CHECK: ^bb[[#preCOND2]]: + // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: // [...] // CHECK: } @@ -208,13 +251,21 @@ cir.func @testWhile() { // CHECK: llvm.func @testDoWhile() // [...] + // CHECK: llvm.br ^bb[[#COND:]] + // CHECK: ^bb[[#COND]]: + // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // [...] + // CHECK: llvm.br ^bb[[#CONTINUE:]] + // CHECK: ^bb[[#CONTINUE]]: + // [...] // CHECK: llvm.cond_br %{{.+}}, ^bb[[#preCOND0:]], ^bb[[#preCOND1:]] // CHECK: ^bb[[#preCOND0]]: // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#preCOND1]]: + // CHECK: llvm.br ^bb[[#preCOND2:]] + // CHECK: ^bb[[#preCOND2]]: // CHECK: llvm.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: // [...] diff --git a/clang/test/CIR/Lowering/region-simplify.cir b/clang/test/CIR/Lowering/region-simplify.cir index 0ebedfc6eb62..5f32205cb032 100644 --- a/clang/test/CIR/Lowering/region-simplify.cir +++ b/clang/test/CIR/Lowering/region-simplify.cir @@ -1,5 +1,5 @@ -// RUN: cir-opt %s -cir-to-llvm -canonicalize -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-opt %s -cir-to-llvm -canonicalize -o - | cir-translate -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR +// RUN: cir-opt %s -canonicalize -o - | cir-translate -cir-to-llvmir | FileCheck %s -check-prefix=LLVM !u32i = !cir.int @@ -8,7 +8,7 @@ module { %0 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %1 = cir.const #cir.int<1> : !u32i cir.store %1, %0 : !u32i, !cir.ptr - cir.goto "err" + cir.br ^bb2 ^bb1: // no predecessors %2 = cir.load %0 : !cir.ptr, !u32i %3 = cir.const #cir.int<1> : !u32i @@ -16,7 +16,6 @@ module { cir.store %4, %0 : !u32i, !cir.ptr cir.br ^bb2 ^bb2: // 2 preds: ^bb0, ^bb1 - cir.label "err" %5 = cir.load %0 : !cir.ptr, !u32i %6 = cir.const #cir.int<2> : !u32i %7 = cir.binop(add, %5, %6) : !u32i diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index e04272cbec12..add46429cba2 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -14,16 +14,26 @@ module { } // MLIR: llvm.func @foo() +// MLIR-NEXT: llvm.br ^bb1 +// MLIR-NEXT: ^bb1: // MLIR-DAG: [[v1:%[0-9]]] = llvm.mlir.constant(4 : i32) : i32 // MLIR-DAG: [[v2:%[0-9]]] = llvm.mlir.constant(1 : index) : i64 // MLIR-DAG: [[v3:%[0-9]]] = llvm.alloca [[v2]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR-NEXT: llvm.store [[v1]], [[v3]] {{.*}}: i32, !llvm.ptr +// MLIR-NEXT: llvm.br ^bb2 +// MLIR-NEXT: ^bb2: // MLIR-NEXT: llvm.return // LLVM: define void @foo() -// LLVM: %1 = alloca i32, i64 1, align 4 -// LLVM-NEXT: store i32 4, ptr %1, align 4 +// LLVM-NEXT: br label %1 +// LLVM-EMPTY: +// LLVM-NEXT: 1: +// LLVM-NEXT: %2 = alloca i32, i64 1, align 4 +// LLVM-NEXT: store i32 4, ptr %2, align 4 +// LLVM-NEXT: br label %3 +// LLVM-EMPTY: +// LLVM-NEXT: 3: // LLVM-NEXT: ret void // LLVM-NEXT: } @@ -54,11 +64,13 @@ module { // MLIR: llvm.func @scope_with_return() // MLIR-NEXT: [[v0:%.*]] = llvm.mlir.constant(1 : index) : i64 // MLIR-NEXT: [[v1:%.*]] = llvm.alloca [[v0]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr + // MLIR-NEXT: llvm.br ^bb1 + // MLIR-NEXT: ^bb1: // pred: ^bb0 // MLIR-NEXT: [[v2:%.*]] = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: llvm.store [[v2]], [[v1]] {{.*}}: i32, !llvm.ptr // MLIR-NEXT: [[v3:%.*]] = llvm.load [[v1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return [[v3]] : i32 - // MLIR-NEXT: ^bb1: // no predecessors + // MLIR-NEXT: ^bb2: // no predecessors // MLIR-NEXT: [[v4:%.*]] = llvm.load [[v1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return [[v4]] : i32 // MLIR-NEXT: } diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index b362753145ce..dee8e98db858 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -122,14 +122,17 @@ module { cir.return } // CHECK: llvm.func @shouldLowerMultiBlockCase - // CHECK: llvm.switch {{.*}} : i32, ^bb3 [ - // CHECK: 3: ^bb1 - // CHECK: ] // CHECK: ^bb1: // pred: ^bb0 + // CHECK: llvm.switch {{.*}} : i32, ^bb4 [ + // CHECK: 3: ^bb2 + // CHECK: ] + // CHECK: ^bb2: // pred: ^bb1 // CHECK: llvm.return - // CHECK: ^bb2: // no predecessors - // CHECK: llvm.br ^bb3 - // CHECK: ^bb3: // 2 preds: ^bb0, ^bb2 + // CHECK: ^bb3: // no predecessors + // CHECK: llvm.br ^bb4 + // CHECK: ^bb4: // 2 preds: ^bb1, ^bb3 + // CHECK: llvm.br ^bb5 + // CHECK: ^bb5: // pred: ^bb4 // CHECK: llvm.return // CHECK: } @@ -162,15 +165,21 @@ module { cir.return %4 : !s32i } // CHECK: llvm.func @shouldLowerNestedBreak - // CHECK: llvm.switch %6 : i32, ^bb4 [ - // CHECK: 0: ^bb1 + // CHECK: llvm.switch %6 : i32, ^bb7 [ + // CHECK: 0: ^bb2 // CHECK: ] - // CHECK: ^bb1: // pred: ^bb0 - // CHECK: llvm.cond_br {{%.*}}, ^bb2, ^bb3 // CHECK: ^bb2: // pred: ^bb1 - // CHECK: llvm.br ^bb4 - // CHECK: ^bb3: // pred: ^bb1 - // CHECK: llvm.br ^bb4 - // CHECK: ^bb4: // 3 preds: ^bb0, ^bb2, ^bb3 + // CHECK: llvm.br ^bb3 + // CHECK: ^bb3: // pred: ^bb2 + // CHECK: llvm.cond_br {{%.*}}, ^bb4, ^bb5 + // CHECK: ^bb4: // pred: ^bb3 + // CHECK: llvm.br ^bb7 + // CHECK: ^bb5: // pred: ^bb3 + // CHECK: llvm.br ^bb6 + // CHECK: ^bb6: // pred: ^bb5 + // CHECK: llvm.br ^bb7 + // CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 + // CHECK: llvm.br ^bb8 + // CHECK: ^bb8: // pred: ^bb7 // CHECK: llvm.return } diff --git a/clang/test/CIR/Lowering/ternary.cir b/clang/test/CIR/Lowering/ternary.cir index b79d7eac726f..6e469f388d79 100644 --- a/clang/test/CIR/Lowering/ternary.cir +++ b/clang/test/CIR/Lowering/ternary.cir @@ -41,6 +41,8 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { // MLIR-NEXT: %8 = llvm.mlir.constant(5 : i32) : i32 // MLIR-NEXT: llvm.br ^bb3(%8 : i32) // MLIR-NEXT: ^bb3(%9: i32): // 2 preds: ^bb1, ^bb2 +// MLIR-NEXT: llvm.br ^bb4 +// MLIR-NEXT: ^bb4: // pred: ^bb3 // MLIR-NEXT: llvm.store %9, %3 {{.*}}: i32, !llvm.ptr // MLIR-NEXT: %10 = llvm.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: llvm.return %10 : i32 diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index b1eac868133e..48e2705e756d 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -22,67 +22,41 @@ module { cir.func @floatingPoint(%arg0: !cir.float, %arg1: !cir.double) { - // MLIR: llvm.func @floatingPoint + // MLIR: llvm.func @floatingPoint %0 = cir.alloca !cir.float, !cir.ptr, ["f", init] {alignment = 4 : i64} %1 = cir.alloca !cir.double, !cir.ptr, ["d", init] {alignment = 8 : i64} - %2 = cir.alloca !cir.bool, !cir.ptr, ["tmp1"] {alignment = 1 : i64} - %3 = cir.alloca !cir.bool, !cir.ptr, ["tmp2"] {alignment = 1 : i64} - %4 = cir.alloca !cir.bool, !cir.ptr, ["tmp3"] {alignment = 1 : i64} - %5 = cir.alloca !cir.bool, !cir.ptr, ["tmp4"] {alignment = 1 : i64} - cir.store %arg0, %0 : !cir.float, !cir.ptr cir.store %arg1, %1 : !cir.double, !cir.ptr - - %6 = cir.load %0 : !cir.ptr, !cir.float - %7 = cir.cast(float_to_bool, %6 : !cir.float), !cir.bool - cir.store %7, %2 : !cir.bool, !cir.ptr + %2 = cir.load %0 : !cir.ptr, !cir.float + %3 = cir.cast(float_to_bool, %2 : !cir.float), !cir.bool // MLIR: %[[#F_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 // MLIR: %[[#F_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#F_ZERO]] : f32 // MLIR: %[[#F_ZEXT:]] = llvm.zext %[[#F_BOOL]] : i1 to i8 - - %8 = cir.unary(not, %7) : !cir.bool, !cir.bool - cir.store %8, %3 : !cir.bool, !cir.ptr + %4 = cir.unary(not, %3) : !cir.bool, !cir.bool // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1 : i8) : i8 // MLIR: = llvm.xor %[[#F_ZEXT]], %[[#F_ONE]] : i8 - - %9 = cir.load %1 : !cir.ptr, !cir.double - %10 = cir.cast(float_to_bool, %9 : !cir.double), !cir.bool - cir.store %10, %4 : !cir.bool, !cir.ptr + %5 = cir.load %1 : !cir.ptr, !cir.double + %6 = cir.cast(float_to_bool, %5 : !cir.double), !cir.bool // MLIR: %[[#D_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 // MLIR: %[[#D_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#D_ZERO]] : f64 // MLIR: %[[#D_ZEXT:]] = llvm.zext %[[#D_BOOL]] : i1 to i8 - - %11 = cir.unary(not, %10) : !cir.bool, !cir.bool - cir.store %11, %5 : !cir.bool, !cir.ptr + %7 = cir.unary(not, %6) : !cir.bool, !cir.bool // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(1 : i8) : i8 // MLIR: = llvm.xor %[[#D_ZEXT]], %[[#D_ONE]] : i8 - cir.return } cir.func @CStyleValueNegation(%arg0: !s32i, %arg1: !cir.float) { - // MLIR: llvm.func @CStyleValueNegation + // MLIR: llvm.func @CStyleValueNegation %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.alloca !cir.float, !cir.ptr, ["f", init] {alignment = 4 : i64} - %2 = cir.alloca !cir.bool, !cir.ptr, ["tmp1"] {alignment = 1 : i64} - %3 = cir.alloca !cir.bool, !cir.ptr, ["tmp2"] {alignment = 1 : i64} - %4 = cir.alloca !s32i, !cir.ptr, ["tmp3"] {alignment = 4 : i64} - %5 = cir.alloca !cir.bool, !cir.ptr, ["tmp4"] {alignment = 1 : i64} - %6 = cir.alloca !cir.bool, !cir.ptr, ["tmp5"] {alignment = 1 : i64} - %7 = cir.alloca !s32i, !cir.ptr, ["tmp6"] {alignment = 4 : i64} - + %3 = cir.alloca !cir.float, !cir.ptr, ["f", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, !cir.ptr - cir.store %arg1, %1 : !cir.float, !cir.ptr + cir.store %arg1, %3 : !cir.float, !cir.ptr - %8 = cir.load %0 : !cir.ptr, !s32i - %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool - cir.store %9, %2 : !cir.bool, !cir.ptr - - %10 = cir.unary(not, %9) : !cir.bool, !cir.bool - cir.store %10, %3 : !cir.bool, !cir.ptr - - %11 = cir.cast(bool_to_int, %10 : !cir.bool), !s32i - cir.store %11, %4 : !s32i, !cir.ptr + %5 = cir.load %0 : !cir.ptr, !s32i + %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool + %7 = cir.unary(not, %6) : !cir.bool, !cir.bool + %8 = cir.cast(bool_to_int, %7 : !cir.bool), !s32i // MLIR: %[[#INT:]] = llvm.load %{{.+}} : !llvm.ptr // MLIR: %[[#IZERO:]] = llvm.mlir.constant(0 : i32) : i32 // MLIR: %[[#ICMP:]] = llvm.icmp "ne" %[[#INT]], %[[#IZERO]] : i32 @@ -91,15 +65,10 @@ module { // MLIR: %[[#IXOR:]] = llvm.xor %[[#IEXT]], %[[#IONE]] : i8 // MLIR: = llvm.zext %[[#IXOR]] : i8 to i32 - %12 = cir.load %1 : !cir.ptr, !cir.float - %13 = cir.cast(float_to_bool, %12 : !cir.float), !cir.bool - cir.store %13, %5 : !cir.bool, !cir.ptr - - %14 = cir.unary(not, %13) : !cir.bool, !cir.bool - cir.store %14, %6 : !cir.bool, !cir.ptr - - %15 = cir.cast(bool_to_int, %14 : !cir.bool), !s32i - cir.store %15, %7 : !s32i, !cir.ptr + %17 = cir.load %3 : !cir.ptr, !cir.float + %18 = cir.cast(float_to_bool, %17 : !cir.float), !cir.bool + %19 = cir.unary(not, %18) : !cir.bool, !cir.bool + %20 = cir.cast(bool_to_int, %19 : !cir.bool), !s32i // MLIR: %[[#FLOAT:]] = llvm.load %{{.+}} : !llvm.ptr // MLIR: %[[#FZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 // MLIR: %[[#FCMP:]] = llvm.fcmp "une" %[[#FLOAT]], %[[#FZERO]] : f32 diff --git a/clang/test/CIR/Transforms/loop.cir b/clang/test/CIR/Transforms/loop.cir index ff1caf9dae53..8204216b6f52 100644 --- a/clang/test/CIR/Transforms/loop.cir +++ b/clang/test/CIR/Transforms/loop.cir @@ -19,6 +19,8 @@ module { // CHECK: ^bb[[#COND]]: // CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: // CHECK: cir.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: // CHECK: cir.return @@ -54,8 +56,10 @@ module { } // CHECK: cir.func @testDoWhile(%arg0: !cir.bool) { // CHECK: cir.br ^bb[[#BODY:]] -// CHECK: ^bb[[#BODY]]: +// CHECK: ^bb[[#COND]]: // CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: // CHECK: cir.return // CHECK: } @@ -73,6 +77,8 @@ module { cir.return } // CHECK: cir.func @testWhileWithBreakTerminatedBody(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: // CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: // CHECK: cir.br ^bb[[#EXIT]] @@ -102,9 +108,13 @@ module { // CHECK: ^bb[[#COND]]: // CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] // CHECK: ^bb[[#BODY]]: -// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: cir.br ^bb[[#EX_SCOPE_IN:]] +// CHECK: ^bb[[#EX_SCOPE_IN]]: +// CHECK: cir.br ^bb[[#EXIT:]] // CHECK: ^bb[[#EX_SCOPE_EXIT:]]: -// CHECK: cir.br ^bb[[#COND]] +// CHECK: cir.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: cir.br ^bb[[#COND:]] // CHECK: ^bb[[#EXIT]]: // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/Transforms/scope.cir b/clang/test/CIR/Transforms/scope.cir index b5ba0f7aac2e..2d14784c33f8 100644 --- a/clang/test/CIR/Transforms/scope.cir +++ b/clang/test/CIR/Transforms/scope.cir @@ -12,9 +12,13 @@ module { cir.return } // CHECK: cir.func @foo() { +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 // CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK: %1 = cir.const #cir.int<4> : !u32i // CHECK: cir.store %1, %0 : !u32i, !cir.ptr +// CHECK: cir.br ^bb2 +// CHECK: ^bb2: // pred: ^bb1 // CHECK: cir.return // CHECK: } @@ -42,11 +46,13 @@ module { // CHECK: cir.func @scope_with_return() -> !u32i { // CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 // CHECK: %1 = cir.const #cir.int<0> : !u32i // CHECK: cir.store %1, %0 : !u32i, !cir.ptr // CHECK: %2 = cir.load %0 : !cir.ptr, !u32i // CHECK: cir.return %2 : !u32i -// CHECK: ^bb1: // no predecessors +// CHECK: ^bb2: // no predecessors // CHECK: %3 = cir.load %0 : !cir.ptr, !u32i // CHECK: cir.return %3 : !u32i // CHECK: } diff --git a/clang/test/CIR/Transforms/switch.cir b/clang/test/CIR/Transforms/switch.cir index 6d0422bbf5eb..177dfc98c8af 100644 --- a/clang/test/CIR/Transforms/switch.cir +++ b/clang/test/CIR/Transforms/switch.cir @@ -139,15 +139,19 @@ module { // CHECK: cir.func @shouldFlatMultiBlockCase(%arg0: !s32i) { // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK: cir.store %arg0, %0 : !s32i, !cir.ptr +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 // CHECK: %1 = cir.load %0 : !cir.ptr, !s32i -// CHECK: cir.switch.flat %1 : !s32i, ^bb3 [ -// CHECK: 3: ^bb1 +// CHECK: cir.switch.flat %1 : !s32i, ^bb4 [ +// CHECK: 3: ^bb2 // CHECK: ] -// CHECK: ^bb1: // pred: ^bb0 +// CHECK: ^bb2: // pred: ^bb1 // CHECK: cir.return -// CHECK: ^bb2: // no predecessors -// CHECK: cir.br ^bb3 -// CHECK: ^bb3: // 2 preds: ^bb0, ^bb2 +// CHECK: ^bb3: // no predecessors +// CHECK: cir.br ^bb4 +// CHECK: ^bb4: // 2 preds: ^bb1, ^bb3 +// CHECK: cir.br ^bb5 +// CHECK: ^bb5: // pred: ^bb4 // CHECK: cir.return // CHECK: } @@ -181,16 +185,22 @@ module { cir.return %4 : !s32i } // CHECK: cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { -// CHECK: cir.switch.flat %3 : !s32i, ^bb4 [ -// CHECK: 0: ^bb1 +// CHECK: cir.switch.flat %3 : !s32i, ^bb7 [ +// CHECK: 0: ^bb2 // CHECK: ] -// CHECK: ^bb1: // pred: ^bb0 -// CHECK: cir.brcond {{%.*}} ^bb2, ^bb3 // CHECK: ^bb2: // pred: ^bb1 -// CHECK: cir.br ^bb4 -// CHECK: ^bb3: // pred: ^bb1 -// CHECK: cir.br ^bb4 -// CHECK: ^bb4: // 3 preds: ^bb0, ^bb2, ^bb3 +// CHECK: cir.br ^bb3 +// CHECK: ^bb3: // pred: ^bb2 +// CHECK: cir.brcond {{%.*}} ^bb4, ^bb5 +// CHECK: ^bb4: // pred: ^bb3 +// CHECK: cir.br ^bb7 +// CHECK: ^bb5: // pred: ^bb3 +// CHECK: cir.br ^bb6 +// CHECK: ^bb6: // pred: ^bb5 +// CHECK: cir.br ^bb7 +// CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 +// CHECK: cir.br ^bb8 +// CHECK: ^bb8: // pred: ^bb7 // CHECK: cir.return %9 : !s32i // CHECK: } @@ -250,6 +260,8 @@ module { // CHECK-NEXT: ^[[CASE_DEFAULT]]: // CHECK-NEXT: cir.int<3> // CHECK-NEXT: cir.store +// CHECK-NEXT: cir.br ^[[EPILOG]] +// CHECK-NEXT: ^[[EPILOG]]: // CHECK-NEXT: cir.br ^[[EPILOG_END:bb[0-9]+]] // CHECK-NEXT: ^[[EPILOG_END]]: // CHECK: cir.return diff --git a/clang/test/CIR/Transforms/ternary.cir b/clang/test/CIR/Transforms/ternary.cir index 833084602338..67ef7f95a6b5 100644 --- a/clang/test/CIR/Transforms/ternary.cir +++ b/clang/test/CIR/Transforms/ternary.cir @@ -35,6 +35,10 @@ module { // CHECK: cir.br ^bb3(%5 : !s32i) // CHECK: ^bb2: // pred: ^bb0 // CHECK: %6 = cir.const #cir.int<5> : !s32i +// CHECK: cir.br ^bb3(%6 : !s32i) +// CHECK: ^bb3(%7: !s32i): // 2 preds: ^bb1, ^bb2 +// CHECK: cir.br ^bb4 +// CHECK: ^bb4: // pred: ^bb3 // CHECK: cir.store %7, %1 : !s32i, !cir.ptr // CHECK: %8 = cir.load %1 : !cir.ptr, !s32i // CHECK: cir.return %8 : !s32i @@ -56,6 +60,8 @@ module { // CHECK: ^bb2: // pred: ^bb0 // CHECK: cir.br ^bb3 // CHECK: ^bb3: // 2 preds: ^bb1, ^bb2 +// CHECK: cir.br ^bb4 +// CHECK: ^bb4: // pred: ^bb3 // CHECK: cir.return // CHECK: } From 0f70bca277a592fe7cf7d575242b97fa755c0c6f Mon Sep 17 00:00:00 2001 From: David Olsen Date: Mon, 8 Jul 2024 19:29:15 -0700 Subject: [PATCH 1674/2301] [CIR] Fix for __atomic_compare_exchange weak arg (#721) ClangIR was failing on ``` __atomic_compare_exchange_n(&a, &old, 42, true, 5, 5); ``` The `true` was the problem. It would work with a literal `0` or `1`, but not with a literal `true` or `false`. The bug was in `isCstWeak` in CIRGenAtomic.cpp, which was only looking for an integral constant. It didn't recognize a boolean constant and was falling back on the non-constant path, which isn't implemented yet. Rewrite `isCstWeak` to check for both intergral and boolean constants. --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 27 +++++++++++++++++++------- clang/test/CIR/CodeGen/atomic.cpp | 6 +++--- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 36c2e33de195..4f6567d1fd15 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -348,12 +348,25 @@ static mlir::cir::IntAttr getConstOpIntAttr(mlir::Value v) { return constVal; } -static bool isCstWeak(mlir::Value weakVal, uint64_t &val) { - auto intAttr = getConstOpIntAttr(weakVal); - if (!intAttr) - return false; - val = intAttr.getUInt(); - return true; +// Inspect a value that is the strong/weak flag for a compare-exchange. If it +// is a constant of intergral or boolean type, set `val` to the constant's +// boolean value and return true. Otherwise leave `val` unchanged and return +// false. +static bool isCstWeak(mlir::Value weakVal, bool &val) { + mlir::Operation *op = weakVal.getDefiningOp(); + while (auto c = dyn_cast(op)) { + op = c.getOperand().getDefiningOp(); + } + if (auto c = dyn_cast(op)) { + if (mlir::isa(c.getType())) { + val = mlir::cast(c.getValue()).getUInt() != 0; + return true; + } else if (mlir::isa(c.getType())) { + val = mlir::cast(c.getValue()).getValue(); + return true; + } + } + return false; } static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, @@ -470,7 +483,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_compare_exchange_n: case AtomicExpr::AO__scoped_atomic_compare_exchange: case AtomicExpr::AO__scoped_atomic_compare_exchange_n: { - uint64_t weakVal; + bool weakVal; if (isCstWeak(IsWeak, weakVal)) { buildAtomicCmpXchgFailureSet(CGF, E, weakVal, Dest, Ptr, Val1, Val2, FailureOrder, Size, Order, Scope); diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 0b64cda23446..262b130273f1 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -260,7 +260,7 @@ bool fd4(struct S *a, struct S *b, struct S *c) { bool fi4a(int *i) { int cmp = 0; int desired = 1; - return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire); + return __atomic_compare_exchange(i, &cmp, &desired, false, memory_order_acquire, memory_order_acquire); } // CHECK-LABEL: @_Z4fi4aPi @@ -273,7 +273,7 @@ bool fi4a(int *i) { bool fi4b(int *i) { int cmp = 0; - return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire); + return __atomic_compare_exchange_n(i, &cmp, 1, true, memory_order_acquire, memory_order_acquire); } // CHECK-LABEL: @_Z4fi4bPi @@ -470,4 +470,4 @@ void cmp_val_short(short* p, short x, short u) { // LLVM: cmpxchg ptr {{.*}}, i8 {{.*}}, i8 {{.*}} seq_cst seq_cst void cmp_val_byte(char* p, char x, char u) { char r = __sync_val_compare_and_swap(p, x, u); -} \ No newline at end of file +} From 018262247151f328ae1a7a70902c6e63cbc97f67 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 9 Jul 2024 10:34:32 +0800 Subject: [PATCH 1675/2301] [CIR][LLVMLowering] Add LLVM lowering for complex operations (#723) This PR adds LLVM lowering for the following operations related to complex numbers: - `cir.complex.create`, - `cir.complex.real_ptr`, and - `cir.complex.imag_ptr`. The LLVM IR generated for `cir.complex.create` is a bit ugly since it includes the `insertvalue` instruction, which typically is not generated in upstream CodeGen. Later we may need further CIR canonicalization passes to try folding `cir.complex.create`. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 89 ++++++++++++++++++- clang/test/CIR/CodeGen/complex.c | 56 ++++++++++++ 2 files changed, 142 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b6cc6433d43d..26f3e45566f1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1634,6 +1634,80 @@ class CIRGetGlobalOpLowering } }; +class CIRComplexCreateOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ComplexCreateOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto complexLLVMTy = + getTypeConverter()->convertType(op.getResult().getType()); + auto initialComplex = + rewriter.create(op->getLoc(), complexLLVMTy); + + int64_t position[1]{0}; + auto realComplex = rewriter.create( + op->getLoc(), initialComplex, adaptor.getReal(), position); + + position[0] = 1; + auto complex = rewriter.create( + op->getLoc(), realComplex, adaptor.getImag(), position); + + rewriter.replaceOp(op, complex); + return mlir::success(); + } +}; + +class CIRComplexRealPtrOPLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ComplexRealPtrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto operandTy = + mlir::cast(op.getOperand().getType()); + auto resultLLVMTy = + getTypeConverter()->convertType(op.getResult().getType()); + auto elementLLVMTy = + getTypeConverter()->convertType(operandTy.getPointee()); + + mlir::LLVM::GEPArg gepIndices[2]{{0}, {0}}; + rewriter.replaceOpWithNewOp( + op, resultLLVMTy, elementLLVMTy, adaptor.getOperand(), gepIndices, + /*inbounds=*/true); + + return mlir::success(); + } +}; + +class CIRComplexImagPtrOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ComplexImagPtrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto operandTy = + mlir::cast(op.getOperand().getType()); + auto resultLLVMTy = + getTypeConverter()->convertType(op.getResult().getType()); + auto elementLLVMTy = + getTypeConverter()->convertType(operandTy.getPointee()); + + mlir::LLVM::GEPArg gepIndices[2]{{0}, {1}}; + rewriter.replaceOpWithNewOp( + op, resultLLVMTy, elementLLVMTy, adaptor.getOperand(), gepIndices, + /*inbounds=*/true); + + return mlir::success(); + } +}; + class CIRSwitchFlatOpLowering : public mlir::OpConversionPattern { public: @@ -3366,9 +3440,10 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRUnaryOpLowering, CIRBinOpLowering, CIRBinOpOverflowOpLowering, CIRShiftOpLowering, CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, CIRCastOpLowering, - CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, - CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, - CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, + CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRComplexCreateOpLowering, + CIRComplexRealPtrOPLowering, CIRComplexImagPtrOpLowering, + CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, + CIRBrOpLowering, CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, @@ -3445,6 +3520,14 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, converter.addConversion([&](mlir::cir::BF16Type type) -> mlir::Type { return mlir::Float16Type::get(type.getContext()); }); + converter.addConversion([&](mlir::cir::ComplexType type) -> mlir::Type { + // A complex type is lowered to an LLVM struct that contains the real and + // imaginary part as data fields. + mlir::Type elementTy = converter.convertType(type.getElementTy()); + mlir::Type structFields[2] = {elementTy, elementTy}; + return mlir::LLVM::LLVMStructType::getLiteral(type.getContext(), + structFields); + }); converter.addConversion([&](mlir::cir::FuncType type) -> mlir::Type { auto result = converter.convertType(type.getReturnType()); llvm::SmallVector arguments; diff --git a/clang/test/CIR/CodeGen/complex.c b/clang/test/CIR/CodeGen/complex.c index 43fcdb76156f..41ffca8334f3 100644 --- a/clang/test/CIR/CodeGen/complex.c +++ b/clang/test/CIR/CodeGen/complex.c @@ -2,6 +2,8 @@ // RUN: FileCheck --input-file=%t.cir --check-prefixes=C,CHECK %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -fclangir -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPP,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefixes=LLVM %s double _Complex c, c2; int _Complex ci, ci2; @@ -24,6 +26,10 @@ void list_init() { // CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex // CHECK: } +// LLVM: define void @list_init() +// LLVM: store { double, double } { double 1.000000e+00, double 2.000000e+00 }, ptr %{{.+}}, align 8 +// LLVM: } + void list_init_2(double r, double i) { double _Complex c1 = {r, i}; } @@ -36,6 +42,12 @@ void list_init_2(double r, double i) { // CHECK-NEXT: cir.store %[[#C]], %{{.+}} : !cir.complex, !cir.ptr> // CHECK: } +// LLVM: define void @list_init_2(double %{{.+}}, double %{{.+}}) +// LLVM: %[[#A:]] = insertvalue { double, double } undef, double %{{.+}}, 0 +// LLVM-NEXT: %[[#B:]] = insertvalue { double, double } %[[#A]], double %{{.+}}, 1 +// LLVM-NEXT: store { double, double } %[[#B]], ptr %5, align 8 +// LLVM: } + void imag_literal() { c = 3.0i; ci = 3i; @@ -51,6 +63,11 @@ void imag_literal() { // CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex // CHECK: } +// LLVM: define void @imag_literal() +// LLVM: store { double, double } { double 0.000000e+00, double 3.000000e+00 }, ptr @c, align 8 +// LLVM: store { i32, i32 } { i32 0, i32 3 }, ptr @ci, align 4 +// LLVM: } + void load_store() { c = c2; ci = ci2; @@ -68,6 +85,13 @@ void load_store() { // CHECK-NEXT: cir.store %[[#CI2]], %[[#CI_PTR]] : !cir.complex, !cir.ptr> // CHECK: } +// LLVM: define void @load_store() +// LLVM: %[[#A:]] = load { double, double }, ptr @c2, align 8 +// LLVM-NEXT: store { double, double } %[[#A]], ptr @c, align 8 +// LLVM-NEXT: %[[#B:]] = load { i32, i32 }, ptr @ci2, align 4 +// LLVM-NEXT: store { i32, i32 } %[[#B]], ptr @ci, align 4 +// LLVM: } + void load_store_volatile() { vc = vc2; vci = vci2; @@ -85,6 +109,13 @@ void load_store_volatile() { // CHECK-NEXT: cir.store volatile %[[#VCI2]], %[[#VCI_PTR]] : !cir.complex, !cir.ptr> // CHECK: } +// LLVM: define void @load_store_volatile() +// LLVM: %[[#A:]] = load volatile { double, double }, ptr @vc2, align 8 +// LLVM-NEXT: store volatile { double, double } %[[#A]], ptr @vc, align 8 +// LLVM-NEXT: %[[#B:]] = load volatile { i32, i32 }, ptr @vci2, align 4 +// LLVM-NEXT: store volatile { i32, i32 } %[[#B]], ptr @vci, align 4 +// LLVM: } + void real_ptr() { double *r1 = &__real__ c; int *r2 = &__real__ ci; @@ -98,6 +129,11 @@ void real_ptr() { // CHECK-NEXT: %{{.+}} = cir.complex.real_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr // CHECK: } +// LLVM: define void @real_ptr() +// LLVM: store ptr @c, ptr %{{.+}}, align 8 +// LLVM-NEXT: store ptr @ci, ptr %{{.+}}, align 8 +// LLVM: } + void real_ptr_local() { double _Complex c1 = {1.0, 2.0}; double *r3 = &__real__ c1; @@ -109,6 +145,11 @@ void real_ptr_local() { // CHECK: %{{.+}} = cir.complex.real_ptr %[[#C]] : !cir.ptr> -> !cir.ptr // CHECK: } +// LLVM: define void @real_ptr_local() +// LLVM: store { double, double } { double 1.000000e+00, double 2.000000e+00 }, ptr %{{.+}}, align 8 +// LLVM-NEXT: %{{.+}} = getelementptr inbounds { double, double }, ptr %{{.+}}, i32 0, i32 0 +// LLVM: } + void extract_real() { double r1 = __real__ c; int r2 = __real__ ci; @@ -124,6 +165,11 @@ void extract_real() { // CHECK-NEXT: %{{.+}} = cir.load %[[#REAL_PTR]] : !cir.ptr, !s32i // CHECK: } +// LLVM: define void @extract_real() +// LLVM: %{{.+}} = load double, ptr @c, align 8 +// LLVM: %{{.+}} = load i32, ptr @ci, align 4 +// LLVM: } + void imag_ptr() { double *i1 = &__imag__ c; int *i2 = &__imag__ ci; @@ -137,6 +183,11 @@ void imag_ptr() { // CHECK-NEXT: %{{.+}} = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr // CHECK: } +// LLVM: define void @imag_ptr() +// LLVM: store ptr getelementptr inbounds ({ double, double }, ptr @c, i32 0, i32 1), ptr %{{.+}}, align 8 +// LLVM: store ptr getelementptr inbounds ({ i32, i32 }, ptr @ci, i32 0, i32 1), ptr %{{.+}}, align 8 +// LLVM: } + void extract_imag() { double i1 = __imag__ c; int i2 = __imag__ ci; @@ -151,3 +202,8 @@ void extract_imag() { // CHECK-NEXT: %[[#IMAG_PTR:]] = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %{{.+}} = cir.load %[[#IMAG_PTR]] : !cir.ptr, !s32i // CHECK: } + +// LLVM: define void @extract_imag() +// LLVM: %{{.+}} = load double, ptr getelementptr inbounds ({ double, double }, ptr @c, i32 0, i32 1), align 8 +// LLVM: %{{.+}} = load i32, ptr getelementptr inbounds ({ i32, i32 }, ptr @ci, i32 0, i32 1), align 4 +// LLVM: } From 7dd63dbeaff54905eac3dc466a7ae1096da4fc30 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 11 Jul 2024 02:35:49 +0800 Subject: [PATCH 1676/2301] [CIR][NFC] Fix bug in MLIR lowering of cir.call (#728) This PR fixes the bug described as in https://github.com/llvm/clangir/issues/727#issuecomment-2219515908. It should resolve the crash reported in #727 . --- .../CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 2 +- clang/test/CIR/Lowering/ThroughMLIR/call.c | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/call.c diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 4693e58ebf61..c59df1ec1ba8 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -99,7 +99,7 @@ class CIRCallOpLowering : public mlir::OpConversionPattern { getTypeConverter()->convertTypes(op.getResultTypes(), types))) return mlir::failure(); rewriter.replaceOpWithNewOp( - op, mlir::SymbolRefAttr::get(op), types, adaptor.getOperands()); + op, op.getCalleeAttr(), types, adaptor.getOperands()); return mlir::LogicalResult::success(); } }; diff --git a/clang/test/CIR/Lowering/ThroughMLIR/call.c b/clang/test/CIR/Lowering/ThroughMLIR/call.c new file mode 100644 index 000000000000..a325db5f2dd4 --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/call.c @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +void foo(int i) {} + +int test(void) { + foo(2); + return 0; +} + +// CHECK-LABEL: func.func @test() -> i32 { +// CHECK: %[[ARG:.+]] = arith.constant 2 : i32 +// CHECK-NEXT: call @foo(%[[ARG]]) : (i32) -> () +// CHECK: } From d280a38031dfed8987543086efb78674721998d2 Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 11 Jul 2024 03:35:43 +0800 Subject: [PATCH 1677/2301] [CIR][Dialect] Emit OpenCL kernel metadata (#705) This PR introduces a new attribute `OpenCLKernelMetadataAttr` to model the OpenCL kernel metadata structurally in CIR, with its corresponding implementations of CodeGen, Lowering and Translation. The `"TypeAttr":$vec_type_hint` part is tricky because of the absence of the signless feature of LLVM IR, while SPIR-V requires it. According to the spec, the final LLVM IR should encode signedness with an extra `i32` boolean value. In this PR, the droping logic from CIR's `TypeConverter` is still used to avoid code duplication when lowering to LLVM dialect. However, the signedness is then restored (still capsuled by a CIR attribute) and dropped again in the translation into LLVM IR. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 2 + .../clang/CIR/Dialect/IR/CIROpenCLAttrs.td | 95 +++++++++++++++++++ clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 67 ++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 + clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 55 +++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 41 +++++++- .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 59 ++++++++++++ .../CIR/CodeGen/OpenCL/kernel-attributes.cl | 35 +++++++ .../CIR/IR/invalid-opencl-vec-type-hint.cir | 7 ++ clang/test/CIR/IR/invalid.cir | 45 +++++++++ 11 files changed, 406 insertions(+), 5 deletions(-) create mode 100644 clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td create mode 100644 clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl create mode 100644 clang/test/CIR/IR/invalid-opencl-vec-type-hint.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index b5cff38517e2..ca9e991e6565 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -979,4 +979,6 @@ def BitfieldInfoAttr : CIR_Attr<"BitfieldInfo", "bitfield_info"> { ]; } +include "clang/CIR/Dialect/IR/CIROpenCLAttrs.td" + #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td new file mode 100644 index 000000000000..1f32701909b7 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td @@ -0,0 +1,95 @@ +//===- CIROpenCLAttrs.td - CIR dialect attrs for OpenCL ----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the CIR dialect attributes for OpenCL. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_CIR_DIALECT_CIR_OPENCL_ATTRS +#define MLIR_CIR_DIALECT_CIR_OPENCL_ATTRS + +//===----------------------------------------------------------------------===// +// OpenCLKernelMetadataAttr +//===----------------------------------------------------------------------===// + +def OpenCLKernelMetadataAttr + : CIR_Attr<"OpenCLKernelMetadata", "cl.kernel_metadata"> { + + let summary = "OpenCL kernel metadata"; + let description = [{ + Provide the required information of an OpenCL kernel for the SPIR-V backend. + + The `work_group_size_hint` and `reqd_work_group_size` parameter are integer + arrays with 3 elements that provide hints for the work-group size and the + required work-group size, respectively. + + The `vec_type_hint` parameter is a type attribute that provides a hint for + the vectorization. It can be a CIR or LLVM type, depending on the lowering + stage. + + The `vec_type_hint_signedness` parameter is a boolean that indicates the + signedness of the vector type hint. It's useful when LLVM type is set in + `vec_type_hint`, which is signless by design. It should be set if and only + if the `vec_type_hint` is present. + + The `intel_reqd_sub_group_size` parameter is an integer that restricts the + sub-group size to the specified value. + + Example: + ``` + #fn_attr = #cir})> + + cir.func @kernel(%arg0: !s32i) extra(#fn_attr) { + cir.return + } + ``` + }]; + + let parameters = (ins + OptionalParameter<"ArrayAttr">:$work_group_size_hint, + OptionalParameter<"ArrayAttr">:$reqd_work_group_size, + OptionalParameter<"TypeAttr">:$vec_type_hint, + OptionalParameter<"std::optional">:$vec_type_hint_signedness, + OptionalParameter<"IntegerAttr">:$intel_reqd_sub_group_size + ); + + let assemblyFormat = "`<` struct(params) `>`"; + + let genVerifyDecl = 1; + + let extraClassDeclaration = [{ + /// Extract the signedness from int or int vector types. + static std::optional isSignedHint(mlir::Type vecTypeHint); + }]; + + let extraClassDefinition = [{ + std::optional $cppClass::isSignedHint(mlir::Type hintQTy) { + // Only types in CIR carry signedness + if (!mlir::isa(hintQTy.getDialect())) + return std::nullopt; + + // See also clang::CodeGen::CodeGenFunction::EmitKernelMetadata + auto hintEltQTy = mlir::dyn_cast(hintQTy); + auto isCIRSignedIntType = [](mlir::Type t) { + return mlir::isa(t) && + mlir::cast(t).isSigned(); + }; + return isCIRSignedIntType(hintQTy) || + (hintEltQTy && isCIRSignedIntType(hintEltQTy.getEltType())); + } + }]; + +} + +#endif // MLIR_CIR_DIALECT_CIR_OPENCL_ATTRS diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 3c096884a04b..5e2c4f7143a9 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -142,6 +142,7 @@ struct MissingFeatures { static bool getFPFeaturesInEffect() { return false; } static bool cxxABI() { return false; } static bool openCL() { return false; } + static bool openCLGenKernelMetadata() { return false; } static bool CUDA() { return false; } static bool openMP() { return false; } static bool openMPRuntime() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index f5efa14796ab..27049934a556 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -993,8 +993,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, llvm_unreachable("NYI"); if (FD && getLangOpts().OpenCL) { - // TODO(cir): Emit OpenCL kernel metadata - assert(!MissingFeatures::openCL()); + buildKernelMetadata(FD, Fn); } // If we are checking function types, emit a function type signature as @@ -1720,3 +1719,67 @@ CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, return numElements; } + +void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, + mlir::cir::FuncOp Fn) { + if (!FD->hasAttr() && !FD->hasAttr()) + return; + + // TODO(cir): CGM.genKernelArgMetadata(Fn, FD, this); + assert(!MissingFeatures::openCLGenKernelMetadata()); + + if (!getLangOpts().OpenCL) + return; + + using mlir::cir::OpenCLKernelMetadataAttr; + + mlir::ArrayAttr workGroupSizeHintAttr, reqdWorkGroupSizeAttr; + mlir::TypeAttr vecTypeHintAttr; + std::optional vecTypeHintSignedness; + mlir::IntegerAttr intelReqdSubGroupSizeAttr; + + if (const VecTypeHintAttr *A = FD->getAttr()) { + mlir::Type typeHintValue = getTypes().ConvertType(A->getTypeHint()); + vecTypeHintAttr = mlir::TypeAttr::get(typeHintValue); + vecTypeHintSignedness = + OpenCLKernelMetadataAttr::isSignedHint(typeHintValue); + } + + if (const WorkGroupSizeHintAttr *A = FD->getAttr()) { + workGroupSizeHintAttr = builder.getI32ArrayAttr({ + static_cast(A->getXDim()), + static_cast(A->getYDim()), + static_cast(A->getZDim()), + }); + } + + if (const ReqdWorkGroupSizeAttr *A = FD->getAttr()) { + reqdWorkGroupSizeAttr = builder.getI32ArrayAttr({ + static_cast(A->getXDim()), + static_cast(A->getYDim()), + static_cast(A->getZDim()), + }); + } + + if (const OpenCLIntelReqdSubGroupSizeAttr *A = + FD->getAttr()) { + intelReqdSubGroupSizeAttr = builder.getI32IntegerAttr(A->getSubGroupSize()); + } + + // Skip the metadata attr if no hints are present. + if (!vecTypeHintAttr && !workGroupSizeHintAttr && !reqdWorkGroupSizeAttr && + !intelReqdSubGroupSizeAttr) + return; + + // Append the kernel metadata to the extra attributes dictionary. + mlir::NamedAttrList attrs; + attrs.append(Fn.getExtraAttrs().getElements()); + + auto kernelMetadataAttr = OpenCLKernelMetadataAttr::get( + builder.getContext(), workGroupSizeHintAttr, reqdWorkGroupSizeAttr, + vecTypeHintAttr, vecTypeHintSignedness, intelReqdSubGroupSizeAttr); + attrs.append(kernelMetadataAttr.getMnemonic(), kernelMetadataAttr); + + Fn.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), attrs.getDictionary(builder.getContext()))); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 8d40f29439ce..77da0e2185a1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -100,6 +100,10 @@ class CIRGenFunction : public CIRGenTypeCache { // enter/leave scopes. llvm::DenseMap VLASizeMap; + /// Add OpenCL kernel arg metadata and the kernel attribute metadata to + /// the function metadata. + void buildKernelMetadata(const FunctionDecl *FD, mlir::cir::FuncOp Fn); + public: /// A non-RAII class containing all the information about a bound /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 159382ba2fb7..3463c97b6d7b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -15,6 +15,7 @@ #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" @@ -501,6 +502,60 @@ LogicalResult DynamicCastInfoAttr::verify( return success(); } +//===----------------------------------------------------------------------===// +// OpenCLKernelMetadataAttr definitions +//===----------------------------------------------------------------------===// + +LogicalResult OpenCLKernelMetadataAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ArrayAttr workGroupSizeHint, ArrayAttr reqdWorkGroupSize, + TypeAttr vecTypeHint, std::optional vecTypeHintSignedness, + IntegerAttr intelReqdSubGroupSize) { + // If no field is present, the attribute is considered invalid. + if (!workGroupSizeHint && !reqdWorkGroupSize && !vecTypeHint && + !vecTypeHintSignedness && !intelReqdSubGroupSize) { + return emitError() + << "metadata attribute without any field present is invalid"; + } + + // Check for 3-dim integer tuples + auto is3dimIntTuple = [](ArrayAttr arr) { + auto isInt = [](Attribute dim) { return mlir::isa(dim); }; + return arr.size() == 3 && llvm::all_of(arr, isInt); + }; + if (workGroupSizeHint && !is3dimIntTuple(workGroupSizeHint)) { + return emitError() + << "work_group_size_hint must have exactly 3 integer elements"; + } + if (reqdWorkGroupSize && !is3dimIntTuple(reqdWorkGroupSize)) { + return emitError() + << "reqd_work_group_size must have exactly 3 integer elements"; + } + + // Check for co-presence of vecTypeHintSignedness + if (!!vecTypeHint != vecTypeHintSignedness.has_value()) { + return emitError() << "vec_type_hint_signedness should be present if and " + "only if vec_type_hint is set"; + } + + if (vecTypeHint) { + Type vecTypeHintValue = vecTypeHint.getValue(); + if (mlir::isa(vecTypeHintValue.getDialect())) { + // Check for signedness alignment in CIR + if (isSignedHint(vecTypeHintValue) != vecTypeHintSignedness) { + return emitError() << "vec_type_hint_signedness must match the " + "signedness of the vec_type_hint type"; + } + // Check for the dialect of type hint + } else if (!LLVM::isCompatibleType(vecTypeHintValue)) { + return emitError() << "vec_type_hint must be a type from the CIR or LLVM " + "dialect"; + } + } + + return success(); +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 26f3e45566f1..8a7df1c6ce31 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1524,12 +1524,13 @@ class CIRFuncLowering : public mlir::OpConversionPattern { /// to the name of the attribute in ODS. static StringRef getLinkageAttrNameString() { return "linkage"; } + /// Convert the `cir.func` attributes to `llvm.func` attributes. /// Only retain those attributes that are not constructed by /// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out /// argument attributes. void - filterFuncAttributes(mlir::cir::FuncOp func, bool filterArgAndResAttrs, - SmallVectorImpl &result) const { + lowerFuncAttributes(mlir::cir::FuncOp func, bool filterArgAndResAttrs, + SmallVectorImpl &result) const { for (auto attr : func->getAttrs()) { if (attr.getName() == mlir::SymbolTable::getSymbolAttrName() || attr.getName() == func.getFunctionTypeAttrName() || @@ -1544,11 +1545,45 @@ class CIRFuncLowering : public mlir::OpConversionPattern { if (attr.getName() == func.getExtraAttrsAttrName()) { std::string cirName = "cir." + func.getExtraAttrsAttrName().str(); attr.setName(mlir::StringAttr::get(getContext(), cirName)); + + lowerFuncOpenCLKernelMetadata(attr); } result.push_back(attr); } } + /// When do module translation, we can only translate LLVM-compatible types. + /// Here we lower possible OpenCLKernelMetadataAttr to use the converted type. + void + lowerFuncOpenCLKernelMetadata(mlir::NamedAttribute &extraAttrsEntry) const { + const auto attrKey = mlir::cir::OpenCLKernelMetadataAttr::getMnemonic(); + auto oldExtraAttrs = + cast(extraAttrsEntry.getValue()); + if (!oldExtraAttrs.getElements().contains(attrKey)) + return; + + mlir::NamedAttrList newExtraAttrs; + for (auto entry : oldExtraAttrs.getElements()) { + if (entry.getName() == attrKey) { + auto clKernelMetadata = + cast(entry.getValue()); + if (auto vecTypeHint = clKernelMetadata.getVecTypeHint()) { + auto newType = typeConverter->convertType(vecTypeHint.getValue()); + auto newTypeHint = mlir::TypeAttr::get(newType); + auto newCLKMAttr = mlir::cir::OpenCLKernelMetadataAttr::get( + getContext(), clKernelMetadata.getWorkGroupSizeHint(), + clKernelMetadata.getReqdWorkGroupSize(), newTypeHint, + clKernelMetadata.getVecTypeHintSignedness(), + clKernelMetadata.getIntelReqdSubGroupSize()); + entry.setValue(newCLKMAttr); + } + } + newExtraAttrs.push_back(entry); + } + extraAttrsEntry.setValue(mlir::cir::ExtraFuncAttributesAttr::get( + getContext(), newExtraAttrs.getDictionary(getContext()))); + } + mlir::LogicalResult matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { @@ -1586,7 +1621,7 @@ class CIRFuncLowering : public mlir::OpConversionPattern { auto linkage = convertLinkage(op.getLinkage()); SmallVector attributes; - filterFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes); + lowerFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes); auto fn = rewriter.create( Loc, op.getName(), llvmFnTy, linkage, isDsoLocal, mlir::LLVM::CConv::C, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index ea1c04d76fc9..62a988b37533 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -61,6 +61,11 @@ class CIRDialectLLVMIRTranslationInterface llvmFunc->addFnAttr(llvm::Attribute::OptimizeNone); } else if (mlir::dyn_cast(attr.getValue())) { llvmFunc->addFnAttr(llvm::Attribute::NoUnwind); + } else if (auto clKernelMetadata = + mlir::dyn_cast( + attr.getValue())) { + emitOpenCLKernelMetadata(clKernelMetadata, llvmFunc, + moduleTranslation); } } } @@ -83,6 +88,60 @@ class CIRDialectLLVMIRTranslationInterface return mlir::success(); } + +private: + void emitOpenCLKernelMetadata( + mlir::cir::OpenCLKernelMetadataAttr clKernelMetadata, + llvm::Function *llvmFunc, + mlir::LLVM::ModuleTranslation &moduleTranslation) const { + auto &vmCtx = moduleTranslation.getLLVMContext(); + + auto lowerArrayAttr = [&](mlir::ArrayAttr arrayAttr) { + llvm::SmallVector attrMDArgs; + for (mlir::Attribute attr : arrayAttr) { + int64_t value = mlir::cast(attr).getInt(); + attrMDArgs.push_back( + llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( + llvm::IntegerType::get(vmCtx, 32), llvm::APInt(32, value)))); + } + return llvm::MDNode::get(vmCtx, attrMDArgs); + }; + + if (auto workGroupSizeHint = clKernelMetadata.getWorkGroupSizeHint()) { + llvmFunc->setMetadata("work_group_size_hint", + lowerArrayAttr(workGroupSizeHint)); + } + + if (auto reqdWorkGroupSize = clKernelMetadata.getReqdWorkGroupSize()) { + llvmFunc->setMetadata("reqd_work_group_size", + lowerArrayAttr(reqdWorkGroupSize)); + } + + if (auto vecTypeHint = clKernelMetadata.getVecTypeHint()) { + auto hintQTy = vecTypeHint.getValue(); + bool isSignedInteger = *clKernelMetadata.getVecTypeHintSignedness(); + llvm::Metadata *attrMDArgs[] = { + llvm::ConstantAsMetadata::get( + llvm::UndefValue::get(moduleTranslation.convertType(hintQTy))), + llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( + llvm::IntegerType::get(vmCtx, 32), + llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))))}; + llvmFunc->setMetadata("vec_type_hint", + llvm::MDNode::get(vmCtx, attrMDArgs)); + } + + if (auto intelReqdSubgroupSize = + clKernelMetadata.getIntelReqdSubGroupSize()) { + int64_t reqdSubgroupSize = intelReqdSubgroupSize.getInt(); + llvm::Metadata *attrMDArgs[] = { + llvm::ConstantAsMetadata::get( + llvm::ConstantInt::get(llvm::IntegerType::get(vmCtx, 32), + llvm::APInt(32, reqdSubgroupSize))), + }; + llvmFunc->setMetadata("intel_reqd_sub_group_size", + llvm::MDNode::get(vmCtx, attrMDArgs)); + } + } }; void registerCIRDialectTranslation(mlir::DialectRegistry ®istry) { diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl new file mode 100644 index 000000000000..6badc7ce47ba --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -fclangir -emit-cir -triple x86_64-unknown-linux-gnu %s -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR +// RUN: %clang_cc1 -fclangir -emit-llvm -triple x86_64-unknown-linux-gnu %s -o %t.ll +// RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM + +typedef unsigned int uint4 __attribute__((ext_vector_type(4))); + + +kernel __attribute__((vec_type_hint(int))) __attribute__((reqd_work_group_size(1,2,4))) void kernel1(int a) {} + +// CIR-DAG: #fn_attr[[KERNEL1:[0-9]*]] = {{.+}}cl.kernel_metadata = #cir.cl.kernel_metadata{{.+}} +// CIR-DAG: cir.func @kernel1{{.+}} extra(#fn_attr[[KERNEL1]]) + +// LLVM-DAG: define{{.*}}@kernel1(i32 {{[^%]*}}%0) {{[^{]+}} !reqd_work_group_size ![[MD1_REQD_WG:[0-9]+]] !vec_type_hint ![[MD1_VEC_TYPE:[0-9]+]] +// LLVM-DAG: [[MD1_VEC_TYPE]] = !{i32 undef, i32 1} +// LLVM-DAG: [[MD1_REQD_WG]] = !{i32 1, i32 2, i32 4} + + +kernel __attribute__((vec_type_hint(uint4))) __attribute__((work_group_size_hint(8,16,32))) void kernel2(int a) {} + +// CIR-DAG: #fn_attr[[KERNEL2:[0-9]*]] = {{.+}}cl.kernel_metadata = #cir.cl.kernel_metadata, vec_type_hint_signedness = 0>{{.+}} +// CIR-DAG: cir.func @kernel2{{.+}} extra(#fn_attr[[KERNEL2]]) + +// LLVM-DAG: define{{.*}}@kernel2(i32 {{[^%]*}}%0) {{[^{]+}} !vec_type_hint ![[MD2_VEC_TYPE:[0-9]+]] !work_group_size_hint ![[MD2_WG_SIZE:[0-9]+]] +// LLVM-DAG: [[MD2_VEC_TYPE]] = !{<4 x i32> undef, i32 0} +// LLVM-DAG: [[MD2_WG_SIZE]] = !{i32 8, i32 16, i32 32} + + +kernel __attribute__((intel_reqd_sub_group_size(8))) void kernel3(int a) {} + +// CIR-DAG: #fn_attr[[KERNEL3:[0-9]*]] = {{.+}}cl.kernel_metadata = #cir.cl.kernel_metadata{{.+}} +// CIR-DAG: cir.func @kernel3{{.+}} extra(#fn_attr[[KERNEL3]]) + +// LLVM-DAG: define{{.*}}@kernel3(i32 {{[^%]*}}%0) {{[^{]+}} !intel_reqd_sub_group_size ![[MD3_INTEL:[0-9]+]] +// LLVM-DAG: [[MD3_INTEL]] = !{i32 8} diff --git a/clang/test/CIR/IR/invalid-opencl-vec-type-hint.cir b/clang/test/CIR/IR/invalid-opencl-vec-type-hint.cir new file mode 100644 index 000000000000..9e57ad793bf8 --- /dev/null +++ b/clang/test/CIR/IR/invalid-opencl-vec-type-hint.cir @@ -0,0 +1,7 @@ +// RUN: cir-opt %s -verify-diagnostics -allow-unregistered-dialect + +// expected-error@+1 {{vec_type_hint must be a type from the CIR or LLVM dialect}} +#fn_attr = #cir.cl.kernel_metadata< + vec_type_hint = !tensor<7xi8>, + vec_type_hint_signedness = 0 +> diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 50ee1d3fed34..45dda0a39e42 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1171,3 +1171,48 @@ cir.func @address_space3(%p : !cir.ptr) { // expected- cir.func @address_space4(%p : !cir.ptr) { // expected-error {{invalid addrspace kind keyword: foobar}} cir.return } + +// ----- + +// expected-error@+1 {{metadata attribute without any field present is invalid}} +#fn_attr = #cir.cl.kernel_metadata<> + +// ----- + +// expected-error@+1 {{work_group_size_hint must have exactly 3 integer elements}} +#fn_attr = #cir.cl.kernel_metadata< + work_group_size_hint = [2 : i32] +> + +// ----- + +// expected-error@+1 {{reqd_work_group_size must have exactly 3 integer elements}} +#fn_attr = #cir.cl.kernel_metadata< + reqd_work_group_size = [3.0 : f32, 1.7 : f32] +> + +// ----- + +// expected-error@+1 {{vec_type_hint_signedness should be present if and only if vec_type_hint is set}} +#fn_attr = #cir.cl.kernel_metadata< + vec_type_hint_signedness = 1 +> + +// ----- + +!s32i = !cir.int + +// expected-error@+1 {{vec_type_hint_signedness should be present if and only if vec_type_hint is set}} +#fn_attr = #cir.cl.kernel_metadata< + vec_type_hint = !s32i +> + +// ----- + +!s32i = !cir.int + +// expected-error@+1 {{vec_type_hint_signedness must match the signedness of the vec_type_hint type}} +#fn_attr = #cir.cl.kernel_metadata< + vec_type_hint = !s32i, + vec_type_hint_signedness = 0 +> From fee4c71b3d2a93cef962c76b90bb9d2ed873cb57 Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Thu, 11 Jul 2024 03:41:47 +0800 Subject: [PATCH 1678/2301] [CIR] Extend -cir-mlir-scf-prepare to support hoisting loop invariant BinOp (#720) This commit extends the pass to support loop invariant BinOp hoisting as SCF forOp boundary. E.g. // (100 - 1) should be hoisted out of loop. // So the boundary could be input operand to generate SCF forOp. for (int i = 0; i < 100 - 1; ++i) {} --- .../lib/CIR/Dialect/Transforms/SCFPrepare.cpp | 39 +++++++--- clang/test/CIR/Transforms/scf-prepare.cir | 71 +++++++++++++++++++ 2 files changed, 100 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp index de46433dc9a7..19ebf75a1c2c 100644 --- a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp @@ -147,6 +147,31 @@ struct hoistLoopInvariantInCondBlock : public OpRewritePattern { return true; } + // Return true for loop invariant operation and push it to initOps. + bool isLoopInvariantOp(Operation *op, ForOp forOp, + SmallVector &initOps) const { + if (!op) + return false; + if (isa(op) || isLoopInvariantLoad(op, forOp)) { + initOps.push_back(op); + return true; + } else if (isa(op) && + isLoopInvariantOp(op->getOperand(0).getDefiningOp(), forOp, + initOps) && + isLoopInvariantOp(op->getOperand(1).getDefiningOp(), forOp, + initOps)) { + initOps.push_back(op); + return true; + } else if (isa(op) && + isLoopInvariantOp(op->getOperand(0).getDefiningOp(), forOp, + initOps)) { + initOps.push_back(op); + return true; + } + + return false; + } + LogicalResult matchAndRewrite(ForOp forOp, PatternRewriter &rewriter) const final { auto *cond = &forOp.getCond().front(); @@ -164,16 +189,10 @@ struct hoistLoopInvariantInCondBlock : public OpRewritePattern { Value cmpRhs = loopCmp.getRhs(); auto defOp = cmpRhs.getDefiningOp(); - SmallVector ops; - // Go through the cast if exist. - if (defOp && isa(defOp)) { - ops.push_back(defOp); - defOp = defOp->getOperand(0).getDefiningOp(); - } - if (defOp && - (isa(defOp) || isLoopInvariantLoad(defOp, forOp))) { - ops.push_back(defOp); - for (auto op : reverse(ops)) + SmallVector initOps; + // Collect loop invariant operations and move them before forOp. + if (isLoopInvariantOp(defOp, forOp, initOps)) { + for (auto op : initOps) op->moveBefore(forOp); return success(); } diff --git a/clang/test/CIR/Transforms/scf-prepare.cir b/clang/test/CIR/Transforms/scf-prepare.cir index 91bbdab5a538..063420b1c516 100644 --- a/clang/test/CIR/Transforms/scf-prepare.cir +++ b/clang/test/CIR/Transforms/scf-prepare.cir @@ -137,4 +137,75 @@ module { } cir.return } + + // for (int i = 0; i < 100 - 1; ++i) {} + // + // Check that the loop upper bound operations(100 - 1) will be hoisted out + // of loop. + cir.func @loopInvariantBinOp() { + // CHECK: %[[C100:.*]] = cir.const #cir.int<100> : !s32i + // CHECK: %[[C1:.*]] = cir.const #cir.int<1> : !s32i + // CHECK: %[[UPPER_BOUND:.*]] = cir.binop(sub, %[[C100]], %[[C1]]) nsw : !s32i + // CHECK: cir.for : cond { + + cir.scope { + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %1 = cir.const #cir.int<0> : !s32i + cir.store %1, %0 : !s32i, !cir.ptr + cir.for : cond { + %2 = cir.load %0 : !cir.ptr, !s32i + %3 = cir.const #cir.int<100> : !s32i + %4 = cir.const #cir.int<1> : !s32i + %5 = cir.binop(sub, %3, %4) nsw : !s32i + %6 = cir.cmp(lt, %2, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.condition(%7) + } body { + cir.scope { + } + cir.yield + } step { + %2 = cir.load %0 : !cir.ptr, !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, !cir.ptr + cir.yield + } + } + cir.return + } + + // It's a hand-writing test case to check that the operation has block + // argument as operand won't be hoisted out of loop. + // Note that the current codegen will store the argument first and then + // load the value to user. This test case is manually created to check + // that the hoisting pass won't break when encounter block argument. + cir.func @loopInvariantBinOp_blockArg(%arg0: !s32i) { + // CHECK: cir.for : cond { + // CHECK: %[[C100:.*]] = cir.const #cir.int<100> : !s32i + // CHECK: %[[UPPER_BOUND:.*]] = cir.binop(sub, %[[C100]], %arg0) nsw : !s32i + + cir.scope { + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + %1 = cir.const #cir.int<0> : !s32i + cir.store %1, %0 : !s32i, !cir.ptr + cir.for : cond { + %2 = cir.load %0 : !cir.ptr, !s32i + %3 = cir.const #cir.int<100> : !s32i + %5 = cir.binop(sub, %3, %arg0) nsw : !s32i + %6 = cir.cmp(lt, %2, %5) : !s32i, !s32i + %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool + cir.condition(%7) + } body { + cir.scope { + } + cir.yield + } step { + %2 = cir.load %0 : !cir.ptr, !s32i + %3 = cir.unary(inc, %2) : !s32i, !s32i + cir.store %3, %0 : !s32i, !cir.ptr + cir.yield + } + } + cir.return + } } From ea3402a2558e13697f2e551075a9e8fd2768cb0f Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Thu, 11 Jul 2024 04:31:03 +0800 Subject: [PATCH 1679/2301] [CIR][ThroughMLIR] Lower CIR IV load with SCF IV move operation (#729) Previously, when lowering induction variable in forOp, we removed the IV load and replaced the users with SCF.IV. The CIR IV users might still CIR operations during lowering forOp. It caused the issue that CIR operation contained SCF.IV as operand which is MLIR integer type instead CIR type. This comment lower CIR load IV_ADDR with ARITH addi SCF.IV, 0 So SCF.IV can be propagated by OpAdaptor when lowering individual IV users. This simplifies the lowering and fixes the issue. The redundant arith.addi can be removed by later MLIR passes. --- .../ThroughMLIR/LowerCIRLoopToSCF.cpp | 28 +-- clang/test/CIR/Lowering/ThroughMLIR/for.cir | 237 ------------------ clang/test/CIR/Lowering/ThroughMLIR/for.cpp | 103 ++++++++ 3 files changed, 111 insertions(+), 257 deletions(-) delete mode 100644 clang/test/CIR/Lowering/ThroughMLIR/for.cir create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/for.cpp diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp index f308076ef62c..4aee2701c86a 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp @@ -204,19 +204,6 @@ void SCFLoop::analysis() { assert(upperBound && "can't find loop upper bound"); } -// Return true if op operation is in the loop body. -static bool isInLoopBody(mlir::Operation *op) { - mlir::Operation *parentOp = op->getParentOp(); - if (!parentOp) - return false; - if (isa(parentOp)) - return true; - auto forOp = dyn_cast(parentOp); - if (forOp && (&forOp.getBody() == op->getParentRegion())) - return true; - return false; -} - void SCFLoop::transferToSCFForOp() { auto ub = getUpperBound(); auto lb = getLowerBound(); @@ -236,12 +223,13 @@ void SCFLoop::transferToSCFForOp() { "Not support lowering loop with break, continue or if yet"); // Replace the IV usage to scf loop induction variable. if (isIVLoad(op, IVAddr)) { - auto newIV = scfForOp.getInductionVar(); - op->getResult(0).replaceAllUsesWith(newIV); - // Only erase the IV load in the loop body because all the operations - // in loop step and condition regions will be erased. - if (isInLoopBody(op)) - rewriter->eraseOp(op); + // Replace CIR IV load with arith.addi scf.IV, 0. + // The replacement makes the SCF IV can be automatically propogated + // by OpAdaptor for individual IV user lowering. + // The redundant arith.addi can be removed by later MLIR passes. + rewriter->setInsertionPoint(op); + auto newIV = plusConstant(scfForOp.getInductionVar(), loc, 0); + rewriter->replaceOp(op, newIV.getDefiningOp()); } return mlir::WalkResult::advance(); }); @@ -318,4 +306,4 @@ void populateCIRLoopToSCFConversionPatterns(mlir::RewritePatternSet &patterns, converter, patterns.getContext()); } -} // namespace cir \ No newline at end of file +} // namespace cir diff --git a/clang/test/CIR/Lowering/ThroughMLIR/for.cir b/clang/test/CIR/Lowering/ThroughMLIR/for.cir deleted file mode 100644 index 48d5f2d709f8..000000000000 --- a/clang/test/CIR/Lowering/ThroughMLIR/for.cir +++ /dev/null @@ -1,237 +0,0 @@ -// RUN: cir-opt %s -cir-to-mlir --canonicalize | FileCheck %s -check-prefix=MLIR -// RUN: cir-opt %s -cir-to-mlir --canonicalize -cir-mlir-to-llvm | mlir-translate -mlir-to-llvmir | FileCheck %s -check-prefix=LLVM - -// Note that the following CIR been produced after -cir-mlir-scf-prepare. -// So the loop invariant in the condition block have been hoisted out of loop. - -!s32i = !cir.int -module { - cir.global external @a = #cir.zero : !cir.array - - // for (int i = 0; i < 100; ++i) - // a[i] = 3; - // - // MLIR-LABEL: func.func @constantLoopBound() - // LLVM-LABEL: define void @constantLoopBound() - cir.func @constantLoopBound() { - // MLIR: %[[C3:.*]] = arith.constant 3 : i32 - // MLIR: %[[C1:.*]] = arith.constant 1 : i32 - // MLIR: %[[C100:.*]] = arith.constant 100 : i32 - // MLIR: %[[C0:.*]] = arith.constant 0 : i32 - // MLIR: scf.for %[[I:.*]] = %[[C0]] to %[[C100]] step %[[C1]] : i32 { - // MLIR: %[[BASE:.*]] = memref.get_global @a : memref<100xi32> - // MLIR: %[[INDEX:.*]] = arith.index_cast %[[I]] : i32 to index - // MLIR: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<100xi32> - // MLIR: } - - // LLVM: %[[I:.*]] = phi i32 [ %[[I_INC:.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[PREHEADER:.*]] ] - // LLVM: %[[COND:.*]] = icmp slt i32 %[[I]], 100 - // LLVM: br i1 %[[COND]], label %[[LOOP_LATCH]], label %[[LOOP_EXIT:.*]] - // LLVM: %[[I64:.*]] = sext i32 %[[I]] to i64 - // LLVM: %[[ADDR:.*]] = getelementptr i32, ptr @a, i64 %[[I64]] - // LLVM: store i32 3, ptr %[[ADDR]], align 4 - // LLVM: %[[I_INC]] = add i32 %[[I]], 1 - // LLVM: br label %[[LOOP_HEADER:.*]] - - cir.scope { - %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.const #cir.int<0> : !s32i - cir.store %1, %0 : !s32i, !cir.ptr - %2 = cir.const #cir.int<100> : !s32i - cir.for : cond { - %3 = cir.load %0 : !cir.ptr, !s32i - %4 = cir.cmp(lt, %3, %2) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) - } body { - %3 = cir.const #cir.int<3> : !s32i - %4 = cir.get_global @a : !cir.ptr> - %5 = cir.load %0 : !cir.ptr, !s32i - %6 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr - %7 = cir.ptr_stride(%6 : !cir.ptr, %5 : !s32i), !cir.ptr - cir.store %3, %7 : !s32i, !cir.ptr - cir.yield - } step { - %3 = cir.load %0 : !cir.ptr, !s32i - %4 = cir.unary(inc, %3) : !s32i, !s32i - cir.store %4, %0 : !s32i, !cir.ptr - cir.yield - } - } - cir.return - } - - // for (int i = 0; i <= 100; ++i) - // a[i] = 3; - // - // MLIR-LABEL: func.func @constantLoopBound_LE() - // LLVM-LABEL: define void @constantLoopBound_LE() - cir.func @constantLoopBound_LE() { - // MLIR: %[[C3:.*]] = arith.constant 3 : i32 - // MLIR: %[[C1:.*]] = arith.constant 1 : i32 - // MLIR: %[[C0:.*]] = arith.constant 0 : i32 - // MLIR: %[[C101:.*]] = arith.constant 101 : i32 - // MLIR: scf.for %[[I:.*]] = %[[C0]] to %[[C101]] step %[[C1]] : i32 { - // MLIR: %[[BASE:.*]] = memref.get_global @a : memref<100xi32> - // MLIR: %[[INDEX:.*]] = arith.index_cast %[[I]] : i32 to index - // MLIR: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<100xi32> - // MLIR: } - - // LLVM: %[[I:.*]] = phi i32 [ %[[I_INC:.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[PREHEADER:.*]] ] - // LLVM: %[[COND:.*]] = icmp slt i32 %[[I]], 101 - // LLVM: br i1 %[[COND]], label %[[LOOP_LATCH]], label %[[LOOP_EXIT:.*]] - // LLVM: %[[I64:.*]] = sext i32 %[[I]] to i64 - // LLVM: %[[ADDR:.*]] = getelementptr i32, ptr @a, i64 %[[I64]] - // LLVM: store i32 3, ptr %[[ADDR]], align 4 - // LLVM: %[[I_INC]] = add i32 %[[I]], 1 - // LLVM: br label %[[LOOP_HEADER:.*]] - - cir.scope { - %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %1 = cir.const #cir.int<0> : !s32i - cir.store %1, %0 : !s32i, !cir.ptr - %2 = cir.const #cir.int<100> : !s32i - cir.for : cond { - %3 = cir.load %0 : !cir.ptr, !s32i - %4 = cir.cmp(le, %3, %2) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) - } body { - %3 = cir.const #cir.int<3> : !s32i - %4 = cir.get_global @a : !cir.ptr> - %5 = cir.load %0 : !cir.ptr, !s32i - %6 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr - %7 = cir.ptr_stride(%6 : !cir.ptr, %5 : !s32i), !cir.ptr - cir.store %3, %7 : !s32i, !cir.ptr - cir.yield - } step { - %3 = cir.load %0 : !cir.ptr, !s32i - %4 = cir.unary(inc, %3) : !s32i, !s32i - cir.store %4, %0 : !s32i, !cir.ptr - cir.yield - } - } - cir.return - } - - // for (int i = l; i < u; ++i) - // a[i] = 3; - // - // MLIR-LABEL: func.func @variableLoopBound(%arg0: i32, %arg1: i32) - // LLVM-LABEL: define void @variableLoopBound(i32 %0, i32 %1) - cir.func @variableLoopBound(%arg0: !s32i, %arg1: !s32i) { - // MLIR: %[[C3:.*]] = arith.constant 3 : i32 - // MLIR: %[[C1:.*]] = arith.constant 1 : i32 - // MLIR: memref.store %arg0, %alloca[] : memref - // MLIR: memref.store %arg1, %alloca_0[] : memref - // MLIR: %[[LOWER:.*]] = memref.load %alloca[] : memref - // MLIR: %[[UPPER:.*]] = memref.load %alloca_0[] : memref - // MLIR: scf.for %[[I:.*]] = %[[LOWER]] to %[[UPPER]] step %[[C1]] : i32 { - // MLIR: %[[BASE:.*]] = memref.get_global @a : memref<100xi32> - // MLIR: %[[INDEX:.*]] = arith.index_cast %[[I]] : i32 to index - // MLIR: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<100xi32> - // MLIR: } - - // LLVM: %[[I:.*]] = phi i32 [ %[[I_INC:.*]], %[[LOOP_LATCH:.*]] ], [ %[[LOWER:.*]], %[[PREHEADER:.*]] ] - // LLVM: %[[COND:.*]] = icmp slt i32 %[[I]], %[[UPPER:.*]] - // LLVM: br i1 %[[COND]], label %[[LOOP_LATCH]], label %[[LOOP_EXIT:.*]] - // LLVM: %[[I64:.*]] = sext i32 %[[I]] to i64 - // LLVM: %[[ADDR:.*]] = getelementptr i32, ptr @a, i64 %[[I64]] - // LLVM: store i32 3, ptr %[[ADDR]], align 4 - // LLVM: %[[I_INC]] = add i32 %[[I]], 1 - // LLVM: br label %[[LOOP_HEADER:.*]] - - %0 = cir.alloca !s32i, !cir.ptr, ["l", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, !cir.ptr, ["u", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, !cir.ptr - cir.store %arg1, %1 : !s32i, !cir.ptr - cir.scope { - %2 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %3 = cir.load %0 : !cir.ptr, !s32i - cir.store %3, %2 : !s32i, !cir.ptr - %4 = cir.load %1 : !cir.ptr, !s32i - cir.for : cond { - %5 = cir.load %2 : !cir.ptr, !s32i - %6 = cir.cmp(lt, %5, %4) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.condition(%7) - } body { - %5 = cir.const #cir.int<3> : !s32i - %6 = cir.get_global @a : !cir.ptr> - %7 = cir.load %2 : !cir.ptr, !s32i - %8 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr - %9 = cir.ptr_stride(%8 : !cir.ptr, %7 : !s32i), !cir.ptr - cir.store %5, %9 : !s32i, !cir.ptr - cir.yield - } step { - %5 = cir.load %2 : !cir.ptr, !s32i - %6 = cir.unary(inc, %5) : !s32i, !s32i - cir.store %6, %2 : !s32i, !cir.ptr - cir.yield - } - } - cir.return - } - - // for (int i = l; i <= u; i+=4) - // a[i] = 3; - // - // MLIR-LABEL: func.func @variableLoopBound_LE(%arg0: i32, %arg1: i32) - // LLVM-LABEL: define void @variableLoopBound_LE(i32 %0, i32 %1) - cir.func @variableLoopBound_LE(%arg0: !s32i, %arg1: !s32i) { - // MLIR: %[[C3:.*]] = arith.constant 3 : i32 - // MLIR: %[[C4:.*]] = arith.constant 4 : i32 - // MLIR: %[[C1:.*]] = arith.constant 1 : i32 - // MLIR: memref.store %arg0, %alloca[] : memref - // MLIR: memref.store %arg1, %alloca_0[] : memref - // MLIR: %[[LOWER:.*]] = memref.load %alloca[] : memref - // MLIR: %[[UPPER_DEC_1:.*]] = memref.load %alloca_0[] : memref - // MLIR: %[[UPPER:.*]] = arith.addi %[[UPPER_DEC_1]], %[[C1]] : i32 - // MLIR: scf.for %[[I:.*]] = %[[LOWER]] to %[[UPPER]] step %[[C4]] : i32 { - // MLIR: %[[BASE:.*]] = memref.get_global @a : memref<100xi32> - // MLIR: %[[INDEX:.*]] = arith.index_cast %[[I]] : i32 to index - // MLIR: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<100xi32> - // MLIR: } - - // LLVM: %[[I:.*]] = phi i32 [ %[[I_INC:.*]], %[[LOOP_LATCH:.*]] ], [ %[[LOWER:.*]], %[[PREHEADER:.*]] ] - // LLVM: %[[COND:.*]] = icmp slt i32 %[[I]], %[[UPPER:.*]] - // LLVM: br i1 %[[COND]], label %[[LOOP_LATCH]], label %[[LOOP_EXIT:.*]] - // LLVM: %[[I64:.*]] = sext i32 %[[I]] to i64 - // LLVM: %[[ADDR:.*]] = getelementptr i32, ptr @a, i64 %[[I64]] - // LLVM: store i32 3, ptr %[[ADDR]], align 4 - // LLVM: %[[I_INC]] = add i32 %[[I]], 4 - // LLVM: br label %[[LOOP_HEADER:.*]] - - %0 = cir.alloca !s32i, !cir.ptr, ["l", init] {alignment = 4 : i64} - %1 = cir.alloca !s32i, !cir.ptr, ["u", init] {alignment = 4 : i64} - cir.store %arg0, %0 : !s32i, !cir.ptr - cir.store %arg1, %1 : !s32i, !cir.ptr - cir.scope { - %2 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} - %3 = cir.load %0 : !cir.ptr, !s32i - cir.store %3, %2 : !s32i, !cir.ptr - %4 = cir.load %1 : !cir.ptr, !s32i - cir.for : cond { - %5 = cir.load %2 : !cir.ptr, !s32i - %6 = cir.cmp(le, %5, %4) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.condition(%7) - } body { - %5 = cir.const #cir.int<3> : !s32i - %6 = cir.get_global @a : !cir.ptr> - %7 = cir.load %2 : !cir.ptr, !s32i - %8 = cir.cast(array_to_ptrdecay, %6 : !cir.ptr>), !cir.ptr - %9 = cir.ptr_stride(%8 : !cir.ptr, %7 : !s32i), !cir.ptr - cir.store %5, %9 : !s32i, !cir.ptr - cir.yield - } step { - %5 = cir.const #cir.int<4> : !s32i - %6 = cir.load %2 : !cir.ptr, !s32i - %7 = cir.binop(add, %6, %5) : !s32i - cir.store %7, %2 : !s32i, !cir.ptr - cir.yield - } - } - cir.return - } -} diff --git a/clang/test/CIR/Lowering/ThroughMLIR/for.cpp b/clang/test/CIR/Lowering/ThroughMLIR/for.cpp new file mode 100644 index 000000000000..3ed99718369a --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/for.cpp @@ -0,0 +1,103 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +int a[101], b[101]; + +void constantLoopBound() { + for (int i = 0; i < 100; ++i) + a[i] = 3; +} +// CHECK-LABEL: func.func @_Z17constantLoopBoundv() { +// CHECK: %[[C0:.*]] = arith.constant 0 : i32 +// CHECK: %[[C100:.*]] = arith.constant 100 : i32 +// CHECK: %[[C1:.*]] = arith.constant 1 : i32 +// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C100]] step %[[C1]] : i32 { +// CHECK: %[[C3:.*]] = arith.constant 3 : i32 +// CHECK: %[[BASE:.*]] = memref.get_global @a : memref<101xi32> +// CHECK: %[[C0_i32:.*]] = arith.constant 0 : i32 +// CHECK: %[[IV:.*]] = arith.addi %[[I]], %[[C0_i32]] : i32 +// CHECK: %[[INDEX:.*]] = arith.index_cast %[[IV]] : i32 to index +// CHECK: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<101xi32> +// CHECK: } + +void constantLoopBound_LE() { + for (int i = 0; i <= 100; ++i) + a[i] = 3; +} +// CHECK-LABEL: func.func @_Z20constantLoopBound_LEv() { +// CHECK: %[[C0:.*]] = arith.constant 0 : i32 +// CHECK: %[[C100:.*]] = arith.constant 100 : i32 +// CHECK: %[[C1:.*]] = arith.constant 1 : i32 +// CHECK: %[[C101:.*]] = arith.addi %c100_i32, %c1_i32 : i32 +// CHECK: %[[C1_STEP:.*]] = arith.constant 1 : i32 +// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C101]] step %[[C1_STEP]] : i32 { +// CHECK: %[[C3:.*]] = arith.constant 3 : i32 +// CHECK: %[[BASE:.*]] = memref.get_global @a : memref<101xi32> +// CHECK: %[[C0_i32:.*]] = arith.constant 0 : i32 +// CHECK: %[[IV:.*]] = arith.addi %[[I]], %[[C0_i32]] : i32 +// CHECK: %[[INDEX:.*]] = arith.index_cast %[[IV]] : i32 to index +// CHECK: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<101xi32> +// CHECK: } + +void variableLoopBound(int l, int u) { + for (int i = l; i < u; ++i) + a[i] = 3; +} +// CHECK-LABEL: func.func @_Z17variableLoopBoundii +// CHECK: memref.store %arg0, %alloca[] : memref +// CHECK: memref.store %arg1, %alloca_0[] : memref +// CHECK: %[[LOWER:.*]] = memref.load %alloca[] : memref +// CHECK: %[[UPPER:.*]] = memref.load %alloca_0[] : memref +// CHECK: %[[C1:.*]] = arith.constant 1 : i32 +// CHECK: scf.for %[[I:.*]] = %[[LOWER]] to %[[UPPER]] step %[[C1]] : i32 { +// CHECK: %[[C3:.*]] = arith.constant 3 : i32 +// CHECK: %[[BASE:.*]] = memref.get_global @a : memref<101xi32> +// CHECK: %[[C0:.*]] = arith.constant 0 : i32 +// CHECK: %[[IV:.*]] = arith.addi %[[I]], %[[C0]] : i32 +// CHECK: %[[INDEX:.*]] = arith.index_cast %[[IV]] : i32 to index +// CHECK: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<101xi32> +// CHECK: } + +void ariableLoopBound_LE(int l, int u) { + for (int i = l; i <= u; i+=4) + a[i] = 3; +} +// CHECK-LABEL: func.func @_Z19ariableLoopBound_LEii +// CHECK: memref.store %arg0, %alloca[] : memref +// CHECK: memref.store %arg1, %alloca_0[] : memref +// CHECK: %[[LOWER:.*]] = memref.load %alloca[] : memref +// CHECK: %[[UPPER_DEC_1:.*]] = memref.load %alloca_0[] : memref +// CHECK: %[[C1:.*]] = arith.constant 1 : i32 +// CHECK: %[[UPPER:.*]] = arith.addi %[[UPPER_DEC_1]], %[[C1]] : i32 +// CHECK: %[[C4:.*]] = arith.constant 4 : i32 +// CHECK: scf.for %[[I:.*]] = %[[LOWER]] to %[[UPPER]] step %[[C4]] : i32 { +// CHECK: %[[C3:.*]] = arith.constant 3 : i32 +// CHECK: %[[BASE:.*]] = memref.get_global @a : memref<101xi32> +// CHECK: %[[C0:.*]] = arith.constant 0 : i32 +// CHECK: %[[IV:.*]] = arith.addi %[[I]], %[[C0]] : i32 +// CHECK: %[[INDEX:.*]] = arith.index_cast %[[IV]] : i32 to index +// CHECK: memref.store %[[C3]], %[[BASE]][%[[INDEX]]] : memref<101xi32> +// CHECK: } + +void incArray() { + for (int i = 0; i < 100; ++i) + a[i] += b[i]; +} +// CHECK-LABEL: func.func @_Z8incArrayv() { +// CHECK: %[[C0:.*]] = arith.constant 0 : i32 +// CHECK: %[[C100:.*]] = arith.constant 100 : i32 +// CHECK: %[[C1:.*]] = arith.constant 1 : i32 +// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C100]] step %[[C1]] : i32 { +// CHECK: %[[B:.*]] = memref.get_global @b : memref<101xi32> +// CHECK: %[[C0_2:.*]] = arith.constant 0 : i32 +// CHECK: %[[IV2:.*]] = arith.addi %[[I]], %[[C0_2]] : i32 +// CHECK: %[[INDEX_2:.*]] = arith.index_cast %[[IV2]] : i32 to index +// CHECK: %[[B_VALUE:.*]] = memref.load %[[B]][%[[INDEX_2]]] : memref<101xi32> +// CHECK: %[[A:.*]] = memref.get_global @a : memref<101xi32> +// CHECK: %[[C0_1:.*]] = arith.constant 0 : i32 +// CHECK: %[[IV1:.*]] = arith.addi %[[I]], %[[C0_1]] : i32 +// CHECK: %[[INDEX_1:.*]] = arith.index_cast %[[IV1]] : i32 to index +// CHECK: %[[A_VALUE:.*]] = memref.load %[[A]][%[[INDEX_1]]] : memref<101xi32> +// CHECK: %[[SUM:.*]] = arith.addi %[[A_VALUE]], %[[B_VALUE]] : i32 +// CHECK: memref.store %[[SUM]], %[[A]][%[[INDEX_1]]] : memref<101xi32> +// CHECK: } From 7490e1e5362417e664f88a19d40884ab0121ed1c Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 12 Jul 2024 05:53:40 +0800 Subject: [PATCH 1680/2301] [CIR][Fix] FP builtins should lower directly to LLVM builtins (#670) LLVM lowering for the following operations is introduced in #616 and #651: `cos`, `exp`, `exp2`, `log`, `log10`, `log2`, `sin`, `sqrt`, `fmod`, and `pow`. However, they are not lowered to their corresponding LLVM intrinsics; instead they are transformed to libc calls during lowering prepare. This does not match the upstream behavior. This PR tries to correct this mistake. It makes all CIR FP intrinsic ops lower to their corresponding LLVM intrinsics (`fmod` is a special case and it is lowered to the `frem` LLVM instruction). --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 20 +++ .../Dialect/Transforms/LoweringPrepare.cpp | 71 ----------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 46 ++++++- .../test/CIR/CodeGen/builtin-floating-point.c | 120 +++++++++--------- .../test/CIR/Lowering/builtin-binary-fp2fp.c | 64 +++++++++- 5 files changed, 184 insertions(+), 137 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 97a0c08ef3c6..e4c8201d2dca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -448,6 +448,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_cosf16: case Builtin::BI__builtin_cosl: case Builtin::BI__builtin_cosf128: + assert(getContext().getLangOpts().FastMath && + "cir.cos is only expected under -ffast-math"); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIexp: @@ -458,6 +460,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_expf16: case Builtin::BI__builtin_expl: case Builtin::BI__builtin_expf128: + assert(getContext().getLangOpts().FastMath && + "cir.exp is only expected under -ffast-math"); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIexp2: @@ -468,6 +472,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_exp2f16: case Builtin::BI__builtin_exp2l: case Builtin::BI__builtin_exp2f128: + assert(getContext().getLangOpts().FastMath && + "cir.exp2 is only expected under -ffast-math"); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIfabs: @@ -534,6 +540,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmod: case Builtin::BI__builtin_fmodf: case Builtin::BI__builtin_fmodl: + assert(getContext().getLangOpts().FastMath && + "cir.fmod is only expected under -ffast-math"); return buildBinaryFPBuiltin(*this, *E); case Builtin::BI__builtin_fmodf16: @@ -548,6 +556,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_logf16: case Builtin::BI__builtin_logl: case Builtin::BI__builtin_logf128: + assert(getContext().getLangOpts().FastMath && + "cir.log is only expected under -ffast-math"); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlog10: @@ -558,6 +568,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log10f16: case Builtin::BI__builtin_log10l: case Builtin::BI__builtin_log10f128: + assert(getContext().getLangOpts().FastMath && + "cir.log10 is only expected under -ffast-math"); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlog2: @@ -568,6 +580,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log2f16: case Builtin::BI__builtin_log2l: case Builtin::BI__builtin_log2f128: + assert(getContext().getLangOpts().FastMath && + "cir.log2 is only expected under -ffast-math"); return buildUnaryFPBuiltin(*this, *E); case Builtin::BInearbyint: @@ -585,6 +599,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_pow: case Builtin::BI__builtin_powf: case Builtin::BI__builtin_powl: + assert(getContext().getLangOpts().FastMath && + "cir.pow is only expected under -ffast-math"); return RValue::get( buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); @@ -620,6 +636,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sinf16: case Builtin::BI__builtin_sinl: case Builtin::BI__builtin_sinf128: + assert(getContext().getLangOpts().FastMath && + "cir.sin is only expected under -ffast-math"); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIsqrt: @@ -630,6 +648,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sqrtf16: case Builtin::BI__builtin_sqrtl: case Builtin::BI__builtin_sqrtf128: + assert(getContext().getLangOpts().FastMath && + "cir.sqrt is only expected under -ffast-math"); return buildUnaryFPBuiltin(*this, *E); case Builtin::BItrunc: diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 49f05d4187f3..1172c7332ce2 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -24,7 +24,6 @@ #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" -#include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Path.h" @@ -71,7 +70,6 @@ struct LoweringPreparePass : public LoweringPrepareBase { void runOnOperation() override; void runOnOp(Operation *op); - void runOnMathOp(Operation *op); void lowerThreeWayCmpOp(CmpThreeWayOp op); void lowerVAArgOp(VAArgOp op); void lowerGlobalOp(GlobalOp op); @@ -650,69 +648,6 @@ void LoweringPreparePass::runOnOp(Operation *op) { } } -void LoweringPreparePass::runOnMathOp(Operation *op) { - struct MathOpFunctionNames { - llvm::StringRef floatVer; - llvm::StringRef doubleVer; - llvm::StringRef longDoubleVer; - }; - - mlir::Type ty = op->getResult(0).getType(); - - MathOpFunctionNames rtFuncNames = - llvm::TypeSwitch(op) - .Case([](auto) { - return MathOpFunctionNames{"fmodf", "fmod", "fmodl"}; - }) - .Case( - [](auto) { return MathOpFunctionNames{"powf", "pow", "powl"}; }) - .Case( - [](auto) { return MathOpFunctionNames{"cosf", "cos", "cosl"}; }) - .Case( - [](auto) { return MathOpFunctionNames{"expf", "exp", "expl"}; }) - .Case([](auto) { - return MathOpFunctionNames{"exp2f", "exp2", "exp2l"}; - }) - .Case( - [](auto) { return MathOpFunctionNames{"logf", "log", "logl"}; }) - .Case([](auto) { - return MathOpFunctionNames{"log10f", "log10", "log10l"}; - }) - .Case([](auto) { - return MathOpFunctionNames{"log2f", "log2", "log2l"}; - }) - .Case( - [](auto) { return MathOpFunctionNames{"sinf", "sin", "sinl"}; }) - .Case([](auto) { - return MathOpFunctionNames{"sqrtf", "sqrt", "sqrtl"}; - }); - llvm::StringRef rtFuncName = llvm::TypeSwitch(ty) - .Case([&](auto) { - return rtFuncNames.floatVer; - }) - .Case([&](auto) { - return rtFuncNames.doubleVer; - }) - .Case([&](auto) { - return rtFuncNames.longDoubleVer; - }); - - CIRBaseBuilderTy builder(*theModule.getContext()); - builder.setInsertionPointToStart(theModule.getBody()); - - llvm::SmallVector operandTypes(op->getNumOperands(), ty); - auto rtFuncTy = - mlir::cir::FuncType::get(operandTypes, op->getResult(0).getType()); - FuncOp rtFunc = - buildRuntimeFunction(builder, rtFuncName, op->getLoc(), rtFuncTy); - - builder.setInsertionPointAfter(op); - auto call = builder.createCallOp(op->getLoc(), rtFunc, op->getOperands()); - - op->replaceAllUsesWith(call); - op->erase(); -} - void LoweringPreparePass::runOnOperation() { assert(astCtx && "Missing ASTContext, please construct with the right ctor"); auto *op = getOperation(); @@ -721,22 +656,16 @@ void LoweringPreparePass::runOnOperation() { } SmallVector opsToTransform; - SmallVector mathOpsToTransform; op->walk([&](Operation *op) { if (isa( op)) opsToTransform.push_back(op); - else if (isa(op)) - mathOpsToTransform.push_back(op); }); for (auto *o : opsToTransform) runOnOp(o); - for (auto *o : mathOpsToTransform) - runOnMathOp(o); buildCXXGlobalInitFunc(); buildGlobalCtorDtorList(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8a7df1c6ce31..9c2a01c7f6ea 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3396,10 +3396,22 @@ class CIRUnaryFPBuiltinOpLowering : public mlir::OpConversionPattern { using CIRCeilOpLowering = CIRUnaryFPBuiltinOpLowering; +using CIRCosOpLowering = + CIRUnaryFPBuiltinOpLowering; +using CIRExpOpLowering = + CIRUnaryFPBuiltinOpLowering; +using CIRExp2OpLowering = + CIRUnaryFPBuiltinOpLowering; using CIRFloorOpLowering = CIRUnaryFPBuiltinOpLowering; using CIRFabsOpLowering = CIRUnaryFPBuiltinOpLowering; +using CIRLogOpLowering = + CIRUnaryFPBuiltinOpLowering; +using CIRLog10OpLowering = + CIRUnaryFPBuiltinOpLowering; +using CIRLog2OpLowering = + CIRUnaryFPBuiltinOpLowering; using CIRNearbyintOpLowering = CIRUnaryFPBuiltinOpLowering; @@ -3407,6 +3419,10 @@ using CIRRintOpLowering = CIRUnaryFPBuiltinOpLowering; using CIRRoundOpLowering = CIRUnaryFPBuiltinOpLowering; +using CIRSinOpLowering = + CIRUnaryFPBuiltinOpLowering; +using CIRSqrtOpLowering = + CIRUnaryFPBuiltinOpLowering; using CIRTruncOpLowering = CIRUnaryFPBuiltinOpLowering; @@ -3443,6 +3459,24 @@ using CIRFMaxOpLowering = CIRBinaryFPToFPBuiltinOpLowering; using CIRFMinOpLowering = CIRBinaryFPToFPBuiltinOpLowering; +using CIRPowOpLowering = + CIRBinaryFPToFPBuiltinOpLowering; + +// cir.fmod is special. Instead of lowering it to an intrinsic call, lower it to +// the frem LLVM instruction. +class CIRFModOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::FModOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = this->getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, resTy, adaptor.getLhs(), + adaptor.getRhs()); + return mlir::success(); + } +}; class CIRClearCacheOpLowering : public mlir::OpConversionPattern { @@ -3490,11 +3524,13 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, CIRLroundOpLowering, CIRLLroundOpLowering, CIRLrintOpLowering, CIRLLrintOpLowering, - CIRCeilOpLowering, CIRFloorOpLowering, CIRFAbsOpLowering, - CIRNearbyintOpLowering, CIRRintOpLowering, CIRRoundOpLowering, - CIRTruncOpLowering, CIRCopysignOpLowering, CIRFMaxOpLowering, - CIRFMinOpLowering, CIRClearCacheOpLowering>(converter, - patterns.getContext()); + CIRCeilOpLowering, CIRCosOpLowering, CIRExpOpLowering, CIRExp2OpLowering, + CIRFloorOpLowering, CIRFAbsOpLowering, CIRLogOpLowering, + CIRLog10OpLowering, CIRLog2OpLowering, CIRNearbyintOpLowering, + CIRRintOpLowering, CIRRoundOpLowering, CIRSinOpLowering, + CIRSqrtOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, + CIRFModOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering, CIRPowOpLowering, + CIRClearCacheOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c index 51e7a1a6a6ef..84bd60c06ac2 100644 --- a/clang/test/CIR/CodeGen/builtin-floating-point.c +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -351,7 +351,7 @@ float my_cosf(float f) { // CHECK: {{.+}} = cir.cos {{.+}} : !cir.float // LLVM: define float @my_cosf(float %0) - // LLVM: %{{.+}} = call float @cosf(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.cos.f32(float %{{.+}}) // LLVM: } } @@ -361,7 +361,7 @@ double my_cos(double f) { // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double // LLVM: define double @my_cos(double %0) - // LLVM: %{{.+}} = call double @cos(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.cos.f64(double %{{.+}}) // LLVM: } } @@ -372,7 +372,7 @@ long double my_cosl(long double f) { // AARCH64: {{.+}} = cir.cos {{.+}} : !cir.long_double // LLVM: define x86_fp80 @my_cosl(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @cosl(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.cos.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -386,7 +386,7 @@ float call_cosf(float f) { // CHECK: {{.+}} = cir.cos {{.+}} : !cir.float // LLVM: define float @call_cosf(float %0) - // LLVM: %{{.+}} = call float @cosf(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.cos.f32(float %{{.+}}) // LLVM: } } @@ -396,7 +396,7 @@ double call_cos(double f) { // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double // LLVM: define double @call_cos(double %0) - // LLVM: %{{.+}} = call double @cos(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.cos.f64(double %{{.+}}) // LLVM: } } @@ -407,7 +407,7 @@ long double call_cosl(long double f) { // AARCH64: {{.+}} = cir.cos {{.+}} : !cir.long_double // LLVM: define x86_fp80 @call_cosl(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @cosl(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.cos.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -419,7 +419,7 @@ float my_expf(float f) { // CHECK: {{.+}} = cir.exp {{.+}} : !cir.float // LLVM: define float @my_expf(float %0) - // LLVM: %{{.+}} = call float @expf(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.exp.f32(float %{{.+}}) // LLVM: } } @@ -429,7 +429,7 @@ double my_exp(double f) { // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double // LLVM: define double @my_exp(double %0) - // LLVM: %{{.+}} = call double @exp(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.exp.f64(double %{{.+}}) // LLVM: } } @@ -440,7 +440,7 @@ long double my_expl(long double f) { // AARCH64: {{.+}} = cir.exp {{.+}} : !cir.long_double // LLVM: define x86_fp80 @my_expl(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @expl(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.exp.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -454,7 +454,7 @@ float call_expf(float f) { // CHECK: {{.+}} = cir.exp {{.+}} : !cir.float // LLVM: define float @call_expf(float %0) - // LLVM: %{{.+}} = call float @expf(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.exp.f32(float %{{.+}}) // LLVM: } } @@ -464,7 +464,7 @@ double call_exp(double f) { // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double // LLVM: define double @call_exp(double %0) - // LLVM: %{{.+}} = call double @exp(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.exp.f64(double %{{.+}}) // LLVM: } } @@ -475,7 +475,7 @@ long double call_expl(long double f) { // AARCH64: {{.+}} = cir.exp {{.+}} : !cir.long_double // LLVM: define x86_fp80 @call_expl(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @expl(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.exp.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -487,7 +487,7 @@ float my_exp2f(float f) { // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.float // LLVM: define float @my_exp2f(float %0) - // LLVM: %{{.+}} = call float @exp2f(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.exp2.f32(float %{{.+}}) // LLVM: } } @@ -497,7 +497,7 @@ double my_exp2(double f) { // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double // LLVM: define double @my_exp2(double %0) - // LLVM: %{{.+}} = call double @exp2(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.exp2.f64(double %{{.+}}) // LLVM: } } @@ -508,7 +508,7 @@ long double my_exp2l(long double f) { // AARCH64: {{.+}} = cir.exp2 {{.+}} : !cir.long_double // LLVM: define x86_fp80 @my_exp2l(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @exp2l(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.exp2.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -522,7 +522,7 @@ float call_exp2f(float f) { // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.float // LLVM: define float @call_exp2f(float %0) - // LLVM: %{{.+}} = call float @exp2f(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.exp2.f32(float %{{.+}}) // LLVM: } } @@ -532,7 +532,7 @@ double call_exp2(double f) { // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double // LLVM: define double @call_exp2(double %0) - // LLVM: %{{.+}} = call double @exp2(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.exp2.f64(double %{{.+}}) // LLVM: } } @@ -543,7 +543,7 @@ long double call_exp2l(long double f) { // AARCH64: {{.+}} = cir.exp2 {{.+}} : !cir.long_double // LLVM: define x86_fp80 @call_exp2l(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @exp2l(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.exp2.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -623,7 +623,7 @@ float my_logf(float f) { // CHECK: {{.+}} = cir.log {{.+}} : !cir.float // LLVM: define float @my_logf(float %0) - // LLVM: %{{.+}} = call float @logf(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.log.f32(float %{{.+}}) // LLVM: } } @@ -633,7 +633,7 @@ double my_log(double f) { // CHECK: {{.+}} = cir.log {{.+}} : !cir.double // LLVM: define double @my_log(double %0) - // LLVM: %{{.+}} = call double @log(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.log.f64(double %{{.+}}) // LLVM: } } @@ -644,7 +644,7 @@ long double my_logl(long double f) { // AARCH64: {{.+}} = cir.log {{.+}} : !cir.long_double // LLVM: define x86_fp80 @my_logl(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @logl(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.log.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -658,7 +658,7 @@ float call_logf(float f) { // CHECK: {{.+}} = cir.log {{.+}} : !cir.float // LLVM: define float @call_logf(float %0) - // LLVM: %{{.+}} = call float @logf(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.log.f32(float %{{.+}}) // LLVM: } } @@ -668,7 +668,7 @@ double call_log(double f) { // CHECK: {{.+}} = cir.log {{.+}} : !cir.double // LLVM: define double @call_log(double %0) - // LLVM: %{{.+}} = call double @log(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.log.f64(double %{{.+}}) // LLVM: } } @@ -679,7 +679,7 @@ long double call_logl(long double f) { // AARCH64: {{.+}} = cir.log {{.+}} : !cir.long_double // LLVM: define x86_fp80 @call_logl(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @logl(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.log.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -691,7 +691,7 @@ float my_log10f(float f) { // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.float // LLVM: define float @my_log10f(float %0) - // LLVM: %{{.+}} = call float @log10f(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.log10.f32(float %{{.+}}) // LLVM: } } @@ -701,7 +701,7 @@ double my_log10(double f) { // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double // LLVM: define double @my_log10(double %0) - // LLVM: %{{.+}} = call double @log10(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.log10.f64(double %{{.+}}) // LLVM: } } @@ -712,7 +712,7 @@ long double my_log10l(long double f) { // AARCH64: {{.+}} = cir.log10 {{.+}} : !cir.long_double // LLVM: define x86_fp80 @my_log10l(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @log10l(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.log10.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -726,7 +726,7 @@ float call_log10f(float f) { // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.float // LLVM: define float @call_log10f(float %0) - // LLVM: %{{.+}} = call float @log10f(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.log10.f32(float %{{.+}}) // LLVM: } } @@ -736,7 +736,7 @@ double call_log10(double f) { // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double // LLVM: define double @call_log10(double %0) - // LLVM: %{{.+}} = call double @log10(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.log10.f64(double %{{.+}}) // LLVM: } } @@ -747,7 +747,7 @@ long double call_log10l(long double f) { // AARCH64: {{.+}} = cir.log10 {{.+}} : !cir.long_double // LLVM: define x86_fp80 @call_log10l(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @log10l(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.log10.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -759,7 +759,7 @@ float my_log2f(float f) { // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.float // LLVM: define float @my_log2f(float %0) - // LLVM: %{{.+}} = call float @log2f(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.log2.f32(float %{{.+}}) // LLVM: } } @@ -769,7 +769,7 @@ double my_log2(double f) { // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double // LLVM: define double @my_log2(double %0) - // LLVM: %{{.+}} = call double @log2(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.log2.f64(double %{{.+}}) // LLVM: } } @@ -780,7 +780,7 @@ long double my_log2l(long double f) { // AARCH64: {{.+}} = cir.log2 {{.+}} : !cir.long_double // LLVM: define x86_fp80 @my_log2l(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @log2l(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.log2.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -794,7 +794,7 @@ float call_log2f(float f) { // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.float // LLVM: define float @call_log2f(float %0) - // LLVM: %{{.+}} = call float @log2f(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.log2.f32(float %{{.+}}) // LLVM: } } @@ -804,7 +804,7 @@ double call_log2(double f) { // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double // LLVM: define double @call_log2(double %0) - // LLVM: %{{.+}} = call double @log2(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.log2.f64(double %{{.+}}) // LLVM: } } @@ -815,7 +815,7 @@ long double call_log2l(long double f) { // AARCH64: {{.+}} = cir.log2 {{.+}} : !cir.long_double // LLVM: define x86_fp80 @call_log2l(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @log2l(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.log2.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1031,7 +1031,7 @@ float my_sinf(float f) { // CHECK: {{.+}} = cir.sin {{.+}} : !cir.float // LLVM: define float @my_sinf(float %0) - // LLVM: %{{.+}} = call float @sinf(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.sin.f32(float %{{.+}}) // LLVM: } } @@ -1041,7 +1041,7 @@ double my_sin(double f) { // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double // LLVM: define double @my_sin(double %0) - // LLVM: %{{.+}} = call double @sin(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.sin.f64(double %{{.+}}) // LLVM: } } @@ -1052,7 +1052,7 @@ long double my_sinl(long double f) { // AARCH64: {{.+}} = cir.sin {{.+}} : !cir.long_double // LLVM: define x86_fp80 @my_sinl(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @sinl(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.sin.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1066,7 +1066,7 @@ float call_sinf(float f) { // CHECK: {{.+}} = cir.sin {{.+}} : !cir.float // LLVM: define float @call_sinf(float %0) - // LLVM: %{{.+}} = call float @sinf(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.sin.f32(float %{{.+}}) // LLVM: } } @@ -1076,7 +1076,7 @@ double call_sin(double f) { // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double // LLVM: define double @call_sin(double %0) - // LLVM: %{{.+}} = call double @sin(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.sin.f64(double %{{.+}}) // LLVM: } } @@ -1087,7 +1087,7 @@ long double call_sinl(long double f) { // AARCH64: {{.+}} = cir.sin {{.+}} : !cir.long_double // LLVM: define x86_fp80 @call_sinl(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @sinl(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.sin.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1099,7 +1099,7 @@ float my_sqrtf(float f) { // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.float // LLVM: define float @my_sqrtf(float %0) - // LLVM: %{{.+}} = call float @sqrtf(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.sqrt.f32(float %{{.+}}) // LLVM: } } @@ -1109,7 +1109,7 @@ double my_sqrt(double f) { // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double // LLVM: define double @my_sqrt(double %0) - // LLVM: %{{.+}} = call double @sqrt(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.sqrt.f64(double %{{.+}}) // LLVM: } } @@ -1120,7 +1120,7 @@ long double my_sqrtl(long double f) { // AARCH64: {{.+}} = cir.sqrt {{.+}} : !cir.long_double // LLVM: define x86_fp80 @my_sqrtl(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @sqrtl(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.sqrt.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1134,7 +1134,7 @@ float call_sqrtf(float f) { // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.float // LLVM: define float @call_sqrtf(float %0) - // LLVM: %{{.+}} = call float @sqrtf(float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.sqrt.f32(float %{{.+}}) // LLVM: } } @@ -1144,7 +1144,7 @@ double call_sqrt(double f) { // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double // LLVM: define double @call_sqrt(double %0) - // LLVM: %{{.+}} = call double @sqrt(double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.sqrt.f64(double %{{.+}}) // LLVM: } } @@ -1155,7 +1155,7 @@ long double call_sqrtl(long double f) { // AARCH64: {{.+}} = cir.sqrt {{.+}} : !cir.long_double // LLVM: define x86_fp80 @call_sqrtl(x86_fp80 %0) - // LLVM: %{{.+}} = call x86_fp80 @sqrtl(x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.sqrt.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1439,7 +1439,7 @@ float my_fmodf(float x, float y) { // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.float // LLVM: define float @my_fmodf - // LLVM: %{{.+}} = call float @fmodf(float %{{.+}}, float %{{.+}}) + // LLVM: %{{.+}} = frem float %{{.+}}, %{{.+}} // LLVM: } } @@ -1449,7 +1449,7 @@ double my_fmod(double x, double y) { // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.double // LLVM: define double @my_fmod - // LLVM: %{{.+}} = call double @fmod(double %{{.+}}, double %{{.+}}) + // LLVM: %{{.+}} = frem double %{{.+}}, %{{.+}} // LLVM: } } @@ -1460,7 +1460,7 @@ long double my_fmodl(long double x, long double y) { // AARCH64: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double // LLVM: define x86_fp80 @my_fmodl - // LLVM: %{{.+}} = call x86_fp80 @fmodl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = frem x86_fp80 %{{.+}}, %{{.+}} // LLVM: } } @@ -1474,7 +1474,7 @@ float call_fmodf(float x, float y) { // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.float // LLVM: define float @call_fmodf - // LLVM: %{{.+}} = call float @fmodf(float %{{.+}}, float %{{.+}}) + // LLVM: %{{.+}} = frem float %{{.+}}, %{{.+}} // LLVM: } } @@ -1484,7 +1484,7 @@ double call_fmod(double x, double y) { // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.double // LLVM: define double @call_fmod - // LLVM: %{{.+}} = call double @fmod(double %{{.+}}, double %{{.+}}) + // LLVM: %{{.+}} = frem double %{{.+}}, %{{.+}} // LLVM: } } @@ -1495,7 +1495,7 @@ long double call_fmodl(long double x, long double y) { // AARCH64: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double // LLVM: define x86_fp80 @call_fmodl - // LLVM: %{{.+}} = call x86_fp80 @fmodl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = frem x86_fp80 %{{.+}}, %{{.+}} // LLVM: } } @@ -1507,7 +1507,7 @@ float my_powf(float x, float y) { // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.float // LLVM: define float @my_powf - // LLVM: %{{.+}} = call float @powf(float %{{.+}}, float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.pow.f32(float %{{.+}}, float %{{.+}}) // LLVM: } } @@ -1517,7 +1517,7 @@ double my_pow(double x, double y) { // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.double // LLVM: define double @my_pow - // LLVM: %{{.+}} = call double @pow(double %{{.+}}, double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.pow.f64(double %{{.+}}, double %{{.+}}) // LLVM: } } @@ -1528,7 +1528,7 @@ long double my_powl(long double x, long double y) { // AARCH64: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double // LLVM: define x86_fp80 @my_powl - // LLVM: %{{.+}} = call x86_fp80 @powl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.pow.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } } @@ -1542,7 +1542,7 @@ float call_powf(float x, float y) { // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.float // LLVM: define float @call_powf - // LLVM: %{{.+}} = call float @powf(float %{{.+}}, float %{{.+}}) + // LLVM: %{{.+}} = call float @llvm.pow.f32(float %{{.+}}, float %{{.+}}) // LLVM: } } @@ -1552,7 +1552,7 @@ double call_pow(double x, double y) { // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.double // LLVM: define double @call_pow - // LLVM: %{{.+}} = call double @pow(double %{{.+}}, double %{{.+}}) + // LLVM: %{{.+}} = call double @llvm.pow.f64(double %{{.+}}, double %{{.+}}) // LLVM: } } @@ -1563,6 +1563,6 @@ long double call_powl(long double x, long double y) { // AARCH64: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double // LLVM: define x86_fp80 @call_powl - // LLVM: %{{.+}} = call x86_fp80 @powl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) + // LLVM: %{{.+}} = call x86_fp80 @llvm.pow.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } } diff --git a/clang/test/CIR/Lowering/builtin-binary-fp2fp.c b/clang/test/CIR/Lowering/builtin-binary-fp2fp.c index acde798fdf11..0910776847dc 100644 --- a/clang/test/CIR/Lowering/builtin-binary-fp2fp.c +++ b/clang/test/CIR/Lowering/builtin-binary-fp2fp.c @@ -1,5 +1,7 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fmath-errno -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM-FASTMATH // copysign @@ -11,6 +13,10 @@ float my_copysignf(float x, float y) { // LLVM: %{{.+}} = call float @llvm.copysign.f32(float %{{.+}}, float %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define float @my_copysignf +// LLVM-FASTMATH: %{{.+}} = call float @llvm.copysign.f32(float %{{.+}}, float %{{.+}}) +// LLVM-FASTMATH: } + double my_copysign(double x, double y) { return __builtin_copysign(x, y); } @@ -19,6 +25,10 @@ double my_copysign(double x, double y) { // LLVM: %{{.+}} = call double @llvm.copysign.f64(double %{{.+}}, double %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define double @my_copysign +// LLVM-FASTMATH: %{{.+}} = call double @llvm.copysign.f64(double %{{.+}}, double %{{.+}}) +// LLVM-FASTMATH: } + long double my_copysignl(long double x, long double y) { return __builtin_copysignl(x, y); } @@ -27,6 +37,10 @@ long double my_copysignl(long double x, long double y) { // LLVM: %{{.+}} = call x86_fp80 @llvm.copysign.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define x86_fp80 @my_copysignl +// LLVM-FASTMATH: %{{.+}} = call x86_fp80 @llvm.copysign.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) +// LLVM-FASTMATH: } + // fmax float my_fmaxf(float x, float y) { @@ -37,6 +51,10 @@ float my_fmaxf(float x, float y) { // LLVM: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define float @my_fmaxf +// LLVM-FASTMATH: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) +// LLVM-FASTMATH: } + double my_fmax(double x, double y) { return __builtin_fmax(x, y); } @@ -45,6 +63,10 @@ double my_fmax(double x, double y) { // LLVM: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define double @my_fmax +// LLVM-FASTMATH: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) +// LLVM-FASTMATH: } + long double my_fmaxl(long double x, long double y) { return __builtin_fmaxl(x, y); } @@ -53,6 +75,10 @@ long double my_fmaxl(long double x, long double y) { // LLVM: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define x86_fp80 @my_fmaxl +// LLVM-FASTMATH: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) +// LLVM-FASTMATH: } + // fmin float my_fminf(float x, float y) { @@ -63,6 +89,10 @@ float my_fminf(float x, float y) { // LLVM: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define float @my_fminf +// LLVM-FASTMATH: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) +// LLVM-FASTMATH: } + double my_fmin(double x, double y) { return __builtin_fmin(x, y); } @@ -71,6 +101,10 @@ double my_fmin(double x, double y) { // LLVM: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define double @my_fmin +// LLVM-FASTMATH: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) +// LLVM-FASTMATH: } + long double my_fminl(long double x, long double y) { return __builtin_fminl(x, y); } @@ -79,6 +113,10 @@ long double my_fminl(long double x, long double y) { // LLVM: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define x86_fp80 @my_fminl +// LLVM-FASTMATH: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) +// LLVM-FASTMATH: } + // fmod float my_fmodf(float x, float y) { @@ -89,6 +127,10 @@ float my_fmodf(float x, float y) { // LLVM: %{{.+}} = call float @fmodf(float %{{.+}}, float %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define float @my_fmodf +// LLVM-FASTMATH: %{{.+}} = frem float %{{.+}}, %{{.+}} +// LLVM-FASTMATH: } + double my_fmod(double x, double y) { return __builtin_fmod(x, y); } @@ -97,6 +139,10 @@ double my_fmod(double x, double y) { // LLVM: %{{.+}} = call double @fmod(double %{{.+}}, double %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define double @my_fmod +// LLVM-FASTMATH: %{{.+}} = frem double %{{.+}}, %{{.+}} +// LLVM-FASTMATH: } + long double my_fmodl(long double x, long double y) { return __builtin_fmodl(x, y); } @@ -105,6 +151,10 @@ long double my_fmodl(long double x, long double y) { // LLVM: %{{.+}} = call x86_fp80 @fmodl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define x86_fp80 @my_fmodl +// LLVM-FASTMATH: %{{.+}} = frem x86_fp80 %{{.+}}, %{{.+}} +// LLVM-FASTMATH: } + // pow float my_powf(float x, float y) { @@ -115,6 +165,10 @@ float my_powf(float x, float y) { // LLVM: %{{.+}} = call float @powf(float %{{.+}}, float %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define float @my_powf +// LLVM-FASTMATH: %{{.+}} = call float @llvm.pow.f32(float %{{.+}}, float %{{.+}}) +// LLVM-FASTMATH: } + double my_pow(double x, double y) { return __builtin_pow(x, y); } @@ -123,6 +177,10 @@ double my_pow(double x, double y) { // LLVM: %{{.+}} = call double @pow(double %{{.+}}, double %{{.+}}) // LLVM: } +// LLVM-FASTMATH: define double @my_pow +// LLVM-FASTMATH: %{{.+}} = call double @llvm.pow.f64(double %{{.+}}, double %{{.+}}) +// LLVM-FASTMATH: } + long double my_powl(long double x, long double y) { return __builtin_powl(x, y); } @@ -130,3 +188,7 @@ long double my_powl(long double x, long double y) { // LLVM: define x86_fp80 @my_powl // LLVM: %{{.+}} = call x86_fp80 @powl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } + +// LLVM-FASTMATH: define x86_fp80 @my_powl +// LLVM-FASTMATH: %{{.+}} = call x86_fp80 @llvm.pow.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) +// LLVM-FASTMATH: } From 22c9a710a6ebb72d673b66a6dffeeff9a28c521c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 11 Jul 2024 17:40:00 -0700 Subject: [PATCH 1681/2301] [CIR][Lowering] Exceptions: Add support for flattening cir.try For now only handle the cir.try part, cir.catch is coming next. Using flat cir for tests make this easy to incrementally build. --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 1 - .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 53 ++++++++++++++++++- clang/test/CIR/Lowering/try-catch.cpp | 39 ++++++++++++++ 3 files changed, 90 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/Lowering/try-catch.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 7366c1ba42ca..f9b1d6de7d18 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -508,7 +508,6 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { // If the catch was not required, bail out now. if (!CatchScope.hasEHBranches()) { - llvm_unreachable("NYI"); CatchScope.clearHandlerBlocks(); EHStack.popCatch(); return; diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 4edd74babf24..1a3281c8839a 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -172,6 +172,55 @@ class CIRScopeOpFlattening : public mlir::OpRewritePattern { } }; +class CIRTryOpFlattening : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::TryOp tryOp, + mlir::PatternRewriter &rewriter) const override { + mlir::OpBuilder::InsertionGuard guard(rewriter); + auto loc = tryOp.getLoc(); + + // Empty scope: just remove it. + if (tryOp.getRegion().empty()) { + rewriter.eraseOp(tryOp); + return mlir::success(); + } + + // Split the current block before the TryOp to create the inlining + // point. + auto *currentBlock = rewriter.getInsertionBlock(); + auto *remainingOpsBlock = + rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); + mlir::Block *continueBlock; + continueBlock = remainingOpsBlock; + + // Inline body region. + auto *beforeBody = &tryOp.getRegion().front(); + auto *afterBody = &tryOp.getRegion().back(); + rewriter.inlineRegionBefore(tryOp.getRegion(), continueBlock); + + // Branch into the body of the region. + rewriter.setInsertionPointToEnd(currentBlock); + rewriter.create(loc, mlir::ValueRange(), beforeBody); + + // Replace the tryOp return with a branch that jumps out of the body. + rewriter.setInsertionPointToEnd(afterBody); + auto yieldOp = cast(afterBody->getTerminator()); + assert(yieldOp.getOperands().size() == 1 && "expect one exact value"); + auto br = rewriter.replaceOpWithNewOp( + yieldOp, yieldOp.getArgs(), continueBlock); + + // Replace the op with values return from the body region. + continueBlock->addArgument(br.getDestOperands()[0].getType(), + tryOp.getLoc()); + rewriter.replaceOp(tryOp, continueBlock->getArguments()); + + return mlir::success(); + } +}; + class CIRLoopOpInterfaceFlattening : public mlir::OpInterfaceRewritePattern { public: @@ -482,7 +531,7 @@ class CIRTernaryOpFlattening void populateFlattenCFGPatterns(RewritePatternSet &patterns) { patterns .add( + CIRSwitchOpFlattening, CIRTernaryOpFlattening, CIRTryOpFlattening>( patterns.getContext()); } @@ -493,7 +542,7 @@ void FlattenCFGPass::runOnOperation() { // Collect operations to apply patterns. SmallVector ops; getOperation()->walk([&](Operation *op) { - if (isa(op)) + if (isa(op)) ops.push_back(op); }); diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp new file mode 100644 index 000000000000..545cde0accee --- /dev/null +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -0,0 +1,39 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.flat.cir +// RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s + +double division(int a, int b); + +// CIR: cir.func @_Z2tcv() +// CIR_FLAT: cir.func @_Z2tcv() +unsigned long long tc() { + int x = 50, y = 3; + unsigned long long z; + + try { + int a = 4; + // CIR_FLAT: cir.br ^bb1 + // CIR_FLAT: ^bb1: // pred: ^bb0 + // CIR_FLAT: cir.alloca !cir.ptr, !cir.ptr>, ["msg"] + // CIR_FLAT: cir.alloca !s32i, !cir.ptr, ["idx"] + // CIR_FLAT: cir.br ^bb2 + // CIR_FLAT: ^bb2: // pred: ^bb1 + // CIR_FLAT: %[[EH_PTR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__exception_ptr"] + // CIR_FLAT: cir.try_call exception(%[[EH_PTR]]) @_Z8divisionii( + z = division(x, y); + a++; + + // CIR_FLAT: %[[LOAD_EH_PTR:.*]] = cir.load %[[EH_PTR]] : !cir.ptr>, !cir.ptr + // CIR_FLAT: cir.br ^bb3(%[[LOAD_EH_PTR]] : !cir.ptr) + // CIR_FLAT: ^bb3(%[[EH_ARG:.*]]: !cir.ptr loc(fused[#loc1, #loc2])): // pred: ^bb2 + // CIR_FLAT: cir.catch(%[[EH_ARG:.*]] : !cir.ptr, [ + } catch (int idx) { + z = 98; + idx++; + } catch (const char* msg) { + z = 99; + (void)msg[0]; + } + + return z; +} + From 59cac57147bf48264cd54d7324ee9926f0822ca1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 11 Jul 2024 17:58:40 -0700 Subject: [PATCH 1682/2301] [CIR][NFCI] cir.try: improve docs and be more flexible on the region --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 21 ++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index bb1252c9c62c..7b1c8cca6c62 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3092,17 +3092,26 @@ def TryOp : CIR_Op<"try", [DeclareOpInterfaceMethods, RecursivelySpeculatable, AutomaticAllocationScope, NoRegionArguments]> { - let summary = ""; + let summary = "C++ try block"; let description = [{ ```mlir - TBD - ``` - Note that variables declared inside a `try {}` in C++ will - have their allocas places in the surrounding (parent) scope. + Holds the lexical scope of `try {}`. Note that resources used on catch + clauses are usually allocated in the same parent as `cir.try`. + + Example: + %5 = cir.alloca !cir.ptr, !cir.ptr>, ["msg"] + ... + %10 = cir.try { + %11 = cir.alloca !cir.ptr, !cir.ptr>, ["__exception_ptr"] + ... // cir.try_call's + %20 = cir.load %11 : !cir.ptr>, !cir.ptr + cir.yield %20 : !cir.ptr + } : () -> !cir.ptr + ``` }]; - let regions = (region SizedRegion<1>:$body); + let regions = (region AnyRegion:$body); let results = (outs ExceptionInfoPtr:$result); let assemblyFormat = [{ From ea6fa6df92e295daefd064920f2ff9358d09563b Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 12 Jul 2024 14:12:50 -0400 Subject: [PATCH 1683/2301] [CIR][IR] Fix parsing of dsolocal in cir.func (#732) as title. document will be in another PR as it seems to be a different upstream branch --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 3 +++ clang/test/CIR/IR/func-dsolocal-parser.cir | 13 +++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 clang/test/CIR/IR/func-dsolocal-parser.cir diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 166fa7a2df09..11d38bb154cc 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1926,6 +1926,7 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { auto lambdaNameAttr = getLambdaAttrName(state.name); auto visNameAttr = getSymVisibilityAttrName(state.name); auto noProtoNameAttr = getNoProtoAttrName(state.name); + auto dsolocalNameAttr = getDsolocalAttrName(state.name); if (::mlir::succeeded(parser.parseOptionalKeyword(builtinNameAttr.strref()))) state.addAttribute(builtinNameAttr, parser.getBuilder().getUnitAttr()); if (::mlir::succeeded( @@ -1949,6 +1950,8 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { state.addAttribute(visNameAttr, parser.getBuilder().getStringAttr(visAttrStr)); } + if (parser.parseOptionalKeyword(dsolocalNameAttr).succeeded()) + state.addAttribute(dsolocalNameAttr, parser.getBuilder().getUnitAttr()); StringAttr nameAttr; SmallVector arguments; diff --git a/clang/test/CIR/IR/func-dsolocal-parser.cir b/clang/test/CIR/IR/func-dsolocal-parser.cir new file mode 100644 index 000000000000..1d8322cd8e26 --- /dev/null +++ b/clang/test/CIR/IR/func-dsolocal-parser.cir @@ -0,0 +1,13 @@ +// RUN: cir-opt %s | FileCheck %s + +!s32i = !cir.int +#fn_attr = #cir, nothrow = #cir.nothrow, optnone = #cir.optnone})> +module { + cir.func dsolocal @foo(%arg0: !s32i ) extra(#fn_attr) { + %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} + cir.store %arg0, %0 : !s32i, !cir.ptr + cir.return + } +} + +// CHECK: cir.func dsolocal @foo(%arg0: !s32i) extra(#fn_attr) From 24049351316886a5e3e813611531cea404647e4f Mon Sep 17 00:00:00 2001 From: 7mile Date: Sat, 13 Jul 2024 02:17:41 +0800 Subject: [PATCH 1684/2301] [CIR][ABI][NFC] AppleARM64 CXXABI handling in TargetLowering library (#733) In [this commit](https://github.com/llvm/clangir/commit/e5d840b72c1bdb3276094960e9746e413c6f4456), minimal support for Darwin aarch64 triples was added. But TargetLoweringInfo was not updated correspondingly. This could lead to a failure of the test `driver.c` with CallConvLowering pass enabled (or `LowerModule` used in some other ways). This PR fixes the inconsistency and adds an extra missing feature flag for it. --- clang/include/clang/CIR/MissingFeatures.h | 3 +++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 1 + .../CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp | 4 ++++ 3 files changed, 8 insertions(+) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 5e2c4f7143a9..48ab598311e8 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -265,6 +265,9 @@ struct MissingFeatures { // We're ignoring several details regarding ABI-halding for Swift. static bool swift() { return false; } + // The AppleARM64 is using ItaniumCXXABI, which is not quite right. + static bool appleArm64CXXABI() { return false; } + // Despite carrying some information about variadics, we are currently // ignoring this to focus only on the code necessary to lower non-variadics. static bool variadicFunctions() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 106117f8f7da..1e272cf953b8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -362,6 +362,7 @@ CIRGenCXXABI *cir::CreateCIRGenItaniumCXXABI(CIRGenModule &CGM) { case TargetCXXABI::AppleARM64: // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits // from ARMCXXABI. We'll have to follow suit. + assert(!MissingFeatures::appleArm64CXXABI()); return new CIRGenItaniumCXXABI(CGM); default: diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index ce12263b73d8..5fac8fdaf359 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -65,6 +65,10 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { // include the other 32-bit ARM oddities: constructor/destructor return values // and array cookies. case clang::TargetCXXABI::GenericAArch64: + case clang::TargetCXXABI::AppleARM64: + // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits + // from ARMCXXABI. We'll have to follow suit. + assert(!::cir::MissingFeatures::appleArm64CXXABI()); return new ItaniumCXXABI(LM, /*UseARMMethodPtrABI=*/true, /*UseARMGuardVarABI=*/true); From 911dbe2500e9699dcbe4fd30d984f1c728db2f20 Mon Sep 17 00:00:00 2001 From: 7mile Date: Sat, 13 Jul 2024 02:28:05 +0800 Subject: [PATCH 1685/2301] [CIR][ABI][NFC] Make `createLowerModule` public (#734) Although currently LowerModule is not ready for formal usage, we need it for target-specific lowering to LLVM. This PR temporarily public the symbol `createLowerModule` to reuse the logic of preparing a `LowerModule`, making it easier for future refactor (making `TargetLoweringInfo` available for most stages in CIR Lowering). --- .../Dialect/Transforms/CallConvLowering.cpp | 36 ++----------------- .../Transforms/TargetLowering/LowerModule.cpp | 32 ++++++++++++++++- .../Transforms/TargetLowering/LowerModule.h | 2 ++ 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 6ba681d0cc35..45bcbe15f7a4 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -6,9 +6,6 @@ // //===----------------------------------------------------------------------===// -// FIXME(cir): This header file is not exposed to the public API, but can be -// reused by CIR ABI lowering since it holds target-specific information. -#include "../../../Basic/Targets.h" #include "TargetLowering/LowerModule.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" @@ -16,7 +13,6 @@ #include "mlir/IR/PatternMatch.h" #include "mlir/Pass/Pass.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" -#include "clang/Basic/TargetOptions.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #define GEN_PASS_DEF_CALLCONVLOWERING @@ -25,35 +21,6 @@ namespace mlir { namespace cir { -namespace { - -LowerModule createLowerModule(FuncOp op, PatternRewriter &rewriter) { - auto module = op->getParentOfType(); - - // Fetch the LLVM data layout string. - auto dataLayoutStr = cast( - module->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName())); - - // Fetch target information. - llvm::Triple triple( - cast(module->getAttr("cir.triple")).getValue()); - clang::TargetOptions targetOptions; - targetOptions.Triple = triple.str(); - auto targetInfo = clang::targets::AllocateTarget(triple, targetOptions); - - // FIXME(cir): This just uses the default language options. We need to account - // for custom options. - // Create context. - assert(!::cir::MissingFeatures::langOpts()); - clang::LangOptions langOpts; - auto context = CIRLowerContext(module, langOpts); - context.initBuiltinTypes(*targetInfo); - - return LowerModule(context, module, dataLayoutStr, *targetInfo, rewriter); -} - -} // namespace - //===----------------------------------------------------------------------===// // Rewrite Patterns //===----------------------------------------------------------------------===// @@ -68,7 +35,8 @@ struct CallConvLoweringPattern : public OpRewritePattern { if (!op.getAst()) return op.emitError("function has no AST information"); - LowerModule lowerModule = createLowerModule(op, rewriter); + auto modOp = op->getParentOfType(); + LowerModule lowerModule = createLowerModule(modOp, rewriter); // Rewrite function calls before definitions. This should be done before // lowering the definition. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 894a8581c9d2..0994497c8dda 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -11,10 +11,16 @@ // //===----------------------------------------------------------------------===// -#include "LowerModule.h" +// FIXME(cir): This header file is not exposed to the public API, but can be +// reused by CIR ABI lowering since it holds target-specific information. +#include "../../../../Basic/Targets.h" +#include "clang/Basic/TargetOptions.h" + #include "CIRLowerContext.h" #include "LowerFunction.h" +#include "LowerModule.h" #include "TargetInfo.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/PatternMatch.h" @@ -208,5 +214,29 @@ LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { return success(); } +// TODO: not to create it every time +LowerModule createLowerModule(ModuleOp module, PatternRewriter &rewriter) { + // Fetch the LLVM data layout string. + auto dataLayoutStr = cast( + module->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName())); + + // Fetch target information. + llvm::Triple triple( + cast(module->getAttr("cir.triple")).getValue()); + clang::TargetOptions targetOptions; + targetOptions.Triple = triple.str(); + auto targetInfo = clang::targets::AllocateTarget(triple, targetOptions); + + // FIXME(cir): This just uses the default language options. We need to account + // for custom options. + // Create context. + assert(!::cir::MissingFeatures::langOpts()); + clang::LangOptions langOpts; + auto context = CIRLowerContext(module, langOpts); + context.initBuiltinTypes(*targetInfo); + + return LowerModule(context, module, dataLayoutStr, *targetInfo, rewriter); +} + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index 74f7ed0bb5ac..35870c716c88 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -91,6 +91,8 @@ class LowerModule { LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp); }; +LowerModule createLowerModule(ModuleOp module, PatternRewriter &rewriter); + } // namespace cir } // namespace mlir From d73d78851acbea6fecf9f3480abc1cd8bd61c5e0 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 12 Jul 2024 15:46:02 -0400 Subject: [PATCH 1686/2301] [CIR][CIRGen] More on dsolocal: visibility improvements (#735) In this PR, we 1. implement defaultVisibility as far as dsolocal is concerned, currently is either MLIR::Visibility isPublic() or isPrivate(). Now, we don't handle hiddenVisibility and protectedVisibility from AST. I put missFeature assert so that If in anyway we translate hiddenVisibility or protectedVisibility into mlir::SymbolTable::Visibility::Private (hopefully not for it'd be confusing), then we need to revise this defaultVisibility setting. 2. call setNonAliasAttributes on global op upon discovery of its initialization, thus we have globals dso_local correctly set. Still missing is lots of function should have dso_local set, but the all depend on comDat implementation, which will come from next PR within the next few days. --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 13 ++++++++----- clang/test/CIR/CodeGen/linkage.c | 2 +- clang/test/CIR/CodeGen/static.cpp | 8 ++++---- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 27a8be9e6d11..c38c5ad6af61 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -253,7 +253,7 @@ mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { } CurCGF = nullptr; - // TODO: setNonAliasAttributes + setNonAliasAttributes(GD, Fn); // TODO: SetLLVMFunctionAttributesForDefinition return Fn; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 3f093f560477..81d0482a07a0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -366,10 +366,13 @@ bool CIRGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { } static bool hasDefaultVisibility(CIRGlobalValueInterface GV) { - // TODO: we need to have a precise definition of what is a default visibility. - // in the context of MILR and CIR, now we default to - assert(!MissingFeatures::setDefaultVisibility()); - return true; + // Since we do not support hidden visibility and private visibility, + // we can assume that the default visibility is public or private. + // The way we use private visibility now simply is just treating it + // as either local or private linkage, or just default for declarations + assert(!MissingFeatures::hiddenVisibility()); + assert(!MissingFeatures::protectedVisibility()); + return GV.isPublic() || GV.isPrivate(); } static bool shouldAssumeDSOLocal(const CIRGenModule &CGM, @@ -1292,7 +1295,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, GV.setLinkage(mlir::cir::GlobalLinkageKind::WeakAnyLinkage); } - // TODO(cir): setNonAliasAttributes(D, GV); + setNonAliasAttributes(D, GV); if (D->getTLSKind() && !GV.getTlsModelAttr()) { if (D->getTLSKind() == VarDecl::TLS_Dynamic) diff --git a/clang/test/CIR/CodeGen/linkage.c b/clang/test/CIR/CodeGen/linkage.c index 84b1413f559a..b3a108df7bba 100644 --- a/clang/test/CIR/CodeGen/linkage.c +++ b/clang/test/CIR/CodeGen/linkage.c @@ -19,7 +19,7 @@ int foo(void) { // LLVM: define i32 @foo( static int var = 0; -// CIR: cir.global "private" internal @var = #cir.int<0> : !s32i +// CIR: cir.global "private" internal dsolocal @var = #cir.int<0> : !s32i int get_var(void) { return var; } diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 2d51cb1514d9..d756a7834f70 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -19,7 +19,7 @@ static Init __ioinit2(false); // BEFORE: module {{.*}} { // BEFORE-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) // BEFORE-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) -// BEFORE-NEXT: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { +// BEFORE-NEXT: cir.global "private" internal dsolocal @_ZL8__ioinit = ctor : !ty_22Init22 { // BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // BEFORE-NEXT: %1 = cir.const #true // BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () @@ -27,7 +27,7 @@ static Init __ioinit2(false); // BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () // BEFORE-NEXT: } {ast = #cir.var.decl.ast} -// BEFORE: cir.global "private" internal @_ZL9__ioinit2 = ctor : !ty_22Init22 { +// BEFORE: cir.global "private" internal dsolocal @_ZL9__ioinit2 = ctor : !ty_22Init22 { // BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // BEFORE-NEXT: %1 = cir.const #false // BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () @@ -43,7 +43,7 @@ static Init __ioinit2(false); // AFTER-NEXT: cir.func private @__cxa_atexit(!cir.ptr)>>, !cir.ptr, !cir.ptr) // AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) // AFTER-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) -// AFTER-NEXT: cir.global "private" internal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} +// AFTER-NEXT: cir.global "private" internal dsolocal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init() // AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // AFTER-NEXT: %1 = cir.const #true @@ -55,7 +55,7 @@ static Init __ioinit2(false); // AFTER-NEXT: %6 = cir.get_global @__dso_handle : !cir.ptr // AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return -// AFTER: cir.global "private" internal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} +// AFTER: cir.global "private" internal dsolocal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() // AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // AFTER-NEXT: %1 = cir.const #false From 6f45aa4b88ad93613183d57ed8aa4fb3178cdcda Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 12 Jul 2024 19:54:23 -0700 Subject: [PATCH 1687/2301] [CIR][CIRGen] cir.try: handle trivial ones and fix crash OG codegen does not generate any exception related content when there are not calls happening inside the try block. For now we mimic OG and do the same, until we see a concrete use case that would have used emitting this code. --- clang/lib/CIR/CodeGen/CIRGenCleanup.h | 3 +-- clang/lib/CIR/CodeGen/CIRGenException.cpp | 3 ++- .../CIR/Dialect/Transforms/MergeCleanups.cpp | 27 +++++++++++++++++-- clang/test/CIR/CodeGen/try-catch.cpp | 24 +++++++++++++++++ 4 files changed, 52 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h index 4627b60d1c63..cf6dbb1851b8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -206,8 +206,7 @@ class EHCatchScope : public EHScope { // 'takeHandler' or some such function which removes ownership from the // EHCatchScope object if the handlers should live longer than EHCatchScope. void clearHandlerBlocks() { - for (unsigned I = 0, N = getNumHandlers(); I != N; ++I) - delete getHandler(I).Block; + // The blocks are owned by CatchOp, nothing to delete. } typedef const Handler *iterator; diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index f9b1d6de7d18..4f74c861fc74 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -257,7 +257,7 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup) { // pointer but only use it to denote we're tracking things, but there // shouldn't be any changes to that block after work done in this function. auto catchOp = currLexScope->getExceptionInfo().catchOp; - assert(catchOp.getNumRegions() && "expected at least one region"); + assert(catchOp && catchOp.getNumRegions() && "expected at least one region"); auto &fallbackRegion = catchOp.getRegion(catchOp.getNumRegions() - 1); auto *resumeBlock = &fallbackRegion.getBlocks().back(); @@ -510,6 +510,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { if (!CatchScope.hasEHBranches()) { CatchScope.clearHandlerBlocks(); EHStack.popCatch(); + currLexScope->getExceptionInfo().catchOp->erase(); return; } diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index 2bddd9c46135..c0af33ef071d 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -83,6 +83,28 @@ struct RemoveEmptySwitch : public OpRewritePattern { } }; +struct RemoveTrivialTry : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult match(TryOp op) const final { + return success(op.getResult().use_empty() && op.getBody().hasOneBlock()); + } + + void rewrite(TryOp op, PatternRewriter &rewriter) const final { + // Move try body to the parent. + assert(op.getBody().hasOneBlock()); + + Block *parentBlock = op.getOperation()->getBlock(); + mlir::Block *tryBody = &op.getBody().getBlocks().front(); + YieldOp y = dyn_cast(tryBody->getTerminator()); + assert(y && "expected well wrapped up try block"); + y->erase(); + + rewriter.inlineBlockBefore(tryBody, parentBlock, Block::iterator(op)); + rewriter.eraseOp(op); + } +}; + //===----------------------------------------------------------------------===// // MergeCleanupsPass //===----------------------------------------------------------------------===// @@ -106,7 +128,8 @@ void populateMergeCleanupPatterns(RewritePatternSet &patterns) { patterns.add< RemoveRedundantBranches, RemoveEmptyScope, - RemoveEmptySwitch + RemoveEmptySwitch, + RemoveTrivialTry >(patterns.getContext()); // clang-format on } @@ -121,7 +144,7 @@ void MergeCleanupsPass::runOnOperation() { getOperation()->walk([&](Operation *op) { // CastOp here is to perform a manual `fold` in // applyOpPatternsAndFold - if (isa(op)) + if (isa(op)) ops.push_back(op); }); diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 46d6315e53a2..f55cfd042d49 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -83,5 +83,29 @@ unsigned long long tc3() { z = 100; } + return z; +} + +// CIR: cir.func @_Z3tc4v() +unsigned long long tc4() { + int x = 50, y = 3; + unsigned long long z; + + // CIR-NOT: cir.try + try { + int a = 4; + a++; + + // CIR: cir.scope { + // CIR: cir.alloca !s32i, !cir.ptr, ["a", init] + // CIR-NOT: cir.alloca !cir.ptr + // CIR: cir.const #cir.int<4> : !s32i + // CIR: cir.unary(inc, + // CIR: cir.store %11, %8 : !s32i, !cir.ptr + } catch (int idx) { + z = 98; + idx++; + } + return z; } \ No newline at end of file From d91ecfca1d8c629db08c86e9bcb7ab8033e7ddfa Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 16 Jul 2024 05:24:01 +0800 Subject: [PATCH 1688/2301] [CIR][Dialect] Add minimal definitions of unified address space `offload_*` cases (#738) This PR adds definitions of unified address space cases `offload_*` discussed in [this RFC thread](https://discourse.llvm.org/t/rfc-clangir-unified-address-space-design-in-clangir/79728). It also refactors the `getValueFromLangAS` method in tablegen to a hand-written method, because it should be a non-trivial map for unified AS. --------- Co-authored-by: Julian Oppermann --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 66 +++++++++---------- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 38 +++++++++++ clang/test/CIR/IR/address-space.cir | 25 +++++++ 3 files changed, 95 insertions(+), 34 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index ca9e991e6565..a9a601f5fae9 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -652,19 +652,27 @@ def DynamicCastInfoAttr // AddressSpaceAttr //===----------------------------------------------------------------------===// -// TODO: other CIR AS cases -def AS_Target : I32EnumAttrCase<"target", 21>; +def AS_OffloadPrivate : I32EnumAttrCase<"offload_private", 1>; +def AS_OffloadLocal : I32EnumAttrCase<"offload_local", 2>; +def AS_OffloadGlobal : I32EnumAttrCase<"offload_global", 3>; +def AS_OffloadConstant : I32EnumAttrCase<"offload_constant", 4>; +def AS_OffloadGeneric : I32EnumAttrCase<"offload_generic", 5>; +def AS_Target : I32EnumAttrCase<"target", 6>; def AddressSpaceAttr : CIR_Attr<"AddressSpace", "addrspace"> { let summary = "Address space attribute for pointer types"; let description = [{ - The address space attribute models `clang::LangAS` rather than the LLVM - address space, which means it's not yet converted by the address space map - to carry target-specific semantics. + The address space attribute is used in pointer types. It essentially + provides a unified model on top of `clang::LangAS`, rather than LLVM address + spaces. - The representation is one-to-one except for `LangAS::Default`, which - corresponds to a null attribute instead. + The representation is further simplified: `LangAS::Default` is encoded as + a null attribute; many address spaces from different offloading languages + are unified as `offload_*`; etc. + + The meaning of `value` parameter is defined as an extensible enum `Kind`, + which encodes target AS as offset to the last language AS. }]; let parameters = (ins "int32_t":$value); @@ -690,7 +698,8 @@ def AddressSpaceAttr : CIR_Attr<"AddressSpace", "addrspace"> { // simplified assembly format `custom`. list langASCases = [ - // TODO: includes all non-target CIR AS cases here + AS_OffloadPrivate, AS_OffloadLocal, AS_OffloadGlobal, AS_OffloadConstant, + AS_OffloadGeneric ]; I32EnumAttrCase targetASCase = AS_Target; @@ -703,9 +712,23 @@ def AddressSpaceAttr : CIR_Attr<"AddressSpace", "addrspace"> { bool isTarget() const; unsigned getTargetValue() const; - static std::optional parseValueFromString(llvm::StringRef s); + /// Convert a clang LangAS to its corresponding CIR AS storage value. This + /// helper does not perform any language-specific mappings (e.g. determining + /// the default AS for offloading languages), so these must be handled in + /// the caller. static std::optional getValueFromLangAS(clang::LangAS v); + + /// Helper methods for the assembly format `custom`. + static std::optional parseValueFromString(llvm::StringRef s); static std::optional stringifyValue(int32_t v); + + struct Kind { + }]#!interleave( + !foreach(case, langASCases, + "static constexpr int32_t "#case.symbol#" = "#case.value#";" + ), "\n" + )#[{ + }; }]; let extraClassDefinition = [{ @@ -757,31 +780,6 @@ def AddressSpaceAttr : CIR_Attr<"AddressSpace", "addrspace"> { return std::nullopt; } } - - std::optional - $cppClass::getValueFromLangAS(clang::LangAS langAS) { - assert((langAS == clang::LangAS::Default || - clang::isTargetAddressSpace(langAS)) && - "Language-specific address spaces are not supported"); - switch (langAS) { - }] - # - !interleave( - !foreach(case, langASCases, - "case clang::LangAS::"#case.symbol - # [{: llvm_unreachable("Not Yet Supported");}] ), - "\n" - ) - # - [{ - case clang::LangAS::Default: - // Default address space should be encoded as a null attribute. - return std::nullopt; - default: - // Target address space offset arithmetics - return clang::toTargetAddressSpace(langAS) + kFirstTargetASValue; - } - } }]; } diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 3463c97b6d7b..7e942f85f959 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -556,6 +556,44 @@ LogicalResult OpenCLKernelMetadataAttr::verify( return success(); } +//===----------------------------------------------------------------------===// +// AddressSpaceAttr definitions +//===----------------------------------------------------------------------===// + +std::optional +AddressSpaceAttr::getValueFromLangAS(clang::LangAS langAS) { + using clang::LangAS; + switch (langAS) { + case LangAS::Default: + // Default address space should be encoded as a null attribute. + return std::nullopt; + case LangAS::opencl_global: + case LangAS::opencl_local: + case LangAS::opencl_constant: + case LangAS::opencl_private: + case LangAS::opencl_generic: + case LangAS::opencl_global_device: + case LangAS::opencl_global_host: + case LangAS::cuda_device: + case LangAS::cuda_constant: + case LangAS::cuda_shared: + case LangAS::sycl_global: + case LangAS::sycl_global_device: + case LangAS::sycl_global_host: + case LangAS::sycl_local: + case LangAS::sycl_private: + case LangAS::ptr32_sptr: + case LangAS::ptr32_uptr: + case LangAS::ptr64: + case LangAS::hlsl_groupshared: + case LangAS::wasm_funcref: + llvm_unreachable("NYI"); + default: + // Target address space offset arithmetics + return clang::toTargetAddressSpace(langAS) + kFirstTargetASValue; + } +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/address-space.cir b/clang/test/CIR/IR/address-space.cir index b7472876f56d..176cc6ca8c62 100644 --- a/clang/test/CIR/IR/address-space.cir +++ b/clang/test/CIR/IR/address-space.cir @@ -13,4 +13,29 @@ module { cir.func @test_format2(%arg0: !cir.ptr) { cir.return } + + // CHECK: @test_format3(%arg0: !cir.ptr) + cir.func @test_format3(%arg0: !cir.ptr) { + cir.return + } + + // CHECK: @test_format4(%arg0: !cir.ptr) + cir.func @test_format4(%arg0: !cir.ptr) { + cir.return + } + + // CHECK: @test_format5(%arg0: !cir.ptr) + cir.func @test_format5(%arg0: !cir.ptr) { + cir.return + } + + // CHECK: @test_format6(%arg0: !cir.ptr) + cir.func @test_format6(%arg0: !cir.ptr) { + cir.return + } + + // CHECK: @test_format7(%arg0: !cir.ptr) + cir.func @test_format7(%arg0: !cir.ptr) { + cir.return + } } From 236062c8da5b4926d929aaf027d86695dd329ecf Mon Sep 17 00:00:00 2001 From: David Olsen Date: Mon, 15 Jul 2024 15:17:43 -0700 Subject: [PATCH 1689/2301] [CIR] Atomic builtins with non-const memory order (#736) Fix #731 Implement atomic built-in operations where the memory order argument is a runtime value rather than a compile-time constant. This is necessary to support `std::atomic`. The ClangIR atomic operations don't support runtime memory orders, so this is implemented during CodeGen by generating a switch statement that effectively converts the runtime memory order into a compile-time memory order. A new file, atomic-runtime.cpp, was added to the ClangIR CodeGen tests to cover this situation. --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 190 ++++++++++++- clang/test/CIR/CodeGen/atomic-runtime.cpp | 309 +++++++++++++++++++++ clang/test/CIR/Lowering/atomic-runtime.cpp | 37 +++ 3 files changed, 533 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/atomic-runtime.cpp create mode 100644 clang/test/CIR/Lowering/atomic-runtime.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 4f6567d1fd15..e4e5c718c16b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -369,6 +369,82 @@ static bool isCstWeak(mlir::Value weakVal, bool &val) { return false; } +// Functions that help with the creation of compiler-generated switch +// statements that are used to implement non-constant memory order parameters. + +// Create a new region. Create a block within the region. Add a "break" +// statement to the block. Set the builder's insertion point to before the +// "break" statement. Add the new region to the given container. +template +static void startRegion(mlir::OpBuilder &builder, RegionsCont &Regions, + mlir::Location loc) { + + Regions.push_back(std::make_unique()); + mlir::Region *Region = Regions.back().get(); + mlir::Block *Block = builder.createBlock(Region); + builder.setInsertionPointToEnd(Block); + auto Break = builder.create(loc); + builder.setInsertionPoint(Break); +} + +// Create a "default:" label and add it to the given collection of case labels. +// Create the region that will hold the body of the "default:" block. +template +static void buildDefaultCase(mlir::OpBuilder &builder, CaseAttrsCont &CaseAttrs, + RegionsCont &Regions, mlir::Location loc) { + + auto Context = builder.getContext(); + auto EmptyArrayAttr = builder.getArrayAttr({}); + auto DefaultKind = + mlir::cir::CaseOpKindAttr::get(Context, mlir::cir::CaseOpKind::Default); + auto DefaultAttr = + mlir::cir::CaseAttr::get(Context, EmptyArrayAttr, DefaultKind); + CaseAttrs.push_back(DefaultAttr); + startRegion(builder, Regions, loc); +} + +// Create a single "case" label with the given MemOrder as its value. Add the +// "case" label to the given collection of case labels. Create the region that +// will hold the body of the "case" block. +template +static void +buildSingleMemOrderCase(mlir::OpBuilder &builder, CaseAttrsCont &CaseAttrs, + RegionsCont &Regions, mlir::Location loc, + mlir::Type Type, mlir::cir::MemOrder Order) { + + auto Context = builder.getContext(); + SmallVector OneOrder{ + mlir::cir::IntAttr::get(Type, static_cast(Order))}; + auto OneAttribute = builder.getArrayAttr(OneOrder); + auto CaseKind = + mlir::cir::CaseOpKindAttr::get(Context, mlir::cir::CaseOpKind::Equal); + auto CaseAttr = mlir::cir::CaseAttr::get(Context, OneAttribute, CaseKind); + CaseAttrs.push_back(CaseAttr); + startRegion(builder, Regions, loc); +} + +// Create a pair of "case" labels with the given MemOrders as their values. +// Add the combined "case" attribute to the given collection of case labels. +// Create the region that will hold the body of the "case" block. +template +static void buildDoubleMemOrderCase(mlir::OpBuilder &builder, + CaseAttrsCont &CaseAttrs, + RegionsCont &Regions, mlir::Location loc, + mlir::Type Type, mlir::cir::MemOrder Order1, + mlir::cir::MemOrder Order2) { + + auto Context = builder.getContext(); + SmallVector TwoOrders{ + mlir::cir::IntAttr::get(Type, static_cast(Order1)), + mlir::cir::IntAttr::get(Type, static_cast(Order2))}; + auto TwoAttributes = builder.getArrayAttr(TwoOrders); + auto CaseKind = + mlir::cir::CaseOpKindAttr::get(Context, mlir::cir::CaseOpKind::Anyof); + auto CaseAttr = mlir::cir::CaseAttr::get(Context, TwoAttributes, CaseKind); + CaseAttrs.push_back(CaseAttr); + startRegion(builder, Regions, loc); +} + static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, uint64_t Size, @@ -446,7 +522,49 @@ static void buildAtomicCmpXchgFailureSet( return; } - llvm_unreachable("NYI"); + // The failure memory order is not a compile-time value. The CIR atomic ops + // can't handle a runtime value; all memory orders must be hard coded. + // Generate a "switch" statement that converts the runtime value into a + // compile-time value. + CGF.getBuilder().create( + FailureOrderVal.getLoc(), FailureOrderVal, + [&](mlir::OpBuilder &builder, mlir::Location loc, + mlir::OperationState &os) { + SmallVector CaseAttrs; + SmallVector, 3> Regions; + + // default: + // Unsupported memory orders get generated as memory_order_relaxed, + // because there is no practical way to report an error at runtime. + buildDefaultCase(builder, CaseAttrs, Regions, loc); + buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, + SuccessOrder, mlir::cir::MemOrder::Relaxed, Scope); + + // case consume: + // case acquire: + // memory_order_consume is not implemented and always falls back to + // memory_order_acquire + buildDoubleMemOrderCase( + builder, CaseAttrs, Regions, loc, FailureOrderVal.getType(), + mlir::cir::MemOrder::Consume, mlir::cir::MemOrder::Acquire); + buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, + SuccessOrder, mlir::cir::MemOrder::Acquire, Scope); + + // A failed compare-exchange is a read-only operation. So + // memory_order_release and memory_order_acq_rel are not supported for + // the failure memory order. They fall back to memory_order_relaxed. + + // case seq_cst: + buildSingleMemOrderCase(builder, CaseAttrs, Regions, loc, + FailureOrderVal.getType(), + mlir::cir::MemOrder::SequentiallyConsistent); + buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, + SuccessOrder, + mlir::cir::MemOrder::SequentiallyConsistent, Scope); + + os.addRegions(Regions); + os.addAttribute("cases", builder.getArrayAttr(CaseAttrs)); + }); } static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, @@ -1149,8 +1267,74 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { RValTy, E->getExprLoc()); } - // Long case, when Order isn't obviously constant. - llvm_unreachable("NYI"); + // The memory order is not known at compile-time. The atomic operations + // can't handle runtime memory orders; the memory order must be hard coded. + // Generate a "switch" statement that converts a runtime value into a + // compile-time value. + builder.create( + Order.getLoc(), Order, + [&](mlir::OpBuilder &builder, mlir::Location loc, + mlir::OperationState &os) { + llvm::SmallVector CaseAttrs; + llvm::SmallVector, 6> Regions; + + // default: + // Use memory_order_relaxed for relaxed operations and for any memory + // order value that is not supported. There is no good way to report + // an unsupported memory order at runtime, hence the fallback to + // memory_order_relaxed. + buildDefaultCase(builder, CaseAttrs, Regions, loc); + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + mlir::cir::MemOrder::Relaxed, Scope); + + if (!IsStore) { + // case consume: + // case acquire: + // memory_order_consume is not implemented; it is always treated like + // memory_order_acquire. These memory orders are not valid for + // write-only operations. + buildDoubleMemOrderCase(builder, CaseAttrs, Regions, loc, + Order.getType(), mlir::cir::MemOrder::Consume, + mlir::cir::MemOrder::Acquire); + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, + Size, mlir::cir::MemOrder::Acquire, Scope); + } + + if (!IsLoad) { + // case release: + // memory_order_release is not valid for read-only operations. + buildSingleMemOrderCase(builder, CaseAttrs, Regions, loc, + Order.getType(), + mlir::cir::MemOrder::Release); + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, + Size, mlir::cir::MemOrder::Release, Scope); + } + + if (!IsLoad && !IsStore) { + // case acq_rel: + // memory_order_acq_rel is only valid for read-write operations. + buildSingleMemOrderCase(builder, CaseAttrs, Regions, loc, + Order.getType(), + mlir::cir::MemOrder::AcquireRelease); + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, + Size, mlir::cir::MemOrder::AcquireRelease, Scope); + } + + // case seq_cst: + buildSingleMemOrderCase(builder, CaseAttrs, Regions, loc, + Order.getType(), + mlir::cir::MemOrder::SequentiallyConsistent); + buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + mlir::cir::MemOrder::SequentiallyConsistent, Scope); + + os.addRegions(Regions); + os.addAttribute("cases", builder.getArrayAttr(CaseAttrs)); + }); + + if (RValTy->isVoidType()) + return RValue::get(nullptr); + return convertTempToRValue(Dest.withElementType(convertTypeForMem(RValTy)), + RValTy, E->getExprLoc()); } void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue lvalue, diff --git a/clang/test/CIR/CodeGen/atomic-runtime.cpp b/clang/test/CIR/CodeGen/atomic-runtime.cpp new file mode 100644 index 000000000000..dfe74a9e77c9 --- /dev/null +++ b/clang/test/CIR/CodeGen/atomic-runtime.cpp @@ -0,0 +1,309 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Test __atomic_* built-ins that have a memory order parameter with a runtime +// value. This requires generating a switch statement, so the amount of +// generated code is surprisingly large. +// +// Only a representative sample of atomic operations are tested: one read-only +// operation (atomic_load), one write-only operation (atomic_store), one +// read-write operation (atomic_exchange), and the most complex operation +// (atomic_compare_exchange). + +int runtime_load(int *ptr, int order) { + return __atomic_load_n(ptr, order); +} + +// CHECK: %[[ptr:.*]] = cir.load %[[ptr_var:.*]] : !cir.ptr>, !cir.ptr +// CHECK: %[[order:.*]] = cir.load %[[order_var:.*]] : !cir.ptr, !s32i +// CHECK: cir.switch (%[[order]] : !s32i) [ +// CHECK: case (default) { +// CHECK: %[[T8:.*]] = cir.load atomic(relaxed) %[[ptr]] : !cir.ptr, !s32i +// CHECK: cir.store %[[T8]], %[[temp_var:.*]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: %[[T8:.*]] = cir.load atomic(acquire) %[[ptr]] : !cir.ptr, !s32i +// CHECK: cir.store %[[T8]], %[[temp_var]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 5) { +// CHECK: %[[T8:.*]] = cir.load atomic(seq_cst) %[[ptr]] : !cir.ptr, !s32i +// CHECK: cir.store %[[T8]], %[[temp_var]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: } +// CHECK: ] + +void atomic_store_n(int* ptr, int val, int order) { + __atomic_store_n(ptr, val, order); +} + +// CHECK: %[[ptr:.*]] = cir.load %[[ptr_var:.*]] : !cir.ptr>, !cir.ptr +// CHECK: %[[order:.*]] = cir.load %[[order_var:.*]] : !cir.ptr, !s32i +// CHECK: %[[val:.*]] = cir.load %[[val_var:.*]] : !cir.ptr, !s32i +// CHECK: cir.store %[[val]], %[[temp_var:.*]] : !s32i, !cir.ptr +// CHECK: cir.switch (%[[order]] : !s32i) [ +// CHECK: case (default) { +// CHECK: %[[T7:.*]] = cir.load %[[temp_var:.*]] : !cir.ptr, !s32i +// CHECK: cir.store atomic(relaxed) %[[T7]], %[[ptr]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 3) { +// CHECK: %[[T7:.*]] = cir.load %[[temp_var:.*]] : !cir.ptr, !s32i +// CHECK: cir.store atomic(release) %[[T7]], %[[ptr]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 5) { +// CHECK: %[[T7:.*]] = cir.load %[[temp_var:.*]] : !cir.ptr, !s32i +// CHECK: cir.store atomic(seq_cst) %[[T7]], %[[ptr]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: } +// CHECK: ] + +int atomic_exchange_n(int* ptr, int val, int order) { + return __atomic_exchange_n(ptr, val, order); +} + +// CHECK: %[[ptr:.*]] = cir.load %[[ptr_var:.*]] : !cir.ptr>, !cir.ptr +// CHECK: %[[order:.*]] = cir.load %[[order_var:.*]] : !cir.ptr, !s32i +// CHECK: %[[val:.*]] = cir.load %[[val_var:.*]] : !cir.ptr, !s32i +// CHECK: cir.store %[[val]], %[[temp_var:.*]] : !s32i, !cir.ptr +// CHECK: cir.switch (%[[order]] : !s32i) [ +// CHECK: case (default) { +// CHECK: %[[T11:.*]] = cir.load %[[temp_var]] : !cir.ptr, !s32i +// CHECK: %[[T12:.*]] = cir.atomic.xchg(%[[ptr]] : !cir.ptr, %[[T11]] : !s32i, relaxed) : !s32i +// CHECK: cir.store %[[T12]], %[[result:.*]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: %[[T11:.*]] = cir.load %[[temp_var]] : !cir.ptr, !s32i +// CHECK: %[[T12:.*]] = cir.atomic.xchg(%[[ptr]] : !cir.ptr, %[[T11]] : !s32i, acquire) : !s32i +// CHECK: cir.store %[[T12]], %[[result]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 3) { +// CHECK: %[[T11:.*]] = cir.load %[[temp_var]] : !cir.ptr, !s32i +// CHECK: %[[T12:.*]] = cir.atomic.xchg(%[[ptr]] : !cir.ptr, %[[T11]] : !s32i, release) : !s32i +// CHECK: cir.store %[[T12]], %[[result]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 4) { +// CHECK: %[[T11:.*]] = cir.load %[[temp_var]] : !cir.ptr, !s32i +// CHECK: %[[T12:.*]] = cir.atomic.xchg(%[[ptr]] : !cir.ptr, %[[T11]] : !s32i, acq_rel) : !s32i +// CHECK: cir.store %[[T12]], %[[result]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 5) { +// CHECK: %[[T11:.*]] = cir.load %[[temp_var]] : !cir.ptr, !s32i +// CHECK: %[[T12:.*]] = cir.atomic.xchg(%[[ptr]] : !cir.ptr, %[[T11]] : !s32i, seq_cst) : !s32i +// CHECK: cir.store %[[T12]], %[[result]] : !s32i, !cir.ptr +// CHECK: cir.break +// CHECK: } +// CHECK: ] + +bool atomic_compare_exchange_n(int* ptr, int* expected, + int desired, int success, int failure) { + return __atomic_compare_exchange_n(ptr, expected, desired, false, + success, failure); +} + +// CHECK: %[[ptr:.*]] = cir.load %[[T0:.*]] : !cir.ptr>, !cir.ptr +// CHECK: %[[success:.*]] = cir.load %[[T3:.*]] : !cir.ptr, !s32i +// CHECK: %[[expected_addr:.*]] = cir.load %[[T1:.*]] : !cir.ptr>, !cir.ptr +// CHECK: %[[T11:.*]] = cir.load %[[T2:.*]] : !cir.ptr, !s32i +// CHECK: cir.store %[[T11]], %[[desired_var:.*]] : !s32i, !cir.ptr +// CHECK: %[[failure:.*]] = cir.load %[[T4:.*]] : !cir.ptr, !s32i +// CHECK: %[[T13:.*]] = cir.const #false +// CHECK: cir.switch (%[[success]] : !s32i) [ +// CHECK: case (default) { +// CHECK: cir.switch (%[[failure]] : !s32i) [ +// CHECK: case (default) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = relaxed) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var:.*]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 5) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: } +// CHECK: ] +// CHECK: cir.break +// CHECK: }, +// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: cir.switch (%[[failure]] : !s32i) [ +// CHECK: case (default) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = relaxed) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 5) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: } +// CHECK: ] +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 3) { +// CHECK: cir.switch (%[[failure]] : !s32i) [ +// CHECK: case (default) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = relaxed) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 5) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: } +// CHECK: ] +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 4) { +// CHECK: cir.switch (%[[failure]] : !s32i) [ +// CHECK: case (default) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = relaxed) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 5) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: } +// CHECK: ] +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 5) { +// CHECK: cir.switch (%[[failure]] : !s32i) [ +// CHECK: case (default) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = relaxed) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: }, +// CHECK: case (equal, 5) { +// CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i +// CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[succeeded]] { +// CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr +// CHECK: } +// CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr +// CHECK: cir.break +// CHECK: } +// CHECK: ] +// CHECK: cir.break +// CHECK: } +// CHECK: ] + diff --git a/clang/test/CIR/Lowering/atomic-runtime.cpp b/clang/test/CIR/Lowering/atomic-runtime.cpp new file mode 100644 index 000000000000..411a08dc5af2 --- /dev/null +++ b/clang/test/CIR/Lowering/atomic-runtime.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s + +// Test __atomic_* built-ins that have a memory order parameter with a runtime +// value. This requires generating a switch statement, so the amount of +// generated code is surprisingly large. +// +// This is just a quick smoke test. Only atomic_load_n is tested. + +int runtime_load(int *ptr, int order) { + return __atomic_load_n(ptr, order); +} + +// CHECK: %[[T7:[0-9]+]] = load ptr, ptr %[[T3:[0-9]+]], align 8 +// CHECK: %[[T8:[0-9]+]] = load i32, ptr %[[T4:[0-9]+]], align 4 +// CHECK: switch i32 %[[T8]], label %[[L9:[0-9]+]] [ +// CHECK: i32 1, label %[[L11:[0-9]+]] +// CHECK: i32 2, label %[[L11]] +// CHECK: i32 5, label %[[L13:[0-9]+]] +// CHECK: ] +// CHECK: [[L9]]: +// CHECK: %[[T10:[0-9]+]] = load atomic i32, ptr %[[T7]] monotonic, align 4 +// CHECK: store i32 %[[T10]], ptr %[[T6:[0-9]+]], align 4 +// CHECK: br label %[[L15:[0-9]+]] +// CHECK: [[L11]]: +// CHECK: %[[T12:[0-9]+]] = load atomic i32, ptr %[[T7]] acquire, align 4 +// CHECK: store i32 %[[T12]], ptr %[[T6]], align 4 +// CHECK: br label %[[L15]] +// CHECK: [[L13]]: +// CHECK: %[[T14:[0-9]+]] = load atomic i32, ptr %[[T7]] seq_cst, align 4 +// CHECK: store i32 %[[T14]], ptr %[[T6]], align 4 +// CHECK: br label %[[L15]] +// CHECK: [[L15]]: +// CHECK: %[[T16:[0-9]+]] = load i32, ptr %[[T6]], align 4 +// CHECK: store i32 %[[T16]], ptr %[[T5:[0-9]+]], align 4 +// CHECK: %[[T17:[0-9]+]] = load i32, ptr %[[T5]], align 4 +// CHECK: ret i32 %[[T17]] From b330cb71d84a5cf83930a00386123e8fb4be9910 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 17 Jul 2024 22:35:10 +0300 Subject: [PATCH 1690/2301] [CIR] Add support and enable MLIR's mem2reg for flat CIR (#659) This is PR enables generic MLIR's mem2reg pass for the flat CIR code. I submit this draft PR if someone wants to play with mem2reg right now and to define the next incremental steps if necessary. Briefly, what's important: 1) I used llvm dialect implementation for the inspiration 2) I did not implement interfaces for some of the operations, e.g. for `cir.get_member`, since looks like mem2reg doesn't handle them - it's more about SROA pass. 3) simple tests are added 4) mem2reg is disabled by default, so one need to use it explicitly, with the `-fclangir-mem2reg` option 5) There are a couple bugs fixed in the code . The most important is tablegen for `BranchOp` that assumed same sized operands for branches (it was extremely hard to detect!). Note, that this `mem2reg` implementation work with flat CIR only. First of all, the MLIR's mem2reg knows how to propagate (and remove) memory slots only when we can build a graph of blocks (e.g. dom tree is a bread and butter of mem2reg). Also, given that phi nodes are represents as blocks with arguments in MLIR, there is no chance to transfer control flow and `call` a block from another region (and pass some values as well). The last one is important and has some relation to `canonicalizer` - may be you remember I mentioned this problem once somewhere, when conditional branch verification fails on target block arguments number and branch operands number mismatch. finally, short example to play with: ``` int return_42() { int y = 42; return y; } ``` First, without mem2reg enabled `clang tmp.c -fclangir -emit-llvm -S -o -` : ``` define dso_local i32 @return_42() #0 !dbg !3 { %1 = alloca i32, i64 1, align 4, !dbg !7 %2 = alloca i32, i64 1, align 4, !dbg !8 store i32 42, ptr %2, align 4, !dbg !8 %3 = load i32, ptr %2, align 4, !dbg !9 store i32 %3, ptr %1, align 4, !dbg !10 %4 = load i32, ptr %1, align 4, !dbg !10 ret i32 %4, !dbg !10 } ``` and with mem2reg enabled `clang tmp.c -fclangir -fclangir-mem2reg -emit-llvm -S -o -`: ``` define dso_local i32 @return_42() #0 !dbg !3 { ret i32 42, !dbg !7 } ``` So. What do you think? Which steps do you expect me to perform? I can try to split this PR into parts and add interfaces one-by-one, but I'n not sure the tests will work without all of them implemented. --- clang/include/clang/CIR/CIRToCIRPasses.h | 3 +- .../include/clang/CIR/Dialect/IR/CIRDialect.h | 1 + clang/include/clang/CIR/Dialect/IR/CIROps.td | 34 ++- clang/include/clang/Driver/Options.td | 4 + .../include/clang/Frontend/FrontendOptions.h | 3 + clang/lib/CIR/CodeGen/CIRPasses.cpp | 12 +- clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp | 184 +++++++++++++++++ clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 3 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 17 +- clang/lib/Driver/ToolChains/Clang.cpp | 3 + clang/lib/Frontend/CompilerInvocation.cpp | 3 + clang/test/CIR/Transforms/mem2reg.c | 195 ++++++++++++++++++ clang/test/CIR/Transforms/mem2reg.cir | 31 +++ 14 files changed, 478 insertions(+), 16 deletions(-) create mode 100644 clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp create mode 100644 clang/test/CIR/Transforms/mem2reg.c create mode 100644 clang/test/CIR/Transforms/mem2reg.cir diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index b70806ddb77e..a88fba7a76e3 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -34,7 +34,8 @@ mlir::LogicalResult runCIRToCIRPasses( llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, llvm::StringRef libOptOpts, std::string &passOptParsingFailure, - bool flattenCIR, bool emitMLIR, bool enableCallConvLowering); + bool flattenCIR, bool emitMLIR, bool enableCallConvLowering, + bool enableMem2reg); } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index e78471035cf1..d59b4ede3091 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -23,6 +23,7 @@ #include "mlir/Interfaces/FunctionInterfaces.h" #include "mlir/Interfaces/InferTypeOpInterface.h" #include "mlir/Interfaces/LoopLikeInterface.h" +#include "mlir/Interfaces/MemorySlotInterfaces.h" #include "mlir/Interfaces/SideEffectInterfaces.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7b1c8cca6c62..eadce251c411 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -26,6 +26,7 @@ include "mlir/Interfaces/ControlFlowInterfaces.td" include "mlir/Interfaces/FunctionInterfaces.td" include "mlir/Interfaces/InferTypeOpInterface.td" include "mlir/Interfaces/LoopLikeInterface.td" +include "mlir/Interfaces/MemorySlotInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/IR/BuiltinAttributeInterfaces.td" @@ -82,7 +83,9 @@ def CastKind : I32EnumAttr< let cppNamespace = "::mlir::cir"; } -def CastOp : CIR_Op<"cast", [Pure]> { +def CastOp : CIR_Op<"cast", + [Pure, + DeclareOpInterfaceMethods]> { // FIXME: not all conversions are free of side effects. let summary = "Conversion between values of different types"; let description = [{ @@ -389,7 +392,8 @@ class AllocaTypesMatchWith($_self).getPointee()">]> { + "cast($_self).getPointee()">, + DeclareOpInterfaceMethods]> { let summary = "Defines a scope-local variable"; let description = [{ The `cir.alloca` operation defines a scope-local variable. @@ -473,7 +477,8 @@ def AllocaOp : CIR_Op<"alloca", [ def LoadOp : CIR_Op<"load", [ TypesMatchWith<"type of 'result' matches pointee type of 'addr'", "addr", "result", - "cast($_self).getPointee()">]> { + "cast($_self).getPointee()">, + DeclareOpInterfaceMethods]> { let summary = "Load value from memory adddress"; let description = [{ @@ -531,7 +536,8 @@ def LoadOp : CIR_Op<"load", [ def StoreOp : CIR_Op<"store", [ TypesMatchWith<"type of 'value' matches pointee type of 'addr'", "addr", "value", - "cast($_self).getPointee()">]> { + "cast($_self).getPointee()">, + DeclareOpInterfaceMethods]> { let summary = "Store value to memory address"; let description = [{ @@ -1691,7 +1697,7 @@ def BrOp : CIR_Op<"br", def BrCondOp : CIR_Op<"brcond", [DeclareOpInterfaceMethods, - Pure, Terminator, SameVariadicOperandSize]> { + Pure, Terminator, AttrSizedOperandSegments]> { let summary = "Conditional branch"; let description = [{ The `cir.brcond %cond, ^bb0, ^bb1` branches to 'bb0' block in case @@ -1714,11 +1720,8 @@ def BrCondOp : CIR_Op<"brcond", OpBuilder<(ins "Value":$cond, "Block *":$destTrue, "Block *":$destFalse, CArg<"ValueRange", "{}">:$destOperandsTrue, CArg<"ValueRange", "{}">:$destOperandsFalse), [{ - $_state.addOperands(cond); - $_state.addSuccessors(destTrue); - $_state.addSuccessors(destFalse); - $_state.addOperands(destOperandsTrue); - $_state.addOperands(destOperandsFalse); + build($_builder, $_state, cond, destOperandsTrue, + destOperandsFalse, destTrue, destFalse); }]> ]; @@ -3202,7 +3205,9 @@ def CatchParamOp : CIR_Op<"catch_param"> { // CopyOp //===----------------------------------------------------------------------===// -def CopyOp : CIR_Op<"copy", [SameTypeOperands]> { +def CopyOp : CIR_Op<"copy", + [SameTypeOperands, + DeclareOpInterfaceMethods]> { let arguments = (ins Arg:$dst, Arg:$src, UnitAttr:$is_volatile); @@ -4090,4 +4095,11 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", let hasVerifier = 0; } +def UndefOp : CIR_Op<"undef", [Pure]> { + let summary = "Creates an undefined value of CIR dialect type."; + let description = [{ `cir.undef` is similar to the one in the LLVM IR dialect }]; + let results = (outs AnyType:$res); + let assemblyFormat = "attr-dict `:` type($res)"; +} + #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index a8651fcffb34..986d95b77bbc 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3083,6 +3083,10 @@ def fclangir_call_conv_lowering : Flag<["-"], "fclangir-call-conv-lowering">, Visibility<[ClangOption, CC1Option]>, Group, HelpText<"Enable ClangIR calling convention lowering">, MarshallingInfoFlag>; +def fclangir_mem2reg : Flag<["-"], "fclangir-mem2reg">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Enable mem2reg on the flat ClangIR">, + MarshallingInfoFlag>; def clangir_disable_passes : Flag<["-"], "clangir-disable-passes">, Visibility<[ClangOption, CC1Option]>, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 5bff487c2068..865b294586ab 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -451,6 +451,9 @@ class FrontendOptions { // Enable Clang IR call conv lowering pass. unsigned ClangIREnableCallConvLowering : 1; + // Enable Clang IR mem2reg pass on the flat CIR. + unsigned ClangIREnableMem2Reg : 1; + CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index dcc613a89925..fda1887028d4 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -17,6 +17,9 @@ #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/Passes.h" + +#include namespace cir { mlir::LogicalResult runCIRToCIRPasses( @@ -25,7 +28,9 @@ mlir::LogicalResult runCIRToCIRPasses( llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, llvm::StringRef libOptOpts, std::string &passOptParsingFailure, - bool flattenCIR, bool emitMLIR, bool enableCallConvLowering) { + bool flattenCIR, bool emitMLIR, bool enableCallConvLowering, + bool enableMem2Reg) { + mlir::PassManager pm(mlirCtx); pm.addPass(mlir::createMergeCleanupsPass()); @@ -70,9 +75,12 @@ mlir::LogicalResult runCIRToCIRPasses( if (enableCallConvLowering) pm.addPass(mlir::createCallConvLoweringPass()); - if (flattenCIR) + if (flattenCIR || enableMem2Reg) mlir::populateCIRPreLoweringPasses(pm); + if (enableMem2Reg) + pm.addPass(mlir::createMem2Reg()); + if (emitMLIR) pm.addPass(mlir::createSCFPreparePass()); diff --git a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp new file mode 100644 index 000000000000..2ced31cbbad8 --- /dev/null +++ b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp @@ -0,0 +1,184 @@ +//====- CIRMemorySlot.cpp - MemorySlot interfaces -------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements MemorySlot-related interfaces for CIR dialect +// operations. +// +//===----------------------------------------------------------------------===// + +#include "mlir/IR/Matchers.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "mlir/Interfaces/MemorySlotInterfaces.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/TypeSwitch.h" + +using namespace mlir; + +/// Conditions the deletion of the operation to the removal of all its uses. +static bool forwardToUsers(Operation *op, + SmallVectorImpl &newBlockingUses) { + for (Value result : op->getResults()) + for (OpOperand &use : result.getUses()) + newBlockingUses.push_back(&use); + return true; +} + +//===----------------------------------------------------------------------===// +// Interfaces for AllocaOp +//===----------------------------------------------------------------------===// + +llvm::SmallVector cir::AllocaOp::getPromotableSlots() { + return {MemorySlot{getResult(), getAllocaType()}}; +} + +Value cir::AllocaOp::getDefaultValue(const MemorySlot &slot, + OpBuilder &builder) { + return builder.create(getLoc(), slot.elemType); +} + +void cir::AllocaOp::handleBlockArgument(const MemorySlot &slot, + BlockArgument argument, + OpBuilder &builder) {} + +std::optional +cir::AllocaOp::handlePromotionComplete(const MemorySlot &slot, + Value defaultValue, + OpBuilder &builder) { + if (defaultValue && defaultValue.use_empty()) + defaultValue.getDefiningOp()->erase(); + this->erase(); + return std::nullopt; +} + +//===----------------------------------------------------------------------===// +// Interfaces for LoadOp +//===----------------------------------------------------------------------===// + +bool cir::LoadOp::loadsFrom(const MemorySlot &slot) { + return getAddr() == slot.ptr; +} + +bool cir::LoadOp::storesTo(const MemorySlot &slot) { return false; } + +Value cir::LoadOp::getStored(const MemorySlot &slot, OpBuilder &builder, + Value reachingDef, const DataLayout &dataLayout) { + llvm_unreachable("getStored should not be called on LoadOp"); +} + +bool cir::LoadOp::canUsesBeRemoved( + const MemorySlot &slot, const SmallPtrSetImpl &blockingUses, + SmallVectorImpl &newBlockingUses, + const DataLayout &dataLayout) { + if (blockingUses.size() != 1) + return false; + Value blockingUse = (*blockingUses.begin())->get(); + return blockingUse == slot.ptr && getAddr() == slot.ptr && + getResult().getType() == slot.elemType; +} + +DeletionKind cir::LoadOp::removeBlockingUses( + const MemorySlot &slot, const SmallPtrSetImpl &blockingUses, + OpBuilder &builder, Value reachingDefinition, + const DataLayout &dataLayout) { + getResult().replaceAllUsesWith(reachingDefinition); + return DeletionKind::Delete; +} + +//===----------------------------------------------------------------------===// +// Interfaces for StoreOp +//===----------------------------------------------------------------------===// + +bool cir::StoreOp::loadsFrom(const MemorySlot &slot) { return false; } + +bool cir::StoreOp::storesTo(const MemorySlot &slot) { + return getAddr() == slot.ptr; +} + +Value cir::StoreOp::getStored(const MemorySlot &slot, OpBuilder &builder, + Value reachingDef, const DataLayout &dataLayout) { + return getValue(); +} + +bool cir::StoreOp::canUsesBeRemoved( + const MemorySlot &slot, const SmallPtrSetImpl &blockingUses, + SmallVectorImpl &newBlockingUses, + const DataLayout &dataLayout) { + if (blockingUses.size() != 1) + return false; + Value blockingUse = (*blockingUses.begin())->get(); + return blockingUse == slot.ptr && getAddr() == slot.ptr && + getValue() != slot.ptr && slot.elemType == getValue().getType(); +} + +DeletionKind cir::StoreOp::removeBlockingUses( + const MemorySlot &slot, const SmallPtrSetImpl &blockingUses, + OpBuilder &builder, Value reachingDefinition, + const DataLayout &dataLayout) { + return DeletionKind::Delete; +} + +//===----------------------------------------------------------------------===// +// Interfaces for CopyOp +//===----------------------------------------------------------------------===// + +bool cir::CopyOp::loadsFrom(const MemorySlot &slot) { + return getSrc() == slot.ptr; +} + +bool cir::CopyOp::storesTo(const MemorySlot &slot) { + return getDst() == slot.ptr; +} + +Value cir::CopyOp::getStored(const MemorySlot &slot, OpBuilder &builder, + Value reachingDef, const DataLayout &dataLayout) { + return builder.create(getLoc(), slot.elemType, getSrc()); +} + +DeletionKind cir::CopyOp::removeBlockingUses( + const MemorySlot &slot, const SmallPtrSetImpl &blockingUses, + OpBuilder &builder, Value reachingDefinition, + const DataLayout &dataLayout) { + if (loadsFrom(slot)) + builder.create(getLoc(), reachingDefinition, getDst(), false, + mlir::IntegerAttr{}, + mlir::cir::MemOrderAttr()); + return DeletionKind::Delete; +} + +bool cir::CopyOp::canUsesBeRemoved( + const MemorySlot &slot, const SmallPtrSetImpl &blockingUses, + SmallVectorImpl &newBlockingUses, + const DataLayout &dataLayout) { + + if (getDst() == getSrc()) + return false; + + return getLength() == dataLayout.getTypeSize(slot.elemType); +} + +//===----------------------------------------------------------------------===// +// Interfaces for CastOp +//===----------------------------------------------------------------------===// + +bool cir::CastOp::canUsesBeRemoved( + const SmallPtrSetImpl &blockingUses, + SmallVectorImpl &newBlockingUses, + const DataLayout &dataLayout) { + if (getKind() == cir::CastKind::bitcast) + return forwardToUsers(*this, newBlockingUses); + else + return false; +} + +DeletionKind cir::CastOp::removeBlockingUses( + const SmallPtrSetImpl &blockingUses, OpBuilder &builder) { + return DeletionKind::Delete; +} diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 6eb3f295286c..03f588f7c7a5 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -2,6 +2,7 @@ add_clang_library(MLIRCIR CIRAttrs.cpp CIRDataLayout.cpp CIRDialect.cpp + CIRMemorySlot.cpp CIRTypes.cpp FPEnv.cpp diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 0a570a3c783c..f19bcc208431 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -191,7 +191,8 @@ class CIRGenConsumer : public clang::ASTConsumer { feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, action == CIRGenAction::OutputType::EmitCIRFlat, action == CIRGenAction::OutputType::EmitMLIR, - feOptions.ClangIREnableCallConvLowering) + feOptions.ClangIREnableCallConvLowering, + feOptions.ClangIREnableMem2Reg) .failed()) { if (!passOptParsingFailure.empty()) diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 9c2a01c7f6ea..9e68d890cf38 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3497,6 +3497,21 @@ class CIRClearCacheOpLowering } }; +class CIRUndefOpLowering + : public mlir::OpConversionPattern { + + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::UndefOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto typ = getTypeConverter()->convertType(op.getRes().getType()); + + rewriter.replaceOpWithNewOp(op, typ); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -3530,7 +3545,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRRintOpLowering, CIRRoundOpLowering, CIRSinOpLowering, CIRSqrtOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, CIRFModOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering, CIRPowOpLowering, - CIRClearCacheOpLowering>(converter, patterns.getContext()); + CIRClearCacheOpLowering, CIRUndefOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index c883d88b51ba..642e1da2d95a 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5254,6 +5254,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_fclangir_call_conv_lowering)) CmdArgs.push_back("-fclangir-call-conv-lowering"); + + if (Args.hasArg(options::OPT_fclangir_mem2reg)) + CmdArgs.push_back("-fclangir-mem2reg"); // ClangIR lib opt requires idiom recognizer. if (Args.hasArg(options::OPT_fclangir_lib_opt, diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index a9307703916e..0d7da85829a5 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3149,6 +3149,9 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Opts.ClangIRLibOptOpts = A->getValue(); } + if (Args.hasArg(OPT_fclangir_mem2reg)) + Opts.ClangIREnableMem2Reg = true; + if (Args.hasArg(OPT_aux_target_cpu)) Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu)); if (Args.hasArg(OPT_aux_target_feature)) diff --git a/clang/test/CIR/Transforms/mem2reg.c b/clang/test/CIR/Transforms/mem2reg.c new file mode 100644 index 000000000000..83c975fd6d13 --- /dev/null +++ b/clang/test/CIR/Transforms/mem2reg.c @@ -0,0 +1,195 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fclangir-mem2reg %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=MEM2REG + +int return_42() { + int y = 42; + return y; +} + +// BEFORE: cir.func {{.*@return_42}} +// BEFORE: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// BEFORE: %1 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} +// BEFORE: %2 = cir.const #cir.int<42> : !s32i +// BEFORE: cir.store %2, %1 : !s32i, !cir.ptr +// BEFORE: %3 = cir.load %1 : !cir.ptr, !s32i +// BEFORE: cir.store %3, %0 : !s32i, !cir.ptr +// BEFORE: %4 = cir.load %0 : !cir.ptr, !s32i +// BEFORE: cir.return %4 : !s32i + +// MEM2REG: cir.func {{.*@return_42()}} +// MEM2REG: %0 = cir.const #cir.int<42> : !s32i +// MEM2REG: cir.return %0 : !s32i + +void alloca_in_loop(int* ar, int n) { + for (int i = 0; i < n; ++i) { + int a = 4; + ar[i] = a; + } +} + +// BEFORE: cir.func {{.*@alloca_in_loop}} +// BEFORE: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["ar", init] {alignment = 8 : i64} +// BEFORE: %1 = cir.alloca !s32i, !cir.ptr, ["n", init] {alignment = 4 : i64} +// BEFORE: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// BEFORE: cir.store %arg1, %1 : !s32i, !cir.ptr +// BEFORE: cir.scope { +// BEFORE: %2 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} +// BEFORE: %3 = cir.const #cir.int<0> : !s32i +// BEFORE: cir.store %3, %2 : !s32i, !cir.ptr +// BEFORE: cir.for : cond { +// BEFORE: %4 = cir.load %2 : !cir.ptr, !s32i +// BEFORE: %5 = cir.load %1 : !cir.ptr, !s32i +// BEFORE: %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i +// BEFORE: %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool +// BEFORE: cir.condition(%7) +// BEFORE: } body { +// BEFORE: cir.scope { +// BEFORE: %4 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} +// BEFORE: %5 = cir.const #cir.int<4> : !s32i +// BEFORE: cir.store %5, %4 : !s32i, !cir.ptr +// BEFORE: %6 = cir.load %4 : !cir.ptr, !s32i +// BEFORE: %7 = cir.load %0 : !cir.ptr>, !cir.ptr +// BEFORE: %8 = cir.load %2 : !cir.ptr, !s32i +// BEFORE: %9 = cir.ptr_stride(%7 : !cir.ptr, %8 : !s32i), !cir.ptr +// BEFORE: cir.store %6, %9 : !s32i, !cir.ptr +// BEFORE: } +// BEFORE: cir.yield +// BEFORE: } step { +// BEFORE: %4 = cir.load %2 : !cir.ptr, !s32i +// BEFORE: %5 = cir.unary(inc, %4) : !s32i, !s32i +// BEFORE: cir.store %5, %2 : !s32i, !cir.ptr +// BEFORE: cir.yield +// BEFORE: } +// BEFORE: } +// BEFORE: cir.return + +// MEM2REG: cir.func {{.*@alloca_in_loop}} +// MEM2REG: cir.br ^bb1 +// MEM2REG: ^bb1: // pred: ^bb0 +// MEM2REG: %0 = cir.const #cir.int<0> : !s32i +// MEM2REG: cir.br ^bb2(%0 : !s32i) +// MEM2REG: ^bb2(%1: !s32i{{.*}}): // 2 preds: ^bb1, ^bb6 +// MEM2REG: %2 = cir.cmp(lt, %1, %arg1) : !s32i, !s32i +// MEM2REG: %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool +// MEM2REG: cir.brcond %3 ^bb3, ^bb7 +// MEM2REG: ^bb3: // pred: ^bb2 +// MEM2REG: cir.br ^bb4 +// MEM2REG: ^bb4: // pred: ^bb3 +// MEM2REG: %4 = cir.const #cir.int<4> : !s32i +// MEM2REG: %5 = cir.ptr_stride(%arg0 : !cir.ptr, %1 : !s32i), !cir.ptr +// MEM2REG: cir.store %4, %5 : !s32i, !cir.ptr +// MEM2REG: cir.br ^bb5 +// MEM2REG: ^bb5: // pred: ^bb4 +// MEM2REG: cir.br ^bb6 +// MEM2REG: ^bb6: // pred: ^bb5 +// MEM2REG: %6 = cir.unary(inc, %1) : !s32i, !s32i +// MEM2REG: cir.br ^bb2(%6 : !s32i) +// MEM2REG: ^bb7: // pred: ^bb2 +// MEM2REG: cir.br ^bb8 +// MEM2REG: ^bb8: // pred: ^bb7 +// MEM2REG: cir.return + + +int alloca_in_ifelse(int x) { + int y = 0; + if (x > 42) { + int z = 2; + y = x * z; + } else { + int z = 3; + y = x * z; + } + + y = y + 1; + return y; +} + +// BEFORE: cir.func {{.*@alloca_in_ifelse}} +// BEFORE: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// BEFORE: %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// BEFORE: %2 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} +// BEFORE: cir.store %arg0, %0 : !s32i, !cir.ptr +// BEFORE: %3 = cir.const #cir.int<0> : !s32i +// BEFORE: cir.store %3, %2 : !s32i, !cir.ptr +// BEFORE: cir.scope { +// BEFORE: %9 = cir.load %0 : !cir.ptr, !s32i +// BEFORE: %10 = cir.const #cir.int<42> : !s32i +// BEFORE: %11 = cir.cmp(gt, %9, %10) : !s32i, !s32i +// BEFORE: %12 = cir.cast(int_to_bool, %11 : !s32i), !cir.bool +// BEFORE: cir.if %12 { +// BEFORE: %13 = cir.alloca !s32i, !cir.ptr, ["z", init] {alignment = 4 : i64} +// BEFORE: %14 = cir.const #cir.int<2> : !s32i +// BEFORE: cir.store %14, %13 : !s32i, !cir.ptr +// BEFORE: %15 = cir.load %0 : !cir.ptr, !s32i +// BEFORE: %16 = cir.load %13 : !cir.ptr, !s32i +// BEFORE: %17 = cir.binop(mul, %15, %16) nsw : !s32i +// BEFORE: cir.store %17, %2 : !s32i, !cir.ptr +// BEFORE: } else { +// BEFORE: %13 = cir.alloca !s32i, !cir.ptr, ["z", init] {alignment = 4 : i64} +// BEFORE: %14 = cir.const #cir.int<3> : !s32i +// BEFORE: cir.store %14, %13 : !s32i, !cir.ptr +// BEFORE: %15 = cir.load %0 : !cir.ptr, !s32i +// BEFORE: %16 = cir.load %13 : !cir.ptr, !s32i +// BEFORE: %17 = cir.binop(mul, %15, %16) nsw : !s32i +// BEFORE: cir.store %17, %2 : !s32i, !cir.ptr +// BEFORE: } +// BEFORE: } +// BEFORE: %4 = cir.load %2 : !cir.ptr, !s32i +// BEFORE: %5 = cir.const #cir.int<1> : !s32i +// BEFORE: %6 = cir.binop(add, %4, %5) nsw : !s32i +// BEFORE: cir.store %6, %2 : !s32i, !cir.ptr +// BEFORE: %7 = cir.load %2 : !cir.ptr, !s32i +// BEFORE: cir.store %7, %1 : !s32i, !cir.ptr +// BEFORE: %8 = cir.load %1 : !cir.ptr, !s32i +// BEFORE: cir.return %8 : !s32i + +// MEM2REG: cir.func {{.*@alloca_in_ifelse}} +// MEM2REG: %0 = cir.const #cir.int<0> : !s32i +// MEM2REG: cir.br ^bb1 +// MEM2REG: ^bb1: // pred: ^bb0 +// MEM2REG: %1 = cir.const #cir.int<42> : !s32i +// MEM2REG: %2 = cir.cmp(gt, %arg0, %1) : !s32i, !s32i +// MEM2REG: %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool +// MEM2REG: cir.brcond %3 ^bb3, ^bb2 +// MEM2REG: ^bb2: // pred: ^bb1 +// MEM2REG: %4 = cir.const #cir.int<3> : !s32i +// MEM2REG: %5 = cir.binop(mul, %arg0, %4) nsw : !s32i +// MEM2REG: cir.br ^bb4(%5 : !s32i) +// MEM2REG: ^bb3: // pred: ^bb1 +// MEM2REG: %6 = cir.const #cir.int<2> : !s32i +// MEM2REG: %7 = cir.binop(mul, %arg0, %6) nsw : !s32i +// MEM2REG: cir.br ^bb4(%7 : !s32i) +// MEM2REG: ^bb4(%8: !s32i{{.*}}): // 2 preds: ^bb2, ^bb3 +// MEM2REG: cir.br ^bb5 +// MEM2REG: ^bb5: // pred: ^bb4 +// MEM2REG: %9 = cir.const #cir.int<1> : !s32i +// MEM2REG: %10 = cir.binop(add, %8, %9) nsw : !s32i +// MEM2REG: cir.return %10 : !s32i +// MEM2REG: } + + + + +typedef __SIZE_TYPE__ size_t; +void *alloca(size_t size); + +void test_bitcast(size_t n) { + int *c1 = alloca(n); +} + +// BEFORE: cir.func {{.*@test_bitcast}} +// BEFORE: %0 = cir.alloca !u64i, !cir.ptr, ["n", init] {alignment = 8 : i64} +// BEFORE: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["c1", init] {alignment = 8 : i64} +// BEFORE: cir.store %arg0, %0 : !u64i, !cir.ptr +// BEFORE: %2 = cir.load %0 : !cir.ptr, !u64i +// BEFORE: %3 = cir.alloca !u8i, !cir.ptr, %2 : !u64i, ["bi_alloca"] {alignment = 16 : i64} +// BEFORE: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr +// BEFORE: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr +// BEFORE: cir.store %5, %1 : !cir.ptr, !cir.ptr> +// BEFORE: cir.return + +// MEM2REG: cir.func {{.*@test_bitcast}} +// MEM2REG: cir.return +// MEM2REG: } \ No newline at end of file diff --git a/clang/test/CIR/Transforms/mem2reg.cir b/clang/test/CIR/Transforms/mem2reg.cir new file mode 100644 index 000000000000..dca55d3c3068 --- /dev/null +++ b/clang/test/CIR/Transforms/mem2reg.cir @@ -0,0 +1,31 @@ +// RUN: cir-opt %s -cir-flatten-cfg -mem2reg -o - | FileCheck %s + +!s32i = !cir.int +!u64i = !cir.int +!u8i = !cir.int +!void = !cir.void + +module { + + // ==== Simple case + // C code + // int return_42() { + // int y = 42; + // return y; + // } + cir.func @return_42() -> !s32i { + %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} + %2 = cir.const #cir.int<42> : !s32i + cir.store %2, %1 : !s32i, !cir.ptr + %3 = cir.load %1 : !cir.ptr, !s32i + cir.store %3, %0 : !s32i, !cir.ptr + %4 = cir.load %0 : !cir.ptr, !s32i + cir.return %4 : !s32i + } + // CHECK: cir.func @return_42() -> !s32i { + // CHECK: %0 = cir.const #cir.int<42> : !s32i + // CHECK: cir.return %0 : !s32i + // CHECK: } + +} //module \ No newline at end of file From f3ac31e1373899b5033dccf9f522496e434086c6 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 18 Jul 2024 07:38:35 +0800 Subject: [PATCH 1691/2301] [CIR] Add support for binary arithmetic operations on complex numbers (#730) This PR adds support for complex number binary arithmetic operations. Specifically, CIRGen for the following operations on complex numbers are added: - Binary arithmetic operations: add, sub, mul, and div. - ~Unary arithmetic operations: plus, minus, and conjugate.~ A new operation `cir.complex.binop` is added to represent binary operations on complex number operands. The new operation contains complex-number-specific attributes for correct lowering to LLVM IR. ~I'll add LLVM IR lowering in later PRs as this PR is already large enough.~ This PR also includes LLVM IR lowering support. It introduces two new operations `cir.complex.real` and `cir.complex.imag` to aid for LLVM IR lowering. These two new operations extract the real and imaginary part of a complex value, which is useful during lowering prepare, during which phase the complex arithmetic operations are broken down to a series of scalar arithmetic operations on the real and imaginary parts of the complex operands. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 72 ++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 118 +++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 7 - clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 401 ++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 10 + clang/lib/CIR/CodeGen/CIRGenValue.h | 6 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 26 + .../Dialect/Transforms/LoweringPrepare.cpp | 330 ++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 39 +- clang/test/CIR/CodeGen/complex-arithmetic.c | 647 ++++++++++++++++++ 11 files changed, 1637 insertions(+), 25 deletions(-) create mode 100644 clang/test/CIR/CodeGen/complex-arithmetic.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index e636d59c6539..d6f6ce972f41 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -145,6 +145,15 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return create(loc, getBoolTy(), kind, lhs, rhs); } + mlir::Value createIsNaN(mlir::Location loc, mlir::Value operand) { + return createCompare(loc, mlir::cir::CmpOpKind::ne, operand, operand); + } + + mlir::Value createUnaryOp(mlir::Location loc, mlir::cir::UnaryOpKind kind, + mlir::Value operand) { + return create(loc, kind, operand); + } + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, const llvm::APInt &rhs) { return create( @@ -158,6 +167,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { rhs); } + mlir::Value createBinop(mlir::Location loc, mlir::Value lhs, + mlir::cir::BinOpKind kind, mlir::Value rhs) { + return create(loc, lhs.getType(), kind, lhs, rhs); + } + mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, bool isShiftLeft) { return create( @@ -195,6 +209,10 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); } + mlir::Value createAnd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) { + return createBinop(loc, lhs, mlir::cir::BinOpKind::And, rhs); + } + mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) { auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); return createBinop(lhs, mlir::cir::BinOpKind::Or, val); @@ -226,6 +244,60 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createBinop(lhs, mlir::cir::BinOpKind::Mul, val); } + mlir::Value createComplexCreate(mlir::Location loc, mlir::Value real, + mlir::Value imag) { + auto resultComplexTy = + mlir::cir::ComplexType::get(getContext(), real.getType()); + return create(loc, resultComplexTy, real, imag); + } + + mlir::Value createComplexReal(mlir::Location loc, mlir::Value operand) { + auto operandTy = mlir::cast(operand.getType()); + return create(loc, operandTy.getElementTy(), + operand); + } + + mlir::Value createComplexImag(mlir::Location loc, mlir::Value operand) { + auto operandTy = mlir::cast(operand.getType()); + return create(loc, operandTy.getElementTy(), + operand); + } + + mlir::Value createComplexBinOp(mlir::Location loc, mlir::Value lhs, + mlir::cir::ComplexBinOpKind kind, + mlir::Value rhs, + mlir::cir::ComplexRangeKind range, + bool promoted) { + return create(loc, kind, lhs, rhs, range, + promoted); + } + + mlir::Value createComplexAdd(mlir::Location loc, mlir::Value lhs, + mlir::Value rhs) { + return createBinop(loc, lhs, mlir::cir::BinOpKind::Add, rhs); + } + + mlir::Value createComplexSub(mlir::Location loc, mlir::Value lhs, + mlir::Value rhs) { + return createBinop(loc, lhs, mlir::cir::BinOpKind::Sub, rhs); + } + + mlir::Value createComplexMul(mlir::Location loc, mlir::Value lhs, + mlir::Value rhs, + mlir::cir::ComplexRangeKind range, + bool promoted) { + return createComplexBinOp(loc, lhs, mlir::cir::ComplexBinOpKind::Mul, rhs, + range, promoted); + } + + mlir::Value createComplexDiv(mlir::Location loc, mlir::Value lhs, + mlir::Value rhs, + mlir::cir::ComplexRangeKind range, + bool promoted) { + return createComplexBinOp(loc, lhs, mlir::cir::ComplexBinOpKind::Div, rhs, + range, promoted); + } + mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, mlir::Value dst, bool _volatile = false, ::mlir::IntegerAttr align = {}, diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index eadce251c411..a692124e4fc7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1210,12 +1210,64 @@ def ComplexCreateOp : CIR_Op<"complex.create", [Pure, SameTypeOperands]> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// ComplexRealOp and ComplexImagOp +//===----------------------------------------------------------------------===// + +def ComplexRealOp : CIR_Op<"complex.real", [Pure]> { + let summary = "Extract the real part of a complex value"; + let description = [{ + `cir.complex.real` operation takes an operand of `!cir.complex` type and + yields the real part of it. + + Example: + + ```mlir + %1 = cir.complex.real %0 : !cir.complex -> !cir.float + ``` + }]; + + let results = (outs CIR_AnyIntOrFloat:$result); + let arguments = (ins CIR_ComplexType:$operand); + + let assemblyFormat = [{ + $operand `:` qualified(type($operand)) `->` qualified(type($result)) + attr-dict + }]; + + let hasVerifier = 1; +} + +def ComplexImagOp : CIR_Op<"complex.imag", [Pure]> { + let summary = "Extract the imaginary part of a complex value"; + let description = [{ + `cir.complex.imag` operation takes an operand of `!cir.complex` type and + yields the imaginary part of it. + + Example: + + ```mlir + %1 = cir.complex.imag %0 : !cir.complex -> !cir.float + ``` + }]; + + let results = (outs CIR_AnyIntOrFloat:$result); + let arguments = (ins CIR_ComplexType:$operand); + + let assemblyFormat = [{ + $operand `:` qualified(type($operand)) `->` qualified(type($result)) + attr-dict + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // ComplexRealPtrOp and ComplexImagPtrOp //===----------------------------------------------------------------------===// def ComplexRealPtrOp : CIR_Op<"complex.real_ptr", [Pure]> { - let summary = "Extract the real part of a complex value"; + let summary = "Derive a pointer to the real part of a complex value"; let description = [{ `cir.complex.real_ptr` operation takes a pointer operand that points to a complex value of type `!cir.complex` and yields a pointer to the real part @@ -1240,7 +1292,7 @@ def ComplexRealPtrOp : CIR_Op<"complex.real_ptr", [Pure]> { } def ComplexImagPtrOp : CIR_Op<"complex.imag_ptr", [Pure]> { - let summary = "Extract the imaginary part of a complex value"; + let summary = "Derive a pointer to the imaginary part of a complex value"; let description = [{ `cir.complex.imag_ptr` operation takes a pointer operand that points to a complex value of type `!cir.complex` and yields a pointer to the imaginary @@ -1264,6 +1316,68 @@ def ComplexImagPtrOp : CIR_Op<"complex.imag_ptr", [Pure]> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// ComplexBinOp +//===----------------------------------------------------------------------===// + +def ComplexBinOpKind : I32EnumAttr< + "ComplexBinOpKind", + "complex number binary operation kind", + [BinOpKind_Mul, BinOpKind_Div]> { + let cppNamespace = "::mlir::cir"; +} + +def ComplexRangeKind_Full : I32EnumAttrCase<"Full", 1, "full">; +def ComplexRangeKind_Improved : I32EnumAttrCase<"Improved", 2, "improved">; +def ComplexRangeKind_Promoted : I32EnumAttrCase<"Promoted", 3, "promoted">; +def ComplexRangeKind_Basic : I32EnumAttrCase<"Basic", 4, "basic">; +def ComplexRangeKind_None : I32EnumAttrCase<"None", 5, "none">; + +def ComplexRangeKind : I32EnumAttr< + "ComplexRangeKind", + "complex multiplication and division implementation", + [ComplexRangeKind_Full, ComplexRangeKind_Improved, + ComplexRangeKind_Promoted, ComplexRangeKind_Basic, + ComplexRangeKind_None]> { + let cppNamespace = "::mlir::cir"; +} + +def ComplexBinOp : CIR_Op<"complex.binop", + [Pure, SameTypeOperands, SameOperandsAndResultType]> { + let summary = "Binary operations on operands of complex type"; + let description = [{ + The `cir.complex.binop` operation represents a binary operation on operands + of C complex type (e.g. `float _Complex`). The operation can only represent + binary multiplication or division on complex numbers; other binary + operations, such as addition and subtraction, are represented by the + `cir.binop` operation. + + The operation requires two input operands and has one result. The types of + all the operands and the result should be of the same `!cir.complex` type. + + The operation also takes a `range` attribute that specifies the complex + range of the binary operation. + + Examples: + + ```mlir + %2 = cir.complex.binop add %0, %1 : !cir.complex + %2 = cir.complex.binop mul %0, %1 : !cir.complex + ``` + }]; + + let results = (outs CIR_ComplexType:$result); + let arguments = (ins Arg:$kind, + CIR_ComplexType:$lhs, CIR_ComplexType:$rhs, + Arg:$range, + UnitAttr:$promoted); + + let assemblyFormat = [{ + $kind $lhs `,` $rhs `range` `(` $range `)` (`promoted` $promoted^)? + `:` qualified(type($lhs)) attr-dict + }]; +} + //===----------------------------------------------------------------------===// // BitsOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 4cb51ed2a112..30bf342ba5e2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -758,13 +758,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, result, base, name, index); } - mlir::Value createComplexCreate(mlir::Location loc, mlir::Value real, - mlir::Value imag) { - auto resultComplexTy = - mlir::cir::ComplexType::get(getContext(), real.getType()); - return create(loc, resultComplexTy, real, imag); - } - /// Create a cir.complex.real_ptr operation that derives a pointer to the real /// part of the complex value pointed to by the specified pointer value. mlir::Value createRealPtr(mlir::Location loc, mlir::Value value) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 92566d067e23..6d3861466ab2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2516,8 +2516,10 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { QualType Ty = E->getType(); if (const AtomicType *AT = Ty->getAs()) assert(0 && "not yet implemented"); - assert(!Ty->isAnyComplexType() && "complex types not implemented"); - return buildCompoundAssignmentLValue(cast(E)); + if (!Ty->isAnyComplexType()) + return buildCompoundAssignmentLValue(cast(E)); + return buildComplexCompoundAssignmentLValue( + cast(E)); } case Expr::CallExprClass: case Expr::CXXMemberCallExprClass: diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 8809a2b1d631..99e93e513c5b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -1,8 +1,11 @@ #include "CIRGenBuilder.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" +#include "clang/Basic/LangOptions.h" +#include "clang/CIR/Interfaces/CIRFPTypeInterface.h" #include "clang/CIR/MissingFeatures.h" +#include "mlir/IR/Location.h" #include "mlir/IR/Value.h" #include "clang/AST/StmtVisitor.h" #include "llvm/Support/ErrorHandling.h" @@ -39,6 +42,13 @@ class ComplexExprEmitter : public StmtVisitor { void buildStoreOfComplex(mlir::Location Loc, mlir::Value Val, LValue LV, bool isInit); + /// Emit a cast from complex value Val to DestType. + mlir::Value buildComplexToComplexCast(mlir::Value Val, QualType SrcType, + QualType DestType, SourceLocation Loc); + /// Emit a cast from scalar value Val to DestType. + mlir::Value buildScalarToComplexCast(mlir::Value Val, QualType SrcType, + QualType DestType, SourceLocation Loc); + //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// @@ -181,8 +191,84 @@ class ComplexExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } + struct BinOpInfo { + mlir::Location Loc; + mlir::Value LHS; + mlir::Value RHS; + QualType Ty; // Computation Type. + FPOptions FPFeatures; + }; + + BinOpInfo buildBinOps(const BinaryOperator *E, + QualType PromotionTy = QualType()); + mlir::Value buildPromoted(const Expr *E, QualType PromotionTy); + mlir::Value buildPromotedComplexOperand(const Expr *E, QualType PromotionTy); + + LValue buildCompoundAssignLValue( + const CompoundAssignOperator *E, + mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &), RValue &Val); + mlir::Value buildCompoundAssign( + const CompoundAssignOperator *E, + mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &)); + + mlir::Value buildBinAdd(const BinOpInfo &Op); + mlir::Value buildBinSub(const BinOpInfo &Op); + mlir::Value buildBinMul(const BinOpInfo &Op); + mlir::Value buildBinDiv(const BinOpInfo &Op); + + QualType HigherPrecisionTypeForComplexArithmetic(QualType ElementType, + bool IsDivOpCode) { + ASTContext &Ctx = CGF.getContext(); + const QualType HigherElementType = + Ctx.GetHigherPrecisionFPType(ElementType); + const llvm::fltSemantics &ElementTypeSemantics = + Ctx.getFloatTypeSemantics(ElementType); + const llvm::fltSemantics &HigherElementTypeSemantics = + Ctx.getFloatTypeSemantics(HigherElementType); + // Check that the promoted type can handle the intermediate values without + // overflowing. This can be interpreted as: + // (SmallerType.LargestFiniteVal * SmallerType.LargestFiniteVal) * 2 <= + // LargerType.LargestFiniteVal. + // In terms of exponent it gives this formula: + // (SmallerType.LargestFiniteVal * SmallerType.LargestFiniteVal + // doubles the exponent of SmallerType.LargestFiniteVal) + if (llvm::APFloat::semanticsMaxExponent(ElementTypeSemantics) * 2 + 1 <= + llvm::APFloat::semanticsMaxExponent(HigherElementTypeSemantics)) { + FPHasBeenPromoted = true; + return Ctx.getComplexType(HigherElementType); + } else { + // The intermediate values can't be represented in the promoted type + // without overflowing. + return QualType(); + } + } + + QualType getPromotionType(QualType Ty, bool IsDivOpCode = false) { + if (auto *CT = Ty->getAs()) { + QualType ElementType = CT->getElementType(); + if (IsDivOpCode && ElementType->isFloatingType() && + CGF.getLangOpts().getComplexRange() == + LangOptions::ComplexRangeKind::CX_Promoted) + return HigherPrecisionTypeForComplexArithmetic(ElementType, + IsDivOpCode); + if (ElementType.UseExcessPrecision(CGF.getContext())) + return CGF.getContext().getComplexType(CGF.getContext().FloatTy); + } + if (Ty.UseExcessPrecision(CGF.getContext())) + return CGF.getContext().FloatTy; + return QualType(); + } + #define HANDLEBINOP(OP) \ - mlir::Value VisitBin##OP(const BinaryOperator *E) { llvm_unreachable("NYI"); } + mlir::Value VisitBin##OP(const BinaryOperator *E) { \ + QualType promotionTy = getPromotionType( \ + E->getType(), \ + (E->getOpcode() == BinaryOperatorKind::BO_Div) ? true : false); \ + mlir::Value result = buildBin##OP(buildBinOps(E, promotionTy)); \ + if (!promotionTy.isNull()) \ + result = CGF.buildUnPromotedValue(result, E->getType()); \ + return result; \ + } HANDLEBINOP(Mul) HANDLEBINOP(Div) @@ -196,16 +282,16 @@ class ComplexExprEmitter : public StmtVisitor { // Compound assignments. mlir::Value VisitBinAddAssign(const CompoundAssignOperator *E) { - llvm_unreachable("NYI"); + return buildCompoundAssign(E, &ComplexExprEmitter::buildBinAdd); } mlir::Value VisitBinSubAssign(const CompoundAssignOperator *E) { - llvm_unreachable("NYI"); + return buildCompoundAssign(E, &ComplexExprEmitter::buildBinSub); } mlir::Value VisitBinMulAssign(const CompoundAssignOperator *E) { - llvm_unreachable("NYI"); + return buildCompoundAssign(E, &ComplexExprEmitter::buildBinMul); } mlir::Value VisitBinDivAssign(const CompoundAssignOperator *E) { - llvm_unreachable("NYI"); + return buildCompoundAssign(E, &ComplexExprEmitter::buildBinDiv); } // GCC rejects rem/and/or/xor for integer complex. @@ -263,6 +349,12 @@ static const ComplexType *getComplexType(QualType type) { return cast(cast(type)->getValueType()); } +static mlir::Value createComplexFromReal(CIRGenBuilderTy &builder, + mlir::Location loc, mlir::Value real) { + mlir::Value imag = builder.getNullValue(real.getType(), loc); + return builder.createComplexCreate(loc, real, imag); +} + mlir::Value ComplexExprEmitter::buildLoadOfLValue(LValue LV, SourceLocation Loc) { assert(LV.isSimple() && "non-simple complex l-value?"); @@ -284,6 +376,26 @@ void ComplexExprEmitter::buildStoreOfComplex(mlir::Location Loc, Builder.createStore(Loc, Val, DestAddr, LV.isVolatileQualified()); } +mlir::Value ComplexExprEmitter::buildComplexToComplexCast(mlir::Value Val, + QualType SrcType, + QualType DestType, + SourceLocation Loc) { + // Get the src/dest element type. + SrcType = SrcType->castAs()->getElementType(); + DestType = DestType->castAs()->getElementType(); + if (SrcType == DestType) + return Val; + + llvm_unreachable("complex cast is NYI"); +} + +mlir::Value ComplexExprEmitter::buildScalarToComplexCast(mlir::Value Val, + QualType SrcType, + QualType DestType, + SourceLocation Loc) { + llvm_unreachable("complex cast is NYI"); +} + mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, QualType DestTy) { switch (CK) { @@ -376,6 +488,238 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, llvm_unreachable("unknown cast resulting in complex value"); } +ComplexExprEmitter::BinOpInfo +ComplexExprEmitter::buildBinOps(const BinaryOperator *E, QualType PromotionTy) { + BinOpInfo Ops{CGF.getLoc(E->getExprLoc())}; + + Ops.LHS = buildPromotedComplexOperand(E->getLHS(), PromotionTy); + Ops.RHS = buildPromotedComplexOperand(E->getRHS(), PromotionTy); + if (!PromotionTy.isNull()) + Ops.Ty = PromotionTy; + else + Ops.Ty = E->getType(); + Ops.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); + return Ops; +} + +mlir::Value ComplexExprEmitter::buildPromoted(const Expr *E, + QualType PromotionTy) { + E = E->IgnoreParens(); + if (const auto *BO = dyn_cast(E)) { + switch (BO->getOpcode()) { +#define HANDLE_BINOP(OP) \ + case BO_##OP: \ + return buildBin##OP(buildBinOps(BO, PromotionTy)); + HANDLE_BINOP(Add) + HANDLE_BINOP(Sub) + HANDLE_BINOP(Mul) + HANDLE_BINOP(Div) +#undef HANDLE_BINOP + default: + break; + } + } else if (const auto *UO = dyn_cast(E)) { + switch (UO->getOpcode()) { + case UO_Minus: + return VisitMinus(UO, PromotionTy); + case UO_Plus: + return VisitPlus(UO, PromotionTy); + default: + break; + } + } + auto result = Visit(const_cast(E)); + if (!PromotionTy.isNull()) + return CGF.buildPromotedValue(result, PromotionTy); + return result; +} + +mlir::Value +ComplexExprEmitter::buildPromotedComplexOperand(const Expr *E, + QualType PromotionTy) { + if (E->getType()->isAnyComplexType()) { + if (!PromotionTy.isNull()) + return CGF.buildPromotedComplexExpr(E, PromotionTy); + return Visit(const_cast(E)); + } + + mlir::Value Real; + if (!PromotionTy.isNull()) { + QualType ComplexElementTy = + PromotionTy->castAs()->getElementType(); + Real = CGF.buildPromotedScalarExpr(E, ComplexElementTy); + } else + Real = CGF.buildScalarExpr(E); + + return createComplexFromReal(CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), + Real); +} + +LValue ComplexExprEmitter::buildCompoundAssignLValue( + const CompoundAssignOperator *E, + mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &), RValue &Val) { + QualType LHSTy = E->getLHS()->getType(); + if (const AtomicType *AT = LHSTy->getAs()) + LHSTy = AT->getValueType(); + + BinOpInfo OpInfo{CGF.getLoc(E->getExprLoc())}; + OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); + + assert(!MissingFeatures::CGFPOptionsRAII()); + + // Load the RHS and LHS operands. + // __block variables need to have the rhs evaluated first, plus this should + // improve codegen a little. + QualType PromotionTypeCR; + PromotionTypeCR = getPromotionType(E->getComputationResultType()); + if (PromotionTypeCR.isNull()) + PromotionTypeCR = E->getComputationResultType(); + OpInfo.Ty = PromotionTypeCR; + QualType ComplexElementTy = + OpInfo.Ty->castAs()->getElementType(); + QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType()); + + // The RHS should have been converted to the computation type. + if (E->getRHS()->getType()->isRealFloatingType()) { + if (!PromotionTypeRHS.isNull()) + OpInfo.RHS = createComplexFromReal( + CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), + CGF.buildPromotedScalarExpr(E->getRHS(), PromotionTypeRHS)); + else { + assert(CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, + E->getRHS()->getType())); + OpInfo.RHS = + createComplexFromReal(CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), + CGF.buildScalarExpr(E->getRHS())); + } + } else { + if (!PromotionTypeRHS.isNull()) { + OpInfo.RHS = createComplexFromReal( + CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), + CGF.buildPromotedComplexExpr(E->getRHS(), PromotionTypeRHS)); + } else { + assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty, + E->getRHS()->getType())); + OpInfo.RHS = Visit(E->getRHS()); + } + } + + LValue LHS = CGF.buildLValue(E->getLHS()); + + // Load from the l-value and convert it. + SourceLocation Loc = E->getExprLoc(); + QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType()); + if (LHSTy->isAnyComplexType()) { + mlir::Value LHSVal = buildLoadOfLValue(LHS, Loc); + if (!PromotionTypeLHS.isNull()) + OpInfo.LHS = + buildComplexToComplexCast(LHSVal, LHSTy, PromotionTypeLHS, Loc); + else + OpInfo.LHS = buildComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc); + } else { + mlir::Value LHSVal = CGF.buildLoadOfScalar(LHS, Loc); + // For floating point real operands we can directly pass the scalar form + // to the binary operator emission and potentially get more efficient code. + if (LHSTy->isRealFloatingType()) { + QualType PromotedComplexElementTy; + if (!PromotionTypeLHS.isNull()) { + PromotedComplexElementTy = + cast(PromotionTypeLHS)->getElementType(); + if (!CGF.getContext().hasSameUnqualifiedType(PromotedComplexElementTy, + PromotionTypeLHS)) + LHSVal = CGF.buildScalarConversion(LHSVal, LHSTy, + PromotedComplexElementTy, Loc); + } else { + if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy)) + LHSVal = + CGF.buildScalarConversion(LHSVal, LHSTy, ComplexElementTy, Loc); + } + OpInfo.LHS = createComplexFromReal(CGF.getBuilder(), + CGF.getLoc(E->getExprLoc()), LHSVal); + } else { + OpInfo.LHS = buildScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc); + } + } + + // Expand the binary operator. + mlir::Value Result = (this->*Func)(OpInfo); + + // Truncate the result and store it into the LHS lvalue. + if (LHSTy->isAnyComplexType()) { + mlir::Value ResVal = + buildComplexToComplexCast(Result, OpInfo.Ty, LHSTy, Loc); + buildStoreOfComplex(CGF.getLoc(E->getExprLoc()), ResVal, LHS, + /*isInit*/ false); + Val = RValue::getComplex(ResVal); + } else { + mlir::Value ResVal = + CGF.buildComplexToScalarConversion(Result, OpInfo.Ty, LHSTy, Loc); + CGF.buildStoreOfScalar(ResVal, LHS, /*isInit*/ false); + Val = RValue::get(ResVal); + } + + return LHS; +} + +mlir::Value ComplexExprEmitter::buildCompoundAssign( + const CompoundAssignOperator *E, + mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &)) { + RValue Val; + LValue LV = buildCompoundAssignLValue(E, Func, Val); + + // The result of an assignment in C is the assigned r-value. + if (!CGF.getLangOpts().CPlusPlus) + return Val.getComplexVal(); + + // If the lvalue is non-volatile, return the computed value of the assignment. + if (!LV.isVolatileQualified()) + return Val.getComplexVal(); + + return buildLoadOfLValue(LV, E->getExprLoc()); +} + +mlir::Value ComplexExprEmitter::buildBinAdd(const BinOpInfo &Op) { + assert(!MissingFeatures::CGFPOptionsRAII()); + return CGF.getBuilder().createComplexAdd(Op.Loc, Op.LHS, Op.RHS); +} + +mlir::Value ComplexExprEmitter::buildBinSub(const BinOpInfo &Op) { + assert(!MissingFeatures::CGFPOptionsRAII()); + return CGF.getBuilder().createComplexSub(Op.Loc, Op.LHS, Op.RHS); +} + +static mlir::cir::ComplexRangeKind +getComplexRangeAttr(LangOptions::ComplexRangeKind range) { + switch (range) { + case LangOptions::CX_Full: + return mlir::cir::ComplexRangeKind::Full; + case LangOptions::CX_Improved: + return mlir::cir::ComplexRangeKind::Improved; + case LangOptions::CX_Promoted: + return mlir::cir::ComplexRangeKind::Promoted; + case LangOptions::CX_Basic: + return mlir::cir::ComplexRangeKind::Basic; + case LangOptions::CX_None: + return mlir::cir::ComplexRangeKind::None; + default: + llvm_unreachable("unknown ComplexRangeKind"); + } +} + +mlir::Value ComplexExprEmitter::buildBinMul(const BinOpInfo &Op) { + assert(!MissingFeatures::CGFPOptionsRAII()); + return CGF.getBuilder().createComplexMul( + Op.Loc, Op.LHS, Op.RHS, + getComplexRangeAttr(Op.FPFeatures.getComplexRange()), FPHasBeenPromoted); +} + +mlir::Value ComplexExprEmitter::buildBinDiv(const BinOpInfo &Op) { + assert(!MissingFeatures::CGFPOptionsRAII()); + return CGF.getBuilder().createComplexDiv( + Op.Loc, Op.LHS, Op.RHS, + getComplexRangeAttr(Op.FPFeatures.getComplexRange()), FPHasBeenPromoted); +} + LValue ComplexExprEmitter::buildBinAssignLValue(const BinaryOperator *E, mlir::Value &Val) { assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), @@ -435,6 +779,21 @@ mlir::Value ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) { return Builder.getZero(CGF.getLoc(E->getExprLoc()), CGF.ConvertType(Ty)); } +mlir::Value CIRGenFunction::buildPromotedComplexExpr(const Expr *E, + QualType PromotionType) { + return ComplexExprEmitter(*this).buildPromoted(E, PromotionType); +} + +mlir::Value CIRGenFunction::buildPromotedValue(mlir::Value result, + QualType PromotionType) { + llvm_unreachable("complex type conversion is NYI"); +} + +mlir::Value CIRGenFunction::buildUnPromotedValue(mlir::Value result, + QualType PromotionType) { + llvm_unreachable("complex type conversion is NYI"); +} + mlir::Value CIRGenFunction::buildComplexExpr(const Expr *E) { assert(E && getComplexType(E->getType()) && "Invalid complex expression to emit"); @@ -476,3 +835,35 @@ LValue CIRGenFunction::buildComplexAssignmentLValue(const BinaryOperator *E) { llvm_unreachable("NYI"); return LVal; } + +using CompoundFunc = + mlir::Value (ComplexExprEmitter::*)(const ComplexExprEmitter::BinOpInfo &); + +static CompoundFunc getComplexOp(BinaryOperatorKind Op) { + switch (Op) { + case BO_MulAssign: + return &ComplexExprEmitter::buildBinMul; + case BO_DivAssign: + return &ComplexExprEmitter::buildBinDiv; + case BO_SubAssign: + return &ComplexExprEmitter::buildBinSub; + case BO_AddAssign: + return &ComplexExprEmitter::buildBinAdd; + default: + llvm_unreachable("unexpected complex compound assignment"); + } +} + +LValue CIRGenFunction::buildComplexCompoundAssignmentLValue( + const CompoundAssignOperator *E) { + CompoundFunc Op = getComplexOp(E->getOpcode()); + RValue Val; + return ComplexExprEmitter(*this).buildCompoundAssignLValue(E, Op, Val); +} + +mlir::Value CIRGenFunction::buildComplexToScalarConversion(mlir::Value Src, + QualType SrcTy, + QualType DstTy, + SourceLocation Loc) { + llvm_unreachable("complex cast is NYI"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 77da0e2185a1..3f48a9e1f9cf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -599,6 +599,7 @@ class CIRGenFunction : public CIRGenTypeCache { QualType complexType); LValue buildComplexAssignmentLValue(const BinaryOperator *E); + LValue buildComplexCompoundAssignmentLValue(const CompoundAssignOperator *E); /// Emits a reference binding to the passed in expression. RValue buildReferenceBindingToExpr(const Expr *E); @@ -1119,8 +1120,11 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value buildScalarExpr(const clang::Expr *E); mlir::Value buildScalarConstant(const ConstantEmission &Constant, Expr *E); + mlir::Value buildPromotedComplexExpr(const Expr *E, QualType PromotionType); mlir::Value buildPromotedScalarExpr(const clang::Expr *E, QualType PromotionType); + mlir::Value buildPromotedValue(mlir::Value result, QualType PromotionType); + mlir::Value buildUnPromotedValue(mlir::Value result, QualType PromotionType); mlir::Type getCIRType(const clang::QualType &type); @@ -1519,6 +1523,12 @@ class CIRGenFunction : public CIRGenTypeCache { clang::QualType DstTy, clang::SourceLocation Loc); + /// Emit a conversion from the specified complex type to the specified + /// destination type, where the destination type is an LLVM scalar type. + mlir::Value buildComplexToScalarConversion(mlir::Value Src, QualType SrcTy, + QualType DstTy, + SourceLocation Loc); + LValue makeAddrLValue(Address Addr, clang::QualType T, LValueBaseInfo BaseInfo) { return LValue::makeAddr(Addr, T, getContext(), BaseInfo); diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index c31ed30a183a..37acf60457c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -61,9 +61,9 @@ class RValue { } /// Return the real/imag components of this complex value. - std::pair getComplexVal() const { - assert(0 && "not implemented"); - return {}; + mlir::Value getComplexVal() const { + assert(isComplex() && "Not a complex!"); + return V1.getPointer(); } /// Return the mlir::Value of the address of the aggregate. diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 11d38bb154cc..f759e03fc880 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -563,6 +563,26 @@ LogicalResult ComplexCreateOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// ComplexRealOp and ComplexImagOp +//===----------------------------------------------------------------------===// + +LogicalResult ComplexRealOp::verify() { + if (getType() != getOperand().getType().getElementTy()) { + emitOpError() << "cir.complex.real result type does not match operand type"; + return failure(); + } + return success(); +} + +LogicalResult ComplexImagOp::verify() { + if (getType() != getOperand().getType().getElementTy()) { + emitOpError() << "cir.complex.imag result type does not match operand type"; + return failure(); + } + return success(); +} + //===----------------------------------------------------------------------===// // ComplexRealPtrOp and ComplexImagPtrOp //===----------------------------------------------------------------------===// @@ -3306,6 +3326,12 @@ LogicalResult BinOp::verify() { return emitError() << "The nsw/nuw flags are applicable to opcodes: 'add', " "'sub' and 'mul'"; + bool complexOps = getKind() == mlir::cir::BinOpKind::Add || + getKind() == mlir::cir::BinOpKind::Sub; + if (isa(getType()) && !complexOps) + return emitError() + << "cir.binop can only represent 'add' and 'sub' on complex numbers"; + return mlir::success(); } diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 1172c7332ce2..62c0e5acd899 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -20,6 +20,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" +#include "llvm/ADT/APFloat.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" @@ -70,6 +71,8 @@ struct LoweringPreparePass : public LoweringPrepareBase { void runOnOperation() override; void runOnOp(Operation *op); + void lowerBinOp(BinOp op); + void lowerComplexBinOp(ComplexBinOp op); void lowerThreeWayCmpOp(CmpThreeWayOp op); void lowerVAArgOp(VAArgOp op); void lowerGlobalOp(GlobalOp op); @@ -344,6 +347,321 @@ void LoweringPreparePass::lowerVAArgOp(VAArgOp op) { return; } +void LoweringPreparePass::lowerBinOp(BinOp op) { + auto ty = op.getType(); + if (!mlir::isa(ty)) + return; + + auto loc = op.getLoc(); + auto opKind = op.getKind(); + assert((opKind == mlir::cir::BinOpKind::Add || + opKind == mlir::cir::BinOpKind::Sub) && + "invalid binary op kind on complex numbers"); + + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op); + + auto lhs = op.getLhs(); + auto rhs = op.getRhs(); + + // (a+bi) + (c+di) = (a+c) + (b+d)i + // (a+bi) - (c+di) = (a-c) + (b-d)i + auto lhsReal = builder.createComplexReal(loc, lhs); + auto lhsImag = builder.createComplexImag(loc, lhs); + auto rhsReal = builder.createComplexReal(loc, rhs); + auto rhsImag = builder.createComplexImag(loc, rhs); + auto resultReal = builder.createBinop(lhsReal, opKind, rhsReal); + auto resultImag = builder.createBinop(lhsImag, opKind, rhsImag); + auto result = builder.createComplexCreate(loc, resultReal, resultImag); + + op.replaceAllUsesWith(result); + op.erase(); +} + +static mlir::Value buildComplexBinOpLibCall( + LoweringPreparePass &pass, CIRBaseBuilderTy &builder, + llvm::StringRef (*libFuncNameGetter)(llvm::APFloat::Semantics), + mlir::Location loc, mlir::cir::ComplexType ty, mlir::Value lhsReal, + mlir::Value lhsImag, mlir::Value rhsReal, mlir::Value rhsImag) { + auto elementTy = mlir::cast(ty.getElementTy()); + + auto libFuncName = libFuncNameGetter( + llvm::APFloat::SemanticsToEnum(elementTy.getFloatSemantics())); + llvm::SmallVector libFuncInputTypes(4, elementTy); + auto libFuncTy = mlir::cir::FuncType::get(libFuncInputTypes, ty); + + mlir::cir::FuncOp libFunc; + { + mlir::OpBuilder::InsertionGuard ipGuard{builder}; + builder.setInsertionPointToStart(pass.theModule.getBody()); + libFunc = pass.buildRuntimeFunction(builder, libFuncName, loc, libFuncTy); + } + + auto call = + builder.createCallOp(loc, libFunc, {lhsReal, lhsImag, rhsReal, rhsImag}); + return call.getResult(); +} + +static llvm::StringRef +getComplexMulLibCallName(llvm::APFloat::Semantics semantics) { + switch (semantics) { + case llvm::APFloat::S_IEEEhalf: + return "__mulhc3"; + case llvm::APFloat::S_IEEEsingle: + return "__mulsc3"; + case llvm::APFloat::S_IEEEdouble: + return "__muldc3"; + case llvm::APFloat::S_PPCDoubleDouble: + return "__multc3"; + case llvm::APFloat::S_x87DoubleExtended: + return "__mulxc3"; + case llvm::APFloat::S_IEEEquad: + return "__multc3"; + default: + llvm_unreachable("unsupported floating point type"); + } +} + +static llvm::StringRef +getComplexDivLibCallName(llvm::APFloat::Semantics semantics) { + switch (semantics) { + case llvm::APFloat::S_IEEEhalf: + return "__divhc3"; + case llvm::APFloat::S_IEEEsingle: + return "__divsc3"; + case llvm::APFloat::S_IEEEdouble: + return "__divdc3"; + case llvm::APFloat::S_PPCDoubleDouble: + return "__divtc3"; + case llvm::APFloat::S_x87DoubleExtended: + return "__divxc3"; + case llvm::APFloat::S_IEEEquad: + return "__divtc3"; + default: + llvm_unreachable("unsupported floating point type"); + } +} + +static mlir::Value lowerComplexMul(LoweringPreparePass &pass, + CIRBaseBuilderTy &builder, + mlir::Location loc, + mlir::cir::ComplexBinOp op, + mlir::Value lhsReal, mlir::Value lhsImag, + mlir::Value rhsReal, mlir::Value rhsImag) { + // (a+bi) * (c+di) = (ac-bd) + (ad+bc)i + auto resultRealLhs = + builder.createBinop(lhsReal, mlir::cir::BinOpKind::Mul, rhsReal); + auto resultRealRhs = + builder.createBinop(lhsImag, mlir::cir::BinOpKind::Mul, rhsImag); + auto resultImagLhs = + builder.createBinop(lhsReal, mlir::cir::BinOpKind::Mul, rhsImag); + auto resultImagRhs = + builder.createBinop(lhsImag, mlir::cir::BinOpKind::Mul, rhsReal); + auto resultReal = builder.createBinop( + resultRealLhs, mlir::cir::BinOpKind::Sub, resultRealRhs); + auto resultImag = builder.createBinop( + resultImagLhs, mlir::cir::BinOpKind::Add, resultImagRhs); + auto algebraicResult = + builder.createComplexCreate(loc, resultReal, resultImag); + + auto ty = op.getType(); + auto range = op.getRange(); + if (mlir::isa(ty.getElementTy()) || + range == mlir::cir::ComplexRangeKind::Basic || + range == mlir::cir::ComplexRangeKind::Improved || + range == mlir::cir::ComplexRangeKind::Promoted) + return algebraicResult; + + // Check whether the real part and the imaginary part of the result are both + // NaN. If so, emit a library call to compute the multiplication instead. + // We check a value against NaN by comparing the value against itself. + auto resultRealIsNaN = builder.createIsNaN(loc, resultReal); + return builder + .create( + loc, resultRealIsNaN, + [&](mlir::OpBuilder &, mlir::Location) { + auto resultImagIsNaN = builder.createIsNaN(loc, resultImag); + auto inner = + builder + .create( + loc, resultImagIsNaN, + [&](mlir::OpBuilder &, mlir::Location) { + auto libCallResult = buildComplexBinOpLibCall( + pass, builder, &getComplexMulLibCallName, loc, ty, + lhsReal, lhsImag, rhsReal, rhsImag); + builder.createYield(loc, libCallResult); + }, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield(loc, algebraicResult); + }) + .getResult(); + builder.createYield(loc, inner); + }, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield(loc, algebraicResult); + }) + .getResult(); +} + +static mlir::Value +buildAlgebraicComplexDiv(CIRBaseBuilderTy &builder, mlir::Location loc, + mlir::Value lhsReal, mlir::Value lhsImag, + mlir::Value rhsReal, mlir::Value rhsImag) { + // (a+bi) / (c+di) = ((ac+bd)/(cc+dd)) + ((bc-ad)/(cc+dd))i + auto &a = lhsReal; + auto &b = lhsImag; + auto &c = rhsReal; + auto &d = rhsImag; + + auto ac = builder.createBinop(loc, a, mlir::cir::BinOpKind::Mul, c); // a*c + auto bd = builder.createBinop(loc, b, mlir::cir::BinOpKind::Mul, d); // b*d + auto cc = builder.createBinop(loc, c, mlir::cir::BinOpKind::Mul, c); // c*c + auto dd = builder.createBinop(loc, d, mlir::cir::BinOpKind::Mul, d); // d*d + auto acbd = + builder.createBinop(loc, ac, mlir::cir::BinOpKind::Add, bd); // ac+bd + auto ccdd = + builder.createBinop(loc, cc, mlir::cir::BinOpKind::Add, dd); // cc+dd + auto resultReal = + builder.createBinop(loc, acbd, mlir::cir::BinOpKind::Div, ccdd); + + auto bc = builder.createBinop(loc, b, mlir::cir::BinOpKind::Mul, c); // b*c + auto ad = builder.createBinop(loc, a, mlir::cir::BinOpKind::Mul, d); // a*d + auto bcad = + builder.createBinop(loc, bc, mlir::cir::BinOpKind::Sub, ad); // bc-ad + auto resultImag = + builder.createBinop(loc, bcad, mlir::cir::BinOpKind::Div, ccdd); + + return builder.createComplexCreate(loc, resultReal, resultImag); +} + +static mlir::Value +buildRangeReductionComplexDiv(CIRBaseBuilderTy &builder, mlir::Location loc, + mlir::Value lhsReal, mlir::Value lhsImag, + mlir::Value rhsReal, mlir::Value rhsImag) { + // Implements Smith's algorithm for complex division. + // SMITH, R. L. Algorithm 116: Complex division. Commun. ACM 5, 8 (1962). + + // Let: + // - lhs := a+bi + // - rhs := c+di + // - result := lhs / rhs = e+fi + // + // The algorithm psudocode looks like follows: + // if fabs(c) >= fabs(d): + // r := d / c + // tmp := c + r*d + // e = (a + b*r) / tmp + // f = (b - a*r) / tmp + // else: + // r := c / d + // tmp := d + r*c + // e = (a*r + b) / tmp + // f = (b*r - a) / tmp + + auto &a = lhsReal; + auto &b = lhsImag; + auto &c = rhsReal; + auto &d = rhsImag; + + auto trueBranchBuilder = [&](mlir::OpBuilder &, mlir::Location) { + auto r = builder.createBinop(loc, d, mlir::cir::BinOpKind::Div, + c); // r := d / c + auto rd = builder.createBinop(loc, r, mlir::cir::BinOpKind::Mul, d); // r*d + auto tmp = builder.createBinop(loc, c, mlir::cir::BinOpKind::Add, + rd); // tmp := c + r*d + + auto br = builder.createBinop(loc, b, mlir::cir::BinOpKind::Mul, r); // b*r + auto abr = + builder.createBinop(loc, a, mlir::cir::BinOpKind::Add, br); // a + b*r + auto e = builder.createBinop(loc, abr, mlir::cir::BinOpKind::Div, tmp); + + auto ar = builder.createBinop(loc, a, mlir::cir::BinOpKind::Mul, r); // a*r + auto bar = + builder.createBinop(loc, b, mlir::cir::BinOpKind::Sub, ar); // b - a*r + auto f = builder.createBinop(loc, bar, mlir::cir::BinOpKind::Div, tmp); + + auto result = builder.createComplexCreate(loc, e, f); + builder.createYield(loc, result); + }; + + auto falseBranchBuilder = [&](mlir::OpBuilder &, mlir::Location) { + auto r = builder.createBinop(loc, c, mlir::cir::BinOpKind::Div, + d); // r := c / d + auto rc = builder.createBinop(loc, r, mlir::cir::BinOpKind::Mul, c); // r*c + auto tmp = builder.createBinop(loc, d, mlir::cir::BinOpKind::Add, + rc); // tmp := d + r*c + + auto ar = builder.createBinop(loc, a, mlir::cir::BinOpKind::Mul, r); // a*r + auto arb = + builder.createBinop(loc, ar, mlir::cir::BinOpKind::Add, b); // a*r + b + auto e = builder.createBinop(loc, arb, mlir::cir::BinOpKind::Div, tmp); + + auto br = builder.createBinop(loc, b, mlir::cir::BinOpKind::Mul, r); // b*r + auto bra = + builder.createBinop(loc, br, mlir::cir::BinOpKind::Sub, a); // b*r - a + auto f = builder.createBinop(loc, bra, mlir::cir::BinOpKind::Div, tmp); + + auto result = builder.createComplexCreate(loc, e, f); + builder.createYield(loc, result); + }; + + auto cFabs = builder.create(loc, c); + auto dFabs = builder.create(loc, d); + auto cmpResult = + builder.createCompare(loc, mlir::cir::CmpOpKind::ge, cFabs, dFabs); + auto ternary = builder.create( + loc, cmpResult, trueBranchBuilder, falseBranchBuilder); + + return ternary.getResult(); +} + +static mlir::Value lowerComplexDiv(LoweringPreparePass &pass, + CIRBaseBuilderTy &builder, + mlir::Location loc, + mlir::cir::ComplexBinOp op, + mlir::Value lhsReal, mlir::Value lhsImag, + mlir::Value rhsReal, mlir::Value rhsImag) { + auto ty = op.getType(); + if (mlir::isa(ty.getElementTy())) { + auto range = op.getRange(); + if (range == mlir::cir::ComplexRangeKind::Improved || + (range == mlir::cir::ComplexRangeKind::Promoted && !op.getPromoted())) + return buildRangeReductionComplexDiv(builder, loc, lhsReal, lhsImag, + rhsReal, rhsImag); + if (range == mlir::cir::ComplexRangeKind::Full) + return buildComplexBinOpLibCall(pass, builder, &getComplexDivLibCallName, + loc, ty, lhsReal, lhsImag, rhsReal, + rhsImag); + } + + return buildAlgebraicComplexDiv(builder, loc, lhsReal, lhsImag, rhsReal, + rhsImag); +} + +void LoweringPreparePass::lowerComplexBinOp(ComplexBinOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op); + + auto loc = op.getLoc(); + auto lhs = op.getLhs(); + auto rhs = op.getRhs(); + auto lhsReal = builder.createComplexReal(loc, lhs); + auto lhsImag = builder.createComplexImag(loc, lhs); + auto rhsReal = builder.createComplexReal(loc, rhs); + auto rhsImag = builder.createComplexImag(loc, rhs); + + mlir::Value loweredResult; + if (op.getKind() == mlir::cir::ComplexBinOpKind::Mul) + loweredResult = lowerComplexMul(*this, builder, loc, op, lhsReal, lhsImag, + rhsReal, rhsImag); + else + loweredResult = lowerComplexDiv(*this, builder, loc, op, lhsReal, lhsImag, + rhsReal, rhsImag); + + op.replaceAllUsesWith(loweredResult); + op.erase(); +} + void LoweringPreparePass::lowerThreeWayCmpOp(CmpThreeWayOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op); @@ -621,7 +939,11 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { } void LoweringPreparePass::runOnOp(Operation *op) { - if (auto threeWayCmp = dyn_cast(op)) { + if (auto bin = dyn_cast(op)) { + lowerBinOp(bin); + } else if (auto complexBin = dyn_cast(op)) { + lowerComplexBinOp(complexBin); + } else if (auto threeWayCmp = dyn_cast(op)) { lowerThreeWayCmpOp(threeWayCmp); } else if (auto vaArgOp = dyn_cast(op)) { lowerVAArgOp(vaArgOp); @@ -658,9 +980,9 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { - if (isa( - op)) + if (isa(op)) opsToTransform.push_back(op); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 9e68d890cf38..2c5cc69bc6a2 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1695,7 +1695,41 @@ class CIRComplexCreateOpLowering } }; -class CIRComplexRealPtrOPLowering +class CIRComplexRealOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ComplexRealOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resultLLVMTy = + getTypeConverter()->convertType(op.getResult().getType()); + rewriter.replaceOpWithNewOp( + op, resultLLVMTy, adaptor.getOperand(), + llvm::ArrayRef{0}); + return mlir::success(); + } +}; + +class CIRComplexImagOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ComplexImagOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resultLLVMTy = + getTypeConverter()->convertType(op.getResult().getType()); + rewriter.replaceOpWithNewOp( + op, resultLLVMTy, adaptor.getOperand(), + llvm::ArrayRef{1}); + return mlir::success(); + } +}; + +class CIRComplexRealPtrOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -3525,7 +3559,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRShiftOpLowering, CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRComplexCreateOpLowering, - CIRComplexRealPtrOPLowering, CIRComplexImagPtrOpLowering, + CIRComplexRealOpLowering, CIRComplexImagOpLowering, + CIRComplexRealPtrOpLowering, CIRComplexImagPtrOpLowering, CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, diff --git a/clang/test/CIR/CodeGen/complex-arithmetic.c b/clang/test/CIR/CodeGen/complex-arithmetic.c new file mode 100644 index 000000000000..6dc0c546bd2f --- /dev/null +++ b/clang/test/CIR/CodeGen/complex-arithmetic.c @@ -0,0 +1,647 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIRGEN,CIRGEN-BASIC,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIRGEN,CIRGEN-BASIC,CHECK %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIRGEN,CIRGEN-IMPROVED,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIRGEN,CIRGEN-IMPROVED,CHECK %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIRGEN,CIRGEN-FULL,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIRGEN,CIRGEN-FULL,CHECK %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIR,CIR-BASIC,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIR,CIR-BASIC,CHECK %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIR,CIR-IMPROVED,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIR,CIR-IMPROVED,CHECK %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIR,CIR-FULL,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIR,CIR-FULL,CHECK %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefixes=CLANG,LLVM,LLVM-BASIC,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefixes=CPPLANG,LLVM,LLVM-BASIC,CHECK %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefixes=CLANG,LLVM,LLVM-IMPROVED,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefixes=CPPLANG,LLVM,LLVM-IMPROVED,CHECK %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefixes=CLANG,LLVM,LLVM-FULL,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefixes=CPPLANG,LLVM,LLVM-FULL,CHECK %s + +double _Complex cd1, cd2; +int _Complex ci1, ci2; + +void add() { + cd1 = cd1 + cd2; + ci1 = ci1 + ci2; +} + +// CLANG: @add +// CPPLANG: @_Z3addv + +// CIRGEN: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.complex +// CIRGEN: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.complex + +// CIR: %[[#LHS_REAL:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#LHS_IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#RHS_REAL:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#RHS_IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#RES_REAL:]] = cir.binop(add, %[[#LHS_REAL]], %[[#RHS_REAL]]) : !cir.double +// CIR-NEXT: %[[#RES_IMAG:]] = cir.binop(add, %[[#LHS_IMAG]], %[[#RHS_IMAG]]) : !cir.double +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#RES_REAL]], %[[#RES_IMAG]] : !cir.double -> !cir.complex + +// CIR: %[[#LHS_REAL:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#LHS_IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#RHS_REAL:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#RHS_IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#RES_REAL:]] = cir.binop(add, %[[#LHS_REAL]], %[[#RHS_REAL]]) : !s32i +// CIR-NEXT: %[[#RES_IMAG:]] = cir.binop(add, %[[#LHS_IMAG]], %[[#RHS_IMAG]]) : !s32i +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#RES_REAL]], %[[#RES_IMAG]] : !s32i -> !cir.complex + +// LLVM: %[[#LHS_REAL:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#LHS_IMAG:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#RHS_REAL:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#RHS_IMAG:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#RES_REAL:]] = fadd double %[[#LHS_REAL]], %[[#RHS_REAL]] +// LLVM-NEXT: %[[#RES_IMAG:]] = fadd double %[[#LHS_IMAG]], %[[#RHS_IMAG]] +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#RES_REAL]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#RES_IMAG]], 1 + +// LLVM: %[[#LHS_REAL:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#LHS_IMAG:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#RHS_REAL:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#RHS_IMAG:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#RES_REAL:]] = add i32 %[[#LHS_REAL]], %[[#RHS_REAL]] +// LLVM-NEXT: %[[#RES_IMAG:]] = add i32 %[[#LHS_IMAG]], %[[#RHS_IMAG]] +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#RES_REAL]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 %[[#RES_IMAG]], 1 + +// CHECK: } + +void sub() { + cd1 = cd1 - cd2; + ci1 = ci1 - ci2; +} + +// CLANG: @sub +// CPPLANG: @_Z3subv + +// CIRGEN: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.complex +// CIRGEN: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.complex + +// CIR: %[[#LHS_REAL:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#LHS_IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#RHS_REAL:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#RHS_IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#RES_REAL:]] = cir.binop(sub, %[[#LHS_REAL]], %[[#RHS_REAL]]) : !cir.double +// CIR-NEXT: %[[#RES_IMAG:]] = cir.binop(sub, %[[#LHS_IMAG]], %[[#RHS_IMAG]]) : !cir.double +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#RES_REAL]], %[[#RES_IMAG]] : !cir.double -> !cir.complex + +// CIR: %[[#LHS_REAL:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#LHS_IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#RHS_REAL:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#RHS_IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#RES_REAL:]] = cir.binop(sub, %[[#LHS_REAL]], %[[#RHS_REAL]]) : !s32i +// CIR-NEXT: %[[#RES_IMAG:]] = cir.binop(sub, %[[#LHS_IMAG]], %[[#RHS_IMAG]]) : !s32i +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#RES_REAL]], %[[#RES_IMAG]] : !s32i -> !cir.complex + +// LLVM: %[[#LHS_REAL:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#LHS_IMAG:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#RHS_REAL:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#RHS_IMAG:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#RES_REAL:]] = fsub double %[[#LHS_REAL]], %[[#RHS_REAL]] +// LLVM-NEXT: %[[#RES_IMAG:]] = fsub double %[[#LHS_IMAG]], %[[#RHS_IMAG]] +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#RES_REAL]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#RES_IMAG]], 1 + +// LLVM: %[[#LHS_REAL:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#LHS_IMAG:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#RHS_REAL:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#RHS_IMAG:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#RES_REAL:]] = sub i32 %[[#LHS_REAL]], %[[#RHS_REAL]] +// LLVM-NEXT: %[[#RES_IMAG:]] = sub i32 %[[#LHS_IMAG]], %[[#RHS_IMAG]] +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#RES_REAL]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 %[[#RES_IMAG]], 1 + +// CHECK: } + +void mul() { + cd1 = cd1 * cd2; + ci1 = ci1 * ci2; +} + +// CLANG: @mul +// CPPLANG: @_Z3mulv + +// CIRGEN-BASIC: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(basic) : !cir.complex +// CIRGEN-BASIC: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(basic) : !cir.complex + +// CIR-BASIC: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-BASIC-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-BASIC-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-BASIC-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-BASIC-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSR]]) : !cir.double +// CIR-BASIC-NEXT: %[[#B:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSI]]) : !cir.double +// CIR-BASIC-NEXT: %[[#C:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSI]]) : !cir.double +// CIR-BASIC-NEXT: %[[#D:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSR]]) : !cir.double +// CIR-BASIC-NEXT: %[[#E:]] = cir.binop(sub, %[[#A]], %[[#B]]) : !cir.double +// CIR-BASIC-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !cir.double +// CIR-BASIC-NEXT: %{{.+}} = cir.complex.create %[[#E]], %[[#F]] : !cir.double -> !cir.complex + +// CIR-BASIC: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-BASIC-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-BASIC-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-BASIC-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-BASIC-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSR]]) : !s32i +// CIR-BASIC-NEXT: %[[#B:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSI]]) : !s32i +// CIR-BASIC-NEXT: %[[#C:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSI]]) : !s32i +// CIR-BASIC-NEXT: %[[#D:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSR]]) : !s32i +// CIR-BASIC-NEXT: %[[#E:]] = cir.binop(sub, %[[#A]], %[[#B]]) : !s32i +// CIR-BASIC-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !s32i +// CIR-BASIC-NEXT: %{{.+}} = cir.complex.create %[[#E]], %[[#F]] : !s32i -> !cir.complex + +// LLVM-BASIC: %[[#LHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-BASIC-NEXT: %[[#LHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-BASIC-NEXT: %[[#RHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-BASIC-NEXT: %[[#RHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-BASIC-NEXT: %[[#A:]] = fmul double %[[#LHSR]], %[[#RHSR]] +// LLVM-BASIC-NEXT: %[[#B:]] = fmul double %[[#LHSI]], %[[#RHSI]] +// LLVM-BASIC-NEXT: %[[#C:]] = fmul double %[[#LHSR]], %[[#RHSI]] +// LLVM-BASIC-NEXT: %[[#D:]] = fmul double %[[#LHSI]], %[[#RHSR]] +// LLVM-BASIC-NEXT: %[[#E:]] = fsub double %[[#A]], %[[#B]] +// LLVM-BASIC-NEXT: %[[#F:]] = fadd double %[[#C]], %[[#D]] +// LLVM-BASIC-NEXT: %[[#G:]] = insertvalue { double, double } undef, double %[[#E]], 0 +// LLVM-BASIC-NEXT: %{{.+}} = insertvalue { double, double } %[[#G]], double %[[#F]], 1 + +// LLVM-BASIC: %[[#LHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-BASIC-NEXT: %[[#LHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-BASIC-NEXT: %[[#RHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-BASIC-NEXT: %[[#RHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-BASIC-NEXT: %[[#A:]] = mul i32 %[[#LHSR]], %[[#RHSR]] +// LLVM-BASIC-NEXT: %[[#B:]] = mul i32 %[[#LHSI]], %[[#RHSI]] +// LLVM-BASIC-NEXT: %[[#C:]] = mul i32 %[[#LHSR]], %[[#RHSI]] +// LLVM-BASIC-NEXT: %[[#D:]] = mul i32 %[[#LHSI]], %[[#RHSR]] +// LLVM-BASIC-NEXT: %[[#E:]] = sub i32 %[[#A]], %[[#B]] +// LLVM-BASIC-NEXT: %[[#F:]] = add i32 %[[#C]], %[[#D]] +// LLVM-BASIC-NEXT: %[[#G:]] = insertvalue { i32, i32 } undef, i32 %[[#E]], 0 +// LLVM-BASIC-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#G]], i32 %[[#F]], 1 + +// CIRGEN-IMPROVED: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(improved) : !cir.complex +// CIRGEN-IMPROVED: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(improved) : !cir.complex + +// CIR-IMPROVED: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-IMPROVED-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-IMPROVED-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-IMPROVED-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-IMPROVED-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSR]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#B:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSI]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#C:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSI]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#D:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSR]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#E:]] = cir.binop(sub, %[[#A]], %[[#B]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !cir.double +// CIR-IMPROVED-NEXT: %{{.+}} = cir.complex.create %[[#E]], %[[#F]] : !cir.double -> !cir.complex + +// CIR-IMPROVED: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-IMPROVED-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-IMPROVED-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-IMPROVED-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-IMPROVED-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSR]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#B:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSI]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#C:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSI]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#D:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSR]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#E:]] = cir.binop(sub, %[[#A]], %[[#B]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !s32i +// CIR-IMPROVED-NEXT: %{{.+}} = cir.complex.create %[[#E]], %[[#F]] : !s32i -> !cir.complex + +// LLVM-IMPROVED: %[[#LHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-IMPROVED-NEXT: %[[#LHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-IMPROVED-NEXT: %[[#RHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-IMPROVED-NEXT: %[[#RHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-IMPROVED-NEXT: %[[#A:]] = fmul double %[[#LHSR]], %[[#RHSR]] +// LLVM-IMPROVED-NEXT: %[[#B:]] = fmul double %[[#LHSI]], %[[#RHSI]] +// LLVM-IMPROVED-NEXT: %[[#C:]] = fmul double %[[#LHSR]], %[[#RHSI]] +// LLVM-IMPROVED-NEXT: %[[#D:]] = fmul double %[[#LHSI]], %[[#RHSR]] +// LLVM-IMPROVED-NEXT: %[[#E:]] = fsub double %[[#A]], %[[#B]] +// LLVM-IMPROVED-NEXT: %[[#F:]] = fadd double %[[#C]], %[[#D]] +// LLVM-IMPROVED-NEXT: %[[#G:]] = insertvalue { double, double } undef, double %[[#E]], 0 +// LLVM-IMPROVED-NEXT: %{{.+}} = insertvalue { double, double } %[[#G]], double %[[#F]], 1 + +// LLVM-IMPROVED: %[[#LHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-IMPROVED-NEXT: %[[#LHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-IMPROVED-NEXT: %[[#RHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-IMPROVED-NEXT: %[[#RHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-IMPROVED-NEXT: %[[#A:]] = mul i32 %[[#LHSR]], %[[#RHSR]] +// LLVM-IMPROVED-NEXT: %[[#B:]] = mul i32 %[[#LHSI]], %[[#RHSI]] +// LLVM-IMPROVED-NEXT: %[[#C:]] = mul i32 %[[#LHSR]], %[[#RHSI]] +// LLVM-IMPROVED-NEXT: %[[#D:]] = mul i32 %[[#LHSI]], %[[#RHSR]] +// LLVM-IMPROVED-NEXT: %[[#E:]] = sub i32 %[[#A]], %[[#B]] +// LLVM-IMPROVED-NEXT: %[[#F:]] = add i32 %[[#C]], %[[#D]] +// LLVM-IMPROVED-NEXT: %[[#G:]] = insertvalue { i32, i32 } undef, i32 %[[#E]], 0 +// LLVM-IMPROVED-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#G]], i32 %[[#F]], 1 + +// CIRGEN-FULL: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(full) : !cir.complex +// CIRGEN-FULL: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(full) : !cir.complex + +// CIR-FULL: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-FULL-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-FULL-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-FULL-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-FULL-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSR]]) : !cir.double +// CIR-FULL-NEXT: %[[#B:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSI]]) : !cir.double +// CIR-FULL-NEXT: %[[#C:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSI]]) : !cir.double +// CIR-FULL-NEXT: %[[#D:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSR]]) : !cir.double +// CIR-FULL-NEXT: %[[#E:]] = cir.binop(sub, %[[#A]], %[[#B]]) : !cir.double +// CIR-FULL-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !cir.double +// CIR-FULL-NEXT: %[[#RES:]] = cir.complex.create %[[#E]], %[[#F]] : !cir.double -> !cir.complex +// CIR-FULL-NEXT: %[[#COND:]] = cir.cmp(ne, %[[#E]], %[[#E]]) : !cir.double, !cir.bool +// CIR-FULL-NEXT: %{{.+}} = cir.ternary(%[[#COND]], true { +// CIR-FULL-NEXT: %[[#COND2:]] = cir.cmp(ne, %[[#F]], %[[#F]]) : !cir.double, !cir.bool +// CIR-FULL-NEXT: %[[#INNER:]] = cir.ternary(%[[#COND2]], true { +// CIR-FULL-NEXT: %[[#RES2:]] = cir.call @__muldc3(%[[#LHSR]], %[[#LHSI]], %[[#RHSR]], %[[#RHSI]]) : (!cir.double, !cir.double, !cir.double, !cir.double) -> !cir.complex +// CIR-FULL-NEXT: cir.yield %[[#RES2]] : !cir.complex +// CIR-FULL-NEXT: }, false { +// CIR-FULL-NEXT: cir.yield %[[#RES]] : !cir.complex +// CIR-FULL-NEXT: }) : (!cir.bool) -> !cir.complex +// CIR-FULL-NEXT: cir.yield %[[#INNER]] : !cir.complex +// CIR-FULL-NEXT: }, false { +// CIR-FULL-NEXT: cir.yield %[[#RES]] : !cir.complex +// CIR-FULL-NEXT: }) : (!cir.bool) -> !cir.complex + +// CIR-FULL: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-FULL-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-FULL-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-FULL-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-FULL-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSR]]) : !s32i +// CIR-FULL-NEXT: %[[#B:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSI]]) : !s32i +// CIR-FULL-NEXT: %[[#C:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSI]]) : !s32i +// CIR-FULL-NEXT: %[[#D:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSR]]) : !s32i +// CIR-FULL-NEXT: %[[#E:]] = cir.binop(sub, %[[#A]], %[[#B]]) : !s32i +// CIR-FULL-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !s32i +// CIR-FULL-NEXT: %{{.+}} = cir.complex.create %[[#E]], %[[#F]] : !s32i -> !cir.complex + +// LLVM-FULL: %[[#LHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-FULL-NEXT: %[[#LHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-FULL-NEXT: %[[#RHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-FULL-NEXT: %[[#RHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-FULL-NEXT: %[[#A:]] = fmul double %[[#LHSR]], %[[#RHSR]] +// LLVM-FULL-NEXT: %[[#B:]] = fmul double %[[#LHSI]], %[[#RHSI]] +// LLVM-FULL-NEXT: %[[#C:]] = fmul double %[[#LHSR]], %[[#RHSI]] +// LLVM-FULL-NEXT: %[[#D:]] = fmul double %[[#LHSI]], %[[#RHSR]] +// LLVM-FULL-NEXT: %[[#E:]] = fsub double %[[#A]], %[[#B]] +// LLVM-FULL-NEXT: %[[#F:]] = fadd double %[[#C]], %[[#D]] +// LLVM-FULL-NEXT: %[[#G:]] = insertvalue { double, double } undef, double %[[#E]], 0 +// LLVM-FULL-NEXT: %[[#RES:]] = insertvalue { double, double } %[[#G]], double %[[#F]], 1 +// LLVM-FULL-NEXT: %[[#COND:]] = fcmp une double %[[#E]], %[[#E]] +// LLVM-FULL-NEXT: br i1 %[[#COND]], label %[[#LA:]], label %[[#LB:]] +// LLVM-FULL: [[#LA]]: +// LLVM-FULL-NEXT: %[[#H:]] = fcmp une double %[[#F]], %[[#F]] +// LLVM-FULL-NEXT: br i1 %[[#H]], label %[[#LC:]], label %[[#LD:]] +// LLVM-FULL: [[#LC]]: +// LLVM-FULL-NEXT: %[[#RES2:]] = call { double, double } @__muldc3(double %[[#LHSR]], double %[[#LHSI]], double %[[#RHSR]], double %[[#RHSI]]) +// LLVM-FULL-NEXT: br label %[[#LE:]] +// LLVM-FULL: [[#LD]]: +// LLVM-FULL-NEXT: br label %[[#LE]] +// LLVM-FULL: [[#LE]]: +// LLVM-FULL-NEXT: %[[#RES3:]] = phi { double, double } [ %[[#RES]], %[[#LD]] ], [ %[[#RES2]], %[[#LC]] ] +// LLVM-FULL-NEXT: br label %[[#LF:]] +// LLVM-FULL: [[#LF]]: +// LLVM-FULL-NEXT: br label %[[#LG:]] +// LLVM-FULL: [[#LB]]: +// LLVM-FULL-NEXT: br label %[[#LG]] +// LLVM-FULL: [[#LG]]: +// LLVM-FULL-NEXT: %26 = phi { double, double } [ %[[#RES]], %[[#LB]] ], [ %[[#RES3]], %[[#LF]] ] + +// LLVM-FULL: %[[#LHSR:]] = extractvalue { i32, i32 } %28, 0 +// LLVM-FULL-NEXT: %[[#LHSI:]] = extractvalue { i32, i32 } %28, 1 +// LLVM-FULL-NEXT: %[[#RHSR:]] = extractvalue { i32, i32 } %29, 0 +// LLVM-FULL-NEXT: %[[#RHSI:]] = extractvalue { i32, i32 } %29, 1 +// LLVM-FULL-NEXT: %[[#A:]] = mul i32 %[[#LHSR]], %[[#RHSR]] +// LLVM-FULL-NEXT: %[[#B:]] = mul i32 %[[#LHSI]], %[[#RHSI]] +// LLVM-FULL-NEXT: %[[#C:]] = mul i32 %[[#LHSR]], %[[#RHSI]] +// LLVM-FULL-NEXT: %[[#D:]] = mul i32 %[[#LHSI]], %[[#RHSR]] +// LLVM-FULL-NEXT: %[[#E:]] = sub i32 %[[#A]], %[[#B]] +// LLVM-FULL-NEXT: %[[#F:]] = add i32 %[[#C]], %[[#D]] +// LLVM-FULL-NEXT: %[[#G:]] = insertvalue { i32, i32 } undef, i32 %[[#E]], 0 +// LLVM-FULL-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#G]], i32 %[[#F]], 1 + +// CHECK: } + +void div() { + cd1 = cd1 / cd2; + ci1 = ci1 / ci2; +} + +// CLANG: @div +// CPPLANG: @_Z3divv + +// CIRGEN-BASIC: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(basic) : !cir.complex +// CIRGEN-BASIC: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(basic) : !cir.complex + +// CIR-BASIC: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-BASIC-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-BASIC-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-BASIC-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-BASIC-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSR]]) : !cir.double +// CIR-BASIC-NEXT: %[[#B:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSI]]) : !cir.double +// CIR-BASIC-NEXT: %[[#C:]] = cir.binop(mul, %[[#RHSR]], %[[#RHSR]]) : !cir.double +// CIR-BASIC-NEXT: %[[#D:]] = cir.binop(mul, %[[#RHSI]], %[[#RHSI]]) : !cir.double +// CIR-BASIC-NEXT: %[[#E:]] = cir.binop(add, %[[#A]], %[[#B]]) : !cir.double +// CIR-BASIC-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !cir.double +// CIR-BASIC-NEXT: %[[#G:]] = cir.binop(div, %[[#E]], %[[#F]]) : !cir.double +// CIR-BASIC-NEXT: %[[#H:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSR]]) : !cir.double +// CIR-BASIC-NEXT: %[[#I:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSI]]) : !cir.double +// CIR-BASIC-NEXT: %[[#J:]] = cir.binop(sub, %[[#H]], %[[#I]]) : !cir.double +// CIR-BASIC-NEXT: %[[#K:]] = cir.binop(div, %[[#J]], %[[#F]]) : !cir.double +// CIR-BASIC-NEXT: %{{.+}} = cir.complex.create %[[#G]], %[[#K]] : !cir.double -> !cir.complex + +// CIR-BASIC: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-BASIC-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-BASIC-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-BASIC-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-BASIC-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSR]]) : !s32i +// CIR-BASIC-NEXT: %[[#B:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSI]]) : !s32i +// CIR-BASIC-NEXT: %[[#C:]] = cir.binop(mul, %[[#RHSR]], %[[#RHSR]]) : !s32i +// CIR-BASIC-NEXT: %[[#D:]] = cir.binop(mul, %[[#RHSI]], %[[#RHSI]]) : !s32i +// CIR-BASIC-NEXT: %[[#E:]] = cir.binop(add, %[[#A]], %[[#B]]) : !s32i +// CIR-BASIC-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !s32i +// CIR-BASIC-NEXT: %[[#G:]] = cir.binop(div, %[[#E]], %[[#F]]) : !s32i +// CIR-BASIC-NEXT: %[[#H:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSR]]) : !s32i +// CIR-BASIC-NEXT: %[[#I:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSI]]) : !s32i +// CIR-BASIC-NEXT: %[[#J:]] = cir.binop(sub, %[[#H]], %[[#I]]) : !s32i +// CIR-BASIC-NEXT: %[[#K:]] = cir.binop(div, %[[#J]], %[[#F]]) : !s32i +// CIR-BASIC-NEXT: %{{.+}} = cir.complex.create %[[#G]], %[[#K]] : !s32i -> !cir.complex + +// LLVM-BASIC: %[[#LHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-BASIC-NEXT: %[[#LHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-BASIC-NEXT: %[[#RHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-BASIC-NEXT: %[[#RHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-BASIC-NEXT: %[[#A:]] = fmul double %[[#LHSR]], %[[#RHSR]] +// LLVM-BASIC-NEXT: %[[#B:]] = fmul double %[[#LHSI]], %[[#RHSI]] +// LLVM-BASIC-NEXT: %[[#C:]] = fmul double %[[#RHSR]], %[[#RHSR]] +// LLVM-BASIC-NEXT: %[[#D:]] = fmul double %[[#RHSI]], %[[#RHSI]] +// LLVM-BASIC-NEXT: %[[#E:]] = fadd double %[[#A]], %[[#B]] +// LLVM-BASIC-NEXT: %[[#F:]] = fadd double %[[#C]], %[[#D]] +// LLVM-BASIC-NEXT: %[[#G:]] = fdiv double %[[#E]], %[[#F]] +// LLVM-BASIC-NEXT: %[[#H:]] = fmul double %[[#LHSI]], %[[#RHSR]] +// LLVM-BASIC-NEXT: %[[#I:]] = fmul double %[[#LHSR]], %[[#RHSI]] +// LLVM-BASIC-NEXT: %[[#J:]] = fsub double %[[#H]], %[[#I]] +// LLVM-BASIC-NEXT: %[[#K:]] = fdiv double %[[#J]], %[[#F]] +// LLVM-BASIC-NEXT: %[[#L:]] = insertvalue { double, double } undef, double %[[#G]], 0 +// LLVM-BASIC-NEXT: %{{.+}} = insertvalue { double, double } %[[#L]], double %[[#K]], 1 + +// LLVM-BASIC: %[[#LHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-BASIC-NEXT: %[[#LHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-BASIC-NEXT: %[[#RHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-BASIC-NEXT: %[[#RHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-BASIC-NEXT: %[[#A:]] = mul i32 %[[#LHSR]], %[[#RHSR]] +// LLVM-BASIC-NEXT: %[[#B:]] = mul i32 %[[#LHSI]], %[[#RHSI]] +// LLVM-BASIC-NEXT: %[[#C:]] = mul i32 %[[#RHSR]], %[[#RHSR]] +// LLVM-BASIC-NEXT: %[[#D:]] = mul i32 %[[#RHSI]], %[[#RHSI]] +// LLVM-BASIC-NEXT: %[[#E:]] = add i32 %[[#A]], %[[#B]] +// LLVM-BASIC-NEXT: %[[#F:]] = add i32 %[[#C]], %[[#D]] +// LLVM-BASIC-NEXT: %[[#G:]] = sdiv i32 %[[#E]], %[[#F]] +// LLVM-BASIC-NEXT: %[[#H:]] = mul i32 %[[#LHSI]], %[[#RHSR]] +// LLVM-BASIC-NEXT: %[[#I:]] = mul i32 %[[#LHSR]], %[[#RHSI]] +// LLVM-BASIC-NEXT: %[[#J:]] = sub i32 %[[#H]], %[[#I]] +// LLVM-BASIC-NEXT: %[[#K:]] = sdiv i32 %[[#J]], %[[#F]] +// LLVM-BASIC-NEXT: %[[#L:]] = insertvalue { i32, i32 } undef, i32 %[[#G]], 0 +// LLVM-BASIC-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#L]], i32 %[[#K]], 1 + +// CIRGEN-IMPROVED: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(improved) : !cir.complex +// CIRGEN-IMPROVED: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(improved) : !cir.complex + +// CIR-IMPROVED: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-IMPROVED-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-IMPROVED-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-IMPROVED-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-IMPROVED-NEXT: %[[#A:]] = cir.fabs %[[#RHSR]] : !cir.double +// CIR-IMPROVED-NEXT: %[[#B:]] = cir.fabs %[[#RHSI]] : !cir.double +// CIR-IMPROVED-NEXT: %[[#C:]] = cir.cmp(ge, %[[#A]], %[[#B]]) : !cir.double, !cir.bool +// CIR-IMPROVED-NEXT: %{{.+}} = cir.ternary(%[[#C]], true { +// CIR-IMPROVED-NEXT: %[[#D:]] = cir.binop(div, %[[#RHSI]], %[[#RHSR]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#E:]] = cir.binop(mul, %[[#D]], %[[#RHSI]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#F:]] = cir.binop(add, %[[#RHSR]], %[[#E]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#G:]] = cir.binop(mul, %[[#LHSI]], %[[#D]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#H:]] = cir.binop(add, %[[#LHSR]], %[[#G]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#I:]] = cir.binop(div, %[[#H]], %[[#F]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#J:]] = cir.binop(mul, %[[#LHSR]], %[[#D]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#K:]] = cir.binop(sub, %[[#LHSI]], %[[#J]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#L:]] = cir.binop(div, %[[#K]], %[[#F]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#M:]] = cir.complex.create %[[#I]], %[[#L]] : !cir.double -> !cir.complex +// CIR-IMPROVED-NEXT: cir.yield %[[#M]] : !cir.complex +// CIR-IMPROVED-NEXT: }, false { +// CIR-IMPROVED-NEXT: %[[#D:]] = cir.binop(div, %[[#RHSR]], %[[#RHSI]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#E:]] = cir.binop(mul, %[[#D]], %[[#RHSR]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#F:]] = cir.binop(add, %[[#RHSI]], %[[#E]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#G:]] = cir.binop(mul, %[[#LHSR]], %[[#D]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#H:]] = cir.binop(add, %[[#G]], %[[#LHSI]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#I:]] = cir.binop(div, %[[#H]], %[[#F]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#J:]] = cir.binop(mul, %[[#LHSI]], %[[#D]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#K:]] = cir.binop(sub, %[[#J]], %4) : !cir.double +// CIR-IMPROVED-NEXT: %[[#L:]] = cir.binop(div, %[[#K]], %[[#F]]) : !cir.double +// CIR-IMPROVED-NEXT: %[[#M:]] = cir.complex.create %[[#I]], %[[#L]] : !cir.double -> !cir.complex +// CIR-IMPROVED-NEXT: cir.yield %[[#M]] : !cir.complex +// CIR-IMPROVED-NEXT: }) : (!cir.bool) -> !cir.complex + +// CIR-IMPROVED: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-IMPROVED-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-IMPROVED-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-IMPROVED-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-IMPROVED-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSR]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#B:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSI]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#C:]] = cir.binop(mul, %[[#RHSR]], %[[#RHSR]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#D:]] = cir.binop(mul, %[[#RHSI]], %[[#RHSI]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#E:]] = cir.binop(add, %[[#A]], %[[#B]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#G:]] = cir.binop(div, %[[#E]], %[[#F]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#H:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSR]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#I:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSI]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#J:]] = cir.binop(sub, %[[#H]], %[[#I]]) : !s32i +// CIR-IMPROVED-NEXT: %[[#K:]] = cir.binop(div, %[[#J]], %[[#F]]) : !s32i +// CIR-IMPROVED-NEXT: %{{.+}} = cir.complex.create %[[#G]], %[[#K]] : !s32i -> !cir.complex + +// LLVM-IMPROVED: %[[#LHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-IMPROVED-NEXT: %[[#LHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-IMPROVED-NEXT: %[[#RHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-IMPROVED-NEXT: %[[#RHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-IMPROVED-NEXT: %[[#A:]] = call double @llvm.fabs.f64(double %[[#RHSR]]) +// LLVM-IMPROVED-NEXT: %[[#B:]] = call double @llvm.fabs.f64(double %[[#RHSI]]) +// LLVM-IMPROVED-NEXT: %[[#C:]] = fcmp oge double %[[#A]], %[[#B]] +// LLVM-IMPROVED-NEXT: br i1 %[[#C]], label %[[#LA:]], label %[[#LB:]] +// LLVM-IMPROVED: [[#LA]]: +// LLVM-IMPROVED-NEXT: %[[#D:]] = fdiv double %[[#RHSI]], %[[#RHSR]] +// LLVM-IMPROVED-NEXT: %[[#E:]] = fmul double %[[#D]], %[[#RHSI]] +// LLVM-IMPROVED-NEXT: %[[#F:]] = fadd double %[[#RHSR]], %[[#E]] +// LLVM-IMPROVED-NEXT: %[[#G:]] = fmul double %[[#LHSI]], %[[#D]] +// LLVM-IMPROVED-NEXT: %[[#H:]] = fadd double %[[#LHSR]], %[[#G]] +// LLVM-IMPROVED-NEXT: %[[#I:]] = fdiv double %[[#H]], %[[#F]] +// LLVM-IMPROVED-NEXT: %[[#J:]] = fmul double %[[#LHSR]], %[[#D]] +// LLVM-IMPROVED-NEXT: %[[#K:]] = fsub double %[[#LHSI]], %[[#J]] +// LLVM-IMPROVED-NEXT: %[[#L:]] = fdiv double %[[#K]], %[[#F]] +// LLVM-IMPROVED-NEXT: %[[#M:]] = insertvalue { double, double } undef, double %[[#I]], 0 +// LLVM-IMPROVED-NEXT: %[[#N1:]] = insertvalue { double, double } %[[#M]], double %[[#L]], 1 +// LLVM-IMPROVED-NEXT: br label %[[#LC:]] +// LLVM-IMPROVED: [[#LB]]: +// LLVM-IMPROVED-NEXT: %[[#D:]] = fdiv double %[[#RHSR]], %[[#RHSI]] +// LLVM-IMPROVED-NEXT: %[[#E:]] = fmul double %[[#D]], %[[#RHSR]] +// LLVM-IMPROVED-NEXT: %[[#F:]] = fadd double %[[#RHSI]], %[[#E]] +// LLVM-IMPROVED-NEXT: %[[#G:]] = fmul double %[[#LHSR]], %[[#D]] +// LLVM-IMPROVED-NEXT: %[[#H:]] = fadd double %[[#G]], %[[#LHSI]] +// LLVM-IMPROVED-NEXT: %[[#I:]] = fdiv double %[[#H]], %[[#F]] +// LLVM-IMPROVED-NEXT: %[[#J:]] = fmul double %[[#LHSI]], %[[#D]] +// LLVM-IMPROVED-NEXT: %[[#K:]] = fsub double %[[#J]], %[[#LHSR]] +// LLVM-IMPROVED-NEXT: %[[#L:]] = fdiv double %[[#K]], %[[#F]] +// LLVM-IMPROVED-NEXT: %[[#M:]] = insertvalue { double, double } undef, double %[[#I]], 0 +// LLVM-IMPROVED-NEXT: %[[#N2:]] = insertvalue { double, double } %[[#M]], double %[[#L]], 1 +// LLVM-IMPROVED-NEXT: br label %[[#LC]] +// LLVM-IMPROVED: [[#LC]]: +// LLVM-IMPROVED-NEXT: %{{.+}} = phi { double, double } [ %[[#N2]], %[[#LB]] ], [ %[[#N1]], %[[#LA]] ] + +// LLVM-IMPROVED: %[[#LHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-IMPROVED-NEXT: %[[#LHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-IMPROVED-NEXT: %[[#RHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-IMPROVED-NEXT: %[[#RHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-IMPROVED-NEXT: %[[#A:]] = mul i32 %[[#LHSR]], %[[#RHSR]] +// LLVM-IMPROVED-NEXT: %[[#B:]] = mul i32 %[[#LHSI]], %[[#RHSI]] +// LLVM-IMPROVED-NEXT: %[[#C:]] = mul i32 %[[#RHSR]], %[[#RHSR]] +// LLVM-IMPROVED-NEXT: %[[#D:]] = mul i32 %[[#RHSI]], %[[#RHSI]] +// LLVM-IMPROVED-NEXT: %[[#E:]] = add i32 %[[#A]], %[[#B]] +// LLVM-IMPROVED-NEXT: %[[#F:]] = add i32 %[[#C]], %[[#D]] +// LLVM-IMPROVED-NEXT: %[[#G:]] = sdiv i32 %[[#E]], %[[#F]] +// LLVM-IMPROVED-NEXT: %[[#H:]] = mul i32 %[[#LHSI]], %[[#RHSR]] +// LLVM-IMPROVED-NEXT: %[[#I:]] = mul i32 %[[#LHSR]], %[[#RHSI]] +// LLVM-IMPROVED-NEXT: %[[#J:]] = sub i32 %[[#H]], %[[#I]] +// LLVM-IMPROVED-NEXT: %[[#K:]] = sdiv i32 %[[#J]], %[[#F]] +// LLVM-IMPROVED-NEXT: %[[#L:]] = insertvalue { i32, i32 } undef, i32 %[[#G]], 0 +// LLVM-IMPROVED-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#L]], i32 %[[#K]], 1 + +// CIRGEN-FULL: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(full) : !cir.complex +// CIRGEN-FULL: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(full) : !cir.complex + +// CIR-FULL: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-FULL-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-FULL-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-FULL-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-FULL-NEXT: %{{.+}} = cir.call @__divdc3(%[[#LHSR]], %[[#LHSI]], %[[#RHSR]], %[[#RHSI]]) : (!cir.double, !cir.double, !cir.double, !cir.double) -> !cir.complex + +// CIR-FULL: %[[#LHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-FULL-NEXT: %[[#LHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-FULL-NEXT: %[[#RHSR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-FULL-NEXT: %[[#RHSI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-FULL-NEXT: %[[#A:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSR]]) : !s32i +// CIR-FULL-NEXT: %[[#B:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSI]]) : !s32i +// CIR-FULL-NEXT: %[[#C:]] = cir.binop(mul, %[[#RHSR]], %[[#RHSR]]) : !s32i +// CIR-FULL-NEXT: %[[#D:]] = cir.binop(mul, %[[#RHSI]], %[[#RHSI]]) : !s32i +// CIR-FULL-NEXT: %[[#E:]] = cir.binop(add, %[[#A]], %[[#B]]) : !s32i +// CIR-FULL-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !s32i +// CIR-FULL-NEXT: %[[#G:]] = cir.binop(div, %[[#E]], %[[#F]]) : !s32i +// CIR-FULL-NEXT: %[[#H:]] = cir.binop(mul, %[[#LHSI]], %[[#RHSR]]) : !s32i +// CIR-FULL-NEXT: %[[#I:]] = cir.binop(mul, %[[#LHSR]], %[[#RHSI]]) : !s32i +// CIR-FULL-NEXT: %[[#J:]] = cir.binop(sub, %[[#H]], %[[#I]]) : !s32i +// CIR-FULL-NEXT: %[[#K:]] = cir.binop(div, %[[#J]], %[[#F]]) : !s32i +// CIR-FULL-NEXT: %{{.+}} = cir.complex.create %[[#G]], %[[#K]] : !s32i -> !cir.complex + +// LLVM-FULL: %[[#LHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-FULL-NEXT: %[[#LHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-FULL-NEXT: %[[#RHSR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-FULL-NEXT: %[[#RHSI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-FULL-NEXT: %{{.+}} = call { double, double } @__divdc3(double %[[#LHSR]], double %[[#LHSI]], double %[[#RHSR]], double %[[#RHSI]]) + +// LLVM-FULL: %[[#LHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-FULL-NEXT: %[[#LHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-FULL-NEXT: %[[#RHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-FULL-NEXT: %[[#RHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-FULL-NEXT: %[[#A:]] = mul i32 %[[#LHSR]], %[[#RHSR]] +// LLVM-FULL-NEXT: %[[#B:]] = mul i32 %[[#LHSI]], %[[#RHSI]] +// LLVM-FULL-NEXT: %[[#C:]] = mul i32 %[[#RHSR]], %[[#RHSR]] +// LLVM-FULL-NEXT: %[[#D:]] = mul i32 %[[#RHSI]], %[[#RHSI]] +// LLVM-FULL-NEXT: %[[#E:]] = add i32 %[[#A]], %[[#B]] +// LLVM-FULL-NEXT: %[[#F:]] = add i32 %[[#C]], %[[#D]] +// LLVM-FULL-NEXT: %[[#G:]] = sdiv i32 %[[#E]], %[[#F]] +// LLVM-FULL-NEXT: %[[#H:]] = mul i32 %[[#LHSI]], %[[#RHSR]] +// LLVM-FULL-NEXT: %[[#I:]] = mul i32 %[[#LHSR]], %[[#RHSI]] +// LLVM-FULL-NEXT: %[[#J:]] = sub i32 %[[#H]], %[[#I]] +// LLVM-FULL-NEXT: %[[#K:]] = sdiv i32 %[[#J]], %[[#F]] +// LLVM-FULL-NEXT: %[[#L:]] = insertvalue { i32, i32 } undef, i32 %[[#G]], 0 +// LLVM-FULL-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#L]], i32 %[[#K]], 1 + +// CHECK: } + +void add_assign() { + cd1 += cd2; + ci1 += ci2; +} + +// CLANG: @add_assign +// CPPLANG: @_Z10add_assignv + +// CIRGEN: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.complex +// CIRGEN: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.complex + +// CHECK: } + +void sub_assign() { + cd1 -= cd2; + ci1 -= ci2; +} + +// CLANG: @sub_assign +// CPPLANG: @_Z10sub_assignv + +// CIRGEN: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.complex +// CIRGEN: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.complex + +// CHECK: } + +void mul_assign() { + cd1 *= cd2; + ci1 *= ci2; +} + +// CLANG: @mul_assign +// CPPLANG: @_Z10mul_assignv + +// CIRGEN-BASIC: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(basic) : !cir.complex +// CIRGEN-BASIC: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(basic) : !cir.complex + +// CIRGEN-IMPROVED: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(improved) : !cir.complex +// CIRGEN-IMPROVED: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(improved) : !cir.complex + +// CIRGEN-FULL: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(full) : !cir.complex +// CIRGEN-FULL: %{{.+}} = cir.complex.binop mul %{{.+}}, %{{.+}} range(full) : !cir.complex + +// CHECK: } + +void div_assign() { + cd1 /= cd2; + ci1 /= ci2; +} + +// CLANG: @div_assign +// CPPLANG: @_Z10div_assignv + +// CIRGEN-BASIC: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(basic) : !cir.complex +// CIRGEN-BASIC: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(basic) : !cir.complex + +// CIRGEN-IMPROVED: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(improved) : !cir.complex +// CIRGEN-IMPROVED: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(improved) : !cir.complex + +// CIRGEN-FULL: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(full) : !cir.complex +// CIRGEN-FULL: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(full) : !cir.complex + +// CHECK: } From 608389007e61c85837f7ed081073cab01e4d2706 Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 18 Jul 2024 07:39:09 +0800 Subject: [PATCH 1692/2301] [CIR][Transforms][TargetLowering][NFC] Add SPIR-V skeleton (#737) This NFC PR adds the SPIR-V `TargetLoweringInfo` with ABI stuff unimplemented. It's useful for other target-specific information to land first. --- .../Transforms/TargetLowering/CMakeLists.txt | 1 + .../Transforms/TargetLowering/LowerModule.cpp | 2 + .../Transforms/TargetLowering/TargetInfo.h | 3 ++ .../TargetLowering/Targets/SPIR.cpp | 54 +++++++++++++++++++ 4 files changed, 60 insertions(+) create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt index 6b3eea6032cd..218656c3b144 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt @@ -13,6 +13,7 @@ add_clang_library(TargetLowering TargetInfo.cpp TargetLoweringInfo.cpp Targets/AArch64.cpp + Targets/SPIR.cpp Targets/X86.cpp Targets/LoweringPrepareAArch64CXXABI.cpp Targets/LoweringPrepareItaniumCXXABI.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 0994497c8dda..e0e53edfc1e2 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -80,6 +80,8 @@ createTargetLoweringInfo(LowerModule &LM) { return createX86_64TargetLoweringInfo(LM, X86AVXABILevel::None); } } + case llvm::Triple::spirv64: + return createSPIRVTargetLoweringInfo(LM); default: llvm_unreachable("ABI NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h index e0e984fcbc70..4350458eeed2 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h @@ -30,6 +30,9 @@ std::unique_ptr createAArch64TargetLoweringInfo(LowerModule &CGM, ::cir::AArch64ABIKind AVXLevel); +std::unique_ptr +createSPIRVTargetLoweringInfo(LowerModule &CGM); + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp new file mode 100644 index 000000000000..974b4d3d27aa --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp @@ -0,0 +1,54 @@ +//===- SPIR.cpp - TargetInfo for SPIR and SPIR-V --------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "ABIInfoImpl.h" +#include "LowerFunctionInfo.h" +#include "LowerTypes.h" +#include "TargetInfo.h" +#include "TargetLoweringInfo.h" +#include "clang/CIR/ABIArgInfo.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" + +using ABIArgInfo = ::cir::ABIArgInfo; +using MissingFeature = ::cir::MissingFeatures; + +namespace mlir { +namespace cir { + +//===----------------------------------------------------------------------===// +// SPIR-V ABI Implementation +//===----------------------------------------------------------------------===// + +namespace { + +class SPIRVABIInfo : public ABIInfo { +public: + SPIRVABIInfo(LowerTypes <) : ABIInfo(LT) {} + +private: + void computeInfo(LowerFunctionInfo &FI) const override { + llvm_unreachable("ABI NYI"); + } +}; + +class SPIRVTargetLoweringInfo : public TargetLoweringInfo { +public: + SPIRVTargetLoweringInfo(LowerTypes <) + : TargetLoweringInfo(std::make_unique(LT)) {} +}; + +} // namespace + +std::unique_ptr +createSPIRVTargetLoweringInfo(LowerModule &lowerModule) { + return std::make_unique(lowerModule.getTypes()); +} + +} // namespace cir +} // namespace mlir From 69ea613d3a275fc1723a04671793ba853fc3d6cb Mon Sep 17 00:00:00 2001 From: roro47 <40341016+roro47@users.noreply.github.com> Date: Thu, 18 Jul 2024 00:41:14 +0100 Subject: [PATCH 1693/2301] [CIR] Add MLIR visibility for weak and weak_odr linkage (#744) --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 ++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 2 ++ clang/test/CIR/CodeGen/weak.c | 9 +++++++++ 3 files changed, 13 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 81d0482a07a0..10e047dd1c35 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1757,6 +1757,8 @@ mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibilityFromCIRLinkage( case mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage: case mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage: case mlir::cir::GlobalLinkageKind::CommonLinkage: + case mlir::cir::GlobalLinkageKind::WeakAnyLinkage: + case mlir::cir::GlobalLinkageKind::WeakODRLinkage: return mlir::SymbolTable::Visibility::Public; default: { llvm::errs() << "visibility not implemented for '" diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f759e03fc880..a96079eb03c1 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1746,6 +1746,8 @@ LogicalResult GlobalOp::verify() { case GlobalLinkageKind::LinkOnceODRLinkage: case GlobalLinkageKind::LinkOnceAnyLinkage: case GlobalLinkageKind::CommonLinkage: + case GlobalLinkageKind::WeakAnyLinkage: + case GlobalLinkageKind::WeakODRLinkage: // FIXME: mlir's concept of visibility gets tricky with LLVM ones, // for instance, symbol declarations cannot be "public", so we // have to mark them "private" to workaround the symbol verifier. diff --git a/clang/test/CIR/CodeGen/weak.c b/clang/test/CIR/CodeGen/weak.c index 02adfeb53de2..dcee29f31934 100644 --- a/clang/test/CIR/CodeGen/weak.c +++ b/clang/test/CIR/CodeGen/weak.c @@ -11,6 +11,9 @@ void active (void) A(); } +// LLVM: @y = weak_odr global +// LLVM: @x = weak global + // CIR: cir.func extern_weak private @B() // CIR: cir.func @active() // CIR-NEXT: cir.call @B() : () -> () @@ -18,3 +21,9 @@ void active (void) // LLVM: declare !dbg !{{.}} extern_weak void @B() // LLVM: define void @active() // LLVM-NEXT: call void @B() + +int __attribute__((selectany)) y; +// CIR: cir.global weak_odr @y + +int __attribute__((weak)) x; +// CIR: cir.global weak From df6fd9c4f2dc2d312d5d0233aa3e916d24f6d5a5 Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 18 Jul 2024 07:42:21 +0800 Subject: [PATCH 1694/2301] [CIR][DirectToLLVM][NFC] Add include of target lowering library to DirectToLLVM (#745) TargetLowering should also serve for DirectToLLVM for target-specific information. The library is already linked against DirectToLLVM, but we have to write dirty includes like `#include "../../Dialect/Transforms/TargetLowering/LowerModule.h"`. This PR adds a private include directory `clang/lib/CIR/Dialect/Transforms/TargetLowering` to the target DirectToLLVM. Then we can simplify the include directive above to `#include "LowerModule.h"`. --- clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index 6205c9047c0c..1755bcdcd470 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -42,3 +42,7 @@ add_clang_library(clangCIRLoweringDirectToLLVM MLIROpenMPDialect MLIROpenMPToLLVMIRTranslation ) + +target_include_directories(clangCIRLoweringDirectToLLVM PRIVATE + ${CLANG_SOURCE_DIR}/lib/CIR/Dialect/Transforms/TargetLowering + ) From e6e0b6754b157f30888c3ddb4b96d5053b439afa Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 18 Jul 2024 07:43:01 +0800 Subject: [PATCH 1695/2301] [CIR][Builder][NFC] Refactor `getPointerTo` considering AddressSpaceAttr (#746) After switching to a separate address space attribute in CIR, we need better helper methods in CIR builder. This PR provides: * two versions of `getPointerTo`, consuming `LangAS` or `AddressSpaceAttr`. * an extra helper method `getAddrSpaceAttr(LangAS)` that is used by `getPointerTo(LangAS)` and other potential use cases (mainly about address space cast in the future). Calls to `getPointerTo` without addrspace will invoke the version consuming `AddressSpaceAttr` and return a type with null addrspace attribute, which should be exactly what we expect for CIRBuilder. --- .../clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index d6f6ce972f41..b5a88008446b 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -75,13 +75,19 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return mlir::cir::IntType::get(getContext(), N, true); } - mlir::cir::PointerType - getPointerTo(mlir::Type ty, clang::LangAS langAS = clang::LangAS::Default) { - mlir::cir::AddressSpaceAttr addrSpaceAttr; - if (langAS != clang::LangAS::Default) - addrSpaceAttr = mlir::cir::AddressSpaceAttr::get(getContext(), langAS); + mlir::cir::AddressSpaceAttr getAddrSpaceAttr(clang::LangAS langAS) { + if (langAS == clang::LangAS::Default) + return {}; + return mlir::cir::AddressSpaceAttr::get(getContext(), langAS); + } + + mlir::cir::PointerType getPointerTo(mlir::Type ty, + mlir::cir::AddressSpaceAttr cirAS = {}) { + return mlir::cir::PointerType::get(getContext(), ty, cirAS); + } - return mlir::cir::PointerType::get(getContext(), ty, addrSpaceAttr); + mlir::cir::PointerType getPointerTo(mlir::Type ty, clang::LangAS langAS) { + return getPointerTo(ty, getAddrSpaceAttr(langAS)); } mlir::cir::PointerType From 04063b3d0ce25878b538bb4f37927a02717865a8 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 19 Jul 2024 03:21:04 +0800 Subject: [PATCH 1696/2301] [CIR][CodeGen][NFC] Refactor `performAddrSpaceCast` to consume CIR address space attributes (#747) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Originally `TargetCodeGenInfo::performAddrSpaceCast` consumes two *unused* parameters typed `clang::LangAS`. This PR changes its type to `mlir::cir::AddressSpaceAttr` to better fit the need of CIRGen. In [D32248: CodeGen: Cast alloca to expected address space](https://reviews.llvm.org/D32248), the author explained why these AS parameters are not used: > This is just the default implementation. The idea is that targets that need to do something more complex on a particular conversion — e.g. to make sure that null pointers are translated correctly when they have different bit-patterns — can easily do so. Further more, I'm confident that the CIR AS is also capable of providing custom behaviors like above for those targets. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 5 +++-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 6 ++++-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 7 +++++-- clang/lib/CIR/CodeGen/TargetInfo.cpp | 5 +++-- clang/lib/CIR/CodeGen/TargetInfo.h | 9 +++++---- 5 files changed, 20 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index e4c8201d2dca..441c56d9aefb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1055,8 +1055,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // the AST level this is handled within CreateTempAlloca et al., but for the // builtin / dynamic alloca we have to handle it here. assert(!MissingFeatures::addressSpace()); - LangAS AAS = getASTAllocaAddressSpace(); - LangAS EAS = E->getType()->getPointeeType().getAddressSpace(); + auto AAS = builder.getAddrSpaceAttr(getASTAllocaAddressSpace()); + auto EAS = builder.getAddrSpaceAttr( + E->getType()->getPointeeType().getAddressSpace()); if (EAS != AAS) { assert(false && "Non-default address space for alloca NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 6d3861466ab2..d6045548bfd6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2003,9 +2003,11 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { case CK_AddressSpaceConversion: { LValue LV = buildLValue(E->getSubExpr()); QualType DestTy = getContext().getPointerType(E->getType()); + auto SrcAS = builder.getAddrSpaceAttr( + E->getSubExpr()->getType().getAddressSpace()); + auto DestAS = builder.getAddrSpaceAttr(E->getType().getAddressSpace()); mlir::Value V = getTargetHooks().performAddrSpaceCast( - *this, LV.getPointer(), E->getSubExpr()->getType().getAddressSpace(), - E->getType().getAddressSpace(), ConvertType(DestTy)); + *this, LV.getPointer(), SrcAS, DestAS, ConvertType(DestTy)); assert(!MissingFeatures::tbaa()); return makeAddrLValue(Address(V, getTypes().convertTypeForMem(E->getType()), LV.getAddress().getAlignment()), diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 962d74acd498..3d4eb82adfa1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1604,9 +1604,12 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } // Since target may map different address spaces in AST to the same address // space, an address space conversion may end up as a bitcast. + auto SrcAS = CGF.builder.getAddrSpaceAttr( + E->getType()->getPointeeType().getAddressSpace()); + auto DestAS = CGF.builder.getAddrSpaceAttr( + DestTy->getPointeeType().getAddressSpace()); return CGF.CGM.getTargetCIRGenInfo().performAddrSpaceCast( - CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(), - DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy)); + CGF, Visit(E), SrcAS, DestAS, ConvertType(DestTy)); } case CK_AtomicToNonAtomic: llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 47f5a57e3d9b..ba81ad88c435 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -530,8 +530,9 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { } mlir::Value TargetCIRGenInfo::performAddrSpaceCast( - CIRGenFunction &CGF, mlir::Value Src, clang::LangAS SrcAddr, - clang::LangAS DestAddr, mlir::Type DestTy, bool IsNonNull) const { + CIRGenFunction &CGF, mlir::Value Src, mlir::cir::AddressSpaceAttr SrcAddr, + mlir::cir::AddressSpaceAttr DestAddr, mlir::Type DestTy, + bool IsNonNull) const { // Since target may map different address spaces in AST to the same address // space, an address space conversion may end up as a bitcast. if (auto globalOp = Src.getDefiningOp()) diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index 395ef52ab4c8..21cf9b78d35e 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -17,6 +17,7 @@ #include "ABIInfo.h" #include "CIRGenValue.h" #include "mlir/IR/Types.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include @@ -68,13 +69,13 @@ class TargetCIRGenInfo { /// Perform address space cast of an expression of pointer type. /// \param V is the value to be casted to another address space. - /// \param SrcAddr is the language address space of \p V. - /// \param DestAddr is the targeted language address space. + /// \param SrcAddr is the CIR address space of \p V. + /// \param DestAddr is the targeted CIR address space. /// \param DestTy is the destination pointer type. /// \param IsNonNull is the flag indicating \p V is known to be non null. virtual mlir::Value performAddrSpaceCast(CIRGenFunction &CGF, mlir::Value V, - clang::LangAS SrcAddr, - clang::LangAS DestAddr, + mlir::cir::AddressSpaceAttr SrcAddr, + mlir::cir::AddressSpaceAttr DestAddr, mlir::Type DestTy, bool IsNonNull = false) const; From c6b242e987070d350c0e6d104a8e183f0508448e Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 19 Jul 2024 03:21:47 +0800 Subject: [PATCH 1697/2301] [CIR] Add support for complex related intrinsics (#748) This PR adds CIRGen for the following complex related intrinsics: - `__builtin_complex`, - `__builtin_creal`, and - `__builtin_cimag`. The generated CIR does not include any new ops so LLVM IR lowering is already done. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 41 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 9 +++- clang/test/CIR/CodeGen/complex.c | 47 +++++++++++++++++++++ 3 files changed, 96 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 441c56d9aefb..aa0793a57282 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -718,6 +718,47 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, switch (BuiltinIDIfNoAsmLabel) { default: break; + + case Builtin::BI__builtin_complex: { + mlir::Value Real = buildScalarExpr(E->getArg(0)); + mlir::Value Imag = buildScalarExpr(E->getArg(1)); + mlir::Value Complex = + builder.createComplexCreate(getLoc(E->getExprLoc()), Real, Imag); + return RValue::getComplex(Complex); + } + + case Builtin::BI__builtin_creal: + case Builtin::BI__builtin_crealf: + case Builtin::BI__builtin_creall: + case Builtin::BIcreal: + case Builtin::BIcrealf: + case Builtin::BIcreall: { + mlir::Value ComplexVal = buildComplexExpr(E->getArg(0)); + mlir::Value Real = + builder.createComplexReal(getLoc(E->getExprLoc()), ComplexVal); + return RValue::get(Real); + } + + case Builtin::BI__builtin_cimag: + case Builtin::BI__builtin_cimagf: + case Builtin::BI__builtin_cimagl: + case Builtin::BIcimag: + case Builtin::BIcimagf: + case Builtin::BIcimagl: { + mlir::Value ComplexVal = buildComplexExpr(E->getArg(0)); + mlir::Value Real = + builder.createComplexImag(getLoc(E->getExprLoc()), ComplexVal); + return RValue::get(Real); + } + + case Builtin::BI__builtin_conj: + case Builtin::BI__builtin_conjf: + case Builtin::BI__builtin_conjl: + case Builtin::BIconj: + case Builtin::BIconjf: + case Builtin::BIconjl: + llvm_unreachable("NYI"); + case Builtin::BI__builtin___CFStringMakeConstantString: case Builtin::BI__builtin___NSStringMakeConstantString: llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 99e93e513c5b..7c175b278430 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -129,7 +129,7 @@ class ComplexExprEmitter : public StmtVisitor { return buildCast(E->getCastKind(), E->getSubExpr(), E->getType()); } mlir::Value VisitCastExpr(CastExpr *E) { llvm_unreachable("NYI"); } - mlir::Value VisitCallExpr(const CallExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitCallExpr(const CallExpr *E); mlir::Value VisitStmtExpr(const StmtExpr *E) { llvm_unreachable("NYI"); } // Operators. @@ -488,6 +488,13 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, llvm_unreachable("unknown cast resulting in complex value"); } +mlir::Value ComplexExprEmitter::VisitCallExpr(const CallExpr *E) { + if (E->getCallReturnType(CGF.getContext())->isReferenceType()) + return buildLoadOfLValue(E); + + return CGF.buildCallExpr(E).getComplexVal(); +} + ComplexExprEmitter::BinOpInfo ComplexExprEmitter::buildBinOps(const BinaryOperator *E, QualType PromotionTy) { BinOpInfo Ops{CGF.getLoc(E->getExprLoc())}; diff --git a/clang/test/CIR/CodeGen/complex.c b/clang/test/CIR/CodeGen/complex.c index 41ffca8334f3..e8c9885f685f 100644 --- a/clang/test/CIR/CodeGen/complex.c +++ b/clang/test/CIR/CodeGen/complex.c @@ -48,6 +48,21 @@ void list_init_2(double r, double i) { // LLVM-NEXT: store { double, double } %[[#B]], ptr %5, align 8 // LLVM: } +void builtin_init(double r, double i) { + double _Complex c = __builtin_complex(r, i); +} + +// C: cir.func @builtin_init +// CPP: cir.func @_Z12builtin_initdd +// CHECK: %{{.+}} = cir.complex.create %{{.+}}, %{{.+}} : !cir.double -> !cir.complex +// CHECK: } + +// LLVM: define void @builtin_init +// LLVM: %[[#A:]] = insertvalue { double, double } undef, double %{{.+}}, 0 +// LLVM-NEXT: %[[#B:]] = insertvalue { double, double } %[[#A]], double %{{.+}}, 1 +// LLVM-NEXT: store { double, double } %[[#B]], ptr %{{.+}}, align 8 +// LLVM: } + void imag_literal() { c = 3.0i; ci = 3i; @@ -116,6 +131,38 @@ void load_store_volatile() { // LLVM-NEXT: store volatile { i32, i32 } %[[#B]], ptr @vci, align 4 // LLVM: } +void real() { + double r = __builtin_creal(c); +} + +// C: cir.func no_proto @real() +// CPP: cir.func @_Z4realv() +// CHECK: %[[#A:]] = cir.get_global @c : !cir.ptr> +// CHECK-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr>, !cir.complex +// CHECK-NEXT: %{{.+}} = cir.complex.real %[[#B]] : !cir.complex -> !cir.double +// CHECK: } + +// LLVM: define void @real() +// LLVM: %[[#A:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: store double %[[#A]], ptr %{{.+}}, align 8 +// LLVM: } + +void imag() { + double i = __builtin_cimag(c); +} + +// C: cir.func no_proto @imag() +// CPP: cir.func @_Z4imagv() +// CHECK: %[[#A:]] = cir.get_global @c : !cir.ptr> +// CHECK-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr>, !cir.complex +// CHECK-NEXT: %{{.+}} = cir.complex.imag %[[#B]] : !cir.complex -> !cir.double +// CHECK: } + +// LLVM: define void @imag() +// LLVM: %[[#A:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: store double %[[#A]], ptr %{{.+}}, align 8 +// LLVM: } + void real_ptr() { double *r1 = &__real__ c; int *r2 = &__real__ ci; From 898cc1b5135513260705f9ebe71f71c5816a27aa Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 22 Jul 2024 19:18:42 -0700 Subject: [PATCH 1698/2301] [CIR][CIRGen][NFC] Exceptions: redesign cir.catch in terms of cir.try Simplify things a bit and remove the propagation of exception in terms of a returned exception value. This also merges cir.catch as part of cir.try and remove the former. Everything that didn't work still doesn't work, nothing new added here. The CatchOp parsing was wrong even before this commit, so for now add an XFAIL with the complete testcase we want to be able to parse soon (incremental work will fix this) in clang/test/CIR/IR/exceptions.cir. More cleanups to come as well, this is just the first part. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 79 ++---- clang/lib/CIR/CodeGen/CIRGenCleanup.h | 2 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 118 +++----- clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 20 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 261 ++++++++---------- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 18 +- .../CIR/Dialect/Transforms/MergeCleanups.cpp | 8 +- clang/test/CIR/CodeGen/try-catch.cpp | 21 +- clang/test/CIR/IR/exceptions.cir | 79 ++++-- clang/test/CIR/Lowering/try-catch.cpp | 13 +- 11 files changed, 276 insertions(+), 347 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a692124e4fc7..b3d4583e8e72 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -780,8 +780,8 @@ def ConditionOp : CIR_Op<"condition", [ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "WhileOp", "ForOp", "AwaitOp", - "TernaryOp", "GlobalOp", "DoWhileOp", "CatchOp", "TryOp", - "ArrayCtor", "ArrayDtor"]>]> { + "TernaryOp", "GlobalOp", "DoWhileOp", "TryOp", "ArrayCtor", + "ArrayDtor"]>]> { let summary = "Represents the default branching behaviour of a region"; let description = [{ The `cir.yield` operation terminates regions on different CIR operations, @@ -871,7 +871,7 @@ def ContinueOp : CIR_Op<"continue", [Terminator]> { //===----------------------------------------------------------------------===// def ResumeOp : CIR_Op<"resume", [ReturnLike, Terminator, - ParentOneOf<["CatchOp"]>]> { + ParentOneOf<["TryOp"]>]> { let summary = "Resumes execution after not catching exceptions"; let description = [{ The `cir.resume` operation terminates a region on `cir.catch`, "resuming" @@ -3205,6 +3205,17 @@ def AwaitOp : CIR_Op<"await", // TryOp //===----------------------------------------------------------------------===// +// Represents the unwind region where unwind continues or +// the program std::terminate's. +def CatchUnwind : CIRUnitAttr<"CatchUnwind", "unwind"> { + let storageType = [{ CatchUnwind }]; +} + +// Represents the catch_all region. +def CatchAllAttr : CIRUnitAttr<"CatchAll", "all"> { + let storageType = [{ CatchAllAttr }]; +} + def TryOp : CIR_Op<"try", [DeclareOpInterfaceMethods, RecursivelySpeculatable, AutomaticAllocationScope, @@ -3228,67 +3239,26 @@ def TryOp : CIR_Op<"try", ``` }]; - let regions = (region AnyRegion:$body); - let results = (outs ExceptionInfoPtr:$result); + let arguments = (ins OptionalAttr:$catch_types); + let regions = (region AnyRegion:$try_region, + VariadicRegion:$catch_regions); let assemblyFormat = [{ - $body `:` functional-type(operands, results) attr-dict + $try_region + custom($catch_regions, $catch_types) + attr-dict }]; // Everything already covered elsewhere. let hasVerifier = 0; let builders = [ OpBuilder<(ins - "function_ref":$tryBuilder)>, + "function_ref":$tryBuilder, + "function_ref" + :$catchBuilder)>, ]; } -//===----------------------------------------------------------------------===// -// CatchOp -//===----------------------------------------------------------------------===// - -// Represents the unwind region where unwind continues or -// the program std::terminate's. -def CatchUnwind : CIRUnitAttr<"CatchUnwind", "unwind"> { - let storageType = [{ CatchUnwind }]; -} - -// Represents the catch_all region. -def CatchAllAttr : CIRUnitAttr<"CatchAll", "all"> { - let storageType = [{ CatchAllAttr }]; -} - -def CatchOp : CIR_Op<"catch", - [SameVariadicOperandSize, - DeclareOpInterfaceMethods, - RecursivelySpeculatable, NoRegionArguments]> { - let summary = "Catch operation"; - let description = [{ - }]; - - let arguments = (ins CIR_AnyType:$exception_info, - OptionalAttr:$catchers); - let regions = (region VariadicRegion:$regions); - - // Already verified elsewhere - let hasVerifier = 0; - - let skipDefaultBuilders = 1; - let builders = [ - OpBuilder<(ins - "Value":$exception_info, - "function_ref" - :$catchBuilder)> - ]; - - let assemblyFormat = [{ - `(` - $exception_info `:` type($exception_info) `,` - custom($regions, $catchers) - `)` attr-dict - }]; -} - //===----------------------------------------------------------------------===// // CatchParamOp //===----------------------------------------------------------------------===// @@ -3306,10 +3276,9 @@ def CatchParamOp : CIR_Op<"catch_param"> { ``` }]; - let arguments = (ins ExceptionInfoPtr:$exception_info); let results = (outs CIR_AnyType:$param); let assemblyFormat = [{ - `(` $exception_info `)` `->` qualified(type($param)) attr-dict + `->` qualified(type($param)) attr-dict }]; let hasVerifier = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h index cf6dbb1851b8..661830f83fcf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -206,7 +206,7 @@ class EHCatchScope : public EHScope { // 'takeHandler' or some such function which removes ownership from the // EHCatchScope object if the handlers should live longer than EHCatchScope. void clearHandlerBlocks() { - // The blocks are owned by CatchOp, nothing to delete. + // The blocks are owned by TryOp, nothing to delete. } typedef const Handler *iterator; diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 4f74c861fc74..b89632d9682d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -257,8 +257,9 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup) { // pointer but only use it to denote we're tracking things, but there // shouldn't be any changes to that block after work done in this function. auto catchOp = currLexScope->getExceptionInfo().catchOp; - assert(catchOp && catchOp.getNumRegions() && "expected at least one region"); - auto &fallbackRegion = catchOp.getRegion(catchOp.getNumRegions() - 1); + unsigned numCatchRegions = catchOp.getCatchRegions().size(); + assert(catchOp && numCatchRegions && "expected at least one region"); + auto &fallbackRegion = catchOp.getCatchRegions()[numCatchRegions - 1]; auto *resumeBlock = &fallbackRegion.getBlocks().back(); if (!resumeBlock->empty()) @@ -322,7 +323,6 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { auto numHandlers = S.getNumHandlers(); auto tryLoc = getLoc(S.getBeginLoc()); - auto scopeLoc = getLoc(S.getSourceRange()); mlir::OpBuilder::InsertPoint beginInsertTryBody; auto ehPtrTy = mlir::cir::PointerType::get( @@ -335,28 +335,21 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { // info but don't emit the bulk right away, for now only make sure the // scope returns the exception information. auto tryScope = builder.create( - scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) { + tryLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { // Allocate space for our exception info that might be passed down // to `cir.try_call` everytime a call happens. - yieldTy = ehPtrTy; exceptionInfoInsideTry = b.create( - loc, /*addr type*/ getBuilder().getPointerTo(yieldTy), - /*var type*/ yieldTy, "__exception_ptr", + loc, /*addr type*/ getBuilder().getPointerTo(ehPtrTy), + /*var type*/ ehPtrTy, "__exception_ptr", CGM.getSize(CharUnits::One()), nullptr); beginInsertTryBody = getBuilder().saveInsertionPoint(); - }); - - // The catch {} parts consume the exception information provided by a - // try scope. Also don't emit the code right away for catch clauses, for - // now create the regions and consume the try scope result. - // Note that clauses are later populated in - // CIRGenFunction::buildLandingPad. - auto catchOp = builder.create( - tryLoc, - tryScope->getResult( - 0), // FIXME(cir): we can do better source location here. + }, + // Don't emit the code right away for catch clauses, for + // now create the regions and consume the try scope result. + // Note that clauses are later populated in + // CIRGenFunction::buildLandingPad. [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &result) { mlir::OpBuilder::InsertionGuard guard(b); @@ -372,28 +365,24 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { // Finally emit the body for try/catch. auto emitTryCatchBody = [&]() -> mlir::LogicalResult { - auto loc = catchOp.getLoc(); + auto loc = tryScope.getLoc(); mlir::OpBuilder::InsertionGuard guard(getBuilder()); getBuilder().restoreInsertionPoint(beginInsertTryBody); CIRGenFunction::LexicalScope lexScope{*this, loc, getBuilder().getInsertionBlock()}; { - lexScope.setExceptionInfo({exceptionInfoInsideTry, catchOp}); - // Attach the basic blocks for the catchOp regions into ScopeCatch - // info. - enterCXXTryStmt(S, catchOp); + lexScope.setExceptionInfo({exceptionInfoInsideTry, tryScope}); + // Attach the basic blocks for the catch regions. + enterCXXTryStmt(S, tryScope); // Emit the body for the `try {}` part. if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - - auto v = getBuilder().create(loc, ehPtrTy, - exceptionInfoInsideTry); - getBuilder().create(loc, v.getResult()); + getBuilder().create(loc); } { - lexScope.setExceptionInfo({tryScope->getResult(0), catchOp}); + lexScope.setExceptionInfo({nullptr, tryScope}); // Emit catch clauses. exitCXXTryStmt(S); } @@ -452,7 +441,7 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, // If the next handler is a catch-all, we're at the end, and the // next block is that handler. } else if (catchScope.getHandler(i + 1).isCatchAll()) { - // Block already created when creating CatchOp, just mark this + // Block already created when creating catch regions, just mark this // is the end. nextIsEnd = true; } @@ -464,14 +453,14 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, } void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S, - mlir::cir::CatchOp catchOp, + mlir::cir::TryOp catchOp, bool IsFnTryBlock) { unsigned NumHandlers = S.getNumHandlers(); EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers); for (unsigned I = 0; I != NumHandlers; ++I) { const CXXCatchStmt *C = S.getHandler(I); - mlir::Block *Handler = &catchOp.getRegion(I).getBlocks().front(); + mlir::Block *Handler = &catchOp.getCatchRegions()[I].getBlocks().front(); if (C->getExceptionDecl()) { // FIXME: Dropping the reference type on the type into makes it // impossible to correctly implement catch-by-reference @@ -510,7 +499,18 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { if (!CatchScope.hasEHBranches()) { CatchScope.clearHandlerBlocks(); EHStack.popCatch(); - currLexScope->getExceptionInfo().catchOp->erase(); + // Drop all basic block from all catch regions. + auto tryOp = currLexScope->getExceptionInfo().catchOp; + SmallVector eraseBlocks; + for (mlir::Region &r : tryOp.getCatchRegions()) { + if (r.empty()) + continue; + for (mlir::Block &b : r.getBlocks()) + eraseBlocks.push_back(&b); + } + for (mlir::Block *b : eraseBlocks) + b->erase(); + tryOp.setCatchTypesAttr({}); return; } @@ -630,43 +630,13 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { return lpad; } - // If there's an existing CatchOp, it means we got a `cir.try` scope + // If there's an existing TryOp, it means we got a `cir.try` scope // that leads to this "landing pad" creation site. Otherwise, exceptions - // are enabled but a throwing function is called anyways. - auto catchOp = currLexScope->getExceptionInfo().catchOp; - if (!catchOp) { - auto loc = *currSrcLoc; - auto ehPtrTy = mlir::cir::PointerType::get( - getBuilder().getContext(), - getBuilder().getType<::mlir::cir::ExceptionInfoType>()); - - mlir::Value exceptionAddr; - { - // Get a new alloca within the current scope. - mlir::OpBuilder::InsertionGuard guard(builder); - exceptionAddr = buildAlloca( - "__exception_ptr", ehPtrTy, loc, CharUnits::One(), - builder.getBestAllocaInsertPoint(builder.getInsertionBlock())); - } - - { - // Insert catch at the end of the block, and place the insert pointer - // back to where it was. - mlir::OpBuilder::InsertionGuard guard(builder); - auto exceptionPtr = - builder.create(loc, ehPtrTy, exceptionAddr); - catchOp = builder.create( - loc, exceptionPtr, - [&](mlir::OpBuilder &b, mlir::Location loc, - mlir::OperationState &result) { - // There's no source code level catch here, create one region for - // the resume block. - mlir::OpBuilder::InsertionGuard guard(b); - auto *r = result.addRegion(); - builder.createBlock(r); - }); - } - currLexScope->setExceptionInfo({exceptionAddr, catchOp}); + // are enabled but a throwing function is called anyways (common pattern + // with function local static initializers). + auto tryOp = currLexScope->getExceptionInfo().catchOp; + if (!tryOp) { + llvm_unreachable("NYI"); } { @@ -752,17 +722,17 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { assert(!MissingFeatures::setLandingPadCleanup()); } - assert((clauses.size() > 0 || hasCleanup) && "CatchOp has no clauses!"); + assert((clauses.size() > 0 || hasCleanup) && "no catch clauses!"); // If there's no catch_all, attach the unwind region. This needs to be the - // last region in the CatchOp operation. + // last region in the TryOp operation catch list. if (!hasCatchAll) { auto catchUnwind = mlir::cir::CatchUnwindAttr::get(builder.getContext()); clauses.push_back(catchUnwind); } - // Add final array of clauses into catchOp. - catchOp.setCatchersAttr( + // Add final array of clauses into TryOp. + tryOp.setCatchTypesAttr( mlir::ArrayAttr::get(builder.getContext(), clauses)); // In traditional LLVM codegen. this tells the backend how to generate the @@ -772,7 +742,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { (void)getEHDispatchBlock(EHStack.getInnermostEHScope()); } - return catchOp; + return tryOp; } // Differently from LLVM traditional codegen, there are no dispatch blocks diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 3f48a9e1f9cf..6d9fd1c32962 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -316,7 +316,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// allocas for the exception info struct CIRExceptionInfo { mlir::Value addr{}; - mlir::cir::CatchOp catchOp{}; + mlir::cir::TryOp catchOp{}; }; enum class EvaluationOrder { @@ -940,7 +940,7 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildCXXTryStmtUnderScope(const clang::CXXTryStmt &S); mlir::LogicalResult buildCXXTryStmt(const clang::CXXTryStmt &S); - void enterCXXTryStmt(const CXXTryStmt &S, mlir::cir::CatchOp catchOp, + void enterCXXTryStmt(const CXXTryStmt &S, mlir::cir::TryOp catchOp, bool IsFnTryBlock = false); void exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 1e272cf953b8..b9769e22f47b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -647,12 +647,10 @@ struct CallEndCatch final : EHScopeStack::Cleanup { /// call can be marked as nounwind even if EndMightThrow is true. /// /// \param EndMightThrow - true if __cxa_end_catch might throw -static mlir::Value CallBeginCatch(CIRGenFunction &CGF, mlir::Value Exn, - mlir::Type ParamTy, bool EndMightThrow) { - // llvm::CallInst *call = - // CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn); +static mlir::Value CallBeginCatch(CIRGenFunction &CGF, mlir::Type ParamTy, + bool EndMightThrow) { auto catchParam = CGF.getBuilder().create( - Exn.getLoc(), ParamTy, Exn); + CGF.getBuilder().getUnknownLoc(), ParamTy); CGF.EHStack.pushCleanup( NormalAndEHCleanup, @@ -686,7 +684,7 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, // If the catch type is a pointer type, __cxa_begin_catch returns // the pointer by value. if (CatchType->hasPointerRepresentation()) { - auto catchParam = CallBeginCatch(CGF, Exn, CIRCatchTy, false); + auto catchParam = CallBeginCatch(CGF, CIRCatchTy, false); switch (CatchType.getQualifiers().getObjCLifetime()) { case Qualifiers::OCL_Strong: @@ -698,7 +696,8 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: - CGF.getBuilder().createStore(Exn.getLoc(), catchParam, ParamAddr); + CGF.getBuilder().createStore(CGF.getBuilder().getUnknownLoc(), + catchParam, ParamAddr); return; case Qualifiers::OCL_Weak: @@ -711,8 +710,8 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, } // Otherwise, it returns a pointer into the exception object. - auto catchParam = CallBeginCatch( - CGF, Exn, CGF.getBuilder().getPointerTo(CIRCatchTy), false); + auto catchParam = + CallBeginCatch(CGF, CGF.getBuilder().getPointerTo(CIRCatchTy), false); LValue srcLV = CGF.MakeNaturalAlignAddrLValue(catchParam, CatchType); LValue destLV = CGF.makeAddrLValue(ParamAddr, CatchType); switch (TEK) { @@ -770,8 +769,7 @@ void CIRGenItaniumCXXABI::emitBeginCatch(CIRGenFunction &CGF, VarDecl *CatchParam = S->getExceptionDecl(); if (!CatchParam) { - auto Exn = CGF.currLexScope->getExceptionInfo().addr; - CallBeginCatch(CGF, Exn, CGF.getBuilder().getVoidPtrTy(), true); + CallBeginCatch(CGF, CGF.getBuilder().getVoidPtrTy(), true); return; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a96079eb03c1..d099b662fb14 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -936,30 +936,126 @@ LogicalResult ScopeOp::verify() { return success(); } void TryOp::build( OpBuilder &builder, OperationState &result, - function_ref scopeBuilder) { - assert(scopeBuilder && "the builder callback for 'then' must be present"); + function_ref tryBodyBuilder, + function_ref catchBuilder) { + assert(tryBodyBuilder && "expected builder callback for 'cir.try' body"); OpBuilder::InsertionGuard guard(builder); - Region *scopeRegion = result.addRegion(); - builder.createBlock(scopeRegion); + Region *tryBodyRegion = result.addRegion(); + builder.createBlock(tryBodyRegion); - mlir::Type yieldTy; - scopeBuilder(builder, yieldTy, result.location); - - if (yieldTy) - result.addTypes(TypeRange{yieldTy}); + tryBodyBuilder(builder, result.location); + catchBuilder(builder, result.location, result); } void TryOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { - // The only region always branch back to the parent operation. + // If any index all the underlying regions branch back to the parent + // operation. if (!point.isParent()) { - regions.push_back(RegionSuccessor(this->getODSResults(0))); + regions.push_back(RegionSuccessor()); return; } // If the condition isn't constant, both regions may be executed. - regions.push_back(RegionSuccessor(&getBody())); + regions.push_back(RegionSuccessor(&getTryRegion())); + // FIXME: optimize, ideas include: + // - If we know a target function never throws a specific type, we can + // remove the catch handler. + for (auto &r : this->getCatchRegions()) + regions.push_back(RegionSuccessor(&r)); +} + +void printCatchRegions(OpAsmPrinter &p, TryOp op, + mlir::MutableArrayRef<::mlir::Region> regions, + mlir::ArrayAttr catchList) { + + int currCatchIdx = 0; + if (!catchList) + return; + p << "catch ["; + llvm::interleaveComma(catchList, p, [&](const Attribute &a) { + auto exRtti = a; + + if (mlir::isa(a)) { + p.printAttribute(a); + p << " "; + } else if (!exRtti) { + p << "all"; + } else { + p << "type "; + p.printAttribute(exRtti); + p << " "; + } + p.printRegion(regions[currCatchIdx], /*printEntryBLockArgs=*/false, + /*printBlockTerminators=*/true); + currCatchIdx++; + }); + p << "]"; +} + +ParseResult parseCatchRegions( + OpAsmParser &parser, + llvm::SmallVectorImpl> ®ions, + ::mlir::ArrayAttr &catchersAttr) { + SmallVector catchList; + + auto parseAndCheckRegion = [&]() -> ParseResult { + // Parse region attached to catch + regions.emplace_back(new Region); + Region &currRegion = *regions.back().get(); + auto parserLoc = parser.getCurrentLocation(); + if (parser.parseRegion(currRegion, /*arguments=*/{}, /*argTypes=*/{})) { + regions.clear(); + return failure(); + } + + if (currRegion.empty()) { + return parser.emitError(parser.getCurrentLocation(), + "catch region shall not be empty"); + } + + if (!(currRegion.back().mightHaveTerminator() && + currRegion.back().getTerminator())) + return parser.emitError( + parserLoc, "blocks are expected to be explicitly terminated"); + + return success(); + }; + + auto parseCatchEntry = [&]() -> ParseResult { + mlir::Type exceptionType; + mlir::Attribute exceptionTypeInfo; + + // FIXME: support most recent syntax, currently broken. + ::llvm::StringRef attrStr; + if (!parser.parseOptionalKeyword(&attrStr, {"all"})) { + if (parser.parseKeyword("type").failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected 'type' keyword here"); + if (parser.parseType(exceptionType).failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected valid exception type"); + if (parser.parseAttribute(exceptionTypeInfo).failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected valid RTTI info attribute"); + } + catchList.push_back(exceptionTypeInfo); + return parseAndCheckRegion(); + }; + + if (parser.parseKeyword("catch").failed()) + return parser.emitError(parser.getCurrentLocation(), + "expected 'catch' keyword here"); + + if (parser + .parseCommaSeparatedList(OpAsmParser::Delimiter::Square, + parseCatchEntry, " in catch list") + .failed()) + return failure(); + + catchersAttr = parser.getBuilder().getArrayAttr(catchList); + return ::mlir::success(); } //===----------------------------------------------------------------------===// @@ -1409,147 +1505,6 @@ static void printSwitchFlatOpCases(OpAsmPrinter &p, SwitchFlatOp op, p << ']'; } -//===----------------------------------------------------------------------===// -// CatchOp -//===----------------------------------------------------------------------===// - -ParseResult -parseCatchOp(OpAsmParser &parser, - llvm::SmallVectorImpl> ®ions, - ::mlir::ArrayAttr &catchersAttr) { - SmallVector catchList; - - auto parseAndCheckRegion = [&]() -> ParseResult { - // Parse region attached to catch - regions.emplace_back(new Region); - Region &currRegion = *regions.back().get(); - auto parserLoc = parser.getCurrentLocation(); - if (parser.parseRegion(currRegion, /*arguments=*/{}, /*argTypes=*/{})) { - regions.clear(); - return failure(); - } - - if (currRegion.empty()) { - return parser.emitError(parser.getCurrentLocation(), - "catch region shall not be empty"); - } - - if (!(currRegion.back().mightHaveTerminator() && - currRegion.back().getTerminator())) - return parser.emitError( - parserLoc, "blocks are expected to be explicitly terminated"); - - return success(); - }; - - auto parseCatchEntry = [&]() -> ParseResult { - mlir::Type exceptionType; - mlir::Attribute exceptionTypeInfo; - - // cir.catch(..., [ - // type (!cir.ptr, @type_info_char_star) { - // ... - // }, - // all { - // ... - // } - // ] - ::llvm::StringRef attrStr; - if (!parser.parseOptionalKeyword(&attrStr, {"all"})) { - if (parser.parseKeyword("type").failed()) - return parser.emitError(parser.getCurrentLocation(), - "expected 'type' keyword here"); - - if (parser.parseLParen().failed()) - return parser.emitError(parser.getCurrentLocation(), "expected '('"); - - if (parser.parseType(exceptionType).failed()) - return parser.emitError(parser.getCurrentLocation(), - "expected valid exception type"); - if (parser.parseAttribute(exceptionTypeInfo).failed()) - return parser.emitError(parser.getCurrentLocation(), - "expected valid RTTI info attribute"); - if (parser.parseRParen().failed()) - return parser.emitError(parser.getCurrentLocation(), "expected ')'"); - } - catchList.push_back(exceptionTypeInfo); - return parseAndCheckRegion(); - }; - - if (parser - .parseCommaSeparatedList(OpAsmParser::Delimiter::Square, - parseCatchEntry, " in catch list") - .failed()) - return failure(); - - catchersAttr = parser.getBuilder().getArrayAttr(catchList); - return ::mlir::success(); -} - -void printCatchOp(OpAsmPrinter &p, CatchOp op, - mlir::MutableArrayRef<::mlir::Region> regions, - mlir::ArrayAttr catchList) { - - int currCatchIdx = 0; - p << "["; - llvm::interleaveComma(catchList, p, [&](const Attribute &a) { - p.printNewline(); - p.increaseIndent(); - auto exRtti = a; - - if (mlir::isa(a)) { - p.printAttribute(a); - } else if (!exRtti) { - p << "all"; - } else { - p << "type ("; - p.printAttribute(exRtti); - p << ") "; - } - p.printNewline(); - p.increaseIndent(); - p.printRegion(regions[currCatchIdx], /*printEntryBLockArgs=*/false, - /*printBlockTerminators=*/true); - currCatchIdx++; - p.decreaseIndent(); - p.decreaseIndent(); - }); - p << "]"; -} - -/// Given the region at `index`, or the parent operation if `index` is None, -/// return the successor regions. These are the regions that may be selected -/// during the flow of control. `operands` is a set of optional attributes -/// that correspond to a constant value for each operand, or null if that -/// operand is not a constant. -void CatchOp::getSuccessorRegions(mlir::RegionBranchPoint point, - SmallVectorImpl ®ions) { - // If any index all the underlying regions branch back to the parent - // operation. - if (!point.isParent()) { - regions.push_back(RegionSuccessor()); - return; - } - - // FIXME: optimize, ideas include: - // - If we know a target function never throws a specific type, we can - // remove the catch handler. - // - ??? - - // If the condition isn't constant, all regions may be executed. - for (auto &r : this->getRegions()) - regions.push_back(RegionSuccessor(&r)); -} - -void CatchOp::build( - OpBuilder &builder, OperationState &result, mlir::Value exceptionInfo, - function_ref catchBuilder) { - assert(catchBuilder && "the builder callback for regions must be present"); - result.addOperands(ValueRange{exceptionInfo}); - OpBuilder::InsertionGuard guardCatch(builder); - catchBuilder(builder, result.location, result); -} - //===----------------------------------------------------------------------===// // LoopOpInterface Methods //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 1a3281c8839a..40635cb7bbc8 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -183,7 +183,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto loc = tryOp.getLoc(); // Empty scope: just remove it. - if (tryOp.getRegion().empty()) { + if (tryOp.getTryRegion().empty()) { rewriter.eraseOp(tryOp); return mlir::success(); } @@ -197,9 +197,9 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { continueBlock = remainingOpsBlock; // Inline body region. - auto *beforeBody = &tryOp.getRegion().front(); - auto *afterBody = &tryOp.getRegion().back(); - rewriter.inlineRegionBefore(tryOp.getRegion(), continueBlock); + auto *beforeBody = &tryOp.getTryRegion().front(); + auto *afterBody = &tryOp.getTryRegion().back(); + rewriter.inlineRegionBefore(tryOp.getTryRegion(), continueBlock); // Branch into the body of the region. rewriter.setInsertionPointToEnd(currentBlock); @@ -208,15 +208,11 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Replace the tryOp return with a branch that jumps out of the body. rewriter.setInsertionPointToEnd(afterBody); auto yieldOp = cast(afterBody->getTerminator()); - assert(yieldOp.getOperands().size() == 1 && "expect one exact value"); - auto br = rewriter.replaceOpWithNewOp( - yieldOp, yieldOp.getArgs(), continueBlock); + rewriter.replaceOpWithNewOp(yieldOp, continueBlock); - // Replace the op with values return from the body region. - continueBlock->addArgument(br.getDestOperands()[0].getType(), - tryOp.getLoc()); - rewriter.replaceOp(tryOp, continueBlock->getArguments()); + // TODO: handle the catch clauses and cir.try_call while here. + rewriter.eraseOp(tryOp); return mlir::success(); } }; diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp index c0af33ef071d..4da0692d6153 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp @@ -87,15 +87,17 @@ struct RemoveTrivialTry : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; LogicalResult match(TryOp op) const final { - return success(op.getResult().use_empty() && op.getBody().hasOneBlock()); + // FIXME: also check all catch regions are empty + // return success(op.getTryRegion().hasOneBlock()); + return mlir::failure(); } void rewrite(TryOp op, PatternRewriter &rewriter) const final { // Move try body to the parent. - assert(op.getBody().hasOneBlock()); + assert(op.getTryRegion().hasOneBlock()); Block *parentBlock = op.getOperation()->getBlock(); - mlir::Block *tryBody = &op.getBody().getBlocks().front(); + mlir::Block *tryBody = &op.getTryRegion().getBlocks().front(); YieldOp y = dyn_cast(tryBody->getTerminator()); assert(y && "expected well wrapped up try block"); y->erase(); diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index f55cfd042d49..bdb988e8169b 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -13,7 +13,7 @@ unsigned long long tc() { // CHECK: %[[msg:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["msg"] // CHECK: %[[idx:.*]] = cir.alloca !s32i, !cir.ptr, ["idx"] - // CHECK: %[[try_eh:.*]] = cir.try { + // CHECK: cir.try { // CHECK: %[[eh_info:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__exception_ptr"] // CHECK: %[[local_a:.*]] = cir.alloca !s32i, !cir.ptr, ["a", init] int a = 4; @@ -21,25 +21,22 @@ unsigned long long tc() { // CHECK: %[[div_res:.*]] = cir.try_call exception(%[[eh_info]]) @_Z8divisionii({{.*}}) : (!cir.ptr>, !s32i, !s32i) -> !cir.double a++; - // CHECK: cir.catch(%[[try_eh]] : !cir.ptr, [ } catch (int idx) { - // CHECK: type (#cir.global_view<@_ZTIi> : !cir.ptr) - // CHECK: { - // CHECK: %[[catch_idx_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr + // CHECK: } catch [type #cir.global_view<@_ZTIi> : !cir.ptr { + // CHECK: %[[catch_idx_addr:.*]] = cir.catch_param -> !cir.ptr // CHECK: %[[idx_load:.*]] = cir.load %[[catch_idx_addr]] : !cir.ptr, !s32i // CHECK: cir.store %[[idx_load]], %[[idx]] : !s32i, !cir.ptr z = 98; idx++; } catch (const char* msg) { - // CHECK: type (#cir.global_view<@_ZTIPKc> : !cir.ptr) - // CHECK: { - // CHECK: %[[msg_addr:.*]] = cir.catch_param(%[[try_eh]]) -> !cir.ptr + // CHECK: }, type #cir.global_view<@_ZTIPKc> : !cir.ptr { + // CHECK: %[[msg_addr:.*]] = cir.catch_param -> !cir.ptr // CHECK: cir.store %[[msg_addr]], %[[msg]] : !cir.ptr, !cir.ptr> z = 99; (void)msg[0]; - } // CHECK: #cir.unwind + } // CHECK: }, #cir.unwind { // CHECK: cir.resume - // CHECK-NEXT: }]) + // CHECK-NEXT: } return z; } @@ -60,7 +57,7 @@ unsigned long long tc2() { z = 99; (void)msg[0]; } catch (...) { - // CHECK: type (#cir.all) + // CHECK: }, type #cir.all { // CHECK: cir.catch_param // CHECK: cir.const #cir.int<100> : !s32i z = 100; @@ -77,7 +74,7 @@ unsigned long long tc3() { try { z = division(x, y); } catch (...) { - // CHECK: type (#cir.all) + // CHECK: } catch [type #cir.all { // CHECK: cir.catch_param // CHECK: cir.const #cir.int<100> : !s32i z = 100; diff --git a/clang/test/CIR/IR/exceptions.cir b/clang/test/CIR/IR/exceptions.cir index c1a981d7a9f7..25aa67357c7d 100644 --- a/clang/test/CIR/IR/exceptions.cir +++ b/clang/test/CIR/IR/exceptions.cir @@ -1,24 +1,69 @@ // RUN: cir-opt %s | FileCheck %s +// XFAIL: * !s32i = !cir.int +!s8i = !cir.int +!u64i = !cir.int +!u8i = !cir.int +!void = !cir.void module { - cir.func @div(%x : !s32i, %y : !s32i) -> !s32i { - %3 = cir.const #cir.int<0> : !s32i - cir.return %3 : !s32i - } - - cir.func @foo(%x : !s32i, %y : !s32i) -> !cir.ptr { - %11 = cir.scope { - %10 = cir.try { - %0 = cir.alloca !cir.ptr, !cir.ptr>, ["exception_info"] {alignment = 16 : i64} - %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i - // CHECK: cir.try_call exception(%2) @div(%arg0, %arg1) : (!cir.ptr>, !s32i, !s32i) -> !s32i - %1 = cir.load %0 : !cir.ptr>, !cir.ptr - cir.yield %1 : !cir.ptr - } : () -> !cir.ptr - cir.yield %10 : !cir.ptr - } : !cir.ptr - cir.return %11 : !cir.ptr + // Generated from clang/test/CIR/CodeGen/try-catch.cpp + cir.func @_Z2tcv() -> !u64i { + %0 = cir.alloca !u64i, !cir.ptr, ["__retval"] {alignment = 8 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} + %3 = cir.alloca !u64i, !cir.ptr, ["z"] {alignment = 8 : i64} + %4 = cir.const #cir.int<50> : !s32i + cir.store %4, %1 : !s32i, !cir.ptr + %5 = cir.const #cir.int<3> : !s32i + cir.store %5, %2 : !s32i, !cir.ptr + cir.scope { + %8 = cir.alloca !cir.ptr, !cir.ptr>, ["msg"] {alignment = 8 : i64} + %9 = cir.alloca !s32i, !cir.ptr, ["idx"] {alignment = 4 : i64} + cir.try { + %10 = cir.alloca !cir.ptr, !cir.ptr>, ["__exception_ptr"] {alignment = 1 : i64} + %11 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + %12 = cir.const #cir.int<4> : !s32i + cir.store %12, %11 : !s32i, !cir.ptr + %13 = cir.load %1 : !cir.ptr, !s32i + %14 = cir.load %2 : !cir.ptr, !s32i + %15 = cir.try_call exception(%10) @_Z8divisionii(%13, %14) : (!s32i, !s32i) -> !cir.double + %16 = cir.cast(float_to_int, %15 : !cir.double), !u64i + cir.store %16, %3 : !u64i, !cir.ptr + %17 = cir.load %11 : !cir.ptr, !s32i + %18 = cir.unary(inc, %17) : !s32i, !s32i + cir.store %18, %11 : !s32i, !cir.ptr + %19 = cir.load %10 : !cir.ptr>, !cir.ptr + cir.yield + } catch [type #cir.global_view<@_ZTIi> : !cir.ptr { + %10 = cir.catch_param -> !cir.ptr + %11 = cir.load %10 : !cir.ptr, !s32i + cir.store %11, %9 : !s32i, !cir.ptr + %12 = cir.const #cir.int<98> : !s32i + %13 = cir.cast(integral, %12 : !s32i), !u64i + cir.store %13, %3 : !u64i, !cir.ptr + %14 = cir.load %9 : !cir.ptr, !s32i + %15 = cir.unary(inc, %14) : !s32i, !s32i + cir.store %15, %9 : !s32i, !cir.ptr + cir.yield + }, type #cir.global_view<@_ZTIPKc> : !cir.ptr { + %10 = cir.catch_param -> !cir.ptr + cir.store %10, %8 : !cir.ptr, !cir.ptr> + %11 = cir.const #cir.int<99> : !s32i + %12 = cir.cast(integral, %11 : !s32i), !u64i + cir.store %12, %3 : !u64i, !cir.ptr + %13 = cir.load %8 : !cir.ptr>, !cir.ptr + %14 = cir.const #cir.int<0> : !s32i + %15 = cir.ptr_stride(%13 : !cir.ptr, %14 : !s32i), !cir.ptr + cir.yield + }, #cir.unwind { + cir.resume + }] + } + %6 = cir.load %3 : !cir.ptr, !u64i + cir.store %6, %0 : !u64i, !cir.ptr + %7 = cir.load %0 : !cir.ptr, !u64i + cir.return %7 : !u64i } } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 545cde0accee..4a11b3a36e19 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -11,21 +11,18 @@ unsigned long long tc() { try { int a = 4; + // CIR_FLAT_DISABLED: cir.alloca !cir.ptr, !cir.ptr>, ["msg"] + // CIR_FLAT_DISABLED: cir.alloca !s32i, !cir.ptr, ["idx"] // CIR_FLAT: cir.br ^bb1 - // CIR_FLAT: ^bb1: // pred: ^bb0 - // CIR_FLAT: cir.alloca !cir.ptr, !cir.ptr>, ["msg"] - // CIR_FLAT: cir.alloca !s32i, !cir.ptr, ["idx"] + // CIR_FLAT: ^bb1: // pred: ^bb0 // CIR_FLAT: cir.br ^bb2 - // CIR_FLAT: ^bb2: // pred: ^bb1 + // CIR_FLAT: ^bb2: // pred: ^bb1 // CIR_FLAT: %[[EH_PTR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__exception_ptr"] // CIR_FLAT: cir.try_call exception(%[[EH_PTR]]) @_Z8divisionii( z = division(x, y); a++; - // CIR_FLAT: %[[LOAD_EH_PTR:.*]] = cir.load %[[EH_PTR]] : !cir.ptr>, !cir.ptr - // CIR_FLAT: cir.br ^bb3(%[[LOAD_EH_PTR]] : !cir.ptr) - // CIR_FLAT: ^bb3(%[[EH_ARG:.*]]: !cir.ptr loc(fused[#loc1, #loc2])): // pred: ^bb2 - // CIR_FLAT: cir.catch(%[[EH_ARG:.*]] : !cir.ptr, [ + // CIR_FLAT: cir.br ^bb3 } catch (int idx) { z = 98; idx++; From ab90c7dff055ca2de1b303813343e853c3a516af Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 24 Jul 2024 07:21:20 +0800 Subject: [PATCH 1699/2301] [CIR] Add support for unary complex operations (#750) This PR adds support for unary operations on complex numbers, namely plus(+), minus(-), and conjugate(~). This PR also adds support for the `__builtin_conj` builtin function which computes the conjugate of the input. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 8 +- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 70 ++++++++-- .../Dialect/Transforms/LoweringPrepare.cpp | 51 ++++++- clang/test/CIR/CodeGen/complex-arithmetic.c | 131 ++++++++++++++++++ 4 files changed, 243 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index aa0793a57282..a16148a21986 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -756,8 +756,12 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_conjl: case Builtin::BIconj: case Builtin::BIconjf: - case Builtin::BIconjl: - llvm_unreachable("NYI"); + case Builtin::BIconjl: { + mlir::Value ComplexVal = buildComplexExpr(E->getArg(0)); + mlir::Value Conj = builder.createUnaryOp( + getLoc(E->getExprLoc()), mlir::cir::UnaryOpKind::Not, ComplexVal); + return RValue::getComplex(Conj); + } case Builtin::BI__builtin___CFStringMakeConstantString: case Builtin::BI__builtin___NSStringMakeConstantString: diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 7c175b278430..ebff8e10c793 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -152,20 +152,12 @@ class ComplexExprEmitter : public StmtVisitor { mlir::Value VisitUnaryDeref(const Expr *E) { llvm_unreachable("NYI"); } mlir::Value VisitUnaryPlus(const UnaryOperator *E, - QualType PromotionType = QualType()) { - llvm_unreachable("NYI"); - } - mlir::Value VisitPlus(const UnaryOperator *E, QualType PromotionType) { - llvm_unreachable("NYI"); - } + QualType PromotionType = QualType()); + mlir::Value VisitPlus(const UnaryOperator *E, QualType PromotionType); mlir::Value VisitUnaryMinus(const UnaryOperator *E, - QualType PromotionType = QualType()) { - llvm_unreachable("NYI"); - } - mlir::Value VisitMinus(const UnaryOperator *E, QualType PromotionType) { - llvm_unreachable("NYI"); - } - mlir::Value VisitUnaryNot(const UnaryOperator *E) { llvm_unreachable("NYI"); } + QualType PromotionType = QualType()); + mlir::Value VisitMinus(const UnaryOperator *E, QualType PromotionType); + mlir::Value VisitUnaryNot(const UnaryOperator *E); // LNot,Real,Imag never return complex. mlir::Value VisitUnaryExtension(const UnaryOperator *E) { return Visit(E->getSubExpr()); @@ -495,6 +487,58 @@ mlir::Value ComplexExprEmitter::VisitCallExpr(const CallExpr *E) { return CGF.buildCallExpr(E).getComplexVal(); } +mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *E, + QualType PromotionType) { + QualType promotionTy = PromotionType.isNull() + ? getPromotionType(E->getSubExpr()->getType()) + : PromotionType; + mlir::Value result = VisitPlus(E, promotionTy); + if (!promotionTy.isNull()) + return CGF.buildUnPromotedValue(result, E->getSubExpr()->getType()); + return result; +} + +mlir::Value ComplexExprEmitter::VisitPlus(const UnaryOperator *E, + QualType PromotionType) { + mlir::Value Op; + if (!PromotionType.isNull()) + Op = CGF.buildPromotedComplexExpr(E->getSubExpr(), PromotionType); + else + Op = Visit(E->getSubExpr()); + + return Builder.createUnaryOp(CGF.getLoc(E->getExprLoc()), + mlir::cir::UnaryOpKind::Plus, Op); +} + +mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E, + QualType PromotionType) { + QualType promotionTy = PromotionType.isNull() + ? getPromotionType(E->getSubExpr()->getType()) + : PromotionType; + mlir::Value result = VisitMinus(E, promotionTy); + if (!promotionTy.isNull()) + return CGF.buildUnPromotedValue(result, E->getSubExpr()->getType()); + return result; +} + +mlir::Value ComplexExprEmitter::VisitMinus(const UnaryOperator *E, + QualType PromotionType) { + mlir::Value Op; + if (!PromotionType.isNull()) + Op = CGF.buildPromotedComplexExpr(E->getSubExpr(), PromotionType); + else + Op = Visit(E->getSubExpr()); + + return Builder.createUnaryOp(CGF.getLoc(E->getExprLoc()), + mlir::cir::UnaryOpKind::Minus, Op); +} + +mlir::Value ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) { + mlir::Value Op = Visit(E->getSubExpr()); + return Builder.createUnaryOp(CGF.getLoc(E->getExprLoc()), + mlir::cir::UnaryOpKind::Not, Op); +} + ComplexExprEmitter::BinOpInfo ComplexExprEmitter::buildBinOps(const BinaryOperator *E, QualType PromotionTy) { BinOpInfo Ops{CGF.getLoc(E->getExprLoc())}; diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 62c0e5acd899..201895abfaea 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -71,6 +71,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void runOnOperation() override; void runOnOp(Operation *op); + void lowerUnaryOp(UnaryOp op); void lowerBinOp(BinOp op); void lowerComplexBinOp(ComplexBinOp op); void lowerThreeWayCmpOp(CmpThreeWayOp op); @@ -347,6 +348,50 @@ void LoweringPreparePass::lowerVAArgOp(VAArgOp op) { return; } +void LoweringPreparePass::lowerUnaryOp(UnaryOp op) { + auto ty = op.getType(); + if (!mlir::isa(ty)) + return; + + auto loc = op.getLoc(); + auto opKind = op.getKind(); + assert((opKind == mlir::cir::UnaryOpKind::Plus || + opKind == mlir::cir::UnaryOpKind::Minus || + opKind == mlir::cir::UnaryOpKind::Not) && + "invalid unary op kind on complex numbers"); + + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op); + + auto operand = op.getInput(); + + auto operandReal = builder.createComplexReal(loc, operand); + auto operandImag = builder.createComplexImag(loc, operand); + + mlir::Value resultReal; + mlir::Value resultImag; + switch (opKind) { + case mlir::cir::UnaryOpKind::Plus: + case mlir::cir::UnaryOpKind::Minus: + resultReal = builder.createUnaryOp(loc, opKind, operandReal); + resultImag = builder.createUnaryOp(loc, opKind, operandImag); + break; + + case mlir::cir::UnaryOpKind::Not: + resultReal = operandReal; + resultImag = + builder.createUnaryOp(loc, mlir::cir::UnaryOpKind::Minus, operandImag); + break; + + default: + llvm_unreachable("unsupported complex unary op kind"); + } + + auto result = builder.createComplexCreate(loc, resultReal, resultImag); + op.replaceAllUsesWith(result); + op.erase(); +} + void LoweringPreparePass::lowerBinOp(BinOp op) { auto ty = op.getType(); if (!mlir::isa(ty)) @@ -939,7 +984,9 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { } void LoweringPreparePass::runOnOp(Operation *op) { - if (auto bin = dyn_cast(op)) { + if (auto unary = dyn_cast(op)) { + lowerUnaryOp(unary); + } else if (auto bin = dyn_cast(op)) { lowerBinOp(bin); } else if (auto complexBin = dyn_cast(op)) { lowerComplexBinOp(complexBin); @@ -980,7 +1027,7 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { - if (isa(op)) opsToTransform.push_back(op); diff --git a/clang/test/CIR/CodeGen/complex-arithmetic.c b/clang/test/CIR/CodeGen/complex-arithmetic.c index 6dc0c546bd2f..f7b85000ce6b 100644 --- a/clang/test/CIR/CodeGen/complex-arithmetic.c +++ b/clang/test/CIR/CodeGen/complex-arithmetic.c @@ -645,3 +645,134 @@ void div_assign() { // CIRGEN-FULL: %{{.+}} = cir.complex.binop div %{{.+}}, %{{.+}} range(full) : !cir.complex // CHECK: } + +void unary_plus() { + cd1 = +cd1; + ci1 = +ci1; +} + +// CLANG: @unary_plus +// CPPLANG: @_Z10unary_plusv + +// CIRGEN: %{{.+}} = cir.unary(plus, %{{.+}}) : !cir.complex, !cir.complex +// CIRGEN: %{{.+}} = cir.unary(plus, %{{.+}}) : !cir.complex, !cir.complex + +// CIR: %[[#OPR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#OPI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#RESR:]] = cir.unary(plus, %[[#OPR]]) : !cir.double, !cir.double +// CIR-NEXT: %[[#RESI:]] = cir.unary(plus, %[[#OPI]]) : !cir.double, !cir.double +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#RESR]], %[[#RESI]] : !cir.double -> !cir.complex + +// CIR: %[[#OPR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#OPI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#RESR:]] = cir.unary(plus, %[[#OPR]]) : !s32i, !s32i +// CIR-NEXT: %[[#RESI:]] = cir.unary(plus, %[[#OPI]]) : !s32i, !s32i +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#RESR]], %[[#RESI]] : !s32i -> !cir.complex + +// LLVM: %[[#OPR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#OPI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#OPR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#OPI]], 1 + +// LLVM: %[[#OPR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#OPI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#OPR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 %[[#OPI]], 1 + +// CHECK: } + +void unary_minus() { + cd1 = -cd1; + ci1 = -ci1; +} + +// CLANG: @unary_minus +// CPPLANG: @_Z11unary_minusv + +// CIRGEN: %{{.+}} = cir.unary(minus, %{{.+}}) : !cir.complex, !cir.complex +// CIRGEN: %{{.+}} = cir.unary(minus, %{{.+}}) : !cir.complex, !cir.complex + +// CIR: %[[#OPR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#OPI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#RESR:]] = cir.unary(minus, %[[#OPR]]) : !cir.double, !cir.double +// CIR-NEXT: %[[#RESI:]] = cir.unary(minus, %[[#OPI]]) : !cir.double, !cir.double +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#RESR]], %[[#RESI]] : !cir.double -> !cir.complex + +// CIR: %[[#OPR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#OPI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#RESR:]] = cir.unary(minus, %[[#OPR]]) : !s32i, !s32i +// CIR-NEXT: %[[#RESI:]] = cir.unary(minus, %[[#OPI]]) : !s32i, !s32i +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#RESR]], %[[#RESI]] : !s32i -> !cir.complex + +// LLVM: %[[#OPR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#OPI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#RESR:]] = fneg double %[[#OPR]] +// LLVM-NEXT: %[[#RESI:]] = fneg double %[[#OPI]] +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#RESR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#RESI]], 1 + +// LLVM: %[[#OPR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#OPI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#RESR:]] = sub i32 0, %[[#OPR]] +// LLVM-NEXT: %[[#RESI:]] = sub i32 0, %[[#OPI]] +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#RESR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 %[[#RESI]], 1 + +// CHECK: } + +void unary_not() { + cd1 = ~cd1; + ci1 = ~ci1; +} + +// CLANG: @unary_not +// CPPLANG: @_Z9unary_notv + +// CIRGEN: %{{.+}} = cir.unary(not, %{{.+}}) : !cir.complex, !cir.complex +// CIRGEN: %{{.+}} = cir.unary(not, %{{.+}}) : !cir.complex, !cir.complex + +// CIR: %[[#OPR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#OPI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#RESI:]] = cir.unary(minus, %[[#OPI]]) : !cir.double, !cir.double +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#OPR]], %[[#RESI]] : !cir.double -> !cir.complex + +// CIR: %[[#OPR:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#OPI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#RESI:]] = cir.unary(minus, %[[#OPI]]) : !s32i, !s32i +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#OPR]], %[[#RESI]] : !s32i -> !cir.complex + +// LLVM: %[[#OPR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#OPI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#RESI:]] = fneg double %[[#OPI]] +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#OPR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#RESI]], 1 + +// LLVM: %[[#OPR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#OPI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#RESI:]] = sub i32 0, %[[#OPI]] +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#OPR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 %[[#RESI]], 1 + +// CHECK: } + +void builtin_conj() { + cd1 = __builtin_conj(cd1); +} + +// CLANG: @builtin_conj +// CPPLANG: @_Z12builtin_conjv + +// CIRGEN: %{{.+}} = cir.unary(not, %{{.+}}) : !cir.complex, !cir.complex + +// CIR: %[[#OPR:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#OPI:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#RESI:]] = cir.unary(minus, %[[#OPI]]) : !cir.double, !cir.double +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#OPR]], %[[#RESI]] : !cir.double -> !cir.complex + +// LLVM: %[[#OPR:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#OPI:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#RESI:]] = fneg double %[[#OPI]] +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#OPR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#RESI]], 1 + +// CHECK: } From a3e0cb8fb143b840a78c6906c18c9ab9eec5ed28 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 23 Jul 2024 17:35:15 -0700 Subject: [PATCH 1700/2301] [CIR][CIRGen][NFC] Exceptions: Remove cir.try in favor of cir.call exception Part of cleaning up for CFG flattening and LLVM lowering --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 27 ++-- clang/include/clang/CIR/Dialect/IR/CIROps.td | 62 +------- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 9 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 134 +++--------------- clang/test/CIR/CodeGen/try-catch.cpp | 2 +- clang/test/CIR/IR/try.cir | 2 +- clang/test/CIR/Lowering/try-catch.cpp | 2 +- 7 files changed, 42 insertions(+), 196 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index b5a88008446b..4208aa5aaed3 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -616,14 +616,14 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { extraFnAttr); } - mlir::cir::TryCallOp - createTryCallOp(mlir::Location loc, mlir::Value exception, + mlir::cir::CallOp + createTryCallOp(mlir::Location loc, mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), mlir::Type returnType = mlir::cir::VoidType(), mlir::ValueRange operands = mlir::ValueRange(), mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { - mlir::cir::TryCallOp tryCallOp = create( - loc, callee, exception, returnType, operands); + mlir::cir::CallOp tryCallOp = create( + loc, callee, returnType, operands, getUnitAttr()); if (extraFnAttr) { tryCallOp->setAttr("extra_attrs", extraFnAttr); } else { @@ -635,24 +635,23 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return tryCallOp; } - mlir::cir::TryCallOp + mlir::cir::CallOp createTryCallOp(mlir::Location loc, mlir::cir::FuncOp callee, - mlir::Value exception, mlir::ValueRange operands, + mlir::ValueRange operands, mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { - return createTryCallOp(loc, exception, mlir::SymbolRefAttr::get(callee), + return createTryCallOp(loc, mlir::SymbolRefAttr::get(callee), callee.getFunctionType().getReturnType(), operands, extraFnAttr); } - mlir::cir::TryCallOp createIndirectTryCallOp(mlir::Location loc, - mlir::Value ind_target, - mlir::Value exception, - mlir::cir::FuncType fn_type, - mlir::ValueRange operands) { + mlir::cir::CallOp createIndirectTryCallOp(mlir::Location loc, + mlir::Value ind_target, + mlir::cir::FuncType fn_type, + mlir::ValueRange operands) { llvm::SmallVector resOperands({ind_target}); resOperands.append(operands.begin(), operands.end()); - return createTryCallOp(loc, exception, mlir::SymbolRefAttr(), - fn_type.getReturnType(), resOperands); + return createTryCallOp(loc, mlir::SymbolRefAttr(), fn_type.getReturnType(), + resOperands); } }; diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b3d4583e8e72..dde43d0a733c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2990,6 +2990,7 @@ class CIR_CallOp extra_traits = []> : OptionalAttr:$callee, Variadic:$arg_ops, ExtraFuncAttr:$extra_attrs, + UnitAttr:$exception, OptionalAttr:$ast ); } @@ -3029,10 +3030,13 @@ def CallOp : CIR_CallOp<"call"> { let builders = [ OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType, - CArg<"ValueRange", "{}">:$operands), [{ + CArg<"ValueRange", "{}">:$operands, + CArg<"UnitAttr", "{}">:$exception), [{ $_state.addOperands(operands); if (callee) $_state.addAttribute("callee", callee); + if (exception) // $_builder + $_state.addAttribute("exception", exception); if (resType && !isa(resType)) $_state.addTypes(resType); }]>, @@ -3047,62 +3051,6 @@ def CallOp : CIR_CallOp<"call"> { ]; } -//===----------------------------------------------------------------------===// -// TryCallOp -//===----------------------------------------------------------------------===// - -def TryCallOp : CIR_CallOp<"try_call"> { - let summary = "try call operation"; - let description = [{ - Similar to `cir.call`, direct and indirect properties are the same. The - difference relies in an exception object address operand. It's encoded - as the first operands or second (for indirect calls). - - Similarly to `cir.call`, avoid using `mlir::Operation` methods to walk the - operands for this operation, instead use the methods provided by - `CIRCallOpInterface`. - - Example: - - ```mlir - cir.try { - %0 = cir.alloca !cir.ptr, !cir.ptr> - ... - %r = cir.try_call %exception(%0) @division(%1, %2) - } ... - ``` - }]; - - let arguments = !con((ins - ExceptionInfoPtrPtr:$exceptionInfo - ), commonArgs); - - let results = (outs Variadic); - - let builders = [ - OpBuilder<(ins "Value":$ind_target, "mlir::Value":$exception, - "FuncType":$fn_type, - CArg<"ValueRange", "{}">:$operands), [{ - $_state.addOperands(ValueRange{exception}); - $_state.addOperands(ValueRange{ind_target}); - $_state.addOperands(operands); - if (!fn_type.isVoid()) - $_state.addTypes(fn_type.getReturnType()); - }]>, - OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Value":$exception, - "mlir::Type":$resType, CArg<"ValueRange", "{}">:$operands), - [{ - $_state.addOperands(ValueRange{exception}); - $_state.addOperands(operands); - if (callee) - $_state.addAttribute("callee", callee); - if (resType && !isa(resType)) - $_state.addTypes(resType); - }]>]; - - let hasVerifier = 1; -} - //===----------------------------------------------------------------------===// // AwaitOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 5966a5d3f7e6..6d75f1c8d2fd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -454,13 +454,12 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, if (InvokeDest) { auto addr = CGF.currLexScope->getExceptionInfo().addr; - mlir::cir::TryCallOp tryCallOp; + mlir::cir::CallOp tryCallOp; if (indirectFuncTy) { - tryCallOp = builder.createIndirectTryCallOp( - callLoc, addr, indirectFuncVal, indirectFuncTy, CIRCallArgs); + tryCallOp = builder.createIndirectTryCallOp(callLoc, indirectFuncVal, + indirectFuncTy, CIRCallArgs); } else { - tryCallOp = - builder.createTryCallOp(callLoc, directFuncOp, addr, CIRCallArgs); + tryCallOp = builder.createTryCallOp(callLoc, directFuncOp, CIRCallArgs); } tryCallOp->setAttr("extra_attrs", extraFnAttrs); return tryCallOp; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d099b662fb14..24643ceb2047 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2348,15 +2348,9 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { return success(); } -static ::mlir::ParseResult parseCallCommon( - ::mlir::OpAsmParser &parser, ::mlir::OperationState &result, - llvm::StringRef extraAttrsAttrName, - llvm::function_ref<::mlir::ParseResult(::mlir::OpAsmParser &, - ::mlir::OperationState &)> - customOpHandler = - [](::mlir::OpAsmParser &parser, ::mlir::OperationState &result) { - return mlir::success(); - }) { +static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result, + llvm::StringRef extraAttrsAttrName) { mlir::FlatSymbolRefAttr calleeAttr; llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> ops; llvm::SMLoc opsLoc; @@ -2364,8 +2358,8 @@ static ::mlir::ParseResult parseCallCommon( llvm::ArrayRef<::mlir::Type> operandsTypes; llvm::ArrayRef<::mlir::Type> allResultTypes; - if (customOpHandler(parser, result)) - return ::mlir::failure(); + if (::mlir::succeeded(parser.parseOptionalKeyword("exception"))) + result.addAttribute("exception", parser.getBuilder().getUnitAttr()); // If we cannot parse a string callee, it means this is an indirect call. if (!parser.parseOptionalAttribute(calleeAttr, "callee", result.attributes) @@ -2419,16 +2413,19 @@ static ::mlir::ParseResult parseCallCommon( return ::mlir::success(); } -void printCallCommon( - Operation *op, mlir::Value indirectCallee, mlir::FlatSymbolRefAttr flatSym, - ::mlir::OpAsmPrinter &state, - ::mlir::cir::ExtraFuncAttributesAttr extraAttrs, - llvm::function_ref customOpHandler = []() {}) { +void printCallCommon(Operation *op, mlir::Value indirectCallee, + mlir::FlatSymbolRefAttr flatSym, + ::mlir::OpAsmPrinter &state, + ::mlir::cir::ExtraFuncAttributesAttr extraAttrs, + ::mlir::UnitAttr exception = {}) { state << ' '; auto callLikeOp = mlir::cast(op); auto ops = callLikeOp.getArgOperands(); + if (exception) + state << "exception "; + if (flatSym) { // Direct calls state.printAttributeWithoutType(flatSym); } else { // Indirect calls @@ -2443,6 +2440,8 @@ void printCallCommon( elidedAttrs.push_back("callee"); elidedAttrs.push_back("ast"); elidedAttrs.push_back("extra_attrs"); + elidedAttrs.push_back("exception"); + state.printOptionalAttrDict(op->getAttrs(), elidedAttrs); state << ' ' << ":"; state << ' '; @@ -2467,108 +2466,9 @@ ::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, void CallOp::print(::mlir::OpAsmPrinter &state) { mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; + mlir::UnitAttr exception = getExceptionAttr(); printCallCommon(*this, indirectCallee, getCalleeAttr(), state, - getExtraAttrs()); -} - -//===----------------------------------------------------------------------===// -// TryCallOp -//===----------------------------------------------------------------------===// - -mlir::Value cir::TryCallOp::getIndirectCall() { - // First operand is the exception pointer, skip it - assert(isIndirect()); - return getOperand(1); -} - -mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_begin() { - auto arg_begin = operand_begin(); - // First operand is the exception pointer, skip it. - arg_begin++; - if (isIndirect()) - arg_begin++; - - // FIXME(cir): for this and all the other calculations in the other methods: - // we currently have no basic block arguments on cir.try_call, but if it gets - // to that, this needs further adjustment. - return arg_begin; -} -mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_end() { - return operand_end(); -} - -/// Return the operand at index 'i', accounts for indirect call. -Value cir::TryCallOp::getArgOperand(unsigned i) { - // First operand is the exception pointer, skip it. - i++; - if (isIndirect()) - i++; - return getOperand(i); -} -/// Return the number of operands, , accounts for indirect call. -unsigned cir::TryCallOp::getNumArgOperands() { - unsigned numOperands = this->getOperation()->getNumOperands(); - // First operand is the exception pointer, skip it. - numOperands--; - if (isIndirect()) - numOperands--; - return numOperands; -} - -LogicalResult -cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { - return verifyCallCommInSymbolUses(*this, symbolTable); -} - -LogicalResult cir::TryCallOp::verify() { return mlir::success(); } - -::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, - ::mlir::OperationState &result) { - return parseCallCommon( - parser, result, getExtraAttrsAttrName(result.name), - [](::mlir::OpAsmParser &parser, - ::mlir::OperationState &result) -> ::mlir::ParseResult { - ::mlir::OpAsmParser::UnresolvedOperand exceptionRawOperands[1]; - ::llvm::ArrayRef<::mlir::OpAsmParser::UnresolvedOperand> - exceptionOperands(exceptionRawOperands); - ::llvm::SMLoc exceptionOperandsLoc; - (void)exceptionOperandsLoc; - - if (parser.parseKeyword("exception").failed()) - return parser.emitError(parser.getCurrentLocation(), - "expected 'exception' keyword here"); - - if (parser.parseLParen().failed()) - return parser.emitError(parser.getCurrentLocation(), "expected '('"); - - exceptionOperandsLoc = parser.getCurrentLocation(); - if (parser.parseOperand(exceptionRawOperands[0])) - return ::mlir::failure(); - - if (parser.parseRParen().failed()) - return parser.emitError(parser.getCurrentLocation(), "expected ')'"); - - auto &builder = parser.getBuilder(); - auto exceptionPtrPtrTy = cir::PointerType::get( - builder.getContext(), - cir::PointerType::get( - builder.getContext(), - builder.getType<::mlir::cir::ExceptionInfoType>())); - if (parser.resolveOperands(exceptionOperands, exceptionPtrPtrTy, - exceptionOperandsLoc, result.operands)) - return ::mlir::failure(); - - return ::mlir::success(); - }); -} - -void TryCallOp::print(::mlir::OpAsmPrinter &state) { - state << " exception("; - state << getExceptionInfo(); - state << ")"; - mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; - printCallCommon(*this, indirectCallee, getCalleeAttr(), state, - getExtraAttrs()); + getExtraAttrs(), exception); } //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index bdb988e8169b..1968701fab92 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -18,7 +18,7 @@ unsigned long long tc() { // CHECK: %[[local_a:.*]] = cir.alloca !s32i, !cir.ptr, ["a", init] int a = 4; z = division(x, y); - // CHECK: %[[div_res:.*]] = cir.try_call exception(%[[eh_info]]) @_Z8divisionii({{.*}}) : (!cir.ptr>, !s32i, !s32i) -> !cir.double + // CHECK: %[[div_res:.*]] = cir.call exception @_Z8divisionii({{.*}}) : (!s32i, !s32i) -> !cir.double a++; } catch (int idx) { diff --git a/clang/test/CIR/IR/try.cir b/clang/test/CIR/IR/try.cir index 3bcb44e070bd..21632b81d550 100644 --- a/clang/test/CIR/IR/try.cir +++ b/clang/test/CIR/IR/try.cir @@ -13,7 +13,7 @@ module { %11 = cir.scope { %10 = cir.scope { %0 = cir.alloca !cir.ptr, !cir.ptr>, ["exception_info"] {alignment = 16 : i64} - %d = cir.try_call exception(%0) @div(%x, %y) : (!s32i, !s32i) -> !s32i + %d = cir.call exception @div(%x, %y) : (!s32i, !s32i) -> !s32i %1 = cir.load %0 : !cir.ptr>, !cir.ptr cir.yield %1 : !cir.ptr } : !cir.ptr diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 4a11b3a36e19..02c5fff35a3b 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -18,7 +18,7 @@ unsigned long long tc() { // CIR_FLAT: cir.br ^bb2 // CIR_FLAT: ^bb2: // pred: ^bb1 // CIR_FLAT: %[[EH_PTR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__exception_ptr"] - // CIR_FLAT: cir.try_call exception(%[[EH_PTR]]) @_Z8divisionii( + // CIR_FLAT: cir.call exception @_Z8divisionii( z = division(x, y); a++; From 29d5175f7e46070d7338b032d5f00385fe583618 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 23 Jul 2024 17:58:46 -0700 Subject: [PATCH 1701/2301] [CIR][CIRGen][NFC] Exceptions: tide up lexical scope information needed Also cleanup usage of that information and rename to more updated terms. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 2 -- clang/lib/CIR/CodeGen/CIRGenException.cpp | 23 +++++++-------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 29 +++++++++---------- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 3 -- 4 files changed, 24 insertions(+), 33 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 6d75f1c8d2fd..ed8cdc1e60d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -452,8 +452,6 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, auto &builder = CGF.getBuilder(); if (InvokeDest) { - auto addr = CGF.currLexScope->getExceptionInfo().addr; - mlir::cir::CallOp tryCallOp; if (indirectFuncTy) { tryCallOp = builder.createIndirectTryCallOp(callLoc, indirectFuncVal, diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index b89632d9682d..d6924b01daab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -256,10 +256,10 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup) { // Just like some other try/catch related logic: return the basic block // pointer but only use it to denote we're tracking things, but there // shouldn't be any changes to that block after work done in this function. - auto catchOp = currLexScope->getExceptionInfo().catchOp; - unsigned numCatchRegions = catchOp.getCatchRegions().size(); - assert(catchOp && numCatchRegions && "expected at least one region"); - auto &fallbackRegion = catchOp.getCatchRegions()[numCatchRegions - 1]; + auto tryOp = currLexScope->getTry(); + unsigned numCatchRegions = tryOp.getCatchRegions().size(); + assert(tryOp && numCatchRegions && "expected at least one region"); + auto &fallbackRegion = tryOp.getCatchRegions()[numCatchRegions - 1]; auto *resumeBlock = &fallbackRegion.getBlocks().back(); if (!resumeBlock->empty()) @@ -277,7 +277,7 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup) { llvm_unreachable("NYI"); } - getBuilder().create(catchOp.getLoc()); + getBuilder().create(tryOp.getLoc()); getBuilder().restoreInsertionPoint(ip); return resumeBlock; } @@ -372,7 +372,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { getBuilder().getInsertionBlock()}; { - lexScope.setExceptionInfo({exceptionInfoInsideTry, tryScope}); + lexScope.setAsTry(tryScope); // Attach the basic blocks for the catch regions. enterCXXTryStmt(S, tryScope); // Emit the body for the `try {}` part. @@ -382,7 +382,6 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { } { - lexScope.setExceptionInfo({nullptr, tryScope}); // Emit catch clauses. exitCXXTryStmt(S); } @@ -453,14 +452,14 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, } void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S, - mlir::cir::TryOp catchOp, + mlir::cir::TryOp tryOp, bool IsFnTryBlock) { unsigned NumHandlers = S.getNumHandlers(); EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers); for (unsigned I = 0; I != NumHandlers; ++I) { const CXXCatchStmt *C = S.getHandler(I); - mlir::Block *Handler = &catchOp.getCatchRegions()[I].getBlocks().front(); + mlir::Block *Handler = &tryOp.getCatchRegions()[I].getBlocks().front(); if (C->getExceptionDecl()) { // FIXME: Dropping the reference type on the type into makes it // impossible to correctly implement catch-by-reference @@ -500,7 +499,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { CatchScope.clearHandlerBlocks(); EHStack.popCatch(); // Drop all basic block from all catch regions. - auto tryOp = currLexScope->getExceptionInfo().catchOp; + mlir::cir::TryOp tryOp = currLexScope->getTry(); SmallVector eraseBlocks; for (mlir::Region &r : tryOp.getCatchRegions()) { if (r.empty()) @@ -634,10 +633,10 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { // that leads to this "landing pad" creation site. Otherwise, exceptions // are enabled but a throwing function is called anyways (common pattern // with function local static initializers). - auto tryOp = currLexScope->getExceptionInfo().catchOp; - if (!tryOp) { + if (!currLexScope->isTry()) { llvm_unreachable("NYI"); } + mlir::cir::TryOp tryOp = currLexScope->getTry(); { // Save the current CIR generation state. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 6d9fd1c32962..1aaebee77869 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -312,13 +312,6 @@ class CIRGenFunction : public CIRGenTypeCache { using SymTableScopeTy = llvm::ScopedHashTableScope; - /// Try/Catch: calls within try statements need to refer to local - /// allocas for the exception info - struct CIRExceptionInfo { - mlir::Value addr{}; - mlir::cir::TryOp catchOp{}; - }; - enum class EvaluationOrder { ///! No langauge constraints on evaluation order. Default, @@ -1925,14 +1918,15 @@ class CIRGenFunction : public CIRGenTypeCache { LexicalScope *ParentScope = nullptr; - // If there's exception information for this scope, store it. - CIRExceptionInfo exInfo{}; + // Holds actual value for ScopeKind::Try + mlir::cir::TryOp tryOp = nullptr; // FIXME: perhaps we can use some info encoded in operations. enum Kind { Regular, // cir.if, cir.scope, if_regions Ternary, // cir.ternary - Switch // cir.switch + Switch, // cir.switch + Try, // cir.try } ScopeKind = Regular; // Track scope return value. @@ -1993,9 +1987,18 @@ class CIRGenFunction : public CIRGenTypeCache { bool isRegular() { return ScopeKind == Kind::Regular; } bool isSwitch() { return ScopeKind == Kind::Switch; } bool isTernary() { return ScopeKind == Kind::Ternary; } + bool isTry() { return ScopeKind == Kind::Try; } + mlir::cir::TryOp getTry() { + assert(isTry()); + return tryOp; + } void setAsSwitch() { ScopeKind = Kind::Switch; } void setAsTernary() { ScopeKind = Kind::Ternary; } + void setAsTry(mlir::cir::TryOp op) { + ScopeKind = Kind::Try; + tryOp = op; + } // --- // Goto handling @@ -2021,12 +2024,6 @@ class CIRGenFunction : public CIRGenTypeCache { return CleanupBlock; } - // --- - // Exception handling - // --- - CIRExceptionInfo &getExceptionInfo() { return exInfo; } - void setExceptionInfo(const CIRExceptionInfo &info) { exInfo = info; } - // --- // Return handling // --- diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index b9769e22f47b..3bf7159f0bf5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -663,9 +663,6 @@ static mlir::Value CallBeginCatch(CIRGenFunction &CGF, mlir::Type ParamTy, /// parameter during catch initialization. static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, Address ParamAddr, SourceLocation Loc) { - // Load the exception from where the landing pad saved it. - auto Exn = CGF.currLexScope->getExceptionInfo().addr; - CanQualType CatchType = CGF.CGM.getASTContext().getCanonicalType(CatchParam.getType()); auto CIRCatchTy = CGF.convertTypeForMem(CatchType); From ee72f4166fbe6f804f2ced91615b02085d8c7b5a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 23 Jul 2024 18:04:43 -0700 Subject: [PATCH 1702/2301] [CIR][CIRGen][NFC] Exceptions: remove exception ptr alloca, not used anymore All this information is now implicit as part of using cir.try. Note that we still keep the cir.eh.info around, since it will be used when we expand catch clauses. --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 11 ----------- clang/test/CIR/CodeGen/try-catch.cpp | 1 - clang/test/CIR/Lowering/try-catch.cpp | 1 - 3 files changed, 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index d6924b01daab..d39a4eb9953d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -325,10 +325,6 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { auto tryLoc = getLoc(S.getBeginLoc()); mlir::OpBuilder::InsertPoint beginInsertTryBody; - auto ehPtrTy = mlir::cir::PointerType::get( - getBuilder().getContext(), - getBuilder().getType<::mlir::cir::ExceptionInfoType>()); - mlir::Value exceptionInfoInsideTry; // Create the scope to represent only the C/C++ `try {}` part. However, // don't populate right away. Reserve some space to store the exception @@ -337,13 +333,6 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { auto tryScope = builder.create( tryLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - // Allocate space for our exception info that might be passed down - // to `cir.try_call` everytime a call happens. - exceptionInfoInsideTry = b.create( - loc, /*addr type*/ getBuilder().getPointerTo(ehPtrTy), - /*var type*/ ehPtrTy, "__exception_ptr", - CGM.getSize(CharUnits::One()), nullptr); - beginInsertTryBody = getBuilder().saveInsertionPoint(); }, // Don't emit the code right away for catch clauses, for diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 1968701fab92..4042afb08dcb 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -14,7 +14,6 @@ unsigned long long tc() { // CHECK: %[[idx:.*]] = cir.alloca !s32i, !cir.ptr, ["idx"] // CHECK: cir.try { - // CHECK: %[[eh_info:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__exception_ptr"] // CHECK: %[[local_a:.*]] = cir.alloca !s32i, !cir.ptr, ["a", init] int a = 4; z = division(x, y); diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 02c5fff35a3b..f13bc578a36b 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -17,7 +17,6 @@ unsigned long long tc() { // CIR_FLAT: ^bb1: // pred: ^bb0 // CIR_FLAT: cir.br ^bb2 // CIR_FLAT: ^bb2: // pred: ^bb1 - // CIR_FLAT: %[[EH_PTR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__exception_ptr"] // CIR_FLAT: cir.call exception @_Z8divisionii( z = division(x, y); a++; From 094d682bb299622c8ad506316e7a3c3cd7e33eaa Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 23 Jul 2024 18:57:45 -0700 Subject: [PATCH 1703/2301] [CIR][CIRGen][FlattenCFG] Exceptions: incremental work for handling catch clauses - Introduce a new operation: `cir.inflight_exception`, which returns a opaque exception object value. This will later be decomposed into selector and exception pointer. - Add a still incomplete basic block flattening for catch clauses, but enough to add tests. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 20 +++++++++++++ .../include/clang/CIR/Dialect/IR/CIRTypes.td | 12 ++++---- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 28 +++++++++++++++---- clang/test/CIR/IR/try.cir | 18 ++++++------ clang/test/CIR/Lowering/try-catch.cpp | 8 +++++- 5 files changed, 63 insertions(+), 23 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index dde43d0a733c..bd5ac3a67d73 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3232,6 +3232,26 @@ def CatchParamOp : CIR_Op<"catch_param"> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// InflightEhOp +//===----------------------------------------------------------------------===// + +def InflightEhOp : CIR_Op<"inflight_exception"> { + let summary = "Materialize the catch clause formal parameter"; + let description = [{ + `cir.inflight_exception` returns an exception coming from a + `cir.call exception` that might throw. The returned value is opaque + but can be further decomposed by other operations. + }]; + + let results = (outs CIR_ExceptionType:$exception); + let assemblyFormat = [{ + attr-dict + }]; + + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // CopyOp //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 0ffd22ee9620..97c8c808f857 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -418,13 +418,13 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { // //===----------------------------------------------------------------------===// -def CIR_ExceptionInfo : CIR_Type<"ExceptionInfo", "eh.info"> { +def CIR_ExceptionType : CIR_Type<"ExceptionInfo", "exception"> { let summary = "CIR exception info"; let description = [{ - Represents the content necessary for a `cir.call` to pass back an exception - object pointer + some extra selector information. This type is required for - some exception related operations, like `cir.catch`, `cir.eh.selector_slot` - and `cir.eh.slot`. + In presence of an inflight exception, this type holds all specific + information for an exception: the associated type id, and the exception + object pointer. These are materialzed from this type through other + specific operations. }]; } @@ -549,7 +549,7 @@ def CIR_StructType : Type($_self)">, def CIR_AnyType : AnyTypeOf<[ CIR_IntType, CIR_PointerType, CIR_DataMemberType, CIR_BoolType, CIR_ArrayType, - CIR_VectorType, CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionInfo, + CIR_VectorType, CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionType, CIR_AnyFloat, CIR_FP16, CIR_BFloat16, CIR_ComplexType ]>; diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 40635cb7bbc8..4520d8a90b3a 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -188,11 +188,13 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { return mlir::success(); } + // TODO: keep track of cir.try_call before we flatten. + // Split the current block before the TryOp to create the inlining // point. - auto *currentBlock = rewriter.getInsertionBlock(); + auto *beforeTryScopeBlock = rewriter.getInsertionBlock(); auto *remainingOpsBlock = - rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); + rewriter.splitBlock(beforeTryScopeBlock, rewriter.getInsertionPoint()); mlir::Block *continueBlock; continueBlock = remainingOpsBlock; @@ -202,15 +204,29 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { rewriter.inlineRegionBefore(tryOp.getTryRegion(), continueBlock); // Branch into the body of the region. - rewriter.setInsertionPointToEnd(currentBlock); + rewriter.setInsertionPointToEnd(beforeTryScopeBlock); rewriter.create(loc, mlir::ValueRange(), beforeBody); // Replace the tryOp return with a branch that jumps out of the body. rewriter.setInsertionPointToEnd(afterBody); auto yieldOp = cast(afterBody->getTerminator()); - rewriter.replaceOpWithNewOp(yieldOp, continueBlock); - - // TODO: handle the catch clauses and cir.try_call while here. + mlir::Block *beforeCatch = rewriter.getInsertionBlock(); + auto *catchBegin = + rewriter.splitBlock(beforeCatch, rewriter.getInsertionPoint()); + rewriter.setInsertionPointToEnd(beforeCatch); + + // FIXME: first step here is to build the landing pad like block, but + // since cir.call exception isn't yet lowered, jump from the try block + // to the catch block as a placeholder for now. + rewriter.replaceOpWithNewOp(yieldOp, catchBegin); + + // Start the landing pad by getting the inflight exception information, + // and jumping to the catchBegin phase. + rewriter.setInsertionPointToEnd(catchBegin); + InflightEhOp exception = rewriter.create( + loc, mlir::cir::ExceptionInfoType::get(rewriter.getContext())); + // FIXME: TBD emission. + rewriter.create(loc, continueBlock); rewriter.eraseOp(tryOp); return mlir::success(); diff --git a/clang/test/CIR/IR/try.cir b/clang/test/CIR/IR/try.cir index 21632b81d550..9104cebd19db 100644 --- a/clang/test/CIR/IR/try.cir +++ b/clang/test/CIR/IR/try.cir @@ -9,16 +9,14 @@ module { cir.return %3 : !s32i } - cir.func @foo(%x : !s32i, %y : !s32i) -> !cir.ptr { - %11 = cir.scope { - %10 = cir.scope { - %0 = cir.alloca !cir.ptr, !cir.ptr>, ["exception_info"] {alignment = 16 : i64} + cir.func @foo(%x : !s32i, %y : !s32i) -> () { + cir.scope { + cir.scope { %d = cir.call exception @div(%x, %y) : (!s32i, !s32i) -> !s32i - %1 = cir.load %0 : !cir.ptr>, !cir.ptr - cir.yield %1 : !cir.ptr - } : !cir.ptr - cir.yield %10 : !cir.ptr - } : !cir.ptr - cir.return %11 : !cir.ptr + cir.yield + } + cir.yield + } + cir.return } } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index f13bc578a36b..1cc4aebfa04f 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -21,7 +21,13 @@ unsigned long long tc() { z = division(x, y); a++; - // CIR_FLAT: cir.br ^bb3 + // FIXME: this is temporary, should branch directly to ^bb4 + // but if done now it would be stripped by MLIR simplification. + // CIR_FLAT: cir.br ^bb3 + + // CIR_FLAT: ^bb3: // pred: ^bb2 + // CIR_FLAT: %14 = cir.inflight_exception + // CIR_FLAT: cir.br ^bb4 } catch (int idx) { z = 98; idx++; From 3351e4abc5986e1edc5590c2ad8d43c1c9006c42 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 24 Jul 2024 15:14:38 -0700 Subject: [PATCH 1704/2301] [CIR][CIRGen][FlattenCFG] Exceptions: introduce cir.eh.selector --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 24 ++++++++++++++++--- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 20 ++++++++++++---- clang/test/CIR/Lowering/try-catch.cpp | 4 +++- 3 files changed, 40 insertions(+), 8 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index bd5ac3a67d73..da17ecc9ce36 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3233,13 +3233,13 @@ def CatchParamOp : CIR_Op<"catch_param"> { } //===----------------------------------------------------------------------===// -// InflightEhOp +// Exception related: EhInflightOp, EhSelectorOp //===----------------------------------------------------------------------===// -def InflightEhOp : CIR_Op<"inflight_exception"> { +def EhInflightOp : CIR_Op<"eh.inflight_exception"> { let summary = "Materialize the catch clause formal parameter"; let description = [{ - `cir.inflight_exception` returns an exception coming from a + `cir.eh.inflight_exception` returns an exception coming from a `cir.call exception` that might throw. The returned value is opaque but can be further decomposed by other operations. }]; @@ -3252,6 +3252,24 @@ def InflightEhOp : CIR_Op<"inflight_exception"> { let hasVerifier = 0; } +def EhSelectorOp : CIR_Op<"eh.selector"> { + let summary = "Materialize the eh selector"; + let description = [{ + `cir.eh.inflight_exception` returns an exception coming from a + `cir.call exception ...` that might throw. `cir.eh.selector` returns the + runtime selector value (type id) for the needed , which represents the type id used by + operations to compare against other type ids. + }]; + + let arguments = (ins CIR_ExceptionType:$exception); + let results = (outs UInt32:$selector); + let assemblyFormat = [{ + $exception attr-dict + }]; + + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // CopyOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 4520d8a90b3a..f203a11886ce 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -220,12 +220,24 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // to the catch block as a placeholder for now. rewriter.replaceOpWithNewOp(yieldOp, catchBegin); - // Start the landing pad by getting the inflight exception information, - // and jumping to the catchBegin phase. + // Start the landing pad by getting the inflight exception information. rewriter.setInsertionPointToEnd(catchBegin); - InflightEhOp exception = rewriter.create( + auto exception = rewriter.create( loc, mlir::cir::ExceptionInfoType::get(rewriter.getContext())); - // FIXME: TBD emission. + + // TODO: direct catch all needs no dispatch. + + // Handle dispatch. In could in theory use a switch, but let's just + // mimic LLVM more closely since we have no specific thing to achieve + // doing that (might not play as well with existing optimizers either). + auto *dispatchBlock = + rewriter.splitBlock(catchBegin, rewriter.getInsertionPoint()); + rewriter.setInsertionPointToEnd(catchBegin); + rewriter.create(loc, dispatchBlock); + + // Fill in dispatcher. + rewriter.setInsertionPointToEnd(dispatchBlock); + auto selector = rewriter.create(loc, exception); rewriter.create(loc, continueBlock); rewriter.eraseOp(tryOp); diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 1cc4aebfa04f..36e86687992d 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -26,8 +26,10 @@ unsigned long long tc() { // CIR_FLAT: cir.br ^bb3 // CIR_FLAT: ^bb3: // pred: ^bb2 - // CIR_FLAT: %14 = cir.inflight_exception + // CIR_FLAT: %[[EH:.*]] = cir.eh.inflight_exception // CIR_FLAT: cir.br ^bb4 + // CIR_FLAT: ^bb4: // pred: ^bb3 + // CIR_FLAT: %[[SEL:.*]] = cir.eh.selector %[[EH]] } catch (int idx) { z = 98; idx++; From e485c434e1a41e0b676038ab7e96a99c3dc3a7ab Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 25 Jul 2024 10:10:23 +0800 Subject: [PATCH 1705/2301] [CIR][Transforms][NFC] Use `unique_ptr` to encapsulate LowerModule (#752) Currently `LowerModule` mimics `CodeGenModule` and uses many raw references. It cannot be moved or copied. Value semantic does not fit the need. For example, we cannot pass LowerModule around. A better practice would be to use `unique_ptr` to encapsulate it. In the future, we hold its ownership in some long-lived contexts (it's `CodeGeneratorImpl` for `CodeGenModule`) and pass references to it around safely. --- clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp | 7 ++++--- .../CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp | 6 ++++-- .../CIR/Dialect/Transforms/TargetLowering/LowerModule.h | 3 ++- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 45bcbe15f7a4..3a4b9b397c5b 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -36,7 +36,8 @@ struct CallConvLoweringPattern : public OpRewritePattern { return op.emitError("function has no AST information"); auto modOp = op->getParentOfType(); - LowerModule lowerModule = createLowerModule(modOp, rewriter); + std::unique_ptr lowerModule = + createLowerModule(modOp, rewriter); // Rewrite function calls before definitions. This should be done before // lowering the definition. @@ -44,14 +45,14 @@ struct CallConvLoweringPattern : public OpRewritePattern { if (calls.has_value()) { for (auto call : calls.value()) { auto callOp = cast(call.getUser()); - if (lowerModule.rewriteFunctionCall(callOp, op).failed()) + if (lowerModule->rewriteFunctionCall(callOp, op).failed()) return failure(); } } // TODO(cir): Instead of re-emmiting every load and store, bitcast arguments // and return values to their ABI-specific counterparts when possible. - if (lowerModule.rewriteFunctionDefinition(op).failed()) + if (lowerModule->rewriteFunctionDefinition(op).failed()) return failure(); return success(); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index e0e53edfc1e2..2d82fa58a24f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -217,7 +217,8 @@ LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { } // TODO: not to create it every time -LowerModule createLowerModule(ModuleOp module, PatternRewriter &rewriter) { +std::unique_ptr createLowerModule(ModuleOp module, + PatternRewriter &rewriter) { // Fetch the LLVM data layout string. auto dataLayoutStr = cast( module->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName())); @@ -237,7 +238,8 @@ LowerModule createLowerModule(ModuleOp module, PatternRewriter &rewriter) { auto context = CIRLowerContext(module, langOpts); context.initBuiltinTypes(*targetInfo); - return LowerModule(context, module, dataLayoutStr, *targetInfo, rewriter); + return std::make_unique(context, module, dataLayoutStr, + *targetInfo, rewriter); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index 35870c716c88..f088086d70c6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -91,7 +91,8 @@ class LowerModule { LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp); }; -LowerModule createLowerModule(ModuleOp module, PatternRewriter &rewriter); +std::unique_ptr createLowerModule(ModuleOp module, + PatternRewriter &rewriter); } // namespace cir } // namespace mlir From e37dafb328ffa075bf0901e5530abffe8c0d3344 Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Thu, 25 Jul 2024 15:39:18 +0800 Subject: [PATCH 1706/2301] [CIR][Lowering][NFC] Move helper functions to LoweringHelpers.cpp (#754) This commit moves array initial value lowering relative helper functions from DirectToLLVM/LowerToLLVM.cpp to LoweringHelpers.cpp. So ThroughMLIR/LowerCIRToMLIR.cpp can reuse the helper functions to enable array with initial value lowering in later patch. This is a refactoring without functional changes. --- clang/include/clang/CIR/LoweringHelpers.h | 43 ++++++ clang/lib/CIR/Lowering/CMakeLists.txt | 40 +++++ .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 1 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 129 +---------------- clang/lib/CIR/Lowering/LoweringHelpers.cpp | 137 ++++++++++++++++++ 5 files changed, 222 insertions(+), 128 deletions(-) create mode 100644 clang/include/clang/CIR/LoweringHelpers.h create mode 100644 clang/lib/CIR/Lowering/LoweringHelpers.cpp diff --git a/clang/include/clang/CIR/LoweringHelpers.h b/clang/include/clang/CIR/LoweringHelpers.h new file mode 100644 index 000000000000..01b9b4301c3a --- /dev/null +++ b/clang/include/clang/CIR/LoweringHelpers.h @@ -0,0 +1,43 @@ +//====- LoweringHelpers.h - Lowering helper functions ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares helper functions for lowering from CIR to LLVM or MLIR. +// +//===----------------------------------------------------------------------===// +#ifndef LLVM_CLANG_CIR_LOWERINGHELPERS_H +#define LLVM_CLANG_CIR_LOWERINGHELPERS_H +#include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" + +mlir::DenseElementsAttr +convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, + mlir::Type type); + +template StorageTy getZeroInitFromType(mlir::Type Ty); +template <> mlir::APInt getZeroInitFromType(mlir::Type Ty); +template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty); + +mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity); + +template +void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, + llvm::SmallVectorImpl &values); + +template +mlir::DenseElementsAttr +convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, + const llvm::SmallVectorImpl &dims, + mlir::Type type); + +std::optional +lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, + const mlir::TypeConverter *converter); +#endif diff --git a/clang/lib/CIR/Lowering/CMakeLists.txt b/clang/lib/CIR/Lowering/CMakeLists.txt index f720e597ecb0..e34884ce21bd 100644 --- a/clang/lib/CIR/Lowering/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/CMakeLists.txt @@ -1,2 +1,42 @@ +set(LLVM_LINK_COMPONENTS + Core + Support + ) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) + +add_clang_library(clangCIRLoweringHelpers + LoweringHelpers.cpp + + DEPENDS + MLIRCIROpsIncGen + MLIRCIREnumsGen + MLIRCIRASTAttrInterfacesIncGen + MLIRCIROpInterfacesIncGen + MLIRCIRLoopOpInterfaceIncGen + MLIRBuiltinLocationAttributesIncGen + MLIRBuiltinTypeInterfacesIncGen + MLIRFunctionInterfacesIncGen + + LINK_LIBS + clangAST + clangBasic + clangCodeGen + clangLex + clangFrontend + clangCIR + ${dialect_libs} + MLIRCIR + MLIRAnalysis + MLIRBuiltinToLLVMIRTranslation + MLIRLLVMToLLVMIRTranslation + MLIRIR + MLIRParser + MLIRSideEffectInterfaces + MLIRTransforms + MLIRSupport + MLIRMemRefDialect + ) + add_subdirectory(DirectToLLVM) add_subdirectory(ThroughMLIR) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index 1755bcdcd470..df89f6b2a7b7 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -26,6 +26,7 @@ add_clang_library(clangCIRLoweringDirectToLLVM clangLex clangFrontend clangCIR + clangCIRLoweringHelpers ${dialect_libs} MLIRCIR MLIRAnalysis diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 2c5cc69bc6a2..a7d3728bcccb 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -46,6 +46,7 @@ #include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" @@ -963,134 +964,6 @@ class CIRStoreLowering : public mlir::OpConversionPattern { } }; -mlir::DenseElementsAttr -convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, - mlir::Type type) { - auto values = llvm::SmallVector{}; - auto stringAttr = mlir::dyn_cast(attr.getElts()); - assert(stringAttr && "expected string attribute here"); - for (auto element : stringAttr) - values.push_back({8, (uint64_t)element}); - return mlir::DenseElementsAttr::get( - mlir::RankedTensorType::get({(int64_t)values.size()}, type), - llvm::ArrayRef(values)); -} - -template StorageTy getZeroInitFromType(mlir::Type Ty); - -template <> mlir::APInt getZeroInitFromType(mlir::Type Ty) { - assert(mlir::isa(Ty) && "expected int type"); - auto IntTy = mlir::cast(Ty); - return mlir::APInt::getZero(IntTy.getWidth()); -} - -template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { - assert((mlir::isa(Ty)) && - "only float and double supported"); - if (Ty.isF32() || mlir::isa(Ty)) - return mlir::APFloat(0.f); - if (Ty.isF64() || mlir::isa(Ty)) - return mlir::APFloat(0.0); - llvm_unreachable("NYI"); -} - -// return the nested type and quantity of elements for cir.array type. -// e.g: for !cir.array x 1> -// it returns !s32i as return value and stores 3 to elemQuantity. -mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity) { - assert(mlir::isa(Ty) && "expected ArrayType"); - - elemQuantity = 1; - mlir::Type nestTy = Ty; - while (auto ArrTy = mlir::dyn_cast(nestTy)) { - nestTy = ArrTy.getEltType(); - elemQuantity *= ArrTy.getSize(); - } - - return nestTy; -} - -template -void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, - llvm::SmallVectorImpl &values) { - auto arrayAttr = mlir::cast(attr.getElts()); - for (auto eltAttr : arrayAttr) { - if (auto valueAttr = mlir::dyn_cast(eltAttr)) { - values.push_back(valueAttr.getValue()); - } else if (auto subArrayAttr = - mlir::dyn_cast(eltAttr)) { - convertToDenseElementsAttrImpl(subArrayAttr, values); - } else if (auto zeroAttr = mlir::dyn_cast(eltAttr)) { - unsigned numStoredZeros = 0; - auto nestTy = - getNestedTypeAndElemQuantity(zeroAttr.getType(), numStoredZeros); - values.insert(values.end(), numStoredZeros, - getZeroInitFromType(nestTy)); - } else { - llvm_unreachable("unknown element in ConstArrayAttr"); - } - } - - // Only fill in trailing zeros at the local cir.array level where the element - // type isn't another array (for the mult-dim case). - auto numTrailingZeros = attr.getTrailingZerosNum(); - if (numTrailingZeros) { - auto localArrayTy = mlir::dyn_cast(attr.getType()); - assert(localArrayTy && "expected !cir.array"); - - auto nestTy = localArrayTy.getEltType(); - if (!mlir::isa(nestTy)) - values.insert(values.end(), localArrayTy.getSize() - numTrailingZeros, - getZeroInitFromType(nestTy)); - } -} - -template -mlir::DenseElementsAttr -convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, - const llvm::SmallVectorImpl &dims, - mlir::Type type) { - auto values = llvm::SmallVector{}; - convertToDenseElementsAttrImpl(attr, values); - return mlir::DenseElementsAttr::get(mlir::RankedTensorType::get(dims, type), - llvm::ArrayRef(values)); -} - -std::optional -lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, - const mlir::TypeConverter *converter) { - - // Ensure ConstArrayAttr has a type. - auto typedConstArr = mlir::dyn_cast(constArr); - assert(typedConstArr && "cir::ConstArrayAttr is not a mlir::TypedAttr"); - - // Ensure ConstArrayAttr type is a ArrayType. - auto cirArrayType = - mlir::dyn_cast(typedConstArr.getType()); - assert(cirArrayType && "cir::ConstArrayAttr is not a cir::ArrayType"); - - // Is a ConstArrayAttr with an cir::ArrayType: fetch element type. - mlir::Type type = cirArrayType; - auto dims = llvm::SmallVector{}; - while (auto arrayType = mlir::dyn_cast(type)) { - dims.push_back(arrayType.getSize()); - type = arrayType.getEltType(); - } - - // Convert array attr to LLVM compatible dense elements attr. - if (mlir::isa(constArr.getElts())) - return convertStringAttrToDenseElementsAttr(constArr, - converter->convertType(type)); - if (mlir::isa(type)) - return convertToDenseElementsAttr( - constArr, dims, converter->convertType(type)); - if (mlir::isa(type)) - return convertToDenseElementsAttr( - constArr, dims, converter->convertType(type)); - - return std::nullopt; -} - bool hasTrailingZeros(mlir::cir::ConstArrayAttr attr) { auto array = mlir::dyn_cast(attr.getElts()); return attr.hasTrailingZeros() || diff --git a/clang/lib/CIR/Lowering/LoweringHelpers.cpp b/clang/lib/CIR/Lowering/LoweringHelpers.cpp new file mode 100644 index 000000000000..2393819b4813 --- /dev/null +++ b/clang/lib/CIR/Lowering/LoweringHelpers.cpp @@ -0,0 +1,137 @@ +//====- LoweringHelpers.cpp - Lowering helper functions -------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains helper functions for lowering from CIR to LLVM or MLIR. +// +//===----------------------------------------------------------------------===// +#include "clang/CIR/LoweringHelpers.h" + +mlir::DenseElementsAttr +convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, + mlir::Type type) { + auto values = llvm::SmallVector{}; + auto stringAttr = mlir::dyn_cast(attr.getElts()); + assert(stringAttr && "expected string attribute here"); + for (auto element : stringAttr) + values.push_back({8, (uint64_t)element}); + return mlir::DenseElementsAttr::get( + mlir::RankedTensorType::get({(int64_t)values.size()}, type), + llvm::ArrayRef(values)); +} + +template <> mlir::APInt getZeroInitFromType(mlir::Type Ty) { + assert(mlir::isa(Ty) && "expected int type"); + auto IntTy = mlir::cast(Ty); + return mlir::APInt::getZero(IntTy.getWidth()); +} + +template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { + assert((mlir::isa(Ty)) && + "only float and double supported"); + if (Ty.isF32() || mlir::isa(Ty)) + return mlir::APFloat(0.f); + if (Ty.isF64() || mlir::isa(Ty)) + return mlir::APFloat(0.0); + llvm_unreachable("NYI"); +} + +// return the nested type and quantity of elements for cir.array type. +// e.g: for !cir.array x 1> +// it returns !s32i as return value and stores 3 to elemQuantity. +mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity) { + assert(mlir::isa(Ty) && "expected ArrayType"); + + elemQuantity = 1; + mlir::Type nestTy = Ty; + while (auto ArrTy = mlir::dyn_cast(nestTy)) { + nestTy = ArrTy.getEltType(); + elemQuantity *= ArrTy.getSize(); + } + + return nestTy; +} + +template +void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, + llvm::SmallVectorImpl &values) { + auto arrayAttr = mlir::cast(attr.getElts()); + for (auto eltAttr : arrayAttr) { + if (auto valueAttr = mlir::dyn_cast(eltAttr)) { + values.push_back(valueAttr.getValue()); + } else if (auto subArrayAttr = + mlir::dyn_cast(eltAttr)) { + convertToDenseElementsAttrImpl(subArrayAttr, values); + } else if (auto zeroAttr = mlir::dyn_cast(eltAttr)) { + unsigned numStoredZeros = 0; + auto nestTy = + getNestedTypeAndElemQuantity(zeroAttr.getType(), numStoredZeros); + values.insert(values.end(), numStoredZeros, + getZeroInitFromType(nestTy)); + } else { + llvm_unreachable("unknown element in ConstArrayAttr"); + } + } + + // Only fill in trailing zeros at the local cir.array level where the element + // type isn't another array (for the mult-dim case). + auto numTrailingZeros = attr.getTrailingZerosNum(); + if (numTrailingZeros) { + auto localArrayTy = mlir::dyn_cast(attr.getType()); + assert(localArrayTy && "expected !cir.array"); + + auto nestTy = localArrayTy.getEltType(); + if (!mlir::isa(nestTy)) + values.insert(values.end(), localArrayTy.getSize() - numTrailingZeros, + getZeroInitFromType(nestTy)); + } +} + +template +mlir::DenseElementsAttr +convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, + const llvm::SmallVectorImpl &dims, + mlir::Type type) { + auto values = llvm::SmallVector{}; + convertToDenseElementsAttrImpl(attr, values); + return mlir::DenseElementsAttr::get(mlir::RankedTensorType::get(dims, type), + llvm::ArrayRef(values)); +} + +std::optional +lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, + const mlir::TypeConverter *converter) { + + // Ensure ConstArrayAttr has a type. + auto typedConstArr = mlir::dyn_cast(constArr); + assert(typedConstArr && "cir::ConstArrayAttr is not a mlir::TypedAttr"); + + // Ensure ConstArrayAttr type is a ArrayType. + auto cirArrayType = + mlir::dyn_cast(typedConstArr.getType()); + assert(cirArrayType && "cir::ConstArrayAttr is not a cir::ArrayType"); + + // Is a ConstArrayAttr with an cir::ArrayType: fetch element type. + mlir::Type type = cirArrayType; + auto dims = llvm::SmallVector{}; + while (auto arrayType = mlir::dyn_cast(type)) { + dims.push_back(arrayType.getSize()); + type = arrayType.getEltType(); + } + + if (mlir::isa(constArr.getElts())) + return convertStringAttrToDenseElementsAttr(constArr, + converter->convertType(type)); + if (mlir::isa(type)) + return convertToDenseElementsAttr( + constArr, dims, converter->convertType(type)); + if (mlir::isa(type)) + return convertToDenseElementsAttr( + constArr, dims, converter->convertType(type)); + + return std::nullopt; +} From 925cd34c71f55bc517713cc683e4072db4f673f8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 24 Jul 2024 16:57:50 -0700 Subject: [PATCH 1707/2301] [CIR][FlattenCFG] Exceptions: handle type and unwind cases Implemented flatten logic for putthing the necessary branches between blocks representing catch clauses. Still missing the all catcher, but cover most part of type checks and resume blocks. Add some operations to represent higher level abstractions of retrieving exception information. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 28 +++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 12 ++ .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 142 +++++++++++++----- clang/test/CIR/Lowering/try-catch.cpp | 19 +++ 4 files changed, 161 insertions(+), 40 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index da17ecc9ce36..6b46400c24c4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -870,8 +870,7 @@ def ContinueOp : CIR_Op<"continue", [Terminator]> { // Resume //===----------------------------------------------------------------------===// -def ResumeOp : CIR_Op<"resume", [ReturnLike, Terminator, - ParentOneOf<["TryOp"]>]> { +def ResumeOp : CIR_Op<"resume", [ReturnLike, Terminator]> { let summary = "Resumes execution after not catching exceptions"; let description = [{ The `cir.resume` operation terminates a region on `cir.catch`, "resuming" @@ -886,8 +885,10 @@ def ResumeOp : CIR_Op<"resume", [ReturnLike, Terminator, ``` }]; - let arguments = (ins); - let assemblyFormat = "attr-dict"; + let arguments = (ins UnitAttr:$rethrow); + let assemblyFormat = [{ + (`rethrow` $rethrow^)? attr-dict + }]; } //===----------------------------------------------------------------------===// @@ -3262,7 +3263,7 @@ def EhSelectorOp : CIR_Op<"eh.selector"> { }]; let arguments = (ins CIR_ExceptionType:$exception); - let results = (outs UInt32:$selector); + let results = (outs UInt32:$type_id); let assemblyFormat = [{ $exception attr-dict }]; @@ -3270,6 +3271,23 @@ def EhSelectorOp : CIR_Op<"eh.selector"> { let hasVerifier = 0; } +def EhTypeIdOp : CIR_Op<"eh.typeid", + [Pure, DeclareOpInterfaceMethods]> { + let summary = "Compute exception type id from it's global type symbol"; + let description = [{ + Returns the exception type id for a given global symbol representing + a type. + }]; + + let arguments = (ins FlatSymbolRefAttr:$type_sym); + let results = (outs UInt32:$type_id); + let assemblyFormat = [{ + $type_sym attr-dict + }]; + + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // CopyOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 24643ceb2047..42a6af5fb416 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -3204,6 +3204,18 @@ LogicalResult LabelOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// EhTypeIdOp +//===----------------------------------------------------------------------===// + +LogicalResult EhTypeIdOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + auto op = symbolTable.lookupNearestSymbolFrom(*this, getTypeSymAttr()); + if (!isa(op)) + return emitOpError("'") + << getTypeSym() << "' does not reference a valid cir.global"; + return success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index f203a11886ce..27a23ebe4924 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -176,56 +176,54 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { public: using OpRewritePattern::OpRewritePattern; - mlir::LogicalResult - matchAndRewrite(mlir::cir::TryOp tryOp, - mlir::PatternRewriter &rewriter) const override { - mlir::OpBuilder::InsertionGuard guard(rewriter); - auto loc = tryOp.getLoc(); - - // Empty scope: just remove it. - if (tryOp.getTryRegion().empty()) { - rewriter.eraseOp(tryOp); - return mlir::success(); - } - - // TODO: keep track of cir.try_call before we flatten. - - // Split the current block before the TryOp to create the inlining - // point. - auto *beforeTryScopeBlock = rewriter.getInsertionBlock(); - auto *remainingOpsBlock = - rewriter.splitBlock(beforeTryScopeBlock, rewriter.getInsertionPoint()); - mlir::Block *continueBlock; - continueBlock = remainingOpsBlock; + mlir::Block *buildTypeCase(mlir::PatternRewriter &rewriter, mlir::Region &r, + mlir::Block *afterTry) const { + YieldOp yieldOp; + CatchParamOp paramOp; + r.walk([&](YieldOp op) { + assert(!yieldOp && "expect to only find one"); + yieldOp = op; + }); + r.walk([&](CatchParamOp op) { + assert(!paramOp && "expect to only find one"); + paramOp = op; + }); - // Inline body region. - auto *beforeBody = &tryOp.getTryRegion().front(); - auto *afterBody = &tryOp.getTryRegion().back(); - rewriter.inlineRegionBefore(tryOp.getTryRegion(), continueBlock); + rewriter.inlineRegionBefore(r, afterTry); + rewriter.setInsertionPointToEnd(yieldOp->getBlock()); + rewriter.replaceOpWithNewOp(yieldOp, afterTry); + return paramOp->getBlock(); + } - // Branch into the body of the region. - rewriter.setInsertionPointToEnd(beforeTryScopeBlock); - rewriter.create(loc, mlir::ValueRange(), beforeBody); + void buildUnwindCase(mlir::PatternRewriter &rewriter, mlir::Region &r, + mlir::Block *unwindBlock) const { + assert(&r.front() == &r.back() && "only one block expected"); + rewriter.mergeBlocks(&r.back(), unwindBlock); + } + void buildCatchers(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, + mlir::Block *afterBody, mlir::Block *afterTry) const { + auto loc = tryOp.getLoc(); // Replace the tryOp return with a branch that jumps out of the body. rewriter.setInsertionPointToEnd(afterBody); - auto yieldOp = cast(afterBody->getTerminator()); + auto tryBodyYield = cast(afterBody->getTerminator()); + mlir::Block *beforeCatch = rewriter.getInsertionBlock(); auto *catchBegin = rewriter.splitBlock(beforeCatch, rewriter.getInsertionPoint()); rewriter.setInsertionPointToEnd(beforeCatch); - // FIXME: first step here is to build the landing pad like block, but - // since cir.call exception isn't yet lowered, jump from the try block - // to the catch block as a placeholder for now. - rewriter.replaceOpWithNewOp(yieldOp, catchBegin); + // FIXME: this branch should be to afterTry instead of catchBegin, before we + // change this, we need to break calls into their branch version + // (invoke-like) first, otherwise these will be unrecheable and eliminated. + rewriter.replaceOpWithNewOp(tryBodyYield, catchBegin); // Start the landing pad by getting the inflight exception information. rewriter.setInsertionPointToEnd(catchBegin); auto exception = rewriter.create( loc, mlir::cir::ExceptionInfoType::get(rewriter.getContext())); - // TODO: direct catch all needs no dispatch. + // TODO: direct catch all needs no dispatch? // Handle dispatch. In could in theory use a switch, but let's just // mimic LLVM more closely since we have no specific thing to achieve @@ -238,9 +236,83 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Fill in dispatcher. rewriter.setInsertionPointToEnd(dispatchBlock); auto selector = rewriter.create(loc, exception); - rewriter.create(loc, continueBlock); + // FIXME: we should have an extra block for the dispatcher, just in case + // there isn't one later. + + llvm::MutableArrayRef caseRegions = tryOp.getCatchRegions(); + mlir::ArrayAttr caseAttrList = tryOp.getCatchTypesAttr(); + unsigned caseCnt = 0; + + mlir::Block *nextDispatcher = rewriter.getInsertionBlock(); + + for (mlir::Attribute caseAttr : caseAttrList) { + if (auto typeIdGlobal = dyn_cast(caseAttr)) { + auto typeId = rewriter.create( + loc, typeIdGlobal.getSymbol()); + auto match = rewriter.create( + loc, mlir::cir::BoolType::get(rewriter.getContext()), + mlir::cir::CmpOpKind::eq, selector, typeId); + + auto *previousDispatcher = nextDispatcher; + mlir::Block *typeCatchBlock = + buildTypeCase(rewriter, caseRegions[caseCnt], afterTry); + nextDispatcher = rewriter.createBlock(afterTry); + rewriter.setInsertionPointToEnd(previousDispatcher); + rewriter.create(loc, match, typeCatchBlock, + nextDispatcher); + rewriter.setInsertionPointToEnd(nextDispatcher); + } else if (auto catchAll = dyn_cast(caseAttr)) { + // TBD + } else if (auto catchUnwind = + dyn_cast(caseAttr)) { + assert(nextDispatcher->empty() && "expect empty dispatcher"); + buildUnwindCase(rewriter, caseRegions[caseCnt], nextDispatcher); + nextDispatcher = nullptr; // No more business in try/catch + } + caseCnt++; + } + + assert(!nextDispatcher && "no dispatcher available anymore"); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::TryOp tryOp, + mlir::PatternRewriter &rewriter) const override { + mlir::OpBuilder::InsertionGuard guard(rewriter); + auto loc = tryOp.getLoc(); + + // Empty scope: just remove it. + if (tryOp.getTryRegion().empty()) { + rewriter.eraseOp(tryOp); + return mlir::success(); + } + + // TODO: keep track of cir.try_call before we flatten. + + // Split the current block before the TryOp to create the inlining + // point. + auto *beforeTryScopeBlock = rewriter.getInsertionBlock(); + mlir::Block *afterTry = + rewriter.splitBlock(beforeTryScopeBlock, rewriter.getInsertionPoint()); + + // Inline body region. + auto *beforeBody = &tryOp.getTryRegion().front(); + auto *afterBody = &tryOp.getTryRegion().back(); + rewriter.inlineRegionBefore(tryOp.getTryRegion(), afterTry); + + // Branch into the body of the region. + rewriter.setInsertionPointToEnd(beforeTryScopeBlock); + rewriter.create(loc, mlir::ValueRange(), beforeBody); + + buildCatchers(tryOp, rewriter, afterBody, afterTry); rewriter.eraseOp(tryOp); + + // Quick block cleanup: no indirection to the post try block. + auto brOp = dyn_cast(afterTry->getTerminator()); + mlir::Block *srcBlock = brOp.getDest(); + rewriter.eraseOp(brOp); + rewriter.mergeBlocks(srcBlock, afterTry); return mlir::success(); } }; diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 36e86687992d..f7e4ea8ebf32 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -31,9 +31,28 @@ unsigned long long tc() { // CIR_FLAT: ^bb4: // pred: ^bb3 // CIR_FLAT: %[[SEL:.*]] = cir.eh.selector %[[EH]] } catch (int idx) { + // CIR_FLAT: %[[INT_IDX_ID:.*]] = cir.eh.typeid @_ZTIi + // CIR_FLAT: %[[MATCH_CASE_INT_IDX:.*]] = cir.cmp(eq, %[[SEL]], %[[INT_IDX_ID]]) : !u32i, !cir.bool + // CIR_FLAT: cir.brcond %[[MATCH_CASE_INT_IDX]] ^bb5, ^bb6 + // CIR_FLAT: ^bb5: // pred: ^bb4 + // CIR_FLAT: %[[PARAM_INT_IDX:.*]] = cir.catch_param -> !cir.ptr + // CIR_FLAT: cir.const #cir.int<98> + // CIR_FLAT: cir.br ^bb9 z = 98; idx++; } catch (const char* msg) { + // CIR_FLAT: ^bb6: // pred: ^bb4 + // CIR_FLAT: %[[CHAR_MSG_ID:.*]] = cir.eh.typeid @_ZTIPKc + // CIR_FLAT: %[[MATCH_CASE_CHAR_MSG:.*]] = cir.cmp(eq, %[[SEL]], %[[CHAR_MSG_ID]]) : !u32i, !cir.bool + // CIR_FLAT: cir.brcond %[[MATCH_CASE_CHAR_MSG]] ^bb7, ^bb8 + // CIR_FLAT: ^bb7: // pred: ^bb6 + // CIR_FLAT: %[[PARAM_CHAR_MSG:.*]] = cir.catch_param -> !cir.ptr + // CIR_FLAT: cir.const #cir.int<99> : !s32i + // CIR_FLAT: cir.br ^bb9 + // CIR_FLAT: ^bb8: // pred: ^bb6 + // CIR_FLAT: cir.resume + // CIR_FLAT: ^bb9: // 2 preds: ^bb5, ^bb7 + // CIR_FLAT: cir.load z = 99; (void)msg[0]; } From f1a9f5325b662cf116339775ea6a2168a34afa46 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 25 Jul 2024 18:28:21 -0700 Subject: [PATCH 1708/2301] [CIR][NFC] Tide up exception test a bit --- clang/test/CIR/Lowering/try-catch.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index f7e4ea8ebf32..7cc4643687f4 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -49,13 +49,14 @@ unsigned long long tc() { // CIR_FLAT: %[[PARAM_CHAR_MSG:.*]] = cir.catch_param -> !cir.ptr // CIR_FLAT: cir.const #cir.int<99> : !s32i // CIR_FLAT: cir.br ^bb9 - // CIR_FLAT: ^bb8: // pred: ^bb6 - // CIR_FLAT: cir.resume - // CIR_FLAT: ^bb9: // 2 preds: ^bb5, ^bb7 - // CIR_FLAT: cir.load z = 99; (void)msg[0]; } + // CIR_FLAT: ^bb8: // pred: ^bb6 + // CIR_FLAT: cir.resume + + // CIR_FLAT: ^bb9: // 2 preds: ^bb5, ^bb7 + // CIR_FLAT: cir.load return z; } From 8f9673487ce9df800fd90670f62eca49c0feed3c Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Fri, 26 Jul 2024 11:42:23 -0300 Subject: [PATCH 1709/2301] [CIR][ABI] Add X86_64 float and double CC lowering (#714) Implements calling convention lowering of float and double arguments and return values conventions for X86_64. --- .../Transforms/TargetLowering/ABIInfo.cpp | 5 ++ .../Transforms/TargetLowering/ABIInfo.h | 3 + .../TargetLowering/CIRLowerContext.cpp | 12 +++- .../Transforms/TargetLowering/LowerModule.cpp | 19 +++--- .../Transforms/TargetLowering/LowerModule.h | 15 +++-- .../Transforms/TargetLowering/LowerTypes.h | 1 + .../Transforms/TargetLowering/Targets/X86.cpp | 62 +++++++++++++++++++ .../x86_64/x86_64-call-conv-lowering-pass.cpp | 13 ++++ 8 files changed, 115 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 42c1c9cc2c11..3ed29dd4d549 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -15,6 +15,7 @@ #include "CIRCXXABI.h" #include "CIRLowerContext.h" #include "LowerTypes.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" namespace mlir { namespace cir { @@ -26,6 +27,10 @@ CIRCXXABI &ABIInfo::getCXXABI() const { return LT.getCXXABI(); } CIRLowerContext &ABIInfo::getContext() const { return LT.getContext(); } +const ::cir::CIRDataLayout &ABIInfo::getDataLayout() const { + return LT.getDataLayout(); +} + bool ABIInfo::isPromotableIntegerTypeForABI(Type Ty) const { if (getContext().isPromotableIntegerType(Ty)) return true; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h index d69fee2f26b8..67d628f4eb30 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h @@ -17,6 +17,7 @@ #include "CIRCXXABI.h" #include "CIRLowerContext.h" #include "LowerFunctionInfo.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "llvm/IR/CallingConv.h" namespace mlir { @@ -40,6 +41,8 @@ class ABIInfo { CIRLowerContext &getContext() const; + const ::cir::CIRDataLayout &getDataLayout() const; + virtual void computeInfo(LowerFunctionInfo &FI) const = 0; // Implement the Type::IsPromotableIntegerType for ABI specific needs. The diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index 37cd8f825baa..dce5fabc314b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -50,7 +50,7 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { // TODO(cir): We should implement a better way to identify type kinds and use // builting data layout interface for this. auto typeKind = clang::Type::Builtin; - if (isa(T)) { + if (isa(T)) { typeKind = clang::Type::Builtin; } else { llvm_unreachable("Unhandled type class"); @@ -74,6 +74,16 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { Align = std::ceil((float)Width / 8) * 8; break; } + if (auto floatTy = dyn_cast(T)) { + Width = Target->getFloatWidth(); + Align = Target->getFloatAlign(); + break; + } + if (auto doubleTy = dyn_cast(T)) { + Width = Target->getDoubleWidth(); + Align = Target->getDoubleAlign(); + break; + } llvm_unreachable("Unknown builtin type!"); break; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 2d82fa58a24f..086822ed4143 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -14,6 +14,7 @@ // FIXME(cir): This header file is not exposed to the public API, but can be // reused by CIR ABI lowering since it holds target-specific information. #include "../../../../Basic/Targets.h" +#include "clang/Basic/LangOptions.h" #include "clang/Basic/TargetOptions.h" #include "CIRLowerContext.h" @@ -87,11 +88,15 @@ createTargetLoweringInfo(LowerModule &LM) { } } -LowerModule::LowerModule(CIRLowerContext &C, ModuleOp &module, StringAttr DL, - const clang::TargetInfo &target, +LowerModule::LowerModule(clang::LangOptions opts, ModuleOp &module, + StringAttr DL, + std::unique_ptr target, PatternRewriter &rewriter) - : context(C), module(module), Target(target), ABI(createCXXABI(*this)), - types(*this, DL.getValue()), rewriter(rewriter) {} + : context(module, opts), module(module), Target(std::move(target)), + ABI(createCXXABI(*this)), types(*this, DL.getValue()), + rewriter(rewriter) { + context.initBuiltinTypes(*Target); +} const TargetLoweringInfo &LowerModule::getTargetLoweringInfo() { if (!TheTargetCodeGenInfo) @@ -235,11 +240,9 @@ std::unique_ptr createLowerModule(ModuleOp module, // Create context. assert(!::cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; - auto context = CIRLowerContext(module, langOpts); - context.initBuiltinTypes(*targetInfo); - return std::make_unique(context, module, dataLayoutStr, - *targetInfo, rewriter); + return std::make_unique(langOpts, module, dataLayoutStr, + std::move(targetInfo), rewriter); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index f088086d70c6..46ac0c105269 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -21,17 +21,19 @@ #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "clang/Basic/LangOptions.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/MissingFeatures.h" +#include namespace mlir { namespace cir { class LowerModule { - CIRLowerContext &context; + CIRLowerContext context; ModuleOp module; - const clang::TargetInfo &Target; + const std::unique_ptr Target; mutable std::unique_ptr TheTargetCodeGenInfo; std::unique_ptr ABI; @@ -40,22 +42,23 @@ class LowerModule { PatternRewriter &rewriter; public: - LowerModule(CIRLowerContext &C, ModuleOp &module, StringAttr DL, - const clang::TargetInfo &target, PatternRewriter &rewriter); + LowerModule(clang::LangOptions opts, ModuleOp &module, StringAttr DL, + std::unique_ptr target, + PatternRewriter &rewriter); ~LowerModule() = default; // Trivial getters. LowerTypes &getTypes() { return types; } CIRLowerContext &getContext() { return context; } CIRCXXABI &getCXXABI() const { return *ABI; } - const clang::TargetInfo &getTarget() const { return Target; } + const clang::TargetInfo &getTarget() const { return *Target; } MLIRContext *getMLIRContext() { return module.getContext(); } ModuleOp &getModule() { return module; } const TargetLoweringInfo &getTargetLoweringInfo(); // FIXME(cir): This would be in ASTContext, not CodeGenModule. - const clang::TargetInfo &getTargetInfo() const { return Target; } + const clang::TargetInfo &getTargetInfo() const { return *Target; } // FIXME(cir): This would be in ASTContext, not CodeGenModule. clang::TargetCXXABI::Kind getCXXABIKind() const { diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h index 9ab1cdf335d5..9e6149707c07 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -56,6 +56,7 @@ class LowerTypes { LowerTypes(LowerModule &LM, StringRef DLString); ~LowerTypes() = default; + const ::cir::CIRDataLayout &getDataLayout() const { return DL; } LowerModule &getLM() const { return LM; } CIRCXXABI &getCXXABI() const { return CXXABI; } CIRLowerContext &getContext() { return context; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index b05a46070638..7b3aaca75364 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -6,6 +6,7 @@ #include "LowerTypes.h" #include "TargetInfo.h" #include "clang/CIR/ABIArgInfo.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" #include @@ -37,6 +38,15 @@ static bool BitsContainNoUserData(Type Ty, unsigned StartBit, unsigned EndBit, llvm_unreachable("NYI"); } +/// Return a floating point type at the specified offset. +Type getFPTypeAtOffset(Type IRType, unsigned IROffset, + const ::cir::CIRDataLayout &TD) { + if (IROffset == 0 && isa(IRType)) + return IRType; + + llvm_unreachable("NYI"); +} + } // namespace class X86_64ABIInfo : public ABIInfo { @@ -71,6 +81,9 @@ class X86_64ABIInfo : public ABIInfo { void classify(Type T, uint64_t OffsetBase, Class &Lo, Class &Hi, bool isNamedArg, bool IsRegCall = false) const; + Type GetSSETypeAtOffset(Type IRType, unsigned IROffset, Type SourceTy, + unsigned SourceOffset) const; + Type GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, Type SourceTy, unsigned SourceOffset) const; @@ -125,6 +138,10 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, } return; + } else if (isa(Ty) || isa(Ty)) { + Current = Class::SSE; + return; + } else { llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; llvm_unreachable("NYI"); @@ -138,6 +155,37 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, llvm_unreachable("NYI"); } +/// Return a type that will be passed by the backend in the low 8 bytes of an +/// XMM register, corresponding to the SSE class. +Type X86_64ABIInfo::GetSSETypeAtOffset(Type IRType, unsigned IROffset, + Type SourceTy, + unsigned SourceOffset) const { + const ::cir::CIRDataLayout &TD = getDataLayout(); + unsigned SourceSize = + (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset; + Type T0 = getFPTypeAtOffset(IRType, IROffset, TD); + if (!T0 || isa(T0)) + return T0; // NOTE(cir): Not sure if this is correct. + + Type T1 = {}; + unsigned T0Size = TD.getTypeAllocSize(T0); + if (SourceSize > T0Size) + llvm_unreachable("NYI"); + if (T1 == nullptr) { + // Check if IRType is a half/bfloat + float. float type will be in + // IROffset+4 due to its alignment. + if (isa(T0) && SourceSize > 4) + llvm_unreachable("NYI"); + // If we can't get a second FP type, return a simple half or float. + // avx512fp16-abi.c:pr51813_2 shows it works to return float for + // {float, i8} too. + if (T1 == nullptr) + return T0; + } + + llvm_unreachable("NYI"); +} + /// The ABI specifies that a value should be passed in an 8-byte GPR. This /// means that we either have a scalar or we are talking about the high or low /// part of an up-to-16-byte struct. This routine picks the best CIR type @@ -236,6 +284,12 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { } break; + // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next + // available SSE register of the sequence %xmm0, %xmm1 is used. + case Class::SSE: + resType = GetSSETypeAtOffset(RetTy, 0, RetTy, 0); + break; + default: llvm_unreachable("NYI"); } @@ -302,6 +356,14 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, break; + // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next + // available SSE register is used, the registers are taken in the + // order from %xmm0 to %xmm7. + case Class::SSE: { + ResType = GetSSETypeAtOffset(Ty, 0, Ty, 0); + ++neededSSE; + break; + } default: llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index 2ead4bbba761..7a7a244397a1 100644 --- a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -62,3 +62,16 @@ long long LongLong(long long l) { // CHECK: cir.call @_Z8LongLongx(%{{.+}}) : (!s64i) -> !s64i return LongLong(l); } + +/// Test call conv lowering for floating point. /// + +// CHECK: cir.func @_Z5Floatf(%arg0: !cir.float loc({{.+}})) -> !cir.float +float Float(float f) { + // cir.call @_Z5Floatf(%{{.+}}) : (!cir.float) -> !cir.float + return Float(f); +} +// CHECK: cir.func @_Z6Doubled(%arg0: !cir.double loc({{.+}})) -> !cir.double +double Double(double d) { + // cir.call @_Z6Doubled(%{{.+}}) : (!cir.double) -> !cir.double + return Double(d); +} From e22c3c97e7f9d01d3733b4557adc694ed3fff7e8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Jul 2024 12:34:10 -0700 Subject: [PATCH 1710/2301] [CIR][NFC] Exceptions: re-introduce cir.try_call (skeleton), now with branches `cir.try_call` was recently removed since the call mechanism changed for exceptions with structured control flow. Here we basically just reuse the name, it's going to be the call used for flat CIR in face of exceptions (like LLVM's invoke). Refactor CallOp a bit. This just introduces the basic skeleton, functionality tests coming next. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 79 +++++++++++++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 99 +++++++++++++++++++- 2 files changed, 171 insertions(+), 7 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 6b46400c24c4..314589702f1d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2948,7 +2948,7 @@ def FuncOp : CIR_Op<"func", [ } //===----------------------------------------------------------------------===// -// CallOp +// CallOp and TryCallOp //===----------------------------------------------------------------------===// class CIR_CallOp extra_traits = []> : @@ -2991,7 +2991,6 @@ class CIR_CallOp extra_traits = []> : OptionalAttr:$callee, Variadic:$arg_ops, ExtraFuncAttr:$extra_attrs, - UnitAttr:$exception, OptionalAttr:$ast ); } @@ -3024,8 +3023,10 @@ def CallOp : CIR_CallOp<"call"> { ``` }]; - let arguments = commonArgs; let results = (outs Optional:$result); + let arguments = !con((ins + UnitAttr:$exception + ), commonArgs); let skipDefaultBuilders = 1; @@ -3036,18 +3037,86 @@ def CallOp : CIR_CallOp<"call"> { $_state.addOperands(operands); if (callee) $_state.addAttribute("callee", callee); - if (exception) // $_builder + if (exception) $_state.addAttribute("exception", exception); if (resType && !isa(resType)) $_state.addTypes(resType); }]>, OpBuilder<(ins "Value":$ind_target, "FuncType":$fn_type, - CArg<"ValueRange", "{}">:$operands), [{ + CArg<"ValueRange", "{}">:$operands, + CArg<"UnitAttr", "{}">:$exception), [{ $_state.addOperands(ValueRange{ind_target}); $_state.addOperands(operands); if (!fn_type.isVoid()) $_state.addTypes(fn_type.getReturnType()); + if (exception) + $_state.addAttribute("exception", exception); + }]> + ]; +} + +def TryCallOp : CIR_CallOp<"try_call", + [DeclareOpInterfaceMethods, Terminator, + AttrSizedOperandSegments]> { + let summary = "try_call operation"; + let description = [{ + Mostly similar to cir.call but requires two destination + branches, one for handling exceptions in case its thrown and + the other one to follow on regular control-flow. + + Example: + + ```mlir + // Direct call + %2 = cir.try_call @my_add(%0, %1) ^continue, ^landing_pad : (f32, f32) -> f32 + ``` + }]; + + let arguments = !con((ins + Variadic:$contOperands, + Variadic:$landingPadOperands + ), commonArgs); + + let results = (outs Optional:$result); + let successors = (successor AnySuccessor:$cont, + AnySuccessor:$landing_pad); + + let skipDefaultBuilders = 1; + + let builders = [ + OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType, + "Block *":$cont, "Block *":$landing_pad, + CArg<"ValueRange", "{}">:$operands, + CArg<"ValueRange", "{}">:$contOperands, + CArg<"ValueRange", "{}">:$landingPadOperands), [{ + $_state.addOperands(operands); + if (callee) + $_state.addAttribute("callee", callee); + if (resType && !isa(resType)) + $_state.addTypes(resType); + + // Handle branches + $_state.addSuccessors(cont); + $_state.addSuccessors(landing_pad); + $_state.addOperands(contOperands); + $_state.addOperands(landingPadOperands); + }]>, + OpBuilder<(ins "Value":$ind_target, + "FuncType":$fn_type, + "Block *":$cont, "Block *":$landing_pad, + CArg<"ValueRange", "{}">:$operands, + CArg<"ValueRange", "{}">:$contOperands, + CArg<"ValueRange", "{}">:$landingPadOperands), [{ + $_state.addOperands(ValueRange{ind_target}); + $_state.addOperands(operands); + if (!fn_type.isVoid()) + $_state.addTypes(fn_type.getReturnType()); + // Handle branches + $_state.addSuccessors(cont); + $_state.addSuccessors(landing_pad); + $_state.addOperands(contOperands); + $_state.addOperands(landingPadOperands); }]> ]; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 42a6af5fb416..a9363cc7c266 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2348,9 +2348,17 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { return success(); } +static ::mlir::ParseResult +parseTryCallBranches(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { + parser.emitError(parser.getCurrentLocation(), "NYI"); + return failure(); +} + static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, ::mlir::OperationState &result, - llvm::StringRef extraAttrsAttrName) { + llvm::StringRef extraAttrsAttrName, + bool hasDestinationBlocks = false) { mlir::FlatSymbolRefAttr calleeAttr; llvm::SmallVector<::mlir::OpAsmParser::UnresolvedOperand, 4> ops; llvm::SMLoc opsLoc; @@ -2380,6 +2388,10 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, if (parser.parseRParen()) return ::mlir::failure(); + if (hasDestinationBlocks) + if (parseTryCallBranches(parser, result).failed()) + return ::mlir::failure(); + auto &builder = parser.getBuilder(); Attribute extraAttrs; if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) { @@ -2417,7 +2429,9 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, mlir::FlatSymbolRefAttr flatSym, ::mlir::OpAsmPrinter &state, ::mlir::cir::ExtraFuncAttributesAttr extraAttrs, - ::mlir::UnitAttr exception = {}) { + ::mlir::UnitAttr exception = {}, + mlir::Block *cont = nullptr, + mlir::Block *landingPad = nullptr) { state << ' '; auto callLikeOp = mlir::cast(op); @@ -2436,6 +2450,32 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, state << ops; state << ")"; + if (cont) { + assert(landingPad && "expected two successors"); + auto tryCall = dyn_cast(op); + assert(tryCall && "regular calls do not branch"); + state << tryCall.getCont(); + if (!tryCall.getContOperands().empty()) { + state << "("; + state << tryCall.getContOperands(); + state << ' ' << ":"; + state << ' '; + state << tryCall.getContOperands().getTypes(); + state << ")"; + } + state << ","; + state << ' '; + state << tryCall.getLandingPad(); + if (!tryCall.getLandingPadOperands().empty()) { + state << "("; + state << tryCall.getLandingPadOperands(); + state << ' ' << ":"; + state << ' '; + state << tryCall.getLandingPadOperands().getTypes(); + state << ")"; + } + } + llvm::SmallVector<::llvm::StringRef, 4> elidedAttrs; elidedAttrs.push_back("callee"); elidedAttrs.push_back("ast"); @@ -2471,6 +2511,61 @@ void CallOp::print(::mlir::OpAsmPrinter &state) { getExtraAttrs(), exception); } +//===----------------------------------------------------------------------===// +// TryCallOp +//===----------------------------------------------------------------------===// + +mlir::Value cir::TryCallOp::getIndirectCall() { + assert(isIndirect()); + return getOperand(0); +} + +mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_begin() { + auto arg_begin = operand_begin(); + if (isIndirect()) + arg_begin++; + return arg_begin; +} +mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_end() { + return operand_end(); +} + +/// Return the operand at index 'i', accounts for indirect call. +Value cir::TryCallOp::getArgOperand(unsigned i) { + if (isIndirect()) + i++; + return getOperand(i); +} +/// Return the number of operands, accounts for indirect call. +unsigned cir::TryCallOp::getNumArgOperands() { + if (isIndirect()) + return this->getOperation()->getNumOperands() - 1; + return this->getOperation()->getNumOperands(); +} + +LogicalResult +cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + return verifyCallCommInSymbolUses(*this, symbolTable); +} + +::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { + + return parseCallCommon(parser, result, getExtraAttrsAttrName(result.name)); +} + +void TryCallOp::print(::mlir::OpAsmPrinter &state) { + mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; + printCallCommon(*this, indirectCallee, getCalleeAttr(), state, + getExtraAttrs(), {}, getCont(), getLandingPad()); +} + +mlir::SuccessorOperands TryCallOp::getSuccessorOperands(unsigned index) { + assert(index < getNumSuccessors() && "invalid successor index"); + return SuccessorOperands(index == 0 ? getContOperandsMutable() + : getLandingPadOperandsMutable()); +} + //===----------------------------------------------------------------------===// // UnaryOp //===----------------------------------------------------------------------===// From 7043dc77b6d1807ef01aa515cb103df78980a0bf Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 26 Jul 2024 18:16:10 -0400 Subject: [PATCH 1711/2301] [CIR][CIRGen] Add comdat support, enabling more dso_local and comdat LLVM tagging (#751) This PR implements: 1. Add comdat attribute as an optional attribute to CIR GlobalOp and FuncOp, they are of the enum ComdatSelection type because it contains all information we need. The [llvm/include/llvm/IR/Comdat.h](https://github.com/llvm/clangir/blob/4ea2ec38638391b964e88b77f926e0892b350e04/llvm/include/llvm/IR/Comdat.h#L33) has Comdat with selectionKind and its name, add users which are set of pointers to GlobalObjects sharing the same name. The name is always the name of global object (in CIR context, the GlobalOp or FuncOp), so we don't need it. And the user set can always be collected when a transformation looks up symbol table of the module, thus not really necessary. Plus, it's not good for CIR to keep a set of pointers. 2. Thanks to comdat support, and adding call sites of setComdat similar to what was in OG, we are able to implement canBenefitFromLocalAlias as similar as possible to [GlobalValue::canBenefitFromLocalAlias() ](https://github.com/llvm/clangir/blob/4ea2ec38638391b964e88b77f926e0892b350e04/llvm/lib/IR/Globals.cpp#L112) and this enable us to complete dso_local work by correctly setting dso_local for functions. 3. I took back printing of dsolocal attribute of CIR, as I would fix 128+ test cases instead of 36-37 of tests (mostly adding dso_local to function prototype line in LLVM check) in this PR. printing dsolocal attribute is not really necessary as llvm IR would verify it for us. 4. LLVM lowering of Comdat for GlobalOp Things left to next/future PR 1. I'd like to extend setComdat to other parts of CG like Vtable, etc. 2. LLVM lowering of comdat for FuncOp --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 +- .../clang/CIR/Interfaces/CIROpInterfaces.td | 15 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 25 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 +- clang/lib/CIR/Interfaces/CIROpInterfaces.cpp | 17 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 33 ++- .../CIR/CodeGen/address-space-conversion.cpp | 6 +- clang/test/CIR/CodeGen/address-space.c | 6 +- clang/test/CIR/CodeGen/atomic.cpp | 18 +- clang/test/CIR/CodeGen/basic.c | 2 +- clang/test/CIR/CodeGen/builtin-alloca.c | 6 +- clang/test/CIR/CodeGen/builtin-constant-p.c | 2 +- .../test/CIR/CodeGen/builtin-floating-point.c | 276 +++++++++--------- clang/test/CIR/CodeGen/builtin-ms-alloca.c | 2 +- clang/test/CIR/CodeGen/builtin-prefetch.c | 2 +- clang/test/CIR/CodeGen/call-extra-attrs.cpp | 8 +- .../CodeGen/call-via-class-member-funcptr.cpp | 10 +- clang/test/CIR/CodeGen/complex.c | 26 +- clang/test/CIR/CodeGen/cond.cpp | 2 +- clang/test/CIR/CodeGen/coro-task.cpp | 2 +- clang/test/CIR/CodeGen/dynamic-cast-exact.cpp | 8 +- clang/test/CIR/CodeGen/fun-ptr.c | 4 +- clang/test/CIR/CodeGen/func_dsolocal_pie.c | 4 +- clang/test/CIR/CodeGen/function-attrs.cpp | 4 +- clang/test/CIR/CodeGen/lambda.cpp | 10 +- clang/test/CIR/CodeGen/libcall.cpp | 2 +- clang/test/CIR/CodeGen/linkage.c | 4 +- clang/test/CIR/CodeGen/multi-vtable.cpp | 16 +- clang/test/CIR/CodeGen/optnone.cpp | 2 +- clang/test/CIR/CodeGen/pass-object-size.c | 2 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 2 +- clang/test/CIR/CodeGen/var-arg-float.c | 2 +- clang/test/CIR/CodeGen/var-arg-scope.c | 2 +- clang/test/CIR/CodeGen/var-arg.c | 2 +- clang/test/CIR/CodeGen/vbase.cpp | 10 +- clang/test/CIR/CodeGen/vectype-ext.cpp | 12 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/CodeGen/weak.c | 2 +- clang/test/CIR/IR/func-dsolocal-parser.cir | 2 +- clang/test/CIR/Lowering/array-init.c | 2 +- clang/test/CIR/Lowering/bitfieils.c | 8 +- .../test/CIR/Lowering/builtin-binary-fp2fp.c | 60 ++-- clang/test/CIR/Lowering/struct-init.c | 4 +- clang/test/CIR/cc1.c | 2 +- clang/test/CIR/driver.c | 2 +- clang/test/CIR/mlirprint.c | 2 +- 46 files changed, 347 insertions(+), 295 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 314589702f1d..460b403ec19d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2082,7 +2082,7 @@ def TLSModel : I32EnumAttr< [TLS_GeneralDynamic, TLS_LocalDynamic, TLS_InitialExec, TLS_LocalExec]> { let cppNamespace = "::mlir::cir"; } - + def GlobalOp : CIR_Op<"global", [DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, @@ -2123,6 +2123,7 @@ def GlobalOp : CIR_Op<"global", OptionalAttr:$tls_model, // Note this can also be a FlatSymbolRefAttr OptionalAttr:$initial_value, + UnitAttr:$comdat, UnitAttr:$constant, UnitAttr:$dsolocal, OptionalAttr:$alignment, @@ -2134,6 +2135,7 @@ def GlobalOp : CIR_Op<"global", ($sym_visibility^)? (`constant` $constant^)? $linkage + (`comdat` $comdat^)? ($tls_model^)? (`dsolocal` $dsolocal^)? $sym_name @@ -2878,6 +2880,7 @@ def FuncOp : CIR_Op<"func", [ "GlobalLinkageKind::ExternalLinkage">:$linkage, ExtraFuncAttr:$extra_attrs, OptionalAttr:$sym_visibility, + UnitAttr:$comdat, OptionalAttr:$arg_attrs, OptionalAttr:$res_attrs, OptionalAttr:$aliasee, diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td index cec43646d0e8..09b09e8467f3 100644 --- a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td @@ -58,6 +58,12 @@ let cppNamespace = "::mlir::cir" in { }] >, InterfaceMethod<"", + "bool", "isExternalLinkage", (ins), [{}], + /*defaultImplementation=*/[{ + return mlir::cir::isExternalLinkage($_op.getLinkage()); + }] + >, + InterfaceMethod<"", "bool", "isDeclarationForLinker", (ins), [{}], /*defaultImplementation=*/[{ if ($_op.hasAvailableExternallyLinkage()) @@ -66,6 +72,12 @@ let cppNamespace = "::mlir::cir" in { }] >, InterfaceMethod<"", + "bool", "hasComdat", (ins), [{}], + /*defaultImplementation=*/[{ + return $_op.getComdat(); + }] + >, + InterfaceMethod<"", "void", "setDSOLocal", (ins "bool":$val), [{}], /*defaultImplementation=*/[{ $_op.setDsolocal(val); @@ -73,7 +85,8 @@ let cppNamespace = "::mlir::cir" in { >, ]; let extraClassDeclaration = [{ - bool canBenefitFromLocalAlias() const; + bool hasDefaultVisibility(); + bool canBenefitFromLocalAlias(); }]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 10e047dd1c35..9ff4acd41cef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -365,22 +365,12 @@ bool CIRGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { return true; } -static bool hasDefaultVisibility(CIRGlobalValueInterface GV) { - // Since we do not support hidden visibility and private visibility, - // we can assume that the default visibility is public or private. - // The way we use private visibility now simply is just treating it - // as either local or private linkage, or just default for declarations - assert(!MissingFeatures::hiddenVisibility()); - assert(!MissingFeatures::protectedVisibility()); - return GV.isPublic() || GV.isPrivate(); -} - static bool shouldAssumeDSOLocal(const CIRGenModule &CGM, CIRGlobalValueInterface GV) { if (GV.hasLocalLinkage()) return true; - if (!hasDefaultVisibility(GV) && !GV.hasExternalWeakLinkage()) { + if (!GV.hasDefaultVisibility() && !GV.hasExternalWeakLinkage()) { return true; } @@ -1436,8 +1426,11 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, // TODO(cir) assert(!cir::MissingFeatures::threadLocal() && "NYI"); assert(!cir::MissingFeatures::unnamedAddr() && "NYI"); - assert(!mlir::cir::isWeakForLinker(LT) && "NYI"); - assert(!cir::MissingFeatures::setDSOLocal() && "NYI"); + if (GV.isWeakForLinker()) { + assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals"); + GV.setComdat(true); + } + CGM.setDSOLocal(static_cast(GV)); return GV; } @@ -2910,10 +2903,10 @@ mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( OldGV->erase(); } - assert(!MissingFeatures::setComdat()); if (supportsCOMDAT() && mlir::cir::isWeakForLinker(Linkage) && - !GV.hasAvailableExternallyLinkage()) - assert(!MissingFeatures::setComdat()); + !GV.hasAvailableExternallyLinkage()) { + GV.setComdat(true); + } GV.setAlignmentAttr(getSize(Alignment)); setDSOLocal(static_cast(GV)); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a9363cc7c266..e5ae4e32b761 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2109,6 +2109,9 @@ void cir::FuncOp::print(OpAsmPrinter &p) { if (getNoProto()) p << "no_proto "; + if (getComdat()) + p << "comdat "; + if (getLinkage() != GlobalLinkageKind::ExternalLinkage) p << stringifyGlobalLinkageKind(getLinkage()) << ' '; @@ -2116,9 +2119,6 @@ void cir::FuncOp::print(OpAsmPrinter &p) { if (vis != mlir::SymbolTable::Visibility::Public) p << vis << " "; - if (getDsolocal()) - p << "dsolocal "; - // Print function name, signature, and control. p.printSymbolName(getSymName()); auto fnType = getFunctionType(); @@ -2148,6 +2148,7 @@ void cir::FuncOp::print(OpAsmPrinter &p) { getSymVisibilityAttrName(), getArgAttrsAttrName(), getResAttrsAttrName(), + getComdatAttrName(), }); if (auto aliaseeName = getAliasee()) { diff --git a/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp b/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp index 46e472c312be..93ab428d5f13 100644 --- a/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp +++ b/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp @@ -17,8 +17,21 @@ using namespace mlir::cir; #include "clang/CIR/MissingFeatures.h" -bool CIRGlobalValueInterface::canBenefitFromLocalAlias() const { +bool CIRGlobalValueInterface::hasDefaultVisibility() { + assert(!::cir::MissingFeatures::hiddenVisibility()); + assert(!::cir::MissingFeatures::protectedVisibility()); + return isPublic() || isPrivate(); +} + +bool CIRGlobalValueInterface::canBenefitFromLocalAlias() { assert(!::cir::MissingFeatures::supportIFuncAttr()); - assert(!::cir::MissingFeatures::setComdat()); + // hasComdat here should be isDeduplicateComdat, but as far as clang codegen + // is concerned, there is no case for Comdat::NoDeduplicate as all comdat + // would be Comdat::Any or Comdat::Largest (in the case of MS ABI). And CIRGen + // wouldn't even generate Comdat::Largest comdat as it tries to leave ABI + // specifics to LLVM lowering stage, thus here we don't need test Comdat + // selectionKind. + return hasDefaultVisibility() && isExternalLinkage() && !isDeclaration() && + !hasComdat(); return false; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index a7d3728bcccb..4fcab4d34b2e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -46,12 +46,12 @@ #include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/DialectConversion.h" -#include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/MissingFeatures.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/APInt.h" @@ -1434,7 +1434,7 @@ class CIRFuncLowering : public mlir::OpConversionPattern { cast(extraAttrsEntry.getValue()); if (!oldExtraAttrs.getElements().contains(attrKey)) return; - + mlir::NamedAttrList newExtraAttrs; for (auto entry : oldExtraAttrs.getElements()) { if (entry.getName() == attrKey) { @@ -1816,13 +1816,35 @@ class CIRGlobalOpLowering } // Rewrite op. - rewriter.replaceOpWithNewOp( + auto llvmGlobalOp = rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, init.value(), /*alignment*/ 0, /*addrSpace*/ 0, /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); + auto mod = op->getParentOfType(); + if (op.getComdat()) + addComdat(llvmGlobalOp, comdatOp, rewriter, mod); return mlir::success(); } + +private: + mutable mlir::LLVM::ComdatOp comdatOp = nullptr; + static void addComdat(mlir::LLVM::GlobalOp &op, + mlir::LLVM::ComdatOp &comdatOp, + mlir::OpBuilder &builder, mlir::ModuleOp &module) { + StringRef comdatName("__llvm_comdat_globals"); + if (!comdatOp) { + builder.setInsertionPointToStart(module.getBody()); + comdatOp = + builder.create(module.getLoc(), comdatName); + } + builder.setInsertionPointToStart(&comdatOp.getBody().back()); + auto selectorOp = builder.create( + comdatOp.getLoc(), op.getSymName(), mlir::LLVM::comdat::Comdat::Any); + op.setComdatAttr(mlir::SymbolRefAttr::get( + builder.getContext(), comdatName, + mlir::FlatSymbolRefAttr::get(selectorOp.getSymNameAttr()))); + } }; class CIRUnaryOpLowering @@ -3413,7 +3435,7 @@ class CIRUndefOpLowering matchAndRewrite(mlir::cir::UndefOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto typ = getTypeConverter()->convertType(op.getRes().getType()); - + rewriter.replaceOpWithNewOp(op, typ); return mlir::success(); } @@ -3453,7 +3475,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRRintOpLowering, CIRRoundOpLowering, CIRSinOpLowering, CIRSqrtOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, CIRFModOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering, CIRPowOpLowering, - CIRClearCacheOpLowering, CIRUndefOpLowering>(converter, patterns.getContext()); + CIRClearCacheOpLowering, CIRUndefOpLowering>(converter, + patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/address-space-conversion.cpp b/clang/test/CIR/CodeGen/address-space-conversion.cpp index e618e9ac21fe..1f4ee00ca63b 100644 --- a/clang/test/CIR/CodeGen/address-space-conversion.cpp +++ b/clang/test/CIR/CodeGen/address-space-conversion.cpp @@ -10,7 +10,7 @@ using ri1_t = int __attribute__((address_space(1))) &; using ri2_t = int __attribute__((address_space(2))) &; // CIR: cir.func @{{.*test_ptr.*}} -// LLVM: define void @{{.*test_ptr.*}} +// LLVM: define dso_local void @{{.*test_ptr.*}} void test_ptr() { pi1_t ptr1; pi2_t ptr2 = (pi2_t)ptr1; @@ -24,7 +24,7 @@ void test_ptr() { } // CIR: cir.func @{{.*test_ref.*}} -// LLVM: define void @{{.*test_ref.*}} +// LLVM: define dso_local void @{{.*test_ref.*}} void test_ref() { pi1_t ptr; ri1_t ref1 = *ptr; @@ -43,7 +43,7 @@ void test_ref() { } // CIR: cir.func @{{.*test_nullptr.*}} -// LLVM: define void @{{.*test_nullptr.*}} +// LLVM: define dso_local void @{{.*test_nullptr.*}} void test_nullptr() { constexpr pi1_t null1 = nullptr; pi2_t ptr = (pi2_t)null1; diff --git a/clang/test/CIR/CodeGen/address-space.c b/clang/test/CIR/CodeGen/address-space.c index 100cdaaaa753..b1b741594257 100644 --- a/clang/test/CIR/CodeGen/address-space.c +++ b/clang/test/CIR/CodeGen/address-space.c @@ -4,19 +4,19 @@ // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM // CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr)> -// LLVM: define void @foo(ptr addrspace(1) %0) +// LLVM: define dso_local void @foo(ptr addrspace(1) %0) void foo(int __attribute__((address_space(1))) *arg) { return; } // CIR: cir.func {{@.*bar.*}}(%arg0: !cir.ptr)> -// LLVM: define void @bar(ptr %0) +// LLVM: define dso_local void @bar(ptr %0) void bar(int __attribute__((address_space(0))) *arg) { return; } // CIR: cir.func {{@.*baz.*}}(%arg0: !cir.ptr -// LLVM: define void @baz(ptr %0) +// LLVM: define dso_local void @baz(ptr %0) void baz(int *arg) { return; } diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 262b130273f1..b031c330c275 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -28,7 +28,7 @@ int basic_binop_fetch(int *i) { // CHECK: %[[VAL:.*]] = cir.load %[[ONE_ADDR]] : !cir.ptr, !s32i // CHECK: cir.atomic.fetch(add, %[[I]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) : !s32i -// LLVM: define i32 @_Z17basic_binop_fetchPi +// LLVM: define dso_local i32 @_Z17basic_binop_fetchPi // LLVM: %[[RMW:.*]] = atomicrmw add ptr {{.*}}, i32 %[[VAL:.*]] seq_cst, align 4 // LLVM: add i32 %[[RMW]], %[[VAL]] @@ -45,7 +45,7 @@ int other_binop_fetch(int *i) { // CHECK: cir.atomic.fetch(or, {{.*}}, acquire // CHECK: cir.atomic.fetch(xor, {{.*}}, release -// LLVM: define i32 @_Z17other_binop_fetchPi +// LLVM: define dso_local i32 @_Z17other_binop_fetchPi // LLVM: %[[RMW_SUB:.*]] = atomicrmw sub ptr {{.*}} monotonic // LLVM: sub i32 %[[RMW_SUB]], {{.*}} // LLVM: %[[RMW_AND:.*]] = atomicrmw and ptr {{.*}} acquire @@ -62,7 +62,7 @@ int nand_binop_fetch(int *i) { // CHECK: cir.func @_Z16nand_binop_fetchPi // CHECK: cir.atomic.fetch(nand, {{.*}}, acq_rel -// LLVM: define i32 @_Z16nand_binop_fetchPi +// LLVM: define dso_local i32 @_Z16nand_binop_fetchPi // LLVM: %[[RMW_NAND:.*]] = atomicrmw nand ptr {{.*}} acq_rel // LLVM: %[[AND:.*]] = and i32 %[[RMW_NAND]] // LLVM: = xor i32 %[[AND]], -1 @@ -76,7 +76,7 @@ int fp_binop_fetch(float *i) { // CHECK: cir.atomic.fetch(add, // CHECK: cir.atomic.fetch(sub, -// LLVM: define i32 @_Z14fp_binop_fetchPf +// LLVM: define dso_local i32 @_Z14fp_binop_fetchPf // LLVM: %[[RMW_FADD:.*]] = atomicrmw fadd ptr // LLVM: fadd float %[[RMW_FADD]] // LLVM: %[[RMW_FSUB:.*]] = atomicrmw fsub ptr @@ -99,7 +99,7 @@ int fetch_binop(int *i) { // CHECK: cir.atomic.fetch(xor, {{.*}}) fetch_first // CHECK: cir.atomic.fetch(nand, {{.*}}) fetch_first -// LLVM: define i32 @_Z11fetch_binopPi +// LLVM: define dso_local i32 @_Z11fetch_binopPi // LLVM: atomicrmw add ptr // LLVM-NOT: add {{.*}} // LLVM: atomicrmw sub ptr @@ -126,7 +126,7 @@ void min_max_fetch(int *i) { // CHECK: = cir.atomic.fetch(max, {{.*}}) : !s32i // CHECK: = cir.atomic.fetch(min, {{.*}}) : !s32i -// LLVM: define void @_Z13min_max_fetchPi +// LLVM: define dso_local void @_Z13min_max_fetchPi // LLVM: atomicrmw max ptr // LLVM-NOT: icmp {{.*}} // LLVM: atomicrmw min ptr @@ -343,7 +343,7 @@ void inc_int(int* a, int b) { int c = __sync_fetch_and_add(a, b); } // CHECK-LABEL: @_Z7inc_int -// CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr // CHECK: %[[VAL:.*]] = cir.load {{.*}} : !cir.ptr, !s32i // CHECK: %[[RES:.*]] = cir.atomic.fetch(add, %[[PTR]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) fetch_first : !s32i // CHECK: cir.store %[[RES]], {{.*}} : !s32i, !cir.ptr @@ -442,7 +442,7 @@ void cmp_bool_byte(char* p, char x, char u) { // LLVM: %[[TMP:.*]] = extractvalue { i32, i1 } %[[RES]], 0 // LLVM: store i32 %[[TMP]], ptr {{.*}} void cmp_val_int(int* p, int x, int u) { - int r = __sync_val_compare_and_swap(p, x, u); + int r = __sync_val_compare_and_swap(p, x, u); } // CHECK-LABEL: @_Z12cmp_val_long @@ -466,7 +466,7 @@ void cmp_val_short(short* p, short x, short u) { // CHECK-LABEL: @_Z12cmp_val_byte // CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) : (!s8i, !cir.bool) -// LLVM-LABEL: @_Z12cmp_val_byte +// LLVM-LABEL: @_Z12cmp_val_byte // LLVM: cmpxchg ptr {{.*}}, i8 {{.*}}, i8 {{.*}} seq_cst seq_cst void cmp_val_byte(char* p, char x, char u) { char r = __sync_val_compare_and_swap(p, x, u); diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 5ef5dbf21a6e..4fb5f6c6853c 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -30,7 +30,7 @@ int f2(void) { return 3; } // CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i // CIR-NEXT: cir.return %2 : !s32i -// LLVM: define i32 @f2() +// LLVM: define dso_local i32 @f2() // LLVM-NEXT: %1 = alloca i32, i64 1, align 4 // LLVM-NEXT: store i32 3, ptr %1, align 4 // LLVM-NEXT: %2 = load i32, ptr %1, align 4 diff --git a/clang/test/CIR/CodeGen/builtin-alloca.c b/clang/test/CIR/CodeGen/builtin-alloca.c index f79a9f8c9a83..3aa6b04bbeb9 100644 --- a/clang/test/CIR/CodeGen/builtin-alloca.c +++ b/clang/test/CIR/CodeGen/builtin-alloca.c @@ -17,7 +17,7 @@ void my_alloca(size_t n) // CIR: } -// LLVM: define void @my_alloca(i64 [[ALLOCA_SIZE:%.*]]) +// LLVM: define dso_local void @my_alloca(i64 [[ALLOCA_SIZE:%.*]]) // LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]], // LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]], // LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16 @@ -36,7 +36,7 @@ void my___builtin_alloca(size_t n) // CIR: } -// LLVM: define void @my___builtin_alloca(i64 [[ALLOCA_SIZE:%.*]]) +// LLVM: define dso_local void @my___builtin_alloca(i64 [[ALLOCA_SIZE:%.*]]) // LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]], // LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]], // LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16 @@ -55,7 +55,7 @@ void my__builtin_alloca_uninitialized(size_t n) // CIR: } -// LLVM: define void @my__builtin_alloca_uninitialized(i64 [[ALLOCA_SIZE:%.*]]) +// LLVM: define dso_local void @my__builtin_alloca_uninitialized(i64 [[ALLOCA_SIZE:%.*]]) // LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]], // LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]], // LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16 diff --git a/clang/test/CIR/CodeGen/builtin-constant-p.c b/clang/test/CIR/CodeGen/builtin-constant-p.c index 4d6b5c9e5597..a8eb13adacfd 100644 --- a/clang/test/CIR/CodeGen/builtin-constant-p.c +++ b/clang/test/CIR/CodeGen/builtin-constant-p.c @@ -16,7 +16,7 @@ int foo() { // CIR: [[TMP5:%.*]] = cir.load [[TMP0]] : !cir.ptr, !s32i // CIR: cir.return [[TMP5]] : !s32i -// LLVM:define i32 @foo() +// LLVM:define dso_local i32 @foo() // LLVM: [[TMP1:%.*]] = alloca i32, i64 1 // LLVM: [[TMP2:%.*]] = load i32, ptr @a // LLVM: [[TMP3:%.*]] = call i1 @llvm.is.constant.i32(i32 [[TMP2]]) diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c index 84bd60c06ac2..3fb9e4142834 100644 --- a/clang/test/CIR/CodeGen/builtin-floating-point.c +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -10,7 +10,7 @@ long my_lroundf(float f) { // CHECK: cir.func @my_lroundf // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.float -> !s64i - // LLVM: define i64 @my_lroundf + // LLVM: define dso_local i64 @my_lroundf // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f32(float %{{.+}}) // LLVM: } } @@ -20,7 +20,7 @@ long my_lround(double f) { // CHECK: cir.func @my_lround // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.double -> !s64i - // LLVM: define i64 @my_lround + // LLVM: define dso_local i64 @my_lround // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f64(double %{{.+}}) // LLVM: } } @@ -31,7 +31,7 @@ long my_lroundl(long double f) { // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.long_double -> !s64i // AARCH64: %{{.+}} = cir.lround %{{.+}} : !cir.long_double -> !s64i - // LLVM: define i64 @my_lroundl + // LLVM: define dso_local i64 @my_lroundl // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -45,7 +45,7 @@ long call_lroundf(float f) { // CHECK: cir.func @call_lroundf // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.float -> !s64i - // LLVM: define i64 @call_lroundf + // LLVM: define dso_local i64 @call_lroundf // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f32(float %{{.+}}) // LLVM: } } @@ -55,7 +55,7 @@ long call_lround(double f) { // CHECK: cir.func @call_lround // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.double -> !s64i - // LLVM: define i64 @call_lround + // LLVM: define dso_local i64 @call_lround // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f64(double %{{.+}}) // LLVM: } } @@ -66,7 +66,7 @@ long call_lroundl(long double f) { // CHECK: %{{.+}} = cir.lround %{{.+}} : !cir.long_double -> !s64i // AARCH64: %{{.+}} = cir.lround %{{.+}} : !cir.long_double -> !s64i - // LLVM: define i64 @call_lroundl + // LLVM: define dso_local i64 @call_lroundl // LLVM: %{{.+}} = call i64 @llvm.lround.i64.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -78,7 +78,7 @@ long long my_llroundf(float f) { // CHECK: cir.func @my_llroundf // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.float -> !s64i - // LLVM: define i64 @my_llroundf + // LLVM: define dso_local i64 @my_llroundf // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f32(float %{{.+}}) // LLVM: } } @@ -88,7 +88,7 @@ long long my_llround(double f) { // CHECK: cir.func @my_llround // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.double -> !s64i - // LLVM: define i64 @my_llround + // LLVM: define dso_local i64 @my_llround // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f64(double %{{.+}}) // LLVM: } } @@ -99,7 +99,7 @@ long long my_llroundl(long double f) { // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.long_double -> !s64i // AARCH64: %{{.+}} = cir.llround %{{.+}} : !cir.long_double -> !s64i - // LLVM: define i64 @my_llroundl + // LLVM: define dso_local i64 @my_llroundl // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -113,7 +113,7 @@ long long call_llroundf(float f) { // CHECK: cir.func @call_llroundf // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.float -> !s64i - // LLVM: define i64 @call_llroundf + // LLVM: define dso_local i64 @call_llroundf // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f32(float %{{.+}}) // LLVM: } } @@ -123,7 +123,7 @@ long long call_llround(double f) { // CHECK: cir.func @call_llround // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.double -> !s64i - // LLVM: define i64 @call_llround + // LLVM: define dso_local i64 @call_llround // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f64(double %{{.+}}) // LLVM: } } @@ -134,7 +134,7 @@ long long call_llroundl(long double f) { // CHECK: %{{.+}} = cir.llround %{{.+}} : !cir.long_double -> !s64i // AARCH64: %{{.+}} = cir.llround %{{.+}} : !cir.long_double -> !s64i - // LLVM: define i64 @call_llroundl + // LLVM: define dso_local i64 @call_llroundl // LLVM: %{{.+}} = call i64 @llvm.llround.i64.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -146,7 +146,7 @@ long my_lrintf(float f) { // CHECK: cir.func @my_lrintf // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.float -> !s64i - // LLVM: define i64 @my_lrintf + // LLVM: define dso_local i64 @my_lrintf // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f32(float %{{.+}}) // LLVM: } } @@ -156,7 +156,7 @@ long my_lrint(double f) { // CHECK: cir.func @my_lrint // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.double -> !s64i - // LLVM: define i64 @my_lrint + // LLVM: define dso_local i64 @my_lrint // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f64(double %{{.+}}) // LLVM: } } @@ -167,7 +167,7 @@ long my_lrintl(long double f) { // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.long_double -> !s64i // AARCH64: %{{.+}} = cir.lrint %{{.+}} : !cir.long_double -> !s64i - // LLVM: define i64 @my_lrintl + // LLVM: define dso_local i64 @my_lrintl // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -181,7 +181,7 @@ long call_lrintf(float f) { // CHECK: cir.func @call_lrintf // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.float -> !s64i - // LLVM: define i64 @call_lrintf + // LLVM: define dso_local i64 @call_lrintf // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f32(float %{{.+}}) // LLVM: } } @@ -191,7 +191,7 @@ long call_lrint(double f) { // CHECK: cir.func @call_lrint // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.double -> !s64i - // LLVM: define i64 @call_lrint + // LLVM: define dso_local i64 @call_lrint // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f64(double %{{.+}}) // LLVM: } } @@ -202,7 +202,7 @@ long call_lrintl(long double f) { // CHECK: %{{.+}} = cir.lrint %{{.+}} : !cir.long_double -> !s64i // AARCH64: %{{.+}} = cir.lrint %{{.+}} : !cir.long_double -> !s64i - // LLVM: define i64 @call_lrintl + // LLVM: define dso_local i64 @call_lrintl // LLVM: %{{.+}} = call i64 @llvm.lrint.i64.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -214,7 +214,7 @@ long long my_llrintf(float f) { // CHECK: cir.func @my_llrintf // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.float -> !s64i - // LLVM: define i64 @my_llrintf + // LLVM: define dso_local i64 @my_llrintf // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f32(float %{{.+}}) // LLVM: } } @@ -224,7 +224,7 @@ long long my_llrint(double f) { // CHECK: cir.func @my_llrint // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.double -> !s64i - // LLVM: define i64 @my_llrint + // LLVM: define dso_local i64 @my_llrint // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f64(double %{{.+}}) // LLVM: } } @@ -235,7 +235,7 @@ long long my_llrintl(long double f) { // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.long_double -> !s64i // AARCH64: %{{.+}} = cir.llrint %{{.+}} : !cir.long_double -> !s64i - // LLVM: define i64 @my_llrintl + // LLVM: define dso_local i64 @my_llrintl // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -249,7 +249,7 @@ long long call_llrintf(float f) { // CHECK: cir.func @call_llrintf // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.float -> !s64i - // LLVM: define i64 @call_llrintf + // LLVM: define dso_local i64 @call_llrintf // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f32(float %{{.+}}) // LLVM: } } @@ -259,7 +259,7 @@ long long call_llrint(double f) { // CHECK: cir.func @call_llrint // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.double -> !s64i - // LLVM: define i64 @call_llrint + // LLVM: define dso_local i64 @call_llrint // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f64(double %{{.+}}) // LLVM: } } @@ -270,7 +270,7 @@ long long call_llrintl(long double f) { // CHECK: %{{.+}} = cir.llrint %{{.+}} : !cir.long_double -> !s64i // AARCH64: %{{.+}} = cir.llrint %{{.+}} : !cir.long_double -> !s64i - // LLVM: define i64 @call_llrintl + // LLVM: define dso_local i64 @call_llrintl // LLVM: %{{.+}} = call i64 @llvm.llrint.i64.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -282,7 +282,7 @@ float my_ceilf(float f) { // CHECK: cir.func @my_ceilf // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.float - // LLVM: define float @my_ceilf(float %0) + // LLVM: define dso_local float @my_ceilf(float %0) // LLVM: %{{.+}} = call float @llvm.ceil.f32(float %{{.+}}) // LLVM: } } @@ -292,7 +292,7 @@ double my_ceil(double f) { // CHECK: cir.func @my_ceil // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.double - // LLVM: define double @my_ceil(double %0) + // LLVM: define dso_local double @my_ceil(double %0) // LLVM: %{{.+}} = call double @llvm.ceil.f64(double %{{.+}}) // LLVM: } } @@ -303,7 +303,7 @@ long double my_ceill(long double f) { // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.ceil {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_ceill(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_ceill(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.ceil.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -317,7 +317,7 @@ float call_ceilf(float f) { // CHECK: cir.func @call_ceilf // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.float - // LLVM: define float @call_ceilf(float %0) + // LLVM: define dso_local float @call_ceilf(float %0) // LLVM: %{{.+}} = call float @llvm.ceil.f32(float %{{.+}}) // LLVM: } } @@ -327,7 +327,7 @@ double call_ceil(double f) { // CHECK: cir.func @call_ceil // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.double - // LLVM: define double @call_ceil(double %0) + // LLVM: define dso_local double @call_ceil(double %0) // LLVM: %{{.+}} = call double @llvm.ceil.f64(double %{{.+}}) // LLVM: } } @@ -338,7 +338,7 @@ long double call_ceill(long double f) { // CHECK: {{.+}} = cir.ceil {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.ceil {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_ceill(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_ceill(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.ceil.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -350,7 +350,7 @@ float my_cosf(float f) { // CHECK: cir.func @my_cosf // CHECK: {{.+}} = cir.cos {{.+}} : !cir.float - // LLVM: define float @my_cosf(float %0) + // LLVM: define dso_local float @my_cosf(float %0) // LLVM: %{{.+}} = call float @llvm.cos.f32(float %{{.+}}) // LLVM: } } @@ -360,7 +360,7 @@ double my_cos(double f) { // CHECK: cir.func @my_cos // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double - // LLVM: define double @my_cos(double %0) + // LLVM: define dso_local double @my_cos(double %0) // LLVM: %{{.+}} = call double @llvm.cos.f64(double %{{.+}}) // LLVM: } } @@ -371,7 +371,7 @@ long double my_cosl(long double f) { // CHECK: {{.+}} = cir.cos {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.cos {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_cosl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_cosl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.cos.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -385,7 +385,7 @@ float call_cosf(float f) { // CHECK: cir.func @call_cosf // CHECK: {{.+}} = cir.cos {{.+}} : !cir.float - // LLVM: define float @call_cosf(float %0) + // LLVM: define dso_local float @call_cosf(float %0) // LLVM: %{{.+}} = call float @llvm.cos.f32(float %{{.+}}) // LLVM: } } @@ -395,7 +395,7 @@ double call_cos(double f) { // CHECK: cir.func @call_cos // CHECK: {{.+}} = cir.cos {{.+}} : !cir.double - // LLVM: define double @call_cos(double %0) + // LLVM: define dso_local double @call_cos(double %0) // LLVM: %{{.+}} = call double @llvm.cos.f64(double %{{.+}}) // LLVM: } } @@ -406,7 +406,7 @@ long double call_cosl(long double f) { // CHECK: {{.+}} = cir.cos {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.cos {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_cosl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_cosl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.cos.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -418,7 +418,7 @@ float my_expf(float f) { // CHECK: cir.func @my_expf // CHECK: {{.+}} = cir.exp {{.+}} : !cir.float - // LLVM: define float @my_expf(float %0) + // LLVM: define dso_local float @my_expf(float %0) // LLVM: %{{.+}} = call float @llvm.exp.f32(float %{{.+}}) // LLVM: } } @@ -428,7 +428,7 @@ double my_exp(double f) { // CHECK: cir.func @my_exp // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double - // LLVM: define double @my_exp(double %0) + // LLVM: define dso_local double @my_exp(double %0) // LLVM: %{{.+}} = call double @llvm.exp.f64(double %{{.+}}) // LLVM: } } @@ -439,7 +439,7 @@ long double my_expl(long double f) { // CHECK: {{.+}} = cir.exp {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.exp {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_expl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_expl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.exp.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -453,7 +453,7 @@ float call_expf(float f) { // CHECK: cir.func @call_expf // CHECK: {{.+}} = cir.exp {{.+}} : !cir.float - // LLVM: define float @call_expf(float %0) + // LLVM: define dso_local float @call_expf(float %0) // LLVM: %{{.+}} = call float @llvm.exp.f32(float %{{.+}}) // LLVM: } } @@ -463,7 +463,7 @@ double call_exp(double f) { // CHECK: cir.func @call_exp // CHECK: {{.+}} = cir.exp {{.+}} : !cir.double - // LLVM: define double @call_exp(double %0) + // LLVM: define dso_local double @call_exp(double %0) // LLVM: %{{.+}} = call double @llvm.exp.f64(double %{{.+}}) // LLVM: } } @@ -474,7 +474,7 @@ long double call_expl(long double f) { // CHECK: {{.+}} = cir.exp {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.exp {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_expl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_expl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.exp.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -486,7 +486,7 @@ float my_exp2f(float f) { // CHECK: cir.func @my_exp2f // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.float - // LLVM: define float @my_exp2f(float %0) + // LLVM: define dso_local float @my_exp2f(float %0) // LLVM: %{{.+}} = call float @llvm.exp2.f32(float %{{.+}}) // LLVM: } } @@ -496,7 +496,7 @@ double my_exp2(double f) { // CHECK: cir.func @my_exp2 // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double - // LLVM: define double @my_exp2(double %0) + // LLVM: define dso_local double @my_exp2(double %0) // LLVM: %{{.+}} = call double @llvm.exp2.f64(double %{{.+}}) // LLVM: } } @@ -507,7 +507,7 @@ long double my_exp2l(long double f) { // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.exp2 {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_exp2l(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_exp2l(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.exp2.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -521,7 +521,7 @@ float call_exp2f(float f) { // CHECK: cir.func @call_exp2f // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.float - // LLVM: define float @call_exp2f(float %0) + // LLVM: define dso_local float @call_exp2f(float %0) // LLVM: %{{.+}} = call float @llvm.exp2.f32(float %{{.+}}) // LLVM: } } @@ -531,7 +531,7 @@ double call_exp2(double f) { // CHECK: cir.func @call_exp2 // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.double - // LLVM: define double @call_exp2(double %0) + // LLVM: define dso_local double @call_exp2(double %0) // LLVM: %{{.+}} = call double @llvm.exp2.f64(double %{{.+}}) // LLVM: } } @@ -542,7 +542,7 @@ long double call_exp2l(long double f) { // CHECK: {{.+}} = cir.exp2 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.exp2 {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_exp2l(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_exp2l(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.exp2.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -554,7 +554,7 @@ float my_floorf(float f) { // CHECK: cir.func @my_floorf // CHECK: {{.+}} = cir.floor {{.+}} : !cir.float - // LLVM: define float @my_floorf(float %0) + // LLVM: define dso_local float @my_floorf(float %0) // LLVM: %{{.+}} = call float @llvm.floor.f32(float %{{.+}}) // LLVM: } } @@ -564,7 +564,7 @@ double my_floor(double f) { // CHECK: cir.func @my_floor // CHECK: {{.+}} = cir.floor {{.+}} : !cir.double - // LLVM: define double @my_floor(double %0) + // LLVM: define dso_local double @my_floor(double %0) // LLVM: %{{.+}} = call double @llvm.floor.f64(double %{{.+}}) // LLVM: } } @@ -575,7 +575,7 @@ long double my_floorl(long double f) { // CHECK: {{.+}} = cir.floor {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.floor {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_floorl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_floorl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.floor.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -589,7 +589,7 @@ float call_floorf(float f) { // CHECK: cir.func @call_floorf // CHECK: {{.+}} = cir.floor {{.+}} : !cir.float - // LLVM: define float @call_floorf(float %0) + // LLVM: define dso_local float @call_floorf(float %0) // LLVM: %{{.+}} = call float @llvm.floor.f32(float %{{.+}}) // LLVM: } } @@ -599,7 +599,7 @@ double call_floor(double f) { // CHECK: cir.func @call_floor // CHECK: {{.+}} = cir.floor {{.+}} : !cir.double - // LLVM: define double @call_floor(double %0) + // LLVM: define dso_local double @call_floor(double %0) // LLVM: %{{.+}} = call double @llvm.floor.f64(double %{{.+}}) // LLVM: } } @@ -610,7 +610,7 @@ long double call_floorl(long double f) { // CHECK: {{.+}} = cir.floor {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.floor {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_floorl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_floorl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.floor.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -622,7 +622,7 @@ float my_logf(float f) { // CHECK: cir.func @my_logf // CHECK: {{.+}} = cir.log {{.+}} : !cir.float - // LLVM: define float @my_logf(float %0) + // LLVM: define dso_local float @my_logf(float %0) // LLVM: %{{.+}} = call float @llvm.log.f32(float %{{.+}}) // LLVM: } } @@ -632,7 +632,7 @@ double my_log(double f) { // CHECK: cir.func @my_log // CHECK: {{.+}} = cir.log {{.+}} : !cir.double - // LLVM: define double @my_log(double %0) + // LLVM: define dso_local double @my_log(double %0) // LLVM: %{{.+}} = call double @llvm.log.f64(double %{{.+}}) // LLVM: } } @@ -643,7 +643,7 @@ long double my_logl(long double f) { // CHECK: {{.+}} = cir.log {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_logl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_logl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.log.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -657,7 +657,7 @@ float call_logf(float f) { // CHECK: cir.func @call_logf // CHECK: {{.+}} = cir.log {{.+}} : !cir.float - // LLVM: define float @call_logf(float %0) + // LLVM: define dso_local float @call_logf(float %0) // LLVM: %{{.+}} = call float @llvm.log.f32(float %{{.+}}) // LLVM: } } @@ -667,7 +667,7 @@ double call_log(double f) { // CHECK: cir.func @call_log // CHECK: {{.+}} = cir.log {{.+}} : !cir.double - // LLVM: define double @call_log(double %0) + // LLVM: define dso_local double @call_log(double %0) // LLVM: %{{.+}} = call double @llvm.log.f64(double %{{.+}}) // LLVM: } } @@ -678,7 +678,7 @@ long double call_logl(long double f) { // CHECK: {{.+}} = cir.log {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_logl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_logl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.log.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -690,7 +690,7 @@ float my_log10f(float f) { // CHECK: cir.func @my_log10f // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.float - // LLVM: define float @my_log10f(float %0) + // LLVM: define dso_local float @my_log10f(float %0) // LLVM: %{{.+}} = call float @llvm.log10.f32(float %{{.+}}) // LLVM: } } @@ -700,7 +700,7 @@ double my_log10(double f) { // CHECK: cir.func @my_log10 // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double - // LLVM: define double @my_log10(double %0) + // LLVM: define dso_local double @my_log10(double %0) // LLVM: %{{.+}} = call double @llvm.log10.f64(double %{{.+}}) // LLVM: } } @@ -711,7 +711,7 @@ long double my_log10l(long double f) { // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log10 {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_log10l(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_log10l(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.log10.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -725,7 +725,7 @@ float call_log10f(float f) { // CHECK: cir.func @call_log10f // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.float - // LLVM: define float @call_log10f(float %0) + // LLVM: define dso_local float @call_log10f(float %0) // LLVM: %{{.+}} = call float @llvm.log10.f32(float %{{.+}}) // LLVM: } } @@ -735,7 +735,7 @@ double call_log10(double f) { // CHECK: cir.func @call_log10 // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.double - // LLVM: define double @call_log10(double %0) + // LLVM: define dso_local double @call_log10(double %0) // LLVM: %{{.+}} = call double @llvm.log10.f64(double %{{.+}}) // LLVM: } } @@ -746,7 +746,7 @@ long double call_log10l(long double f) { // CHECK: {{.+}} = cir.log10 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log10 {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_log10l(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_log10l(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.log10.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -758,7 +758,7 @@ float my_log2f(float f) { // CHECK: cir.func @my_log2f // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.float - // LLVM: define float @my_log2f(float %0) + // LLVM: define dso_local float @my_log2f(float %0) // LLVM: %{{.+}} = call float @llvm.log2.f32(float %{{.+}}) // LLVM: } } @@ -768,7 +768,7 @@ double my_log2(double f) { // CHECK: cir.func @my_log2 // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double - // LLVM: define double @my_log2(double %0) + // LLVM: define dso_local double @my_log2(double %0) // LLVM: %{{.+}} = call double @llvm.log2.f64(double %{{.+}}) // LLVM: } } @@ -779,7 +779,7 @@ long double my_log2l(long double f) { // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log2 {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_log2l(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_log2l(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.log2.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -793,7 +793,7 @@ float call_log2f(float f) { // CHECK: cir.func @call_log2f // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.float - // LLVM: define float @call_log2f(float %0) + // LLVM: define dso_local float @call_log2f(float %0) // LLVM: %{{.+}} = call float @llvm.log2.f32(float %{{.+}}) // LLVM: } } @@ -803,7 +803,7 @@ double call_log2(double f) { // CHECK: cir.func @call_log2 // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.double - // LLVM: define double @call_log2(double %0) + // LLVM: define dso_local double @call_log2(double %0) // LLVM: %{{.+}} = call double @llvm.log2.f64(double %{{.+}}) // LLVM: } } @@ -814,7 +814,7 @@ long double call_log2l(long double f) { // CHECK: {{.+}} = cir.log2 {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.log2 {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_log2l(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_log2l(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.log2.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -826,7 +826,7 @@ float my_nearbyintf(float f) { // CHECK: cir.func @my_nearbyintf // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.float - // LLVM: define float @my_nearbyintf(float %0) + // LLVM: define dso_local float @my_nearbyintf(float %0) // LLVM: %{{.+}} = call float @llvm.nearbyint.f32(float %{{.+}}) // LLVM: } } @@ -836,7 +836,7 @@ double my_nearbyint(double f) { // CHECK: cir.func @my_nearbyint // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.double - // LLVM: define double @my_nearbyint(double %0) + // LLVM: define dso_local double @my_nearbyint(double %0) // LLVM: %{{.+}} = call double @llvm.nearbyint.f64(double %{{.+}}) // LLVM: } } @@ -847,7 +847,7 @@ long double my_nearbyintl(long double f) { // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_nearbyintl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_nearbyintl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.nearbyint.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -861,7 +861,7 @@ float call_nearbyintf(float f) { // CHECK: cir.func @call_nearbyintf // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.float - // LLVM: define float @call_nearbyintf(float %0) + // LLVM: define dso_local float @call_nearbyintf(float %0) // LLVM: %{{.+}} = call float @llvm.nearbyint.f32(float %{{.+}}) // LLVM: } } @@ -871,7 +871,7 @@ double call_nearbyint(double f) { // CHECK: cir.func @call_nearbyint // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.double - // LLVM: define double @call_nearbyint(double %0) + // LLVM: define dso_local double @call_nearbyint(double %0) // LLVM: %{{.+}} = call double @llvm.nearbyint.f64(double %{{.+}}) // LLVM: } } @@ -882,7 +882,7 @@ long double call_nearbyintl(long double f) { // CHECK: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.nearbyint {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_nearbyintl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_nearbyintl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.nearbyint.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -894,7 +894,7 @@ float my_rintf(float f) { // CHECK: cir.func @my_rintf // CHECK: {{.+}} = cir.rint {{.+}} : !cir.float - // LLVM: define float @my_rintf(float %0) + // LLVM: define dso_local float @my_rintf(float %0) // LLVM: %{{.+}} = call float @llvm.rint.f32(float %{{.+}}) // LLVM: } } @@ -904,7 +904,7 @@ double my_rint(double f) { // CHECK: cir.func @my_rint // CHECK: {{.+}} = cir.rint {{.+}} : !cir.double - // LLVM: define double @my_rint(double %0) + // LLVM: define dso_local double @my_rint(double %0) // LLVM: %{{.+}} = call double @llvm.rint.f64(double %{{.+}}) // LLVM: } } @@ -915,7 +915,7 @@ long double my_rintl(long double f) { // CHECK: {{.+}} = cir.rint {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.rint {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_rintl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_rintl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.rint.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -929,7 +929,7 @@ float call_rintf(float f) { // CHECK: cir.func @call_rintf // CHECK: {{.+}} = cir.rint {{.+}} : !cir.float - // LLVM: define float @call_rintf(float %0) + // LLVM: define dso_local float @call_rintf(float %0) // LLVM: %{{.+}} = call float @llvm.rint.f32(float %{{.+}}) // LLVM: } } @@ -939,7 +939,7 @@ double call_rint(double f) { // CHECK: cir.func @call_rint // CHECK: {{.+}} = cir.rint {{.+}} : !cir.double - // LLVM: define double @call_rint(double %0) + // LLVM: define dso_local double @call_rint(double %0) // LLVM: %{{.+}} = call double @llvm.rint.f64(double %{{.+}}) // LLVM: } } @@ -950,7 +950,7 @@ long double call_rintl(long double f) { // CHECK: {{.+}} = cir.rint {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.rint {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_rintl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_rintl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.rint.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -962,7 +962,7 @@ float my_roundf(float f) { // CHECK: cir.func @my_roundf // CHECK: {{.+}} = cir.round {{.+}} : !cir.float - // LLVM: define float @my_roundf(float %0) + // LLVM: define dso_local float @my_roundf(float %0) // LLVM: %{{.+}} = call float @llvm.round.f32(float %{{.+}}) // LLVM: } } @@ -972,7 +972,7 @@ double my_round(double f) { // CHECK: cir.func @my_round // CHECK: {{.+}} = cir.round {{.+}} : !cir.double - // LLVM: define double @my_round(double %0) + // LLVM: define dso_local double @my_round(double %0) // LLVM: %{{.+}} = call double @llvm.round.f64(double %{{.+}}) // LLVM: } } @@ -983,7 +983,7 @@ long double my_roundl(long double f) { // CHECK: {{.+}} = cir.round {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.round {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_roundl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_roundl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.round.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -997,7 +997,7 @@ float call_roundf(float f) { // CHECK: cir.func @call_roundf // CHECK: {{.+}} = cir.round {{.+}} : !cir.float - // LLVM: define float @call_roundf(float %0) + // LLVM: define dso_local float @call_roundf(float %0) // LLVM: %{{.+}} = call float @llvm.round.f32(float %{{.+}}) // LLVM: } } @@ -1007,7 +1007,7 @@ double call_round(double f) { // CHECK: cir.func @call_round // CHECK: {{.+}} = cir.round {{.+}} : !cir.double - // LLVM: define double @call_round(double %0) + // LLVM: define dso_local double @call_round(double %0) // LLVM: %{{.+}} = call double @llvm.round.f64(double %{{.+}}) // LLVM: } } @@ -1018,7 +1018,7 @@ long double call_roundl(long double f) { // CHECK: {{.+}} = cir.round {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.round {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_roundl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_roundl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.round.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1030,7 +1030,7 @@ float my_sinf(float f) { // CHECK: cir.func @my_sinf // CHECK: {{.+}} = cir.sin {{.+}} : !cir.float - // LLVM: define float @my_sinf(float %0) + // LLVM: define dso_local float @my_sinf(float %0) // LLVM: %{{.+}} = call float @llvm.sin.f32(float %{{.+}}) // LLVM: } } @@ -1040,7 +1040,7 @@ double my_sin(double f) { // CHECK: cir.func @my_sin // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double - // LLVM: define double @my_sin(double %0) + // LLVM: define dso_local double @my_sin(double %0) // LLVM: %{{.+}} = call double @llvm.sin.f64(double %{{.+}}) // LLVM: } } @@ -1051,7 +1051,7 @@ long double my_sinl(long double f) { // CHECK: {{.+}} = cir.sin {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.sin {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_sinl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_sinl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.sin.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1065,7 +1065,7 @@ float call_sinf(float f) { // CHECK: cir.func @call_sinf // CHECK: {{.+}} = cir.sin {{.+}} : !cir.float - // LLVM: define float @call_sinf(float %0) + // LLVM: define dso_local float @call_sinf(float %0) // LLVM: %{{.+}} = call float @llvm.sin.f32(float %{{.+}}) // LLVM: } } @@ -1075,7 +1075,7 @@ double call_sin(double f) { // CHECK: cir.func @call_sin // CHECK: {{.+}} = cir.sin {{.+}} : !cir.double - // LLVM: define double @call_sin(double %0) + // LLVM: define dso_local double @call_sin(double %0) // LLVM: %{{.+}} = call double @llvm.sin.f64(double %{{.+}}) // LLVM: } } @@ -1086,7 +1086,7 @@ long double call_sinl(long double f) { // CHECK: {{.+}} = cir.sin {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.sin {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_sinl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_sinl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.sin.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1098,7 +1098,7 @@ float my_sqrtf(float f) { // CHECK: cir.func @my_sqrtf // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.float - // LLVM: define float @my_sqrtf(float %0) + // LLVM: define dso_local float @my_sqrtf(float %0) // LLVM: %{{.+}} = call float @llvm.sqrt.f32(float %{{.+}}) // LLVM: } } @@ -1108,7 +1108,7 @@ double my_sqrt(double f) { // CHECK: cir.func @my_sqrt // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double - // LLVM: define double @my_sqrt(double %0) + // LLVM: define dso_local double @my_sqrt(double %0) // LLVM: %{{.+}} = call double @llvm.sqrt.f64(double %{{.+}}) // LLVM: } } @@ -1119,7 +1119,7 @@ long double my_sqrtl(long double f) { // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.sqrt {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_sqrtl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_sqrtl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.sqrt.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1133,7 +1133,7 @@ float call_sqrtf(float f) { // CHECK: cir.func @call_sqrtf // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.float - // LLVM: define float @call_sqrtf(float %0) + // LLVM: define dso_local float @call_sqrtf(float %0) // LLVM: %{{.+}} = call float @llvm.sqrt.f32(float %{{.+}}) // LLVM: } } @@ -1143,7 +1143,7 @@ double call_sqrt(double f) { // CHECK: cir.func @call_sqrt // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.double - // LLVM: define double @call_sqrt(double %0) + // LLVM: define dso_local double @call_sqrt(double %0) // LLVM: %{{.+}} = call double @llvm.sqrt.f64(double %{{.+}}) // LLVM: } } @@ -1154,7 +1154,7 @@ long double call_sqrtl(long double f) { // CHECK: {{.+}} = cir.sqrt {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.sqrt {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_sqrtl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_sqrtl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.sqrt.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1166,7 +1166,7 @@ float my_truncf(float f) { // CHECK: cir.func @my_truncf // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.float - // LLVM: define float @my_truncf(float %0) + // LLVM: define dso_local float @my_truncf(float %0) // LLVM: %{{.+}} = call float @llvm.trunc.f32(float %{{.+}}) // LLVM: } } @@ -1176,7 +1176,7 @@ double my_trunc(double f) { // CHECK: cir.func @my_trunc // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.double - // LLVM: define double @my_trunc(double %0) + // LLVM: define dso_local double @my_trunc(double %0) // LLVM: %{{.+}} = call double @llvm.trunc.f64(double %{{.+}}) // LLVM: } } @@ -1187,7 +1187,7 @@ long double my_truncl(long double f) { // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.trunc {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_truncl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @my_truncl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.trunc.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1201,7 +1201,7 @@ float call_truncf(float f) { // CHECK: cir.func @call_truncf // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.float - // LLVM: define float @call_truncf(float %0) + // LLVM: define dso_local float @call_truncf(float %0) // LLVM: %{{.+}} = call float @llvm.trunc.f32(float %{{.+}}) // LLVM: } } @@ -1211,7 +1211,7 @@ double call_trunc(double f) { // CHECK: cir.func @call_trunc // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.double - // LLVM: define double @call_trunc(double %0) + // LLVM: define dso_local double @call_trunc(double %0) // LLVM: %{{.+}} = call double @llvm.trunc.f64(double %{{.+}}) // LLVM: } } @@ -1222,7 +1222,7 @@ long double call_truncl(long double f) { // CHECK: {{.+}} = cir.trunc {{.+}} : !cir.long_double // AARCH64: {{.+}} = cir.trunc {{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_truncl(x86_fp80 %0) + // LLVM: define dso_local x86_fp80 @call_truncl(x86_fp80 %0) // LLVM: %{{.+}} = call x86_fp80 @llvm.trunc.f80(x86_fp80 %{{.+}}) // LLVM: } } @@ -1234,7 +1234,7 @@ float my_copysignf(float x, float y) { // CHECK: cir.func @my_copysignf // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.float - // LLVM: define float @my_copysignf + // LLVM: define dso_local float @my_copysignf // LLVM: %{{.+}} = call float @llvm.copysign.f32(float %{{.+}}, float %{{.+}}) // LLVM: } } @@ -1244,7 +1244,7 @@ double my_copysign(double x, double y) { // CHECK: cir.func @my_copysign // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.double - // LLVM: define double @my_copysign + // LLVM: define dso_local double @my_copysign // LLVM: %{{.+}} = call double @llvm.copysign.f64(double %{{.+}}, double %{{.+}}) // LLVM: } } @@ -1255,7 +1255,7 @@ long double my_copysignl(long double x, long double y) { // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_copysignl + // LLVM: define dso_local x86_fp80 @my_copysignl // LLVM: %{{.+}} = call x86_fp80 @llvm.copysign.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } } @@ -1269,7 +1269,7 @@ float call_copysignf(float x, float y) { // CHECK: cir.func @call_copysignf // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.float - // LLVM: define float @call_copysignf + // LLVM: define dso_local float @call_copysignf // LLVM: %{{.+}} = call float @llvm.copysign.f32(float %{{.+}}, float %{{.+}}) // LLVM: } } @@ -1279,7 +1279,7 @@ double call_copysign(double x, double y) { // CHECK: cir.func @call_copysign // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.double - // LLVM: define double @call_copysign + // LLVM: define dso_local double @call_copysign // LLVM: %{{.+}} = call double @llvm.copysign.f64(double %{{.+}}, double %{{.+}}) // LLVM: } } @@ -1290,7 +1290,7 @@ long double call_copysignl(long double x, long double y) { // CHECK: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.copysign %{{.+}}, %{{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_copysignl + // LLVM: define dso_local x86_fp80 @call_copysignl // LLVM: %{{.+}} = call x86_fp80 @llvm.copysign.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } } @@ -1302,7 +1302,7 @@ float my_fmaxf(float x, float y) { // CHECK: cir.func @my_fmaxf // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.float - // LLVM: define float @my_fmaxf + // LLVM: define dso_local float @my_fmaxf // LLVM: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) // LLVM: } } @@ -1312,7 +1312,7 @@ double my_fmax(double x, double y) { // CHECK: cir.func @my_fmax // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.double - // LLVM: define double @my_fmax + // LLVM: define dso_local double @my_fmax // LLVM: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) // LLVM: } } @@ -1323,7 +1323,7 @@ long double my_fmaxl(long double x, long double y) { // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_fmaxl + // LLVM: define dso_local x86_fp80 @my_fmaxl // LLVM: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } } @@ -1337,7 +1337,7 @@ float call_fmaxf(float x, float y) { // CHECK: cir.func @call_fmaxf // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.float - // LLVM: define float @call_fmaxf + // LLVM: define dso_local float @call_fmaxf // LLVM: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) // LLVM: } } @@ -1347,7 +1347,7 @@ double call_fmax(double x, double y) { // CHECK: cir.func @call_fmax // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.double - // LLVM: define double @call_fmax + // LLVM: define dso_local double @call_fmax // LLVM: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) // LLVM: } } @@ -1358,7 +1358,7 @@ long double call_fmaxl(long double x, long double y) { // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_fmaxl + // LLVM: define dso_local x86_fp80 @call_fmaxl // LLVM: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } } @@ -1370,7 +1370,7 @@ float my_fminf(float x, float y) { // CHECK: cir.func @my_fminf // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.float - // LLVM: define float @my_fminf + // LLVM: define dso_local float @my_fminf // LLVM: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) // LLVM: } } @@ -1380,7 +1380,7 @@ double my_fmin(double x, double y) { // CHECK: cir.func @my_fmin // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.double - // LLVM: define double @my_fmin + // LLVM: define dso_local double @my_fmin // LLVM: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) // LLVM: } } @@ -1391,7 +1391,7 @@ long double my_fminl(long double x, long double y) { // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_fminl + // LLVM: define dso_local x86_fp80 @my_fminl // LLVM: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } } @@ -1405,7 +1405,7 @@ float call_fminf(float x, float y) { // CHECK: cir.func @call_fminf // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.float - // LLVM: define float @call_fminf + // LLVM: define dso_local float @call_fminf // LLVM: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) // LLVM: } } @@ -1415,7 +1415,7 @@ double call_fmin(double x, double y) { // CHECK: cir.func @call_fmin // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.double - // LLVM: define double @call_fmin + // LLVM: define dso_local double @call_fmin // LLVM: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) // LLVM: } } @@ -1426,7 +1426,7 @@ long double call_fminl(long double x, long double y) { // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_fminl + // LLVM: define dso_local x86_fp80 @call_fminl // LLVM: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } } @@ -1438,7 +1438,7 @@ float my_fmodf(float x, float y) { // CHECK: cir.func @my_fmodf // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.float - // LLVM: define float @my_fmodf + // LLVM: define dso_local float @my_fmodf // LLVM: %{{.+}} = frem float %{{.+}}, %{{.+}} // LLVM: } } @@ -1448,7 +1448,7 @@ double my_fmod(double x, double y) { // CHECK: cir.func @my_fmod // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.double - // LLVM: define double @my_fmod + // LLVM: define dso_local double @my_fmod // LLVM: %{{.+}} = frem double %{{.+}}, %{{.+}} // LLVM: } } @@ -1459,7 +1459,7 @@ long double my_fmodl(long double x, long double y) { // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_fmodl + // LLVM: define dso_local x86_fp80 @my_fmodl // LLVM: %{{.+}} = frem x86_fp80 %{{.+}}, %{{.+}} // LLVM: } } @@ -1473,7 +1473,7 @@ float call_fmodf(float x, float y) { // CHECK: cir.func @call_fmodf // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.float - // LLVM: define float @call_fmodf + // LLVM: define dso_local float @call_fmodf // LLVM: %{{.+}} = frem float %{{.+}}, %{{.+}} // LLVM: } } @@ -1483,7 +1483,7 @@ double call_fmod(double x, double y) { // CHECK: cir.func @call_fmod // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.double - // LLVM: define double @call_fmod + // LLVM: define dso_local double @call_fmod // LLVM: %{{.+}} = frem double %{{.+}}, %{{.+}} // LLVM: } } @@ -1494,7 +1494,7 @@ long double call_fmodl(long double x, long double y) { // CHECK: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.fmod %{{.+}}, %{{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_fmodl + // LLVM: define dso_local x86_fp80 @call_fmodl // LLVM: %{{.+}} = frem x86_fp80 %{{.+}}, %{{.+}} // LLVM: } } @@ -1506,7 +1506,7 @@ float my_powf(float x, float y) { // CHECK: cir.func @my_powf // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.float - // LLVM: define float @my_powf + // LLVM: define dso_local float @my_powf // LLVM: %{{.+}} = call float @llvm.pow.f32(float %{{.+}}, float %{{.+}}) // LLVM: } } @@ -1516,7 +1516,7 @@ double my_pow(double x, double y) { // CHECK: cir.func @my_pow // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.double - // LLVM: define double @my_pow + // LLVM: define dso_local double @my_pow // LLVM: %{{.+}} = call double @llvm.pow.f64(double %{{.+}}, double %{{.+}}) // LLVM: } } @@ -1527,7 +1527,7 @@ long double my_powl(long double x, long double y) { // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double - // LLVM: define x86_fp80 @my_powl + // LLVM: define dso_local x86_fp80 @my_powl // LLVM: %{{.+}} = call x86_fp80 @llvm.pow.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } } @@ -1541,7 +1541,7 @@ float call_powf(float x, float y) { // CHECK: cir.func @call_powf // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.float - // LLVM: define float @call_powf + // LLVM: define dso_local float @call_powf // LLVM: %{{.+}} = call float @llvm.pow.f32(float %{{.+}}, float %{{.+}}) // LLVM: } } @@ -1551,7 +1551,7 @@ double call_pow(double x, double y) { // CHECK: cir.func @call_pow // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.double - // LLVM: define double @call_pow + // LLVM: define dso_local double @call_pow // LLVM: %{{.+}} = call double @llvm.pow.f64(double %{{.+}}, double %{{.+}}) // LLVM: } } @@ -1562,7 +1562,7 @@ long double call_powl(long double x, long double y) { // CHECK: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double // AARCH64: %{{.+}} = cir.pow %{{.+}}, %{{.+}} : !cir.long_double - // LLVM: define x86_fp80 @call_powl + // LLVM: define dso_local x86_fp80 @call_powl // LLVM: %{{.+}} = call x86_fp80 @llvm.pow.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } } diff --git a/clang/test/CIR/CodeGen/builtin-ms-alloca.c b/clang/test/CIR/CodeGen/builtin-ms-alloca.c index 2a3176955bc0..baec3072d58d 100644 --- a/clang/test/CIR/CodeGen/builtin-ms-alloca.c +++ b/clang/test/CIR/CodeGen/builtin-ms-alloca.c @@ -16,7 +16,7 @@ void my_win_alloca(size_t n) // CIR: } -// LLVM: define void @my_win_alloca(i64 [[ALLOCA_SIZE:%.*]]) +// LLVM: define dso_local void @my_win_alloca(i64 [[ALLOCA_SIZE:%.*]]) // LLVM: store i64 [[ALLOCA_SIZE]], ptr [[LOCAL_VAR_ALLOCA_SIZE:%.*]], // LLVM: [[TMP_ALLOCA_SIZE:%.*]] = load i64, ptr [[LOCAL_VAR_ALLOCA_SIZE]], // LLVM: [[ALLOCA_RES:%.*]] = alloca i8, i64 [[TMP_ALLOCA_SIZE]], align 16 diff --git a/clang/test/CIR/CodeGen/builtin-prefetch.c b/clang/test/CIR/CodeGen/builtin-prefetch.c index fb84a1204892..56ac9a70ddb4 100644 --- a/clang/test/CIR/CodeGen/builtin-prefetch.c +++ b/clang/test/CIR/CodeGen/builtin-prefetch.c @@ -12,7 +12,7 @@ void foo(void *a) { // CIR: cir.prefetch([[PTR]] : !cir.ptr) locality(1) write // CIR: cir.return -// LLVM: define void @foo(ptr [[ARG0:%.*]]) +// LLVM: define dso_local void @foo(ptr [[ARG0:%.*]]) // LLVM: [[PTR_ALLOC:%.*]] = alloca ptr, i64 1 // LLVM: store ptr [[ARG0]], ptr [[PTR_ALLOC]] // LLVM: [[PTR:%.*]] = load ptr, ptr [[PTR_ALLOC]] diff --git a/clang/test/CIR/CodeGen/call-extra-attrs.cpp b/clang/test/CIR/CodeGen/call-extra-attrs.cpp index a17246ddb1b5..674343f44a6f 100644 --- a/clang/test/CIR/CodeGen/call-extra-attrs.cpp +++ b/clang/test/CIR/CodeGen/call-extra-attrs.cpp @@ -25,10 +25,10 @@ int s2(int a, int b) { // CIR: cir.func @_Z2s1ii(%{{.*}}, %{{.*}}) -> {{.*}} extra(#fn_attr) // CIR: cir.call @_Z2s0ii(%{{.*}}, %{{.*}}) : ({{.*}}, {{.*}}) -> {{.*}} extra(#fn_attr1) // CIR: cir.func @_Z2s2ii(%{{.*}}, %{{.*}}) -> {{.*}} extra(#fn_attr) -// CHECK-NOT: cir.call @_Z2s1ii(%{{.*}}, %{{.*}}) : ({{.*}}, {{.*}}) -> {{.*}} extra(#fn_attr{{.*}}) +// CHECK-NOT: cir.call @_Z2s1ii(%{{.*}}, %{{.*}}) : ({{.*}}, {{.*}}) -> {{.*}} extra(#fn_attr{{.*}}) -// LLVM: define i32 @_Z2s0ii(i32 %0, i32 %1) #[[#ATTR1:]] -// LLVM: define i32 @_Z2s1ii(i32 %0, i32 %1) #[[#ATTR1:]] -// LLVM: define i32 @_Z2s2ii(i32 %0, i32 %1) #[[#ATTR1:]] +// LLVM: define dso_local i32 @_Z2s0ii(i32 %0, i32 %1) #[[#ATTR1:]] +// LLVM: define dso_local i32 @_Z2s1ii(i32 %0, i32 %1) #[[#ATTR1:]] +// LLVM: define dso_local i32 @_Z2s2ii(i32 %0, i32 %1) #[[#ATTR1:]] // LLVM: attributes #[[#ATTR1]] = {{.*}} noinline nounwind optnone diff --git a/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp index 5a9031503958..d77c8b450e4a 100644 --- a/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp +++ b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp @@ -25,11 +25,11 @@ void fn1() { f f1; } // CIR: cir.func @_ZN1f1bEv(%arg0: !cir.ptr loc{{.*}}) -> !cir.ptr // CIR: [[H_PTR:%.*]] = cir.get_global @h : !cir.ptr loc(#loc18) // CIR: [[H_VAL:%.*]] = cir.load [[H_PTR]] : !cir.ptr, !s32i -// CIR: [[RET1_VAL:%.*]] = cir.call @_ZN1a1bEi([[H_VAL]]) : (!s32i) -> !cir.ptr +// CIR: [[RET1_VAL:%.*]] = cir.call @_ZN1a1bEi([[H_VAL]]) : (!s32i) -> !cir.ptr // CIR: cir.store [[RET1_VAL]], [[RET1_P:%.*]] : !cir.ptr, !cir.ptr> // CIR: [[RET1_VAL2:%.*]] = cir.load [[RET1_P]] : !cir.ptr>, !cir.ptr -// %7 = cir.load %1 : !cir.ptr>, !cir.ptr -// CIR: cir.return [[RET1_VAL2]] : !cir.ptr +// %7 = cir.load %1 : !cir.ptr>, !cir.ptr +// CIR: cir.return [[RET1_VAL2]] : !cir.ptr // CIR: cir.func @_Z3fn1v() // CIR: [[CLS_F:%.*]] = cir.alloca !ty_22f22, !cir.ptr, ["f1"] {alignment = 1 : i64} @@ -40,7 +40,7 @@ void fn1() { f f1; } // LLVM: @h = global i32 0 // LLVM: declare {{.*}} ptr @_ZN1a1bEi(i32) -// LLVM: define ptr @_ZN1f1bEv(ptr [[ARG0:%.*]]) +// LLVM: define dso_local ptr @_ZN1f1bEv(ptr [[ARG0:%.*]]) // LLVM: [[ARG0_SAVE:%.*]] = alloca ptr, i64 1, align 8 // LLVM: [[RET_SAVE:%.*]] = alloca ptr, i64 1, align 8 // LLVM: store ptr [[ARG0]], ptr [[ARG0_SAVE]], align 8, @@ -52,6 +52,6 @@ void fn1() { f f1; } // LLVM: [[RET_VAL2:%.*]] = load ptr, ptr [[RET_SAVE]], align 8 // LLVM: ret ptr [[RET_VAL2]] -// LLVM: define void @_Z3fn1v() +// LLVM: define dso_local void @_Z3fn1v() // LLVM: [[FUNC_PTR:%.*]] = alloca %class.f, i64 1, align 1 // LLVM: ret void diff --git a/clang/test/CIR/CodeGen/complex.c b/clang/test/CIR/CodeGen/complex.c index e8c9885f685f..3dd02118ea6a 100644 --- a/clang/test/CIR/CodeGen/complex.c +++ b/clang/test/CIR/CodeGen/complex.c @@ -26,7 +26,7 @@ void list_init() { // CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex // CHECK: } -// LLVM: define void @list_init() +// LLVM: define dso_local void @list_init() // LLVM: store { double, double } { double 1.000000e+00, double 2.000000e+00 }, ptr %{{.+}}, align 8 // LLVM: } @@ -42,7 +42,7 @@ void list_init_2(double r, double i) { // CHECK-NEXT: cir.store %[[#C]], %{{.+}} : !cir.complex, !cir.ptr> // CHECK: } -// LLVM: define void @list_init_2(double %{{.+}}, double %{{.+}}) +// LLVM: define dso_local void @list_init_2(double %{{.+}}, double %{{.+}}) // LLVM: %[[#A:]] = insertvalue { double, double } undef, double %{{.+}}, 0 // LLVM-NEXT: %[[#B:]] = insertvalue { double, double } %[[#A]], double %{{.+}}, 1 // LLVM-NEXT: store { double, double } %[[#B]], ptr %5, align 8 @@ -57,7 +57,7 @@ void builtin_init(double r, double i) { // CHECK: %{{.+}} = cir.complex.create %{{.+}}, %{{.+}} : !cir.double -> !cir.complex // CHECK: } -// LLVM: define void @builtin_init +// LLVM: define dso_local void @builtin_init // LLVM: %[[#A:]] = insertvalue { double, double } undef, double %{{.+}}, 0 // LLVM-NEXT: %[[#B:]] = insertvalue { double, double } %[[#A]], double %{{.+}}, 1 // LLVM-NEXT: store { double, double } %[[#B]], ptr %{{.+}}, align 8 @@ -78,7 +78,7 @@ void imag_literal() { // CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex // CHECK: } -// LLVM: define void @imag_literal() +// LLVM: define dso_local void @imag_literal() // LLVM: store { double, double } { double 0.000000e+00, double 3.000000e+00 }, ptr @c, align 8 // LLVM: store { i32, i32 } { i32 0, i32 3 }, ptr @ci, align 4 // LLVM: } @@ -100,7 +100,7 @@ void load_store() { // CHECK-NEXT: cir.store %[[#CI2]], %[[#CI_PTR]] : !cir.complex, !cir.ptr> // CHECK: } -// LLVM: define void @load_store() +// LLVM: define dso_local void @load_store() // LLVM: %[[#A:]] = load { double, double }, ptr @c2, align 8 // LLVM-NEXT: store { double, double } %[[#A]], ptr @c, align 8 // LLVM-NEXT: %[[#B:]] = load { i32, i32 }, ptr @ci2, align 4 @@ -124,7 +124,7 @@ void load_store_volatile() { // CHECK-NEXT: cir.store volatile %[[#VCI2]], %[[#VCI_PTR]] : !cir.complex, !cir.ptr> // CHECK: } -// LLVM: define void @load_store_volatile() +// LLVM: define dso_local void @load_store_volatile() // LLVM: %[[#A:]] = load volatile { double, double }, ptr @vc2, align 8 // LLVM-NEXT: store volatile { double, double } %[[#A]], ptr @vc, align 8 // LLVM-NEXT: %[[#B:]] = load volatile { i32, i32 }, ptr @vci2, align 4 @@ -142,7 +142,7 @@ void real() { // CHECK-NEXT: %{{.+}} = cir.complex.real %[[#B]] : !cir.complex -> !cir.double // CHECK: } -// LLVM: define void @real() +// LLVM: define dso_local void @real() // LLVM: %[[#A:]] = extractvalue { double, double } %{{.+}}, 0 // LLVM-NEXT: store double %[[#A]], ptr %{{.+}}, align 8 // LLVM: } @@ -158,7 +158,7 @@ void imag() { // CHECK-NEXT: %{{.+}} = cir.complex.imag %[[#B]] : !cir.complex -> !cir.double // CHECK: } -// LLVM: define void @imag() +// LLVM: define dso_local void @imag() // LLVM: %[[#A:]] = extractvalue { double, double } %{{.+}}, 1 // LLVM-NEXT: store double %[[#A]], ptr %{{.+}}, align 8 // LLVM: } @@ -176,7 +176,7 @@ void real_ptr() { // CHECK-NEXT: %{{.+}} = cir.complex.real_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr // CHECK: } -// LLVM: define void @real_ptr() +// LLVM: define dso_local void @real_ptr() // LLVM: store ptr @c, ptr %{{.+}}, align 8 // LLVM-NEXT: store ptr @ci, ptr %{{.+}}, align 8 // LLVM: } @@ -192,7 +192,7 @@ void real_ptr_local() { // CHECK: %{{.+}} = cir.complex.real_ptr %[[#C]] : !cir.ptr> -> !cir.ptr // CHECK: } -// LLVM: define void @real_ptr_local() +// LLVM: define dso_local void @real_ptr_local() // LLVM: store { double, double } { double 1.000000e+00, double 2.000000e+00 }, ptr %{{.+}}, align 8 // LLVM-NEXT: %{{.+}} = getelementptr inbounds { double, double }, ptr %{{.+}}, i32 0, i32 0 // LLVM: } @@ -212,7 +212,7 @@ void extract_real() { // CHECK-NEXT: %{{.+}} = cir.load %[[#REAL_PTR]] : !cir.ptr, !s32i // CHECK: } -// LLVM: define void @extract_real() +// LLVM: define dso_local void @extract_real() // LLVM: %{{.+}} = load double, ptr @c, align 8 // LLVM: %{{.+}} = load i32, ptr @ci, align 4 // LLVM: } @@ -230,7 +230,7 @@ void imag_ptr() { // CHECK-NEXT: %{{.+}} = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr // CHECK: } -// LLVM: define void @imag_ptr() +// LLVM: define dso_local void @imag_ptr() // LLVM: store ptr getelementptr inbounds ({ double, double }, ptr @c, i32 0, i32 1), ptr %{{.+}}, align 8 // LLVM: store ptr getelementptr inbounds ({ i32, i32 }, ptr @ci, i32 0, i32 1), ptr %{{.+}}, align 8 // LLVM: } @@ -250,7 +250,7 @@ void extract_imag() { // CHECK-NEXT: %{{.+}} = cir.load %[[#IMAG_PTR]] : !cir.ptr, !s32i // CHECK: } -// LLVM: define void @extract_imag() +// LLVM: define dso_local void @extract_imag() // LLVM: %{{.+}} = load double, ptr getelementptr inbounds ({ double, double }, ptr @c, i32 0, i32 1), align 8 // LLVM: %{{.+}} = load i32, ptr getelementptr inbounds ({ i32, i32 }, ptr @ci, i32 0, i32 1), align 4 // LLVM: } diff --git a/clang/test/CIR/CodeGen/cond.cpp b/clang/test/CIR/CodeGen/cond.cpp index a1395baf9eee..f2c063eb574b 100644 --- a/clang/test/CIR/CodeGen/cond.cpp +++ b/clang/test/CIR/CodeGen/cond.cpp @@ -29,4 +29,4 @@ min(const unsigned long& __a, const unsigned long& __b) { // CHECK: %9 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: cir.yield %9 : !cir.ptr // CHECK: }) : (!cir.bool) -> !cir.ptr -// CHECK: cir.store %8, %2 : !cir.ptr, !cir.ptr> \ No newline at end of file +// CHECK: cir.store %8, %2 : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index fddb9ef8d735..ba9c6cdf973e 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -338,7 +338,7 @@ folly::coro::Task go1_lambda() { co_return co_await task; } -// CHECK: cir.func coroutine lambda internal private dsolocal @_ZZ10go1_lambdavENK3$_0clEv{{.*}}22 extra{{.*}}{ +// CHECK: cir.func coroutine lambda internal private @_ZZ10go1_lambdavENK3$_0clEv{{.*}}22 extra{{.*}}{ // CHECK: cir.func coroutine @_Z10go1_lambdav() {{.*}}22 extra{{.*}}{ folly::coro::Task go4() { diff --git a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp index 8ce0344780ec..d94c8775a2de 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp @@ -29,7 +29,7 @@ Derived *ptr_cast(Base1 *ptr) { // CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr } -// LLVM: define ptr @_Z8ptr_castP5Base1(ptr readonly %[[#SRC:]]) +// LLVM: define dso_local ptr @_Z8ptr_castP5Base1(ptr readonly %[[#SRC:]]) // LLVM-NEXT: %[[#VPTR:]] = load ptr, ptr %[[#SRC]], align 8 // LLVM-NEXT: %[[#SUCCESS:]] = icmp eq ptr %[[#VPTR]], getelementptr inbounds nuw (i8, ptr @_ZTV7Derived, i64 16) // LLVM-NEXT: %[[RESULT:.+]] = select i1 %[[#SUCCESS]], ptr %[[#SRC]], ptr null @@ -51,7 +51,7 @@ Derived &ref_cast(Base1 &ref) { // CHECK-NEXT: %{{.+}} = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr } -// LLVM: define noundef ptr @_Z8ref_castR5Base1(ptr readonly returned %[[#SRC:]]) +// LLVM: define dso_local noundef ptr @_Z8ref_castR5Base1(ptr readonly returned %[[#SRC:]]) // LLVM-NEXT: %[[#VPTR:]] = load ptr, ptr %[[#SRC]], align 8 // LLVM-NEXT: %[[OK:.+]] = icmp eq ptr %[[#VPTR]], getelementptr inbounds nuw (i8, ptr @_ZTV7Derived, i64 16) // LLVM-NEXT: br i1 %[[OK]], label %[[#LABEL_OK:]], label %[[#LABEL_FAIL:]] @@ -69,7 +69,7 @@ Derived *ptr_cast_always_fail(Base2 *ptr) { // CHECK-NEXT: cir.store %[[#RESULT]], %{{.+}} : !cir.ptr, !cir.ptr> } -// LLVM: define noalias noundef ptr @_Z20ptr_cast_always_failP5Base2(ptr nocapture readnone %{{.+}}) +// LLVM: define dso_local noalias noundef ptr @_Z20ptr_cast_always_failP5Base2(ptr nocapture readnone %{{.+}}) // LLVM-NEXT: ret ptr null // LLVM-NEXT: } @@ -81,7 +81,7 @@ Derived &ref_cast_always_fail(Base2 &ref) { // CHECK-NEXT: cir.unreachable } -// LLVM: define noalias noundef nonnull ptr @_Z20ref_cast_always_failR5Base2(ptr nocapture readnone %{{.+}}) +// LLVM: define dso_local noalias noundef nonnull ptr @_Z20ref_cast_always_failR5Base2(ptr nocapture readnone %{{.+}}) // LLVM-NEXT: tail call void @__cxa_bad_cast() // LLVM-NEXT: unreachable // LLVM-NEXT: } diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c index a9c5b38b8cf1..f4ad939c01eb 100644 --- a/clang/test/CIR/CodeGen/fun-ptr.c +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -38,7 +38,7 @@ int extract_a(Data* d) { // CIR: [[TMP7:%.*]] = cir.call [[TMP5]]([[TMP6]]) : (!cir.ptr)>>, !cir.ptr) -> !s32i // CIR: cir.store [[TMP7]], [[TMP1]] : !s32i, !cir.ptr -// LLVM: define i32 {{@.*foo.*}}(ptr %0) +// LLVM: define dso_local i32 {{@.*foo.*}}(ptr %0) // LLVM: [[TMP1:%.*]] = alloca ptr, i64 1 // LLVM: [[TMP2:%.*]] = alloca i32, i64 1 // LLVM: [[TMP3:%.*]] = alloca ptr, i64 1 @@ -62,7 +62,7 @@ int foo(Data* d) { // CIR: cir.return // LLVM: declare {{.*}} ptr {{@.*test.*}}() -// LLVM: define void {{@.*bar.*}}() +// LLVM: define dso_local void {{@.*bar.*}}() // LLVM: [[RET:%.*]] = call ptr {{@.*test.*}}() // LLVM: call void [[RET]]() // LLVM: ret void diff --git a/clang/test/CIR/CodeGen/func_dsolocal_pie.c b/clang/test/CIR/CodeGen/func_dsolocal_pie.c index acbdcda63aee..94f0dda5392f 100644 --- a/clang/test/CIR/CodeGen/func_dsolocal_pie.c +++ b/clang/test/CIR/CodeGen/func_dsolocal_pie.c @@ -12,12 +12,12 @@ int main() { return 0; } -// CIR: cir.func dsolocal @foo(%arg0: !s32i +// CIR: cir.func @foo(%arg0: !s32i // CIR-NEXT: [[TMP0:%.*]] = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CIR-NEXT: cir.store %arg0, [[TMP0]] : !s32i, !cir.ptr // CIR-NEXT: cir.return -// CIR: cir.func no_proto dsolocal @main() -> !s32i +// CIR: cir.func no_proto @main() -> !s32i // CIR: [[TMP1:%.*]] = cir.const #cir.int<2> : !s32i // CIR: cir.call @foo([[TMP1]]) : (!s32i) -> () diff --git a/clang/test/CIR/CodeGen/function-attrs.cpp b/clang/test/CIR/CodeGen/function-attrs.cpp index 4975a3f31253..8ded0a7d9730 100644 --- a/clang/test/CIR/CodeGen/function-attrs.cpp +++ b/clang/test/CIR/CodeGen/function-attrs.cpp @@ -33,7 +33,7 @@ int s3(int a, int b) { // CIR: cir.func @_Z2s2ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} extra(#fn_attr2) // CIR: cir.func @_Z2s3ii(%arg0:{{.*}}, %arg1:{{.*}} -> {{.*}} { -// LLVM: define i32 @_Z2s1ii(i32 %0, i32 %1) {{.*}} #[[#ATTR1:]] -// LLVM: define i32 @_Z2s2ii(i32 %0, i32 %1) {{.*}} #[[#ATTR2:]] +// LLVM: define dso_local i32 @_Z2s1ii(i32 %0, i32 %1) {{.*}} #[[#ATTR1:]] +// LLVM: define dso_local i32 @_Z2s2ii(i32 %0, i32 %1) {{.*}} #[[#ATTR2:]] // LLVM: attributes #[[#ATTR1]] = {{.*}} noinline // LLVM: attributes #[[#ATTR2]] = {{.*}} alwaysinline diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index a54e420ba2fd..91639e6b3b6d 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -9,7 +9,7 @@ void fn() { // CHECK: !ty_22anon2E222 = !cir.struct // CHECK-DAG: module -// CHECK: cir.func lambda internal private dsolocal @_ZZ2fnvENK3$_0clEv{{.*}}) extra +// CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv{{.*}}) extra // CHECK: cir.func @_Z2fnv() // CHECK-NEXT: %0 = cir.alloca !ty_22anon2E222, !cir.ptr, ["a"] @@ -21,7 +21,7 @@ void l0() { a(); } -// CHECK: cir.func lambda internal private dsolocal @_ZZ2l0vENK3$_0clEv({{.*}}) extra +// CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv({{.*}}) extra // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> @@ -99,13 +99,13 @@ int g3() { } // lambda operator() -// CHECK: cir.func lambda internal private dsolocal @_ZZ2g3vENK3$_0clERKi{{.*}}!s32i extra +// CHECK: cir.func lambda internal private @_ZZ2g3vENK3$_0clERKi{{.*}}!s32i extra // lambda __invoke() -// CHECK: cir.func internal private dsolocal @_ZZ2g3vEN3$_08__invokeERKi +// CHECK: cir.func internal private @_ZZ2g3vEN3$_08__invokeERKi // lambda operator int (*)(int const&)() -// CHECK: cir.func internal private dsolocal @_ZZ2g3vENK3$_0cvPFiRKiEEv +// CHECK: cir.func internal private @_ZZ2g3vENK3$_0cvPFiRKiEEv // CHECK: cir.func @_Z2g3v() -> !s32i // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/libcall.cpp b/clang/test/CIR/CodeGen/libcall.cpp index 96537b392d59..8bc469e94368 100644 --- a/clang/test/CIR/CodeGen/libcall.cpp +++ b/clang/test/CIR/CodeGen/libcall.cpp @@ -47,7 +47,7 @@ void t(const char* fmt, ...) { // CHECK: %5 = cir.call @_ZL6strlenPKcU17pass_object_size0(%3, %4) : (!cir.ptr, !u64i) -> !u64i // CHECK: cir.func private @__vsnprintf_chk -// CHECK: cir.func internal private dsolocal @_ZL9vsnprintfPcU17pass_object_size1iPKcP13__va_list_tag +// CHECK: cir.func internal private @_ZL9vsnprintfPcU17pass_object_size1iPKcP13__va_list_tag // Implicit size parameter in arg %1 // diff --git a/clang/test/CIR/CodeGen/linkage.c b/clang/test/CIR/CodeGen/linkage.c index b3a108df7bba..1b087f43ca81 100644 --- a/clang/test/CIR/CodeGen/linkage.c +++ b/clang/test/CIR/CodeGen/linkage.c @@ -12,11 +12,11 @@ int foo(void) { return bar(5); } -// CIR: cir.func internal private dsolocal @bar( +// CIR: cir.func internal private @bar( // CIR: cir.func @foo( // LLVM: define internal i32 @bar( -// LLVM: define i32 @foo( +// LLVM: define dso_local i32 @foo( static int var = 0; // CIR: cir.global "private" internal dsolocal @var = #cir.int<0> : !s32i diff --git a/clang/test/CIR/CodeGen/multi-vtable.cpp b/clang/test/CIR/CodeGen/multi-vtable.cpp index ad9e51500b6a..c22a769df1f3 100644 --- a/clang/test/CIR/CodeGen/multi-vtable.cpp +++ b/clang/test/CIR/CodeGen/multi-vtable.cpp @@ -62,6 +62,10 @@ int main() { // CIR: cir.return // CIR: } +// LLVM-DAG: $_ZTS6Mother = comdat any +// LLVM-DAG: $_ZTS5Child = comdat any +// LLVM-DAG: $_ZTS6Father = comdat any + // LLVM-DAG: define linkonce_odr void @_ZN5ChildC2Ev(ptr %0) // LLVM-DAG: store ptr getelementptr inbounds ({ [4 x ptr], [3 x ptr] }, ptr @_ZTV5Child, i32 0, i32 0, i32 2), ptr %{{[0-9]+}}, align 8 // LLVM-DAG: %{{[0-9]+}} = getelementptr %class.Child, ptr %3, i64 8 @@ -86,8 +90,8 @@ int main() { // LLVM-DAG: @_ZTVN10__cxxabiv117__class_type_infoE = external global ptr // typeinfo name for Mother -// CIR: cir.global linkonce_odr @_ZTS6Mother = #cir.const_array<"6Mother" : !cir.array> : !cir.array {alignment = 1 : i64} -// LLVM-DAG: @_ZTS6Mother = linkonce_odr global [7 x i8] c"6Mother" +// CIR: cir.global linkonce_odr comdat @_ZTS6Mother = #cir.const_array<"6Mother" : !cir.array> : !cir.array {alignment = 1 : i64} +// LLVM-DAG: @_ZTS6Mother = linkonce_odr global [7 x i8] c"6Mother", comdat // typeinfo for Mother // CIR: cir.global constant external @_ZTI6Mother = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS6Mother> : !cir.ptr}> : ![[VTypeInfoA]] {alignment = 8 : i64} @@ -106,12 +110,12 @@ int main() { // LLVM-DAG: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global ptr // typeinfo name for Child -// CIR: cir.global linkonce_odr @_ZTS5Child = #cir.const_array<"5Child" : !cir.array> : !cir.array {alignment = 1 : i64} -// LLVM-DAG: @_ZTS5Child = linkonce_odr global [6 x i8] c"5Child" +// CIR: cir.global linkonce_odr comdat @_ZTS5Child = #cir.const_array<"5Child" : !cir.array> : !cir.array {alignment = 1 : i64} +// LLVM-DAG: @_ZTS5Child = linkonce_odr global [6 x i8] c"5Child", comdat // typeinfo name for Father -// CIR: cir.global linkonce_odr @_ZTS6Father = #cir.const_array<"6Father" : !cir.array> : !cir.array {alignment = 1 : i64} -// LLVM-DAG: @_ZTS6Father = linkonce_odr global [7 x i8] c"6Father" +// CIR: cir.global linkonce_odr comdat @_ZTS6Father = #cir.const_array<"6Father" : !cir.array> : !cir.array {alignment = 1 : i64} +// LLVM-DAG: @_ZTS6Father = linkonce_odr global [7 x i8] c"6Father", comdat // typeinfo for Father // CIR: cir.global constant external @_ZTI6Father = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS6Father> : !cir.ptr}> : !ty_anon_struct {alignment = 8 : i64} diff --git a/clang/test/CIR/CodeGen/optnone.cpp b/clang/test/CIR/CodeGen/optnone.cpp index 7fa22865c274..1dbb7892a5ad 100644 --- a/clang/test/CIR/CodeGen/optnone.cpp +++ b/clang/test/CIR/CodeGen/optnone.cpp @@ -22,6 +22,6 @@ int s0(int a, int b) { // CIR-O2-NOT: #fn_attr ={{.*}} optnone -// LLVM-O0: define i32 @_Z2s0ii(i32 %0, i32 %1) #[[#ATTR:]] +// LLVM-O0: define dso_local i32 @_Z2s0ii(i32 %0, i32 %1) #[[#ATTR:]] // LLVM-O0: attributes #[[#ATTR]] = { noinline nounwind optnone } // LLVM-O2-NOT: attributes #[[#]] = { noinline nounwind optnone } diff --git a/clang/test/CIR/CodeGen/pass-object-size.c b/clang/test/CIR/CodeGen/pass-object-size.c index 21753d05db88..5bd20f8934de 100644 --- a/clang/test/CIR/CodeGen/pass-object-size.c +++ b/clang/test/CIR/CodeGen/pass-object-size.c @@ -21,7 +21,7 @@ void c() { // CIR: [[TMP4:%.*]] = cir.objsize([[TMP3]] : , min) -> !u64i // CIR-NEXT: cir.call @e([[TMP3]], [[TMP4]]) : (!cir.ptr, !u64i) -> () -// LLVM: define void @c() +// LLVM: define dso_local void @c() // LLVM: [[TMP0:%.*]] = alloca i32, i64 %{{[0-9]+}}, // LLVM: [[TMP1:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 false, i1 true, i1 false), // LLVM-NEXT: call void @b(ptr [[TMP0]], i64 [[TMP1]]) diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 85dd678cf52f..97ee16aa3cde 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -76,7 +76,7 @@ int s0(int a, int b) { // LLVM: ModuleID = '{{.*}}sourcelocation.cpp' // LLVM: source_filename = "{{.*}}sourcelocation.cpp" -// LLVM: define i32 @_Z2s0ii(i32 %0, i32 %1) #[[#]] !dbg ![[#SP:]] +// LLVM: define dso_local i32 @_Z2s0ii(i32 %0, i32 %1) #[[#]] !dbg ![[#SP:]] // LLVM: %3 = alloca i32, i64 1, align 4, !dbg ![[#LOC1:]] diff --git a/clang/test/CIR/CodeGen/var-arg-float.c b/clang/test/CIR/CodeGen/var-arg-float.c index 5dfbcd9ce12a..bd224c098f5a 100644 --- a/clang/test/CIR/CodeGen/var-arg-float.c +++ b/clang/test/CIR/CodeGen/var-arg-float.c @@ -77,7 +77,7 @@ double f1(int n, ...) { // beginning block llvm code // LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } -// LLVM: define double @f1(i32 %0, ...) +// LLVM: define dso_local double @f1(i32 %0, ...) // LLVM: [[ARGN:%.*]] = alloca i32, i64 1, align 4, // LLVM: [[RETP:%.*]] = alloca double, i64 1, align 8, // LLVM: [[RESP:%.*]] = alloca double, i64 1, align 8, diff --git a/clang/test/CIR/CodeGen/var-arg-scope.c b/clang/test/CIR/CodeGen/var-arg-scope.c index e28fb83698c4..0829d0b8f81b 100644 --- a/clang/test/CIR/CodeGen/var-arg-scope.c +++ b/clang/test/CIR/CodeGen/var-arg-scope.c @@ -66,7 +66,7 @@ void f1(__builtin_va_list c) { // AFTER-NEXT: cir.return // LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } -// LLVM: define void @f1(%struct.__va_list %0) +// LLVM: define dso_local void @f1(%struct.__va_list %0) // LLVM: [[VARLIST:%.*]] = alloca %struct.__va_list, i64 1, align 8, // LLVM: br label %[[SCOPE_FRONT:.*]], diff --git a/clang/test/CIR/CodeGen/var-arg.c b/clang/test/CIR/CodeGen/var-arg.c index 34b3705f111c..8352bc832dd5 100644 --- a/clang/test/CIR/CodeGen/var-arg.c +++ b/clang/test/CIR/CodeGen/var-arg.c @@ -80,7 +80,7 @@ int f1(int n, ...) { // AFTER: cir.return [[RETV]] : !s32i // LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } -// LLVM: define i32 @f1(i32 %0, ...) +// LLVM: define dso_local i32 @f1(i32 %0, ...) // LLVM: [[ARGN:%.*]] = alloca i32, i64 1, align 4, // LLVM: [[RETP:%.*]] = alloca i32, i64 1, align 4, // LLVM: [[RESP:%.*]] = alloca i32, i64 1, align 4, diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp index b32fb0eb59ab..1ba565b7cb79 100644 --- a/clang/test/CIR/CodeGen/vbase.cpp +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -23,12 +23,12 @@ void ppp() { B b; } // CIR: cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE // Type info name for B -// CIR: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array +// CIR: cir.global linkonce_odr comdat @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array // CIR: cir.global "private" external @_ZTVN10__cxxabiv117__class_type_infoE : !cir.ptr> // Type info name for A -// CIR: cir.global linkonce_odr @_ZTS1A = #cir.const_array<"1A" : !cir.array> : !cir.array +// CIR: cir.global linkonce_odr comdat @_ZTS1A = #cir.const_array<"1A" : !cir.array> : !cir.array // Type info A. // CIR: cir.global constant external @_ZTI1A = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS1A> : !cir.ptr}> @@ -36,12 +36,14 @@ void ppp() { B b; } // Type info B. // CIR: cir.global constant external @_ZTI1B = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS1B> : !cir.ptr, #cir.int<0> : !u32i, #cir.int<1> : !u32i, #cir.global_view<@_ZTI1A> : !cir.ptr, #cir.int<-6141> : !s64i}> +// LLVM: $_ZTS1B = comdat any +// LLVM: $_ZTS1A = comdat any // LLVM: @_ZTV1B = linkonce_odr global { [3 x ptr] } { [3 x ptr] [ptr inttoptr (i64 12 to ptr), ptr null, ptr @_ZTI1B] } // LLVM: @_ZTT1B = linkonce_odr global [1 x ptr] [ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1B, i32 0, i32 0, i32 3)] // LLVM: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global ptr -// LLVM: @_ZTS1B = linkonce_odr global [2 x i8] c"1B" +// LLVM: @_ZTS1B = linkonce_odr global [2 x i8] c"1B", comdat // LLVM: @_ZTVN10__cxxabiv117__class_type_infoE = external global ptr -// LLVM: @_ZTS1A = linkonce_odr global [2 x i8] c"1A" +// LLVM: @_ZTS1A = linkonce_odr global [2 x i8] c"1A", comdat // LLVM: @_ZTI1A = constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 2), ptr @_ZTS1A } // LLVM: @_ZTI1B = constant { ptr, ptr, i32, i32, ptr, i64 } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i32 2), ptr @_ZTS1B, i32 0, i32 1, ptr @_ZTI1A, i64 -6141 } diff --git a/clang/test/CIR/CodeGen/vectype-ext.cpp b/clang/test/CIR/CodeGen/vectype-ext.cpp index 5c9533b723e8..49f3a2ade69f 100644 --- a/clang/test/CIR/CodeGen/vectype-ext.cpp +++ b/clang/test/CIR/CodeGen/vectype-ext.cpp @@ -12,7 +12,7 @@ typedef long vl2 __attribute__((ext_vector_type(2))); typedef unsigned short vus2 __attribute__((ext_vector_type(2))); // CIR: cir.func {{@.*vector_int_test.*}} -// LLVM: define void {{@.*vector_int_test.*}} +// LLVM: define dso_local void {{@.*vector_int_test.*}} void vector_int_test(int x) { // Vector constant. @@ -197,7 +197,7 @@ void vector_int_test(int x) { } // CIR: cir.func {{@.*vector_double_test.*}} -// LLVM: define void {{@.*vector_double_test.*}} +// LLVM: define dso_local void {{@.*vector_double_test.*}} void vector_double_test(int x, double y) { // Vector constant. vd2 a = { 1.5, 2.5 }; @@ -311,7 +311,7 @@ void vector_double_test(int x, double y) { } // CIR: cir.func {{@.*test_load.*}} -// LLVM: define void {{@.*test_load.*}} +// LLVM: define dso_local void {{@.*test_load.*}} void test_load() { vi4 a = { 1, 2, 3, 4 }; @@ -339,7 +339,7 @@ void test_load() { } // CIR: cir.func {{@.*test_store.*}} -// LLVM: define void {{@.*test_store.*}} +// LLVM: define dso_local void {{@.*test_store.*}} void test_store() { vi4 a; // CIR: %[[#PVECA:]] = cir.alloca !cir.vector @@ -424,7 +424,7 @@ void test_store() { } // CIR: cir.func {{@.*test_build_lvalue.*}} -// LLVM: define void {{@.*test_build_lvalue.*}} +// LLVM: define dso_local void {{@.*test_build_lvalue.*}} void test_build_lvalue() { // special cases only @@ -487,7 +487,7 @@ void test_build_lvalue() { } // CIR: cir.func {{@.*test_vec3.*}} -// LLVM: define void {{@.*test_vec3.*}} +// LLVM: define dso_local void {{@.*test_vec3.*}} void test_vec3() { vi3 v = {}; // CIR-NEXT: %[[#PV:]] = cir.alloca !cir.vector, !cir.ptr>, ["v"] {alignment = 16 : i64} diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 5a92da4c87b2..ecb20f8d2301 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -88,7 +88,7 @@ class B : public A // RTTI_DISABLED-NOT: cir.global "private" external @_ZTVN10__cxxabiv120__si_class_type_infoE : !cir.ptr> // typeinfo name for B -// CHECK: cir.global linkonce_odr @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global linkonce_odr comdat @_ZTS1B = #cir.const_array<"1B" : !cir.array> : !cir.array {alignment = 1 : i64} // RTTI_DISABLED-NOT: cir.global linkonce_odr @_ZTS1B // typeinfo for A diff --git a/clang/test/CIR/CodeGen/weak.c b/clang/test/CIR/CodeGen/weak.c index dcee29f31934..25ebf15f8a89 100644 --- a/clang/test/CIR/CodeGen/weak.c +++ b/clang/test/CIR/CodeGen/weak.c @@ -19,7 +19,7 @@ void active (void) // CIR-NEXT: cir.call @B() : () -> () // LLVM: declare !dbg !{{.}} extern_weak void @B() -// LLVM: define void @active() +// LLVM: define dso_local void @active() // LLVM-NEXT: call void @B() int __attribute__((selectany)) y; diff --git a/clang/test/CIR/IR/func-dsolocal-parser.cir b/clang/test/CIR/IR/func-dsolocal-parser.cir index 1d8322cd8e26..9737279ce144 100644 --- a/clang/test/CIR/IR/func-dsolocal-parser.cir +++ b/clang/test/CIR/IR/func-dsolocal-parser.cir @@ -10,4 +10,4 @@ module { } } -// CHECK: cir.func dsolocal @foo(%arg0: !s32i) extra(#fn_attr) +// CHECK: cir.func @foo(%arg0: !s32i) extra(#fn_attr) diff --git a/clang/test/CIR/Lowering/array-init.c b/clang/test/CIR/Lowering/array-init.c index 8e452bf06878..ab0ddb4dd0ea 100644 --- a/clang/test/CIR/Lowering/array-init.c +++ b/clang/test/CIR/Lowering/array-init.c @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// LLVM: define void @zeroInit +// LLVM: define dso_local void @zeroInit // LLVM: [[RES:%.*]] = alloca [3 x i32], i64 1 // LLVM: store [3 x i32] zeroinitializer, ptr [[RES]] void zeroInit() { diff --git a/clang/test/CIR/Lowering/bitfieils.c b/clang/test/CIR/Lowering/bitfieils.c index ec289bf1048b..cac1285c4e44 100644 --- a/clang/test/CIR/Lowering/bitfieils.c +++ b/clang/test/CIR/Lowering/bitfieils.c @@ -5,17 +5,17 @@ typedef struct { int a : 4; } B; -// LLVM: define void @set_signed +// LLVM: define dso_local void @set_signed // LLVM: [[TMP0:%.*]] = load ptr // LLVM: [[TMP1:%.*]] = load i8, ptr [[TMP0]] // LLVM: [[TMP2:%.*]] = and i8 [[TMP1]], -16 // LLVM: [[TMP3:%.*]] = or i8 [[TMP2]], 14 // LLVM: store i8 [[TMP3]], ptr [[TMP0]] void set_signed(B* b) { - b->a = -2; + b->a = -2; } -// LLVM: define i32 @get_signed +// LLVM: define dso_local i32 @get_signed // LLVM: [[TMP0:%.*]] = alloca i32 // LLVM: [[TMP1:%.*]] = load ptr // LLVM: [[TMP2:%.*]] = load i8, ptr [[TMP1]] @@ -27,4 +27,4 @@ void set_signed(B* b) { // LLVM: ret i32 [[TMP6]] int get_signed(B* b) { return b->a; -} \ No newline at end of file +} diff --git a/clang/test/CIR/Lowering/builtin-binary-fp2fp.c b/clang/test/CIR/Lowering/builtin-binary-fp2fp.c index 0910776847dc..2877aa5cef30 100644 --- a/clang/test/CIR/Lowering/builtin-binary-fp2fp.c +++ b/clang/test/CIR/Lowering/builtin-binary-fp2fp.c @@ -9,11 +9,11 @@ float my_copysignf(float x, float y) { return __builtin_copysignf(x, y); } -// LLVM: define float @my_copysignf +// LLVM: define dso_local float @my_copysignf // LLVM: %{{.+}} = call float @llvm.copysign.f32(float %{{.+}}, float %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define float @my_copysignf +// LLVM-FASTMATH: define dso_local float @my_copysignf // LLVM-FASTMATH: %{{.+}} = call float @llvm.copysign.f32(float %{{.+}}, float %{{.+}}) // LLVM-FASTMATH: } @@ -21,11 +21,11 @@ double my_copysign(double x, double y) { return __builtin_copysign(x, y); } -// LLVM: define double @my_copysign +// LLVM: define dso_local double @my_copysign // LLVM: %{{.+}} = call double @llvm.copysign.f64(double %{{.+}}, double %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define double @my_copysign +// LLVM-FASTMATH: define dso_local double @my_copysign // LLVM-FASTMATH: %{{.+}} = call double @llvm.copysign.f64(double %{{.+}}, double %{{.+}}) // LLVM-FASTMATH: } @@ -33,11 +33,11 @@ long double my_copysignl(long double x, long double y) { return __builtin_copysignl(x, y); } -// LLVM: define x86_fp80 @my_copysignl +// LLVM: define dso_local x86_fp80 @my_copysignl // LLVM: %{{.+}} = call x86_fp80 @llvm.copysign.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define x86_fp80 @my_copysignl +// LLVM-FASTMATH: define dso_local x86_fp80 @my_copysignl // LLVM-FASTMATH: %{{.+}} = call x86_fp80 @llvm.copysign.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM-FASTMATH: } @@ -47,11 +47,11 @@ float my_fmaxf(float x, float y) { return __builtin_fmaxf(x, y); } -// LLVM: define float @my_fmaxf +// LLVM: define dso_local float @my_fmaxf // LLVM: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define float @my_fmaxf +// LLVM-FASTMATH: define dso_local float @my_fmaxf // LLVM-FASTMATH: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) // LLVM-FASTMATH: } @@ -59,11 +59,11 @@ double my_fmax(double x, double y) { return __builtin_fmax(x, y); } -// LLVM: define double @my_fmax +// LLVM: define dso_local double @my_fmax // LLVM: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define double @my_fmax +// LLVM-FASTMATH: define dso_local double @my_fmax // LLVM-FASTMATH: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) // LLVM-FASTMATH: } @@ -71,11 +71,11 @@ long double my_fmaxl(long double x, long double y) { return __builtin_fmaxl(x, y); } -// LLVM: define x86_fp80 @my_fmaxl +// LLVM: define dso_local x86_fp80 @my_fmaxl // LLVM: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define x86_fp80 @my_fmaxl +// LLVM-FASTMATH: define dso_local x86_fp80 @my_fmaxl // LLVM-FASTMATH: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM-FASTMATH: } @@ -85,11 +85,11 @@ float my_fminf(float x, float y) { return __builtin_fminf(x, y); } -// LLVM: define float @my_fminf +// LLVM: define dso_local float @my_fminf // LLVM: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define float @my_fminf +// LLVM-FASTMATH: define dso_local float @my_fminf // LLVM-FASTMATH: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) // LLVM-FASTMATH: } @@ -97,11 +97,11 @@ double my_fmin(double x, double y) { return __builtin_fmin(x, y); } -// LLVM: define double @my_fmin +// LLVM: define dso_local double @my_fmin // LLVM: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define double @my_fmin +// LLVM-FASTMATH: define dso_local double @my_fmin // LLVM-FASTMATH: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) // LLVM-FASTMATH: } @@ -109,11 +109,11 @@ long double my_fminl(long double x, long double y) { return __builtin_fminl(x, y); } -// LLVM: define x86_fp80 @my_fminl +// LLVM: define dso_local x86_fp80 @my_fminl // LLVM: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define x86_fp80 @my_fminl +// LLVM-FASTMATH: define dso_local x86_fp80 @my_fminl // LLVM-FASTMATH: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM-FASTMATH: } @@ -123,11 +123,11 @@ float my_fmodf(float x, float y) { return __builtin_fmodf(x, y); } -// LLVM: define float @my_fmodf +// LLVM: define dso_local float @my_fmodf // LLVM: %{{.+}} = call float @fmodf(float %{{.+}}, float %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define float @my_fmodf +// LLVM-FASTMATH: define dso_local float @my_fmodf // LLVM-FASTMATH: %{{.+}} = frem float %{{.+}}, %{{.+}} // LLVM-FASTMATH: } @@ -135,11 +135,11 @@ double my_fmod(double x, double y) { return __builtin_fmod(x, y); } -// LLVM: define double @my_fmod +// LLVM: define dso_local double @my_fmod // LLVM: %{{.+}} = call double @fmod(double %{{.+}}, double %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define double @my_fmod +// LLVM-FASTMATH: define dso_local double @my_fmod // LLVM-FASTMATH: %{{.+}} = frem double %{{.+}}, %{{.+}} // LLVM-FASTMATH: } @@ -147,11 +147,11 @@ long double my_fmodl(long double x, long double y) { return __builtin_fmodl(x, y); } -// LLVM: define x86_fp80 @my_fmodl +// LLVM: define dso_local x86_fp80 @my_fmodl // LLVM: %{{.+}} = call x86_fp80 @fmodl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define x86_fp80 @my_fmodl +// LLVM-FASTMATH: define dso_local x86_fp80 @my_fmodl // LLVM-FASTMATH: %{{.+}} = frem x86_fp80 %{{.+}}, %{{.+}} // LLVM-FASTMATH: } @@ -161,11 +161,11 @@ float my_powf(float x, float y) { return __builtin_powf(x, y); } -// LLVM: define float @my_powf +// LLVM: define dso_local float @my_powf // LLVM: %{{.+}} = call float @powf(float %{{.+}}, float %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define float @my_powf +// LLVM-FASTMATH: define dso_local float @my_powf // LLVM-FASTMATH: %{{.+}} = call float @llvm.pow.f32(float %{{.+}}, float %{{.+}}) // LLVM-FASTMATH: } @@ -173,11 +173,11 @@ double my_pow(double x, double y) { return __builtin_pow(x, y); } -// LLVM: define double @my_pow +// LLVM: define dso_local double @my_pow // LLVM: %{{.+}} = call double @pow(double %{{.+}}, double %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define double @my_pow +// LLVM-FASTMATH: define dso_local double @my_pow // LLVM-FASTMATH: %{{.+}} = call double @llvm.pow.f64(double %{{.+}}, double %{{.+}}) // LLVM-FASTMATH: } @@ -185,10 +185,10 @@ long double my_powl(long double x, long double y) { return __builtin_powl(x, y); } -// LLVM: define x86_fp80 @my_powl +// LLVM: define dso_local x86_fp80 @my_powl // LLVM: %{{.+}} = call x86_fp80 @powl(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM: } -// LLVM-FASTMATH: define x86_fp80 @my_powl +// LLVM-FASTMATH: define dso_local x86_fp80 @my_powl // LLVM-FASTMATH: %{{.+}} = call x86_fp80 @llvm.pow.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) // LLVM-FASTMATH: } diff --git a/clang/test/CIR/Lowering/struct-init.c b/clang/test/CIR/Lowering/struct-init.c index 3c94cf9d5f50..a8b84e9d20d9 100644 --- a/clang/test/CIR/Lowering/struct-init.c +++ b/clang/test/CIR/Lowering/struct-init.c @@ -1,10 +1,10 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM struct S { - int x; + int x; }; -// LLVM: define void @zeroInit +// LLVM: define dso_local void @zeroInit // LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1 // LLVM: store %struct.S zeroinitializer, ptr [[TMP0]] void zeroInit() { diff --git a/clang/test/CIR/cc1.c b/clang/test/CIR/cc1.c index c29c6943d6ff..af53e22fd20a 100644 --- a/clang/test/CIR/cc1.c +++ b/clang/test/CIR/cc1.c @@ -13,7 +13,7 @@ void foo() {} // MLIR-NEXT: return // MLIR-NEXT: } -// LLVM: define void @foo() +// LLVM: define dso_local void @foo() // LLVM-NEXT: ret void // LLVM-NEXT: } diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index a02d73b99a67..401c40d41064 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -18,7 +18,7 @@ void foo(void) {} // CIR: module {{.*}} { -// CIR-NEXT: cir.func dsolocal @foo() +// CIR-NEXT: cir.func @foo() // CIR-NEXT: cir.return // CIR-NEXT: } // CIR-NEXT: } diff --git a/clang/test/CIR/mlirprint.c b/clang/test/CIR/mlirprint.c index 8c947ecb37f9..99a57366e4ba 100644 --- a/clang/test/CIR/mlirprint.c +++ b/clang/test/CIR/mlirprint.c @@ -33,7 +33,7 @@ int foo(void) { // LLVM: IR Dump After cir::direct::ConvertCIRToLLVMPass (cir-flat-to-llvm) // LLVM: llvm.func @foo() -> i32 // LLVM: IR Dump After -// LLVM: define i32 @foo() +// LLVM: define dso_local i32 @foo() // CIRPASS-NOT: IR Dump After MergeCleanups // CIRPASS: IR Dump After DropAST From d42094c04951ede76e0455fa5f55c7c503c60732 Mon Sep 17 00:00:00 2001 From: 7mile Date: Sat, 27 Jul 2024 06:20:09 +0800 Subject: [PATCH 1712/2301] [CIR][CodeGen][LowerToLLVM] End-to-end implementation of `offload_*` cases for OpenCL with SPIR-V target (#724) This PR implements `offload_*` cases discussed in [this thread](https://discourse.llvm.org/t/rfc-clangir-unified-address-space-design-in-clangir/79728). * Integrate target-specific CIR-to-LLVM address space map into `TargetLoweringInfo` * CIRGen: Implement these cases in `getValueFromLangAS` * Lowering: Extend the state of type converter with `LowerModule` When frontend provides a new LangAS like `opencl_generic`, it would be processed by CIRGenTypes and `Builder.getPointerTo()` and encoded as `offload_generic`. When we lower CIR to LLVM, * For pointer types without address space attribute, it's mapped to `ptr addrspace(0)` directly * For target cases `target`, it's mapped to `ptr addrspace(x)` * For other defined cases, query the target info with a new virtual method `getTargetAddrSpaceFromCIRAddrSpace`. General targets like X86 and ARM64 map all known cases to 0. For SPIR-V target here, it maps `offload_generic` to `addrspace(4)`. --- clang/include/clang/CIR/MissingFeatures.h | 5 ++- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 6 +++ .../TargetLowering/TargetLoweringInfo.h | 4 ++ .../TargetLowering/Targets/AArch64.cpp | 15 +++++++ .../TargetLowering/Targets/SPIR.cpp | 19 ++++++++ .../Transforms/TargetLowering/Targets/X86.cpp | 15 +++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 43 ++++++++++++++----- .../CIR/CodeGen/OpenCL/addrspace-alloca.cl | 10 ++--- clang/test/CIR/CodeGen/OpenCL/spirv-target.cl | 2 - clang/test/CIR/Lowering/address-space.cir | 20 ++++++++- 10 files changed, 119 insertions(+), 20 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 48ab598311e8..d67989120562 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -161,7 +161,6 @@ struct MissingFeatures { static bool constantFoldsToSimpleInteger() { return false; } static bool checkFunctionCallABI() { return false; } static bool zeroInitializer() { return false; } - static bool targetLoweringInfoAddressSpaceMap() { return false; } static bool targetCodeGenInfoIsProtoCallVariadic() { return false; } static bool targetCodeGenInfoGetNullPointer() { return false; } static bool operandBundles() { return false; } @@ -277,6 +276,10 @@ struct MissingFeatures { static bool returnValueDominatingStoreOptmiization() { return false; } // Globals (vars and functions) may have attributes that are target depedent. static bool setTargetAttributes() { return false; } + + // CIR modules parsed from text form may not carry the triple or data layout + // specs. We should make it always present. + static bool makeTripleAlwaysPresent() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 7e942f85f959..c77ad8c7a46e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -568,10 +568,16 @@ AddressSpaceAttr::getValueFromLangAS(clang::LangAS langAS) { // Default address space should be encoded as a null attribute. return std::nullopt; case LangAS::opencl_global: + return Kind::offload_global; case LangAS::opencl_local: + return Kind::offload_local; case LangAS::opencl_constant: + return Kind::offload_constant; case LangAS::opencl_private: + return Kind::offload_private; case LangAS::opencl_generic: + return Kind::offload_generic; + case LangAS::opencl_global_device: case LangAS::opencl_global_host: case LangAS::cuda_device: diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h index b264e9ae7b89..4be2db10c1dd 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h @@ -18,6 +18,8 @@ #include "ABIInfo.h" #include +#include "clang/CIR/Dialect/IR/CIRAttrs.h" + namespace mlir { namespace cir { @@ -30,6 +32,8 @@ class TargetLoweringInfo { virtual ~TargetLoweringInfo(); const ABIInfo &getABIInfo() const { return *Info; } + virtual unsigned getTargetAddrSpaceFromCIRAddrSpace( + mlir::cir::AddressSpaceAttr addressSpaceAttr) const = 0; }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index 1490a3babc96..a3406b722c41 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -62,6 +62,21 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { : TargetLoweringInfo(std::make_unique(LT, Kind)) { assert(!MissingFeature::swift()); } + + unsigned getTargetAddrSpaceFromCIRAddrSpace( + mlir::cir::AddressSpaceAttr addressSpaceAttr) const override { + using Kind = mlir::cir::AddressSpaceAttr::Kind; + switch (addressSpaceAttr.getValue()) { + case Kind::offload_private: + case Kind::offload_local: + case Kind::offload_global: + case Kind::offload_constant: + case Kind::offload_generic: + return 0; + default: + llvm_unreachable("Unknown CIR address space for this target"); + } + } }; } // namespace diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp index 974b4d3d27aa..f5a7250dffd0 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp @@ -41,6 +41,25 @@ class SPIRVTargetLoweringInfo : public TargetLoweringInfo { public: SPIRVTargetLoweringInfo(LowerTypes <) : TargetLoweringInfo(std::make_unique(LT)) {} + + unsigned getTargetAddrSpaceFromCIRAddrSpace( + mlir::cir::AddressSpaceAttr addressSpaceAttr) const override { + using Kind = mlir::cir::AddressSpaceAttr::Kind; + switch (addressSpaceAttr.getValue()) { + case Kind::offload_private: + return 0; + case Kind::offload_local: + return 3; + case Kind::offload_global: + return 1; + case Kind::offload_constant: + return 2; + case Kind::offload_generic: + return 4; + default: + llvm_unreachable("Unknown CIR address space for this target"); + } + } }; } // namespace diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 7b3aaca75364..a82530e1bd30 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -105,6 +105,21 @@ class X86_64TargetLoweringInfo : public TargetLoweringInfo { : TargetLoweringInfo(std::make_unique(LM, AVXLevel)) { assert(!::cir::MissingFeatures::swift()); } + + unsigned getTargetAddrSpaceFromCIRAddrSpace( + mlir::cir::AddressSpaceAttr addressSpaceAttr) const override { + using Kind = mlir::cir::AddressSpaceAttr::Kind; + switch (addressSpaceAttr.getValue()) { + case Kind::offload_private: + case Kind::offload_local: + case Kind::offload_global: + case Kind::offload_constant: + case Kind::offload_generic: + return 0; + default: + llvm_unreachable("Unknown CIR address space for this target"); + } + } }; void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 4fcab4d34b2e..339ca0d98451 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -69,6 +69,8 @@ #include #include +#include "LowerModule.h" + using namespace cir; using namespace llvm; @@ -3480,24 +3482,43 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, } namespace { + +std::unique_ptr +prepareLowerModule(mlir::ModuleOp module) { + mlir::PatternRewriter rewriter{module->getContext()}; + // If the triple is not present, e.g. CIR modules parsed from text, we + // cannot init LowerModule properly. + assert(!::cir::MissingFeatures::makeTripleAlwaysPresent()); + if (!module->hasAttr("cir.triple")) + return {}; + return mlir::cir::createLowerModule(module, rewriter); +} + +// FIXME: change the type of lowerModule to `LowerModule &` to have better +// lambda capturing experience. Also blocked by makeTripleAlwaysPresent. void prepareTypeConverter(mlir::LLVMTypeConverter &converter, - mlir::DataLayout &dataLayout) { - converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { + mlir::DataLayout &dataLayout, + mlir::cir::LowerModule *lowerModule) { + converter.addConversion([&, lowerModule]( + mlir::cir::PointerType type) -> mlir::Type { // Drop pointee type since LLVM dialect only allows opaque pointers. auto addrSpace = mlir::cast_if_present(type.getAddrSpace()); - // null addrspace attribute indicates the default addrspace + // Null addrspace attribute indicates the default addrspace. if (!addrSpace) return mlir::LLVM::LLVMPointerType::get(type.getContext()); - // TODO(cir): Query the target-specific address space map to lower other ASs - // like `opencl_private`. - assert(!MissingFeatures::targetLoweringInfoAddressSpaceMap()); - assert(addrSpace.isTarget() && "NYI"); + assert(lowerModule && "CIR AS map is not available"); + // Pass through target addrspace and map CIR addrspace to LLVM addrspace by + // querying the target info. + unsigned targetAS = + addrSpace.isTarget() + ? addrSpace.getTargetValue() + : lowerModule->getTargetLoweringInfo() + .getTargetAddrSpaceFromCIRAddrSpace(addrSpace); - return mlir::LLVM::LLVMPointerType::get(type.getContext(), - addrSpace.getTargetValue()); + return mlir::LLVM::LLVMPointerType::get(type.getContext(), targetAS); }); converter.addConversion([&](mlir::cir::DataMemberType type) -> mlir::Type { return mlir::IntegerType::get(type.getContext(), @@ -3722,7 +3743,9 @@ void ConvertCIRToLLVMPass::runOnOperation() { auto module = getOperation(); mlir::DataLayout dataLayout(module); mlir::LLVMTypeConverter converter(&getContext()); - prepareTypeConverter(converter, dataLayout); + std::unique_ptr lowerModule = + prepareLowerModule(module); + prepareTypeConverter(converter, dataLayout, lowerModule.get()); mlir::RewritePatternSet patterns(&getContext()); diff --git a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl index 7650a08968e4..58edf5c2791e 100644 --- a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl +++ b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl @@ -3,13 +3,11 @@ // RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM -// Lowering of language-specific AS not supported -// XFAIL: * -// CIR: cir.func @func(%arg0: !cir.ptr)> +// CIR: cir.func @func(%arg0: !cir.ptr // LLVM: @func(ptr addrspace(3) kernel void func(local int *p) { - // CIR-NEXT: %[[#ALLOCA_P:]] = cir.alloca !cir.ptr)>, !cir.ptr)>>, ["p", init] {alignment = 8 : i64} + // CIR-NEXT: %[[#ALLOCA_P:]] = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} // LLVM-NEXT: %[[#ALLOCA_P:]] = alloca ptr addrspace(3), i64 1, align 8 int x; @@ -17,11 +15,11 @@ kernel void func(local int *p) { // LLVM-NEXT: %[[#ALLOCA_X:]] = alloca i32, i64 1, align 4 global char *b; - // CIR-NEXT: %[[#ALLOCA_B:]] = cir.alloca !cir.ptr)>, !cir.ptr)>>, ["b"] {alignment = 8 : i64} + // CIR-NEXT: %[[#ALLOCA_B:]] = cir.alloca !cir.ptr, !cir.ptr>, ["b"] {alignment = 8 : i64} // LLVM-NEXT: %[[#ALLOCA_B:]] = alloca ptr addrspace(1), i64 1, align 8 // Store of the argument `p` - // CIR-NEXT: cir.store %arg0, %[[#ALLOCA_P]] : !cir.ptr)>, !cir.ptr)>> + // CIR-NEXT: cir.store %arg0, %[[#ALLOCA_P]] : !cir.ptr, !cir.ptr> // LLVM-NEXT: store ptr addrspace(3) %{{[0-9]+}}, ptr %[[#ALLOCA_P]], align 8 return; diff --git a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl index eb6d2028d1ba..523ffaf405e9 100644 --- a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl +++ b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl @@ -4,8 +4,6 @@ // RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t_64.ll // RUN: FileCheck --input-file=%t_64.ll %s --check-prefix=LLVM-SPIRV64 -// Lowering of language-specific AS not supported -// XFAIL: * // CIR-SPIRV64: cir.triple = "spirv64-unknown-unknown" // LLVM-SPIRV64: target triple = "spirv64-unknown-unknown" diff --git a/clang/test/CIR/Lowering/address-space.cir b/clang/test/CIR/Lowering/address-space.cir index b7328713e9b9..1b2d01e8b1db 100644 --- a/clang/test/CIR/Lowering/address-space.cir +++ b/clang/test/CIR/Lowering/address-space.cir @@ -3,7 +3,10 @@ !s32i = !cir.int -module { +module attributes { + cir.triple = "spirv64-unknown-unknown", + llvm.data_layout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1" +} { // LLVM: define void @foo(ptr %0) cir.func @foo(%arg0: !cir.ptr) { // LLVM-NEXT: alloca ptr, @@ -24,4 +27,19 @@ module { %0 = cir.alloca !cir.ptr)>, !cir.ptr)>>, ["arg", init] {alignment = 8 : i64} cir.return } + + // LLVM: define void @test_lower_offload_as() + cir.func @test_lower_offload_as() { + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["arg0", init] {alignment = 8 : i64} + // LLVM-NEXT: alloca ptr, + %1 = cir.alloca !cir.ptr, !cir.ptr>, ["arg1", init] {alignment = 8 : i64} + // LLVM-NEXT: alloca ptr addrspace(1), + %2 = cir.alloca !cir.ptr, !cir.ptr>, ["arg2", init] {alignment = 8 : i64} + // LLVM-NEXT: alloca ptr addrspace(2), + %3 = cir.alloca !cir.ptr, !cir.ptr>, ["arg3", init] {alignment = 8 : i64} + // LLVM-NEXT: alloca ptr addrspace(3), + %4 = cir.alloca !cir.ptr, !cir.ptr>, ["arg4", init] {alignment = 8 : i64} + // LLVM-NEXT: alloca ptr addrspace(4), + cir.return + } } From 073abad7f9c72ef381f8af8593fb3c0d0d34ff7f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Jul 2024 15:22:13 -0700 Subject: [PATCH 1713/2301] [CIR][FlattenCFG][NFC] Factor out try body emission --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 36 +++++++++++-------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 27a23ebe4924..13dd78a97ed8 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -276,20 +276,9 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { assert(!nextDispatcher && "no dispatcher available anymore"); } - mlir::LogicalResult - matchAndRewrite(mlir::cir::TryOp tryOp, - mlir::PatternRewriter &rewriter) const override { - mlir::OpBuilder::InsertionGuard guard(rewriter); + mlir::Block *buildTryBody(mlir::cir::TryOp tryOp, + mlir::PatternRewriter &rewriter) const { auto loc = tryOp.getLoc(); - - // Empty scope: just remove it. - if (tryOp.getTryRegion().empty()) { - rewriter.eraseOp(tryOp); - return mlir::success(); - } - - // TODO: keep track of cir.try_call before we flatten. - // Split the current block before the TryOp to create the inlining // point. auto *beforeTryScopeBlock = rewriter.getInsertionBlock(); @@ -298,13 +287,32 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Inline body region. auto *beforeBody = &tryOp.getTryRegion().front(); - auto *afterBody = &tryOp.getTryRegion().back(); rewriter.inlineRegionBefore(tryOp.getTryRegion(), afterTry); // Branch into the body of the region. rewriter.setInsertionPointToEnd(beforeTryScopeBlock); rewriter.create(loc, mlir::ValueRange(), beforeBody); + return afterTry; + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::TryOp tryOp, + mlir::PatternRewriter &rewriter) const override { + mlir::OpBuilder::InsertionGuard guard(rewriter); + auto *afterBody = &tryOp.getTryRegion().back(); + + // Empty scope: just remove it. + if (tryOp.getTryRegion().empty()) { + rewriter.eraseOp(tryOp); + return mlir::success(); + } + + // TODO: keep track of cir.try_call before we flatten. + + // Build try body. + mlir::Block *afterTry = buildTryBody(tryOp, rewriter); + // Build catchers. buildCatchers(tryOp, rewriter, afterBody, afterTry); rewriter.eraseOp(tryOp); From 28d0f9425bb09e336e1e048b3f696e992c6d0349 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Jul 2024 17:33:21 -0700 Subject: [PATCH 1714/2301] [CIR][FlattenCFG] Exceptions: lower call to try_call and wire up landing pads --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 29 ++++++++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 12 +++-- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 48 +++++++++++++++---- clang/test/CIR/Lowering/try-catch.cpp | 47 +++++++++--------- 4 files changed, 94 insertions(+), 42 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 460b403ec19d..331e88b62f9f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3100,10 +3100,17 @@ def TryCallOp : CIR_CallOp<"try_call", $_state.addTypes(resType); // Handle branches - $_state.addSuccessors(cont); - $_state.addSuccessors(landing_pad); $_state.addOperands(contOperands); $_state.addOperands(landingPadOperands); + // The TryCall ODS layout is: cont, landing_pad, operands. + llvm::copy(::llvm::ArrayRef({ + static_cast(contOperands.size()), + static_cast(landingPadOperands.size()), + static_cast(operands.size()) + }), + odsState.getOrAddProperties().operandSegmentSizes.begin()); + $_state.addSuccessors(cont); + $_state.addSuccessors(landing_pad); }]>, OpBuilder<(ins "Value":$ind_target, "FuncType":$fn_type, @@ -3111,15 +3118,25 @@ def TryCallOp : CIR_CallOp<"try_call", CArg<"ValueRange", "{}">:$operands, CArg<"ValueRange", "{}">:$contOperands, CArg<"ValueRange", "{}">:$landingPadOperands), [{ - $_state.addOperands(ValueRange{ind_target}); - $_state.addOperands(operands); + ::llvm::SmallVector finalCallOperands({ind_target}); + finalCallOperands.append(operands.begin(), operands.end()); + $_state.addOperands(finalCallOperands); + if (!fn_type.isVoid()) $_state.addTypes(fn_type.getReturnType()); + // Handle branches - $_state.addSuccessors(cont); - $_state.addSuccessors(landing_pad); $_state.addOperands(contOperands); $_state.addOperands(landingPadOperands); + // The TryCall ODS layout is: cont, landing_pad, operands. + llvm::copy(::llvm::ArrayRef({ + static_cast(contOperands.size()), + static_cast(landingPadOperands.size()), + static_cast(finalCallOperands.size()) + }), + odsState.getOrAddProperties().operandSegmentSizes.begin()); + $_state.addSuccessors(cont); + $_state.addSuccessors(landing_pad); }]> ]; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index e5ae4e32b761..9c015ce24c8c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2455,7 +2455,7 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, assert(landingPad && "expected two successors"); auto tryCall = dyn_cast(op); assert(tryCall && "regular calls do not branch"); - state << tryCall.getCont(); + state << ' ' << tryCall.getCont(); if (!tryCall.getContOperands().empty()) { state << "("; state << tryCall.getContOperands(); @@ -2482,6 +2482,7 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, elidedAttrs.push_back("ast"); elidedAttrs.push_back("extra_attrs"); elidedAttrs.push_back("exception"); + elidedAttrs.push_back("operandSegmentSizes"); state.printOptionalAttrDict(op->getAttrs(), elidedAttrs); state << ' ' << ":"; @@ -2563,8 +2564,13 @@ void TryCallOp::print(::mlir::OpAsmPrinter &state) { mlir::SuccessorOperands TryCallOp::getSuccessorOperands(unsigned index) { assert(index < getNumSuccessors() && "invalid successor index"); - return SuccessorOperands(index == 0 ? getContOperandsMutable() - : getLandingPadOperandsMutable()); + if (index == 0) + return SuccessorOperands(getContOperandsMutable()); + if (index == 1) + return SuccessorOperands(getLandingPadOperandsMutable()); + + // index == 2 + return SuccessorOperands(getArgOperandsMutable()); } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 13dd78a97ed8..82f1b54571af 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -201,8 +201,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { rewriter.mergeBlocks(&r.back(), unwindBlock); } - void buildCatchers(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, - mlir::Block *afterBody, mlir::Block *afterTry) const { + mlir::Block *buildCatchers(mlir::cir::TryOp tryOp, + mlir::PatternRewriter &rewriter, + mlir::Block *afterBody, + mlir::Block *afterTry) const { auto loc = tryOp.getLoc(); // Replace the tryOp return with a branch that jumps out of the body. rewriter.setInsertionPointToEnd(afterBody); @@ -212,11 +214,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto *catchBegin = rewriter.splitBlock(beforeCatch, rewriter.getInsertionPoint()); rewriter.setInsertionPointToEnd(beforeCatch); - - // FIXME: this branch should be to afterTry instead of catchBegin, before we - // change this, we need to break calls into their branch version - // (invoke-like) first, otherwise these will be unrecheable and eliminated. - rewriter.replaceOpWithNewOp(tryBodyYield, catchBegin); + rewriter.replaceOpWithNewOp(tryBodyYield, afterTry); // Start the landing pad by getting the inflight exception information. rewriter.setInsertionPointToEnd(catchBegin); @@ -274,6 +272,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { } assert(!nextDispatcher && "no dispatcher available anymore"); + return catchBegin; } mlir::Block *buildTryBody(mlir::cir::TryOp tryOp, @@ -307,15 +306,46 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { return mlir::success(); } - // TODO: keep track of cir.try_call before we flatten. + // Grab the collection of `cir.call exception`s to rewrite to + // `cir.try_call`. + SmallVector callsToRewrite; + tryOp.getTryRegion().walk([&](CallOp op) { + // Only grab calls within immediate closest TryOp scope. + if (op->getParentOfType() != tryOp) + return; + if (!op.getException()) + return; + callsToRewrite.push_back(op); + }); // Build try body. mlir::Block *afterTry = buildTryBody(tryOp, rewriter); // Build catchers. - buildCatchers(tryOp, rewriter, afterBody, afterTry); + mlir::Block *landingPad = + buildCatchers(tryOp, rewriter, afterBody, afterTry); rewriter.eraseOp(tryOp); + // Rewrite calls. + for (CallOp callOp : callsToRewrite) { + mlir::Block *callBlock = callOp->getBlock(); + mlir::Block *cont = + rewriter.splitBlock(callBlock, mlir::Block::iterator(callOp)); + mlir::cir::ExtraFuncAttributesAttr extraAttrs = callOp.getExtraAttrs(); + std::optional ast = callOp.getAst(); + + mlir::FlatSymbolRefAttr symbol; + if (!callOp.isIndirect()) + symbol = callOp.getCalleeAttr(); + rewriter.setInsertionPointToEnd(callBlock); + auto tryCall = rewriter.replaceOpWithNewOp( + callOp, symbol, callOp.getResult().getType(), cont, landingPad, + callOp.getOperands()); + tryCall.setExtraAttrsAttr(extraAttrs); + if (ast) + tryCall.setAstAttr(*ast); + } + // Quick block cleanup: no indirection to the post try block. auto brOp = dyn_cast(afterTry->getTerminator()); mlir::Block *srcBlock = brOp.getDest(); diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 7cc4643687f4..d1b63b09908b 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -9,53 +9,52 @@ unsigned long long tc() { int x = 50, y = 3; unsigned long long z; + // CIR_FLAT: cir.alloca !cir.ptr, !cir.ptr>, ["msg"] + // CIR_FLAT: cir.alloca !s32i, !cir.ptr, ["idx"] + // CIR_FLAT: cir.br ^bb2 try { - int a = 4; - // CIR_FLAT_DISABLED: cir.alloca !cir.ptr, !cir.ptr>, ["msg"] - // CIR_FLAT_DISABLED: cir.alloca !s32i, !cir.ptr, ["idx"] - // CIR_FLAT: cir.br ^bb1 - // CIR_FLAT: ^bb1: // pred: ^bb0 - // CIR_FLAT: cir.br ^bb2 // CIR_FLAT: ^bb2: // pred: ^bb1 - // CIR_FLAT: cir.call exception @_Z8divisionii( + // CIR_FLAT: cir.alloca !s32i, !cir.ptr + // CIR_FLAT: cir.try_call @_Z8divisionii({{.*}}) ^[[CONT:.*]], ^[[LPAD:.*]] : (!s32i, !s32i) + int a = 4; z = division(x, y); - a++; - // FIXME: this is temporary, should branch directly to ^bb4 - // but if done now it would be stripped by MLIR simplification. - // CIR_FLAT: cir.br ^bb3 + // CIR_FLAT: ^[[CONT:.*]]: // pred: ^bb2 + // CIR_FLAT: cir.cast(float_to_int, %12 : !cir.double), !u64i + a++; + // CIR_FLAT: cir.br ^[[AFTER_TRY:.*]] loc - // CIR_FLAT: ^bb3: // pred: ^bb2 + // CIR_FLAT: ^[[LPAD]]: // pred: ^bb2 // CIR_FLAT: %[[EH:.*]] = cir.eh.inflight_exception - // CIR_FLAT: cir.br ^bb4 - // CIR_FLAT: ^bb4: // pred: ^bb3 + // CIR_FLAT: cir.br ^[[BB_INT_IDX_SEL:.*]] loc + // CIR_FLAT: ^[[BB_INT_IDX_SEL]]: // pred: ^[[LPAD]] // CIR_FLAT: %[[SEL:.*]] = cir.eh.selector %[[EH]] } catch (int idx) { // CIR_FLAT: %[[INT_IDX_ID:.*]] = cir.eh.typeid @_ZTIi // CIR_FLAT: %[[MATCH_CASE_INT_IDX:.*]] = cir.cmp(eq, %[[SEL]], %[[INT_IDX_ID]]) : !u32i, !cir.bool - // CIR_FLAT: cir.brcond %[[MATCH_CASE_INT_IDX]] ^bb5, ^bb6 - // CIR_FLAT: ^bb5: // pred: ^bb4 + // CIR_FLAT: cir.brcond %[[MATCH_CASE_INT_IDX]] ^[[BB_INT_IDX_CATCH:.*]], ^[[BB_CHAR_MSG_SEL:.*]] loc + // CIR_FLAT: ^[[BB_INT_IDX_CATCH]]: // pred: ^[[BB_INT_IDX_SEL]] // CIR_FLAT: %[[PARAM_INT_IDX:.*]] = cir.catch_param -> !cir.ptr // CIR_FLAT: cir.const #cir.int<98> - // CIR_FLAT: cir.br ^bb9 + // CIR_FLAT: cir.br ^[[AFTER_TRY]] z = 98; idx++; } catch (const char* msg) { - // CIR_FLAT: ^bb6: // pred: ^bb4 + // CIR_FLAT: ^[[BB_CHAR_MSG_SEL]]: // pred: ^[[BB_INT_IDX_SEL]] // CIR_FLAT: %[[CHAR_MSG_ID:.*]] = cir.eh.typeid @_ZTIPKc - // CIR_FLAT: %[[MATCH_CASE_CHAR_MSG:.*]] = cir.cmp(eq, %[[SEL]], %[[CHAR_MSG_ID]]) : !u32i, !cir.bool - // CIR_FLAT: cir.brcond %[[MATCH_CASE_CHAR_MSG]] ^bb7, ^bb8 - // CIR_FLAT: ^bb7: // pred: ^bb6 + // CIR_FLAT: %[[MATCH_CASE_CHAR_MSG:.*]] = cir.cmp(eq, %[[SEL]], %[[CHAR_MSG_ID]]) + // CIR_FLAT: cir.brcond %[[MATCH_CASE_CHAR_MSG]] ^[[BB_CHAR_MSG_CATCH:.*]], ^[[BB_RESUME:.*]] loc + // CIR_FLAT: ^[[BB_CHAR_MSG_CATCH]]: // pred: ^[[BB_CHAR_MSG_SEL]] // CIR_FLAT: %[[PARAM_CHAR_MSG:.*]] = cir.catch_param -> !cir.ptr // CIR_FLAT: cir.const #cir.int<99> : !s32i - // CIR_FLAT: cir.br ^bb9 + // CIR_FLAT: cir.br ^[[AFTER_TRY]] loc z = 99; (void)msg[0]; } - // CIR_FLAT: ^bb8: // pred: ^bb6 + // CIR_FLAT: ^[[BB_RESUME]]: // CIR_FLAT: cir.resume - // CIR_FLAT: ^bb9: // 2 preds: ^bb5, ^bb7 + // CIR_FLAT: ^[[AFTER_TRY]]: // CIR_FLAT: cir.load return z; From 768fec1658e2acdbafaf7ce946a0d888254d7ad2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Jul 2024 18:08:14 -0700 Subject: [PATCH 1715/2301] [CIR][FlattenCFG] Exceptions: add support for catch all --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 18 ++++++++++++- clang/test/CIR/Lowering/try-catch.cpp | 27 +++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 82f1b54571af..635f154e7a5f 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -201,6 +201,20 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { rewriter.mergeBlocks(&r.back(), unwindBlock); } + void buildAllCase(mlir::PatternRewriter &rewriter, mlir::Region &r, + mlir::Block *afterTry, mlir::Block *catchAllBlock) const { + YieldOp yieldOp; + r.walk([&](YieldOp op) { + assert(!yieldOp && "expect to only find one"); + yieldOp = op; + }); + mlir::Block *catchAllStartBB = &r.front(); + rewriter.inlineRegionBefore(r, afterTry); + rewriter.mergeBlocks(catchAllStartBB, catchAllBlock); + rewriter.setInsertionPointToEnd(yieldOp->getBlock()); + rewriter.replaceOpWithNewOp(yieldOp, afterTry); + } + mlir::Block *buildCatchers(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, mlir::Block *afterBody, @@ -261,7 +275,9 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { nextDispatcher); rewriter.setInsertionPointToEnd(nextDispatcher); } else if (auto catchAll = dyn_cast(caseAttr)) { - // TBD + assert(nextDispatcher->empty() && "expect empty dispatcher"); + buildAllCase(rewriter, caseRegions[caseCnt], afterTry, nextDispatcher); + nextDispatcher = nullptr; // No more business in try/catch } else if (auto catchUnwind = dyn_cast(caseAttr)) { assert(nextDispatcher->empty() && "expect empty dispatcher"); diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index d1b63b09908b..d70a4070635b 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -60,3 +60,30 @@ unsigned long long tc() { return z; } +// CIR_FLAT: cir.func @_Z3tc2v +unsigned long long tc2() { + int x = 50, y = 3; + unsigned long long z; + + try { + int a = 4; + z = division(x, y); + a++; + } catch (int idx) { + z = 98; + idx++; + } catch (const char* msg) { + z = 99; + (void)msg[0]; + } catch (...) { + // CIR_FLAT: cir.catch_param + // CIR_FLAT: cir.const #cir.int<100> : !s32i + // CIR_FLAT: cir.br ^[[AFTER_TRY:.*]] loc + // CIR_FLAT: ^[[AFTER_TRY]]: // 4 preds + // CIR_FLAT: cir.load + // CIR_FLAT: cir.return + z = 100; + } + + return z; +} From 9f81be050d0ea53daf40d66e8984e15c43e8c21b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Jul 2024 18:17:48 -0700 Subject: [PATCH 1716/2301] [CIR][FlattenCFG] Exceptions: move cir.eh.selector up to dominate more uses --- clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 2 +- clang/test/CIR/Lowering/try-catch.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 635f154e7a5f..89d1d49ba2b6 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -234,6 +234,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { rewriter.setInsertionPointToEnd(catchBegin); auto exception = rewriter.create( loc, mlir::cir::ExceptionInfoType::get(rewriter.getContext())); + auto selector = rewriter.create(loc, exception); // TODO: direct catch all needs no dispatch? @@ -247,7 +248,6 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Fill in dispatcher. rewriter.setInsertionPointToEnd(dispatchBlock); - auto selector = rewriter.create(loc, exception); // FIXME: we should have an extra block for the dispatcher, just in case // there isn't one later. diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index d70a4070635b..12a66fbd97af 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -26,9 +26,9 @@ unsigned long long tc() { // CIR_FLAT: ^[[LPAD]]: // pred: ^bb2 // CIR_FLAT: %[[EH:.*]] = cir.eh.inflight_exception + // CIR_FLAT: %[[SEL:.*]] = cir.eh.selector %[[EH]] // CIR_FLAT: cir.br ^[[BB_INT_IDX_SEL:.*]] loc // CIR_FLAT: ^[[BB_INT_IDX_SEL]]: // pred: ^[[LPAD]] - // CIR_FLAT: %[[SEL:.*]] = cir.eh.selector %[[EH]] } catch (int idx) { // CIR_FLAT: %[[INT_IDX_ID:.*]] = cir.eh.typeid @_ZTIi // CIR_FLAT: %[[MATCH_CASE_INT_IDX:.*]] = cir.cmp(eq, %[[SEL]], %[[INT_IDX_ID]]) : !u32i, !cir.bool From 92c761c820a65f431e4ed3a5fb307186c994c94d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 26 Jul 2024 18:23:07 -0700 Subject: [PATCH 1717/2301] [CIR][FlattenCFG] Exceptions: support catch all only try's --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 17 ++++--------- clang/test/CIR/Lowering/try-catch.cpp | 24 ++++++++++++++++++- 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 89d1d49ba2b6..b7c440d806f7 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -236,28 +236,20 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { loc, mlir::cir::ExceptionInfoType::get(rewriter.getContext())); auto selector = rewriter.create(loc, exception); - // TODO: direct catch all needs no dispatch? - // Handle dispatch. In could in theory use a switch, but let's just // mimic LLVM more closely since we have no specific thing to achieve // doing that (might not play as well with existing optimizers either). - auto *dispatchBlock = + auto *nextDispatcher = rewriter.splitBlock(catchBegin, rewriter.getInsertionPoint()); rewriter.setInsertionPointToEnd(catchBegin); - rewriter.create(loc, dispatchBlock); + rewriter.create(loc, nextDispatcher); // Fill in dispatcher. - rewriter.setInsertionPointToEnd(dispatchBlock); - - // FIXME: we should have an extra block for the dispatcher, just in case - // there isn't one later. - + rewriter.setInsertionPointToEnd(nextDispatcher); llvm::MutableArrayRef caseRegions = tryOp.getCatchRegions(); mlir::ArrayAttr caseAttrList = tryOp.getCatchTypesAttr(); unsigned caseCnt = 0; - mlir::Block *nextDispatcher = rewriter.getInsertionBlock(); - for (mlir::Attribute caseAttr : caseAttrList) { if (auto typeIdGlobal = dyn_cast(caseAttr)) { auto typeId = rewriter.create( @@ -275,7 +267,8 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { nextDispatcher); rewriter.setInsertionPointToEnd(nextDispatcher); } else if (auto catchAll = dyn_cast(caseAttr)) { - assert(nextDispatcher->empty() && "expect empty dispatcher"); + // In case the catch(...) is all we got, `nextDispatcher` shall be + // non-empty. buildAllCase(rewriter, caseRegions[caseCnt], afterTry, nextDispatcher); nextDispatcher = nullptr; // No more business in try/catch } else if (auto catchUnwind = diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 12a66fbd97af..9bb380df1efb 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -36,7 +36,7 @@ unsigned long long tc() { // CIR_FLAT: ^[[BB_INT_IDX_CATCH]]: // pred: ^[[BB_INT_IDX_SEL]] // CIR_FLAT: %[[PARAM_INT_IDX:.*]] = cir.catch_param -> !cir.ptr // CIR_FLAT: cir.const #cir.int<98> - // CIR_FLAT: cir.br ^[[AFTER_TRY]] + // CIR_FLAT: cir.br ^[[AFTER_TRY]] loc z = 98; idx++; } catch (const char* msg) { @@ -87,3 +87,25 @@ unsigned long long tc2() { return z; } + +// CIR_FLAT: cir.func @_Z3tc3v +unsigned long long tc3() { + int x = 50, y = 3; + unsigned long long z; + + try { + z = division(x, y); + } catch (...) { + // CIR_FLAT: cir.eh.selector + // CIR_FLAT: cir.br ^[[CATCH_ALL:.*]] loc + // CIR_FLAT: ^[[CATCH_ALL]]: + // CIR_FLAT: cir.catch_param -> !cir.ptr + // CIR_FLAT: cir.const #cir.int<100> : !s32i + // CIR_FLAT: cir.br ^[[AFTER_TRY:.*]] loc + // CIR_FLAT: ^[[AFTER_TRY]]: // 2 preds + // CIR_FLAT: cir.load + z = 100; + } + + return z; +} \ No newline at end of file From 1a36faef233c7db44ab7b79f31c8db8a6fd92f68 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Mon, 29 Jul 2024 15:18:18 -0300 Subject: [PATCH 1718/2301] [CIR][ABI] Add X86_64 and AArch64 bool CC lowering (#755) Implements calling convention lowering of bool arguments and return value calling conventions for X86_64 and AArch64. For x86_64, this is a bit of an odd case. In the orignal codegen bools are represented as i8 everywhere, except for function arguments/return values. In CIR, we don't allow i1 types, so bools are still represented as `cir.bool` when in functions. However, when lowering to LLVM Dialect, we need to ensure bools will be converted to i1 when in function's argument/return values. --- clang/include/clang/CIR/ABIArgInfo.h | 3 ++- .../Transforms/TargetLowering/CIRLowerContext.cpp | 12 +++++++++++- .../Transforms/TargetLowering/LowerFunction.cpp | 15 ++++----------- .../Transforms/TargetLowering/LowerTypes.cpp | 2 +- .../Transforms/TargetLowering/Targets/X86.cpp | 5 ++++- .../aarch64/aarch64-call-conv-lowering-pass.cpp | 6 ++++++ .../x86_64/x86_64-call-conv-lowering-pass.cpp | 12 ++++++++++++ 7 files changed, 40 insertions(+), 15 deletions(-) diff --git a/clang/include/clang/CIR/ABIArgInfo.h b/clang/include/clang/CIR/ABIArgInfo.h index 78127230a7ce..d330b2c3e24d 100644 --- a/clang/include/clang/CIR/ABIArgInfo.h +++ b/clang/include/clang/CIR/ABIArgInfo.h @@ -185,7 +185,8 @@ class ABIArgInfo { } static ABIArgInfo getExtend(mlir::Type Ty, mlir::Type T = nullptr) { // NOTE(cir): The original can apply this method on both integers and - // enumerations, but in CIR, these two types are one and the same. + // enumerations, but in CIR, these two types are one and the same. Booleans + // will also fall into this category, but they have their own type. if (mlir::isa(Ty) && mlir::cast(Ty).isSigned()) return getSignExtend(mlir::cast(Ty), T); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index dce5fabc314b..f7020f37f513 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -50,7 +50,7 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { // TODO(cir): We should implement a better way to identify type kinds and use // builting data layout interface for this. auto typeKind = clang::Type::Builtin; - if (isa(T)) { + if (isa(T)) { typeKind = clang::Type::Builtin; } else { llvm_unreachable("Unhandled type class"); @@ -74,6 +74,11 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { Align = std::ceil((float)Width / 8) * 8; break; } + if (auto boolTy = dyn_cast(T)) { + Width = Target->getFloatWidth(); + Align = Target->getFloatAlign(); + break; + } if (auto floatTy = dyn_cast(T)) { Width = Target->getFloatWidth(); Align = Target->getFloatAlign(); @@ -153,6 +158,11 @@ bool CIRLowerContext::isPromotableIntegerType(Type T) const { return cast(T).getWidth() < 32; } + // Bool are also handled here for codegen parity. + if (auto boolTy = dyn_cast(T)) { + return true; + } + // Enumerated types are promotable to their compatible integer types // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). // TODO(cir): CIR doesn't know if a integer originated from an enum. Improve diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index b1d5e24092cb..08a56d03c604 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -22,6 +22,7 @@ #include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/MissingFeatures.h" #include "clang/CIR/TypeEvaluationKind.h" #include "llvm/Support/ErrorHandling.h" @@ -410,12 +411,9 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, switch (ArgInfo.getKind()) { case ABIArgInfo::Extend: case ABIArgInfo::Direct: { - // NOTE(cir): While booleans are lowered directly as `i1`s in the - // original codegen, in CIR they require a trivial bitcast. This is - // handled here. + if (isa(info_it->type)) { - IRCallArgs[FirstIRArg] = - createBitcast(*I, ArgInfo.getCoerceToType(), *this); + IRCallArgs[FirstIRArg] = *I; break; } @@ -514,11 +512,6 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, case ABIArgInfo::Extend: case ABIArgInfo::Direct: { - // NOTE(cir): While booleans are lowered directly as `i1`s in the - // original codegen, in CIR they require a trivial bitcast. This is - // handled here. - assert(!isa(RetTy)); - Type RetIRTy = RetTy; if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { switch (getEvaluationKind(RetTy)) { @@ -564,7 +557,7 @@ ::cir::TypeEvaluationKind LowerFunction::getEvaluationKind(Type type) { // FIXME(cir): Implement type classes for CIR types. if (isa(type)) return ::cir::TypeEvaluationKind::TEK_Aggregate; - if (isa(type)) + if (isa(type)) return ::cir::TypeEvaluationKind::TEK_Scalar; llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index ecee0b23ce75..486e771d8b66 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -112,7 +112,7 @@ mlir::Type LowerTypes::convertType(Type T) { /// keeping it here for parity's sake. // Certain CIR types are already ABI-specific, so we just return them. - if (isa(T)) { + if (isa(T)) { return T; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index a82530e1bd30..477ccd312cc4 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -7,6 +7,7 @@ #include "TargetInfo.h" #include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" #include @@ -157,6 +158,8 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, Current = Class::SSE; return; + } else if (isa(Ty)) { + Current = Class::Integer; } else { llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; llvm_unreachable("NYI"); @@ -294,7 +297,7 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { // enums directly as their unerlying integer types. NOTE(cir): For some // reason, Clang does not set the coerce type here and delays it to // arrangeLLVMFunctionInfo. We do the same to keep parity. - if (isa(RetTy) && isPromotableIntegerTypeForABI(RetTy)) + if (isa(RetTy) && isPromotableIntegerTypeForABI(RetTy)) return ABIArgInfo::getExtend(RetTy); } break; diff --git a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp index 1d1671bbad1a..33cb05c25a01 100644 --- a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp @@ -9,6 +9,12 @@ void Void(void) { // Test call conv lowering for trivial usinged integer cases. +// CHECK: @_Z4Boolb(%arg0: !cir.bool loc({{.+}})) -> !cir.bool +bool Bool(bool a) { +// CHECK: cir.call @_Z4Boolb({{.+}}) : (!cir.bool) -> !cir.bool + return Bool(a); +} + // CHECK: cir.func @_Z5UCharh(%arg0: !u8i loc({{.+}})) -> !u8i unsigned char UChar(unsigned char c) { // CHECK: cir.call @_Z5UCharh(%2) : (!u8i) -> !u8i diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index 7a7a244397a1..6eb1189402fc 100644 --- a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -1,6 +1,8 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// Test call conv lowering for trivial cases. // + // CHECK: @_Z4Voidv() void Void(void) { // CHECK: cir.call @_Z4Voidv() : () -> () @@ -9,6 +11,16 @@ void Void(void) { // Test call conv lowering for trivial zeroext cases. +// Bools are a bit of an odd case in CIR's x86_64 representation: they are considered i8 +// everywhere except in the function return/arguments, where they are considered i1. To +// match LLVM's behavior, we need to zero-extend them when passing them as arguments. + +// CHECK: @_Z4Boolb(%arg0: !cir.bool {cir.zeroext} loc({{.+}})) -> (!cir.bool {cir.zeroext}) +bool Bool(bool a) { +// CHECK: cir.call @_Z4Boolb({{.+}}) : (!cir.bool) -> !cir.bool + return Bool(a); +} + // CHECK: cir.func @_Z5UCharh(%arg0: !u8i {cir.zeroext} loc({{.+}})) -> (!u8i {cir.zeroext}) unsigned char UChar(unsigned char c) { // CHECK: cir.call @_Z5UCharh(%2) : (!u8i) -> !u8i From 377e70e5f4cca91bc10af815d9cd083a7b42e082 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Jul 2024 11:48:38 -0700 Subject: [PATCH 1719/2301] [CIR][NFC] Exceptions: use a pointer for an opaque exception type instead Effectively this doesn't have any visible side-effect to behavior. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 16 ++++++------- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 23 ++----------------- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 6 +++-- 3 files changed, 13 insertions(+), 32 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 331e88b62f9f..b819a396aa28 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3329,12 +3329,12 @@ def CatchParamOp : CIR_Op<"catch_param"> { def EhInflightOp : CIR_Op<"eh.inflight_exception"> { let summary = "Materialize the catch clause formal parameter"; let description = [{ - `cir.eh.inflight_exception` returns an exception coming from a - `cir.call exception` that might throw. The returned value is opaque - but can be further decomposed by other operations. + `cir.eh.inflight_exception` returns a pointer to exception information + coming from a `cir.call exception` that might throw. The returned value is + a pointer to an opaque type that is further decomposed by other operations. }]; - let results = (outs CIR_ExceptionType:$exception); + let results = (outs ExceptionPtr:$exception); let assemblyFormat = [{ attr-dict }]; @@ -3345,13 +3345,11 @@ def EhInflightOp : CIR_Op<"eh.inflight_exception"> { def EhSelectorOp : CIR_Op<"eh.selector"> { let summary = "Materialize the eh selector"; let description = [{ - `cir.eh.inflight_exception` returns an exception coming from a - `cir.call exception ...` that might throw. `cir.eh.selector` returns the - runtime selector value (type id) for the needed , which represents the type id used by - operations to compare against other type ids. + Given an exception information pointer, `cir.eh.selector` returns the + runtime selector value (type id) for the inflight exception. }]; - let arguments = (ins CIR_ExceptionType:$exception); + let arguments = (ins ExceptionPtr:$exception); let results = (outs UInt32:$type_id); let assemblyFormat = [{ $exception attr-dict diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 97c8c808f857..7f6a3084c80f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -484,8 +484,8 @@ def StructPtr : Type< ]>, "!cir.struct*"> { } -// Pointers to exception info -def ExceptionInfoPtr : Type< +// Pointer to exception info +def ExceptionPtr : Type< And<[ CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, CPred<"::mlir::isa<::mlir::cir::ExceptionInfoType>(" @@ -496,25 +496,6 @@ def ExceptionInfoPtr : Type< "mlir::cir::ExceptionInfoType::get($_builder.getContext()))"> { } -// Pooint to pointers to exception info -def ExceptionInfoPtrPtr : Type< - And<[ - CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, - And<[ - CPred<"::mlir::isa<::mlir::cir::PointerType>(" - "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee()">, - CPred<"::mlir::isa<::mlir::cir::ExceptionInfoType>(" - "::mlir::cast<::mlir::cir::PointerType>(" - "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())" - ".getPointee()))"> - ]> - ]>, "!cir.eh_info**">, - BuildableType< - "mlir::cir::PointerType::get($_builder.getContext()," - "mlir::cir::PointerType::get($_builder.getContext()," - "mlir::cir::ExceptionInfoType::get($_builder.getContext())))"> { -} - // Vector of integral type def IntegerVector : Type< And<[ diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index b7c440d806f7..3bff6dbaabc8 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -232,8 +232,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Start the landing pad by getting the inflight exception information. rewriter.setInsertionPointToEnd(catchBegin); - auto exception = rewriter.create( - loc, mlir::cir::ExceptionInfoType::get(rewriter.getContext())); + auto exceptionPtrType = mlir::cir::PointerType::get( + mlir::cir::ExceptionInfoType::get(rewriter.getContext())); + auto exception = + rewriter.create(loc, exceptionPtrType); auto selector = rewriter.create(loc, exception); // Handle dispatch. In could in theory use a switch, but let's just From ef5caf92793f26bc8ea978f438bbe8c5fb2a7c3e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Jul 2024 13:07:39 -0700 Subject: [PATCH 1720/2301] [CIR][FlattenCFG] Exceptions: use block arguments to pass down selector --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 21 ++++++++++++---- clang/test/CIR/Lowering/try-catch.cpp | 24 +++++++++---------- 2 files changed, 28 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 3bff6dbaabc8..9194d9210cc9 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -244,7 +244,9 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto *nextDispatcher = rewriter.splitBlock(catchBegin, rewriter.getInsertionPoint()); rewriter.setInsertionPointToEnd(catchBegin); - rewriter.create(loc, nextDispatcher); + nextDispatcher->addArgument(selector.getType(), loc); + rewriter.create(loc, nextDispatcher, + mlir::ValueRange{selector}); // Fill in dispatcher. rewriter.setInsertionPointToEnd(nextDispatcher); @@ -254,28 +256,37 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { for (mlir::Attribute caseAttr : caseAttrList) { if (auto typeIdGlobal = dyn_cast(caseAttr)) { + auto *previousDispatcher = nextDispatcher; auto typeId = rewriter.create( loc, typeIdGlobal.getSymbol()); auto match = rewriter.create( loc, mlir::cir::BoolType::get(rewriter.getContext()), - mlir::cir::CmpOpKind::eq, selector, typeId); + mlir::cir::CmpOpKind::eq, previousDispatcher->getArgument(0), + typeId); - auto *previousDispatcher = nextDispatcher; mlir::Block *typeCatchBlock = buildTypeCase(rewriter, caseRegions[caseCnt], afterTry); nextDispatcher = rewriter.createBlock(afterTry); rewriter.setInsertionPointToEnd(previousDispatcher); - rewriter.create(loc, match, typeCatchBlock, - nextDispatcher); + + nextDispatcher->addArgument(selector.getType(), loc); + typeCatchBlock->addArgument(selector.getType(), loc); + + rewriter.create( + loc, match, typeCatchBlock, nextDispatcher, + mlir::ValueRange{previousDispatcher->getArgument(0)}, + mlir::ValueRange{previousDispatcher->getArgument(0)}); rewriter.setInsertionPointToEnd(nextDispatcher); } else if (auto catchAll = dyn_cast(caseAttr)) { // In case the catch(...) is all we got, `nextDispatcher` shall be // non-empty. + assert(!nextDispatcher->args_empty() && "expected block argument"); buildAllCase(rewriter, caseRegions[caseCnt], afterTry, nextDispatcher); nextDispatcher = nullptr; // No more business in try/catch } else if (auto catchUnwind = dyn_cast(caseAttr)) { assert(nextDispatcher->empty() && "expect empty dispatcher"); + assert(!nextDispatcher->args_empty() && "expected block argument"); buildUnwindCase(rewriter, caseRegions[caseCnt], nextDispatcher); nextDispatcher = nullptr; // No more business in try/catch } diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 9bb380df1efb..e9ca406893ef 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -27,31 +27,31 @@ unsigned long long tc() { // CIR_FLAT: ^[[LPAD]]: // pred: ^bb2 // CIR_FLAT: %[[EH:.*]] = cir.eh.inflight_exception // CIR_FLAT: %[[SEL:.*]] = cir.eh.selector %[[EH]] - // CIR_FLAT: cir.br ^[[BB_INT_IDX_SEL:.*]] loc - // CIR_FLAT: ^[[BB_INT_IDX_SEL]]: // pred: ^[[LPAD]] + // CIR_FLAT: cir.br ^[[BB_INT_IDX_SEL:.*]](%[[SEL]] : {{.*}}) loc } catch (int idx) { + // CIR_FLAT: ^[[BB_INT_IDX_SEL]](%[[INT_IDX_SEL:.*]]: !u32i // CIR_FLAT: %[[INT_IDX_ID:.*]] = cir.eh.typeid @_ZTIi - // CIR_FLAT: %[[MATCH_CASE_INT_IDX:.*]] = cir.cmp(eq, %[[SEL]], %[[INT_IDX_ID]]) : !u32i, !cir.bool - // CIR_FLAT: cir.brcond %[[MATCH_CASE_INT_IDX]] ^[[BB_INT_IDX_CATCH:.*]], ^[[BB_CHAR_MSG_SEL:.*]] loc - // CIR_FLAT: ^[[BB_INT_IDX_CATCH]]: // pred: ^[[BB_INT_IDX_SEL]] + // CIR_FLAT: %[[MATCH_CASE_INT_IDX:.*]] = cir.cmp(eq, %[[INT_IDX_SEL]], %[[INT_IDX_ID]]) : !u32i, !cir.bool + // CIR_FLAT: cir.brcond %[[MATCH_CASE_INT_IDX]] ^[[BB_INT_IDX_CATCH:.*]](%[[INT_IDX_SEL]] : {{.*}}), ^[[BB_CHAR_MSG_SEL:.*]](%[[INT_IDX_SEL]] : {{.*}}) loc + // CIR_FLAT: ^[[BB_INT_IDX_CATCH]](%[[INT_IDX_CATCH_SLOT:.*]]: !u32i // CIR_FLAT: %[[PARAM_INT_IDX:.*]] = cir.catch_param -> !cir.ptr // CIR_FLAT: cir.const #cir.int<98> // CIR_FLAT: cir.br ^[[AFTER_TRY]] loc z = 98; idx++; } catch (const char* msg) { - // CIR_FLAT: ^[[BB_CHAR_MSG_SEL]]: // pred: ^[[BB_INT_IDX_SEL]] + // CIR_FLAT: ^[[BB_CHAR_MSG_SEL]](%[[CHAR_MSG_SEL:.*]]: !u32i // CIR_FLAT: %[[CHAR_MSG_ID:.*]] = cir.eh.typeid @_ZTIPKc - // CIR_FLAT: %[[MATCH_CASE_CHAR_MSG:.*]] = cir.cmp(eq, %[[SEL]], %[[CHAR_MSG_ID]]) - // CIR_FLAT: cir.brcond %[[MATCH_CASE_CHAR_MSG]] ^[[BB_CHAR_MSG_CATCH:.*]], ^[[BB_RESUME:.*]] loc - // CIR_FLAT: ^[[BB_CHAR_MSG_CATCH]]: // pred: ^[[BB_CHAR_MSG_SEL]] + // CIR_FLAT: %[[MATCH_CASE_CHAR_MSG:.*]] = cir.cmp(eq, %[[CHAR_MSG_SEL]], %[[CHAR_MSG_ID]]) + // CIR_FLAT: cir.brcond %[[MATCH_CASE_CHAR_MSG]] ^[[BB_CHAR_MSG_CATCH:.*]](%[[CHAR_MSG_SEL]] : {{.*}}), ^[[BB_RESUME:.*]](%[[CHAR_MSG_SEL]] : {{.*}}) loc + // CIR_FLAT: ^[[BB_CHAR_MSG_CATCH]](%[[CHAR_MSG_CATCH_SLOT:.*]]: // CIR_FLAT: %[[PARAM_CHAR_MSG:.*]] = cir.catch_param -> !cir.ptr // CIR_FLAT: cir.const #cir.int<99> : !s32i // CIR_FLAT: cir.br ^[[AFTER_TRY]] loc z = 99; (void)msg[0]; } - // CIR_FLAT: ^[[BB_RESUME]]: + // CIR_FLAT: ^[[BB_RESUME]](%[[RESUME_SEL:.*]]: !u32i // CIR_FLAT: cir.resume // CIR_FLAT: ^[[AFTER_TRY]]: @@ -97,8 +97,8 @@ unsigned long long tc3() { z = division(x, y); } catch (...) { // CIR_FLAT: cir.eh.selector - // CIR_FLAT: cir.br ^[[CATCH_ALL:.*]] loc - // CIR_FLAT: ^[[CATCH_ALL]]: + // CIR_FLAT: cir.br ^[[CATCH_ALL:.*]]({{.*}} : {{.*}}) loc + // CIR_FLAT: ^[[CATCH_ALL]](%[[CATCH_ALL_SEL:.*]]: !u32i loc // CIR_FLAT: cir.catch_param -> !cir.ptr // CIR_FLAT: cir.const #cir.int<100> : !s32i // CIR_FLAT: cir.br ^[[AFTER_TRY:.*]] loc From ab5b83d9a8a1045aff8d092b550eda5ea6c6f65e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Jul 2024 14:44:06 -0700 Subject: [PATCH 1721/2301] [CIR][FlattenCFG] Exceptions: simplify selector/slot a bit further --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 28 +++++-------------- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 9 +++--- clang/test/CIR/Lowering/try-catch.cpp | 5 ++-- 3 files changed, 14 insertions(+), 28 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b819a396aa28..d382da62aa51 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3323,18 +3323,20 @@ def CatchParamOp : CIR_Op<"catch_param"> { } //===----------------------------------------------------------------------===// -// Exception related: EhInflightOp, EhSelectorOp +// Exception related: EhInflightOp, EhTypeIdOp //===----------------------------------------------------------------------===// def EhInflightOp : CIR_Op<"eh.inflight_exception"> { let summary = "Materialize the catch clause formal parameter"; let description = [{ - `cir.eh.inflight_exception` returns a pointer to exception information - coming from a `cir.call exception` that might throw. The returned value is - a pointer to an opaque type that is further decomposed by other operations. + `cir.eh.inflight_exception` returns two values: + - `exception_ptr`: The exception pointer for the inflight exception + - `type_id`: pointer to the exception object + This operation is expected to be the first one basic blocks on the + exception path out of `cir.try_call` operations. }]; - let results = (outs ExceptionPtr:$exception); + let results = (outs VoidPtr:$exception_ptr, UInt32:$type_id); let assemblyFormat = [{ attr-dict }]; @@ -3342,22 +3344,6 @@ def EhInflightOp : CIR_Op<"eh.inflight_exception"> { let hasVerifier = 0; } -def EhSelectorOp : CIR_Op<"eh.selector"> { - let summary = "Materialize the eh selector"; - let description = [{ - Given an exception information pointer, `cir.eh.selector` returns the - runtime selector value (type id) for the inflight exception. - }]; - - let arguments = (ins ExceptionPtr:$exception); - let results = (outs UInt32:$type_id); - let assemblyFormat = [{ - $exception attr-dict - }]; - - let hasVerifier = 0; -} - def EhTypeIdOp : CIR_Op<"eh.typeid", [Pure, DeclareOpInterfaceMethods]> { let summary = "Compute exception type id from it's global type symbol"; diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 9194d9210cc9..439bb9279ad4 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -233,10 +233,11 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Start the landing pad by getting the inflight exception information. rewriter.setInsertionPointToEnd(catchBegin); auto exceptionPtrType = mlir::cir::PointerType::get( - mlir::cir::ExceptionInfoType::get(rewriter.getContext())); - auto exception = - rewriter.create(loc, exceptionPtrType); - auto selector = rewriter.create(loc, exception); + mlir::cir::VoidType::get(rewriter.getContext())); + auto typeIdType = mlir::cir::IntType::get(getContext(), 32, false); + auto inflightEh = rewriter.create( + loc, exceptionPtrType, typeIdType); + auto selector = inflightEh.getTypeId(); // Handle dispatch. In could in theory use a switch, but let's just // mimic LLVM more closely since we have no specific thing to achieve diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index e9ca406893ef..adc8b178b857 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -25,8 +25,7 @@ unsigned long long tc() { // CIR_FLAT: cir.br ^[[AFTER_TRY:.*]] loc // CIR_FLAT: ^[[LPAD]]: // pred: ^bb2 - // CIR_FLAT: %[[EH:.*]] = cir.eh.inflight_exception - // CIR_FLAT: %[[SEL:.*]] = cir.eh.selector %[[EH]] + // CIR_FLAT: %exception_ptr, %[[SEL:.*]] = cir.eh.inflight_exception // CIR_FLAT: cir.br ^[[BB_INT_IDX_SEL:.*]](%[[SEL]] : {{.*}}) loc } catch (int idx) { // CIR_FLAT: ^[[BB_INT_IDX_SEL]](%[[INT_IDX_SEL:.*]]: !u32i @@ -96,7 +95,7 @@ unsigned long long tc3() { try { z = division(x, y); } catch (...) { - // CIR_FLAT: cir.eh.selector + // CIR_FLAT: cir.eh.inflight_exception // CIR_FLAT: cir.br ^[[CATCH_ALL:.*]]({{.*}} : {{.*}}) loc // CIR_FLAT: ^[[CATCH_ALL]](%[[CATCH_ALL_SEL:.*]]: !u32i loc // CIR_FLAT: cir.catch_param -> !cir.ptr From 018e83a35f900822b811173f97afd6b3ccab05fd Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 29 Jul 2024 15:07:59 -0700 Subject: [PATCH 1722/2301] [CIR][FlattenCFG] Exceptions: extend 'cir.catch_param' to cover more functionality If `cir.catch_param` is surrounded be a `cir.try`, exception information is assumed from the context, otherwise it needs to be decomposed to `cir.catch_param begin %exception` or `cir.catch_param end` to enforce the scope directly. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 28 +++-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 4 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 +++ .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 118 ++++++++++++++---- clang/test/CIR/Lowering/try-catch.cpp | 24 ++-- 5 files changed, 148 insertions(+), 42 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d382da62aa51..d59c2a0f419e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3301,12 +3301,21 @@ def TryOp : CIR_Op<"try", // CatchParamOp //===----------------------------------------------------------------------===// +def CatchParamBegin : I32EnumAttrCase<"begin", 0>; +def CatchParamEnd : I32EnumAttrCase<"end", 1>; +def CatchParamKind : I32EnumAttr< + "CatchParamKind", + "Designate limits for begin/end of catch param handling", + [CatchParamBegin, CatchParamEnd]> { + let cppNamespace = "::mlir::cir"; +} + def CatchParamOp : CIR_Op<"catch_param"> { - let summary = "Materialize the catch clause formal parameter"; + let summary = "Represents catch clause formal parameter"; let description = [{ - The `cir.catch_param` binds to a the C/C++ catch clause param and allow - it to be materialized. This operantion grabs the param by looking into - a exception info `!cir.eh_info` argument. + The `cir.catch_param` can operate in two modes: within catch regions of + `cir.try` or anywhere else with the `begin` or `end` markers. The `begin` + version requires an exception pointer of `cir.ptr`. Example: ```mlir @@ -3314,12 +3323,17 @@ def CatchParamOp : CIR_Op<"catch_param"> { ``` }]; - let results = (outs CIR_AnyType:$param); + let arguments = (ins Optional:$exception_ptr, + OptionalAttr:$kind); + let results = (outs Optional:$param); let assemblyFormat = [{ - `->` qualified(type($param)) attr-dict + ($kind^)? + ($exception_ptr^)? + (`->` qualified(type($param))^)? + attr-dict }]; - let hasVerifier = 0; + let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 3bf7159f0bf5..5161a06e5b9f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -650,13 +650,13 @@ struct CallEndCatch final : EHScopeStack::Cleanup { static mlir::Value CallBeginCatch(CIRGenFunction &CGF, mlir::Type ParamTy, bool EndMightThrow) { auto catchParam = CGF.getBuilder().create( - CGF.getBuilder().getUnknownLoc(), ParamTy); + CGF.getBuilder().getUnknownLoc(), ParamTy, nullptr, nullptr); CGF.EHStack.pushCleanup( NormalAndEHCleanup, EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor); - return catchParam; + return catchParam.getParam(); } /// A "special initializer" callback for initializing a catch diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9c015ce24c8c..de62c91439de 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -3318,6 +3318,22 @@ LogicalResult EhTypeIdOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return success(); } +//===----------------------------------------------------------------------===// +// CatchParamOp +//===----------------------------------------------------------------------===// + +LogicalResult cir::CatchParamOp::verify() { + if (getExceptionPtr()) { + auto kind = getKind(); + if (!kind || *kind != mlir::cir::CatchParamKind::begin) + return emitOpError("needs 'begin' to work with exception pointer"); + return success(); + } + if (!getKind() && !(*this)->getParentOfType()) + return emitOpError("without 'kind' requires 'cir.try' surrounding scope"); + return success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 439bb9279ad4..644285685ba9 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -177,7 +177,8 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { using OpRewritePattern::OpRewritePattern; mlir::Block *buildTypeCase(mlir::PatternRewriter &rewriter, mlir::Region &r, - mlir::Block *afterTry) const { + mlir::Block *afterTry, + mlir::Type exceptionPtrTy) const { YieldOp yieldOp; CatchParamOp paramOp; r.walk([&](YieldOp op) { @@ -188,11 +189,38 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { assert(!paramOp && "expect to only find one"); paramOp = op; }); - rewriter.inlineRegionBefore(r, afterTry); + + // Rewrite `cir.catch_param` to be scope aware and instead generate: + // ``` + // cir.catch_param begin %exception_ptr + // ... + // cir.catch_param end + // cir.br ... + mlir::Value catchResult = paramOp.getParam(); + assert(catchResult && "expected to be available"); + rewriter.setInsertionPointAfterValue(catchResult); + auto catchType = catchResult.getType(); + mlir::Block *entryBlock = paramOp->getBlock(); + mlir::Location catchLoc = paramOp.getLoc(); + // Catch handler only gets the exception pointer (selection not needed). + mlir::Value exceptionPtr = + entryBlock->addArgument(exceptionPtrTy, paramOp.getLoc()); + + rewriter.replaceOpWithNewOp( + paramOp, catchType, exceptionPtr, + mlir::cir::CatchParamKindAttr::get(rewriter.getContext(), + mlir::cir::CatchParamKind::begin)); + + rewriter.setInsertionPoint(yieldOp); + rewriter.create( + catchLoc, mlir::Type{}, nullptr, + mlir::cir::CatchParamKindAttr::get(rewriter.getContext(), + mlir::cir::CatchParamKind::end)); + rewriter.setInsertionPointToEnd(yieldOp->getBlock()); rewriter.replaceOpWithNewOp(yieldOp, afterTry); - return paramOp->getBlock(); + return entryBlock; } void buildUnwindCase(mlir::PatternRewriter &rewriter, mlir::Region &r, @@ -202,15 +230,44 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { } void buildAllCase(mlir::PatternRewriter &rewriter, mlir::Region &r, - mlir::Block *afterTry, mlir::Block *catchAllBlock) const { + mlir::Block *afterTry, mlir::Block *catchAllBlock, + mlir::Value exceptionPtr) const { YieldOp yieldOp; + CatchParamOp paramOp; r.walk([&](YieldOp op) { assert(!yieldOp && "expect to only find one"); yieldOp = op; }); + r.walk([&](CatchParamOp op) { + assert(!paramOp && "expect to only find one"); + paramOp = op; + }); mlir::Block *catchAllStartBB = &r.front(); rewriter.inlineRegionBefore(r, afterTry); rewriter.mergeBlocks(catchAllStartBB, catchAllBlock); + + // Rewrite `cir.catch_param` to be scope aware and instead generate: + // ``` + // cir.catch_param begin %exception_ptr + // ... + // cir.catch_param end + // cir.br ... + mlir::Value catchResult = paramOp.getParam(); + assert(catchResult && "expected to be available"); + rewriter.setInsertionPointAfterValue(catchResult); + auto catchType = catchResult.getType(); + mlir::Location catchLoc = paramOp.getLoc(); + rewriter.replaceOpWithNewOp( + paramOp, catchType, exceptionPtr, + mlir::cir::CatchParamKindAttr::get(rewriter.getContext(), + mlir::cir::CatchParamKind::begin)); + + rewriter.setInsertionPoint(yieldOp); + rewriter.create( + catchLoc, mlir::Type{}, nullptr, + mlir::cir::CatchParamKindAttr::get(rewriter.getContext(), + mlir::cir::CatchParamKind::end)); + rewriter.setInsertionPointToEnd(yieldOp->getBlock()); rewriter.replaceOpWithNewOp(yieldOp, afterTry); } @@ -238,6 +295,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto inflightEh = rewriter.create( loc, exceptionPtrType, typeIdType); auto selector = inflightEh.getTypeId(); + auto exceptionPtr = inflightEh.getExceptionPtr(); // Handle dispatch. In could in theory use a switch, but let's just // mimic LLVM more closely since we have no specific thing to achieve @@ -245,14 +303,20 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto *nextDispatcher = rewriter.splitBlock(catchBegin, rewriter.getInsertionPoint()); rewriter.setInsertionPointToEnd(catchBegin); - nextDispatcher->addArgument(selector.getType(), loc); - rewriter.create(loc, nextDispatcher, - mlir::ValueRange{selector}); + mlir::ArrayAttr caseAttrList = tryOp.getCatchTypesAttr(); + nextDispatcher->addArgument(exceptionPtr.getType(), loc); + SmallVector dispatcherInitOps = {exceptionPtr}; + bool tryOnlyHasCatchAll = caseAttrList.size() == 1 && + isa(caseAttrList[0]); + if (!tryOnlyHasCatchAll) { + nextDispatcher->addArgument(selector.getType(), loc); + dispatcherInitOps.push_back(selector); + } + rewriter.create(loc, nextDispatcher, dispatcherInitOps); // Fill in dispatcher. rewriter.setInsertionPointToEnd(nextDispatcher); llvm::MutableArrayRef caseRegions = tryOp.getCatchRegions(); - mlir::ArrayAttr caseAttrList = tryOp.getCatchTypesAttr(); unsigned caseCnt = 0; for (mlir::Attribute caseAttr : caseAttrList) { @@ -260,34 +324,46 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto *previousDispatcher = nextDispatcher; auto typeId = rewriter.create( loc, typeIdGlobal.getSymbol()); + auto ehPtr = previousDispatcher->getArgument(0); + auto ehSel = previousDispatcher->getArgument(1); + auto match = rewriter.create( loc, mlir::cir::BoolType::get(rewriter.getContext()), - mlir::cir::CmpOpKind::eq, previousDispatcher->getArgument(0), - typeId); + mlir::cir::CmpOpKind::eq, ehSel, typeId); - mlir::Block *typeCatchBlock = - buildTypeCase(rewriter, caseRegions[caseCnt], afterTry); + mlir::Block *typeCatchBlock = buildTypeCase( + rewriter, caseRegions[caseCnt], afterTry, ehPtr.getType()); nextDispatcher = rewriter.createBlock(afterTry); rewriter.setInsertionPointToEnd(previousDispatcher); - nextDispatcher->addArgument(selector.getType(), loc); - typeCatchBlock->addArgument(selector.getType(), loc); + // Next dispatcher gets by default both exception ptr and selector info, + // but on a catch all we don't need selector info. + nextDispatcher->addArgument(ehPtr.getType(), loc); + SmallVector nextDispatchOps = {ehPtr}; + if (!isa(caseAttrList[caseCnt + 1])) { + nextDispatcher->addArgument(ehSel.getType(), loc); + nextDispatchOps.push_back(ehSel); + } rewriter.create( - loc, match, typeCatchBlock, nextDispatcher, - mlir::ValueRange{previousDispatcher->getArgument(0)}, - mlir::ValueRange{previousDispatcher->getArgument(0)}); + loc, match, typeCatchBlock, nextDispatcher, mlir::ValueRange{ehPtr}, + nextDispatchOps); rewriter.setInsertionPointToEnd(nextDispatcher); } else if (auto catchAll = dyn_cast(caseAttr)) { // In case the catch(...) is all we got, `nextDispatcher` shall be // non-empty. - assert(!nextDispatcher->args_empty() && "expected block argument"); - buildAllCase(rewriter, caseRegions[caseCnt], afterTry, nextDispatcher); + assert(nextDispatcher->getArguments().size() == 1 && + "expected one block argument"); + auto ehPtr = nextDispatcher->getArgument(0); + buildAllCase(rewriter, caseRegions[caseCnt], afterTry, nextDispatcher, + ehPtr); nextDispatcher = nullptr; // No more business in try/catch } else if (auto catchUnwind = dyn_cast(caseAttr)) { - assert(nextDispatcher->empty() && "expect empty dispatcher"); - assert(!nextDispatcher->args_empty() && "expected block argument"); + // assert(nextDispatcher->empty() && "expect empty dispatcher"); + // assert(!nextDispatcher->args_empty() && "expected block argument"); + assert(nextDispatcher->getArguments().size() == 2 && + "expected two block argument"); buildUnwindCase(rewriter, caseRegions[caseCnt], nextDispatcher); nextDispatcher = nullptr; // No more business in try/catch } diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index adc8b178b857..8ab5c0fea0cd 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -25,26 +25,26 @@ unsigned long long tc() { // CIR_FLAT: cir.br ^[[AFTER_TRY:.*]] loc // CIR_FLAT: ^[[LPAD]]: // pred: ^bb2 - // CIR_FLAT: %exception_ptr, %[[SEL:.*]] = cir.eh.inflight_exception - // CIR_FLAT: cir.br ^[[BB_INT_IDX_SEL:.*]](%[[SEL]] : {{.*}}) loc + // CIR_FLAT: %[[EH:.*]], %[[SEL:.*]] = cir.eh.inflight_exception + // CIR_FLAT: cir.br ^[[BB_INT_IDX_SEL:.*]](%[[EH]], %[[SEL]] : {{.*}}) loc } catch (int idx) { - // CIR_FLAT: ^[[BB_INT_IDX_SEL]](%[[INT_IDX_SEL:.*]]: !u32i + // CIR_FLAT: ^[[BB_INT_IDX_SEL]](%[[INT_IDX_EH:.*]]: !cir.ptr loc({{.*}}), %[[INT_IDX_SEL:.*]]: !u32i // CIR_FLAT: %[[INT_IDX_ID:.*]] = cir.eh.typeid @_ZTIi // CIR_FLAT: %[[MATCH_CASE_INT_IDX:.*]] = cir.cmp(eq, %[[INT_IDX_SEL]], %[[INT_IDX_ID]]) : !u32i, !cir.bool - // CIR_FLAT: cir.brcond %[[MATCH_CASE_INT_IDX]] ^[[BB_INT_IDX_CATCH:.*]](%[[INT_IDX_SEL]] : {{.*}}), ^[[BB_CHAR_MSG_SEL:.*]](%[[INT_IDX_SEL]] : {{.*}}) loc - // CIR_FLAT: ^[[BB_INT_IDX_CATCH]](%[[INT_IDX_CATCH_SLOT:.*]]: !u32i - // CIR_FLAT: %[[PARAM_INT_IDX:.*]] = cir.catch_param -> !cir.ptr + // CIR_FLAT: cir.brcond %[[MATCH_CASE_INT_IDX]] ^[[BB_INT_IDX_CATCH:.*]](%[[INT_IDX_EH]] : {{.*}}), ^[[BB_CHAR_MSG_CMP:.*]](%[[INT_IDX_EH]], %[[INT_IDX_SEL]] : {{.*}}) loc + // CIR_FLAT: ^[[BB_INT_IDX_CATCH]](%[[INT_IDX_CATCH_SLOT:.*]]: !cir.ptr + // CIR_FLAT: %[[PARAM_INT_IDX:.*]] = cir.catch_param begin %[[INT_IDX_CATCH_SLOT]] -> !cir.ptr // CIR_FLAT: cir.const #cir.int<98> // CIR_FLAT: cir.br ^[[AFTER_TRY]] loc z = 98; idx++; } catch (const char* msg) { - // CIR_FLAT: ^[[BB_CHAR_MSG_SEL]](%[[CHAR_MSG_SEL:.*]]: !u32i + // CIR_FLAT: ^[[BB_CHAR_MSG_CMP]](%[[CHAR_MSG_EH:.*]]: !cir.ptr loc({{.*}}), %[[CHAR_MSG_SEL:.*]]: !u32i // CIR_FLAT: %[[CHAR_MSG_ID:.*]] = cir.eh.typeid @_ZTIPKc // CIR_FLAT: %[[MATCH_CASE_CHAR_MSG:.*]] = cir.cmp(eq, %[[CHAR_MSG_SEL]], %[[CHAR_MSG_ID]]) - // CIR_FLAT: cir.brcond %[[MATCH_CASE_CHAR_MSG]] ^[[BB_CHAR_MSG_CATCH:.*]](%[[CHAR_MSG_SEL]] : {{.*}}), ^[[BB_RESUME:.*]](%[[CHAR_MSG_SEL]] : {{.*}}) loc - // CIR_FLAT: ^[[BB_CHAR_MSG_CATCH]](%[[CHAR_MSG_CATCH_SLOT:.*]]: - // CIR_FLAT: %[[PARAM_CHAR_MSG:.*]] = cir.catch_param -> !cir.ptr + // CIR_FLAT: cir.brcond %[[MATCH_CASE_CHAR_MSG]] ^[[BB_CHAR_MSG_CATCH:.*]](%[[CHAR_MSG_EH]] : {{.*}}), ^[[BB_RESUME:.*]](%[[CHAR_MSG_EH]], %[[CHAR_MSG_SEL]] : {{.*}}) loc + // CIR_FLAT: ^[[BB_CHAR_MSG_CATCH]](%[[CHAR_MSG_CATCH_SLOT:.*]]: !cir.ptr + // CIR_FLAT: %[[PARAM_CHAR_MSG:.*]] = cir.catch_param begin %[[CHAR_MSG_CATCH_SLOT]] -> !cir.ptr // CIR_FLAT: cir.const #cir.int<99> : !s32i // CIR_FLAT: cir.br ^[[AFTER_TRY]] loc z = 99; @@ -97,8 +97,8 @@ unsigned long long tc3() { } catch (...) { // CIR_FLAT: cir.eh.inflight_exception // CIR_FLAT: cir.br ^[[CATCH_ALL:.*]]({{.*}} : {{.*}}) loc - // CIR_FLAT: ^[[CATCH_ALL]](%[[CATCH_ALL_SEL:.*]]: !u32i loc - // CIR_FLAT: cir.catch_param -> !cir.ptr + // CIR_FLAT: ^[[CATCH_ALL]](%[[CATCH_ALL_EH:.*]]: !cir.ptr + // CIR_FLAT: cir.catch_param begin %[[CATCH_ALL_EH]] -> !cir.ptr // CIR_FLAT: cir.const #cir.int<100> : !s32i // CIR_FLAT: cir.br ^[[AFTER_TRY:.*]] loc // CIR_FLAT: ^[[AFTER_TRY]]: // 2 preds From 8510748f3270f2e7e9e96cc5e6080b6072fdb9d9 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Tue, 30 Jul 2024 07:13:40 -0300 Subject: [PATCH 1723/2301] [CIR][ABI] Add AArch64 singed integer and floating point CC lowering (#757) --- .../Transforms/TargetLowering/LowerTypes.cpp | 2 +- .../aarch64-call-conv-lowering-pass.cpp | 43 +++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index 486e771d8b66..20e4dc643df0 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -112,7 +112,7 @@ mlir::Type LowerTypes::convertType(Type T) { /// keeping it here for parity's sake. // Certain CIR types are already ABI-specific, so we just return them. - if (isa(T)) { + if (isa(T)) { return T; } diff --git a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp index 33cb05c25a01..209679ebf383 100644 --- a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp @@ -40,3 +40,46 @@ unsigned long long ULongLong(unsigned long long l) { // CHECK: cir.call @_Z9ULongLongy(%2) : (!u64i) -> !u64i return ULongLong(l); } + + +/// Test call conv lowering for trivial signed cases. /// + +// CHECK: cir.func @_Z4Chara(%arg0: !s8i loc({{.+}})) -> !s8i +char Char(signed char c) { + // CHECK: cir.call @_Z4Chara(%{{.+}}) : (!s8i) -> !s8i + return Char(c); +} +// CHECK: cir.func @_Z5Shorts(%arg0: !s16i loc({{.+}})) -> !s16i +short Short(short s) { + // CHECK: cir.call @_Z5Shorts(%{{.+}}) : (!s16i) -> !s16i + return Short(s); +} +// CHECK: cir.func @_Z3Inti(%arg0: !s32i loc({{.+}})) -> !s32i +int Int(int i) { + // CHECK: cir.call @_Z3Inti(%{{.+}}) : (!s32i) -> !s32i + return Int(i); +} +// CHECK: cir.func @_Z4Longl(%arg0: !s64i loc({{.+}})) -> !s64i +long Long(long l) { + // CHECK: cir.call @_Z4Longl(%{{.+}}) : (!s64i) -> !s64i + return Long(l); +} +// CHECK: cir.func @_Z8LongLongx(%arg0: !s64i loc({{.+}})) -> !s64i +long long LongLong(long long l) { + // CHECK: cir.call @_Z8LongLongx(%{{.+}}) : (!s64i) -> !s64i + return LongLong(l); +} + + +/// Test call conv lowering for floating point. /// + +// CHECK: cir.func @_Z5Floatf(%arg0: !cir.float loc({{.+}})) -> !cir.float +float Float(float f) { + // cir.call @_Z5Floatf(%{{.+}}) : (!cir.float) -> !cir.float + return Float(f); +} +// CHECK: cir.func @_Z6Doubled(%arg0: !cir.double loc({{.+}})) -> !cir.double +double Double(double d) { + // cir.call @_Z6Doubled(%{{.+}}) : (!cir.double) -> !cir.double + return Double(d); +} From e12efa26892492c1eecee1b753a6bc4ba914a6f7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 30 Jul 2024 11:42:15 -0700 Subject: [PATCH 1724/2301] [CIR][FlattenCFG] Exceptions: track type symbols in cir.eh.inflight_exception --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 ++ .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 20 ++++++++++++++++++- clang/test/CIR/Lowering/try-catch.cpp | 5 +++-- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d59c2a0f419e..75e417057825 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3350,8 +3350,10 @@ def EhInflightOp : CIR_Op<"eh.inflight_exception"> { exception path out of `cir.try_call` operations. }]; + let arguments = (ins OptionalAttr:$sym_type_list); let results = (outs VoidPtr:$exception_ptr, UInt32:$type_id); let assemblyFormat = [{ + ($sym_type_list^)? attr-dict }]; diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 644285685ba9..fd729dd65b7a 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -272,6 +272,23 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { rewriter.replaceOpWithNewOp(yieldOp, afterTry); } + mlir::ArrayAttr collectTypeSymbols(mlir::cir::TryOp tryOp) const { + mlir::ArrayAttr caseAttrList = tryOp.getCatchTypesAttr(); + llvm::SmallVector symbolList; + + for (mlir::Attribute caseAttr : caseAttrList) { + auto typeIdGlobal = dyn_cast(caseAttr); + if (!typeIdGlobal) + continue; + symbolList.push_back(typeIdGlobal.getSymbol()); + } + + // Return an empty attribute instead of an empty list... + if (symbolList.empty()) + return {}; + return mlir::ArrayAttr::get(caseAttrList.getContext(), symbolList); + } + mlir::Block *buildCatchers(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, mlir::Block *afterBody, @@ -292,8 +309,9 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto exceptionPtrType = mlir::cir::PointerType::get( mlir::cir::VoidType::get(rewriter.getContext())); auto typeIdType = mlir::cir::IntType::get(getContext(), 32, false); + mlir::ArrayAttr symlist = collectTypeSymbols(tryOp); auto inflightEh = rewriter.create( - loc, exceptionPtrType, typeIdType); + loc, exceptionPtrType, typeIdType, symlist); auto selector = inflightEh.getTypeId(); auto exceptionPtr = inflightEh.getExceptionPtr(); diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 8ab5c0fea0cd..779af3610662 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -25,7 +25,7 @@ unsigned long long tc() { // CIR_FLAT: cir.br ^[[AFTER_TRY:.*]] loc // CIR_FLAT: ^[[LPAD]]: // pred: ^bb2 - // CIR_FLAT: %[[EH:.*]], %[[SEL:.*]] = cir.eh.inflight_exception + // CIR_FLAT: %[[EH:.*]], %[[SEL:.*]] = cir.eh.inflight_exception [@_ZTIi, @_ZTIPKc] // CIR_FLAT: cir.br ^[[BB_INT_IDX_SEL:.*]](%[[EH]], %[[SEL]] : {{.*}}) loc } catch (int idx) { // CIR_FLAT: ^[[BB_INT_IDX_SEL]](%[[INT_IDX_EH:.*]]: !cir.ptr loc({{.*}}), %[[INT_IDX_SEL:.*]]: !u32i @@ -69,6 +69,7 @@ unsigned long long tc2() { z = division(x, y); a++; } catch (int idx) { + // CIR_FLAT: cir.eh.inflight_exception [@_ZTIi, @_ZTIPKc] z = 98; idx++; } catch (const char* msg) { @@ -95,7 +96,7 @@ unsigned long long tc3() { try { z = division(x, y); } catch (...) { - // CIR_FLAT: cir.eh.inflight_exception + // CIR_FLAT: cir.eh.inflight_exception loc // CIR_FLAT: cir.br ^[[CATCH_ALL:.*]]({{.*}} : {{.*}}) loc // CIR_FLAT: ^[[CATCH_ALL]](%[[CATCH_ALL_EH:.*]]: !cir.ptr // CIR_FLAT: cir.catch_param begin %[[CATCH_ALL_EH]] -> !cir.ptr From 1f4381054849dfee199d0a7d0a152f2cd5314947 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 30 Jul 2024 17:28:02 -0700 Subject: [PATCH 1725/2301] [CIR][LowerToLLVM][NFC] Refactor CallOp in prep for TryCall lowering --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 59 +++++++++++-------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 339ca0d98451..646877a15fe0 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -830,6 +830,37 @@ struct ConvertCIRToLLVMPass virtual StringRef getArgument() const override { return "cir-flat-to-llvm"; } }; +mlir::LogicalResult +rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter, + mlir::FlatSymbolRefAttr calleeAttr) { + llvm::SmallVector llvmResults; + auto cirResults = op->getResultTypes(); + + if (converter->convertTypes(cirResults, llvmResults).failed()) + return mlir::failure(); + + if (calleeAttr) { // direct call + rewriter.replaceOpWithNewOp(op, llvmResults, calleeAttr, + callOperands); + } else { // indirect call + assert(op->getOperands().size() && + "operands list must no be empty for the indirect call"); + auto typ = op->getOperands().front().getType(); + assert(isa(typ) && "expected pointer type"); + auto ptyp = dyn_cast(typ); + auto ftyp = dyn_cast(ptyp.getPointee()); + assert(ftyp && "expected a pointer to a function as the first operand"); + + rewriter.replaceOpWithNewOp( + op, + dyn_cast(converter->convertType(ftyp)), + callOperands); + } + return mlir::success(); +} + class CIRCallLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -837,31 +868,9 @@ class CIRCallLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(mlir::cir::CallOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - llvm::SmallVector llvmResults; - auto cirResults = op.getResultTypes(); - auto *converter = getTypeConverter(); - - if (converter->convertTypes(cirResults, llvmResults).failed()) - return mlir::failure(); - - if (auto callee = op.getCalleeAttr()) { // direct call - rewriter.replaceOpWithNewOp( - op, llvmResults, op.getCalleeAttr(), adaptor.getOperands()); - } else { // indirect call - assert(op.getOperands().size() && - "operands list must no be empty for the indirect call"); - auto typ = op.getOperands().front().getType(); - assert(isa(typ) && "expected pointer type"); - auto ptyp = dyn_cast(typ); - auto ftyp = dyn_cast(ptyp.getPointee()); - assert(ftyp && "expected a pointer to a function as the first operand"); - - rewriter.replaceOpWithNewOp( - op, - dyn_cast(converter->convertType(ftyp)), - adaptor.getOperands()); - } - return mlir::success(); + return rewriteToCallOrInvoke(op.getOperation(), adaptor.getOperands(), + rewriter, getTypeConverter(), + op.getCalleeAttr()); } }; From bdb9ef21ce551bd51360ec97bb4ab632d4bae7c5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 30 Jul 2024 18:13:42 -0700 Subject: [PATCH 1726/2301] [CIR][LowerToLLVM] Exceptions: lower cir.try_call to llvm's invoke Incremental step: we need the full try/catch to test `cir.try_call`, so testing coming next with the other necessary operations. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 59 ++++++++++++++----- clang/test/CIR/Lowering/try-catch.cpp | 3 +- 2 files changed, 46 insertions(+), 16 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 646877a15fe0..7665231a98a3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -834,7 +834,9 @@ mlir::LogicalResult rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter, - mlir::FlatSymbolRefAttr calleeAttr) { + mlir::FlatSymbolRefAttr calleeAttr, bool invoke = false, + mlir::Block *continueBlock = nullptr, + mlir::Block *landingPadBlock = nullptr) { llvm::SmallVector llvmResults; auto cirResults = op->getResultTypes(); @@ -842,8 +844,13 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, return mlir::failure(); if (calleeAttr) { // direct call - rewriter.replaceOpWithNewOp(op, llvmResults, calleeAttr, - callOperands); + if (invoke) + rewriter.replaceOpWithNewOp( + op, llvmResults, calleeAttr, callOperands, continueBlock, + mlir::ValueRange{}, landingPadBlock, mlir::ValueRange{}); + else + rewriter.replaceOpWithNewOp(op, llvmResults, + calleeAttr, callOperands); } else { // indirect call assert(op->getOperands().size() && "operands list must no be empty for the indirect call"); @@ -853,10 +860,17 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, auto ftyp = dyn_cast(ptyp.getPointee()); assert(ftyp && "expected a pointer to a function as the first operand"); - rewriter.replaceOpWithNewOp( - op, - dyn_cast(converter->convertType(ftyp)), - callOperands); + if (invoke) { + auto llvmFnTy = + dyn_cast(converter->convertType(ftyp)); + rewriter.replaceOpWithNewOp( + op, llvmFnTy, mlir::FlatSymbolRefAttr{}, callOperands, continueBlock, + mlir::ValueRange{}, landingPadBlock, mlir::ValueRange{}); + } else + rewriter.replaceOpWithNewOp( + op, + dyn_cast(converter->convertType(ftyp)), + callOperands); } return mlir::success(); } @@ -874,6 +888,20 @@ class CIRCallLowering : public mlir::OpConversionPattern { } }; +class CIRTryCallLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::TryCallOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + return rewriteToCallOrInvoke(op.getOperation(), adaptor.getOperands(), + rewriter, getTypeConverter(), + op.getCalleeAttr()); + } +}; + class CIRAllocaLowering : public mlir::OpConversionPattern { public: @@ -3461,14 +3489,15 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRBitPopcountOpLowering, CIRAtomicCmpXchgLowering, CIRAtomicXchgLowering, CIRAtomicFetchLowering, CIRByteswapOpLowering, CIRRotateOpLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, - CIRUnaryOpLowering, CIRBinOpLowering, CIRBinOpOverflowOpLowering, - CIRShiftOpLowering, CIRLoadLowering, CIRConstantLowering, - CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, CIRCastOpLowering, - CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRComplexCreateOpLowering, - CIRComplexRealOpLowering, CIRComplexImagOpLowering, - CIRComplexRealPtrOpLowering, CIRComplexImagPtrOpLowering, - CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, - CIRBrOpLowering, CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, + CIRTryCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, + CIRBinOpOverflowOpLowering, CIRShiftOpLowering, CIRLoadLowering, + CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, + CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, + CIRComplexCreateOpLowering, CIRComplexRealOpLowering, + CIRComplexImagOpLowering, CIRComplexRealPtrOpLowering, + CIRComplexImagPtrOpLowering, CIRVAStartLowering, CIRVAEndLowering, + CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, + CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 779af3610662..551e9a08e9f3 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -1,6 +1,7 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.flat.cir // RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s - +// RUN_DISABLED: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll +// RUN_DISABLED: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_LLVM %s double division(int a, int b); // CIR: cir.func @_Z2tcv() From d6cee34a94cc42a85146dbe6efebc6cff91ed81a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 30 Jul 2024 18:19:27 -0700 Subject: [PATCH 1727/2301] [CIR][LowerToLLVM][NFC] Fix formatting error and add skeleton for EhInflightOp --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7665231a98a3..ca9fb629353b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -872,7 +872,7 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, dyn_cast(converter->convertType(ftyp)), callOperands); } - return mlir::success(); + return mlir::success(); } class CIRCallLowering : public mlir::OpConversionPattern { @@ -902,6 +902,18 @@ class CIRTryCallLowering } }; +class CIREhInflightOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::EhInflightOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + return mlir::failure(); + } +}; + class CIRAllocaLowering : public mlir::OpConversionPattern { public: @@ -3489,15 +3501,15 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRBitPopcountOpLowering, CIRAtomicCmpXchgLowering, CIRAtomicXchgLowering, CIRAtomicFetchLowering, CIRByteswapOpLowering, CIRRotateOpLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, - CIRTryCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, - CIRBinOpOverflowOpLowering, CIRShiftOpLowering, CIRLoadLowering, - CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, - CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, - CIRComplexCreateOpLowering, CIRComplexRealOpLowering, - CIRComplexImagOpLowering, CIRComplexRealPtrOpLowering, - CIRComplexImagPtrOpLowering, CIRVAStartLowering, CIRVAEndLowering, - CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, - CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, + CIRTryCallLowering, CIREhInflightOpLowering, CIRUnaryOpLowering, + CIRBinOpLowering, CIRBinOpOverflowOpLowering, CIRShiftOpLowering, + CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, + CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, + CIRGetGlobalOpLowering, CIRComplexCreateOpLowering, + CIRComplexRealOpLowering, CIRComplexImagOpLowering, + CIRComplexRealPtrOpLowering, CIRComplexImagPtrOpLowering, + CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, + CIRBrOpLowering, CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, From 01d0df5a88cca44f64b1678a6b5cd5aa9b452c70 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 31 Jul 2024 11:25:53 -0700 Subject: [PATCH 1728/2301] [CIR][LowerToLLVM] Exceptions: more lowering work for `cir.try_call` and `cir.eh.inflight_exception` - Fix parser problems that were preventing testing and fix additional lowering missing for `cir.try_call`. - Add lowering from scratch for `cir.eh.inflight_exception`. End-to-end requires full exception support (still more lowering TBD to get there). --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 92 ++++++++++++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 83 ++++++++++++++-- clang/test/CIR/Lowering/exceptions.cir | 97 +++++++++++++++++++ 3 files changed, 258 insertions(+), 14 deletions(-) create mode 100644 clang/test/CIR/Lowering/exceptions.cir diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index de62c91439de..11b198acae92 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2349,11 +2349,60 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { return success(); } -static ::mlir::ParseResult -parseTryCallBranches(::mlir::OpAsmParser &parser, - ::mlir::OperationState &result) { - parser.emitError(parser.getCurrentLocation(), "NYI"); - return failure(); +static mlir::ParseResult +parseTryCallBranches(mlir::OpAsmParser &parser, mlir::OperationState &result, + llvm::SmallVectorImpl + &continueOperands, + llvm::SmallVectorImpl + &landingPadOperands, + llvm::SmallVectorImpl &continueTypes, + llvm::SmallVectorImpl &landingPadTypes, + llvm::SMLoc &continueOperandsLoc, + llvm::SMLoc &landingPadOperandsLoc) { + mlir::Block *continueSuccessor = nullptr; + mlir::Block *landingPadSuccessor = nullptr; + + if (parser.parseSuccessor(continueSuccessor)) + return mlir::failure(); + if (mlir::succeeded(parser.parseOptionalLParen())) { + continueOperandsLoc = parser.getCurrentLocation(); + if (parser.parseOperandList(continueOperands)) + return mlir::failure(); + if (parser.parseColon()) + return mlir::failure(); + + if (parser.parseTypeList(continueTypes)) + return mlir::failure(); + if (parser.parseRParen()) + return mlir::failure(); + } + if (parser.parseComma()) + return mlir::failure(); + + if (parser.parseSuccessor(landingPadSuccessor)) + return mlir::failure(); + if (mlir::succeeded(parser.parseOptionalLParen())) { + + landingPadOperandsLoc = parser.getCurrentLocation(); + if (parser.parseOperandList(landingPadOperands)) + return mlir::failure(); + if (parser.parseColon()) + return mlir::failure(); + + if (parser.parseTypeList(landingPadTypes)) + return mlir::failure(); + if (parser.parseRParen()) + return mlir::failure(); + } + { + auto loc = parser.getCurrentLocation(); + (void)loc; + if (parser.parseOptionalAttrDict(result.attributes)) + return mlir::failure(); + } + result.addSuccessors(continueSuccessor); + result.addSuccessors(landingPadSuccessor); + return mlir::success(); } static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, @@ -2367,6 +2416,14 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, llvm::ArrayRef<::mlir::Type> operandsTypes; llvm::ArrayRef<::mlir::Type> allResultTypes; + // Control flow related + llvm::SmallVector continueOperands; + llvm::SMLoc continueOperandsLoc; + llvm::SmallVector continueTypes; + llvm::SmallVector landingPadOperands; + llvm::SMLoc landingPadOperandsLoc; + llvm::SmallVector landingPadTypes; + if (::mlir::succeeded(parser.parseOptionalKeyword("exception"))) result.addAttribute("exception", parser.getBuilder().getUnitAttr()); @@ -2390,7 +2447,10 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, return ::mlir::failure(); if (hasDestinationBlocks) - if (parseTryCallBranches(parser, result).failed()) + if (parseTryCallBranches(parser, result, continueOperands, + landingPadOperands, continueTypes, landingPadTypes, + continueOperandsLoc, landingPadOperandsLoc) + .failed()) return ::mlir::failure(); auto &builder = parser.getBuilder(); @@ -2423,6 +2483,23 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, if (parser.resolveOperands(ops, operandsTypes, opsLoc, result.operands)) return ::mlir::failure(); + + if (hasDestinationBlocks) { + // The TryCall ODS layout is: cont, landing_pad, operands. + llvm::copy(::llvm::ArrayRef( + {static_cast(continueOperands.size()), + static_cast(landingPadOperands.size()), + static_cast(ops.size())}), + result.getOrAddProperties() + .operandSegmentSizes.begin()); + if (parser.resolveOperands(continueOperands, continueTypes, + continueOperandsLoc, result.operands)) + return ::mlir::failure(); + if (parser.resolveOperands(landingPadOperands, landingPadTypes, + landingPadOperandsLoc, result.operands)) + return ::mlir::failure(); + } + return ::mlir::success(); } @@ -2553,7 +2630,8 @@ cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result) { - return parseCallCommon(parser, result, getExtraAttrsAttrName(result.name)); + return parseCallCommon(parser, result, getExtraAttrsAttrName(result.name), + /*hasDestinationBlocks=*/true); } void TryCallOp::print(::mlir::OpAsmPrinter &state) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ca9fb629353b..dee10d811b07 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -834,7 +834,7 @@ mlir::LogicalResult rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter, - mlir::FlatSymbolRefAttr calleeAttr, bool invoke = false, + mlir::FlatSymbolRefAttr calleeAttr, mlir::Block *continueBlock = nullptr, mlir::Block *landingPadBlock = nullptr) { llvm::SmallVector llvmResults; @@ -844,7 +844,7 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, return mlir::failure(); if (calleeAttr) { // direct call - if (invoke) + if (landingPadBlock) rewriter.replaceOpWithNewOp( op, llvmResults, calleeAttr, callOperands, continueBlock, mlir::ValueRange{}, landingPadBlock, mlir::ValueRange{}); @@ -860,7 +860,7 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, auto ftyp = dyn_cast(ptyp.getPointee()); assert(ftyp && "expected a pointer to a function as the first operand"); - if (invoke) { + if (landingPadBlock) { auto llvmFnTy = dyn_cast(converter->convertType(ftyp)); rewriter.replaceOpWithNewOp( @@ -896,9 +896,9 @@ class CIRTryCallLowering mlir::LogicalResult matchAndRewrite(mlir::cir::TryCallOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - return rewriteToCallOrInvoke(op.getOperation(), adaptor.getOperands(), - rewriter, getTypeConverter(), - op.getCalleeAttr()); + return rewriteToCallOrInvoke( + op.getOperation(), adaptor.getOperands(), rewriter, getTypeConverter(), + op.getCalleeAttr(), op.getCont(), op.getLandingPad()); } }; @@ -910,7 +910,76 @@ class CIREhInflightOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::EhInflightOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - return mlir::failure(); + mlir::Location loc = op.getLoc(); + // Create the landing pad type: struct { ptr, i32 } + mlir::MLIRContext *ctx = rewriter.getContext(); + auto llvmPtr = mlir::LLVM::LLVMPointerType::get(ctx); + llvm::SmallVector structFields; + structFields.push_back(llvmPtr); + structFields.push_back(rewriter.getI32Type()); + + auto llvmLandingPadStructTy = + mlir::LLVM::LLVMStructType::getLiteral(ctx, structFields); + mlir::ArrayAttr symListAttr = op.getSymTypeListAttr(); + mlir::SmallVector symAddrs; + + auto llvmFn = op->getParentOfType(); + assert(llvmFn && "expected LLVM function parent"); + mlir::Block *entryBlock = &llvmFn.getRegion().front(); + assert(entryBlock->isEntryBlock()); + + // %x = landingpad { ptr, i32 } + if (symListAttr) { + // catch ptr @_ZTIi + // catch ptr @_ZTIPKc + for (mlir::Attribute attr : op.getSymTypeListAttr()) { + auto symAttr = cast(attr); + // Generate `llvm.mlir.addressof` for each symbol, and place those + // operations in the LLVM function entry basic block. + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPointToStart(entryBlock); + mlir::Value addrOp = rewriter.create( + loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), + symAttr.getValue()); + symAddrs.push_back(addrOp); + } + } else { + // catch ptr null + mlir::Value nullOp = rewriter.create( + loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext())); + symAddrs.push_back(nullOp); + } + + // %slot = extractvalue { ptr, i32 } %x, 0 + // %selector = extractvalue { ptr, i32 } %x, 1 + auto padOp = rewriter.create( + loc, llvmLandingPadStructTy, symAddrs); + SmallVector slotIdx = {0}; + SmallVector selectorIdx = {1}; + + mlir::Value slot = + rewriter.create(loc, padOp, slotIdx); + mlir::Value selector = + rewriter.create(loc, padOp, selectorIdx); + + rewriter.replaceOp(op, mlir::ValueRange{slot, selector}); + + // Landing pads are required to be in LLVM functions with personality + // attribute. FIXME: for now hardcode personality creation in order to start + // adding exception tests, once we annotate CIR with such information, + // change it to be in FuncOp lowering instead. + { + mlir::OpBuilder::InsertionGuard guard(rewriter); + // Insert personality decl before the current function. + rewriter.setInsertionPoint(llvmFn); + auto personalityFnTy = + mlir::LLVM::LLVMFunctionType::get(rewriter.getI32Type(), {}, + /*isVarArg=*/true); + auto personalityFn = rewriter.create( + loc, "__gxx_personality_v0", personalityFnTy); + llvmFn.setPersonality(personalityFn.getName()); + } + return mlir::success(); } }; diff --git a/clang/test/CIR/Lowering/exceptions.cir b/clang/test/CIR/Lowering/exceptions.cir new file mode 100644 index 000000000000..f82c481002eb --- /dev/null +++ b/clang/test/CIR/Lowering/exceptions.cir @@ -0,0 +1,97 @@ +// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +!s32i = !cir.int +!s8i = !cir.int +!u32i = !cir.int +!u64i = !cir.int +!u8i = !cir.int +!void = !cir.void + +module @"try-catch.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior, cir.triple = "x86_64-unknown-linux-gnu", dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>, #dlti.dl_entry<"dlti.endianness", "little">>, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"} { + cir.global "private" constant external @_ZTIi : !cir.ptr + cir.global "private" constant external @_ZTIPKc : !cir.ptr + cir.func private @_Z8divisionii(!s32i, !s32i) -> !cir.double + // LLVM: @_Z2tcv() personality ptr @__gxx_personality_v0 + cir.func @_Z2tcv() -> !u64i { + %0 = cir.alloca !u64i, !cir.ptr, ["__retval"] {alignment = 8 : i64} + %1 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %2 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} + %3 = cir.alloca !u64i, !cir.ptr, ["z"] {alignment = 8 : i64} + %4 = cir.const #cir.int<50> : !s32i + cir.store %4, %1 : !s32i, !cir.ptr + %5 = cir.const #cir.int<3> : !s32i + cir.store %5, %2 : !s32i, !cir.ptr + cir.br ^bb1 + ^bb1: + %6 = cir.alloca !cir.ptr, !cir.ptr>, ["msg"] {alignment = 8 : i64} + %7 = cir.alloca !s32i, !cir.ptr, ["idx"] {alignment = 4 : i64} + cir.br ^bb2 + ^bb2: + %8 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + %9 = cir.const #cir.int<4> : !s32i + cir.store %9, %8 : !s32i, !cir.ptr + %10 = cir.load %1 : !cir.ptr, !s32i + %11 = cir.load %2 : !cir.ptr, !s32i + %12 = cir.try_call @_Z8divisionii(%10, %11) ^bb3, ^bb4 : (!s32i, !s32i) -> !cir.double + // LLVM: invoke double @_Z8divisionii + // LLVM: to label %[[CONT:.*]] unwind label %[[UNWIND:.*]], + ^bb3: + // LLVM: [[CONT]]: + %13 = cir.cast(float_to_int, %12 : !cir.double), !u64i + cir.store %13, %3 : !u64i, !cir.ptr + %14 = cir.load %8 : !cir.ptr, !s32i + %15 = cir.unary(inc, %14) : !s32i, !s32i + cir.store %15, %8 : !s32i, !cir.ptr + cir.br ^bb10 + ^bb4: + // LLVM: [[UNWIND]]: + // LLVM: %[[EHINFO:.*]] = landingpad { ptr, i32 } + // LLVM: catch ptr @_ZTIi + // LLVM: catch ptr @_ZTIPKc + %exception_ptr, %type_id = cir.eh.inflight_exception [@_ZTIi, @_ZTIPKc] + // LLVM: extractvalue { ptr, i32 } %[[EHINFO]], 0, !dbg !29 + // LLVM: extractvalue { ptr, i32 } %[[EHINFO]], 1, !dbg !29 + cir.br ^bb10 + // TODO: TBD + // cir.br ^bb5(%exception_ptr, %type_id : !cir.ptr, !u32i) + // ^bb5(%16: !cir.ptr, %17: !u32): + // %18 = cir.eh.typeid @_ZTIi + // %19 = cir.cmp(eq, %17, %18) : !u32i, !cir.bool + // cir.brcond %19 ^bb6(%16 : !cir.ptr), ^bb7(%16, %17 : !cir.ptr, !u32i) + // ^bb6(%20: !cir.ptr): + // %21 = cir.catch_param begin %20 -> !cir.ptr + // %22 = cir.load %21 : !cir.ptr, !s32i + // cir.store %22, %7 : !s32i, !cir.ptr + // %23 = cir.const #cir.int<98> : !s32i + // %24 = cir.cast(integral, %23 : !s32i), !u64i + // cir.store %24, %3 : !u64i, !cir.ptr + // %25 = cir.load %7 : !cir.ptr, !s32i + // %26 = cir.unary(inc, %25) : !s32i, !s32i + // cir.store %26, %7 : !s32i, !cir.ptr + // cir.catch_param end + // cir.br ^bb10 + // ^bb7(%27: !cir.ptr, %28: !u32i): + // %29 = cir.eh.typeid @_ZTIPKc + // %30 = cir.cmp(eq, %28, %29) : !u32i, !cir.bool + // cir.brcond %30 ^bb8(%27 : !cir.ptr), ^bb9(%27, %28 : !cir.ptr, !u32i) + // ^bb8(%31: !cir.ptr): + // %32 = cir.catch_param begin %31 -> !cir.ptr + // cir.store %32, %6 : !cir.ptr, !cir.ptr> + // %33 = cir.const #cir.int<99> : !s32i + // %34 = cir.cast(integral, %33 : !s32i), !u64i + // cir.store %34, %3 : !u64i, !cir.ptr + // %35 = cir.load %6 : !cir.ptr>, !cir.ptr + // %36 = cir.const #cir.int<0> : !s32i + // %37 = cir.ptr_stride(%35 : !cir.ptr, %36 : !s32i), !cir.ptr + // cir.catch_param end + // cir.br ^bb10 + // ^bb9(%38: !cir.ptr, %39: !u32i): + // cir.resume + ^bb10: + %40 = cir.load %3 : !cir.ptr, !u64i + cir.store %40, %0 : !u64i, !cir.ptr + %41 = cir.load %0 : !cir.ptr, !u64i + cir.return %41 : !u64i + } +} From f35a75da7c2fdc42122139ef8c011ff526522b40 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 31 Jul 2024 15:52:06 -0700 Subject: [PATCH 1729/2301] [CIR][LowerToLLVM] Exceptions: lower cir.eh.typeid --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 23 +++++++++++++++++-- clang/test/CIR/Lowering/exceptions.cir | 9 ++++---- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index dee10d811b07..cc494992c6bb 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3561,6 +3561,25 @@ class CIRUndefOpLowering } }; +class CIREhTypeIdOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::EhTypeIdOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::Value addrOp = rewriter.create( + op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), + op.getTypeSymAttr()); + mlir::LLVM::CallIntrinsicOp newOp = createCallLLVMIntrinsicOp( + rewriter, op.getLoc(), "llvm.eh.typeid.for.p0", rewriter.getI32Type(), + mlir::ValueRange{addrOp}); + rewriter.replaceOp(op, newOp); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -3596,8 +3615,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRRintOpLowering, CIRRoundOpLowering, CIRSinOpLowering, CIRSqrtOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, CIRFModOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering, CIRPowOpLowering, - CIRClearCacheOpLowering, CIRUndefOpLowering>(converter, - patterns.getContext()); + CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering>( + converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/exceptions.cir b/clang/test/CIR/Lowering/exceptions.cir index f82c481002eb..42a3fae8666a 100644 --- a/clang/test/CIR/Lowering/exceptions.cir +++ b/clang/test/CIR/Lowering/exceptions.cir @@ -52,12 +52,13 @@ module @"try-catch.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.si %exception_ptr, %type_id = cir.eh.inflight_exception [@_ZTIi, @_ZTIPKc] // LLVM: extractvalue { ptr, i32 } %[[EHINFO]], 0, !dbg !29 // LLVM: extractvalue { ptr, i32 } %[[EHINFO]], 1, !dbg !29 + cir.br ^bb5(%exception_ptr, %type_id : !cir.ptr, !u32i) + ^bb5(%16: !cir.ptr, %17: !u32i): + %18 = cir.eh.typeid @_ZTIi + // LLVM: call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi) + %19 = cir.cmp(eq, %17, %18) : !u32i, !cir.bool cir.br ^bb10 // TODO: TBD - // cir.br ^bb5(%exception_ptr, %type_id : !cir.ptr, !u32i) - // ^bb5(%16: !cir.ptr, %17: !u32): - // %18 = cir.eh.typeid @_ZTIi - // %19 = cir.cmp(eq, %17, %18) : !u32i, !cir.bool // cir.brcond %19 ^bb6(%16 : !cir.ptr), ^bb7(%16, %17 : !cir.ptr, !u32i) // ^bb6(%20: !cir.ptr): // %21 = cir.catch_param begin %20 -> !cir.ptr From c1df87b9f2f300aa121fad5506b6e6c7f6a365d9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 31 Jul 2024 16:18:55 -0700 Subject: [PATCH 1730/2301] [CIR][LowerToLLVM] Exceptions: lower cir.catch_param --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 59 ++++++++++++++- clang/test/CIR/Lowering/exceptions.cir | 71 ++++++++++--------- 3 files changed, 101 insertions(+), 34 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 75e417057825..23a584422200 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3333,6 +3333,11 @@ def CatchParamOp : CIR_Op<"catch_param"> { attr-dict }]; + let extraClassDeclaration = [{ + bool isBegin() { return getKind() == mlir::cir::CatchParamKind::begin; } + bool isEnd() { return getKind() == mlir::cir::CatchParamKind::end; } + }]; + let hasVerifier = 1; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index cc494992c6bb..48720508fa48 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3580,6 +3580,61 @@ class CIREhTypeIdOpLowering } }; +class CIRCatchParamOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::CatchParamOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + if (op.isBegin()) { + // Get or create `declare ptr @__cxa_begin_catch(ptr)` + llvm::StringRef cxaBeginCatch = "__cxa_begin_catch"; + auto *sourceSymbol = mlir::SymbolTable::lookupSymbolIn( + op->getParentOfType(), cxaBeginCatch); + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + if (!sourceSymbol) { + auto catchFnTy = + mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {llvmPtrTy}, + /*isVarArg=*/false); + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint( + op->getParentOfType()); + auto catchFn = rewriter.create( + op.getLoc(), cxaBeginCatch, catchFnTy); + sourceSymbol = catchFn; + } + rewriter.replaceOpWithNewOp( + op, mlir::TypeRange{llvmPtrTy}, cxaBeginCatch, + mlir::ValueRange{adaptor.getExceptionPtr()}); + return mlir::success(); + } else if (op.isEnd()) { + // Get or create `declare void @__cxa_end_catch()` + llvm::StringRef cxaEndCatch = "__cxa_end_catch"; + auto *sourceSymbol = mlir::SymbolTable::lookupSymbolIn( + op->getParentOfType(), cxaEndCatch); + auto llvmVoidTy = mlir::LLVM::LLVMVoidType::get(rewriter.getContext()); + if (!sourceSymbol) { + auto catchFnTy = mlir::LLVM::LLVMFunctionType::get(llvmVoidTy, {}, + /*isVarArg=*/false); + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint( + op->getParentOfType()); + auto catchFn = rewriter.create( + op.getLoc(), cxaEndCatch, catchFnTy); + sourceSymbol = catchFn; + } + rewriter.create(op.getLoc(), mlir::TypeRange{}, + cxaEndCatch, mlir::ValueRange{}); + rewriter.eraseOp(op); + return mlir::success(); + } + llvm_unreachable("only begin/end supposed to make to lowering stage"); + return mlir::failure(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { patterns.add(patterns.getContext()); @@ -3615,8 +3670,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRRintOpLowering, CIRRoundOpLowering, CIRSinOpLowering, CIRSqrtOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, CIRFModOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering, CIRPowOpLowering, - CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering>( - converter, patterns.getContext()); + CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering, + CIRCatchParamOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/exceptions.cir b/clang/test/CIR/Lowering/exceptions.cir index 42a3fae8666a..ae2e511526c6 100644 --- a/clang/test/CIR/Lowering/exceptions.cir +++ b/clang/test/CIR/Lowering/exceptions.cir @@ -50,44 +50,51 @@ module @"try-catch.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.si // LLVM: catch ptr @_ZTIi // LLVM: catch ptr @_ZTIPKc %exception_ptr, %type_id = cir.eh.inflight_exception [@_ZTIi, @_ZTIPKc] - // LLVM: extractvalue { ptr, i32 } %[[EHINFO]], 0, !dbg !29 - // LLVM: extractvalue { ptr, i32 } %[[EHINFO]], 1, !dbg !29 + // LLVM: extractvalue { ptr, i32 } %[[EHINFO]], 0 + // LLVM: extractvalue { ptr, i32 } %[[EHINFO]], 1 cir.br ^bb5(%exception_ptr, %type_id : !cir.ptr, !u32i) ^bb5(%16: !cir.ptr, %17: !u32i): %18 = cir.eh.typeid @_ZTIi // LLVM: call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi) %19 = cir.cmp(eq, %17, %18) : !u32i, !cir.bool + cir.brcond %19 ^bb6(%16 : !cir.ptr), ^bb7(%16, %17 : !cir.ptr, !u32i) + ^bb6(%20: !cir.ptr): + %21 = cir.catch_param begin %20 -> !cir.ptr + // LLVM: %[[EH_IDX:.*]] = phi ptr + // LLVM: call ptr @__cxa_begin_catch(ptr %[[EH_IDX]]) + %22 = cir.load %21 : !cir.ptr, !s32i + cir.store %22, %7 : !s32i, !cir.ptr + %23 = cir.const #cir.int<98> : !s32i + %24 = cir.cast(integral, %23 : !s32i), !u64i + cir.store %24, %3 : !u64i, !cir.ptr + %25 = cir.load %7 : !cir.ptr, !s32i + %26 = cir.unary(inc, %25) : !s32i, !s32i + cir.store %26, %7 : !s32i, !cir.ptr + cir.catch_param end + // LLVM: call void @__cxa_end_catch() cir.br ^bb10 - // TODO: TBD - // cir.brcond %19 ^bb6(%16 : !cir.ptr), ^bb7(%16, %17 : !cir.ptr, !u32i) - // ^bb6(%20: !cir.ptr): - // %21 = cir.catch_param begin %20 -> !cir.ptr - // %22 = cir.load %21 : !cir.ptr, !s32i - // cir.store %22, %7 : !s32i, !cir.ptr - // %23 = cir.const #cir.int<98> : !s32i - // %24 = cir.cast(integral, %23 : !s32i), !u64i - // cir.store %24, %3 : !u64i, !cir.ptr - // %25 = cir.load %7 : !cir.ptr, !s32i - // %26 = cir.unary(inc, %25) : !s32i, !s32i - // cir.store %26, %7 : !s32i, !cir.ptr - // cir.catch_param end - // cir.br ^bb10 - // ^bb7(%27: !cir.ptr, %28: !u32i): - // %29 = cir.eh.typeid @_ZTIPKc - // %30 = cir.cmp(eq, %28, %29) : !u32i, !cir.bool - // cir.brcond %30 ^bb8(%27 : !cir.ptr), ^bb9(%27, %28 : !cir.ptr, !u32i) - // ^bb8(%31: !cir.ptr): - // %32 = cir.catch_param begin %31 -> !cir.ptr - // cir.store %32, %6 : !cir.ptr, !cir.ptr> - // %33 = cir.const #cir.int<99> : !s32i - // %34 = cir.cast(integral, %33 : !s32i), !u64i - // cir.store %34, %3 : !u64i, !cir.ptr - // %35 = cir.load %6 : !cir.ptr>, !cir.ptr - // %36 = cir.const #cir.int<0> : !s32i - // %37 = cir.ptr_stride(%35 : !cir.ptr, %36 : !s32i), !cir.ptr - // cir.catch_param end - // cir.br ^bb10 - // ^bb9(%38: !cir.ptr, %39: !u32i): + ^bb7(%27: !cir.ptr, %28: !u32i): + %29 = cir.eh.typeid @_ZTIPKc + // LLVM: call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIPKc) + %30 = cir.cmp(eq, %28, %29) : !u32i, !cir.bool + cir.brcond %30 ^bb8(%27 : !cir.ptr), ^bb9(%27, %28 : !cir.ptr, !u32i) + ^bb8(%31: !cir.ptr): + %32 = cir.catch_param begin %31 -> !cir.ptr + // LLVM: %[[EH_MSG:.*]] = phi ptr + // LLVM: call ptr @__cxa_begin_catch(ptr %[[EH_MSG]]) + cir.store %32, %6 : !cir.ptr, !cir.ptr> + %33 = cir.const #cir.int<99> : !s32i + %34 = cir.cast(integral, %33 : !s32i), !u64i + cir.store %34, %3 : !u64i, !cir.ptr + %35 = cir.load %6 : !cir.ptr>, !cir.ptr + %36 = cir.const #cir.int<0> : !s32i + %37 = cir.ptr_stride(%35 : !cir.ptr, %36 : !s32i), !cir.ptr + cir.catch_param end + // LLVM: call void @__cxa_end_catch() + cir.br ^bb10 + ^bb9(%38: !cir.ptr, %39: !u32i): + cir.br ^bb10 + // TODO: support resume. // cir.resume ^bb10: %40 = cir.load %3 : !cir.ptr, !u64i From b82183b0b50e4ea6ba942b1ae63c41c4bb4fa695 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 31 Jul 2024 16:47:34 -0700 Subject: [PATCH 1731/2301] [CIR][NFC] Fix few unused vars and tablegen format --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 ++-- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 23a584422200..49ee8112b7bf 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3334,8 +3334,8 @@ def CatchParamOp : CIR_Op<"catch_param"> { }]; let extraClassDeclaration = [{ - bool isBegin() { return getKind() == mlir::cir::CatchParamKind::begin; } - bool isEnd() { return getKind() == mlir::cir::CatchParamKind::end; } + bool isBegin() { return getKind() == mlir::cir::CatchParamKind::begin; } + bool isEnd() { return getKind() == mlir::cir::CatchParamKind::end; } }]; let hasVerifier = 1; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 48720508fa48..3e0c766df0f7 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3603,7 +3603,6 @@ class CIRCatchParamOpLowering op->getParentOfType()); auto catchFn = rewriter.create( op.getLoc(), cxaBeginCatch, catchFnTy); - sourceSymbol = catchFn; } rewriter.replaceOpWithNewOp( op, mlir::TypeRange{llvmPtrTy}, cxaBeginCatch, @@ -3623,7 +3622,6 @@ class CIRCatchParamOpLowering op->getParentOfType()); auto catchFn = rewriter.create( op.getLoc(), cxaEndCatch, catchFnTy); - sourceSymbol = catchFn; } rewriter.create(op.getLoc(), mlir::TypeRange{}, cxaEndCatch, mlir::ValueRange{}); From fd51b4e77ad778db4f10cba1ab17c941c1875e84 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 2 Aug 2024 06:26:01 +0800 Subject: [PATCH 1732/2301] [CIR][CodeGen][Lowering] Better handling of alloca address space with unified AS (#682) `TargetCodeGenInfo::getASTAllocaAddressSpace` is a bad design because it requires the targets return `LangAS`, which enforce the targets to consider which languages could be their upstream and unnecessarily complicate the target info. Unified AS in CIR is a better abstraction level for this kind of target info. Apart from that, the languages also has some requirements on the result address space of alloca. ```cpp void func() { int x; // Here, the AS of pointer `&x` is the alloca AS defined by the language. } ``` When we have inconsistency between the alloca AS defined by the language and the one from target info, we have to perform `addrspacecast` from the target one to the language one. This PR includes * New method `CGM.getLangTempAllocaAddressSpace` which explicitly specifies the alloca address space defined by languages. It replaces the vague `LangAS::Default` in the AS comparisons from OG CodeGen. * Replace `getASTAllocaAddressSpace` with `getCIRAllocaAddressSpace`, which returns CIR unified AS. * Also use `getCIRAllocaAddressSpace` as the result address space of `buildAlloca`. * Fix the lowering of `cir.alloca` operation to be address-space-aware. We don't perform any `addrspacecast` in this PR, i.e. all the related code paths still remain NYI and it's fine. That's because we don't even have a `(language, target)` pair holding the inconsistency. After these changes, in the previous OpenCL testcases we will see all the alloca pointers turning into private AS, without any `addrspacecast`. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 9 +++++++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 12 +++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 6 ++++++ clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 12 ++++-------- clang/lib/CIR/CodeGen/TargetInfo.cpp | 6 ++++++ clang/lib/CIR/CodeGen/TargetInfo.h | 8 +++++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 ++- .../test/CIR/CodeGen/OpenCL/addrspace-alloca.cl | 16 ++++++++++++---- 9 files changed, 54 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index a16148a21986..7970be0eccf7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1100,7 +1100,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // the AST level this is handled within CreateTempAlloca et al., but for the // builtin / dynamic alloca we have to handle it here. assert(!MissingFeatures::addressSpace()); - auto AAS = builder.getAddrSpaceAttr(getASTAllocaAddressSpace()); + auto AAS = getCIRAllocaAddressSpace(); auto EAS = builder.getAddrSpaceAttr( E->getType()->getPointeeType().getAddressSpace()); if (EAS != AAS) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index d6045548bfd6..d91c8603b3d7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2756,7 +2756,10 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, mlir::Location loc, CharUnits alignment, mlir::OpBuilder::InsertPoint ip, mlir::Value arraySize) { - auto localVarPtrTy = mlir::cir::PointerType::get(builder.getContext(), ty); + // CIR uses its own alloca AS rather than follow the target data layout like + // original CodeGen. The data layout awareness should be done in the lowering + // pass instead. + auto localVarPtrTy = builder.getPointerTo(ty, getCIRAllocaAddressSpace()); auto alignIntAttr = CGM.getSize(alignment); mlir::Value addr; @@ -2985,7 +2988,9 @@ Address CIRGenFunction::CreateTempAlloca(mlir::Type Ty, CharUnits Align, // be different from the type defined by the language. For example, // in C++ the auto variables are in the default address space. Therefore // cast alloca to the default address space when necessary. - if (getASTAllocaAddressSpace() != LangAS::Default) { + if (auto ASTAS = + builder.getAddrSpaceAttr(CGM.getLangTempAllocaAddressSpace()); + getCIRAllocaAddressSpace() != ASTAS) { llvm_unreachable("Requires address space cast which is NYI"); } return Address(V, Ty, Align); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 9ff4acd41cef..bf2b31cf1510 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -159,7 +159,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, AllocaInt8PtrTy = UInt8PtrTy; // TODO: GlobalsInt8PtrTy // TODO: ConstGlobalsPtrTy - ASTAllocaAddressSpace = getTargetCIRGenInfo().getASTAllocaAddressSpace(); + CIRAllocaAddressSpace = getTargetCIRGenInfo().getCIRAllocaAddressSpace(); PtrDiffTy = ::mlir::cir::IntType::get( builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth(), @@ -1404,6 +1404,16 @@ LangAS CIRGenModule::getGlobalConstantAddressSpace() const { return LangAS::Default; } +// TODO(cir): this could be a common AST helper for both CIR and LLVM codegen. +LangAS CIRGenModule::getLangTempAllocaAddressSpace() const { + if (getLangOpts().OpenCL) + return LangAS::opencl_private; + if (getLangOpts().SYCLIsDevice || getLangOpts().CUDAIsDevice || + (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)) + llvm_unreachable("NYI"); + return LangAS::Default; +} + static mlir::cir::GlobalOp generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, mlir::cir::GlobalLinkageKind LT, CIRGenModule &CGM, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 52704261110d..b46befcc949a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -335,6 +335,12 @@ class CIRGenModule : public CIRGenTypeCache { /// in AST is always in default address space. LangAS getGlobalConstantAddressSpace() const; + /// Returns the address space for temporary allocations in the language. This + /// ensures that the allocated variable's address space matches the + /// expectations of the AST, rather than using the target's allocation address + /// space, which may lead to type mismatches in other parts of the IR. + LangAS getLangTempAllocaAddressSpace() const; + /// Set attributes which are common to any form of a global definition (alias, /// Objective-C method, function, global variable). /// diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index 0930b17d55e3..e07e46be68e5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -18,6 +18,7 @@ #include "clang/AST/CharUnits.h" #include "clang/Basic/AddressSpaces.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/MissingFeatures.h" namespace cir { @@ -108,7 +109,7 @@ struct CIRGenTypeCache { // unsigned char SizeAlignInBytes; // }; - clang::LangAS ASTAllocaAddressSpace; + mlir::cir::AddressSpaceAttr CIRAllocaAddressSpace; // clang::CharUnits getSizeSize() const { // return clang::CharUnits::fromQuantity(SizeSizeInBytes); @@ -123,13 +124,8 @@ struct CIRGenTypeCache { return clang::CharUnits::fromQuantity(PointerAlignInBytes); } - clang::LangAS getASTAllocaAddressSpace() const { - // Address spaces are not yet fully supported, but the usage of the default - // alloca address space can be used for now only for comparison with the - // default address space. - assert(!MissingFeatures::addressSpace()); - assert(ASTAllocaAddressSpace == clang::LangAS::Default); - return ASTAllocaAddressSpace; + mlir::cir::AddressSpaceAttr getCIRAllocaAddressSpace() const { + return CIRAllocaAddressSpace; } }; diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index ba81ad88c435..0016f2200b37 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -263,6 +263,12 @@ class CommonSPIRTargetCIRGenInfo : public TargetCIRGenInfo { CommonSPIRTargetCIRGenInfo(std::unique_ptr ABIInfo) : TargetCIRGenInfo(std::move(ABIInfo)) {} + mlir::cir::AddressSpaceAttr getCIRAllocaAddressSpace() const override { + return mlir::cir::AddressSpaceAttr::get( + &getABIInfo().CGT.getMLIRContext(), + mlir::cir::AddressSpaceAttr::Kind::offload_private); + } + unsigned getOpenCLKernelCallingConv() const override { return llvm::CallingConv::SPIR_KERNEL; } diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index 21cf9b78d35e..1b4dbea110fc 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -62,9 +62,11 @@ class TargetCIRGenInfo { std::vector &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const {} - /// Get the AST address space for alloca. - virtual clang::LangAS getASTAllocaAddressSpace() const { - return clang::LangAS::Default; + /// Get the CIR address space for alloca. + virtual mlir::cir::AddressSpaceAttr getCIRAllocaAddressSpace() const { + // Return the null attribute, which means the target does not care about the + // alloca address space. + return {}; } /// Perform address space cast of an expression of pointer type. diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3e0c766df0f7..e8962574b546 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -999,7 +999,8 @@ class CIRAllocaLowering typeConverter->convertType(rewriter.getIndexType()), rewriter.getIntegerAttr(rewriter.getIndexType(), 1)); auto elementTy = getTypeConverter()->convertType(op.getAllocaType()); - auto resultTy = mlir::LLVM::LLVMPointerType::get(getContext()); + auto resultTy = getTypeConverter()->convertType(op.getResult().getType()); + // TODO: Verification between the CIR alloca AS and the one from data layout rewriter.replaceOpWithNewOp( op, resultTy, elementTy, size, op.getAlignmentAttr().getInt()); return mlir::success(); diff --git a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl index 58edf5c2791e..c64b5015f369 100644 --- a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl +++ b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl @@ -7,20 +7,28 @@ // CIR: cir.func @func(%arg0: !cir.ptr // LLVM: @func(ptr addrspace(3) kernel void func(local int *p) { - // CIR-NEXT: %[[#ALLOCA_P:]] = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} + // CIR-NEXT: %[[#ALLOCA_P:]] = cir.alloca !cir.ptr, !cir.ptr, addrspace(offload_private)>, ["p", init] {alignment = 8 : i64} // LLVM-NEXT: %[[#ALLOCA_P:]] = alloca ptr addrspace(3), i64 1, align 8 int x; - // CIR-NEXT: %[[#ALLOCA_X:]] = cir.alloca !s32i, !cir.ptr, ["x"] {alignment = 4 : i64} + // CIR-NEXT: %[[#ALLOCA_X:]] = cir.alloca !s32i, !cir.ptr, ["x"] {alignment = 4 : i64} // LLVM-NEXT: %[[#ALLOCA_X:]] = alloca i32, i64 1, align 4 global char *b; - // CIR-NEXT: %[[#ALLOCA_B:]] = cir.alloca !cir.ptr, !cir.ptr>, ["b"] {alignment = 8 : i64} + // CIR-NEXT: %[[#ALLOCA_B:]] = cir.alloca !cir.ptr, !cir.ptr, addrspace(offload_private)>, ["b"] {alignment = 8 : i64} // LLVM-NEXT: %[[#ALLOCA_B:]] = alloca ptr addrspace(1), i64 1, align 8 + private int *ptr; + // CIR-NEXT: %[[#ALLOCA_PTR:]] = cir.alloca !cir.ptr, !cir.ptr, addrspace(offload_private)>, ["ptr"] {alignment = 8 : i64} + // LLVM-NEXT: %[[#ALLOCA_PTR:]] = alloca ptr, i64 1, align 8 + // Store of the argument `p` - // CIR-NEXT: cir.store %arg0, %[[#ALLOCA_P]] : !cir.ptr, !cir.ptr> + // CIR-NEXT: cir.store %arg0, %[[#ALLOCA_P]] : !cir.ptr, !cir.ptr, addrspace(offload_private)> // LLVM-NEXT: store ptr addrspace(3) %{{[0-9]+}}, ptr %[[#ALLOCA_P]], align 8 + ptr = &x; + // CIR-NEXT: cir.store %[[#ALLOCA_X]], %[[#ALLOCA_PTR]] : !cir.ptr, !cir.ptr, addrspace(offload_private)> + // LLVM-NEXT: store ptr %[[#ALLOCA_X]], ptr %[[#ALLOCA_PTR]] + return; } From 070f3c16bacfa5026d6fd3034299435ec8c48e5a Mon Sep 17 00:00:00 2001 From: GaoXiangYa <168072492+GaoXiangYa@users.noreply.github.com> Date: Fri, 2 Aug 2024 06:27:22 +0800 Subject: [PATCH 1733/2301] [CIR][ThroughMLIR] Lowering cir.do to scf.while,and fix cir.while lowering bugs. (#756) In this pr, I lowering cir.do to scf.while, fix cir.while nested loop bugs and add test cases. --- .../ThroughMLIR/LowerCIRLoopToSCF.cpp | 66 +++++++++-- clang/test/CIR/Lowering/ThroughMLIR/doWhile.c | 107 ++++++++++++++++++ clang/test/CIR/Lowering/ThroughMLIR/while.c | 64 ++++++++++- 3 files changed, 227 insertions(+), 10 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/doWhile.c diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp index 4aee2701c86a..1b6eba94c5ea 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp @@ -14,8 +14,11 @@ #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/SCF/Transforms/Passes.h" +#include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinDialect.h" #include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Location.h" +#include "mlir/IR/ValueRange.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Support/LogicalResult.h" @@ -69,6 +72,19 @@ class SCFWhileLoop { mlir::ConversionPatternRewriter *rewriter; }; +class SCFDoLoop { +public: + SCFDoLoop(mlir::cir::DoWhileOp op, mlir::cir::DoWhileOp::Adaptor adaptor, + mlir::ConversionPatternRewriter *rewriter) + : DoOp(op), adaptor(adaptor), rewriter(rewriter) {} + void transferToSCFWhileOp(); + +private: + mlir::cir::DoWhileOp DoOp; + mlir::cir::DoWhileOp::Adaptor adaptor; + mlir::ConversionPatternRewriter *rewriter; +}; + static int64_t getConstant(mlir::cir::ConstantOp op) { auto attr = op->getAttrs().front().getValue(); const auto IntAttr = mlir::dyn_cast(attr); @@ -240,13 +256,33 @@ void SCFWhileLoop::transferToSCFWhileOp() { whileOp->getLoc(), whileOp->getResultTypes(), adaptor.getOperands()); rewriter->createBlock(&scfWhileOp.getBefore()); rewriter->createBlock(&scfWhileOp.getAfter()); + rewriter->inlineBlockBefore(&whileOp.getCond().front(), + scfWhileOp.getBeforeBody(), + scfWhileOp.getBeforeBody()->end()); + rewriter->inlineBlockBefore(&whileOp.getBody().front(), + scfWhileOp.getAfterBody(), + scfWhileOp.getAfterBody()->end()); +} - rewriter->cloneRegionBefore(whileOp.getCond(), - &scfWhileOp.getBefore().back()); - rewriter->eraseBlock(&scfWhileOp.getBefore().back()); - - rewriter->cloneRegionBefore(whileOp.getBody(), &scfWhileOp.getAfter().back()); - rewriter->eraseBlock(&scfWhileOp.getAfter().back()); +void SCFDoLoop::transferToSCFWhileOp() { + + auto beforeBuilder = [&](mlir::OpBuilder &builder, mlir::Location loc, + mlir::ValueRange args) { + auto *newBlock = builder.getBlock(); + rewriter->mergeBlocks(&DoOp.getBody().front(), newBlock); + auto *yieldOp = newBlock->getTerminator(); + rewriter->mergeBlocks(&DoOp.getCond().front(), newBlock, + yieldOp->getResults()); + rewriter->eraseOp(yieldOp); + }; + auto afterBuilder = [&](mlir::OpBuilder &builder, mlir::Location loc, + mlir::ValueRange args) { + rewriter->create(loc, args); + }; + + rewriter->create(DoOp.getLoc(), DoOp->getResultTypes(), + adaptor.getOperands(), beforeBuilder, + afterBuilder); } class CIRForOpLowering : public mlir::OpConversionPattern { @@ -279,6 +315,20 @@ class CIRWhileOpLowering } }; +class CIRDoOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::DoWhileOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + SCFDoLoop loop(op, adaptor, &rewriter); + loop.transferToSCFWhileOp(); + rewriter.eraseOp(op); + return mlir::success(); + } +}; + class CIRConditionOpLowering : public mlir::OpConversionPattern { public: @@ -302,8 +352,8 @@ class CIRConditionOpLowering void populateCIRLoopToSCFConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter) { - patterns.add( - converter, patterns.getContext()); + patterns.add(converter, patterns.getContext()); } } // namespace cir diff --git a/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c b/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c new file mode 100644 index 000000000000..b6069e8a787e --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c @@ -0,0 +1,107 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +int sum() { + int s = 0; + int i = 0; + do { + s += i; + ++i; + } while (i <= 10); + return s; +} + +void nestedDoWhile() { + int a = 0; + do { + a++; + int b = 0; + while(b < 2) { + b++; + } + }while(a < 2); +} + +// CHECK: func.func @sum() -> i32 { +// CHECK: %[[ALLOC:.+]] = memref.alloca() {alignment = 4 : i64} : memref +// CHECK: %[[ALLOC0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +// CHECK: %[[ALLOC1:.+]] = memref.alloca() {alignment = 4 : i64} : memref +// CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +// CHECK: memref.store %[[C0_I32]], %[[ALLOC0]][] : memref +// CHECK: %[[C0_I32_2:.+]] = arith.constant 0 : i32 +// CHECK: memref.store %[[C0_I32_2]], %[[ALLOC1]][] : memref +// CHECK: memref.alloca_scope { +// CHECK: scf.while : () -> () { +// CHECK: %[[VAR1:.+]] = memref.load %[[ALLOC1]][] : memref +// CHECK: %[[VAR2:.+]] = memref.load %[[ALLOC0]][] : memref +// CHECK: %[[ADD:.+]] = arith.addi %[[VAR2]], %[[VAR1]] : i32 +// CHECK: memref.store %[[ADD]], %[[ALLOC0]][] : memref +// CHECK: %[[VAR3:.+]] = memref.load %[[ALLOC1]][] : memref +// CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +// CHECK: %[[ADD1:.+]] = arith.addi %[[VAR3]], %[[C1_I32]] : i32 +// CHECK: memref.store %[[ADD1]], %[[ALLOC1]][] : memref +// CHECK: %[[VAR4:.+]] = memref.load %[[ALLOC1]][] : memref +// CHECK: %[[C10_I32:.+]] = arith.constant 10 : i32 +// CHECK: %[[CMP:.+]] = arith.cmpi sle, %[[VAR4]], %[[C10_I32]] : i32 +// CHECK: %[[EXT:.+]] = arith.extui %[[CMP]] : i1 to i32 +// CHECK: %[[C0_I32_3:.+]] = arith.constant 0 : i32 +// CHECK: %[[NE:.+]] = arith.cmpi ne, %[[EXT]], %[[C0_I32_3]] : i32 +// CHECK: %[[EXT1:.+]] = arith.extui %[[NE]] : i1 to i8 +// CHECK: %[[TRUNC:.+]] = arith.trunci %[[EXT1]] : i8 to i1 +// CHECK: scf.condition(%[[TRUNC]]) +// CHECK: } do { +// CHECK: scf.yield +// CHECK: } +// CHECK: } +// CHECK: %[[LOAD:.+]] = memref.load %[[ALLOC0]][] : memref +// CHECK: memref.store %[[LOAD]], %[[ALLOC]][] : memref +// CHECK: %[[RET:.+]] = memref.load %[[ALLOC]][] : memref +// CHECK: return %[[RET]] : i32 + +// CHECK: func.func @nestedDoWhile() { +// CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref +// CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +// CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref +// CHECK: memref.alloca_scope { +// CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +// CHECK: scf.while : () -> () { +// CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +// CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +// CHECK: %[[ONE:.+]] = arith.addi %[[ZERO]], %[[C1_I32]] : i32 +// CHECK: memref.store %[[ONE]], %[[alloca]][] : memref +// CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 +// CHECK: memref.store %[[C0_I32_1]], %[[alloca_0]][] : memref +// CHECK: memref.alloca_scope { +// CHECK: scf.while : () -> () { +// CHECK: %[[EIGHT:.+]] = memref.load %[[alloca_0]][] : memref +// CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 +// CHECK: %[[NINE:.+]] = arith.cmpi slt, %[[EIGHT]], %[[C2_I32_3]] : i32 +// CHECK: %[[TEN:.+]] = arith.extui %9 : i1 to i32 +// CHECK: %[[C0_I32_4:.+]] = arith.constant 0 : i32 +// CHECK: %[[ELEVEN:.+]] = arith.cmpi ne, %[[TEN]], %[[C0_I32_4]] : i32 +// CHECK: %[[TWELVE:.+]] = arith.extui %[[ELEVEN]] : i1 to i8 +// CHECK: %[[THIRTEEN:.+]] = arith.trunci %[[TWELVE]] : i8 to i1 +// CHECK: scf.condition(%[[THIRTEEN]]) +// CHECK: } do { +// CHECK: %[[EIGHT]] = memref.load %[[alloca_0]][] : memref +// CHECK: %[[C1_I32_3:.+]] = arith.constant 1 : i32 +// CHECK: %[[NINE]] = arith.addi %[[EIGHT]], %[[C1_I32_3]] : i32 +// CHECK: memref.store %[[NINE]], %[[alloca_0]][] : memref +// CHECK: scf.yield +// CHECK: } +// CHECK: } +// CHECK: %[[TWO:.+]] = memref.load %[[alloca]][] : memref +// CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +// CHECK: %[[THREE:.+]] = arith.cmpi slt, %[[TWO]], %[[C2_I32]] : i32 +// CHECK: %[[FOUR:.+]] = arith.extui %[[THREE]] : i1 to i32 +// CHECK: %[[C0_I32_2:.+]] = arith.constant 0 : i32 +// CHECK: %[[FIVE:.+]] = arith.cmpi ne, %[[FOUR]], %[[C0_I32_2]] : i32 +// CHECK: %[[SIX:.+]] = arith.extui %[[FIVE]] : i1 to i8 +// CHECK: %[[SEVEN:.+]] = arith.trunci %[[SIX]] : i8 to i1 +// CHECK: scf.condition(%[[SEVEN]]) +// CHECK: } do { +// CHECK: scf.yield +// CHECK: } +// CHECK: } +// CHECK: return +// CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/ThroughMLIR/while.c b/clang/test/CIR/Lowering/ThroughMLIR/while.c index bdf5f04b2181..40ad92de95e4 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/while.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/while.c @@ -1,14 +1,25 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -void foo() { +void singleWhile() { int a = 0; while(a < 2) { a++; } } -//CHECK: func.func @foo() { +void nestedWhile() { + int a = 0; + while(a < 2) { + int b = 0; + while(b < 2) { + b++; + } + a++; + } +} + +//CHECK: func.func @singleWhile() { //CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref //CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 //CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref @@ -32,4 +43,53 @@ void foo() { //CHECK: } //CHECK: } //CHECK: return +//CHECK: } + +//CHECK: func.func @nestedWhile() { +//CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref +//CHECK: memref.alloca_scope { +//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: scf.while : () -> () { +//CHECK: %[[ZERO:.+]] = memref.load %alloca[] : memref +//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C2_I32]] : i32 +//CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 +//CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 +//CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO]], %[[C0_I32_1]] : i32 +//CHECK: %[[FOUR:.+]] = arith.extui %[[THREE]] : i1 to i8 +//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR]] : i8 to i1 +//CHECK: scf.condition(%[[FIVE]]) +//CHECK: } do { +//CHECK: %[[C0_I32_1]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32_1]], %[[alloca_0]][] : memref +//CHECK: memref.alloca_scope { +//CHECK: scf.while : () -> () { +//CHECK: %[[TWO]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C2_I32]] = arith.constant 2 : i32 +//CHECK: %[[THREE]] = arith.cmpi slt, %[[TWO]], %[[C2_I32]] : i32 +//CHECK: %[[FOUR]] = arith.extui %[[THREE]] : i1 to i32 +//CHECK: %[[C0_I32_2:.+]] = arith.constant 0 : i32 +//CHECK: %[[FIVE]] = arith.cmpi ne, %[[FOUR]], %[[C0_I32_2]] : i32 +//CHECK: %[[SIX:.+]] = arith.extui %[[FIVE]] : i1 to i8 +//CHECK: %[[SEVEN:.+]] = arith.trunci %[[SIX]] : i8 to i1 +//CHECK: scf.condition(%[[SEVEN]]) +//CHECK: } do { +//CHECK: %[[TWO]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32_2:.+]] = arith.constant 1 : i32 +//CHECK: %[[THREE]] = arith.addi %[[TWO]], %[[C1_I32_2]] : i32 +//CHECK: memref.store %[[THREE]], %[[alloca_0]][] : memref +//CHECK: scf.yield +//CHECK: } +//CHECK: } +//CHECK: %[[ZERO]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[ONE]] = arith.addi %[[ZERO]], %[[C1_I32]] : i32 +//CHECK: memref.store %[[ONE]], %[[alloca]][] : memref +//CHECK: scf.yield +//CHECK: } +//CHECK: } +//CHECK: return +//CHECK: } //CHECK: } \ No newline at end of file From e510e03a3b91f711df364848895d86b77d734afa Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 2 Aug 2024 06:50:38 +0800 Subject: [PATCH 1734/2301] [CIR][CIRGen] Add CIRGen for scalar co_yield expression (#761) This PR adds CIRGen for scalar `co_yield` expressions. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 8 ++-- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 30 ++++++++---- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 ++ clang/test/CIR/CodeGen/coro-task.cpp | 49 ++++++++++++++++++++ 5 files changed, 82 insertions(+), 12 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 49ee8112b7bf..eb398f5dffb6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3147,12 +3147,13 @@ def TryCallOp : CIR_CallOp<"try_call", def AK_Initial : I32EnumAttrCase<"init", 1>; def AK_User : I32EnumAttrCase<"user", 2>; -def AK_Final : I32EnumAttrCase<"final", 3>; +def AK_Yield : I32EnumAttrCase<"yield", 3>; +def AK_Final : I32EnumAttrCase<"final", 4>; def AwaitKind : I32EnumAttr< "AwaitKind", "await kind", - [AK_Initial, AK_User, AK_Final]> { + [AK_Initial, AK_User, AK_Yield, AK_Final]> { let cppNamespace = "::mlir::cir"; } @@ -3186,9 +3187,10 @@ def AwaitOp : CIR_Op<"await", of CIR, e.g. LLVM, should use the `suspend` region to track more lower level codegen (e.g. intrinsic emission for coro.save/coro.suspend). - There are also 3 flavors of `cir.await` available: + There are also 4 flavors of `cir.await` available: - `init`: compiler generated initial suspend via implicit `co_await`. - `user`: also known as normal, representing user written co_await's. + - `yield`: user written `co_yield` expressions. - `final`: compiler generated final suspend via implicit `co_await`. From the C++ snippet we get: diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 362f1ee5fcb3..5aece0476abd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -492,11 +492,12 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, return awaitRes; } -RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, - AggValueSlot aggSlot, - bool ignoreResult) { +static RValue buildSuspendExpr(CIRGenFunction &CGF, + const CoroutineSuspendExpr &E, + mlir::cir::AwaitKind kind, AggValueSlot aggSlot, + bool ignoreResult) { RValue rval; - auto scopeLoc = getLoc(E.getSourceRange()); + auto scopeLoc = CGF.getLoc(E.getSourceRange()); // Since we model suspend / resume as an inner region, we must store // resume scalar results in a tmp alloca, and load it after we build the @@ -504,13 +505,12 @@ RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, // every region return a value when promise.return_value() is used, but // it's a bit awkward given that resume is the only region that actually // returns a value. - mlir::Block *currEntryBlock = currLexScope->getEntryBlock(); + mlir::Block *currEntryBlock = CGF.currLexScope->getEntryBlock(); [[maybe_unused]] mlir::Value tmpResumeRValAddr; // No need to explicitly wrap this into a scope since the AST already uses a // ExprWithCleanups, which will wrap this into a cir.scope anyways. - rval = buildSuspendExpression(*this, *CurCoro.Data, E, - CurCoro.Data->CurrentAwaitKind, aggSlot, + rval = buildSuspendExpression(CGF, *CGF.CurCoro.Data, E, kind, aggSlot, ignoreResult, currEntryBlock, tmpResumeRValAddr, /*forLValue*/ false) .RV; @@ -519,7 +519,7 @@ RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, return rval; if (rval.isScalar()) { - rval = RValue::get(builder.create( + rval = RValue::get(CGF.getBuilder().create( scopeLoc, rval.getScalarVal().getType(), tmpResumeRValAddr)); } else if (rval.isAggregate()) { // This is probably already handled via AggSlot, remove this assertion @@ -531,6 +531,20 @@ RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, return rval; } +RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, + AggValueSlot aggSlot, + bool ignoreResult) { + return buildSuspendExpr(*this, E, CurCoro.Data->CurrentAwaitKind, aggSlot, + ignoreResult); +} + +RValue CIRGenFunction::buildCoyieldExpr(const CoyieldExpr &E, + AggValueSlot aggSlot, + bool ignoreResult) { + return buildSuspendExpr(*this, E, mlir::cir::AwaitKind::yield, aggSlot, + ignoreResult); +} + mlir::LogicalResult CIRGenFunction::buildCoreturnStmt(CoreturnStmt const &S) { ++CurCoro.Data->CoreturnCount; currLexScope->setCoreturn(); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 3d4eb82adfa1..9df6780e0c41 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -160,7 +160,9 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitCoawaitExpr(CoawaitExpr *S) { return CGF.buildCoawaitExpr(*S).getScalarVal(); } - mlir::Value VisitCoyieldExpr(CoyieldExpr *S) { llvm_unreachable("NYI"); } + mlir::Value VisitCoyieldExpr(CoyieldExpr *S) { + return CGF.buildCoyieldExpr(*S).getScalarVal(); + } mlir::Value VisitUnaryCoawait(const UnaryOperator *E) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 1aaebee77869..7458571ca08a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -900,6 +900,9 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); + RValue buildCoyieldExpr(const CoyieldExpr &E, + AggValueSlot aggSlot = AggValueSlot::ignored(), + bool ignoreResult = false); RValue buildCoroutineIntrinsic(const CallExpr *E, unsigned int IID); RValue buildCoroutineFrame(); diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index ba9c6cdf973e..24ee6df5f5c6 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -380,3 +380,52 @@ folly::coro::Task go4() { // CHECK: }, resume : { // CHECK: },) // CHECK: } + +folly::coro::Task yield(); +folly::coro::Task yield1() { + auto t = yield(); + co_yield t; +} + +// CHECK: cir.func coroutine @_Z6yield1v() -> !ty_22folly3A3Acoro3A3ATask3Cvoid3E22 + +// CHECK: cir.await(init, ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: },) + +// CHECK: cir.scope { +// CHECK-NEXT: %[[#SUSPEND_PTR:]] = cir.alloca !ty_22std3A3Asuspend_always22, !cir.ptr +// CHECK-NEXT: %[[#AWAITER_PTR:]] = cir.alloca !ty_22folly3A3Acoro3A3ATask3Cvoid3E22, !cir.ptr +// CHECK-NEXT: %[[#CORO_PTR:]] = cir.alloca !ty_22std3A3Acoroutine_handle3Cvoid3E22, !cir.ptr +// CHECK-NEXT: %[[#CORO2_PTR:]] = cir.alloca !ty_22std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E22, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5folly4coro4TaskIvEC1ERKS2_(%[[#AWAITER_PTR]], %{{.+}}) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[#AWAITER:]] = cir.load %[[#AWAITER_PTR]] : !cir.ptr, !ty_22folly3A3Acoro3A3ATask3Cvoid3E22 +// CHECK-NEXT: %[[#SUSPEND:]] = cir.call @_ZN5folly4coro4TaskIvE12promise_type11yield_valueES2_(%{{.+}}, %[[#AWAITER]]) : (!cir.ptr, !ty_22folly3A3Acoro3A3ATask3Cvoid3E22) -> !ty_22std3A3Asuspend_always22 +// CHECK-NEXT: cir.store %[[#SUSPEND]], %[[#SUSPEND_PTR]] : !ty_22std3A3Asuspend_always22, !cir.ptr +// CHECK-NEXT: cir.await(yield, ready : { +// CHECK-NEXT: %[[#READY:]] = cir.scope { +// CHECK-NEXT: %[[#A:]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[#SUSPEND_PTR]]) : (!cir.ptr) -> !cir.bool +// CHECK-NEXT: cir.yield %[[#A]] : !cir.bool +// CHECK-NEXT: } : !cir.bool +// CHECK-NEXT: cir.condition(%[[#READY]]) +// CHECK-NEXT: }, suspend : { +// CHECK-NEXT: %[[#CORO2:]] = cir.call @_ZNSt16coroutine_handleIN5folly4coro4TaskIvE12promise_typeEE12from_addressEPv(%9) : (!cir.ptr) -> !ty_22std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E22 +// CHECK-NEXT: cir.store %[[#CORO2]], %[[#CORO2_PTR]] : !ty_22std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E22, !cir.ptr +// CHECK-NEXT: %[[#B:]] = cir.load %[[#CORO2_PTR]] : !cir.ptr, !ty_22std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E22 +// CHECK-NEXT: cir.call @_ZNSt16coroutine_handleIvEC1IN5folly4coro4TaskIvE12promise_typeEEES_IT_E(%[[#CORO_PTR]], %[[#B]]) : (!cir.ptr, !ty_22std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E22) -> () +// CHECK-NEXT: %[[#C:]] = cir.load %[[#CORO_PTR]] : !cir.ptr, !ty_22std3A3Acoroutine_handle3Cvoid3E22 +// CHECK-NEXT: cir.call @_ZNSt14suspend_always13await_suspendESt16coroutine_handleIvE(%[[#SUSPEND_PTR]], %[[#C]]) : (!cir.ptr, !ty_22std3A3Acoroutine_handle3Cvoid3E22) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: }, resume : { +// CHECK-NEXT: cir.call @_ZNSt14suspend_always12await_resumeEv(%[[#SUSPEND_PTR]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: },) +// CHECK-NEXT: } + +// CHECK: cir.await(final, ready : { +// CHECK: }, suspend : { +// CHECK: }, resume : { +// CHECK: },) + +// CHECK: } From 51265ce1ed7ca33b32b2d5946acb7e530172b38c Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 2 Aug 2024 06:54:16 +0800 Subject: [PATCH 1735/2301] [CIR] CIRGen for bit cast builtin (#762) This PR adds CIRGen support for the `__builtin_bit_cast` builtin. No new operations are added so the LLVM IR lowering is also added automatically. --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 24 ++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 23 +++- clang/test/CIR/CodeGen/builtin-bit-cast.cpp | 136 ++++++++++++++++++++ 3 files changed, 179 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-bit-cast.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index a0830a9f2108..979ea4ebf258 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -907,6 +907,30 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { if (const auto *ECE = dyn_cast(E)) CGF.CGM.buildExplicitCastExprType(ECE, &CGF); switch (E->getCastKind()) { + case CK_LValueToRValueBitCast: { + if (Dest.isIgnored()) { + CGF.buildAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), + /*ignoreResult=*/true); + break; + } + + LValue SourceLV = CGF.buildLValue(E->getSubExpr()); + Address SourceAddress = SourceLV.getAddress(); + Address DestAddress = Dest.getAddress(); + + auto Loc = CGF.getLoc(E->getExprLoc()); + mlir::Value SrcPtr = CGF.getBuilder().createBitcast( + Loc, SourceAddress.getPointer(), CGF.VoidPtrTy); + mlir::Value DstPtr = CGF.getBuilder().createBitcast( + Loc, DestAddress.getPointer(), CGF.VoidPtrTy); + + mlir::Value SizeVal = CGF.getBuilder().getConstInt( + Loc, CGF.SizeTy, + CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity()); + CGF.getBuilder().createMemCpy(Loc, DstPtr, SrcPtr, SizeVal); + + break; + } case CK_LValueToRValue: // If we're loading from a volatile type, force the destination diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 9df6780e0c41..9dcda33fa134 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1548,11 +1548,26 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { llvm_unreachable("builtin functions are handled elsewhere"); case CK_LValueBitCast: - llvm_unreachable("NYI"); case CK_ObjCObjectLValueCast: - llvm_unreachable("NYI"); - case CK_LValueToRValueBitCast: - llvm_unreachable("NYI"); + case CK_LValueToRValueBitCast: { + LValue SourceLVal = CGF.buildLValue(E); + Address SourceAddr = SourceLVal.getAddress(); + + mlir::Type DestElemTy = CGF.convertTypeForMem(DestTy); + mlir::Type DestPtrTy = CGF.getBuilder().getPointerTo(DestElemTy); + mlir::Value DestPtr = CGF.getBuilder().createBitcast( + CGF.getLoc(E->getExprLoc()), SourceAddr.getPointer(), DestPtrTy); + + Address DestAddr = + SourceAddr.withPointer(DestPtr).withElementType(DestElemTy); + LValue DestLVal = CGF.makeAddrLValue(DestAddr, DestTy); + + if (Kind == CK_LValueToRValueBitCast) + assert(!MissingFeatures::tbaa()); + + return buildLoadOfLValue(DestLVal, CE->getExprLoc()); + } + case CK_CPointerToObjCPointerCast: case CK_BlockPointerToObjCPointerCast: case CK_AnyPointerToBlockPointerCast: diff --git a/clang/test/CIR/CodeGen/builtin-bit-cast.cpp b/clang/test/CIR/CodeGen/builtin-bit-cast.cpp new file mode 100644 index 000000000000..88e584a2d72c --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-bit-cast.cpp @@ -0,0 +1,136 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CIR %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s + +float test_scalar(int &oper) { + return __builtin_bit_cast(float, oper); +} + +// CIR-LABEL: cir.func @_Z11test_scalarRi +// CIR: %[[#SRC_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[#DST_PTR:]] = cir.cast(bitcast, %[[#SRC_PTR]] : !cir.ptr), !cir.ptr +// CIR-NEXT: %{{.+}} = cir.load %[[#DST_PTR]] : !cir.ptr, !cir.float +// CIR: } + +// LLVM-LABEL: define dso_local float @_Z11test_scalarRi +// LLVM: %[[#PTR:]] = load ptr, ptr %{{.+}}, align 8 +// LLVM-NEXT: %{{.+}} = load float, ptr %[[#PTR]], align 4 +// LLVM: } + +struct two_ints { + int x; + int y; +}; + +unsigned long test_aggregate_to_scalar(two_ints &ti) { + return __builtin_bit_cast(unsigned long, ti); +} + +// CIR-LABEL: cir.func @_Z24test_aggregate_to_scalarR8two_ints +// CIR: %[[#SRC_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[#DST_PTR:]] = cir.cast(bitcast, %[[#SRC_PTR]] : !cir.ptr), !cir.ptr +// CIR-NEXT: %{{.+}} = cir.load %[[#DST_PTR]] : !cir.ptr, !u64i +// CIR: } + +// LLVM-LABEL: define dso_local i64 @_Z24test_aggregate_to_scalarR8two_ints +// LLVM: %[[#PTR:]] = load ptr, ptr %{{.+}}, align 8 +// LLVM-NEXT: %{{.+}} = load i64, ptr %[[#PTR]], align 8 +// LLVM: } + +struct two_floats { + float x; + float y; +}; + +two_floats test_aggregate_record(two_ints& ti) { + return __builtin_bit_cast(two_floats, ti); +} + +// CIR-LABEL: cir.func @_Z21test_aggregate_recordR8two_ints +// CIR: %[[#SRC_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %[[#SRC_PTR]] : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#SIZE:]] = cir.const #cir.int<8> : !u64i +// CIR-NEXT: cir.libc.memcpy %[[#SIZE]] bytes from %[[#SRC_VOID_PTR]] to %[[#DST_VOID_PTR]] : !u64i, !cir.ptr -> !cir.ptr +// CIR: } + +// LLVM-LABEL: define dso_local %struct.two_floats @_Z21test_aggregate_recordR8two_ints +// LLVM: %[[#DST_SLOT:]] = alloca %struct.two_floats, i64 1, align 4 +// LLVM: %[[#SRC_PTR:]] = load ptr, ptr %2, align 8 +// LLVM-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %[[#DST_SLOT]], ptr %[[#SRC_PTR]], i64 8, i1 false) +// LLVM-NEXT: %{{.+}} = load %struct.two_floats, ptr %[[#DST_SLOT]], align 4 +// LLVM: } + +two_floats test_aggregate_array(int (&ary)[2]) { + return __builtin_bit_cast(two_floats, ary); +} + +// CIR-LABEL: cir.func @_Z20test_aggregate_arrayRA2_i +// CIR: %[[#SRC_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CIR-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %[[#SRC_PTR]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#SIZE:]] = cir.const #cir.int<8> : !u64i +// CIR-NEXT: cir.libc.memcpy %[[#SIZE]] bytes from %[[#SRC_VOID_PTR]] to %[[#DST_VOID_PTR]] : !u64i, !cir.ptr -> !cir.ptr +// CIR: } + +// LLVM-LABEL: define dso_local %struct.two_floats @_Z20test_aggregate_arrayRA2_i +// LLVM: %[[#DST_SLOT:]] = alloca %struct.two_floats, i64 1, align 4 +// LLVM: %[[#SRC_PTR:]] = load ptr, ptr %2, align 8 +// LLVM-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %[[#DST_SLOT]], ptr %[[#SRC_PTR]], i64 8, i1 false) +// LLVM-NEXT: %{{.+}} = load %struct.two_floats, ptr %[[#DST_SLOT]], align 4 +// LLVM: } + +two_ints test_scalar_to_aggregate(unsigned long ul) { + return __builtin_bit_cast(two_ints, ul); +} + +// CIR-LABEL: cir.func @_Z24test_scalar_to_aggregatem +// CIR: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#SIZE:]] = cir.const #cir.int<8> : !u64i +// CIR-NEXT: cir.libc.memcpy %[[#SIZE]] bytes from %[[#SRC_VOID_PTR]] to %[[#DST_VOID_PTR]] : !u64i, !cir.ptr -> !cir.ptr +// CIR: } + +// LLVM-LABEL: define dso_local %struct.two_ints @_Z24test_scalar_to_aggregatem +// LLVM: %[[#DST_SLOT:]] = alloca %struct.two_ints, i64 1, align 4 +// LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#DST_SLOT]], ptr %{{.+}}, i64 8, i1 false) +// LLVM-NEXT: %{{.+}} = load %struct.two_ints, ptr %[[#DST_SLOT]], align 4 +// LLVM: } + +unsigned long test_array(int (&ary)[2]) { + return __builtin_bit_cast(unsigned long, ary); +} + +// CIR-LABEL: cir.func @_Z10test_arrayRA2_i +// CIR: %[[#SRC_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CIR-NEXT: %[[#DST_PTR:]] = cir.cast(bitcast, %[[#SRC_PTR]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: %{{.+}} = cir.load %[[#DST_PTR]] : !cir.ptr, !u64i +// CIR: } + +// LLVM-LABEL: define dso_local i64 @_Z10test_arrayRA2_i +// LLVM: %[[#SRC_PTR:]] = load ptr, ptr %{{.+}}, align 8 +// LLVM-NEXT: %{{.+}} = load i64, ptr %[[#SRC_PTR]], align 8 +// LLVM: } + +two_ints test_rvalue_aggregate() { + return __builtin_bit_cast(two_ints, 42ul); +} + +// CIR-LABEL: cir.func @_Z21test_rvalue_aggregatev() +// CIR: cir.scope { +// CIR-NEXT: %[[#TMP_SLOT:]] = cir.alloca !u64i, !cir.ptr +// CIR-NEXT: %[[#A:]] = cir.const #cir.int<42> : !u64i +// CIR-NEXT: cir.store %[[#A]], %[[#TMP_SLOT]] : !u64i, !cir.ptr +// CIR-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %[[#TMP_SLOT]] : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#SIZE:]] = cir.const #cir.int<8> : !u64i +// CIR-NEXT: cir.libc.memcpy %[[#SIZE]] bytes from %[[#SRC_VOID_PTR]] to %[[#DST_VOID_PTR]] : !u64i, !cir.ptr -> !cir.ptr +// CIR-NEXT: } +// CIR: } + +// LLVM-LABEL: define dso_local %struct.two_ints @_Z21test_rvalue_aggregatev +// LLVM: %[[#SRC_SLOT:]] = alloca i64, i64 1, align 8 +// LLVM-NEXT: store i64 42, ptr %[[#SRC_SLOT]], align 8 +// LLVM-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %{{.+}}, ptr %[[#SRC_SLOT]], i64 8, i1 false) +// LLVM: } From 2e6e4be2a9d305b63c9eaa60418c5d28ffbf1a9b Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 2 Aug 2024 06:56:27 +0800 Subject: [PATCH 1736/2301] [CIR][ABI][NFCI] Enable SPIR-V return value and argument ABI to use Direct and Extend (#763) This NFCI PR enhances the SPIR-V *CIRGen* ABI with Direct and Extend in both argument and return value, because some future test cases requires it. * kernel argument metadata needs arguments of promotable integer types * builtin functions like `get_global_id` returns `si64`, rather than void for all OpenCL kernels Given that CallConvLowering will replace these bits and other targets is already doing the same, I think it's safe to enable it now. --- clang/lib/CIR/CodeGen/TargetInfo.cpp | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 0016f2200b37..165b01f513a4 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -47,7 +47,18 @@ class DefaultABIInfo : public ABIInfo { if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); - llvm_unreachable("Non-void return type NYI"); + if (isAggregateTypeForABI(RetTy)) + llvm_unreachable("NYI"); + + // Treat an enum type as its underlying type. + if (const EnumType *EnumTy = RetTy->getAs()) + llvm_unreachable("NYI"); + + if (const auto *EIT = RetTy->getAs()) + llvm_unreachable("NYI"); + + return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) + : ABIArgInfo::getDirect()); } ABIArgInfo classifyArgumentType(QualType Ty) const { @@ -65,11 +76,8 @@ class DefaultABIInfo : public ABIInfo { if (const auto *EIT = Ty->getAs()) llvm_unreachable("NYI"); - if (isPromotableIntegerTypeForABI(Ty)) { - llvm_unreachable("ArgInfo integer extend NYI"); - } else { - return ABIArgInfo::getDirect(); - } + return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) + : ABIArgInfo::getDirect()); } void computeInfo(CIRGenFunctionInfo &FI) const override { From bdb863dc4e4d77362332de641fa69ce108012fc7 Mon Sep 17 00:00:00 2001 From: 7mile Date: Sat, 3 Aug 2024 06:11:14 +0800 Subject: [PATCH 1737/2301] [CIR][LowerToLLVM][NFC] Refactor `amendOperation` to dispatch different ops (#768) Soon I would like to submit a patch emitting OpenCL module metadata in LowerToLLVM path, which requires to attach the metadata to LLVM module when `amendOperaion` is called for MLIR module op. This PR refactors the method to a dispatcher to make the future changes cleaner. --- .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 48 +++++++++++-------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index 62a988b37533..65fe667f6ff9 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -39,10 +39,32 @@ class CIRDialectLLVMIRTranslationInterface mlir::Operation *op, llvm::ArrayRef instructions, mlir::NamedAttribute attribute, mlir::LLVM::ModuleTranslation &moduleTranslation) const override { - // Translate CIR's extra function attributes to LLVM's function attributes. - auto func = dyn_cast(op); - if (!func) - return mlir::success(); + if (auto func = dyn_cast(op)) { + amendFunction(func, instructions, attribute, moduleTranslation); + } + return mlir::success(); + } + + /// Translates the given operation to LLVM IR using the provided IR builder + /// and saving the state in `moduleTranslation`. + mlir::LogicalResult convertOperation( + mlir::Operation *op, llvm::IRBuilderBase &builder, + mlir::LLVM::ModuleTranslation &moduleTranslation) const final { + + if (auto cirOp = llvm::dyn_cast(op)) + moduleTranslation.mapValue(cirOp.getResult()) = + llvm::Constant::getNullValue( + moduleTranslation.convertType(cirOp.getType())); + + return mlir::success(); + } + +private: + // Translate CIR's extra function attributes to LLVM's function attributes. + void amendFunction(mlir::LLVM::LLVMFuncOp func, + llvm::ArrayRef instructions, + mlir::NamedAttribute attribute, + mlir::LLVM::ModuleTranslation &moduleTranslation) const { llvm::Function *llvmFunc = moduleTranslation.lookupFunction(func.getName()); if (auto extraAttr = mlir::dyn_cast( attribute.getValue())) { @@ -71,25 +93,9 @@ class CIRDialectLLVMIRTranslationInterface } // Drop ammended CIR attribute from LLVM op. - op->removeAttr(attribute.getName()); - return mlir::success(); + func->removeAttr(attribute.getName()); } - /// Translates the given operation to LLVM IR using the provided IR builder - /// and saving the state in `moduleTranslation`. - mlir::LogicalResult convertOperation( - mlir::Operation *op, llvm::IRBuilderBase &builder, - mlir::LLVM::ModuleTranslation &moduleTranslation) const final { - - if (auto cirOp = llvm::dyn_cast(op)) - moduleTranslation.mapValue(cirOp.getResult()) = - llvm::Constant::getNullValue( - moduleTranslation.convertType(cirOp.getType())); - - return mlir::success(); - } - -private: void emitOpenCLKernelMetadata( mlir::cir::OpenCLKernelMetadataAttr clKernelMetadata, llvm::Function *llvmFunc, From c9a0e703887e1a0f226df1c76176e27d3b6f5f2e Mon Sep 17 00:00:00 2001 From: 7mile Date: Sat, 3 Aug 2024 06:17:29 +0800 Subject: [PATCH 1738/2301] [CIR][Dialect] Emit OpenCL kernel argument metadata (#767) Similar to #705, this PR implements the remaining `genKernelArgMetadata()` logic. The attribute `cir.cl.kernel_arg_metadata` is also intentionally placed in the `cir.func`'s `extra_attrs` rather than `cir.func`'s standard `arg_attrs` list. Also, the metadata is stored by `Array` with proper verification on it. See the tablegen doc string for details. This is in order to * keep it side-by-side with `cl.kernel_metadata`. * still emit metadata when kernel has an *empty* arg list (see the test `kernel-arg-meatadata.cl`). * avoid horrors of repeating the long name `cir.cl.kernel_arg_metadata` for `numArgs` times. Because clangir doesn't support OpenCL built-in types and the `half` floating point type yet, their changes and test cases are not included. Corresponding missing feature flag is added. --- .../clang/CIR/Dialect/IR/CIROpenCLAttrs.td | 55 ++++++ clang/include/clang/CIR/MissingFeatures.h | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 171 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 14 ++ clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 37 ++++ .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 78 ++++++++ .../OpenCL/kernel-arg-info-single-as.cl | 14 ++ .../CIR/CodeGen/OpenCL/kernel-arg-info.cl | 90 +++++++++ .../CIR/CodeGen/OpenCL/kernel-arg-metadata.cl | 12 ++ clang/test/CIR/IR/invalid.cir | 46 +++++ 11 files changed, 519 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl create mode 100644 clang/test/CIR/CodeGen/OpenCL/kernel-arg-info.cl create mode 100644 clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td index 1f32701909b7..294f18c9414d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td @@ -92,4 +92,59 @@ def OpenCLKernelMetadataAttr } +//===----------------------------------------------------------------------===// +// OpenCLKernelArgMetadataAttr +//===----------------------------------------------------------------------===// + +def OpenCLKernelArgMetadataAttr + : CIR_Attr<"OpenCLKernelArgMetadata", "cl.kernel_arg_metadata"> { + + let summary = "OpenCL kernel argument metadata"; + let description = [{ + Provide the required information of an OpenCL kernel argument for the SPIR-V + backend. + + All parameters are arrays, containing the information of the argument in + the same order as they appear in the source code. + + The `addr_space` parameter is an array of I32 that provides the address + space of the argument. It's useful for special types like `image`, which + have implicit global address space. + + Other parameters are arrays of strings that pass through the information + from the source code correspondingly. + + All the fields are mandatory except for `name`, which is optional. + + Example: + ``` + #fn_attr = #cir})> + + cir.func @kernel(%arg0: !s32i) extra(#fn_attr) { + cir.return + } + ``` + }]; + + let parameters = (ins + "ArrayAttr":$addr_space, + "ArrayAttr":$access_qual, + "ArrayAttr":$type, + "ArrayAttr":$base_type, + "ArrayAttr":$type_qual, + OptionalParameter<"ArrayAttr">:$name + ); + + let assemblyFormat = "`<` struct(params) `>`"; + + let genVerifyDecl = 1; +} + #endif // MLIR_CIR_DIALECT_CIR_OPENCL_ATTRS diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index d67989120562..436bc506df7c 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -142,7 +142,7 @@ struct MissingFeatures { static bool getFPFeaturesInEffect() { return false; } static bool cxxABI() { return false; } static bool openCL() { return false; } - static bool openCLGenKernelMetadata() { return false; } + static bool openCLBuiltinTypes() { return false; } static bool CUDA() { return false; } static bool openMP() { return false; } static bool openMPRuntime() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 27049934a556..916566936283 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1725,8 +1725,7 @@ void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, if (!FD->hasAttr() && !FD->hasAttr()) return; - // TODO(cir): CGM.genKernelArgMetadata(Fn, FD, this); - assert(!MissingFeatures::openCLGenKernelMetadata()); + CGM.genKernelArgMetadata(Fn, FD, this); if (!getLangOpts().OpenCL) return; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index bf2b31cf1510..a8df5255e540 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -3061,3 +3061,174 @@ mlir::cir::SourceLanguage CIRGenModule::getCIRSourceLanguage() { // TODO(cir): support remaining source languages. llvm_unreachable("CIR does not yet support the given source language"); } + +// Returns the address space id that should be produced to the +// kernel_arg_addr_space metadata. This is always fixed to the ids +// as specified in the SPIR 2.0 specification in order to differentiate +// for example in clGetKernelArgInfo() implementation between the address +// spaces with targets without unique mapping to the OpenCL address spaces +// (basically all single AS CPUs). +static unsigned ArgInfoAddressSpace(LangAS AS) { + switch (AS) { + case LangAS::opencl_global: + return 1; + case LangAS::opencl_constant: + return 2; + case LangAS::opencl_local: + return 3; + case LangAS::opencl_generic: + return 4; // Not in SPIR 2.0 specs. + case LangAS::opencl_global_device: + return 5; + case LangAS::opencl_global_host: + return 6; + default: + return 0; // Assume private. + } +} + +void CIRGenModule::genKernelArgMetadata(mlir::cir::FuncOp Fn, + const FunctionDecl *FD, + CIRGenFunction *CGF) { + assert(((FD && CGF) || (!FD && !CGF)) && + "Incorrect use - FD and CGF should either be both null or not!"); + // Create MDNodes that represent the kernel arg metadata. + // Each MDNode is a list in the form of "key", N number of values which is + // the same number of values as their are kernel arguments. + + const PrintingPolicy &Policy = getASTContext().getPrintingPolicy(); + + // Integer values for the kernel argument address space qualifiers. + SmallVector addressQuals; + + // Attrs for the kernel argument access qualifiers (images only). + SmallVector accessQuals; + + // Attrs for the kernel argument type names. + SmallVector argTypeNames; + + // Attrs for the kernel argument base type names. + SmallVector argBaseTypeNames; + + // Attrs for the kernel argument type qualifiers. + SmallVector argTypeQuals; + + // Attrs for the kernel argument names. + SmallVector argNames; + + // OpenCL image and pipe types require special treatments for some metadata + assert(!MissingFeatures::openCLBuiltinTypes()); + + if (FD && CGF) + for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) { + const ParmVarDecl *parm = FD->getParamDecl(i); + // Get argument name. + argNames.push_back(builder.getStringAttr(parm->getName())); + + if (!getLangOpts().OpenCL) + continue; + QualType ty = parm->getType(); + std::string typeQuals; + + // Get image and pipe access qualifier: + if (ty->isImageType() || ty->isPipeType()) { + llvm_unreachable("NYI"); + } else + accessQuals.push_back(builder.getStringAttr("none")); + + auto getTypeSpelling = [&](QualType Ty) { + auto typeName = Ty.getUnqualifiedType().getAsString(Policy); + + if (Ty.isCanonical()) { + StringRef typeNameRef = typeName; + // Turn "unsigned type" to "utype" + if (typeNameRef.consume_front("unsigned ")) + return std::string("u") + typeNameRef.str(); + if (typeNameRef.consume_front("signed ")) + return typeNameRef.str(); + } + + return typeName; + }; + + if (ty->isPointerType()) { + QualType pointeeTy = ty->getPointeeType(); + + // Get address qualifier. + addressQuals.push_back( + ArgInfoAddressSpace(pointeeTy.getAddressSpace())); + + // Get argument type name. + std::string typeName = getTypeSpelling(pointeeTy) + "*"; + std::string baseTypeName = + getTypeSpelling(pointeeTy.getCanonicalType()) + "*"; + argTypeNames.push_back(builder.getStringAttr(typeName)); + argBaseTypeNames.push_back(builder.getStringAttr(baseTypeName)); + + // Get argument type qualifiers: + if (ty.isRestrictQualified()) + typeQuals = "restrict"; + if (pointeeTy.isConstQualified() || + (pointeeTy.getAddressSpace() == LangAS::opencl_constant)) + typeQuals += typeQuals.empty() ? "const" : " const"; + if (pointeeTy.isVolatileQualified()) + typeQuals += typeQuals.empty() ? "volatile" : " volatile"; + } else { + uint32_t AddrSpc = 0; + bool isPipe = ty->isPipeType(); + if (ty->isImageType() || isPipe) + llvm_unreachable("NYI"); + + addressQuals.push_back(AddrSpc); + + // Get argument type name. + ty = isPipe ? ty->castAs()->getElementType() : ty; + std::string typeName = getTypeSpelling(ty); + std::string baseTypeName = getTypeSpelling(ty.getCanonicalType()); + + // Remove access qualifiers on images + // (as they are inseparable from type in clang implementation, + // but OpenCL spec provides a special query to get access qualifier + // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER): + if (ty->isImageType()) { + llvm_unreachable("NYI"); + } + + argTypeNames.push_back(builder.getStringAttr(typeName)); + argBaseTypeNames.push_back(builder.getStringAttr(baseTypeName)); + + if (isPipe) + llvm_unreachable("NYI"); + } + argTypeQuals.push_back(builder.getStringAttr(typeQuals)); + } + + bool shouldEmitArgName = getCodeGenOpts().EmitOpenCLArgMetadata || + getCodeGenOpts().HIPSaveKernelArgName; + + if (getLangOpts().OpenCL) { + // The kernel arg name is emitted only when `-cl-kernel-arg-info` is on, + // since it is only used to support `clGetKernelArgInfo` which requires + // `-cl-kernel-arg-info` to work. The other metadata are mandatory because + // they are necessary for OpenCL runtime to set kernel argument. + mlir::ArrayAttr resArgNames = {}; + if (shouldEmitArgName) + resArgNames = builder.getArrayAttr(argNames); + + // Update the function's extra attributes with the kernel argument metadata. + auto value = mlir::cir::OpenCLKernelArgMetadataAttr::get( + Fn.getContext(), builder.getI32ArrayAttr(addressQuals), + builder.getArrayAttr(accessQuals), builder.getArrayAttr(argTypeNames), + builder.getArrayAttr(argBaseTypeNames), + builder.getArrayAttr(argTypeQuals), resArgNames); + mlir::NamedAttrList items{Fn.getExtraAttrs().getElements().getValue()}; + auto oldValue = items.set(value.getMnemonic(), value); + if (oldValue != value) { + Fn.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), builder.getDictionaryAttr(items))); + } + } else { + if (shouldEmitArgName) + llvm_unreachable("NYI HIPSaveKernelArgName"); + } +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index b46befcc949a..fa6da9c9506d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -688,6 +688,20 @@ class CIRGenModule : public CIRGenTypeCache { return *openMPRuntime; } + /// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument + /// information in the program executable. The argument information stored + /// includes the argument name, its type, the address and access qualifiers + /// used. This helper can be used to generate metadata for source code kernel + /// function as well as generated implicitly kernels. If a kernel is generated + /// implicitly null value has to be passed to the last two parameters, + /// otherwise all parameters must have valid non-null values. + /// \param FN is a pointer to IR function being generated. + /// \param FD is a pointer to function declaration if any. + /// \param CGF is a pointer to CIRGenFunction that generates this function. + void genKernelArgMetadata(mlir::cir::FuncOp FN, + const FunctionDecl *FD = nullptr, + CIRGenFunction *CGF = nullptr); + private: // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index c77ad8c7a46e..4cfa01b08687 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -556,6 +556,43 @@ LogicalResult OpenCLKernelMetadataAttr::verify( return success(); } +//===----------------------------------------------------------------------===// +// OpenCLKernelArgMetadataAttr definitions +//===----------------------------------------------------------------------===// + +LogicalResult OpenCLKernelArgMetadataAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ArrayAttr addrSpaces, ArrayAttr accessQuals, ArrayAttr types, + ArrayAttr baseTypes, ArrayAttr typeQuals, ArrayAttr argNames) { + auto isIntArray = [](ArrayAttr elt) { + return llvm::all_of( + elt, [](Attribute elt) { return mlir::isa(elt); }); + }; + auto isStrArray = [](ArrayAttr elt) { + return llvm::all_of( + elt, [](Attribute elt) { return mlir::isa(elt); }); + }; + + if (!isIntArray(addrSpaces)) + return emitError() << "addr_space must be integer arrays"; + if (!llvm::all_of>( + {accessQuals, types, baseTypes, typeQuals}, isStrArray)) + return emitError() + << "access_qual, type, base_type, type_qual must be string arrays"; + if (argNames && !isStrArray(argNames)) { + return emitError() << "name must be a string array"; + } + + if (!llvm::all_of>( + {addrSpaces, accessQuals, types, baseTypes, typeQuals, argNames}, + [&](ArrayAttr arr) { + return !arr || arr.size() == addrSpaces.size(); + })) { + return emitError() << "all arrays must have the same number of elements"; + } + return success(); +} + //===----------------------------------------------------------------------===// // AddressSpaceAttr definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index 65fe667f6ff9..08aeb902b78e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -88,6 +88,11 @@ class CIRDialectLLVMIRTranslationInterface attr.getValue())) { emitOpenCLKernelMetadata(clKernelMetadata, llvmFunc, moduleTranslation); + } else if (auto clArgMetadata = + mlir::dyn_cast( + attr.getValue())) { + emitOpenCLKernelArgMetadata(clArgMetadata, func.getNumArguments(), + llvmFunc, moduleTranslation); } } } @@ -148,6 +153,79 @@ class CIRDialectLLVMIRTranslationInterface llvm::MDNode::get(vmCtx, attrMDArgs)); } } + + void emitOpenCLKernelArgMetadata( + mlir::cir::OpenCLKernelArgMetadataAttr clArgMetadata, unsigned numArgs, + llvm::Function *llvmFunc, + mlir::LLVM::ModuleTranslation &moduleTranslation) const { + auto &vmCtx = moduleTranslation.getLLVMContext(); + + // MDNode for the kernel argument address space qualifiers. + SmallVector addressQuals; + + // MDNode for the kernel argument access qualifiers (images only). + SmallVector accessQuals; + + // MDNode for the kernel argument type names. + SmallVector argTypeNames; + + // MDNode for the kernel argument base type names. + SmallVector argBaseTypeNames; + + // MDNode for the kernel argument type qualifiers. + SmallVector argTypeQuals; + + // MDNode for the kernel argument names. + SmallVector argNames; + + auto lowerStringAttr = [&](mlir::Attribute strAttr) { + return llvm::MDString::get( + vmCtx, mlir::cast(strAttr).getValue()); + }; + + bool shouldEmitArgName = !!clArgMetadata.getName(); + + auto addressSpaceValues = + clArgMetadata.getAddrSpace().getAsValueRange(); + + for (auto &&[i, addrSpace] : llvm::enumerate(addressSpaceValues)) { + // Address space qualifier. + addressQuals.push_back( + llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( + llvm::IntegerType::get(vmCtx, 32), addrSpace))); + + // Access qualifier. + accessQuals.push_back(lowerStringAttr(clArgMetadata.getAccessQual()[i])); + + // Type name. + argTypeNames.push_back(lowerStringAttr(clArgMetadata.getType()[i])); + + // Base type name. + argBaseTypeNames.push_back( + lowerStringAttr(clArgMetadata.getBaseType()[i])); + + // Type qualifier. + argTypeQuals.push_back(lowerStringAttr(clArgMetadata.getTypeQual()[i])); + + // Argument name. + if (shouldEmitArgName) + argNames.push_back(lowerStringAttr(clArgMetadata.getName()[i])); + } + + llvmFunc->setMetadata("kernel_arg_addr_space", + llvm::MDNode::get(vmCtx, addressQuals)); + llvmFunc->setMetadata("kernel_arg_access_qual", + llvm::MDNode::get(vmCtx, accessQuals)); + llvmFunc->setMetadata("kernel_arg_type", + llvm::MDNode::get(vmCtx, argTypeNames)); + llvmFunc->setMetadata("kernel_arg_base_type", + llvm::MDNode::get(vmCtx, argBaseTypeNames)); + llvmFunc->setMetadata("kernel_arg_type_qual", + llvm::MDNode::get(vmCtx, argTypeQuals)); + if (shouldEmitArgName) + llvmFunc->setMetadata("kernel_arg_name", + llvm::MDNode::get(vmCtx, argNames)); + } }; void registerCIRDialectTranslation(mlir::DialectRegistry ®istry) { diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl new file mode 100644 index 000000000000..b78ee6dddbf7 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl @@ -0,0 +1,14 @@ +// Test that the kernel argument info always refers to SPIR address spaces, +// even if the target has only one address space like x86_64 does. +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -o - -triple x86_64-unknown-linux-gnu -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR + +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-llvm -o - -triple x86_64-unknown-linux-gnu -o %t.ll +// RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM + +kernel void foo(__global int * G, __constant int *C, __local int *L) { + *G = *C + *L; +} +// CIR: cl.kernel_arg_metadata = #cir.cl.kernel_arg_metadata +// CIR-DAG: cir.func @foo({{.+}}) extra(#fn_attr[[KERNEL0]]) +// CIR-ARGINFO-DAG: #fn_attr[[KERNEL0:[0-9]*]] = {{.+}}cl.kernel_arg_metadata = #cir.cl.kernel_arg_metadata +// CIR-ARGINFO-DAG: cir.func @foo({{.+}}) extra(#fn_attr[[KERNEL0]]) + +// LLVM-DAG: define{{.*}} void @foo{{.+}} !kernel_arg_addr_space ![[MD11:[0-9]+]] !kernel_arg_access_qual ![[MD12:[0-9]+]] !kernel_arg_type ![[MD13:[0-9]+]] !kernel_arg_base_type ![[MD13]] !kernel_arg_type_qual ![[MD14:[0-9]+]] { +// LLVM-ARGINFO-DAG: define{{.*}} void @foo{{.+}} !kernel_arg_addr_space ![[MD11:[0-9]+]] !kernel_arg_access_qual ![[MD12:[0-9]+]] !kernel_arg_type ![[MD13:[0-9]+]] !kernel_arg_base_type ![[MD13]] !kernel_arg_type_qual ![[MD14:[0-9]+]] !kernel_arg_name ![[MD15:[0-9]+]] { + +// LLVM-DAG: ![[MD11]] = !{i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 1, i32 1, i32 1, i32 1, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 0, i32 0, i32 0, i32 0} +// LLVM-DAG: ![[MD12]] = !{!"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none", !"none"} +// LLVM-DAG: ![[MD13]] = !{!"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int*", !"int", !"int", !"int", !"int"} +// LLVM-DAG: ![[MD14]] = !{!"", !"restrict", !"const", !"restrict const", !"const", !"restrict const", !"const volatile", !"restrict const volatile", !"volatile", !"restrict volatile", !"", !"restrict", !"const", !"restrict const", !"const volatile", !"restrict const volatile", !"volatile", !"restrict volatile", !"", !"", !"", !""} +// LLVM-ARGINFO-DAG: ![[MD15]] = !{!"globalintp", !"globalintrestrictp", !"globalconstintp", !"globalconstintrestrictp", !"constantintp", !"constantintrestrictp", !"globalconstvolatileintp", !"globalconstvolatileintrestrictp", !"globalvolatileintp", !"globalvolatileintrestrictp", !"localintp", !"localintrestrictp", !"localconstintp", !"localconstintrestrictp", !"localconstvolatileintp", !"localconstvolatileintrestrictp", !"localvolatileintp", !"localvolatileintrestrictp", !"X", !"constint", !"constvolatileint", !"volatileint"} + +typedef unsigned int myunsignedint; +kernel void foo4(__global unsigned int * X, __global myunsignedint * Y) { +} + +// CIR-DAG: #fn_attr[[KERNEL4:[0-9]*]] = {{.+}}cl.kernel_arg_metadata = #cir.cl.kernel_arg_metadata +// CIR-DAG: cir.func @foo4({{.+}}) extra(#fn_attr[[KERNEL4]]) +// CIR-ARGINFO-DAG: #fn_attr[[KERNEL4:[0-9]*]] = {{.+}}cl.kernel_arg_metadata = #cir.cl.kernel_arg_metadata +// CIR-ARGINFO-DAG: cir.func @foo4({{.+}}) extra(#fn_attr[[KERNEL4]]) + +// LLVM-DAG: define{{.*}} void @foo4{{.+}} !kernel_arg_addr_space ![[MD41:[0-9]+]] !kernel_arg_access_qual ![[MD42:[0-9]+]] !kernel_arg_type ![[MD43:[0-9]+]] !kernel_arg_base_type ![[MD44:[0-9]+]] !kernel_arg_type_qual ![[MD45:[0-9]+]] { +// LLVM-ARGINFO-DAG: define{{.*}} void @foo4{{.+}} !kernel_arg_addr_space ![[MD41:[0-9]+]] !kernel_arg_access_qual ![[MD42:[0-9]+]] !kernel_arg_type ![[MD43:[0-9]+]] !kernel_arg_base_type ![[MD44:[0-9]+]] !kernel_arg_type_qual ![[MD45:[0-9]+]] !kernel_arg_name ![[MD46:[0-9]+]] { + +// LLVM-DAG: ![[MD41]] = !{i32 1, i32 1} +// LLVM-DAG: ![[MD42]] = !{!"none", !"none"} +// LLVM-DAG: ![[MD43]] = !{!"uint*", !"myunsignedint*"} +// LLVM-DAG: ![[MD44]] = !{!"uint*", !"uint*"} +// LLVM-DAG: ![[MD45]] = !{!"", !""} +// LLVM-ARGINFO-DAG: ![[MD46]] = !{!"X", !"Y"} + +typedef char char16 __attribute__((ext_vector_type(16))); +__kernel void foo6(__global char16 arg[]) {} + +// CIR-DAG: #fn_attr[[KERNEL6:[0-9]*]] = {{.+}}cl.kernel_arg_metadata = #cir.cl.kernel_arg_metadata +// CIR-DAG: cir.func @foo6({{.+}}) extra(#fn_attr[[KERNEL6]]) +// CIR-ARGINFO-DAG: #fn_attr[[KERNEL6:[0-9]*]] = {{.+}}cl.kernel_arg_metadata = #cir.cl.kernel_arg_metadata +// CIR-ARGINFO-DAG: cir.func @foo6({{.+}}) extra(#fn_attr[[KERNEL6]]) + +// LLVM-DAG: !kernel_arg_type ![[MD61:[0-9]+]] +// LLVM-ARGINFO-DAG: !kernel_arg_name ![[MD62:[0-9]+]] +// LLVM-DAG: ![[MD61]] = !{!"char16*"} +// LLVM-ARGINFO-DAG: ![[MD62]] = !{!"arg"} + +kernel void foo9(signed char sc1, global const signed char* sc2) {} + +// CIR-DAG: #fn_attr[[KERNEL9:[0-9]*]] = {{.+}}cl.kernel_arg_metadata = #cir.cl.kernel_arg_metadata +// CIR-DAG: cir.func @foo9({{.+}}) extra(#fn_attr[[KERNEL9]]) +// CIR-ARGINFO-DAG: #fn_attr[[KERNEL9:[0-9]*]] = {{.+}}cl.kernel_arg_metadata = #cir.cl.kernel_arg_metadata +// CIR-ARGINFO-DAG: cir.func @foo9({{.+}}) extra(#fn_attr[[KERNEL9]]) + +// LLVM-DAG: define{{.*}} void @foo9{{.+}} !kernel_arg_addr_space ![[SCHAR_AS_QUAL:[0-9]+]] !kernel_arg_access_qual ![[MD42]] !kernel_arg_type ![[SCHAR_TY:[0-9]+]] !kernel_arg_base_type ![[SCHAR_TY]] !kernel_arg_type_qual ![[SCHAR_QUAL:[0-9]+]] { +// LLVM-ARGINFO-DAG: define{{.*}} void @foo9{{.+}} !kernel_arg_addr_space ![[SCHAR_AS_QUAL:[0-9]+]] !kernel_arg_access_qual ![[MD42]] !kernel_arg_type ![[SCHAR_TY:[0-9]+]] !kernel_arg_base_type ![[SCHAR_TY]] !kernel_arg_type_qual ![[SCHAR_QUAL:[0-9]+]] !kernel_arg_name ![[SCHAR_ARG_NAMES:[0-9]+]] { + +// LLVM-DAG: ![[SCHAR_AS_QUAL]] = !{i32 0, i32 1} +// LLVM-DAG: ![[SCHAR_TY]] = !{!"char", !"char*"} +// LLVM-DAG: ![[SCHAR_QUAL]] = !{!"", !"const"} +// LLVM-ARGINFO-DAG: ![[SCHAR_ARG_NAMES]] = !{!"sc1", !"sc2"} diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl new file mode 100644 index 000000000000..7961e0e26244 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 %s -fclangir -triple spirv64-unknown-unknown -emit-cir -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR +// RUN: %clang_cc1 %s -fclangir -triple spirv64-unknown-unknown -emit-llvm -o %t.ll +// RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM + +__kernel void kernel_function() {} + +// CIR: #fn_attr[[ATTR:[0-9]*]] = {{.+}}cl.kernel_arg_metadata = #cir.cl.kernel_arg_metadata{{.+}} +// CIR: cir.func @kernel_function() extra(#fn_attr[[ATTR]]) + +// LLVM: define {{.*}}void @kernel_function() {{[^{]+}} !kernel_arg_addr_space ![[MD:[0-9]+]] !kernel_arg_access_qual ![[MD]] !kernel_arg_type ![[MD]] !kernel_arg_base_type ![[MD]] !kernel_arg_type_qual ![[MD]] { +// LLVM: ![[MD]] = !{} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 45dda0a39e42..8386a59ba9bd 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1216,3 +1216,49 @@ cir.func @address_space4(%p : !cir.ptr) { // expected- vec_type_hint = !s32i, vec_type_hint_signedness = 0 > + +// ----- + +// expected-error@+1 {{addr_space must be integer arrays}} +#fn_attr = #cir.cl.kernel_arg_metadata< + addr_space = ["none"], + access_qual = ["none"], + type = ["uint*"], + base_type = ["uint*"], + type_qual = [""] +> + +// ----- + +// expected-error@+1 {{access_qual, type, base_type, type_qual must be string arrays}} +#fn_attr = #cir.cl.kernel_arg_metadata< + addr_space = [0 : i32], + access_qual = [42 : i32], + type = ["uint*"], + base_type = ["uint*"], + type_qual = [""] +> + +// ----- + +// expected-error@+1 {{name must be a string array}} +#fn_attr = #cir.cl.kernel_arg_metadata< + addr_space = [0 : i32], + access_qual = ["none"], + type = ["uint*"], + base_type = ["uint*"], + type_qual = [""], + name = [33 : i32] +> + +// ----- + +// expected-error@+1 {{all arrays must have the same number of elements}} +#fn_attr = #cir.cl.kernel_arg_metadata< + addr_space = [0 : i32], + access_qual = ["none"], + type = ["uint*", "myunsignedint*"], + base_type = ["uint*", "uint*"], + type_qual = [""], + name = ["foo"] +> From f8f14a4f71d3540312ab2868b46bc6b571e10104 Mon Sep 17 00:00:00 2001 From: 7mile Date: Sat, 3 Aug 2024 07:23:19 +0800 Subject: [PATCH 1739/2301] [CIR][Dialect][Lowering] Add calling convention attribute to FuncOp (#760) This PR simply adds the calling convention attribute to FuncOp with LLVM Lowering support. The overall approach follows `GlobalLinkageKind`: Extend the ODS, parser, printer and lowering pass. When the call conv is C call conv, it's omitted in the output assembly. --------- Co-authored-by: Bruno Cardoso Lopes --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 22 ++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 40 ++++++++++++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 20 +++++++++- clang/test/CIR/IR/func-call-conv.cir | 27 +++++++++++++ clang/test/CIR/IR/invalid.cir | 10 +++++ clang/test/CIR/Lowering/func-call-conv.cir | 20 ++++++++++ 6 files changed, 137 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/IR/func-call-conv.cir create mode 100644 clang/test/CIR/Lowering/func-call-conv.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index eb398f5dffb6..a3ecb9bf18f3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2794,6 +2794,19 @@ def BaseClassAddrOp : CIR_Op<"base_class_addr"> { // FuncOp //===----------------------------------------------------------------------===// +// The enumeration values are not necessarily in sync with `clang::CallingConv` +// or `llvm::CallingConv`. +def CC_C : I32EnumAttrCase<"C", 1, "c">; +def CC_SpirKernel : I32EnumAttrCase<"SpirKernel", 2, "spir_kernel">; +def CC_SpirFunction : I32EnumAttrCase<"SpirFunction", 3, "spir_function">; + +def CallingConv : I32EnumAttr< + "CallingConv", + "calling convention", + [CC_C, CC_SpirKernel, CC_SpirFunction]> { + let cppNamespace = "::mlir::cir"; +} + def FuncOp : CIR_Op<"func", [ AutomaticAllocationScope, CallableOpInterface, FunctionOpInterface, DeclareOpInterfaceMethods, @@ -2819,6 +2832,9 @@ def FuncOp : CIR_Op<"func", [ The function linkage information is specified by `linkage`, as defined by `GlobalLinkageKind` attribute. + The `calling_conv` attribute specifies the calling convention of the function. + The default calling convention is `CallingConv::C`. + A compiler builtin function must be marked as `builtin` for further processing when lowering from CIR. @@ -2857,6 +2873,9 @@ def FuncOp : CIR_Op<"func", [ // Linkage information cir.func linkonce_odr @some_method(...) + // Calling convention information + cir.func @another_func(...) cc(spir_kernel) extra(#fn_attr) + // Builtin function cir.func builtin @__builtin_coro_end(!cir.ptr, !cir.bool) -> !cir.bool @@ -2878,6 +2897,8 @@ def FuncOp : CIR_Op<"func", [ UnitAttr:$dsolocal, DefaultValuedAttr:$linkage, + DefaultValuedAttr:$calling_conv, ExtraFuncAttr:$extra_attrs, OptionalAttr:$sym_visibility, UnitAttr:$comdat, @@ -2893,6 +2914,7 @@ def FuncOp : CIR_Op<"func", [ let builders = [OpBuilder<(ins "StringRef":$name, "FuncType":$type, CArg<"GlobalLinkageKind", "GlobalLinkageKind::ExternalLinkage">:$linkage, + CArg<"CallingConv", "CallingConv::C">:$callingConv, CArg<"ArrayRef", "{}">:$attrs, CArg<"ArrayRef", "{}">:$argAttrs) >]; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 11b198acae92..55e64770e370 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -157,6 +157,7 @@ template struct EnumTraits {}; } REGISTER_ENUM_TYPE(GlobalLinkageKind); +REGISTER_ENUM_TYPE(CallingConv); REGISTER_ENUM_TYPE_WITH_NS(sob, SignedOverflowBehavior); } // namespace @@ -176,6 +177,20 @@ static RetTy parseOptionalCIRKeyword(AsmParser &parser, EnumTy defaultValue) { return static_cast(index); } +/// Parse an enum from the keyword, return failure if the keyword is not found. +template +static ParseResult parseCIRKeyword(AsmParser &parser, RetTy &result) { + SmallVector names; + for (unsigned i = 0, e = EnumTraits::getMaxEnumVal(); i <= e; ++i) + names.push_back(EnumTraits::stringify(static_cast(i))); + + int index = parseOptionalKeywordAlternative(parser, names); + if (index == -1) + return failure(); + result = static_cast(index); + return success(); +} + // Check if a region's termination omission is valid and, if so, creates and // inserts the omitted terminator into the region. LogicalResult ensureRegionTerm(OpAsmParser &parser, Region ®ion, @@ -1874,7 +1889,7 @@ static StringRef getLinkageAttrNameString() { return "linkage"; } void cir::FuncOp::build(OpBuilder &builder, OperationState &result, StringRef name, cir::FuncType type, - GlobalLinkageKind linkage, + GlobalLinkageKind linkage, CallingConv callingConv, ArrayRef attrs, ArrayRef argAttrs) { result.addRegion(); @@ -1885,6 +1900,8 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, result.addAttribute( getLinkageAttrNameString(), GlobalLinkageKindAttr::get(builder.getContext(), linkage)); + result.addAttribute(getCallingConvAttrName(result.name), + CallingConvAttr::get(builder.getContext(), callingConv)); result.attributes.append(attrs.begin(), attrs.end()); if (argAttrs.empty()) return; @@ -1991,6 +2008,20 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { hasAlias = true; } + // Default to C calling convention if no keyword is provided. + auto callConvNameAttr = getCallingConvAttrName(state.name); + CallingConv callConv = CallingConv::C; + if (parser.parseOptionalKeyword("cc").succeeded()) { + if (parser.parseLParen().failed()) + return failure(); + if (parseCIRKeyword(parser, callConv).failed()) + return parser.emitError(loc) << "unknown calling convention"; + if (parser.parseRParen().failed()) + return failure(); + } + state.addAttribute(callConvNameAttr, + CallingConvAttr::get(parser.getContext(), callConv)); + auto parseGlobalDtorCtor = [&](StringRef keyword, llvm::function_ref prio)> createAttr) @@ -2144,6 +2175,7 @@ void cir::FuncOp::print(OpAsmPrinter &p) { getGlobalDtorAttrName(), getLambdaAttrName(), getLinkageAttrName(), + getCallingConvAttrName(), getNoProtoAttrName(), getSymVisibilityAttrName(), getArgAttrsAttrName(), @@ -2157,6 +2189,12 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p << ")"; } + if (getCallingConv() != CallingConv::C) { + p << " cc("; + p << stringifyCallingConv(getCallingConv()); + p << ")"; + } + if (auto globalCtor = getGlobalCtorAttr()) { p << " global_ctor"; if (!globalCtor.isDefaultPriority()) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e8962574b546..ba7712d20ba6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -463,6 +463,22 @@ mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { }; } +mlir::LLVM::CConv convertCallingConv(mlir::cir::CallingConv callinvConv) { + using CIR = mlir::cir::CallingConv; + using LLVM = mlir::LLVM::CConv; + + switch (callinvConv) { + case CIR::C: + return LLVM::C; + case CIR::SpirKernel: + return LLVM::SPIR_KERNEL; + case CIR::SpirFunction: + return LLVM::SPIR_FUNC; + default: + llvm_unreachable("Unknown calling convention"); + } +} + class CIRCopyOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -1529,6 +1545,7 @@ class CIRFuncLowering : public mlir::OpConversionPattern { if (attr.getName() == mlir::SymbolTable::getSymbolAttrName() || attr.getName() == func.getFunctionTypeAttrName() || attr.getName() == getLinkageAttrNameString() || + attr.getName() == func.getCallingConvAttrName() || (filterArgAndResAttrs && (attr.getName() == func.getArgAttrsAttrName() || attr.getName() == func.getResAttrsAttrName()))) @@ -1614,11 +1631,12 @@ class CIRFuncLowering : public mlir::OpConversionPattern { "expected single location or unknown location here"); auto linkage = convertLinkage(op.getLinkage()); + auto cconv = convertCallingConv(op.getCallingConv()); SmallVector attributes; lowerFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes); auto fn = rewriter.create( - Loc, op.getName(), llvmFnTy, linkage, isDsoLocal, mlir::LLVM::CConv::C, + Loc, op.getName(), llvmFnTy, linkage, isDsoLocal, cconv, mlir::SymbolRefAttr(), attributes); rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); diff --git a/clang/test/CIR/IR/func-call-conv.cir b/clang/test/CIR/IR/func-call-conv.cir new file mode 100644 index 000000000000..331b8fa23d7a --- /dev/null +++ b/clang/test/CIR/IR/func-call-conv.cir @@ -0,0 +1,27 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +!s32i = !cir.int + +#fn_attr = #cir})> + +module { + // CHECK: cir.func @foo() { + cir.func @foo() cc(c) { + cir.return + } + + // CHECK: cir.func @bar() cc(spir_kernel) + cir.func @bar() cc(spir_kernel) { + cir.return + } + + // CHECK: cir.func @bar_alias() alias(@bar) cc(spir_kernel) + cir.func @bar_alias() alias(@bar) cc(spir_kernel) + + // CHECK: cir.func @baz() cc(spir_function) extra(#fn_attr) + cir.func @baz() cc(spir_function) extra(#fn_attr) { + cir.return + } +} + diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 8386a59ba9bd..eb06fdcddda6 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1262,3 +1262,13 @@ cir.func @address_space4(%p : !cir.ptr) { // expected- type_qual = [""], name = ["foo"] > + +// ----- + +module { + // expected-error@+1 {{unknown calling convention}} + cir.func @foo() cc(foobar) { + cir.return + } +} + diff --git a/clang/test/CIR/Lowering/func-call-conv.cir b/clang/test/CIR/Lowering/func-call-conv.cir new file mode 100644 index 000000000000..a32e67a7d1de --- /dev/null +++ b/clang/test/CIR/Lowering/func-call-conv.cir @@ -0,0 +1,20 @@ +// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM + +!s32i = !cir.int +module { + // LLVM: define void @foo() + cir.func @foo() cc(c) { + cir.return + } + + // LLVM: define spir_kernel void @bar() + cir.func @bar() cc(spir_kernel) { + cir.return + } + + // LLVM: define spir_func void @baz() + cir.func @baz() cc(spir_function) { + cir.return + } +} From 43ed17a39a3e973813272b98ddb9a4bc529ae664 Mon Sep 17 00:00:00 2001 From: ShivaChen <32083954+ShivaChen@users.noreply.github.com> Date: Sat, 3 Aug 2024 07:29:39 +0800 Subject: [PATCH 1740/2301] [CIR][ThroughMLIR] Support array type GlobalOp lowering with initial values (#753) This commit makes the changes as following. 1. Enable array type GlobalOp lowering with initial values 2. Add error message when array size is not equal to initial string value size E.g. char big_string[10] = "abc"; --- clang/lib/CIR/Lowering/LoweringHelpers.cpp | 5 +++++ clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt | 1 + .../lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 11 ++++++++++- clang/test/CIR/Lowering/ThroughMLIR/global.cpp | 13 +++++++++++-- 4 files changed, 27 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/LoweringHelpers.cpp b/clang/lib/CIR/Lowering/LoweringHelpers.cpp index 2393819b4813..26b2af82ca19 100644 --- a/clang/lib/CIR/Lowering/LoweringHelpers.cpp +++ b/clang/lib/CIR/Lowering/LoweringHelpers.cpp @@ -19,6 +19,11 @@ convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, assert(stringAttr && "expected string attribute here"); for (auto element : stringAttr) values.push_back({8, (uint64_t)element}); + auto arrayTy = mlir::dyn_cast(attr.getType()); + assert(arrayTy && "String attribute must have an array type"); + if (arrayTy.getSize() != stringAttr.size()) + llvm_unreachable("array type of the length not equal to that of the string " + "attribute is not supported yet"); return mlir::DenseElementsAttr::get( mlir::RankedTensorType::get({(int64_t)values.size()}, type), llvm::ArrayRef(values)); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt index d3ecb0764071..33a74b9ddf8b 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/ThroughMLIR/CMakeLists.txt @@ -27,6 +27,7 @@ add_clang_library(clangCIRLoweringThroughMLIR clangLex clangFrontend clangCIR + clangCIRLoweringHelpers ${dialect_libs} MLIRCIR MLIRAnalysis diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index c59df1ec1ba8..f4ec4a4ba260 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -46,6 +46,7 @@ #include "mlir/Transforms/DialectConversion.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/LowerToMLIR.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/STLExtras.h" @@ -936,7 +937,15 @@ class CIRGlobalOpLowering mlir::Attribute initialValue = mlir::Attribute(); std::optional init = op.getInitialValue(); if (init.has_value()) { - if (auto constArr = mlir::dyn_cast(init.value())) { + if (auto constArr = + mlir::dyn_cast(init.value())) { + init = lowerConstArrayAttr(constArr, getTypeConverter()); + if (init.has_value()) + initialValue = init.value(); + else + llvm_unreachable("GlobalOp lowering array with initial value fail"); + } else if (auto constArr = + mlir::dyn_cast(init.value())) { if (memrefType.getShape().size()) { auto elementType = memrefType.getElementType(); auto rtt = diff --git a/clang/test/CIR/Lowering/ThroughMLIR/global.cpp b/clang/test/CIR/Lowering/ThroughMLIR/global.cpp index 412ffd6b8d89..d7627139ff6c 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/global.cpp +++ b/clang/test/CIR/Lowering/ThroughMLIR/global.cpp @@ -2,7 +2,16 @@ // RUN: FileCheck --input-file=%t.mlir %s float f[32000]; -double d; - // CHECK: memref.global "public" @f : memref<32000xf32> = dense<0.000000e+00> +double d; // CHECK: memref.global "public" @d : memref = dense<0.000000e+00> +float f_init[] = {1.0, 2.0}; +// CHECK: memref.global "public" @f_init : memref<2xf32> = dense<[1.000000e+00, 2.000000e+00]> +int i_init[2] = {0, 1}; +// CHECK: memref.global "public" @i_init : memref<2xi32> = dense<[0, 1]> +char string[] = "whatnow"; +// CHECK: memref.global "public" @string : memref<8xi8> = dense<[119, 104, 97, 116, 110, 111, 119, 0]> +int excess_sint[4] = {1, 2}; +// CHECK: memref.global "public" @excess_sint : memref<4xi32> = dense<[1, 2, 0, 0]> +int sint[] = {123, 456, 789}; +// CHECK: memref.global "public" @sint : memref<3xi32> = dense<[123, 456, 789]> From 25690580919926b076bb1622c865eaac538dcbc5 Mon Sep 17 00:00:00 2001 From: 7mile Date: Wed, 7 Aug 2024 08:09:55 +0800 Subject: [PATCH 1741/2301] [CIR][LowerToLLVM][NFC] Add data layout verification of alloca as (#769) There are two sources for the target allocation address space: one from `TargetCIRGenInfo::getCIRAllocaAddressSpace()` and another from `targetDataLayout.getAllocaMemorySpace()`. Since both are provided by the specific target, they should be consistent. This PR adds a check to ensure this consistency and avoid potential errors. The ctor of `CIRAllocaLowering` pattern is updated to pass the data layout in. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 44 ++++++++++++++----- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ba7712d20ba6..fd22e4b3079a 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1001,8 +1001,14 @@ class CIREhInflightOpLowering class CIRAllocaLowering : public mlir::OpConversionPattern { + mlir::DataLayout const &dataLayout; + public: - using OpConversionPattern::OpConversionPattern; + CIRAllocaLowering(mlir::TypeConverter const &typeConverter, + mlir::DataLayout const &dataLayout, + mlir::MLIRContext *context) + : OpConversionPattern(typeConverter, context), + dataLayout(dataLayout) {} mlir::LogicalResult matchAndRewrite(mlir::cir::AllocaOp op, OpAdaptor adaptor, @@ -1016,7 +1022,21 @@ class CIRAllocaLowering rewriter.getIntegerAttr(rewriter.getIndexType(), 1)); auto elementTy = getTypeConverter()->convertType(op.getAllocaType()); auto resultTy = getTypeConverter()->convertType(op.getResult().getType()); - // TODO: Verification between the CIR alloca AS and the one from data layout + // Verification between the CIR alloca AS and the one from data layout. + { + auto resPtrTy = mlir::cast(resultTy); + auto dlAllocaASAttr = mlir::cast_if_present( + dataLayout.getAllocaMemorySpace()); + // Absence means 0 + // TODO: The query for the alloca AS should be done through CIRDataLayout + // instead to reuse the logic of interpret null attr as 0. + auto dlAllocaAS = dlAllocaASAttr ? dlAllocaASAttr.getInt() : 0; + if (dlAllocaAS != resPtrTy.getAddressSpace()) { + return op.emitError() << "alloca address space doesn't match the one " + "from the target data layout: " + << dlAllocaAS; + } + } rewriter.replaceOpWithNewOp( op, resultTy, elementTy, size, op.getAlignmentAttr().getInt()); return mlir::success(); @@ -3653,8 +3673,10 @@ class CIRCatchParamOpLowering }; void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, - mlir::TypeConverter &converter) { + mlir::TypeConverter &converter, + mlir::DataLayout &dataLayout) { patterns.add(patterns.getContext()); + patterns.add(converter, dataLayout, patterns.getContext()); patterns.add< CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, @@ -3663,13 +3685,13 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, CIRTryCallLowering, CIREhInflightOpLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRBinOpOverflowOpLowering, CIRShiftOpLowering, - CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, - CIRFuncLowering, CIRCastOpLowering, CIRGlobalOpLowering, - CIRGetGlobalOpLowering, CIRComplexCreateOpLowering, - CIRComplexRealOpLowering, CIRComplexImagOpLowering, - CIRComplexRealPtrOpLowering, CIRComplexImagPtrOpLowering, - CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, - CIRBrOpLowering, CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, + CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRFuncLowering, + CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, + CIRComplexCreateOpLowering, CIRComplexRealOpLowering, + CIRComplexImagOpLowering, CIRComplexRealPtrOpLowering, + CIRComplexImagPtrOpLowering, CIRVAStartLowering, CIRVAEndLowering, + CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, + CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, @@ -3959,7 +3981,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { mlir::RewritePatternSet patterns(&getContext()); - populateCIRToLLVMConversionPatterns(patterns, converter); + populateCIRToLLVMConversionPatterns(patterns, converter, dataLayout); mlir::populateFuncToLLVMConversionPatterns(converter, patterns); mlir::ConversionTarget target(getContext()); From a5eb5aa3dd94cbf7e8a015746f1b6b6806287d8f Mon Sep 17 00:00:00 2001 From: 7mile Date: Wed, 7 Aug 2024 08:15:26 +0800 Subject: [PATCH 1742/2301] [CIR][CodeGen][NFC] Replace the calling convention in CodeGen with the one in dialect (#772) This PR remove the header `CIR/CodeGen/CallingConv.h` and migrates all `::cir::CallingConv` stuff to `::mlir::cir::CallingConv` in `CIRGenTypes` and `CIRGenFunctionInfo`. In TargetLowering library, LowerTypes and LowerFunctionInfo basically have the same clangCallConvToLLVMCallConv stuff. The CC there is the LLVM one. But those changes are not included because of the potential conflicts. We can still easily switch to the dialect when it's needed. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h | 8 ++-- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 10 +++-- clang/lib/CIR/CodeGen/CIRGenTypes.h | 4 +- clang/lib/CIR/CodeGen/CallingConv.h | 49 ---------------------- clang/lib/CIR/CodeGen/TargetInfo.cpp | 8 ++-- clang/lib/CIR/CodeGen/TargetInfo.h | 6 +-- 7 files changed, 20 insertions(+), 67 deletions(-) delete mode 100644 clang/lib/CIR/CodeGen/CallingConv.h diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index ed8cdc1e60d0..82b6625a43bc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -39,7 +39,7 @@ using namespace cir; using namespace clang; CIRGenFunctionInfo *CIRGenFunctionInfo::create( - unsigned cirCC, bool instanceMethod, bool chainCall, + mlir::cir::CallingConv cirCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &info, llvm::ArrayRef paramInfos, CanQualType resultType, llvm::ArrayRef argTypes, RequiredArgs required) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h index c443ea5f8d7a..4c9df914ee7c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h @@ -88,11 +88,11 @@ class CIRGenFunctionInfo final typedef clang::FunctionProtoType::ExtParameterInfo ExtParameterInfo; /// The cir::CallingConv to use for this function (as specified by the user). - unsigned CallingConvention : 8; + mlir::cir::CallingConv CallingConvention : 8; /// The cir::CallingConv to actually use for this function, which may depend /// on the ABI. - unsigned EffectiveCallingConvention : 8; + mlir::cir::CallingConv EffectiveCallingConvention : 8; /// The clang::CallingConv that this was originally created with. unsigned ASTCallingConvention : 6; @@ -150,7 +150,7 @@ class CIRGenFunctionInfo final CIRGenFunctionInfo() : Required(RequiredArgs::All) {} public: - static CIRGenFunctionInfo *create(unsigned cirCC, bool instanceMethod, + static CIRGenFunctionInfo *create(mlir::cir::CallingConv cirCC, bool instanceMethod, bool chainCall, const clang::FunctionType::ExtInfo &extInfo, llvm::ArrayRef paramInfos, @@ -252,7 +252,7 @@ class CIRGenFunctionInfo final /// getCallingConvention - REturn the user specified calling convention, which /// has been translated into a CIR CC. - unsigned getCallingConvention() const { return CallingConvention; } + mlir::cir::CallingConv getCallingConvention() const { return CallingConvention; } clang::CanQualType getReturnType() const { return getArgsBuffer()[0].type; } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 0498bd902829..e2ce60e25f07 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -2,12 +2,12 @@ #include "CIRGenCall.h" #include "CIRGenFunctionInfo.h" #include "CIRGenModule.h" -#include "CallingConv.h" #include "TargetInfo.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" @@ -24,12 +24,14 @@ using namespace clang; using namespace cir; -unsigned CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { +mlir::cir::CallingConv CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { switch (CC) { case CC_C: - return cir::CallingConv::C; + return mlir::cir::CallingConv::C; case CC_OpenCLKernel: return CGM.getTargetCIRGenInfo().getOpenCLKernelCallingConv(); + case CC_SpirFunction: + return mlir::cir::CallingConv::SpirFunction; default: llvm_unreachable("No other calling conventions implemented."); } @@ -761,7 +763,7 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( if (FI) return *FI; - unsigned CC = ClangCallConvToCIRCallConv(info.getCC()); + mlir::cir::CallingConv CC = ClangCallConvToCIRCallConv(info.getCC()); // Construction the function info. We co-allocate the ArgInfos. FI = CIRGenFunctionInfo::create(CC, instanceMethod, chainCall, info, diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index d1d547f24a9a..3bb5bafb194d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -126,8 +126,8 @@ class CIRGenTypes { bool isFuncTypeConvertible(const clang::FunctionType *FT); bool isFuncParamTypeConvertible(clang::QualType Ty); - /// Convert clang calling convention to LLVM calling convention. - unsigned ClangCallConvToCIRCallConv(clang::CallingConv CC); + /// Convert clang calling convention to CIR calling convention. + mlir::cir::CallingConv ClangCallConvToCIRCallConv(clang::CallingConv CC); /// Derives the 'this' type for CIRGen purposes, i.e. ignoring method CVR /// qualification. diff --git a/clang/lib/CIR/CodeGen/CallingConv.h b/clang/lib/CIR/CodeGen/CallingConv.h deleted file mode 100644 index 2f7a5d270c24..000000000000 --- a/clang/lib/CIR/CodeGen/CallingConv.h +++ /dev/null @@ -1,49 +0,0 @@ -//===- CallingConv.h - CIR Calling Conventions ------------*- C++ -------*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file defines CIR's set of calling conventions. -// -//===----------------------------------------------------------------------===// - -#ifndef CLANG_CIR_CALLINGCONV_H -#define CLANG_CIR_CALLINGCONV_H - -// TODO: This whole file needs translated to CIR - -namespace cir { - -/// CallingConv Namespace - This namespace contains an enum with a value for the -/// well-known calling conventions. -namespace CallingConv { - -/// LLVM IR allows to use arbitrary numbers as calling convention identifiers. -/// TODO: What should we do for this for CIR -using ID = unsigned; - -/// A set of enums which specify the assigned numeric values for known llvm -/// calling conventions. -/// LLVM Calling Convention Represetnation -enum { - /// C - The default llvm calling convention, compatible with C. This - /// convention is the only calling convention that supports varargs calls. As - /// with typical C calling conventions, the callee/caller have to tolerate - /// certain amounts of prototype mismatch. - C = 0, - - /// Used for SPIR kernel functions. Inherits the restrictions of SPIR_FUNC, - /// except it cannot have non-void return values, it cannot have variable - /// arguments, it can also be called by the host or it is externally - /// visible. - SPIR_KERNEL = 76, -}; - -} // namespace CallingConv - -} // namespace cir - -#endif diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 165b01f513a4..a3db79645320 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -241,7 +241,7 @@ class SPIRVABIInfo : public CommonSPIRABIInfo { void computeInfo(CIRGenFunctionInfo &FI) const override { // The logic is same as in DefaultABIInfo with an exception on the kernel // arguments handling. - llvm::CallingConv::ID CC = FI.getCallingConvention(); + mlir::cir::CallingConv CC = FI.getCallingConvention(); bool cxxabiHit = getCXXABI().classifyReturnType(FI); assert(!cxxabiHit && "C++ ABI not considered"); @@ -249,7 +249,7 @@ class SPIRVABIInfo : public CommonSPIRABIInfo { FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) { - if (CC == llvm::CallingConv::SPIR_KERNEL) { + if (CC == mlir::cir::CallingConv::SpirKernel) { I.info = classifyKernelArgumentType(I.type); } else { I.info = classifyArgumentType(I.type); @@ -277,8 +277,8 @@ class CommonSPIRTargetCIRGenInfo : public TargetCIRGenInfo { mlir::cir::AddressSpaceAttr::Kind::offload_private); } - unsigned getOpenCLKernelCallingConv() const override { - return llvm::CallingConv::SPIR_KERNEL; + mlir::cir::CallingConv getOpenCLKernelCallingConv() const override { + return mlir::cir::CallingConv::SpirKernel; } }; diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index 1b4dbea110fc..f56c68d59732 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -81,8 +81,8 @@ class TargetCIRGenInfo { mlir::Type DestTy, bool IsNonNull = false) const; - /// Get LLVM calling convention for OpenCL kernel. - virtual unsigned getOpenCLKernelCallingConv() const { + /// Get CIR calling convention for OpenCL kernel. + virtual mlir::cir::CallingConv getOpenCLKernelCallingConv() const { // OpenCL kernels are called via an explicit runtime API with arguments // set with clSetKernelArg(), not as normal sub-functions. // Return SPIR_KERNEL by default as the kernel calling convention to @@ -93,7 +93,7 @@ class TargetCIRGenInfo { // clSetKernelArg() might break depending on the target-specific // conventions; different targets might split structs passed as values // to multiple function arguments etc. - return llvm::CallingConv::SPIR_KERNEL; + return mlir::cir::CallingConv::SpirKernel; } virtual ~TargetCIRGenInfo() {} From cb160a3d6265c42559652210f816cf3f13444f0e Mon Sep 17 00:00:00 2001 From: 7mile Date: Wed, 7 Aug 2024 08:20:46 +0800 Subject: [PATCH 1743/2301] [CIR][Dialect] Add OpenCL C language in cir.lang (#774) This PR adds OpenCL C language case to the enum `mlir::cir::SourceLanguage`, and maps `opts.OpenCL && !opts.OpenCLCPlusPlus` to it. --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 3 ++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 3 +++ clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index a9a601f5fae9..4207a3b37532 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -43,9 +43,10 @@ class CIRUnitAttr traits = []> def C : I32EnumAttrCase<"C", 1, "c">; def CXX : I32EnumAttrCase<"CXX", 2, "cxx">; +def OpenCLC : I32EnumAttrCase<"OpenCLC", 3, "opencl_c">; def SourceLanguage : I32EnumAttr<"SourceLanguage", "Source language", [ - C, CXX + C, CXX, OpenCLC ]> { let cppNamespace = "::mlir::cir"; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index a8df5255e540..fece8f9fa723 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -3049,6 +3049,9 @@ mlir::cir::SourceLanguage CIRGenModule::getCIRSourceLanguage() { using CIRLang = mlir::cir::SourceLanguage; auto opts = getLangOpts(); + if (opts.OpenCL && !opts.OpenCLCPlusPlus) + return CIRLang::OpenCLC; + if (opts.CPlusPlus || opts.CPlusPlus11 || opts.CPlusPlus14 || opts.CPlusPlus17 || opts.CPlusPlus20 || opts.CPlusPlus23 || opts.CPlusPlus26) diff --git a/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl b/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl new file mode 100644 index 000000000000..67aeda32c2a1 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl @@ -0,0 +1,4 @@ +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR + +// CIR: module{{.*}} attributes {{{.*}}cir.lang = #cir.lang From 870edf7c5ad5aec06a6e586bd94cab9a3f0fe0fe Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 7 Aug 2024 12:08:23 -0700 Subject: [PATCH 1744/2301] [CIR][FlattenCFG][LowerToLLVM] Exceptions: lower cir.resume --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 29 +++++++---- clang/lib/CIR/CodeGen/CIRGenException.cpp | 5 +- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 5 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 52 +++++++++++++++---- clang/test/CIR/Lowering/exceptions.cir | 9 ++-- clang/test/CIR/Lowering/try-catch.cpp | 4 +- 6 files changed, 77 insertions(+), 27 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a3ecb9bf18f3..585cbfcd0ff3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -870,24 +870,31 @@ def ContinueOp : CIR_Op<"continue", [Terminator]> { // Resume //===----------------------------------------------------------------------===// -def ResumeOp : CIR_Op<"resume", [ReturnLike, Terminator]> { +def ResumeOp : CIR_Op<"resume", [ReturnLike, Terminator, + AttrSizedOperandSegments]> { let summary = "Resumes execution after not catching exceptions"; let description = [{ - The `cir.resume` operation terminates a region on `cir.catch`, "resuming" - or continuing the unwind process. + The `cir.resume` operation handles an uncaught exception scenario and + behaves in two different modes: - Examples: - ```mlir - cir.catch ... { - ... - fallback { cir.resume }; - } + - As the terminator of a `CatchUnwind` region of `cir.try`, where it + does not receive any arguments (implied from the `cir.try` scope), or + - The terminator of a regular basic block without an enclosing `cir.try` + operation, where it requires an `exception_ptr` and a `type_id`. + + The `rethrow` attribute is used to denote rethrowing behavior for the + resume operation (versus default terminaton). ``` }]; - let arguments = (ins UnitAttr:$rethrow); + let arguments = (ins Optional:$exception_ptr, + Optional:$type_id, + UnitAttr:$rethrow); let assemblyFormat = [{ - (`rethrow` $rethrow^)? attr-dict + ($rethrow^)? + ($exception_ptr^)? + (`,` $type_id^)? + attr-dict }]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index d39a4eb9953d..7674af00b266 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -274,10 +274,13 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup) { // anything on the EH stack which needs our help. const char *RethrowName = Personality.CatchallRethrowFn; if (RethrowName != nullptr && !isCleanup) { + // FIXME(cir): upon testcase this should just add the 'rethrow' attribute + // to mlir::cir::ResumeOp below. llvm_unreachable("NYI"); } - getBuilder().create(tryOp.getLoc()); + getBuilder().create(tryOp.getLoc(), mlir::Value{}, + mlir::Value{}); getBuilder().restoreInsertionPoint(ip); return resumeBlock; } diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index fd729dd65b7a..b40fe14d9731 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -227,6 +227,11 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { mlir::Block *unwindBlock) const { assert(&r.front() == &r.back() && "only one block expected"); rewriter.mergeBlocks(&r.back(), unwindBlock); + auto resume = dyn_cast(unwindBlock->getTerminator()); + assert(resume && "expected 'cir.resume'"); + rewriter.setInsertionPointToEnd(unwindBlock); + rewriter.replaceOpWithNewOp( + resume, unwindBlock->getArgument(0), unwindBlock->getArgument(1)); } void buildAllCase(mlir::PatternRewriter &rewriter, mlir::Region &r, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index fd22e4b3079a..ea7a25b6df15 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -918,6 +918,18 @@ class CIRTryCallLowering } }; +static mlir::LLVM::LLVMStructType +getLLVMLandingPadStructTy(mlir::ConversionPatternRewriter &rewriter) { + // Create the landing pad type: struct { ptr, i32 } + mlir::MLIRContext *ctx = rewriter.getContext(); + auto llvmPtr = mlir::LLVM::LLVMPointerType::get(ctx); + llvm::SmallVector structFields; + structFields.push_back(llvmPtr); + structFields.push_back(rewriter.getI32Type()); + + return mlir::LLVM::LLVMStructType::getLiteral(ctx, structFields); +} + class CIREhInflightOpLowering : public mlir::OpConversionPattern { public: @@ -927,15 +939,7 @@ class CIREhInflightOpLowering matchAndRewrite(mlir::cir::EhInflightOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Location loc = op.getLoc(); - // Create the landing pad type: struct { ptr, i32 } - mlir::MLIRContext *ctx = rewriter.getContext(); - auto llvmPtr = mlir::LLVM::LLVMPointerType::get(ctx); - llvm::SmallVector structFields; - structFields.push_back(llvmPtr); - structFields.push_back(rewriter.getI32Type()); - - auto llvmLandingPadStructTy = - mlir::LLVM::LLVMStructType::getLiteral(ctx, structFields); + auto llvmLandingPadStructTy = getLLVMLandingPadStructTy(rewriter); mlir::ArrayAttr symListAttr = op.getSymTypeListAttr(); mlir::SmallVector symAddrs; @@ -3672,6 +3676,33 @@ class CIRCatchParamOpLowering } }; +class CIRResumeOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ResumeOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // %lpad.val = insertvalue { ptr, i32 } poison, ptr %exception_ptr, 0 + // %lpad.val2 = insertvalue { ptr, i32 } %lpad.val, i32 %selector, 1 + // resume { ptr, i32 } %lpad.val2 + SmallVector slotIdx = {0}; + SmallVector selectorIdx = {1}; + auto llvmLandingPadStructTy = getLLVMLandingPadStructTy(rewriter); + mlir::Value poison = rewriter.create( + op.getLoc(), llvmLandingPadStructTy); + + mlir::Value slot = rewriter.create( + op.getLoc(), poison, adaptor.getExceptionPtr(), slotIdx); + mlir::Value selector = rewriter.create( + op.getLoc(), slot, adaptor.getTypeId(), selectorIdx); + + rewriter.replaceOpWithNewOp(op, selector); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter, mlir::DataLayout &dataLayout) { @@ -3710,7 +3741,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRSqrtOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, CIRFModOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering, CIRPowOpLowering, CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering, - CIRCatchParamOpLowering>(converter, patterns.getContext()); + CIRCatchParamOpLowering, CIRResumeOpLowering>(converter, + patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/exceptions.cir b/clang/test/CIR/Lowering/exceptions.cir index ae2e511526c6..5b0414e2ee78 100644 --- a/clang/test/CIR/Lowering/exceptions.cir +++ b/clang/test/CIR/Lowering/exceptions.cir @@ -93,9 +93,12 @@ module @"try-catch.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.si // LLVM: call void @__cxa_end_catch() cir.br ^bb10 ^bb9(%38: !cir.ptr, %39: !u32i): - cir.br ^bb10 - // TODO: support resume. - // cir.resume + // LLVM: %[[RESUME_EH:.*]] = phi ptr + // LLVM: %[[RESUME_SEL:.*]] = phi i32 + // LLVM: %[[RES0:.*]] = insertvalue { ptr, i32 } poison, ptr %[[RESUME_EH]], 0 + // LLVM: %[[RES1:.*]] = insertvalue { ptr, i32 } %[[RES0]], i32 %[[RESUME_SEL]], 1 + // LLVM: resume { ptr, i32 } %[[RES1]] + cir.resume %38, %39 ^bb10: %40 = cir.load %3 : !cir.ptr, !u64i cir.store %40, %0 : !u64i, !cir.ptr diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 551e9a08e9f3..34a547d5ed10 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -51,8 +51,8 @@ unsigned long long tc() { z = 99; (void)msg[0]; } - // CIR_FLAT: ^[[BB_RESUME]](%[[RESUME_SEL:.*]]: !u32i - // CIR_FLAT: cir.resume + // CIR_FLAT: ^[[BB_RESUME]](%[[RESUME_EH:.*]]: !cir.ptr loc({{.*}}), %[[RESUME_SEL:.*]]: !u32i + // CIR_FLAT: cir.resume %[[RESUME_EH]], %[[RESUME_SEL]] // CIR_FLAT: ^[[AFTER_TRY]]: // CIR_FLAT: cir.load From 372bb2b855de7ed5a0a511c99c5cd30d78fcec99 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 8 Aug 2024 00:43:57 +0300 Subject: [PATCH 1745/2301] [CIR] Introduce CIR simplification (#696) This PR introduce cir simplification pass. The idea is to have a pass for the redundant operations removal/update. Right now two pattern implemented, both related to the redundant `bool` operations. First pattern removes redundant casts from `bool` to `int` and back that for some reasons appear in the code. Second pattern removes sequential unary not operations (`!!`) . For example, the code from the test is expanded from the simple check that is common for C code: ``` #define CHECK_PTR(ptr) \ do { \ if (__builtin_expect((!!((ptr) == 0)), 0))\ return -42; \ } while(0) ``` I mark this PR as a draft for the following reasons: 1) I have no idea if it's useful for the community 2) There is a test fail - unfortunately current pattern rewriter run DCE underneath the hood and looks like we can't workaround it. It's enough just to add an operation to the list - in this case `UnaryOp` - and call `applyOpPatternsAndFold`. I could rewrite a test a little in order to make everything non dead or implement a simple fix point algorithm for the particular task (I would do the former). --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 1 + clang/include/clang/CIR/Dialect/Passes.h | 2 +- clang/include/clang/CIR/Dialect/Passes.td | 22 +++-- clang/lib/CIR/CodeGen/CIRPasses.cpp | 4 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 94 +++++++++++++++---- .../{MergeCleanups.cpp => CIRSimplify.cpp} | 16 ++-- .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 2 +- clang/test/CIR/CodeGen/unary.cpp | 30 +++--- clang/test/CIR/Transforms/merge-cleanups.cir | 2 +- clang/test/CIR/Transforms/simpl.c | 38 ++++++++ clang/test/CIR/Transforms/simpl.cir | 50 ++++++++++ clang/test/CIR/mlirprint.c | 8 +- clang/tools/cir-opt/cir-opt.cpp | 2 +- 13 files changed, 214 insertions(+), 57 deletions(-) rename clang/lib/CIR/Dialect/Transforms/{MergeCleanups.cpp => CIRSimplify.cpp} (91%) create mode 100644 clang/test/CIR/Transforms/simpl.c create mode 100644 clang/test/CIR/Transforms/simpl.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 585cbfcd0ff3..6d4abb8897c8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -991,6 +991,7 @@ def UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> { }]; let hasVerifier = 1; + let hasFolder = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index de9621fc8bb6..116966d52f15 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -26,7 +26,7 @@ std::unique_ptr createLifetimeCheckPass(ArrayRef remark, ArrayRef hist, unsigned hist_limit, clang::ASTContext *astCtx); -std::unique_ptr createMergeCleanupsPass(); +std::unique_ptr createCIRSimplifyPass(); std::unique_ptr createDropASTPass(); std::unique_ptr createSCFPreparePass(); std::unique_ptr createLoweringPreparePass(); diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index c43812ff1032..536e35b9e0dc 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -11,14 +11,24 @@ include "mlir/Pass/PassBase.td" -def MergeCleanups : Pass<"cir-merge-cleanups"> { - let summary = "Remove unnecessary branches to cleanup blocks"; +def CIRSimplify : Pass<"cir-simplify"> { + let summary = "Performs CIR simplification"; let description = [{ - Canonicalize pass is too aggressive for CIR when the pipeline is - used for C/C++ analysis. This pass runs some rewrites for scopes, - merging some blocks and eliminating unnecessary control-flow. + The pass rewrites CIR and removes some redundant operations. + + For example, due to canonicalize pass is too aggressive for CIR when + the pipeline is used for C/C++ analysis, this pass runs some rewrites + for scopes, merging some blocks and eliminating unnecessary control-flow. + + Also, the pass removes redundant and/or unneccessary cast and unary not + operation e.g. + ```mlir + %1 = cir.cast(bool_to_int, %0 : !cir.bool), !s32i + %2 = cir.cast(int_to_bool, %1 : !s32i), !cir.bool + ``` + }]; - let constructor = "mlir::createMergeCleanupsPass()"; + let constructor = "mlir::createCIRSimplifyPass()"; let dependentDialects = ["cir::CIRDialect"]; } diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index fda1887028d4..60e393f7985f 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -19,8 +19,6 @@ #include "mlir/Support/LogicalResult.h" #include "mlir/Transforms/Passes.h" -#include - namespace cir { mlir::LogicalResult runCIRToCIRPasses( mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, @@ -32,7 +30,7 @@ mlir::LogicalResult runCIRToCIRPasses( bool enableMem2Reg) { mlir::PassManager pm(mlirCtx); - pm.addPass(mlir::createMergeCleanupsPass()); + pm.addPass(mlir::createCIRSimplifyPass()); // TODO(CIR): Make this actually propagate errors correctly. This is stubbed // in to get rebases going. diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 55e64770e370..e47e888ba155 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -528,26 +528,86 @@ LogicalResult CastOp::verify() { llvm_unreachable("Unknown CastOp kind?"); } -OpFoldResult CastOp::fold(FoldAdaptor adaptor) { - if (getSrc().getType() != getResult().getType()) - return {}; - switch (getKind()) { - case mlir::cir::CastKind::integral: { - // TODO: for sign differences, it's possible in certain conditions to - // create a new attribute that's capable of representing the source. - SmallVector foldResults; - auto foldOrder = getSrc().getDefiningOp()->fold(foldResults); - if (foldOrder.succeeded() && foldResults[0].is()) - return foldResults[0].get(); - return {}; - } - case mlir::cir::CastKind::bitcast: - case mlir::cir::CastKind::address_space: { - return getSrc(); +bool isIntOrBoolCast(mlir::cir::CastOp op) { + auto kind = op.getKind(); + return kind == mlir::cir::CastKind::bool_to_int || + kind == mlir::cir::CastKind::int_to_bool || + kind == mlir::cir::CastKind::integral; +} + +Value tryFoldCastChain(CastOp op) { + CastOp head = op, tail = op; + + while(op) { + if (!isIntOrBoolCast(op)) + break; + head = op; + op = dyn_cast_or_null(head.getSrc().getDefiningOp()); } - default: + + if (head == tail) return {}; + + // if bool_to_int -> ... -> int_to_bool: take the bool + // as we had it was before all casts + if (head.getKind() == mlir::cir::CastKind::bool_to_int && + tail.getKind() == mlir::cir::CastKind::int_to_bool) + return head.getSrc(); + + // if int_to_bool -> ... -> int_to_bool: take the result + // of the first one, as no other casts (and ext casts as well) + // don't change the first result + if (head.getKind() == mlir::cir::CastKind::int_to_bool && + tail.getKind() == mlir::cir::CastKind::int_to_bool) + return head.getResult(); + + return {}; +} + +OpFoldResult CastOp::fold(FoldAdaptor adaptor) { + if (getSrc().getType() == getResult().getType()) { + switch (getKind()) { + case mlir::cir::CastKind::integral: { + // TODO: for sign differences, it's possible in certain conditions to + // create a new attribute that's capable of representing the source. + SmallVector foldResults; + auto foldOrder = getSrc().getDefiningOp()->fold(foldResults); + if (foldOrder.succeeded() && foldResults[0].is()) + return foldResults[0].get(); + return {}; + } + case mlir::cir::CastKind::bitcast: + case mlir::cir::CastKind::address_space: { + return getSrc(); + } + default: + return {}; + } } + return tryFoldCastChain(*this); +} + +static bool isBoolNot(mlir::cir::UnaryOp op) { + return isa(op.getInput().getType()) && + op.getKind() == mlir::cir::UnaryOpKind::Not; +} + +/* This folder simplifies the sequential boolean not operations. + For instance, the next two unary operations will be eliminated: + + ```mlir + %1 = cir.unary(not, %0) : !cir.bool, !cir.bool + %2 = cir.unary(not, %1) : !cir.bool, !cir.bool + ``` + + and the argument of the first one (%0) will be used instead. */ +OpFoldResult UnaryOp::fold(FoldAdaptor adaptor) { + if (isBoolNot(*this)) + if (auto previous = dyn_cast_or_null(getInput().getDefiningOp())) + if (isBoolNot(previous)) + return previous.getInput(); + + return {}; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp similarity index 91% rename from clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp rename to clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp index 4da0692d6153..7e8381bc78ac 100644 --- a/clang/lib/CIR/Dialect/Transforms/MergeCleanups.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp @@ -1,4 +1,4 @@ -//===- MergeCleanups.cpp - merge simple return/yield blocks ---------------===// +//===- CIRSimplify.cpp - performs CIR simplification ----------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -108,11 +108,11 @@ struct RemoveTrivialTry : public OpRewritePattern { }; //===----------------------------------------------------------------------===// -// MergeCleanupsPass +// CIRSimplifyPass //===----------------------------------------------------------------------===// -struct MergeCleanupsPass : public MergeCleanupsBase { - using MergeCleanupsBase::MergeCleanupsBase; +struct CIRSimplifyPass : public CIRSimplifyBase { + using CIRSimplifyBase::CIRSimplifyBase; // The same operation rewriting done here could have been performed // by CanonicalizerPass (adding hasCanonicalizer for target Ops and @@ -136,7 +136,7 @@ void populateMergeCleanupPatterns(RewritePatternSet &patterns) { // clang-format on } -void MergeCleanupsPass::runOnOperation() { +void CIRSimplifyPass::runOnOperation() { // Collect rewrite patterns. RewritePatternSet patterns(&getContext()); populateMergeCleanupPatterns(patterns); @@ -146,7 +146,7 @@ void MergeCleanupsPass::runOnOperation() { getOperation()->walk([&](Operation *op) { // CastOp here is to perform a manual `fold` in // applyOpPatternsAndFold - if (isa(op)) + if (isa(op)) ops.push_back(op); }); @@ -157,6 +157,6 @@ void MergeCleanupsPass::runOnOperation() { } // namespace -std::unique_ptr mlir::createMergeCleanupsPass() { - return std::make_unique(); +std::unique_ptr mlir::createCIRSimplifyPass() { + return std::make_unique(); } diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 93d9bb83edfa..ffe1efbe0839 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -3,7 +3,7 @@ add_subdirectory(TargetLowering) add_clang_library(MLIRCIRTransforms LifetimeCheck.cpp LoweringPrepare.cpp - MergeCleanups.cpp + CIRSimplify.cpp DropAST.cpp IdiomRecognizer.cpp LibOpt.cpp diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index 5f6a451cee75..986e9b2dcedc 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -155,28 +155,28 @@ int *inc_p(int *i) { void floats(float f) { // CHECK: cir.func @{{.+}}floats{{.+}} - +f; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.float, !cir.float - -f; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.float, !cir.float + f = +f; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.float, !cir.float + f = -f; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.float, !cir.float ++f; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.float, !cir.float --f; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.float, !cir.float f++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.float, !cir.float f--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.float, !cir.float - !f; + f = !f; // CHECK: %[[#F_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : !cir.float), !cir.bool // CHECK: = cir.unary(not, %[[#F_BOOL]]) : !cir.bool, !cir.bool } void doubles(double d) { // CHECK: cir.func @{{.+}}doubles{{.+}} - +d; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.double, !cir.double - -d; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.double, !cir.double + d = +d; // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.double, !cir.double + d = -d; // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.double, !cir.double ++d; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.double, !cir.double --d; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.double, !cir.double d++; // CHECK: = cir.unary(inc, %{{[0-9]+}}) : !cir.double, !cir.double d--; // CHECK: = cir.unary(dec, %{{[0-9]+}}) : !cir.double, !cir.double - !d; + d = !d; // CHECK: %[[#D_BOOL:]] = cir.cast(float_to_bool, %{{[0-9]+}} : !cir.double), !cir.bool // CHECK: = cir.unary(not, %[[#D_BOOL]]) : !cir.bool, !cir.bool } @@ -185,7 +185,7 @@ void pointers(int *p) { // CHECK: cir.func @{{[^ ]+}}pointers // CHECK: %[[#P:]] = cir.alloca !cir.ptr, !cir.ptr> - +p; + p = +p; // CHECK: cir.unary(plus, %{{.+}}) : !cir.ptr, !cir.ptr ++p; @@ -205,7 +205,7 @@ void pointers(int *p) { // CHECK: %[[#RES:]] = cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#DEC]] : !s32i), !cir.ptr // CHECK: cir.store %[[#RES]], %[[#P]] : !cir.ptr, !cir.ptr> - !p; + bool p1 = !p; // %[[BOOLPTR:]] = cir.cast(ptr_to_bool, %15 : !cir.ptr), !cir.bool // cir.unary(not, %[[BOOLPTR]]) : !cir.bool, !cir.bool } @@ -213,20 +213,20 @@ void pointers(int *p) { void chars(char c) { // CHECK: cir.func @{{.+}}chars{{.+}} - +c; + int c1 = +c; // CHECK: %[[#PROMO:]] = cir.cast(integral, %{{.+}} : !s8i), !s32i // CHECK: cir.unary(plus, %[[#PROMO]]) : !s32i, !s32i - -c; + int c2 = -c; // CHECK: %[[#PROMO:]] = cir.cast(integral, %{{.+}} : !s8i), !s32i // CHECK: cir.unary(minus, %[[#PROMO]]) : !s32i, !s32i // Chars can go through some integer promotion codegen paths even when not promoted. - ++c; // CHECK: cir.unary(inc, %7) : !s8i, !s8i - --c; // CHECK: cir.unary(dec, %9) : !s8i, !s8i - c++; // CHECK: cir.unary(inc, %11) : !s8i, !s8i - c--; // CHECK: cir.unary(dec, %13) : !s8i, !s8i + ++c; // CHECK: cir.unary(inc, %10) : !s8i, !s8i + --c; // CHECK: cir.unary(dec, %12) : !s8i, !s8i + c++; // CHECK: cir.unary(inc, %14) : !s8i, !s8i + c--; // CHECK: cir.unary(dec, %16) : !s8i, !s8i - !c; + bool c3 = !c; // CHECK: %[[#C_BOOL:]] = cir.cast(int_to_bool, %{{[0-9]+}} : !s8i), !cir.bool // CHECK: cir.unary(not, %[[#C_BOOL]]) : !cir.bool, !cir.bool } diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 92cf22b1abef..4280768fc4b0 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s -cir-merge-cleanups -o %t.out.cir +// RUN: cir-opt %s -cir-simplify -o %t.out.cir // RUN: FileCheck --input-file=%t.out.cir %s #false = #cir.bool : !cir.bool diff --git a/clang/test/CIR/Transforms/simpl.c b/clang/test/CIR/Transforms/simpl.c new file mode 100644 index 000000000000..6033bd5a8372 --- /dev/null +++ b/clang/test/CIR/Transforms/simpl.c @@ -0,0 +1,38 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-simplify %s -o %t1.cir 2>&1 | FileCheck -check-prefix=BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-simplify %s -o %t2.cir 2>&1 | FileCheck -check-prefix=AFTER %s + + +#define CHECK_PTR(ptr) \ + do { \ + if (__builtin_expect((!!((ptr) == 0)), 0))\ + return -42; \ + } while(0) + +int foo(int* ptr) { + CHECK_PTR(ptr); + + (*ptr)++; + return 0; +} + +// BEFORE: cir.func {{.*@foo}} +// BEFORE: [[X0:%.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// BEFORE: [[X1:%.*]] = cir.const #cir.ptr : !cir.ptr +// BEFORE: [[X2:%.*]] = cir.cmp(eq, [[X0]], [[X1]]) : !cir.ptr, !s32i +// BEFORE: [[X3:%.*]] = cir.cast(int_to_bool, [[X2]] : !s32i), !cir.bool +// BEFORE: [[X4:%.*]] = cir.unary(not, [[X3]]) : !cir.bool, !cir.bool +// BEFORE: [[X5:%.*]] = cir.cast(bool_to_int, [[X4]] : !cir.bool), !s32i +// BEFORE: [[X6:%.*]] = cir.cast(int_to_bool, [[X5]] : !s32i), !cir.bool +// BEFORE: [[X7:%.*]] = cir.unary(not, [[X6]]) : !cir.bool, !cir.bool +// BEFORE: [[X8:%.*]] = cir.cast(bool_to_int, [[X7]] : !cir.bool), !s32i +// BEFORE: [[X9:%.*]] = cir.cast(integral, [[X8]] : !s32i), !s64i +// BEFORE: [[X10:%.*]] = cir.const #cir.int<0> : !s32i +// BEFORE: [[X11:%.*]] = cir.cast(integral, [[X10]] : !s32i), !s64i +// BEFORE: [[X12:%.*]] = cir.cast(int_to_bool, [[X9]] : !s64i), !cir.bool +// BEFORE: cir.if [[X12]] + +// AFTER: [[X0:%.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// AFTER: [[X1:%.*]] = cir.const #cir.ptr : !cir.ptr +// AFTER: [[X2:%.*]] = cir.cmp(eq, [[X0]], [[X1]]) : !cir.ptr, !s32i +// AFTER: [[X3:%.*]] = cir.cast(int_to_bool, [[X2]] : !s32i), !cir.bool +// AFTER: cir.if [[X3]] \ No newline at end of file diff --git a/clang/test/CIR/Transforms/simpl.cir b/clang/test/CIR/Transforms/simpl.cir new file mode 100644 index 000000000000..1ebedc323471 --- /dev/null +++ b/clang/test/CIR/Transforms/simpl.cir @@ -0,0 +1,50 @@ +// RUN: cir-opt %s -cir-simplify -o - | FileCheck %s + +!s32i = !cir.int +!s64i = !cir.int +module { + cir.func @unary_not(%arg0: !cir.bool) -> !cir.bool { + %0 = cir.unary(not, %arg0) : !cir.bool, !cir.bool + %1 = cir.unary(not, %0) : !cir.bool, !cir.bool + cir.return %1 : !cir.bool + } + // CHECK: cir.func @unary_not(%arg0: !cir.bool) -> !cir.bool + // CHECK: cir.return %arg0 : !cir.bool + + cir.func @cast1(%arg0: !cir.bool) -> !cir.bool { + %0 = cir.cast(bool_to_int, %arg0 : !cir.bool), !s32i + %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool + cir.return %1 : !cir.bool + } + // CHECK: cir.func @cast1(%arg0: !cir.bool) -> !cir.bool + // CHECK: cir.return %arg0 : !cir.bool + + cir.func @cast2(%arg0: !s32i) -> !cir.bool { + %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + %1 = cir.cast(bool_to_int, %0 : !cir.bool), !s32i + %2 = cir.cast(integral, %1 : !s32i), !s64i + %3 = cir.cast(int_to_bool, %2 : !s64i), !cir.bool + cir.return %3 : !cir.bool + } + // CHECK: cir.func @cast2(%arg0: !s32i) -> !cir.bool + // CHECK: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + // CHECK: cir.return %0 : !cir.bool + + cir.func @no_cast(%arg0: !s32i) -> !s64i { + %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + %1 = cir.cast(bool_to_int, %0 : !cir.bool), !s32i + %2 = cir.cast(integral, %1 : !s32i), !s64i + cir.return %2 : !s64i + } + // CHECK: cir.func @no_cast(%arg0: !s32i) -> !s64i + // CHECK: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + // CHECK: %1 = cir.cast(bool_to_int, %0 : !cir.bool), !s32i + // CHECK: %2 = cir.cast(integral, %1 : !s32i), !s64i + // CHECK: cir.return %2 : !s64i + +} + + + + + diff --git a/clang/test/CIR/mlirprint.c b/clang/test/CIR/mlirprint.c index 99a57366e4ba..83336cc73792 100644 --- a/clang/test/CIR/mlirprint.c +++ b/clang/test/CIR/mlirprint.c @@ -11,7 +11,7 @@ int foo(void) { } -// CIR: IR Dump After MergeCleanups (cir-merge-cleanups) +// CIR: IR Dump After CIRSimplify (cir-simplify) // CIR: cir.func @foo() -> !s32i // CIR: IR Dump After LoweringPrepare (cir-lowering-prepare) // CIR: cir.func @foo() -> !s32i @@ -19,14 +19,14 @@ int foo(void) { // CIR-NOT: IR Dump After SCFPrepare // CIR: IR Dump After DropAST (cir-drop-ast) // CIR: cir.func @foo() -> !s32i -// CIRFLAT: IR Dump After MergeCleanups (cir-merge-cleanups) +// CIRFLAT: IR Dump After CIRSimplify (cir-simplify) // CIRFLAT: cir.func @foo() -> !s32i // CIRFLAT: IR Dump After LoweringPrepare (cir-lowering-prepare) // CIRFLAT: cir.func @foo() -> !s32i // CIRFLAT: IR Dump After FlattenCFG (cir-flatten-cfg) // CIRFLAT: IR Dump After DropAST (cir-drop-ast) // CIRFLAT: cir.func @foo() -> !s32i -// CIRMLIR: IR Dump After MergeCleanups (cir-merge-cleanups) +// CIRMLIR: IR Dump After CIRSimplify (cir-simplify) // CIRMLIR: IR Dump After LoweringPrepare (cir-lowering-prepare) // CIRMLIR: IR Dump After SCFPrepare (cir-mlir-scf-prepare // CIRMLIR: IR Dump After DropAST (cir-drop-ast) @@ -35,7 +35,7 @@ int foo(void) { // LLVM: IR Dump After // LLVM: define dso_local i32 @foo() -// CIRPASS-NOT: IR Dump After MergeCleanups +// CIRPASS-NOT: IR Dump After CIRSimplify // CIRPASS: IR Dump After DropAST // CFGPASS: IR Dump Before FlattenCFG (cir-flatten-cfg) diff --git a/clang/tools/cir-opt/cir-opt.cpp b/clang/tools/cir-opt/cir-opt.cpp index ea58a3bdb3b3..343f2d317137 100644 --- a/clang/tools/cir-opt/cir-opt.cpp +++ b/clang/tools/cir-opt/cir-opt.cpp @@ -39,7 +39,7 @@ int main(int argc, char **argv) { return cir::createConvertMLIRToLLVMPass(); }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { - return mlir::createMergeCleanupsPass(); + return mlir::createCIRSimplifyPass(); }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { From 2760f94cb583bffbf896f9e3c3b1aa698f08d57a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 7 Aug 2024 14:58:27 -0700 Subject: [PATCH 1746/2301] [CIR][NFC] Cleanup whitespaces --- clang/include/clang/CIR/Dialect/Passes.td | 6 ++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 28 +++++++++++------------ clang/test/CIR/Transforms/simpl.c | 4 ++-- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 536e35b9e0dc..1c17ca9c5ed1 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -14,13 +14,13 @@ include "mlir/Pass/PassBase.td" def CIRSimplify : Pass<"cir-simplify"> { let summary = "Performs CIR simplification"; let description = [{ - The pass rewrites CIR and removes some redundant operations. - + The pass rewrites CIR and removes some redundant operations. + For example, due to canonicalize pass is too aggressive for CIR when the pipeline is used for C/C++ analysis, this pass runs some rewrites for scopes, merging some blocks and eliminating unnecessary control-flow. - Also, the pass removes redundant and/or unneccessary cast and unary not + Also, the pass removes redundant and/or unneccessary cast and unary not operation e.g. ```mlir %1 = cir.cast(bool_to_int, %0 : !cir.bool), !s32i diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index e47e888ba155..383a1314bd3b 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -536,10 +536,10 @@ bool isIntOrBoolCast(mlir::cir::CastOp op) { } Value tryFoldCastChain(CastOp op) { - CastOp head = op, tail = op; + CastOp head = op, tail = op; while(op) { - if (!isIntOrBoolCast(op)) + if (!isIntOrBoolCast(op)) break; head = op; op = dyn_cast_or_null(head.getSrc().getDefiningOp()); @@ -564,10 +564,10 @@ Value tryFoldCastChain(CastOp op) { return {}; } -OpFoldResult CastOp::fold(FoldAdaptor adaptor) { +OpFoldResult CastOp::fold(FoldAdaptor adaptor) { if (getSrc().getType() == getResult().getType()) { switch (getKind()) { - case mlir::cir::CastKind::integral: { + case mlir::cir::CastKind::integral: { // TODO: for sign differences, it's possible in certain conditions to // create a new attribute that's capable of representing the source. SmallVector foldResults; @@ -579,7 +579,7 @@ OpFoldResult CastOp::fold(FoldAdaptor adaptor) { case mlir::cir::CastKind::bitcast: case mlir::cir::CastKind::address_space: { return getSrc(); - } + } default: return {}; } @@ -592,15 +592,15 @@ static bool isBoolNot(mlir::cir::UnaryOp op) { op.getKind() == mlir::cir::UnaryOpKind::Not; } -/* This folder simplifies the sequential boolean not operations. - For instance, the next two unary operations will be eliminated: - - ```mlir - %1 = cir.unary(not, %0) : !cir.bool, !cir.bool - %2 = cir.unary(not, %1) : !cir.bool, !cir.bool - ``` - - and the argument of the first one (%0) will be used instead. */ +// This folder simplifies the sequential boolean not operations. +// For instance, the next two unary operations will be eliminated: +// +// ```mlir +// %1 = cir.unary(not, %0) : !cir.bool, !cir.bool +// %2 = cir.unary(not, %1) : !cir.bool, !cir.bool +// ``` +// +// and the argument of the first one (%0) will be used instead. OpFoldResult UnaryOp::fold(FoldAdaptor adaptor) { if (isBoolNot(*this)) if (auto previous = dyn_cast_or_null(getInput().getDefiningOp())) diff --git a/clang/test/CIR/Transforms/simpl.c b/clang/test/CIR/Transforms/simpl.c index 6033bd5a8372..efb22ddb3f57 100644 --- a/clang/test/CIR/Transforms/simpl.c +++ b/clang/test/CIR/Transforms/simpl.c @@ -8,7 +8,7 @@ return -42; \ } while(0) -int foo(int* ptr) { +int foo(int* ptr) { CHECK_PTR(ptr); (*ptr)++; @@ -29,7 +29,7 @@ int foo(int* ptr) { // BEFORE: [[X10:%.*]] = cir.const #cir.int<0> : !s32i // BEFORE: [[X11:%.*]] = cir.cast(integral, [[X10]] : !s32i), !s64i // BEFORE: [[X12:%.*]] = cir.cast(int_to_bool, [[X9]] : !s64i), !cir.bool -// BEFORE: cir.if [[X12]] +// BEFORE: cir.if [[X12]] // AFTER: [[X0:%.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr // AFTER: [[X1:%.*]] = cir.const #cir.ptr : !cir.ptr From 04a87b10bf898b092341e174523f562f29111b3c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 7 Aug 2024 15:16:14 -0700 Subject: [PATCH 1747/2301] [CIR][NFC] Fix warnings post rebase + from PRs --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 10 ++++------ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 2 ++ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 15 ++++++++++++--- .../Transforms/TargetLowering/LowerFunction.cpp | 10 ---------- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11 +++++------ 8 files changed, 26 insertions(+), 27 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index d91c8603b3d7..a1b4dd63b3b5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -792,7 +792,7 @@ void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src, // When the vector size is odd and .odd or .hi is used, the last element // of the Elts constant array will be one past the size of the vector. // Ignore the last element here, if it is greater than the mask size. - if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size()) + if ((unsigned)getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size()) NumSrcElts--; // modify when what gets shuffled in diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index ebff8e10c793..e7a5cfac4e7a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -185,10 +185,10 @@ class ComplexExprEmitter : public StmtVisitor { struct BinOpInfo { mlir::Location Loc; - mlir::Value LHS; - mlir::Value RHS; - QualType Ty; // Computation Type. - FPOptions FPFeatures; + mlir::Value LHS{}; + mlir::Value RHS{}; + QualType Ty{}; // Computation Type. + FPOptions FPFeatures{}; }; BinOpInfo buildBinOps(const BinaryOperator *E, @@ -752,8 +752,6 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) { return mlir::cir::ComplexRangeKind::Basic; case LangOptions::CX_None: return mlir::cir::ComplexRangeKind::None; - default: - llvm_unreachable("unknown ComplexRangeKind"); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 4a5a9b698695..721767b02118 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1804,7 +1804,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, unsigned NumElements = Value.getVectorLength(); SmallVector Elts; Elts.reserve(NumElements); - for (int i = 0; i < NumElements; ++i) { + for (unsigned i = 0; i < NumElements; ++i) { auto C = tryEmitPrivateForMemory(Value.getVectorElt(i), ElementType); if (!C) return {}; diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 5161a06e5b9f..8f9cbec3d882 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1154,6 +1154,8 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) { #include "clang/Basic/PPCTypes.def" #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/RISCVVTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id: +#include "clang/Basic/AMDGPUTypes.def" case BuiltinType::ShortAccum: case BuiltinType::Accum: case BuiltinType::LongAccum: diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 2b6f4c49c655..543b744cc5d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -102,6 +102,7 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::OMPScopeDirectiveClass: llvm_unreachable("NYI"); case Stmt::OpenACCComputeConstructClass: + case Stmt::OpenACCLoopConstructClass: case Stmt::OMPErrorDirectiveClass: case Stmt::NoStmtClass: case Stmt::CXXCatchStmtClass: diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index e2ce60e25f07..66a0c7c6d3ac 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -6,8 +6,8 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" @@ -24,7 +24,8 @@ using namespace clang; using namespace cir; -mlir::cir::CallingConv CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { +mlir::cir::CallingConv +CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { switch (CC) { case CC_C: return mlir::cir::CallingConv::C; @@ -389,7 +390,6 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case Type::Builtin: { switch (cast(Ty)->getKind()) { - case BuiltinType::WasmExternRef: case BuiltinType::SveBoolx2: case BuiltinType::SveBoolx4: case BuiltinType::SveCount: @@ -579,6 +579,15 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { assert(0 && "not implemented"); break; } +#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \ + case BuiltinType::Id: { \ + llvm_unreachable("NYI"); \ + } break; +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) \ + case BuiltinType::Id: \ + llvm_unreachable("NYI"); +#include "clang/Basic/AMDGPUTypes.def" case BuiltinType::Dependent: #define BUILTIN_TYPE(Id, SingletonId) #define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id: diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 08a56d03c604..136ea500d014 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -32,16 +32,6 @@ using ABIArgInfo = ::cir::ABIArgInfo; namespace mlir { namespace cir { -namespace { - -// FIXME(cir): Create a custom rewriter class to abstract this away. -Value createBitcast(Value Src, Type Ty, LowerFunction &LF) { - return LF.getRewriter().create(Src.getLoc(), Ty, CastKind::bitcast, - Src); -} - -} // namespace - // FIXME(cir): Pass SrcFn and NewFn around instead of having then as attributes. LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, FuncOp srcFn, FuncOp newFn) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ea7a25b6df15..88cbe2b8d4e3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -474,9 +474,8 @@ mlir::LLVM::CConv convertCallingConv(mlir::cir::CallingConv callinvConv) { return LLVM::SPIR_KERNEL; case CIR::SpirFunction: return LLVM::SPIR_FUNC; - default: - llvm_unreachable("Unknown calling convention"); } + llvm_unreachable("Unknown calling convention"); } class CIRCopyOpLowering : public mlir::OpConversionPattern { @@ -3644,8 +3643,8 @@ class CIRCatchParamOpLowering mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint( op->getParentOfType()); - auto catchFn = rewriter.create( - op.getLoc(), cxaBeginCatch, catchFnTy); + rewriter.create(op.getLoc(), cxaBeginCatch, + catchFnTy); } rewriter.replaceOpWithNewOp( op, mlir::TypeRange{llvmPtrTy}, cxaBeginCatch, @@ -3663,8 +3662,8 @@ class CIRCatchParamOpLowering mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint( op->getParentOfType()); - auto catchFn = rewriter.create( - op.getLoc(), cxaEndCatch, catchFnTy); + rewriter.create(op.getLoc(), cxaEndCatch, + catchFnTy); } rewriter.create(op.getLoc(), mlir::TypeRange{}, cxaEndCatch, mlir::ValueRange{}); From 34a7650de65ac39486f08ec8da07ba4b434d3b79 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 9 Aug 2024 06:21:17 +0800 Subject: [PATCH 1748/2301] [CIR] Add support for complex cast operations (#758) This PR adds support for complex cast operations. It adds the following new cast kind variants to the `cir.cast` operation: - `float_to_complex`, - `int_to_complex`, - `float_complex_to_real`, - `int_complex_to_real`, - `float_complex_to_bool`, - `int_complex_to_bool`, - `float_complex`, - `float_complex_to_int_complex`, - `int_complex`, and - `int_complex_to_float_complex`. CIRGen and LLVM IR support for these new cast variants are also included. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 27 ++- clang/include/clang/CIR/Dialect/IR/CIROps.td | 28 ++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 46 ---- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 47 +++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 36 ++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 112 +++++++++- .../Dialect/Transforms/LoweringPrepare.cpp | 140 +++++++++++- clang/test/CIR/CodeGen/complex-cast.c | 205 ++++++++++++++++++ 8 files changed, 575 insertions(+), 66 deletions(-) create mode 100644 clang/test/CIR/CodeGen/complex-cast.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 4208aa5aaed3..b327645d1958 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -59,6 +59,22 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return create(loc, attr.getType(), attr); } + // Creates constant null value for integral type ty. + mlir::cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { + return create(loc, ty, getZeroInitAttr(ty)); + } + + mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { + return create(loc, getBoolTy(), + getCIRBoolAttr(state)); + } + mlir::cir::ConstantOp getFalse(mlir::Location loc) { + return getBool(false, loc); + } + mlir::cir::ConstantOp getTrue(mlir::Location loc) { + return getBool(true, loc); + } + mlir::cir::BoolType getBoolTy() { return ::mlir::cir::BoolType::get(getContext()); } @@ -110,12 +126,16 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return mlir::cir::FPAttr::getZero(fltType); if (auto fltType = mlir::dyn_cast(ty)) return mlir::cir::FPAttr::getZero(fltType); + if (auto fltType = mlir::dyn_cast(ty)) + return mlir::cir::FPAttr::getZero(fltType); + if (auto fltType = mlir::dyn_cast(ty)) + return mlir::cir::FPAttr::getZero(fltType); if (auto complexType = mlir::dyn_cast(ty)) return getZeroAttr(complexType); if (auto arrTy = mlir::dyn_cast(ty)) return getZeroAttr(arrTy); if (auto ptrTy = mlir::dyn_cast(ty)) - return getConstPtrAttr(ptrTy, 0); + return getConstNullPtrAttr(ptrTy); if (auto structTy = mlir::dyn_cast(ty)) return getZeroAttr(structTy); if (mlir::isa(ty)) { @@ -548,6 +568,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { getContext(), mlir::cast(t), val); } + mlir::TypedAttr getConstNullPtrAttr(mlir::Type t) { + assert(mlir::isa(t) && "expected cir.ptr"); + return getConstPtrAttr(t, 0); + } + // Creates constant nullptr for pointer type ty. mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { assert(!MissingFeatures::targetCodeGenInfoGetNullPointer()); diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 6d4abb8897c8..da3f985631e9 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -71,6 +71,18 @@ def CK_BooleanToIntegral : I32EnumAttrCase<"bool_to_int", 11>; def CK_IntegralToFloat : I32EnumAttrCase<"int_to_float", 12>; def CK_BooleanToFloat : I32EnumAttrCase<"bool_to_float", 13>; def CK_AddressSpaceConversion : I32EnumAttrCase<"address_space", 14>; +def CK_FloatToComplex : I32EnumAttrCase<"float_to_complex", 15>; +def CK_IntegralToComplex : I32EnumAttrCase<"int_to_complex", 16>; +def CK_FloatComplexToReal : I32EnumAttrCase<"float_complex_to_real", 17>; +def CK_IntegralComplexToReal : I32EnumAttrCase<"int_complex_to_real", 18>; +def CK_FloatComplexToBoolean : I32EnumAttrCase<"float_complex_to_bool", 19>; +def CK_IntegralComplexToBoolean : I32EnumAttrCase<"int_complex_to_bool", 20>; +def CK_FloatComplexCast : I32EnumAttrCase<"float_complex", 21>; +def CK_FloatComplexToIntegralComplex + : I32EnumAttrCase<"float_complex_to_int_complex", 22>; +def CK_IntegralComplexCast : I32EnumAttrCase<"int_complex", 23>; +def CK_IntegralComplexToFloatComplex + : I32EnumAttrCase<"int_complex_to_float_complex", 24>; def CastKind : I32EnumAttr< "CastKind", @@ -79,7 +91,11 @@ def CastKind : I32EnumAttr< CK_BitCast, CK_FloatingCast, CK_PtrToBoolean, CK_FloatToIntegral, CK_IntegralToPointer, CK_PointerToIntegral, CK_FloatToBoolean, CK_BooleanToIntegral, CK_IntegralToFloat, CK_BooleanToFloat, - CK_AddressSpaceConversion]> { + CK_AddressSpaceConversion, CK_FloatToComplex, CK_IntegralToComplex, + CK_FloatComplexToReal, CK_IntegralComplexToReal, CK_FloatComplexToBoolean, + CK_IntegralComplexToBoolean, CK_FloatComplexCast, + CK_FloatComplexToIntegralComplex, CK_IntegralComplexCast, + CK_IntegralComplexToFloatComplex]> { let cppNamespace = "::mlir::cir"; } @@ -104,6 +120,16 @@ def CastOp : CIR_Op<"cast", - `bool_to_int` - `bool_to_float` - `address_space` + - `float_to_complex` + - `int_to_complex` + - `float_complex_to_real` + - `int_complex_to_real` + - `float_complex_to_bool` + - `int_complex_to_bool` + - `float_complex` + - `float_complex_to_int_complex` + - `int_complex` + - `int_complex_to_float_complex` This is effectively a subset of the rules from `llvm-project/clang/include/clang/AST/OperationKinds.def`; but note that some diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 30bf342ba5e2..16ce15f2624e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -136,11 +136,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::GlobalViewAttr::get(type, symbol, indices); } - mlir::TypedAttr getConstNullPtrAttr(mlir::Type t) { - assert(mlir::isa(t) && "expected cir.ptr"); - return getConstPtrAttr(t, 0); - } - mlir::Attribute getString(llvm::StringRef str, mlir::Type eltTy, unsigned size = 0) { unsigned finalSize = size ? size : str.size(); @@ -246,31 +241,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::DataMemberAttr::get(getContext(), ty, std::nullopt); } - mlir::TypedAttr getZeroInitAttr(mlir::Type ty) { - if (mlir::isa(ty)) - return mlir::cir::IntAttr::get(ty, 0); - if (auto fltType = mlir::dyn_cast(ty)) - return mlir::cir::FPAttr::getZero(fltType); - if (auto fltType = mlir::dyn_cast(ty)) - return mlir::cir::FPAttr::getZero(fltType); - if (auto fltType = mlir::dyn_cast(ty)) - return mlir::cir::FPAttr::getZero(fltType); - if (auto fltType = mlir::dyn_cast(ty)) - return mlir::cir::FPAttr::getZero(fltType); - if (auto complexType = mlir::dyn_cast(ty)) - return getZeroAttr(complexType); - if (auto arrTy = mlir::dyn_cast(ty)) - return getZeroAttr(arrTy); - if (auto ptrTy = mlir::dyn_cast(ty)) - return getConstNullPtrAttr(ptrTy); - if (auto structTy = mlir::dyn_cast(ty)) - return getZeroAttr(structTy); - if (mlir::isa(ty)) { - return getCIRBoolAttr(false); - } - llvm_unreachable("Zero initializer for given type is NYI"); - } - // TODO(cir): Once we have CIR float types, replace this by something like a // NullableValueInterface to allow for type-independent queries. bool isNullValue(mlir::Attribute attr) const { @@ -554,28 +524,12 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::IntAttr::get(t, C)); } - mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { - return create(loc, getBoolTy(), - getCIRBoolAttr(state)); - } - mlir::cir::ConstantOp getFalse(mlir::Location loc) { - return getBool(false, loc); - } - mlir::cir::ConstantOp getTrue(mlir::Location loc) { - return getBool(true, loc); - } - /// Create constant nullptr for pointer-to-data-member type ty. mlir::cir::ConstantOp getNullDataMemberPtr(mlir::cir::DataMemberType ty, mlir::Location loc) { return create(loc, ty, getNullDataMemberAttr(ty)); } - // Creates constant null value for integral type ty. - mlir::cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { - return create(loc, ty, getZeroInitAttr(ty)); - } - mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { // TODO: dispatch creation for primitive types. assert((mlir::isa(ty) || diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index e7a5cfac4e7a..8880dfd37cb1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -372,20 +372,43 @@ mlir::Value ComplexExprEmitter::buildComplexToComplexCast(mlir::Value Val, QualType SrcType, QualType DestType, SourceLocation Loc) { - // Get the src/dest element type. - SrcType = SrcType->castAs()->getElementType(); - DestType = DestType->castAs()->getElementType(); if (SrcType == DestType) return Val; - llvm_unreachable("complex cast is NYI"); + // Get the src/dest element type. + QualType SrcElemTy = SrcType->castAs()->getElementType(); + QualType DestElemTy = DestType->castAs()->getElementType(); + + mlir::cir::CastKind CastOpKind; + if (SrcElemTy->isFloatingType() && DestElemTy->isFloatingType()) + CastOpKind = mlir::cir::CastKind::float_complex; + else if (SrcElemTy->isFloatingType() && DestElemTy->isIntegerType()) + CastOpKind = mlir::cir::CastKind::float_complex_to_int_complex; + else if (SrcElemTy->isIntegerType() && DestElemTy->isFloatingType()) + CastOpKind = mlir::cir::CastKind::int_complex_to_float_complex; + else if (SrcElemTy->isIntegerType() && DestElemTy->isIntegerType()) + CastOpKind = mlir::cir::CastKind::int_complex; + else + llvm_unreachable("unexpected src type or dest type"); + + return Builder.createCast(CGF.getLoc(Loc), CastOpKind, Val, + CGF.ConvertType(DestType)); } mlir::Value ComplexExprEmitter::buildScalarToComplexCast(mlir::Value Val, QualType SrcType, QualType DestType, SourceLocation Loc) { - llvm_unreachable("complex cast is NYI"); + mlir::cir::CastKind CastOpKind; + if (SrcType->isFloatingType()) + CastOpKind = mlir::cir::CastKind::float_to_complex; + else if (SrcType->isIntegerType()) + CastOpKind = mlir::cir::CastKind::int_to_complex; + else + llvm_unreachable("unexpected src type"); + + return Builder.createCast(CGF.getLoc(Loc), CastOpKind, Val, + CGF.ConvertType(DestType)); } mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, @@ -467,14 +490,20 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, llvm_unreachable("invalid cast kind for complex value"); case CK_FloatingRealToComplex: - case CK_IntegralRealToComplex: - llvm_unreachable("NYI"); + case CK_IntegralRealToComplex: { + assert(!MissingFeatures::CGFPOptionsRAII()); + return buildScalarToComplexCast(CGF.buildScalarExpr(Op), Op->getType(), + DestTy, Op->getExprLoc()); + } case CK_FloatingComplexCast: case CK_FloatingComplexToIntegralComplex: case CK_IntegralComplexCast: - case CK_IntegralComplexToFloatingComplex: - llvm_unreachable("NYI"); + case CK_IntegralComplexToFloatingComplex: { + assert(!MissingFeatures::CGFPOptionsRAII()); + return buildComplexToComplexCast(Visit(Op), Op->getType(), DestTy, + Op->getExprLoc()); + } } llvm_unreachable("unknown cast resulting in complex value"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 9dcda33fa134..940b3800ec14 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -113,6 +113,9 @@ class ScalarExprEmitter : public StmtVisitor { return CGF.buildCheckedLValue(E, TCK); } + mlir::Value buildComplexToScalarConversion(mlir::Location Loc, mlir::Value V, + CastKind Kind, QualType DestTy); + /// Emit a value that corresponds to null for the given type. mlir::Value buildNullValue(QualType Ty, mlir::Location loc); @@ -1797,13 +1800,13 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_MemberPointerToBoolean: llvm_unreachable("NYI"); case CK_FloatingComplexToReal: - llvm_unreachable("NYI"); case CK_IntegralComplexToReal: - llvm_unreachable("NYI"); case CK_FloatingComplexToBoolean: - llvm_unreachable("NYI"); - case CK_IntegralComplexToBoolean: - llvm_unreachable("NYI"); + case CK_IntegralComplexToBoolean: { + mlir::Value V = CGF.buildComplexExpr(E); + return buildComplexToScalarConversion(CGF.getLoc(CE->getExprLoc()), V, Kind, + DestTy); + } case CK_ZeroToOCLOpaqueType: llvm_unreachable("NYI"); case CK_IntToOCLSampler: @@ -2161,6 +2164,29 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( return LHSLV; } +mlir::Value ScalarExprEmitter::buildComplexToScalarConversion( + mlir::Location Loc, mlir::Value V, CastKind Kind, QualType DestTy) { + mlir::cir::CastKind CastOpKind; + switch (Kind) { + case CK_FloatingComplexToReal: + CastOpKind = mlir::cir::CastKind::float_complex_to_real; + break; + case CK_IntegralComplexToReal: + CastOpKind = mlir::cir::CastKind::int_complex_to_real; + break; + case CK_FloatingComplexToBoolean: + CastOpKind = mlir::cir::CastKind::float_complex_to_bool; + break; + case CK_IntegralComplexToBoolean: + CastOpKind = mlir::cir::CastKind::int_complex_to_bool; + break; + default: + llvm_unreachable("invalid complex-to-scalar cast kind"); + } + + return Builder.createCast(Loc, CastOpKind, V, CGF.ConvertType(DestTy)); +} + mlir::Value ScalarExprEmitter::buildNullValue(QualType Ty, mlir::Location loc) { return CGF.buildFromMemory(CGF.CGM.buildNullConstant(Ty, loc), Ty); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 383a1314bd3b..a41abb1caa1d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -523,6 +523,114 @@ LogicalResult CastOp::verify() { return emitOpError() << "requires two types differ in addrspace only"; return success(); } + case cir::CastKind::float_to_complex: { + if (!mlir::isa(srcType)) + return emitOpError() << "requires !cir.float type for source"; + auto resComplexTy = mlir::dyn_cast(resType); + if (!resComplexTy) + return emitOpError() << "requires !cir.complex type for result"; + if (srcType != resComplexTy.getElementTy()) + return emitOpError() << "requires source type match result element type"; + return success(); + } + case cir::CastKind::int_to_complex: { + if (!mlir::isa(srcType)) + return emitOpError() << "requires !cir.int type for source"; + auto resComplexTy = mlir::dyn_cast(resType); + if (!resComplexTy) + return emitOpError() << "requires !cir.complex type for result"; + if (srcType != resComplexTy.getElementTy()) + return emitOpError() << "requires source type match result element type"; + return success(); + } + case cir::CastKind::float_complex_to_real: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy) + return emitOpError() << "requires !cir.complex type for source"; + if (!mlir::isa(resType)) + return emitOpError() << "requires !cir.float type for result"; + if (srcComplexTy.getElementTy() != resType) + return emitOpError() << "requires source element type match result type"; + return success(); + } + case cir::CastKind::int_complex_to_real: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy) + return emitOpError() << "requires !cir.complex type for source"; + if (!mlir::isa(resType)) + return emitOpError() << "requires !cir.int type for result"; + if (srcComplexTy.getElementTy() != resType) + return emitOpError() << "requires source element type match result type"; + return success(); + } + case cir::CastKind::float_complex_to_bool: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy || + !mlir::isa(srcComplexTy.getElementTy())) + return emitOpError() + << "requires !cir.complex type for source"; + if (!mlir::isa(resType)) + return emitOpError() << "requires !cir.bool type for result"; + return success(); + } + case cir::CastKind::int_complex_to_bool: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy || + !mlir::isa(srcComplexTy.getElementTy())) + return emitOpError() + << "requires !cir.complex type for source"; + if (!mlir::isa(resType)) + return emitOpError() << "requires !cir.bool type for result"; + return success(); + } + case cir::CastKind::float_complex: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy || + !mlir::isa(srcComplexTy.getElementTy())) + return emitOpError() + << "requires !cir.complex type for source"; + auto resComplexTy = mlir::dyn_cast(resType); + if (!resComplexTy || + !mlir::isa(resComplexTy.getElementTy())) + return emitOpError() + << "requires !cir.complex type for result"; + return success(); + } + case cir::CastKind::float_complex_to_int_complex: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy || + !mlir::isa(srcComplexTy.getElementTy())) + return emitOpError() + << "requires !cir.complex type for source"; + auto resComplexTy = mlir::dyn_cast(resType); + if (!resComplexTy || + !mlir::isa(resComplexTy.getElementTy())) + return emitOpError() << "requires !cir.complex type for result"; + return success(); + } + case cir::CastKind::int_complex: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy || + !mlir::isa(srcComplexTy.getElementTy())) + return emitOpError() << "requires !cir.complex type for source"; + auto resComplexTy = mlir::dyn_cast(resType); + if (!resComplexTy || + !mlir::isa(resComplexTy.getElementTy())) + return emitOpError() << "requires !cir.complex type for result"; + return success(); + } + case cir::CastKind::int_complex_to_float_complex: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy || + !mlir::isa(srcComplexTy.getElementTy())) + return emitOpError() << "requires !cir.complex type for source"; + auto resComplexTy = mlir::dyn_cast(resType); + if (!resComplexTy || + !mlir::isa(resComplexTy.getElementTy())) + return emitOpError() + << "requires !cir.complex type for result"; + return success(); + } } llvm_unreachable("Unknown CastOp kind?"); @@ -577,7 +685,9 @@ OpFoldResult CastOp::fold(FoldAdaptor adaptor) { return {}; } case mlir::cir::CastKind::bitcast: - case mlir::cir::CastKind::address_space: { + case mlir::cir::CastKind::address_space: + case mlir::cir::CastKind::float_complex: + case mlir::cir::CastKind::int_complex: { return getSrc(); } default: diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 201895abfaea..c4244b1b2e8f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -73,6 +73,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void runOnOp(Operation *op); void lowerUnaryOp(UnaryOp op); void lowerBinOp(BinOp op); + void lowerCastOp(CastOp op); void lowerComplexBinOp(ComplexBinOp op); void lowerThreeWayCmpOp(CmpThreeWayOp op); void lowerVAArgOp(VAArgOp op); @@ -423,6 +424,137 @@ void LoweringPreparePass::lowerBinOp(BinOp op) { op.erase(); } +static mlir::Value lowerScalarToComplexCast(MLIRContext &ctx, CastOp op) { + CIRBaseBuilderTy builder(ctx); + builder.setInsertionPoint(op); + + auto src = op.getSrc(); + auto imag = builder.getNullValue(src.getType(), op.getLoc()); + return builder.createComplexCreate(op.getLoc(), src, imag); +} + +static mlir::Value lowerComplexToScalarCast(MLIRContext &ctx, CastOp op) { + CIRBaseBuilderTy builder(ctx); + builder.setInsertionPoint(op); + + auto src = op.getSrc(); + + if (!mlir::isa(op.getType())) + return builder.createComplexReal(op.getLoc(), src); + + // Complex cast to bool: (bool)(a+bi) => (bool)a || (bool)b + auto srcReal = builder.createComplexReal(op.getLoc(), src); + auto srcImag = builder.createComplexImag(op.getLoc(), src); + + mlir::cir::CastKind elemToBoolKind; + if (op.getKind() == mlir::cir::CastKind::float_complex_to_bool) + elemToBoolKind = mlir::cir::CastKind::float_to_bool; + else if (op.getKind() == mlir::cir::CastKind::int_complex_to_bool) + elemToBoolKind = mlir::cir::CastKind::int_to_bool; + else + llvm_unreachable("invalid complex to bool cast kind"); + + auto boolTy = builder.getBoolTy(); + auto srcRealToBool = + builder.createCast(op.getLoc(), elemToBoolKind, srcReal, boolTy); + auto srcImagToBool = + builder.createCast(op.getLoc(), elemToBoolKind, srcImag, boolTy); + + // srcRealToBool || srcImagToBool + return builder + .create( + op.getLoc(), srcRealToBool, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield(op.getLoc(), + builder.getTrue(op.getLoc()).getResult()); + }, + [&](mlir::OpBuilder &, mlir::Location) { + auto inner = + builder + .create( + op.getLoc(), srcImagToBool, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield( + op.getLoc(), + builder.getTrue(op.getLoc()).getResult()); + }, + [&](mlir::OpBuilder &, mlir::Location) { + builder.createYield( + op.getLoc(), + builder.getFalse(op.getLoc()).getResult()); + }) + .getResult(); + builder.createYield(op.getLoc(), inner); + }) + .getResult(); +} + +static mlir::Value lowerComplexToComplexCast(MLIRContext &ctx, CastOp op) { + CIRBaseBuilderTy builder(ctx); + builder.setInsertionPoint(op); + + auto src = op.getSrc(); + auto dstComplexElemTy = + mlir::cast(op.getType()).getElementTy(); + + auto srcReal = builder.createComplexReal(op.getLoc(), src); + auto srcImag = builder.createComplexReal(op.getLoc(), src); + + mlir::cir::CastKind scalarCastKind; + switch (op.getKind()) { + case mlir::cir::CastKind::float_complex: + scalarCastKind = mlir::cir::CastKind::floating; + break; + case mlir::cir::CastKind::float_complex_to_int_complex: + scalarCastKind = mlir::cir::CastKind::float_to_int; + break; + case mlir::cir::CastKind::int_complex: + scalarCastKind = mlir::cir::CastKind::integral; + break; + case mlir::cir::CastKind::int_complex_to_float_complex: + scalarCastKind = mlir::cir::CastKind::int_to_float; + break; + default: + llvm_unreachable("invalid complex to complex cast kind"); + } + + auto dstReal = builder.createCast(op.getLoc(), scalarCastKind, srcReal, + dstComplexElemTy); + auto dstImag = builder.createCast(op.getLoc(), scalarCastKind, srcImag, + dstComplexElemTy); + return builder.createComplexCreate(op.getLoc(), dstReal, dstImag); +} + +void LoweringPreparePass::lowerCastOp(CastOp op) { + mlir::Value loweredValue; + switch (op.getKind()) { + case mlir::cir::CastKind::float_to_complex: + case mlir::cir::CastKind::int_to_complex: + loweredValue = lowerScalarToComplexCast(getContext(), op); + break; + + case mlir::cir::CastKind::float_complex_to_real: + case mlir::cir::CastKind::int_complex_to_real: + case mlir::cir::CastKind::float_complex_to_bool: + case mlir::cir::CastKind::int_complex_to_bool: + loweredValue = lowerComplexToScalarCast(getContext(), op); + break; + + case mlir::cir::CastKind::float_complex: + case mlir::cir::CastKind::float_complex_to_int_complex: + case mlir::cir::CastKind::int_complex: + case mlir::cir::CastKind::int_complex_to_float_complex: + loweredValue = lowerComplexToComplexCast(getContext(), op); + break; + + default: + return; + } + + op.replaceAllUsesWith(loweredValue); + op.erase(); +} + static mlir::Value buildComplexBinOpLibCall( LoweringPreparePass &pass, CIRBaseBuilderTy &builder, llvm::StringRef (*libFuncNameGetter)(llvm::APFloat::Semantics), @@ -988,6 +1120,8 @@ void LoweringPreparePass::runOnOp(Operation *op) { lowerUnaryOp(unary); } else if (auto bin = dyn_cast(op)) { lowerBinOp(bin); + } else if (auto cast = dyn_cast(op)) { + lowerCastOp(cast); } else if (auto complexBin = dyn_cast(op)) { lowerComplexBinOp(complexBin); } else if (auto threeWayCmp = dyn_cast(op)) { @@ -1027,9 +1161,9 @@ void LoweringPreparePass::runOnOperation() { SmallVector opsToTransform; op->walk([&](Operation *op) { - if (isa(op)) + if (isa(op)) opsToTransform.push_back(op); }); diff --git a/clang/test/CIR/CodeGen/complex-cast.c b/clang/test/CIR/CodeGen/complex-cast.c new file mode 100644 index 000000000000..dcff8b545b3e --- /dev/null +++ b/clang/test/CIR/CodeGen/complex-cast.c @@ -0,0 +1,205 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare -o %t.cir %s 2>&1 | FileCheck --check-prefixes=CIR-BEFORE,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare -o %t.cir %s 2>&1 | FileCheck --check-prefixes=CIR-AFTER,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll --check-prefixes=LLVM,CHECK %s + +#include + +volatile double _Complex cd; +volatile float _Complex cf; +volatile int _Complex ci; +volatile short _Complex cs; +volatile double sd; +volatile int si; +volatile bool b; + +void scalar_to_complex() { + cd = sd; + ci = si; + cd = si; + ci = sd; +} + +// CHECK-LABEL: @scalar_to_complex() + +// CIR-BEFORE: %{{.+}} = cir.cast(float_to_complex, %{{.+}} : !cir.double), !cir.complex + +// CIR-AFTER: %[[#REAL:]] = cir.load volatile %{{.+}} : !cir.ptr, !cir.double +// CIR-AFTER-NEXT: %[[#IMAG:]] = cir.const #cir.fp<0.000000e+00> : !cir.double +// CIR-AFTER-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !cir.double -> !cir.complex + +// CIR-BEFORE: %{{.+}} = cir.cast(int_to_complex, %{{.+}} : !s32i), !cir.complex + +// CIR-AFTER: %[[#REAL:]] = cir.load volatile %{{.+}} : !cir.ptr, !s32i +// CIR-AFTER-NEXT: %[[#IMAG:]] = cir.const #cir.int<0> : !s32i +// CIR-AFTER-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex + +// CIR-BEFORE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.double +// CIR-BEFORE-NEXT: %{{.+}} = cir.cast(float_to_complex, %[[#A]] : !cir.double), !cir.complex + +// CIR-AFTER: %[[#A:]] = cir.load volatile %{{.+}} : !cir.ptr, !s32i +// CIR-AFTER-NEXT: %[[#REAL:]] = cir.cast(int_to_float, %[[#A]] : !s32i), !cir.double +// CIR-AFTER-NEXT: %[[#IMAG:]] = cir.const #cir.fp<0.000000e+00> : !cir.double +// CIR-AFTER-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !cir.double -> !cir.complex + +// CIR-BEFORE: %[[#A:]] = cir.cast(float_to_int, %{{.+}} : !cir.double), !s32i +// CIR-BEFORE-NEXT: %{{.+}} = cir.cast(int_to_complex, %[[#A]] : !s32i), !cir.complex + +// CIR-AFTER: %[[#A:]] = cir.load volatile %{{.+}} : !cir.ptr, !cir.double +// CIR-AFTER-NEXT: %[[#REAL:]] = cir.cast(float_to_int, %[[#A]] : !cir.double), !s32i +// CIR-AFTER-NEXT: %[[#IMAG:]] = cir.const #cir.int<0> : !s32i +// CIR-AFTER-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex + +// LLVM: %[[#REAL:]] = load volatile double, ptr @sd, align 8 +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#REAL]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double 0.000000e+00, 1 + +// LLVM: %[[#REAL:]] = load volatile i32, ptr @si, align 4 +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#REAL]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 0, 1 + +// LLVM: %[[#A:]] = load volatile i32, ptr @si, align 4 +// LLVM-NEXT: %[[#REAL:]] = sitofp i32 %[[#A]] to double +// LLVM-NEXT: %[[#B:]] = insertvalue { double, double } undef, double %[[#REAL]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#B]], double 0.000000e+00, 1 + +// LLVM: %[[#A:]] = load volatile double, ptr @sd, align 8 +// LLVM-NEXT: %[[#REAL:]] = fptosi double %[[#A]] to i32 +// LLVM-NEXT: %[[#B:]] = insertvalue { i32, i32 } undef, i32 %[[#REAL]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#B]], i32 0, 1 + +// CHECK: } + +void complex_to_scalar() { + sd = (double)cd; + si = (int)ci; + sd = (double)ci; + si = (int)cd; +} + +// CHECK-LABEL: @complex_to_scalar() + +// CIR-BEFORE: %{{.+}} = cir.cast(float_complex_to_real, %{{.+}} : !cir.complex), !cir.double + +// CIR-AFTER: %{{.+}} = cir.complex.real %{{.+}} : !cir.complex -> !cir.double + +// LLVM: %{{.+}} = extractvalue { double, double } %{{.+}}, 0 + +// CIR-BEFORE: %{{.+}} = cir.cast(int_complex_to_real, %{{.+}} : !cir.complex), !s32i + +// CIR-AFTER: %{{.+}} = cir.complex.real %{{.+}} : !cir.complex -> !s32i + +// LLVM: %{{.+}} = extractvalue { i32, i32 } %{{.+}}, 0 + +// CIR-BEFORE: %[[#A:]] = cir.cast(int_complex_to_real, %{{.+}} : !cir.complex), !s32i +// CIR-BEFORE-NEXT: %{{.+}} = cir.cast(int_to_float, %[[#A]] : !s32i), !cir.double + +// CIR-AFTER: %[[#A:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-AFTER-NEXT: %{{.+}} = cir.cast(int_to_float, %[[#A]] : !s32i), !cir.double + +// LLVM: %[[#A:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %{{.+}} = sitofp i32 %[[#A]] to double + +// CIR-BEFORE: %[[#A:]] = cir.cast(float_complex_to_real, %{{.+}} : !cir.complex), !cir.double +// CIR-BEFORE-NEXT: %{{.+}} = cir.cast(float_to_int, %[[#A]] : !cir.double), !s32i + +// CIR-AFTER: %[[#A:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-AFTER-NEXT: %{{.+}} = cir.cast(float_to_int, %[[#A]] : !cir.double), !s32i + +// LLVM: %[[#A:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %{{.+}} = fptosi double %[[#A]] to i32 + +// CHECK: } + +void complex_to_bool() { + b = (bool)cd; + b = (bool)ci; +} + +// CHECK-LABEL: @complex_to_bool() + +// CIR-BEFORE: %{{.+}} = cir.cast(float_complex_to_bool, %{{.+}} : !cir.complex), !cir.bool + +// CIR-AFTER: %[[#REAL:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-AFTER-NEXT: %[[#IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-AFTER-NEXT: %[[#RB:]] = cir.cast(float_to_bool, %[[#REAL]] : !cir.double), !cir.bool +// CIR-AFTER-NEXT: %[[#IB:]] = cir.cast(float_to_bool, %[[#IMAG]] : !cir.double), !cir.bool +// CIR-AFTER-NEXT: %{{.+}} = cir.ternary(%[[#RB]], true { +// CIR-AFTER-NEXT: %[[#A:]] = cir.const #true +// CIR-AFTER-NEXT: cir.yield %[[#A]] : !cir.bool +// CIR-AFTER-NEXT: }, false { +// CIR-AFTER-NEXT: %[[#B:]] = cir.ternary(%[[#IB]], true { +// CIR-AFTER-NEXT: %[[#C:]] = cir.const #true +// CIR-AFTER-NEXT: cir.yield %[[#C]] : !cir.bool +// CIR-AFTER-NEXT: }, false { +// CIR-AFTER-NEXT: %[[#D:]] = cir.const #false +// CIR-AFTER-NEXT: cir.yield %[[#D]] : !cir.bool +// CIR-AFTER-NEXT: }) : (!cir.bool) -> !cir.bool +// CIR-AFTER-NEXT: cir.yield %[[#B]] : !cir.bool +// CIR-AFTER-NEXT: }) : (!cir.bool) -> !cir.bool + +// LLVM: %[[#REAL:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#IMAG:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#RB:]] = fcmp une double %[[#REAL]], 0.000000e+00 +// LLVM-NEXT: %[[#IB:]] = fcmp une double %[[#IMAG]], 0.000000e+00 +// LLVM-NEXT: br i1 %[[#RB]], label %[[#LABEL_RB:]], label %[[#LABEL_RB_NOT:]] +// LLVM: [[#LABEL_RB]]: +// LLVM-NEXT: br label %[[#LABEL_EXIT:]] +// LLVM: [[#LABEL_RB_NOT]]: +// LLVM-NEXT: br i1 %[[#IB]], label %[[#LABEL_IB:]], label %[[#LABEL_IB_NOT:]] +// LLVM: [[#LABEL_IB]]: +// LLVM-NEXT: br label %[[#LABEL_A:]] +// LLVM: [[#LABEL_IB_NOT]]: +// LLVM-NEXT: br label %[[#LABEL_A]] +// LLVM: [[#LABEL_A]]: +// LLVM-NEXT: %[[#A:]] = phi i8 [ 0, %[[#LABEL_IB_NOT]] ], [ 1, %[[#LABEL_IB]] ] +// LLVM-NEXT: br label %[[#LABEL_B:]] +// LLVM: [[#LABEL_B]]: +// LLVM-NEXT: br label %[[#LABEL_EXIT]] +// LLVM: [[#LABEL_EXIT]]: +// LLVM-NEXT: %{{.+}} = phi i8 [ %[[#A]], %[[#LABEL_B]] ], [ 1, %[[#LABEL_RB]] ] +// LLVM-NEXT: br label %{{.+}} + +// CIR-BEFORE: %{{.+}} = cir.cast(int_complex_to_bool, %{{.+}} : !cir.complex), !cir.bool + +// CIR-AFTER: %[[#REAL:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-AFTER-NEXT: %[[#IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-AFTER-NEXT: %[[#RB:]] = cir.cast(int_to_bool, %[[#REAL]] : !s32i), !cir.bool +// CIR-AFTER-NEXT: %[[#IB:]] = cir.cast(int_to_bool, %[[#IMAG]] : !s32i), !cir.bool +// CIR-AFTER-NEXT: %{{.+}} = cir.ternary(%[[#RB]], true { +// CIR-AFTER-NEXT: %[[#A:]] = cir.const #true +// CIR-AFTER-NEXT: cir.yield %[[#A]] : !cir.bool +// CIR-AFTER-NEXT: }, false { +// CIR-AFTER-NEXT: %[[#B:]] = cir.ternary(%[[#IB]], true { +// CIR-AFTER-NEXT: %[[#C:]] = cir.const #true +// CIR-AFTER-NEXT: cir.yield %[[#C]] : !cir.bool +// CIR-AFTER-NEXT: }, false { +// CIR-AFTER-NEXT: %[[#D:]] = cir.const #false +// CIR-AFTER-NEXT: cir.yield %[[#D]] : !cir.bool +// CIR-AFTER-NEXT: }) : (!cir.bool) -> !cir.bool +// CIR-AFTER-NEXT: cir.yield %[[#B]] : !cir.bool +// CIR-AFTER-NEXT: }) : (!cir.bool) -> !cir.bool + +// LLVM: %[[#REAL:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#IMAG:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#RB:]] = icmp ne i32 %[[#REAL]], 0 +// LLVM-NEXT: %[[#IB:]] = icmp ne i32 %[[#IMAG]], 0 +// LLVM-NEXT: br i1 %[[#RB]], label %[[#LABEL_RB:]], label %[[#LABEL_RB_NOT:]] +// LLVM: [[#LABEL_RB]]: +// LLVM-NEXT: br label %[[#LABEL_EXIT:]] +// LLVM: [[#LABEL_RB_NOT]]: +// LLVM-NEXT: br i1 %[[#IB]], label %[[#LABEL_IB:]], label %[[#LABEL_IB_NOT:]] +// LLVM: [[#LABEL_IB]]: +// LLVM-NEXT: br label %[[#LABEL_A:]] +// LLVM: [[#LABEL_IB_NOT]]: +// LLVM-NEXT: br label %[[#LABEL_A]] +// LLVM: [[#LABEL_A]]: +// LLVM-NEXT: %[[#A:]] = phi i8 [ 0, %[[#LABEL_IB_NOT]] ], [ 1, %[[#LABEL_IB]] ] +// LLVM-NEXT: br label %[[#LABEL_B:]] +// LLVM: [[#LABEL_B]]: +// LLVM-NEXT: br label %[[#LABEL_EXIT]] +// LLVM: [[#LABEL_EXIT]]: +// LLVM-NEXT: %{{.+}} = phi i8 [ %[[#A]], %[[#LABEL_B]] ], [ 1, %[[#LABEL_RB]] ] +// LLVM-NEXT: br label %{{.+}} + +// CHECK: } From d011b24bc3feba69fad2f2d69a9154b8b11d1105 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 9 Aug 2024 06:21:46 +0800 Subject: [PATCH 1749/2301] [CIR][CodeGen][LowerToLLVM] Emit OpenCL version metadata for SPIR-V target (#773) Similar to #767, this PR emit the module level OpenCL version metadata following the OG CodeGen skeleton. We use a full qualified `cir.cl.version` attribute on the module op to store the info in CIR. --- .../clang/CIR/Dialect/IR/CIROpenCLAttrs.td | 21 ++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 24 ++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 3 +++ .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 25 +++++++++++++++++++ .../test/CIR/CodeGen/OpenCL/opencl-version.cl | 16 ++++++++++++ 5 files changed, 89 insertions(+) create mode 100644 clang/test/CIR/CodeGen/OpenCL/opencl-version.cl diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td index 294f18c9414d..1a47186de581 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td @@ -147,4 +147,25 @@ def OpenCLKernelArgMetadataAttr let genVerifyDecl = 1; } +//===----------------------------------------------------------------------===// +// OpenCLVersionAttr +//===----------------------------------------------------------------------===// + +def OpenCLVersionAttr : CIR_Attr<"OpenCLVersion", "cl.version"> { + let summary = "OpenCL version"; + let parameters = (ins "int32_t":$major, "int32_t":$minor); + let description = [{ + Represents the version of OpenCL. + + Example: + ``` + // Module compiled from OpenCL 1.2. + module attributes {cir.cl.version = cir.cl.version<1, 2>} {} + // Module compiled from OpenCL 3.0. + module attributes {cir.cl.version = cir.cl.version<3, 0>} {} + ``` + }]; + let assemblyFormat = "`<` $major `,` $minor `>`"; +} + #endif // MLIR_CIR_DIALECT_CIR_OPENCL_ATTRS diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index fece8f9fa723..2d857dcfe3c8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2768,6 +2768,16 @@ void CIRGenModule::Release() { // TODO: buildModuleLinkOptions } + // Emit OpenCL specific module metadata: OpenCL/SPIR version. + if (langOpts.CUDAIsDevice && getTriple().isSPIRV()) + llvm_unreachable("CUDA SPIR-V NYI"); + if (langOpts.OpenCL) { + buildOpenCLMetadata(); + // Emit SPIR version. + if (getTriple().isSPIR()) + llvm_unreachable("SPIR target NYI"); + } + // TODO: FINISH THE REST OF THIS } @@ -3235,3 +3245,17 @@ void CIRGenModule::genKernelArgMetadata(mlir::cir::FuncOp Fn, llvm_unreachable("NYI HIPSaveKernelArgName"); } } + +void CIRGenModule::buildOpenCLMetadata() { + // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the + // opencl.ocl.version named metadata node. + // C++ for OpenCL has a distinct mapping for versions compatibile with OpenCL. + unsigned version = langOpts.getOpenCLCompatibleVersion(); + unsigned major = version / 100; + unsigned minor = (version % 100) / 10; + + auto clVersionAttr = + mlir::cir::OpenCLVersionAttr::get(builder.getContext(), major, minor); + + theModule->setAttr("cir.cl.version", clVersionAttr); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index fa6da9c9506d..895f6a54d403 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -702,6 +702,9 @@ class CIRGenModule : public CIRGenTypeCache { const FunctionDecl *FD = nullptr, CIRGenFunction *CGF = nullptr); + /// Emits OpenCL specific Metadata e.g. OpenCL version. + void buildOpenCLMetadata(); + private: // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index 08aeb902b78e..f8fbd3e0846f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -41,6 +41,8 @@ class CIRDialectLLVMIRTranslationInterface mlir::LLVM::ModuleTranslation &moduleTranslation) const override { if (auto func = dyn_cast(op)) { amendFunction(func, instructions, attribute, moduleTranslation); + } else if (auto mod = dyn_cast(op)) { + amendModule(mod, attribute, moduleTranslation); } return mlir::success(); } @@ -60,6 +62,29 @@ class CIRDialectLLVMIRTranslationInterface } private: + // Translate CIR's module attributes to LLVM's module metadata + void amendModule(mlir::ModuleOp module, mlir::NamedAttribute attribute, + mlir::LLVM::ModuleTranslation &moduleTranslation) const { + llvm::Module *llvmModule = moduleTranslation.getLLVMModule(); + llvm::LLVMContext &llvmContext = llvmModule->getContext(); + + if (auto openclVersionAttr = mlir::dyn_cast( + attribute.getValue())) { + auto *int32Ty = llvm::IntegerType::get(llvmContext, 32); + llvm::Metadata *oclVerElts[] = { + llvm::ConstantAsMetadata::get( + llvm::ConstantInt::get(int32Ty, openclVersionAttr.getMajor())), + llvm::ConstantAsMetadata::get( + llvm::ConstantInt::get(int32Ty, openclVersionAttr.getMinor()))}; + llvm::NamedMDNode *oclVerMD = + llvmModule->getOrInsertNamedMetadata("opencl.ocl.version"); + oclVerMD->addOperand(llvm::MDNode::get(llvmContext, oclVerElts)); + } + + // Drop ammended CIR attribute from LLVM op. + module->removeAttr(attribute.getName()); + } + // Translate CIR's extra function attributes to LLVM's function attributes. void amendFunction(mlir::LLVM::LLVMFuncOp func, llvm::ArrayRef instructions, diff --git a/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl b/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl new file mode 100644 index 000000000000..f0536a560b97 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR-CL30 +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM-CL30 +// RUN: %clang_cc1 -cl-std=CL1.2 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR-CL12 +// RUN: %clang_cc1 -cl-std=CL1.2 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM-CL12 + +// CIR-CL30: module {{.*}} attributes {{{.*}}cir.cl.version = #cir.cl.version<3, 0> +// LLVM-CL30: !opencl.ocl.version = !{![[MDCL30:[0-9]+]]} +// LLVM-CL30: ![[MDCL30]] = !{i32 3, i32 0} + +// CIR-CL12: module {{.*}} attributes {{{.*}}cir.cl.version = #cir.cl.version<1, 2> +// LLVM-CL12: !opencl.ocl.version = !{![[MDCL12:[0-9]+]]} +// LLVM-CL12: ![[MDCL12]] = !{i32 1, i32 2} From 6a30e2746e6cfb508faba3845d0275c41326323b Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 9 Aug 2024 06:22:00 +0800 Subject: [PATCH 1750/2301] [CIR][Transforms] Add folders for complex operations (#775) This PR adds folders for `cir.complex.create`, `cir.complex.real`, and `cir.complex.imag`. This PR adds a new attribute `#cir.complex` that represents a constant complex value. Besides, the CIR dialect does not have a constant materializer yet; this PR adds it. Address #726 . --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 34 +++ .../clang/CIR/Dialect/IR/CIRDialect.td | 2 + clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 + clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 20 ++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 47 ++- .../CIR/Dialect/Transforms/CIRSimplify.cpp | 3 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 24 ++ clang/test/CIR/CodeGen/complex.c | 287 +++++++++++------- clang/test/CIR/Lowering/complex.cir | 15 + clang/test/CIR/Transforms/complex-fold.cir | 44 +++ 10 files changed, 373 insertions(+), 106 deletions(-) create mode 100644 clang/test/CIR/Lowering/complex.cir create mode 100644 clang/test/CIR/Transforms/complex-fold.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 4207a3b37532..3c28768a7fd7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -274,6 +274,40 @@ def FPAttr : CIR_Attr<"FP", "fp", [TypedAttrInterface]> { }]; } +//===----------------------------------------------------------------------===// +// ComplexAttr +//===----------------------------------------------------------------------===// + +def ComplexAttr : CIR_Attr<"Complex", "complex", [TypedAttrInterface]> { + let summary = "An attribute that contains a constant complex value"; + let description = [{ + The `#cir.complex` attribute contains a constant value of complex number + type. The `real` parameter gives the real part of the complex number and the + `imag` parameter gives the imaginary part of the complex number. + + The `real` and `imag` parameter must be either an IntAttr or an FPAttr that + contains values of the same CIR type. + }]; + + let parameters = (ins + AttributeSelfTypeParameter<"", "mlir::cir::ComplexType">:$type, + "mlir::TypedAttr":$real, "mlir::TypedAttr":$imag); + + let builders = [ + AttrBuilderWithInferredContext<(ins "mlir::cir::ComplexType":$type, + "mlir::TypedAttr":$real, + "mlir::TypedAttr":$imag), [{ + return $_get(type.getContext(), type, real, imag); + }]>, + ]; + + let genVerifyDecl = 1; + + let assemblyFormat = [{ + `<` qualified($real) `,` qualified($imag) `>` + }]; +} + //===----------------------------------------------------------------------===// // ConstPointerAttr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index df5dbe9872a6..fc87df7c86a2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -27,6 +27,8 @@ def CIR_Dialect : Dialect { let useDefaultAttributePrinterParser = 0; let useDefaultTypePrinterParser = 0; + let hasConstantMaterializer = 1; + let extraClassDeclaration = [{ // Names of CIR parameter attributes. diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index da3f985631e9..78da99a65bfe 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1243,6 +1243,7 @@ def ComplexCreateOp : CIR_Op<"complex.create", [Pure, SameTypeOperands]> { }]; let hasVerifier = 1; + let hasFolder = 1; } //===----------------------------------------------------------------------===// @@ -1271,6 +1272,7 @@ def ComplexRealOp : CIR_Op<"complex.real", [Pure]> { }]; let hasVerifier = 1; + let hasFolder = 1; } def ComplexImagOp : CIR_Op<"complex.imag", [Pure]> { @@ -1295,6 +1297,7 @@ def ComplexImagOp : CIR_Op<"complex.imag", [Pure]> { }]; let hasVerifier = 1; + let hasFolder = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 4cfa01b08687..7d5d401f8d0f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -365,6 +365,26 @@ LogicalResult cir::FPAttr::verify(function_ref emitError, return success(); } +//===----------------------------------------------------------------------===// +// ComplexAttr definitions +//===----------------------------------------------------------------------===// + +LogicalResult ComplexAttr::verify(function_ref emitError, + mlir::cir::ComplexType type, + mlir::TypedAttr real, mlir::TypedAttr imag) { + auto elemTy = type.getElementTy(); + if (real.getType() != elemTy) { + emitError() << "type of the real part does not match the complex type"; + return failure(); + } + if (imag.getType() != elemTy) { + emitError() << "type of the imaginary part does not match the complex type"; + return failure(); + } + + return success(); +} + //===----------------------------------------------------------------------===// // CmpThreeWayInfoAttr definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a41abb1caa1d..cf9ba8a6885d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -124,6 +124,14 @@ void cir::CIRDialect::initialize() { addInterfaces(); } +Operation *cir::CIRDialect::materializeConstant(mlir::OpBuilder &builder, + mlir::Attribute value, + mlir::Type type, + mlir::Location loc) { + return builder.create( + loc, type, mlir::cast(value)); +} + //===----------------------------------------------------------------------===// // Helpers //===----------------------------------------------------------------------===// @@ -344,7 +352,8 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return success(); } - if (mlir::isa(attrType)) { + if (mlir::isa( + attrType)) { auto at = cast(attrType); if (at.getType() != opType) { return op->emitOpError("result type (") @@ -748,6 +757,26 @@ LogicalResult ComplexCreateOp::verify() { return success(); } +OpFoldResult ComplexCreateOp::fold(FoldAdaptor adaptor) { + auto real = adaptor.getReal(); + auto imag = adaptor.getImag(); + + if (!real || !imag) + return nullptr; + + // When both of real and imag are constants, we can fold the operation into an + // `cir.const #cir.complex` operation. + + auto realAttr = mlir::cast(real); + auto imagAttr = mlir::cast(imag); + assert(realAttr.getType() == imagAttr.getType() && + "real part and imag part should be of the same type"); + + auto complexTy = + mlir::cir::ComplexType::get(getContext(), realAttr.getType()); + return mlir::cir::ComplexAttr::get(complexTy, realAttr, imagAttr); +} + //===----------------------------------------------------------------------===// // ComplexRealOp and ComplexImagOp //===----------------------------------------------------------------------===// @@ -760,6 +789,14 @@ LogicalResult ComplexRealOp::verify() { return success(); } +OpFoldResult ComplexRealOp::fold(FoldAdaptor adaptor) { + auto input = + mlir::cast_if_present(adaptor.getOperand()); + if (input) + return input.getReal(); + return nullptr; +} + LogicalResult ComplexImagOp::verify() { if (getType() != getOperand().getType().getElementTy()) { emitOpError() << "cir.complex.imag result type does not match operand type"; @@ -768,6 +805,14 @@ LogicalResult ComplexImagOp::verify() { return success(); } +OpFoldResult ComplexImagOp::fold(FoldAdaptor adaptor) { + auto input = + mlir::cast_if_present(adaptor.getOperand()); + if (input) + return input.getImag(); + return nullptr; +} + //===----------------------------------------------------------------------===// // ComplexRealPtrOp and ComplexImagPtrOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp index 7e8381bc78ac..07fe0e6b5594 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp @@ -146,7 +146,8 @@ void CIRSimplifyPass::runOnOperation() { getOperation()->walk([&](Operation *op) { // CastOp here is to perform a manual `fold` in // applyOpPatternsAndFold - if (isa(op)) + if (isa(op)) ops.push_back(op); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 88cbe2b8d4e3..6ef01c493112 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1180,6 +1180,30 @@ class CIRConstantLowering attr = rewriter.getFloatAttr( typeConverter->convertType(op.getType()), mlir::cast(op.getValue()).getValue()); + } else if (auto complexTy = + mlir::dyn_cast(op.getType())) { + auto complexAttr = mlir::cast(op.getValue()); + auto complexElemTy = complexTy.getElementTy(); + auto complexElemLLVMTy = typeConverter->convertType(complexElemTy); + + mlir::Attribute components[2]; + if (mlir::isa(complexElemTy)) { + components[0] = rewriter.getIntegerAttr( + complexElemLLVMTy, + mlir::cast(complexAttr.getReal()).getValue()); + components[1] = rewriter.getIntegerAttr( + complexElemLLVMTy, + mlir::cast(complexAttr.getImag()).getValue()); + } else { + components[0] = rewriter.getFloatAttr( + complexElemLLVMTy, + mlir::cast(complexAttr.getReal()).getValue()); + components[1] = rewriter.getFloatAttr( + complexElemLLVMTy, + mlir::cast(complexAttr.getImag()).getValue()); + } + + attr = rewriter.getArrayAttr(components); } else if (mlir::isa(op.getType())) { // Optimize with dedicated LLVM op for null pointers. if (mlir::isa(op.getValue())) { diff --git a/clang/test/CIR/CodeGen/complex.c b/clang/test/CIR/CodeGen/complex.c index 3dd02118ea6a..a1cab1070aca 100644 --- a/clang/test/CIR/CodeGen/complex.c +++ b/clang/test/CIR/CodeGen/complex.c @@ -1,7 +1,7 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -o %t.cir %s -// RUN: FileCheck --input-file=%t.cir --check-prefixes=C,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -fclangir -emit-cir -o %t.cir %s -// RUN: FileCheck --input-file=%t.cir --check-prefixes=CPP,CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-simplify -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-simplify -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-simplify -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-AFTER %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-simplify -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-AFTER %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefixes=LLVM %s @@ -16,15 +16,19 @@ void list_init() { int _Complex c2 = {1, 2}; } -// C: cir.func no_proto @list_init() -// CPP: cir.func @_Z9list_initv() -// CHECK: %[[#REAL:]] = cir.const #cir.fp<1.000000e+00> : !cir.double -// CHECK-NEXT: %[[#IMAG:]] = cir.const #cir.fp<2.000000e+00> : !cir.double -// CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !cir.double -> !cir.complex -// CHECK: %[[#REAL:]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %[[#IMAG:]] = cir.const #cir.int<2> : !s32i -// CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %[[#REAL:]] = cir.const #cir.fp<1.000000e+00> : !cir.double +// CHECK-BEFORE-NEXT: %[[#IMAG:]] = cir.const #cir.fp<2.000000e+00> : !cir.double +// CHECK-BEFORE-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !cir.double -> !cir.complex +// CHECK-BEFORE: %[[#REAL:]] = cir.const #cir.int<1> : !s32i +// CHECK-BEFORE-NEXT: %[[#IMAG:]] = cir.const #cir.int<2> : !s32i +// CHECK-BEFORE-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %{{.+}} = cir.const #cir.complex<#cir.fp<1.000000e+00> : !cir.double, #cir.fp<2.000000e+00> : !cir.double> : !cir.complex +// CHECK-AFTER: %{{.+}} = cir.const #cir.complex<#cir.int<1> : !s32i, #cir.int<2> : !s32i> : !cir.complex +// CHECK-AFTER: } // LLVM: define dso_local void @list_init() // LLVM: store { double, double } { double 1.000000e+00, double 2.000000e+00 }, ptr %{{.+}}, align 8 @@ -34,13 +38,19 @@ void list_init_2(double r, double i) { double _Complex c1 = {r, i}; } -// C: cir.func @list_init_2 -// CPP: cir.func @_Z11list_init_2dd -// CHECK: %[[#R:]] = cir.load %{{.+}} : !cir.ptr, !cir.double -// CHECK-NEXT: %[[#I:]] = cir.load %{{.+}} : !cir.ptr, !cir.double -// CHECK-NEXT: %[[#C:]] = cir.complex.create %[[#R]], %[[#I]] : !cir.double -> !cir.complex -// CHECK-NEXT: cir.store %[[#C]], %{{.+}} : !cir.complex, !cir.ptr> -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %[[#R:]] = cir.load %{{.+}} : !cir.ptr, !cir.double +// CHECK-BEFORE-NEXT: %[[#I:]] = cir.load %{{.+}} : !cir.ptr, !cir.double +// CHECK-BEFORE-NEXT: %[[#C:]] = cir.complex.create %[[#R]], %[[#I]] : !cir.double -> !cir.complex +// CHECK-BEFORE-NEXT: cir.store %[[#C]], %{{.+}} : !cir.complex, !cir.ptr> +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %[[#R:]] = cir.load %{{.+}} : !cir.ptr, !cir.double +// CHECK-AFTER-NEXT: %[[#I:]] = cir.load %{{.+}} : !cir.ptr, !cir.double +// CHECK-AFTER-NEXT: %[[#C:]] = cir.complex.create %[[#R]], %[[#I]] : !cir.double -> !cir.complex +// CHECK-AFTER-NEXT: cir.store %[[#C]], %{{.+}} : !cir.complex, !cir.ptr> +// CHECK-AFTER: } // LLVM: define dso_local void @list_init_2(double %{{.+}}, double %{{.+}}) // LLVM: %[[#A:]] = insertvalue { double, double } undef, double %{{.+}}, 0 @@ -52,10 +62,13 @@ void builtin_init(double r, double i) { double _Complex c = __builtin_complex(r, i); } -// C: cir.func @builtin_init -// CPP: cir.func @_Z12builtin_initdd -// CHECK: %{{.+}} = cir.complex.create %{{.+}}, %{{.+}} : !cir.double -> !cir.complex -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %{{.+}} = cir.complex.create %{{.+}}, %{{.+}} : !cir.double -> !cir.complex +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %{{.+}} = cir.complex.create %{{.+}}, %{{.+}} : !cir.double -> !cir.complex +// CHECK-AFTER: } // LLVM: define dso_local void @builtin_init // LLVM: %[[#A:]] = insertvalue { double, double } undef, double %{{.+}}, 0 @@ -68,15 +81,19 @@ void imag_literal() { ci = 3i; } -// C: cir.func no_proto @imag_literal() -// CPP: cir.func @_Z12imag_literalv() -// CHECK: %[[#REAL:]] = cir.const #cir.fp<0.000000e+00> : !cir.double -// CHECK-NEXT: %[[#IMAG:]] = cir.const #cir.fp<3.000000e+00> : !cir.double -// CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !cir.double -> !cir.complex -// CHECK: %[[#REAL:]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: %[[#IMAG:]] = cir.const #cir.int<3> : !s32i -// CHECK-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %[[#REAL:]] = cir.const #cir.fp<0.000000e+00> : !cir.double +// CHECK-BEFORE-NEXT: %[[#IMAG:]] = cir.const #cir.fp<3.000000e+00> : !cir.double +// CHECK-BEFORE-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !cir.double -> !cir.complex +// CHECK-BEFORE: %[[#REAL:]] = cir.const #cir.int<0> : !s32i +// CHECK-BEFORE-NEXT: %[[#IMAG:]] = cir.const #cir.int<3> : !s32i +// CHECK-BEFORE-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %{{.+}} = cir.const #cir.complex<#cir.fp<0.000000e+00> : !cir.double, #cir.fp<3.000000e+00> : !cir.double> : !cir.complex +// CHECK-AFTER: %{{.+}} = cir.const #cir.complex<#cir.int<0> : !s32i, #cir.int<3> : !s32i> : !cir.complex +// CHECK-AFTER: } // LLVM: define dso_local void @imag_literal() // LLVM: store { double, double } { double 0.000000e+00, double 3.000000e+00 }, ptr @c, align 8 @@ -88,17 +105,27 @@ void load_store() { ci = ci2; } -// C: cir.func no_proto @load_store() -// CPP: cir.func @_Z10load_storev() -// CHECK-NEXT: %[[#C2_PTR:]] = cir.get_global @c2 : !cir.ptr> -// CHECK-NEXT: %[[#C2:]] = cir.load %[[#C2_PTR]] : !cir.ptr>, !cir.complex -// CHECK-NEXT: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> -// CHECK-NEXT: cir.store %[[#C2]], %[[#C_PTR]] : !cir.complex, !cir.ptr> -// CHECK-NEXT: %[[#CI2_PTR:]] = cir.get_global @ci2 : !cir.ptr> -// CHECK-NEXT: %[[#CI2:]] = cir.load %[[#CI2_PTR]] : !cir.ptr>, !cir.complex -// CHECK-NEXT: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> -// CHECK-NEXT: cir.store %[[#CI2]], %[[#CI_PTR]] : !cir.complex, !cir.ptr> -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE-NEXT: %[[#C2_PTR:]] = cir.get_global @c2 : !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#C2:]] = cir.load %[[#C2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-BEFORE-NEXT: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-BEFORE-NEXT: cir.store %[[#C2]], %[[#C_PTR]] : !cir.complex, !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#CI2_PTR:]] = cir.get_global @ci2 : !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#CI2:]] = cir.load %[[#CI2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-BEFORE-NEXT: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-BEFORE-NEXT: cir.store %[[#CI2]], %[[#CI_PTR]] : !cir.complex, !cir.ptr> +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER-NEXT: %[[#C2_PTR:]] = cir.get_global @c2 : !cir.ptr> +// CHECK-AFTER-NEXT: %[[#C2:]] = cir.load %[[#C2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-AFTER-NEXT: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-AFTER-NEXT: cir.store %[[#C2]], %[[#C_PTR]] : !cir.complex, !cir.ptr> +// CHECK-AFTER-NEXT: %[[#CI2_PTR:]] = cir.get_global @ci2 : !cir.ptr> +// CHECK-AFTER-NEXT: %[[#CI2:]] = cir.load %[[#CI2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-AFTER-NEXT: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-AFTER-NEXT: cir.store %[[#CI2]], %[[#CI_PTR]] : !cir.complex, !cir.ptr> +// CHECK-AFTER: } // LLVM: define dso_local void @load_store() // LLVM: %[[#A:]] = load { double, double }, ptr @c2, align 8 @@ -112,17 +139,27 @@ void load_store_volatile() { vci = vci2; } -// C: cir.func no_proto @load_store_volatile() -// CPP: cir.func @_Z19load_store_volatilev() -// CHECK-NEXT: %[[#VC2_PTR:]] = cir.get_global @vc2 : !cir.ptr> -// CHECK-NEXT: %[[#VC2:]] = cir.load volatile %[[#VC2_PTR]] : !cir.ptr>, !cir.complex -// CHECK-NEXT: %[[#VC_PTR:]] = cir.get_global @vc : !cir.ptr> -// CHECK-NEXT: cir.store volatile %[[#VC2]], %[[#VC_PTR]] : !cir.complex, !cir.ptr> -// CHECK-NEXT: %[[#VCI2_PTR:]] = cir.get_global @vci2 : !cir.ptr> -// CHECK-NEXT: %[[#VCI2:]] = cir.load volatile %[[#VCI2_PTR]] : !cir.ptr>, !cir.complex -// CHECK-NEXT: %[[#VCI_PTR:]] = cir.get_global @vci : !cir.ptr> -// CHECK-NEXT: cir.store volatile %[[#VCI2]], %[[#VCI_PTR]] : !cir.complex, !cir.ptr> -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE-NEXT: %[[#VC2_PTR:]] = cir.get_global @vc2 : !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#VC2:]] = cir.load volatile %[[#VC2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-BEFORE-NEXT: %[[#VC_PTR:]] = cir.get_global @vc : !cir.ptr> +// CHECK-BEFORE-NEXT: cir.store volatile %[[#VC2]], %[[#VC_PTR]] : !cir.complex, !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#VCI2_PTR:]] = cir.get_global @vci2 : !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#VCI2:]] = cir.load volatile %[[#VCI2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-BEFORE-NEXT: %[[#VCI_PTR:]] = cir.get_global @vci : !cir.ptr> +// CHECK-BEFORE-NEXT: cir.store volatile %[[#VCI2]], %[[#VCI_PTR]] : !cir.complex, !cir.ptr> +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER-NEXT: %[[#VC2_PTR:]] = cir.get_global @vc2 : !cir.ptr> +// CHECK-AFTER-NEXT: %[[#VC2:]] = cir.load volatile %[[#VC2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-AFTER-NEXT: %[[#VC_PTR:]] = cir.get_global @vc : !cir.ptr> +// CHECK-AFTER-NEXT: cir.store volatile %[[#VC2]], %[[#VC_PTR]] : !cir.complex, !cir.ptr> +// CHECK-AFTER-NEXT: %[[#VCI2_PTR:]] = cir.get_global @vci2 : !cir.ptr> +// CHECK-AFTER-NEXT: %[[#VCI2:]] = cir.load volatile %[[#VCI2_PTR]] : !cir.ptr>, !cir.complex +// CHECK-AFTER-NEXT: %[[#VCI_PTR:]] = cir.get_global @vci : !cir.ptr> +// CHECK-AFTER-NEXT: cir.store volatile %[[#VCI2]], %[[#VCI_PTR]] : !cir.complex, !cir.ptr> +// CHECK-AFTER: } // LLVM: define dso_local void @load_store_volatile() // LLVM: %[[#A:]] = load volatile { double, double }, ptr @vc2, align 8 @@ -135,12 +172,17 @@ void real() { double r = __builtin_creal(c); } -// C: cir.func no_proto @real() -// CPP: cir.func @_Z4realv() -// CHECK: %[[#A:]] = cir.get_global @c : !cir.ptr> -// CHECK-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr>, !cir.complex -// CHECK-NEXT: %{{.+}} = cir.complex.real %[[#B]] : !cir.complex -> !cir.double -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %[[#A:]] = cir.get_global @c : !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr>, !cir.complex +// CHECK-BEFORE-NEXT: %{{.+}} = cir.complex.real %[[#B]] : !cir.complex -> !cir.double +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %[[#A:]] = cir.get_global @c : !cir.ptr> +// CHECK-AFTER-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr>, !cir.complex +// CHECK-AFTER-NEXT: %{{.+}} = cir.complex.real %[[#B]] : !cir.complex -> !cir.double +// CHECK-AFTER: } // LLVM: define dso_local void @real() // LLVM: %[[#A:]] = extractvalue { double, double } %{{.+}}, 0 @@ -151,12 +193,17 @@ void imag() { double i = __builtin_cimag(c); } -// C: cir.func no_proto @imag() -// CPP: cir.func @_Z4imagv() -// CHECK: %[[#A:]] = cir.get_global @c : !cir.ptr> -// CHECK-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr>, !cir.complex -// CHECK-NEXT: %{{.+}} = cir.complex.imag %[[#B]] : !cir.complex -> !cir.double -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %[[#A:]] = cir.get_global @c : !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr>, !cir.complex +// CHECK-BEFORE-NEXT: %{{.+}} = cir.complex.imag %[[#B]] : !cir.complex -> !cir.double +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %[[#A:]] = cir.get_global @c : !cir.ptr> +// CHECK-AFTER-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr>, !cir.complex +// CHECK-AFTER-NEXT: %{{.+}} = cir.complex.imag %[[#B]] : !cir.complex -> !cir.double +// CHECK-AFTER: } // LLVM: define dso_local void @imag() // LLVM: %[[#A:]] = extractvalue { double, double } %{{.+}}, 1 @@ -168,13 +215,19 @@ void real_ptr() { int *r2 = &__real__ ci; } -// C: cir.func no_proto @real_ptr() -// CPP: cir.func @_Z8real_ptrv() -// CHECK: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> -// CHECK-NEXT: %{{.+}} = cir.complex.real_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr -// CHECK: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> -// CHECK-NEXT: %{{.+}} = cir.complex.real_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-BEFORE-NEXT: %{{.+}} = cir.complex.real_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-BEFORE: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-BEFORE-NEXT: %{{.+}} = cir.complex.real_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-AFTER-NEXT: %{{.+}} = cir.complex.real_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-AFTER: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-AFTER-NEXT: %{{.+}} = cir.complex.real_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-AFTER: } // LLVM: define dso_local void @real_ptr() // LLVM: store ptr @c, ptr %{{.+}}, align 8 @@ -186,11 +239,15 @@ void real_ptr_local() { double *r3 = &__real__ c1; } -// C: cir.func no_proto @real_ptr_local() -// CPP: cir.func @_Z14real_ptr_localv() -// CHECK: %[[#C:]] = cir.alloca !cir.complex, !cir.ptr> -// CHECK: %{{.+}} = cir.complex.real_ptr %[[#C]] : !cir.ptr> -> !cir.ptr -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %[[#C:]] = cir.alloca !cir.complex, !cir.ptr> +// CHECK-BEFORE: %{{.+}} = cir.complex.real_ptr %[[#C]] : !cir.ptr> -> !cir.ptr +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %[[#C:]] = cir.alloca !cir.complex, !cir.ptr> +// CHECK-AFTER: %{{.+}} = cir.complex.real_ptr %[[#C]] : !cir.ptr> -> !cir.ptr +// CHECK-AFTER: } // LLVM: define dso_local void @real_ptr_local() // LLVM: store { double, double } { double 1.000000e+00, double 2.000000e+00 }, ptr %{{.+}}, align 8 @@ -202,15 +259,23 @@ void extract_real() { int r2 = __real__ ci; } -// C: cir.func no_proto @extract_real() -// CPP: cir.func @_Z12extract_realv() -// CHECK: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> -// CHECK-NEXT: %[[#REAL_PTR:]] = cir.complex.real_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %{{.+}} = cir.load %[[#REAL_PTR]] : !cir.ptr, !cir.double -// CHECK: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> -// CHECK-NEXT: %[[#REAL_PTR:]] = cir.complex.real_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %{{.+}} = cir.load %[[#REAL_PTR]] : !cir.ptr, !s32i -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#REAL_PTR:]] = cir.complex.real_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-BEFORE-NEXT: %{{.+}} = cir.load %[[#REAL_PTR]] : !cir.ptr, !cir.double +// CHECK-BEFORE: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#REAL_PTR:]] = cir.complex.real_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-BEFORE-NEXT: %{{.+}} = cir.load %[[#REAL_PTR]] : !cir.ptr, !s32i +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-AFTER-NEXT: %[[#REAL_PTR:]] = cir.complex.real_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-AFTER-NEXT: %{{.+}} = cir.load %[[#REAL_PTR]] : !cir.ptr, !cir.double +// CHECK-AFTER: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-AFTER-NEXT: %[[#REAL_PTR:]] = cir.complex.real_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-AFTER-NEXT: %{{.+}} = cir.load %[[#REAL_PTR]] : !cir.ptr, !s32i +// CHECK-AFTER: } // LLVM: define dso_local void @extract_real() // LLVM: %{{.+}} = load double, ptr @c, align 8 @@ -222,13 +287,19 @@ void imag_ptr() { int *i2 = &__imag__ ci; } -// C: cir.func no_proto @imag_ptr() -// CPP: cir.func @_Z8imag_ptrv() -// CHECK: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> -// CHECK-NEXT: %{{.+}} = cir.complex.imag_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr -// CHECK: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> -// CHECK-NEXT: %{{.+}} = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-BEFORE-NEXT: %{{.+}} = cir.complex.imag_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-BEFORE: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-BEFORE-NEXT: %{{.+}} = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-AFTER-NEXT: %{{.+}} = cir.complex.imag_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-AFTER: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-AFTER-NEXT: %{{.+}} = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-AFTER: } // LLVM: define dso_local void @imag_ptr() // LLVM: store ptr getelementptr inbounds ({ double, double }, ptr @c, i32 0, i32 1), ptr %{{.+}}, align 8 @@ -240,15 +311,23 @@ void extract_imag() { int i2 = __imag__ ci; } -// C: cir.func no_proto @extract_imag() -// CPP: cir.func @_Z12extract_imagv() -// CHECK: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> -// CHECK-NEXT: %[[#IMAG_PTR:]] = cir.complex.imag_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %{{.+}} = cir.load %[[#IMAG_PTR]] : !cir.ptr, !cir.double -// CHECK: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> -// CHECK-NEXT: %[[#IMAG_PTR:]] = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr -// CHECK-NEXT: %{{.+}} = cir.load %[[#IMAG_PTR]] : !cir.ptr, !s32i -// CHECK: } +// CHECK-BEFORE: cir.func +// CHECK-BEFORE: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#IMAG_PTR:]] = cir.complex.imag_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-BEFORE-NEXT: %{{.+}} = cir.load %[[#IMAG_PTR]] : !cir.ptr, !cir.double +// CHECK-BEFORE: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-BEFORE-NEXT: %[[#IMAG_PTR:]] = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-BEFORE-NEXT: %{{.+}} = cir.load %[[#IMAG_PTR]] : !cir.ptr, !s32i +// CHECK-BEFORE: } + +// CHECK-AFTER: cir.func +// CHECK-AFTER: %[[#C_PTR:]] = cir.get_global @c : !cir.ptr> +// CHECK-AFTER-NEXT: %[[#IMAG_PTR:]] = cir.complex.imag_ptr %[[#C_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-AFTER-NEXT: %{{.+}} = cir.load %[[#IMAG_PTR]] : !cir.ptr, !cir.double +// CHECK-AFTER: %[[#CI_PTR:]] = cir.get_global @ci : !cir.ptr> +// CHECK-AFTER-NEXT: %[[#IMAG_PTR:]] = cir.complex.imag_ptr %[[#CI_PTR]] : !cir.ptr> -> !cir.ptr +// CHECK-AFTER-NEXT: %{{.+}} = cir.load %[[#IMAG_PTR]] : !cir.ptr, !s32i +// CHECK-AFTER: } // LLVM: define dso_local void @extract_imag() // LLVM: %{{.+}} = load double, ptr getelementptr inbounds ({ double, double }, ptr @c, i32 0, i32 1), align 8 diff --git a/clang/test/CIR/Lowering/complex.cir b/clang/test/CIR/Lowering/complex.cir new file mode 100644 index 000000000000..91ded659997d --- /dev/null +++ b/clang/test/CIR/Lowering/complex.cir @@ -0,0 +1,15 @@ +// RUN: cir-translate -cir-to-llvmir -o %t.ll %s +// RUN: FileCheck --input-file %t.ll -check-prefix=LLVM %s + +!s32i = !cir.int + +module { + cir.func @complex_const() -> !cir.complex { + %0 = cir.const #cir.complex<#cir.int<1> : !s32i, #cir.int<2> : !s32i> : !cir.complex + cir.return %0 : !cir.complex + } + + // LLVM-LABEL: define { i32, i32 } @complex_const() + // LLVM-NEXT: ret { i32, i32 } { i32 1, i32 2 } + // LLVM-NEXT: } +} diff --git a/clang/test/CIR/Transforms/complex-fold.cir b/clang/test/CIR/Transforms/complex-fold.cir new file mode 100644 index 000000000000..34f6b67e1dc4 --- /dev/null +++ b/clang/test/CIR/Transforms/complex-fold.cir @@ -0,0 +1,44 @@ +// RUN: cir-opt --canonicalize -o %t.cir %s +// RUN: FileCheck --input-file %t.cir %s + +!s32i = !cir.int + +module { + cir.func @complex_create_fold() -> !cir.complex { + %0 = cir.const #cir.int<1> : !s32i + %1 = cir.const #cir.int<2> : !s32i + %2 = cir.complex.create %0, %1 : !s32i -> !cir.complex + cir.return %2 : !cir.complex + } + + // CHECK-LABEL: cir.func @complex_create_fold() -> !cir.complex { + // CHECK-NEXT: %[[#A:]] = cir.const #cir.complex<#cir.int<1> : !s32i, #cir.int<2> : !s32i> : !cir.complex + // CHECK-NEXT: cir.return %[[#A]] : !cir.complex + // CHECK-NEXT: } + + cir.func @fold_complex_real() -> !s32i { + %0 = cir.const #cir.int<1> : !s32i + %1 = cir.const #cir.int<2> : !s32i + %2 = cir.complex.create %0, %1 : !s32i -> !cir.complex + %3 = cir.complex.real %2 : !cir.complex -> !s32i + cir.return %3 : !s32i + } + + // CHECK-LABEL: cir.func @fold_complex_real() -> !s32i { + // CHECK-NEXT: %[[#A:]] = cir.const #cir.int<1> : !s32i + // CHECK-NEXT: cir.return %[[#A]] : !s32i + // CHECK-NEXT: } + + cir.func @fold_complex_imag() -> !s32i { + %0 = cir.const #cir.int<1> : !s32i + %1 = cir.const #cir.int<2> : !s32i + %2 = cir.complex.create %0, %1 : !s32i -> !cir.complex + %3 = cir.complex.imag %2 : !cir.complex -> !s32i + cir.return %3 : !s32i + } + + // CHECK-LABEL: cir.func @fold_complex_imag() -> !s32i { + // CHECK-NEXT: %[[#A:]] = cir.const #cir.int<2> : !s32i + // CHECK-NEXT: cir.return %[[#A]] : !s32i + // CHECK-NEXT: } +} From 28cdbf464f05b0e9461e5e0b3d6f705a609b30cd Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 9 Aug 2024 06:22:54 +0800 Subject: [PATCH 1751/2301] [CIR][CodeGen][NFCI] Target-independent ABI handling for SpirKernel call conv (#778) This PR follows OG CodeGen to use SPIR ABI info whatever the target is when analysing the function info of SPIR-V kernels (identified by its calling convention). For example, when compiling OpenCL kernels to x86-64 target, the kernel should still use SPIR-V's ABIInfo. As we haven't implemented SPIR-V ABI handling for complex constructs, there should be no functional changes. There is a test for this logic in OG CodeGen: `clang/test/CodeGenOpenCL/kernels-have-spir-cc-by-default.cl`. It mainly involves structs, which is beyond the progress of CIR ABI stuff. --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 15 ++++++++++----- clang/lib/CIR/CodeGen/TargetInfo.cpp | 10 ++++++++++ clang/lib/CIR/CodeGen/TargetInfo.h | 2 ++ 3 files changed, 22 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 66a0c7c6d3ac..541e15a0930e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -783,11 +783,16 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( (void)inserted; assert(inserted && "Recursively being processed?"); - // Compute ABI inforamtion. - assert(info.getCC() != clang::CallingConv::CC_SpirFunction && "NYI"); - assert(info.getCC() != CC_Swift && info.getCC() != CC_SwiftAsync && - "Swift NYI"); - getABIInfo().computeInfo(*FI); + // Compute ABI information. + if (CC == mlir::cir::CallingConv::SpirKernel) { + // Force target independent argument handling for the host visible + // kernel functions. + computeSPIRKernelABIInfo(CGM, *FI); + } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) { + llvm_unreachable("Swift NYI"); + } else { + getABIInfo().computeInfo(*FI); + } // Loop over all of the computed argument and return value info. If any of // them are direct or extend without a specified coerce type, specify the diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index a3db79645320..02850dad8bf8 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -264,6 +264,16 @@ class SPIRVABIInfo : public CommonSPIRABIInfo { } }; } // namespace + +namespace cir { +void computeSPIRKernelABIInfo(CIRGenModule &CGM, CIRGenFunctionInfo &FI) { + if (CGM.getTarget().getTriple().isSPIRV()) + SPIRVABIInfo(CGM.getTypes()).computeInfo(FI); + else + CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI); +} +} // namespace cir + namespace { class CommonSPIRTargetCIRGenInfo : public TargetCIRGenInfo { diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index f56c68d59732..d2f4f3cdbf88 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -99,6 +99,8 @@ class TargetCIRGenInfo { virtual ~TargetCIRGenInfo() {} }; +void computeSPIRKernelABIInfo(CIRGenModule &CGM, CIRGenFunctionInfo &FI); + } // namespace cir #endif From 3173ed49604318d641f125d5f50fcfad7fe56f25 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 9 Aug 2024 06:25:45 +0800 Subject: [PATCH 1752/2301] [CIR][Dialect] Add address space attribute to global op (#779) This PR adds the CIR address space attribute to GlobalOp and starts to resolve the missing feature flag `addressSpaceInGlobalVar`. The same asm format in pointer type is used: ``` cir.global external addrspace(offload_global) @addrspace1 = #cir.int<1> : !s32i ``` The parsing and printing helper is extracted into a common function to be reused by both `GlobalOp` and `PointerType` with two custom format proxy to it. That's because the signature of ODS generated method differs from the one for PointerType. Lowering to LLVM IR and CIRGen will come sequentially. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 +++ clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 5 +++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 12 ++++++++---- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 ++++++++++++++ clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 18 ++++++++++++++---- clang/test/CIR/IR/global.cir | 10 +++++++++- 6 files changed, 53 insertions(+), 9 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 78da99a65bfe..b2924d1f422b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2157,6 +2157,7 @@ def GlobalOp : CIR_Op<"global", OptionalAttr:$sym_visibility, TypeAttr:$sym_type, Arg:$linkage, + OptionalAttr:$addr_space, OptionalAttr:$tls_model, // Note this can also be a FlatSymbolRefAttr OptionalAttr:$initial_value, @@ -2175,6 +2176,7 @@ def GlobalOp : CIR_Op<"global", (`comdat` $comdat^)? ($tls_model^)? (`dsolocal` $dsolocal^)? + (`addrspace` `(` custom($addr_space)^ `)`)? $sym_name custom($sym_type, $initial_value, $ctorRegion, $dtorRegion) attr-dict @@ -2202,6 +2204,7 @@ def GlobalOp : CIR_Op<"global", // CIR defaults to external linkage. CArg<"cir::GlobalLinkageKind", "cir::GlobalLinkageKind::ExternalLinkage">:$linkage, + CArg<"cir::AddressSpaceAttr", "{}">:$addrSpace, CArg<"function_ref", "nullptr">:$ctorBuilder, CArg<"function_ref", diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index ff2a98a9ff5a..9736b3cd575e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -183,6 +183,11 @@ bool isFPOrFPVectorTy(mlir::Type); } // namespace cir } // namespace mlir +mlir::ParseResult parseAddrSpaceAttribute(mlir::AsmParser &p, + mlir::Attribute &addrSpaceAttr); +void printAddrSpaceAttribute(mlir::AsmPrinter &p, + mlir::Attribute addrSpaceAttr); + //===----------------------------------------------------------------------===// // CIR Dialect Tablegen'd Types //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 16ce15f2624e..899597967bff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -655,10 +655,12 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { [[nodiscard]] mlir::cir::GlobalOp createGlobal(mlir::ModuleOp module, mlir::Location loc, mlir::StringRef name, mlir::Type type, bool isConst, - mlir::cir::GlobalLinkageKind linkage) { + mlir::cir::GlobalLinkageKind linkage, + mlir::cir::AddressSpaceAttr addrSpace = {}) { mlir::OpBuilder::InsertionGuard guard(*this); setInsertionPointToStart(module.getBody()); - return create(loc, name, type, isConst, linkage); + return create(loc, name, type, isConst, linkage, + addrSpace); } /// Creates a versioned global variable. If the symbol is already taken, an ID @@ -667,7 +669,8 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { [[nodiscard]] mlir::cir::GlobalOp createVersionedGlobal(mlir::ModuleOp module, mlir::Location loc, mlir::StringRef name, mlir::Type type, bool isConst, - mlir::cir::GlobalLinkageKind linkage) { + mlir::cir::GlobalLinkageKind linkage, + mlir::cir::AddressSpaceAttr addrSpace = {}) { // Create a unique name if the given name is already taken. std::string uniqueName; if (unsigned version = GlobalsVersioning[name.str()]++) @@ -675,7 +678,8 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { else uniqueName = name.str(); - return createGlobal(module, loc, uniqueName, type, isConst, linkage); + return createGlobal(module, loc, uniqueName, type, isConst, linkage, + addrSpace); } mlir::Value createGetGlobal(mlir::cir::GlobalOp global, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index cf9ba8a6885d..69e179f8619c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1781,6 +1781,16 @@ static void printConstant(OpAsmPrinter &p, Attribute value) { p.printAttribute(value); } +static ParseResult parseGlobalOpAddrSpace(OpAsmParser &p, + AddressSpaceAttr &addrSpaceAttr) { + return parseAddrSpaceAttribute(p, addrSpaceAttr); +} + +static void printGlobalOpAddrSpace(OpAsmPrinter &p, GlobalOp op, + AddressSpaceAttr addrSpaceAttr) { + printAddrSpaceAttribute(p, addrSpaceAttr); +} + static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, TypeAttr type, Attribute initAttr, mlir::Region &ctorRegion, @@ -1954,6 +1964,7 @@ LogicalResult GlobalOp::verify() { void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, StringRef sym_name, Type sym_type, bool isConstant, cir::GlobalLinkageKind linkage, + cir::AddressSpaceAttr addrSpace, function_ref ctorBuilder, function_ref dtorBuilder) { odsState.addAttribute(getSymNameAttrName(odsState.name), @@ -1968,6 +1979,9 @@ void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage); odsState.addAttribute(getLinkageAttrName(odsState.name), linkageAttr); + if (addrSpace) + odsState.addAttribute(getAddrSpaceAttrName(odsState.name), addrSpace); + Region *ctorRegion = odsState.addRegion(); if (ctorBuilder) { odsBuilder.createBlock(ctorRegion); diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 10d581e826b4..4c250295856c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -945,8 +945,8 @@ PointerType::verify(llvm::function_ref emitError, return mlir::success(); } -mlir::ParseResult parsePointerAddrSpace(mlir::AsmParser &p, - mlir::Attribute &addrSpaceAttr) { +mlir::ParseResult parseAddrSpaceAttribute(mlir::AsmParser &p, + mlir::Attribute &addrSpaceAttr) { using mlir::cir::AddressSpaceAttr; auto attrLoc = p.getCurrentLocation(); @@ -978,8 +978,8 @@ mlir::ParseResult parsePointerAddrSpace(mlir::AsmParser &p, return mlir::success(); } -void printPointerAddrSpace(mlir::AsmPrinter &p, - mlir::Attribute rawAddrSpaceAttr) { +void printAddrSpaceAttribute(mlir::AsmPrinter &p, + mlir::Attribute rawAddrSpaceAttr) { using mlir::cir::AddressSpaceAttr; auto addrSpaceAttr = mlir::cast(rawAddrSpaceAttr); if (addrSpaceAttr.isTarget()) { @@ -990,6 +990,16 @@ void printPointerAddrSpace(mlir::AsmPrinter &p, } } +mlir::ParseResult parsePointerAddrSpace(mlir::AsmParser &p, + mlir::Attribute &addrSpaceAttr) { + return parseAddrSpaceAttribute(p, addrSpaceAttr); +} + +void printPointerAddrSpace(mlir::AsmPrinter &p, + mlir::Attribute rawAddrSpaceAttr) { + printAddrSpaceAttribute(p, rawAddrSpaceAttr); +} + //===----------------------------------------------------------------------===// // CIR Dialect //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 0db5f9f11b1a..ad8bba6a9410 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -69,6 +69,10 @@ module { %0 = cir.get_global thread_local @batata : !cir.ptr cir.return } + + cir.global external addrspace(offload_global) @addrspace1 = #cir.int<1> : !s32i + cir.global "private" internal addrspace(offload_local) @addrspace2 : !s32i + cir.global external addrspace(target<1>) @addrspace3 = #cir.int<3> : !s32i } // CHECK: cir.global external @a = #cir.int<3> : !s32i @@ -103,4 +107,8 @@ module { // CHECK: cir.func @f35() { // CHECK: %0 = cir.get_global thread_local @batata : !cir.ptr // CHECK: cir.return -// CHECK: } \ No newline at end of file +// CHECK: } + +// CHECK: cir.global external addrspace(offload_global) @addrspace1 = #cir.int<1> : !s32i +// CHECK: cir.global "private" internal addrspace(offload_local) @addrspace2 : !s32i +// CHECK: cir.global external addrspace(target<1>) @addrspace3 = #cir.int<3> : !s32i From b72fb6e219cb5a590e10e539256298a8b8b45cd5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 8 Aug 2024 16:30:21 -0700 Subject: [PATCH 1753/2301] [CIR][CIRGen][NFC] Tide up cir.alloc.exception --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 15 ++++++--------- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 12 +++++++++--- clang/test/CIR/CodeGen/throw.cpp | 2 +- clang/test/CIR/IR/invalid.cir | 2 +- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b2924d1f422b..8a398e85ba6a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3756,14 +3756,11 @@ def VAArgOp : CIR_Op<"va.arg">, } //===----------------------------------------------------------------------===// -// AllocException +// AllocExceptionOp //===----------------------------------------------------------------------===// -def AllocException : CIR_Op<"alloc_exception", [ - AllocaTypesMatchWith<"'allocType' matches pointee type of 'addr'", - "addr", "allocType", - "cast($_self).getPointee()">]> { - let summary = "Defines a scope-local variable"; +def AllocExceptionOp : CIR_Op<"alloc.exception"> { + let summary = "Allocates an exception according to Itanium ABI"; let description = [{ Implements a slightly higher level __cxa_allocate_exception: @@ -3778,18 +3775,18 @@ def AllocException : CIR_Op<"alloc_exception", [ // ... // throw "..."; cir.if %10 { - %11 = cir.alloc_exception(!cir.ptr) -> > + %11 = cir.alloc_exception 8 -> !cir.ptr ... // store exception content into %11 cir.throw(%11 : !cir.ptr>, ... ``` }]; - let arguments = (ins TypeAttr:$allocType); + let arguments = (ins UI64Attr:$size); let results = (outs Res]>:$addr); let assemblyFormat = [{ - `(` $allocType `)` `->` type($addr) attr-dict + $size `->` qualified(type($addr)) attr-dict }]; // Constraints verified elsewhere. diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 8f9cbec3d882..b554ffd856f0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2149,13 +2149,19 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, // Now allocate the exception object. auto &builder = CGF.getBuilder(); QualType clangThrowType = E->getSubExpr()->getType(); - auto throwTy = CGF.ConvertType(clangThrowType); + auto throwTy = builder.getPointerTo(CGF.ConvertType(clangThrowType)); + uint64_t typeSize = + CGF.getContext().getTypeSizeInChars(clangThrowType).getQuantity(); auto subExprLoc = CGF.getLoc(E->getSubExpr()->getSourceRange()); // Defer computing allocation size to some later lowering pass. auto exceptionPtr = builder - .create( - subExprLoc, builder.getPointerTo(throwTy), throwTy) + .create( + subExprLoc, throwTy, + mlir::IntegerAttr::get( + mlir::IntegerType::get(builder.getContext(), 64, + mlir::IntegerType::Unsigned), + typeSize)) .getAddr(); // Build expression and store its result into exceptionPtr. diff --git a/clang/test/CIR/CodeGen/throw.cpp b/clang/test/CIR/CodeGen/throw.cpp index 0ae33db072cb..3ad1c10c14ca 100644 --- a/clang/test/CIR/CodeGen/throw.cpp +++ b/clang/test/CIR/CodeGen/throw.cpp @@ -8,7 +8,7 @@ double d(int a, int b) { } // CHECK: cir.if %10 { -// CHECK-NEXT: %11 = cir.alloc_exception(!cir.ptr) -> > +// CHECK-NEXT: %11 = cir.alloc.exception 8 // CHECK-NEXT: %12 = cir.get_global @".str" : !cir.ptr> // CHECK-NEXT: %13 = cir.cast(array_to_ptrdecay, %12 : !cir.ptr>), !cir.ptr // CHECK-NEXT: cir.store %13, %11 : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index eb06fdcddda6..ea11992ae692 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -763,7 +763,7 @@ module { cir.global "private" constant internal @".str" = #cir.const_array<"Division by zero condition!\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global "private" constant external @_ZTIPKc : !cir.ptr cir.func @_Z8divisionii() { - %11 = cir.alloc_exception(!cir.ptr) -> > + %11 = cir.alloc.exception 8 -> !cir.ptr> %12 = cir.get_global @".str" : !cir.ptr> %13 = cir.cast(array_to_ptrdecay, %12 : !cir.ptr>), !cir.ptr cir.store %13, %11 : !cir.ptr, !cir.ptr> From 8588c38020cf5a422517ca35e761f694495cd07d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 8 Aug 2024 16:55:14 -0700 Subject: [PATCH 1754/2301] [CIR][LowerToLLVM] Exceptions: lower cir.alloc.exception Incremental work: test is available in the followup commit. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 88 ++++++++++++------- 1 file changed, 57 insertions(+), 31 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6ef01c493112..7af97c69fef4 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3646,6 +3646,20 @@ class CIREhTypeIdOpLowering } }; +// Make sure the LLVM function we are about to create a call for actually +// exists, if not create one. Returns a function +void getOrCreateLLVMFuncOp(mlir::ConversionPatternRewriter &rewriter, + mlir::Location loc, mlir::ModuleOp mod, + mlir::LLVM::LLVMFuncOp enclosingfnOp, + llvm::StringRef fnName, mlir::Type fnTy) { + auto *sourceSymbol = mlir::SymbolTable::lookupSymbolIn(mod, fnName); + if (!sourceSymbol) { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(enclosingfnOp); + rewriter.create(loc, fnName, fnTy); + } +} + class CIRCatchParamOpLowering : public mlir::OpConversionPattern { public: @@ -3654,43 +3668,29 @@ class CIRCatchParamOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::CatchParamOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { + auto modOp = op->getParentOfType(); + auto enclosingFnOp = op->getParentOfType(); if (op.isBegin()) { // Get or create `declare ptr @__cxa_begin_catch(ptr)` - llvm::StringRef cxaBeginCatch = "__cxa_begin_catch"; - auto *sourceSymbol = mlir::SymbolTable::lookupSymbolIn( - op->getParentOfType(), cxaBeginCatch); + StringRef fnName = "__cxa_begin_catch"; auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); - if (!sourceSymbol) { - auto catchFnTy = - mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {llvmPtrTy}, - /*isVarArg=*/false); - mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint( - op->getParentOfType()); - rewriter.create(op.getLoc(), cxaBeginCatch, - catchFnTy); - } + auto fnTy = mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {llvmPtrTy}, + /*isVarArg=*/false); + getOrCreateLLVMFuncOp(rewriter, op.getLoc(), modOp, enclosingFnOp, fnName, + fnTy); rewriter.replaceOpWithNewOp( - op, mlir::TypeRange{llvmPtrTy}, cxaBeginCatch, + op, mlir::TypeRange{llvmPtrTy}, fnName, mlir::ValueRange{adaptor.getExceptionPtr()}); return mlir::success(); } else if (op.isEnd()) { - // Get or create `declare void @__cxa_end_catch()` - llvm::StringRef cxaEndCatch = "__cxa_end_catch"; - auto *sourceSymbol = mlir::SymbolTable::lookupSymbolIn( - op->getParentOfType(), cxaEndCatch); - auto llvmVoidTy = mlir::LLVM::LLVMVoidType::get(rewriter.getContext()); - if (!sourceSymbol) { - auto catchFnTy = mlir::LLVM::LLVMFunctionType::get(llvmVoidTy, {}, - /*isVarArg=*/false); - mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint( - op->getParentOfType()); - rewriter.create(op.getLoc(), cxaEndCatch, - catchFnTy); - } + StringRef fnName = "__cxa_end_catch"; + auto fnTy = mlir::LLVM::LLVMFunctionType::get( + mlir::LLVM::LLVMVoidType::get(rewriter.getContext()), {}, + /*isVarArg=*/false); + getOrCreateLLVMFuncOp(rewriter, op.getLoc(), modOp, enclosingFnOp, fnName, + fnTy); rewriter.create(op.getLoc(), mlir::TypeRange{}, - cxaEndCatch, mlir::ValueRange{}); + fnName, mlir::ValueRange{}); rewriter.eraseOp(op); return mlir::success(); } @@ -3726,6 +3726,32 @@ class CIRResumeOpLowering } }; +class CIRAllocExceptionOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AllocExceptionOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // Get or create `declare ptr @__cxa_allocate_exception(i64)` + StringRef fnName = "__cxa_allocate_exception"; + auto modOp = op->getParentOfType(); + auto enclosingFnOp = op->getParentOfType(); + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + auto int64Ty = mlir::IntegerType::get(rewriter.getContext(), 64); + auto fnTy = mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {int64Ty}, + /*isVarArg=*/false); + getOrCreateLLVMFuncOp(rewriter, op.getLoc(), modOp, enclosingFnOp, fnName, + fnTy); + auto size = rewriter.create(op.getLoc(), + adaptor.getSizeAttr()); + rewriter.replaceOpWithNewOp( + op, mlir::TypeRange{llvmPtrTy}, fnName, mlir::ValueRange{size}); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter, mlir::DataLayout &dataLayout) { @@ -3764,8 +3790,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRSqrtOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, CIRFModOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering, CIRPowOpLowering, CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering, - CIRCatchParamOpLowering, CIRResumeOpLowering>(converter, - patterns.getContext()); + CIRCatchParamOpLowering, CIRResumeOpLowering, + CIRAllocExceptionOpLowering>(converter, patterns.getContext()); } namespace { From 31da7a3fb764337a94942b93f033e0d9a6a53a0b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 8 Aug 2024 17:22:48 -0700 Subject: [PATCH 1755/2301] [CIR][LowerToLLVM] Exceptions: lower cir.throw --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 24 +++++------ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 14 +++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 43 ++++++++++++++++++- clang/test/CIR/CodeGen/throw.cpp | 24 +++++++---- clang/test/CIR/IR/invalid.cir | 2 +- 5 files changed, 75 insertions(+), 32 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8a398e85ba6a..2abed62b3754 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3777,11 +3777,11 @@ def AllocExceptionOp : CIR_Op<"alloc.exception"> { cir.if %10 { %11 = cir.alloc_exception 8 -> !cir.ptr ... // store exception content into %11 - cir.throw(%11 : !cir.ptr>, ... + cir.throw %11 : !cir.ptr>, ... ``` }]; - let arguments = (ins UI64Attr:$size); + let arguments = (ins I64Attr:$size); let results = (outs Res]>:$addr); @@ -3797,10 +3797,7 @@ def AllocExceptionOp : CIR_Op<"alloc.exception"> { // ThrowOp //===----------------------------------------------------------------------===// -def ThrowOp : CIR_Op<"throw", [ - ParentOneOf<["FuncOp", "ScopeOp", "IfOp", "SwitchOp", - "DoWhileOp", "WhileOp", "ForOp"]>, - Terminator]> { +def ThrowOp : CIR_Op<"throw"> { let summary = "(Re)Throws an exception"; let description = [{ Very similar to __cxa_throw: @@ -3821,23 +3818,22 @@ def ThrowOp : CIR_Op<"throw", [ // if (b == 0) // throw "Division by zero condition!"; cir.if %10 { - %11 = cir.alloc_exception(!cir.ptr) -> > + %11 = cir.alloc_exception 8 -> !cir.ptr ... cir.store %13, %11 : // Store string addr for "Division by zero condition!" - cir.throw(%11 : !cir.ptr>, @"typeinfo for char const*") + cir.throw %11 : !cir.ptr>, @"typeinfo for char const*" ``` }]; - let arguments = (ins Optional:$exception_ptr, + let arguments = (ins Optional:$exception_ptr, OptionalAttr:$type_info, OptionalAttr:$dtor); let assemblyFormat = [{ - `(` - ($exception_ptr^ `:` type($exception_ptr))? - (`,` $type_info^)? - (`,` $dtor^)? - `)` attr-dict + ($exception_ptr^ `:` type($exception_ptr))? + (`,` $type_info^)? + (`,` $dtor^)? + attr-dict }]; let extraClassDeclaration = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index b554ffd856f0..260d1c24baa7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2157,11 +2157,7 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, auto exceptionPtr = builder .create( - subExprLoc, throwTy, - mlir::IntegerAttr::get( - mlir::IntegerType::get(builder.getContext(), 64, - mlir::IntegerType::Unsigned), - typeSize)) + subExprLoc, throwTy, builder.getI64IntegerAttr(typeSize)) .getAddr(); // Build expression and store its result into exceptionPtr. @@ -2195,11 +2191,15 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, .getSymNameAttr()); } + // FIXME: When adding support for invoking, we should wrap the throw op + // below into a try, and let CFG flatten pass to generate a cir.try_call. assert(!CGF.getInvokeDest() && "landing pad like logic NYI"); // Now throw the exception. - builder.create(CGF.getLoc(E->getSourceRange()), - exceptionPtr, typeInfo.getSymbol(), dtor); + mlir::Location loc = CGF.getLoc(E->getSourceRange()); + builder.create(loc, exceptionPtr, typeInfo.getSymbol(), + dtor); + builder.create(loc); } static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7af97c69fef4..0a121f126da2 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3752,6 +3752,45 @@ class CIRAllocExceptionOpLowering } }; +class CIRThrowOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ThrowOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // Get or create `declare void @__cxa_throw(ptr, ptr, ptr)` + StringRef fnName = "__cxa_throw"; + auto modOp = op->getParentOfType(); + auto enclosingFnOp = op->getParentOfType(); + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + auto voidTy = mlir::LLVM::LLVMVoidType::get(rewriter.getContext()); + auto fnTy = mlir::LLVM::LLVMFunctionType::get( + voidTy, {llvmPtrTy, llvmPtrTy, llvmPtrTy}, + /*isVarArg=*/false); + getOrCreateLLVMFuncOp(rewriter, op.getLoc(), modOp, enclosingFnOp, fnName, + fnTy); + mlir::Value typeInfo = rewriter.create( + op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), + adaptor.getTypeInfoAttr()); + + mlir::Value dtor; + if (op.getDtor()) { + dtor = rewriter.create( + op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), + adaptor.getDtorAttr()); + } else { + dtor = rewriter.create( + op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext())); + } + rewriter.replaceOpWithNewOp( + op, mlir::TypeRange{}, fnName, + mlir::ValueRange{adaptor.getExceptionPtr(), typeInfo, dtor}); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter, mlir::DataLayout &dataLayout) { @@ -3790,8 +3829,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRSqrtOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, CIRFModOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering, CIRPowOpLowering, CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering, - CIRCatchParamOpLowering, CIRResumeOpLowering, - CIRAllocExceptionOpLowering>(converter, patterns.getContext()); + CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, + CIRThrowOpLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/throw.cpp b/clang/test/CIR/CodeGen/throw.cpp index 3ad1c10c14ca..c2395c3725c3 100644 --- a/clang/test/CIR/CodeGen/throw.cpp +++ b/clang/test/CIR/CodeGen/throw.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: FileCheck --input-file=%t.cir --check-prefix=CIR %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s double d(int a, int b) { if (b == 0) @@ -7,10 +9,16 @@ double d(int a, int b) { return (a/b); } -// CHECK: cir.if %10 { -// CHECK-NEXT: %11 = cir.alloc.exception 8 -// CHECK-NEXT: %12 = cir.get_global @".str" : !cir.ptr> -// CHECK-NEXT: %13 = cir.cast(array_to_ptrdecay, %12 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store %13, %11 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.throw(%11 : !cir.ptr>, @_ZTIPKc) -// CHECK-NEXT: } \ No newline at end of file +// CIR: cir.if +// CIR-NEXT: %[[ADDR:.*]] = cir.alloc.exception 8 +// CIR-NEXT: %[[STR:.*]] = cir.get_global @".str" : !cir.ptr> +// CIR-NEXT: %[[STR_ADD:.*]] = cir.cast(array_to_ptrdecay, %[[STR]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: cir.store %[[STR_ADD]], %[[ADDR]] : !cir.ptr, !cir.ptr> +// CIR-NEXT: cir.throw %[[ADDR]] : !cir.ptr>, @_ZTIPKc +// CIR-NEXT: cir.unreachable +// CIR-NEXT: } + +// LLVM: %[[ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 8) +// LLVM: store ptr @.str, ptr %[[ADDR]], align 8 +// LLVM: call void @__cxa_throw(ptr %[[ADDR]], ptr @_ZTIPKc, ptr null) +// LLVM: unreachable \ No newline at end of file diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index ea11992ae692..fbbe3a0b7d87 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -767,7 +767,7 @@ module { %12 = cir.get_global @".str" : !cir.ptr> %13 = cir.cast(array_to_ptrdecay, %12 : !cir.ptr>), !cir.ptr cir.store %13, %11 : !cir.ptr, !cir.ptr> - cir.throw(%11 : !cir.ptr>) // expected-error {{'type_info' symbol attribute missing}} + cir.throw %11 : !cir.ptr> // expected-error {{'type_info' symbol attribute missing}} } } From e32baf3119185c28d22d638ac01d51b7f16c669f Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 9 Aug 2024 09:15:18 +0800 Subject: [PATCH 1756/2301] [CIR][CodeGen] Set CIR function calling conventions (#780) This PR adds the counterparts of methods `SetFunctionAttributes` `SetLLVMFunctionAttributes` from OG CodeGen, in order to set proper calling conv for `cir.func` ops. `spir-calling-conv.cl` is the dedicated test, while other OpenCL-related tests are also updated. It removes previous workaround of incorrect calling conv and better synchronizes with the original tests. (These changes are not musts). --- clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h | 12 +++++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 32 +++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 4 +++ .../CIR/CodeGen/OpenCL/kernel-arg-metadata.cl | 4 +-- .../CIR/CodeGen/OpenCL/kernel-attributes.cl | 10 +++--- .../CIR/CodeGen/OpenCL/spir-calling-conv.cl | 20 ++++++++++++ clang/test/CIR/CodeGen/OpenCL/spirv-target.cl | 2 +- 7 files changed, 71 insertions(+), 13 deletions(-) create mode 100644 clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl diff --git a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h index 4c9df914ee7c..a07f62fe28d7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h @@ -250,9 +250,17 @@ class CIRGenFunctionInfo final return getExtParameterInfos()[argIndex]; } - /// getCallingConvention - REturn the user specified calling convention, which + /// getCallingConvention - Return the user specified calling convention, which /// has been translated into a CIR CC. - mlir::cir::CallingConv getCallingConvention() const { return CallingConvention; } + mlir::cir::CallingConv getCallingConvention() const { + return CallingConvention; + } + + /// getEffectiveCallingConvention - Return the actual calling convention to + /// use, which may depend on the ABI. + mlir::cir::CallingConv getEffectiveCallingConvention() const { + return EffectiveCallingConvention; + } clang::CanQualType getReturnType() const { return getArgsBuffer()[0].type; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 2d857dcfe3c8..fa3ff4b57162 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2361,9 +2361,35 @@ void CIRGenModule::setExtraAttributesForFunc(FuncOp f, builder.getContext(), attrs.getDictionary(builder.getContext()))); } -void CIRGenModule::setFunctionAttributes(GlobalDecl GD, mlir::cir::FuncOp F, - bool IsIncompleteFunction, - bool IsThunk) { +void CIRGenModule::setCIRFunctionAttributes(GlobalDecl GD, + const CIRGenFunctionInfo &info, + mlir::cir::FuncOp func, + bool isThunk) { + // TODO(cir): More logic of constructAttributeList is needed. + // NOTE(cir): Here we only need CallConv, so a call to constructAttributeList + // is omitted for simplicity. + mlir::cir::CallingConv callingConv = info.getEffectiveCallingConvention(); + + // TODO(cir): Check X86_VectorCall incompatibility with WinARM64EC + + func.setCallingConv(callingConv); +} + +void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl, + mlir::cir::FuncOp func, + bool isIncompleteFunction, + bool isThunk) { + // NOTE(cir): Original CodeGen checks if this is an intrinsic. In CIR we + // represent them in dedicated ops. The correct attributes are ensured during + // translation to LLVM. Thus, we don't need to check for them here. + + if (!isIncompleteFunction) { + setCIRFunctionAttributes(globalDecl, + getTypes().arrangeGlobalDeclaration(globalDecl), + func, isThunk); + } + + // TODO(cir): Complete the remaining part of the function. assert(!MissingFeatures::setFunctionAttributes()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 895f6a54d403..b33e80d478c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -551,6 +551,10 @@ class CIRGenModule : public CIRGenTypeCache { void setFunctionAttributes(GlobalDecl GD, mlir::cir::FuncOp F, bool IsIncompleteFunction, bool IsThunk); + /// Set the CIR function attributes (sext, zext, etc). + void setCIRFunctionAttributes(GlobalDecl GD, const CIRGenFunctionInfo &info, + mlir::cir::FuncOp func, bool isThunk); + void buildGlobalDefinition(clang::GlobalDecl D, mlir::Operation *Op = nullptr); void buildGlobalFunctionDefinition(clang::GlobalDecl D, mlir::Operation *Op); diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl index 7961e0e26244..ccc8ce967e50 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl @@ -6,7 +6,7 @@ __kernel void kernel_function() {} // CIR: #fn_attr[[ATTR:[0-9]*]] = {{.+}}cl.kernel_arg_metadata = #cir.cl.kernel_arg_metadata{{.+}} -// CIR: cir.func @kernel_function() extra(#fn_attr[[ATTR]]) +// CIR: cir.func @kernel_function() cc(spir_kernel) extra(#fn_attr[[ATTR]]) -// LLVM: define {{.*}}void @kernel_function() {{[^{]+}} !kernel_arg_addr_space ![[MD:[0-9]+]] !kernel_arg_access_qual ![[MD]] !kernel_arg_type ![[MD]] !kernel_arg_base_type ![[MD]] !kernel_arg_type_qual ![[MD]] { +// LLVM: define {{.*}}spir_kernel void @kernel_function() {{[^{]+}} !kernel_arg_addr_space ![[MD:[0-9]+]] !kernel_arg_access_qual ![[MD]] !kernel_arg_type ![[MD]] !kernel_arg_base_type ![[MD]] !kernel_arg_type_qual ![[MD]] { // LLVM: ![[MD]] = !{} diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl index 6badc7ce47ba..8a32f1d8088d 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -fclangir -emit-cir -triple x86_64-unknown-linux-gnu %s -o %t.cir +// RUN: %clang_cc1 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR -// RUN: %clang_cc1 -fclangir -emit-llvm -triple x86_64-unknown-linux-gnu %s -o %t.ll +// RUN: %clang_cc1 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM typedef unsigned int uint4 __attribute__((ext_vector_type(4))); @@ -11,7 +11,7 @@ kernel __attribute__((vec_type_hint(int))) __attribute__((reqd_work_group_size( // CIR-DAG: #fn_attr[[KERNEL1:[0-9]*]] = {{.+}}cl.kernel_metadata = #cir.cl.kernel_metadata{{.+}} // CIR-DAG: cir.func @kernel1{{.+}} extra(#fn_attr[[KERNEL1]]) -// LLVM-DAG: define{{.*}}@kernel1(i32 {{[^%]*}}%0) {{[^{]+}} !reqd_work_group_size ![[MD1_REQD_WG:[0-9]+]] !vec_type_hint ![[MD1_VEC_TYPE:[0-9]+]] +// LLVM-DAG: define {{(dso_local )?}}spir_kernel void @kernel1(i32 {{[^%]*}}%0) {{[^{]+}} !reqd_work_group_size ![[MD1_REQD_WG:[0-9]+]] !vec_type_hint ![[MD1_VEC_TYPE:[0-9]+]] // LLVM-DAG: [[MD1_VEC_TYPE]] = !{i32 undef, i32 1} // LLVM-DAG: [[MD1_REQD_WG]] = !{i32 1, i32 2, i32 4} @@ -21,7 +21,7 @@ kernel __attribute__((vec_type_hint(uint4))) __attribute__((work_group_size_hint // CIR-DAG: #fn_attr[[KERNEL2:[0-9]*]] = {{.+}}cl.kernel_metadata = #cir.cl.kernel_metadata, vec_type_hint_signedness = 0>{{.+}} // CIR-DAG: cir.func @kernel2{{.+}} extra(#fn_attr[[KERNEL2]]) -// LLVM-DAG: define{{.*}}@kernel2(i32 {{[^%]*}}%0) {{[^{]+}} !vec_type_hint ![[MD2_VEC_TYPE:[0-9]+]] !work_group_size_hint ![[MD2_WG_SIZE:[0-9]+]] +// LLVM-DAG: define {{(dso_local )?}}spir_kernel void @kernel2(i32 {{[^%]*}}%0) {{[^{]+}} !vec_type_hint ![[MD2_VEC_TYPE:[0-9]+]] !work_group_size_hint ![[MD2_WG_SIZE:[0-9]+]] // LLVM-DAG: [[MD2_VEC_TYPE]] = !{<4 x i32> undef, i32 0} // LLVM-DAG: [[MD2_WG_SIZE]] = !{i32 8, i32 16, i32 32} @@ -31,5 +31,5 @@ kernel __attribute__((intel_reqd_sub_group_size(8))) void kernel3(int a) {} // CIR-DAG: #fn_attr[[KERNEL3:[0-9]*]] = {{.+}}cl.kernel_metadata = #cir.cl.kernel_metadata{{.+}} // CIR-DAG: cir.func @kernel3{{.+}} extra(#fn_attr[[KERNEL3]]) -// LLVM-DAG: define{{.*}}@kernel3(i32 {{[^%]*}}%0) {{[^{]+}} !intel_reqd_sub_group_size ![[MD3_INTEL:[0-9]+]] +// LLVM-DAG: define {{(dso_local )?}}spir_kernel void @kernel3(i32 {{[^%]*}}%0) {{[^{]+}} !intel_reqd_sub_group_size ![[MD3_INTEL:[0-9]+]] // LLVM-DAG: [[MD3_INTEL]] = !{i32 8} diff --git a/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl b/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl new file mode 100644 index 000000000000..96550f721bf5 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -fclangir %s -O0 -triple "spirv64-unknown-unknown" -emit-cir -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR +// RUN: %clang_cc1 -fclangir %s -O0 -triple "spirv64-unknown-unknown" -emit-llvm -o %t.ll +// RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM + +// CIR: cir.func {{.*}}@get_dummy_id{{.*}} cc(spir_function) +// LLVM-DAG: declare{{.*}} spir_func i32 @get_dummy_id( +int get_dummy_id(int D); + +// CIR: cir.func {{.*}}@bar{{.*}} cc(spir_kernel) +// LLVM-DAG: declare{{.*}} spir_kernel void @bar( +kernel void bar(global int *A); + +// CIR: cir.func {{.*}}@foo{{.*}} cc(spir_kernel) +// LLVM-DAG: define{{.*}} spir_kernel void @foo( +kernel void foo(global int *A) { + int id = get_dummy_id(0); + A[id] = id; + bar(A); +} diff --git a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl index 523ffaf405e9..dadf4e6022b5 100644 --- a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl +++ b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl @@ -15,7 +15,7 @@ typedef struct { } my_st; // CIR-SPIRV64: cir.func @func( -// LLVM-SPIRV64: @func( +// LLVM-SPIRV64: define spir_kernel void @func( kernel void func(global long *arg) { int res1[sizeof(my_st) == 24 ? 1 : -1]; // expected-no-diagnostics int res2[sizeof(void *) == 8 ? 1 : -1]; // expected-no-diagnostics From 90956da4f48fe80f11b5fcdfd3b92f58293132c5 Mon Sep 17 00:00:00 2001 From: 7mile Date: Sat, 10 Aug 2024 01:55:42 +0800 Subject: [PATCH 1757/2301] [CIR][CodeGen][Dialect][NFC] Refactor OpenCL stuff to separate impl files (#781) This PR refactors OpenCL-specific logic that is rather independent from CIRGen and dialect definition into two new implementation files: `CIRGenOpenCL.cpp` and `CIROpenCLAttrs.cpp`. There will also be a `CIRGenOpenCLRuntime.cpp` in the future as the counterpart of `CGOpenCLRuntime.cpp` from OG CodeGen. But that's basically a dedicated implementation file of a class `CGOpenCLRuntime`, we should not bother it. --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 63 ----- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 185 -------------- clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp | 265 ++++++++++++++++++++ clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 91 ------- clang/lib/CIR/Dialect/IR/CIROpenCLAttrs.cpp | 116 +++++++++ clang/lib/CIR/Dialect/IR/CMakeLists.txt | 1 + 7 files changed, 383 insertions(+), 339 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp create mode 100644 clang/lib/CIR/Dialect/IR/CIROpenCLAttrs.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 916566936283..c2e0f6668d56 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1719,66 +1719,3 @@ CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, return numElements; } - -void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, - mlir::cir::FuncOp Fn) { - if (!FD->hasAttr() && !FD->hasAttr()) - return; - - CGM.genKernelArgMetadata(Fn, FD, this); - - if (!getLangOpts().OpenCL) - return; - - using mlir::cir::OpenCLKernelMetadataAttr; - - mlir::ArrayAttr workGroupSizeHintAttr, reqdWorkGroupSizeAttr; - mlir::TypeAttr vecTypeHintAttr; - std::optional vecTypeHintSignedness; - mlir::IntegerAttr intelReqdSubGroupSizeAttr; - - if (const VecTypeHintAttr *A = FD->getAttr()) { - mlir::Type typeHintValue = getTypes().ConvertType(A->getTypeHint()); - vecTypeHintAttr = mlir::TypeAttr::get(typeHintValue); - vecTypeHintSignedness = - OpenCLKernelMetadataAttr::isSignedHint(typeHintValue); - } - - if (const WorkGroupSizeHintAttr *A = FD->getAttr()) { - workGroupSizeHintAttr = builder.getI32ArrayAttr({ - static_cast(A->getXDim()), - static_cast(A->getYDim()), - static_cast(A->getZDim()), - }); - } - - if (const ReqdWorkGroupSizeAttr *A = FD->getAttr()) { - reqdWorkGroupSizeAttr = builder.getI32ArrayAttr({ - static_cast(A->getXDim()), - static_cast(A->getYDim()), - static_cast(A->getZDim()), - }); - } - - if (const OpenCLIntelReqdSubGroupSizeAttr *A = - FD->getAttr()) { - intelReqdSubGroupSizeAttr = builder.getI32IntegerAttr(A->getSubGroupSize()); - } - - // Skip the metadata attr if no hints are present. - if (!vecTypeHintAttr && !workGroupSizeHintAttr && !reqdWorkGroupSizeAttr && - !intelReqdSubGroupSizeAttr) - return; - - // Append the kernel metadata to the extra attributes dictionary. - mlir::NamedAttrList attrs; - attrs.append(Fn.getExtraAttrs().getElements()); - - auto kernelMetadataAttr = OpenCLKernelMetadataAttr::get( - builder.getContext(), workGroupSizeHintAttr, reqdWorkGroupSizeAttr, - vecTypeHintAttr, vecTypeHintSignedness, intelReqdSubGroupSizeAttr); - attrs.append(kernelMetadataAttr.getMnemonic(), kernelMetadataAttr); - - Fn.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), attrs.getDictionary(builder.getContext()))); -} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index fa3ff4b57162..9b389b53fb08 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -3100,188 +3100,3 @@ mlir::cir::SourceLanguage CIRGenModule::getCIRSourceLanguage() { // TODO(cir): support remaining source languages. llvm_unreachable("CIR does not yet support the given source language"); } - -// Returns the address space id that should be produced to the -// kernel_arg_addr_space metadata. This is always fixed to the ids -// as specified in the SPIR 2.0 specification in order to differentiate -// for example in clGetKernelArgInfo() implementation between the address -// spaces with targets without unique mapping to the OpenCL address spaces -// (basically all single AS CPUs). -static unsigned ArgInfoAddressSpace(LangAS AS) { - switch (AS) { - case LangAS::opencl_global: - return 1; - case LangAS::opencl_constant: - return 2; - case LangAS::opencl_local: - return 3; - case LangAS::opencl_generic: - return 4; // Not in SPIR 2.0 specs. - case LangAS::opencl_global_device: - return 5; - case LangAS::opencl_global_host: - return 6; - default: - return 0; // Assume private. - } -} - -void CIRGenModule::genKernelArgMetadata(mlir::cir::FuncOp Fn, - const FunctionDecl *FD, - CIRGenFunction *CGF) { - assert(((FD && CGF) || (!FD && !CGF)) && - "Incorrect use - FD and CGF should either be both null or not!"); - // Create MDNodes that represent the kernel arg metadata. - // Each MDNode is a list in the form of "key", N number of values which is - // the same number of values as their are kernel arguments. - - const PrintingPolicy &Policy = getASTContext().getPrintingPolicy(); - - // Integer values for the kernel argument address space qualifiers. - SmallVector addressQuals; - - // Attrs for the kernel argument access qualifiers (images only). - SmallVector accessQuals; - - // Attrs for the kernel argument type names. - SmallVector argTypeNames; - - // Attrs for the kernel argument base type names. - SmallVector argBaseTypeNames; - - // Attrs for the kernel argument type qualifiers. - SmallVector argTypeQuals; - - // Attrs for the kernel argument names. - SmallVector argNames; - - // OpenCL image and pipe types require special treatments for some metadata - assert(!MissingFeatures::openCLBuiltinTypes()); - - if (FD && CGF) - for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) { - const ParmVarDecl *parm = FD->getParamDecl(i); - // Get argument name. - argNames.push_back(builder.getStringAttr(parm->getName())); - - if (!getLangOpts().OpenCL) - continue; - QualType ty = parm->getType(); - std::string typeQuals; - - // Get image and pipe access qualifier: - if (ty->isImageType() || ty->isPipeType()) { - llvm_unreachable("NYI"); - } else - accessQuals.push_back(builder.getStringAttr("none")); - - auto getTypeSpelling = [&](QualType Ty) { - auto typeName = Ty.getUnqualifiedType().getAsString(Policy); - - if (Ty.isCanonical()) { - StringRef typeNameRef = typeName; - // Turn "unsigned type" to "utype" - if (typeNameRef.consume_front("unsigned ")) - return std::string("u") + typeNameRef.str(); - if (typeNameRef.consume_front("signed ")) - return typeNameRef.str(); - } - - return typeName; - }; - - if (ty->isPointerType()) { - QualType pointeeTy = ty->getPointeeType(); - - // Get address qualifier. - addressQuals.push_back( - ArgInfoAddressSpace(pointeeTy.getAddressSpace())); - - // Get argument type name. - std::string typeName = getTypeSpelling(pointeeTy) + "*"; - std::string baseTypeName = - getTypeSpelling(pointeeTy.getCanonicalType()) + "*"; - argTypeNames.push_back(builder.getStringAttr(typeName)); - argBaseTypeNames.push_back(builder.getStringAttr(baseTypeName)); - - // Get argument type qualifiers: - if (ty.isRestrictQualified()) - typeQuals = "restrict"; - if (pointeeTy.isConstQualified() || - (pointeeTy.getAddressSpace() == LangAS::opencl_constant)) - typeQuals += typeQuals.empty() ? "const" : " const"; - if (pointeeTy.isVolatileQualified()) - typeQuals += typeQuals.empty() ? "volatile" : " volatile"; - } else { - uint32_t AddrSpc = 0; - bool isPipe = ty->isPipeType(); - if (ty->isImageType() || isPipe) - llvm_unreachable("NYI"); - - addressQuals.push_back(AddrSpc); - - // Get argument type name. - ty = isPipe ? ty->castAs()->getElementType() : ty; - std::string typeName = getTypeSpelling(ty); - std::string baseTypeName = getTypeSpelling(ty.getCanonicalType()); - - // Remove access qualifiers on images - // (as they are inseparable from type in clang implementation, - // but OpenCL spec provides a special query to get access qualifier - // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER): - if (ty->isImageType()) { - llvm_unreachable("NYI"); - } - - argTypeNames.push_back(builder.getStringAttr(typeName)); - argBaseTypeNames.push_back(builder.getStringAttr(baseTypeName)); - - if (isPipe) - llvm_unreachable("NYI"); - } - argTypeQuals.push_back(builder.getStringAttr(typeQuals)); - } - - bool shouldEmitArgName = getCodeGenOpts().EmitOpenCLArgMetadata || - getCodeGenOpts().HIPSaveKernelArgName; - - if (getLangOpts().OpenCL) { - // The kernel arg name is emitted only when `-cl-kernel-arg-info` is on, - // since it is only used to support `clGetKernelArgInfo` which requires - // `-cl-kernel-arg-info` to work. The other metadata are mandatory because - // they are necessary for OpenCL runtime to set kernel argument. - mlir::ArrayAttr resArgNames = {}; - if (shouldEmitArgName) - resArgNames = builder.getArrayAttr(argNames); - - // Update the function's extra attributes with the kernel argument metadata. - auto value = mlir::cir::OpenCLKernelArgMetadataAttr::get( - Fn.getContext(), builder.getI32ArrayAttr(addressQuals), - builder.getArrayAttr(accessQuals), builder.getArrayAttr(argTypeNames), - builder.getArrayAttr(argBaseTypeNames), - builder.getArrayAttr(argTypeQuals), resArgNames); - mlir::NamedAttrList items{Fn.getExtraAttrs().getElements().getValue()}; - auto oldValue = items.set(value.getMnemonic(), value); - if (oldValue != value) { - Fn.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), builder.getDictionaryAttr(items))); - } - } else { - if (shouldEmitArgName) - llvm_unreachable("NYI HIPSaveKernelArgName"); - } -} - -void CIRGenModule::buildOpenCLMetadata() { - // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the - // opencl.ocl.version named metadata node. - // C++ for OpenCL has a distinct mapping for versions compatibile with OpenCL. - unsigned version = langOpts.getOpenCLCompatibleVersion(); - unsigned major = version / 100; - unsigned minor = (version % 100) / 10; - - auto clVersionAttr = - mlir::cir::OpenCLVersionAttr::get(builder.getContext(), major, minor); - - theModule->setAttr("cir.cl.version", clVersionAttr); -} diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp new file mode 100644 index 000000000000..6c2e7542fbbb --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp @@ -0,0 +1,265 @@ +//===- CIRGenOpenCL.cpp - OpenCL-specific logic for CIR generation --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with OpenCL-specific logic of CIR generation. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenFunction.h" +#include "CIRGenModule.h" + +using namespace cir; +using namespace clang; + +// Returns the address space id that should be produced to the +// kernel_arg_addr_space metadata. This is always fixed to the ids +// as specified in the SPIR 2.0 specification in order to differentiate +// for example in clGetKernelArgInfo() implementation between the address +// spaces with targets without unique mapping to the OpenCL address spaces +// (basically all single AS CPUs). +static unsigned ArgInfoAddressSpace(LangAS AS) { + switch (AS) { + case LangAS::opencl_global: + return 1; + case LangAS::opencl_constant: + return 2; + case LangAS::opencl_local: + return 3; + case LangAS::opencl_generic: + return 4; // Not in SPIR 2.0 specs. + case LangAS::opencl_global_device: + return 5; + case LangAS::opencl_global_host: + return 6; + default: + return 0; // Assume private. + } +} + +void CIRGenModule::genKernelArgMetadata(mlir::cir::FuncOp Fn, + const FunctionDecl *FD, + CIRGenFunction *CGF) { + assert(((FD && CGF) || (!FD && !CGF)) && + "Incorrect use - FD and CGF should either be both null or not!"); + // Create MDNodes that represent the kernel arg metadata. + // Each MDNode is a list in the form of "key", N number of values which is + // the same number of values as their are kernel arguments. + + const PrintingPolicy &Policy = getASTContext().getPrintingPolicy(); + + // Integer values for the kernel argument address space qualifiers. + SmallVector addressQuals; + + // Attrs for the kernel argument access qualifiers (images only). + SmallVector accessQuals; + + // Attrs for the kernel argument type names. + SmallVector argTypeNames; + + // Attrs for the kernel argument base type names. + SmallVector argBaseTypeNames; + + // Attrs for the kernel argument type qualifiers. + SmallVector argTypeQuals; + + // Attrs for the kernel argument names. + SmallVector argNames; + + // OpenCL image and pipe types require special treatments for some metadata + assert(!MissingFeatures::openCLBuiltinTypes()); + + if (FD && CGF) + for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) { + const ParmVarDecl *parm = FD->getParamDecl(i); + // Get argument name. + argNames.push_back(builder.getStringAttr(parm->getName())); + + if (!getLangOpts().OpenCL) + continue; + QualType ty = parm->getType(); + std::string typeQuals; + + // Get image and pipe access qualifier: + if (ty->isImageType() || ty->isPipeType()) { + llvm_unreachable("NYI"); + } else + accessQuals.push_back(builder.getStringAttr("none")); + + auto getTypeSpelling = [&](QualType Ty) { + auto typeName = Ty.getUnqualifiedType().getAsString(Policy); + + if (Ty.isCanonical()) { + StringRef typeNameRef = typeName; + // Turn "unsigned type" to "utype" + if (typeNameRef.consume_front("unsigned ")) + return std::string("u") + typeNameRef.str(); + if (typeNameRef.consume_front("signed ")) + return typeNameRef.str(); + } + + return typeName; + }; + + if (ty->isPointerType()) { + QualType pointeeTy = ty->getPointeeType(); + + // Get address qualifier. + addressQuals.push_back( + ArgInfoAddressSpace(pointeeTy.getAddressSpace())); + + // Get argument type name. + std::string typeName = getTypeSpelling(pointeeTy) + "*"; + std::string baseTypeName = + getTypeSpelling(pointeeTy.getCanonicalType()) + "*"; + argTypeNames.push_back(builder.getStringAttr(typeName)); + argBaseTypeNames.push_back(builder.getStringAttr(baseTypeName)); + + // Get argument type qualifiers: + if (ty.isRestrictQualified()) + typeQuals = "restrict"; + if (pointeeTy.isConstQualified() || + (pointeeTy.getAddressSpace() == LangAS::opencl_constant)) + typeQuals += typeQuals.empty() ? "const" : " const"; + if (pointeeTy.isVolatileQualified()) + typeQuals += typeQuals.empty() ? "volatile" : " volatile"; + } else { + uint32_t AddrSpc = 0; + bool isPipe = ty->isPipeType(); + if (ty->isImageType() || isPipe) + llvm_unreachable("NYI"); + + addressQuals.push_back(AddrSpc); + + // Get argument type name. + ty = isPipe ? ty->castAs()->getElementType() : ty; + std::string typeName = getTypeSpelling(ty); + std::string baseTypeName = getTypeSpelling(ty.getCanonicalType()); + + // Remove access qualifiers on images + // (as they are inseparable from type in clang implementation, + // but OpenCL spec provides a special query to get access qualifier + // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER): + if (ty->isImageType()) { + llvm_unreachable("NYI"); + } + + argTypeNames.push_back(builder.getStringAttr(typeName)); + argBaseTypeNames.push_back(builder.getStringAttr(baseTypeName)); + + if (isPipe) + llvm_unreachable("NYI"); + } + argTypeQuals.push_back(builder.getStringAttr(typeQuals)); + } + + bool shouldEmitArgName = getCodeGenOpts().EmitOpenCLArgMetadata || + getCodeGenOpts().HIPSaveKernelArgName; + + if (getLangOpts().OpenCL) { + // The kernel arg name is emitted only when `-cl-kernel-arg-info` is on, + // since it is only used to support `clGetKernelArgInfo` which requires + // `-cl-kernel-arg-info` to work. The other metadata are mandatory because + // they are necessary for OpenCL runtime to set kernel argument. + mlir::ArrayAttr resArgNames = {}; + if (shouldEmitArgName) + resArgNames = builder.getArrayAttr(argNames); + + // Update the function's extra attributes with the kernel argument metadata. + auto value = mlir::cir::OpenCLKernelArgMetadataAttr::get( + Fn.getContext(), builder.getI32ArrayAttr(addressQuals), + builder.getArrayAttr(accessQuals), builder.getArrayAttr(argTypeNames), + builder.getArrayAttr(argBaseTypeNames), + builder.getArrayAttr(argTypeQuals), resArgNames); + mlir::NamedAttrList items{Fn.getExtraAttrs().getElements().getValue()}; + auto oldValue = items.set(value.getMnemonic(), value); + if (oldValue != value) { + Fn.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), builder.getDictionaryAttr(items))); + } + } else { + if (shouldEmitArgName) + llvm_unreachable("NYI HIPSaveKernelArgName"); + } +} + +void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, + mlir::cir::FuncOp Fn) { + if (!FD->hasAttr() && !FD->hasAttr()) + return; + + CGM.genKernelArgMetadata(Fn, FD, this); + + if (!getLangOpts().OpenCL) + return; + + using mlir::cir::OpenCLKernelMetadataAttr; + + mlir::ArrayAttr workGroupSizeHintAttr, reqdWorkGroupSizeAttr; + mlir::TypeAttr vecTypeHintAttr; + std::optional vecTypeHintSignedness; + mlir::IntegerAttr intelReqdSubGroupSizeAttr; + + if (const VecTypeHintAttr *A = FD->getAttr()) { + mlir::Type typeHintValue = getTypes().ConvertType(A->getTypeHint()); + vecTypeHintAttr = mlir::TypeAttr::get(typeHintValue); + vecTypeHintSignedness = + OpenCLKernelMetadataAttr::isSignedHint(typeHintValue); + } + + if (const WorkGroupSizeHintAttr *A = FD->getAttr()) { + workGroupSizeHintAttr = builder.getI32ArrayAttr({ + static_cast(A->getXDim()), + static_cast(A->getYDim()), + static_cast(A->getZDim()), + }); + } + + if (const ReqdWorkGroupSizeAttr *A = FD->getAttr()) { + reqdWorkGroupSizeAttr = builder.getI32ArrayAttr({ + static_cast(A->getXDim()), + static_cast(A->getYDim()), + static_cast(A->getZDim()), + }); + } + + if (const OpenCLIntelReqdSubGroupSizeAttr *A = + FD->getAttr()) { + intelReqdSubGroupSizeAttr = builder.getI32IntegerAttr(A->getSubGroupSize()); + } + + // Skip the metadata attr if no hints are present. + if (!vecTypeHintAttr && !workGroupSizeHintAttr && !reqdWorkGroupSizeAttr && + !intelReqdSubGroupSizeAttr) + return; + + // Append the kernel metadata to the extra attributes dictionary. + mlir::NamedAttrList attrs; + attrs.append(Fn.getExtraAttrs().getElements()); + + auto kernelMetadataAttr = OpenCLKernelMetadataAttr::get( + builder.getContext(), workGroupSizeHintAttr, reqdWorkGroupSizeAttr, + vecTypeHintAttr, vecTypeHintSignedness, intelReqdSubGroupSizeAttr); + attrs.append(kernelMetadataAttr.getMnemonic(), kernelMetadataAttr); + + Fn.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), attrs.getDictionary(builder.getContext()))); +} + +void CIRGenModule::buildOpenCLMetadata() { + // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the + // opencl.ocl.version named metadata node. + // C++ for OpenCL has a distinct mapping for versions compatibile with OpenCL. + unsigned version = langOpts.getOpenCLCompatibleVersion(); + unsigned major = version / 100; + unsigned minor = (version % 100) / 10; + + auto clVersionAttr = + mlir::cir::OpenCLVersionAttr::get(builder.getContext(), major, minor); + + theModule->setAttr("cir.cl.version", clVersionAttr); +} diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index e1d96bdbf65e..b4350caa79fa 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -30,6 +30,7 @@ add_clang_library(clangCIR CIRGenFunction.cpp CIRGenItaniumCXXABI.cpp CIRGenModule.cpp + CIRGenOpenCL.cpp CIRGenOpenMPRuntime.cpp CIRGenStmt.cpp CIRGenStmtOpenMP.cpp diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 7d5d401f8d0f..e4675345a00f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -522,97 +522,6 @@ LogicalResult DynamicCastInfoAttr::verify( return success(); } -//===----------------------------------------------------------------------===// -// OpenCLKernelMetadataAttr definitions -//===----------------------------------------------------------------------===// - -LogicalResult OpenCLKernelMetadataAttr::verify( - ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ArrayAttr workGroupSizeHint, ArrayAttr reqdWorkGroupSize, - TypeAttr vecTypeHint, std::optional vecTypeHintSignedness, - IntegerAttr intelReqdSubGroupSize) { - // If no field is present, the attribute is considered invalid. - if (!workGroupSizeHint && !reqdWorkGroupSize && !vecTypeHint && - !vecTypeHintSignedness && !intelReqdSubGroupSize) { - return emitError() - << "metadata attribute without any field present is invalid"; - } - - // Check for 3-dim integer tuples - auto is3dimIntTuple = [](ArrayAttr arr) { - auto isInt = [](Attribute dim) { return mlir::isa(dim); }; - return arr.size() == 3 && llvm::all_of(arr, isInt); - }; - if (workGroupSizeHint && !is3dimIntTuple(workGroupSizeHint)) { - return emitError() - << "work_group_size_hint must have exactly 3 integer elements"; - } - if (reqdWorkGroupSize && !is3dimIntTuple(reqdWorkGroupSize)) { - return emitError() - << "reqd_work_group_size must have exactly 3 integer elements"; - } - - // Check for co-presence of vecTypeHintSignedness - if (!!vecTypeHint != vecTypeHintSignedness.has_value()) { - return emitError() << "vec_type_hint_signedness should be present if and " - "only if vec_type_hint is set"; - } - - if (vecTypeHint) { - Type vecTypeHintValue = vecTypeHint.getValue(); - if (mlir::isa(vecTypeHintValue.getDialect())) { - // Check for signedness alignment in CIR - if (isSignedHint(vecTypeHintValue) != vecTypeHintSignedness) { - return emitError() << "vec_type_hint_signedness must match the " - "signedness of the vec_type_hint type"; - } - // Check for the dialect of type hint - } else if (!LLVM::isCompatibleType(vecTypeHintValue)) { - return emitError() << "vec_type_hint must be a type from the CIR or LLVM " - "dialect"; - } - } - - return success(); -} - -//===----------------------------------------------------------------------===// -// OpenCLKernelArgMetadataAttr definitions -//===----------------------------------------------------------------------===// - -LogicalResult OpenCLKernelArgMetadataAttr::verify( - ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ArrayAttr addrSpaces, ArrayAttr accessQuals, ArrayAttr types, - ArrayAttr baseTypes, ArrayAttr typeQuals, ArrayAttr argNames) { - auto isIntArray = [](ArrayAttr elt) { - return llvm::all_of( - elt, [](Attribute elt) { return mlir::isa(elt); }); - }; - auto isStrArray = [](ArrayAttr elt) { - return llvm::all_of( - elt, [](Attribute elt) { return mlir::isa(elt); }); - }; - - if (!isIntArray(addrSpaces)) - return emitError() << "addr_space must be integer arrays"; - if (!llvm::all_of>( - {accessQuals, types, baseTypes, typeQuals}, isStrArray)) - return emitError() - << "access_qual, type, base_type, type_qual must be string arrays"; - if (argNames && !isStrArray(argNames)) { - return emitError() << "name must be a string array"; - } - - if (!llvm::all_of>( - {addrSpaces, accessQuals, types, baseTypes, typeQuals, argNames}, - [&](ArrayAttr arr) { - return !arr || arr.size() == addrSpaces.size(); - })) { - return emitError() << "all arrays must have the same number of elements"; - } - return success(); -} - //===----------------------------------------------------------------------===// // AddressSpaceAttr definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIROpenCLAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIROpenCLAttrs.cpp new file mode 100644 index 000000000000..e16aad6d6867 --- /dev/null +++ b/clang/lib/CIR/Dialect/IR/CIROpenCLAttrs.cpp @@ -0,0 +1,116 @@ +//===- CIROpenCLAttrs.cpp - OpenCL specific attributes in CIR -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the OpenCL-specific attrs in the CIR dialect. +// +//===----------------------------------------------------------------------===// + +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" + +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/IR/DialectImplementation.h" + +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/TypeSwitch.h" + +using namespace mlir; +using namespace mlir::cir; + +//===----------------------------------------------------------------------===// +// OpenCLKernelMetadataAttr definitions +//===----------------------------------------------------------------------===// + +LogicalResult OpenCLKernelMetadataAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ArrayAttr workGroupSizeHint, ArrayAttr reqdWorkGroupSize, + TypeAttr vecTypeHint, std::optional vecTypeHintSignedness, + IntegerAttr intelReqdSubGroupSize) { + // If no field is present, the attribute is considered invalid. + if (!workGroupSizeHint && !reqdWorkGroupSize && !vecTypeHint && + !vecTypeHintSignedness && !intelReqdSubGroupSize) { + return emitError() + << "metadata attribute without any field present is invalid"; + } + + // Check for 3-dim integer tuples + auto is3dimIntTuple = [](ArrayAttr arr) { + auto isInt = [](Attribute dim) { return mlir::isa(dim); }; + return arr.size() == 3 && llvm::all_of(arr, isInt); + }; + if (workGroupSizeHint && !is3dimIntTuple(workGroupSizeHint)) { + return emitError() + << "work_group_size_hint must have exactly 3 integer elements"; + } + if (reqdWorkGroupSize && !is3dimIntTuple(reqdWorkGroupSize)) { + return emitError() + << "reqd_work_group_size must have exactly 3 integer elements"; + } + + // Check for co-presence of vecTypeHintSignedness + if (!!vecTypeHint != vecTypeHintSignedness.has_value()) { + return emitError() << "vec_type_hint_signedness should be present if and " + "only if vec_type_hint is set"; + } + + if (vecTypeHint) { + Type vecTypeHintValue = vecTypeHint.getValue(); + if (mlir::isa(vecTypeHintValue.getDialect())) { + // Check for signedness alignment in CIR + if (isSignedHint(vecTypeHintValue) != vecTypeHintSignedness) { + return emitError() << "vec_type_hint_signedness must match the " + "signedness of the vec_type_hint type"; + } + // Check for the dialect of type hint + } else if (!LLVM::isCompatibleType(vecTypeHintValue)) { + return emitError() << "vec_type_hint must be a type from the CIR or LLVM " + "dialect"; + } + } + + return success(); +} + +//===----------------------------------------------------------------------===// +// OpenCLKernelArgMetadataAttr definitions +//===----------------------------------------------------------------------===// + +LogicalResult OpenCLKernelArgMetadataAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ArrayAttr addrSpaces, ArrayAttr accessQuals, ArrayAttr types, + ArrayAttr baseTypes, ArrayAttr typeQuals, ArrayAttr argNames) { + auto isIntArray = [](ArrayAttr elt) { + return llvm::all_of( + elt, [](Attribute elt) { return mlir::isa(elt); }); + }; + auto isStrArray = [](ArrayAttr elt) { + return llvm::all_of( + elt, [](Attribute elt) { return mlir::isa(elt); }); + }; + + if (!isIntArray(addrSpaces)) + return emitError() << "addr_space must be integer arrays"; + if (!llvm::all_of>( + {accessQuals, types, baseTypes, typeQuals}, isStrArray)) + return emitError() + << "access_qual, type, base_type, type_qual must be string arrays"; + if (argNames && !isStrArray(argNames)) { + return emitError() << "name must be a string array"; + } + + if (!llvm::all_of>( + {addrSpaces, accessQuals, types, baseTypes, typeQuals, argNames}, + [&](ArrayAttr arr) { + return !arr || arr.size() == addrSpaces.size(); + })) { + return emitError() << "all arrays must have the same number of elements"; + } + return success(); +} diff --git a/clang/lib/CIR/Dialect/IR/CMakeLists.txt b/clang/lib/CIR/Dialect/IR/CMakeLists.txt index 03f588f7c7a5..34e2f642cefe 100644 --- a/clang/lib/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/IR/CMakeLists.txt @@ -1,5 +1,6 @@ add_clang_library(MLIRCIR CIRAttrs.cpp + CIROpenCLAttrs.cpp CIRDataLayout.cpp CIRDialect.cpp CIRMemorySlot.cpp From dda425f44d5e670a28e0e071a6b501b2297b694a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 9 Aug 2024 11:37:20 -0700 Subject: [PATCH 1758/2301] [CIR][ClangTidy][Lifetime] Update to newer dyn_cast usage and update pass pipeline Unbreaks `ninja check-clang-extra-clang-tidy` --- clang-tools-extra/clang-tidy/cir/Lifetime.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/clang-tools-extra/clang-tidy/cir/Lifetime.cpp b/clang-tools-extra/clang-tidy/cir/Lifetime.cpp index e74b34825318..c349febed734 100644 --- a/clang-tools-extra/clang-tidy/cir/Lifetime.cpp +++ b/clang-tools-extra/clang-tidy/cir/Lifetime.cpp @@ -105,25 +105,25 @@ void Lifetime::setupAndRunClangIRLifetimeChecker(ASTContext &astCtx) { clang::SourceLocation getClangSrcLoc(mlir::Location loc) { // Direct maps into a clang::SourceLocation. - if (auto fileLoc = loc.dyn_cast()) { + if (auto fileLoc = dyn_cast(loc)) { return getClangFromFileLineCol(fileLoc); } // FusedLoc needs to be decomposed but the canonical one // is the first location, we handle source ranges somewhere // else. - if (auto fileLoc = loc.dyn_cast()) { + if (auto fileLoc = dyn_cast(loc)) { auto locArray = fileLoc.getLocations(); assert(locArray.size() > 0 && "expected multiple locs"); return getClangFromFileLineCol( - locArray[0].dyn_cast()); + dyn_cast(locArray[0])); } // Many loc styles are yet to be handled. - if (auto fileLoc = loc.dyn_cast()) { + if (auto fileLoc = dyn_cast(loc)) { llvm_unreachable("mlir::UnknownLoc not implemented!"); } - if (auto fileLoc = loc.dyn_cast()) { + if (auto fileLoc = dyn_cast(loc)) { llvm_unreachable("mlir::CallSiteLoc not implemented!"); } llvm_unreachable("Unknown location style"); @@ -178,7 +178,7 @@ void Lifetime::setupAndRunClangIRLifetimeChecker(ASTContext &astCtx) { mlir::PassManager pm(mlirCtx.get()); // Add pre-requisite passes to the pipeline - pm.addPass(mlir::createMergeCleanupsPass()); + pm.addPass(mlir::createCIRSimplifyPass()); // Insert the lifetime checker. pm.addPass(mlir::createLifetimeCheckPass( From 1987f6efac67b005e975ddec2eec0fb920306fd7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 9 Aug 2024 12:23:24 -0700 Subject: [PATCH 1759/2301] [CIR][CIRGen][NFC] Add more missing feature tracking and cleanup warnings --- clang/include/clang/CIR/MissingFeatures.h | 3 + clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 143 +++++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 + clang/lib/CIR/CodeGen/TargetInfo.cpp | 1 - 4 files changed, 134 insertions(+), 18 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 436bc506df7c..ae6b70a5d7b9 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -65,6 +65,7 @@ struct MissingFeatures { static bool sanitizeVLABound() { return false; } static bool sanitizerBuiltin() { return false; } static bool sanitizerReturn() { return false; } + static bool sanitizeOther() { return false; } // ObjC static bool setObjCGCLValueClass() { return false; } @@ -128,6 +129,7 @@ struct MissingFeatures { static bool volatileTypes() { return false; } static bool syncScopeID() { return false; } + // Misc static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } static bool incrementProfileCounter() { return false; } @@ -175,6 +177,7 @@ struct MissingFeatures { static bool escapedLocals() { return false; } static bool deferredReplacements() { return false; } static bool shouldInstrumentFunction() { return false; } + static bool xray() { return false; } // Inline assembly static bool asmGoto() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index c2e0f6668d56..7ef13b7a524d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -896,6 +896,38 @@ ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) { } } +bool CIRGenFunction::ShouldSkipSanitizerInstrumentation() { + if (!CurFuncDecl) + return false; + return CurFuncDecl->hasAttr(); +} + +/// Return true if the current function should be instrumented with XRay nop +/// sleds. +bool CIRGenFunction::ShouldXRayInstrumentFunction() const { + return CGM.getCodeGenOpts().XRayInstrumentFunctions; +} + +static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) { + auto *MD = dyn_cast_or_null(D); + if (!MD || !MD->getDeclName().getAsIdentifierInfo() || + !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") || + (MD->getNumParams() != 1 && MD->getNumParams() != 2)) + return false; + + if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType()) + return false; + + if (MD->getNumParams() == 2) { + auto *PT = MD->parameters()[1]->getType()->getAs(); + if (!PT || !PT->isVoidPointerType() || + !PT->getPointeeType().isConstQualified()) + return false; + } + + return true; +} + void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, @@ -932,32 +964,109 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, } while (0); if (D) { + const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds); + SanitizerMask no_sanitize_mask; bool NoSanitizeCoverage = false; - (void)NoSanitizeCoverage; - for (auto Attr : D->specific_attrs()) { - (void)Attr; - llvm_unreachable("NYI"); + for (auto *Attr : D->specific_attrs()) { + no_sanitize_mask |= Attr->getMask(); + // SanitizeCoverage is not handled by SanOpts. + if (Attr->hasCoverage()) + NoSanitizeCoverage = true; } - // SanitizeCoverage is not handled by SanOpts + // Apply the no_sanitize* attributes to SanOpts. + SanOpts.Mask &= ~no_sanitize_mask; + if (no_sanitize_mask & SanitizerKind::Address) + SanOpts.set(SanitizerKind::KernelAddress, false); + if (no_sanitize_mask & SanitizerKind::KernelAddress) + SanOpts.set(SanitizerKind::Address, false); + if (no_sanitize_mask & SanitizerKind::HWAddress) + SanOpts.set(SanitizerKind::KernelHWAddress, false); + if (no_sanitize_mask & SanitizerKind::KernelHWAddress) + SanOpts.set(SanitizerKind::HWAddress, false); + + // TODO(cir): set llvm::Attribute::NoSanitizeBounds + if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds)) + assert(!MissingFeatures::sanitizeOther()); + + // TODO(cir): set llvm::Attribute::NoSanitizeCoverage if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage()) + assert(!MissingFeatures::sanitizeOther()); + + // Some passes need the non-negated no_sanitize attribute. Pass them on. + if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) { + // TODO(cir): set no_sanitize_thread + if (no_sanitize_mask & SanitizerKind::Thread) + assert(!MissingFeatures::sanitizeOther()); + } + } + + if (ShouldSkipSanitizerInstrumentation()) { + assert(!MissingFeatures::sanitizeOther()); + } else { + // Apply sanitizer attributes to the function. + if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress)) + assert(!MissingFeatures::sanitizeOther()); + if (SanOpts.hasOneOf(SanitizerKind::HWAddress | + SanitizerKind::KernelHWAddress)) + assert(!MissingFeatures::sanitizeOther()); + if (SanOpts.has(SanitizerKind::MemtagStack)) + assert(!MissingFeatures::sanitizeOther()); + if (SanOpts.has(SanitizerKind::Thread)) + assert(!MissingFeatures::sanitizeOther()); + if (SanOpts.has(SanitizerKind::NumericalStability)) + assert(!MissingFeatures::sanitizeOther()); + if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory)) + assert(!MissingFeatures::sanitizeOther()); + } + if (SanOpts.has(SanitizerKind::SafeStack)) + assert(!MissingFeatures::sanitizeOther()); + if (SanOpts.has(SanitizerKind::ShadowCallStack)) + assert(!MissingFeatures::sanitizeOther()); + + // Apply fuzzing attribute to the function. + if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink)) + assert(!MissingFeatures::sanitizeOther()); + + // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize, + // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time. + if (SanOpts.has(SanitizerKind::Thread)) { + if (const auto *OMD = dyn_cast_or_null(D)) { llvm_unreachable("NYI"); + } } - // Apply sanitizer attributes to the function. - if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress | - SanitizerKind::HWAddress | - SanitizerKind::KernelHWAddress | SanitizerKind::MemTag | - SanitizerKind::Thread | SanitizerKind::Memory | - SanitizerKind::KernelMemory | SanitizerKind::SafeStack | - SanitizerKind::ShadowCallStack | SanitizerKind::Fuzzer | - SanitizerKind::FuzzerNoLink | - SanitizerKind::CFIUnrelatedCast | SanitizerKind::Null)) - llvm_unreachable("NYI"); + // Ignore unrelated casts in STL allocate() since the allocator must cast + // from void* to T* before object initialization completes. Don't match on the + // namespace because not all allocators are in std:: + if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { + if (matchesStlAllocatorFn(D, getContext())) + SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast; + } + + // Ignore null checks in coroutine functions since the coroutines passes + // are not aware of how to move the extra UBSan instructions across the split + // coroutine boundaries. + if (D && SanOpts.has(SanitizerKind::Null)) + if (FD && FD->getBody() && + FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass) + SanOpts.Mask &= ~SanitizerKind::Null; + + // Apply xray attributes to the function (as a string, for now) + if (const auto *XRayAttr = D ? D->getAttr() : nullptr) { + assert(!MissingFeatures::xray()); + } else { + assert(!MissingFeatures::xray()); + } - // TODO: XRay - // TODO: PGO + if (ShouldXRayInstrumentFunction()) { + assert(!MissingFeatures::xray()); + } + + if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) { + assert(!MissingFeatures::getProfileCount()); + } unsigned Count, Offset; if (const auto *Attr = diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 7458571ca08a..25a74293a19c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1601,6 +1601,11 @@ class CIRGenFunction : public CIRGenTypeCache { void buildDelegateCallArg(CallArgList &args, const clang::VarDecl *param, clang::SourceLocation loc); + /// Return true if the current function should not be instrumented with + /// sanitizers. + bool ShouldSkipSanitizerInstrumentation(); + bool ShouldXRayInstrumentFunction() const; + /// Return true if the current function should be instrumented with /// __cyg_profile_func_* calls bool ShouldInstrumentFunction(); diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 02850dad8bf8..29b21b00eb0e 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -72,7 +72,6 @@ class DefaultABIInfo : public ABIInfo { if (const EnumType *EnumTy = Ty->getAs()) llvm_unreachable("NYI"); - ASTContext &Context = getContext(); if (const auto *EIT = Ty->getAs()) llvm_unreachable("NYI"); From fe929dd4594354967027ec42cc56d727d7082035 Mon Sep 17 00:00:00 2001 From: 7mile Date: Sat, 10 Aug 2024 03:24:21 +0800 Subject: [PATCH 1760/2301] [CIR][FrontendAction] Use ClangIR pipeline to emit LLVM bitcode (#782) This PR enables ClangIR pipeline for LLVM bitcode output when it's specified properly, aligned with the behavior of text-form LLVM IR. Some refactors about switch cases are also included to avoid too many dups. --- .../clang/CIRFrontendAction/CIRGenAction.h | 8 +++ clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 55 ++++++++++--------- .../ExecuteCompilerInvocation.cpp | 8 ++- clang/test/CIR/cc1.c | 3 + clang/test/CIR/driver.c | 6 ++ 5 files changed, 52 insertions(+), 28 deletions(-) diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h index 74d5e5e32611..b4e183d7978a 100644 --- a/clang/include/clang/CIRFrontendAction/CIRGenAction.h +++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h @@ -34,6 +34,7 @@ class CIRGenAction : public clang::ASTFrontendAction { EmitCIR, EmitCIRFlat, EmitLLVM, + EmitBC, EmitMLIR, EmitObj, None @@ -106,6 +107,13 @@ class EmitLLVMAction : public CIRGenAction { EmitLLVMAction(mlir::MLIRContext *mlirCtx = nullptr); }; +class EmitBCAction : public CIRGenAction { + virtual void anchor(); + +public: + EmitBCAction(mlir::MLIRContext *mlirCtx = nullptr); +}; + class EmitAssemblyAction : public CIRGenAction { virtual void anchor(); diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index f19bcc208431..578ccbf8ca2e 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -74,6 +74,22 @@ static std::string sanitizePassOptions(llvm::StringRef o) { namespace cir { +static BackendAction +getBackendActionFromOutputType(CIRGenAction::OutputType action) { + switch (action) { + case CIRGenAction::OutputType::EmitAssembly: + return BackendAction::Backend_EmitAssembly; + case CIRGenAction::OutputType::EmitBC: + return BackendAction::Backend_EmitBC; + case CIRGenAction::OutputType::EmitLLVM: + return BackendAction::Backend_EmitLL; + case CIRGenAction::OutputType::EmitObj: + return BackendAction::Backend_EmitObj; + default: + llvm_unreachable("Unsupported action"); + } +} + static std::unique_ptr lowerFromCIRToLLVMIR(const clang::FrontendOptions &feOptions, mlir::ModuleOp mlirMod, @@ -261,7 +277,10 @@ class CIRGenConsumer : public clang::ASTConsumer { loweredMlirModule->print(*outputStream, flags); break; } - case CIRGenAction::OutputType::EmitLLVM: { + case CIRGenAction::OutputType::EmitLLVM: + case CIRGenAction::OutputType::EmitBC: + case CIRGenAction::OutputType::EmitObj: + case CIRGenAction::OutputType::EmitAssembly: { llvm::LLVMContext llvmCtx; auto llvmModule = lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, @@ -269,38 +288,14 @@ class CIRGenConsumer : public clang::ASTConsumer { llvmModule->setTargetTriple(targetOptions.Triple); - emitBackendOutput(compilerInstance, codeGenOptions, - C.getTargetInfo().getDataLayoutString(), - llvmModule.get(), BackendAction::Backend_EmitLL, FS, - std::move(outputStream)); - break; - } - case CIRGenAction::OutputType::EmitObj: { - llvm::LLVMContext llvmCtx; - auto llvmModule = - lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, - feOptions.ClangIRDisableCIRVerifier); + BackendAction backendAction = getBackendActionFromOutputType(action); - llvmModule->setTargetTriple(targetOptions.Triple); emitBackendOutput(compilerInstance, codeGenOptions, C.getTargetInfo().getDataLayoutString(), - llvmModule.get(), BackendAction::Backend_EmitObj, FS, + llvmModule.get(), backendAction, FS, std::move(outputStream)); break; } - case CIRGenAction::OutputType::EmitAssembly: { - llvm::LLVMContext llvmCtx; - auto llvmModule = - lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, - feOptions.ClangIRDisableCIRVerifier); - - llvmModule->setTargetTriple(targetOptions.Triple); - emitBackendOutput(compilerInstance, codeGenOptions, - C.getTargetInfo().getDataLayoutString(), - llvmModule.get(), BackendAction::Backend_EmitAssembly, - FS, std::move(outputStream)); - break; - } case CIRGenAction::OutputType::None: break; } @@ -364,6 +359,8 @@ getOutputStream(CompilerInstance &ci, StringRef inFile, return ci.createDefaultOutputFile(false, inFile, "mlir"); case CIRGenAction::OutputType::EmitLLVM: return ci.createDefaultOutputFile(false, inFile, "llvm"); + case CIRGenAction::OutputType::EmitBC: + return ci.createDefaultOutputFile(true, inFile, "bc"); case CIRGenAction::OutputType::EmitObj: return ci.createDefaultOutputFile(true, inFile, "o"); case CIRGenAction::OutputType::None: @@ -469,6 +466,10 @@ void EmitLLVMAction::anchor() {} EmitLLVMAction::EmitLLVMAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitLLVM, _MLIRContext) {} +void EmitBCAction::anchor() {} +EmitBCAction::EmitBCAction(mlir::MLIRContext *_MLIRContext) + : CIRGenAction(OutputType::EmitBC, _MLIRContext) {} + void EmitObjAction::anchor() {} EmitObjAction::EmitObjAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitObj, _MLIRContext) {} diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 1e1f6b34012f..134978b7bad7 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -78,7 +78,13 @@ CreateFrontendBaseAction(CompilerInstance &CI) { return std::make_unique<::cir::EmitAssemblyAction>(); #endif return std::make_unique(); - case EmitBC: return std::make_unique(); + case EmitBC: { +#if CLANG_ENABLE_CIR + if (UseCIR) + return std::make_unique<::cir::EmitBCAction>(); +#endif + return std::make_unique(); + } #if CLANG_ENABLE_CIR case EmitCIR: return std::make_unique<::cir::EmitCIRAction>(); case EmitCIRFlat: diff --git a/clang/test/CIR/cc1.c b/clang/test/CIR/cc1.c index af53e22fd20a..176ea42d44de 100644 --- a/clang/test/CIR/cc1.c +++ b/clang/test/CIR/cc1.c @@ -2,6 +2,9 @@ // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm-bc %s -o %t.bc +// RUN: llvm-dis %t.bc -o %t.bc.ll +// RUN: FileCheck --input-file=%t.bc.ll %s -check-prefix=LLVM // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -S %s -o %t.s // RUN: FileCheck --input-file=%t.s %s -check-prefix=ASM // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-obj %s -o %t.o diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index 401c40d41064..bd1d13d0dba9 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -6,6 +6,12 @@ // RUN: FileCheck --input-file=%t1.ll %s -check-prefix=LLVM // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -S -emit-llvm %s -o %t2.ll // RUN: FileCheck --input-file=%t2.ll %s -check-prefix=LLVM +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fclangir-direct-lowering -c -emit-llvm %s -o %t1.bc +// RUN: llvm-dis %t1.bc -o %t1.bc.ll +// RUN: FileCheck --input-file=%t1.bc.ll %s -check-prefix=LLVM +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -c -emit-llvm %s -o %t2.bc +// RUN: llvm-dis %t2.bc -o %t2.bc.ll +// RUN: FileCheck --input-file=%t2.bc.ll %s -check-prefix=LLVM // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-passes -S -Xclang -emit-cir %s -o %t.cir From dd22ef6a5668cec773ac82aaeb3415ef5a886c88 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 9 Aug 2024 14:08:38 -0700 Subject: [PATCH 1761/2301] [CIR][NFC] Add missing feature for nodebug --- clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index ae6b70a5d7b9..6560ab4666a3 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -73,6 +73,7 @@ struct MissingFeatures { // Debug info static bool generateDebugInfo() { return false; } + static bool noDebugInfo() { return false; } // LLVM Attributes static bool setFunctionAttributes() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 7ef13b7a524d..23740b1be5dc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -586,7 +586,7 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // Check if we should generate debug info for this function. if (FD->hasAttr()) { - llvm_unreachable("NYI"); + assert(!MissingFeatures::noDebugInfo()); } // The function might not have a body if we're generating thunks for a From bd0aa857714a89ec2e4404dfe6294f59194a3c32 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 9 Aug 2024 15:50:45 -0700 Subject: [PATCH 1762/2301] [CIR][CIRGen] Emit constant global _Atomic's --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 17 ++++++++++++++++- clang/test/CIR/CodeGen/c11atomic.c | 13 +++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/c11atomic.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 721767b02118..636b1af1b579 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1675,7 +1675,22 @@ mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, QualType destType) { // For an _Atomic-qualified constant, we may need to add tail padding. if (auto AT = destType->getAs()) { - assert(0 && "not implemented"); + QualType destValueType = AT->getValueType(); + C = emitForMemory(CGM, C, destValueType); + + uint64_t innerSize = CGM.getASTContext().getTypeSize(destValueType); + uint64_t outerSize = CGM.getASTContext().getTypeSize(destType); + if (innerSize == outerSize) + return C; + + assert(innerSize < outerSize && "emitted over-large constant for atomic"); + auto &builder = CGM.getBuilder(); + auto zeroArray = builder.getZeroInitAttr( + mlir::cir::ArrayType::get(builder.getContext(), builder.getUInt8Ty(), + (outerSize - innerSize) / 8)); + SmallVector anonElts = {C, zeroArray}; + auto arrAttr = mlir::ArrayAttr::get(builder.getContext(), anonElts); + return builder.getAnonConstStruct(arrAttr, false); } // Zero-extend bool. diff --git a/clang/test/CIR/CodeGen/c11atomic.c b/clang/test/CIR/CodeGen/c11atomic.c new file mode 100644 index 000000000000..114182a79fa9 --- /dev/null +++ b/clang/test/CIR/CodeGen/c11atomic.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 %s -triple aarch64-none-linux-android21 -fclangir -emit-cir -std=c11 -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 %s -triple aarch64-none-linux-android21 -fclangir -emit-llvm -std=c11 -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// CIR-DAG: ![[PS:.*]] = !cir.struct}> +// CIR-DAG: cir.global external @testPromotedStructGlobal = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s16i, #cir.int<2> : !s16i, #cir.int<3> : !s16i}> : ![[PS]], #cir.zero : !cir.array}> : ![[ANON]] + +// LLVM-DAG: %[[PS:.*]] = type { i16, i16, i16 } +// LLVM-DAG: @testPromotedStructGlobal = global { %[[PS]], [2 x i8] } { %[[PS]] { i16 1, i16 2, i16 3 }, [2 x i8] zeroinitializer } +typedef struct { short x, y, z; } PS; +_Atomic PS testPromotedStructGlobal = (PS){1, 2, 3}; From 8ff98e7aa4753df33c22661ed49c649ad41247e3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 9 Aug 2024 17:36:01 -0700 Subject: [PATCH 1763/2301] [CIR][CIRGen] Builtins: focus on non fast math path The LLVM lowering actually maps to the version without fast math, to add support for fast math we need to set the proper LLVM attribute on each of those operations. --- clang/include/clang/CIR/MissingFeatures.h | 3 ++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 30 +++++++------------ .../test/CIR/CodeGen/builtin-floating-point.c | 6 ++-- 3 files changed, 16 insertions(+), 23 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 6560ab4666a3..89b4069f3686 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -114,6 +114,9 @@ struct MissingFeatures { // Fast math. static bool fastMathGuard() { return false; } + // Should be implemented with a moduleOp level attribute and directly + // mapped to LLVM - those can be set directly for every relevant LLVM IR + // dialect operation (log10, ...). static bool fastMathFlags() { return false; } static bool fastMathFuncAttributes() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 7970be0eccf7..5ba526584e16 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -448,8 +448,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_cosf16: case Builtin::BI__builtin_cosl: case Builtin::BI__builtin_cosf128: - assert(getContext().getLangOpts().FastMath && - "cir.cos is only expected under -ffast-math"); + assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIexp: @@ -460,8 +459,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_expf16: case Builtin::BI__builtin_expl: case Builtin::BI__builtin_expf128: - assert(getContext().getLangOpts().FastMath && - "cir.exp is only expected under -ffast-math"); + assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIexp2: @@ -472,8 +470,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_exp2f16: case Builtin::BI__builtin_exp2l: case Builtin::BI__builtin_exp2f128: - assert(getContext().getLangOpts().FastMath && - "cir.exp2 is only expected under -ffast-math"); + assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIfabs: @@ -540,8 +537,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmod: case Builtin::BI__builtin_fmodf: case Builtin::BI__builtin_fmodl: - assert(getContext().getLangOpts().FastMath && - "cir.fmod is only expected under -ffast-math"); + assert(!MissingFeatures::fastMathFlags()); return buildBinaryFPBuiltin(*this, *E); case Builtin::BI__builtin_fmodf16: @@ -556,8 +552,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_logf16: case Builtin::BI__builtin_logl: case Builtin::BI__builtin_logf128: - assert(getContext().getLangOpts().FastMath && - "cir.log is only expected under -ffast-math"); + assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlog10: @@ -568,8 +563,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log10f16: case Builtin::BI__builtin_log10l: case Builtin::BI__builtin_log10f128: - assert(getContext().getLangOpts().FastMath && - "cir.log10 is only expected under -ffast-math"); + assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlog2: @@ -580,8 +574,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log2f16: case Builtin::BI__builtin_log2l: case Builtin::BI__builtin_log2f128: - assert(getContext().getLangOpts().FastMath && - "cir.log2 is only expected under -ffast-math"); + assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BInearbyint: @@ -599,8 +592,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_pow: case Builtin::BI__builtin_powf: case Builtin::BI__builtin_powl: - assert(getContext().getLangOpts().FastMath && - "cir.pow is only expected under -ffast-math"); + assert(!MissingFeatures::fastMathFlags()); return RValue::get( buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); @@ -636,8 +628,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sinf16: case Builtin::BI__builtin_sinl: case Builtin::BI__builtin_sinf128: - assert(getContext().getLangOpts().FastMath && - "cir.sin is only expected under -ffast-math"); + assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIsqrt: @@ -648,8 +639,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sqrtf16: case Builtin::BI__builtin_sqrtl: case Builtin::BI__builtin_sqrtf128: - assert(getContext().getLangOpts().FastMath && - "cir.sqrt is only expected under -ffast-math"); + assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BItrunc: diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c index 3fb9e4142834..e882d8606458 100644 --- a/clang/test/CIR/CodeGen/builtin-floating-point.c +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t1.cir 2>&1 | FileCheck %s -// RUN: %clang_cc1 -triple aarch64-apple-darwin-macho -ffast-math -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t1.cir 2>&1 | FileCheck %s --check-prefix=AARCH64 -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -ffast-math -fclangir -emit-llvm -o %t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t1.cir 2>&1 | FileCheck %s +// RUN: %clang_cc1 -triple aarch64-apple-darwin-macho -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t1.cir 2>&1 | FileCheck %s --check-prefix=AARCH64 +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o %t.ll %s // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM // lround From 403459986b56660c727bf8a0bbea77353feecdf7 Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 13 Aug 2024 07:12:26 +0800 Subject: [PATCH 1764/2301] [CIR][LowerToLLVM] Support address space lowering for global ops (#783) This PR set a proper addrspace attribute for LLVM globals. For simplicity, a temporary pointer type is created and consumed by our LLVM type converter. The correct address space is then extracted from the converted pointer type of LLVM. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 19 ++++++++++++++++--- clang/test/CIR/Lowering/address-space.cir | 9 +++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0a121f126da2..9bb4e9caf98d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1878,6 +1878,18 @@ class CIRGlobalOpLowering public: using OpConversionPattern::OpConversionPattern; + // Get addrspace by converting a pointer type. + // TODO: The approach here is a little hacky. We should access the target info + // directly to convert the address space of global op, similar to what we do + // for type converter. + unsigned getGlobalOpTargetAddrSpace(mlir::cir::GlobalOp op) const { + auto tempPtrTy = mlir::cir::PointerType::get(getContext(), op.getSymType(), + op.getAddrSpaceAttr()); + return cast( + typeConverter->convertType(tempPtrTy)) + .getAddressSpace(); + } + /// Replace CIR global with a region initialized LLVM global and update /// insertion point to the end of the initializer block. inline void setupRegionInitializedLLVMGlobalOp( @@ -1886,7 +1898,8 @@ class CIRGlobalOpLowering SmallVector attributes; auto newGlobalOp = rewriter.replaceOpWithNewOp( op, llvmType, op.getConstant(), convertLinkage(op.getLinkage()), - op.getSymName(), nullptr, /*alignment*/ 0, /*addrSpace*/ 0, + op.getSymName(), nullptr, /*alignment*/ 0, + /*addrSpace*/ getGlobalOpTargetAddrSpace(op), /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); newGlobalOp.getRegion().push_back(new mlir::Block()); @@ -1916,7 +1929,7 @@ class CIRGlobalOpLowering if (!init.has_value()) { rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, mlir::Attribute(), - /*alignment*/ 0, /*addrSpace*/ 0, + /*alignment*/ 0, /*addrSpace*/ getGlobalOpTargetAddrSpace(op), /*dsoLocal*/ isDsoLocal, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); return mlir::success(); @@ -2004,7 +2017,7 @@ class CIRGlobalOpLowering // Rewrite op. auto llvmGlobalOp = rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, init.value(), - /*alignment*/ 0, /*addrSpace*/ 0, + /*alignment*/ 0, /*addrSpace*/ getGlobalOpTargetAddrSpace(op), /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); auto mod = op->getParentOfType(); diff --git a/clang/test/CIR/Lowering/address-space.cir b/clang/test/CIR/Lowering/address-space.cir index 1b2d01e8b1db..ee857bd32119 100644 --- a/clang/test/CIR/Lowering/address-space.cir +++ b/clang/test/CIR/Lowering/address-space.cir @@ -7,6 +7,15 @@ module attributes { cir.triple = "spirv64-unknown-unknown", llvm.data_layout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1" } { + cir.global external addrspace(offload_global) @addrspace1 = #cir.int<1> : !s32i + // LLVM: @addrspace1 = addrspace(1) global i32 + + cir.global "private" internal addrspace(offload_local) @addrspace2 : !s32i + // LLVM: @addrspace2 = internal addrspace(3) global i32 undef + + cir.global external addrspace(target<7>) @addrspace3 = #cir.int<3> : !s32i + // LLVM: @addrspace3 = addrspace(7) global i32 + // LLVM: define void @foo(ptr %0) cir.func @foo(%arg0: !cir.ptr) { // LLVM-NEXT: alloca ptr, From 786b8bd4bb24d5f54582f6534d3836a6b0141cb4 Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Mon, 12 Aug 2024 20:44:08 -0300 Subject: [PATCH 1765/2301] [CIR][ABI] Implement basic struct CC lowering for x86_64 (#784) This patch adds the necessary bits for unraveling struct arguments and return values for the x86_64 calling convention. --- .../clang/CIR/Dialect/IR/CIRDataLayout.h | 123 +++- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 + clang/include/clang/CIR/MissingFeatures.h | 45 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 +- clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 241 ++++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 + clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 5 +- .../Transforms/TargetLowering/ABIInfo.cpp | 2 + .../Transforms/TargetLowering/ABIInfo.h | 2 + .../Transforms/TargetLowering/ABIInfoImpl.cpp | 10 +- .../Transforms/TargetLowering/ABIInfoImpl.h | 2 + .../Transforms/TargetLowering/CIRCXXABI.h | 20 + .../TargetLowering/CIRLowerContext.cpp | 29 +- .../TargetLowering/CIRLowerContext.h | 17 +- .../TargetLowering/CIRRecordLayout.cpp | 42 +- .../TargetLowering/CIRRecordLayout.h | 107 ++- .../TargetLowering/ItaniumCXXABI.cpp | 8 + .../TargetLowering/LowerFunction.cpp | 367 +++++++++- .../Transforms/TargetLowering/LowerFunction.h | 9 + .../Transforms/TargetLowering/LowerModule.h | 5 + .../Transforms/TargetLowering/LowerTypes.cpp | 2 +- .../Transforms/TargetLowering/LowerTypes.h | 1 + .../TargetLowering/RecordLayoutBuilder.cpp | 637 ++++++++++++++++++ .../Transforms/TargetLowering/Targets/X86.cpp | 265 +++++++- clang/test/CIR/CodeGen/bool.c | 2 +- clang/test/CIR/CodeGen/union-init.c | 2 +- clang/test/CIR/Lowering/address-space.cir | 3 +- clang/test/CIR/Lowering/struct-init.c | 1 + .../x86_64/x86_64-call-conv-lowering-pass.cpp | 29 + 29 files changed, 1900 insertions(+), 88 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h index e2fd966e3cb2..75b3978ba1ad 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h @@ -12,51 +12,52 @@ #ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRDATALAYOUT_H #define LLVM_CLANG_CIR_DIALECT_IR_CIRDATALAYOUT_H -#include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/IR/BuiltinOps.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "llvm/ADT/StringRef.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/Support/Alignment.h" +#include "llvm/Support/TypeSize.h" namespace cir { +class StructLayout; + +// FIXME(cir): This might be replaced by a CIRDataLayout interface which can +// provide the same functionalities. class CIRDataLayout { bool bigEndian = false; + /// Primitive type alignment data. This is sorted by type and bit + /// width during construction. + llvm::DataLayout::PrimitiveSpec StructAlignment; + + // The StructType -> StructLayout map. + mutable void *LayoutMap = nullptr; + public: mlir::DataLayout layout; - /// Constructs a DataLayout from a specification string. See reset(). - explicit CIRDataLayout(llvm::StringRef dataLayout, mlir::ModuleOp module) - : layout(module) { - reset(dataLayout); - } + /// Constructs a DataLayout the module's data layout attribute. + CIRDataLayout(mlir::ModuleOp modOp); /// Parse a data layout string (with fallback to default values). - void reset(llvm::StringRef dataLayout); + void reset(); // Free all internal data structures. void clear(); - CIRDataLayout(mlir::ModuleOp modOp); - bool isBigEndian() const { return bigEndian; } - // `useABI` is `true` if not using prefered alignment. - unsigned getAlignment(mlir::Type ty, bool useABI) const { - if (llvm::isa(ty)) { - auto sTy = mlir::cast(ty); - if (sTy.getPacked() && useABI) - return 1; - } else if (llvm::isa(ty)) { - return getAlignment(mlir::cast(ty).getEltType(), - useABI); - } - - return useABI ? layout.getTypeABIAlignment(ty) - : layout.getTypePreferredAlignment(ty); - } + /// Returns a StructLayout object, indicating the alignment of the + /// struct, its size, and the offsets of its fields. + /// + /// Note that this information is lazily cached. + const StructLayout *getStructLayout(mlir::cir::StructType Ty) const; + + /// Internal helper method that returns requested alignment for type. + llvm::Align getAlignment(mlir::Type Ty, bool abi_or_pref) const; - unsigned getABITypeAlign(mlir::Type ty) const { + llvm::Align getABITypeAlign(mlir::Type ty) const { return getAlignment(ty, true); } @@ -67,10 +68,10 @@ class CIRDataLayout { /// the runtime size will be a positive integer multiple of the base size. /// /// For example, returns 5 for i36 and 10 for x86_fp80. - unsigned getTypeStoreSize(mlir::Type Ty) const { - // FIXME: this is a bit inaccurate, see DataLayout::getTypeStoreSize for - // more information. - return llvm::divideCeil(layout.getTypeSizeInBits(Ty), 8); + llvm::TypeSize getTypeStoreSize(mlir::Type Ty) const { + llvm::TypeSize BaseSize = getTypeSizeInBits(Ty); + return {llvm::divideCeil(BaseSize.getKnownMinValue(), 8), + BaseSize.isScalable()}; } /// Returns the offset in bytes between successive objects of the @@ -81,20 +82,20 @@ class CIRDataLayout { /// /// This is the amount that alloca reserves for this type. For example, /// returns 12 or 16 for x86_fp80, depending on alignment. - unsigned getTypeAllocSize(mlir::Type Ty) const { + llvm::TypeSize getTypeAllocSize(mlir::Type Ty) const { // Round up to the next alignment boundary. - return llvm::alignTo(getTypeStoreSize(Ty), getABITypeAlign(Ty)); + return llvm::alignTo(getTypeStoreSize(Ty), getABITypeAlign(Ty).value()); } - unsigned getPointerTypeSizeInBits(mlir::Type Ty) const { + llvm::TypeSize getPointerTypeSizeInBits(mlir::Type Ty) const { assert(mlir::isa(Ty) && "This should only be called with a pointer type"); return layout.getTypeSizeInBits(Ty); } - unsigned getTypeSizeInBits(mlir::Type Ty) const { - return layout.getTypeSizeInBits(Ty); - } + // The implementation of this method is provided inline as it is particularly + // well suited to constant folding when called on a specific Type subclass. + llvm::TypeSize getTypeSizeInBits(mlir::Type Ty) const; mlir::Type getIntPtrType(mlir::Type Ty) const { assert(mlir::isa(Ty) && "Expected pointer type"); @@ -104,6 +105,58 @@ class CIRDataLayout { } }; +/// Used to lazily calculate structure layout information for a target machine, +/// based on the DataLayout structure. +class StructLayout final + : public llvm::TrailingObjects { + llvm::TypeSize StructSize; + llvm::Align StructAlignment; + unsigned IsPadded : 1; + unsigned NumElements : 31; + +public: + llvm::TypeSize getSizeInBytes() const { return StructSize; } + + llvm::TypeSize getSizeInBits() const { return 8 * StructSize; } + + llvm::Align getAlignment() const { return StructAlignment; } + + /// Returns whether the struct has padding or not between its fields. + /// NB: Padding in nested element is not taken into account. + bool hasPadding() const { return IsPadded; } + + /// Given a valid byte offset into the structure, returns the structure + /// index that contains it. + unsigned getElementContainingOffset(uint64_t FixedOffset) const; + + llvm::MutableArrayRef getMemberOffsets() { + return llvm::MutableArrayRef(getTrailingObjects(), + NumElements); + } + + llvm::ArrayRef getMemberOffsets() const { + return llvm::ArrayRef(getTrailingObjects(), NumElements); + } + + llvm::TypeSize getElementOffset(unsigned Idx) const { + assert(Idx < NumElements && "Invalid element idx!"); + return getMemberOffsets()[Idx]; + } + + llvm::TypeSize getElementOffsetInBits(unsigned Idx) const { + return getElementOffset(Idx) * 8; + } + +private: + friend class CIRDataLayout; // Only DataLayout can create this class + + StructLayout(mlir::cir::StructType ST, const CIRDataLayout &DL); + + size_t numTrailingObjects(OverloadToken) const { + return NumElements; + } +}; + } // namespace cir #endif diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 2abed62b3754..cd4434c201bc 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -589,6 +589,12 @@ def StoreOp : CIR_Op<"store", [ ``` }]; + let builders = [ + OpBuilder<(ins "Value":$value, "Value":$addr), [{ + $_state.addOperands({value, addr}); + }]> + ]; + let arguments = (ins CIR_AnyType:$value, Arg:$addr, diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 89b4069f3686..3f56c8fd29e3 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -70,6 +70,7 @@ struct MissingFeatures { // ObjC static bool setObjCGCLValueClass() { return false; } static bool objCLifetime() { return false; } + static bool objCIvarDecls() { return false; } // Debug info static bool generateDebugInfo() { return false; } @@ -205,17 +206,42 @@ struct MissingFeatures { //-- Missing AST queries - static bool recordDeclCanPassInRegisters() { return false; } + static bool CXXRecordDeclIsEmptyCXX11() { return false; } + static bool CXXRecordDeclIsPOD() { return false; } + static bool CXXRecordIsDynamicClass() { return false; } + static bool astContextGetExternalSource() { return false; } + static bool declGetMaxAlignment() { return false; } + static bool declHasAlignMac68kAttr() { return false; } + static bool declHasAlignNaturalAttr() { return false; } + static bool declHasMaxFieldAlignmentAttr() { return false; } + static bool fieldDeclIsBitfield() { return false; } + static bool fieldDeclIsPotentiallyOverlapping() { return false; } + static bool fieldDeclGetMaxFieldAlignment() { return false; } + static bool fieldDeclisUnnamedBitField() { return false; } static bool funcDeclIsCXXConstructorDecl() { return false; } static bool funcDeclIsCXXDestructorDecl() { return false; } static bool funcDeclIsCXXMethodDecl() { return false; } static bool funcDeclIsInlineBuiltinDeclaration() { return false; } static bool funcDeclIsReplaceableGlobalAllocationFunction() { return false; } + static bool isCXXRecordDecl() { return false; } static bool qualTypeIsReferenceType() { return false; } - static bool typeGetAsEnumType() { return false; } + static bool recordDeclCanPassInRegisters() { return false; } + static bool recordDeclHasAlignmentAttr() { return false; } + static bool recordDeclHasFlexibleArrayMember() { return false; } + static bool recordDeclIsCXXDecl() { return false; } + static bool recordDeclIsMSStruct() { return false; } + static bool recordDeclIsPacked() { return false; } + static bool recordDeclMayInsertExtraPadding() { return false; } static bool typeGetAsBuiltinType() { return false; } + static bool typeGetAsEnumType() { return false; } + static bool typeIsCXXRecordDecl() { return false; } + static bool typeIsScalableType() { return false; } + static bool typeIsSized() { return false; } static bool varDeclIsKNRPromoted() { return false; } + // We need to track parent (base) classes to determine the layout of a class. + static bool getCXXRecordBases() { return false; } + //-- Missing types static bool fixedWidthIntegers() { return false; } @@ -232,6 +258,18 @@ struct MissingFeatures { //-- Other missing features + // We need to track the parent record types that represent a field + // declaration. This is necessary to determine the layout of a class. + static bool fieldDeclAbstraction() { return false; } + + // There are some padding diagnostic features for Itanium ABI that we might + // wanna add later. + static bool bitFieldPaddingDiagnostics() { return false; } + + // Clang considers both enums and records as tag types. We don't have a way to + // transparently handle both these types yet. Might need an interface here. + static bool tagTypeClassAbstraction() { return false; } + // Empty values might be passed as arguments to serve as padding, ensuring // alignment and compliance (e.g. MIPS). We do not yet support this. static bool argumentPadding() { return false; } @@ -268,7 +306,7 @@ struct MissingFeatures { // evaluating ABI-specific lowering. static bool qualifiedTypes() { return false; } - // We're ignoring several details regarding ABI-halding for Swift. + // We're ignoring several details regarding ABI-handling for Swift. static bool swift() { return false; } // The AppleARM64 is using ItaniumCXXABI, which is not quite right. @@ -281,6 +319,7 @@ struct MissingFeatures { // If a store op is guaranteed to execute before the retun value load op, we // can optimize away the store and load ops. Seems like an early optimization. static bool returnValueDominatingStoreOptmiization() { return false; } + // Globals (vars and functions) may have attributes that are target depedent. static bool setTargetAttributes() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 899597967bff..70de6e4de221 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -878,7 +878,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { unsigned Pos = 0; for (size_t I = 0; I < Elts.size(); ++I) { auto EltSize = Layout.getTypeAllocSize(Elts[I]); - unsigned AlignMask = Layout.getABITypeAlign(Elts[I]) - 1; + unsigned AlignMask = Layout.getABITypeAlign(Elts[I]).value() - 1; Pos = (Pos + AlignMask) & ~AlignMask; if (Offset < Pos + EltSize) { Indices.push_back(I); diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp index 26d055b69351..6daa8781b453 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp @@ -1,36 +1,219 @@ #include "clang/CIR/Dialect/IR/CIRDataLayout.h" -#include "llvm/ADT/StringRef.h" - -namespace cir { - -CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { - auto dlSpec = mlir::dyn_cast( - modOp->getAttr(mlir::DLTIDialect::kDataLayoutAttrName)); - assert(dlSpec && "expected dl_spec in the module"); - auto entries = dlSpec.getEntries(); - - for (auto entry : entries) { - auto entryKey = entry.getKey(); - auto strKey = mlir::dyn_cast(entryKey); - if (!strKey) - continue; - auto entryName = strKey.strref(); - if (entryName == mlir::DLTIDialect::kDataLayoutEndiannessKey) { - auto value = mlir::dyn_cast(entry.getValue()); - assert(value && "expected string attribute"); - auto endian = value.getValue(); - if (endian == mlir::DLTIDialect::kDataLayoutEndiannessBig) - bigEndian = true; - else if (endian == mlir::DLTIDialect::kDataLayoutEndiannessLittle) - bigEndian = false; - else - llvm_unreachable("unknown endianess"); +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/IR/DataLayout.h" + +using namespace cir; + +//===----------------------------------------------------------------------===// +// Support for StructLayout +//===----------------------------------------------------------------------===// + +StructLayout::StructLayout(mlir::cir::StructType ST, const CIRDataLayout &DL) + : StructSize(llvm::TypeSize::getFixed(0)) { + assert(!ST.isIncomplete() && "Cannot get layout of opaque structs"); + IsPadded = false; + NumElements = ST.getNumElements(); + + // Loop over each of the elements, placing them in memory. + for (unsigned i = 0, e = NumElements; i != e; ++i) { + mlir::Type Ty = ST.getMembers()[i]; + if (i == 0 && ::cir::MissingFeatures::typeIsScalableType()) + llvm_unreachable("Scalable types are not yet supported in CIR"); + + assert(!::cir::MissingFeatures::recordDeclIsPacked() && + "Cannot identify packed structs"); + const llvm::Align TyAlign = DL.getABITypeAlign(Ty); + + // Add padding if necessary to align the data element properly. + // Currently the only structure with scalable size will be the homogeneous + // scalable vector types. Homogeneous scalable vector types have members of + // the same data type so no alignment issue will happen. The condition here + // assumes so and needs to be adjusted if this assumption changes (e.g. we + // support structures with arbitrary scalable data type, or structure that + // contains both fixed size and scalable size data type members). + if (!StructSize.isScalable() && !isAligned(TyAlign, StructSize)) { + IsPadded = true; + StructSize = llvm::TypeSize::getFixed(alignTo(StructSize, TyAlign)); } + + // Keep track of maximum alignment constraint. + StructAlignment = std::max(TyAlign, StructAlignment); + + getMemberOffsets()[i] = StructSize; + // Consume space for this data item + StructSize += DL.getTypeAllocSize(Ty); + } + + // Add padding to the end of the struct so that it could be put in an array + // and all array elements would be aligned correctly. + if (!StructSize.isScalable() && !isAligned(StructAlignment, StructSize)) { + IsPadded = true; + StructSize = llvm::TypeSize::getFixed(alignTo(StructSize, StructAlignment)); } } -void CIRDataLayout::reset(llvm::StringRef Desc) { clear(); } +/// getElementContainingOffset - Given a valid offset into the structure, +/// return the structure index that contains it. +unsigned StructLayout::getElementContainingOffset(uint64_t FixedOffset) const { + assert(!StructSize.isScalable() && + "Cannot get element at offset for structure containing scalable " + "vector types"); + llvm::TypeSize Offset = llvm::TypeSize::getFixed(FixedOffset); + llvm::ArrayRef MemberOffsets = getMemberOffsets(); + + const auto *SI = + std::upper_bound(MemberOffsets.begin(), MemberOffsets.end(), Offset, + [](llvm::TypeSize LHS, llvm::TypeSize RHS) -> bool { + return llvm::TypeSize::isKnownLT(LHS, RHS); + }); + assert(SI != MemberOffsets.begin() && "Offset not in structure type!"); + --SI; + assert(llvm::TypeSize::isKnownLE(*SI, Offset) && "upper_bound didn't work"); + assert((SI == MemberOffsets.begin() || + llvm::TypeSize::isKnownLE(*(SI - 1), Offset)) && + (SI + 1 == MemberOffsets.end() || + llvm::TypeSize::isKnownGT(*(SI + 1), Offset)) && + "Upper bound didn't work!"); + + // Multiple fields can have the same offset if any of them are zero sized. + // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop + // at the i32 element, because it is the last element at that offset. This is + // the right one to return, because anything after it will have a higher + // offset, implying that this element is non-empty. + return SI - MemberOffsets.begin(); +} + +//===----------------------------------------------------------------------===// +// DataLayout Class Implementation +//===----------------------------------------------------------------------===// + +namespace { -void CIRDataLayout::clear() {} +class StructLayoutMap { + using LayoutInfoTy = llvm::DenseMap; + LayoutInfoTy LayoutInfo; + +public: + ~StructLayoutMap() { + // Remove any layouts. + for (const auto &I : LayoutInfo) { + StructLayout *Value = I.second; + Value->~StructLayout(); + free(Value); + } + } + + StructLayout *&operator[](mlir::cir::StructType STy) { + return LayoutInfo[STy]; + } +}; + +} // namespace + +CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { reset(); } + +void CIRDataLayout::reset() { + clear(); + + LayoutMap = nullptr; + bigEndian = false; + // ManglingMode = MM_None; + // NonIntegralAddressSpaces.clear(); + StructAlignment = + llvm::DataLayout::PrimitiveSpec{0, llvm::Align(1), llvm::Align(8)}; + + // NOTE(cir): Alignment setter functions are skipped as these should already + // be set in MLIR's data layout. +} -} // namespace cir +void CIRDataLayout::clear() { + delete static_cast(LayoutMap); + LayoutMap = nullptr; +} + +const StructLayout * +CIRDataLayout::getStructLayout(mlir::cir::StructType Ty) const { + if (!LayoutMap) + LayoutMap = new StructLayoutMap(); + + StructLayoutMap *STM = static_cast(LayoutMap); + StructLayout *&SL = (*STM)[Ty]; + if (SL) + return SL; + + // Otherwise, create the struct layout. Because it is variable length, we + // malloc it, then use placement new. + StructLayout *L = (StructLayout *)llvm::safe_malloc( + StructLayout::totalSizeToAlloc(Ty.getNumElements())); + + // Set SL before calling StructLayout's ctor. The ctor could cause other + // entries to be added to TheMap, invalidating our reference. + SL = L; + + new (L) StructLayout(Ty, *this); + + return L; +} + +/*! + \param abi_or_pref Flag that determines which alignment is returned. true + returns the ABI alignment, false returns the preferred alignment. + \param Ty The underlying type for which alignment is determined. + + Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref + == false) for the requested type \a Ty. + */ +llvm::Align CIRDataLayout::getAlignment(mlir::Type Ty, bool abi_or_pref) const { + + if (llvm::isa(Ty)) { + // Packed structure types always have an ABI alignment of one. + if (::cir::MissingFeatures::recordDeclIsPacked() && abi_or_pref) + llvm_unreachable("NYI"); + + // Get the layout annotation... which is lazily created on demand. + const StructLayout *Layout = + getStructLayout(llvm::cast(Ty)); + const llvm::Align Align = + abi_or_pref ? StructAlignment.ABIAlign : StructAlignment.PrefAlign; + return std::max(Align, Layout->getAlignment()); + } + + // FIXME(cir): This does not account for differnt address spaces, and relies + // on CIR's data layout to give the proper alignment. + assert(!::cir::MissingFeatures::addressSpace()); + + // Fetch type alignment from MLIR's data layout. + unsigned align = abi_or_pref ? layout.getTypeABIAlignment(Ty) + : layout.getTypePreferredAlignment(Ty); + return llvm::Align(align); +} + +// The implementation of this method is provided inline as it is particularly +// well suited to constant folding when called on a specific Type subclass. +llvm::TypeSize CIRDataLayout::getTypeSizeInBits(mlir::Type Ty) const { + assert(!::cir::MissingFeatures::typeIsSized() && + "Cannot getTypeInfo() on a type that is unsized!"); + + if (auto structTy = llvm::dyn_cast(Ty)) { + + // FIXME(cir): CIR struct's data layout implementation doesn't do a good job + // of handling unions particularities. We should have a separate union type. + if (structTy.isUnion()) { + auto largestMember = structTy.getLargestMember(layout); + return llvm::TypeSize::getFixed(layout.getTypeSizeInBits(largestMember)); + } + + // FIXME(cir): We should be able to query the size of a struct directly to + // its data layout implementation instead of requiring a separate + // StructLayout object. + // Get the layout annotation... which is lazily created on demand. + return getStructLayout(structTy)->getSizeInBits(); + } + + // FIXME(cir): This does not account for different address spaces, and relies + // on CIR's data layout to give the proper ABI-specific type width. + assert(!::cir::MissingFeatures::addressSpace()); + + return llvm::TypeSize::getFixed(layout.getTypeSizeInBits(Ty)); +} diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 69e179f8619c..b8ce9bd5bc4e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -458,6 +458,10 @@ LogicalResult CastOp::verify() { return success(); } case cir::CastKind::bitcast: { + // Allow bitcast of structs for calling conventions. + if (isa(srcType) || isa(resType)) + return success(); + // This is the only cast kind where we don't want vector types to decay // into the element type. if ((!mlir::isa(getSrc().getType()) || diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 4c250295856c..c83c4a6f1109 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -32,6 +32,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" #include using cir::MissingFeatures; @@ -447,13 +448,13 @@ llvm::TypeSize mlir::cir::VectorType::getTypeSizeInBits( uint64_t mlir::cir::VectorType::getABIAlignment( const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { - return getSize() * dataLayout.getTypeABIAlignment(getEltType()); + return llvm::NextPowerOf2(dataLayout.getTypeSizeInBits(*this)); } uint64_t mlir::cir::VectorType::getPreferredAlignment( const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { - return getSize() * dataLayout.getTypePreferredAlignment(getEltType()); + return llvm::NextPowerOf2(dataLayout.getTypeSizeInBits(*this)); } llvm::TypeSize diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 3ed29dd4d549..4e2a81de9fc1 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -27,6 +27,8 @@ CIRCXXABI &ABIInfo::getCXXABI() const { return LT.getCXXABI(); } CIRLowerContext &ABIInfo::getContext() const { return LT.getContext(); } +const clang::TargetInfo &ABIInfo::getTarget() const { return LT.getTarget(); } + const ::cir::CIRDataLayout &ABIInfo::getDataLayout() const { return LT.getDataLayout(); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h index 67d628f4eb30..bbcd906e849a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h @@ -41,6 +41,8 @@ class ABIInfo { CIRLowerContext &getContext() const; + const clang::TargetInfo &getTarget() const; + const ::cir::CIRDataLayout &getDataLayout() const; virtual void computeInfo(LowerFunctionInfo &FI) const = 0; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index e5ddcff6b5e7..041c801dbe2e 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -26,7 +26,7 @@ bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, Type Ty = FI.getReturnType(); if (const auto RT = dyn_cast(Ty)) { - llvm_unreachable("NYI"); + assert(!::cir::MissingFeatures::isCXXRecordDecl()); } return CXXABI.classifyReturnType(FI); @@ -45,5 +45,13 @@ Type useFirstFieldIfTransparentUnion(Type Ty) { return Ty; } +CIRCXXABI::RecordArgABI getRecordArgABI(const StructType RT, + CIRCXXABI &CXXABI) { + if (::cir::MissingFeatures::typeIsCXXRecordDecl()) { + llvm_unreachable("NYI"); + } + return CXXABI.getRecordArgABI(RT); +} + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h index 80f43d9a5e9f..9e45bc4e0ecc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h @@ -30,6 +30,8 @@ bool isAggregateTypeForABI(Type T); /// should ensure that all elements of the union have the same "machine type". Type useFirstFieldIfTransparentUnion(Type Ty); +CIRCXXABI::RecordArgABI getRecordArgABI(const StructType RT, CIRCXXABI &CXXABI); + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index 3cc1bde1f763..42e666999005 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -40,6 +40,26 @@ class CIRCXXABI { /// If the C++ ABI requires the given type be returned in a particular way, /// this method sets RetAI and returns true. virtual bool classifyReturnType(LowerFunctionInfo &FI) const = 0; + + /// Specify how one should pass an argument of a record type. + enum RecordArgABI { + /// Pass it using the normal C aggregate rules for the ABI, potentially + /// introducing extra copies and passing some or all of it in registers. + RAA_Default = 0, + + /// Pass it on the stack using its defined layout. The argument must be + /// evaluated directly into the correct stack position in the arguments + /// area, + /// and the call machinery must not move it or introduce extra copies. + RAA_DirectInMemory, + + /// Pass it as a pointer to temporary memory. + RAA_Indirect + }; + + /// Returns how an argument of the given record type should be passed. + /// FIXME(cir): This expects a CXXRecordDecl! Not any record type. + virtual RecordArgABI getRecordArgABI(const StructType RD) const = 0; }; /// Creates an Itanium-family ABI. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index f7020f37f513..57d29643ca3c 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -23,7 +23,7 @@ namespace mlir { namespace cir { -CIRLowerContext::CIRLowerContext(ModuleOp module, clang::LangOptions &LOpts) +CIRLowerContext::CIRLowerContext(ModuleOp module, clang::LangOptions LOpts) : MLIRCtx(module.getContext()), LangOpts(LOpts) {} CIRLowerContext::~CIRLowerContext() {} @@ -52,6 +52,8 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { auto typeKind = clang::Type::Builtin; if (isa(T)) { typeKind = clang::Type::Builtin; + } else if (isa(T)) { + typeKind = clang::Type::Record; } else { llvm_unreachable("Unhandled type class"); } @@ -92,6 +94,26 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { llvm_unreachable("Unknown builtin type!"); break; } + case clang::Type::Record: { + const auto RT = dyn_cast(T); + assert(!::cir::MissingFeatures::tagTypeClassAbstraction()); + + // Only handle TagTypes (names types) for now. + assert(RT.getName() && "Anonymous record is NYI"); + + // NOTE(cir): Clang does some hanlding of invalid tagged declarations here. + // Not sure if this is necessary in CIR. + + if (::cir::MissingFeatures::typeGetAsEnumType()) { + llvm_unreachable("NYI"); + } + + const CIRRecordLayout &Layout = getCIRRecordLayout(RT); + Width = toBits(Layout.getSize()); + Align = toBits(Layout.getAlignment()); + assert(!::cir::MissingFeatures::recordDeclHasAlignmentAttr()); + break; + } default: llvm_unreachable("Unhandled type class"); } @@ -136,6 +158,11 @@ clang::CharUnits CIRLowerContext::toCharUnitsFromBits(int64_t BitSize) const { return clang::CharUnits::fromQuantity(BitSize / getCharWidth()); } +/// Convert a size in characters to a size in characters. +int64_t CIRLowerContext::toBits(clang::CharUnits CharSize) const { + return CharSize.getQuantity() * getCharWidth(); +} + clang::TypeInfoChars CIRLowerContext::getTypeInfoInChars(Type T) const { if (auto arrTy = dyn_cast(T)) llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h index a803fb992e74..5a87f71c2bdc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h @@ -14,6 +14,7 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRLowerContext_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRLowerContext_H +#include "CIRRecordLayout.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/Types.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" @@ -42,7 +43,7 @@ class CIRLowerContext : public llvm::RefCountedBase { /// The language options used to create the AST associated with /// this ASTContext object. - clang::LangOptions &LangOpts; + clang::LangOptions LangOpts; //===--------------------------------------------------------------------===// // Built-in Types @@ -51,7 +52,7 @@ class CIRLowerContext : public llvm::RefCountedBase { Type CharTy; public: - CIRLowerContext(ModuleOp module, clang::LangOptions &LOpts); + CIRLowerContext(ModuleOp module, clang::LangOptions LOpts); CIRLowerContext(const CIRLowerContext &) = delete; CIRLowerContext &operator=(const CIRLowerContext &) = delete; ~CIRLowerContext(); @@ -69,6 +70,10 @@ class CIRLowerContext : public llvm::RefCountedBase { Type initBuiltinType(clang::BuiltinType::Kind K); public: + const clang::TargetInfo &getTargetInfo() const { return *Target; } + + const clang::LangOptions &getLangOpts() const { return LangOpts; } + MLIRContext *getMLIRContext() const { return MLIRCtx; } //===--------------------------------------------------------------------===// @@ -89,6 +94,9 @@ class CIRLowerContext : public llvm::RefCountedBase { /// Convert a size in bits to a size in characters. clang::CharUnits toCharUnitsFromBits(int64_t BitSize) const; + /// Convert a size in characters to a size in bits. + int64_t toBits(clang::CharUnits CharSize) const; + clang::CharUnits getTypeSizeInChars(Type T) const { // FIXME(cir): We should query MLIR's Datalayout here instead. return getTypeInfoInChars(T).Width; @@ -102,6 +110,11 @@ class CIRLowerContext : public llvm::RefCountedBase { /// More type predicates useful for type checking/promotion bool isPromotableIntegerType(Type T) const; // C99 6.3.1.1p2 + + /// Get or compute information about the layout of the specified + /// record (struct/union/class) \p D, which indicates its size and field + /// position information. + const CIRRecordLayout &getCIRRecordLayout(const Type D) const; }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp index 370ada5411a0..68b777fa7755 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp @@ -16,7 +16,47 @@ namespace mlir { namespace cir { -CIRRecordLayout::CIRRecordLayout() {} +// Constructor for C++ records. +CIRRecordLayout::CIRRecordLayout( + const CIRLowerContext &Ctx, clang::CharUnits size, + clang::CharUnits alignment, clang::CharUnits preferredAlignment, + clang::CharUnits unadjustedAlignment, clang::CharUnits requiredAlignment, + bool hasOwnVFPtr, bool hasExtendableVFPtr, clang::CharUnits vbptroffset, + clang::CharUnits datasize, ArrayRef fieldoffsets, + clang::CharUnits nonvirtualsize, clang::CharUnits nonvirtualalignment, + clang::CharUnits preferrednvalignment, + clang::CharUnits SizeOfLargestEmptySubobject, const Type PrimaryBase, + bool IsPrimaryBaseVirtual, const Type BaseSharingVBPtr, + bool EndsWithZeroSizedObject, bool LeadsWithZeroSizedBase) + : Size(size), DataSize(datasize), Alignment(alignment), + PreferredAlignment(preferredAlignment), + UnadjustedAlignment(unadjustedAlignment), + RequiredAlignment(requiredAlignment), CXXInfo(new CXXRecordLayoutInfo) { + // NOTE(cir): Clang does a far more elaborate append here by leveraging the + // custom ASTVector class. For now, we'll do a simple append. + FieldOffsets.insert(FieldOffsets.end(), fieldoffsets.begin(), + fieldoffsets.end()); + + assert(!PrimaryBase && "Layout for class with inheritance is NYI"); + // CXXInfo->PrimaryBase.setPointer(PrimaryBase); + assert(!IsPrimaryBaseVirtual && "Layout for virtual base class is NYI"); + // CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual); + CXXInfo->NonVirtualSize = nonvirtualsize; + CXXInfo->NonVirtualAlignment = nonvirtualalignment; + CXXInfo->PreferredNVAlignment = preferrednvalignment; + CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject; + // FIXME(cir): I'm assuming that since we are not dealing with inherited + // classes yet, removing the following lines will be ok. + // CXXInfo->BaseOffsets = BaseOffsets; + // CXXInfo->VBaseOffsets = VBaseOffsets; + CXXInfo->HasOwnVFPtr = hasOwnVFPtr; + CXXInfo->VBPtrOffset = vbptroffset; + CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr; + // FIXME(cir): Probably not necessary for now. + // CXXInfo->BaseSharingVBPtr = BaseSharingVBPtr; + CXXInfo->EndsWithZeroSizedObject = EndsWithZeroSizedObject; + CXXInfo->LeadsWithZeroSizedBase = LeadsWithZeroSizedBase; +} } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h index 4ba672da9b43..b282f32f8a9d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h @@ -14,17 +14,122 @@ #ifndef LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRRECORDLAYOUT_H #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRRECORDLAYOUT_H +#include "mlir/IR/Types.h" +#include "mlir/Support/LLVM.h" +#include "clang/AST/CharUnits.h" +#include +#include + namespace mlir { namespace cir { class CIRLowerContext; +// FIXME(cir): Perhaps this logic can be moved to the CIR dialect, specifically +// the data layout abstractions. + /// This class contains layout information for one RecordDecl, which is a /// struct/union/class. The decl represented must be a definition, not a /// forward declaration. This class is also used to contain layout information /// for one ObjCInterfaceDecl. class CIRRecordLayout { - CIRRecordLayout(); + +private: + friend class CIRLowerContext; + + /// Size of record in characters. + clang::CharUnits Size; + + /// Size of record in characters without tail padding. + clang::CharUnits DataSize; + + // Alignment of record in characters. + clang::CharUnits Alignment; + + // Preferred alignment of record in characters. This can be different than + // Alignment in cases where it is beneficial for performance or backwards + // compatibility preserving (e.g. AIX-ABI). + clang::CharUnits PreferredAlignment; + + // Maximum of the alignments of the record members in characters. + clang::CharUnits UnadjustedAlignment; + + /// The required alignment of the object. In the MS-ABI the + /// __declspec(align()) trumps #pramga pack and must always be obeyed. + clang::CharUnits RequiredAlignment; + + /// Array of field offsets in bits. + /// FIXME(cir): Create a custom CIRVector instead? + std::vector FieldOffsets; + + struct CXXRecordLayoutInfo { + /// The non-virtual size (in chars) of an object, which is the size of the + /// object without virtual bases. + clang::CharUnits NonVirtualSize; + + /// The non-virtual alignment (in chars) of an object, which is the + /// alignment of the object without virtual bases. + clang::CharUnits NonVirtualAlignment; + + /// The preferred non-virtual alignment (in chars) of an object, which is + /// the preferred alignment of the object without virtual bases. + clang::CharUnits PreferredNVAlignment; + + /// The size of the largest empty subobject (either a base or a member). + /// Will be zero if the class doesn't contain any empty subobjects. + clang::CharUnits SizeOfLargestEmptySubobject; + + /// Virtual base table offset (Microsoft-only). + clang::CharUnits VBPtrOffset; + + /// Does this class provide a virtual function table (vtable in Itanium, + /// vftbl in Microsoft) that is independent from its base classes? + bool HasOwnVFPtr : 1; + + /// Does this class have a vftable that could be extended by a derived + /// class. The class may have inherited this pointer from a primary base + /// class. + bool HasExtendableVFPtr : 1; + + /// True if this class contains a zero sized member or base or a base with a + /// zero sized member or base. Only used for MS-ABI. + bool EndsWithZeroSizedObject : 1; + + /// True if this class is zero sized or first base is zero sized or has this + /// property. Only used for MS-ABI. + bool LeadsWithZeroSizedBase : 1; + }; + + /// CXXInfo - If the record layout is for a C++ record, this will have + /// C++ specific information about the record. + CXXRecordLayoutInfo *CXXInfo = nullptr; + + // Constructor for C++ records. + CIRRecordLayout( + const CIRLowerContext &Ctx, clang::CharUnits size, + clang::CharUnits alignment, clang::CharUnits preferredAlignment, + clang::CharUnits unadjustedAlignment, clang::CharUnits requiredAlignment, + bool hasOwnVFPtr, bool hasExtendableVFPtr, clang::CharUnits vbptroffset, + clang::CharUnits datasize, ArrayRef fieldoffsets, + clang::CharUnits nonvirtualsize, clang::CharUnits nonvirtualalignment, + clang::CharUnits preferrednvalignment, + clang::CharUnits SizeOfLargestEmptySubobject, const Type PrimaryBase, + bool IsPrimaryBaseVirtual, const Type BaseSharingVBPtr, + bool EndsWithZeroSizedObject, bool LeadsWithZeroSizedBase); + + ~CIRRecordLayout() = default; + +public: + /// Get the record alignment in characters. + clang::CharUnits getAlignment() const { return Alignment; } + + /// Get the record size in characters. + clang::CharUnits getSize() const { return Size; } + + /// Get the offset of the given field index, in bits. + uint64_t getFieldOffset(unsigned FieldNo) const { + return FieldOffsets[FieldNo]; + } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index 5fac8fdaf359..c0add1ecc1df 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -43,6 +43,14 @@ class ItaniumCXXABI : public CIRCXXABI { UseARMGuardVarABI(UseARMGuardVarABI), Use32BitVTableOffsetABI(false) {} bool classifyReturnType(LowerFunctionInfo &FI) const override; + + // FIXME(cir): This expects a CXXRecordDecl! Not any record type. + RecordArgABI getRecordArgABI(const StructType RD) const override { + assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + // If C++ prohibits us from making a copy, pass by address. + assert(!::cir::MissingFeatures::recordDeclCanPassInRegisters()); + return RAA_Default; + } }; } // namespace diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 136ea500d014..27b515cc9939 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -32,6 +32,210 @@ using ABIArgInfo = ::cir::ABIArgInfo; namespace mlir { namespace cir { +namespace { + +Value buildAddressAtOffset(LowerFunction &LF, Value addr, + const ABIArgInfo &info) { + if (unsigned offset = info.getDirectOffset()) { + llvm_unreachable("NYI"); + } + return addr; +} + +/// Given a struct pointer that we are accessing some number of bytes out of it, +/// try to gep into the struct to get at its inner goodness. Dive as deep as +/// possible without entering an element with an in-memory size smaller than +/// DstSize. +Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, + uint64_t DstSize, LowerFunction &CGF) { + // We can't dive into a zero-element struct. + if (SrcSTy.getNumElements() == 0) + llvm_unreachable("NYI"); + + Type FirstElt = SrcSTy.getMembers()[0]; + + // If the first elt is at least as large as what we're looking for, or if the + // first element is the same size as the whole struct, we can enter it. The + // comparison must be made on the store size and not the alloca size. Using + // the alloca size may overstate the size of the load. + uint64_t FirstEltSize = CGF.LM.getDataLayout().getTypeStoreSize(FirstElt); + if (FirstEltSize < DstSize && + FirstEltSize < CGF.LM.getDataLayout().getTypeStoreSize(SrcSTy)) + return SrcPtr; + + llvm_unreachable("NYI"); +} + +/// Create a store to \param Dst from \param Src where the source and +/// destination may have different types. +/// +/// This safely handles the case when the src type is larger than the +/// destination type; the upper bits of the src will be lost. +void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, + LowerFunction &CGF) { + Type SrcTy = Src.getType(); + Type DstTy = Dst.getType(); + if (SrcTy == DstTy) { + llvm_unreachable("NYI"); + } + + // FIXME(cir): We need a better way to handle datalayout queries. + assert(isa(SrcTy)); + llvm::TypeSize SrcSize = CGF.LM.getDataLayout().getTypeAllocSize(SrcTy); + + if (StructType DstSTy = dyn_cast(DstTy)) { + Dst = enterStructPointerForCoercedAccess(Dst, DstSTy, + SrcSize.getFixedValue(), CGF); + assert(isa(Dst.getType())); + DstTy = cast(Dst.getType()).getPointee(); + } + + PointerType SrcPtrTy = dyn_cast(SrcTy); + PointerType DstPtrTy = dyn_cast(DstTy); + // TODO(cir): Implement address space. + if (SrcPtrTy && DstPtrTy && !::cir::MissingFeatures::addressSpace()) { + llvm_unreachable("NYI"); + } + + // If the source and destination are integer or pointer types, just do an + // extension or truncation to the desired type. + if ((isa(SrcTy) || isa(SrcTy)) && + (isa(DstTy) || isa(DstTy))) { + llvm_unreachable("NYI"); + } + + llvm::TypeSize DstSize = CGF.LM.getDataLayout().getTypeAllocSize(DstTy); + + // If store is legal, just bitcast the src pointer. + assert(!::cir::MissingFeatures::vectorType()); + if (SrcSize.getFixedValue() <= DstSize.getFixedValue()) { + // Dst = Dst.withElementType(SrcTy); + CGF.buildAggregateStore(Src, Dst, DstIsVolatile); + } else { + llvm_unreachable("NYI"); + } +} + +// FIXME(cir): Create a custom rewriter class to abstract this away. +Value createBitcast(Value Src, Type Ty, LowerFunction &LF) { + return LF.getRewriter().create(Src.getLoc(), Ty, CastKind::bitcast, + Src); +} + +/// Coerces a \param Src value to a value of type \param Ty. +/// +/// This safely handles the case when the src type is smaller than the +/// destination type; in this situation the values of bits which not present in +/// the src are undefined. +/// +/// NOTE(cir): This method has partial parity with CGCall's CreateCoercedLoad. +/// Unlike the original codegen, this function does not emit a coerced load +/// since CIR's type checker wouldn't allow it. Instead, it casts the existing +/// ABI-agnostic value to it's ABI-aware counterpart. Nevertheless, we should +/// try to follow the same logic as the original codegen for correctness. +Value createCoercedValue(Value Src, Type Ty, LowerFunction &CGF) { + Type SrcTy = Src.getType(); + + // If SrcTy and Ty are the same, just reuse the exising load. + if (SrcTy == Ty) + return Src; + + // If it is the special boolean case, simply bitcast it. + if ((isa(SrcTy) && isa(Ty)) || + (isa(SrcTy) && isa(Ty))) + return createBitcast(Src, Ty, CGF); + + llvm::TypeSize DstSize = CGF.LM.getDataLayout().getTypeAllocSize(Ty); + + if (auto SrcSTy = dyn_cast(SrcTy)) { + Src = enterStructPointerForCoercedAccess(Src, SrcSTy, + DstSize.getFixedValue(), CGF); + SrcTy = Src.getType(); + } + + llvm::TypeSize SrcSize = CGF.LM.getDataLayout().getTypeAllocSize(SrcTy); + + // If the source and destination are integer or pointer types, just do an + // extension or truncation to the desired type. + if ((isa(Ty) || isa(Ty)) && + (isa(SrcTy) || isa(SrcTy))) { + llvm_unreachable("NYI"); + } + + // If load is legal, just bitcast the src pointer. + if (!SrcSize.isScalable() && !DstSize.isScalable() && + SrcSize.getFixedValue() >= DstSize.getFixedValue()) { + // Generally SrcSize is never greater than DstSize, since this means we are + // losing bits. However, this can happen in cases where the structure has + // additional padding, for example due to a user specified alignment. + // + // FIXME: Assert that we aren't truncating non-padding bits when have access + // to that information. + // Src = Src.withElementType(); + return CGF.buildAggregateBitcast(Src, Ty); + } + + llvm_unreachable("NYI"); +} + +Value emitAddressAtOffset(LowerFunction &LF, Value addr, + const ABIArgInfo &info) { + if (unsigned offset = info.getDirectOffset()) { + llvm_unreachable("NYI"); + } + return addr; +} + +/// After the calling convention is lowered, an ABI-agnostic type might have to +/// be loaded back to its ABI-aware couterpart so it may be returned. If they +/// differ, we have to do a coerced load. A coerced load, which means to load a +/// type to another despite that they represent the same value. The simplest +/// cases can be solved with a mere bitcast. +/// +/// This partially replaces CreateCoercedLoad from the original codegen. +/// However, instead of emitting the load, it emits a cast. +/// +/// FIXME(cir): Improve parity with the original codegen. +Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { + Type SrcTy = Src.getType(); + + // If SrcTy and Ty are the same, nothing to do. + if (SrcTy == Ty) + return Src; + + // If is the special boolean case, simply bitcast it. + if (isa(SrcTy) && isa(Ty)) + return createBitcast(Src, Ty, LF); + + llvm::TypeSize DstSize = LF.LM.getDataLayout().getTypeAllocSize(Ty); + + // FIXME(cir): Do we need the EnterStructPointerForCoercedAccess routine here? + + llvm::TypeSize SrcSize = LF.LM.getDataLayout().getTypeAllocSize(SrcTy); + + if ((isa(Ty) || isa(Ty)) && + (isa(SrcTy) || isa(SrcTy))) { + llvm_unreachable("NYI"); + } + + // If load is legal, just bitcast the src pointer. + if (!SrcSize.isScalable() && !DstSize.isScalable() && + SrcSize.getFixedValue() >= DstSize.getFixedValue()) { + // Generally SrcSize is never greater than DstSize, since this means we are + // losing bits. However, this can happen in cases where the structure has + // additional padding, for example due to a user specified alignment. + // + // FIXME: Assert that we aren't truncating non-padding bits when have access + // to that information. + return LF.getRewriter().create(Src.getLoc(), Ty, CastKind::bitcast, + Src); + } + + llvm_unreachable("NYI"); +} + +} // namespace + // FIXME(cir): Pass SrcFn and NewFn around instead of having then as attributes. LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, FuncOp srcFn, FuncOp newFn) @@ -140,7 +344,57 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, break; } - llvm_unreachable("NYI"); + assert(!::cir::MissingFeatures::vectorType()); + + // Allocate original argument to be "uncoerced". + // FIXME(cir): We should have a alloca op builder that does not required + // the pointer type to be explicitly passed. + // FIXME(cir): Get the original name of the argument, as well as the + // proper alignment for the given type being allocated. + auto Alloca = rewriter.create( + Fn.getLoc(), rewriter.getType(Ty), Ty, + /*name=*/StringRef(""), + /*alignment=*/rewriter.getI64IntegerAttr(4)); + + Value Ptr = buildAddressAtOffset(*this, Alloca.getResult(), ArgI); + + // Fast-isel and the optimizer generally like scalar values better than + // FCAs, so we flatten them if this is safe to do for this argument. + StructType STy = dyn_cast(ArgI.getCoerceToType()); + if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && + STy.getNumElements() > 1) { + llvm_unreachable("NYI"); + } else { + // Simple case, just do a coerced store of the argument into the alloca. + assert(NumIRArgs == 1); + Value AI = Fn.getArgument(FirstIRArg); + // TODO(cir): Set argument name in the new function. + createCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); + } + + // Match to what EmitParamDecl is expecting for this type. + if (::cir::MissingFeatures::evaluationKind()) { + llvm_unreachable("NYI"); + } else { + // FIXME(cir): Should we have an ParamValue abstraction like in the + // original codegen? + ArgVals.push_back(Alloca); + } + + // NOTE(cir): Once we have uncoerced the argument, we should be able to + // RAUW the original argument alloca with the new one. This assumes that + // the argument is used only to be stored in a alloca. + Value arg = SrcFn.getArgument(ArgNo); + assert(arg.hasOneUse()); + for (auto *firstStore : arg.getUsers()) { + assert(isa(firstStore)); + auto argAlloca = cast(firstStore).getAddr(); + rewriter.replaceAllUsesWith(argAlloca, Alloca); + rewriter.eraseOp(firstStore); + rewriter.eraseOp(argAlloca.getDefiningOp()); + } + + break; } default: llvm_unreachable("Unhandled ABIArgInfo::Kind"); @@ -162,6 +416,7 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { // NOTE(cir): no-return, naked, and no result functions should be handled in // CIRGen. + Value RV = {}; Type RetTy = FI.getReturnType(); const ABIArgInfo &RetAI = FI.getReturnInfo(); @@ -193,7 +448,21 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { return success(); } } else { - llvm_unreachable("NYI"); + // NOTE(cir): Unlike the original codegen, CIR may have multiple return + // statements in the function body. We have to handle this here. + mlir::PatternRewriter::InsertionGuard guard(rewriter); + NewFn->walk([&](ReturnOp returnOp) { + rewriter.setInsertionPoint(returnOp); + + // TODO(cir): I'm not sure if we need this offset here or in CIRGen. + // Perhaps both? For now I'm just ignoring it. + // Value V = emitAddressAtOffset(*this, getResultAlloca(returnOp), + // RetAI); + + RV = castReturnValue(returnOp->getOperand(0), RetAI.getCoerceToType(), + *this); + rewriter.replaceOpWithNewOp(returnOp, RV); + }); } // TODO(cir): Should AutoreleaseResult be handled here? @@ -246,6 +515,33 @@ LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, return success(); } +void LowerFunction::buildAggregateStore(Value Val, Value Dest, + bool DestIsVolatile) { + // In LLVM codegen: + // Function to store a first-class aggregate into memory. We prefer to + // store the elements rather than the aggregate to be more friendly to + // fast-isel. + assert(mlir::isa(Dest.getType()) && "Storing in a non-pointer!"); + (void)DestIsVolatile; + + // Circumvent CIR's type checking. + Type pointeeTy = mlir::cast(Dest.getType()).getPointee(); + if (Val.getType() != pointeeTy) { + // NOTE(cir): We only bitcast and store if the types have the same size. + assert((LM.getDataLayout().getTypeSizeInBits(Val.getType()) == + LM.getDataLayout().getTypeSizeInBits(pointeeTy)) && + "Incompatible types"); + auto loc = Val.getLoc(); + Val = rewriter.create(loc, pointeeTy, CastKind::bitcast, Val); + } + + rewriter.create(Val.getLoc(), Val, Dest); +} + +Value LowerFunction::buildAggregateBitcast(Value Val, Type DestTy) { + return rewriter.create(Val.getLoc(), DestTy, CastKind::bitcast, Val); +} + /// Rewrite a call operation to abide to the ABI calling convention. /// /// FIXME(cir): This method has partial parity to CodeGenFunction's @@ -436,7 +732,38 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, break; } - llvm_unreachable("NYI"); + // FIXME: Avoid the conversion through memory if possible. + Value Src = {}; + if (!isa(I->getType())) { + llvm_unreachable("NYI"); + } else { + // NOTE(cir): I'm leaving L/RValue stuff for CIRGen to handle. + Src = *I; + } + + // If the value is offst in memory, apply the offset now. + // FIXME(cir): Is this offset already handled in CIRGen? + Src = emitAddressAtOffset(*this, Src, ArgInfo); + + // Fast-isel and the optimizer generally like scalar values better than + // FCAs, so we flatten them if this is safe to do for this argument. + StructType STy = dyn_cast(ArgInfo.getCoerceToType()); + if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { + llvm_unreachable("NYI"); + } else { + // In the simple case, just pass the coerced loaded value. + assert(NumIRArgs == 1); + Value Load = createCoercedValue(Src, ArgInfo.getCoerceToType(), *this); + + // FIXME(cir): We should probably handle CMSE non-secure calls here + + // since they are a ARM-specific feature. + if (::cir::MissingFeatures::undef()) + llvm_unreachable("NYI"); + IRCallArgs[FirstIRArg] = Load; + } + + break; } default: llvm::outs() << "Missing ABIArgInfo::Kind: " << ArgInfo.getKind() << "\n"; @@ -519,7 +846,39 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, } } - llvm_unreachable("NYI"); + // If coercing a fixed vector from a scalable vector for ABI + // compatibility, and the types match, use the llvm.vector.extract + // intrinsic to perform the conversion. + if (::cir::MissingFeatures::vectorType()) { + llvm_unreachable("NYI"); + } + + // FIXME(cir): Use return value slot here. + Value RetVal = callOp.getResult(); + // TODO(cir): Check for volatile return values. + + // NOTE(cir): If the function returns, there should always be a valid + // return value present. Instead of setting the return value here, we + // should have the ReturnValueSlot object set it beforehand. + if (!RetVal) { + RetVal = callOp.getResult(); + // TODO(cir): Check for volatile return values. + } + + // An empty record can overlap other data (if declared with + // no_unique_address); omit the store for such types - as there is no + // actual data to store. + if (dyn_cast(RetTy) && + cast(RetTy).getNumElements() != 0) { + // NOTE(cir): I'm assuming we don't need to change any offsets here. + // Value StorePtr = emitAddressAtOffset(*this, RetVal, RetAI); + RetVal = + createCoercedValue(newCallOp.getResult(), RetVal.getType(), *this); + } + + // NOTE(cir): No need to convert from a temp to an RValue. This is + // done in CIRGen + return RetVal; } default: llvm::errs() << "Unhandled ABIArgInfo kind: " << RetAI.getKind() << "\n"; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h index 6a892ef79d9f..bd46bcdd1d8b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h @@ -71,6 +71,15 @@ class LowerFunction { LogicalResult generateCode(FuncOp oldFn, FuncOp newFn, const LowerFunctionInfo &FnInfo); + // Emit the most simple cir.store possible (e.g. a store for a whole + // struct), which can later be broken down in other CIR levels (or prior + // to dialect codegen). + void buildAggregateStore(Value Val, Value Dest, bool DestIsVolatile); + + // Emit a simple bitcast for a coerced aggregate type to convert it from an + // ABI-agnostic to an ABI-aware type. + Value buildAggregateBitcast(Value Val, Type DestTy); + /// Rewrite a call operation to abide to the ABI calling convention. LogicalResult rewriteCallOp(CallOp op, ReturnValueSlot retValSlot = ReturnValueSlot()); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index 46ac0c105269..44cd5a0ae1cb 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -23,6 +23,7 @@ #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/MissingFeatures.h" #include @@ -55,6 +56,10 @@ class LowerModule { MLIRContext *getMLIRContext() { return module.getContext(); } ModuleOp &getModule() { return module; } + const ::cir::CIRDataLayout &getDataLayout() const { + return types.getDataLayout(); + } + const TargetLoweringInfo &getTargetLoweringInfo(); // FIXME(cir): This would be in ASTContext, not CodeGenModule. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index 20e4dc643df0..e7eaa2bda2d0 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -37,7 +37,7 @@ LowerTypes::LowerTypes(LowerModule &LM, StringRef DLString) : LM(LM), context(LM.getContext()), Target(LM.getTarget()), CXXABI(LM.getCXXABI()), TheABIInfo(LM.getTargetLoweringInfo().getABIInfo()), - mlirContext(LM.getMLIRContext()), DL(DLString, LM.getModule()) {} + mlirContext(LM.getMLIRContext()), DL(LM.getModule()) {} /// Return the ABI-specific function type for a CIR function type. FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h index 9e6149707c07..d6f20941544f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -60,6 +60,7 @@ class LowerTypes { LowerModule &getLM() const { return LM; } CIRCXXABI &getCXXABI() const { return CXXABI; } CIRLowerContext &getContext() { return context; } + const clang::TargetInfo &getTarget() const { return Target; } MLIRContext *getMLIRContext() { return mlirContext; } /// Convert clang calling convention to LLVM callilng convention. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index 8f606940702f..2f947c5143ef 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -10,3 +10,640 @@ // queries are adapted to operate on the CIR dialect, however. // //===----------------------------------------------------------------------===// + +#include "CIRLowerContext.h" +#include "CIRRecordLayout.h" +#include "mlir/IR/Types.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" + +using namespace mlir; +using namespace mlir::cir; + +namespace { + +//===-----------------------------------------------------------------------==// +// EmptySubobjectMap Implementation +//===----------------------------------------------------------------------===// + +/// Keeps track of which empty subobjects exist at different offsets while +/// laying out a C++ class. +class EmptySubobjectMap { + const CIRLowerContext &Context; + uint64_t CharWidth; + + /// The class whose empty entries we're keeping track of. + const StructType Class; + + /// The highest offset known to contain an empty base subobject. + clang::CharUnits MaxEmptyClassOffset; + + /// Compute the size of the largest base or member subobject that is empty. + void ComputeEmptySubobjectSizes(); + +public: + /// This holds the size of the largest empty subobject (either a base + /// or a member). Will be zero if the record being built doesn't contain + /// any empty classes. + clang::CharUnits SizeOfLargestEmptySubobject; + + EmptySubobjectMap(const CIRLowerContext &Context, const StructType Class) + : Context(Context), CharWidth(Context.getCharWidth()), Class(Class) { + ComputeEmptySubobjectSizes(); + } + + /// Return whether a field can be placed at the given offset. + bool canPlaceFieldAtOffset(const Type Ty, clang::CharUnits Offset); +}; + +void EmptySubobjectMap::ComputeEmptySubobjectSizes() { + // Check the bases. + assert(!::cir::MissingFeatures::getCXXRecordBases()); + + // Check the fields. + for (const auto FT : Class.getMembers()) { + assert(!::cir::MissingFeatures::qualifiedTypes()); + const auto RT = dyn_cast(FT); + + // We only care about record types. + if (!RT) + continue; + + // TODO(cir): Handle nested record types. + llvm_unreachable("NYI"); + } +} + +bool EmptySubobjectMap::canPlaceFieldAtOffset(const Type Ty, + clang::CharUnits Offset) { + llvm_unreachable("NYI"); +} + +//===-----------------------------------------------------------------------==// +// ItaniumRecordLayoutBuilder Implementation +//===----------------------------------------------------------------------===// + +class ItaniumRecordLayoutBuilder { +protected: + // FIXME(cir): Remove this and make the appropriate fields public. + friend class mlir::cir::CIRLowerContext; + + const CIRLowerContext &Context; + + EmptySubobjectMap *EmptySubobjects; + + /// Size - The current size of the record layout. + uint64_t Size; + + /// Alignment - The current alignment of the record layout. + clang::CharUnits Alignment; + + /// PreferredAlignment - The preferred alignment of the record layout. + clang::CharUnits PreferredAlignment; + + /// The alignment if attribute packed is not used. + clang::CharUnits UnpackedAlignment; + + /// \brief The maximum of the alignments of top-level members. + clang::CharUnits UnadjustedAlignment; + + SmallVector FieldOffsets; + + /// Whether the external AST source has provided a layout for this + /// record. + unsigned UseExternalLayout : 1; + + /// Whether we need to infer alignment, even when we have an + /// externally-provided layout. + unsigned InferAlignment : 1; + + /// Packed - Whether the record is packed or not. + unsigned Packed : 1; + + unsigned IsUnion : 1; + + unsigned IsMac68kAlign : 1; + + unsigned IsNaturalAlign : 1; + + unsigned IsMsStruct : 1; + + /// UnfilledBitsInLastUnit - If the last field laid out was a bitfield, + /// this contains the number of bits in the last unit that can be used for + /// an adjacent bitfield if necessary. The unit in question is usually + /// a byte, but larger units are used if IsMsStruct. + unsigned char UnfilledBitsInLastUnit; + + /// LastBitfieldStorageUnitSize - If IsMsStruct, represents the size of the + /// storage unit of the previous field if it was a bitfield. + unsigned char LastBitfieldStorageUnitSize; + + /// MaxFieldAlignment - The maximum allowed field alignment. This is set by + /// #pragma pack. + clang::CharUnits MaxFieldAlignment; + + /// DataSize - The data size of the record being laid out. + uint64_t DataSize; + + clang::CharUnits NonVirtualSize; + clang::CharUnits NonVirtualAlignment; + clang::CharUnits PreferredNVAlignment; + + /// If we've laid out a field but not included its tail padding in Size yet, + /// this is the size up to the end of that field. + clang::CharUnits PaddedFieldSize; + + /// The primary base class (if one exists) of the class we're laying out. + const StructType PrimaryBase; + + /// Whether the primary base of the class we're laying out is virtual. + bool PrimaryBaseIsVirtual; + + /// Whether the class provides its own vtable/vftbl pointer, as opposed to + /// inheriting one from a primary base class. + bool HasOwnVFPtr; + + /// the flag of field offset changing due to packed attribute. + bool HasPackedField; + + /// An auxiliary field used for AIX. When there are OverlappingEmptyFields + /// existing in the aggregate, the flag shows if the following first non-empty + /// or empty-but-non-overlapping field has been handled, if any. + bool HandledFirstNonOverlappingEmptyField; + +public: + ItaniumRecordLayoutBuilder(const CIRLowerContext &Context, + EmptySubobjectMap *EmptySubobjects) + : Context(Context), EmptySubobjects(EmptySubobjects), Size(0), + Alignment(clang::CharUnits::One()), + PreferredAlignment(clang::CharUnits::One()), + UnpackedAlignment(clang::CharUnits::One()), + UnadjustedAlignment(clang::CharUnits::One()), UseExternalLayout(false), + InferAlignment(false), Packed(false), IsUnion(false), + IsMac68kAlign(false), + IsNaturalAlign(!Context.getTargetInfo().getTriple().isOSAIX()), + IsMsStruct(false), UnfilledBitsInLastUnit(0), + LastBitfieldStorageUnitSize(0), + MaxFieldAlignment(clang::CharUnits::Zero()), DataSize(0), + NonVirtualSize(clang::CharUnits::Zero()), + NonVirtualAlignment(clang::CharUnits::One()), + PreferredNVAlignment(clang::CharUnits::One()), + PaddedFieldSize(clang::CharUnits::Zero()), PrimaryBaseIsVirtual(false), + HasOwnVFPtr(false), HasPackedField(false), + HandledFirstNonOverlappingEmptyField(false) {} + + void layout(const StructType D); + + void layoutFields(const StructType D); + void layoutField(const Type Ty, bool InsertExtraPadding); + + void UpdateAlignment(clang::CharUnits NewAlignment, + clang::CharUnits UnpackedNewAlignment, + clang::CharUnits PreferredAlignment); + + void checkFieldPadding(uint64_t Offset, uint64_t UnpaddedOffset, + uint64_t UnpackedOffset, unsigned UnpackedAlign, + bool isPacked, const Type Ty); + + clang::CharUnits getSize() const { + assert(Size % Context.getCharWidth() == 0); + return Context.toCharUnitsFromBits(Size); + } + uint64_t getSizeInBits() const { return Size; } + + void setSize(clang::CharUnits NewSize) { Size = Context.toBits(NewSize); } + void setSize(uint64_t NewSize) { Size = NewSize; } + + clang::CharUnits getDataSize() const { + assert(DataSize % Context.getCharWidth() == 0); + return Context.toCharUnitsFromBits(DataSize); + } + + /// Initialize record layout for the given record decl. + void initializeLayout(const Type Ty); + + uint64_t getDataSizeInBits() const { return DataSize; } + + void setDataSize(clang::CharUnits NewSize) { + DataSize = Context.toBits(NewSize); + } + void setDataSize(uint64_t NewSize) { DataSize = NewSize; } +}; + +void ItaniumRecordLayoutBuilder::layout(const StructType RT) { + initializeLayout(RT); + + // Lay out the vtable and the non-virtual bases. + assert(!::cir::MissingFeatures::isCXXRecordDecl() && + !::cir::MissingFeatures::CXXRecordIsDynamicClass()); + + layoutFields(RT); + + // NonVirtualSize = Context.toCharUnitsFromBits( + // llvm::alignTo(getSizeInBits(), + // Context.getTargetInfo().getCharAlign())); + // NonVirtualAlignment = Alignment; + // PreferredNVAlignment = PreferredAlignment; + + // // Lay out the virtual bases and add the primary virtual base offsets. + // LayoutVirtualBases(RD, RD); + + // // Finally, round the size of the total struct up to the alignment + // // of the struct itself. + // FinishLayout(RD); +} + +void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { + if (const auto RT = dyn_cast(Ty)) { + IsUnion = RT.isUnion(); + assert(!::cir::MissingFeatures::recordDeclIsMSStruct()); + } + + assert(!::cir::MissingFeatures::recordDeclIsPacked()); + + // Honor the default struct packing maximum alignment flag. + if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) { + llvm_unreachable("NYI"); + } + + // mac68k alignment supersedes maximum field alignment and attribute aligned, + // and forces all structures to have 2-byte alignment. The IBM docs on it + // allude to additional (more complicated) semantics, especially with regard + // to bit-fields, but gcc appears not to follow that. + if (::cir::MissingFeatures::declHasAlignMac68kAttr()) { + llvm_unreachable("NYI"); + } else { + if (::cir::MissingFeatures::declHasAlignNaturalAttr()) + llvm_unreachable("NYI"); + + if (::cir::MissingFeatures::declHasMaxFieldAlignmentAttr()) + llvm_unreachable("NYI"); + + if (::cir::MissingFeatures::declGetMaxAlignment()) + llvm_unreachable("NYI"); + } + + HandledFirstNonOverlappingEmptyField = + !Context.getTargetInfo().defaultsToAIXPowerAlignment() || IsNaturalAlign; + + // If there is an external AST source, ask it for the various offsets. + if (const auto RT = dyn_cast(Ty)) { + if (::cir::MissingFeatures::astContextGetExternalSource()) { + llvm_unreachable("NYI"); + } + } +} + +void ItaniumRecordLayoutBuilder::layoutField(const Type D, + bool InsertExtraPadding) { + // auto FieldClass = D.dyn_cast(); + assert(!::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && + !::cir::MissingFeatures::CXXRecordDeclIsEmptyCXX11()); + bool IsOverlappingEmptyField = false; // FIXME(cir): Needs more features. + + clang::CharUnits FieldOffset = (IsUnion || IsOverlappingEmptyField) + ? clang::CharUnits::Zero() + : getDataSize(); + + const bool DefaultsToAIXPowerAlignment = + Context.getTargetInfo().defaultsToAIXPowerAlignment(); + bool FoundFirstNonOverlappingEmptyFieldForAIX = false; + if (DefaultsToAIXPowerAlignment && !HandledFirstNonOverlappingEmptyField) { + llvm_unreachable("NYI"); + } + + assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + + uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit; + // Reset the unfilled bits. + UnfilledBitsInLastUnit = 0; + LastBitfieldStorageUnitSize = 0; + + llvm::Triple Target = Context.getTargetInfo().getTriple(); + + clang::AlignRequirementKind AlignRequirement = + clang::AlignRequirementKind::None; + clang::CharUnits FieldSize; + clang::CharUnits FieldAlign; + // The amount of this class's dsize occupied by the field. + // This is equal to FieldSize unless we're permitted to pack + // into the field's tail padding. + clang::CharUnits EffectiveFieldSize; + + auto setDeclInfo = [&](bool IsIncompleteArrayType) { + auto TI = Context.getTypeInfoInChars(D); + FieldAlign = TI.Align; + // Flexible array members don't have any size, but they have to be + // aligned appropriately for their element type. + EffectiveFieldSize = FieldSize = + IsIncompleteArrayType ? clang::CharUnits::Zero() : TI.Width; + AlignRequirement = TI.AlignRequirement; + }; + + if (isa(D) && cast(D).getSize() == 0) { + llvm_unreachable("NYI"); + } else { + setDeclInfo(false /* IsIncompleteArrayType */); + + if (::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping()) + llvm_unreachable("NYI"); + + if (IsMsStruct) + llvm_unreachable("NYI"); + } + + assert(!::cir::MissingFeatures::recordDeclIsPacked() && + !::cir::MissingFeatures::CXXRecordDeclIsPOD()); + bool FieldPacked = false; // FIXME(cir): Needs more features. + + // When used as part of a typedef, or together with a 'packed' attribute, the + // 'aligned' attribute can be used to decrease alignment. In that case, it + // overrides any computed alignment we have, and there is no need to upgrade + // the alignment. + auto alignedAttrCanDecreaseAIXAlignment = [AlignRequirement, FieldPacked] { + // Enum alignment sources can be safely ignored here, because this only + // helps decide whether we need the AIX alignment upgrade, which only + // applies to floating-point types. + return AlignRequirement == clang::AlignRequirementKind::RequiredByTypedef || + (AlignRequirement == clang::AlignRequirementKind::RequiredByRecord && + FieldPacked); + }; + + // The AIX `power` alignment rules apply the natural alignment of the + // "first member" if it is of a floating-point data type (or is an aggregate + // whose recursively "first" member or element is such a type). The alignment + // associated with these types for subsequent members use an alignment value + // where the floating-point data type is considered to have 4-byte alignment. + // + // For the purposes of the foregoing: vtable pointers, non-empty base classes, + // and zero-width bit-fields count as prior members; members of empty class + // types marked `no_unique_address` are not considered to be prior members. + clang::CharUnits PreferredAlign = FieldAlign; + if (DefaultsToAIXPowerAlignment && !alignedAttrCanDecreaseAIXAlignment() && + (FoundFirstNonOverlappingEmptyFieldForAIX || IsNaturalAlign)) { + llvm_unreachable("NYI"); + } + + // The align if the field is not packed. This is to check if the attribute + // was unnecessary (-Wpacked). + clang::CharUnits UnpackedFieldAlign = FieldAlign; + clang::CharUnits PackedFieldAlign = clang::CharUnits::One(); + clang::CharUnits UnpackedFieldOffset = FieldOffset; + // clang::CharUnits OriginalFieldAlign = UnpackedFieldAlign; + + assert(!::cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); + clang::CharUnits MaxAlignmentInChars = clang::CharUnits::Zero(); + PackedFieldAlign = std::max(PackedFieldAlign, MaxAlignmentInChars); + PreferredAlign = std::max(PreferredAlign, MaxAlignmentInChars); + UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars); + + // The maximum field alignment overrides the aligned attribute. + if (!MaxFieldAlignment.isZero()) { + llvm_unreachable("NYI"); + } + + if (!FieldPacked) + FieldAlign = UnpackedFieldAlign; + if (DefaultsToAIXPowerAlignment) + llvm_unreachable("NYI"); + if (FieldPacked) { + llvm_unreachable("NYI"); + } + + clang::CharUnits AlignTo = + !DefaultsToAIXPowerAlignment ? FieldAlign : PreferredAlign; + // Round up the current record size to the field's alignment boundary. + FieldOffset = FieldOffset.alignTo(AlignTo); + UnpackedFieldOffset = UnpackedFieldOffset.alignTo(UnpackedFieldAlign); + + if (UseExternalLayout) { + llvm_unreachable("NYI"); + } else { + if (!IsUnion && EmptySubobjects) { + // Check if we can place the field at this offset. + while (/*!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)*/ + false) { + llvm_unreachable("NYI"); + } + } + } + + // Place this field at the current location. + FieldOffsets.push_back(Context.toBits(FieldOffset)); + + if (!UseExternalLayout) + checkFieldPadding(Context.toBits(FieldOffset), UnpaddedFieldOffset, + Context.toBits(UnpackedFieldOffset), + Context.toBits(UnpackedFieldAlign), FieldPacked, D); + + if (InsertExtraPadding) { + llvm_unreachable("NYI"); + } + + // Reserve space for this field. + if (!IsOverlappingEmptyField) { + // uint64_t EffectiveFieldSizeInBits = Context.toBits(EffectiveFieldSize); + if (IsUnion) + llvm_unreachable("NYI"); + else + setDataSize(FieldOffset + EffectiveFieldSize); + + PaddedFieldSize = std::max(PaddedFieldSize, FieldOffset + FieldSize); + setSize(std::max(getSizeInBits(), getDataSizeInBits())); + } else { + llvm_unreachable("NYI"); + } + + // Remember max struct/class ABI-specified alignment. + UnadjustedAlignment = std::max(UnadjustedAlignment, FieldAlign); + UpdateAlignment(FieldAlign, UnpackedFieldAlign, PreferredAlign); + + // For checking the alignment of inner fields against + // the alignment of its parent record. + // FIXME(cir): We need to track the parent record of the current type being + // laid out. A regular mlir::Type has not way of doing this. In fact, we will + // likely need an external abstraction, as I don't think this is possible with + // just the field type. + assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + + if (Packed && !FieldPacked && PackedFieldAlign < FieldAlign) + llvm_unreachable("NYI"); +} + +void ItaniumRecordLayoutBuilder::layoutFields(const StructType D) { + // Layout each field, for now, just sequentially, respecting alignment. In + // the future, this will need to be tweakable by targets. + assert(!::cir::MissingFeatures::recordDeclMayInsertExtraPadding() && + !Context.getLangOpts().SanitizeAddressFieldPadding); + bool InsertExtraPadding = false; + assert(!::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); + bool HasFlexibleArrayMember = false; + for (const auto FT : D.getMembers()) { + layoutField(FT, InsertExtraPadding && (FT != D.getMembers().back() || + !HasFlexibleArrayMember)); + } +} + +void ItaniumRecordLayoutBuilder::UpdateAlignment( + clang::CharUnits NewAlignment, clang::CharUnits UnpackedNewAlignment, + clang::CharUnits PreferredNewAlignment) { + // The alignment is not modified when using 'mac68k' alignment or when + // we have an externally-supplied layout that also provides overall alignment. + if (IsMac68kAlign || (UseExternalLayout && !InferAlignment)) + return; + + if (NewAlignment > Alignment) { + assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) && + "Alignment not a power of 2"); + Alignment = NewAlignment; + } + + if (UnpackedNewAlignment > UnpackedAlignment) { + assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) && + "Alignment not a power of 2"); + UnpackedAlignment = UnpackedNewAlignment; + } + + if (PreferredNewAlignment > PreferredAlignment) { + assert(llvm::isPowerOf2_64(PreferredNewAlignment.getQuantity()) && + "Alignment not a power of 2"); + PreferredAlignment = PreferredNewAlignment; + } +} + +void ItaniumRecordLayoutBuilder::checkFieldPadding( + uint64_t Offset, uint64_t UnpaddedOffset, uint64_t UnpackedOffset, + unsigned UnpackedAlign, bool isPacked, const Type Ty) { + // We let objc ivars without warning, objc interfaces generally are not used + // for padding tricks. + if (::cir::MissingFeatures::objCIvarDecls()) + llvm_unreachable("NYI"); + + // FIXME(cir): Should the following be skiped in CIR? + // Don't warn about structs created without a SourceLocation. This can + // be done by clients of the AST, such as codegen. + + unsigned CharBitNum = Context.getTargetInfo().getCharWidth(); + + // Warn if padding was introduced to the struct/class. + if (!IsUnion && Offset > UnpaddedOffset) { + unsigned PadSize = Offset - UnpaddedOffset; + // bool InBits = true; + if (PadSize % CharBitNum == 0) { + PadSize = PadSize / CharBitNum; + // InBits = false; + } + assert(::cir::MissingFeatures::bitFieldPaddingDiagnostics()); + } + if (isPacked && Offset != UnpackedOffset) { + HasPackedField = true; + } +} + +//===-----------------------------------------------------------------------==// +// Misc. Helper Functions +//===----------------------------------------------------------------------===// + +bool isMsLayout(const CIRLowerContext &Context) { + return Context.getTargetInfo().getCXXABI().isMicrosoft(); +} + +/// Does the target C++ ABI require us to skip over the tail-padding +/// of the given class (considering it as a base class) when allocating +/// objects? +static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { + assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + switch (ABI.getTailPaddingUseRules()) { + case clang::TargetCXXABI::AlwaysUseTailPadding: + return false; + + case clang::TargetCXXABI::UseTailPaddingUnlessPOD03: + // FIXME: To the extent that this is meant to cover the Itanium ABI + // rules, we should implement the restrictions about over-sized + // bitfields: + // + // http://itanium-cxx-abi.github.io/cxx-abi/abi.html#POD : + // In general, a type is considered a POD for the purposes of + // layout if it is a POD type (in the sense of ISO C++ + // [basic.types]). However, a POD-struct or POD-union (in the + // sense of ISO C++ [class]) with a bitfield member whose + // declared width is wider than the declared type of the + // bitfield is not a POD for the purpose of layout. Similarly, + // an array type is not a POD for the purpose of layout if the + // element type of the array is not a POD for the purpose of + // layout. + // + // Where references to the ISO C++ are made in this paragraph, + // the Technical Corrigendum 1 version of the standard is + // intended. + // FIXME(cir): This always returns true since we can't check if a CIR record + // is a POD type. + assert(!::cir::MissingFeatures::CXXRecordDeclIsPOD()); + return true; + + case clang::TargetCXXABI::UseTailPaddingUnlessPOD11: + // This is equivalent to RD->getTypeForDecl().isCXX11PODType(), + // but with a lot of abstraction penalty stripped off. This does + // assume that these properties are set correctly even in C++98 + // mode; fortunately, that is true because we want to assign + // consistently semantics to the type-traits intrinsics (or at + // least as many of them as possible). + llvm_unreachable("NYI"); + } + + llvm_unreachable("bad tail-padding use kind"); +} + +} // namespace + +/// Get or compute information about the layout of the specified record +/// (struct/union/class), which indicates its size and field position +/// information. +const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { + assert(isa(D) && "Not a record type"); + auto RT = dyn_cast(D); + + assert(RT.isComplete() && "Cannot get layout of forward declarations!"); + + // FIXME(cir): Cache the layout. Also, use a more MLIR-based approach. + + const CIRRecordLayout *NewEntry = nullptr; + + if (isMsLayout(*this)) { + llvm_unreachable("NYI"); + } else { + // FIXME(cir): Add if-else separating C and C++ records. + assert(!::cir::MissingFeatures::isCXXRecordDecl()); + EmptySubobjectMap EmptySubobjects(*this, RT); + ItaniumRecordLayoutBuilder Builder(*this, &EmptySubobjects); + Builder.layout(RT); + + // In certain situations, we are allowed to lay out objects in the + // tail-padding of base classes. This is ABI-dependent. + // FIXME: this should be stored in the record layout. + bool skipTailPadding = mustSkipTailPadding(getTargetInfo().getCXXABI(), RT); + + // FIXME: This should be done in FinalizeLayout. + clang::CharUnits DataSize = + skipTailPadding ? Builder.getSize() : Builder.getDataSize(); + clang::CharUnits NonVirtualSize = + skipTailPadding ? DataSize : Builder.NonVirtualSize; + assert(!::cir::MissingFeatures::CXXRecordIsDynamicClass()); + // FIXME(cir): Whose responsible for freeing the allocation below? + NewEntry = new CIRRecordLayout( + *this, Builder.getSize(), Builder.Alignment, Builder.PreferredAlignment, + Builder.UnadjustedAlignment, + /*RequiredAlignment : used by MS-ABI)*/ + Builder.Alignment, Builder.HasOwnVFPtr, /*RD->isDynamicClass()=*/false, + clang::CharUnits::fromQuantity(-1), DataSize, Builder.FieldOffsets, + NonVirtualSize, Builder.NonVirtualAlignment, + Builder.PreferredNVAlignment, + EmptySubobjects.SizeOfLargestEmptySubobject, Builder.PrimaryBase, + Builder.PrimaryBaseIsVirtual, nullptr, false, false); + } + + // TODO(cir): Cache the layout. + // TODO(cir): Add option to dump the layouts. + + return *NewEntry; +} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 477ccd312cc4..4a6124ad898a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -20,6 +20,19 @@ namespace cir { namespace { +/// \p returns the size in bits of the largest (native) vector for \p AVXLevel. +unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { + switch (AVXLevel) { + case X86AVXABILevel::AVX512: + return 512; + case X86AVXABILevel::AVX: + return 256; + case X86AVXABILevel::None: + return 128; + } + llvm_unreachable("Unknown AVXLevel"); +} + /// Return true if the specified [start,end) bit range is known to either be /// off the end of the specified type or being in alignment padding. The user /// type specified is known to be at most 128 bits in size, and have passed @@ -36,7 +49,44 @@ static bool BitsContainNoUserData(Type Ty, unsigned StartBit, unsigned EndBit, if (TySize <= StartBit) return true; - llvm_unreachable("NYI"); + if (auto arrTy = llvm::dyn_cast(Ty)) { + llvm_unreachable("NYI"); + } + + if (auto structTy = llvm::dyn_cast(Ty)) { + const CIRRecordLayout &Layout = Context.getCIRRecordLayout(Ty); + + // If this is a C++ record, check the bases first. + if (::cir::MissingFeatures::isCXXRecordDecl() || + ::cir::MissingFeatures::getCXXRecordBases()) { + llvm_unreachable("NYI"); + } + + // Verify that no field has data that overlaps the region of interest. Yes + // this could be sped up a lot by being smarter about queried fields, + // however we're only looking at structs up to 16 bytes, so we don't care + // much. + unsigned idx = 0; + for (auto type : structTy.getMembers()) { + unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); + + // If we found a field after the region we care about, then we're done. + if (FieldOffset >= EndBit) + break; + + unsigned FieldStart = FieldOffset < StartBit ? StartBit - FieldOffset : 0; + if (!BitsContainNoUserData(type, FieldStart, EndBit - FieldOffset, + Context)) + return false; + + ++idx; + } + + // If nothing in this record overlapped the area of interest, we're good. + return true; + } + + return false; } /// Return a floating point type at the specified offset. @@ -53,6 +103,33 @@ Type getFPTypeAtOffset(Type IRType, unsigned IROffset, class X86_64ABIInfo : public ABIInfo { using Class = ::cir::X86ArgClass; + /// Implement the X86_64 ABI merging algorithm. + /// + /// Merge an accumulating classification \arg Accum with a field + /// classification \arg Field. + /// + /// \param Accum - The accumulating classification. This should + /// always be either NoClass or the result of a previous merge + /// call. In addition, this should never be Memory (the caller + /// should just return Memory for the aggregate). + static Class merge(Class Accum, Class Field); + + /// Implement the X86_64 ABI post merging algorithm. + /// + /// Post merger cleanup, reduces a malformed Hi and Lo pair to + /// final MEMORY or SSE classes when necessary. + /// + /// \param AggregateSize - The size of the current aggregate in + /// the classification process. + /// + /// \param Lo - The classification for the parts of the type + /// residing in the low word of the containing object. + /// + /// \param Hi - The classification for the parts of the type + /// residing in the higher words of the containing object. + /// + void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; + /// Determine the x86_64 register classes in which the given type T should be /// passed. /// @@ -88,8 +165,20 @@ class X86_64ABIInfo : public ABIInfo { Type GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, Type SourceTy, unsigned SourceOffset) const; + /// The 0.98 ABI revision clarified a lot of ambiguities, + /// unfortunately in ways that were not always consistent with + /// certain previous compilers. In particular, platforms which + /// required strict binary compatibility with older versions of GCC + /// may need to exempt themselves. + bool honorsRevision0_98() const { + return !getTarget().getTriple().isOSDarwin(); + } + + X86AVXABILevel AVXLevel; + public: - X86_64ABIInfo(LowerTypes &CGT, X86AVXABILevel AVXLevel) : ABIInfo(CGT) {} + X86_64ABIInfo(LowerTypes &CGT, X86AVXABILevel AVXLevel) + : ABIInfo(CGT), AVXLevel(AVXLevel) {} ::cir::ABIArgInfo classifyReturnType(Type RetTy) const; @@ -160,6 +249,92 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, } else if (isa(Ty)) { Current = Class::Integer; + } else if (const auto RT = dyn_cast(Ty)) { + uint64_t Size = getContext().getTypeSize(Ty); + + // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger + // than eight eightbytes, ..., it has class MEMORY. + if (Size > 512) + llvm_unreachable("NYI"); + + // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial + // copy constructor or a non-trivial destructor, it is passed by invisible + // reference. + if (getRecordArgABI(RT, getCXXABI())) + llvm_unreachable("NYI"); + + // Assume variable sized types are passed in memory. + if (::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()) + llvm_unreachable("NYI"); + + const auto &Layout = getContext().getCIRRecordLayout(Ty); + + // Reset Lo class, this will be recomputed. + Current = Class::NoClass; + + // If this is a C++ record, classify the bases first. + assert(!::cir::MissingFeatures::isCXXRecordDecl() && + !::cir::MissingFeatures::getCXXRecordBases()); + + // Classify the fields one at a time, merging the results. + bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <= + clang::LangOptions::ClangABI::Ver11 || + getContext().getTargetInfo().getTriple().isPS(); + bool IsUnion = RT.isUnion() && !UseClang11Compat; + + // FIXME(cir): An interface to handle field declaration might be needed. + assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + for (auto [idx, FT] : llvm::enumerate(RT.getMembers())) { + uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); + assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + bool BitField = false; + + // Ignore padding bit-fields. + if (BitField && !::cir::MissingFeatures::fieldDeclisUnnamedBitField()) + llvm_unreachable("NYI"); + + // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than + // eight eightbytes, or it contains unaligned fields, it has class + // MEMORY. + // + // The only case a 256-bit or a 512-bit wide vector could be used is + // when the struct contains a single 256-bit or 512-bit element. Early + // check and fallback to memory. + // + // FIXME: Extended the Lo and Hi logic properly to work for size wider + // than 128. + if (Size > 128 && ((!IsUnion && Size != getContext().getTypeSize(FT)) || + Size > getNativeVectorSizeForAVXABI(AVXLevel))) { + llvm_unreachable("NYI"); + } + // Note, skip this test for bit-fields, see below. + if (!BitField && Offset % getContext().getTypeAlign(RT)) { + llvm_unreachable("NYI"); + } + + // Classify this field. + // + // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate + // exceeds a single eightbyte, each is classified + // separately. Each eightbyte gets initialized to class + // NO_CLASS. + Class FieldLo, FieldHi; + + // Bit-fields require special handling, they do not force the + // structure to be passed in memory even if unaligned, and + // therefore they can straddle an eightbyte. + if (BitField) { + llvm_unreachable("NYI"); + } else { + classify(FT, Offset, FieldLo, FieldHi, isNamedArg); + } + Lo = merge(Lo, FieldLo); + Hi = merge(Hi, FieldHi); + if (Lo == Class::Memory || Hi == Class::Memory) + break; + } + + postMerge(Size, Lo, Hi); } else { llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; llvm_unreachable("NYI"); @@ -245,7 +420,15 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, } if (auto RT = dyn_cast(DestTy)) { - llvm_unreachable("NYI"); + // If this is a struct, recurse into the field at the specified offset. + const ::cir::StructLayout *SL = getDataLayout().getStructLayout(RT); + if (IROffset < SL->getSizeInBytes()) { + unsigned FieldIdx = SL->getElementContainingOffset(IROffset); + IROffset -= SL->getElementOffset(FieldIdx); + + return GetINTEGERTypeAtOffset(RT.getMembers()[FieldIdx], IROffset, + SourceTy, SourceOffset); + } } // Okay, we don't have any better idea of what to pass, so we pass this in @@ -328,7 +511,7 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { if (HighPart) llvm_unreachable("NYI"); - return ABIArgInfo::getDirect(RetTy); + return ABIArgInfo::getDirect(resType); } ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, @@ -463,6 +646,80 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { } } +X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { + // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is + // classified recursively so that always two fields are + // considered. The resulting class is calculated according to + // the classes of the fields in the eightbyte: + // + // (a) If both classes are equal, this is the resulting class. + // + // (b) If one of the classes is NO_CLASS, the resulting class is + // the other class. + // + // (c) If one of the classes is MEMORY, the result is the MEMORY + // class. + // + // (d) If one of the classes is INTEGER, the result is the + // INTEGER. + // + // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, + // MEMORY is used as class. + // + // (f) Otherwise class SSE is used. + + // Accum should never be memory (we should have returned) or + // ComplexX87 (because this cannot be passed in a structure). + assert((Accum != Class::Memory && Accum != Class::ComplexX87) && + "Invalid accumulated classification during merge."); + if (Accum == Field || Field == Class::NoClass) + return Accum; + if (Field == Class::Memory) + return Class::Memory; + if (Accum == Class::NoClass) + return Field; + if (Accum == Class::Integer || Field == Class::Integer) + return Class::Integer; + if (Field == Class::X87 || Field == Class::X87Up || + Field == Class::ComplexX87 || Accum == Class::X87 || + Accum == Class::X87Up) + return Class::Memory; + return Class::SSE; +} + +void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, + Class &Hi) const { + // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: + // + // (a) If one of the classes is Memory, the whole argument is passed in + // memory. + // + // (b) If X87UP is not preceded by X87, the whole argument is passed in + // memory. + // + // (c) If the size of the aggregate exceeds two eightbytes and the first + // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole + // argument is passed in memory. NOTE: This is necessary to keep the + // ABI working for processors that don't support the __m256 type. + // + // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. + // + // Some of these are enforced by the merging logic. Others can arise + // only with unions; for example: + // union { _Complex double; unsigned; } + // + // Note that clauses (b) and (c) were added in 0.98. + // + if (Hi == Class::Memory) + Lo = Class::Memory; + if (Hi == Class::X87Up && Lo != Class::X87 && honorsRevision0_98()) + Lo = Class::Memory; + if (AggregateSize > 128 && (Lo != Class::SSE || Hi != Class::SSEUp)) + Lo = Class::Memory; + if (Hi == Class::SSEUp && Lo != Class::SSE) + Hi = Class::SSE; +} + std::unique_ptr createX86_64TargetLoweringInfo(LowerModule &LM, X86AVXABILevel AVXLevel) { return std::make_unique(LM.getTypes(), AVXLevel); diff --git a/clang/test/CIR/CodeGen/bool.c b/clang/test/CIR/CodeGen/bool.c index 038de348797b..6cc618b094a5 100644 --- a/clang/test/CIR/CodeGen/bool.c +++ b/clang/test/CIR/CodeGen/bool.c @@ -36,4 +36,4 @@ void store_bool(S *s) { // CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : !cir.ptr, !cir.bool void load_bool(S *s) { bool x = s->x; -} \ No newline at end of file +} diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index 4918fcdf71ed..cb0928d19a39 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -45,4 +45,4 @@ unsigned is_little(void) { // CHECK: cir.func @is_little // CHECK: %[[VAL_1:.*]] = cir.get_global @is_little.one : !cir.ptr // CHECK: %[[VAL_2:.*]] = cir.cast(bitcast, %[[VAL_1]] : !cir.ptr), !cir.ptr -// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = "c"} : !cir.ptr -> !cir.ptr> \ No newline at end of file +// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = "c"} : !cir.ptr -> !cir.ptr> diff --git a/clang/test/CIR/Lowering/address-space.cir b/clang/test/CIR/Lowering/address-space.cir index ee857bd32119..c3426899d1e2 100644 --- a/clang/test/CIR/Lowering/address-space.cir +++ b/clang/test/CIR/Lowering/address-space.cir @@ -5,7 +5,8 @@ module attributes { cir.triple = "spirv64-unknown-unknown", - llvm.data_layout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1" + llvm.data_layout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1", + dlti.dl_spec = #dlti.dl_spec<> // Avoid assert errors. } { cir.global external addrspace(offload_global) @addrspace1 = #cir.int<1> : !s32i // LLVM: @addrspace1 = addrspace(1) global i32 diff --git a/clang/test/CIR/Lowering/struct-init.c b/clang/test/CIR/Lowering/struct-init.c index a8b84e9d20d9..8be8f6ffc5c0 100644 --- a/clang/test/CIR/Lowering/struct-init.c +++ b/clang/test/CIR/Lowering/struct-init.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + struct S { int x; }; diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index 6eb1189402fc..48345dfc7c0d 100644 --- a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -87,3 +87,32 @@ double Double(double d) { // cir.call @_Z6Doubled(%{{.+}}) : (!cir.double) -> !cir.double return Double(d); } + + +/// Test call conv lowering for struct type coercion scenarios. /// + +struct S1 { + int a, b; +}; + + +/// Validate coerced argument and cast it to the expected type. + +/// Cast arguments to the expected type. +// CHECK: cir.func @_Z2s12S1(%arg0: !u64i loc({{.+}})) -> !u64i +// CHECK: %[[#V0:]] = cir.alloca !ty_22S122, !cir.ptr +// CHECK: %[[#V1:]] = cir.cast(bitcast, %arg0 : !u64i), !ty_22S122 +// CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22S122, !cir.ptr +S1 s1(S1 arg) { + + /// Cast argument and result of the function call to the expected types. + // CHECK: %[[#V9:]] = cir.cast(bitcast, %{{.+}} : !ty_22S122), !u64i + // CHECK: %[[#V10:]] = cir.call @_Z2s12S1(%[[#V9]]) : (!u64i) -> !u64i + // CHECK: %[[#V11:]] = cir.cast(bitcast, %[[#V10]] : !u64i), !ty_22S122 + s1({1, 2}); + + // CHECK: %[[#V12:]] = cir.load %{{.+}} : !cir.ptr, !ty_22S122 + // CHECK: %[[#V13:]] = cir.cast(bitcast, %[[#V12]] : !ty_22S122), !u64i + // CHECK: cir.return %[[#V13]] : !u64i + return {1, 2}; +} From 99a1603950596a053215eeed10901fd05119d5e3 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Tue, 13 Aug 2024 14:29:14 -0400 Subject: [PATCH 1766/2301] [CIR][CIRGen] Implement VisitCXXStdInitializerListExpr to support use of std::initializer_list (#764) implement VisitCXXStdInitializerListExpr as similar as to [OG](https://github.com/llvm/clangir/blob/7150a050c12119c27e9eb1547aa65f535e4bfbe9/clang/lib/CodeGen/CGExprAgg.cpp#L417): In order to support this implementation, made some changes to get more helper functions. Also added some tests. The generated LLVM code is most similar to OG's llvm code, 3 interesting differences: 1. CIR introduced scope, thus extra branch 2. OG' has comdat for _ZSt1fIiEvSt16initializer_listIT_E function, but we haven't implemented FuncOP's comdat yet. I'll probably try to add it in another PR, but it's not in the scope of this PR. 3. When defining initialized_list type, OG prefers generic type like %"class.std::initializer_list" = type { ptr, ptr }, but CIR prefers instantiated like "%"class.std::initializer_list" = type { ptr, ptr }" --- clang/lib/CIR/CodeGen/CIRGenBuilder.cpp | 68 +++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 31 +++---- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 51 ++---------- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 60 +++++++++++++- clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp | 82 +++++++++++++++++++ .../CIR/CodeGen/initlist-ptr-unsigned.cpp | 64 +++++++++++++++ 7 files changed, 297 insertions(+), 60 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenBuilder.cpp create mode 100644 clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp create mode 100644 clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp new file mode 100644 index 000000000000..cc2a3aee0ab4 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp @@ -0,0 +1,68 @@ +//===-- CIRGenBuilder.cpp - CIRBuilder implementation ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "CIRGenBuilder.h" + +namespace cir { + +mlir::Value CIRGenBuilderTy::maybeBuildArrayDecay(mlir::Location loc, + mlir::Value arrayPtr, + mlir::Type eltTy) { + auto arrayPtrTy = + ::mlir::dyn_cast<::mlir::cir::PointerType>(arrayPtr.getType()); + assert(arrayPtrTy && "expected pointer type"); + auto arrayTy = + ::mlir::dyn_cast<::mlir::cir::ArrayType>(arrayPtrTy.getPointee()); + + if (arrayTy) { + mlir::cir::PointerType flatPtrTy = + mlir::cir::PointerType::get(getContext(), arrayTy.getEltType()); + return create( + loc, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, arrayPtr); + } + + assert(arrayPtrTy.getPointee() == eltTy && + "flat pointee type must match original array element type"); + return arrayPtr; +} + +mlir::Value CIRGenBuilderTy::getArrayElement(mlir::Location arrayLocBegin, + mlir::Location arrayLocEnd, + mlir::Value arrayPtr, + mlir::Type eltTy, mlir::Value idx, + bool shouldDecay) { + mlir::Value basePtr = arrayPtr; + if (shouldDecay) + basePtr = maybeBuildArrayDecay(arrayLocBegin, arrayPtr, eltTy); + mlir::Type flatPtrTy = basePtr.getType(); + return create(arrayLocEnd, flatPtrTy, basePtr, idx); +} + +mlir::cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, + llvm::APSInt intVal) { + bool isSigned = intVal.isSigned(); + auto width = intVal.getBitWidth(); + mlir::cir::IntType t = isSigned ? getSIntNTy(width) : getUIntNTy(width); + return getConstInt(loc, t, + isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); +} + +mlir::cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, + llvm::APInt intVal) { + auto width = intVal.getBitWidth(); + mlir::cir::IntType t = getUIntNTy(width); + return getConstInt(loc, t, intVal.getZExtValue()); +} + +mlir::cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, + mlir::Type t, uint64_t C) { + auto intTy = mlir::dyn_cast(t); + assert(intTy && "expected mlir::cir::IntType"); + return create(loc, intTy, + mlir::cir::IntAttr::get(t, C)); +} +}; // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 70de6e4de221..385f18df0ee0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -508,22 +508,12 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::IntAttr::get(uInt64Ty, C)); } - mlir::cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal) { - bool isSigned = intVal.isSigned(); - auto width = intVal.getBitWidth(); - mlir::cir::IntType t = isSigned ? getSIntNTy(width) : getUIntNTy(width); - return getConstInt( - loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); - } + mlir::cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal); - mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::Type t, - uint64_t C) { - auto intTy = mlir::dyn_cast(t); - assert(intTy && "expected mlir::cir::IntType"); - return create(loc, intTy, - mlir::cir::IntAttr::get(t, C)); - } + mlir::cir::ConstantOp getConstInt(mlir::Location loc, llvm::APInt intVal); + mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::Type t, + uint64_t C); /// Create constant nullptr for pointer-to-data-member type ty. mlir::cir::ConstantOp getNullDataMemberPtr(mlir::cir::DataMemberType ty, mlir::Location loc) { @@ -959,6 +949,19 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, resultTy, objectPtr, memberPtr); } + + /// Create a cir.ptr_stride operation to get access to an array element. + /// idx is the index of the element to access, shouldDecay is true if the + /// result should decay to a pointer to the element type. + mlir::Value getArrayElement(mlir::Location arrayLocBegin, + mlir::Location arrayLocEnd, mlir::Value arrayPtr, + mlir::Type eltTy, mlir::Value idx, + bool shouldDecay); + + /// Returns a decayed pointer to the first element of the array + /// pointed to by arrayPtr. + mlir::Value maybeBuildArrayDecay(mlir::Location loc, mlir::Value arrayPtr, + mlir::Type eltTy); }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index a1b4dd63b3b5..03ebf4193d0d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -9,7 +9,6 @@ // This contains code to emit Expr nodes as CIR code. // //===----------------------------------------------------------------------===// -#include "CIRGenBuilder.h" #include "CIRGenCXXABI.h" #include "CIRGenCall.h" #include "CIRGenCstEmitter.h" @@ -1512,28 +1511,6 @@ void CIRGenFunction::buildIgnoredExpr(const Expr *E) { buildLValue(E); } -static mlir::Value maybeBuildArrayDecay(mlir::OpBuilder &builder, - mlir::Location loc, - mlir::Value arrayPtr, - mlir::Type eltTy) { - auto arrayPtrTy = - ::mlir::dyn_cast<::mlir::cir::PointerType>(arrayPtr.getType()); - assert(arrayPtrTy && "expected pointer type"); - auto arrayTy = - ::mlir::dyn_cast<::mlir::cir::ArrayType>(arrayPtrTy.getPointee()); - - if (arrayTy) { - mlir::cir::PointerType flatPtrTy = - mlir::cir::PointerType::get(builder.getContext(), arrayTy.getEltType()); - return builder.create( - loc, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, arrayPtr); - } - - assert(arrayPtrTy.getPointee() == eltTy && - "flat pointee type must match original array element type"); - return arrayPtr; -} - Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, LValueBaseInfo *BaseInfo) { assert(E->getType()->isArrayType() && @@ -1570,8 +1547,8 @@ Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, *BaseInfo = LV.getBaseInfo(); assert(!MissingFeatures::tbaa() && "NYI"); - mlir::Value ptr = maybeBuildArrayDecay( - CGM.getBuilder(), CGM.getLoc(E->getSourceRange()), Addr.getPointer(), + mlir::Value ptr = CGM.getBuilder().maybeBuildArrayDecay( + CGM.getLoc(E->getSourceRange()), Addr.getPointer(), getTypes().convertTypeForMem(EltType)); return Address(ptr, Addr.getAlignment()); } @@ -1652,20 +1629,6 @@ static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, } } -static mlir::Value buildArrayAccessOp(mlir::OpBuilder &builder, - mlir::Location arrayLocBegin, - mlir::Location arrayLocEnd, - mlir::Value arrayPtr, mlir::Type eltTy, - mlir::Value idx, bool shouldDecay) { - mlir::Value basePtr = arrayPtr; - if (shouldDecay) - basePtr = maybeBuildArrayDecay(builder, arrayLocBegin, arrayPtr, eltTy); - mlir::Type flatPtrTy = basePtr.getType(); - - return builder.create(arrayLocEnd, flatPtrTy, basePtr, - idx); -} - static mlir::Value buildArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, @@ -1679,8 +1642,8 @@ buildArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, // that would enhance tracking this later in CIR? if (inbounds) assert(!MissingFeatures::emitCheckedInBoundsGEP() && "NYI"); - return buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, ptr, eltTy, idx, - shouldDecay); + return CGM.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx, + shouldDecay); } static QualType getFixedSizeElementType(const ASTContext &ctx, @@ -1721,7 +1684,7 @@ static Address buildArraySubscriptPtr( // assert(indices.size() == 1 && "cannot handle multiple indices yet"); // auto idx = indices.back(); // auto &CGM = CGF.getCIRGenModule(); - // eltPtr = buildArrayAccessOp(CGM.getBuilder(), beginLoc, endLoc, + // eltPtr = CGM.getBuilder().getArrayElement(beginLoc, endLoc, // addr.getPointer(), addr.getElementType(), // idx); assert(0 && "NYI"); @@ -2003,8 +1966,8 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { case CK_AddressSpaceConversion: { LValue LV = buildLValue(E->getSubExpr()); QualType DestTy = getContext().getPointerType(E->getType()); - auto SrcAS = builder.getAddrSpaceAttr( - E->getSubExpr()->getType().getAddressSpace()); + auto SrcAS = + builder.getAddrSpaceAttr(E->getSubExpr()->getType().getAddressSpace()); auto DestAS = builder.getAddrSpaceAttr(E->getType().getAddressSpace()); mlir::Value V = getTargetHooks().performAddrSpaceCast( *this, LV.getPointer(), SrcAS, DestAS, ConvertType(DestTy)); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 979ea4ebf258..c403de3c51da 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -300,8 +300,64 @@ class AggExprEmitter : public StmtVisitor { } void VisitLambdaExpr(LambdaExpr *E); void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { - llvm_unreachable("NYI"); + ASTContext &Ctx = CGF.getContext(); + CIRGenFunction::SourceLocRAIIObject locRAIIObject{ + CGF, CGF.getLoc(E->getSourceRange())}; + // Emit an array containing the elements. The array is externally + // destructed if the std::initializer_list object is. + LValue Array = CGF.buildLValue(E->getSubExpr()); + assert(Array.isSimple() && "initializer_list array not a simple lvalue"); + Address ArrayPtr = Array.getAddress(); + + const ConstantArrayType *ArrayType = + Ctx.getAsConstantArrayType(E->getSubExpr()->getType()); + assert(ArrayType && "std::initializer_list constructed from non-array"); + + RecordDecl *Record = E->getType()->castAs()->getDecl(); + RecordDecl::field_iterator Field = Record->field_begin(); + assert(Field != Record->field_end() && + Ctx.hasSameType(Field->getType()->getPointeeType(), + ArrayType->getElementType()) && + "Expected std::initializer_list first field to be const E *"); + // Start pointer. + auto loc = CGF.getLoc(E->getSourceRange()); + AggValueSlot Dest = EnsureSlot(loc, E->getType()); + LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), E->getType()); + LValue Start = + CGF.buildLValueForFieldInitialization(DestLV, *Field, Field->getName()); + mlir::Value ArrayStart = ArrayPtr.emitRawPointer(); + CGF.buildStoreThroughLValue(RValue::get(ArrayStart), Start); + ++Field; + assert(Field != Record->field_end() && + "Expected std::initializer_list to have two fields"); + + auto Builder = CGF.getBuilder(); + + auto sizeOp = Builder.getConstInt(loc, ArrayType->getSize()); + + mlir::Value Size = sizeOp.getRes(); + Builder.getUIntNTy(ArrayType->getSizeBitWidth()); + LValue EndOrLength = + CGF.buildLValueForFieldInitialization(DestLV, *Field, Field->getName()); + if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) { + // Length. + CGF.buildStoreThroughLValue(RValue::get(Size), EndOrLength); + } else { + // End pointer. + assert(Field->getType()->isPointerType() && + Ctx.hasSameType(Field->getType()->getPointeeType(), + ArrayType->getElementType()) && + "Expected std::initializer_list second field to be const E *"); + + auto ArrayEnd = + Builder.getArrayElement(loc, loc, ArrayPtr.getPointer(), + ArrayPtr.getElementType(), Size, false); + CGF.buildStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); + } + assert(++Field == Record->field_end() && + "Expected std::initializer_list to only have two fields"); } + void VisitExprWithCleanups(ExprWithCleanups *E); void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { llvm_unreachable("NYI"); @@ -1671,4 +1727,4 @@ LValue CIRGenFunction::buildAggExprToLValue(const Expr *E) { AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); return LV; -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index b4350caa79fa..552555779d46 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -9,6 +9,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIR CIRAsm.cpp CIRGenAtomic.cpp + CIRGenBuilder.cpp CIRGenBuiltin.cpp CIRGenBuiltinAArch64.cpp CIRGenBuiltinX86.cpp diff --git a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp new file mode 100644 index 000000000000..f20f687ecf9a --- /dev/null +++ b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp @@ -0,0 +1,82 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +namespace std { +template class initializer_list { + const b *array_start; + const b *array_end; +}; +template +void f(initializer_list) {;} +void test() { + f({"xy","uv"}); +} +} // namespace std + +// CIR: [[INITLIST_TYPE:!.*]] = !cir.struct" {!cir.ptr>, !cir.ptr>}> +// CIR: cir.func linkonce_odr @_ZSt1fIPKcEvSt16initializer_listIT_E(%arg0: [[INITLIST_TYPE]] +// CIR: [[LOCAL:%.*]] = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, +// CIR: cir.store %arg0, [[LOCAL]] : [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]> +// CIR: cir.return + +// CIR: cir.global "private" constant internal dsolocal [[STR_XY:@.*]] = #cir.const_array<"xy\00" : !cir.array> : !cir.array +// CIR: cir.global "private" constant internal dsolocal [[STR_UV:@.*]] = #cir.const_array<"uv\00" : !cir.array> : !cir.array + +// CIR: cir.func @_ZSt4testv() +// CIR: cir.scope { +// CIR: [[INITLIST_LOCAL:%.*]] = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, +// CIR: [[LOCAL_ELEM_ARRAY:%.*]] = cir.alloca !cir.array x 2>, !cir.ptr x 2>>, +// CIR: [[FIRST_ELEM_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[LOCAL_ELEM_ARRAY]] : !cir.ptr x 2>>), !cir.ptr> +// CIR: [[XY_CHAR_ARRAY:%.*]] = cir.get_global [[STR_XY]] : !cir.ptr> +// CIR: [[STR_XY_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[XY_CHAR_ARRAY]] : !cir.ptr>), !cir.ptr +// CIR: cir.store [[STR_XY_PTR]], [[FIRST_ELEM_PTR]] : !cir.ptr, !cir.ptr> +// CIR: [[ONE:%.*]] = cir.const #cir.int<1> +// CIR: [[NEXT_ELEM_PTR:%.*]] = cir.ptr_stride([[FIRST_ELEM_PTR]] : !cir.ptr>, [[ONE]] : !s64i), !cir.ptr> +// CIR: [[UV_CHAR_ARRAY:%.*]] = cir.get_global [[STR_UV]] : !cir.ptr> +// CIR: [[STR_UV_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[UV_CHAR_ARRAY]] : !cir.ptr>), !cir.ptr +// CIR: cir.store [[STR_UV_PTR]], [[NEXT_ELEM_PTR]] : !cir.ptr, !cir.ptr> +// CIR: [[START_FLD_PTR:%.*]] = cir.get_member [[INITLIST_LOCAL]][0] {name = "array_start"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr>> +// CIR: [[START_FLD_PTR_AS_PTR_2_CHAR_ARRAY:%.*]] = cir.cast(bitcast, [[START_FLD_PTR]] : !cir.ptr>>), !cir.ptr x 2>>> +// CIR: cir.store [[LOCAL_ELEM_ARRAY]], [[START_FLD_PTR_AS_PTR_2_CHAR_ARRAY]] : !cir.ptr x 2>>, !cir.ptr x 2>>> +// CIR: [[ELEM_ARRAY_LEN:%.*]] = cir.const #cir.int<2> +// CIR: [[END_FLD_PTR:%.*]] = cir.get_member [[INITLIST_LOCAL]][1] {name = "array_end"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr>> +// CIR: [[LOCAL_ELEM_ARRAY_END:%.*]] = cir.ptr_stride([[LOCAL_ELEM_ARRAY]] : !cir.ptr x 2>>, [[ELEM_ARRAY_LEN]] : !u64i), !cir.ptr x 2>> +// CIR: [[END_FLD_PTR_AS_PTR_2_CHAR_ARRAY:%.*]] = cir.cast(bitcast, [[END_FLD_PTR]] : !cir.ptr>>), !cir.ptr x 2>>> +// CIR: cir.store [[LOCAL_ELEM_ARRAY_END]], [[END_FLD_PTR_AS_PTR_2_CHAR_ARRAY]] : !cir.ptr x 2>>, !cir.ptr x 2>>> +// CIR: [[ARG:%.*]] = cir.load [[INITLIST_LOCAL]] : !cir.ptr<[[INITLIST_TYPE]]>, [[INITLIST_TYPE]] +// CIR: cir.call @_ZSt1fIPKcEvSt16initializer_listIT_E([[ARG]]) : ([[INITLIST_TYPE]]) -> () +// CIR: } +// CIR: cir.return +// CIR: } + +// LLVM: %"class.std::initializer_list" = type { ptr, ptr } + +// LLVM: @.str = internal constant [3 x i8] c"xy\00" +// LLVM: @.str1 = internal constant [3 x i8] c"uv\00" + +// LLVM: define linkonce_odr void @_ZSt1fIPKcEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG0:%.*]]) +// LLVM: [[LOCAL_PTR:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, +// LLVM: store %"class.std::initializer_list" [[ARG0]], ptr [[LOCAL_PTR]], align 8, +// LLVM: ret void, +// LLVM: } + +// LLVM: define dso_local void @_ZSt4testv() +// LLVM: br label %[[SCOPE_START:.*]], +// LLVM: [[SCOPE_START]]: ; preds = %0 +// LLVM: [[INIT_STRUCT:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, +// LLVM: [[ELEM_ARRAY_PTR:%.*]] = alloca [2 x ptr], i64 1, align 8, +// LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr ptr, ptr [[ELEM_ARRAY_PTR]], i32 0, +// LLVM: store ptr @.str, ptr [[PTR_FIRST_ELEM]], align 8, +// LLVM: [[PTR_SECOND_ELEM:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 1, +// LLVM: store ptr @.str1, ptr [[PTR_SECOND_ELEM]], align 8, +// LLVM: [[INIT_START_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0, +// LLVM: [[INIT_END_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1, +// LLVM: [[ELEM_ARRAY_END:%.*]] = getelementptr [2 x ptr], ptr [[ELEM_ARRAY_PTR]], i64 2, +// LLVM: store ptr [[ELEM_ARRAY_END]], ptr [[INIT_END_FLD_PTR]], align 8, +// LLVM: [[ARG2PASS:%.*]] = load %"class.std::initializer_list", ptr [[INIT_STRUCT]], align 8, +// LLVM: call void @_ZSt1fIPKcEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG2PASS]]), +// LLVM: br label %[[SCOPE_END:.*]], +// LLVM: [[SCOPE_END]]: ; preds = %[[SCOPE_START]] +// LLVM: ret void diff --git a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp new file mode 100644 index 000000000000..183a04d78045 --- /dev/null +++ b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp @@ -0,0 +1,64 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +namespace std { +template class initializer_list { + const b *c; + unsigned long len; +}; +template +void f(initializer_list) {;} +void test() { + f({7}); +} +} // namespace std + +// CIR: [[INITLIST_TYPE:!.*]] = !cir.struct" {!cir.ptr, !u64i}> + +// CIR: cir.func linkonce_odr @_ZSt1fIiEvSt16initializer_listIT_E(%arg0: [[INITLIST_TYPE]] +// CIR: [[REG0:%.*]] = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, +// CIR: cir.store %arg0, [[REG0]] : [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]> +// CIR: cir.return + +// CIR: cir.func @_ZSt4testv() +// CIR: cir.scope { +// CIR: [[LIST_PTR:%.*]] = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, +// CIR: [[ARRAY:%.*]] = cir.alloca !cir.array, !cir.ptr>, +// CIR: [[DECAY_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[ARRAY]] : !cir.ptr>), !cir.ptr +// CIR: [[SEVEN:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: cir.store [[SEVEN]], [[DECAY_PTR]] : !s32i, !cir.ptr +// CIR: [[FLD_C:%.*]] = cir.get_member [[LIST_PTR]][0] {name = "c"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr> +// CIR: [[ARRAY_PTR:%.*]] = cir.cast(bitcast, [[FLD_C]] : !cir.ptr>), !cir.ptr>> +// CIR: cir.store [[ARRAY]], [[ARRAY_PTR]] : !cir.ptr>, !cir.ptr>> +// CIR: [[LENGTH_ONE:%.*]] = cir.const #cir.int<1> +// CIR: [[FLD_LEN:%.*]] = cir.get_member [[LIST_PTR]][1] {name = "len"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr +// CIR: cir.store [[LENGTH_ONE]], [[FLD_LEN]] : !u64i, !cir.ptr +// CIR: [[ARG2PASS:%.*]] = cir.load [[LIST_PTR]] : !cir.ptr<[[INITLIST_TYPE]]>, [[INITLIST_TYPE]] +// CIR: cir.call @_ZSt1fIiEvSt16initializer_listIT_E([[ARG2PASS]]) : ([[INITLIST_TYPE]]) -> () +// CIR: } +// CIR: cir.return +// CIR: } + +// LLVM: %"class.std::initializer_list" = type { ptr, i64 } +// LLVM: define linkonce_odr void @_ZSt1fIiEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG:%.*]]) +// LLVM: [[LOCAL:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, +// LLVM: store %"class.std::initializer_list" [[ARG]], ptr [[LOCAL]], align 8, + +// LLVM: define dso_local void @_ZSt4testv() +// LLVM: br label %[[SCOPE_START:.*]], +// LLVM: [[SCOPE_START]]: ; preds = %0 +// LLVM: [[INIT_STRUCT:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, +// LLVM: [[ELEM_ARRAY:%.*]] = alloca [1 x i32], i64 1, align 4, +// LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr i32, ptr [[ELEM_ARRAY]], i32 0, +// LLVM: store i32 7, ptr [[PTR_FIRST_ELEM]], align 4, +// LLVM: [[ELEM_ARRAY_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0, +// LLVM: store ptr [[ELEM_ARRAY]], ptr [[ELEM_ARRAY_PTR]], align 8, +// LLVM: [[INIT_LEN_FLD:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1, +// LLVM: store i64 1, ptr [[INIT_LEN_FLD]], align 8, +// LLVM: [[ARG2PASS:%.*]] = load %"class.std::initializer_list", ptr [[INIT_STRUCT]], align 8, +// LLVM: call void @_ZSt1fIiEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG2PASS]]) +// LLVM: br label %[[SCOPE_END:.*]], +// LLVM: [[SCOPE_END]]: ; preds = %[[SCOPE_START]] +// LLVM: ret void From 9dbd9f250411b3e5262210e89ac7a22885a2fa46 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 14 Aug 2024 02:30:51 +0800 Subject: [PATCH 1767/2301] [CIR][CIRGen] Support more complex value casts (#786) This PR adds CIRGen and LLVMIR codegen for those not-yet-covered complex casts, including explicit type cast expressions of complex types and complex value promotion. All type conversion expressions involving complex types should now codegen. --- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 33 ++++++++----- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 32 +++++++++++-- clang/test/CIR/CodeGen/complex-cast.c | 53 +++++++++++++++++++++ 3 files changed, 103 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 8880dfd37cb1..33e3b67f8082 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -128,7 +128,7 @@ class ComplexExprEmitter : public StmtVisitor { return buildLoadOfLValue(E); return buildCast(E->getCastKind(), E->getSubExpr(), E->getType()); } - mlir::Value VisitCastExpr(CastExpr *E) { llvm_unreachable("NYI"); } + mlir::Value VisitCastExpr(CastExpr *E); mlir::Value VisitCallExpr(const CallExpr *E); mlir::Value VisitStmtExpr(const StmtExpr *E) { llvm_unreachable("NYI"); } @@ -509,6 +509,14 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, llvm_unreachable("unknown cast resulting in complex value"); } +mlir::Value ComplexExprEmitter::VisitCastExpr(CastExpr *E) { + if (const auto *ECE = dyn_cast(E)) + CGF.CGM.buildExplicitCastExprType(ECE, &CGF); + if (E->changesVolatileQualification()) + return buildLoadOfLValue(E); + return buildCast(E->getCastKind(), E->getSubExpr(), E->getType()); +} + mlir::Value ComplexExprEmitter::VisitCallExpr(const CallExpr *E) { if (E->getCallReturnType(CGF.getContext())->isReferenceType()) return buildLoadOfLValue(E); @@ -864,12 +872,22 @@ mlir::Value CIRGenFunction::buildPromotedComplexExpr(const Expr *E, mlir::Value CIRGenFunction::buildPromotedValue(mlir::Value result, QualType PromotionType) { - llvm_unreachable("complex type conversion is NYI"); + assert(mlir::isa( + mlir::cast(result.getType()) + .getElementTy()) && + "integral complex will never be promoted"); + return builder.createCast(mlir::cir::CastKind::float_complex, result, + ConvertType(PromotionType)); } mlir::Value CIRGenFunction::buildUnPromotedValue(mlir::Value result, - QualType PromotionType) { - llvm_unreachable("complex type conversion is NYI"); + QualType UnPromotionType) { + assert(mlir::isa( + mlir::cast(result.getType()) + .getElementTy()) && + "integral complex will never be promoted"); + return builder.createCast(mlir::cir::CastKind::float_complex, result, + ConvertType(UnPromotionType)); } mlir::Value CIRGenFunction::buildComplexExpr(const Expr *E) { @@ -938,10 +956,3 @@ LValue CIRGenFunction::buildComplexCompoundAssignmentLValue( RValue Val; return ComplexExprEmitter(*this).buildCompoundAssignLValue(E, Op, Val); } - -mlir::Value CIRGenFunction::buildComplexToScalarConversion(mlir::Value Src, - QualType SrcTy, - QualType DstTy, - SourceLocation Loc) { - llvm_unreachable("complex cast is NYI"); -} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 940b3800ec14..7933f2f8603d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1701,14 +1701,15 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { llvm_unreachable("NYI"); case CK_CopyAndAutoreleaseBlockObject: llvm_unreachable("NYI"); + case CK_FloatingRealToComplex: - llvm_unreachable("NYI"); case CK_FloatingComplexCast: - llvm_unreachable("NYI"); + case CK_IntegralRealToComplex: + case CK_IntegralComplexCast: case CK_IntegralComplexToFloatingComplex: - llvm_unreachable("NYI"); case CK_FloatingComplexToIntegralComplex: - llvm_unreachable("NYI"); + llvm_unreachable("scalar cast to non-scalar value"); + case CK_ConstructorConversion: llvm_unreachable("NYI"); case CK_ToUnion: @@ -1856,6 +1857,29 @@ mlir::Value CIRGenFunction::buildScalarConversion(mlir::Value Src, .buildScalarConversion(Src, SrcTy, DstTy, Loc); } +mlir::Value CIRGenFunction::buildComplexToScalarConversion(mlir::Value Src, + QualType SrcTy, + QualType DstTy, + SourceLocation Loc) { + assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) && + "Invalid complex -> scalar conversion"); + + auto ComplexElemTy = SrcTy->castAs()->getElementType(); + if (DstTy->isBooleanType()) { + auto Kind = ComplexElemTy->isFloatingType() + ? mlir::cir::CastKind::float_complex_to_bool + : mlir::cir::CastKind::int_complex_to_bool; + return builder.createCast(getLoc(Loc), Kind, Src, ConvertType(DstTy)); + } + + auto Kind = ComplexElemTy->isFloatingType() + ? mlir::cir::CastKind::float_complex_to_real + : mlir::cir::CastKind::int_complex_to_real; + auto Real = + builder.createCast(getLoc(Loc), Kind, Src, ConvertType(ComplexElemTy)); + return buildScalarConversion(Real, ComplexElemTy, DstTy, Loc); +} + /// If the specified expression does not fold /// to a constant, or if it does but contains a label, return false. If it /// constant folds return true and set the boolean result in Result. diff --git a/clang/test/CIR/CodeGen/complex-cast.c b/clang/test/CIR/CodeGen/complex-cast.c index dcff8b545b3e..72dd45e652ca 100644 --- a/clang/test/CIR/CodeGen/complex-cast.c +++ b/clang/test/CIR/CodeGen/complex-cast.c @@ -70,6 +70,55 @@ void scalar_to_complex() { // CHECK: } +void scalar_to_complex_explicit() { + cd = (double _Complex)sd; + ci = (int _Complex)si; + cd = (double _Complex)si; + ci = (int _Complex)sd; +} + +// CHECK-LABEL: @scalar_to_complex_explicit() + +// CIR-BEFORE: %{{.+}} = cir.cast(float_to_complex, %{{.+}} : !cir.double), !cir.complex + +// CIR-AFTER: %[[#IMAG:]] = cir.const #cir.fp<0.000000e+00> : !cir.double +// CIR-AFTER-NEXT: %{{.+}} = cir.complex.create %{{.+}}, %[[#IMAG]] : !cir.double -> !cir.complex + +// LLVM: %[[#A:]] = insertvalue { double, double } undef, double %{{.+}}, 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double 0.000000e+00, 1 + +// CIR-BEFORE: %{{.+}} = cir.cast(int_to_complex, %{{.+}} : !s32i), !cir.complex + +// CIR-AFTER: %[[#IMAG:]] = cir.const #cir.int<0> : !s32i +// CIR-AFTER-NEXT: %{{.+}} = cir.complex.create %{{.+}}, %[[#IMAG]] : !s32i -> !cir.complex + +// LLVM: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %{{.+}}, 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 0, 1 + +// CIR-BEFORE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.double +// CIR-BEFORE-NEXT: %{{.+}} = cir.cast(float_to_complex, %[[#A]] : !cir.double), !cir.complex + +// CIR-AFTER: %[[#REAL:]] = cir.cast(int_to_float, %11 : !s32i), !cir.double +// CIR-AFTER-NEXT: %[[#IMAG:]] = cir.const #cir.fp<0.000000e+00> : !cir.double +// CIR-AFTER-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !cir.double -> !cir.complex + +// LLVM: %[[#REAL:]] = sitofp i32 %{{.+}} to double +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#REAL]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double 0.000000e+00, 1 + +// CIR-BEFORE: %[[#A:]] = cir.cast(float_to_int, %{{.+}} : !cir.double), !s32i +// CIR-BEFORE-NEXT: %{{.+}} = cir.cast(int_to_complex, %[[#A]] : !s32i), !cir.complex + +// CIR-AFTER: %[[#REAL:]] = cir.cast(float_to_int, %{{.+}} : !cir.double), !s32i +// CIR-AFTER-NEXT: %[[#IMAG:]] = cir.const #cir.int<0> : !s32i +// CIR-AFTER-NEXT: %{{.+}} = cir.complex.create %[[#REAL]], %[[#IMAG]] : !s32i -> !cir.complex + +// LLVM: %[[#REAL:]] = fptosi double %{{.+}} to i32 +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#REAL]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 0, 1 + +// CHECK: } + void complex_to_scalar() { sd = (double)cd; si = (int)ci; @@ -203,3 +252,7 @@ void complex_to_bool() { // LLVM-NEXT: br label %{{.+}} // CHECK: } + +void promotion() { + cd = cf + cf; +} From 376de5ec02606c11b5a2c49a4ceb7a80f37c4474 Mon Sep 17 00:00:00 2001 From: 7mile Date: Wed, 14 Aug 2024 02:31:44 +0800 Subject: [PATCH 1768/2301] [CIR][Dialect] Add verification of address space to `get_global` (#787) This PR verifies `cir.get_global` has its result type correctly annotated with address space of the referenced symbol. The documentation is also updated to clarify this constraint. `GlobalOp` is the main consideration. It's worth noting that if the `cir.get_global` op references a function, we also (implicitly) checks that its result pointer type has no address space attribute. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 10 ++++++++++ clang/test/CIR/IR/invalid.cir | 13 +++++++++++++ 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index cd4434c201bc..412c96255f76 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2231,7 +2231,8 @@ def GetGlobalOp : CIR_Op<"get_global", The `cir.get_global` operation retrieves the address pointing to a named global variable. If the global variable is marked constant, writing to the resulting address (such as through a `cir.store` operation) is - undefined. Resulting type must always be a `!cir.ptr<...>` type. + undefined. Resulting type must always be a `!cir.ptr<...>` type with the + same address space as the global variable. Addresses of thread local globals can only be retrieved if this operation is marked `thread_local`, which indicates the address isn't constant. @@ -2241,6 +2242,9 @@ def GetGlobalOp : CIR_Op<"get_global", %x = cir.get_global @foo : !cir.ptr ... %y = cir.get_global thread_local @batata : !cir.ptr + ... + cir.global external addrspace(offload_global) @gv = #cir.int<0> : !s32i + %z = cir.get_global @gv : !cir.ptr ``` }]; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index b8ce9bd5bc4e..42af0c8e5fe1 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2044,8 +2044,10 @@ GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { << "' does not reference a valid cir.global or cir.func"; mlir::Type symTy; + mlir::cir::AddressSpaceAttr symAddrSpace{}; if (auto g = dyn_cast(op)) { symTy = g.getSymType(); + symAddrSpace = g.getAddrSpaceAttr(); // Verify that for thread local global access, the global needs to // be marked with tls bits. if (getTls() && !g.getTlsModel()) @@ -2060,6 +2062,14 @@ GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return emitOpError("result type pointee type '") << resultType.getPointee() << "' does not match type " << symTy << " of the global @" << getName(); + + if (symAddrSpace != resultType.getAddrSpace()) { + return emitOpError() + << "result type address space does not match the address " + "space of the global @" + << getName(); + } + return success(); } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index fbbe3a0b7d87..182845fa2b6f 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1272,3 +1272,16 @@ module { } } +// ----- + +!s32i = !cir.int + +module { + cir.global external addrspace(offload_global) @gv = #cir.int<0> : !s32i + + cir.func @test_get_global() { + // expected-error@+1 {{'cir.get_global' op result type address space does not match the address space of the global @gv}} + %addr = cir.get_global @gv : !cir.ptr + cir.return + } +} From 46cc2583f4348dda1c8cfa7d656008b4175692da Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 15 Aug 2024 02:45:01 +0800 Subject: [PATCH 1769/2301] [CIR][CodeGen] Set address space for OpenCL globals (#788) This PR sets proper address space when creating `cir.global` and `cir.get_global`. Different languages use different ways to encode the address space in AST constructs (i.e. VarDecl *). OpenCL and SYCL use an address space qualifier on the type of `VarDecl`, while CUDA uses separate AST attributes like `CUDASharedAttr`. Similarily, some targets may want to use special address space for global variables. So a per-language + per-target hook is needed to provide this customization point. In the LLVM CodeGen, it's the helper method `getGlobalVarAddressSpace` that takes on the role. For OpenCL C + SPIR-V combination, OpenCL C converts the address space qualifier to corresponding LangAS, but SPIR-V does not require any action. This PR implements `global` qualifier in OpenCL C, but does not include `constant` qualifier. Although the modified part works for `constant`, CIRGen is not yet able to set constant attribute for global ops (there is a TODO line). Static variable decl and `local` qualifier work in a similar way and come in later patches. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 7 +-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 62 +++++++++++++++++++------ clang/lib/CIR/CodeGen/CIRGenModule.h | 11 +++++ clang/lib/CIR/CodeGen/TargetInfo.cpp | 9 ++++ clang/lib/CIR/CodeGen/TargetInfo.h | 7 +++ clang/test/CIR/CodeGen/OpenCL/global.cl | 23 +++++++++ 7 files changed, 104 insertions(+), 19 deletions(-) create mode 100644 clang/test/CIR/CodeGen/OpenCL/global.cl diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 385f18df0ee0..85c8b5aa2bea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -674,9 +674,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value createGetGlobal(mlir::cir::GlobalOp global, bool threadLocal = false) { - return create(global.getLoc(), - getPointerTo(global.getSymType()), - global.getName(), threadLocal); + return create( + global.getLoc(), + getPointerTo(global.getSymType(), global.getAddrSpaceAttr()), + global.getName(), threadLocal); } mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 03ebf4193d0d..32e4afcbb377 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -899,7 +899,9 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, auto V = CGF.CGM.getAddrOfGlobalVar(VD); auto RealVarTy = CGF.getTypes().convertTypeForMem(VD->getType()); - auto realPtrTy = CGF.getBuilder().getPointerTo(RealVarTy); + mlir::cir::PointerType realPtrTy = CGF.getBuilder().getPointerTo( + RealVarTy, cast_if_present( + cast(V.getType()).getAddrSpace())); if (realPtrTy != V.getType()) V = CGF.getBuilder().createBitcast(V.getLoc(), V, realPtrTy); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 9b389b53fb08..22620dd8be13 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -637,11 +637,11 @@ mlir::Value CIRGenModule::getGlobalValue(const Decl *D) { return CurCGF->symbolTable.lookup(D); } -mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM, - mlir::Location loc, - StringRef name, mlir::Type t, - bool isCst, - mlir::Operation *insertPoint) { +mlir::cir::GlobalOp +CIRGenModule::createGlobalOp(CIRGenModule &CGM, mlir::Location loc, + StringRef name, mlir::Type t, bool isCst, + mlir::cir::AddressSpaceAttr addrSpace, + mlir::Operation *insertPoint) { mlir::cir::GlobalOp g; auto &builder = CGM.getBuilder(); { @@ -655,7 +655,8 @@ mlir::cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &CGM, if (curCGF) builder.setInsertionPoint(curCGF->CurFn); - g = builder.create(loc, name, t, isCst); + g = builder.create( + loc, name, t, isCst, GlobalLinkageKind::ExternalLinkage, addrSpace); if (!curCGF) { if (insertPoint) CGM.getModule().insert(insertPoint, g); @@ -742,6 +743,12 @@ void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, // If the types does not match, update all references to Old to the new type. auto OldTy = Old.getSymType(); auto NewTy = New.getSymType(); + mlir::cir::AddressSpaceAttr oldAS = Old.getAddrSpaceAttr(); + mlir::cir::AddressSpaceAttr newAS = New.getAddrSpaceAttr(); + // TODO(cir): If the AS differs, we should also update all references. + if (oldAS != newAS) { + llvm_unreachable("NYI"); + } if (OldTy != NewTy) { auto OldSymUses = Old.getSymbolUses(theModule.getOperation()); if (OldSymUses.has_value()) { @@ -809,7 +816,7 @@ void CIRGenModule::setTLSMode(mlir::Operation *Op, const VarDecl &D) const { /// mangled name but some other type. mlir::cir::GlobalOp CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, - LangAS AddrSpace, const VarDecl *D, + LangAS langAS, const VarDecl *D, ForDefinition_t IsForDefinition) { // Lookup the entry, lazily creating it if necessary. mlir::cir::GlobalOp Entry; @@ -818,8 +825,9 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, Entry = dyn_cast_or_null(V); } - // unsigned TargetAS = astCtx.getTargetAddressSpace(AddrSpace); + mlir::cir::AddressSpaceAttr cirAS = builder.getAddrSpaceAttr(langAS); if (Entry) { + auto entryCIRAS = Entry.getAddrSpaceAttr(); if (WeakRefReferences.erase(Entry)) { if (D && !D->hasAttr()) { auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage; @@ -837,8 +845,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, if (langOpts.OpenMP && !langOpts.OpenMPSimd && D) getOpenMPRuntime().registerTargetGlobalVariable(D, Entry); - // TODO(cir): check TargetAS matches Entry address space - if (Entry.getSymType() == Ty && !MissingFeatures::addressSpaceInGlobalVar()) + if (Entry.getSymType() == Ty && entryCIRAS == cirAS) return Entry; // If there are two attempts to define the same mangled name, issue an @@ -867,6 +874,8 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // TODO(cir): LLVM codegen makes sure the result is of the correct type // by issuing a address space cast. + if (entryCIRAS != cirAS) + llvm_unreachable("NYI"); // (If global is requested for a definition, we always need to create a new // global, not just return a bitcast.) @@ -874,7 +883,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, return Entry; } - // TODO(cir): auto DAddrSpace = GetGlobalVarAddressSpace(D); + auto declCIRAS = builder.getAddrSpaceAttr(getGlobalVarAddressSpace(D)); // TODO(cir): do we need to strip pointer casts for Entry? auto loc = getLoc(D->getSourceRange()); @@ -883,6 +892,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // mark it as such. auto GV = CIRGenModule::createGlobalOp(*this, loc, MangledName, Ty, /*isConstant=*/false, + /*addrSpace=*/declCIRAS, /*insertPoint=*/Entry.getOperation()); // If we already created a global with the same mangled name (but different @@ -992,8 +1002,7 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty, bool tlsAccess = D->getTLSKind() != VarDecl::TLS_None; auto g = buildGlobal(D, Ty, IsForDefinition); - auto ptrTy = - mlir::cir::PointerType::get(builder.getContext(), g.getSymType()); + auto ptrTy = builder.getPointerTo(g.getSymType(), g.getAddrSpaceAttr()); return builder.create( getLoc(D->getSourceRange()), ptrTy, g.getSymName(), tlsAccess); } @@ -1076,7 +1085,8 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // If this is OpenMP device, check if it is legal to emit this global // normally. QualType ASTTy = D->getType(); - if (getLangOpts().OpenCL || getLangOpts().OpenMPIsTargetDevice) + if ((getLangOpts().OpenCL && ASTTy->isSamplerT()) || + getLangOpts().OpenMPIsTargetDevice) llvm_unreachable("not implemented"); // TODO(cir): LLVM's codegen uses a llvm::TrackingVH here. Is that @@ -1409,7 +1419,7 @@ LangAS CIRGenModule::getLangTempAllocaAddressSpace() const { if (getLangOpts().OpenCL) return LangAS::opencl_private; if (getLangOpts().SYCLIsDevice || getLangOpts().CUDAIsDevice || - (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)) + (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)) llvm_unreachable("NYI"); return LangAS::Default; } @@ -3100,3 +3110,25 @@ mlir::cir::SourceLanguage CIRGenModule::getCIRSourceLanguage() { // TODO(cir): support remaining source languages. llvm_unreachable("CIR does not yet support the given source language"); } + +LangAS CIRGenModule::getGlobalVarAddressSpace(const VarDecl *D) { + if (langOpts.OpenCL) { + LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global; + assert(AS == LangAS::opencl_global || AS == LangAS::opencl_global_device || + AS == LangAS::opencl_global_host || AS == LangAS::opencl_constant || + AS == LangAS::opencl_local || AS >= LangAS::FirstTargetAddressSpace); + return AS; + } + + if (langOpts.SYCLIsDevice && + (!D || D->getType().getAddressSpace() == LangAS::Default)) + llvm_unreachable("NYI"); + + if (langOpts.CUDA && langOpts.CUDAIsDevice) + llvm_unreachable("NYI"); + + if (langOpts.OpenMP) + llvm_unreachable("NYI"); + + return getTargetCIRGenInfo().getGlobalVarAddressSpace(*this, D); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index b33e80d478c5..b8ad57d788ae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -226,6 +226,7 @@ class CIRGenModule : public CIRGenTypeCache { static mlir::cir::GlobalOp createGlobalOp(CIRGenModule &CGM, mlir::Location loc, StringRef name, mlir::Type t, bool isCst = false, + mlir::cir::AddressSpaceAttr addrSpace = {}, mlir::Operation *insertPoint = nullptr); // FIXME: Hardcoding priority here is gross. @@ -328,6 +329,16 @@ class CIRGenModule : public CIRGenTypeCache { return (Twine(".compoundLiteral.") + Twine(CompoundLitaralCnt++)).str(); } + /// Return the AST address space of the underlying global variable for D, as + /// determined by its declaration. Normally this is the same as the address + /// space of D's type, but in CUDA, address spaces are associated with + /// declarations, not types. If D is nullptr, return the default address + /// space for global variable. + /// + /// For languages without explicit address spaces, if D has default address + /// space, target-specific global or constant address space may be returned. + LangAS getGlobalVarAddressSpace(const VarDecl *D); + /// Return the AST address space of constant literal, which is used to emit /// the constant literal as global variable in LLVM IR. /// Note: This is not necessarily the address space of the constant literal diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 29b21b00eb0e..ee39e1b28418 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -552,6 +552,15 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { return ABIArgInfo::getDirect(ResType); } +clang::LangAS +TargetCIRGenInfo::getGlobalVarAddressSpace(cir::CIRGenModule &CGM, + const clang::VarDecl *D) const { + assert(!CGM.getLangOpts().OpenCL && + !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && + "Address space agnostic languages only"); + return D ? D->getType().getAddressSpace() : LangAS::Default; +} + mlir::Value TargetCIRGenInfo::performAddrSpaceCast( CIRGenFunction &CGF, mlir::Value Src, mlir::cir::AddressSpaceAttr SrcAddr, mlir::cir::AddressSpaceAttr DestAddr, mlir::Type DestTy, diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index d2f4f3cdbf88..994fa357c864 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -62,6 +62,13 @@ class TargetCIRGenInfo { std::vector &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const {} + /// Get target favored AST address space of a global variable for languages + /// other than OpenCL and CUDA. + /// If \p D is nullptr, returns the default target favored address space + /// for global variable. + virtual clang::LangAS getGlobalVarAddressSpace(CIRGenModule &CGM, + const clang::VarDecl *D) const; + /// Get the CIR address space for alloca. virtual mlir::cir::AddressSpaceAttr getCIRAllocaAddressSpace() const { // Return the null attribute, which means the target does not care about the diff --git a/clang/test/CIR/CodeGen/OpenCL/global.cl b/clang/test/CIR/CodeGen/OpenCL/global.cl new file mode 100644 index 000000000000..cab7378fd102 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/global.cl @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +global int a = 13; +// CIR-DAG: cir.global external addrspace(offload_global) @a = #cir.int<13> : !s32i +// LLVM-DAG: @a = addrspace(1) global i32 13 + +global int b = 15; +// CIR-DAG: cir.global external addrspace(offload_global) @b = #cir.int<15> : !s32i +// LLVM-DAG: @b = addrspace(1) global i32 15 + +kernel void test_get_global() { + a = b; + // CIR: %[[#ADDRB:]] = cir.get_global @b : !cir.ptr + // CIR-NEXT: %[[#LOADB:]] = cir.load %[[#ADDRB]] : !cir.ptr, !s32i + // CIR-NEXT: %[[#ADDRA:]] = cir.get_global @a : !cir.ptr + // CIR-NEXT: cir.store %[[#LOADB]], %[[#ADDRA]] : !s32i, !cir.ptr + + // LLVM: %[[#LOADB:]] = load i32, ptr addrspace(1) @b, align 4 + // LLVM-NEXT: store i32 %[[#LOADB]], ptr addrspace(1) @a, align 4 +} From 41d4ccc19bef16521c8318f1398c5faad637c68e Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 15 Aug 2024 02:45:28 +0800 Subject: [PATCH 1770/2301] [CIR] Fix wrong LLVMIR lowering of fp decrement (#789) Unary decrement expression on floating point operands was lowered to `fsub -1.0` by a typo. This PR fixes this bug. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- clang/test/CIR/Lowering/unary-inc-dec.cir | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 9bb4e9caf98d..5e48ae831ead 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2144,7 +2144,7 @@ class CIRUnaryOpLowering auto negOneAttr = rewriter.getFloatAttr(llvmType, -1.0); auto negOneConst = rewriter.create(loc, llvmType, negOneAttr); - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( op, llvmType, negOneConst, adaptor.getInput()); return mlir::success(); } diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir index 9e8856428c84..9ba26b36f61c 100644 --- a/clang/test/CIR/Lowering/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -44,7 +44,7 @@ module { %5 = cir.unary(dec, %4) : !cir.float, !cir.float cir.store %5, %0 : !cir.float, !cir.ptr // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f32) : f32 - // MLIR: = llvm.fsub %[[#D_ONE]], %{{[0-9]+}} : f32 + // MLIR: = llvm.fadd %[[#D_ONE]], %{{[0-9]+}} : f32 %6 = cir.load %1 : !cir.ptr, !cir.double %7 = cir.unary(inc, %6) : !cir.double, !cir.double @@ -56,7 +56,7 @@ module { %9 = cir.unary(dec, %8) : !cir.double, !cir.double cir.store %9, %1 : !cir.double, !cir.ptr // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f64) : f64 - // MLIR: = llvm.fsub %[[#D_ONE]], %{{[0-9]+}} : f64 + // MLIR: = llvm.fadd %[[#D_ONE]], %{{[0-9]+}} : f64 cir.return } From 9628eb2d00256ab16beea071a89d25068b277ca7 Mon Sep 17 00:00:00 2001 From: roro47 <40341016+roro47@users.noreply.github.com> Date: Thu, 15 Aug 2024 18:46:36 +0100 Subject: [PATCH 1771/2301] [CIR][CIRGen] Add new CIR visibility to represent Default, Hidden, Protected (#776) This PR add a new CIR attribute `mlir::cir::VisibilityAttr` to represent CIR visibility. It will represent C/C++ visibility type `Default`, `Hidden`, `Protected`. The PR handles the parsing, printing of CIR visibility and also lower to LLVM. After this PR, there will be more PR's to migrate CIRGen properties that are currently querying MLIR visibility(e.g. `sym_visibility`), to instead query CIR visibility, and remove MLIR's visibility from printing and parsing. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 43 +++++++++- clang/include/clang/CIR/Dialect/IR/CIROps.td | 11 ++- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenModule.cpp | 36 +++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 85 +++++++++++++++---- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 22 +++++ .../Lowering/DirectToLLVM/LoweringHelpers.h | 2 +- clang/test/CIR/CodeGen/attributes.c | 11 ++- clang/test/CIR/CodeGen/visibility-attribute.c | 38 +++++++++ clang/test/CIR/IR/inlineAttr.cir | 2 +- clang/test/CIR/Lowering/alloca.cir | 4 +- 12 files changed, 229 insertions(+), 29 deletions(-) create mode 100644 clang/test/CIR/CodeGen/visibility-attribute.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 3c28768a7fd7..fa3c8004701b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -897,6 +897,48 @@ def ASTExprAttr : AST<"Expr", "expr", def ASTCallExprAttr : AST<"CallExpr", "call.expr", [ASTCallExprInterface]>; + +//===----------------------------------------------------------------------===// +// VisibilityAttr +//===----------------------------------------------------------------------===// + +def VK_Default : I32EnumAttrCase<"Default", 1, "default">; +def VK_Hidden : I32EnumAttrCase<"Hidden", 2, "hidden">; +def VK_Protected : I32EnumAttrCase<"Protected", 3, "protected">; + +def VisibilityKind : I32EnumAttr<"VisibilityKind", "C/C++ visibility", [ + VK_Default, VK_Hidden, VK_Protected +]> { + let cppNamespace = "::mlir::cir"; +} + +def VisibilityAttr : CIR_Attr<"Visibility", "visibility"> { + let summary = "Visibility attribute"; + let description = [{ + Visibility attributes. + }]; + let parameters = (ins "VisibilityKind":$value); + + let assemblyFormat = [{ + $value + }]; + + let builders = [ + AttrBuilder<(ins CArg<"VisibilityKind", "cir::VisibilityKind::Default">:$value), [{ + return $_get($_ctxt, value); + }]> + ]; + + let skipDefaultBuilders = 1; + + let extraClassDeclaration = [{ + bool isDefault() const { return getValue() == VisibilityKind::Default; }; + bool isHidden() const { return getValue() == VisibilityKind::Hidden; }; + bool isProtected() const { return getValue() == VisibilityKind::Protected; }; + }]; +} + + //===----------------------------------------------------------------------===// // ExtraFuncAttr //===----------------------------------------------------------------------===// @@ -915,7 +957,6 @@ def ExtraFuncAttr : CIR_Attr<"ExtraFuncAttributes", "extra"> { // Printing and parsing also available in CIRDialect.cpp } - def NoInline : I32EnumAttrCase<"NoInline", 1, "no">; def AlwaysInline : I32EnumAttrCase<"AlwaysInline", 2, "always">; def InlineHint : I32EnumAttrCase<"InlineHint", 3, "hint">; diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 412c96255f76..5226bf88e82c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2148,6 +2148,8 @@ def GlobalOp : CIR_Op<"global", Symbol visibility in `sym_visibility` is defined in terms of MLIR's visibility and verified to be in accordance to `linkage`. + `visibility_attr` is defined in terms of CIR's visibility. + Example: ```mlir @@ -2160,6 +2162,7 @@ def GlobalOp : CIR_Op<"global", // TODO: sym_visibility can possibly be represented by implementing the // necessary Symbol's interface in terms of linkage instead. let arguments = (ins SymbolNameAttr:$sym_name, + VisibilityAttr:$global_visibility, OptionalAttr:$sym_visibility, TypeAttr:$sym_type, Arg:$linkage, @@ -2172,11 +2175,11 @@ def GlobalOp : CIR_Op<"global", UnitAttr:$dsolocal, OptionalAttr:$alignment, OptionalAttr:$ast, - OptionalAttr:$section - ); + OptionalAttr:$section); let regions = (region AnyRegion:$ctorRegion, AnyRegion:$dtorRegion); let assemblyFormat = [{ ($sym_visibility^)? + custom($global_visibility) (`constant` $constant^)? $linkage (`comdat` $comdat^)? @@ -2214,7 +2217,8 @@ def GlobalOp : CIR_Op<"global", CArg<"function_ref", "nullptr">:$ctorBuilder, CArg<"function_ref", - "nullptr">:$dtorBuilder)> + "nullptr">:$dtorBuilder) + > ]; let hasVerifier = 1; @@ -2939,6 +2943,7 @@ def FuncOp : CIR_Op<"func", [ }]; let arguments = (ins SymbolNameAttr:$sym_name, + VisibilityAttr:$global_visibility, TypeAttrOf:$function_type, UnitAttr:$builtin, UnitAttr:$coroutine, diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index d6e11e4516e2..52cf2d182c4a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -573,6 +573,7 @@ mlir::cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( // FIXME(cir): OG codegen inserts new GV before old one, we probably don't // need that? GV.setVisibility(OldGV.getVisibility()); + GV.setGlobalVisibilityAttr(OldGV.getGlobalVisibilityAttr()); GV.setInitialValueAttr(Init); GV.setTlsModelAttr(OldGV.getTlsModelAttr()); assert(!MissingFeatures::setDSOLocal()); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 22620dd8be13..d5a2ddbc8f19 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -945,6 +945,8 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, GV.setSectionAttr(builder.getStringAttr(SA->getName())); } + GV.setGlobalVisibilityAttr(getGlobalVisibilityAttrFromDecl(D)); + // Handle XCore specific ABI requirements. if (getTriple().getArch() == llvm::Triple::xcore) assert(0 && "not implemented"); @@ -1246,6 +1248,8 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, if (const SectionAttr *SA = D->getAttr()) GV.setSectionAttr(builder.getStringAttr(SA->getName())); + GV.setGlobalVisibilityAttr(getGlobalVisibilityAttrFromDecl(D)); + // TODO(cir): // GV->setAlignment(getContext().getDeclAlign(D).getAsAlign()); @@ -1316,7 +1320,6 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { const auto *D = cast(GD.getDecl()); - if (const auto *FD = dyn_cast(D)) { // At -O0, don't generate CIR for functions with available_externally // linkage. @@ -1345,8 +1348,9 @@ void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { return; } - if (const auto *VD = dyn_cast(D)) + if (const auto *VD = dyn_cast(D)) { return buildGlobalVarDefinition(VD, !VD->hasDefinition()); + } llvm_unreachable("Invalid argument to buildGlobalDefinition()"); } @@ -1782,6 +1786,32 @@ mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibilityFromCIRLinkage( llvm_unreachable("linkage should be handled above!"); } +mlir::cir::VisibilityKind +CIRGenModule::getGlobalVisibilityKindFromClangVisibility( + clang::VisibilityAttr::VisibilityType visibility) { + switch (visibility) { + case clang::VisibilityAttr::VisibilityType::Default: + return VisibilityKind::Default; + case clang::VisibilityAttr::VisibilityType::Hidden: + return VisibilityKind::Hidden; + case clang::VisibilityAttr::VisibilityType::Protected: + return VisibilityKind::Protected; + } +} + +mlir::cir::VisibilityAttr +CIRGenModule::getGlobalVisibilityAttrFromDecl(const Decl *decl) { + const clang::VisibilityAttr *VA = decl->getAttr(); + mlir::cir::VisibilityAttr cirVisibility = + mlir::cir::VisibilityAttr::get(builder.getContext()); + if (VA) { + cirVisibility = mlir::cir::VisibilityAttr::get( + builder.getContext(), + getGlobalVisibilityKindFromClangVisibility(VA->getVisibility())); + } + return cirVisibility; +} + mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) { if (Linkage == GVA_Internal) @@ -2401,6 +2431,8 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl, // TODO(cir): Complete the remaining part of the function. assert(!MissingFeatures::setFunctionAttributes()); + auto decl = globalDecl.getDecl(); + func.setGlobalVisibilityAttr(getGlobalVisibilityAttrFromDecl(decl)); } /// If the specified mangled name is not in the module, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index b8ad57d788ae..825735d28717 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -627,6 +627,9 @@ class CIRGenModule : public CIRGenTypeCache { static void setInitializer(mlir::cir::GlobalOp &op, mlir::Attribute value); static mlir::SymbolTable::Visibility getMLIRVisibilityFromCIRLinkage(mlir::cir::GlobalLinkageKind GLK); + static mlir::cir::VisibilityKind getGlobalVisibilityKindFromClangVisibility( + clang::VisibilityAttr::VisibilityType visibility); + mlir::cir::VisibilityAttr getGlobalVisibilityAttrFromDecl(const Decl *decl); static mlir::SymbolTable::Visibility getMLIRVisibility(mlir::cir::GlobalOp op); mlir::cir::GlobalLinkageKind getFunctionLinkage(GlobalDecl GD); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 42af0c8e5fe1..807616655829 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -233,6 +233,36 @@ bool omitRegionTerm(mlir::Region &r) { return singleNonEmptyBlock && yieldsNothing(); } +void printVisibilityAttr(OpAsmPrinter &printer, + mlir::cir::VisibilityAttr &visibility) { + switch (visibility.getValue()) { + case VisibilityKind::Hidden: + printer << "hidden"; + break; + case VisibilityKind::Protected: + printer << "protected"; + break; + default: + break; + } +} + +void parseVisibilityAttr(OpAsmParser &parser, + mlir::cir::VisibilityAttr &visibility) { + VisibilityKind visibilityKind; + + if (parser.parseOptionalKeyword("hidden").succeeded()) { + visibilityKind = VisibilityKind::Hidden; + } else if (parser.parseOptionalKeyword("protected").succeeded()) { + visibilityKind = VisibilityKind::Protected; + } else { + visibilityKind = VisibilityKind::Default; + } + + visibility = + mlir::cir::VisibilityAttr::get(parser.getContext(), visibilityKind); +} + //===----------------------------------------------------------------------===// // CIR Custom Parsers/Printers //===----------------------------------------------------------------------===// @@ -255,6 +285,19 @@ static void printOmittedTerminatorRegion(mlir::OpAsmPrinter &printer, /*printBlockTerminators=*/!omitRegionTerm(region)); } +static mlir::ParseResult +parseOmitDefaultVisibility(mlir::OpAsmParser &parser, + mlir::cir::VisibilityAttr &visibility) { + parseVisibilityAttr(parser, visibility); + return success(); +} + +static void printOmitDefaultVisibility(mlir::OpAsmPrinter &printer, + mlir::cir::GlobalOp &op, + mlir::cir::VisibilityAttr visibility) { + printVisibilityAttr(printer, visibility); +} + //===----------------------------------------------------------------------===// // AllocaOp //===----------------------------------------------------------------------===// @@ -1997,6 +2040,10 @@ void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, odsBuilder.createBlock(dtorRegion); dtorBuilder(odsBuilder, odsState.location); } + + odsState.addAttribute( + getGlobalVisibilityAttrName(odsState.name), + mlir::cir::VisibilityAttr::get(odsBuilder.getContext())); } /// Given the region at `index`, or the parent operation if `index` is None, @@ -2145,6 +2192,9 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, GlobalLinkageKindAttr::get(builder.getContext(), linkage)); result.addAttribute(getCallingConvAttrName(result.name), CallingConvAttr::get(builder.getContext(), callingConv)); + result.addAttribute(getGlobalVisibilityAttrName(result.name), + mlir::cir::VisibilityAttr::get(builder.getContext())); + result.attributes.append(attrs.begin(), attrs.end()); if (argAttrs.empty()) return; @@ -2163,6 +2213,7 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { auto lambdaNameAttr = getLambdaAttrName(state.name); auto visNameAttr = getSymVisibilityAttrName(state.name); auto noProtoNameAttr = getNoProtoAttrName(state.name); + auto visibilityNameAttr = getGlobalVisibilityAttrName(state.name); auto dsolocalNameAttr = getDsolocalAttrName(state.name); if (::mlir::succeeded(parser.parseOptionalKeyword(builtinNameAttr.strref()))) state.addAttribute(builtinNameAttr, parser.getBuilder().getUnitAttr()); @@ -2187,6 +2238,11 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { state.addAttribute(visNameAttr, parser.getBuilder().getStringAttr(visAttrStr)); } + + mlir::cir::VisibilityAttr cirVisibilityAttr; + parseVisibilityAttr(parser, cirVisibilityAttr); + state.addAttribute(visibilityNameAttr, cirVisibilityAttr); + if (parser.parseOptionalKeyword(dsolocalNameAttr).succeeded()) state.addAttribute(dsolocalNameAttr, parser.getBuilder().getUnitAttr()); @@ -2393,6 +2449,10 @@ void cir::FuncOp::print(OpAsmPrinter &p) { if (vis != mlir::SymbolTable::Visibility::Public) p << vis << " "; + auto cirVisibilityAttr = getGlobalVisibilityAttr(); + printVisibilityAttr(p, cirVisibilityAttr); + p << " "; + // Print function name, signature, and control. p.printSymbolName(getSymName()); auto fnType = getFunctionType(); @@ -2407,24 +2467,13 @@ void cir::FuncOp::print(OpAsmPrinter &p) { function_interface_impl::printFunctionAttributes( p, *this, // These are all omitted since they are custom printed already. - { - getAliaseeAttrName(), - getBuiltinAttrName(), - getCoroutineAttrName(), - getDsolocalAttrName(), - getExtraAttrsAttrName(), - getFunctionTypeAttrName(), - getGlobalCtorAttrName(), - getGlobalDtorAttrName(), - getLambdaAttrName(), - getLinkageAttrName(), - getCallingConvAttrName(), - getNoProtoAttrName(), - getSymVisibilityAttrName(), - getArgAttrsAttrName(), - getResAttrsAttrName(), - getComdatAttrName(), - }); + {getAliaseeAttrName(), getBuiltinAttrName(), getCoroutineAttrName(), + getDsolocalAttrName(), getExtraAttrsAttrName(), + getFunctionTypeAttrName(), getGlobalCtorAttrName(), + getGlobalDtorAttrName(), getLambdaAttrName(), getLinkageAttrName(), + getCallingConvAttrName(), getNoProtoAttrName(), + getSymVisibilityAttrName(), getArgAttrsAttrName(), getResAttrsAttrName(), + getComdatAttrName(), getGlobalVisibilityAttrName()}); if (auto aliaseeName = getAliasee()) { p << " alias("; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5e48ae831ead..3bcf1235f505 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -150,6 +150,17 @@ mlir::Type elementTypeIfVector(mlir::Type type) { return type; } +mlir::LLVM::Visibility +lowerCIRVisibilityToLLVMVisibility(mlir::cir::VisibilityKind visibilityKind) { + switch (visibilityKind) { + case mlir::cir::VisibilityKind::Default: + return ::mlir::LLVM::Visibility::Default; + case mlir::cir::VisibilityKind::Hidden: + return ::mlir::LLVM::Visibility::Hidden; + case mlir::cir::VisibilityKind::Protected: + return ::mlir::LLVM::Visibility::Protected; + } +} } // namespace //===----------------------------------------------------------------------===// @@ -1686,6 +1697,10 @@ class CIRFuncLowering : public mlir::OpConversionPattern { Loc, op.getName(), llvmFnTy, linkage, isDsoLocal, cconv, mlir::SymbolRefAttr(), attributes); + fn.setVisibility_Attr(mlir::LLVM::VisibilityAttr::get( + getContext(), lowerCIRVisibilityToLLVMVisibility( + op.getGlobalVisibilityAttr().getValue()))); + rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, &signatureConversion))) @@ -1919,12 +1934,17 @@ class CIRGlobalOpLowering const auto loc = op.getLoc(); std::optional section = op.getSection(); std::optional init = op.getInitialValue(); + mlir::LLVM::VisibilityAttr visibility = mlir::LLVM::VisibilityAttr::get( + getContext(), lowerCIRVisibilityToLLVMVisibility( + op.getGlobalVisibilityAttr().getValue())); SmallVector attributes; if (section.has_value()) attributes.push_back(rewriter.getNamedAttr( "section", rewriter.getStringAttr(section.value()))); + attributes.push_back(rewriter.getNamedAttr("visibility_", visibility)); + // Check for missing funcionalities. if (!init.has_value()) { rewriter.replaceOpWithNewOp( @@ -2020,9 +2040,11 @@ class CIRGlobalOpLowering /*alignment*/ 0, /*addrSpace*/ getGlobalOpTargetAddrSpace(op), /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); + auto mod = op->getParentOfType(); if (op.getComdat()) addComdat(llvmGlobalOp, comdatOp, rewriter, mod); + return mlir::success(); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h b/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h index b42f7c263b0f..46de5dfc7634 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h @@ -71,4 +71,4 @@ mlir::Value createAnd(mlir::OpBuilder &bld, mlir::Value lhs, return bld.create(lhs.getLoc(), lhs, rhsVal); } -#endif // LLVM_CLANG_LIB_LOWERINGHELPERS_H \ No newline at end of file +#endif // LLVM_CLANG_LIB_LOWERINGHELPERS_H diff --git a/clang/test/CIR/CodeGen/attributes.c b/clang/test/CIR/CodeGen/attributes.c index 71c018b081ed..5c11dc9a298b 100644 --- a/clang/test/CIR/CodeGen/attributes.c +++ b/clang/test/CIR/CodeGen/attributes.c @@ -10,4 +10,13 @@ int getExt() { int __attribute__((section(".shared"))) glob = 42; // CIR: cir.global external @glob = #cir.int<42> : !s32i {section = ".shared"} -// LLVM @glob = global i32 42, section ".shared" +// LLVM: @glob = global i32 42, section ".shared" + + +void __attribute__((__visibility__("hidden"))) foo(); +// CIR: cir.func no_proto private hidden @foo(...) extra(#fn_attr) +int bah() +{ + foo(); + return 1; +} diff --git a/clang/test/CIR/CodeGen/visibility-attribute.c b/clang/test/CIR/CodeGen/visibility-attribute.c new file mode 100644 index 000000000000..45ea4c28e272 --- /dev/null +++ b/clang/test/CIR/CodeGen/visibility-attribute.c @@ -0,0 +1,38 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM + +extern int glob_default; +// CIR: cir.global "private" external @glob_default : !s32i +// LLVM: @glob_default = external global i32 + +extern int __attribute__((__visibility__("hidden"))) glob_hidden; +// CIR: cir.global "private" hidden external @glob_hidden : !s32i +// LLVM: @glob_hidden = external hidden global i32 + +extern int __attribute__((__visibility__("protected"))) glob_protected; +// CIR: cir.global "private" protected external @glob_protected : !s32i +// LLVM: @glob_protected = external protected global i32 + +int call_glob() +{ + return glob_default + glob_hidden + glob_protected; +} + +void foo_default(); +// CIR: cir.func no_proto private @foo_default(...) extra(#fn_attr) +// LLVM: declare {{.*}} void @foo_default(...) + +void __attribute__((__visibility__("hidden"))) foo_hidden(); +// CIR: cir.func no_proto private hidden @foo_hidden(...) extra(#fn_attr) +// LLVM: declare {{.*}} hidden void @foo_hidden(...) + +void __attribute__((__visibility__("protected"))) foo_protected(); +// CIR: cir.func no_proto private protected @foo_protected(...) extra(#fn_attr) +// LLVM: declare {{.*}} protected void @foo_protected(...) + +void call_foo() +{ + foo_default(); + foo_hidden(); + foo_protected(); +} diff --git a/clang/test/CIR/IR/inlineAttr.cir b/clang/test/CIR/IR/inlineAttr.cir index 76de9acbb736..3d51efd2b258 100644 --- a/clang/test/CIR/IR/inlineAttr.cir +++ b/clang/test/CIR/IR/inlineAttr.cir @@ -12,4 +12,4 @@ module { // CIR: #fn_attr = #cir})> // CIR: cir.func @l0() extra(#fn_attr) { -// MLIR: llvm.func @l0() attributes {cir.extra_attrs = #fn_attr} +// MLIR: llvm.func @l0() attributes {cir.extra_attrs = #fn_attr, global_visibility = #cir} diff --git a/clang/test/CIR/Lowering/alloca.cir b/clang/test/CIR/Lowering/alloca.cir index 33da38e9e69e..62b8c1c60111 100644 --- a/clang/test/CIR/Lowering/alloca.cir +++ b/clang/test/CIR/Lowering/alloca.cir @@ -5,12 +5,12 @@ module { cir.func @foo(%arg0: !s32i) { %0 = cir.alloca !s32i, !cir.ptr, %arg0 : !s32i, ["tmp"] {alignment = 16 : i64} - cir.return + cir.return } } // MLIR: module { -// MLIR-NEXT: llvm.func @foo(%arg0: i32) attributes {cir.extra_attrs = #fn_attr} { +// MLIR-NEXT: llvm.func @foo(%arg0: i32) attributes {cir.extra_attrs = #fn_attr, global_visibility = #cir} { // MLIR-NEXT: %0 = llvm.alloca %arg0 x i32 {alignment = 16 : i64} : (i32) -> !llvm.ptr // MLIR-NEXT: llvm.return // MLIR-NEXT: } From 2af8723cc490617c8925a8a65ece6f2c301c1c96 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 16 Aug 2024 01:47:03 +0800 Subject: [PATCH 1772/2301] [CIR][CIRGen] Complex unary increment and decrement operator (#790) This PR adds CIRGen and LLVMIR lowering for unary increment and decrement expressions of complex types. Currently blocked by #789 . --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 34 ++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 + .../Dialect/Transforms/LoweringPrepare.cpp | 10 +- clang/test/CIR/CodeGen/complex-arithmetic.c | 140 ++++++++++++++++++ 5 files changed, 183 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 32e4afcbb377..7100e62e1d12 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1325,7 +1325,7 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { LValue LV = buildLValue(E->getSubExpr()); if (E->getType()->isAnyComplexType()) { - assert(0 && "not implemented"); + buildComplexPrePostIncDec(E, LV, isInc, true /*isPre*/); } else { buildScalarPrePostIncDec(E, LV, isInc, isPre); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 33e3b67f8082..7472b039649b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -134,9 +134,7 @@ class ComplexExprEmitter : public StmtVisitor { // Operators. mlir::Value VisitPrePostIncDec(const UnaryOperator *E, bool isInc, - bool isPre) { - llvm_unreachable("NYI"); - } + bool isPre); mlir::Value VisitUnaryPostDec(const UnaryOperator *E) { return VisitPrePostIncDec(E, false, false); } @@ -524,6 +522,12 @@ mlir::Value ComplexExprEmitter::VisitCallExpr(const CallExpr *E) { return CGF.buildCallExpr(E).getComplexVal(); } +mlir::Value ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, + bool isInc, bool isPre) { + LValue LV = CGF.buildLValue(E->getSubExpr()); + return CGF.buildComplexPrePostIncDec(E, LV, isInc, isPre); +} + mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *E, QualType PromotionType) { QualType promotionTy = PromotionType.isNull() @@ -956,3 +960,27 @@ LValue CIRGenFunction::buildComplexCompoundAssignmentLValue( RValue Val; return ComplexExprEmitter(*this).buildCompoundAssignLValue(E, Op, Val); } + +mlir::Value CIRGenFunction::buildComplexPrePostIncDec(const UnaryOperator *E, + LValue LV, bool isInc, + bool isPre) { + mlir::Value InVal = buildLoadOfComplex(LV, E->getExprLoc()); + + auto Loc = getLoc(E->getExprLoc()); + auto OpKind = + isInc ? mlir::cir::UnaryOpKind::Inc : mlir::cir::UnaryOpKind::Dec; + mlir::Value IncVal = builder.createUnaryOp(Loc, OpKind, InVal); + + // Store the updated result through the lvalue. + buildStoreOfComplex(Loc, IncVal, LV, /*init*/ false); + if (getLangOpts().OpenMP) + llvm_unreachable("NYI"); + + // If this is a postinc, return the value read from memory, otherwise use the + // updated value. + return isPre ? IncVal : InVal; +} + +mlir::Value CIRGenFunction::buildLoadOfComplex(LValue src, SourceLocation loc) { + return ComplexExprEmitter(*this).buildLoadOfLValue(src, loc); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 25a74293a19c..a3ca8857e8da 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -666,6 +666,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre); + mlir::Value buildComplexPrePostIncDec(const UnaryOperator *E, LValue LV, + bool isInc, bool isPre); // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. @@ -799,6 +801,9 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); mlir::Value buildLoadOfScalar(LValue lvalue, mlir::Location Loc); + /// Load a complex number from the specified l-value. + mlir::Value buildLoadOfComplex(LValue src, SourceLocation loc); + Address buildLoadOfReference(LValue RefLVal, mlir::Location Loc, LValueBaseInfo *PointeeBaseInfo = nullptr); LValue buildLoadOfReferenceLValue(LValue RefLVal, mlir::Location Loc); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index c4244b1b2e8f..ca6970f04830 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -356,10 +356,6 @@ void LoweringPreparePass::lowerUnaryOp(UnaryOp op) { auto loc = op.getLoc(); auto opKind = op.getKind(); - assert((opKind == mlir::cir::UnaryOpKind::Plus || - opKind == mlir::cir::UnaryOpKind::Minus || - opKind == mlir::cir::UnaryOpKind::Not) && - "invalid unary op kind on complex numbers"); CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op); @@ -372,6 +368,12 @@ void LoweringPreparePass::lowerUnaryOp(UnaryOp op) { mlir::Value resultReal; mlir::Value resultImag; switch (opKind) { + case mlir::cir::UnaryOpKind::Inc: + case mlir::cir::UnaryOpKind::Dec: + resultReal = builder.createUnaryOp(loc, opKind, operandReal); + resultImag = operandImag; + break; + case mlir::cir::UnaryOpKind::Plus: case mlir::cir::UnaryOpKind::Minus: resultReal = builder.createUnaryOp(loc, opKind, operandReal); diff --git a/clang/test/CIR/CodeGen/complex-arithmetic.c b/clang/test/CIR/CodeGen/complex-arithmetic.c index f7b85000ce6b..c2e86ca43f74 100644 --- a/clang/test/CIR/CodeGen/complex-arithmetic.c +++ b/clang/test/CIR/CodeGen/complex-arithmetic.c @@ -776,3 +776,143 @@ void builtin_conj() { // LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#RESI]], 1 // CHECK: } + +void pre_increment() { + ++cd1; + ++ci1; +} + +// CLANG: @pre_increment +// CPPLANG: @_Z13pre_incrementv + +// CIRGEN: %{{.+}} = cir.unary(inc, %{{.+}}) : !cir.complex, !cir.complex +// CIRGEN: %{{.+}} = cir.unary(inc, %{{.+}}) : !cir.complex, !cir.complex + +// CIR: %[[#R:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#I:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#IR:]] = cir.unary(inc, %[[#R]]) : !cir.double, !cir.double +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#IR]], %[[#I]] : !cir.double -> !cir.complex + +// CIR: %[[#R:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#I:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#IR:]] = cir.unary(inc, %[[#R]]) : !s32i, !s32i +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#IR]], %[[#I]] : !s32i -> !cir.complex + +// LLVM: %[[#R:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#I:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#IR:]] = fadd double 1.000000e+00, %[[#R]] +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#IR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#I]], 1 + +// LLVM: %[[#R:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#I:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#IR:]] = add i32 %[[#R]], 1 +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#IR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 %[[#I]], 1 + +// CHECK: } + +void post_increment() { + cd1++; + ci1++; +} + +// CLANG: @post_increment +// CPPLANG: @_Z14post_incrementv + +// CIRGEN: %{{.+}} = cir.unary(inc, %{{.+}}) : !cir.complex, !cir.complex +// CIRGEN: %{{.+}} = cir.unary(inc, %{{.+}}) : !cir.complex, !cir.complex + +// CIR: %[[#R:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#I:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#IR:]] = cir.unary(inc, %[[#R]]) : !cir.double, !cir.double +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#IR]], %[[#I]] : !cir.double -> !cir.complex + +// CIR: %[[#R:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#I:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#IR:]] = cir.unary(inc, %[[#R]]) : !s32i, !s32i +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#IR]], %[[#I]] : !s32i -> !cir.complex + +// LLVM: %[[#R:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#I:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#IR:]] = fadd double 1.000000e+00, %[[#R]] +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#IR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#I]], 1 + +// LLVM: %[[#R:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#I:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#IR:]] = add i32 %[[#R]], 1 +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#IR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 %[[#I]], 1 + +// CHECK: } + +void pre_decrement() { + --cd1; + --ci1; +} + +// CLANG: @pre_decrement +// CPPLANG: @_Z13pre_decrementv + +// CIRGEN: %{{.+}} = cir.unary(dec, %{{.+}}) : !cir.complex, !cir.complex +// CIRGEN: %{{.+}} = cir.unary(dec, %{{.+}}) : !cir.complex, !cir.complex + +// CIR: %[[#R:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#I:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#IR:]] = cir.unary(dec, %[[#R]]) : !cir.double, !cir.double +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#IR]], %[[#I]] : !cir.double -> !cir.complex + +// CIR: %[[#R:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#I:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#IR:]] = cir.unary(dec, %[[#R]]) : !s32i, !s32i +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#IR]], %[[#I]] : !s32i -> !cir.complex + +// LLVM: %[[#R:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#I:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#IR:]] = fadd double -1.000000e+00, %[[#R]] +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#IR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#I]], 1 + +// LLVM: %[[#R:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#I:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#IR:]] = sub i32 %[[#R]], 1 +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#IR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 %[[#I]], 1 + +// CHECK: } + +void post_decrement() { + cd1--; + ci1--; +} + +// CLANG: @post_decrement +// CPPLANG: @_Z14post_decrementv + +// CIRGEN: %{{.+}} = cir.unary(dec, %{{.+}}) : !cir.complex, !cir.complex +// CIRGEN: %{{.+}} = cir.unary(dec, %{{.+}}) : !cir.complex, !cir.complex + +// CIR: %[[#R:]] = cir.complex.real %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#I:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double +// CIR-NEXT: %[[#IR:]] = cir.unary(dec, %[[#R]]) : !cir.double, !cir.double +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#IR]], %[[#I]] : !cir.double -> !cir.complex + +// CIR: %[[#R:]] = cir.complex.real %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#I:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i +// CIR-NEXT: %[[#IR:]] = cir.unary(dec, %[[#R]]) : !s32i, !s32i +// CIR-NEXT: %{{.+}} = cir.complex.create %[[#IR]], %[[#I]] : !s32i -> !cir.complex + +// LLVM: %[[#R:]] = extractvalue { double, double } %{{.+}}, 0 +// LLVM-NEXT: %[[#I:]] = extractvalue { double, double } %{{.+}}, 1 +// LLVM-NEXT: %[[#IR:]] = fadd double -1.000000e+00, %[[#R]] +// LLVM-NEXT: %[[#A:]] = insertvalue { double, double } undef, double %[[#IR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { double, double } %[[#A]], double %[[#I]], 1 + +// LLVM: %[[#R:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-NEXT: %[[#I:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-NEXT: %[[#IR:]] = sub i32 %[[#R]], 1 +// LLVM-NEXT: %[[#A:]] = insertvalue { i32, i32 } undef, i32 %[[#IR]], 0 +// LLVM-NEXT: %{{.+}} = insertvalue { i32, i32 } %[[#A]], i32 %[[#I]], 1 + +// CHECK: } From a79311ad9f1184d9dacbf653f8e9388c5f9377d0 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 16 Aug 2024 01:55:03 +0800 Subject: [PATCH 1773/2301] [CIR][CodeGen] Set address space for OpenCL static and local-qualified variables (#792) In OpenCL, `local`-qualified variables are implicitly considered as static. In order to support it, this PR unblocks code paths related to OpenCL static declarations in `CIRGenDecl.cpp`. Following the approach of LLVM CodeGen, a new class `CIRGenOpenCLRuntime` is added to handle the language hook of creating `local`-qualified variables. The default behavior of this hook is quite simple. It forwards the call to `CGF.buildStaticVarDecl`. So in CIR, the OpenCL local memory representation is equivalent to the one defined by SPIR-LLVM convention: a `cir.global` with `addrspace(local)` and *without initializer*, which corresponds to LLVM `undef` initializer. See check lines in test for more details. A `static global`-qualified variable is also added in the test to exercise the static code path itself. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 17 +++---- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 ++ clang/lib/CIR/CodeGen/CIRGenModule.h | 14 ++++++ clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp | 29 ++++++++++++ clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h | 46 +++++++++++++++++++ clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + .../test/CIR/CodeGen/OpenCL/static-vardecl.cl | 24 ++++++++++ 7 files changed, 127 insertions(+), 8 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp create mode 100644 clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h create mode 100644 clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 52cf2d182c4a..9a08ccd0d1ff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -411,7 +411,7 @@ void CIRGenFunction::buildVarDecl(const VarDecl &D) { } if (D.getType().getAddressSpace() == LangAS::opencl_local) - llvm_unreachable("OpenCL and address space are NYI"); + return CGM.getOpenCLRuntime().buildWorkGroupLocalVarDecl(*this, D); assert(D.hasLocalStorage()); @@ -465,19 +465,19 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, Name = getStaticDeclName(*this, D); mlir::Type LTy = getTypes().convertTypeForMem(Ty); - assert(!MissingFeatures::addressSpace()); + mlir::cir::AddressSpaceAttr AS = + builder.getAddrSpaceAttr(getGlobalVarAddressSpace(&D)); // OpenCL variables in local address space and CUDA shared // variables cannot have an initializer. mlir::Attribute Init = nullptr; - if (Ty.getAddressSpace() == LangAS::opencl_local || - D.hasAttr() || D.hasAttr()) - llvm_unreachable("OpenCL & CUDA are NYI"); - else + if (D.hasAttr() || D.hasAttr()) + llvm_unreachable("CUDA is NYI"); + else if (Ty.getAddressSpace() != LangAS::opencl_local) Init = builder.getZeroInitAttr(getTypes().ConvertType(Ty)); mlir::cir::GlobalOp GV = builder.createVersionedGlobal( - getModule(), getLoc(D.getLocation()), Name, LTy, false, Linkage); + getModule(), getLoc(D.getLocation()), Name, LTy, false, Linkage, AS); // TODO(cir): infer visibility from linkage in global op builder. GV.setVisibility(getMLIRVisibilityFromCIRLinkage(Linkage)); GV.setInitialValueAttr(Init); @@ -492,7 +492,8 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, setGVProperties(GV, &D); // Make sure the result is of the correct type. - assert(!MissingFeatures::addressSpace()); + if (AS != builder.getAddrSpaceAttr(Ty.getAddressSpace())) + llvm_unreachable("address space cast NYI"); // Ensure that the static local gets initialized by making sure the parent // function gets emitted eventually. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index d5a2ddbc8f19..21792ce41e25 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -165,6 +165,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth(), /*isSigned=*/true); + if (langOpts.OpenCL) { + createOpenCLRuntime(); + } + mlir::cir::sob::SignedOverflowBehavior sob; switch (langOpts.getSignedOverflowBehavior()) { case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Defined: diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 825735d28717..d4d3339f3fdf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -15,6 +15,7 @@ #include "CIRGenBuilder.h" #include "CIRGenCall.h" +#include "CIRGenOpenCLRuntime.h" #include "CIRGenTypeCache.h" #include "CIRGenTypes.h" #include "CIRGenVTables.h" @@ -102,6 +103,9 @@ class CIRGenModule : public CIRGenTypeCache { /// Holds information about C++ vtables. CIRGenVTables VTables; + /// Holds the OpenCL runtime + std::unique_ptr openCLRuntime; + /// Holds the OpenMP runtime std::unique_ptr openMPRuntime; @@ -700,6 +704,16 @@ class CIRGenModule : public CIRGenTypeCache { /// Print out an error that codegen doesn't support the specified decl yet. void ErrorUnsupported(const Decl *D, const char *Type); + /// Return a reference to the configured OpenMP runtime. + CIRGenOpenCLRuntime &getOpenCLRuntime() { + assert(openCLRuntime != nullptr); + return *openCLRuntime; + } + + void createOpenCLRuntime() { + openCLRuntime.reset(new CIRGenOpenCLRuntime(*this)); + } + /// Return a reference to the configured OpenMP runtime. CIRGenOpenMPRuntime &getOpenMPRuntime() { assert(openMPRuntime != nullptr); diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp new file mode 100644 index 000000000000..863caf8629d2 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp @@ -0,0 +1,29 @@ +//===-- CIRGenOpenCLRuntime.cpp - Interface to OpenCL Runtimes ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides an abstract class for OpenCL CIR generation. Concrete +// subclasses of this implement code generation for specific OpenCL +// runtime libraries. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenOpenCLRuntime.h" +#include "CIRGenFunction.h" + +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" + +using namespace clang; +using namespace cir; + +CIRGenOpenCLRuntime::~CIRGenOpenCLRuntime() {} + +void CIRGenOpenCLRuntime::buildWorkGroupLocalVarDecl(CIRGenFunction &CGF, + const VarDecl &D) { + return CGF.buildStaticVarDecl(D, + mlir::cir::GlobalLinkageKind::InternalLinkage); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h new file mode 100644 index 000000000000..891b5bb5fb79 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h @@ -0,0 +1,46 @@ +//===-- CIRGenOpenCLRuntime.h - Interface to OpenCL Runtimes -----*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides an abstract class for OpenCL CIR generation. Concrete +// subclasses of this implement code generation for specific OpenCL +// runtime libraries. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CIRGENOPENCLRUNTIME_H +#define LLVM_CLANG_LIB_CIR_CIRGENOPENCLRUNTIME_H + +namespace clang { + +class VarDecl; + +} // namespace clang + +namespace cir { + +class CIRGenFunction; +class CIRGenModule; + +class CIRGenOpenCLRuntime { +protected: + CIRGenModule &CGM; + +public: + CIRGenOpenCLRuntime(CIRGenModule &CGM) : CGM(CGM) {} + virtual ~CIRGenOpenCLRuntime(); + + /// Emit the IR required for a work-group-local variable declaration, and add + /// an entry to CGF's LocalDeclMap for D. The base class does this using + /// CIRGenFunction::EmitStaticVarDecl to emit an internal global for D. + virtual void buildWorkGroupLocalVarDecl(CIRGenFunction &CGF, + const clang::VarDecl &D); +}; + +} // namespace cir + +#endif // LLVM_CLANG_LIB_CIR_CIRGENOPENCLRUNTIME_H diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 552555779d46..97a8ad4f5ea8 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -31,6 +31,7 @@ add_clang_library(clangCIR CIRGenFunction.cpp CIRGenItaniumCXXABI.cpp CIRGenModule.cpp + CIRGenOpenCLRuntime.cpp CIRGenOpenCL.cpp CIRGenOpenMPRuntime.cpp CIRGenStmt.cpp diff --git a/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl b/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl new file mode 100644 index 000000000000..9ad8277012c4 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl @@ -0,0 +1,24 @@ +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +kernel void test_static(int i) { + static global int b = 15; + // CIR-DAG: cir.global "private" internal dsolocal addrspace(offload_global) @test_static.b = #cir.int<15> : !s32i {alignment = 4 : i64} + // LLVM-DAG: @test_static.b = internal addrspace(1) global i32 15 + + local int c; + // CIR-DAG: cir.global "private" internal dsolocal addrspace(offload_local) @test_static.c : !s32i {alignment = 4 : i64} + // LLVM-DAG: @test_static.c = internal addrspace(3) global i32 undef + + // CIR-DAG: %[[#ADDRB:]] = cir.get_global @test_static.b : !cir.ptr + // CIR-DAG: %[[#ADDRC:]] = cir.get_global @test_static.c : !cir.ptr + + c = b; + // CIR: %[[#LOADB:]] = cir.load %[[#ADDRB]] : !cir.ptr, !s32i + // CIR-NEXT: cir.store %[[#LOADB]], %[[#ADDRC]] : !s32i, !cir.ptr + + // LLVM: %[[#LOADB:]] = load i32, ptr addrspace(1) @test_static.b, align 4 + // LLVM-NEXT: store i32 %[[#LOADB]], ptr addrspace(3) @test_static.c, align 4 +} From d82aa9313ace2827e850cd1533715b58d8800c2d Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Thu, 15 Aug 2024 20:15:32 -0300 Subject: [PATCH 1774/2301] [CIR][ABI][NFC] Follow-up to struct unpacking (#791) This patch fixes a bunch of pending review comments in #784: - Remove data layout attribute from address space testing - Remove incoherent comment - Rename abi_or_pref to abiOrPref - Make comments impersonal - Implement feature guard for ARM's CMSE secure call feature - Track volatile return times feature in CC lowering - Track missing features in the Itanium record builder - Remove incoherent fix me - Clarify comment regarding CIR record layout getter - Track missing cache for record layout getter - Remove unnecessary todo's --- .../clang/CIR/Dialect/IR/CIRDataLayout.h | 4 +-- clang/include/clang/CIR/MissingFeatures.h | 7 ++++++ clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 12 ++++----- .../TargetLowering/CIRLowerContext.cpp | 4 +-- .../TargetLowering/CIRRecordLayout.cpp | 7 +++--- .../Transforms/TargetLowering/LowerCall.cpp | 8 +++--- .../TargetLowering/LowerFunction.cpp | 18 +++++-------- .../Transforms/TargetLowering/LowerTypes.cpp | 4 +-- .../TargetLowering/RecordLayoutBuilder.cpp | 25 ++++++------------- .../Transforms/TargetLowering/Targets/X86.cpp | 5 ++-- clang/test/CIR/Lowering/address-space.cir | 3 +-- 11 files changed, 41 insertions(+), 56 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h index 75b3978ba1ad..cdca6d19be1c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h @@ -55,7 +55,7 @@ class CIRDataLayout { const StructLayout *getStructLayout(mlir::cir::StructType Ty) const; /// Internal helper method that returns requested alignment for type. - llvm::Align getAlignment(mlir::Type Ty, bool abi_or_pref) const; + llvm::Align getAlignment(mlir::Type Ty, bool abiOrPref) const; llvm::Align getABITypeAlign(mlir::Type ty) const { return getAlignment(ty, true); @@ -93,8 +93,6 @@ class CIRDataLayout { return layout.getTypeSizeInBits(Ty); } - // The implementation of this method is provided inline as it is particularly - // well suited to constant folding when called on a specific Type subclass. llvm::TypeSize getTypeSizeInBits(mlir::Type Ty) const; mlir::Type getIntPtrType(mlir::Type Ty) const { diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 3f56c8fd29e3..56e20ade88b2 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -135,6 +135,7 @@ struct MissingFeatures { static bool syncScopeID() { return false; } // Misc + static bool cacheRecordLayouts() { return false; } static bool capturedByInit() { return false; } static bool tryEmitAsConstant() { return false; } static bool incrementProfileCounter() { return false; } @@ -282,6 +283,9 @@ struct MissingFeatures { // up argument registers), but we do not yet track such cases. static bool chainCall() { return false; } + // ARM-specific feature that can be specified as a function attribute in C. + static bool cmseNonSecureCallAttr() { return false; } + // ABI-lowering has special handling for regcall calling convention (tries to // pass every argument in regs). We don't support it just yet. static bool regCall() { return false; } @@ -326,6 +330,9 @@ struct MissingFeatures { // CIR modules parsed from text form may not carry the triple or data layout // specs. We should make it always present. static bool makeTripleAlwaysPresent() { return false; } + + // This Itanium bit is currently being skipped in cir. + static bool itaniumRecordLayoutBuilderFinishLayout() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp index 6daa8781b453..12948d0eeb2d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp @@ -157,25 +157,25 @@ CIRDataLayout::getStructLayout(mlir::cir::StructType Ty) const { } /*! - \param abi_or_pref Flag that determines which alignment is returned. true + \param abiOrPref Flag that determines which alignment is returned. true returns the ABI alignment, false returns the preferred alignment. \param Ty The underlying type for which alignment is determined. - Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref + Get the ABI (\a abiOrPref == true) or preferred alignment (\a abiOrPref == false) for the requested type \a Ty. */ -llvm::Align CIRDataLayout::getAlignment(mlir::Type Ty, bool abi_or_pref) const { +llvm::Align CIRDataLayout::getAlignment(mlir::Type Ty, bool abiOrPref) const { if (llvm::isa(Ty)) { // Packed structure types always have an ABI alignment of one. - if (::cir::MissingFeatures::recordDeclIsPacked() && abi_or_pref) + if (::cir::MissingFeatures::recordDeclIsPacked() && abiOrPref) llvm_unreachable("NYI"); // Get the layout annotation... which is lazily created on demand. const StructLayout *Layout = getStructLayout(llvm::cast(Ty)); const llvm::Align Align = - abi_or_pref ? StructAlignment.ABIAlign : StructAlignment.PrefAlign; + abiOrPref ? StructAlignment.ABIAlign : StructAlignment.PrefAlign; return std::max(Align, Layout->getAlignment()); } @@ -184,7 +184,7 @@ llvm::Align CIRDataLayout::getAlignment(mlir::Type Ty, bool abi_or_pref) const { assert(!::cir::MissingFeatures::addressSpace()); // Fetch type alignment from MLIR's data layout. - unsigned align = abi_or_pref ? layout.getTypeABIAlignment(Ty) + unsigned align = abiOrPref ? layout.getTypeABIAlignment(Ty) : layout.getTypePreferredAlignment(Ty); return llvm::Align(align); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index 57d29643ca3c..42aae0a80d04 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -61,8 +61,8 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { // FIXME(cir): Here we fetch the width and alignment of a type considering the // current target. We can likely improve this using MLIR's data layout, or // some other interface, to abstract this away (e.g. type.getWidth() & - // type.getAlign()). I'm not sure if data layoot suffices because this would - // involve some other types such as vectors and complex numbers. + // type.getAlign()). Verify if data layout suffices because this would involve + // some other types such as vectors and complex numbers. // FIXME(cir): In the original codegen, this receives an AST type, meaning it // differs chars from integers, something that is not possible with the // current level of CIR. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp index 68b777fa7755..2744f67d19de 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "CIRRecordLayout.h" +#include "clang/CIR/MissingFeatures.h" namespace mlir { namespace cir { @@ -45,10 +46,8 @@ CIRRecordLayout::CIRRecordLayout( CXXInfo->NonVirtualAlignment = nonvirtualalignment; CXXInfo->PreferredNVAlignment = preferrednvalignment; CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject; - // FIXME(cir): I'm assuming that since we are not dealing with inherited - // classes yet, removing the following lines will be ok. - // CXXInfo->BaseOffsets = BaseOffsets; - // CXXInfo->VBaseOffsets = VBaseOffsets; + // FIXME(cir): Initialize base classes offsets. + assert(!::cir::MissingFeatures::getCXXRecordBases()); CXXInfo->HasOwnVFPtr = hasOwnVFPtr; CXXInfo->VBPtrOffset = vbptroffset; CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 553c639cf28e..42de07ec6965 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -42,8 +42,8 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, } // TODO(cir): There's some CC stuff related to no-proto functions here, but - // I'm skipping it since it requires CodeGen info. Maybe we can embbed this - // information in the FuncOp during CIRGen. + // its skipped here since it requires CodeGen info. Maybe this information + // could be embbed in the FuncOp during CIRGen. assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; @@ -120,8 +120,8 @@ void LowerModule::constructAttributeList(StringRef Name, // } // NOTE(cir): The original code adds default and no-builtin attributes here as - // well. AFAIK, these are ABI/Target-agnostic, so it would be better handled - // in CIRGen. Regardless, I'm leaving this comment here as a heads up. + // well. These are ABI/Target-agnostic, so it would be better handled in + // CIRGen. // Override some default IR attributes based on declaration-specific // information. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 27b515cc9939..831276ed5f27 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -434,8 +434,7 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { // If there is a dominating store to ReturnValue, we can elide // the load, zap the store, and usually zap the alloca. - // NOTE(cir): This seems like a premature optimization case, so I'm - // skipping it. + // NOTE(cir): This seems like a premature optimization case. Skipping it. if (::cir::MissingFeatures::returnValueDominatingStoreOptmiization()) { llvm_unreachable("NYI"); } @@ -453,12 +452,6 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { mlir::PatternRewriter::InsertionGuard guard(rewriter); NewFn->walk([&](ReturnOp returnOp) { rewriter.setInsertionPoint(returnOp); - - // TODO(cir): I'm not sure if we need this offset here or in CIRGen. - // Perhaps both? For now I'm just ignoring it. - // Value V = emitAddressAtOffset(*this, getResultAlloca(returnOp), - // RetAI); - RV = castReturnValue(returnOp->getOperand(0), RetAI.getCoerceToType(), *this); rewriter.replaceOpWithNewOp(returnOp, RV); @@ -655,7 +648,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, FuncType IRFuncTy = LM.getTypes().getFunctionType(CallInfo); - // NOTE(cir): Some target/ABI related checks happen here. I'm skipping them + // NOTE(cir): Some target/ABI related checks happen here. They are skipped // under the assumption that they are handled in CIRGen. // 1. Set up the arguments. @@ -737,7 +730,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!isa(I->getType())) { llvm_unreachable("NYI"); } else { - // NOTE(cir): I'm leaving L/RValue stuff for CIRGen to handle. + // NOTE(cir): L/RValue stuff are left for CIRGen to handle. Src = *I; } @@ -756,6 +749,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, Value Load = createCoercedValue(Src, ArgInfo.getCoerceToType(), *this); // FIXME(cir): We should probably handle CMSE non-secure calls here + assert(!::cir::MissingFeatures::cmseNonSecureCallAttr()); // since they are a ARM-specific feature. if (::cir::MissingFeatures::undef()) @@ -856,6 +850,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // FIXME(cir): Use return value slot here. Value RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. + assert(!::cir::MissingFeatures::volatileTypes()); // NOTE(cir): If the function returns, there should always be a valid // return value present. Instead of setting the return value here, we @@ -863,6 +858,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!RetVal) { RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. + assert(::cir::MissingFeatures::volatileTypes()); } // An empty record can overlap other data (if declared with @@ -870,8 +866,6 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // actual data to store. if (dyn_cast(RetTy) && cast(RetTy).getNumElements() != 0) { - // NOTE(cir): I'm assuming we don't need to change any offsets here. - // Value StorePtr = emitAddressAtOffset(*this, RetVal, RetAI); RetVal = createCoercedValue(newCallOp.getResult(), RetVal.getType(), *this); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index e7eaa2bda2d0..bdec98a64f43 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -108,8 +108,8 @@ mlir::Type LowerTypes::convertType(Type T) { /// LLVM IR representation for a given AST type. When a the ABI-specific /// function info sets a nullptr for a return or argument type, the default /// type given by this method is used. In CIR's case, its types are already - /// supposed to be ABI-specific, so this method is not really useful here. I'm - /// keeping it here for parity's sake. + /// supposed to be ABI-specific, so this method is not really useful here. + /// It's kept here for codegen parity's sake. // Certain CIR types are already ABI-specific, so we just return them. if (isa(T)) { diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index 2f947c5143ef..16dccb27257d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -239,18 +239,10 @@ void ItaniumRecordLayoutBuilder::layout(const StructType RT) { layoutFields(RT); - // NonVirtualSize = Context.toCharUnitsFromBits( - // llvm::alignTo(getSizeInBits(), - // Context.getTargetInfo().getCharAlign())); - // NonVirtualAlignment = Alignment; - // PreferredNVAlignment = PreferredAlignment; - - // // Lay out the virtual bases and add the primary virtual base offsets. - // LayoutVirtualBases(RD, RD); - - // // Finally, round the size of the total struct up to the alignment - // // of the struct itself. - // FinishLayout(RD); + // FIXME(cir): Handle virtual-related layouts. + assert(!::cir::MissingFeatures::getCXXRecordBases()); + + assert(!::cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); } void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { @@ -558,10 +550,6 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { return false; case clang::TargetCXXABI::UseTailPaddingUnlessPOD03: - // FIXME: To the extent that this is meant to cover the Itanium ABI - // rules, we should implement the restrictions about over-sized - // bitfields: - // // http://itanium-cxx-abi.github.io/cxx-abi/abi.html#POD : // In general, a type is considered a POD for the purposes of // layout if it is a POD type (in the sense of ISO C++ @@ -605,7 +593,8 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { assert(RT.isComplete() && "Cannot get layout of forward declarations!"); - // FIXME(cir): Cache the layout. Also, use a more MLIR-based approach. + // FIXME(cir): Use a more MLIR-based approach by using it's buitin data layout + // features, such as interfaces, cacheing, and the DLTI dialect. const CIRRecordLayout *NewEntry = nullptr; @@ -642,8 +631,8 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { Builder.PrimaryBaseIsVirtual, nullptr, false, false); } - // TODO(cir): Cache the layout. // TODO(cir): Add option to dump the layouts. + assert(!::cir::MissingFeatures::cacheRecordLayouts()); return *NewEntry; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 4a6124ad898a..f18f638660c4 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -235,9 +235,8 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, } else if (isa(Ty)) { // FIXME(cir): Clang's BuiltinType::Kind allow comparisons (GT, LT, etc). - // We should implement this in CIR to simplify the conditions below. BTW, - // I'm not sure if the comparisons below are truly equivalent to the ones - // in Clang. + // We should implement this in CIR to simplify the conditions below. Hence, + // Comparisons below might not be truly equivalent to the ones in Clang. if (isa(Ty)) { Current = Class::Integer; } diff --git a/clang/test/CIR/Lowering/address-space.cir b/clang/test/CIR/Lowering/address-space.cir index c3426899d1e2..ee857bd32119 100644 --- a/clang/test/CIR/Lowering/address-space.cir +++ b/clang/test/CIR/Lowering/address-space.cir @@ -5,8 +5,7 @@ module attributes { cir.triple = "spirv64-unknown-unknown", - llvm.data_layout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1", - dlti.dl_spec = #dlti.dl_spec<> // Avoid assert errors. + llvm.data_layout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1" } { cir.global external addrspace(offload_global) @addrspace1 = #cir.int<1> : !s32i // LLVM: @addrspace1 = addrspace(1) global i32 From 61473ebc5d98c03943cbc93bb102dc03d15f1aaa Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 21 Aug 2024 06:13:51 +0800 Subject: [PATCH 1775/2301] [CIR][CIRGen] Add CIRGen support for pointer-to-member-functions (#722) This PR adds the initial CIRGen support for pointer-to-member-functions. It contains the following new types, attributes, and operations: - `!cir.method`, which represents the pointer-to-member-function type. - `#cir.method`, which represents a literal pointer-to-member-function value that points to ~~non-virtual~~ member functions. - ~~`#cir.virtual_method`, which represents a literal pointer-to-member-function value that points to virtual member functions.~~ - ~~`cir.get_method_callee`~~ `cir.get_method`, which resolves a pointer-to-member-function to a function pointer as the callee. See the new test at `clang/test/CIR/CIRGen/pointer-to-member-func.cpp` for how these new CIR stuff works to support pointer-to-member-functions. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 42 +++++++++++ .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 48 ++++++++++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 54 ++++++++++++++ .../include/clang/CIR/Dialect/IR/CIRTypes.td | 36 ++++++++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 15 ++++ clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 4 + clang/lib/CIR/CodeGen/CIRGenCall.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 47 ++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 14 +++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 + clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 24 ++++++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 11 ++- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 73 +++++++++++++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 57 ++++++++++++++- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 33 +++++++++ .../CIR/CodeGen/pointer-to-member-func.cpp | 42 +++++++++++ 18 files changed, 498 insertions(+), 13 deletions(-) create mode 100644 clang/test/CIR/CodeGen/pointer-to-member-func.cpp diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index b327645d1958..d2fc105f502d 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -111,6 +111,10 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return getPointerTo(::mlir::cir::VoidType::get(getContext()), langAS); } + mlir::cir::PointerType getVoidPtrTy(mlir::cir::AddressSpaceAttr cirAS) { + return getPointerTo(::mlir::cir::VoidType::get(getContext()), cirAS); + } + mlir::cir::BoolAttr getCIRBoolAttr(bool state) { return mlir::cir::BoolAttr::get(getContext(), getBoolTy(), state); } @@ -590,6 +594,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return create(loc, value); } + mlir::cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, + mlir::Value stride) { + return create(loc, base.getType(), base, stride); + } + mlir::cir::CallOp createCallOp(mlir::Location loc, mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), @@ -678,6 +687,39 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createTryCallOp(loc, mlir::SymbolRefAttr(), fn_type.getReturnType(), resOperands); } + + struct GetMethodResults { + mlir::Value callee; + mlir::Value adjustedThis; + }; + + GetMethodResults createGetMethod(mlir::Location loc, mlir::Value method, + mlir::Value objectPtr) { + // Build the callee function type. + auto methodFuncTy = + mlir::cast(method.getType()).getMemberFuncTy(); + auto methodFuncInputTypes = methodFuncTy.getInputs(); + + auto objectPtrTy = mlir::cast(objectPtr.getType()); + auto objectPtrAddrSpace = + mlir::cast_if_present( + objectPtrTy.getAddrSpace()); + auto adjustedThisTy = getVoidPtrTy(objectPtrAddrSpace); + + llvm::SmallVector calleeFuncInputTypes{adjustedThisTy}; + calleeFuncInputTypes.insert(calleeFuncInputTypes.end(), + methodFuncInputTypes.begin(), + methodFuncInputTypes.end()); + auto calleeFuncTy = + methodFuncTy.clone(calleeFuncInputTypes, methodFuncTy.getReturnType()); + // TODO(cir): consider the address space of the callee. + assert(!MissingFeatures::addressSpace()); + auto calleeTy = getPointerTo(calleeFuncTy); + + auto op = create(loc, calleeTy, adjustedThisTy, + method, objectPtr); + return {op.getCallee(), op.getAdjustedThis()}; + } }; } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index fa3c8004701b..75180131adb3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -441,6 +441,54 @@ def DataMemberAttr : CIR_Attr<"DataMember", "data_member", }]; } +//===----------------------------------------------------------------------===// +// MethodAttr +//===----------------------------------------------------------------------===// + +def MethodAttr : CIR_Attr<"Method", "method", [TypedAttrInterface]> { + let summary = "Holds a constant pointer-to-member-function value"; + let description = [{ + A method attribute is a literal attribute that represents a constant + pointer-to-member-function value. + + If the member function is a non-virtual function, the `symbol` parameter + gives the global symbol for the non-virtual member function. + + If the member function is a virtual function, the `vtable_offset` parameter + gives the offset of the vtable entry corresponding to the virtual member + function. + + `symbol` and `vtable_offset` cannot be present at the same time. If both of + `symbol` and `vtable_offset` are not present, the attribute represents a + null pointer constant. + }]; + + let parameters = (ins AttributeSelfTypeParameter< + "", "mlir::cir::MethodType">:$type, + OptionalParameter< + "std::optional">:$symbol, + OptionalParameter< + "std::optional">:$vtable_offset); + + let builders = [ + AttrBuilderWithInferredContext<(ins "mlir::cir::MethodType":$type), [{ + return $_get(type.getContext(), type, std::nullopt, std::nullopt); + }]>, + AttrBuilderWithInferredContext<(ins "mlir::cir::MethodType":$type, + "FlatSymbolRefAttr":$symbol), [{ + return $_get(type.getContext(), type, symbol, std::nullopt); + }]>, + AttrBuilderWithInferredContext<(ins "mlir::cir::MethodType":$type, + "uint64_t":$vtable_offset), [{ + return $_get(type.getContext(), type, std::nullopt, vtable_offset); + }]>, + ]; + + let hasCustomAssemblyFormat = 1; + + let genVerifyDecl = 1; +} + //===----------------------------------------------------------------------===// // SignedOverflowBehaviorAttr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 5226bf88e82c..e17e6b583d9f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2589,6 +2589,60 @@ def GetRuntimeMemberOp : CIR_Op<"get_runtime_member"> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// GetMethodOp +//===----------------------------------------------------------------------===// + +def GetMethodOp : CIR_Op<"get_method"> { + let summary = "Resolve a method to a function pointer as callee"; + let description = [{ + The `cir.get_method` operation takes a method and an object as input, and + yields a function pointer that points to the actual function corresponding + to the input method. The operation also applies any necessary adjustments to + the input object pointer for calling the method and yields the adjusted + pointer. + + This operation is generated when calling a method through a pointer-to- + member-function in C++: + + ```cpp + // Foo *object; + // int arg; + // void (Foo::*method)(int); + + (object->*method)(arg); + ``` + + The code above will generate CIR similar as: + + ```mlir + // %object = ... + // %arg = ... + // %method = ... + %callee, %this = cir.get_method %method, %object + cir.call %callee(%this, %arg) + ``` + + The method type must match the callee type. That is: + - The return type of the method must match the return type of the callee. + - The first parameter of the callee must have type `!cir.ptr`. + - Types of other parameters of the callee must match the parameters of the + method. + }]; + + let arguments = (ins CIR_MethodType:$method, StructPtr:$object); + let results = (outs FuncPtr:$callee, VoidPtr:$adjusted_this); + + let assemblyFormat = [{ + $method `,` $object + `:` `(` qualified(type($method)) `,` qualified(type($object)) `)` + `->` `(` qualified(type($callee)) `,` qualified(type($adjusted_this)) `)` + attr-dict + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // VecInsertOp //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 7f6a3084c80f..1c63fcd84c67 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -408,6 +408,26 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { }]; } +//===----------------------------------------------------------------------===// +// MethodType +//===----------------------------------------------------------------------===// + +def CIR_MethodType : CIR_Type<"Method", "method", + [DeclareTypeInterfaceMethods]> { + let summary = "CIR type that represents C++ pointer-to-member-function type"; + let description = [{ + `cir.method` models the pointer-to-member-function type in C++. The layout + of this type is ABI-dependent. + }]; + + let parameters = (ins "mlir::cir::FuncType":$memberFuncTy, + "mlir::cir::StructType":$clsTy); + + let assemblyFormat = [{ + `<` qualified($memberFuncTy) `in` $clsTy `>` + }]; +} + //===----------------------------------------------------------------------===// // Exception info type // @@ -517,6 +537,15 @@ def ArrayPtr : Type< ]>, "!cir.ptr"> { } +// Pointer to functions +def FuncPtr : Type< + And<[ + CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::mlir::cir::FuncType>(" + "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())">, + ]>, "!cir.ptr"> { +} + //===----------------------------------------------------------------------===// // StructType (defined in cpp files) //===----------------------------------------------------------------------===// @@ -529,9 +558,10 @@ def CIR_StructType : Type($_self)">, //===----------------------------------------------------------------------===// def CIR_AnyType : AnyTypeOf<[ - CIR_IntType, CIR_PointerType, CIR_DataMemberType, CIR_BoolType, CIR_ArrayType, - CIR_VectorType, CIR_FuncType, CIR_VoidType, CIR_StructType, CIR_ExceptionType, - CIR_AnyFloat, CIR_FP16, CIR_BFloat16, CIR_ComplexType + CIR_IntType, CIR_PointerType, CIR_DataMemberType, CIR_MethodType, + CIR_BoolType, CIR_ArrayType, CIR_VectorType, CIR_FuncType, CIR_VoidType, + CIR_StructType, CIR_ExceptionType, CIR_AnyFloat, CIR_FP16, CIR_BFloat16, + CIR_ComplexType ]>; #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 85c8b5aa2bea..f12c6c94fdca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -241,6 +241,16 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::DataMemberAttr::get(getContext(), ty, std::nullopt); } + mlir::cir::MethodAttr getMethodAttr(mlir::cir::MethodType ty, + mlir::cir::FuncOp methodFuncOp) { + auto methodFuncSymbolRef = mlir::FlatSymbolRefAttr::get(methodFuncOp); + return mlir::cir::MethodAttr::get(ty, methodFuncSymbolRef); + } + + mlir::cir::MethodAttr getNullMethodAttr(mlir::cir::MethodType ty) { + return mlir::cir::MethodAttr::get(ty); + } + // TODO(cir): Once we have CIR float types, replace this by something like a // NullableValueInterface to allow for type-independent queries. bool isNullValue(mlir::Attribute attr) const { @@ -520,6 +530,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, ty, getNullDataMemberAttr(ty)); } + mlir::cir::ConstantOp getNullMethodPtr(mlir::cir::MethodType ty, + mlir::Location loc) { + return create(loc, ty, getNullMethodAttr(ty)); + } + mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { // TODO: dispatch creation for primitive types. assert((mlir::isa(ty) || diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index e29f843d235f..2df22b70d91a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -310,6 +310,10 @@ class CIRGenCXXABI { QualType DestRecordTy, mlir::cir::PointerType DestCIRTy, bool isRefCast, Address Src) = 0; + + virtual mlir::cir::MethodAttr + buildVirtualMethodAttr(mlir::cir::MethodType MethodTy, + const CXXMethodDecl *MD) = 0; }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 82b6625a43bc..36a25ede4bae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -731,7 +731,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, [[maybe_unused]] auto resultTypes = CalleePtr->getResultTypes(); [[maybe_unused]] auto FuncPtrTy = mlir::dyn_cast(resultTypes.front()); - assert((resultTypes.size() == 1) && FuncPtrTy && + assert(FuncPtrTy && mlir::isa(FuncPtrTy.getPointee()) && "expected pointer to function"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 7100e62e1d12..4d45b22af275 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2842,7 +2842,7 @@ RValue CIRGenFunction::buildCXXMemberCallExpr(const CXXMemberCallExpr *CE, const Expr *callee = CE->getCallee()->IgnoreParens(); if (isa(callee)) - llvm_unreachable("NYI"); + return buildCXXMemberPointerCallExpr(CE, ReturnValue); const auto *ME = cast(callee); const auto *MD = cast(ME->getMemberDecl()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 07a6c01672e1..73e7191cf0ad 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -105,6 +105,53 @@ static CXXRecordDecl *getCXXRecord(const Expr *E) { return cast(Ty->getDecl()); } +RValue +CIRGenFunction::buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, + ReturnValueSlot ReturnValue) { + const BinaryOperator *BO = + cast(E->getCallee()->IgnoreParens()); + const Expr *BaseExpr = BO->getLHS(); + const Expr *MemFnExpr = BO->getRHS(); + + const auto *MPT = MemFnExpr->getType()->castAs(); + const auto *FPT = MPT->getPointeeType()->castAs(); + const auto *RD = + cast(MPT->getClass()->castAs()->getDecl()); + + // Emit the 'this' pointer. + Address This = Address::invalid(); + if (BO->getOpcode() == BO_PtrMemI) + This = buildPointerWithAlignment(BaseExpr, nullptr, KnownNonNull); + else + This = buildLValue(BaseExpr).getAddress(); + + buildTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(), + QualType(MPT->getClass(), 0)); + + // Get the member function pointer. + mlir::Value MemFnPtr = buildScalarExpr(MemFnExpr); + + // Resolve the member function pointer to the actual callee and adjust the + // "this" pointer for call. + auto Loc = getLoc(E->getExprLoc()); + auto [CalleePtr, AdjustedThis] = + builder.createGetMethod(Loc, MemFnPtr, This.getPointer()); + + // Prepare the call arguments. + CallArgList ArgsList; + ArgsList.add(RValue::get(AdjustedThis), getContext().VoidPtrTy); + buildCallArgs(ArgsList, FPT, E->arguments()); + + RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1); + + // Build the call. + CIRGenCallee Callee(FPT, CalleePtr.getDefiningOp()); + return buildCall(CGM.getTypes().arrangeCXXMethodCall(ArgsList, FPT, required, + /*PrefixSize=*/0), + Callee, ReturnValue, ArgsList, nullptr, E == MustTailCall, + Loc); +} + RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 636b1af1b579..1c8c7502f60e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// #include "Address.h" +#include "CIRGenCXXABI.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" @@ -1890,9 +1891,16 @@ mlir::Value CIRGenModule::buildMemberPointerConstant(const UnaryOperator *E) { const auto *decl = cast(E->getSubExpr())->getDecl(); // A member function pointer. - // Member function pointer is not supported yet. - if (const auto *methodDecl = dyn_cast(decl)) - assert(0 && "not implemented"); + if (const auto *methodDecl = dyn_cast(decl)) { + auto ty = mlir::cast(getCIRType(E->getType())); + if (methodDecl->isVirtual()) + return builder.create( + loc, ty, getCXXABI().buildVirtualMethodAttr(ty, methodDecl)); + + auto methodFuncOp = GetAddrOfFunction(methodDecl); + return builder.create( + loc, ty, builder.getMethodAttr(ty, methodFuncOp)); + } auto ty = mlir::cast(getCIRType(E->getType())); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 7933f2f8603d..aed756f71639 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1680,7 +1680,10 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { assert(!MissingFeatures::cxxABI()); const MemberPointerType *MPT = CE->getType()->getAs(); - assert(!MPT->isMemberFunctionPointerType() && "NYI"); + if (MPT->isMemberFunctionPointerType()) { + auto Ty = mlir::cast(CGF.getCIRType(DestTy)); + return Builder.getNullMethodPtr(Ty, CGF.getLoc(E->getExprLoc())); + } auto Ty = mlir::cast(CGF.getCIRType(DestTy)); return Builder.getNullDataMemberPtr(Ty, CGF.getLoc(E->getExprLoc())); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index a3ca8857e8da..c411b86ac067 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -622,6 +622,8 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildCXXMemberCallExpr(const clang::CXXMemberCallExpr *E, ReturnValueSlot ReturnValue); + RValue buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, + ReturnValueSlot ReturnValue); RValue buildCXXMemberOrOperatorMemberCallExpr( const clang::CallExpr *CE, const clang::CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 260d1c24baa7..a0f3363dd6d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -295,6 +295,10 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { mlir::cir::PointerType DestCIRTy, bool isRefCast, Address Src) override; + mlir::cir::MethodAttr + buildVirtualMethodAttr(mlir::cir::MethodType MethodTy, + const CXXMethodDecl *MD) override; + /**************************** RTTI Uniqueness ******************************/ protected: /// Returns true if the ABI requires RTTI type_info objects to be unique @@ -2486,3 +2490,23 @@ mlir::Value CIRGenItaniumCXXABI::buildDynamicCast( return CGF.getBuilder().createDynCast(Loc, Src.getPointer(), DestCIRTy, isRefCast, castInfo); } + +mlir::cir::MethodAttr +CIRGenItaniumCXXABI::buildVirtualMethodAttr(mlir::cir::MethodType MethodTy, + const CXXMethodDecl *MD) { + assert(MD->isVirtual() && "only deal with virtual member functions"); + + uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD); + uint64_t VTableOffset; + if (CGM.getItaniumVTableContext().isRelativeLayout()) { + // Multiply by 4-byte relative offsets. + VTableOffset = Index * 4; + } else { + const ASTContext &Context = getContext(); + CharUnits PointerWidth = Context.toCharUnitsFromBits( + Context.getTargetInfo().getPointerWidth(LangAS::Default)); + VTableOffset = Index * PointerWidth.getQuantity(); + } + + return mlir::cir::MethodAttr::get(MethodTy, VTableOffset); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 541e15a0930e..ab2e4129ac60 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -712,13 +712,18 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case Type::MemberPointer: { const auto *MPT = cast(Ty); - assert(MPT->isMemberDataPointer() && "ptr-to-member-function is NYI"); auto memberTy = ConvertType(MPT->getPointeeType()); auto clsTy = mlir::cast( ConvertType(QualType(MPT->getClass(), 0))); - ResultType = - mlir::cir::DataMemberType::get(Builder.getContext(), memberTy, clsTy); + if (MPT->isMemberDataPointer()) + ResultType = + mlir::cir::DataMemberType::get(Builder.getContext(), memberTy, clsTy); + else { + auto memberFuncTy = mlir::cast(memberTy); + ResultType = + mlir::cir::MethodType::get(Builder.getContext(), memberFuncTy, clsTy); + } break; } diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index e4675345a00f..d11ebb1c0c2f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -474,6 +474,79 @@ DataMemberAttr::verify(function_ref emitError, return success(); } +//===----------------------------------------------------------------------===// +// MethodAttr definitions +//===----------------------------------------------------------------------===// + +LogicalResult +MethodAttr::verify(function_ref<::mlir::InFlightDiagnostic()> emitError, + mlir::cir::MethodType type, + std::optional symbol, + std::optional vtable_offset) { + if (symbol.has_value() && vtable_offset.has_value()) { + emitError() << "at most one of symbol and vtable_offset can be present " + "in #cir.method"; + return failure(); + } + + return success(); +} + +Attribute MethodAttr::parse(AsmParser &parser, Type odsType) { + auto ty = mlir::cast(odsType); + + if (parser.parseLess()) + return {}; + + // Try to parse the null pointer constant. + if (parser.parseOptionalKeyword("null").succeeded()) { + if (parser.parseGreater()) + return {}; + return get(ty); + } + + // Try to parse a flat symbol ref for a pointer to non-virtual member + // function. + FlatSymbolRefAttr symbol; + auto parseSymbolRefResult = parser.parseOptionalAttribute(symbol); + if (parseSymbolRefResult.has_value()) { + if (parseSymbolRefResult.value().failed()) + return {}; + if (parser.parseGreater()) + return {}; + return get(ty, symbol); + } + + // Parse a uint64 that represents the vtable offset. + std::uint64_t vtableOffset = 0; + if (parser.parseKeyword("vtable_offset")) + return {}; + if (parser.parseEqual()) + return {}; + if (parser.parseInteger(vtableOffset)) + return {}; + + if (parser.parseGreater()) + return {}; + + return get(ty, vtableOffset); +} + +void MethodAttr::print(AsmPrinter &printer) const { + auto symbol = getSymbol(); + auto vtableOffset = getVtableOffset(); + + printer << '<'; + if (symbol.has_value()) { + printer << *symbol; + } else if (vtableOffset.has_value()) { + printer << "vtable_offset = " << *vtableOffset; + } else { + printer << "null"; + } + printer << '>'; +} + //===----------------------------------------------------------------------===// // DynamicCastInfoAtttr definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 807616655829..bd32e78fd4f4 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -375,7 +375,7 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return op->emitOpError("nullptr expects pointer type"); } - if (isa(attrType)) { + if (isa(attrType)) { // More detailed type verifications are already done in // DataMemberAttr::verify. Don't need to repeat here. return success(); @@ -3483,6 +3483,61 @@ LogicalResult GetRuntimeMemberOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// GetMethodOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult GetMethodOp::verify() { + auto methodTy = getMethod().getType(); + + // Assume objectTy is !cir.ptr + auto objectPtrTy = mlir::cast(getObject().getType()); + auto objectTy = objectPtrTy.getPointee(); + + if (methodTy.getClsTy() != objectTy) { + emitError() << "method class type and object type do not match"; + return mlir::failure(); + } + + // Assume methodFuncTy is !cir.func + auto calleePtrTy = mlir::cast(getCallee().getType()); + auto calleeTy = mlir::cast(calleePtrTy.getPointee()); + auto methodFuncTy = methodTy.getMemberFuncTy(); + + // We verify at here that calleeTy is !cir.func, !Args)> + // Note that the first parameter type of the callee is !cir.ptr instead + // of !cir.ptr because the "this" pointer may be adjusted before calling + // the callee. + + if (methodFuncTy.getReturnType() != calleeTy.getReturnType()) { + emitError() << "method return type and callee return type do not match"; + return mlir::failure(); + } + + auto calleeArgsTy = calleeTy.getInputs(); + auto methodFuncArgsTy = methodFuncTy.getInputs(); + + if (calleeArgsTy.empty()) { + emitError() << "callee parameter list lacks receiver object ptr"; + return mlir::failure(); + } + + auto calleeThisArgPtrTy = + mlir::dyn_cast(calleeArgsTy[0]); + if (!calleeThisArgPtrTy || + !mlir::isa(calleeThisArgPtrTy.getPointee())) { + emitError() << "the first parameter of callee must be a void pointer"; + return mlir::failure(); + } + + if (calleeArgsTy.slice(1) != methodFuncArgsTy) { + emitError() << "callee parameters and method parameters do not match"; + return mlir::failure(); + } + + return mlir::success(); +} + //===----------------------------------------------------------------------===// // InlineAsmOp Definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index c83c4a6f1109..c37fe2788020 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -932,6 +932,39 @@ llvm::ArrayRef FuncType::getReturnTypes() const { bool FuncType::isVoid() const { return mlir::isa(getReturnType()); } +//===----------------------------------------------------------------------===// +// MethodType Definitions +//===----------------------------------------------------------------------===// + +static mlir::Type getMethodLayoutType(mlir::MLIRContext *ctx) { + // With Itanium ABI, member function pointers have the same layout as the + // following struct: struct { fnptr_t, ptrdiff_t }, where fnptr_t is a + // function pointer type. + // TODO: consider member function pointer layout in other ABIs + auto voidPtrTy = mlir::cir::PointerType::get(mlir::cir::VoidType::get(ctx)); + mlir::Type fields[2]{voidPtrTy, voidPtrTy}; + return mlir::cir::StructType::get(ctx, fields, /*packed=*/false, + mlir::cir::StructType::Struct); +} + +llvm::TypeSize +MethodType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return dataLayout.getTypeSizeInBits(getMethodLayoutType(getContext())); +} + +uint64_t +MethodType::getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return dataLayout.getTypeSizeInBits(getMethodLayoutType(getContext())); +} + +uint64_t +MethodType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return dataLayout.getTypeSizeInBits(getMethodLayoutType(getContext())); +} + //===----------------------------------------------------------------------===// // PointerType Definitions //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/pointer-to-member-func.cpp b/clang/test/CIR/CodeGen/pointer-to-member-func.cpp new file mode 100644 index 000000000000..6d226a6875c2 --- /dev/null +++ b/clang/test/CIR/CodeGen/pointer-to-member-func.cpp @@ -0,0 +1,42 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Foo { + void m1(int); + virtual void m2(int); + virtual void m3(int); +}; + +auto make_non_virtual() -> void (Foo::*)(int) { + return &Foo::m1; +} + +// CHECK-LABEL: cir.func @_Z16make_non_virtualv() -> !cir.method in !ty_22Foo22> +// CHECK: %{{.+}} = cir.const #cir.method<@_ZN3Foo2m1Ei> : !cir.method in !ty_22Foo22> +// CHECK: } + +auto make_virtual() -> void (Foo::*)(int) { + return &Foo::m3; +} + +// CHECK-LABEL: cir.func @_Z12make_virtualv() -> !cir.method in !ty_22Foo22> +// CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_22Foo22> +// CHECK: } + +auto make_null() -> void (Foo::*)(int) { + return nullptr; +} + +// CHECK-LABEL: cir.func @_Z9make_nullv() -> !cir.method in !ty_22Foo22> +// CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_22Foo22> +// CHECK: } + +void call(Foo *obj, void (Foo::*func)(int), int arg) { + (obj->*func)(arg); +} + +// CHECK-LABEL: cir.func @_Z4callP3FooMS_FviEi +// CHECK: %[[CALLEE:.+]], %[[THIS:.+]] = cir.get_method %{{.+}}, %{{.+}} : (!cir.method in !ty_22Foo22>, !cir.ptr) -> (!cir.ptr, !s32i)>>, !cir.ptr) +// CHECK-NEXT: %[[#ARG:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CHECK-NEXT: cir.call %[[CALLEE]](%[[THIS]], %[[#ARG]]) : (!cir.ptr, !s32i)>>, !cir.ptr, !s32i) -> () +// CHECK: } From 9053d41c0ad832f71c5c74e7e6d7a00fb1bdf1bd Mon Sep 17 00:00:00 2001 From: 566hub <134911144+566hub@users.noreply.github.com> Date: Wed, 21 Aug 2024 07:01:00 +0800 Subject: [PATCH 1776/2301] [CIR][CIRGen] Achieve union's bitfields additionally. (#742) Achieve union's bitfields additionally. --- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 17 +++++----- clang/test/CIR/CodeGen/bitfield-union.c | 32 +++++++++++++++++++ 2 files changed, 41 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/bitfield-union.c diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 958b23a8950f..1e58ee4ff198 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -271,12 +271,6 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) { CharUnits Size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize() : astRecordLayout.getSize(); - if (recordDecl->isUnion()) { - llvm_unreachable("NYI"); - // lowerUnion(); - // computeVolatileBitfields(); - return; - } accumulateFields(); // RD implies C++ @@ -316,13 +310,20 @@ void CIRRecordLowering::lowerUnion() { // type would work fine and be simpler but would be different than what we've // been doing and cause lit tests to change. for (const auto *Field : recordDecl->fields()) { + + mlir::Type FieldType = nullptr; if (Field->isBitField()) { if (Field->isZeroLengthBitField()) continue; - llvm_unreachable("NYI"); + + FieldType = getBitfieldStorageType(Field->getBitWidthValue()); + + setBitFieldInfo(Field, CharUnits::Zero(), FieldType); + } else { + FieldType = getStorageType(Field); } fields[Field->getCanonicalDecl()] = 0; - auto FieldType = getStorageType(Field); + // auto FieldType = getStorageType(Field); // Compute zero-initializable status. // This union might not be zero initialized: it may contain a pointer to // data member which might have some exotic initialization sequence. diff --git a/clang/test/CIR/CodeGen/bitfield-union.c b/clang/test/CIR/CodeGen/bitfield-union.c new file mode 100644 index 000000000000..96b12e367bc9 --- /dev/null +++ b/clang/test/CIR/CodeGen/bitfield-union.c @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void main() { + union demo { + int x; + int y : 4; + int z : 8; + }; + union demo d; + d.x = 1; + d.y = 2; + d.z = 0; +} + +// CHECK: !ty_22demo22 = !cir.struct +// CHECK: #bfi_y = #cir.bitfield_info +// CHECK: #bfi_z = #cir.bitfield_info + +// cir.func no_proto @main() extra(#fn_attr) { +// %0 = cir.alloca !ty_22demo22, !cir.ptr, ["d"] {alignment = 4 : i64} +// %1 = cir.const #cir.int<1> : !s32i +// %2 = cir.get_member %0[0] {name = "x"} : !cir.ptr -> !cir.ptr +// cir.store %1, %2 : !s32i, !cir.ptr +// %3 = cir.const #cir.int<2> : !s32i +// %4 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// %5 = cir.set_bitfield(#bfi_y, %4 : !cir.ptr, %3 : !s32i) -> !s32i +// %6 = cir.const #cir.int<0> : !s32i loc(#loc10) +// %7 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// %8 = cir.set_bitfield(#bfi_z, %7 : !cir.ptr, %6 : !s32i) -> !s32i +// cir.return +// } From a401af3843cb540c739ede3cfe6add6d25108aac Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 20 Aug 2024 17:22:13 -0700 Subject: [PATCH 1777/2301] [CIR] Cleanup most recent warnings --- clang/lib/CIR/CodeGen/CIRGenBuilder.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 +++-- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 2 -- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp index cc2a3aee0ab4..6fda24084658 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp @@ -65,4 +65,4 @@ mlir::cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, return create(loc, intTy, mlir::cir::IntAttr::get(t, C)); } -}; // namespace cir +} // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index f12c6c94fdca..5a49163ac1af 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -881,9 +881,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { Offset %= EltSize; } else if (auto StructTy = mlir::dyn_cast(Ty)) { auto Elts = StructTy.getMembers(); - unsigned Pos = 0; + int64_t Pos = 0; for (size_t I = 0; I < Elts.size(); ++I) { - auto EltSize = Layout.getTypeAllocSize(Elts[I]); + int64_t EltSize = + (int64_t)Layout.getTypeAllocSize(Elts[I]).getFixedValue(); unsigned AlignMask = Layout.getABITypeAlign(Elts[I]).value() - 1; Pos = (Pos + AlignMask) & ~AlignMask; if (Offset < Pos + EltSize) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 73e7191cf0ad..d1be80d1f968 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -115,8 +115,6 @@ CIRGenFunction::buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, const auto *MPT = MemFnExpr->getType()->castAs(); const auto *FPT = MPT->getPointeeType()->castAs(); - const auto *RD = - cast(MPT->getClass()->castAs()->getDecl()); // Emit the 'this' pointer. Address This = Address::invalid(); From 1ab8276695fb62f357e40bc133c75cff7485e488 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 20 Aug 2024 17:33:33 -0700 Subject: [PATCH 1778/2301] [CIR] More warning fixed by another fresh build --- clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp | 3 --- .../Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp | 2 +- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 ++++ 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index ca6970f04830..d968afc4c535 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -385,9 +385,6 @@ void LoweringPreparePass::lowerUnaryOp(UnaryOp op) { resultImag = builder.createUnaryOp(loc, mlir::cir::UnaryOpKind::Minus, operandImag); break; - - default: - llvm_unreachable("unsupported complex unary op kind"); } auto result = builder.createComplexCreate(loc, resultReal, resultImag); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index 16dccb27257d..ea8ef6f28144 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -29,7 +29,7 @@ namespace { /// Keeps track of which empty subobjects exist at different offsets while /// laying out a C++ class. class EmptySubobjectMap { - const CIRLowerContext &Context; + [[maybe_unused]] const CIRLowerContext &Context; uint64_t CharWidth; /// The class whose empty entries we're keeping track of. diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3bcf1235f505..a3393edfd72f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -824,6 +824,10 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { castOp, llvmDstTy, llvmSrcVal); break; } + default: { + return castOp.emitError("Unhandled cast kind: ") + << castOp.getKindAttrName(); + } } return mlir::success(); From 3637ba00c5598e9189914bc65aa097cb3970d5a4 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 22 Aug 2024 00:19:02 +0800 Subject: [PATCH 1779/2301] [CIR] Add select operation (#796) This PR adds a new `cir.select` operation. This operation won't be generated directly by CIRGen but it is useful during further CIR to CIR transformations. This PR addresses #785 . --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 18 ++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 40 +++++++++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 13 +++ .../CIR/Dialect/Transforms/CIRSimplify.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 82 +++++++++++++++---- clang/test/CIR/Lowering/select.cir | 50 +++++++++++ clang/test/CIR/Transforms/select.cir | 26 ++++++ 7 files changed, 216 insertions(+), 15 deletions(-) create mode 100644 clang/test/CIR/Lowering/select.cir create mode 100644 clang/test/CIR/Transforms/select.cir diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index d2fc105f502d..a458547d330d 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -274,6 +274,24 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createBinop(lhs, mlir::cir::BinOpKind::Mul, val); } + mlir::Value createSelect(mlir::Location loc, mlir::Value condition, + mlir::Value trueValue, mlir::Value falseValue) { + assert(trueValue.getType() == falseValue.getType() && + "trueValue and falseValue should have the same type"); + return create(loc, trueValue.getType(), condition, + trueValue, falseValue); + } + + mlir::Value createLogicalAnd(mlir::Location loc, mlir::Value lhs, + mlir::Value rhs) { + return createSelect(loc, lhs, rhs, getBool(false, loc)); + } + + mlir::Value createLogicalOr(mlir::Location loc, mlir::Value lhs, + mlir::Value rhs) { + return createSelect(loc, lhs, getBool(true, loc), rhs); + } + mlir::Value createComplexCreate(mlir::Location loc, mlir::Value real, mlir::Value imag) { auto resultComplexTy = diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e17e6b583d9f..15d8cbb883e7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -761,6 +761,46 @@ def TernaryOp : CIR_Op<"ternary", }]; } +//===----------------------------------------------------------------------===// +// SelectOp +//===----------------------------------------------------------------------===// + +def SelectOp : CIR_Op<"select", [Pure, + AllTypesMatch<["true_value", "false_value", "result"]>]> { + let summary = "Yield one of two values based on a boolean value"; + let description = [{ + The `cir.select` operation takes three operands. The first operand + `condition` is a boolean value of type `!cir.bool`. The second and the third + operand can be of any CIR types, but their types must be the same. If the + first operand is `true`, the operation yields its second operand. Otherwise, + the operation yields its third operand. + + Example: + + ```mlir + %0 = cir.const #cir.bool : !cir.bool + %1 = cir.const #cir.int<42> : !s32i + %2 = cir.const #cir.int<72> : !s32i + %3 = cir.select if %0 then %1 else %2 : (!cir.bool, !s32i, !s32i) -> !s32i + ``` + }]; + + let arguments = (ins CIR_BoolType:$condition, CIR_AnyType:$true_value, + CIR_AnyType:$false_value); + let results = (outs CIR_AnyType:$result); + + let assemblyFormat = [{ + `if` $condition `then` $true_value `else` $false_value + `:` `(` + qualified(type($condition)) `,` + qualified(type($true_value)) `,` + qualified(type($false_value)) + `)` `->` qualified(type($result)) attr-dict + }]; + + let hasFolder = 1; +} + //===----------------------------------------------------------------------===// // ConditionOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index bd32e78fd4f4..7ae22e411306 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1382,6 +1382,19 @@ void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, result.addTypes(TypeRange{yield.getOperandTypes().front()}); } +//===----------------------------------------------------------------------===// +// SelectOp +//===----------------------------------------------------------------------===// + +OpFoldResult SelectOp::fold(FoldAdaptor adaptor) { + auto condition = adaptor.getCondition(); + if (!condition) + return nullptr; + + auto conditionValue = mlir::cast(condition).getValue(); + return conditionValue ? getTrueValue() : getFalseValue(); +} + //===----------------------------------------------------------------------===// // BrOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp index 07fe0e6b5594..d73420c21790 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp @@ -146,7 +146,7 @@ void CIRSimplifyPass::runOnOperation() { getOperation()->walk([&](Operation *op) { // CastOp here is to perform a manual `fold` in // applyOpPatternsAndFold - if (isa(op)) ops.push_back(op); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index a3393edfd72f..87b1c909ba7f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2988,6 +2988,60 @@ class CIRRotateOpLowering } }; +class CIRSelectOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::SelectOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto getConstantBool = [](mlir::Value value) -> std::optional { + auto definingOp = mlir::dyn_cast_if_present( + value.getDefiningOp()); + if (!definingOp) + return std::nullopt; + + auto constValue = + mlir::dyn_cast(definingOp.getValue()); + if (!constValue) + return std::nullopt; + + return constValue.getValue(); + }; + + // Two special cases in the LLVMIR codegen of select op: + // - select %0, %1, false => and %0, %1 + // - select %0, true, %1 => or %0, %1 + auto trueValue = op.getTrueValue(); + auto falseValue = op.getFalseValue(); + if (mlir::isa(trueValue.getType())) { + if (std::optional falseValueBool = getConstantBool(falseValue); + falseValueBool.has_value() && !*falseValueBool) { + // select %0, %1, false => and %0, %1 + rewriter.replaceOpWithNewOp( + op, adaptor.getCondition(), adaptor.getTrueValue()); + return mlir::success(); + } + if (std::optional trueValueBool = getConstantBool(trueValue); + trueValueBool.has_value() && *trueValueBool) { + // select %0, true, %1 => or %0, %1 + rewriter.replaceOpWithNewOp( + op, adaptor.getCondition(), adaptor.getFalseValue()); + return mlir::success(); + } + } + + auto llvmCondition = rewriter.create( + op.getLoc(), mlir::IntegerType::get(op->getContext(), 1), + adaptor.getCondition()); + rewriter.replaceOpWithNewOp( + op, llvmCondition, adaptor.getTrueValue(), adaptor.getFalseValue()); + + return mlir::success(); + } +}; + class CIRBrOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -3836,20 +3890,20 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, patterns.add(patterns.getContext()); patterns.add(converter, dataLayout, patterns.getContext()); patterns.add< - CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, - CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, - CIRBitPopcountOpLowering, CIRAtomicCmpXchgLowering, CIRAtomicXchgLowering, - CIRAtomicFetchLowering, CIRByteswapOpLowering, CIRRotateOpLowering, - CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, - CIRTryCallLowering, CIREhInflightOpLowering, CIRUnaryOpLowering, - CIRBinOpLowering, CIRBinOpOverflowOpLowering, CIRShiftOpLowering, - CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRFuncLowering, - CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, - CIRComplexCreateOpLowering, CIRComplexRealOpLowering, - CIRComplexImagOpLowering, CIRComplexRealPtrOpLowering, - CIRComplexImagPtrOpLowering, CIRVAStartLowering, CIRVAEndLowering, - CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, - CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, + CIRCmpOpLowering, CIRSelectOpLowering, CIRBitClrsbOpLowering, + CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, + CIRBitParityOpLowering, CIRBitPopcountOpLowering, + CIRAtomicCmpXchgLowering, CIRAtomicXchgLowering, CIRAtomicFetchLowering, + CIRByteswapOpLowering, CIRRotateOpLowering, CIRBrCondOpLowering, + CIRPtrStrideOpLowering, CIRCallLowering, CIRTryCallLowering, + CIREhInflightOpLowering, CIRUnaryOpLowering, CIRBinOpLowering, + CIRBinOpOverflowOpLowering, CIRShiftOpLowering, CIRLoadLowering, + CIRConstantLowering, CIRStoreLowering, CIRFuncLowering, CIRCastOpLowering, + CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRComplexCreateOpLowering, + CIRComplexRealOpLowering, CIRComplexImagOpLowering, + CIRComplexRealPtrOpLowering, CIRComplexImagPtrOpLowering, + CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, + CIRBrOpLowering, CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, diff --git a/clang/test/CIR/Lowering/select.cir b/clang/test/CIR/Lowering/select.cir new file mode 100644 index 000000000000..1836210d6a7c --- /dev/null +++ b/clang/test/CIR/Lowering/select.cir @@ -0,0 +1,50 @@ +// RUN: cir-translate -cir-to-llvmir -o %t.ll %s +// RUN: FileCheck --input-file=%t.ll -check-prefix=LLVM %s + +!s32i = !cir.int + +module { + cir.func @select_int(%arg0 : !cir.bool, %arg1 : !s32i, %arg2 : !s32i) -> !s32i { + %0 = cir.select if %arg0 then %arg1 else %arg2 : (!cir.bool, !s32i, !s32i) -> !s32i + cir.return %0 : !s32i + } + + // LLVM: define i32 @select_int(i8 %[[#COND:]], i32 %[[#TV:]], i32 %[[#FV:]]) + // LLVM-NEXT: %[[#CONDF:]] = trunc i8 %[[#COND]] to i1 + // LLVM-NEXT: %[[#RES:]] = select i1 %[[#CONDF]], i32 %[[#TV]], i32 %[[#FV]] + // LLVM-NEXT: ret i32 %[[#RES]] + // LLVM-NEXT: } + + cir.func @select_bool(%arg0 : !cir.bool, %arg1 : !cir.bool, %arg2 : !cir.bool) -> !cir.bool { + %0 = cir.select if %arg0 then %arg1 else %arg2 : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool + cir.return %0 : !cir.bool + } + + // LLVM: define i8 @select_bool(i8 %[[#COND:]], i8 %[[#TV:]], i8 %[[#FV:]]) + // LLVM-NEXT: %[[#CONDF:]] = trunc i8 %[[#COND]] to i1 + // LLVM-NEXT: %[[#RES:]] = select i1 %[[#CONDF]], i8 %[[#TV]], i8 %[[#FV]] + // LLVM-NEXT: ret i8 %[[#RES]] + // LLVM-NEXT: } + + cir.func @logical_and(%arg0 : !cir.bool, %arg1 : !cir.bool) -> !cir.bool { + %0 = cir.const #cir.bool : !cir.bool + %1 = cir.select if %arg0 then %arg1 else %0 : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool + cir.return %1 : !cir.bool + } + + // LLVM: define i8 @logical_and(i8 %[[#ARG0:]], i8 %[[#ARG1:]]) + // LLVM-NEXT: %[[#RES:]] = and i8 %[[#ARG0]], %[[#ARG1]] + // LLVM-NEXT: ret i8 %[[#RES]] + // LLVM-NEXT: } + + cir.func @logical_or(%arg0 : !cir.bool, %arg1 : !cir.bool) -> !cir.bool { + %0 = cir.const #cir.bool : !cir.bool + %1 = cir.select if %arg0 then %0 else %arg1 : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool + cir.return %1 : !cir.bool + } + + // LLVM: define i8 @logical_or(i8 %[[#ARG0:]], i8 %[[#ARG1:]]) + // LLVM-NEXT: %[[#RES:]] = or i8 %[[#ARG0]], %[[#ARG1]] + // LLVM-NEXT: ret i8 %[[#RES]] + // LLVM-NEXT: } +} diff --git a/clang/test/CIR/Transforms/select.cir b/clang/test/CIR/Transforms/select.cir new file mode 100644 index 000000000000..c3db14daaf4e --- /dev/null +++ b/clang/test/CIR/Transforms/select.cir @@ -0,0 +1,26 @@ +// RUN: cir-opt --canonicalize -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir %s + +!s32i = !cir.int + +module { + cir.func @fold_true(%arg0 : !s32i, %arg1 : !s32i) -> !s32i { + %0 = cir.const #cir.bool : !cir.bool + %1 = cir.select if %0 then %arg0 else %arg1 : (!cir.bool, !s32i, !s32i) -> !s32i + cir.return %1 : !s32i + } + + // CHECK: cir.func @fold_true(%[[ARG0:.+]]: !s32i, %[[ARG1:.+]]: !s32i) -> !s32i { + // CHECK-NEXT: cir.return %[[ARG0]] : !s32i + // CHECK-NEXT: } + + cir.func @fold_false(%arg0 : !s32i, %arg1 : !s32i) -> !s32i { + %0 = cir.const #cir.bool : !cir.bool + %1 = cir.select if %0 then %arg0 else %arg1 : (!cir.bool, !s32i, !s32i) -> !s32i + cir.return %1 : !s32i + } + + // CHECK: cir.func @fold_false(%[[ARG0:.+]]: !s32i, %[[ARG1:.+]]: !s32i) -> !s32i { + // CHECK-NEXT: cir.return %[[ARG1]] : !s32i + // CHECK-NEXT: } +} From 7892507d15b31f3c58a7f5cd35434d2a4b79f5af Mon Sep 17 00:00:00 2001 From: Ivan Murashko Date: Thu, 22 Aug 2024 22:01:01 +0100 Subject: [PATCH 1780/2301] [CIR][CIRGen] Inline variables processing (#794) There is an implementation for inline variables processing at CIR. The LIT test was taken from clang's cxx1z-inline-variables.cpp where the same functionality is tested for Clang Code generation. The test can be run as follows ``` bin/llvm-lit -v ../clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp ``` Note: the pull request also contains a formatting change for two files: - `clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp` - `clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp` --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 +++-- clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 2 +- .../Transforms/TargetLowering/Targets/X86.cpp | 5 ++- .../CIR/CodeGen/cxx1z-inline-variables.cpp | 44 +++++++++++++++++++ 4 files changed, 53 insertions(+), 9 deletions(-) create mode 100644 clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 21792ce41e25..c294f1f42bfe 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -511,12 +511,11 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { assert(0 && "OMPDeclareTargetDeclAttr NYI"); } } - // If this declaration may have caused an inline variable definition - // to change linkage, make sure that it's emitted. - // TODO(cir): probably use GetAddrOfGlobalVar(VD) below? - assert((astCtx.getInlineVariableDefinitionKind(VD) != - ASTContext::InlineVariableDefinitionKind::Strong) && - "not implemented"); + // If this declaration may have caused an inline variable definition to + // change linkage, make sure that it's emitted. + if (astCtx.getInlineVariableDefinitionKind(VD) == + ASTContext::InlineVariableDefinitionKind::Strong) + getAddrOfGlobalVar(VD); return; } } diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp index 12948d0eeb2d..c030cd2f352f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp @@ -185,7 +185,7 @@ llvm::Align CIRDataLayout::getAlignment(mlir::Type Ty, bool abiOrPref) const { // Fetch type alignment from MLIR's data layout. unsigned align = abiOrPref ? layout.getTypeABIAlignment(Ty) - : layout.getTypePreferredAlignment(Ty); + : layout.getTypePreferredAlignment(Ty); return llvm::Align(align); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index f18f638660c4..38501f7c3124 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -235,8 +235,9 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, } else if (isa(Ty)) { // FIXME(cir): Clang's BuiltinType::Kind allow comparisons (GT, LT, etc). - // We should implement this in CIR to simplify the conditions below. Hence, - // Comparisons below might not be truly equivalent to the ones in Clang. + // We should implement this in CIR to simplify the conditions below. + // Hence, Comparisons below might not be truly equivalent to the ones in + // Clang. if (isa(Ty)) { Current = Class::Integer; } diff --git a/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp b/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp new file mode 100644 index 000000000000..dc0e95dca555 --- /dev/null +++ b/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp @@ -0,0 +1,44 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck -check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// For compatibility with C++11 and C++14, an out-of-line declaration of a +// static constexpr local variable promotes the variable to weak_odr. +struct compat { + static constexpr int a = 1; + static constexpr int b = 2; + static constexpr int c = 3; + static inline constexpr int d = 4; + static const int e = 5; + static const int f = 6; + static const int g = 7; +}; +const int &compat_use_before_redecl = compat::b; +const int compat::a; +const int compat::b; +const int compat::c; +const int compat::d; +const int compat::e; +constexpr int compat::f; +constexpr inline int compat::g; +const int &compat_use_after_redecl1 = compat::c; +const int &compat_use_after_redecl2 = compat::d; +const int &compat_use_after_redecl3 = compat::g; + +// CIR: cir.global weak_odr @_ZN6compat1bE = #cir.int<2> : !s32i +// CIR: cir.global weak_odr @_ZN6compat1aE = #cir.int<1> : !s32i +// CIR: cir.global weak_odr @_ZN6compat1cE = #cir.int<3> : !s32i +// CIR: cir.global external @_ZN6compat1eE = #cir.int<5> : !s32i +// CIR: cir.global weak_odr @_ZN6compat1fE = #cir.int<6> : !s32i +// CIR: cir.global linkonce_odr @_ZN6compat1dE = #cir.int<4> : !s32i +// CIR: cir.global linkonce_odr @_ZN6compat1gE = #cir.int<7> : !s32i + +// LLVM: @_ZN6compat1bE = weak_odr global i32 2 +// LLVM: @_ZN6compat1aE = weak_odr global i32 1 +// LLVM: @_ZN6compat1cE = weak_odr global i32 3 +// LLVM: @_ZN6compat1eE = global i32 5 +// LLVM: @_ZN6compat1fE = weak_odr global i32 6 +// LLVM: @_ZN6compat1dE = linkonce_odr global i32 4 +// LLVM: @_ZN6compat1gE = linkonce_odr global i32 7 + From 8ce32ff606628f59f60700524e046de8108c397b Mon Sep 17 00:00:00 2001 From: Congcong Cai Date: Fri, 23 Aug 2024 05:09:38 +0800 Subject: [PATCH 1781/2301] [CIR] Incorrect global view index and offset when neg index (#795) mixed signed and unsigned integer operator cause difference result when index of array is negative --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 25 ++++++++++++------- .../CIR/CodeGen/globals-neg-index-array.c | 20 +++++++++++++++ 2 files changed, 36 insertions(+), 9 deletions(-) create mode 100644 clang/test/CIR/CodeGen/globals-neg-index-array.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 5a49163ac1af..9710925029c4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -40,6 +40,7 @@ #include #include #include +#include namespace cir { @@ -869,16 +870,21 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Type SubType; + auto getIndexAndNewOffset = + [](int64_t Offset, int64_t EltSize) -> std::pair { + int64_t DivRet = Offset / EltSize; + if (DivRet < 0) + DivRet -= 1; // make sure offset is positive + int64_t ModRet = Offset - (DivRet * EltSize); + return {DivRet, ModRet}; + }; + if (auto ArrayTy = mlir::dyn_cast(Ty)) { - auto EltSize = Layout.getTypeAllocSize(ArrayTy.getEltType()); - Indices.push_back(Offset / EltSize); + int64_t EltSize = Layout.getTypeAllocSize(ArrayTy.getEltType()); SubType = ArrayTy.getEltType(); - Offset %= EltSize; - } else if (auto PtrTy = mlir::dyn_cast(Ty)) { - auto EltSize = Layout.getTypeAllocSize(PtrTy.getPointee()); - Indices.push_back(Offset / EltSize); - SubType = PtrTy.getPointee(); - Offset %= EltSize; + auto const [Index, NewOffset] = getIndexAndNewOffset(Offset, EltSize); + Indices.push_back(Index); + Offset = NewOffset; } else if (auto StructTy = mlir::dyn_cast(Ty)) { auto Elts = StructTy.getMembers(); int64_t Pos = 0; @@ -887,7 +893,8 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { (int64_t)Layout.getTypeAllocSize(Elts[I]).getFixedValue(); unsigned AlignMask = Layout.getABITypeAlign(Elts[I]).value() - 1; Pos = (Pos + AlignMask) & ~AlignMask; - if (Offset < Pos + EltSize) { + assert(Offset >= 0); + if (static_cast(Offset) < Pos + EltSize) { Indices.push_back(I); SubType = Elts[I]; Offset -= Pos; diff --git a/clang/test/CIR/CodeGen/globals-neg-index-array.c b/clang/test/CIR/CodeGen/globals-neg-index-array.c new file mode 100644 index 000000000000..7f6110d30df5 --- /dev/null +++ b/clang/test/CIR/CodeGen/globals-neg-index-array.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s +// RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +struct __attribute__((packed)) PackedStruct { + char a1; + char a2; + char a3; +}; +struct PackedStruct packed[10]; +char *packed_element = &(packed[-2].a3); +// CHECK: cir.global external @packed = #cir.zero : !cir.array loc(#loc5) +// CHECK: cir.global external @packed_element = #cir.global_view<@packed, [-2 : i32, 2 : i32]> +// LLVM: @packed = global [10 x %struct.PackedStruct] zeroinitializer +// LLVM: @packed_element = global ptr getelementptr inbounds ([10 x %struct.PackedStruct], ptr @packed, i32 -2, i32 2) From f3f657fa11b01a5c641502c09df07c6aa43c6a85 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 23 Aug 2024 06:28:27 +0800 Subject: [PATCH 1782/2301] [CIR][NFC] Replace ternary ops after lowering prepare to select ops (#800) This PR refactors the LoweringPrepare pass and replaces various ternary ops generated by LoweringPrepare with semantically equivalent select ops. --- .../Dialect/Transforms/LoweringPrepare.cpp | 75 +++---------------- clang/test/CIR/CodeGen/complex-arithmetic.c | 55 ++++++-------- clang/test/CIR/CodeGen/complex-cast.c | 70 +++-------------- .../test/CIR/CodeGen/three-way-comparison.cpp | 36 ++------- 4 files changed, 49 insertions(+), 187 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index d968afc4c535..160c0de9b98d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -310,17 +310,7 @@ static void canonicalizeIntrinsicThreeWayCmp(CIRBaseBuilderTy &builder, loc, mlir::cir::IntAttr::get(input.getType(), yield)); auto eqToTest = builder.createCompare(loc, mlir::cir::CmpOpKind::eq, input, testValue); - return builder - .create( - loc, eqToTest, - [&](OpBuilder &, Location) { - builder.create(loc, - mlir::ValueRange{yieldValue}); - }, - [&](OpBuilder &, Location) { - builder.create(loc, mlir::ValueRange{input}); - }) - ->getResult(0); + return builder.createSelect(loc, eqToTest, yieldValue, input); }; if (cmpInfo.getLt() != -1) @@ -460,32 +450,7 @@ static mlir::Value lowerComplexToScalarCast(MLIRContext &ctx, CastOp op) { builder.createCast(op.getLoc(), elemToBoolKind, srcImag, boolTy); // srcRealToBool || srcImagToBool - return builder - .create( - op.getLoc(), srcRealToBool, - [&](mlir::OpBuilder &, mlir::Location) { - builder.createYield(op.getLoc(), - builder.getTrue(op.getLoc()).getResult()); - }, - [&](mlir::OpBuilder &, mlir::Location) { - auto inner = - builder - .create( - op.getLoc(), srcImagToBool, - [&](mlir::OpBuilder &, mlir::Location) { - builder.createYield( - op.getLoc(), - builder.getTrue(op.getLoc()).getResult()); - }, - [&](mlir::OpBuilder &, mlir::Location) { - builder.createYield( - op.getLoc(), - builder.getFalse(op.getLoc()).getResult()); - }) - .getResult(); - builder.createYield(op.getLoc(), inner); - }) - .getResult(); + return builder.createLogicalOr(op.getLoc(), srcRealToBool, srcImagToBool); } static mlir::Value lowerComplexToComplexCast(MLIRContext &ctx, CastOp op) { @@ -652,26 +617,17 @@ static mlir::Value lowerComplexMul(LoweringPreparePass &pass, // NaN. If so, emit a library call to compute the multiplication instead. // We check a value against NaN by comparing the value against itself. auto resultRealIsNaN = builder.createIsNaN(loc, resultReal); + auto resultImagIsNaN = builder.createIsNaN(loc, resultImag); + auto resultRealAndImagAreNaN = + builder.createLogicalAnd(loc, resultRealIsNaN, resultImagIsNaN); return builder .create( - loc, resultRealIsNaN, + loc, resultRealAndImagAreNaN, [&](mlir::OpBuilder &, mlir::Location) { - auto resultImagIsNaN = builder.createIsNaN(loc, resultImag); - auto inner = - builder - .create( - loc, resultImagIsNaN, - [&](mlir::OpBuilder &, mlir::Location) { - auto libCallResult = buildComplexBinOpLibCall( - pass, builder, &getComplexMulLibCallName, loc, ty, - lhsReal, lhsImag, rhsReal, rhsImag); - builder.createYield(loc, libCallResult); - }, - [&](mlir::OpBuilder &, mlir::Location) { - builder.createYield(loc, algebraicResult); - }) - .getResult(); - builder.createYield(loc, inner); + auto libCallResult = buildComplexBinOpLibCall( + pass, builder, &getComplexMulLibCallName, loc, ty, lhsReal, + lhsImag, rhsReal, rhsImag); + builder.createYield(loc, libCallResult); }, [&](mlir::OpBuilder &, mlir::Location) { builder.createYield(loc, algebraicResult); @@ -877,16 +833,7 @@ void LoweringPreparePass::lowerThreeWayCmpOp(CmpThreeWayOp op) { }; auto buildSelect = [&](mlir::Value condition, mlir::Value trueResult, mlir::Value falseResult) -> mlir::Value { - return builder - .create( - loc, condition, - [&](OpBuilder &, Location) { - builder.create(loc, trueResult); - }, - [&](OpBuilder &, Location) { - builder.create(loc, falseResult); - }) - .getResult(); + return builder.createSelect(loc, condition, trueResult, falseResult); }; mlir::Value transformedResult; diff --git a/clang/test/CIR/CodeGen/complex-arithmetic.c b/clang/test/CIR/CodeGen/complex-arithmetic.c index c2e86ca43f74..8e772e70f2d9 100644 --- a/clang/test/CIR/CodeGen/complex-arithmetic.c +++ b/clang/test/CIR/CodeGen/complex-arithmetic.c @@ -269,15 +269,12 @@ void mul() { // CIR-FULL-NEXT: %[[#F:]] = cir.binop(add, %[[#C]], %[[#D]]) : !cir.double // CIR-FULL-NEXT: %[[#RES:]] = cir.complex.create %[[#E]], %[[#F]] : !cir.double -> !cir.complex // CIR-FULL-NEXT: %[[#COND:]] = cir.cmp(ne, %[[#E]], %[[#E]]) : !cir.double, !cir.bool -// CIR-FULL-NEXT: %{{.+}} = cir.ternary(%[[#COND]], true { -// CIR-FULL-NEXT: %[[#COND2:]] = cir.cmp(ne, %[[#F]], %[[#F]]) : !cir.double, !cir.bool -// CIR-FULL-NEXT: %[[#INNER:]] = cir.ternary(%[[#COND2]], true { -// CIR-FULL-NEXT: %[[#RES2:]] = cir.call @__muldc3(%[[#LHSR]], %[[#LHSI]], %[[#RHSR]], %[[#RHSI]]) : (!cir.double, !cir.double, !cir.double, !cir.double) -> !cir.complex -// CIR-FULL-NEXT: cir.yield %[[#RES2]] : !cir.complex -// CIR-FULL-NEXT: }, false { -// CIR-FULL-NEXT: cir.yield %[[#RES]] : !cir.complex -// CIR-FULL-NEXT: }) : (!cir.bool) -> !cir.complex -// CIR-FULL-NEXT: cir.yield %[[#INNER]] : !cir.complex +// CIR-FULL-NEXT: %[[#COND2:]] = cir.cmp(ne, %[[#F]], %[[#F]]) : !cir.double, !cir.bool +// CIR-FULL-NEXT: %[[#G:]] = cir.const #false +// CIR-FULL-NEXT: %[[#H:]] = cir.select if %[[#COND]] then %[[#COND2]] else %[[#G]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool +// CIR-FULL-NEXT: %{{.+}} = cir.ternary(%[[#H]], true { +// CIR-FULL-NEXT: %[[#RES2:]] = cir.call @__muldc3(%[[#LHSR]], %[[#LHSI]], %[[#RHSR]], %[[#RHSI]]) : (!cir.double, !cir.double, !cir.double, !cir.double) -> !cir.complex +// CIR-FULL-NEXT: cir.yield %[[#RES2]] : !cir.complex // CIR-FULL-NEXT: }, false { // CIR-FULL-NEXT: cir.yield %[[#RES]] : !cir.complex // CIR-FULL-NEXT: }) : (!cir.bool) -> !cir.complex @@ -306,30 +303,22 @@ void mul() { // LLVM-FULL-NEXT: %[[#F:]] = fadd double %[[#C]], %[[#D]] // LLVM-FULL-NEXT: %[[#G:]] = insertvalue { double, double } undef, double %[[#E]], 0 // LLVM-FULL-NEXT: %[[#RES:]] = insertvalue { double, double } %[[#G]], double %[[#F]], 1 -// LLVM-FULL-NEXT: %[[#COND:]] = fcmp une double %[[#E]], %[[#E]] -// LLVM-FULL-NEXT: br i1 %[[#COND]], label %[[#LA:]], label %[[#LB:]] -// LLVM-FULL: [[#LA]]: -// LLVM-FULL-NEXT: %[[#H:]] = fcmp une double %[[#F]], %[[#F]] -// LLVM-FULL-NEXT: br i1 %[[#H]], label %[[#LC:]], label %[[#LD:]] -// LLVM-FULL: [[#LC]]: -// LLVM-FULL-NEXT: %[[#RES2:]] = call { double, double } @__muldc3(double %[[#LHSR]], double %[[#LHSI]], double %[[#RHSR]], double %[[#RHSI]]) -// LLVM-FULL-NEXT: br label %[[#LE:]] -// LLVM-FULL: [[#LD]]: -// LLVM-FULL-NEXT: br label %[[#LE]] -// LLVM-FULL: [[#LE]]: -// LLVM-FULL-NEXT: %[[#RES3:]] = phi { double, double } [ %[[#RES]], %[[#LD]] ], [ %[[#RES2]], %[[#LC]] ] -// LLVM-FULL-NEXT: br label %[[#LF:]] -// LLVM-FULL: [[#LF]]: -// LLVM-FULL-NEXT: br label %[[#LG:]] -// LLVM-FULL: [[#LB]]: -// LLVM-FULL-NEXT: br label %[[#LG]] -// LLVM-FULL: [[#LG]]: -// LLVM-FULL-NEXT: %26 = phi { double, double } [ %[[#RES]], %[[#LB]] ], [ %[[#RES3]], %[[#LF]] ] - -// LLVM-FULL: %[[#LHSR:]] = extractvalue { i32, i32 } %28, 0 -// LLVM-FULL-NEXT: %[[#LHSI:]] = extractvalue { i32, i32 } %28, 1 -// LLVM-FULL-NEXT: %[[#RHSR:]] = extractvalue { i32, i32 } %29, 0 -// LLVM-FULL-NEXT: %[[#RHSI:]] = extractvalue { i32, i32 } %29, 1 +// LLVM-FULL-NEXT: %[[#H:]] = fcmp une double %[[#E]], %[[#E]] +// LLVM-FULL-NEXT: %[[#COND:]] = zext i1 %[[#H]] to i8 +// LLVM-FULL-NEXT: %[[#I:]] = fcmp une double %[[#F]], %[[#F]] +// LLVM-FULL-NEXT: %[[#COND2:]] = zext i1 %[[#I]] to i8 +// LLVM-FULL-NEXT: %[[#J:]] = and i8 %[[#COND]], %[[#COND2]] +// LLVM-FULL-NEXT: %[[#COND3:]] = trunc i8 %[[#J]] to i1 +// LLVM-FULL: {{.+}}: +// LLVM-FULL-NEXT: %{{.+}} = call { double, double } @__muldc3(double %[[#LHSR]], double %[[#LHSI]], double %[[#RHSR]], double %[[#RHSI]]) +// LLVM-FULL-NEXT: br label %{{.+}} +// LLVM-FULL: {{.+}}: +// LLVM-FULL-NEXT: br label %{{.+}} + +// LLVM-FULL: %[[#LHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-FULL-NEXT: %[[#LHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 +// LLVM-FULL-NEXT: %[[#RHSR:]] = extractvalue { i32, i32 } %{{.+}}, 0 +// LLVM-FULL-NEXT: %[[#RHSI:]] = extractvalue { i32, i32 } %{{.+}}, 1 // LLVM-FULL-NEXT: %[[#A:]] = mul i32 %[[#LHSR]], %[[#RHSR]] // LLVM-FULL-NEXT: %[[#B:]] = mul i32 %[[#LHSI]], %[[#RHSI]] // LLVM-FULL-NEXT: %[[#C:]] = mul i32 %[[#LHSR]], %[[#RHSI]] diff --git a/clang/test/CIR/CodeGen/complex-cast.c b/clang/test/CIR/CodeGen/complex-cast.c index 72dd45e652ca..98afabd65340 100644 --- a/clang/test/CIR/CodeGen/complex-cast.c +++ b/clang/test/CIR/CodeGen/complex-cast.c @@ -173,41 +173,16 @@ void complex_to_bool() { // CIR-AFTER-NEXT: %[[#IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !cir.double // CIR-AFTER-NEXT: %[[#RB:]] = cir.cast(float_to_bool, %[[#REAL]] : !cir.double), !cir.bool // CIR-AFTER-NEXT: %[[#IB:]] = cir.cast(float_to_bool, %[[#IMAG]] : !cir.double), !cir.bool -// CIR-AFTER-NEXT: %{{.+}} = cir.ternary(%[[#RB]], true { -// CIR-AFTER-NEXT: %[[#A:]] = cir.const #true -// CIR-AFTER-NEXT: cir.yield %[[#A]] : !cir.bool -// CIR-AFTER-NEXT: }, false { -// CIR-AFTER-NEXT: %[[#B:]] = cir.ternary(%[[#IB]], true { -// CIR-AFTER-NEXT: %[[#C:]] = cir.const #true -// CIR-AFTER-NEXT: cir.yield %[[#C]] : !cir.bool -// CIR-AFTER-NEXT: }, false { -// CIR-AFTER-NEXT: %[[#D:]] = cir.const #false -// CIR-AFTER-NEXT: cir.yield %[[#D]] : !cir.bool -// CIR-AFTER-NEXT: }) : (!cir.bool) -> !cir.bool -// CIR-AFTER-NEXT: cir.yield %[[#B]] : !cir.bool -// CIR-AFTER-NEXT: }) : (!cir.bool) -> !cir.bool +// CIR-AFTER-NEXT: %[[#A:]] = cir.const #true +// CIR-AFTER-NEXT: %{{.+}} = cir.select if %[[#RB]] then %[[#A]] else %[[#IB]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool // LLVM: %[[#REAL:]] = extractvalue { double, double } %{{.+}}, 0 // LLVM-NEXT: %[[#IMAG:]] = extractvalue { double, double } %{{.+}}, 1 // LLVM-NEXT: %[[#RB:]] = fcmp une double %[[#REAL]], 0.000000e+00 +// LLVM-NEXT: %[[#RB2:]] = zext i1 %[[#RB]] to i8 // LLVM-NEXT: %[[#IB:]] = fcmp une double %[[#IMAG]], 0.000000e+00 -// LLVM-NEXT: br i1 %[[#RB]], label %[[#LABEL_RB:]], label %[[#LABEL_RB_NOT:]] -// LLVM: [[#LABEL_RB]]: -// LLVM-NEXT: br label %[[#LABEL_EXIT:]] -// LLVM: [[#LABEL_RB_NOT]]: -// LLVM-NEXT: br i1 %[[#IB]], label %[[#LABEL_IB:]], label %[[#LABEL_IB_NOT:]] -// LLVM: [[#LABEL_IB]]: -// LLVM-NEXT: br label %[[#LABEL_A:]] -// LLVM: [[#LABEL_IB_NOT]]: -// LLVM-NEXT: br label %[[#LABEL_A]] -// LLVM: [[#LABEL_A]]: -// LLVM-NEXT: %[[#A:]] = phi i8 [ 0, %[[#LABEL_IB_NOT]] ], [ 1, %[[#LABEL_IB]] ] -// LLVM-NEXT: br label %[[#LABEL_B:]] -// LLVM: [[#LABEL_B]]: -// LLVM-NEXT: br label %[[#LABEL_EXIT]] -// LLVM: [[#LABEL_EXIT]]: -// LLVM-NEXT: %{{.+}} = phi i8 [ %[[#A]], %[[#LABEL_B]] ], [ 1, %[[#LABEL_RB]] ] -// LLVM-NEXT: br label %{{.+}} +// LLVM-NEXT: %[[#IB2:]] = zext i1 %[[#IB]] to i8 +// LLVM-NEXT: %{{.+}} = or i8 %[[#RB2]], %[[#IB2]] // CIR-BEFORE: %{{.+}} = cir.cast(int_complex_to_bool, %{{.+}} : !cir.complex), !cir.bool @@ -215,41 +190,16 @@ void complex_to_bool() { // CIR-AFTER-NEXT: %[[#IMAG:]] = cir.complex.imag %{{.+}} : !cir.complex -> !s32i // CIR-AFTER-NEXT: %[[#RB:]] = cir.cast(int_to_bool, %[[#REAL]] : !s32i), !cir.bool // CIR-AFTER-NEXT: %[[#IB:]] = cir.cast(int_to_bool, %[[#IMAG]] : !s32i), !cir.bool -// CIR-AFTER-NEXT: %{{.+}} = cir.ternary(%[[#RB]], true { -// CIR-AFTER-NEXT: %[[#A:]] = cir.const #true -// CIR-AFTER-NEXT: cir.yield %[[#A]] : !cir.bool -// CIR-AFTER-NEXT: }, false { -// CIR-AFTER-NEXT: %[[#B:]] = cir.ternary(%[[#IB]], true { -// CIR-AFTER-NEXT: %[[#C:]] = cir.const #true -// CIR-AFTER-NEXT: cir.yield %[[#C]] : !cir.bool -// CIR-AFTER-NEXT: }, false { -// CIR-AFTER-NEXT: %[[#D:]] = cir.const #false -// CIR-AFTER-NEXT: cir.yield %[[#D]] : !cir.bool -// CIR-AFTER-NEXT: }) : (!cir.bool) -> !cir.bool -// CIR-AFTER-NEXT: cir.yield %[[#B]] : !cir.bool -// CIR-AFTER-NEXT: }) : (!cir.bool) -> !cir.bool +// CIR-AFTER-NEXT: %[[#A:]] = cir.const #true +// CIR-AFTER-NEXT: %{{.+}} = cir.select if %[[#RB]] then %[[#A]] else %[[#IB]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool // LLVM: %[[#REAL:]] = extractvalue { i32, i32 } %{{.+}}, 0 // LLVM-NEXT: %[[#IMAG:]] = extractvalue { i32, i32 } %{{.+}}, 1 // LLVM-NEXT: %[[#RB:]] = icmp ne i32 %[[#REAL]], 0 +// LLVM-NEXT: %[[#RB2:]] = zext i1 %[[#RB]] to i8 // LLVM-NEXT: %[[#IB:]] = icmp ne i32 %[[#IMAG]], 0 -// LLVM-NEXT: br i1 %[[#RB]], label %[[#LABEL_RB:]], label %[[#LABEL_RB_NOT:]] -// LLVM: [[#LABEL_RB]]: -// LLVM-NEXT: br label %[[#LABEL_EXIT:]] -// LLVM: [[#LABEL_RB_NOT]]: -// LLVM-NEXT: br i1 %[[#IB]], label %[[#LABEL_IB:]], label %[[#LABEL_IB_NOT:]] -// LLVM: [[#LABEL_IB]]: -// LLVM-NEXT: br label %[[#LABEL_A:]] -// LLVM: [[#LABEL_IB_NOT]]: -// LLVM-NEXT: br label %[[#LABEL_A]] -// LLVM: [[#LABEL_A]]: -// LLVM-NEXT: %[[#A:]] = phi i8 [ 0, %[[#LABEL_IB_NOT]] ], [ 1, %[[#LABEL_IB]] ] -// LLVM-NEXT: br label %[[#LABEL_B:]] -// LLVM: [[#LABEL_B]]: -// LLVM-NEXT: br label %[[#LABEL_EXIT]] -// LLVM: [[#LABEL_EXIT]]: -// LLVM-NEXT: %{{.+}} = phi i8 [ %[[#A]], %[[#LABEL_B]] ], [ 1, %[[#LABEL_RB]] ] -// LLVM-NEXT: br label %{{.+}} +// LLVM-NEXT: %[[#IB2:]] = zext i1 %[[#IB]] to i8 +// LLVM-NEXT: %{{.+}} = or i8 %[[#RB2]], %[[#IB2]] // CHECK: } diff --git a/clang/test/CIR/CodeGen/three-way-comparison.cpp b/clang/test/CIR/CodeGen/three-way-comparison.cpp index 28e094574de5..729602b55cfb 100644 --- a/clang/test/CIR/CodeGen/three-way-comparison.cpp +++ b/clang/test/CIR/CodeGen/three-way-comparison.cpp @@ -33,27 +33,15 @@ auto three_way_strong(int x, int y) { // NONCANONICAL-AFTER-NEXT: %[[#NEGONE:]] = cir.const #cir.int<-1> : !s8i // NONCANONICAL-AFTER-NEXT: %[[#ONE:]] = cir.const #cir.int<1> : !s8i // NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_NEGONE:]] = cir.cmp(eq, %[[#CMP3WAY_RESULT]], %[[#NEGONE]]) : !s8i, !cir.bool -// NONCANONICAL-AFTER-NEXT: %[[#A:]] = cir.ternary(%[[#CMP_TO_NEGONE]], true { -// NONCANONICAL-AFTER-NEXT: cir.yield %[[#ONE]] : !s8i -// NONCANONICAL-AFTER-NEXT: }, false { -// NONCANONICAL-AFTER-NEXT: cir.yield %[[#CMP3WAY_RESULT]] : !s8i -// NONCANONICAL-AFTER-NEXT: }) : (!cir.bool) -> !s8i +// NONCANONICAL-AFTER-NEXT: %[[#A:]] = cir.select if %[[#CMP_TO_NEGONE]] then %[[#ONE]] else %[[#CMP3WAY_RESULT]] : (!cir.bool, !s8i, !s8i) -> !s8i // NONCANONICAL-AFTER-NEXT: %[[#ZERO:]] = cir.const #cir.int<0> : !s8i // NONCANONICAL-AFTER-NEXT: %[[#TWO:]] = cir.const #cir.int<2> : !s8i // NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_ZERO:]] = cir.cmp(eq, %[[#A]], %[[#ZERO]]) : !s8i, !cir.bool -// NONCANONICAL-AFTER-NEXT: %[[#B:]] = cir.ternary(%[[#CMP_TO_ZERO]], true { -// NONCANONICAL-AFTER-NEXT: cir.yield %[[#TWO]] : !s8i -// NONCANONICAL-AFTER-NEXT: }, false { -// NONCANONICAL-AFTER-NEXT: cir.yield %[[#A]] : !s8i -// NONCANONICAL-AFTER-NEXT: }) : (!cir.bool) -> !s8i +// NONCANONICAL-AFTER-NEXT: %[[#B:]] = cir.select if %[[#CMP_TO_ZERO]] then %[[#TWO]] else %[[#A]] : (!cir.bool, !s8i, !s8i) -> !s8i // NONCANONICAL-AFTER-NEXT: %[[#ONE2:]] = cir.const #cir.int<1> : !s8i // NONCANONICAL-AFTER-NEXT: %[[#THREE:]] = cir.const #cir.int<3> : !s8i // NONCANONICAL-AFTER-NEXT: %[[#CMP_TO_ONE:]] = cir.cmp(eq, %[[#B]], %[[#ONE2]]) : !s8i, !cir.bool -// NONCANONICAL-AFTER-NEXT: %{{.+}} = cir.ternary(%[[#CMP_TO_ONE]], true { -// NONCANONICAL-AFTER-NEXT: cir.yield %[[#THREE]] : !s8i -// NONCANONICAL-AFTER-NEXT: }, false { -// NONCANONICAL-AFTER-NEXT: cir.yield %[[#B]] : !s8i -// NONCANONICAL-AFTER-NEXT: }) : (!cir.bool) -> !s8i +// NONCANONICAL-AFTER-NEXT: %{{.+}} = cir.select if %[[#CMP_TO_ONE]] then %[[#THREE]] else %[[#B]] : (!cir.bool, !s8i, !s8i) -> !s8i // NONCANONICAL-AFTER: } auto three_way_weak(float x, float y) { @@ -74,19 +62,7 @@ auto three_way_weak(float x, float y) { // AFTER-NEXT: %[[#CMP_LT:]] = cir.cmp(lt, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool // AFTER-NEXT: %[[#CMP_EQ:]] = cir.cmp(eq, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool // AFTER-NEXT: %[[#CMP_GT:]] = cir.cmp(gt, %[[#LHS]], %[[#RHS]]) : !cir.float, !cir.bool -// AFTER-NEXT: %[[#CMP_EQ_RES:]] = cir.ternary(%[[#CMP_EQ]], true { -// AFTER-NEXT: cir.yield %[[#EQ]] : !s8i -// AFTER-NEXT: }, false { -// AFTER-NEXT: cir.yield %[[#UNORDERED]] : !s8i -// AFTER-NEXT: }) : (!cir.bool) -> !s8i -// AFTER-NEXT: %[[#CMP_GT_RES:]] = cir.ternary(%[[#CMP_GT]], true { -// AFTER-NEXT: cir.yield %[[#GT]] : !s8i -// AFTER-NEXT: }, false { -// AFTER-NEXT: cir.yield %[[#CMP_EQ_RES]] : !s8i -// AFTER-NEXT: }) : (!cir.bool) -> !s8i -// AFTER-NEXT: %{{.+}} = cir.ternary(%[[#CMP_LT]], true { -// AFTER-NEXT: cir.yield %[[#LT]] : !s8i -// AFTER-NEXT: }, false { -// AFTER-NEXT: cir.yield %[[#CMP_GT_RES]] : !s8i -// AFTER-NEXT: }) : (!cir.bool) -> !s8i +// AFTER-NEXT: %[[#CMP_EQ_RES:]] = cir.select if %[[#CMP_EQ]] then %[[#EQ]] else %[[#UNORDERED]] : (!cir.bool, !s8i, !s8i) -> !s8i +// AFTER-NEXT: %[[#CMP_GT_RES:]] = cir.select if %[[#CMP_GT]] then %[[#GT]] else %[[#CMP_EQ_RES]] : (!cir.bool, !s8i, !s8i) -> !s8i +// AFTER-NEXT: %{{.+}} = cir.select if %[[#CMP_LT]] then %[[#LT]] else %[[#CMP_GT_RES]] : (!cir.bool, !s8i, !s8i) -> !s8i // AFTER: } From 8bb83ab65ea91359b9a9ba2fd073cb881e665fc3 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Fri, 23 Aug 2024 22:07:36 +0300 Subject: [PATCH 1783/2301] [CIR][CirGen][Bugfix] Fixes __sync_fetch_and_add for unsigned integers (#799) `__sync_fetch_and_add` currently doesn't support unsigned integers. The following code snippet, for example, raises an error: ``` #include void foo(uint64_t x) { __sync_fetch_and_add(&x, 1); } ``` The error can be traced down to this line `auto intType = builder.getSIntNTy(cgf.getContext().getTypeSize(typ));` from `clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp`. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 5 ++++- clang/test/CIR/CodeGen/atomic.cpp | 27 +++++++++++++++++++++++++ clang/test/CodeGen/atomic.c | 2 +- 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 5ba526584e16..59811b8e9157 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -240,7 +240,10 @@ makeBinaryAtomicValue(CIRGenFunction &cgf, mlir::cir::AtomicFetchKind kind, Address destAddr = checkAtomicAlignment(cgf, expr); auto &builder = cgf.getBuilder(); - auto intType = builder.getSIntNTy(cgf.getContext().getTypeSize(typ)); + auto intType = + expr->getArg(0)->getType()->getPointeeType()->isUnsignedIntegerType() + ? builder.getUIntNTy(cgf.getContext().getTypeSize(typ)) + : builder.getSIntNTy(cgf.getContext().getTypeSize(typ)); mlir::Value val = cgf.buildScalarExpr(expr->getArg(1)); mlir::Type valueType = val.getType(); val = buildToInt(cgf, val, typ, intType); diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index b031c330c275..98215cfb5980 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -471,3 +471,30 @@ void cmp_val_short(short* p, short x, short u) { void cmp_val_byte(char* p, char x, char u) { char r = __sync_val_compare_and_swap(p, x, u); } + +// CHECK-LABEL: @_Z8inc_uint +// CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !u32i, seq_cst) fetch_first : !u32i + +// LLVM-LABEL: @_Z8inc_uint +// LLVM: atomicrmw add ptr {{.*}}, i32 {{.*}} seq_cst, align 4 +void inc_uint(unsigned int* a, int b) { + unsigned int c = __sync_fetch_and_add(a, b); +} + +// CHECK-LABEL: @_Z9inc_ulong +// CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !u64i, seq_cst) fetch_first : !u64i + +// LLVM-LABEL: @_Z9inc_ulong +// LLVM: atomicrmw add ptr {{.*}}, i64 {{.*}} seq_cst, align 8 +void inc_ulong(unsigned long* a, long b) { + unsigned long c = __sync_fetch_and_add(a, b); +} + +// CHECK-LABEL: @_Z9inc_uchar +// CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !u8i, seq_cst) fetch_first : !u8i + +// LLVM-LABEL: @_Z9inc_uchar +// LLVM: atomicrmw add ptr {{.*}}, i8 {{.*}} seq_cst, align 1 +void inc_uchar(unsigned char* a, char b) { + unsigned char c = __sync_fetch_and_add(a, b); +} \ No newline at end of file diff --git a/clang/test/CodeGen/atomic.c b/clang/test/CodeGen/atomic.c index 16c29e282ddd..48e3c3304816 100644 --- a/clang/test/CodeGen/atomic.c +++ b/clang/test/CodeGen/atomic.c @@ -160,4 +160,4 @@ void force_global_uses(void) { // X86: call void @__atomic_load(i32 noundef 16, ptr noundef @glob_longdbl, ptr noundef %atomic-temp // X86-NEXT: %0 = load x86_fp80, ptr %atomic-temp, align 16 // SYSTEMZ: load atomic fp128, ptr @[[GLOB_LONGDBL]] seq_cst -} +} \ No newline at end of file From 5c24defebaf9e6967f79276dc522b9165f3d9fa3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 22 Aug 2024 18:03:34 -0700 Subject: [PATCH 1784/2301] [CIR][CIRGen] Support global initialization with new --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 73 ++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 9 +++ clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp | 72 ++++++++++-------- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 22 ++++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 20 ++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 9 ++- clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp | 8 -- 7 files changed, 151 insertions(+), 62 deletions(-) delete mode 100644 clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index c38c5ad6af61..9b7032993399 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -206,37 +206,70 @@ static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, } } -static void buildDeclDestory(CIRGenFunction &CGF, const VarDecl *D, - Address DeclPtr) { +static void buildDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) { // Honor __attribute__((no_destroy)) and bail instead of attempting // to emit a reference to a possibly nonexistent destructor, which // in turn can cause a crash. This will result in a global constructor // that isn't balanced out by a destructor call as intended by the // attribute. This also checks for -fno-c++-static-destructors and // bails even if the attribute is not present. - assert(D->needsDestruction(CGF.getContext()) == QualType::DK_cxx_destructor); + QualType::DestructionKind DtorKind = D->needsDestruction(CGF.getContext()); - auto &CGM = CGF.CGM; + // FIXME: __attribute__((cleanup)) ? - // If __cxa_atexit is disabled via a flag, a different helper function is - // generated elsewhere which uses atexit instead, and it takes the destructor - // directly. - auto UsingExternalHelper = CGM.getCodeGenOpts().CXAAtExit; + switch (DtorKind) { + case QualType::DK_none: + return; + + case QualType::DK_cxx_destructor: + break; + + case QualType::DK_objc_strong_lifetime: + case QualType::DK_objc_weak_lifetime: + case QualType::DK_nontrivial_c_struct: + // We don't care about releasing objects during process teardown. + assert(!D->getTLSKind() && "should have rejected this"); + return; + } + + auto &CGM = CGF.CGM; QualType type = D->getType(); + + // Special-case non-array C++ destructors, if they have the right signature. + // Under some ABIs, destructors return this instead of void, and cannot be + // passed directly to __cxa_atexit if the target does not allow this + // mismatch. const CXXRecordDecl *Record = type->getAsCXXRecordDecl(); bool CanRegisterDestructor = Record && (!CGM.getCXXABI().HasThisReturn( GlobalDecl(Record->getDestructor(), Dtor_Complete)) || CGM.getCXXABI().canCallMismatchedFunctionType()); + + // If __cxa_atexit is disabled via a flag, a different helper function is + // generated elsewhere which uses atexit instead, and it takes the destructor + // directly. + auto UsingExternalHelper = CGM.getCodeGenOpts().CXAAtExit; + mlir::cir::FuncOp fnOp; if (Record && (CanRegisterDestructor || UsingExternalHelper)) { assert(!D->getTLSKind() && "TLS NYI"); + assert(!Record->hasTrivialDestructor()); + assert(!MissingFeatures::openCL()); CXXDestructorDecl *Dtor = Record->getDestructor(); - CGM.getCXXABI().buildDestructorCall(CGF, Dtor, Dtor_Complete, - /*ForVirtualBase=*/false, - /*Delegating=*/false, DeclPtr, type); + // In LLVM OG codegen this is done in registerGlobalDtor, but CIRGen + // relies on LoweringPrepare for further decoupling, so build the + // call right here. + auto GD = GlobalDecl(Dtor, Dtor_Complete); + auto structorInfo = CGM.getAddrAndTypeOfCXXStructor(GD); + fnOp = structorInfo.second; + CGF.getBuilder().createCallOp( + CGF.getLoc(D->getSourceRange()), + mlir::FlatSymbolRefAttr::get(fnOp.getSymNameAttr()), + mlir::ValueRange{CGF.CGM.getAddrOfGlobalVar(D)}); } else { llvm_unreachable("array destructors not yet supported!"); } + assert(fnOp && "expected cir.func"); + CGM.getCXXABI().registerGlobalDtor(CGF, D, fnOp, nullptr); } mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { @@ -260,8 +293,8 @@ mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, mlir::cir::GlobalOp Addr, - bool NeedsCtor, - bool NeedsDtor) { + bool NeedsCtor, bool NeedsDtor, + bool isCstStorage) { assert(D && " Expected a global declaration!"); CIRGenFunction CGF{*this, builder, true}; CurCGF = &CGF; @@ -278,14 +311,20 @@ void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, builder.create(Addr->getLoc()); } - if (NeedsDtor) { + if (isCstStorage) { + // buildDeclInvariant(CGF, D, DeclPtr); + llvm_unreachable("NYI"); + } else { + // If not constant storage we'll emit this regardless of NeedsDtor value. mlir::OpBuilder::InsertionGuard guard(builder); auto block = builder.createBlock(&Addr.getDtorRegion()); builder.setInsertionPointToStart(block); - Address DeclAddr(getAddrOfGlobalVar(D), getASTContext().getDeclAlign(D)); - buildDeclDestory(CGF, D, DeclAddr); + buildDeclDestroy(CGF, D); builder.setInsertionPointToEnd(block); - builder.create(Addr->getLoc()); + if (block->empty()) + block->erase(); + else + builder.create(Addr->getLoc()); } CurCGF = nullptr; diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 2df22b70d91a..2aea122f9759 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -164,6 +164,15 @@ class CIRGenCXXABI { bool Delegating, Address This, QualType ThisTy) = 0; + /// Emit code to force the execution of a destructor during global + /// teardown. The default implementation of this uses atexit. + /// + /// \param Dtor - a function taking a single pointer argument + /// \param Addr - a pointer to pass to the destructor function. + virtual void registerGlobalDtor(CIRGenFunction &CGF, const VarDecl *D, + mlir::cir::FuncOp dtor, + mlir::Attribute Addr) = 0; + virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, FunctionArgList &Args) const = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp index ed1cd708e6b5..682eddbe9581 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp @@ -31,9 +31,9 @@ void CIRGenModule::buildCXXGlobalInitFunc() { assert(0 && "NYE"); } -void CIRGenModule::buildGlobalVarDeclInit(const VarDecl *D, - mlir::cir::GlobalOp Addr, - bool PerformInit) { +void CIRGenModule::buildCXXGlobalVarDeclInitFunc(const VarDecl *D, + mlir::cir::GlobalOp Addr, + bool PerformInit) { // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__, // __constant__ and __shared__ variables defined in namespace scope, // that are of class type, cannot have a non-empty constructor. All @@ -51,36 +51,48 @@ void CIRGenModule::buildGlobalVarDeclInit(const VarDecl *D, if (I != DelayedCXXInitPosition.end() && I->second == ~0U) return; - if (PerformInit) { - QualType T = D->getType(); + buildCXXGlobalVarDeclInit(D, Addr, PerformInit); +} - // TODO: handle address space - // The address space of a static local variable (DeclPtr) may be different - // from the address space of the "this" argument of the constructor. In that - // case, we need an addrspacecast before calling the constructor. - // - // struct StructWithCtor { - // __device__ StructWithCtor() {...} - // }; - // __device__ void foo() { - // __shared__ StructWithCtor s; - // ... - // } - // - // For example, in the above CUDA code, the static local variable s has a - // "shared" address space qualifier, but the constructor of StructWithCtor - // expects "this" in the "generic" address space. - assert(!MissingFeatures::addressSpace()); +void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *D, + mlir::cir::GlobalOp Addr, + bool PerformInit) { + QualType T = D->getType(); - if (!T->isReferenceType()) { - bool NeedsDtor = - D->needsDestruction(getASTContext()) == QualType::DK_cxx_destructor; - assert(!isTypeConstant(D->getType(), true, !NeedsDtor) && - "invaraint-typed initialization NYI"); + // TODO: handle address space + // The address space of a static local variable (DeclPtr) may be different + // from the address space of the "this" argument of the constructor. In that + // case, we need an addrspacecast before calling the constructor. + // + // struct StructWithCtor { + // __device__ StructWithCtor() {...} + // }; + // __device__ void foo() { + // __shared__ StructWithCtor s; + // ... + // } + // + // For example, in the above CUDA code, the static local variable s has a + // "shared" address space qualifier, but the constructor of StructWithCtor + // expects "this" in the "generic" address space. + assert(!MissingFeatures::addressSpace()); - if (PerformInit || NeedsDtor) - codegenGlobalInitCxxStructor(D, Addr, PerformInit, NeedsDtor); - return; + if (!T->isReferenceType()) { + if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && + D->hasAttr()) { + llvm_unreachable("NYI"); } + bool NeedsDtor = + D->needsDestruction(getASTContext()) == QualType::DK_cxx_destructor; + // PerformInit, constant store invariant / destroy handled below. + bool isCstStorage = + D->getType().isConstantStorage(getASTContext(), true, !NeedsDtor); + codegenGlobalInitCxxStructor(D, Addr, PerformInit, NeedsDtor, isCstStorage); + return; } + + assert(PerformInit && "cannot have constant initializer which needs " + "destruction for reference"); + // TODO(cir): buildReferenceBindingToExpr + llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index a0f3363dd6d0..09de029444ce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -172,6 +172,9 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy) override; + void registerGlobalDtor(CIRGenFunction &CGF, const VarDecl *D, + mlir::cir::FuncOp dtor, + mlir::Attribute Addr) override; virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) override; virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) override; CatchTypeInfo @@ -2132,6 +2135,25 @@ void CIRGenItaniumCXXABI::buildDestructorCall( nullptr); } +void CIRGenItaniumCXXABI::registerGlobalDtor(CIRGenFunction &CGF, + const VarDecl *D, + mlir::cir::FuncOp dtor, + mlir::Attribute Addr) { + if (D->isNoDestroy(CGM.getASTContext())) + return; + + if (D->getTLSKind()) + llvm_unreachable("NYI"); + + // HLSL doesn't support atexit. + if (CGM.getLangOpts().HLSL) + llvm_unreachable("NYI"); + + // The default behavior is to use atexit. This is handled in lowering + // prepare. For now just emit the body for the dtor. + // .... +} + mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam( CIRGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c294f1f42bfe..88c3c2b0b397 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1107,6 +1107,18 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, !IsDefinitionAvailableExternally && D->needsDestruction(astCtx) == QualType::DK_cxx_destructor; + // It is helpless to emit the definition for an available_externally variable + // which can't be marked as const. + // We don't need to check if it needs global ctor or dtor. See the above + // comment for ideas. + if (IsDefinitionAvailableExternally && + (!D->hasConstantInitialization() || + // TODO: Update this when we have interface to check constexpr + // destructor. + D->needsDestruction(getASTContext()) || + !D->getType().isConstantStorage(getASTContext(), true, true))) + return; + const VarDecl *InitDecl; const Expr *InitExpr = D->getAnyInitializer(InitDecl); @@ -1200,9 +1212,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, auto Entry = buildGlobal(D, InitType, ForDefinition_t(!IsTentative)); // TODO(cir): Strip off pointer casts from Entry if we get them? - // TODO(cir): LLVM codegen used GlobalValue to handle both Function or - // GlobalVariable here. We currently only support GlobalOp, should this be - // used for FuncOp? + // TODO(cir): use GlobalValue interface assert(dyn_cast(&Entry) && "FuncOp not supported here"); auto GV = Entry; @@ -1315,10 +1325,12 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // TODO(cir): // Emit the initializer function if necessary. if (NeedsGlobalCtor || NeedsGlobalDtor) - buildGlobalVarDeclInit(D, GV, NeedsGlobalCtor); + buildCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor); // TODO(cir): sanitizers (reportGlobalToASan) and global variable debug // information. + assert(!MissingFeatures::sanitizeOther()); + assert(!MissingFeatures::generateDebugInfo()); } void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index d4d3339f3fdf..22f774bbaabc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -577,8 +577,11 @@ class CIRGenModule : public CIRGenTypeCache { bool IsTentative = false); /// Emit the function that initializes the specified global - void buildGlobalVarDeclInit(const VarDecl *D, mlir::cir::GlobalOp Addr, - bool PerformInit); + void buildCXXGlobalVarDeclInit(const VarDecl *D, mlir::cir::GlobalOp Addr, + bool PerformInit); + + void buildCXXGlobalVarDeclInitFunc(const VarDecl *D, mlir::cir::GlobalOp Addr, + bool PerformInit); void addDeferredVTable(const CXXRecordDecl *RD) { DeferredVTables.push_back(RD); @@ -614,7 +617,7 @@ class CIRGenModule : public CIRGenTypeCache { // Produce code for this constructor/destructor for global initialzation. void codegenGlobalInitCxxStructor(const clang::VarDecl *D, mlir::cir::GlobalOp Addr, bool NeedsCtor, - bool NeedsDtor); + bool NeedsDtor, bool isCstStorage); bool lookupRepresentativeDecl(llvm::StringRef MangledName, clang::GlobalDecl &Result) const; diff --git a/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp b/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp deleted file mode 100644 index 2f55a395c4b1..000000000000 --- a/clang/test/CIR/CodeGen/ctor-srcloc-fix.cpp +++ /dev/null @@ -1,8 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s - -struct e { e(int); }; -e *g = new e(0); - -//CHECK: {{%.*}} = cir.const #cir.int<1> : !u64i loc(#loc11) -//CHECK: {{%.*}} = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr loc(#loc6) From 64dfb7897807cc974a12310c2a93aae1b75b15b5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 23 Aug 2024 15:03:45 -0700 Subject: [PATCH 1785/2301] [CIR][NFC] Fix sign-compare warning --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 9710925029c4..5342612fdb7d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -894,7 +894,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { unsigned AlignMask = Layout.getABITypeAlign(Elts[I]).value() - 1; Pos = (Pos + AlignMask) & ~AlignMask; assert(Offset >= 0); - if (static_cast(Offset) < Pos + EltSize) { + if (Offset < Pos + EltSize) { Indices.push_back(I); SubType = Elts[I]; Offset -= Pos; From 342b3899f4fc210990eb62ff3f1033d5cf60d8de Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 23 Aug 2024 15:04:55 -0700 Subject: [PATCH 1786/2301] [CIR] Add missing testcase for previous commit --- clang/test/CIR/CodeGen/global-new.cpp | 29 +++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 clang/test/CIR/CodeGen/global-new.cpp diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp new file mode 100644 index 000000000000..4e0f604e1291 --- /dev/null +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE +// RUN: FileCheck %s -check-prefix=CIR_AFTER --input-file=%t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll + +struct e { e(int); }; +e *g = new e(0); + +// CIR_BEFORE: ![[ty:.*]] = !cir.struct { +// CIR_BEFORE: %[[GlobalAddr:.*]] = cir.get_global @g : !cir.ptr> +// CIR_BEFORE: %[[Size:.*]] = cir.const #cir.int<1> : !u64i +// CIR_BEFORE: %[[NewAlloc:.*]] = cir.call @_Znwm(%[[Size]]) : (!u64i) -> !cir.ptr +// CIR_BEFORE: %[[NewCasted:.*]] = cir.cast(bitcast, %[[NewAlloc]] : !cir.ptr), !cir.ptr +// CIR_BEFORE: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CIR_BEFORE: cir.call @_ZN1eC1Ei(%[[NewCasted]], %[[ZERO]]) : (!cir.ptr, !s32i) -> () +// CIR_BEFORE: cir.store %3, %[[GlobalAddr]] : !cir.ptr, !cir.ptr> +// CIR_BEFORE: } + +// CIR_AFTER: {{%.*}} = cir.const #cir.int<1> : !u64i +// CIR_AFTER: {{%.*}} = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr + +// LLVM-DAG: @llvm.global_ctors = appending constant [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }] +// LLVM: define internal void @__cxx_global_var_init() +// LLVM: call ptr @_Znwm(i64 1) + +// LLVM: define void @_GLOBAL__sub_I_global_new.cpp() +// LLVM: call void @__cxx_global_var_init() From 58053f72d3590b025a908b6731b6f99ee98c975a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 23 Aug 2024 15:31:00 -0700 Subject: [PATCH 1787/2301] [CIR][CIRGen] Add minimal support for building invariant globals --- clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 18 ++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 ++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 3 +-- clang/test/CIR/CodeGen/global-new.cpp | 10 +++++++++- 5 files changed, 29 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 56e20ade88b2..6c26f7e631e7 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -162,6 +162,7 @@ struct MissingFeatures { static bool armComputeVolatileBitfields() { return false; } static bool insertBuiltinUnpredictable() { return false; } static bool createInvariantGroup() { return false; } + static bool createInvariantIntrinsic() { return false; } static bool addAutoInitAnnotation() { return false; } static bool addHeapAllocSiteMetadata() { return false; } static bool loopInfoStack() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 9b7032993399..85013189fd7e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -291,6 +291,21 @@ mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { return Fn; } +/// Emit code to cause the variable at the given address to be considered as +/// constant from this point onwards. +static void buildDeclInvariant(CIRGenFunction &CGF, const VarDecl *D) { + return CGF.buildInvariantStart( + CGF.getContext().getTypeSizeInChars(D->getType())); +} + +void CIRGenFunction::buildInvariantStart([[maybe_unused]] CharUnits Size) { + // Do not emit the intrinsic if we're not optimizing. + if (!CGM.getCodeGenOpts().OptimizationLevel) + return; + + assert(!MissingFeatures::createInvariantIntrinsic()); +} + void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, mlir::cir::GlobalOp Addr, bool NeedsCtor, bool NeedsDtor, @@ -312,8 +327,7 @@ void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, } if (isCstStorage) { - // buildDeclInvariant(CGF, D, DeclPtr); - llvm_unreachable("NYI"); + buildDeclInvariant(CGF, D); } else { // If not constant storage we'll emit this regardless of NeedsDtor value. mlir::OpBuilder::InsertionGuard guard(builder); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index c411b86ac067..7d665305a25c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -866,6 +866,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value buildRuntimeCall(mlir::Location loc, mlir::cir::FuncOp callee, ArrayRef args = {}); + void buildInvariantStart(CharUnits Size); + /// Create a check for a function parameter that may potentially be /// declared as non-null. void buildNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 09de029444ce..ea0edea495dd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2150,8 +2150,7 @@ void CIRGenItaniumCXXABI::registerGlobalDtor(CIRGenFunction &CGF, llvm_unreachable("NYI"); // The default behavior is to use atexit. This is handled in lowering - // prepare. For now just emit the body for the dtor. - // .... + // prepare. Nothing to be done for CIR here. } mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam( diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index 4e0f604e1291..d2130a877348 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -21,9 +21,17 @@ e *g = new e(0); // CIR_AFTER: {{%.*}} = cir.const #cir.int<1> : !u64i // CIR_AFTER: {{%.*}} = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr -// LLVM-DAG: @llvm.global_ctors = appending constant [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }] +// LLVM-DAG: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }, { i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init.1, ptr null }] // LLVM: define internal void @__cxx_global_var_init() // LLVM: call ptr @_Znwm(i64 1) +// LLVM: define internal void @__cxx_global_var_init.1() +// LLVM: call ptr @_Znwm(i64 1) + // LLVM: define void @_GLOBAL__sub_I_global_new.cpp() // LLVM: call void @__cxx_global_var_init() +// LLVM: call void @__cxx_global_var_init.1() + +struct PackedStruct { +}; +PackedStruct*const packed_2 = new PackedStruct(); \ No newline at end of file From 9f3b5dc8a1f1cc3359b4070e435525676d6b0dc7 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 28 Aug 2024 07:29:56 +0800 Subject: [PATCH 1788/2301] [CIR][Lowering] Add the concept of simple lowering and use it for unary fp2fp operations (#806) This PR is the continuation and refinement of PR #434 which is originally authored by @philnik777 . Does not update it in-place since I don't have commit access to Nikolas' repo. This PR basically just rebases #434 onto the latest `main`. I also updated some naming used in the original PR to keep naming styles consistent. Co-authored-by: Nikolas Klauser --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 84 ++++++++++--- .../clang/CIR/Dialect/IR/CMakeLists.txt | 4 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 112 ++---------------- .../CIR/Lowering/builtin-floating-point.cir | 50 ++++++++ clang/utils/TableGen/CIRLoweringEmitter.cpp | 64 ++++++++++ clang/utils/TableGen/CMakeLists.txt | 1 + clang/utils/TableGen/TableGen.cpp | 7 ++ clang/utils/TableGen/TableGenBackends.h | 2 + 8 files changed, 207 insertions(+), 117 deletions(-) create mode 100644 clang/test/CIR/Lowering/builtin-floating-point.cir create mode 100644 clang/utils/TableGen/CIRLoweringEmitter.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 15d8cbb883e7..af9a1e780e1e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -38,8 +38,48 @@ include "mlir/IR/CommonAttrConstraints.td" // CIR Ops //===----------------------------------------------------------------------===// +// LLVMLoweringInfo is used by cir-tablegen to generate LLVM lowering logic +// automatically for CIR operations. The `llvmOp` field gives the name of the +// LLVM IR dialect operation that the CIR operation will be lowered to. The +// input arguments of the CIR operation will be passed in the same order to the +// lowered LLVM IR operation. +// +// Example: +// +// For the following CIR operation definition: +// +// def FooOp : CIR_Op<"foo"> { +// // ... +// let arguments = (ins CIR_AnyType:$arg1, CIR_AnyType:$arg2); +// let llvmOp = "BarOp"; +// } +// +// cir-tablegen will generate LLVM lowering code for the FooOp similar to the +// following: +// +// class CIRFooOpLowering +// : public mlir::OpConversionPattern { +// public: +// using OpConversionPattern::OpConversionPattern; +// +// mlir::LogicalResult matchAndRewrite( +// mlir::cir::FooOp op, +// OpAdaptor adaptor, +// mlir::ConversionPatternRewriter &rewriter) const override { +// rewriter.replaceOpWithNewOp( +// op, adaptor.getOperands()[0], adaptor.getOperands()[1]); +// return mlir::success(); +// } +// } +// +// If you want fully customized LLVM IR lowering logic, simply exclude the +// `llvmOp` field from your CIR operation definition. +class LLVMLoweringInfo { + string llvmOp = ""; +} + class CIR_Op traits = []> : - Op; + Op, LLVMLoweringInfo; //===----------------------------------------------------------------------===// // CIR Op Traits @@ -2708,6 +2748,8 @@ def VecInsertOp : CIR_Op<"vec.insert", [Pure, }]; let hasVerifier = 0; + + let llvmOp = "InsertElementOp"; } //===----------------------------------------------------------------------===// @@ -2732,6 +2774,8 @@ def VecExtractOp : CIR_Op<"vec.extract", [Pure, }]; let hasVerifier = 0; + + let llvmOp = "ExtractElementOp"; } //===----------------------------------------------------------------------===// @@ -3762,30 +3806,32 @@ def LLroundOp : UnaryFPToIntBuiltinOp<"llround">; def LrintOp : UnaryFPToIntBuiltinOp<"lrint">; def LLrintOp : UnaryFPToIntBuiltinOp<"llrint">; -class UnaryFPToFPBuiltinOp +class UnaryFPToFPBuiltinOp : CIR_Op { let arguments = (ins CIR_AnyFloat:$src); let results = (outs CIR_AnyFloat:$result); let summary = "libc builtin equivalent ignoring " "floating point exceptions and errno"; let assemblyFormat = "$src `:` type($src) attr-dict"; -} -def CeilOp : UnaryFPToFPBuiltinOp<"ceil">; -def CosOp : UnaryFPToFPBuiltinOp<"cos">; -def ExpOp : UnaryFPToFPBuiltinOp<"exp">; -def Exp2Op : UnaryFPToFPBuiltinOp<"exp2">; -def FloorOp : UnaryFPToFPBuiltinOp<"floor">; -def FAbsOp : UnaryFPToFPBuiltinOp<"fabs">; -def LogOp : UnaryFPToFPBuiltinOp<"log">; -def Log10Op : UnaryFPToFPBuiltinOp<"log10">; -def Log2Op : UnaryFPToFPBuiltinOp<"log2">; -def NearbyintOp : UnaryFPToFPBuiltinOp<"nearbyint">; -def RintOp : UnaryFPToFPBuiltinOp<"rint">; -def RoundOp : UnaryFPToFPBuiltinOp<"round">; -def SinOp : UnaryFPToFPBuiltinOp<"sin">; -def SqrtOp : UnaryFPToFPBuiltinOp<"sqrt">; -def TruncOp : UnaryFPToFPBuiltinOp<"trunc">; + let llvmOp = llvmOpName; +} + +def CeilOp : UnaryFPToFPBuiltinOp<"ceil", "FCeilOp">; +def CosOp : UnaryFPToFPBuiltinOp<"cos", "CosOp">; +def ExpOp : UnaryFPToFPBuiltinOp<"exp", "ExpOp">; +def Exp2Op : UnaryFPToFPBuiltinOp<"exp2", "Exp2Op">; +def FloorOp : UnaryFPToFPBuiltinOp<"floor", "FFloorOp">; +def FAbsOp : UnaryFPToFPBuiltinOp<"fabs", "FAbsOp">; +def LogOp : UnaryFPToFPBuiltinOp<"log", "LogOp">; +def Log10Op : UnaryFPToFPBuiltinOp<"log10", "Log10Op">; +def Log2Op : UnaryFPToFPBuiltinOp<"log2", "Log2Op">; +def NearbyintOp : UnaryFPToFPBuiltinOp<"nearbyint", "NearbyintOp">; +def RintOp : UnaryFPToFPBuiltinOp<"rint", "RintOp">; +def RoundOp : UnaryFPToFPBuiltinOp<"round", "RoundOp">; +def SinOp : UnaryFPToFPBuiltinOp<"sin", "SinOp">; +def SqrtOp : UnaryFPToFPBuiltinOp<"sqrt", "SqrtOp">; +def TruncOp : UnaryFPToFPBuiltinOp<"trunc", "FTruncOp">; class BinaryFPToFPBuiltinOp : CIR_Op { @@ -3987,6 +4033,8 @@ def StackRestoreOp : CIR_Op<"stack_restore"> { let arguments = (ins CIR_PointerType:$ptr); let assemblyFormat = "$ptr attr-dict `:` qualified(type($ptr))"; + + let llvmOp = "StackRestoreOp"; } def AsmATT : I32EnumAttrCase<"x86_att", 0>; diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt index c502525d30e8..3d43b06c6217 100644 --- a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt @@ -27,3 +27,7 @@ mlir_tablegen(CIROpsStructs.cpp.inc -gen-attrdef-defs) mlir_tablegen(CIROpsAttributes.h.inc -gen-attrdef-decls) mlir_tablegen(CIROpsAttributes.cpp.inc -gen-attrdef-defs) add_public_tablegen_target(MLIRCIREnumsGen) + +clang_tablegen(CIRBuiltinsLowering.inc -gen-cir-builtins-lowering + SOURCE CIROps.td + TARGET CIRBuiltinsLowering) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 87b1c909ba7f..95f36446c7a6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1331,34 +1331,6 @@ class CIRVectorCreateLowering } }; -class CIRVectorInsertLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::VecInsertOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp( - op, adaptor.getVec(), adaptor.getValue(), adaptor.getIndex()); - return mlir::success(); - } -}; - -class CIRVectorExtractLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::VecExtractOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp( - op, adaptor.getVec(), adaptor.getIndex()); - return mlir::success(); - } -}; - class CIRVectorCmpOpLowering : public mlir::OpConversionPattern { public: @@ -3155,19 +3127,6 @@ class CIRPtrDiffOpLowering } }; -class CIRFAbsOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::FAbsOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp( - op, adaptor.getOperands().front()); - return mlir::success(); - } -}; - class CIRExpectOpLowering : public mlir::OpConversionPattern { public: @@ -3247,19 +3206,8 @@ class CIRStackSaveLowering } }; -class CIRStackRestoreLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::StackRestoreOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, - adaptor.getPtr()); - return mlir::success(); - } -}; +#define GET_BUILTIN_LOWERING_CLASSES +#include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" class CIRUnreachableLowering : public mlir::OpConversionPattern { @@ -3602,38 +3550,6 @@ class CIRUnaryFPBuiltinOpLowering : public mlir::OpConversionPattern { } }; -using CIRCeilOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRCosOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRExpOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRExp2OpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRFloorOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRFabsOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRLogOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRLog10OpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRLog2OpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRNearbyintOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRRintOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRRoundOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRSinOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRSqrtOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRTruncOpLowering = - CIRUnaryFPBuiltinOpLowering; - using CIRLroundOpLowering = CIRUnaryFPBuiltinOpLowering; using CIRLLroundOpLowering = @@ -3907,23 +3823,21 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, - CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, - CIRVectorSplatLowering, CIRVectorTernaryLowering, + CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, CIRVectorShuffleVecLowering, - CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, - CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, - CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, - CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, CIRLroundOpLowering, - CIRLLroundOpLowering, CIRLrintOpLowering, CIRLLrintOpLowering, - CIRCeilOpLowering, CIRCosOpLowering, CIRExpOpLowering, CIRExp2OpLowering, - CIRFloorOpLowering, CIRFAbsOpLowering, CIRLogOpLowering, - CIRLog10OpLowering, CIRLog2OpLowering, CIRNearbyintOpLowering, - CIRRintOpLowering, CIRRoundOpLowering, CIRSinOpLowering, - CIRSqrtOpLowering, CIRTruncOpLowering, CIRCopysignOpLowering, + CIRStackSaveLowering, CIRUnreachableLowering, CIRTrapLowering, + CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, + CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, + CIRCmpThreeWayOpLowering, CIRLroundOpLowering, CIRLLroundOpLowering, + CIRLrintOpLowering, CIRLLrintOpLowering, CIRCopysignOpLowering, CIRFModOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering, CIRPowOpLowering, CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, - CIRThrowOpLowering>(converter, patterns.getContext()); + CIRThrowOpLowering +#define GET_BUILTIN_LOWERING_LIST +#include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" +#undef GET_BUILTIN_LOWERING_LIST + >(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/Lowering/builtin-floating-point.cir b/clang/test/CIR/Lowering/builtin-floating-point.cir new file mode 100644 index 000000000000..82b733233da3 --- /dev/null +++ b/clang/test/CIR/Lowering/builtin-floating-point.cir @@ -0,0 +1,50 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s + +module { + cir.func @test(%arg0 : !cir.float) { + %1 = cir.cos %arg0 : !cir.float + // CHECK: llvm.intr.cos(%arg0) : (f32) -> f32 + + %2 = cir.ceil %arg0 : !cir.float + // CHECK: llvm.intr.ceil(%arg0) : (f32) -> f32 + + %3 = cir.exp %arg0 : !cir.float + // CHECK: llvm.intr.exp(%arg0) : (f32) -> f32 + + %4 = cir.exp2 %arg0 : !cir.float + // CHECK: llvm.intr.exp2(%arg0) : (f32) -> f32 + + %5 = cir.fabs %arg0 : !cir.float + // CHECK: llvm.intr.fabs(%arg0) : (f32) -> f32 + + %6 = cir.floor %arg0 : !cir.float + // CHECK: llvm.intr.floor(%arg0) : (f32) -> f32 + + %7 = cir.log %arg0 : !cir.float + // CHECK: llvm.intr.log(%arg0) : (f32) -> f32 + + %8 = cir.log10 %arg0 : !cir.float + // CHECK: llvm.intr.log10(%arg0) : (f32) -> f32 + + %9 = cir.log2 %arg0 : !cir.float + // CHECK: llvm.intr.log2(%arg0) : (f32) -> f32 + + %10 = cir.nearbyint %arg0 : !cir.float + // CHECK: llvm.intr.nearbyint(%arg0) : (f32) -> f32 + + %11 = cir.rint %arg0 : !cir.float + // CHECK: llvm.intr.rint(%arg0) : (f32) -> f32 + + %12 = cir.round %arg0 : !cir.float + // CHECK: llvm.intr.round(%arg0) : (f32) -> f32 + + %13 = cir.sin %arg0 : !cir.float + // CHECK: llvm.intr.sin(%arg0) : (f32) -> f32 + + %14 = cir.sqrt %arg0 : !cir.float + // CHECK: llvm.intr.sqrt(%arg0) : (f32) -> f32 + + cir.return + } +} diff --git a/clang/utils/TableGen/CIRLoweringEmitter.cpp b/clang/utils/TableGen/CIRLoweringEmitter.cpp new file mode 100644 index 000000000000..29daa63be86b --- /dev/null +++ b/clang/utils/TableGen/CIRLoweringEmitter.cpp @@ -0,0 +1,64 @@ +//===- CIRBuiltinsEmitter.cpp - Generate lowering of builtins --=-*- C++ -*--=// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "TableGenBackends.h" +#include "llvm/TableGen/TableGenBackend.h" + +using namespace llvm; + +namespace { +std::string ClassDefinitions; +std::string ClassList; + +void GenerateLowering(raw_ostream &OS, const Record *Operation) { + using namespace std::string_literals; + std::string Name = Operation->getName().str(); + std::string LLVMOp = Operation->getValueAsString("llvmOp").str(); + ClassDefinitions += + "class CIR" + Name + + "Lowering : public mlir::OpConversionPattern { + public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::)C++" + + Name + + R"C++( op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op"; + + auto ArgCount = Operation->getValueAsDag("arguments")->getNumArgs(); + for (size_t i = 0; i != ArgCount; ++i) + ClassDefinitions += ", adaptor.getOperands()[" + std::to_string(i) + ']'; + + ClassDefinitions += R"C++(); + return mlir::success(); + } +}; +)C++"; + + ClassList += ", CIR" + Name + "Lowering\n"; +} +} // namespace + +void clang::EmitCIRBuiltinsLowering(const RecordKeeper &Records, + raw_ostream &OS) { + emitSourceFileHeader("Lowering of ClangIR builtins to LLVM IR builtins", OS); + for (const auto *Builtin : + Records.getAllDerivedDefinitions("LLVMLoweringInfo")) { + if (!Builtin->getValueAsString("llvmOp").empty()) + GenerateLowering(OS, Builtin); + } + + OS << "#ifdef GET_BUILTIN_LOWERING_CLASSES\n" + << ClassDefinitions << "\n#undef GET_BUILTIN_LOWERING_CLASSES\n#endif\n"; + OS << "#ifdef GET_BUILTIN_LOWERING_LIST\n" + << ClassList << "\n#undef GET_BUILTIN_LOWERING_LIST\n#endif\n"; +} diff --git a/clang/utils/TableGen/CMakeLists.txt b/clang/utils/TableGen/CMakeLists.txt index 5b072a1ac196..df5d8c03f5a5 100644 --- a/clang/utils/TableGen/CMakeLists.txt +++ b/clang/utils/TableGen/CMakeLists.txt @@ -4,6 +4,7 @@ add_tablegen(clang-tblgen CLANG DESTINATION "${CLANG_TOOLS_INSTALL_DIR}" EXPORT Clang ASTTableGen.cpp + CIRLoweringEmitter.cpp ClangASTNodesEmitter.cpp ClangASTPropertiesEmitter.cpp ClangAttrEmitter.cpp diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp index 569d7a6a3ac8..a2efc3779fc4 100644 --- a/clang/utils/TableGen/TableGen.cpp +++ b/clang/utils/TableGen/TableGen.cpp @@ -25,6 +25,7 @@ using namespace clang; enum ActionType { PrintRecords, DumpJSON, + GenCIRBuiltinsLowering, GenClangAttrClasses, GenClangAttrParserStringSwitches, GenClangAttrSubjectMatchRulesParserStringSwitches, @@ -121,6 +122,9 @@ cl::opt Action( "Print all records to stdout (default)"), clEnumValN(DumpJSON, "dump-json", "Dump all records as machine-readable JSON"), + clEnumValN(GenCIRBuiltinsLowering, "gen-cir-builtins-lowering", + "Generate lowering of ClangIR builtins to equivalent LLVM " + "IR builtins"), clEnumValN(GenClangAttrClasses, "gen-clang-attr-classes", "Generate clang attribute clases"), clEnumValN(GenClangAttrParserStringSwitches, @@ -330,6 +334,9 @@ bool ClangTableGenMain(raw_ostream &OS, const RecordKeeper &Records) { case DumpJSON: EmitJSON(Records, OS); break; + case GenCIRBuiltinsLowering: + EmitCIRBuiltinsLowering(Records, OS); + break; case GenClangAttrClasses: EmitClangAttrClass(Records, OS); break; diff --git a/clang/utils/TableGen/TableGenBackends.h b/clang/utils/TableGen/TableGenBackends.h index 03ed3dad9363..cefdb5611ad8 100644 --- a/clang/utils/TableGen/TableGenBackends.h +++ b/clang/utils/TableGen/TableGenBackends.h @@ -24,6 +24,8 @@ class RecordKeeper; namespace clang { +void EmitCIRBuiltinsLowering(const llvm::RecordKeeper &RK, + llvm::raw_ostream &OS); void EmitClangDeclContext(const llvm::RecordKeeper &RK, llvm::raw_ostream &OS); /** @param PriorizeIfSubclassOf These classes should be prioritized in the output. From b1c89ad138a5a3581eaa939aff4be96fa564f2c5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Aug 2024 11:29:36 -0700 Subject: [PATCH 1789/2301] [CIR][CIRGen] Exceptions: Use the surrounding scope (if available) for try local allocas --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 11 +++++++++++ clang/test/CIR/CodeGen/try-catch.cpp | 2 +- clang/test/CIR/Lowering/try-catch.cpp | 2 +- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 4d45b22af275..97505aad8172 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2713,6 +2713,17 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, mlir::Block *entryBlock = insertIntoFnEntryBlock ? getCurFunctionEntryBlock() : currLexScope->getEntryBlock(); + + // If this is an alloca in the entry basic block of a cir.try and there's + // a surrounding cir.scope, make sure the alloca ends up in the surrounding + // scope instead. This is necessary in order to guarantee all SSA values are + // reachable during cleanups. + if (auto tryOp = llvm::dyn_cast_if_present( + entryBlock->getParentOp())) { + if (auto scopeOp = llvm::dyn_cast(tryOp->getParentOp())) + entryBlock = &scopeOp.getRegion().front(); + } + return buildAlloca(name, ty, loc, alignment, builder.getBestAllocaInsertPoint(entryBlock), arraySize); } diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 4042afb08dcb..8945bc33b739 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -10,11 +10,11 @@ unsigned long long tc() { try { // CHECK: cir.scope { + // CHECK: %[[local_a:.*]] = cir.alloca !s32i, !cir.ptr, ["a", init] // CHECK: %[[msg:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["msg"] // CHECK: %[[idx:.*]] = cir.alloca !s32i, !cir.ptr, ["idx"] // CHECK: cir.try { - // CHECK: %[[local_a:.*]] = cir.alloca !s32i, !cir.ptr, ["a", init] int a = 4; z = division(x, y); // CHECK: %[[div_res:.*]] = cir.call exception @_Z8divisionii({{.*}}) : (!s32i, !s32i) -> !cir.double diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 34a547d5ed10..b985ecab8cca 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -10,12 +10,12 @@ unsigned long long tc() { int x = 50, y = 3; unsigned long long z; + // CIR_FLAT: cir.alloca !s32i, !cir.ptr, ["a" // CIR_FLAT: cir.alloca !cir.ptr, !cir.ptr>, ["msg"] // CIR_FLAT: cir.alloca !s32i, !cir.ptr, ["idx"] // CIR_FLAT: cir.br ^bb2 try { // CIR_FLAT: ^bb2: // pred: ^bb1 - // CIR_FLAT: cir.alloca !s32i, !cir.ptr // CIR_FLAT: cir.try_call @_Z8divisionii({{.*}}) ^[[CONT:.*]], ^[[LPAD:.*]] : (!s32i, !s32i) int a = 4; z = division(x, y); From e76b39f12daf3623426ccb84af403e90279b8a8d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Aug 2024 14:35:12 -0700 Subject: [PATCH 1790/2301] [CIR][NFC] Exceptions: introduce cleanup region to cir.try --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 ++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 +++++- clang/test/CIR/CodeGen/try-catch.cpp | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index af9a1e780e1e..1efac744329d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3496,10 +3496,12 @@ def TryOp : CIR_Op<"try", let arguments = (ins OptionalAttr:$catch_types); let regions = (region AnyRegion:$try_region, + AnyRegion:$cleanup_region, VariadicRegion:$catch_regions); let assemblyFormat = [{ $try_region + `cleanup` $cleanup_region custom($catch_regions, $catch_types) attr-dict }]; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7ae22e411306..c0ac9bc11701 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1219,8 +1219,11 @@ void TryOp::build( OpBuilder::InsertionGuard guard(builder); Region *tryBodyRegion = result.addRegion(); - builder.createBlock(tryBodyRegion); + mlir::Block *tryBodyBlock = builder.createBlock(tryBodyRegion); + Region *cleanupRegion = result.addRegion(); + + builder.setInsertionPointToStart(tryBodyBlock); tryBodyBuilder(builder, result.location); catchBuilder(builder, result.location, result); } @@ -1236,6 +1239,7 @@ void TryOp::getSuccessorRegions(mlir::RegionBranchPoint point, // If the condition isn't constant, both regions may be executed. regions.push_back(RegionSuccessor(&getTryRegion())); + regions.push_back(RegionSuccessor(&getCleanupRegion())); // FIXME: optimize, ideas include: // - If we know a target function never throws a specific type, we can // remove the catch handler. diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 8945bc33b739..e43e7b61fea8 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -21,6 +21,7 @@ unsigned long long tc() { a++; } catch (int idx) { + // CHECK: } cleanup { // CHECK: } catch [type #cir.global_view<@_ZTIi> : !cir.ptr { // CHECK: %[[catch_idx_addr:.*]] = cir.catch_param -> !cir.ptr // CHECK: %[[idx_load:.*]] = cir.load %[[catch_idx_addr]] : !cir.ptr, !s32i From a372fb0aafbfaca37f29544075ebe083d68fb660 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 26 Aug 2024 16:25:33 -0700 Subject: [PATCH 1791/2301] [CIR][CIRGen] Exceptions: handle cleanups and global initializers Still missing CFG flattening and lowering, coming next. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 22 ++-- clang/include/clang/CIR/MissingFeatures.h | 1 - clang/lib/CIR/CodeGen/CIRGenCall.cpp | 29 ++++- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 44 ++++++-- clang/lib/CIR/CodeGen/CIRGenCleanup.h | 51 +++++++++ clang/lib/CIR/CodeGen/CIRGenException.cpp | 105 ++++++++++++------ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 10 ++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 24 ++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 4 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 9 +- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 14 ++- clang/test/CIR/CodeGen/global-new.cpp | 13 +++ 13 files changed, 252 insertions(+), 79 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1efac744329d..b076e22ac5a6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3482,30 +3482,30 @@ def TryOp : CIR_Op<"try", Holds the lexical scope of `try {}`. Note that resources used on catch clauses are usually allocated in the same parent as `cir.try`. - Example: - %5 = cir.alloca !cir.ptr, !cir.ptr>, ["msg"] - ... - %10 = cir.try { - %11 = cir.alloca !cir.ptr, !cir.ptr>, ["__exception_ptr"] - ... // cir.try_call's - %20 = cir.load %11 : !cir.ptr>, !cir.ptr - cir.yield %20 : !cir.ptr - } : () -> !cir.ptr + `synthetic`: use `cir.try` to represent try/catches not originally + present in the source code (e.g. `g = new Class` under `-fexceptions`). + + Example: TBD ``` }]; - let arguments = (ins OptionalAttr:$catch_types); + let arguments = (ins UnitAttr:$synthetic, + OptionalAttr:$catch_types); let regions = (region AnyRegion:$try_region, AnyRegion:$cleanup_region, VariadicRegion:$catch_regions); let assemblyFormat = [{ - $try_region + (`synthetic` $synthetic^)? $try_region `cleanup` $cleanup_region custom($catch_regions, $catch_types) attr-dict }]; + let extraClassDeclaration = [{ + bool isCleanupActive() { return !getCleanupRegion().empty(); } + }]; + // Everything already covered elsewhere. let hasVerifier = 0; let builders = [ diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 6c26f7e631e7..bdfffd4788c1 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -122,7 +122,6 @@ struct MissingFeatures { static bool fastMathFuncAttributes() { return false; } // Exception handling - static bool setLandingPadCleanup() { return false; } static bool isSEHTryScope() { return false; } static bool ehStack() { return false; } static bool emitStartEHSpec() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 36a25ede4bae..5fb92b012623 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -447,11 +447,27 @@ static mlir::cir::CIRCallOpInterface buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, mlir::cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal, mlir::cir::FuncOp directFuncOp, - SmallVectorImpl &CIRCallArgs, bool InvokeDest, + SmallVectorImpl &CIRCallArgs, + mlir::Operation *InvokeDest, mlir::cir::ExtraFuncAttributesAttr extraFnAttrs) { auto &builder = CGF.getBuilder(); if (InvokeDest) { + // This call can throw, few options: + // - If this call does not have an associated cir.try, use the + // one provided by InvokeDest, + // - User written try/catch clauses require calls to handle + // exceptions under cir.try. + auto tryOp = dyn_cast_if_present(InvokeDest); + mlir::OpBuilder::InsertPoint ip = builder.saveInsertionPoint(); + bool changeInsertion = tryOp && tryOp.getSynthetic(); + if (changeInsertion) { + mlir::Block *lastBlock = &tryOp.getTryRegion().back(); + builder.setInsertionPointToStart(lastBlock); + } else { + assert(builder.getInsertionBlock() && "expected valid basic block"); + } + mlir::cir::CallOp tryCallOp; if (indirectFuncTy) { tryCallOp = builder.createIndirectTryCallOp(callLoc, indirectFuncVal, @@ -460,9 +476,15 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, tryCallOp = builder.createTryCallOp(callLoc, directFuncOp, CIRCallArgs); } tryCallOp->setAttr("extra_attrs", extraFnAttrs); + + if (changeInsertion) { + builder.create(tryOp.getLoc()); + builder.restoreInsertionPoint(ip); + } return tryCallOp; } + assert(builder.getInsertionBlock() && "expected valid basic block"); if (indirectFuncTy) return builder.createIndirectCallOp( callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs, extraFnAttrs); @@ -699,7 +721,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, noThrowAttr.getMnemonic())) CannotThrow = true; } - auto InvokeDest = CannotThrow ? false : getInvokeDest(); + auto InvokeDest = CannotThrow ? nullptr : getInvokeDest(); // TODO: UnusedReturnSizePtr if (const FunctionDecl *FD = dyn_cast_or_null(CurFuncDecl)) @@ -707,10 +729,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: alignment attributes - // Emit the actual call op. auto callLoc = loc; - assert(builder.getInsertionBlock() && "expected valid basic block"); - mlir::cir::CIRCallOpInterface theCall = [&]() { mlir::cir::FuncType indirectFuncTy; mlir::Value indirectFuncVal; diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index bdf6242a8e2b..c2feb6ef3923 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -128,6 +128,7 @@ static void buildCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, } // Ask the cleanup to emit itself. + assert(CGF.HaveInsertPoint() && "expected insertion point"); Fn->Emit(CGF, flags); assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?"); @@ -143,8 +144,7 @@ static void buildCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { assert(!EHStack.empty() && "cleanup stack is empty!"); assert(isa(*EHStack.begin()) && "top not a cleanup!"); - [[maybe_unused]] EHCleanupScope &Scope = - cast(*EHStack.begin()); + EHCleanupScope &Scope = cast(*EHStack.begin()); assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups()); // Remember activation information. @@ -152,9 +152,9 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { Address NormalActiveFlag = Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : Address::invalid(); - [[maybe_unused]] Address EHActiveFlag = Scope.shouldTestFlagInEHCleanup() - ? Scope.getActiveFlag() - : Address::invalid(); + Address EHActiveFlag = Scope.shouldTestFlagInEHCleanup() + ? Scope.getActiveFlag() + : Address::invalid(); // Check whether we need an EH cleanup. This is only true if we've // generated a lazy EH cleanup block. @@ -246,7 +246,17 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { bool IsEHa = getLangOpts().EHAsynch && !Scope.isLifetimeMarker(); // const EHPersonality &Personality = EHPersonality::get(*this); if (!RequiresNormalCleanup) { - llvm_unreachable("NYI"); + // Mark CPP scope end for passed-by-value Arg temp + // per Windows ABI which is "normally" Cleanup in callee + if (IsEHa && getInvokeDest()) { + // If we are deactivating a normal cleanup then we don't have a + // fallthrough. Restore original IP to emit CPP scope ends in the correct + // block. + llvm_unreachable("NYI"); + } + destroyOptimisticNormalEntry(*this, Scope); + Scope.markEmitted(); + EHStack.popCleanup(); } else { // If we have a fallthrough and no other need for the cleanup, // emit it directly. @@ -273,8 +283,16 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // Emit the EH cleanup if required. if (RequiresEHCleanup) { - // FIXME(cir): should we guard insertion point here? - auto *NextAction = getEHDispatchBlock(EHParent); + mlir::cir::TryOp tryOp = nullptr; + if (CGM.globalOpContext) { + SmallVector trys; + CGM.globalOpContext.walk( + [&](mlir::cir::TryOp op) { trys.push_back(op); }); + assert(trys.size() == 1 && "unknow global initialization style"); + tryOp = trys[0]; + } + + auto *NextAction = getEHDispatchBlock(EHParent, tryOp); (void)NextAction; // Push a terminate scope or cleanupendpad scope around the potentially @@ -301,6 +319,16 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // active or was used before it was deactivated. if (EHActiveFlag.isValid() || IsActive) { cleanupFlags.setIsForEHCleanup(); + assert(tryOp.isCleanupActive() && "expected active cleanup"); + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::Block *cleanup = &tryOp.getCleanupRegion().back(); + if (cleanup->empty()) { + builder.setInsertionPointToEnd(cleanup); + builder.createYield(tryOp.getLoc()); + } + + auto yield = cast(cleanup->getTerminator()); + builder.setInsertionPoint(yield); buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h index 661830f83fcf..99a730b51769 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -405,6 +405,57 @@ class alignas(8) EHCleanupScope : public EHScope { static bool classof(const EHScope *Scope) { return (Scope->getKind() == Cleanup); } + + /// Erases auxillary allocas and their usages for an unused cleanup. + /// Cleanups should mark these allocas as 'used' if the cleanup is + /// emitted, otherwise these instructions would be erased. + struct AuxillaryAllocas { + llvm::SmallVector auxAllocas; + bool used = false; + + // Records a potentially unused instruction to be erased later. + void add(mlir::cir::AllocaOp allocaOp) { auxAllocas.push_back(allocaOp); } + + // Mark all recorded instructions as used. These will not be erased later. + void markUsed() { + used = true; + auxAllocas.clear(); + } + + ~AuxillaryAllocas() { + if (used) + return; + llvm::SetVector uses; + for (auto *Inst : llvm::reverse(auxAllocas)) + collectuses(Inst, uses); + // Delete uses in the reverse order of insertion. + for (auto *I : llvm::reverse(uses)) + I->erase(); + } + + private: + void collectuses(mlir::Operation *op, + llvm::SetVector &uses) { + if (!op || !uses.insert(op)) + return; + for (auto *User : op->getUsers()) + collectuses(llvm::cast(User), uses); + } + }; + mutable struct AuxillaryAllocas *auxAllocas = nullptr; + + void markEmitted() { + if (!auxAllocas) + return; + getAuxillaryAllocas().markUsed(); + } + + AuxillaryAllocas &getAuxillaryAllocas() { + if (!auxAllocas) { + auxAllocas = new struct AuxillaryAllocas(); + } + return *auxAllocas; + } }; // NOTE: there's a bunch of different data classes tacked on after an // EHCleanupScope. It is asserted (in EHScopeStack::pushCleanup*) that diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 7674af00b266..67455c81f57f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -252,37 +252,50 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { DeactivateCleanupBlock(cleanup, op); } -mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup) { - // Just like some other try/catch related logic: return the basic block - // pointer but only use it to denote we're tracking things, but there - // shouldn't be any changes to that block after work done in this function. - auto tryOp = currLexScope->getTry(); +static mlir::Block *getResumeBlockFromCatch(mlir::cir::TryOp &tryOp, + mlir::cir::GlobalOp globalParent) { + assert(tryOp && "cir.try expected"); unsigned numCatchRegions = tryOp.getCatchRegions().size(); - assert(tryOp && numCatchRegions && "expected at least one region"); + assert(numCatchRegions && "expected at least one region"); auto &fallbackRegion = tryOp.getCatchRegions()[numCatchRegions - 1]; + return &fallbackRegion.getBlocks().back(); + return nullptr; +} - auto *resumeBlock = &fallbackRegion.getBlocks().back(); - if (!resumeBlock->empty()) - return resumeBlock; +mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup, + mlir::cir::TryOp tryOp) { + + if (ehResumeBlock) + return ehResumeBlock; + // Just like some other try/catch related logic: return the basic block + // pointer but only use it to denote we're tracking things, but there + // shouldn't be any changes to that block after work done in this function. + ehResumeBlock = getResumeBlockFromCatch(tryOp, CGM.globalOpContext); + if (!ehResumeBlock->empty()) + return ehResumeBlock; auto ip = getBuilder().saveInsertionPoint(); - getBuilder().setInsertionPointToStart(resumeBlock); + getBuilder().setInsertionPointToStart(ehResumeBlock); const EHPersonality &Personality = EHPersonality::get(*this); - // This can always be a call because we necessarily didn't find - // anything on the EH stack which needs our help. + // This can always be a call + // because we necessarily didn't + // find anything on the EH stack + // which needs our help. const char *RethrowName = Personality.CatchallRethrowFn; if (RethrowName != nullptr && !isCleanup) { - // FIXME(cir): upon testcase this should just add the 'rethrow' attribute - // to mlir::cir::ResumeOp below. + // FIXME(cir): upon testcase + // this should just add the + // 'rethrow' attribute to + // mlir::cir::ResumeOp below. llvm_unreachable("NYI"); } getBuilder().create(tryOp.getLoc(), mlir::Value{}, mlir::Value{}); getBuilder().restoreInsertionPoint(ip); - return resumeBlock; + return ehResumeBlock; } mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { @@ -387,7 +400,8 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { /// Emit the structure of the dispatch block for the given catch scope. /// It is an invariant that the dispatch block already exists. static void buildCatchDispatchBlock(CIRGenFunction &CGF, - EHCatchScope &catchScope) { + EHCatchScope &catchScope, + mlir::cir::TryOp tryOp) { if (EHPersonality::get(CGF).isWasmPersonality()) llvm_unreachable("NYI"); if (EHPersonality::get(CGF).usesFuncletPads()) @@ -426,7 +440,7 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, // block is the block for the enclosing EH scope. Make sure to call // getEHDispatchBlock for caching it. if (i + 1 == e) { - (void)CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope()); + (void)CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope(), tryOp); nextIsEnd = true; // If the next handler is a catch-all, we're at the end, and the @@ -485,13 +499,13 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { unsigned NumHandlers = S.getNumHandlers(); EHCatchScope &CatchScope = cast(*EHStack.begin()); assert(CatchScope.getNumHandlers() == NumHandlers); + mlir::cir::TryOp tryOp = currLexScope->getTry(); // If the catch was not required, bail out now. if (!CatchScope.hasEHBranches()) { CatchScope.clearHandlerBlocks(); EHStack.popCatch(); // Drop all basic block from all catch regions. - mlir::cir::TryOp tryOp = currLexScope->getTry(); SmallVector eraseBlocks; for (mlir::Region &r : tryOp.getCatchRegions()) { if (r.empty()) @@ -506,7 +520,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { } // Emit the structure of the EH dispatch for this catch. - buildCatchDispatchBlock(*this, CatchScope); + buildCatchDispatchBlock(*this, CatchScope, tryOp); // Copy the handler blocks off before we pop the EH stack. Emitting // the handlers might scribble on this memory. @@ -604,7 +618,7 @@ static bool isNonEHScope(const EHScope &S) { llvm_unreachable("Invalid EHScope Kind!"); } -mlir::Operation *CIRGenFunction::buildLandingPad() { +mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { assert(EHStack.requiresLandingPad()); assert(!CGM.getLangOpts().IgnoreExceptions && "LandingPad should not be emitted when -fignore-exceptions are in " @@ -625,11 +639,6 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { // that leads to this "landing pad" creation site. Otherwise, exceptions // are enabled but a throwing function is called anyways (common pattern // with function local static initializers). - if (!currLexScope->isTry()) { - llvm_unreachable("NYI"); - } - mlir::cir::TryOp tryOp = currLexScope->getTry(); - { // Save the current CIR generation state. mlir::OpBuilder::InsertionGuard guard(builder); @@ -709,8 +718,8 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { // Otherwise, signal that we at least have cleanups. } else if (hasCleanup) { - // FIXME(cir): figure out whether and how we need this in CIR. - assert(!MissingFeatures::setLandingPadCleanup()); + if (!tryOp.isCleanupActive()) + builder.createBlock(&tryOp.getCleanupRegion()); } assert((clauses.size() > 0 || hasCleanup) && "no catch clauses!"); @@ -730,7 +739,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { // landing pad by generating a branch to the dispatch block. In CIR the same // function is called to gather some state, but this block info it's not // useful per-se. - (void)getEHDispatchBlock(EHStack.getInnermostEHScope()); + (void)getEHDispatchBlock(EHStack.getInnermostEHScope(), tryOp); } return tryOp; @@ -741,14 +750,15 @@ mlir::Operation *CIRGenFunction::buildLandingPad() { // However, we keep this around since other parts of CIRGen use // getCachedEHDispatchBlock to infer state. mlir::Block * -CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) { +CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si, + mlir::cir::TryOp tryOp) { if (EHPersonality::get(*this).usesFuncletPads()) llvm_unreachable("NYI"); // The dispatch block for the end of the scope chain is a block that // just resumes unwinding. if (si == EHStack.stable_end()) - return getEHResumeBlock(true); + return getEHResumeBlock(true, tryOp); // Otherwise, we should look at the actual scope. EHScope &scope = *EHStack.find(si); @@ -774,10 +784,13 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) { break; } - case EHScope::Cleanup: - assert(!MissingFeatures::setLandingPadCleanup()); - dispatchBlock = currLexScope->getOrCreateCleanupBlock(builder); + case EHScope::Cleanup: { + assert(tryOp && "expected cir.try available"); + llvm::MutableArrayRef regions = tryOp.getCatchRegions(); + assert(regions.size() == 1 && "expected only one region"); + dispatchBlock = ®ions[0].getBlocks().back(); break; + } case EHScope::Filter: llvm_unreachable("NYI"); @@ -828,8 +841,30 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl() { // We don't need separate landing pads in the funclet model. llvm_unreachable("NYI"); } else { - // Build the landing pad for this scope. - LP = buildLandingPad(); + mlir::cir::TryOp tryOp = nullptr; + if (!currLexScope) { + // In OG, we build the landing pad for this scope. In CIR, we emit a + // synthetic cir.try because this didn't come from codegenerating from a + // try/catch in C++. + tryOp = builder.create( + *currSrcLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) {}, + // Don't emit the code right away for catch clauses, for + // now create the regions and consume the try scope result. + // Note that clauses are later populated in + // CIRGenFunction::buildLandingPad. + [&](mlir::OpBuilder &b, mlir::Location loc, + mlir::OperationState &result) { + // Since this didn't come from an explicit try, we only need one + // handler: unwind. + auto *r = result.addRegion(); + builder.createBlock(r); + }); + tryOp.setSynthetic(true); + } else { + tryOp = currLexScope->getClosestTryParent(); + } + LP = buildLandingPad(tryOp); } assert(LP); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 23740b1be5dc..beb054fa809f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -464,6 +464,16 @@ void CIRGenFunction::LexicalScope::buildImplicitReturn() { (void)buildReturn(localScope->EndLoc); } +mlir::cir::TryOp CIRGenFunction::LexicalScope::getClosestTryParent() { + auto *scope = this; + while (scope) { + if (scope->isTry()) + return getTry(); + scope = ParentScope; + } + return nullptr; +} + void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // CIRGen doesn't use a BreakContinueStack or evaluates OnlySimpleReturnStmts. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 7d665305a25c..09cf51cfac0a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -417,8 +417,8 @@ class CIRGenFunction : public CIRGenTypeCache { // Holds the Decl for the current outermost non-closure context const clang::Decl *CurFuncDecl = nullptr; /// This is the inner-most code context, which includes blocks. - const clang::Decl *CurCodeDecl; - const CIRGenFunctionInfo *CurFnInfo; + const clang::Decl *CurCodeDecl = nullptr; + const CIRGenFunctionInfo *CurFnInfo = nullptr; clang::QualType FnRetTy; /// This is the current function or global initializer that is generated code @@ -1696,21 +1696,24 @@ class CIRGenFunction : public CIRGenTypeCache { }; /// Emits try/catch information for the current EH stack. - mlir::Operation *buildLandingPad(); - mlir::Block *getEHResumeBlock(bool isCleanup); - mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope); + mlir::Operation *buildLandingPad(mlir::cir::TryOp tryOp); + mlir::Block *getEHResumeBlock(bool isCleanup, mlir::cir::TryOp tryOp); + mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope, + mlir::cir::TryOp tryOp); + /// Unified block containing a call to cir.resume + mlir::Block *ehResumeBlock = nullptr; /// The cleanup depth enclosing all the cleanups associated with the /// parameters. EHScopeStack::stable_iterator PrologueCleanupDepth; mlir::Operation *getInvokeDestImpl(); - bool getInvokeDest() { + mlir::Operation *getInvokeDest() { if (!EHStack.requiresLandingPad()) - return false; - // cir.try_call does not require a block destination, but keep the - // overall traditional LLVM codegen names, and just ignore the result. - return (bool)getInvokeDestImpl(); + return nullptr; + // Return the respective cir.try, this can be used to compute + // any other relevant information. + return getInvokeDestImpl(); } /// Takes the old cleanup stack size and emits the cleanup blocks @@ -2009,6 +2012,7 @@ class CIRGenFunction : public CIRGenTypeCache { assert(isTry()); return tryOp; } + mlir::cir::TryOp getClosestTryParent(); void setAsSwitch() { ScopeKind = Kind::Switch; } void setAsTernary() { ScopeKind = Kind::Ternary; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 88c3c2b0b397..42cfb5641207 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1324,8 +1324,11 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // TODO(cir): // Emit the initializer function if necessary. - if (NeedsGlobalCtor || NeedsGlobalDtor) + if (NeedsGlobalCtor || NeedsGlobalDtor) { + globalOpContext = GV; buildCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor); + globalOpContext = nullptr; + } // TODO(cir): sanitizers (reportGlobalToASan) and global variable debug // information. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 22f774bbaabc..1be8f3f6b32d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -161,6 +161,10 @@ class CIRGenModule : public CIRGenTypeCache { /// Emit the function that initializes C++ globals. void buildCXXGlobalInitFunc(); + /// Track whether the CIRGenModule is currently building an initializer + /// for a global (e.g. as opposed to a regular cir.func). + mlir::cir::GlobalOp globalOpContext = nullptr; + /// When a C++ decl with an initializer is deferred, null is /// appended to CXXGlobalInits, and the index of that null is placed /// here so that the initializer will be performed in the correct diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index c0ac9bc11701..a982502d8aef 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1218,12 +1218,15 @@ void TryOp::build( assert(tryBodyBuilder && "expected builder callback for 'cir.try' body"); OpBuilder::InsertionGuard guard(builder); + + // Try body region Region *tryBodyRegion = result.addRegion(); - mlir::Block *tryBodyBlock = builder.createBlock(tryBodyRegion); - Region *cleanupRegion = result.addRegion(); + // Try cleanup region + result.addRegion(); - builder.setInsertionPointToStart(tryBodyBlock); + // Create try body region and set insertion point + builder.createBlock(tryBodyRegion); tryBodyBuilder(builder, result.location); catchBuilder(builder, result.location, result); } diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index b40fe14d9731..40b8321bc1d7 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -460,9 +460,11 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { if (!callOp.isIndirect()) symbol = callOp.getCalleeAttr(); rewriter.setInsertionPointToEnd(callBlock); + mlir::Type resTy = nullptr; + if (callOp.getNumResults() > 0) + resTy = callOp.getResult().getType(); auto tryCall = rewriter.replaceOpWithNewOp( - callOp, symbol, callOp.getResult().getType(), cont, landingPad, - callOp.getOperands()); + callOp, symbol, resTy, cont, landingPad, callOp.getOperands()); tryCall.setExtraAttrsAttr(extraAttrs); if (ast) tryCall.setAstAttr(*ast); @@ -470,9 +472,11 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Quick block cleanup: no indirection to the post try block. auto brOp = dyn_cast(afterTry->getTerminator()); - mlir::Block *srcBlock = brOp.getDest(); - rewriter.eraseOp(brOp); - rewriter.mergeBlocks(srcBlock, afterTry); + if (brOp) { + mlir::Block *srcBlock = brOp.getDest(); + rewriter.eraseOp(brOp); + rewriter.mergeBlocks(srcBlock, afterTry); + } return mlir::success(); } }; diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index d2130a877348..dd54dae9314d 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -3,6 +3,9 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir 2>&1 +// RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir + struct e { e(int); }; e *g = new e(0); @@ -21,6 +24,16 @@ e *g = new e(0); // CIR_AFTER: {{%.*}} = cir.const #cir.int<1> : !u64i // CIR_AFTER: {{%.*}} = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr +// CIR_EH: cir.try synthetic { +// CIR_EH: cir.call exception @_ZN1eC1Ei +// CIR_EH: cir.yield +// CIR_EH: } cleanup { +// CIR_EH: cir.call @_ZdlPvm +// CIR_EH: cir.yield +// CIR_EH: } catch [#cir.unwind { +// CIR_EH: cir.resume +// CIR_EH: }] + // LLVM-DAG: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }, { i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init.1, ptr null }] // LLVM: define internal void @__cxx_global_var_init() // LLVM: call ptr @_Znwm(i64 1) From aa68f5fd48b634c36aef50c4a1ccee72d8b4bf04 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Aug 2024 16:53:31 -0700 Subject: [PATCH 1792/2301] [CIR][FlattenCFG] Exceptions: propagate cleanups to flat CIR --- clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 17 +++++++++++++++++ clang/test/CIR/CodeGen/global-new.cpp | 8 ++++++++ 2 files changed, 25 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 40b8321bc1d7..7309610b1e9f 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -320,6 +320,23 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto selector = inflightEh.getTypeId(); auto exceptionPtr = inflightEh.getExceptionPtr(); + // Time to emit cleanup's. + if (tryOp.isCleanupActive()) { + assert(tryOp.getCleanupRegion().getBlocks().size() == 1 && + "NYI: if this isn't enough, move region instead"); + // TODO(cir): this might need to be duplicated instead of consumed since + // for user-written try/catch we want these cleanups to also run when the + // regular try scope adjurns (in case no exception is triggered). + assert(tryOp.getSynthetic() && + "not implemented for user written try/catch"); + mlir::Block *cleanupBlock = &tryOp.getCleanupRegion().getBlocks().back(); + auto cleanupYield = + cast(cleanupBlock->getTerminator()); + cleanupYield->erase(); + rewriter.mergeBlocks(cleanupBlock, catchBegin); + rewriter.setInsertionPointToEnd(catchBegin); + } + // Handle dispatch. In could in theory use a switch, but let's just // mimic LLVM more closely since we have no specific thing to achieve // doing that (might not play as well with existing optimizers either). diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index dd54dae9314d..2ce6dbd83723 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -5,6 +5,8 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir 2>&1 // RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir 2>&1 +// RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir struct e { e(int); }; e *g = new e(0); @@ -34,6 +36,12 @@ e *g = new e(0); // CIR_EH: cir.resume // CIR_EH: }] +// CIR_FLAT_EH: cir.func internal private @__cxx_global_var_init() +// CIR_FLAT_EH: ^bb3: +// CIR_FLAT_EH: %exception_ptr, %type_id = cir.eh.inflight_exception +// CIR_FLAT_EH: cir.call @_ZdlPvm({{.*}}) : (!cir.ptr, !u64i) -> () +// CIR_FLAT_EH: cir.br ^bb4(%exception_ptr, %type_id : !cir.ptr, !u32i) + // LLVM-DAG: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }, { i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init.1, ptr null }] // LLVM: define internal void @__cxx_global_var_init() // LLVM: call ptr @_Znwm(i64 1) From e5d23e490ed88d4d7f583616fac8cdade9510cc9 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 30 Aug 2024 14:11:14 -0700 Subject: [PATCH 1793/2301] [CIR] Add new HLSLResource enum value coverage --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index ea0edea495dd..d245c935633b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1113,6 +1113,7 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) { // Types added here must also be added to EmitFundamentalRTTIDescriptors. switch (Ty->getKind()) { case BuiltinType::WasmExternRef: + case BuiltinType::HLSLResource: llvm_unreachable("NYI"); case BuiltinType::Void: case BuiltinType::NullPtr: From 54f37fe4ccd8297948cb7a085b1e0b64ca253701 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Aug 2024 17:10:29 -0700 Subject: [PATCH 1794/2301] [CIR] Exceptions: propagate more cleanup info for LLVM lowering use --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 9 ++++++- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 5 +++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 13 ++++++--- clang/test/CIR/CodeGen/global-new.cpp | 27 +++++++++++++++++++ 4 files changed, 48 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b076e22ac5a6..cf69de9958e5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3572,11 +3572,18 @@ def EhInflightOp : CIR_Op<"eh.inflight_exception"> { - `type_id`: pointer to the exception object This operation is expected to be the first one basic blocks on the exception path out of `cir.try_call` operations. + + The `cleanup` attribute indicates that clean up code might run before the + values produced by this operation are used to gather exception information. + This helps CIR to pass down more accurate information for LLVM lowering + to landingpads. }]; - let arguments = (ins OptionalAttr:$sym_type_list); + let arguments = (ins UnitAttr:$cleanup, + OptionalAttr:$sym_type_list); let results = (outs VoidPtr:$exception_ptr, UInt32:$type_id); let assemblyFormat = [{ + (`cleanup` $cleanup^)? ($sym_type_list^)? attr-dict }]; diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 7309610b1e9f..a8493f8f2c45 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -316,7 +316,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto typeIdType = mlir::cir::IntType::get(getContext(), 32, false); mlir::ArrayAttr symlist = collectTypeSymbols(tryOp); auto inflightEh = rewriter.create( - loc, exceptionPtrType, typeIdType, symlist); + loc, exceptionPtrType, typeIdType, + tryOp.isCleanupActive() ? mlir::UnitAttr::get(tryOp.getContext()) + : nullptr, + symlist); auto selector = inflightEh.getTypeId(); auto exceptionPtr = inflightEh.getExceptionPtr(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 95f36446c7a6..6a67a60d0fa4 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -978,10 +978,12 @@ class CIREhInflightOpLowering symAddrs.push_back(addrOp); } } else { - // catch ptr null - mlir::Value nullOp = rewriter.create( - loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext())); - symAddrs.push_back(nullOp); + if (!op.getCleanup()) { + // catch ptr null + mlir::Value nullOp = rewriter.create( + loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext())); + symAddrs.push_back(nullOp); + } } // %slot = extractvalue { ptr, i32 } %x, 0 @@ -991,6 +993,9 @@ class CIREhInflightOpLowering SmallVector slotIdx = {0}; SmallVector selectorIdx = {1}; + if (op.getCleanup()) + padOp.setCleanup(true); + mlir::Value slot = rewriter.create(loc, padOp, slotIdx); mlir::Value selector = diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index 2ce6dbd83723..ad0c42bc9edd 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -7,6 +7,8 @@ // RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir 2>&1 // RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll 2>&1 +// RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll struct e { e(int); }; e *g = new e(0); @@ -42,6 +44,31 @@ e *g = new e(0); // CIR_FLAT_EH: cir.call @_ZdlPvm({{.*}}) : (!cir.ptr, !u64i) -> () // CIR_FLAT_EH: cir.br ^bb4(%exception_ptr, %type_id : !cir.ptr, !u32i) +// LLVM_EH: define internal void @__cxx_global_var_init() personality ptr @__gxx_personality_v0 +// LLVM_EH: call ptr @_Znwm(i64 1) +// LLVM_EH: br label %[[L2:.*]], + +// LLVM_EH: [[L2]]: +// LLVM_EH: invoke void @_ZN1eC1Ei +// LLVM_EH: to label %[[CONT:.*]] unwind label %[[PAD:.*]], + +// LLVM_EH: [[CONT]]: +// LLVM_EH: br label %[[END:.*]], + +// LLVM_EH: [[PAD]]: +// LLVM_EH: landingpad { ptr, i32 } +// LLVM_EH: cleanup +// LLVM_EH: call void @_ZdlPvm +// LLVM_EH: br label %[[RESUME:.*]], + +// LLVM_EH: [[RESUME]]: +// LLVM_EH: resume { ptr, i32 } + +// LLVM_EH: [[END]]: +// LLVM_EH: store ptr {{.*}}, ptr @g, align 8 +// LLVM_EH: ret void +// LLVM_EH: } + // LLVM-DAG: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }, { i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init.1, ptr null }] // LLVM: define internal void @__cxx_global_var_init() // LLVM: call ptr @_Znwm(i64 1) From 4e964f7bcb4d108869ebec626236046e6d9e0142 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Aug 2024 17:50:18 -0700 Subject: [PATCH 1795/2301] [CIR][CIRGen][NFC] Add more skeleton to make crashes fine grained on pushTemporaryCleanup --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 97505aad8172..347cc2a932cf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2187,8 +2187,29 @@ static void pushTemporaryCleanup(CIRGenFunction &CGF, if (!ReferenceTemporaryDtor) return; - // TODO(cir): Call the destructor for the temporary. - assert(0 && "NYI"); + // Call the destructor for the temporary. + switch (M->getStorageDuration()) { + case SD_Static: + case SD_Thread: { + if (E->getType()->isArrayType()) { + llvm_unreachable("SD_Static|SD_Thread + array types not implemented"); + } else { + llvm_unreachable("SD_Static|SD_Thread for general types not implemented"); + } + llvm_unreachable("SD_Static|SD_Thread not implemented"); + } + + case SD_FullExpression: + llvm_unreachable("SD_FullExpression not implemented"); + break; + + case SD_Automatic: + llvm_unreachable("SD_Automatic not implemented"); + break; + + case SD_Dynamic: + llvm_unreachable("temporary cannot have dynamic storage duration"); + } } LValue CIRGenFunction::buildMaterializeTemporaryExpr( From f2931148d2ec5ce45348a35ae92205bce3511a9f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Aug 2024 18:55:42 -0700 Subject: [PATCH 1796/2301] [CIR][NFC] Exceptions: Move a crash to an assert --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 67455c81f57f..a83723498385 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -863,6 +863,7 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl() { tryOp.setSynthetic(true); } else { tryOp = currLexScope->getClosestTryParent(); + assert(tryOp && "cir.try expected"); } LP = buildLandingPad(tryOp); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index beb054fa809f..0718a27d04a1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -469,7 +469,7 @@ mlir::cir::TryOp CIRGenFunction::LexicalScope::getClosestTryParent() { while (scope) { if (scope->isTry()) return getTry(); - scope = ParentScope; + scope = scope->ParentScope; } return nullptr; } From 9cdff786f0e7a4ad8eadfe10862032122f27b047 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 28 Aug 2024 19:27:25 -0700 Subject: [PATCH 1797/2301] [CIR][CIRGen] Handle paren list init and get dtors right --- clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenClass.cpp | 29 ++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 19 +++++++++++ clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 15 +++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 19 +++++++++++ clang/test/CIR/CodeGen/paren-list-init.cpp | 39 ++++++++++++++++++++++ 6 files changed, 118 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/paren-list-init.cpp diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index bdfffd4788c1..665f5e5baf62 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -184,6 +184,7 @@ struct MissingFeatures { static bool deferredReplacements() { return false; } static bool shouldInstrumentFunction() { return false; } static bool xray() { return false; } + static bool flagLoad() { return false; } // Inline assembly static bool asmGoto() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index bf18b1dcbba6..03fa0999a3ab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1236,6 +1236,30 @@ struct CallDtorDelete final : EHScopeStack::Cleanup { }; } // namespace +class DestroyField final : public EHScopeStack::Cleanup { + const FieldDecl *field; + CIRGenFunction::Destroyer *destroyer; + bool useEHCleanupForArray; + +public: + DestroyField(const FieldDecl *field, CIRGenFunction::Destroyer *destroyer, + bool useEHCleanupForArray) + : field(field), destroyer(destroyer), + useEHCleanupForArray(useEHCleanupForArray) {} + + void Emit(CIRGenFunction &CGF, Flags flags) override { + // Find the address of the field. + Address thisValue = CGF.LoadCXXThisAddress(); + QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent()); + LValue ThisLV = CGF.makeAddrLValue(thisValue, RecordTy); + LValue LV = CGF.buildLValueForField(ThisLV, field); + assert(LV.isSimple()); + + CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer, + flags.isForNormalCleanup() && useEHCleanupForArray); + } +}; + /// Emit all code that comes at the end of class's destructor. This is to call /// destructors on members and base classes in reverse order of their /// construction. @@ -1346,8 +1370,9 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue; - [[maybe_unused]] CleanupKind cleanupKind = getCleanupKind(dtorKind); - llvm_unreachable("EHStack.pushCleanup(...) NYI"); + CleanupKind cleanupKind = getCleanupKind(dtorKind); + EHStack.pushCleanup( + cleanupKind, Field, getDestroyer(dtorKind), cleanupKind & EHCleanup); } if (SanitizeFields) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 9a08ccd0d1ff..6ae30a141433 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -1247,3 +1247,22 @@ void CIRGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind, pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true); } + +// Pushes a destroy and defers its deactivation until its +// CleanupDeactivationScope is exited. +void CIRGenFunction::pushDestroyAndDeferDeactivation( + QualType::DestructionKind dtorKind, Address addr, QualType type) { + assert(dtorKind && "cannot push destructor for trivial type"); + + CleanupKind cleanupKind = getCleanupKind(dtorKind); + pushDestroyAndDeferDeactivation( + cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup); +} + +void CIRGenFunction::pushDestroyAndDeferDeactivation( + CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer, + bool useEHCleanupForArray) { + assert(!MissingFeatures::flagLoad()); + pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray); + DeferredDeactivationCleanupStack.push_back({EHStack.stable_begin(), nullptr}); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index c403de3c51da..22a0418e031c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -274,6 +274,7 @@ class AggExprEmitter : public StmtVisitor { void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E); void VisitChooseExpr(const ChooseExpr *E) { llvm_unreachable("NYI"); } void VisitInitListExpr(InitListExpr *E); + void VisitCXXParenListInitExpr(CXXParenListInitExpr *E); void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef Args, FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller); @@ -1194,6 +1195,12 @@ void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) { // All done! The result is in the Dest slot. } +void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) { + VisitCXXParenListOrInitListExpr(E, E->getInitExprs(), + E->getInitializedFieldInUnion(), + E->getArrayFiller()); +} + void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { // TODO(cir): use something like CGF.ErrorUnsupported if (E->hadArrayRangeDesignator()) @@ -1350,10 +1357,14 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( // Push a destructor if necessary. // FIXME: if we have an array of structures, all explicitly // initialized, we can end up pushing a linear number of cleanups. - [[maybe_unused]] bool pushedCleanup = false; if (QualType::DestructionKind dtorKind = field->getType().isDestructedType()) { - llvm_unreachable("NYI"); + assert(LV.isSimple()); + if (dtorKind) { + CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(), + field->getType(), + CGF.getDestroyer(dtorKind), false); + } } // From LLVM codegen, maybe not useful for CIR: diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 09cf51cfac0a..76fd973f3a8d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -375,6 +375,20 @@ class CIRGenFunction : public CIRGenTypeCache { EHScopeStack EHStack; llvm::SmallVector LifetimeExtendedCleanupStack; + // A stack of cleanups which were added to EHStack but have to be deactivated + // later before being popped or emitted. These are usually deactivated on + // exiting a `CleanupDeactivationScope` scope. For instance, after a + // full-expr. + // + // These are specially useful for correctly emitting cleanups while + // encountering branches out of expression (through stmt-expr or coroutine + // suspensions). + struct DeferredDeactivateCleanup { + EHScopeStack::stable_iterator Cleanup; + mlir::Operation *DominatingIP; + }; + llvm::SmallVector DeferredDeactivationCleanupStack; + /// A mapping from NRVO variables to the flags used to indicate /// when the NRVO has been applied to this variable. llvm::DenseMap NRVOFlags; @@ -1811,6 +1825,11 @@ class CIRGenFunction : public CIRGenTypeCache { QualType elementType, CharUnits elementAlign, Destroyer *destroyer); + void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, + Address addr, QualType type); + void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr, + QualType type, Destroyer *destroyer, + bool useEHCleanupForArray); void buildArrayDestroy(mlir::Value begin, mlir::Value end, QualType elementType, CharUnits elementAlign, Destroyer *destroyer, bool checkZeroLength, diff --git a/clang/test/CIR/CodeGen/paren-list-init.cpp b/clang/test/CIR/CodeGen/paren-list-init.cpp new file mode 100644 index 000000000000..742dc5cb9c33 --- /dev/null +++ b/clang/test/CIR/CodeGen/paren-list-init.cpp @@ -0,0 +1,39 @@ +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -Wno-unused-value -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +struct Vec { + Vec(); + Vec(Vec&&); + ~Vec(); +}; + +struct S1 { + Vec v; +}; + +// CIR-DAG: ![[VecType:.*]] = !cir.struct +// CIR-DAG: ![[S1:.*]] = !cir.struct + +template +void make1() { + Vec v; + S1((Vec&&) v); +// CIR: cir.func linkonce_odr @_Z5make1ILi0EEvv() +// CIR: %[[VEC:.*]] = cir.alloca ![[VecType]], !cir.ptr +// CIR: cir.call @_ZN3VecC1Ev(%[[VEC]]) : (!cir.ptr) +// CIR: cir.scope { +// CIR: %[[AGG_TMP:.*]] = cir.alloca ![[S1]], !cir.ptr, ["agg.tmp.ensured"] +// CIR: %[[FIELD:.*]] = cir.get_member %[[AGG_TMP]][0] {name = "v"} : !cir.ptr -> !cir.ptr +// CIR: cir.call @_ZN3VecC1EOS_(%[[FIELD]], %[[VEC]]) : (!cir.ptr, !cir.ptr) -> () +// CIR: cir.call @_ZN2S1D1Ev(%[[AGG_TMP]]) : (!cir.ptr) -> () +// CIR: cir.call @_ZN3VecD1Ev(%[[FIELD]]) : (!cir.ptr) -> () +// CIR: } + +// FIXME: implement MissingFeatures::flagLoad(), do not emit this a second time. +// CIR: cir.call @_ZN3VecD1Ev(%[[VEC]]) : (!cir.ptr) -> () +// CIR: cir.return +} + +void foo() { + make1<0>(); +} From ce6d7c6e2bd11663eb6d823d25250734e5069d17 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 29 Aug 2024 16:47:30 -0700 Subject: [PATCH 1798/2301] [CIR][CIRGen] Handle more cleanup situations and fix bug - Tackle a FIXME left in previous commit. - Start turning missing cleanup features into actual NYI asserts. - Make sure we run CleanupDeactivationScope properly and deactivate necessary cleanups. - Add more verifications/asserts for overall state. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 5 ++- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 47 +++++----------------- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 16 ++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 31 ++++++++++++++ clang/test/CIR/CodeGen/paren-list-init.cpp | 3 -- 5 files changed, 60 insertions(+), 42 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 6ae30a141433..30e4019a24d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -1262,7 +1262,8 @@ void CIRGenFunction::pushDestroyAndDeferDeactivation( void CIRGenFunction::pushDestroyAndDeferDeactivation( CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray) { - assert(!MissingFeatures::flagLoad()); + mlir::Operation *flag = + builder.create(builder.getUnknownLoc()); pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray); - DeferredDeactivationCleanupStack.push_back({EHStack.stable_begin(), nullptr}); + DeferredDeactivationCleanupStack.push_back({EHStack.stable_begin(), flag}); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 22a0418e031c..36941fc1725a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -504,10 +504,13 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, // Exception safety requires us to destroy all the // already-constructed members if an initializer throws. // For that, we'll need an EH cleanup. - [[maybe_unused]] QualType::DestructionKind dtorKind = - elementType.isDestructedType(); + QualType::DestructionKind dtorKind = elementType.isDestructedType(); [[maybe_unused]] Address endOfInit = Address::invalid(); - assert(!CGF.needsEHCleanup(dtorKind) && "destructed types NIY"); + CIRGenFunction::CleanupDeactivationScope deactivation(CGF); + + if (dtorKind) { + llvm_unreachable("dtorKind NYI"); + } // The 'current element to initialize'. The invariants on this // variable are complicated. Essentially, after each iteration of @@ -619,9 +622,6 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, builder.createYield(loc); }); } - - // Leave the partial-array cleanup if we entered one. - assert(!dtorKind && "destructed types NIY"); } /// True if the given aggregate type requires special GC API calls. @@ -910,11 +910,9 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { LLVM_ATTRIBUTE_UNUSED LValue SlotLV = CGF.makeAddrLValue(Slot.getAddress(), E->getType()); - // We'll need to enter cleanup scopes in case any of the element initializers - // throws an exception. - if (MissingFeatures::cleanups()) - llvm_unreachable("NYI"); - mlir::Operation *CleanupDominator = nullptr; + // We'll need to enter cleanup scopes in case any of the element + // initializers throws an exception or contains branch out of the expressions. + CIRGenFunction::CleanupDeactivationScope scope(CGF); auto CurField = E->getLambdaClass()->field_begin(); auto captureInfo = E->capture_begin(); @@ -949,15 +947,6 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { CurField++; captureInfo++; } - - // Deactivate all the partial cleanups in reverse order, which generally means - // popping them. - if (MissingFeatures::cleanups()) - llvm_unreachable("NYI"); - - // Destroy the placeholder if we made one. - if (CleanupDominator) - CleanupDominator->erase(); } void AggExprEmitter::VisitCastExpr(CastExpr *E) { @@ -1266,12 +1255,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( // We'll need to enter cleanup scopes in case any of the element // initializers throws an exception. SmallVector cleanups; - // FIXME(cir): placeholder - mlir::Operation *cleanupDominator = nullptr; - [[maybe_unused]] auto addCleanup = - [&](const EHScopeStack::stable_iterator &cleanup) { - llvm_unreachable("NYI"); - }; + CIRGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF); unsigned curInitIndex = 0; @@ -1371,17 +1355,6 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( // If the GEP didn't get used because of a dead zero init or something // else, clean it up for -O0 builds and general tidiness. } - - // Deactivate all the partial cleanups in reverse order, which - // generally means popping them. - assert((cleanupDominator || cleanups.empty()) && - "Missing cleanupDominator before deactivating cleanup blocks"); - for (unsigned i = cleanups.size(); i != 0; --i) - llvm_unreachable("NYI"); - - // Destroy the placeholder if we made one. - if (cleanupDominator) - llvm_unreachable("NYI"); } void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 0718a27d04a1..cd6993d9f3d6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -44,6 +44,22 @@ CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, // TODO(CIR): SetFastMathFlags(CurFPFeatures); } +CIRGenFunction::~CIRGenFunction() { + assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup"); + assert(DeferredDeactivationCleanupStack.empty() && + "missed to deactivate a cleanup"); + + // TODO(cir): set function is finished. + assert(!MissingFeatures::openMPRuntime()); + + // If we have an OpenMPIRBuilder we want to finalize functions (incl. + // outlining etc) at some point. Doing it once the function codegen is done + // seems to be a reasonable spot. We do it here, as opposed to the deletion + // time of the CodeGenModule, because we have to ensure the IR has not yet + // been "emitted" to the outside, thus, modifications are still sensible. + assert(!MissingFeatures::openMPRuntime()); +} + clang::ASTContext &CIRGenFunction::getContext() const { return CGM.getASTContext(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 76fd973f3a8d..cde8eac2a086 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -389,6 +389,36 @@ class CIRGenFunction : public CIRGenTypeCache { }; llvm::SmallVector DeferredDeactivationCleanupStack; + // Enters a new scope for capturing cleanups which are deferred to be + // deactivated, all of which will be deactivated once the scope is exited. + struct CleanupDeactivationScope { + CIRGenFunction &CGF; + size_t OldDeactivateCleanupStackSize; + bool Deactivated; + CleanupDeactivationScope(CIRGenFunction &CGF) + : CGF(CGF), OldDeactivateCleanupStackSize( + CGF.DeferredDeactivationCleanupStack.size()), + Deactivated(false) {} + + void ForceDeactivate() { + assert(!Deactivated && "Deactivating already deactivated scope"); + auto &Stack = CGF.DeferredDeactivationCleanupStack; + for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) { + CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup, + Stack[I - 1].DominatingIP); + Stack[I - 1].DominatingIP->erase(); + } + Stack.resize(OldDeactivateCleanupStackSize); + Deactivated = true; + } + + ~CleanupDeactivationScope() { + if (Deactivated) + return; + ForceDeactivate(); + } + }; + /// A mapping from NRVO variables to the flags used to indicate /// when the NRVO has been applied to this variable. llvm::DenseMap NRVOFlags; @@ -535,6 +565,7 @@ class CIRGenFunction : public CIRGenTypeCache { CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, bool suppressNewContext = false); + ~CIRGenFunction(); CIRGenTypes &getTypes() const { return CGM.getTypes(); } diff --git a/clang/test/CIR/CodeGen/paren-list-init.cpp b/clang/test/CIR/CodeGen/paren-list-init.cpp index 742dc5cb9c33..119717046570 100644 --- a/clang/test/CIR/CodeGen/paren-list-init.cpp +++ b/clang/test/CIR/CodeGen/paren-list-init.cpp @@ -26,10 +26,7 @@ void make1() { // CIR: %[[FIELD:.*]] = cir.get_member %[[AGG_TMP]][0] {name = "v"} : !cir.ptr -> !cir.ptr // CIR: cir.call @_ZN3VecC1EOS_(%[[FIELD]], %[[VEC]]) : (!cir.ptr, !cir.ptr) -> () // CIR: cir.call @_ZN2S1D1Ev(%[[AGG_TMP]]) : (!cir.ptr) -> () -// CIR: cir.call @_ZN3VecD1Ev(%[[FIELD]]) : (!cir.ptr) -> () // CIR: } - -// FIXME: implement MissingFeatures::flagLoad(), do not emit this a second time. // CIR: cir.call @_ZN3VecD1Ev(%[[VEC]]) : (!cir.ptr) -> () // CIR: cir.return } From 00f43659f3e4f0c8fc36cc0aae2b215149bceecf Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 29 Aug 2024 17:54:44 -0700 Subject: [PATCH 1799/2301] [CIR][NFC] Leftover from previous commit --- clang/include/clang/CIR/MissingFeatures.h | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 665f5e5baf62..bdfffd4788c1 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -184,7 +184,6 @@ struct MissingFeatures { static bool deferredReplacements() { return false; } static bool shouldInstrumentFunction() { return false; } static bool xray() { return false; } - static bool flagLoad() { return false; } // Inline assembly static bool asmGoto() { return false; } From e406a383d6ca69518c85fcd6fcb3dae1ec0e864a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 29 Aug 2024 21:49:10 -0700 Subject: [PATCH 1800/2301] [CIR][NFC] Cleanup tests a bit to reflect tested platform --- clang/test/CIR/CodeGen/global-new.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index ad0c42bc9edd..761227b102d8 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -1,13 +1,13 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE // RUN: FileCheck %s -check-prefix=CIR_AFTER --input-file=%t.cir -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir 2>&1 +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir // RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir 2>&1 +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir // RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll 2>&1 +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll // RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll struct e { e(int); }; From 3305e25fa06d35ba2b759fbc0157f47c534e2432 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 30 Aug 2024 12:00:34 -0700 Subject: [PATCH 1801/2301] [CIR] Disable test temporatily This is currently crashing on Linux only internally, but not on GitHub's CI, disable it temporarily while we investigate. --- clang/test/CIR/CodeGen/global-new.cpp | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index 761227b102d8..cbfebade4273 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -1,14 +1,13 @@ -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE -// RUN: FileCheck %s -check-prefix=CIR_AFTER --input-file=%t.cir -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll -// RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll - -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir -// RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir -// RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll -// RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll +// DISABLED_RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE +// DISABLED_RUN: FileCheck %s -check-prefix=CIR_AFTER --input-file=%t.cir +// DISABLED_RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// DISABLED_RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll +// DISABLED_RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir +// DISABLED_RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir +// DISABLED_RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir +// DISABLED_RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir +// DISABLED_RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll +// DISABLED_RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll struct e { e(int); }; e *g = new e(0); From 1e6c3c60f40a77d7877539336fc735ae1f623007 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 30 Aug 2024 14:04:54 -0700 Subject: [PATCH 1802/2301] [CIR] Another attempt to disable tests --- clang/test/CIR/CodeGen/global-new.cpp | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index cbfebade4273..201f818f1723 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -1,13 +1,14 @@ -// DISABLED_RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE -// DISABLED_RUN: FileCheck %s -check-prefix=CIR_AFTER --input-file=%t.cir -// DISABLED_RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll -// DISABLED_RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll -// DISABLED_RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir -// DISABLED_RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir -// DISABLED_RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir -// DISABLED_RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir -// DISABLED_RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll -// DISABLED_RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll +// RUN: true +// DISABLED: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE +// DISABLED: FileCheck %s -check-prefix=CIR_AFTER --input-file=%t.cir +// DISABLED: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// DISABLED: FileCheck %s -check-prefix=LLVM --input-file=%t.ll +// DISABLED: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir +// DISABLED: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir +// DISABLED: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir +// DISABLED: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir +// DISABLED: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll +// DISABLED: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll struct e { e(int); }; e *g = new e(0); From 030e65c1032c16a1fe68fdaec2df1ee2f8be3d34 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 30 Aug 2024 18:12:43 -0700 Subject: [PATCH 1803/2301] [CIR] Reland failing tests (#811) --- clang/lib/CIR/CodeGen/CIRGenCleanup.h | 4 ++-- clang/test/CIR/CodeGen/global-new.cpp | 21 ++++++++++----------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h index 99a730b51769..76547ceebfe4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -621,12 +621,12 @@ EHScopeStack::stabilize(iterator ir) const { /// The exceptions personality for a function. struct EHPersonality { - const char *PersonalityFn; + const char *PersonalityFn = nullptr; // If this is non-null, this personality requires a non-standard // function for rethrowing an exception after a catchall cleanup. // This function must have prototype void(void*). - const char *CatchallRethrowFn; + const char *CatchallRethrowFn = nullptr; static const EHPersonality &get(CIRGenModule &CGM, const clang::FunctionDecl *FD); diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index 201f818f1723..9c788c7f087d 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -1,14 +1,13 @@ -// RUN: true -// DISABLED: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE -// DISABLED: FileCheck %s -check-prefix=CIR_AFTER --input-file=%t.cir -// DISABLED: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll -// DISABLED: FileCheck %s -check-prefix=LLVM --input-file=%t.ll -// DISABLED: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir -// DISABLED: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir -// DISABLED: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir -// DISABLED: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir -// DISABLED: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll -// DISABLED: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE +// RUN: FileCheck %s -check-prefix=CIR_AFTER --input-file=%t.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir +// RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir +// RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll +// RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll struct e { e(int); }; e *g = new e(0); From 1e54322d7dc836ccefc58faf221519cb12892a44 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Thu, 5 Sep 2024 09:56:49 -0700 Subject: [PATCH 1804/2301] [CIR][CIRGen] Support pure and deleted virtual functions (#823) This is a straightforward adaption of existing CodeGen logic. While I'm here, move block comments inside their blocks, so that they look nicer. --- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 6 +++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 5 +++ clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 43 ++++++++----------- clang/lib/CIR/CodeGen/CIRGenVTables.h | 9 ++-- .../test/CIR/CodeGen/special-virtual-func.cpp | 16 +++++++ 5 files changed, 51 insertions(+), 28 deletions(-) create mode 100644 clang/test/CIR/CodeGen/special-virtual-func.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 2aea122f9759..6c67e849a4c4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -235,6 +235,12 @@ class CIRGenCXXABI { BaseSubobject Base, const CXXRecordDecl *NearestVBase) = 0; + /// Gets the pure virtual member call function. + virtual StringRef getPureVirtualCallName() = 0; + + /// Gets the deleted virtual member call name. + virtual StringRef getDeletedVirtualCallName() = 0; + /// Specify how one should pass an argument of a record type. enum class RecordArgABI { /// Pass it using the normal C aggregate rules for the ABI, potentially diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index d245c935633b..4d9f05b61bb8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -215,6 +215,11 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { return false; } + StringRef getPureVirtualCallName() override { return "__cxa_pure_virtual"; } + StringRef getDeletedVirtualCallName() override { + return "__cxa_deleted_virtual"; + } + /// TODO(cir): seems like could be shared between LLVM IR and CIR codegen. bool mayNeedDestruction(const VarDecl *VD) const { if (VD->needsDestruction(getContext())) diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index b10a12d9e0da..3c2af8fbbfdf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -217,8 +217,7 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, llvm_unreachable("NYI"); } - [[maybe_unused]] auto getSpecialVirtualFn = - [&](StringRef name) -> mlir::Attribute { + auto getSpecialVirtualFn = [&](StringRef name) -> mlir::cir::FuncOp { // FIXME(PR43094): When merging comdat groups, lld can select a local // symbol as the signature symbol even though it cannot be accessed // outside that symbol's TU. The relative vtables ABI would make @@ -235,45 +234,41 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, CGM.getTriple().isNVPTX()) llvm_unreachable("NYI"); - llvm_unreachable("NYI"); - // llvm::FunctionType *fnTy = - // llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); - // llvm::Constant *fn = cast( - // CGM.CreateRuntimeFunction(fnTy, name).getCallee()); - // if (auto f = dyn_cast(fn)) - // f->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); - // return llvm::ConstantExpr::getBitCast(fn, CGM.Int8PtrTy); + mlir::cir::FuncType fnTy = + CGM.getBuilder().getFuncType({}, CGM.getBuilder().getVoidTy()); + mlir::cir::FuncOp fnPtr = CGM.createRuntimeFunction(fnTy, name); + // LLVM codegen handles unnamedAddr + assert(!MissingFeatures::unnamedAddr()); + return fnPtr; }; mlir::cir::FuncOp fnPtr; - // Pure virtual member functions. if (cast(GD.getDecl())->isPureVirtual()) { - llvm_unreachable("NYI"); - // if (!PureVirtualFn) - // PureVirtualFn = - // getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName()); - // fnPtr = PureVirtualFn; + // Pure virtual member functions. + if (!PureVirtualFn) + PureVirtualFn = + getSpecialVirtualFn(CGM.getCXXABI().getPureVirtualCallName()); + fnPtr = PureVirtualFn; - // Deleted virtual member functions. } else if (cast(GD.getDecl())->isDeleted()) { - llvm_unreachable("NYI"); - // if (!DeletedVirtualFn) - // DeletedVirtualFn = - // getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName()); - // fnPtr = DeletedVirtualFn; + // Deleted virtual member functions. + if (!DeletedVirtualFn) + DeletedVirtualFn = + getSpecialVirtualFn(CGM.getCXXABI().getDeletedVirtualCallName()); + fnPtr = DeletedVirtualFn; - // Thunks. } else if (nextVTableThunkIndex < layout.vtable_thunks().size() && layout.vtable_thunks()[nextVTableThunkIndex].first == componentIndex) { + // Thunks. llvm_unreachable("NYI"); // auto &thunkInfo = layout.vtable_thunks()[nextVTableThunkIndex].second; // nextVTableThunkIndex++; // fnPtr = maybeEmitThunk(GD, thunkInfo, /*ForVTable=*/true); - // Otherwise we can use the method definition directly. } else { + // Otherwise we can use the method definition directly. auto fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD); fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true); } diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index e92f60394270..2def67ab1bc6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -19,6 +19,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/AST/VTableBuilder.h" #include "clang/Basic/ABI.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "llvm/ADT/DenseMap.h" namespace clang { @@ -52,11 +53,11 @@ class CIRGenVTables { /// indices. SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices; - // /// Cache for the pure virtual member call function. - // llvm::Constant *PureVirtualFn = nullptr; + /// Cache for the pure virtual member call function. + mlir::cir::FuncOp PureVirtualFn = nullptr; - // /// Cache for the deleted virtual member call function. - // llvm::Constant *DeletedVirtualFn = nullptr; + /// Cache for the deleted virtual member call function. + mlir::cir::FuncOp DeletedVirtualFn = nullptr; // /// Get the address of a thunk and emit it if necessary. // llvm::Constant *maybeEmitThunk(GlobalDecl GD, diff --git a/clang/test/CIR/CodeGen/special-virtual-func.cpp b/clang/test/CIR/CodeGen/special-virtual-func.cpp new file mode 100644 index 000000000000..83e2a27b82f0 --- /dev/null +++ b/clang/test/CIR/CodeGen/special-virtual-func.cpp @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Check that pure and deleted virtual functions are correctly emitted in the +// vtable. +class A { + A(); + virtual void pure() = 0; + virtual void deleted() = delete; +}; + +A::A() = default; + +// CHECK: @_ZTV1A = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr, #cir.global_view<@__cxa_pure_virtual> : !cir.ptr, #cir.global_view<@__cxa_deleted_virtual> : !cir.ptr]> +// CHECK: cir.func private @__cxa_pure_virtual() +// CHECK: cir.func private @__cxa_deleted_virtual() From 203cd7a8418d249f9b12ca74f4c6c9cd9c33df70 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Mon, 9 Sep 2024 17:49:11 -0400 Subject: [PATCH 1805/2301] [CIR][CIRGen][Lowering] Get alignment from frontend and pass it to LLVM (#810) As title. Add setAlignmentAttr for GlobalOps created from AST. LLVM Lowering should have LLVM GlobalOp's alignment attribute inherited from CIR::GlobalOp. This PR is definitely needed to fix issue https://github.com/llvm/clangir/issues/801#issuecomment-2305692250, but the issue doesn't have alignment and comdat attribute for CIR Ops to begin with, so I'll keep investigating and fix CIR problem in another PR. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 3 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 ++-- clang/test/CIR/CodeGen/attributes.c | 4 +-- .../CIR/CodeGen/cxx1z-inline-variables.cpp | 29 +++++++++---------- .../CIR/CodeGen/globals-neg-index-array.c | 2 +- clang/test/CIR/CodeGen/static.cpp | 8 ++--- .../test/CIR/Lowering/ThroughMLIR/vtable.cir | 2 +- clang/test/CIR/Lowering/globals.cir | 3 +- clang/test/CIR/Lowering/hello.cir | 3 +- 9 files changed, 31 insertions(+), 29 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 42cfb5641207..323e885f4fed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -922,10 +922,9 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // FIXME: This code is overly simple and should be merged with other global // handling. - + GV.setAlignmentAttr(getSize(astCtx.getDeclAlign(D))); // TODO(cir): // GV->setConstant(isTypeConstant(D->getType(), false)); - // GV->setAlignment(getContext().getDeclAlign(D).getAsAlign()); // setLinkageForGV(GV, D); if (D->getTLSKind()) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6a67a60d0fa4..8a30ace982ee 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1894,7 +1894,8 @@ class CIRGlobalOpLowering SmallVector attributes; auto newGlobalOp = rewriter.replaceOpWithNewOp( op, llvmType, op.getConstant(), convertLinkage(op.getLinkage()), - op.getSymName(), nullptr, /*alignment*/ 0, + op.getSymName(), nullptr, + /*alignment*/ op.getAlignment().value_or(0), /*addrSpace*/ getGlobalOpTargetAddrSpace(op), /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); @@ -2018,7 +2019,8 @@ class CIRGlobalOpLowering // Rewrite op. auto llvmGlobalOp = rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, init.value(), - /*alignment*/ 0, /*addrSpace*/ getGlobalOpTargetAddrSpace(op), + /*alignment*/op.getAlignment().value_or(0), + /*addrSpace*/ getGlobalOpTargetAddrSpace(op), /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); diff --git a/clang/test/CIR/CodeGen/attributes.c b/clang/test/CIR/CodeGen/attributes.c index 5c11dc9a298b..4231edf968f7 100644 --- a/clang/test/CIR/CodeGen/attributes.c +++ b/clang/test/CIR/CodeGen/attributes.c @@ -5,11 +5,11 @@ extern int __attribute__((section(".shared"))) ext; int getExt() { return ext; } -// CIR: cir.global "private" external @ext : !s32i {section = ".shared"} +// CIR: cir.global "private" external @ext : !s32i {alignment = 4 : i64, section = ".shared"} // LLVM: @ext = external global i32, section ".shared" int __attribute__((section(".shared"))) glob = 42; -// CIR: cir.global external @glob = #cir.int<42> : !s32i {section = ".shared"} +// CIR: cir.global external @glob = #cir.int<42> : !s32i {alignment = 4 : i64, section = ".shared"} // LLVM: @glob = global i32 42, section ".shared" diff --git a/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp b/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp index dc0e95dca555..f54519d72650 100644 --- a/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp +++ b/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp @@ -26,19 +26,18 @@ const int &compat_use_after_redecl1 = compat::c; const int &compat_use_after_redecl2 = compat::d; const int &compat_use_after_redecl3 = compat::g; -// CIR: cir.global weak_odr @_ZN6compat1bE = #cir.int<2> : !s32i -// CIR: cir.global weak_odr @_ZN6compat1aE = #cir.int<1> : !s32i -// CIR: cir.global weak_odr @_ZN6compat1cE = #cir.int<3> : !s32i -// CIR: cir.global external @_ZN6compat1eE = #cir.int<5> : !s32i -// CIR: cir.global weak_odr @_ZN6compat1fE = #cir.int<6> : !s32i -// CIR: cir.global linkonce_odr @_ZN6compat1dE = #cir.int<4> : !s32i -// CIR: cir.global linkonce_odr @_ZN6compat1gE = #cir.int<7> : !s32i - -// LLVM: @_ZN6compat1bE = weak_odr global i32 2 -// LLVM: @_ZN6compat1aE = weak_odr global i32 1 -// LLVM: @_ZN6compat1cE = weak_odr global i32 3 -// LLVM: @_ZN6compat1eE = global i32 5 -// LLVM: @_ZN6compat1fE = weak_odr global i32 6 -// LLVM: @_ZN6compat1dE = linkonce_odr global i32 4 -// LLVM: @_ZN6compat1gE = linkonce_odr global i32 7 +// CIR: cir.global weak_odr @_ZN6compat1bE = #cir.int<2> : !s32i {alignment = 4 : i64} +// CIR: cir.global weak_odr @_ZN6compat1aE = #cir.int<1> : !s32i {alignment = 4 : i64} +// CIR: cir.global weak_odr @_ZN6compat1cE = #cir.int<3> : !s32i {alignment = 4 : i64} +// CIR: cir.global external @_ZN6compat1eE = #cir.int<5> : !s32i {alignment = 4 : i64} +// CIR: cir.global weak_odr @_ZN6compat1fE = #cir.int<6> : !s32i {alignment = 4 : i64} +// CIR: cir.global linkonce_odr @_ZN6compat1dE = #cir.int<4> : !s32i {alignment = 4 : i64} +// CIR: cir.global linkonce_odr @_ZN6compat1gE = #cir.int<7> : !s32i {alignment = 4 : i64} +// LLVM: @_ZN6compat1bE = weak_odr global i32 2, align 4 +// LLVM: @_ZN6compat1aE = weak_odr global i32 1, align 4 +// LLVM: @_ZN6compat1cE = weak_odr global i32 3, align 4 +// LLVM: @_ZN6compat1eE = global i32 5, align 4 +// LLVM: @_ZN6compat1fE = weak_odr global i32 6, align 4 +// LLVM: @_ZN6compat1dE = linkonce_odr global i32 4, align 4 +// LLVM: @_ZN6compat1gE = linkonce_odr global i32 7, align 4 diff --git a/clang/test/CIR/CodeGen/globals-neg-index-array.c b/clang/test/CIR/CodeGen/globals-neg-index-array.c index 7f6110d30df5..609e8f59e087 100644 --- a/clang/test/CIR/CodeGen/globals-neg-index-array.c +++ b/clang/test/CIR/CodeGen/globals-neg-index-array.c @@ -14,7 +14,7 @@ struct __attribute__((packed)) PackedStruct { }; struct PackedStruct packed[10]; char *packed_element = &(packed[-2].a3); -// CHECK: cir.global external @packed = #cir.zero : !cir.array loc(#loc5) +// CHECK: cir.global external @packed = #cir.zero : !cir.array {alignment = 16 : i64} loc(#loc5) // CHECK: cir.global external @packed_element = #cir.global_view<@packed, [-2 : i32, 2 : i32]> // LLVM: @packed = global [10 x %struct.PackedStruct] zeroinitializer // LLVM: @packed_element = global ptr getelementptr inbounds ([10 x %struct.PackedStruct], ptr @packed, i32 -2, i32 2) diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index d756a7834f70..e011ed99233d 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -26,7 +26,7 @@ static Init __ioinit2(false); // BEFORE-NEXT: } dtor { // BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () -// BEFORE-NEXT: } {ast = #cir.var.decl.ast} +// BEFORE-NEXT: } {alignment = 1 : i64, ast = #cir.var.decl.ast} // BEFORE: cir.global "private" internal dsolocal @_ZL9__ioinit2 = ctor : !ty_22Init22 { // BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // BEFORE-NEXT: %1 = cir.const #false @@ -34,7 +34,7 @@ static Init __ioinit2(false); // BEFORE-NEXT: } dtor { // BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () -// BEFORE-NEXT: } {ast = #cir.var.decl.ast} +// BEFORE-NEXT: } {alignment = 1 : i64, ast = #cir.var.decl.ast} // BEFORE-NEXT: } @@ -43,7 +43,7 @@ static Init __ioinit2(false); // AFTER-NEXT: cir.func private @__cxa_atexit(!cir.ptr)>>, !cir.ptr, !cir.ptr) // AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) // AFTER-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) -// AFTER-NEXT: cir.global "private" internal dsolocal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} +// AFTER-NEXT: cir.global "private" internal dsolocal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {alignment = 1 : i64, ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init() // AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // AFTER-NEXT: %1 = cir.const #true @@ -55,7 +55,7 @@ static Init __ioinit2(false); // AFTER-NEXT: %6 = cir.get_global @__dso_handle : !cir.ptr // AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return -// AFTER: cir.global "private" internal dsolocal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {ast = #cir.var.decl.ast} +// AFTER: cir.global "private" internal dsolocal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {alignment = 1 : i64, ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() // AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // AFTER-NEXT: %1 = cir.const #false diff --git a/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir b/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir index acd0117925b9..c1e985cd3e2c 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir @@ -43,7 +43,7 @@ module { cir.func linkonce_odr @_ZN6Father9FatherFooEv(%arg0: !cir.ptr ) { cir.return } } -// MLIR: llvm.mlir.global linkonce_odr @_ZTV5Child() {addr_space = 0 : i32} : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> { +// MLIR: llvm.mlir.global linkonce_odr @_ZTV5Child() {addr_space = 0 : i32, alignment = 8 : i64} : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> { // MLIR: %{{[0-9]+}} = llvm.mlir.undef : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> // MLIR: %{{[0-9]+}} = llvm.mlir.undef : !llvm.array<4 x ptr> // MLIR: %{{[0-9]+}} = llvm.mlir.zero : !llvm.ptr diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index d8193b075d2a..164411ee899f 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -27,7 +27,8 @@ module { cir.global external @alpha = #cir.const_array<[#cir.int<97> : !s8i, #cir.int<98> : !s8i, #cir.int<99> : !s8i, #cir.int<0> : !s8i]> : !cir.array cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global external @s = #cir.global_view<@".str"> : !cir.ptr - // MLIR: llvm.mlir.global internal constant @".str"("example\00") {addr_space = 0 : i32} + // MLIR: llvm.mlir.global internal constant @".str"("example\00") + // MLIR-SAME: {addr_space = 0 : i32, alignment = 1 : i64} // MLIR: llvm.mlir.global external @s() {addr_space = 0 : i32} : !llvm.ptr { // MLIR: %0 = llvm.mlir.addressof @".str" : !llvm.ptr // MLIR: %1 = llvm.bitcast %0 : !llvm.ptr to !llvm.ptr diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 04017b6876b2..94b546809573 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -21,7 +21,8 @@ module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.sign } // CHECK: llvm.func @printf(!llvm.ptr, ...) -> i32 -// CHECK: llvm.mlir.global internal constant @".str"("Hello, world!\0A\00") {addr_space = 0 : i32} +// CHECK: llvm.mlir.global internal constant @".str"("Hello, world!\0A\00") +// CHECK-SAME: {addr_space = 0 : i32, alignment = 1 : i64} // CHECK: llvm.func @main() -> i32 // CHECK: %0 = llvm.mlir.constant(1 : index) : i64 // CHECK: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr From 9a909b842ee73d00fab65c34b3c50948aead6648 Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 10 Sep 2024 05:59:54 +0800 Subject: [PATCH 1806/2301] [CIR][CodeGen] Fix address space of result pointer type of array decay cast op (#812) There are two occurrences of `cir.cast(array_to_ptrdecay, ...)` that drop address spaces unexpectedly for its result pointer type. This PR fixes them with the source address space. ```mlir // Before %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr, addrspace(offload_local)>), !cir.ptr // After %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr, addrspace(offload_local)>), !cir.ptr ``` --- clang/lib/CIR/CodeGen/CIRGenBuilder.cpp | 4 +++- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 6 +++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 5 ++++ clang/test/CIR/CodeGen/OpenCL/array-decay.cl | 25 ++++++++++++++++++++ clang/test/CIR/IR/invalid.cir | 13 ++++++++++ 5 files changed, 50 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/OpenCL/array-decay.cl diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp index 6fda24084658..13ec20d8eda2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp @@ -19,8 +19,10 @@ mlir::Value CIRGenBuilderTy::maybeBuildArrayDecay(mlir::Location loc, ::mlir::dyn_cast<::mlir::cir::ArrayType>(arrayPtrTy.getPointee()); if (arrayTy) { + auto addrSpace = ::mlir::cast_if_present<::mlir::cir::AddressSpaceAttr>( + arrayPtrTy.getAddrSpace()); mlir::cir::PointerType flatPtrTy = - mlir::cir::PointerType::get(getContext(), arrayTy.getEltType()); + getPointerTo(arrayTy.getEltType(), addrSpace); return create( loc, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, arrayPtr); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 36941fc1725a..9ed791a75f83 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -488,8 +488,10 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, QualType elementPtrType = CGF.getContext().getPointerType(elementType); auto cirElementType = CGF.convertType(elementType); - auto cirElementPtrType = mlir::cir::PointerType::get( - CGF.getBuilder().getContext(), cirElementType); + auto cirAddrSpace = mlir::cast_if_present( + DestPtr.getType().getAddrSpace()); + auto cirElementPtrType = + CGF.getBuilder().getPointerTo(cirElementType, cirAddrSpace); auto loc = CGF.getLoc(ExprToVisit->getSourceRange()); // Cast from cir.ptr to cir.ptr diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a982502d8aef..107b435370f3 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -490,6 +490,11 @@ LogicalResult CastOp::verify() { if (!arrayPtrTy || !flatPtrTy) return emitOpError() << "requires !cir.ptr type for source and result"; + if (arrayPtrTy.getAddrSpace() != flatPtrTy.getAddrSpace()) { + return emitOpError() + << "requires same address space for source and result"; + } + auto arrayTy = mlir::dyn_cast(arrayPtrTy.getPointee()); if (!arrayTy) diff --git a/clang/test/CIR/CodeGen/OpenCL/array-decay.cl b/clang/test/CIR/CodeGen/OpenCL/array-decay.cl new file mode 100644 index 000000000000..d81e425729a6 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/array-decay.cl @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +// CIR: @func1 +// LLVM: @func1 +kernel void func1(global int *data) { + local int arr[32]; + + local int *ptr = arr; + // CIR: cir.cast(array_to_ptrdecay, %{{[0-9]+}} : !cir.ptr, addrspace(offload_local)>), !cir.ptr + // CIR-NEXT: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr, !cir.ptr, addrspace(offload_private)> + + // LLVM: store ptr addrspace(3) @func1.arr, ptr %{{[0-9]+}} +} + +// CIR: @func2 +// LLVM: @func2 +kernel void func2(global int *data) { + private int arr[32] = {data[2]}; + // CIR: %{{[0-9]+}} = cir.cast(array_to_ptrdecay, %{{[0-9]+}} : !cir.ptr, addrspace(offload_private)>), !cir.ptr + + // LLVM: %{{[0-9]+}} = getelementptr i32, ptr %3, i32 0 +} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 182845fa2b6f..2473082b9334 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1285,3 +1285,16 @@ module { cir.return } } + +// ----- + +!s32i = !cir.int + +module { + cir.func @array_to_ptrdecay_addrspace() { + %0 = cir.alloca !cir.array, !cir.ptr, addrspace(offload_private)>, ["x", init] + // expected-error@+1 {{requires same address space for source and result}} + %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr, addrspace(offload_private)>), !cir.ptr + cir.return + } +} From a1d8539373ec7089748b657592e6c1b6ae6328a6 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 10 Sep 2024 06:12:43 +0800 Subject: [PATCH 1807/2301] [CIR][Transform] Add simplify transformation for select op (#816) As mentioned at https://github.com/llvm/clangir/pull/809#discussion_r1734972785 , this PR adds more simplify transformations for select op: - `cir.select if %0 then x else x` -> `x` - `cir.select if %0 then #true else #false` -> `%0` - `cir.select if %0 then #false else #true` -> `cir.unary not %0` --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 17 ++++++-- .../CIR/Dialect/Transforms/CIRSimplify.cpp | 42 ++++++++++++++++++- clang/test/CIR/Transforms/select.cir | 36 +++++++++++++++- 3 files changed, 89 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 107b435370f3..f1df48c4c367 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1400,11 +1400,20 @@ void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, OpFoldResult SelectOp::fold(FoldAdaptor adaptor) { auto condition = adaptor.getCondition(); - if (!condition) - return nullptr; + if (condition) { + auto conditionValue = mlir::cast(condition).getValue(); + return conditionValue ? getTrueValue() : getFalseValue(); + } - auto conditionValue = mlir::cast(condition).getValue(); - return conditionValue ? getTrueValue() : getFalseValue(); + // cir.select if %0 then x else x -> x + auto trueValue = adaptor.getTrueValue(); + auto falseValue = adaptor.getFalseValue(); + if (trueValue && trueValue == falseValue) + return trueValue; + if (getTrueValue() == getFalseValue()) + return getTrueValue(); + + return nullptr; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp index d73420c21790..7fd04f761f43 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp @@ -107,6 +107,45 @@ struct RemoveTrivialTry : public OpRewritePattern { } }; +struct SimplifySelect : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult matchAndRewrite(SelectOp op, + PatternRewriter &rewriter) const final { + mlir::Operation *trueValueOp = op.getTrueValue().getDefiningOp(); + mlir::Operation *falseValueOp = op.getFalseValue().getDefiningOp(); + auto trueValueConstOp = + mlir::dyn_cast_if_present(trueValueOp); + auto falseValueConstOp = + mlir::dyn_cast_if_present(falseValueOp); + if (!trueValueConstOp || !falseValueConstOp) + return mlir::failure(); + + auto trueValue = + mlir::dyn_cast(trueValueConstOp.getValue()); + auto falseValue = + mlir::dyn_cast(falseValueConstOp.getValue()); + if (!trueValue || !falseValue) + return mlir::failure(); + + // cir.select if %0 then #true else #false -> %0 + if (trueValue.getValue() && !falseValue.getValue()) { + rewriter.replaceAllUsesWith(op, op.getCondition()); + rewriter.eraseOp(op); + return mlir::success(); + } + + // cir.seleft if %0 then #false else #true -> cir.unary not %0 + if (!trueValue.getValue() && falseValue.getValue()) { + rewriter.replaceOpWithNewOp( + op, mlir::cir::UnaryOpKind::Not, op.getCondition()); + return mlir::success(); + } + + return mlir::failure(); + } +}; + //===----------------------------------------------------------------------===// // CIRSimplifyPass //===----------------------------------------------------------------------===// @@ -131,7 +170,8 @@ void populateMergeCleanupPatterns(RewritePatternSet &patterns) { RemoveRedundantBranches, RemoveEmptyScope, RemoveEmptySwitch, - RemoveTrivialTry + RemoveTrivialTry, + SimplifySelect >(patterns.getContext()); // clang-format on } diff --git a/clang/test/CIR/Transforms/select.cir b/clang/test/CIR/Transforms/select.cir index c3db14daaf4e..6d18be0b9439 100644 --- a/clang/test/CIR/Transforms/select.cir +++ b/clang/test/CIR/Transforms/select.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt --canonicalize -o %t.cir %s +// RUN: cir-opt -cir-simplify -o %t.cir %s // RUN: FileCheck --input-file=%t.cir %s !s32i = !cir.int @@ -23,4 +23,38 @@ module { // CHECK: cir.func @fold_false(%[[ARG0:.+]]: !s32i, %[[ARG1:.+]]: !s32i) -> !s32i { // CHECK-NEXT: cir.return %[[ARG1]] : !s32i // CHECK-NEXT: } + + cir.func @fold_to_const(%arg0 : !cir.bool) -> !s32i { + %0 = cir.const #cir.int<42> : !s32i + %1 = cir.select if %arg0 then %0 else %0 : (!cir.bool, !s32i, !s32i) -> !s32i + cir.return %1 : !s32i + } + + // CHECK: cir.func @fold_to_const(%{{.+}}: !cir.bool) -> !s32i { + // CHECK-NEXT: %[[#A:]] = cir.const #cir.int<42> : !s32i + // CHECK-NEXT: cir.return %[[#A]] : !s32i + // CHECK-NEXT: } + + cir.func @simplify_1(%arg0 : !cir.bool) -> !cir.bool { + %0 = cir.const #cir.bool : !cir.bool + %1 = cir.const #cir.bool : !cir.bool + %2 = cir.select if %arg0 then %0 else %1 : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool + cir.return %2 : !cir.bool + } + + // CHECK: cir.func @simplify_1(%[[ARG0:.+]]: !cir.bool) -> !cir.bool { + // CHECK-NEXT: cir.return %[[ARG0]] : !cir.bool + // CHECK-NEXT: } + + cir.func @simplify_2(%arg0 : !cir.bool) -> !cir.bool { + %0 = cir.const #cir.bool : !cir.bool + %1 = cir.const #cir.bool : !cir.bool + %2 = cir.select if %arg0 then %0 else %1 : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool + cir.return %2 : !cir.bool + } + + // CHECK: cir.func @simplify_2(%[[ARG0:.+]]: !cir.bool) -> !cir.bool { + // CHECK-NEXT: %[[#A:]] = cir.unary(not, %[[ARG0]]) : !cir.bool, !cir.bool + // CHECK-NEXT: cir.return %[[#A]] : !cir.bool + // CHECK-NEXT: } } From d7c891850835e6a1c2f396f23f986618e6cc66e0 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 10 Sep 2024 06:13:32 +0800 Subject: [PATCH 1808/2301] [CIR][NFC] Extend simple lowering to unary fp2int ops and binary fp2fp ops (#818) This PR makes simple lowering generate the result type lowering logic and make it suitable for unary fp2int operations and binary fp2fp operations. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 27 ++++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 76 +------------------ clang/utils/TableGen/CIRLoweringEmitter.cpp | 20 ++++- 3 files changed, 35 insertions(+), 88 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index cf69de9958e5..d2a5f393de6d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3796,7 +3796,8 @@ def IterEndOp : CIR_Op<"iterator_end"> { // Floating Point Ops //===----------------------------------------------------------------------===// -class UnaryFPToIntBuiltinOp : CIR_Op { +class UnaryFPToIntBuiltinOp + : CIR_Op { let arguments = (ins CIR_AnyFloat:$src); let results = (outs CIR_IntType:$result); @@ -3808,12 +3809,14 @@ class UnaryFPToIntBuiltinOp : CIR_Op { let assemblyFormat = [{ $src `:` type($src) `->` type($result) attr-dict }]; + + let llvmOp = llvmOpName; } -def LroundOp : UnaryFPToIntBuiltinOp<"lround">; -def LLroundOp : UnaryFPToIntBuiltinOp<"llround">; -def LrintOp : UnaryFPToIntBuiltinOp<"lrint">; -def LLrintOp : UnaryFPToIntBuiltinOp<"llrint">; +def LroundOp : UnaryFPToIntBuiltinOp<"lround", "LroundOp">; +def LLroundOp : UnaryFPToIntBuiltinOp<"llround", "LlroundOp">; +def LrintOp : UnaryFPToIntBuiltinOp<"lrint", "LrintOp">; +def LLrintOp : UnaryFPToIntBuiltinOp<"llrint", "LlrintOp">; class UnaryFPToFPBuiltinOp : CIR_Op { @@ -3842,7 +3845,7 @@ def SinOp : UnaryFPToFPBuiltinOp<"sin", "SinOp">; def SqrtOp : UnaryFPToFPBuiltinOp<"sqrt", "SqrtOp">; def TruncOp : UnaryFPToFPBuiltinOp<"trunc", "FTruncOp">; -class BinaryFPToFPBuiltinOp +class BinaryFPToFPBuiltinOp : CIR_Op { let summary = [{ libc builtin equivalent ignoring floating-point exceptions and errno. @@ -3854,13 +3857,15 @@ class BinaryFPToFPBuiltinOp let assemblyFormat = [{ $lhs `,` $rhs `:` qualified(type($lhs)) attr-dict }]; + + let llvmOp = llvmOpName; } -def CopysignOp : BinaryFPToFPBuiltinOp<"copysign">; -def FMaxOp : BinaryFPToFPBuiltinOp<"fmax">; -def FMinOp : BinaryFPToFPBuiltinOp<"fmin">; -def FModOp : BinaryFPToFPBuiltinOp<"fmod">; -def PowOp : BinaryFPToFPBuiltinOp<"pow">; +def CopysignOp : BinaryFPToFPBuiltinOp<"copysign", "CopySignOp">; +def FMaxOp : BinaryFPToFPBuiltinOp<"fmax", "MaxNumOp">; +def FMinOp : BinaryFPToFPBuiltinOp<"fmin", "MinNumOp">; +def FModOp : BinaryFPToFPBuiltinOp<"fmod", "FRemOp">; +def PowOp : BinaryFPToFPBuiltinOp<"pow", "PowOp">; //===----------------------------------------------------------------------===// // Branch Probability Operations diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8a30ace982ee..ba9ac849c256 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3542,73 +3542,6 @@ class CIRCmpThreeWayOpLowering } }; -template -class CIRUnaryFPBuiltinOpLowering : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(CIROp op, - typename mlir::OpConversionPattern::OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resTy = this->getTypeConverter()->convertType(op.getType()); - rewriter.replaceOpWithNewOp(op, resTy, adaptor.getSrc()); - return mlir::success(); - } -}; - -using CIRLroundOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRLLroundOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRLrintOpLowering = - CIRUnaryFPBuiltinOpLowering; -using CIRLLrintOpLowering = - CIRUnaryFPBuiltinOpLowering; - -template -class CIRBinaryFPToFPBuiltinOpLowering - : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(CIROp op, - typename mlir::OpConversionPattern::OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resTy = this->getTypeConverter()->convertType(op.getType()); - rewriter.replaceOpWithNewOp(op, resTy, adaptor.getLhs(), - adaptor.getRhs()); - return mlir::success(); - } -}; - -using CIRCopysignOpLowering = - CIRBinaryFPToFPBuiltinOpLowering; -using CIRFMaxOpLowering = - CIRBinaryFPToFPBuiltinOpLowering; -using CIRFMinOpLowering = - CIRBinaryFPToFPBuiltinOpLowering; -using CIRPowOpLowering = - CIRBinaryFPToFPBuiltinOpLowering; - -// cir.fmod is special. Instead of lowering it to an intrinsic call, lower it to -// the frem LLVM instruction. -class CIRFModOpLowering : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::FModOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resTy = this->getTypeConverter()->convertType(op.getType()); - rewriter.replaceOpWithNewOp(op, resTy, adaptor.getLhs(), - adaptor.getRhs()); - return mlir::success(); - } -}; - class CIRClearCacheOpLowering : public mlir::OpConversionPattern { public: @@ -3835,12 +3768,9 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRStackSaveLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, - CIRCmpThreeWayOpLowering, CIRLroundOpLowering, CIRLLroundOpLowering, - CIRLrintOpLowering, CIRLLrintOpLowering, CIRCopysignOpLowering, - CIRFModOpLowering, CIRFMaxOpLowering, CIRFMinOpLowering, CIRPowOpLowering, - CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering, - CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, - CIRThrowOpLowering + CIRCmpThreeWayOpLowering, CIRClearCacheOpLowering, CIRUndefOpLowering, + CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, + CIRAllocExceptionOpLowering, CIRThrowOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/utils/TableGen/CIRLoweringEmitter.cpp b/clang/utils/TableGen/CIRLoweringEmitter.cpp index 29daa63be86b..3e5456e7e692 100644 --- a/clang/utils/TableGen/CIRLoweringEmitter.cpp +++ b/clang/utils/TableGen/CIRLoweringEmitter.cpp @@ -15,7 +15,7 @@ namespace { std::string ClassDefinitions; std::string ClassList; -void GenerateLowering(raw_ostream &OS, const Record *Operation) { +void GenerateLowering(const Record *Operation) { using namespace std::string_literals; std::string Name = Operation->getName().str(); std::string LLVMOp = Operation->getValueAsString("llvmOp").str(); @@ -30,9 +30,21 @@ void GenerateLowering(raw_ostream &OS, const Record *Operation) { mlir::LogicalResult matchAndRewrite(mlir::cir::)C++" + Name + - R"C++( op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { + " op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) " + "const " + "override {"; + + auto ResultCount = Operation->getValueAsDag("results")->getNumArgs(); + if (ResultCount > 0) + ClassDefinitions += R"C++( + auto resTy = this->getTypeConverter()->convertType(op.getType());)C++"; + + ClassDefinitions += R"C++( rewriter.replaceOpWithNewOp(op"; + LLVMOp + ">(op"; + + if (ResultCount > 0) + ClassDefinitions += ", resTy"; auto ArgCount = Operation->getValueAsDag("arguments")->getNumArgs(); for (size_t i = 0; i != ArgCount; ++i) @@ -54,7 +66,7 @@ void clang::EmitCIRBuiltinsLowering(const RecordKeeper &Records, for (const auto *Builtin : Records.getAllDerivedDefinitions("LLVMLoweringInfo")) { if (!Builtin->getValueAsString("llvmOp").empty()) - GenerateLowering(OS, Builtin); + GenerateLowering(Builtin); } OS << "#ifdef GET_BUILTIN_LOWERING_CLASSES\n" From d35980c961a3b25952ac5ded077099379804e0cc Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 10 Sep 2024 06:42:22 +0800 Subject: [PATCH 1809/2301] [CIR][CIRGen] Add initial CIRGen support for local temporary materialization (#820) This PR adds initial support for temporary materialization of local temporaries with trivial cleanups. Materialization of global temporaries and local temporaries with non-trivial cleanups is far more trickier that I initially thought and I decide to submit this easy part first. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 28 +++++++++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 6 ++- .../CIR/CodeGen/temporary-materialization.cpp | 43 +++++++++++++++++++ 3 files changed, 69 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/temporary-materialization.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 347cc2a932cf..3bfb66545c62 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2150,8 +2150,22 @@ static Address createReferenceTemporary(CIRGenFunction &CGF, (Ty->isArrayType() || Ty->isRecordType()) && CGF.CGM.isTypeConstant(Ty, /*ExcludeCtor=*/true, /*ExcludeDtor=*/false)) assert(0 && "NYI"); + + // The temporary memory should be created in the same scope as the extending + // declaration of the temporary materialization expression. + mlir::cir::AllocaOp extDeclAlloca; + if (const clang::ValueDecl *extDecl = M->getExtendingDecl()) { + auto extDeclAddrIter = CGF.LocalDeclMap.find(extDecl); + if (extDeclAddrIter != CGF.LocalDeclMap.end()) { + extDeclAlloca = dyn_cast_if_present( + extDeclAddrIter->second.getDefiningOp()); + } + } + mlir::OpBuilder::InsertPoint ip; + if (extDeclAlloca) + ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()}; return CGF.CreateMemTemp(Ty, CGF.getLoc(M->getSourceRange()), - CGF.getCounterRefTmpAsString(), Alloca); + CGF.getCounterRefTmpAsString(), Alloca, ip); } case SD_Thread: case SD_Static: @@ -2249,7 +2263,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( } else { switch (M->getStorageDuration()) { case SD_Automatic: - assert(0 && "NYI"); + assert(!MissingFeatures::shouldEmitLifetimeMarkers()); break; case SD_FullExpression: { @@ -2940,18 +2954,20 @@ void CIRGenFunction::buildUnreachable(SourceLocation Loc) { //===----------------------------------------------------------------------===// Address CIRGenFunction::CreateMemTemp(QualType Ty, mlir::Location Loc, - const Twine &Name, Address *Alloca) { + const Twine &Name, Address *Alloca, + mlir::OpBuilder::InsertPoint ip) { // FIXME: Should we prefer the preferred type alignment here? return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Loc, Name, - Alloca); + Alloca, ip); } Address CIRGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, mlir::Location Loc, const Twine &Name, - Address *Alloca) { + Address *Alloca, + mlir::OpBuilder::InsertPoint ip) { Address Result = CreateTempAlloca(getTypes().convertTypeForMem(Ty), Align, Loc, Name, - /*ArraySize=*/nullptr, Alloca); + /*ArraySize=*/nullptr, Alloca, ip); if (Ty->isConstantMatrixType()) { assert(0 && "NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index cde8eac2a086..4639dfc3024a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -2215,9 +2215,11 @@ class CIRGenFunction : public CIRGenTypeCache { /// appropriate alignmen and cast it to the default address space. Returns /// the original alloca instruction by \p Alloca if it is not nullptr. Address CreateMemTemp(QualType T, mlir::Location Loc, - const Twine &Name = "tmp", Address *Alloca = nullptr); + const Twine &Name = "tmp", Address *Alloca = nullptr, + mlir::OpBuilder::InsertPoint ip = {}); Address CreateMemTemp(QualType T, CharUnits Align, mlir::Location Loc, - const Twine &Name = "tmp", Address *Alloca = nullptr); + const Twine &Name = "tmp", Address *Alloca = nullptr, + mlir::OpBuilder::InsertPoint ip = {}); /// Create a temporary memory object of the given type, with /// appropriate alignment without casting it to the default address space. diff --git a/clang/test/CIR/CodeGen/temporary-materialization.cpp b/clang/test/CIR/CodeGen/temporary-materialization.cpp new file mode 100644 index 000000000000..3b063db09dc3 --- /dev/null +++ b/clang/test/CIR/CodeGen/temporary-materialization.cpp @@ -0,0 +1,43 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int make_int(); + +int test() { + const int &x = make_int(); + return x; +} + +// CHECK: cir.func @_Z4testv() +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %[[#TEMP_SLOT:]] = cir.alloca !s32i, !cir.ptr, ["ref.tmp0", init] {alignment = 4 : i64} +// CHECK-NEXT: %[[#x:]] = cir.alloca !cir.ptr, !cir.ptr>, ["x", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[#TEMP_VALUE:]] = cir.call @_Z8make_intv() : () -> !s32i +// CHECK-NEXT: cir.store %[[#TEMP_VALUE]], %[[#TEMP_SLOT]] : !s32i, !cir.ptr +// CHECK-NEXT: } +// CHECK-NEXT: cir.store %[[#TEMP_SLOT]], %[[#x]] : !cir.ptr, !cir.ptr> +// CHECK: } + +int test_scoped() { + int x = make_int(); + { + const int &y = make_int(); + x = y; + } + return x; +} + +// CHECK: cir.func @_Z11test_scopedv() +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK-NEXT: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK: cir.scope { +// CHECK-NEXT: %[[#TEMP_SLOT:]] = cir.alloca !s32i, !cir.ptr, ["ref.tmp0", init] {alignment = 4 : i64} +// CHECK-NEXT: %[[#y:]] = cir.alloca !cir.ptr, !cir.ptr>, ["y", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[#TEMP_VALUE:]] = cir.call @_Z8make_intv() : () -> !s32i +// CHECK-NEXT: cir.store %[[#TEMP_VALUE]], %[[#TEMP_SLOT]] : !s32i, !cir.ptr +// CHECK-NEXT: } +// CHECK-NEXT: cir.store %[[#TEMP_SLOT]], %[[#y]] : !cir.ptr, !cir.ptr> +// CHECK: } +// CHECK: } From c2b8e4e0b49f228bb5ef3835948910a15ad31a45 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Mon, 9 Sep 2024 16:41:17 -0700 Subject: [PATCH 1810/2301] [CIR][CIRGen] Implement delegating constructors (#821) This is a straightforward adaption from CodeGen. I checked the uses of the Delegating arg that's passed in various places, and it only appears to be used by virtual inheritance, which should be handled by https://github.com/llvm/clangir/pull/624. --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 51 ++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 7 ++ clang/test/CIR/CodeGen/delegating-ctor.cpp | 88 ++++++++++++++++++++++ 4 files changed, 146 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/delegating-ctor.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 03fa0999a3ab..6ca6711dbbb3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -50,7 +50,7 @@ bool CIRGenFunction::IsConstructorDelegationValid( // FIXME: Decide if we can do a delegation of a delegating constructor. if (Ctor->isDelegatingConstructor()) - llvm_unreachable("NYI"); + return false; return true; } @@ -585,7 +585,7 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType CtorType, FunctionArgList &Args) { if (CD->isDelegatingConstructor()) - llvm_unreachable("NYI"); + return buildDelegatingCXXConstructorCall(CD, Args); const CXXRecordDecl *ClassDecl = CD->getParent(); @@ -1379,6 +1379,51 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, assert(!MissingFeatures::sanitizeDtor()); } +namespace { +struct CallDelegatingCtorDtor final : EHScopeStack::Cleanup { + const CXXDestructorDecl *Dtor; + Address Addr; + CXXDtorType Type; + + CallDelegatingCtorDtor(const CXXDestructorDecl *D, Address Addr, + CXXDtorType Type) + : Dtor(D), Addr(Addr), Type(Type) {} + + void Emit(CIRGenFunction &CGF, Flags flags) override { + // We are calling the destructor from within the constructor. + // Therefore, "this" should have the expected type. + QualType ThisTy = Dtor->getFunctionObjectParameterType(); + CGF.buildCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false, + /*Delegating=*/true, Addr, ThisTy); + } +}; +} // end anonymous namespace + +void CIRGenFunction::buildDelegatingCXXConstructorCall( + const CXXConstructorDecl *Ctor, const FunctionArgList &Args) { + assert(Ctor->isDelegatingConstructor()); + + Address ThisPtr = LoadCXXThisAddress(); + + AggValueSlot AggSlot = AggValueSlot::forAddr( + ThisPtr, Qualifiers(), AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, + AggValueSlot::MayOverlap, AggValueSlot::IsNotZeroed, + // Checks are made by the code that calls constructor. + AggValueSlot::IsSanitizerChecked); + + buildAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot); + + const CXXRecordDecl *ClassDecl = Ctor->getParent(); + if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) { + CXXDtorType Type = + CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base; + + EHStack.pushCleanup( + EHCleanup, ClassDecl->getDestructor(), ThisPtr, Type); + } +} + void CIRGenFunction::buildCXXDestructorCall(const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, @@ -1710,4 +1755,4 @@ void CIRGenFunction::buildCXXAggrConstructorCall( if (constantCount.use_empty()) constantCount.erase(); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index d1be80d1f968..89f60f52f34d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -402,7 +402,9 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, Type = Ctor_Complete; break; case CXXConstructionKind::Delegating: - llvm_unreachable("NYI"); + // We should be emitting a constructor; GlobalDecl will assert this + Type = CurGD.getCtorType(); + Delegating = true; break; case CXXConstructionKind::VirtualBase: ForVirtualBase = true; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 4639dfc3024a..73aeb524a88a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1649,6 +1649,13 @@ class CIRGenFunction : public CIRGenTypeCache { const FunctionArgList &Args, clang::SourceLocation Loc); + // It's important not to confuse this and the previous function. Delegating + // constructors are the C++11 feature. The constructor delegate optimization + // is used to reduce duplication in the base and complete constructors where + // they are substantially the same. + void buildDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor, + const FunctionArgList &Args); + /// We are performing a delegate call; that is, the current function is /// delegating to another one. Produce a r-value suitable for passing the /// given parameter. diff --git a/clang/test/CIR/CodeGen/delegating-ctor.cpp b/clang/test/CIR/CodeGen/delegating-ctor.cpp new file mode 100644 index 000000000000..b230ea6f1d5c --- /dev/null +++ b/clang/test/CIR/CodeGen/delegating-ctor.cpp @@ -0,0 +1,88 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Delegating { + Delegating(); + Delegating(int); +}; + +// Check that the constructor being delegated to is called with the correct +// arguments. +Delegating::Delegating() : Delegating(0) {} + +// CHECK-LABEL: cir.func @_ZN10DelegatingC2Ev(%arg0: !cir.ptr {{.*}}) {{.*}} { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %2 = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.call @_ZN10DelegatingC2Ei(%1, %2) : (!cir.ptr, !s32i) -> () +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +struct DelegatingWithZeroing { + int i; + DelegatingWithZeroing() = default; + DelegatingWithZeroing(int); +}; + +// Check that the delegating constructor performs zero-initialization here. +// FIXME: we should either emit the trivial default constructor or remove the +// call to it in a lowering pass. +DelegatingWithZeroing::DelegatingWithZeroing(int) : DelegatingWithZeroing() {} + +// CHECK-LABEL: cir.func @_ZN21DelegatingWithZeroingC2Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i {{.*}}) {{.*}} { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg1, %1 : !s32i, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %3 = cir.const #cir.zero : !ty_22DelegatingWithZeroing22 +// CHECK-NEXT: cir.store %3, %2 : !ty_22DelegatingWithZeroing22, !cir.ptr +// CHECK-NEXT: cir.call @_ZN21DelegatingWithZeroingC2Ev(%2) : (!cir.ptr) -> () extra(#fn_attr1) +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +void canThrow(); +struct HasNonTrivialDestructor { + HasNonTrivialDestructor(); + HasNonTrivialDestructor(int); + ~HasNonTrivialDestructor(); +}; + +// Check that we call the destructor whenever a cleanup is needed. +// FIXME: enable and check this when exceptions are fully supported. +#if 0 +HasNonTrivialDestructor::HasNonTrivialDestructor(int) + : HasNonTrivialDestructor() { + canThrow(); +} +#endif + +// From clang/test/CodeGenCXX/cxx0x-delegating-ctors.cpp, check that virtual +// inheritance and delegating constructors interact correctly. +// FIXME: enable and check this when virtual inheritance is fully supported. +#if 0 +namespace PR14588 { +void other(); + +class Base { +public: + Base() { squawk(); } + virtual ~Base() {} + + virtual void squawk() { other(); } +}; + +class Foo : public virtual Base { +public: + Foo(); + Foo(const void *inVoid); + virtual ~Foo() {} + + virtual void squawk() { other(); } +}; + +Foo::Foo() : Foo(nullptr) { other(); } +Foo::Foo(const void *inVoid) { squawk(); } +} // namespace PR14588 +#endif From 5240d5d5c4d632296d71e794a4b9b63dd0e21450 Mon Sep 17 00:00:00 2001 From: 7mile Date: Wed, 11 Sep 2024 01:57:07 +0800 Subject: [PATCH 1811/2301] [CIR][Dialect] Verify bitcast does not contain address space conversion (#813) Currently some bitcasts would silently change the address space of source pointer type, which hides some miscompilations of pointer type in CIR. #812 is an example. The address space in result pointer type is dropped, but the bitcast later keep the type consistency no matter what the result type is. Such bitcast is commonly emitted in CodeGen. CIR bitcasts are lowered to LLVM bitcasts, which also don't allow mismatch between address spaces. This PR adds this verification. --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 +++++++++++++--- clang/test/CIR/IR/invalid.cir | 15 +++++++++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f1df48c4c367..8ba5ca608093 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -510,11 +510,21 @@ LogicalResult CastOp::verify() { if (isa(srcType) || isa(resType)) return success(); + // Handle the pointer types first. + auto srcPtrTy = mlir::dyn_cast(srcType); + auto resPtrTy = mlir::dyn_cast(resType); + + if (srcPtrTy && resPtrTy) { + if (srcPtrTy.getAddrSpace() != resPtrTy.getAddrSpace()) { + return emitOpError() << "result type address space does not match the " + "address space of the operand"; + } + return success(); + } + // This is the only cast kind where we don't want vector types to decay // into the element type. - if ((!mlir::isa(getSrc().getType()) || - !mlir::isa(getResult().getType())) && - (!mlir::isa(getSrc().getType()) || + if ((!mlir::isa(getSrc().getType()) || !mlir::isa(getResult().getType()))) return emitOpError() << "requires !cir.ptr or !cir.vector type for source and result"; diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 2473082b9334..2055dd89d230 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1298,3 +1298,18 @@ module { cir.return } } + +// ----- + +!s32i = !cir.int + +module { + + cir.func @test_bitcast_addrspace() { + %0 = cir.alloca !s32i, !cir.ptr, ["tmp"] {alignment = 4 : i64} + // expected-error@+1 {{'cir.cast' op result type address space does not match the address space of the operand}} + %1 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr + } + +} + From 10bcf2795fad263fcd046711d0415fbd612127a1 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Tue, 10 Sep 2024 11:18:04 -0700 Subject: [PATCH 1812/2301] [CIR][Dialect] Remove 22 prefix and suffix from type aliases (#826) We were previously printing the type alias for `struct S` as `!ty_22S22` instead of just `!ty_S`. This was because our alias computation for a StructType did the following: os << "ty_" << structType.getName() `structType.getName()` is a `StringAttr`, and writing a `StringAttr` to an output stream adds double quotes around the actual string [1][2]. These double quotes then get hex-encoded as 22 [3]. We can fix this by writing the actual string value instead. Aliases that would end in a number will now receive a trailing underscore because of MLIR's alias sanitization not allowing a trailing digit [4] (which ironically didn't kick in even though our aliases were always previously ending with a number, which might be a bug in the sanitization logic). Aliases containing other special characters (e.g. `::`) will still be escaped as before. In other words: ``` struct S {}; // before: !ty_22S22 = ... // after: !ty_S = ... struct S1 {}; // before: !ty_22S122 = ... // after: !ty_S1_ = ... struct std::string {}; // before: !ty_22std3A3Astring22 // after: !ty_std3A3Astring ``` I'm not a big fan of the trailing underscore special-case, but I also don't want to touch core MLIR logic, and I think the end result is still nicer than before. The tests were mechanically updated with the following command run inside `clang/test/CIR`, and the same commands can be run to update the tests for any in-flight patches. (These are for GNU sed; for macOS change the `-i` to `-i ''`.) find . -type f | xargs sed -i -E -e 's/ty_22([A-Za-z0-9_$]+[0-9])22/ty_\1_/g' -e 's/ty_22([A-Za-z0-9_$]+)22/ty_\1/g' clang/test/CIR/CodeGen/stmtexpr-init.c needed an additional minor fix to swap the expected order of two type aliases in the CIR output. clang/test/CIR/CodeGen/coro-task.cpp needed some surgery because it was searching for `22` to find the end of a type alias; I changed it to search for the actual alias instead. If you run into merge conflicts after this change, you can auto-resolve them via https://github.com/smeenai/clangir/commit/715f061f335bac0c4e8ac5daa940433c9694ad53 [1] https://github.com/llvm/llvm-project/blob/b3d2d5039b9b8aa10a86c593387f200b15c02aef/mlir/lib/IR/AsmPrinter.cpp#L2295 [2] https://github.com/llvm/llvm-project/blob/b3d2d5039b9b8aa10a86c593387f200b15c02aef/mlir/lib/IR/AsmPrinter.cpp#L2763 [3] https://github.com/llvm/llvm-project/blob/b3d2d5039b9b8aa10a86c593387f200b15c02aef/mlir/lib/IR/AsmPrinter.cpp#L1014 [4] https://github.com/llvm/llvm-project/blob/b3d2d5039b9b8aa10a86c593387f200b15c02aef/mlir/lib/IR/AsmPrinter.cpp#L1154 --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 8 +- clang/test/CIR/CodeGen/String.cpp | 30 +++--- clang/test/CIR/CodeGen/abstract-cond.c | 12 +-- clang/test/CIR/CodeGen/agg-copy.c | 78 ++++++++-------- clang/test/CIR/CodeGen/agg-init.cpp | 24 ++--- clang/test/CIR/CodeGen/agg-init2.cpp | 8 +- clang/test/CIR/CodeGen/array-init-destroy.cpp | 38 ++++---- clang/test/CIR/CodeGen/array-init.c | 18 ++-- clang/test/CIR/CodeGen/array.c | 2 +- clang/test/CIR/CodeGen/array.cpp | 2 +- clang/test/CIR/CodeGen/asm.c | 2 +- clang/test/CIR/CodeGen/assign-operator.cpp | 48 +++++----- clang/test/CIR/CodeGen/atomic.cpp | 4 +- clang/test/CIR/CodeGen/bitfield-union.c | 10 +- clang/test/CIR/CodeGen/bitfields.c | 32 +++---- clang/test/CIR/CodeGen/bitfields.cpp | 20 ++-- clang/test/CIR/CodeGen/bool.c | 22 ++--- clang/test/CIR/CodeGen/build-deferred.cpp | 2 +- clang/test/CIR/CodeGen/builtin-bit-cast.cpp | 16 ++-- .../CodeGen/call-via-class-member-funcptr.cpp | 8 +- clang/test/CIR/CodeGen/cast.c | 8 +- clang/test/CIR/CodeGen/cast.cpp | 8 +- clang/test/CIR/CodeGen/compound-literal.c | 22 ++--- clang/test/CIR/CodeGen/cond.cpp | 6 +- clang/test/CIR/CodeGen/const-bitfields.c | 10 +- clang/test/CIR/CodeGen/coro-task.cpp | 52 +++++------ clang/test/CIR/CodeGen/ctor-alias.cpp | 20 ++-- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 12 +-- clang/test/CIR/CodeGen/ctor.cpp | 24 ++--- clang/test/CIR/CodeGen/delegating-ctor.cpp | 24 ++--- clang/test/CIR/CodeGen/derived-to-base.cpp | 74 +++++++-------- clang/test/CIR/CodeGen/dtors-scopes.cpp | 8 +- clang/test/CIR/CodeGen/dtors.cpp | 8 +- clang/test/CIR/CodeGen/dynamic-cast-exact.cpp | 30 +++--- .../CodeGen/dynamic-cast-relative-layout.cpp | 12 +-- clang/test/CIR/CodeGen/dynamic-cast.cpp | 38 ++++---- clang/test/CIR/CodeGen/evaluate-expr.c | 2 +- clang/test/CIR/CodeGen/forward-decls.cpp | 12 +-- clang/test/CIR/CodeGen/fun-ptr.c | 24 ++--- .../CIR/CodeGen/globals-neg-index-array.c | 2 +- clang/test/CIR/CodeGen/globals.c | 8 +- clang/test/CIR/CodeGen/lambda.cpp | 46 +++++----- clang/test/CIR/CodeGen/libcall.cpp | 2 +- clang/test/CIR/CodeGen/lvalue-refs.cpp | 8 +- clang/test/CIR/CodeGen/multi-vtable.cpp | 22 ++--- clang/test/CIR/CodeGen/new.cpp | 28 +++--- clang/test/CIR/CodeGen/packed-structs.c | 12 +-- .../CIR/CodeGen/pointer-to-data-member.cpp | 22 ++--- .../CIR/CodeGen/pointer-to-member-func.cpp | 14 +-- clang/test/CIR/CodeGen/rangefor.cpp | 28 +++--- .../skip-functions-from-system-headers.cpp | 2 +- clang/test/CIR/CodeGen/static.cpp | 56 +++++------ clang/test/CIR/CodeGen/stmt-expr.c | 8 +- clang/test/CIR/CodeGen/stmt-expr.cpp | 10 +- clang/test/CIR/CodeGen/stmtexpr-init.c | 2 +- clang/test/CIR/CodeGen/struct-empty.c | 2 +- clang/test/CIR/CodeGen/struct.c | 38 ++++---- clang/test/CIR/CodeGen/struct.cpp | 92 +++++++++---------- clang/test/CIR/CodeGen/structural-binding.cpp | 36 ++++---- .../test/CIR/CodeGen/three-way-comparison.cpp | 4 +- clang/test/CIR/CodeGen/typedef.c | 2 +- clang/test/CIR/CodeGen/union-init.c | 8 +- clang/test/CIR/CodeGen/union.cpp | 48 +++++----- clang/test/CIR/CodeGen/var-arg-float.c | 20 ++-- clang/test/CIR/CodeGen/var-arg-scope.c | 20 ++-- clang/test/CIR/CodeGen/var-arg.c | 20 ++-- clang/test/CIR/CodeGen/vector.cpp | 8 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/IR/data-member-ptr.cir | 18 ++-- clang/test/CIR/IR/getmember.cir | 18 ++-- clang/test/CIR/IR/global.cir | 22 ++--- clang/test/CIR/IR/invalid.cir | 14 +-- clang/test/CIR/IR/struct.cir | 20 ++-- .../test/CIR/Lowering/ThroughMLIR/vtable.cir | 26 +++--- clang/test/CIR/Lowering/array.cir | 4 +- clang/test/CIR/Lowering/class.cir | 34 +++---- clang/test/CIR/Lowering/const.cir | 8 +- clang/test/CIR/Lowering/globals.cir | 28 +++--- clang/test/CIR/Lowering/struct.cir | 34 +++---- clang/test/CIR/Lowering/unions.cir | 18 ++-- clang/test/CIR/Lowering/variadics.cir | 20 ++-- .../x86_64/x86_64-call-conv-lowering-pass.cpp | 14 +-- 82 files changed, 813 insertions(+), 813 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 8ba5ca608093..d44896ae999e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -59,11 +59,11 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { AliasResult getAlias(Type type, raw_ostream &os) const final { if (auto structType = dyn_cast(type)) { - if (!structType.getName()) { + StringAttr nameAttr = structType.getName(); + if (!nameAttr) os << "ty_anon_" << structType.getKindAsStr(); - return AliasResult::OverridableAlias; - } - os << "ty_" << structType.getName(); + else + os << "ty_" << nameAttr.getValue(); return AliasResult::OverridableAlias; } if (auto intType = dyn_cast(type)) { diff --git a/clang/test/CIR/CodeGen/String.cpp b/clang/test/CIR/CodeGen/String.cpp index c504c1d99d0c..5898eb09fbb6 100644 --- a/clang/test/CIR/CodeGen/String.cpp +++ b/clang/test/CIR/CodeGen/String.cpp @@ -18,20 +18,20 @@ void test() { } // CHECK: cir.func linkonce_odr @_ZN6StringC2Ev -// CHECK-NEXT: %0 = cir.alloca !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: %1 = cir.load %0 // CHECK-NEXT: %2 = cir.get_member %1[0] {name = "storage"} // CHECK-NEXT: %3 = cir.const #cir.ptr : !cir.ptr // CHECK-NEXT: cir.store %3, %2 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %4 = cir.get_member %1[1] {name = "size"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %4 = cir.get_member %1[1] {name = "size"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %5 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: %6 = cir.cast(integral, %5 : !s32i), !s64i // CHECK-NEXT: cir.store %6, %4 : !s64i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2Ei -// CHECK-NEXT: %0 = cir.alloca !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["size", init] // CHECK-NEXT: cir.store %arg0, %0 // CHECK-NEXT: cir.store %arg1, %1 @@ -39,7 +39,7 @@ void test() { // CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} // CHECK-NEXT: %4 = cir.const #cir.ptr : !cir.ptr // CHECK-NEXT: cir.store %4, %3 -// CHECK-NEXT: %5 = cir.get_member %2[1] {name = "size"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %5 = cir.get_member %2[1] {name = "size"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: %7 = cir.cast(integral, %6 : !s32i), !s64i // CHECK-NEXT: cir.store %7, %5 : !s64i, !cir.ptr @@ -47,27 +47,27 @@ void test() { // CHECK-NEXT: } // CHECK: cir.func linkonce_odr @_ZN6StringC2EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} : !cir.ptr -> !cir.ptr> +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %3 = cir.get_member %2[0] {name = "storage"} : !cir.ptr -> !cir.ptr> // CHECK-NEXT: %4 = cir.const #cir.ptr : !cir.ptr // CHECK-NEXT: cir.store %4, %3 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.return // CHECK: cir.func linkonce_odr @_ZN6StringC1EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: %3 = cir.load %1 : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: cir.call @_ZN6StringC2EPKc(%2, %3) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return // CHECK: cir.func @_Z4testv() -// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () -// CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, !s32i) -> () -// CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC1Ei(%1, %3) : (!cir.ptr, !s32i) -> () +// CHECK: cir.call @_ZN6StringC1EPKc(%2, %5) : (!cir.ptr, !cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/abstract-cond.c b/clang/test/CIR/CodeGen/abstract-cond.c index 426adf7337a6..d724c8e1ea28 100644 --- a/clang/test/CIR/CodeGen/abstract-cond.c +++ b/clang/test/CIR/CodeGen/abstract-cond.c @@ -11,17 +11,17 @@ int f6(int a0, struct s6 a1, struct s6 a2) { // CIR-LABEL: @f6 // CIR: %[[A0:.*]] = cir.alloca !s32i, !cir.ptr, ["a0" -// CIR: %[[A1:.*]] = cir.alloca !ty_22s622, !cir.ptr, ["a1" -// CIR: %[[A2:.*]] = cir.alloca !ty_22s622, !cir.ptr, ["a2" -// CIR: %[[TMP:.*]] = cir.alloca !ty_22s622, !cir.ptr, ["tmp"] {alignment = 4 : i64} +// CIR: %[[A1:.*]] = cir.alloca !ty_s6_, !cir.ptr, ["a1" +// CIR: %[[A2:.*]] = cir.alloca !ty_s6_, !cir.ptr, ["a2" +// CIR: %[[TMP:.*]] = cir.alloca !ty_s6_, !cir.ptr, ["tmp"] {alignment = 4 : i64} // CIR: %[[LOAD_A0:.*]] = cir.load %[[A0]] : !cir.ptr, !s32i // CIR: %[[COND:.*]] = cir.cast(int_to_bool, %[[LOAD_A0]] : !s32i), !cir.bool // CIR: cir.if %[[COND]] { -// CIR: cir.copy %[[A1]] to %[[TMP]] : !cir.ptr +// CIR: cir.copy %[[A1]] to %[[TMP]] : !cir.ptr // CIR: } else { -// CIR: cir.copy %[[A2]] to %[[TMP]] : !cir.ptr +// CIR: cir.copy %[[A2]] to %[[TMP]] : !cir.ptr // CIR: } -// CIR: cir.get_member %[[TMP]][0] {name = "f0"} : !cir.ptr -> !cir.ptr +// CIR: cir.get_member %[[TMP]][0] {name = "f0"} : !cir.ptr -> !cir.ptr // LLVM-LABEL: @f6 // LLVM: %[[LOAD_A0:.*]] = load i32, ptr {{.*}} diff --git a/clang/test/CIR/CodeGen/agg-copy.c b/clang/test/CIR/CodeGen/agg-copy.c index f33d29fd1d11..d29f296d878d 100644 --- a/clang/test/CIR/CodeGen/agg-copy.c +++ b/clang/test/CIR/CodeGen/agg-copy.c @@ -10,53 +10,53 @@ typedef struct { } A; // CHECK: cir.func @foo1 -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] -// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a2", init] -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> -// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, !cir.ptr> -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP3:%.*]] = cir.const #cir.int<1> : !s32i -// CHECK: [[TMP4:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[TMP3]] : !s32i), !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.load [[TMP1]] : !cir.ptr>, !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[TMP3]] : !s32i), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP1]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP6:%.*]] = cir.const #cir.int<1> : !s32i -// CHECK: [[TMP7:%.*]] = cir.ptr_stride([[TMP5]] : !cir.ptr, [[TMP6]] : !s32i), !cir.ptr -// CHECK: cir.copy [[TMP7]] to [[TMP4]] : !cir.ptr +// CHECK: [[TMP7:%.*]] = cir.ptr_stride([[TMP5]] : !cir.ptr, [[TMP6]] : !s32i), !cir.ptr +// CHECK: cir.copy [[TMP7]] to [[TMP4]] : !cir.ptr void foo1(A* a1, A* a2) { a1[1] = a2[1]; } // CHECK: cir.func @foo2 -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] -// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a2", init] -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> -// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, !cir.ptr> -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][2] {name = "s"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.load [[TMP1]] : !cir.ptr>, !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.get_member [[TMP4]][2] {name = "s"} : !cir.ptr -> !cir.ptr -// CHECK: cir.copy [[TMP5]] to [[TMP3]] : !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] +// CHECK: [[TMP1:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg1, [[TMP1]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][2] {name = "s"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.load [[TMP1]] : !cir.ptr>, !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.get_member [[TMP4]][2] {name = "s"} : !cir.ptr -> !cir.ptr +// CHECK: cir.copy [[TMP5]] to [[TMP3]] : !cir.ptr void foo2(A* a1, A* a2) { a1->s = a2->s; } -// CHECK: cir.global external @a = #cir.zero : !ty_22A22 +// CHECK: cir.global external @a = #cir.zero : !ty_A // CHECK: cir.func @foo3 -// CHECK: [[TMP0]] = cir.alloca !ty_22A22, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CHECK: [[TMP1]] = cir.get_global @a : !cir.ptr -// CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr -// CHECK: [[TMP2]] = cir.load [[TMP0]] : !cir.ptr, !ty_22A22 -// CHECK: cir.return [[TMP2]] : !ty_22A22 +// CHECK: [[TMP0]] = cir.alloca !ty_A, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: [[TMP1]] = cir.get_global @a : !cir.ptr +// CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr +// CHECK: [[TMP2]] = cir.load [[TMP0]] : !cir.ptr, !ty_A +// CHECK: cir.return [[TMP2]] : !ty_A A a; A foo3(void) { return a; } // CHECK: cir.func @foo4 -// CHECK: [[TMP0]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] -// CHECK: [[TMP1]] = cir.alloca !ty_22A22, !cir.ptr, ["a2", init] -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> -// CHECK: [[TMP2]] = cir.load deref [[TMP0]] : !cir.ptr>, !cir.ptr -// CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr +// CHECK: [[TMP0]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] +// CHECK: [[TMP1]] = cir.alloca !ty_A, !cir.ptr, ["a2", init] +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2]] = cir.load deref [[TMP0]] : !cir.ptr>, !cir.ptr +// CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr void foo4(A* a1) { A a2 = *a1; } @@ -64,11 +64,11 @@ void foo4(A* a1) { A create() { A a; return a; } // CHECK: cir.func {{.*@foo5}} -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22A22, !cir.ptr, -// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, !cir.ptr, ["tmp"] {alignment = 4 : i64} -// CHECK: [[TMP2:%.*]] = cir.call @create() : () -> !ty_22A22 -// CHECK: cir.store [[TMP2]], [[TMP1]] : !ty_22A22, !cir.ptr -// CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_A, !cir.ptr, +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_A, !cir.ptr, ["tmp"] {alignment = 4 : i64} +// CHECK: [[TMP2:%.*]] = cir.call @create() : () -> !ty_A +// CHECK: cir.store [[TMP2]], [[TMP1]] : !ty_A, !cir.ptr +// CHECK: cir.copy [[TMP1]] to [[TMP0]] : !cir.ptr void foo5() { A a; a = create(); @@ -77,11 +77,11 @@ void foo5() { void foo6(A* a1) { A a2 = (*a1); // CHECK: cir.func {{.*@foo6}} -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] {alignment = 8 : i64} -// CHECK: [[TMP1:%.*]] = cir.alloca !ty_22A22, !cir.ptr, ["a2", init] {alignment = 4 : i64} -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> -// CHECK: [[TMP2:%.*]] = cir.load deref [[TMP0]] : !cir.ptr>, !cir.ptr -// CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["a1", init] {alignment = 8 : i64} +// CHECK: [[TMP1:%.*]] = cir.alloca !ty_A, !cir.ptr, ["a2", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.load deref [[TMP0]] : !cir.ptr>, !cir.ptr +// CHECK: cir.copy [[TMP2]] to [[TMP1]] : !cir.ptr } volatile A vol_a; diff --git a/clang/test/CIR/CodeGen/agg-init.cpp b/clang/test/CIR/CodeGen/agg-init.cpp index 3d0d2a279797..68a302a98e2d 100644 --- a/clang/test/CIR/CodeGen/agg-init.cpp +++ b/clang/test/CIR/CodeGen/agg-init.cpp @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// CHECK: !ty_22yep_22 = !cir.struct +// CHECK: !ty_yep_ = !cir.struct typedef enum xxy_ { xxy_Low = 0, @@ -17,11 +17,11 @@ typedef struct yep_ { void use() { yop{}; } // CHECK: cir.func @_Z3usev() -// CHECK: %0 = cir.alloca !ty_22yep_22, !cir.ptr, ["agg.tmp.ensured"] {alignment = 4 : i64} -// CHECK: %1 = cir.get_member %0[0] {name = "Status"} : !cir.ptr -> !cir.ptr +// CHECK: %0 = cir.alloca !ty_yep_, !cir.ptr, ["agg.tmp.ensured"] {alignment = 4 : i64} +// CHECK: %1 = cir.get_member %0[0] {name = "Status"} : !cir.ptr -> !cir.ptr // CHECK: %2 = cir.const #cir.int<0> : !u32i // CHECK: cir.store %2, %1 : !u32i, !cir.ptr -// CHECK: %3 = cir.get_member %0[1] {name = "HC"} : !cir.ptr -> !cir.ptr +// CHECK: %3 = cir.get_member %0[1] {name = "HC"} : !cir.ptr -> !cir.ptr // CHECK: %4 = cir.const #cir.int<0> : !u32i // CHECK: cir.store %4, %3 : !u32i, !cir.ptr // CHECK: cir.return @@ -47,16 +47,16 @@ void yo() { } // CHECK: cir.func @_Z2yov() -// CHECK: %0 = cir.alloca !ty_22Yo22, !cir.ptr, ["ext"] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !ty_22Yo22, !cir.ptr, ["ext2", init] {alignment = 8 : i64} -// CHECK: %2 = cir.const #cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.ptr : !cir.ptr, #cir.int<0> : !u64i}> : !ty_22Yo22 -// CHECK: cir.store %2, %0 : !ty_22Yo22, !cir.ptr -// CHECK: %3 = cir.get_member %1[0] {name = "type"} : !cir.ptr -> !cir.ptr +// CHECK: %0 = cir.alloca !ty_Yo, !cir.ptr, ["ext"] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_Yo, !cir.ptr, ["ext2", init] {alignment = 8 : i64} +// CHECK: %2 = cir.const #cir.const_struct<{#cir.int<1000070000> : !u32i, #cir.ptr : !cir.ptr, #cir.int<0> : !u64i}> : !ty_Yo +// CHECK: cir.store %2, %0 : !ty_Yo, !cir.ptr +// CHECK: %3 = cir.get_member %1[0] {name = "type"} : !cir.ptr -> !cir.ptr // CHECK: %4 = cir.const #cir.int<1000066001> : !u32i // CHECK: cir.store %4, %3 : !u32i, !cir.ptr -// CHECK: %5 = cir.get_member %1[1] {name = "next"} : !cir.ptr -> !cir.ptr> -// CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: %5 = cir.get_member %1[1] {name = "next"} : !cir.ptr -> !cir.ptr> +// CHECK: %6 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, !cir.ptr> -// CHECK: %7 = cir.get_member %1[2] {name = "createFlags"} : !cir.ptr -> !cir.ptr +// CHECK: %7 = cir.get_member %1[2] {name = "createFlags"} : !cir.ptr -> !cir.ptr // CHECK: %8 = cir.const #cir.int<0> : !u64i // CHECK: cir.store %8, %7 : !u64i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/agg-init2.cpp b/clang/test/CIR/CodeGen/agg-init2.cpp index d534f38fc169..cec2d67eb648 100644 --- a/clang/test/CIR/CodeGen/agg-init2.cpp +++ b/clang/test/CIR/CodeGen/agg-init2.cpp @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// CHECK: !ty_22Zero22 = !cir.struct +// CHECK: !ty_Zero = !cir.struct struct Zero { void yolo(); @@ -14,7 +14,7 @@ void f() { } // CHECK: cir.func @_Z1fv() -// CHECK: %0 = cir.alloca !ty_22Zero22, !cir.ptr, ["z0", init] -// CHECK: %1 = cir.alloca !ty_22Zero22, !cir.ptr, ["z1"] -// CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () +// CHECK: %0 = cir.alloca !ty_Zero, !cir.ptr, ["z0", init] +// CHECK: %1 = cir.alloca !ty_Zero, !cir.ptr, ["z1"] +// CHECK: cir.call @_ZN4ZeroC1Ev(%0) : (!cir.ptr) -> () // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/array-init-destroy.cpp b/clang/test/CIR/CodeGen/array-init-destroy.cpp index 2959e0085314..9bc39cec84c0 100644 --- a/clang/test/CIR/CodeGen/array-init-destroy.cpp +++ b/clang/test/CIR/CodeGen/array-init-destroy.cpp @@ -20,42 +20,42 @@ void x() { } // BEFORE: cir.func @_Z1xv() -// BEFORE: %[[ArrayAddr:.*]] = cir.alloca !cir.array +// BEFORE: %[[ArrayAddr:.*]] = cir.alloca !cir.array -// BEFORE: cir.array.ctor(%[[ArrayAddr]] : !cir.ptr>) { -// BEFORE: ^bb0(%arg0: !cir.ptr -// BEFORE: cir.call @_ZN4xptoC1Ev(%arg0) : (!cir.ptr) -> () +// BEFORE: cir.array.ctor(%[[ArrayAddr]] : !cir.ptr>) { +// BEFORE: ^bb0(%arg0: !cir.ptr +// BEFORE: cir.call @_ZN4xptoC1Ev(%arg0) : (!cir.ptr) -> () // BEFORE: cir.yield // BEFORE: } -// BEFORE: cir.array.dtor(%[[ArrayAddr]] : !cir.ptr>) { -// BEFORE: ^bb0(%arg0: !cir.ptr -// BEFORE: cir.call @_ZN4xptoD1Ev(%arg0) : (!cir.ptr) -> () +// BEFORE: cir.array.dtor(%[[ArrayAddr]] : !cir.ptr>) { +// BEFORE: ^bb0(%arg0: !cir.ptr +// BEFORE: cir.call @_ZN4xptoD1Ev(%arg0) : (!cir.ptr) -> () // BEFORE: cir.yield // BEFORE: } // AFTER: cir.func @_Z1xv() -// AFTER: %[[ArrayAddr0:.*]] = cir.alloca !cir.array +// AFTER: %[[ArrayAddr0:.*]] = cir.alloca !cir.array // AFTER: %[[ConstTwo:.*]] = cir.const #cir.int<2> : !u64i -// AFTER: %[[ArrayBegin:.*]] = cir.cast(array_to_ptrdecay, %[[ArrayAddr0]] : !cir.ptr>), !cir.ptr -// AFTER: %[[ArrayPastEnd:.*]] = cir.ptr_stride(%[[ArrayBegin]] : !cir.ptr, %[[ConstTwo]] : !u64i), !cir.ptr -// AFTER: %[[TmpIdx:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// AFTER: cir.store %[[ArrayBegin]], %[[TmpIdx]] : !cir.ptr, !cir.ptr> +// AFTER: %[[ArrayBegin:.*]] = cir.cast(array_to_ptrdecay, %[[ArrayAddr0]] : !cir.ptr>), !cir.ptr +// AFTER: %[[ArrayPastEnd:.*]] = cir.ptr_stride(%[[ArrayBegin]] : !cir.ptr, %[[ConstTwo]] : !u64i), !cir.ptr +// AFTER: %[[TmpIdx:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// AFTER: cir.store %[[ArrayBegin]], %[[TmpIdx]] : !cir.ptr, !cir.ptr> // AFTER: cir.do { -// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : !cir.ptr>, !cir.ptr +// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : !cir.ptr>, !cir.ptr // AFTER: %[[ConstOne:.*]] = cir.const #cir.int<1> : !u64i -// AFTER: cir.call @_ZN4xptoC1Ev(%[[ArrayElt]]) : (!cir.ptr) -> () -// AFTER: %[[NextElt:.*]] = cir.ptr_stride(%[[ArrayElt]] : !cir.ptr, %[[ConstOne]] : !u64i), !cir.ptr -// AFTER: cir.store %[[NextElt]], %[[TmpIdx]] : !cir.ptr, !cir.ptr> +// AFTER: cir.call @_ZN4xptoC1Ev(%[[ArrayElt]]) : (!cir.ptr) -> () +// AFTER: %[[NextElt:.*]] = cir.ptr_stride(%[[ArrayElt]] : !cir.ptr, %[[ConstOne]] : !u64i), !cir.ptr +// AFTER: cir.store %[[NextElt]], %[[TmpIdx]] : !cir.ptr, !cir.ptr> // AFTER: cir.yield // AFTER: } while { -// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : !cir.ptr>, !cir.ptr -// AFTER: %[[ExitCond:.*]] = cir.cmp(eq, %[[ArrayElt]], %[[ArrayPastEnd]]) : !cir.ptr, !cir.bool +// AFTER: %[[ArrayElt:.*]] = cir.load %[[TmpIdx]] : !cir.ptr>, !cir.ptr +// AFTER: %[[ExitCond:.*]] = cir.cmp(eq, %[[ArrayElt]], %[[ArrayPastEnd]]) : !cir.ptr, !cir.bool // AFTER: cir.condition(%[[ExitCond]]) // AFTER: } // AFTER: cir.do { -// AFTER: cir.call @_ZN4xptoD1Ev({{.*}}) : (!cir.ptr) -> () +// AFTER: cir.call @_ZN4xptoD1Ev({{.*}}) : (!cir.ptr) -> () // AFTER: } while { // AFTER: } diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c index d805d4ea1f4a..13999f24a45d 100644 --- a/clang/test/CIR/CodeGen/array-init.c +++ b/clang/test/CIR/CodeGen/array-init.c @@ -10,20 +10,20 @@ void buz(int x) { } // CHECK: cir.func @buz // CHECK-NEXT: [[X_ALLOCA:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} -// CHECK-NEXT: [[ARR:%.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 16 : i64} +// CHECK-NEXT: [[ARR:%.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 16 : i64} // CHECK-NEXT: cir.store %arg0, [[X_ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: [[ARR_INIT:%.*]] = cir.const #cir.zero : !cir.array -// CHECK-NEXT: cir.store [[ARR_INIT]], [[ARR]] : !cir.array, !cir.ptr> -// CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: [[A_STORAGE0:%.*]] = cir.get_member [[FI_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: [[B_STORAGE0:%.*]] = cir.get_member [[FI_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: [[ARR_INIT:%.*]] = cir.const #cir.zero : !cir.array +// CHECK-NEXT: cir.store [[ARR_INIT]], [[ARR]] : !cir.array, !cir.ptr> +// CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: [[A_STORAGE0:%.*]] = cir.get_member [[FI_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: [[B_STORAGE0:%.*]] = cir.get_member [[FI_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: [[X_VAL:%.*]] = cir.load [[X_ALLOCA]] : !cir.ptr, !s32i // CHECK-NEXT: [[X_CASTED:%.*]] = cir.cast(integral, [[X_VAL]] : !s32i), !s64i // CHECK-NEXT: cir.store [[X_CASTED]], [[B_STORAGE0]] : !s64i, !cir.ptr // CHECK-NEXT: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride([[FI_EL]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: [[A_STORAGE1:%.*]] = cir.get_member [[SE_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: [[B_STORAGE1:%.*]] = cir.get_member [[SE_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride([[FI_EL]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: [[A_STORAGE1:%.*]] = cir.get_member [[SE_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: [[B_STORAGE1:%.*]] = cir.get_member [[SE_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: cir.return void foo() { diff --git a/clang/test/CIR/CodeGen/array.c b/clang/test/CIR/CodeGen/array.c index 5079f687b2f5..ed83c663bd60 100644 --- a/clang/test/CIR/CodeGen/array.c +++ b/clang/test/CIR/CodeGen/array.c @@ -5,7 +5,7 @@ struct S { int i; } arr[3] = {{1}}; -// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array +// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S, #cir.zero : !ty_S, #cir.zero : !ty_S]> : !cir.array int a[4]; // CHECK: cir.global external @a = #cir.zero : !cir.array diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index 1fc6989058ae..b0807755cfec 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -70,7 +70,7 @@ int globalNullArr[] = {0, 0}; struct S { int i; } arr[3] = {{1}}; -// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array +// CHECK: cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S, #cir.zero : !ty_S, #cir.zero : !ty_S]> : !cir.array void testPointerDecaySubscriptAccess(int arr[]) { // CHECK: cir.func @{{.+}}testPointerDecaySubscriptAccess diff --git a/clang/test/CIR/CodeGen/asm.c b/clang/test/CIR/CodeGen/asm.c index 2079f9b0573d..19b9c7d18637 100644 --- a/clang/test/CIR/CodeGen/asm.c +++ b/clang/test/CIR/CodeGen/asm.c @@ -51,7 +51,7 @@ void empty5(int x) { // CHECK: out = [], // CHECK: in = [], // CHECK: in_out = [%2 : !s32i], -// CHECK: {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !ty_22anon2E022 +// CHECK: {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) side_effects -> !ty_anon2E0_ void empty6(int x) { __asm__ volatile("" : "=&r"(x), "+&r"(x)); } diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index a90642945562..5942beb296dc 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -15,11 +15,11 @@ struct String { // StringView::StringView(String const&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewC2ERK6String - // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} - // CHECK: cir.store %arg0, %0 : !cir.ptr - // CHECK: cir.store %arg1, %1 : !cir.ptr - // CHECK: %2 = cir.load %0 : !cir.ptr> + // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} + // CHECK: cir.store %arg0, %0 : !cir.ptr + // CHECK: cir.store %arg1, %1 : !cir.ptr + // CHECK: %2 = cir.load %0 : !cir.ptr> // Get address of `this->size` @@ -27,7 +27,7 @@ struct String { // Get address of `s` - // CHECK: %4 = cir.load %1 : !cir.ptr> + // CHECK: %4 = cir.load %1 : !cir.ptr> // Get the address of s.size @@ -41,25 +41,25 @@ struct String { // CHECK: } // DISABLE: cir.func linkonce_odr @_ZN10StringViewC2ERK6String - // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} + // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // StringView::operator=(StringView&&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewaSEOS_ - // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["", init] {alignment = 8 : i64} - // CHECK: %2 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} - // CHECK: cir.store %arg0, %0 : !cir.ptr - // CHECK: cir.store %arg1, %1 : !cir.ptr - // CHECK: %3 = cir.load deref %0 : !cir.ptr> - // CHECK: %4 = cir.load %1 : !cir.ptr> + // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["", init] {alignment = 8 : i64} + // CHECK: %2 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} + // CHECK: cir.store %arg0, %0 : !cir.ptr + // CHECK: cir.store %arg1, %1 : !cir.ptr + // CHECK: %3 = cir.load deref %0 : !cir.ptr> + // CHECK: %4 = cir.load %1 : !cir.ptr> // CHECK: %5 = cir.get_member %4[0] {name = "size"} // CHECK: %6 = cir.load %5 : !cir.ptr, !s64i // CHECK: %7 = cir.get_member %3[0] {name = "size"} // CHECK: cir.store %6, %7 : !s64i, !cir.ptr - // CHECK: cir.store %3, %2 : !cir.ptr - // CHECK: %8 = cir.load %2 : !cir.ptr> - // CHECK: cir.return %8 : !cir.ptr + // CHECK: cir.store %3, %2 : !cir.ptr + // CHECK: %8 = cir.load %2 : !cir.ptr> + // CHECK: cir.return %8 : !cir.ptr // CHECK: } // DISABLE: cir.func private @_ZN10StringViewaSEOS_ @@ -83,17 +83,17 @@ int main() { // CHECK: cir.func @main() -> !s32i // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !ty_22StringView22, !cir.ptr, ["sv", init] {alignment = 8 : i64} -// CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () +// CHECK: %1 = cir.alloca !ty_StringView, !cir.ptr, ["sv", init] {alignment = 8 : i64} +// CHECK: cir.call @_ZN10StringViewC2Ev(%1) : (!cir.ptr) -> () // CHECK: cir.scope { -// CHECK: %3 = cir.alloca !ty_22String22, !cir.ptr, ["s", init] {alignment = 8 : i64} +// CHECK: %3 = cir.alloca !ty_String, !cir.ptr, ["s", init] {alignment = 8 : i64} // CHECK: %4 = cir.get_global @".str" : !cir.ptr> // CHECK: %5 = cir.cast(array_to_ptrdecay, %4 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_ZN6StringC2EPKc(%3, %5) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC2EPKc(%3, %5) : (!cir.ptr, !cir.ptr) -> () // CHECK: cir.scope { -// CHECK: %6 = cir.alloca !ty_22StringView22, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: cir.call @_ZN10StringViewC2ERK6String(%6, %3) : (!cir.ptr, !cir.ptr) -> () -// CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %6) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: %6 = cir.alloca !ty_StringView, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: cir.call @_ZN10StringViewC2ERK6String(%6, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %7 = cir.call @_ZN10StringViewaSEOS_(%1, %6) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } // CHECK: } // CHECK: %2 = cir.load %0 : !cir.ptr, !s32i diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 98215cfb5980..abf21d024957 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -230,7 +230,7 @@ void fd3(struct S *a, struct S *b, struct S *c) { } // CHECK-LABEL: @_Z3fd3P1SS0_S0_ -// CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, seq_cst) : !u64i +// CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, seq_cst) : !u64i // FIXME: CIR is producing an over alignment of 8, only 4 needed. // LLVM-LABEL: @_Z3fd3P1SS0_S0_ @@ -252,7 +252,7 @@ bool fd4(struct S *a, struct S *b, struct S *c) { } // CHECK-LABEL: @_Z3fd4P1SS0_S0_ -// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) weak : (!u64i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) weak : (!u64i, !cir.bool) // LLVM-LABEL: @_Z3fd4P1SS0_S0_ // LLVM: cmpxchg weak ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8 diff --git a/clang/test/CIR/CodeGen/bitfield-union.c b/clang/test/CIR/CodeGen/bitfield-union.c index 96b12e367bc9..b08b035e2fd0 100644 --- a/clang/test/CIR/CodeGen/bitfield-union.c +++ b/clang/test/CIR/CodeGen/bitfield-union.c @@ -13,20 +13,20 @@ void main() { d.z = 0; } -// CHECK: !ty_22demo22 = !cir.struct +// CHECK: !ty_demo = !cir.struct // CHECK: #bfi_y = #cir.bitfield_info // CHECK: #bfi_z = #cir.bitfield_info // cir.func no_proto @main() extra(#fn_attr) { -// %0 = cir.alloca !ty_22demo22, !cir.ptr, ["d"] {alignment = 4 : i64} +// %0 = cir.alloca !ty_demo, !cir.ptr, ["d"] {alignment = 4 : i64} // %1 = cir.const #cir.int<1> : !s32i -// %2 = cir.get_member %0[0] {name = "x"} : !cir.ptr -> !cir.ptr +// %2 = cir.get_member %0[0] {name = "x"} : !cir.ptr -> !cir.ptr // cir.store %1, %2 : !s32i, !cir.ptr // %3 = cir.const #cir.int<2> : !s32i -// %4 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// %4 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // %5 = cir.set_bitfield(#bfi_y, %4 : !cir.ptr, %3 : !s32i) -> !s32i // %6 = cir.const #cir.int<0> : !s32i loc(#loc10) -// %7 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// %7 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // %8 = cir.set_bitfield(#bfi_z, %7 : !cir.ptr, %6 : !s32i) -> !s32i // cir.return // } diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index 57cd3b9ba250..f825640a6010 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -53,21 +53,21 @@ typedef struct { // because (tail - startOffset) is 65 after 'l' field } U; -// CHECK: !ty_22D22 = !cir.struct -// CHECK: !ty_22T22 = !cir.struct -// CHECK: !ty_22anon2E122 = !cir.struct +// CHECK: !ty_D = !cir.struct +// CHECK: !ty_T = !cir.struct +// CHECK: !ty_anon2E1_ = !cir.struct // CHECK: !ty_anon_struct = !cir.struct // CHECK: #bfi_a = #cir.bitfield_info // CHECK: #bfi_e = #cir.bitfield_info -// CHECK: !ty_22S22 = !cir.struct, !u16i, !u32i}> -// CHECK: !ty_22U22 = !cir.struct}> -// CHECK: !ty_22__long22 = !cir.struct}> +// CHECK: !ty_S = !cir.struct, !u16i, !u32i}> +// CHECK: !ty_U = !cir.struct}> +// CHECK: !ty___long = !cir.struct}> // CHECK: #bfi_d = #cir.bitfield_info, size = 2, offset = 17, is_signed = true> // CHECK: cir.func {{.*@store_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_S, !cir.ptr // CHECK: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i -// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP0]][2] {name = "e"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP0]][2] {name = "e"} : !cir.ptr -> !cir.ptr // CHECK: cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) void store_field() { S s; @@ -75,16 +75,16 @@ void store_field() { } // CHECK: cir.func {{.*@load_field}} -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] -// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr -// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> // CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr>) -> !s32i int load_field(S* s) { return s->d; } // CHECK: cir.func {{.*@unOp}} -// CHECK: [[TMP0:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP0:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr> // CHECK: [[TMP1:%.*]] = cir.get_bitfield(#bfi_d, [[TMP0]] : !cir.ptr>) -> !s32i // CHECK: [[TMP2:%.*]] = cir.unary(inc, [[TMP1]]) : !s32i, !s32i // CHECK: cir.set_bitfield(#bfi_d, [[TMP0]] : !cir.ptr>, [[TMP2]] : !s32i) @@ -94,7 +94,7 @@ void unOp(S* s) { // CHECK: cir.func {{.*@binOp}} // CHECK: [[TMP0:%.*]] = cir.const #cir.int<42> : !s32i -// CHECK: [[TMP1:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP1:%.*]] = cir.get_member {{.*}}[1] {name = "d"} : !cir.ptr -> !cir.ptr> // CHECK: [[TMP2:%.*]] = cir.get_bitfield(#bfi_d, [[TMP1]] : !cir.ptr>) -> !s32i // CHECK: [[TMP3:%.*]] = cir.binop(or, [[TMP2]], [[TMP0]]) : !s32i // CHECK: cir.set_bitfield(#bfi_d, [[TMP1]] : !cir.ptr>, [[TMP3]] : !s32i) @@ -104,7 +104,7 @@ void binOp(S* s) { // CHECK: cir.func {{.*@load_non_bitfield}} -// CHECK: cir.get_member {{%.}}[3] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK: cir.get_member {{%.}}[3] {name = "f"} : !cir.ptr -> !cir.ptr unsigned load_non_bitfield(S *s) { return s->f; } @@ -122,8 +122,8 @@ void createU() { // for this struct type we create an anon structure with different storage types in initialization // CHECK: cir.func {{.*@createD}} -// CHECK: %0 = cir.alloca !ty_22D22, !cir.ptr, ["d"] {alignment = 4 : i64} -// CHECK: %1 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: %0 = cir.alloca !ty_D, !cir.ptr, ["d"] {alignment = 4 : i64} +// CHECK: %1 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // CHECK: %2 = cir.const #cir.const_struct<{#cir.int<33> : !u8i, #cir.int<0> : !u8i, #cir.int<3> : !s32i}> : !ty_anon_struct // CHECK: cir.store %2, %1 : !ty_anon_struct, !cir.ptr void createD() { diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index be31118064bd..b5e9ed24d396 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -27,15 +27,15 @@ typedef struct { int a : 3; // one bitfield with size < 8 unsigned b; } T; -// CHECK: !ty_22T22 = !cir.struct -// CHECK: !ty_22anon2E122 = !cir.struct -// CHECK: !ty_22S22 = !cir.struct, !u16i, !u32i}> -// CHECK: !ty_22__long22 = !cir.struct}> +// CHECK: !ty_T = !cir.struct +// CHECK: !ty_anon2E1_ = !cir.struct +// CHECK: !ty_S = !cir.struct, !u16i, !u32i}> +// CHECK: !ty___long = !cir.struct}> // CHECK: cir.func @_Z11store_field -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, !cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_S, !cir.ptr // CHECK: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i -// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr // CHECK: cir.set_bitfield(#bfi_a, [[TMP2]] : !cir.ptr, [[TMP1]] : !s32i) void store_field() { S s; @@ -43,16 +43,16 @@ void store_field() { } // CHECK: cir.func @_Z10load_field -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] -// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr -// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] +// CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> // CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr>) -> !s32i int load_field(S& s) { return s.d; } // CHECK: cir.func @_Z17load_non_bitfield -// CHECK: cir.get_member {{%.}}[3] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK: cir.get_member {{%.}}[3] {name = "f"} : !cir.ptr -> !cir.ptr unsigned load_non_bitfield(S& s) { return s.f; } diff --git a/clang/test/CIR/CodeGen/bool.c b/clang/test/CIR/CodeGen/bool.c index 6cc618b094a5..7a9655714210 100644 --- a/clang/test/CIR/CodeGen/bool.c +++ b/clang/test/CIR/CodeGen/bool.c @@ -8,31 +8,31 @@ typedef struct { } S; // CHECK: cir.func @init_bool -// CHECK: [[ALLOC:%.*]] = cir.alloca !ty_22S22, !cir.ptr -// CHECK: [[ZERO:%.*]] = cir.const #cir.zero : !ty_22S22 -// CHECK: cir.store [[ZERO]], [[ALLOC]] : !ty_22S22, !cir.ptr +// CHECK: [[ALLOC:%.*]] = cir.alloca !ty_S, !cir.ptr +// CHECK: [[ZERO:%.*]] = cir.const #cir.zero : !ty_S +// CHECK: cir.store [[ZERO]], [[ALLOC]] : !ty_S, !cir.ptr void init_bool(void) { S s = {0}; } // CHECK: cir.func @store_bool -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr> -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> // CHECK: [[TMP1:%.*]] = cir.const #cir.int<0> : !s32i // CHECK: [[TMP2:%.*]] = cir.cast(int_to_bool, [[TMP1]] : !s32i), !cir.bool -// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.get_member [[TMP3]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.get_member [[TMP3]][0] {name = "x"} : !cir.ptr -> !cir.ptr // CHECK: cir.store [[TMP2]], [[TMP4]] : !cir.bool, !cir.ptr void store_bool(S *s) { s->x = false; } // CHECK: cir.func @load_bool -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} // CHECK: [[TMP1:%.*]] = cir.alloca !cir.bool, !cir.ptr, ["x", init] {alignment = 1 : i64} -// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> -// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr -// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "x"} : !cir.ptr -> !cir.ptr // CHECK: [[TMP4:%.*]] = cir.load [[TMP3]] : !cir.ptr, !cir.bool void load_bool(S *s) { bool x = s->x; diff --git a/clang/test/CIR/CodeGen/build-deferred.cpp b/clang/test/CIR/CodeGen/build-deferred.cpp index bf0f2ce30c9e..f62d8ddc5c99 100644 --- a/clang/test/CIR/CodeGen/build-deferred.cpp +++ b/clang/test/CIR/CodeGen/build-deferred.cpp @@ -24,4 +24,4 @@ void test() { // CHECK-NOT: cir.func linkonce_odr @_ZN6StringC1EPKc // CHECK: cir.func @_Z4testv() -// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/builtin-bit-cast.cpp b/clang/test/CIR/CodeGen/builtin-bit-cast.cpp index 88e584a2d72c..696b472a159f 100644 --- a/clang/test/CIR/CodeGen/builtin-bit-cast.cpp +++ b/clang/test/CIR/CodeGen/builtin-bit-cast.cpp @@ -28,8 +28,8 @@ unsigned long test_aggregate_to_scalar(two_ints &ti) { } // CIR-LABEL: cir.func @_Z24test_aggregate_to_scalarR8two_ints -// CIR: %[[#SRC_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CIR-NEXT: %[[#DST_PTR:]] = cir.cast(bitcast, %[[#SRC_PTR]] : !cir.ptr), !cir.ptr +// CIR: %[[#SRC_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[#DST_PTR:]] = cir.cast(bitcast, %[[#SRC_PTR]] : !cir.ptr), !cir.ptr // CIR-NEXT: %{{.+}} = cir.load %[[#DST_PTR]] : !cir.ptr, !u64i // CIR: } @@ -48,9 +48,9 @@ two_floats test_aggregate_record(two_ints& ti) { } // CIR-LABEL: cir.func @_Z21test_aggregate_recordR8two_ints -// CIR: %[[#SRC_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CIR-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %[[#SRC_PTR]] : !cir.ptr), !cir.ptr -// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr +// CIR: %[[#SRC_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %[[#SRC_PTR]] : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr // CIR-NEXT: %[[#SIZE:]] = cir.const #cir.int<8> : !u64i // CIR-NEXT: cir.libc.memcpy %[[#SIZE]] bytes from %[[#SRC_VOID_PTR]] to %[[#DST_VOID_PTR]] : !u64i, !cir.ptr -> !cir.ptr // CIR: } @@ -69,7 +69,7 @@ two_floats test_aggregate_array(int (&ary)[2]) { // CIR-LABEL: cir.func @_Z20test_aggregate_arrayRA2_i // CIR: %[[#SRC_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> // CIR-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %[[#SRC_PTR]] : !cir.ptr>), !cir.ptr -// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr // CIR-NEXT: %[[#SIZE:]] = cir.const #cir.int<8> : !u64i // CIR-NEXT: cir.libc.memcpy %[[#SIZE]] bytes from %[[#SRC_VOID_PTR]] to %[[#DST_VOID_PTR]] : !u64i, !cir.ptr -> !cir.ptr // CIR: } @@ -87,7 +87,7 @@ two_ints test_scalar_to_aggregate(unsigned long ul) { // CIR-LABEL: cir.func @_Z24test_scalar_to_aggregatem // CIR: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr -// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr // CIR-NEXT: %[[#SIZE:]] = cir.const #cir.int<8> : !u64i // CIR-NEXT: cir.libc.memcpy %[[#SIZE]] bytes from %[[#SRC_VOID_PTR]] to %[[#DST_VOID_PTR]] : !u64i, !cir.ptr -> !cir.ptr // CIR: } @@ -123,7 +123,7 @@ two_ints test_rvalue_aggregate() { // CIR-NEXT: %[[#A:]] = cir.const #cir.int<42> : !u64i // CIR-NEXT: cir.store %[[#A]], %[[#TMP_SLOT]] : !u64i, !cir.ptr // CIR-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %[[#TMP_SLOT]] : !cir.ptr), !cir.ptr -// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#DST_VOID_PTR:]] = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr // CIR-NEXT: %[[#SIZE:]] = cir.const #cir.int<8> : !u64i // CIR-NEXT: cir.libc.memcpy %[[#SIZE]] bytes from %[[#SRC_VOID_PTR]] to %[[#DST_VOID_PTR]] : !u64i, !cir.ptr -> !cir.ptr // CIR-NEXT: } diff --git a/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp index d77c8b450e4a..09f74a108e1b 100644 --- a/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp +++ b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp @@ -16,13 +16,13 @@ class f { const char *f::b() { return g.b(h); } void fn1() { f f1; } -// CIR: ty_22a22 = !cir.struct -// CIR: ty_22f22 = !cir.struct +// CIR: ty_a = !cir.struct +// CIR: ty_f = !cir.struct // CIR: cir.global external @h = #cir.int<0> // CIR: cir.func private @_ZN1a1bEi(!s32i) -> !cir.ptr -// CIR: cir.func @_ZN1f1bEv(%arg0: !cir.ptr loc{{.*}}) -> !cir.ptr +// CIR: cir.func @_ZN1f1bEv(%arg0: !cir.ptr loc{{.*}}) -> !cir.ptr // CIR: [[H_PTR:%.*]] = cir.get_global @h : !cir.ptr loc(#loc18) // CIR: [[H_VAL:%.*]] = cir.load [[H_PTR]] : !cir.ptr, !s32i // CIR: [[RET1_VAL:%.*]] = cir.call @_ZN1a1bEi([[H_VAL]]) : (!s32i) -> !cir.ptr @@ -32,7 +32,7 @@ void fn1() { f f1; } // CIR: cir.return [[RET1_VAL2]] : !cir.ptr // CIR: cir.func @_Z3fn1v() -// CIR: [[CLS_F:%.*]] = cir.alloca !ty_22f22, !cir.ptr, ["f1"] {alignment = 1 : i64} +// CIR: [[CLS_F:%.*]] = cir.alloca !ty_f, !cir.ptr, ["f1"] {alignment = 1 : i64} // CIR: cir.return // LLVM: %class.f = type { %class.a } diff --git a/clang/test/CIR/CodeGen/cast.c b/clang/test/CIR/CodeGen/cast.c index 4490910cad43..710b065f8087 100644 --- a/clang/test/CIR/CodeGen/cast.c +++ b/clang/test/CIR/CodeGen/cast.c @@ -8,11 +8,11 @@ int cstyle_cast_lvalue(A a) { return ((A)(a)).x; } -// CHECK: cir.func @cstyle_cast_lvalue(%arg0: !ty_22A22 loc({{.*}})) -// CHECK: [[ALLOC_A:%.*]] = cir.alloca !ty_22A22, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK: cir.func @cstyle_cast_lvalue(%arg0: !ty_A loc({{.*}})) +// CHECK: [[ALLOC_A:%.*]] = cir.alloca !ty_A, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK: [[ALLOC_RET:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CHECK: cir.store %arg0, [[ALLOC_A]] : !ty_22A22, !cir.ptr -// CHECK: [[X_ADDR:%.*]] = cir.get_member [[ALLOC_A]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: cir.store %arg0, [[ALLOC_A]] : !ty_A, !cir.ptr +// CHECK: [[X_ADDR:%.*]] = cir.get_member [[ALLOC_A]][0] {name = "x"} : !cir.ptr -> !cir.ptr // CHECK: [[X:%.*]] = cir.load [[X_ADDR]] : !cir.ptr, !s32i // CHECK: cir.store [[X]], [[ALLOC_RET]] : !s32i, !cir.ptr // CHECK: [[RET:%.*]] = cir.load [[ALLOC_RET]] : !cir.ptr, !s32i diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index 15991a8f1fd3..b5d1d8e4f43f 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -129,8 +129,8 @@ void null_cast(long ptr) { // CHECK: cir.func @_Z9null_castl // CHECK: %[[ADDR:[0-9]+]] = cir.const #cir.ptr : !cir.ptr // CHECK: cir.store %{{[0-9]+}}, %[[ADDR]] : !s32i, !cir.ptr -// CHECK: %[[BASE:[0-9]+]] = cir.const #cir.ptr : !cir.ptr -// CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: %[[BASE:[0-9]+]] = cir.const #cir.ptr : !cir.ptr +// CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr // CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, !cir.ptr void int_cast(long ptr) { @@ -138,7 +138,7 @@ void int_cast(long ptr) { } // CHECK: cir.func @_Z8int_castl -// CHECK: %[[BASE:[0-9]+]] = cir.cast(int_to_ptr, %{{[0-9]+}} : !u64i), !cir.ptr -// CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: %[[BASE:[0-9]+]] = cir.cast(int_to_ptr, %{{[0-9]+}} : !u64i), !cir.ptr +// CHECK: %[[FIELD:[0-9]+]] = cir.get_member %[[BASE]][0] {name = "x"} : !cir.ptr -> !cir.ptr // CHECK: cir.store %{{[0-9]+}}, %[[FIELD]] : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c index f0ee805dda44..74669589d084 100644 --- a/clang/test/CIR/CodeGen/compound-literal.c +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -13,7 +13,7 @@ S a = { }; // CIR: cir.global "private" internal @".compoundLiteral.0" = #cir.zero : !cir.array {alignment = 4 : i64} -// CIR: cir.global external @a = #cir.const_struct<{#cir.global_view<@".compoundLiteral.0"> : !cir.ptr}> : !ty_22S22 +// CIR: cir.global external @a = #cir.const_struct<{#cir.global_view<@".compoundLiteral.0"> : !cir.ptr}> : !ty_S // LLVM: @.compoundLiteral.0 = internal global [0 x i32] zeroinitializer // LLVM: @a = global %struct.S { ptr @.compoundLiteral.0 } @@ -23,7 +23,7 @@ S b = { }; // CIR: cir.global "private" internal @".compoundLiteral.1" = #cir.const_array<[#cir.int<1> : !s32i]> : !cir.array {alignment = 4 : i64} -// CIR: cir.global external @b = #cir.const_struct<{#cir.global_view<@".compoundLiteral.1"> : !cir.ptr}> : !ty_22S22 +// CIR: cir.global external @b = #cir.const_struct<{#cir.global_view<@".compoundLiteral.1"> : !cir.ptr}> : !ty_S // LLVM: @.compoundLiteral.1 = internal global [1 x i32] [i32 1] // LLVM: @b = global %struct.S { ptr @.compoundLiteral.1 } @@ -37,8 +37,8 @@ int foo() { // CIR: cir.func no_proto @foo() -> !s32i // CIR: [[RET_MEM:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CIR: [[COMPLITERAL_MEM:%.*]] = cir.alloca !ty_22anon2E122, !cir.ptr, [".compoundliteral"] {alignment = 4 : i64} -// CIR: [[FIELD:%.*]] = cir.get_member [[COMPLITERAL_MEM]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CIR: [[COMPLITERAL_MEM:%.*]] = cir.alloca !ty_anon2E1_, !cir.ptr, [".compoundliteral"] {alignment = 4 : i64} +// CIR: [[FIELD:%.*]] = cir.get_member [[COMPLITERAL_MEM]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i // CIR: cir.store [[ONE]], [[FIELD]] : !s32i, !cir.ptr // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i @@ -52,7 +52,7 @@ struct G g(int x, int y, int z) { } // CIR: cir.func @g -// CIR: %[[RETVAL:.*]] = cir.alloca !ty_22G22, !cir.ptr, ["__retval"] {alignment = 2 : i64} +// CIR: %[[RETVAL:.*]] = cir.alloca !ty_G, !cir.ptr, ["__retval"] {alignment = 2 : i64} // CIR: %[[X:.*]] = cir.get_member %[[RETVAL]][0] {name = "x"} // CIR: cir.store {{.*}}, %[[X]] : !s16i // CIR: %[[Y:.*]] = cir.get_member %[[RETVAL]][1] {name = "y"} @@ -74,21 +74,21 @@ void split_large_page(unsigned long addr, pgprot_t prot) // CIR-LABEL: @split_large_page // CIR: %[[VAL_2:.*]] = cir.alloca !u64i, !cir.ptr, ["addr", init] {alignment = 8 : i64} -// CIR: %[[VAL_3:.*]] = cir.alloca !ty_22pgprot_t22, !cir.ptr, ["prot", init] {alignment = 8 : i64} -// CIR: %[[VAL_4:.*]] = cir.alloca !ty_22pgprot_t22, !cir.ptr, ["tmp"] {alignment = 8 : i64} +// CIR: %[[VAL_3:.*]] = cir.alloca !ty_pgprot_t, !cir.ptr, ["prot", init] {alignment = 8 : i64} +// CIR: %[[VAL_4:.*]] = cir.alloca !ty_pgprot_t, !cir.ptr, ["tmp"] {alignment = 8 : i64} // CIR: cir.store {{.*}}, %[[VAL_2]] : !u64i, !cir.ptr -// CIR: cir.store {{.*}}, %[[VAL_3]] : !ty_22pgprot_t22, !cir.ptr +// CIR: cir.store {{.*}}, %[[VAL_3]] : !ty_pgprot_t, !cir.ptr // CIR: %[[VAL_5:.*]] = cir.load %[[VAL_2]] : !cir.ptr, !u64i // CIR: %[[VAL_6:.*]] = cir.cast(int_to_bool, %[[VAL_5]] : !u64i), !cir.bool // CIR: cir.if %[[VAL_6]] { -// CIR: cir.copy %[[VAL_3]] to %[[VAL_4]] : !cir.ptr +// CIR: cir.copy %[[VAL_3]] to %[[VAL_4]] : !cir.ptr // CIR: } else { -// CIR: %[[VAL_7:.*]] = cir.get_member %[[VAL_4]][0] {name = "pgprot"} : !cir.ptr -> !cir.ptr +// CIR: %[[VAL_7:.*]] = cir.get_member %[[VAL_4]][0] {name = "pgprot"} : !cir.ptr -> !cir.ptr // CIR: %[[VAL_8:.*]] = cir.const #cir.int<1> : !s32i // CIR: %[[VAL_9:.*]] = cir.cast(integral, %[[VAL_8]] : !s32i), !u64i // CIR: cir.store %[[VAL_9]], %[[VAL_7]] : !u64i, !cir.ptr // CIR: } -// CIR: %[[VAL_10:.*]] = cir.get_member %[[VAL_4]][0] {name = "pgprot"} : !cir.ptr -> !cir.ptr +// CIR: %[[VAL_10:.*]] = cir.get_member %[[VAL_4]][0] {name = "pgprot"} : !cir.ptr -> !cir.ptr // CIR: %[[VAL_11:.*]] = cir.load %[[VAL_10]] : !cir.ptr, !u64i // CIR: cir.return // CIR: } diff --git a/clang/test/CIR/CodeGen/cond.cpp b/clang/test/CIR/CodeGen/cond.cpp index f2c063eb574b..e00ee528a72d 100644 --- a/clang/test/CIR/CodeGen/cond.cpp +++ b/clang/test/CIR/CodeGen/cond.cpp @@ -17,11 +17,11 @@ min(const unsigned long& __a, const unsigned long& __b) { // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> // CHECK: cir.scope { -// CHECK: %4 = cir.alloca !ty_22__less22, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} -// CHECK: cir.call @_ZN6__lessC1Ev(%4) : (!cir.ptr) -> () +// CHECK: %4 = cir.alloca !ty___less, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: cir.call @_ZN6__lessC1Ev(%4) : (!cir.ptr) -> () // CHECK: %5 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK: %6 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %7 = cir.call @_ZNK6__lessclERKmS1_(%4, %5, %6) : (!cir.ptr, !cir.ptr, !cir.ptr) -> !cir.bool +// CHECK: %7 = cir.call @_ZNK6__lessclERKmS1_(%4, %5, %6) : (!cir.ptr, !cir.ptr, !cir.ptr) -> !cir.bool // CHECK: %8 = cir.ternary(%7, true { // CHECK: %9 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK: cir.yield %9 : !cir.ptr diff --git a/clang/test/CIR/CodeGen/const-bitfields.c b/clang/test/CIR/CodeGen/const-bitfields.c index 24a6710af516..0015f4fe5c83 100644 --- a/clang/test/CIR/CodeGen/const-bitfields.c +++ b/clang/test/CIR/CodeGen/const-bitfields.c @@ -15,7 +15,7 @@ struct Inner { }; // CHECK: !ty_anon_struct = !cir.struct -// CHECK: !ty_22T22 = !cir.struct, !s32i} #cir.record.decl.ast> +// CHECK: !ty_T = !cir.struct, !s32i} #cir.record.decl.ast> // CHECK: !ty_anon_struct1 = !cir.struct, !u8i, !u8i, !u8i, !u8i}> // CHECK: #bfi_Z = #cir.bitfield_info, size = 9, offset = 11, is_signed = true> @@ -29,8 +29,8 @@ struct Inner var = { 1, 0, 1, 21}; // CHECK: cir.func {{.*@getZ()}} // CHECK: %1 = cir.get_global @GV : !cir.ptr -// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr -// CHECK: %3 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr> +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: %3 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr> // CHECK: %4 = cir.get_bitfield(#bfi_Z, %3 : !cir.ptr>) -> !s32i int getZ() { return GV.Z; @@ -39,8 +39,8 @@ int getZ() { // check the type used is the type of T struct for plain field // CHECK: cir.func {{.*@getW()}} // CHECK: %1 = cir.get_global @GV : !cir.ptr -// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr -// CHECK: %3 = cir.get_member %2[1] {name = "W"} : !cir.ptr -> !cir.ptr +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: %3 = cir.get_member %2[1] {name = "W"} : !cir.ptr -> !cir.ptr int getW() { return GV.W; } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 24ee6df5f5c6..d61c626ee763 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -135,7 +135,7 @@ co_invoke_fn co_invoke; // CHECK-DAG: ![[SuspendAlways:.*]] = !cir.struct // CHECK: module {{.*}} { -// CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_22folly3A3Acoro3A3Aco_invoke_fn22 +// CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_folly3A3Acoro3A3Aco_invoke_fn // CHECK: cir.func builtin private @__builtin_coro_id(!u32i, !cir.ptr, !cir.ptr, !cir.ptr) -> !u32i // CHECK: cir.func builtin private @__builtin_coro_alloc(!u32i) -> !cir.bool @@ -274,7 +274,7 @@ folly::coro::Task byRef(const std::string& s) { } // FIXME: this could be less redundant than two allocas + reloads -// CHECK: cir.func coroutine @_Z5byRefRKSt6string(%arg0: !cir.ptr {{.*}}22 extra{{.*}}{ +// CHECK: cir.func coroutine @_Z5byRefRKSt6string(%arg0: !cir.ptr {{.*}} ![[IntTask]] extra{{.*}}{ // CHECK: %[[#AllocaParam:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] // CHECK: %[[#AllocaFnUse:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] @@ -291,7 +291,7 @@ folly::coro::Task silly_coro() { // Make sure we properly handle OnFallthrough coro body sub stmt and // check there are not multiple co_returns emitted. -// CHECK: cir.func coroutine @_Z10silly_corov() {{.*}}22 extra{{.*}}{ +// CHECK: cir.func coroutine @_Z10silly_corov() {{.*}} ![[VoidTask]] extra{{.*}}{ // CHECK: cir.await(init, ready : { // CHECK: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv // CHECK-NOT: cir.call @_ZN5folly4coro4TaskIvE12promise_type11return_voidEv @@ -303,7 +303,7 @@ folly::coro::Task go1() { co_return co_await task; } -// CHECK: cir.func coroutine @_Z3go1v() {{.*}}22 extra{{.*}}{ +// CHECK: cir.func coroutine @_Z3go1v() {{.*}} ![[IntTask]] extra{{.*}}{ // CHECK: %[[#IntTaskAddr:]] = cir.alloca ![[IntTask]], !cir.ptr, ["task", init] // CHECK: cir.await(init, ready : { @@ -338,8 +338,8 @@ folly::coro::Task go1_lambda() { co_return co_await task; } -// CHECK: cir.func coroutine lambda internal private @_ZZ10go1_lambdavENK3$_0clEv{{.*}}22 extra{{.*}}{ -// CHECK: cir.func coroutine @_Z10go1_lambdav() {{.*}}22 extra{{.*}}{ +// CHECK: cir.func coroutine lambda internal private @_ZZ10go1_lambdavENK3$_0clEv{{.*}} ![[IntTask]] extra{{.*}}{ +// CHECK: cir.func coroutine @_Z10go1_lambdav() {{.*}} ![[IntTask]] extra{{.*}}{ folly::coro::Task go4() { auto* fn = +[](int const& i) -> folly::coro::Task { co_return i; }; @@ -347,7 +347,7 @@ folly::coro::Task go4() { co_return co_await std::move(task); } -// CHECK: cir.func coroutine @_Z3go4v() {{.*}}22 extra{{.*}}{ +// CHECK: cir.func coroutine @_Z3go4v() {{.*}} ![[IntTask]] extra{{.*}}{ // CHECK: cir.await(init, ready : { // CHECK: }, suspend : { @@ -356,10 +356,10 @@ folly::coro::Task go4() { // CHECK: } // CHECK: %12 = cir.scope { -// CHECK: %17 = cir.alloca !ty_22anon2E522, !cir.ptr, ["ref.tmp1"] {alignment = 1 : i64} +// CHECK: %17 = cir.alloca !ty_anon2E5_, !cir.ptr, ["ref.tmp1"] {alignment = 1 : i64} // Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` -// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> // CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> // CHECK: cir.yield %19 : !cir.ptr)>> // CHECK: } @@ -387,7 +387,7 @@ folly::coro::Task yield1() { co_yield t; } -// CHECK: cir.func coroutine @_Z6yield1v() -> !ty_22folly3A3Acoro3A3ATask3Cvoid3E22 +// CHECK: cir.func coroutine @_Z6yield1v() -> !ty_folly3A3Acoro3A3ATask3Cvoid3E // CHECK: cir.await(init, ready : { // CHECK: }, suspend : { @@ -395,30 +395,30 @@ folly::coro::Task yield1() { // CHECK: },) // CHECK: cir.scope { -// CHECK-NEXT: %[[#SUSPEND_PTR:]] = cir.alloca !ty_22std3A3Asuspend_always22, !cir.ptr -// CHECK-NEXT: %[[#AWAITER_PTR:]] = cir.alloca !ty_22folly3A3Acoro3A3ATask3Cvoid3E22, !cir.ptr -// CHECK-NEXT: %[[#CORO_PTR:]] = cir.alloca !ty_22std3A3Acoroutine_handle3Cvoid3E22, !cir.ptr -// CHECK-NEXT: %[[#CORO2_PTR:]] = cir.alloca !ty_22std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E22, !cir.ptr -// CHECK-NEXT: cir.call @_ZN5folly4coro4TaskIvEC1ERKS2_(%[[#AWAITER_PTR]], %{{.+}}) : (!cir.ptr, !cir.ptr) -> () -// CHECK-NEXT: %[[#AWAITER:]] = cir.load %[[#AWAITER_PTR]] : !cir.ptr, !ty_22folly3A3Acoro3A3ATask3Cvoid3E22 -// CHECK-NEXT: %[[#SUSPEND:]] = cir.call @_ZN5folly4coro4TaskIvE12promise_type11yield_valueES2_(%{{.+}}, %[[#AWAITER]]) : (!cir.ptr, !ty_22folly3A3Acoro3A3ATask3Cvoid3E22) -> !ty_22std3A3Asuspend_always22 -// CHECK-NEXT: cir.store %[[#SUSPEND]], %[[#SUSPEND_PTR]] : !ty_22std3A3Asuspend_always22, !cir.ptr +// CHECK-NEXT: %[[#SUSPEND_PTR:]] = cir.alloca !ty_std3A3Asuspend_always, !cir.ptr +// CHECK-NEXT: %[[#AWAITER_PTR:]] = cir.alloca !ty_folly3A3Acoro3A3ATask3Cvoid3E, !cir.ptr +// CHECK-NEXT: %[[#CORO_PTR:]] = cir.alloca !ty_std3A3Acoroutine_handle3Cvoid3E, !cir.ptr +// CHECK-NEXT: %[[#CORO2_PTR:]] = cir.alloca !ty_std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5folly4coro4TaskIvEC1ERKS2_(%[[#AWAITER_PTR]], %{{.+}}) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[#AWAITER:]] = cir.load %[[#AWAITER_PTR]] : !cir.ptr, !ty_folly3A3Acoro3A3ATask3Cvoid3E +// CHECK-NEXT: %[[#SUSPEND:]] = cir.call @_ZN5folly4coro4TaskIvE12promise_type11yield_valueES2_(%{{.+}}, %[[#AWAITER]]) : (!cir.ptr, !ty_folly3A3Acoro3A3ATask3Cvoid3E) -> !ty_std3A3Asuspend_always +// CHECK-NEXT: cir.store %[[#SUSPEND]], %[[#SUSPEND_PTR]] : !ty_std3A3Asuspend_always, !cir.ptr // CHECK-NEXT: cir.await(yield, ready : { // CHECK-NEXT: %[[#READY:]] = cir.scope { -// CHECK-NEXT: %[[#A:]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[#SUSPEND_PTR]]) : (!cir.ptr) -> !cir.bool +// CHECK-NEXT: %[[#A:]] = cir.call @_ZNSt14suspend_always11await_readyEv(%[[#SUSPEND_PTR]]) : (!cir.ptr) -> !cir.bool // CHECK-NEXT: cir.yield %[[#A]] : !cir.bool // CHECK-NEXT: } : !cir.bool // CHECK-NEXT: cir.condition(%[[#READY]]) // CHECK-NEXT: }, suspend : { -// CHECK-NEXT: %[[#CORO2:]] = cir.call @_ZNSt16coroutine_handleIN5folly4coro4TaskIvE12promise_typeEE12from_addressEPv(%9) : (!cir.ptr) -> !ty_22std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E22 -// CHECK-NEXT: cir.store %[[#CORO2]], %[[#CORO2_PTR]] : !ty_22std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E22, !cir.ptr -// CHECK-NEXT: %[[#B:]] = cir.load %[[#CORO2_PTR]] : !cir.ptr, !ty_22std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E22 -// CHECK-NEXT: cir.call @_ZNSt16coroutine_handleIvEC1IN5folly4coro4TaskIvE12promise_typeEEES_IT_E(%[[#CORO_PTR]], %[[#B]]) : (!cir.ptr, !ty_22std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E22) -> () -// CHECK-NEXT: %[[#C:]] = cir.load %[[#CORO_PTR]] : !cir.ptr, !ty_22std3A3Acoroutine_handle3Cvoid3E22 -// CHECK-NEXT: cir.call @_ZNSt14suspend_always13await_suspendESt16coroutine_handleIvE(%[[#SUSPEND_PTR]], %[[#C]]) : (!cir.ptr, !ty_22std3A3Acoroutine_handle3Cvoid3E22) -> () +// CHECK-NEXT: %[[#CORO2:]] = cir.call @_ZNSt16coroutine_handleIN5folly4coro4TaskIvE12promise_typeEE12from_addressEPv(%9) : (!cir.ptr) -> !ty_std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E +// CHECK-NEXT: cir.store %[[#CORO2]], %[[#CORO2_PTR]] : !ty_std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E, !cir.ptr +// CHECK-NEXT: %[[#B:]] = cir.load %[[#CORO2_PTR]] : !cir.ptr, !ty_std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E +// CHECK-NEXT: cir.call @_ZNSt16coroutine_handleIvEC1IN5folly4coro4TaskIvE12promise_typeEEES_IT_E(%[[#CORO_PTR]], %[[#B]]) : (!cir.ptr, !ty_std3A3Acoroutine_handle3Cfolly3A3Acoro3A3ATask3Cvoid3E3A3Apromise_type3E) -> () +// CHECK-NEXT: %[[#C:]] = cir.load %[[#CORO_PTR]] : !cir.ptr, !ty_std3A3Acoroutine_handle3Cvoid3E +// CHECK-NEXT: cir.call @_ZNSt14suspend_always13await_suspendESt16coroutine_handleIvE(%[[#SUSPEND_PTR]], %[[#C]]) : (!cir.ptr, !ty_std3A3Acoroutine_handle3Cvoid3E) -> () // CHECK-NEXT: cir.yield // CHECK-NEXT: }, resume : { -// CHECK-NEXT: cir.call @_ZNSt14suspend_always12await_resumeEv(%[[#SUSPEND_PTR]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.call @_ZNSt14suspend_always12await_resumeEv(%[[#SUSPEND_PTR]]) : (!cir.ptr) -> () // CHECK-NEXT: cir.yield // CHECK-NEXT: },) // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp index 6476659ef41f..3739ecef1cce 100644 --- a/clang/test/CIR/CodeGen/ctor-alias.cpp +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -9,20 +9,20 @@ void t() { } // CHECK: cir.func linkonce_odr @_ZN11DummyStringC2EPKc -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NOT: cir.fun @_ZN11DummyStringC1EPKc // CHECK: cir.func @_Z1tv -// CHECK-NEXT: %0 = cir.alloca !ty_22DummyString22, !cir.ptr, ["s4", init] {alignment = 1 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_DummyString, !cir.ptr, ["s4", init] {alignment = 1 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : !cir.ptr> // CHECK-NEXT: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: cir.call @_ZN11DummyStringC2EPKc(%0, %2) : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: cir.return struct B { @@ -31,10 +31,10 @@ struct B { B::B() { } -// CHECK: cir.func @_ZN1BC2Ev(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: cir.func @_ZN1BC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: cir.return // CHECK: } -// CHECK: cir.func @_ZN1BC1Ev(!cir.ptr) alias(@_ZN1BC2Ev) \ No newline at end of file +// CHECK: cir.func @_ZN1BC1Ev(!cir.ptr) alias(@_ZN1BC2Ev) \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index 70fff5a81dfe..d8e42f46429f 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -6,8 +6,8 @@ struct String { long size; String(const String &s) : size{s.size} {} // CHECK: cir.func linkonce_odr @_ZN6StringC2ERKS_ -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 // CHECK: cir.store %arg1, %1 // CHECK: %2 = cir.load %0 @@ -27,9 +27,9 @@ void foo() { String s1{s}; } // CHECK: cir.func @_Z3foov() {{.*}} { -// CHECK: %0 = cir.alloca !ty_22String22, !cir.ptr, ["s", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !ty_22String22, !cir.ptr, ["s1", init] {alignment = 8 : i64} -// CHECK: cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () -// CHECK: cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %0 = cir.alloca !ty_String, !cir.ptr, ["s", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_String, !cir.ptr, ["s1", init] {alignment = 8 : i64} +// CHECK: cir.call @_ZN6StringC2Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN6StringC2ERKS_(%1, %0) : (!cir.ptr, !cir.ptr) -> () // CHECK: cir.return // } diff --git a/clang/test/CIR/CodeGen/ctor.cpp b/clang/test/CIR/CodeGen/ctor.cpp index 18288b15d241..c3ac21d6cd56 100644 --- a/clang/test/CIR/CodeGen/ctor.cpp +++ b/clang/test/CIR/CodeGen/ctor.cpp @@ -11,22 +11,22 @@ void baz() { Struk s; } -// CHECK: !ty_22Struk22 = !cir.struct +// CHECK: !ty_Struk = !cir.struct -// CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN5StrukC2Ev(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.return -// CHECK: cir.func linkonce_odr @_ZN5StrukC1Ev(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () +// CHECK: cir.func linkonce_odr @_ZN5StrukC1Ev(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5StrukC2Ev(%1) : (!cir.ptr) -> () // CHECK-NEXT: cir.return // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !ty_22Struk22, !cir.ptr, ["s", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () +// CHECK-NEXT: %0 = cir.alloca !ty_Struk, !cir.ptr, ["s", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.call @_ZN5StrukC1Ev(%0) : (!cir.ptr) -> () // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/delegating-ctor.cpp b/clang/test/CIR/CodeGen/delegating-ctor.cpp index b230ea6f1d5c..850c0aac9d6d 100644 --- a/clang/test/CIR/CodeGen/delegating-ctor.cpp +++ b/clang/test/CIR/CodeGen/delegating-ctor.cpp @@ -10,12 +10,12 @@ struct Delegating { // arguments. Delegating::Delegating() : Delegating(0) {} -// CHECK-LABEL: cir.func @_ZN10DelegatingC2Ev(%arg0: !cir.ptr {{.*}}) {{.*}} { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-LABEL: cir.func @_ZN10DelegatingC2Ev(%arg0: !cir.ptr {{.*}}) {{.*}} { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: %2 = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.call @_ZN10DelegatingC2Ei(%1, %2) : (!cir.ptr, !s32i) -> () +// CHECK-NEXT: cir.call @_ZN10DelegatingC2Ei(%1, %2) : (!cir.ptr, !s32i) -> () // CHECK-NEXT: cir.return // CHECK-NEXT: } @@ -30,15 +30,15 @@ struct DelegatingWithZeroing { // call to it in a lowering pass. DelegatingWithZeroing::DelegatingWithZeroing(int) : DelegatingWithZeroing() {} -// CHECK-LABEL: cir.func @_ZN21DelegatingWithZeroingC2Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i {{.*}}) {{.*}} { -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-LABEL: cir.func @_ZN21DelegatingWithZeroingC2Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i {{.*}}) {{.*}} { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.store %arg1, %1 : !s32i, !cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %3 = cir.const #cir.zero : !ty_22DelegatingWithZeroing22 -// CHECK-NEXT: cir.store %3, %2 : !ty_22DelegatingWithZeroing22, !cir.ptr -// CHECK-NEXT: cir.call @_ZN21DelegatingWithZeroingC2Ev(%2) : (!cir.ptr) -> () extra(#fn_attr1) +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %3 = cir.const #cir.zero : !ty_DelegatingWithZeroing +// CHECK-NEXT: cir.store %3, %2 : !ty_DelegatingWithZeroing, !cir.ptr +// CHECK-NEXT: cir.call @_ZN21DelegatingWithZeroingC2Ev(%2) : (!cir.ptr) -> () extra(#fn_attr1) // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 2de30990d9f4..1f2ae7411ab3 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -75,32 +75,32 @@ void C3::Layer::Initialize() { } } -// CHECK-DAG: !ty_22C23A3ALayer22 = !cir.struct) -> !cir.ptr -// CHECK: %3 = cir.get_member %2[1] {name = "m_C1"} : !cir.ptr -> !cir.ptr> -// CHECK: %4 = cir.load %3 : !cir.ptr>, !cir.ptr -// CHECK: %5 = cir.const #cir.ptr : !cir.ptr -// CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool +// CHECK: %2 = cir.base_class_addr(%1 : !cir.ptr) -> !cir.ptr +// CHECK: %3 = cir.get_member %2[1] {name = "m_C1"} : !cir.ptr -> !cir.ptr> +// CHECK: %4 = cir.load %3 : !cir.ptr>, !cir.ptr +// CHECK: %5 = cir.const #cir.ptr : !cir.ptr +// CHECK: %6 = cir.cmp(eq, %4, %5) : !cir.ptr, !cir.bool enumy C3::Initialize() { return C2::Initialize(); } -// CHECK: cir.func @_ZN2C310InitializeEv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.func @_ZN2C310InitializeEv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK: %2 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %3 = cir.base_class_addr(%2 : !cir.ptr) -> !cir.ptr -// CHECK: %4 = cir.call @_ZN2C210InitializeEv(%3) : (!cir.ptr) -> !s32i +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: %3 = cir.base_class_addr(%2 : !cir.ptr) -> !cir.ptr +// CHECK: %4 = cir.call @_ZN2C210InitializeEv(%3) : (!cir.ptr) -> !s32i void vcall(C1 &c1) { buffy b; @@ -108,21 +108,21 @@ void vcall(C1 &c1) { c1.SetStuff(e, b); } -// CHECK: cir.func @_Z5vcallR2C1(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["c1", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !ty_22buffy22, !cir.ptr, ["b"] {alignment = 8 : i64} +// CHECK: cir.func @_Z5vcallR2C1(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["c1", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !ty_buffy, !cir.ptr, ["b"] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !s32i, !cir.ptr, ["e"] {alignment = 4 : i64} -// CHECK: %3 = cir.alloca !ty_22buffy22, !cir.ptr, ["agg.tmp0"] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK: %4 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: %3 = cir.alloca !ty_buffy, !cir.ptr, ["agg.tmp0"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %4 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %5 = cir.load %2 : !cir.ptr, !s32i -// CHECK: cir.call @_ZN5buffyC2ERKS_(%3, %1) : (!cir.ptr, !cir.ptr) -> () -// CHECK: %6 = cir.load %3 : !cir.ptr, !ty_22buffy22 -// CHECK: %7 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr, !s32i, !ty_22buffy22)>>>> -// CHECK: %8 = cir.load %7 : !cir.ptr, !s32i, !ty_22buffy22)>>>>, !cir.ptr, !s32i, !ty_22buffy22)>>> -// CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_22buffy22)>>>, vtable_index = 0, address_point_index = 2) : !cir.ptr, !s32i, !ty_22buffy22)>>> -// CHECK: %10 = cir.load align(8) %9 : !cir.ptr, !s32i, !ty_22buffy22)>>>, !cir.ptr, !s32i, !ty_22buffy22)>> -// CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_22buffy22)>>, !cir.ptr, !s32i, !ty_22buffy22) -> !s32i +// CHECK: cir.call @_ZN5buffyC2ERKS_(%3, %1) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %6 = cir.load %3 : !cir.ptr, !ty_buffy +// CHECK: %7 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr, !s32i, !ty_buffy)>>>> +// CHECK: %8 = cir.load %7 : !cir.ptr, !s32i, !ty_buffy)>>>>, !cir.ptr, !s32i, !ty_buffy)>>> +// CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_buffy)>>>, vtable_index = 0, address_point_index = 2) : !cir.ptr, !s32i, !ty_buffy)>>> +// CHECK: %10 = cir.load align(8) %9 : !cir.ptr, !s32i, !ty_buffy)>>>, !cir.ptr, !s32i, !ty_buffy)>> +// CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_buffy)>>, !cir.ptr, !s32i, !ty_buffy) -> !s32i // CHECK: cir.return // CHECK: } @@ -138,19 +138,19 @@ class B : public A { void foo () { static_cast(*this).foo();} }; -// CHECK: cir.func linkonce_odr @_ZN1B3fooEv(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK: %1 = cir.load deref %0 : !cir.ptr>, !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN1B3fooEv(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load deref %0 : !cir.ptr>, !cir.ptr // CHECK: cir.scope { -// CHECK: %2 = cir.alloca !ty_22A22, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: %3 = cir.base_class_addr(%1 : !cir.ptr) -> !cir.ptr +// CHECK: %2 = cir.alloca !ty_A, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %3 = cir.base_class_addr(%1 : !cir.ptr) -> !cir.ptr // Call @A::A(A const&) -// CHECK: cir.call @_ZN1AC2ERKS_(%2, %3) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN1AC2ERKS_(%2, %3) : (!cir.ptr, !cir.ptr) -> () // Call @A::foo() -// CHECK: cir.call @_ZN1A3fooEv(%2) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1A3fooEv(%2) : (!cir.ptr) -> () // CHECK: } // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/dtors-scopes.cpp b/clang/test/CIR/CodeGen/dtors-scopes.cpp index 4c10b4d64426..6d363f0254bf 100644 --- a/clang/test/CIR/CodeGen/dtors-scopes.cpp +++ b/clang/test/CIR/CodeGen/dtors-scopes.cpp @@ -17,9 +17,9 @@ void dtor1() { // CHECK: cir.func @_Z5dtor1v() // CHECK: cir.scope { -// CHECK: %4 = cir.alloca !ty_22C22, !cir.ptr, ["c", init] {alignment = 1 : i64} -// CHECK: cir.call @_ZN1CC2Ev(%4) : (!cir.ptr) -> () -// CHECK: cir.call @_ZN1CD2Ev(%4) : (!cir.ptr) -> () +// CHECK: %4 = cir.alloca !ty_C, !cir.ptr, ["c", init] {alignment = 1 : i64} +// CHECK: cir.call @_ZN1CC2Ev(%4) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1CD2Ev(%4) : (!cir.ptr) -> () // CHECK: } // DTOR_BODY: cir.func linkonce_odr @_ZN1CD2Ev{{.*}}{ @@ -29,7 +29,7 @@ void dtor1() { // DTOR_BODY: %5 = cir.call @printf(%4) // DTOR_BODY: cir.return -// DTOR_BODY: cir.func linkonce_odr @_ZN1CD1Ev(%arg0: !cir.ptr +// DTOR_BODY: cir.func linkonce_odr @_ZN1CD1Ev(%arg0: !cir.ptr // DTOR_BODY: cir.call @_ZN1CD2Ev // DTOR_BODY: cir.return diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index b0db1d1cdcda..2202d339b76d 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -42,11 +42,11 @@ class B : public A // CHECK: ![[ClassB:ty_.*]] = !cir.struct // CHECK: cir.func @_Z4bluev() -// CHECK: %0 = cir.alloca !ty_22PSEvent22, !cir.ptr, ["p", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !ty_PSEvent, !cir.ptr, ["p", init] {alignment = 8 : i64} // CHECK: %1 = cir.const #cir.int<1> : !s32i // CHECK: %2 = cir.get_global @".str" : !cir.ptr> // CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr -// CHECK: cir.call @_ZN7PSEventC1E6EFModePKc(%0, %1, %3) : (!cir.ptr, !s32i, !cir.ptr) -> () +// CHECK: cir.call @_ZN7PSEventC1E6EFModePKc(%0, %1, %3) : (!cir.ptr, !s32i, !cir.ptr) -> () // CHECK: cir.return // CHECK: } @@ -57,8 +57,8 @@ class B : public A // void foo() // CHECK: cir.func @_Z3foov() // CHECK: cir.scope { -// CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () -// CHECK: cir.call @_ZN1BD2Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1BD2Ev(%0) : (!cir.ptr) -> () // operator delete(void*) declaration // CHECK: cir.func private @_ZdlPvm(!cir.ptr, !u64i) diff --git a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp index d94c8775a2de..04662aed47ea 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp @@ -15,18 +15,18 @@ struct Derived final : Base1 {}; Derived *ptr_cast(Base1 *ptr) { return dynamic_cast(ptr); - // CHECK: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr + // CHECK: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[#EXPECTED_VPTR:]] = cir.vtable.address_point(@_ZTV7Derived, vtable_index = 0, address_point_index = 2) : !cir.ptr>> - // CHECK-NEXT: %[[#SRC_VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr>>> + // CHECK-NEXT: %[[#SRC_VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr>>> // CHECK-NEXT: %[[#SRC_VPTR:]] = cir.load %[[#SRC_VPTR_PTR]] : !cir.ptr>>>, !cir.ptr>> // CHECK-NEXT: %[[#SUCCESS:]] = cir.cmp(eq, %[[#SRC_VPTR]], %[[#EXPECTED_VPTR]]) : !cir.ptr>>, !cir.bool // CHECK-NEXT: %{{.+}} = cir.ternary(%[[#SUCCESS]], true { - // CHECK-NEXT: %[[#RES:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr - // CHECK-NEXT: cir.yield %[[#RES]] : !cir.ptr + // CHECK-NEXT: %[[#RES:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr + // CHECK-NEXT: cir.yield %[[#RES]] : !cir.ptr // CHECK-NEXT: }, false { - // CHECK-NEXT: %[[#NULL:]] = cir.const #cir.ptr : !cir.ptr - // CHECK-NEXT: cir.yield %[[#NULL]] : !cir.ptr - // CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr + // CHECK-NEXT: %[[#NULL:]] = cir.const #cir.ptr : !cir.ptr + // CHECK-NEXT: cir.yield %[[#NULL]] : !cir.ptr + // CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr } // LLVM: define dso_local ptr @_Z8ptr_castP5Base1(ptr readonly %[[#SRC:]]) @@ -38,9 +38,9 @@ Derived *ptr_cast(Base1 *ptr) { Derived &ref_cast(Base1 &ref) { return dynamic_cast(ref); - // CHECK: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr + // CHECK: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[#EXPECTED_VPTR:]] = cir.vtable.address_point(@_ZTV7Derived, vtable_index = 0, address_point_index = 2) : !cir.ptr>> - // CHECK-NEXT: %[[#SRC_VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr>>> + // CHECK-NEXT: %[[#SRC_VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr>>> // CHECK-NEXT: %[[#SRC_VPTR:]] = cir.load %[[#SRC_VPTR_PTR]] : !cir.ptr>>>, !cir.ptr>> // CHECK-NEXT: %[[#SUCCESS:]] = cir.cmp(eq, %[[#SRC_VPTR]], %[[#EXPECTED_VPTR]]) : !cir.ptr>>, !cir.bool // CHECK-NEXT: %[[#FAILED:]] = cir.unary(not, %[[#SUCCESS]]) : !cir.bool, !cir.bool @@ -48,7 +48,7 @@ Derived &ref_cast(Base1 &ref) { // CHECK-NEXT: cir.call @__cxa_bad_cast() : () -> () // CHECK-NEXT: cir.unreachable // CHECK-NEXT: } - // CHECK-NEXT: %{{.+}} = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr + // CHECK-NEXT: %{{.+}} = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr } // LLVM: define dso_local noundef ptr @_Z8ref_castR5Base1(ptr readonly returned %[[#SRC:]]) @@ -64,9 +64,9 @@ Derived &ref_cast(Base1 &ref) { Derived *ptr_cast_always_fail(Base2 *ptr) { return dynamic_cast(ptr); - // CHECK: %{{.+}} = cir.load %{{.+}} : !cir.ptr>, !cir.ptr - // CHECK-NEXT: %[[#RESULT:]] = cir.const #cir.ptr : !cir.ptr - // CHECK-NEXT: cir.store %[[#RESULT]], %{{.+}} : !cir.ptr, !cir.ptr> + // CHECK: %{{.+}} = cir.load %{{.+}} : !cir.ptr>, !cir.ptr + // CHECK-NEXT: %[[#RESULT:]] = cir.const #cir.ptr : !cir.ptr + // CHECK-NEXT: cir.store %[[#RESULT]], %{{.+}} : !cir.ptr, !cir.ptr> } // LLVM: define dso_local noalias noundef ptr @_Z20ptr_cast_always_failP5Base2(ptr nocapture readnone %{{.+}}) @@ -75,8 +75,8 @@ Derived *ptr_cast_always_fail(Base2 *ptr) { Derived &ref_cast_always_fail(Base2 &ref) { return dynamic_cast(ref); - // CHECK: %{{.+}} = cir.load %{{.+}} : !cir.ptr>, !cir.ptr - // CHECK-NEXT: %{{.+}} = cir.const #cir.ptr : !cir.ptr + // CHECK: %{{.+}} = cir.load %{{.+}} : !cir.ptr>, !cir.ptr + // CHECK-NEXT: %{{.+}} = cir.const #cir.ptr : !cir.ptr // CHECK-NEXT: cir.call @__cxa_bad_cast() : () -> () // CHECK-NEXT: cir.unreachable } diff --git a/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp b/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp index 58da33f0e60e..27cff8b2d172 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast-relative-layout.cpp @@ -5,25 +5,25 @@ struct Base { virtual ~Base(); }; -// BEFORE: !ty_22Base22 = !cir.struct(ptr); } // BEFORE: cir.func @_Z20ptr_cast_to_completeP4Base -// BEFORE: %{{.+}} = cir.dyn_cast(ptr, %{{.+}} : !cir.ptr relative_layout) -> !cir.ptr +// BEFORE: %{{.+}} = cir.dyn_cast(ptr, %{{.+}} : !cir.ptr relative_layout) -> !cir.ptr // BEFORE: } // AFTER: cir.func @_Z20ptr_cast_to_completeP4Base -// AFTER: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// AFTER-NEXT: %[[#SRC_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#SRC]] : !cir.ptr), !cir.bool +// AFTER: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// AFTER-NEXT: %[[#SRC_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#SRC]] : !cir.ptr), !cir.bool // AFTER-NEXT: %{{.+}} = cir.ternary(%[[#SRC_IS_NOT_NULL]], true { -// AFTER-NEXT: %[[#VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr> +// AFTER-NEXT: %[[#VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr> // AFTER-NEXT: %[[#VPTR:]] = cir.load %[[#VPTR_PTR]] : !cir.ptr>, !cir.ptr // AFTER-NEXT: %[[#OFFSET_TO_TOP_PTR:]] = cir.vtable.address_point( %[[#VPTR]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : !cir.ptr // AFTER-NEXT: %[[#OFFSET_TO_TOP:]] = cir.load align(4) %[[#OFFSET_TO_TOP_PTR]] : !cir.ptr, !s32i -// AFTER-NEXT: %[[#SRC_BYTES_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: %[[#SRC_BYTES_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr // AFTER-NEXT: %[[#DST_BYTES_PTR:]] = cir.ptr_stride(%[[#SRC_BYTES_PTR]] : !cir.ptr, %[[#OFFSET_TO_TOP]] : !s32i), !cir.ptr // AFTER-NEXT: %[[#DST:]] = cir.cast(bitcast, %[[#DST_BYTES_PTR]] : !cir.ptr), !cir.ptr // AFTER-NEXT: cir.yield %[[#DST]] : !cir.ptr diff --git a/clang/test/CIR/CodeGen/dynamic-cast.cpp b/clang/test/CIR/CodeGen/dynamic-cast.cpp index 622702104707..2d1393b4a582 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast.cpp @@ -8,32 +8,32 @@ struct Base { struct Derived : Base {}; // BEFORE: #dyn_cast_info__ZTI4Base__ZTI7Derived = #cir.dyn_cast_info<#cir.global_view<@_ZTI4Base> : !cir.ptr, #cir.global_view<@_ZTI7Derived> : !cir.ptr, @__dynamic_cast, @__cxa_bad_cast, #cir.int<0> : !s64i> -// BEFORE: !ty_22Base22 = !cir.struct -// BEFORE: !ty_22Derived22 = !cir.struct +// BEFORE: !ty_Base = !cir.struct +// BEFORE: !ty_Derived = !cir.struct Derived *ptr_cast(Base *b) { return dynamic_cast(b); } // BEFORE: cir.func @_Z8ptr_castP4Base -// BEFORE: %{{.+}} = cir.dyn_cast(ptr, %{{.+}} : !cir.ptr, #dyn_cast_info__ZTI4Base__ZTI7Derived) -> !cir.ptr +// BEFORE: %{{.+}} = cir.dyn_cast(ptr, %{{.+}} : !cir.ptr, #dyn_cast_info__ZTI4Base__ZTI7Derived) -> !cir.ptr // BEFORE: } // AFTER: cir.func @_Z8ptr_castP4Base -// AFTER: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// AFTER-NEXT: %[[#SRC_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#SRC]] : !cir.ptr), !cir.bool +// AFTER: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// AFTER-NEXT: %[[#SRC_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#SRC]] : !cir.ptr), !cir.bool // AFTER-NEXT: %{{.+}} = cir.ternary(%[[#SRC_IS_NOT_NULL]], true { -// AFTER-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr // AFTER-NEXT: %[[#BASE_RTTI:]] = cir.const #cir.global_view<@_ZTI4Base> : !cir.ptr // AFTER-NEXT: %[[#DERIVED_RTTI:]] = cir.const #cir.global_view<@_ZTI7Derived> : !cir.ptr // AFTER-NEXT: %[[#HINT:]] = cir.const #cir.int<0> : !s64i // AFTER-NEXT: %[[#RT_CALL_RET:]] = cir.call @__dynamic_cast(%[[#SRC_VOID_PTR]], %[[#BASE_RTTI]], %[[#DERIVED_RTTI]], %[[#HINT]]) : (!cir.ptr, !cir.ptr, !cir.ptr, !s64i) -> !cir.ptr -// AFTER-NEXT: %[[#CASTED:]] = cir.cast(bitcast, %[[#RT_CALL_RET]] : !cir.ptr), !cir.ptr -// AFTER-NEXT: cir.yield %[[#CASTED]] : !cir.ptr +// AFTER-NEXT: %[[#CASTED:]] = cir.cast(bitcast, %[[#RT_CALL_RET]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.yield %[[#CASTED]] : !cir.ptr // AFTER-NEXT: }, false { -// AFTER-NEXT: %[[#NULL_PTR:]] = cir.const #cir.ptr : !cir.ptr -// AFTER-NEXT: cir.yield %[[#NULL_PTR]] : !cir.ptr -// AFTER-NEXT: }) : (!cir.bool) -> !cir.ptr +// AFTER-NEXT: %[[#NULL_PTR:]] = cir.const #cir.ptr : !cir.ptr +// AFTER-NEXT: cir.yield %[[#NULL_PTR]] : !cir.ptr +// AFTER-NEXT: }) : (!cir.bool) -> !cir.ptr // AFTER: } Derived &ref_cast(Base &b) { @@ -41,11 +41,11 @@ Derived &ref_cast(Base &b) { } // BEFORE: cir.func @_Z8ref_castR4Base -// BEFORE: %{{.+}} = cir.dyn_cast(ref, %{{.+}} : !cir.ptr, #dyn_cast_info__ZTI4Base__ZTI7Derived) -> !cir.ptr +// BEFORE: %{{.+}} = cir.dyn_cast(ref, %{{.+}} : !cir.ptr, #dyn_cast_info__ZTI4Base__ZTI7Derived) -> !cir.ptr // BEFORE: } // AFTER: cir.func @_Z8ref_castR4Base -// AFTER: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr +// AFTER: %[[#SRC_VOID_PTR:]] = cir.cast(bitcast, %{{.+}} : !cir.ptr), !cir.ptr // AFTER-NEXT: %[[#SRC_RTTI:]] = cir.const #cir.global_view<@_ZTI4Base> : !cir.ptr // AFTER-NEXT: %[[#DEST_RTTI:]] = cir.const #cir.global_view<@_ZTI7Derived> : !cir.ptr // AFTER-NEXT: %[[#OFFSET_HINT:]] = cir.const #cir.int<0> : !s64i @@ -56,7 +56,7 @@ Derived &ref_cast(Base &b) { // AFTER-NEXT: cir.call @__cxa_bad_cast() : () -> () // AFTER-NEXT: cir.unreachable // AFTER-NEXT: } -// AFTER-NEXT: %{{.+}} = cir.cast(bitcast, %[[#CASTED_PTR]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: %{{.+}} = cir.cast(bitcast, %[[#CASTED_PTR]] : !cir.ptr), !cir.ptr // AFTER: } void *ptr_cast_to_complete(Base *ptr) { @@ -64,18 +64,18 @@ void *ptr_cast_to_complete(Base *ptr) { } // BEFORE: cir.func @_Z20ptr_cast_to_completeP4Base -// BEFORE: %{{.+}} = cir.dyn_cast(ptr, %{{.+}} : !cir.ptr) -> !cir.ptr +// BEFORE: %{{.+}} = cir.dyn_cast(ptr, %{{.+}} : !cir.ptr) -> !cir.ptr // BEFORE: } // AFTER: cir.func @_Z20ptr_cast_to_completeP4Base -// AFTER: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// AFTER-NEXT: %[[#SRC_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#SRC]] : !cir.ptr), !cir.bool +// AFTER: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// AFTER-NEXT: %[[#SRC_IS_NOT_NULL:]] = cir.cast(ptr_to_bool, %[[#SRC]] : !cir.ptr), !cir.bool // AFTER-NEXT: %{{.+}} = cir.ternary(%[[#SRC_IS_NOT_NULL]], true { -// AFTER-NEXT: %[[#VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr> +// AFTER-NEXT: %[[#VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr> // AFTER-NEXT: %[[#VPTR:]] = cir.load %[[#VPTR_PTR]] : !cir.ptr>, !cir.ptr // AFTER-NEXT: %[[#BASE_OFFSET_PTR:]] = cir.vtable.address_point( %[[#VPTR]] : !cir.ptr, vtable_index = 0, address_point_index = -2) : !cir.ptr // AFTER-NEXT: %[[#BASE_OFFSET:]] = cir.load align(8) %[[#BASE_OFFSET_PTR]] : !cir.ptr, !s64i -// AFTER-NEXT: %[[#SRC_BYTES_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr +// AFTER-NEXT: %[[#SRC_BYTES_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr // AFTER-NEXT: %[[#DST_BYTES_PTR:]] = cir.ptr_stride(%[[#SRC_BYTES_PTR]] : !cir.ptr, %[[#BASE_OFFSET]] : !s64i), !cir.ptr // AFTER-NEXT: %[[#CASTED_PTR:]] = cir.cast(bitcast, %[[#DST_BYTES_PTR]] : !cir.ptr), !cir.ptr // AFTER-NEXT: cir.yield %[[#CASTED_PTR]] : !cir.ptr diff --git a/clang/test/CIR/CodeGen/evaluate-expr.c b/clang/test/CIR/CodeGen/evaluate-expr.c index 101f423c8e14..4b7146622dbf 100644 --- a/clang/test/CIR/CodeGen/evaluate-expr.c +++ b/clang/test/CIR/CodeGen/evaluate-expr.c @@ -25,7 +25,7 @@ void bar() { } // CHECK: cir.func no_proto @bar() // CHECK: [[ALLOC:%.*]] = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} -// CHECK: {{%.*}} = cir.get_global @s : !cir.ptr +// CHECK: {{%.*}} = cir.get_global @s : !cir.ptr // CHECK: [[CONST:%.*]] = cir.const #cir.int<0> : !s32i // CHECK: cir.store [[CONST]], [[ALLOC]] : !s32i, !cir.ptr // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/forward-decls.cpp b/clang/test/CIR/CodeGen/forward-decls.cpp index 66ae59c226ec..86e374626a20 100644 --- a/clang/test/CIR/CodeGen/forward-decls.cpp +++ b/clang/test/CIR/CodeGen/forward-decls.cpp @@ -68,7 +68,7 @@ void testRecursiveStruct(struct RecursiveStruct *arg) { // types, or all the recursive types are self references. // CHECK4: ![[B:.+]] = !cir.struct>} -// CHECK4: ![[A:.+]] = !cir.struct}> +// CHECK4: ![[A:.+]] = !cir.struct}> struct StructNodeB; struct StructNodeA { int value; @@ -98,11 +98,11 @@ void testIndirectSelfReference(struct StructNodeA arg) { // A sizeable complex struct just to double check that stuff is working. // CHECK5: !cir.struct, !cir.struct>, !cir.struct>, !cir.ptr>, !cir.ptr>} #cir.record.decl.ast>, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> -// CHECK5: !cir.struct>, !cir.struct, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>, !cir.struct, !cir.struct} #cir.record.decl.ast>>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>} #cir.record.decl.ast> -// CHECK5: !cir.struct>, !ty_22C22, !cir.struct} #cir.record.decl.ast>>, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> -// CHECK5: !cir.struct>, !ty_22C22, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>, !ty_22anon2E422} #cir.record.decl.ast> -// CHECK5: !cir.struct>, !ty_22C22, !ty_22anon2E522} #cir.record.decl.ast> -// CHECK5: !cir.struct +// CHECK5: !cir.struct>, !cir.struct, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>, !cir.struct, !cir.struct} #cir.record.decl.ast>>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !ty_C, !cir.struct} #cir.record.decl.ast>>, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !ty_C, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>, !ty_anon2E4_} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !ty_C, !ty_anon2E5_} #cir.record.decl.ast> +// CHECK5: !cir.struct struct A { struct { struct A *a1; diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c index f4ad939c01eb..18b78ed0cb7c 100644 --- a/clang/test/CIR/CodeGen/fun-ptr.c +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -17,25 +17,25 @@ typedef struct A { fun_typ fun; } A; -// CIR: !ty_22A22 = !cir.struct>)>>} #cir.record.decl.ast> +// CIR: !ty_A = !cir.struct>)>>} #cir.record.decl.ast> A a = {(fun_typ)0}; int extract_a(Data* d) { return d->a; } -// CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr -// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] +// CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr +// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] // CIR: [[TMP1:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] -// CIR: [[TMP2:%.*]] = cir.alloca !cir.ptr)>>, !cir.ptr)>>>, ["f", init] -// CIR: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> -// CIR: [[TMP3:%.*]] = cir.const #cir.ptr : !cir.ptr)>> -// CIR: cir.store [[TMP3]], [[TMP2]] : !cir.ptr)>>, !cir.ptr)>>> -// CIR: [[TMP4:%.*]] = cir.get_global {{@.*extract_a.*}} : !cir.ptr)>> -// CIR: cir.store [[TMP4]], [[TMP2]] : !cir.ptr)>>, !cir.ptr)>>> -// CIR: [[TMP5:%.*]] = cir.load [[TMP2]] : !cir.ptr)>>>, !cir.ptr)>> -// CIR: [[TMP6:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr -// CIR: [[TMP7:%.*]] = cir.call [[TMP5]]([[TMP6]]) : (!cir.ptr)>>, !cir.ptr) -> !s32i +// CIR: [[TMP2:%.*]] = cir.alloca !cir.ptr)>>, !cir.ptr)>>>, ["f", init] +// CIR: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> +// CIR: [[TMP3:%.*]] = cir.const #cir.ptr : !cir.ptr)>> +// CIR: cir.store [[TMP3]], [[TMP2]] : !cir.ptr)>>, !cir.ptr)>>> +// CIR: [[TMP4:%.*]] = cir.get_global {{@.*extract_a.*}} : !cir.ptr)>> +// CIR: cir.store [[TMP4]], [[TMP2]] : !cir.ptr)>>, !cir.ptr)>>> +// CIR: [[TMP5:%.*]] = cir.load [[TMP2]] : !cir.ptr)>>>, !cir.ptr)>> +// CIR: [[TMP6:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr +// CIR: [[TMP7:%.*]] = cir.call [[TMP5]]([[TMP6]]) : (!cir.ptr)>>, !cir.ptr) -> !s32i // CIR: cir.store [[TMP7]], [[TMP1]] : !s32i, !cir.ptr // LLVM: define dso_local i32 {{@.*foo.*}}(ptr %0) diff --git a/clang/test/CIR/CodeGen/globals-neg-index-array.c b/clang/test/CIR/CodeGen/globals-neg-index-array.c index 609e8f59e087..7f7a80ea2c9e 100644 --- a/clang/test/CIR/CodeGen/globals-neg-index-array.c +++ b/clang/test/CIR/CodeGen/globals-neg-index-array.c @@ -14,7 +14,7 @@ struct __attribute__((packed)) PackedStruct { }; struct PackedStruct packed[10]; char *packed_element = &(packed[-2].a3); -// CHECK: cir.global external @packed = #cir.zero : !cir.array {alignment = 16 : i64} loc(#loc5) +// CHECK: cir.global external @packed = #cir.zero : !cir.array {alignment = 16 : i64} loc(#loc5) // CHECK: cir.global external @packed_element = #cir.global_view<@packed, [-2 : i32, 2 : i32]> // LLVM: @packed = global [10 x %struct.PackedStruct] zeroinitializer // LLVM: @packed_element = global ptr getelementptr inbounds ([10 x %struct.PackedStruct], ptr @packed, i32 -2, i32 2) diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 6548bb161cdb..d73e136e2f0f 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -57,25 +57,25 @@ const int i = 12; int i2 = i; struct { int i; } i3 = {i}; // CHECK: cir.global external @i2 = #cir.int<12> : !s32i -// CHECK: cir.global external @i3 = #cir.const_struct<{#cir.int<12> : !s32i}> : !ty_22anon2E722 +// CHECK: cir.global external @i3 = #cir.const_struct<{#cir.int<12> : !s32i}> : !ty_anon2E7_ int a[10][10][10]; int *a2 = &a[3][0][8]; struct { int *p; } a3 = {&a[3][0][8]}; // CHECK: cir.global external @a2 = #cir.global_view<@a, [3 : i32, 0 : i32, 8 : i32]> : !cir.ptr -// CHECK: cir.global external @a3 = #cir.const_struct<{#cir.global_view<@a, [3 : i32, 0 : i32, 8 : i32]> : !cir.ptr}> : !ty_22anon2E922 +// CHECK: cir.global external @a3 = #cir.const_struct<{#cir.global_view<@a, [3 : i32, 0 : i32, 8 : i32]> : !cir.ptr}> : !ty_anon2E9_ int p[10]; int *p1 = &p[0]; struct { int *x; } p2 = {&p[0]}; // CHECK: cir.global external @p1 = #cir.global_view<@p> : !cir.ptr -// CHECK: cir.global external @p2 = #cir.const_struct<{#cir.global_view<@p> : !cir.ptr}> : !ty_22anon2E1122 +// CHECK: cir.global external @p2 = #cir.const_struct<{#cir.global_view<@p> : !cir.ptr}> : !ty_anon2E11_ int q[10]; int *q1 = q; struct { int *x; } q2 = {q}; // CHECK: cir.global external @q1 = #cir.global_view<@q> : !cir.ptr -// CHECK: cir.global external @q2 = #cir.const_struct<{#cir.global_view<@q> : !cir.ptr}> : !ty_22anon2E1322 +// CHECK: cir.global external @q2 = #cir.const_struct<{#cir.global_view<@q> : !cir.ptr}> : !ty_anon2E13_ int foo() { extern int optind; diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 91639e6b3b6d..84b38d567e09 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -6,13 +6,13 @@ void fn() { a(); } -// CHECK: !ty_22anon2E222 = !cir.struct +// CHECK: !ty_anon2E2_ = !cir.struct // CHECK-DAG: module // CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv{{.*}}) extra // CHECK: cir.func @_Z2fnv() -// CHECK-NEXT: %0 = cir.alloca !ty_22anon2E222, !cir.ptr, ["a"] +// CHECK-NEXT: %0 = cir.alloca !ty_anon2E2_, !cir.ptr, ["a"] // CHECK: cir.call @_ZZ2fnvENK3$_0clEv void l0() { @@ -23,15 +23,15 @@ void l0() { // CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv({{.*}}) extra -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %2 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: %2 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %3 = cir.load %2 : !cir.ptr>, !cir.ptr // CHECK: %4 = cir.load %3 : !cir.ptr, !s32i // CHECK: %5 = cir.const #cir.int<1> : !s32i // CHECK: %6 = cir.binop(add, %4, %5) nsw : !s32i -// CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %8 = cir.load %7 : !cir.ptr>, !cir.ptr // CHECK: cir.store %6, %8 : !s32i, !cir.ptr @@ -45,15 +45,15 @@ auto g() { }; } -// CHECK: cir.func @_Z1gv() -> !ty_22anon2E622 -// CHECK: %0 = cir.alloca !ty_22anon2E622, !cir.ptr, ["__retval"] {alignment = 8 : i64} +// CHECK: cir.func @_Z1gv() -> !ty_anon2E6_ +// CHECK: %0 = cir.alloca !ty_anon2E6_, !cir.ptr, ["__retval"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CHECK: %2 = cir.const #cir.int<12> : !s32i // CHECK: cir.store %2, %1 : !s32i, !cir.ptr -// CHECK: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: cir.store %1, %3 : !cir.ptr, !cir.ptr> -// CHECK: %4 = cir.load %0 : !cir.ptr, !ty_22anon2E622 -// CHECK: cir.return %4 : !ty_22anon2E622 +// CHECK: %4 = cir.load %0 : !cir.ptr, !ty_anon2E6_ +// CHECK: cir.return %4 : !ty_anon2E6_ auto g2() { int i = 12; @@ -65,15 +65,15 @@ auto g2() { } // Should be same as above because of NRVO -// CHECK: cir.func @_Z2g2v() -> !ty_22anon2E822 -// CHECK-NEXT: %0 = cir.alloca !ty_22anon2E822, !cir.ptr, ["__retval", init] {alignment = 8 : i64} +// CHECK: cir.func @_Z2g2v() -> !ty_anon2E8_ +// CHECK-NEXT: %0 = cir.alloca !ty_anon2E8_, !cir.ptr, ["__retval", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.const #cir.int<12> : !s32i // CHECK-NEXT: cir.store %2, %1 : !s32i, !cir.ptr -// CHECK-NEXT: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK-NEXT: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK-NEXT: cir.store %1, %3 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !ty_22anon2E822 -// CHECK-NEXT: cir.return %4 : !ty_22anon2E822 +// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !ty_anon2E8_ +// CHECK-NEXT: cir.return %4 : !ty_anon2E8_ int f() { return g2()(); @@ -82,10 +82,10 @@ int f() { // CHECK: cir.func @_Z1fv() -> !s32i // CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !ty_22anon2E822, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} -// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_22anon2E822 -// CHECK-NEXT: cir.store %3, %2 : !ty_22anon2E822, !cir.ptr -// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i +// CHECK-NEXT: %2 = cir.alloca !ty_anon2E8_, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} +// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_anon2E8_ +// CHECK-NEXT: cir.store %3, %2 : !ty_anon2E8_, !cir.ptr +// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i // CHECK-NEXT: cir.store %4, %0 : !s32i, !cir.ptr // CHECK-NEXT: } // CHECK-NEXT: %1 = cir.load %0 : !cir.ptr, !s32i @@ -114,8 +114,8 @@ int g3() { // 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. // CHECK: %3 = cir.scope { -// CHECK: %7 = cir.alloca !ty_22anon2E1122, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} -// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %7 = cir.alloca !ty_anon2E11_, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> // CHECK: %9 = cir.unary(plus, %8) : !cir.ptr)>>, !cir.ptr)>> // CHECK: cir.yield %9 : !cir.ptr)>> // CHECK: } diff --git a/clang/test/CIR/CodeGen/libcall.cpp b/clang/test/CIR/CodeGen/libcall.cpp index 8bc469e94368..17d2e7912833 100644 --- a/clang/test/CIR/CodeGen/libcall.cpp +++ b/clang/test/CIR/CodeGen/libcall.cpp @@ -59,5 +59,5 @@ void t(const char* fmt, ...) { // CHECK: %10 = cir.load %1 : !cir.ptr, !u64i // CHECK: %11 = cir.load %3 : !cir.ptr>, !cir.ptr -// CHECK: %12 = cir.load %4 : !cir.ptr>, !cir.ptr +// CHECK: %12 = cir.load %4 : !cir.ptr>, !cir.ptr // CHECK: %13 = cir.call @__vsnprintf_chk(%6, %8, %9, %10, %11, %12) diff --git a/clang/test/CIR/CodeGen/lvalue-refs.cpp b/clang/test/CIR/CodeGen/lvalue-refs.cpp index c4d1866d0291..f1e6dd2fed2a 100644 --- a/clang/test/CIR/CodeGen/lvalue-refs.cpp +++ b/clang/test/CIR/CodeGen/lvalue-refs.cpp @@ -6,8 +6,8 @@ struct String { void split(String &S) {} -// CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["S", init] +// CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["S", init] void foo() { String s; @@ -15,5 +15,5 @@ void foo() { } // CHECK: cir.func @_Z3foov() -// CHECK: %0 = cir.alloca !ty_22String22, !cir.ptr, ["s"] -// CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () +// CHECK: %0 = cir.alloca !ty_String, !cir.ptr, ["s"] +// CHECK: cir.call @_Z5splitR6String(%0) : (!cir.ptr) -> () diff --git a/clang/test/CIR/CodeGen/multi-vtable.cpp b/clang/test/CIR/CodeGen/multi-vtable.cpp index c22a769df1f3..c6f1f1397d97 100644 --- a/clang/test/CIR/CodeGen/multi-vtable.cpp +++ b/clang/test/CIR/CodeGen/multi-vtable.cpp @@ -34,13 +34,13 @@ int main() { // CIR: ![[VTableTypeMother:ty_.*]] = !cir.struct x 4>}> // CIR: ![[VTableTypeFather:ty_.*]] = !cir.struct x 3>}> // CIR: ![[VTableTypeChild:ty_.*]] = !cir.struct x 4>, !cir.array x 3>}> -// CIR: !ty_22Father22 = !cir.struct>>} #cir.record.decl.ast> -// CIR: !ty_22Mother22 = !cir.struct>>} #cir.record.decl.ast> -// CIR: !ty_22Child22 = !cir.struct +// CIR: !ty_Father = !cir.struct>>} #cir.record.decl.ast> +// CIR: !ty_Mother = !cir.struct>>} #cir.record.decl.ast> +// CIR: !ty_Child = !cir.struct -// CIR: cir.func linkonce_odr @_ZN6MotherC2Ev(%arg0: !cir.ptr +// CIR: cir.func linkonce_odr @_ZN6MotherC2Ev(%arg0: !cir.ptr // CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV6Mother, vtable_index = 0, address_point_index = 2) : !cir.ptr>> -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> // CIR: cir.store %2, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: cir.return // CIR: } @@ -50,14 +50,14 @@ int main() { // LLVM-DAG: ret void // LLVM-DAG: } -// CIR: cir.func linkonce_odr @_ZN5ChildC2Ev(%arg0: !cir.ptr +// CIR: cir.func linkonce_odr @_ZN5ChildC2Ev(%arg0: !cir.ptr // CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV5Child, vtable_index = 0, address_point_index = 2) : !cir.ptr>> -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV5Child, vtable_index = 1, address_point_index = 2) : !cir.ptr>> // CIR: %{{[0-9]+}} = cir.const #cir.int<8> : !s64i -// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr -// CIR: %11 = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr +// CIR: %11 = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: cir.return // CIR: } @@ -75,9 +75,9 @@ int main() { // CIR: cir.func @main() -> !s32i extra(#fn_attr) { -// CIR: %{{[0-9]+}} = cir.vtable.address_point( %{{[0-9]+}} : !cir.ptr)>>>, vtable_index = 0, address_point_index = 0) : !cir.ptr)>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point( %{{[0-9]+}} : !cir.ptr)>>>, vtable_index = 0, address_point_index = 0) : !cir.ptr)>>> -// CIR: %{{[0-9]+}} = cir.vtable.address_point( %{{[0-9]+}} : !cir.ptr)>>>, vtable_index = 0, address_point_index = 0) : !cir.ptr)>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point( %{{[0-9]+}} : !cir.ptr)>>>, vtable_index = 0, address_point_index = 0) : !cir.ptr)>>> // CIR: } diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index 4f7226376ddd..acf7df22d5a9 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -14,19 +14,19 @@ void m(int a, int b) { // CHECK: cir.func linkonce_odr @_ZSt11make_sharedI1SJRiS1_EESt10shared_ptrIT_EDpOT0_( // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["args", init] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["args", init] {alignment = 8 : i64} -// CHECK: %2 = cir.alloca !ty_22std3A3Ashared_ptr3CS3E22, !cir.ptr, ["__retval"] {alignment = 1 : i64} +// CHECK: %2 = cir.alloca !ty_std3A3Ashared_ptr3CS3E, !cir.ptr, ["__retval"] {alignment = 1 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> // CHECK: cir.scope { // CHECK: %4 = cir.const #cir.int<1> : !u64i // CHECK: %5 = cir.call @_Znwm(%4) : (!u64i) -> !cir.ptr -// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr // CHECK: %7 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %8 = cir.load %7 : !cir.ptr, !s32i // CHECK: %9 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK: %10 = cir.load %9 : !cir.ptr, !s32i -// CHECK: cir.call @_ZN1SC1Eii(%6, %8, %10) : (!cir.ptr, !s32i, !s32i) -> () -// CHECK: cir.call @_ZNSt10shared_ptrI1SEC1EPS0_(%2, %6) : (!cir.ptr, !cir.ptr) -> () +// CHECK: cir.call @_ZN1SC1Eii(%6, %8, %10) : (!cir.ptr, !s32i, !s32i) -> () +// CHECK: cir.call @_ZNSt10shared_ptrI1SEC1EPS0_(%2, %6) : (!cir.ptr, !cir.ptr) -> () // CHECK: } class B { @@ -36,19 +36,19 @@ class B { } }; -// CHECK: cir.func linkonce_odr @_ZN1B9constructEPS_(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["__p", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> -// CHECK: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN1B9constructEPS_(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["__p", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> +// CHECK: %2 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %3 = cir.const #cir.int<1> : !u64i -// CHECK: %4 = cir.load %1 : !cir.ptr>, !cir.ptr -// CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr -// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr +// CHECK: %4 = cir.load %1 : !cir.ptr>, !cir.ptr +// CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %5 : !cir.ptr), !cir.ptr // cir.call @B::B()(%new_placament_ptr) -// CHECK: cir.call @_ZN1BC1Ev(%6) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1BC1Ev(%6) : (!cir.ptr) -> () // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/packed-structs.c b/clang/test/CIR/CodeGen/packed-structs.c index 264701b9efe2..3488418f13b0 100644 --- a/clang/test/CIR/CodeGen/packed-structs.c +++ b/clang/test/CIR/CodeGen/packed-structs.c @@ -20,14 +20,14 @@ typedef struct { } __attribute__((aligned(2))) C; -// CHECK: !ty_22A22 = !cir.struct -// CHECK: !ty_22C22 = !cir.struct -// CHECK: !ty_22B22 = !cir.struct}> +// CHECK: !ty_A = !cir.struct +// CHECK: !ty_C = !cir.struct +// CHECK: !ty_B = !cir.struct}> // CHECK: cir.func {{.*@foo()}} -// CHECK: %0 = cir.alloca !ty_22A22, !cir.ptr, ["a"] {alignment = 1 : i64} -// CHECK: %1 = cir.alloca !ty_22B22, !cir.ptr, ["b"] {alignment = 1 : i64} -// CHECK: %2 = cir.alloca !ty_22C22, !cir.ptr, ["c"] {alignment = 2 : i64} +// CHECK: %0 = cir.alloca !ty_A, !cir.ptr, ["a"] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !ty_B, !cir.ptr, ["b"] {alignment = 1 : i64} +// CHECK: %2 = cir.alloca !ty_C, !cir.ptr, ["c"] {alignment = 2 : i64} void foo() { A a; B b; diff --git a/clang/test/CIR/CodeGen/pointer-to-data-member.cpp b/clang/test/CIR/CodeGen/pointer-to-data-member.cpp index 077506cec432..cf86aec8ad97 100644 --- a/clang/test/CIR/CodeGen/pointer-to-data-member.cpp +++ b/clang/test/CIR/CodeGen/pointer-to-data-member.cpp @@ -6,57 +6,57 @@ struct Point { int y; int z; }; -// CHECK-DAG: !ty_22Point22 = !cir.struct +// CHECK-DAG: !ty_Incomplete = !cir.struct int Point::*pt_member = &Point::x; -// CHECK: cir.global external @pt_member = #cir.data_member<0> : !cir.data_member +// CHECK: cir.global external @pt_member = #cir.data_member<0> : !cir.data_member auto test1() -> int Point::* { return &Point::y; } -// CHECK: cir.func @_Z5test1v() -> !cir.data_member -// CHECK: %{{.+}} = cir.const #cir.data_member<1> : !cir.data_member +// CHECK: cir.func @_Z5test1v() -> !cir.data_member +// CHECK: %{{.+}} = cir.const #cir.data_member<1> : !cir.data_member // CHECK: } int test2(const Point &pt, int Point::*member) { return pt.*member; } // CHECK: cir.func @_Z5test2RK5PointMS_i -// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr // CHECK: } int test3(const Point *pt, int Point::*member) { return pt->*member; } // CHECK: cir.func @_Z5test3PK5PointMS_i -// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr // CHECK: } auto test4(int Incomplete::*member) -> int Incomplete::* { return member; } -// CHECK: cir.func @_Z5test4M10Incompletei(%arg0: !cir.data_member loc({{.+}})) -> !cir.data_member +// CHECK: cir.func @_Z5test4M10Incompletei(%arg0: !cir.data_member loc({{.+}})) -> !cir.data_member int test5(Incomplete *ic, int Incomplete::*member) { return ic->*member; } // CHECK: cir.func @_Z5test5P10IncompleteMS_i -// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK: %{{.+}} = cir.get_runtime_member %{{.+}}[%{{.+}} : !cir.data_member] : !cir.ptr -> !cir.ptr // CHECK: } auto test_null() -> int Point::* { return nullptr; } // CHECK: cir.func @_Z9test_nullv -// CHECK: %{{.+}} = cir.const #cir.data_member : !cir.data_member +// CHECK: %{{.+}} = cir.const #cir.data_member : !cir.data_member // CHECK: } auto test_null_incomplete() -> int Incomplete::* { return nullptr; } // CHECK: cir.func @_Z20test_null_incompletev -// CHECK: %{{.+}} = cir.const #cir.data_member : !cir.data_member +// CHECK: %{{.+}} = cir.const #cir.data_member : !cir.data_member // CHECK: } diff --git a/clang/test/CIR/CodeGen/pointer-to-member-func.cpp b/clang/test/CIR/CodeGen/pointer-to-member-func.cpp index 6d226a6875c2..6f8b3363bfa3 100644 --- a/clang/test/CIR/CodeGen/pointer-to-member-func.cpp +++ b/clang/test/CIR/CodeGen/pointer-to-member-func.cpp @@ -11,24 +11,24 @@ auto make_non_virtual() -> void (Foo::*)(int) { return &Foo::m1; } -// CHECK-LABEL: cir.func @_Z16make_non_virtualv() -> !cir.method in !ty_22Foo22> -// CHECK: %{{.+}} = cir.const #cir.method<@_ZN3Foo2m1Ei> : !cir.method in !ty_22Foo22> +// CHECK-LABEL: cir.func @_Z16make_non_virtualv() -> !cir.method in !ty_Foo> +// CHECK: %{{.+}} = cir.const #cir.method<@_ZN3Foo2m1Ei> : !cir.method in !ty_Foo> // CHECK: } auto make_virtual() -> void (Foo::*)(int) { return &Foo::m3; } -// CHECK-LABEL: cir.func @_Z12make_virtualv() -> !cir.method in !ty_22Foo22> -// CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_22Foo22> +// CHECK-LABEL: cir.func @_Z12make_virtualv() -> !cir.method in !ty_Foo> +// CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_Foo> // CHECK: } auto make_null() -> void (Foo::*)(int) { return nullptr; } -// CHECK-LABEL: cir.func @_Z9make_nullv() -> !cir.method in !ty_22Foo22> -// CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_22Foo22> +// CHECK-LABEL: cir.func @_Z9make_nullv() -> !cir.method in !ty_Foo> +// CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_Foo> // CHECK: } void call(Foo *obj, void (Foo::*func)(int), int arg) { @@ -36,7 +36,7 @@ void call(Foo *obj, void (Foo::*func)(int), int arg) { } // CHECK-LABEL: cir.func @_Z4callP3FooMS_FviEi -// CHECK: %[[CALLEE:.+]], %[[THIS:.+]] = cir.get_method %{{.+}}, %{{.+}} : (!cir.method in !ty_22Foo22>, !cir.ptr) -> (!cir.ptr, !s32i)>>, !cir.ptr) +// CHECK: %[[CALLEE:.+]], %[[THIS:.+]] = cir.get_method %{{.+}}, %{{.+}} : (!cir.method in !ty_Foo>, !cir.ptr) -> (!cir.ptr, !s32i)>>, !cir.ptr) // CHECK-NEXT: %[[#ARG:]] = cir.load %{{.+}} : !cir.ptr, !s32i // CHECK-NEXT: cir.call %[[CALLEE]](%[[THIS]], %[[#ARG]]) : (!cir.ptr, !s32i)>>, !cir.ptr, !s32i) -> () // CHECK: } diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 2f5932f51e94..f4e78a725e3e 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -21,9 +21,9 @@ void init(unsigned numImages) { } } -// CHECK-DAG: !ty_22triple22 = !cir.struct, !u32i}> -// CHECK-DAG: ![[VEC:.*]] = !cir.struct" {!cir.ptr, !cir.ptr, !cir.ptr}> -// CHECK-DAG: ![[VEC_IT:.*]] = !cir.struct" {!cir.ptr}> +// CHECK-DAG: !ty_triple = !cir.struct, !u32i}> +// CHECK-DAG: ![[VEC:.*]] = !cir.struct" {!cir.ptr, !cir.ptr, !cir.ptr}> +// CHECK-DAG: ![[VEC_IT:.*]] = !cir.struct" {!cir.ptr}> // CHECK: cir.func @_Z4initj(%arg0: !u32i // CHECK: %0 = cir.alloca !u32i, !cir.ptr, ["numImages", init] {alignment = 4 : i64} @@ -36,7 +36,7 @@ void init(unsigned numImages) { // CHECK: %4 = cir.alloca !cir.ptr, !cir.ptr>, ["__range1", init] {alignment = 8 : i64} // CHECK: %5 = cir.alloca ![[VEC_IT]], !cir.ptr, ["__begin1", init] {alignment = 8 : i64} // CHECK: %6 = cir.alloca ![[VEC_IT]], !cir.ptr, ["__end1", init] {alignment = 8 : i64} -// CHECK: %7 = cir.alloca !cir.ptr, !cir.ptr>, ["image", init] {alignment = 8 : i64} +// CHECK: %7 = cir.alloca !cir.ptr, !cir.ptr>, ["image", init] {alignment = 8 : i64} // CHECK: cir.store %1, %4 : !cir.ptr, !cir.ptr> // CHECK: %8 = cir.load %4 : !cir.ptr>, !cir.ptr // CHECK: %9 = cir.call @_ZNSt6vectorI6tripleE5beginEv(%8) : (!cir.ptr) -> ![[VEC_IT]] @@ -48,19 +48,19 @@ void init(unsigned numImages) { // CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EneERKS3_(%5, %6) : (!cir.ptr, !cir.ptr) -> !cir.bool // CHECK: cir.condition(%12) // CHECK: } body { -// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EdeEv(%5) : (!cir.ptr) -> !cir.ptr -// CHECK: cir.store %12, %7 : !cir.ptr, !cir.ptr> +// CHECK: %12 = cir.call @_ZNK17__vector_iteratorI6triplePS0_RS0_EdeEv(%5) : (!cir.ptr) -> !cir.ptr +// CHECK: cir.store %12, %7 : !cir.ptr, !cir.ptr> // CHECK: cir.scope { -// CHECK: %13 = cir.alloca !ty_22triple22, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: %14 = cir.const #cir.zero : !ty_22triple22 -// CHECK: cir.store %14, %13 : !ty_22triple22, !cir.ptr -// CHECK: %15 = cir.get_member %13[0] {name = "type"} : !cir.ptr -> !cir.ptr +// CHECK: %13 = cir.alloca !ty_triple, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: %14 = cir.const #cir.zero : !ty_triple +// CHECK: cir.store %14, %13 : !ty_triple, !cir.ptr +// CHECK: %15 = cir.get_member %13[0] {name = "type"} : !cir.ptr -> !cir.ptr // CHECK: %16 = cir.const #cir.int<1000024002> : !u32i // CHECK: cir.store %16, %15 : !u32i, !cir.ptr -// CHECK: %17 = cir.get_member %13[1] {name = "next"} : !cir.ptr -> !cir.ptr> -// CHECK: %18 = cir.get_member %13[2] {name = "image"} : !cir.ptr -> !cir.ptr -// CHECK: %19 = cir.load %7 : !cir.ptr>, !cir.ptr -// CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr +// CHECK: %17 = cir.get_member %13[1] {name = "next"} : !cir.ptr -> !cir.ptr> +// CHECK: %18 = cir.get_member %13[2] {name = "image"} : !cir.ptr -> !cir.ptr +// CHECK: %19 = cir.load %7 : !cir.ptr>, !cir.ptr +// CHECK: %20 = cir.call @_ZN6tripleaSEOS_(%19, %13) : (!cir.ptr, !cir.ptr) -> !cir.ptr // CHECK: } // CHECK: cir.yield // CHECK: } step { diff --git a/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp b/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp index 96730e748a4c..f48602b87676 100644 --- a/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp +++ b/clang/test/CIR/CodeGen/skip-functions-from-system-headers.cpp @@ -15,4 +15,4 @@ void test() { // CHECK-NOT: cir.func linkonce_odr @_ZN6StringC1EPKc // CHECK: cir.func @_Z4testv() -// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file +// CHECK: cir.call @_ZN6StringC1Ev(%0) : (!cir.ptr) -> () \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index e011ed99233d..2ba42118dddb 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -17,23 +17,23 @@ static Init __ioinit(true); static Init __ioinit2(false); // BEFORE: module {{.*}} { -// BEFORE-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) -// BEFORE-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) -// BEFORE-NEXT: cir.global "private" internal dsolocal @_ZL8__ioinit = ctor : !ty_22Init22 { -// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr +// BEFORE-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// BEFORE-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) +// BEFORE-NEXT: cir.global "private" internal dsolocal @_ZL8__ioinit = ctor : !ty_Init { +// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // BEFORE-NEXT: %1 = cir.const #true -// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // BEFORE-NEXT: } dtor { -// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr -// BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () +// BEFORE-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr +// BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () // BEFORE-NEXT: } {alignment = 1 : i64, ast = #cir.var.decl.ast} -// BEFORE: cir.global "private" internal dsolocal @_ZL9__ioinit2 = ctor : !ty_22Init22 { -// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr +// BEFORE: cir.global "private" internal dsolocal @_ZL9__ioinit2 = ctor : !ty_Init { +// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // BEFORE-NEXT: %1 = cir.const #false -// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// BEFORE-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // BEFORE-NEXT: } dtor { -// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr -// BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () +// BEFORE-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr +// BEFORE-NEXT: cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () // BEFORE-NEXT: } {alignment = 1 : i64, ast = #cir.var.decl.ast} // BEFORE-NEXT: } @@ -41,29 +41,29 @@ static Init __ioinit2(false); // AFTER: module {{.*}} attributes {{.*}}cir.global_ctors = [#cir.global_ctor<"__cxx_global_var_init", 65536>, #cir.global_ctor<"__cxx_global_var_init.1", 65536>] // AFTER-NEXT: cir.global "private" external @__dso_handle : i8 // AFTER-NEXT: cir.func private @__cxa_atexit(!cir.ptr)>>, !cir.ptr, !cir.ptr) -// AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) -// AFTER-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) -// AFTER-NEXT: cir.global "private" internal dsolocal @_ZL8__ioinit = #cir.zero : !ty_22Init22 {alignment = 1 : i64, ast = #cir.var.decl.ast} +// AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) +// AFTER-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) +// AFTER-NEXT: cir.global "private" internal dsolocal @_ZL8__ioinit = #cir.zero : !ty_Init {alignment = 1 : i64, ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init() -// AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr +// AFTER-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // AFTER-NEXT: %1 = cir.const #true -// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () -// AFTER-NEXT: %2 = cir.get_global @_ZL8__ioinit : !cir.ptr -// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> -// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> -// AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// AFTER-NEXT: %2 = cir.get_global @_ZL8__ioinit : !cir.ptr +// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> +// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> +// AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr // AFTER-NEXT: %6 = cir.get_global @__dso_handle : !cir.ptr // AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return -// AFTER: cir.global "private" internal dsolocal @_ZL9__ioinit2 = #cir.zero : !ty_22Init22 {alignment = 1 : i64, ast = #cir.var.decl.ast} +// AFTER: cir.global "private" internal dsolocal @_ZL9__ioinit2 = #cir.zero : !ty_Init {alignment = 1 : i64, ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() -// AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr +// AFTER-NEXT: %0 = cir.get_global @_ZL9__ioinit2 : !cir.ptr // AFTER-NEXT: %1 = cir.const #false -// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () -// AFTER-NEXT: %2 = cir.get_global @_ZL9__ioinit2 : !cir.ptr -// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> -// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> -// AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr +// AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () +// AFTER-NEXT: %2 = cir.get_global @_ZL9__ioinit2 : !cir.ptr +// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> +// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> +// AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr // AFTER-NEXT: %6 = cir.get_global @__dso_handle : !cir.ptr // AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/stmt-expr.c b/clang/test/CIR/CodeGen/stmt-expr.c index d3944cb72505..0e3daebb9d78 100644 --- a/clang/test/CIR/CodeGen/stmt-expr.c +++ b/clang/test/CIR/CodeGen/stmt-expr.c @@ -24,13 +24,13 @@ void test2() { ({int x = 3; x; }); } struct S { int x; }; int test3() { return ({ struct S s = {1}; s; }).x; } // CHECK: @test3 -// CHECK: %[[#RETVAL:]] = cir.alloca !ty_22S22, !cir.ptr +// CHECK: %[[#RETVAL:]] = cir.alloca !ty_S, !cir.ptr // CHECK: cir.scope { -// CHECK: %[[#VAR:]] = cir.alloca !ty_22S22, !cir.ptr +// CHECK: %[[#VAR:]] = cir.alloca !ty_S, !cir.ptr // [...] -// CHECK: cir.copy %[[#VAR]] to %[[#RETVAL]] : !cir.ptr +// CHECK: cir.copy %[[#VAR]] to %[[#RETVAL]] : !cir.ptr // CHECK: } -// CHECK: %[[#RETADDR:]] = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: %[[#RETADDR:]] = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr // CHECK: %{{.+}} = cir.load %[[#RETADDR]] : !cir.ptr, !s32i // Expression is wrapped in an expression attribute (just ensure it does not crash). diff --git a/clang/test/CIR/CodeGen/stmt-expr.cpp b/clang/test/CIR/CodeGen/stmt-expr.cpp index 9d6ba7466855..8432df4e15af 100644 --- a/clang/test/CIR/CodeGen/stmt-expr.cpp +++ b/clang/test/CIR/CodeGen/stmt-expr.cpp @@ -19,13 +19,13 @@ void test1() { } // CHECK: @_Z5test1v // CHECK: cir.scope { -// CHECK: %[[#RETVAL:]] = cir.alloca !ty_22A22, !cir.ptr +// CHECK: %[[#RETVAL:]] = cir.alloca !ty_A, !cir.ptr // CHECK: cir.scope { -// CHECK: %[[#VAR:]] = cir.alloca !ty_22A22, !cir.ptr, ["a", init] {alignment = 4 : i64} -// CHECK: cir.call @_ZN1AC1Ev(%[[#VAR]]) : (!cir.ptr) -> () -// CHECK: cir.call @_ZN1AC1ERS_(%[[#RETVAL]], %[[#VAR]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %[[#VAR:]] = cir.alloca !ty_A, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK: cir.call @_ZN1AC1Ev(%[[#VAR]]) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1AC1ERS_(%[[#RETVAL]], %[[#VAR]]) : (!cir.ptr, !cir.ptr) -> () // TODO(cir): the local VAR should be destroyed here. // CHECK: } -// CHECK: cir.call @_ZN1A3FooEv(%[[#RETVAL]]) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1A3FooEv(%[[#RETVAL]]) : (!cir.ptr) -> () // TODO(cir): the temporary RETVAL should be destroyed here. // CHECK: } diff --git a/clang/test/CIR/CodeGen/stmtexpr-init.c b/clang/test/CIR/CodeGen/stmtexpr-init.c index 7fd44aebc991..27e909d2b39c 100644 --- a/clang/test/CIR/CodeGen/stmtexpr-init.c +++ b/clang/test/CIR/CodeGen/stmtexpr-init.c @@ -3,8 +3,8 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s -// CIR: ![[sized_array:.*]] = !cir.struct} // CIR: ![[annon_struct:.*]] = !cir.struct}> +// CIR: ![[sized_array:.*]] = !cir.struct} void escape(const void *); diff --git a/clang/test/CIR/CodeGen/struct-empty.c b/clang/test/CIR/CodeGen/struct-empty.c index 07f04e75d767..678eb50be05b 100644 --- a/clang/test/CIR/CodeGen/struct-empty.c +++ b/clang/test/CIR/CodeGen/struct-empty.c @@ -4,7 +4,7 @@ // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // CIR: ![[lock:.*]] = !cir.struct -// CIR: ![[fs_struct:.*]] = !cir.struct>} #cir.record.decl.ast> -// CHECK-DAG: !ty_22Bar22 = !cir.struct -// CHECK-DAG: !ty_22Foo22 = !cir.struct +// CHECK-DAG: !ty_Node = !cir.struct>} #cir.record.decl.ast> +// CHECK-DAG: !ty_Bar = !cir.struct +// CHECK-DAG: !ty_Foo = !cir.struct // CHECK-DAG: module {{.*}} { // CHECK: cir.func @baz() -// CHECK-NEXT: %0 = cir.alloca !ty_22Bar22, !cir.ptr, ["b"] {alignment = 4 : i64} -// CHECK-NEXT: %1 = cir.alloca !ty_22Foo22, !cir.ptr, ["f"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_Bar, !cir.ptr, ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %1 = cir.alloca !ty_Foo, !cir.ptr, ["f"] {alignment = 4 : i64} // CHECK-NEXT: cir.return // CHECK-NEXT: } void shouldConstInitStructs(void) { // CHECK: cir.func @shouldConstInitStructs struct Foo f = {1, 2, {3, 4}}; - // CHECK: %[[#V0:]] = cir.alloca !ty_22Foo22, !cir.ptr, ["f"] {alignment = 4 : i64} - // CHECK: %[[#V1:]] = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_22Bar22}> : !ty_22Foo22 - // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22Foo22, !cir.ptr + // CHECK: %[[#V0:]] = cir.alloca !ty_Foo, !cir.ptr, ["f"] {alignment = 4 : i64} + // CHECK: %[[#V1:]] = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_Bar}> : !ty_Foo + // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_Foo, !cir.ptr } // Should zero-initialize uninitialized global structs. struct S { int a,b; } s; -// CHECK-DAG: cir.global external @s = #cir.zero : !ty_22S22 +// CHECK-DAG: cir.global external @s = #cir.zero : !ty_S // Should initialize basic global structs. struct S1 { @@ -52,7 +52,7 @@ struct S1 { float f; int *p; } s1 = {1, .1, 0}; -// CHECK-DAG: cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_22S122 +// CHECK-DAG: cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_S1_ // Should initialize global nested structs. struct S2 { @@ -60,19 +60,19 @@ struct S2 { int a; } s2a; } s2 = {{1}}; -// CHECK-DAG: cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22}> : !ty_22S222 +// CHECK-DAG: cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S2A}> : !ty_S2_ // Should initialize global arrays of structs. struct S3 { int a; } s3[3] = {{1}, {2}, {3}}; -// CHECK-DAG: cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22S322]> : !cir.array +// CHECK-DAG: cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S3_, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_S3_, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_S3_]> : !cir.array void shouldCopyStructAsCallArg(struct S1 s) { // CHECK-DAG: cir.func @shouldCopyStructAsCallArg shouldCopyStructAsCallArg(s); - // CHECK-DAG: %[[#LV:]] = cir.load %{{.+}} : !cir.ptr, !ty_22S122 - // CHECK-DAG: cir.call @shouldCopyStructAsCallArg(%[[#LV]]) : (!ty_22S122) -> () + // CHECK-DAG: %[[#LV:]] = cir.load %{{.+}} : !cir.ptr, !ty_S1_ + // CHECK-DAG: cir.call @shouldCopyStructAsCallArg(%[[#LV]]) : (!ty_S1_) -> () } struct Bar shouldGenerateAndAccessStructArrays(void) { @@ -81,12 +81,12 @@ struct Bar shouldGenerateAndAccessStructArrays(void) { } // CHECK-DAG: cir.func @shouldGenerateAndAccessStructArrays // CHECK-DAG: %[[#STRIDE:]] = cir.const #cir.int<0> : !s32i -// CHECK-DAG: %[[#DARR:]] = cir.cast(array_to_ptrdecay, %{{.+}} : !cir.ptr>), !cir.ptr -// CHECK-DAG: %[[#ELT:]] = cir.ptr_stride(%[[#DARR]] : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr -// CHECK-DAG: cir.copy %[[#ELT]] to %{{.+}} : !cir.ptr +// CHECK-DAG: %[[#DARR:]] = cir.cast(array_to_ptrdecay, %{{.+}} : !cir.ptr>), !cir.ptr +// CHECK-DAG: %[[#ELT:]] = cir.ptr_stride(%[[#DARR]] : !cir.ptr, %[[#STRIDE]] : !s32i), !cir.ptr +// CHECK-DAG: cir.copy %[[#ELT]] to %{{.+}} : !cir.ptr // CHECK-DAG: cir.func @local_decl -// CHECK-DAG: {{%.}} = cir.alloca !ty_22Local22, !cir.ptr, ["a"] +// CHECK-DAG: {{%.}} = cir.alloca !ty_Local, !cir.ptr, ["a"] void local_decl(void) { struct Local { int i; @@ -95,7 +95,7 @@ void local_decl(void) { } // CHECK-DAG: cir.func @useRecursiveType -// CHECK-DAG: cir.get_member {{%.}}[0] {name = "next"} : !cir.ptr -> !cir.ptr> +// CHECK-DAG: cir.get_member {{%.}}[0] {name = "next"} : !cir.ptr -> !cir.ptr> void useRecursiveType(NodeStru* a) { a->next = 0; } diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 1a594f3756ab..3fa7a8ff7600 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -26,37 +26,37 @@ void baz() { struct incomplete; void yoyo(incomplete *i) {} -// CHECK-DAG: !ty_22incomplete22 = !cir.struct - -// CHECK-DAG: !ty_22Foo22 = !cir.struct -// CHECK-DAG: !ty_22Mandalore22 = !cir.struct, !s32i} #cir.record.decl.ast> -// CHECK-DAG: !ty_22Adv22 = !cir.struct -// CHECK-DAG: !ty_22Entry22 = !cir.struct, !cir.ptr)>>}> - -// CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-DAG: !ty_incomplete = !cir.struct + +// CHECK-DAG: !ty_Foo = !cir.struct +// CHECK-DAG: !ty_Mandalore = !cir.struct, !s32i} #cir.record.decl.ast> +// CHECK-DAG: !ty_Adv = !cir.struct +// CHECK-DAG: !ty_Entry = !cir.struct, !cir.ptr)>>}> + +// CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.func linkonce_odr @_ZN3Bar7method2Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.store %arg1, %1 : !s32i, !cir.ptr -// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i -// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.func linkonce_odr @_ZN3Bar7method3Ei(%arg0: !cir.ptr {{.*}}, %arg1: !s32i +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK-NEXT: cir.store %arg1, %1 : !s32i, !cir.ptr -// CHECK-NEXT: %3 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %3 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: %4 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: cir.store %4, %2 : !s32i, !cir.ptr // CHECK-NEXT: %5 = cir.load %2 : !cir.ptr, !s32i @@ -64,14 +64,14 @@ void yoyo(incomplete *i) {} // CHECK-NEXT: } // CHECK: cir.func @_Z3bazv() -// CHECK-NEXT: %0 = cir.alloca !ty_22Bar22, !cir.ptr, ["b"] {alignment = 4 : i64} +// CHECK-NEXT: %0 = cir.alloca !ty_Bar, !cir.ptr, ["b"] {alignment = 4 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["result", init] {alignment = 4 : i64} -// CHECK-NEXT: %2 = cir.alloca !ty_22Foo22, !cir.ptr, ["f"] {alignment = 4 : i64} -// CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () +// CHECK-NEXT: %2 = cir.alloca !ty_Foo, !cir.ptr, ["f"] {alignment = 4 : i64} +// CHECK-NEXT: cir.call @_ZN3Bar6methodEv(%0) : (!cir.ptr) -> () // CHECK-NEXT: %3 = cir.const #cir.int<4> : !s32i -// CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, !s32i) -> () +// CHECK-NEXT: cir.call @_ZN3Bar7method2Ei(%0, %3) : (!cir.ptr, !s32i) -> () // CHECK-NEXT: %4 = cir.const #cir.int<4> : !s32i -// CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, !s32i) -> !s32i +// CHECK-NEXT: %5 = cir.call @_ZN3Bar7method3Ei(%0, %4) : (!cir.ptr, !s32i) -> !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } @@ -94,18 +94,18 @@ class Adv { void m() { Adv C; } -// CHECK: cir.func linkonce_odr @_ZN3AdvC2Ev(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %2 = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr -// CHECK: %3 = cir.get_member %2[0] {name = "w"} : !cir.ptr -> !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN3AdvC2Ev(%arg0: !cir.ptr +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: %2 = cir.get_member %1[0] {name = "x"} : !cir.ptr -> !cir.ptr +// CHECK: %3 = cir.get_member %2[0] {name = "w"} : !cir.ptr -> !cir.ptr // CHECK: %4 = cir.const #cir.int<1000024001> : !u32i // CHECK: cir.store %4, %3 : !u32i, !cir.ptr -// CHECK: %5 = cir.get_member %2[1] {name = "n"} : !cir.ptr -> !cir.ptr> +// CHECK: %5 = cir.get_member %2[1] {name = "n"} : !cir.ptr -> !cir.ptr> // CHECK: %6 = cir.const #cir.ptr : !cir.ptr // CHECK: cir.store %6, %5 : !cir.ptr, !cir.ptr> -// CHECK: %7 = cir.get_member %2[2] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: %7 = cir.get_member %2[2] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK: %8 = cir.const #cir.int<0> : !s32i // CHECK: cir.store %8, %7 : !s32i, !cir.ptr // CHECK: cir.return @@ -117,19 +117,19 @@ struct A { // Should globally const-initialize struct members. struct A simpleConstInit = {1}; -// CHECK: cir.global external @simpleConstInit = #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22A22 +// CHECK: cir.global external @simpleConstInit = #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_A // Should globally const-initialize arrays with struct members. struct A arrConstInit[1] = {{1}}; -// CHECK: cir.global external @arrConstInit = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22A22]> : !cir.array +// CHECK: cir.global external @arrConstInit = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_A]> : !cir.array // Should locally copy struct members. void shouldLocallyCopyStructAssignments(void) { struct A a = { 3 }; - // CHECK: %[[#SA:]] = cir.alloca !ty_22A22, !cir.ptr, ["a"] {alignment = 4 : i64} + // CHECK: %[[#SA:]] = cir.alloca !ty_A, !cir.ptr, ["a"] {alignment = 4 : i64} struct A b = a; - // CHECK: %[[#SB:]] = cir.alloca !ty_22A22, !cir.ptr, ["b", init] {alignment = 4 : i64} - // cir.copy %[[#SA]] to %[[SB]] : !cir.ptr + // CHECK: %[[#SB:]] = cir.alloca !ty_A, !cir.ptr, ["b", init] {alignment = 4 : i64} + // cir.copy %[[#SA]] to %[[SB]] : !cir.ptr } A get_default() { return A{2}; } @@ -141,12 +141,12 @@ struct S { void h() { S s; } // CHECK: cir.func @_Z1hv() -// CHECK: %0 = cir.alloca !ty_22S22, !cir.ptr, ["s", init] {alignment = 1 : i64} -// CHECK: %1 = cir.alloca !ty_22A22, !cir.ptr, ["agg.tmp0"] {alignment = 4 : i64} -// CHECK: %2 = cir.call @_Z11get_defaultv() : () -> !ty_22A22 -// CHECK: cir.store %2, %1 : !ty_22A22, !cir.ptr -// CHECK: %3 = cir.load %1 : !cir.ptr, !ty_22A22 -// CHECK: cir.call @_ZN1SC1E1A(%0, %3) : (!cir.ptr, !ty_22A22) -> () +// CHECK: %0 = cir.alloca !ty_S, !cir.ptr, ["s", init] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !ty_A, !cir.ptr, ["agg.tmp0"] {alignment = 4 : i64} +// CHECK: %2 = cir.call @_Z11get_defaultv() : () -> !ty_A +// CHECK: cir.store %2, %1 : !ty_A, !cir.ptr +// CHECK: %3 = cir.load %1 : !cir.ptr, !ty_A +// CHECK: cir.call @_ZN1SC1E1A(%0, %3) : (!cir.ptr, !ty_A) -> () // CHECK: cir.return // CHECK: } @@ -162,6 +162,6 @@ struct Entry { void ppp() { Entry x; } -// CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr +// CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr -// CHECK: cir.get_member %1[0] {name = "procAddr"} : !cir.ptr -> !cir.ptr, !cir.ptr)>>> +// CHECK: cir.get_member %1[0] {name = "procAddr"} : !cir.ptr -> !cir.ptr, !cir.ptr)>>> diff --git a/clang/test/CIR/CodeGen/structural-binding.cpp b/clang/test/CIR/CodeGen/structural-binding.cpp index c7250d39a1e2..c37788b9d678 100644 --- a/clang/test/CIR/CodeGen/structural-binding.cpp +++ b/clang/test/CIR/CodeGen/structural-binding.cpp @@ -49,22 +49,22 @@ void f(A &a) { auto &[x, y, z] = a; (x, y, z); - // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr - // CIR: {{.*}} = cir.get_member %[[a]][0] {name = "a"} : !cir.ptr -> !cir.ptr - // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr - // CIR: {{.*}} = cir.get_member %[[a]][1] {name = "b"} : !cir.ptr -> !cir.ptr - // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr - // CIR: {{.*}} = cir.get_member %[[a]][2] {name = "c"} : !cir.ptr -> !cir.ptr + // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr + // CIR: {{.*}} = cir.get_member %[[a]][0] {name = "a"} : !cir.ptr -> !cir.ptr + // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr + // CIR: {{.*}} = cir.get_member %[[a]][1] {name = "b"} : !cir.ptr -> !cir.ptr + // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr + // CIR: {{.*}} = cir.get_member %[[a]][2] {name = "c"} : !cir.ptr -> !cir.ptr // LLVM: {{.*}} = getelementptr %struct.A, ptr {{.*}}, i32 0, i32 0 // LLVM: {{.*}} = getelementptr %struct.A, ptr {{.*}}, i32 0, i32 1 // LLVM: {{.*}} = getelementptr %struct.A, ptr {{.*}}, i32 0, i32 2 auto [x2, y2, z2] = a; (x2, y2, z2); - // CIR: cir.call @_ZN1AC1ERKS_(%2, {{.*}}) : (!cir.ptr, !cir.ptr) -> () - // CIR: {{.*}} = cir.get_member %2[0] {name = "a"} : !cir.ptr -> !cir.ptr - // CIR: {{.*}} = cir.get_member %2[1] {name = "b"} : !cir.ptr -> !cir.ptr - // CIR: {{.*}} = cir.get_member %2[2] {name = "c"} : !cir.ptr -> !cir.ptr + // CIR: cir.call @_ZN1AC1ERKS_(%2, {{.*}}) : (!cir.ptr, !cir.ptr) -> () + // CIR: {{.*}} = cir.get_member %2[0] {name = "a"} : !cir.ptr -> !cir.ptr + // CIR: {{.*}} = cir.get_member %2[1] {name = "b"} : !cir.ptr -> !cir.ptr + // CIR: {{.*}} = cir.get_member %2[2] {name = "c"} : !cir.ptr -> !cir.ptr // for the rest, just expect the codegen does't crash auto &&[x3, y3, z3] = a; @@ -84,10 +84,10 @@ void g(C &c) { auto [x8, y8] = c; (x8, y8); - // CIR: cir.call @_ZN1CC1ERKS_(%[[c:.*]], %7) : (!cir.ptr, !cir.ptr) -> () - // CIR: %[[x8:.*]] = cir.call @_Z3getILj0EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr + // CIR: cir.call @_ZN1CC1ERKS_(%[[c:.*]], %7) : (!cir.ptr, !cir.ptr) -> () + // CIR: %[[x8:.*]] = cir.call @_Z3getILj0EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr // CIR: cir.store %[[x8]], %[[x8p:.*]] : !cir.ptr, !cir.ptr> - // CIR: %[[x9:.*]] = cir.call @_Z3getILj1EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr + // CIR: %[[x9:.*]] = cir.call @_Z3getILj1EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr // CIR: cir.store %[[x9]], %[[x9p:.*]] : !cir.ptr, !cir.ptr> // CIR: {{.*}} = cir.load %[[x8p]] : !cir.ptr>, !cir.ptr // CIR: {{.*}} = cir.load %[[x9p]] : !cir.ptr>, !cir.ptr @@ -97,12 +97,12 @@ void g(C &c) { auto &[x9, y9] = c; (x9, y9); - // CIR: cir.store %12, %[[cp:.*]] : !cir.ptr, !cir.ptr> - // CIR: %[[c:.*]] = cir.load %[[cp]] : !cir.ptr>, !cir.ptr - // CIR: %[[x8:.*]] = cir.call @_Z3getILj0EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr + // CIR: cir.store %12, %[[cp:.*]] : !cir.ptr, !cir.ptr> + // CIR: %[[c:.*]] = cir.load %[[cp]] : !cir.ptr>, !cir.ptr + // CIR: %[[x8:.*]] = cir.call @_Z3getILj0EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr // CIR: cir.store %[[x8]], %[[x8p:.*]] : !cir.ptr, !cir.ptr> - // CIR: %[[c:.*]] = cir.load %[[cp]] : !cir.ptr>, !cir.ptr - // CIR: %[[x9:.*]] = cir.call @_Z3getILj1EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr + // CIR: %[[c:.*]] = cir.load %[[cp]] : !cir.ptr>, !cir.ptr + // CIR: %[[x9:.*]] = cir.call @_Z3getILj1EERKiRK1C(%[[c]]) : (!cir.ptr) -> !cir.ptr // CIR: cir.store %[[x9]], %[[x9p:.*]] : !cir.ptr, !cir.ptr> // CIR: {{.*}} = cir.load %[[x8p]] : !cir.ptr>, !cir.ptr // CIR: {{.*}} = cir.load %[[x9p]] : !cir.ptr>, !cir.ptr diff --git a/clang/test/CIR/CodeGen/three-way-comparison.cpp b/clang/test/CIR/CodeGen/three-way-comparison.cpp index 729602b55cfb..92841c453d83 100644 --- a/clang/test/CIR/CodeGen/three-way-comparison.cpp +++ b/clang/test/CIR/CodeGen/three-way-comparison.cpp @@ -7,8 +7,8 @@ // BEFORE: #cmp3way_info_partial_ltn1eq0gt1unn127_ = #cir.cmp3way_info // BEFORE: #cmp3way_info_strong_ltn1eq0gt1_ = #cir.cmp3way_info -// BEFORE: !ty_22std3A3A__13A3Apartial_ordering22 = !cir.struct y; diff --git a/clang/test/CIR/CodeGen/typedef.c b/clang/test/CIR/CodeGen/typedef.c index ea841c238b6f..2f34ff824e1c 100644 --- a/clang/test/CIR/CodeGen/typedef.c +++ b/clang/test/CIR/CodeGen/typedef.c @@ -6,5 +6,5 @@ void local_typedef() { } //CHECK: cir.func no_proto @local_typedef() -//CHECK: {{.*}} = cir.alloca !ty_22Struct22, !cir.ptr, ["s"] {alignment = 4 : i64} +//CHECK: {{.*}} = cir.alloca !ty_Struct, !cir.ptr, ["s"] {alignment = 4 : i64} //CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index cb0928d19a39..1147cf52cfb5 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -20,13 +20,13 @@ void foo(int x) { // CHECK-LABEL: cir.func @foo( // CHECK: %[[VAL_1:.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} -// CHECK: %[[VAL_2:.*]] = cir.alloca !ty_22A22, !cir.ptr, ["a", init] {alignment = 4 : i64} +// CHECK: %[[VAL_2:.*]] = cir.alloca !ty_A, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK: cir.store {{.*}}, %[[VAL_1]] : !s32i, !cir.ptr -// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = ""} : !cir.ptr -> !cir.ptr -// CHECK: %[[VAL_4:.*]] = cir.cast(bitcast, %[[VAL_3]] : !cir.ptr), !cir.ptr +// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = ""} : !cir.ptr -> !cir.ptr +// CHECK: %[[VAL_4:.*]] = cir.cast(bitcast, %[[VAL_3]] : !cir.ptr), !cir.ptr // CHECK: %[[VAL_5:.*]] = cir.load %[[VAL_1]] : !cir.ptr, !s32i // CHECK: %[[VAL_6:.*]] = cir.set_bitfield(#[[bfi_x]], %[[VAL_4]] : !cir.ptr, %[[VAL_5]] : !s32i) -> !s32i -// CHECK: %[[VAL_7:.*]] = cir.cast(bitcast, %[[VAL_3]] : !cir.ptr), !cir.ptr +// CHECK: %[[VAL_7:.*]] = cir.cast(bitcast, %[[VAL_3]] : !cir.ptr), !cir.ptr // CHECK: %[[VAL_8:.*]] = cir.const #cir.int<0> : !s32i // CHECK: %[[VAL_9:.*]] = cir.set_bitfield(#[[bfi_y]], %[[VAL_7]] : !cir.ptr, %[[VAL_8]] : !s32i) -> !s32i // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index d9f28057cd2e..7ccb520be2c2 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -6,15 +6,15 @@ typedef union { yolo y; struct { int lifecnt; }; } yolm; typedef union { yolo y; struct { int *lifecnt; int genpad; }; } yolm2; typedef union { yolo y; struct { bool life; int genpad; }; } yolm3; -// CHECK-DAG: !ty_22U23A3ADummy22 = !cir.struct -// CHECK-DAG: !ty_22anon2E522 = !cir.struct -// CHECK-DAG: !ty_22anon2E122 = !cir.struct -// CHECK-DAG: !ty_22yolo22 = !cir.struct -// CHECK-DAG: !ty_22anon2E322 = !cir.struct, !s32i} #cir.record.decl.ast> +// CHECK-DAG: !ty_U23A3ADummy = !cir.struct +// CHECK-DAG: !ty_anon2E5_ = !cir.struct +// CHECK-DAG: !ty_anon2E1_ = !cir.struct +// CHECK-DAG: !ty_yolo = !cir.struct +// CHECK-DAG: !ty_anon2E3_ = !cir.struct, !s32i} #cir.record.decl.ast> -// CHECK-DAG: !ty_22yolm22 = !cir.struct -// CHECK-DAG: !ty_22yolm322 = !cir.struct -// CHECK-DAG: !ty_22yolm222 = !cir.struct +// CHECK-DAG: !ty_yolm = !cir.struct +// CHECK-DAG: !ty_yolm3_ = !cir.struct +// CHECK-DAG: !ty_yolm2_ = !cir.struct // Should generate a union type with all members preserved. union U { @@ -24,7 +24,7 @@ union U { float f; double d; }; -// CHECK-DAG: !ty_22U22 = !cir.struct +// CHECK-DAG: !ty_U = !cir.struct // Should generate unions with complex members. union U2 { @@ -34,14 +34,14 @@ union U2 { float f; } s; } u2; -// CHECK-DAG: !cir.struct +// CHECK-DAG: !cir.struct // Should genereate unions without padding. union U3 { short b; U u; } u3; -// CHECK-DAG: !ty_22U322 = !cir.struct +// CHECK-DAG: !ty_U3_ = !cir.struct void m() { yolm q; @@ -50,31 +50,31 @@ void m() { } // CHECK: cir.func @_Z1mv() -// CHECK: cir.alloca !ty_22yolm22, !cir.ptr, ["q"] {alignment = 4 : i64} -// CHECK: cir.alloca !ty_22yolm222, !cir.ptr, ["q2"] {alignment = 8 : i64} -// CHECK: cir.alloca !ty_22yolm322, !cir.ptr, ["q3"] {alignment = 4 : i64} +// CHECK: cir.alloca !ty_yolm, !cir.ptr, ["q"] {alignment = 4 : i64} +// CHECK: cir.alloca !ty_yolm2_, !cir.ptr, ["q2"] {alignment = 8 : i64} +// CHECK: cir.alloca !ty_yolm3_, !cir.ptr, ["q3"] {alignment = 4 : i64} void shouldGenerateUnionAccess(union U u) { u.b = true; - // CHECK: %[[#BASE:]] = cir.get_member %0[0] {name = "b"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[0] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.bool, !cir.ptr u.b; - // CHECK: cir.get_member %0[0] {name = "b"} : !cir.ptr -> !cir.ptr + // CHECK: cir.get_member %0[0] {name = "b"} : !cir.ptr -> !cir.ptr u.i = 1; - // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK: cir.store %{{.+}}, %[[#BASE]] : !s32i, !cir.ptr u.i; - // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[2] {name = "i"} : !cir.ptr -> !cir.ptr u.f = 0.1F; - // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.float, !cir.ptr u.f; - // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[3] {name = "f"} : !cir.ptr -> !cir.ptr u.d = 0.1; - // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK: cir.store %{{.+}}, %[[#BASE]] : !cir.double, !cir.ptr u.d; - // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#BASE:]] = cir.get_member %0[4] {name = "d"} : !cir.ptr -> !cir.ptr } typedef union { @@ -84,8 +84,8 @@ typedef union { void noCrushOnDifferentSizes() { A a = {0}; - // CHECK: %[[#TMP0:]] = cir.alloca !ty_22A22, !cir.ptr, ["a"] {alignment = 4 : i64} - // CHECK: %[[#TMP1:]] = cir.cast(bitcast, %[[#TMP0]] : !cir.ptr), !cir.ptr + // CHECK: %[[#TMP0:]] = cir.alloca !ty_A, !cir.ptr, ["a"] {alignment = 4 : i64} + // CHECK: %[[#TMP1:]] = cir.cast(bitcast, %[[#TMP0]] : !cir.ptr), !cir.ptr // CHECK: %[[#TMP2:]] = cir.const #cir.zero : !ty_anon_struct // CHECK: cir.store %[[#TMP2]], %[[#TMP1]] : !ty_anon_struct, !cir.ptr } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/var-arg-float.c b/clang/test/CIR/CodeGen/var-arg-float.c index bd224c098f5a..2385ba3aaad0 100644 --- a/clang/test/CIR/CodeGen/var-arg-float.c +++ b/clang/test/CIR/CodeGen/var-arg-float.c @@ -13,26 +13,26 @@ double f1(int n, ...) { return res; } -// BEFORE: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} +// BEFORE: !ty___va_list = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} // BEFORE: cir.func @f1(%arg0: !s32i, ...) -> !cir.double // BEFORE: [[RETP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["__retval"] // BEFORE: [[RESP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["res", init] -// BEFORE: cir.va.start [[VARLIST:%.*]] : !cir.ptr -// BEFORE: [[TMP0:%.*]] = cir.va.arg [[VARLIST]] : (!cir.ptr) -> !cir.double +// BEFORE: cir.va.start [[VARLIST:%.*]] : !cir.ptr +// BEFORE: [[TMP0:%.*]] = cir.va.arg [[VARLIST]] : (!cir.ptr) -> !cir.double // BEFORE: cir.store [[TMP0]], [[RESP]] : !cir.double, !cir.ptr -// BEFORE: cir.va.end [[VARLIST]] : !cir.ptr +// BEFORE: cir.va.end [[VARLIST]] : !cir.ptr // BEFORE: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !cir.double // BEFORE: cir.store [[RES]], [[RETP]] : !cir.double, !cir.ptr // BEFORE: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !cir.double // BEFORE: cir.return [[RETV]] : !cir.double // beginning block cir code -// AFTER: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} +// AFTER: !ty___va_list = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} // AFTER: cir.func @f1(%arg0: !s32i, ...) -> !cir.double // AFTER: [[RETP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["__retval"] // AFTER: [[RESP:%.*]] = cir.alloca !cir.double, !cir.ptr, ["res", init] -// AFTER: cir.va.start [[VARLIST:%.*]] : !cir.ptr -// AFTER: [[VR_OFFS_P:%.*]] = cir.get_member [[VARLIST]][4] {name = "vr_offs"} : !cir.ptr -> !cir.ptr +// AFTER: cir.va.start [[VARLIST:%.*]] : !cir.ptr +// AFTER: [[VR_OFFS_P:%.*]] = cir.get_member [[VARLIST]][4] {name = "vr_offs"} : !cir.ptr -> !cir.ptr // AFTER: [[VR_OFFS:%.*]] = cir.load [[VR_OFFS_P]] : !cir.ptr, !s32i // AFTER: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i // AFTER: [[CMP0:%.*]] = cir.cmp(ge, [[VR_OFFS]], [[ZERO]]) : !s32i, !cir.bool @@ -47,7 +47,7 @@ double f1(int n, ...) { // AFTER: [[BB_IN_REG]]: -// AFTER-NEXT: [[VR_TOP_P:%.*]] = cir.get_member [[VARLIST]][2] {name = "vr_top"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[VR_TOP_P:%.*]] = cir.get_member [[VARLIST]][2] {name = "vr_top"} : !cir.ptr -> !cir.ptr> // AFTER-NEXT: [[VR_TOP:%.*]] = cir.load [[VR_TOP_P]] : !cir.ptr>, !cir.ptr // AFTER-NEXT: [[TMP2:%.*]] = cir.cast(bitcast, [[VR_TOP]] : !cir.ptr), !cir.ptr // AFTER-NEXT: [[TMP3:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[VR_OFFS]] : !s32i), !cir.ptr @@ -56,7 +56,7 @@ double f1(int n, ...) { // AFTER: [[BB_ON_STACK]]: -// AFTER-NEXT: [[STACK_P:%.*]] = cir.get_member [[VARLIST]][0] {name = "stack"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[STACK_P:%.*]] = cir.get_member [[VARLIST]][0] {name = "stack"} : !cir.ptr -> !cir.ptr> // AFTER-NEXT: [[STACK_V:%.*]] = cir.load [[STACK_P]] : !cir.ptr>, !cir.ptr // AFTER-NEXT: [[EIGHT_IN_PTR_ARITH:%.*]] = cir.const #cir.int<8> : !u64i // AFTER-NEXT: [[TMP4:%.*]] = cir.cast(bitcast, [[STACK_V]] : !cir.ptr), !cir.ptr @@ -69,7 +69,7 @@ double f1(int n, ...) { // AFTER-NEXT: [[TMP0:%.*]] = cir.cast(bitcast, [[BLK_ARG]] : !cir.ptr), !cir.ptr // AFTER-NEXT: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr, !cir.double // AFTER: cir.store [[TMP1]], [[RESP]] : !cir.double, !cir.ptr -// AFTER: cir.va.end [[VARLIST]] : !cir.ptr +// AFTER: cir.va.end [[VARLIST]] : !cir.ptr // AFTER: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !cir.double // AFTER: cir.store [[RES]], [[RETP]] : !cir.double, !cir.ptr // AFTER: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !cir.double diff --git a/clang/test/CIR/CodeGen/var-arg-scope.c b/clang/test/CIR/CodeGen/var-arg-scope.c index 0829d0b8f81b..f5c3c65cd467 100644 --- a/clang/test/CIR/CodeGen/var-arg-scope.c +++ b/clang/test/CIR/CodeGen/var-arg-scope.c @@ -7,20 +7,20 @@ void f1(__builtin_va_list c) { { __builtin_va_arg(c, void *); } } -// BEFORE: cir.func @f1(%arg0: !ty_22__va_list22) attributes -// BEFORE: [[VAR_LIST:%.*]] = cir.alloca !ty_22__va_list22, !cir.ptr, ["c", init] {alignment = 8 : i64} -// BEFORE: cir.store %arg0, [[VAR_LIST]] : !ty_22__va_list22, !cir.ptr +// BEFORE: cir.func @f1(%arg0: !ty___va_list) attributes +// BEFORE: [[VAR_LIST:%.*]] = cir.alloca !ty___va_list, !cir.ptr, ["c", init] {alignment = 8 : i64} +// BEFORE: cir.store %arg0, [[VAR_LIST]] : !ty___va_list, !cir.ptr // BEFORE: cir.scope { -// BEFORE-NEXT: [[TMP:%.*]] = cir.va.arg [[VAR_LIST]] : (!cir.ptr) -> !cir.ptr +// BEFORE-NEXT: [[TMP:%.*]] = cir.va.arg [[VAR_LIST]] : (!cir.ptr) -> !cir.ptr // BEFORE-NEXT: } // BEFORE-NEXT: cir.return -// AFTER: cir.func @f1(%arg0: !ty_22__va_list22) attributes -// AFTER: [[VARLIST:%.*]] = cir.alloca !ty_22__va_list22, !cir.ptr, ["c", init] {alignment = 8 : i64} -// AFTER: cir.store %arg0, [[VARLIST]] : !ty_22__va_list22, !cir.ptr +// AFTER: cir.func @f1(%arg0: !ty___va_list) attributes +// AFTER: [[VARLIST:%.*]] = cir.alloca !ty___va_list, !cir.ptr, ["c", init] {alignment = 8 : i64} +// AFTER: cir.store %arg0, [[VARLIST]] : !ty___va_list, !cir.ptr // AFTER: cir.scope { // -// AFTER-NEXT: [[GR_OFFS_P:%.*]] = cir.get_member [[VARLIST]][3] {name = "gr_offs"} : !cir.ptr -> !cir.ptr +// AFTER-NEXT: [[GR_OFFS_P:%.*]] = cir.get_member [[VARLIST]][3] {name = "gr_offs"} : !cir.ptr -> !cir.ptr // AFTER-NEXT: [[GR_OFFS:%.*]] = cir.load [[GR_OFFS_P]] : !cir.ptr // AFTER: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i // AFTER: [[CMP0:%.*]] = cir.cmp(ge, [[GR_OFFS]], [[ZERO]]) : !s32i, !cir.bool @@ -36,7 +36,7 @@ void f1(__builtin_va_list c) { // arg is passed in register. // AFTER: [[BB_IN_REG]]: -// AFTER-NEXT: [[GR_TOP_P:%.*]] = cir.get_member [[VARLIST]][1] {name = "gr_top"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[GR_TOP_P:%.*]] = cir.get_member [[VARLIST]][1] {name = "gr_top"} : !cir.ptr -> !cir.ptr> // AFTER-NEXT: [[GR_TOP:%.*]] = cir.load [[GR_TOP_P]] : !cir.ptr>, !cir.ptr // AFTER-NEXT: [[TMP2:%.*]] = cir.cast(bitcast, [[GR_TOP]] : !cir.ptr), !cir.ptr // AFTER-NEXT: [[TMP3:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[GR_OFFS]] : !s32i), !cir.ptr @@ -45,7 +45,7 @@ void f1(__builtin_va_list c) { // arg is passed in stack. // AFTER: [[BB_ON_STACK]]: -// AFTER-NEXT: [[STACK_P:%.*]] = cir.get_member [[VARLIST]][0] {name = "stack"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[STACK_P:%.*]] = cir.get_member [[VARLIST]][0] {name = "stack"} : !cir.ptr -> !cir.ptr> // AFTER-NEXT: [[STACK_V:%.*]] = cir.load [[STACK_P]] : !cir.ptr>, !cir.ptr // AFTER-NEXT: [[EIGHT_IN_PTR_ARITH:%.*]] = cir.const #cir.int<8> : !u64i // AFTER-NEXT: [[TMP4:%.*]] = cir.cast(bitcast, [[STACK_V]] : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/CodeGen/var-arg.c b/clang/test/CIR/CodeGen/var-arg.c index 8352bc832dd5..bc3cd2f64e0c 100644 --- a/clang/test/CIR/CodeGen/var-arg.c +++ b/clang/test/CIR/CodeGen/var-arg.c @@ -13,25 +13,25 @@ int f1(int n, ...) { return res; } -// BEFORE: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} +// BEFORE: !ty___va_list = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} // BEFORE: cir.func @f1(%arg0: !s32i, ...) -> !s32i // BEFORE: [[RETP:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] // BEFORE: [[RESP:%.*]] = cir.alloca !s32i, !cir.ptr, ["res", init] -// BEFORE: cir.va.start [[VARLIST:%.*]] : !cir.ptr -// BEFORE: [[TMP0:%.*]] = cir.va.arg [[VARLIST]] : (!cir.ptr) -> !s32i +// BEFORE: cir.va.start [[VARLIST:%.*]] : !cir.ptr +// BEFORE: [[TMP0:%.*]] = cir.va.arg [[VARLIST]] : (!cir.ptr) -> !s32i // BEFORE: cir.store [[TMP0]], [[RESP]] : !s32i, !cir.ptr -// BEFORE: cir.va.end [[VARLIST]] : !cir.ptr +// BEFORE: cir.va.end [[VARLIST]] : !cir.ptr // BEFORE: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !s32i // BEFORE: cir.store [[RES]], [[RETP]] : !s32i, !cir.ptr // BEFORE: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !s32i // BEFORE: cir.return [[RETV]] : !s32i -// AFTER: !ty_22__va_list22 = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} +// AFTER: !ty___va_list = !cir.struct, !cir.ptr, !cir.ptr, !s32i, !s32i} // AFTER: cir.func @f1(%arg0: !s32i, ...) -> !s32i // AFTER: [[RETP:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] // AFTER: [[RESP:%.*]] = cir.alloca !s32i, !cir.ptr, ["res", init] -// AFTER: cir.va.start [[VARLIST:%.*]] : !cir.ptr -// AFTER: [[GR_OFFS_P:%.*]] = cir.get_member [[VARLIST]][3] {name = "gr_offs"} : !cir.ptr -> !cir.ptr +// AFTER: cir.va.start [[VARLIST:%.*]] : !cir.ptr +// AFTER: [[GR_OFFS_P:%.*]] = cir.get_member [[VARLIST]][3] {name = "gr_offs"} : !cir.ptr -> !cir.ptr // AFTER: [[GR_OFFS:%.*]] = cir.load [[GR_OFFS_P]] : !cir.ptr, !s32i // AFTER: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i // AFTER: [[CMP0:%.*]] = cir.cmp(ge, [[GR_OFFS]], [[ZERO]]) : !s32i, !cir.bool @@ -47,7 +47,7 @@ int f1(int n, ...) { // arg is passed in register. // AFTER: [[BB_IN_REG]]: -// AFTER-NEXT: [[GR_TOP_P:%.*]] = cir.get_member [[VARLIST]][1] {name = "gr_top"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[GR_TOP_P:%.*]] = cir.get_member [[VARLIST]][1] {name = "gr_top"} : !cir.ptr -> !cir.ptr> // AFTER-NEXT: [[GR_TOP:%.*]] = cir.load [[GR_TOP_P]] : !cir.ptr>, !cir.ptr // AFTER-NEXT: [[TMP2:%.*]] = cir.cast(bitcast, [[GR_TOP]] : !cir.ptr), !cir.ptr // AFTER-NEXT: [[TMP3:%.*]] = cir.ptr_stride([[TMP2]] : !cir.ptr, [[GR_OFFS]] : !s32i), !cir.ptr @@ -56,7 +56,7 @@ int f1(int n, ...) { // arg is passed in stack. // AFTER: [[BB_ON_STACK]]: -// AFTER-NEXT: [[STACK_P:%.*]] = cir.get_member [[VARLIST]][0] {name = "stack"} : !cir.ptr -> !cir.ptr> +// AFTER-NEXT: [[STACK_P:%.*]] = cir.get_member [[VARLIST]][0] {name = "stack"} : !cir.ptr -> !cir.ptr> // AFTER-NEXT: [[STACK_V:%.*]] = cir.load [[STACK_P]] : !cir.ptr>, !cir.ptr // AFTER-NEXT: [[EIGHT_IN_PTR_ARITH:%.*]] = cir.const #cir.int<8> : !u64i // AFTER-NEXT: [[TMP4:%.*]] = cir.cast(bitcast, [[STACK_V]] : !cir.ptr), !cir.ptr @@ -73,7 +73,7 @@ int f1(int n, ...) { // AFTER-NEXT: [[TMP0:%.*]] = cir.cast(bitcast, [[BLK_ARG]] : !cir.ptr), !cir.ptr // AFTER-NEXT: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr, !s32i // AFTER: cir.store [[TMP1]], [[RESP]] : !s32i, !cir.ptr -// AFTER: cir.va.end [[VARLIST]] : !cir.ptr +// AFTER: cir.va.end [[VARLIST]] : !cir.ptr // AFTER: [[RES:%.*]] = cir.load [[RESP]] : !cir.ptr, !s32i // AFTER: cir.store [[RES]], [[RETP]] : !s32i, !cir.ptr // AFTER: [[RETV:%.*]] = cir.load [[RETP]] : !cir.ptr, !s32i diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp index 2ff9dfceacae..ad99c6e4fe6a 100644 --- a/clang/test/CIR/CodeGen/vector.cpp +++ b/clang/test/CIR/CodeGen/vector.cpp @@ -12,13 +12,13 @@ namespace std { } // namespace std // CHECK: cir.func linkonce_odr @_ZNSt6vectorIyE6resizeEm( -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !u64i, !cir.ptr, ["__sz", init] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !u64i, !cir.ptr, ["__cs", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.store %arg1, %1 : !u64i, !cir.ptr -// CHECK: %3 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %4 = cir.call @_ZNKSt6vectorIyE4sizeEv(%3) : (!cir.ptr) -> !u64i +// CHECK: %3 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: %4 = cir.call @_ZNKSt6vectorIyE4sizeEv(%3) : (!cir.ptr) -> !u64i // CHECK: cir.store %4, %2 : !u64i, !cir.ptr // CHECK: cir.scope { // CHECK: %5 = cir.load %2 : !cir.ptr, !u64i diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index ecb20f8d2301..e8c542f9a383 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -55,7 +55,7 @@ class B : public A // // CHECK: cir.func @_Z3foov() // CHECK: cir.scope { -// CHECK: %0 = cir.alloca !ty_22B22, !cir.ptr, ["agg.tmp.ensured"] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !ty_B, !cir.ptr, ["agg.tmp.ensured"] {alignment = 8 : i64} // CHECK: %1 = cir.const #cir.zero : ![[ClassB]] // CHECK: cir.store %1, %0 : ![[ClassB]], !cir.ptr // CHECK: cir.call @_ZN1BC2Ev(%0) : (!cir.ptr) -> () diff --git a/clang/test/CIR/IR/data-member-ptr.cir b/clang/test/CIR/IR/data-member-ptr.cir index a05193c21108..7078510bc594 100644 --- a/clang/test/CIR/IR/data-member-ptr.cir +++ b/clang/test/CIR/IR/data-member-ptr.cir @@ -1,17 +1,17 @@ // RUN: cir-opt %s | cir-opt | FileCheck %s !s32i = !cir.int -!ty_22Foo22 = !cir.struct +!ty_Foo = !cir.struct module { cir.func @null_member() { - %0 = cir.const #cir.data_member : !cir.data_member + %0 = cir.const #cir.data_member : !cir.data_member cir.return } - cir.func @get_runtime_member(%arg0: !cir.ptr) { - %0 = cir.const #cir.data_member<0> : !cir.data_member - %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr + cir.func @get_runtime_member(%arg0: !cir.ptr) { + %0 = cir.const #cir.data_member<0> : !cir.data_member + %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr cir.return } } @@ -19,13 +19,13 @@ module { // CHECK: module { // CHECK-NEXT: cir.func @null_member() { -// CHECK-NEXT: %0 = cir.const #cir.data_member : !cir.data_member +// CHECK-NEXT: %0 = cir.const #cir.data_member : !cir.data_member // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK-NEXT: cir.func @get_runtime_member(%arg0: !cir.ptr) { -// CHECK-NEXT: %0 = cir.const #cir.data_member<0> : !cir.data_member -// CHECK-NEXT: %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr +// CHECK-NEXT: cir.func @get_runtime_member(%arg0: !cir.ptr) { +// CHECK-NEXT: %0 = cir.const #cir.data_member<0> : !cir.data_member +// CHECK-NEXT: %1 = cir.get_runtime_member %arg0[%0 : !cir.data_member] : !cir.ptr -> !cir.ptr // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/IR/getmember.cir b/clang/test/CIR/IR/getmember.cir index 5bfd8f24d161..d9cecc0dea9c 100644 --- a/clang/test/CIR/IR/getmember.cir +++ b/clang/test/CIR/IR/getmember.cir @@ -4,21 +4,21 @@ !u16i = !cir.int !u32i = !cir.int -!ty_22Class22 = !cir.struct -!ty_22Incomplete22 = !cir.struct -!ty_22Struct22 = !cir.struct +!ty_Class = !cir.struct +!ty_Incomplete = !cir.struct +!ty_Struct = !cir.struct module { - cir.func @shouldGetStructMember(%arg0 : !cir.ptr) { - // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr - %0 = cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr + cir.func @shouldGetStructMember(%arg0 : !cir.ptr) { + // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr + %0 = cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr cir.return } // FIXME: remove bypass once codegen for CIR class records is patched. - cir.func @shouldBypassMemberTypeCheckForClassRecords(%arg0 : !cir.ptr) { - // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr> - %0 = cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr> + cir.func @shouldBypassMemberTypeCheckForClassRecords(%arg0 : !cir.ptr) { + // CHECK: cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr> + %0 = cir.get_member %arg0[1] {name = "test"} : !cir.ptr -> !cir.ptr> cir.return } } diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index ad8bba6a9410..cb75684886af 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -3,7 +3,7 @@ !s8i = !cir.int !s32i = !cir.int !s64i = !cir.int -!ty_22Init22 = !cir.struct +!ty_Init = !cir.struct module { cir.global external @a = #cir.int<3> : !s32i cir.global external @rgb = #cir.const_array<[#cir.int<0> : !s8i, #cir.int<-23> : !s8i, #cir.int<33> : !s8i] : !cir.array> @@ -32,15 +32,15 @@ module { #cir.global_view<@type_info_name_B> : !cir.ptr, #cir.global_view<@type_info_A> : !cir.ptr}> : !cir.struct, !cir.ptr, !cir.ptr}> - cir.func private @_ZN4InitC1Eb(!cir.ptr, !s8i) - cir.func private @_ZN4InitD1Ev(!cir.ptr) - cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { - %0 = cir.get_global @_ZL8__ioinit : !cir.ptr + cir.func private @_ZN4InitC1Eb(!cir.ptr, !s8i) + cir.func private @_ZN4InitD1Ev(!cir.ptr) + cir.global "private" internal @_ZL8__ioinit = ctor : !ty_Init { + %0 = cir.get_global @_ZL8__ioinit : !cir.ptr %1 = cir.const #cir.int<3> : !s8i - cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () + cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () } dtor { - %0 = cir.get_global @_ZL8__ioinit : !cir.ptr - cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () + %0 = cir.get_global @_ZL8__ioinit : !cir.ptr + cir.call @_ZN4InitD1Ev(%0) : (!cir.ptr) -> () } cir.func @f31() global_ctor { @@ -87,10 +87,10 @@ module { // CHECK: cir.func @use_global() // CHECK-NEXT: %0 = cir.get_global @a : !cir.ptr -// CHECK: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { -// CHECK-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr +// CHECK: cir.global "private" internal @_ZL8__ioinit = ctor : !ty_Init { +// CHECK-NEXT: %0 = cir.get_global @_ZL8__ioinit : !cir.ptr // CHECK-NEXT: %1 = cir.const #cir.int<3> : !s8i -// CHECK-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () +// CHECK-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !s8i) -> () // CHECK-NEXT: } // CHECK: cir.func @f31() global_ctor diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 2055dd89d230..6a7eadcf0338 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -736,9 +736,9 @@ module { // ----- !s8i = !cir.int -!ty_22Init22 = !cir.struct +!ty_Init = !cir.struct module { - cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { + cir.global "private" internal @_ZL8__ioinit = ctor : !ty_Init { } // expected-error@+1 {{custom op 'cir.global' ctor region must have exactly one block}} } @@ -746,12 +746,12 @@ module { // ----- !s8i = !cir.int #true = #cir.bool : !cir.bool -!ty_22Init22 = !cir.struct +!ty_Init = !cir.struct module { - cir.func private @_ZN4InitC1Eb(!cir.ptr) - cir.global "private" internal @_ZL8__ioinit = ctor : !ty_22Init22 { - %0 = cir.get_global @_ZL8__ioinit : !cir.ptr - cir.call @_ZN4InitC1Eb(%0) : (!cir.ptr) -> () + cir.func private @_ZN4InitC1Eb(!cir.ptr) + cir.global "private" internal @_ZL8__ioinit = ctor : !ty_Init { + %0 = cir.get_global @_ZL8__ioinit : !cir.ptr + cir.call @_ZN4InitC1Eb(%0) : (!cir.ptr) -> () } dtor {} // expected-error@+1 {{custom op 'cir.global' dtor region must have exactly one block}} } diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index abaaf8766e4b..a793e38b1a92 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -8,18 +8,18 @@ !ty_2222 = !cir.struct x 5>}> !ty_22221 = !cir.struct, !cir.ptr, !cir.ptr}> -!ty_22A22 = !cir.struct -!ty_22i22 = !cir.struct -!ty_22S22 = !cir.struct -!ty_22S122 = !cir.struct +!ty_A = !cir.struct +!ty_i = !cir.struct +!ty_S = !cir.struct +!ty_S1_ = !cir.struct // Test recursive struct parsing/printing. -!ty_22Node22 = !cir.struct>} #cir.record.decl.ast> +!ty_Node = !cir.struct>} #cir.record.decl.ast> // CHECK-DAG: !cir.struct>} #cir.record.decl.ast> module { // Dummy function to use types and force them to be printed. - cir.func @useTypes(%arg0: !ty_22Node22) { + cir.func @useTypes(%arg0: !ty_Node) { cir.return } @@ -30,12 +30,12 @@ module { } // CHECK: cir.func @structs() { -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] -// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["i", init] +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["i", init] cir.func @shouldSuccessfullyParseConstStructAttrs() { - %0 = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122 - // CHECK: cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_22S122 + %0 = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_S1_ + // CHECK: cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_S1_ cir.return } } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir b/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir index c1e985cd3e2c..66eb06629793 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir @@ -13,34 +13,34 @@ !ty_anon_struct2 = !cir.struct> x 4>}> !ty_anon_struct3 = !cir.struct> x 3>}> !ty_anon_struct4 = !cir.struct> x 4>, !cir.array> x 3>}> -!ty_22Father22 = !cir.struct ()>>>} #cir.record.decl.ast> -!ty_22Mother22 = !cir.struct ()>>>} #cir.record.decl.ast> -!ty_22Child22 = !cir.struct ()>>>} #cir.record.decl.ast>, !cir.struct ()>>>} #cir.record.decl.ast>} #cir.record.decl.ast> +!ty_Father = !cir.struct ()>>>} #cir.record.decl.ast> +!ty_Mother = !cir.struct ()>>>} #cir.record.decl.ast> +!ty_Child = !cir.struct ()>>>} #cir.record.decl.ast>, !cir.struct ()>>>} #cir.record.decl.ast>} #cir.record.decl.ast> module { - cir.func linkonce_odr @_ZN6Mother6simpleEv(%arg0: !cir.ptr) { - %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} - cir.store %arg0, %0 : !cir.ptr, !cir.ptr> - %1 = cir.load %0 : !cir.ptr>, !cir.ptr + cir.func linkonce_odr @_ZN6Mother6simpleEv(%arg0: !cir.ptr) { + %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} + cir.store %arg0, %0 : !cir.ptr, !cir.ptr> + %1 = cir.load %0 : !cir.ptr>, !cir.ptr cir.return } - cir.func private @_ZN5ChildC2Ev(%arg0: !cir.ptr) { cir.return } + cir.func private @_ZN5ChildC2Ev(%arg0: !cir.ptr) { cir.return } cir.global linkonce_odr @_ZTV6Mother = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI6Mother> : !cir.ptr, #cir.global_view<@_ZN6Mother9MotherFooEv> : !cir.ptr, #cir.global_view<@_ZN6Mother10MotherFoo2Ev> : !cir.ptr]> : !cir.array x 4>}> : !ty_anon_struct2 {alignment = 8 : i64} cir.global "private" external @_ZTVN10__cxxabiv117__class_type_infoE : !cir.ptr> cir.global linkonce_odr @_ZTS6Mother = #cir.const_array<"6Mother" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global constant external @_ZTI6Mother = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS6Mother> : !cir.ptr}> : !ty_anon_struct {alignment = 8 : i64} - cir.func linkonce_odr @_ZN6Mother9MotherFooEv(%arg0: !cir.ptr ) { cir.return } - cir.func linkonce_odr @_ZN6Mother10MotherFoo2Ev(%arg0: !cir.ptr ) { cir.return } + cir.func linkonce_odr @_ZN6Mother9MotherFooEv(%arg0: !cir.ptr ) { cir.return } + cir.func linkonce_odr @_ZN6Mother10MotherFoo2Ev(%arg0: !cir.ptr ) { cir.return } cir.global linkonce_odr @_ZTV6Father = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI6Father> : !cir.ptr, #cir.global_view<@_ZN6Father9FatherFooEv> : !cir.ptr]> : !cir.array x 3>}> : !ty_anon_struct3 {alignment = 8 : i64} - cir.func linkonce_odr @_ZN6FatherC2Ev(%arg0: !cir.ptr ) { cir.return } + cir.func linkonce_odr @_ZN6FatherC2Ev(%arg0: !cir.ptr ) { cir.return } cir.global linkonce_odr @_ZTV5Child = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI5Child> : !cir.ptr, #cir.global_view<@_ZN5Child9MotherFooEv> : !cir.ptr, #cir.global_view<@_ZN6Mother10MotherFoo2Ev> : !cir.ptr]> : !cir.array x 4>, #cir.const_array<[#cir.ptr<-8 : i64> : !cir.ptr, #cir.global_view<@_ZTI5Child> : !cir.ptr, #cir.global_view<@_ZN6Father9FatherFooEv> : !cir.ptr]> : !cir.array x 3>}> : !ty_anon_struct4 {alignment = 8 : i64} cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE : !cir.ptr> cir.global linkonce_odr @_ZTS5Child = #cir.const_array<"5Child" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global linkonce_odr @_ZTS6Father = #cir.const_array<"6Father" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global constant external @_ZTI6Father = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv117__class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS6Father> : !cir.ptr}> : !ty_anon_struct {alignment = 8 : i64} cir.global constant external @_ZTI5Child = #cir.typeinfo<{#cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [2 : i32]> : !cir.ptr, #cir.global_view<@_ZTS5Child> : !cir.ptr, #cir.int<0> : !u32i, #cir.int<2> : !u32i, #cir.global_view<@_ZTI6Mother> : !cir.ptr, #cir.int<2> : !s64i, #cir.global_view<@_ZTI6Father> : !cir.ptr, #cir.int<2050> : !s64i}> : !ty_anon_struct1 {alignment = 8 : i64} - cir.func linkonce_odr @_ZN5Child9MotherFooEv(%arg0: !cir.ptr ) { cir.return } - cir.func linkonce_odr @_ZN6Father9FatherFooEv(%arg0: !cir.ptr ) { cir.return } + cir.func linkonce_odr @_ZN5Child9MotherFooEv(%arg0: !cir.ptr ) { cir.return } + cir.func linkonce_odr @_ZN6Father9FatherFooEv(%arg0: !cir.ptr ) { cir.return } } // MLIR: llvm.mlir.global linkonce_odr @_ZTV5Child() {addr_space = 0 : i32, alignment = 8 : i64} : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> { diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir index 278225761d33..554a4a1fc18a 100644 --- a/clang/test/CIR/Lowering/array.cir +++ b/clang/test/CIR/Lowering/array.cir @@ -2,7 +2,7 @@ // RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM !s32i = !cir.int -!ty_22S22 = !cir.struct +!ty_S = !cir.struct module { cir.func @foo() { @@ -21,7 +21,7 @@ module { // LLVM: %1 = alloca [10 x i32], i64 1, align 16 // LLVM-NEXT: ret void - cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S22, #cir.zero : !ty_22S22]> : !cir.array + cir.global external @arr = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S, #cir.zero : !ty_S]> : !cir.array // CHECK: llvm.mlir.global external @arr() {addr_space = 0 : i32} : !llvm.array<2 x struct<"struct.S", (i32)>> { // CHECK: %0 = llvm.mlir.undef : !llvm.array<2 x struct<"struct.S", (i32)>> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S", (i32)> diff --git a/clang/test/CIR/Lowering/class.cir b/clang/test/CIR/Lowering/class.cir index 03ef0568d1d1..dd028f4c3b7d 100644 --- a/clang/test/CIR/Lowering/class.cir +++ b/clang/test/CIR/Lowering/class.cir @@ -4,28 +4,28 @@ !s32i = !cir.int !u8i = !cir.int !u32i = !cir.int -!ty_22S22 = !cir.struct -!ty_22S2A22 = !cir.struct -!ty_22S122 = !cir.struct} #cir.record.decl.ast> -!ty_22S222 = !cir.struct -!ty_22S322 = !cir.struct +!ty_S = !cir.struct +!ty_S2A = !cir.struct +!ty_S1_ = !cir.struct} #cir.record.decl.ast> +!ty_S2_ = !cir.struct +!ty_S3_ = !cir.struct module { cir.func @test() { - %1 = cir.alloca !ty_22S22, !cir.ptr, ["x"] {alignment = 4 : i64} + %1 = cir.alloca !ty_S, !cir.ptr, ["x"] {alignment = 4 : i64} // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#CLASS:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"class.S", (i8, i32)> - %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr + %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr // CHECK: = llvm.getelementptr %[[#CLASS]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"class.S", (i8, i32)> - %5 = cir.get_member %1[1] {name = "i"} : !cir.ptr -> !cir.ptr + %5 = cir.get_member %1[1] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK: = llvm.getelementptr %[[#CLASS]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"class.S", (i8, i32)> cir.return } cir.func @shouldConstInitLocalClassesWithConstStructAttr() { - %0 = cir.alloca !ty_22S2A22, !cir.ptr, ["s"] {alignment = 4 : i64} - %1 = cir.const #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22 - cir.store %1, %0 : !ty_22S2A22, !cir.ptr + %0 = cir.alloca !ty_S2A, !cir.ptr, ["s"] {alignment = 4 : i64} + %1 = cir.const #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S2A + cir.store %1, %0 : !ty_S2A, !cir.ptr cir.return } // CHECK: llvm.func @shouldConstInitLocalClassesWithConstStructAttr() @@ -39,7 +39,7 @@ module { // CHECK: } // Should lower basic #cir.const_struct initializer. - cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_22S122 + cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_S1_ // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"class.S1", (i32, f32, ptr)> { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"class.S1", (i32, f32, ptr)> // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 @@ -52,7 +52,7 @@ module { // CHECK: } // Should lower nested #cir.const_struct initializer. - cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22}> : !ty_22S222 + cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S2A}> : !ty_S2_ // CHECK: llvm.mlir.global external @s2() {addr_space = 0 : i32} : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"class.S2A", (i32)> @@ -62,7 +62,7 @@ module { // CHECK: llvm.return %4 : !llvm.struct<"class.S2", (struct<"class.S2A", (i32)>)> // CHECK: } - cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22S322]> : !cir.array + cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S3_, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_S3_, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_S3_]> : !cir.array // CHECK: llvm.mlir.global external @s3() {addr_space = 0 : i32} : !llvm.array<3 x struct<"class.S3", (i32)>> { // CHECK: %0 = llvm.mlir.undef : !llvm.array<3 x struct<"class.S3", (i32)>> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"class.S3", (i32)> @@ -82,13 +82,13 @@ module { cir.func @shouldLowerClassCopies() { // CHECK: llvm.func @shouldLowerClassCopies() - %1 = cir.alloca !ty_22S22, !cir.ptr, ["a"] {alignment = 4 : i64} + %1 = cir.alloca !ty_S, !cir.ptr, ["a"] {alignment = 4 : i64} // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#SA:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"class.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr - %2 = cir.alloca !ty_22S22, !cir.ptr, ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !ty_S, !cir.ptr, ["b", init] {alignment = 4 : i64} // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#SB:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"class.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr - cir.copy %1 to %2 : !cir.ptr + cir.copy %1 to %2 : !cir.ptr // CHECK: %[[#SIZE:]] = llvm.mlir.constant(8 : i32) : i32 // CHECK: "llvm.intr.memcpy"(%[[#SB]], %[[#SA]], %[[#SIZE]]) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> () cir.return diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 764089c1d6cb..4bb234c56995 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -4,7 +4,7 @@ !s8i = !cir.int !s32i = !cir.int !s64i = !cir.int -!ty_22anon2E122 = !cir.struct, !cir.int} #cir.record.decl.ast> +!ty_anon2E1_ = !cir.struct, !cir.int} #cir.record.decl.ast> module { cir.func @testConstArrInit() { %0 = cir.const #cir.const_array<"string\00" : !cir.array> : !cir.array @@ -36,9 +36,9 @@ module { // CHECK: llvm.return cir.func @testConstArrayOfStructs() { - %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 4 : i64} - %1 = cir.const #cir.const_array<[#cir.const_struct<{#cir.int<0> : !s32i, #cir.int<1> : !s32i}> : !ty_22anon2E122]> : !cir.array - cir.store %1, %0 : !cir.array, !cir.ptr> + %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 4 : i64} + %1 = cir.const #cir.const_array<[#cir.const_struct<{#cir.int<0> : !s32i, #cir.int<1> : !s32i}> : !ty_anon2E1_]> : !cir.array + cir.store %1, %0 : !cir.array, !cir.ptr> cir.return } // CHECK: llvm.func @testConstArrayOfStructs() diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 164411ee899f..c3bd1cc3a726 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -11,11 +11,11 @@ !u32i = !cir.int !u64i = !cir.int !u8i = !cir.int -!ty_22A22 = !cir.struct x 2>} #cir.record.decl.ast> -!ty_22Bar22 = !cir.struct -!ty_22StringStruct22 = !cir.struct, !cir.array, !cir.array} #cir.record.decl.ast> -!ty_22StringStructPtr22 = !cir.struct} #cir.record.decl.ast> -!ty_22anon2E122 = !cir.struct)>>} #cir.record.decl.ast> +!ty_A = !cir.struct x 2>} #cir.record.decl.ast> +!ty_Bar = !cir.struct +!ty_StringStruct = !cir.struct, !cir.array, !cir.array} #cir.record.decl.ast> +!ty_StringStructPtr = !cir.struct} #cir.record.decl.ast> +!ty_anon2E1_ = !cir.struct)>>} #cir.record.decl.ast> module { cir.global external @a = #cir.int<3> : !s32i @@ -91,11 +91,11 @@ module { // The following tests check direclty the resulting LLVM IR because the MLIR // version is two long. Always prefer the MLIR prefix when possible. - cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_22A22 + cir.global external @nestedTwoDim = #cir.const_struct<{#cir.int<1> : !s32i, #cir.const_array<[#cir.const_array<[#cir.int<2> : !s32i, #cir.int<3> : !s32i]> : !cir.array, #cir.const_array<[#cir.int<4> : !s32i, #cir.int<5> : !s32i]> : !cir.array]> : !cir.array x 2>}> : !ty_A // LLVM: @nestedTwoDim = global %struct.A { i32 1, [2 x [2 x i32{{\]\] \[\[}}2 x i32] [i32 2, i32 3], [2 x i32] [i32 4, i32 5{{\]\]}} } - cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array}> : !ty_22StringStruct22 + cir.global external @nestedString = #cir.const_struct<{#cir.const_array<"1\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array, #cir.const_array<"\00\00\00" : !cir.array> : !cir.array}> : !ty_StringStruct // LLVM: @nestedString = global %struct.StringStruct { [3 x i8] c"1\00\00", [3 x i8] zeroinitializer, [3 x i8] zeroinitializer } - cir.global external @nestedStringPtr = #cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr}> : !ty_22StringStructPtr22 + cir.global external @nestedStringPtr = #cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr}> : !ty_StringStructPtr // LLVM: @nestedStringPtr = global %struct.StringStructPtr { ptr @.str } cir.func @_Z11get_globalsv() { @@ -142,7 +142,7 @@ module { // MLIR: %0 = llvm.mlir.zero : !llvm.ptr // MLIR: llvm.return %0 : !llvm.ptr // MLIR: } - cir.global external @zeroStruct = #cir.zero : !ty_22Bar22 + cir.global external @zeroStruct = #cir.zero : !ty_Bar // MLIR: llvm.mlir.global external @zeroStruct() // MLIR: %0 = llvm.mlir.zero : !llvm.struct<"struct.Bar", (i32, i8)> // MLIR: llvm.return %0 : !llvm.struct<"struct.Bar", (i32, i8)> @@ -150,7 +150,7 @@ module { cir.global common @comm = #cir.int<0> : !s32i // MLIR: llvm.mlir.global common @comm(0 : i32) {addr_space = 0 : i32} : i32 - cir.global "private" internal @Handlers = #cir.const_array<[#cir.const_struct<{#cir.global_view<@myfun> : !cir.ptr>}> : !ty_22anon2E122]> : !cir.array + cir.global "private" internal @Handlers = #cir.const_array<[#cir.const_struct<{#cir.global_view<@myfun> : !cir.ptr>}> : !ty_anon2E1_]> : !cir.array cir.func internal private @myfun(%arg0: !s32i) { %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, !cir.ptr @@ -161,11 +161,11 @@ module { %1 = cir.alloca !s32i, !cir.ptr, ["flag", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, !cir.ptr cir.store %arg1, %1 : !s32i, !cir.ptr - %2 = cir.get_global @Handlers : !cir.ptr> + %2 = cir.get_global @Handlers : !cir.ptr> %3 = cir.load %0 : !cir.ptr, !s32i - %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr - %5 = cir.ptr_stride(%4 : !cir.ptr, %3 : !s32i), !cir.ptr - %6 = cir.get_member %5[0] {name = "func"} : !cir.ptr -> !cir.ptr>> + %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + %5 = cir.ptr_stride(%4 : !cir.ptr, %3 : !s32i), !cir.ptr + %6 = cir.get_member %5[0] {name = "func"} : !cir.ptr -> !cir.ptr>> %7 = cir.load %6 : !cir.ptr>>, !cir.ptr> %8 = cir.load %1 : !cir.ptr, !s32i cir.call %7(%8) : (!cir.ptr>, !s32i) -> () diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index 7ae152e52713..a1a3d352c8a1 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -4,28 +4,28 @@ !s32i = !cir.int !u8i = !cir.int !u32i = !cir.int -!ty_22S22 = !cir.struct -!ty_22S2A22 = !cir.struct -!ty_22S122 = !cir.struct} #cir.record.decl.ast> -!ty_22S222 = !cir.struct -!ty_22S322 = !cir.struct +!ty_S = !cir.struct +!ty_S2A = !cir.struct +!ty_S1_ = !cir.struct} #cir.record.decl.ast> +!ty_S2_ = !cir.struct +!ty_S3_ = !cir.struct module { cir.func @test() { - %1 = cir.alloca !ty_22S22, !cir.ptr, ["x"] {alignment = 4 : i64} + %1 = cir.alloca !ty_S, !cir.ptr, ["x"] {alignment = 4 : i64} // CHECK: %[[#ARRSIZE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#STRUCT:]] = llvm.alloca %[[#ARRSIZE]] x !llvm.struct<"struct.S", (i8, i32)> - %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr + %3 = cir.get_member %1[0] {name = "c"} : !cir.ptr -> !cir.ptr // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.S", (i8, i32)> - %5 = cir.get_member %1[1] {name = "i"} : !cir.ptr -> !cir.ptr + %5 = cir.get_member %1[1] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK: = llvm.getelementptr %[[#STRUCT]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.S", (i8, i32)> cir.return } cir.func @shouldConstInitLocalStructsWithConstStructAttr() { - %0 = cir.alloca !ty_22S2A22, !cir.ptr, ["s"] {alignment = 4 : i64} - %1 = cir.const #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22 - cir.store %1, %0 : !ty_22S2A22, !cir.ptr + %0 = cir.alloca !ty_S2A, !cir.ptr, ["s"] {alignment = 4 : i64} + %1 = cir.const #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S2A + cir.store %1, %0 : !ty_S2A, !cir.ptr cir.return } // CHECK: llvm.func @shouldConstInitLocalStructsWithConstStructAttr() @@ -39,7 +39,7 @@ module { // CHECK: } // Should lower basic #cir.const_struct initializer. - cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_22S122 + cir.global external @s1 = #cir.const_struct<{#cir.int<1> : !s32i, #cir.fp<1.000000e-01> : !cir.float, #cir.ptr : !cir.ptr}> : !ty_S1_ // CHECK: llvm.mlir.global external @s1() {addr_space = 0 : i32} : !llvm.struct<"struct.S1", (i32, f32, ptr)> { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 @@ -52,7 +52,7 @@ module { // CHECK: } // Should lower nested #cir.const_struct initializer. - cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S2A22}> : !ty_22S222 + cir.global external @s2 = #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S2A}> : !ty_S2_ // CHECK: llvm.mlir.global external @s2() {addr_space = 0 : i32} : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S2A", (i32)> @@ -62,7 +62,7 @@ module { // CHECK: llvm.return %4 : !llvm.struct<"struct.S2", (struct<"struct.S2A", (i32)>)> // CHECK: } - cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_22S322, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_22S322]> : !cir.array + cir.global external @s3 = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S3_, #cir.const_struct<{#cir.int<2> : !s32i}> : !ty_S3_, #cir.const_struct<{#cir.int<3> : !s32i}> : !ty_S3_]> : !cir.array // CHECK: llvm.mlir.global external @s3() {addr_space = 0 : i32} : !llvm.array<3 x struct<"struct.S3", (i32)>> { // CHECK: %0 = llvm.mlir.undef : !llvm.array<3 x struct<"struct.S3", (i32)>> // CHECK: %1 = llvm.mlir.undef : !llvm.struct<"struct.S3", (i32)> @@ -82,13 +82,13 @@ module { cir.func @shouldLowerStructCopies() { // CHECK: llvm.func @shouldLowerStructCopies() - %1 = cir.alloca !ty_22S22, !cir.ptr, ["a"] {alignment = 4 : i64} + %1 = cir.alloca !ty_S, !cir.ptr, ["a"] {alignment = 4 : i64} // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#SA:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"struct.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr - %2 = cir.alloca !ty_22S22, !cir.ptr, ["b", init] {alignment = 4 : i64} + %2 = cir.alloca !ty_S, !cir.ptr, ["b", init] {alignment = 4 : i64} // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[#SB:]] = llvm.alloca %[[#ONE]] x !llvm.struct<"struct.S", (i8, i32)> {alignment = 4 : i64} : (i64) -> !llvm.ptr - cir.copy %1 to %2 : !cir.ptr + cir.copy %1 to %2 : !cir.ptr // CHECK: %[[#SIZE:]] = llvm.mlir.constant(8 : i32) : i32 // CHECK: "llvm.intr.memcpy"(%[[#SB]], %[[#SA]], %[[#SIZE]]) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> () cir.return diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir index 6fbcd89b9a97..0cc9d1d15749 100644 --- a/clang/test/CIR/Lowering/unions.cir +++ b/clang/test/CIR/Lowering/unions.cir @@ -4,26 +4,26 @@ !s16i = !cir.int !s32i = !cir.int #true = #cir.bool : !cir.bool -!ty_22U122 = !cir.struct -!ty_22U222 = !cir.struct -!ty_22U322 = !cir.struct +!ty_U1_ = !cir.struct +!ty_U2_ = !cir.struct +!ty_U3_ = !cir.struct module { // Should lower union to struct with only the largest member. - cir.global external @u1 = #cir.zero : !ty_22U122 + cir.global external @u1 = #cir.zero : !ty_U1_ // CHECK: llvm.mlir.global external @u1() {addr_space = 0 : i32} : !llvm.struct<"union.U1", (i32)> // Should recursively find the largest member if there are nested unions. - cir.global external @u2 = #cir.zero : !ty_22U222 - cir.global external @u3 = #cir.zero : !ty_22U322 + cir.global external @u2 = #cir.zero : !ty_U2_ + cir.global external @u3 = #cir.zero : !ty_U3_ // CHECK: llvm.mlir.global external @u2() {addr_space = 0 : i32} : !llvm.struct<"union.U2", (f64)> // CHECK: llvm.mlir.global external @u3() {addr_space = 0 : i32} : !llvm.struct<"union.U3", (i32)> // CHECK: llvm.func @test - cir.func @test(%arg0: !cir.ptr) { + cir.func @test(%arg0: !cir.ptr) { // Should store directly to the union's base address. %5 = cir.const #true - %6 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr + %6 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr cir.store %5, %6 : !cir.bool, !cir.ptr // CHECK: %[[#VAL:]] = llvm.mlir.constant(1 : i8) : i8 // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. @@ -31,7 +31,7 @@ module { // CHECK: llvm.store %[[#VAL]], %[[#ADDR]] {{.*}}: i8, !llvm.ptr // Should load direclty from the union's base address. - %7 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr + %7 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr %8 = cir.load %7 : !cir.ptr, !cir.bool // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. // CHECK: %[[#BASE:]] = llvm.bitcast %{{.+}} : !llvm.ptr diff --git a/clang/test/CIR/Lowering/variadics.cir b/clang/test/CIR/Lowering/variadics.cir index b4a5a30c5e82..dfbfbf66e56f 100644 --- a/clang/test/CIR/Lowering/variadics.cir +++ b/clang/test/CIR/Lowering/variadics.cir @@ -5,30 +5,30 @@ !u32i = !cir.int !u8i = !cir.int -!ty_22__va_list_tag22 = !cir.struct, !cir.ptr} #cir.record.decl.ast> +!ty___va_list_tag = !cir.struct, !cir.ptr} #cir.record.decl.ast> module { cir.func @average(%arg0: !s32i, ...) -> !s32i { %0 = cir.alloca !s32i, !cir.ptr, ["count", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} - %2 = cir.alloca !cir.array, !cir.ptr>, ["args"] {alignment = 16 : i64} - %3 = cir.alloca !cir.array, !cir.ptr>, ["args_copy"] {alignment = 16 : i64} + %2 = cir.alloca !cir.array, !cir.ptr>, ["args"] {alignment = 16 : i64} + %3 = cir.alloca !cir.array, !cir.ptr>, ["args_copy"] {alignment = 16 : i64} cir.store %arg0, %0 : !s32i, !cir.ptr - %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr - cir.va.start %4 : !cir.ptr + %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.start %4 : !cir.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vastart %{{[0-9]+}} : !llvm.ptr - %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr - %6 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr - cir.va.copy %6 to %5 : !cir.ptr, !cir.ptr + %5 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr + %6 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.copy %6 to %5 : !cir.ptr, !cir.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> // MLIR-NEXT: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vacopy %13 to %{{[0-9]+}} : !llvm.ptr, !llvm.ptr - %7 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr - cir.va.end %7 : !cir.ptr + %7 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr + cir.va.end %7 : !cir.ptr // MLIR: %{{[0-9]+}} = llvm.getelementptr %{{[0-9]+}}[0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.__va_list_tag", (i32, i32, ptr, ptr)> // MLIR-NEXT: %{{[0-9]+}} = llvm.bitcast %{{[0-9]+}} : !llvm.ptr to !llvm.ptr // MLIR-NEXT: llvm.intr.vaend %{{[0-9]+}} : !llvm.ptr diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index 48345dfc7c0d..3789550ce33b 100644 --- a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -100,19 +100,19 @@ struct S1 { /// Cast arguments to the expected type. // CHECK: cir.func @_Z2s12S1(%arg0: !u64i loc({{.+}})) -> !u64i -// CHECK: %[[#V0:]] = cir.alloca !ty_22S122, !cir.ptr -// CHECK: %[[#V1:]] = cir.cast(bitcast, %arg0 : !u64i), !ty_22S122 -// CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_22S122, !cir.ptr +// CHECK: %[[#V0:]] = cir.alloca !ty_S1_, !cir.ptr +// CHECK: %[[#V1:]] = cir.cast(bitcast, %arg0 : !u64i), !ty_S1_ +// CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_S1_, !cir.ptr S1 s1(S1 arg) { /// Cast argument and result of the function call to the expected types. - // CHECK: %[[#V9:]] = cir.cast(bitcast, %{{.+}} : !ty_22S122), !u64i + // CHECK: %[[#V9:]] = cir.cast(bitcast, %{{.+}} : !ty_S1_), !u64i // CHECK: %[[#V10:]] = cir.call @_Z2s12S1(%[[#V9]]) : (!u64i) -> !u64i - // CHECK: %[[#V11:]] = cir.cast(bitcast, %[[#V10]] : !u64i), !ty_22S122 + // CHECK: %[[#V11:]] = cir.cast(bitcast, %[[#V10]] : !u64i), !ty_S1_ s1({1, 2}); - // CHECK: %[[#V12:]] = cir.load %{{.+}} : !cir.ptr, !ty_22S122 - // CHECK: %[[#V13:]] = cir.cast(bitcast, %[[#V12]] : !ty_22S122), !u64i + // CHECK: %[[#V12:]] = cir.load %{{.+}} : !cir.ptr, !ty_S1_ + // CHECK: %[[#V13:]] = cir.cast(bitcast, %[[#V12]] : !ty_S1_), !u64i // CHECK: cir.return %[[#V13]] : !u64i return {1, 2}; } From 2f0622d1fa741240bc9f3ea465971b7cd0798cd8 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Tue, 10 Sep 2024 23:56:49 +0300 Subject: [PATCH 1813/2301] [CIR][Lowering] Fix BrCond Lowering (#819) This PR fixes the lowering for BrCond. Consider the following code snippet: ``` #include bool test() { bool x = false; if (x) return x; return x; } ``` Emitting the CIR to `tmp.cir` using `-fclangir-mem2reg` produces the following CIR (truncated): ``` !s32i = !cir.int #fn_attr = #cir, nothrow = #cir.nothrow, optnone = #cir.optnone})> module { cir.func no_proto @test() -> !cir.bool extra(#fn_attr) { %0 = cir.const #cir.int<0> : !s32i %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool cir.br ^bb1 ^bb1: // pred: ^bb0 cir.brcond %1 ^bb2, ^bb3 ^bb2: // pred: ^bb1 cir.return %1 : !cir.bool ^bb3: // pred: ^bb1 cir.br ^bb4 ^bb4: // pred: ^bb3 cir.return %1 : !cir.bool } } ``` Lowering the CIR to LLVM using `cir-opt tmp.cir -cir-to-llvm` fails with: ``` tmp.cir:5:10: error: failed to legalize operation 'llvm.zext' marked as erased ``` The CIR cast `%1 = cir.cast(int_to_bool, %0 : !s32i)` is lowered to a CIR comparison with zero, which is then lowered to an `LLVM::ICmpOp` and `LLVM::ZExtOp`. In the BrCond lowering, the zext is deleted when `zext->use_empty()`, but during this phase the lowering for the CIR above is not complete yet, because the zext will still have usage(s) later. The current check for when the zext is deleted is error-prone and can be improved. To fix this, in addition to checking that the use of the zext is empty, an additional check that the defining operation for the BrCond in the CIR (the cast operation in this case) is used exactly once is added. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 +++- clang/test/CIR/Lowering/brcond.cir | 43 +++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/brcond.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ba9ac849c256..f96ddd1f7ba5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -612,12 +612,18 @@ class CIRBrCondOpLowering mlir::ConversionPatternRewriter &rewriter) const override { mlir::Value i1Condition; + auto hasOneUse = false; + + if (auto defOp = brOp.getCond().getDefiningOp()) + hasOneUse = defOp->getResult(0).hasOneUse(); + if (auto defOp = adaptor.getCond().getDefiningOp()) { if (auto zext = dyn_cast(defOp)) { if (zext->use_empty() && zext->getOperand(0).getType() == rewriter.getI1Type()) { i1Condition = zext->getOperand(0); - rewriter.eraseOp(zext); + if (hasOneUse) + rewriter.eraseOp(zext); } } } diff --git a/clang/test/CIR/Lowering/brcond.cir b/clang/test/CIR/Lowering/brcond.cir new file mode 100644 index 000000000000..9586f70cf727 --- /dev/null +++ b/clang/test/CIR/Lowering/brcond.cir @@ -0,0 +1,43 @@ +// RUN: cir-opt %s -cir-to-llvm | FileCheck %s -check-prefix=MLIR +// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM + +!s32i = !cir.int +#fn_attr = #cir, nothrow = #cir.nothrow, optnone = #cir.optnone})> +module { cir.func no_proto @test() -> !cir.bool extra(#fn_attr) { + %0 = cir.const #cir.int<0> : !s32i + %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool + cir.br ^bb1 + ^bb1: + cir.brcond %1 ^bb2, ^bb3 + ^bb2: + cir.return %1 : !cir.bool + ^bb3: + cir.br ^bb4 + ^bb4: + cir.return %1 : !cir.bool + } +} + +// MLIR: {{.*}} = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: {{.*}} = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: {{.*}} = llvm.icmp "ne" {{.*}}, {{.*}} : i32 +// MLIR-NEXT: {{.*}} = llvm.zext {{.*}} : i1 to i8 +// MLIR-NEXT: llvm.br ^bb1 +// MLIR-NEXT: ^bb1: +// MLIR-NEXT: llvm.cond_br {{.*}}, ^bb2, ^bb3 +// MLIR-NEXT: ^bb2: +// MLIR-NEXT: llvm.return {{.*}} : i8 +// MLIR-NEXT: ^bb3: +// MLIR-NEXT: llvm.br ^bb4 +// MLIR-NEXT: ^bb4: +// MLIR-NEXT: llvm.return {{.*}} : i8 + +// LLVM: br label {{.*}} +// LLVM: 1: +// LLVM: br i1 false, label {{.*}}, label {{.*}} +// LLVM: 2: +// LLVM: ret i8 0 +// LLVM: 3: +// LLVM: br label {{.*}} +// LLVM: 4: +// LLVM: ret i8 0 From 562dce9e3f4fc09ca2d0076ad9e8a3c256d9cb56 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 10 Sep 2024 14:23:14 -0700 Subject: [PATCH 1814/2301] [CIR] Add some extra dumping to help with intermitent bug We haven't been able to find the root cause of https://github.com/llvm/clangir/issues/829 just yet, the problem does also not show up under a ASANified build. Add some extra information before we crash, hopefully that might shed some light. --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index a83723498385..41c69ce7df6b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -839,6 +839,7 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl() { if (Personality.usesFuncletPads()) { // We don't need separate landing pads in the funclet model. + llvm::errs() << "PersonalityFn: " << Personality.PersonalityFn << "\n"; llvm_unreachable("NYI"); } else { mlir::cir::TryOp tryOp = nullptr; From 8412afe49a05f4d808f3f7be2cf7abe2d5744912 Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Tue, 10 Sep 2024 16:03:50 -0700 Subject: [PATCH 1815/2301] [CIR][driver] Forward -fno-clangir-direct-lowering option to cc1 (#822) Allow from the clang driver the use of lowering from CIR to MLIR standard dialect. Update the test to match the real output when `-fno-clangir-direct-lowering` is used, or with a combination of both `-fclangir-direct-lowering` and `-fno-clangir-direct-lowering`. --------- Co-authored-by: Bruno Cardoso Lopes Co-authored-by: Shoaib Meenai --- clang/lib/Driver/ToolChains/Clang.cpp | 6 +++--- clang/test/CIR/driver.c | 15 ++++++++++++--- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 642e1da2d95a..94fd177b5f7f 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5246,15 +5246,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.hasArg(options::OPT_emit_cir_flat)) CmdArgs.push_back("-fclangir"); - if (Args.hasArg(options::OPT_fclangir_direct_lowering)) - CmdArgs.push_back("-fclangir-direct-lowering"); + Args.addOptOutFlag(CmdArgs, options::OPT_fclangir_direct_lowering, + options::OPT_fno_clangir_direct_lowering); if (Args.hasArg(options::OPT_clangir_disable_passes)) CmdArgs.push_back("-clangir-disable-passes"); if (Args.hasArg(options::OPT_fclangir_call_conv_lowering)) CmdArgs.push_back("-fclangir-call-conv-lowering"); - + if (Args.hasArg(options::OPT_fclangir_mem2reg)) CmdArgs.push_back("-fclangir-mem2reg"); diff --git a/clang/test/CIR/driver.c b/clang/test/CIR/driver.c index bd1d13d0dba9..fcafb71a0a4a 100644 --- a/clang/test/CIR/driver.c +++ b/clang/test/CIR/driver.c @@ -5,13 +5,18 @@ // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fclangir-direct-lowering -S -emit-llvm %s -o %t1.ll // RUN: FileCheck --input-file=%t1.ll %s -check-prefix=LLVM // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -S -emit-llvm %s -o %t2.ll -// RUN: FileCheck --input-file=%t2.ll %s -check-prefix=LLVM -// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fclangir-direct-lowering -c -emit-llvm %s -o %t1.bc +// RUN: FileCheck --input-file=%t2.ll %s -check-prefix=CIR_STD_LLVM +// Test also the cases for both -fclangir-direct-lowering and -fno-clangir-direct-lowering, +// with -fno-clangir-direct-lowering having the preference +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fclangir-direct-lowering -fno-clangir-direct-lowering -S -emit-llvm %s -o %t2.ll +// RUN: FileCheck --input-file=%t2.ll %s -check-prefix=CIR_STD_LLVM +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -fclangir-direct-lowering -c -emit-llvm %s -o %t1.bc +// RUN: FileCheck --input-file=%t2.ll %s -check-prefix=CIR_STD_LLVM // RUN: llvm-dis %t1.bc -o %t1.bc.ll // RUN: FileCheck --input-file=%t1.bc.ll %s -check-prefix=LLVM // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -c -emit-llvm %s -o %t2.bc // RUN: llvm-dis %t2.bc -o %t2.bc.ll -// RUN: FileCheck --input-file=%t2.bc.ll %s -check-prefix=LLVM +// RUN: FileCheck --input-file=%t2.bc.ll %s -check-prefix=CIR_STD_LLVM // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -c %s -o %t.o // RUN: llvm-objdump -d %t.o | FileCheck %s -check-prefix=OBJ // RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -clangir-disable-passes -S -Xclang -emit-cir %s -o %t.cir @@ -39,6 +44,10 @@ void foo(void) {} // LLVM-NEXT: ret void // LLVM-NEXT: } +// CIR_STD_LLVM: define void @foo() +// CIR_STD_LLVM-NEXT: ret void +// CIR_STD_LLVM-NEXT: } + // LLVM_MACOS: define void @foo() // LLVM_MACOS-NEXT: ret void // LLVM_MACOS-NEXT: } From 76978abecf4d5944e9b0b72181268ca1de9eed6b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 10 Sep 2024 17:03:01 -0700 Subject: [PATCH 1816/2301] [CIR][CIRGen] Exceptions: handle synthetic cir.try within functions We can now get the cleanup right for other potential throwing ctors, still missing LLVM lowering support. --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 7 +++ clang/lib/CIR/CodeGen/CIRGenException.cpp | 52 +++++++++++++--------- clang/test/CIR/CodeGen/paren-list-init.cpp | 33 ++++++++++++++ 3 files changed, 70 insertions(+), 22 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index c2feb6ef3923..0296a8cf82e5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -290,8 +290,15 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { [&](mlir::cir::TryOp op) { trys.push_back(op); }); assert(trys.size() == 1 && "unknow global initialization style"); tryOp = trys[0]; + } else { + SmallVector trys; + auto funcOp = dyn_cast(CurFn); + funcOp.walk([&](mlir::cir::TryOp op) { trys.push_back(op); }); + assert(trys.size() == 1 && "nested or multiple try/catch NYI"); + tryOp = trys[0]; } + assert(tryOp && "expected available cir.try"); auto *NextAction = getEHDispatchBlock(EHParent, tryOp); (void)NextAction; diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 41c69ce7df6b..6ded1418b382 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -837,35 +837,43 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl() { // if (!CurFn->hasPersonalityFn()) // CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality)); + auto createSurroundingTryOp = [&]() { + // In OG, we build the landing pad for this scope. In CIR, we emit a + // synthetic cir.try because this didn't come from codegenerating from a + // try/catch in C++. + auto tryOp = builder.create( + *currSrcLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) {}, + // Don't emit the code right away for catch clauses, for + // now create the regions and consume the try scope result. + // Note that clauses are later populated in + // CIRGenFunction::buildLandingPad. + [&](mlir::OpBuilder &b, mlir::Location loc, + mlir::OperationState &result) { + // Since this didn't come from an explicit try, we only need one + // handler: unwind. + auto *r = result.addRegion(); + builder.createBlock(r); + }); + tryOp.setSynthetic(true); + return tryOp; + }; + if (Personality.usesFuncletPads()) { // We don't need separate landing pads in the funclet model. llvm::errs() << "PersonalityFn: " << Personality.PersonalityFn << "\n"; llvm_unreachable("NYI"); } else { mlir::cir::TryOp tryOp = nullptr; - if (!currLexScope) { - // In OG, we build the landing pad for this scope. In CIR, we emit a - // synthetic cir.try because this didn't come from codegenerating from a - // try/catch in C++. - tryOp = builder.create( - *currSrcLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) {}, - // Don't emit the code right away for catch clauses, for - // now create the regions and consume the try scope result. - // Note that clauses are later populated in - // CIRGenFunction::buildLandingPad. - [&](mlir::OpBuilder &b, mlir::Location loc, - mlir::OperationState &result) { - // Since this didn't come from an explicit try, we only need one - // handler: unwind. - auto *r = result.addRegion(); - builder.createBlock(r); - }); - tryOp.setSynthetic(true); - } else { + // Attempt to find a suitable existing parent try/catch, if none + // is available, create a synthetic cir.try in order to wrap the side + // effects of a potential throw. + if (currLexScope) tryOp = currLexScope->getClosestTryParent(); - assert(tryOp && "cir.try expected"); - } + if (!tryOp) + tryOp = createSurroundingTryOp(); + + assert(tryOp && "cir.try expected"); LP = buildLandingPad(tryOp); } diff --git a/clang/test/CIR/CodeGen/paren-list-init.cpp b/clang/test/CIR/CodeGen/paren-list-init.cpp index 119717046570..45d1dae01847 100644 --- a/clang/test/CIR/CodeGen/paren-list-init.cpp +++ b/clang/test/CIR/CodeGen/paren-list-init.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -Wno-unused-value -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -std=c++20 -fexceptions -fcxx-exceptions -triple aarch64-none-linux-android21 -Wno-unused-value -fclangir -emit-cir %s -o %t.eh.cir +// RUN: FileCheck --check-prefix=CIR_EH --input-file=%t.eh.cir %s struct Vec { Vec(); @@ -14,6 +16,9 @@ struct S1 { // CIR-DAG: ![[VecType:.*]] = !cir.struct // CIR-DAG: ![[S1:.*]] = !cir.struct +// CIR_EH-DAG: ![[VecType:.*]] = !cir.struct +// CIR_EH-DAG: ![[S1:.*]] = !cir.struct + template void make1() { Vec v; @@ -29,6 +34,34 @@ void make1() { // CIR: } // CIR: cir.call @_ZN3VecD1Ev(%[[VEC]]) : (!cir.ptr) -> () // CIR: cir.return + +// CIR_EH: cir.func linkonce_odr @_Z5make1ILi0EEvv() +// CIR_EH: %[[VEC:.*]] = cir.alloca ![[VecType]], !cir.ptr, ["v", init] + +// Construct v +// CIR_EH: cir.call @_ZN3VecC1Ev(%[[VEC]]) : (!cir.ptr) -> () +// CIR_EH: cir.scope { +// CIR_EH: %1 = cir.alloca ![[S1]], !cir.ptr, ["agg.tmp.ensured"] +// CIR_EH: %2 = cir.get_member %1[0] {name = "v"} : !cir.ptr -> !cir.ptr +// CIR_EH: cir.try synthetic { + +// Call v move ctor +// CIR_EH: cir.call exception @_ZN3VecC1EOS_(%2, %[[VEC]]) : (!cir.ptr, !cir.ptr) -> () +// CIR_EH: cir.yield +// CIR_EH: } cleanup { + +// Destroy v after v move ctor throws +// CIR_EH: cir.call @_ZN3VecD1Ev(%[[VEC]]) : (!cir.ptr) -> () +// CIR_EH: cir.yield +// CIR_EH: } catch [#cir.unwind { +// CIR_EH: cir.resume +// CIR_EH: }] +// CIR_EH: cir.call @_ZN2S1D1Ev(%1) : (!cir.ptr) -> () +// CIR_EH: } + +// Destroy v after successful cir.try +// CIR_EH: cir.call @_ZN3VecD1Ev(%[[VEC]]) : (!cir.ptr) -> () +// CIR_EH: cir.return } void foo() { From 1742e7b713659e10cea51f6c287e9396c6896380 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 12 Sep 2024 00:47:45 +0800 Subject: [PATCH 1817/2301] [CIR][Transform] Add ternary simplification (#809) This PR adds a new transformation that transform suitable ternary operations into select operations. Currently the "suitable" ternary operations are those ternary operations whose both branches satisfy either one of the following criteria: - The branch only contain a single `cir.yield` operation; - The branch contains a `cir.const` followed by a `cir.yield` that yields the constant value produced by the `cir.const`. - ~~The branch contains a `cir.load` followed by a `cir.yield` that yields the value loaded by the `cir.load`. The load operation cannot be volatile and must load from an alloca.~~ These criteria are hardcoded now so that simple C/C++ ternary expressions could be eventually lowered to a `cir.select` operation instead. --- .../CIR/Dialect/Transforms/CIRSimplify.cpp | 89 ++++++++++++++++++- clang/test/CIR/CodeGen/binop.cpp | 27 ++---- clang/test/CIR/CodeGen/ternary.cpp | 16 ++-- clang/test/CIR/Transforms/ternary-fold.cir | 60 +++++++++++++ clang/test/CIR/Transforms/ternary-fold.cpp | 56 ++++++++++++ 5 files changed, 214 insertions(+), 34 deletions(-) create mode 100644 clang/test/CIR/Transforms/ternary-fold.cir create mode 100644 clang/test/CIR/Transforms/ternary-fold.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp index 7fd04f761f43..3aece6d74585 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp @@ -8,11 +8,15 @@ #include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Block.h" +#include "mlir/IR/Operation.h" #include "mlir/IR/PatternMatch.h" +#include "mlir/IR/Region.h" #include "mlir/Support/LogicalResult.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "llvm/ADT/SmallVector.h" using namespace mlir; using namespace cir; @@ -107,6 +111,85 @@ struct RemoveTrivialTry : public OpRewritePattern { } }; +/// Simplify suitable ternary operations into select operations. +/// +/// For now we only simplify those ternary operations whose true and false +/// branches directly yield a value or a constant. That is, both of the true and +/// the false branch must either contain a cir.yield operation as the only +/// operation in the branch, or contain a cir.const operation followed by a +/// cir.yield operation that yields the constant value. +/// +/// For example, we will simplify the following ternary operation: +/// +/// %0 = cir.ternary (%condition, true { +/// %1 = cir.const ... +/// cir.yield %1 +/// } false { +/// cir.yield %2 +/// }) +/// +/// into the following sequence of operations: +/// +/// %1 = cir.const ... +/// %0 = cir.select if %condition then %1 else %2 +struct SimplifyTernary final : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult matchAndRewrite(TernaryOp op, + PatternRewriter &rewriter) const override { + if (op->getNumResults() != 1) + return mlir::failure(); + + if (!isSimpleTernaryBranch(op.getTrueRegion()) || + !isSimpleTernaryBranch(op.getFalseRegion())) + return mlir::failure(); + + mlir::cir::YieldOp trueBranchYieldOp = mlir::cast( + op.getTrueRegion().front().getTerminator()); + mlir::cir::YieldOp falseBranchYieldOp = mlir::cast( + op.getFalseRegion().front().getTerminator()); + auto trueValue = trueBranchYieldOp.getArgs()[0]; + auto falseValue = falseBranchYieldOp.getArgs()[0]; + + rewriter.inlineBlockBefore(&op.getTrueRegion().front(), op); + rewriter.inlineBlockBefore(&op.getFalseRegion().front(), op); + rewriter.eraseOp(trueBranchYieldOp); + rewriter.eraseOp(falseBranchYieldOp); + rewriter.replaceOpWithNewOp(op, op.getCond(), + trueValue, falseValue); + + return mlir::success(); + } + +private: + bool isSimpleTernaryBranch(mlir::Region ®ion) const { + if (!region.hasOneBlock()) + return false; + + mlir::Block &onlyBlock = region.front(); + auto &ops = onlyBlock.getOperations(); + + // The region/block could only contain at most 2 operations. + if (ops.size() > 2) + return false; + + if (ops.size() == 1) { + // The region/block only contain a cir.yield operation. + return true; + } + + // Check whether the region/block contains a cir.const followed by a + // cir.yield that yields the value. + auto yieldOp = mlir::cast(onlyBlock.getTerminator()); + auto yieldValueDefOp = mlir::dyn_cast_if_present( + yieldOp.getArgs()[0].getDefiningOp()); + if (!yieldValueDefOp || yieldValueDefOp->getBlock() != &onlyBlock) + return false; + + return true; + } +}; + struct SimplifySelect : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; @@ -171,6 +254,7 @@ void populateMergeCleanupPatterns(RewritePatternSet &patterns) { RemoveEmptyScope, RemoveEmptySwitch, RemoveTrivialTry, + SimplifyTernary, SimplifySelect >(patterns.getContext()); // clang-format on @@ -186,8 +270,9 @@ void CIRSimplifyPass::runOnOperation() { getOperation()->walk([&](Operation *op) { // CastOp here is to perform a manual `fold` in // applyOpPatternsAndFold - if (isa(op)) + if (isa( + op)) ops.push_back(op); }); diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index 29f6e89282b0..045e78ccf021 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -32,13 +32,7 @@ void b1(bool a, bool b) { // CHECK: cir.ternary(%3, true // CHECK-NEXT: %7 = cir.load %1 -// CHECK-NEXT: cir.ternary(%7, true -// CHECK-NEXT: cir.const #true -// CHECK-NEXT: cir.yield -// CHECK-NEXT: false { -// CHECK-NEXT: cir.const #false -// CHECK-NEXT: cir.yield -// CHECK: cir.yield +// CHECK-NEXT: cir.yield %7 // CHECK-NEXT: false { // CHECK-NEXT: cir.const #false // CHECK-NEXT: cir.yield @@ -48,11 +42,6 @@ void b1(bool a, bool b) { // CHECK-NEXT: cir.yield // CHECK-NEXT: false { // CHECK-NEXT: %7 = cir.load %1 -// CHECK-NEXT: cir.ternary(%7, true -// CHECK-NEXT: cir.const #true -// CHECK-NEXT: cir.yield -// CHECK-NEXT: false { -// CHECK-NEXT: cir.const #false // CHECK-NEXT: cir.yield void b2(bool a) { @@ -90,16 +79,10 @@ void b3(int a, int b, int c, int d) { // CHECK-NEXT: %13 = cir.load %2 // CHECK-NEXT: %14 = cir.load %3 // CHECK-NEXT: %15 = cir.cmp(eq, %13, %14) -// CHECK-NEXT: cir.ternary(%15, true -// CHECK: %9 = cir.load %0 -// CHECK-NEXT: %10 = cir.load %1 -// CHECK-NEXT: %11 = cir.cmp(eq, %9, %10) -// CHECK-NEXT: %12 = cir.ternary(%11, true { -// CHECK: }, false { -// CHECK-NEXT: %13 = cir.load %2 -// CHECK-NEXT: %14 = cir.load %3 -// CHECK-NEXT: %15 = cir.cmp(eq, %13, %14) -// CHECK-NEXT: %16 = cir.ternary(%15, true +// CHECK-NEXT: cir.yield %15 +// CHECK-NEXT: }, false { +// CHECK-NEXT: %13 = cir.const #false +// CHECK-NEXT: cir.yield %13 void testFloatingPointBinOps(float a, float b) { a * b; diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp index 6475add8e2b4..5c17ef5d1a74 100644 --- a/clang/test/CIR/CodeGen/ternary.cpp +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -12,16 +12,12 @@ int x(int y) { // CHECK: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK: %3 = cir.const #cir.int<0> : !s32i // CHECK: %4 = cir.cmp(gt, %2, %3) : !s32i, !cir.bool -// CHECK: %5 = cir.ternary(%4, true { -// CHECK: %7 = cir.const #cir.int<3> : !s32i -// CHECK: cir.yield %7 : !s32i -// CHECK: }, false { -// CHECK: %7 = cir.const #cir.int<5> : !s32i -// CHECK: cir.yield %7 : !s32i -// CHECK: }) : (!cir.bool) -> !s32i -// CHECK: cir.store %5, %1 : !s32i, !cir.ptr -// CHECK: %6 = cir.load %1 : !cir.ptr, !s32i -// CHECK: cir.return %6 : !s32i +// CHECK: %5 = cir.const #cir.int<3> : !s32i +// CHECK: %6 = cir.const #cir.int<5> : !s32i +// CHECK: %7 = cir.select if %4 then %5 else %6 : (!cir.bool, !s32i, !s32i) -> !s32i +// CHECK: cir.store %7, %1 : !s32i, !cir.ptr +// CHECK: %8 = cir.load %1 : !cir.ptr, !s32i +// CHECK: cir.return %8 : !s32i // CHECK: } typedef enum { diff --git a/clang/test/CIR/Transforms/ternary-fold.cir b/clang/test/CIR/Transforms/ternary-fold.cir new file mode 100644 index 000000000000..6778d4744a32 --- /dev/null +++ b/clang/test/CIR/Transforms/ternary-fold.cir @@ -0,0 +1,60 @@ +// RUN: cir-opt -cir-simplify -o %t.cir %s +// RUN: FileCheck --input-file=%t.cir %s + +!s32i = !cir.int + +module { + cir.func @fold_ternary(%arg0: !s32i, %arg1: !s32i) -> !s32i { + %0 = cir.const #cir.bool : !cir.bool + %1 = cir.ternary (%0, true { + cir.yield %arg0 : !s32i + }, false { + cir.yield %arg1 : !s32i + }) : (!cir.bool) -> !s32i + cir.return %1 : !s32i + } + + // CHECK: cir.func @fold_ternary(%{{.+}}: !s32i, %[[ARG:.+]]: !s32i) -> !s32i { + // CHECK-NEXT: cir.return %[[ARG]] : !s32i + // CHECK-NEXT: } + + cir.func @simplify_ternary(%arg0 : !cir.bool, %arg1 : !s32i) -> !s32i { + %0 = cir.ternary (%arg0, true { + %1 = cir.const #cir.int<42> : !s32i + cir.yield %1 : !s32i + }, false { + cir.yield %arg1 : !s32i + }) : (!cir.bool) -> !s32i + cir.return %0 : !s32i + } + + // CHECK: cir.func @simplify_ternary(%[[ARG0:.+]]: !cir.bool, %[[ARG1:.+]]: !s32i) -> !s32i { + // CHECK-NEXT: %[[#A:]] = cir.const #cir.int<42> : !s32i + // CHECK-NEXT: %[[#B:]] = cir.select if %[[ARG0]] then %[[#A]] else %[[ARG1]] : (!cir.bool, !s32i, !s32i) -> !s32i + // CHECK-NEXT: cir.return %[[#B]] : !s32i + // CHECK-NEXT: } + + cir.func @non_simplifiable_ternary(%arg0 : !cir.bool) -> !s32i { + %0 = cir.alloca !s32i, !cir.ptr, ["a", init] + %1 = cir.ternary (%arg0, true { + %2 = cir.const #cir.int<42> : !s32i + cir.yield %2 : !s32i + }, false { + %3 = cir.load %0 : !cir.ptr, !s32i + cir.yield %3 : !s32i + }) : (!cir.bool) -> !s32i + cir.return %1 : !s32i + } + + // CHECK: cir.func @non_simplifiable_ternary(%[[ARG0:.+]]: !cir.bool) -> !s32i { + // CHECK-NEXT: %[[#A:]] = cir.alloca !s32i, !cir.ptr, ["a", init] + // CHECK-NEXT: %[[#B:]] = cir.ternary(%[[ARG0]], true { + // CHECK-NEXT: %[[#C:]] = cir.const #cir.int<42> : !s32i + // CHECK-NEXT: cir.yield %[[#C]] : !s32i + // CHECK-NEXT: }, false { + // CHECK-NEXT: %[[#D:]] = cir.load %[[#A]] : !cir.ptr, !s32i + // CHECK-NEXT: cir.yield %[[#D]] : !s32i + // CHECK-NEXT: }) : (!cir.bool) -> !s32i + // CHECK-NEXT: cir.return %[[#B]] : !s32i + // CHECK-NEXT: } +} diff --git a/clang/test/CIR/Transforms/ternary-fold.cpp b/clang/test/CIR/Transforms/ternary-fold.cpp new file mode 100644 index 000000000000..5f37a8a36b95 --- /dev/null +++ b/clang/test/CIR/Transforms/ternary-fold.cpp @@ -0,0 +1,56 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-simplify %s -o %t1.cir 2>&1 | FileCheck -check-prefix=CIR-BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-simplify %s -o %t2.cir 2>&1 | FileCheck -check-prefix=CIR-AFTER %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s + +int test(bool x) { + return x ? 1 : 2; +} + +// CIR-BEFORE: cir.func @_Z4testb +// CIR-BEFORE: %{{.+}} = cir.ternary(%{{.+}}, true { +// CIR-BEFORE-NEXT: %[[#A:]] = cir.const #cir.int<1> : !s32i +// CIR-BEFORE-NEXT: cir.yield %[[#A]] : !s32i +// CIR-BEFORE-NEXT: }, false { +// CIR-BEFORE-NEXT: %[[#B:]] = cir.const #cir.int<2> : !s32i +// CIR-BEFORE-NEXT: cir.yield %[[#B]] : !s32i +// CIR-BEFORE-NEXT: }) : (!cir.bool) -> !s32i +// CIR-BEFORE: } + +// CIR-AFTER: cir.func @_Z4testb +// CIR-AFTER: %[[#A:]] = cir.const #cir.int<1> : !s32i +// CIR-AFTER-NEXT: %[[#B:]] = cir.const #cir.int<2> : !s32i +// CIR-AFTER-NEXT: %{{.+}} = cir.select if %{{.+}} then %[[#A]] else %[[#B]] : (!cir.bool, !s32i, !s32i) -> !s32i +// CIR-AFTER: } + +// LLVM: define dso_local i32 @_Z4testb +// LLVM: %{{.+}} = select i1 %{{.+}}, i32 1, i32 2 +// LLVM: } + +int test2(bool cond) { + constexpr int x = 1; + constexpr int y = 2; + return cond ? x : y; +} + +// CIR-BEFORE: cir.func @_Z5test2b +// CIR-BEFORE: %[[#COND:]] = cir.load %{{.+}} : !cir.ptr, !cir.bool +// CIR-BEFORE-NEXT: %{{.+}} = cir.ternary(%[[#COND]], true { +// CIR-BEFORE-NEXT: %[[#A:]] = cir.const #cir.int<1> : !s32i +// CIR-BEFORE-NEXT: cir.yield %[[#A]] : !s32i +// CIR-BEFORE-NEXT: }, false { +// CIR-BEFORE-NEXT: %[[#B:]] = cir.const #cir.int<2> : !s32i +// CIR-BEFORE-NEXT: cir.yield %[[#B]] : !s32i +// CIR-BEFORE-NEXT: }) : (!cir.bool) -> !s32i +// CIR-BEFORE: } + +// CIR-AFTER: cir.func @_Z5test2b +// CIR-AFTER: %[[#COND:]] = cir.load %{{.+}} : !cir.ptr, !cir.bool +// CIR-AFTER-NEXT: %[[#A:]] = cir.const #cir.int<1> : !s32i +// CIR-AFTER-NEXT: %[[#B:]] = cir.const #cir.int<2> : !s32i +// CIR-AFTER-NEXT: %{{.+}} = cir.select if %[[#COND]] then %[[#A]] else %[[#B]] : (!cir.bool, !s32i, !s32i) -> !s32i +// CIR-AFTER: } + +// LLVM: define dso_local i32 @_Z5test2b +// LLVM: %{{.+}} = select i1 %{{.+}}, i32 1, i32 2 +// LLVM: } From 3e3cec5c7b80c0e28d6a51b39c30dfff6017f0ba Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Wed, 11 Sep 2024 09:55:52 -0700 Subject: [PATCH 1818/2301] [CIR][CIRGen] Support a defined pure virtual destructor (#825) This is permitted by the language, and IRGen emits traps for destructors other than the base object destructor. Make CIRGen follow suit. --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 9 ++- .../CIR/CodeGen/defined-pure-virtual-func.cpp | 58 +++++++++++++++++++ 2 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/defined-pure-virtual-func.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 6ca6711dbbb3..6b2be0b7551c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1109,7 +1109,14 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { // in fact emit references to them from other compilations, so emit them // as functions containing a trap instruction. if (DtorType != Dtor_Base && Dtor->getParent()->isAbstract()) { - llvm_unreachable("NYI"); + SourceLocation Loc = + Dtor->hasBody() ? Dtor->getBody()->getBeginLoc() : Dtor->getLocation(); + builder.create(getLoc(Loc)); + // The corresponding clang/CodeGen logic clears the insertion point here, + // but MLIR's builder requires a valid insertion point, so we create a dummy + // block (since the trap is a block terminator). + builder.createBlock(builder.getBlock()->getParent()); + return; } Stmt *Body = Dtor->getBody(); diff --git a/clang/test/CIR/CodeGen/defined-pure-virtual-func.cpp b/clang/test/CIR/CodeGen/defined-pure-virtual-func.cpp new file mode 100644 index 000000000000..86e46ee503bb --- /dev/null +++ b/clang/test/CIR/CodeGen/defined-pure-virtual-func.cpp @@ -0,0 +1,58 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// Pure virtual functions are allowed to be defined, but the vtable should still +// point to __cxa_pure_virtual instead of the definition. For destructors, the +// base object destructor (which is not included in the vtable) should be +// defined as usual. The complete object destructors and deleting destructors +// should contain a trap, and the vtable entries for them should point to +// __cxa_pure_virtual. +class C { + C(); + virtual ~C() = 0; + virtual void pure() = 0; +}; + +C::C() = default; +C::~C() = default; +void C::pure() {} + +// CHECK: @_ZTV1C = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1C> : !cir.ptr +// complete object destructor (D1) +// CHECK-SAME: #cir.global_view<@__cxa_pure_virtual> : !cir.ptr, +// deleting destructor (D0) +// CHECK-SAME: #cir.global_view<@__cxa_pure_virtual> : !cir.ptr, +// C::pure +// CHECK-SAME: #cir.global_view<@__cxa_pure_virtual> : !cir.ptr]> + +// The base object destructor should be emitted as normal. +// CHECK-LABEL: cir.func @_ZN1CD2Ev(%arg0: !cir.ptr loc({{[^)]+}})) {{.*}} { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// The complete object destructor should trap. +// CHECK-LABEL: cir.func @_ZN1CD1Ev(%arg0: !cir.ptr loc({{[^)]+}})) {{.*}} { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.trap +// CHECK-NEXT: } + +// The deleting destructor should trap. +// CHECK-LABEL: cir.func @_ZN1CD0Ev(%arg0: !cir.ptr loc({{[^)]+}})) {{.*}} { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.trap +// CHECK-NEXT: } + +// C::pure should be emitted as normal. +// CHECK-LABEL: cir.func @_ZN1C4pureEv(%arg0: !cir.ptr loc({{[^)]+}})) {{.*}} { +// CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.return +// CHECK-NEXT: } From e10159a0e6f56dd667fbffd98103c52457b7db58 Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 12 Sep 2024 00:57:09 +0800 Subject: [PATCH 1819/2301] [CIR][Dialect] Add calling convention attribute to cir.call op (#828) The first patch to fix #803 . This PR adds the calling convention attribute to CallOp directly, which is similar to LLVM, rather than adding the information to function type, which mimics Clang AST function type. The syntax of it in CIR assembly is between the function type and extra attributes, as follows: ```mlir %1 = cir.call %fnptr(%a) : (!fnptr, !s32i) -> !s32i cc(spir_kernel) extra(#fn_attr) ``` The verification of direct calls is not included. It will be included in the next patch extending CIRGen & Lowering. --- For every builder method of Call Op, an optional parameter `callingConv` is inserted right before the parameter of extra attribute. However, apart from the parser / printer, this PR does not introduce any functional changes. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 64 ++++++++++--------- clang/include/clang/CIR/Dialect/IR/CIROps.td | 19 +++++- .../clang/CIR/Interfaces/CIROpInterfaces.td | 3 + clang/lib/CIR/CodeGen/CIRGenCall.cpp | 6 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 27 +++++++- clang/test/CIR/IR/call-op-call-conv.cir | 27 ++++++++ 6 files changed, 110 insertions(+), 36 deletions(-) create mode 100644 clang/test/CIR/IR/call-op-call-conv.cir diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index a458547d330d..ed7d13588ec2 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -622,10 +622,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), mlir::Type returnType = mlir::cir::VoidType(), mlir::ValueRange operands = mlir::ValueRange(), + mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { - mlir::cir::CallOp callOp = - create(loc, callee, returnType, operands); + mlir::cir::CallOp callOp = create( + loc, callee, returnType, operands, callingConv); if (extraFnAttr) { callOp->setAttr("extra_attrs", extraFnAttr); @@ -641,41 +642,44 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::cir::CallOp createCallOp(mlir::Location loc, mlir::cir::FuncOp callee, mlir::ValueRange operands = mlir::ValueRange(), + mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { return createCallOp(loc, mlir::SymbolRefAttr::get(callee), callee.getFunctionType().getReturnType(), operands, - extraFnAttr); + callingConv, extraFnAttr); } - mlir::cir::CallOp - createIndirectCallOp(mlir::Location loc, mlir::Value ind_target, - mlir::cir::FuncType fn_type, - mlir::ValueRange operands = mlir::ValueRange(), - mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + mlir::cir::CallOp createIndirectCallOp( + mlir::Location loc, mlir::Value ind_target, mlir::cir::FuncType fn_type, + mlir::ValueRange operands = mlir::ValueRange(), + mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, + mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { llvm::SmallVector resOperands({ind_target}); resOperands.append(operands.begin(), operands.end()); return createCallOp(loc, mlir::SymbolRefAttr(), fn_type.getReturnType(), - resOperands, extraFnAttr); + resOperands, callingConv, extraFnAttr); } mlir::cir::CallOp createCallOp(mlir::Location loc, mlir::SymbolRefAttr callee, mlir::ValueRange operands = mlir::ValueRange(), + mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { return createCallOp(loc, callee, mlir::cir::VoidType(), operands, - extraFnAttr); - } - - mlir::cir::CallOp - createTryCallOp(mlir::Location loc, - mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), - mlir::Type returnType = mlir::cir::VoidType(), - mlir::ValueRange operands = mlir::ValueRange(), - mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { - mlir::cir::CallOp tryCallOp = create( - loc, callee, returnType, operands, getUnitAttr()); + callingConv, extraFnAttr); + } + + mlir::cir::CallOp createTryCallOp( + mlir::Location loc, mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), + mlir::Type returnType = mlir::cir::VoidType(), + mlir::ValueRange operands = mlir::ValueRange(), + mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, + mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + mlir::cir::CallOp tryCallOp = + create(loc, callee, returnType, operands, + callingConv, /*exception=*/getUnitAttr()); if (extraFnAttr) { tryCallOp->setAttr("extra_attrs", extraFnAttr); } else { @@ -687,23 +691,23 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return tryCallOp; } - mlir::cir::CallOp - createTryCallOp(mlir::Location loc, mlir::cir::FuncOp callee, - mlir::ValueRange operands, - mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + mlir::cir::CallOp createTryCallOp( + mlir::Location loc, mlir::cir::FuncOp callee, mlir::ValueRange operands, + mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, + mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { return createTryCallOp(loc, mlir::SymbolRefAttr::get(callee), callee.getFunctionType().getReturnType(), operands, - extraFnAttr); + callingConv, extraFnAttr); } - mlir::cir::CallOp createIndirectTryCallOp(mlir::Location loc, - mlir::Value ind_target, - mlir::cir::FuncType fn_type, - mlir::ValueRange operands) { + mlir::cir::CallOp createIndirectTryCallOp( + mlir::Location loc, mlir::Value ind_target, mlir::cir::FuncType fn_type, + mlir::ValueRange operands, + mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C) { llvm::SmallVector resOperands({ind_target}); resOperands.append(operands.begin(), operands.end()); return createTryCallOp(loc, mlir::SymbolRefAttr(), fn_type.getReturnType(), - resOperands); + resOperands, callingConv); } struct GetMethodResults { diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d2a5f393de6d..4f0f8cc3ce0a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3208,6 +3208,7 @@ class CIR_CallOp extra_traits = []> : dag commonArgs = (ins OptionalAttr:$callee, Variadic:$arg_ops, + DefaultValuedAttr:$calling_conv, ExtraFuncAttr:$extra_attrs, OptionalAttr:$ast ); @@ -3251,10 +3252,13 @@ def CallOp : CIR_CallOp<"call"> { let builders = [ OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType, CArg<"ValueRange", "{}">:$operands, + CArg<"CallingConv", "CallingConv::C">:$callingConv, CArg<"UnitAttr", "{}">:$exception), [{ $_state.addOperands(operands); if (callee) $_state.addAttribute("callee", callee); + $_state.addAttribute("calling_conv", + CallingConvAttr::get($_builder.getContext(), callingConv)); if (exception) $_state.addAttribute("exception", exception); if (resType && !isa(resType)) @@ -3263,11 +3267,14 @@ def CallOp : CIR_CallOp<"call"> { OpBuilder<(ins "Value":$ind_target, "FuncType":$fn_type, CArg<"ValueRange", "{}">:$operands, + CArg<"CallingConv", "CallingConv::C">:$callingConv, CArg<"UnitAttr", "{}">:$exception), [{ $_state.addOperands(ValueRange{ind_target}); $_state.addOperands(operands); if (!fn_type.isVoid()) $_state.addTypes(fn_type.getReturnType()); + $_state.addAttribute("calling_conv", + CallingConvAttr::get($_builder.getContext(), callingConv)); if (exception) $_state.addAttribute("exception", exception); }]> @@ -3307,13 +3314,17 @@ def TryCallOp : CIR_CallOp<"try_call", "Block *":$cont, "Block *":$landing_pad, CArg<"ValueRange", "{}">:$operands, CArg<"ValueRange", "{}">:$contOperands, - CArg<"ValueRange", "{}">:$landingPadOperands), [{ + CArg<"ValueRange", "{}">:$landingPadOperands, + CArg<"CallingConv", "CallingConv::C">:$callingConv), [{ $_state.addOperands(operands); if (callee) $_state.addAttribute("callee", callee); if (resType && !isa(resType)) $_state.addTypes(resType); + $_state.addAttribute("calling_conv", + CallingConvAttr::get($_builder.getContext(), callingConv)); + // Handle branches $_state.addOperands(contOperands); $_state.addOperands(landingPadOperands); @@ -3332,7 +3343,8 @@ def TryCallOp : CIR_CallOp<"try_call", "Block *":$cont, "Block *":$landing_pad, CArg<"ValueRange", "{}">:$operands, CArg<"ValueRange", "{}">:$contOperands, - CArg<"ValueRange", "{}">:$landingPadOperands), [{ + CArg<"ValueRange", "{}">:$landingPadOperands, + CArg<"CallingConv", "CallingConv::C">:$callingConv), [{ ::llvm::SmallVector finalCallOperands({ind_target}); finalCallOperands.append(operands.begin(), operands.end()); $_state.addOperands(finalCallOperands); @@ -3340,6 +3352,9 @@ def TryCallOp : CIR_CallOp<"try_call", if (!fn_type.isVoid()) $_state.addTypes(fn_type.getReturnType()); + $_state.addAttribute("calling_conv", + CallingConvAttr::get($_builder.getContext(), callingConv)); + // Handle branches $_state.addOperands(contOperands); $_state.addOperands(landingPadOperands); diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td index 09b09e8467f3..fd9c20687c3a 100644 --- a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td @@ -34,6 +34,9 @@ let cppNamespace = "::mlir::cir" in { "Return the number of operands, accounts for indirect call or " "exception info", "unsigned", "getNumArgOperands", (ins)>, + InterfaceMethod< + "Return the calling convention of the call operation", + "mlir::cir::CallingConv", "getCallingConv", (ins)>, ]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 5fb92b012623..28ab4ca7cc59 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -487,8 +487,10 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, assert(builder.getInsertionBlock() && "expected valid basic block"); if (indirectFuncTy) return builder.createIndirectCallOp( - callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs, extraFnAttrs); - return builder.createCallOp(callLoc, directFuncOp, CIRCallArgs, extraFnAttrs); + callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs, + mlir::cir::CallingConv::C, extraFnAttrs); + return builder.createCallOp(callLoc, directFuncOp, CIRCallArgs, + mlir::cir::CallingConv::C, extraFnAttrs); } RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d44896ae999e..81f3e932a605 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2874,6 +2874,18 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, return ::mlir::failure(); } + if (parser.parseOptionalKeyword("cc").succeeded()) { + if (parser.parseLParen().failed()) + return failure(); + mlir::cir::CallingConv callingConv; + if (parseCIRKeyword(parser, callingConv).failed()) + return failure(); + if (parser.parseRParen().failed()) + return failure(); + result.addAttribute("calling_conv", mlir::cir::CallingConvAttr::get( + builder.getContext(), callingConv)); + } + return ::mlir::success(); } @@ -2881,6 +2893,7 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, mlir::FlatSymbolRefAttr flatSym, ::mlir::OpAsmPrinter &state, ::mlir::cir::ExtraFuncAttributesAttr extraAttrs, + ::mlir::cir::CallingConv callingConv, ::mlir::UnitAttr exception = {}, mlir::Block *cont = nullptr, mlir::Block *landingPad = nullptr) { @@ -2932,6 +2945,7 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, elidedAttrs.push_back("callee"); elidedAttrs.push_back("ast"); elidedAttrs.push_back("extra_attrs"); + elidedAttrs.push_back("calling_conv"); elidedAttrs.push_back("exception"); elidedAttrs.push_back("operandSegmentSizes"); @@ -2939,6 +2953,13 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, state << ' ' << ":"; state << ' '; state.printFunctionalType(op->getOperands().getTypes(), op->getResultTypes()); + + if (callingConv != mlir::cir::CallingConv::C) { + state << " cc("; + state << stringifyCallingConv(callingConv); + state << ")"; + } + if (!extraAttrs.getElements().empty()) { state << " extra("; state.printAttributeWithoutType(extraAttrs); @@ -2959,9 +2980,10 @@ ::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, void CallOp::print(::mlir::OpAsmPrinter &state) { mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; + mlir::cir::CallingConv callingConv = getCallingConv(); mlir::UnitAttr exception = getExceptionAttr(); printCallCommon(*this, indirectCallee, getCalleeAttr(), state, - getExtraAttrs(), exception); + getExtraAttrs(), callingConv, exception); } //===----------------------------------------------------------------------===// @@ -3010,8 +3032,9 @@ ::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, void TryCallOp::print(::mlir::OpAsmPrinter &state) { mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; + mlir::cir::CallingConv callingConv = getCallingConv(); printCallCommon(*this, indirectCallee, getCalleeAttr(), state, - getExtraAttrs(), {}, getCont(), getLandingPad()); + getExtraAttrs(), callingConv, {}, getCont(), getLandingPad()); } mlir::SuccessorOperands TryCallOp::getSuccessorOperands(unsigned index) { diff --git a/clang/test/CIR/IR/call-op-call-conv.cir b/clang/test/CIR/IR/call-op-call-conv.cir new file mode 100644 index 000000000000..f97dad73b8ff --- /dev/null +++ b/clang/test/CIR/IR/call-op-call-conv.cir @@ -0,0 +1,27 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +!s32i = !cir.int +!fnptr = !cir.ptr)>> + +module { + cir.func @my_add(%a: !s32i, %b: !s32i) -> !s32i cc(spir_function) { + %c = cir.binop(add, %a, %b) : !s32i + cir.return %c : !s32i + } + + cir.func @ind(%fnptr: !fnptr, %a : !s32i) { + %1 = cir.call %fnptr(%a) : (!fnptr, !s32i) -> !s32i cc(spir_kernel) + %2 = cir.call %fnptr(%a) : (!fnptr, !s32i) -> !s32i cc(spir_function) + + %3 = cir.try_call @my_add(%1, %2) ^continue, ^landing_pad : (!s32i, !s32i) -> !s32i cc(spir_function) + ^continue: + cir.br ^landing_pad + ^landing_pad: + cir.return + } +} + +// CHECK: %{{[0-9]+}} = cir.call %arg0(%arg1) : (!cir.ptr)>>, !s32i) -> !s32i cc(spir_kernel) +// CHECK: %{{[0-9]+}} = cir.call %arg0(%arg1) : (!cir.ptr)>>, !s32i) -> !s32i cc(spir_function) +// CHECK: %{{[0-9]+}} = cir.try_call @my_add(%{{[0-9]+}}, %{{[0-9]+}}) ^{{.+}}, ^{{.+}} : (!s32i, !s32i) -> !s32i cc(spir_function) From bf6bc28c15882751350b70e2214cbe0ca96ab9c1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 10 Sep 2024 19:01:13 -0700 Subject: [PATCH 1820/2301] [CIR][CIRGen] Exception: get scope order right for try/catch with cleanup --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenException.cpp | 19 +++++++------ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 32 ++++++++++++++++++++++ 4 files changed, 45 insertions(+), 9 deletions(-) create mode 100644 clang/test/CIR/CodeGen/try-catch-dtors.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 0296a8cf82e5..5957b35564b1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -270,6 +270,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { destroyOptimisticNormalEntry(*this, Scope); EHStack.popCleanup(); + Scope.markEmitted(); buildCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); // Otherwise, the best approach is to thread everything through diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 6ded1418b382..8d33af65eb84 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -346,7 +346,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { // don't populate right away. Reserve some space to store the exception // info but don't emit the bulk right away, for now only make sure the // scope returns the exception information. - auto tryScope = builder.create( + auto tryOp = builder.create( tryLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { beginInsertTryBody = getBuilder().saveInsertionPoint(); @@ -370,20 +370,23 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { // Finally emit the body for try/catch. auto emitTryCatchBody = [&]() -> mlir::LogicalResult { - auto loc = tryScope.getLoc(); + auto loc = tryOp.getLoc(); mlir::OpBuilder::InsertionGuard guard(getBuilder()); getBuilder().restoreInsertionPoint(beginInsertTryBody); - CIRGenFunction::LexicalScope lexScope{*this, loc, + CIRGenFunction::LexicalScope tryScope{*this, loc, getBuilder().getInsertionBlock()}; { - lexScope.setAsTry(tryScope); + tryScope.setAsTry(tryOp); // Attach the basic blocks for the catch regions. - enterCXXTryStmt(S, tryScope); + enterCXXTryStmt(S, tryOp); // Emit the body for the `try {}` part. - if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) - return mlir::failure(); - getBuilder().create(loc); + { + CIRGenFunction::LexicalScope tryBodyScope{ + *this, loc, getBuilder().getInsertionBlock()}; + if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) + return mlir::failure(); + } } { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index cd6993d9f3d6..0c7ac712284d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -484,7 +484,7 @@ mlir::cir::TryOp CIRGenFunction::LexicalScope::getClosestTryParent() { auto *scope = this; while (scope) { if (scope->isTry()) - return getTry(); + return scope->getTry(); scope = scope->ParentScope; } return nullptr; diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp new file mode 100644 index 000000000000..12640542b07f --- /dev/null +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +struct Vec { + Vec(); + Vec(Vec&&); + ~Vec(); +}; + +void yo() { + int r = 1; + try { + Vec v; + } catch (...) { + r++; + } +} + +// CIR-DAG: ![[VecTy:.*]] = !cir.struct + +// CIR: cir.scope { +// CIR: %[[VADDR:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v", init] +// CIR: cir.try { +// CIR: cir.call exception @_ZN3VecC1Ev(%[[VADDR]]) : (!cir.ptr) -> () +// CIR: cir.call @_ZN3VecD1Ev(%[[VADDR]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } cleanup { +// CIR: } catch [type #cir.all { +// CIR: cir.catch_param -> !cir.ptr +// CIR: }] +// CIR: } +// CIR: cir.return \ No newline at end of file From f0921686e0570158b5db39d3e2bbc9dc9f60da8d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 11 Sep 2024 11:20:21 -0700 Subject: [PATCH 1821/2301] [CIR][LowerToLLVM] Exceptions: llvm.zero needed for landingpad should go into entry block --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 +++ clang/test/CIR/CodeGen/try-catch-dtors.cpp | 31 ++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f96ddd1f7ba5..545d0f1a7603 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -969,6 +969,8 @@ class CIREhInflightOpLowering assert(entryBlock->isEntryBlock()); // %x = landingpad { ptr, i32 } + // Note that since llvm.landingpad has to be the first operation on the + // block, any needed value for its operands has to be added somewhere else. if (symListAttr) { // catch ptr @_ZTIi // catch ptr @_ZTIPKc @@ -986,6 +988,8 @@ class CIREhInflightOpLowering } else { if (!op.getCleanup()) { // catch ptr null + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPointToStart(entryBlock); mlir::Value nullOp = rewriter.create( loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext())); symAddrs.push_back(nullOp); diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 12640542b07f..f8f3e8f943df 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s struct Vec { Vec(); @@ -29,4 +31,31 @@ void yo() { // CIR: cir.catch_param -> !cir.ptr // CIR: }] // CIR: } -// CIR: cir.return \ No newline at end of file +// CIR: cir.return + +// LLVM-LABEL: @_Z2yov() + +// LLVM: 2: +// LLVM: %[[Vec:.*]] = alloca %struct.Vec +// LLVM: br label %[[INVOKE_BB:.*]], + +// LLVM: [[INVOKE_BB]]: +// LLVM: invoke void @_ZN3VecC1Ev(ptr %[[Vec]]) +// LLVM: to label %[[DTOR_BB:.*]] unwind label %[[LPAD_BB:.*]], + +// LLVM: [[DTOR_BB]]: +// LLVM: call void @_ZN3VecD1Ev(ptr %[[Vec]]) +// LLVM: br label %15 + +// LLVM: [[LPAD_BB]]: +// LLVM: landingpad { ptr, i32 } +// LLVM: catch ptr null +// LLVM: br label %[[CATCH_BB:.*]], + +// LLVM: [[CATCH_BB]]: +// LLVM: call ptr @__cxa_begin_catch +// LLVM: call void @__cxa_end_catch() +// LLVM: br label %[[RET_BB:.*]], + +// LLVM: [[RET_BB]]: +// LLVM: ret void \ No newline at end of file From 83f744711fa3cdaa5fae1c7f228c973e33dcd4a7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 11 Sep 2024 14:55:48 -0700 Subject: [PATCH 1822/2301] [CIR][CIRGen] Exceptions: support nested scope cleanup FlattenCFG will soon get the necessary support for lowering to LLVM, this is CIRGen only for now. --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 3 +- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 46 ++++++++++++++++++++-- 2 files changed, 45 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 5957b35564b1..6147c0285971 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -327,8 +327,9 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // active or was used before it was deactivated. if (EHActiveFlag.isValid() || IsActive) { cleanupFlags.setIsForEHCleanup(); - assert(tryOp.isCleanupActive() && "expected active cleanup"); mlir::OpBuilder::InsertionGuard guard(builder); + if (!tryOp.isCleanupActive()) + builder.createBlock(&tryOp.getCleanupRegion()); mlir::Block *cleanup = &tryOp.getCleanupRegion().back(); if (cleanup->empty()) { builder.setInsertionPointToEnd(cleanup); diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index f8f3e8f943df..2d8db9867867 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -DLLVM_IMPLEMENTED -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s struct Vec { @@ -19,6 +19,7 @@ void yo() { } // CIR-DAG: ![[VecTy:.*]] = !cir.struct +// CIR-DAG: ![[S1:.*]] = !cir.struct // CIR: cir.scope { // CIR: %[[VADDR:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v", init] @@ -58,4 +59,43 @@ void yo() { // LLVM: br label %[[RET_BB:.*]], // LLVM: [[RET_BB]]: -// LLVM: ret void \ No newline at end of file +// LLVM: ret void + +#ifndef LLVM_IMPLEMENTED +struct S1 { + Vec v; +}; + +void yo2() { + int r = 1; + try { + Vec v; + S1((Vec&&) v); + } catch (...) { + r++; + } +} +#endif + +// CIR: cir.func @_Z3yo2v() +// CIR: cir.scope { +// CIR: cir.alloca ![[VecTy]] +// CIR: cir.try { +// CIR: cir.call exception @_ZN3VecC1Ev +// CIR: cir.scope { +// CIR: cir.alloca ![[S1:.*]], !cir.ptr, ["agg.tmp.ensured"] +// CIR: cir.call exception @_ZN3VecC1EOS_ +// CIR: cir.call @_ZN2S1D2Ev +// CIR: } +// CIR: cir.call @_ZN3VecD1Ev +// CIR: cir.yield +// CIR: } cleanup { +// CIR: cir.call @_ZN3VecD1Ev +// CIR: cir.yield +// CIR: } catch [type #cir.all { +// CIR: cir.catch_param -> !cir.ptr +// CIR: cir.yield +// CIR: }] +// CIR: } +// CIR: cir.return +// CIR: } From 130a08afe4ee3b566f79aef96e98b97b9d78353e Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 13 Sep 2024 07:48:47 +0800 Subject: [PATCH 1823/2301] [CIR][CodeGen][NFCI] Unify attribute list handling of func / call by `constructAttributeList` (#831) Similar to #830 , this PR completes the `setCIRFunctionAttributes` part with the call to `constructAttributeList` method, so that func op and call op share the logic of handling these kinds of attributes, which is the design of OG CodeGen. It also includes other refactors. The function `constructAttributeList` now use `mlir::NamedAttrList &` rather than immutable attribute `mlir::DictionaryAttr &` as the inout result parameter, which benefits the additive merging of attributes. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 36 ++++++++++++---------- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 +++++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 7 +++-- clang/test/CIR/CodeGen/delegating-ctor.cpp | 2 +- 4 files changed, 33 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 28ab4ca7cc59..656c80ce6f46 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -334,16 +334,17 @@ static void AddAttributesFromFunctionProtoType(CIRGenBuilderTy &builder, /// target-configuration logic, as well as for code defined in library /// modules such as CUDA's libdevice. /// -/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes +/// - constructAttributeList builds on top of getDefaultFunctionAttributes /// and adds declaration-specific, convention-specific, and /// frontend-specific logic. The last is of particular importance: /// attributes that restrict how the frontend generates code must be /// added here rather than getDefaultFunctionAttributes. /// -void CIRGenModule::ConstructAttributeList(StringRef Name, +void CIRGenModule::constructAttributeList(StringRef Name, const CIRGenFunctionInfo &FI, CIRGenCalleeInfo CalleeInfo, - mlir::DictionaryAttr &Attrs, + mlir::NamedAttrList &funcAttrs, + mlir::cir::CallingConv &callingConv, bool AttrOnCallSite, bool IsThunk) { // Implementation Disclaimer // @@ -355,13 +356,13 @@ void CIRGenModule::ConstructAttributeList(StringRef Name, // That said, for the most part, the approach here is very specific compared // to the rest of CIRGen and attributes and other handling should be done upon // demand. - mlir::NamedAttrList FuncAttrs; // Collect function CIR attributes from the CC lowering. + callingConv = FI.getEffectiveCallingConvention(); // TODO: NoReturn, cmse_nonsecure_call // Collect function CIR attributes from the callee prototype if we have one. - AddAttributesFromFunctionProtoType(getBuilder(), astCtx, FuncAttrs, + AddAttributesFromFunctionProtoType(getBuilder(), astCtx, funcAttrs, CalleeInfo.getCalleeFunctionProtoType()); const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl(); @@ -378,12 +379,12 @@ void CIRGenModule::ConstructAttributeList(StringRef Name, if (TargetDecl->hasAttr()) { auto nu = mlir::cir::NoThrowAttr::get(builder.getContext()); - FuncAttrs.set(nu.getMnemonic(), nu); + funcAttrs.set(nu.getMnemonic(), nu); } if (const FunctionDecl *Fn = dyn_cast(TargetDecl)) { AddAttributesFromFunctionProtoType( - getBuilder(), astCtx, FuncAttrs, + getBuilder(), astCtx, funcAttrs, Fn->getType()->getAs()); if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) { // A sane operator new returns a non-aliasing pointer. @@ -439,8 +440,6 @@ void CIRGenModule::ConstructAttributeList(StringRef Name, if (TargetDecl->hasAttr()) ; } - - Attrs = mlir::DictionaryAttr::get(builder.getContext(), FuncAttrs); } static mlir::cir::CIRCallOpInterface @@ -679,11 +678,14 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: Update the largest vector width if any arguments have vector types. // Compute the calling convention and attributes. - mlir::DictionaryAttr Attrs; + mlir::NamedAttrList Attrs; StringRef FnName; if (auto calleeFnOp = dyn_cast(CalleePtr)) FnName = calleeFnOp.getName(); - CGM.ConstructAttributeList(FnName, CallInfo, Callee.getAbstractInfo(), Attrs, + + mlir::cir::CallingConv callingConv; + CGM.constructAttributeList(FnName, CallInfo, Callee.getAbstractInfo(), Attrs, + callingConv, /*AttrOnCallSite=*/true, /*IsThunk=*/false); @@ -716,7 +718,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, } else { // Otherwise, nounwind call sites will never throw. auto noThrowAttr = mlir::cir::NoThrowAttr::get(builder.getContext()); - CannotThrow = Attrs.contains(noThrowAttr.getMnemonic()); + CannotThrow = Attrs.getNamed(noThrowAttr.getMnemonic()).has_value(); if (auto fptr = dyn_cast(CalleePtr)) if (fptr.getExtraAttrs().getElements().contains( @@ -760,10 +762,12 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, indirectFuncVal = CalleePtr->getResult(0); } - mlir::cir::CIRCallOpInterface callLikeOp = buildCallLikeOp( - *this, callLoc, indirectFuncTy, indirectFuncVal, directFuncOp, - CIRCallArgs, InvokeDest, - mlir::cir::ExtraFuncAttributesAttr::get(builder.getContext(), Attrs)); + auto extraFnAttrs = mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), Attrs.getDictionary(builder.getContext())); + + mlir::cir::CIRCallOpInterface callLikeOp = + buildCallLikeOp(*this, callLoc, indirectFuncTy, indirectFuncVal, + directFuncOp, CIRCallArgs, InvokeDest, extraFnAttrs); if (E) callLikeOp->setAttr( diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 323e885f4fed..8318dbf32eb7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2423,9 +2423,14 @@ void CIRGenModule::setCIRFunctionAttributes(GlobalDecl GD, mlir::cir::FuncOp func, bool isThunk) { // TODO(cir): More logic of constructAttributeList is needed. - // NOTE(cir): Here we only need CallConv, so a call to constructAttributeList - // is omitted for simplicity. - mlir::cir::CallingConv callingConv = info.getEffectiveCallingConvention(); + mlir::cir::CallingConv callingConv; + + // Initialize PAL with existing attributes to merge attributes. + mlir::NamedAttrList PAL{func.getExtraAttrs().getElements().getValue()}; + constructAttributeList(func.getName(), info, GD, PAL, callingConv, + /*AttrOnCallSite=*/false, isThunk); + func.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), PAL.getDictionary(builder.getContext()))); // TODO(cir): Check X86_VectorCall incompatibility with WinARM64EC diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 1be8f3f6b32d..daa74d85ccea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -274,10 +274,11 @@ class CIRGenModule : public CIRGenTypeCache { /// constructed for. If valid, the attributes applied to this decl may /// contribute to the function attributes and calling convention. /// \param Attrs [out] - On return, the attribute list to use. - void ConstructAttributeList(StringRef Name, const CIRGenFunctionInfo &Info, + void constructAttributeList(StringRef Name, const CIRGenFunctionInfo &Info, CIRGenCalleeInfo CalleeInfo, - mlir::DictionaryAttr &Attrs, bool AttrOnCallSite, - bool IsThunk); + mlir::NamedAttrList &Attrs, + mlir::cir::CallingConv &callingConv, + bool AttrOnCallSite, bool IsThunk); /// Will return a global variable of the given type. If a variable with a /// different type already exists then a new variable with the right type diff --git a/clang/test/CIR/CodeGen/delegating-ctor.cpp b/clang/test/CIR/CodeGen/delegating-ctor.cpp index 850c0aac9d6d..3c64d76df2cd 100644 --- a/clang/test/CIR/CodeGen/delegating-ctor.cpp +++ b/clang/test/CIR/CodeGen/delegating-ctor.cpp @@ -38,7 +38,7 @@ DelegatingWithZeroing::DelegatingWithZeroing(int) : DelegatingWithZeroing() {} // CHECK-NEXT: %2 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK-NEXT: %3 = cir.const #cir.zero : !ty_DelegatingWithZeroing // CHECK-NEXT: cir.store %3, %2 : !ty_DelegatingWithZeroing, !cir.ptr -// CHECK-NEXT: cir.call @_ZN21DelegatingWithZeroingC2Ev(%2) : (!cir.ptr) -> () extra(#fn_attr1) +// CHECK-NEXT: cir.call @_ZN21DelegatingWithZeroingC2Ev(%2) : (!cir.ptr) -> () extra(#fn_attr{{[0-9]*}}) // CHECK-NEXT: cir.return // CHECK-NEXT: } From 14d187203d215be87959ef77fcefd55580845081 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 13 Sep 2024 07:56:18 +0800 Subject: [PATCH 1824/2301] [CIR][CodeGen] Refactor `setExtraAttributesForFunc` to better align with OG (#830) Previously the body of `setExtraAttributesForFunc` corresponds to `SetLLVMFunctionAttributesForDefinition`, but the callsite of it does not reside at the right position. This PR rename it and adjust the calls to it following OG CodeGen. To be specific, `setExtraAttributesForFunc` is called right after the initialization of `FuncOp`. But in OG CodeGen, the list of attributes is constructed by several more functions: `SetLLVMFunctionAttributes` and `SetLLVMFunctionAttributesForDefinition`. This results in diff in attributes of function declarations, which is reflected by the changes of test files. Apart from them, there is no functional change. In other words, the two code path calling `setCIRFunctionAttributesForDefinition` are tested by existing tests: * Caller `buildGlobalFunctionDefinition`: tested by `CIR/CodeGen/function-attrs.cpp`, ... * Caller `codegenCXXStructor`: tested by `CIR/CodeGen/delegating-ctor.cpp`, `defined-pure-virtual-func.cpp`, ... --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 34 ++++++++++--------- clang/lib/CIR/CodeGen/CIRGenModule.h | 10 +++--- clang/test/CIR/CodeGen/attributes.c | 2 +- clang/test/CIR/CodeGen/visibility-attribute.c | 6 ++-- 5 files changed, 29 insertions(+), 25 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 85013189fd7e..d73222f5b79f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -287,7 +287,7 @@ mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { CurCGF = nullptr; setNonAliasAttributes(GD, Fn); - // TODO: SetLLVMFunctionAttributesForDefinition + setCIRFunctionAttributesForDefinition(cast(GD.getDecl()), Fn); return Fn; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 8318dbf32eb7..2bbd5e0fe796 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -588,7 +588,7 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, CurCGF = nullptr; setNonAliasAttributes(GD, Op); - // TODO: SetLLVMFunctionAttributesForDeclaration + setCIRFunctionAttributesForDefinition(D, Fn); if (const ConstructorAttr *CA = D->getAttr()) AddGlobalCtor(Fn, CA->getPriority()); @@ -2265,7 +2265,9 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, mlir::SymbolTable::setSymbolVisibility( f, mlir::SymbolTable::Visibility::Private); - setExtraAttributesForFunc(f, FD); + // Initialize with empty dict of extra attributes. + f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), builder.getDictionaryAttr({}))); if (!curCGF) theModule.push_back(f); @@ -2334,16 +2336,16 @@ static bool hasUnwindExceptions(const LangOptions &LangOpts) { return true; } -void CIRGenModule::setExtraAttributesForFunc(FuncOp f, - const clang::FunctionDecl *FD) { - mlir::NamedAttrList attrs; +void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, + FuncOp f) { + mlir::NamedAttrList attrs{f.getExtraAttrs().getElements().getValue()}; if (!hasUnwindExceptions(getLangOpts())) { auto attr = mlir::cir::NoThrowAttr::get(builder.getContext()); attrs.set(attr.getMnemonic(), attr); } - if (!FD) { + if (!decl) { // If we don't have a declaration to control inlining, the function isn't // explicitly marked as alwaysinline for semantic reasons, and inlining is // disabled, mark the function as noinline. @@ -2352,12 +2354,12 @@ void CIRGenModule::setExtraAttributesForFunc(FuncOp f, builder.getContext(), mlir::cir::InlineKind::AlwaysInline); attrs.set(attr.getMnemonic(), attr); } - } else if (FD->hasAttr()) { + } else if (decl->hasAttr()) { // Add noinline if the function isn't always_inline. auto attr = mlir::cir::InlineAttr::get(builder.getContext(), mlir::cir::InlineKind::NoInline); attrs.set(attr.getMnemonic(), attr); - } else if (FD->hasAttr()) { + } else if (decl->hasAttr()) { // (noinline wins over always_inline, and we can't specify both in IR) auto attr = mlir::cir::InlineAttr::get(builder.getContext(), mlir::cir::InlineKind::AlwaysInline); @@ -2372,18 +2374,18 @@ void CIRGenModule::setExtraAttributesForFunc(FuncOp f, // Otherwise, propagate the inline hint attribute and potentially use its // absence to mark things as noinline. // Search function and template pattern redeclarations for inline. - auto CheckForInline = [](const FunctionDecl *FD) { + auto CheckForInline = [](const FunctionDecl *decl) { auto CheckRedeclForInline = [](const FunctionDecl *Redecl) { return Redecl->isInlineSpecified(); }; - if (any_of(FD->redecls(), CheckRedeclForInline)) + if (any_of(decl->redecls(), CheckRedeclForInline)) return true; - const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern(); + const FunctionDecl *Pattern = decl->getTemplateInstantiationPattern(); if (!Pattern) return false; return any_of(Pattern->redecls(), CheckRedeclForInline); }; - if (CheckForInline(FD)) { + if (CheckForInline(cast(decl))) { auto attr = mlir::cir::InlineAttr::get(builder.getContext(), mlir::cir::InlineKind::InlineHint); attrs.set(attr.getMnemonic(), attr); @@ -2398,10 +2400,10 @@ void CIRGenModule::setExtraAttributesForFunc(FuncOp f, // starting with the default for this optimization level. bool ShouldAddOptNone = !codeGenOpts.DisableO0ImplyOptNone && codeGenOpts.OptimizationLevel == 0; - if (FD) { - ShouldAddOptNone &= !FD->hasAttr(); - ShouldAddOptNone &= !FD->hasAttr(); - ShouldAddOptNone |= FD->hasAttr(); + if (decl) { + ShouldAddOptNone &= !decl->hasAttr(); + ShouldAddOptNone &= !decl->hasAttr(); + ShouldAddOptNone |= decl->hasAttr(); } if (ShouldAddOptNone) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index daa74d85ccea..55d609c93299 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -573,7 +573,12 @@ class CIRGenModule : public CIRGenTypeCache { /// Set the CIR function attributes (sext, zext, etc). void setCIRFunctionAttributes(GlobalDecl GD, const CIRGenFunctionInfo &info, - mlir::cir::FuncOp func, bool isThunk); + mlir::cir::FuncOp func, bool isThunk); + + /// Set the CIR function attributes which only apply to a function + /// definition. + void setCIRFunctionAttributesForDefinition(const Decl *decl, + mlir::cir::FuncOp func); void buildGlobalDefinition(clang::GlobalDecl D, mlir::Operation *Op = nullptr); @@ -666,9 +671,6 @@ class CIRGenModule : public CIRGenTypeCache { void ReplaceUsesOfNonProtoTypeWithRealFunction(mlir::Operation *Old, mlir::cir::FuncOp NewFn); - void setExtraAttributesForFunc(mlir::cir::FuncOp f, - const clang::FunctionDecl *FD); - // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR mlir::cir::FuncOp diff --git a/clang/test/CIR/CodeGen/attributes.c b/clang/test/CIR/CodeGen/attributes.c index 4231edf968f7..f80c479df45a 100644 --- a/clang/test/CIR/CodeGen/attributes.c +++ b/clang/test/CIR/CodeGen/attributes.c @@ -14,7 +14,7 @@ int __attribute__((section(".shared"))) glob = 42; void __attribute__((__visibility__("hidden"))) foo(); -// CIR: cir.func no_proto private hidden @foo(...) extra(#fn_attr) +// CIR: cir.func no_proto private hidden @foo(...) int bah() { foo(); diff --git a/clang/test/CIR/CodeGen/visibility-attribute.c b/clang/test/CIR/CodeGen/visibility-attribute.c index 45ea4c28e272..549f05d052b8 100644 --- a/clang/test/CIR/CodeGen/visibility-attribute.c +++ b/clang/test/CIR/CodeGen/visibility-attribute.c @@ -19,15 +19,15 @@ int call_glob() } void foo_default(); -// CIR: cir.func no_proto private @foo_default(...) extra(#fn_attr) +// CIR: cir.func no_proto private @foo_default(...) // LLVM: declare {{.*}} void @foo_default(...) void __attribute__((__visibility__("hidden"))) foo_hidden(); -// CIR: cir.func no_proto private hidden @foo_hidden(...) extra(#fn_attr) +// CIR: cir.func no_proto private hidden @foo_hidden(...) // LLVM: declare {{.*}} hidden void @foo_hidden(...) void __attribute__((__visibility__("protected"))) foo_protected(); -// CIR: cir.func no_proto private protected @foo_protected(...) extra(#fn_attr) +// CIR: cir.func no_proto private protected @foo_protected(...) // LLVM: declare {{.*}} protected void @foo_protected(...) void call_foo() From daee6f655f00fc9b6684a4b1311c751477d52243 Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Fri, 13 Sep 2024 10:31:41 -0700 Subject: [PATCH 1825/2301] [CIR][Asm] Fix parsing of extra(...) attributes in cir.call (#835) The parser was looking for extra(...) before the return type while the pretty-printer put it after the return type. This was breaking the LSP-server for example. Change the parser behavior accordingly. --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 24 ++++++++++++------------ clang/test/CIR/IR/call.cir | 11 +++++++++++ 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 81f3e932a605..81284af0aeae 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2827,6 +2827,18 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, .failed()) return ::mlir::failure(); + if (parser.parseOptionalAttrDict(result.attributes)) + return ::mlir::failure(); + if (parser.parseColon()) + return ::mlir::failure(); + + ::mlir::FunctionType opsFnTy; + if (parser.parseType(opsFnTy)) + return ::mlir::failure(); + operandsTypes = opsFnTy.getInputs(); + allResultTypes = opsFnTy.getResults(); + result.addTypes(allResultTypes); + auto &builder = parser.getBuilder(); Attribute extraAttrs; if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) { @@ -2843,18 +2855,6 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, } result.addAttribute(extraAttrsAttrName, extraAttrs); - if (parser.parseOptionalAttrDict(result.attributes)) - return ::mlir::failure(); - if (parser.parseColon()) - return ::mlir::failure(); - - ::mlir::FunctionType opsFnTy; - if (parser.parseType(opsFnTy)) - return ::mlir::failure(); - operandsTypes = opsFnTy.getInputs(); - allResultTypes = opsFnTy.getResults(); - result.addTypes(allResultTypes); - if (parser.resolveOperands(ops, operandsTypes, opsLoc, result.operands)) return ::mlir::failure(); diff --git a/clang/test/CIR/IR/call.cir b/clang/test/CIR/IR/call.cir index 2ed1fa062868..cb0dea099e5b 100644 --- a/clang/test/CIR/IR/call.cir +++ b/clang/test/CIR/IR/call.cir @@ -3,11 +3,22 @@ !s32i = !cir.int !fnptr = !cir.ptr)>> +#fn_attr = #cir, optnone = #cir.optnone})> +#fn_attr1 = #cir + module { + // Excerpt of std::array::operator[](unsigned long) + cir.func linkonce_odr @_ZNSt5arrayIiLm8192EEixEm(%arg0: !s32i) -> !s32i extra(#fn_attr) { + cir.return %arg0 : !s32i + } + cir.func @ind(%fnptr: !fnptr, %a : !s32i) { %r = cir.call %fnptr(%a) : (!fnptr, !s32i) -> !s32i + // Check parse->pretty-print round-trip on extra() attribute + %7 = cir.call @_ZNSt5arrayIiLm8192EEixEm(%a) : (!s32i) -> !s32i extra(#fn_attr1) cir.return } } // CHECK: %0 = cir.call %arg0(%arg1) : (!cir.ptr)>>, !s32i) -> !s32i +// CHECK: %1 = cir.call @_ZNSt5arrayIiLm8192EEixEm(%arg1) : (!s32i) -> !s32i extra(#fn_attr1) From bff6efbafda6ae77966f6c317b0a2c5ff04b12a6 Mon Sep 17 00:00:00 2001 From: 7mile Date: Sat, 14 Sep 2024 01:43:52 +0800 Subject: [PATCH 1826/2301] [CIR][CodeGen][LowerToLLVM] Set calling convention for call ops (#836) This PR implements the CIRGen and Lowering part of calling convention attribute of `cir.call`-like operations. Here we have **4 kinds of operations**: (direct or indirect) x (`call` or `try_call`). According to our need and feasibility of constructing a test case, this PR includes: * For CIRGen, only direct `call`. Until now, the only extra calling conventions are SPIR ones, which cannot be set from source code manually using attributes. Meanwhile, OpenCL C *does not allow* function pointers or exceptions, therefore the only case remaining is direct call. * For Lowering, direct and indirect `call`, but not any `try_call`. Although it's possible to write all 4 kinds of calls with calling convention in ClangIR assembly, exceptions is quite hard to write and read. I prefer source-code-level test for it when it's available in the future. For example, possibly C++ `thiscall` with exceptions. * Extra: the verification of calling convention consistency for direct `call` and direct `try_call`. All unsupported cases are guarded by assertions or MLIR diags. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 19 +++++++----- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 ++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 29 ++++++++++++++----- .../CIR/CodeGen/OpenCL/spir-calling-conv.cl | 4 +++ clang/test/CIR/IR/invalid.cir | 15 ++++++++++ clang/test/CIR/Lowering/call-op-call-conv.cir | 22 ++++++++++++++ 6 files changed, 80 insertions(+), 15 deletions(-) create mode 100644 clang/test/CIR/Lowering/call-op-call-conv.cir diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 656c80ce6f46..9acd226aafc6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -447,7 +447,7 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, mlir::cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal, mlir::cir::FuncOp directFuncOp, SmallVectorImpl &CIRCallArgs, - mlir::Operation *InvokeDest, + mlir::Operation *InvokeDest, mlir::cir::CallingConv callingConv, mlir::cir::ExtraFuncAttributesAttr extraFnAttrs) { auto &builder = CGF.getBuilder(); @@ -468,6 +468,8 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, } mlir::cir::CallOp tryCallOp; + // TODO(cir): Set calling convention for `cir.try_call`. + assert(callingConv == mlir::cir::CallingConv::C && "NYI"); if (indirectFuncTy) { tryCallOp = builder.createIndirectTryCallOp(callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs); @@ -484,12 +486,15 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, } assert(builder.getInsertionBlock() && "expected valid basic block"); - if (indirectFuncTy) + if (indirectFuncTy) { + // TODO(cir): Set calling convention for indirect calls. + assert(callingConv == mlir::cir::CallingConv::C && "NYI"); return builder.createIndirectCallOp( callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs, mlir::cir::CallingConv::C, extraFnAttrs); - return builder.createCallOp(callLoc, directFuncOp, CIRCallArgs, - mlir::cir::CallingConv::C, extraFnAttrs); + } + return builder.createCallOp(callLoc, directFuncOp, CIRCallArgs, callingConv, + extraFnAttrs); } RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, @@ -765,9 +770,9 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, auto extraFnAttrs = mlir::cir::ExtraFuncAttributesAttr::get( builder.getContext(), Attrs.getDictionary(builder.getContext())); - mlir::cir::CIRCallOpInterface callLikeOp = - buildCallLikeOp(*this, callLoc, indirectFuncTy, indirectFuncVal, - directFuncOp, CIRCallArgs, InvokeDest, extraFnAttrs); + mlir::cir::CIRCallOpInterface callLikeOp = buildCallLikeOp( + *this, callLoc, indirectFuncTy, indirectFuncVal, directFuncOp, + CIRCallArgs, InvokeDest, callingConv, extraFnAttrs); if (E) callLikeOp->setAttr( diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 81284af0aeae..8b7769d62b5d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2704,6 +2704,12 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { << op->getOperand(i).getType() << " for operand number " << i; } + // Calling convention must match. + if (callIf.getCallingConv() != fn.getCallingConv()) + return op->emitOpError("calling convention mismatch: expected ") + << stringifyCallingConv(fn.getCallingConv()) << ", but provided " + << stringifyCallingConv(callIf.getCallingConv()); + // Void function must not return any results. if (fnType.isVoid() && op->getNumResults() != 0) return op->emitOpError("callee returns void but call has results"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 545d0f1a7603..a92bdcebd794 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -875,18 +875,24 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, mlir::Block *landingPadBlock = nullptr) { llvm::SmallVector llvmResults; auto cirResults = op->getResultTypes(); + auto callIf = cast(op); if (converter->convertTypes(cirResults, llvmResults).failed()) return mlir::failure(); + auto cconv = convertCallingConv(callIf.getCallingConv()); + if (calleeAttr) { // direct call - if (landingPadBlock) - rewriter.replaceOpWithNewOp( + if (landingPadBlock) { + auto newOp = rewriter.replaceOpWithNewOp( op, llvmResults, calleeAttr, callOperands, continueBlock, mlir::ValueRange{}, landingPadBlock, mlir::ValueRange{}); - else - rewriter.replaceOpWithNewOp(op, llvmResults, - calleeAttr, callOperands); + newOp.setCConv(cconv); + } else { + auto newOp = rewriter.replaceOpWithNewOp( + op, llvmResults, calleeAttr, callOperands); + newOp.setCConv(cconv); + } } else { // indirect call assert(op->getOperands().size() && "operands list must no be empty for the indirect call"); @@ -899,14 +905,17 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, if (landingPadBlock) { auto llvmFnTy = dyn_cast(converter->convertType(ftyp)); - rewriter.replaceOpWithNewOp( + auto newOp = rewriter.replaceOpWithNewOp( op, llvmFnTy, mlir::FlatSymbolRefAttr{}, callOperands, continueBlock, mlir::ValueRange{}, landingPadBlock, mlir::ValueRange{}); - } else - rewriter.replaceOpWithNewOp( + newOp.setCConv(cconv); + } else { + auto newOp = rewriter.replaceOpWithNewOp( op, dyn_cast(converter->convertType(ftyp)), callOperands); + newOp.setCConv(cconv); + } } return mlir::success(); } @@ -932,6 +941,10 @@ class CIRTryCallLowering mlir::LogicalResult matchAndRewrite(mlir::cir::TryCallOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { + if (op.getCallingConv() != mlir::cir::CallingConv::C) { + return op.emitError( + "non-C calling convention is not implemented for try_call"); + } return rewriteToCallOrInvoke( op.getOperation(), adaptor.getOperands(), rewriter, getTypeConverter(), op.getCalleeAttr(), op.getCont(), op.getLandingPad()); diff --git a/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl b/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl index 96550f721bf5..bf711bec7d46 100644 --- a/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl +++ b/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl @@ -15,6 +15,10 @@ kernel void bar(global int *A); // LLVM-DAG: define{{.*}} spir_kernel void @foo( kernel void foo(global int *A) { int id = get_dummy_id(0); + // CIR: %{{[0-9]+}} = cir.call @get_dummy_id(%2) : (!s32i) -> !s32i cc(spir_function) + // LLVM: %{{[a-z0-9_]+}} = call spir_func i32 @get_dummy_id( A[id] = id; bar(A); + // CIR: cir.call @bar(%8) : (!cir.ptr) -> () cc(spir_kernel) + // LLVM: call spir_kernel void @bar(ptr addrspace(1) } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 6a7eadcf0338..a28569ac0b46 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1304,7 +1304,22 @@ module { !s32i = !cir.int module { + cir.func @subroutine() cc(spir_function) { + cir.return + } + + cir.func @call_conv_match() { + // expected-error@+1 {{'cir.call' op calling convention mismatch: expected spir_function, but provided spir_kernel}} + cir.call @subroutine(): () -> !cir.void cc(spir_kernel) + cir.return + } +} +// ----- + +!s32i = !cir.int + +module { cir.func @test_bitcast_addrspace() { %0 = cir.alloca !s32i, !cir.ptr, ["tmp"] {alignment = 4 : i64} // expected-error@+1 {{'cir.cast' op result type address space does not match the address space of the operand}} diff --git a/clang/test/CIR/Lowering/call-op-call-conv.cir b/clang/test/CIR/Lowering/call-op-call-conv.cir new file mode 100644 index 000000000000..837cc4b82ab9 --- /dev/null +++ b/clang/test/CIR/Lowering/call-op-call-conv.cir @@ -0,0 +1,22 @@ +// RUN: cir-translate -cir-to-llvmir %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +!s32i = !cir.int +!fnptr = !cir.ptr> + +module { + cir.func private @my_add(%a: !s32i, %b: !s32i) -> !s32i cc(spir_function) + + cir.func @ind(%fnptr: !fnptr, %a : !s32i) { + %1 = cir.call %fnptr(%a) : (!fnptr, !s32i) -> !s32i cc(spir_kernel) + // LLVM: %{{[0-9]+}} = call spir_kernel i32 %{{[0-9]+}}(i32 %{{[0-9]+}}) + + %2 = cir.call %fnptr(%a) : (!fnptr, !s32i) -> !s32i cc(spir_function) + // LLVM: %{{[0-9]+}} = call spir_func i32 %{{[0-9]+}}(i32 %{{[0-9]+}}) + + %3 = cir.call @my_add(%1, %2) : (!s32i, !s32i) -> !s32i cc(spir_function) + // LLVM: %{{[0-9]+}} = call spir_func i32 @my_add(i32 %{{[0-9]+}}, i32 %{{[0-9]+}}) + + cir.return + } +} From 299b825423ce8945efb5055b6a5a3d6abe73f3bf Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Fri, 13 Sep 2024 20:48:19 +0300 Subject: [PATCH 1827/2301] [CIR][Lowering] Fix static array lowering (#838) Consider the following code snippet `test.c`: ``` int test(int x) { static int arr[10] = {0, 1, 0, 0}; return arr[x]; } ``` When lowering from CIR to LLVM using `bin/clang test.c -Xclang -fclangir -Xclang -emit-llvm -S -o -` It produces: ``` clangir/mlir/lib/IR/BuiltinAttributes.cpp:1015: static mlir::DenseElementsAttr mlir::DenseElementsAttr::get(mlir::ShapedType, llvm::ArrayRef): Assertion `hasSameElementsOrSplat(type, values)' failed. ``` I traced the bug back to `Lowering/LoweringHelpers.cpp` where we fill trailing zeros, and I believe this PR does it the right way. I have also added a very simple test for verification. --- clang/lib/CIR/Lowering/LoweringHelpers.cpp | 2 +- clang/test/CIR/Lowering/static-array.c | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/static-array.c diff --git a/clang/lib/CIR/Lowering/LoweringHelpers.cpp b/clang/lib/CIR/Lowering/LoweringHelpers.cpp index 26b2af82ca19..debe7881d0c4 100644 --- a/clang/lib/CIR/Lowering/LoweringHelpers.cpp +++ b/clang/lib/CIR/Lowering/LoweringHelpers.cpp @@ -91,7 +91,7 @@ void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, auto nestTy = localArrayTy.getEltType(); if (!mlir::isa(nestTy)) - values.insert(values.end(), localArrayTy.getSize() - numTrailingZeros, + values.insert(values.end(), numTrailingZeros, getZeroInitFromType(nestTy)); } } diff --git a/clang/test/CIR/Lowering/static-array.c b/clang/test/CIR/Lowering/static-array.c new file mode 100644 index 000000000000..60cfce0245d6 --- /dev/null +++ b/clang/test/CIR/Lowering/static-array.c @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +int test(int x) { + static int arr[10] = {0, 1, 0, 0}; + return arr[x]; +} +// LLVM: internal global [10 x i32] [i32 0, i32 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0] \ No newline at end of file From b2b089fa7fb840e8c01b72a972e5a5e5e943c658 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 13 Sep 2024 17:58:00 -0400 Subject: [PATCH 1828/2301] [CIR][CIRGen][Lowering] Add support for attribute annotate (#804) The main purpose of this PR is to add support for C/C++ attribute annotate. The PR involves both CIR generation and Lowering Prepare. In the rest of this description, we first introduce the concept of attribute annotate, then talk about expectations of LLVM regarding annotation, after it, we describe how ClangIR handles it in this PR. Finally, we list trivial differences between LLVM code generated by clang codegen and ClangIR codegen. **The concept of attribute annotate. and expected LLVM IR** the following is C code example of annotation. say in example.c `int *b __attribute__((annotate("withargs", "21", 12 ))); int *a __attribute__((annotate("oneargs", "21", ))); int *c __attribute__((annotate("noargs"))); ` here "withargs" is the annotation string, "21" and 12 are arguments for this annotation named "withargs". LLVM-based compiler is expected keep these information and build a global variable capturing all annotations used in the translation unit when emitting into LLVM IR. This global variable itself is **not** constant, but will be initialized with constants that are related to annotation representation, e.g. "withargs" should be literal string variable in IR. This global variable has a fixed name "llvm.global.annotations", and its of array of struct type, and should be initialized with a const array of const structs, each const struct is a representation of an annotation site, which has 5-field. [ptr to global var/func annotated, ptr to translation unit string const, line_no, annotation_name, ptr to arguments const] annotation name string and args constants, as well as this global var should be in section "llvm.metadata". e.g. In the above example, We shall have following in the generated LLVM IR like the following ``` @b = global ptr null, align 8 @.str = private unnamed_addr constant [9 x i8] c"withargs\00", section "llvm.metadata" @.str.1 = private unnamed_addr constant [10 x i8] c"example.c\00", section "llvm.metadata" @.str.2 = private unnamed_addr constant [3 x i8] c"21\00", align 1 @.args = private unnamed_addr constant { ptr, i32 } { ptr @.str.2, i32 12 }, section "llvm.metadata" @a = global ptr null, align 8 @.str.3 = private unnamed_addr constant [8 x i8] c"oneargs\00", section "llvm.metadata" @.args.4 = private unnamed_addr constant { ptr } { ptr @.str.2 }, section "llvm.metadata" @c = global ptr null, align 8 @.str.5 = private unnamed_addr constant [7 x i8] c"noargs\00", section "llvm.metadata" @llvm.global.annotations = appending global [3 x { ptr, ptr, ptr, i32, ptr }] [{ ptr, ptr, ptr, i32, ptr } { ptr @b, ptr @.str, ptr @.str.1, i32 1, ptr @.args }, { ptr, ptr, ptr, i32, ptr } { ptr @a, ptr @.str.3, ptr @.str.1, i32 2, ptr @.args.4 }, { ptr, ptr, ptr, i32, ptr } { ptr @c, ptr @.str.5, ptr @.str.1, i32 3, ptr null }], section "llvm.metadata" ``` notice that since variable c's annotation has no arg, the last field of its corresponding annotation entry is a nullptr. **ClangIR's handling of annotations** In CIR, we introduce AnnotationAttr to GlobalOp and FuncOp to record its annotations. That way, we are able to make fast query about annotation if in future a CIR pass is interested in them. We leave the work of generating const variables as well as global annotations' var to LLVM lowering. But at LoweringPrepare we collect all annotations and create a module attribute "cir.global_annotations" so to facilitate LLVM lowering. **Some implementation details and trivial differences between clangir generated LLVM code and vanilla LLVM code** 1. I suffix names of constants generated for annotation purpose with ".annotation" to avoid redefinition, but clang codegen doesn't do it. 3. clang codegen seems to visit FuncDecls in slightly different orders than CIR, thus, sometimes the order of elements of the initial value const array for llvm.global.annotations var is different from clang generated LLVMIR, it should be trivial, as I don't expect consumer of this var is assuming a fixed order of collecting annotations. Otherwise, clang codegen and clangir pretty much generate same LLVM IR for annotations! --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 71 +++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 7 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 79 +++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 34 +++ clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 40 +++ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 15 +- .../Dialect/Transforms/LoweringPrepare.cpp | 37 +++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 245 +++++++++++++++++- .../CodeGen/attribute-annotate-multiple.cpp | 71 +++++ clang/test/CIR/IR/annotations.cir | 31 +++ clang/test/CIR/IR/invalid-annotations.cir | 32 +++ 11 files changed, 654 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp create mode 100644 clang/test/CIR/IR/annotations.cir create mode 100644 clang/test/CIR/IR/invalid-annotations.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 75180131adb3..59343514b645 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -1101,6 +1101,77 @@ def BitfieldInfoAttr : CIR_Attr<"BitfieldInfo", "bitfield_info"> { ]; } +//===----------------------------------------------------------------------===// +// AnnotationAttr +//===----------------------------------------------------------------------===// + +def AnnotationAttr : CIR_Attr<"Annotation", "annotation"> { + let summary = "Annotation attribute for global variables and functions"; + let description = [{ + Represent C/C++ attribute of annotate in CIR. + Example C code: + ``` + int *a __attribute__((annotate("testptr", "21", 12 ))); + ``` + In this example code, the `AnnotationAttr` has annotation name "testptr", + and arguments "21" and 12 constitutes an `ArrayAttr` type parameter `args` + for global variable `a`. + In CIR, the attribute for above annotation looks like: + ``` + [#cir.annotation] + ``` + }]; + + // The parameter args is empty when there is no arg. + let parameters = (ins "StringAttr":$name, + "ArrayAttr":$args); + + let assemblyFormat = "`<` struct($name, $args) `>`"; + + let extraClassDeclaration = [{ + bool isNoArgs() const { return getArgs().empty(); }; + }]; +} + +//===----------------------------------------------------------------------===// +// GlobalAnnotationValuesAttr +//===----------------------------------------------------------------------===// + +def GlobalAnnotationValuesAttr : CIR_Attr<"GlobalAnnotationValues", + "global_annotations"> { + let summary = "Array of annotations, each element consists of name of" + "a global var or func and one of its annotations"; + let description = [{ + This is annotation value array, which holds the annotation + values for all global variables and functions in a module. + This array is used to create the initial value of a global annotation + metadata variable in LLVM IR. + Example C code: + ``` + double *a __attribute__((annotate("withargs", "21", 12 ))); + int *b __attribute__((annotate("withargs", "21", 12 ))); + void *c __attribute__((annotate("noargvar"))); + void foo(int i) __attribute__((annotate("noargfunc"))) {} + ``` + After CIR lowering prepare pass, compiler generates a + `GlobalAnnotationValuesAttr` like the following: + ``` + #cir], + ["b", #cir.annotation], + ["c", #cir.annotation], + ["foo", #cir.annotation]]> + ``` + }]; + + let parameters = (ins "ArrayAttr":$annotations); + + let assemblyFormat = [{ $annotations }]; + + // Enable verifier. + let genVerifyDecl = 1; +} + include "clang/CIR/Dialect/IR/CIROpenCLAttrs.td" #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 4f0f8cc3ce0a..c0f3bce2b65a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2205,7 +2205,7 @@ def TLSModel : I32EnumAttr< [TLS_GeneralDynamic, TLS_LocalDynamic, TLS_InitialExec, TLS_LocalExec]> { let cppNamespace = "::mlir::cir"; } - + def GlobalOp : CIR_Op<"global", [DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, @@ -2255,7 +2255,8 @@ def GlobalOp : CIR_Op<"global", UnitAttr:$dsolocal, OptionalAttr:$alignment, OptionalAttr:$ast, - OptionalAttr:$section); + OptionalAttr:$section, + OptionalAttr:$annotations); let regions = (region AnyRegion:$ctorRegion, AnyRegion:$dtorRegion); let assemblyFormat = [{ ($sym_visibility^)? @@ -2268,6 +2269,7 @@ def GlobalOp : CIR_Op<"global", (`addrspace` `(` custom($addr_space)^ `)`)? $sym_name custom($sym_type, $initial_value, $ctorRegion, $dtorRegion) + ($annotations^)? attr-dict }]; @@ -3100,6 +3102,7 @@ def FuncOp : CIR_Op<"func", [ OptionalAttr:$aliasee, OptionalAttr:$global_ctor, OptionalAttr:$global_dtor, + OptionalAttr:$annotations, OptionalAttr:$ast); let regions = (region AnyRegion:$body); let skipDefaultBuilders = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 2bbd5e0fe796..b5578fbd6d02 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -484,6 +484,13 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { // Ignore declarations, they will be emitted on their first use. if (const auto *FD = dyn_cast(Global)) { + // Update deferred annotations with the latest declaration if the function + // was already used or defined. + if (FD->hasAttr()) { + StringRef MangledName = getMangledName(GD); + if (getGlobalValue(MangledName)) + deferredAnnotations[MangledName] = FD; + } // Forward declarations are emitted lazily on first use. if (!FD->doesThisDeclarationHaveABody()) { if (!FD->doesDeclarationForceExternallyVisibleDefinition()) @@ -595,7 +602,8 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, if (const DestructorAttr *DA = D->getAttr()) AddGlobalDtor(Fn, DA->getPriority(), true); - assert(!D->getAttr() && "NYI"); + if (D->getAttr()) + deferredAnnotations[getMangledName(GD)] = cast(D); } /// Track functions to be called before main() runs. @@ -1232,7 +1240,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, maybeHandleStaticInExternC(D, GV); if (D->hasAttr()) - assert(0 && "not implemented"); + addGlobalAnnotations(D, GV); // Set CIR's linkage type as appropriate. mlir::cir::GlobalLinkageKind Linkage = @@ -2834,7 +2842,7 @@ void CIRGenModule::Release() { // TODO: PGOReader // TODO: buildCtorList(GlobalCtors); // TODO: builtCtorList(GlobalDtors); - // TODO: buildGlobalAnnotations(); + buildGlobalAnnotations(); // TODO: buildDeferredUnusedCoverageMappings(); // TODO: CIRGenPGO // TODO: CoverageMapping @@ -3188,3 +3196,68 @@ LangAS CIRGenModule::getGlobalVarAddressSpace(const VarDecl *D) { return getTargetCIRGenInfo().getGlobalVarAddressSpace(*this, D); } + +mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(AnnotateAttr *attr) { + ArrayRef exprs = {attr->args_begin(), attr->args_size()}; + if (exprs.empty()) { + return mlir::ArrayAttr::get(builder.getContext(), {}); + } + llvm::FoldingSetNodeID id; + for (Expr *e : exprs) { + id.Add(cast(e)->getAPValueResult()); + } + mlir::ArrayAttr &lookup = annotationArgs[id.ComputeHash()]; + if (lookup) + return lookup; + + llvm::SmallVector args; + args.reserve(exprs.size()); + for (Expr *e : exprs) { + if (auto *const strE = + ::clang::dyn_cast(e->IgnoreParenCasts())) { + // Add trailing null character as StringLiteral->getString() does not + args.push_back(builder.getStringAttr(strE->getString())); + } else if (auto *const intE = ::clang::dyn_cast( + e->IgnoreParenCasts())) { + args.push_back(mlir::IntegerAttr::get( + mlir::IntegerType::get(builder.getContext(), + intE->getValue().getBitWidth()), + intE->getValue())); + } else { + llvm_unreachable("NYI"); + } + } + + lookup = builder.getArrayAttr(args); + return lookup; +} + +mlir::cir::AnnotationAttr +CIRGenModule::buildAnnotateAttr(clang::AnnotateAttr *aa) { + mlir::StringAttr annoGV = builder.getStringAttr(aa->getAnnotation()); + mlir::ArrayAttr args = buildAnnotationArgs(aa); + return mlir::cir::AnnotationAttr::get(builder.getContext(), annoGV, args); +} + +void CIRGenModule::addGlobalAnnotations(const ValueDecl *d, + mlir::Operation *gv) { + assert(d->hasAttr() && "no annotate attribute"); + assert((isa(gv) || isa(gv)) && + "annotation only on globals"); + llvm::SmallVector annotations; + for (auto *i : d->specific_attrs()) + annotations.push_back(buildAnnotateAttr(i)); + if (auto global = dyn_cast(gv)) + global.setAnnotationsAttr(builder.getArrayAttr(annotations)); + else if (auto func = dyn_cast(gv)) + func.setAnnotationsAttr(builder.getArrayAttr(annotations)); +} + +void CIRGenModule::buildGlobalAnnotations() { + for (const auto &[mangledName, vd] : deferredAnnotations) { + mlir::Operation *gv = getGlobalValue(mangledName); + if (gv) + addGlobalAnnotations(vd, gv); + } + deferredAnnotations.clear(); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 55d609c93299..4ec8950a5d33 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -127,6 +127,22 @@ class CIRGenModule : public CIRGenTypeCache { /// for the same decl. llvm::DenseSet DiagnosedConflictingDefinitions; + /// ------- + /// Annotations + /// ------- + + /// We do not store global annotations in the module here, instead, we store + /// each annotation as attribute of GlobalOp and FuncOp. + /// We defer creation of global annotation variable to LoweringPrepare + /// as CIR passes do not need to have a global view of all annotations. + + /// Used for uniquing of annotation arguments. + llvm::DenseMap annotationArgs; + + /// Store deferred function annotations so they can be emitted at the end with + /// most up to date ValueDecl that will have all the inherited annotations. + llvm::DenseMap deferredAnnotations; + public: mlir::ModuleOp getModule() const { return theModule; } CIRGenBuilderTy &getBuilder() { return builder; } @@ -761,6 +777,24 @@ class CIRGenModule : public CIRGenTypeCache { void setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GV); /// Map source language used to a CIR attribute. mlir::cir::SourceLanguage getCIRSourceLanguage(); + + /// Emit all the global annotations. + /// This actually only emits annotations for deffered declarations of + /// functions, because global variables need no deffred emission. + void buildGlobalAnnotations(); + + /// Emit additional args of the annotation. + mlir::ArrayAttr buildAnnotationArgs(clang::AnnotateAttr *attr); + + /// Create cir::AnnotationAttr which contains the annotation + /// information for a given GlobalValue. Notice that a GlobalValue could + /// have multiple annotations, and this function creates attribute for + /// one of them. + mlir::cir::AnnotationAttr buildAnnotateAttr(clang::AnnotateAttr *aa); + + /// Add global annotations for a global value. + /// Those annotations are emitted during lowering to the LLVM code. + void addGlobalAnnotations(const ValueDecl *d, mlir::Operation *gv); }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index d11ebb1c0c2f..c4bbd5390333 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -547,6 +547,46 @@ void MethodAttr::print(AsmPrinter &printer) const { printer << '>'; } +//===----------------------------------------------------------------------===// +// GlobalAnnotationValuesAttr definitions +//===----------------------------------------------------------------------===// + +LogicalResult GlobalAnnotationValuesAttr::verify( + function_ref<::mlir::InFlightDiagnostic()> emitError, + mlir::ArrayAttr annotations) { + if (annotations.empty()) { + emitError() + << "GlobalAnnotationValuesAttr should at least have one annotation"; + return failure(); + } + for (auto &entry : annotations) { + auto annoEntry = ::mlir::dyn_cast(entry); + if (!annoEntry) { + emitError() << "Element of GlobalAnnotationValuesAttr annotations array" + " must be an array"; + return failure(); + } else if (annoEntry.size() != 2) { + emitError() << "Element of GlobalAnnotationValuesAttr annotations array" + << " must be a 2-element array and you have " + << annoEntry.size(); + return failure(); + } else if (!::mlir::isa(annoEntry[0])) { + emitError() << "Element of GlobalAnnotationValuesAttr annotations" + "array must start with a string, which is the name of " + "global op or func it annotates"; + return failure(); + } + auto annoPart = ::mlir::dyn_cast(annoEntry[1]); + if (!annoPart) { + emitError() << "The second element of GlobalAnnotationValuesAttr" + "annotations array element must be of " + "type AnnotationValueAttr"; + return failure(); + } + } + return success(); +} + //===----------------------------------------------------------------------===// // DynamicCastInfoAtttr definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 8b7769d62b5d..9897b112a332 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -717,7 +717,7 @@ bool isIntOrBoolCast(mlir::cir::CastOp op) { Value tryFoldCastChain(CastOp op) { CastOp head = op, tail = op; - while(op) { + while (op) { if (!isIntOrBoolCast(op)) break; head = op; @@ -2259,6 +2259,7 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { auto noProtoNameAttr = getNoProtoAttrName(state.name); auto visibilityNameAttr = getGlobalVisibilityAttrName(state.name); auto dsolocalNameAttr = getDsolocalAttrName(state.name); + auto annotationsNameAttr = getAnnotationsAttrName(state.name); if (::mlir::succeeded(parser.parseOptionalKeyword(builtinNameAttr.strref()))) state.addAttribute(builtinNameAttr, parser.getBuilder().getUnitAttr()); if (::mlir::succeeded( @@ -2290,6 +2291,9 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { if (parser.parseOptionalKeyword(dsolocalNameAttr).succeeded()) state.addAttribute(dsolocalNameAttr, parser.getBuilder().getUnitAttr()); + if (parser.parseOptionalKeyword(annotationsNameAttr).succeeded()) + state.addAttribute(annotationsNameAttr, parser.getBuilder().getUnitAttr()); + StringAttr nameAttr; SmallVector arguments; SmallVector resultAttrs; @@ -2508,6 +2512,12 @@ void cir::FuncOp::print(OpAsmPrinter &p) { else function_interface_impl::printFunctionSignature( p, *this, fnType.getInputs(), fnType.isVarArg(), {}); + + if (mlir::ArrayAttr annotations = getAnnotationsAttr()) { + p << " "; + p.printAttribute(annotations); + } + function_interface_impl::printFunctionAttributes( p, *this, // These are all omitted since they are custom printed already. @@ -2517,7 +2527,8 @@ void cir::FuncOp::print(OpAsmPrinter &p) { getGlobalDtorAttrName(), getLambdaAttrName(), getLinkageAttrName(), getCallingConvAttrName(), getNoProtoAttrName(), getSymVisibilityAttrName(), getArgAttrsAttrName(), getResAttrsAttrName(), - getComdatAttrName(), getGlobalVisibilityAttrName()}); + getComdatAttrName(), getGlobalVisibilityAttrName(), + getAnnotationsAttrName()}); if (auto aliaseeName = getAliasee()) { p << " alias("; diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 160c0de9b98d..ba19c6ec4069 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -85,6 +85,9 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerArrayDtor(ArrayDtor op); void lowerArrayCtor(ArrayCtor op); + /// Collect annotations of global values in the module + void addGlobalAnnotations(mlir::Operation *op, mlir::ArrayAttr annotations); + /// Build the function that initializes the specified global FuncOp buildCXXGlobalVarDeclInitFunc(GlobalOp op); @@ -94,6 +97,9 @@ struct LoweringPreparePass : public LoweringPrepareBase { /// Materialize global ctor/dtor list void buildGlobalCtorDtorList(); + /// Build attribute of global annotation values + void buildGlobalAnnotationValues(); + FuncOp buildRuntimeFunction(mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, mlir::cir::FuncType type, @@ -149,6 +155,8 @@ struct LoweringPreparePass : public LoweringPrepareBase { SmallVector globalCtorList; /// List of dtors to be called when unloading module. SmallVector globalDtorList; + /// List of annotations in the module + SmallVector globalAnnotations; }; } // namespace @@ -878,6 +886,11 @@ void LoweringPreparePass::lowerGlobalOp(GlobalOp op) { "custom initialization priority NYI"); dynamicInitializers.push_back(f); } + + std::optional annotations = op.getAnnotations(); + if (annotations) { + addGlobalAnnotations(op, annotations.value()); + } } void LoweringPreparePass::buildGlobalCtorDtorList() { @@ -1061,6 +1074,27 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { op.erase(); } +void LoweringPreparePass::addGlobalAnnotations(mlir::Operation *op, + mlir::ArrayAttr annotations) { + auto globalValue = cast(op); + mlir::StringAttr globalValueName = globalValue.getNameAttr(); + for (auto &annot : annotations) { + SmallVector entryArray = {globalValueName, annot}; + globalAnnotations.push_back( + mlir::ArrayAttr::get(theModule.getContext(), entryArray)); + } +} + +void LoweringPreparePass::buildGlobalAnnotationValues() { + if (globalAnnotations.empty()) + return; + mlir::ArrayAttr annotationValueArray = + mlir::ArrayAttr::get(theModule.getContext(), globalAnnotations); + theModule->setAttr("cir.global_annotations", + mlir::cir::GlobalAnnotationValuesAttr::get( + theModule.getContext(), annotationValueArray)); +} + void LoweringPreparePass::runOnOp(Operation *op) { if (auto unary = dyn_cast(op)) { lowerUnaryOp(unary); @@ -1094,6 +1128,8 @@ void LoweringPreparePass::runOnOp(Operation *op) { } else if (auto globalDtor = fnOp.getGlobalDtorAttr()) { globalDtorList.push_back(globalDtor); } + if (std::optional annotations = fnOp.getAnnotations()) + addGlobalAnnotations(fnOp, annotations.value()); } } @@ -1118,6 +1154,7 @@ void LoweringPreparePass::runOnOperation() { buildCXXGlobalInitFunc(); buildGlobalCtorDtorList(); + buildGlobalAnnotationValues(); } std::unique_ptr mlir::createLoweringPreparePass() { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index a92bdcebd794..8fe63f89f7aa 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -863,7 +863,10 @@ struct ConvertCIRToLLVMPass } void runOnOperation() final; + void buildGlobalAnnotationsVar(); + virtual StringRef getArgument() const override { return "cir-flat-to-llvm"; } + static constexpr StringRef annotationSection = "llvm.metadata"; }; mlir::LogicalResult @@ -2042,7 +2045,7 @@ class CIRGlobalOpLowering // Rewrite op. auto llvmGlobalOp = rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, init.value(), - /*alignment*/op.getAlignment().value_or(0), + /*alignment*/ op.getAlignment().value_or(0), /*addrSpace*/ getGlobalOpTargetAddrSpace(op), /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); @@ -4058,6 +4061,245 @@ void collect_unreachable(mlir::Operation *parent, } } +// Create a string global for annotation related string. +mlir::LLVM::GlobalOp +getAnnotationStringGlobal(mlir::StringAttr strAttr, mlir::ModuleOp &module, + llvm::StringMap &globalsMap, + mlir::OpBuilder &globalVarBuilder, + mlir::Location &loc, bool isArg = false) { + llvm::StringRef str = strAttr.getValue(); + if (!globalsMap.contains(str)) { + auto llvmStrTy = mlir::LLVM::LLVMArrayType::get( + mlir::IntegerType::get(module.getContext(), 8), str.size() + 1); + auto strGlobalOp = globalVarBuilder.create( + loc, llvmStrTy, + /*isConstant=*/true, mlir::LLVM::Linkage::Private, + ".str" + + (globalsMap.empty() ? "" + : "." + std::to_string(globalsMap.size())) + + ".annotation" + (isArg ? ".arg" : ""), + mlir::StringAttr::get(module.getContext(), std::string(str) + '\0'), + /*alignment=*/isArg ? 1 : 0); + if (!isArg) + strGlobalOp.setSection(ConvertCIRToLLVMPass::annotationSection); + strGlobalOp.setUnnamedAddr(mlir::LLVM::UnnamedAddr::Global); + strGlobalOp.setDsoLocal(true); + globalsMap[str] = strGlobalOp; + } + return globalsMap[str]; +} + +mlir::Value lowerAnnotationValue( + mlir::ArrayAttr annotValue, mlir::ModuleOp &module, + mlir::OpBuilder &varInitBuilder, mlir::OpBuilder &globalVarBuilder, + llvm::StringMap &stringGlobalsMap, + llvm::StringMap &argStringGlobalsMap, + llvm::MapVector &argsVarMap, + llvm::SmallVector &annoStructFields, + mlir::LLVM::LLVMStructType &annoStructTy, + mlir::LLVM::LLVMPointerType &annoPtrTy, mlir::Location &loc) { + mlir::Value valueEntry = + varInitBuilder.create(loc, annoStructTy); + auto globalValueName = mlir::cast(annotValue[0]); + mlir::Operation *globalValue = + mlir::SymbolTable::lookupSymbolIn(module, globalValueName); + // The first field is ptr to the global value + auto globalValueFld = varInitBuilder.create( + loc, annoPtrTy, globalValueName); + + valueEntry = varInitBuilder.create( + loc, valueEntry, globalValueFld, 0); + mlir::cir::AnnotationAttr annotation = + mlir::cast(annotValue[1]); + + // The second field is ptr to the annotation name + mlir::StringAttr annotationName = annotation.getName(); + auto annotationNameFld = varInitBuilder.create( + loc, annoPtrTy, + getAnnotationStringGlobal(annotationName, module, stringGlobalsMap, + globalVarBuilder, loc) + .getSymName()); + + valueEntry = varInitBuilder.create( + loc, valueEntry, annotationNameFld, 1); + + // The third field is ptr to the translation unit name, + // and the fourth field is the line number + auto annotLoc = globalValue->getLoc(); + if (mlir::isa(annotLoc)) { + auto FusedLoc = mlir::cast(annotLoc); + annotLoc = FusedLoc.getLocations()[0]; + } + auto annotFileLoc = mlir::cast(annotLoc); + assert(annotFileLoc && "annotation value has to be FileLineColLoc"); + // To be consistent with clang code gen, we add trailing null char + auto fileName = mlir::StringAttr::get( + module.getContext(), std::string(annotFileLoc.getFilename().getValue())); + auto fileNameFld = varInitBuilder.create( + loc, annoPtrTy, + getAnnotationStringGlobal(fileName, module, stringGlobalsMap, + globalVarBuilder, loc) + .getSymName()); + valueEntry = varInitBuilder.create(loc, valueEntry, + fileNameFld, 2); + unsigned int lineNo = annotFileLoc.getLine(); + auto lineNoFld = varInitBuilder.create( + loc, annoStructFields[3], lineNo); + valueEntry = varInitBuilder.create(loc, valueEntry, + lineNoFld, 3); + // The fifth field is ptr to the annotation args var, it could be null + if (annotation.isNoArgs()) { + auto nullPtrFld = varInitBuilder.create(loc, annoPtrTy); + valueEntry = varInitBuilder.create( + loc, valueEntry, nullPtrFld, 4); + } else { + mlir::ArrayAttr argsAttr = annotation.getArgs(); + // First time we see this argsAttr, create a global for it + // and build its initializer + if (!argsVarMap.contains(argsAttr)) { + llvm::SmallVector argStrutFldTypes; + llvm::SmallVector argStrutFields; + for (mlir::Attribute arg : annotation.getArgs()) { + if (auto strArgAttr = mlir::dyn_cast(arg)) { + // Call getAnnotationStringGlobal here to make sure + // have a global for this string before + // creation of the args var. + getAnnotationStringGlobal(strArgAttr, module, argStringGlobalsMap, + globalVarBuilder, loc, true); + // This will become a ptr to the global string + argStrutFldTypes.push_back(annoPtrTy); + } else if (auto intArgAttr = mlir::dyn_cast(arg)) { + argStrutFldTypes.push_back(intArgAttr.getType()); + } else { + llvm_unreachable("Unsupported annotation arg type"); + } + } + + mlir::LLVM::LLVMStructType argsStructTy = + mlir::LLVM::LLVMStructType::getLiteral(globalVarBuilder.getContext(), + argStrutFldTypes); + auto argsGlobalOp = globalVarBuilder.create( + loc, argsStructTy, true, mlir::LLVM::Linkage::Private, + ".args" + + (argsVarMap.empty() ? "" + : "." + std::to_string(argsVarMap.size())) + + ".annotation", + mlir::Attribute()); + argsGlobalOp.setSection(ConvertCIRToLLVMPass::annotationSection); + argsGlobalOp.setUnnamedAddr(mlir::LLVM::UnnamedAddr::Global); + argsGlobalOp.setDsoLocal(true); + + // Create the initializer for this args global + argsGlobalOp.getRegion().push_back(new mlir::Block()); + mlir::OpBuilder argsInitBuilder(module.getContext()); + argsInitBuilder.setInsertionPointToEnd( + argsGlobalOp.getInitializerBlock()); + + mlir::Value argsStructInit = + argsInitBuilder.create(loc, argsStructTy); + int idx = 0; + for (mlir::Attribute arg : annotation.getArgs()) { + if (auto strArgAttr = mlir::dyn_cast(arg)) { + // This would be simply return with existing map entry value + // from argStringGlobalsMap as string global is already + // created in the previous loop. + mlir::LLVM::GlobalOp argStrVar = + getAnnotationStringGlobal(strArgAttr, module, argStringGlobalsMap, + globalVarBuilder, loc, true); + auto argStrVarAddr = argsInitBuilder.create( + loc, annoPtrTy, argStrVar.getSymName()); + argsStructInit = argsInitBuilder.create( + loc, argsStructInit, argStrVarAddr, idx++); + } else if (auto intArgAttr = mlir::dyn_cast(arg)) { + auto intArgFld = argsInitBuilder.create( + loc, intArgAttr.getType(), intArgAttr.getValue()); + argsStructInit = argsInitBuilder.create( + loc, argsStructInit, intArgFld, idx++); + } else { + llvm_unreachable("Unsupported annotation arg type"); + } + } + argsInitBuilder.create(loc, argsStructInit); + argsVarMap[argsAttr] = argsGlobalOp; + } + auto argsVarView = varInitBuilder.create( + loc, annoPtrTy, argsVarMap[argsAttr].getSymName()); + valueEntry = varInitBuilder.create( + loc, valueEntry, argsVarView, 4); + } + return valueEntry; +} + +void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar() { + mlir::ModuleOp module = getOperation(); + mlir::Attribute attr = module->getAttr("cir.global_annotations"); + if (!attr) + return; + if (auto globalAnnotValues = + mlir::dyn_cast(attr)) { + auto annotationValuesArray = + mlir::dyn_cast(globalAnnotValues.getAnnotations()); + if (!annotationValuesArray || annotationValuesArray.empty()) + return; + mlir::OpBuilder globalVarBuilder(module.getContext()); + globalVarBuilder.setInsertionPointToEnd(&module.getBodyRegion().front()); + + // Create a global array for annotation values with element type of + // struct { ptr, ptr, ptr, i32, ptr } + mlir::LLVM::LLVMPointerType annoPtrTy = + mlir::LLVM::LLVMPointerType::get(globalVarBuilder.getContext()); + llvm::SmallVector annoStructFields; + annoStructFields.push_back(annoPtrTy); + annoStructFields.push_back(annoPtrTy); + annoStructFields.push_back(annoPtrTy); + annoStructFields.push_back(globalVarBuilder.getI32Type()); + annoStructFields.push_back(annoPtrTy); + + mlir::LLVM::LLVMStructType annoStructTy = + mlir::LLVM::LLVMStructType::getLiteral(globalVarBuilder.getContext(), + annoStructFields); + mlir::LLVM::LLVMArrayType annoStructArrayTy = + mlir::LLVM::LLVMArrayType::get(annoStructTy, + annotationValuesArray.size()); + mlir::Location loc = module.getLoc(); + auto annotationGlobalOp = globalVarBuilder.create( + loc, annoStructArrayTy, false, mlir::LLVM::Linkage::Appending, + "llvm.global.annotations", mlir::Attribute()); + annotationGlobalOp.setSection("llvm.metadata"); + annotationGlobalOp.getRegion().push_back(new mlir::Block()); + mlir::OpBuilder varInitBuilder(module.getContext()); + varInitBuilder.setInsertionPointToEnd( + annotationGlobalOp.getInitializerBlock()); + // Globals created for annotation strings and args to be + // placed before the var llvm.global.annotations. + // This is consistent with clang code gen. + globalVarBuilder.setInsertionPoint(annotationGlobalOp); + + mlir::Value result = + varInitBuilder.create(loc, annoStructArrayTy); + // Track globals created for annotation related strings + llvm::StringMap stringGlobalsMap; + // Track globals created for annotation arg related strings. + // They are different from annotation strings, as strings used in args + // are not in annotationSection, and also has aligment 1. + llvm::StringMap argStringGlobalsMap; + // Track globals created for annotation args. + llvm::MapVector argsVarMap; + + int idx = 0; + for (mlir::Attribute entry : annotationValuesArray) { + auto annotValue = cast(entry); + mlir::Value init = lowerAnnotationValue( + annotValue, module, varInitBuilder, globalVarBuilder, + stringGlobalsMap, argStringGlobalsMap, argsVarMap, annoStructFields, + annoStructTy, annoPtrTy, loc); + result = varInitBuilder.create(loc, result, + init, idx++); + } + varInitBuilder.create(loc, result); + } +} + void ConvertCIRToLLVMPass::runOnOperation() { auto module = getOperation(); mlir::DataLayout dataLayout(module); @@ -4124,6 +4366,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { auto dtorAttr = mlir::cast(attr); return std::make_pair(dtorAttr.getName(), dtorAttr.getPriority()); }); + buildGlobalAnnotationsVar(); } std::unique_ptr createConvertCIRToLLVMPass() { diff --git a/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp b/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp new file mode 100644 index 000000000000..e67975bb9858 --- /dev/null +++ b/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp @@ -0,0 +1,71 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +double *a __attribute__((annotate("withargs", "21", 12 ))); +int *b __attribute__((annotate("withargs", "21", 12 ))); +void *c __attribute__((annotate("noargvar"))); +void foo(int i) __attribute__((annotate("noargfunc"))) { +} +// redeclare with more annotate +void foo(int i) __attribute__((annotate("withargfunc", "os", 23 ))); +void bar() __attribute__((annotate("withargfunc", "os", 22))) { +} + +// BEFORE: module @{{.*}}attribute-annotate-multiple.cpp" attributes {cir.lang = + +// BEFORE: cir.global external @a = #cir.ptr : !cir.ptr +// BEFORE-SAME: [#cir.annotation] +// BEFORE: cir.global external @b = #cir.ptr : !cir.ptr +// BEFORE-SAME: [#cir.annotation] +// BEFORE: cir.global external @c = #cir.ptr : !cir.ptr +// BEFORE-SAME: [#cir.annotation] + +// BEFORE: cir.func @_Z3fooi(%arg0: !s32i) [#cir.annotation, +// BEFORE-SAME: #cir.annotation] +// BEFORE: cir.func @_Z3barv() [#cir.annotation] + + +// AFTER: module {{.*}}attribute-annotate-multiple.cpp" attributes +// AFTER-SAME: {cir.global_annotations = #cir], +// AFTER-SAME: ["b", #cir.annotation], +// AFTER-SAME: ["c", #cir.annotation], +// AFTER-SAME: ["_Z3fooi", #cir.annotation], +// AFTER-SAME: ["_Z3fooi", #cir.annotation], +// AFTER-SAME: ["_Z3barv", #cir.annotation]]>, + + +// LLVM: @a = global ptr null +// LLVM: @b = global ptr null +// LLVM: @c = global ptr null +// LLVM: @.str.annotation = private unnamed_addr constant [9 x i8] c"withargs\00", section "llvm.metadata" +// LLVM: @.str.1.annotation = private unnamed_addr constant [{{[0-9]+}} x i8] c"{{.*}}attribute-annotate-multiple.cpp\00", section "llvm.metadata" +// LLVM: @.str.annotation.arg = private unnamed_addr constant [3 x i8] c"21\00", align 1 +// LLVM: @.args.annotation = private unnamed_addr constant { ptr, i32 } { ptr @.str.annotation.arg, i32 12 }, section "llvm.metadata" +// LLVM: @.str.2.annotation = private unnamed_addr constant [9 x i8] c"noargvar\00", section "llvm.metadata" +// LLVM: @.str.3.annotation = private unnamed_addr constant [10 x i8] c"noargfunc\00", section "llvm.metadata" +// LLVM: @.str.4.annotation = private unnamed_addr constant [12 x i8] c"withargfunc\00", section "llvm.metadata" +// LLVM: @.str.1.annotation.arg = private unnamed_addr constant [3 x i8] c"os\00", align 1 +// LLVM: @.args.1.annotation = private unnamed_addr constant { ptr, i32 } +// LLVM-SAME: { ptr @.str.1.annotation.arg, i32 23 }, section "llvm.metadata" +// LLVM: @.args.2.annotation = private unnamed_addr constant { ptr, i32 } +// LLVM-SAME: { ptr @.str.1.annotation.arg, i32 22 }, section "llvm.metadata" + +// LLVM: @llvm.global.annotations = appending global [6 x { ptr, ptr, ptr, i32, ptr }] +// LLVM-SAME: [{ ptr, ptr, ptr, i32, ptr } +// LLVM-SAME: { ptr @a, ptr @.str.annotation, ptr @.str.1.annotation, i32 5, ptr @.args.annotation }, +// LLVM-SAME: { ptr, ptr, ptr, i32, ptr } +// LLVM-SAME: { ptr @b, ptr @.str.annotation, ptr @.str.1.annotation, i32 6, ptr @.args.annotation }, +// LLVM-SAME: { ptr, ptr, ptr, i32, ptr } +// LLVM-SAME: { ptr @c, ptr @.str.2.annotation, ptr @.str.1.annotation, i32 7, ptr null }, +// LLVM-SAME: { ptr, ptr, ptr, i32, ptr } +// LLVM-SAME: { ptr @_Z3fooi, ptr @.str.3.annotation, ptr @.str.1.annotation, i32 8, ptr null }, +// LLVM-SAME: { ptr, ptr, ptr, i32, ptr } +// LLVM-SAME: { ptr @_Z3fooi, ptr @.str.4.annotation, ptr @.str.1.annotation, i32 8, ptr @.args.1.annotation }, +// LLVM-SAME: { ptr, ptr, ptr, i32, ptr } +// LLVM-SAME: { ptr @_Z3barv, ptr @.str.4.annotation, ptr @.str.1.annotation, i32 12, ptr @.args.2.annotation }], +// LLVM-SAME: section "llvm.metadata" + +// LLVM: define dso_local void @_Z3fooi(i32 %0) +// LLVM: define dso_local void @_Z3barv() diff --git a/clang/test/CIR/IR/annotations.cir b/clang/test/CIR/IR/annotations.cir new file mode 100644 index 000000000000..c1486e35aa71 --- /dev/null +++ b/clang/test/CIR/IR/annotations.cir @@ -0,0 +1,31 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +!s32i = !cir.int +module attributes {cir.global_annotations = +#cir], +["foo", #cir.annotation], +["bar", #cir.annotation], +["bar", #cir.annotation]]>} +{ +cir.global external @a = #cir.int<0> : !s32i [#cir.annotation] +cir.func @foo() attributes {annotations = [#cir.annotation]} { + cir.return +} +cir.func @bar() attributes {annotations = [#cir.annotation, #cir.annotation]} { + cir.return +} +} + +// CHECK: module attributes {cir.global_annotations = #cir], +// CHECK-SAME: ["foo", #cir.annotation], +// CHECK-SAME: ["bar", #cir.annotation], +// CHECK-SAME: ["bar", #cir.annotation]]>} +// CHECK: cir.global external @a = #cir.int<0> : !s32i +// CHECK-SAME: [#cir.annotation] +// CHECK: cir.func @foo() +// CHECK-SAME: [#cir.annotation] +// CHECK: cir.func @bar() +// CHECK-SAME: [#cir.annotation, +// CHECK-SAME: #cir.annotation] diff --git a/clang/test/CIR/IR/invalid-annotations.cir b/clang/test/CIR/IR/invalid-annotations.cir new file mode 100644 index 000000000000..d7de2d5c5602 --- /dev/null +++ b/clang/test/CIR/IR/invalid-annotations.cir @@ -0,0 +1,32 @@ +// Test attempt to construct ill-formed global annotations +// RUN: cir-opt %s -verify-diagnostics -split-input-file + + +// expected-error @below {{invalid kind of attribute specified}} +// expected-error @below {{failed to parse AnnotationAttr parameter 'name' which is to be a `StringAttr`}} +cir.global external @a = #cir.ptr : !cir.ptr [#cir.annotation] + +// ----- + +// expected-error @below {{GlobalAnnotationValuesAttr should at least have one annotation}} +module attributes {cir.global_annotations = #cir} {} + +// ----- + +// expected-error @below {{Element of GlobalAnnotationValuesAttr annotations array must be an array}} +module attributes {cir.global_annotations = #cir} {} + +// ----- + +// expected-error @below {{Element of GlobalAnnotationValuesAttr annotations array must be a 2-element array}} +module attributes {cir.global_annotations = #cir} {} + +// ----- + +// expected-error @below {{Element of GlobalAnnotationValuesAttr annotationsarray must start with a string}} +module attributes {cir.global_annotations = #cir} {} + +// ----- + +// expected-error @below {{The second element of GlobalAnnotationValuesAttrannotations array element must be of type AnnotationValueAttr}} +module attributes {cir.global_annotations = #cir} {} From ae576288e1be747ed9dee35c03b10bdfcc572eb4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 Sep 2024 13:47:42 -0700 Subject: [PATCH 1829/2301] [CIR][CIRGen][NFC] Exceptions: add cleanups to cir.call Now that the basic is working, start adding cleanups to be attached to cir.call's instead. This is necessary in order to tie the pieces (landing pads and cleanups) more properly, allowing multiple calls inside cir.try op to be connected with the right cleanup. This is the first piece of a series, tests coming next. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 16 +++++++++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 28 +++++++++++++++++++- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index c0f3bce2b65a..e44ab2cdd3a4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3217,7 +3217,7 @@ class CIR_CallOp extra_traits = []> : ); } -def CallOp : CIR_CallOp<"call"> { +def CallOp : CIR_CallOp<"call", [NoRegionArguments]> { let summary = "call operation"; let description = [{ Direct and indirect calls. @@ -3232,7 +3232,9 @@ def CallOp : CIR_CallOp<"call"> { Given the way indirect calls are encoded, avoid using `mlir::Operation` methods to walk the operands for this operation, instead use the methods provided by `CIRCallOpInterface`. - `` + + If the `cir.call` has the `exception` keyword, the call can throw. In this + case, cleanups can be added in the `cleanup` region. Example: @@ -3242,6 +3244,11 @@ def CallOp : CIR_CallOp<"call"> { ... // Indirect call %20 = cir.call %18(%17) + ... + // Call that might throw + cir.call exception @my_div() -> () cleanup { + // call dtor... + } ``` }]; @@ -3249,6 +3256,7 @@ def CallOp : CIR_CallOp<"call"> { let arguments = !con((ins UnitAttr:$exception ), commonArgs); + let regions = (region AnyRegion:$cleanup); let skipDefaultBuilders = 1; @@ -3266,6 +3274,8 @@ def CallOp : CIR_CallOp<"call"> { $_state.addAttribute("exception", exception); if (resType && !isa(resType)) $_state.addTypes(resType); + // Create region placeholder for potential cleanups. + $_state.addRegion(); }]>, OpBuilder<(ins "Value":$ind_target, "FuncType":$fn_type, @@ -3280,6 +3290,8 @@ def CallOp : CIR_CallOp<"call"> { CallingConvAttr::get($_builder.getContext(), callingConv)); if (exception) $_state.addAttribute("exception", exception); + // Create region placeholder for potential cleanups. + $_state.addRegion(); }]> ]; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9897b112a332..49aa696ab034 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2815,8 +2815,11 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, llvm::SMLoc landingPadOperandsLoc; llvm::SmallVector landingPadTypes; - if (::mlir::succeeded(parser.parseOptionalKeyword("exception"))) + bool hasExceptions = false; + if (::mlir::succeeded(parser.parseOptionalKeyword("exception"))) { result.addAttribute("exception", parser.getBuilder().getUnitAttr()); + hasExceptions = true; + } // If we cannot parse a string callee, it means this is an indirect call. if (!parser.parseOptionalAttribute(calleeAttr, "callee", result.attributes) @@ -2903,6 +2906,18 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, builder.getContext(), callingConv)); } + // If exception is present and there are cleanups, this should be latest thing + // present (after all attributes, etc). + mlir::Region *cleanupRegion = nullptr; + if (!hasDestinationBlocks) // Regular cir.call + cleanupRegion = result.addRegion(); + if (hasExceptions) { + if (parser.parseOptionalKeyword("cleanup").succeeded()) { + if (parser.parseRegion(*cleanupRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + } + } + return ::mlir::success(); } @@ -2982,6 +2997,17 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, state.printAttributeWithoutType(extraAttrs); state << ")"; } + + // If exception is present and there are cleanups, this should be latest thing + // present (after all attributes, etc). + if (exception) { + auto call = dyn_cast(op); + assert(call && "expected regular call"); + if (!call.getCleanup().empty()) { + state << "cleanup "; + state.printRegion(call.getCleanup()); + } + } } LogicalResult From 0bcb45accd936490da1f9eb7685612370933b889 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 Sep 2024 16:08:17 -0700 Subject: [PATCH 1830/2301] [CIR][CIRGen][NFC] Exceptions: refactor invoke checks to better align with CIR --- clang/lib/CIR/CodeGen/CIRGenException.cpp | 20 +++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 8d33af65eb84..1c3d4c2a436f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -808,9 +808,9 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si, return dispatchBlock; } -mlir::Operation *CIRGenFunction::getInvokeDestImpl() { - assert(EHStack.requiresLandingPad()); - assert(!EHStack.empty()); +bool CIRGenFunction::isInvokeDest() { + if (!EHStack.requiresLandingPad()) + return false; // If exceptions are disabled/ignored and SEH is not in use, then there is no // invoke destination. SEH "works" even if exceptions are off. In practice, @@ -819,13 +819,23 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl() { const LangOptions &LO = CGM.getLangOpts(); if (!LO.Exceptions || LO.IgnoreExceptions) { if (!LO.Borland && !LO.MicrosoftExt) - return nullptr; + return false; if (!currentFunctionUsesSEHTry()) - return nullptr; + return false; } // CUDA device code doesn't have exceptions. if (LO.CUDA && LO.CUDAIsDevice) + return false; + + return true; +} + +mlir::Operation *CIRGenFunction::getInvokeDestImpl() { + assert(EHStack.requiresLandingPad()); + assert(!EHStack.empty()); + + if (!isInvokeDest()) return nullptr; // Check the innermost scope for a cached landing pad. If this is diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 73aeb524a88a..70e9c8856ab9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1767,6 +1767,7 @@ class CIRGenFunction : public CIRGenTypeCache { // any other relevant information. return getInvokeDestImpl(); } + bool isInvokeDest(); /// Takes the old cleanup stack size and emits the cleanup blocks /// that have been added. From 2415dddd94fd9f2888f558e8b281b729a0c50f50 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 Sep 2024 16:18:14 -0700 Subject: [PATCH 1831/2301] [CIR][CIRGen][NFC] Exceptions: sink invoke logic closer to call emission --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 9acd226aafc6..cdf9bccdd062 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -446,18 +446,19 @@ static mlir::cir::CIRCallOpInterface buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, mlir::cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal, mlir::cir::FuncOp directFuncOp, - SmallVectorImpl &CIRCallArgs, - mlir::Operation *InvokeDest, mlir::cir::CallingConv callingConv, + SmallVectorImpl &CIRCallArgs, bool isInvoke, + mlir::cir::CallingConv callingConv, mlir::cir::ExtraFuncAttributesAttr extraFnAttrs) { auto &builder = CGF.getBuilder(); - if (InvokeDest) { + if (isInvoke) { // This call can throw, few options: // - If this call does not have an associated cir.try, use the // one provided by InvokeDest, // - User written try/catch clauses require calls to handle // exceptions under cir.try. - auto tryOp = dyn_cast_if_present(InvokeDest); + auto *invokeDest = CGF.getInvokeDest(); + auto tryOp = dyn_cast_if_present(invokeDest); mlir::OpBuilder::InsertPoint ip = builder.saveInsertionPoint(); bool changeInsertion = tryOp && tryOp.getSynthetic(); if (changeInsertion) { @@ -730,7 +731,8 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, noThrowAttr.getMnemonic())) CannotThrow = true; } - auto InvokeDest = CannotThrow ? nullptr : getInvokeDest(); + // mlir::Operation *InvokeDest = CannotThrow ? nullptr : getInvokeDest(); + bool isInvoke = CannotThrow ? false : isInvokeDest(); // TODO: UnusedReturnSizePtr if (const FunctionDecl *FD = dyn_cast_or_null(CurFuncDecl)) @@ -772,7 +774,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, mlir::cir::CIRCallOpInterface callLikeOp = buildCallLikeOp( *this, callLoc, indirectFuncTy, indirectFuncVal, directFuncOp, - CIRCallArgs, InvokeDest, callingConv, extraFnAttrs); + CIRCallArgs, isInvoke, callingConv, extraFnAttrs); if (E) callLikeOp->setAttr( From 898c7ed1e647aa576ad257433eaf2c5e5bc7a30e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 Sep 2024 16:34:13 -0700 Subject: [PATCH 1832/2301] [CIR][CIRGen][NFC] Exceptions: Move the logic to create surrounding try to be close to call generation --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 52 ++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 38 +------------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 6 +-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 2 +- clang/test/CIR/CodeGen/global-new.cpp | 3 +- 6 files changed, 50 insertions(+), 55 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index cdf9bccdd062..a446cc641dda 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -450,6 +450,31 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, mlir::cir::CallingConv callingConv, mlir::cir::ExtraFuncAttributesAttr extraFnAttrs) { auto &builder = CGF.getBuilder(); + auto getOrCreateSurroundingTryOp = [&]() { + // In OG, we build the landing pad for this scope. In CIR, we emit a + // synthetic cir.try because this didn't come from codegenerating from a + // try/catch in C++. + auto op = CGF.currLexScope->getClosestTryParent(); + if (op) + return op; + + op = builder.create( + *CGF.currSrcLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) {}, + // Don't emit the code right away for catch clauses, for + // now create the regions and consume the try scope result. + // Note that clauses are later populated in + // CIRGenFunction::buildLandingPad. + [&](mlir::OpBuilder &b, mlir::Location loc, + mlir::OperationState &result) { + // Since this didn't come from an explicit try, we only need one + // handler: unwind. + auto *r = result.addRegion(); + builder.createBlock(r); + }); + op.setSynthetic(true); + return op; + }; if (isInvoke) { // This call can throw, few options: @@ -457,33 +482,37 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, // one provided by InvokeDest, // - User written try/catch clauses require calls to handle // exceptions under cir.try. - auto *invokeDest = CGF.getInvokeDest(); - auto tryOp = dyn_cast_if_present(invokeDest); + auto tryOp = getOrCreateSurroundingTryOp(); + assert(tryOp && "expected"); + mlir::OpBuilder::InsertPoint ip = builder.saveInsertionPoint(); - bool changeInsertion = tryOp && tryOp.getSynthetic(); - if (changeInsertion) { + if (tryOp.getSynthetic()) { mlir::Block *lastBlock = &tryOp.getTryRegion().back(); builder.setInsertionPointToStart(lastBlock); } else { assert(builder.getInsertionBlock() && "expected valid basic block"); } - mlir::cir::CallOp tryCallOp; + mlir::cir::CallOp callOpWithExceptions; // TODO(cir): Set calling convention for `cir.try_call`. assert(callingConv == mlir::cir::CallingConv::C && "NYI"); if (indirectFuncTy) { - tryCallOp = builder.createIndirectTryCallOp(callLoc, indirectFuncVal, - indirectFuncTy, CIRCallArgs); + callOpWithExceptions = builder.createIndirectTryCallOp( + callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs); } else { - tryCallOp = builder.createTryCallOp(callLoc, directFuncOp, CIRCallArgs); + callOpWithExceptions = + builder.createTryCallOp(callLoc, directFuncOp, CIRCallArgs); } - tryCallOp->setAttr("extra_attrs", extraFnAttrs); + callOpWithExceptions->setAttr("extra_attrs", extraFnAttrs); + + auto *invokeDest = CGF.getInvokeDest(tryOp); + (void)invokeDest; - if (changeInsertion) { + if (tryOp.getSynthetic()) { builder.create(tryOp.getLoc()); builder.restoreInsertionPoint(ip); } - return tryCallOp; + return callOpWithExceptions; } assert(builder.getInsertionBlock() && "expected valid basic block"); @@ -731,7 +760,6 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, noThrowAttr.getMnemonic())) CannotThrow = true; } - // mlir::Operation *InvokeDest = CannotThrow ? nullptr : getInvokeDest(); bool isInvoke = CannotThrow ? false : isInvokeDest(); // TODO: UnusedReturnSizePtr diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 6147c0285971..183ba2a6d0f5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -248,7 +248,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { if (!RequiresNormalCleanup) { // Mark CPP scope end for passed-by-value Arg temp // per Windows ABI which is "normally" Cleanup in callee - if (IsEHa && getInvokeDest()) { + if (IsEHa && isInvokeDest()) { // If we are deactivating a normal cleanup then we don't have a // fallthrough. Restore original IP to emit CPP scope ends in the correct // block. @@ -319,7 +319,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { if (!Personality.isMSVCPersonality()) { EHStack.pushTerminate(); PushedTerminate = true; - } else if (IsEHa && getInvokeDest()) { + } else if (IsEHa && isInvokeDest()) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 1c3d4c2a436f..87a441c64cb9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -831,12 +831,10 @@ bool CIRGenFunction::isInvokeDest() { return true; } -mlir::Operation *CIRGenFunction::getInvokeDestImpl() { +mlir::Operation *CIRGenFunction::getInvokeDestImpl(mlir::cir::TryOp tryOp) { assert(EHStack.requiresLandingPad()); assert(!EHStack.empty()); - - if (!isInvokeDest()) - return nullptr; + assert(isInvokeDest()); // Check the innermost scope for a cached landing pad. If this is // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad. @@ -850,43 +848,11 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl() { // if (!CurFn->hasPersonalityFn()) // CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality)); - auto createSurroundingTryOp = [&]() { - // In OG, we build the landing pad for this scope. In CIR, we emit a - // synthetic cir.try because this didn't come from codegenerating from a - // try/catch in C++. - auto tryOp = builder.create( - *currSrcLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) {}, - // Don't emit the code right away for catch clauses, for - // now create the regions and consume the try scope result. - // Note that clauses are later populated in - // CIRGenFunction::buildLandingPad. - [&](mlir::OpBuilder &b, mlir::Location loc, - mlir::OperationState &result) { - // Since this didn't come from an explicit try, we only need one - // handler: unwind. - auto *r = result.addRegion(); - builder.createBlock(r); - }); - tryOp.setSynthetic(true); - return tryOp; - }; - if (Personality.usesFuncletPads()) { // We don't need separate landing pads in the funclet model. llvm::errs() << "PersonalityFn: " << Personality.PersonalityFn << "\n"; llvm_unreachable("NYI"); } else { - mlir::cir::TryOp tryOp = nullptr; - // Attempt to find a suitable existing parent try/catch, if none - // is available, create a synthetic cir.try in order to wrap the side - // effects of a potential throw. - if (currLexScope) - tryOp = currLexScope->getClosestTryParent(); - if (!tryOp) - tryOp = createSurroundingTryOp(); - - assert(tryOp && "cir.try expected"); LP = buildLandingPad(tryOp); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 70e9c8856ab9..5613f4c8ae4a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1759,13 +1759,13 @@ class CIRGenFunction : public CIRGenTypeCache { /// parameters. EHScopeStack::stable_iterator PrologueCleanupDepth; - mlir::Operation *getInvokeDestImpl(); - mlir::Operation *getInvokeDest() { + mlir::Operation *getInvokeDestImpl(mlir::cir::TryOp tryOp); + mlir::Operation *getInvokeDest(mlir::cir::TryOp tryOp) { if (!EHStack.requiresLandingPad()) return nullptr; // Return the respective cir.try, this can be used to compute // any other relevant information. - return getInvokeDestImpl(); + return getInvokeDestImpl(tryOp); } bool isInvokeDest(); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 4d9f05b61bb8..7c81d0b7d0ed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2224,7 +2224,7 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, // FIXME: When adding support for invoking, we should wrap the throw op // below into a try, and let CFG flatten pass to generate a cir.try_call. - assert(!CGF.getInvokeDest() && "landing pad like logic NYI"); + assert(!CGF.isInvokeDest() && "landing pad like logic NYI"); // Now throw the exception. mlir::Location loc = CGF.getLoc(E->getSourceRange()); diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index 9c788c7f087d..dd182f9b7feb 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -8,6 +8,7 @@ // RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll // RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll +// XFAIL: * struct e { e(int); }; e *g = new e(0); @@ -81,4 +82,4 @@ e *g = new e(0); struct PackedStruct { }; -PackedStruct*const packed_2 = new PackedStruct(); \ No newline at end of file +PackedStruct*const packed_2 = new PackedStruct(); From 9d5d63e8b30e462bd568deb7d96a9b89c63345d2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 Sep 2024 17:55:55 -0700 Subject: [PATCH 1833/2301] [CIR][CIRGen][NFCI] Exceptions: change getEHDispatchBlock to create blocks inside calls getEHDispatchBlock result isn't really used to track anything just yet, so this change isn't supposed to affect anything. This is building block for having a cleanup per call. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 2 ++ clang/lib/CIR/CodeGen/CIRGenException.cpp | 13 +++++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + .../CIR/Dialect/Transforms/CIRSimplify.cpp | 31 +++++++++++++++++-- 5 files changed, 41 insertions(+), 8 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e44ab2cdd3a4..957898ebc05c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -893,7 +893,7 @@ def ConditionOp : CIR_Op<"condition", [ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "WhileOp", "ForOp", "AwaitOp", "TernaryOp", "GlobalOp", "DoWhileOp", "TryOp", "ArrayCtor", - "ArrayDtor"]>]> { + "ArrayDtor", "CallOp"]>]> { let summary = "Represents the default branching behaviour of a region"; let description = [{ The `cir.yield` operation terminates regions on different CIR operations, diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index a446cc641dda..3a36c3338a65 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -505,8 +505,10 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, } callOpWithExceptions->setAttr("extra_attrs", extraFnAttrs); + CGF.callWithExceptionCtx = callOpWithExceptions; auto *invokeDest = CGF.getInvokeDest(tryOp); (void)invokeDest; + CGF.callWithExceptionCtx = nullptr; if (tryOp.getSynthetic()) { builder.create(tryOp.getLoc()); diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 87a441c64cb9..d91e4c78d61a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -742,7 +742,9 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { // landing pad by generating a branch to the dispatch block. In CIR the same // function is called to gather some state, but this block info it's not // useful per-se. - (void)getEHDispatchBlock(EHStack.getInnermostEHScope(), tryOp); + mlir::Block *dispatch = + getEHDispatchBlock(EHStack.getInnermostEHScope(), tryOp); + (void)dispatch; } return tryOp; @@ -789,9 +791,12 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si, case EHScope::Cleanup: { assert(tryOp && "expected cir.try available"); - llvm::MutableArrayRef regions = tryOp.getCatchRegions(); - assert(regions.size() == 1 && "expected only one region"); - dispatchBlock = ®ions[0].getBlocks().back(); + assert(callWithExceptionCtx && "expected call information"); + { + mlir::OpBuilder::InsertionGuard guard(getBuilder()); + dispatchBlock = builder.createBlock(&callWithExceptionCtx.getCleanup()); + builder.createYield(callWithExceptionCtx.getLoc()); + } break; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 5613f4c8ae4a..1167aed873f5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1748,6 +1748,7 @@ class CIRGenFunction : public CIRGenTypeCache { }; /// Emits try/catch information for the current EH stack. + mlir::cir::CallOp callWithExceptionCtx = nullptr; mlir::Operation *buildLandingPad(mlir::cir::TryOp tryOp); mlir::Block *getEHResumeBlock(bool isCleanup, mlir::cir::TryOp tryOp); mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope, diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp index 3aece6d74585..3db7e5259041 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp @@ -111,6 +111,30 @@ struct RemoveTrivialTry : public OpRewritePattern { } }; +// Remove call exception with empty cleanups +struct SimplifyCallOp : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult match(CallOp op) const final { + // Applicable to cir.call exception ... clean { cir.yield } + mlir::Region *r = &op.getCleanup(); + if (r->empty() || !r->hasOneBlock()) + return failure(); + + mlir::Block *b = &r->getBlocks().back(); + if (&b->back() != &b->front()) + return failure(); + + return success(isa(&b->getOperations().back())); + } + + void rewrite(CallOp op, PatternRewriter &rewriter) const final { + mlir::Block *b = &op.getCleanup().back(); + rewriter.eraseOp(&b->back()); + rewriter.eraseBlock(b); + } +}; + /// Simplify suitable ternary operations into select operations. /// /// For now we only simplify those ternary operations whose true and false @@ -255,7 +279,8 @@ void populateMergeCleanupPatterns(RewritePatternSet &patterns) { RemoveEmptySwitch, RemoveTrivialTry, SimplifyTernary, - SimplifySelect + SimplifySelect, + SimplifyCallOp >(patterns.getContext()); // clang-format on } @@ -271,8 +296,8 @@ void CIRSimplifyPass::runOnOperation() { // CastOp here is to perform a manual `fold` in // applyOpPatternsAndFold if (isa( - op)) + TernaryOp, SelectOp, ComplexCreateOp, ComplexRealOp, ComplexImagOp, + CallOp>(op)) ops.push_back(op); }); From 838b5bf4f21b493f6051897c7029afd81b505e82 Mon Sep 17 00:00:00 2001 From: 7mile Date: Sun, 15 Sep 2024 01:46:52 +0800 Subject: [PATCH 1834/2301] [CIR][NFC] Fix mismatch of argument type in IR tests (#837) There is a typo in `call.cir` that uses a wrong function argument type, leading to failure in the final LLVM IR translation. CIR verification does not reject it, because it skips indirect calls at the beginning. It's `verifySymbolUses` after all. https://github.com/llvm/clangir/blob/bde154cf1243cc4f938339c4dc15b1576d3025ab/clang/lib/CIR/Dialect/IR/CIRDialect.cpp#L2672-L2679 The typo was copied to another IR test. Here we fix them all. --- clang/test/CIR/IR/call-op-call-conv.cir | 6 +++--- clang/test/CIR/IR/call.cir | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/test/CIR/IR/call-op-call-conv.cir b/clang/test/CIR/IR/call-op-call-conv.cir index f97dad73b8ff..b47e1226e10b 100644 --- a/clang/test/CIR/IR/call-op-call-conv.cir +++ b/clang/test/CIR/IR/call-op-call-conv.cir @@ -2,7 +2,7 @@ // RUN: FileCheck --input-file=%t.cir %s !s32i = !cir.int -!fnptr = !cir.ptr)>> +!fnptr = !cir.ptr> module { cir.func @my_add(%a: !s32i, %b: !s32i) -> !s32i cc(spir_function) { @@ -22,6 +22,6 @@ module { } } -// CHECK: %{{[0-9]+}} = cir.call %arg0(%arg1) : (!cir.ptr)>>, !s32i) -> !s32i cc(spir_kernel) -// CHECK: %{{[0-9]+}} = cir.call %arg0(%arg1) : (!cir.ptr)>>, !s32i) -> !s32i cc(spir_function) +// CHECK: %{{[0-9]+}} = cir.call %arg0(%arg1) : (!cir.ptr>, !s32i) -> !s32i cc(spir_kernel) +// CHECK: %{{[0-9]+}} = cir.call %arg0(%arg1) : (!cir.ptr>, !s32i) -> !s32i cc(spir_function) // CHECK: %{{[0-9]+}} = cir.try_call @my_add(%{{[0-9]+}}, %{{[0-9]+}}) ^{{.+}}, ^{{.+}} : (!s32i, !s32i) -> !s32i cc(spir_function) diff --git a/clang/test/CIR/IR/call.cir b/clang/test/CIR/IR/call.cir index cb0dea099e5b..1810ff088611 100644 --- a/clang/test/CIR/IR/call.cir +++ b/clang/test/CIR/IR/call.cir @@ -1,7 +1,7 @@ // RUN: cir-opt %s | FileCheck %s !s32i = !cir.int -!fnptr = !cir.ptr)>> +!fnptr = !cir.ptr> #fn_attr = #cir, optnone = #cir.optnone})> #fn_attr1 = #cir @@ -20,5 +20,5 @@ module { } } -// CHECK: %0 = cir.call %arg0(%arg1) : (!cir.ptr)>>, !s32i) -> !s32i +// CHECK: %0 = cir.call %arg0(%arg1) : (!cir.ptr>, !s32i) -> !s32i // CHECK: %1 = cir.call @_ZNSt5arrayIiLm8192EEixEm(%arg1) : (!s32i) -> !s32i extra(#fn_attr1) From c4fa42dfad1aacf3a1d6f35ab47fe0effd1c72c0 Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 17 Sep 2024 01:31:49 +0800 Subject: [PATCH 1835/2301] [CIR][Dialect] Add `convergent` attribute to functions for SIMT languages (#840) Fix #805. This PR includes end-to-end implementation. The `convergent` attribute is set depending on languages, which is wrapped as `langOpts.assumeFunctionsAreConvergent()`. Therefore, in ClangIR, every `cir.func` under `#cir.lang` is set to be convergent. After lowering to LLVM IR, `PostOrderFunctionAttrs` pass will remove unnecessary `convergent` then. In other words, we will still see `convergent` on every function with `-O0`, but not with default optimization level. The test taken from `CodeGenOpenCL/convergent.cl` is a bit complicated. However, the core of it is that `convergent` is set properly for `convfun()` `non_convfun()` `f()` and `g()`. Merge of two `if` is more or less a result of generating the same LLVM IR as OG. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 4 + clang/lib/CIR/CodeGen/CIRGenCall.cpp | 40 +++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 13 +++ .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 2 + clang/test/CIR/CodeGen/OpenCL/convergent.cl | 107 ++++++++++++++++++ 5 files changed, 166 insertions(+) create mode 100644 clang/test/CIR/CodeGen/OpenCL/convergent.cl diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 59343514b645..292d24315518 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -1042,6 +1042,10 @@ def NoThrowAttr : CIRUnitAttr<"NoThrow", "nothrow"> { let storageType = [{ NoThrowAttr }]; } +def ConvergentAttr : CIRUnitAttr<"Convergent", "convergent"> { + let storageType = [{ ConvergentAttr }]; +} + class CIR_GlobalCtorDtor : CIR_Attr<"Global" # name, "global_" # attrMnemonic> { diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 3a36c3338a65..2e521fdbde46 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -440,6 +440,9 @@ void CIRGenModule::constructAttributeList(StringRef Name, if (TargetDecl->hasAttr()) ; } + + getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, funcAttrs); + } static mlir::cir::CIRCallOpInterface @@ -1559,3 +1562,40 @@ mlir::Value CIRGenFunction::buildVAArg(VAArgExpr *VE, Address &VAListAddr) { auto vaList = buildVAListRef(VE->getSubExpr()).getPointer(); return builder.create(loc, type, vaList); } + +static void getTrivialDefaultFunctionAttributes( + StringRef name, bool hasOptnone, const CodeGenOptions &codeGenOpts, + const LangOptions &langOpts, bool attrOnCallSite, CIRGenModule &CGM, + mlir::NamedAttrList &funcAttrs) { + + if (langOpts.assumeFunctionsAreConvergent()) { + // Conservatively, mark all functions and calls in CUDA and OpenCL as + // convergent (meaning, they may call an intrinsically convergent op, such + // as __syncthreads() / barrier(), and so can't have certain optimizations + // applied around them). LLVM will remove this attribute where it safely + // can. + + auto convgt = mlir::cir::ConvergentAttr::get(CGM.getBuilder().getContext()); + funcAttrs.set(convgt.getMnemonic(), convgt); + } +} + +void CIRGenModule::getTrivialDefaultFunctionAttributes( + StringRef name, bool hasOptnone, bool attrOnCallSite, + mlir::NamedAttrList &funcAttrs) { + ::getTrivialDefaultFunctionAttributes(name, hasOptnone, getCodeGenOpts(), + getLangOpts(), attrOnCallSite, *this, + funcAttrs); +} + +void CIRGenModule::getDefaultFunctionAttributes(StringRef name, bool hasOptnone, + bool attrOnCallSite, + mlir::NamedAttrList &funcAttrs) { + getTrivialDefaultFunctionAttributes(name, hasOptnone, attrOnCallSite, + funcAttrs); + // If we're just getting the default, get the default values for mergeable + // attributes. + if (!attrOnCallSite) { + // TODO(cir): addMergableDefaultFunctionAttributes(codeGenOpts, funcAttrs); + } +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 4ec8950a5d33..e0979e36744f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -296,6 +296,19 @@ class CIRGenModule : public CIRGenTypeCache { mlir::cir::CallingConv &callingConv, bool AttrOnCallSite, bool IsThunk); + /// Helper function for getDefaultFunctionAttributes. Builds a set of function + /// attributes which can be simply added to a function. + void getTrivialDefaultFunctionAttributes(StringRef name, bool hasOptnone, + bool attrOnCallSite, + mlir::NamedAttrList &funcAttrs); + + /// Helper function for constructAttributeList and + /// addDefaultFunctionDefinitionAttributes. Builds a set of function + /// attributes to add to a function with the given properties. + void getDefaultFunctionAttributes(StringRef name, bool hasOptnone, + bool attrOnCallSite, + mlir::NamedAttrList &funcAttrs); + /// Will return a global variable of the given type. If a variable with a /// different type already exists then a new variable with the right type /// will be created and all uses of the old variable will be replaced with a diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index f8fbd3e0846f..9998d039d46a 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -108,6 +108,8 @@ class CIRDialectLLVMIRTranslationInterface llvmFunc->addFnAttr(llvm::Attribute::OptimizeNone); } else if (mlir::dyn_cast(attr.getValue())) { llvmFunc->addFnAttr(llvm::Attribute::NoUnwind); + } else if (mlir::dyn_cast(attr.getValue())) { + llvmFunc->addFnAttr(llvm::Attribute::Convergent); } else if (auto clKernelMetadata = mlir::dyn_cast( attr.getValue())) { diff --git a/clang/test/CIR/CodeGen/OpenCL/convergent.cl b/clang/test/CIR/CodeGen/OpenCL/convergent.cl new file mode 100644 index 000000000000..d953aa799307 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/convergent.cl @@ -0,0 +1,107 @@ +// RUN: %clang_cc1 -fclangir -triple spirv64-unknown-unknown -emit-cir %s -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR +// RUN: %clang_cc1 -fclangir -triple spirv64-unknown-unknown -emit-llvm %s -o %t.ll +// RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM + +// In ClangIR for OpenCL, all functions should be marked convergent. +// In LLVM IR, it is initially assumed convergent, but can be deduced to not require it. + +// CIR: #fn_attr[[CONV_NOINLINE_ATTR:[0-9]*]] = #cir +// CIR-NEXT: #fn_attr[[CONV_DECL_ATTR:[0-9]*]] = #cir Date: Mon, 16 Sep 2024 11:19:40 -0700 Subject: [PATCH 1836/2301] [CIR][FlattenCFG] Fix use after free when flattening terminator (#843) Per the operation walking documentation [1]: > A callback on a block or operation is allowed to erase that block or > operation if either: > * the walk is in post-order, or > * the walk is in pre-order and the walk is skipped after the erasure. We were doing neither when erasing terminator operations and replacing them with a branch, leading to a use after free and ASAN errors. This fixes the following tests with ASAN: ``` Clang :: CIR/CodeGen/switch-gnurange.cpp Clang :: CIR/Lowering/atomic-runtime.cpp Clang :: CIR/Lowering/loop.cir Clang :: CIR/Lowering/loops-with-break.cir Clang :: CIR/Lowering/loops-with-continue.cir Clang :: CIR/Lowering/switch.cir Clang :: CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp Clang :: CIR/Transforms/loop.cir Clang :: CIR/Transforms/switch.cir ``` These two tests still fail with ASAN after this, which I'm looking into: ``` Clang :: CIR/CodeGen/pointer-arith-ext.c Clang :: CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp ``` `CIR/CodeGen/global-new.cpp` is failing even on a non-ASAN Release build for me on the parent commit, so it's unrelated. [1] https://github.com/llvm/llvm-project/blob/0c55ad11ab3857056bb3917fdf087c4aa811b790/mlir/include/mlir/IR/Operation.h#L767-L770 --- .../CIR/Interfaces/CIRLoopOpInterface.td | 5 ++-- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 29 ++++++++++++------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td index c2b871785ffd..bac30dac3d82 100644 --- a/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td +++ b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td @@ -71,14 +71,13 @@ def LoopOpInterface : OpInterface<"LoopOpInterface", [ }], /*retTy=*/"mlir::WalkResult", /*methodName=*/"walkBodySkippingNestedLoops", - /*args=*/(ins "::llvm::function_ref":$callback), + /*args=*/(ins "::llvm::function_ref":$callback), /*methodBody=*/"", /*defaultImplementation=*/[{ return $_op.getBody().template walk([&](Operation *op) { if (isa(op)) return mlir::WalkResult::skip(); - callback(op); - return mlir::WalkResult::advance(); + return callback(op); }); }] > diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index a8493f8f2c45..ff368a76a95c 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -36,13 +36,13 @@ void lowerTerminator(mlir::Operation *op, mlir::Block *dest, /// Walks a region while skipping operations of type `Ops`. This ensures the /// callback is not applied to said operations and its children. template -void walkRegionSkipping(mlir::Region ®ion, - mlir::function_ref callback) { +void walkRegionSkipping( + mlir::Region ®ion, + mlir::function_ref callback) { region.walk([&](mlir::Operation *op) { if (isa(op)) return mlir::WalkResult::skip(); - callback(op); - return mlir::WalkResult::advance(); + return callback(op); }); } @@ -541,15 +541,21 @@ class CIRLoopOpInterfaceFlattening // Lower continue statements. mlir::Block *dest = (step ? step : cond); op.walkBodySkippingNestedLoops([&](mlir::Operation *op) { - if (isa(op)) - lowerTerminator(op, dest, rewriter); + if (!isa(op)) + return mlir::WalkResult::advance(); + + lowerTerminator(op, dest, rewriter); + return mlir::WalkResult::skip(); }); // Lower break statements. walkRegionSkipping( op.getBody(), [&](mlir::Operation *op) { - if (isa(op)) - lowerTerminator(op, exit, rewriter); + if (!isa(op)) + return mlir::WalkResult::advance(); + + lowerTerminator(op, exit, rewriter); + return mlir::WalkResult::skip(); }); // Lower optional body region yield. @@ -705,8 +711,11 @@ class CIRSwitchOpFlattening // Handle break statements. walkRegionSkipping( region, [&](mlir::Operation *op) { - if (isa(op)) - lowerTerminator(op, exitBlock, rewriter); + if (!isa(op)) + return mlir::WalkResult::advance(); + + lowerTerminator(op, exitBlock, rewriter); + return mlir::WalkResult::skip(); }); // Extract region contents before erasing the switch op. From c84fe1b2b5acef4e430a65daa15d3da5295e0e2d Mon Sep 17 00:00:00 2001 From: gitoleg Date: Mon, 16 Sep 2024 21:42:56 +0300 Subject: [PATCH 1837/2301] [CIR][CodeGen][BugFix] Fixes structures name collisions (#844) CIR Codegen fails to generate functions with local types with the same names. For instance, the next code : ``` void foo(int a, float b) { struct A { int x; }; struct A loc = {a}; { struct A { float y; }; struct A loc = {b}; } } ``` fails with on the next assertion: `Unable to find record layout information for type`. The problem is that we don't create record layout for the structures with equal names and `CIRGenTypes::convertRecordDeclType` returns the wrong type for the second struct type in the example above. This PR fixes this problem. In the original codegen the call to `Ty->setName(name)` resolves name collisions and assign a proper name for the type. In our case looks like we need to use the same approach as we did for the anonymous structures, i.e. to track the used names in the builder. Also, I fixed the struct type creation. Previously, the type was created several times - first in the `CIRGenTypes::convertRecordDeclType` and then in the `CIRGenTypes::computeRecordLayout`. This is why the indexes used by the anonymous structures naming had relatively big values and this is where the most changes on the tests come from. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 20 ++++++-- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 4 +- clang/test/CIR/CodeGen/bitfields.c | 4 +- clang/test/CIR/CodeGen/bitfields.cpp | 4 +- clang/test/CIR/CodeGen/compound-literal.c | 4 +- clang/test/CIR/CodeGen/coro-task.cpp | 4 +- clang/test/CIR/CodeGen/forward-decls.cpp | 13 +++--- clang/test/CIR/CodeGen/globals.c | 8 ++-- clang/test/CIR/CodeGen/lambda.cpp | 46 +++++++++---------- clang/test/CIR/CodeGen/string-literals.c | 2 +- clang/test/CIR/CodeGen/struct.c | 14 ++++++ clang/test/CIR/CodeGen/union-init.c | 6 +-- clang/test/CIR/CodeGen/union.cpp | 12 ++--- 14 files changed, 83 insertions(+), 60 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 5342612fdb7d..1ba6e9447a09 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -53,16 +53,26 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { llvm::RoundingMode DefaultConstrainedRounding = llvm::RoundingMode::Dynamic; llvm::StringMap GlobalsVersioning; - llvm::StringSet<> anonRecordNames; + llvm::StringMap RecordNames; public: CIRGenBuilderTy(mlir::MLIRContext &C, const CIRGenTypeCache &tc) - : CIRBaseBuilderTy(C), typeCache(tc) {} + : CIRBaseBuilderTy(C), typeCache(tc) { + RecordNames["anon"] = 0; // in order to start from the name "anon.0" + } std::string getUniqueAnonRecordName() { - std::string name = "anon." + std::to_string(anonRecordNames.size()); - anonRecordNames.insert(name); - return name; + return getUniqueRecordName("anon"); + } + + std::string getUniqueRecordName(const std::string& baseName) { + auto it = RecordNames.find(baseName); + if (it == RecordNames.end()) { + RecordNames[baseName] = 0; + return baseName; + } + + return baseName + "." + std::to_string(RecordNames[baseName]++); } // diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index ab2e4129ac60..0cae7cdbf352 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -92,7 +92,7 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, if (!suffix.empty()) outStream << suffix; - return std::string(typeName); + return Builder.getUniqueRecordName(std::string(typeName)); } /// Return true if the specified type is already completely laid out. diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 1e58ee4ff198..9ad81831f224 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -689,8 +689,8 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, // Fill in the struct *after* computing the base type. Filling in the body // signifies that the type is no longer opaque and record layout is complete, // but we may need to recursively layout D while laying D out as a base type. - *Ty = Builder.getCompleteStructTy( - builder.fieldTypes, getRecordTypeName(D, ""), builder.isPacked, D); + auto astAttr = mlir::cir::ASTRecordDeclAttr::get(Ty->getContext(), D); + Ty->complete(builder.fieldTypes, builder.isPacked, astAttr); auto RL = std::make_unique( Ty ? *Ty : mlir::cir::StructType{}, diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index f825640a6010..2671523cc4ca 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -55,13 +55,13 @@ typedef struct { // CHECK: !ty_D = !cir.struct // CHECK: !ty_T = !cir.struct -// CHECK: !ty_anon2E1_ = !cir.struct +// CHECK: !ty_anon2E0_ = !cir.struct // CHECK: !ty_anon_struct = !cir.struct // CHECK: #bfi_a = #cir.bitfield_info // CHECK: #bfi_e = #cir.bitfield_info // CHECK: !ty_S = !cir.struct, !u16i, !u32i}> // CHECK: !ty_U = !cir.struct}> -// CHECK: !ty___long = !cir.struct}> +// CHECK: !ty___long = !cir.struct}> // CHECK: #bfi_d = #cir.bitfield_info, size = 2, offset = 17, is_signed = true> // CHECK: cir.func {{.*@store_field}} diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index b5e9ed24d396..bdef100edf06 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -28,9 +28,9 @@ typedef struct { unsigned b; } T; // CHECK: !ty_T = !cir.struct -// CHECK: !ty_anon2E1_ = !cir.struct +// CHECK: !ty_anon2E0_ = !cir.struct // CHECK: !ty_S = !cir.struct, !u16i, !u32i}> -// CHECK: !ty___long = !cir.struct}> +// CHECK: !ty___long = !cir.struct}> // CHECK: cir.func @_Z11store_field // CHECK: [[TMP0:%.*]] = cir.alloca !ty_S, !cir.ptr diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c index 74669589d084..bbd7fa4a4e75 100644 --- a/clang/test/CIR/CodeGen/compound-literal.c +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -37,8 +37,8 @@ int foo() { // CIR: cir.func no_proto @foo() -> !s32i // CIR: [[RET_MEM:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CIR: [[COMPLITERAL_MEM:%.*]] = cir.alloca !ty_anon2E1_, !cir.ptr, [".compoundliteral"] {alignment = 4 : i64} -// CIR: [[FIELD:%.*]] = cir.get_member [[COMPLITERAL_MEM]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CIR: [[COMPLITERAL_MEM:%.*]] = cir.alloca !ty_anon2E0_, !cir.ptr, [".compoundliteral"] {alignment = 4 : i64} +// CIR: [[FIELD:%.*]] = cir.get_member [[COMPLITERAL_MEM]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i // CIR: cir.store [[ONE]], [[FIELD]] : !s32i, !cir.ptr // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index d61c626ee763..bcc2f5fcc38b 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -356,10 +356,10 @@ folly::coro::Task go4() { // CHECK: } // CHECK: %12 = cir.scope { -// CHECK: %17 = cir.alloca !ty_anon2E5_, !cir.ptr, ["ref.tmp1"] {alignment = 1 : i64} +// CHECK: %17 = cir.alloca !ty_anon2E2_, !cir.ptr, ["ref.tmp1"] {alignment = 1 : i64} // Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` -// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> // CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> // CHECK: cir.yield %19 : !cir.ptr)>> // CHECK: } diff --git a/clang/test/CIR/CodeGen/forward-decls.cpp b/clang/test/CIR/CodeGen/forward-decls.cpp index 86e374626a20..3f004c952d4c 100644 --- a/clang/test/CIR/CodeGen/forward-decls.cpp +++ b/clang/test/CIR/CodeGen/forward-decls.cpp @@ -96,13 +96,12 @@ void testIndirectSelfReference(struct StructNodeA arg) { // RUN: FileCheck --check-prefix=CHECK5 --input-file=%t/complex_struct.cir %s // A sizeable complex struct just to double check that stuff is working. - -// CHECK5: !cir.struct, !cir.struct>, !cir.struct>, !cir.ptr>, !cir.ptr>} #cir.record.decl.ast>, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> -// CHECK5: !cir.struct>, !cir.struct, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>, !cir.struct, !cir.struct} #cir.record.decl.ast>>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>} #cir.record.decl.ast> -// CHECK5: !cir.struct>, !ty_C, !cir.struct} #cir.record.decl.ast>>, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> -// CHECK5: !cir.struct>, !ty_C, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>, !ty_anon2E4_} #cir.record.decl.ast> -// CHECK5: !cir.struct>, !ty_C, !ty_anon2E5_} #cir.record.decl.ast> -// CHECK5: !cir.struct +// CHECK5: !cir.struct, !cir.struct>, !cir.struct>, !cir.ptr>, !cir.ptr>} #cir.record.decl.ast>, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !cir.struct, !cir.struct>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>, !cir.struct, !cir.struct} #cir.record.decl.ast>>, !cir.struct>} #cir.record.decl.ast>} #cir.record.decl.ast>} #cir.record.decl.ast>>, !cir.ptr>} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !ty_C, !cir.struct} #cir.record.decl.ast>>, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !ty_C, !cir.struct} #cir.record.decl.ast>} #cir.record.decl.ast>>, !ty_anon2E2_} #cir.record.decl.ast> +// CHECK5: !cir.struct>, !ty_C, !ty_anon2E1_} #cir.record.decl.ast> +// CHECK5: !cir.struct struct A { struct { struct A *a1; diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index d73e136e2f0f..48a4db18bb63 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -57,25 +57,25 @@ const int i = 12; int i2 = i; struct { int i; } i3 = {i}; // CHECK: cir.global external @i2 = #cir.int<12> : !s32i -// CHECK: cir.global external @i3 = #cir.const_struct<{#cir.int<12> : !s32i}> : !ty_anon2E7_ +// CHECK: cir.global external @i3 = #cir.const_struct<{#cir.int<12> : !s32i}> : !ty_anon2E3_ int a[10][10][10]; int *a2 = &a[3][0][8]; struct { int *p; } a3 = {&a[3][0][8]}; // CHECK: cir.global external @a2 = #cir.global_view<@a, [3 : i32, 0 : i32, 8 : i32]> : !cir.ptr -// CHECK: cir.global external @a3 = #cir.const_struct<{#cir.global_view<@a, [3 : i32, 0 : i32, 8 : i32]> : !cir.ptr}> : !ty_anon2E9_ +// CHECK: cir.global external @a3 = #cir.const_struct<{#cir.global_view<@a, [3 : i32, 0 : i32, 8 : i32]> : !cir.ptr}> : !ty_anon2E4_ int p[10]; int *p1 = &p[0]; struct { int *x; } p2 = {&p[0]}; // CHECK: cir.global external @p1 = #cir.global_view<@p> : !cir.ptr -// CHECK: cir.global external @p2 = #cir.const_struct<{#cir.global_view<@p> : !cir.ptr}> : !ty_anon2E11_ +// CHECK: cir.global external @p2 = #cir.const_struct<{#cir.global_view<@p> : !cir.ptr}> : !ty_anon2E5_ int q[10]; int *q1 = q; struct { int *x; } q2 = {q}; // CHECK: cir.global external @q1 = #cir.global_view<@q> : !cir.ptr -// CHECK: cir.global external @q2 = #cir.const_struct<{#cir.global_view<@q> : !cir.ptr}> : !ty_anon2E13_ +// CHECK: cir.global external @q2 = #cir.const_struct<{#cir.global_view<@q> : !cir.ptr}> : !ty_anon2E6_ int foo() { extern int optind; diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 84b38d567e09..b1084ff81bd3 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -6,13 +6,13 @@ void fn() { a(); } -// CHECK: !ty_anon2E2_ = !cir.struct +// CHECK: !ty_anon2E0_ = !cir.struct // CHECK-DAG: module // CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv{{.*}}) extra // CHECK: cir.func @_Z2fnv() -// CHECK-NEXT: %0 = cir.alloca !ty_anon2E2_, !cir.ptr, ["a"] +// CHECK-NEXT: %0 = cir.alloca !ty_anon2E0_, !cir.ptr, ["a"] // CHECK: cir.call @_ZZ2fnvENK3$_0clEv void l0() { @@ -23,15 +23,15 @@ void l0() { // CHECK: cir.func lambda internal private @_ZZ2l0vENK3$_0clEv({{.*}}) extra -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> -// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %2 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> +// CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr +// CHECK: %2 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %3 = cir.load %2 : !cir.ptr>, !cir.ptr // CHECK: %4 = cir.load %3 : !cir.ptr, !s32i // CHECK: %5 = cir.const #cir.int<1> : !s32i // CHECK: %6 = cir.binop(add, %4, %5) nsw : !s32i -// CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %7 = cir.get_member %1[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: %8 = cir.load %7 : !cir.ptr>, !cir.ptr // CHECK: cir.store %6, %8 : !s32i, !cir.ptr @@ -45,15 +45,15 @@ auto g() { }; } -// CHECK: cir.func @_Z1gv() -> !ty_anon2E6_ -// CHECK: %0 = cir.alloca !ty_anon2E6_, !cir.ptr, ["__retval"] {alignment = 8 : i64} +// CHECK: cir.func @_Z1gv() -> !ty_anon2E3_ +// CHECK: %0 = cir.alloca !ty_anon2E3_, !cir.ptr, ["__retval"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CHECK: %2 = cir.const #cir.int<12> : !s32i // CHECK: cir.store %2, %1 : !s32i, !cir.ptr -// CHECK: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK: cir.store %1, %3 : !cir.ptr, !cir.ptr> -// CHECK: %4 = cir.load %0 : !cir.ptr, !ty_anon2E6_ -// CHECK: cir.return %4 : !ty_anon2E6_ +// CHECK: %4 = cir.load %0 : !cir.ptr, !ty_anon2E3_ +// CHECK: cir.return %4 : !ty_anon2E3_ auto g2() { int i = 12; @@ -65,15 +65,15 @@ auto g2() { } // Should be same as above because of NRVO -// CHECK: cir.func @_Z2g2v() -> !ty_anon2E8_ -// CHECK-NEXT: %0 = cir.alloca !ty_anon2E8_, !cir.ptr, ["__retval", init] {alignment = 8 : i64} +// CHECK: cir.func @_Z2g2v() -> !ty_anon2E4_ +// CHECK-NEXT: %0 = cir.alloca !ty_anon2E4_, !cir.ptr, ["__retval", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.const #cir.int<12> : !s32i // CHECK-NEXT: cir.store %2, %1 : !s32i, !cir.ptr -// CHECK-NEXT: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> +// CHECK-NEXT: %3 = cir.get_member %0[0] {name = "i"} : !cir.ptr -> !cir.ptr> // CHECK-NEXT: cir.store %1, %3 : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !ty_anon2E8_ -// CHECK-NEXT: cir.return %4 : !ty_anon2E8_ +// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !ty_anon2E4_ +// CHECK-NEXT: cir.return %4 : !ty_anon2E4_ int f() { return g2()(); @@ -82,10 +82,10 @@ int f() { // CHECK: cir.func @_Z1fv() -> !s32i // CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %2 = cir.alloca !ty_anon2E8_, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} -// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_anon2E8_ -// CHECK-NEXT: cir.store %3, %2 : !ty_anon2E8_, !cir.ptr -// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i +// CHECK-NEXT: %2 = cir.alloca !ty_anon2E4_, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} +// CHECK-NEXT: %3 = cir.call @_Z2g2v() : () -> !ty_anon2E4_ +// CHECK-NEXT: cir.store %3, %2 : !ty_anon2E4_, !cir.ptr +// CHECK-NEXT: %4 = cir.call @_ZZ2g2vENK3$_0clEv(%2) : (!cir.ptr) -> !s32i // CHECK-NEXT: cir.store %4, %0 : !s32i, !cir.ptr // CHECK-NEXT: } // CHECK-NEXT: %1 = cir.load %0 : !cir.ptr, !s32i @@ -114,8 +114,8 @@ int g3() { // 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. // CHECK: %3 = cir.scope { -// CHECK: %7 = cir.alloca !ty_anon2E11_, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} -// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> +// CHECK: %7 = cir.alloca !ty_anon2E5_, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} +// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> // CHECK: %9 = cir.unary(plus, %8) : !cir.ptr)>>, !cir.ptr)>> // CHECK: cir.yield %9 : !cir.ptr)>> // CHECK: } diff --git a/clang/test/CIR/CodeGen/string-literals.c b/clang/test/CIR/CodeGen/string-literals.c index 23728b4f4c4c..7be9d6819d3e 100644 --- a/clang/test/CIR/CodeGen/string-literals.c +++ b/clang/test/CIR/CodeGen/string-literals.c @@ -17,7 +17,7 @@ struct { // CIR: }> // LLVM-LABEL: @literals -// LLVM: global %struct.anon.1 { +// LLVM: global %struct.anon.0 { // LLVM: [10 x i8] c"1\00\00\00\00\00\00\00\00\00", // LLVM: [10 x i8] zeroinitializer, // LLVM: [10 x i8] zeroinitializer diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index f473a08fa8ac..43f1576bb09a 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -25,6 +25,8 @@ void baz(void) { // CHECK-DAG: !ty_Node = !cir.struct>} #cir.record.decl.ast> // CHECK-DAG: !ty_Bar = !cir.struct // CHECK-DAG: !ty_Foo = !cir.struct +// CHECK-DAG: !ty_SLocal = !cir.struct +// CHECK-DAG: !ty_SLocal2E0_ = !cir.struct // CHECK-DAG: module {{.*}} { // CHECK: cir.func @baz() // CHECK-NEXT: %0 = cir.alloca !ty_Bar, !cir.ptr, ["b"] {alignment = 4 : i64} @@ -99,3 +101,15 @@ void local_decl(void) { void useRecursiveType(NodeStru* a) { a->next = 0; } + +// CHECK-DAG: cir.alloca !ty_SLocal, !cir.ptr, ["loc", init] {alignment = 4 : i64} +// CHECK-DAG: cir.scope { +// CHECK-DAG: cir.alloca !ty_SLocal2E0_, !cir.ptr, ["loc", init] {alignment = 4 : i64} +void local_structs(int a, float b) { + struct SLocal { int x; }; + struct SLocal loc = {a}; + { + struct SLocal { float y; }; + struct SLocal loc = {b}; + } +} diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index 1147cf52cfb5..8838b67ff283 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -22,11 +22,11 @@ void foo(int x) { // CHECK: %[[VAL_1:.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK: %[[VAL_2:.*]] = cir.alloca !ty_A, !cir.ptr, ["a", init] {alignment = 4 : i64} // CHECK: cir.store {{.*}}, %[[VAL_1]] : !s32i, !cir.ptr -// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = ""} : !cir.ptr -> !cir.ptr -// CHECK: %[[VAL_4:.*]] = cir.cast(bitcast, %[[VAL_3]] : !cir.ptr), !cir.ptr +// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = ""} : !cir.ptr -> !cir.ptr +// CHECK: %[[VAL_4:.*]] = cir.cast(bitcast, %[[VAL_3]] : !cir.ptr), !cir.ptr // CHECK: %[[VAL_5:.*]] = cir.load %[[VAL_1]] : !cir.ptr, !s32i // CHECK: %[[VAL_6:.*]] = cir.set_bitfield(#[[bfi_x]], %[[VAL_4]] : !cir.ptr, %[[VAL_5]] : !s32i) -> !s32i -// CHECK: %[[VAL_7:.*]] = cir.cast(bitcast, %[[VAL_3]] : !cir.ptr), !cir.ptr +// CHECK: %[[VAL_7:.*]] = cir.cast(bitcast, %[[VAL_3]] : !cir.ptr), !cir.ptr // CHECK: %[[VAL_8:.*]] = cir.const #cir.int<0> : !s32i // CHECK: %[[VAL_9:.*]] = cir.set_bitfield(#[[bfi_y]], %[[VAL_7]] : !cir.ptr, %[[VAL_8]] : !s32i) -> !s32i // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/union.cpp b/clang/test/CIR/CodeGen/union.cpp index 7ccb520be2c2..9c8a2002eeb1 100644 --- a/clang/test/CIR/CodeGen/union.cpp +++ b/clang/test/CIR/CodeGen/union.cpp @@ -7,14 +7,14 @@ typedef union { yolo y; struct { int *lifecnt; int genpad; }; } yolm2; typedef union { yolo y; struct { bool life; int genpad; }; } yolm3; // CHECK-DAG: !ty_U23A3ADummy = !cir.struct -// CHECK-DAG: !ty_anon2E5_ = !cir.struct -// CHECK-DAG: !ty_anon2E1_ = !cir.struct +// CHECK-DAG: !ty_anon2E0_ = !cir.struct +// CHECK-DAG: !ty_anon2E2_ = !cir.struct // CHECK-DAG: !ty_yolo = !cir.struct -// CHECK-DAG: !ty_anon2E3_ = !cir.struct, !s32i} #cir.record.decl.ast> +// CHECK-DAG: !ty_anon2E1_ = !cir.struct, !s32i} #cir.record.decl.ast> -// CHECK-DAG: !ty_yolm = !cir.struct -// CHECK-DAG: !ty_yolm3_ = !cir.struct -// CHECK-DAG: !ty_yolm2_ = !cir.struct +// CHECK-DAG: !ty_yolm = !cir.struct +// CHECK-DAG: !ty_yolm3_ = !cir.struct +// CHECK-DAG: !ty_yolm2_ = !cir.struct // Should generate a union type with all members preserved. union U { From d92c8fd177a3ca10fb2c7c68b5f447916b3e7a3a Mon Sep 17 00:00:00 2001 From: gitoleg Date: Mon, 16 Sep 2024 21:43:40 +0300 Subject: [PATCH 1838/2301] [CIR][Bugfix] renames minor/major parameters of the OpenCLVersionAttr (#845) Looks like certain names should not be used - I even could not build CIR on the Ubuntu with a relatively old glibc version. In this case `minor` and `major` are macroses and can not be used in this context. You can take a look at the comments in the [mlir/test/lib/Dialect/Test/TestDialect.h](https://github.com/llvm/clangir/blob/main/mlir/test/lib/Dialect/Test/TestDialect.h#L70) reference as well --- clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td | 4 ++-- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td index 1a47186de581..b80ea308608a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td @@ -153,7 +153,7 @@ def OpenCLKernelArgMetadataAttr def OpenCLVersionAttr : CIR_Attr<"OpenCLVersion", "cl.version"> { let summary = "OpenCL version"; - let parameters = (ins "int32_t":$major, "int32_t":$minor); + let parameters = (ins "int32_t":$major_version, "int32_t":$minor_version); let description = [{ Represents the version of OpenCL. @@ -165,7 +165,7 @@ def OpenCLVersionAttr : CIR_Attr<"OpenCLVersion", "cl.version"> { module attributes {cir.cl.version = cir.cl.version<3, 0>} {} ``` }]; - let assemblyFormat = "`<` $major `,` $minor `>`"; + let assemblyFormat = "`<` $major_version `,` $minor_version `>`"; } #endif // MLIR_CIR_DIALECT_CIR_OPENCL_ATTRS diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index 9998d039d46a..4e8e2e9558cc 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -73,9 +73,9 @@ class CIRDialectLLVMIRTranslationInterface auto *int32Ty = llvm::IntegerType::get(llvmContext, 32); llvm::Metadata *oclVerElts[] = { llvm::ConstantAsMetadata::get( - llvm::ConstantInt::get(int32Ty, openclVersionAttr.getMajor())), + llvm::ConstantInt::get(int32Ty, openclVersionAttr.getMajorVersion())), llvm::ConstantAsMetadata::get( - llvm::ConstantInt::get(int32Ty, openclVersionAttr.getMinor()))}; + llvm::ConstantInt::get(int32Ty, openclVersionAttr.getMinorVersion()))}; llvm::NamedMDNode *oclVerMD = llvmModule->getOrInsertNamedMetadata("opencl.ocl.version"); oclVerMD->addOperand(llvm::MDNode::get(llvmContext, oclVerElts)); From a724306acd242f6df31a017eb4a13a22c782d3d9 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 16 Sep 2024 17:57:14 -0400 Subject: [PATCH 1839/2301] [CIR][CodeGen] Support FullExpression storage duration cleanup (#846) --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 ++++- clang/test/CIR/CodeGen/temporaries.cpp | 27 ++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/temporaries.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 3bfb66545c62..ae0909c507e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -16,6 +16,7 @@ #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" #include "CIRGenValue.h" +#include "EHScopeStack.h" #include "TargetInfo.h" #include "clang/AST/ExprCXX.h" @@ -2214,7 +2215,9 @@ static void pushTemporaryCleanup(CIRGenFunction &CGF, } case SD_FullExpression: - llvm_unreachable("SD_FullExpression not implemented"); + CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(), + CIRGenFunction::destroyCXXObject, + CGF.getLangOpts().Exceptions); break; case SD_Automatic: diff --git a/clang/test/CIR/CodeGen/temporaries.cpp b/clang/test/CIR/CodeGen/temporaries.cpp new file mode 100644 index 000000000000..ea6b2bd20d6d --- /dev/null +++ b/clang/test/CIR/CodeGen/temporaries.cpp @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct E { + ~E(); + E operator!(); +}; + +void f() { + !E(); +} + +// CHECK: cir.func private @_ZN1EC1Ev(!cir.ptr) extra(#fn_attr) +// CHECK-NEXT: cir.func private @_ZN1EntEv(!cir.ptr) -> !ty_E +// CHECK-NEXT: cir.func private @_ZN1ED1Ev(!cir.ptr) extra(#fn_attr) +// CHECK-NEXT: cir.func @_Z1fv() extra(#fn_attr1) { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ONE:[0-9]+]] = cir.alloca !ty_E, !cir.ptr, ["agg.tmp.ensured"] {alignment = 1 : i64} +// CHECK-NEXT: %[[TWO:[0-9]+]] = cir.alloca !ty_E, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} +// CHECK-NEXT: cir.call @_ZN1EC1Ev(%1) : (!cir.ptr) -> () extra(#fn_attr) +// CHECK-NEXT: %[[THREE:[0-9]+]] = cir.call @_ZN1EntEv(%[[TWO]]) : (!cir.ptr) -> !ty_E +// CHECK-NEXT: cir.store %[[THREE]], %[[ONE]] : !ty_E, !cir.ptr +// CHECK-NEXT: cir.call @_ZN1ED1Ev(%[[ONE]]) : (!cir.ptr) -> () extra(#fn_attr) +// CHECK-NEXT: cir.call @_ZN1ED1Ev(%[[TWO]]) : (!cir.ptr) -> () extra(#fn_attr) +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } From a51a2c1377b1d959524827253a08f077f0850e77 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 13 Sep 2024 21:52:38 -0700 Subject: [PATCH 1840/2301] [CIR][CIRGen] Properly link multiple level of cleanups Generalize approach and be able to tie together cleanups with their matching throwing calls. Before this the dtors were not really emitted in the proper order. LLVM support for this still hits a NYI, so nothing special here on the LLVM lowering side. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 15 +++--- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 41 +++++++++------ clang/lib/CIR/CodeGen/CIRGenException.cpp | 34 ++++++------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 1 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 7 +-- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 20 ++++---- clang/test/CIR/CodeGen/global-new.cpp | 10 ++-- clang/test/CIR/CodeGen/paren-list-init.cpp | 10 ++-- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 50 +++++++++++++++++-- clang/test/CIR/CodeGen/try-catch.cpp | 1 - 10 files changed, 115 insertions(+), 74 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 957898ebc05c..87abcb26bd86 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3515,27 +3515,26 @@ def TryOp : CIR_Op<"try", `synthetic`: use `cir.try` to represent try/catches not originally present in the source code (e.g. `g = new Class` under `-fexceptions`). + `cleanup`: signal to targets (LLVM for now) that this try/catch, needs + to specially tag their landing pads as needing "cleanup". + Example: TBD ``` }]; - let arguments = (ins UnitAttr:$synthetic, + let arguments = (ins UnitAttr:$synthetic, UnitAttr:$cleanup, OptionalAttr:$catch_types); let regions = (region AnyRegion:$try_region, - AnyRegion:$cleanup_region, VariadicRegion:$catch_regions); let assemblyFormat = [{ - (`synthetic` $synthetic^)? $try_region - `cleanup` $cleanup_region + (`synthetic` $synthetic^)? + (`cleanup` $cleanup^)? + $try_region custom($catch_regions, $catch_types) attr-dict }]; - let extraClassDeclaration = [{ - bool isCleanupActive() { return !getCleanupRegion().empty(); } - }]; - // Everything already covered elsewhere. let hasVerifier = 0; let builders = [ diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 183ba2a6d0f5..775cc3b5cd80 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -158,9 +158,9 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // Check whether we need an EH cleanup. This is only true if we've // generated a lazy EH cleanup block. - auto *EHEntry = Scope.getCachedEHDispatchBlock(); - assert(Scope.hasEHBranches() == (EHEntry != nullptr)); - bool RequiresEHCleanup = (EHEntry != nullptr); + auto *ehEntry = Scope.getCachedEHDispatchBlock(); + assert(Scope.hasEHBranches() == (ehEntry != nullptr)); + bool RequiresEHCleanup = (ehEntry != nullptr); EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope(); // Check the three conditions which might require a normal cleanup: @@ -300,8 +300,8 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { } assert(tryOp && "expected available cir.try"); - auto *NextAction = getEHDispatchBlock(EHParent, tryOp); - (void)NextAction; + auto *nextAction = getEHDispatchBlock(EHParent, tryOp); + (void)nextAction; // Push a terminate scope or cleanupendpad scope around the potentially // throwing cleanups. For funclet EH personalities, the cleanupendpad models @@ -328,23 +328,34 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { if (EHActiveFlag.isValid() || IsActive) { cleanupFlags.setIsForEHCleanup(); mlir::OpBuilder::InsertionGuard guard(builder); - if (!tryOp.isCleanupActive()) - builder.createBlock(&tryOp.getCleanupRegion()); - mlir::Block *cleanup = &tryOp.getCleanupRegion().back(); - if (cleanup->empty()) { - builder.setInsertionPointToEnd(cleanup); - builder.createYield(tryOp.getLoc()); - } - auto yield = cast(cleanup->getTerminator()); + auto yield = cast(ehEntry->getTerminator()); builder.setInsertionPoint(yield); buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag); } - // In LLVM traditional codegen, here's where it branches off to - // NextAction. if (CPI) llvm_unreachable("NYI"); + else { + // In LLVM traditional codegen, here's where it branches off to + // nextAction. CIR does not have a flat layout at this point, so + // instead patch all the landing pads that need to run this cleanup + // as well. + mlir::Block *currBlock = ehEntry; + while (currBlock && cleanupsToPatch.contains(currBlock)) { + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::Block *blockToPatch = cleanupsToPatch[currBlock]; + auto currYield = cast(blockToPatch->getTerminator()); + builder.setInsertionPoint(currYield); + buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag); + currBlock = blockToPatch; + } + + // The nextAction is yet to be populated, register that this + // cleanup should also incorporate any cleanup from nextAction + // when available. + cleanupsToPatch[nextAction] = ehEntry; + } // Leave the terminate scope. if (PushedTerminate) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index d91e4c78d61a..d7cea55dd462 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -417,7 +417,7 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, // that catch-all as the dispatch block. if (catchScope.getNumHandlers() == 1 && catchScope.getHandler(0).isCatchAll()) { - assert(dispatchBlock == catchScope.getHandler(0).Block); + // assert(dispatchBlock == catchScope.getHandler(0).Block); return; } @@ -721,8 +721,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { // Otherwise, signal that we at least have cleanups. } else if (hasCleanup) { - if (!tryOp.isCleanupActive()) - builder.createBlock(&tryOp.getCleanupRegion()); + tryOp.setCleanup(true); } assert((clauses.size() > 0 || hasCleanup) && "no catch clauses!"); @@ -739,9 +738,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { mlir::ArrayAttr::get(builder.getContext(), clauses)); // In traditional LLVM codegen. this tells the backend how to generate the - // landing pad by generating a branch to the dispatch block. In CIR the same - // function is called to gather some state, but this block info it's not - // useful per-se. + // landing pad by generating a branch to the dispatch block. mlir::Block *dispatch = getEHDispatchBlock(EHStack.getInnermostEHScope(), tryOp); (void)dispatch; @@ -772,28 +769,25 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si, if (!dispatchBlock) { switch (scope.getKind()) { case EHScope::Catch: { - // Apply a special case to a single catch-all. - EHCatchScope &catchScope = cast(scope); - if (catchScope.getNumHandlers() == 1 && - catchScope.getHandler(0).isCatchAll()) { - dispatchBlock = catchScope.getHandler(0).Block; - - // Otherwise, make a dispatch block. - } else { - // As said in the function comment, just signal back we - // have something - even though the block value doesn't - // have any real meaning. - dispatchBlock = catchScope.getHandler(0).Block; - assert(dispatchBlock && "find another approach to signal"); + // LLVM does some optimization with branches here, CIR just keep track of + // the corresponding calls. + assert(callWithExceptionCtx && "expected call information"); + { + mlir::OpBuilder::InsertionGuard guard(getBuilder()); + assert(callWithExceptionCtx.getCleanup().empty() && + "one per call: expected empty region at this point"); + dispatchBlock = builder.createBlock(&callWithExceptionCtx.getCleanup()); + builder.createYield(callWithExceptionCtx.getLoc()); } break; } case EHScope::Cleanup: { - assert(tryOp && "expected cir.try available"); assert(callWithExceptionCtx && "expected call information"); { mlir::OpBuilder::InsertionGuard guard(getBuilder()); + assert(callWithExceptionCtx.getCleanup().empty() && + "one per call: expected empty region at this point"); dispatchBlock = builder.createBlock(&callWithExceptionCtx.getCleanup()); builder.createYield(callWithExceptionCtx.getLoc()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 1167aed873f5..3b4c6828faaf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1755,6 +1755,7 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::cir::TryOp tryOp); /// Unified block containing a call to cir.resume mlir::Block *ehResumeBlock = nullptr; + llvm::DenseMap cleanupsToPatch; /// The cleanup depth enclosing all the cleanups associated with the /// parameters. diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 49aa696ab034..002b8ad026fb 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1237,9 +1237,6 @@ void TryOp::build( // Try body region Region *tryBodyRegion = result.addRegion(); - // Try cleanup region - result.addRegion(); - // Create try body region and set insertion point builder.createBlock(tryBodyRegion); tryBodyBuilder(builder, result.location); @@ -1257,7 +1254,7 @@ void TryOp::getSuccessorRegions(mlir::RegionBranchPoint point, // If the condition isn't constant, both regions may be executed. regions.push_back(RegionSuccessor(&getTryRegion())); - regions.push_back(RegionSuccessor(&getCleanupRegion())); + // FIXME: optimize, ideas include: // - If we know a target function never throws a specific type, we can // remove the catch handler. @@ -3004,7 +3001,7 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, auto call = dyn_cast(op); assert(call && "expected regular call"); if (!call.getCleanup().empty()) { - state << "cleanup "; + state << " cleanup "; state.printRegion(call.getCleanup()); } } diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index ff368a76a95c..aa04e079a524 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -294,10 +294,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { return mlir::ArrayAttr::get(caseAttrList.getContext(), symbolList); } - mlir::Block *buildCatchers(mlir::cir::TryOp tryOp, - mlir::PatternRewriter &rewriter, - mlir::Block *afterBody, - mlir::Block *afterTry) const { + mlir::Block * + buildCatchers(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, + mlir::Block *afterBody, mlir::Block *afterTry, + SmallVectorImpl &callsToRewrite) const { auto loc = tryOp.getLoc(); // Replace the tryOp return with a branch that jumps out of the body. rewriter.setInsertionPointToEnd(afterBody); @@ -317,22 +317,22 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { mlir::ArrayAttr symlist = collectTypeSymbols(tryOp); auto inflightEh = rewriter.create( loc, exceptionPtrType, typeIdType, - tryOp.isCleanupActive() ? mlir::UnitAttr::get(tryOp.getContext()) - : nullptr, + tryOp.getCleanup() ? mlir::UnitAttr::get(tryOp.getContext()) : nullptr, symlist); auto selector = inflightEh.getTypeId(); auto exceptionPtr = inflightEh.getExceptionPtr(); // Time to emit cleanup's. - if (tryOp.isCleanupActive()) { - assert(tryOp.getCleanupRegion().getBlocks().size() == 1 && + if (tryOp.getCleanup()) { + assert(callsToRewrite.size() == 1 && "NYI: if this isn't enough, move region instead"); // TODO(cir): this might need to be duplicated instead of consumed since // for user-written try/catch we want these cleanups to also run when the // regular try scope adjurns (in case no exception is triggered). assert(tryOp.getSynthetic() && "not implemented for user written try/catch"); - mlir::Block *cleanupBlock = &tryOp.getCleanupRegion().getBlocks().back(); + mlir::Block *cleanupBlock = + &callsToRewrite[0].getCleanup().getBlocks().back(); auto cleanupYield = cast(cleanupBlock->getTerminator()); cleanupYield->erase(); @@ -465,7 +465,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Build catchers. mlir::Block *landingPad = - buildCatchers(tryOp, rewriter, afterBody, afterTry); + buildCatchers(tryOp, rewriter, afterBody, afterTry, callsToRewrite); rewriter.eraseOp(tryOp); // Rewrite calls. diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index dd182f9b7feb..fe90280ddf9e 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -28,11 +28,11 @@ e *g = new e(0); // CIR_AFTER: {{%.*}} = cir.const #cir.int<1> : !u64i // CIR_AFTER: {{%.*}} = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr -// CIR_EH: cir.try synthetic { -// CIR_EH: cir.call exception @_ZN1eC1Ei -// CIR_EH: cir.yield -// CIR_EH: } cleanup { -// CIR_EH: cir.call @_ZdlPvm +// CIR_EH: cir.try synthetic cleanup { +// CIR_EH: cir.call exception @_ZN1eC1Ei{{.*}} cleanup { +// CIR_EH: cir.call @_ZdlPvm +// CIR_EH: cir.yield +// CIR_EH: } // CIR_EH: cir.yield // CIR_EH: } catch [#cir.unwind { // CIR_EH: cir.resume diff --git a/clang/test/CIR/CodeGen/paren-list-init.cpp b/clang/test/CIR/CodeGen/paren-list-init.cpp index 45d1dae01847..0fb659e06333 100644 --- a/clang/test/CIR/CodeGen/paren-list-init.cpp +++ b/clang/test/CIR/CodeGen/paren-list-init.cpp @@ -43,15 +43,15 @@ void make1() { // CIR_EH: cir.scope { // CIR_EH: %1 = cir.alloca ![[S1]], !cir.ptr, ["agg.tmp.ensured"] // CIR_EH: %2 = cir.get_member %1[0] {name = "v"} : !cir.ptr -> !cir.ptr -// CIR_EH: cir.try synthetic { +// CIR_EH: cir.try synthetic cleanup { // Call v move ctor -// CIR_EH: cir.call exception @_ZN3VecC1EOS_(%2, %[[VEC]]) : (!cir.ptr, !cir.ptr) -> () -// CIR_EH: cir.yield -// CIR_EH: } cleanup { +// CIR_EH: cir.call exception @_ZN3VecC1EOS_{{.*}} cleanup { // Destroy v after v move ctor throws -// CIR_EH: cir.call @_ZN3VecD1Ev(%[[VEC]]) : (!cir.ptr) -> () +// CIR_EH: cir.call @_ZN3VecD1Ev(%[[VEC]]) +// CIR_EH: cir.yield +// CIR_EH: } // CIR_EH: cir.yield // CIR_EH: } catch [#cir.unwind { // CIR_EH: cir.resume diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 2d8db9867867..fa6bd18ddf0a 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -27,7 +27,6 @@ void yo() { // CIR: cir.call exception @_ZN3VecC1Ev(%[[VADDR]]) : (!cir.ptr) -> () // CIR: cir.call @_ZN3VecD1Ev(%[[VADDR]]) : (!cir.ptr) -> () // CIR: cir.yield -// CIR: } cleanup { // CIR: } catch [type #cir.all { // CIR: cir.catch_param -> !cir.ptr // CIR: }] @@ -75,6 +74,16 @@ void yo2() { r++; } } + +void yo3(bool x) { + int r = 1; + try { + Vec v1, v2, v3, v4; + } catch (...) { + r++; + } +} + #endif // CIR: cir.func @_Z3yo2v() @@ -84,14 +93,13 @@ void yo2() { // CIR: cir.call exception @_ZN3VecC1Ev // CIR: cir.scope { // CIR: cir.alloca ![[S1:.*]], !cir.ptr, ["agg.tmp.ensured"] -// CIR: cir.call exception @_ZN3VecC1EOS_ +// CIR: cir.call exception @_ZN3VecC1EOS_{{.*}} cleanup { +// CIR: cir.call @_ZN3VecD1Ev +// CIR: cir.yield // CIR: cir.call @_ZN2S1D2Ev // CIR: } // CIR: cir.call @_ZN3VecD1Ev // CIR: cir.yield -// CIR: } cleanup { -// CIR: cir.call @_ZN3VecD1Ev -// CIR: cir.yield // CIR: } catch [type #cir.all { // CIR: cir.catch_param -> !cir.ptr // CIR: cir.yield @@ -99,3 +107,35 @@ void yo2() { // CIR: } // CIR: cir.return // CIR: } + +// CIR: cir.scope { +// CIR: %[[V1:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v1" +// CIR: %[[V2:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v2" +// CIR: %[[V3:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v3" +// CIR: %[[V4:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v4" +// CIR: cir.try { +// CIR: cir.call exception @_ZN3VecC1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR: cir.call exception @_ZN3VecC1Ev(%[[V2]]) : (!cir.ptr) -> () cleanup { +// CIR: cir.call @_ZN3VecD1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } +// CIR: cir.call exception @_ZN3VecC1Ev(%[[V3]]) : (!cir.ptr) -> () cleanup { +// CIR: cir.call @_ZN3VecD1Ev(%[[V2]]) : (!cir.ptr) -> () +// CIR: cir.call @_ZN3VecD1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } +// CIR: cir.call exception @_ZN3VecC1Ev(%[[V4]]) : (!cir.ptr) -> () cleanup { +// CIR: cir.call @_ZN3VecD1Ev(%[[V3]]) : (!cir.ptr) -> () +// CIR: cir.call @_ZN3VecD1Ev(%[[V2]]) : (!cir.ptr) -> () +// CIR: cir.call @_ZN3VecD1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } +// CIR: cir.call @_ZN3VecD1Ev(%[[V4]]) : (!cir.ptr) -> () +// CIR: cir.call @_ZN3VecD1Ev(%[[V3]]) : (!cir.ptr) -> () +// CIR: cir.call @_ZN3VecD1Ev(%[[V2]]) : (!cir.ptr) -> () +// CIR: cir.call @_ZN3VecD1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } catch [type #cir.all { +// CIR: }] +// CIR: } +// CIR: cir.return diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index e43e7b61fea8..8945bc33b739 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -21,7 +21,6 @@ unsigned long long tc() { a++; } catch (int idx) { - // CHECK: } cleanup { // CHECK: } catch [type #cir.global_view<@_ZTIi> : !cir.ptr { // CHECK: %[[catch_idx_addr:.*]] = cir.catch_param -> !cir.ptr // CHECK: %[[idx_load:.*]] = cir.load %[[catch_idx_addr]] : !cir.ptr, !s32i From 56a2864e689f371e7267300e2c0bd609a71fec66 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Tue, 17 Sep 2024 21:00:02 +0300 Subject: [PATCH 1841/2301] [CIR][CodeGen] Fix packed structures (#839) Consider the following code snippet `test.c`: ``` typedef struct { char b; int c; } D; typedef struct { D e; int f; } E; void f1() { E a = {}; } ``` When emitting the CIR using `bin/clang test.c -Xclang -fclangir -Xclang -emit-cir -S -o -` the current implementation gets: ``` NYI UNREACHABLE executed at ~/clangir/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp:338! ``` This is only one of the many tests where this happens. Comparing the implementations of `CIR/CodeGen/CIRRecordLayoutBuilder.cpp` and clang's codegen `lib/CodeGen/CGRecordLayoutBuilder.cpp`, there is some padding missing for packed structures, and some alignments that need to be corrected. This PR also updates 2 existing tests. In the first test, `structural-binding.cpp`, I updated some `cir.get_member` indexes. In the second test, `packed-structs.c`, I updated the `cir` layout for the structure, and added more tests. I have compared the changes I made in the tests to the original clang codegen and everything seems fine. --- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 35 +++++- clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 7 +- clang/test/CIR/CodeGen/packed-structs.c | 110 ++++++++++++++++-- clang/test/CIR/CodeGen/structural-binding.cpp | 10 +- 4 files changed, 145 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 9ad81831f224..b6e48d534790 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -69,6 +69,8 @@ struct CIRRecordLowering final { /// Determines if we need a packed llvm struct. void determinePacked(bool NVBaseType); + /// Inserts padding everywhere it's needed. + void insertPadding(); void computeVolatileBitfields(); void accumulateBases(); @@ -294,6 +296,7 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) { members.push_back(StorageInfo(Size, getUIntNType(8))); determinePacked(nonVirtualBaseType); + insertPadding(); members.pop_back(); fillOutputFields(); @@ -657,6 +660,33 @@ void CIRRecordLowering::determinePacked(bool NVBaseType) { members.back().data = getUIntNType(astContext.toBits(Alignment)); } +void CIRRecordLowering::insertPadding() { + std::vector> Padding; + CharUnits Size = CharUnits::Zero(); + for (std::vector::const_iterator Member = members.begin(), + MemberEnd = members.end(); + Member != MemberEnd; ++Member) { + if (!Member->data) + continue; + CharUnits Offset = Member->offset; + assert(Offset >= Size); + // Insert padding if we need to. + if (Offset != + Size.alignTo(isPacked ? CharUnits::One() : getAlignment(Member->data))) + Padding.push_back(std::make_pair(Size, Offset - Size)); + Size = Offset + getSize(Member->data); + } + if (Padding.empty()) + return; + // Add the padding to the Members list and sort it. + for (std::vector>::const_iterator + Pad = Padding.begin(), + PadEnd = Padding.end(); + Pad != PadEnd; ++Pad) + members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second))); + llvm::stable_sort(members); +} + std::unique_ptr CIRGenTypes::computeRecordLayout(const RecordDecl *D, mlir::cir::StructType *Ty) { @@ -674,9 +704,8 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, CIRRecordLowering baseBuilder(*this, D, /*Packed=*/builder.isPacked); baseBuilder.lower(/*NonVirtualBaseType=*/true); auto baseIdentifier = getRecordTypeName(D, ".base"); - BaseTy = - Builder.getCompleteStructTy(baseBuilder.fieldTypes, baseIdentifier, - /*packed=*/false, D); + BaseTy = Builder.getCompleteStructTy( + baseBuilder.fieldTypes, baseIdentifier, baseBuilder.isPacked, D); // TODO(cir): add something like addRecordTypeName // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp index c030cd2f352f..2dadc469f789 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp @@ -23,7 +23,8 @@ StructLayout::StructLayout(mlir::cir::StructType ST, const CIRDataLayout &DL) assert(!::cir::MissingFeatures::recordDeclIsPacked() && "Cannot identify packed structs"); - const llvm::Align TyAlign = DL.getABITypeAlign(Ty); + const llvm::Align TyAlign = + ST.getPacked() ? llvm::Align(1) : DL.getABITypeAlign(Ty); // Add padding if necessary to align the data element properly. // Currently the only structure with scalable size will be the homogeneous @@ -171,6 +172,10 @@ llvm::Align CIRDataLayout::getAlignment(mlir::Type Ty, bool abiOrPref) const { if (::cir::MissingFeatures::recordDeclIsPacked() && abiOrPref) llvm_unreachable("NYI"); + auto stTy = llvm::dyn_cast(Ty); + if (stTy && stTy.getPacked() && abiOrPref) + return llvm::Align(1); + // Get the layout annotation... which is lazily created on demand. const StructLayout *Layout = getStructLayout(llvm::cast(Ty)); diff --git a/clang/test/CIR/CodeGen/packed-structs.c b/clang/test/CIR/CodeGen/packed-structs.c index 3488418f13b0..2379c8d06896 100644 --- a/clang/test/CIR/CodeGen/packed-structs.c +++ b/clang/test/CIR/CodeGen/packed-structs.c @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s #pragma pack(1) @@ -20,18 +22,110 @@ typedef struct { } __attribute__((aligned(2))) C; -// CHECK: !ty_A = !cir.struct -// CHECK: !ty_C = !cir.struct -// CHECK: !ty_B = !cir.struct}> +// CIR: !ty_A = !cir.struct +// CIR: !ty_C = !cir.struct +// CIR: !ty_D = !cir.struct}> +// CIR: !ty_I = !cir.struct, ["a"] {alignment = 1 : i64} -// CHECK: %1 = cir.alloca !ty_B, !cir.ptr, ["b"] {alignment = 1 : i64} -// CHECK: %2 = cir.alloca !ty_C, !cir.ptr, ["c"] {alignment = 2 : i64} +// LLVM: %struct.A = type <{ i32, i8 }> +// LLVM: %struct.B = type <{ i32, i8, [6 x %struct.A] }> +// LLVM: %struct.C = type <{ i32, i8, i8 }> +// LLVM: %struct.E = type <{ %struct.D, i32 }> +// LLVM: %struct.D = type <{ i8, i8, i32 }> +// LLVM: %struct.G = type { %struct.F, i8 } +// LLVM: %struct.F = type <{ i64, i8 }> +// LLVM: %struct.J = type <{ i8, i8, i8, i8, %struct.I, i32 }> +// LLVM: %struct.I = type <{ i8, %struct.H }> +// LLVM: %struct.H = type { i32, %union.anon.{{.*}} } + +// CIR: cir.func {{.*@foo()}} +// CIR: {{.*}} = cir.alloca !ty_A, !cir.ptr, ["a"] {alignment = 1 : i64} +// CIR: {{.*}} = cir.alloca !ty_B, !cir.ptr, ["b"] {alignment = 1 : i64} +// CIR: {{.*}} = cir.alloca !ty_C, !cir.ptr, ["c"] {alignment = 2 : i64} + +// LLVM: {{.*}} = alloca %struct.A, i64 1, align 1 +// LLVM: {{.*}} = alloca %struct.B, i64 1, align 1 +// LLVM: {{.*}} = alloca %struct.C, i64 1, align 2 void foo() { A a; B b; C c; } +#pragma pack(2) + +typedef struct { + char b; + int c; +} D; + +typedef struct { + D e; + int f; +} E; + +// CIR: cir.func {{.*@f1()}} +// CIR: {{.*}} = cir.alloca !ty_E, !cir.ptr, ["a"] {alignment = 2 : i64} + +// LLVM: {{.*}} = alloca %struct.E, i64 1, align 2 +void f1() { + E a = {}; +} + +#pragma pack(1) + +typedef struct { + long b; + char c; +} F; + +typedef struct { + F e; + char f; +} G; + +// CIR: cir.func {{.*@f2()}} +// CIR: {{.*}} = cir.alloca !ty_G, !cir.ptr, ["a"] {alignment = 1 : i64} + +// LLVM: {{.*}} = alloca %struct.G, i64 1, align 1 +void f2() { + G a = {}; +} + +#pragma pack(1) + +typedef struct { + int d0; + union { + char null; + int val; + } value; +} H; + +typedef struct { + char t; + H d; +} I; +typedef struct { + char a0; + char a1; + char a2; + char a3; + I c; + int a; +} J; + +// CIR: cir.func {{.*@f3()}} +// CIR: {{.*}} = cir.alloca !ty_J, !cir.ptr, ["a"] {alignment = 1 : i64} + +// LLVM: {{.*}} = alloca %struct.J, i64 1, align 1 +void f3() { + J a = {0}; +} diff --git a/clang/test/CIR/CodeGen/structural-binding.cpp b/clang/test/CIR/CodeGen/structural-binding.cpp index c37788b9d678..d70a9509c6f9 100644 --- a/clang/test/CIR/CodeGen/structural-binding.cpp +++ b/clang/test/CIR/CodeGen/structural-binding.cpp @@ -52,19 +52,19 @@ void f(A &a) { // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr // CIR: {{.*}} = cir.get_member %[[a]][0] {name = "a"} : !cir.ptr -> !cir.ptr // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr - // CIR: {{.*}} = cir.get_member %[[a]][1] {name = "b"} : !cir.ptr -> !cir.ptr + // CIR: {{.*}} = cir.get_member %[[a]][2] {name = "b"} : !cir.ptr -> !cir.ptr // CIR: %[[a:.*]] = cir.load %1 : !cir.ptr>, !cir.ptr - // CIR: {{.*}} = cir.get_member %[[a]][2] {name = "c"} : !cir.ptr -> !cir.ptr + // CIR: {{.*}} = cir.get_member %[[a]][3] {name = "c"} : !cir.ptr -> !cir.ptr // LLVM: {{.*}} = getelementptr %struct.A, ptr {{.*}}, i32 0, i32 0 - // LLVM: {{.*}} = getelementptr %struct.A, ptr {{.*}}, i32 0, i32 1 // LLVM: {{.*}} = getelementptr %struct.A, ptr {{.*}}, i32 0, i32 2 + // LLVM: {{.*}} = getelementptr %struct.A, ptr {{.*}}, i32 0, i32 3 auto [x2, y2, z2] = a; (x2, y2, z2); // CIR: cir.call @_ZN1AC1ERKS_(%2, {{.*}}) : (!cir.ptr, !cir.ptr) -> () // CIR: {{.*}} = cir.get_member %2[0] {name = "a"} : !cir.ptr -> !cir.ptr - // CIR: {{.*}} = cir.get_member %2[1] {name = "b"} : !cir.ptr -> !cir.ptr - // CIR: {{.*}} = cir.get_member %2[2] {name = "c"} : !cir.ptr -> !cir.ptr + // CIR: {{.*}} = cir.get_member %2[2] {name = "b"} : !cir.ptr -> !cir.ptr + // CIR: {{.*}} = cir.get_member %2[3] {name = "c"} : !cir.ptr -> !cir.ptr // for the rest, just expect the codegen does't crash auto &&[x3, y3, z3] = a; From b47377417c29c518f72ee0fcf0c1d3c21de1fd37 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 18 Sep 2024 02:05:58 +0800 Subject: [PATCH 1842/2301] [CIR][CIRGen] add CIRGen support for assume builtins (#841) This PR adds CIRGen support for the following 3 builtins related to compile- time assumptions: - `__builtin_assume` - `__builtin_assume_aligned` - `__builtin_assume_separate_storage` 3 new operations are invented to represent the three builtins. _LLVMIR lowering for these builtins cannot be implemented at this moment_ due to the lack of operand bundle support in LLVMIR dialect. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 80 ++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 39 ++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 19 +++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 11 +++ clang/test/CIR/CodeGen/builtin-assume.cpp | 55 ++++++++++++++ clang/test/CIR/Transforms/builtin-assume.cir | 40 ++++++++++ 6 files changed, 244 insertions(+) create mode 100644 clang/test/CIR/CodeGen/builtin-assume.cpp create mode 100644 clang/test/CIR/Transforms/builtin-assume.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 87abcb26bd86..cd7bc76fb377 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3896,6 +3896,86 @@ def FMinOp : BinaryFPToFPBuiltinOp<"fmin", "MinNumOp">; def FModOp : BinaryFPToFPBuiltinOp<"fmod", "FRemOp">; def PowOp : BinaryFPToFPBuiltinOp<"pow", "PowOp">; +//===----------------------------------------------------------------------===// +// Assume Operations +//===----------------------------------------------------------------------===// + +def AssumeOp : CIR_Op<"assume"> { + let summary = "Tell the optimizer that a boolean value is true"; + let description = [{ + The `cir.assume` operation takes a single boolean prediate as its only + argument and does not have any results. The operation tells the optimizer + that the predicate's value is true. + + This operation corresponds to the `__assume` and the `__builtin_assume` + builtin function. + }]; + + let arguments = (ins CIR_BoolType:$predicate); + let results = (outs); + + let assemblyFormat = [{ + $predicate `:` type($predicate) attr-dict + }]; +} + +def AssumeAlignedOp + : CIR_Op<"assume.aligned", [Pure, AllTypesMatch<["pointer", "result"]>]> { + let summary = "Tell the optimizer that a pointer is aligned"; + let description = [{ + The `cir.assume.aligned` operation takes two or three arguments. + + When the 3rd argument `offset` is absent, this operation tells the optimizer + that the pointer given by the `pointer` argument is aligned to the alignment + given by the `align` argument. + + When the `offset` argument is given, it represents an offset from the + alignment. This operation then tells the optimizer that the pointer given by + the `pointer` argument is always misaligned by the alignment given by the + `align` argument by `offset` bytes, a.k.a. the pointer yielded by + `(char *)pointer - offset` is aligned to the specified alignment. + + The `align` argument is a constant integer represented as an integer + attribute instead of an SSA value. It must be a positive integer. + + The result of this operation has the same value as the `pointer` argument, + but the optimizer has additional knowledge about its alignment. + + This operation corresponds to the `__builtin_assume_aligned` builtin + function. + }]; + + let arguments = (ins CIR_PointerType:$pointer, + I64Attr:$alignment, + Optional:$offset); + let results = (outs CIR_PointerType:$result); + + let assemblyFormat = [{ + $pointer `:` qualified(type($pointer)) + `[` `alignment` $alignment (`,` `offset` $offset^ `:` type($offset))? `]` + attr-dict + }]; +} + +def AssumeSepStorageOp : CIR_Op<"assume.separate_storage", [SameTypeOperands]> { + let summary = + "Tell the optimizer that two pointers point to different allocations"; + let description = [{ + The `cir.assume.separate_storage` operation takes two pointers as arguments, + and the operation tells the optimizer that these two pointers point to + different allocations. + + This operation corresponds to the `__builtin_assume_separate_storage` + builtin function. + }]; + + let arguments = (ins VoidPtr:$ptr1, VoidPtr:$ptr2); + + let assemblyFormat = [{ + $ptr1 `,` $ptr2 `:` qualified(type($ptr1)) attr-dict + }]; +} + //===----------------------------------------------------------------------===// // Branch Probability Operations //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 59811b8e9157..49b40d763fdc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -13,6 +13,7 @@ #include "CIRGenCXXABI.h" #include "CIRGenCall.h" +#include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "TargetInfo.h" @@ -823,6 +824,44 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(buildScalarExpr(E->getArg(0))); } + case Builtin::BI__builtin_assume_aligned: { + const Expr *ptr = E->getArg(0); + mlir::Value ptrValue = buildScalarExpr(ptr); + mlir::Value offsetValue = + (E->getNumArgs() > 2) ? buildScalarExpr(E->getArg(2)) : nullptr; + + mlir::Attribute alignmentAttr = ConstantEmitter(*this).emitAbstract( + E->getArg(1), E->getArg(1)->getType()); + std::int64_t alignment = cast(alignmentAttr).getSInt(); + + ptrValue = buildAlignmentAssumption(ptrValue, ptr, ptr->getExprLoc(), + builder.getI64IntegerAttr(alignment), + offsetValue); + return RValue::get(ptrValue); + } + + case Builtin::BI__assume: + case Builtin::BI__builtin_assume: { + if (E->getArg(0)->HasSideEffects(getContext())) + return RValue::get(nullptr); + + mlir::Value argValue = buildScalarExpr(E->getArg(0)); + builder.create(getLoc(E->getExprLoc()), argValue); + return RValue::get(nullptr); + } + + case Builtin::BI__builtin_assume_separate_storage: { + const Expr *arg0 = E->getArg(0); + const Expr *arg1 = E->getArg(1); + + mlir::Value value0 = buildScalarExpr(arg0); + mlir::Value value1 = buildScalarExpr(arg1); + + builder.create(getLoc(E->getExprLoc()), + value0, value1); + return RValue::get(nullptr); + } + case Builtin::BI__builtin_prefetch: { auto evaluateOperandAsInt = [&](const Expr *Arg) { Expr::EvalResult Res; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 0c7ac712284d..410fcc2a316d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1854,3 +1854,22 @@ CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, return numElements; } + +mlir::Value CIRGenFunction::buildAlignmentAssumption( + mlir::Value ptrValue, QualType ty, SourceLocation loc, + SourceLocation assumptionLoc, mlir::IntegerAttr alignment, + mlir::Value offsetValue) { + if (SanOpts.has(SanitizerKind::Alignment)) + llvm_unreachable("NYI"); + return builder.create( + getLoc(assumptionLoc), ptrValue, alignment, offsetValue); +} + +mlir::Value CIRGenFunction::buildAlignmentAssumption( + mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc, + mlir::IntegerAttr alignment, mlir::Value offsetValue) { + QualType ty = expr->getType(); + SourceLocation loc = expr->getExprLoc(); + return buildAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment, + offsetValue); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 3b4c6828faaf..d8e032604ff1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -969,6 +969,17 @@ class CIRGenFunction : public CIRGenTypeCache { ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch); + mlir::Value buildAlignmentAssumption(mlir::Value ptrValue, QualType ty, + SourceLocation loc, + SourceLocation assumptionLoc, + mlir::IntegerAttr alignment, + mlir::Value offsetValue = nullptr); + + mlir::Value buildAlignmentAssumption(mlir::Value ptrValue, const Expr *expr, + SourceLocation assumptionLoc, + mlir::IntegerAttr alignment, + mlir::Value offsetValue = nullptr); + /// Build a debug stoppoint if we are emitting debug info. void buildStopPoint(const Stmt *S); diff --git a/clang/test/CIR/CodeGen/builtin-assume.cpp b/clang/test/CIR/CodeGen/builtin-assume.cpp new file mode 100644 index 000000000000..da807994f4b1 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-assume.cpp @@ -0,0 +1,55 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir +// RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir + +int test_assume(int x) { + __builtin_assume(x > 0); + return x; +} + +// CIR: cir.func @_Z11test_assumei +// CIR: %[[#x:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#zero:]] = cir.const #cir.int<0> : !s32i +// CIR-NEXT: %[[#cond:]] = cir.cmp(gt, %[[#x]], %[[#zero]]) : !s32i, !cir.bool +// CIR-NEXT: cir.assume %[[#cond]] : !cir.bool +// CIR: } + +int test_assume_aligned(int *ptr) { + int *aligned = (int *)__builtin_assume_aligned(ptr, 8); + return *aligned; +} + +// CIR: cir.func @_Z19test_assume_alignedPi +// CIR: %[[#ptr:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[#aligned:]] = cir.assume.aligned %[[#ptr]] : !cir.ptr[alignment 8] +// CIR-NEXT: cir.store %[[#aligned]], %[[#aligned_slot:]] : !cir.ptr, !cir.ptr> +// CIR-NEXT: %[[#aligned2:]] = cir.load deref %[[#aligned_slot]] : !cir.ptr>, !cir.ptr +// CIR-NEXT: %{{.+}} = cir.load %[[#aligned2]] : !cir.ptr, !s32i +// CIR: } + +int test_assume_aligned_offset(int *ptr) { + int *aligned = (int *)__builtin_assume_aligned(ptr, 8, 4); + return *aligned; +} + +// CIR: cir.func @_Z26test_assume_aligned_offsetPi +// CIR: %[[#ptr:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[#offset:]] = cir.const #cir.int<4> : !s32i +// CIR-NEXT: %[[#offset2:]] = cir.cast(integral, %[[#offset]] : !s32i), !u64i +// CIR-NEXT: %[[#aligned:]] = cir.assume.aligned %[[#ptr]] : !cir.ptr[alignment 8, offset %[[#offset2]] : !u64i] +// CIR-NEXT: cir.store %[[#aligned]], %[[#aligned_slot:]] : !cir.ptr, !cir.ptr> +// CIR-NEXT: %[[#aligned2:]] = cir.load deref %[[#aligned_slot]] : !cir.ptr>, !cir.ptr +// CIR-NEXT: %{{.+}} = cir.load %[[#aligned2]] : !cir.ptr, !s32i +// CIR: } + +int test_separate_storage(int *p1, int *p2) { + __builtin_assume_separate_storage(p1, p2); + return *p1 + *p2; +} + +// CIR: cir.func @_Z21test_separate_storagePiS_ +// CIR: %[[#p1:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[#p1_voidptr:]] = cir.cast(bitcast, %[[#p1]] : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#p2:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[#p2_voidptr:]] = cir.cast(bitcast, %[[#p2]] : !cir.ptr), !cir.ptr +// CIR-NEXT: cir.assume.separate_storage %[[#p1_voidptr]], %[[#p2_voidptr]] : !cir.ptr +// CIR: } diff --git a/clang/test/CIR/Transforms/builtin-assume.cir b/clang/test/CIR/Transforms/builtin-assume.cir new file mode 100644 index 000000000000..72afb3812e53 --- /dev/null +++ b/clang/test/CIR/Transforms/builtin-assume.cir @@ -0,0 +1,40 @@ +// RUN: cir-opt --canonicalize -o %t.cir %s +// RUN: FileCheck --input-file %t.cir %s +// RUN: cir-opt -cir-simplify -o %t.cir %s +// RUN: FileCheck --input-file %t.cir %s + +!s32i = !cir.int +module { + // Make sure canonicalizers don't erase assume builtins. + + cir.func @assume(%arg0: !s32i) { + %0 = cir.const #cir.int<0> : !s32i + %1 = cir.cmp(gt, %arg0, %0) : !s32i, !cir.bool + cir.assume %1 : !cir.bool + cir.return + } + // CHECK: cir.func @assume(%arg0: !s32i) { + // CHECK-NEXT: %0 = cir.const #cir.int<0> : !s32i + // CHECK-NEXT: %1 = cir.cmp(gt, %arg0, %0) : !s32i, !cir.bool + // CHECK-NEXT: cir.assume %1 : !cir.bool + // CHECK-NEXT: cir.return + // CHECK-NEXT: } + + cir.func @assume_aligned(%arg0: !cir.ptr) -> !cir.ptr { + %0 = cir.assume.aligned %arg0 : !cir.ptr[alignment 8] + cir.return %0 : !cir.ptr + } + // CHECK: cir.func @assume_aligned(%arg0: !cir.ptr) -> !cir.ptr { + // CHECK-NEXT: %0 = cir.assume.aligned %arg0 : !cir.ptr[alignment 8] + // CHECK-NEXT: cir.return %0 : !cir.ptr + // CHECK-NEXT: } + + cir.func @assume_separate_storage(%arg0: !cir.ptr, %arg1: !cir.ptr) { + cir.assume.separate_storage %arg0, %arg1 : !cir.ptr + cir.return + } + // CHECK: cir.func @assume_separate_storage(%arg0: !cir.ptr, %arg1: !cir.ptr) { + // CHECK-NEXT: cir.assume.separate_storage %arg0, %arg1 : !cir.ptr + // CHECK-NEXT: cir.return + // CHECK-NEXT: } +} \ No newline at end of file From 62c3e442d8ba055778460f926a505c57afe6a337 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 18 Sep 2024 02:13:01 +0800 Subject: [PATCH 1843/2301] [CIR][NFC] Change default LLVM output file extension (#849) When the output file name is not specified via `-o`, the upstream clang uses `.ll` as the extension of the default output file name. This PR makes ClangIR follow this behavior. --- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 578ccbf8ca2e..b5999195b596 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -358,7 +358,7 @@ getOutputStream(CompilerInstance &ci, StringRef inFile, case CIRGenAction::OutputType::EmitMLIR: return ci.createDefaultOutputFile(false, inFile, "mlir"); case CIRGenAction::OutputType::EmitLLVM: - return ci.createDefaultOutputFile(false, inFile, "llvm"); + return ci.createDefaultOutputFile(false, inFile, "ll"); case CIRGenAction::OutputType::EmitBC: return ci.createDefaultOutputFile(true, inFile, "bc"); case CIRGenAction::OutputType::EmitObj: From 0b472ecb40524101063db685ef0749342750726d Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Tue, 17 Sep 2024 12:16:40 -0700 Subject: [PATCH 1844/2301] [CIR][Asm] Parse extra attributes after calling convention (#847) Align parsing of cir.call with its pretty-printing. --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 32 ++++++++++++------------- clang/test/CIR/IR/call.cir | 13 +++++++--- 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 002b8ad026fb..1fb640f89d4d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2856,22 +2856,6 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, allResultTypes = opsFnTy.getResults(); result.addTypes(allResultTypes); - auto &builder = parser.getBuilder(); - Attribute extraAttrs; - if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) { - if (parser.parseLParen().failed()) - return failure(); - if (parser.parseAttribute(extraAttrs).failed()) - return failure(); - if (parser.parseRParen().failed()) - return failure(); - } else { - NamedAttrList empty; - extraAttrs = mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), empty.getDictionary(builder.getContext())); - } - result.addAttribute(extraAttrsAttrName, extraAttrs); - if (parser.resolveOperands(ops, operandsTypes, opsLoc, result.operands)) return ::mlir::failure(); @@ -2891,6 +2875,7 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, return ::mlir::failure(); } + auto &builder = parser.getBuilder(); if (parser.parseOptionalKeyword("cc").succeeded()) { if (parser.parseLParen().failed()) return failure(); @@ -2903,6 +2888,21 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, builder.getContext(), callingConv)); } + Attribute extraAttrs; + if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) { + if (parser.parseLParen().failed()) + return failure(); + if (parser.parseAttribute(extraAttrs).failed()) + return failure(); + if (parser.parseRParen().failed()) + return failure(); + } else { + NamedAttrList empty; + extraAttrs = mlir::cir::ExtraFuncAttributesAttr::get( + builder.getContext(), empty.getDictionary(builder.getContext())); + } + result.addAttribute(extraAttrsAttrName, extraAttrs); + // If exception is present and there are cleanups, this should be latest thing // present (after all attributes, etc). mlir::Region *cleanupRegion = nullptr; diff --git a/clang/test/CIR/IR/call.cir b/clang/test/CIR/IR/call.cir index 1810ff088611..0b1fc68622f8 100644 --- a/clang/test/CIR/IR/call.cir +++ b/clang/test/CIR/IR/call.cir @@ -12,13 +12,20 @@ module { cir.return %arg0 : !s32i } + cir.func private @my_add(%a: !s32i, %b: !s32i) -> !s32i cc(spir_function) extra(#fn_attr) + cir.func @ind(%fnptr: !fnptr, %a : !s32i) { %r = cir.call %fnptr(%a) : (!fnptr, !s32i) -> !s32i +// CHECK: %0 = cir.call %arg0(%arg1) : (!cir.ptr>, !s32i) -> !s32i // Check parse->pretty-print round-trip on extra() attribute %7 = cir.call @_ZNSt5arrayIiLm8192EEixEm(%a) : (!s32i) -> !s32i extra(#fn_attr1) +// CHECK: %1 = cir.call @_ZNSt5arrayIiLm8192EEixEm(%arg1) : (!s32i) -> !s32i extra(#fn_attr1) + // Frankenstein's example from clang/test/CIR/Lowering/call-op-call-conv.cir + %3 = cir.try_call @my_add(%r, %7) ^continue, ^landing_pad : (!s32i, !s32i) -> !s32i cc(spir_function) extra(#fn_attr1) +// CHECK: %2 = cir.try_call @my_add(%0, %1) ^bb1, ^bb2 : (!s32i, !s32i) -> !s32i cc(spir_function) extra(#fn_attr1) + ^continue: + cir.br ^landing_pad + ^landing_pad: cir.return } } - -// CHECK: %0 = cir.call %arg0(%arg1) : (!cir.ptr>, !s32i) -> !s32i -// CHECK: %1 = cir.call @_ZNSt5arrayIiLm8192EEixEm(%arg1) : (!s32i) -> !s32i extra(#fn_attr1) From 20adc5c457500284ab90232dc80a766cd80a2f5d Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Tue, 17 Sep 2024 15:40:19 -0400 Subject: [PATCH 1845/2301] [CIR][CIRGen][Lowering] Lower AArch64::BI__builtin_arm_ldrex to llvm intrinsic (#833) As title. And this PR introduces IntrinsicCallOp which will be used to lower intrinsics to llvm intrinsics. This PR handles clang::AArch64::BI__builtin_arm_ldrex. For this particular one, we only have test .cir, because a MLIR issue mentioned https://github.com/llvm/clangir/pull/833#issuecomment-2347159307 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 35 ++++++++++++++ clang/include/clang/CIR/MissingFeatures.h | 1 + .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 40 +++++++++++++++- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 7 ++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 +++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 31 +++++++++++- clang/test/CIR/CodeGen/builtin-arm-ldrex.c | 48 +++++++++++++++++++ clang/test/CIR/IR/invalid-llvm-intrinsic.cir | 11 +++++ clang/test/CIR/IR/llvm-intrinsic.cir | 12 +++++ 9 files changed, 185 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-arm-ldrex.c create mode 100644 clang/test/CIR/IR/invalid-llvm-intrinsic.cir create mode 100644 clang/test/CIR/IR/llvm-intrinsic.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index cd7bc76fb377..7ff25bb35f2f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3168,6 +3168,41 @@ def FuncOp : CIR_Op<"func", [ let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// IntrinsicCallOp +//===----------------------------------------------------------------------===// + +def IntrinsicCallOp : CIR_Op<"llvm.intrinsic"> { + let summary = "Call to intrinsic functions that is not defined in CIR"; + let description = [{ + `cir.llvm.intrinsic` operation represents a call-like expression which has + return type and arguments that maps directly to a llvm intrinsic. + It only records intrinsic `intrinsic_name`. + }]; + + let results = (outs Optional:$result); + let arguments = (ins + StrAttr:$intrinsic_name, Variadic:$arg_ops); + + let skipDefaultBuilders = 1; + + let assemblyFormat = [{ + $intrinsic_name $arg_ops `:` functional-type($arg_ops, $result) attr-dict + }]; + + let builders = [ + OpBuilder<(ins "mlir::StringAttr":$intrinsic_name, "mlir::Type":$resType, + CArg<"ValueRange", "{}">:$operands), [{ + $_state.addAttribute("intrinsic_name", intrinsic_name); + $_state.addOperands(operands); + if (resType) + $_state.addTypes(resType); + }]>, + ]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // CallOp and TryCallOp //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index bdfffd4788c1..64d75550ec21 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -256,6 +256,7 @@ struct MissingFeatures { static bool csmeCall() { return false; } static bool undef() { return false; } static bool noFPClass() { return false; } + static bool llvmIntrinsicElementTypeSupport() { return false; } //-- Other missing features diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index ed87e71dac8e..8272d4a7364c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -1569,6 +1569,44 @@ mlir::Value CIRGenFunction::buildScalarOrConstFoldImmArg(unsigned ICEArguments, return Arg; } +static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, + const CallExpr *clangCallExpr, + CIRGenFunction &cgf) { + StringRef intrinsicName; + if (builtinID == clang::AArch64::BI__builtin_arm_ldrex) { + intrinsicName = "llvm.aarch64.ldxr"; + } else { + llvm_unreachable("Unknown builtinID"); + } + // Argument + mlir::Value loadAddr = cgf.buildScalarExpr(clangCallExpr->getArg(0)); + // Get Instrinc call + CIRGenBuilderTy &builder = cgf.getBuilder(); + QualType clangResTy = clangCallExpr->getType(); + mlir::Type realResTy = cgf.ConvertType(clangResTy); + // Return type of LLVM intrinsic is defined in Intrinsic.td, + // which can be found under LLVM IR directory. + mlir::Type funcResTy = builder.getSInt64Ty(); + mlir::Location loc = cgf.getLoc(clangCallExpr->getExprLoc()); + mlir::cir::IntrinsicCallOp op = builder.create( + loc, builder.getStringAttr(intrinsicName), funcResTy, loadAddr); + mlir::Value res = op.getResult(); + + // Convert result type to the expected type. + if (mlir::isa(realResTy)) { + return builder.createIntToPtr(res, realResTy); + } + mlir::cir::IntType intResTy = + builder.getSIntNTy(cgf.CGM.getDataLayout().getTypeSizeInBits(realResTy)); + mlir::Value intCastRes = builder.createIntCast(res, intResTy); + if (mlir::isa(realResTy)) { + return builder.createIntCast(intCastRes, realResTy); + } else { + // Above cases should cover most situations and we have test coverage. + llvm_unreachable("Unsupported return type for now"); + } +} + mlir::Value CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, @@ -1708,7 +1746,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex || BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) { - llvm_unreachable("NYI"); + return buildArmLdrexNon128Intrinsic(BuiltinID, E, *this); } if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex || diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 2e521fdbde46..e4fcf973c54f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -442,7 +442,6 @@ void CIRGenModule::constructAttributeList(StringRef Name, } getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, funcAttrs); - } static mlir::cir::CIRCallOpInterface @@ -1588,9 +1587,9 @@ void CIRGenModule::getTrivialDefaultFunctionAttributes( funcAttrs); } -void CIRGenModule::getDefaultFunctionAttributes(StringRef name, bool hasOptnone, - bool attrOnCallSite, - mlir::NamedAttrList &funcAttrs) { +void CIRGenModule::getDefaultFunctionAttributes( + StringRef name, bool hasOptnone, bool attrOnCallSite, + mlir::NamedAttrList &funcAttrs) { getTrivialDefaultFunctionAttributes(name, hasOptnone, attrOnCallSite, funcAttrs); // If we're just getting the default, get the default values for mergeable diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 1fb640f89d4d..7f1914d1b6e3 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2580,6 +2580,12 @@ LogicalResult cir::FuncOp::verifyType() { return success(); } +LogicalResult cir::IntrinsicCallOp::verify() { + if (!getIntrinsicName().starts_with("llvm.")) + return emitOpError() << "intrinsic name must start with 'llvm.'"; + return success(); +} + // Verifies linkage types // - functions don't have 'common' linkage // - external functions have 'external' or 'extern_weak' linkage diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8fe63f89f7aa..1ee8c44516b2 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2539,6 +2539,35 @@ static mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp( return callIntrinOp; } +class CIRIntrinsicCallLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::IntrinsicCallOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::Type llvmResTy = + getTypeConverter()->convertType(op->getResultTypes()[0]); + if (!llvmResTy) + return op.emitError("expected LLVM result type"); + StringRef name = op.getIntrinsicName(); + // Some llvm intrinsics require ElementType attribute to be attached to + // the argument of pointer type. That prevents us from generating LLVM IR + // because from LLVM dialect, we have LLVM IR like the below which fails + // LLVM IR verification. + // %3 = call i64 @llvm.aarch64.ldxr.p0(ptr %2) + // The expected LLVM IR should be like + // %3 = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) %2) + // TODO(cir): MLIR LLVM dialect should handle this part as CIR has no way + // to set LLVM IR attribute. + assert(!::cir::MissingFeatures::llvmIntrinsicElementTypeSupport()); + replaceOpWithCallLLVMIntrinsicOp(rewriter, op, name, llvmResTy, + adaptor.getOperands()); + return mlir::success(); + } +}; + static mlir::Value createLLVMBitOp(mlir::Location loc, const llvm::Twine &llvmIntrinBaseName, mlir::Type resultTy, mlir::Value operand, @@ -3796,7 +3825,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, - CIRAllocExceptionOpLowering, CIRThrowOpLowering + CIRAllocExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/test/CIR/CodeGen/builtin-arm-ldrex.c b/clang/test/CIR/CodeGen/builtin-arm-ldrex.c new file mode 100644 index 000000000000..974a370185eb --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-arm-ldrex.c @@ -0,0 +1,48 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir -emit-cir -target-feature +neon %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +struct twoFldT { + char a, b; +}; +// CIR: !ty_twoFldT = !cir.struct) -> !s64i +// CIR: [[CAST0:%.*]] = cir.cast(integral, [[INTRES0]] : !s64i), !s8i +// CIR: [[CAST_I32:%.*]] = cir.cast(integral, [[CAST0]] : !s8i), !s32i + + sum += __builtin_arm_ldrex((short *)addr); +// CIR: [[INTRES1:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[CAST1:%.*]] = cir.cast(integral, [[INTRES1]] : !s64i), !s16i +// CIR: [[CAST_I16:%.*]] = cir.cast(integral, [[CAST1]] : !s16i), !s32i + + sum += __builtin_arm_ldrex((int *)addr); +// CIR: [[INTRES2:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[CAST2:%.*]] = cir.cast(integral, [[INTRES2]] : !s64i), !s32i + + sum += __builtin_arm_ldrex((long long *)addr); +// CIR: [[INTRES3:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i + + sum += __builtin_arm_ldrex(addr64); +// CIR: [[INTRES4:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i + + + sum += *__builtin_arm_ldrex((int **)addr); +// CIR: [[INTRES5:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr>) -> !s64i + + sum += __builtin_arm_ldrex((struct twoFldT **)addr)->a; +// CIR: [[INTRES6:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr>) -> !s64i +// CIR: [[CAST3:%.*]] = cir.cast(int_to_ptr, [[INTRES6]] : !s64i), !cir.ptr +// CIR: [[MEMBER_A:%.*]] = cir.get_member [[CAST3]][0] {name = "a"} : !cir.ptr -> !cir.ptr + + + // TODO: Uncomment next 2 lines, add tests when floating result type supported + // sum += __builtin_arm_ldrex(addrfloat); + + // sum += __builtin_arm_ldrex((double *)addr); + + + return sum; +} diff --git a/clang/test/CIR/IR/invalid-llvm-intrinsic.cir b/clang/test/CIR/IR/invalid-llvm-intrinsic.cir new file mode 100644 index 000000000000..38b53a4e1b30 --- /dev/null +++ b/clang/test/CIR/IR/invalid-llvm-intrinsic.cir @@ -0,0 +1,11 @@ +// Test attempt to construct ill-formed global annotations +// RUN: cir-opt %s -verify-diagnostics + +!s32i = !cir.int +!s64i = !cir.int +cir.func @foo() { + %a = cir.alloca !s32i, !cir.ptr, ["a"] {alignment = 4 : i64} + // expected-error @below {{'cir.llvm.intrinsic' op intrinsic name must start with 'llvm.'}} + %i = cir.llvm.intrinsic "ll.aarch64.ldxr" %a : (!cir.ptr) -> !s64i + cir.return +} diff --git a/clang/test/CIR/IR/llvm-intrinsic.cir b/clang/test/CIR/IR/llvm-intrinsic.cir new file mode 100644 index 000000000000..687db64ee09b --- /dev/null +++ b/clang/test/CIR/IR/llvm-intrinsic.cir @@ -0,0 +1,12 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +!s32i = !cir.int +!s64i = !cir.int +cir.func @foo() { + %a = cir.alloca !s32i, !cir.ptr, ["a"] {alignment = 4 : i64} + %i = cir.llvm.intrinsic "llvm.aarch64.ldxr" %a : (!cir.ptr) -> !s64i + cir.return +} + +// CHECK: %1 = cir.llvm.intrinsic "llvm.aarch64.ldxr" %0 : (!cir.ptr) -> !s64i From 34f8983b89ff3460b4e8d3d323117db18b236ca1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 17 Sep 2024 16:31:34 -0700 Subject: [PATCH 1846/2301] [CIR][FlattenCFG][NFC] Exceptions: refactor landing pad code and catches This is in prep for creating more accurate landing pads w.r.t to their functions call and associated cleanup, so far we only support one. --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 195 +++++++++++------- 1 file changed, 116 insertions(+), 79 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index aa04e079a524..00491fad5746 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -294,29 +294,26 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { return mlir::ArrayAttr::get(caseAttrList.getContext(), symbolList); } - mlir::Block * - buildCatchers(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, - mlir::Block *afterBody, mlir::Block *afterTry, - SmallVectorImpl &callsToRewrite) const { - auto loc = tryOp.getLoc(); - // Replace the tryOp return with a branch that jumps out of the body. - rewriter.setInsertionPointToEnd(afterBody); - auto tryBodyYield = cast(afterBody->getTerminator()); - - mlir::Block *beforeCatch = rewriter.getInsertionBlock(); - auto *catchBegin = + struct LandingInfo { + mlir::Block *pad = nullptr; + mlir::Block *dispatch = nullptr; + }; + + LandingInfo + buildLandingPad(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, + mlir::Block *beforeCatch, + SmallVectorImpl &callsToRewrite, + mlir::ArrayAttr &caseAttrList) const { + + auto *landingPadBlock = rewriter.splitBlock(beforeCatch, rewriter.getInsertionPoint()); - rewriter.setInsertionPointToEnd(beforeCatch); - rewriter.replaceOpWithNewOp(tryBodyYield, afterTry); - - // Start the landing pad by getting the inflight exception information. - rewriter.setInsertionPointToEnd(catchBegin); + rewriter.setInsertionPointToEnd(landingPadBlock); auto exceptionPtrType = mlir::cir::PointerType::get( mlir::cir::VoidType::get(rewriter.getContext())); auto typeIdType = mlir::cir::IntType::get(getContext(), 32, false); mlir::ArrayAttr symlist = collectTypeSymbols(tryOp); auto inflightEh = rewriter.create( - loc, exceptionPtrType, typeIdType, + tryOp.getLoc(), exceptionPtrType, typeIdType, tryOp.getCleanup() ? mlir::UnitAttr::get(tryOp.getContext()) : nullptr, symlist); auto selector = inflightEh.getTypeId(); @@ -336,85 +333,125 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto cleanupYield = cast(cleanupBlock->getTerminator()); cleanupYield->erase(); - rewriter.mergeBlocks(cleanupBlock, catchBegin); - rewriter.setInsertionPointToEnd(catchBegin); + rewriter.mergeBlocks(cleanupBlock, landingPadBlock); + rewriter.setInsertionPointToEnd(landingPadBlock); } // Handle dispatch. In could in theory use a switch, but let's just // mimic LLVM more closely since we have no specific thing to achieve // doing that (might not play as well with existing optimizers either). auto *nextDispatcher = - rewriter.splitBlock(catchBegin, rewriter.getInsertionPoint()); - rewriter.setInsertionPointToEnd(catchBegin); - mlir::ArrayAttr caseAttrList = tryOp.getCatchTypesAttr(); - nextDispatcher->addArgument(exceptionPtr.getType(), loc); + rewriter.splitBlock(landingPadBlock, rewriter.getInsertionPoint()); + rewriter.setInsertionPointToEnd(landingPadBlock); + caseAttrList = tryOp.getCatchTypesAttr(); + nextDispatcher->addArgument(exceptionPtr.getType(), tryOp.getLoc()); SmallVector dispatcherInitOps = {exceptionPtr}; bool tryOnlyHasCatchAll = caseAttrList.size() == 1 && isa(caseAttrList[0]); if (!tryOnlyHasCatchAll) { - nextDispatcher->addArgument(selector.getType(), loc); + nextDispatcher->addArgument(selector.getType(), tryOp.getLoc()); dispatcherInitOps.push_back(selector); } - rewriter.create(loc, nextDispatcher, dispatcherInitOps); + rewriter.create(tryOp.getLoc(), nextDispatcher, + dispatcherInitOps); + return LandingInfo{landingPadBlock, nextDispatcher}; + } - // Fill in dispatcher. - rewriter.setInsertionPointToEnd(nextDispatcher); - llvm::MutableArrayRef caseRegions = tryOp.getCatchRegions(); - unsigned caseCnt = 0; + mlir::Block *buildCatch(mlir::cir::TryOp tryOp, + mlir::PatternRewriter &rewriter, + mlir::Block *afterTry, mlir::Block *dispatcher, + SmallVectorImpl &callsToRewrite, + mlir::Attribute catchAttr, + mlir::Attribute nextCatchAttr, + mlir::Region &catchRegion) const { + mlir::Location loc = tryOp.getLoc(); + mlir::Block *nextDispatcher = nullptr; + if (auto typeIdGlobal = dyn_cast(catchAttr)) { + auto *previousDispatcher = dispatcher; + auto typeId = + rewriter.create(loc, typeIdGlobal.getSymbol()); + auto ehPtr = previousDispatcher->getArgument(0); + auto ehSel = previousDispatcher->getArgument(1); + + auto match = rewriter.create( + loc, mlir::cir::BoolType::get(rewriter.getContext()), + mlir::cir::CmpOpKind::eq, ehSel, typeId); + + mlir::Block *typeCatchBlock = + buildTypeCase(rewriter, catchRegion, afterTry, ehPtr.getType()); + nextDispatcher = rewriter.createBlock(afterTry); + rewriter.setInsertionPointToEnd(previousDispatcher); + + // Next dispatcher gets by default both exception ptr and selector info, + // but on a catch all we don't need selector info. + nextDispatcher->addArgument(ehPtr.getType(), loc); + SmallVector nextDispatchOps = {ehPtr}; + if (!isa(nextCatchAttr)) { + nextDispatcher->addArgument(ehSel.getType(), loc); + nextDispatchOps.push_back(ehSel); + } - for (mlir::Attribute caseAttr : caseAttrList) { - if (auto typeIdGlobal = dyn_cast(caseAttr)) { - auto *previousDispatcher = nextDispatcher; - auto typeId = rewriter.create( - loc, typeIdGlobal.getSymbol()); - auto ehPtr = previousDispatcher->getArgument(0); - auto ehSel = previousDispatcher->getArgument(1); - - auto match = rewriter.create( - loc, mlir::cir::BoolType::get(rewriter.getContext()), - mlir::cir::CmpOpKind::eq, ehSel, typeId); - - mlir::Block *typeCatchBlock = buildTypeCase( - rewriter, caseRegions[caseCnt], afterTry, ehPtr.getType()); - nextDispatcher = rewriter.createBlock(afterTry); - rewriter.setInsertionPointToEnd(previousDispatcher); - - // Next dispatcher gets by default both exception ptr and selector info, - // but on a catch all we don't need selector info. - nextDispatcher->addArgument(ehPtr.getType(), loc); - SmallVector nextDispatchOps = {ehPtr}; - if (!isa(caseAttrList[caseCnt + 1])) { - nextDispatcher->addArgument(ehSel.getType(), loc); - nextDispatchOps.push_back(ehSel); - } + rewriter.create( + loc, match, typeCatchBlock, nextDispatcher, mlir::ValueRange{ehPtr}, + nextDispatchOps); + rewriter.setInsertionPointToEnd(nextDispatcher); + } else if (auto catchAll = dyn_cast(catchAttr)) { + // In case the catch(...) is all we got, `dispatcher` shall be + // non-empty. + assert(dispatcher->getArguments().size() == 1 && + "expected one block argument"); + auto ehPtr = dispatcher->getArgument(0); + buildAllCase(rewriter, catchRegion, afterTry, dispatcher, ehPtr); + // Do not update `nextDispatcher`, no more business in try/catch + } else if (auto catchUnwind = + dyn_cast(catchAttr)) { + // assert(dispatcher->empty() && "expect empty dispatcher"); + // assert(!dispatcher->args_empty() && "expected block argument"); + assert(dispatcher->getArguments().size() == 2 && + "expected two block argument"); + buildUnwindCase(rewriter, catchRegion, dispatcher); + // Do not update `nextDispatcher`, no more business in try/catch + } + return nextDispatcher; + } - rewriter.create( - loc, match, typeCatchBlock, nextDispatcher, mlir::ValueRange{ehPtr}, - nextDispatchOps); - rewriter.setInsertionPointToEnd(nextDispatcher); - } else if (auto catchAll = dyn_cast(caseAttr)) { - // In case the catch(...) is all we got, `nextDispatcher` shall be - // non-empty. - assert(nextDispatcher->getArguments().size() == 1 && - "expected one block argument"); - auto ehPtr = nextDispatcher->getArgument(0); - buildAllCase(rewriter, caseRegions[caseCnt], afterTry, nextDispatcher, - ehPtr); - nextDispatcher = nullptr; // No more business in try/catch - } else if (auto catchUnwind = - dyn_cast(caseAttr)) { - // assert(nextDispatcher->empty() && "expect empty dispatcher"); - // assert(!nextDispatcher->args_empty() && "expected block argument"); - assert(nextDispatcher->getArguments().size() == 2 && - "expected two block argument"); - buildUnwindCase(rewriter, caseRegions[caseCnt], nextDispatcher); - nextDispatcher = nullptr; // No more business in try/catch - } - caseCnt++; + mlir::Block * + buildCatchers(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, + mlir::Block *afterBody, mlir::Block *afterTry, + SmallVectorImpl &callsToRewrite) const { + // Replace the tryOp return with a branch that jumps out of the body. + rewriter.setInsertionPointToEnd(afterBody); + auto tryBodyYield = cast(afterBody->getTerminator()); + + mlir::Block *beforeCatch = rewriter.getInsertionBlock(); + rewriter.setInsertionPointToEnd(beforeCatch); + rewriter.replaceOpWithNewOp(tryBodyYield, afterTry); + + // Start the landing pad by getting the inflight exception information. + mlir::ArrayAttr catchAttrList; + LandingInfo landingInfo = buildLandingPad(tryOp, rewriter, beforeCatch, + callsToRewrite, catchAttrList); + mlir::Block *landingPadBlock = landingInfo.pad; + mlir::Block *nextDispatcher = landingInfo.dispatch; + + // Fill in dispatcher to all catch clauses. + rewriter.setInsertionPointToEnd(nextDispatcher); + llvm::MutableArrayRef catchRegions = tryOp.getCatchRegions(); + unsigned catchIdx = 0; + + // Build control-flow for all catch clauses. + for (mlir::Attribute catchAttr : catchAttrList) { + mlir::Attribute nextCatchAttr; + if (catchIdx + 1 < catchAttrList.size()) + nextCatchAttr = catchAttrList[catchIdx + 1]; + nextDispatcher = + buildCatch(tryOp, rewriter, afterTry, nextDispatcher, callsToRewrite, + catchAttr, nextCatchAttr, catchRegions[catchIdx]); + catchIdx++; } - assert(!nextDispatcher && "no dispatcher available anymore"); - return catchBegin; + assert(!nextDispatcher && "last dispatch expected to be nullptr"); + return landingPadBlock; } mlir::Block *buildTryBody(mlir::cir::TryOp tryOp, From 196e9c642331b81ca5d6e325a350b2eab4b0ef7c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 17 Sep 2024 17:36:41 -0700 Subject: [PATCH 1847/2301] [CIR][CIRGen][NFC] Exceptions: refactor more infra for handling multiple landing pads --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 36 ++++++++++++------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 00491fad5746..3ed70122dd21 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -357,6 +357,18 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { return LandingInfo{landingPadBlock, nextDispatcher}; } + mlir::Block * + buildLandingPads(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, + mlir::Block *beforeCatch, + SmallVectorImpl &callsToRewrite, + mlir::ArrayAttr &catchAttrList, + SmallVectorImpl &landingPads) const { + LandingInfo landingInfo = buildLandingPad(tryOp, rewriter, beforeCatch, + callsToRewrite, catchAttrList); + landingPads.push_back(landingInfo.pad); + return landingInfo.dispatch; + } + mlir::Block *buildCatch(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, mlir::Block *afterTry, mlir::Block *dispatcher, @@ -415,10 +427,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { return nextDispatcher; } - mlir::Block * - buildCatchers(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, - mlir::Block *afterBody, mlir::Block *afterTry, - SmallVectorImpl &callsToRewrite) const { + void buildCatchers(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, + mlir::Block *afterBody, mlir::Block *afterTry, + SmallVectorImpl &callsToRewrite, + SmallVectorImpl &landingPads) const { // Replace the tryOp return with a branch that jumps out of the body. rewriter.setInsertionPointToEnd(afterBody); auto tryBodyYield = cast(afterBody->getTerminator()); @@ -429,10 +441,9 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Start the landing pad by getting the inflight exception information. mlir::ArrayAttr catchAttrList; - LandingInfo landingInfo = buildLandingPad(tryOp, rewriter, beforeCatch, - callsToRewrite, catchAttrList); - mlir::Block *landingPadBlock = landingInfo.pad; - mlir::Block *nextDispatcher = landingInfo.dispatch; + mlir::Block *nextDispatcher = + buildLandingPads(tryOp, rewriter, beforeCatch, callsToRewrite, + catchAttrList, landingPads); // Fill in dispatcher to all catch clauses. rewriter.setInsertionPointToEnd(nextDispatcher); @@ -451,7 +462,6 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { } assert(!nextDispatcher && "last dispatch expected to be nullptr"); - return landingPadBlock; } mlir::Block *buildTryBody(mlir::cir::TryOp tryOp, @@ -501,9 +511,11 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { mlir::Block *afterTry = buildTryBody(tryOp, rewriter); // Build catchers. - mlir::Block *landingPad = - buildCatchers(tryOp, rewriter, afterBody, afterTry, callsToRewrite); + SmallVector landingPads; + buildCatchers(tryOp, rewriter, afterBody, afterTry, callsToRewrite, + landingPads); rewriter.eraseOp(tryOp); + assert(landingPads.size() == 1 && "NYI"); // Rewrite calls. for (CallOp callOp : callsToRewrite) { @@ -521,7 +533,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { if (callOp.getNumResults() > 0) resTy = callOp.getResult().getType(); auto tryCall = rewriter.replaceOpWithNewOp( - callOp, symbol, resTy, cont, landingPad, callOp.getOperands()); + callOp, symbol, resTy, cont, landingPads[0], callOp.getOperands()); tryCall.setExtraAttrsAttr(extraAttrs); if (ast) tryCall.setAstAttr(*ast); From d170b3d09003d756ece51c94005f11dba009b879 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 17 Sep 2024 18:07:19 -0700 Subject: [PATCH 1848/2301] [CIR][CIRGen][NFCI] Exceptions: generalize landing pad insertion More refactoring, now the infra to generate one landing pad per call is up, but we still have an assert for more than one call, next commit will actually introduce new functionality. --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 38 ++++++++++--------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 3ed70122dd21..0f334c127e40 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -303,7 +303,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { buildLandingPad(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, mlir::Block *beforeCatch, SmallVectorImpl &callsToRewrite, - mlir::ArrayAttr &caseAttrList) const { + mlir::ArrayAttr &caseAttrList, unsigned callIdx) const { auto *landingPadBlock = rewriter.splitBlock(beforeCatch, rewriter.getInsertionPoint()); @@ -320,16 +320,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto exceptionPtr = inflightEh.getExceptionPtr(); // Time to emit cleanup's. - if (tryOp.getCleanup()) { - assert(callsToRewrite.size() == 1 && - "NYI: if this isn't enough, move region instead"); - // TODO(cir): this might need to be duplicated instead of consumed since - // for user-written try/catch we want these cleanups to also run when the - // regular try scope adjurns (in case no exception is triggered). - assert(tryOp.getSynthetic() && - "not implemented for user written try/catch"); - mlir::Block *cleanupBlock = - &callsToRewrite[0].getCleanup().getBlocks().back(); + assert(callsToRewrite.size() == 1 && "NYI"); + mlir::cir::CallOp callOp = callsToRewrite[callIdx]; + if (!callOp.getCleanup().empty()) { + mlir::Block *cleanupBlock = &callOp.getCleanup().getBlocks().back(); auto cleanupYield = cast(cleanupBlock->getTerminator()); cleanupYield->erase(); @@ -363,10 +357,16 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { SmallVectorImpl &callsToRewrite, mlir::ArrayAttr &catchAttrList, SmallVectorImpl &landingPads) const { - LandingInfo landingInfo = buildLandingPad(tryOp, rewriter, beforeCatch, - callsToRewrite, catchAttrList); - landingPads.push_back(landingInfo.pad); - return landingInfo.dispatch; + unsigned numCalls = callsToRewrite.size(); + mlir::Block *dispatch = nullptr; + for (unsigned callIdx = 0; callIdx != numCalls; ++callIdx) { + LandingInfo landingInfo = buildLandingPad( + tryOp, rewriter, beforeCatch, callsToRewrite, catchAttrList, callIdx); + landingPads.push_back(landingInfo.pad); + dispatch = landingInfo.dispatch; + } + + return dispatch; } mlir::Block *buildCatch(mlir::cir::TryOp tryOp, @@ -515,9 +515,11 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { buildCatchers(tryOp, rewriter, afterBody, afterTry, callsToRewrite, landingPads); rewriter.eraseOp(tryOp); - assert(landingPads.size() == 1 && "NYI"); + assert((landingPads.size() == callsToRewrite.size()) && + "expected matching number of entries"); // Rewrite calls. + unsigned callIdx = 0; for (CallOp callOp : callsToRewrite) { mlir::Block *callBlock = callOp->getBlock(); mlir::Block *cont = @@ -533,10 +535,12 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { if (callOp.getNumResults() > 0) resTy = callOp.getResult().getType(); auto tryCall = rewriter.replaceOpWithNewOp( - callOp, symbol, resTy, cont, landingPads[0], callOp.getOperands()); + callOp, symbol, resTy, cont, landingPads[callIdx], + callOp.getOperands()); tryCall.setExtraAttrsAttr(extraAttrs); if (ast) tryCall.setAstAttr(*ast); + callIdx++; } // Quick block cleanup: no indirection to the post try block. From fdf62fc79708409d23b55e2a0d5bc5ce22b92076 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 Sep 2024 11:11:09 -0700 Subject: [PATCH 1849/2301] [CIR][FlattenCFG][NFCI] Exceptions: more generalization for dispatch emission --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 56 +++++++++---------- 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 0f334c127e40..ef6ed26eeb01 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -294,19 +294,11 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { return mlir::ArrayAttr::get(caseAttrList.getContext(), symbolList); } - struct LandingInfo { - mlir::Block *pad = nullptr; - mlir::Block *dispatch = nullptr; - }; - - LandingInfo - buildLandingPad(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, - mlir::Block *beforeCatch, - SmallVectorImpl &callsToRewrite, - mlir::ArrayAttr &caseAttrList, unsigned callIdx) const { - - auto *landingPadBlock = - rewriter.splitBlock(beforeCatch, rewriter.getInsertionPoint()); + void buildLandingPad(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, + mlir::Block *beforeCatch, mlir::Block *landingPadBlock, + mlir::Block *catchDispatcher, + SmallVectorImpl &callsToRewrite, + mlir::ArrayAttr &caseAttrList, unsigned callIdx) const { rewriter.setInsertionPointToEnd(landingPadBlock); auto exceptionPtrType = mlir::cir::PointerType::get( mlir::cir::VoidType::get(rewriter.getContext())); @@ -331,42 +323,44 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { rewriter.setInsertionPointToEnd(landingPadBlock); } - // Handle dispatch. In could in theory use a switch, but let's just - // mimic LLVM more closely since we have no specific thing to achieve - // doing that (might not play as well with existing optimizers either). - auto *nextDispatcher = - rewriter.splitBlock(landingPadBlock, rewriter.getInsertionPoint()); - rewriter.setInsertionPointToEnd(landingPadBlock); + // Branch out to the catch clauses dispatcher. caseAttrList = tryOp.getCatchTypesAttr(); - nextDispatcher->addArgument(exceptionPtr.getType(), tryOp.getLoc()); + catchDispatcher->addArgument(exceptionPtr.getType(), tryOp.getLoc()); SmallVector dispatcherInitOps = {exceptionPtr}; bool tryOnlyHasCatchAll = caseAttrList.size() == 1 && isa(caseAttrList[0]); if (!tryOnlyHasCatchAll) { - nextDispatcher->addArgument(selector.getType(), tryOp.getLoc()); + catchDispatcher->addArgument(selector.getType(), tryOp.getLoc()); dispatcherInitOps.push_back(selector); } - rewriter.create(tryOp.getLoc(), nextDispatcher, + rewriter.create(tryOp.getLoc(), catchDispatcher, dispatcherInitOps); - return LandingInfo{landingPadBlock, nextDispatcher}; + return; } mlir::Block * buildLandingPads(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, - mlir::Block *beforeCatch, + mlir::Block *beforeCatch, mlir::Block *afterTry, SmallVectorImpl &callsToRewrite, mlir::ArrayAttr &catchAttrList, SmallVectorImpl &landingPads) const { unsigned numCalls = callsToRewrite.size(); - mlir::Block *dispatch = nullptr; + // Create the first landing pad block and a placeholder for the initial + // catch dispatcher (which will be the common destination for every new + // landing pad we create). + auto *landingPadBlock = + rewriter.splitBlock(beforeCatch, rewriter.getInsertionPoint()); + mlir::Block *dispatcher = rewriter.createBlock(afterTry); + for (unsigned callIdx = 0; callIdx != numCalls; ++callIdx) { - LandingInfo landingInfo = buildLandingPad( - tryOp, rewriter, beforeCatch, callsToRewrite, catchAttrList, callIdx); - landingPads.push_back(landingInfo.pad); - dispatch = landingInfo.dispatch; + buildLandingPad(tryOp, rewriter, beforeCatch, landingPadBlock, dispatcher, + callsToRewrite, catchAttrList, callIdx); + landingPads.push_back(landingPadBlock); + if (callIdx < numCalls - 1) + landingPadBlock = rewriter.createBlock(afterTry); } - return dispatch; + return dispatcher; } mlir::Block *buildCatch(mlir::cir::TryOp tryOp, @@ -442,7 +436,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Start the landing pad by getting the inflight exception information. mlir::ArrayAttr catchAttrList; mlir::Block *nextDispatcher = - buildLandingPads(tryOp, rewriter, beforeCatch, callsToRewrite, + buildLandingPads(tryOp, rewriter, beforeCatch, afterTry, callsToRewrite, catchAttrList, landingPads); // Fill in dispatcher to all catch clauses. From 30648271ac75da51a7e7a21cf9da50496da8331c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 Sep 2024 11:28:43 -0700 Subject: [PATCH 1850/2301] [CIR][FlattenCFG][NFCI] Exceptions: generalize catch dispatch emission --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 40 ++++++++++++------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index ef6ed26eeb01..a92ac51acbb1 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -298,11 +298,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { mlir::Block *beforeCatch, mlir::Block *landingPadBlock, mlir::Block *catchDispatcher, SmallVectorImpl &callsToRewrite, - mlir::ArrayAttr &caseAttrList, unsigned callIdx) const { + unsigned callIdx, bool tryOnlyHasCatchAll, + mlir::Type exceptionPtrType, + mlir::Type typeIdType) const { rewriter.setInsertionPointToEnd(landingPadBlock); - auto exceptionPtrType = mlir::cir::PointerType::get( - mlir::cir::VoidType::get(rewriter.getContext())); - auto typeIdType = mlir::cir::IntType::get(getContext(), 32, false); mlir::ArrayAttr symlist = collectTypeSymbols(tryOp); auto inflightEh = rewriter.create( tryOp.getLoc(), exceptionPtrType, typeIdType, @@ -324,13 +323,12 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { } // Branch out to the catch clauses dispatcher. - caseAttrList = tryOp.getCatchTypesAttr(); - catchDispatcher->addArgument(exceptionPtr.getType(), tryOp.getLoc()); + assert(catchDispatcher->getNumArguments() >= 1 && + "expected at least one argument in place"); SmallVector dispatcherInitOps = {exceptionPtr}; - bool tryOnlyHasCatchAll = caseAttrList.size() == 1 && - isa(caseAttrList[0]); if (!tryOnlyHasCatchAll) { - catchDispatcher->addArgument(selector.getType(), tryOp.getLoc()); + assert(catchDispatcher->getNumArguments() == 2 && + "expected two arguments in place"); dispatcherInitOps.push_back(selector); } rewriter.create(tryOp.getLoc(), catchDispatcher, @@ -342,19 +340,29 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { buildLandingPads(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, mlir::Block *beforeCatch, mlir::Block *afterTry, SmallVectorImpl &callsToRewrite, - mlir::ArrayAttr &catchAttrList, - SmallVectorImpl &landingPads) const { + SmallVectorImpl &landingPads, + bool tryOnlyHasCatchAll) const { unsigned numCalls = callsToRewrite.size(); // Create the first landing pad block and a placeholder for the initial // catch dispatcher (which will be the common destination for every new // landing pad we create). auto *landingPadBlock = rewriter.splitBlock(beforeCatch, rewriter.getInsertionPoint()); + + // For the dispatcher, already add the block arguments and prepare the + // proper types the landing pad should use to jump to. mlir::Block *dispatcher = rewriter.createBlock(afterTry); + auto exceptionPtrType = mlir::cir::PointerType::get( + mlir::cir::VoidType::get(rewriter.getContext())); + auto typeIdType = mlir::cir::IntType::get(getContext(), 32, false); + dispatcher->addArgument(exceptionPtrType, tryOp.getLoc()); + if (!tryOnlyHasCatchAll) + dispatcher->addArgument(typeIdType, tryOp.getLoc()); for (unsigned callIdx = 0; callIdx != numCalls; ++callIdx) { buildLandingPad(tryOp, rewriter, beforeCatch, landingPadBlock, dispatcher, - callsToRewrite, catchAttrList, callIdx); + callsToRewrite, callIdx, tryOnlyHasCatchAll, + exceptionPtrType, typeIdType); landingPads.push_back(landingPadBlock); if (callIdx < numCalls - 1) landingPadBlock = rewriter.createBlock(afterTry); @@ -433,11 +441,15 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { rewriter.setInsertionPointToEnd(beforeCatch); rewriter.replaceOpWithNewOp(tryBodyYield, afterTry); + // Retrieve catch list and some properties. + mlir::ArrayAttr catchAttrList = tryOp.getCatchTypesAttr(); + bool tryOnlyHasCatchAll = catchAttrList.size() == 1 && + isa(catchAttrList[0]); + // Start the landing pad by getting the inflight exception information. - mlir::ArrayAttr catchAttrList; mlir::Block *nextDispatcher = buildLandingPads(tryOp, rewriter, beforeCatch, afterTry, callsToRewrite, - catchAttrList, landingPads); + landingPads, tryOnlyHasCatchAll); // Fill in dispatcher to all catch clauses. rewriter.setInsertionPointToEnd(nextDispatcher); From e38a2b953a9f37b5ed3672392e994a0b00377669 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 Sep 2024 11:53:53 -0700 Subject: [PATCH 1851/2301] [CIR][NFC] Move things around in try-catch-dtors.cpp --- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 25 +++++++++++----------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index fa6bd18ddf0a..4679b50e5b01 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -DLLVM_IMPLEMENTED -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -DLLVM_IMPLEMENTED -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s struct Vec { @@ -74,18 +74,6 @@ void yo2() { r++; } } - -void yo3(bool x) { - int r = 1; - try { - Vec v1, v2, v3, v4; - } catch (...) { - r++; - } -} - -#endif - // CIR: cir.func @_Z3yo2v() // CIR: cir.scope { // CIR: cir.alloca ![[VecTy]] @@ -108,6 +96,15 @@ void yo3(bool x) { // CIR: cir.return // CIR: } +void yo3(bool x) { + int r = 1; + try { + Vec v1, v2, v3, v4; + } catch (...) { + r++; + } +} + // CIR: cir.scope { // CIR: %[[V1:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v1" // CIR: %[[V2:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v2" @@ -139,3 +136,5 @@ void yo3(bool x) { // CIR: }] // CIR: } // CIR: cir.return + +#endif From 4d08c777016552cfb1d4d015bcf845e37cef5340 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 Sep 2024 11:48:24 -0700 Subject: [PATCH 1852/2301] [CIR][FlattenCFG] Exceptions: enable many calls / many landing pads support --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 3 +- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 79 ++++++++++++++++++- 2 files changed, 79 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index a92ac51acbb1..f3a39f5eb019 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -311,7 +311,6 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto exceptionPtr = inflightEh.getExceptionPtr(); // Time to emit cleanup's. - assert(callsToRewrite.size() == 1 && "NYI"); mlir::cir::CallOp callOp = callsToRewrite[callIdx]; if (!callOp.getCleanup().empty()) { mlir::Block *cleanupBlock = &callOp.getCleanup().getBlocks().back(); @@ -365,7 +364,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { exceptionPtrType, typeIdType); landingPads.push_back(landingPadBlock); if (callIdx < numCalls - 1) - landingPadBlock = rewriter.createBlock(afterTry); + landingPadBlock = rewriter.createBlock(dispatcher); } return dispatcher; diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 4679b50e5b01..81f156830e63 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.flat.cir +// RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -DLLVM_IMPLEMENTED -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s @@ -21,6 +23,9 @@ void yo() { // CIR-DAG: ![[VecTy:.*]] = !cir.struct // CIR-DAG: ![[S1:.*]] = !cir.struct +// CIR_FLAT-DAG: ![[VecTy:.*]] = !cir.struct +// CIR_FLAT-DAG: ![[S1:.*]] = !cir.struct + // CIR: cir.scope { // CIR: %[[VADDR:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v", init] // CIR: cir.try { @@ -74,7 +79,7 @@ void yo2() { r++; } } -// CIR: cir.func @_Z3yo2v() +// CIR-LABEL: @_Z3yo2v // CIR: cir.scope { // CIR: cir.alloca ![[VecTy]] // CIR: cir.try { @@ -96,6 +101,32 @@ void yo2() { // CIR: cir.return // CIR: } +// CIR_FLAT-LABEL: @_Z3yo2v +// CIR_FLAT: cir.try_call @_ZN3VecC1Ev(%2) ^[[NEXT_CALL_PREP:.*]], ^[[PAD_NODTOR:.*]] : (!cir.ptr) -> () +// CIR_FLAT: ^[[NEXT_CALL_PREP]]: +// CIR_FLAT: cir.br ^[[NEXT_CALL:.*]] loc +// CIR_FLAT: ^[[NEXT_CALL]]: +// CIR_FLAT: cir.try_call @_ZN3VecC1EOS_({{.*}}) ^[[CONT0:.*]], ^[[PAD_DTOR:.*]] : +// CIR_FLAT: ^[[CONT0]]: +// CIR_FLAT: cir.call @_ZN2S1D2Ev +// CIR_FLAT: cir.br ^[[CONT1:.*]] loc +// CIR_FLAT: ^[[CONT1]]: +// CIR_FLAT: cir.call @_ZN3VecD1Ev +// CIR_FLAT: cir.br ^[[AFTER_TRY:.*]] loc +// CIR_FLAT: ^[[PAD_NODTOR]]: +// CIR_FLAT: %exception_ptr, %type_id = cir.eh.inflight_exception +// CIR_FLAT: cir.br ^[[CATCH_BEGIN:.*]](%exception_ptr : !cir.ptr) +// CIR_FLAT: ^[[PAD_DTOR]]: +// CIR_FLAT: %exception_ptr_0, %type_id_1 = cir.eh.inflight_exception +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%2) : (!cir.ptr) -> () +// CIR_FLAT: cir.br ^[[CATCH_BEGIN]](%exception_ptr_0 : !cir.ptr) +// CIR_FLAT: ^[[CATCH_BEGIN]](%5: !cir.ptr +// CIR_FLAT: cir.catch_param begin +// CIR_FLAT: cir.br ^[[AFTER_TRY]] +// CIR_FLAT: ^[[AFTER_TRY]]: +// CIR_FLAT: cir.return +// CIR_FLAT: } + void yo3(bool x) { int r = 1; try { @@ -105,6 +136,7 @@ void yo3(bool x) { } } +// CIR-LABEL: @_Z3yo3b // CIR: cir.scope { // CIR: %[[V1:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v1" // CIR: %[[V2:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v2" @@ -137,4 +169,49 @@ void yo3(bool x) { // CIR: } // CIR: cir.return +// CIR_FLAT-LABEL: @_Z3yo3b +// CIR_FLAT: ^bb1: +// CIR_FLAT: %[[V1:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v1" +// CIR_FLAT: %[[V2:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v2" +// CIR_FLAT: %[[V3:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v3" +// CIR_FLAT: %[[V4:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v4" +// CIR_FLAT: cir.br ^[[CALL0:.*]] loc +// CIR_FLAT: ^[[CALL0]]: +// CIR_FLAT: cir.try_call @_ZN3VecC1Ev(%[[V1]]) ^[[CALL1:.*]], ^[[CLEANUP_V1:.*]] : (!cir.ptr) -> () +// CIR_FLAT: ^[[CALL1]]: +// CIR_FLAT: cir.try_call @_ZN3VecC1Ev(%[[V2]]) ^[[CALL2:.*]], ^[[CLEANUP_V2:.*]] : (!cir.ptr) -> () +// CIR_FLAT: ^[[CALL2]]: +// CIR_FLAT: cir.try_call @_ZN3VecC1Ev(%[[V3]]) ^[[CALL3:.*]], ^[[CLEANUP_V3:.*]] : (!cir.ptr) -> () +// CIR_FLAT: ^[[CALL3]]: +// CIR_FLAT: cir.try_call @_ZN3VecC1Ev(%[[V4]]) ^[[NOTROW_CLEANUP:.*]], ^[[CLEANUP_V4:.*]] : (!cir.ptr) -> () +// CIR_FLAT: ^[[NOTROW_CLEANUP]]: +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[V4]]) : (!cir.ptr) -> () +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[V3]]) : (!cir.ptr) -> () +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[V2]]) : (!cir.ptr) -> () +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR_FLAT: cir.br ^[[AFTER_TRY:.*]] loc +// CIR_FLAT: ^[[CLEANUP_V1]]: +// CIR_FLAT: %exception_ptr, %type_id = cir.eh.inflight_exception +// CIR_FLAT: cir.br ^[[CATCH_BEGIN:.*]](%exception_ptr : !cir.ptr) +// CIR_FLAT: ^[[CLEANUP_V2]]: +// CIR_FLAT: %exception_ptr_0, %type_id_1 = cir.eh.inflight_exception +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR_FLAT: cir.br ^[[CATCH_BEGIN]](%exception_ptr_0 : !cir.ptr) +// CIR_FLAT: ^[[CLEANUP_V3]]: +// CIR_FLAT: %exception_ptr_2, %type_id_3 = cir.eh.inflight_exception +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[V2]]) : (!cir.ptr) -> () +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR_FLAT: cir.br ^[[CATCH_BEGIN]](%exception_ptr_2 : !cir.ptr) +// CIR_FLAT: ^[[CLEANUP_V4]]: +// CIR_FLAT: %exception_ptr_4, %type_id_5 = cir.eh.inflight_exception +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[V3]]) : (!cir.ptr) -> () +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[V2]]) : (!cir.ptr) -> () +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR_FLAT: cir.br ^[[CATCH_BEGIN]](%exception_ptr_4 : !cir.ptr) +// CIR_FLAT: ^[[CATCH_BEGIN]]({{.*}} +// CIR_FLAT: cir.catch_param begin +// CIR_FLAT: cir.br ^[[AFTER_TRY]] +// CIR_FLAT: ^[[AFTER_TRY]]: +// CIR_FLAT: cir.return + #endif From 8c4d7dbff6b18f971c701b123512c6b90f735657 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 Sep 2024 12:40:22 -0700 Subject: [PATCH 1853/2301] [CIR][LowerToLLVM][NFC] Exceptions: use getOrCreateLLVMFuncOp to create personality functions While here, cleanup getOrCreateLLVMFuncOp usaga a bit. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 55 ++++++++----------- 1 file changed, 24 insertions(+), 31 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 1ee8c44516b2..65c9ccee95d4 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -161,6 +161,22 @@ lowerCIRVisibilityToLLVMVisibility(mlir::cir::VisibilityKind visibilityKind) { return ::mlir::LLVM::Visibility::Protected; } } + +// Make sure the LLVM function we are about to create a call for actually +// exists, if not create one. Returns a function +void getOrCreateLLVMFuncOp(mlir::ConversionPatternRewriter &rewriter, + mlir::Operation *srcOp, llvm::StringRef fnName, + mlir::Type fnTy) { + auto modOp = srcOp->getParentOfType(); + auto enclosingFnOp = srcOp->getParentOfType(); + auto *sourceSymbol = mlir::SymbolTable::lookupSymbolIn(modOp, fnName); + if (!sourceSymbol) { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(enclosingFnOp); + rewriter.create(srcOp->getLoc(), fnName, fnTy); + } +} + } // namespace //===----------------------------------------------------------------------===// @@ -1040,9 +1056,10 @@ class CIREhInflightOpLowering auto personalityFnTy = mlir::LLVM::LLVMFunctionType::get(rewriter.getI32Type(), {}, /*isVarArg=*/true); - auto personalityFn = rewriter.create( - loc, "__gxx_personality_v0", personalityFnTy); - llvmFn.setPersonality(personalityFn.getName()); + // Get or create `__gxx_personality_v0` + StringRef fnName = "__gxx_personality_v0"; + getOrCreateLLVMFuncOp(rewriter, op, fnName, personalityFnTy); + llvmFn.setPersonality(fnName); } return mlir::success(); } @@ -3650,20 +3667,6 @@ class CIREhTypeIdOpLowering } }; -// Make sure the LLVM function we are about to create a call for actually -// exists, if not create one. Returns a function -void getOrCreateLLVMFuncOp(mlir::ConversionPatternRewriter &rewriter, - mlir::Location loc, mlir::ModuleOp mod, - mlir::LLVM::LLVMFuncOp enclosingfnOp, - llvm::StringRef fnName, mlir::Type fnTy) { - auto *sourceSymbol = mlir::SymbolTable::lookupSymbolIn(mod, fnName); - if (!sourceSymbol) { - mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint(enclosingfnOp); - rewriter.create(loc, fnName, fnTy); - } -} - class CIRCatchParamOpLowering : public mlir::OpConversionPattern { public: @@ -3672,16 +3675,13 @@ class CIRCatchParamOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::CatchParamOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto modOp = op->getParentOfType(); - auto enclosingFnOp = op->getParentOfType(); if (op.isBegin()) { // Get or create `declare ptr @__cxa_begin_catch(ptr)` StringRef fnName = "__cxa_begin_catch"; auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); auto fnTy = mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {llvmPtrTy}, /*isVarArg=*/false); - getOrCreateLLVMFuncOp(rewriter, op.getLoc(), modOp, enclosingFnOp, fnName, - fnTy); + getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); rewriter.replaceOpWithNewOp( op, mlir::TypeRange{llvmPtrTy}, fnName, mlir::ValueRange{adaptor.getExceptionPtr()}); @@ -3691,8 +3691,7 @@ class CIRCatchParamOpLowering auto fnTy = mlir::LLVM::LLVMFunctionType::get( mlir::LLVM::LLVMVoidType::get(rewriter.getContext()), {}, /*isVarArg=*/false); - getOrCreateLLVMFuncOp(rewriter, op.getLoc(), modOp, enclosingFnOp, fnName, - fnTy); + getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); rewriter.create(op.getLoc(), mlir::TypeRange{}, fnName, mlir::ValueRange{}); rewriter.eraseOp(op); @@ -3740,14 +3739,11 @@ class CIRAllocExceptionOpLowering mlir::ConversionPatternRewriter &rewriter) const override { // Get or create `declare ptr @__cxa_allocate_exception(i64)` StringRef fnName = "__cxa_allocate_exception"; - auto modOp = op->getParentOfType(); - auto enclosingFnOp = op->getParentOfType(); auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); auto int64Ty = mlir::IntegerType::get(rewriter.getContext(), 64); auto fnTy = mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {int64Ty}, /*isVarArg=*/false); - getOrCreateLLVMFuncOp(rewriter, op.getLoc(), modOp, enclosingFnOp, fnName, - fnTy); + getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); auto size = rewriter.create(op.getLoc(), adaptor.getSizeAttr()); rewriter.replaceOpWithNewOp( @@ -3766,15 +3762,12 @@ class CIRThrowOpLowering mlir::ConversionPatternRewriter &rewriter) const override { // Get or create `declare void @__cxa_throw(ptr, ptr, ptr)` StringRef fnName = "__cxa_throw"; - auto modOp = op->getParentOfType(); - auto enclosingFnOp = op->getParentOfType(); auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); auto voidTy = mlir::LLVM::LLVMVoidType::get(rewriter.getContext()); auto fnTy = mlir::LLVM::LLVMFunctionType::get( voidTy, {llvmPtrTy, llvmPtrTy, llvmPtrTy}, /*isVarArg=*/false); - getOrCreateLLVMFuncOp(rewriter, op.getLoc(), modOp, enclosingFnOp, fnName, - fnTy); + getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); mlir::Value typeInfo = rewriter.create( op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), adaptor.getTypeInfoAttr()); From 59c6a3b77e5a5fdc68e9bb77327fa86f38077503 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Wed, 18 Sep 2024 12:50:47 -0700 Subject: [PATCH 1854/2301] [CIR][Lowering] Erase op through rewriter instead of directly (#853) Directly erasing the op causes a use after free later on, presumably because the lowering framework isn't aware of the op being deleted. This fixes `clang/test/CIR/CodeGen/pointer-arith-ext.c` with ASAN. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 65c9ccee95d4..88485d71510c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -608,7 +608,7 @@ class CIRPtrStrideOpLowering index.getLoc(), index.getType(), mlir::IntegerAttr::get(index.getType(), 0)), index); - sub->erase(); + rewriter.eraseOp(sub); } } From 9af835ea25512312b547d556a8c4782e059f35b5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 Sep 2024 13:03:04 -0700 Subject: [PATCH 1855/2301] [CIR] Exceptions: check LLVM output for more complex dtor order --- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 57 ++++++++++++++++++++-- 1 file changed, 53 insertions(+), 4 deletions(-) diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 81f156830e63..c093c7ab318e 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -2,7 +2,7 @@ // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.flat.cir // RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -DLLVM_IMPLEMENTED -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s struct Vec { @@ -65,7 +65,6 @@ void yo() { // LLVM: [[RET_BB]]: // LLVM: ret void -#ifndef LLVM_IMPLEMENTED struct S1 { Vec v; }; @@ -120,7 +119,7 @@ void yo2() { // CIR_FLAT: %exception_ptr_0, %type_id_1 = cir.eh.inflight_exception // CIR_FLAT: cir.call @_ZN3VecD1Ev(%2) : (!cir.ptr) -> () // CIR_FLAT: cir.br ^[[CATCH_BEGIN]](%exception_ptr_0 : !cir.ptr) -// CIR_FLAT: ^[[CATCH_BEGIN]](%5: !cir.ptr +// CIR_FLAT: ^[[CATCH_BEGIN]]( // CIR_FLAT: cir.catch_param begin // CIR_FLAT: cir.br ^[[AFTER_TRY]] // CIR_FLAT: ^[[AFTER_TRY]]: @@ -214,4 +213,54 @@ void yo3(bool x) { // CIR_FLAT: ^[[AFTER_TRY]]: // CIR_FLAT: cir.return -#endif +// LLVM-LABEL: @_Z3yo3b +// LLVM: %[[V1:.*]] = alloca %struct.Vec +// LLVM: %[[V2:.*]] = alloca %struct.Vec +// LLVM: %[[V3:.*]] = alloca %struct.Vec +// LLVM: %[[V4:.*]] = alloca %struct.Vec +// LLVM: br label %[[CALL0:.*]], +// LLVM: [[CALL0]]: +// LLVM: invoke void @_ZN3VecC1Ev(ptr %[[V1]]) +// LLVM: to label %[[CALL1:.*]] unwind label %[[LPAD0:.*]], +// LLVM: [[CALL1]]: +// LLVM: invoke void @_ZN3VecC1Ev(ptr %[[V2]]) +// LLVM: to label %[[CALL2:.*]] unwind label %[[LPAD1:.*]], +// LLVM: [[CALL2]]: +// LLVM: invoke void @_ZN3VecC1Ev(ptr %[[V3]]) +// LLVM: to label %[[CALL3:.*]] unwind label %[[LPAD2:.*]], +// LLVM: [[CALL3]]: +// LLVM: invoke void @_ZN3VecC1Ev(ptr %[[V4]]) +// LLVM: to label %[[REGULAR_CLEANUP:.*]] unwind label %[[LPAD3:.*]], +// LLVM: [[REGULAR_CLEANUP]]: +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V4]]), +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V3]]), +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V2]]), +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]), +// LLVM: br label %[[RET:.*]], +// LLVM: [[LPAD0]]: +// LLVM: landingpad { ptr, i32 } +// LLVM: catch ptr null, +// LLVM: br label %[[CATCH:.*]], +// LLVM: [[LPAD1]]: +// LLVM: landingpad { ptr, i32 } +// LLVM: catch ptr null, +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]), +// LLVM: br label %[[CATCH]], +// LLVM: [[LPAD2]]: +// LLVM: landingpad { ptr, i32 } +// LLVM: catch ptr null, +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V2]]), +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]), +// LLVM: br label %[[CATCH]], +// LLVM: [[LPAD3]]: +// LLVM: landingpad { ptr, i32 } +// LLVM: catch ptr null, +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V3]]), +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V2]]), +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]), +// LLVM: br label %[[CATCH]], +// LLVM: [[CATCH]]: +// LLVM: call ptr @__cxa_begin_catch +// LLVM: br label %[[RET]], +// LLVM: [[RET]]: +// LLVM: ret void From 6046a2962fd52c95d1f1fb0a3290aa944e57ad5a Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Wed, 18 Sep 2024 14:31:35 -0700 Subject: [PATCH 1856/2301] [CIR][ABI] Fix use after free from erasing while iterating (#854) The loop was erasing the user of a value while iterating on the value's users, which results in a use after free. We're already assuming (and asserting) that there's only one user, so we can just access it directly instead. CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp was failing with ASAN before this change. We're now ASAN-clean except for https://github.com/llvm/clangir/issues/829 (which is also in progress). --- .../Transforms/TargetLowering/LowerFunction.cpp | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 831276ed5f27..9e90c44a7d76 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -386,14 +386,11 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // the argument is used only to be stored in a alloca. Value arg = SrcFn.getArgument(ArgNo); assert(arg.hasOneUse()); - for (auto *firstStore : arg.getUsers()) { - assert(isa(firstStore)); - auto argAlloca = cast(firstStore).getAddr(); - rewriter.replaceAllUsesWith(argAlloca, Alloca); - rewriter.eraseOp(firstStore); - rewriter.eraseOp(argAlloca.getDefiningOp()); - } - + auto *firstStore = *arg.user_begin(); + auto argAlloca = cast(firstStore).getAddr(); + rewriter.replaceAllUsesWith(argAlloca, Alloca); + rewriter.eraseOp(firstStore); + rewriter.eraseOp(argAlloca.getDefiningOp()); break; } default: From d42ae8d1d3631fd3434268f0db7b522057dc3556 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Thu, 19 Sep 2024 05:33:22 +0800 Subject: [PATCH 1857/2301] Recommit [CIR][Pipeline] Support -fclangir-analysis-only (#832) Reland https://github.com/llvm/clangir/pull/638 This was reverted due to https://github.com/llvm/clangir/issues/655. I tried to address the problem in the newest commit. The changes of the PR since the last landed one includes: - Move the definition of `cir::CIRGenConsumer` to `clang/include/clang/CIRFrontendAction/CIRGenConsumer.h`, and leave its `HandleTranslationUnit` interface is left empty. So that `cir::CIRGenConsumer` won't need to depend on CodeGen any more. - Change the old definition of `cir::CIRGenConsumer` in `clang/lib/CIR/FrontendAction/CIRGenAction.cpp` and to `CIRLoweringConsumer`, inherited from `cir::CIRGenConsumer`, which implements the original `HandleTranslationUnit` interface. I feel this may improve the readability more even without my original patch. --- .../clang/CIRFrontendAction/CIRGenAction.h | 55 +++++++++++++++++++ .../clang/CIRFrontendAction/CIRGenConsumer.h | 0 clang/include/clang/Driver/Options.td | 5 ++ .../include/clang/Frontend/FrontendOptions.h | 7 ++- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 52 ++++++++++++++++++ clang/lib/Driver/ToolChains/Clang.cpp | 9 +++ clang/lib/Frontend/CompilerInvocation.cpp | 3 + .../ExecuteCompilerInvocation.cpp | 25 ++++++++- clang/test/CIR/CodeGen/analysis-only.cpp | 8 +++ .../CIR/Transforms/lifetime-check-agg.cpp | 1 + clang/test/CIR/analysis-only.cpp | 2 + 11 files changed, 164 insertions(+), 3 deletions(-) create mode 100644 clang/include/clang/CIRFrontendAction/CIRGenConsumer.h create mode 100644 clang/test/CIR/CodeGen/analysis-only.cpp create mode 100644 clang/test/CIR/analysis-only.cpp diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIRFrontendAction/CIRGenAction.h index b4e183d7978a..13c2a4381573 100644 --- a/clang/include/clang/CIRFrontendAction/CIRGenAction.h +++ b/clang/include/clang/CIRFrontendAction/CIRGenAction.h @@ -9,6 +9,7 @@ #ifndef LLVM_CLANG_CIR_CIRGENACTION_H #define LLVM_CLANG_CIR_CIRGENACTION_H +#include "clang/CodeGen/CodeGenAction.h" #include "clang/Frontend/FrontendAction.h" #include @@ -128,6 +129,60 @@ class EmitObjAction : public CIRGenAction { EmitObjAction(mlir::MLIRContext *mlirCtx = nullptr); }; +// Used for -fclangir-analysis-only: use CIR analysis but still use original LLVM codegen path +class AnalysisOnlyActionBase : public clang::CodeGenAction { + virtual void anchor(); + +protected: + std::unique_ptr + CreateASTConsumer(clang::CompilerInstance &CI, + llvm::StringRef InFile) override; + + AnalysisOnlyActionBase(unsigned _Act, + llvm::LLVMContext *_VMContext = nullptr); +}; + +class AnalysisOnlyAndEmitAssemblyAction : public AnalysisOnlyActionBase { + virtual void anchor() override; + +public: + AnalysisOnlyAndEmitAssemblyAction(llvm::LLVMContext *_VMContext = nullptr); +}; + +class AnalysisOnlyAndEmitBCAction : public AnalysisOnlyActionBase { + virtual void anchor() override; + +public: + AnalysisOnlyAndEmitBCAction(llvm::LLVMContext *_VMContext = nullptr); +}; + +class AnalysisOnlyAndEmitLLVMAction : public AnalysisOnlyActionBase { + virtual void anchor() override; + +public: + AnalysisOnlyAndEmitLLVMAction(llvm::LLVMContext *_VMContext = nullptr); +}; + +class AnalysisOnlyAndEmitLLVMOnlyAction : public AnalysisOnlyActionBase { + virtual void anchor() override; + +public: + AnalysisOnlyAndEmitLLVMOnlyAction(llvm::LLVMContext *_VMContext = nullptr); +}; + +class AnalysisOnlyAndEmitCodeGenOnlyAction : public AnalysisOnlyActionBase { + virtual void anchor() override; + +public: + AnalysisOnlyAndEmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext = nullptr); +}; + +class AnalysisOnlyAndEmitObjAction : public AnalysisOnlyActionBase { + virtual void anchor() override; + +public: + AnalysisOnlyAndEmitObjAction(llvm::LLVMContext *_VMContext = nullptr); +}; } // namespace cir #endif diff --git a/clang/include/clang/CIRFrontendAction/CIRGenConsumer.h b/clang/include/clang/CIRFrontendAction/CIRGenConsumer.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 986d95b77bbc..e5010ef3a066 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3108,6 +3108,11 @@ defm clangir_direct_lowering : BoolFOption<"clangir-direct-lowering", FrontendOpts<"ClangIRDirectLowering">, DefaultTrue, PosFlag, NegFlag>; +defm clangir_analysis_only : BoolFOption<"clangir-analysis-only", + FrontendOpts<"ClangIRAnalysisOnly">, DefaultFalse, + PosFlag, + NegFlag>; def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, Group, HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 865b294586ab..b9e4d09df222 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -454,6 +454,10 @@ class FrontendOptions { // Enable Clang IR mem2reg pass on the flat CIR. unsigned ClangIREnableMem2Reg : 1; + // Enable Clang IR analysis only pipeline that uses tranditional code gen + // pipeline. + unsigned ClangIRAnalysisOnly : 1; + CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. @@ -653,7 +657,8 @@ class FrontendOptions { ClangIRDisablePasses(false), ClangIRDisableCIRVerifier(false), ClangIRDisableEmitCXXDefault(false), ClangIRLifetimeCheck(false), ClangIRIdiomRecognizer(false), ClangIRLibOpt(false), - TimeTraceGranularity(500), TimeTraceVerbose(false) {} + ClangIRAnalysisOnly(false), TimeTraceGranularity(500), + TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file /// extension. For example, "c" would return Language::C. diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index b5999195b596..58536ede6700 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -32,6 +32,7 @@ #include "clang/Driver/DriverDiagnostic.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendDiagnostic.h" +#include "clang/Frontend/MultiplexConsumer.h" #include "clang/Lex/Preprocessor.h" #include "llvm/Bitcode/BitcodeReader.h" #include "llvm/IR/DebugInfo.h" @@ -442,6 +443,7 @@ void CIRGenAction::ExecuteAction() { llvmModule->print(*outstream, nullptr); } +namespace cir { void EmitAssemblyAction::anchor() {} EmitAssemblyAction::EmitAssemblyAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitAssembly, _MLIRContext) {} @@ -473,3 +475,53 @@ EmitBCAction::EmitBCAction(mlir::MLIRContext *_MLIRContext) void EmitObjAction::anchor() {} EmitObjAction::EmitObjAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitObj, _MLIRContext) {} +} // namespace cir + +// Used for -fclangir-analysis-only: use CIR analysis but still use original +// LLVM codegen path +void AnalysisOnlyActionBase::anchor() {} +AnalysisOnlyActionBase::AnalysisOnlyActionBase(unsigned _Act, + llvm::LLVMContext *_VMContext) + : clang::CodeGenAction(_Act, _VMContext) {} + +std::unique_ptr +AnalysisOnlyActionBase::CreateASTConsumer(clang::CompilerInstance &ci, + llvm::StringRef inFile) { + std::vector> Consumers; + Consumers.push_back(clang::CodeGenAction::CreateASTConsumer(ci, inFile)); + Consumers.push_back(std::make_unique( + CIRGenAction::OutputType::None, ci, ci.getDiagnostics(), + &ci.getVirtualFileSystem(), ci.getHeaderSearchOpts(), ci.getCodeGenOpts(), + ci.getTargetOpts(), ci.getLangOpts(), ci.getFrontendOpts(), nullptr)); + return std::make_unique(std::move(Consumers)); +} + +void AnalysisOnlyAndEmitAssemblyAction::anchor() {} +AnalysisOnlyAndEmitAssemblyAction::AnalysisOnlyAndEmitAssemblyAction( + llvm::LLVMContext *_VMContext) + : AnalysisOnlyActionBase(Backend_EmitAssembly, _VMContext) {} + +void AnalysisOnlyAndEmitBCAction::anchor() {} +AnalysisOnlyAndEmitBCAction::AnalysisOnlyAndEmitBCAction( + llvm::LLVMContext *_VMContext) + : AnalysisOnlyActionBase(Backend_EmitBC, _VMContext) {} + +void AnalysisOnlyAndEmitLLVMAction::anchor() {} +AnalysisOnlyAndEmitLLVMAction::AnalysisOnlyAndEmitLLVMAction( + llvm::LLVMContext *_VMContext) + : AnalysisOnlyActionBase(Backend_EmitLL, _VMContext) {} + +void AnalysisOnlyAndEmitLLVMOnlyAction::anchor() {} +AnalysisOnlyAndEmitLLVMOnlyAction::AnalysisOnlyAndEmitLLVMOnlyAction( + llvm::LLVMContext *_VMContext) + : AnalysisOnlyActionBase(Backend_EmitNothing, _VMContext) {} + +void AnalysisOnlyAndEmitCodeGenOnlyAction::anchor() {} +AnalysisOnlyAndEmitCodeGenOnlyAction::AnalysisOnlyAndEmitCodeGenOnlyAction( + llvm::LLVMContext *_VMContext) + : AnalysisOnlyActionBase(Backend_EmitMCNull, _VMContext) {} + +void AnalysisOnlyAndEmitObjAction::anchor() {} +AnalysisOnlyAndEmitObjAction::AnalysisOnlyAndEmitObjAction( + llvm::LLVMContext *_VMContext) + : AnalysisOnlyActionBase(Backend_EmitObj, _VMContext) {} diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 94fd177b5f7f..699d47ac2f64 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5266,6 +5266,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-fclangir-idiom-recognizer"); } + if (Args.hasArg(options::OPT_fclangir_analysis_only)) { + CmdArgs.push_back("-fclangir-analysis-only"); + + // TODO: We should pass some default analysis configuration here. + + // TODO2: Should we emit some diagnostics if the configurations conflict + // with each other? + } + if (IsOpenMPDevice) { // We have to pass the triple of the host if compiling for an OpenMP device. std::string NormalizedTriple = diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 0d7da85829a5..a30e097f9401 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3131,6 +3131,9 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Args.hasArg(OPT_fclangir_call_conv_lowering)) Opts.ClangIREnableCallConvLowering = true; + if (Args.hasArg(OPT_fclangir_analysis_only)) + Opts.ClangIRAnalysisOnly = true; + if (const Arg *A = Args.getLastArg(OPT_fclangir_lifetime_check, OPT_fclangir_lifetime_check_EQ)) { Opts.ClangIRLifetimeCheck = true; diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 134978b7bad7..40a96a38a8d5 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -53,6 +53,7 @@ CreateFrontendBaseAction(CompilerInstance &CI) { auto UseCIR = CI.getFrontendOpts().UseClangIRPipeline; auto Act = CI.getFrontendOpts().ProgramAction; + auto CIRAnalysisOnly = CI.getFrontendOpts().ClangIRAnalysisOnly; auto EmitsCIR = Act == EmitCIR || Act == EmitCIRFlat || Act == EmitCIROnly; if (!UseCIR && EmitsCIR) @@ -76,12 +77,16 @@ CreateFrontendBaseAction(CompilerInstance &CI) { #if CLANG_ENABLE_CIR if (UseCIR) return std::make_unique<::cir::EmitAssemblyAction>(); + if (CIRAnalysisOnly) + return std::make_unique<::cir::AnalysisOnlyAndEmitAssemblyAction>(); #endif return std::make_unique(); case EmitBC: { #if CLANG_ENABLE_CIR if (UseCIR) return std::make_unique<::cir::EmitBCAction>(); + if (CIRAnalysisOnly) + return std::make_unique<::cir::AnalysisOnlyAndEmitBCAction>(); #endif return std::make_unique(); } @@ -102,15 +107,31 @@ CreateFrontendBaseAction(CompilerInstance &CI) { #if CLANG_ENABLE_CIR if (UseCIR) return std::make_unique<::cir::EmitLLVMAction>(); + if (CIRAnalysisOnly) + return std::make_unique<::cir::AnalysisOnlyAndEmitLLVMAction>(); #endif return std::make_unique(); } - case EmitLLVMOnly: return std::make_unique(); - case EmitCodeGenOnly: return std::make_unique(); + case EmitLLVMOnly: { +#if CLANG_ENABLE_CIR + if (CIRAnalysisOnly) + return std::make_unique<::cir::AnalysisOnlyAndEmitLLVMOnlyAction>(); +#endif + return std::make_unique(); + } + case EmitCodeGenOnly: { +#if CLANG_ENABLE_CIR + if (CIRAnalysisOnly) + return std::make_unique<::cir::AnalysisOnlyAndEmitLLVMOnlyAction>(); +#endif + return std::make_unique(); + } case EmitObj: { #if CLANG_ENABLE_CIR if (UseCIR) return std::make_unique<::cir::EmitObjAction>(); + if (CIRAnalysisOnly) + return std::make_unique<::cir::AnalysisOnlyAndEmitObjAction>(); #endif return std::make_unique(); } diff --git a/clang/test/CIR/CodeGen/analysis-only.cpp b/clang/test/CIR/CodeGen/analysis-only.cpp new file mode 100644 index 000000000000..7f427f0de92f --- /dev/null +++ b/clang/test/CIR/CodeGen/analysis-only.cpp @@ -0,0 +1,8 @@ +// Check `-fclangir-analysis-only` would generate code correctly. +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir-analysis-only -std=c++20 \ +// RUN: -O2 -emit-llvm %s -o - | FileCheck %s + +extern "C" void foo() {} + +// CHECK: define{{.*}} @foo( + diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp index fb89c0e6fd8f..ebfe00c2ad56 100644 --- a/clang/test/CIR/Transforms/lifetime-check-agg.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir-analysis-only -fclangir-lifetime-check="history=all;remarks=all" %s -clangir-verify-diagnostics -emit-obj -o /dev/null typedef enum SType { INFO_ENUM_0 = 9, diff --git a/clang/test/CIR/analysis-only.cpp b/clang/test/CIR/analysis-only.cpp new file mode 100644 index 000000000000..7dc58250b91b --- /dev/null +++ b/clang/test/CIR/analysis-only.cpp @@ -0,0 +1,2 @@ +// RUN: %clang %s -fclangir-analysis-only -### -c %s 2>&1 | FileCheck %s +// CHECK: "-fclangir-analysis-only" From 1c017087e3f07c88cc191bf92c8ab301cc53f6f9 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Thu, 19 Sep 2024 00:42:41 +0300 Subject: [PATCH 1858/2301] [CIR][Lowering] Fix lowering for multi dimensional array (#851) This PR fixes the lowering for multi dimensional arrays. Consider the following code snippet `test.c`: ``` void foo() { char arr[4][1] = {"a", "b", "c", "d"}; } ``` When ran with `bin/clang test.c -Xclang -fclangir -Xclang -emit-llvm -S -o -`, It produces the following error: ``` ~/clangir/llvm/include/llvm/Support/Casting.h:566: decltype(auto) llvm::cast(const From&) [with To = mlir::ArrayAttr; From = mlir::Attribute]: Assertion `isa(Val) && "cast() argument of incompatible type!"' failed. ``` The bug can be traced back to `LoweringHelpers.cpp`. It considers the values in the array as integer types, and this causes an error in this case. This PR updates `convertToDenseElementsAttrImpl` when the array contains string attributes. I have also added one more similar test. Note that in the tests I used a **literal match** to avoid matching as regex, so `!dbg` is useful. --- clang/lib/CIR/Lowering/LoweringHelpers.cpp | 38 ++++++++++++++++------ clang/test/CIR/Lowering/array-init.c | 17 ++++++++++ 2 files changed, 45 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/Lowering/LoweringHelpers.cpp b/clang/lib/CIR/Lowering/LoweringHelpers.cpp index debe7881d0c4..06c92ae12472 100644 --- a/clang/lib/CIR/Lowering/LoweringHelpers.cpp +++ b/clang/lib/CIR/Lowering/LoweringHelpers.cpp @@ -61,9 +61,34 @@ mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity) { return nestTy; } +template +void fillTrailingZeros(mlir::cir::ConstArrayAttr attr, + llvm::SmallVectorImpl &values) { + auto numTrailingZeros = attr.getTrailingZerosNum(); + if (numTrailingZeros) { + auto localArrayTy = mlir::dyn_cast(attr.getType()); + assert(localArrayTy && "expected !cir.array"); + + auto nestTy = localArrayTy.getEltType(); + if (!mlir::isa(nestTy)) + values.insert(values.end(), numTrailingZeros, + getZeroInitFromType(nestTy)); + } +} + template void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, llvm::SmallVectorImpl &values) { + if (auto stringAttr = mlir::dyn_cast(attr.getElts())) { + if (auto arrayType = mlir::dyn_cast(attr.getType())) { + for (auto element : stringAttr) { + auto intAttr = mlir::cir::IntAttr::get(arrayType.getEltType(), element); + values.push_back(mlir::dyn_cast(intAttr).getValue()); + } + return; + } + } + auto arrayAttr = mlir::cast(attr.getElts()); for (auto eltAttr : arrayAttr) { if (auto valueAttr = mlir::dyn_cast(eltAttr)) { @@ -71,6 +96,8 @@ void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, } else if (auto subArrayAttr = mlir::dyn_cast(eltAttr)) { convertToDenseElementsAttrImpl(subArrayAttr, values); + if (mlir::dyn_cast(subArrayAttr.getElts())) + fillTrailingZeros(subArrayAttr, values); } else if (auto zeroAttr = mlir::dyn_cast(eltAttr)) { unsigned numStoredZeros = 0; auto nestTy = @@ -84,16 +111,7 @@ void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, // Only fill in trailing zeros at the local cir.array level where the element // type isn't another array (for the mult-dim case). - auto numTrailingZeros = attr.getTrailingZerosNum(); - if (numTrailingZeros) { - auto localArrayTy = mlir::dyn_cast(attr.getType()); - assert(localArrayTy && "expected !cir.array"); - - auto nestTy = localArrayTy.getEltType(); - if (!mlir::isa(nestTy)) - values.insert(values.end(), numTrailingZeros, - getZeroInitFromType(nestTy)); - } + fillTrailingZeros(attr, values); } template diff --git a/clang/test/CIR/Lowering/array-init.c b/clang/test/CIR/Lowering/array-init.c index ab0ddb4dd0ea..0b9a19b5c9ba 100644 --- a/clang/test/CIR/Lowering/array-init.c +++ b/clang/test/CIR/Lowering/array-init.c @@ -1,6 +1,12 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// LLVM: charInit1.ar = internal global [4 x [4 x i8]] {{.*}}4 x i8] c"aa\00\00", [4 x i8] c"aa\00\00", [4 x i8] c"aa\00\00", [4 x i8] c"aa\00\00"], align 16 +char charInit1() { + static char ar[][4] = {"aa", "aa", "aa", "aa"}; + return ar[0][0]; +} + // LLVM: define dso_local void @zeroInit // LLVM: [[RES:%.*]] = alloca [3 x i32], i64 1 // LLVM: store [3 x i32] zeroinitializer, ptr [[RES]] @@ -8,3 +14,14 @@ void zeroInit() { int a[3] = {0, 0, 0}; } +// LLVM: %1 = alloca [4 x [1 x i8]], i64 1, align 1 +// LLVM: store [4 x [1 x i8]] {{.*}}1 x i8] c"a", [1 x i8] c"b", [1 x i8] c"c", [1 x i8] c"d"], ptr %1, align 1 +void charInit2() { + char arr[4][1] = {"a", "b", "c", "d"}; +} + +// LLVM: %1 = alloca [4 x [2 x i8]], i64 1, align 1 +// LLVM: store [4 x [2 x i8]] {{.*}}2 x i8] c"ab", [2 x i8] c"cd", [2 x i8] c"ef", [2 x i8] c"gh"], ptr %1, align 1 +void charInit3() { + char arr[4][2] = {"ab", "cd", "ef", "gh"}; +} \ No newline at end of file From 85313e4d0bb2914f4e8a2978b8bee6edef4e5d0e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 18 Sep 2024 18:14:38 -0400 Subject: [PATCH 1859/2301] [CIR][CodeGen] Support global temporaries Support expressions at the top level such as const unsigned int n = 1234; const int &r = (const int&)n; Reviewers: bcardosolopes Pull Request: https://github.com/llvm/clangir/pull/857 --- clang/lib/CIR/CodeGen/Address.h | 2 + clang/lib/CIR/CodeGen/CIRGenCstEmitter.h | 3 + clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 22 +++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 139 ++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 18 ++- clang/test/CIR/CodeGen/temporaries.cpp | 12 ++ 6 files changed, 178 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 0d1ca5cd2944..fdddf6fae500 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -26,6 +26,8 @@ namespace cir { // Indicates whether a pointer is known not to be null. enum KnownNonNull_t { NotKnownNonNull, KnownNonNull }; +/// Like RawAddress, an abstract representation of an aligned address, but the +/// pointer contained in this class is possibly signed. class Address { llvm::PointerIntPair PointerAndKnownNonNull; mlir::Type ElementType; diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h index 28b8a925b1da..d95529e50f4a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h @@ -76,6 +76,9 @@ class ConstantEmitter { mlir::Attribute tryEmitForInitializer(const Expr *E, LangAS destAddrSpace, QualType destType); + mlir::Attribute emitForInitializer(const APValue &value, LangAS destAddrSpace, + QualType destType); + void finalize(mlir::cir::GlobalOp global); // All of the "abstract" emission methods below permit the emission to diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 1c8c7502f60e..c3683b3f0fd1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -24,6 +24,7 @@ #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" #include "clang/Basic/Builtins.h" +#include "clang/Basic/Specifiers.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" @@ -1261,7 +1262,7 @@ class ConstantLValueEmitter ConstantLValue VisitBlockExpr(const BlockExpr *E); ConstantLValue VisitCXXTypeidExpr(const CXXTypeidExpr *E); ConstantLValue - VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); + VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *expr); bool hasNonZeroOffset() const { return !Value.getLValueOffset().isZero(); } @@ -1499,9 +1500,13 @@ ConstantLValueEmitter::VisitCXXTypeidExpr(const CXXTypeidExpr *E) { } ConstantLValue ConstantLValueEmitter::VisitMaterializeTemporaryExpr( - const MaterializeTemporaryExpr *E) { - assert(0 && "NYI"); - return nullptr; + const MaterializeTemporaryExpr *expr) { + assert(expr->getStorageDuration() == SD_Static); + const Expr *inner = expr->getSubExpr()->skipRValueSubobjectAdjustments(); + mlir::Operation *globalTemp = CGM.getAddrOfGlobalTemporary(expr, inner); + CIRGenBuilderTy builder = CGM.getBuilder(); + return ConstantLValue( + builder.getGlobalViewAttr(mlir::cast(globalTemp))); } //===----------------------------------------------------------------------===// @@ -1532,6 +1537,15 @@ mlir::Attribute ConstantEmitter::tryEmitForInitializer(const Expr *E, return markIfFailed(tryEmitPrivateForMemory(E, destType)); } +mlir::Attribute ConstantEmitter::emitForInitializer(const APValue &value, + LangAS destAddrSpace, + QualType destType) { + initializeNonAbstract(destAddrSpace); + auto c = tryEmitPrivateForMemory(value, destType); + assert(c && "couldn't emit constant value non-abstractly?"); + return c; +} + void ConstantEmitter::finalize(mlir::cir::GlobalOp global) { assert(InitializedNonAbstract && "finalizing emitter that was used for abstract emission?"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index b5578fbd6d02..0f4f322f7856 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -24,6 +24,7 @@ #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" @@ -648,13 +649,12 @@ mlir::Value CIRGenModule::getGlobalValue(const Decl *D) { return CurCGF->symbolTable.lookup(D); } -mlir::cir::GlobalOp -CIRGenModule::createGlobalOp(CIRGenModule &CGM, mlir::Location loc, - StringRef name, mlir::Type t, bool isCst, - mlir::cir::AddressSpaceAttr addrSpace, - mlir::Operation *insertPoint) { +mlir::cir::GlobalOp CIRGenModule::createGlobalOp( + CIRGenModule &cgm, mlir::Location loc, StringRef name, mlir::Type t, + bool isConstant, mlir::cir::AddressSpaceAttr addrSpace, + mlir::Operation *insertPoint, mlir::cir::GlobalLinkageKind linkage) { mlir::cir::GlobalOp g; - auto &builder = CGM.getBuilder(); + auto &builder = cgm.getBuilder(); { mlir::OpBuilder::InsertionGuard guard(builder); @@ -662,17 +662,17 @@ CIRGenModule::createGlobalOp(CIRGenModule &CGM, mlir::Location loc, // void s() { const char *s = "yolo"; ... } // // Be sure to insert global before the current function - auto *curCGF = CGM.getCurrCIRGenFun(); + auto *curCGF = cgm.getCurrCIRGenFun(); if (curCGF) builder.setInsertionPoint(curCGF->CurFn); - g = builder.create( - loc, name, t, isCst, GlobalLinkageKind::ExternalLinkage, addrSpace); + g = builder.create(loc, name, t, isConstant, linkage, + addrSpace); if (!curCGF) { if (insertPoint) - CGM.getModule().insert(insertPoint, g); + cgm.getModule().insert(insertPoint, g); else - CGM.getModule().push_back(g); + cgm.getModule().push_back(g); } // Default to private until we can judge based on the initializer, @@ -1565,6 +1565,123 @@ void CIRGenModule::buildLinkageSpec(const LinkageSpecDecl *LSD) { buildDeclContext(LSD); } +mlir::Operation * +CIRGenModule::getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *expr, + const Expr *init) { + assert((expr->getStorageDuration() == SD_Static || + expr->getStorageDuration() == SD_Thread) && + "not a global temporary"); + const auto *varDecl = cast(expr->getExtendingDecl()); + + // If we're not materializing a subobject of the temporay, keep the + // cv-qualifiers from the type of the MaterializeTemporaryExpr. + QualType materializedType = init->getType(); + if (init == expr->getSubExpr()) + materializedType = expr->getType(); + + [[maybe_unused]] CharUnits align = + getASTContext().getTypeAlignInChars(materializedType); + + auto insertResult = materializedGlobalTemporaryMap.insert({expr, nullptr}); + if (!insertResult.second) { + llvm_unreachable("NYI"); + } + + // FIXME: If an externally-visible declaration extends multiple temporaries, + // we need to give each temporary the same name in every translation unit (and + // we also need to make the temporaries externally-visible). + llvm::SmallString<256> name; + llvm::raw_svector_ostream out(name); + getCXXABI().getMangleContext().mangleReferenceTemporary( + varDecl, expr->getManglingNumber(), out); + + APValue *value = nullptr; + if (expr->getStorageDuration() == SD_Static && varDecl->evaluateValue()) { + // If the initializer of the extending declaration is a constant + // initializer, we should have a cached constant initializer for this + // temporay. Note taht this m ight have a different value from the value + // computed by evaluating the initializer if the surrounding constant + // expression modifies the temporary. + value = expr->getOrCreateValue(false); + } + + // Try evaluating it now, it might have a constant initializer + Expr::EvalResult evalResult; + if (!value && init->EvaluateAsRValue(evalResult, getASTContext()) && + !evalResult.hasSideEffects()) + value = &evalResult.Val; + + LangAS addrSpace = getGlobalVarAddressSpace(varDecl); + + std::optional emitter; + mlir::Attribute initialValue = nullptr; + bool isConstant = false; + mlir::Type type; + if (value) { + emitter.emplace(*this); + initialValue = + emitter->emitForInitializer(*value, addrSpace, materializedType); + + isConstant = materializedType.isConstantStorage( + getASTContext(), /*ExcludeCtor*/ value, /*ExcludeDtor*/ false); + + type = mlir::cast(initialValue).getType(); + } else { + // No initializer, the initialization will be provided when we initialize + // the declaration which performed lifetime extension. + llvm_unreachable("else value"); + } + + // Create a global variable for this lifetime-extended temporary. + mlir::cir::GlobalLinkageKind linkage = + getCIRLinkageVarDefinition(varDecl, false); + if (linkage == mlir::cir::GlobalLinkageKind::ExternalLinkage) { + const VarDecl *initVD; + if (varDecl->isStaticDataMember() && varDecl->getAnyInitializer(initVD) && + isa(initVD->getLexicalDeclContext())) { + // Temporaries defined inside a class get linkonce_odr linkage because the + // calss can be defined in multiple translation units. + llvm_unreachable("staticdatamember NYI"); + } else { + // There is no need for this temporary to have external linkage if the + // VarDecl has external linkage. + linkage = mlir::cir::GlobalLinkageKind::InternalLinkage; + } + } + auto targetAS = builder.getAddrSpaceAttr(addrSpace); + + auto loc = getLoc(expr->getSourceRange()); + auto gv = createGlobalOp(*this, loc, name, type, isConstant, targetAS, + nullptr, linkage); + gv.setInitialValueAttr(initialValue); + + if (emitter) + emitter->finalize(gv); + // Don't assign dllimport or dllexport to lcoal linkage globals + if (!gv.hasLocalLinkage()) { + llvm_unreachable("NYI"); + } + gv.setAlignment(align.getAsAlign().value()); + if (supportsCOMDAT() && gv.isWeakForLinker()) + llvm_unreachable("NYI"); + if (varDecl->getTLSKind()) + llvm_unreachable("NYI"); + mlir::Operation *cv = gv; + if (addrSpace != LangAS::Default) + llvm_unreachable("NYI"); + + // Update the map with the new temporay. If we created a placeholder above, + // replace it with the new global now. + mlir::Operation *&entry = materializedGlobalTemporaryMap[expr]; + if (entry) { + entry->replaceAllUsesWith(cv); + entry->erase(); + } + entry = cv; + + return cv; +} + // Emit code for a single top level declaration. void CIRGenModule::buildTopLevelDecl(Decl *decl) { // Ignore dependent declarations diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index e0979e36744f..41126978422c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H #define LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H +#include "Address.h" #include "CIRGenBuilder.h" #include "CIRGenCall.h" #include "CIRGenOpenCLRuntime.h" @@ -143,6 +144,9 @@ class CIRGenModule : public CIRGenTypeCache { /// most up to date ValueDecl that will have all the inherited annotations. llvm::DenseMap deferredAnnotations; + llvm::DenseMap + materializedGlobalTemporaryMap; + public: mlir::ModuleOp getModule() const { return theModule; } CIRGenBuilderTy &getBuilder() { return builder; } @@ -248,10 +252,12 @@ class CIRGenModule : public CIRGenTypeCache { mlir::Type Ty); static mlir::cir::GlobalOp - createGlobalOp(CIRGenModule &CGM, mlir::Location loc, StringRef name, - mlir::Type t, bool isCst = false, + createGlobalOp(CIRGenModule &cgm, mlir::Location loc, StringRef name, + mlir::Type t, bool isConstant = false, mlir::cir::AddressSpaceAttr addrSpace = {}, - mlir::Operation *insertPoint = nullptr); + mlir::Operation *insertPoint = nullptr, + mlir::cir::GlobalLinkageKind linkage = + mlir::cir::GlobalLinkageKind::ExternalLinkage); // FIXME: Hardcoding priority here is gross. void AddGlobalCtor(mlir::cir::FuncOp Ctor, int Priority = 65535); @@ -648,6 +654,12 @@ class CIRGenModule : public CIRGenTypeCache { bool shouldEmitFunction(clang::GlobalDecl GD); + /// Returns a pointer to a global variable representing a temporary with + /// static or thread storage duration. + mlir::Operation * + getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *expr, + const Expr *init); + // Produce code for this constructor/destructor. This method doesn't try to // apply any ABI rules about which other constructors/destructors are needed // or if they are alias to each other. diff --git a/clang/test/CIR/CodeGen/temporaries.cpp b/clang/test/CIR/CodeGen/temporaries.cpp index ea6b2bd20d6d..1dafb75e8a70 100644 --- a/clang/test/CIR/CodeGen/temporaries.cpp +++ b/clang/test/CIR/CodeGen/temporaries.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: cir-translate %t.cir -cir-to-llvmir -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM struct E { ~E(); @@ -25,3 +27,13 @@ void f() { // CHECK-NEXT: } // CHECK-NEXT: cir.return // CHECK-NEXT: } + +const unsigned int n = 1234; +const int &r = (const int&)n; + +// CHECK: cir.global "private" constant internal @_ZGR1r_ = #cir.int<1234> : !s32i +// CHECK-NEXT: cir.global external @r = #cir.global_view<@_ZGR1r_> : !cir.ptr {alignment = 8 : i64} + +// LLVM: @_ZGR1r_ = internal constant i32 1234, align 4 +// LLVM-NEXT: @r = global ptr @_ZGR1r_, align 8 + From 4009579f1ecbadee0be558c36944da8b49fcf8f8 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 18 Sep 2024 18:08:54 -0400 Subject: [PATCH 1860/2301] [CIR][CodeGen][NFC] Move GetUndefRValue to the right file This is to match clang CodeGen --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 5 ----- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 22 ++++++++++++++++++++++ 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index e4fcf973c54f..6f744ce14dc1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -902,11 +902,6 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, return ret; } -RValue CIRGenFunction::GetUndefRValue(QualType Ty) { - assert(Ty->isVoidType() && "Only VoidType supported so far."); - return RValue::get(nullptr); -} - mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, mlir::cir::FuncOp callee, ArrayRef args) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index ae0909c507e3..59257549fe96 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1380,6 +1380,28 @@ RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, return buildCall(E->getCallee()->getType(), callee, E, ReturnValue); } +RValue CIRGenFunction::GetUndefRValue(QualType ty) { + if (ty->isVoidType()) + return RValue::get(nullptr); + + switch (getEvaluationKind(ty)) { + case TEK_Complex: { + llvm_unreachable("NYI"); + } + + // If this is a use of an undefined aggregate type, the aggregate must have + // an identifiable address. Just because the contents of the value are + // undefined doesn't mean that the address can't be taken and compared. + case TEK_Aggregate: { + llvm_unreachable("NYI"); + } + + case TEK_Scalar: + llvm_unreachable("NYI"); + } + llvm_unreachable("bad evaluation kind"); +} + LValue CIRGenFunction::buildStmtExprLValue(const StmtExpr *E) { // Can only get l-value for message expression returning aggregate type RValue RV = buildAnyExprToTemp(E); From ee610bc63dfa3d3ca7b4fc3fb975d37d07be7cb7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 Sep 2024 16:32:34 -0700 Subject: [PATCH 1861/2301] [CIR][CIRGen] Exceptions: lexical scope issue with global initializers Fix https://github.com/llvm/clangir/issues/829 Thanks @smeenai for pointing out the root cause and UBSan failure! --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 16 ++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 3 ++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 ++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 11 +++++++---- clang/test/CIR/CodeGen/global-new.cpp | 1 - 5 files changed, 25 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index d73222f5b79f..179e128ac2f8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -319,6 +319,10 @@ void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, if (NeedsCtor) { mlir::OpBuilder::InsertionGuard guard(builder); auto block = builder.createBlock(&Addr.getCtorRegion()); + CIRGenFunction::LexicalScope lexScope{*CurCGF, Addr.getLoc(), + builder.getInsertionBlock()}; + lexScope.setAsGlobalInit(); + builder.setInsertionPointToStart(block); Address DeclAddr(getAddrOfGlobalVar(D), getASTContext().getDeclAlign(D)); buildDeclInit(CGF, D, DeclAddr); @@ -327,17 +331,25 @@ void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, } if (isCstStorage) { + // TODO: this leads to a missing feature in the moment, probably also need a + // LexicalScope to be inserted here. buildDeclInvariant(CGF, D); } else { // If not constant storage we'll emit this regardless of NeedsDtor value. mlir::OpBuilder::InsertionGuard guard(builder); auto block = builder.createBlock(&Addr.getDtorRegion()); + CIRGenFunction::LexicalScope lexScope{*CurCGF, Addr.getLoc(), + builder.getInsertionBlock()}; + lexScope.setAsGlobalInit(); + builder.setInsertionPointToStart(block); buildDeclDestroy(CGF, D); builder.setInsertionPointToEnd(block); - if (block->empty()) + if (block->empty()) { block->erase(); - else + // Don't confuse lexical cleanup. + builder.clearInsertionPoint(); + } else builder.create(Addr->getLoc()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 6f744ce14dc1..53252ee78bd6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -456,7 +456,8 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, // In OG, we build the landing pad for this scope. In CIR, we emit a // synthetic cir.try because this didn't come from codegenerating from a // try/catch in C++. - auto op = CGF.currLexScope->getClosestTryParent(); + assert(CGF.currLexScope && "expected scope"); + mlir::cir::TryOp op = CGF.currLexScope->getClosestTryParent(); if (op) return op; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 410fcc2a316d..e011e1f83558 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -393,6 +393,8 @@ void CIRGenFunction::LexicalScope::cleanup() { // If a terminator is already present in the current block, nothing // else to do here. auto *currBlock = builder.getBlock(); + if (isGlobalInit() && !currBlock) + return; if (currBlock->mightHaveTerminator() && currBlock->getTerminator()) return; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index d8e032604ff1..427eee9bcf5f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -2014,10 +2014,11 @@ class CIRGenFunction : public CIRGenTypeCache { // FIXME: perhaps we can use some info encoded in operations. enum Kind { - Regular, // cir.if, cir.scope, if_regions - Ternary, // cir.ternary - Switch, // cir.switch - Try, // cir.try + Regular, // cir.if, cir.scope, if_regions + Ternary, // cir.ternary + Switch, // cir.switch + Try, // cir.try + GlobalInit // cir.global initalization code } ScopeKind = Regular; // Track scope return value. @@ -2075,6 +2076,7 @@ class CIRGenFunction : public CIRGenTypeCache { // --- // Kind // --- + bool isGlobalInit() { return ScopeKind == Kind::GlobalInit; } bool isRegular() { return ScopeKind == Kind::Regular; } bool isSwitch() { return ScopeKind == Kind::Switch; } bool isTernary() { return ScopeKind == Kind::Ternary; } @@ -2085,6 +2087,7 @@ class CIRGenFunction : public CIRGenTypeCache { } mlir::cir::TryOp getClosestTryParent(); + void setAsGlobalInit() { ScopeKind = Kind::GlobalInit; } void setAsSwitch() { ScopeKind = Kind::Switch; } void setAsTernary() { ScopeKind = Kind::Ternary; } void setAsTry(mlir::cir::TryOp op) { diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index fe90280ddf9e..bf2663181077 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -8,7 +8,6 @@ // RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll // RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll -// XFAIL: * struct e { e(int); }; e *g = new e(0); From b7377fdfcb437258d341318c252798b902e903d5 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Wed, 18 Sep 2024 22:46:24 -0400 Subject: [PATCH 1862/2301] [CIR][CIRGen][Builtin][Neon] Lower __builtin_neon_vrndns_f32 (#858) As title. Also introduced buildAArch64NeonCall skeleton, which is partially the counterpart of OG's EmitNeonCall. And this could be use for many other neon intrinsics. --------- Co-authored-by: Guojin He --- clang/include/clang/CIR/MissingFeatures.h | 4 + .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 79 ++++++++++++++----- .../CIR/CodeGen/arm-neon-directed-rounding.c | 40 ++++++++++ 3 files changed, 103 insertions(+), 20 deletions(-) create mode 100644 clang/test/CIR/CodeGen/arm-neon-directed-rounding.c diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 64d75550ec21..2577af98e5e5 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -133,6 +133,9 @@ struct MissingFeatures { static bool volatileTypes() { return false; } static bool syncScopeID() { return false; } + // AArch64 Neon builtin related. + static bool buildNeonShiftVector() { return false; } + // Misc static bool cacheRecordLayouts() { return false; } static bool capturedByInit() { return false; } @@ -184,6 +187,7 @@ struct MissingFeatures { static bool deferredReplacements() { return false; } static bool shouldInstrumentFunction() { return false; } static bool xray() { return false; } + static bool buildConstrainedFPCall() { return false; } // Inline assembly static bool asmGoto() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 8272d4a7364c..493da9220dcb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -79,19 +79,15 @@ struct ARMVectorIntrinsicInfo { } // end anonymous namespace #define NEONMAP0(NameBase) \ - { #NameBase, NEON::BI__builtin_neon_##NameBase, 0, 0, 0 } + {#NameBase, NEON::BI__builtin_neon_##NameBase, 0, 0, 0} #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ - { \ - #NameBase, NEON::BI__builtin_neon_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ - TypeModifier \ - } + {#NameBase, NEON::BI__builtin_neon_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ + TypeModifier} #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \ - { \ - #NameBase, NEON::BI__builtin_neon_##NameBase, Intrinsic::LLVMIntrinsic, \ - Intrinsic::AltLLVMIntrinsic, TypeModifier \ - } + {#NameBase, NEON::BI__builtin_neon_##NameBase, Intrinsic::LLVMIntrinsic, \ + Intrinsic::AltLLVMIntrinsic, TypeModifier} static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = { NEONMAP0(splat_lane_v), @@ -1097,13 +1093,11 @@ static const std::pair NEONEquivalentIntrinsicMap[] = { #undef NEONMAP2 #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ - { \ - #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ - TypeModifier \ - } + {#NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ + TypeModifier} #define SVEMAP2(NameBase, TypeModifier) \ - { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier } + {#NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier} static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = { #define GET_SVE_LLVM_INTRINSIC_MAP #include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def" @@ -1115,13 +1109,11 @@ static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = { #undef SVEMAP2 #define SMEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ - { \ - #NameBase, SME::BI__builtin_sme_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ - TypeModifier \ - } + {#NameBase, SME::BI__builtin_sme_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ + TypeModifier} #define SMEMAP2(NameBase, TypeModifier) \ - { #NameBase, SME::BI__builtin_sme_##NameBase, 0, 0, TypeModifier } + {#NameBase, SME::BI__builtin_sme_##NameBase, 0, 0, TypeModifier} static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = { #define GET_SME_LLVM_INTRINSIC_MAP #include "clang/Basic/arm_sme_builtin_cg.inc" @@ -1607,6 +1599,48 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, } } +mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf, + llvm::SmallVector argTypes, + llvm::SmallVector args, + llvm::StringRef intrinsicName, mlir::Type funcResTy, + mlir::Location loc, + bool isConstrainedFPIntrinsic = false, + unsigned shift = 0, bool rightshift = false) { + // TODO: Consider removing the following unreachable when we have + // buildConstrainedFPCall feature implemented + assert(!MissingFeatures::buildConstrainedFPCall()); + if (isConstrainedFPIntrinsic) + llvm_unreachable("isConstrainedFPIntrinsic NYI"); + // TODO: Remove the following unreachable and call it in the loop once + // there is an implementation of buildNeonShiftVector + if (shift > 0) + llvm_unreachable("Argument shift NYI"); + + if (builtinID != clang::NEON::BI__builtin_neon_vrndns_f32) + llvm_unreachable("NYT"); + + CIRGenBuilderTy &builder = cgf.getBuilder(); + for (unsigned j = 0; j < argTypes.size(); ++j) { + if (isConstrainedFPIntrinsic) { + assert(!MissingFeatures::buildConstrainedFPCall()); + } + if (shift > 0 && shift == j) { + assert(!MissingFeatures::buildNeonShiftVector()); + } else { + args[j] = builder.createBitcast(args[j], argTypes[j]); + } + } + if (isConstrainedFPIntrinsic) { + assert(!MissingFeatures::buildConstrainedFPCall()); + return nullptr; + } else { + return builder + .create( + loc, builder.getStringAttr(intrinsicName), funcResTy, args) + .getResult(); + } +} + mlir::Value CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, @@ -2295,6 +2329,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, return V; mlir::Type VTy = Ty; + llvm::SmallVector args; switch (BuiltinID) { default: return nullptr; @@ -2401,7 +2436,11 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } case NEON::BI__builtin_neon_vrndns_f32: { - llvm_unreachable("NYI"); + mlir::Value arg0 = buildScalarExpr(E->getArg(0)); + args.push_back(arg0); + return buildNeonCall(NEON::BI__builtin_neon_vrndns_f32, *this, + {arg0.getType()}, args, "llvm.roundeven.f32", + getCIRGenModule().FloatTy, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndph_f16: { llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/arm-neon-directed-rounding.c b/clang/test/CIR/CodeGen/arm-neon-directed-rounding.c new file mode 100644 index 000000000000..5487919f8050 --- /dev/null +++ b/clang/test/CIR/CodeGen/arm-neon-directed-rounding.c @@ -0,0 +1,40 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -ffreestanding -emit-cir -target-feature +neon %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -ffreestanding -emit-llvm -target-feature +neon %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// REQUIRES: aarch64-registered-target || arm-registered-target +#include + +float32_t test_vrndns_f32(float32_t a) { + return vrndns_f32(a); +} +// CIR: cir.func internal private @vrndns_f32(%arg0: !cir.float {{.*}}) -> !cir.float +// CIR: cir.store %arg0, [[ARG_SAVE:%.*]] : !cir.float, !cir.ptr +// CIR: [[INTRIN_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr, !cir.float +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.roundeven.f32" [[INTRIN_ARG]] : (!cir.float) +// CIR: cir.return {{%.*}} : !cir.float + +// CIR-LABEL: test_vrndns_f32 +// CIR: cir.store %arg0, [[ARG_SAVE0:%.*]] : !cir.float, !cir.ptr +// CIR: [[FUNC_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr, !cir.float +// CIR: [[FUNC_RES:%.*]] = cir.call @vrndns_f32([[FUNC_ARG]]) : (!cir.float) -> !cir.float +// CIR: cir.store [[FUNC_RES]], [[RET_P:%.*]] : !cir.float, !cir.ptr +// CIR: [[RET_VAL:%.*]] = cir.load [[RET_P]] : !cir.ptr, !cir.float +// CIR: cir.return [[RET_VAL]] : !cir.float loc + +// LLVM: define dso_local float @test_vrndns_f32(float [[ARG:%.*]]) +// LLVM: store float [[ARG]], ptr [[ARG_SAVE:%.*]], align 4 +// LLVM: [[P0:%.*]] = load float, ptr [[ARG_SAVE]], align 4, +// LLVM: store float [[P0]], ptr [[P0_SAVE:%.*]], align 4, +// LLVM: [[INTRIN_ARG:%.*]] = load float, ptr [[P0_SAVE]], align 4, +// LLVM: [[INTRIN_RES:%.*]] = call float @llvm.roundeven.f32(float [[INTRIN_ARG]]) +// LLVM: store float [[INTRIN_RES]], ptr [[RES_SAVE0:%.*]], align 4, +// LLVM: [[RES_COPY0:%.*]] = load float, ptr [[RES_SAVE0]], align 4, +// LLVM: store float [[RES_COPY0]], ptr [[RES_SAVE1:%.*]], align 4, +// LLVM: [[RES_COPY1:%.*]] = load float, ptr [[RES_SAVE1]], align 4, +// LLVM: store float [[RES_COPY1]], ptr [[RET_P:%.*]], align 4, +// LLVM: [[RET_VAL:%.*]] = load float, ptr [[RET_P]], align 4, +// LLVM: ret float [[RET_VAL]] From a5bd810b8caf4469282c9867d150c576428e61d4 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 18 Sep 2024 22:48:10 -0400 Subject: [PATCH 1863/2301] [CIR][CodeGen][NFC] Add TBAAAccessInfo stubbed out and many usages of it (#859) --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 263 ++++++++++++----------- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 24 ++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 80 ++++--- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 16 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 16 +- clang/lib/CIR/CodeGen/CIRGenTBAA.h | 28 +++ clang/lib/CIR/CodeGen/CIRGenValue.h | 51 +++-- 9 files changed, 284 insertions(+), 200 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index e4e5c718c16b..f852af7ca979 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -17,14 +17,12 @@ #include "CIRGenOpenMPRuntime.h" #include "TargetInfo.h" #include "clang/AST/ASTContext.h" -#include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/MissingFeatures.h" -#include "clang/CodeGen/CGFunctionInfo.h" #include "clang/Frontend/FrontendDiagnostic.h" #include "llvm/Support/ErrorHandling.h" #include @@ -179,7 +177,7 @@ class AtomicInfo { llvm_unreachable("NYI"); return LValue::makeAddr(addr, getValueType(), CGF.getContext(), - LVal.getBaseInfo()); + LVal.getBaseInfo(), LVal.getTBAAInfo()); } /// Emits atomic load. diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 59257549fe96..6e1a2ba4d4e1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -15,6 +15,7 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" +#include "CIRGenTBAA.h" #include "CIRGenValue.h" #include "EHScopeStack.h" #include "TargetInfo.h" @@ -111,19 +112,20 @@ static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { return false; } -static Address buildPointerWithAlignment(const Expr *E, - LValueBaseInfo *BaseInfo, - KnownNonNull_t IsKnownNonNull, - CIRGenFunction &CGF) { +static Address buildPointerWithAlignment(const Expr *expr, + LValueBaseInfo *baseInfo, + TBAAAccessInfo *tbaaInfo, + KnownNonNull_t isKnownNonNull, + CIRGenFunction &cgf) { // We allow this with ObjC object pointers because of fragile ABIs. - assert(E->getType()->isPointerType() || - E->getType()->isObjCObjectPointerType()); - E = E->IgnoreParens(); + assert(expr->getType()->isPointerType() || + expr->getType()->isObjCObjectPointerType()); + expr = expr->IgnoreParens(); // Casts: - if (const CastExpr *CE = dyn_cast(E)) { + if (const CastExpr *CE = dyn_cast(expr)) { if (const auto *ECE = dyn_cast(CE)) - CGF.CGM.buildExplicitCastExprType(ECE, &CGF); + cgf.CGM.buildExplicitCastExprType(ECE, &cgf); switch (CE->getCastKind()) { default: { @@ -140,44 +142,44 @@ static Address buildPointerWithAlignment(const Expr *E, break; assert(!MissingFeatures::tbaa()); - LValueBaseInfo InnerBaseInfo; - Address Addr = CGF.buildPointerWithAlignment( - CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull); - if (BaseInfo) - *BaseInfo = InnerBaseInfo; + LValueBaseInfo innerBaseInfo; + Address addr = cgf.buildPointerWithAlignment( + CE->getSubExpr(), &innerBaseInfo, tbaaInfo, isKnownNonNull); + if (baseInfo) + *baseInfo = innerBaseInfo; if (isa(CE)) { assert(!MissingFeatures::tbaa()); LValueBaseInfo TargetTypeBaseInfo; - CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( - E->getType(), &TargetTypeBaseInfo); + CharUnits Align = cgf.CGM.getNaturalPointeeTypeAlignment( + expr->getType(), &TargetTypeBaseInfo); // If the source l-value is opaque, honor the alignment of the // casted-to type. - if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { - if (BaseInfo) - BaseInfo->mergeForCast(TargetTypeBaseInfo); - Addr = Address(Addr.getPointer(), Addr.getElementType(), Align, - IsKnownNonNull); + if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { + if (baseInfo) + baseInfo->mergeForCast(TargetTypeBaseInfo); + addr = Address(addr.getPointer(), addr.getElementType(), Align, + isKnownNonNull); } } - if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) && + if (cgf.SanOpts.has(SanitizerKind::CFIUnrelatedCast) && CE->getCastKind() == CK_BitCast) { - if (auto PT = E->getType()->getAs()) + if (auto PT = expr->getType()->getAs()) llvm_unreachable("NYI"); } auto ElemTy = - CGF.getTypes().convertTypeForMem(E->getType()->getPointeeType()); - Addr = CGF.getBuilder().createElementBitCast( - CGF.getLoc(E->getSourceRange()), Addr, ElemTy); + cgf.getTypes().convertTypeForMem(expr->getType()->getPointeeType()); + addr = cgf.getBuilder().createElementBitCast( + cgf.getLoc(expr->getSourceRange()), addr, ElemTy); if (CE->getCastKind() == CK_AddressSpaceConversion) { assert(!MissingFeatures::addressSpace()); llvm_unreachable("NYI"); } - return Addr; + return addr; } break; @@ -189,7 +191,7 @@ static Address buildPointerWithAlignment(const Expr *E, // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo. case CK_ArrayToPointerDecay: - return CGF.buildArrayToPointerDecay(CE->getSubExpr()); + return cgf.buildArrayToPointerDecay(CE->getSubExpr()); case CK_UncheckedDerivedToBase: case CK_DerivedToBase: { @@ -197,29 +199,29 @@ static Address buildPointerWithAlignment(const Expr *E, // conservatively pretend that the complete object is of the base class // type. assert(!MissingFeatures::tbaa()); - Address Addr = CGF.buildPointerWithAlignment(CE->getSubExpr(), BaseInfo); + Address Addr = cgf.buildPointerWithAlignment(CE->getSubExpr(), baseInfo); auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); - return CGF.getAddressOfBaseClass( + return cgf.getAddressOfBaseClass( Addr, Derived, CE->path_begin(), CE->path_end(), - CGF.shouldNullCheckClassCastValue(CE), CE->getExprLoc()); + cgf.shouldNullCheckClassCastValue(CE), CE->getExprLoc()); } } } // Unary &. - if (const UnaryOperator *UO = dyn_cast(E)) { + if (const UnaryOperator *UO = dyn_cast(expr)) { // TODO(cir): maybe we should use cir.unary for pointers here instead. if (UO->getOpcode() == UO_AddrOf) { - LValue LV = CGF.buildLValue(UO->getSubExpr()); - if (BaseInfo) - *BaseInfo = LV.getBaseInfo(); + LValue LV = cgf.buildLValue(UO->getSubExpr()); + if (baseInfo) + *baseInfo = LV.getBaseInfo(); assert(!MissingFeatures::tbaa()); return LV.getAddress(); } } // std::addressof and variants. - if (auto *Call = dyn_cast(E)) { + if (auto *Call = dyn_cast(expr)) { switch (Call->getBuiltinCallee()) { default: break; @@ -234,9 +236,9 @@ static Address buildPointerWithAlignment(const Expr *E, // TODO: conditional operators, comma. // Otherwise, use the alignment of the type. - return CGF.makeNaturalAddressForPointer( - CGF.buildScalarExpr(E), E->getType()->getPointeeType(), CharUnits(), - /*ForPointeeType=*/true, BaseInfo, IsKnownNonNull); + return cgf.makeNaturalAddressForPointer( + cgf.buildScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(), + /*ForPointeeType=*/true, baseInfo, tbaaInfo, isKnownNonNull); } /// Helper method to check if the underlying ABI is AAPCS @@ -293,8 +295,9 @@ LValue CIRGenFunction::buildLValueForBitField(LValue base, QualType fieldType = field->getType().withCVRQualifiers(base.getVRQualifiers()); assert(!MissingFeatures::tbaa() && "NYI TBAA for bit fields"); - LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); - return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo); + LValueBaseInfo fieldBaseInfo(BaseInfo.getAlignmentSource()); + return LValue::MakeBitfield(Addr, info, fieldType, fieldBaseInfo, + TBAAAccessInfo()); } LValue CIRGenFunction::buildLValueForField(LValue base, @@ -584,49 +587,50 @@ mlir::Value CIRGenFunction::buildToMemory(mlir::Value Value, QualType Ty) { void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue) { // TODO: constant matrix type, no init, non temporal, TBAA buildStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), lvalue.getBaseInfo(), false, false); + lvalue.getType(), lvalue.getBaseInfo(), + lvalue.getTBAAInfo(), false, false); } -void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, - bool Volatile, QualType Ty, - LValueBaseInfo BaseInfo, bool isInit, +void CIRGenFunction::buildStoreOfScalar(mlir::Value value, Address addr, + bool isVolatile, QualType ty, + LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, bool isInit, bool isNontemporal) { - // TODO(CIR): this has fallen out of date with codegen - - Value = buildToMemory(Value, Ty); + value = buildToMemory(value, ty); - LValue AtomicLValue = LValue::makeAddr(Addr, Ty, getContext(), BaseInfo); - if (Ty->isAtomicType() || - (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) { - buildAtomicStore(RValue::get(Value), AtomicLValue, isInit); + LValue atomicLValue = + LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); + if (ty->isAtomicType() || + (!isInit && LValueIsSuitableForInlineAtomic(atomicLValue))) { + buildAtomicStore(RValue::get(value), atomicLValue, isInit); return; } - mlir::Type SrcTy = Value.getType(); - if (const auto *ClangVecTy = Ty->getAs()) { + mlir::Type SrcTy = value.getType(); + if (const auto *ClangVecTy = ty->getAs()) { + auto VecTy = dyn_cast(SrcTy); // TODO(CIR): this has fallen out of date with codegen llvm_unreachable("NYI: Special treatment of 3-element vector store"); - // auto VecTy = dyn_cast(SrcTy); // if (!CGM.getCodeGenOpts().PreserveVec3Type && // ClangVecTy->getNumElements() == 3) { // // Handle vec3 special. // if (VecTy && VecTy.getSize() == 3) { // // Our source is a vec3, do a shuffle vector to make it a vec4. - // Value = builder.createVecShuffle(Value.getLoc(), Value, + // value = builder.createVecShuffle(value.getLoc(), value, // ArrayRef{0, 1, 2, -1}); // SrcTy = mlir::cir::VectorType::get(VecTy.getContext(), // VecTy.getEltType(), 4); // } - // if (Addr.getElementType() != SrcTy) { - // Addr = Addr.withElementType(SrcTy); + // if (addr.getElementType() != SrcTy) { + // addr = addr.withElementType(SrcTy); // } // } } // Update the alloca with more info on initialization. - assert(Addr.getPointer() && "expected pointer to exist"); + assert(addr.getPointer() && "expected pointer to exist"); auto SrcAlloca = - dyn_cast_or_null(Addr.getPointer().getDefiningOp()); + dyn_cast_or_null(addr.getPointer().getDefiningOp()); if (currVarDecl && SrcAlloca) { const VarDecl *VD = currVarDecl; assert(VD && "VarDecl expected"); @@ -635,7 +639,7 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, } assert(currSrcLoc && "must pass in source location"); - builder.createStore(*currSrcLoc, Value, Addr, Volatile); + builder.createStore(*currSrcLoc, value, addr, isVolatile); if (isNontemporal) { llvm_unreachable("NYI"); @@ -652,8 +656,8 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, } buildStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), lvalue.getBaseInfo(), isInit, - lvalue.isNontemporal()); + lvalue.getType(), lvalue.getBaseInfo(), + lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); } /// Given an expression that represents a value lvalue, this @@ -1130,7 +1134,7 @@ CIRGenFunction::buildPointerToDataMemberBinaryExpr(const BinaryOperator *E) { LValue CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { // Emit the base vector as an l-value. - LValue Base; + LValue base; // ExtVectorElementExpr's base can either be a vector or pointer to vector. if (E->isArrow()) { @@ -1141,13 +1145,13 @@ CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { assert(!MissingFeatures::tbaa()); Address Ptr = buildPointerWithAlignment(E->getBase(), &BaseInfo); const auto *PT = E->getBase()->getType()->castAs(); - Base = makeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo); - Base.getQuals().removeObjCGCAttr(); + base = makeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo); + base.getQuals().removeObjCGCAttr(); } else if (E->getBase()->isGLValue()) { // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), // emit the base as an lvalue. assert(E->getBase()->getType()->isVectorType()); - Base = buildLValue(E->getBase()); + base = buildLValue(E->getBase()); } else { // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. assert(E->getBase()->getType()->isVectorType() && @@ -1158,28 +1162,28 @@ CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { QualType BaseTy = E->getBase()->getType(); Address VecMem = CreateMemTemp(BaseTy, Vec.getLoc(), "tmp"); builder.createStore(Vec.getLoc(), Vec, VecMem); - Base = makeAddrLValue(VecMem, BaseTy, AlignmentSource::Decl); + base = makeAddrLValue(VecMem, BaseTy, AlignmentSource::Decl); } QualType type = - E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); + E->getType().withCVRQualifiers(base.getQuals().getCVRQualifiers()); // Encode the element access list into a vector of unsigned indices. SmallVector indices; E->getEncodedElementAccess(indices); - if (Base.isSimple()) { + if (base.isSimple()) { SmallVector attrElts; for (uint32_t i : indices) { attrElts.push_back(static_cast(i)); } auto elts = builder.getI64ArrayAttr(attrElts); - return LValue::MakeExtVectorElt(Base.getAddress(), elts, type, - Base.getBaseInfo()); + return LValue::MakeExtVectorElt(base.getAddress(), elts, type, + base.getBaseInfo(), base.getTBAAInfo()); } - assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); + assert(base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); - mlir::ArrayAttr baseElts = Base.getExtVectorElts(); + mlir::ArrayAttr baseElts = base.getExtVectorElts(); // Composite the two indices SmallVector attrElts; @@ -1188,8 +1192,8 @@ CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { } auto elts = builder.getI64ArrayAttr(attrElts); - return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), elts, type, - Base.getBaseInfo()); + return LValue::MakeExtVectorElt(base.getExtVectorAddress(), elts, type, + base.getBaseInfo(), base.getTBAAInfo()); } LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { @@ -1240,12 +1244,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. Address CIRGenFunction::buildPointerWithAlignment( - const Expr *E, LValueBaseInfo *BaseInfo, KnownNonNull_t IsKnownNonNull) { - Address Addr = - ::buildPointerWithAlignment(E, BaseInfo, IsKnownNonNull, *this); - if (IsKnownNonNull && !Addr.isKnownNonNull()) - Addr.setKnownNonNull(); - return Addr; + const Expr *expr, LValueBaseInfo *baseInfo, TBAAAccessInfo *tbaaInfo, + KnownNonNull_t isKnownNonNull) { + Address addr = ::buildPointerWithAlignment(expr, baseInfo, tbaaInfo, + isKnownNonNull, *this); + if (isKnownNonNull && !addr.isKnownNonNull()) + addr.setKnownNonNull(); + return addr; } /// Perform the usual unary conversions on the specified @@ -1752,10 +1757,11 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // with this subscript. if (E->getBase()->getType()->isVectorType() && !isa(E->getBase())) { - LValue LHS = buildLValue(E->getBase()); - auto Index = EmitIdxAfterBase(/*Promote=*/false); - return LValue::MakeVectorElt(LHS.getAddress(), Index, - E->getBase()->getType(), LHS.getBaseInfo()); + LValue lhs = buildLValue(E->getBase()); + auto index = EmitIdxAfterBase(/*Promote=*/false); + return LValue::MakeVectorElt(lhs.getAddress(), index, + E->getBase()->getType(), lhs.getBaseInfo(), + lhs.getTBAAInfo()); } // All the other cases basically behave like simple offsetting. @@ -2568,11 +2574,11 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { if (LV.isSimple()) { // Defend against branches out of gnu statement expressions // surrounded by cleanups. - Address Addr = LV.getAddress(); - auto V = Addr.getPointer(); - LV = LValue::makeAddr(Addr.withPointer(V, NotKnownNonNull), - LV.getType(), getContext(), - LV.getBaseInfo() /*TODO(cir):TBAA*/); + Address addr = LV.getAddress(); + auto v = addr.getPointer(); + LV = LValue::makeAddr(addr.withPointer(v, NotKnownNonNull), + LV.getType(), getContext(), LV.getBaseInfo(), + LV.getTBAAInfo()); } }); @@ -2821,17 +2827,17 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty, } mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, - SourceLocation Loc) { + SourceLocation loc) { return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(), - lvalue.isNontemporal()); + lvalue.getType(), getLoc(loc), lvalue.getBaseInfo(), + lvalue.getTBAAInfo(), lvalue.isNontemporal()); } mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, - mlir::Location Loc) { + mlir::Location loc) { return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), Loc, lvalue.getBaseInfo(), - lvalue.isNontemporal()); + lvalue.getType(), loc, lvalue.getBaseInfo(), + lvalue.getTBAAInfo(), lvalue.isNontemporal()); } mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) { @@ -2842,29 +2848,32 @@ mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) { return Value; } -mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, - QualType Ty, SourceLocation Loc, - LValueBaseInfo BaseInfo, +mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile, + QualType ty, SourceLocation loc, + LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, bool isNontemporal) { - return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo, - isNontemporal); + return buildLoadOfScalar(addr, isVolatile, ty, getLoc(loc), baseInfo, + tbaaInfo, isNontemporal); } -mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, - QualType Ty, mlir::Location Loc, - LValueBaseInfo BaseInfo, +mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile, + QualType ty, mlir::Location loc, + LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, bool isNontemporal) { // TODO(CIR): this has fallen out of sync with codegen // Atomic operations have to be done on integral types - LValue AtomicLValue = LValue::makeAddr(Addr, Ty, getContext(), BaseInfo); - if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) { + LValue atomicLValue = + LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); + if (ty->isAtomicType() || LValueIsSuitableForInlineAtomic(atomicLValue)) { llvm_unreachable("NYI"); } - auto ElemTy = Addr.getElementType(); + auto ElemTy = addr.getElementType(); - if (const auto *ClangVecTy = Ty->getAs()) { + if (const auto *ClangVecTy = ty->getAs()) { // Handle vectors of size 3 like size 4 for better performance. const auto VTy = cast(ElemTy); @@ -2872,28 +2881,28 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, llvm_unreachable("NYI: Special treatment of 3-element vector store"); // if (!CGM.getCodeGenOpts().PreserveVec3Type && // ClangVecTy->getNumElements() == 3) { - // auto loc = Addr.getPointer().getLoc(); + // auto loc = addr.getPointer().getLoc(); // auto vec4Ty = // mlir::cir::VectorType::get(VTy.getContext(), VTy.getEltType(), 4); - // Address Cast = Addr.withElementType(vec4Ty); + // Address Cast = addr.withElementType(vec4Ty); // // Now load value. // mlir::Value V = builder.createLoad(loc, Cast); // // Shuffle vector to get vec3. // V = builder.createVecShuffle(loc, V, ArrayRef{0, 1, 2}); - // return buildFromMemory(V, Ty); + // return buildFromMemory(V, ty); // } } - auto Ptr = Addr.getPointer(); + auto Ptr = addr.getPointer(); if (mlir::isa(ElemTy)) { ElemTy = mlir::cir::IntType::get(builder.getContext(), 8, true); auto ElemPtrTy = mlir::cir::PointerType::get(builder.getContext(), ElemTy); - Ptr = builder.create(Loc, ElemPtrTy, + Ptr = builder.create(loc, ElemPtrTy, mlir::cir::CastKind::bitcast, Ptr); } - mlir::Value Load = builder.CIRBaseBuilderTy::createLoad(Loc, Ptr, Volatile); + mlir::Value Load = builder.CIRBaseBuilderTy::createLoad(loc, Ptr, isVolatile); if (isNontemporal) { llvm_unreachable("NYI"); @@ -2902,7 +2911,7 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, assert(!MissingFeatures::tbaa() && "NYI"); assert(!MissingFeatures::emitScalarRangeCheck() && "NYI"); - return buildFromMemory(Load, Ty); + return buildFromMemory(Load, ty); } // Note: this function also emit constructor calls to support a MSVC extensions @@ -2944,20 +2953,22 @@ RValue CIRGenFunction::buildReferenceBindingToExpr(const Expr *E) { return RValue::get(Value); } -Address CIRGenFunction::buildLoadOfReference(LValue RefLVal, mlir::Location Loc, - LValueBaseInfo *PointeeBaseInfo) { - assert(!RefLVal.isVolatile() && "NYI"); - mlir::cir::LoadOp Load = builder.create( - Loc, RefLVal.getAddress().getElementType(), - RefLVal.getAddress().getPointer()); +Address CIRGenFunction::buildLoadOfReference(LValue refLVal, mlir::Location loc, + LValueBaseInfo *pointeeBaseInfo, + TBAAAccessInfo *pointeeTBAAInfo) { + assert(!refLVal.isVolatile() && "NYI"); + mlir::cir::LoadOp load = builder.create( + loc, refLVal.getAddress().getElementType(), + refLVal.getAddress().getPointer()); // TODO(cir): DecorateInstructionWithTBAA relevant for us? assert(!MissingFeatures::tbaa()); - QualType PointeeType = RefLVal.getType()->getPointeeType(); - CharUnits Align = CGM.getNaturalTypeAlignment(PointeeType, PointeeBaseInfo, - /* forPointeeType= */ true); - return Address(Load, getTypes().convertTypeForMem(PointeeType), Align); + QualType pointeeType = refLVal.getType()->getPointeeType(); + CharUnits align = + CGM.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo, pointeeTBAAInfo, + /* forPointeeType= */ true); + return Address(load, getTypes().convertTypeForMem(pointeeType), align); } LValue CIRGenFunction::buildLoadOfReferenceLValue(LValue RefLVal, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 89f60f52f34d..63a18c328fa8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -119,7 +119,7 @@ CIRGenFunction::buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, // Emit the 'this' pointer. Address This = Address::invalid(); if (BO->getOpcode() == BO_PtrMemI) - This = buildPointerWithAlignment(BaseExpr, nullptr, KnownNonNull); + This = buildPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull); else This = buildLValue(BaseExpr).getAddress(); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index e011e1f83558..e686c7b0e63b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -25,6 +25,7 @@ #include "clang/CIR/Dialect/IR/FPEnv.h" #include "clang/Frontend/FrontendDiagnostic.h" +#include "CIRGenTBAA.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Support/LogicalResult.h" @@ -890,22 +891,25 @@ void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { /// Given a value of type T* that may not be to a complete object, construct /// an l-vlaue withi the natural pointee alignment of T. -LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Value V, - QualType T) { +LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Value val, + QualType ty) { // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps // assert on the result type first. - LValueBaseInfo BaseInfo; - CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, + LValueBaseInfo baseInfo; + TBAAAccessInfo tbaaInfo; + CharUnits align = CGM.getNaturalTypeAlignment(ty, &baseInfo, &tbaaInfo, /* for PointeeType= */ true); - return makeAddrLValue(Address(V, Align), T, BaseInfo); + return makeAddrLValue(Address(val, align), ty, baseInfo); } -LValue CIRGenFunction::MakeNaturalAlignAddrLValue(mlir::Value V, QualType T) { - LValueBaseInfo BaseInfo; +LValue CIRGenFunction::MakeNaturalAlignAddrLValue(mlir::Value val, + QualType ty) { + LValueBaseInfo baseInfo; + TBAAAccessInfo tbaaInfo; assert(!MissingFeatures::tbaa()); - CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo); - Address Addr(V, getTypes().convertTypeForMem(T), Alignment); - return LValue::makeAddr(Addr, T, getContext(), BaseInfo); + CharUnits alignment = CGM.getNaturalTypeAlignment(ty, &baseInfo, &tbaaInfo); + Address addr(val, getTypes().convertTypeForMem(ty), alignment); + return LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); } // Map the LangOption for exception behavior into the corresponding enum in diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 427eee9bcf5f..865c9b522e17 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -16,6 +16,7 @@ #include "CIRGenBuilder.h" #include "CIRGenCall.h" #include "CIRGenModule.h" +#include "CIRGenTBAA.h" #include "CIRGenTypeCache.h" #include "CIRGenValue.h" #include "EHScopeStack.h" @@ -816,12 +817,15 @@ class CIRGenFunction : public CIRGenTypeCache { /// the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. RValue buildLoadOfLValue(LValue LV, SourceLocation Loc); - mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, - clang::SourceLocation Loc, - LValueBaseInfo BaseInfo, + mlir::Value buildLoadOfScalar(Address addr, bool isVolatile, + clang::QualType ty, clang::SourceLocation loc, + LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, bool isNontemporal = false); - mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, - mlir::Location Loc, LValueBaseInfo BaseInfo, + mlir::Value buildLoadOfScalar(Address addr, bool isVolatile, + clang::QualType ty, mlir::Location loc, + LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, bool isNontemporal = false); int64_t getAccessedFieldNo(unsigned idx, const mlir::ArrayAttr elts); @@ -834,12 +838,12 @@ class CIRGenFunction : public CIRGenTypeCache { /// Load a scalar value from an address, taking care to appropriately convert /// from the memory representation to CIR value representation. - mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, - clang::SourceLocation Loc, - AlignmentSource Source = AlignmentSource::Type, + mlir::Value buildLoadOfScalar(Address addr, bool isVolatile, + clang::QualType ty, clang::SourceLocation loc, + AlignmentSource source = AlignmentSource::Type, bool isNontemporal = false) { - return buildLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source), - isNontemporal); + return buildLoadOfScalar(addr, isVolatile, ty, loc, LValueBaseInfo(source), + CGM.getTBAAAccessInfo(ty), isNontemporal); } /// Load a scalar value from an address, taking care to appropriately convert @@ -851,8 +855,9 @@ class CIRGenFunction : public CIRGenTypeCache { /// Load a complex number from the specified l-value. mlir::Value buildLoadOfComplex(LValue src, SourceLocation loc); - Address buildLoadOfReference(LValue RefLVal, mlir::Location Loc, - LValueBaseInfo *PointeeBaseInfo = nullptr); + Address buildLoadOfReference(LValue refLVal, mlir::Location loc, + LValueBaseInfo *pointeeBaseInfo = nullptr, + TBAAAccessInfo *pointeeTBAAInfo = nullptr); LValue buildLoadOfReferenceLValue(LValue RefLVal, mlir::Location Loc); LValue buildLoadOfReferenceLValue(Address RefAddr, mlir::Location Loc, @@ -1275,9 +1280,17 @@ class CIRGenFunction : public CIRGenTypeCache { clang::QualType::DestructionKind dtorKind); void buildStoreOfScalar(mlir::Value value, LValue lvalue); - void buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, - clang::QualType Ty, LValueBaseInfo BaseInfo, - bool isInit = false, bool isNontemporal = false); + void buildStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, + clang::QualType ty, LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, bool isInit = false, + bool isNontemporal = false); + void buildStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, + QualType ty, + AlignmentSource source = AlignmentSource::Type, + bool isInit = false, bool isNontemporal = false) { + buildStoreOfScalar(value, addr, isVolatile, ty, LValueBaseInfo(source), + CGM.getTBAAAccessInfo(ty), isInit, isNontemporal); + } void buildStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit); mlir::Value buildToMemory(mlir::Value Value, clang::QualType Ty); @@ -1352,9 +1365,10 @@ class CIRGenFunction : public CIRGenTypeCache { /// reasonable to just ignore the returned alignment when it isn't from an /// explicit source. Address - buildPointerWithAlignment(const clang::Expr *E, - LValueBaseInfo *BaseInfo = nullptr, - KnownNonNull_t IsKnownNonNull = NotKnownNonNull); + buildPointerWithAlignment(const clang::Expr *expr, + LValueBaseInfo *baseInfo = nullptr, + TBAAAccessInfo *tbaaInfo = nullptr, + KnownNonNull_t isKnownNonNull = NotKnownNonNull); LValue buildConditionalOperatorLValue(const AbstractConditionalOperator *expr); @@ -1534,19 +1548,21 @@ class CIRGenFunction : public CIRGenTypeCache { }; LValue MakeNaturalAlignPointeeAddrLValue(mlir::Value V, clang::QualType T); - LValue MakeNaturalAlignAddrLValue(mlir::Value V, QualType T); + LValue MakeNaturalAlignAddrLValue(mlir::Value val, QualType ty); /// Construct an address with the natural alignment of T. If a pointer to T /// is expected to be signed, the pointer passed to this function must have /// been signed, and the returned Address will have the pointer authentication /// information needed to authenticate the signed pointer. Address makeNaturalAddressForPointer( - mlir::Value Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(), - bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr, - KnownNonNull_t IsKnownNonNull = NotKnownNonNull) { - if (Alignment.isZero()) - Alignment = CGM.getNaturalTypeAlignment(T, BaseInfo, ForPointeeType); - return Address(Ptr, convertTypeForMem(T), Alignment, IsKnownNonNull); + mlir::Value ptr, QualType t, CharUnits alignment = CharUnits::Zero(), + bool forPointeeType = false, LValueBaseInfo *baseInfo = nullptr, + TBAAAccessInfo *tbaaInfo = nullptr, + KnownNonNull_t isKnownNonNull = NotKnownNonNull) { + if (alignment.isZero()) + alignment = + CGM.getNaturalTypeAlignment(t, baseInfo, tbaaInfo, forPointeeType); + return Address(ptr, convertTypeForMem(t), alignment, isKnownNonNull); } /// Load the value for 'this'. This function is only valid while generating @@ -1590,14 +1606,16 @@ class CIRGenFunction : public CIRGenTypeCache { QualType DstTy, SourceLocation Loc); - LValue makeAddrLValue(Address Addr, clang::QualType T, - LValueBaseInfo BaseInfo) { - return LValue::makeAddr(Addr, T, getContext(), BaseInfo); + LValue makeAddrLValue(Address addr, clang::QualType ty, + LValueBaseInfo baseInfo) { + return LValue::makeAddr(addr, ty, getContext(), baseInfo, + CGM.getTBAAAccessInfo(ty)); } - LValue makeAddrLValue(Address Addr, clang::QualType T, - AlignmentSource Source = AlignmentSource::Type) { - return LValue::makeAddr(Addr, T, getContext(), LValueBaseInfo(Source)); + LValue makeAddrLValue(Address addr, clang::QualType ty, + AlignmentSource source = AlignmentSource::Type) { + return LValue::makeAddr(addr, ty, getContext(), LValueBaseInfo(source), + CGM.getTBAAAccessInfo(ty)); } void initializeVTablePointers(mlir::Location loc, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 0f4f322f7856..0922a29934b9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -16,6 +16,7 @@ #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenOpenMPRuntime.h" +#include "CIRGenTBAA.h" #include "CIRGenTypes.h" #include "CIRGenValue.h" #include "TargetInfo.h" @@ -244,11 +245,9 @@ CharUnits CIRGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) { /// FIXME: this could likely be a common helper and not necessarily related /// with codegen. -/// TODO: Add TBAAAccessInfo -CharUnits -CIRGenModule::getNaturalPointeeTypeAlignment(QualType T, - LValueBaseInfo *BaseInfo) { - return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, +CharUnits CIRGenModule::getNaturalPointeeTypeAlignment( + QualType ty, LValueBaseInfo *baseInfo, TBAAAccessInfo *tbaaInfo) { + return getNaturalTypeAlignment(ty->getPointeeType(), baseInfo, tbaaInfo, /* forPointeeType= */ true); } @@ -257,6 +256,7 @@ CIRGenModule::getNaturalPointeeTypeAlignment(QualType T, /// TODO: Add TBAAAccessInfo CharUnits CIRGenModule::getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo, + TBAAAccessInfo *tbaaInfo, bool forPointeeType) { // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But // that doesn't return the information we need to compute BaseInfo. @@ -3378,3 +3378,9 @@ void CIRGenModule::buildGlobalAnnotations() { } deferredAnnotations.clear(); } + +TBAAAccessInfo CIRGenModule::getTBAAAccessInfo(QualType accessType) { + if (!tbaa) + return TBAAAccessInfo(); + llvm_unreachable("NYI"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 41126978422c..1f55a83a767c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -17,6 +17,7 @@ #include "CIRGenBuilder.h" #include "CIRGenCall.h" #include "CIRGenOpenCLRuntime.h" +#include "CIRGenTBAA.h" #include "CIRGenTypeCache.h" #include "CIRGenTypes.h" #include "CIRGenVTables.h" @@ -95,6 +96,8 @@ class CIRGenModule : public CIRGenTypeCache { std::unique_ptr ABI; + std::unique_ptr tbaa; + /// Used for `UniqueInternalLinkageNames` option std::string ModuleNameHash = ""; @@ -442,15 +445,16 @@ class CIRGenModule : public CIRGenTypeCache { /// FIXME: this could likely be a common helper and not necessarily related /// with codegen. - /// TODO: Add TBAAAccessInfo - clang::CharUnits getNaturalPointeeTypeAlignment(clang::QualType T, - LValueBaseInfo *BaseInfo); + clang::CharUnits + getNaturalPointeeTypeAlignment(clang::QualType ty, + LValueBaseInfo *baseInfo = nullptr, + TBAAAccessInfo *tbaaInfo = nullptr); /// FIXME: this could likely be a common helper and not necessarily related /// with codegen. - /// TODO: Add TBAAAccessInfo clang::CharUnits getNaturalTypeAlignment(clang::QualType T, LValueBaseInfo *BaseInfo = nullptr, + TBAAAccessInfo *tbaaInfo = nullptr, bool forPointeeType = false); /// TODO: Add TBAAAccessInfo @@ -482,6 +486,10 @@ class CIRGenModule : public CIRGenTypeCache { return VTables.getItaniumVTableContext(); } + /// getTBAAAccessInfo - Gte TBAA information that describes an access to an + /// object of the given type. + TBAAAccessInfo getTBAAAccessInfo(QualType accessType); + /// This contains all the decls which have definitions but which are deferred /// for emission and therefore should only be output if they are actually /// used. If a decl is in this, then it is known to have not been referenced diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h index e69de29bb2d1..ab5ac9b575c0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.h +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.h @@ -0,0 +1,28 @@ +//===--- CIRGenTBAA.h - TBAA information for LLVM CIRGen --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is the code that manages TBAA information and defines the TBAA policy +// for the optimizer to use. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENTBAA_H +#define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENTBAA_H + +namespace cir { + +// TBAAAccessInfo - Describes a memory access in terms of TBAA. +struct TBAAAccessInfo {}; + +/// CIRGenTBAA - This class organizes the cross-module state that is used while +/// lowering AST types to LLVM types. +class CIRGenTBAA {}; + +} // namespace cir + +#endif diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 37acf60457c9..50a925eabdbd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -16,6 +16,7 @@ #include "Address.h" #include "CIRGenRecordLayout.h" +#include "CIRGenTBAA.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" @@ -175,9 +176,12 @@ class LValue { // this lvalue. bool Nontemporal : 1; + TBAAAccessInfo tbaaInfo; + private: void Initialize(clang::QualType Type, clang::Qualifiers Quals, - clang::CharUnits Alignment, LValueBaseInfo BaseInfo) { + clang::CharUnits Alignment, LValueBaseInfo BaseInfo, + TBAAAccessInfo tbaaInfo) { assert((!Alignment.isZero() || Type->isIncompleteType()) && "initializing l-value with zero alignment!"); if (isGlobalReg()) @@ -194,6 +198,7 @@ class LValue { assert(this->Alignment == Alignment.getQuantity() && "Alignment exceeds allowed max!"); this->BaseInfo = BaseInfo; + this->tbaaInfo = tbaaInfo; // TODO: ObjC flags // Initialize Objective-C flags. @@ -270,7 +275,7 @@ class LValue { R.V = address.getPointer(); R.ElementType = address.getElementType(); R.Initialize(T, T.getQualifiers(), address.getAlignment(), - LValueBaseInfo(Source)); + LValueBaseInfo(Source), TBAAAccessInfo()); return R; } @@ -281,25 +286,28 @@ class LValue { R.LVType = Simple; R.V = address.getPointer(); R.ElementType = address.getElementType(); - R.Initialize(T, T.getQualifiers(), address.getAlignment(), LBI); + R.Initialize(T, T.getQualifiers(), address.getAlignment(), LBI, + TBAAAccessInfo()); return R; } static LValue makeAddr(Address address, clang::QualType type, - clang::ASTContext &Context, LValueBaseInfo BaseInfo) { + clang::ASTContext &context, LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo) { clang::Qualifiers qs = type.getQualifiers(); - qs.setObjCGCAttr(Context.getObjCGCAttrKind(type)); + qs.setObjCGCAttr(context.getObjCGCAttrKind(type)); LValue R; R.LVType = Simple; assert(mlir::cast(address.getPointer().getType())); R.V = address.getPointer(); R.ElementType = address.getElementType(); - R.Initialize(type, qs, address.getAlignment(), - BaseInfo); // TODO: TBAAInfo); + R.Initialize(type, qs, address.getAlignment(), baseInfo, tbaaInfo); return R; } + TBAAAccessInfo getTBAAInfo() const { return tbaaInfo; } + const clang::Qualifiers &getQuals() const { return Quals; } clang::Qualifiers &getQuals() { return Quals; } @@ -330,28 +338,29 @@ class LValue { return mlir::cast(VectorElts); } - static LValue MakeVectorElt(Address vecAddress, mlir::Value Index, - clang::QualType type, LValueBaseInfo BaseInfo) { + static LValue MakeVectorElt(Address vecAddress, mlir::Value index, + clang::QualType type, LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo) { LValue R; R.LVType = VectorElt; R.V = vecAddress.getPointer(); R.ElementType = vecAddress.getElementType(); - R.VectorIdx = Index; + R.VectorIdx = index; R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(), - BaseInfo); + baseInfo, tbaaInfo); return R; } static LValue MakeExtVectorElt(Address vecAddress, mlir::ArrayAttr elts, - clang::QualType type, - LValueBaseInfo baseInfo) { + clang::QualType type, LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo) { LValue R; R.LVType = ExtVectorElt; R.V = vecAddress.getPointer(); R.ElementType = vecAddress.getElementType(); R.VectorElts = elts; R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(), - baseInfo); + baseInfo, tbaaInfo); return R; } @@ -376,14 +385,16 @@ class LValue { /// bit-field refers to. /// \param Info - The information describing how to perform the bit-field /// access. - static LValue MakeBitfield(Address Addr, const CIRGenBitFieldInfo &Info, - clang::QualType type, LValueBaseInfo BaseInfo) { + static LValue MakeBitfield(Address addr, const CIRGenBitFieldInfo &info, + clang::QualType type, LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo) { LValue R; R.LVType = BitField; - R.V = Addr.getPointer(); - R.ElementType = Addr.getElementType(); - R.BitFieldInfo = &Info; - R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo); + R.V = addr.getPointer(); + R.ElementType = addr.getElementType(); + R.BitFieldInfo = &info; + R.Initialize(type, type.getQualifiers(), addr.getAlignment(), baseInfo, + tbaaInfo); return R; } }; From 351504bab72bd0d1d376120bb17d291e975922bb Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 18 Sep 2024 23:16:31 -0400 Subject: [PATCH 1864/2301] [CIR][CodeGen] Stub out an empty CIRGenDebugInfo type --- clang/lib/CIR/CodeGen/CIRGenDebugInfo.h | 20 ++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 8 ++++---- 2 files changed, 24 insertions(+), 4 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenDebugInfo.h diff --git a/clang/lib/CIR/CodeGen/CIRGenDebugInfo.h b/clang/lib/CIR/CodeGen/CIRGenDebugInfo.h new file mode 100644 index 000000000000..9aa503bf07e5 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenDebugInfo.h @@ -0,0 +1,20 @@ +//===--- CIRGenDebugInfo.h - DebugInfo for CIRGen ---------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is the source-level debug info generator for CIR translation. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENDEBUGINFO_H +#define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENDEBUGINFO_H + +namespace cir { +class CIRGenDebugInfo {}; +} // namespace cir + +#endif // LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENDEBUGINFO_H diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 865c9b522e17..0e284477cfa7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -15,6 +15,7 @@ #include "CIRGenBuilder.h" #include "CIRGenCall.h" +#include "CIRGenDebugInfo.h" #include "CIRGenModule.h" #include "CIRGenTBAA.h" #include "CIRGenTypeCache.h" @@ -524,6 +525,8 @@ class CIRGenFunction : public CIRGenTypeCache { /// delcs. DeclMapTy LocalDeclMap; + CIRGenDebugInfo *debugInfo = nullptr; + /// Whether llvm.stacksave has been called. Used to avoid /// calling llvm.stacksave for multiple VLAs in the same scope. /// TODO: Translate to MLIR @@ -585,10 +588,7 @@ class CIRGenFunction : public CIRGenTypeCache { const clang::LangOptions &getLangOpts() const { return CGM.getLangOpts(); } - // TODO: This is currently just a dumb stub. But we want to be able to clearly - // assert where we arne't doing things that we know we should and will crash - // as soon as we add a DebugInfo type to this class. - std::nullptr_t *getDebugInfo() { return nullptr; } + CIRGenDebugInfo *getDebugInfo() { return debugInfo; } void buildReturnOfRValue(mlir::Location loc, RValue RV, QualType Ty); From 94d215342203e6877a251301028378b8fbf3adb0 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Fri, 20 Sep 2024 02:03:58 +0800 Subject: [PATCH 1865/2301] [CIR][CIRGen] Implement Nullpointer arithmatic extension (#861) See the test for example. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/test/CIR/CodeGen/null-arithmatic-expression.c | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/null-arithmatic-expression.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index aed756f71639..4116925c448a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1245,7 +1245,7 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, // if (BinaryOperator::isNullPointerArithmeticExtension( CGF.getContext(), op.Opcode, expr->getLHS(), expr->getRHS())) - llvm_unreachable("null pointer arithmetic extension is NYI"); + return CGF.getBuilder().createIntToPtr(index, pointer.getType()); // Differently from LLVM codegen, ABI bits for index sizes is handled during // LLVM lowering. diff --git a/clang/test/CIR/CodeGen/null-arithmatic-expression.c b/clang/test/CIR/CodeGen/null-arithmatic-expression.c new file mode 100644 index 000000000000..62cde494fb58 --- /dev/null +++ b/clang/test/CIR/CodeGen/null-arithmatic-expression.c @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +#define NULL ((void *)0) + +char *foo() { + return (char*)NULL + 1; +} + +// CHECK: cir.func no_proto @foo() +// CHECK: [[CONST_1:%[0-9]+]] = cir.const #cir.int<1> : !s32i +// CHECK: {{.*}} = cir.cast(int_to_ptr, [[CONST_1]] : !s32i) +// CHECK: cir.return From 22fce984088ee657419df8547b838d769a4a9416 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 19 Sep 2024 21:07:16 +0300 Subject: [PATCH 1866/2301] [CIR][Codegen] supports aarch64_be (#864) This PR adds aarch64 big endian support. Basically the support for aarch64_be itself is expressed only in two extra cases for the switch statement and changes in the `CIRDataLayout` are needed to prove that we really support big endian. Hence the idea for the test - I think the best way for proof is something connected with bit-fields, so we compare the results of the original codegen and ours. --- .../clang/CIR/Dialect/IR/CIRDataLayout.h | 3 +- clang/lib/CIR/CodeGen/TargetInfo.cpp | 2 + clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 17 ++++-- .../Transforms/TargetLowering/LowerModule.cpp | 1 + clang/test/CIR/CodeGen/bitfields_be.c | 54 +++++++++++++++++++ 5 files changed, 73 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/bitfields_be.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h index cdca6d19be1c..2809e4ed55eb 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h @@ -12,6 +12,7 @@ #ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRDATALAYOUT_H #define LLVM_CLANG_CIR_DIALECT_IR_CIRDATALAYOUT_H +#include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/IR/BuiltinOps.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/IR/DataLayout.h" @@ -41,7 +42,7 @@ class CIRDataLayout { CIRDataLayout(mlir::ModuleOp modOp); /// Parse a data layout string (with fallback to default values). - void reset(); + void reset(mlir::DataLayoutSpecInterface spec); // Free all internal data structures. void clear(); diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index ee39e1b28418..a802abe18313 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -588,6 +588,8 @@ const TargetCIRGenInfo &CIRGenModule::getTargetCIRGenInfo() { switch (Triple.getArch()) { default: assert(false && "Target not yet supported!"); + + case llvm::Triple::aarch64_be: case llvm::Triple::aarch64: { AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; assert(getTarget().getABI() == "aapcs" || diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp index 2dadc469f789..86584ac67e18 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp @@ -112,13 +112,24 @@ class StructLayoutMap { } // namespace -CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { reset(); } +CIRDataLayout::CIRDataLayout(mlir::ModuleOp modOp) : layout{modOp} { + reset(modOp.getDataLayoutSpec()); +} -void CIRDataLayout::reset() { +void CIRDataLayout::reset(mlir::DataLayoutSpecInterface spec) { clear(); - LayoutMap = nullptr; bigEndian = false; + if (spec) { + auto key = mlir::StringAttr::get( + spec.getContext(), mlir::DLTIDialect::kDataLayoutEndiannessKey); + if (auto entry = spec.getSpecForIdentifier(key)) + if (auto str = llvm::dyn_cast(entry.getValue())) + bigEndian = str == mlir::DLTIDialect::kDataLayoutEndiannessBig; + } + + LayoutMap = nullptr; + // ManglingMode = MM_None; // NonIntegralAddressSpaces.clear(); StructAlignment = diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 086822ed4143..715a5f2470d7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -62,6 +62,7 @@ createTargetLoweringInfo(LowerModule &LM) { const llvm::Triple &Triple = Target.getTriple(); switch (Triple.getArch()) { + case llvm::Triple::aarch64_be: case llvm::Triple::aarch64: { AArch64ABIKind Kind = AArch64ABIKind::AAPCS; if (Target.getABI() == "darwinpcs") diff --git a/clang/test/CIR/CodeGen/bitfields_be.c b/clang/test/CIR/CodeGen/bitfields_be.c new file mode 100644 index 000000000000..9063a33fdd8d --- /dev/null +++ b/clang/test/CIR/CodeGen/bitfields_be.c @@ -0,0 +1,54 @@ +// RUN: %clang_cc1 -triple aarch64_be-unknown-linux-gnu -emit-llvm %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=LLVM + +// RUN: %clang_cc1 -triple aarch64_be-unknown-linux-gnu -fclangir -emit-llvm %s -o %t1.cir +// RUN: FileCheck --input-file=%t1.cir %s + +typedef struct { + int a : 4; + int b : 11; + int c : 17; +} S; + +void init(S* s) { + s->a = -4; + s->b = 42; + s->c = -12345; +} + +// field 'a' +// LLVM: %[[PTR0:.*]] = load ptr +// CHECK: %[[PTR0:.*]] = load ptr +// LLVM: %[[VAL0:.*]] = load i32, ptr %[[PTR0]] +// CHECK: %[[VAL0:.*]] = load i32, ptr %[[PTR0]] +// LLVM: %[[AND0:.*]] = and i32 %[[VAL0]], 268435455 +// CHECK: %[[AND0:.*]] = and i32 %[[VAL0]], 268435455 +// LLVM: %[[OR0:.*]] = or i32 %[[AND0]], -1073741824 +// CHECK: %[[OR0:.*]] = or i32 %[[AND0]], -1073741824 +// LLVM: store i32 %[[OR0]], ptr %[[PTR0]] +// CHECK: store i32 %[[OR0]], ptr %[[PTR0]] + +// field 'b' +// LLVM: %[[PTR1:.*]] = load ptr +// CHECK: %[[PTR1:.*]] = load ptr +// LLVM: %[[VAL1:.*]] = load i32, ptr %[[PTR1]] +// CHECK: %[[VAL1:.*]] = load i32, ptr %[[PTR1]] +// LLVM: %[[AND1:.*]] = and i32 %[[VAL1]], -268304385 +// CHECK: %[[AND1:.*]] = and i32 %[[VAL1]], -268304385 +// LLVM: %[[OR1:.*]] = or i32 %[[AND1]], 5505024 +// CHECK: %[[OR1:.*]] = or i32 %[[AND1]], 5505024 +// LLVM: store i32 %[[OR1]], ptr %[[PTR1]] +// CHECK: store i32 %[[OR1]], ptr %[[PTR1]] + +// field 'c' +// LLVM: %[[PTR2:.*]] = load ptr +// CHECK: %[[PTR2:.*]] = load ptr +// LLVM: %[[VAL2:.*]] = load i32, ptr %[[PTR2]] +// CHECK: %[[VAL2:.*]] = load i32, ptr %[[PTR2]] +// LLVM: %[[AND2:.*]] = and i32 %[[VAL2]], -131072 +// CHECK: %[[AND2:.*]] = and i32 %[[VAL2]], -131072 +// LLVM: %[[OR2:.*]] = or i32 %[[AND2]], 118727 +// CHECK: %[[OR2:.*]] = or i32 %[[AND2]], 118727 +// LLVM: store i32 %[[OR2]], ptr %[[PTR2]] +// CHECK: store i32 %[[OR2]], ptr %[[PTR2]] + From 6aa649a25bb654f4fd6cc5eadd1ae46369053fbe Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 20 Sep 2024 02:16:14 +0800 Subject: [PATCH 1867/2301] [CIR] Split cir-simplify into two passes (#868) This PR splits the old `cir-simplify` pass into two new passes, namely `cir-canonicalize` and `cir-simplify` (the new `cir-simplify`). The `cir-canonicalize` pass runs transformations that do not affect CIR-to-source fidelity much, such as operation folding and redundant operation elimination. On the other hand, the new `cir-simplify` pass runs transformations that may significantly change the code and break high-level code analysis passes, such as more aggresive code optimizations. This PR also updates the CIR-to-CIR pipeline to fit these two new passes. The `cir-canonicalize` pass is moved to the very front of the pipeline, while the new `cir-simplify` pass is moved to the back of the pipeline (but still before lowering prepare of course). Additionally, the new `cir-simplify` now only runs when the user specifies a non-zero optimization level on the frontend. Also fixed some typos and resolved some `clang-tidy` complaints along the way. Resolves #827 . --- clang/include/clang/CIR/CIRToCIRPasses.h | 4 +- clang/include/clang/CIR/Dialect/Passes.h | 1 + clang/include/clang/CIR/Dialect/Passes.td | 35 ++-- clang/lib/CIR/CodeGen/CIRPasses.cpp | 9 +- .../Dialect/Transforms/CIRCanonicalize.cpp | 187 ++++++++++++++++++ .../CIR/Dialect/Transforms/CIRSimplify.cpp | 136 +------------ .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 1 + clang/test/CIR/CodeGen/binop.cpp | 2 +- clang/test/CIR/CodeGen/complex.c | 8 +- clang/test/CIR/CodeGen/ternary.cpp | 2 +- clang/test/CIR/Transforms/builtin-assume.cir | 4 +- clang/test/CIR/Transforms/merge-cleanups.cir | 2 +- clang/test/CIR/Transforms/select.cir | 2 +- clang/test/CIR/Transforms/simpl.c | 4 +- clang/test/CIR/Transforms/simpl.cir | 2 +- clang/test/CIR/Transforms/ternary-fold.cir | 2 +- clang/test/CIR/Transforms/ternary-fold.cpp | 10 +- clang/test/CIR/mlirprint.c | 8 +- clang/tools/cir-opt/cir-opt.cpp | 3 + 20 files changed, 250 insertions(+), 173 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index a88fba7a76e3..4ad4aeebb22e 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -34,8 +34,8 @@ mlir::LogicalResult runCIRToCIRPasses( llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, llvm::StringRef libOptOpts, std::string &passOptParsingFailure, - bool flattenCIR, bool emitMLIR, bool enableCallConvLowering, - bool enableMem2reg); + bool enableCIRSimplify, bool flattenCIR, bool emitMLIR, + bool enableCallConvLowering, bool enableMem2reg); } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 116966d52f15..67e9da2246b6 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -26,6 +26,7 @@ std::unique_ptr createLifetimeCheckPass(ArrayRef remark, ArrayRef hist, unsigned hist_limit, clang::ASTContext *astCtx); +std::unique_ptr createCIRCanonicalizePass(); std::unique_ptr createCIRSimplifyPass(); std::unique_ptr createDropASTPass(); std::unique_ptr createSCFPreparePass(); diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 1c17ca9c5ed1..d72bf0bfd420 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -11,22 +11,33 @@ include "mlir/Pass/PassBase.td" -def CIRSimplify : Pass<"cir-simplify"> { - let summary = "Performs CIR simplification"; +def CIRCanonicalize : Pass<"cir-canonicalize"> { + let summary = "Performs CIR canonicalization"; let description = [{ - The pass rewrites CIR and removes some redundant operations. + Perform canonicalizations on CIR and removes some redundant operations. + + This pass performs basic cleanup and canonicalization transformations that + hopefully do not affect CIR-to-source fidelity and high-level code analysis + passes too much. Example transformations performed in this pass include + empty scope cleanup, trivial try cleanup, redundant branch cleanup, etc. + Those more "heavyweight" transformations and those transformations that + could significantly affect CIR-to-source fidelity are performed in the + `cir-simplify` pass. + }]; - For example, due to canonicalize pass is too aggressive for CIR when - the pipeline is used for C/C++ analysis, this pass runs some rewrites - for scopes, merging some blocks and eliminating unnecessary control-flow. + let constructor = "mlir::createCIRCanonicalizePass()"; + let dependentDialects = ["cir::CIRDialect"]; +} - Also, the pass removes redundant and/or unneccessary cast and unary not - operation e.g. - ```mlir - %1 = cir.cast(bool_to_int, %0 : !cir.bool), !s32i - %2 = cir.cast(int_to_bool, %1 : !s32i), !cir.bool - ``` +def CIRSimplify : Pass<"cir-simplify"> { + let summary = "Performs CIR simplification and code optimization"; + let description = [{ + The pass performs code simplification and optimization on CIR. + Unlike the `cir-canonicalize` pass, this pass contains more aggresive code + transformations that could significantly affect CIR-to-source fidelity. + Example transformations performed in this pass include ternary folding, + code hoisting, etc. }]; let constructor = "mlir::createCIRSimplifyPass()"; let dependentDialects = ["cir::CIRDialect"]; diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 60e393f7985f..4f89daa1cee4 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -26,11 +26,11 @@ mlir::LogicalResult runCIRToCIRPasses( llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, llvm::StringRef libOptOpts, std::string &passOptParsingFailure, - bool flattenCIR, bool emitMLIR, bool enableCallConvLowering, - bool enableMem2Reg) { + bool enableCIRSimplify, bool flattenCIR, bool emitMLIR, + bool enableCallConvLowering, bool enableMem2Reg) { mlir::PassManager pm(mlirCtx); - pm.addPass(mlir::createCIRSimplifyPass()); + pm.addPass(mlir::createCIRCanonicalizePass()); // TODO(CIR): Make this actually propagate errors correctly. This is stubbed // in to get rebases going. @@ -66,6 +66,9 @@ mlir::LogicalResult runCIRToCIRPasses( pm.addPass(std::move(libOpPass)); } + if (enableCIRSimplify) + pm.addPass(mlir::createCIRSimplifyPass()); + pm.addPass(mlir::createLoweringPreparePass(&astCtx)); // FIXME(cir): This pass should run by default, but it is lacking support for diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp new file mode 100644 index 000000000000..e2cac79c2c70 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp @@ -0,0 +1,187 @@ +//===- CIRSimplify.cpp - performs CIR canonicalization --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "PassDetail.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/Block.h" +#include "mlir/IR/Operation.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/IR/Region.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" + +using namespace mlir; +using namespace cir; + +namespace { + +/// Removes branches between two blocks if it is the only branch. +/// +/// From: +/// ^bb0: +/// cir.br ^bb1 +/// ^bb1: // pred: ^bb0 +/// cir.return +/// +/// To: +/// ^bb0: +/// cir.return +struct RemoveRedundantBranches : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult matchAndRewrite(BrOp op, + PatternRewriter &rewriter) const final { + Block *block = op.getOperation()->getBlock(); + Block *dest = op.getDest(); + + if (isa(dest->front())) + return failure(); + + // Single edge between blocks: merge it. + if (block->getNumSuccessors() == 1 && + dest->getSinglePredecessor() == block) { + rewriter.eraseOp(op); + rewriter.mergeBlocks(dest, block); + return success(); + } + + return failure(); + } +}; + +struct RemoveEmptyScope : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult match(ScopeOp op) const final { + return success(op.getRegion().empty() || + (op.getRegion().getBlocks().size() == 1 && + op.getRegion().front().empty())); + } + + void rewrite(ScopeOp op, PatternRewriter &rewriter) const final { + rewriter.eraseOp(op); + } +}; + +struct RemoveEmptySwitch : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult match(SwitchOp op) const final { + return success(op.getRegions().empty()); + } + + void rewrite(SwitchOp op, PatternRewriter &rewriter) const final { + rewriter.eraseOp(op); + } +}; + +struct RemoveTrivialTry : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult match(TryOp op) const final { + // FIXME: also check all catch regions are empty + // return success(op.getTryRegion().hasOneBlock()); + return mlir::failure(); + } + + void rewrite(TryOp op, PatternRewriter &rewriter) const final { + // Move try body to the parent. + assert(op.getTryRegion().hasOneBlock()); + + Block *parentBlock = op.getOperation()->getBlock(); + mlir::Block *tryBody = &op.getTryRegion().getBlocks().front(); + YieldOp y = dyn_cast(tryBody->getTerminator()); + assert(y && "expected well wrapped up try block"); + y->erase(); + + rewriter.inlineBlockBefore(tryBody, parentBlock, Block::iterator(op)); + rewriter.eraseOp(op); + } +}; + +// Remove call exception with empty cleanups +struct SimplifyCallOp : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult match(CallOp op) const final { + // Applicable to cir.call exception ... clean { cir.yield } + mlir::Region *r = &op.getCleanup(); + if (r->empty() || !r->hasOneBlock()) + return failure(); + + mlir::Block *b = &r->getBlocks().back(); + if (&b->back() != &b->front()) + return failure(); + + return success(isa(&b->getOperations().back())); + } + + void rewrite(CallOp op, PatternRewriter &rewriter) const final { + mlir::Block *b = &op.getCleanup().back(); + rewriter.eraseOp(&b->back()); + rewriter.eraseBlock(b); + } +}; + +//===----------------------------------------------------------------------===// +// CIRCanonicalizePass +//===----------------------------------------------------------------------===// + +struct CIRCanonicalizePass : public CIRCanonicalizeBase { + using CIRCanonicalizeBase::CIRCanonicalizeBase; + + // The same operation rewriting done here could have been performed + // by CanonicalizerPass (adding hasCanonicalizer for target Ops and + // implementing the same from above in CIRDialects.cpp). However, it's + // currently too aggressive for static analysis purposes, since it might + // remove things where a diagnostic can be generated. + // + // FIXME: perhaps we can add one more mode to GreedyRewriteConfig to + // disable this behavior. + void runOnOperation() override; +}; + +void populateCIRCanonicalizePatterns(RewritePatternSet &patterns) { + // clang-format off + patterns.add< + RemoveRedundantBranches, + RemoveEmptyScope, + RemoveEmptySwitch, + RemoveTrivialTry, + SimplifyCallOp + >(patterns.getContext()); + // clang-format on +} + +void CIRCanonicalizePass::runOnOperation() { + // Collect rewrite patterns. + RewritePatternSet patterns(&getContext()); + populateCIRCanonicalizePatterns(patterns); + + // Collect operations to apply patterns. + SmallVector ops; + getOperation()->walk([&](Operation *op) { + // CastOp here is to perform a manual `fold` in + // applyOpPatternsAndFold + if (isa(op)) + ops.push_back(op); + }); + + // Apply patterns. + if (applyOpPatternsGreedily(ops, std::move(patterns)).failed()) + signalPassFailure(); +} + +} // namespace + +std::unique_ptr mlir::createCIRCanonicalizePass() { + return std::make_unique(); +} diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp index 3db7e5259041..225da527b736 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp @@ -27,114 +27,6 @@ using namespace cir; namespace { -/// Removes branches between two blocks if it is the only branch. -/// -/// From: -/// ^bb0: -/// cir.br ^bb1 -/// ^bb1: // pred: ^bb0 -/// cir.return -/// -/// To: -/// ^bb0: -/// cir.return -struct RemoveRedundantBranches : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - LogicalResult matchAndRewrite(BrOp op, - PatternRewriter &rewriter) const final { - Block *block = op.getOperation()->getBlock(); - Block *dest = op.getDest(); - - if (isa(dest->front())) - return failure(); - - // Single edge between blocks: merge it. - if (block->getNumSuccessors() == 1 && - dest->getSinglePredecessor() == block) { - rewriter.eraseOp(op); - rewriter.mergeBlocks(dest, block); - return success(); - } - - return failure(); - } -}; - -struct RemoveEmptyScope : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - LogicalResult match(ScopeOp op) const final { - return success(op.getRegion().empty() || - (op.getRegion().getBlocks().size() == 1 && - op.getRegion().front().empty())); - } - - void rewrite(ScopeOp op, PatternRewriter &rewriter) const final { - rewriter.eraseOp(op); - } -}; - -struct RemoveEmptySwitch : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - LogicalResult match(SwitchOp op) const final { - return success(op.getRegions().empty()); - } - - void rewrite(SwitchOp op, PatternRewriter &rewriter) const final { - rewriter.eraseOp(op); - } -}; - -struct RemoveTrivialTry : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - LogicalResult match(TryOp op) const final { - // FIXME: also check all catch regions are empty - // return success(op.getTryRegion().hasOneBlock()); - return mlir::failure(); - } - - void rewrite(TryOp op, PatternRewriter &rewriter) const final { - // Move try body to the parent. - assert(op.getTryRegion().hasOneBlock()); - - Block *parentBlock = op.getOperation()->getBlock(); - mlir::Block *tryBody = &op.getTryRegion().getBlocks().front(); - YieldOp y = dyn_cast(tryBody->getTerminator()); - assert(y && "expected well wrapped up try block"); - y->erase(); - - rewriter.inlineBlockBefore(tryBody, parentBlock, Block::iterator(op)); - rewriter.eraseOp(op); - } -}; - -// Remove call exception with empty cleanups -struct SimplifyCallOp : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - LogicalResult match(CallOp op) const final { - // Applicable to cir.call exception ... clean { cir.yield } - mlir::Region *r = &op.getCleanup(); - if (r->empty() || !r->hasOneBlock()) - return failure(); - - mlir::Block *b = &r->getBlocks().back(); - if (&b->back() != &b->front()) - return failure(); - - return success(isa(&b->getOperations().back())); - } - - void rewrite(CallOp op, PatternRewriter &rewriter) const final { - mlir::Block *b = &op.getCleanup().back(); - rewriter.eraseOp(&b->back()); - rewriter.eraseBlock(b); - } -}; - /// Simplify suitable ternary operations into select operations. /// /// For now we only simplify those ternary operations whose true and false @@ -207,10 +99,7 @@ struct SimplifyTernary final : public OpRewritePattern { auto yieldOp = mlir::cast(onlyBlock.getTerminator()); auto yieldValueDefOp = mlir::dyn_cast_if_present( yieldOp.getArgs()[0].getDefiningOp()); - if (!yieldValueDefOp || yieldValueDefOp->getBlock() != &onlyBlock) - return false; - - return true; + return yieldValueDefOp && yieldValueDefOp->getBlock() == &onlyBlock; } }; @@ -242,7 +131,7 @@ struct SimplifySelect : public OpRewritePattern { return mlir::success(); } - // cir.seleft if %0 then #false else #true -> cir.unary not %0 + // cir.select if %0 then #false else #true -> cir.unary not %0 if (!trueValue.getValue() && falseValue.getValue()) { rewriter.replaceOpWithNewOp( op, mlir::cir::UnaryOpKind::Not, op.getCondition()); @@ -260,27 +149,14 @@ struct SimplifySelect : public OpRewritePattern { struct CIRSimplifyPass : public CIRSimplifyBase { using CIRSimplifyBase::CIRSimplifyBase; - // The same operation rewriting done here could have been performed - // by CanonicalizerPass (adding hasCanonicalizer for target Ops and - // implementing the same from above in CIRDialects.cpp). However, it's - // currently too aggressive for static analysis purposes, since it might - // remove things where a diagnostic can be generated. - // - // FIXME: perhaps we can add one more mode to GreedyRewriteConfig to - // disable this behavior. void runOnOperation() override; }; void populateMergeCleanupPatterns(RewritePatternSet &patterns) { // clang-format off patterns.add< - RemoveRedundantBranches, - RemoveEmptyScope, - RemoveEmptySwitch, - RemoveTrivialTry, SimplifyTernary, - SimplifySelect, - SimplifyCallOp + SimplifySelect >(patterns.getContext()); // clang-format on } @@ -293,11 +169,7 @@ void CIRSimplifyPass::runOnOperation() { // Collect operations to apply patterns. SmallVector ops; getOperation()->walk([&](Operation *op) { - // CastOp here is to perform a manual `fold` in - // applyOpPatternsAndFold - if (isa(op)) + if (isa(op)) ops.push_back(op); }); diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index ffe1efbe0839..d675f17042b6 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -3,6 +3,7 @@ add_subdirectory(TargetLowering) add_clang_library(MLIRCIRTransforms LifetimeCheck.cpp LoweringPrepare.cpp + CIRCanonicalize.cpp CIRSimplify.cpp DropAST.cpp IdiomRecognizer.cpp diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 58536ede6700..e360f0470b50 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -206,6 +206,7 @@ class CIRGenConsumer : public clang::ASTConsumer { feOptions.ClangIRLifetimeCheck, lifetimeOpts, feOptions.ClangIRIdiomRecognizer, idiomRecognizerOpts, feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, + codeGenOptions.OptimizationLevel > 0, action == CIRGenAction::OutputType::EmitCIRFlat, action == CIRGenAction::OutputType::EmitMLIR, feOptions.ClangIREnableCallConvLowering, diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index 045e78ccf021..dcfdf7704045 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s void b0(int a, int b) { diff --git a/clang/test/CIR/CodeGen/complex.c b/clang/test/CIR/CodeGen/complex.c index a1cab1070aca..1bdf62fe9666 100644 --- a/clang/test/CIR/CodeGen/complex.c +++ b/clang/test/CIR/CodeGen/complex.c @@ -1,7 +1,7 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-simplify -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-BEFORE %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-simplify -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-BEFORE %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-simplify -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-AFTER %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-simplify -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-AFTER %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-AFTER %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CHECK-AFTER %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefixes=LLVM %s diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp index 5c17ef5d1a74..7238459bd520 100644 --- a/clang/test/CIR/CodeGen/ternary.cpp +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s int x(int y) { diff --git a/clang/test/CIR/Transforms/builtin-assume.cir b/clang/test/CIR/Transforms/builtin-assume.cir index 72afb3812e53..c4f1317abb2b 100644 --- a/clang/test/CIR/Transforms/builtin-assume.cir +++ b/clang/test/CIR/Transforms/builtin-assume.cir @@ -1,6 +1,4 @@ -// RUN: cir-opt --canonicalize -o %t.cir %s -// RUN: FileCheck --input-file %t.cir %s -// RUN: cir-opt -cir-simplify -o %t.cir %s +// RUN: cir-opt -cir-canonicalize -o %t.cir %s // RUN: FileCheck --input-file %t.cir %s !s32i = !cir.int diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 4280768fc4b0..c9d927b7cae7 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s -cir-simplify -o %t.out.cir +// RUN: cir-opt %s -cir-canonicalize -o %t.out.cir // RUN: FileCheck --input-file=%t.out.cir %s #false = #cir.bool : !cir.bool diff --git a/clang/test/CIR/Transforms/select.cir b/clang/test/CIR/Transforms/select.cir index 6d18be0b9439..29a5d1ed1dde 100644 --- a/clang/test/CIR/Transforms/select.cir +++ b/clang/test/CIR/Transforms/select.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt -cir-simplify -o %t.cir %s +// RUN: cir-opt -cir-canonicalize -cir-simplify -o %t.cir %s // RUN: FileCheck --input-file=%t.cir %s !s32i = !cir.int diff --git a/clang/test/CIR/Transforms/simpl.c b/clang/test/CIR/Transforms/simpl.c index efb22ddb3f57..dda9f495ca4c 100644 --- a/clang/test/CIR/Transforms/simpl.c +++ b/clang/test/CIR/Transforms/simpl.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-simplify %s -o %t1.cir 2>&1 | FileCheck -check-prefix=BEFORE %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-simplify %s -o %t2.cir 2>&1 | FileCheck -check-prefix=AFTER %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize %s -o %t1.cir 2>&1 | FileCheck -check-prefix=BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-canonicalize %s -o %t2.cir 2>&1 | FileCheck -check-prefix=AFTER %s #define CHECK_PTR(ptr) \ diff --git a/clang/test/CIR/Transforms/simpl.cir b/clang/test/CIR/Transforms/simpl.cir index 1ebedc323471..3d5d5ec75ad6 100644 --- a/clang/test/CIR/Transforms/simpl.cir +++ b/clang/test/CIR/Transforms/simpl.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s -cir-simplify -o - | FileCheck %s +// RUN: cir-opt %s -cir-canonicalize -o - | FileCheck %s !s32i = !cir.int !s64i = !cir.int diff --git a/clang/test/CIR/Transforms/ternary-fold.cir b/clang/test/CIR/Transforms/ternary-fold.cir index 6778d4744a32..72ba4815b2db 100644 --- a/clang/test/CIR/Transforms/ternary-fold.cir +++ b/clang/test/CIR/Transforms/ternary-fold.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt -cir-simplify -o %t.cir %s +// RUN: cir-opt -cir-canonicalize -cir-simplify -o %t.cir %s // RUN: FileCheck --input-file=%t.cir %s !s32i = !cir.int diff --git a/clang/test/CIR/Transforms/ternary-fold.cpp b/clang/test/CIR/Transforms/ternary-fold.cpp index 5f37a8a36b95..69934da793df 100644 --- a/clang/test/CIR/Transforms/ternary-fold.cpp +++ b/clang/test/CIR/Transforms/ternary-fold.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-simplify %s -o %t1.cir 2>&1 | FileCheck -check-prefix=CIR-BEFORE %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-simplify %s -o %t2.cir 2>&1 | FileCheck -check-prefix=CIR-AFTER %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize %s -o %t1.cir 2>&1 | FileCheck -check-prefix=CIR-BEFORE %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-simplify %s -o %t2.cir 2>&1 | FileCheck -check-prefix=CIR-AFTER %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s int test(bool x) { @@ -23,7 +23,7 @@ int test(bool x) { // CIR-AFTER-NEXT: %{{.+}} = cir.select if %{{.+}} then %[[#A]] else %[[#B]] : (!cir.bool, !s32i, !s32i) -> !s32i // CIR-AFTER: } -// LLVM: define dso_local i32 @_Z4testb +// LLVM: @_Z4testb // LLVM: %{{.+}} = select i1 %{{.+}}, i32 1, i32 2 // LLVM: } @@ -51,6 +51,6 @@ int test2(bool cond) { // CIR-AFTER-NEXT: %{{.+}} = cir.select if %[[#COND]] then %[[#A]] else %[[#B]] : (!cir.bool, !s32i, !s32i) -> !s32i // CIR-AFTER: } -// LLVM: define dso_local i32 @_Z5test2b +// LLVM: @_Z5test2b // LLVM: %{{.+}} = select i1 %{{.+}}, i32 1, i32 2 // LLVM: } diff --git a/clang/test/CIR/mlirprint.c b/clang/test/CIR/mlirprint.c index 83336cc73792..3514eb895381 100644 --- a/clang/test/CIR/mlirprint.c +++ b/clang/test/CIR/mlirprint.c @@ -11,7 +11,7 @@ int foo(void) { } -// CIR: IR Dump After CIRSimplify (cir-simplify) +// CIR: IR Dump After CIRCanonicalize (cir-canonicalize) // CIR: cir.func @foo() -> !s32i // CIR: IR Dump After LoweringPrepare (cir-lowering-prepare) // CIR: cir.func @foo() -> !s32i @@ -19,14 +19,14 @@ int foo(void) { // CIR-NOT: IR Dump After SCFPrepare // CIR: IR Dump After DropAST (cir-drop-ast) // CIR: cir.func @foo() -> !s32i -// CIRFLAT: IR Dump After CIRSimplify (cir-simplify) +// CIRFLAT: IR Dump After CIRCanonicalize (cir-canonicalize) // CIRFLAT: cir.func @foo() -> !s32i // CIRFLAT: IR Dump After LoweringPrepare (cir-lowering-prepare) // CIRFLAT: cir.func @foo() -> !s32i // CIRFLAT: IR Dump After FlattenCFG (cir-flatten-cfg) // CIRFLAT: IR Dump After DropAST (cir-drop-ast) // CIRFLAT: cir.func @foo() -> !s32i -// CIRMLIR: IR Dump After CIRSimplify (cir-simplify) +// CIRMLIR: IR Dump After CIRCanonicalize (cir-canonicalize) // CIRMLIR: IR Dump After LoweringPrepare (cir-lowering-prepare) // CIRMLIR: IR Dump After SCFPrepare (cir-mlir-scf-prepare // CIRMLIR: IR Dump After DropAST (cir-drop-ast) @@ -35,7 +35,7 @@ int foo(void) { // LLVM: IR Dump After // LLVM: define dso_local i32 @foo() -// CIRPASS-NOT: IR Dump After CIRSimplify +// CIRPASS-NOT: IR Dump After CIRCanonicalize // CIRPASS: IR Dump After DropAST // CFGPASS: IR Dump Before FlattenCFG (cir-flatten-cfg) diff --git a/clang/tools/cir-opt/cir-opt.cpp b/clang/tools/cir-opt/cir-opt.cpp index 343f2d317137..e7af0b214462 100644 --- a/clang/tools/cir-opt/cir-opt.cpp +++ b/clang/tools/cir-opt/cir-opt.cpp @@ -38,6 +38,9 @@ int main(int argc, char **argv) { ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertMLIRToLLVMPass(); }); + ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { + return mlir::createCIRCanonicalizePass(); + }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return mlir::createCIRSimplifyPass(); }); From 8b97309cf003080dbc48fdf4551647b1b27287ff Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Thu, 19 Sep 2024 21:17:12 +0300 Subject: [PATCH 1868/2301] [CIR][CodeGen] Implement union cast (#867) Currently the C style cast is not implemented/supported for unions. This PR adds support for union casts as done in `CGExprAgg.cpp`. I have also added an extra test in `union-init.c`. --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 16 ++++++++++++++++ clang/test/CIR/CodeGen/union-init.c | 15 +++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 9ed791a75f83..d23e1e8c418d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -980,6 +980,22 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { break; } + case CK_ToUnion: { + // Evaluate even if the destination is ignored. + if (Dest.isIgnored()) { + CGF.buildAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), + /*ignoreResult=*/true); + break; + } + + // GCC union extension + QualType Ty = E->getSubExpr()->getType(); + Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty)); + buildInitializationToLValue(E->getSubExpr(), + CGF.makeAddrLValue(CastPtr, Ty)); + break; + } + case CK_LValueToRValue: // If we're loading from a volatile type, force the destination // into existence. diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index 8838b67ff283..122999de23c2 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -46,3 +46,18 @@ unsigned is_little(void) { // CHECK: %[[VAL_1:.*]] = cir.get_global @is_little.one : !cir.ptr // CHECK: %[[VAL_2:.*]] = cir.cast(bitcast, %[[VAL_1]] : !cir.ptr), !cir.ptr // CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = "c"} : !cir.ptr -> !cir.ptr> + +typedef union { + int x; +} U; + +// CHECK: %[[VAL_0:.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK: %[[VAL_1:.*]] = cir.alloca !ty_U, !cir.ptr, ["u", init] {alignment = 4 : i64} +// CHECK: cir.store %arg0, %[[VAL_0]] : !s32i, !cir.ptr +// CHECK: %[[VAL_2:.*]] = cir.load %[[VAL_0]] : !cir.ptr, !s32i +// CHECK: %[[VAL_3:.*]] = cir.cast(bitcast, %[[VAL_1]] : !cir.ptr), !cir.ptr +// CHECK: cir.store %[[VAL_2]], %[[VAL_3]] : !s32i, !cir.ptr + +void union_cast(int x) { + U u = (U) x; +} From 793e851fba0230ea4a0e0d3c45ba02509f879276 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 19 Sep 2024 18:09:41 -0700 Subject: [PATCH 1869/2301] [CIR][CIRGen] Exceptions: unlock nested try/catch support --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 17 +------- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 46 ++++++++++++++++++++++ 2 files changed, 48 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 775cc3b5cd80..a147f8d119f4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -284,21 +284,8 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // Emit the EH cleanup if required. if (RequiresEHCleanup) { - mlir::cir::TryOp tryOp = nullptr; - if (CGM.globalOpContext) { - SmallVector trys; - CGM.globalOpContext.walk( - [&](mlir::cir::TryOp op) { trys.push_back(op); }); - assert(trys.size() == 1 && "unknow global initialization style"); - tryOp = trys[0]; - } else { - SmallVector trys; - auto funcOp = dyn_cast(CurFn); - funcOp.walk([&](mlir::cir::TryOp op) { trys.push_back(op); }); - assert(trys.size() == 1 && "nested or multiple try/catch NYI"); - tryOp = trys[0]; - } - + mlir::cir::TryOp tryOp = + ehEntry->getParentOp()->getParentOfType(); assert(tryOp && "expected available cir.try"); auto *nextAction = getEHDispatchBlock(EHParent, tryOp); (void)nextAction; diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index c093c7ab318e..67a851dff2de 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -264,3 +264,49 @@ void yo3(bool x) { // LLVM: br label %[[RET]], // LLVM: [[RET]]: // LLVM: ret void + +void yo2(bool x) { + int r = 1; + try { + Vec v1, v2; + try { + Vec v3, v4; + } catch (...) { + r++; + } + } catch (...) { + r++; + } +} + +// CIR: cir.scope { +// CIR: %[[V1:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v1" +// CIR: %[[V2:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v2" +// CIR: cir.try { +// CIR: cir.call exception @_ZN3VecC1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR: cir.call exception @_ZN3VecC1Ev(%[[V2]]) : (!cir.ptr) -> () cleanup { +// CIR: cir.call @_ZN3VecD1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } +// CIR: cir.scope { +// CIR: %[[V3:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v3" +// CIR: %[[V4:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v4" +// CIR: cir.try { +// CIR: cir.call exception @_ZN3VecC1Ev(%[[V3]]) : (!cir.ptr) -> () +// CIR: cir.call exception @_ZN3VecC1Ev(%[[V4]]) : (!cir.ptr) -> () cleanup { +// CIR: cir.call @_ZN3VecD1Ev(%[[V3]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } +// CIR: cir.call @_ZN3VecD1Ev(%[[V4]]) : (!cir.ptr) -> () +// CIR: cir.call @_ZN3VecD1Ev(%[[V3]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } catch [type #cir.all { +// CIR: cir.catch_param -> !cir.ptr +// CIR: }] +// CIR: } +// CIR: cir.call @_ZN3VecD1Ev(%[[V2]]) : (!cir.ptr) -> () +// CIR: cir.call @_ZN3VecD1Ev(%[[V1]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } catch [type #cir.all { +// CIR: cir.catch_param -> !cir.ptr +// CIR: }] From 795513e69707319aaf66790d961ea44833839608 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Fri, 20 Sep 2024 20:36:34 +0300 Subject: [PATCH 1870/2301] [CIR][CodeGen] Fix array initialization in CIRGenExprAgg (#852) Mistakenly closed #850 https://github.com/llvm/clangir/pull/850#pullrequestreview-2310531397 This PR fixes array initialization for expression arguments. Consider the following code snippet `test.c`: ``` typedef struct { int a; int b[2]; } A; int bar() { return 42; } void foo() { A a = {bar(), {}}; } ``` When ran with `bin/clang test.c -Xclang -fclangir -Xclang -emit-cir -S -o -`, It produces the following error: ``` ~/clangir/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp:483: void {anonymous}::AggExprEmitter::buildArrayInit(cir::Address, mlir::cir::ArrayType, clang::QualType, clang::Expr*, llvm::ArrayRef, clang::Expr*): Assertion `NumInitElements != 0' failed. ``` The error can be traced back to `CIRGenExprAgg.cpp`, and the fix is simple. It is possible to have an empty array initialization as an expression argument! --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 1 - clang/test/CIR/CodeGen/array-init.cpp | 38 +++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/array-init.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index d23e1e8c418d..4c321f18779f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -480,7 +480,6 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, uint64_t NumInitElements = Args.size(); uint64_t NumArrayElements = AType.getSize(); - assert(NumInitElements != 0 && "expected at least one initializaed value"); assert(NumInitElements <= NumArrayElements); QualType elementType = diff --git a/clang/test/CIR/CodeGen/array-init.cpp b/clang/test/CIR/CodeGen/array-init.cpp new file mode 100644 index 000000000000..e051c31a9c6c --- /dev/null +++ b/clang/test/CIR/CodeGen/array-init.cpp @@ -0,0 +1,38 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef struct { + int a; + int b[2]; +} A; + +int bar() { + return 42; +} + +void foo() { + A a = {bar(), {}}; +} +// CHECK: %[[VAL_0:.*]] = cir.alloca !ty_A, !cir.ptr, ["a", init] +// CHECK: %[[VAL_1:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp", init] +// CHECK: %[[VAL_2:.*]] = cir.get_member %[[VAL_0]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CHECK: %[[VAL_3:.*]] = cir.call @_Z3barv() : () -> !s32i +// CHECK: cir.store %[[VAL_3]], %[[VAL_2]] : !s32i, !cir.ptr +// CHECK: %[[VAL_4:.*]] = cir.get_member %[[VAL_0]][1] {name = "b"} : !cir.ptr -> !cir.ptr> +// CHECK: %[[VAL_5:.*]] = cir.cast(array_to_ptrdecay, %[[VAL_4]] : !cir.ptr>), !cir.ptr +// CHECK: cir.store %[[VAL_5]], %[[VAL_1]] : !cir.ptr, !cir.ptr> +// CHECK: %[[VAL_6:.*]] = cir.const #cir.int<2> : !s64i +// CHECK: %[[VAL_7:.*]] = cir.ptr_stride(%[[VAL_5]] : !cir.ptr, %[[VAL_6]] : !s64i), !cir.ptr +// CHECK: cir.do { +// CHECK: %[[VAL_8:.*]] = cir.load %[[VAL_1]] : !cir.ptr>, !cir.ptr +// CHECK: %[[VAL_9:.*]] = cir.const #cir.int<0> : !s32i +// CHECK: cir.store %[[VAL_9]], %[[VAL_8]] : !s32i, !cir.ptr +// CHECK: %[[VAL_10:.*]] = cir.const #cir.int<1> : !s64i +// CHECK: %[[VAL_11:.*]] = cir.ptr_stride(%[[VAL_8]] : !cir.ptr, %[[VAL_10]] : !s64i), !cir.ptr +// CHECK: cir.store %[[VAL_11]], %[[VAL_1]] : !cir.ptr, !cir.ptr> +// CHECK: cir.yield +// CHECK: } while { +// CHECK: %[[VAL_8:.*]] = cir.load %[[VAL_1]] : !cir.ptr>, !cir.ptr +// CHECK: %[[VAL_9:.*]] = cir.cmp(ne, %[[VAL_8]], %[[VAL_7]]) : !cir.ptr, !cir.bool +// CHECK: cir.condition(%[[VAL_9]]) +// CHECK: } \ No newline at end of file From ff2dc06fb8ad5eb9d801edbca6c0b0836469a128 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 20 Sep 2024 14:04:14 -0400 Subject: [PATCH 1871/2301] [CIR][CIRGen] Correct isSized predicate for vector type (#869) As title, if element type of vector type is sized, then the vector type should be deemed sized. This would enable us generate code for neon without triggering assertion --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 9 +++++---- clang/test/CIR/CodeGen/vectype-issized.c | 15 +++++++++++++++ 2 files changed, 20 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/vectype-issized.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 1ba6e9447a09..674fb4d781ba 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -61,11 +61,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { RecordNames["anon"] = 0; // in order to start from the name "anon.0" } - std::string getUniqueAnonRecordName() { - return getUniqueRecordName("anon"); - } + std::string getUniqueAnonRecordName() { return getUniqueRecordName("anon"); } - std::string getUniqueRecordName(const std::string& baseName) { + std::string getUniqueRecordName(const std::string &baseName) { auto it = RecordNames.find(baseName); if (it == RecordNames.end()) { RecordNames[baseName] = 0; @@ -500,6 +498,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::ArrayType, mlir::cir::BoolType, mlir::cir::IntType, mlir::cir::CIRFPTypeInterface>(ty)) return true; + if (mlir::isa(ty)) { + return isSized(mlir::cast(ty).getEltType()); + } assert(0 && "Unimplemented size for type"); return false; } diff --git a/clang/test/CIR/CodeGen/vectype-issized.c b/clang/test/CIR/CodeGen/vectype-issized.c new file mode 100644 index 000000000000..380ed7a13f28 --- /dev/null +++ b/clang/test/CIR/CodeGen/vectype-issized.c @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir -emit-cir -target-feature +neon %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir -emit-llvm -target-feature +neon %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +typedef __attribute__((neon_vector_type(8))) short c; +void d() { c a[8]; } + +// CIR-LABEL: d +// CIR: {{%.*}} = cir.alloca !cir.array x 8>, +// CIR-SAME: !cir.ptr x 8>>, ["a"] +// CIR-SAME: {alignment = 16 : i64} + +// LLVM-LABEL: d +// LLVM: {{%.*}} = alloca [8 x <8 x i16>], i64 1, align 16 From 59174c9c66f1f214efb1ac76e912d6bafc0128d2 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 20 Sep 2024 14:07:20 -0400 Subject: [PATCH 1872/2301] [CIR][CIRGen][Builtin][Neon] Lower builtin_neon_vrnda_v and builtin_neon_vrndaq_v (#871) as title. This also added NeonType support for Float32 Co-authored-by: Guojin He --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 11 ++-- .../CIR/CodeGen/arm-neon-directed-rounding.c | 60 +++++++++++++++++++ 2 files changed, 66 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 493da9220dcb..dbe25e8a6a85 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -1390,7 +1390,9 @@ static mlir::Type GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, // so we use v16i8 to represent poly128 and get pattern matched. llvm_unreachable("NYI"); case NeonTypeFlags::Float32: - llvm_unreachable("NYI"); + return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), + CGF->getCIRGenModule().FloatTy, + V1Ty ? 1 : (2 << IsQuad)); case NeonTypeFlags::Float64: llvm_unreachable("NYI"); } @@ -1616,9 +1618,6 @@ mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf, if (shift > 0) llvm_unreachable("Argument shift NYI"); - if (builtinID != clang::NEON::BI__builtin_neon_vrndns_f32) - llvm_unreachable("NYT"); - CIRGenBuilderTy &builder = cgf.getBuilder(); for (unsigned j = 0; j < argTypes.size(); ++j) { if (isConstrainedFPIntrinsic) { @@ -2416,7 +2415,9 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vrnda_v: case NEON::BI__builtin_neon_vrndaq_v: { - llvm_unreachable("NYI"); + assert(!MissingFeatures::buildConstrainedFPCall()); + return buildNeonCall(BuiltinID, *this, {Ty}, Ops, "llvm.round", Ty, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndih_f16: { llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/arm-neon-directed-rounding.c b/clang/test/CIR/CodeGen/arm-neon-directed-rounding.c index 5487919f8050..92b4a9298eac 100644 --- a/clang/test/CIR/CodeGen/arm-neon-directed-rounding.c +++ b/clang/test/CIR/CodeGen/arm-neon-directed-rounding.c @@ -38,3 +38,63 @@ float32_t test_vrndns_f32(float32_t a) { // LLVM: store float [[RES_COPY1]], ptr [[RET_P:%.*]], align 4, // LLVM: [[RET_VAL:%.*]] = load float, ptr [[RET_P]], align 4, // LLVM: ret float [[RET_VAL]] + +float32x2_t test_vrnda_f32(float32x2_t a) { + return vrnda_f32(a); +} + +// CIR: cir.func internal private @vrnda_f32(%arg0: !cir.vector +// CIR: cir.store %arg0, [[ARG_SAVE:%.*]] : !cir.vector, !cir.ptr> +// CIR: [[INTRIN_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr>, !cir.vector +// CIR: [[INTRIN_ARG_CAST:%.*]] = cir.cast(bitcast, [[INTRIN_ARG]] : !cir.vector), !cir.vector +// CIR: [[INTRIN_ARG_BACK:%.*]] = cir.cast(bitcast, [[INTRIN_ARG_CAST]] : !cir.vector), !cir.vector +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.round" [[INTRIN_ARG_BACK]] : (!cir.vector) -> !cir.vector +// CIR: cir.return {{%.*}} : !cir.vector + +// CIR-LABEL: test_vrnda_f32 +// CIR: cir.store %arg0, [[ARG_SAVE0:%.*]] : !cir.vector, !cir.ptr> +// CIR: [[FUNC_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr>, !cir.vector +// CIR: [[FUNC_RES:%.*]] = cir.call @vrnda_f32([[FUNC_ARG]]) : (!cir.vector) -> !cir.vector +// CIR: cir.store [[FUNC_RES]], [[RET_P:%.*]] : !cir.vector, !cir.ptr> +// CIR: [[RET_VAL:%.*]] = cir.load [[RET_P]] : !cir.ptr>, !cir.vector +// CIR: cir.return [[RET_VAL]] : !cir.vector + +// LLVM: define dso_local <2 x float> @test_vrnda_f32(<2 x float> [[ARG:%.*]]) +// LLVM: store <2 x float> [[ARG]], ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: [[P0:%.*]] = load <2 x float>, ptr [[ARG_SAVE]], align 8, +// LLVM: store <2 x float> [[P0]], ptr [[P0_SAVE:%.*]], align 8, +// LLVM: [[INTRIN_ARG:%.*]] = load <2 x float>, ptr [[P0_SAVE]], align 8, +// LLVM: [[INTRIN_RES:%.*]] = call <2 x float> @llvm.round.v2f32(<2 x float> [[INTRIN_ARG]]) +// LLVM: store <2 x float> [[INTRIN_RES]], ptr [[RES_SAVE0:%.*]], align 8, +// LLVM: [[RES_COPY0:%.*]] = load <2 x float>, ptr [[RES_SAVE0]], align 8, +// LLVM: store <2 x float> [[RES_COPY0]], ptr [[RES_SAVE1:%.*]], align 8, +// LLVM: [[RES_COPY1:%.*]] = load <2 x float>, ptr [[RES_SAVE1]], align 8, +// LLVM: store <2 x float> [[RES_COPY1]], ptr [[RET_P:%.*]], align 8, +// LLVM: [[RET_VAL:%.*]] = load <2 x float>, ptr [[RET_P]], align 8, +// LLVM: ret <2 x float> [[RET_VAL]] + +float32x4_t test_vrndaq_f32(float32x4_t a) { + return vrndaq_f32(a); +} + +// CIR: cir.func internal private @vrndaq_f32(%arg0: !cir.vector +// CIR: cir.store %arg0, [[ARG_SAVE:%.*]] : !cir.vector, !cir.ptr> +// CIR: [[INTRIN_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr>, !cir.vector +// CIR: [[INTRIN_ARG_CAST:%.*]] = cir.cast(bitcast, [[INTRIN_ARG]] : !cir.vector), !cir.vector +// CIR: [[INTRIN_ARG_BACK:%.*]] = cir.cast(bitcast, [[INTRIN_ARG_CAST]] : !cir.vector), !cir.vector +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.round" [[INTRIN_ARG_BACK]] : (!cir.vector) -> !cir.vector +// CIR: cir.return {{%.*}} : !cir.vector + +// LLVM: define dso_local <4 x float> @test_vrndaq_f32(<4 x float> [[ARG:%.*]]) +// LLVM: store <4 x float> [[ARG]], ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: [[P0:%.*]] = load <4 x float>, ptr [[ARG_SAVE]], align 16, +// LLVM: store <4 x float> [[P0]], ptr [[P0_SAVE:%.*]], align 16, +// LLVM: [[INTRIN_ARG:%.*]] = load <4 x float>, ptr [[P0_SAVE]], align 16, +// LLVM: [[INTRIN_RES:%.*]] = call <4 x float> @llvm.round.v4f32(<4 x float> [[INTRIN_ARG]]) +// LLVM: store <4 x float> [[INTRIN_RES]], ptr [[RES_SAVE0:%.*]], align 16, +// LLVM: [[RES_COPY0:%.*]] = load <4 x float>, ptr [[RES_SAVE0]], align 16, +// LLVM: store <4 x float> [[RES_COPY0]], ptr [[RES_SAVE1:%.*]], align 16, +// LLVM: [[RES_COPY1:%.*]] = load <4 x float>, ptr [[RES_SAVE1]], align 16, +// LLVM: store <4 x float> [[RES_COPY1]], ptr [[RET_P:%.*]], align 16, +// LLVM: [[RET_VAL:%.*]] = load <4 x float>, ptr [[RET_P]], align 16, +// LLVM: ret <4 x float> [[RET_VAL]] From a43bd2f1ae71cd5c0ea76f3ea2cf2115c2422db4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 Sep 2024 11:58:03 -0700 Subject: [PATCH 1873/2301] [CIR][CIRGen] Handle VisitCXXRewrittenBinaryOperator for scalars --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/test/CIR/CodeGen/binop.cpp | 24 +++++++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 4116925c448a..d86a40f506ff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -768,7 +768,7 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { - llvm_unreachable("NYI"); + return Visit(E->getSemanticForm()); } // Other Operators. diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index dcfdf7704045..ce68d5a4e9b3 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -O1 -Wno-unused-value -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s void b0(int a, int b) { @@ -94,3 +94,25 @@ void testFloatingPointBinOps(float a, float b) { a - b; // CHECK: cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.float } + +struct S {}; + +struct HasOpEq +{ + bool operator==(const S& other); +}; + +void rewritten_binop() +{ + HasOpEq s1; + S s2; + if (s1 != s2) + return; +} + +// CHECK-LABEL: _Z15rewritten_binopv +// CHECK: cir.scope { +// CHECK: cir.call @_ZN7HasOpEqeqERK1S +// CHECK: %[[COND:.*]] = cir.unary(not +// CHECK: cir.if %[[COND]] +// CHECK: cir.return \ No newline at end of file From c8d3e1709027aa2575f638bbd62e56277d9fd032 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 Sep 2024 12:05:36 -0700 Subject: [PATCH 1874/2301] [CIR][CIRGen][NFC] Cleanups: add skeleton for DominatingValue::saved_type::save --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index a147f8d119f4..cc0df10772fa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -57,7 +57,15 @@ Address CIRGenFunction::createCleanupActiveFlag() { llvm_unreachable("NYI"); } DominatingValue::saved_type DominatingValue::saved_type::save(CIRGenFunction &CGF, RValue rv) { - llvm_unreachable("NYI"); + if (rv.isScalar()) { + llvm_unreachable("scalar NYI"); + } + + if (rv.isComplex()) { + llvm_unreachable("complex NYI"); + } + + llvm_unreachable("aggregate NYI"); } /// Deactive a cleanup that was created in an active state. From f25fd5c6152fb2d72c2ecc73ad6fcfa0647982ed Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 20 Sep 2024 19:39:21 -0400 Subject: [PATCH 1875/2301] [CIR][Infra] Run check-clang-cir against any branch based PR (#873) --- .github/workflows/clang-cir-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/clang-cir-tests.yml b/.github/workflows/clang-cir-tests.yml index c38e952d1f02..3f42b141c4ab 100644 --- a/.github/workflows/clang-cir-tests.yml +++ b/.github/workflows/clang-cir-tests.yml @@ -15,7 +15,7 @@ on: - '!llvm/**' pull_request: branches: - - 'main' + - '**' paths: - 'clang/**' - '.github/workflows/clang-cir-tests.yml' From ff392d89d64c2da87324331718d77b383cef7c3f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 Sep 2024 17:46:49 -0700 Subject: [PATCH 1876/2301] [CIR][CIRGen][NFC] Cleanups: add more skeleton to pushFullExprCleanup It will hit another assert when calling initFullExprCleanup. --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 38 ++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 0e284477cfa7..86f2d8300780 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1909,6 +1909,11 @@ class CIRGenFunction : public CIRGenTypeCache { // we know if a temporary should be destroyed conditionally. ConditionalEvaluation *OutermostConditional = nullptr; + template + typename DominatingValue::saved_type saveValueInCond(T value) { + return DominatingValue::save(*this, value); + } + /// Push a cleanup to be run at the end of the current full-expression. Safe /// against the possibility that we're currently inside a /// conditionally-evaluated expression. @@ -1919,14 +1924,13 @@ class CIRGenFunction : public CIRGenTypeCache { if (!isInConditionalBranch()) return EHStack.pushCleanup(kind, A...); - llvm_unreachable("NYI"); // Stash values in a tuple so we can guarantee the order of saves. - // typedef std::tuple::saved_type...> - // SavedTuple; SavedTuple Saved{saveValueInCond(A)...}; + typedef std::tuple::saved_type...> SavedTuple; + SavedTuple Saved{saveValueInCond(A)...}; - // typedef EHScopeStack::ConditionalCleanup CleanupType; - // EHStack.pushCleanupTuple(kind, Saved); - // initFullExprCleanup(); + typedef EHScopeStack::ConditionalCleanup CleanupType; + EHStack.pushCleanupTuple(kind, Saved); + initFullExprCleanup(); } /// Set up the last cleanup that was pushed as a conditional @@ -2286,6 +2290,28 @@ class CIRGenFunction : public CIRGenTypeCache { QualType getVarArgType(const Expr *Arg); }; +/// Helper class with most of the code for saving a value for a +/// conditional expression cleanup. +struct DominatingCIRValue { + typedef llvm::PointerIntPair saved_type; + + /// Answer whether the given value needs extra work to be saved. + static bool needsSaving(mlir::Value value) { llvm_unreachable("NYI"); } + + static saved_type save(CIRGenFunction &CGF, mlir::Value value); + static mlir::Value restore(CIRGenFunction &CGF, saved_type value); +}; + +inline DominatingCIRValue::saved_type +DominatingCIRValue::save(CIRGenFunction &CGF, mlir::Value value) { + llvm_unreachable("NYI"); +} + +inline mlir::Value DominatingCIRValue::restore(CIRGenFunction &CGF, + saved_type value) { + llvm_unreachable("NYI"); +} + /// A specialization of DominatingValue for RValue. template <> struct DominatingValue { typedef RValue type; From 02c5b251c0cc262baa5aff91158e088feae1a267 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Mon, 23 Sep 2024 20:17:12 +0300 Subject: [PATCH 1877/2301] [CIR][CodeGen][BugFix] don't place alloca before the label (#875) This PR fixes the case, when a temporary var is used, and `alloca` operation is inserted in the block start before the `label` operation. Implementation: when we search for the `alloca` place in a block, we take label operations into account as well. Fix https://github.com/llvm/clangir/issues/870 --------- Co-authored-by: Bruno Cardoso Lopes --- .../clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 8 ++++---- clang/test/CIR/CodeGen/goto.cpp | 18 +++++++++++++++++- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index ed7d13588ec2..bd4c60bb1a61 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -538,14 +538,14 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { // ---------------------- // OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) { - auto lastAlloca = + auto last = std::find_if(block->rbegin(), block->rend(), [](mlir::Operation &op) { - return mlir::isa(&op); + return mlir::isa(&op); }); - if (lastAlloca != block->rend()) + if (last != block->rend()) return OpBuilder::InsertPoint(block, - ++mlir::Block::iterator(&*lastAlloca)); + ++mlir::Block::iterator(&*last)); return OpBuilder::InsertPoint(block, block->begin()); }; diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 06870feba910..5a8d598d95cd 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -271,4 +271,20 @@ void flatLoopWithNoTerminatorInFront(int* ptr) { // CHECK: ^bb[[#RETURN]]: // CHECK: cir.return // CHECK: } -// CHECK:} \ No newline at end of file +// CHECK:} + +struct S {}; +struct S get(); +void bar(struct S); + +void foo() { + { + label: + bar(get()); + } +} + +// NOFLAT: cir.func @_Z3foov() +// NOFLAT: cir.scope { +// NOFLAT: cir.label "label" +// NOFLAT: %0 = cir.alloca !ty_S, !cir.ptr, ["agg.tmp0"] From 2e2878f5cada5a0b03cd478a69ed7fc652063602 Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Mon, 23 Sep 2024 13:22:47 -0700 Subject: [PATCH 1878/2301] [CIR][CIRGen] Allow constant evaluation of int annotation (#874) __attribute__((annotate()) was only accepting integer literals, preventing some meta-programming usage for example. This should be extended to some other kinds of types. --------- Co-authored-by: Bruno Cardoso Lopes --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 22 ++++++++++----- .../CodeGen/attribute-annotate-multiple.cpp | 28 +++++++++++++------ 2 files changed, 35 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 0922a29934b9..995541395de5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -33,6 +33,8 @@ #include "mlir/IR/OperationSupport.h" #include "mlir/IR/SymbolTable.h" #include "mlir/IR/Verifier.h" +#include "clang/AST/Expr.h" +#include "clang/Basic/Cuda.h" #include "clang/CIR/MissingFeatures.h" #include "clang/AST/ASTConsumer.h" @@ -3330,16 +3332,22 @@ mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(AnnotateAttr *attr) { llvm::SmallVector args; args.reserve(exprs.size()); for (Expr *e : exprs) { + auto &ce = *cast(e); if (auto *const strE = - ::clang::dyn_cast(e->IgnoreParenCasts())) { + clang::dyn_cast(ce.IgnoreParenCasts())) { // Add trailing null character as StringLiteral->getString() does not args.push_back(builder.getStringAttr(strE->getString())); - } else if (auto *const intE = ::clang::dyn_cast( - e->IgnoreParenCasts())) { - args.push_back(mlir::IntegerAttr::get( - mlir::IntegerType::get(builder.getContext(), - intE->getValue().getBitWidth()), - intE->getValue())); + } else if (ce.hasAPValueResult()) { + // Handle case which can be evaluated to some numbers, not only literals + const auto &ap = ce.getAPValueResult(); + if (ap.isInt()) { + args.push_back(mlir::IntegerAttr::get( + mlir::IntegerType::get(builder.getContext(), + ap.getInt().getBitWidth()), + ap.getInt())); + } else { + llvm_unreachable("NYI like float, fixed-point, array..."); + } } else { llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp b/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp index e67975bb9858..ff970f3919f4 100644 --- a/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp +++ b/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp @@ -5,6 +5,10 @@ double *a __attribute__((annotate("withargs", "21", 12 ))); int *b __attribute__((annotate("withargs", "21", 12 ))); void *c __attribute__((annotate("noargvar"))); + +enum : char { npu1 = 42}; +int tile __attribute__((annotate("cir.aie.device.tile", npu1))) = 7; + void foo(int i) __attribute__((annotate("noargfunc"))) { } // redeclare with more annotate @@ -20,6 +24,8 @@ void bar() __attribute__((annotate("withargfunc", "os", 22))) { // BEFORE-SAME: [#cir.annotation] // BEFORE: cir.global external @c = #cir.ptr : !cir.ptr // BEFORE-SAME: [#cir.annotation] +// BEFORE: cir.global external @tile = #cir.int<7> : !s32i +// BEFORE-SAME: #cir.annotation] // BEFORE: cir.func @_Z3fooi(%arg0: !s32i) [#cir.annotation, // BEFORE-SAME: #cir.annotation] @@ -31,6 +37,7 @@ void bar() __attribute__((annotate("withargfunc", "os", 22))) { // AFTER-SAME: ["a", #cir.annotation], // AFTER-SAME: ["b", #cir.annotation], // AFTER-SAME: ["c", #cir.annotation], +// AFTER-SAME: ["tile", #cir.annotation], // AFTER-SAME: ["_Z3fooi", #cir.annotation], // AFTER-SAME: ["_Z3fooi", #cir.annotation], // AFTER-SAME: ["_Z3barv", #cir.annotation]]>, @@ -39,20 +46,23 @@ void bar() __attribute__((annotate("withargfunc", "os", 22))) { // LLVM: @a = global ptr null // LLVM: @b = global ptr null // LLVM: @c = global ptr null +// LLVM: @tile = global i32 7 // LLVM: @.str.annotation = private unnamed_addr constant [9 x i8] c"withargs\00", section "llvm.metadata" // LLVM: @.str.1.annotation = private unnamed_addr constant [{{[0-9]+}} x i8] c"{{.*}}attribute-annotate-multiple.cpp\00", section "llvm.metadata" // LLVM: @.str.annotation.arg = private unnamed_addr constant [3 x i8] c"21\00", align 1 // LLVM: @.args.annotation = private unnamed_addr constant { ptr, i32 } { ptr @.str.annotation.arg, i32 12 }, section "llvm.metadata" // LLVM: @.str.2.annotation = private unnamed_addr constant [9 x i8] c"noargvar\00", section "llvm.metadata" -// LLVM: @.str.3.annotation = private unnamed_addr constant [10 x i8] c"noargfunc\00", section "llvm.metadata" -// LLVM: @.str.4.annotation = private unnamed_addr constant [12 x i8] c"withargfunc\00", section "llvm.metadata" +// LLVM: @.str.3.annotation = private unnamed_addr constant [20 x i8] c"cir.aie.device.tile\00", section "llvm.metadata" +// LLVM: @.args.1.annotation = private unnamed_addr constant { i8 } { i8 42 }, section "llvm.metadata" +// LLVM: @.str.4.annotation = private unnamed_addr constant [10 x i8] c"noargfunc\00", section "llvm.metadata" +// LLVM: @.str.5.annotation = private unnamed_addr constant [12 x i8] c"withargfunc\00", section "llvm.metadata" // LLVM: @.str.1.annotation.arg = private unnamed_addr constant [3 x i8] c"os\00", align 1 -// LLVM: @.args.1.annotation = private unnamed_addr constant { ptr, i32 } -// LLVM-SAME: { ptr @.str.1.annotation.arg, i32 23 }, section "llvm.metadata" // LLVM: @.args.2.annotation = private unnamed_addr constant { ptr, i32 } +// LLVM-SAME: { ptr @.str.1.annotation.arg, i32 23 }, section "llvm.metadata" +// LLVM: @.args.3.annotation = private unnamed_addr constant { ptr, i32 } // LLVM-SAME: { ptr @.str.1.annotation.arg, i32 22 }, section "llvm.metadata" -// LLVM: @llvm.global.annotations = appending global [6 x { ptr, ptr, ptr, i32, ptr }] +// LLVM: @llvm.global.annotations = appending global [7 x { ptr, ptr, ptr, i32, ptr }] // LLVM-SAME: [{ ptr, ptr, ptr, i32, ptr } // LLVM-SAME: { ptr @a, ptr @.str.annotation, ptr @.str.1.annotation, i32 5, ptr @.args.annotation }, // LLVM-SAME: { ptr, ptr, ptr, i32, ptr } @@ -60,11 +70,13 @@ void bar() __attribute__((annotate("withargfunc", "os", 22))) { // LLVM-SAME: { ptr, ptr, ptr, i32, ptr } // LLVM-SAME: { ptr @c, ptr @.str.2.annotation, ptr @.str.1.annotation, i32 7, ptr null }, // LLVM-SAME: { ptr, ptr, ptr, i32, ptr } -// LLVM-SAME: { ptr @_Z3fooi, ptr @.str.3.annotation, ptr @.str.1.annotation, i32 8, ptr null }, +// LLVM-SAME: { ptr @tile, ptr @.str.3.annotation, ptr @.str.1.annotation, i32 10, ptr @.args.1.annotation }, +// LLVM-SAME: { ptr, ptr, ptr, i32, ptr } +// LLVM-SAME: { ptr @_Z3fooi, ptr @.str.4.annotation, ptr @.str.1.annotation, i32 12, ptr null }, // LLVM-SAME: { ptr, ptr, ptr, i32, ptr } -// LLVM-SAME: { ptr @_Z3fooi, ptr @.str.4.annotation, ptr @.str.1.annotation, i32 8, ptr @.args.1.annotation }, +// LLVM-SAME: { ptr @_Z3fooi, ptr @.str.5.annotation, ptr @.str.1.annotation, i32 12, ptr @.args.2.annotation }, // LLVM-SAME: { ptr, ptr, ptr, i32, ptr } -// LLVM-SAME: { ptr @_Z3barv, ptr @.str.4.annotation, ptr @.str.1.annotation, i32 12, ptr @.args.2.annotation }], +// LLVM-SAME: { ptr @_Z3barv, ptr @.str.5.annotation, ptr @.str.1.annotation, i32 16, ptr @.args.3.annotation }], // LLVM-SAME: section "llvm.metadata" // LLVM: define dso_local void @_Z3fooi(i32 %0) From e4f4b87c2c2a6b8fe94fa67d333565eef4a6c871 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 Sep 2024 18:51:54 -0700 Subject: [PATCH 1879/2301] [CIR][CIRGen] Cleanups: handle conditional cleanups Just as the title says, but only covers non-exception path, that's coming next. --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 55 +++++++++++--- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 47 +++++++++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 23 ++++-- .../test/CIR/CodeGen/conditional-cleanup.cpp | 74 +++++++++++++++++++ clang/test/CIR/CodeGen/new-null.cpp | 2 +- 5 files changed, 172 insertions(+), 29 deletions(-) create mode 100644 clang/test/CIR/CodeGen/conditional-cleanup.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index cc0df10772fa..4737890eef59 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -53,7 +53,31 @@ void CIRGenFunction::buildCXXTemporary(const CXXTemporary *Temporary, /*useEHCleanup*/ true); } -Address CIRGenFunction::createCleanupActiveFlag() { llvm_unreachable("NYI"); } +Address CIRGenFunction::createCleanupActiveFlag() { + mlir::Location loc = currSrcLoc ? *currSrcLoc : builder.getUnknownLoc(); + + // Create a variable to decide whether the cleanup needs to be run. + // FIXME: set the insertion point for the alloca to be at the entry + // basic block of the previous scope, not the entry block of the function. + Address active = CreateTempAllocaWithoutCast( + builder.getBoolTy(), CharUnits::One(), loc, "cleanup.cond"); + mlir::Value falseVal, trueVal; + { + // Place true/false flags close to their allocas. + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPointAfterValue(active.getPointer()); + falseVal = builder.getFalse(loc); + trueVal = builder.getTrue(loc); + } + + // Initialize it to false at a site that's guaranteed to be run + // before each evaluation. + setBeforeOutermostConditional(falseVal, active); + + // Initialize it to true at the current location. + builder.createStore(loc, trueVal, active); + return active; +} DominatingValue::saved_type DominatingValue::saved_type::save(CIRGenFunction &CGF, RValue rv) { @@ -129,21 +153,30 @@ static void destroyOptimisticNormalEntry(CIRGenFunction &CGF, static void buildCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, EHScopeStack::Cleanup::Flags flags, Address ActiveFlag) { + auto emitCleanup = [&]() { + // Ask the cleanup to emit itself. + assert(CGF.HaveInsertPoint() && "expected insertion point"); + Fn->Emit(CGF, flags); + assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?"); + }; + // If there's an active flag, load it and skip the cleanup if it's // false. - if (ActiveFlag.isValid()) { - llvm_unreachable("NYI"); - } - - // Ask the cleanup to emit itself. - assert(CGF.HaveInsertPoint() && "expected insertion point"); - Fn->Emit(CGF, flags); - assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?"); + cir::CIRGenBuilderTy &builder = CGF.getBuilder(); + mlir::Location loc = + CGF.currSrcLoc ? *CGF.currSrcLoc : builder.getUnknownLoc(); - // Emit the continuation block if there was an active flag. if (ActiveFlag.isValid()) { - llvm_unreachable("NYI"); + mlir::Value isActive = builder.createLoad(loc, ActiveFlag); + builder.create(loc, isActive, false, + [&](mlir::OpBuilder &b, mlir::Location) { + emitCleanup(); + builder.createYield(loc); + }); + } else { + emitCleanup(); } + // No need to emit continuation block because CIR uses a cir.if. } /// Pops a cleanup block. If the block includes a normal cleanup, the diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 63a18c328fa8..7dded241490b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -937,18 +937,16 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { // The null-check means that the initializer is conditionally // evaluated. - ConditionalEvaluation conditional(*this); - mlir::OpBuilder::InsertPoint ifBody, postIfBody; + mlir::OpBuilder::InsertPoint ifBody, postIfBody, preIfBody; + mlir::Value nullCmpResult; mlir::Location loc = getLoc(E->getSourceRange()); if (nullCheck) { - conditional.begin(*this); mlir::Value nullPtr = builder.getNullPtr(allocation.getPointer().getType(), loc); - mlir::Value nullCmpResult = builder.createCompare( - loc, mlir::cir::CmpOpKind::ne, allocation.getPointer(), nullPtr); - - // mlir::Value Failed = CGF.getBuilder().createNot(Success); + nullCmpResult = builder.createCompare(loc, mlir::cir::CmpOpKind::ne, + allocation.getPointer(), nullPtr); + preIfBody = builder.saveInsertionPoint(); builder.create(loc, nullCmpResult, /*withElseRegion=*/false, [&](mlir::OpBuilder &, mlir::Location) { @@ -957,10 +955,21 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { postIfBody = builder.saveInsertionPoint(); } + // Make sure the conditional evaluation uses the insertion + // point right before the if check. + mlir::OpBuilder::InsertPoint ip = builder.saveInsertionPoint(); + if (ifBody.isSet()) { + builder.setInsertionPointAfterValue(nullCmpResult); + ip = builder.saveInsertionPoint(); + } + ConditionalEvaluation conditional(ip); + // All the actual work to be done should be placed inside the IfOp above, // so change the insertion point over there. - if (ifBody.isSet()) + if (ifBody.isSet()) { + conditional.begin(*this); builder.restoreInsertionPoint(ifBody); + } // If there's an operator delete, enter a cleanup to call it if an // exception is thrown. @@ -982,9 +991,25 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { llvm_unreachable("NYI"); } - mlir::Type elementTy = getTypes().convertTypeForMem(allocType); - Address result = builder.createElementBitCast(getLoc(E->getSourceRange()), - allocation, elementTy); + mlir::Type elementTy; + Address result = Address::invalid(); + auto createCast = [&]() { + elementTy = getTypes().convertTypeForMem(allocType); + result = builder.createElementBitCast(getLoc(E->getSourceRange()), + allocation, elementTy); + }; + + if (preIfBody.isSet()) { + // Generate any cast before the if condition check on the null because the + // result can be used after the if body and should dominate all potential + // uses. + mlir::OpBuilder::InsertionGuard guard(builder); + assert(nullCmpResult && "expected"); + builder.setInsertionPointAfterValue(nullCmpResult); + createCast(); + } else { + createCast(); + } // Passing pointer through launder.invariant.group to avoid propagation of // vptrs information which may be included in previous type. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 86f2d8300780..421d90dcbe1e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1844,11 +1844,12 @@ class CIRGenFunction : public CIRGenTypeCache { /// An object to manage conditionally-evaluated expressions. class ConditionalEvaluation { - // llvm::BasicBlock *StartBB; + mlir::OpBuilder::InsertPoint insertPt; public: ConditionalEvaluation(CIRGenFunction &CGF) - /*: StartBB(CGF.Builder.GetInsertBlock())*/ {} + : insertPt(CGF.builder.saveInsertionPoint()) {} + ConditionalEvaluation(mlir::OpBuilder::InsertPoint ip) : insertPt(ip) {} void begin(CIRGenFunction &CGF) { assert(CGF.OutermostConditional != this); @@ -1862,9 +1863,10 @@ class CIRGenFunction : public CIRGenTypeCache { CGF.OutermostConditional = nullptr; } - /// Returns a block which will be executed prior to each - /// evaluation of the conditional code. - // llvm::BasicBlock *getStartingBlock() const { return StartBB; } + /// Returns the insertion point which will be executed prior to each + /// evaluation of the conditional code. In LLVM OG, this method + /// is called getStartingBlock. + mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; } }; struct ConditionalInfo { @@ -1882,7 +1884,16 @@ class CIRGenFunction : public CIRGenTypeCache { void setBeforeOutermostConditional(mlir::Value value, Address addr) { assert(isInConditionalBranch()); - llvm_unreachable("NYI"); + { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.restoreInsertionPoint(OutermostConditional->getInsertPoint()); + builder.createStore( + value.getLoc(), value, addr, + /*volatile*/ false, + mlir::IntegerAttr::get( + mlir::IntegerType::get(value.getContext(), 64), + (uint64_t)addr.getAlignment().getAsAlign().value())); + } } void pushIrregularPartialArrayCleanup(mlir::Value arrayBegin, diff --git a/clang/test/CIR/CodeGen/conditional-cleanup.cpp b/clang/test/CIR/CodeGen/conditional-cleanup.cpp new file mode 100644 index 000000000000..211a2672d1aa --- /dev/null +++ b/clang/test/CIR/CodeGen/conditional-cleanup.cpp @@ -0,0 +1,74 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +typedef __typeof(sizeof(0)) size_t; + +// Declare the reserved global placement new. +void *operator new(size_t, void*); + +namespace test7 { + struct A { A(); ~A(); }; + struct B { + static void *operator new(size_t size) throw(); + B(const A&, B*); + ~B(); + }; + + B *test() { + return new B(A(), new B(A(), 0)); + } +} + +// CIR-DAG: ![[A:.*]] = !cir.struct, !cir.ptr>, ["__retval"] {alignment = 8 : i64} +// CIR: cir.scope { +// CIR: %[[TMP_A0:.*]] = cir.alloca ![[A]], !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} +// CIR: %[[CLEANUP_COND_OUTER:.*]] = cir.alloca !cir.bool, !cir.ptr, ["cleanup.cond"] {alignment = 1 : i64} +// CIR: %[[TMP_A1:.*]] = cir.alloca ![[A]], !cir.ptr, ["ref.tmp1"] {alignment = 1 : i64} +// CIR: %[[CLEANUP_COND_INNER:.*]] = cir.alloca !cir.bool, !cir.ptr, ["cleanup.cond"] {alignment = 1 : i64} +// CIR: %[[FALSE0:.*]] = cir.const #false +// CIR: %[[TRUE0:.*]] = cir.const #true +// CIR: %[[FALSE1:.*]] = cir.const #false +// CIR: %[[TRUE1:.*]] = cir.const #true + +// CIR: %[[NULL_CHECK0:.*]] = cir.cmp(ne +// CIR: %[[PTR_B0:.*]] = cir.cast(bitcast +// CIR: cir.store align(1) %[[FALSE1]], %[[CLEANUP_COND_OUTER]] : !cir.bool, !cir.ptr +// CIR: cir.store align(1) %[[FALSE0]], %[[CLEANUP_COND_INNER]] : !cir.bool, !cir.ptr +// CIR: cir.if %[[NULL_CHECK0]] { + +// Ctor call: @test7::A::A() +// CIR: cir.call @_ZN5test71AC1Ev(%[[TMP_A0]]) : (!cir.ptr) -> () +// CIR: cir.store %[[TRUE1]], %[[CLEANUP_COND_OUTER]] : !cir.bool, !cir.ptr + +// CIR: %[[NULL_CHECK1:.*]] = cir.cmp(ne +// CIR: %[[PTR_B1:.*]] = cir.cast(bitcast +// CIR: cir.if %[[NULL_CHECK1]] { + +// Ctor call: @test7::A::A() +// CIR: cir.call @_ZN5test71AC1Ev(%[[TMP_A1]]) : (!cir.ptr) -> () +// CIR: cir.store %[[TRUE0]], %[[CLEANUP_COND_INNER]] : !cir.bool, !cir.ptr +// Ctor call: @test7::B::B() +// CIR: cir.call @_ZN5test71BC1ERKNS_1AEPS0_(%[[PTR_B1]], %[[TMP_A1]], {{.*}}) : (!cir.ptr, !cir.ptr, !cir.ptr) -> () +// CIR: } + +// Ctor call: @test7::B::B() +// CIR: cir.call @_ZN5test71BC1ERKNS_1AEPS0_(%[[PTR_B0]], %[[TMP_A0]], %[[PTR_B1]]) : (!cir.ptr, !cir.ptr, !cir.ptr) -> () +// CIR: } +// CIR: cir.store %[[PTR_B0]], %[[RET_VAL]] : !cir.ptr, !cir.ptr> +// CIR: %[[DO_CLEANUP_INNER:.*]] = cir.load %[[CLEANUP_COND_INNER]] : !cir.ptr, !cir.bool +// CIR: cir.if %[[DO_CLEANUP_INNER]] { +// Dtor call: @test7::A::~A() +// CIR: cir.call @_ZN5test71AD1Ev(%[[TMP_A1]]) : (!cir.ptr) -> () +// CIR: } +// CIR: %[[DO_CLEANUP_OUTER:.*]] = cir.load %[[CLEANUP_COND_OUTER]] : !cir.ptr, !cir.bool +// Dtor call: @test7::A::~A() +// CIR: cir.if %[[DO_CLEANUP_OUTER]] { +// CIR: cir.call @_ZN5test71AD1Ev(%[[TMP_A0]]) : (!cir.ptr) -> () +// CIR: } +// CIR: } +// CIR: cir.return +// CIR: } \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/new-null.cpp b/clang/test/CIR/CodeGen/new-null.cpp index 001423966b12..e0fdc7484c1b 100644 --- a/clang/test/CIR/CodeGen/new-null.cpp +++ b/clang/test/CIR/CodeGen/new-null.cpp @@ -53,8 +53,8 @@ namespace test15 { // CIR: %[[VAL_5:.*]] = cir.call @_ZnwmPvb(%[[VAL_2]], %[[VAL_3]], %[[VAL_4]]) // CIR: %[[VAL_6:.*]] = cir.const #cir.ptr : !cir.ptr // CIR: %[[VAL_7:.*]] = cir.cmp(ne, %[[VAL_5]], %[[VAL_6]]) : !cir.ptr, !cir.bool + // CIR: %[[VAL_8:.*]] = cir.cast(bitcast, %[[VAL_5]] : !cir.ptr), !cir.ptr // CIR: cir.if %[[VAL_7]] { - // CIR: %[[VAL_8:.*]] = cir.cast(bitcast, %[[VAL_5]] : !cir.ptr), !cir.ptr // CIR: cir.call @_ZN6test151AC1Ev(%[[VAL_8]]) : (!cir.ptr) -> () // CIR: } // CIR: cir.return From 5b7b9dfa5b2217f15d2bc879ce0a25b03208463d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 23 Sep 2024 16:22:35 -0700 Subject: [PATCH 1880/2301] [CIR][CIRGen][NFC] Cleanups: Prepare for conditional cleanup Nothing unblocked yet, just hit next assert in the same path. --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 27 +++++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 27 ++++++++++++++++--------- 2 files changed, 43 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 4737890eef59..64024abd781a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -80,9 +80,12 @@ Address CIRGenFunction::createCleanupActiveFlag() { } DominatingValue::saved_type -DominatingValue::saved_type::save(CIRGenFunction &CGF, RValue rv) { +DominatingValue::saved_type::save(CIRGenFunction &cgf, RValue rv) { if (rv.isScalar()) { - llvm_unreachable("scalar NYI"); + mlir::Value val = rv.getScalarVal(); + return saved_type(DominatingCIRValue::save(cgf, val), + DominatingCIRValue::needsSaving(val) ? ScalarAddress + : ScalarLiteral); } if (rv.isComplex()) { @@ -92,6 +95,26 @@ DominatingValue::saved_type::save(CIRGenFunction &CGF, RValue rv) { llvm_unreachable("aggregate NYI"); } +/// Given a saved r-value produced by SaveRValue, perform the code +/// necessary to restore it to usability at the current insertion +/// point. +RValue DominatingValue::saved_type::restore(CIRGenFunction &CGF) { + switch (K) { + case ScalarLiteral: + case ScalarAddress: + return RValue::get(DominatingCIRValue::restore(CGF, Vals.first)); + case AggregateLiteral: + case AggregateAddress: + return RValue::getAggregate( + DominatingValue
::restore(CGF, AggregateAddr)); + case ComplexAddress: { + llvm_unreachable("NYI"); + } + } + + llvm_unreachable("bad saved r-value kind"); +} + /// Deactive a cleanup that was created in an active state. void CIRGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C, mlir::Operation *dominatingIP) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 421d90dcbe1e..7b3542241709 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -2334,20 +2334,29 @@ template <> struct DominatingValue { AggregateAddress, ComplexAddress }; - - llvm::Value *Value; - llvm::Type *ElementType; + union { + struct { + DominatingCIRValue::saved_type first, second; + } Vals; + DominatingValue
::saved_type AggregateAddr; + }; + LLVM_PREFERRED_TYPE(Kind) unsigned K : 3; - unsigned Align : 29; - saved_type(llvm::Value *v, llvm::Type *e, Kind k, unsigned a = 0) - : Value(v), ElementType(e), K(k), Align(a) {} + + saved_type(DominatingCIRValue::saved_type Val1, unsigned K) + : Vals{Val1, DominatingCIRValue::saved_type()}, K(K) {} + + saved_type(DominatingCIRValue::saved_type Val1, + DominatingCIRValue::saved_type Val2) + : Vals{Val1, Val2}, K(ComplexAddress) {} + + saved_type(DominatingValue
::saved_type AggregateAddr, unsigned K) + : AggregateAddr(AggregateAddr), K(K) {} public: static bool needsSaving(RValue value); static saved_type save(CIRGenFunction &CGF, RValue value); - RValue restore(CIRGenFunction &CGF) { llvm_unreachable("NYI"); } - - // implementations in CGCleanup.cpp + RValue restore(CIRGenFunction &CGF); }; static bool needsSaving(type value) { return saved_type::needsSaving(value); } From 64d9fbc4618c2f5f88c92fd25c79bf72c790a9f8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 23 Sep 2024 16:34:39 -0700 Subject: [PATCH 1881/2301] [CIR][CIRGen][NFC] Cleanups: more boilerplate work for conditional on exceptions Code path still hits an assert sooner, incremental NFC step. --- .../clang/CIR/Dialect/IR/CIRDataLayout.h | 4 + clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 73 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 46 +++++++++++- 3 files changed, 120 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h index 2809e4ed55eb..3d6379dcd23d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h @@ -62,6 +62,10 @@ class CIRDataLayout { return getAlignment(ty, true); } + llvm::Align getPrefTypeAlign(mlir::Type Ty) const { + return getAlignment(Ty, false); + } + /// Returns the maximum number of bytes that may be overwritten by /// storing the specified type. /// diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 64024abd781a..f6de23ca89b8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -115,6 +115,75 @@ RValue DominatingValue::saved_type::restore(CIRGenFunction &CGF) { llvm_unreachable("bad saved r-value kind"); } +static bool IsUsedAsEHCleanup(EHScopeStack &EHStack, + EHScopeStack::stable_iterator cleanup) { + // If we needed an EH block for any reason, that counts. + if (EHStack.find(cleanup)->hasEHBranches()) + return true; + + // Check whether any enclosed cleanups were needed. + for (EHScopeStack::stable_iterator i = EHStack.getInnermostEHScope(); + i != cleanup;) { + assert(cleanup.strictlyEncloses(i)); + + EHScope &scope = *EHStack.find(i); + if (scope.hasEHBranches()) + return true; + + i = scope.getEnclosingEHScope(); + } + + return false; +} + +enum ForActivation_t { ForActivation, ForDeactivation }; + +/// The given cleanup block is changing activation state. Configure a +/// cleanup variable if necessary. +/// +/// It would be good if we had some way of determining if there were +/// extra uses *after* the change-over point. +static void setupCleanupBlockActivation(CIRGenFunction &CGF, + EHScopeStack::stable_iterator C, + ForActivation_t kind, + mlir::Operation *dominatingIP) { + EHCleanupScope &Scope = cast(*CGF.EHStack.find(C)); + + // We always need the flag if we're activating the cleanup in a + // conditional context, because we have to assume that the current + // location doesn't necessarily dominate the cleanup's code. + bool isActivatedInConditional = + (kind == ForActivation && CGF.isInConditionalBranch()); + + bool needFlag = false; + + // Calculate whether the cleanup was used: + + // - as a normal cleanup + if (Scope.isNormalCleanup()) { + Scope.setTestFlagInNormalCleanup(); + needFlag = true; + } + + // - as an EH cleanup + if (Scope.isEHCleanup() && + (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) { + Scope.setTestFlagInEHCleanup(); + needFlag = true; + } + + // If it hasn't yet been used as either, we're done. + if (!needFlag) + return; + + Address var = Scope.getActiveFlag(); + if (!var.isValid()) { + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); +} + /// Deactive a cleanup that was created in an active state. void CIRGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C, mlir::Operation *dominatingIP) { @@ -143,7 +212,9 @@ void CIRGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C, return; } - llvm_unreachable("NYI"); + // Otherwise, follow the general case. + setupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP); + Scope.setActive(false); } void CIRGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 7b3542241709..9a2f269e106a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -2307,7 +2307,38 @@ struct DominatingCIRValue { typedef llvm::PointerIntPair saved_type; /// Answer whether the given value needs extra work to be saved. - static bool needsSaving(mlir::Value value) { llvm_unreachable("NYI"); } + static bool needsSaving(mlir::Value value) { + if (!value) + return false; + + // If it's a block argument, we don't need to save. + mlir::Operation *definingOp = value.getDefiningOp(); + if (!definingOp) + return false; + + // If value is defined the function or a global init entry block, we don't + // need to save. + mlir::Block *currBlock = definingOp->getBlock(); + if (!currBlock->isEntryBlock() || !definingOp->getParentOp()) + return false; + + if (auto fnOp = definingOp->getParentOfType()) { + if (&fnOp.getBody().front() == currBlock) + return true; + return false; + } + + if (auto globalOp = definingOp->getParentOfType()) { + assert(globalOp.getNumRegions() == 2 && "other regions NYI"); + if (&globalOp.getCtorRegion().front() == currBlock) + return true; + if (&globalOp.getDtorRegion().front() == currBlock) + return true; + return false; + } + + return false; + } static saved_type save(CIRGenFunction &CGF, mlir::Value value); static mlir::Value restore(CIRGenFunction &CGF, saved_type value); @@ -2315,7 +2346,18 @@ struct DominatingCIRValue { inline DominatingCIRValue::saved_type DominatingCIRValue::save(CIRGenFunction &CGF, mlir::Value value) { - llvm_unreachable("NYI"); + if (!needsSaving(value)) + return saved_type(value, false); + + // Otherwise, we need an alloca. + auto align = CharUnits::fromQuantity( + CGF.CGM.getDataLayout().getPrefTypeAlign(value.getType())); + mlir::Location loc = value.getLoc(); + Address alloca = + CGF.CreateTempAlloca(value.getType(), align, loc, "cond-cleanup.save"); + CGF.getBuilder().createStore(loc, value, alloca); + + return saved_type(alloca.emitRawPointer(), true); } inline mlir::Value DominatingCIRValue::restore(CIRGenFunction &CGF, From ec5f296f18d4751044fcd082ef98766b83c64ba6 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Wed, 25 Sep 2024 01:47:05 +0800 Subject: [PATCH 1882/2301] [CIR][CodeGen] Handling multiple stmt followed after a switch case (#878) Close https://github.com/llvm/clangir/issues/876 We've already considered the case that there are random stmt after a switch case: ``` for (auto *c : compoundStmt->body()) { if (auto *switchCase = dyn_cast(c)) { res = buildSwitchCase(*switchCase, condType, caseAttrs); } else if (lastCaseBlock) { // This means it's a random stmt following up a case, just // emit it as part of previous known case. mlir::OpBuilder::InsertionGuard guardCase(builder); builder.setInsertionPointToEnd(lastCaseBlock); res = buildStmt(c, /*useCurrentScope=*/!isa(c)); } else { llvm_unreachable("statement doesn't belong to any case region, NYI"); } lastCaseBlock = builder.getBlock(); if (res.failed()) break; } ``` However, maybe this is an oversight, in the branch of ` if (lastCaseBlock)`, the insertion point will be updated automatically when the RAII object `guardCase` destroys, then we can assign the correct value for `lastCaseBlock` later. So we will see the weird code pattern in the issue side. BTW, I found the codes in CIRGenStmt.cpp are far more less similar with the ones other code gen places. Is this intentional? And what is the motivation and guide lines here? --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 4 ++-- clang/test/CIR/CodeGen/goto.cpp | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 543b744cc5d0..426da35b5238 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -996,18 +996,18 @@ mlir::LogicalResult CIRGenFunction::buildSwitchBody( for (auto *c : compoundStmt->body()) { if (auto *switchCase = dyn_cast(c)) { res = buildSwitchCase(*switchCase, condType, caseAttrs); + lastCaseBlock = builder.getBlock(); } else if (lastCaseBlock) { // This means it's a random stmt following up a case, just // emit it as part of previous known case. mlir::OpBuilder::InsertionGuard guardCase(builder); builder.setInsertionPointToEnd(lastCaseBlock); res = buildStmt(c, /*useCurrentScope=*/!isa(c)); + lastCaseBlock = builder.getBlock(); } else { llvm_unreachable("statement doesn't belong to any case region, NYI"); } - lastCaseBlock = builder.getBlock(); - if (res.failed()) break; } diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 5a8d598d95cd..81eb4ec43e65 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -288,3 +288,25 @@ void foo() { // NOFLAT: cir.scope { // NOFLAT: cir.label "label" // NOFLAT: %0 = cir.alloca !ty_S, !cir.ptr, ["agg.tmp0"] + +extern "C" void action1(); +extern "C" void action2(); +extern "C" void multiple_non_case(int v) { + switch (v) { + default: + action1(); + l2: + action2(); + break; + } +} + +// NOFLAT: cir.func @multiple_non_case +// NOFLAT: cir.switch +// NOFLAT: case (default) +// NOFLAT: cir.call @action1() +// NOFLAT: cir.br ^[[BB1:[a-zA-Z0-9]+]] +// NOFLAT: ^[[BB1]]: +// NOFLAT: cir.label +// NOFLAT: cir.call @action2() +// NOFLAT: cir.break From 70ec0aacc607b006c99fe10406988a27284b9970 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Tue, 24 Sep 2024 13:50:39 -0400 Subject: [PATCH 1883/2301] [CIR][CIRGen] Generate CIR for empty compound literal (#880) as title. --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 6 +++--- .../test/CIR/CodeGen/compound-literal-empty.c | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/compound-literal-empty.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index d86a40f506ff..1acc5a41b29a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -340,8 +340,8 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitMemberExpr(MemberExpr *E); - mlir::Value VisitCompoundLiteralEpxr(CompoundLiteralExpr *E) { - llvm_unreachable("NYI"); + mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { + return buildLoadOfLValue(E); } mlir::Value VisitInitListExpr(InitListExpr *E); @@ -1930,7 +1930,7 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { if (NumInitElements == 0) { // C++11 value-initialization for the scalar. - llvm_unreachable("NYI"); + return buildNullValue(E->getType(), CGF.getLoc(E->getExprLoc())); } return Visit(E->getInit(0)); diff --git a/clang/test/CIR/CodeGen/compound-literal-empty.c b/clang/test/CIR/CodeGen/compound-literal-empty.c new file mode 100644 index 000000000000..b0007d96b4cb --- /dev/null +++ b/clang/test/CIR/CodeGen/compound-literal-empty.c @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +short b() { return (short){}; } + +// CIR-LABEL: b +// CIR: {{%.*}} = cir.alloca !s16i, !cir.ptr, [".compoundliteral"] {alignment = 2 : i64} + +// LLVM-LABEL: b +// LLVM: [[RET_P:%.*]] = alloca i16, i64 1, align 2 +// LLVM: [[LITERAL:%.*]] = alloca i16, i64 1, align 2 +// LLVM: store i16 0, ptr [[LITERAL]], align 2 +// LLVM: [[T0:%.*]] = load i16, ptr [[LITERAL]], align 2 +// LLVM: store i16 [[T0]], ptr [[RET_P]], align 2 +// LLVM: [[T1:%.*]] = load i16, ptr [[RET_P]], align 2 +// LLVM: ret i16 [[T1]] From fe93ca31d204df9be6687286050370b0101e2181 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Tue, 24 Sep 2024 17:52:06 -0400 Subject: [PATCH 1884/2301] [CIR][CIRGen] Generate CIR for vset_lane and vsetq_lane intrinsics (#882) As title. Notice that for those intrinsics, just like OG, we do not lower to llvm intrinsics, instead, do vector insert. The test case is partially from OG [aarch64-neon-vget.c](https://github.com/llvm/clangir/blob/85bc6407f559221afebe08a60ed2b50bf1edf7fa/clang/test/CodeGen/aarch64-neon-vget.c) But, I did not do all signed and unsigned int tests because unsigned and signed of the same width essentially just use the same intrinsic ID thus exactly same code path as far as this PR concerns. --------- Co-authored-by: Guojin He --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 10 +- clang/test/CIR/CodeGen/aarch64-neon-vset.c | 238 ++++++++++++++++++ 2 files changed, 246 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/aarch64-neon-vset.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index dbe25e8a6a85..cc715550c4cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2165,14 +2165,20 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vset_lane_i16: case NEON::BI__builtin_neon_vset_lane_i32: case NEON::BI__builtin_neon_vset_lane_i64: - case NEON::BI__builtin_neon_vset_lane_bf16: case NEON::BI__builtin_neon_vset_lane_f32: case NEON::BI__builtin_neon_vsetq_lane_i8: case NEON::BI__builtin_neon_vsetq_lane_i16: case NEON::BI__builtin_neon_vsetq_lane_i32: case NEON::BI__builtin_neon_vsetq_lane_i64: - case NEON::BI__builtin_neon_vsetq_lane_bf16: case NEON::BI__builtin_neon_vsetq_lane_f32: + Ops.push_back(buildScalarExpr(E->getArg(2))); + return builder.create(getLoc(E->getExprLoc()), + Ops[1], Ops[0], Ops[2]); + case NEON::BI__builtin_neon_vset_lane_bf16: + case NEON::BI__builtin_neon_vsetq_lane_bf16: + // No support for now as no real/test case for them + // at the moment, the implementation should be the same as above + // vset_lane or vsetq_lane intrinsics llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vset_lane_f64: // The vector type needs a cast for the v1f64 variant. diff --git a/clang/test/CIR/CodeGen/aarch64-neon-vset.c b/clang/test/CIR/CodeGen/aarch64-neon-vset.c new file mode 100644 index 000000000000..5da779ff69eb --- /dev/null +++ b/clang/test/CIR/CodeGen/aarch64-neon-vset.c @@ -0,0 +1,238 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -emit-cir -target-feature +neon %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -emit-llvm -target-feature +neon %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// This test file is similar to but not the same as +// clang/test/CodeGen/aarch64-neon-vget.c +// The difference is that this file only tests uses vset intrinsics, as we feel +// it would be proper to have a separate test file testing vget intrinsics +// with the file name aarch64-neon-vget.c +// Also, for each integer type, we only test signed or unsigned, not both. +// This is because integer types of the same size just use same intrinsic. + +// REQUIRES: aarch64-registered-target || arm-registered-target +#include + +uint8x8_t test_vset_lane_u8(uint8_t a, uint8x8_t b) { + return vset_lane_u8(a, b, 7); +} + +// CIR-LABEL: test_vset_lane_u8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i loc(#loc7) +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local <8 x i8> @test_vset_lane_u8(i8 [[A:%.*]], <8 x i8> [[B:%.*]]) +// LLVM: [[A_ADR:%.*]] = alloca i8, i64 1, align 1 +// LLVM: [[B_ADR:%.*]] = alloca <8 x i8>, i64 1, align 8 +// LLVM: store i8 [[A]], ptr [[A_ADR]], align 1 +// LLVM: store <8 x i8> [[B]], ptr [[B_ADR]], align 8 +// LLVM: [[TMP_A0:%.*]] = load i8, ptr [[A_ADR]], align 1 +// LLVM: store i8 [[TMP_A0]], ptr [[S0:%.*]], align 1 +// LLVM: [[TMP_B0:%.*]] = load <8 x i8>, ptr [[B_ADR]], align 8 +// LLVM: store <8 x i8> [[TMP_B0]], ptr [[S1:%.*]], align 8 +// LLVM: [[INTRN_ARG0:%.*]] = load i8, ptr [[S0]], align 1 +// LLVM: [[INTRN_ARG1:%.*]] = load <8 x i8>, ptr [[S1]], align 8 +// LLVM: [[INTRN_RES:%.*]] = insertelement <8 x i8> [[INTRN_ARG1]], i8 [[INTRN_ARG0]], i32 7 +// LLVM: ret <8 x i8> {{%.*}} + +uint16x4_t test_vset_lane_u16(uint16_t a, uint16x4_t b) { + return vset_lane_u16(a, b, 3); +} + +// CIR-LABEL: test_vset_lane_u16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local <4 x i16> @test_vset_lane_u16(i16 [[A:%.*]], <4 x i16> [[B:%.*]]) +// LLVM: [[A_ADR:%.*]] = alloca i16, i64 1, align 2 +// LLVM: [[B_ADR:%.*]] = alloca <4 x i16>, i64 1, align 8 +// LLVM: store i16 [[A]], ptr [[A_ADR]], align 2 +// LLVM: store <4 x i16> [[B]], ptr [[B_ADR]], align 8 +// LLVM: [[TMP_A0:%.*]] = load i16, ptr [[A_ADR]], align 2 +// LLVM: store i16 [[TMP_A0]], ptr [[S0:%.*]], align 2 +// LLVM: [[TMP_B0:%.*]] = load <4 x i16>, ptr [[B_ADR]], align 8 +// LLVM: store <4 x i16> [[TMP_B0]], ptr [[S1:%.*]], align 8 +// LLVM: [[INTRN_ARG0:%.*]] = load i16, ptr [[S0]], align 2 +// LLVM: [[INTRN_ARG1:%.*]] = load <4 x i16>, ptr [[S1]], align 8 +// LLVM: [[INTRN_RES:%.*]] = insertelement <4 x i16> [[INTRN_ARG1]], i16 [[INTRN_ARG0]], i32 3 +// LLVM: ret <4 x i16> {{%.*}} + +uint32x2_t test_vset_lane_u32(uint32_t a, uint32x2_t b) { + return vset_lane_u32(a, b, 1); +} + +// CIR-LABEL: test_vset_lane_u32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local <2 x i32> @test_vset_lane_u32(i32 [[A:%.*]], <2 x i32> [[B:%.*]]) +// LLVM: [[A_ADR:%.*]] = alloca i32, i64 1, align 4 +// LLVM: [[B_ADR:%.*]] = alloca <2 x i32>, i64 1, align 8 +// LLVM: store i32 [[A]], ptr [[A_ADR]], align 4 +// LLVM: store <2 x i32> [[B]], ptr [[B_ADR]], align 8 +// LLVM: [[TMP_A0:%.*]] = load i32, ptr [[A_ADR]], align 4 +// LLVM: store i32 [[TMP_A0]], ptr [[S0:%.*]], align 4 +// LLVM: [[TMP_B0:%.*]] = load <2 x i32>, ptr [[B_ADR]], align 8 +// LLVM: store <2 x i32> [[TMP_B0]], ptr [[S1:%.*]], align 8 +// LLVM: [[INTRN_ARG0:%.*]] = load i32, ptr [[S0]], align 4 +// LLVM: [[INTRN_ARG1:%.*]] = load <2 x i32>, ptr [[S1]], align 8 +// LLVM: [[INTRN_RES:%.*]] = insertelement <2 x i32> [[INTRN_ARG1]], i32 [[INTRN_ARG0]], i32 1 +// LLVM: ret <2 x i32> {{%.*}} + + +int64x1_t test_vset_lane_u64(int64_t a, int64x1_t b) { + return vset_lane_u64(a, b, 0); +} + +// CIR-LABEL: test_vset_lane_u64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local <1 x i64> @test_vset_lane_u64(i64 [[A:%.*]], <1 x i64> [[B:%.*]]) +// LLVM: [[A_ADR:%.*]] = alloca i64, i64 1, align 8 +// LLVM: [[B_ADR:%.*]] = alloca <1 x i64>, i64 1, align 8 +// LLVM: store i64 [[A]], ptr [[A_ADR]], align 8 +// LLVM: store <1 x i64> [[B]], ptr [[B_ADR]], align 8 +// LLVM: [[TMP_A0:%.*]] = load i64, ptr [[A_ADR]], align 8 +// LLVM: store i64 [[TMP_A0]], ptr [[S0:%.*]], align 8 +// LLVM: [[TMP_B0:%.*]] = load <1 x i64>, ptr [[B_ADR]], align 8 +// LLVM: store <1 x i64> [[TMP_B0]], ptr [[S1:%.*]], align 8 +// LLVM: [[INTRN_ARG0:%.*]] = load i64, ptr [[S0]], align 8 +// LLVM: [[INTRN_ARG1:%.*]] = load <1 x i64>, ptr [[S1]], align 8 +// LLVM: [[INTRN_RES:%.*]] = insertelement <1 x i64> [[INTRN_ARG1]], i64 [[INTRN_ARG0]], i32 0 +// LLVM: ret <1 x i64> {{%.*}} + +float32x2_t test_vset_lane_f32(float32_t a, float32x2_t b) { + return vset_lane_f32(a, b, 1); +} + +// CIR-LABEL: test_vset_lane_f32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local <2 x float> @test_vset_lane_f32(float [[A:%.*]], <2 x float> [[B:%.*]]) +// LLVM: [[A_ADR:%.*]] = alloca float, i64 1, align 4 +// LLVM: [[B_ADR:%.*]] = alloca <2 x float>, i64 1, align 8 +// LLVM: store float [[A]], ptr [[A_ADR]], align 4 +// LLVM: store <2 x float> [[B]], ptr [[B_ADR]], align 8 +// LLVM: [[TMP_A0:%.*]] = load float, ptr [[A_ADR]], align 4 +// LLVM: store float [[TMP_A0]], ptr [[S0:%.*]], align 4 +// LLVM: [[TMP_B0:%.*]] = load <2 x float>, ptr [[B_ADR]], align 8 +// LLVM: store <2 x float> [[TMP_B0]], ptr [[S1:%.*]], align 8 +// LLVM: [[INTRN_ARG0:%.*]] = load float, ptr [[S0]], align 4 +// LLVM: [[INTRN_ARG1:%.*]] = load <2 x float>, ptr [[S1]], align 8 +// LLVM: [[INTRN_RES:%.*]] = insertelement <2 x float> [[INTRN_ARG1]], float [[INTRN_ARG0]], i32 1 +// LLVM: ret <2 x float> {{%.*}} + +uint8x16_t test_vsetq_lane_u8(uint8_t a, uint8x16_t b) { + return vsetq_lane_u8(a, b, 15); +} + +// CIR-LABEL: test_vsetq_lane_u8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<15> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local <16 x i8> @test_vsetq_lane_u8(i8 [[A:%.*]], <16 x i8> [[B:%.*]]) +// LLVM: [[A_ADR:%.*]] = alloca i8, i64 1, align 1 +// LLVM: [[B_ADR:%.*]] = alloca <16 x i8>, i64 1, align 16 +// LLVM: store i8 [[A]], ptr [[A_ADR]], align 1 +// LLVM: store <16 x i8> [[B]], ptr [[B_ADR]], align 16 +// LLVM: [[TMP_A0:%.*]] = load i8, ptr [[A_ADR]], align 1 +// LLVM: store i8 [[TMP_A0]], ptr [[S0:%.*]], align 1 +// LLVM: [[TMP_B0:%.*]] = load <16 x i8>, ptr [[B_ADR]], align 16 +// LLVM: store <16 x i8> [[TMP_B0]], ptr [[S1:%.*]], align 16 +// LLVM: [[INTRN_ARG0:%.*]] = load i8, ptr [[S0]], align 1 +// LLVM: [[INTRN_ARG1:%.*]] = load <16 x i8>, ptr [[S1]], align 16 +// LLVM: [[INTRN_RES:%.*]] = insertelement <16 x i8> [[INTRN_ARG1]], i8 [[INTRN_ARG0]], i32 15 +// LLVM: ret <16 x i8> {{%.*}} + +uint16x8_t test_vsetq_lane_u16(uint16_t a, uint16x8_t b) { + return vsetq_lane_u16(a, b, 7); +} + +// CIR-LABEL: test_vsetq_lane_u16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local <8 x i16> @test_vsetq_lane_u16(i16 [[A:%.*]], <8 x i16> [[B:%.*]]) +// LLVM: [[A_ADR:%.*]] = alloca i16, i64 1, align 2 +// LLVM: [[B_ADR:%.*]] = alloca <8 x i16>, i64 1, align 16 +// LLVM: store i16 [[A]], ptr [[A_ADR]], align 2 +// LLVM: store <8 x i16> [[B]], ptr [[B_ADR]], align 16 +// LLVM: [[TMP_A0:%.*]] = load i16, ptr [[A_ADR]], align 2 +// LLVM: store i16 [[TMP_A0]], ptr [[S0:%.*]], align 2 +// LLVM: [[TMP_B0:%.*]] = load <8 x i16>, ptr [[B_ADR]], align 16 +// LLVM: store <8 x i16> [[TMP_B0]], ptr [[S1:%.*]], align 16 +// LLVM: [[INTRN_ARG0:%.*]] = load i16, ptr [[S0]], align 2 +// LLVM: [[INTRN_ARG1:%.*]] = load <8 x i16>, ptr [[S1]], align 16 +// LLVM: [[INTRN_RES:%.*]] = insertelement <8 x i16> [[INTRN_ARG1]], i16 [[INTRN_ARG0]], i32 7 +// LLVM: ret <8 x i16> {{%.*}} + +uint32x4_t test_vsetq_lane_u32(uint32_t a, uint32x4_t b) { + return vsetq_lane_u32(a, b, 3); +} + +// CIR-LABEL: test_vsetq_lane_u32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local <4 x i32> @test_vsetq_lane_u32(i32 [[A:%.*]], <4 x i32> [[B:%.*]]) +// LLVM: [[A_ADR:%.*]] = alloca i32, i64 1, align 4 +// LLVM: [[B_ADR:%.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: store i32 [[A]], ptr [[A_ADR]], align 4 +// LLVM: store <4 x i32> [[B]], ptr [[B_ADR]], align 16 +// LLVM: [[TMP_A0:%.*]] = load i32, ptr [[A_ADR]], align 4 +// LLVM: store i32 [[TMP_A0]], ptr [[S0:%.*]], align 4 +// LLVM: [[TMP_B0:%.*]] = load <4 x i32>, ptr [[B_ADR]], align 16 +// LLVM: store <4 x i32> [[TMP_B0]], ptr [[S1:%.*]], align 16 +// LLVM: [[INTRN_ARG0:%.*]] = load i32, ptr [[S0]], align 4 +// LLVM: [[INTRN_ARG1:%.*]] = load <4 x i32>, ptr [[S1]], align 16 +// LLVM: [[INTRN_RES:%.*]] = insertelement <4 x i32> [[INTRN_ARG1]], i32 [[INTRN_ARG0]], i32 3 +// LLVM: ret <4 x i32> {{%.*}} + +int64x2_t test_vsetq_lane_s64(int64_t a, int64x2_t b) { + return vsetq_lane_s64(a, b, 1); +} + +// CIR-LABEL: test_vsetq_lane_s64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local <2 x i64> @test_vsetq_lane_s64(i64 [[A:%.*]], <2 x i64> [[B:%.*]]) +// LLVM: [[A_ADR:%.*]] = alloca i64, i64 1, align 8 +// LLVM: [[B_ADR:%.*]] = alloca <2 x i64>, i64 1, align 16 +// LLVM: store i64 [[A]], ptr [[A_ADR]], align 8 +// LLVM: store <2 x i64> [[B]], ptr [[B_ADR]], align 16 +// LLVM: [[TMP_A0:%.*]] = load i64, ptr [[A_ADR]], align 8 +// LLVM: store i64 [[TMP_A0]], ptr [[S0:%.*]], align 8 +// LLVM: [[TMP_B0:%.*]] = load <2 x i64>, ptr [[B_ADR]], align 16 +// LLVM: store <2 x i64> [[TMP_B0]], ptr [[S1:%.*]], align 16 +// LLVM: [[INTRN_ARG0:%.*]] = load i64, ptr [[S0]], align 8 +// LLVM: [[INTRN_ARG1:%.*]] = load <2 x i64>, ptr [[S1]], align 16 +// LLVM: [[INTRN_RES:%.*]] = insertelement <2 x i64> [[INTRN_ARG1]], i64 [[INTRN_ARG0]], i32 1 +// LLVM: ret <2 x i64> {{%.*}} + +float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) { + return vsetq_lane_f32(a, b, 3); +} + +// CIR-LABEL: test_vsetq_lane_f32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local <4 x float> @test_vsetq_lane_f32(float [[A:%.*]], <4 x float> [[B:%.*]]) +// LLVM: [[A_ADR:%.*]] = alloca float, i64 1, align 4 +// LLVM: [[B_ADR:%.*]] = alloca <4 x float>, i64 1, align 16 +// LLVM: store float [[A]], ptr [[A_ADR]], align 4 +// LLVM: store <4 x float> [[B]], ptr [[B_ADR]], align 16 +// LLVM: [[TMP_A0:%.*]] = load float, ptr [[A_ADR]], align 4 +// LLVM: store float [[TMP_A0]], ptr [[S0:%.*]], align 4 +// LLVM: [[TMP_B0:%.*]] = load <4 x float>, ptr [[B_ADR]], align 16 +// LLVM: store <4 x float> [[TMP_B0]], ptr [[S1:%.*]], align 16 +// LLVM: [[INTRN_ARG0:%.*]] = load float, ptr [[S0]], align 4 +// LLVM: [[INTRN_ARG1:%.*]] = load <4 x float>, ptr [[S1]], align 16 +// LLVM: [[INTRN_RES:%.*]] = insertelement <4 x float> [[INTRN_ARG1]], float [[INTRN_ARG0]], i32 3 +// LLVM: ret <4 x float> {{%.*}} From 983603634e69de2fce5610e8f4b4670e11a98e3e Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 24 Sep 2024 20:57:31 -0400 Subject: [PATCH 1885/2301] [CIR][CI] Remove libcxx tests Reviewers: bcardosolopes Reviewed By: bcardosolopes Pull Request: https://github.com/llvm/clangir/pull/881 --- .github/workflows/libcxx-build-and-test.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml index a28bf4d5daf6..98680a365e1a 100644 --- a/.github/workflows/libcxx-build-and-test.yaml +++ b/.github/workflows/libcxx-build-and-test.yaml @@ -22,9 +22,6 @@ on: - 'runtimes/**' - 'cmake/**' - '.github/workflows/libcxx-build-and-test.yaml' - schedule: - # Run nightly at 08:00 UTC (aka 00:00 Pacific, aka 03:00 Eastern) - - cron: '0 8 * * *' permissions: contents: read # Default everything to read-only From 5c29b6f545161c80ed9e2e59f37a3150f796f152 Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 26 Sep 2024 02:08:35 +0800 Subject: [PATCH 1886/2301] [CIR][Dialect][CodeGen] Add a unit attribute for OpenCL kernels (#877) We need a target-independent way to distinguish OpenCL kernels in ClangIR. This PR adds a unit attribute `OpenCLKernelAttr` similar to the one in Clang AST. This attribute is attached to the extra attribute dictionary of `cir.func` operations only. (Not for `cir.call`.) --- .../clang/CIR/Dialect/IR/CIROpenCLAttrs.td | 17 +++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 3 +++ .../test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl | 14 ++++++++++++++ 3 files changed, 34 insertions(+) create mode 100644 clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td index b80ea308608a..576d619fcf7a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td @@ -168,4 +168,21 @@ def OpenCLVersionAttr : CIR_Attr<"OpenCLVersion", "cl.version"> { let assemblyFormat = "`<` $major_version `,` $minor_version `>`"; } + +//===----------------------------------------------------------------------===// +// OpenCLKernelAttr +//===----------------------------------------------------------------------===// + +// TODO: It might be worthwhile to introduce a generic attribute applicable to +// all offloading languages. +def OpenCLKernelAttr : CIRUnitAttr< + "OpenCLKernel", "cl.kernel"> { + let summary = "OpenCL kernel"; + let description = [{ + Indicate the function is a OpenCL kernel. + }]; + + let storageType = [{ OpenCLKernelAttr }]; +} + #endif // MLIR_CIR_DIALECT_CIR_OPENCL_ATTRS diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 53252ee78bd6..2a1b1a69da3d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -430,6 +430,9 @@ void CIRGenModule::constructAttributeList(StringRef Name, } if (TargetDecl->hasAttr()) { + auto cirKernelAttr = + mlir::cir::OpenCLKernelAttr::get(builder.getContext()); + funcAttrs.set(cirKernelAttr.getMnemonic(), cirKernelAttr); assert(!MissingFeatures::openCL()); } diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl new file mode 100644 index 000000000000..01348013bbf0 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR + + +// CIR: #fn_attr[[KERNEL1:[0-9]*]] = {{.+}}cl.kernel = #cir.cl.kernel +// CIR-NEXT: #fn_attr[[FUNC1:[0-9]*]] = +// CIR-NOT: cl.kernel = #cir.cl.kernel + +kernel void kernel1() {} +// CIR: cir.func @kernel1{{.+}} extra(#fn_attr[[KERNEL1]]) + +void func1() {} + +// CIR: cir.func @func1{{.+}} extra(#fn_attr[[FUNC1]]) From a0b9719b8b55398208a1f263ef06c54b581c71f4 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Thu, 26 Sep 2024 02:11:09 +0800 Subject: [PATCH 1887/2301] [CIR][CodeGen] Handle the case of 'case' after label statement after 'case' (#879) Motivation example: ``` extern "C" void action1(); extern "C" void action2(); extern "C" void case_follow_label(int v) { switch (v) { case 1: label: case 2: action1(); break; default: action2(); goto label; } } ``` When we compile it, we will meet: ``` case Stmt::CaseStmtClass: case Stmt::DefaultStmtClass: assert(0 && "Should not get here, currently handled directly from SwitchStmt"); break; ``` in `buildStmt`. The cause is clear. We call `buildStmt` when we build the label stmt. To solve this, I think we should be able to build case stmt in buildStmt. But the new problem is, we need to pass the information like caseAttr and condType. So I tried to add such informations in CIRGenFunction as data member. --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 15 ++++---- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 34 ++++++++++-------- clang/test/CIR/CodeGen/goto.cpp | 48 ++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9a2f269e106a..8156d8fad059 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -478,6 +478,13 @@ class CIRGenFunction : public CIRGenTypeCache { // applies to. nullptr if there is no 'musttail' on the current statement. const clang::CallExpr *MustTailCall = nullptr; + /// The attributes of cases collected during emitting the body of a switch + /// stmt. + llvm::SmallVector, 2> caseAttrsStack; + + /// The type of the condition for the emitting switch statement. + llvm::SmallVector condTypeStack; + clang::ASTContext &getContext() const; CIRGenBuilderTy &getBuilder() { return builder; } @@ -1210,13 +1217,9 @@ class CIRGenFunction : public CIRGenTypeCache { buildDefaultStmt(const clang::DefaultStmt &S, mlir::Type condType, SmallVector &caseAttrs); - mlir::LogicalResult - buildSwitchCase(const clang::SwitchCase &S, mlir::Type condType, - SmallVector &caseAttrs); + mlir::LogicalResult buildSwitchCase(const clang::SwitchCase &S); - mlir::LogicalResult - buildSwitchBody(const clang::Stmt *S, mlir::Type condType, - SmallVector &caseAttrs); + mlir::LogicalResult buildSwitchBody(const clang::Stmt *S); mlir::cir::FuncOp generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 426da35b5238..1b0829c8e8bb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -303,8 +303,7 @@ mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, case Stmt::CaseStmtClass: case Stmt::DefaultStmtClass: - assert(0 && - "Should not get here, currently handled directly from SwitchStmt"); + return buildSwitchCase(cast(*S)); break; case Stmt::BreakStmtClass: @@ -715,14 +714,19 @@ CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, mlir::Type condType, return buildCaseDefaultCascade(&S, condType, caseAttrs); } -mlir::LogicalResult -CIRGenFunction::buildSwitchCase(const SwitchCase &S, mlir::Type condType, - SmallVector &caseAttrs) { +mlir::LogicalResult CIRGenFunction::buildSwitchCase(const SwitchCase &S) { + assert(!caseAttrsStack.empty() && + "build switch case without seeting case attrs"); + assert(!condTypeStack.empty() && + "build switch case without specifying the type of the condition"); + if (S.getStmtClass() == Stmt::CaseStmtClass) - return buildCaseStmt(cast(S), condType, caseAttrs); + return buildCaseStmt(cast(S), condTypeStack.back(), + caseAttrsStack.back()); if (S.getStmtClass() == Stmt::DefaultStmtClass) - return buildDefaultStmt(cast(S), condType, caseAttrs); + return buildDefaultStmt(cast(S), condTypeStack.back(), + caseAttrsStack.back()); llvm_unreachable("expect case or default stmt"); } @@ -987,15 +991,13 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildSwitchBody( - const Stmt *S, mlir::Type condType, - llvm::SmallVector &caseAttrs) { +mlir::LogicalResult CIRGenFunction::buildSwitchBody(const Stmt *S) { if (auto *compoundStmt = dyn_cast(S)) { mlir::Block *lastCaseBlock = nullptr; auto res = mlir::success(); for (auto *c : compoundStmt->body()) { if (auto *switchCase = dyn_cast(c)) { - res = buildSwitchCase(*switchCase, condType, caseAttrs); + res = buildSwitchCase(*switchCase); lastCaseBlock = builder.getBlock(); } else if (lastCaseBlock) { // This means it's a random stmt following up a case, just @@ -1045,12 +1047,16 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { currLexScope->setAsSwitch(); - llvm::SmallVector caseAttrs; + caseAttrsStack.push_back({}); + condTypeStack.push_back(condV.getType()); - res = buildSwitchBody(S.getBody(), condV.getType(), caseAttrs); + res = buildSwitchBody(S.getBody()); os.addRegions(currLexScope->getSwitchRegions()); - os.addAttribute("cases", builder.getArrayAttr(caseAttrs)); + os.addAttribute("cases", builder.getArrayAttr(caseAttrsStack.back())); + + caseAttrsStack.pop_back(); + condTypeStack.pop_back(); }); if (res.failed()) diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 81eb4ec43e65..2200fc98cfac 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -310,3 +310,51 @@ extern "C" void multiple_non_case(int v) { // NOFLAT: cir.label // NOFLAT: cir.call @action2() // NOFLAT: cir.break + +extern "C" void case_follow_label(int v) { + switch (v) { + case 1: + label: + case 2: + action1(); + break; + default: + action2(); + goto label; + } +} + +// NOFLAT: cir.func @case_follow_label +// NOFLAT: cir.switch +// NOFLAT: case (equal, 1) +// NOFLAT: cir.label "label" +// NOFLAT: cir.yield +// NOFLAT: case (equal, 2) +// NOFLAT: cir.call @action1() +// NOFLAT: cir.break +// NOFLAT: case (default) +// NOFLAT: cir.call @action2() +// NOFLAT: cir.goto "label" + +extern "C" void default_follow_label(int v) { + switch (v) { + case 1: + case 2: + action1(); + break; + label: + default: + action2(); + goto label; + } +} + +// NOFLAT: cir.func @default_follow_label +// NOFLAT: cir.switch +// NOFLAT: case (anyof, [1, 2] : !s32i) +// NOFLAT: cir.call @action1() +// NOFLAT: cir.break +// NOFLAT: cir.label "label" +// NOFLAT: case (default) +// NOFLAT: cir.call @action2() +// NOFLAT: cir.goto "label" From 2d8473de0bfcb6765a2776bb8dc79d1f943ec782 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Wed, 25 Sep 2024 14:17:15 -0400 Subject: [PATCH 1888/2301] [CIR][CIRGen] Generate CIR for neon_vget and neon_vdup lane intrinsics (#884) as title. This PR has simliar test case organization as to [PR882](https://github.com/llvm/clangir/pull/882) Notice that comparing to OG, this PR combines cases for some pairs of intrinsics such as BI__builtin_neon_vget_lane_f32 and BI__builtin_neon_vdups_lane_f32. They have the same code generated in OG and CIRGen OG separate them into different case handling because it passes mnemonics which are different. CIRGen doesn't pass that so why not combine them. Co-authored-by: Guojin He --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 66 ++++-- .../test/CIR/CodeGen/aarch64-neon-vdup-lane.c | 216 +++++++++++++++++ clang/test/CIR/CodeGen/aarch64-neon-vget.c | 219 ++++++++++++++++++ 3 files changed, 485 insertions(+), 16 deletions(-) create mode 100644 clang/test/CIR/CodeGen/aarch64-neon-vdup-lane.c create mode 100644 clang/test/CIR/CodeGen/aarch64-neon-vget.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index cc715550c4cd..6cef5b8d5980 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2189,42 +2189,76 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vget_lane_i8: case NEON::BI__builtin_neon_vdupb_lane_i8: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt8Ty, 8)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i8: case NEON::BI__builtin_neon_vdupb_laneq_i8: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt8Ty, 16)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i16: case NEON::BI__builtin_neon_vduph_lane_i16: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt16Ty, 4)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i16: case NEON::BI__builtin_neon_vduph_laneq_i16: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt16Ty, 8)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i32: case NEON::BI__builtin_neon_vdups_lane_i32: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt32Ty, 2)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + case NEON::BI__builtin_neon_vget_lane_f32: case NEON::BI__builtin_neon_vdups_lane_f32: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), FloatTy, 2)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i32: case NEON::BI__builtin_neon_vdups_laneq_i32: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt32Ty, 4)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i64: case NEON::BI__builtin_neon_vdupd_lane_i64: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt64Ty, 1)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vdupd_lane_f64: - llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vget_lane_f64: + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), DoubleTy, 1)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i64: case NEON::BI__builtin_neon_vdupd_laneq_i64: - llvm_unreachable("NYI"); - case NEON::BI__builtin_neon_vget_lane_f32: - llvm_unreachable("NYI"); - case NEON::BI__builtin_neon_vget_lane_f64: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt64Ty, 2)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_f32: case NEON::BI__builtin_neon_vdups_laneq_f32: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), FloatTy, 4)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_f64: case NEON::BI__builtin_neon_vdupd_laneq_f64: - llvm_unreachable("NYI"); + Ops[0] = builder.createBitcast( + Ops[0], mlir::cir::VectorType::get(builder.getContext(), DoubleTy, 2)); + return builder.create( + getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vaddh_f16: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vsubh_f16: diff --git a/clang/test/CIR/CodeGen/aarch64-neon-vdup-lane.c b/clang/test/CIR/CodeGen/aarch64-neon-vdup-lane.c new file mode 100644 index 000000000000..4799e0931c55 --- /dev/null +++ b/clang/test/CIR/CodeGen/aarch64-neon-vdup-lane.c @@ -0,0 +1,216 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -emit-cir -target-feature +neon %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -emit-llvm -target-feature +neon %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// Tetsting normal situation of vdup lane intrinsics. + +// REQUIRES: aarch64-registered-target || arm-registered-target +#include + +int8_t test_vdupb_lane_s8(int8x8_t src) { + return vdupb_lane_s8(src, 7); +} + +// CIR-LABEL: test_vdupb_lane_s8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i8 @test_vdupb_lane_s8(<8 x i8> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i8>, i64 1, align 8 +// LLVM: store <8 x i8> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <8 x i8>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <8 x i8> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <8 x i8>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <8 x i8> [[INTRN_ARG]], i32 7 +// LLVM: ret i8 {{%.*}} + +int8_t test_vdupb_laneq_s8(int8x16_t a) { + return vdupb_laneq_s8(a, 15); +} + +// CIR-LABEL: test_vdupb_laneq_s8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<15> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i8 @test_vdupb_laneq_s8(<16 x i8> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <16 x i8>, i64 1, align 16 +// LLVM: store <16 x i8> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <16 x i8>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <16 x i8> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <16 x i8>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <16 x i8> [[INTRN_ARG]], i32 15 +// LLVM: ret i8 {{%.*}} + +int16_t test_vduph_lane_s16(int16x4_t src) { + return vduph_lane_s16(src, 3); +} + +// CIR-LABEL: test_vduph_lane_s16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + + +// LLVM: define dso_local i16 @test_vduph_lane_s16(<4 x i16> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i16>, i64 1, align 8 +// LLVM: store <4 x i16> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <4 x i16>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <4 x i16> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <4 x i16>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <4 x i16> [[INTRN_ARG]], i32 3 +// LLVM: ret i16 {{%.*}} + +int16_t test_vduph_laneq_s16(int16x8_t a) { + return vduph_laneq_s16(a, 7); +} + +// CIR-LABEL: test_vduph_laneq_s16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i16 @test_vduph_laneq_s16(<8 x i16> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i16>, i64 1, align 16 +// LLVM: store <8 x i16> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <8 x i16>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <8 x i16> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <8 x i16>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <8 x i16> [[INTRN_ARG]], i32 7 +// LLVM: ret i16 {{%.*}} + +int32_t test_vdups_lane_s32(int32x2_t a) { + return vdups_lane_s32(a, 1); +} + +// CIR-LABEL: test_vdups_lane_s32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i32 @test_vdups_lane_s32(<2 x i32> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i32>, i64 1, align 8 +// LLVM: store <2 x i32> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <2 x i32>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <2 x i32> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x i32>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <2 x i32> [[INTRN_ARG]], i32 1 +// LLVM: ret i32 {{%.*}} + +int32_t test_vdups_laneq_s32(int32x4_t a) { + return vdups_laneq_s32(a, 3); +} + +// CIR-LABEL: test_vdups_laneq_s32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i32 @test_vdups_laneq_s32(<4 x i32> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: store <4 x i32> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <4 x i32>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <4 x i32> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <4 x i32>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <4 x i32> [[INTRN_ARG]], i32 3 +// LLVM: ret i32 {{%.*}} + +int64_t test_vdupd_lane_s64(int64x1_t src) { + return vdupd_lane_s64(src, 0); +} + +// CIR-LABEL: test_vdupd_lane_s64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i64 @test_vdupd_lane_s64(<1 x i64> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <1 x i64>, i64 1, align 8 +// LLVM: store <1 x i64> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <1 x i64>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <1 x i64> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <1 x i64>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <1 x i64> [[INTRN_ARG]], i32 0 +// LLVM: ret i64 {{%.*}} + +int64_t test_vdupd_laneq_s64(int64x2_t a) { + return vdupd_laneq_s64(a, 1); +} + +// CIR-LABEL: test_vdupd_laneq_s64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i64 @test_vdupd_laneq_s64(<2 x i64> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i64>, i64 1, align 16 +// LLVM: store <2 x i64> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <2 x i64>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <2 x i64> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x i64>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <2 x i64> [[INTRN_ARG]], i32 1 +// LLVM: ret i64 {{%.*}} + +float32_t test_vdups_lane_f32(float32x2_t src) { + return vdups_lane_f32(src, 1); +} + +// CIR-LABEL: test_vdups_lane_f32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local float @test_vdups_lane_f32(<2 x float> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x float>, i64 1, align 8 +// LLVM: store <2 x float> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <2 x float>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <2 x float> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x float>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <2 x float> [[INTRN_ARG]], i32 1 +// LLVM: ret float {{%.*}} + +float64_t test_vdupd_lane_f64(float64x1_t src) { + return vdupd_lane_f64(src, 0); +} + +// CIR-LABEL: test_vdupd_lane_f64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local double @test_vdupd_lane_f64(<1 x double> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <1 x double>, i64 1, align 8 +// LLVM: store <1 x double> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <1 x double>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <1 x double> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <1 x double>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <1 x double> [[INTRN_ARG]], i32 0 +// LLVM: ret double {{%.*}} + +float32_t test_vdups_laneq_f32(float32x4_t src) { + return vdups_laneq_f32(src, 3); +} + +// CIR-LABEL: test_vdups_laneq_f32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local float @test_vdups_laneq_f32(<4 x float> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x float>, i64 1, align 16 +// LLVM: store <4 x float> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <4 x float>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <4 x float> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <4 x float>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <4 x float> [[INTRN_ARG]], i32 3 +// LLVM: ret float {{%.*}} + +float64_t test_vdupd_laneq_f64(float64x2_t src) { + return vdupd_laneq_f64(src, 1); +} + +// CIR-LABEL: test_vdupd_laneq_f64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local double @test_vdupd_laneq_f64(<2 x double> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x double>, i64 1, align 16 +// LLVM: store <2 x double> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <2 x double>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <2 x double> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x double>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <2 x double> [[INTRN_ARG]], i32 1 +// LLVM: ret double {{%.*}} diff --git a/clang/test/CIR/CodeGen/aarch64-neon-vget.c b/clang/test/CIR/CodeGen/aarch64-neon-vget.c new file mode 100644 index 000000000000..b16648691d1b --- /dev/null +++ b/clang/test/CIR/CodeGen/aarch64-neon-vget.c @@ -0,0 +1,219 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -emit-cir -target-feature +neon %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -emit-llvm -target-feature +neon %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// This test file contains test cases to those of +// clang/test/CodeGen/aarch64-neon-vget.c +// The difference is that this file only tests uses vget intrinsics, as we feel +// it would be proper to have a separate test file testing vset intrinsics +// with the file name aarch64-neon-vset.c + +// REQUIRES: aarch64-registered-target || arm-registered-target +#include + +uint8_t test_vget_lane_u8(uint8x8_t a) { + return vget_lane_u8(a, 7); +} + +// CIR-LABEL: test_vget_lane_u8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i8 @test_vget_lane_u8(<8 x i8> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i8>, i64 1, align 8 +// LLVM: store <8 x i8> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <8 x i8>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <8 x i8> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <8 x i8>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <8 x i8> [[INTRN_ARG]], i32 7 +// LLVM: ret i8 {{%.*}} + +uint8_t test_vgetq_lane_u8(uint8x16_t a) { + return vgetq_lane_u8(a, 15); +} + +// CIR-LABEL: test_vgetq_lane_u8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<15> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i8 @test_vgetq_lane_u8(<16 x i8> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <16 x i8>, i64 1, align 16 +// LLVM: store <16 x i8> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <16 x i8>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <16 x i8> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <16 x i8>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <16 x i8> [[INTRN_ARG]], i32 15 +// LLVM: ret i8 {{%.*}} + +uint16_t test_vget_lane_u16(uint16x4_t a) { + return vget_lane_u16(a, 3); +} + +// CIR-LABEL: test_vget_lane_u16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i16 @test_vget_lane_u16(<4 x i16> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i16>, i64 1, align 8 +// LLVM: store <4 x i16> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <4 x i16>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <4 x i16> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <4 x i16>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <4 x i16> [[INTRN_ARG]], i32 3 +// LLVM: ret i16 {{%.*}} + +uint16_t test_vgetq_lane_u16(uint16x8_t a) { + return vgetq_lane_u16(a, 7); +} + +// CIR-LABEL: test_vgetq_lane_u16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i16 @test_vgetq_lane_u16(<8 x i16> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i16>, i64 1, align 16 +// LLVM: store <8 x i16> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <8 x i16>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <8 x i16> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <8 x i16>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <8 x i16> [[INTRN_ARG]], i32 7 +// LLVM: ret i16 {{%.*}} + +uint32_t test_vget_lane_u32(uint32x2_t a) { + return vget_lane_u32(a, 1); +} + +// CIR-LABEL: test_vget_lane_u32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i32 @test_vget_lane_u32(<2 x i32> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i32>, i64 1, align 8 +// LLVM: store <2 x i32> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <2 x i32>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <2 x i32> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x i32>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <2 x i32> [[INTRN_ARG]], i32 1 +// LLVM: ret i32 {{%.*}} + +uint32_t test_vgetq_lane_u32(uint32x4_t a) { + return vgetq_lane_u32(a, 3); +} + +// CIR-LABEL: test_vgetq_lane_u32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i32 @test_vgetq_lane_u32(<4 x i32> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: store <4 x i32> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <4 x i32>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <4 x i32> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <4 x i32>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <4 x i32> [[INTRN_ARG]], i32 3 +// LLVM: ret i32 {{%.*}} + +uint64_t test_vget_lane_u64(uint64x1_t a) { + return vget_lane_u64(a, 0); +} + +// CIR-LABEL: test_vget_lane_u64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i64 @test_vget_lane_u64(<1 x i64> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <1 x i64>, i64 1, align 8 +// LLVM: store <1 x i64> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <1 x i64>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <1 x i64> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <1 x i64>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <1 x i64> [[INTRN_ARG]], i32 0 +// LLVM: ret i64 {{%.*}} + +uint64_t test_vgetq_lane_u64(uint64x2_t a) { + return vgetq_lane_u64(a, 1); +} + +// CIR-LABEL: test_vgetq_lane_u64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i64 @test_vgetq_lane_u64(<2 x i64> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i64>, i64 1, align 16 +// LLVM: store <2 x i64> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <2 x i64>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <2 x i64> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x i64>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <2 x i64> [[INTRN_ARG]], i32 1 +// LLVM: ret i64 {{%.*}} + +float32_t test_vget_lane_f32(float32x2_t a) { + return vget_lane_f32(a, 1); +} + +// CIR-LABEL: test_vget_lane_f32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local float @test_vget_lane_f32(<2 x float> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x float>, i64 1, align 8 +// LLVM: store <2 x float> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <2 x float>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <2 x float> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x float>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <2 x float> [[INTRN_ARG]], i32 1 +// LLVM: ret float {{%.*}} + +float64_t test_vget_lane_f64(float64x1_t a) { + return vget_lane_f64(a, 0); +} + +// CIR-LABEL: test_vget_lane_f64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local double @test_vget_lane_f64(<1 x double> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <1 x double>, i64 1, align 8 +// LLVM: store <1 x double> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <1 x double>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <1 x double> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <1 x double>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <1 x double> [[INTRN_ARG]], i32 0 +// LLVM: ret double {{%.*}} + +float32_t test_vgetq_lane_f32(float32x4_t a) { + return vgetq_lane_f32(a, 3); +} + +// CIR-LABEL: test_vgetq_lane_f32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local float @test_vgetq_lane_f32(<4 x float> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x float>, i64 1, align 16 +// LLVM: store <4 x float> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <4 x float>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <4 x float> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <4 x float>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <4 x float> [[INTRN_ARG]], i32 3 +// LLVM: ret float {{%.*}} + +float64_t test_vgetq_lane_f64(float64x2_t a) { + return vgetq_lane_f64(a, 1); +} + +// CIR-LABEL: test_vgetq_lane_f64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local double @test_vgetq_lane_f64(<2 x double> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x double>, i64 1, align 16 +// LLVM: store <2 x double> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <2 x double>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <2 x double> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x double>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <2 x double> [[INTRN_ARG]], i32 1 +// LLVM: ret double {{%.*}} From 41776c3d0e75328efc7d7df1ff6b9046fee2dd5d Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Wed, 25 Sep 2024 14:29:00 -0400 Subject: [PATCH 1889/2301] [CIR][CIRGen] Allow maybeSetTrivialComdat for GlobalOp (#885) as title, this would complete solution to fix issue [LLVM lowering missing comdat and constant attributes](https://github.com/llvm/clangir/issues/801) --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 13 +++++--- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- .../CIR/CodeGen/cxx1z-inline-variables.cpp | 31 ++++++++++++------- clang/test/CIR/CodeGen/weak.c | 2 +- 4 files changed, 29 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 995541395de5..73d66a789963 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1329,7 +1329,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, setTLSMode(GV, *D); } - // TODO(cir): maybeSetTrivialComdat(*D, *GV); + maybeSetTrivialComdat(*D, GV); // TODO(cir): // Emit the initializer function if necessary. @@ -3009,11 +3009,14 @@ bool CIRGenModule::supportsCOMDAT() const { return getTriple().supportsCOMDAT(); } -void CIRGenModule::maybeSetTrivialComdat(const Decl &D, mlir::Operation *Op) { - if (!shouldBeInCOMDAT(*this, D)) +void CIRGenModule::maybeSetTrivialComdat(const Decl &d, mlir::Operation *op) { + if (!shouldBeInCOMDAT(*this, d)) return; - - // TODO: Op.setComdat + auto globalOp = dyn_cast_or_null(op); + if (globalOp) + globalOp.setComdat(true); + // Keep it as missing feature as we need to implement comdat for FuncOp. + // in the future. assert(!MissingFeatures::setComdat() && "NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 1f55a83a767c..b980ed411c41 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -682,7 +682,7 @@ class CIRGenModule : public CIRGenTypeCache { clang::GlobalDecl &Result) const; bool supportsCOMDAT() const; - void maybeSetTrivialComdat(const clang::Decl &D, mlir::Operation *Op); + void maybeSetTrivialComdat(const clang::Decl &d, mlir::Operation *op); void emitError(const llvm::Twine &message) { theModule.emitError(message); } diff --git a/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp b/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp index f54519d72650..68cddd578767 100644 --- a/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp +++ b/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp @@ -26,18 +26,25 @@ const int &compat_use_after_redecl1 = compat::c; const int &compat_use_after_redecl2 = compat::d; const int &compat_use_after_redecl3 = compat::g; -// CIR: cir.global weak_odr @_ZN6compat1bE = #cir.int<2> : !s32i {alignment = 4 : i64} -// CIR: cir.global weak_odr @_ZN6compat1aE = #cir.int<1> : !s32i {alignment = 4 : i64} -// CIR: cir.global weak_odr @_ZN6compat1cE = #cir.int<3> : !s32i {alignment = 4 : i64} +// CIR: cir.global weak_odr comdat @_ZN6compat1bE = #cir.int<2> : !s32i {alignment = 4 : i64} +// CIR: cir.global weak_odr comdat @_ZN6compat1aE = #cir.int<1> : !s32i {alignment = 4 : i64} +// CIR: cir.global weak_odr comdat @_ZN6compat1cE = #cir.int<3> : !s32i {alignment = 4 : i64} // CIR: cir.global external @_ZN6compat1eE = #cir.int<5> : !s32i {alignment = 4 : i64} -// CIR: cir.global weak_odr @_ZN6compat1fE = #cir.int<6> : !s32i {alignment = 4 : i64} -// CIR: cir.global linkonce_odr @_ZN6compat1dE = #cir.int<4> : !s32i {alignment = 4 : i64} -// CIR: cir.global linkonce_odr @_ZN6compat1gE = #cir.int<7> : !s32i {alignment = 4 : i64} +// CIR: cir.global weak_odr comdat @_ZN6compat1fE = #cir.int<6> : !s32i {alignment = 4 : i64} +// CIR: cir.global linkonce_odr comdat @_ZN6compat1dE = #cir.int<4> : !s32i {alignment = 4 : i64} +// CIR: cir.global linkonce_odr comdat @_ZN6compat1gE = #cir.int<7> : !s32i {alignment = 4 : i64} -// LLVM: @_ZN6compat1bE = weak_odr global i32 2, align 4 -// LLVM: @_ZN6compat1aE = weak_odr global i32 1, align 4 -// LLVM: @_ZN6compat1cE = weak_odr global i32 3, align 4 +// LLVM: $_ZN6compat1bE = comdat any +// LLVM: $_ZN6compat1aE = comdat any +// LLVM: $_ZN6compat1cE = comdat any +// LLVM: $_ZN6compat1fE = comdat any +// LLVM: $_ZN6compat1dE = comdat any +// LLVM: $_ZN6compat1gE = comdat any + +// LLVM: @_ZN6compat1bE = weak_odr global i32 2, comdat, align 4 +// LLVM: @_ZN6compat1aE = weak_odr global i32 1, comdat, align 4 +// LLVM: @_ZN6compat1cE = weak_odr global i32 3, comdat, align 4 // LLVM: @_ZN6compat1eE = global i32 5, align 4 -// LLVM: @_ZN6compat1fE = weak_odr global i32 6, align 4 -// LLVM: @_ZN6compat1dE = linkonce_odr global i32 4, align 4 -// LLVM: @_ZN6compat1gE = linkonce_odr global i32 7, align 4 +// LLVM: @_ZN6compat1fE = weak_odr global i32 6, comdat, align 4 +// LLVM: @_ZN6compat1dE = linkonce_odr global i32 4, comdat, align 4 +// LLVM: @_ZN6compat1gE = linkonce_odr global i32 7, comdat, align 4 diff --git a/clang/test/CIR/CodeGen/weak.c b/clang/test/CIR/CodeGen/weak.c index 25ebf15f8a89..398ac47c73f9 100644 --- a/clang/test/CIR/CodeGen/weak.c +++ b/clang/test/CIR/CodeGen/weak.c @@ -23,7 +23,7 @@ void active (void) // LLVM-NEXT: call void @B() int __attribute__((selectany)) y; -// CIR: cir.global weak_odr @y +// CIR: cir.global weak_odr comdat @y int __attribute__((weak)) x; // CIR: cir.global weak From ce1185e143f827e81ffb20e8b66b7bff38f0228a Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 27 Sep 2024 00:42:43 -0400 Subject: [PATCH 1890/2301] [CIR][CIRGen][Builtin][Neon] Lower neon vqadd_v (#890) as title. Also add function buildCommonNeonBuiltinExpr just like OG's emitCommonNeonBuiltinExpr. This might help consolidate neon cases and share common code. Notice: - I pretty much keep the skeleton of OG's emitCommonNeonBuiltinExpr at the cost of that we didn't use a few variables they calculate. They might help in the future. - The purpose of having CommonNeonBuiltinExpr is to reduce implementation code duplication. So far, we only have one type implemented, and it's hard for CIR to be more generic. But we should see if in future we can have different types of intrinsics share more generic code path. --------- Co-authored-by: Guojin He --- clang/include/clang/CIR/MissingFeatures.h | 3 + clang/lib/CIR/CodeGen/ABIInfo.h | 2 + .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 59 +++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 + clang/lib/CIR/CodeGen/TargetInfo.cpp | 6 + clang/test/CIR/CodeGen/aarch64-neon-vqadd.c | 179 ++++++++++++++++++ 6 files changed, 250 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/aarch64-neon-vqadd.c diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 2577af98e5e5..3540300d622c 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -136,6 +136,9 @@ struct MissingFeatures { // AArch64 Neon builtin related. static bool buildNeonShiftVector() { return false; } + // ABIInfo queries. + static bool useTargetLoweringABIInfo() { return false; } + // Misc static bool cacheRecordLayouts() { return false; } static bool capturedByInit() { return false; } diff --git a/clang/lib/CIR/CodeGen/ABIInfo.h b/clang/lib/CIR/CodeGen/ABIInfo.h index 5a2e3ff56ca4..a4cd7a5a666c 100644 --- a/clang/lib/CIR/CodeGen/ABIInfo.h +++ b/clang/lib/CIR/CodeGen/ABIInfo.h @@ -35,6 +35,8 @@ class ABIInfo { virtual void computeInfo(CIRGenFunctionInfo &FI) const = 0; + virtual bool allowBFloatArgsAndRet() const { return false; } + // Implement the Type::IsPromotableIntegerType for ABI specific needs. The // only difference is that this consideres bit-precise integer types as well. bool isPromotableIntegerTypeForABI(clang::QualType Ty) const; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 6cef5b8d5980..39f6473fcfa8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -1603,7 +1603,7 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf, llvm::SmallVector argTypes, - llvm::SmallVector args, + llvm::SmallVectorImpl &args, llvm::StringRef intrinsicName, mlir::Type funcResTy, mlir::Location loc, bool isConstrainedFPIntrinsic = false, @@ -1640,6 +1640,55 @@ mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf, } } +mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( + unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, + const char *nameHint, unsigned modifier, const CallExpr *e, + llvm::SmallVectorImpl &ops, cir::Address ptrOp0, + cir::Address ptrOp1, llvm::Triple::ArchType arch) { + // Get the last argument, which specifies the vector type. + const clang::Expr *arg = e->getArg(e->getNumArgs() - 1); + std::optional neonTypeConst = + arg->getIntegerConstantExpr(getContext()); + if (!neonTypeConst) + return nullptr; + + // Determine the type of this overloaded NEON intrinsic. + NeonTypeFlags neonType(neonTypeConst->getZExtValue()); + bool isUnsigned = neonType.isUnsigned(); + bool isQuad = neonType.isQuad(); + const bool hasLegalHalfType = getTarget().hasLegalHalfType(); + // The value of allowBFloatArgsAndRet is true for AArch64, but it should + // come from ABI info. + const bool allowBFloatArgsAndRet = + getTargetHooks().getABIInfo().allowBFloatArgsAndRet(); + + mlir::Type vTy = GetNeonType(this, neonType, hasLegalHalfType, false, + allowBFloatArgsAndRet); + if (!vTy) + return nullptr; + + unsigned intrinicId = llvmIntrinsic; + if ((modifier & UnsignedAlts) && !isUnsigned) + intrinicId = altLLVMIntrinsic; + + switch (builtinID) { + default: + llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vqadd_v: + mlir::Value res = buildNeonCall(builtinID, *this, {vTy, vTy}, ops, + (intrinicId != altLLVMIntrinsic) + ? "llvm.aarch64.neon.uqadd" + : "llvm.aarch64.neon.sqadd", + vTy, getLoc(e->getExprLoc())); + mlir::Type resultType = ConvertType(e->getType()); + // AArch64 intrinsic one-element vector type cast to + // scalar type expected by the builtin + return builder.createBitcast(res, resultType); + break; + } + return nullptr; +} + mlir::Value CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, @@ -2359,9 +2408,11 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, // defer to common code if it's been added to our special map. Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, AArch64SIMDIntrinsicsProvenSorted); - if (Builtin) { - llvm_unreachable("NYI"); - } + if (Builtin) + return buildCommonNeonBuiltinExpr( + Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, + Builtin->NameHint, Builtin->TypeModifier, E, Ops, + /*never use addresses*/ Address::invalid(), Address::invalid(), Arch); if (mlir::Value V = buildAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 8156d8fad059..048a0c17e24a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -980,6 +980,11 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value buildARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch); + mlir::Value buildCommonNeonBuiltinExpr( + unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, + const char *nameHint, unsigned modifier, const CallExpr *e, + llvm::SmallVectorImpl &ops, cir::Address ptrOp0, + cir::Address ptrOp1, llvm::Triple::ArchType arch); mlir::Value buildAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index a802abe18313..9ef7531406b6 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -5,6 +5,7 @@ #include "CIRGenTypes.h" #include "clang/Basic/TargetInfo.h" +#include "clang/CIR/MissingFeatures.h" #include "clang/CIR/Target/x86.h" using namespace cir; @@ -103,6 +104,11 @@ class AArch64ABIInfo : public ABIInfo { public: AArch64ABIInfo(CIRGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {} + virtual bool allowBFloatArgsAndRet() const override { + // TODO: Should query target info instead of hardcoding. + assert(!cir::MissingFeatures::useTargetLoweringABIInfo()); + return true; + } private: ABIKind getABIKind() const { return Kind; } diff --git a/clang/test/CIR/CodeGen/aarch64-neon-vqadd.c b/clang/test/CIR/CodeGen/aarch64-neon-vqadd.c new file mode 100644 index 000000000000..0932d95866c5 --- /dev/null +++ b/clang/test/CIR/CodeGen/aarch64-neon-vqadd.c @@ -0,0 +1,179 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -emit-cir -target-feature +neon %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -emit-llvm -target-feature +neon %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// Tetsting normal situation of vdup lane intrinsics. + +// REQUIRES: aarch64-registered-target || arm-registered-target +#include + +uint8x8_t test_vqadd_u8(uint8x8_t a, uint8x8_t b) { + return vqadd_u8(a,b); +} + +// CIR-LABEL: vqadd_u8 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: cir.return + +// LLVM: {{.*}}test_vqadd_u8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]]) +// LLVM: store <8 x i8> [[A]], ptr [[A_ADDR:%.*]], align 8 +// LLVM: store <8 x i8> [[B]], ptr [[B_ADDR:%.*]], align 8 +// LLVM: [[TMP_A:%.*]] = load <8 x i8>, ptr [[A_ADDR]], align 8 +// LLVM: [[TMP_B:%.*]] = load <8 x i8>, ptr [[B_ADDR]], align 8 +// LLVM: store <8 x i8> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 +// LLVM: store <8 x i8> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 +// LLVM: [[INTRN_A:%.*]] = load <8 x i8>, ptr [[P0_ADDR]], align 8 +// LLVM: [[INTRN_B:%.*]] = load <8 x i8>, ptr [[P1_ADDR]], align 8 +// LLVM: {{%.*}} = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> [[INTRN_A]], <8 x i8> [[INTRN_B]]) +// LLVM: ret <8 x i8> + +int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { + return vqadd_s8(a,b); +} + +// CIR-LABEL: vqadd_s8 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: cir.return + +// LLVM: {{.*}}test_vqadd_s8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]]) +// LLVM: store <8 x i8> [[A]], ptr [[A_ADDR:%.*]], align 8 +// LLVM: store <8 x i8> [[B]], ptr [[B_ADDR:%.*]], align 8 +// LLVM: [[TMP_A:%.*]] = load <8 x i8>, ptr [[A_ADDR]], align 8 +// LLVM: [[TMP_B:%.*]] = load <8 x i8>, ptr [[B_ADDR]], align 8 +// LLVM: store <8 x i8> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 +// LLVM: store <8 x i8> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 +// LLVM: [[INTRN_A:%.*]] = load <8 x i8>, ptr [[P0_ADDR]], align 8 +// LLVM: [[INTRN_B:%.*]] = load <8 x i8>, ptr [[P1_ADDR]], align 8 +// LLVM: {{%.*}} = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> [[INTRN_A]], <8 x i8> [[INTRN_B]]) +// LLVM: ret <8 x i8> + +uint16x4_t test_vqadd_u16(uint16x4_t a, uint16x4_t b) { + return vqadd_u16(a,b); +} + +// CIR-LABEL: vqadd_u16 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: cir.return + +// LLVM: {{.*}}test_vqadd_u16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]]) +// LLVM: store <4 x i16> [[A]], ptr [[A_ADDR:%.*]], align 8 +// LLVM: store <4 x i16> [[B]], ptr [[B_ADDR:%.*]], align 8 +// LLVM: [[TMP_A:%.*]] = load <4 x i16>, ptr [[A_ADDR]], align 8 +// LLVM: [[TMP_B:%.*]] = load <4 x i16>, ptr [[B_ADDR]], align 8 +// LLVM: store <4 x i16> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 +// LLVM: store <4 x i16> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 +// LLVM: [[INTRN_A:%.*]] = load <4 x i16>, ptr [[P0_ADDR]], align 8 +// LLVM: [[INTRN_B:%.*]] = load <4 x i16>, ptr [[P1_ADDR]], align 8 +// LLVM: {{%.*}} = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> [[INTRN_A]], <4 x i16> [[INTRN_B]]) +// LLVM: ret <4 x i16> + +int16x4_t test_vqadd_s16(int16x4_t a, int16x4_t b) { + return vqadd_s16(a,b); +} + +// CIR-LABEL: vqadd_u16 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: cir.return + +// LLVM: {{.*}}test_vqadd_s16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]]) +// LLVM: store <4 x i16> [[A]], ptr [[A_ADDR:%.*]], align 8 +// LLVM: store <4 x i16> [[B]], ptr [[B_ADDR:%.*]], align 8 +// LLVM: [[TMP_A:%.*]] = load <4 x i16>, ptr [[A_ADDR]], align 8 +// LLVM: [[TMP_B:%.*]] = load <4 x i16>, ptr [[B_ADDR]], align 8 +// LLVM: store <4 x i16> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 +// LLVM: store <4 x i16> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 +// LLVM: [[INTRN_A:%.*]] = load <4 x i16>, ptr [[P0_ADDR]], align 8 +// LLVM: [[INTRN_B:%.*]] = load <4 x i16>, ptr [[P1_ADDR]], align 8 +// LLVM: {{%.*}} = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> [[INTRN_A]], <4 x i16> [[INTRN_B]]) +// LLVM: ret <4 x i16> + +uint32x2_t test_vqadd_u32(uint32x2_t a, uint32x2_t b) { + return vqadd_u32(a,b); +} + +// CIR-LABEL: vqadd_u32 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: cir.return + +// LLVM: {{.*}}test_vqadd_u32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]]) +// LLVM: store <2 x i32> [[A]], ptr [[A_ADDR:%.*]], align 8 +// LLVM: store <2 x i32> [[B]], ptr [[B_ADDR:%.*]], align 8 +// LLVM: [[TMP_A:%.*]] = load <2 x i32>, ptr [[A_ADDR]], align 8 +// LLVM: [[TMP_B:%.*]] = load <2 x i32>, ptr [[B_ADDR]], align 8 +// LLVM: store <2 x i32> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 +// LLVM: store <2 x i32> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 +// LLVM: [[INTRN_A:%.*]] = load <2 x i32>, ptr [[P0_ADDR]], align 8 +// LLVM: [[INTRN_B:%.*]] = load <2 x i32>, ptr [[P1_ADDR]], align 8 +// LLVM: {{%.*}} = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> [[INTRN_A]], <2 x i32> [[INTRN_B]]) +// LLVM: ret <2 x i32> + +int32x2_t test_vqadd_s32(int32x2_t a, int32x2_t b) { + return vqadd_s32(a,b); +} + +// CIR-LABEL: vqadd_s32 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: cir.return + +// LLVM: {{.*}}test_vqadd_s32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]]) +// LLVM: store <2 x i32> [[A]], ptr [[A_ADDR:%.*]], align 8 +// LLVM: store <2 x i32> [[B]], ptr [[B_ADDR:%.*]], align 8 +// LLVM: [[TMP_A:%.*]] = load <2 x i32>, ptr [[A_ADDR]], align 8 +// LLVM: [[TMP_B:%.*]] = load <2 x i32>, ptr [[B_ADDR]], align 8 +// LLVM: store <2 x i32> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 +// LLVM: store <2 x i32> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 +// LLVM: [[INTRN_A:%.*]] = load <2 x i32>, ptr [[P0_ADDR]], align 8 +// LLVM: [[INTRN_B:%.*]] = load <2 x i32>, ptr [[P1_ADDR]], align 8 +// LLVM: {{%.*}} = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> [[INTRN_A]], <2 x i32> [[INTRN_B]]) +// LLVM: ret <2 x i32> + +uint64x1_t test_vqadd_u64(uint64x1_t a, uint64x1_t b) { + return vqadd_u64(a,b); +} + +// CIR-LABEL: vqadd_u64 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: cir.return + +// LLVM: {{.*}}test_vqadd_u64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]]) +// LLVM: store <1 x i64> [[A]], ptr [[A_ADDR:%.*]], align 8 +// LLVM: store <1 x i64> [[B]], ptr [[B_ADDR:%.*]], align 8 +// LLVM: [[TMP_A:%.*]] = load <1 x i64>, ptr [[A_ADDR]], align 8 +// LLVM: [[TMP_B:%.*]] = load <1 x i64>, ptr [[B_ADDR]], align 8 +// LLVM: store <1 x i64> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 +// LLVM: store <1 x i64> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 +// LLVM: [[INTRN_A:%.*]] = load <1 x i64>, ptr [[P0_ADDR]], align 8 +// LLVM: [[INTRN_B:%.*]] = load <1 x i64>, ptr [[P1_ADDR]], align 8 +// LLVM: {{%.*}} = call <1 x i64> @llvm.aarch64.neon.uqadd.v1i64(<1 x i64> [[INTRN_A]], <1 x i64> [[INTRN_B]]) +// LLVM: ret <1 x i64> + +int64x1_t test_vqadd_s64(int64x1_t a, int64x1_t b) { + return vqadd_s64(a,b); +} + +// CIR-LABEL: vqadd_s64 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: cir.return + +// LLVM: {{.*}}test_vqadd_s64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]]) +// LLVM: store <1 x i64> [[A]], ptr [[A_ADDR:%.*]], align 8 +// LLVM: store <1 x i64> [[B]], ptr [[B_ADDR:%.*]], align 8 +// LLVM: [[TMP_A:%.*]] = load <1 x i64>, ptr [[A_ADDR]], align 8 +// LLVM: [[TMP_B:%.*]] = load <1 x i64>, ptr [[B_ADDR]], align 8 +// LLVM: store <1 x i64> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 +// LLVM: store <1 x i64> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 +// LLVM: [[INTRN_A:%.*]] = load <1 x i64>, ptr [[P0_ADDR]], align 8 +// LLVM: [[INTRN_B:%.*]] = load <1 x i64>, ptr [[P1_ADDR]], align 8 +// LLVM: {{%.*}} = call <1 x i64> @llvm.aarch64.neon.sqadd.v1i64(<1 x i64> [[INTRN_A]], <1 x i64> [[INTRN_B]]) +// LLVM: ret <1 x i64> From 9db5b060baa6bfed72f6bf9d004a084f1374570c Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 24 Sep 2024 12:57:20 -0700 Subject: [PATCH 1891/2301] [CIR][CIRGen][NFC] Split cir.scope creation on buildReturnStmt --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 1b0829c8e8bb..7bcfac452878 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -524,13 +524,22 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { else { mlir::Location scopeLoc = getLoc(RV ? RV->getSourceRange() : S.getSourceRange()); + // First create cir.scope and later emit it's body. Otherwise all CIRGen + // dispatched by `handleReturnVal()` might needs to manipulate blocks and + // look into parents, which are all unlinked. + mlir::OpBuilder::InsertPoint scopeBody; builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScope lexScope{*this, loc, - builder.getInsertionBlock()}; - handleReturnVal(); + scopeBody = b.saveInsertionPoint(); }); + { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.restoreInsertionPoint(scopeBody); + CIRGenFunction::LexicalScope lexScope{*this, scopeLoc, + builder.getInsertionBlock()}; + handleReturnVal(); + } } // Create a new return block (if not existent) and add a branch to From 0601159a7b0320b16a00d4a95eed6a3fde6b45c7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 24 Sep 2024 15:21:13 -0700 Subject: [PATCH 1892/2301] [CIR][NFC] Add helpers for cir.try and do some refactoring --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 9 ++++++++ clang/lib/CIR/CodeGen/CIRGenException.cpp | 13 ++---------- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 21 +++++++++++++++++++ .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 10 ++------- 4 files changed, 34 insertions(+), 19 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7ff25bb35f2f..1057e0308aee 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3570,6 +3570,15 @@ def TryOp : CIR_Op<"try", attr-dict }]; + let extraClassDeclaration = [{ + private: + mlir::Region *getCatchLastRegion(); + public: + mlir::Block *getCatchAllEntryBlock(); + mlir::Block *getCatchUnwindEntryBlock(); + bool isCatchAllOnly(); + }]; + // Everything already covered elsewhere. let hasVerifier = 0; let builders = [ diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index d7cea55dd462..38a94d6f6e19 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -252,16 +252,6 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { DeactivateCleanupBlock(cleanup, op); } -static mlir::Block *getResumeBlockFromCatch(mlir::cir::TryOp &tryOp, - mlir::cir::GlobalOp globalParent) { - assert(tryOp && "cir.try expected"); - unsigned numCatchRegions = tryOp.getCatchRegions().size(); - assert(numCatchRegions && "expected at least one region"); - auto &fallbackRegion = tryOp.getCatchRegions()[numCatchRegions - 1]; - return &fallbackRegion.getBlocks().back(); - return nullptr; -} - mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup, mlir::cir::TryOp tryOp) { @@ -270,7 +260,8 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup, // Just like some other try/catch related logic: return the basic block // pointer but only use it to denote we're tracking things, but there // shouldn't be any changes to that block after work done in this function. - ehResumeBlock = getResumeBlockFromCatch(tryOp, CGM.globalOpContext); + assert(tryOp && "expected available cir.try"); + ehResumeBlock = tryOp.getCatchUnwindEntryBlock(); if (!ehResumeBlock->empty()) return ehResumeBlock; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7f1914d1b6e3..93ceb0acce99 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1243,6 +1243,27 @@ void TryOp::build( catchBuilder(builder, result.location, result); } +mlir::Region *TryOp::getCatchLastRegion() { + unsigned numCatchRegions = getCatchRegions().size(); + assert(numCatchRegions && "expected at least one region"); + auto &lastRegion = getCatchRegions()[numCatchRegions - 1]; + return &lastRegion; +} + +mlir::Block *TryOp::getCatchUnwindEntryBlock() { + return &getCatchLastRegion()->getBlocks().front(); +} + +mlir::Block *TryOp::getCatchAllEntryBlock() { + return &getCatchLastRegion()->getBlocks().front(); +} + +bool TryOp::isCatchAllOnly() { + mlir::ArrayAttr catchAttrList = getCatchTypesAttr(); + return catchAttrList.size() == 1 && + isa(catchAttrList[0]); +} + void TryOp::getSuccessorRegions(mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // If any index all the underlying regions branch back to the parent diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index f3a39f5eb019..aa46b4b92643 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -418,8 +418,6 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Do not update `nextDispatcher`, no more business in try/catch } else if (auto catchUnwind = dyn_cast(catchAttr)) { - // assert(dispatcher->empty() && "expect empty dispatcher"); - // assert(!dispatcher->args_empty() && "expected block argument"); assert(dispatcher->getArguments().size() == 2 && "expected two block argument"); buildUnwindCase(rewriter, catchRegion, dispatcher); @@ -440,15 +438,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { rewriter.setInsertionPointToEnd(beforeCatch); rewriter.replaceOpWithNewOp(tryBodyYield, afterTry); - // Retrieve catch list and some properties. - mlir::ArrayAttr catchAttrList = tryOp.getCatchTypesAttr(); - bool tryOnlyHasCatchAll = catchAttrList.size() == 1 && - isa(catchAttrList[0]); - // Start the landing pad by getting the inflight exception information. mlir::Block *nextDispatcher = buildLandingPads(tryOp, rewriter, beforeCatch, afterTry, callsToRewrite, - landingPads, tryOnlyHasCatchAll); + landingPads, tryOp.isCatchAllOnly()); // Fill in dispatcher to all catch clauses. rewriter.setInsertionPointToEnd(nextDispatcher); @@ -456,6 +449,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { unsigned catchIdx = 0; // Build control-flow for all catch clauses. + mlir::ArrayAttr catchAttrList = tryOp.getCatchTypesAttr(); for (mlir::Attribute catchAttr : catchAttrList) { mlir::Attribute nextCatchAttr; if (catchIdx + 1 < catchAttrList.size()) From 643063d428952597fdbc5124a4b03a0412394d25 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 27 Sep 2024 18:48:43 -0400 Subject: [PATCH 1893/2301] [CIR][CIRGen][Builtin] Allow CIRGen for builtin calls with math errorno override (#893) As title. The test case used is abort(), but it is from the real code. Notice: Since CIR implementation for NoReturn Call is pending to implement, the generated llvm code is like: `define dso_local void @test() #1 { call void @abort(), !dbg !8 ret void }` which is not right, right code should be like, ` `define dso_local void @test() #1 { call void @abort(), !dbg !8 unreachable }` ` Still send this PR as Noreturn implementation is a separate issue. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 6 ++++-- clang/test/CIR/CodeGen/builtin-abort.c | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-abort.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 49b40d763fdc..0d6766d3f372 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -337,7 +337,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // '#pragma float_control(precise, on)'. This pragma disables fast-math, // which implies math-errno. if (E->hasStoredFPFeatures()) { - llvm_unreachable("NYI"); + FPOptionsOverride OP = E->getFPFeatures(); + if (OP.hasMathErrnoOverride()) + ErrnoOverriden = OP.getMathErrnoOverride(); } // True if 'atttibute__((optnone)) is used. This attibute overrides // fast-math which implies math-errno. @@ -1627,4 +1629,4 @@ mlir::cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *FD, auto Ty = getTypes().ConvertType(FD->getType()); return GetOrCreateCIRFunction(Name, Ty, D, /*ForVTable=*/false); -} \ No newline at end of file +} diff --git a/clang/test/CIR/CodeGen/builtin-abort.c b/clang/test/CIR/CodeGen/builtin-abort.c new file mode 100644 index 000000000000..d60d0efedd50 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-abort.c @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +void abort(); +void test() { abort(); } + +// TODO: Add test to test unreachable when CIR support for NORETURN is added. + +// CIR-LABEL: test +// CIR: cir.call @abort() : () -> () + +// LLVM-LABEL: test +// LLVM: call void @abort() From e5855c8790e5cd48d96215f085141b0f422f3584 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 27 Sep 2024 18:49:16 -0400 Subject: [PATCH 1894/2301] [CIR][CIRGen] Support __builtin_huge_val for float type (#889) as title. The test cases are from [clang codegen test case](https://github.com/llvm/clangir/blob/52323c17c6a3708b3eb72651465f7d4b82f057e7/clang/test/CodeGen/builtins.c#L37) --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 9 ++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 12 +++++++++-- clang/test/CIR/CodeGen/builtins.c | 28 +++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtins.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 674fb4d781ba..1ccf2f1e0b07 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -536,6 +536,15 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::Type t, uint64_t C); + + mlir::cir::ConstantOp getConstFP(mlir::Location loc, mlir::Type t, + llvm::APFloat fpVal) { + assert((mlir::isa(t)) && + "expected mlir::cir::SingleType or mlir::cir::DoubleType"); + return create(loc, t, + getAttr(t, fpVal)); + } + /// Create constant nullptr for pointer-to-data-member type ty. mlir::cir::ConstantOp getNullDataMemberPtr(mlir::cir::DataMemberType ty, mlir::Location loc) { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 0d6766d3f372..1959b02ff193 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -313,8 +313,16 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(builder.getConstInt(getLoc(E->getSourceRange()), Result.Val.getInt())); } - if (Result.Val.isFloat()) - llvm_unreachable("NYI"); + if (Result.Val.isFloat()) { + // Note: we are using result type of CallExpr to determine the type of + // the constant. Clang Codegen uses the result value to make judgement + // of the type. We feel it should be Ok to use expression type because + // it is hard to imagine a builtin function evaluates to + // a value that over/underflows its own defined type. + mlir::Type resTy = getCIRType(E->getType()); + return RValue::get(builder.getConstFP(getLoc(E->getExprLoc()), resTy, + Result.Val.getFloat())); + } } // If current long-double semantics is IEEE 128-bit, replace math builtins diff --git a/clang/test/CIR/CodeGen/builtins.c b/clang/test/CIR/CodeGen/builtins.c new file mode 100644 index 000000000000..c843f50bbdda --- /dev/null +++ b/clang/test/CIR/CodeGen/builtins.c @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck -check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + + +void test1() { + float f; + double d; + f = __builtin_huge_valf(); + d = __builtin_huge_val(); +} + +// CIR-LABEL: test1 +// CIR: [[F:%.*]] = cir.alloca !cir.float, !cir.ptr, ["f"] {alignment = 4 : i64} +// CIR: [[D:%.*]] = cir.alloca !cir.double, !cir.ptr, ["d"] {alignment = 8 : i64} +// CIR: [[F_VAL:%.*]] = cir.const #cir.fp<0x7F800000> : !cir.float +// CIR: cir.store [[F_VAL]], [[F]] : !cir.float, !cir.ptr +// CIR: [[D_VAL:%.*]] = cir.const #cir.fp<0x7FF0000000000000> : !cir.double +// CIR: cir.store [[D_VAL]], [[D]] : !cir.double, !cir.ptr loc(#loc17) +// CIR: cir.return + +// LLVM-LABEL: test1 +// [[F:%.*]] = alloca float, align 4 +// [[D:%.*]] = alloca double, align 8 +// store float 0x7FF0000000000000, ptr [[F]], align 4 +// store double 0x7FF0000000000000, ptr[[D]], align 8 +// ret void From 869e71565d4b8269ac7281f54c1b429e1772dc30 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 27 Sep 2024 15:51:48 -0700 Subject: [PATCH 1895/2301] [CIR][NFC] Rename test --- clang/test/CIR/CodeGen/{builtins.c => builtin-constant-fold.c} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename clang/test/CIR/CodeGen/{builtins.c => builtin-constant-fold.c} (100%) diff --git a/clang/test/CIR/CodeGen/builtins.c b/clang/test/CIR/CodeGen/builtin-constant-fold.c similarity index 100% rename from clang/test/CIR/CodeGen/builtins.c rename to clang/test/CIR/CodeGen/builtin-constant-fold.c From 68d7b94f71a0ea8367597255ff224caced1f84ae Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Fri, 27 Sep 2024 22:36:40 -0300 Subject: [PATCH 1896/2301] [CIR][ABI] Apply CC lowering pass by default (#842) Before this patch, the CC lowering pass was applied only when explicitly requested by the user. This update changes the default behavior to always apply the CC lowering pass, with an option to disable it using the `-fno-clangir-call-conv-lowering` flag if necessary. The primary objective is to make this pass a mandatory step in the compilation pipeline. This ensures that future contributions correctly implement the CC lowering for both existing and new targets, resulting in more consistent and accurate code generation. From an implementation perspective, several `llvm_unreachable` statements have been substituted with a new `assert_or_abort` macro. This macro can be configured to either trigger a non-blocking assertion or a blocking unreachable statement. This facilitates a test-by-testa incremental development as it does not required you to know which code path a test will trigger an just cause a crash if it does. A few notable changes: - Support multi-block function in CC lowering - Ignore pointer-related CC lowering - Ignore no-proto functions CC lowering - Handle missing type evaluation kinds - Fix CC lowering for function declarations - Unblock indirect function calls - Disable CC lowering pass on several tests --- clang/include/clang/CIR/Dialect/Passes.h | 2 +- clang/include/clang/CIR/MissingFeatures.h | 41 ++++++++ clang/include/clang/Driver/Options.td | 11 ++- .../include/clang/Frontend/FrontendOptions.h | 2 +- clang/lib/CIR/CodeGen/CIRPasses.cpp | 11 +-- .../Dialect/Transforms/CallConvLowering.cpp | 8 +- .../Transforms/TargetLowering/ABIInfo.cpp | 2 +- .../Transforms/TargetLowering/ABIInfoImpl.cpp | 7 +- .../TargetLowering/CIRLowerContext.cpp | 17 ++-- .../TargetLowering/CIRRecordLayout.cpp | 6 +- .../TargetLowering/CIRToCIRArgMapping.h | 10 +- .../TargetLowering/ItaniumCXXABI.cpp | 6 +- .../Transforms/TargetLowering/LowerCall.cpp | 36 +++---- .../TargetLowering/LowerFunction.cpp | 95 +++++++++++-------- .../TargetLowering/LowerFunctionInfo.h | 9 +- .../Transforms/TargetLowering/LowerModule.cpp | 13 ++- .../Transforms/TargetLowering/LowerModule.h | 2 +- .../Transforms/TargetLowering/LowerTypes.cpp | 14 +-- .../TargetLowering/RecordLayoutBuilder.cpp | 57 +++++------ .../TargetLowering/Targets/AArch64.cpp | 8 +- .../Targets/LoweringPrepareAArch64CXXABI.cpp | 52 +++++----- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 12 +-- .../TargetLowering/Targets/SPIR.cpp | 2 +- .../Transforms/TargetLowering/Targets/X86.cpp | 36 ++++--- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 6 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- clang/lib/Frontend/CompilerInvocation.cpp | 2 +- clang/test/CIR/CodeGen/global-new.cpp | 2 +- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 2 +- clang/test/CIR/Lowering/try-catch.cpp | 2 +- .../aarch64-call-conv-lowering-pass.cpp | 2 +- .../x86_64/x86_64-call-conv-lowering-pass.cpp | 2 +- 32 files changed, 282 insertions(+), 197 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 67e9da2246b6..c9b936ca98fb 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -42,7 +42,7 @@ std::unique_ptr createGotoSolverPass(); /// Create a pass to lower ABI-independent function definitions/calls. std::unique_ptr createCallConvLoweringPass(); -void populateCIRPreLoweringPasses(mlir::OpPassManager &pm); +void populateCIRPreLoweringPasses(mlir::OpPassManager &pm, bool useCCLowering); //===----------------------------------------------------------------------===// // Registration diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 3540300d622c..e60ae97b8570 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -15,6 +15,27 @@ #ifndef CLANG_CIR_MISSINGFEATURES_H #define CLANG_CIR_MISSINGFEATURES_H +constexpr bool cirMissingFeatureAssertionMode = + true; // Change to `false` to use llvm_unreachable + +#define NOTE \ + " Target lowering is now required. Disable it with " \ + "-fno-clangir-call-conv-lowering." + +// Special assertion to be used in the target lowering library. +#define cir_tl_assert(cond) assert((cond) && NOTE); + +// Some assertions knowingly generate incorrect code. This macro allows us to +// switch between using `assert` and `llvm_unreachable` for these cases. +#define cir_assert_or_abort(cond, msg) \ + do { \ + if (cirMissingFeatureAssertionMode) { \ + assert((cond) && msg NOTE); \ + } else { \ + llvm_unreachable(msg NOTE); \ + } \ + } while (0) + namespace cir { struct MissingFeatures { @@ -212,6 +233,26 @@ struct MissingFeatures { //===--- ABI lowering --===// + static bool SPIRVABI() { return false; } + + static bool AArch64TypeClassification() { return false; } + + static bool X86ArgTypeClassification() { return false; } + static bool X86DefaultABITypeConvertion() { return false; } + static bool X86GetFPTypeAtOffset() { return false; } + static bool X86RetTypeClassification() { return false; } + static bool X86TypeClassification() { return false; } + + static bool ABIClangTypeKind() { return false; } + static bool ABIEnterStructForCoercedAccess() { return false; } + static bool ABIFuncPtr() { return false; } + static bool ABIInRegAttribute() { return false; } + static bool ABINestedRecordLayout() { return false; } + static bool ABINoProtoFunctions() { return false; } + static bool ABIParameterCoercion() { return false; } + static bool ABIPointerParameterAttrs() { return false; } + static bool ABITransparentUnionHandling() { return false; } + //-- Missing AST queries static bool CXXRecordDeclIsEmptyCXX11() { return false; } diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index e5010ef3a066..ff05be57a99f 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3079,10 +3079,6 @@ def fclangir_lib_opt : Flag<["-"], "fclangir-lib-opt">, Visibility<[ClangOption, CC1Option]>, Group, Alias, HelpText<"Enable C/C++ library based optimizations">; -def fclangir_call_conv_lowering : Flag<["-"], "fclangir-call-conv-lowering">, - Visibility<[ClangOption, CC1Option]>, Group, - HelpText<"Enable ClangIR calling convention lowering">, - MarshallingInfoFlag>; def fclangir_mem2reg : Flag<["-"], "fclangir-mem2reg">, Visibility<[ClangOption, CC1Option]>, Group, HelpText<"Enable mem2reg on the flat ClangIR">, @@ -3113,6 +3109,13 @@ defm clangir_analysis_only : BoolFOption<"clangir-analysis-only", PosFlag, NegFlag>; +// FIXME(cir): Remove this option once all pre-existing tests are compatible with +// the calling convention lowering pass. +defm clangir_call_conv_lowering : BoolFOption<"clangir-call-conv-lowering", + FrontendOpts<"ClangIRCallConvLowering">, DefaultTrue, + PosFlag, + NegFlag, + BothFlags<[], [ClangOption, CC1Option], "">>; def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, Group, HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index b9e4d09df222..64664f41c879 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -449,7 +449,7 @@ class FrontendOptions { unsigned ClangIRLibOpt : 1; // Enable Clang IR call conv lowering pass. - unsigned ClangIREnableCallConvLowering : 1; + unsigned ClangIRCallConvLowering : 1; // Enable Clang IR mem2reg pass on the flat CIR. unsigned ClangIREnableMem2Reg : 1; diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 4f89daa1cee4..d56a7cc61e52 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -71,13 +71,8 @@ mlir::LogicalResult runCIRToCIRPasses( pm.addPass(mlir::createLoweringPreparePass(&astCtx)); - // FIXME(cir): This pass should run by default, but it is lacking support for - // several code bits. Once it's more mature, we should fix this. - if (enableCallConvLowering) - pm.addPass(mlir::createCallConvLoweringPass()); - if (flattenCIR || enableMem2Reg) - mlir::populateCIRPreLoweringPasses(pm); + mlir::populateCIRPreLoweringPasses(pm, enableCallConvLowering); if (enableMem2Reg) pm.addPass(mlir::createMem2Reg()); @@ -97,7 +92,9 @@ mlir::LogicalResult runCIRToCIRPasses( namespace mlir { -void populateCIRPreLoweringPasses(OpPassManager &pm) { +void populateCIRPreLoweringPasses(OpPassManager &pm, bool useCCLowering) { + if (useCCLowering) + pm.addPass(createCallConvLoweringPass()); pm.addPass(createFlattenCFGPass()); pm.addPass(createGotoSolverPass()); } diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 3a4b9b397c5b..f7e4410010ec 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -6,7 +6,6 @@ // //===----------------------------------------------------------------------===// - #include "TargetLowering/LowerModule.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/IR/BuiltinOps.h" @@ -14,6 +13,7 @@ #include "mlir/Pass/Pass.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/MissingFeatures.h" #define GEN_PASS_DEF_CALLCONVLOWERING #include "clang/CIR/Dialect/Passes.h.inc" @@ -44,6 +44,12 @@ struct CallConvLoweringPattern : public OpRewritePattern { auto calls = op.getSymbolUses(module); if (calls.has_value()) { for (auto call : calls.value()) { + // FIXME(cir): Function pointers are ignored. + if (isa(call.getUser())) { + cir_assert_or_abort(!::cir::MissingFeatures::ABIFuncPtr(), "NYI"); + continue; + } + auto callOp = cast(call.getUser()); if (lowerModule->rewriteFunctionCall(callOp, op).failed()) return failure(); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 4e2a81de9fc1..6cb69c7eeb88 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -37,7 +37,7 @@ bool ABIInfo::isPromotableIntegerTypeForABI(Type Ty) const { if (getContext().isPromotableIntegerType(Ty)) return true; - assert(!::cir::MissingFeatures::fixedWidthIntegers()); + cir_tl_assert(!::cir::MissingFeatures::fixedWidthIntegers()); return false; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index 041c801dbe2e..38f9fb8ffaa4 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -26,21 +26,22 @@ bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, Type Ty = FI.getReturnType(); if (const auto RT = dyn_cast(Ty)) { - assert(!::cir::MissingFeatures::isCXXRecordDecl()); + cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl()); } return CXXABI.classifyReturnType(FI); } bool isAggregateTypeForABI(Type T) { - assert(!::cir::MissingFeatures::functionMemberPointerType()); + cir_tl_assert(!::cir::MissingFeatures::functionMemberPointerType()); return !LowerFunction::hasScalarEvaluationKind(T); } Type useFirstFieldIfTransparentUnion(Type Ty) { if (auto RT = dyn_cast(Ty)) { if (RT.isUnion()) - llvm_unreachable("NYI"); + cir_assert_or_abort( + !::cir::MissingFeatures::ABITransparentUnionHandling(), "NYI"); } return Ty; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index 42aae0a80d04..ecca0db0deb9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -55,7 +55,10 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { } else if (isa(T)) { typeKind = clang::Type::Record; } else { - llvm_unreachable("Unhandled type class"); + cir_assert_or_abort(!::cir::MissingFeatures::ABIClangTypeKind(), + "Unhandled type class"); + // FIXME(cir): Completely wrong. Just here to make it non-blocking. + typeKind = clang::Type::Builtin; } // FIXME(cir): Here we fetch the width and alignment of a type considering the @@ -96,10 +99,10 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { } case clang::Type::Record: { const auto RT = dyn_cast(T); - assert(!::cir::MissingFeatures::tagTypeClassAbstraction()); + cir_tl_assert(!::cir::MissingFeatures::tagTypeClassAbstraction()); // Only handle TagTypes (names types) for now. - assert(RT.getName() && "Anonymous record is NYI"); + cir_tl_assert(RT.getName() && "Anonymous record is NYI"); // NOTE(cir): Clang does some hanlding of invalid tagged declarations here. // Not sure if this is necessary in CIR. @@ -111,14 +114,14 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { const CIRRecordLayout &Layout = getCIRRecordLayout(RT); Width = toBits(Layout.getSize()); Align = toBits(Layout.getAlignment()); - assert(!::cir::MissingFeatures::recordDeclHasAlignmentAttr()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclHasAlignmentAttr()); break; } default: llvm_unreachable("Unhandled type class"); } - assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); + cir_tl_assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); return clang::TypeInfo(Width, Align, AlignRequirement); } @@ -126,7 +129,7 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { Type Ty; // NOTE(cir): Clang does more stuff here. Not sure if we need to do the same. - assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); switch (K) { case clang::BuiltinType::Char_S: Ty = IntType::get(getMLIRContext(), 8, true); @@ -141,7 +144,7 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { void CIRLowerContext::initBuiltinTypes(const clang::TargetInfo &Target, const clang::TargetInfo *AuxTarget) { - assert((!this->Target || this->Target == &Target) && + cir_tl_assert((!this->Target || this->Target == &Target) && "Incorrect target reinitialization"); this->Target = &Target; this->AuxTarget = AuxTarget; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp index 2744f67d19de..bd964d654267 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp @@ -38,16 +38,16 @@ CIRRecordLayout::CIRRecordLayout( FieldOffsets.insert(FieldOffsets.end(), fieldoffsets.begin(), fieldoffsets.end()); - assert(!PrimaryBase && "Layout for class with inheritance is NYI"); + cir_tl_assert(!PrimaryBase && "Layout for class with inheritance is NYI"); // CXXInfo->PrimaryBase.setPointer(PrimaryBase); - assert(!IsPrimaryBaseVirtual && "Layout for virtual base class is NYI"); + cir_tl_assert(!IsPrimaryBaseVirtual && "Layout for virtual base class is NYI"); // CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual); CXXInfo->NonVirtualSize = nonvirtualsize; CXXInfo->NonVirtualAlignment = nonvirtualalignment; CXXInfo->PreferredNVAlignment = preferrednvalignment; CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject; // FIXME(cir): Initialize base classes offsets. - assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); CXXInfo->HasOwnVFPtr = hasOwnVFPtr; CXXInfo->VBPtrOffset = vbptroffset; CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index dd09122b94d9..664fd05ea658 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -58,7 +58,7 @@ class CIRToCIRArgMapping { unsigned totalIRArgs() const { return TotalIRArgs; } bool hasPaddingArg(unsigned ArgNo) const { - assert(ArgNo < ArgInfo.size()); + cir_tl_assert(ArgNo < ArgInfo.size()); return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; } @@ -77,7 +77,7 @@ class CIRToCIRArgMapping { onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); for (LowerFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; ++I, ++ArgNo) { - assert(I != FI.arg_end()); + cir_tl_assert(I != FI.arg_end()); // Type ArgType = I->type; const ::cir::ABIArgInfo &AI = I->info; // Collect data about IR arguments corresponding to Clang argument ArgNo. @@ -91,7 +91,7 @@ class CIRToCIRArgMapping { case ::cir::ABIArgInfo::Extend: case ::cir::ABIArgInfo::Direct: { // FIXME(cir): handle sseregparm someday... - assert(AI.getCoerceToType() && "Missing coerced type!!"); + cir_tl_assert(AI.getCoerceToType() && "Missing coerced type!!"); StructType STy = dyn_cast(AI.getCoerceToType()); if (AI.isDirect() && AI.getCanBeFlattened() && STy) { llvm_unreachable("NYI"); @@ -114,7 +114,7 @@ class CIRToCIRArgMapping { if (IRArgNo == 1 && SwapThisWithSRet) IRArgNo++; } - assert(ArgNo == ArgInfo.size()); + cir_tl_assert(ArgNo == ArgInfo.size()); if (::cir::MissingFeatures::inallocaArgs()) { llvm_unreachable("NYI"); @@ -126,7 +126,7 @@ class CIRToCIRArgMapping { /// Returns index of first IR argument corresponding to ArgNo, and their /// quantity. std::pair getIRArgs(unsigned ArgNo) const { - assert(ArgNo < ArgInfo.size()); + cir_tl_assert(ArgNo < ArgInfo.size()); return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, ArgInfo[ArgNo].NumberOfArgs); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index c0add1ecc1df..3cd27c35cf55 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -46,9 +46,9 @@ class ItaniumCXXABI : public CIRCXXABI { // FIXME(cir): This expects a CXXRecordDecl! Not any record type. RecordArgABI getRecordArgABI(const StructType RD) const override { - assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); // If C++ prohibits us from making a copy, pass by address. - assert(!::cir::MissingFeatures::recordDeclCanPassInRegisters()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclCanPassInRegisters()); return RAA_Default; } }; @@ -76,7 +76,7 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { case clang::TargetCXXABI::AppleARM64: // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits // from ARMCXXABI. We'll have to follow suit. - assert(!::cir::MissingFeatures::appleArm64CXXABI()); + cir_tl_assert(!::cir::MissingFeatures::appleArm64CXXABI()); return new ItaniumCXXABI(LM, /*UseARMMethodPtrABI=*/true, /*UseARMGuardVarABI=*/true); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 42de07ec6965..af036efef8cc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -23,9 +23,9 @@ const LowerFunctionInfo & arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, const OperandRange &args, const FuncType fnType, unsigned numExtraRequiredArgs, bool chainCall) { - assert(args.size() >= numExtraRequiredArgs); + cir_tl_assert(args.size() >= numExtraRequiredArgs); - assert(!::cir::MissingFeatures::extParamInfo()); + cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); // In most cases, there are no optional arguments. RequiredArgs required = RequiredArgs::All; @@ -35,7 +35,7 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, // FIXME(cir): Properly check if function is no-proto. if (/*IsPrototypedFunction=*/true) { if (fnType.isVarArg()) - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::isVarArg(), "NYI"); if (::cir::MissingFeatures::extParamInfo()) llvm_unreachable("NYI"); @@ -45,7 +45,7 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, // its skipped here since it requires CodeGen info. Maybe this information // could be embbed in the FuncOp during CIRGen. - assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); + cir_tl_assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; return LT.arrangeLLVMFunctionInfo(fnType.getReturnType(), opts, fnType.getInputs(), required); @@ -60,7 +60,7 @@ static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { return; } - assert(MissingFeatures::extParamInfo()); + cir_tl_assert(MissingFeatures::extParamInfo()); llvm_unreachable("NYI"); } @@ -74,11 +74,11 @@ static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { static const LowerFunctionInfo & arrangeCIRFunctionInfo(LowerTypes &CGT, bool instanceMethod, SmallVectorImpl &prefix, FuncType fnTy) { - assert(!MissingFeatures::extParamInfo()); + cir_tl_assert(!MissingFeatures::extParamInfo()); RequiredArgs Required = RequiredArgs::forPrototypePlus(fnTy, prefix.size()); // FIXME: Kill copy. appendParameterTypes(prefix, fnTy); - assert(!MissingFeatures::qualifiedTypes()); + cir_tl_assert(!MissingFeatures::qualifiedTypes()); Type resultType = fnTy.getReturnType(); FnInfoOpts opts = @@ -110,7 +110,7 @@ void LowerModule::constructAttributeList(StringRef Name, // TODO(cir): Implement AddAttributesFromFunctionProtoType here. // TODO(cir): Implement AddAttributesFromOMPAssumes here. - assert(!MissingFeatures::openMP()); + cir_tl_assert(!MissingFeatures::openMP()); // TODO(cir): Skipping a bunch of AST queries here. We will need to partially // implement some of them as this section sets target-specific attributes @@ -147,8 +147,8 @@ void LowerModule::constructAttributeList(StringRef Name, [[fallthrough]]; case ABIArgInfo::Direct: if (RetAI.getInReg()) - llvm_unreachable("InReg attribute is NYI"); - assert(!::cir::MissingFeatures::noFPClass()); + cir_assert_or_abort(!::cir::MissingFeatures::ABIInRegAttribute(), "NYI"); + cir_tl_assert(!::cir::MissingFeatures::noFPClass()); break; case ABIArgInfo::Ignore: break; @@ -216,7 +216,7 @@ void LowerModule::constructAttributeList(StringRef Name, else if (AI.getInReg()) llvm_unreachable("InReg attribute is NYI"); // Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); - assert(!::cir::MissingFeatures::noFPClass()); + cir_tl_assert(!::cir::MissingFeatures::noFPClass()); break; default: llvm_unreachable("Missing ABIArgInfo::Kind"); @@ -227,7 +227,7 @@ void LowerModule::constructAttributeList(StringRef Name, } // TODO(cir): Missing some swift and nocapture stuff here. - assert(!::cir::MissingFeatures::extParamInfo()); + cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); if (!Attrs.empty()) { unsigned FirstIRArg, NumIRArgs; @@ -236,7 +236,7 @@ void LowerModule::constructAttributeList(StringRef Name, newFn.setArgAttrs(FirstIRArg + i, Attrs); } } - assert(ArgNo == FI.arg_size()); + cir_tl_assert(ArgNo == FI.arg_size()); } /// Arrange the argument and result information for the declaration or @@ -245,15 +245,15 @@ const LowerFunctionInfo &LowerTypes::arrangeFunctionDeclaration(FuncOp fnOp) { if (MissingFeatures::funcDeclIsCXXMethodDecl()) llvm_unreachable("NYI"); - assert(!MissingFeatures::qualifiedTypes()); + cir_tl_assert(!MissingFeatures::qualifiedTypes()); FuncType FTy = fnOp.getFunctionType(); - assert(!MissingFeatures::CUDA()); + cir_tl_assert(!MissingFeatures::CUDA()); // When declaring a function without a prototype, always use a // non-variadic type. if (fnOp.getNoProto()) { - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); } return arrangeFreeFunctionType(FTy); @@ -300,12 +300,12 @@ const LowerFunctionInfo & LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, ArrayRef argTypes, RequiredArgs required) { - assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); LowerFunctionInfo *FI = nullptr; // FIXME(cir): Allow user-defined CCs (e.g. __attribute__((vectorcall))). - assert(!::cir::MissingFeatures::extParamInfo()); + cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); unsigned CC = clangCallConvToLLVMCallConv(clang::CallingConv::CC_C); // Construct the function info. We co-allocate the ArgInfos. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 9e90c44a7d76..749c91b9d8d1 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -63,7 +63,10 @@ Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, FirstEltSize < CGF.LM.getDataLayout().getTypeStoreSize(SrcSTy)) return SrcPtr; - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::ABIEnterStructForCoercedAccess(), + "NYI"); + return SrcPtr; // FIXME: This is a temporary workaround for the assertion + // above. } /// Create a store to \param Dst from \param Src where the source and @@ -80,13 +83,13 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, } // FIXME(cir): We need a better way to handle datalayout queries. - assert(isa(SrcTy)); + cir_tl_assert(isa(SrcTy)); llvm::TypeSize SrcSize = CGF.LM.getDataLayout().getTypeAllocSize(SrcTy); if (StructType DstSTy = dyn_cast(DstTy)) { Dst = enterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize.getFixedValue(), CGF); - assert(isa(Dst.getType())); + cir_tl_assert(isa(Dst.getType())); DstTy = cast(Dst.getType()).getPointee(); } @@ -107,7 +110,7 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, llvm::TypeSize DstSize = CGF.LM.getDataLayout().getTypeAllocSize(DstTy); // If store is legal, just bitcast the src pointer. - assert(!::cir::MissingFeatures::vectorType()); + cir_tl_assert(!::cir::MissingFeatures::vectorType()); if (SrcSize.getFixedValue() <= DstSize.getFixedValue()) { // Dst = Dst.withElementType(SrcTy); CGF.buildAggregateStore(Src, Dst, DstIsVolatile); @@ -257,14 +260,14 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // are dealt with in CIRGen. CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), FI); - assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); + cir_tl_assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); // If we're using inalloca, all the memory arguments are GEPs off of the last // parameter, which is a pointer to the complete memory area. - assert(!::cir::MissingFeatures::inallocaArgs()); + cir_tl_assert(!::cir::MissingFeatures::inallocaArgs()); // Name the struct return parameter. - assert(!::cir::MissingFeatures::sretArgs()); + cir_tl_assert(!::cir::MissingFeatures::sretArgs()); // Track if we received the parameter as a pointer (indirect, byval, or // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it @@ -272,11 +275,18 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, SmallVector ArgVals; ArgVals.reserve(Args.size()); + // FIXME(cir): non-blocking workaround for argument types that are not yet + // properly handled by the ABI. + if (cirMissingFeatureAssertionMode && FI.arg_size() != Args.size()) { + cir_tl_assert(::cir::MissingFeatures::ABIParameterCoercion()); + return success(); + } + // Create a pointer value for every parameter declaration. This usually // entails copying one or more LLVM IR arguments into an alloca. Don't push // any cleanups or do anything that might unwind. We do that separately, so // we can push the cleanups in the correct order for the ABI. - assert(FI.arg_size() == Args.size()); + cir_tl_assert(FI.arg_size() == Args.size()); unsigned ArgNo = 0; LowerFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); for (MutableArrayRef::const_iterator i = Args.begin(), @@ -294,7 +304,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, llvm_unreachable("NYI"); else Ty = Arg.getType(); - assert(!::cir::MissingFeatures::evaluationKind()); + cir_tl_assert(!::cir::MissingFeatures::evaluationKind()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -310,14 +320,15 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // http://llvm.org/docs/LangRef.html#paramattrs. if (ArgI.getDirectOffset() == 0 && isa(LTy) && isa(ArgI.getCoerceToType())) { - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::ABIPointerParameterAttrs(), + "NYI"); } // Prepare the argument value. If we have the trivial case, handle it // with no muss and fuss. if (!isa(ArgI.getCoerceToType()) && ArgI.getCoerceToType() == Ty && ArgI.getDirectOffset() == 0) { - assert(NumIRArgs == 1); + cir_tl_assert(NumIRArgs == 1); // LLVM expects swifterror parameters to be used in very restricted // ways. Copy the value into a less-restricted temporary. @@ -344,7 +355,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, break; } - assert(!::cir::MissingFeatures::vectorType()); + cir_tl_assert(!::cir::MissingFeatures::vectorType()); // Allocate original argument to be "uncoerced". // FIXME(cir): We should have a alloca op builder that does not required @@ -366,7 +377,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, llvm_unreachable("NYI"); } else { // Simple case, just do a coerced store of the argument into the alloca. - assert(NumIRArgs == 1); + cir_tl_assert(NumIRArgs == 1); Value AI = Fn.getArgument(FirstIRArg); // TODO(cir): Set argument name in the new function. createCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); @@ -385,7 +396,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // RAUW the original argument alloca with the new one. This assumes that // the argument is used only to be stored in a alloca. Value arg = SrcFn.getArgument(ArgNo); - assert(arg.hasOneUse()); + cir_tl_assert(arg.hasOneUse()); auto *firstStore = *arg.user_begin(); auto argAlloca = cast(firstStore).getAddr(); rewriter.replaceAllUsesWith(argAlloca, Alloca); @@ -471,28 +482,33 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { /// focuses on the ABI-specific details. So a lot of codegen stuff is removed. LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, const LowerFunctionInfo &FnInfo) { - assert(newFn && "generating code for null Function"); + cir_tl_assert(newFn && "generating code for null Function"); auto Args = oldFn.getArguments(); // Emit the ABI-specific function prologue. - assert(newFn.empty() && "Function already has a body"); + cir_tl_assert(newFn.empty() && "Function already has a body"); rewriter.setInsertionPointToEnd(newFn.addEntryBlock()); if (buildFunctionProlog(FnInfo, newFn, oldFn.getArguments()).failed()) return failure(); // Ensure that old ABI-agnostic arguments uses were replaced. const auto hasNoUses = [](Value val) { return val.getUses().empty(); }; - assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && "Missing RAUW?"); + cir_tl_assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && "Missing RAUW?"); + + // NOTE(cir): While the new function has the ABI-aware parameters, the old + // function still has the function logic. To complete the migration, we have + // to move the old function body to the new function. + + // Backup references to entry blocks. + Block *srcBlock = &oldFn.getBody().front(); + Block *dstBlock = &newFn.getBody().front(); // Migrate function body to new ABI-aware function. - assert(oldFn.getBody().hasOneBlock() && - "Multiple blocks in original function not supported"); + rewriter.inlineRegionBefore(oldFn.getBody(), newFn.getBody(), + newFn.getBody().end()); - // Move old function body to new function. - // FIXME(cir): The merge below is not very good: will not work if SrcFn has - // multiple blocks and it mixes the new and old prologues. - rewriter.mergeBlocks(&oldFn.getBody().front(), &newFn.getBody().front(), - newFn.getArguments()); + // Merge entry blocks to ensure correct branching. + rewriter.mergeBlocks(srcBlock, dstBlock, newFn.getArguments()); // FIXME(cir): What about saving parameters for corotines? Should we do // something about it in this pass? If the change with the calling @@ -511,14 +527,14 @@ void LowerFunction::buildAggregateStore(Value Val, Value Dest, // Function to store a first-class aggregate into memory. We prefer to // store the elements rather than the aggregate to be more friendly to // fast-isel. - assert(mlir::isa(Dest.getType()) && "Storing in a non-pointer!"); + cir_tl_assert(mlir::isa(Dest.getType()) && "Storing in a non-pointer!"); (void)DestIsVolatile; // Circumvent CIR's type checking. Type pointeeTy = mlir::cast(Dest.getType()).getPointee(); if (Val.getType() != pointeeTy) { // NOTE(cir): We only bitcast and store if the types have the same size. - assert((LM.getDataLayout().getTypeSizeInBits(Val.getType()) == + cir_tl_assert((LM.getDataLayout().getTypeSizeInBits(Val.getType()) == LM.getDataLayout().getTypeSizeInBits(pointeeTy)) && "Incompatible types"); auto loc = Val.getLoc(); @@ -552,7 +568,7 @@ LogicalResult LowerFunction::rewriteCallOp(CallOp op, // NOTE(cir): There is no direct way to fetch the function type from the // CallOp, so we fetch it from the source function. This assumes the // function definition has not yet been lowered. - assert(SrcFn && "No source function"); + cir_tl_assert(SrcFn && "No source function"); auto fnType = SrcFn.getFunctionType(); // Rewrite the call operation to abide to the ABI calling convention. @@ -610,10 +626,10 @@ Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, // Chain calls use this same code path to add the invisible chain parameter // to the function type. if (origCallee.getNoProto() || Chain) { - llvm_unreachable("NYI"); + cir_assert_or_abort(::cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); } - assert(!::cir::MissingFeatures::CUDA()); + cir_tl_assert(!::cir::MissingFeatures::CUDA()); // TODO(cir): LLVM IR has the concept of "CallBase", which is a base class // for all types of calls. Perhaps we should have a CIR interface to mimic @@ -665,12 +681,12 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, llvm_unreachable("NYI"); } - assert(!::cir::MissingFeatures::swift()); + cir_tl_assert(!::cir::MissingFeatures::swift()); // NOTE(cir): Skipping lifetime markers here. // Translate all of the arguments as necessary to match the IR lowering. - assert(CallInfo.arg_size() == CallArgs.size() && + cir_tl_assert(CallInfo.arg_size() == CallArgs.size() && "Mismatch between function signature & arguments."); unsigned ArgNo = 0; LowerFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); @@ -696,7 +712,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!isa(ArgInfo.getCoerceToType()) && ArgInfo.getCoerceToType() == info_it->type && ArgInfo.getDirectOffset() == 0) { - assert(NumIRArgs == 1); + cir_tl_assert(NumIRArgs == 1); Value V; if (!isa(I->getType())) { V = *I; @@ -742,11 +758,11 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, llvm_unreachable("NYI"); } else { // In the simple case, just pass the coerced loaded value. - assert(NumIRArgs == 1); + cir_tl_assert(NumIRArgs == 1); Value Load = createCoercedValue(Src, ArgInfo.getCoerceToType(), *this); // FIXME(cir): We should probably handle CMSE non-secure calls here - assert(!::cir::MissingFeatures::cmseNonSecureCallAttr()); + cir_tl_assert(!::cir::MissingFeatures::cmseNonSecureCallAttr()); // since they are a ARM-specific feature. if (::cir::MissingFeatures::undef()) @@ -771,7 +787,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // debugging stuff here. // Update the largest vector width if any arguments have vector types. - assert(!::cir::MissingFeatures::vectorType()); + cir_tl_assert(!::cir::MissingFeatures::vectorType()); // Compute the calling convention and attributes. @@ -797,7 +813,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, rewriter.getAttr(rewriter.getDictionaryAttr({})); newCallOp->setAttr("extra_attrs", extraAttrs); - assert(!::cir::MissingFeatures::vectorType()); + cir_tl_assert(!::cir::MissingFeatures::vectorType()); // NOTE(cir): Skipping some ObjC, tail-call, debug, and attribute stuff // here. @@ -847,7 +863,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // FIXME(cir): Use return value slot here. Value RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. - assert(!::cir::MissingFeatures::volatileTypes()); + cir_tl_assert(!::cir::MissingFeatures::volatileTypes()); // NOTE(cir): If the function returns, there should always be a valid // return value present. Instead of setting the return value here, we @@ -855,7 +871,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!RetVal) { RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. - assert(::cir::MissingFeatures::volatileTypes()); + cir_tl_assert(::cir::MissingFeatures::volatileTypes()); } // An empty record can overlap other data (if declared with @@ -897,7 +913,8 @@ ::cir::TypeEvaluationKind LowerFunction::getEvaluationKind(Type type) { // FIXME(cir): Implement type classes for CIR types. if (isa(type)) return ::cir::TypeEvaluationKind::TEK_Aggregate; - if (isa(type)) + if (isa(type)) return ::cir::TypeEvaluationKind::TEK_Scalar; llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index c81335c9985a..0c30c955a6c1 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -35,7 +35,7 @@ class RequiredArgs { enum All_t { All }; RequiredArgs(All_t _) : NumRequired(~0U) {} - explicit RequiredArgs(unsigned n) : NumRequired(n) { assert(n != ~0U); } + explicit RequiredArgs(unsigned n) : NumRequired(n) { cir_tl_assert(n != ~0U); } /// Compute the arguments required by the given formal prototype, /// given that there may be some additional, non-formal arguments @@ -47,7 +47,8 @@ class RequiredArgs { if (!prototype.isVarArg()) return All; - llvm_unreachable("Variadic function is NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::variadicFunctions(), "NYI"); + return All; // FIXME(cir): Temporary workaround for the assertion above. } bool allowsOptionalArgs() const { return NumRequired != ~0U; } @@ -105,7 +106,7 @@ class LowerFunctionInfo final ArrayRef argTypes, RequiredArgs required) { // TODO(cir): Add assertions? - assert(!::cir::MissingFeatures::extParamInfo()); + cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); void *buffer = operator new(totalSizeToAlloc(argTypes.size() + 1)); LowerFunctionInfo *FI = new (buffer) LowerFunctionInfo(); @@ -146,7 +147,7 @@ class LowerFunctionInfo final unsigned arg_size() const { return NumArgs; } bool isVariadic() const { - assert(!::cir::MissingFeatures::variadicFunctions()); + cir_tl_assert(!::cir::MissingFeatures::variadicFunctions()); return false; } unsigned getNumRequiredArgs() const { diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 715a5f2470d7..d0d88fa52008 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -199,10 +199,13 @@ LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { llvm_unreachable("ExtraAttrs are NYI"); } - if (LowerFunction(*this, rewriter, op, newFn) - .generateCode(op, newFn, FI) - .failed()) - return failure(); + // Is a function definition: handle the body. + if (!op.isDeclaration()) { + if (LowerFunction(*this, rewriter, op, newFn) + .generateCode(op, newFn, FI) + .failed()) + return failure(); + } // Erase original ABI-agnostic function. rewriter.eraseOp(op); @@ -239,7 +242,7 @@ std::unique_ptr createLowerModule(ModuleOp module, // FIXME(cir): This just uses the default language options. We need to account // for custom options. // Create context. - assert(!::cir::MissingFeatures::langOpts()); + cir_tl_assert(!::cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; return std::make_unique(langOpts, module, dataLayoutStr, diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index 44cd5a0ae1cb..a7f3e1fa187a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -68,7 +68,7 @@ class LowerModule { // FIXME(cir): This would be in ASTContext, not CodeGenModule. clang::TargetCXXABI::Kind getCXXABIKind() const { auto kind = getTarget().getCXXABI().getKind(); - assert(!::cir::MissingFeatures::langOpts()); + cir_tl_assert(!::cir::MissingFeatures::langOpts()); return kind; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index bdec98a64f43..fa1e34140167 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -60,10 +60,10 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); // Add type for sret argument. - assert(!::cir::MissingFeatures::sretArgs()); + cir_tl_assert(!::cir::MissingFeatures::sretArgs()); // Add type for inalloca argument. - assert(!::cir::MissingFeatures::inallocaArgs()); + cir_tl_assert(!::cir::MissingFeatures::inallocaArgs()); // Add in all of the required arguments. unsigned ArgNo = 0; @@ -72,7 +72,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { for (; it != ie; ++it, ++ArgNo) { const ABIArgInfo &ArgInfo = it->info; - assert(!::cir::MissingFeatures::argumentPadding()); + cir_tl_assert(!::cir::MissingFeatures::argumentPadding()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -85,11 +85,11 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { Type argType = ArgInfo.getCoerceToType(); StructType st = dyn_cast(argType); if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { - assert(NumIRArgs == st.getNumElements()); + cir_tl_assert(NumIRArgs == st.getNumElements()); for (unsigned i = 0, e = st.getNumElements(); i != e; ++i) ArgTypes[FirstIRArg + i] = st.getMembers()[i]; } else { - assert(NumIRArgs == 1); + cir_tl_assert(NumIRArgs == 1); ArgTypes[FirstIRArg] = argType; } break; @@ -117,5 +117,7 @@ mlir::Type LowerTypes::convertType(Type T) { } llvm::outs() << "Missing default ABI-specific type for " << T << "\n"; - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::X86DefaultABITypeConvertion(), + "NYI"); + return T; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index ea8ef6f28144..55484923f1b3 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -58,11 +58,11 @@ class EmptySubobjectMap { void EmptySubobjectMap::ComputeEmptySubobjectSizes() { // Check the bases. - assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); // Check the fields. for (const auto FT : Class.getMembers()) { - assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); const auto RT = dyn_cast(FT); // We only care about record types. @@ -70,7 +70,8 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() { continue; // TODO(cir): Handle nested record types. - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::ABINestedRecordLayout(), + "NYI"); } } @@ -206,7 +207,7 @@ class ItaniumRecordLayoutBuilder { bool isPacked, const Type Ty); clang::CharUnits getSize() const { - assert(Size % Context.getCharWidth() == 0); + cir_tl_assert(Size % Context.getCharWidth() == 0); return Context.toCharUnitsFromBits(Size); } uint64_t getSizeInBits() const { return Size; } @@ -215,7 +216,7 @@ class ItaniumRecordLayoutBuilder { void setSize(uint64_t NewSize) { Size = NewSize; } clang::CharUnits getDataSize() const { - assert(DataSize % Context.getCharWidth() == 0); + cir_tl_assert(DataSize % Context.getCharWidth() == 0); return Context.toCharUnitsFromBits(DataSize); } @@ -234,24 +235,24 @@ void ItaniumRecordLayoutBuilder::layout(const StructType RT) { initializeLayout(RT); // Lay out the vtable and the non-virtual bases. - assert(!::cir::MissingFeatures::isCXXRecordDecl() && + cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl() && !::cir::MissingFeatures::CXXRecordIsDynamicClass()); layoutFields(RT); // FIXME(cir): Handle virtual-related layouts. - assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); - assert(!::cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); + cir_tl_assert(!::cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); } void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { if (const auto RT = dyn_cast(Ty)) { IsUnion = RT.isUnion(); - assert(!::cir::MissingFeatures::recordDeclIsMSStruct()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclIsMSStruct()); } - assert(!::cir::MissingFeatures::recordDeclIsPacked()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclIsPacked()); // Honor the default struct packing maximum alignment flag. if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) { @@ -289,7 +290,7 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { void ItaniumRecordLayoutBuilder::layoutField(const Type D, bool InsertExtraPadding) { // auto FieldClass = D.dyn_cast(); - assert(!::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && + cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && !::cir::MissingFeatures::CXXRecordDeclIsEmptyCXX11()); bool IsOverlappingEmptyField = false; // FIXME(cir): Needs more features. @@ -304,7 +305,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, llvm_unreachable("NYI"); } - assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit; // Reset the unfilled bits. @@ -344,7 +345,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, llvm_unreachable("NYI"); } - assert(!::cir::MissingFeatures::recordDeclIsPacked() && + cir_tl_assert(!::cir::MissingFeatures::recordDeclIsPacked() && !::cir::MissingFeatures::CXXRecordDeclIsPOD()); bool FieldPacked = false; // FIXME(cir): Needs more features. @@ -383,7 +384,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, clang::CharUnits UnpackedFieldOffset = FieldOffset; // clang::CharUnits OriginalFieldAlign = UnpackedFieldAlign; - assert(!::cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); clang::CharUnits MaxAlignmentInChars = clang::CharUnits::Zero(); PackedFieldAlign = std::max(PackedFieldAlign, MaxAlignmentInChars); PreferredAlign = std::max(PreferredAlign, MaxAlignmentInChars); @@ -456,7 +457,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, // laid out. A regular mlir::Type has not way of doing this. In fact, we will // likely need an external abstraction, as I don't think this is possible with // just the field type. - assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); if (Packed && !FieldPacked && PackedFieldAlign < FieldAlign) llvm_unreachable("NYI"); @@ -465,10 +466,10 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, void ItaniumRecordLayoutBuilder::layoutFields(const StructType D) { // Layout each field, for now, just sequentially, respecting alignment. In // the future, this will need to be tweakable by targets. - assert(!::cir::MissingFeatures::recordDeclMayInsertExtraPadding() && + cir_tl_assert(!::cir::MissingFeatures::recordDeclMayInsertExtraPadding() && !Context.getLangOpts().SanitizeAddressFieldPadding); bool InsertExtraPadding = false; - assert(!::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); bool HasFlexibleArrayMember = false; for (const auto FT : D.getMembers()) { layoutField(FT, InsertExtraPadding && (FT != D.getMembers().back() || @@ -485,19 +486,19 @@ void ItaniumRecordLayoutBuilder::UpdateAlignment( return; if (NewAlignment > Alignment) { - assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) && + cir_tl_assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) && "Alignment not a power of 2"); Alignment = NewAlignment; } if (UnpackedNewAlignment > UnpackedAlignment) { - assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) && + cir_tl_assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) && "Alignment not a power of 2"); UnpackedAlignment = UnpackedNewAlignment; } if (PreferredNewAlignment > PreferredAlignment) { - assert(llvm::isPowerOf2_64(PreferredNewAlignment.getQuantity()) && + cir_tl_assert(llvm::isPowerOf2_64(PreferredNewAlignment.getQuantity()) && "Alignment not a power of 2"); PreferredAlignment = PreferredNewAlignment; } @@ -525,7 +526,7 @@ void ItaniumRecordLayoutBuilder::checkFieldPadding( PadSize = PadSize / CharBitNum; // InBits = false; } - assert(::cir::MissingFeatures::bitFieldPaddingDiagnostics()); + cir_tl_assert(::cir::MissingFeatures::bitFieldPaddingDiagnostics()); } if (isPacked && Offset != UnpackedOffset) { HasPackedField = true; @@ -544,7 +545,7 @@ bool isMsLayout(const CIRLowerContext &Context) { /// of the given class (considering it as a base class) when allocating /// objects? static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { - assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); switch (ABI.getTailPaddingUseRules()) { case clang::TargetCXXABI::AlwaysUseTailPadding: return false; @@ -566,7 +567,7 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { // intended. // FIXME(cir): This always returns true since we can't check if a CIR record // is a POD type. - assert(!::cir::MissingFeatures::CXXRecordDeclIsPOD()); + cir_tl_assert(!::cir::MissingFeatures::CXXRecordDeclIsPOD()); return true; case clang::TargetCXXABI::UseTailPaddingUnlessPOD11: @@ -588,10 +589,10 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { /// (struct/union/class), which indicates its size and field position /// information. const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { - assert(isa(D) && "Not a record type"); + cir_tl_assert(isa(D) && "Not a record type"); auto RT = dyn_cast(D); - assert(RT.isComplete() && "Cannot get layout of forward declarations!"); + cir_tl_assert(RT.isComplete() && "Cannot get layout of forward declarations!"); // FIXME(cir): Use a more MLIR-based approach by using it's buitin data layout // features, such as interfaces, cacheing, and the DLTI dialect. @@ -602,7 +603,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { llvm_unreachable("NYI"); } else { // FIXME(cir): Add if-else separating C and C++ records. - assert(!::cir::MissingFeatures::isCXXRecordDecl()); + cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl()); EmptySubobjectMap EmptySubobjects(*this, RT); ItaniumRecordLayoutBuilder Builder(*this, &EmptySubobjects); Builder.layout(RT); @@ -617,7 +618,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { skipTailPadding ? Builder.getSize() : Builder.getDataSize(); clang::CharUnits NonVirtualSize = skipTailPadding ? DataSize : Builder.NonVirtualSize; - assert(!::cir::MissingFeatures::CXXRecordIsDynamicClass()); + cir_tl_assert(!::cir::MissingFeatures::CXXRecordIsDynamicClass()); // FIXME(cir): Whose responsible for freeing the allocation below? NewEntry = new CIRRecordLayout( *this, Builder.getSize(), Builder.Alignment, Builder.PreferredAlignment, @@ -632,7 +633,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { } // TODO(cir): Add option to dump the layouts. - assert(!::cir::MissingFeatures::cacheRecordLayouts()); + cir_tl_assert(!::cir::MissingFeatures::cacheRecordLayouts()); return *NewEntry; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index a3406b722c41..28b363664387 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -60,7 +60,7 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { public: AArch64TargetLoweringInfo(LowerTypes <, AArch64ABIKind Kind) : TargetLoweringInfo(std::make_unique(LT, Kind)) { - assert(!MissingFeature::swift()); + cir_tl_assert(!MissingFeature::swift()); } unsigned getTargetAddrSpaceFromCIRAddrSpace( @@ -87,7 +87,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, return ABIArgInfo::getIgnore(); if (const auto _ = dyn_cast(RetTy)) { - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::vectorType(), "NYI"); } // Large vector types should be returned via memory. @@ -128,7 +128,9 @@ AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, : ABIArgInfo::getDirect()); } - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::AArch64TypeClassification(), + "NYI"); + return {}; } std::unique_ptr diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index 7d43000877b7..788f8a8f4739 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -70,13 +70,13 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // Homogenous Aggregate type not supported and indirect arg // passing not supported yet. And for these supported types, // we should not have alignment greater than 8 problem. - assert(isSupportedType); - assert(!cir::MissingFeatures::classifyArgumentTypeForAArch64()); + cir_tl_assert(isSupportedType); + cir_tl_assert(!cir::MissingFeatures::classifyArgumentTypeForAArch64()); // indirect arg passing would expect one more level of pointer dereference. - assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); // false as a place holder for now, as we don't have a way to query bool isIndirect = false; - assert(!cir::MissingFeatures::supportgetCoerceToTypeForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportgetCoerceToTypeForAArch64()); // we don't convert to LLVM Type here as we are lowering to CIR here. // so baseTy is the just type of the result of va_arg. // but it depends on arg type indirectness and coercion defined by ABI. @@ -120,8 +120,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // though anyone passing 2GB of arguments, each at most 16 bytes, deserves // whatever they get). - assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); - assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // One is just place holder for now, as we don't have a way to query // type size and alignment. clang::CharUnits tySize = @@ -132,7 +132,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // indirectness, type size and type alignment all // decide regSize, but they are all ABI defined // thus need ABI lowering query system. - assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); int regSize = isIndirect ? 8 : tySize.getQuantity(); int regTopIndex; mlir::Value regOffsP; @@ -187,8 +187,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we // align __gr_offs to calculate the potential address. if (!IsFPR && !isIndirect && tyAlign.getQuantity() > 8) { - assert(!cir::MissingFeatures::handleAArch64Indirect()); - assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); llvm_unreachable("register alignment correction NYI"); } @@ -224,19 +224,19 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( loc, castRegTop.getType(), castRegTop, regOffs); if (isIndirect) { - assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); llvm_unreachable("indirect arg passing NYI"); } // TODO: isHFA, numMembers and base should be query result from query uint64_t numMembers = 0; - assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); bool isHFA = false; // though endianess can be known from datalayout, it might need an unified // ABI lowering query system to answer the question. - assert(!cir::MissingFeatures::supportisEndianQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportisEndianQueryForAArch64()); bool isBigEndian = datalayout.isBigEndian(); - assert(!cir::MissingFeatures::supportisAggregateTypeForABIAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportisAggregateTypeForABIAArch64()); // TODO: isAggregateTypeForABI should be query result from ABI info bool isAggregateTypeForABI = false; if (isHFA && numMembers > 1) { @@ -244,10 +244,10 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // and stored 16-bytes apart regardless of size (they're notionally in qN, // qN+1, ...). We reload and store into a temporary local variable // contiguously. - assert(!isIndirect && "Homogeneous aggregates should be passed directly"); + cir_tl_assert(!isIndirect && "Homogeneous aggregates should be passed directly"); llvm_unreachable("Homogeneous aggregates NYI"); } else { - assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // TODO: slotSize should be query result about alignment. clang::CharUnits slotSize = clang::CharUnits::fromQuantity(8); if (isBigEndian && !isIndirect && (isHFA || isAggregateTypeForABI) && @@ -266,11 +266,11 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // On big-endian platforms, the value will be right-aligned in its stack slot. // and we also need to think about other ABI lowering concerns listed below. - assert(!cir::MissingFeatures::handleBigEndian()); - assert(!cir::MissingFeatures::handleAArch64Indirect()); - assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); - assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); - assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::handleBigEndian()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); builder.create(loc, mlir::ValueRange{resAsVoidP}, contBlock); @@ -284,8 +284,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( auto ptrDiffTy = mlir::cir::IntType::get(builder.getContext(), 64, /*signed=*/false); - assert(!cir::MissingFeatures::handleAArch64Indirect()); - assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // Again, stack arguments may need realignment. In this case both integer and // floating-point ones might be affected. if (!isIndirect && tyAlign.getQuantity() > 8) { @@ -307,8 +307,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // which are all ABI defined thus need ABI lowering query system. // The implementation we have now supports most common cases which assumes // no indirectness, no alignment greater than 8, and little endian. - assert(!cir::MissingFeatures::handleBigEndian()); - assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::handleBigEndian()); + cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); auto stackSizeC = builder.create( loc, ptrDiffTy, @@ -340,12 +340,12 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( builder.setInsertionPoint(op); contBlock->addArgument(onStackPtr.getType(), loc); auto resP = contBlock->getArgument(0); - assert(mlir::isa(resP.getType())); + cir_tl_assert(mlir::isa(resP.getType())); auto opResPTy = mlir::cir::PointerType::get(builder.getContext(), opResTy); auto castResP = builder.createBitcast(resP, opResPTy); auto res = builder.create(loc, castResP); // there would be another level of ptr dereference if indirect arg passing - assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); if (isIndirect) { res = builder.create(loc, res.getResult()); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index 9d79fb7ccb43..8b13ba556558 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -34,7 +34,7 @@ cir::LoweringPrepareCXXABI *cir::LoweringPrepareCXXABI::createItaniumABI() { static void buildBadCastCall(CIRBaseBuilderTy &builder, mlir::Location loc, mlir::FlatSymbolRefAttr badCastFuncRef) { // TODO(cir): set the calling convention to __cxa_bad_cast. - assert(!MissingFeatures::setCallingConv()); + cir_tl_assert(!MissingFeatures::setCallingConv()); builder.createCallOp(loc, badCastFuncRef, mlir::ValueRange{}); builder.create(loc); @@ -48,7 +48,7 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, auto castInfo = op.getInfo().value(); // TODO(cir): consider address space - assert(!MissingFeatures::addressSpace()); + cir_tl_assert(!MissingFeatures::addressSpace()); auto srcPtr = builder.createBitcast(srcValue, builder.getVoidPtrTy()); auto srcRtti = builder.getConstant(loc, castInfo.getSrcRtti()); @@ -59,14 +59,14 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, mlir::Value dynCastFuncArgs[4] = {srcPtr, srcRtti, destRtti, offsetHint}; // TODO(cir): set the calling convention for __dynamic_cast. - assert(!MissingFeatures::setCallingConv()); + cir_tl_assert(!MissingFeatures::setCallingConv()); mlir::Value castedPtr = builder .createCallOp(loc, dynCastFuncRef, builder.getVoidPtrTy(), dynCastFuncArgs) .getResult(); - assert(mlir::isa(castedPtr.getType()) && + cir_tl_assert(mlir::isa(castedPtr.getType()) && "the return value of __dynamic_cast should be a ptr"); /// C++ [expr.dynamic.cast]p9: @@ -93,7 +93,7 @@ buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, bool vtableUsesRelativeLayout = op.getRelativeLayout(); // TODO(cir): consider address space in this function. - assert(!MissingFeatures::addressSpace()); + cir_tl_assert(!MissingFeatures::addressSpace()); mlir::Type vtableElemTy; uint64_t vtableElemAlign; @@ -141,7 +141,7 @@ LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, auto loc = op->getLoc(); auto srcValue = op.getSrc(); - assert(!MissingFeatures::buildTypeCheck()); + cir_tl_assert(!MissingFeatures::buildTypeCheck()); if (op.isRefcast()) return buildDynamicCastAfterNullCheck(builder, op); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp index f5a7250dffd0..f5540e221d9d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp @@ -33,7 +33,7 @@ class SPIRVABIInfo : public ABIInfo { private: void computeInfo(LowerFunctionInfo &FI) const override { - llvm_unreachable("ABI NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::SPIRVABI(), "NYI"); } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 38501f7c3124..11092381960e 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -95,7 +95,8 @@ Type getFPTypeAtOffset(Type IRType, unsigned IROffset, if (IROffset == 0 && isa(IRType)) return IRType; - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::X86GetFPTypeAtOffset(), "NYI"); + return IRType; // FIXME(cir): Temporary workaround for the assertion above. } } // namespace @@ -193,7 +194,7 @@ class X86_64TargetLoweringInfo : public TargetLoweringInfo { public: X86_64TargetLoweringInfo(LowerTypes &LM, X86AVXABILevel AVXLevel) : TargetLoweringInfo(std::make_unique(LM, AVXLevel)) { - assert(!::cir::MissingFeatures::swift()); + cir_tl_assert(!::cir::MissingFeatures::swift()); } unsigned getTargetAddrSpaceFromCIRAddrSpace( @@ -273,7 +274,7 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, Current = Class::NoClass; // If this is a C++ record, classify the bases first. - assert(!::cir::MissingFeatures::isCXXRecordDecl() && + cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl() && !::cir::MissingFeatures::getCXXRecordBases()); // Classify the fields one at a time, merging the results. @@ -283,10 +284,10 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, bool IsUnion = RT.isUnion() && !UseClang11Compat; // FIXME(cir): An interface to handle field declaration might be needed. - assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); for (auto [idx, FT] : llvm::enumerate(RT.getMembers())) { uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); - assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); bool BitField = false; // Ignore padding bit-fields. @@ -337,7 +338,8 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, postMerge(Size, Lo, Hi); } else { llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::X86TypeClassification(), + "NYI"); } // FIXME: _Decimal32 and _Decimal64 are SSE. // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). @@ -400,7 +402,7 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, // returning an 8-byte unit starting with it. See if we can safely use it. if (IROffset == 0) { // Pointers and int64's always fill the 8-byte unit. - assert(!isa(DestTy) && "Ptrs are NYI"); + cir_tl_assert(!isa(DestTy) && "Ptrs are NYI"); // If we have a 1/2/4-byte integer, we can use it only if the rest of the // goodness in the source type is just tail padding. This is allowed to @@ -436,7 +438,9 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, unsigned TySizeInBytes = (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); - assert(TySizeInBytes != SourceOffset && "Empty field?"); + // FIXME(cir): Temporary workaround to make things non-blocking. + if (!cirMissingFeatureAssertionMode) + cir_tl_assert(TySizeInBytes != SourceOffset && "Empty field?"); // It is always safe to classify this as an integer type up to i64 that // isn't larger than the structure. @@ -458,9 +462,9 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { classify(RetTy, 0, Lo, Hi, true); // Check some invariants. - assert((Hi != Class::Memory || Lo == Class::Memory) && + cir_tl_assert((Hi != Class::Memory || Lo == Class::Memory) && "Invalid memory classification."); - assert((Hi != Class::SSEUp || Lo == Class::SSE) && + cir_tl_assert((Hi != Class::SSEUp || Lo == Class::SSE) && "Invalid SSEUp classification."); Type resType = {}; @@ -492,7 +496,8 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { break; default: - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::X86RetTypeClassification(), + "NYI"); } Type HighPart = {}; @@ -526,9 +531,9 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, // Check some invariants. // FIXME: Enforce these by construction. - assert((Hi != Class::Memory || Lo == Class::Memory) && + cir_tl_assert((Hi != Class::Memory || Lo == Class::Memory) && "Invalid memory classification."); - assert((Hi != Class::SSEUp || Lo == Class::SSE) && + cir_tl_assert((Hi != Class::SSEUp || Lo == Class::SSE) && "Invalid SSEUp classification."); neededInt = 0; @@ -566,7 +571,8 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, break; } default: - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::X86ArgTypeClassification(), + "NYI"); } Type HighPart = {}; @@ -670,7 +676,7 @@ X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { // Accum should never be memory (we should have returned) or // ComplexX87 (because this cannot be passed in a structure). - assert((Accum != Class::Memory && Accum != Class::ComplexX87) && + cir_tl_assert((Accum != Class::Memory && Accum != Class::ComplexX87) && "Invalid accumulated classification during merge."); if (Accum == Field || Field == Class::NoClass) return Accum; diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index e360f0470b50..2eb77a3ec33a 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -199,6 +199,9 @@ class CIRGenConsumer : public clang::ASTConsumer { if (feOptions.ClangIRLibOpt) libOptOpts = sanitizePassOptions(feOptions.ClangIRLibOptOpts); + bool enableCCLowering = feOptions.ClangIRCallConvLowering && + action == CIRGenAction::OutputType::EmitCIRFlat; + // Setup and run CIR pipeline. std::string passOptParsingFailure; if (runCIRToCIRPasses( @@ -208,8 +211,7 @@ class CIRGenConsumer : public clang::ASTConsumer { feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, codeGenOptions.OptimizationLevel > 0, action == CIRGenAction::OutputType::EmitCIRFlat, - action == CIRGenAction::OutputType::EmitMLIR, - feOptions.ClangIREnableCallConvLowering, + action == CIRGenAction::OutputType::EmitMLIR, enableCCLowering, feOptions.ClangIREnableMem2Reg) .failed()) { if (!passOptParsingFailure.empty()) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 88485d71510c..3c80b2dca766 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4396,7 +4396,7 @@ std::unique_ptr createConvertCIRToLLVMPass() { } void populateCIRToLLVMPasses(mlir::OpPassManager &pm) { - populateCIRPreLoweringPasses(pm); + populateCIRPreLoweringPasses(pm, true); pm.addPass(createConvertCIRToLLVMPass()); } diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index a30e097f9401..dcd979c35e9d 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3129,7 +3129,7 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Opts.ClangIRVerifyDiags = true; if (Args.hasArg(OPT_fclangir_call_conv_lowering)) - Opts.ClangIREnableCallConvLowering = true; + Opts.ClangIRCallConvLowering = true; if (Args.hasArg(OPT_fclangir_analysis_only)) Opts.ClangIRAnalysisOnly = true; diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index bf2663181077..966b1da6e9f3 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -4,7 +4,7 @@ // RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir // RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -fno-clangir-call-conv-lowering -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir // RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll // RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 67a851dff2de..ca69cb279d81 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -fno-clangir-call-conv-lowering -emit-cir-flat %s -o %t.flat.cir // RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index b985ecab8cca..068d8c10b3b3 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -fno-clangir-call-conv-lowering -emit-cir-flat %s -o %t.flat.cir // RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s // RUN_DISABLED: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll // RUN_DISABLED: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_LLVM %s diff --git a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp index 209679ebf383..f3a926aa93a6 100644 --- a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // CHECK: @_Z4Voidv() diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index 3789550ce33b..a3c2d6960c39 100644 --- a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // Test call conv lowering for trivial cases. // From 7c4796f166fa065ca204ee3b68da45972903cd3a Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:15:21 -0400 Subject: [PATCH 1897/2301] [CIR][CIRGen][Builtin][Neon] Lower vqrshrun_n and add getAArch64SIMDIntrinsicString (#899) as title. In addition, this PR has 2 extra changes. 1. change return type of GetNeonType into mlir::cir::VectorType so we don't have to do cast all the time, this is consistent with [OG](https://github.com/llvm/clangir/blob/db6b7c07c076cb738d0acae248d7c3c199b2b952/clang/lib/CodeGen/CGBuiltin.cpp#L6234) as well. 2. add getAArch64SIMDIntrinsicString helper function so we have better debug info when hitting NYI in buildCommonNeonBuiltinExpr --------- Co-authored-by: Guojin He --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 22 + .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 575 +++++++++++++++++- .../CIR/CodeGen/aarch64-neon-simd-shift.c | 69 +++ 3 files changed, 658 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/aarch64-neon-simd-shift.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 1ccf2f1e0b07..9dab3d640665 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -372,6 +372,28 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } bool isInt(mlir::Type i) { return mlir::isa(i); } + mlir::cir::IntType getExtendedIntTy(mlir::cir::IntType ty, bool isSigned) { + if (isInt8Ty(ty)) { + return isSigned ? getSInt16Ty() : getUInt16Ty(); + } + if (isInt16Ty(ty)) { + return isSigned ? getSInt32Ty() : getUInt32Ty(); + } + if (isInt32Ty(ty)) { + return isSigned ? getSInt64Ty() : getUInt64Ty(); + } + llvm_unreachable("NYI"); + } + + mlir::cir::VectorType getExtendedElementVectorType(mlir::cir::VectorType vt, + bool isSigned = false) { + auto elementTy = + mlir::dyn_cast_or_null(vt.getEltType()); + assert(elementTy && "expected int vector"); + return mlir::cir::VectorType::get( + getContext(), getExtendedIntTy(elementTy, isSigned), vt.getSize()); + } + mlir::cir::LongDoubleType getLongDoubleTy(const llvm::fltSemantics &format) const { if (&format == &llvm::APFloat::IEEEdouble()) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 39f6473fcfa8..d05219cf02b9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -693,6 +693,557 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = { NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType), }; +/// Get name of intrinsics in the AArch64SIMDIntrinsicMap defined above. +static std::string getAArch64SIMDIntrinsicString(unsigned int intrinsicID) { + switch (intrinsicID) { + default: + return std::string("Unexpected intrinsic id " + + std::to_string(intrinsicID)); + case NEON::BI__builtin_neon_splat_lane_v: + return "NEON::BI__builtin_neon_splat_lane_v"; + case NEON::BI__builtin_neon_splat_laneq_v: + return "NEON::BI__builtin_neon_splat_laneq_v"; + case NEON::BI__builtin_neon_splatq_lane_v: + return "NEON::BI__builtin_neon_splatq_lane_v"; + case NEON::BI__builtin_neon_splatq_laneq_v: + return "NEON::BI__builtin_neon_splatq_laneq_v"; + case NEON::BI__builtin_neon_vabs_v: + return "NEON::BI__builtin_neon_vabs_v"; + case NEON::BI__builtin_neon_vabsq_v: + return "NEON::BI__builtin_neon_vabsq_v"; + case NEON::BI__builtin_neon_vadd_v: + return "NEON::BI__builtin_neon_vadd_v"; + case NEON::BI__builtin_neon_vaddhn_v: + return "NEON::BI__builtin_neon_vaddhn_v"; + case NEON::BI__builtin_neon_vaddq_p128: + return "NEON::BI__builtin_neon_vaddq_p128"; + case NEON::BI__builtin_neon_vaddq_v: + return "NEON::BI__builtin_neon_vaddq_v"; + case NEON::BI__builtin_neon_vaesdq_u8: + return "NEON::BI__builtin_neon_vaesdq_u8"; + case NEON::BI__builtin_neon_vaeseq_u8: + return "NEON::BI__builtin_neon_vaeseq_u8"; + case NEON::BI__builtin_neon_vaesimcq_u8: + return "NEON::BI__builtin_neon_vaesimcq_u8"; + case NEON::BI__builtin_neon_vaesmcq_u8: + return "NEON::BI__builtin_neon_vaesmcq_u8"; + case NEON::BI__builtin_neon_vbcaxq_s16: + return "NEON::BI__builtin_neon_vbcaxq_s16"; + case NEON::BI__builtin_neon_vbcaxq_s32: + return "NEON::BI__builtin_neon_vbcaxq_s32"; + case NEON::BI__builtin_neon_vbcaxq_s64: + return "NEON::BI__builtin_neon_vbcaxq_s64"; + case NEON::BI__builtin_neon_vbcaxq_s8: + return "NEON::BI__builtin_neon_vbcaxq_s8"; + case NEON::BI__builtin_neon_vbcaxq_u16: + return "NEON::BI__builtin_neon_vbcaxq_u16"; + case NEON::BI__builtin_neon_vbcaxq_u32: + return "NEON::BI__builtin_neon_vbcaxq_u32"; + case NEON::BI__builtin_neon_vbcaxq_u64: + return "NEON::BI__builtin_neon_vbcaxq_u64"; + case NEON::BI__builtin_neon_vbcaxq_u8: + return "NEON::BI__builtin_neon_vbcaxq_u8"; + case NEON::BI__builtin_neon_vbfdot_f32: + return "NEON::BI__builtin_neon_vbfdot_f32"; + case NEON::BI__builtin_neon_vbfdotq_f32: + return "NEON::BI__builtin_neon_vbfdotq_f32"; + case NEON::BI__builtin_neon_vbfmlalbq_f32: + return "NEON::BI__builtin_neon_vbfmlalbq_f32"; + case NEON::BI__builtin_neon_vbfmlaltq_f32: + return "NEON::BI__builtin_neon_vbfmlaltq_f32"; + case NEON::BI__builtin_neon_vbfmmlaq_f32: + return "NEON::BI__builtin_neon_vbfmmlaq_f32"; + case NEON::BI__builtin_neon_vcadd_rot270_f16: + return "NEON::BI__builtin_neon_vcadd_rot270_f16"; + case NEON::BI__builtin_neon_vcadd_rot270_f32: + return "NEON::BI__builtin_neon_vcadd_rot270_f32"; + case NEON::BI__builtin_neon_vcadd_rot90_f16: + return "NEON::BI__builtin_neon_vcadd_rot90_f16"; + case NEON::BI__builtin_neon_vcadd_rot90_f32: + return "NEON::BI__builtin_neon_vcadd_rot90_f32"; + case NEON::BI__builtin_neon_vcaddq_rot270_f16: + return "NEON::BI__builtin_neon_vcaddq_rot270_f16"; + case NEON::BI__builtin_neon_vcaddq_rot270_f32: + return "NEON::BI__builtin_neon_vcaddq_rot270_f32"; + case NEON::BI__builtin_neon_vcaddq_rot270_f64: + return "NEON::BI__builtin_neon_vcaddq_rot270_f64"; + case NEON::BI__builtin_neon_vcaddq_rot90_f16: + return "NEON::BI__builtin_neon_vcaddq_rot90_f16"; + case NEON::BI__builtin_neon_vcaddq_rot90_f32: + return "NEON::BI__builtin_neon_vcaddq_rot90_f32"; + case NEON::BI__builtin_neon_vcaddq_rot90_f64: + return "NEON::BI__builtin_neon_vcaddq_rot90_f64"; + case NEON::BI__builtin_neon_vcage_v: + return "NEON::BI__builtin_neon_vcage_v"; + case NEON::BI__builtin_neon_vcageq_v: + return "NEON::BI__builtin_neon_vcageq_v"; + case NEON::BI__builtin_neon_vcagt_v: + return "NEON::BI__builtin_neon_vcagt_v"; + case NEON::BI__builtin_neon_vcagtq_v: + return "NEON::BI__builtin_neon_vcagtq_v"; + case NEON::BI__builtin_neon_vcale_v: + return "NEON::BI__builtin_neon_vcale_v"; + case NEON::BI__builtin_neon_vcaleq_v: + return "NEON::BI__builtin_neon_vcaleq_v"; + case NEON::BI__builtin_neon_vcalt_v: + return "NEON::BI__builtin_neon_vcalt_v"; + case NEON::BI__builtin_neon_vcaltq_v: + return "NEON::BI__builtin_neon_vcaltq_v"; + case NEON::BI__builtin_neon_vceqz_v: + return "NEON::BI__builtin_neon_vceqz_v"; + case NEON::BI__builtin_neon_vceqzq_v: + return "NEON::BI__builtin_neon_vceqzq_v"; + case NEON::BI__builtin_neon_vcgez_v: + return "NEON::BI__builtin_neon_vcgez_v"; + case NEON::BI__builtin_neon_vcgezq_v: + return "NEON::BI__builtin_neon_vcgezq_v"; + case NEON::BI__builtin_neon_vcgtz_v: + return "NEON::BI__builtin_neon_vcgtz_v"; + case NEON::BI__builtin_neon_vcgtzq_v: + return "NEON::BI__builtin_neon_vcgtzq_v"; + case NEON::BI__builtin_neon_vclez_v: + return "NEON::BI__builtin_neon_vclez_v"; + case NEON::BI__builtin_neon_vclezq_v: + return "NEON::BI__builtin_neon_vclezq_v"; + case NEON::BI__builtin_neon_vcls_v: + return "NEON::BI__builtin_neon_vcls_v"; + case NEON::BI__builtin_neon_vclsq_v: + return "NEON::BI__builtin_neon_vclsq_v"; + case NEON::BI__builtin_neon_vcltz_v: + return "NEON::BI__builtin_neon_vcltz_v"; + case NEON::BI__builtin_neon_vcltzq_v: + return "NEON::BI__builtin_neon_vcltzq_v"; + case NEON::BI__builtin_neon_vclz_v: + return "NEON::BI__builtin_neon_vclz_v"; + case NEON::BI__builtin_neon_vclzq_v: + return "NEON::BI__builtin_neon_vclzq_v"; + case NEON::BI__builtin_neon_vcmla_f16: + return "NEON::BI__builtin_neon_vcmla_f16"; + case NEON::BI__builtin_neon_vcmla_f32: + return "NEON::BI__builtin_neon_vcmla_f32"; + case NEON::BI__builtin_neon_vcmla_rot180_f16: + return "NEON::BI__builtin_neon_vcmla_rot180_f16"; + case NEON::BI__builtin_neon_vcmla_rot180_f32: + return "NEON::BI__builtin_neon_vcmla_rot180_f32"; + case NEON::BI__builtin_neon_vcmla_rot270_f16: + return "NEON::BI__builtin_neon_vcmla_rot270_f16"; + case NEON::BI__builtin_neon_vcmla_rot270_f32: + return "NEON::BI__builtin_neon_vcmla_rot270_f32"; + case NEON::BI__builtin_neon_vcmla_rot90_f16: + return "NEON::BI__builtin_neon_vcmla_rot90_f16"; + case NEON::BI__builtin_neon_vcmla_rot90_f32: + return "NEON::BI__builtin_neon_vcmla_rot90_f32"; + case NEON::BI__builtin_neon_vcmlaq_f16: + return "NEON::BI__builtin_neon_vcmlaq_f16"; + case NEON::BI__builtin_neon_vcmlaq_f32: + return "NEON::BI__builtin_neon_vcmlaq_f32"; + case NEON::BI__builtin_neon_vcmlaq_f64: + return "NEON::BI__builtin_neon_vcmlaq_f64"; + case NEON::BI__builtin_neon_vcmlaq_rot180_f16: + return "NEON::BI__builtin_neon_vcmlaq_rot180_f16"; + case NEON::BI__builtin_neon_vcmlaq_rot180_f32: + return "NEON::BI__builtin_neon_vcmlaq_rot180_f32"; + case NEON::BI__builtin_neon_vcmlaq_rot180_f64: + return "NEON::BI__builtin_neon_vcmlaq_rot180_f64"; + case NEON::BI__builtin_neon_vcmlaq_rot270_f16: + return "NEON::BI__builtin_neon_vcmlaq_rot270_f16"; + case NEON::BI__builtin_neon_vcmlaq_rot270_f32: + return "NEON::BI__builtin_neon_vcmlaq_rot270_f32"; + case NEON::BI__builtin_neon_vcmlaq_rot270_f64: + return "NEON::BI__builtin_neon_vcmlaq_rot270_f64"; + case NEON::BI__builtin_neon_vcmlaq_rot90_f16: + return "NEON::BI__builtin_neon_vcmlaq_rot90_f16"; + case NEON::BI__builtin_neon_vcmlaq_rot90_f32: + return "NEON::BI__builtin_neon_vcmlaq_rot90_f32"; + case NEON::BI__builtin_neon_vcmlaq_rot90_f64: + return "NEON::BI__builtin_neon_vcmlaq_rot90_f64"; + case NEON::BI__builtin_neon_vcnt_v: + return "NEON::BI__builtin_neon_vcnt_v"; + case NEON::BI__builtin_neon_vcntq_v: + return "NEON::BI__builtin_neon_vcntq_v"; + case NEON::BI__builtin_neon_vcvt_f16_f32: + return "NEON::BI__builtin_neon_vcvt_f16_f32"; + case NEON::BI__builtin_neon_vcvt_f16_s16: + return "NEON::BI__builtin_neon_vcvt_f16_s16"; + case NEON::BI__builtin_neon_vcvt_f16_u16: + return "NEON::BI__builtin_neon_vcvt_f16_u16"; + case NEON::BI__builtin_neon_vcvt_f32_f16: + return "NEON::BI__builtin_neon_vcvt_f32_f16"; + case NEON::BI__builtin_neon_vcvt_f32_v: + return "NEON::BI__builtin_neon_vcvt_f32_v"; + case NEON::BI__builtin_neon_vcvt_n_f16_s16: + return "NEON::BI__builtin_neon_vcvt_n_f16_s16"; + case NEON::BI__builtin_neon_vcvt_n_f16_u16: + return "NEON::BI__builtin_neon_vcvt_n_f16_u16"; + case NEON::BI__builtin_neon_vcvt_n_f32_v: + return "NEON::BI__builtin_neon_vcvt_n_f32_v"; + case NEON::BI__builtin_neon_vcvt_n_f64_v: + return "NEON::BI__builtin_neon_vcvt_n_f64_v"; + case NEON::BI__builtin_neon_vcvt_n_s16_f16: + return "NEON::BI__builtin_neon_vcvt_n_s16_f16"; + case NEON::BI__builtin_neon_vcvt_n_s32_v: + return "NEON::BI__builtin_neon_vcvt_n_s32_v"; + case NEON::BI__builtin_neon_vcvt_n_s64_v: + return "NEON::BI__builtin_neon_vcvt_n_s64_v"; + case NEON::BI__builtin_neon_vcvt_n_u16_f16: + return "NEON::BI__builtin_neon_vcvt_n_u16_f16"; + case NEON::BI__builtin_neon_vcvt_n_u32_v: + return "NEON::BI__builtin_neon_vcvt_n_u32_v"; + case NEON::BI__builtin_neon_vcvt_n_u64_v: + return "NEON::BI__builtin_neon_vcvt_n_u64_v"; + case NEON::BI__builtin_neon_vcvtq_f16_s16: + return "NEON::BI__builtin_neon_vcvtq_f16_s16"; + case NEON::BI__builtin_neon_vcvtq_f16_u16: + return "NEON::BI__builtin_neon_vcvtq_f16_u16"; + case NEON::BI__builtin_neon_vcvtq_f32_v: + return "NEON::BI__builtin_neon_vcvtq_f32_v"; + case NEON::BI__builtin_neon_vcvtq_high_bf16_f32: + return "NEON::BI__builtin_neon_vcvtq_high_bf16_f32"; + case NEON::BI__builtin_neon_vcvtq_n_f16_s16: + return "NEON::BI__builtin_neon_vcvtq_n_f16_s16"; + case NEON::BI__builtin_neon_vcvtq_n_f16_u16: + return "NEON::BI__builtin_neon_vcvtq_n_f16_u16"; + case NEON::BI__builtin_neon_vcvtq_n_f32_v: + return "NEON::BI__builtin_neon_vcvtq_n_f32_v"; + case NEON::BI__builtin_neon_vcvtq_n_f64_v: + return "NEON::BI__builtin_neon_vcvtq_n_f64_v"; + case NEON::BI__builtin_neon_vcvtq_n_s16_f16: + return "NEON::BI__builtin_neon_vcvtq_n_s16_f16"; + case NEON::BI__builtin_neon_vcvtq_n_s32_v: + return "NEON::BI__builtin_neon_vcvtq_n_s32_v"; + case NEON::BI__builtin_neon_vcvtq_n_s64_v: + return "NEON::BI__builtin_neon_vcvtq_n_s64_v"; + case NEON::BI__builtin_neon_vcvtq_n_u16_f16: + return "NEON::BI__builtin_neon_vcvtq_n_u16_f16"; + case NEON::BI__builtin_neon_vcvtq_n_u32_v: + return "NEON::BI__builtin_neon_vcvtq_n_u32_v"; + case NEON::BI__builtin_neon_vcvtq_n_u64_v: + return "NEON::BI__builtin_neon_vcvtq_n_u64_v"; + case NEON::BI__builtin_neon_vcvtx_f32_v: + return "NEON::BI__builtin_neon_vcvtx_f32_v"; + case NEON::BI__builtin_neon_vdot_s32: + return "NEON::BI__builtin_neon_vdot_s32"; + case NEON::BI__builtin_neon_vdot_u32: + return "NEON::BI__builtin_neon_vdot_u32"; + case NEON::BI__builtin_neon_vdotq_s32: + return "NEON::BI__builtin_neon_vdotq_s32"; + case NEON::BI__builtin_neon_vdotq_u32: + return "NEON::BI__builtin_neon_vdotq_u32"; + case NEON::BI__builtin_neon_veor3q_s16: + return "NEON::BI__builtin_neon_veor3q_s16"; + case NEON::BI__builtin_neon_veor3q_s32: + return "NEON::BI__builtin_neon_veor3q_s32"; + case NEON::BI__builtin_neon_veor3q_s64: + return "NEON::BI__builtin_neon_veor3q_s64"; + case NEON::BI__builtin_neon_veor3q_s8: + return "NEON::BI__builtin_neon_veor3q_s8"; + case NEON::BI__builtin_neon_veor3q_u16: + return "NEON::BI__builtin_neon_veor3q_u16"; + case NEON::BI__builtin_neon_veor3q_u32: + return "NEON::BI__builtin_neon_veor3q_u32"; + case NEON::BI__builtin_neon_veor3q_u64: + return "NEON::BI__builtin_neon_veor3q_u64"; + case NEON::BI__builtin_neon_veor3q_u8: + return "NEON::BI__builtin_neon_veor3q_u8"; + case NEON::BI__builtin_neon_vext_v: + return "NEON::BI__builtin_neon_vext_v"; + case NEON::BI__builtin_neon_vextq_v: + return "NEON::BI__builtin_neon_vextq_v"; + case NEON::BI__builtin_neon_vfma_v: + return "NEON::BI__builtin_neon_vfma_v"; + case NEON::BI__builtin_neon_vfmaq_v: + return "NEON::BI__builtin_neon_vfmaq_v"; + case NEON::BI__builtin_neon_vfmlal_high_f16: + return "NEON::BI__builtin_neon_vfmlal_high_f16"; + case NEON::BI__builtin_neon_vfmlal_low_f16: + return "NEON::BI__builtin_neon_vfmlal_low_f16"; + case NEON::BI__builtin_neon_vfmlalq_high_f16: + return "NEON::BI__builtin_neon_vfmlalq_high_f16"; + case NEON::BI__builtin_neon_vfmlalq_low_f16: + return "NEON::BI__builtin_neon_vfmlalq_low_f16"; + case NEON::BI__builtin_neon_vfmlsl_high_f16: + return "NEON::BI__builtin_neon_vfmlsl_high_f16"; + case NEON::BI__builtin_neon_vfmlsl_low_f16: + return "NEON::BI__builtin_neon_vfmlsl_low_f16"; + case NEON::BI__builtin_neon_vfmlslq_high_f16: + return "NEON::BI__builtin_neon_vfmlslq_high_f16"; + case NEON::BI__builtin_neon_vfmlslq_low_f16: + return "NEON::BI__builtin_neon_vfmlslq_low_f16"; + case NEON::BI__builtin_neon_vhadd_v: + return "NEON::BI__builtin_neon_vhadd_v"; + case NEON::BI__builtin_neon_vhaddq_v: + return "NEON::BI__builtin_neon_vhaddq_v"; + case NEON::BI__builtin_neon_vhsub_v: + return "NEON::BI__builtin_neon_vhsub_v"; + case NEON::BI__builtin_neon_vhsubq_v: + return "NEON::BI__builtin_neon_vhsubq_v"; + case NEON::BI__builtin_neon_vld1_x2_v: + return "NEON::BI__builtin_neon_vld1_x2_v"; + case NEON::BI__builtin_neon_vld1_x3_v: + return "NEON::BI__builtin_neon_vld1_x3_v"; + case NEON::BI__builtin_neon_vld1_x4_v: + return "NEON::BI__builtin_neon_vld1_x4_v"; + case NEON::BI__builtin_neon_vld1q_x2_v: + return "NEON::BI__builtin_neon_vld1q_x2_v"; + case NEON::BI__builtin_neon_vld1q_x3_v: + return "NEON::BI__builtin_neon_vld1q_x3_v"; + case NEON::BI__builtin_neon_vld1q_x4_v: + return "NEON::BI__builtin_neon_vld1q_x4_v"; + case NEON::BI__builtin_neon_vmmlaq_s32: + return "NEON::BI__builtin_neon_vmmlaq_s32"; + case NEON::BI__builtin_neon_vmmlaq_u32: + return "NEON::BI__builtin_neon_vmmlaq_u32"; + case NEON::BI__builtin_neon_vmovl_v: + return "NEON::BI__builtin_neon_vmovl_v"; + case NEON::BI__builtin_neon_vmovn_v: + return "NEON::BI__builtin_neon_vmovn_v"; + case NEON::BI__builtin_neon_vmul_v: + return "NEON::BI__builtin_neon_vmul_v"; + case NEON::BI__builtin_neon_vmulq_v: + return "NEON::BI__builtin_neon_vmulq_v"; + case NEON::BI__builtin_neon_vpadd_v: + return "NEON::BI__builtin_neon_vpadd_v"; + case NEON::BI__builtin_neon_vpaddl_v: + return "NEON::BI__builtin_neon_vpaddl_v"; + case NEON::BI__builtin_neon_vpaddlq_v: + return "NEON::BI__builtin_neon_vpaddlq_v"; + case NEON::BI__builtin_neon_vpaddq_v: + return "NEON::BI__builtin_neon_vpaddq_v"; + case NEON::BI__builtin_neon_vqabs_v: + return "NEON::BI__builtin_neon_vqabs_v"; + case NEON::BI__builtin_neon_vqabsq_v: + return "NEON::BI__builtin_neon_vqabsq_v"; + case NEON::BI__builtin_neon_vqadd_v: + return "NEON::BI__builtin_neon_vqadd_v"; + case NEON::BI__builtin_neon_vqaddq_v: + return "NEON::BI__builtin_neon_vqaddq_v"; + case NEON::BI__builtin_neon_vqdmlal_v: + return "NEON::BI__builtin_neon_vqdmlal_v"; + case NEON::BI__builtin_neon_vqdmlsl_v: + return "NEON::BI__builtin_neon_vqdmlsl_v"; + case NEON::BI__builtin_neon_vqdmulh_lane_v: + return "NEON::BI__builtin_neon_vqdmulh_lane_v"; + case NEON::BI__builtin_neon_vqdmulh_laneq_v: + return "NEON::BI__builtin_neon_vqdmulh_laneq_v"; + case NEON::BI__builtin_neon_vqdmulh_v: + return "NEON::BI__builtin_neon_vqdmulh_v"; + case NEON::BI__builtin_neon_vqdmulhq_lane_v: + return "NEON::BI__builtin_neon_vqdmulhq_lane_v"; + case NEON::BI__builtin_neon_vqdmulhq_laneq_v: + return "NEON::BI__builtin_neon_vqdmulhq_laneq_v"; + case NEON::BI__builtin_neon_vqdmulhq_v: + return "NEON::BI__builtin_neon_vqdmulhq_v"; + case NEON::BI__builtin_neon_vqdmull_v: + return "NEON::BI__builtin_neon_vqdmull_v"; + case NEON::BI__builtin_neon_vqmovn_v: + return "NEON::BI__builtin_neon_vqmovn_v"; + case NEON::BI__builtin_neon_vqmovun_v: + return "NEON::BI__builtin_neon_vqmovun_v"; + case NEON::BI__builtin_neon_vqneg_v: + return "NEON::BI__builtin_neon_vqneg_v"; + case NEON::BI__builtin_neon_vqnegq_v: + return "NEON::BI__builtin_neon_vqnegq_v"; + case NEON::BI__builtin_neon_vqrdmlah_s16: + return "NEON::BI__builtin_neon_vqrdmlah_s16"; + case NEON::BI__builtin_neon_vqrdmlah_s32: + return "NEON::BI__builtin_neon_vqrdmlah_s32"; + case NEON::BI__builtin_neon_vqrdmlahq_s16: + return "NEON::BI__builtin_neon_vqrdmlahq_s16"; + case NEON::BI__builtin_neon_vqrdmlahq_s32: + return "NEON::BI__builtin_neon_vqrdmlahq_s32"; + case NEON::BI__builtin_neon_vqrdmlsh_s16: + return "NEON::BI__builtin_neon_vqrdmlsh_s16"; + case NEON::BI__builtin_neon_vqrdmlsh_s32: + return "NEON::BI__builtin_neon_vqrdmlsh_s32"; + case NEON::BI__builtin_neon_vqrdmlshq_s16: + return "NEON::BI__builtin_neon_vqrdmlshq_s16"; + case NEON::BI__builtin_neon_vqrdmlshq_s32: + return "NEON::BI__builtin_neon_vqrdmlshq_s32"; + case NEON::BI__builtin_neon_vqrdmulh_lane_v: + return "NEON::BI__builtin_neon_vqrdmulh_lane_v"; + case NEON::BI__builtin_neon_vqrdmulh_laneq_v: + return "NEON::BI__builtin_neon_vqrdmulh_laneq_v"; + case NEON::BI__builtin_neon_vqrdmulh_v: + return "NEON::BI__builtin_neon_vqrdmulh_v"; + case NEON::BI__builtin_neon_vqrdmulhq_lane_v: + return "NEON::BI__builtin_neon_vqrdmulhq_lane_v"; + case NEON::BI__builtin_neon_vqrdmulhq_laneq_v: + return "NEON::BI__builtin_neon_vqrdmulhq_laneq_v"; + case NEON::BI__builtin_neon_vqrdmulhq_v: + return "NEON::BI__builtin_neon_vqrdmulhq_v"; + case NEON::BI__builtin_neon_vqrshl_v: + return "NEON::BI__builtin_neon_vqrshl_v"; + case NEON::BI__builtin_neon_vqrshlq_v: + return "NEON::BI__builtin_neon_vqrshlq_v"; + case NEON::BI__builtin_neon_vqshl_n_v: + return "NEON::BI__builtin_neon_vqshl_n_v"; + case NEON::BI__builtin_neon_vqshl_v: + return "NEON::BI__builtin_neon_vqshl_v"; + case NEON::BI__builtin_neon_vqshlq_n_v: + return "NEON::BI__builtin_neon_vqshlq_n_v"; + case NEON::BI__builtin_neon_vqshlq_v: + return "NEON::BI__builtin_neon_vqshlq_v"; + case NEON::BI__builtin_neon_vqshlu_n_v: + return "NEON::BI__builtin_neon_vqshlu_n_v"; + case NEON::BI__builtin_neon_vqshluq_n_v: + return "NEON::BI__builtin_neon_vqshluq_n_v"; + case NEON::BI__builtin_neon_vqsub_v: + return "NEON::BI__builtin_neon_vqsub_v"; + case NEON::BI__builtin_neon_vqsubq_v: + return "NEON::BI__builtin_neon_vqsubq_v"; + case NEON::BI__builtin_neon_vraddhn_v: + return "NEON::BI__builtin_neon_vraddhn_v"; + case NEON::BI__builtin_neon_vrax1q_u64: + return "NEON::BI__builtin_neon_vrax1q_u64"; + case NEON::BI__builtin_neon_vrecpe_v: + return "NEON::BI__builtin_neon_vrecpe_v"; + case NEON::BI__builtin_neon_vrecpeq_v: + return "NEON::BI__builtin_neon_vrecpeq_v"; + case NEON::BI__builtin_neon_vrecps_v: + return "NEON::BI__builtin_neon_vrecps_v"; + case NEON::BI__builtin_neon_vrecpsq_v: + return "NEON::BI__builtin_neon_vrecpsq_v"; + case NEON::BI__builtin_neon_vrhadd_v: + return "NEON::BI__builtin_neon_vrhadd_v"; + case NEON::BI__builtin_neon_vrhaddq_v: + return "NEON::BI__builtin_neon_vrhaddq_v"; + case NEON::BI__builtin_neon_vrnd32x_f32: + return "NEON::BI__builtin_neon_vrnd32x_f32"; + case NEON::BI__builtin_neon_vrnd32x_f64: + return "NEON::BI__builtin_neon_vrnd32x_f64"; + case NEON::BI__builtin_neon_vrnd32xq_f32: + return "NEON::BI__builtin_neon_vrnd32xq_f32"; + case NEON::BI__builtin_neon_vrnd32xq_f64: + return "NEON::BI__builtin_neon_vrnd32xq_f64"; + case NEON::BI__builtin_neon_vrnd32z_f32: + return "NEON::BI__builtin_neon_vrnd32z_f32"; + case NEON::BI__builtin_neon_vrnd32z_f64: + return "NEON::BI__builtin_neon_vrnd32z_f64"; + case NEON::BI__builtin_neon_vrnd32zq_f32: + return "NEON::BI__builtin_neon_vrnd32zq_f32"; + case NEON::BI__builtin_neon_vrnd32zq_f64: + return "NEON::BI__builtin_neon_vrnd32zq_f64"; + case NEON::BI__builtin_neon_vrnd64x_f32: + return "NEON::BI__builtin_neon_vrnd64x_f32"; + case NEON::BI__builtin_neon_vrnd64x_f64: + return "NEON::BI__builtin_neon_vrnd64x_f64"; + case NEON::BI__builtin_neon_vrnd64xq_f32: + return "NEON::BI__builtin_neon_vrnd64xq_f32"; + case NEON::BI__builtin_neon_vrnd64xq_f64: + return "NEON::BI__builtin_neon_vrnd64xq_f64"; + case NEON::BI__builtin_neon_vrnd64z_f32: + return "NEON::BI__builtin_neon_vrnd64z_f32"; + case NEON::BI__builtin_neon_vrnd64z_f64: + return "NEON::BI__builtin_neon_vrnd64z_f64"; + case NEON::BI__builtin_neon_vrnd64zq_f32: + return "NEON::BI__builtin_neon_vrnd64zq_f32"; + case NEON::BI__builtin_neon_vrnd64zq_f64: + return "NEON::BI__builtin_neon_vrnd64zq_f64"; + case NEON::BI__builtin_neon_vrndi_v: + return "NEON::BI__builtin_neon_vrndi_v"; + case NEON::BI__builtin_neon_vrndiq_v: + return "NEON::BI__builtin_neon_vrndiq_v"; + case NEON::BI__builtin_neon_vrshl_v: + return "NEON::BI__builtin_neon_vrshl_v"; + case NEON::BI__builtin_neon_vrshlq_v: + return "NEON::BI__builtin_neon_vrshlq_v"; + case NEON::BI__builtin_neon_vrshr_n_v: + return "NEON::BI__builtin_neon_vrshr_n_v"; + case NEON::BI__builtin_neon_vrshrq_n_v: + return "NEON::BI__builtin_neon_vrshrq_n_v"; + case NEON::BI__builtin_neon_vrsqrte_v: + return "NEON::BI__builtin_neon_vrsqrte_v"; + case NEON::BI__builtin_neon_vrsqrteq_v: + return "NEON::BI__builtin_neon_vrsqrteq_v"; + case NEON::BI__builtin_neon_vrsqrts_v: + return "NEON::BI__builtin_neon_vrsqrts_v"; + case NEON::BI__builtin_neon_vrsqrtsq_v: + return "NEON::BI__builtin_neon_vrsqrtsq_v"; + case NEON::BI__builtin_neon_vrsubhn_v: + return "NEON::BI__builtin_neon_vrsubhn_v"; + case NEON::BI__builtin_neon_vsha1su0q_u32: + return "NEON::BI__builtin_neon_vsha1su0q_u32"; + case NEON::BI__builtin_neon_vsha1su1q_u32: + return "NEON::BI__builtin_neon_vsha1su1q_u32"; + case NEON::BI__builtin_neon_vsha256h2q_u32: + return "NEON::BI__builtin_neon_vsha256h2q_u32"; + case NEON::BI__builtin_neon_vsha256hq_u32: + return "NEON::BI__builtin_neon_vsha256hq_u32"; + case NEON::BI__builtin_neon_vsha256su0q_u32: + return "NEON::BI__builtin_neon_vsha256su0q_u32"; + case NEON::BI__builtin_neon_vsha256su1q_u32: + return "NEON::BI__builtin_neon_vsha256su1q_u32"; + case NEON::BI__builtin_neon_vsha512h2q_u64: + return "NEON::BI__builtin_neon_vsha512h2q_u64"; + case NEON::BI__builtin_neon_vsha512hq_u64: + return "NEON::BI__builtin_neon_vsha512hq_u64"; + case NEON::BI__builtin_neon_vsha512su0q_u64: + return "NEON::BI__builtin_neon_vsha512su0q_u64"; + case NEON::BI__builtin_neon_vsha512su1q_u64: + return "NEON::BI__builtin_neon_vsha512su1q_u64"; + case NEON::BI__builtin_neon_vshl_n_v: + return "NEON::BI__builtin_neon_vshl_n_v"; + case NEON::BI__builtin_neon_vshl_v: + return "NEON::BI__builtin_neon_vshl_v"; + case NEON::BI__builtin_neon_vshll_n_v: + return "NEON::BI__builtin_neon_vshll_n_v"; + case NEON::BI__builtin_neon_vshlq_n_v: + return "NEON::BI__builtin_neon_vshlq_n_v"; + case NEON::BI__builtin_neon_vshlq_v: + return "NEON::BI__builtin_neon_vshlq_v"; + case NEON::BI__builtin_neon_vshr_n_v: + return "NEON::BI__builtin_neon_vshr_n_v"; + case NEON::BI__builtin_neon_vshrn_n_v: + return "NEON::BI__builtin_neon_vshrn_n_v"; + case NEON::BI__builtin_neon_vshrq_n_v: + return "NEON::BI__builtin_neon_vshrq_n_v"; + case NEON::BI__builtin_neon_vsm3partw1q_u32: + return "NEON::BI__builtin_neon_vsm3partw1q_u32"; + case NEON::BI__builtin_neon_vsm3partw2q_u32: + return "NEON::BI__builtin_neon_vsm3partw2q_u32"; + case NEON::BI__builtin_neon_vsm3ss1q_u32: + return "NEON::BI__builtin_neon_vsm3ss1q_u32"; + case NEON::BI__builtin_neon_vsm3tt1aq_u32: + return "NEON::BI__builtin_neon_vsm3tt1aq_u32"; + case NEON::BI__builtin_neon_vsm3tt1bq_u32: + return "NEON::BI__builtin_neon_vsm3tt1bq_u32"; + case NEON::BI__builtin_neon_vsm3tt2aq_u32: + return "NEON::BI__builtin_neon_vsm3tt2aq_u32"; + case NEON::BI__builtin_neon_vsm3tt2bq_u32: + return "NEON::BI__builtin_neon_vsm3tt2bq_u32"; + case NEON::BI__builtin_neon_vsm4ekeyq_u32: + return "NEON::BI__builtin_neon_vsm4ekeyq_u32"; + case NEON::BI__builtin_neon_vsm4eq_u32: + return "NEON::BI__builtin_neon_vsm4eq_u32"; + case NEON::BI__builtin_neon_vst1_x2_v: + return "NEON::BI__builtin_neon_vst1_x2_v"; + case NEON::BI__builtin_neon_vst1_x3_v: + return "NEON::BI__builtin_neon_vst1_x3_v"; + case NEON::BI__builtin_neon_vst1_x4_v: + return "NEON::BI__builtin_neon_vst1_x4_v"; + case NEON::BI__builtin_neon_vst1q_x2_v: + return "NEON::BI__builtin_neon_vst1q_x2_v"; + case NEON::BI__builtin_neon_vst1q_x3_v: + return "NEON::BI__builtin_neon_vst1q_x3_v"; + case NEON::BI__builtin_neon_vst1q_x4_v: + return "NEON::BI__builtin_neon_vst1q_x4_v"; + case NEON::BI__builtin_neon_vsubhn_v: + return "NEON::BI__builtin_neon_vsubhn_v"; + case NEON::BI__builtin_neon_vtst_v: + return "NEON::BI__builtin_neon_vtst_v"; + case NEON::BI__builtin_neon_vtstq_v: + return "NEON::BI__builtin_neon_vtstq_v"; + case NEON::BI__builtin_neon_vusdot_s32: + return "NEON::BI__builtin_neon_vusdot_s32"; + case NEON::BI__builtin_neon_vusdotq_s32: + return "NEON::BI__builtin_neon_vusdotq_s32"; + case NEON::BI__builtin_neon_vusmmlaq_s32: + return "NEON::BI__builtin_neon_vusmmlaq_s32"; + case NEON::BI__builtin_neon_vxarq_u64: + return "NEON::BI__builtin_neon_vxarq_u64"; + } +} + // Some intrinsics are equivalent for codegen. static const std::pair NEONEquivalentIntrinsicMap[] = { { @@ -1346,9 +1897,11 @@ findARMVectorIntrinsicInMap(ArrayRef IntrinsicMap, return nullptr; } -static mlir::Type GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, - bool HasLegalHalfType = true, bool V1Ty = false, - bool AllowBFloatArgsAndRet = true) { +static mlir::cir::VectorType GetNeonType(CIRGenFunction *CGF, + NeonTypeFlags TypeFlags, + bool HasLegalHalfType = true, + bool V1Ty = false, + bool AllowBFloatArgsAndRet = true) { int IsQuad = TypeFlags.isQuad(); switch (TypeFlags.getEltType()) { case NeonTypeFlags::Int8: @@ -1449,7 +2002,7 @@ static mlir::Value buildAArch64TblBuiltinExpr(CIRGenFunction &CGF, // Determine the type of this overloaded NEON intrinsic. NeonTypeFlags Type = Result->getZExtValue(); - auto Ty = GetNeonType(&CGF, Type); + mlir::cir::VectorType Ty = GetNeonType(&CGF, Type); if (!Ty) return nullptr; @@ -1662,8 +2215,8 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( const bool allowBFloatArgsAndRet = getTargetHooks().getABIInfo().allowBFloatArgsAndRet(); - mlir::Type vTy = GetNeonType(this, neonType, hasLegalHalfType, false, - allowBFloatArgsAndRet); + mlir::cir::VectorType vTy = GetNeonType(this, neonType, hasLegalHalfType, + false, allowBFloatArgsAndRet); if (!vTy) return nullptr; @@ -1673,6 +2226,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( switch (builtinID) { default: + llvm::errs() << getAArch64SIMDIntrinsicString(builtinID) << " "; llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vqadd_v: mlir::Value res = buildNeonCall(builtinID, *this, {vTy, vTy}, ops, @@ -2400,7 +2954,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } } - auto Ty = GetNeonType(this, Type); + mlir::cir::VectorType Ty = GetNeonType(this, Type); if (!Ty) return nullptr; @@ -2494,7 +3048,12 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vqshrun_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vqrshrun_n_v: - llvm_unreachable("NYI"); + // The prototype of builtin_neon_vqrshrun_n can be found at + // https://developer.arm.com/architectures/instruction-sets/intrinsics/ + return buildNeonCall( + BuiltinID, *this, + {builder.getExtendedElementVectorType(Ty, true), SInt32Ty}, Ops, + "llvm.aarch64.neon.sqrshrun", Ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqshrn_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vrshrn_n_v: diff --git a/clang/test/CIR/CodeGen/aarch64-neon-simd-shift.c b/clang/test/CIR/CodeGen/aarch64-neon-simd-shift.c new file mode 100644 index 000000000000..8619ad0c78d6 --- /dev/null +++ b/clang/test/CIR/CodeGen/aarch64-neon-simd-shift.c @@ -0,0 +1,69 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -ffreestanding -emit-cir -target-feature +neon %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -ffreestanding -emit-llvm -target-feature +neon %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// REQUIRES: aarch64-registered-target || arm-registered-target +#include + +uint8x8_t test_vqrshrun_n_s16(int16x8_t a) { + return vqrshrun_n_s16(a, 3); +} + +// CIR-LABEL: test_vqrshrun_n_s16 +// CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : +// CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqrshrun_n_s16(<8 x i16>{{.*}} [[A:%.*]]) +// LLVM: store <8 x i16> [[A]], ptr [[A_ADDR:%.*]], align 16 +// LLVM: [[A_VAL:%.*]] = load <8 x i16>, ptr [[A_ADDR]], align 16 +// LLVM: store <8 x i16> [[A_VAL]], ptr [[S0:%.*]], align 16 +// LLVM: [[S0_VAL:%.*]] = load <8 x i16>, ptr [[S0]], align 16 +// LLVM: [[S0_VAL_CAST:%.*]] = bitcast <8 x i16> [[S0_VAL]] to <16 x i8> +// LLVM: [[INTRN_ARG:%.*]] = bitcast <16 x i8> [[S0_VAL_CAST]] to <8 x i16> +// LLVM: {{%.*}} = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> [[INTRN_ARG]], i32 3) +// LLVM: ret <8 x i8> {{%.*}} + +uint16x4_t test_vqrshrun_n_s32(int32x4_t a) { + return vqrshrun_n_s32(a, 7); +} + +// CIR-LABEL: test_vqrshrun_n_s32 +// CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : +// CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqrshrun_n_s32(<4 x i32>{{.*}} [[A:%.*]]) +// LLVM: store <4 x i32> [[A]], ptr [[A_ADDR:%.*]], align 16 +// LLVM: [[A_VAL:%.*]] = load <4 x i32>, ptr [[A_ADDR]], align 16 +// LLVM: store <4 x i32> [[A_VAL]], ptr [[S0:%.*]], align 16 +// LLVM: [[S0_VAL:%.*]] = load <4 x i32>, ptr [[S0]], align 16 +// LLVM: [[S0_VAL_CAST:%.*]] = bitcast <4 x i32> [[S0_VAL]] to <16 x i8> +// LLVM: [[INTRN_ARG:%.*]] = bitcast <16 x i8> [[S0_VAL_CAST]] to <4 x i32> +// LLVM: {{%.*}} = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> [[INTRN_ARG]], i32 7) +// LLVM: ret <4 x i16> {{%.*}} + +uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { + return vqrshrun_n_s64(a, 15); +} + +// CIR-LABEL: test_vqrshrun_n_s64 +// CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<15> : !s32i +// CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : +// CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqrshrun_n_s64(<2 x i64>{{.*}} [[A:%.*]]) +// LLVM: store <2 x i64> [[A]], ptr [[A_ADDR:%.*]], align 16 +// LLVM: [[A_VAL:%.*]] = load <2 x i64>, ptr [[A_ADDR]], align 16 +// LLVM: store <2 x i64> [[A_VAL]], ptr [[S0:%.*]], align 16 +// LLVM: [[S0_VAL:%.*]] = load <2 x i64>, ptr [[S0]], align 16 +// LLVM: [[S0_VAL_CAST:%.*]] = bitcast <2 x i64> [[S0_VAL]] to <16 x i8> +// LLVM: [[INTRN_ARG:%.*]] = bitcast <16 x i8> [[S0_VAL_CAST]] to <2 x i64> +// LLVM: {{%.*}} = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> [[INTRN_ARG]], i32 15) +// LLVM: ret <2 x i32> {{%.*}} From 8e0f14d4f77f3ca3b30f02191c2208a73578d600 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 27 Sep 2024 19:02:25 -0700 Subject: [PATCH 1898/2301] [CIR][NFC] Silence unused warning --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index d05219cf02b9..e9cfdc1ebbed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2208,7 +2208,6 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( // Determine the type of this overloaded NEON intrinsic. NeonTypeFlags neonType(neonTypeConst->getZExtValue()); bool isUnsigned = neonType.isUnsigned(); - bool isQuad = neonType.isQuad(); const bool hasLegalHalfType = getTarget().hasLegalHalfType(); // The value of allowBFloatArgsAndRet is true for AArch64, but it should // come from ABI info. From f5c5f92cba580d8081f711609c2ccd03b8107a0d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 30 Sep 2024 14:23:19 -0700 Subject: [PATCH 1899/2301] Revert "[CIR][ABI] Apply CC lowering pass by default (#842)" Fix https://github.com/llvm/clangir/issues/895 and it's also missing some more throughout behavior for the pass, it also needs to be enabled by default when emitting object files. This reverts commit db6b7c07c076cb738d0acae248d7c3c199b2b952. --- clang/include/clang/CIR/Dialect/Passes.h | 2 +- clang/include/clang/CIR/MissingFeatures.h | 41 -------- clang/include/clang/Driver/Options.td | 11 +-- .../include/clang/Frontend/FrontendOptions.h | 2 +- clang/lib/CIR/CodeGen/CIRPasses.cpp | 11 ++- .../Dialect/Transforms/CallConvLowering.cpp | 8 +- .../Transforms/TargetLowering/ABIInfo.cpp | 2 +- .../Transforms/TargetLowering/ABIInfoImpl.cpp | 7 +- .../TargetLowering/CIRLowerContext.cpp | 17 ++-- .../TargetLowering/CIRRecordLayout.cpp | 6 +- .../TargetLowering/CIRToCIRArgMapping.h | 10 +- .../TargetLowering/ItaniumCXXABI.cpp | 6 +- .../Transforms/TargetLowering/LowerCall.cpp | 36 +++---- .../TargetLowering/LowerFunction.cpp | 95 ++++++++----------- .../TargetLowering/LowerFunctionInfo.h | 9 +- .../Transforms/TargetLowering/LowerModule.cpp | 13 +-- .../Transforms/TargetLowering/LowerModule.h | 2 +- .../Transforms/TargetLowering/LowerTypes.cpp | 14 ++- .../TargetLowering/RecordLayoutBuilder.cpp | 57 ++++++----- .../TargetLowering/Targets/AArch64.cpp | 8 +- .../Targets/LoweringPrepareAArch64CXXABI.cpp | 52 +++++----- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 12 +-- .../TargetLowering/Targets/SPIR.cpp | 2 +- .../Transforms/TargetLowering/Targets/X86.cpp | 36 +++---- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 6 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +- clang/lib/Frontend/CompilerInvocation.cpp | 2 +- clang/test/CIR/CodeGen/global-new.cpp | 2 +- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 2 +- clang/test/CIR/Lowering/try-catch.cpp | 2 +- .../aarch64-call-conv-lowering-pass.cpp | 2 +- .../x86_64/x86_64-call-conv-lowering-pass.cpp | 2 +- 32 files changed, 197 insertions(+), 282 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index c9b936ca98fb..67e9da2246b6 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -42,7 +42,7 @@ std::unique_ptr createGotoSolverPass(); /// Create a pass to lower ABI-independent function definitions/calls. std::unique_ptr createCallConvLoweringPass(); -void populateCIRPreLoweringPasses(mlir::OpPassManager &pm, bool useCCLowering); +void populateCIRPreLoweringPasses(mlir::OpPassManager &pm); //===----------------------------------------------------------------------===// // Registration diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index e60ae97b8570..3540300d622c 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -15,27 +15,6 @@ #ifndef CLANG_CIR_MISSINGFEATURES_H #define CLANG_CIR_MISSINGFEATURES_H -constexpr bool cirMissingFeatureAssertionMode = - true; // Change to `false` to use llvm_unreachable - -#define NOTE \ - " Target lowering is now required. Disable it with " \ - "-fno-clangir-call-conv-lowering." - -// Special assertion to be used in the target lowering library. -#define cir_tl_assert(cond) assert((cond) && NOTE); - -// Some assertions knowingly generate incorrect code. This macro allows us to -// switch between using `assert` and `llvm_unreachable` for these cases. -#define cir_assert_or_abort(cond, msg) \ - do { \ - if (cirMissingFeatureAssertionMode) { \ - assert((cond) && msg NOTE); \ - } else { \ - llvm_unreachable(msg NOTE); \ - } \ - } while (0) - namespace cir { struct MissingFeatures { @@ -233,26 +212,6 @@ struct MissingFeatures { //===--- ABI lowering --===// - static bool SPIRVABI() { return false; } - - static bool AArch64TypeClassification() { return false; } - - static bool X86ArgTypeClassification() { return false; } - static bool X86DefaultABITypeConvertion() { return false; } - static bool X86GetFPTypeAtOffset() { return false; } - static bool X86RetTypeClassification() { return false; } - static bool X86TypeClassification() { return false; } - - static bool ABIClangTypeKind() { return false; } - static bool ABIEnterStructForCoercedAccess() { return false; } - static bool ABIFuncPtr() { return false; } - static bool ABIInRegAttribute() { return false; } - static bool ABINestedRecordLayout() { return false; } - static bool ABINoProtoFunctions() { return false; } - static bool ABIParameterCoercion() { return false; } - static bool ABIPointerParameterAttrs() { return false; } - static bool ABITransparentUnionHandling() { return false; } - //-- Missing AST queries static bool CXXRecordDeclIsEmptyCXX11() { return false; } diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index ff05be57a99f..e5010ef3a066 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3079,6 +3079,10 @@ def fclangir_lib_opt : Flag<["-"], "fclangir-lib-opt">, Visibility<[ClangOption, CC1Option]>, Group, Alias, HelpText<"Enable C/C++ library based optimizations">; +def fclangir_call_conv_lowering : Flag<["-"], "fclangir-call-conv-lowering">, + Visibility<[ClangOption, CC1Option]>, Group, + HelpText<"Enable ClangIR calling convention lowering">, + MarshallingInfoFlag>; def fclangir_mem2reg : Flag<["-"], "fclangir-mem2reg">, Visibility<[ClangOption, CC1Option]>, Group, HelpText<"Enable mem2reg on the flat ClangIR">, @@ -3109,13 +3113,6 @@ defm clangir_analysis_only : BoolFOption<"clangir-analysis-only", PosFlag, NegFlag>; -// FIXME(cir): Remove this option once all pre-existing tests are compatible with -// the calling convention lowering pass. -defm clangir_call_conv_lowering : BoolFOption<"clangir-call-conv-lowering", - FrontendOpts<"ClangIRCallConvLowering">, DefaultTrue, - PosFlag, - NegFlag, - BothFlags<[], [ClangOption, CC1Option], "">>; def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, Group, HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 64664f41c879..b9e4d09df222 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -449,7 +449,7 @@ class FrontendOptions { unsigned ClangIRLibOpt : 1; // Enable Clang IR call conv lowering pass. - unsigned ClangIRCallConvLowering : 1; + unsigned ClangIREnableCallConvLowering : 1; // Enable Clang IR mem2reg pass on the flat CIR. unsigned ClangIREnableMem2Reg : 1; diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index d56a7cc61e52..4f89daa1cee4 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -71,8 +71,13 @@ mlir::LogicalResult runCIRToCIRPasses( pm.addPass(mlir::createLoweringPreparePass(&astCtx)); + // FIXME(cir): This pass should run by default, but it is lacking support for + // several code bits. Once it's more mature, we should fix this. + if (enableCallConvLowering) + pm.addPass(mlir::createCallConvLoweringPass()); + if (flattenCIR || enableMem2Reg) - mlir::populateCIRPreLoweringPasses(pm, enableCallConvLowering); + mlir::populateCIRPreLoweringPasses(pm); if (enableMem2Reg) pm.addPass(mlir::createMem2Reg()); @@ -92,9 +97,7 @@ mlir::LogicalResult runCIRToCIRPasses( namespace mlir { -void populateCIRPreLoweringPasses(OpPassManager &pm, bool useCCLowering) { - if (useCCLowering) - pm.addPass(createCallConvLoweringPass()); +void populateCIRPreLoweringPasses(OpPassManager &pm) { pm.addPass(createFlattenCFGPass()); pm.addPass(createGotoSolverPass()); } diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index f7e4410010ec..3a4b9b397c5b 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// + #include "TargetLowering/LowerModule.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/IR/BuiltinOps.h" @@ -13,7 +14,6 @@ #include "mlir/Pass/Pass.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "clang/CIR/MissingFeatures.h" #define GEN_PASS_DEF_CALLCONVLOWERING #include "clang/CIR/Dialect/Passes.h.inc" @@ -44,12 +44,6 @@ struct CallConvLoweringPattern : public OpRewritePattern { auto calls = op.getSymbolUses(module); if (calls.has_value()) { for (auto call : calls.value()) { - // FIXME(cir): Function pointers are ignored. - if (isa(call.getUser())) { - cir_assert_or_abort(!::cir::MissingFeatures::ABIFuncPtr(), "NYI"); - continue; - } - auto callOp = cast(call.getUser()); if (lowerModule->rewriteFunctionCall(callOp, op).failed()) return failure(); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 6cb69c7eeb88..4e2a81de9fc1 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -37,7 +37,7 @@ bool ABIInfo::isPromotableIntegerTypeForABI(Type Ty) const { if (getContext().isPromotableIntegerType(Ty)) return true; - cir_tl_assert(!::cir::MissingFeatures::fixedWidthIntegers()); + assert(!::cir::MissingFeatures::fixedWidthIntegers()); return false; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index 38f9fb8ffaa4..041c801dbe2e 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -26,22 +26,21 @@ bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, Type Ty = FI.getReturnType(); if (const auto RT = dyn_cast(Ty)) { - cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl()); + assert(!::cir::MissingFeatures::isCXXRecordDecl()); } return CXXABI.classifyReturnType(FI); } bool isAggregateTypeForABI(Type T) { - cir_tl_assert(!::cir::MissingFeatures::functionMemberPointerType()); + assert(!::cir::MissingFeatures::functionMemberPointerType()); return !LowerFunction::hasScalarEvaluationKind(T); } Type useFirstFieldIfTransparentUnion(Type Ty) { if (auto RT = dyn_cast(Ty)) { if (RT.isUnion()) - cir_assert_or_abort( - !::cir::MissingFeatures::ABITransparentUnionHandling(), "NYI"); + llvm_unreachable("NYI"); } return Ty; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index ecca0db0deb9..42aae0a80d04 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -55,10 +55,7 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { } else if (isa(T)) { typeKind = clang::Type::Record; } else { - cir_assert_or_abort(!::cir::MissingFeatures::ABIClangTypeKind(), - "Unhandled type class"); - // FIXME(cir): Completely wrong. Just here to make it non-blocking. - typeKind = clang::Type::Builtin; + llvm_unreachable("Unhandled type class"); } // FIXME(cir): Here we fetch the width and alignment of a type considering the @@ -99,10 +96,10 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { } case clang::Type::Record: { const auto RT = dyn_cast(T); - cir_tl_assert(!::cir::MissingFeatures::tagTypeClassAbstraction()); + assert(!::cir::MissingFeatures::tagTypeClassAbstraction()); // Only handle TagTypes (names types) for now. - cir_tl_assert(RT.getName() && "Anonymous record is NYI"); + assert(RT.getName() && "Anonymous record is NYI"); // NOTE(cir): Clang does some hanlding of invalid tagged declarations here. // Not sure if this is necessary in CIR. @@ -114,14 +111,14 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { const CIRRecordLayout &Layout = getCIRRecordLayout(RT); Width = toBits(Layout.getSize()); Align = toBits(Layout.getAlignment()); - cir_tl_assert(!::cir::MissingFeatures::recordDeclHasAlignmentAttr()); + assert(!::cir::MissingFeatures::recordDeclHasAlignmentAttr()); break; } default: llvm_unreachable("Unhandled type class"); } - cir_tl_assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); + assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); return clang::TypeInfo(Width, Align, AlignRequirement); } @@ -129,7 +126,7 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { Type Ty; // NOTE(cir): Clang does more stuff here. Not sure if we need to do the same. - cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); + assert(!::cir::MissingFeatures::qualifiedTypes()); switch (K) { case clang::BuiltinType::Char_S: Ty = IntType::get(getMLIRContext(), 8, true); @@ -144,7 +141,7 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { void CIRLowerContext::initBuiltinTypes(const clang::TargetInfo &Target, const clang::TargetInfo *AuxTarget) { - cir_tl_assert((!this->Target || this->Target == &Target) && + assert((!this->Target || this->Target == &Target) && "Incorrect target reinitialization"); this->Target = &Target; this->AuxTarget = AuxTarget; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp index bd964d654267..2744f67d19de 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp @@ -38,16 +38,16 @@ CIRRecordLayout::CIRRecordLayout( FieldOffsets.insert(FieldOffsets.end(), fieldoffsets.begin(), fieldoffsets.end()); - cir_tl_assert(!PrimaryBase && "Layout for class with inheritance is NYI"); + assert(!PrimaryBase && "Layout for class with inheritance is NYI"); // CXXInfo->PrimaryBase.setPointer(PrimaryBase); - cir_tl_assert(!IsPrimaryBaseVirtual && "Layout for virtual base class is NYI"); + assert(!IsPrimaryBaseVirtual && "Layout for virtual base class is NYI"); // CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual); CXXInfo->NonVirtualSize = nonvirtualsize; CXXInfo->NonVirtualAlignment = nonvirtualalignment; CXXInfo->PreferredNVAlignment = preferrednvalignment; CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject; // FIXME(cir): Initialize base classes offsets. - cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); + assert(!::cir::MissingFeatures::getCXXRecordBases()); CXXInfo->HasOwnVFPtr = hasOwnVFPtr; CXXInfo->VBPtrOffset = vbptroffset; CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index 664fd05ea658..dd09122b94d9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -58,7 +58,7 @@ class CIRToCIRArgMapping { unsigned totalIRArgs() const { return TotalIRArgs; } bool hasPaddingArg(unsigned ArgNo) const { - cir_tl_assert(ArgNo < ArgInfo.size()); + assert(ArgNo < ArgInfo.size()); return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; } @@ -77,7 +77,7 @@ class CIRToCIRArgMapping { onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); for (LowerFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; ++I, ++ArgNo) { - cir_tl_assert(I != FI.arg_end()); + assert(I != FI.arg_end()); // Type ArgType = I->type; const ::cir::ABIArgInfo &AI = I->info; // Collect data about IR arguments corresponding to Clang argument ArgNo. @@ -91,7 +91,7 @@ class CIRToCIRArgMapping { case ::cir::ABIArgInfo::Extend: case ::cir::ABIArgInfo::Direct: { // FIXME(cir): handle sseregparm someday... - cir_tl_assert(AI.getCoerceToType() && "Missing coerced type!!"); + assert(AI.getCoerceToType() && "Missing coerced type!!"); StructType STy = dyn_cast(AI.getCoerceToType()); if (AI.isDirect() && AI.getCanBeFlattened() && STy) { llvm_unreachable("NYI"); @@ -114,7 +114,7 @@ class CIRToCIRArgMapping { if (IRArgNo == 1 && SwapThisWithSRet) IRArgNo++; } - cir_tl_assert(ArgNo == ArgInfo.size()); + assert(ArgNo == ArgInfo.size()); if (::cir::MissingFeatures::inallocaArgs()) { llvm_unreachable("NYI"); @@ -126,7 +126,7 @@ class CIRToCIRArgMapping { /// Returns index of first IR argument corresponding to ArgNo, and their /// quantity. std::pair getIRArgs(unsigned ArgNo) const { - cir_tl_assert(ArgNo < ArgInfo.size()); + assert(ArgNo < ArgInfo.size()); return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, ArgInfo[ArgNo].NumberOfArgs); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index 3cd27c35cf55..c0add1ecc1df 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -46,9 +46,9 @@ class ItaniumCXXABI : public CIRCXXABI { // FIXME(cir): This expects a CXXRecordDecl! Not any record type. RecordArgABI getRecordArgABI(const StructType RD) const override { - cir_tl_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); // If C++ prohibits us from making a copy, pass by address. - cir_tl_assert(!::cir::MissingFeatures::recordDeclCanPassInRegisters()); + assert(!::cir::MissingFeatures::recordDeclCanPassInRegisters()); return RAA_Default; } }; @@ -76,7 +76,7 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { case clang::TargetCXXABI::AppleARM64: // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits // from ARMCXXABI. We'll have to follow suit. - cir_tl_assert(!::cir::MissingFeatures::appleArm64CXXABI()); + assert(!::cir::MissingFeatures::appleArm64CXXABI()); return new ItaniumCXXABI(LM, /*UseARMMethodPtrABI=*/true, /*UseARMGuardVarABI=*/true); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index af036efef8cc..42de07ec6965 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -23,9 +23,9 @@ const LowerFunctionInfo & arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, const OperandRange &args, const FuncType fnType, unsigned numExtraRequiredArgs, bool chainCall) { - cir_tl_assert(args.size() >= numExtraRequiredArgs); + assert(args.size() >= numExtraRequiredArgs); - cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); + assert(!::cir::MissingFeatures::extParamInfo()); // In most cases, there are no optional arguments. RequiredArgs required = RequiredArgs::All; @@ -35,7 +35,7 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, // FIXME(cir): Properly check if function is no-proto. if (/*IsPrototypedFunction=*/true) { if (fnType.isVarArg()) - cir_assert_or_abort(!::cir::MissingFeatures::isVarArg(), "NYI"); + llvm_unreachable("NYI"); if (::cir::MissingFeatures::extParamInfo()) llvm_unreachable("NYI"); @@ -45,7 +45,7 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, // its skipped here since it requires CodeGen info. Maybe this information // could be embbed in the FuncOp during CIRGen. - cir_tl_assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); + assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; return LT.arrangeLLVMFunctionInfo(fnType.getReturnType(), opts, fnType.getInputs(), required); @@ -60,7 +60,7 @@ static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { return; } - cir_tl_assert(MissingFeatures::extParamInfo()); + assert(MissingFeatures::extParamInfo()); llvm_unreachable("NYI"); } @@ -74,11 +74,11 @@ static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { static const LowerFunctionInfo & arrangeCIRFunctionInfo(LowerTypes &CGT, bool instanceMethod, SmallVectorImpl &prefix, FuncType fnTy) { - cir_tl_assert(!MissingFeatures::extParamInfo()); + assert(!MissingFeatures::extParamInfo()); RequiredArgs Required = RequiredArgs::forPrototypePlus(fnTy, prefix.size()); // FIXME: Kill copy. appendParameterTypes(prefix, fnTy); - cir_tl_assert(!MissingFeatures::qualifiedTypes()); + assert(!MissingFeatures::qualifiedTypes()); Type resultType = fnTy.getReturnType(); FnInfoOpts opts = @@ -110,7 +110,7 @@ void LowerModule::constructAttributeList(StringRef Name, // TODO(cir): Implement AddAttributesFromFunctionProtoType here. // TODO(cir): Implement AddAttributesFromOMPAssumes here. - cir_tl_assert(!MissingFeatures::openMP()); + assert(!MissingFeatures::openMP()); // TODO(cir): Skipping a bunch of AST queries here. We will need to partially // implement some of them as this section sets target-specific attributes @@ -147,8 +147,8 @@ void LowerModule::constructAttributeList(StringRef Name, [[fallthrough]]; case ABIArgInfo::Direct: if (RetAI.getInReg()) - cir_assert_or_abort(!::cir::MissingFeatures::ABIInRegAttribute(), "NYI"); - cir_tl_assert(!::cir::MissingFeatures::noFPClass()); + llvm_unreachable("InReg attribute is NYI"); + assert(!::cir::MissingFeatures::noFPClass()); break; case ABIArgInfo::Ignore: break; @@ -216,7 +216,7 @@ void LowerModule::constructAttributeList(StringRef Name, else if (AI.getInReg()) llvm_unreachable("InReg attribute is NYI"); // Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); - cir_tl_assert(!::cir::MissingFeatures::noFPClass()); + assert(!::cir::MissingFeatures::noFPClass()); break; default: llvm_unreachable("Missing ABIArgInfo::Kind"); @@ -227,7 +227,7 @@ void LowerModule::constructAttributeList(StringRef Name, } // TODO(cir): Missing some swift and nocapture stuff here. - cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); + assert(!::cir::MissingFeatures::extParamInfo()); if (!Attrs.empty()) { unsigned FirstIRArg, NumIRArgs; @@ -236,7 +236,7 @@ void LowerModule::constructAttributeList(StringRef Name, newFn.setArgAttrs(FirstIRArg + i, Attrs); } } - cir_tl_assert(ArgNo == FI.arg_size()); + assert(ArgNo == FI.arg_size()); } /// Arrange the argument and result information for the declaration or @@ -245,15 +245,15 @@ const LowerFunctionInfo &LowerTypes::arrangeFunctionDeclaration(FuncOp fnOp) { if (MissingFeatures::funcDeclIsCXXMethodDecl()) llvm_unreachable("NYI"); - cir_tl_assert(!MissingFeatures::qualifiedTypes()); + assert(!MissingFeatures::qualifiedTypes()); FuncType FTy = fnOp.getFunctionType(); - cir_tl_assert(!MissingFeatures::CUDA()); + assert(!MissingFeatures::CUDA()); // When declaring a function without a prototype, always use a // non-variadic type. if (fnOp.getNoProto()) { - cir_assert_or_abort(!::cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); + llvm_unreachable("NYI"); } return arrangeFreeFunctionType(FTy); @@ -300,12 +300,12 @@ const LowerFunctionInfo & LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, ArrayRef argTypes, RequiredArgs required) { - cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); + assert(!::cir::MissingFeatures::qualifiedTypes()); LowerFunctionInfo *FI = nullptr; // FIXME(cir): Allow user-defined CCs (e.g. __attribute__((vectorcall))). - cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); + assert(!::cir::MissingFeatures::extParamInfo()); unsigned CC = clangCallConvToLLVMCallConv(clang::CallingConv::CC_C); // Construct the function info. We co-allocate the ArgInfos. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 749c91b9d8d1..9e90c44a7d76 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -63,10 +63,7 @@ Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, FirstEltSize < CGF.LM.getDataLayout().getTypeStoreSize(SrcSTy)) return SrcPtr; - cir_assert_or_abort(!::cir::MissingFeatures::ABIEnterStructForCoercedAccess(), - "NYI"); - return SrcPtr; // FIXME: This is a temporary workaround for the assertion - // above. + llvm_unreachable("NYI"); } /// Create a store to \param Dst from \param Src where the source and @@ -83,13 +80,13 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, } // FIXME(cir): We need a better way to handle datalayout queries. - cir_tl_assert(isa(SrcTy)); + assert(isa(SrcTy)); llvm::TypeSize SrcSize = CGF.LM.getDataLayout().getTypeAllocSize(SrcTy); if (StructType DstSTy = dyn_cast(DstTy)) { Dst = enterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize.getFixedValue(), CGF); - cir_tl_assert(isa(Dst.getType())); + assert(isa(Dst.getType())); DstTy = cast(Dst.getType()).getPointee(); } @@ -110,7 +107,7 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, llvm::TypeSize DstSize = CGF.LM.getDataLayout().getTypeAllocSize(DstTy); // If store is legal, just bitcast the src pointer. - cir_tl_assert(!::cir::MissingFeatures::vectorType()); + assert(!::cir::MissingFeatures::vectorType()); if (SrcSize.getFixedValue() <= DstSize.getFixedValue()) { // Dst = Dst.withElementType(SrcTy); CGF.buildAggregateStore(Src, Dst, DstIsVolatile); @@ -260,14 +257,14 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // are dealt with in CIRGen. CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), FI); - cir_tl_assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); + assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); // If we're using inalloca, all the memory arguments are GEPs off of the last // parameter, which is a pointer to the complete memory area. - cir_tl_assert(!::cir::MissingFeatures::inallocaArgs()); + assert(!::cir::MissingFeatures::inallocaArgs()); // Name the struct return parameter. - cir_tl_assert(!::cir::MissingFeatures::sretArgs()); + assert(!::cir::MissingFeatures::sretArgs()); // Track if we received the parameter as a pointer (indirect, byval, or // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it @@ -275,18 +272,11 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, SmallVector ArgVals; ArgVals.reserve(Args.size()); - // FIXME(cir): non-blocking workaround for argument types that are not yet - // properly handled by the ABI. - if (cirMissingFeatureAssertionMode && FI.arg_size() != Args.size()) { - cir_tl_assert(::cir::MissingFeatures::ABIParameterCoercion()); - return success(); - } - // Create a pointer value for every parameter declaration. This usually // entails copying one or more LLVM IR arguments into an alloca. Don't push // any cleanups or do anything that might unwind. We do that separately, so // we can push the cleanups in the correct order for the ABI. - cir_tl_assert(FI.arg_size() == Args.size()); + assert(FI.arg_size() == Args.size()); unsigned ArgNo = 0; LowerFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); for (MutableArrayRef::const_iterator i = Args.begin(), @@ -304,7 +294,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, llvm_unreachable("NYI"); else Ty = Arg.getType(); - cir_tl_assert(!::cir::MissingFeatures::evaluationKind()); + assert(!::cir::MissingFeatures::evaluationKind()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -320,15 +310,14 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // http://llvm.org/docs/LangRef.html#paramattrs. if (ArgI.getDirectOffset() == 0 && isa(LTy) && isa(ArgI.getCoerceToType())) { - cir_assert_or_abort(!::cir::MissingFeatures::ABIPointerParameterAttrs(), - "NYI"); + llvm_unreachable("NYI"); } // Prepare the argument value. If we have the trivial case, handle it // with no muss and fuss. if (!isa(ArgI.getCoerceToType()) && ArgI.getCoerceToType() == Ty && ArgI.getDirectOffset() == 0) { - cir_tl_assert(NumIRArgs == 1); + assert(NumIRArgs == 1); // LLVM expects swifterror parameters to be used in very restricted // ways. Copy the value into a less-restricted temporary. @@ -355,7 +344,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, break; } - cir_tl_assert(!::cir::MissingFeatures::vectorType()); + assert(!::cir::MissingFeatures::vectorType()); // Allocate original argument to be "uncoerced". // FIXME(cir): We should have a alloca op builder that does not required @@ -377,7 +366,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, llvm_unreachable("NYI"); } else { // Simple case, just do a coerced store of the argument into the alloca. - cir_tl_assert(NumIRArgs == 1); + assert(NumIRArgs == 1); Value AI = Fn.getArgument(FirstIRArg); // TODO(cir): Set argument name in the new function. createCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); @@ -396,7 +385,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // RAUW the original argument alloca with the new one. This assumes that // the argument is used only to be stored in a alloca. Value arg = SrcFn.getArgument(ArgNo); - cir_tl_assert(arg.hasOneUse()); + assert(arg.hasOneUse()); auto *firstStore = *arg.user_begin(); auto argAlloca = cast(firstStore).getAddr(); rewriter.replaceAllUsesWith(argAlloca, Alloca); @@ -482,33 +471,28 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { /// focuses on the ABI-specific details. So a lot of codegen stuff is removed. LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, const LowerFunctionInfo &FnInfo) { - cir_tl_assert(newFn && "generating code for null Function"); + assert(newFn && "generating code for null Function"); auto Args = oldFn.getArguments(); // Emit the ABI-specific function prologue. - cir_tl_assert(newFn.empty() && "Function already has a body"); + assert(newFn.empty() && "Function already has a body"); rewriter.setInsertionPointToEnd(newFn.addEntryBlock()); if (buildFunctionProlog(FnInfo, newFn, oldFn.getArguments()).failed()) return failure(); // Ensure that old ABI-agnostic arguments uses were replaced. const auto hasNoUses = [](Value val) { return val.getUses().empty(); }; - cir_tl_assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && "Missing RAUW?"); - - // NOTE(cir): While the new function has the ABI-aware parameters, the old - // function still has the function logic. To complete the migration, we have - // to move the old function body to the new function. - - // Backup references to entry blocks. - Block *srcBlock = &oldFn.getBody().front(); - Block *dstBlock = &newFn.getBody().front(); + assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && "Missing RAUW?"); // Migrate function body to new ABI-aware function. - rewriter.inlineRegionBefore(oldFn.getBody(), newFn.getBody(), - newFn.getBody().end()); + assert(oldFn.getBody().hasOneBlock() && + "Multiple blocks in original function not supported"); - // Merge entry blocks to ensure correct branching. - rewriter.mergeBlocks(srcBlock, dstBlock, newFn.getArguments()); + // Move old function body to new function. + // FIXME(cir): The merge below is not very good: will not work if SrcFn has + // multiple blocks and it mixes the new and old prologues. + rewriter.mergeBlocks(&oldFn.getBody().front(), &newFn.getBody().front(), + newFn.getArguments()); // FIXME(cir): What about saving parameters for corotines? Should we do // something about it in this pass? If the change with the calling @@ -527,14 +511,14 @@ void LowerFunction::buildAggregateStore(Value Val, Value Dest, // Function to store a first-class aggregate into memory. We prefer to // store the elements rather than the aggregate to be more friendly to // fast-isel. - cir_tl_assert(mlir::isa(Dest.getType()) && "Storing in a non-pointer!"); + assert(mlir::isa(Dest.getType()) && "Storing in a non-pointer!"); (void)DestIsVolatile; // Circumvent CIR's type checking. Type pointeeTy = mlir::cast(Dest.getType()).getPointee(); if (Val.getType() != pointeeTy) { // NOTE(cir): We only bitcast and store if the types have the same size. - cir_tl_assert((LM.getDataLayout().getTypeSizeInBits(Val.getType()) == + assert((LM.getDataLayout().getTypeSizeInBits(Val.getType()) == LM.getDataLayout().getTypeSizeInBits(pointeeTy)) && "Incompatible types"); auto loc = Val.getLoc(); @@ -568,7 +552,7 @@ LogicalResult LowerFunction::rewriteCallOp(CallOp op, // NOTE(cir): There is no direct way to fetch the function type from the // CallOp, so we fetch it from the source function. This assumes the // function definition has not yet been lowered. - cir_tl_assert(SrcFn && "No source function"); + assert(SrcFn && "No source function"); auto fnType = SrcFn.getFunctionType(); // Rewrite the call operation to abide to the ABI calling convention. @@ -626,10 +610,10 @@ Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, // Chain calls use this same code path to add the invisible chain parameter // to the function type. if (origCallee.getNoProto() || Chain) { - cir_assert_or_abort(::cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); + llvm_unreachable("NYI"); } - cir_tl_assert(!::cir::MissingFeatures::CUDA()); + assert(!::cir::MissingFeatures::CUDA()); // TODO(cir): LLVM IR has the concept of "CallBase", which is a base class // for all types of calls. Perhaps we should have a CIR interface to mimic @@ -681,12 +665,12 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, llvm_unreachable("NYI"); } - cir_tl_assert(!::cir::MissingFeatures::swift()); + assert(!::cir::MissingFeatures::swift()); // NOTE(cir): Skipping lifetime markers here. // Translate all of the arguments as necessary to match the IR lowering. - cir_tl_assert(CallInfo.arg_size() == CallArgs.size() && + assert(CallInfo.arg_size() == CallArgs.size() && "Mismatch between function signature & arguments."); unsigned ArgNo = 0; LowerFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); @@ -712,7 +696,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!isa(ArgInfo.getCoerceToType()) && ArgInfo.getCoerceToType() == info_it->type && ArgInfo.getDirectOffset() == 0) { - cir_tl_assert(NumIRArgs == 1); + assert(NumIRArgs == 1); Value V; if (!isa(I->getType())) { V = *I; @@ -758,11 +742,11 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, llvm_unreachable("NYI"); } else { // In the simple case, just pass the coerced loaded value. - cir_tl_assert(NumIRArgs == 1); + assert(NumIRArgs == 1); Value Load = createCoercedValue(Src, ArgInfo.getCoerceToType(), *this); // FIXME(cir): We should probably handle CMSE non-secure calls here - cir_tl_assert(!::cir::MissingFeatures::cmseNonSecureCallAttr()); + assert(!::cir::MissingFeatures::cmseNonSecureCallAttr()); // since they are a ARM-specific feature. if (::cir::MissingFeatures::undef()) @@ -787,7 +771,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // debugging stuff here. // Update the largest vector width if any arguments have vector types. - cir_tl_assert(!::cir::MissingFeatures::vectorType()); + assert(!::cir::MissingFeatures::vectorType()); // Compute the calling convention and attributes. @@ -813,7 +797,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, rewriter.getAttr(rewriter.getDictionaryAttr({})); newCallOp->setAttr("extra_attrs", extraAttrs); - cir_tl_assert(!::cir::MissingFeatures::vectorType()); + assert(!::cir::MissingFeatures::vectorType()); // NOTE(cir): Skipping some ObjC, tail-call, debug, and attribute stuff // here. @@ -863,7 +847,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // FIXME(cir): Use return value slot here. Value RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. - cir_tl_assert(!::cir::MissingFeatures::volatileTypes()); + assert(!::cir::MissingFeatures::volatileTypes()); // NOTE(cir): If the function returns, there should always be a valid // return value present. Instead of setting the return value here, we @@ -871,7 +855,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!RetVal) { RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. - cir_tl_assert(::cir::MissingFeatures::volatileTypes()); + assert(::cir::MissingFeatures::volatileTypes()); } // An empty record can overlap other data (if declared with @@ -913,8 +897,7 @@ ::cir::TypeEvaluationKind LowerFunction::getEvaluationKind(Type type) { // FIXME(cir): Implement type classes for CIR types. if (isa(type)) return ::cir::TypeEvaluationKind::TEK_Aggregate; - if (isa(type)) + if (isa(type)) return ::cir::TypeEvaluationKind::TEK_Scalar; llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index 0c30c955a6c1..c81335c9985a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -35,7 +35,7 @@ class RequiredArgs { enum All_t { All }; RequiredArgs(All_t _) : NumRequired(~0U) {} - explicit RequiredArgs(unsigned n) : NumRequired(n) { cir_tl_assert(n != ~0U); } + explicit RequiredArgs(unsigned n) : NumRequired(n) { assert(n != ~0U); } /// Compute the arguments required by the given formal prototype, /// given that there may be some additional, non-formal arguments @@ -47,8 +47,7 @@ class RequiredArgs { if (!prototype.isVarArg()) return All; - cir_assert_or_abort(!::cir::MissingFeatures::variadicFunctions(), "NYI"); - return All; // FIXME(cir): Temporary workaround for the assertion above. + llvm_unreachable("Variadic function is NYI"); } bool allowsOptionalArgs() const { return NumRequired != ~0U; } @@ -106,7 +105,7 @@ class LowerFunctionInfo final ArrayRef argTypes, RequiredArgs required) { // TODO(cir): Add assertions? - cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); + assert(!::cir::MissingFeatures::extParamInfo()); void *buffer = operator new(totalSizeToAlloc(argTypes.size() + 1)); LowerFunctionInfo *FI = new (buffer) LowerFunctionInfo(); @@ -147,7 +146,7 @@ class LowerFunctionInfo final unsigned arg_size() const { return NumArgs; } bool isVariadic() const { - cir_tl_assert(!::cir::MissingFeatures::variadicFunctions()); + assert(!::cir::MissingFeatures::variadicFunctions()); return false; } unsigned getNumRequiredArgs() const { diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index d0d88fa52008..715a5f2470d7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -199,13 +199,10 @@ LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { llvm_unreachable("ExtraAttrs are NYI"); } - // Is a function definition: handle the body. - if (!op.isDeclaration()) { - if (LowerFunction(*this, rewriter, op, newFn) - .generateCode(op, newFn, FI) - .failed()) - return failure(); - } + if (LowerFunction(*this, rewriter, op, newFn) + .generateCode(op, newFn, FI) + .failed()) + return failure(); // Erase original ABI-agnostic function. rewriter.eraseOp(op); @@ -242,7 +239,7 @@ std::unique_ptr createLowerModule(ModuleOp module, // FIXME(cir): This just uses the default language options. We need to account // for custom options. // Create context. - cir_tl_assert(!::cir::MissingFeatures::langOpts()); + assert(!::cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; return std::make_unique(langOpts, module, dataLayoutStr, diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index a7f3e1fa187a..44cd5a0ae1cb 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -68,7 +68,7 @@ class LowerModule { // FIXME(cir): This would be in ASTContext, not CodeGenModule. clang::TargetCXXABI::Kind getCXXABIKind() const { auto kind = getTarget().getCXXABI().getKind(); - cir_tl_assert(!::cir::MissingFeatures::langOpts()); + assert(!::cir::MissingFeatures::langOpts()); return kind; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index fa1e34140167..bdec98a64f43 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -60,10 +60,10 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); // Add type for sret argument. - cir_tl_assert(!::cir::MissingFeatures::sretArgs()); + assert(!::cir::MissingFeatures::sretArgs()); // Add type for inalloca argument. - cir_tl_assert(!::cir::MissingFeatures::inallocaArgs()); + assert(!::cir::MissingFeatures::inallocaArgs()); // Add in all of the required arguments. unsigned ArgNo = 0; @@ -72,7 +72,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { for (; it != ie; ++it, ++ArgNo) { const ABIArgInfo &ArgInfo = it->info; - cir_tl_assert(!::cir::MissingFeatures::argumentPadding()); + assert(!::cir::MissingFeatures::argumentPadding()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -85,11 +85,11 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { Type argType = ArgInfo.getCoerceToType(); StructType st = dyn_cast(argType); if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { - cir_tl_assert(NumIRArgs == st.getNumElements()); + assert(NumIRArgs == st.getNumElements()); for (unsigned i = 0, e = st.getNumElements(); i != e; ++i) ArgTypes[FirstIRArg + i] = st.getMembers()[i]; } else { - cir_tl_assert(NumIRArgs == 1); + assert(NumIRArgs == 1); ArgTypes[FirstIRArg] = argType; } break; @@ -117,7 +117,5 @@ mlir::Type LowerTypes::convertType(Type T) { } llvm::outs() << "Missing default ABI-specific type for " << T << "\n"; - cir_assert_or_abort(!::cir::MissingFeatures::X86DefaultABITypeConvertion(), - "NYI"); - return T; + llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index 55484923f1b3..ea8ef6f28144 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -58,11 +58,11 @@ class EmptySubobjectMap { void EmptySubobjectMap::ComputeEmptySubobjectSizes() { // Check the bases. - cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); + assert(!::cir::MissingFeatures::getCXXRecordBases()); // Check the fields. for (const auto FT : Class.getMembers()) { - cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); + assert(!::cir::MissingFeatures::qualifiedTypes()); const auto RT = dyn_cast(FT); // We only care about record types. @@ -70,8 +70,7 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() { continue; // TODO(cir): Handle nested record types. - cir_assert_or_abort(!::cir::MissingFeatures::ABINestedRecordLayout(), - "NYI"); + llvm_unreachable("NYI"); } } @@ -207,7 +206,7 @@ class ItaniumRecordLayoutBuilder { bool isPacked, const Type Ty); clang::CharUnits getSize() const { - cir_tl_assert(Size % Context.getCharWidth() == 0); + assert(Size % Context.getCharWidth() == 0); return Context.toCharUnitsFromBits(Size); } uint64_t getSizeInBits() const { return Size; } @@ -216,7 +215,7 @@ class ItaniumRecordLayoutBuilder { void setSize(uint64_t NewSize) { Size = NewSize; } clang::CharUnits getDataSize() const { - cir_tl_assert(DataSize % Context.getCharWidth() == 0); + assert(DataSize % Context.getCharWidth() == 0); return Context.toCharUnitsFromBits(DataSize); } @@ -235,24 +234,24 @@ void ItaniumRecordLayoutBuilder::layout(const StructType RT) { initializeLayout(RT); // Lay out the vtable and the non-virtual bases. - cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl() && + assert(!::cir::MissingFeatures::isCXXRecordDecl() && !::cir::MissingFeatures::CXXRecordIsDynamicClass()); layoutFields(RT); // FIXME(cir): Handle virtual-related layouts. - cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); + assert(!::cir::MissingFeatures::getCXXRecordBases()); - cir_tl_assert(!::cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); + assert(!::cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); } void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { if (const auto RT = dyn_cast(Ty)) { IsUnion = RT.isUnion(); - cir_tl_assert(!::cir::MissingFeatures::recordDeclIsMSStruct()); + assert(!::cir::MissingFeatures::recordDeclIsMSStruct()); } - cir_tl_assert(!::cir::MissingFeatures::recordDeclIsPacked()); + assert(!::cir::MissingFeatures::recordDeclIsPacked()); // Honor the default struct packing maximum alignment flag. if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) { @@ -290,7 +289,7 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { void ItaniumRecordLayoutBuilder::layoutField(const Type D, bool InsertExtraPadding) { // auto FieldClass = D.dyn_cast(); - cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && + assert(!::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && !::cir::MissingFeatures::CXXRecordDeclIsEmptyCXX11()); bool IsOverlappingEmptyField = false; // FIXME(cir): Needs more features. @@ -305,7 +304,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, llvm_unreachable("NYI"); } - cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit; // Reset the unfilled bits. @@ -345,7 +344,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, llvm_unreachable("NYI"); } - cir_tl_assert(!::cir::MissingFeatures::recordDeclIsPacked() && + assert(!::cir::MissingFeatures::recordDeclIsPacked() && !::cir::MissingFeatures::CXXRecordDeclIsPOD()); bool FieldPacked = false; // FIXME(cir): Needs more features. @@ -384,7 +383,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, clang::CharUnits UnpackedFieldOffset = FieldOffset; // clang::CharUnits OriginalFieldAlign = UnpackedFieldAlign; - cir_tl_assert(!::cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); + assert(!::cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); clang::CharUnits MaxAlignmentInChars = clang::CharUnits::Zero(); PackedFieldAlign = std::max(PackedFieldAlign, MaxAlignmentInChars); PreferredAlign = std::max(PreferredAlign, MaxAlignmentInChars); @@ -457,7 +456,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, // laid out. A regular mlir::Type has not way of doing this. In fact, we will // likely need an external abstraction, as I don't think this is possible with // just the field type. - cir_tl_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + assert(!::cir::MissingFeatures::fieldDeclAbstraction()); if (Packed && !FieldPacked && PackedFieldAlign < FieldAlign) llvm_unreachable("NYI"); @@ -466,10 +465,10 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, void ItaniumRecordLayoutBuilder::layoutFields(const StructType D) { // Layout each field, for now, just sequentially, respecting alignment. In // the future, this will need to be tweakable by targets. - cir_tl_assert(!::cir::MissingFeatures::recordDeclMayInsertExtraPadding() && + assert(!::cir::MissingFeatures::recordDeclMayInsertExtraPadding() && !Context.getLangOpts().SanitizeAddressFieldPadding); bool InsertExtraPadding = false; - cir_tl_assert(!::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); + assert(!::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); bool HasFlexibleArrayMember = false; for (const auto FT : D.getMembers()) { layoutField(FT, InsertExtraPadding && (FT != D.getMembers().back() || @@ -486,19 +485,19 @@ void ItaniumRecordLayoutBuilder::UpdateAlignment( return; if (NewAlignment > Alignment) { - cir_tl_assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) && + assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) && "Alignment not a power of 2"); Alignment = NewAlignment; } if (UnpackedNewAlignment > UnpackedAlignment) { - cir_tl_assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) && + assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) && "Alignment not a power of 2"); UnpackedAlignment = UnpackedNewAlignment; } if (PreferredNewAlignment > PreferredAlignment) { - cir_tl_assert(llvm::isPowerOf2_64(PreferredNewAlignment.getQuantity()) && + assert(llvm::isPowerOf2_64(PreferredNewAlignment.getQuantity()) && "Alignment not a power of 2"); PreferredAlignment = PreferredNewAlignment; } @@ -526,7 +525,7 @@ void ItaniumRecordLayoutBuilder::checkFieldPadding( PadSize = PadSize / CharBitNum; // InBits = false; } - cir_tl_assert(::cir::MissingFeatures::bitFieldPaddingDiagnostics()); + assert(::cir::MissingFeatures::bitFieldPaddingDiagnostics()); } if (isPacked && Offset != UnpackedOffset) { HasPackedField = true; @@ -545,7 +544,7 @@ bool isMsLayout(const CIRLowerContext &Context) { /// of the given class (considering it as a base class) when allocating /// objects? static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { - cir_tl_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); switch (ABI.getTailPaddingUseRules()) { case clang::TargetCXXABI::AlwaysUseTailPadding: return false; @@ -567,7 +566,7 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { // intended. // FIXME(cir): This always returns true since we can't check if a CIR record // is a POD type. - cir_tl_assert(!::cir::MissingFeatures::CXXRecordDeclIsPOD()); + assert(!::cir::MissingFeatures::CXXRecordDeclIsPOD()); return true; case clang::TargetCXXABI::UseTailPaddingUnlessPOD11: @@ -589,10 +588,10 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { /// (struct/union/class), which indicates its size and field position /// information. const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { - cir_tl_assert(isa(D) && "Not a record type"); + assert(isa(D) && "Not a record type"); auto RT = dyn_cast(D); - cir_tl_assert(RT.isComplete() && "Cannot get layout of forward declarations!"); + assert(RT.isComplete() && "Cannot get layout of forward declarations!"); // FIXME(cir): Use a more MLIR-based approach by using it's buitin data layout // features, such as interfaces, cacheing, and the DLTI dialect. @@ -603,7 +602,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { llvm_unreachable("NYI"); } else { // FIXME(cir): Add if-else separating C and C++ records. - cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl()); + assert(!::cir::MissingFeatures::isCXXRecordDecl()); EmptySubobjectMap EmptySubobjects(*this, RT); ItaniumRecordLayoutBuilder Builder(*this, &EmptySubobjects); Builder.layout(RT); @@ -618,7 +617,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { skipTailPadding ? Builder.getSize() : Builder.getDataSize(); clang::CharUnits NonVirtualSize = skipTailPadding ? DataSize : Builder.NonVirtualSize; - cir_tl_assert(!::cir::MissingFeatures::CXXRecordIsDynamicClass()); + assert(!::cir::MissingFeatures::CXXRecordIsDynamicClass()); // FIXME(cir): Whose responsible for freeing the allocation below? NewEntry = new CIRRecordLayout( *this, Builder.getSize(), Builder.Alignment, Builder.PreferredAlignment, @@ -633,7 +632,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { } // TODO(cir): Add option to dump the layouts. - cir_tl_assert(!::cir::MissingFeatures::cacheRecordLayouts()); + assert(!::cir::MissingFeatures::cacheRecordLayouts()); return *NewEntry; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index 28b363664387..a3406b722c41 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -60,7 +60,7 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { public: AArch64TargetLoweringInfo(LowerTypes <, AArch64ABIKind Kind) : TargetLoweringInfo(std::make_unique(LT, Kind)) { - cir_tl_assert(!MissingFeature::swift()); + assert(!MissingFeature::swift()); } unsigned getTargetAddrSpaceFromCIRAddrSpace( @@ -87,7 +87,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, return ABIArgInfo::getIgnore(); if (const auto _ = dyn_cast(RetTy)) { - cir_assert_or_abort(!::cir::MissingFeatures::vectorType(), "NYI"); + llvm_unreachable("NYI"); } // Large vector types should be returned via memory. @@ -128,9 +128,7 @@ AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, : ABIArgInfo::getDirect()); } - cir_assert_or_abort(!::cir::MissingFeatures::AArch64TypeClassification(), - "NYI"); - return {}; + llvm_unreachable("NYI"); } std::unique_ptr diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index 788f8a8f4739..7d43000877b7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -70,13 +70,13 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // Homogenous Aggregate type not supported and indirect arg // passing not supported yet. And for these supported types, // we should not have alignment greater than 8 problem. - cir_tl_assert(isSupportedType); - cir_tl_assert(!cir::MissingFeatures::classifyArgumentTypeForAArch64()); + assert(isSupportedType); + assert(!cir::MissingFeatures::classifyArgumentTypeForAArch64()); // indirect arg passing would expect one more level of pointer dereference. - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + assert(!cir::MissingFeatures::handleAArch64Indirect()); // false as a place holder for now, as we don't have a way to query bool isIndirect = false; - cir_tl_assert(!cir::MissingFeatures::supportgetCoerceToTypeForAArch64()); + assert(!cir::MissingFeatures::supportgetCoerceToTypeForAArch64()); // we don't convert to LLVM Type here as we are lowering to CIR here. // so baseTy is the just type of the result of va_arg. // but it depends on arg type indirectness and coercion defined by ABI. @@ -120,8 +120,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // though anyone passing 2GB of arguments, each at most 16 bytes, deserves // whatever they get). - cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); - cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // One is just place holder for now, as we don't have a way to query // type size and alignment. clang::CharUnits tySize = @@ -132,7 +132,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // indirectness, type size and type alignment all // decide regSize, but they are all ABI defined // thus need ABI lowering query system. - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + assert(!cir::MissingFeatures::handleAArch64Indirect()); int regSize = isIndirect ? 8 : tySize.getQuantity(); int regTopIndex; mlir::Value regOffsP; @@ -187,8 +187,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we // align __gr_offs to calculate the potential address. if (!IsFPR && !isIndirect && tyAlign.getQuantity() > 8) { - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); - cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + assert(!cir::MissingFeatures::handleAArch64Indirect()); + assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); llvm_unreachable("register alignment correction NYI"); } @@ -224,19 +224,19 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( loc, castRegTop.getType(), castRegTop, regOffs); if (isIndirect) { - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + assert(!cir::MissingFeatures::handleAArch64Indirect()); llvm_unreachable("indirect arg passing NYI"); } // TODO: isHFA, numMembers and base should be query result from query uint64_t numMembers = 0; - cir_tl_assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); + assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); bool isHFA = false; // though endianess can be known from datalayout, it might need an unified // ABI lowering query system to answer the question. - cir_tl_assert(!cir::MissingFeatures::supportisEndianQueryForAArch64()); + assert(!cir::MissingFeatures::supportisEndianQueryForAArch64()); bool isBigEndian = datalayout.isBigEndian(); - cir_tl_assert(!cir::MissingFeatures::supportisAggregateTypeForABIAArch64()); + assert(!cir::MissingFeatures::supportisAggregateTypeForABIAArch64()); // TODO: isAggregateTypeForABI should be query result from ABI info bool isAggregateTypeForABI = false; if (isHFA && numMembers > 1) { @@ -244,10 +244,10 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // and stored 16-bytes apart regardless of size (they're notionally in qN, // qN+1, ...). We reload and store into a temporary local variable // contiguously. - cir_tl_assert(!isIndirect && "Homogeneous aggregates should be passed directly"); + assert(!isIndirect && "Homogeneous aggregates should be passed directly"); llvm_unreachable("Homogeneous aggregates NYI"); } else { - cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // TODO: slotSize should be query result about alignment. clang::CharUnits slotSize = clang::CharUnits::fromQuantity(8); if (isBigEndian && !isIndirect && (isHFA || isAggregateTypeForABI) && @@ -266,11 +266,11 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // On big-endian platforms, the value will be right-aligned in its stack slot. // and we also need to think about other ABI lowering concerns listed below. - cir_tl_assert(!cir::MissingFeatures::handleBigEndian()); - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); - cir_tl_assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); - cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); - cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + assert(!cir::MissingFeatures::handleBigEndian()); + assert(!cir::MissingFeatures::handleAArch64Indirect()); + assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); + assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); builder.create(loc, mlir::ValueRange{resAsVoidP}, contBlock); @@ -284,8 +284,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( auto ptrDiffTy = mlir::cir::IntType::get(builder.getContext(), 64, /*signed=*/false); - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); - cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + assert(!cir::MissingFeatures::handleAArch64Indirect()); + assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // Again, stack arguments may need realignment. In this case both integer and // floating-point ones might be affected. if (!isIndirect && tyAlign.getQuantity() > 8) { @@ -307,8 +307,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // which are all ABI defined thus need ABI lowering query system. // The implementation we have now supports most common cases which assumes // no indirectness, no alignment greater than 8, and little endian. - cir_tl_assert(!cir::MissingFeatures::handleBigEndian()); - cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + assert(!cir::MissingFeatures::handleBigEndian()); + assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); auto stackSizeC = builder.create( loc, ptrDiffTy, @@ -340,12 +340,12 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( builder.setInsertionPoint(op); contBlock->addArgument(onStackPtr.getType(), loc); auto resP = contBlock->getArgument(0); - cir_tl_assert(mlir::isa(resP.getType())); + assert(mlir::isa(resP.getType())); auto opResPTy = mlir::cir::PointerType::get(builder.getContext(), opResTy); auto castResP = builder.createBitcast(resP, opResPTy); auto res = builder.create(loc, castResP); // there would be another level of ptr dereference if indirect arg passing - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + assert(!cir::MissingFeatures::handleAArch64Indirect()); if (isIndirect) { res = builder.create(loc, res.getResult()); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index 8b13ba556558..9d79fb7ccb43 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -34,7 +34,7 @@ cir::LoweringPrepareCXXABI *cir::LoweringPrepareCXXABI::createItaniumABI() { static void buildBadCastCall(CIRBaseBuilderTy &builder, mlir::Location loc, mlir::FlatSymbolRefAttr badCastFuncRef) { // TODO(cir): set the calling convention to __cxa_bad_cast. - cir_tl_assert(!MissingFeatures::setCallingConv()); + assert(!MissingFeatures::setCallingConv()); builder.createCallOp(loc, badCastFuncRef, mlir::ValueRange{}); builder.create(loc); @@ -48,7 +48,7 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, auto castInfo = op.getInfo().value(); // TODO(cir): consider address space - cir_tl_assert(!MissingFeatures::addressSpace()); + assert(!MissingFeatures::addressSpace()); auto srcPtr = builder.createBitcast(srcValue, builder.getVoidPtrTy()); auto srcRtti = builder.getConstant(loc, castInfo.getSrcRtti()); @@ -59,14 +59,14 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, mlir::Value dynCastFuncArgs[4] = {srcPtr, srcRtti, destRtti, offsetHint}; // TODO(cir): set the calling convention for __dynamic_cast. - cir_tl_assert(!MissingFeatures::setCallingConv()); + assert(!MissingFeatures::setCallingConv()); mlir::Value castedPtr = builder .createCallOp(loc, dynCastFuncRef, builder.getVoidPtrTy(), dynCastFuncArgs) .getResult(); - cir_tl_assert(mlir::isa(castedPtr.getType()) && + assert(mlir::isa(castedPtr.getType()) && "the return value of __dynamic_cast should be a ptr"); /// C++ [expr.dynamic.cast]p9: @@ -93,7 +93,7 @@ buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, bool vtableUsesRelativeLayout = op.getRelativeLayout(); // TODO(cir): consider address space in this function. - cir_tl_assert(!MissingFeatures::addressSpace()); + assert(!MissingFeatures::addressSpace()); mlir::Type vtableElemTy; uint64_t vtableElemAlign; @@ -141,7 +141,7 @@ LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, auto loc = op->getLoc(); auto srcValue = op.getSrc(); - cir_tl_assert(!MissingFeatures::buildTypeCheck()); + assert(!MissingFeatures::buildTypeCheck()); if (op.isRefcast()) return buildDynamicCastAfterNullCheck(builder, op); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp index f5540e221d9d..f5a7250dffd0 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp @@ -33,7 +33,7 @@ class SPIRVABIInfo : public ABIInfo { private: void computeInfo(LowerFunctionInfo &FI) const override { - cir_assert_or_abort(!::cir::MissingFeatures::SPIRVABI(), "NYI"); + llvm_unreachable("ABI NYI"); } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 11092381960e..38501f7c3124 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -95,8 +95,7 @@ Type getFPTypeAtOffset(Type IRType, unsigned IROffset, if (IROffset == 0 && isa(IRType)) return IRType; - cir_assert_or_abort(!::cir::MissingFeatures::X86GetFPTypeAtOffset(), "NYI"); - return IRType; // FIXME(cir): Temporary workaround for the assertion above. + llvm_unreachable("NYI"); } } // namespace @@ -194,7 +193,7 @@ class X86_64TargetLoweringInfo : public TargetLoweringInfo { public: X86_64TargetLoweringInfo(LowerTypes &LM, X86AVXABILevel AVXLevel) : TargetLoweringInfo(std::make_unique(LM, AVXLevel)) { - cir_tl_assert(!::cir::MissingFeatures::swift()); + assert(!::cir::MissingFeatures::swift()); } unsigned getTargetAddrSpaceFromCIRAddrSpace( @@ -274,7 +273,7 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, Current = Class::NoClass; // If this is a C++ record, classify the bases first. - cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl() && + assert(!::cir::MissingFeatures::isCXXRecordDecl() && !::cir::MissingFeatures::getCXXRecordBases()); // Classify the fields one at a time, merging the results. @@ -284,10 +283,10 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, bool IsUnion = RT.isUnion() && !UseClang11Compat; // FIXME(cir): An interface to handle field declaration might be needed. - cir_tl_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + assert(!::cir::MissingFeatures::fieldDeclAbstraction()); for (auto [idx, FT] : llvm::enumerate(RT.getMembers())) { uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); - cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); bool BitField = false; // Ignore padding bit-fields. @@ -338,8 +337,7 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, postMerge(Size, Lo, Hi); } else { llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; - cir_assert_or_abort(!::cir::MissingFeatures::X86TypeClassification(), - "NYI"); + llvm_unreachable("NYI"); } // FIXME: _Decimal32 and _Decimal64 are SSE. // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). @@ -402,7 +400,7 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, // returning an 8-byte unit starting with it. See if we can safely use it. if (IROffset == 0) { // Pointers and int64's always fill the 8-byte unit. - cir_tl_assert(!isa(DestTy) && "Ptrs are NYI"); + assert(!isa(DestTy) && "Ptrs are NYI"); // If we have a 1/2/4-byte integer, we can use it only if the rest of the // goodness in the source type is just tail padding. This is allowed to @@ -438,9 +436,7 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, unsigned TySizeInBytes = (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); - // FIXME(cir): Temporary workaround to make things non-blocking. - if (!cirMissingFeatureAssertionMode) - cir_tl_assert(TySizeInBytes != SourceOffset && "Empty field?"); + assert(TySizeInBytes != SourceOffset && "Empty field?"); // It is always safe to classify this as an integer type up to i64 that // isn't larger than the structure. @@ -462,9 +458,9 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { classify(RetTy, 0, Lo, Hi, true); // Check some invariants. - cir_tl_assert((Hi != Class::Memory || Lo == Class::Memory) && + assert((Hi != Class::Memory || Lo == Class::Memory) && "Invalid memory classification."); - cir_tl_assert((Hi != Class::SSEUp || Lo == Class::SSE) && + assert((Hi != Class::SSEUp || Lo == Class::SSE) && "Invalid SSEUp classification."); Type resType = {}; @@ -496,8 +492,7 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { break; default: - cir_assert_or_abort(!::cir::MissingFeatures::X86RetTypeClassification(), - "NYI"); + llvm_unreachable("NYI"); } Type HighPart = {}; @@ -531,9 +526,9 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, // Check some invariants. // FIXME: Enforce these by construction. - cir_tl_assert((Hi != Class::Memory || Lo == Class::Memory) && + assert((Hi != Class::Memory || Lo == Class::Memory) && "Invalid memory classification."); - cir_tl_assert((Hi != Class::SSEUp || Lo == Class::SSE) && + assert((Hi != Class::SSEUp || Lo == Class::SSE) && "Invalid SSEUp classification."); neededInt = 0; @@ -571,8 +566,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, break; } default: - cir_assert_or_abort(!::cir::MissingFeatures::X86ArgTypeClassification(), - "NYI"); + llvm_unreachable("NYI"); } Type HighPart = {}; @@ -676,7 +670,7 @@ X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { // Accum should never be memory (we should have returned) or // ComplexX87 (because this cannot be passed in a structure). - cir_tl_assert((Accum != Class::Memory && Accum != Class::ComplexX87) && + assert((Accum != Class::Memory && Accum != Class::ComplexX87) && "Invalid accumulated classification during merge."); if (Accum == Field || Field == Class::NoClass) return Accum; diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 2eb77a3ec33a..e360f0470b50 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -199,9 +199,6 @@ class CIRGenConsumer : public clang::ASTConsumer { if (feOptions.ClangIRLibOpt) libOptOpts = sanitizePassOptions(feOptions.ClangIRLibOptOpts); - bool enableCCLowering = feOptions.ClangIRCallConvLowering && - action == CIRGenAction::OutputType::EmitCIRFlat; - // Setup and run CIR pipeline. std::string passOptParsingFailure; if (runCIRToCIRPasses( @@ -211,7 +208,8 @@ class CIRGenConsumer : public clang::ASTConsumer { feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, codeGenOptions.OptimizationLevel > 0, action == CIRGenAction::OutputType::EmitCIRFlat, - action == CIRGenAction::OutputType::EmitMLIR, enableCCLowering, + action == CIRGenAction::OutputType::EmitMLIR, + feOptions.ClangIREnableCallConvLowering, feOptions.ClangIREnableMem2Reg) .failed()) { if (!passOptParsingFailure.empty()) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3c80b2dca766..88485d71510c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4396,7 +4396,7 @@ std::unique_ptr createConvertCIRToLLVMPass() { } void populateCIRToLLVMPasses(mlir::OpPassManager &pm) { - populateCIRPreLoweringPasses(pm, true); + populateCIRPreLoweringPasses(pm); pm.addPass(createConvertCIRToLLVMPass()); } diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index dcd979c35e9d..a30e097f9401 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3129,7 +3129,7 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Opts.ClangIRVerifyDiags = true; if (Args.hasArg(OPT_fclangir_call_conv_lowering)) - Opts.ClangIRCallConvLowering = true; + Opts.ClangIREnableCallConvLowering = true; if (Args.hasArg(OPT_fclangir_analysis_only)) Opts.ClangIRAnalysisOnly = true; diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index 966b1da6e9f3..bf2663181077 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -4,7 +4,7 @@ // RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir // RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -fno-clangir-call-conv-lowering -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir // RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll // RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index ca69cb279d81..67a851dff2de 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -fno-clangir-call-conv-lowering -emit-cir-flat %s -o %t.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.flat.cir // RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index 068d8c10b3b3..b985ecab8cca 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -fno-clangir-call-conv-lowering -emit-cir-flat %s -o %t.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.flat.cir // RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s // RUN_DISABLED: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll // RUN_DISABLED: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_LLVM %s diff --git a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp index f3a926aa93a6..209679ebf383 100644 --- a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // CHECK: @_Z4Voidv() diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index a3c2d6960c39..3789550ce33b 100644 --- a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // Test call conv lowering for trivial cases. // From 3b623b6b1209f39aa51119607d2e3d3b04afa843 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Tue, 1 Oct 2024 05:50:21 +0800 Subject: [PATCH 1900/2301] [CIR][CIRGen] Add time trace to several CIRGen pieces (#898) Then we can observe the time consumed in different part of CIR. This patch is not complete. But I think it is fine given we can always add them easily. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 14 ++++++++++++++ clang/lib/CIR/CodeGen/CIRPasses.cpp | 4 ++++ .../CIR/Dialect/Transforms/CallConvLowering.cpp | 4 ++++ clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp | 3 +++ clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 ++ .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 7 +++++++ .../CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 5 +++++ 7 files changed, 39 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 73d66a789963..2072a332c6b1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -75,6 +75,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Support/TimeProfiler.h" #include #include @@ -463,6 +464,19 @@ void CIRGenModule::setDSOLocal(CIRGlobalValueInterface GV) const { } void CIRGenModule::buildGlobal(GlobalDecl GD) { + llvm::TimeTraceScope scope("build CIR Global", [&]() -> std::string { + auto *ND = dyn_cast(GD.getDecl()); + if (!ND) + // TODO: How to print decls which is not named decl? + return "Unnamed decl"; + + std::string Name; + llvm::raw_string_ostream OS(Name); + ND->getNameForDiagnostic(OS, getASTContext().getPrintingPolicy(), + /*Qualified=*/true); + return Name; + }); + const auto *Global = cast(GD.getDecl()); assert(!Global->hasAttr() && "NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 4f89daa1cee4..7940a3f03066 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -19,6 +19,8 @@ #include "mlir/Support/LogicalResult.h" #include "mlir/Transforms/Passes.h" +#include "llvm/Support/TimeProfiler.h" + namespace cir { mlir::LogicalResult runCIRToCIRPasses( mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, @@ -29,6 +31,8 @@ mlir::LogicalResult runCIRToCIRPasses( bool enableCIRSimplify, bool flattenCIR, bool emitMLIR, bool enableCallConvLowering, bool enableMem2Reg) { + llvm::TimeTraceScope scope("CIR To CIR Passes"); + mlir::PassManager pm(mlirCtx); pm.addPass(mlir::createCIRCanonicalizePass()); diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 3a4b9b397c5b..85e74eab724a 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -18,6 +18,8 @@ #define GEN_PASS_DEF_CALLCONVLOWERING #include "clang/CIR/Dialect/Passes.h.inc" +#include "llvm/Support/TimeProfiler.h" + namespace mlir { namespace cir { @@ -30,6 +32,8 @@ struct CallConvLoweringPattern : public OpRewritePattern { LogicalResult matchAndRewrite(FuncOp op, PatternRewriter &rewriter) const final { + llvm::TimeTraceScope scope("Call Conv Lowering Pass", op.getSymName().str()); + const auto module = op->getParentOfType(); if (!op.getAst()) diff --git a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp index 34eb488b732c..56e2308272ed 100644 --- a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp +++ b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp @@ -7,6 +7,8 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "llvm/Support/TimeProfiler.h" + using namespace mlir; using namespace mlir::cir; @@ -43,6 +45,7 @@ static void process(mlir::cir::FuncOp func) { } void GotoSolverPass::runOnOperation() { + llvm::TimeTraceScope scope("Goto Solver"); SmallVector ops; getOperation()->walk([&](mlir::cir::FuncOp op) { process(op); }); } diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index e360f0470b50..0eb57fa48681 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -172,6 +172,8 @@ class CIRGenConsumer : public clang::ASTConsumer { } void HandleTranslationUnit(ASTContext &C) override { + llvm::TimeTraceScope scope("CIR Gen"); + // Note that this method is called after `HandleTopLevelDecl` has already // ran all over the top level decls. Here clang mostly wraps defered and // global codegen, followed by running CIR passes. diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 88485d71510c..5bb38735d8cd 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -64,6 +64,7 @@ #include "llvm/IR/DerivedTypes.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TimeProfiler.h" #include #include #include @@ -4323,6 +4324,8 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar() { } void ConvertCIRToLLVMPass::runOnOperation() { + llvm::TimeTraceScope scope("Convert CIR to LLVM Pass"); + auto module = getOperation(); mlir::DataLayout dataLayout(module); mlir::LLVMTypeConverter converter(&getContext()); @@ -4405,6 +4408,8 @@ extern void registerCIRDialectTranslation(mlir::MLIRContext &context); std::unique_ptr lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx, bool disableVerifier) { + llvm::TimeTraceScope scope("lower from CIR to LLVM directly"); + mlir::MLIRContext *mlirCtx = theModule.getContext(); mlir::PassManager pm(mlirCtx); populateCIRToLLVMPasses(pm); @@ -4436,6 +4441,8 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx, mlir::registerOpenMPDialectTranslation(*mlirCtx); registerCIRDialectTranslation(*mlirCtx); + llvm::TimeTraceScope __scope("translateModuleToLLVMIR"); + auto ModuleName = theModule.getName(); auto llvmModule = mlir::translateModuleToLLVMIR( theModule, llvmCtx, ModuleName ? *ModuleName : "CIRToLLVMModule"); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index f4ec4a4ba260..1b3bd7f8f5d8 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -53,6 +53,7 @@ #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TypeSwitch.h" +#include "llvm/Support/TimeProfiler.h" using namespace cir; using namespace llvm; @@ -1419,6 +1420,8 @@ std::unique_ptr lowerFromCIRToMLIRToLLVMIR(mlir::ModuleOp theModule, std::unique_ptr mlirCtx, LLVMContext &llvmCtx) { + llvm::TimeTraceScope scope("Lower from CIR to MLIR To LLVM"); + mlir::PassManager pm(mlirCtx.get()); pm.addPass(createConvertCIRToMLIRPass()); @@ -1451,6 +1454,8 @@ std::unique_ptr createConvertCIRToMLIRPass() { mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx) { + llvm::TimeTraceScope scope("Lower CIR To MLIR"); + mlir::PassManager pm(mlirCtx); pm.addPass(createConvertCIRToMLIRPass()); From e88222d308d481379a918d27788658aec77010ec Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 1 Oct 2024 05:51:59 +0800 Subject: [PATCH 1901/2301] [CIR][Dialect] Support OpenCL work group uniformity attribute (#896) > To keep information about whether an OpenCL kernel has uniform work > group size or not, clang generates 'uniform-work-group-size' function > attribute for every kernel: > > "uniform-work-group-size"="true" for OpenCL 1.2 and lower, > "uniform-work-group-size"="true" for OpenCL 2.0 and higher if '-cl-uniform-work-group-size' option was specified, > "uniform-work-group-size"="false" for OpenCL 2.0 and higher if no '-cl-uniform-work-group-size' options was specified. > If the function is not an OpenCL kernel, 'uniform-work-group-size' > attribute isn't generated. > > *From [Differential 43570](https://reviews.llvm.org/D43570)* This PR introduces the `OpenCLKernelUniformWorkGroupSizeAttr` attribute to the ClangIR pipeline, towards the completeness in attributes for OpenCL. While this attribute is represented as a unit attribute in MLIR, its absence signifies either non-kernel functions or a `false` value for kernel functions. To match the original LLVM IR behavior, we also consider whether a function is an OpenCL kernel during lowering: * If the function is not a kernel, the attribute is ignored. No LLVM function attribute is set. * If the function is a kernel: * and the `OpenCLKernelUniformWorkGroupSizeAttr` is present, we generate the LLVM function attribute `"uniform-work-group-size"="true"`. * If absent, we generate `"uniform-work-group-size"="false"`. --- .../clang/CIR/Dialect/IR/CIROpenCLAttrs.td | 21 ++++++++++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 17 +++++++- .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 10 +++++ .../CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl | 41 +++++++++++++++++++ 4 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td index 576d619fcf7a..a6932c8ca178 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td @@ -185,4 +185,25 @@ def OpenCLKernelAttr : CIRUnitAttr< let storageType = [{ OpenCLKernelAttr }]; } +//===----------------------------------------------------------------------===// +// OpenCLKernelUniformWorkGroupSizeAttr +//===----------------------------------------------------------------------===// + +def OpenCLKernelUniformWorkGroupSizeAttr : CIRUnitAttr< + "OpenCLKernelUniformWorkGroupSize", "cl.uniform_work_group_size"> { + let summary = "OpenCL kernel work-group uniformity"; + let description = [{ + In OpenCL v2.0, work groups can either be uniform or non-uniform. + This attribute is associated with kernels to represent the work group type. + Non-kernel entities should not interact with this attribute. + + Clang's `-cl-uniform-work-group-size` compilation option provides a hint to + the compiler, indicating that the global work size should be a multiple of + the work-group size specified in the `clEnqueueNDRangeKernel` function, + thereby ensuring that the work groups are uniform. + }]; + + let storageType = [{ OpenCLKernelUniformWorkGroupSizeAttr }]; +} + #endif // MLIR_CIR_DIALECT_CIR_OPENCL_ATTRS diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 2a1b1a69da3d..c5b7e2a2edae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -433,7 +433,22 @@ void CIRGenModule::constructAttributeList(StringRef Name, auto cirKernelAttr = mlir::cir::OpenCLKernelAttr::get(builder.getContext()); funcAttrs.set(cirKernelAttr.getMnemonic(), cirKernelAttr); - assert(!MissingFeatures::openCL()); + + auto uniformAttr = mlir::cir::OpenCLKernelUniformWorkGroupSizeAttr::get( + builder.getContext()); + if (getLangOpts().OpenCLVersion <= 120) { + // OpenCL v1.2 Work groups are always uniform + funcAttrs.set(uniformAttr.getMnemonic(), uniformAttr); + } else { + // OpenCL v2.0 Work groups may be whether uniform or not. + // '-cl-uniform-work-group-size' compile option gets a hint + // to the compiler that the global work-size be a multiple of + // the work-group size specified to clEnqueueNDRangeKernel + // (i.e. work groups are uniform). + if (getLangOpts().OffloadUniformBlock) { + funcAttrs.set(uniformAttr.getMnemonic(), uniformAttr); + } + } } if (TargetDecl->hasAttr() && diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index 4e8e2e9558cc..7b520ab2d72e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -91,6 +91,7 @@ class CIRDialectLLVMIRTranslationInterface mlir::NamedAttribute attribute, mlir::LLVM::ModuleTranslation &moduleTranslation) const { llvm::Function *llvmFunc = moduleTranslation.lookupFunction(func.getName()); + llvm::LLVMContext &llvmCtx = moduleTranslation.getLLVMContext(); if (auto extraAttr = mlir::dyn_cast( attribute.getValue())) { for (auto attr : extraAttr.getElements()) { @@ -110,6 +111,15 @@ class CIRDialectLLVMIRTranslationInterface llvmFunc->addFnAttr(llvm::Attribute::NoUnwind); } else if (mlir::dyn_cast(attr.getValue())) { llvmFunc->addFnAttr(llvm::Attribute::Convergent); + } else if (mlir::dyn_cast( + attr.getValue())) { + const auto uniformAttrName = + mlir::cir::OpenCLKernelUniformWorkGroupSizeAttr::getMnemonic(); + const bool isUniform = + extraAttr.getElements().getNamed(uniformAttrName).has_value(); + auto attrs = llvmFunc->getAttributes().addFnAttribute( + llvmCtx, "uniform-work-group-size", isUniform ? "true" : "false"); + llvmFunc->setAttributes(attrs); } else if (auto clKernelMetadata = mlir::dyn_cast( attr.getValue())) { diff --git a/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl b/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl new file mode 100644 index 000000000000..e6d6ce1ca25a --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl @@ -0,0 +1,41 @@ +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL1.2 -o %t.cl12.cir %s +// RUN: FileCheck %s -input-file=%t.cl12.cir -check-prefixes CIR,CIR-UNIFORM +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL2.0 -o %t.cl20.cir %s +// RUN: FileCheck %s -input-file=%t.cl20.cir -check-prefixes CIR,CIR-NONUNIFORM +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL2.0 -cl-uniform-work-group-size -o %t.cl20.uniform1.cir %s +// RUN: FileCheck %s -input-file=%t.cl20.uniform1.cir -check-prefixes CIR,CIR-UNIFORM +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL2.0 -foffload-uniform-block -o %t.cl20.uniform2.cir %s +// RUN: FileCheck %s -input-file=%t.cl20.uniform2.cir -check-prefixes CIR,CIR-UNIFORM + +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -O0 -cl-std=CL1.2 -o %t.cl12.ll %s +// RUN: FileCheck %s -input-file=%t.cl12.ll -check-prefixes LLVM,LLVM-UNIFORM +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -O0 -cl-std=CL2.0 -o %t.cl20.ll %s +// RUN: FileCheck %s -input-file=%t.cl20.ll -check-prefixes LLVM,LLVM-NONUNIFORM +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -O0 -cl-std=CL2.0 -cl-uniform-work-group-size -o %t.cl20.uniform1.ll %s +// RUN: FileCheck %s -input-file=%t.cl20.uniform1.ll -check-prefixes LLVM,LLVM-UNIFORM +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -O0 -cl-std=CL2.0 -foffload-uniform-block -o %t.cl20.uniform2.ll %s +// RUN: FileCheck %s -input-file=%t.cl20.uniform2.ll -check-prefixes LLVM,LLVM-UNIFORM + +// CIR-LABEL: #fn_attr = +// CIR: cl.kernel = #cir.cl.kernel +// CIR-UNIFORM: cl.uniform_work_group_size = #cir.cl.uniform_work_group_size +// CIR-NONUNIFORM-NOT: cl.uniform_work_group_size = #cir.cl.uniform_work_group_size + +// CIR-LABEL: #fn_attr1 = +// CIR-NOT: cl.kernel = #cir.cl.kernel +// CIR-NOT: cl.uniform_work_group_size + +kernel void ker() {}; +// CIR: cir.func @ker{{.*}} extra(#fn_attr) { +// LLVM: define{{.*}}@ker() #0 + +void foo() {}; +// CIR: cir.func @foo{{.*}} extra(#fn_attr1) { +// LLVM: define{{.*}}@foo() #1 + +// LLVM-LABEL: attributes #0 +// LLVM-UNIFORM: "uniform-work-group-size"="true" +// LLVM-NONUNIFORM: "uniform-work-group-size"="false" + +// LLVM-LABEL: attributes #1 +// LLVM-NOT: uniform-work-group-size From e4c08381cae1d4474633021a28827bdc121ed280 Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 1 Oct 2024 05:53:14 +0800 Subject: [PATCH 1902/2301] [CIR][CodeGen][NFC] Rename the confusing `buildGlobal` overload (#897) `CIRGenModule::buildGlobal` --[rename]--> `CIRGenModule::getOrCreateCIRGlobal` We already have `CIRGenModule::buildGlobal` that corresponds to `CodeGenModule::EmitGlobal`. But there is an overload of `buildGlobal` used by `getAddrOfGlobalVar`. Since this name is confusing, this PR rename it to `getOrCreateCIRGlobal`. Note that `getOrCreateCIRGlobal` already exists. It is intentional to make the renamed function an overload to it. The reason here is that the renamed function is basically a wrapper of the original `getOrCreateCIRGlobal` with more specific parameters: `getOrCreateCIRGlobal(decl, type, isDef)` --[call]--> `getOrCreateCIRGlobal(getMangledName(decl), type, decl->getType()->getAS(), decl, isDef)` --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 ++++++----- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 2072a332c6b1..76bad240e9c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1003,8 +1003,9 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, return GV; } -mlir::cir::GlobalOp CIRGenModule::buildGlobal(const VarDecl *D, mlir::Type Ty, - ForDefinition_t IsForDefinition) { +mlir::cir::GlobalOp +CIRGenModule::getOrCreateCIRGlobal(const VarDecl *D, mlir::Type Ty, + ForDefinition_t IsForDefinition) { assert(D->hasGlobalStorage() && "Not a global variable"); QualType ASTTy = D->getType(); if (!Ty) @@ -1029,7 +1030,7 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty, Ty = getTypes().convertTypeForMem(ASTTy); bool tlsAccess = D->getTLSKind() != VarDecl::TLS_None; - auto g = buildGlobal(D, Ty, IsForDefinition); + auto g = getOrCreateCIRGlobal(D, Ty, IsForDefinition); auto ptrTy = builder.getPointerTo(g.getSymType(), g.getAddrSpaceAttr()); return builder.create( getLoc(D->getSourceRange()), ptrTy, g.getSymName(), tlsAccess); @@ -1043,7 +1044,7 @@ CIRGenModule::getAddrOfGlobalVarAttr(const VarDecl *D, mlir::Type Ty, if (!Ty) Ty = getTypes().convertTypeForMem(ASTTy); - auto globalOp = buildGlobal(D, Ty, IsForDefinition); + auto globalOp = getOrCreateCIRGlobal(D, Ty, IsForDefinition); return builder.getGlobalViewAttr(builder.getPointerTo(Ty), globalOp); } @@ -1232,7 +1233,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, } assert(!mlir::isa(InitType) && "Should have a type by now"); - auto Entry = buildGlobal(D, InitType, ForDefinition_t(!IsTentative)); + auto Entry = getOrCreateCIRGlobal(D, InitType, ForDefinition_t(!IsTentative)); // TODO(cir): Strip off pointer casts from Entry if we get them? // TODO(cir): use GlobalValue interface diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index b980ed411c41..b652ec4f9ef7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -234,7 +234,7 @@ class CIRGenModule : public CIRGenTypeCache { getOrCreateStaticVarDecl(const VarDecl &D, mlir::cir::GlobalLinkageKind Linkage); - mlir::cir::GlobalOp buildGlobal(const VarDecl *D, mlir::Type Ty, + mlir::cir::GlobalOp getOrCreateCIRGlobal(const VarDecl *D, mlir::Type Ty, ForDefinition_t IsForDefinition); /// TODO(cir): once we have cir.module, add this as a convenience method From 2f573c1e32fb19a71148daa27b7c2d9ef5a1aa02 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Mon, 30 Sep 2024 17:53:43 -0400 Subject: [PATCH 1903/2301] [CIR][CIRGen][Builtin][Neon] Lower neon vld1_lane and vld1q_lane (#901) just as title. --------- Co-authored-by: Guojin He --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 8 +- clang/test/CIR/CodeGen/aarch64-neon-ldst.c | 376 ++++++++++++++++++ 2 files changed, 382 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/aarch64-neon-ldst.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index e9cfdc1ebbed..ca6f09156b66 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2971,7 +2971,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, buildAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) return V; - mlir::Type VTy = Ty; + mlir::cir::VectorType VTy = Ty; llvm::SmallVector args; switch (BuiltinID) { default: @@ -3404,7 +3404,11 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vld1_lane_v: case NEON::BI__builtin_neon_vld1q_lane_v: { - llvm_unreachable("NYI"); + Ops[1] = builder.createBitcast(Ops[1], VTy); + Ops[0] = builder.createAlignedLoad(Ops[0].getLoc(), VTy.getEltType(), + Ops[0], PtrOp0.getAlignment()); + return builder.create(getLoc(E->getExprLoc()), + Ops[1], Ops[0], Ops[2]); } case NEON::BI__builtin_neon_vldap1_lane_s64: case NEON::BI__builtin_neon_vldap1q_lane_s64: { diff --git a/clang/test/CIR/CodeGen/aarch64-neon-ldst.c b/clang/test/CIR/CodeGen/aarch64-neon-ldst.c new file mode 100644 index 000000000000..9b6ed9ee479c --- /dev/null +++ b/clang/test/CIR/CodeGen/aarch64-neon-ldst.c @@ -0,0 +1,376 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -ffreestanding -emit-cir -target-feature +neon %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -ffreestanding -emit-llvm -target-feature +neon %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// REQUIRES: aarch64-registered-target || arm-registered-target +#include + +int8x8_t test_vld1_lane_s8(int8_t const * ptr, int8x8_t src) { + return vld1_lane_s8(ptr, src, 7); +} + +// CIR-LABEL: test_vld1_lane_s8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(1) [[PTR]] : !cir.ptr, !s8i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1_lane_s8(ptr{{.*}}[[PTR:%.*]], <8 x i8>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <8 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[SRC_VAL:%.*]] = load <8 x i8>, ptr [[SRC_ADDR]], align 8 +// LLVM: store <8 x i8> [[SRC_VAL]], ptr [[S1:%.*]], align 8 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <8 x i8>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR_VAL]], align 1 +// LLVM: {{.*}} = insertelement <8 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 7 +// LLVM: ret <8 x i8> {{.*}} + +int8x16_t test_vld1q_lane_s8(int8_t const * ptr, int8x16_t src) { + return vld1q_lane_s8(ptr, src, 15); +} + +// CIR-LABEL: test_vld1q_lane_s8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<15> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(1) [[PTR]] : !cir.ptr, !s8i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1q_lane_s8(ptr{{.*}}[[PTR:%.*]], <16 x i8>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <16 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[SRC_VAL:%.*]] = load <16 x i8>, ptr [[SRC_ADDR]], align 16 +// LLVM: store <16 x i8> [[SRC_VAL]], ptr [[S1:%.*]], align 16 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <16 x i8>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR_VAL]], align 1 +// LLVM: {{.*}} = insertelement <16 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 15 +// LLVM: ret <16 x i8> {{.*}} + +uint8x16_t test_vld1q_lane_u8(uint8_t const * ptr, uint8x16_t src) { + return vld1q_lane_u8(ptr, src, 15); +} + +// CIR-LABEL: test_vld1q_lane_u8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<15> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(1) [[PTR]] : !cir.ptr, !u8i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1q_lane_u8(ptr{{.*}}[[PTR:%.*]], <16 x i8>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <16 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[SRC_VAL:%.*]] = load <16 x i8>, ptr [[SRC_ADDR]], align 16 +// LLVM: store <16 x i8> [[SRC_VAL]], ptr [[S1:%.*]], align 16 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <16 x i8>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR_VAL]], align 1 +// LLVM: {{.*}} = insertelement <16 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 15 +// LLVM: ret <16 x i8> {{.*}} + + +uint8x8_t test_vld1_lane_u8(uint8_t const * ptr, uint8x8_t src) { + return vld1_lane_u8(ptr, src, 7); +} + +// CIR-LABEL: test_vld1_lane_u8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(1) [[PTR]] : !cir.ptr, !u8i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1_lane_u8(ptr{{.*}}[[PTR:%.*]], <8 x i8>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <8 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[SRC_VAL:%.*]] = load <8 x i8>, ptr [[SRC_ADDR]], align 8 +// LLVM: store <8 x i8> [[SRC_VAL]], ptr [[S1:%.*]], align 8 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <8 x i8>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR_VAL]], align 1 +// LLVM: {{.*}} = insertelement <8 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 7 +// LLVM: ret <8 x i8> {{.*}} + + +int16x4_t test_vld1_lane_s16(int16_t const * ptr, int16x4_t src) { + return vld1_lane_s16(ptr, src, 3); +} + +// CIR-LABEL: test_vld1_lane_s16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(2) [[PTR]] : !cir.ptr, !s16i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1_lane_s16(ptr{{.*}}[[PTR:%.*]], <4 x i16>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <4 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[SRC_VAL:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 +// LLVM: store <4 x i16> [[SRC_VAL]], ptr [[S1:%.*]], align 8 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <4 x i16>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i16> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <4 x i16> +// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR_VAL]], align 2 +// LLVM: {{.*}} = insertelement <4 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 3 +// LLVM: ret <4 x i16> {{.*}} + +uint16x4_t test_vld1_lane_u16(uint16_t const * ptr, uint16x4_t src) { + return vld1_lane_u16(ptr, src, 3); +} + +// CIR-LABEL: test_vld1_lane_u16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(2) [[PTR]] : !cir.ptr, !u16i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1_lane_u16(ptr{{.*}}[[PTR:%.*]], <4 x i16>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <4 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[SRC_VAL:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 +// LLVM: store <4 x i16> [[SRC_VAL]], ptr [[S1:%.*]], align 8 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <4 x i16>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i16> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <4 x i16> +// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR_VAL]], align 2 +// LLVM: {{.*}} = insertelement <4 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 3 +// LLVM: ret <4 x i16> {{.*}} + +int16x8_t test_vld1q_lane_s16(int16_t const * ptr, int16x8_t src) { + return vld1q_lane_s16(ptr, src, 7); +} + +// CIR-LABEL: test_vld1q_lane_s16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(2) [[PTR]] : !cir.ptr, !s16i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1q_lane_s16(ptr{{.*}}[[PTR:%.*]], <8 x i16>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <8 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[SRC_VAL:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 +// LLVM: store <8 x i16> [[SRC_VAL]], ptr [[S1:%.*]], align 16 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <8 x i16>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <8 x i16> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <8 x i16> +// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR_VAL]], align 2 +// LLVM: {{.*}} = insertelement <8 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 7 +// LLVM: ret <8 x i16> {{.*}} + +uint16x8_t test_vld1q_lane_u16(uint16_t const * ptr, uint16x8_t src) { + return vld1q_lane_u16(ptr, src, 7); +} + +// CIR-LABEL: test_vld1q_lane_u16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(2) [[PTR]] : !cir.ptr, !u16i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1q_lane_u16(ptr{{.*}}[[PTR:%.*]], <8 x i16>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <8 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[SRC_VAL:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 +// LLVM: store <8 x i16> [[SRC_VAL]], ptr [[S1:%.*]], align 16 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <8 x i16>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <8 x i16> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <8 x i16> +// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR_VAL]], align 2 +// LLVM: {{.*}} = insertelement <8 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 7 +// LLVM: ret <8 x i16> {{.*}} + + + + +int32x2_t test_vld1_lane_s32(int32_t const * ptr, int32x2_t src) { + return vld1_lane_s32(ptr, src, 1); +} + +// CIR-LABEL: test_vld1_lane_s32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(4) [[PTR]] : !cir.ptr, !s32i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1_lane_s32(ptr{{.*}}[[PTR:%.*]], <2 x i32>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <2 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[SRC_VAL:%.*]] = load <2 x i32>, ptr [[SRC_ADDR]], align 8 +// LLVM: store <2 x i32> [[SRC_VAL]], ptr [[S1:%.*]], align 8 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <2 x i32>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i32> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <2 x i32> +// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR_VAL]], align 4 +// LLVM: {{.*}} = insertelement <2 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 1 +// LLVM: ret <2 x i32> {{.*}} + +uint32x2_t test_vld1_lane_u32(uint32_t const * ptr, uint32x2_t src) { + return vld1_lane_u32(ptr, src, 1); +} + +// CIR-LABEL: test_vld1_lane_u32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(4) [[PTR]] : !cir.ptr, !u32i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1_lane_u32(ptr{{.*}}[[PTR:%.*]], <2 x i32>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <2 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[SRC_VAL:%.*]] = load <2 x i32>, ptr [[SRC_ADDR]], align 8 +// LLVM: store <2 x i32> [[SRC_VAL]], ptr [[S1:%.*]], align 8 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <2 x i32>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i32> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <2 x i32> +// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR_VAL]], align 4 +// LLVM: {{.*}} = insertelement <2 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 1 +// LLVM: ret <2 x i32> {{.*}} + + +int32x4_t test_vld1q_lane_s32(int32_t const * ptr, int32x4_t src) { + return vld1q_lane_s32(ptr, src, 3); +} + +// CIR-LABEL: test_vld1q_lane_s32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(4) [[PTR]] : !cir.ptr, !s32i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1q_lane_s32(ptr{{.*}}[[PTR:%.*]], <4 x i32>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <4 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[SRC_VAL:%.*]] = load <4 x i32>, ptr [[SRC_ADDR]], align 16 +// LLVM: store <4 x i32> [[SRC_VAL]], ptr [[S1:%.*]], align 16 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <4 x i32>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i32> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <4 x i32> +// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR_VAL]], align 4 +// LLVM: {{.*}} = insertelement <4 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 3 +// LLVM: ret <4 x i32> {{.*}} + + +uint32x4_t test_vld1q_lane_u32(uint32_t const * ptr, uint32x4_t src) { + return vld1q_lane_u32(ptr, src, 3); +} + +// CIR-LABEL: test_vld1q_lane_u32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(4) [[PTR]] : !cir.ptr, !u32i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1q_lane_u32(ptr{{.*}}[[PTR:%.*]], <4 x i32>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <4 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[SRC_VAL:%.*]] = load <4 x i32>, ptr [[SRC_ADDR]], align 16 +// LLVM: store <4 x i32> [[SRC_VAL]], ptr [[S1:%.*]], align 16 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <4 x i32>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i32> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <4 x i32> +// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR_VAL]], align 4 +// LLVM: {{.*}} = insertelement <4 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 3 +// LLVM: ret <4 x i32> {{.*}} + +int64x1_t test_vld1_lane_s64(int64_t const * ptr, int64x1_t src) { + return vld1_lane_s64(ptr, src, 0); +} + +// CIR-LABEL: test_vld1_lane_s64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) [[PTR]] : !cir.ptr, !s64i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1_lane_s64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <1 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[SRC_VAL:%.*]] = load <1 x i64>, ptr [[SRC_ADDR]], align 8 +// LLVM: store <1 x i64> [[SRC_VAL]], ptr [[S1:%.*]], align 8 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <1 x i64>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <1 x i64> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <1 x i64> +// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR_VAL]], align 8 +// LLVM: {{.*}} = insertelement <1 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 0 +// LLVM: ret <1 x i64> {{.*}} + +uint64x1_t test_vld1_lane_u64(uint64_t const * ptr, uint64x1_t src) { + return vld1_lane_u64(ptr, src, 0); +} + +// CIR-LABEL: test_vld1_lane_u64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) [[PTR]] : !cir.ptr, !u64i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1_lane_u64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <1 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[SRC_VAL:%.*]] = load <1 x i64>, ptr [[SRC_ADDR]], align 8 +// LLVM: store <1 x i64> [[SRC_VAL]], ptr [[S1:%.*]], align 8 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <1 x i64>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <1 x i64> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <1 x i64> +// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR_VAL]], align 8 +// LLVM: {{.*}} = insertelement <1 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 0 +// LLVM: ret <1 x i64> {{.*}} + +int64x2_t test_vld1q_lane_s64(int64_t const * ptr, int64x2_t src) { + return vld1q_lane_s64(ptr, src, 1); +} + +// CIR-LABEL: test_vld1q_lane_s64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) [[PTR]] : !cir.ptr, !s64i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1q_lane_s64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <2 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[SRC_VAL:%.*]] = load <2 x i64>, ptr [[SRC_ADDR]], align 16 +// LLVM: store <2 x i64> [[SRC_VAL]], ptr [[S1:%.*]], align 16 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <2 x i64>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i64> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <2 x i64> +// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR_VAL]], align 8 +// LLVM: {{.*}} = insertelement <2 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 1 +// LLVM: ret <2 x i64> {{.*}} + +uint64x2_t test_vld1q_lane_u64(uint64_t const * ptr, uint64x2_t src) { + return vld1q_lane_u64(ptr, src, 1); +} + +// CIR-LABEL: test_vld1q_lane_u64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) [[PTR]] : !cir.ptr, !u64i +// CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vld1q_lane_u64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 +// LLVM: store <2 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[SRC_VAL:%.*]] = load <2 x i64>, ptr [[SRC_ADDR]], align 16 +// LLVM: store <2 x i64> [[SRC_VAL]], ptr [[S1:%.*]], align 16 +// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <2 x i64>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i64> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <2 x i64> +// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR_VAL]], align 8 +// LLVM: {{.*}} = insertelement <2 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 1 +// LLVM: ret <2 x i64> {{.*}} From 81e204386724de2429aa0cdd43f02743aae9a5ef Mon Sep 17 00:00:00 2001 From: 7mile Date: Wed, 2 Oct 2024 06:38:54 +0800 Subject: [PATCH 1904/2301] [CIR][CodeGen][NFC] Break the missing feature flag for OpenCL into smaller pieces (#902) The missing feature flag for OpenCL has very few occurrences now. This PR rearranges them into proper pieces to better track them. --- clang/include/clang/CIR/MissingFeatures.h | 2 +- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 3540300d622c..5f92e4e60cba 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -154,7 +154,7 @@ struct MissingFeatures { static bool CGFPOptionsRAII() { return false; } static bool getFPFeaturesInEffect() { return false; } static bool cxxABI() { return false; } - static bool openCL() { return false; } + static bool openCLCXX() { return false; } static bool openCLBuiltinTypes() { return false; } static bool CUDA() { return false; } static bool openMP() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 179e128ac2f8..e81ff16fd659 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -253,7 +253,7 @@ static void buildDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) { if (Record && (CanRegisterDestructor || UsingExternalHelper)) { assert(!D->getTLSKind() && "TLS NYI"); assert(!Record->hasTrivialDestructor()); - assert(!MissingFeatures::openCL()); + assert(!MissingFeatures::openCLCXX()); CXXDestructorDecl *Dtor = Record->getDestructor(); // In LLVM OG codegen this is done in registerGlobalDtor, but CIRGen // relies on LoweringPrepare for further decoupling, so build the diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 30e4019a24d0..a52ef462552b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -36,7 +36,6 @@ CIRGenFunction::AutoVarEmission CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, mlir::OpBuilder::InsertPoint ip) { QualType Ty = D.getType(); - assert(!MissingFeatures::openCL()); assert( Ty.getAddressSpace() == LangAS::Default || (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL)); From 591a7e9dfa34ea045b0faf5e371d1175d35a5359 Mon Sep 17 00:00:00 2001 From: 7mile Date: Wed, 2 Oct 2024 06:39:45 +0800 Subject: [PATCH 1905/2301] [CIR][CodeGen] Add `nothrow` for functions in OpenCL languages (#903) Heterogeneous languages do not support exceptions, which corresponds to `nothrow` in ClangIR and `nounwind` in LLVM IR. This PR adds nothrow attributes for all functions for OpenCL languages in CIRGen. The Lowering for it is already supported previously. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 10 ++++++++ clang/test/CIR/CodeGen/OpenCL/convergent.cl | 5 ++-- clang/test/CIR/CodeGen/OpenCL/nothrow.cl | 26 +++++++++++++++++++++ 3 files changed, 38 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/OpenCL/nothrow.cl diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index c5b7e2a2edae..2deb709e38f4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1591,6 +1591,16 @@ static void getTrivialDefaultFunctionAttributes( auto convgt = mlir::cir::ConvergentAttr::get(CGM.getBuilder().getContext()); funcAttrs.set(convgt.getMnemonic(), convgt); } + + // TODO: NoThrow attribute should be added for other GPU modes CUDA, SYCL, + // HIP, OpenMP offload. + // AFAIK, neither of them support exceptions in device code. + if ((langOpts.CUDA && langOpts.CUDAIsDevice) || langOpts.SYCLIsDevice) + llvm_unreachable("NYI"); + if (langOpts.OpenCL) { + auto noThrow = mlir::cir::NoThrowAttr::get(CGM.getBuilder().getContext()); + funcAttrs.set(noThrow.getMnemonic(), noThrow); + } } void CIRGenModule::getTrivialDefaultFunctionAttributes( diff --git a/clang/test/CIR/CodeGen/OpenCL/convergent.cl b/clang/test/CIR/CodeGen/OpenCL/convergent.cl index d953aa799307..a2d4a910004c 100644 --- a/clang/test/CIR/CodeGen/OpenCL/convergent.cl +++ b/clang/test/CIR/CodeGen/OpenCL/convergent.cl @@ -8,7 +8,6 @@ // CIR: #fn_attr[[CONV_NOINLINE_ATTR:[0-9]*]] = #cir // CIR-NEXT: #fn_attr[[CONV_DECL_ATTR:[0-9]*]] = #cir Date: Wed, 2 Oct 2024 06:41:38 +0800 Subject: [PATCH 1906/2301] [CIR][CodeGen] Set constant properly for global variables (#904) Fix #801 (the remaining `constant` part). Actually the missing stage is CIRGen. There are two places where `GV.setConstant` is called: * `buildGlobalVarDefinition` * `getOrCreateCIRGlobal` Therefore, the primary test `global-constant.c` contains a global definition and a global declaration with use, which should be enough to cover the two paths. A test for OpenCL `constant` qualified global is also added. Some existing testcases need tweaking to avoid failure of missing constant. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 9 +++--- clang/test/CIR/CodeGen/OpenCL/global.cl | 4 +++ .../CIR/CodeGen/cxx1z-inline-variables.cpp | 28 +++++++++---------- clang/test/CIR/CodeGen/global-constant.c | 16 +++++++++++ clang/test/CIR/CodeGen/globals.c | 2 +- clang/test/CIR/CodeGen/temporaries.cpp | 4 +-- 6 files changed, 41 insertions(+), 22 deletions(-) create mode 100644 clang/test/CIR/CodeGen/global-constant.c diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 76bad240e9c5..68e8e93e5b77 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -947,9 +947,8 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // FIXME: This code is overly simple and should be merged with other global // handling. GV.setAlignmentAttr(getSize(astCtx.getDeclAlign(D))); - // TODO(cir): - // GV->setConstant(isTypeConstant(D->getType(), false)); - // setLinkageForGV(GV, D); + GV.setConstant(isTypeConstant(D->getType(), false, false)); + // TODO(cir): setLinkageForGV(GV, D); if (D->getTLSKind()) { if (D->getTLSKind() == VarDecl::TLS_Dynamic) @@ -1278,8 +1277,8 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, emitter->finalize(GV); // TODO(cir): If it is safe to mark the global 'constant', do so now. - // GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor && - // isTypeConstant(D->getType(), true)); + GV.setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor && + isTypeConstant(D->getType(), true, true)); // If it is in a read-only section, mark it 'constant'. if (const SectionAttr *SA = D->getAttr()) diff --git a/clang/test/CIR/CodeGen/OpenCL/global.cl b/clang/test/CIR/CodeGen/OpenCL/global.cl index cab7378fd102..3ec7ee36fd80 100644 --- a/clang/test/CIR/CodeGen/OpenCL/global.cl +++ b/clang/test/CIR/CodeGen/OpenCL/global.cl @@ -11,6 +11,10 @@ global int b = 15; // CIR-DAG: cir.global external addrspace(offload_global) @b = #cir.int<15> : !s32i // LLVM-DAG: @b = addrspace(1) global i32 15 +constant int c[2] = {18, 21}; +// CIR-DAG: cir.global constant {{.*}}addrspace(offload_constant) {{.*}}@c +// LLVM-DAG: @c = addrspace(2) constant + kernel void test_get_global() { a = b; // CIR: %[[#ADDRB:]] = cir.get_global @b : !cir.ptr diff --git a/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp b/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp index 68cddd578767..8da371e6abe4 100644 --- a/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp +++ b/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp @@ -26,13 +26,13 @@ const int &compat_use_after_redecl1 = compat::c; const int &compat_use_after_redecl2 = compat::d; const int &compat_use_after_redecl3 = compat::g; -// CIR: cir.global weak_odr comdat @_ZN6compat1bE = #cir.int<2> : !s32i {alignment = 4 : i64} -// CIR: cir.global weak_odr comdat @_ZN6compat1aE = #cir.int<1> : !s32i {alignment = 4 : i64} -// CIR: cir.global weak_odr comdat @_ZN6compat1cE = #cir.int<3> : !s32i {alignment = 4 : i64} -// CIR: cir.global external @_ZN6compat1eE = #cir.int<5> : !s32i {alignment = 4 : i64} -// CIR: cir.global weak_odr comdat @_ZN6compat1fE = #cir.int<6> : !s32i {alignment = 4 : i64} -// CIR: cir.global linkonce_odr comdat @_ZN6compat1dE = #cir.int<4> : !s32i {alignment = 4 : i64} -// CIR: cir.global linkonce_odr comdat @_ZN6compat1gE = #cir.int<7> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant weak_odr comdat @_ZN6compat1bE = #cir.int<2> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant weak_odr comdat @_ZN6compat1aE = #cir.int<1> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant weak_odr comdat @_ZN6compat1cE = #cir.int<3> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant external @_ZN6compat1eE = #cir.int<5> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant weak_odr comdat @_ZN6compat1fE = #cir.int<6> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant linkonce_odr comdat @_ZN6compat1dE = #cir.int<4> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant linkonce_odr comdat @_ZN6compat1gE = #cir.int<7> : !s32i {alignment = 4 : i64} // LLVM: $_ZN6compat1bE = comdat any // LLVM: $_ZN6compat1aE = comdat any @@ -41,10 +41,10 @@ const int &compat_use_after_redecl3 = compat::g; // LLVM: $_ZN6compat1dE = comdat any // LLVM: $_ZN6compat1gE = comdat any -// LLVM: @_ZN6compat1bE = weak_odr global i32 2, comdat, align 4 -// LLVM: @_ZN6compat1aE = weak_odr global i32 1, comdat, align 4 -// LLVM: @_ZN6compat1cE = weak_odr global i32 3, comdat, align 4 -// LLVM: @_ZN6compat1eE = global i32 5, align 4 -// LLVM: @_ZN6compat1fE = weak_odr global i32 6, comdat, align 4 -// LLVM: @_ZN6compat1dE = linkonce_odr global i32 4, comdat, align 4 -// LLVM: @_ZN6compat1gE = linkonce_odr global i32 7, comdat, align 4 +// LLVM: @_ZN6compat1bE = weak_odr constant i32 2, comdat, align 4 +// LLVM: @_ZN6compat1aE = weak_odr constant i32 1, comdat, align 4 +// LLVM: @_ZN6compat1cE = weak_odr constant i32 3, comdat, align 4 +// LLVM: @_ZN6compat1eE = constant i32 5, align 4 +// LLVM: @_ZN6compat1fE = weak_odr constant i32 6, comdat, align 4 +// LLVM: @_ZN6compat1dE = linkonce_odr constant i32 4, comdat, align 4 +// LLVM: @_ZN6compat1gE = linkonce_odr constant i32 7, comdat, align 4 diff --git a/clang/test/CIR/CodeGen/global-constant.c b/clang/test/CIR/CodeGen/global-constant.c new file mode 100644 index 000000000000..4301fcee7a7a --- /dev/null +++ b/clang/test/CIR/CodeGen/global-constant.c @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +const int global_no_use = 12; +// CIR: cir.global constant {{.*}}@global_no_use +// LLVM: @global_no_use = constant + +const float global_used = 1.2f; +// CIR: cir.global constant {{.*}}@global_used +// LLVM: @global_used = constant + +float const * get_float_ptr() { + return &global_used; +} diff --git a/clang/test/CIR/CodeGen/globals.c b/clang/test/CIR/CodeGen/globals.c index 48a4db18bb63..8ed3b54cf0ba 100644 --- a/clang/test/CIR/CodeGen/globals.c +++ b/clang/test/CIR/CodeGen/globals.c @@ -92,7 +92,7 @@ struct Glob { } glob; double *const glob_ptr = &glob.b[1]; -// CHECK: cir.global external @glob_ptr = #cir.global_view<@glob, [2 : i32, 1 : i32]> : !cir.ptr +// CHECK: cir.global constant external @glob_ptr = #cir.global_view<@glob, [2 : i32, 1 : i32]> : !cir.ptr // TODO: test tentatives with internal linkage. diff --git a/clang/test/CIR/CodeGen/temporaries.cpp b/clang/test/CIR/CodeGen/temporaries.cpp index 1dafb75e8a70..23e0adb70b2d 100644 --- a/clang/test/CIR/CodeGen/temporaries.cpp +++ b/clang/test/CIR/CodeGen/temporaries.cpp @@ -32,8 +32,8 @@ const unsigned int n = 1234; const int &r = (const int&)n; // CHECK: cir.global "private" constant internal @_ZGR1r_ = #cir.int<1234> : !s32i -// CHECK-NEXT: cir.global external @r = #cir.global_view<@_ZGR1r_> : !cir.ptr {alignment = 8 : i64} +// CHECK-NEXT: cir.global constant external @r = #cir.global_view<@_ZGR1r_> : !cir.ptr {alignment = 8 : i64} // LLVM: @_ZGR1r_ = internal constant i32 1234, align 4 -// LLVM-NEXT: @r = global ptr @_ZGR1r_, align 8 +// LLVM-NEXT: @r = constant ptr @_ZGR1r_, align 8 From 20c01aabcdc48701b00675e2b894aed9fc8502d8 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Wed, 2 Oct 2024 14:24:27 -0400 Subject: [PATCH 1907/2301] [CIR][Test][NFC] Organize CIR CodeGen AArch64 neon tests (#910) as title. --------- Co-authored-by: Guojin He --- .../neon-arith.c} | 3 + .../neon-ldst.c} | 3 + .../neon-misc.c} | 213 +++++++++++++++- .../neon.c} | 238 +++++++++++------- .../CIR/CodeGen/aarch64-neon-simd-shift.c | 69 ----- clang/test/CIR/CodeGen/aarch64-neon-vget.c | 219 ---------------- clang/test/CIR/CodeGen/aarch64-neon-vqadd.c | 179 ------------- 7 files changed, 355 insertions(+), 569 deletions(-) rename clang/test/CIR/CodeGen/{arm-neon-directed-rounding.c => AArch64/neon-arith.c} (98%) rename clang/test/CIR/CodeGen/{aarch64-neon-ldst.c => AArch64/neon-ldst.c} (99%) rename clang/test/CIR/CodeGen/{aarch64-neon-vset.c => AArch64/neon-misc.c} (55%) rename clang/test/CIR/CodeGen/{aarch64-neon-intrinsics.c => AArch64/neon.c} (99%) delete mode 100644 clang/test/CIR/CodeGen/aarch64-neon-simd-shift.c delete mode 100644 clang/test/CIR/CodeGen/aarch64-neon-vget.c delete mode 100644 clang/test/CIR/CodeGen/aarch64-neon-vqadd.c diff --git a/clang/test/CIR/CodeGen/arm-neon-directed-rounding.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c similarity index 98% rename from clang/test/CIR/CodeGen/arm-neon-directed-rounding.c rename to clang/test/CIR/CodeGen/AArch64/neon-arith.c index 92b4a9298eac..192486579143 100644 --- a/clang/test/CIR/CodeGen/arm-neon-directed-rounding.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -8,6 +8,9 @@ // REQUIRES: aarch64-registered-target || arm-registered-target #include +// This test file contains tests for aarch64 NEON arithmetic intrinsics +// that are not vector type related. + float32_t test_vrndns_f32(float32_t a) { return vrndns_f32(a); } diff --git a/clang/test/CIR/CodeGen/aarch64-neon-ldst.c b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c similarity index 99% rename from clang/test/CIR/CodeGen/aarch64-neon-ldst.c rename to clang/test/CIR/CodeGen/AArch64/neon-ldst.c index 9b6ed9ee479c..d112f3a81808 100644 --- a/clang/test/CIR/CodeGen/aarch64-neon-ldst.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c @@ -6,6 +6,9 @@ // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // REQUIRES: aarch64-registered-target || arm-registered-target + +// This test file contains tests for the AArch64 NEON load/store intrinsics. + #include int8x8_t test_vld1_lane_s8(int8_t const * ptr, int8x8_t src) { diff --git a/clang/test/CIR/CodeGen/aarch64-neon-vset.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c similarity index 55% rename from clang/test/CIR/CodeGen/aarch64-neon-vset.c rename to clang/test/CIR/CodeGen/AArch64/neon-misc.c index 5da779ff69eb..0c20576e62d8 100644 --- a/clang/test/CIR/CodeGen/aarch64-neon-vset.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -5,13 +5,8 @@ // RUN: -emit-llvm -target-feature +neon %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s -// This test file is similar to but not the same as -// clang/test/CodeGen/aarch64-neon-vget.c -// The difference is that this file only tests uses vset intrinsics, as we feel -// it would be proper to have a separate test file testing vget intrinsics -// with the file name aarch64-neon-vget.c -// Also, for each integer type, we only test signed or unsigned, not both. -// This is because integer types of the same size just use same intrinsic. +// This test file contains tests of AArch64 NEON intrinsics +// that are not covered by other tests. // REQUIRES: aarch64-registered-target || arm-registered-target #include @@ -236,3 +231,207 @@ float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) { // LLVM: [[INTRN_ARG1:%.*]] = load <4 x float>, ptr [[S1]], align 16 // LLVM: [[INTRN_RES:%.*]] = insertelement <4 x float> [[INTRN_ARG1]], float [[INTRN_ARG0]], i32 3 // LLVM: ret <4 x float> {{%.*}} + +uint8_t test_vget_lane_u8(uint8x8_t a) { + return vget_lane_u8(a, 7); +} + +// CIR-LABEL: test_vget_lane_u8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i8 @test_vget_lane_u8(<8 x i8> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i8>, i64 1, align 8 +// LLVM: store <8 x i8> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <8 x i8>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <8 x i8> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <8 x i8>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <8 x i8> [[INTRN_ARG]], i32 7 +// LLVM: ret i8 {{%.*}} + +uint8_t test_vgetq_lane_u8(uint8x16_t a) { + return vgetq_lane_u8(a, 15); +} + +// CIR-LABEL: test_vgetq_lane_u8 +// CIR: [[IDX:%.*]] = cir.const #cir.int<15> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i8 @test_vgetq_lane_u8(<16 x i8> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <16 x i8>, i64 1, align 16 +// LLVM: store <16 x i8> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <16 x i8>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <16 x i8> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <16 x i8>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <16 x i8> [[INTRN_ARG]], i32 15 +// LLVM: ret i8 {{%.*}} + +uint16_t test_vget_lane_u16(uint16x4_t a) { + return vget_lane_u16(a, 3); +} + +// CIR-LABEL: test_vget_lane_u16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i16 @test_vget_lane_u16(<4 x i16> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i16>, i64 1, align 8 +// LLVM: store <4 x i16> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <4 x i16>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <4 x i16> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <4 x i16>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <4 x i16> [[INTRN_ARG]], i32 3 +// LLVM: ret i16 {{%.*}} + +uint16_t test_vgetq_lane_u16(uint16x8_t a) { + return vgetq_lane_u16(a, 7); +} + +// CIR-LABEL: test_vgetq_lane_u16 +// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i16 @test_vgetq_lane_u16(<8 x i16> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i16>, i64 1, align 16 +// LLVM: store <8 x i16> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <8 x i16>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <8 x i16> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <8 x i16>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <8 x i16> [[INTRN_ARG]], i32 7 +// LLVM: ret i16 {{%.*}} + +uint32_t test_vget_lane_u32(uint32x2_t a) { + return vget_lane_u32(a, 1); +} + +// CIR-LABEL: test_vget_lane_u32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i32 @test_vget_lane_u32(<2 x i32> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i32>, i64 1, align 8 +// LLVM: store <2 x i32> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <2 x i32>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <2 x i32> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x i32>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <2 x i32> [[INTRN_ARG]], i32 1 +// LLVM: ret i32 {{%.*}} + +uint32_t test_vgetq_lane_u32(uint32x4_t a) { + return vgetq_lane_u32(a, 3); +} + +// CIR-LABEL: test_vgetq_lane_u32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i32 @test_vgetq_lane_u32(<4 x i32> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: store <4 x i32> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <4 x i32>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <4 x i32> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <4 x i32>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <4 x i32> [[INTRN_ARG]], i32 3 +// LLVM: ret i32 {{%.*}} + +uint64_t test_vget_lane_u64(uint64x1_t a) { + return vget_lane_u64(a, 0); +} + +// CIR-LABEL: test_vget_lane_u64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i64 @test_vget_lane_u64(<1 x i64> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <1 x i64>, i64 1, align 8 +// LLVM: store <1 x i64> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <1 x i64>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <1 x i64> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <1 x i64>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <1 x i64> [[INTRN_ARG]], i32 0 +// LLVM: ret i64 {{%.*}} + +uint64_t test_vgetq_lane_u64(uint64x2_t a) { + return vgetq_lane_u64(a, 1); +} + +// CIR-LABEL: test_vgetq_lane_u64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local i64 @test_vgetq_lane_u64(<2 x i64> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i64>, i64 1, align 16 +// LLVM: store <2 x i64> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <2 x i64>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <2 x i64> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x i64>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <2 x i64> [[INTRN_ARG]], i32 1 +// LLVM: ret i64 {{%.*}} + +float32_t test_vget_lane_f32(float32x2_t a) { + return vget_lane_f32(a, 1); +} + +// CIR-LABEL: test_vget_lane_f32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local float @test_vget_lane_f32(<2 x float> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x float>, i64 1, align 8 +// LLVM: store <2 x float> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <2 x float>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <2 x float> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x float>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <2 x float> [[INTRN_ARG]], i32 1 +// LLVM: ret float {{%.*}} + +float64_t test_vget_lane_f64(float64x1_t a) { + return vget_lane_f64(a, 0); +} + +// CIR-LABEL: test_vget_lane_f64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local double @test_vget_lane_f64(<1 x double> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <1 x double>, i64 1, align 8 +// LLVM: store <1 x double> [[ARG]], ptr [[ARG_SAVE]], align 8 +// LLVM: [[TMP:%.*]] = load <1 x double>, ptr [[ARG_SAVE:%.*]], align 8 +// LLVM: store <1 x double> [[TMP]], ptr [[S0:%.*]], align 8 +// LLVM: [[INTRN_ARG:%.*]] = load <1 x double>, ptr [[S0]], align 8 +// LLVM: {{%.*}} = extractelement <1 x double> [[INTRN_ARG]], i32 0 +// LLVM: ret double {{%.*}} + +float32_t test_vgetq_lane_f32(float32x4_t a) { + return vgetq_lane_f32(a, 3); +} + +// CIR-LABEL: test_vgetq_lane_f32 +// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local float @test_vgetq_lane_f32(<4 x float> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x float>, i64 1, align 16 +// LLVM: store <4 x float> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <4 x float>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <4 x float> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <4 x float>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <4 x float> [[INTRN_ARG]], i32 3 +// LLVM: ret float {{%.*}} + +float64_t test_vgetq_lane_f64(float64x2_t a) { + return vgetq_lane_f64(a, 1); +} + +// CIR-LABEL: test_vgetq_lane_f64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: define dso_local double @test_vgetq_lane_f64(<2 x double> [[ARG:%.*]]) +// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x double>, i64 1, align 16 +// LLVM: store <2 x double> [[ARG]], ptr [[ARG_SAVE]], align 16 +// LLVM: [[TMP:%.*]] = load <2 x double>, ptr [[ARG_SAVE:%.*]], align 16 +// LLVM: store <2 x double> [[TMP]], ptr [[S0:%.*]], align 16 +// LLVM: [[INTRN_ARG:%.*]] = load <2 x double>, ptr [[S0]], align 16 +// LLVM: {{%.*}} = extractelement <2 x double> [[INTRN_ARG]], i32 1 +// LLVM: ret double {{%.*}} diff --git a/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c b/clang/test/CIR/CodeGen/AArch64/neon.c similarity index 99% rename from clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c rename to clang/test/CIR/CodeGen/AArch64/neon.c index 02aa70a4d628..54520e688a59 100644 --- a/clang/test/CIR/CodeGen/aarch64-neon-intrinsics.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -2839,79 +2839,103 @@ // return vrhaddq_u32(v1, v2); // } -// NYI-LABEL: @test_vqadd_s8( -// NYI: [[VQADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i8> [[VQADD_V_I]] -// int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { -// return vqadd_s8(a, b); -// } -// NYI-LABEL: @test_vqadd_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VQADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: [[VQADD_V3_I:%.*]] = bitcast <4 x i16> [[VQADD_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VQADD_V2_I]] -// int16x4_t test_vqadd_s16(int16x4_t a, int16x4_t b) { -// return vqadd_s16(a, b); -// } +int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { + return vqadd_s8(a, b); + // CIR-LABEL: vqadd_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vqadd_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VQADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: [[VQADD_V3_I:%.*]] = bitcast <2 x i32> [[VQADD_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VQADD_V2_I]] -// int32x2_t test_vqadd_s32(int32x2_t a, int32x2_t b) { -// return vqadd_s32(a, b); -// } - -// NYI-LABEL: @test_vqadd_s64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> -// NYI: [[VQADD_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqadd.v1i64(<1 x i64> %a, <1 x i64> %b) -// NYI: [[VQADD_V3_I:%.*]] = bitcast <1 x i64> [[VQADD_V2_I]] to <8 x i8> -// NYI: ret <1 x i64> [[VQADD_V2_I]] -// int64x1_t test_vqadd_s64(int64x1_t a, int64x1_t b) { -// return vqadd_s64(a, b); -// } - -// NYI-LABEL: @test_vqadd_u8( -// NYI: [[VQADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i8> [[VQADD_V_I]] -// uint8x8_t test_vqadd_u8(uint8x8_t a, uint8x8_t b) { -// return vqadd_u8(a, b); -// } - -// NYI-LABEL: @test_vqadd_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VQADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: [[VQADD_V3_I:%.*]] = bitcast <4 x i16> [[VQADD_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VQADD_V2_I]] -// uint16x4_t test_vqadd_u16(uint16x4_t a, uint16x4_t b) { -// return vqadd_u16(a, b); -// } - -// NYI-LABEL: @test_vqadd_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VQADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: [[VQADD_V3_I:%.*]] = bitcast <2 x i32> [[VQADD_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VQADD_V2_I]] -// uint32x2_t test_vqadd_u32(uint32x2_t a, uint32x2_t b) { -// return vqadd_u32(a, b); -// } + // LLVM-LABEL: @test_vqadd_s8( + // LLVM: [[VQADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %0, <8 x i8> %1) + // LLVM: ret <8 x i8> [[VQADD_V_I]] +} -// NYI-LABEL: @test_vqadd_u64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> -// NYI: [[VQADD_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uqadd.v1i64(<1 x i64> %a, <1 x i64> %b) -// NYI: [[VQADD_V3_I:%.*]] = bitcast <1 x i64> [[VQADD_V2_I]] to <8 x i8> -// NYI: ret <1 x i64> [[VQADD_V2_I]] -// uint64x1_t test_vqadd_u64(uint64x1_t a, uint64x1_t b) { -// return vqadd_u64(a, b); -// } + int16x4_t test_vqadd_s16(int16x4_t a, int16x4_t b) { + return vqadd_s16(a, b); + // CIR-LABEL: vqadd_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vqadd_s16( + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> %1 to <8 x i8> + // LLVM: [[VQADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %0, <4 x i16> %1) + // LLVM: [[VQADD_V3_I:%.*]] = bitcast <4 x i16> [[VQADD_V2_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VQADD_V2_I]] + } + + int32x2_t test_vqadd_s32(int32x2_t a, int32x2_t b) { + return vqadd_s32(a, b); + // CIR-LABEL: vqadd_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vqadd_s32( + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> %1 to <8 x i8> + // LLVM: [[VQADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %0, <2 x i32> %1) + // LLVM: [[VQADD_V3_I:%.*]] = bitcast <2 x i32> [[VQADD_V2_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VQADD_V2_I]] + } + + int64x1_t test_vqadd_s64(int64x1_t a, int64x1_t b) { + return vqadd_s64(a, b); + // CIR-LABEL: vqadd_s64 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vqadd_s64( + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> %1 to <8 x i8> + // LLVM: [[VQADD_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqadd.v1i64(<1 x i64> %0, <1 x i64> %1) + // LLVM: [[VQADD_V3_I:%.*]] = bitcast <1 x i64> [[VQADD_V2_I]] to <8 x i8> + // LLVM: ret <1 x i64> [[VQADD_V2_I]] + } + + uint8x8_t test_vqadd_u8(uint8x8_t a, uint8x8_t b) { + return vqadd_u8(a, b); + // CIR-LABEL: vqadd_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vqadd_u8( + // LLVM: [[VQADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %0, <8 x i8> %1) + // LLVM: ret <8 x i8> [[VQADD_V_I]] + } + + uint16x4_t test_vqadd_u16(uint16x4_t a, uint16x4_t b) { + return vqadd_u16(a, b); + // CIR-LABEL: vqadd_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vqadd_u16( + // LLVM: [[VQADD_V_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %0, <4 x i16> %1) + // LLVM: ret <4 x i16> [[VQADD_V_I]] + } + + uint32x2_t test_vqadd_u32(uint32x2_t a, uint32x2_t b) { + return vqadd_u32(a, b); + // CIR-LABEL: vqadd_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vqadd_u32( + // LLVM: [[VQADD_V_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> %0, <2 x i32> %1) + // LLVM: ret <2 x i32> [[VQADD_V_I]] + } + + uint64x1_t test_vqadd_u64(uint64x1_t a, uint64x1_t b) { + return vqadd_u64(a, b); + // CIR-LABEL: vqadd_u64 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vqadd_u64( + // LLVM: [[VQADD_V_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uqadd.v1i64(<1 x i64> %0, <1 x i64> %1) + // LLVM: ret <1 x i64> [[VQADD_V_I]] + } // NYI-LABEL: @test_vqaddq_s8( // NYI: [[VQADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> %a, <16 x i8> %b) @@ -5972,32 +5996,56 @@ // return vrshrn_high_n_u64(a, b, 19); // } -// NYI-LABEL: @test_vqrshrun_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VQRSHRUN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> [[VQRSHRUN_N]], i32 3) -// NYI: ret <8 x i8> [[VQRSHRUN_N1]] -// uint8x8_t test_vqrshrun_n_s16(int16x8_t a) { -// return vqrshrun_n_s16(a, 3); -// } +uint8x8_t test_vqrshrun_n_s16(int16x8_t a) { + return vqrshrun_n_s16(a, 3); + // CIR-LABEL: test_vqrshrun_n_s16 + // CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<3> : !s32i + // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM-LABEL: @test_vqrshrun_n_s16( + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> {{%.*}} to <16 x i8> + // LLVM: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VQRSHRUN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> [[VQRSHRUN_N]], i32 3) + // LLVM: store <8 x i8> [[VQRSHRUN_N1]], ptr [[RET:%.*]], align 8 + // LLVM: [[RETVAL:%.*]] = load <8 x i8>, ptr [[RET]], align 8 + // LLVM: ret <8 x i8> [[RETVAL]] +} -// NYI-LABEL: @test_vqrshrun_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VQRSHRUN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> [[VQRSHRUN_N]], i32 9) -// NYI: ret <4 x i16> [[VQRSHRUN_N1]] -// uint16x4_t test_vqrshrun_n_s32(int32x4_t a) { -// return vqrshrun_n_s32(a, 9); -// } +uint16x4_t test_vqrshrun_n_s32(int32x4_t a) { + return vqrshrun_n_s32(a, 9); + // CIR-LABEL: test_vqrshrun_n_s32 + // CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<9> : !s32i + // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM-LABEL: @test_vqrshrun_n_s32( + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> {{%.*}} to <16 x i8> + // LLVM: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VQRSHRUN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> [[VQRSHRUN_N]], i32 9) + // LLVM: store <4 x i16> [[VQRSHRUN_N1]], ptr [[RET:%.*]], align 8 + // LLVM: [[RETVAL:%.*]] = load <4 x i16>, ptr [[RET]], align 8 + // LLVM: ret <4 x i16> [[RETVAL]] +} -// NYI-LABEL: @test_vqrshrun_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VQRSHRUN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> [[VQRSHRUN_N]], i32 19) -// NYI: ret <2 x i32> [[VQRSHRUN_N1]] -// uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { -// return vqrshrun_n_s64(a, 19); -// } +uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { + return vqrshrun_n_s64(a, 19); + // CIR-LABEL: test_vqrshrun_n_s64 + // CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<19> : !s32i + // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM-LABEL: @test_vqrshrun_n_s64( + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> {{%.*}} to <16 x i8> + // LLVM: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VQRSHRUN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> [[VQRSHRUN_N]], i32 19) + // LLVM: store <2 x i32> [[VQRSHRUN_N1]], ptr [[RET:%.*]], align 8 + // LLVM: [[RETVAL:%.*]] = load <2 x i32>, ptr [[RET]], align 8 + // LLVM: ret <2 x i32> [[RETVAL]] +} // NYI-LABEL: @test_vqrshrun_high_n_s16( // NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> @@ -6041,7 +6089,7 @@ // NYI-LABEL: @test_vqshrn_n_s32( // NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> // NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VQSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> [[VQSHRN_N]], i32 9) +// NYI: [[VQSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> [[VQSHRN_N]], i32 19) // NYI: ret <4 x i16> [[VQSHRN_N1]] // int16x4_t test_vqshrn_n_s32(int32x4_t a) { // return vqshrn_n_s32(a, 9); diff --git a/clang/test/CIR/CodeGen/aarch64-neon-simd-shift.c b/clang/test/CIR/CodeGen/aarch64-neon-simd-shift.c deleted file mode 100644 index 8619ad0c78d6..000000000000 --- a/clang/test/CIR/CodeGen/aarch64-neon-simd-shift.c +++ /dev/null @@ -1,69 +0,0 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -ffreestanding -emit-cir -target-feature +neon %s -o %t.cir -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -ffreestanding -emit-llvm -target-feature +neon %s -o %t.ll -// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s - -// REQUIRES: aarch64-registered-target || arm-registered-target -#include - -uint8x8_t test_vqrshrun_n_s16(int16x8_t a) { - return vqrshrun_n_s16(a, 3); -} - -// CIR-LABEL: test_vqrshrun_n_s16 -// CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<3> : !s32i -// CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : -// CIR-SAME: (!cir.vector, !s32i) -> !cir.vector - -// LLVM: {{.*}}test_vqrshrun_n_s16(<8 x i16>{{.*}} [[A:%.*]]) -// LLVM: store <8 x i16> [[A]], ptr [[A_ADDR:%.*]], align 16 -// LLVM: [[A_VAL:%.*]] = load <8 x i16>, ptr [[A_ADDR]], align 16 -// LLVM: store <8 x i16> [[A_VAL]], ptr [[S0:%.*]], align 16 -// LLVM: [[S0_VAL:%.*]] = load <8 x i16>, ptr [[S0]], align 16 -// LLVM: [[S0_VAL_CAST:%.*]] = bitcast <8 x i16> [[S0_VAL]] to <16 x i8> -// LLVM: [[INTRN_ARG:%.*]] = bitcast <16 x i8> [[S0_VAL_CAST]] to <8 x i16> -// LLVM: {{%.*}} = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> [[INTRN_ARG]], i32 3) -// LLVM: ret <8 x i8> {{%.*}} - -uint16x4_t test_vqrshrun_n_s32(int32x4_t a) { - return vqrshrun_n_s32(a, 7); -} - -// CIR-LABEL: test_vqrshrun_n_s32 -// CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<7> : !s32i -// CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : -// CIR-SAME: (!cir.vector, !s32i) -> !cir.vector - -// LLVM: {{.*}}test_vqrshrun_n_s32(<4 x i32>{{.*}} [[A:%.*]]) -// LLVM: store <4 x i32> [[A]], ptr [[A_ADDR:%.*]], align 16 -// LLVM: [[A_VAL:%.*]] = load <4 x i32>, ptr [[A_ADDR]], align 16 -// LLVM: store <4 x i32> [[A_VAL]], ptr [[S0:%.*]], align 16 -// LLVM: [[S0_VAL:%.*]] = load <4 x i32>, ptr [[S0]], align 16 -// LLVM: [[S0_VAL_CAST:%.*]] = bitcast <4 x i32> [[S0_VAL]] to <16 x i8> -// LLVM: [[INTRN_ARG:%.*]] = bitcast <16 x i8> [[S0_VAL_CAST]] to <4 x i32> -// LLVM: {{%.*}} = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> [[INTRN_ARG]], i32 7) -// LLVM: ret <4 x i16> {{%.*}} - -uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { - return vqrshrun_n_s64(a, 15); -} - -// CIR-LABEL: test_vqrshrun_n_s64 -// CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<15> : !s32i -// CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : -// CIR-SAME: (!cir.vector, !s32i) -> !cir.vector - -// LLVM: {{.*}}test_vqrshrun_n_s64(<2 x i64>{{.*}} [[A:%.*]]) -// LLVM: store <2 x i64> [[A]], ptr [[A_ADDR:%.*]], align 16 -// LLVM: [[A_VAL:%.*]] = load <2 x i64>, ptr [[A_ADDR]], align 16 -// LLVM: store <2 x i64> [[A_VAL]], ptr [[S0:%.*]], align 16 -// LLVM: [[S0_VAL:%.*]] = load <2 x i64>, ptr [[S0]], align 16 -// LLVM: [[S0_VAL_CAST:%.*]] = bitcast <2 x i64> [[S0_VAL]] to <16 x i8> -// LLVM: [[INTRN_ARG:%.*]] = bitcast <16 x i8> [[S0_VAL_CAST]] to <2 x i64> -// LLVM: {{%.*}} = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> [[INTRN_ARG]], i32 15) -// LLVM: ret <2 x i32> {{%.*}} diff --git a/clang/test/CIR/CodeGen/aarch64-neon-vget.c b/clang/test/CIR/CodeGen/aarch64-neon-vget.c deleted file mode 100644 index b16648691d1b..000000000000 --- a/clang/test/CIR/CodeGen/aarch64-neon-vget.c +++ /dev/null @@ -1,219 +0,0 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -emit-cir -target-feature +neon %s -o %t.cir -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -emit-llvm -target-feature +neon %s -o %t.ll -// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s - -// This test file contains test cases to those of -// clang/test/CodeGen/aarch64-neon-vget.c -// The difference is that this file only tests uses vget intrinsics, as we feel -// it would be proper to have a separate test file testing vset intrinsics -// with the file name aarch64-neon-vset.c - -// REQUIRES: aarch64-registered-target || arm-registered-target -#include - -uint8_t test_vget_lane_u8(uint8x8_t a) { - return vget_lane_u8(a, 7); -} - -// CIR-LABEL: test_vget_lane_u8 -// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local i8 @test_vget_lane_u8(<8 x i8> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i8>, i64 1, align 8 -// LLVM: store <8 x i8> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <8 x i8>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <8 x i8> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <8 x i8>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <8 x i8> [[INTRN_ARG]], i32 7 -// LLVM: ret i8 {{%.*}} - -uint8_t test_vgetq_lane_u8(uint8x16_t a) { - return vgetq_lane_u8(a, 15); -} - -// CIR-LABEL: test_vgetq_lane_u8 -// CIR: [[IDX:%.*]] = cir.const #cir.int<15> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local i8 @test_vgetq_lane_u8(<16 x i8> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <16 x i8>, i64 1, align 16 -// LLVM: store <16 x i8> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <16 x i8>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <16 x i8> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <16 x i8>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <16 x i8> [[INTRN_ARG]], i32 15 -// LLVM: ret i8 {{%.*}} - -uint16_t test_vget_lane_u16(uint16x4_t a) { - return vget_lane_u16(a, 3); -} - -// CIR-LABEL: test_vget_lane_u16 -// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local i16 @test_vget_lane_u16(<4 x i16> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i16>, i64 1, align 8 -// LLVM: store <4 x i16> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <4 x i16>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <4 x i16> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <4 x i16>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <4 x i16> [[INTRN_ARG]], i32 3 -// LLVM: ret i16 {{%.*}} - -uint16_t test_vgetq_lane_u16(uint16x8_t a) { - return vgetq_lane_u16(a, 7); -} - -// CIR-LABEL: test_vgetq_lane_u16 -// CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local i16 @test_vgetq_lane_u16(<8 x i16> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i16>, i64 1, align 16 -// LLVM: store <8 x i16> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <8 x i16>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <8 x i16> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <8 x i16>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <8 x i16> [[INTRN_ARG]], i32 7 -// LLVM: ret i16 {{%.*}} - -uint32_t test_vget_lane_u32(uint32x2_t a) { - return vget_lane_u32(a, 1); -} - -// CIR-LABEL: test_vget_lane_u32 -// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local i32 @test_vget_lane_u32(<2 x i32> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i32>, i64 1, align 8 -// LLVM: store <2 x i32> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <2 x i32>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <2 x i32> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <2 x i32>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <2 x i32> [[INTRN_ARG]], i32 1 -// LLVM: ret i32 {{%.*}} - -uint32_t test_vgetq_lane_u32(uint32x4_t a) { - return vgetq_lane_u32(a, 3); -} - -// CIR-LABEL: test_vgetq_lane_u32 -// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local i32 @test_vgetq_lane_u32(<4 x i32> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i32>, i64 1, align 16 -// LLVM: store <4 x i32> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <4 x i32>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <4 x i32> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <4 x i32>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <4 x i32> [[INTRN_ARG]], i32 3 -// LLVM: ret i32 {{%.*}} - -uint64_t test_vget_lane_u64(uint64x1_t a) { - return vget_lane_u64(a, 0); -} - -// CIR-LABEL: test_vget_lane_u64 -// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local i64 @test_vget_lane_u64(<1 x i64> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <1 x i64>, i64 1, align 8 -// LLVM: store <1 x i64> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <1 x i64>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <1 x i64> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <1 x i64>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <1 x i64> [[INTRN_ARG]], i32 0 -// LLVM: ret i64 {{%.*}} - -uint64_t test_vgetq_lane_u64(uint64x2_t a) { - return vgetq_lane_u64(a, 1); -} - -// CIR-LABEL: test_vgetq_lane_u64 -// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local i64 @test_vgetq_lane_u64(<2 x i64> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i64>, i64 1, align 16 -// LLVM: store <2 x i64> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <2 x i64>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <2 x i64> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <2 x i64>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <2 x i64> [[INTRN_ARG]], i32 1 -// LLVM: ret i64 {{%.*}} - -float32_t test_vget_lane_f32(float32x2_t a) { - return vget_lane_f32(a, 1); -} - -// CIR-LABEL: test_vget_lane_f32 -// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local float @test_vget_lane_f32(<2 x float> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x float>, i64 1, align 8 -// LLVM: store <2 x float> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <2 x float>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <2 x float> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <2 x float>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <2 x float> [[INTRN_ARG]], i32 1 -// LLVM: ret float {{%.*}} - -float64_t test_vget_lane_f64(float64x1_t a) { - return vget_lane_f64(a, 0); -} - -// CIR-LABEL: test_vget_lane_f64 -// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local double @test_vget_lane_f64(<1 x double> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <1 x double>, i64 1, align 8 -// LLVM: store <1 x double> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <1 x double>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <1 x double> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <1 x double>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <1 x double> [[INTRN_ARG]], i32 0 -// LLVM: ret double {{%.*}} - -float32_t test_vgetq_lane_f32(float32x4_t a) { - return vgetq_lane_f32(a, 3); -} - -// CIR-LABEL: test_vgetq_lane_f32 -// CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local float @test_vgetq_lane_f32(<4 x float> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x float>, i64 1, align 16 -// LLVM: store <4 x float> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <4 x float>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <4 x float> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <4 x float>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <4 x float> [[INTRN_ARG]], i32 3 -// LLVM: ret float {{%.*}} - -float64_t test_vgetq_lane_f64(float64x2_t a) { - return vgetq_lane_f64(a, 1); -} - -// CIR-LABEL: test_vgetq_lane_f64 -// CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i -// CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector - -// LLVM: define dso_local double @test_vgetq_lane_f64(<2 x double> [[ARG:%.*]]) -// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x double>, i64 1, align 16 -// LLVM: store <2 x double> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <2 x double>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <2 x double> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <2 x double>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <2 x double> [[INTRN_ARG]], i32 1 -// LLVM: ret double {{%.*}} diff --git a/clang/test/CIR/CodeGen/aarch64-neon-vqadd.c b/clang/test/CIR/CodeGen/aarch64-neon-vqadd.c deleted file mode 100644 index 0932d95866c5..000000000000 --- a/clang/test/CIR/CodeGen/aarch64-neon-vqadd.c +++ /dev/null @@ -1,179 +0,0 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -emit-cir -target-feature +neon %s -o %t.cir -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -emit-llvm -target-feature +neon %s -o %t.ll -// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s - -// Tetsting normal situation of vdup lane intrinsics. - -// REQUIRES: aarch64-registered-target || arm-registered-target -#include - -uint8x8_t test_vqadd_u8(uint8x8_t a, uint8x8_t b) { - return vqadd_u8(a,b); -} - -// CIR-LABEL: vqadd_u8 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : -// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// CIR: cir.return - -// LLVM: {{.*}}test_vqadd_u8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]]) -// LLVM: store <8 x i8> [[A]], ptr [[A_ADDR:%.*]], align 8 -// LLVM: store <8 x i8> [[B]], ptr [[B_ADDR:%.*]], align 8 -// LLVM: [[TMP_A:%.*]] = load <8 x i8>, ptr [[A_ADDR]], align 8 -// LLVM: [[TMP_B:%.*]] = load <8 x i8>, ptr [[B_ADDR]], align 8 -// LLVM: store <8 x i8> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 -// LLVM: store <8 x i8> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 -// LLVM: [[INTRN_A:%.*]] = load <8 x i8>, ptr [[P0_ADDR]], align 8 -// LLVM: [[INTRN_B:%.*]] = load <8 x i8>, ptr [[P1_ADDR]], align 8 -// LLVM: {{%.*}} = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> [[INTRN_A]], <8 x i8> [[INTRN_B]]) -// LLVM: ret <8 x i8> - -int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { - return vqadd_s8(a,b); -} - -// CIR-LABEL: vqadd_s8 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : -// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// CIR: cir.return - -// LLVM: {{.*}}test_vqadd_s8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]]) -// LLVM: store <8 x i8> [[A]], ptr [[A_ADDR:%.*]], align 8 -// LLVM: store <8 x i8> [[B]], ptr [[B_ADDR:%.*]], align 8 -// LLVM: [[TMP_A:%.*]] = load <8 x i8>, ptr [[A_ADDR]], align 8 -// LLVM: [[TMP_B:%.*]] = load <8 x i8>, ptr [[B_ADDR]], align 8 -// LLVM: store <8 x i8> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 -// LLVM: store <8 x i8> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 -// LLVM: [[INTRN_A:%.*]] = load <8 x i8>, ptr [[P0_ADDR]], align 8 -// LLVM: [[INTRN_B:%.*]] = load <8 x i8>, ptr [[P1_ADDR]], align 8 -// LLVM: {{%.*}} = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> [[INTRN_A]], <8 x i8> [[INTRN_B]]) -// LLVM: ret <8 x i8> - -uint16x4_t test_vqadd_u16(uint16x4_t a, uint16x4_t b) { - return vqadd_u16(a,b); -} - -// CIR-LABEL: vqadd_u16 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : -// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// CIR: cir.return - -// LLVM: {{.*}}test_vqadd_u16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]]) -// LLVM: store <4 x i16> [[A]], ptr [[A_ADDR:%.*]], align 8 -// LLVM: store <4 x i16> [[B]], ptr [[B_ADDR:%.*]], align 8 -// LLVM: [[TMP_A:%.*]] = load <4 x i16>, ptr [[A_ADDR]], align 8 -// LLVM: [[TMP_B:%.*]] = load <4 x i16>, ptr [[B_ADDR]], align 8 -// LLVM: store <4 x i16> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 -// LLVM: store <4 x i16> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 -// LLVM: [[INTRN_A:%.*]] = load <4 x i16>, ptr [[P0_ADDR]], align 8 -// LLVM: [[INTRN_B:%.*]] = load <4 x i16>, ptr [[P1_ADDR]], align 8 -// LLVM: {{%.*}} = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> [[INTRN_A]], <4 x i16> [[INTRN_B]]) -// LLVM: ret <4 x i16> - -int16x4_t test_vqadd_s16(int16x4_t a, int16x4_t b) { - return vqadd_s16(a,b); -} - -// CIR-LABEL: vqadd_u16 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : -// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// CIR: cir.return - -// LLVM: {{.*}}test_vqadd_s16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]]) -// LLVM: store <4 x i16> [[A]], ptr [[A_ADDR:%.*]], align 8 -// LLVM: store <4 x i16> [[B]], ptr [[B_ADDR:%.*]], align 8 -// LLVM: [[TMP_A:%.*]] = load <4 x i16>, ptr [[A_ADDR]], align 8 -// LLVM: [[TMP_B:%.*]] = load <4 x i16>, ptr [[B_ADDR]], align 8 -// LLVM: store <4 x i16> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 -// LLVM: store <4 x i16> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 -// LLVM: [[INTRN_A:%.*]] = load <4 x i16>, ptr [[P0_ADDR]], align 8 -// LLVM: [[INTRN_B:%.*]] = load <4 x i16>, ptr [[P1_ADDR]], align 8 -// LLVM: {{%.*}} = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> [[INTRN_A]], <4 x i16> [[INTRN_B]]) -// LLVM: ret <4 x i16> - -uint32x2_t test_vqadd_u32(uint32x2_t a, uint32x2_t b) { - return vqadd_u32(a,b); -} - -// CIR-LABEL: vqadd_u32 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : -// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// CIR: cir.return - -// LLVM: {{.*}}test_vqadd_u32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]]) -// LLVM: store <2 x i32> [[A]], ptr [[A_ADDR:%.*]], align 8 -// LLVM: store <2 x i32> [[B]], ptr [[B_ADDR:%.*]], align 8 -// LLVM: [[TMP_A:%.*]] = load <2 x i32>, ptr [[A_ADDR]], align 8 -// LLVM: [[TMP_B:%.*]] = load <2 x i32>, ptr [[B_ADDR]], align 8 -// LLVM: store <2 x i32> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 -// LLVM: store <2 x i32> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 -// LLVM: [[INTRN_A:%.*]] = load <2 x i32>, ptr [[P0_ADDR]], align 8 -// LLVM: [[INTRN_B:%.*]] = load <2 x i32>, ptr [[P1_ADDR]], align 8 -// LLVM: {{%.*}} = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> [[INTRN_A]], <2 x i32> [[INTRN_B]]) -// LLVM: ret <2 x i32> - -int32x2_t test_vqadd_s32(int32x2_t a, int32x2_t b) { - return vqadd_s32(a,b); -} - -// CIR-LABEL: vqadd_s32 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : -// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// CIR: cir.return - -// LLVM: {{.*}}test_vqadd_s32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]]) -// LLVM: store <2 x i32> [[A]], ptr [[A_ADDR:%.*]], align 8 -// LLVM: store <2 x i32> [[B]], ptr [[B_ADDR:%.*]], align 8 -// LLVM: [[TMP_A:%.*]] = load <2 x i32>, ptr [[A_ADDR]], align 8 -// LLVM: [[TMP_B:%.*]] = load <2 x i32>, ptr [[B_ADDR]], align 8 -// LLVM: store <2 x i32> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 -// LLVM: store <2 x i32> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 -// LLVM: [[INTRN_A:%.*]] = load <2 x i32>, ptr [[P0_ADDR]], align 8 -// LLVM: [[INTRN_B:%.*]] = load <2 x i32>, ptr [[P1_ADDR]], align 8 -// LLVM: {{%.*}} = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> [[INTRN_A]], <2 x i32> [[INTRN_B]]) -// LLVM: ret <2 x i32> - -uint64x1_t test_vqadd_u64(uint64x1_t a, uint64x1_t b) { - return vqadd_u64(a,b); -} - -// CIR-LABEL: vqadd_u64 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : -// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// CIR: cir.return - -// LLVM: {{.*}}test_vqadd_u64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]]) -// LLVM: store <1 x i64> [[A]], ptr [[A_ADDR:%.*]], align 8 -// LLVM: store <1 x i64> [[B]], ptr [[B_ADDR:%.*]], align 8 -// LLVM: [[TMP_A:%.*]] = load <1 x i64>, ptr [[A_ADDR]], align 8 -// LLVM: [[TMP_B:%.*]] = load <1 x i64>, ptr [[B_ADDR]], align 8 -// LLVM: store <1 x i64> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 -// LLVM: store <1 x i64> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 -// LLVM: [[INTRN_A:%.*]] = load <1 x i64>, ptr [[P0_ADDR]], align 8 -// LLVM: [[INTRN_B:%.*]] = load <1 x i64>, ptr [[P1_ADDR]], align 8 -// LLVM: {{%.*}} = call <1 x i64> @llvm.aarch64.neon.uqadd.v1i64(<1 x i64> [[INTRN_A]], <1 x i64> [[INTRN_B]]) -// LLVM: ret <1 x i64> - -int64x1_t test_vqadd_s64(int64x1_t a, int64x1_t b) { - return vqadd_s64(a,b); -} - -// CIR-LABEL: vqadd_s64 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : -// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// CIR: cir.return - -// LLVM: {{.*}}test_vqadd_s64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]]) -// LLVM: store <1 x i64> [[A]], ptr [[A_ADDR:%.*]], align 8 -// LLVM: store <1 x i64> [[B]], ptr [[B_ADDR:%.*]], align 8 -// LLVM: [[TMP_A:%.*]] = load <1 x i64>, ptr [[A_ADDR]], align 8 -// LLVM: [[TMP_B:%.*]] = load <1 x i64>, ptr [[B_ADDR]], align 8 -// LLVM: store <1 x i64> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8 -// LLVM: store <1 x i64> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8 -// LLVM: [[INTRN_A:%.*]] = load <1 x i64>, ptr [[P0_ADDR]], align 8 -// LLVM: [[INTRN_B:%.*]] = load <1 x i64>, ptr [[P1_ADDR]], align 8 -// LLVM: {{%.*}} = call <1 x i64> @llvm.aarch64.neon.sqadd.v1i64(<1 x i64> [[INTRN_A]], <1 x i64> [[INTRN_B]]) -// LLVM: ret <1 x i64> From f7a8e47be356a2e883a7fe92abb76548502c7bc9 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Wed, 2 Oct 2024 22:21:49 +0300 Subject: [PATCH 1908/2301] [CIR][Lowering] Fix Global Attr Lowering (#906) Consider the following code snippet `tmp.c`: ``` #define N 3200 struct S { double a[N]; double b[N]; } s; double *b = s.b; void foo() { double x = 0; for (int i = 0; i < N; i++) x += b[i]; } int main() { foo(); return 0; } ``` Running `bin/clang tmp.c -fclangir -o tmp && ./tmp` causes a segmentation fault. I compared the LLVM IR with and without CIR and noticed a difference which causes this: `@b = global ptr getelementptr inbounds (%struct.S, ptr @s, i32 0, i32 1)` // no CIR `@b = global ptr getelementptr inbounds (%struct.S, ptr @s, i32 1)` // with CIR It seems there is a missing index when creating global pointers from structs. I have updated `Lowering/DirectToLLVM/LowerToLLVM.cpp`, and added a few tests. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 5 ++ clang/test/CIR/Lowering/global-ptr.c | 54 +++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 clang/test/CIR/Lowering/global-ptr.c diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5bb38735d8cd..027858d1c322 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -407,6 +407,11 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, if (globalAttr.getIndices()) { llvm::SmallVector indices; + + if (auto stTy = dyn_cast(sourceType)) + if (stTy.isIdentified()) + indices.push_back(0); + for (auto idx : globalAttr.getIndices()) { auto intAttr = dyn_cast(idx); assert(intAttr && "index must be integers"); diff --git a/clang/test/CIR/Lowering/global-ptr.c b/clang/test/CIR/Lowering/global-ptr.c new file mode 100644 index 000000000000..02d0e2aaa33c --- /dev/null +++ b/clang/test/CIR/Lowering/global-ptr.c @@ -0,0 +1,54 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +// LLVM: %struct.S1 = type { [3200 x double], [3200 x double] } +// LLVM: %struct.S2 = type { [10 x ptr] } +// LLVM: %struct.S3 = type { [2000 x i32], [2000 x i32], [2000 x i32] } +// LLVM: %struct.S4 = type { i32, i32, i32 } +// LLVM: %union.U1 = type { [2000 x i32] } + +// LLVM: @s1 = global %struct.S1 zeroinitializer, align 8 +// LLVM: @b1 = global ptr getelementptr inbounds (%struct.S1, ptr @s1, i32 0, i32 1), align 8 +// LLVM: @s2 = global %struct.S2 zeroinitializer, align 8 +// LLVM: @b2 = global ptr @s2, align 8 +// LLVM: @s3 = global %struct.S3 zeroinitializer, align 4 +// LLVM: @b3 = global ptr getelementptr inbounds (%struct.S3, ptr @s3, i32 0, i32 2), align 8 +// LLVM: @s4 = global %struct.S4 zeroinitializer, align 4 +// LLVM: @b4 = global ptr getelementptr inbounds (%struct.S4, ptr @s4, i32 0, i32 2), align 8 +// LLVM: @u1 = global %union.U1 zeroinitializer, align 4 +// LLVM: @b5 = global ptr @u1, align 8 + +struct S1 { + double a[3200]; + double b[3200]; +} s1; + +double *b1 = s1.b; + +struct S2 { + double* a[10]; +} s2; + +double **b2 = s2.a; + +struct S3 { + int a[2000]; + int b[2000]; + int c[2000]; +} s3; + +double *b3 = s3.c; + +struct S4 { + int a, b, c; +} s4; + +int* b4 = &s4.c; + +union U1 { + int a[2000]; + int b[2000]; + int c[2000]; +} u1; + +double *b5 = u1.a; \ No newline at end of file From 09e97fa01de34c341c5815cdff46f212fb45293e Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Wed, 2 Oct 2024 18:23:55 -0400 Subject: [PATCH 1909/2301] [CIR][CIRGen][Builtin] Implement builtin __sync_fetch_and_sub (#932) as title. Notice this is not target specific nor neon intrinsics. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 10 ++++ clang/test/CIR/CodeGen/atomic.cpp | 72 ++++++++++++++++++++++++- 2 files changed, 81 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 1959b02ff193..eb3a67a7ebf6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1164,6 +1164,16 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return buildBinaryAtomic(*this, mlir::cir::AtomicFetchKind::Add, E); } + case Builtin::BI__sync_fetch_and_sub: + llvm_unreachable("Shouldn't make it through sema"); + case Builtin::BI__sync_fetch_and_sub_1: + case Builtin::BI__sync_fetch_and_sub_2: + case Builtin::BI__sync_fetch_and_sub_4: + case Builtin::BI__sync_fetch_and_sub_8: + case Builtin::BI__sync_fetch_and_sub_16: { + return buildBinaryAtomic(*this, mlir::cir::AtomicFetchKind::Sub, E); + } + case Builtin::BI__sync_val_compare_and_swap_1: case Builtin::BI__sync_val_compare_and_swap_2: case Builtin::BI__sync_val_compare_and_swap_4: diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index abf21d024957..3e68db4aa053 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -351,6 +351,19 @@ void inc_int(int* a, int b) { // LLVM-LABEL: @_Z7inc_int // LLVM: atomicrmw add ptr {{.*}}, i32 {{.*}} seq_cst, align 4 +void sub_int(int* a, int b) { + int c = __sync_fetch_and_sub(a, b); +} + +// CHECK-LABEL: _Z7sub_int +// CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CHECK: %[[VAL:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[RES:.*]] = cir.atomic.fetch(sub, %[[PTR]] : !cir.ptr, %[[VAL]] : !s32i, seq_cst) fetch_first : !s32i +// CHECK: cir.store %[[RES]], {{.*}} : !s32i, !cir.ptr + +// LLVM-LABEL: _Z7sub_int +// LLVM: atomicrmw sub ptr {{.*}}, i32 {{.*}} seq_cst, align 4 + // CHECK-LABEL: @_Z8inc_long // CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !s64i, seq_cst) fetch_first : !s64i @@ -362,6 +375,17 @@ void inc_long(long* a, long b) { long c = __sync_fetch_and_add(a, 2); } +// CHECK-LABEL: @_Z8sub_long +// CHECK: cir.atomic.fetch(sub, {{.*}} : !cir.ptr, {{.*}} : !s64i, seq_cst) fetch_first : !s64i + +// LLVM-LABEL: @_Z8sub_long +// LLVM: atomicrmw sub ptr {{.*}}, i64 {{.*}} seq_cst, align 8 + +void sub_long(long* a, long b) { + long c = __sync_fetch_and_sub(a, 2); +} + + // CHECK-LABEL: @_Z9inc_short // CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !s16i, seq_cst) fetch_first : !s16i @@ -371,6 +395,16 @@ void inc_short(short* a, short b) { short c = __sync_fetch_and_add(a, 2); } +// CHECK-LABEL: @_Z9sub_short +// CHECK: cir.atomic.fetch(sub, {{.*}} : !cir.ptr, {{.*}} : !s16i, seq_cst) fetch_first : !s16i + +// LLVM-LABEL: @_Z9sub_short +// LLVM: atomicrmw sub ptr {{.*}}, i16 {{.*}} seq_cst, align 2 +void sub_short(short* a, short b) { + short c = __sync_fetch_and_sub(a, 2); +} + + // CHECK-LABEL: @_Z8inc_byte // CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !s8i, seq_cst) fetch_first : !s8i @@ -380,6 +414,14 @@ void inc_byte(char* a, char b) { char c = __sync_fetch_and_add(a, b); } +// CHECK-LABEL: @_Z8sub_byte +// CHECK: cir.atomic.fetch(sub, {{.*}} : !cir.ptr, {{.*}} : !s8i, seq_cst) fetch_first : !s8i + +// LLVM-LABEL: @_Z8sub_byte +// LLVM: atomicrmw sub ptr {{.*}}, i8 {{.*}} seq_cst, align 1 +void sub_byte(char* a, char b) { + char c = __sync_fetch_and_sub(a, b); +} // CHECK-LABEL: @_Z12cmp_bool_int // CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr @@ -481,6 +523,15 @@ void inc_uint(unsigned int* a, int b) { unsigned int c = __sync_fetch_and_add(a, b); } +// CHECK-LABEL: @_Z8sub_uint +// CHECK: cir.atomic.fetch(sub, {{.*}} : !cir.ptr, {{.*}} : !u32i, seq_cst) fetch_first : !u32i + +// LLVM-LABEL: @_Z8sub_uint +// LLVM: atomicrmw sub ptr {{.*}}, i32 {{.*}} seq_cst, align 4 +void sub_uint(unsigned int* a, int b) { + unsigned int c = __sync_fetch_and_sub(a, b); +} + // CHECK-LABEL: @_Z9inc_ulong // CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !u64i, seq_cst) fetch_first : !u64i @@ -490,6 +541,16 @@ void inc_ulong(unsigned long* a, long b) { unsigned long c = __sync_fetch_and_add(a, b); } +// CHECK-LABEL: @_Z9sub_ulong +// CHECK: cir.atomic.fetch(sub, {{.*}} : !cir.ptr, {{.*}} : !u64i, seq_cst) fetch_first : !u64i + +// LLVM-LABEL: @_Z9sub_ulong +// LLVM: atomicrmw sub ptr {{.*}}, i64 {{.*}} seq_cst, align 8 +void sub_ulong(unsigned long* a, long b) { + unsigned long c = __sync_fetch_and_sub(a, b); +} + + // CHECK-LABEL: @_Z9inc_uchar // CHECK: cir.atomic.fetch(add, {{.*}} : !cir.ptr, {{.*}} : !u8i, seq_cst) fetch_first : !u8i @@ -497,4 +558,13 @@ void inc_ulong(unsigned long* a, long b) { // LLVM: atomicrmw add ptr {{.*}}, i8 {{.*}} seq_cst, align 1 void inc_uchar(unsigned char* a, char b) { unsigned char c = __sync_fetch_and_add(a, b); -} \ No newline at end of file +} + +// CHECK-LABEL: @_Z9sub_uchar +// CHECK: cir.atomic.fetch(sub, {{.*}} : !cir.ptr, {{.*}} : !u8i, seq_cst) fetch_first : !u8i + +// LLVM-LABEL: @_Z9sub_uchar +// LLVM: atomicrmw sub ptr {{.*}}, i8 {{.*}} seq_cst, align 1 +void sub_uchar(unsigned char* a, char b) { + unsigned char c = __sync_fetch_and_sub(a, b); +} From b34974a885de1b2d7c6450934f23fee17dd8d93e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 23 Sep 2024 18:27:55 -0700 Subject: [PATCH 1910/2301] [CIR][CIRGen] Cleanup: enable conditional cleanup with exceptions Entails several minor changes: - Duplicate resume blocks around. - Disable LP caching, we repeat them as often as necessary. - Update maps accordingly for tracking places to patch up. - Make changes to clean up block handling. - Fix an issue in flatten cfg. --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 20 ++- clang/lib/CIR/CodeGen/CIRGenException.cpp | 113 +++++++----- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 12 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 14 +- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 2 +- .../test/CIR/CodeGen/conditional-cleanup.cpp | 163 +++++++++++++++++- 6 files changed, 275 insertions(+), 49 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index f6de23ca89b8..90fbdc6277e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -181,7 +181,11 @@ static void setupCleanupBlockActivation(CIRGenFunction &CGF, llvm_unreachable("NYI"); } - llvm_unreachable("NYI"); + auto builder = CGF.getBuilder(); + mlir::Location loc = var.getPointer().getLoc(); + mlir::Value trueOrFalse = + kind == ForActivation ? builder.getTrue(loc) : builder.getFalse(loc); + CGF.getBuilder().createStore(loc, trueOrFalse, var); } /// Deactive a cleanup that was created in an active state. @@ -421,7 +425,6 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { if (RequiresEHCleanup) { mlir::cir::TryOp tryOp = ehEntry->getParentOp()->getParentOfType(); - assert(tryOp && "expected available cir.try"); auto *nextAction = getEHDispatchBlock(EHParent, tryOp); (void)nextAction; @@ -469,6 +472,19 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { mlir::Block *blockToPatch = cleanupsToPatch[currBlock]; auto currYield = cast(blockToPatch->getTerminator()); builder.setInsertionPoint(currYield); + + // If nextAction is an EH resume block, also update all try locations + // for these "to-patch" blocks with the appropriate resume content. + if (nextAction == ehResumeBlock) { + if (auto tryToPatch = currYield->getParentOp() + ->getParentOfType()) { + mlir::Block *resumeBlockToPatch = + tryToPatch.getCatchUnwindEntryBlock(); + buildEHResumeBlock(/*isCleanup=*/true, resumeBlockToPatch, + tryToPatch.getLoc()); + } + } + buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag); currBlock = blockToPatch; } diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 38a94d6f6e19..903011159e47 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -252,19 +252,9 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { DeactivateCleanupBlock(cleanup, op); } -mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup, - mlir::cir::TryOp tryOp) { - - if (ehResumeBlock) - return ehResumeBlock; - // Just like some other try/catch related logic: return the basic block - // pointer but only use it to denote we're tracking things, but there - // shouldn't be any changes to that block after work done in this function. - assert(tryOp && "expected available cir.try"); - ehResumeBlock = tryOp.getCatchUnwindEntryBlock(); - if (!ehResumeBlock->empty()) - return ehResumeBlock; - +void CIRGenFunction::buildEHResumeBlock(bool isCleanup, + mlir::Block *ehResumeBlock, + mlir::Location loc) { auto ip = getBuilder().saveInsertionPoint(); getBuilder().setInsertionPointToStart(ehResumeBlock); @@ -283,9 +273,22 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup, llvm_unreachable("NYI"); } - getBuilder().create(tryOp.getLoc(), mlir::Value{}, - mlir::Value{}); + getBuilder().create(loc, mlir::Value{}, mlir::Value{}); getBuilder().restoreInsertionPoint(ip); +} + +mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup, + mlir::cir::TryOp tryOp) { + + if (ehResumeBlock) + return ehResumeBlock; + // Setup unwind. + assert(tryOp && "expected available cir.try"); + ehResumeBlock = tryOp.getCatchUnwindEntryBlock(); + if (!ehResumeBlock->empty()) + return ehResumeBlock; + + buildEHResumeBlock(isCleanup, ehResumeBlock, tryOp.getLoc()); return ehResumeBlock; } @@ -599,7 +602,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { /// Check whether this is a non-EH scope, i.e. a scope which doesn't /// affect exception handling. Currently, the only non-EH scopes are /// normal-only cleanup scopes. -static bool isNonEHScope(const EHScope &S) { +[[maybe_unused]] static bool isNonEHScope(const EHScope &S) { switch (S.getKind()) { case EHScope::Cleanup: return !cast(S).isEHCleanup(); @@ -625,15 +628,16 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { case EHScope::Catch: case EHScope::Cleanup: case EHScope::Filter: - if (auto *lpad = innermostEHScope.getCachedLandingPad()) - return lpad; + // CIR does not cache landing pads. + break; } // If there's an existing TryOp, it means we got a `cir.try` scope // that leads to this "landing pad" creation site. Otherwise, exceptions // are enabled but a throwing function is called anyways (common pattern // with function local static initializers). - { + mlir::ArrayAttr catches = tryOp.getCatchTypesAttr(); + if (!catches || catches.empty()) { // Save the current CIR generation state. mlir::OpBuilder::InsertionGuard guard(builder); assert(!MissingFeatures::generateDebugInfo() && "NYI"); @@ -727,14 +731,16 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { // Add final array of clauses into TryOp. tryOp.setCatchTypesAttr( mlir::ArrayAttr::get(builder.getContext(), clauses)); - - // In traditional LLVM codegen. this tells the backend how to generate the - // landing pad by generating a branch to the dispatch block. - mlir::Block *dispatch = - getEHDispatchBlock(EHStack.getInnermostEHScope(), tryOp); - (void)dispatch; } + // In traditional LLVM codegen. this tells the backend how to generate the + // landing pad by generating a branch to the dispatch block. In CIR, + // getEHDispatchBlock is used to populate blocks for later filing during + // cleanup handling. + mlir::Block *dispatch = + getEHDispatchBlock(EHStack.getInnermostEHScope(), tryOp); + (void)dispatch; + return tryOp; } @@ -755,8 +761,21 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si, // Otherwise, we should look at the actual scope. EHScope &scope = *EHStack.find(si); - auto *dispatchBlock = scope.getCachedEHDispatchBlock(); + + mlir::Block *originalBlock = nullptr; + if (dispatchBlock && tryOp) { + // If the dispatch is cached but comes from a different tryOp, make sure: + // - Populate current `tryOp` with a new dispatch block regardless. + // - Update the map to enqueue new dispatchBlock to also get a cleanup. See + // code at the end of the function. + mlir::Operation *parentOp = dispatchBlock->getParentOp(); + if (tryOp != parentOp->getParentOfType()) { + originalBlock = dispatchBlock; + dispatchBlock = nullptr; + } + } + if (!dispatchBlock) { switch (scope.getKind()) { case EHScope::Catch: { @@ -774,13 +793,25 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si, } case EHScope::Cleanup: { - assert(callWithExceptionCtx && "expected call information"); - { + if (callWithExceptionCtx && "expected call information") { mlir::OpBuilder::InsertionGuard guard(getBuilder()); assert(callWithExceptionCtx.getCleanup().empty() && "one per call: expected empty region at this point"); dispatchBlock = builder.createBlock(&callWithExceptionCtx.getCleanup()); builder.createYield(callWithExceptionCtx.getLoc()); + } else { + // Usually coming from general cir.scope cleanups that aren't + // tried to a specific throwing call. + assert(currLexScope && currLexScope->isRegular() && + "expected regular cleanup"); + dispatchBlock = currLexScope->getOrCreateCleanupBlock(builder); + if (dispatchBlock->empty()) { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPointToEnd(dispatchBlock); + mlir::Location loc = + currSrcLoc ? *currSrcLoc : builder.getUnknownLoc(); + builder.createYield(loc); + } } break; } @@ -793,6 +824,14 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si, llvm_unreachable("NYI"); break; } + } + + if (originalBlock) { + // As mentioned above: update the map to enqueue new dispatchBlock to also + // get a cleanup. + cleanupsToPatch[originalBlock] = dispatchBlock; + dispatchBlock = originalBlock; + } else { scope.setCachedEHDispatchBlock(dispatchBlock); } return dispatchBlock; @@ -826,18 +865,13 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl(mlir::cir::TryOp tryOp) { assert(!EHStack.empty()); assert(isInvokeDest()); - // Check the innermost scope for a cached landing pad. If this is - // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad. - auto *LP = EHStack.begin()->getCachedLandingPad(); - if (LP) - return LP; - + // CIR does not cache landing pads. const EHPersonality &Personality = EHPersonality::get(*this); // FIXME(cir): add personality function // if (!CurFn->hasPersonalityFn()) // CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality)); - + mlir::Operation *LP = nullptr; if (Personality.usesFuncletPads()) { // We don't need separate landing pads in the funclet model. llvm::errs() << "PersonalityFn: " << Personality.PersonalityFn << "\n"; @@ -848,14 +882,7 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl(mlir::cir::TryOp tryOp) { assert(LP); - // Cache the landing pad on the innermost scope. If this is a - // non-EH scope, cache the landing pad on the enclosing scope, too. - for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) { - ir->setCachedLandingPad(LP); - if (!isNonEHScope(*ir)) - break; - } - + // CIR does not cache landing pads. return LP; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index e686c7b0e63b..428f2b2ccedb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -363,9 +363,19 @@ void CIRGenFunction::LexicalScope::cleanup() { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPointToEnd(InsPt); + // If we still don't have a cleanup block, it means that `applyCleanup` + // below might be able to get us one. + mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder); + // Leverage and defers to RunCleanupsScope's dtor and scope handling. applyCleanup(); + // If we now have one after `applyCleanup`, hook it up properly. + if (!cleanupBlock && localScope->getCleanupBlock(builder)) { + cleanupBlock = localScope->getCleanupBlock(builder); + builder.create(InsPt->back().getLoc(), cleanupBlock); + } + if (localScope->Depth == 0) { buildImplicitReturn(); return; @@ -374,7 +384,7 @@ void CIRGenFunction::LexicalScope::cleanup() { // End of any local scope != function // Ternary ops have to deal with matching arms for yielding types // and do return a value, it must do its own cir.yield insertion. - if (!localScope->isTernary()) { + if (!localScope->isTernary() && !InsPt->mightHaveTerminator()) { !retVal ? builder.create(localScope->EndLoc) : builder.create(localScope->EndLoc, retVal); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 048a0c17e24a..322d740d58ef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1787,6 +1787,8 @@ class CIRGenFunction : public CIRGenTypeCache { /// Emits try/catch information for the current EH stack. mlir::cir::CallOp callWithExceptionCtx = nullptr; mlir::Operation *buildLandingPad(mlir::cir::TryOp tryOp); + void buildEHResumeBlock(bool isCleanup, mlir::Block *ehResumeBlock, + mlir::Location loc); mlir::Block *getEHResumeBlock(bool isCleanup, mlir::cir::TryOp tryOp); mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope, mlir::cir::TryOp tryOp); @@ -2370,7 +2372,17 @@ DominatingCIRValue::save(CIRGenFunction &CGF, mlir::Value value) { inline mlir::Value DominatingCIRValue::restore(CIRGenFunction &CGF, saved_type value) { - llvm_unreachable("NYI"); + // If the value says it wasn't saved, trust that it's still dominating. + if (!value.getInt()) + return value.getPointer(); + + // Otherwise, it should be an alloca instruction, as set up in save(). + auto alloca = cast(value.getPointer().getDefiningOp()); + mlir::Value val = CGF.getBuilder().createAlignedLoad( + alloca.getLoc(), alloca.getType(), alloca); + mlir::cir::LoadOp loadOp = cast(val.getDefiningOp()); + loadOp.setAlignment(alloca.getAlignment()); + return val; } /// A specialization of DominatingValue for RValue. diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index aa46b4b92643..7fd7cc6f21fc 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -544,7 +544,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Quick block cleanup: no indirection to the post try block. auto brOp = dyn_cast(afterTry->getTerminator()); - if (brOp) { + if (brOp && brOp.getDest()->hasNoPredecessors()) { mlir::Block *srcBlock = brOp.getDest(); rewriter.eraseOp(brOp); rewriter.mergeBlocks(srcBlock, afterTry); diff --git a/clang/test/CIR/CodeGen/conditional-cleanup.cpp b/clang/test/CIR/CodeGen/conditional-cleanup.cpp index 211a2672d1aa..e9272093a1cf 100644 --- a/clang/test/CIR/CodeGen/conditional-cleanup.cpp +++ b/clang/test/CIR/CodeGen/conditional-cleanup.cpp @@ -1,5 +1,9 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.eh.cir +// RUN: FileCheck --check-prefix=CIR_EH --input-file=%t.eh.cir %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.eh.flat.cir +// RUN: FileCheck --check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir %s typedef __typeof(sizeof(0)) size_t; @@ -22,6 +26,9 @@ namespace test7 { // CIR-DAG: ![[A:.*]] = !cir.struct, !cir.ptr>, ["__retval"] {alignment = 8 : i64} // CIR: cir.scope { @@ -71,4 +78,158 @@ namespace test7 { // CIR: } // CIR: } // CIR: cir.return -// CIR: } \ No newline at end of file +// CIR: } + +// CIR_EH-DAG: #[[$ATTR_0:.+]] = #cir.bool : !cir.bool +// CIR_EH-DAG: #[[$ATTR_1:.+]] = #cir +// CIR_EH-DAG: #[[$ATTR_2:.+]] = #cir, optnone = #cir.optnone})> +// CIR_EH-DAG: #[[$ATTR_3:.+]] = #cir.bool : !cir.bool + +// CIR_EH-LABEL: @_ZN5test74testEv +// CIR_EH: %[[VAL_0:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} +// CIR_EH: cir.scope { +// CIR_EH: %[[VAL_1:.*]] = cir.alloca !cir.bool, !cir.ptr, ["cleanup.cond"] {alignment = 1 : i64} +// CIR_EH: %[[VAL_2:.*]] = cir.alloca !ty_test73A3AA, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} +// CIR_EH: %[[VAL_3:.*]] = cir.alloca !cir.bool, !cir.ptr, ["cleanup.cond"] {alignment = 1 : i64} +// CIR_EH: %[[VAL_4:.*]] = cir.alloca !cir.bool, !cir.ptr, ["cleanup.cond"] {alignment = 1 : i64} +// CIR_EH: %[[VAL_5:.*]] = cir.alloca !ty_test73A3AA, !cir.ptr, ["ref.tmp1"] {alignment = 1 : i64} +// CIR_EH: %[[VAL_6:.*]] = cir.alloca !cir.bool, !cir.ptr, ["cleanup.cond"] {alignment = 1 : i64} +// CIR_EH: %[[VAL_7:.*]] = cir.const #[[$ATTR_0]] +// CIR_EH: %[[VAL_8:.*]] = cir.const #[[$ATTR_3]] +// CIR_EH: %[[VAL_9:.*]] = cir.const #[[$ATTR_0]] +// CIR_EH: %[[VAL_10:.*]] = cir.const #[[$ATTR_3]] +// CIR_EH: %[[VAL_11:.*]] = cir.const #[[$ATTR_0]] +// CIR_EH: %[[VAL_12:.*]] = cir.const #[[$ATTR_3]] +// CIR_EH: %[[VAL_13:.*]] = cir.const #[[$ATTR_0]] +// CIR_EH: %[[VAL_14:.*]] = cir.const #[[$ATTR_3]] +// CIR_EH: %[[VAL_15:.*]] = cir.const #{{.*}}<1> : !u64i +// CIR_EH: %[[VAL_16:.*]] = cir.call @_ZN5test71BnwEm(%[[VAL_15]]) : (!u64i) -> !cir.ptr +// CIR_EH: %[[VAL_17:.*]] = cir.const #{{.*}} : !cir.ptr +// CIR_EH: %[[VAL_18:.*]] = cir.cmp(ne, %[[VAL_16]], %[[VAL_17]]) : !cir.ptr, !cir.bool +// CIR_EH: %[[VAL_19:.*]] = cir.cast(bitcast, %[[VAL_16]] : !cir.ptr), !cir.ptr +// CIR_EH: cir.store align(1) %[[VAL_13]], %[[VAL_1]] : !cir.bool, !cir.ptr +// CIR_EH: cir.store align(1) %[[VAL_11]], %[[VAL_3]] : !cir.bool, !cir.ptr +// CIR_EH: cir.store align(1) %[[VAL_9]], %[[VAL_4]] : !cir.bool, !cir.ptr +// CIR_EH: cir.store align(1) %[[VAL_7]], %[[VAL_6]] : !cir.bool, !cir.ptr +// CIR_EH: cir.if %[[VAL_18]] { +// CIR_EH: cir.store %[[VAL_14]], %[[VAL_1]] : !cir.bool, !cir.ptr +// CIR_EH: cir.try synthetic cleanup { +// CIR_EH: cir.call exception @_ZN5test71AC1Ev(%[[VAL_2]]) : (!cir.ptr) -> () cleanup { +// CIR_EH: %[[VAL_20:.*]] = cir.load %[[VAL_1]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_20]] { +// CIR_EH: cir.call @_ZdlPvm(%[[VAL_16]], %[[VAL_15]]) : (!cir.ptr, !u64i) -> () +// CIR_EH: } +// CIR_EH: cir.yield +// CIR_EH: } +// CIR_EH: cir.yield +// CIR_EH: } catch [#{{.*}} { +// CIR_EH: cir.resume +// CIR_EH: }] +// CIR_EH: cir.store %[[VAL_12]], %[[VAL_3]] : !cir.bool, !cir.ptr +// CIR_EH: %[[VAL_21:.*]] = cir.const #{{.*}}<1> : !u64i +// CIR_EH: %[[VAL_22:.*]] = cir.call @_ZN5test71BnwEm(%[[VAL_21]]) : (!u64i) -> !cir.ptr +// CIR_EH: %[[VAL_23:.*]] = cir.const #{{.*}} : !cir.ptr +// CIR_EH: %[[VAL_24:.*]] = cir.cmp(ne, %[[VAL_22]], %[[VAL_23]]) : !cir.ptr, !cir.bool +// CIR_EH: %[[VAL_25:.*]] = cir.cast(bitcast, %[[VAL_22]] : !cir.ptr), !cir.ptr +// CIR_EH: cir.if %[[VAL_24]] { +// CIR_EH: cir.store %[[VAL_10]], %[[VAL_4]] : !cir.bool, !cir.ptr +// CIR_EH: cir.try synthetic cleanup { +// CIR_EH: cir.call exception @_ZN5test71AC1Ev(%[[VAL_5]]) : (!cir.ptr) -> () cleanup { +// CIR_EH: %[[VAL_26:.*]] = cir.load %[[VAL_4]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_26]] { +// CIR_EH: cir.call @_ZdlPvm(%[[VAL_22]], %[[VAL_21]]) : (!cir.ptr, !u64i) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_27:.*]] = cir.load %[[VAL_3]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_27]] { +// CIR_EH: cir.call @_ZN5test71AD1Ev(%[[VAL_2]]) : (!cir.ptr) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_28:.*]] = cir.load %[[VAL_1]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_28]] { +// CIR_EH: cir.call @_ZdlPvm(%[[VAL_16]], %[[VAL_15]]) : (!cir.ptr, !u64i) -> () +// CIR_EH: } +// CIR_EH: cir.yield +// CIR_EH: } +// CIR_EH: cir.yield +// CIR_EH: } catch [#{{.*}} { +// CIR_EH: cir.resume +// CIR_EH: }] +// CIR_EH: cir.store %[[VAL_8]], %[[VAL_6]] : !cir.bool, !cir.ptr +// CIR_EH: %[[VAL_29:.*]] = cir.const #{{.*}} : !cir.ptr +// CIR_EH: cir.try synthetic cleanup { +// CIR_EH: cir.call exception @_ZN5test71BC1ERKNS_1AEPS0_(%[[VAL_25]], %[[VAL_5]], %[[VAL_29]]) : (!cir.ptr, !cir.ptr, !cir.ptr) -> () cleanup { +// CIR_EH: %[[VAL_30:.*]] = cir.load %[[VAL_6]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_30]] { +// CIR_EH: cir.call @_ZN5test71AD1Ev(%[[VAL_5]]) : (!cir.ptr) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_31:.*]] = cir.load %[[VAL_4]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_31]] { +// CIR_EH: cir.call @_ZdlPvm(%[[VAL_22]], %[[VAL_21]]) : (!cir.ptr, !u64i) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_32:.*]] = cir.load %[[VAL_3]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_32]] { +// CIR_EH: cir.call @_ZN5test71AD1Ev(%[[VAL_2]]) : (!cir.ptr) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_33:.*]] = cir.load %[[VAL_1]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_33]] { +// CIR_EH: cir.call @_ZdlPvm(%[[VAL_16]], %[[VAL_15]]) : (!cir.ptr, !u64i) -> () +// CIR_EH: } +// CIR_EH: cir.yield +// CIR_EH: } +// CIR_EH: cir.yield +// CIR_EH: } catch [#{{.*}} { +// CIR_EH: cir.resume +// CIR_EH: }] +// CIR_EH: %[[VAL_34:.*]] = cir.const #[[$ATTR_0]] +// CIR_EH: cir.store %[[VAL_34]], %[[VAL_4]] : !cir.bool, !cir.ptr +// CIR_EH: } +// CIR_EH: cir.try synthetic cleanup { +// CIR_EH: cir.call exception @_ZN5test71BC1ERKNS_1AEPS0_(%[[VAL_19]], %[[VAL_2]], %[[VAL_25]]) : (!cir.ptr, !cir.ptr, !cir.ptr) -> () cleanup { +// CIR_EH: %[[VAL_35:.*]] = cir.load %[[VAL_6]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_35]] { +// CIR_EH: cir.call @_ZN5test71AD1Ev(%[[VAL_5]]) : (!cir.ptr) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_36:.*]] = cir.load %[[VAL_4]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_36]] { +// CIR_EH: cir.call @_ZdlPvm(%[[VAL_22]], %[[VAL_21]]) : (!cir.ptr, !u64i) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_37:.*]] = cir.load %[[VAL_3]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_37]] { +// CIR_EH: cir.call @_ZN5test71AD1Ev(%[[VAL_2]]) : (!cir.ptr) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_38:.*]] = cir.load %[[VAL_1]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_38]] { +// CIR_EH: cir.call @_ZdlPvm(%[[VAL_16]], %[[VAL_15]]) : (!cir.ptr, !u64i) -> () +// CIR_EH: } +// CIR_EH: cir.yield +// CIR_EH: } +// CIR_EH: cir.yield +// CIR_EH: } catch [#{{.*}} { +// CIR_EH: cir.resume +// CIR_EH: }] +// CIR_EH: %[[VAL_39:.*]] = cir.const #[[$ATTR_0]] +// CIR_EH: cir.store %[[VAL_39]], %[[VAL_1]] : !cir.bool, !cir.ptr +// CIR_EH: } +// CIR_EH: cir.store %[[VAL_19]], %[[VAL_0]] : !cir.ptr, !cir.ptr> +// CIR_EH: %[[VAL_40:.*]] = cir.load %[[VAL_6]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_40]] { +// CIR_EH: cir.call @_ZN5test71AD1Ev(%[[VAL_5]]) : (!cir.ptr) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_41:.*]] = cir.load %[[VAL_3]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_41]] { +// CIR_EH: cir.call @_ZN5test71AD1Ev(%[[VAL_2]]) : (!cir.ptr) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_42:.*]] = cir.load %[[VAL_3]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_42]] { +// CIR_EH: cir.call @_ZN5test71AD1Ev(%[[VAL_2]]) : (!cir.ptr) -> () +// CIR_EH: } +// CIR_EH: %[[VAL_43:.*]] = cir.load %[[VAL_1]] : !cir.ptr, !cir.bool +// CIR_EH: cir.if %[[VAL_43]] { +// CIR_EH: cir.call @_ZdlPvm(%[[VAL_16]], %[[VAL_15]]) : (!cir.ptr, !u64i) -> () +// CIR_EH: } +// CIR_EH: } +// CIR_EH: %[[VAL_44:.*]] = cir.load %[[VAL_0]] : !cir.ptr>, !cir.ptr +// CIR_EH: cir.return %[[VAL_44]] : !cir.ptr +// CIR_EH: } + +// Nothing special, just test it passes! +// CIR_FLAT_EH-LABEL: @_ZN5test74testEv \ No newline at end of file From 660f03377b3b1ce8d275172c6dadadcbeefbb54a Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 4 Oct 2024 19:51:44 -0400 Subject: [PATCH 1911/2301] [CIR][CIRGen][Builtin][Neon] Lower BI__builtin_neon_vmovn_v (#909) as title. The current implementation of this PR is use cir::CastOP integral casting to implement vector type truncation. Thus, LLVM lowering code has been change to accommodate it. In addition. Added code into [CIRGenBuiltinAArch64.cpp](https://github.com/llvm/clangir/pull/909/files#diff-6f7700013aa60ed524eb6ddcbab90c4dd288c384f9434547b038357868334932) to make it more similar to OG. ``` mlir::Type ty = vTy; if (!ty) ``` Added test case into neon.c as the file already contains similar vector move test cases such as vmovl --------- Co-authored-by: Guojin He --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 20 +++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 26 ++++--- clang/test/CIR/CodeGen/AArch64/neon.c | 72 +++++++++++++++++++ 3 files changed, 103 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index ca6f09156b66..2a1718ab28d8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2216,13 +2216,31 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( mlir::cir::VectorType vTy = GetNeonType(this, neonType, hasLegalHalfType, false, allowBFloatArgsAndRet); - if (!vTy) + mlir::Type ty = vTy; + if (!ty) return nullptr; unsigned intrinicId = llvmIntrinsic; if ((modifier & UnsignedAlts) && !isUnsigned) intrinicId = altLLVMIntrinsic; + // This first switch is for the intrinsics that cannot have a more generic + // codegen solution. + switch (builtinID) { + default: + break; + case NEON::BI__builtin_neon_vmovn_v: { + mlir::cir::VectorType qTy = builder.getExtendedElementVectorType( + vTy, mlir::cast(vTy.getEltType()).isSigned()); + ops[0] = builder.createBitcast(ops[0], qTy); + // It really is truncation in this context. + // In CIR, integral cast op supports vector of int type truncating. + return builder.createIntCast(ops[0], ty); + } + } + + // This second switch is for the intrinsics that might have a more generic + // codegen solution so we can use the common codegen in future. switch (builtinID) { default: llvm::errs() << getAArch64SIMDIntrinsicString(builtinID) << " "; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 027858d1c322..a1b0a7008e6c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -542,11 +542,9 @@ class CIRMemCpyOpLowering }; static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, - mlir::Value llvmSrc, - mlir::IntegerType llvmDstIntTy, - bool isUnsigned, uint64_t cirDstIntWidth) { - auto cirSrcWidth = - mlir::cast(llvmSrc.getType()).getWidth(); + mlir::Value llvmSrc, mlir::Type llvmDstIntTy, + bool isUnsigned, uint64_t cirSrcWidth, + uint64_t cirDstIntWidth) { if (cirSrcWidth == cirDstIntWidth) return llvmSrc; @@ -604,7 +602,7 @@ class CIRPtrStrideOpLowering auto llvmDstType = mlir::IntegerType::get(ctx, *layoutWidth); index = getLLVMIntCast(rewriter, index, llvmDstType, ptrStrideOp.getStride().getType().isUnsigned(), - *layoutWidth); + width, *layoutWidth); // Rewrite the sub in front of extensions/trunc if (rewriteSub) { @@ -709,10 +707,9 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { mlir::cir::IntType dstIntType = mlir::cast(elementTypeIfVector(dstType)); rewriter.replaceOp( - castOp, - getLLVMIntCast(rewriter, llvmSrcVal, - mlir::cast(llvmDstType), - srcIntType.isUnsigned(), dstIntType.getWidth())); + castOp, getLLVMIntCast(rewriter, llvmSrcVal, llvmDstType, + srcIntType.isUnsigned(), srcIntType.getWidth(), + dstIntType.getWidth())); break; } case mlir::cir::CastKind::floating: { @@ -2486,7 +2483,8 @@ class CIRShiftOpLowering // Ensure shift amount is the same type as the value. Some undefined // behavior might occur in the casts below as per [C99 6.5.7.3]. amt = getLLVMIntCast(rewriter, amt, mlir::cast(llvmTy), - !cirAmtTy.isSigned(), cirValTy.getWidth()); + !cirAmtTy.isSigned(), cirAmtTy.getWidth(), + cirValTy.getWidth()); // Lower to the proper LLVM shift operation. if (op.getIsShiftleft()) @@ -2618,9 +2616,9 @@ static mlir::Value createLLVMBitOp(mlir::Location loc, operand.getType(), operand); } - return getLLVMIntCast(rewriter, op->getResult(0), - mlir::cast(resultTy), - /*isUnsigned=*/true, resultIntTy.getWidth()); + return getLLVMIntCast( + rewriter, op->getResult(0), mlir::cast(resultTy), + /*isUnsigned=*/true, operandIntTy.getWidth(), resultIntTy.getWidth()); } class CIRBitClrsbOpLowering diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 54520e688a59..969751fe65b4 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -17511,3 +17511,75 @@ void test_vst1q_s64(int64_t *a, int64x2_t b) { // uint64_t test_vaddlv_u32(uint32x2_t a) { // return vaddlv_u32(a); // } + +uint8x8_t test_vmovn_u16(uint16x8_t a) { + return vmovn_u16(a); + // CIR-LABEL: vmovn_u16 + // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}@test_vmovn_u16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVN_1:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VMOVN_I:%.*]] = trunc <8 x i16> [[A]] to <8 x i8> + // LLVM: ret <8 x i8> [[VMOVN_I]] +} + +uint16x4_t test_vmovn_u32(uint32x4_t a) { + return vmovn_u32(a); + // CIR-LABEL: vmovn_u32 + // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}@test_vmovn_u32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVN_1:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VMOVN_I:%.*]] = trunc <4 x i32> [[A]] to <4 x i16> + // LLVM: ret <4 x i16> [[VMOVN_I]] +} + +uint32x2_t test_vmovn_u64(uint64x2_t a) { + return vmovn_u64(a); + // CIR-LABEL: vmovn_u64 + // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}@test_vmovn_u64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVN_1:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VMOVN_I:%.*]] = trunc <2 x i64> [[A]] to <2 x i32> + // LLVM: ret <2 x i32> [[VMOVN_I]] +} + +int8x8_t test_vmovn_s16(int16x8_t a) { + return vmovn_s16(a); + // CIR-LABEL: vmovn_s16 + // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}@test_vmovn_s16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVN_1:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VMOVN_I:%.*]] = trunc <8 x i16> [[A]] to <8 x i8> + // LLVM: ret <8 x i8> [[VMOVN_I]] +} + +int16x4_t test_vmovn_s32(int32x4_t a) { + return vmovn_s32(a); + // CIR-LABEL: vmovn_s32 + // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}@test_vmovn_s32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVN_1:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VMOVN_I:%.*]] = trunc <4 x i32> [[A]] to <4 x i16> + // LLVM: ret <4 x i16> [[VMOVN_I]] +} + +int32x2_t test_vmovn_s64(int64x2_t a) { + return vmovn_s64(a); + // CIR-LABEL: vmovn_s64 + // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}@test_vmovn_s64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVN_1:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VMOVN_I:%.*]] = trunc <2 x i64> [[A]] to <2 x i32> + // LLVM: ret <2 x i32> [[VMOVN_I]] +} From 3e3a063c70259372d5d5e746aaf275f05d6548f0 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 4 Oct 2024 19:52:23 -0400 Subject: [PATCH 1912/2301] [CIR][CIRGen][Builtin][Neon] Lower neon vst1q_lane and vst1_lane (#935) as title. Also changed [neon-ldst.c](https://github.com/llvm/clangir/compare/main...ghehg:clangir-llvm-ghehg:macM3?expand=1#diff-ea4814b6503bff2b7bc4afc6400565e6e89e5785bfcda587dc8401d8de5d3a22) to make it have the same RUN options as OG [clang/test/CodeGen/aarch64-neon-intrinsics.c](https://github.com/llvm/clangir/blob/main/clang/test/CodeGen/aarch64-neon-intrinsics.c) Those options help us to avoid checking load/store pairs thus make the test less verbose and easier to compare against OG. Co-authored-by: Guojin He --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 14 +- clang/test/CIR/CodeGen/AArch64/neon-ldst.c | 394 +++++++++++++----- 2 files changed, 300 insertions(+), 108 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 2a1718ab28d8..f6dad0196e72 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -1947,7 +1947,9 @@ static mlir::cir::VectorType GetNeonType(CIRGenFunction *CGF, CGF->getCIRGenModule().FloatTy, V1Ty ? 1 : (2 << IsQuad)); case NeonTypeFlags::Float64: - llvm_unreachable("NYI"); + return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), + CGF->getCIRGenModule().DoubleTy, + V1Ty ? 1 : (1 << IsQuad)); } llvm_unreachable("Unknown vector element type!"); } @@ -3437,8 +3439,14 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } case NEON::BI__builtin_neon_vst1_lane_v: - case NEON::BI__builtin_neon_vst1q_lane_v: - llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vst1q_lane_v: { + Ops[1] = builder.createBitcast(Ops[1], Ty); + Ops[1] = builder.create(Ops[1].getLoc(), Ops[1], + Ops[2]); + (void)builder.createAlignedStore(getLoc(E->getExprLoc()), Ops[1], Ops[0], + PtrOp0.getAlignment()); + return Ops[1]; + } case NEON::BI__builtin_neon_vstl1_lane_s64: case NEON::BI__builtin_neon_vstl1q_lane_s64: { llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c index d112f3a81808..6b6d46cbf03d 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c @@ -1,8 +1,12 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -ffreestanding -emit-cir -target-feature +neon %s -o %t.cir +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -ffreestanding -emit-llvm -target-feature +neon %s -o %t.ll + +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-llvm -o - %s \ +// RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // REQUIRES: aarch64-registered-target || arm-registered-target @@ -22,13 +26,9 @@ int8x8_t test_vld1_lane_s8(int8_t const * ptr, int8x8_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_s8(ptr{{.*}}[[PTR:%.*]], <8 x i8>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <8 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[SRC_VAL:%.*]] = load <8 x i8>, ptr [[SRC_ADDR]], align 8 -// LLVM: store <8 x i8> [[SRC_VAL]], ptr [[S1:%.*]], align 8 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <8 x i8>, ptr [[S1]], align 8 -// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR_VAL]], align 1 +// LLVM: [[INTRN_VEC:%.*]] = load <8 x i8>, ptr [[SRC_ADDR]], align 8 +// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR]], align 1 // LLVM: {{.*}} = insertelement <8 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 7 // LLVM: ret <8 x i8> {{.*}} @@ -43,13 +43,9 @@ int8x16_t test_vld1q_lane_s8(int8_t const * ptr, int8x16_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_s8(ptr{{.*}}[[PTR:%.*]], <16 x i8>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <16 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[SRC_VAL:%.*]] = load <16 x i8>, ptr [[SRC_ADDR]], align 16 -// LLVM: store <16 x i8> [[SRC_VAL]], ptr [[S1:%.*]], align 16 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <16 x i8>, ptr [[S1]], align 16 -// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR_VAL]], align 1 +// LLVM: [[INTRN_VEC:%.*]] = load <16 x i8>, ptr [[SRC_ADDR]], align 16 +// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR]], align 1 // LLVM: {{.*}} = insertelement <16 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 15 // LLVM: ret <16 x i8> {{.*}} @@ -64,17 +60,12 @@ uint8x16_t test_vld1q_lane_u8(uint8_t const * ptr, uint8x16_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_u8(ptr{{.*}}[[PTR:%.*]], <16 x i8>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <16 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[SRC_VAL:%.*]] = load <16 x i8>, ptr [[SRC_ADDR]], align 16 -// LLVM: store <16 x i8> [[SRC_VAL]], ptr [[S1:%.*]], align 16 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <16 x i8>, ptr [[S1]], align 16 -// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR_VAL]], align 1 +// LLVM: [[INTRN_VEC:%.*]] = load <16 x i8>, ptr [[SRC_ADDR]], align 16 +// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR]], align 1 // LLVM: {{.*}} = insertelement <16 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 15 // LLVM: ret <16 x i8> {{.*}} - uint8x8_t test_vld1_lane_u8(uint8_t const * ptr, uint8x8_t src) { return vld1_lane_u8(ptr, src, 7); } @@ -86,17 +77,12 @@ uint8x8_t test_vld1_lane_u8(uint8_t const * ptr, uint8x8_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_u8(ptr{{.*}}[[PTR:%.*]], <8 x i8>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <8 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[SRC_VAL:%.*]] = load <8 x i8>, ptr [[SRC_ADDR]], align 8 -// LLVM: store <8 x i8> [[SRC_VAL]], ptr [[S1:%.*]], align 8 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <8 x i8>, ptr [[S1]], align 8 -// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR_VAL]], align 1 +// LLVM: [[INTRN_VEC:%.*]] = load <8 x i8>, ptr [[SRC_ADDR]], align 8 +// LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR]], align 1 // LLVM: {{.*}} = insertelement <8 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 7 // LLVM: ret <8 x i8> {{.*}} - int16x4_t test_vld1_lane_s16(int16_t const * ptr, int16x4_t src) { return vld1_lane_s16(ptr, src, 3); } @@ -108,15 +94,11 @@ int16x4_t test_vld1_lane_s16(int16_t const * ptr, int16x4_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_s16(ptr{{.*}}[[PTR:%.*]], <4 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <4 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[SRC_VAL:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 -// LLVM: store <4 x i16> [[SRC_VAL]], ptr [[S1:%.*]], align 8 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <4 x i16>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i16> [[INTRN_VEC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <4 x i16> -// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR_VAL]], align 2 +// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR]], align 2 // LLVM: {{.*}} = insertelement <4 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 3 // LLVM: ret <4 x i16> {{.*}} @@ -131,15 +113,11 @@ uint16x4_t test_vld1_lane_u16(uint16_t const * ptr, uint16x4_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_u16(ptr{{.*}}[[PTR:%.*]], <4 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <4 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[SRC_VAL:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 -// LLVM: store <4 x i16> [[SRC_VAL]], ptr [[S1:%.*]], align 8 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <4 x i16>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i16> [[INTRN_VEC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <4 x i16> -// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR_VAL]], align 2 +// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR]], align 2 // LLVM: {{.*}} = insertelement <4 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 3 // LLVM: ret <4 x i16> {{.*}} @@ -154,15 +132,11 @@ int16x8_t test_vld1q_lane_s16(int16_t const * ptr, int16x8_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_s16(ptr{{.*}}[[PTR:%.*]], <8 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <8 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[SRC_VAL:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 -// LLVM: store <8 x i16> [[SRC_VAL]], ptr [[S1:%.*]], align 16 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <8 x i16>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <8 x i16> [[INTRN_VEC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <8 x i16> -// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR_VAL]], align 2 +// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR]], align 2 // LLVM: {{.*}} = insertelement <8 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 7 // LLVM: ret <8 x i16> {{.*}} @@ -177,21 +151,14 @@ uint16x8_t test_vld1q_lane_u16(uint16_t const * ptr, uint16x8_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_u16(ptr{{.*}}[[PTR:%.*]], <8 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <8 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[SRC_VAL:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 -// LLVM: store <8 x i16> [[SRC_VAL]], ptr [[S1:%.*]], align 16 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <8 x i16>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <8 x i16> [[INTRN_VEC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <8 x i16> -// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR_VAL]], align 2 +// LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR]], align 2 // LLVM: {{.*}} = insertelement <8 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 7 // LLVM: ret <8 x i16> {{.*}} - - - int32x2_t test_vld1_lane_s32(int32_t const * ptr, int32x2_t src) { return vld1_lane_s32(ptr, src, 1); } @@ -203,15 +170,11 @@ int32x2_t test_vld1_lane_s32(int32_t const * ptr, int32x2_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_s32(ptr{{.*}}[[PTR:%.*]], <2 x i32>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <2 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[SRC_VAL:%.*]] = load <2 x i32>, ptr [[SRC_ADDR]], align 8 -// LLVM: store <2 x i32> [[SRC_VAL]], ptr [[S1:%.*]], align 8 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <2 x i32>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <2 x i32>, ptr [[SRC_ADDR]], align 8 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i32> [[INTRN_VEC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <2 x i32> -// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR_VAL]], align 4 +// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR]], align 4 // LLVM: {{.*}} = insertelement <2 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 1 // LLVM: ret <2 x i32> {{.*}} @@ -226,15 +189,11 @@ uint32x2_t test_vld1_lane_u32(uint32_t const * ptr, uint32x2_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_u32(ptr{{.*}}[[PTR:%.*]], <2 x i32>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <2 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[SRC_VAL:%.*]] = load <2 x i32>, ptr [[SRC_ADDR]], align 8 -// LLVM: store <2 x i32> [[SRC_VAL]], ptr [[S1:%.*]], align 8 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <2 x i32>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <2 x i32>, ptr [[SRC_ADDR]], align 8 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i32> [[INTRN_VEC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <2 x i32> -// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR_VAL]], align 4 +// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR]], align 4 // LLVM: {{.*}} = insertelement <2 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 1 // LLVM: ret <2 x i32> {{.*}} @@ -250,15 +209,11 @@ int32x4_t test_vld1q_lane_s32(int32_t const * ptr, int32x4_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_s32(ptr{{.*}}[[PTR:%.*]], <4 x i32>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <4 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[SRC_VAL:%.*]] = load <4 x i32>, ptr [[SRC_ADDR]], align 16 -// LLVM: store <4 x i32> [[SRC_VAL]], ptr [[S1:%.*]], align 16 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <4 x i32>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC:%.*]] = load <4 x i32>, ptr [[SRC_ADDR]], align 16 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i32> [[INTRN_VEC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <4 x i32> -// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR_VAL]], align 4 +// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR]], align 4 // LLVM: {{.*}} = insertelement <4 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 3 // LLVM: ret <4 x i32> {{.*}} @@ -274,15 +229,11 @@ uint32x4_t test_vld1q_lane_u32(uint32_t const * ptr, uint32x4_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_u32(ptr{{.*}}[[PTR:%.*]], <4 x i32>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <4 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[SRC_VAL:%.*]] = load <4 x i32>, ptr [[SRC_ADDR]], align 16 -// LLVM: store <4 x i32> [[SRC_VAL]], ptr [[S1:%.*]], align 16 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <4 x i32>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC:%.*]] = load <4 x i32>, ptr [[SRC_ADDR]], align 16 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i32> [[INTRN_VEC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <4 x i32> -// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR_VAL]], align 4 +// LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR]], align 4 // LLVM: {{.*}} = insertelement <4 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 3 // LLVM: ret <4 x i32> {{.*}} @@ -297,15 +248,11 @@ int64x1_t test_vld1_lane_s64(int64_t const * ptr, int64x1_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_s64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <1 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[SRC_VAL:%.*]] = load <1 x i64>, ptr [[SRC_ADDR]], align 8 -// LLVM: store <1 x i64> [[SRC_VAL]], ptr [[S1:%.*]], align 8 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <1 x i64>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <1 x i64>, ptr [[SRC_ADDR]], align 8 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <1 x i64> [[INTRN_VEC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <1 x i64> -// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR_VAL]], align 8 +// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR]], align 8 // LLVM: {{.*}} = insertelement <1 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 0 // LLVM: ret <1 x i64> {{.*}} @@ -320,15 +267,11 @@ uint64x1_t test_vld1_lane_u64(uint64_t const * ptr, uint64x1_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_u64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <1 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[SRC_VAL:%.*]] = load <1 x i64>, ptr [[SRC_ADDR]], align 8 -// LLVM: store <1 x i64> [[SRC_VAL]], ptr [[S1:%.*]], align 8 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <1 x i64>, ptr [[S1]], align 8 +// LLVM: [[INTRN_VEC:%.*]] = load <1 x i64>, ptr [[SRC_ADDR]], align 8 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <1 x i64> [[INTRN_VEC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <1 x i64> -// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR_VAL]], align 8 +// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR]], align 8 // LLVM: {{.*}} = insertelement <1 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 0 // LLVM: ret <1 x i64> {{.*}} @@ -343,15 +286,11 @@ int64x2_t test_vld1q_lane_s64(int64_t const * ptr, int64x2_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_s64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <2 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[SRC_VAL:%.*]] = load <2 x i64>, ptr [[SRC_ADDR]], align 16 -// LLVM: store <2 x i64> [[SRC_VAL]], ptr [[S1:%.*]], align 16 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <2 x i64>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC:%.*]] = load <2 x i64>, ptr [[SRC_ADDR]], align 16 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i64> [[INTRN_VEC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <2 x i64> -// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR_VAL]], align 8 +// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR]], align 8 // LLVM: {{.*}} = insertelement <2 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 1 // LLVM: ret <2 x i64> {{.*}} @@ -366,14 +305,259 @@ uint64x2_t test_vld1q_lane_u64(uint64_t const * ptr, uint64x2_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_u64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) -// LLVM: store ptr [[PTR]], ptr [[PTR_ADDR:%.*]], align 8 // LLVM: store <2 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[SRC_VAL:%.*]] = load <2 x i64>, ptr [[SRC_ADDR]], align 16 -// LLVM: store <2 x i64> [[SRC_VAL]], ptr [[S1:%.*]], align 16 -// LLVM: [[PTR_VAL:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <2 x i64>, ptr [[S1]], align 16 +// LLVM: [[INTRN_VEC:%.*]] = load <2 x i64>, ptr [[SRC_ADDR]], align 16 // LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i64> [[INTRN_VEC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <2 x i64> -// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR_VAL]], align 8 +// LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR]], align 8 // LLVM: {{.*}} = insertelement <2 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 1 // LLVM: ret <2 x i64> {{.*}} + +void test_vst1_lane_s8(int8_t * ptr, int8x8_t src) { + vst1_lane_s8(ptr, src, 7); +} + +// CIR-LABEL: test_vst1_lane_s8 +// CIR: [[LANE:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(1) [[VAL]], [[PTR]] : !s8i, !cir.ptr + +// LLVM: {{.*}}test_vst1_lane_s8(ptr{{.*}}[[PTR:%.*]], <8 x i8>{{.*}}[[SRC:%.*]]) +// LLVM: store <8 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[VEC:%.*]] = load <8 x i8>, ptr [[SRC_ADDR]], align 8 +// LLVM: [[RES:%.*]] = extractelement <8 x i8> [[VEC]], i32 7 +// LLVM: store i8 [[RES]], ptr [[PTR]], align 1 + +void test_vst1_lane_s16(int16_t * ptr, int16x4_t src) { + vst1_lane_s16(ptr, src, 3); +} + +// CIR-LABEL: test_vst1_lane_s16 +// CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(2) [[VAL]], [[PTR]] : !s16i, !cir.ptr + +// LLVM: {{.*}}test_vst1_lane_s16(ptr{{.*}}[[PTR:%.*]], <4 x i16>{{.*}}[[SRC:%.*]]) +// LLVM: store <4 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[VEC:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x i16> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <4 x i16> +// LLVM: [[RES:%.*]] = extractelement <4 x i16> [[VEC_CAST1]], i32 3 +// LLVM: store i16 [[RES]], ptr [[PTR]], align 2 + +void test_vst1_lane_u16(uint16_t * ptr, uint16x4_t src) { + vst1_lane_u16(ptr, src, 3); +} + +// CIR-LABEL: test_vst1_lane_u16 +// CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(2) [[VAL]], [[PTR]] : !u16i, !cir.ptr + +// LLVM: {{.*}}test_vst1_lane_u16(ptr{{.*}}[[PTR:%.*]], <4 x i16>{{.*}}[[SRC:%.*]]) +// LLVM: store <4 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[VEC:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x i16> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <4 x i16> +// LLVM: [[RES:%.*]] = extractelement <4 x i16> [[VEC_CAST1]], i32 3 +// LLVM: store i16 [[RES]], ptr [[PTR]], align 2 + +void test_vst1_lane_s32(int32_t * ptr, int32x2_t src) { + vst1_lane_s32(ptr, src, 1); +} + +// CIR-LABEL: test_vst1_lane_s32 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(4) [[VAL]], [[PTR]] : !s32i, !cir.ptr + +// LLVM: {{.*}}test_vst1_lane_s32(ptr{{.*}}[[PTR:%.*]], <2 x i32>{{.*}}[[SRC:%.*]]) +// LLVM: store <2 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[VEC:%.*]] = load <2 x i32>, ptr [[SRC_ADDR]], align 8 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x i32> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <2 x i32> +// LLVM: [[RES:%.*]] = extractelement <2 x i32> [[VEC_CAST1]], i32 1 +// LLVM: store i32 [[RES]], ptr [[PTR]], align 4 + +void test_vst1_lane_f32(float32_t * ptr, float32x2_t src) { + vst1_lane_f32(ptr, src, 1); +} + +// CIR-LABEL: test_vst1_lane_f32 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(4) [[VAL]], [[PTR]] : !cir.float, !cir.ptr + +// LLVM: {{.*}}test_vst1_lane_f32(ptr{{.*}}[[PTR:%.*]], <2 x float>{{.*}}[[SRC:%.*]]) +// LLVM: store <2 x float> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[VEC:%.*]] = load <2 x float>, ptr [[SRC_ADDR]], align 8 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x float> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <2 x float> +// LLVM: [[RES:%.*]] = extractelement <2 x float> [[VEC_CAST1]], i32 1 +// LLVM: store float [[RES]], ptr [[PTR]], align 4 + +void test_vst1_lane_s64(int64_t * ptr, int64x1_t src) { + vst1_lane_s64(ptr, src, 0); +} + +// CIR-LABEL: test_vst1_lane_s64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) [[VAL]], [[PTR]] : !s64i, !cir.ptr + +// LLVM: {{.*}}test_vst1_lane_s64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: store <1 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[VEC:%.*]] = load <1 x i64>, ptr [[SRC_ADDR]], align 8 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <1 x i64> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <1 x i64> +// LLVM: [[RES:%.*]] = extractelement <1 x i64> [[VEC_CAST1]], i32 0 +// LLVM: store i64 [[RES]], ptr [[PTR]], align 8 + +void test_vst1_lane_f64(float64_t * ptr, float64x1_t src) { + vst1_lane_f64(ptr, src, 0); +} + +// CIR-LABEL: test_vst1_lane_f64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) [[VAL]], [[PTR]] : !cir.double, !cir.ptr + +// LLVM: {{.*}}test_vst1_lane_f64(ptr{{.*}}[[PTR:%.*]], <1 x double>{{.*}}[[SRC:%.*]]) +// LLVM: store <1 x double> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 +// LLVM: [[VEC:%.*]] = load <1 x double>, ptr [[SRC_ADDR]], align 8 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <1 x double> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <1 x double> +// LLVM: [[RES:%.*]] = extractelement <1 x double> [[VEC_CAST1]], i32 0 +// LLVM: store double [[RES]], ptr [[PTR]], align 8 + +void test_vst1q_lane_s8(int8_t * ptr, int8x16_t src) { + vst1q_lane_s8(ptr, src, 15); +} + +// CIR-LABEL: test_vst1q_lane_s8 +// CIR: [[LANE:%.*]] = cir.const #cir.int<15> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(1) [[VAL]], [[PTR]] : !s8i, !cir.ptr + +// LLVM: {{.*}}test_vst1q_lane_s8(ptr{{.*}}[[PTR:%.*]], <16 x i8>{{.*}}[[SRC:%.*]]) +// LLVM: store <16 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[VEC:%.*]] = load <16 x i8>, ptr [[SRC_ADDR]], align 16 +// LLVM: [[RES:%.*]] = extractelement <16 x i8> [[VEC]], i32 15 +// LLVM: store i8 [[RES]], ptr [[PTR]], align 1 + + +void test_vst1q_lane_s16(int16_t * ptr, int16x8_t src) { + vst1q_lane_s16(ptr, src, 7); +} + +// CIR-LABEL: test_vst1q_lane_s16 +// CIR: [[LANE:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(2) [[VAL]], [[PTR]] : !s16i, !cir.ptr + +// LLVM: {{.*}}test_vst1q_lane_s16(ptr{{.*}}[[PTR:%.*]], <8 x i16>{{.*}}[[SRC:%.*]]) +// LLVM: store <8 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[VEC:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <8 x i16> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <8 x i16> +// LLVM: [[RES:%.*]] = extractelement <8 x i16> [[VEC_CAST1]], i32 7 +// LLVM: store i16 [[RES]], ptr [[PTR]], align 2 + +void test_vst1q_lane_u16(uint16_t * ptr, uint16x8_t src) { + vst1q_lane_u16(ptr, src, 7); +} + +// CIR-LABEL: test_vst1q_lane_u16 +// CIR: [[LANE:%.*]] = cir.const #cir.int<7> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(2) [[VAL]], [[PTR]] : !u16i, !cir.ptr + +// LLVM: {{.*}}test_vst1q_lane_u16(ptr{{.*}}[[PTR:%.*]], <8 x i16>{{.*}}[[SRC:%.*]]) +// LLVM: store <8 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[VEC:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <8 x i16> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <8 x i16> +// LLVM: [[RES:%.*]] = extractelement <8 x i16> [[VEC_CAST1]], i32 7 +// LLVM: store i16 [[RES]], ptr [[PTR]], align 2 + +void test_vst1q_lane_s32(int32_t * ptr, int32x4_t src) { + vst1q_lane_s32(ptr, src, 3); +} + +// CIR-LABEL: test_vst1q_lane_s32 +// CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(4) [[VAL]], [[PTR]] : !s32i, !cir.ptr + +// LLVM: {{.*}}test_vst1q_lane_s32(ptr{{.*}}[[PTR:%.*]], <4 x i32>{{.*}}[[SRC:%.*]]) +// LLVM: store <4 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[VEC:%.*]] = load <4 x i32>, ptr [[SRC_ADDR]], align 16 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x i32> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <4 x i32> +// LLVM: [[RES:%.*]] = extractelement <4 x i32> [[VEC_CAST1]], i32 3 +// LLVM: store i32 [[RES]], ptr [[PTR]], align 4 + +void test_vst1q_lane_s64(int64_t * ptr, int64x2_t src) { + vst1q_lane_s64(ptr, src, 1); +} + +// CIR-LABEL: test_vst1q_lane_s64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) [[VAL]], [[PTR]] : !s64i, !cir.ptr + +// LLVM: {{.*}}test_vst1q_lane_s64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: store <2 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[VEC:%.*]] = load <2 x i64>, ptr [[SRC_ADDR]], align 16 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x i64> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <2 x i64> +// LLVM: [[RES:%.*]] = extractelement <2 x i64> [[VEC_CAST1]], i32 1 +// LLVM: store i64 [[RES]], ptr [[PTR]], align 8 + +void test_vst1q_lane_f32(float32_t * ptr, float32x4_t src) { + vst1q_lane_f32(ptr, src, 3); +} + +// CIR-LABEL: test_vst1q_lane_f32 +// CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(4) [[VAL]], [[PTR]] : !cir.float, !cir.ptr + +// LLVM: {{.*}}test_vst1q_lane_f32(ptr{{.*}}[[PTR:%.*]], <4 x float>{{.*}}[[SRC:%.*]]) +// LLVM: store <4 x float> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[VEC:%.*]] = load <4 x float>, ptr [[SRC_ADDR]], align 16 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x float> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <4 x float> +// LLVM: [[RES:%.*]] = extractelement <4 x float> [[VEC_CAST1]], i32 3 +// LLVM: store float [[RES]], ptr [[PTR]], align 4 + +void test_vst1q_lane_f64(float64_t * ptr, float64x2_t src) { + vst1q_lane_f64(ptr, src, 1); +} + +// CIR-LABEL: test_vst1q_lane_f64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) [[VAL]], [[PTR]] : !cir.double, !cir.ptr + +// LLVM: {{.*}}test_vst1q_lane_f64(ptr{{.*}}[[PTR:%.*]], <2 x double>{{.*}}[[SRC:%.*]]) +// LLVM: store <2 x double> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 +// LLVM: [[VEC:%.*]] = load <2 x double>, ptr [[SRC_ADDR]], align 16 +// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x double> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <2 x double> +// LLVM: [[RES:%.*]] = extractelement <2 x double> [[VEC_CAST1]], i32 1 +// LLVM: store double [[RES]], ptr [[PTR]], align 8 From 6f175a10d3683f0a0bb7adb8478286466e055202 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Fri, 4 Oct 2024 17:50:59 -0700 Subject: [PATCH 1913/2301] [CIR] Derived-to-base conversions (#937) Implement derived-to-base address conversions for non-virtual base classes. The code gen for this situation was only implemented when the offset was zero, and it simply created a `cir.base_class_addr` op for which no lowering or other transformation existed. Conversion to a virtual base class is not yet implemented. Two new fields are added to the `cir.base_class_addr` operation: the byte offset of the necessary adjustment, and a boolean flag indicating whether the source operand may be null. The offset is easy to compute in the front end while the entire path of intermediate classes is still available. It would be difficult for the back end to recompute the offset. So it is best to store it in the operation. The null-pointer check is best done late in the lowering process. But whether or not the null-pointer check is needed is only known by the front end; the back end can't figure that out. So that flag needs to be stored in the operation. `CIRGenFunction::getAddressOfBaseClass` was largely rewritten. The code path no longer matches the equivalent function in the LLVM IR code gen, because the generated ClangIR is quite different from the generated LLVM IR. `cir.base_class_addr` is lowered to LLVM IR as a `getelementptr` operation. If a null-pointer check is needed, then that is wrapped in a `select` operation. When generating code for a constructor or destructor, an incorrect `cir.ptr_stride` op was used to convert the pointer to a base class. The code was assuming that the operand of `cir.ptr_stride` was measured in bytes; the operand is the number elements, not the number of bytes. So the base class constructor was being called on the wrong chunk of memory. Fix this by using a `cir.base_class_addr` op instead of `cir.ptr_stride` in this scenario. The use of `cir.ptr_stride` in `ApplyNonVirtualAndVirtualOffset` had the same problem. Continue using `cir.ptr_stride` here, but temporarily convert the pointer to type `char*` so the pointer is adjusted correctly. Adjust the expected results of three existing tests in response to these changes. Add two new tests, one code gen and one lowering, to cover the case where a base class is at a non-zero offset. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 22 ++++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 83 +++++++++---------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 36 +++++++- clang/test/CIR/CodeGen/derived-to-base.cpp | 33 +++++++- clang/test/CIR/CodeGen/multi-vtable.cpp | 8 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 2 +- clang/test/CIR/Lowering/derived-to-base.cpp | 28 +++++++ 8 files changed, 159 insertions(+), 61 deletions(-) create mode 100644 clang/test/CIR/Lowering/derived-to-base.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1057e0308aee..4b3a5ec6862e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2960,23 +2960,37 @@ def BaseClassAddrOp : CIR_Op<"base_class_addr"> { let summary = "Get the base class address for a class/struct"; let description = [{ The `cir.base_class_addr` operaration gets the address of a particular - base class given a derived class pointer. + non-virtual base class given a derived class pointer. The offset in bytes + of the base class must be passed in, since it is easier for the front end + to calculate that than the MLIR passes. The operation contains a flag for + whether or not the operand may be nullptr. That depends on the context and + cannot be known by the operation, and that information affects how the + operation is lowered. Example: + ```c++ + struct Base { }; + struct Derived : Base { }; + Derived d; + Base& b = d; + ``` + will generate ```mlir - TBD + %3 = cir.base_class_addr (%1 : !cir.ptr nonnull) [0] -> !cir.ptr ``` }]; let arguments = (ins - Arg:$derived_addr); + Arg:$derived_addr, + IndexAttr:$offset, UnitAttr:$assume_not_null); let results = (outs Res:$base_addr); let assemblyFormat = [{ `(` $derived_addr `:` qualified(type($derived_addr)) - `)` `->` qualified(type($base_addr)) attr-dict + (`nonnull` $assume_not_null^)? + `)` `[` $offset `]` `->` qualified(type($base_addr)) attr-dict }]; // FIXME: add verifier. diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 9dab3d640665..c82b1c3a710c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -684,14 +684,14 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } cir::Address createBaseClassAddr(mlir::Location loc, cir::Address addr, - mlir::Type destType) { + mlir::Type destType, unsigned offset, + bool assumeNotNull) { if (destType == addr.getElementType()) return addr; auto ptrTy = getPointerTo(destType); - auto baseAddr = - create(loc, ptrTy, addr.getPointer()); - + auto baseAddr = create( + loc, ptrTy, addr.getPointer(), mlir::APInt(64, offset), assumeNotNull); return Address(baseAddr, ptrTy, addr.getAlignment()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 6b2be0b7551c..121f27bba22d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -530,17 +530,9 @@ Address CIRGenFunction::getAddressOfDirectBaseInCompleteClass( else Offset = Layout.getBaseClassOffset(Base); - // Shift and cast down to the base type. - // TODO: for complete types, this should be possible with a GEP. - Address V = This; - if (!Offset.isZero()) { - mlir::Value OffsetVal = builder.getSInt32(Offset.getQuantity(), loc); - mlir::Value VBaseThisPtr = builder.create( - loc, This.getPointer().getType(), This.getPointer(), OffsetVal); - V = Address(VBaseThisPtr, CXXABIThisAlignment); - } - V = builder.createElementBitCast(loc, V, ConvertType(Base)); - return V; + return builder.createBaseClassAddr(loc, This, ConvertType(Base), + Offset.getQuantity(), + /*assume_not_null=*/true); } static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, @@ -680,10 +672,17 @@ static Address ApplyNonVirtualAndVirtualOffset( baseOffset = virtualOffset; } - // Apply the base offset. + // Apply the base offset. cir.ptr_stride adjusts by a number of elements, + // not bytes. So the pointer must be cast to a byte pointer and back. + mlir::Value ptr = addr.getPointer(); - ptr = CGF.getBuilder().create(loc, ptr.getType(), ptr, - baseOffset); + mlir::Type charPtrType = CGF.CGM.UInt8PtrTy; + mlir::Value charPtr = CGF.getBuilder().createCast( + mlir::cir::CastKind::bitcast, ptr, charPtrType); + mlir::Value adjusted = CGF.getBuilder().create( + loc, charPtrType, charPtr, baseOffset); + ptr = CGF.getBuilder().createCast(mlir::cir::CastKind::bitcast, adjusted, + ptr.getType()); // If we have a virtual component, the alignment of the result will // be relative only to the known alignment of that vbase. @@ -1481,7 +1480,7 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, // *start* with a step down to the correct virtual base subobject, // and hence will not require any further steps. if ((*Start)->isVirtual()) { - llvm_unreachable("NYI"); + llvm_unreachable("NYI: Cast to virtual base class"); } // Compute the static offset of the ultimate destination within its @@ -1494,55 +1493,51 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, // For now, that's limited to when the derived type is final. // TODO: "devirtualize" this for accesses to known-complete objects. if (VBase && Derived->hasAttr()) { - llvm_unreachable("NYI"); + const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived); + CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase); + NonVirtualOffset += vBaseOffset; + VBase = nullptr; // we no longer have a virtual step } // Get the base pointer type. auto BaseValueTy = convertType((PathEnd[-1])->getType()); assert(!MissingFeatures::addressSpace()); - // auto BasePtrTy = builder.getPointerTo(BaseValueTy); - // QualType DerivedTy = getContext().getRecordType(Derived); - // CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived); - // If the static offset is zero and we don't have a virtual step, - // just do a bitcast; null checks are unnecessary. - if (NonVirtualOffset.isZero() && !VBase) { + // If there is no virtual base, use cir.base_class_addr. It takes care of + // the adjustment and the null pointer check. + if (!VBase) { if (sanitizePerformTypeCheck()) { - llvm_unreachable("NYI"); + llvm_unreachable("NYI: sanitizePerformTypeCheck"); } - return builder.createBaseClassAddr(getLoc(Loc), Value, BaseValueTy); + return builder.createBaseClassAddr(getLoc(Loc), Value, BaseValueTy, + NonVirtualOffset.getQuantity(), + /*assumeNotNull=*/not NullCheckValue); } - // Skip over the offset (and the vtable load) if we're supposed to - // null-check the pointer. - if (NullCheckValue) { - llvm_unreachable("NYI"); - } - - if (sanitizePerformTypeCheck()) { - llvm_unreachable("NYI"); - } + // Conversion to a virtual base. cir.base_class_addr can't handle this. + // Generate the code to look up the address in the virtual table. - // Compute the virtual offset. - mlir::Value VirtualOffset{}; - if (VBase) { - llvm_unreachable("NYI"); - } + llvm_unreachable("NYI: Cast to virtual base class"); - // Apply both offsets. + // This is just an outline of what the code might look like, since I can't + // actually test it. +#if 0 + mlir::Value VirtualOffset = ...; // This is a dynamic expression. Creating + // it requires calling an ABI-specific + // function. Value = ApplyNonVirtualAndVirtualOffset(getLoc(Loc), *this, Value, NonVirtualOffset, VirtualOffset, Derived, VBase); - // Cast to the destination type. Value = builder.createElementBitCast(Value.getPointer().getLoc(), Value, BaseValueTy); - - // Build a phi if we needed a null check. + if (sanitizePerformTypeCheck()) { + // Do something here + } if (NullCheckValue) { - llvm_unreachable("NYI"); + // Convert to 'derivedPtr == nullptr ? nullptr : basePtr' } +#endif - llvm_unreachable("NYI"); return Value; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index a1b0a7008e6c..f5129adf49ce 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -622,6 +622,39 @@ class CIRPtrStrideOpLowering } }; +class CIRBaseClassAddrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern< + mlir::cir::BaseClassAddrOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::BaseClassAddrOp baseClassOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + const auto resultType = + getTypeConverter()->convertType(baseClassOp.getType()); + mlir::Value derivedAddr = adaptor.getDerivedAddr(); + llvm::SmallVector offset = { + adaptor.getOffset().getZExtValue()}; + mlir::Type byteType = mlir::IntegerType::get(resultType.getContext(), 8, + mlir::IntegerType::Signless); + if (baseClassOp.getAssumeNotNull()) { + rewriter.replaceOpWithNewOp( + baseClassOp, resultType, byteType, derivedAddr, offset); + } else { + auto loc = baseClassOp.getLoc(); + mlir::Value isNull = rewriter.create( + loc, mlir::LLVM::ICmpPredicate::eq, derivedAddr, + rewriter.create(loc, derivedAddr.getType())); + mlir::Value adjusted = rewriter.create( + loc, resultType, byteType, derivedAddr, offset); + rewriter.replaceOpWithNewOp(baseClassOp, isNull, + derivedAddr, adjusted); + } + return mlir::success(); + } +}; + class CIRBrCondOpLowering : public mlir::OpConversionPattern { public: @@ -3822,7 +3855,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, - CIRAllocExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering + CIRAllocExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, + CIRBaseClassAddrOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 1f2ae7411ab3..2fcdbd21583d 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -84,7 +84,7 @@ void C3::Layer::Initialize() { // CHECK: cir.func @_ZN2C35Layer10InitializeEv // CHECK: cir.scope { -// CHECK: %2 = cir.base_class_addr(%1 : !cir.ptr) -> !cir.ptr +// CHECK: %2 = cir.base_class_addr(%1 : !cir.ptr nonnull) [0] -> !cir.ptr // CHECK: %3 = cir.get_member %2[1] {name = "m_C1"} : !cir.ptr -> !cir.ptr> // CHECK: %4 = cir.load %3 : !cir.ptr>, !cir.ptr // CHECK: %5 = cir.const #cir.ptr : !cir.ptr @@ -99,7 +99,7 @@ enumy C3::Initialize() { // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: %2 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %3 = cir.base_class_addr(%2 : !cir.ptr) -> !cir.ptr +// CHECK: %3 = cir.base_class_addr(%2 : !cir.ptr nonnull) [0] -> !cir.ptr // CHECK: %4 = cir.call @_ZN2C210InitializeEv(%3) : (!cir.ptr) -> !s32i void vcall(C1 &c1) { @@ -144,7 +144,7 @@ class B : public A { // CHECK: %1 = cir.load deref %0 : !cir.ptr>, !cir.ptr // CHECK: cir.scope { // CHECK: %2 = cir.alloca !ty_A, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} -// CHECK: %3 = cir.base_class_addr(%1 : !cir.ptr) -> !cir.ptr +// CHECK: %3 = cir.base_class_addr(%1 : !cir.ptr nonnull) [0] -> !cir.ptr // Call @A::A(A const&) // CHECK: cir.call @_ZN1AC2ERKS_(%2, %3) : (!cir.ptr, !cir.ptr) -> () @@ -171,4 +171,29 @@ int test_ref() { int x = 42; C c(x); return c.ref; -} \ No newline at end of file +} + +// Multiple base classes, to test non-zero offsets +struct Base1 { int a; }; +struct Base2 { int b; }; +struct Derived : Base1, Base2 { int c; }; +void test_multi_base() { + Derived d; + + Base2& bref = d; // no null check needed + // CHECK: %6 = cir.base_class_addr(%0 : !cir.ptr nonnull) [4] -> !cir.ptr + + Base2* bptr = &d; // has null pointer check + // CHECK: %7 = cir.base_class_addr(%0 : !cir.ptr) [4] -> !cir.ptr + + int a = d.a; + // CHECK: %8 = cir.base_class_addr(%0 : !cir.ptr nonnull) [0] -> !cir.ptr + // CHECK: %9 = cir.get_member %8[0] {name = "a"} : !cir.ptr -> !cir.ptr + + int b = d.b; + // CHECK: %11 = cir.base_class_addr(%0 : !cir.ptr nonnull) [4] -> !cir.ptr + // CHECK: %12 = cir.get_member %11[0] {name = "b"} : !cir.ptr -> !cir.ptr + + int c = d.c; + // CHECK: %14 = cir.get_member %0[2] {name = "c"} : !cir.ptr -> !cir.ptr +} diff --git a/clang/test/CIR/CodeGen/multi-vtable.cpp b/clang/test/CIR/CodeGen/multi-vtable.cpp index c6f1f1397d97..da81091890cf 100644 --- a/clang/test/CIR/CodeGen/multi-vtable.cpp +++ b/clang/test/CIR/CodeGen/multi-vtable.cpp @@ -56,8 +56,10 @@ int main() { // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV5Child, vtable_index = 1, address_point_index = 2) : !cir.ptr>> // CIR: %{{[0-9]+}} = cir.const #cir.int<8> : !s64i -// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr -// CIR: %11 = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr +// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: cir.return // CIR: } @@ -68,7 +70,7 @@ int main() { // LLVM-DAG: define linkonce_odr void @_ZN5ChildC2Ev(ptr %0) // LLVM-DAG: store ptr getelementptr inbounds ({ [4 x ptr], [3 x ptr] }, ptr @_ZTV5Child, i32 0, i32 0, i32 2), ptr %{{[0-9]+}}, align 8 -// LLVM-DAG: %{{[0-9]+}} = getelementptr %class.Child, ptr %3, i64 8 +// LLVM-DAG: %{{[0-9]+}} = getelementptr i8, ptr %3, i64 8 // LLVM-DAG: store ptr getelementptr inbounds ({ [4 x ptr], [3 x ptr] }, ptr @_ZTV5Child, i32 0, i32 1, i32 2), ptr %{{[0-9]+}}, align 8 // LLVM-DAG: ret void // } diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index e8c542f9a383..9e86b41f1d30 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -43,7 +43,7 @@ class B : public A // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr +// CHECK: %2 = cir.base_class_addr(%1 : !cir.ptr nonnull) [0] -> !cir.ptr // CHECK: cir.call @_ZN1AC2Ev(%2) : (!cir.ptr) -> () // CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : !cir.ptr>> // CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> diff --git a/clang/test/CIR/Lowering/derived-to-base.cpp b/clang/test/CIR/Lowering/derived-to-base.cpp new file mode 100644 index 000000000000..ef02ed0639b0 --- /dev/null +++ b/clang/test/CIR/Lowering/derived-to-base.cpp @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +struct Base1 { int a; }; +struct Base2 { int b; }; +struct Derived : Base1, Base2 { int c; }; +void test_multi_base() { + Derived d; + + Base2& bref = d; // no null check needed + // LLVM: %7 = getelementptr i8, ptr %1, i32 4 + + Base2* bptr = &d; // has null pointer check + // LLVM: %8 = icmp eq ptr %1, null + // LLVM: %9 = getelementptr i8, ptr %1, i32 4 + // LLVM: %10 = select i1 %8, ptr %1, ptr %9 + + int a = d.a; + // LLVM: %11 = getelementptr i8, ptr %1, i32 0 + // LLVM: %12 = getelementptr %struct.Base1, ptr %11, i32 0, i32 0 + + int b = d.b; + // LLVM: %14 = getelementptr i8, ptr %1, i32 4 + // LLVM: %15 = getelementptr %struct.Base2, ptr %14, i32 0, i32 0 + + int c = d.c; + // LLVM: %17 = getelementptr %struct.Derived, ptr %1, i32 0, i32 2 +} From 5273295ea8a5e1c829ec5e840ede7490091d893f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 4 Oct 2024 17:59:47 -0700 Subject: [PATCH 1914/2301] [CIR][NFC] Updates against -Wswitch after rebase --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 3 +++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 ++ 2 files changed, 5 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 7bcfac452878..4da7f2d20b88 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -261,6 +261,9 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, case Stmt::OMPInteropDirectiveClass: case Stmt::OMPDispatchDirectiveClass: case Stmt::OMPGenericLoopDirectiveClass: + case Stmt::OMPReverseDirectiveClass: + case Stmt::OMPInterchangeDirectiveClass: + case Stmt::OMPAssumeDirectiveClass: case Stmt::OMPMaskedDirectiveClass: { llvm::errs() << "CIR codegen for '" << S->getStmtClassName() << "' not implemented\n"; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 0cae7cdbf352..6ab3306800bc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -390,6 +390,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case Type::Builtin: { switch (cast(Ty)->getKind()) { + case BuiltinType::HLSLResource: + llvm_unreachable("NYI"); case BuiltinType::SveBoolx2: case BuiltinType::SveBoolx4: case BuiltinType::SveCount: From 4935d4ad8be43b2551f0d13c57bb24259a624020 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 4 Oct 2024 19:39:30 -0700 Subject: [PATCH 1915/2301] [CIR][CIRGen] Exceptions: fix agg store for temporaries Fix https://github.com/llvm/clangir/issues/934 While here move scope op codegen outside the builder, so it's easier to dump blocks and operations while debugging. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 4 ++ clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 15 ++++++-- clang/test/CIR/CodeGen/temporaries.cpp | 49 ++++++++++++++++--------- 3 files changed, 46 insertions(+), 22 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 2deb709e38f4..6287aa8f65ef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -298,6 +298,10 @@ void CIRGenFunction::buildAggregateStore(mlir::Value Val, Address Dest, // struct), which can later be broken down in other CIR levels (or prior // to dialect codegen). (void)DestIsVolatile; + // Stored result for the callers of this function expected to be in the same + // scope as the value, don't make assumptions about current insertion point. + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPointAfter(Val.getDefiningOp()); builder.createStore(*currSrcLoc, Val, Dest); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 4c321f18779f..aae0189a10f5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -896,13 +896,20 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { auto &builder = CGF.getBuilder(); auto scopeLoc = CGF.getLoc(E->getSourceRange()); - [[maybe_unused]] auto scope = builder.create( + mlir::OpBuilder::InsertPoint scopeBegin; + builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScope lexScope{CGF, loc, - builder.getInsertionBlock()}; - Visit(E->getSubExpr()); + scopeBegin = b.saveInsertionPoint(); }); + + { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.restoreInsertionPoint(scopeBegin); + CIRGenFunction::LexicalScope lexScope{CGF, scopeLoc, + builder.getInsertionBlock()}; + Visit(E->getSubExpr()); + } } void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { diff --git a/clang/test/CIR/CodeGen/temporaries.cpp b/clang/test/CIR/CodeGen/temporaries.cpp index 23e0adb70b2d..885ba0db8f0a 100644 --- a/clang/test/CIR/CodeGen/temporaries.cpp +++ b/clang/test/CIR/CodeGen/temporaries.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fcxx-exceptions -fexceptions -emit-cir %s -o %t.eh.cir +// RUN: FileCheck --input-file=%t.eh.cir %s -check-prefix=CIR_EH // RUN: cir-translate %t.cir -cir-to-llvmir -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM @@ -12,27 +14,38 @@ void f() { !E(); } -// CHECK: cir.func private @_ZN1EC1Ev(!cir.ptr) extra(#fn_attr) -// CHECK-NEXT: cir.func private @_ZN1EntEv(!cir.ptr) -> !ty_E -// CHECK-NEXT: cir.func private @_ZN1ED1Ev(!cir.ptr) extra(#fn_attr) -// CHECK-NEXT: cir.func @_Z1fv() extra(#fn_attr1) { -// CHECK-NEXT: cir.scope { -// CHECK-NEXT: %[[ONE:[0-9]+]] = cir.alloca !ty_E, !cir.ptr, ["agg.tmp.ensured"] {alignment = 1 : i64} -// CHECK-NEXT: %[[TWO:[0-9]+]] = cir.alloca !ty_E, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} -// CHECK-NEXT: cir.call @_ZN1EC1Ev(%1) : (!cir.ptr) -> () extra(#fn_attr) -// CHECK-NEXT: %[[THREE:[0-9]+]] = cir.call @_ZN1EntEv(%[[TWO]]) : (!cir.ptr) -> !ty_E -// CHECK-NEXT: cir.store %[[THREE]], %[[ONE]] : !ty_E, !cir.ptr -// CHECK-NEXT: cir.call @_ZN1ED1Ev(%[[ONE]]) : (!cir.ptr) -> () extra(#fn_attr) -// CHECK-NEXT: cir.call @_ZN1ED1Ev(%[[TWO]]) : (!cir.ptr) -> () extra(#fn_attr) -// CHECK-NEXT: } -// CHECK-NEXT: cir.return -// CHECK-NEXT: } +// CIR: cir.func private @_ZN1EC1Ev(!cir.ptr) extra(#fn_attr) +// CIR-NEXT: cir.func private @_ZN1EntEv(!cir.ptr) -> !ty_E +// CIR-NEXT: cir.func private @_ZN1ED1Ev(!cir.ptr) extra(#fn_attr) +// CIR-NEXT: cir.func @_Z1fv() extra(#fn_attr1) { +// CIR-NEXT: cir.scope { +// CIR-NEXT: %[[ONE:[0-9]+]] = cir.alloca !ty_E, !cir.ptr, ["agg.tmp.ensured"] {alignment = 1 : i64} +// CIR-NEXT: %[[TWO:[0-9]+]] = cir.alloca !ty_E, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} +// CIR-NEXT: cir.call @_ZN1EC1Ev(%1) : (!cir.ptr) -> () extra(#fn_attr) +// CIR-NEXT: %[[THREE:[0-9]+]] = cir.call @_ZN1EntEv(%[[TWO]]) : (!cir.ptr) -> !ty_E +// CIR-NEXT: cir.store %[[THREE]], %[[ONE]] : !ty_E, !cir.ptr +// CIR-NEXT: cir.call @_ZN1ED1Ev(%[[ONE]]) : (!cir.ptr) -> () extra(#fn_attr) +// CIR-NEXT: cir.call @_ZN1ED1Ev(%[[TWO]]) : (!cir.ptr) -> () extra(#fn_attr) +// CIR-NEXT: } +// CIR-NEXT: cir.return +// CIR-NEXT: } + +// CIR_EH-LABEL: @_Z1fv +// CIR_EH: %[[AGG_TMP:.*]] = cir.alloca {{.*}} ["agg.tmp.ensured"] +// CIR_EH: cir.try synthetic cleanup { +// CIR_EH: %[[RVAL:.*]] = cir.call exception {{.*}} cleanup { +// CIR_EH: cir.call @_ZN1ED1Ev +// CIR_EH: cir.yield +// CIR_EH: } +// CIR_EH: cir.store %[[RVAL]], %[[AGG_TMP]] +// CIR_EH: cir.yield +// CIR_EH: } catch [#cir.unwind { const unsigned int n = 1234; const int &r = (const int&)n; -// CHECK: cir.global "private" constant internal @_ZGR1r_ = #cir.int<1234> : !s32i -// CHECK-NEXT: cir.global constant external @r = #cir.global_view<@_ZGR1r_> : !cir.ptr {alignment = 8 : i64} +// CIR: cir.global "private" constant internal @_ZGR1r_ = #cir.int<1234> : !s32i +// CIR-NEXT: cir.global constant external @r = #cir.global_view<@_ZGR1r_> : !cir.ptr {alignment = 8 : i64} // LLVM: @_ZGR1r_ = internal constant i32 1234, align 4 // LLVM-NEXT: @r = constant ptr @_ZGR1r_, align 8 From 8b448a49c459d30091349b08b8ec6d40c61c14c7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 7 Oct 2024 15:21:47 -0700 Subject: [PATCH 1916/2301] [CIR][CIRGen] Lower cir.throw in absence of dtors --- clang/include/clang/CIR/MissingFeatures.h | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 17 +++++----- clang/test/CIR/CodeGen/eh.cpp | 31 +++++++++++++++++++ 4 files changed, 43 insertions(+), 9 deletions(-) create mode 100644 clang/test/CIR/CodeGen/eh.cpp diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 5f92e4e60cba..1be7d5f63183 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -103,7 +103,7 @@ struct MissingFeatures { static bool shouldCreateMemCpyFromGlobal() { return false; } static bool shouldReverseUnaryCondOnBoolExpr() { return false; } static bool fieldMemcpyizerBuildMemcpy() { return false; } - static bool isTrivialAndisDefaultConstructor() { return false; } + static bool isTrivialCtorOrDtor() { return false; } static bool isMemcpyEquivalentSpecialMember() { return false; } static bool constructABIArgDirectExtend() { return false; } static bool mayHaveIntegerOverflow() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 428f2b2ccedb..f5f27b3f7dd8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -814,7 +814,7 @@ void CIRGenFunction::buildCXXConstructorCall( // In LLVM: do nothing. // In CIR: emit as a regular call, other later passes should lower the // ctor call into trivial initialization. - assert(!MissingFeatures::isTrivialAndisDefaultConstructor()); + assert(!MissingFeatures::isTrivialCtorOrDtor()); if (isMemcpyEquivalentSpecialMember(D)) { assert(!MissingFeatures::isMemcpyEquivalentSpecialMember()); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 7c81d0b7d0ed..50c30f8f692a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2210,16 +2210,19 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, // LoweringPrepare or some other pass to skip passing the // trivial function. // - // TODO(cir): alternatively, dtor could be ignored here and - // the type used to gather the relevant dtor during - // LoweringPrepare. + // TODO(cir): However, such lowering is still NYI, and for + // the sake of getting cir.throw right, the same OG path is + // follows here. mlir::FlatSymbolRefAttr dtor{}; if (const RecordType *recordTy = clangThrowType->getAs()) { CXXRecordDecl *rec = cast(recordTy->getDecl()); - CXXDestructorDecl *dtorD = rec->getDestructor(); - dtor = mlir::FlatSymbolRefAttr::get( - CGM.getAddrOfCXXStructor(GlobalDecl(dtorD, Dtor_Complete)) - .getSymNameAttr()); + assert(!MissingFeatures::isTrivialCtorOrDtor()); + if (!rec->hasTrivialDestructor()) { + CXXDestructorDecl *dtorD = rec->getDestructor(); + dtor = mlir::FlatSymbolRefAttr::get( + CGM.getAddrOfCXXStructor(GlobalDecl(dtorD, Dtor_Complete)) + .getSymNameAttr()); + } } // FIXME: When adding support for invoking, we should wrap the throw op diff --git a/clang/test/CIR/CodeGen/eh.cpp b/clang/test/CIR/CodeGen/eh.cpp new file mode 100644 index 000000000000..e23ade3dbe72 --- /dev/null +++ b/clang/test/CIR/CodeGen/eh.cpp @@ -0,0 +1,31 @@ +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -fcxx-exceptions -fexceptions -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -fcxx-exceptions -fexceptions -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +struct test1_D { + double d; +} d1; + +void test1() { + throw d1; +} + +// CIR-LABEL: @_Z5test1v +// FIXME: this is overaligned, should be 4. +// CIR: %[[ALLOC:.*]] = cir.alloc.exception 8 -> !cir.ptr +// CIR: %[[G:.*]] = cir.get_global @d1 : !cir.ptr +// CIR: cir.call @_ZN7test1_DC1ERKS_(%[[ALLOC]], %[[G]]) : (!cir.ptr, !cir.ptr) -> () +// CIR: cir.throw %[[ALLOC]] : !cir.ptr, @_ZTI7test1_D +// CIR: cir.unreachable +// CIR: } + +// LLVM-LABEL: @_Z5test1v +// FIXME: this is overaligned, should be 4. +// LLVM: %[[ALLOC:.*]] = call ptr @__cxa_allocate_exception(i64 8) + +// FIXME: this is a simple store once we fix isTrivialCtorOrDtor(). +// LLVM: call void @_ZN7test1_DC1ERKS_(ptr %[[ALLOC]], ptr @d1) +// LLVM: call void @__cxa_throw(ptr %[[ALLOC]], ptr @_ZTI7test1_D, ptr null) +// LLVM: unreachable +// LLVM: } \ No newline at end of file From 3aad2f509f20edc0c2f514833ee4bf270a146bf9 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 7 Oct 2024 15:46:34 -0700 Subject: [PATCH 1917/2301] [CIR][NFC] Update wrong comments from previous commit --- clang/test/CIR/CodeGen/eh.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/clang/test/CIR/CodeGen/eh.cpp b/clang/test/CIR/CodeGen/eh.cpp index e23ade3dbe72..321d1d6254d0 100644 --- a/clang/test/CIR/CodeGen/eh.cpp +++ b/clang/test/CIR/CodeGen/eh.cpp @@ -12,7 +12,6 @@ void test1() { } // CIR-LABEL: @_Z5test1v -// FIXME: this is overaligned, should be 4. // CIR: %[[ALLOC:.*]] = cir.alloc.exception 8 -> !cir.ptr // CIR: %[[G:.*]] = cir.get_global @d1 : !cir.ptr // CIR: cir.call @_ZN7test1_DC1ERKS_(%[[ALLOC]], %[[G]]) : (!cir.ptr, !cir.ptr) -> () @@ -21,10 +20,9 @@ void test1() { // CIR: } // LLVM-LABEL: @_Z5test1v -// FIXME: this is overaligned, should be 4. // LLVM: %[[ALLOC:.*]] = call ptr @__cxa_allocate_exception(i64 8) -// FIXME: this is a simple store once we fix isTrivialCtorOrDtor(). +// FIXME: this is a llvm.memcpy.p0.p0.i64 once we fix isTrivialCtorOrDtor(). // LLVM: call void @_ZN7test1_DC1ERKS_(ptr %[[ALLOC]], ptr @d1) // LLVM: call void @__cxa_throw(ptr %[[ALLOC]], ptr @_ZTI7test1_D, ptr null) // LLVM: unreachable From 2fe681b754c2232b591026d09bbdaff2a2869fc6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 7 Oct 2024 16:19:43 -0700 Subject: [PATCH 1918/2301] [CIR][CIRGen] Exceptions: support free'ing allocated exception resources --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 35 ++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenException.cpp | 7 +++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 25 +++++++++++- clang/test/CIR/CodeGen/eh.cpp | 39 ++++++++++++++++++- 4 files changed, 101 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 4b3a5ec6862e..43d3295096c2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4092,7 +4092,7 @@ def VAArgOp : CIR_Op<"va.arg">, } //===----------------------------------------------------------------------===// -// AllocExceptionOp +// AllocExceptionOp & FreeExceptionOp //===----------------------------------------------------------------------===// def AllocExceptionOp : CIR_Op<"alloc.exception"> { @@ -4129,6 +4129,39 @@ def AllocExceptionOp : CIR_Op<"alloc.exception"> { let hasVerifier = 0; } +def FreeExceptionOp : CIR_Op<"free.exception"> { + let summary = "Frees an exception according to Itanium ABI"; + let description = [{ + Implements a slightly higher level version of: + `void __cxa_free_exception(void *thrown_exception);` + + Example: + + ```mlir + %0 = cir.alloc.exception 16 -> !cir.ptr + %1 = cir.get_global @d2 : !cir.ptr + cir.try synthetic cleanup { + cir.call exception @_ZN7test2_DC1ERKS_(%0, %1) : (!cir.ptr, !cir.ptr) -> () cleanup { + %2 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr + cir.free.exception %2 + cir.yield + } + ... + } + ``` + }]; + + let arguments = (ins VoidPtr:$ptr); + let results = (outs); + + let assemblyFormat = [{ + $ptr attr-dict + }]; + + // Constraints already described. + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // ThrowOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 903011159e47..9d04947234e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -214,7 +214,12 @@ struct FreeException final : EHScopeStack::Cleanup { mlir::Value exn; FreeException(mlir::Value exn) : exn(exn) {} void Emit(CIRGenFunction &CGF, Flags flags) override { - llvm_unreachable("call to cxa_free or equivalent op NYI"); + // OG LLVM codegen emits a no unwind call, CIR emits an operation. + cir::CIRGenBuilderTy &builder = CGF.getBuilder(); + mlir::Location loc = + CGF.currSrcLoc ? *CGF.currSrcLoc : builder.getUnknownLoc(); + builder.create( + loc, builder.createBitcast(exn, builder.getVoidPtrTy())); } }; } // end anonymous namespace diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f5129adf49ce..934457b16bdc 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3789,6 +3789,27 @@ class CIRAllocExceptionOpLowering } }; +class CIRFreeExceptionOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::FreeExceptionOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // Get or create `declare void @__cxa_free_exception(ptr)` + StringRef fnName = "__cxa_free_exception"; + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + auto voidTy = mlir::LLVM::LLVMVoidType::get(rewriter.getContext()); + auto fnTy = mlir::LLVM::LLVMFunctionType::get(voidTy, {llvmPtrTy}, + /*isVarArg=*/false); + getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); + rewriter.replaceOpWithNewOp( + op, mlir::TypeRange{}, fnName, mlir::ValueRange{adaptor.getPtr()}); + return mlir::success(); + } +}; + class CIRThrowOpLowering : public mlir::OpConversionPattern { public: @@ -3855,8 +3876,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, - CIRAllocExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, - CIRBaseClassAddrOpLowering + CIRAllocExceptionOpLowering, CIRFreeExceptionOpLowering, + CIRThrowOpLowering, CIRIntrinsicCallLowering, CIRBaseClassAddrOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/test/CIR/CodeGen/eh.cpp b/clang/test/CIR/CodeGen/eh.cpp index 321d1d6254d0..75f7c63471aa 100644 --- a/clang/test/CIR/CodeGen/eh.cpp +++ b/clang/test/CIR/CodeGen/eh.cpp @@ -26,4 +26,41 @@ void test1() { // LLVM: call void @_ZN7test1_DC1ERKS_(ptr %[[ALLOC]], ptr @d1) // LLVM: call void @__cxa_throw(ptr %[[ALLOC]], ptr @_ZTI7test1_D, ptr null) // LLVM: unreachable -// LLVM: } \ No newline at end of file +// LLVM: } + +struct test2_D { + test2_D(const test2_D&o); + test2_D(); + virtual void bar() { } + int i; int j; +} d2; + +void test2() { + throw d2; +} + +// CIR-LABEL: @_Z5test2v +// CIR: %[[ALLOC:.*]] = cir.alloc.exception 16 -> !cir.ptr +// CIR: %[[G:.*]] = cir.get_global @d2 : !cir.ptr +// CIR: cir.try synthetic cleanup { +// CIR: cir.call exception @_ZN7test2_DC1ERKS_(%[[ALLOC]], %[[G]]) : (!cir.ptr, !cir.ptr) -> () cleanup { +// CIR: %[[VOID_PTR:.*]] = cir.cast(bitcast, %[[ALLOC]] : !cir.ptr), !cir.ptr +// CIR: cir.free.exception %[[VOID_PTR]] +// CIR: cir.yield +// CIR: } +// CIR: cir.yield +// CIR: } catch [#cir.unwind { +// CIR: cir.resume +// CIR: }] +// CIR: cir.throw %[[ALLOC]] : !cir.ptr, @_ZTI7test2_D +// CIR: cir.unreachable + +// LLVM-LABEL: @_Z5test2v + +// LLVM: %[[ALLOC:.*]] = call ptr @__cxa_allocate_exception(i64 16) + +// LLVM: landingpad { ptr, i32 } +// LLVM: cleanup +// LLVM: extractvalue { ptr, i32 } +// LLVM: extractvalue { ptr, i32 } +// LLVM: call void @__cxa_free_exception(ptr %[[ALLOC]]) \ No newline at end of file From 7937880d57d47c9461dc29f9de5cc99490456a72 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Mon, 7 Oct 2024 19:37:10 -0700 Subject: [PATCH 1919/2301] [Lowering][DirectToLLVM] Fix calling variadic functions (#945) After 5da431008222e2653f618f3a112af58a94417251, the LLVM dialect requires the variadic callee type to be present for variadic calls. The op builders take care of this automatically if you pass the function type, so change our lowering logic to do so. Add tests for this as well as a missing test for indirect function call lowering. Fixes https://github.com/llvm/clangir/issues/913 Fixes https://github.com/llvm/clangir/issues/933 --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 42 ++++++------- clang/test/CIR/Lowering/call.cir | 63 +++++++++++++++++++ clang/test/CIR/Lowering/hello.cir | 3 +- 3 files changed, 82 insertions(+), 26 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 934457b16bdc..b04d3f6d47fc 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -937,17 +937,14 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, auto cconv = convertCallingConv(callIf.getCallingConv()); + mlir::LLVM::LLVMFunctionType llvmFnTy; if (calleeAttr) { // direct call - if (landingPadBlock) { - auto newOp = rewriter.replaceOpWithNewOp( - op, llvmResults, calleeAttr, callOperands, continueBlock, - mlir::ValueRange{}, landingPadBlock, mlir::ValueRange{}); - newOp.setCConv(cconv); - } else { - auto newOp = rewriter.replaceOpWithNewOp( - op, llvmResults, calleeAttr, callOperands); - newOp.setCConv(cconv); - } + auto fn = + mlir::SymbolTable::lookupNearestSymbolFrom( + op, calleeAttr); + assert(fn && "Did not find function for call"); + llvmFnTy = cast( + converter->convertType(fn.getFunctionType())); } else { // indirect call assert(op->getOperands().size() && "operands list must no be empty for the indirect call"); @@ -956,21 +953,18 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, auto ptyp = dyn_cast(typ); auto ftyp = dyn_cast(ptyp.getPointee()); assert(ftyp && "expected a pointer to a function as the first operand"); + llvmFnTy = cast(converter->convertType(ftyp)); + } - if (landingPadBlock) { - auto llvmFnTy = - dyn_cast(converter->convertType(ftyp)); - auto newOp = rewriter.replaceOpWithNewOp( - op, llvmFnTy, mlir::FlatSymbolRefAttr{}, callOperands, continueBlock, - mlir::ValueRange{}, landingPadBlock, mlir::ValueRange{}); - newOp.setCConv(cconv); - } else { - auto newOp = rewriter.replaceOpWithNewOp( - op, - dyn_cast(converter->convertType(ftyp)), - callOperands); - newOp.setCConv(cconv); - } + if (landingPadBlock) { + auto newOp = rewriter.replaceOpWithNewOp( + op, llvmFnTy, calleeAttr, callOperands, continueBlock, + mlir::ValueRange{}, landingPadBlock, mlir::ValueRange{}); + newOp.setCConv(cconv); + } else { + auto newOp = rewriter.replaceOpWithNewOp( + op, llvmFnTy, calleeAttr, callOperands); + newOp.setCConv(cconv); } return mlir::success(); } diff --git a/clang/test/CIR/Lowering/call.cir b/clang/test/CIR/Lowering/call.cir index 2c40bb88e523..eab7fb598830 100644 --- a/clang/test/CIR/Lowering/call.cir +++ b/clang/test/CIR/Lowering/call.cir @@ -1,6 +1,7 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR // RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +!s32i = !cir.int module { cir.func @a() { cir.return @@ -36,4 +37,66 @@ module { cir.return %0 : !cir.ptr } + // check indirect call lowering + cir.global "private" external @fp : !cir.ptr> + cir.func @callIndirect(%arg: !s32i) -> !s32i { + %fpp = cir.get_global @fp : !cir.ptr>> + %fp = cir.load %fpp : !cir.ptr>>, !cir.ptr> + %retval = cir.call %fp(%arg) : (!cir.ptr>, !s32i) -> !s32i + cir.return %retval : !s32i + } + + // MLIR: llvm.mlir.global external @fp() {addr_space = 0 : i32} : !llvm.ptr + // MLIR: llvm.func @callIndirect(%arg0: i32) -> i32 + // MLIR-NEXT: %0 = llvm.mlir.addressof @fp : !llvm.ptr + // MLIR-NEXT: %1 = llvm.load %0 {{.*}} : !llvm.ptr -> !llvm.ptr + // MLIR-NEXT: %2 = llvm.call %1(%arg0) : !llvm.ptr, (i32) -> i32 + // MLIR-NEXT: llvm.return %2 : i32 + + // LLVM: define i32 @callIndirect(i32 %0) + // LLVM-NEXT: %2 = load ptr, ptr @fp + // LLVM-NEXT: %3 = call i32 %2(i32 %0) + // LLVM-NEXT: ret i32 %3 + + // check direct vararg call lowering + cir.func private @varargCallee(!s32i, ...) -> !s32i + cir.func @varargCaller() -> !s32i { + %zero = cir.const #cir.int<0> : !s32i + %retval = cir.call @varargCallee(%zero, %zero) : (!s32i, !s32i) -> !s32i + cir.return %retval : !s32i + } + + // MLIR: llvm.func @varargCallee(i32, ...) -> i32 + // MLIR: llvm.func @varargCaller() -> i32 + // MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: %1 = llvm.call @varargCallee(%0, %0) vararg(!llvm.func) : (i32, i32) -> i32 + // MLIR-NEXT: llvm.return %1 : i32 + + // LLVM: define i32 @varargCaller() + // LLVM-NEXT: %1 = call i32 (i32, ...) @varargCallee(i32 0, i32 0) + // LLVM-NEXT: ret i32 %1 + + // check indirect vararg call lowering + cir.global "private" external @varargfp : !cir.ptr> + cir.func @varargCallIndirect() -> !s32i { + %fpp = cir.get_global @varargfp : !cir.ptr>> + %fp = cir.load %fpp : !cir.ptr>>, !cir.ptr> + %zero = cir.const #cir.int<0> : !s32i + %retval = cir.call %fp(%zero, %zero) : (!cir.ptr>, !s32i, !s32i) -> !s32i + cir.return %retval : !s32i + } + + // MLIR: llvm.mlir.global external @varargfp() {addr_space = 0 : i32} : !llvm.ptr + // MLIR: llvm.func @varargCallIndirect() -> i32 + // MLIR-NEXT: %0 = llvm.mlir.addressof @varargfp : !llvm.ptr + // MLIR-NEXT: %1 = llvm.load %0 {{.*}} : !llvm.ptr -> !llvm.ptr + // MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 + // MLIR-NEXT: %3 = llvm.call %1(%2, %2) vararg(!llvm.func) : !llvm.ptr, (i32, i32) -> i32 + // MLIR-NEXT: llvm.return %3 : i32 + + // LLVM: define i32 @varargCallIndirect() + // LLVM-NEXT: %1 = load ptr, ptr @varargfp + // LLVM-NEXT: %2 = call i32 (i32, ...) %1(i32 0, i32 0) + // LLVM-NEXT: ret i32 %2 + } // end module diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 94b546809573..195cbf28fbde 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -1,6 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -// XFAIL: * !s32i = !cir.int !s8i = !cir.int @@ -28,7 +27,7 @@ module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.sign // CHECK: %1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // CHECK: %2 = llvm.mlir.addressof @".str" : !llvm.ptr // CHECK: %3 = llvm.getelementptr %2[0] : (!llvm.ptr) -> !llvm.ptr, i8 -// CHECK: %4 = llvm.call @printf(%3) : (!llvm.ptr) -> i32 +// CHECK: %4 = llvm.call @printf(%3) vararg(!llvm.func) : (!llvm.ptr) -> i32 // CHECK: %5 = llvm.mlir.constant(0 : i32) : i32 // CHECK: llvm.store %5, %1 {{.*}} : i32, !llvm.ptr // CHECK: %6 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32 From 4e3ee2be01843072d070988b766f9e0e4cd7da3c Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Wed, 9 Oct 2024 02:08:22 +0800 Subject: [PATCH 1920/2301] [CIR] [CodeGen] Remove NYI in buildPointerWithAlignment (#949) See the test for the reproducer. It would crash due the NYI. See https://github.com/llvm/llvm-project/blob/327124ece7d59de56ca0f9faa2cd82af68c011b9/clang/lib/CodeGen/CGExpr.cpp#L1295-L1373, I found we've implemented all the cases in CGExpr.cpp. IIUC, I think we can remove the NYI. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 15 +++++---------- .../test/CIR/CodeGen/function-to-pointer-decay.c | 13 +++++++++++++ 2 files changed, 18 insertions(+), 10 deletions(-) create mode 100644 clang/test/CIR/CodeGen/function-to-pointer-decay.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 6e1a2ba4d4e1..2e9a7c1ed35d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -128,10 +128,6 @@ static Address buildPointerWithAlignment(const Expr *expr, cgf.CGM.buildExplicitCastExprType(ECE, &cgf); switch (CE->getCastKind()) { - default: { - llvm::errs() << CE->getCastKindName() << "\n"; - assert(0 && "not implemented"); - } // Non-converting casts (but not C's implicit conversion from void*). case CK_BitCast: case CK_NoOp: @@ -183,12 +179,6 @@ static Address buildPointerWithAlignment(const Expr *expr, } break; - // Nothing to do here... - case CK_LValueToRValue: - case CK_NullToPointer: - case CK_IntegralToPointer: - break; - // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo. case CK_ArrayToPointerDecay: return cgf.buildArrayToPointerDecay(CE->getSubExpr()); @@ -205,6 +195,11 @@ static Address buildPointerWithAlignment(const Expr *expr, Addr, Derived, CE->path_begin(), CE->path_end(), cgf.shouldNullCheckClassCastValue(CE), CE->getExprLoc()); } + + // TODO: Is there any reason to treat base-to-derived conversions + // specially? + default: + break; } } diff --git a/clang/test/CIR/CodeGen/function-to-pointer-decay.c b/clang/test/CIR/CodeGen/function-to-pointer-decay.c new file mode 100644 index 000000000000..c8541ceb3d09 --- /dev/null +++ b/clang/test/CIR/CodeGen/function-to-pointer-decay.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +void f(void); + +void test_call_lvalue_cast() { + (*(void (*)(int))f)(42); +} + +// CHECK: cir.func {{.*}}@test_call_lvalue_cast() +// CHECK: [[F:%.+]] = cir.get_global @f +// CHECK: [[CASTED:%.+]] = cir.cast(bitcast, [[F]] +// CHECK: [[CONST:%.+]] = cir.const #cir.int<42> +// CHECK: cir.call [[CASTED]]([[CONST]]) From 15bf3107841d3e37b017b6af26e0ff8a8d88e670 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Thu, 10 Oct 2024 02:16:55 +0800 Subject: [PATCH 1921/2301] [CIR][Lowering] Introduce HoistAllocasPass (#887) Close https://github.com/llvm/clangir/issues/883. See the above issue for details --- clang/include/clang/CIR/Dialect/Passes.h | 1 + clang/include/clang/CIR/Dialect/Passes.td | 10 ++ clang/lib/CIR/CodeGen/CIRPasses.cpp | 1 + .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + .../CIR/Dialect/Transforms/HoistAllocas.cpp | 65 ++++++++++ clang/test/CIR/CodeGen/AArch64/neon-ldst.c | 120 +++++------------- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 32 +++++ clang/test/CIR/CodeGen/AArch64/neon.c | 12 +- .../test/CIR/CodeGen/aarch64-neon-vdup-lane.c | 12 ++ clang/test/CIR/CodeGen/builtin-bit-cast.cpp | 4 +- clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp | 4 +- .../CIR/CodeGen/initlist-ptr-unsigned.cpp | 4 +- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 6 +- clang/test/CIR/Lowering/OpenMP/parallel.cir | 2 +- clang/test/CIR/Lowering/dot.cir | 16 +-- clang/test/CIR/Lowering/goto-interscope.c | 32 +++++ clang/test/CIR/Lowering/scope.cir | 20 +-- 17 files changed, 214 insertions(+), 128 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp create mode 100644 clang/test/CIR/Lowering/goto-interscope.c diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 67e9da2246b6..5f41da6e411c 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -37,6 +37,7 @@ std::unique_ptr createIdiomRecognizerPass(clang::ASTContext *astCtx); std::unique_ptr createLibOptPass(); std::unique_ptr createLibOptPass(clang::ASTContext *astCtx); std::unique_ptr createFlattenCFGPass(); +std::unique_ptr createHoistAllocasPass(); std::unique_ptr createGotoSolverPass(); /// Create a pass to lower ABI-independent function definitions/calls. diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index d72bf0bfd420..d1383fb48109 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -107,6 +107,16 @@ def SCFPrepare : Pass<"cir-mlir-scf-prepare"> { let dependentDialects = ["cir::CIRDialect"]; } +def HoistAllocas : Pass<"cir-hoist-allocas"> { + let summary = "Hoist allocas to the entry of the function"; + let description = [{ + This pass hoist all non-dynamic allocas to the entry of the function. + This is helpful for later code generation. + }]; + let constructor = "mlir::createHoistAllocasPass()"; + let dependentDialects = ["cir::CIRDialect"]; +} + def FlattenCFG : Pass<"cir-flatten-cfg"> { let summary = "Produces flatten cfg"; let description = [{ diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 7940a3f03066..a84acc0d6322 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -102,6 +102,7 @@ mlir::LogicalResult runCIRToCIRPasses( namespace mlir { void populateCIRPreLoweringPasses(OpPassManager &pm) { + pm.addPass(createHoistAllocasPass()); pm.addPass(createFlattenCFGPass()); pm.addPass(createGotoSolverPass()); } diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index d675f17042b6..76ac0cbf1c8d 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -13,6 +13,7 @@ add_clang_library(MLIRCIRTransforms GotoSolver.cpp SCFPrepare.cpp CallConvLowering.cpp + HoistAllocas.cpp DEPENDS MLIRCIRPassIncGen diff --git a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp new file mode 100644 index 000000000000..bafdca89e481 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp @@ -0,0 +1,65 @@ +//====- HoistAllocas.cpp --------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "PassDetail.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/DialectConversion.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Passes.h" + +#include "llvm/Support/TimeProfiler.h" + +using namespace mlir; +using namespace mlir::cir; + +namespace { + +struct HoistAllocasPass : public HoistAllocasBase { + + HoistAllocasPass() = default; + void runOnOperation() override; +}; + +static void process(mlir::cir::FuncOp func) { + if (func.getRegion().empty()) + return; + + // Hoist all static allocas to the entry block. + mlir::Block &entryBlock = func.getRegion().front(); + llvm::SmallVector allocas; + func.getBody().walk([&](mlir::cir::AllocaOp alloca) { + if (alloca->getBlock() == &entryBlock) + return; + // Don't hoist allocas with dynamic alloca size. + if (alloca.getDynAllocSize()) + return; + allocas.push_back(alloca); + }); + if (allocas.empty()) + return; + + mlir::Operation *insertPoint = &*entryBlock.begin(); + + for (auto alloca : allocas) + alloca->moveBefore(insertPoint); +} + +void HoistAllocasPass::runOnOperation() { + llvm::TimeTraceScope scope("Hoist Allocas"); + SmallVector ops; + getOperation()->walk([&](mlir::cir::FuncOp op) { process(op); }); +} + +} // namespace + +std::unique_ptr mlir::createHoistAllocasPass() { + return std::make_unique(); +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c index 6b6d46cbf03d..10df33358d36 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c @@ -26,10 +26,8 @@ int8x8_t test_vld1_lane_s8(int8_t const * ptr, int8x8_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_s8(ptr{{.*}}[[PTR:%.*]], <8 x i8>{{.*}}[[SRC:%.*]]) -// LLVM: store <8 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <8 x i8>, ptr [[SRC_ADDR]], align 8 // LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR]], align 1 -// LLVM: {{.*}} = insertelement <8 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 7 +// LLVM: {{.*}} = insertelement <8 x i8> [[SRC]], i8 [[INTRN_VAL]], i32 7 // LLVM: ret <8 x i8> {{.*}} int8x16_t test_vld1q_lane_s8(int8_t const * ptr, int8x16_t src) { @@ -43,10 +41,8 @@ int8x16_t test_vld1q_lane_s8(int8_t const * ptr, int8x16_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_s8(ptr{{.*}}[[PTR:%.*]], <16 x i8>{{.*}}[[SRC:%.*]]) -// LLVM: store <16 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[INTRN_VEC:%.*]] = load <16 x i8>, ptr [[SRC_ADDR]], align 16 // LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR]], align 1 -// LLVM: {{.*}} = insertelement <16 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 15 +// LLVM: {{.*}} = insertelement <16 x i8> [[SRC]], i8 [[INTRN_VAL]], i32 15 // LLVM: ret <16 x i8> {{.*}} uint8x16_t test_vld1q_lane_u8(uint8_t const * ptr, uint8x16_t src) { @@ -60,10 +56,8 @@ uint8x16_t test_vld1q_lane_u8(uint8_t const * ptr, uint8x16_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_u8(ptr{{.*}}[[PTR:%.*]], <16 x i8>{{.*}}[[SRC:%.*]]) -// LLVM: store <16 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[INTRN_VEC:%.*]] = load <16 x i8>, ptr [[SRC_ADDR]], align 16 // LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR]], align 1 -// LLVM: {{.*}} = insertelement <16 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 15 +// LLVM: {{.*}} = insertelement <16 x i8> [[SRC]], i8 [[INTRN_VAL]], i32 15 // LLVM: ret <16 x i8> {{.*}} uint8x8_t test_vld1_lane_u8(uint8_t const * ptr, uint8x8_t src) { @@ -77,10 +71,8 @@ uint8x8_t test_vld1_lane_u8(uint8_t const * ptr, uint8x8_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_u8(ptr{{.*}}[[PTR:%.*]], <8 x i8>{{.*}}[[SRC:%.*]]) -// LLVM: store <8 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <8 x i8>, ptr [[SRC_ADDR]], align 8 // LLVM: [[INTRN_VAL:%.*]] = load i8, ptr [[PTR]], align 1 -// LLVM: {{.*}} = insertelement <8 x i8> [[INTRN_VEC]], i8 [[INTRN_VAL]], i32 7 +// LLVM: {{.*}} = insertelement <8 x i8> [[SRC]], i8 [[INTRN_VAL]], i32 7 // LLVM: ret <8 x i8> {{.*}} int16x4_t test_vld1_lane_s16(int16_t const * ptr, int16x4_t src) { @@ -94,9 +86,7 @@ int16x4_t test_vld1_lane_s16(int16_t const * ptr, int16x4_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_s16(ptr{{.*}}[[PTR:%.*]], <4 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store <4 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i16> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i16> [[SRC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <4 x i16> // LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR]], align 2 // LLVM: {{.*}} = insertelement <4 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 3 @@ -113,9 +103,7 @@ uint16x4_t test_vld1_lane_u16(uint16_t const * ptr, uint16x4_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_u16(ptr{{.*}}[[PTR:%.*]], <4 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store <4 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i16> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i16> [[SRC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <4 x i16> // LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR]], align 2 // LLVM: {{.*}} = insertelement <4 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 3 @@ -132,9 +120,7 @@ int16x8_t test_vld1q_lane_s16(int16_t const * ptr, int16x8_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_s16(ptr{{.*}}[[PTR:%.*]], <8 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store <8 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[INTRN_VEC:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <8 x i16> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <8 x i16> [[SRC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <8 x i16> // LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR]], align 2 // LLVM: {{.*}} = insertelement <8 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 7 @@ -151,9 +137,7 @@ uint16x8_t test_vld1q_lane_u16(uint16_t const * ptr, uint16x8_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_u16(ptr{{.*}}[[PTR:%.*]], <8 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store <8 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[INTRN_VEC:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <8 x i16> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <8 x i16> [[SRC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <8 x i16> // LLVM: [[INTRN_VAL:%.*]] = load i16, ptr [[PTR]], align 2 // LLVM: {{.*}} = insertelement <8 x i16> [[INTRN_VEC_CAST1]], i16 [[INTRN_VAL]], i32 7 @@ -170,9 +154,7 @@ int32x2_t test_vld1_lane_s32(int32_t const * ptr, int32x2_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_s32(ptr{{.*}}[[PTR:%.*]], <2 x i32>{{.*}}[[SRC:%.*]]) -// LLVM: store <2 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <2 x i32>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i32> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i32> [[SRC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <2 x i32> // LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR]], align 4 // LLVM: {{.*}} = insertelement <2 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 1 @@ -189,9 +171,7 @@ uint32x2_t test_vld1_lane_u32(uint32_t const * ptr, uint32x2_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_u32(ptr{{.*}}[[PTR:%.*]], <2 x i32>{{.*}}[[SRC:%.*]]) -// LLVM: store <2 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <2 x i32>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i32> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i32> [[SRC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <2 x i32> // LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR]], align 4 // LLVM: {{.*}} = insertelement <2 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 1 @@ -209,9 +189,7 @@ int32x4_t test_vld1q_lane_s32(int32_t const * ptr, int32x4_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_s32(ptr{{.*}}[[PTR:%.*]], <4 x i32>{{.*}}[[SRC:%.*]]) -// LLVM: store <4 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[INTRN_VEC:%.*]] = load <4 x i32>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i32> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i32> [[SRC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <4 x i32> // LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR]], align 4 // LLVM: {{.*}} = insertelement <4 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 3 @@ -229,9 +207,7 @@ uint32x4_t test_vld1q_lane_u32(uint32_t const * ptr, uint32x4_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_u32(ptr{{.*}}[[PTR:%.*]], <4 x i32>{{.*}}[[SRC:%.*]]) -// LLVM: store <4 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[INTRN_VEC:%.*]] = load <4 x i32>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i32> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <4 x i32> [[SRC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <4 x i32> // LLVM: [[INTRN_VAL:%.*]] = load i32, ptr [[PTR]], align 4 // LLVM: {{.*}} = insertelement <4 x i32> [[INTRN_VEC_CAST1]], i32 [[INTRN_VAL]], i32 3 @@ -248,9 +224,7 @@ int64x1_t test_vld1_lane_s64(int64_t const * ptr, int64x1_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_s64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) -// LLVM: store <1 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <1 x i64>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <1 x i64> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <1 x i64> // LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR]], align 8 // LLVM: {{.*}} = insertelement <1 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 0 @@ -267,9 +241,7 @@ uint64x1_t test_vld1_lane_u64(uint64_t const * ptr, uint64x1_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1_lane_u64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) -// LLVM: store <1 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[INTRN_VEC:%.*]] = load <1 x i64>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <1 x i64> [[INTRN_VEC]] to <8 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <8 x i8> [[INTRN_VEC_CAST0]] to <1 x i64> // LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR]], align 8 // LLVM: {{.*}} = insertelement <1 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 0 @@ -286,9 +258,7 @@ int64x2_t test_vld1q_lane_s64(int64_t const * ptr, int64x2_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_s64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) -// LLVM: store <2 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[INTRN_VEC:%.*]] = load <2 x i64>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i64> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <2 x i64> // LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR]], align 8 // LLVM: {{.*}} = insertelement <2 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 1 @@ -305,9 +275,7 @@ uint64x2_t test_vld1q_lane_u64(uint64_t const * ptr, uint64x2_t src) { // CIR: {{%.*}} = cir.vec.insert [[VAL]], {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: {{.*}}test_vld1q_lane_u64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) -// LLVM: store <2 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[INTRN_VEC:%.*]] = load <2 x i64>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i64> [[INTRN_VEC]] to <16 x i8> +// LLVM: [[INTRN_VEC_CAST0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8> // LLVM: [[INTRN_VEC_CAST1:%.*]] = bitcast <16 x i8> [[INTRN_VEC_CAST0]] to <2 x i64> // LLVM: [[INTRN_VAL:%.*]] = load i64, ptr [[PTR]], align 8 // LLVM: {{.*}} = insertelement <2 x i64> [[INTRN_VEC_CAST1]], i64 [[INTRN_VAL]], i32 1 @@ -324,9 +292,7 @@ void test_vst1_lane_s8(int8_t * ptr, int8x8_t src) { // CIR: cir.store align(1) [[VAL]], [[PTR]] : !s8i, !cir.ptr // LLVM: {{.*}}test_vst1_lane_s8(ptr{{.*}}[[PTR:%.*]], <8 x i8>{{.*}}[[SRC:%.*]]) -// LLVM: store <8 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[VEC:%.*]] = load <8 x i8>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[RES:%.*]] = extractelement <8 x i8> [[VEC]], i32 7 +// LLVM: [[RES:%.*]] = extractelement <8 x i8> [[SRC]], i32 7 // LLVM: store i8 [[RES]], ptr [[PTR]], align 1 void test_vst1_lane_s16(int16_t * ptr, int16x4_t src) { @@ -340,9 +306,7 @@ void test_vst1_lane_s16(int16_t * ptr, int16x4_t src) { // CIR: cir.store align(2) [[VAL]], [[PTR]] : !s16i, !cir.ptr // LLVM: {{.*}}test_vst1_lane_s16(ptr{{.*}}[[PTR:%.*]], <4 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store <4 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[VEC:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x i16> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x i16> [[SRC]] to <8 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <4 x i16> // LLVM: [[RES:%.*]] = extractelement <4 x i16> [[VEC_CAST1]], i32 3 // LLVM: store i16 [[RES]], ptr [[PTR]], align 2 @@ -358,9 +322,7 @@ void test_vst1_lane_u16(uint16_t * ptr, uint16x4_t src) { // CIR: cir.store align(2) [[VAL]], [[PTR]] : !u16i, !cir.ptr // LLVM: {{.*}}test_vst1_lane_u16(ptr{{.*}}[[PTR:%.*]], <4 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store <4 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[VEC:%.*]] = load <4 x i16>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x i16> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x i16> [[SRC]] to <8 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <4 x i16> // LLVM: [[RES:%.*]] = extractelement <4 x i16> [[VEC_CAST1]], i32 3 // LLVM: store i16 [[RES]], ptr [[PTR]], align 2 @@ -376,9 +338,7 @@ void test_vst1_lane_s32(int32_t * ptr, int32x2_t src) { // CIR: cir.store align(4) [[VAL]], [[PTR]] : !s32i, !cir.ptr // LLVM: {{.*}}test_vst1_lane_s32(ptr{{.*}}[[PTR:%.*]], <2 x i32>{{.*}}[[SRC:%.*]]) -// LLVM: store <2 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[VEC:%.*]] = load <2 x i32>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x i32> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x i32> [[SRC]] to <8 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <2 x i32> // LLVM: [[RES:%.*]] = extractelement <2 x i32> [[VEC_CAST1]], i32 1 // LLVM: store i32 [[RES]], ptr [[PTR]], align 4 @@ -394,9 +354,7 @@ void test_vst1_lane_f32(float32_t * ptr, float32x2_t src) { // CIR: cir.store align(4) [[VAL]], [[PTR]] : !cir.float, !cir.ptr // LLVM: {{.*}}test_vst1_lane_f32(ptr{{.*}}[[PTR:%.*]], <2 x float>{{.*}}[[SRC:%.*]]) -// LLVM: store <2 x float> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[VEC:%.*]] = load <2 x float>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x float> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x float> [[SRC]] to <8 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <2 x float> // LLVM: [[RES:%.*]] = extractelement <2 x float> [[VEC_CAST1]], i32 1 // LLVM: store float [[RES]], ptr [[PTR]], align 4 @@ -412,9 +370,7 @@ void test_vst1_lane_s64(int64_t * ptr, int64x1_t src) { // CIR: cir.store align(8) [[VAL]], [[PTR]] : !s64i, !cir.ptr // LLVM: {{.*}}test_vst1_lane_s64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) -// LLVM: store <1 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[VEC:%.*]] = load <1 x i64>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <1 x i64> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <1 x i64> // LLVM: [[RES:%.*]] = extractelement <1 x i64> [[VEC_CAST1]], i32 0 // LLVM: store i64 [[RES]], ptr [[PTR]], align 8 @@ -430,9 +386,7 @@ void test_vst1_lane_f64(float64_t * ptr, float64x1_t src) { // CIR: cir.store align(8) [[VAL]], [[PTR]] : !cir.double, !cir.ptr // LLVM: {{.*}}test_vst1_lane_f64(ptr{{.*}}[[PTR:%.*]], <1 x double>{{.*}}[[SRC:%.*]]) -// LLVM: store <1 x double> [[SRC]], ptr [[SRC_ADDR:%.*]], align 8 -// LLVM: [[VEC:%.*]] = load <1 x double>, ptr [[SRC_ADDR]], align 8 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <1 x double> [[VEC]] to <8 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <1 x double> [[SRC]] to <8 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <8 x i8> [[VEC_CAST0]] to <1 x double> // LLVM: [[RES:%.*]] = extractelement <1 x double> [[VEC_CAST1]], i32 0 // LLVM: store double [[RES]], ptr [[PTR]], align 8 @@ -448,9 +402,7 @@ void test_vst1q_lane_s8(int8_t * ptr, int8x16_t src) { // CIR: cir.store align(1) [[VAL]], [[PTR]] : !s8i, !cir.ptr // LLVM: {{.*}}test_vst1q_lane_s8(ptr{{.*}}[[PTR:%.*]], <16 x i8>{{.*}}[[SRC:%.*]]) -// LLVM: store <16 x i8> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[VEC:%.*]] = load <16 x i8>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[RES:%.*]] = extractelement <16 x i8> [[VEC]], i32 15 +// LLVM: [[RES:%.*]] = extractelement <16 x i8> [[SRC]], i32 15 // LLVM: store i8 [[RES]], ptr [[PTR]], align 1 @@ -465,9 +417,7 @@ void test_vst1q_lane_s16(int16_t * ptr, int16x8_t src) { // CIR: cir.store align(2) [[VAL]], [[PTR]] : !s16i, !cir.ptr // LLVM: {{.*}}test_vst1q_lane_s16(ptr{{.*}}[[PTR:%.*]], <8 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store <8 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[VEC:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <8 x i16> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <8 x i16> [[SRC]] to <16 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <8 x i16> // LLVM: [[RES:%.*]] = extractelement <8 x i16> [[VEC_CAST1]], i32 7 // LLVM: store i16 [[RES]], ptr [[PTR]], align 2 @@ -483,9 +433,7 @@ void test_vst1q_lane_u16(uint16_t * ptr, uint16x8_t src) { // CIR: cir.store align(2) [[VAL]], [[PTR]] : !u16i, !cir.ptr // LLVM: {{.*}}test_vst1q_lane_u16(ptr{{.*}}[[PTR:%.*]], <8 x i16>{{.*}}[[SRC:%.*]]) -// LLVM: store <8 x i16> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[VEC:%.*]] = load <8 x i16>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <8 x i16> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <8 x i16> [[SRC]] to <16 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <8 x i16> // LLVM: [[RES:%.*]] = extractelement <8 x i16> [[VEC_CAST1]], i32 7 // LLVM: store i16 [[RES]], ptr [[PTR]], align 2 @@ -501,9 +449,7 @@ void test_vst1q_lane_s32(int32_t * ptr, int32x4_t src) { // CIR: cir.store align(4) [[VAL]], [[PTR]] : !s32i, !cir.ptr // LLVM: {{.*}}test_vst1q_lane_s32(ptr{{.*}}[[PTR:%.*]], <4 x i32>{{.*}}[[SRC:%.*]]) -// LLVM: store <4 x i32> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[VEC:%.*]] = load <4 x i32>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x i32> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x i32> [[SRC]] to <16 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <4 x i32> // LLVM: [[RES:%.*]] = extractelement <4 x i32> [[VEC_CAST1]], i32 3 // LLVM: store i32 [[RES]], ptr [[PTR]], align 4 @@ -519,9 +465,7 @@ void test_vst1q_lane_s64(int64_t * ptr, int64x2_t src) { // CIR: cir.store align(8) [[VAL]], [[PTR]] : !s64i, !cir.ptr // LLVM: {{.*}}test_vst1q_lane_s64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) -// LLVM: store <2 x i64> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[VEC:%.*]] = load <2 x i64>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x i64> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <2 x i64> // LLVM: [[RES:%.*]] = extractelement <2 x i64> [[VEC_CAST1]], i32 1 // LLVM: store i64 [[RES]], ptr [[PTR]], align 8 @@ -537,9 +481,7 @@ void test_vst1q_lane_f32(float32_t * ptr, float32x4_t src) { // CIR: cir.store align(4) [[VAL]], [[PTR]] : !cir.float, !cir.ptr // LLVM: {{.*}}test_vst1q_lane_f32(ptr{{.*}}[[PTR:%.*]], <4 x float>{{.*}}[[SRC:%.*]]) -// LLVM: store <4 x float> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[VEC:%.*]] = load <4 x float>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x float> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <4 x float> [[SRC]] to <16 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <4 x float> // LLVM: [[RES:%.*]] = extractelement <4 x float> [[VEC_CAST1]], i32 3 // LLVM: store float [[RES]], ptr [[PTR]], align 4 @@ -555,9 +497,7 @@ void test_vst1q_lane_f64(float64_t * ptr, float64x2_t src) { // CIR: cir.store align(8) [[VAL]], [[PTR]] : !cir.double, !cir.ptr // LLVM: {{.*}}test_vst1q_lane_f64(ptr{{.*}}[[PTR:%.*]], <2 x double>{{.*}}[[SRC:%.*]]) -// LLVM: store <2 x double> [[SRC]], ptr [[SRC_ADDR:%.*]], align 16 -// LLVM: [[VEC:%.*]] = load <2 x double>, ptr [[SRC_ADDR]], align 16 -// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x double> [[VEC]] to <16 x i8> +// LLVM: [[VEC_CAST0:%.*]] = bitcast <2 x double> [[SRC]] to <16 x i8> // LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <2 x double> // LLVM: [[RES:%.*]] = extractelement <2 x double> [[VEC_CAST1]], i32 1 // LLVM: store double [[RES]], ptr [[PTR]], align 8 diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index 0c20576e62d8..6154da28f35f 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -20,6 +20,8 @@ uint8x8_t test_vset_lane_u8(uint8_t a, uint8x8_t b) { // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local <8 x i8> @test_vset_lane_u8(i8 [[A:%.*]], <8 x i8> [[B:%.*]]) +// LLVM: alloca <8 x i8> +// LLVM: alloca i8 // LLVM: [[A_ADR:%.*]] = alloca i8, i64 1, align 1 // LLVM: [[B_ADR:%.*]] = alloca <8 x i8>, i64 1, align 8 // LLVM: store i8 [[A]], ptr [[A_ADR]], align 1 @@ -42,6 +44,8 @@ uint16x4_t test_vset_lane_u16(uint16_t a, uint16x4_t b) { // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local <4 x i16> @test_vset_lane_u16(i16 [[A:%.*]], <4 x i16> [[B:%.*]]) +// LLVM: alloca <4 x i16> +// LLVM: alloca i16 // LLVM: [[A_ADR:%.*]] = alloca i16, i64 1, align 2 // LLVM: [[B_ADR:%.*]] = alloca <4 x i16>, i64 1, align 8 // LLVM: store i16 [[A]], ptr [[A_ADR]], align 2 @@ -64,6 +68,8 @@ uint32x2_t test_vset_lane_u32(uint32_t a, uint32x2_t b) { // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local <2 x i32> @test_vset_lane_u32(i32 [[A:%.*]], <2 x i32> [[B:%.*]]) +// LLVM: alloca <2 x i32> +// LLVM: alloca i32 // LLVM: [[A_ADR:%.*]] = alloca i32, i64 1, align 4 // LLVM: [[B_ADR:%.*]] = alloca <2 x i32>, i64 1, align 8 // LLVM: store i32 [[A]], ptr [[A_ADR]], align 4 @@ -87,6 +93,8 @@ int64x1_t test_vset_lane_u64(int64_t a, int64x1_t b) { // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local <1 x i64> @test_vset_lane_u64(i64 [[A:%.*]], <1 x i64> [[B:%.*]]) +// LLVM: alloca <1 x i64> +// LLVM: alloca i64 // LLVM: [[A_ADR:%.*]] = alloca i64, i64 1, align 8 // LLVM: [[B_ADR:%.*]] = alloca <1 x i64>, i64 1, align 8 // LLVM: store i64 [[A]], ptr [[A_ADR]], align 8 @@ -109,6 +117,8 @@ float32x2_t test_vset_lane_f32(float32_t a, float32x2_t b) { // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local <2 x float> @test_vset_lane_f32(float [[A:%.*]], <2 x float> [[B:%.*]]) +// LLVM: alloca <2 x float> +// LLVM: alloca float // LLVM: [[A_ADR:%.*]] = alloca float, i64 1, align 4 // LLVM: [[B_ADR:%.*]] = alloca <2 x float>, i64 1, align 8 // LLVM: store float [[A]], ptr [[A_ADR]], align 4 @@ -131,6 +141,8 @@ uint8x16_t test_vsetq_lane_u8(uint8_t a, uint8x16_t b) { // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local <16 x i8> @test_vsetq_lane_u8(i8 [[A:%.*]], <16 x i8> [[B:%.*]]) +// LLVM: alloca <16 x i8> +// LLVM: alloca i8 // LLVM: [[A_ADR:%.*]] = alloca i8, i64 1, align 1 // LLVM: [[B_ADR:%.*]] = alloca <16 x i8>, i64 1, align 16 // LLVM: store i8 [[A]], ptr [[A_ADR]], align 1 @@ -153,6 +165,8 @@ uint16x8_t test_vsetq_lane_u16(uint16_t a, uint16x8_t b) { // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local <8 x i16> @test_vsetq_lane_u16(i16 [[A:%.*]], <8 x i16> [[B:%.*]]) +// LLVM: alloca <8 x i16> +// LLVM: alloca i16 // LLVM: [[A_ADR:%.*]] = alloca i16, i64 1, align 2 // LLVM: [[B_ADR:%.*]] = alloca <8 x i16>, i64 1, align 16 // LLVM: store i16 [[A]], ptr [[A_ADR]], align 2 @@ -175,6 +189,8 @@ uint32x4_t test_vsetq_lane_u32(uint32_t a, uint32x4_t b) { // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local <4 x i32> @test_vsetq_lane_u32(i32 [[A:%.*]], <4 x i32> [[B:%.*]]) +// LLVM: alloca <4 x i32> +// LLVM: alloca i32 // LLVM: [[A_ADR:%.*]] = alloca i32, i64 1, align 4 // LLVM: [[B_ADR:%.*]] = alloca <4 x i32>, i64 1, align 16 // LLVM: store i32 [[A]], ptr [[A_ADR]], align 4 @@ -197,6 +213,8 @@ int64x2_t test_vsetq_lane_s64(int64_t a, int64x2_t b) { // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local <2 x i64> @test_vsetq_lane_s64(i64 [[A:%.*]], <2 x i64> [[B:%.*]]) +// LLVM: alloca <2 x i64> +// LLVM: alloca i64 // LLVM: [[A_ADR:%.*]] = alloca i64, i64 1, align 8 // LLVM: [[B_ADR:%.*]] = alloca <2 x i64>, i64 1, align 16 // LLVM: store i64 [[A]], ptr [[A_ADR]], align 8 @@ -219,6 +237,8 @@ float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) { // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local <4 x float> @test_vsetq_lane_f32(float [[A:%.*]], <4 x float> [[B:%.*]]) +// LLVM: alloca <4 x float> +// LLVM: alloca float // LLVM: [[A_ADR:%.*]] = alloca float, i64 1, align 4 // LLVM: [[B_ADR:%.*]] = alloca <4 x float>, i64 1, align 16 // LLVM: store float [[A]], ptr [[A_ADR]], align 4 @@ -241,6 +261,7 @@ uint8_t test_vget_lane_u8(uint8x8_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i8 @test_vget_lane_u8(<8 x i8> [[ARG:%.*]]) +// LLVM: alloca <8 x i8> // LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i8>, i64 1, align 8 // LLVM: store <8 x i8> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <8 x i8>, ptr [[ARG_SAVE:%.*]], align 8 @@ -258,6 +279,7 @@ uint8_t test_vgetq_lane_u8(uint8x16_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i8 @test_vgetq_lane_u8(<16 x i8> [[ARG:%.*]]) +// LLVM: alloca <16 x i8> // LLVM: [[ARG_SAVE:%.*]] = alloca <16 x i8>, i64 1, align 16 // LLVM: store <16 x i8> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <16 x i8>, ptr [[ARG_SAVE:%.*]], align 16 @@ -275,6 +297,7 @@ uint16_t test_vget_lane_u16(uint16x4_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i16 @test_vget_lane_u16(<4 x i16> [[ARG:%.*]]) +// LLVM: alloca <4 x i16> // LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i16>, i64 1, align 8 // LLVM: store <4 x i16> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <4 x i16>, ptr [[ARG_SAVE:%.*]], align 8 @@ -292,6 +315,7 @@ uint16_t test_vgetq_lane_u16(uint16x8_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i16 @test_vgetq_lane_u16(<8 x i16> [[ARG:%.*]]) +// LLVM: alloca <8 x i16> // LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i16>, i64 1, align 16 // LLVM: store <8 x i16> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <8 x i16>, ptr [[ARG_SAVE:%.*]], align 16 @@ -309,6 +333,7 @@ uint32_t test_vget_lane_u32(uint32x2_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i32 @test_vget_lane_u32(<2 x i32> [[ARG:%.*]]) +// LLVM: alloca <2 x i32> // LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i32>, i64 1, align 8 // LLVM: store <2 x i32> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <2 x i32>, ptr [[ARG_SAVE:%.*]], align 8 @@ -326,6 +351,7 @@ uint32_t test_vgetq_lane_u32(uint32x4_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i32 @test_vgetq_lane_u32(<4 x i32> [[ARG:%.*]]) +// LLVM: alloca <4 x i32> // LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i32>, i64 1, align 16 // LLVM: store <4 x i32> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <4 x i32>, ptr [[ARG_SAVE:%.*]], align 16 @@ -343,6 +369,7 @@ uint64_t test_vget_lane_u64(uint64x1_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i64 @test_vget_lane_u64(<1 x i64> [[ARG:%.*]]) +// LLVM: alloca <1 x i64> // LLVM: [[ARG_SAVE:%.*]] = alloca <1 x i64>, i64 1, align 8 // LLVM: store <1 x i64> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <1 x i64>, ptr [[ARG_SAVE:%.*]], align 8 @@ -360,6 +387,7 @@ uint64_t test_vgetq_lane_u64(uint64x2_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i64 @test_vgetq_lane_u64(<2 x i64> [[ARG:%.*]]) +// LLVM: alloca <2 x i64> // LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i64>, i64 1, align 16 // LLVM: store <2 x i64> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <2 x i64>, ptr [[ARG_SAVE:%.*]], align 16 @@ -377,6 +405,7 @@ float32_t test_vget_lane_f32(float32x2_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local float @test_vget_lane_f32(<2 x float> [[ARG:%.*]]) +// LLVM: alloca <2 x float> // LLVM: [[ARG_SAVE:%.*]] = alloca <2 x float>, i64 1, align 8 // LLVM: store <2 x float> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <2 x float>, ptr [[ARG_SAVE:%.*]], align 8 @@ -394,6 +423,7 @@ float64_t test_vget_lane_f64(float64x1_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local double @test_vget_lane_f64(<1 x double> [[ARG:%.*]]) +// LLVM: alloca <1 x double> // LLVM: [[ARG_SAVE:%.*]] = alloca <1 x double>, i64 1, align 8 // LLVM: store <1 x double> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <1 x double>, ptr [[ARG_SAVE:%.*]], align 8 @@ -411,6 +441,7 @@ float32_t test_vgetq_lane_f32(float32x4_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local float @test_vgetq_lane_f32(<4 x float> [[ARG:%.*]]) +// LLVM: alloca <4 x float> // LLVM: [[ARG_SAVE:%.*]] = alloca <4 x float>, i64 1, align 16 // LLVM: store <4 x float> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <4 x float>, ptr [[ARG_SAVE:%.*]], align 16 @@ -428,6 +459,7 @@ float64_t test_vgetq_lane_f64(float64x2_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local double @test_vgetq_lane_f64(<2 x double> [[ARG:%.*]]) +// LLVM: alloca <2 x double> // LLVM: [[ARG_SAVE:%.*]] = alloca <2 x double>, i64 1, align 16 // LLVM: store <2 x double> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <2 x double>, ptr [[ARG_SAVE:%.*]], align 16 diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 969751fe65b4..288ea8308cf3 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -6008,9 +6008,7 @@ uint8x8_t test_vqrshrun_n_s16(int16x8_t a) { // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> {{%.*}} to <16 x i8> // LLVM: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // LLVM: [[VQRSHRUN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> [[VQRSHRUN_N]], i32 3) - // LLVM: store <8 x i8> [[VQRSHRUN_N1]], ptr [[RET:%.*]], align 8 - // LLVM: [[RETVAL:%.*]] = load <8 x i8>, ptr [[RET]], align 8 - // LLVM: ret <8 x i8> [[RETVAL]] + // LLVM: ret <8 x i8> [[VQRSHRUN_N1]] } uint16x4_t test_vqrshrun_n_s32(int32x4_t a) { @@ -6025,9 +6023,7 @@ uint16x4_t test_vqrshrun_n_s32(int32x4_t a) { // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> {{%.*}} to <16 x i8> // LLVM: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> // LLVM: [[VQRSHRUN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> [[VQRSHRUN_N]], i32 9) - // LLVM: store <4 x i16> [[VQRSHRUN_N1]], ptr [[RET:%.*]], align 8 - // LLVM: [[RETVAL:%.*]] = load <4 x i16>, ptr [[RET]], align 8 - // LLVM: ret <4 x i16> [[RETVAL]] + // LLVM: ret <4 x i16> [[VQRSHRUN_N1]] } uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { @@ -6042,9 +6038,7 @@ uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> {{%.*}} to <16 x i8> // LLVM: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> // LLVM: [[VQRSHRUN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> [[VQRSHRUN_N]], i32 19) - // LLVM: store <2 x i32> [[VQRSHRUN_N1]], ptr [[RET:%.*]], align 8 - // LLVM: [[RETVAL:%.*]] = load <2 x i32>, ptr [[RET]], align 8 - // LLVM: ret <2 x i32> [[RETVAL]] + // LLVM: ret <2 x i32> [[VQRSHRUN_N1]] } // NYI-LABEL: @test_vqrshrun_high_n_s16( diff --git a/clang/test/CIR/CodeGen/aarch64-neon-vdup-lane.c b/clang/test/CIR/CodeGen/aarch64-neon-vdup-lane.c index 4799e0931c55..e9b95525c0f0 100644 --- a/clang/test/CIR/CodeGen/aarch64-neon-vdup-lane.c +++ b/clang/test/CIR/CodeGen/aarch64-neon-vdup-lane.c @@ -19,6 +19,7 @@ int8_t test_vdupb_lane_s8(int8x8_t src) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i8 @test_vdupb_lane_s8(<8 x i8> [[ARG:%.*]]) +// LLVM: alloca <8 x i8> // LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i8>, i64 1, align 8 // LLVM: store <8 x i8> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <8 x i8>, ptr [[ARG_SAVE:%.*]], align 8 @@ -36,6 +37,7 @@ int8_t test_vdupb_laneq_s8(int8x16_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i8 @test_vdupb_laneq_s8(<16 x i8> [[ARG:%.*]]) +// LLVM: alloca <16 x i8> // LLVM: [[ARG_SAVE:%.*]] = alloca <16 x i8>, i64 1, align 16 // LLVM: store <16 x i8> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <16 x i8>, ptr [[ARG_SAVE:%.*]], align 16 @@ -54,6 +56,7 @@ int16_t test_vduph_lane_s16(int16x4_t src) { // LLVM: define dso_local i16 @test_vduph_lane_s16(<4 x i16> [[ARG:%.*]]) +// LLVM: alloca <4 x i16> // LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i16>, i64 1, align 8 // LLVM: store <4 x i16> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <4 x i16>, ptr [[ARG_SAVE:%.*]], align 8 @@ -71,6 +74,7 @@ int16_t test_vduph_laneq_s16(int16x8_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i16 @test_vduph_laneq_s16(<8 x i16> [[ARG:%.*]]) +// LLVM: alloca <8 x i16> // LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i16>, i64 1, align 16 // LLVM: store <8 x i16> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <8 x i16>, ptr [[ARG_SAVE:%.*]], align 16 @@ -88,6 +92,7 @@ int32_t test_vdups_lane_s32(int32x2_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i32 @test_vdups_lane_s32(<2 x i32> [[ARG:%.*]]) +// LLVM: alloca <2 x i32> // LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i32>, i64 1, align 8 // LLVM: store <2 x i32> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <2 x i32>, ptr [[ARG_SAVE:%.*]], align 8 @@ -105,6 +110,7 @@ int32_t test_vdups_laneq_s32(int32x4_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i32 @test_vdups_laneq_s32(<4 x i32> [[ARG:%.*]]) +// LLVM: alloca <4 x i32> // LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i32>, i64 1, align 16 // LLVM: store <4 x i32> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <4 x i32>, ptr [[ARG_SAVE:%.*]], align 16 @@ -122,6 +128,7 @@ int64_t test_vdupd_lane_s64(int64x1_t src) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i64 @test_vdupd_lane_s64(<1 x i64> [[ARG:%.*]]) +// LLVM: alloca <1 x i64> // LLVM: [[ARG_SAVE:%.*]] = alloca <1 x i64>, i64 1, align 8 // LLVM: store <1 x i64> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <1 x i64>, ptr [[ARG_SAVE:%.*]], align 8 @@ -139,6 +146,7 @@ int64_t test_vdupd_laneq_s64(int64x2_t a) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local i64 @test_vdupd_laneq_s64(<2 x i64> [[ARG:%.*]]) +// LLVM: alloca <2 x i64> // LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i64>, i64 1, align 16 // LLVM: store <2 x i64> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <2 x i64>, ptr [[ARG_SAVE:%.*]], align 16 @@ -156,6 +164,7 @@ float32_t test_vdups_lane_f32(float32x2_t src) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local float @test_vdups_lane_f32(<2 x float> [[ARG:%.*]]) +// LLVM: alloca <2 x float> // LLVM: [[ARG_SAVE:%.*]] = alloca <2 x float>, i64 1, align 8 // LLVM: store <2 x float> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <2 x float>, ptr [[ARG_SAVE:%.*]], align 8 @@ -173,6 +182,7 @@ float64_t test_vdupd_lane_f64(float64x1_t src) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local double @test_vdupd_lane_f64(<1 x double> [[ARG:%.*]]) +// LLVM: alloca <1 x double> // LLVM: [[ARG_SAVE:%.*]] = alloca <1 x double>, i64 1, align 8 // LLVM: store <1 x double> [[ARG]], ptr [[ARG_SAVE]], align 8 // LLVM: [[TMP:%.*]] = load <1 x double>, ptr [[ARG_SAVE:%.*]], align 8 @@ -190,6 +200,7 @@ float32_t test_vdups_laneq_f32(float32x4_t src) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local float @test_vdups_laneq_f32(<4 x float> [[ARG:%.*]]) +// LLVM: alloca <4 x float> // LLVM: [[ARG_SAVE:%.*]] = alloca <4 x float>, i64 1, align 16 // LLVM: store <4 x float> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <4 x float>, ptr [[ARG_SAVE:%.*]], align 16 @@ -207,6 +218,7 @@ float64_t test_vdupd_laneq_f64(float64x2_t src) { // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector // LLVM: define dso_local double @test_vdupd_laneq_f64(<2 x double> [[ARG:%.*]]) +// LLVM: alloca <2 x double> // LLVM: [[ARG_SAVE:%.*]] = alloca <2 x double>, i64 1, align 16 // LLVM: store <2 x double> [[ARG]], ptr [[ARG_SAVE]], align 16 // LLVM: [[TMP:%.*]] = load <2 x double>, ptr [[ARG_SAVE:%.*]], align 16 diff --git a/clang/test/CIR/CodeGen/builtin-bit-cast.cpp b/clang/test/CIR/CodeGen/builtin-bit-cast.cpp index 696b472a159f..d7aedac960b7 100644 --- a/clang/test/CIR/CodeGen/builtin-bit-cast.cpp +++ b/clang/test/CIR/CodeGen/builtin-bit-cast.cpp @@ -130,7 +130,7 @@ two_ints test_rvalue_aggregate() { // CIR: } // LLVM-LABEL: define dso_local %struct.two_ints @_Z21test_rvalue_aggregatev -// LLVM: %[[#SRC_SLOT:]] = alloca i64, i64 1, align 8 -// LLVM-NEXT: store i64 42, ptr %[[#SRC_SLOT]], align 8 +// LLVM: %[[#SRC_SLOT:]] = alloca i64, i64 1, align 8 +// LLVM: store i64 42, ptr %[[#SRC_SLOT]], align 8 // LLVM-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %{{.+}}, ptr %[[#SRC_SLOT]], i64 8, i1 false) // LLVM: } diff --git a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp index f20f687ecf9a..2d9c97420311 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp @@ -63,10 +63,10 @@ void test() { // LLVM: } // LLVM: define dso_local void @_ZSt4testv() -// LLVM: br label %[[SCOPE_START:.*]], -// LLVM: [[SCOPE_START]]: ; preds = %0 // LLVM: [[INIT_STRUCT:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, // LLVM: [[ELEM_ARRAY_PTR:%.*]] = alloca [2 x ptr], i64 1, align 8, +// LLVM: br label %[[SCOPE_START:.*]], +// LLVM: [[SCOPE_START]]: ; preds = %0 // LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr ptr, ptr [[ELEM_ARRAY_PTR]], i32 0, // LLVM: store ptr @.str, ptr [[PTR_FIRST_ELEM]], align 8, // LLVM: [[PTR_SECOND_ELEM:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 1, diff --git a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp index 183a04d78045..2cf24f7f159a 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp @@ -47,10 +47,10 @@ void test() { // LLVM: store %"class.std::initializer_list" [[ARG]], ptr [[LOCAL]], align 8, // LLVM: define dso_local void @_ZSt4testv() -// LLVM: br label %[[SCOPE_START:.*]], -// LLVM: [[SCOPE_START]]: ; preds = %0 // LLVM: [[INIT_STRUCT:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, // LLVM: [[ELEM_ARRAY:%.*]] = alloca [1 x i32], i64 1, align 4, +// LLVM: br label %[[SCOPE_START:.*]], +// LLVM: [[SCOPE_START]]: ; preds = %0 // LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr i32, ptr [[ELEM_ARRAY]], i32 0, // LLVM: store i32 7, ptr [[PTR_FIRST_ELEM]], align 4, // LLVM: [[ELEM_ARRAY_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0, diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 67a851dff2de..69f7c351c671 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -40,7 +40,6 @@ void yo() { // LLVM-LABEL: @_Z2yov() -// LLVM: 2: // LLVM: %[[Vec:.*]] = alloca %struct.Vec // LLVM: br label %[[INVOKE_BB:.*]], @@ -101,7 +100,7 @@ void yo2() { // CIR: } // CIR_FLAT-LABEL: @_Z3yo2v -// CIR_FLAT: cir.try_call @_ZN3VecC1Ev(%2) ^[[NEXT_CALL_PREP:.*]], ^[[PAD_NODTOR:.*]] : (!cir.ptr) -> () +// CIR_FLAT: cir.try_call @_ZN3VecC1Ev(%[[vec:.+]]) ^[[NEXT_CALL_PREP:.*]], ^[[PAD_NODTOR:.*]] : (!cir.ptr) -> () // CIR_FLAT: ^[[NEXT_CALL_PREP]]: // CIR_FLAT: cir.br ^[[NEXT_CALL:.*]] loc // CIR_FLAT: ^[[NEXT_CALL]]: @@ -117,7 +116,7 @@ void yo2() { // CIR_FLAT: cir.br ^[[CATCH_BEGIN:.*]](%exception_ptr : !cir.ptr) // CIR_FLAT: ^[[PAD_DTOR]]: // CIR_FLAT: %exception_ptr_0, %type_id_1 = cir.eh.inflight_exception -// CIR_FLAT: cir.call @_ZN3VecD1Ev(%2) : (!cir.ptr) -> () +// CIR_FLAT: cir.call @_ZN3VecD1Ev(%[[vec]]) : (!cir.ptr) -> () // CIR_FLAT: cir.br ^[[CATCH_BEGIN]](%exception_ptr_0 : !cir.ptr) // CIR_FLAT: ^[[CATCH_BEGIN]]( // CIR_FLAT: cir.catch_param begin @@ -169,7 +168,6 @@ void yo3(bool x) { // CIR: cir.return // CIR_FLAT-LABEL: @_Z3yo3b -// CIR_FLAT: ^bb1: // CIR_FLAT: %[[V1:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v1" // CIR_FLAT: %[[V2:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v2" // CIR_FLAT: %[[V3:.*]] = cir.alloca ![[VecTy]], !cir.ptr, ["v3" diff --git a/clang/test/CIR/Lowering/OpenMP/parallel.cir b/clang/test/CIR/Lowering/OpenMP/parallel.cir index da98868eddb1..81f6bbaa59cf 100644 --- a/clang/test/CIR/Lowering/OpenMP/parallel.cir +++ b/clang/test/CIR/Lowering/OpenMP/parallel.cir @@ -26,8 +26,8 @@ module { // CHECK: ret void // CHECK-NEXT: } // CHECK: define{{.*}} void @omp_parallel..omp_par(ptr +// CHECK: %[[XVar:.*]] = load ptr, ptr %{{.*}}, align 8 // CHECK: %[[YVar:.*]] = load ptr, ptr %{{.*}}, align 8 -// CHECK: %[[XVar:.*]] = alloca i32, i64 1, align 4 // CHECK: store i32 1, ptr %[[XVar]], align 4 // CHECK: %[[XVal:.*]] = load i32, ptr %[[XVar]], align 4 // CHECK: %[[BinOp:.*]] = add i32 %[[XVal]], 1 diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 5c5ed4736f7a..ad1241e1cad3 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -53,6 +53,8 @@ module { } // MLIR-LABEL: llvm.func @dot( +// MLIR: %[[VAL_1:.*]] = llvm.mlir.constant(1 : index) : i64 +// MLIR: %[[VAL_2:.*]] = llvm.alloca %[[VAL_1]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR: %[[VAL_3:.*]] = llvm.mlir.constant(1 : index) : i64 // MLIR: %[[VAL_4:.*]] = llvm.alloca %[[VAL_3]] x !llvm.ptr {alignment = 8 : i64} : (i64) -> !llvm.ptr // MLIR: %[[VAL_5:.*]] = llvm.mlir.constant(1 : index) : i64 @@ -70,13 +72,11 @@ module { // MLIR: llvm.store %[[VAL_13]], %[[VAL_12]] {{.*}}: f64, !llvm.ptr // MLIR: llvm.br ^bb1 // MLIR: ^bb1: -// MLIR: %[[VAL_14:.*]] = llvm.mlir.constant(1 : index) : i64 -// MLIR: %[[VAL_15:.*]] = llvm.alloca %[[VAL_14]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr // MLIR: %[[VAL_16:.*]] = llvm.mlir.constant(0 : i32) : i32 -// MLIR: llvm.store %[[VAL_16]], %[[VAL_15]] {{.*}}: i32, !llvm.ptr +// MLIR: llvm.store %[[VAL_16]], %[[VAL_2]] {{.*}}: i32, !llvm.ptr // MLIR: llvm.br ^bb2 // MLIR: ^bb2: -// MLIR: %[[VAL_17:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 +// MLIR: %[[VAL_17:.*]] = llvm.load %[[VAL_2]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_18:.*]] = llvm.load %[[VAL_8]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_19:.*]] = llvm.icmp "slt" %[[VAL_17]], %[[VAL_18]] : i32 // MLIR: %[[VAL_20:.*]] = llvm.zext %[[VAL_19]] : i1 to i32 @@ -85,12 +85,12 @@ module { // MLIR: llvm.cond_br %[[VAL_22]], ^bb3, ^bb5 // MLIR: ^bb3: // MLIR: %[[VAL_23:.*]] = llvm.load %[[VAL_4]] {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr -// MLIR: %[[VAL_24:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 +// MLIR: %[[VAL_24:.*]] = llvm.load %[[VAL_2]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_25:.*]] = llvm.sext %[[VAL_24]] : i32 to i64 // MLIR: %[[VAL_26:.*]] = llvm.getelementptr %[[VAL_23]]{{\[}}%[[VAL_25]]] : (!llvm.ptr, i64) -> !llvm.ptr, f64 // MLIR: %[[VAL_27:.*]] = llvm.load %[[VAL_26]] {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: %[[VAL_28:.*]] = llvm.load %[[VAL_6]] {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr -// MLIR: %[[VAL_29:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 +// MLIR: %[[VAL_29:.*]] = llvm.load %[[VAL_2]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_30:.*]] = llvm.sext %[[VAL_29]] : i32 to i64 // MLIR: %[[VAL_31:.*]] = llvm.getelementptr %[[VAL_28]]{{\[}}%[[VAL_30]]] : (!llvm.ptr, i64) -> !llvm.ptr, f64 // MLIR: %[[VAL_32:.*]] = llvm.load %[[VAL_31]] {alignment = 8 : i64} : !llvm.ptr -> f64 @@ -100,10 +100,10 @@ module { // MLIR: llvm.store %[[VAL_35]], %[[VAL_12]] {{.*}}: f64, !llvm.ptr // MLIR: llvm.br ^bb4 // MLIR: ^bb4: -// MLIR: %[[VAL_36:.*]] = llvm.load %[[VAL_15]] {alignment = 4 : i64} : !llvm.ptr -> i32 +// MLIR: %[[VAL_36:.*]] = llvm.load %[[VAL_2]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_37:.*]] = llvm.mlir.constant(1 : i32) : i32 // MLIR: %[[VAL_38:.*]] = llvm.add %[[VAL_36]], %[[VAL_37]] : i32 -// MLIR: llvm.store %[[VAL_38]], %[[VAL_15]] {{.*}}: i32, !llvm.ptr +// MLIR: llvm.store %[[VAL_38]], %[[VAL_2]] {{.*}}: i32, !llvm.ptr // MLIR: llvm.br ^bb2 // MLIR: ^bb5: // MLIR: llvm.br ^bb6 diff --git a/clang/test/CIR/Lowering/goto-interscope.c b/clang/test/CIR/Lowering/goto-interscope.c new file mode 100644 index 000000000000..bcaf89d50690 --- /dev/null +++ b/clang/test/CIR/Lowering/goto-interscope.c @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s +struct def; +typedef struct def *decl; +struct def { + int index; +}; +struct def d; +int foo(unsigned char cond, unsigned num) +{ + if (cond) + goto label; + { + decl b = &d; + label: + return b->index; + } + + { + int a[num]; + if (num > 0) + return a[0] + a[1]; + } + return 0; +} +// It is fine enough to check the LLVM IR are generated succesfully. +// CHECK: define {{.*}}i32 @foo +// CHECK: alloca ptr +// CHECK: alloca i8 +// Check the dynamic alloca is not hoisted and live in a seperate block. +// CHECK: : +// Check we have a dynamic alloca +// CHECK: alloca i32, i64 %{{.*}} diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index add46429cba2..48f8bfdcc5a3 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -14,23 +14,23 @@ module { } // MLIR: llvm.func @foo() -// MLIR-NEXT: llvm.br ^bb1 -// MLIR-NEXT: ^bb1: -// MLIR-DAG: [[v1:%[0-9]]] = llvm.mlir.constant(4 : i32) : i32 -// MLIR-DAG: [[v2:%[0-9]]] = llvm.mlir.constant(1 : index) : i64 -// MLIR-DAG: [[v3:%[0-9]]] = llvm.alloca [[v2]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr -// MLIR-NEXT: llvm.store [[v1]], [[v3]] {{.*}}: i32, !llvm.ptr +// MLIR: [[v2:%[0-9]]] = llvm.mlir.constant(1 : index) : i64 +// MLIR: [[v3:%[0-9]]] = llvm.alloca [[v2]] x i32 {alignment = 4 : i64} : (i64) -> !llvm.ptr +// MLIR: llvm.br ^bb1 +// MLIR: ^bb1: +// MLIR-DAG: [[v1:%[0-9]]] = llvm.mlir.constant(4 : i32) : i32 +// MLIR: llvm.store [[v1]], [[v3]] {{.*}}: i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 // MLIR-NEXT: ^bb2: // MLIR-NEXT: llvm.return // LLVM: define void @foo() -// LLVM-NEXT: br label %1 +// LLVM-NEXT: %1 = alloca i32, i64 1, align 4 +// LLVM-NEXT: br label %2 // LLVM-EMPTY: -// LLVM-NEXT: 1: -// LLVM-NEXT: %2 = alloca i32, i64 1, align 4 -// LLVM-NEXT: store i32 4, ptr %2, align 4 +// LLVM-NEXT: 2: +// LLVM-NEXT: store i32 4, ptr %1, align 4 // LLVM-NEXT: br label %3 // LLVM-EMPTY: // LLVM-NEXT: 3: From 7c32a265ecbb597efbc6c247a228df5f011d491f Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Wed, 9 Oct 2024 21:19:41 +0300 Subject: [PATCH 1922/2301] [CIR][CodeGen] Enable -fno-PIE (#940) The title describes the purpose of the PR. The logic was gotten from the original CodeGen, and I added a test to check that `-fno-PIE` is indeed enabled. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 19 ++++++++++++++++++- clang/test/CIR/CodeGen/no-pie.c | 11 +++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/no-pie.c diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 68e8e93e5b77..f8d39e40b6c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -449,7 +449,24 @@ static bool shouldAssumeDSOLocal(const CIRGenModule &CGM, return false; if (CGOpts.DirectAccessExternalData) { - llvm_unreachable("-fdirect-access-external-data not supported"); + // If -fdirect-access-external-data (default for -fno-pic), set dso_local + // for non-thread-local variables. If the symbol is not defined in the + // executable, a copy relocation will be needed at link time. dso_local is + // excluded for thread-local variables because they generally don't support + // copy relocations. + if (auto gv = dyn_cast(GV.getOperation())) + if (!gv.getTlsModelAttr()) + return true; + + // -fno-pic sets dso_local on a function declaration to allow direct + // accesses when taking its address (similar to a data symbol). If the + // function is not defined in the executable, a canonical PLT entry will be + // needed at link time. -fno-direct-access-external-data can avoid the + // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as + // it could just cause trouble without providing perceptible benefits. + if (isa(GV) && !CGOpts.NoPLT && + RM == llvm::Reloc::Static) + return true; } // If we can use copy relocations we can assume it is local. diff --git a/clang/test/CIR/CodeGen/no-pie.c b/clang/test/CIR/CodeGen/no-pie.c new file mode 100644 index 000000000000..c0ffd9790392 --- /dev/null +++ b/clang/test/CIR/CodeGen/no-pie.c @@ -0,0 +1,11 @@ +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-PIE -S -Xclang -emit-cir %s -o %t1.cir +// RUN: FileCheck --input-file=%t1.cir %s -check-prefix=CIR +// RUN: %clang -target x86_64-unknown-linux-gnu -fclangir -fno-PIE -S -Xclang -emit-llvm %s -o %t1.ll +// RUN: FileCheck --input-file=%t1.ll %s -check-prefix=LLVM + +extern int var; +int get() { + return var; +} +// CIR: cir.global "private" external dsolocal @var : !s32i {alignment = 4 : i64} +// LLVM: @var = external dso_local global i32 \ No newline at end of file From 4779878a0563f5767d3f3968d70b1be4249e428f Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 10 Oct 2024 02:29:15 +0800 Subject: [PATCH 1923/2301] [CIR][CIRGen] Add support for __fp16 type (#950) This PR adds support for the `__fp16` type. CIRGen and LLVM lowering is included. Resolve #900 . --- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 64 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 7 +- clang/test/CIR/CodeGen/fp16-ops.c | 805 +++++++++++++++++++++ 3 files changed, 866 insertions(+), 10 deletions(-) create mode 100644 clang/test/CIR/CodeGen/fp16-ops.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 1acc5a41b29a..6d2e956731ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -502,8 +502,17 @@ class ScalarExprEmitter : public StmtVisitor { // TODO(cir): CGFPOptionsRAII assert(!MissingFeatures::CGFPOptionsRAII()); - if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) - llvm_unreachable("__fp16 type NYI"); + if (type->isHalfType() && + !CGF.getContext().getLangOpts().NativeHalfType) { + // Another special case: half FP increment should be done via float + if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { + llvm_unreachable("cast via llvm.convert.from.fp16 is NYI"); + } else { + value = Builder.createCast(CGF.getLoc(E->getExprLoc()), + mlir::cir::CastKind::floating, input, + CGF.CGM.FloatTy); + } + } if (mlir::isa( value.getType())) { @@ -511,7 +520,7 @@ class ScalarExprEmitter : public StmtVisitor { // NOTE(CIR): clang calls CreateAdd but folds this to a unary op auto kind = (isInc ? mlir::cir::UnaryOpKind::Inc : mlir::cir::UnaryOpKind::Dec); - value = buildUnaryOp(E, kind, input); + value = buildUnaryOp(E, kind, value); } else { // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or // __float128. Convert from float. @@ -537,8 +546,16 @@ class ScalarExprEmitter : public StmtVisitor { value = Builder.createBinop(value, mlir::cir::BinOpKind::Add, amt); } - if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) - llvm_unreachable("NYI"); + if (type->isHalfType() && + !CGF.getContext().getLangOpts().NativeHalfType) { + if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { + llvm_unreachable("cast via llvm.convert.to.fp16 is NYI"); + } else { + value = Builder.createCast(CGF.getLoc(E->getExprLoc()), + mlir::cir::CastKind::floating, value, + input.getType()); + } + } } else if (type->isFixedPointType()) { llvm_unreachable("no fixed point inc/dec yet"); @@ -1043,7 +1060,23 @@ class ScalarExprEmitter : public StmtVisitor { // Cast from half through float if half isn't a native type. if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { - llvm_unreachable("not implemented"); + // Cast to FP using the intrinsic if the half type itself isn't supported. + if (mlir::isa(DstTy)) { + if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) + llvm_unreachable("cast via llvm.convert.from.fp16 is NYI"); + } else { + // Cast to other types through float, using either the intrinsic or + // FPExt, depending on whether the half type itself is supported (as + // opposed to operations on half, available with NativeHalfType). + if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { + llvm_unreachable("cast via llvm.convert.from.fp16 is NYI"); + } else { + Src = Builder.createCast( + CGF.getLoc(Loc), mlir::cir::CastKind::floating, Src, CGF.FloatTy); + } + SrcType = CGF.getContext().FloatTy; + SrcTy = CGF.FloatTy; + } } // TODO(cir): LLVM codegen ignore conversions like int -> uint, @@ -1098,13 +1131,28 @@ class ScalarExprEmitter : public StmtVisitor { // Cast to half through float if half isn't a native type. if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { - llvm_unreachable("NYI"); + // Make sure we cast in a single step if from another FP type. + if (mlir::isa(SrcTy)) { + // Use the intrinsic if the half type itself isn't supported + // (as opposed to operations on half, available with NativeHalfType). + if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) + llvm_unreachable("cast via llvm.convert.to.fp16 is NYI"); + // If the half type is supported, just use an fptrunc. + return Builder.createCast(CGF.getLoc(Loc), + mlir::cir::CastKind::floating, Src, DstTy); + } + DstTy = CGF.FloatTy; } Res = buildScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); if (DstTy != ResTy) { - llvm_unreachable("NYI"); + if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { + llvm_unreachable("cast via llvm.convert.to.fp16 is NYI"); + } else { + Res = Builder.createCast(CGF.getLoc(Loc), mlir::cir::CastKind::floating, + Res, ResTy); + } } if (Opts.EmitImplicitIntegerTruncationChecks) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 6ab3306800bc..936e747d26b2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -470,8 +470,11 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { ResultType = CGM.FP16Ty; break; case BuiltinType::Half: - // Should be the same as above? - assert(0 && "not implemented"); + if (Context.getLangOpts().NativeHalfType || + !Context.getTargetInfo().useFP16ConversionIntrinsics()) + ResultType = CGM.FP16Ty; + else + llvm_unreachable("NYI"); break; case BuiltinType::BFloat16: ResultType = CGM.BFloat16Ty; diff --git a/clang/test/CIR/CodeGen/fp16-ops.c b/clang/test/CIR/CodeGen/fp16-ops.c new file mode 100644 index 000000000000..04cf64700d74 --- /dev/null +++ b/clang/test/CIR/CodeGen/fp16-ops.c @@ -0,0 +1,805 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -o %t.cir %s +// FileCheck --input-file=%t.cir --check-prefix=CHECK %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o %t.ll %s +// FileCheck --input-file=%t.ll --check-prefix=CHECK-LLVM %s + +// TODO: once we have support for targets that does not have native fp16 +// support but have fp16 conversion intrinsic support, add tests for +// these targets. + +volatile unsigned test; +volatile int i0; +volatile __fp16 h0 = 0.0, h1 = 1.0, h2; +volatile float f0, f1, f2; +volatile double d0; +short s0; + +void foo(void) { + test = (h0); + // CHECK: %{{.+}} = cir.cast(float_to_int, %{{.+}} : !cir.f16), !u32i + + // CHECK-LLVM: %{{.+}} = fptoui half %{{.+}} to i32 + + h0 = (test); + // CHECK: %{{.+}} = cir.cast(int_to_float, %{{.+}} : !u32i), !cir.f16 + + // CHECK-LLVM: %{{.+}} = uitofp i32 %{{.+}} to half + + test = (!h1); + // CHECK: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.f16), !cir.bool + // CHECK-NEXT: %[[#B:]] = cir.unary(not, %[[#A]]) : !cir.bool, !cir.bool + // CHECK-NEXT: %[[#C:]] = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // CHECK-LLVM: %[[#A:]] = fcmp une half %{{.+}}, 0xH0000 + // CHECK-LLVM-NEXT: %[[#B:]] = zext i1 %[[#A]] to i8 + // CHECK-LLVM-NEXT: %[[#C:]] = xor i8 %[[#B]], 1 + // CHECK-LLVM-NEXT: %{{.+}} = zext i8 %[[#C]] to i32 + + h1 = -h1; + // CHECK-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.f16 + // CHECK: %{{.+}} = cir.unary(minus, %{{.+}}) : !cir.f16, !cir.f16 + + // CHECK-LLVM: %{{.+}} = fneg half %{{.+}} + + h1 = +h1; + // CHECK-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NOT: %{{.+}} = cir.cast(floating, %{{.+}} : !cir.float), !cir.f16 + // CHECK: %{{.+}} = cir.unary(plus, %{{.+}}) : !cir.f16, !cir.f16 + + // CHECK-LLVM: %[[#A:]] = load volatile half, ptr @h1, align 2 + // CHECK-LLVM-NEXT: store volatile half %[[#A]], ptr @h1, align 2 + + h1++; + // CHECK: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.f16 + // CHECK-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + // CHECK-LLVM: %{.+} = fadd half %{.+}, 0xH3C00 + + ++h1; + // CHECK: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.f16 + // CHECK-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + // CHECK-LLVM: %{.+} = fadd half %{.+}, 0xH3C00 + + --h1; + // CHECK: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.f16 + // CHECK-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + // CHECK-LLVM: %{.+} = fadd half %{.+}, 0xHBC00 + + h1--; + // CHECK: %[[#A:]] = cir.const #cir.fp<-1.000000e+00> : !cir.f16 + // CHECK-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + // CHECK-LLVM: %{.+} = fadd half %{.+}, 0xHBC00 + + h1 = h0 * h2; + // CHECK: %{{.+}} = cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.f16 + + // CHECK-LLVM: %{{.+}} = fmul half %{{.+}}, %{{.+}} + + h1 = h0 * (__fp16) -2.0f; + // CHECK: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // CHECK-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // CHECK-NEXT: %{{.+}} = cir.binop(mul, %{{.+}}, %[[#C]]) : !cir.f16 + + // CHECK-LLVM: %{{.+}} = fmul half %{{.+}}, 0xHC000 + + h1 = h0 * f2; + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float + // CHECK-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM: %[[#RES:]] = fmul float %[[#LHS]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + h1 = f0 * h2; + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.float + // CHECK-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %[[#RES:]] = fmul float %{{.+}}, %[[#RHS]] + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + h1 = h0 * i0; + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %{{.+}} = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.f16 + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %{{.+}} = fmul half %{{.+}}, %[[#A]] + + h1 = (h0 / h2); + // CHECK: %{{.+}} = cir.binop(div, %{{.+}}, %{{.+}}) : !cir.f16 + + // CHECK-LLVM: %{{.+}} = fdiv half %{{.+}}, %{{.+}} + + h1 = (h0 / (__fp16) -2.0f); + // CHECK: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // CHECK-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // CHECK-NEXT: %{{.+}} = cir.binop(div, %{{.+}}, %[[#C]]) : !cir.f16 + + // CHECK-LLVM: %{{.+}} = fdiv half %{{.+}}, 0xHC000 + + h1 = (h0 / f2); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float + // CHECK-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM: %[[#RES:]] = fdiv float %[[#LHS]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + h1 = (f0 / h2); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.float + // CHECK-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %[[#RES:]] = fdiv float %{{.+}}, %[[#RHS]] + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + h1 = (h0 / i0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %{{.+}} = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.f16 + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %{{.+}} = fdiv half %{{.+}}, %[[#A]] + + h1 = (h2 + h0); + // CHECK: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.f16 + + // CHECK-LLVM: %{{.+}} = fadd half %{{.+}}, %{{.+}} + + h1 = ((__fp16)-2.0 + h0); + // CHECK: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double + // CHECK-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 + // CHECK: %{{.+}} = cir.binop(add, %[[#C]], %{{.+}}) : !cir.f16 + + // CHECK-LLVM: %{{.+}} = fadd half 0xHC000, %{{.+}} + + h1 = (h2 + f0); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float + // CHECK-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM: %[[#RES:]] = fadd float %[[#LHS]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + h1 = (f2 + h0); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.float + // CHECK-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.=}} to float + // CHECK-LLVM-NEXT: %[[#RES:]] = fadd float %{{.+}}, %[[#RHS]] + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + h1 = (h0 + i0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %{{.+}} = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %{{.+}} = fadd half %{{.+}}, %[[#A]] + + h1 = (h2 - h0); + // CHECK: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.f16 + + // CHECK-LLVM: %{{.+}} = fsub half %{{.+}}, %{{.+}} + + h1 = ((__fp16)-2.0f - h0); + // CHECK: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // CHECK-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // CHECK: %{{.+}} = cir.binop(sub, %[[#C]], %{{.+}}) : !cir.f16 + + // CHECK-LLVM: %{{.+}} = fsub half 0xHC000, %{{.+}} + + h1 = (h2 - f0); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float + // CHECK-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM: %[[#RES:]] = fsub float %[[#LHS]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + h1 = (f2 - h0); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.float + // CHECK-NEXT: %{{.+}} = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + + // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.=}} to float + // CHECK-LLVM-NEXT: %[[#RES:]] = fsub float %{{.+}}, %[[#RHS]] + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + h1 = (h0 - i0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %{{.+}} = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.f16 + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %{{.+}} = fsub half %{{.+}}, %[[#A]] + + test = (h2 < h0); + // CHECK: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp olt half %{{.+}}, %{{.+}} + + test = (h2 < (__fp16)42.0); + // CHECK: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp olt half %{{.+}}, 0xH5140 + + test = (h2 < f0); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // CHECK-LLVM: %{{.+}} = fcmp olt float %[[#A]], %{{.+}} + + test = (f2 < h0); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#A:]] = fpext half %{{.=}} to float + // CHECK-LLVM-NEXT: %{{.+}} = fcmp olt float %{{.+}}, %[[#A]] + + test = (i0 < h0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM: %{{.+}} = fcmp olt half %[[#A]], %{{.+}} + + test = (h0 < i0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %{{.+}} = fcmp olt half %{{.+}}, %[[#A]] + + test = (h0 > h2); + // CHECK: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp ogt half %{{.+}}, %{{.+}} + + test = ((__fp16)42.0 > h2); + // CHECK: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // CHECK: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp ogt half 0xH5140, %{{.+}} + + test = (h0 > f2); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.=}} to float + // CHECK-LLVM: %{{.+}} = fcmp ogt float %[[#LHS]], %{{.+}} + + test = (f0 > h2); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %{{.+}} = fcmp ogt float %{{.+}}, %[[#RHS]] + + test = (i0 > h0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM: %{{.+}} = fcmp ogt half %[[#LHS]], %{{.+}} + + test = (h0 > i0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %{{.+}} = fcmp ogt half %{{.+}}, %[[#RHS]] + + test = (h2 <= h0); + // CHECK: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp ole half %{{.+}}, %{{.+}} + + test = (h2 <= (__fp16)42.0); + // CHECK: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double + // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp ole half %{{.+}}, 0xH5140 + + test = (h2 <= f0); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM: %{{.+}} = fcmp ole float %[[#LHS]], %{{.+}} + + test = (f2 <= h0); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %{{.+}} = fcmp ole float %{{.+}}, %[[#RHS]] + + test = (i0 <= h0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM: %{{.+}} = fcmp ole half %[[#LHS]], %{{.+}} + + test = (h0 <= i0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %{{.+}} = fcmp ole half %{{.+}}, %[[#RHS]] + + test = (h0 >= h2); + // CHECK: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp oge half %{{.+}}, %{{.+}} + + test = (h0 >= (__fp16)-2.0); + // CHECK: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double + // CHECK-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 + // CHECK-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp oge half %{{.+}}, 0xHC000 + + test = (h0 >= f2); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM: %{{.+}} = fcmp oge float %[[#LHS]], %{{.+}} + + test = (f0 >= h2); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %{{.+}} = fcmp oge float %{{.+}}, %[[#RHS]] + + test = (i0 >= h0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM: %{{.+}} = fcmp oge half %[[#LHS]], %{{.+}} + + test = (h0 >= i0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %{{.+}} = fcmp oge half %{{.+}}, %[[#RHS]] + + test = (h1 == h2); + // CHECK: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp oeq half %{{.+}}, %{{.+}} + + test = (h1 == (__fp16)1.0); + // CHECK: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp oeq half %{{.+}}, 0xH3C00 + + test = (h1 == f1); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM: %{{.+}} = fcmp oeq float %[[#LHS]], %{{.+}} + + test = (f1 == h1); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %{{.+}} = fcmp oeq float %{{.+}}, %[[#RHS]] + + test = (i0 == h0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM: %{{.+}} = fcmp oeq half %[[#LHS]], %{{.+}} + + test = (h0 == i0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %{{.=}} = fcmp oeq half %{{.+}}, %[[#RHS]] + + test = (h1 != h2); + // CHECK: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp une half %{{.+}}, %{{.+}} + + test = (h1 != (__fp16)1.0); + // CHECK: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + + // CHECK-LLVM: %{{.+}} = fcmp une half %{{.+}}, 0xH3C00 + + test = (h1 != f1); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.=}} to float + // CHECK-LLVM: %{{.+}} = fcmp une float %[[#LHS]], %{{.+}} + + test = (f1 != h1); + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#A:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %{{.+}} = fcmp une float %{{.+}}, %[[#A]] + + test = (i0 != h0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM: %{{.+}} = fcmp une half %[[#LHS]], %{{.+}} + + test = (h0 != i0); + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + + // CHECK-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %{{.+}} = fcmp une half %{{.+}}, %[[#RHS]] + + h1 = (h1 ? h2 : h0); + // CHECK: %[[#A:]] = cir.cast(float_to_bool, %{{.+}} : !cir.f16), !cir.bool + // CHECK-NEXT: %[[#B:]] = cir.ternary(%[[#A]], true { + // CHECK: cir.yield %{{.+}} : !cir.f16 + // CHECK-NEXT: }, false { + // CHECK: cir.yield %{{.+}} : !cir.f16 + // CHECK-NEXT: }) : (!cir.bool) -> !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.get_global @h1 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = fcmp une half %{{.+}}, 0xH0000 + // CHECK-LLVM-NEXT: br i1 %[[#A]], label %[[#LABEL_A:]], label %[[#LABEL_B:]] + // CHECK-LLVM: [[#LABEL_A]]: + // CHECK-LLVM-NEXT: %[[#B:]] = load volatile half, ptr @h2, align 2 + // CHECK-LLVM-NEXT: br label %[[#LABEL_C:]] + // CHECK-LLVM: [[#LABEL_B]]: + // CHECK-LLVM-NEXT: %[[#C:]] = load volatile half, ptr @h0, align 2 + // CHECK-LLVM-NEXT: br label %[[#LABEL_C]] + // CHECK-LLVM: [[#LABEL_C]]: + // CHECK-LLVM-NEXT: %8 = phi half [ %[[#C]], %[[#LABEL_B]] ], [ %[[#B]], %[[#LABEL_A]] ] + + h0 = h1; + // CHECK: %[[#A:]] = cir.get_global @h1 : !cir.ptr + // CHECK-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.get_global @h0 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#B]], %[[#C]] : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = load volatile half, ptr @h1, align 2 + // CHECK-LLVM-NEXT: store volatile half %[[#A]], ptr @h0, align 2 + + h0 = (__fp16)-2.0f; + // CHECK: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.float + // CHECK-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.float, !cir.float + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // CHECK-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // CHECK-LLVM: store volatile half 0xHC000, ptr @h0, align 2 + + h0 = f0; + // CHECK: %[[#A:]] = cir.get_global @f0 : !cir.ptr + // CHECK-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.float + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // CHECK-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = load volatile float, ptr @f0, align 4 + // CHECK-LLVM-NEXT: %[[#B:]] = fptrunc float %[[#A]] to half + // CHECK-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 + + h0 = i0; + // CHECK: %[[#A:]] = cir.get_global @i0 : !cir.ptr + // CHECK-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !s32i + // CHECK-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s32i), !cir.f16 + // CHECK-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = load volatile i32, ptr @i0, align 4 + // CHECK-LLVM-NEXT: %[[#B:]] = sitofp i32 %[[#A]] to half + // CHECK-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 + + i0 = h0; + // CHECK: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // CHECK-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // CHECK-NEXT: %[[#D:]] = cir.get_global @i0 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#C]], %[[#D]] : !s32i, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = load volatile half, ptr @h0, align 2 + // CHECK-LLVM-NEXT: %[[#B:]] = fptosi half %[[#A]] to i32 + // CHECK-LLVM-NEXT: store volatile i32 %[[#B]], ptr @i0, align 4 + + h0 += h1; + // CHECK: %[[#A:]] = cir.binop(add, %{{.+}}, %{{.+}}) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %{{.+}} = fadd half %{{.+}}, %{{.+}} + + h0 += (__fp16)1.0f; + // CHECK: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.float + // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.float), !cir.f16 + // CHECK: %[[#C:]] = cir.binop(add, %{{.+}}, %[[#B]]) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %{{.+}} = fadd half %{{.+}}, 0xH3C00 + + h0 += f2; + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.float + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %[[#RES:]] = fadd float %[[#LHS]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + i0 += h0; + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %[[#B:]] = cir.binop(add, %[[#A]], %{{.+}}) : !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %[[#B:]] = fadd half %[[#A]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptosi half %[[#B]] to i32 + + h0 += i0; + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK: %[[#B:]] = cir.binop(add, %{{.+}}, %[[#A]]) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM: %{{.+}} = fadd half %{{.+}}, %[[#A]] + + h0 -= h1; + // CHECK: %[[#A:]] = cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %{{.+}} = fsub half %{{.+}}, %{{.+}} + + h0 -= (__fp16)1.0; + // CHECK: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // CHECK: %[[#C:]] = cir.binop(sub, %{{.+}}, %[[#B]]) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %{{.+}} = fsub half %{{.+}}, 0xH3C00 + + h0 -= f2; + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.float + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %[[#RES:]] = fsub float %[[#LHS]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + i0 -= h0; + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %[[#B:]] = cir.binop(sub, %[[#A]], %{{.+}}) : !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %[[#B:]] = fsub half %[[#A]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptosi half %[[#B]] to i32 + + h0 -= i0; + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK: %[[#B:]] = cir.binop(sub, %{{.+}}, %[[#A]]) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM: %{{.+}} = fsub half %{{.+}}, %[[#A]] + + h0 *= h1; + // CHECK: %[[#A:]] = cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %{{.+}} = fmul half %{{.+}}, %{{.+}} + + h0 *= (__fp16)1.0; + // CHECK: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // CHECK: %[[#C:]] = cir.binop(mul, %{{.+}}, %[[#B]]) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %{{.+}} = fmul half %{{.+}}, 0xH3C00 + + h0 *= f2; + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.float + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %[[#RES:]] = fmul float %[[#LHS]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + i0 *= h0; + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %[[#B:]] = cir.binop(mul, %[[#A]], %{{.+}}) : !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %[[#B:]] = fmul half %[[#A]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptosi half %[[#B]] to i32 + + h0 *= i0; + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK: %[[#B:]] = cir.binop(mul, %{{.+}}, %[[#A]]) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM: %{{.+}} = fmul half %{{.+}}, %[[#A]] + + h0 /= h1; + // CHECK: %[[#A:]] = cir.binop(div, %{{.+}}, %{{.+}}) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#A]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %{{.+}} = fdiv half %{{.+}}, %{{.+}} + + h0 /= (__fp16)1.0; + // CHECK: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double + // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 + // CHECK: %[[#C:]] = cir.binop(div, %{{.+}}, %[[#B]]) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %{{.+}} = fdiv half %{{.+}}, 0xH3C00 + + h0 /= f2; + // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float + // CHECK-NEXT: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.float + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.float), !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float + // CHECK-LLVM-NEXT: %[[#RES:]] = fdiv float %[[#LHS]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptrunc float %[[#RES]] to half + + i0 /= h0; + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK-NEXT: %[[#B:]] = cir.binop(div, %[[#A]], %{{.+}}) : !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cast(float_to_int, %[[#B]] : !cir.f16), !s32i + // CHECK-NEXT: cir.store volatile %[[#C]], %{{.+}} : !s32i, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM-NEXT: %[[#B:]] = fdiv half %[[#A]], %{{.+}} + // CHECK-LLVM-NEXT: %{{.+}} = fptosi half %[[#B]] to i32 + + h0 /= i0; + // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 + // CHECK: %[[#B:]] = cir.binop(div, %{{.+}}, %[[#A]]) : !cir.f16 + // CHECK-NEXT: cir.store volatile %[[#B]], %{{.+}} : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half + // CHECK-LLVM: %{{.+}} = fdiv half %{{.+}}, %[[#A]] + + h0 = d0; + // CHECK: %[[#A:]] = cir.get_global @d0 : !cir.ptr + // CHECK-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 + // CHECK-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = load volatile double, ptr @d0, align 8 + // CHECK-LLVM-NEXT: %[[#B:]] = fptrunc double %[[#A]] to half + // CHECK-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 + + h0 = (float)d0; + // CHECK: %[[#A:]] = cir.get_global @d0 : !cir.ptr + // CHECK-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.double + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.float + // CHECK-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.f16 + // CHECK-NEXT: %[[#E:]] = cir.get_global @h0 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = load volatile double, ptr @d0, align 8 + // CHECK-LLVM-NEXT: %[[#B:]] = fptrunc double %[[#A]] to float + // CHECK-LLVM-NEXT: %[[#C:]] = fptrunc float %[[#B]] to half + // CHECK-LLVM-NEXT: store volatile half %[[#C]], ptr @h0, align 2 + + d0 = h0; + // CHECK: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // CHECK-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.double + // CHECK-NEXT: %[[#D:]] = cir.get_global @d0 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.double, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = load volatile half, ptr @h0, align 2 + // CHECK-LLVM-NEXT: %[[#B:]] = fpext half %[[#A]] to double + // CHECK-LLVM-NEXT: store volatile double %[[#B]], ptr @d0, align 8 + + d0 = (float)h0; + // CHECK: %[[#A:]] = cir.get_global @h0 : !cir.ptr + // CHECK-NEXT: %[[#B:]] = cir.load volatile %[[#A]] : !cir.ptr, !cir.f16 + // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.f16), !cir.float + // CHECK-NEXT: %[[#D:]] = cir.cast(floating, %[[#C]] : !cir.float), !cir.double + // CHECK-NEXT: %[[#E:]] = cir.get_global @d0 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#D]], %[[#E]] : !cir.double, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = load volatile half, ptr @h0, align 2 + // CHECK-LLVM-NEXT: %[[#B:]] = fpext half %[[#A]] to float + // CHECK-LLVM-NEXT: %[[#C:]] = fpext float %[[#B]] to double + // CHECK-LLVM-NEXT: store volatile double %[[#C]], ptr @d0, align 8 + + h0 = s0; + // CHECK: %[[#A:]] = cir.get_global @s0 : !cir.ptr + // CHECK-NEXT: %[[#B:]] = cir.load %[[#A]] : !cir.ptr, !s16i + // CHECK-NEXT: %[[#C:]] = cir.cast(int_to_float, %[[#B]] : !s16i), !cir.f16 + // CHECK-NEXT: %[[#D:]] = cir.get_global @h0 : !cir.ptr + // CHECK-NEXT: cir.store volatile %[[#C]], %[[#D]] : !cir.f16, !cir.ptr + + // CHECK-LLVM: %[[#A:]] = load i16, ptr @s0, align 2 + // CHECK-LLVM-NEXT: %[[#B:]] = sitofp i16 %[[#A]] to half + // CHECK-LLVM-NEXT: store volatile half %[[#B]], ptr @h0, align 2 +} From c080ae5654e6197eebc8d6d607cc35a0e8729267 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Wed, 9 Oct 2024 14:31:05 -0400 Subject: [PATCH 1924/2301] [CIR][CIRGen][Builtin] Support unsigned type for _sync_(bool/val)_compare_and_swap (#955) as title. Actually just follow the way in `makeBinaryAtomicValue` in the same file which did the right thing by creating SInt or UInt based on first argument's signess. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 5 +- clang/test/CIR/CodeGen/atomic.cpp | 81 +++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index eb3a67a7ebf6..c72eb7a14e74 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -269,7 +269,10 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, Address destAddr = checkAtomicAlignment(cgf, expr); auto &builder = cgf.getBuilder(); - auto intType = builder.getSIntNTy(cgf.getContext().getTypeSize(typ)); + auto intType = + expr->getArg(0)->getType()->getPointeeType()->isUnsignedIntegerType() + ? builder.getUIntNTy(cgf.getContext().getTypeSize(typ)) + : builder.getSIntNTy(cgf.getContext().getTypeSize(typ)); auto cmpVal = cgf.buildScalarExpr(expr->getArg(1)); cmpVal = buildToInt(cgf, cmpVal, typ, intType); auto newVal = diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 3e68db4aa053..a284fd80ec25 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -442,6 +442,7 @@ void cmp_bool_int(int* p, int x, int u) { bool r = __sync_bool_compare_and_swap(p, x, u); } + // CHECK-LABEL: @_Z13cmp_bool_long // CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) : (!s64i, !cir.bool) @@ -568,3 +569,83 @@ void inc_uchar(unsigned char* a, char b) { void sub_uchar(unsigned char* a, char b) { unsigned char c = __sync_fetch_and_sub(a, b); } + +// CHECK-LABEL: @_Z13cmp_bool_uint +// CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CHECK: %[[CMP:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[CMP_U:.*]] = cir.cast(integral, %[[CMP]] : !s32i), !u32i +// CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[UPD_U:.*]] = cir.cast(integral, %[[UPD]] : !s32i), !u32i +// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP_U]] : +// CHECK-SAME: !u32i, %[[UPD_U]] : !u32i, success = seq_cst, failure = seq_cst) : (!u32i, !cir.bool) +// CHECK: cir.store %[[RES]], {{.*}} : !cir.bool, !cir.ptr + +// LLVM-LABEL: @_Z13cmp_bool_uint +// LLVM: %[[PTR:.*]] = load ptr +// LLVM: %[[CMP:.*]] = load i32 +// LLVM: %[[UPD:.*]] = load i32 +// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst +// LLVM: %[[TMP:.*]] = extractvalue { i32, i1 } %[[RES]], 1 +// LLVM: %[[EXT:.*]] = zext i1 %[[TMP]] to i8 +// LLVM: store i8 %[[EXT]], ptr {{.*}} +void cmp_bool_uint(unsigned int* p, int x, int u) { + bool r = __sync_bool_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z15cmp_bool_ushort +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u16i, {{.*}} : !u16i, success = seq_cst, failure = seq_cst) : (!u16i, !cir.bool) + +// LLVM-LABEL: @_Z15cmp_bool_ushort +// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst +void cmp_bool_ushort(unsigned short* p, short x, short u) { + bool r = __sync_bool_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z14cmp_bool_ulong +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) : (!u64i, !cir.bool) + +// LLVM-LABEL: @_Z14cmp_bool_ulong +// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst +void cmp_bool_ulong(unsigned long* p, long x, long u) { + bool r = __sync_bool_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z12cmp_val_uint +// CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr +// CHECK: %[[CMP:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[CMP_U:.*]] = cir.cast(integral, %[[CMP]] : !s32i), !u32i +// CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[UPD_U:.*]] = cir.cast(integral, %[[UPD]] : !s32i), !u32i +// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP_U]] : +// CHECK-SAME: !u32i, %[[UPD_U]] : !u32i, success = seq_cst, failure = seq_cst) : (!u32i, !cir.bool) +// CHECK: %[[R:.*]] = cir.cast(integral, %[[OLD]] : !u32i), !s32i +// CHECK: cir.store %[[R]], {{.*}} : !s32i, !cir.ptr + +// LLVM-LABEL: @_Z12cmp_val_uint +// LLVM: %[[PTR:.*]] = load ptr +// LLVM: %[[CMP:.*]] = load i32 +// LLVM: %[[UPD:.*]] = load i32 +// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst +// LLVM: %[[TMP:.*]] = extractvalue { i32, i1 } %[[RES]], 0 +// LLVM: store i32 %[[TMP]], ptr {{.*}} +void cmp_val_uint(unsigned int* p, int x, int u) { + int r = __sync_val_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z14cmp_val_ushort +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u16i, {{.*}} : !u16i, success = seq_cst, failure = seq_cst) : (!u16i, !cir.bool) + +// LLVM-LABEL: @_Z14cmp_val_ushort +// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst +void cmp_val_ushort(unsigned short* p, short x, short u) { + short r = __sync_val_compare_and_swap(p, x, u); +} + +// CHECK-LABEL: @_Z13cmp_val_ulong +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) : (!u64i, !cir.bool) + +// LLVM-LABEL: @_Z13cmp_val_ulong +// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst +void cmp_val_ulong(unsigned long* p, long x, long u) { + long r = __sync_val_compare_and_swap(p, x, u); +} From 47f47ee392f4fd8cb0b2452185232a2c277e1362 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 9 Oct 2024 12:33:14 -0700 Subject: [PATCH 1925/2301] [CIR][CIRGen][NFC] Improve buildAutoVarAlloca skeleton and add hooks for buildVarAnnotations --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 11 ++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 7 +++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index a52ef462552b..3dbb656858e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -39,7 +39,6 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, assert( Ty.getAddressSpace() == LangAS::Default || (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL)); - assert(!D.hasAttr() && "not implemented"); auto loc = getLoc(D.getSourceRange()); bool NRVO = @@ -195,6 +194,16 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, emission.Addr = address; setAddrOfLocalVar(&D, emission.Addr); + + // Emit debug info for local var declaration. + assert(!MissingFeatures::generateDebugInfo()); + + if (D.hasAttr() && HaveInsertPoint()) + buildVarAnnotations(&D, address.emitRawPointer()); + + // TODO(cir): in LLVM this calls @llvm.lifetime.end. + assert(!MissingFeatures::shouldEmitLifetimeMarkers()); + return emission; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index f5f27b3f7dd8..e14565ed3082 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1889,3 +1889,10 @@ mlir::Value CIRGenFunction::buildAlignmentAssumption( return buildAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment, offsetValue); } + +void CIRGenFunction::buildVarAnnotations(const VarDecl *decl, mlir::Value val) { + assert(decl->hasAttr() && "no annotate attribute"); + for ([[maybe_unused]] const auto *I : decl->specific_attrs()) { + llvm_unreachable("NYI"); + } +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 322d740d58ef..479112cfe148 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1120,6 +1120,9 @@ class CIRGenFunction : public CIRGenTypeCache { void buildDecl(const clang::Decl &D); + /// Emit local annotations for the local variable V, declared by D. + void buildVarAnnotations(const VarDecl *decl, mlir::Value val); + /// If the specified expression does not fold to a constant, or if it does but /// contains a label, return false. If it constant folds return true and set /// the boolean result in Result. From 4ec1bfa6a91f043c077f6040f89dad2fc766bca6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 9 Oct 2024 13:21:46 -0700 Subject: [PATCH 1926/2301] [CIR][CIRGen] Support annotations on local var decl LLVM lowering support coming next. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 ++ clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 8 ++++++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 14 +++++++------- clang/test/CIR/CodeGen/annotations-var.c | 8 ++++++++ 6 files changed, 26 insertions(+), 12 deletions(-) create mode 100644 clang/test/CIR/CodeGen/annotations-var.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 43d3295096c2..aabe655c35b0 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -493,6 +493,7 @@ def AllocaOp : CIR_Op<"alloca", [ StrAttr:$name, UnitAttr:$init, ConfinedAttr, [IntMinValue<0>]>:$alignment, + OptionalAttr:$annotations, OptionalAttr:$ast ); @@ -530,6 +531,7 @@ def AllocaOp : CIR_Op<"alloca", [ `[` $name (`,` `init` $init^)? `]` + ($annotations^)? (`ast` $ast^)? attr-dict }]; diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 3dbb656858e0..1f6692ef8163 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -198,7 +198,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, // Emit debug info for local var declaration. assert(!MissingFeatures::generateDebugInfo()); - if (D.hasAttr() && HaveInsertPoint()) + if (D.hasAttr()) buildVarAnnotations(&D, address.emitRawPointer()); // TODO(cir): in LLVM this calls @llvm.lifetime.end. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index e14565ed3082..2cec6203d3f0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1892,7 +1892,11 @@ mlir::Value CIRGenFunction::buildAlignmentAssumption( void CIRGenFunction::buildVarAnnotations(const VarDecl *decl, mlir::Value val) { assert(decl->hasAttr() && "no annotate attribute"); - for ([[maybe_unused]] const auto *I : decl->specific_attrs()) { - llvm_unreachable("NYI"); + llvm::SmallVector annotations; + for (const auto *annot : decl->specific_attrs()) { + annotations.push_back(CGM.buildAnnotateAttr(annot)); } + auto allocaOp = dyn_cast_or_null(val.getDefiningOp()); + assert(allocaOp && "expects available alloca"); + allocaOp.setAnnotationsAttr(builder.getArrayAttr(annotations)); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index f8d39e40b6c9..21813577dbad 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -3350,7 +3350,7 @@ LangAS CIRGenModule::getGlobalVarAddressSpace(const VarDecl *D) { return getTargetCIRGenInfo().getGlobalVarAddressSpace(*this, D); } -mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(AnnotateAttr *attr) { +mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(const AnnotateAttr *attr) { ArrayRef exprs = {attr->args_begin(), attr->args_size()}; if (exprs.empty()) { return mlir::ArrayAttr::get(builder.getContext(), {}); @@ -3392,7 +3392,7 @@ mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(AnnotateAttr *attr) { } mlir::cir::AnnotationAttr -CIRGenModule::buildAnnotateAttr(clang::AnnotateAttr *aa) { +CIRGenModule::buildAnnotateAttr(const clang::AnnotateAttr *aa) { mlir::StringAttr annoGV = builder.getStringAttr(aa->getAnnotation()); mlir::ArrayAttr args = buildAnnotationArgs(aa); return mlir::cir::AnnotationAttr::get(builder.getContext(), annoGV, args); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index b652ec4f9ef7..1aee19d541e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -796,6 +796,12 @@ class CIRGenModule : public CIRGenTypeCache { /// Emits OpenCL specific Metadata e.g. OpenCL version. void buildOpenCLMetadata(); + /// Create cir::AnnotationAttr which contains the annotation + /// information for a given GlobalValue. Notice that a GlobalValue could + /// have multiple annotations, and this function creates attribute for + /// one of them. + mlir::cir::AnnotationAttr buildAnnotateAttr(const clang::AnnotateAttr *aa); + private: // An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector MangledDeclNames; @@ -817,13 +823,7 @@ class CIRGenModule : public CIRGenTypeCache { void buildGlobalAnnotations(); /// Emit additional args of the annotation. - mlir::ArrayAttr buildAnnotationArgs(clang::AnnotateAttr *attr); - - /// Create cir::AnnotationAttr which contains the annotation - /// information for a given GlobalValue. Notice that a GlobalValue could - /// have multiple annotations, and this function creates attribute for - /// one of them. - mlir::cir::AnnotationAttr buildAnnotateAttr(clang::AnnotateAttr *aa); + mlir::ArrayAttr buildAnnotationArgs(const clang::AnnotateAttr *attr); /// Add global annotations for a global value. /// Those annotations are emitted during lowering to the LLVM code. diff --git a/clang/test/CIR/CodeGen/annotations-var.c b/clang/test/CIR/CodeGen/annotations-var.c new file mode 100644 index 000000000000..cc617672dec0 --- /dev/null +++ b/clang/test/CIR/CodeGen/annotations-var.c @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR + +void local(void) { + int localvar __attribute__((annotate("localvar_ann_0"))) __attribute__((annotate("localvar_ann_1"))) = 3; +// CIR-LABEL: @local +// CIR: %0 = cir.alloca !s32i, !cir.ptr, ["localvar", init] [#cir.annotation, #cir.annotation] +} \ No newline at end of file From 8b1560fe91cdc7e139646a4a646fdf1502b9bb5b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 9 Oct 2024 15:37:52 -0700 Subject: [PATCH 1927/2301] [CIR][LowerToLLVM][NFC] Move annotation lowering around --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 345 +++++++++--------- 1 file changed, 173 insertions(+), 172 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b04d3f6d47fc..b7ec6ffb6013 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -178,6 +178,177 @@ void getOrCreateLLVMFuncOp(mlir::ConversionPatternRewriter &rewriter, } } +static constexpr StringRef llvmMetadataSectionName = "llvm.metadata"; + +// Create a string global for annotation related string. +mlir::LLVM::GlobalOp +getAnnotationStringGlobal(mlir::StringAttr strAttr, mlir::ModuleOp &module, + llvm::StringMap &globalsMap, + mlir::OpBuilder &globalVarBuilder, + mlir::Location &loc, bool isArg = false) { + llvm::StringRef str = strAttr.getValue(); + if (!globalsMap.contains(str)) { + auto llvmStrTy = mlir::LLVM::LLVMArrayType::get( + mlir::IntegerType::get(module.getContext(), 8), str.size() + 1); + auto strGlobalOp = globalVarBuilder.create( + loc, llvmStrTy, + /*isConstant=*/true, mlir::LLVM::Linkage::Private, + ".str" + + (globalsMap.empty() ? "" + : "." + std::to_string(globalsMap.size())) + + ".annotation" + (isArg ? ".arg" : ""), + mlir::StringAttr::get(module.getContext(), std::string(str) + '\0'), + /*alignment=*/isArg ? 1 : 0); + if (!isArg) + strGlobalOp.setSection(llvmMetadataSectionName); + strGlobalOp.setUnnamedAddr(mlir::LLVM::UnnamedAddr::Global); + strGlobalOp.setDsoLocal(true); + globalsMap[str] = strGlobalOp; + } + return globalsMap[str]; +} + +mlir::Value lowerAnnotationValue( + mlir::ArrayAttr annotValue, mlir::ModuleOp &module, + mlir::OpBuilder &varInitBuilder, mlir::OpBuilder &globalVarBuilder, + llvm::StringMap &stringGlobalsMap, + llvm::StringMap &argStringGlobalsMap, + llvm::MapVector &argsVarMap, + llvm::SmallVector &annoStructFields, + mlir::LLVM::LLVMStructType &annoStructTy, + mlir::LLVM::LLVMPointerType &annoPtrTy, mlir::Location &loc) { + mlir::Value valueEntry = + varInitBuilder.create(loc, annoStructTy); + auto globalValueName = mlir::cast(annotValue[0]); + mlir::Operation *globalValue = + mlir::SymbolTable::lookupSymbolIn(module, globalValueName); + // The first field is ptr to the global value + auto globalValueFld = varInitBuilder.create( + loc, annoPtrTy, globalValueName); + + valueEntry = varInitBuilder.create( + loc, valueEntry, globalValueFld, 0); + mlir::cir::AnnotationAttr annotation = + mlir::cast(annotValue[1]); + + // The second field is ptr to the annotation name + mlir::StringAttr annotationName = annotation.getName(); + auto annotationNameFld = varInitBuilder.create( + loc, annoPtrTy, + getAnnotationStringGlobal(annotationName, module, stringGlobalsMap, + globalVarBuilder, loc) + .getSymName()); + + valueEntry = varInitBuilder.create( + loc, valueEntry, annotationNameFld, 1); + + // The third field is ptr to the translation unit name, + // and the fourth field is the line number + auto annotLoc = globalValue->getLoc(); + if (mlir::isa(annotLoc)) { + auto FusedLoc = mlir::cast(annotLoc); + annotLoc = FusedLoc.getLocations()[0]; + } + auto annotFileLoc = mlir::cast(annotLoc); + assert(annotFileLoc && "annotation value has to be FileLineColLoc"); + // To be consistent with clang code gen, we add trailing null char + auto fileName = mlir::StringAttr::get( + module.getContext(), std::string(annotFileLoc.getFilename().getValue())); + auto fileNameFld = varInitBuilder.create( + loc, annoPtrTy, + getAnnotationStringGlobal(fileName, module, stringGlobalsMap, + globalVarBuilder, loc) + .getSymName()); + valueEntry = varInitBuilder.create(loc, valueEntry, + fileNameFld, 2); + unsigned int lineNo = annotFileLoc.getLine(); + auto lineNoFld = varInitBuilder.create( + loc, annoStructFields[3], lineNo); + valueEntry = varInitBuilder.create(loc, valueEntry, + lineNoFld, 3); + // The fifth field is ptr to the annotation args var, it could be null + if (annotation.isNoArgs()) { + auto nullPtrFld = varInitBuilder.create(loc, annoPtrTy); + valueEntry = varInitBuilder.create( + loc, valueEntry, nullPtrFld, 4); + } else { + mlir::ArrayAttr argsAttr = annotation.getArgs(); + // First time we see this argsAttr, create a global for it + // and build its initializer + if (!argsVarMap.contains(argsAttr)) { + llvm::SmallVector argStrutFldTypes; + llvm::SmallVector argStrutFields; + for (mlir::Attribute arg : annotation.getArgs()) { + if (auto strArgAttr = mlir::dyn_cast(arg)) { + // Call getAnnotationStringGlobal here to make sure + // have a global for this string before + // creation of the args var. + getAnnotationStringGlobal(strArgAttr, module, argStringGlobalsMap, + globalVarBuilder, loc, true); + // This will become a ptr to the global string + argStrutFldTypes.push_back(annoPtrTy); + } else if (auto intArgAttr = mlir::dyn_cast(arg)) { + argStrutFldTypes.push_back(intArgAttr.getType()); + } else { + llvm_unreachable("Unsupported annotation arg type"); + } + } + + mlir::LLVM::LLVMStructType argsStructTy = + mlir::LLVM::LLVMStructType::getLiteral(globalVarBuilder.getContext(), + argStrutFldTypes); + auto argsGlobalOp = globalVarBuilder.create( + loc, argsStructTy, true, mlir::LLVM::Linkage::Private, + ".args" + + (argsVarMap.empty() ? "" + : "." + std::to_string(argsVarMap.size())) + + ".annotation", + mlir::Attribute()); + argsGlobalOp.setSection(llvmMetadataSectionName); + argsGlobalOp.setUnnamedAddr(mlir::LLVM::UnnamedAddr::Global); + argsGlobalOp.setDsoLocal(true); + + // Create the initializer for this args global + argsGlobalOp.getRegion().push_back(new mlir::Block()); + mlir::OpBuilder argsInitBuilder(module.getContext()); + argsInitBuilder.setInsertionPointToEnd( + argsGlobalOp.getInitializerBlock()); + + mlir::Value argsStructInit = + argsInitBuilder.create(loc, argsStructTy); + int idx = 0; + for (mlir::Attribute arg : annotation.getArgs()) { + if (auto strArgAttr = mlir::dyn_cast(arg)) { + // This would be simply return with existing map entry value + // from argStringGlobalsMap as string global is already + // created in the previous loop. + mlir::LLVM::GlobalOp argStrVar = + getAnnotationStringGlobal(strArgAttr, module, argStringGlobalsMap, + globalVarBuilder, loc, true); + auto argStrVarAddr = argsInitBuilder.create( + loc, annoPtrTy, argStrVar.getSymName()); + argsStructInit = argsInitBuilder.create( + loc, argsStructInit, argStrVarAddr, idx++); + } else if (auto intArgAttr = mlir::dyn_cast(arg)) { + auto intArgFld = argsInitBuilder.create( + loc, intArgAttr.getType(), intArgAttr.getValue()); + argsStructInit = argsInitBuilder.create( + loc, argsStructInit, intArgFld, idx++); + } else { + llvm_unreachable("Unsupported annotation arg type"); + } + } + argsInitBuilder.create(loc, argsStructInit); + argsVarMap[argsAttr] = argsGlobalOp; + } + auto argsVarView = varInitBuilder.create( + loc, annoPtrTy, argsVarMap[argsAttr].getSymName()); + valueEntry = varInitBuilder.create( + loc, valueEntry, argsVarView, 4); + } + return valueEntry; +} + } // namespace //===----------------------------------------------------------------------===// @@ -918,7 +1089,6 @@ struct ConvertCIRToLLVMPass void buildGlobalAnnotationsVar(); virtual StringRef getArgument() const override { return "cir-flat-to-llvm"; } - static constexpr StringRef annotationSection = "llvm.metadata"; }; mlir::LogicalResult @@ -4136,175 +4306,6 @@ void collect_unreachable(mlir::Operation *parent, } } -// Create a string global for annotation related string. -mlir::LLVM::GlobalOp -getAnnotationStringGlobal(mlir::StringAttr strAttr, mlir::ModuleOp &module, - llvm::StringMap &globalsMap, - mlir::OpBuilder &globalVarBuilder, - mlir::Location &loc, bool isArg = false) { - llvm::StringRef str = strAttr.getValue(); - if (!globalsMap.contains(str)) { - auto llvmStrTy = mlir::LLVM::LLVMArrayType::get( - mlir::IntegerType::get(module.getContext(), 8), str.size() + 1); - auto strGlobalOp = globalVarBuilder.create( - loc, llvmStrTy, - /*isConstant=*/true, mlir::LLVM::Linkage::Private, - ".str" + - (globalsMap.empty() ? "" - : "." + std::to_string(globalsMap.size())) + - ".annotation" + (isArg ? ".arg" : ""), - mlir::StringAttr::get(module.getContext(), std::string(str) + '\0'), - /*alignment=*/isArg ? 1 : 0); - if (!isArg) - strGlobalOp.setSection(ConvertCIRToLLVMPass::annotationSection); - strGlobalOp.setUnnamedAddr(mlir::LLVM::UnnamedAddr::Global); - strGlobalOp.setDsoLocal(true); - globalsMap[str] = strGlobalOp; - } - return globalsMap[str]; -} - -mlir::Value lowerAnnotationValue( - mlir::ArrayAttr annotValue, mlir::ModuleOp &module, - mlir::OpBuilder &varInitBuilder, mlir::OpBuilder &globalVarBuilder, - llvm::StringMap &stringGlobalsMap, - llvm::StringMap &argStringGlobalsMap, - llvm::MapVector &argsVarMap, - llvm::SmallVector &annoStructFields, - mlir::LLVM::LLVMStructType &annoStructTy, - mlir::LLVM::LLVMPointerType &annoPtrTy, mlir::Location &loc) { - mlir::Value valueEntry = - varInitBuilder.create(loc, annoStructTy); - auto globalValueName = mlir::cast(annotValue[0]); - mlir::Operation *globalValue = - mlir::SymbolTable::lookupSymbolIn(module, globalValueName); - // The first field is ptr to the global value - auto globalValueFld = varInitBuilder.create( - loc, annoPtrTy, globalValueName); - - valueEntry = varInitBuilder.create( - loc, valueEntry, globalValueFld, 0); - mlir::cir::AnnotationAttr annotation = - mlir::cast(annotValue[1]); - - // The second field is ptr to the annotation name - mlir::StringAttr annotationName = annotation.getName(); - auto annotationNameFld = varInitBuilder.create( - loc, annoPtrTy, - getAnnotationStringGlobal(annotationName, module, stringGlobalsMap, - globalVarBuilder, loc) - .getSymName()); - - valueEntry = varInitBuilder.create( - loc, valueEntry, annotationNameFld, 1); - - // The third field is ptr to the translation unit name, - // and the fourth field is the line number - auto annotLoc = globalValue->getLoc(); - if (mlir::isa(annotLoc)) { - auto FusedLoc = mlir::cast(annotLoc); - annotLoc = FusedLoc.getLocations()[0]; - } - auto annotFileLoc = mlir::cast(annotLoc); - assert(annotFileLoc && "annotation value has to be FileLineColLoc"); - // To be consistent with clang code gen, we add trailing null char - auto fileName = mlir::StringAttr::get( - module.getContext(), std::string(annotFileLoc.getFilename().getValue())); - auto fileNameFld = varInitBuilder.create( - loc, annoPtrTy, - getAnnotationStringGlobal(fileName, module, stringGlobalsMap, - globalVarBuilder, loc) - .getSymName()); - valueEntry = varInitBuilder.create(loc, valueEntry, - fileNameFld, 2); - unsigned int lineNo = annotFileLoc.getLine(); - auto lineNoFld = varInitBuilder.create( - loc, annoStructFields[3], lineNo); - valueEntry = varInitBuilder.create(loc, valueEntry, - lineNoFld, 3); - // The fifth field is ptr to the annotation args var, it could be null - if (annotation.isNoArgs()) { - auto nullPtrFld = varInitBuilder.create(loc, annoPtrTy); - valueEntry = varInitBuilder.create( - loc, valueEntry, nullPtrFld, 4); - } else { - mlir::ArrayAttr argsAttr = annotation.getArgs(); - // First time we see this argsAttr, create a global for it - // and build its initializer - if (!argsVarMap.contains(argsAttr)) { - llvm::SmallVector argStrutFldTypes; - llvm::SmallVector argStrutFields; - for (mlir::Attribute arg : annotation.getArgs()) { - if (auto strArgAttr = mlir::dyn_cast(arg)) { - // Call getAnnotationStringGlobal here to make sure - // have a global for this string before - // creation of the args var. - getAnnotationStringGlobal(strArgAttr, module, argStringGlobalsMap, - globalVarBuilder, loc, true); - // This will become a ptr to the global string - argStrutFldTypes.push_back(annoPtrTy); - } else if (auto intArgAttr = mlir::dyn_cast(arg)) { - argStrutFldTypes.push_back(intArgAttr.getType()); - } else { - llvm_unreachable("Unsupported annotation arg type"); - } - } - - mlir::LLVM::LLVMStructType argsStructTy = - mlir::LLVM::LLVMStructType::getLiteral(globalVarBuilder.getContext(), - argStrutFldTypes); - auto argsGlobalOp = globalVarBuilder.create( - loc, argsStructTy, true, mlir::LLVM::Linkage::Private, - ".args" + - (argsVarMap.empty() ? "" - : "." + std::to_string(argsVarMap.size())) + - ".annotation", - mlir::Attribute()); - argsGlobalOp.setSection(ConvertCIRToLLVMPass::annotationSection); - argsGlobalOp.setUnnamedAddr(mlir::LLVM::UnnamedAddr::Global); - argsGlobalOp.setDsoLocal(true); - - // Create the initializer for this args global - argsGlobalOp.getRegion().push_back(new mlir::Block()); - mlir::OpBuilder argsInitBuilder(module.getContext()); - argsInitBuilder.setInsertionPointToEnd( - argsGlobalOp.getInitializerBlock()); - - mlir::Value argsStructInit = - argsInitBuilder.create(loc, argsStructTy); - int idx = 0; - for (mlir::Attribute arg : annotation.getArgs()) { - if (auto strArgAttr = mlir::dyn_cast(arg)) { - // This would be simply return with existing map entry value - // from argStringGlobalsMap as string global is already - // created in the previous loop. - mlir::LLVM::GlobalOp argStrVar = - getAnnotationStringGlobal(strArgAttr, module, argStringGlobalsMap, - globalVarBuilder, loc, true); - auto argStrVarAddr = argsInitBuilder.create( - loc, annoPtrTy, argStrVar.getSymName()); - argsStructInit = argsInitBuilder.create( - loc, argsStructInit, argStrVarAddr, idx++); - } else if (auto intArgAttr = mlir::dyn_cast(arg)) { - auto intArgFld = argsInitBuilder.create( - loc, intArgAttr.getType(), intArgAttr.getValue()); - argsStructInit = argsInitBuilder.create( - loc, argsStructInit, intArgFld, idx++); - } else { - llvm_unreachable("Unsupported annotation arg type"); - } - } - argsInitBuilder.create(loc, argsStructInit); - argsVarMap[argsAttr] = argsGlobalOp; - } - auto argsVarView = varInitBuilder.create( - loc, annoPtrTy, argsVarMap[argsAttr].getSymName()); - valueEntry = varInitBuilder.create( - loc, valueEntry, argsVarView, 4); - } - return valueEntry; -} - void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar() { mlir::ModuleOp module = getOperation(); mlir::Attribute attr = module->getAttr("cir.global_annotations"); @@ -4340,7 +4341,7 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar() { auto annotationGlobalOp = globalVarBuilder.create( loc, annoStructArrayTy, false, mlir::LLVM::Linkage::Appending, "llvm.global.annotations", mlir::Attribute()); - annotationGlobalOp.setSection("llvm.metadata"); + annotationGlobalOp.setSection(llvmMetadataSectionName); annotationGlobalOp.getRegion().push_back(new mlir::Block()); mlir::OpBuilder varInitBuilder(module.getContext()); varInitBuilder.setInsertionPointToEnd( @@ -4356,7 +4357,7 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar() { llvm::StringMap stringGlobalsMap; // Track globals created for annotation arg related strings. // They are different from annotation strings, as strings used in args - // are not in annotationSection, and also has aligment 1. + // are not in llvmMetadataSectionName, and also has aligment 1. llvm::StringMap argStringGlobalsMap; // Track globals created for annotation args. llvm::MapVector argsVarMap; From 5b2a2d1b0923af5bd75f7bf3a3b83c4c40e67328 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 9 Oct 2024 16:19:58 -0700 Subject: [PATCH 1928/2301] [CIR][NFC] Fix post-rebase warning --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 2cec6203d3f0..d60b2d177919 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1694,6 +1694,7 @@ void CIRGenFunction::buildVariablyModifiedType(QualType type) { case clang::Type::CountAttributed: case clang::Type::PackIndexing: case clang::Type::ArrayParameter: + case clang::Type::HLSLAttributedResource: llvm_unreachable("NYI"); #define TYPE(Class, Base) From 7a05fed086767f48daf09611ec7706d0deda9c35 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 10 Oct 2024 14:51:09 -0700 Subject: [PATCH 1929/2301] [CIR][LowerToLLVM] Add support for local var annotations --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 300 +++++++++++------- clang/test/CIR/CodeGen/annotations-var.c | 13 +- 2 files changed, 194 insertions(+), 119 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b7ec6ffb6013..fb5f5496fa70 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -208,43 +208,109 @@ getAnnotationStringGlobal(mlir::StringAttr strAttr, mlir::ModuleOp &module, return globalsMap[str]; } -mlir::Value lowerAnnotationValue( - mlir::ArrayAttr annotValue, mlir::ModuleOp &module, +mlir::LLVM::GlobalOp getOrCreateAnnotationArgsVar( + mlir::Location &loc, mlir::ModuleOp &module, + mlir::OpBuilder &globalVarBuilder, + llvm::StringMap &argStringGlobalsMap, + llvm::MapVector &argsVarMap, + mlir::ArrayAttr argsAttr) { + if (argsVarMap.contains(argsAttr)) + return argsVarMap[argsAttr]; + + mlir::LLVM::LLVMPointerType annoPtrTy = + mlir::LLVM::LLVMPointerType::get(globalVarBuilder.getContext()); + llvm::SmallVector argStrutFldTypes; + llvm::SmallVector argStrutFields; + for (mlir::Attribute arg : argsAttr) { + if (auto strArgAttr = mlir::dyn_cast(arg)) { + // Call getAnnotationStringGlobal here to make sure + // have a global for this string before + // creation of the args var. + getAnnotationStringGlobal(strArgAttr, module, argStringGlobalsMap, + globalVarBuilder, loc, true); + // This will become a ptr to the global string + argStrutFldTypes.push_back(annoPtrTy); + } else if (auto intArgAttr = mlir::dyn_cast(arg)) { + argStrutFldTypes.push_back(intArgAttr.getType()); + } else { + llvm_unreachable("Unsupported annotation arg type"); + } + } + + mlir::LLVM::LLVMStructType argsStructTy = + mlir::LLVM::LLVMStructType::getLiteral(globalVarBuilder.getContext(), + argStrutFldTypes); + auto argsGlobalOp = globalVarBuilder.create( + loc, argsStructTy, true, mlir::LLVM::Linkage::Private, + ".args" + + (argsVarMap.empty() ? "" : "." + std::to_string(argsVarMap.size())) + + ".annotation", + mlir::Attribute()); + argsGlobalOp.setSection(llvmMetadataSectionName); + argsGlobalOp.setUnnamedAddr(mlir::LLVM::UnnamedAddr::Global); + argsGlobalOp.setDsoLocal(true); + + // Create the initializer for this args global + argsGlobalOp.getRegion().push_back(new mlir::Block()); + mlir::OpBuilder argsInitBuilder(module.getContext()); + argsInitBuilder.setInsertionPointToEnd(argsGlobalOp.getInitializerBlock()); + + mlir::Value argsStructInit = + argsInitBuilder.create(loc, argsStructTy); + int idx = 0; + for (mlir::Attribute arg : argsAttr) { + if (auto strArgAttr = mlir::dyn_cast(arg)) { + // This would be simply return with existing map entry value + // from argStringGlobalsMap as string global is already + // created in the previous loop. + mlir::LLVM::GlobalOp argStrVar = getAnnotationStringGlobal( + strArgAttr, module, argStringGlobalsMap, globalVarBuilder, loc, true); + auto argStrVarAddr = argsInitBuilder.create( + loc, annoPtrTy, argStrVar.getSymName()); + argsStructInit = argsInitBuilder.create( + loc, argsStructInit, argStrVarAddr, idx++); + } else if (auto intArgAttr = mlir::dyn_cast(arg)) { + auto intArgFld = argsInitBuilder.create( + loc, intArgAttr.getType(), intArgAttr.getValue()); + argsStructInit = argsInitBuilder.create( + loc, argsStructInit, intArgFld, idx++); + } else { + llvm_unreachable("Unsupported annotation arg type"); + } + } + argsInitBuilder.create(loc, argsStructInit); + argsVarMap[argsAttr] = argsGlobalOp; + return argsGlobalOp; +} + +/// Lower an annotation value to a series of LLVM globals, `outVals` contains +/// all values which are either used to build other globals or for intrisic call +/// arguments. +void lowerAnnotationValue( + mlir::Location &localLoc, mlir::Location annotLoc, + mlir::cir::AnnotationAttr annotation, mlir::ModuleOp &module, mlir::OpBuilder &varInitBuilder, mlir::OpBuilder &globalVarBuilder, llvm::StringMap &stringGlobalsMap, llvm::StringMap &argStringGlobalsMap, llvm::MapVector &argsVarMap, - llvm::SmallVector &annoStructFields, - mlir::LLVM::LLVMStructType &annoStructTy, - mlir::LLVM::LLVMPointerType &annoPtrTy, mlir::Location &loc) { - mlir::Value valueEntry = - varInitBuilder.create(loc, annoStructTy); - auto globalValueName = mlir::cast(annotValue[0]); - mlir::Operation *globalValue = - mlir::SymbolTable::lookupSymbolIn(module, globalValueName); - // The first field is ptr to the global value - auto globalValueFld = varInitBuilder.create( - loc, annoPtrTy, globalValueName); - - valueEntry = varInitBuilder.create( - loc, valueEntry, globalValueFld, 0); - mlir::cir::AnnotationAttr annotation = - mlir::cast(annotValue[1]); + SmallVectorImpl &outVals) { + mlir::LLVM::LLVMPointerType annoPtrTy = + mlir::LLVM::LLVMPointerType::get(globalVarBuilder.getContext()); + // First field is either a global name or a alloca address and is handled + // by the caller, this function deals with content from `AnnotationAttr` + // only. // The second field is ptr to the annotation name mlir::StringAttr annotationName = annotation.getName(); auto annotationNameFld = varInitBuilder.create( - loc, annoPtrTy, + localLoc, annoPtrTy, getAnnotationStringGlobal(annotationName, module, stringGlobalsMap, - globalVarBuilder, loc) + globalVarBuilder, localLoc) .getSymName()); - - valueEntry = varInitBuilder.create( - loc, valueEntry, annotationNameFld, 1); + outVals.push_back(annotationNameFld->getResult(0)); // The third field is ptr to the translation unit name, // and the fourth field is the line number - auto annotLoc = globalValue->getLoc(); if (mlir::isa(annotLoc)) { auto FusedLoc = mlir::cast(annotLoc); annotLoc = FusedLoc.getLocations()[0]; @@ -255,98 +321,31 @@ mlir::Value lowerAnnotationValue( auto fileName = mlir::StringAttr::get( module.getContext(), std::string(annotFileLoc.getFilename().getValue())); auto fileNameFld = varInitBuilder.create( - loc, annoPtrTy, + localLoc, annoPtrTy, getAnnotationStringGlobal(fileName, module, stringGlobalsMap, - globalVarBuilder, loc) + globalVarBuilder, localLoc) .getSymName()); - valueEntry = varInitBuilder.create(loc, valueEntry, - fileNameFld, 2); + outVals.push_back(fileNameFld->getResult(0)); + unsigned int lineNo = annotFileLoc.getLine(); auto lineNoFld = varInitBuilder.create( - loc, annoStructFields[3], lineNo); - valueEntry = varInitBuilder.create(loc, valueEntry, - lineNoFld, 3); + localLoc, globalVarBuilder.getI32Type(), lineNo); + outVals.push_back(lineNoFld->getResult(0)); + // The fifth field is ptr to the annotation args var, it could be null if (annotation.isNoArgs()) { - auto nullPtrFld = varInitBuilder.create(loc, annoPtrTy); - valueEntry = varInitBuilder.create( - loc, valueEntry, nullPtrFld, 4); + auto nullPtrFld = + varInitBuilder.create(localLoc, annoPtrTy); + outVals.push_back(nullPtrFld->getResult(0)); } else { mlir::ArrayAttr argsAttr = annotation.getArgs(); - // First time we see this argsAttr, create a global for it - // and build its initializer - if (!argsVarMap.contains(argsAttr)) { - llvm::SmallVector argStrutFldTypes; - llvm::SmallVector argStrutFields; - for (mlir::Attribute arg : annotation.getArgs()) { - if (auto strArgAttr = mlir::dyn_cast(arg)) { - // Call getAnnotationStringGlobal here to make sure - // have a global for this string before - // creation of the args var. - getAnnotationStringGlobal(strArgAttr, module, argStringGlobalsMap, - globalVarBuilder, loc, true); - // This will become a ptr to the global string - argStrutFldTypes.push_back(annoPtrTy); - } else if (auto intArgAttr = mlir::dyn_cast(arg)) { - argStrutFldTypes.push_back(intArgAttr.getType()); - } else { - llvm_unreachable("Unsupported annotation arg type"); - } - } - - mlir::LLVM::LLVMStructType argsStructTy = - mlir::LLVM::LLVMStructType::getLiteral(globalVarBuilder.getContext(), - argStrutFldTypes); - auto argsGlobalOp = globalVarBuilder.create( - loc, argsStructTy, true, mlir::LLVM::Linkage::Private, - ".args" + - (argsVarMap.empty() ? "" - : "." + std::to_string(argsVarMap.size())) + - ".annotation", - mlir::Attribute()); - argsGlobalOp.setSection(llvmMetadataSectionName); - argsGlobalOp.setUnnamedAddr(mlir::LLVM::UnnamedAddr::Global); - argsGlobalOp.setDsoLocal(true); - - // Create the initializer for this args global - argsGlobalOp.getRegion().push_back(new mlir::Block()); - mlir::OpBuilder argsInitBuilder(module.getContext()); - argsInitBuilder.setInsertionPointToEnd( - argsGlobalOp.getInitializerBlock()); - - mlir::Value argsStructInit = - argsInitBuilder.create(loc, argsStructTy); - int idx = 0; - for (mlir::Attribute arg : annotation.getArgs()) { - if (auto strArgAttr = mlir::dyn_cast(arg)) { - // This would be simply return with existing map entry value - // from argStringGlobalsMap as string global is already - // created in the previous loop. - mlir::LLVM::GlobalOp argStrVar = - getAnnotationStringGlobal(strArgAttr, module, argStringGlobalsMap, - globalVarBuilder, loc, true); - auto argStrVarAddr = argsInitBuilder.create( - loc, annoPtrTy, argStrVar.getSymName()); - argsStructInit = argsInitBuilder.create( - loc, argsStructInit, argStrVarAddr, idx++); - } else if (auto intArgAttr = mlir::dyn_cast(arg)) { - auto intArgFld = argsInitBuilder.create( - loc, intArgAttr.getType(), intArgAttr.getValue()); - argsStructInit = argsInitBuilder.create( - loc, argsStructInit, intArgFld, idx++); - } else { - llvm_unreachable("Unsupported annotation arg type"); - } - } - argsInitBuilder.create(loc, argsStructInit); - argsVarMap[argsAttr] = argsGlobalOp; - } + mlir::LLVM::GlobalOp annotArgsVar = + getOrCreateAnnotationArgsVar(localLoc, module, globalVarBuilder, + argStringGlobalsMap, argsVarMap, argsAttr); auto argsVarView = varInitBuilder.create( - loc, annoPtrTy, argsVarMap[argsAttr].getSymName()); - valueEntry = varInitBuilder.create( - loc, valueEntry, argsVarView, 4); + localLoc, annoPtrTy, annotArgsVar.getSymName()); + outVals.push_back(argsVarView->getResult(0)); } - return valueEntry; } } // namespace @@ -1276,6 +1275,43 @@ class CIRAllocaLowering : OpConversionPattern(typeConverter, context), dataLayout(dataLayout) {} + void buildAllocaAnnotations(mlir::LLVM::AllocaOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::ArrayAttr annotationValuesArray) const { + mlir::ModuleOp module = op->getParentOfType(); + mlir::OpBuilder globalVarBuilder(module.getContext()); + + mlir::OpBuilder::InsertPoint afterAlloca = rewriter.saveInsertionPoint(); + globalVarBuilder.setInsertionPointToEnd(&module.getBodyRegion().front()); + + mlir::Location loc = op.getLoc(); + mlir::OpBuilder varInitBuilder(module.getContext()); + varInitBuilder.restoreInsertionPoint(afterAlloca); + + // Track globals created for annotation related strings + llvm::StringMap stringGlobalsMap; + // Track globals created for annotation arg related strings. + // They are different from annotation strings, as strings used in args + // are not in llvmMetadataSectionName, and also has aligment 1. + llvm::StringMap argStringGlobalsMap; + // Track globals created for annotation args. + llvm::MapVector argsVarMap; + + auto intrinRetTy = mlir::LLVM::LLVMVoidType::get(getContext()); + constexpr const char *intrinNameAttr = "llvm.var.annotation.p0.p0"; + for (mlir::Attribute entry : annotationValuesArray) { + SmallVector intrinsicArgs; + intrinsicArgs.push_back(op.getRes()); + auto annot = cast(entry); + lowerAnnotationValue(loc, loc, annot, module, varInitBuilder, + globalVarBuilder, stringGlobalsMap, + argStringGlobalsMap, argsVarMap, intrinsicArgs); + rewriter.create( + loc, intrinRetTy, mlir::StringAttr::get(getContext(), intrinNameAttr), + intrinsicArgs); + } + } + mlir::LogicalResult matchAndRewrite(mlir::cir::AllocaOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { @@ -1303,8 +1339,18 @@ class CIRAllocaLowering << dlAllocaAS; } } - rewriter.replaceOpWithNewOp( + + // If there are annotations available, copy them out before we destroy the + // original cir.alloca. + mlir::ArrayAttr annotations; + if (op.getAnnotations()) + annotations = op.getAnnotationsAttr(); + + auto llvmAlloca = rewriter.replaceOpWithNewOp( op, resultTy, elementTy, size, op.getAlignmentAttr().getInt()); + + if (annotations && !annotations.empty()) + buildAllocaAnnotations(llvmAlloca, adaptor, rewriter, annotations); return mlir::success(); } }; @@ -4337,9 +4383,9 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar() { mlir::LLVM::LLVMArrayType annoStructArrayTy = mlir::LLVM::LLVMArrayType::get(annoStructTy, annotationValuesArray.size()); - mlir::Location loc = module.getLoc(); + mlir::Location moduleLoc = module.getLoc(); auto annotationGlobalOp = globalVarBuilder.create( - loc, annoStructArrayTy, false, mlir::LLVM::Linkage::Appending, + moduleLoc, annoStructArrayTy, false, mlir::LLVM::Linkage::Appending, "llvm.global.annotations", mlir::Attribute()); annotationGlobalOp.setSection(llvmMetadataSectionName); annotationGlobalOp.getRegion().push_back(new mlir::Block()); @@ -4351,8 +4397,8 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar() { // This is consistent with clang code gen. globalVarBuilder.setInsertionPoint(annotationGlobalOp); - mlir::Value result = - varInitBuilder.create(loc, annoStructArrayTy); + mlir::Value result = varInitBuilder.create( + moduleLoc, annoStructArrayTy); // Track globals created for annotation related strings llvm::StringMap stringGlobalsMap; // Track globals created for annotation arg related strings. @@ -4365,14 +4411,32 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar() { int idx = 0; for (mlir::Attribute entry : annotationValuesArray) { auto annotValue = cast(entry); - mlir::Value init = lowerAnnotationValue( - annotValue, module, varInitBuilder, globalVarBuilder, - stringGlobalsMap, argStringGlobalsMap, argsVarMap, annoStructFields, - annoStructTy, annoPtrTy, loc); - result = varInitBuilder.create(loc, result, - init, idx++); + mlir::Value valueEntry = + varInitBuilder.create(moduleLoc, annoStructTy); + SmallVector vals; + + auto globalValueName = mlir::cast(annotValue[0]); + mlir::Operation *globalValue = + mlir::SymbolTable::lookupSymbolIn(module, globalValueName); + // The first field is ptr to the global value + auto globalValueFld = varInitBuilder.create( + moduleLoc, annoPtrTy, globalValueName); + vals.push_back(globalValueFld->getResult(0)); + + mlir::cir::AnnotationAttr annot = + mlir::cast(annotValue[1]); + lowerAnnotationValue(moduleLoc, globalValue->getLoc(), annot, module, + varInitBuilder, globalVarBuilder, stringGlobalsMap, + argStringGlobalsMap, argsVarMap, vals); + for (unsigned valIdx = 0, endIdx = vals.size(); valIdx != endIdx; + ++valIdx) { + valueEntry = varInitBuilder.create( + moduleLoc, valueEntry, vals[valIdx], valIdx); + } + result = varInitBuilder.create( + moduleLoc, result, valueEntry, idx++); } - varInitBuilder.create(loc, result); + varInitBuilder.create(moduleLoc, result); } } diff --git a/clang/test/CIR/CodeGen/annotations-var.c b/clang/test/CIR/CodeGen/annotations-var.c index cc617672dec0..931248c1a18d 100644 --- a/clang/test/CIR/CodeGen/annotations-var.c +++ b/clang/test/CIR/CodeGen/annotations-var.c @@ -1,8 +1,19 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +// LLVM-DAG: @.str.annotation = private unnamed_addr constant [15 x i8] c"localvar_ann_0\00", section "llvm.metadata" +// LLVM-DAG: @.str.1.annotation = private unnamed_addr constant [{{.*}} x i8] c"{{.*}}annotations-var.c\00", section "llvm.metadata" +// LLVM-DAG: @.str.2.annotation = private unnamed_addr constant [15 x i8] c"localvar_ann_1\00", section "llvm.metadata" void local(void) { int localvar __attribute__((annotate("localvar_ann_0"))) __attribute__((annotate("localvar_ann_1"))) = 3; // CIR-LABEL: @local // CIR: %0 = cir.alloca !s32i, !cir.ptr, ["localvar", init] [#cir.annotation, #cir.annotation] -} \ No newline at end of file + +// LLVM-LABEL: @local +// LLVM: %[[ALLOC:.*]] = alloca i32 +// LLVM: call void @llvm.var.annotation.p0.p0(ptr %[[ALLOC]], ptr @.str.annotation, ptr @.str.1.annotation, i32 11, ptr null) +// LLVM: call void @llvm.var.annotation.p0.p0(ptr %[[ALLOC]], ptr @.str.2.annotation, ptr @.str.1.annotation, i32 11, ptr null) +} From 783f05431da45d4a5a19b5c5ba6d780735a0064c Mon Sep 17 00:00:00 2001 From: Vinicius Couto Espindola <34522047+sitio-couto@users.noreply.github.com> Date: Thu, 10 Oct 2024 21:41:49 -0300 Subject: [PATCH 1930/2301] Reapply and patch "[CIR][ABI] Apply CC lowering pass by default (#842)" (#944) This reverts commit 8f699fd and fixes some issues, namely: - CC lowering pass will no longer fail if the function has no AST information that won't be used. - Fixed CC lowering not disabling when running certain `cc1` compilation commands. - CC lowering can now be disabled when calling `cir-opt` and `cir-translate`. - Compilation commands that generate Object files should now invoke CC lowering by default. --- clang/include/clang/CIR/Dialect/Passes.h | 2 +- clang/include/clang/CIR/LowerToLLVM.h | 7 +- clang/include/clang/CIR/MissingFeatures.h | 41 +++++++ clang/include/clang/CIR/Passes.h | 2 +- clang/include/clang/Driver/Options.td | 11 +- .../include/clang/Frontend/FrontendOptions.h | 2 +- clang/lib/CIR/CodeGen/CIRPasses.cpp | 11 +- .../Dialect/Transforms/CallConvLowering.cpp | 11 +- .../Transforms/TargetLowering/ABIInfo.cpp | 2 +- .../Transforms/TargetLowering/ABIInfoImpl.cpp | 7 +- .../TargetLowering/CIRLowerContext.cpp | 19 ++-- .../TargetLowering/CIRRecordLayout.cpp | 7 +- .../TargetLowering/CIRToCIRArgMapping.h | 10 +- .../TargetLowering/ItaniumCXXABI.cpp | 6 +- .../Transforms/TargetLowering/LowerCall.cpp | 36 +++--- .../TargetLowering/LowerFunction.cpp | 103 +++++++++++------- .../TargetLowering/LowerFunctionInfo.h | 11 +- .../Transforms/TargetLowering/LowerModule.cpp | 16 ++- .../Transforms/TargetLowering/LowerModule.h | 2 +- .../Transforms/TargetLowering/LowerTypes.cpp | 14 ++- .../TargetLowering/RecordLayoutBuilder.cpp | 73 +++++++------ .../TargetLowering/Targets/AArch64.cpp | 8 +- .../Targets/LoweringPrepareAArch64CXXABI.cpp | 55 +++++----- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 14 +-- .../TargetLowering/Targets/SPIR.cpp | 2 +- .../Transforms/TargetLowering/Targets/X86.cpp | 48 ++++---- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 26 +++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 +- clang/lib/Frontend/CompilerInvocation.cpp | 2 +- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 4 +- clang/test/CIR/CodeGen/AArch64/neon-ldst.c | 4 +- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 4 +- clang/test/CIR/CodeGen/AArch64/neon.c | 4 +- .../CIR/CodeGen/OpenCL/addrspace-alloca.cl | 4 +- clang/test/CIR/CodeGen/OpenCL/array-decay.cl | 4 +- .../CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl | 16 +-- clang/test/CIR/CodeGen/OpenCL/convergent.cl | 4 +- clang/test/CIR/CodeGen/OpenCL/global.cl | 4 +- .../OpenCL/kernel-arg-info-single-as.cl | 4 +- .../CIR/CodeGen/OpenCL/kernel-arg-info.cl | 8 +- .../CIR/CodeGen/OpenCL/kernel-arg-metadata.cl | 4 +- .../CIR/CodeGen/OpenCL/kernel-attributes.cl | 4 +- .../CIR/CodeGen/OpenCL/kernel-unit-attr.cl | 2 +- clang/test/CIR/CodeGen/OpenCL/nothrow.cl | 4 +- .../test/CIR/CodeGen/OpenCL/opencl-c-lang.cl | 2 +- .../test/CIR/CodeGen/OpenCL/opencl-version.cl | 8 +- .../CIR/CodeGen/OpenCL/spir-calling-conv.cl | 4 +- clang/test/CIR/CodeGen/OpenCL/spirv-target.cl | 4 +- .../test/CIR/CodeGen/OpenCL/static-vardecl.cl | 4 +- clang/test/CIR/CodeGen/abstract-cond.c | 4 +- clang/test/CIR/CodeGen/attributes.c | 4 +- clang/test/CIR/CodeGen/builtin-bit-cast.cpp | 4 +- clang/test/CIR/CodeGen/complex-arithmetic.c | 36 +++--- clang/test/CIR/CodeGen/compound-literal.c | 4 +- .../test/CIR/CodeGen/conditional-cleanup.cpp | 2 +- clang/test/CIR/CodeGen/dynamic-cast-exact.cpp | 4 +- clang/test/CIR/CodeGen/eh.cpp | 2 +- clang/test/CIR/CodeGen/global-new.cpp | 8 +- clang/test/CIR/CodeGen/goto.cpp | 2 +- clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp | 4 +- .../CIR/CodeGen/initlist-ptr-unsigned.cpp | 4 +- clang/test/CIR/CodeGen/multi-vtable.cpp | 4 +- clang/test/CIR/CodeGen/temporaries.cpp | 6 +- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 6 +- clang/test/CIR/CodeGen/var-arg-float.c | 4 +- clang/test/CIR/CodeGen/var-arg-scope.c | 6 +- clang/test/CIR/CodeGen/var-arg.c | 4 +- clang/test/CIR/CodeGen/visibility-attribute.c | 4 +- clang/test/CIR/Lowering/OpenMP/barrier.cir | 2 +- clang/test/CIR/Lowering/OpenMP/parallel.cir | 2 +- clang/test/CIR/Lowering/OpenMP/taskwait.cir | 2 +- clang/test/CIR/Lowering/OpenMP/taskyield.cir | 2 +- clang/test/CIR/Lowering/address-space.cir | 2 +- clang/test/CIR/Lowering/array.cir | 2 +- clang/test/CIR/Lowering/binop-fp.cir | 2 +- clang/test/CIR/Lowering/binop-overflow.cir | 2 +- .../test/CIR/Lowering/binop-unsigned-int.cir | 2 +- clang/test/CIR/Lowering/bitint.cir | 2 +- clang/test/CIR/Lowering/bool-to-int.cir | 2 +- clang/test/CIR/Lowering/bool.cir | 2 +- clang/test/CIR/Lowering/branch.cir | 2 +- clang/test/CIR/Lowering/brcond.cir | 2 +- clang/test/CIR/Lowering/bswap.cir | 2 +- clang/test/CIR/Lowering/call-op-call-conv.cir | 2 +- clang/test/CIR/Lowering/call.cir | 2 +- clang/test/CIR/Lowering/cmp3way.cir | 2 +- clang/test/CIR/Lowering/complex.cir | 2 +- clang/test/CIR/Lowering/const-array.cir | 2 +- clang/test/CIR/Lowering/data-member.cir | 2 +- clang/test/CIR/Lowering/exceptions.cir | 2 +- clang/test/CIR/Lowering/expect.cir | 2 +- clang/test/CIR/Lowering/func-call-conv.cir | 2 +- clang/test/CIR/Lowering/globals.cir | 2 +- clang/test/CIR/Lowering/if.cir | 2 +- clang/test/CIR/Lowering/int-wrap.cir | 2 +- clang/test/CIR/Lowering/intrinsics.cir | 2 +- clang/test/CIR/Lowering/ptrdiff.cir | 2 +- clang/test/CIR/Lowering/region-simplify.cir | 2 +- clang/test/CIR/Lowering/scope.cir | 2 +- clang/test/CIR/Lowering/select.cir | 2 +- clang/test/CIR/Lowering/try-catch.cpp | 2 +- clang/test/CIR/Lowering/unary-inc-dec.cir | 2 +- clang/test/CIR/Lowering/unary-not.cir | 2 +- .../aarch64-call-conv-lowering-pass.cpp | 2 +- .../x86_64/x86_64-call-conv-lowering-pass.cpp | 2 +- clang/test/CodeGen/compound-literal.c | 4 +- clang/tools/cir-opt/cir-opt.cpp | 18 ++- clang/tools/cir-translate/cir-translate.cpp | 23 ++-- 108 files changed, 498 insertions(+), 385 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 5f41da6e411c..41086e36748e 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -43,7 +43,7 @@ std::unique_ptr createGotoSolverPass(); /// Create a pass to lower ABI-independent function definitions/calls. std::unique_ptr createCallConvLoweringPass(); -void populateCIRPreLoweringPasses(mlir::OpPassManager &pm); +void populateCIRPreLoweringPasses(mlir::OpPassManager &pm, bool useCCLowering); //===----------------------------------------------------------------------===// // Registration diff --git a/clang/include/clang/CIR/LowerToLLVM.h b/clang/include/clang/CIR/LowerToLLVM.h index 88713bf6e07f..325cbf3afd5d 100644 --- a/clang/include/clang/CIR/LowerToLLVM.h +++ b/clang/include/clang/CIR/LowerToLLVM.h @@ -29,10 +29,9 @@ class ModuleOp; namespace cir { namespace direct { -std::unique_ptr -lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, - llvm::LLVMContext &llvmCtx, - bool disableVerifier = false); +std::unique_ptr lowerDirectlyFromCIRToLLVMIR( + mlir::ModuleOp theModule, llvm::LLVMContext &llvmCtx, + bool disableVerifier = false, bool disableCCLowering = false); } // Lower directly from pristine CIR to LLVMIR. diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 1be7d5f63183..c75edc6d94d6 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -15,6 +15,27 @@ #ifndef CLANG_CIR_MISSINGFEATURES_H #define CLANG_CIR_MISSINGFEATURES_H +constexpr bool cirMissingFeatureAssertionMode = + true; // Change to `false` to use llvm_unreachable + +#define NOTE \ + " Target lowering is now required. Disable it with " \ + "-fno-clangir-call-conv-lowering." + +// Special assertion to be used in the target lowering library. +#define cir_tl_assert(cond) assert((cond) && NOTE); + +// Some assertions knowingly generate incorrect code. This macro allows us to +// switch between using `assert` and `llvm_unreachable` for these cases. +#define cir_assert_or_abort(cond, msg) \ + do { \ + if (cirMissingFeatureAssertionMode) { \ + assert((cond) && msg NOTE); \ + } else { \ + llvm_unreachable(msg NOTE); \ + } \ + } while (0) + namespace cir { struct MissingFeatures { @@ -212,6 +233,26 @@ struct MissingFeatures { //===--- ABI lowering --===// + static bool SPIRVABI() { return false; } + + static bool AArch64TypeClassification() { return false; } + + static bool X86ArgTypeClassification() { return false; } + static bool X86DefaultABITypeConvertion() { return false; } + static bool X86GetFPTypeAtOffset() { return false; } + static bool X86RetTypeClassification() { return false; } + static bool X86TypeClassification() { return false; } + + static bool ABIClangTypeKind() { return false; } + static bool ABIEnterStructForCoercedAccess() { return false; } + static bool ABIFuncPtr() { return false; } + static bool ABIInRegAttribute() { return false; } + static bool ABINestedRecordLayout() { return false; } + static bool ABINoProtoFunctions() { return false; } + static bool ABIParameterCoercion() { return false; } + static bool ABIPointerParameterAttrs() { return false; } + static bool ABITransparentUnionHandling() { return false; } + //-- Missing AST queries static bool CXXRecordDeclIsEmptyCXX11() { return false; } diff --git a/clang/include/clang/CIR/Passes.h b/clang/include/clang/CIR/Passes.h index 6b1d2fdc75c4..3f8a174aac0c 100644 --- a/clang/include/clang/CIR/Passes.h +++ b/clang/include/clang/CIR/Passes.h @@ -30,7 +30,7 @@ namespace direct { std::unique_ptr createConvertCIRToLLVMPass(); /// Adds passes that fully lower CIR to the LLVMIR dialect. -void populateCIRToLLVMPasses(mlir::OpPassManager &pm); +void populateCIRToLLVMPasses(mlir::OpPassManager &pm, bool useCCLowering); } // namespace direct } // end namespace cir diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index e5010ef3a066..ff05be57a99f 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3079,10 +3079,6 @@ def fclangir_lib_opt : Flag<["-"], "fclangir-lib-opt">, Visibility<[ClangOption, CC1Option]>, Group, Alias, HelpText<"Enable C/C++ library based optimizations">; -def fclangir_call_conv_lowering : Flag<["-"], "fclangir-call-conv-lowering">, - Visibility<[ClangOption, CC1Option]>, Group, - HelpText<"Enable ClangIR calling convention lowering">, - MarshallingInfoFlag>; def fclangir_mem2reg : Flag<["-"], "fclangir-mem2reg">, Visibility<[ClangOption, CC1Option]>, Group, HelpText<"Enable mem2reg on the flat ClangIR">, @@ -3113,6 +3109,13 @@ defm clangir_analysis_only : BoolFOption<"clangir-analysis-only", PosFlag, NegFlag>; +// FIXME(cir): Remove this option once all pre-existing tests are compatible with +// the calling convention lowering pass. +defm clangir_call_conv_lowering : BoolFOption<"clangir-call-conv-lowering", + FrontendOpts<"ClangIRCallConvLowering">, DefaultTrue, + PosFlag, + NegFlag, + BothFlags<[], [ClangOption, CC1Option], "">>; def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, Group, HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index b9e4d09df222..64664f41c879 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -449,7 +449,7 @@ class FrontendOptions { unsigned ClangIRLibOpt : 1; // Enable Clang IR call conv lowering pass. - unsigned ClangIREnableCallConvLowering : 1; + unsigned ClangIRCallConvLowering : 1; // Enable Clang IR mem2reg pass on the flat CIR. unsigned ClangIREnableMem2Reg : 1; diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index a84acc0d6322..65b43cfc6ffd 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -75,13 +75,8 @@ mlir::LogicalResult runCIRToCIRPasses( pm.addPass(mlir::createLoweringPreparePass(&astCtx)); - // FIXME(cir): This pass should run by default, but it is lacking support for - // several code bits. Once it's more mature, we should fix this. - if (enableCallConvLowering) - pm.addPass(mlir::createCallConvLoweringPass()); - if (flattenCIR || enableMem2Reg) - mlir::populateCIRPreLoweringPasses(pm); + mlir::populateCIRPreLoweringPasses(pm, enableCallConvLowering); if (enableMem2Reg) pm.addPass(mlir::createMem2Reg()); @@ -101,7 +96,9 @@ mlir::LogicalResult runCIRToCIRPasses( namespace mlir { -void populateCIRPreLoweringPasses(OpPassManager &pm) { +void populateCIRPreLoweringPasses(OpPassManager &pm, bool useCCLowering) { + if (useCCLowering) + pm.addPass(createCallConvLoweringPass()); pm.addPass(createHoistAllocasPass()); pm.addPass(createFlattenCFGPass()); pm.addPass(createGotoSolverPass()); diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 85e74eab724a..7026f046ce97 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -6,7 +6,6 @@ // //===----------------------------------------------------------------------===// - #include "TargetLowering/LowerModule.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/IR/BuiltinOps.h" @@ -14,6 +13,7 @@ #include "mlir/Pass/Pass.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/MissingFeatures.h" #define GEN_PASS_DEF_CALLCONVLOWERING #include "clang/CIR/Dialect/Passes.h.inc" @@ -36,9 +36,6 @@ struct CallConvLoweringPattern : public OpRewritePattern { const auto module = op->getParentOfType(); - if (!op.getAst()) - return op.emitError("function has no AST information"); - auto modOp = op->getParentOfType(); std::unique_ptr lowerModule = createLowerModule(modOp, rewriter); @@ -48,6 +45,12 @@ struct CallConvLoweringPattern : public OpRewritePattern { auto calls = op.getSymbolUses(module); if (calls.has_value()) { for (auto call : calls.value()) { + // FIXME(cir): Function pointers are ignored. + if (isa(call.getUser())) { + cir_assert_or_abort(!::cir::MissingFeatures::ABIFuncPtr(), "NYI"); + continue; + } + auto callOp = cast(call.getUser()); if (lowerModule->rewriteFunctionCall(callOp, op).failed()) return failure(); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 4e2a81de9fc1..6cb69c7eeb88 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -37,7 +37,7 @@ bool ABIInfo::isPromotableIntegerTypeForABI(Type Ty) const { if (getContext().isPromotableIntegerType(Ty)) return true; - assert(!::cir::MissingFeatures::fixedWidthIntegers()); + cir_tl_assert(!::cir::MissingFeatures::fixedWidthIntegers()); return false; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index 041c801dbe2e..38f9fb8ffaa4 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -26,21 +26,22 @@ bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, Type Ty = FI.getReturnType(); if (const auto RT = dyn_cast(Ty)) { - assert(!::cir::MissingFeatures::isCXXRecordDecl()); + cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl()); } return CXXABI.classifyReturnType(FI); } bool isAggregateTypeForABI(Type T) { - assert(!::cir::MissingFeatures::functionMemberPointerType()); + cir_tl_assert(!::cir::MissingFeatures::functionMemberPointerType()); return !LowerFunction::hasScalarEvaluationKind(T); } Type useFirstFieldIfTransparentUnion(Type Ty) { if (auto RT = dyn_cast(Ty)) { if (RT.isUnion()) - llvm_unreachable("NYI"); + cir_assert_or_abort( + !::cir::MissingFeatures::ABITransparentUnionHandling(), "NYI"); } return Ty; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index 42aae0a80d04..5b5eb7602ffa 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -55,7 +55,10 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { } else if (isa(T)) { typeKind = clang::Type::Record; } else { - llvm_unreachable("Unhandled type class"); + cir_assert_or_abort(!::cir::MissingFeatures::ABIClangTypeKind(), + "Unhandled type class"); + // FIXME(cir): Completely wrong. Just here to make it non-blocking. + typeKind = clang::Type::Builtin; } // FIXME(cir): Here we fetch the width and alignment of a type considering the @@ -96,10 +99,10 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { } case clang::Type::Record: { const auto RT = dyn_cast(T); - assert(!::cir::MissingFeatures::tagTypeClassAbstraction()); + cir_tl_assert(!::cir::MissingFeatures::tagTypeClassAbstraction()); // Only handle TagTypes (names types) for now. - assert(RT.getName() && "Anonymous record is NYI"); + cir_tl_assert(RT.getName() && "Anonymous record is NYI"); // NOTE(cir): Clang does some hanlding of invalid tagged declarations here. // Not sure if this is necessary in CIR. @@ -111,14 +114,14 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { const CIRRecordLayout &Layout = getCIRRecordLayout(RT); Width = toBits(Layout.getSize()); Align = toBits(Layout.getAlignment()); - assert(!::cir::MissingFeatures::recordDeclHasAlignmentAttr()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclHasAlignmentAttr()); break; } default: llvm_unreachable("Unhandled type class"); } - assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); + cir_tl_assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); return clang::TypeInfo(Width, Align, AlignRequirement); } @@ -126,7 +129,7 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { Type Ty; // NOTE(cir): Clang does more stuff here. Not sure if we need to do the same. - assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); switch (K) { case clang::BuiltinType::Char_S: Ty = IntType::get(getMLIRContext(), 8, true); @@ -141,8 +144,8 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { void CIRLowerContext::initBuiltinTypes(const clang::TargetInfo &Target, const clang::TargetInfo *AuxTarget) { - assert((!this->Target || this->Target == &Target) && - "Incorrect target reinitialization"); + cir_tl_assert((!this->Target || this->Target == &Target) && + "Incorrect target reinitialization"); this->Target = &Target; this->AuxTarget = AuxTarget; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp index 2744f67d19de..9bec714e6376 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp @@ -38,16 +38,17 @@ CIRRecordLayout::CIRRecordLayout( FieldOffsets.insert(FieldOffsets.end(), fieldoffsets.begin(), fieldoffsets.end()); - assert(!PrimaryBase && "Layout for class with inheritance is NYI"); + cir_tl_assert(!PrimaryBase && "Layout for class with inheritance is NYI"); // CXXInfo->PrimaryBase.setPointer(PrimaryBase); - assert(!IsPrimaryBaseVirtual && "Layout for virtual base class is NYI"); + cir_tl_assert(!IsPrimaryBaseVirtual && + "Layout for virtual base class is NYI"); // CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual); CXXInfo->NonVirtualSize = nonvirtualsize; CXXInfo->NonVirtualAlignment = nonvirtualalignment; CXXInfo->PreferredNVAlignment = preferrednvalignment; CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject; // FIXME(cir): Initialize base classes offsets. - assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); CXXInfo->HasOwnVFPtr = hasOwnVFPtr; CXXInfo->VBPtrOffset = vbptroffset; CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index dd09122b94d9..664fd05ea658 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -58,7 +58,7 @@ class CIRToCIRArgMapping { unsigned totalIRArgs() const { return TotalIRArgs; } bool hasPaddingArg(unsigned ArgNo) const { - assert(ArgNo < ArgInfo.size()); + cir_tl_assert(ArgNo < ArgInfo.size()); return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; } @@ -77,7 +77,7 @@ class CIRToCIRArgMapping { onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); for (LowerFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; ++I, ++ArgNo) { - assert(I != FI.arg_end()); + cir_tl_assert(I != FI.arg_end()); // Type ArgType = I->type; const ::cir::ABIArgInfo &AI = I->info; // Collect data about IR arguments corresponding to Clang argument ArgNo. @@ -91,7 +91,7 @@ class CIRToCIRArgMapping { case ::cir::ABIArgInfo::Extend: case ::cir::ABIArgInfo::Direct: { // FIXME(cir): handle sseregparm someday... - assert(AI.getCoerceToType() && "Missing coerced type!!"); + cir_tl_assert(AI.getCoerceToType() && "Missing coerced type!!"); StructType STy = dyn_cast(AI.getCoerceToType()); if (AI.isDirect() && AI.getCanBeFlattened() && STy) { llvm_unreachable("NYI"); @@ -114,7 +114,7 @@ class CIRToCIRArgMapping { if (IRArgNo == 1 && SwapThisWithSRet) IRArgNo++; } - assert(ArgNo == ArgInfo.size()); + cir_tl_assert(ArgNo == ArgInfo.size()); if (::cir::MissingFeatures::inallocaArgs()) { llvm_unreachable("NYI"); @@ -126,7 +126,7 @@ class CIRToCIRArgMapping { /// Returns index of first IR argument corresponding to ArgNo, and their /// quantity. std::pair getIRArgs(unsigned ArgNo) const { - assert(ArgNo < ArgInfo.size()); + cir_tl_assert(ArgNo < ArgInfo.size()); return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, ArgInfo[ArgNo].NumberOfArgs); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index c0add1ecc1df..3cd27c35cf55 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -46,9 +46,9 @@ class ItaniumCXXABI : public CIRCXXABI { // FIXME(cir): This expects a CXXRecordDecl! Not any record type. RecordArgABI getRecordArgABI(const StructType RD) const override { - assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); // If C++ prohibits us from making a copy, pass by address. - assert(!::cir::MissingFeatures::recordDeclCanPassInRegisters()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclCanPassInRegisters()); return RAA_Default; } }; @@ -76,7 +76,7 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { case clang::TargetCXXABI::AppleARM64: // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits // from ARMCXXABI. We'll have to follow suit. - assert(!::cir::MissingFeatures::appleArm64CXXABI()); + cir_tl_assert(!::cir::MissingFeatures::appleArm64CXXABI()); return new ItaniumCXXABI(LM, /*UseARMMethodPtrABI=*/true, /*UseARMGuardVarABI=*/true); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 42de07ec6965..af036efef8cc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -23,9 +23,9 @@ const LowerFunctionInfo & arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, const OperandRange &args, const FuncType fnType, unsigned numExtraRequiredArgs, bool chainCall) { - assert(args.size() >= numExtraRequiredArgs); + cir_tl_assert(args.size() >= numExtraRequiredArgs); - assert(!::cir::MissingFeatures::extParamInfo()); + cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); // In most cases, there are no optional arguments. RequiredArgs required = RequiredArgs::All; @@ -35,7 +35,7 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, // FIXME(cir): Properly check if function is no-proto. if (/*IsPrototypedFunction=*/true) { if (fnType.isVarArg()) - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::isVarArg(), "NYI"); if (::cir::MissingFeatures::extParamInfo()) llvm_unreachable("NYI"); @@ -45,7 +45,7 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, // its skipped here since it requires CodeGen info. Maybe this information // could be embbed in the FuncOp during CIRGen. - assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); + cir_tl_assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; return LT.arrangeLLVMFunctionInfo(fnType.getReturnType(), opts, fnType.getInputs(), required); @@ -60,7 +60,7 @@ static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { return; } - assert(MissingFeatures::extParamInfo()); + cir_tl_assert(MissingFeatures::extParamInfo()); llvm_unreachable("NYI"); } @@ -74,11 +74,11 @@ static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { static const LowerFunctionInfo & arrangeCIRFunctionInfo(LowerTypes &CGT, bool instanceMethod, SmallVectorImpl &prefix, FuncType fnTy) { - assert(!MissingFeatures::extParamInfo()); + cir_tl_assert(!MissingFeatures::extParamInfo()); RequiredArgs Required = RequiredArgs::forPrototypePlus(fnTy, prefix.size()); // FIXME: Kill copy. appendParameterTypes(prefix, fnTy); - assert(!MissingFeatures::qualifiedTypes()); + cir_tl_assert(!MissingFeatures::qualifiedTypes()); Type resultType = fnTy.getReturnType(); FnInfoOpts opts = @@ -110,7 +110,7 @@ void LowerModule::constructAttributeList(StringRef Name, // TODO(cir): Implement AddAttributesFromFunctionProtoType here. // TODO(cir): Implement AddAttributesFromOMPAssumes here. - assert(!MissingFeatures::openMP()); + cir_tl_assert(!MissingFeatures::openMP()); // TODO(cir): Skipping a bunch of AST queries here. We will need to partially // implement some of them as this section sets target-specific attributes @@ -147,8 +147,8 @@ void LowerModule::constructAttributeList(StringRef Name, [[fallthrough]]; case ABIArgInfo::Direct: if (RetAI.getInReg()) - llvm_unreachable("InReg attribute is NYI"); - assert(!::cir::MissingFeatures::noFPClass()); + cir_assert_or_abort(!::cir::MissingFeatures::ABIInRegAttribute(), "NYI"); + cir_tl_assert(!::cir::MissingFeatures::noFPClass()); break; case ABIArgInfo::Ignore: break; @@ -216,7 +216,7 @@ void LowerModule::constructAttributeList(StringRef Name, else if (AI.getInReg()) llvm_unreachable("InReg attribute is NYI"); // Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); - assert(!::cir::MissingFeatures::noFPClass()); + cir_tl_assert(!::cir::MissingFeatures::noFPClass()); break; default: llvm_unreachable("Missing ABIArgInfo::Kind"); @@ -227,7 +227,7 @@ void LowerModule::constructAttributeList(StringRef Name, } // TODO(cir): Missing some swift and nocapture stuff here. - assert(!::cir::MissingFeatures::extParamInfo()); + cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); if (!Attrs.empty()) { unsigned FirstIRArg, NumIRArgs; @@ -236,7 +236,7 @@ void LowerModule::constructAttributeList(StringRef Name, newFn.setArgAttrs(FirstIRArg + i, Attrs); } } - assert(ArgNo == FI.arg_size()); + cir_tl_assert(ArgNo == FI.arg_size()); } /// Arrange the argument and result information for the declaration or @@ -245,15 +245,15 @@ const LowerFunctionInfo &LowerTypes::arrangeFunctionDeclaration(FuncOp fnOp) { if (MissingFeatures::funcDeclIsCXXMethodDecl()) llvm_unreachable("NYI"); - assert(!MissingFeatures::qualifiedTypes()); + cir_tl_assert(!MissingFeatures::qualifiedTypes()); FuncType FTy = fnOp.getFunctionType(); - assert(!MissingFeatures::CUDA()); + cir_tl_assert(!MissingFeatures::CUDA()); // When declaring a function without a prototype, always use a // non-variadic type. if (fnOp.getNoProto()) { - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); } return arrangeFreeFunctionType(FTy); @@ -300,12 +300,12 @@ const LowerFunctionInfo & LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, ArrayRef argTypes, RequiredArgs required) { - assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); LowerFunctionInfo *FI = nullptr; // FIXME(cir): Allow user-defined CCs (e.g. __attribute__((vectorcall))). - assert(!::cir::MissingFeatures::extParamInfo()); + cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); unsigned CC = clangCallConvToLLVMCallConv(clang::CallingConv::CC_C); // Construct the function info. We co-allocate the ArgInfos. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 9e90c44a7d76..6dbefd138002 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -63,7 +63,10 @@ Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, FirstEltSize < CGF.LM.getDataLayout().getTypeStoreSize(SrcSTy)) return SrcPtr; - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::ABIEnterStructForCoercedAccess(), + "NYI"); + return SrcPtr; // FIXME: This is a temporary workaround for the assertion + // above. } /// Create a store to \param Dst from \param Src where the source and @@ -80,13 +83,13 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, } // FIXME(cir): We need a better way to handle datalayout queries. - assert(isa(SrcTy)); + cir_tl_assert(isa(SrcTy)); llvm::TypeSize SrcSize = CGF.LM.getDataLayout().getTypeAllocSize(SrcTy); if (StructType DstSTy = dyn_cast(DstTy)) { Dst = enterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize.getFixedValue(), CGF); - assert(isa(Dst.getType())); + cir_tl_assert(isa(Dst.getType())); DstTy = cast(Dst.getType()).getPointee(); } @@ -107,7 +110,7 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, llvm::TypeSize DstSize = CGF.LM.getDataLayout().getTypeAllocSize(DstTy); // If store is legal, just bitcast the src pointer. - assert(!::cir::MissingFeatures::vectorType()); + cir_tl_assert(!::cir::MissingFeatures::vectorType()); if (SrcSize.getFixedValue() <= DstSize.getFixedValue()) { // Dst = Dst.withElementType(SrcTy); CGF.buildAggregateStore(Src, Dst, DstIsVolatile); @@ -257,14 +260,14 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // are dealt with in CIRGen. CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), FI); - assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); + cir_tl_assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); // If we're using inalloca, all the memory arguments are GEPs off of the last // parameter, which is a pointer to the complete memory area. - assert(!::cir::MissingFeatures::inallocaArgs()); + cir_tl_assert(!::cir::MissingFeatures::inallocaArgs()); // Name the struct return parameter. - assert(!::cir::MissingFeatures::sretArgs()); + cir_tl_assert(!::cir::MissingFeatures::sretArgs()); // Track if we received the parameter as a pointer (indirect, byval, or // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it @@ -272,11 +275,18 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, SmallVector ArgVals; ArgVals.reserve(Args.size()); + // FIXME(cir): non-blocking workaround for argument types that are not yet + // properly handled by the ABI. + if (cirMissingFeatureAssertionMode && FI.arg_size() != Args.size()) { + cir_tl_assert(::cir::MissingFeatures::ABIParameterCoercion()); + return success(); + } + // Create a pointer value for every parameter declaration. This usually // entails copying one or more LLVM IR arguments into an alloca. Don't push // any cleanups or do anything that might unwind. We do that separately, so // we can push the cleanups in the correct order for the ABI. - assert(FI.arg_size() == Args.size()); + cir_tl_assert(FI.arg_size() == Args.size()); unsigned ArgNo = 0; LowerFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); for (MutableArrayRef::const_iterator i = Args.begin(), @@ -294,7 +304,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, llvm_unreachable("NYI"); else Ty = Arg.getType(); - assert(!::cir::MissingFeatures::evaluationKind()); + cir_tl_assert(!::cir::MissingFeatures::evaluationKind()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -310,14 +320,15 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // http://llvm.org/docs/LangRef.html#paramattrs. if (ArgI.getDirectOffset() == 0 && isa(LTy) && isa(ArgI.getCoerceToType())) { - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::ABIPointerParameterAttrs(), + "NYI"); } // Prepare the argument value. If we have the trivial case, handle it // with no muss and fuss. if (!isa(ArgI.getCoerceToType()) && ArgI.getCoerceToType() == Ty && ArgI.getDirectOffset() == 0) { - assert(NumIRArgs == 1); + cir_tl_assert(NumIRArgs == 1); // LLVM expects swifterror parameters to be used in very restricted // ways. Copy the value into a less-restricted temporary. @@ -344,7 +355,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, break; } - assert(!::cir::MissingFeatures::vectorType()); + cir_tl_assert(!::cir::MissingFeatures::vectorType()); // Allocate original argument to be "uncoerced". // FIXME(cir): We should have a alloca op builder that does not required @@ -366,7 +377,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, llvm_unreachable("NYI"); } else { // Simple case, just do a coerced store of the argument into the alloca. - assert(NumIRArgs == 1); + cir_tl_assert(NumIRArgs == 1); Value AI = Fn.getArgument(FirstIRArg); // TODO(cir): Set argument name in the new function. createCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); @@ -385,7 +396,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // RAUW the original argument alloca with the new one. This assumes that // the argument is used only to be stored in a alloca. Value arg = SrcFn.getArgument(ArgNo); - assert(arg.hasOneUse()); + cir_tl_assert(arg.hasOneUse()); auto *firstStore = *arg.user_begin(); auto argAlloca = cast(firstStore).getAddr(); rewriter.replaceAllUsesWith(argAlloca, Alloca); @@ -471,28 +482,34 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { /// focuses on the ABI-specific details. So a lot of codegen stuff is removed. LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, const LowerFunctionInfo &FnInfo) { - assert(newFn && "generating code for null Function"); + cir_tl_assert(newFn && "generating code for null Function"); auto Args = oldFn.getArguments(); // Emit the ABI-specific function prologue. - assert(newFn.empty() && "Function already has a body"); + cir_tl_assert(newFn.empty() && "Function already has a body"); rewriter.setInsertionPointToEnd(newFn.addEntryBlock()); if (buildFunctionProlog(FnInfo, newFn, oldFn.getArguments()).failed()) return failure(); // Ensure that old ABI-agnostic arguments uses were replaced. const auto hasNoUses = [](Value val) { return val.getUses().empty(); }; - assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && "Missing RAUW?"); + cir_tl_assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && + "Missing RAUW?"); + + // NOTE(cir): While the new function has the ABI-aware parameters, the old + // function still has the function logic. To complete the migration, we have + // to move the old function body to the new function. + + // Backup references to entry blocks. + Block *srcBlock = &oldFn.getBody().front(); + Block *dstBlock = &newFn.getBody().front(); // Migrate function body to new ABI-aware function. - assert(oldFn.getBody().hasOneBlock() && - "Multiple blocks in original function not supported"); + rewriter.inlineRegionBefore(oldFn.getBody(), newFn.getBody(), + newFn.getBody().end()); - // Move old function body to new function. - // FIXME(cir): The merge below is not very good: will not work if SrcFn has - // multiple blocks and it mixes the new and old prologues. - rewriter.mergeBlocks(&oldFn.getBody().front(), &newFn.getBody().front(), - newFn.getArguments()); + // Merge entry blocks to ensure correct branching. + rewriter.mergeBlocks(srcBlock, dstBlock, newFn.getArguments()); // FIXME(cir): What about saving parameters for corotines? Should we do // something about it in this pass? If the change with the calling @@ -511,16 +528,17 @@ void LowerFunction::buildAggregateStore(Value Val, Value Dest, // Function to store a first-class aggregate into memory. We prefer to // store the elements rather than the aggregate to be more friendly to // fast-isel. - assert(mlir::isa(Dest.getType()) && "Storing in a non-pointer!"); + cir_tl_assert(mlir::isa(Dest.getType()) && + "Storing in a non-pointer!"); (void)DestIsVolatile; // Circumvent CIR's type checking. Type pointeeTy = mlir::cast(Dest.getType()).getPointee(); if (Val.getType() != pointeeTy) { // NOTE(cir): We only bitcast and store if the types have the same size. - assert((LM.getDataLayout().getTypeSizeInBits(Val.getType()) == - LM.getDataLayout().getTypeSizeInBits(pointeeTy)) && - "Incompatible types"); + cir_tl_assert((LM.getDataLayout().getTypeSizeInBits(Val.getType()) == + LM.getDataLayout().getTypeSizeInBits(pointeeTy)) && + "Incompatible types"); auto loc = Val.getLoc(); Val = rewriter.create(loc, pointeeTy, CastKind::bitcast, Val); } @@ -552,7 +570,7 @@ LogicalResult LowerFunction::rewriteCallOp(CallOp op, // NOTE(cir): There is no direct way to fetch the function type from the // CallOp, so we fetch it from the source function. This assumes the // function definition has not yet been lowered. - assert(SrcFn && "No source function"); + cir_tl_assert(SrcFn && "No source function"); auto fnType = SrcFn.getFunctionType(); // Rewrite the call operation to abide to the ABI calling convention. @@ -610,10 +628,10 @@ Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, // Chain calls use this same code path to add the invisible chain parameter // to the function type. if (origCallee.getNoProto() || Chain) { - llvm_unreachable("NYI"); + cir_assert_or_abort(::cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); } - assert(!::cir::MissingFeatures::CUDA()); + cir_tl_assert(!::cir::MissingFeatures::CUDA()); // TODO(cir): LLVM IR has the concept of "CallBase", which is a base class // for all types of calls. Perhaps we should have a CIR interface to mimic @@ -665,13 +683,13 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, llvm_unreachable("NYI"); } - assert(!::cir::MissingFeatures::swift()); + cir_tl_assert(!::cir::MissingFeatures::swift()); // NOTE(cir): Skipping lifetime markers here. // Translate all of the arguments as necessary to match the IR lowering. - assert(CallInfo.arg_size() == CallArgs.size() && - "Mismatch between function signature & arguments."); + cir_tl_assert(CallInfo.arg_size() == CallArgs.size() && + "Mismatch between function signature & arguments."); unsigned ArgNo = 0; LowerFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); for (auto I = CallArgs.begin(), E = CallArgs.end(); I != E; @@ -696,7 +714,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!isa(ArgInfo.getCoerceToType()) && ArgInfo.getCoerceToType() == info_it->type && ArgInfo.getDirectOffset() == 0) { - assert(NumIRArgs == 1); + cir_tl_assert(NumIRArgs == 1); Value V; if (!isa(I->getType())) { V = *I; @@ -742,11 +760,11 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, llvm_unreachable("NYI"); } else { // In the simple case, just pass the coerced loaded value. - assert(NumIRArgs == 1); + cir_tl_assert(NumIRArgs == 1); Value Load = createCoercedValue(Src, ArgInfo.getCoerceToType(), *this); // FIXME(cir): We should probably handle CMSE non-secure calls here - assert(!::cir::MissingFeatures::cmseNonSecureCallAttr()); + cir_tl_assert(!::cir::MissingFeatures::cmseNonSecureCallAttr()); // since they are a ARM-specific feature. if (::cir::MissingFeatures::undef()) @@ -771,7 +789,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // debugging stuff here. // Update the largest vector width if any arguments have vector types. - assert(!::cir::MissingFeatures::vectorType()); + cir_tl_assert(!::cir::MissingFeatures::vectorType()); // Compute the calling convention and attributes. @@ -797,7 +815,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, rewriter.getAttr(rewriter.getDictionaryAttr({})); newCallOp->setAttr("extra_attrs", extraAttrs); - assert(!::cir::MissingFeatures::vectorType()); + cir_tl_assert(!::cir::MissingFeatures::vectorType()); // NOTE(cir): Skipping some ObjC, tail-call, debug, and attribute stuff // here. @@ -847,7 +865,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // FIXME(cir): Use return value slot here. Value RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. - assert(!::cir::MissingFeatures::volatileTypes()); + cir_tl_assert(!::cir::MissingFeatures::volatileTypes()); // NOTE(cir): If the function returns, there should always be a valid // return value present. Instead of setting the return value here, we @@ -855,7 +873,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!RetVal) { RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. - assert(::cir::MissingFeatures::volatileTypes()); + cir_tl_assert(::cir::MissingFeatures::volatileTypes()); } // An empty record can overlap other data (if declared with @@ -897,7 +915,8 @@ ::cir::TypeEvaluationKind LowerFunction::getEvaluationKind(Type type) { // FIXME(cir): Implement type classes for CIR types. if (isa(type)) return ::cir::TypeEvaluationKind::TEK_Aggregate; - if (isa(type)) + if (isa(type)) return ::cir::TypeEvaluationKind::TEK_Scalar; llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index c81335c9985a..47687cfa2235 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -35,7 +35,9 @@ class RequiredArgs { enum All_t { All }; RequiredArgs(All_t _) : NumRequired(~0U) {} - explicit RequiredArgs(unsigned n) : NumRequired(n) { assert(n != ~0U); } + explicit RequiredArgs(unsigned n) : NumRequired(n) { + cir_tl_assert(n != ~0U); + } /// Compute the arguments required by the given formal prototype, /// given that there may be some additional, non-formal arguments @@ -47,7 +49,8 @@ class RequiredArgs { if (!prototype.isVarArg()) return All; - llvm_unreachable("Variadic function is NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::variadicFunctions(), "NYI"); + return All; // FIXME(cir): Temporary workaround for the assertion above. } bool allowsOptionalArgs() const { return NumRequired != ~0U; } @@ -105,7 +108,7 @@ class LowerFunctionInfo final ArrayRef argTypes, RequiredArgs required) { // TODO(cir): Add assertions? - assert(!::cir::MissingFeatures::extParamInfo()); + cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); void *buffer = operator new(totalSizeToAlloc(argTypes.size() + 1)); LowerFunctionInfo *FI = new (buffer) LowerFunctionInfo(); @@ -146,7 +149,7 @@ class LowerFunctionInfo final unsigned arg_size() const { return NumArgs; } bool isVariadic() const { - assert(!::cir::MissingFeatures::variadicFunctions()); + cir_tl_assert(!::cir::MissingFeatures::variadicFunctions()); return false; } unsigned getNumRequiredArgs() const { diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 715a5f2470d7..88344533fe38 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -199,10 +199,13 @@ LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { llvm_unreachable("ExtraAttrs are NYI"); } - if (LowerFunction(*this, rewriter, op, newFn) - .generateCode(op, newFn, FI) - .failed()) - return failure(); + // Is a function definition: handle the body. + if (!op.isDeclaration()) { + if (LowerFunction(*this, rewriter, op, newFn) + .generateCode(op, newFn, FI) + .failed()) + return failure(); + } // Erase original ABI-agnostic function. rewriter.eraseOp(op); @@ -225,6 +228,9 @@ LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { // TODO: not to create it every time std::unique_ptr createLowerModule(ModuleOp module, PatternRewriter &rewriter) { + assert(module->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName()) && + "Missing data layout attribute"); + // Fetch the LLVM data layout string. auto dataLayoutStr = cast( module->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName())); @@ -239,7 +245,7 @@ std::unique_ptr createLowerModule(ModuleOp module, // FIXME(cir): This just uses the default language options. We need to account // for custom options. // Create context. - assert(!::cir::MissingFeatures::langOpts()); + cir_tl_assert(!::cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; return std::make_unique(langOpts, module, dataLayoutStr, diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index 44cd5a0ae1cb..a7f3e1fa187a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -68,7 +68,7 @@ class LowerModule { // FIXME(cir): This would be in ASTContext, not CodeGenModule. clang::TargetCXXABI::Kind getCXXABIKind() const { auto kind = getTarget().getCXXABI().getKind(); - assert(!::cir::MissingFeatures::langOpts()); + cir_tl_assert(!::cir::MissingFeatures::langOpts()); return kind; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index bdec98a64f43..fa1e34140167 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -60,10 +60,10 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); // Add type for sret argument. - assert(!::cir::MissingFeatures::sretArgs()); + cir_tl_assert(!::cir::MissingFeatures::sretArgs()); // Add type for inalloca argument. - assert(!::cir::MissingFeatures::inallocaArgs()); + cir_tl_assert(!::cir::MissingFeatures::inallocaArgs()); // Add in all of the required arguments. unsigned ArgNo = 0; @@ -72,7 +72,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { for (; it != ie; ++it, ++ArgNo) { const ABIArgInfo &ArgInfo = it->info; - assert(!::cir::MissingFeatures::argumentPadding()); + cir_tl_assert(!::cir::MissingFeatures::argumentPadding()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -85,11 +85,11 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { Type argType = ArgInfo.getCoerceToType(); StructType st = dyn_cast(argType); if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { - assert(NumIRArgs == st.getNumElements()); + cir_tl_assert(NumIRArgs == st.getNumElements()); for (unsigned i = 0, e = st.getNumElements(); i != e; ++i) ArgTypes[FirstIRArg + i] = st.getMembers()[i]; } else { - assert(NumIRArgs == 1); + cir_tl_assert(NumIRArgs == 1); ArgTypes[FirstIRArg] = argType; } break; @@ -117,5 +117,7 @@ mlir::Type LowerTypes::convertType(Type T) { } llvm::outs() << "Missing default ABI-specific type for " << T << "\n"; - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::X86DefaultABITypeConvertion(), + "NYI"); + return T; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index ea8ef6f28144..48855caf617a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -58,11 +58,11 @@ class EmptySubobjectMap { void EmptySubobjectMap::ComputeEmptySubobjectSizes() { // Check the bases. - assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); // Check the fields. for (const auto FT : Class.getMembers()) { - assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); const auto RT = dyn_cast(FT); // We only care about record types. @@ -70,7 +70,8 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() { continue; // TODO(cir): Handle nested record types. - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::ABINestedRecordLayout(), + "NYI"); } } @@ -206,7 +207,7 @@ class ItaniumRecordLayoutBuilder { bool isPacked, const Type Ty); clang::CharUnits getSize() const { - assert(Size % Context.getCharWidth() == 0); + cir_tl_assert(Size % Context.getCharWidth() == 0); return Context.toCharUnitsFromBits(Size); } uint64_t getSizeInBits() const { return Size; } @@ -215,7 +216,7 @@ class ItaniumRecordLayoutBuilder { void setSize(uint64_t NewSize) { Size = NewSize; } clang::CharUnits getDataSize() const { - assert(DataSize % Context.getCharWidth() == 0); + cir_tl_assert(DataSize % Context.getCharWidth() == 0); return Context.toCharUnitsFromBits(DataSize); } @@ -234,24 +235,25 @@ void ItaniumRecordLayoutBuilder::layout(const StructType RT) { initializeLayout(RT); // Lay out the vtable and the non-virtual bases. - assert(!::cir::MissingFeatures::isCXXRecordDecl() && - !::cir::MissingFeatures::CXXRecordIsDynamicClass()); + cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl() && + !::cir::MissingFeatures::CXXRecordIsDynamicClass()); layoutFields(RT); // FIXME(cir): Handle virtual-related layouts. - assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); - assert(!::cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); + cir_tl_assert( + !::cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); } void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { if (const auto RT = dyn_cast(Ty)) { IsUnion = RT.isUnion(); - assert(!::cir::MissingFeatures::recordDeclIsMSStruct()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclIsMSStruct()); } - assert(!::cir::MissingFeatures::recordDeclIsPacked()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclIsPacked()); // Honor the default struct packing maximum alignment flag. if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) { @@ -289,8 +291,8 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { void ItaniumRecordLayoutBuilder::layoutField(const Type D, bool InsertExtraPadding) { // auto FieldClass = D.dyn_cast(); - assert(!::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && - !::cir::MissingFeatures::CXXRecordDeclIsEmptyCXX11()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && + !::cir::MissingFeatures::CXXRecordDeclIsEmptyCXX11()); bool IsOverlappingEmptyField = false; // FIXME(cir): Needs more features. clang::CharUnits FieldOffset = (IsUnion || IsOverlappingEmptyField) @@ -304,7 +306,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, llvm_unreachable("NYI"); } - assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit; // Reset the unfilled bits. @@ -344,8 +346,8 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, llvm_unreachable("NYI"); } - assert(!::cir::MissingFeatures::recordDeclIsPacked() && - !::cir::MissingFeatures::CXXRecordDeclIsPOD()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclIsPacked() && + !::cir::MissingFeatures::CXXRecordDeclIsPOD()); bool FieldPacked = false; // FIXME(cir): Needs more features. // When used as part of a typedef, or together with a 'packed' attribute, the @@ -383,7 +385,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, clang::CharUnits UnpackedFieldOffset = FieldOffset; // clang::CharUnits OriginalFieldAlign = UnpackedFieldAlign; - assert(!::cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); clang::CharUnits MaxAlignmentInChars = clang::CharUnits::Zero(); PackedFieldAlign = std::max(PackedFieldAlign, MaxAlignmentInChars); PreferredAlign = std::max(PreferredAlign, MaxAlignmentInChars); @@ -456,7 +458,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, // laid out. A regular mlir::Type has not way of doing this. In fact, we will // likely need an external abstraction, as I don't think this is possible with // just the field type. - assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); if (Packed && !FieldPacked && PackedFieldAlign < FieldAlign) llvm_unreachable("NYI"); @@ -465,10 +467,10 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, void ItaniumRecordLayoutBuilder::layoutFields(const StructType D) { // Layout each field, for now, just sequentially, respecting alignment. In // the future, this will need to be tweakable by targets. - assert(!::cir::MissingFeatures::recordDeclMayInsertExtraPadding() && - !Context.getLangOpts().SanitizeAddressFieldPadding); + cir_tl_assert(!::cir::MissingFeatures::recordDeclMayInsertExtraPadding() && + !Context.getLangOpts().SanitizeAddressFieldPadding); bool InsertExtraPadding = false; - assert(!::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); bool HasFlexibleArrayMember = false; for (const auto FT : D.getMembers()) { layoutField(FT, InsertExtraPadding && (FT != D.getMembers().back() || @@ -485,20 +487,20 @@ void ItaniumRecordLayoutBuilder::UpdateAlignment( return; if (NewAlignment > Alignment) { - assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) && - "Alignment not a power of 2"); + cir_tl_assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) && + "Alignment not a power of 2"); Alignment = NewAlignment; } if (UnpackedNewAlignment > UnpackedAlignment) { - assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) && - "Alignment not a power of 2"); + cir_tl_assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) && + "Alignment not a power of 2"); UnpackedAlignment = UnpackedNewAlignment; } if (PreferredNewAlignment > PreferredAlignment) { - assert(llvm::isPowerOf2_64(PreferredNewAlignment.getQuantity()) && - "Alignment not a power of 2"); + cir_tl_assert(llvm::isPowerOf2_64(PreferredNewAlignment.getQuantity()) && + "Alignment not a power of 2"); PreferredAlignment = PreferredNewAlignment; } } @@ -525,7 +527,7 @@ void ItaniumRecordLayoutBuilder::checkFieldPadding( PadSize = PadSize / CharBitNum; // InBits = false; } - assert(::cir::MissingFeatures::bitFieldPaddingDiagnostics()); + cir_tl_assert(::cir::MissingFeatures::bitFieldPaddingDiagnostics()); } if (isPacked && Offset != UnpackedOffset) { HasPackedField = true; @@ -544,7 +546,7 @@ bool isMsLayout(const CIRLowerContext &Context) { /// of the given class (considering it as a base class) when allocating /// objects? static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { - assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + cir_tl_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); switch (ABI.getTailPaddingUseRules()) { case clang::TargetCXXABI::AlwaysUseTailPadding: return false; @@ -566,7 +568,7 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { // intended. // FIXME(cir): This always returns true since we can't check if a CIR record // is a POD type. - assert(!::cir::MissingFeatures::CXXRecordDeclIsPOD()); + cir_tl_assert(!::cir::MissingFeatures::CXXRecordDeclIsPOD()); return true; case clang::TargetCXXABI::UseTailPaddingUnlessPOD11: @@ -588,10 +590,11 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { /// (struct/union/class), which indicates its size and field position /// information. const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { - assert(isa(D) && "Not a record type"); + cir_tl_assert(isa(D) && "Not a record type"); auto RT = dyn_cast(D); - assert(RT.isComplete() && "Cannot get layout of forward declarations!"); + cir_tl_assert(RT.isComplete() && + "Cannot get layout of forward declarations!"); // FIXME(cir): Use a more MLIR-based approach by using it's buitin data layout // features, such as interfaces, cacheing, and the DLTI dialect. @@ -602,7 +605,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { llvm_unreachable("NYI"); } else { // FIXME(cir): Add if-else separating C and C++ records. - assert(!::cir::MissingFeatures::isCXXRecordDecl()); + cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl()); EmptySubobjectMap EmptySubobjects(*this, RT); ItaniumRecordLayoutBuilder Builder(*this, &EmptySubobjects); Builder.layout(RT); @@ -617,7 +620,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { skipTailPadding ? Builder.getSize() : Builder.getDataSize(); clang::CharUnits NonVirtualSize = skipTailPadding ? DataSize : Builder.NonVirtualSize; - assert(!::cir::MissingFeatures::CXXRecordIsDynamicClass()); + cir_tl_assert(!::cir::MissingFeatures::CXXRecordIsDynamicClass()); // FIXME(cir): Whose responsible for freeing the allocation below? NewEntry = new CIRRecordLayout( *this, Builder.getSize(), Builder.Alignment, Builder.PreferredAlignment, @@ -632,7 +635,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { } // TODO(cir): Add option to dump the layouts. - assert(!::cir::MissingFeatures::cacheRecordLayouts()); + cir_tl_assert(!::cir::MissingFeatures::cacheRecordLayouts()); return *NewEntry; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index a3406b722c41..28b363664387 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -60,7 +60,7 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { public: AArch64TargetLoweringInfo(LowerTypes <, AArch64ABIKind Kind) : TargetLoweringInfo(std::make_unique(LT, Kind)) { - assert(!MissingFeature::swift()); + cir_tl_assert(!MissingFeature::swift()); } unsigned getTargetAddrSpaceFromCIRAddrSpace( @@ -87,7 +87,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, return ABIArgInfo::getIgnore(); if (const auto _ = dyn_cast(RetTy)) { - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::vectorType(), "NYI"); } // Large vector types should be returned via memory. @@ -128,7 +128,9 @@ AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, : ABIArgInfo::getDirect()); } - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::AArch64TypeClassification(), + "NYI"); + return {}; } std::unique_ptr diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index 7d43000877b7..1e02c9c370bd 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -70,13 +70,13 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // Homogenous Aggregate type not supported and indirect arg // passing not supported yet. And for these supported types, // we should not have alignment greater than 8 problem. - assert(isSupportedType); - assert(!cir::MissingFeatures::classifyArgumentTypeForAArch64()); + cir_tl_assert(isSupportedType); + cir_tl_assert(!cir::MissingFeatures::classifyArgumentTypeForAArch64()); // indirect arg passing would expect one more level of pointer dereference. - assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); // false as a place holder for now, as we don't have a way to query bool isIndirect = false; - assert(!cir::MissingFeatures::supportgetCoerceToTypeForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportgetCoerceToTypeForAArch64()); // we don't convert to LLVM Type here as we are lowering to CIR here. // so baseTy is the just type of the result of va_arg. // but it depends on arg type indirectness and coercion defined by ABI. @@ -120,8 +120,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // though anyone passing 2GB of arguments, each at most 16 bytes, deserves // whatever they get). - assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); - assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // One is just place holder for now, as we don't have a way to query // type size and alignment. clang::CharUnits tySize = @@ -132,7 +132,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // indirectness, type size and type alignment all // decide regSize, but they are all ABI defined // thus need ABI lowering query system. - assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); int regSize = isIndirect ? 8 : tySize.getQuantity(); int regTopIndex; mlir::Value regOffsP; @@ -187,8 +187,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we // align __gr_offs to calculate the potential address. if (!IsFPR && !isIndirect && tyAlign.getQuantity() > 8) { - assert(!cir::MissingFeatures::handleAArch64Indirect()); - assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); llvm_unreachable("register alignment correction NYI"); } @@ -224,19 +224,20 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( loc, castRegTop.getType(), castRegTop, regOffs); if (isIndirect) { - assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); llvm_unreachable("indirect arg passing NYI"); } // TODO: isHFA, numMembers and base should be query result from query uint64_t numMembers = 0; - assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); + cir_tl_assert( + !cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); bool isHFA = false; // though endianess can be known from datalayout, it might need an unified // ABI lowering query system to answer the question. - assert(!cir::MissingFeatures::supportisEndianQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportisEndianQueryForAArch64()); bool isBigEndian = datalayout.isBigEndian(); - assert(!cir::MissingFeatures::supportisAggregateTypeForABIAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportisAggregateTypeForABIAArch64()); // TODO: isAggregateTypeForABI should be query result from ABI info bool isAggregateTypeForABI = false; if (isHFA && numMembers > 1) { @@ -244,10 +245,11 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // and stored 16-bytes apart regardless of size (they're notionally in qN, // qN+1, ...). We reload and store into a temporary local variable // contiguously. - assert(!isIndirect && "Homogeneous aggregates should be passed directly"); + cir_tl_assert(!isIndirect && + "Homogeneous aggregates should be passed directly"); llvm_unreachable("Homogeneous aggregates NYI"); } else { - assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // TODO: slotSize should be query result about alignment. clang::CharUnits slotSize = clang::CharUnits::fromQuantity(8); if (isBigEndian && !isIndirect && (isHFA || isAggregateTypeForABI) && @@ -266,11 +268,12 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // On big-endian platforms, the value will be right-aligned in its stack slot. // and we also need to think about other ABI lowering concerns listed below. - assert(!cir::MissingFeatures::handleBigEndian()); - assert(!cir::MissingFeatures::handleAArch64Indirect()); - assert(!cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); - assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); - assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::handleBigEndian()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert( + !cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); builder.create(loc, mlir::ValueRange{resAsVoidP}, contBlock); @@ -284,8 +287,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( auto ptrDiffTy = mlir::cir::IntType::get(builder.getContext(), 64, /*signed=*/false); - assert(!cir::MissingFeatures::handleAArch64Indirect()); - assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // Again, stack arguments may need realignment. In this case both integer and // floating-point ones might be affected. if (!isIndirect && tyAlign.getQuantity() > 8) { @@ -307,8 +310,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // which are all ABI defined thus need ABI lowering query system. // The implementation we have now supports most common cases which assumes // no indirectness, no alignment greater than 8, and little endian. - assert(!cir::MissingFeatures::handleBigEndian()); - assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + cir_tl_assert(!cir::MissingFeatures::handleBigEndian()); + cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); auto stackSizeC = builder.create( loc, ptrDiffTy, @@ -340,12 +343,12 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( builder.setInsertionPoint(op); contBlock->addArgument(onStackPtr.getType(), loc); auto resP = contBlock->getArgument(0); - assert(mlir::isa(resP.getType())); + cir_tl_assert(mlir::isa(resP.getType())); auto opResPTy = mlir::cir::PointerType::get(builder.getContext(), opResTy); auto castResP = builder.createBitcast(resP, opResPTy); auto res = builder.create(loc, castResP); // there would be another level of ptr dereference if indirect arg passing - assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); if (isIndirect) { res = builder.create(loc, res.getResult()); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index 9d79fb7ccb43..b35476225b3d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -34,7 +34,7 @@ cir::LoweringPrepareCXXABI *cir::LoweringPrepareCXXABI::createItaniumABI() { static void buildBadCastCall(CIRBaseBuilderTy &builder, mlir::Location loc, mlir::FlatSymbolRefAttr badCastFuncRef) { // TODO(cir): set the calling convention to __cxa_bad_cast. - assert(!MissingFeatures::setCallingConv()); + cir_tl_assert(!MissingFeatures::setCallingConv()); builder.createCallOp(loc, badCastFuncRef, mlir::ValueRange{}); builder.create(loc); @@ -48,7 +48,7 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, auto castInfo = op.getInfo().value(); // TODO(cir): consider address space - assert(!MissingFeatures::addressSpace()); + cir_tl_assert(!MissingFeatures::addressSpace()); auto srcPtr = builder.createBitcast(srcValue, builder.getVoidPtrTy()); auto srcRtti = builder.getConstant(loc, castInfo.getSrcRtti()); @@ -59,15 +59,15 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, mlir::Value dynCastFuncArgs[4] = {srcPtr, srcRtti, destRtti, offsetHint}; // TODO(cir): set the calling convention for __dynamic_cast. - assert(!MissingFeatures::setCallingConv()); + cir_tl_assert(!MissingFeatures::setCallingConv()); mlir::Value castedPtr = builder .createCallOp(loc, dynCastFuncRef, builder.getVoidPtrTy(), dynCastFuncArgs) .getResult(); - assert(mlir::isa(castedPtr.getType()) && - "the return value of __dynamic_cast should be a ptr"); + cir_tl_assert(mlir::isa(castedPtr.getType()) && + "the return value of __dynamic_cast should be a ptr"); /// C++ [expr.dynamic.cast]p9: /// A failed cast to reference type throws std::bad_cast @@ -93,7 +93,7 @@ buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, bool vtableUsesRelativeLayout = op.getRelativeLayout(); // TODO(cir): consider address space in this function. - assert(!MissingFeatures::addressSpace()); + cir_tl_assert(!MissingFeatures::addressSpace()); mlir::Type vtableElemTy; uint64_t vtableElemAlign; @@ -141,7 +141,7 @@ LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, auto loc = op->getLoc(); auto srcValue = op.getSrc(); - assert(!MissingFeatures::buildTypeCheck()); + cir_tl_assert(!MissingFeatures::buildTypeCheck()); if (op.isRefcast()) return buildDynamicCastAfterNullCheck(builder, op); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp index f5a7250dffd0..f5540e221d9d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp @@ -33,7 +33,7 @@ class SPIRVABIInfo : public ABIInfo { private: void computeInfo(LowerFunctionInfo &FI) const override { - llvm_unreachable("ABI NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::SPIRVABI(), "NYI"); } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 38501f7c3124..81e7c513ade7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -95,7 +95,8 @@ Type getFPTypeAtOffset(Type IRType, unsigned IROffset, if (IROffset == 0 && isa(IRType)) return IRType; - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::X86GetFPTypeAtOffset(), "NYI"); + return IRType; // FIXME(cir): Temporary workaround for the assertion above. } } // namespace @@ -193,7 +194,7 @@ class X86_64TargetLoweringInfo : public TargetLoweringInfo { public: X86_64TargetLoweringInfo(LowerTypes &LM, X86AVXABILevel AVXLevel) : TargetLoweringInfo(std::make_unique(LM, AVXLevel)) { - assert(!::cir::MissingFeatures::swift()); + cir_tl_assert(!::cir::MissingFeatures::swift()); } unsigned getTargetAddrSpaceFromCIRAddrSpace( @@ -273,8 +274,8 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, Current = Class::NoClass; // If this is a C++ record, classify the bases first. - assert(!::cir::MissingFeatures::isCXXRecordDecl() && - !::cir::MissingFeatures::getCXXRecordBases()); + cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl() && + !::cir::MissingFeatures::getCXXRecordBases()); // Classify the fields one at a time, merging the results. bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <= @@ -283,10 +284,10 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, bool IsUnion = RT.isUnion() && !UseClang11Compat; // FIXME(cir): An interface to handle field declaration might be needed. - assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); for (auto [idx, FT] : llvm::enumerate(RT.getMembers())) { uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); - assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); bool BitField = false; // Ignore padding bit-fields. @@ -337,7 +338,8 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, postMerge(Size, Lo, Hi); } else { llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::X86TypeClassification(), + "NYI"); } // FIXME: _Decimal32 and _Decimal64 are SSE. // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). @@ -400,7 +402,7 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, // returning an 8-byte unit starting with it. See if we can safely use it. if (IROffset == 0) { // Pointers and int64's always fill the 8-byte unit. - assert(!isa(DestTy) && "Ptrs are NYI"); + cir_tl_assert(!isa(DestTy) && "Ptrs are NYI"); // If we have a 1/2/4-byte integer, we can use it only if the rest of the // goodness in the source type is just tail padding. This is allowed to @@ -436,7 +438,9 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, unsigned TySizeInBytes = (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); - assert(TySizeInBytes != SourceOffset && "Empty field?"); + // FIXME(cir): Temporary workaround to make things non-blocking. + if (!cirMissingFeatureAssertionMode) + cir_tl_assert(TySizeInBytes != SourceOffset && "Empty field?"); // It is always safe to classify this as an integer type up to i64 that // isn't larger than the structure. @@ -458,10 +462,10 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { classify(RetTy, 0, Lo, Hi, true); // Check some invariants. - assert((Hi != Class::Memory || Lo == Class::Memory) && - "Invalid memory classification."); - assert((Hi != Class::SSEUp || Lo == Class::SSE) && - "Invalid SSEUp classification."); + cir_tl_assert((Hi != Class::Memory || Lo == Class::Memory) && + "Invalid memory classification."); + cir_tl_assert((Hi != Class::SSEUp || Lo == Class::SSE) && + "Invalid SSEUp classification."); Type resType = {}; switch (Lo) { @@ -492,7 +496,8 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { break; default: - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::X86RetTypeClassification(), + "NYI"); } Type HighPart = {}; @@ -526,10 +531,10 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, // Check some invariants. // FIXME: Enforce these by construction. - assert((Hi != Class::Memory || Lo == Class::Memory) && - "Invalid memory classification."); - assert((Hi != Class::SSEUp || Lo == Class::SSE) && - "Invalid SSEUp classification."); + cir_tl_assert((Hi != Class::Memory || Lo == Class::Memory) && + "Invalid memory classification."); + cir_tl_assert((Hi != Class::SSEUp || Lo == Class::SSE) && + "Invalid SSEUp classification."); neededInt = 0; neededSSE = 0; @@ -566,7 +571,8 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, break; } default: - llvm_unreachable("NYI"); + cir_assert_or_abort(!::cir::MissingFeatures::X86ArgTypeClassification(), + "NYI"); } Type HighPart = {}; @@ -670,8 +676,8 @@ X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { // Accum should never be memory (we should have returned) or // ComplexX87 (because this cannot be passed in a structure). - assert((Accum != Class::Memory && Accum != Class::ComplexX87) && - "Invalid accumulated classification during merge."); + cir_tl_assert((Accum != Class::Memory && Accum != Class::ComplexX87) && + "Invalid accumulated classification during merge."); if (Accum == Field || Field == Class::NoClass) return Accum; if (Field == Class::Memory) diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 0eb57fa48681..d97a415e3b12 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -91,14 +91,13 @@ getBackendActionFromOutputType(CIRGenAction::OutputType action) { } } -static std::unique_ptr -lowerFromCIRToLLVMIR(const clang::FrontendOptions &feOptions, - mlir::ModuleOp mlirMod, - std::unique_ptr mlirCtx, - llvm::LLVMContext &llvmCtx, bool disableVerifier = false) { +static std::unique_ptr lowerFromCIRToLLVMIR( + const clang::FrontendOptions &feOptions, mlir::ModuleOp mlirMod, + std::unique_ptr mlirCtx, llvm::LLVMContext &llvmCtx, + bool disableVerifier = false, bool disableCCLowering = false) { if (feOptions.ClangIRDirectLowering) - return direct::lowerDirectlyFromCIRToLLVMIR(mlirMod, llvmCtx, - disableVerifier); + return direct::lowerDirectlyFromCIRToLLVMIR( + mlirMod, llvmCtx, disableVerifier, disableCCLowering); else return lowerFromCIRToMLIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); } @@ -201,6 +200,9 @@ class CIRGenConsumer : public clang::ASTConsumer { if (feOptions.ClangIRLibOpt) libOptOpts = sanitizePassOptions(feOptions.ClangIRLibOptOpts); + bool enableCCLowering = feOptions.ClangIRCallConvLowering && + action != CIRGenAction::OutputType::EmitCIR; + // Setup and run CIR pipeline. std::string passOptParsingFailure; if (runCIRToCIRPasses( @@ -210,8 +212,7 @@ class CIRGenConsumer : public clang::ASTConsumer { feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, codeGenOptions.OptimizationLevel > 0, action == CIRGenAction::OutputType::EmitCIRFlat, - action == CIRGenAction::OutputType::EmitMLIR, - feOptions.ClangIREnableCallConvLowering, + action == CIRGenAction::OutputType::EmitMLIR, enableCCLowering, feOptions.ClangIREnableMem2Reg) .failed()) { if (!passOptParsingFailure.empty()) @@ -288,7 +289,8 @@ class CIRGenConsumer : public clang::ASTConsumer { llvm::LLVMContext llvmCtx; auto llvmModule = lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, - feOptions.ClangIRDisableCIRVerifier); + feOptions.ClangIRDisableCIRVerifier, + !feOptions.ClangIRCallConvLowering); llvmModule->setTargetTriple(targetOptions.Triple); @@ -437,10 +439,12 @@ void CIRGenAction::ExecuteAction() { if (!mlirModule) return; + // FIXME(cir): This compilation path does not account for some flags. llvm::LLVMContext llvmCtx; auto llvmModule = lowerFromCIRToLLVMIR( ci.getFrontendOpts(), mlirModule.release(), - std::unique_ptr(mlirContext), llvmCtx); + std::unique_ptr(mlirContext), llvmCtx, + /*disableVerifier=*/false, /*disableCCLowering=*/true); if (outstream) llvmModule->print(*outstream, nullptr); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index fb5f5496fa70..01b9d2535c9d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4515,8 +4515,8 @@ std::unique_ptr createConvertCIRToLLVMPass() { return std::make_unique(); } -void populateCIRToLLVMPasses(mlir::OpPassManager &pm) { - populateCIRPreLoweringPasses(pm); +void populateCIRToLLVMPasses(mlir::OpPassManager &pm, bool useCCLowering) { + populateCIRPreLoweringPasses(pm, useCCLowering); pm.addPass(createConvertCIRToLLVMPass()); } @@ -4524,12 +4524,12 @@ extern void registerCIRDialectTranslation(mlir::MLIRContext &context); std::unique_ptr lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx, - bool disableVerifier) { + bool disableVerifier, bool disableCCLowering) { llvm::TimeTraceScope scope("lower from CIR to LLVM directly"); mlir::MLIRContext *mlirCtx = theModule.getContext(); mlir::PassManager pm(mlirCtx); - populateCIRToLLVMPasses(pm); + populateCIRToLLVMPasses(pm, !disableCCLowering); // This is necessary to have line tables emitted and basic // debugger working. In the future we will add proper debug information diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index a30e097f9401..dcd979c35e9d 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3129,7 +3129,7 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Opts.ClangIRVerifyDiags = true; if (Args.hasArg(OPT_fclangir_call_conv_lowering)) - Opts.ClangIREnableCallConvLowering = true; + Opts.ClangIRCallConvLowering = true; if (Args.hasArg(OPT_fclangir_analysis_only)) Opts.ClangIRAnalysisOnly = true; diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index 192486579143..52d6d1a0c003 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -1,8 +1,8 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -ffreestanding -emit-cir -target-feature +neon %s -o %t.cir +// RUN: -ffreestanding -emit-cir -fno-clangir-call-conv-lowering -target-feature +neon %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -ffreestanding -emit-llvm -target-feature +neon %s -o %t.ll +// RUN: -ffreestanding -emit-llvm -fno-clangir-call-conv-lowering -target-feature +neon %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // REQUIRES: aarch64-registered-target || arm-registered-target diff --git a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c index 10df33358d36..9f2e431d9a9e 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c @@ -1,11 +1,11 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ // RUN: -fclangir -disable-O0-optnone \ -// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s +// RUN: -flax-vector-conversions=none -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ // RUN: -fclangir -disable-O0-optnone \ -// RUN: -flax-vector-conversions=none -emit-llvm -o - %s \ +// RUN: -flax-vector-conversions=none -emit-llvm -fno-clangir-call-conv-lowering -o - %s \ // RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index 6154da28f35f..80afd1bf17c6 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -1,8 +1,8 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -emit-cir -target-feature +neon %s -o %t.cir +// RUN: -emit-cir -fno-clangir-call-conv-lowering -target-feature +neon %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -emit-llvm -target-feature +neon %s -o %t.ll +// RUN: -emit-llvm -fno-clangir-call-conv-lowering -target-feature +neon %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // This test file contains tests of AArch64 NEON intrinsics diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 288ea8308cf3..a5067303dc8e 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -1,11 +1,11 @@ // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ // RUN: -fclangir -disable-O0-optnone \ -// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s +// RUN: -flax-vector-conversions=none -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ // RUN: -fclangir -disable-O0-optnone \ -// RUN: -flax-vector-conversions=none -emit-llvm -o - %s \ +// RUN: -flax-vector-conversions=none -emit-llvm -fno-clangir-call-conv-lowering -o - %s \ // RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s diff --git a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl index c64b5015f369..256da726fb8e 100644 --- a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl +++ b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/OpenCL/array-decay.cl b/clang/test/CIR/CodeGen/OpenCL/array-decay.cl index d81e425729a6..e42bc9096a4b 100644 --- a/clang/test/CIR/CodeGen/OpenCL/array-decay.cl +++ b/clang/test/CIR/CodeGen/OpenCL/array-decay.cl @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM // CIR: @func1 diff --git a/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl b/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl index e6d6ce1ca25a..ad5ec6651e90 100644 --- a/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl +++ b/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl @@ -1,19 +1,19 @@ -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL1.2 -o %t.cl12.cir %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -O0 -cl-std=CL1.2 -o %t.cl12.cir %s // RUN: FileCheck %s -input-file=%t.cl12.cir -check-prefixes CIR,CIR-UNIFORM -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL2.0 -o %t.cl20.cir %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -O0 -cl-std=CL2.0 -o %t.cl20.cir %s // RUN: FileCheck %s -input-file=%t.cl20.cir -check-prefixes CIR,CIR-NONUNIFORM -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL2.0 -cl-uniform-work-group-size -o %t.cl20.uniform1.cir %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -O0 -cl-std=CL2.0 -cl-uniform-work-group-size -o %t.cl20.uniform1.cir %s // RUN: FileCheck %s -input-file=%t.cl20.uniform1.cir -check-prefixes CIR,CIR-UNIFORM -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL2.0 -foffload-uniform-block -o %t.cl20.uniform2.cir %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -O0 -cl-std=CL2.0 -foffload-uniform-block -o %t.cl20.uniform2.cir %s // RUN: FileCheck %s -input-file=%t.cl20.uniform2.cir -check-prefixes CIR,CIR-UNIFORM -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -O0 -cl-std=CL1.2 -o %t.cl12.ll %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering -O0 -cl-std=CL1.2 -o %t.cl12.ll %s // RUN: FileCheck %s -input-file=%t.cl12.ll -check-prefixes LLVM,LLVM-UNIFORM -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -O0 -cl-std=CL2.0 -o %t.cl20.ll %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering -O0 -cl-std=CL2.0 -o %t.cl20.ll %s // RUN: FileCheck %s -input-file=%t.cl20.ll -check-prefixes LLVM,LLVM-NONUNIFORM -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -O0 -cl-std=CL2.0 -cl-uniform-work-group-size -o %t.cl20.uniform1.ll %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering -O0 -cl-std=CL2.0 -cl-uniform-work-group-size -o %t.cl20.uniform1.ll %s // RUN: FileCheck %s -input-file=%t.cl20.uniform1.ll -check-prefixes LLVM,LLVM-UNIFORM -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -O0 -cl-std=CL2.0 -foffload-uniform-block -o %t.cl20.uniform2.ll %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering -O0 -cl-std=CL2.0 -foffload-uniform-block -o %t.cl20.uniform2.ll %s // RUN: FileCheck %s -input-file=%t.cl20.uniform2.ll -check-prefixes LLVM,LLVM-UNIFORM // CIR-LABEL: #fn_attr = diff --git a/clang/test/CIR/CodeGen/OpenCL/convergent.cl b/clang/test/CIR/CodeGen/OpenCL/convergent.cl index a2d4a910004c..8da6d0fc51d9 100644 --- a/clang/test/CIR/CodeGen/OpenCL/convergent.cl +++ b/clang/test/CIR/CodeGen/OpenCL/convergent.cl @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -fclangir -triple spirv64-unknown-unknown -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -fclangir -triple spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR -// RUN: %clang_cc1 -fclangir -triple spirv64-unknown-unknown -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -fclangir -triple spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM // In ClangIR for OpenCL, all functions should be marked convergent. diff --git a/clang/test/CIR/CodeGen/OpenCL/global.cl b/clang/test/CIR/CodeGen/OpenCL/global.cl index 3ec7ee36fd80..83fe24c573cb 100644 --- a/clang/test/CIR/CodeGen/OpenCL/global.cl +++ b/clang/test/CIR/CodeGen/OpenCL/global.cl @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM global int a = 13; diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl index b78ee6dddbf7..c72ede55d797 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl @@ -1,9 +1,9 @@ // Test that the kernel argument info always refers to SPIR address spaces, // even if the target has only one address space like x86_64 does. -// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -o - -triple x86_64-unknown-linux-gnu -o %t.cir +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -fno-clangir-call-conv-lowering -o - -triple x86_64-unknown-linux-gnu -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR -// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-llvm -o - -triple x86_64-unknown-linux-gnu -o %t.ll +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-llvm -fno-clangir-call-conv-lowering -o - -triple x86_64-unknown-linux-gnu -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM kernel void foo(__global int * G, __constant int *C, __local int *L) { diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info.cl index 6c7b69368974..d07fc1db7fb3 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info.cl @@ -1,12 +1,12 @@ // See also clang/test/CodeGenOpenCL/kernel-arg-info.cl -// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -o - -triple spirv64-unknown-unknown -o %t.cir +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -fno-clangir-call-conv-lowering -o - -triple spirv64-unknown-unknown -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR -// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -o - -triple spirv64-unknown-unknown -cl-kernel-arg-info -o %t.arginfo.cir +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -fno-clangir-call-conv-lowering -o - -triple spirv64-unknown-unknown -cl-kernel-arg-info -o %t.arginfo.cir // RUN: FileCheck %s --input-file=%t.arginfo.cir --check-prefix=CIR-ARGINFO -// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-llvm -o - -triple spirv64-unknown-unknown -o %t.ll +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-llvm -fno-clangir-call-conv-lowering -o - -triple spirv64-unknown-unknown -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM -// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-llvm -o - -triple spirv64-unknown-unknown -cl-kernel-arg-info -o %t.arginfo.ll +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-llvm -fno-clangir-call-conv-lowering -o - -triple spirv64-unknown-unknown -cl-kernel-arg-info -o %t.arginfo.ll // RUN: FileCheck %s --input-file=%t.arginfo.ll --check-prefix=LLVM-ARGINFO kernel void foo(global int * globalintp, global int * restrict globalintrestrictp, diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl index ccc8ce967e50..14d8e29397c1 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 %s -fclangir -triple spirv64-unknown-unknown -emit-cir -o %t.cir +// RUN: %clang_cc1 %s -fclangir -triple spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR -// RUN: %clang_cc1 %s -fclangir -triple spirv64-unknown-unknown -emit-llvm -o %t.ll +// RUN: %clang_cc1 %s -fclangir -triple spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM __kernel void kernel_function() {} diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl index 8a32f1d8088d..b3d6d73eb789 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR -// RUN: %clang_cc1 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: %clang_cc1 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM typedef unsigned int uint4 __attribute__((ext_vector_type(4))); diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl index 01348013bbf0..5acbc5eea395 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR diff --git a/clang/test/CIR/CodeGen/OpenCL/nothrow.cl b/clang/test/CIR/CodeGen/OpenCL/nothrow.cl index c1c167f880ea..3adf7be62962 100644 --- a/clang/test/CIR/CodeGen/OpenCL/nothrow.cl +++ b/clang/test/CIR/CodeGen/OpenCL/nothrow.cl @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck %s -input-file=%t.cir -check-prefixes CIR -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -o %t.ll %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s // RUN: FileCheck %s -input-file=%t.ll -check-prefixes LLVM // CIR-LABEL: #fn_attr = diff --git a/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl b/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl index 67aeda32c2a1..724ca098295b 100644 --- a/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl +++ b/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR // CIR: module{{.*}} attributes {{{.*}}cir.lang = #cir.lang diff --git a/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl b/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl index f0536a560b97..018d7f1efe25 100644 --- a/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl +++ b/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl @@ -1,10 +1,10 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR-CL30 -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM-CL30 -// RUN: %clang_cc1 -cl-std=CL1.2 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL1.2 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR-CL12 -// RUN: %clang_cc1 -cl-std=CL1.2 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: %clang_cc1 -cl-std=CL1.2 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM-CL12 // CIR-CL30: module {{.*}} attributes {{{.*}}cir.cl.version = #cir.cl.version<3, 0> diff --git a/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl b/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl index bf711bec7d46..9a6644cb09f0 100644 --- a/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl +++ b/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -fclangir %s -O0 -triple "spirv64-unknown-unknown" -emit-cir -o %t.cir +// RUN: %clang_cc1 -fclangir %s -O0 -triple "spirv64-unknown-unknown" -emit-cir -fno-clangir-call-conv-lowering -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR -// RUN: %clang_cc1 -fclangir %s -O0 -triple "spirv64-unknown-unknown" -emit-llvm -o %t.ll +// RUN: %clang_cc1 -fclangir %s -O0 -triple "spirv64-unknown-unknown" -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM // CIR: cir.func {{.*}}@get_dummy_id{{.*}} cc(spir_function) diff --git a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl index dadf4e6022b5..4dbfc5c37df1 100644 --- a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl +++ b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl @@ -1,7 +1,7 @@ // See also: clang/test/CodeGenOpenCL/spirv_target.cl -// RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t_64.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t_64.cir // RUN: FileCheck --input-file=%t_64.cir %s --check-prefix=CIR-SPIRV64 -// RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t_64.ll +// RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t_64.ll // RUN: FileCheck --input-file=%t_64.ll %s --check-prefix=LLVM-SPIRV64 diff --git a/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl b/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl index 9ad8277012c4..8f458c5696c7 100644 --- a/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl +++ b/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -triple spirv64-unknown-unknown %s -o %t.ll +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM kernel void test_static(int i) { diff --git a/clang/test/CIR/CodeGen/abstract-cond.c b/clang/test/CIR/CodeGen/abstract-cond.c index d724c8e1ea28..4f084503a11c 100644 --- a/clang/test/CIR/CodeGen/abstract-cond.c +++ b/clang/test/CIR/CodeGen/abstract-cond.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // ?: in "lvalue" diff --git a/clang/test/CIR/CodeGen/attributes.c b/clang/test/CIR/CodeGen/attributes.c index f80c479df45a..97117d71b935 100644 --- a/clang/test/CIR/CodeGen/attributes.c +++ b/clang/test/CIR/CodeGen/attributes.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o - | FileCheck %s -check-prefix=LLVM extern int __attribute__((section(".shared"))) ext; int getExt() { diff --git a/clang/test/CIR/CodeGen/builtin-bit-cast.cpp b/clang/test/CIR/CodeGen/builtin-bit-cast.cpp index d7aedac960b7..8747d8ec572f 100644 --- a/clang/test/CIR/CodeGen/builtin-bit-cast.cpp +++ b/clang/test/CIR/CodeGen/builtin-bit-cast.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir --check-prefix=CIR %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s float test_scalar(int &oper) { diff --git a/clang/test/CIR/CodeGen/complex-arithmetic.c b/clang/test/CIR/CodeGen/complex-arithmetic.c index 8e772e70f2d9..9c6a44959237 100644 --- a/clang/test/CIR/CodeGen/complex-arithmetic.c +++ b/clang/test/CIR/CodeGen/complex-arithmetic.c @@ -1,46 +1,46 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIRGEN,CIRGEN-BASIC,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIRGEN,CIRGEN-BASIC,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIRGEN,CIRGEN-IMPROVED,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIRGEN,CIRGEN-IMPROVED,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIRGEN,CIRGEN-FULL,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIRGEN,CIRGEN-FULL,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIR,CIR-BASIC,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIR,CIR-BASIC,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIR,CIR-IMPROVED,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIR,CIR-IMPROVED,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIR,CIR-FULL,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIR,CIR-FULL,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -emit-llvm -o %t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefixes=CLANG,LLVM,LLVM-BASIC,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -emit-llvm -o %t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefixes=CPPLANG,LLVM,LLVM-BASIC,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -emit-llvm -o %t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefixes=CLANG,LLVM,LLVM-IMPROVED,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -emit-llvm -o %t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefixes=CPPLANG,LLVM,LLVM-IMPROVED,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -emit-llvm -o %t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefixes=CLANG,LLVM,LLVM-FULL,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -emit-llvm -o %t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefixes=CPPLANG,LLVM,LLVM-FULL,CHECK %s double _Complex cd1, cd2; diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c index bbd7fa4a4e75..248b6bfa9dab 100644 --- a/clang/test/CIR/CodeGen/compound-literal.c +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-call-conv-lowering -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-call-conv-lowering -Wno-unused-value -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/conditional-cleanup.cpp b/clang/test/CIR/CodeGen/conditional-cleanup.cpp index e9272093a1cf..00b08cdddce5 100644 --- a/clang/test/CIR/CodeGen/conditional-cleanup.cpp +++ b/clang/test/CIR/CodeGen/conditional-cleanup.cpp @@ -2,7 +2,7 @@ // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.eh.cir // RUN: FileCheck --check-prefix=CIR_EH --input-file=%t.eh.cir %s -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.eh.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat -fno-clangir-call-conv-lowering %s -o %t.eh.flat.cir // RUN: FileCheck --check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir %s typedef __typeof(sizeof(0)) size_t; diff --git a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp index 04662aed47ea..aef196d7a2c7 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s // RUN: FileCheck --input-file=%t.cir %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -fclangir -emit-llvm -o %t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s struct Base1 { diff --git a/clang/test/CIR/CodeGen/eh.cpp b/clang/test/CIR/CodeGen/eh.cpp index 75f7c63471aa..96a8633a3252 100644 --- a/clang/test/CIR/CodeGen/eh.cpp +++ b/clang/test/CIR/CodeGen/eh.cpp @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -fcxx-exceptions -fexceptions -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -fcxx-exceptions -fexceptions -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -fcxx-exceptions -fexceptions -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM struct test1_D { diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index bf2663181077..c0b3eac11a58 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -1,12 +1,12 @@ -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE // RUN: FileCheck %s -check-prefix=CIR_AFTER --input-file=%t.cir // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering -fexceptions -fcxx-exceptions %s -o %t.eh.cir // RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir-flat -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -fno-clangir-call-conv-lowering -emit-cir-flat -fno-clangir-call-conv-lowering -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir // RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fexceptions -fcxx-exceptions %s -o %t.eh.ll +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -fexceptions -fcxx-exceptions %s -o %t.eh.ll // RUN: FileCheck %s -check-prefix=LLVM_EH --input-file=%t.eh.ll struct e { e(int); }; diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 2200fc98cfac..6b9b64d175a9 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat %s -o %t1.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -fno-clangir-call-conv-lowering %s -o %t1.cir // RUN: FileCheck --input-file=%t1.cir %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t2.cir // RUN: FileCheck --input-file=%t2.cir %s -check-prefix=NOFLAT diff --git a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp index 2d9c97420311..c28b265c4c2b 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM namespace std { diff --git a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp index 2cf24f7f159a..893f2a24d008 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM namespace std { diff --git a/clang/test/CIR/CodeGen/multi-vtable.cpp b/clang/test/CIR/CodeGen/multi-vtable.cpp index da81091890cf..b696d6fa61de 100644 --- a/clang/test/CIR/CodeGen/multi-vtable.cpp +++ b/clang/test/CIR/CodeGen/multi-vtable.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s class Mother { diff --git a/clang/test/CIR/CodeGen/temporaries.cpp b/clang/test/CIR/CodeGen/temporaries.cpp index 885ba0db8f0a..bf72994e6726 100644 --- a/clang/test/CIR/CodeGen/temporaries.cpp +++ b/clang/test/CIR/CodeGen/temporaries.cpp @@ -1,8 +1,8 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fcxx-exceptions -fexceptions -emit-cir %s -o %t.eh.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fcxx-exceptions -fexceptions -emit-cir -fno-clangir-call-conv-lowering %s -o %t.eh.cir // RUN: FileCheck --input-file=%t.eh.cir %s -check-prefix=CIR_EH -// RUN: cir-translate %t.cir -cir-to-llvmir -o %t.ll +// RUN: cir-translate %t.cir -cir-to-llvmir --disable-cc-lowering -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM struct E { diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 69f7c351c671..002e676bbc63 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -1,8 +1,8 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat -fno-clangir-call-conv-lowering %s -o %t.flat.cir // RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s struct Vec { diff --git a/clang/test/CIR/CodeGen/var-arg-float.c b/clang/test/CIR/CodeGen/var-arg-float.c index 2385ba3aaad0..e9f0881d9fa8 100644 --- a/clang/test/CIR/CodeGen/var-arg-float.c +++ b/clang/test/CIR/CodeGen/var-arg-float.c @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM #include diff --git a/clang/test/CIR/CodeGen/var-arg-scope.c b/clang/test/CIR/CodeGen/var-arg-scope.c index f5c3c65cd467..c586487af559 100644 --- a/clang/test/CIR/CodeGen/var-arg-scope.c +++ b/clang/test/CIR/CodeGen/var-arg-scope.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM void f1(__builtin_va_list c) { diff --git a/clang/test/CIR/CodeGen/var-arg.c b/clang/test/CIR/CodeGen/var-arg.c index bc3cd2f64e0c..a1a9e1cdb4ef 100644 --- a/clang/test/CIR/CodeGen/var-arg.c +++ b/clang/test/CIR/CodeGen/var-arg.c @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare -fno-clangir-call-conv-lowering %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM #include diff --git a/clang/test/CIR/CodeGen/visibility-attribute.c b/clang/test/CIR/CodeGen/visibility-attribute.c index 549f05d052b8..baf31b5788a4 100644 --- a/clang/test/CIR/CodeGen/visibility-attribute.c +++ b/clang/test/CIR/CodeGen/visibility-attribute.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s -check-prefix=LLVM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o - | FileCheck %s -check-prefix=LLVM extern int glob_default; // CIR: cir.global "private" external @glob_default : !s32i diff --git a/clang/test/CIR/Lowering/OpenMP/barrier.cir b/clang/test/CIR/Lowering/OpenMP/barrier.cir index 52fee8fff6c1..145117ab54a0 100644 --- a/clang/test/CIR/Lowering/OpenMP/barrier.cir +++ b/clang/test/CIR/Lowering/OpenMP/barrier.cir @@ -1,5 +1,5 @@ -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s module { diff --git a/clang/test/CIR/Lowering/OpenMP/parallel.cir b/clang/test/CIR/Lowering/OpenMP/parallel.cir index 81f6bbaa59cf..3422eac75ea0 100644 --- a/clang/test/CIR/Lowering/OpenMP/parallel.cir +++ b/clang/test/CIR/Lowering/OpenMP/parallel.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s !s32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/OpenMP/taskwait.cir b/clang/test/CIR/Lowering/OpenMP/taskwait.cir index 336bbda4f1bf..83e8119bc479 100644 --- a/clang/test/CIR/Lowering/OpenMP/taskwait.cir +++ b/clang/test/CIR/Lowering/OpenMP/taskwait.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s module { diff --git a/clang/test/CIR/Lowering/OpenMP/taskyield.cir b/clang/test/CIR/Lowering/OpenMP/taskyield.cir index 5104e9c31be1..a701365b798f 100644 --- a/clang/test/CIR/Lowering/OpenMP/taskyield.cir +++ b/clang/test/CIR/Lowering/OpenMP/taskyield.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s module { diff --git a/clang/test/CIR/Lowering/address-space.cir b/clang/test/CIR/Lowering/address-space.cir index ee857bd32119..733c6ddda940 100644 --- a/clang/test/CIR/Lowering/address-space.cir +++ b/clang/test/CIR/Lowering/address-space.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/array.cir b/clang/test/CIR/Lowering/array.cir index 554a4a1fc18a..30a5aae7bfae 100644 --- a/clang/test/CIR/Lowering/array.cir +++ b/clang/test/CIR/Lowering/array.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o - | FileCheck %s -check-prefix=LLVM !s32i = !cir.int !ty_S = !cir.struct diff --git a/clang/test/CIR/Lowering/binop-fp.cir b/clang/test/CIR/Lowering/binop-fp.cir index dfda6e91cb51..a2800a847c85 100644 --- a/clang/test/CIR/Lowering/binop-fp.cir +++ b/clang/test/CIR/Lowering/binop-fp.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/binop-overflow.cir b/clang/test/CIR/Lowering/binop-overflow.cir index 5cdd9d82ae7b..196771150dbe 100644 --- a/clang/test/CIR/Lowering/binop-overflow.cir +++ b/clang/test/CIR/Lowering/binop-overflow.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o - | FileCheck %s -check-prefix=LLVM !u32i = !cir.int !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir index 9633a7f4d966..04de2e049ae0 100644 --- a/clang/test/CIR/Lowering/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !u32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/bitint.cir b/clang/test/CIR/Lowering/bitint.cir index b1c9d031b7cc..61db545b0d07 100644 --- a/clang/test/CIR/Lowering/bitint.cir +++ b/clang/test/CIR/Lowering/bitint.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/bool-to-int.cir b/clang/test/CIR/Lowering/bool-to-int.cir index 1b4bb73f80f9..97ee3c1daee0 100644 --- a/clang/test/CIR/Lowering/bool-to-int.cir +++ b/clang/test/CIR/Lowering/bool-to-int.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s !s32i = !cir.int #false = #cir.bool : !cir.bool diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 9b424355aa18..2d3fc2d8590b 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM #false = #cir.bool : !cir.bool #true = #cir.bool : !cir.bool diff --git a/clang/test/CIR/Lowering/branch.cir b/clang/test/CIR/Lowering/branch.cir index bbfb61e582a0..a99a217f18da 100644 --- a/clang/test/CIR/Lowering/branch.cir +++ b/clang/test/CIR/Lowering/branch.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !s32i = !cir.int cir.func @foo(%arg0: !cir.bool) -> !s32i { diff --git a/clang/test/CIR/Lowering/brcond.cir b/clang/test/CIR/Lowering/brcond.cir index 9586f70cf727..d2df89740358 100644 --- a/clang/test/CIR/Lowering/brcond.cir +++ b/clang/test/CIR/Lowering/brcond.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !s32i = !cir.int #fn_attr = #cir, nothrow = #cir.nothrow, optnone = #cir.optnone})> diff --git a/clang/test/CIR/Lowering/bswap.cir b/clang/test/CIR/Lowering/bswap.cir index 7733b4de1dae..0f8478ba8936 100644 --- a/clang/test/CIR/Lowering/bswap.cir +++ b/clang/test/CIR/Lowering/bswap.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !u32i = !cir.int diff --git a/clang/test/CIR/Lowering/call-op-call-conv.cir b/clang/test/CIR/Lowering/call-op-call-conv.cir index 837cc4b82ab9..21e9e01c14ae 100644 --- a/clang/test/CIR/Lowering/call-op-call-conv.cir +++ b/clang/test/CIR/Lowering/call-op-call-conv.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate -cir-to-llvmir %s -o %t.ll +// RUN: cir-translate -cir-to-llvmir --disable-cc-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/call.cir b/clang/test/CIR/Lowering/call.cir index eab7fb598830..ade54037b76b 100644 --- a/clang/test/CIR/Lowering/call.cir +++ b/clang/test/CIR/Lowering/call.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/cmp3way.cir b/clang/test/CIR/Lowering/cmp3way.cir index 6e00a9440f59..9c18dfce5769 100644 --- a/clang/test/CIR/Lowering/cmp3way.cir +++ b/clang/test/CIR/Lowering/cmp3way.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !s8i = !cir.int !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/complex.cir b/clang/test/CIR/Lowering/complex.cir index 91ded659997d..27180865e377 100644 --- a/clang/test/CIR/Lowering/complex.cir +++ b/clang/test/CIR/Lowering/complex.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate -cir-to-llvmir -o %t.ll %s +// RUN: cir-translate -cir-to-llvmir --disable-cc-lowering -o %t.ll %s // RUN: FileCheck --input-file %t.ll -check-prefix=LLVM %s !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/const-array.cir b/clang/test/CIR/Lowering/const-array.cir index 69917ddb3a36..41cfbad3daba 100644 --- a/clang/test/CIR/Lowering/const-array.cir +++ b/clang/test/CIR/Lowering/const-array.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate %s -cir-to-llvmir -o - | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o - | FileCheck %s -check-prefix=LLVM !u8i = !cir.int diff --git a/clang/test/CIR/Lowering/data-member.cir b/clang/test/CIR/Lowering/data-member.cir index 40846c53f920..1609ac43ff03 100644 --- a/clang/test/CIR/Lowering/data-member.cir +++ b/clang/test/CIR/Lowering/data-member.cir @@ -1,5 +1,5 @@ // RUN: cir-opt -cir-to-llvm -o - %s | FileCheck -check-prefix=MLIR %s -// RUN: cir-translate -cir-to-llvmir -o - %s | FileCheck -check-prefix=LLVM %s +// RUN: cir-translate -cir-to-llvmir --disable-cc-lowering -o - %s | FileCheck -check-prefix=LLVM %s !s32i = !cir.int !s64i = !cir.int diff --git a/clang/test/CIR/Lowering/exceptions.cir b/clang/test/CIR/Lowering/exceptions.cir index 5b0414e2ee78..1d99e9e2e620 100644 --- a/clang/test/CIR/Lowering/exceptions.cir +++ b/clang/test/CIR/Lowering/exceptions.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/expect.cir b/clang/test/CIR/Lowering/expect.cir index 64c9c10e6277..57f9cf2e35da 100644 --- a/clang/test/CIR/Lowering/expect.cir +++ b/clang/test/CIR/Lowering/expect.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !s64i = !cir.int module { diff --git a/clang/test/CIR/Lowering/func-call-conv.cir b/clang/test/CIR/Lowering/func-call-conv.cir index a32e67a7d1de..577eb854d47b 100644 --- a/clang/test/CIR/Lowering/func-call-conv.cir +++ b/clang/test/CIR/Lowering/func-call-conv.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index c3bd1cc3a726..482ee8490fca 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -1,6 +1,6 @@ // RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir -o %t.ll +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM !void = !cir.void diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index cd42497983e4..cb2960b69a32 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/int-wrap.cir b/clang/test/CIR/Lowering/int-wrap.cir index b6b8bd385b89..f74f64feb2e8 100644 --- a/clang/test/CIR/Lowering/int-wrap.cir +++ b/clang/test/CIR/Lowering/int-wrap.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/intrinsics.cir b/clang/test/CIR/Lowering/intrinsics.cir index 25b0b34738bc..778aeb9f9182 100644 --- a/clang/test/CIR/Lowering/intrinsics.cir +++ b/clang/test/CIR/Lowering/intrinsics.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM module { cir.func @test_unreachable() { diff --git a/clang/test/CIR/Lowering/ptrdiff.cir b/clang/test/CIR/Lowering/ptrdiff.cir index ff1248ddad66..c0b1a4b3e314 100644 --- a/clang/test/CIR/Lowering/ptrdiff.cir +++ b/clang/test/CIR/Lowering/ptrdiff.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s !s32i = !cir.int !u64i = !cir.int diff --git a/clang/test/CIR/Lowering/region-simplify.cir b/clang/test/CIR/Lowering/region-simplify.cir index 5f32205cb032..a76d73d03d8e 100644 --- a/clang/test/CIR/Lowering/region-simplify.cir +++ b/clang/test/CIR/Lowering/region-simplify.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -canonicalize -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-opt %s -canonicalize -o - | cir-translate -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-opt %s -canonicalize -o - | cir-translate -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !u32i = !cir.int diff --git a/clang/test/CIR/Lowering/scope.cir b/clang/test/CIR/Lowering/scope.cir index 48f8bfdcc5a3..850b1ec5e051 100644 --- a/clang/test/CIR/Lowering/scope.cir +++ b/clang/test/CIR/Lowering/scope.cir @@ -1,6 +1,6 @@ // RUN: cir-opt %s -cir-to-llvm -o %t.cir // RUN: FileCheck %s --input-file=%t.cir -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !u32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/select.cir b/clang/test/CIR/Lowering/select.cir index 1836210d6a7c..1ac56496e138 100644 --- a/clang/test/CIR/Lowering/select.cir +++ b/clang/test/CIR/Lowering/select.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate -cir-to-llvmir -o %t.ll %s +// RUN: cir-translate -cir-to-llvmir --disable-cc-lowering -o %t.ll %s // RUN: FileCheck --input-file=%t.ll -check-prefix=LLVM %s !s32i = !cir.int diff --git a/clang/test/CIR/Lowering/try-catch.cpp b/clang/test/CIR/Lowering/try-catch.cpp index b985ecab8cca..068d8c10b3b3 100644 --- a/clang/test/CIR/Lowering/try-catch.cpp +++ b/clang/test/CIR/Lowering/try-catch.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat %s -o %t.flat.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -fno-clangir-call-conv-lowering -emit-cir-flat %s -o %t.flat.cir // RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s // RUN_DISABLED: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-llvm %s -o %t.ll // RUN_DISABLED: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_LLVM %s diff --git a/clang/test/CIR/Lowering/unary-inc-dec.cir b/clang/test/CIR/Lowering/unary-inc-dec.cir index 9ba26b36f61c..4dac6ac55318 100644 --- a/clang/test/CIR/Lowering/unary-inc-dec.cir +++ b/clang/test/CIR/Lowering/unary-inc-dec.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @foo() { diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index 48e2705e756d..4d686f3875af 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -1,5 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR -// RUN: cir-translate %s -cir-to-llvmir | FileCheck %s -check-prefix=LLVM +// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering | FileCheck %s -check-prefix=LLVM !s32i = !cir.int module { cir.func @foo() -> !s32i { diff --git a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp index 209679ebf383..f3a926aa93a6 100644 --- a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // CHECK: @_Z4Voidv() diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp index 3789550ce33b..a3c2d6960c39 100644 --- a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp +++ b/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // Test call conv lowering for trivial cases. // diff --git a/clang/test/CodeGen/compound-literal.c b/clang/test/CodeGen/compound-literal.c index 5fe9594c0f95..1dc3227b56ee 100644 --- a/clang/test/CodeGen/compound-literal.c +++ b/clang/test/CodeGen/compound-literal.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm %s -o - | FileCheck %s -// RUN: %clang_cc1 -triple x86_64-apple-darwin -fexperimental-new-constant-interpreter -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm -fno-clangir-call-conv-lowering %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin -fexperimental-new-constant-interpreter -emit-llvm -fno-clangir-call-conv-lowering %s -o - | FileCheck %s // Capture the type and name so matching later is cleaner. struct CompoundTy { int a; }; diff --git a/clang/tools/cir-opt/cir-opt.cpp b/clang/tools/cir-opt/cir-opt.cpp index e7af0b214462..a51b3a602baa 100644 --- a/clang/tools/cir-opt/cir-opt.cpp +++ b/clang/tools/cir-opt/cir-opt.cpp @@ -21,12 +21,23 @@ #include "mlir/Dialect/OpenMP/OpenMPDialect.h" #include "mlir/InitAllPasses.h" #include "mlir/Pass/PassManager.h" +#include "mlir/Pass/PassOptions.h" #include "mlir/Pass/PassRegistry.h" #include "mlir/Tools/mlir-opt/MlirOptMain.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/Passes.h" +struct CIRToLLVMPipelineOptions + : public mlir::PassPipelineOptions { + // When lowering to LLVM, we should apply the CC lowering pass by default. The + // option below allows us to disable it for testing purposes. + Option disableCCLowering{ + *this, "disable-cc-lowering", + llvm::cl::desc("Skips calling convetion lowering pass."), + llvm::cl::init(false)}; +}; + int main(int argc, char **argv) { // TODO: register needed MLIR passes for CIR? mlir::DialectRegistry registry; @@ -52,9 +63,10 @@ int main(int argc, char **argv) { return cir::createConvertCIRToMLIRPass(); }); - mlir::PassPipelineRegistration pipeline( - "cir-to-llvm", "", [](mlir::OpPassManager &pm) { - cir::direct::populateCIRToLLVMPasses(pm); + mlir::PassPipelineRegistration pipeline( + "cir-to-llvm", "", + [](mlir::OpPassManager &pm, const CIRToLLVMPipelineOptions &options) { + cir::direct::populateCIRToLLVMPasses(pm, options.disableCCLowering); }); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { diff --git a/clang/tools/cir-translate/cir-translate.cpp b/clang/tools/cir-translate/cir-translate.cpp index 9ff379a26588..b465a7dfb1fc 100644 --- a/clang/tools/cir-translate/cir-translate.cpp +++ b/clang/tools/cir-translate/cir-translate.cpp @@ -1,4 +1,5 @@ -//===- cir-translate.cpp - CIR Translate Driver ------------------*- C++ -*-===// +//===- cir-translate.cpp - CIR Translate Driver ------------------*- C++ +//-*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -24,20 +25,25 @@ namespace cir { namespace direct { extern void registerCIRDialectTranslation(mlir::DialectRegistry ®istry); -extern std::unique_ptr -lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, - llvm::LLVMContext &llvmCtx, - bool disableVerifier = false); +extern std::unique_ptr lowerDirectlyFromCIRToLLVMIR( + mlir::ModuleOp theModule, llvm::LLVMContext &llvmCtx, + bool disableVerifier = false, bool disableCCLowering = false); } // namespace direct -} +} // namespace cir void registerToLLVMTranslation() { + static llvm::cl::opt disableCCLowering( + "disable-cc-lowering", + llvm::cl::desc("Disable calling convention lowering pass"), + llvm::cl::init(false)); + mlir::TranslateFromMLIRRegistration registration( "cir-to-llvmir", "Translate CIR to LLVMIR", [](mlir::Operation *op, mlir::raw_ostream &output) { llvm::LLVMContext llvmContext; auto llvmModule = cir::direct::lowerDirectlyFromCIRToLLVMIR( - llvm::dyn_cast(op), llvmContext); + llvm::dyn_cast(op), llvmContext, + /*disableVerifier=*/false, disableCCLowering); if (!llvmModule) return mlir::failure(); llvmModule->print(output, nullptr); @@ -52,6 +58,5 @@ void registerToLLVMTranslation() { int main(int argc, char **argv) { registerToLLVMTranslation(); - return failed( - mlir::mlirTranslateMain(argc, argv, "CIR Translation Tool")); + return failed(mlir::mlirTranslateMain(argc, argv, "CIR Translation Tool")); } From a1a4e86b211a4263327820b9c19f3fb79e8863dd Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 11 Oct 2024 03:43:19 +0300 Subject: [PATCH 1931/2301] [CIR][CodeGen] kr-style for function arguments (#938) I tried to run llvm-test-suite and turned out that there are many tests fail with segfault due to old C style (let's remember Kernighan and Ritchie) . This PR fix it by the usual copy-pasta from the original codegen :) So let's take a look at the code: ``` void foo(x) short x; {} int main() { foo(4); return 0; } ``` and CIR for `foo` function is: ``` cir.func @foo(%arg0: !s32i) { %0 = cir.alloca !s16i, !cir.ptr, ["x", init] %1 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr cir.store %arg0, %1 : !s32i, !cir.ptr cir.return } ``` We bitcast the **address** (!!!) and store a value of a bigger size there. And now everything looks fine: ``` cir.func no_proto @foo(%arg0: !s32i) { %0 = cir.alloca !s16i, !cir.ptr, ["x", init] %1 = cir.cast(integral, %arg0 : !s32i), !s16i cir.store %1, %0 : !s16i, !cir.ptr cir.return } ``` We truncate an argument and store it. P.S. The `bitcast` that was there before looks a little bit suspicious and dangerous. Are we sure we can do this unconditional cast while we create `StoreOp` ? --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 32 +++++++++++++++++++++++- clang/test/CIR/CodeGen/kr-func-promote.c | 13 ++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/kr-func-promote.c diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index d60b2d177919..d7a7ac0a1f0f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -970,6 +970,29 @@ static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) { return true; } +/// TODO: this should live in `buildFunctionProlog` +/// An argument came in as a promoted argument; demote it back to its +/// declared type. +static mlir::Value emitArgumentDemotion(CIRGenFunction &CGF, const VarDecl *var, + mlir::Value value) { + mlir::Type ty = CGF.ConvertType(var->getType()); + + // This can happen with promotions that actually don't change the + // underlying type, like the enum promotions. + if (value.getType() == ty) + return value; + + assert( + (isa(ty) || mlir::cir::isAnyFloatingPointType(ty)) && + "unexpected promotion type"); + + if (isa(ty)) + return CGF.getBuilder().CIRBaseBuilderTy::createIntCast(value, ty); + + return CGF.getBuilder().CIRBaseBuilderTy::createCast( + mlir::cir::CastKind::floating, value, ty); +} + void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, @@ -1239,7 +1262,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // Declare all the function arguments in the symbol table. for (const auto nameValue : llvm::zip(Args, EntryBB->getArguments())) { auto *paramVar = std::get<0>(nameValue); - auto paramVal = std::get<1>(nameValue); + mlir::Value paramVal = std::get<1>(nameValue); auto alignment = getContext().getDeclAlign(paramVar); auto paramLoc = getLoc(paramVar->getSourceRange()); paramVal.setLoc(paramLoc); @@ -1252,6 +1275,13 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, auto address = Address(addr, alignment); setAddrOfLocalVar(paramVar, address); + // TODO: this should live in `buildFunctionProlog` + bool isPromoted = isa(paramVar) && + cast(paramVar)->isKNRPromoted(); + assert(!MissingFeatures::constructABIArgDirectExtend()); + if (isPromoted) + paramVal = emitArgumentDemotion(*this, paramVar, paramVal); + // Location of the store to the param storage tracked as beginning of // the function body. auto fnBodyBegin = getLoc(FD->getBody()->getBeginLoc()); diff --git a/clang/test/CIR/CodeGen/kr-func-promote.c b/clang/test/CIR/CodeGen/kr-func-promote.c new file mode 100644 index 000000000000..1edfcd805c6f --- /dev/null +++ b/clang/test/CIR/CodeGen/kr-func-promote.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s + +// CHECK: cir.func {{.*}}@foo(%arg0: !s32i +// CHECK: %0 = cir.alloca !s16i, !cir.ptr, ["x", init] +// CHECK: %1 = cir.cast(integral, %arg0 : !s32i) +// CHECK: cir.store %1, %0 : !s16i, !cir.ptr +void foo(x) short x; {} + +// CHECK: cir.func no_proto @bar(%arg0: !cir.double +// CHECK: %0 = cir.alloca !cir.float, !cir.ptr, ["f", init] +// CHECK: %1 = cir.cast(floating, %arg0 : !cir.double), !cir.float +// CHECK: cir.store %1, %0 : !cir.float, !cir.ptr +void bar(f) float f; {} From bffff3d495214cea1c74c571ec3723c16d306827 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 11 Oct 2024 08:45:12 +0800 Subject: [PATCH 1932/2301] [CIR][CIRGen] Add const attribute to alloca operations (#892) This PR tries to give a simple initial implementation for eliminating redundant loads of constant objects, an idea originally posted by OfekShilon. Specifically, this PR adds a new unit attribute `const` to the `cir.alloca` operation. Presence of this attribute indicates that the alloca-ed object is declared `const` in the input source program. CIRGen is updated accordingly to start emitting this new attribute. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 ++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 14 ++--- clang/test/CIR/CodeGen/assign-operator.cpp | 4 +- clang/test/CIR/CodeGen/basic.cpp | 4 +- clang/test/CIR/CodeGen/bitfields.cpp | 2 +- clang/test/CIR/CodeGen/cond.cpp | 4 +- clang/test/CIR/CodeGen/const-alloca.cpp | 52 +++++++++++++++++++ clang/test/CIR/CodeGen/coro-task.cpp | 4 +- .../CodeGen/ctor-member-lvalue-to-rvalue.cpp | 2 +- clang/test/CIR/CodeGen/derived-to-base.cpp | 2 +- clang/test/CIR/CodeGen/lvalue-refs.cpp | 2 +- clang/test/CIR/CodeGen/new.cpp | 4 +- clang/test/CIR/CodeGen/rangefor.cpp | 4 +- clang/test/CIR/CodeGen/return.cpp | 2 +- .../CIR/CodeGen/temporary-materialization.cpp | 4 +- 15 files changed, 84 insertions(+), 25 deletions(-) create mode 100644 clang/test/CIR/CodeGen/const-alloca.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index aabe655c35b0..c959c7176473 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -469,6 +469,9 @@ def AllocaOp : CIR_Op<"alloca", [ cases, the first use contains the initialization (a cir.store, a cir.call to a ctor, etc). + The presence of the `const` attribute indicates that the local variable is + declared with C/C++ `const` keyword. + The `dynAllocSize` specifies the size to dynamically allocate on the stack and ignores the allocation size based on the original type. This is useful when handling VLAs and is omitted when declaring regular local variables. @@ -492,6 +495,7 @@ def AllocaOp : CIR_Op<"alloca", [ TypeAttr:$allocaType, StrAttr:$name, UnitAttr:$init, + UnitAttr:$constant, ConfinedAttr, [IntMinValue<0>]>:$alignment, OptionalAttr:$annotations, OptionalAttr:$ast @@ -530,6 +534,7 @@ def AllocaOp : CIR_Op<"alloca", [ ($dynAllocSize^ `:` type($dynAllocSize) `,`)? `[` $name (`,` `init` $init^)? + (`,` `const` $constant^)? `]` ($annotations^)? (`ast` $ast^)? attr-dict diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index d7a7ac0a1f0f..e21ca7e87620 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -305,10 +305,11 @@ mlir::LogicalResult CIRGenFunction::declare(const Decl *var, QualType ty, assert(!symbolTable.count(var) && "not supposed to be available just yet"); addr = buildAlloca(namedVar->getName(), ty, loc, alignment); - if (isParam) { - auto allocaOp = cast(addr.getDefiningOp()); + auto allocaOp = cast(addr.getDefiningOp()); + if (isParam) allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext())); - } + if (ty->isReferenceType() || ty.isConstQualified()) + allocaOp.setConstantAttr(mlir::UnitAttr::get(builder.getContext())); symbolTable.insert(var, addr); return mlir::success(); @@ -324,10 +325,11 @@ mlir::LogicalResult CIRGenFunction::declare(Address addr, const Decl *var, assert(!symbolTable.count(var) && "not supposed to be available just yet"); addrVal = addr.getPointer(); - if (isParam) { - auto allocaOp = cast(addrVal.getDefiningOp()); + auto allocaOp = cast(addrVal.getDefiningOp()); + if (isParam) allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext())); - } + if (ty->isReferenceType() || ty.isConstQualified()) + allocaOp.setConstantAttr(mlir::UnitAttr::get(builder.getContext())); symbolTable.insert(var, addrVal); return mlir::success(); diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 5942beb296dc..63fc25c5817f 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -16,7 +16,7 @@ struct String { // // CHECK: cir.func linkonce_odr @_ZN10StringViewC2ERK6String // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init, const] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr // CHECK: cir.store %arg1, %1 : !cir.ptr // CHECK: %2 = cir.load %0 : !cir.ptr> @@ -47,7 +47,7 @@ struct String { // // CHECK: cir.func linkonce_odr @_ZN10StringViewaSEOS_ // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} - // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["", init] {alignment = 8 : i64} + // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["", init, const] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr // CHECK: cir.store %arg1, %1 : !cir.ptr diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index 8817f97dca10..7a05532ad36c 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -155,8 +155,8 @@ void x() { } // CHECK: cir.func @_Z1xv() -// CHECK: %0 = cir.alloca !cir.bool, !cir.ptr, ["b0", init] {alignment = 1 : i64} -// CHECK: %1 = cir.alloca !cir.bool, !cir.ptr, ["b1", init] {alignment = 1 : i64} +// CHECK: %0 = cir.alloca !cir.bool, !cir.ptr, ["b0", init, const] {alignment = 1 : i64} +// CHECK: %1 = cir.alloca !cir.bool, !cir.ptr, ["b1", init, const] {alignment = 1 : i64} // CHECK: %2 = cir.const #true // CHECK: cir.store %2, %0 : !cir.bool, !cir.ptr // CHECK: %3 = cir.const #false diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index bdef100edf06..d54c7fbaa6a9 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -43,7 +43,7 @@ void store_field() { } // CHECK: cir.func @_Z10load_field -// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["s", init, const] // CHECK: [[TMP1:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr // CHECK: [[TMP2:%.*]] = cir.get_member [[TMP1]][1] {name = "d"} : !cir.ptr -> !cir.ptr> // CHECK: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr>) -> !s32i diff --git a/clang/test/CIR/CodeGen/cond.cpp b/clang/test/CIR/CodeGen/cond.cpp index e00ee528a72d..aea901a8ee9d 100644 --- a/clang/test/CIR/CodeGen/cond.cpp +++ b/clang/test/CIR/CodeGen/cond.cpp @@ -11,8 +11,8 @@ min(const unsigned long& __a, const unsigned long& __b) { } // CHECK: cir.func @_Z3minRKmS0_(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["__a", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["__b", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["__a", init, const] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["__b", init, const] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/const-alloca.cpp b/clang/test/CIR/CodeGen/const-alloca.cpp new file mode 100644 index 000000000000..c15e77d306ed --- /dev/null +++ b/clang/test/CIR/CodeGen/const-alloca.cpp @@ -0,0 +1,52 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int produce_int(); +void blackbox(const int &); + +void local_const_int() { + const int x = produce_int(); +} + +// CHECK-LABEL: @_Z15local_const_intv +// CHECK: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init, const] +// CHECK: } + +void param_const_int(const int x) {} + +// CHECK-LABEL: @_Z15param_const_inti +// CHECK: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init, const] +// CHECK: } + +void local_constexpr_int() { + constexpr int x = 42; + blackbox(x); +} + +// CHECK-LABEL: @_Z19local_constexpr_intv +// CHECK: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init, const] +// CHECK: } + +void local_reference() { + int x = 0; + int &r = x; +} + +// CHECK-LABEL: @_Z15local_referencev +// CHECK: %{{.+}} = cir.alloca !cir.ptr, !cir.ptr>, ["r", init, const] +// CHECK: } + +struct Foo { + int a; + int b; +}; + +Foo produce_foo(); + +void local_const_struct() { + const Foo x = produce_foo(); +} + +// CHECK-LABEL: @_Z18local_const_structv +// CHECK: %{{.+}} = cir.alloca !ty_Foo, !cir.ptr, ["x", init, const] +// CHECK: } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index bcc2f5fcc38b..acd2818e7a4b 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -275,8 +275,8 @@ folly::coro::Task byRef(const std::string& s) { // FIXME: this could be less redundant than two allocas + reloads // CHECK: cir.func coroutine @_Z5byRefRKSt6string(%arg0: !cir.ptr {{.*}} ![[IntTask]] extra{{.*}}{ -// CHECK: %[[#AllocaParam:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] -// CHECK: %[[#AllocaFnUse:]] = cir.alloca !cir.ptr, {{.*}} ["s", init] +// CHECK: %[[#AllocaParam:]] = cir.alloca !cir.ptr, {{.*}} ["s", init, const] +// CHECK: %[[#AllocaFnUse:]] = cir.alloca !cir.ptr, {{.*}} ["s", init, const] folly::coro::Task silly_coro() { std::optional> task; diff --git a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp index d8e42f46429f..251ff35bfcff 100644 --- a/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp +++ b/clang/test/CIR/CodeGen/ctor-member-lvalue-to-rvalue.cpp @@ -7,7 +7,7 @@ struct String { String(const String &s) : size{s.size} {} // CHECK: cir.func linkonce_odr @_ZN6StringC2ERKS_ // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init, const] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 // CHECK: cir.store %arg1, %1 // CHECK: %2 = cir.load %0 diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 2fcdbd21583d..e3a860d99ac3 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -109,7 +109,7 @@ void vcall(C1 &c1) { } // CHECK: cir.func @_Z5vcallR2C1(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["c1", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["c1", init, const] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !ty_buffy, !cir.ptr, ["b"] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !s32i, !cir.ptr, ["e"] {alignment = 4 : i64} // CHECK: %3 = cir.alloca !ty_buffy, !cir.ptr, ["agg.tmp0"] {alignment = 8 : i64} diff --git a/clang/test/CIR/CodeGen/lvalue-refs.cpp b/clang/test/CIR/CodeGen/lvalue-refs.cpp index f1e6dd2fed2a..951c6b66fc72 100644 --- a/clang/test/CIR/CodeGen/lvalue-refs.cpp +++ b/clang/test/CIR/CodeGen/lvalue-refs.cpp @@ -7,7 +7,7 @@ struct String { void split(String &S) {} // CHECK: cir.func @_Z5splitR6String(%arg0: !cir.ptr -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["S", init] +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["S", init, const] void foo() { String s; diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index acf7df22d5a9..bcce7d566793 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -12,8 +12,8 @@ void m(int a, int b) { } // CHECK: cir.func linkonce_odr @_ZSt11make_sharedI1SJRiS1_EESt10shared_ptrIT_EDpOT0_( -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["args", init] {alignment = 8 : i64} -// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["args", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["args", init, const] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["args", init, const] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !ty_std3A3Ashared_ptr3CS3E, !cir.ptr, ["__retval"] {alignment = 1 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.store %arg1, %1 : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index f4e78a725e3e..8c63b688cdd0 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -33,10 +33,10 @@ void init(unsigned numImages) { // CHECK: %3 = cir.cast(integral, %2 : !u32i), !u64i // CHECK: cir.call @_ZNSt6vectorI6tripleEC1Em(%1, %3) : (!cir.ptr, !u64i) -> () // CHECK: cir.scope { -// CHECK: %4 = cir.alloca !cir.ptr, !cir.ptr>, ["__range1", init] {alignment = 8 : i64} +// CHECK: %4 = cir.alloca !cir.ptr, !cir.ptr>, ["__range1", init, const] {alignment = 8 : i64} // CHECK: %5 = cir.alloca ![[VEC_IT]], !cir.ptr, ["__begin1", init] {alignment = 8 : i64} // CHECK: %6 = cir.alloca ![[VEC_IT]], !cir.ptr, ["__end1", init] {alignment = 8 : i64} -// CHECK: %7 = cir.alloca !cir.ptr, !cir.ptr>, ["image", init] {alignment = 8 : i64} +// CHECK: %7 = cir.alloca !cir.ptr, !cir.ptr>, ["image", init, const] {alignment = 8 : i64} // CHECK: cir.store %1, %4 : !cir.ptr, !cir.ptr> // CHECK: %8 = cir.load %4 : !cir.ptr>, !cir.ptr // CHECK: %9 = cir.call @_ZNSt6vectorI6tripleE5beginEv(%8) : (!cir.ptr) -> ![[VEC_IT]] diff --git a/clang/test/CIR/CodeGen/return.cpp b/clang/test/CIR/CodeGen/return.cpp index 8391e647d46b..7b8bbdb86670 100644 --- a/clang/test/CIR/CodeGen/return.cpp +++ b/clang/test/CIR/CodeGen/return.cpp @@ -5,7 +5,7 @@ int &ret0(int &x) { } // CHECK: cir.func @_Z4ret0Ri -// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["x", init] {alignment = 8 : i64} +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["x", init, const] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: %2 = cir.load %0 : !cir.ptr>, !cir.ptr diff --git a/clang/test/CIR/CodeGen/temporary-materialization.cpp b/clang/test/CIR/CodeGen/temporary-materialization.cpp index 3b063db09dc3..e72c16a25abc 100644 --- a/clang/test/CIR/CodeGen/temporary-materialization.cpp +++ b/clang/test/CIR/CodeGen/temporary-materialization.cpp @@ -11,7 +11,7 @@ int test() { // CHECK: cir.func @_Z4testv() // CHECK-NEXT: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: %[[#TEMP_SLOT:]] = cir.alloca !s32i, !cir.ptr, ["ref.tmp0", init] {alignment = 4 : i64} -// CHECK-NEXT: %[[#x:]] = cir.alloca !cir.ptr, !cir.ptr>, ["x", init] {alignment = 8 : i64} +// CHECK-NEXT: %[[#x:]] = cir.alloca !cir.ptr, !cir.ptr>, ["x", init, const] {alignment = 8 : i64} // CHECK-NEXT: cir.scope { // CHECK-NEXT: %[[#TEMP_VALUE:]] = cir.call @_Z8make_intv() : () -> !s32i // CHECK-NEXT: cir.store %[[#TEMP_VALUE]], %[[#TEMP_SLOT]] : !s32i, !cir.ptr @@ -33,7 +33,7 @@ int test_scoped() { // CHECK-NEXT: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK: cir.scope { // CHECK-NEXT: %[[#TEMP_SLOT:]] = cir.alloca !s32i, !cir.ptr, ["ref.tmp0", init] {alignment = 4 : i64} -// CHECK-NEXT: %[[#y:]] = cir.alloca !cir.ptr, !cir.ptr>, ["y", init] {alignment = 8 : i64} +// CHECK-NEXT: %[[#y:]] = cir.alloca !cir.ptr, !cir.ptr>, ["y", init, const] {alignment = 8 : i64} // CHECK-NEXT: cir.scope { // CHECK-NEXT: %[[#TEMP_VALUE:]] = cir.call @_Z8make_intv() : () -> !s32i // CHECK-NEXT: cir.store %[[#TEMP_VALUE]], %[[#TEMP_SLOT]] : !s32i, !cir.ptr From 7234849e70805fe33afa4fb7de9996e4778665b3 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Thu, 10 Oct 2024 20:58:03 -0400 Subject: [PATCH 1933/2301] [CIR][Lowering] VecCreateOp and VecSplatOp lowering choose LLVM:PoisonOp (#959) They should use PoisonOp (which becomes PoisonValue in LLVMIR) as it is the OG's choice. Proof: We generate VecCreateOp [here ](https://github.com/llvm/clangir/blob/2ca12fe5ec3a1e7279256f069010be2d68200585/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp#L1975) And it's OG counterpart is [here](https://github.com/llvm/clangir/blob/2ca12fe5ec3a1e7279256f069010be2d68200585/clang/lib/CodeGen/CGExprScalar.cpp#L2096) OG uses PoisonValue. As to VecSplatOp, OG unconditionally [chooses PoisonValue ](https://github.com/llvm/clangir/blob/2ca12fe5ec3a1e7279256f069010be2d68200585/llvm/lib/IR/IRBuilder.cpp#L1204) A even more solid proof for this case is that when we use OG to generate code for our test case I changed in this PR , its always using poison instead of undef as far as VecSplat and VecCreate is concerned. The [OG generated code for vectype-ext.cpp ](https://godbolt.org/z/eqx1rns86) here. The [OG generated code for vectype.cpp ](https://godbolt.org/z/frMjbKGeT) here. For reference, generated CIR for the test case vectype-ext.cpp is [here](https://godbolt.org/z/frMjbKGeT) This is to unblock https://github.com/llvm/clangir/pull/936/ to help it set on the right path. Note: There might be other CIR vec ops that need to choose Poison to be consistent with OG, but I'd limit the scope of this PR, and wait to see issue pop up in the future. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 ++-- clang/test/CIR/CodeGen/vectype-ext.cpp | 8 ++++---- clang/test/CIR/Lowering/vectype.cpp | 11 ++++++----- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 01b9d2535c9d..e8449c6a3f8d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1611,7 +1611,7 @@ class CIRVectorCreateLowering assert(vecTy && "result type of cir.vec.create op is not VectorType"); auto llvmTy = typeConverter->convertType(vecTy); auto loc = op.getLoc(); - mlir::Value result = rewriter.create(loc, llvmTy); + mlir::Value result = rewriter.create(loc, llvmTy); assert(vecTy.getSize() == op.getElements().size() && "cir.vec.create op count doesn't match vector type elements count"); for (uint64_t i = 0; i < vecTy.getSize(); ++i) { @@ -1676,7 +1676,7 @@ class CIRVectorSplatLowering assert(vecTy && "result type of cir.vec.splat op is not VectorType"); auto llvmTy = typeConverter->convertType(vecTy); auto loc = op.getLoc(); - mlir::Value undef = rewriter.create(loc, llvmTy); + mlir::Value undef = rewriter.create(loc, llvmTy); mlir::Value indexValue = rewriter.create(loc, rewriter.getI64Type(), 0); mlir::Value elementValue = adaptor.getValue(); diff --git a/clang/test/CIR/CodeGen/vectype-ext.cpp b/clang/test/CIR/CodeGen/vectype-ext.cpp index 49f3a2ade69f..b5e64499d7b4 100644 --- a/clang/test/CIR/CodeGen/vectype-ext.cpp +++ b/clang/test/CIR/CodeGen/vectype-ext.cpp @@ -26,7 +26,7 @@ void vector_int_test(int x) { // LLVM: %[[#X1:]] = load i32, ptr %{{[0-9]+}}, align 4 // LLVM-NEXT: %[[#X2:]] = load i32, ptr %{{[0-9]+}}, align 4 // LLVM-NEXT: %[[#SUM:]] = add nsw i32 %[[#X2]], 1 - // LLVM-NEXT: %[[#VEC1:]] = insertelement <4 x i32> undef, i32 %[[#X1]], i64 0 + // LLVM-NEXT: %[[#VEC1:]] = insertelement <4 x i32> poison, i32 %[[#X1]], i64 0 // LLVM-NEXT: %[[#VEC2:]] = insertelement <4 x i32> %[[#VEC1]], i32 5, i64 1 // LLVM-NEXT: %[[#VEC3:]] = insertelement <4 x i32> %[[#VEC2]], i32 6, i64 2 // LLVM-NEXT: %[[#VEC4:]] = insertelement <4 x i32> %[[#VEC3]], i32 %[[#SUM]], i64 3 @@ -39,7 +39,7 @@ void vector_int_test(int x) { // LLVM: %[[#X1:]] = load i32, ptr %{{[0-9]+}}, align 4 // LLVM-NEXT: %[[#X2:]] = load i32, ptr %{{[0-9]+}}, align 4 // LLVM-NEXT: %[[#SUM:]] = add nsw i32 %[[#X2]], 1 - // LLVM-NEXT: %[[#VEC1:]] = insertelement <4 x i32> undef, i32 %[[#X1]], i64 0 + // LLVM-NEXT: %[[#VEC1:]] = insertelement <4 x i32> poison, i32 %[[#X1]], i64 0 // LLVM-NEXT: %[[#VEC2:]] = insertelement <4 x i32> %[[#VEC1]], i32 %[[#SUM]], i64 1 // LLVM-NEXT: %[[#VEC3:]] = insertelement <4 x i32> %[[#VEC2]], i32 0, i64 2 // LLVM-NEXT: %[[#VEC4:]] = insertelement <4 x i32> %[[#VEC3]], i32 0, i64 3 @@ -212,7 +212,7 @@ void vector_double_test(int x, double y) { // LLVM: %[[#Y1:]] = load double, ptr %{{[0-9]+}}, align 8 // LLVM-NEXT: %[[#Y2:]] = load double, ptr %{{[0-9]+}}, align 8 // LLVM-NEXT: %[[#SUM:]] = fadd double %[[#Y2]], 1.000000e+00 - // LLVM-NEXT: %[[#VEC1:]] = insertelement <2 x double> undef, double %[[#Y1]], i64 0 + // LLVM-NEXT: %[[#VEC1:]] = insertelement <2 x double> poison, double %[[#Y1]], i64 0 // LLVM-NEXT: %[[#VEC2:]] = insertelement <2 x double> %[[#VEC1]], double %[[#SUM]], i64 1 // LLVM-NEXT: store <2 x double> %[[#VEC2]], ptr %{{[0-9]+}}, align 16 @@ -222,7 +222,7 @@ void vector_double_test(int x, double y) { // CIR: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %[[#dzero]] : !cir.double, !cir.double) : !cir.vector // LLVM: %[[#Y1:]] = load double, ptr %{{[0-9]+}}, align 8 - // LLVM-NEXT: %[[#VEC1:]] = insertelement <2 x double> undef, double %[[#Y1]], i64 0 + // LLVM-NEXT: %[[#VEC1:]] = insertelement <2 x double> poison, double %[[#Y1]], i64 0 // LLVM-NEXT: %[[#VEC2:]] = insertelement <2 x double> %[[#VEC1]], double 0.000000e+00, i64 1 // LLVM-NEXT: store <2 x double> %[[#VEC2]], ptr %{{[0-9]+}}, align 16 diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index 5c436798209d..41b214634a20 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: cir-opt %t.cir -cir-to-llvm -o %t.mlir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ii // RUN: FileCheck --input-file=%t.mlir %s // XFAIL: * @@ -23,7 +24,7 @@ void vector_int_test(int x) { // CHECK: %[[#T46:]] = llvm.load %[[#T1]] {alignment = 4 : i64} : !llvm.ptr -> i32 // CHECK: %[[#T47:]] = llvm.mlir.constant(1 : i32) : i32 // CHECK: %[[#T48:]] = llvm.add %[[#T46]], %[[#T47]] overflow : i32 - // CHECK: %[[#T49:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#T49:]] = llvm.mlir.poison : vector<4xi32> // CHECK: %[[#T50:]] = llvm.mlir.constant(0 : i64) : i64 // CHECK: %[[#T51:]] = llvm.insertelement %[[#T43]], %[[#T49]][%[[#T50]] : i64] : vector<4xi32> // CHECK: %[[#T52:]] = llvm.mlir.constant(1 : i64) : i64 @@ -42,10 +43,10 @@ void vector_int_test(int x) { // Scalar to vector conversion, a.k.a. vector splat. b = a + 7; - // CHECK: %[[#undef:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#poison:]] = llvm.mlir.poison : vector<4xi32> // CHECK: %[[#zeroInt:]] = llvm.mlir.constant(0 : i64) : i64 - // CHECK: %[[#inserted:]] = llvm.insertelement %[[#seven:]], %[[#undef]][%[[#zeroInt]] : i64] : vector<4xi32> - // CHECK: %[[#shuffled:]] = llvm.shufflevector %[[#inserted]], %[[#undef]] [0, 0, 0, 0] : vector<4xi32> + // CHECK: %[[#inserted:]] = llvm.insertelement %[[#seven:]], %[[#poison]][%[[#zeroInt]] : i64] : vector<4xi32> + // CHECK: %[[#shuffled:]] = llvm.shufflevector %[[#inserted]], %[[#poison]] [0, 0, 0, 0] : vector<4xi32> // Extract element. int c = a[x]; @@ -234,7 +235,7 @@ void vector_double_test(int x, double y) { // CHECK: %[[#T30:]] = llvm.load %[[#T3]] {alignment = 8 : i64} : !llvm.ptr -> f64 // CHECK: %[[#T31:]] = llvm.mlir.constant(1.000000e+00 : f64) : f64 // CHECK: %[[#T32:]] = llvm.fadd %[[#T30]], %[[#T31]] : f64 - // CHECK: %[[#T33:]] = llvm.mlir.undef : vector<2xf64> + // CHECK: %[[#T33:]] = llvm.mlir.poison : vector<2xf64> // CHECK: %[[#T34:]] = llvm.mlir.constant(0 : i64) : i64 // CHECK: %[[#T35:]] = llvm.insertelement %[[#T29]], %[[#T33]][%[[#T34]] : i64] : vector<2xf64> // CHECK: %[[#T36:]] = llvm.mlir.constant(1 : i64) : i64 From ef2e1a0e91c14d6abc379ca9811d7c7fb6c93f56 Mon Sep 17 00:00:00 2001 From: ghehg <166402688+ghehg@users.noreply.github.com> Date: Fri, 11 Oct 2024 13:09:01 -0400 Subject: [PATCH 1934/2301] [CIR][NFC][Testing] Fix test failure (#963) as title. Base on my experience of [this type of test(https://github.com/llvm/clangir/blob/a7ac2b4e2055e169d9f556abf5821a1ccab666cd/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp#L51), The number of characters varies in this line as it's about full file path which changes during environment. --- clang/test/CIR/CodeGen/annotations-var.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/annotations-var.c b/clang/test/CIR/CodeGen/annotations-var.c index 931248c1a18d..ffd4bd9b18a4 100644 --- a/clang/test/CIR/CodeGen/annotations-var.c +++ b/clang/test/CIR/CodeGen/annotations-var.c @@ -4,7 +4,7 @@ // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM // LLVM-DAG: @.str.annotation = private unnamed_addr constant [15 x i8] c"localvar_ann_0\00", section "llvm.metadata" -// LLVM-DAG: @.str.1.annotation = private unnamed_addr constant [{{.*}} x i8] c"{{.*}}annotations-var.c\00", section "llvm.metadata" +// LLVM-DAG: @.str.1.annotation = private unnamed_addr constant [{{[0-9]+}} x i8] c"{{.*}}annotations-var.c\00", section "llvm.metadata" // LLVM-DAG: @.str.2.annotation = private unnamed_addr constant [15 x i8] c"localvar_ann_1\00", section "llvm.metadata" void local(void) { From 2d4f506a18ae0f588e8d93095513547fbe011b3a Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 18 Sep 2024 23:06:20 -0400 Subject: [PATCH 1935/2301] [CIR][CodeGen] Support static references to temporaries Pull Request: https://github.com/llvm/clangir/pull/872 --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 143 +++++++++++++----- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 3 +- clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp | 45 ------ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 26 +++- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 9 +- clang/test/CIR/CodeGen/tempref.cpp | 42 +++++ 8 files changed, 176 insertions(+), 99 deletions(-) create mode 100644 clang/test/CIR/CodeGen/tempref.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index e81ff16fd659..543ba8b7cfda 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -306,51 +306,120 @@ void CIRGenFunction::buildInvariantStart([[maybe_unused]] CharUnits Size) { assert(!MissingFeatures::createInvariantIntrinsic()); } -void CIRGenModule::codegenGlobalInitCxxStructor(const VarDecl *D, - mlir::cir::GlobalOp Addr, - bool NeedsCtor, bool NeedsDtor, - bool isCstStorage) { - assert(D && " Expected a global declaration!"); - CIRGenFunction CGF{*this, builder, true}; - CurCGF = &CGF; - CurCGF->CurFn = Addr; - Addr.setAstAttr(mlir::cir::ASTVarDeclAttr::get(builder.getContext(), D)); +void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, + mlir::cir::GlobalOp addr, + bool performInit) { + const Expr *init = varDecl->getInit(); + QualType ty = varDecl->getType(); + + // TODO: handle address space + // The address space of a static local variable (DeclPtr) may be different + // from the address space of the "this" argument of the constructor. In that + // case, we need an addrspacecast before calling the constructor. + // + // struct StructWithCtor { + // __device__ StructWithCtor() {...} + // }; + // __device__ void foo() { + // __shared__ StructWithCtor s; + // ... + // } + // + // For example, in the above CUDA code, the static local variable s has a + // "shared" address space qualifier, but the constructor of StructWithCtor + // expects "this" in the "generic" address space. + assert(!MissingFeatures::addressSpace()); + + if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && + varDecl->hasAttr()) { + llvm_unreachable("NYI"); + } - if (NeedsCtor) { - mlir::OpBuilder::InsertionGuard guard(builder); - auto block = builder.createBlock(&Addr.getCtorRegion()); - CIRGenFunction::LexicalScope lexScope{*CurCGF, Addr.getLoc(), - builder.getInsertionBlock()}; - lexScope.setAsGlobalInit(); + assert(varDecl && " Expected a global declaration!"); + CIRGenFunction cgf{*this, builder, true}; + CurCGF = &cgf; + CurCGF->CurFn = addr; - builder.setInsertionPointToStart(block); - Address DeclAddr(getAddrOfGlobalVar(D), getASTContext().getDeclAlign(D)); - buildDeclInit(CGF, D, DeclAddr); - builder.setInsertionPointToEnd(block); - builder.create(Addr->getLoc()); - } + CIRGenFunction::SourceLocRAIIObject fnLoc{cgf, + getLoc(varDecl->getLocation())}; - if (isCstStorage) { - // TODO: this leads to a missing feature in the moment, probably also need a - // LexicalScope to be inserted here. - buildDeclInvariant(CGF, D); - } else { - // If not constant storage we'll emit this regardless of NeedsDtor value. + addr.setAstAttr( + mlir::cir::ASTVarDeclAttr::get(builder.getContext(), varDecl)); + + if (ty->isReferenceType()) { mlir::OpBuilder::InsertionGuard guard(builder); - auto block = builder.createBlock(&Addr.getDtorRegion()); - CIRGenFunction::LexicalScope lexScope{*CurCGF, Addr.getLoc(), + auto *block = builder.createBlock(&addr.getCtorRegion()); + CIRGenFunction::LexicalScope lexScope{*CurCGF, addr.getLoc(), builder.getInsertionBlock()}; lexScope.setAsGlobalInit(); - builder.setInsertionPointToStart(block); - buildDeclDestroy(CGF, D); + auto getGlobal = builder.createGetGlobal(addr); + + Address declAddr(getGlobal, getGlobal.getType(), + getASTContext().getDeclAlign(varDecl)); + assert(performInit && "cannot have constant initializer which needs " + "destruction for reference"); + RValue rv = cgf.buildReferenceBindingToExpr(init); + { + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::Operation *rvalueDefOp = rv.getScalarVal().getDefiningOp(); + if (rvalueDefOp && rvalueDefOp->getBlock()) { + mlir::Block *rvalSrcBlock = rvalueDefOp->getBlock(); + if (!rvalSrcBlock->empty() && + isa(rvalSrcBlock->back())) { + auto &front = rvalSrcBlock->front(); + getGlobal.getDefiningOp()->moveBefore(&front); + auto yield = cast(rvalSrcBlock->back()); + builder.setInsertionPoint(yield); + } + } + cgf.buildStoreOfScalar(rv.getScalarVal(), declAddr, false, ty); + } builder.setInsertionPointToEnd(block); - if (block->empty()) { - block->erase(); - // Don't confuse lexical cleanup. - builder.clearInsertionPoint(); - } else - builder.create(Addr->getLoc()); + builder.create(addr->getLoc()); + } else { + bool needsDtor = varDecl->needsDestruction(getASTContext()) == + QualType::DK_cxx_destructor; + // PerformInit, constant store invariant / destroy handled below. + bool isConstantStorage = + varDecl->getType().isConstantStorage(getASTContext(), true, !needsDtor); + if (performInit) { + mlir::OpBuilder::InsertionGuard guard(builder); + auto *block = builder.createBlock(&addr.getCtorRegion()); + CIRGenFunction::LexicalScope lexScope{*CurCGF, addr.getLoc(), + builder.getInsertionBlock()}; + lexScope.setAsGlobalInit(); + + builder.setInsertionPointToStart(block); + Address declAddr(getAddrOfGlobalVar(varDecl), + getASTContext().getDeclAlign(varDecl)); + buildDeclInit(cgf, varDecl, declAddr); + builder.setInsertionPointToEnd(block); + builder.create(addr->getLoc()); + } + + if (isConstantStorage) { + // TODO: this leads to a missing feature in the moment, probably also need + // a LexicalScope to be inserted here. + buildDeclInvariant(cgf, varDecl); + } else { + // If not constant storage we'll emit this regardless of NeedsDtor value. + mlir::OpBuilder::InsertionGuard guard(builder); + auto *block = builder.createBlock(&addr.getDtorRegion()); + CIRGenFunction::LexicalScope lexScope{*CurCGF, addr.getLoc(), + builder.getInsertionBlock()}; + lexScope.setAsGlobalInit(); + + builder.setInsertionPointToStart(block); + buildDeclDestroy(cgf, varDecl); + builder.setInsertionPointToEnd(block); + if (block->empty()) { + block->erase(); + // Don't confuse lexical cleanup. + builder.clearInsertionPoint(); + } else + builder.create(addr->getLoc()); + } } CurCGF = nullptr; diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 6c67e849a4c4..7b0acae564b0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -170,8 +170,7 @@ class CIRGenCXXABI { /// \param Dtor - a function taking a single pointer argument /// \param Addr - a pointer to pass to the destructor function. virtual void registerGlobalDtor(CIRGenFunction &CGF, const VarDecl *D, - mlir::cir::FuncOp dtor, - mlir::Attribute Addr) = 0; + mlir::cir::FuncOp dtor, mlir::Value Addr) = 0; virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, FunctionArgList &Args) const = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp index 682eddbe9581..d50866853377 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp @@ -44,8 +44,6 @@ void CIRGenModule::buildCXXGlobalVarDeclInitFunc(const VarDecl *D, D->hasAttr())) return; - assert(!getLangOpts().OpenMP && "OpenMP global var init not implemented"); - // Check if we've already initialized this decl. auto I = DelayedCXXInitPosition.find(D); if (I != DelayedCXXInitPosition.end() && I->second == ~0U) @@ -53,46 +51,3 @@ void CIRGenModule::buildCXXGlobalVarDeclInitFunc(const VarDecl *D, buildCXXGlobalVarDeclInit(D, Addr, PerformInit); } - -void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *D, - mlir::cir::GlobalOp Addr, - bool PerformInit) { - QualType T = D->getType(); - - // TODO: handle address space - // The address space of a static local variable (DeclPtr) may be different - // from the address space of the "this" argument of the constructor. In that - // case, we need an addrspacecast before calling the constructor. - // - // struct StructWithCtor { - // __device__ StructWithCtor() {...} - // }; - // __device__ void foo() { - // __shared__ StructWithCtor s; - // ... - // } - // - // For example, in the above CUDA code, the static local variable s has a - // "shared" address space qualifier, but the constructor of StructWithCtor - // expects "this" in the "generic" address space. - assert(!MissingFeatures::addressSpace()); - - if (!T->isReferenceType()) { - if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && - D->hasAttr()) { - llvm_unreachable("NYI"); - } - bool NeedsDtor = - D->needsDestruction(getASTContext()) == QualType::DK_cxx_destructor; - // PerformInit, constant store invariant / destroy handled below. - bool isCstStorage = - D->getType().isConstantStorage(getASTContext(), true, !NeedsDtor); - codegenGlobalInitCxxStructor(D, Addr, PerformInit, NeedsDtor, isCstStorage); - return; - } - - assert(PerformInit && "cannot have constant initializer which needs " - "destruction for reference"); - // TODO(cir): buildReferenceBindingToExpr - llvm_unreachable("NYI"); -} diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 2e9a7c1ed35d..3ead6e02d6f5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -20,6 +20,7 @@ #include "EHScopeStack.h" #include "TargetInfo.h" +#include "clang/AST/Decl.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/GlobalDecl.h" #include "clang/Basic/Builtins.h" @@ -1949,6 +1950,9 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { // CK_NoOp can model a qualification conversion, which can remove an array // bound and change the IR type. LValue LV = buildLValue(E->getSubExpr()); + // Propagate the volatile qualifier to LValue, if exists in E. + if (E->changesVolatileQualification()) + llvm_unreachable("NYI"); if (LV.isSimple()) { Address V = LV.getAddress(); if (V.isValid()) { @@ -2192,8 +2196,14 @@ static Address createReferenceTemporary(CIRGenFunction &CGF, CGF.getCounterRefTmpAsString(), Alloca, ip); } case SD_Thread: - case SD_Static: - assert(0 && "NYI"); + case SD_Static: { + auto a = mlir::cast( + CGF.CGM.getAddrOfGlobalTemporary(M, Inner)); + auto f = CGF.CGM.getBuilder().createGetGlobal(a); + assert(a.getAlignment().has_value() && + "This should always have an alignment"); + return Address(f, clang::CharUnits::fromQuantity(a.getAlignment().value())); + } case SD_Dynamic: llvm_unreachable("temporary can't have dynamic storage duration"); @@ -2229,12 +2239,20 @@ static void pushTemporaryCleanup(CIRGenFunction &CGF, switch (M->getStorageDuration()) { case SD_Static: case SD_Thread: { + mlir::cir::FuncOp cleanupFn; + mlir::Value cleanupArg; if (E->getType()->isArrayType()) { llvm_unreachable("SD_Static|SD_Thread + array types not implemented"); } else { - llvm_unreachable("SD_Static|SD_Thread for general types not implemented"); + cleanupFn = CGF.CGM + .getAddrAndTypeOfCXXStructor( + GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete)) + .second; + cleanupArg = ReferenceTemporary.emitRawPointer(); } - llvm_unreachable("SD_Static|SD_Thread not implemented"); + CGF.CGM.getCXXABI().registerGlobalDtor( + CGF, cast(M->getExtendingDecl()), cleanupFn, cleanupArg); + break; } case SD_FullExpression: diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 50c30f8f692a..fdb2519a2ac8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -173,8 +173,7 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { bool Delegating, Address This, QualType ThisTy) override; void registerGlobalDtor(CIRGenFunction &CGF, const VarDecl *D, - mlir::cir::FuncOp dtor, - mlir::Attribute Addr) override; + mlir::cir::FuncOp dtor, mlir::Value Addr) override; virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) override; virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) override; CatchTypeInfo @@ -2144,7 +2143,7 @@ void CIRGenItaniumCXXABI::buildDestructorCall( void CIRGenItaniumCXXABI::registerGlobalDtor(CIRGenFunction &CGF, const VarDecl *D, mlir::cir::FuncOp dtor, - mlir::Attribute Addr) { + mlir::Value Addr) { if (D->isNoDestroy(CGM.getASTContext())) return; diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 21813577dbad..7da61a261e21 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1662,7 +1662,7 @@ CIRGenModule::getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *expr, } else { // No initializer, the initialization will be provided when we initialize // the declaration which performed lifetime extension. - llvm_unreachable("else value"); + type = getTypes().convertTypeForMem(materializedType); } // Create a global variable for this lifetime-extended temporary. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 1aee19d541e0..79521e8aabfc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -630,8 +630,8 @@ class CIRGenModule : public CIRGenTypeCache { bool IsTentative = false); /// Emit the function that initializes the specified global - void buildCXXGlobalVarDeclInit(const VarDecl *D, mlir::cir::GlobalOp Addr, - bool PerformInit); + void buildCXXGlobalVarDeclInit(const VarDecl *varDecl, + mlir::cir::GlobalOp addr, bool performInit); void buildCXXGlobalVarDeclInitFunc(const VarDecl *D, mlir::cir::GlobalOp Addr, bool PerformInit); @@ -673,11 +673,6 @@ class CIRGenModule : public CIRGenTypeCache { // or if they are alias to each other. mlir::cir::FuncOp codegenCXXStructor(clang::GlobalDecl GD); - // Produce code for this constructor/destructor for global initialzation. - void codegenGlobalInitCxxStructor(const clang::VarDecl *D, - mlir::cir::GlobalOp Addr, bool NeedsCtor, - bool NeedsDtor, bool isCstStorage); - bool lookupRepresentativeDecl(llvm::StringRef MangledName, clang::GlobalDecl &Result) const; diff --git a/clang/test/CIR/CodeGen/tempref.cpp b/clang/test/CIR/CodeGen/tempref.cpp new file mode 100644 index 000000000000..9c7ac0eccb86 --- /dev/null +++ b/clang/test/CIR/CodeGen/tempref.cpp @@ -0,0 +1,42 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: cir-translate %t.cir -cir-to-llvmir -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +struct A { ~A(); }; +A &&a = dynamic_cast(A{}); + +// CHECK: cir.func private @_ZN1AD1Ev(!cir.ptr) extra(#fn_attr) +// CHECK-NEXT: cir.global external @a = #cir.ptr : !cir.ptr {alignment = 8 : i64, ast = #cir.var.decl.ast} +// CHECK-NEXT: cir.func internal private @__cxx_global_var_init() { +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[SEVEN:[0-9]+]] = cir.get_global @a : !cir.ptr> +// CHECK-NEXT: %[[EIGHT:[0-9]+]] = cir.get_global @_ZGR1a_ : !cir.ptr +// CHECK-NEXT: cir.store %[[EIGHT]], %[[SEVEN]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: } +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: cir.func private @_GLOBAL__sub_I_tempref.cpp() { +// CHECK-NEXT: cir.call @__cxx_global_var_init() : () -> () +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + +// LLVM: @_ZGR1a_ = internal global %struct.A undef +// LLVM-DAG: @a = global ptr null, align 8 +// LLVM-DAG: @llvm.global_ctors = appending constant [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }] + +// LLVM-DAG: declare {{.*}} void @_ZN1AD1Ev(ptr) + +// LLVM-DAG: define internal void @__cxx_global_var_init() +// LLVM-DAG: br label %[[L1:[0-9]+]] +// LLVM-DAG: [[L1]]: +// LLVM-DAG: store ptr @_ZGR1a_, ptr @a, align 8 +// LLVM-DAG: br label %[[L2:[0-9]+]] +// LLVM-DAG: [[L2]]: +// LLVM-DAG: ret void +// LLVM-DAG: } + +// LLVM-DAG: define void @_GLOBAL__sub_I_tempref.cpp() +// LLVM-DAG: call void @__cxx_global_var_init() +// LLVM-DAG: ret void +// LLVM-DAG: } From c37994d6164ce85aa58e26d8cb818b8712e89922 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 21 May 2024 13:21:20 +0800 Subject: [PATCH 1936/2301] [CIR][CIRGen] Support CodeGen for vbase constructors 1. Add new `cir.vtt.address_point` op for visiting the element of VTT to initialize the virtual pointer. 2. Implement `getVirtualBaseClassOffset` method which provides a virtual offset to adjust to actual virtual pointers in virtual base. 3. Follows the original clang CodeGen scheme for the implementation of most other parts. @bcardosolopes's note: this is cherry-picked from an older PR from Jing Zhang and slightly modified for updates: applied review, test, doc and operation syntax. It does not yet has LLVM lowering support, I'm going to make incremental changes on top of this. Any necessary CIR modifications to this design should follow up shortly too. Also, to make this work I also added more logic to `addImplicitStructorParam`s` and `buildThisParam`. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 59 ++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 13 ++ clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 16 ++- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 72 ++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 8 ++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 105 +++++++++++++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 5 + clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 127 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenVTables.h | 88 +++--------- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 4 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 48 +++++++ clang/test/CIR/CodeGen/vtt.cpp | 125 +++++++++++++++++ 15 files changed, 579 insertions(+), 101 deletions(-) create mode 100644 clang/test/CIR/CodeGen/vtt.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index c959c7176473..58d6f19ca123 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2399,6 +2399,65 @@ def VTableAddrPointOp : CIR_Op<"vtable.address_point", let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// VTTAddrPointOp +//===----------------------------------------------------------------------===// + +def VTTAddrPointOp : CIR_Op<"vtt.address_point", + [Pure, DeclareOpInterfaceMethods]> { + let summary = "Get the VTT address point"; + let description = [{ + The `vtt.address_point` operation retrieves an element from the VTT, + which is the address point of a C++ vtable. In virtual inheritance, + A set of internal `__vptr` for an object are initialized by this operation, + which assigns an element from the VTT. The initialization order is as follows: + + The complete object constructors and destructors find the VTT, + via the mangled name of VTT global variable. They pass the address of + the subobject's sub-VTT entry in the VTT as a second parameter + when calling the base object constructors and destructors. + The base object constructors and destructors use the addresses passed to + initialize the primary virtual pointer and virtual pointers that point to + the classes which either have virtual bases or override virtual functions + with a virtual step. + + The first parameter is either the mangled name of VTT global variable + or the address of the subobject's sub-VTT entry in the VTT. + The second parameter `offset` provides a virtual step to adjust to + the actual address point of the vtable. + + The return type is always a `!cir.ptr>`. + + Example: + ```mlir + cir.global linkonce_odr @_ZTV1B = ... + ... + %3 = cir.base_class_addr(%1 : !cir.ptr nonnull) [0] -> !cir.ptr + %4 = cir.vtt.address_point @_ZTT1D, offset = 1 -> !cir.ptr> + cir.call @_ZN1BC2Ev(%3, %4) + ``` + Or: + ```mlir + %7 = cir.vtt.address_point %3 : !cir.ptr>, offset = 1 -> !cir.ptr> + ``` + }]; + + let arguments = (ins OptionalAttr:$name, + Optional:$sym_addr, + I32Attr:$offset); + let results = (outs Res:$addr); + + let assemblyFormat = [{ + ($name^)? + ($sym_addr^ `:` type($sym_addr))? + `,` + `offset` `=` $offset + `->` qualified(type($addr)) attr-dict + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // SetBitfieldOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index c82b1c3a710c..c0345df376cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -31,6 +31,7 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Location.h" #include "mlir/IR/Types.h" +#include "mlir/IR/Value.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FloatingPointMode.h" @@ -695,6 +696,18 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return Address(baseAddr, ptrTy, addr.getAlignment()); } + mlir::Value createVTTAddrPoint(mlir::Location loc, mlir::Type retTy, + mlir::Value addr, uint64_t offset) { + return create( + loc, retTy, mlir::FlatSymbolRefAttr{}, addr, offset); + } + + mlir::Value createVTTAddrPoint(mlir::Location loc, mlir::Type retTy, + mlir::FlatSymbolRefAttr sym, uint64_t offset) { + return create(loc, retTy, sym, mlir::Value{}, + offset); + } + // FIXME(cir): CIRGenBuilder class should have an attribute with a reference // to the module so that we don't have search for it or pass it around. // FIXME(cir): Track a list of globals, or at least the last one inserted, so diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp index b17206772c3f..27b04503d788 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -66,7 +66,7 @@ void CIRGenCXXABI::buildThisParam(CIRGenFunction &CGF, isThisCompleteObject(CGF.CurGD)) { CGF.CXXABIThisAlignment = Layout.getAlignment(); } else { - llvm_unreachable("NYI"); + CGF.CXXABIThisAlignment = Layout.getNonVirtualAlignment(); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 7b0acae564b0..83da6ad3c49d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -95,7 +95,8 @@ class CIRGenCXXABI { clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating) = 0; /// Emit the ABI-specific prolog for the function - virtual void buildInstanceFunctionProlog(CIRGenFunction &CGF) = 0; + virtual void buildInstanceFunctionProlog(SourceLocation Loc, + CIRGenFunction &CGF) = 0; /// Get the type of the implicit "this" parameter used by a method. May return /// zero if no specific type is applicable, e.g. if the ABI expects the "this" @@ -120,6 +121,14 @@ class CIRGenCXXABI { return CGF.CXXStructorImplicitParamDecl; } + mlir::Value getStructorImplicitParamValue(CIRGenFunction &CGF) { + return CGF.CXXStructorImplicitParamValue; + } + + void setStructorImplicitParamValue(CIRGenFunction &CGF, mlir::Value val) { + CGF.CXXStructorImplicitParamValue = val; + } + /// Perform ABI-specific "this" argument adjustment required prior to /// a call of a virtual function. /// The "VirtualCall" argument is true iff the call itself is virtual. @@ -319,6 +328,11 @@ class CIRGenCXXABI { virtual void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) = 0; + virtual mlir::Value + getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &CGF, + Address This, const CXXRecordDecl *ClassDecl, + const CXXRecordDecl *BaseClassDecl) = 0; + virtual mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, QualType DestRecordTy, diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 6287aa8f65ef..5051488cf2ee 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1186,7 +1186,8 @@ CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { if (auto *CD = dyn_cast(MD)) { // A base class inheriting constructor doesn't get forwarded arguments // needed to construct a virtual base (or base class thereof) - assert(!CD->getInheritedConstructor() && "Inheritance NYI"); + if (auto Inherited = CD->getInheritedConstructor()) + PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType()); } CanQual FTP = GetFormalType(MD); @@ -1194,6 +1195,9 @@ CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { if (PassParams) appendParameterTypes(*this, argTypes, paramInfos, FTP); + CIRGenCXXABI::AddedStructorArgCounts AddedArgs = + TheCXXABI.buildStructorSignature(GD, argTypes); + (void)AddedArgs; assert(paramInfos.empty() && "NYI"); assert(!MD->isVariadic() && "Variadic fns NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 121f27bba22d..c1f2a2480b1e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -689,9 +689,8 @@ static Address ApplyNonVirtualAndVirtualOffset( CharUnits alignment; if (virtualOffset) { assert(nearestVBase && "virtual offset without vbase?"); - llvm_unreachable("NYI"); - // alignment = CGF.CGM.getVBaseAlignment(addr.getAlignment(), - // derivedClass, nearestVBase); + alignment = CGF.CGM.getVBaseAlignment(addr.getAlignment(), derivedClass, + nearestVBase); } else { alignment = addr.getAlignment(); } @@ -714,7 +713,11 @@ void CIRGenFunction::initializeVTablePointer(mlir::Location loc, CharUnits NonVirtualOffset = CharUnits::Zero(); if (CGM.getCXXABI().isVirtualOffsetNeededForVTableField(*this, Vptr)) { - llvm_unreachable("NYI"); + // We need to use the virtual base offset offset because the virtual base + // might have a different offset in the most derived class. + VirtualOffset = CGM.getCXXABI().getVirtualBaseClassOffset( + loc, *this, LoadCXXThisAddress(), Vptr.VTableClass, Vptr.NearestVBase); + NonVirtualOffset = Vptr.OffsetFromNearestVBase; } else { // We can just use the base offset in the complete class. NonVirtualOffset = Vptr.Base.getBaseOffset(); @@ -797,7 +800,16 @@ void CIRGenFunction::getVTablePointers(BaseSubobject Base, bool BaseDeclIsNonVirtualPrimaryBase; if (I.isVirtual()) { - llvm_unreachable("NYI"); + // Check if we've visited this virtual base before. + if (!VBases.insert(BaseDecl).second) + continue; + + const ASTRecordLayout &Layout = + getContext().getASTRecordLayout(VTableClass); + + BaseOffset = Layout.getVBaseClassOffset(BaseDecl); + BaseOffsetFromNearestVBase = CharUnits::Zero(); + BaseDeclIsNonVirtualPrimaryBase = false; } else { const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); @@ -1449,18 +1461,34 @@ mlir::Value CIRGenFunction::GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, const CXXRecordDecl *RD = cast(CurCodeDecl)->getParent(); const CXXRecordDecl *Base = cast(GD.getDecl())->getParent(); + uint64_t SubVTTIndex; + if (Delegating) { llvm_unreachable("NYI"); } else if (RD == Base) { llvm_unreachable("NYI"); } else { - llvm_unreachable("NYI"); + const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); + CharUnits BaseOffset = ForVirtualBase ? Layout.getVBaseClassOffset(Base) + : Layout.getBaseClassOffset(Base); + + SubVTTIndex = + CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset)); + assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!"); } + auto Loc = CGM.getLoc(RD->getBeginLoc()); if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { - llvm_unreachable("NYI"); + // A VTT parameter was passed to the constructor, use it. + auto VTT = LoadCXXVTT(); + return CGM.getBuilder().createVTTAddrPoint(Loc, VTT.getType(), VTT, + SubVTTIndex); } else { - llvm_unreachable("NYI"); + // We're the complete constructor, so get the VTT by name. + auto VTT = CGM.getVTables().getAddrOfVTT(RD); + return CGM.getBuilder().createVTTAddrPoint( + Loc, CGM.getBuilder().getPointerTo(CGM.VoidPtrTy), + mlir::FlatSymbolRefAttr::get(VTT.getSymNameAttr()), SubVTTIndex); } } @@ -1645,8 +1673,25 @@ CIRGenModule::getDynamicOffsetAlignment(clang::CharUnits actualBaseAlign, return std::min(actualBaseAlign, expectedTargetAlign); } -/// Emit a loop to call a particular constructor for each of several members of -/// an array. +/// Return the best known alignment for a pointer to a virtual base, +/// given the alignment of a pointer to the derived class. +clang::CharUnits +CIRGenModule::getVBaseAlignment(CharUnits actualDerivedAlign, + const CXXRecordDecl *derivedClass, + const CXXRecordDecl *vbaseClass) { + // The basic idea here is that an underaligned derived pointer might + // indicate an underaligned base pointer. + + assert(vbaseClass->isCompleteDefinition()); + auto &baseLayout = getASTContext().getASTRecordLayout(vbaseClass); + CharUnits expectedVBaseAlign = baseLayout.getNonVirtualAlignment(); + + return getDynamicOffsetAlignment(actualDerivedAlign, derivedClass, + expectedVBaseAlign); +} + +/// Emit a loop to call a particular constructor for each of several members +/// of an array. /// /// \param ctor the constructor to call for each element /// \param arrayType the type of the array to initialize @@ -1663,8 +1708,8 @@ void CIRGenFunction::buildCXXAggrConstructorCall( NewPointerIsChecked, zeroInitialize); } -/// Emit a loop to call a particular constructor for each of several members of -/// an array. +/// Emit a loop to call a particular constructor for each of several members +/// of an array. /// /// \param ctor the constructor to call for each element /// \param numElements the number of elements in the array; @@ -1749,7 +1794,8 @@ void CIRGenFunction::buildCXXAggrConstructorCall( AggValueSlot::DoesNotOverlap, AggValueSlot::IsNotZeroed, NewPointerIsChecked ? AggValueSlot::IsSanitizerChecked : AggValueSlot::IsNotSanitizerChecked); - buildCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false, + buildCXXConstructorCall(ctor, Ctor_Complete, + /*ForVirtualBase=*/false, /*Delegating=*/false, currAVS, E); builder.create(loc); }); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index e21ca7e87620..f4cc98277252 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1301,7 +1301,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, } if (D && isa(D) && cast(D)->isInstance()) { - CGM.getCXXABI().buildInstanceFunctionProlog(*this); + CGM.getCXXABI().buildInstanceFunctionProlog(Loc, *this); const auto *MD = cast(D); if (MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 479112cfe148..b8adb165b548 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1585,6 +1585,14 @@ class CIRGenFunction : public CIRGenTypeCache { } Address LoadCXXThisAddress(); + /// Load the VTT parameter to base constructors/destructors have virtual + /// bases. FIXME: Every place that calls LoadCXXVTT is something that needs to + /// be abstracted properly. + mlir::Value LoadCXXVTT() { + assert(CXXStructorImplicitParamValue && "no VTT value for this function"); + return CXXStructorImplicitParamValue; + } + /// Convert the given pointer to a complete class to the given direct base. Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address Value, diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index fdb2519a2ac8..79d4f127ca23 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -155,7 +155,8 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { return false; } - void buildInstanceFunctionProlog(CIRGenFunction &CGF) override; + void buildInstanceFunctionProlog(SourceLocation Loc, + CIRGenFunction &CGF) override; void addImplicitStructorParams(CIRGenFunction &CGF, QualType &ResTy, FunctionArgList &Params) override; @@ -195,6 +196,9 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { SourceLocation Loc) override; mlir::Value getVTableAddressPoint(BaseSubobject Base, const CXXRecordDecl *VTableClass) override; + mlir::Value getVTableAddressPointInStructorWithVTT( + CIRGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, + const CXXRecordDecl *NearestVBase); bool isVirtualOffsetNeededForVTableField(CIRGenFunction &CGF, CIRGenFunction::VPtr Vptr) override; bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const; @@ -291,12 +295,16 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) override; + mlir::Value + getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &CGF, + Address This, const CXXRecordDecl *ClassDecl, + const CXXRecordDecl *BaseClassDecl) override; + // The traditional clang CodeGen emits calls to `__dynamic_cast` directly into // LLVM in the `emitDynamicCastCall` function. In CIR, `dynamic_cast` // expressions are lowered to `cir.dyn_cast` ops instead of calls to runtime // functions. So during CIRGen we don't need the `emitDynamicCastCall` // function that clang CodeGen has. - mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, QualType DestRecordTy, mlir::cir::PointerType DestCIRTy, bool isRefCast, @@ -341,9 +349,19 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { CIRGenCXXABI::AddedStructorArgs CIRGenItaniumCXXABI::getImplicitConstructorArgs( CIRGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating) { - assert(!NeedsVTTParameter(GlobalDecl(D, Type)) && "VTT NYI"); - - return {}; + if (!NeedsVTTParameter(GlobalDecl(D, Type))) + return AddedStructorArgs{}; + + // Insert the implicit 'vtt' argument as the second argument. Make sure to + // correctly reflect its address space, which can differ from generic on + // some targets. + mlir::Value VTT = + CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating); + // LangAS AS = CGM.getGlobalConstantAddressSpace(nullptr); + LangAS AS = LangAS::Default; + QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS); + QualType VTTTy = getContext().getPointerType(Q); + return AddedStructorArgs::prefix({{VTT, VTTTy}}); } /// Return whether the given global decl needs a VTT parameter, which it does if @@ -399,8 +417,11 @@ CIRGenItaniumCXXABI::buildStructorSignature( if ((isa(GD.getDecl()) ? GD.getCtorType() == Ctor_Base : GD.getDtorType() == Dtor_Base) && cast(GD.getDecl())->getParent()->getNumVBases() != 0) { - llvm_unreachable("NYI"); - (void)Context; + LangAS AS = CGM.getGlobalVarAddressSpace(nullptr); + QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS); + ArgTys.insert(ArgTys.begin() + 1, + Context.getPointerType(CanQualType::CreateUnsafe(Q))); + return AddedStructorArgCounts::prefix(1); } return AddedStructorArgCounts{}; @@ -541,7 +562,17 @@ void CIRGenItaniumCXXABI::addImplicitStructorParams(CIRGenFunction &CGF, // Check if we need a VTT parameter as well. if (NeedsVTTParameter(CGF.CurGD)) { - llvm_unreachable("NYI"); + ASTContext &Context = getContext(); + + // FIXME: avoid the fake decl + LangAS AS = CGM.getGlobalVarAddressSpace(nullptr); + QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS); + QualType T = Context.getPointerType(Q); + auto *VTTDecl = ImplicitParamDecl::Create( + Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"), + T, ImplicitParamKind::CXXVTT); + Params.insert(Params.begin() + 1, VTTDecl); + getStructorImplicitParamDecl(CGF) = VTTDecl; } } @@ -556,7 +587,8 @@ void CIRGenCXXABI::setCXXABIThisValue(CIRGenFunction &CGF, CGF.CXXABIThisValue = ThisPtr; } -void CIRGenItaniumCXXABI::buildInstanceFunctionProlog(CIRGenFunction &CGF) { +void CIRGenItaniumCXXABI::buildInstanceFunctionProlog(SourceLocation Loc, + CIRGenFunction &CGF) { // Naked functions have no prolog. if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr()) llvm_unreachable("NYI"); @@ -567,7 +599,10 @@ void CIRGenItaniumCXXABI::buildInstanceFunctionProlog(CIRGenFunction &CGF) { /// Initialize the 'vtt' slot if needed. if (getStructorImplicitParamDecl(CGF)) { - llvm_unreachable("NYI"); + auto Val = CGF.getBuilder().createLoad( + CGF.getLoc(Loc), + CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF))); + setStructorImplicitParamValue(CGF, Val); } /// If this is a function that the ABI specifies returns 'this', initialize @@ -899,6 +934,27 @@ CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer( return Callee; } +mlir::Value CIRGenItaniumCXXABI::getVTableAddressPointInStructorWithVTT( + CIRGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, + const CXXRecordDecl *NearestVBase) { + assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) && + NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT"); + + // Get the secondary vpointer index. + uint64_t VirtualPointerIndex = + CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base); + + /// Load the VTT. + auto VTTPtr = CGF.LoadCXXVTT(); + auto Loc = CGF.getLoc(VTableClass->getSourceRange()); + // Calculate the address point from the VTT, and the offset may be zero. + VTTPtr = CGF.getBuilder().createVTTAddrPoint(Loc, VTTPtr.getType(), VTTPtr, + VirtualPointerIndex); + // And load the address point from the VTT. + return CGF.getBuilder().createAlignedLoad(Loc, CGF.VoidPtrTy, VTTPtr, + CGF.getPointerAlign()); +} + mlir::Value CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base, const CXXRecordDecl *VTableClass) { @@ -926,7 +982,8 @@ mlir::Value CIRGenItaniumCXXABI::getVTableAddressPointInStructor( if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) && NeedsVTTParameter(CGF.CurGD)) { - llvm_unreachable("NYI"); + return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base, + NearestVBase); } return getVTableAddressPoint(Base, VTableClass); } @@ -2235,6 +2292,32 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, builder.create(loc); } +mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset( + mlir::Location loc, CIRGenFunction &CGF, Address This, + const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl) { + auto VTablePtr = CGF.getVTablePtr(loc, This, CGM.UInt8PtrTy, ClassDecl); + CharUnits VBaseOffsetOffset = + CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl, + BaseClassDecl); + mlir::Value OffsetVal = + CGF.getBuilder().getSInt64(VBaseOffsetOffset.getQuantity(), loc); + auto VBaseOffsetPtr = CGF.getBuilder().create( + loc, VTablePtr.getType(), VTablePtr, + OffsetVal); // vbase.offset.ptr + + mlir::Value VBaseOffset; + if (CGM.getItaniumVTableContext().isRelativeLayout()) { + VBaseOffset = CGF.getBuilder().createLoad( + loc, Address(VBaseOffsetPtr, CGM.SInt32Ty, + CharUnits::fromQuantity(4))); // vbase.offset + } else { + VBaseOffset = CGF.getBuilder().createLoad( + loc, Address(VBaseOffsetPtr, CGM.PtrDiffTy, + CGF.getPointerAlign())); // vbase.offset + } + return VBaseOffset; +} + static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { // Prototype: void __cxa_bad_cast(); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 79521e8aabfc..827e7ac82839 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -463,6 +463,11 @@ class CIRGenModule : public CIRGenTypeCache { const clang::CXXRecordDecl *baseDecl, clang::CharUnits expectedTargetAlign); + /// Returns the assumed alignment of a virtual base of a class. + clang::CharUnits getVBaseAlignment(CharUnits DerivedAlign, + const CXXRecordDecl *Derived, + const CXXRecordDecl *VBase); + mlir::cir::FuncOp getAddrOfCXXStructor( clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, mlir::cir::FuncType FnType = nullptr, bool DontDefer = false, diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 3c2af8fbbfdf..450dda5fa1be 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -318,6 +318,75 @@ void CIRGenVTables::createVTableInitializer(ConstantStructBuilder &builder, } } +mlir::cir::GlobalOp CIRGenVTables::generateConstructionVTable( + const CXXRecordDecl *RD, const BaseSubobject &Base, bool BaseIsVirtual, + mlir::cir::GlobalLinkageKind Linkage, + VTableAddressPointsMapTy &AddressPoints) { + if (CGM.getModuleDebugInfo()) + llvm_unreachable("NYI"); + + std::unique_ptr VTLayout( + getItaniumVTableContext().createConstructionVTableLayout( + Base.getBase(), Base.getBaseOffset(), BaseIsVirtual, RD)); + + // Add the address points. + AddressPoints = VTLayout->getAddressPoints(); + + // Get the mangled construction vtable name. + SmallString<256> OutName; + llvm::raw_svector_ostream Out(OutName); + cast(CGM.getCXXABI().getMangleContext()) + .mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), + Base.getBase(), Out); + SmallString<256> Name(OutName); + + bool UsingRelativeLayout = getItaniumVTableContext().isRelativeLayout(); + assert(!UsingRelativeLayout && "NYI"); + + auto VTType = getVTableType(*VTLayout); + + // Construction vtable symbols are not part of the Itanium ABI, so we cannot + // guarantee that they actually will be available externally. Instead, when + // emitting an available_externally VTT, we provide references to an internal + // linkage construction vtable. The ABI only requires complete-object vtables + // to be the same for all instances of a type, not construction vtables. + if (Linkage == mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage) + Linkage = mlir::cir::GlobalLinkageKind::InternalLinkage; + + auto Align = CGM.getDataLayout().getABITypeAlign(VTType); + auto Loc = CGM.getLoc(RD->getSourceRange()); + + // Create the variable that will hold the construction vtable. + auto VTable = CGM.createOrReplaceCXXRuntimeVariable( + Loc, Name, VTType, Linkage, CharUnits::fromQuantity(Align)); + + // V-tables are always unnamed_addr. + assert(!MissingFeatures::unnamedAddr() && "NYI"); + + auto RTTI = CGM.getAddrOfRTTIDescriptor( + Loc, CGM.getASTContext().getTagDeclType(Base.getBase())); + + // Create and set the initializer. + ConstantInitBuilder builder(CGM); + auto components = builder.beginStruct(); + createVTableInitializer(components, *VTLayout, RTTI, + mlir::cir::isLocalLinkage(VTable.getLinkage())); + components.finishAndSetAsInitializer(VTable); + + // Set properties only after the initializer has been set to ensure that the + // GV is treated as definition and not declaration. + assert(!VTable.isDeclaration() && "Shouldn't set properties on declaration"); + CGM.setGVProperties(VTable, RD); + + CGM.buildVTableTypeMetadata(RD, VTable, *VTLayout.get()); + + if (UsingRelativeLayout) { + llvm_unreachable("NYI"); + } + + return VTable; +} + /// Compute the required linkage of the vtable for the given class. /// /// Note that we only call this at the end of the translation unit. @@ -425,8 +494,9 @@ getAddrOfVTTVTable(CIRGenVTables &CGVT, CIRGenModule &CGM, // This is a regular vtable. return CGM.getCXXABI().getAddrOfVTable(MostDerivedClass, CharUnits()); } - - llvm_unreachable("generateConstructionVTable NYI"); + return CGVT.generateConstructionVTable( + MostDerivedClass, vtable.getBaseSubobject(), vtable.isVirtual(), linkage, + addressPoints); } mlir::cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *RD) { @@ -456,6 +526,59 @@ mlir::cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *RD) { return VTT; } +uint64_t CIRGenVTables::getSubVTTIndex(const CXXRecordDecl *RD, + BaseSubobject Base) { + BaseSubobjectPairTy ClassSubobjectPair(RD, Base); + + SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassSubobjectPair); + if (I != SubVTTIndicies.end()) + return I->second; + + VTTBuilder Builder(CGM.getASTContext(), RD, /*GenerateDefinition=*/false); + + for (llvm::DenseMap::const_iterator + I = Builder.getSubVTTIndices().begin(), + E = Builder.getSubVTTIndices().end(); + I != E; ++I) { + // Insert all indices. + BaseSubobjectPairTy ClassSubobjectPair(RD, I->first); + + SubVTTIndicies.insert(std::make_pair(ClassSubobjectPair, I->second)); + } + + I = SubVTTIndicies.find(ClassSubobjectPair); + assert(I != SubVTTIndicies.end() && "Did not find index!"); + + return I->second; +} + +uint64_t CIRGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, + BaseSubobject Base) { + SecondaryVirtualPointerIndicesMapTy::iterator I = + SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base)); + + if (I != SecondaryVirtualPointerIndices.end()) + return I->second; + + VTTBuilder Builder(CGM.getASTContext(), RD, /*GenerateDefinition=*/false); + + // Insert all secondary vpointer indices. + for (llvm::DenseMap::const_iterator + I = Builder.getSecondaryVirtualPointerIndices().begin(), + E = Builder.getSecondaryVirtualPointerIndices().end(); + I != E; ++I) { + std::pair Pair = + std::make_pair(RD, I->first); + + SecondaryVirtualPointerIndices.insert(std::make_pair(Pair, I->second)); + } + + I = SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base)); + assert(I != SecondaryVirtualPointerIndices.end() && "Did not find index!"); + + return I->second; +} + /// Emit the definition of the given vtable. void CIRGenVTables::buildVTTDefinition(mlir::cir::GlobalOp VTT, mlir::cir::GlobalLinkageKind Linkage, diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index 2def67ab1bc6..d439284de679 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -28,28 +28,26 @@ class CXXRecordDecl; namespace cir { class CIRGenModule; -// class ConstantArrayBuilder; -// class ConstantStructBuilder; class CIRGenVTables { CIRGenModule &CGM; clang::VTableContextBase *VTContext; - /// VTableAddressPointsMapTy - Address points for a single vtable. + /// Address points for a single vtable. typedef clang::VTableLayout::AddressPointsMapTy VTableAddressPointsMapTy; typedef std::pair BaseSubobjectPairTy; typedef llvm::DenseMap SubVTTIndiciesMapTy; - /// SubVTTIndicies - Contains indices into the various sub-VTTs. + /// Contains indices into the various sub-VTTs. SubVTTIndiciesMapTy SubVTTIndicies; typedef llvm::DenseMap SecondaryVirtualPointerIndicesMapTy; - /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer + /// Contains the secondary virtual pointer /// indices. SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices; @@ -59,42 +57,13 @@ class CIRGenVTables { /// Cache for the deleted virtual member call function. mlir::cir::FuncOp DeletedVirtualFn = nullptr; - // /// Get the address of a thunk and emit it if necessary. - // llvm::Constant *maybeEmitThunk(GlobalDecl GD, - // const ThunkInfo &ThunkAdjustments, - // bool ForVTable); - void addVTableComponent(ConstantArrayBuilder &builder, const VTableLayout &layout, unsigned componentIndex, mlir::Attribute rtti, unsigned &nextVTableThunkIndex, unsigned vtableAddressPoint, bool vtableHasLocalLinkage); - // /// Add a 32-bit offset to a component relative to the vtable when using - // the - // /// relative vtables ABI. The array builder points to the start of the - // vtable. void addRelativeComponent(ConstantArrayBuilder &builder, - // llvm::Constant *component, - // unsigned vtableAddressPoint, - // bool vtableHasLocalLinkage, - // bool isCompleteDtor) const; - - // /// Create a dso_local stub that will be used for a relative reference in - // the - // /// relative vtable layout. This stub will just be a tail call to the - // original - // /// function and propagate any function attributes from the original. If - // the - // /// original function is already dso_local, the original is returned - // instead - // /// and a stub is not created. - // llvm::Function * - // getOrCreateRelativeStub(llvm::Function *func, - // llvm::GlobalValue::LinkageTypes stubLinkage, - // bool isCompleteDtor) const; - bool useRelativeLayout() const; - mlir::Type getVTableComponentType(); public: @@ -114,30 +83,21 @@ class CIRGenVTables { return *llvm::cast(VTContext); } - // MicrosoftVTableContext &getMicrosoftVTableContext() { - // return *cast(VTContext); - // } - - // /// getSubVTTIndex - Return the index of the sub-VTT for the base class - // of the - // /// given record decl. - // uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base); - - // /// getSecondaryVirtualPointerIndex - Return the index in the VTT where - // the - // /// virtual pointer for the given subobject is located. - // uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, - // BaseSubobject Base); - - // /// GenerateConstructionVTable - Generate a construction vtable for the - // given - // /// base subobject. - // llvm::GlobalVariable * - // GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject - // &Base, - // bool BaseIsVirtual, - // llvm::GlobalVariable::LinkageTypes Linkage, - // VTableAddressPointsMapTy &AddressPoints); + /// Return the index of the sub-VTT for the base class of the given record + /// decl. + uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base); + + /// Return the index in the VTT where the virtual pointer for the given + /// subobject is located. + uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, + BaseSubobject Base); + + /// Generate a construction vtable for the given base subobject. + mlir::cir::GlobalOp + generateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base, + bool BaseIsVirtual, + mlir::cir::GlobalLinkageKind Linkage, + VTableAddressPointsMapTy &AddressPoints); /// Get the address of the VTT for the given record decl. mlir::cir::GlobalOp getAddrOfVTT(const CXXRecordDecl *RD); @@ -161,18 +121,6 @@ class CIRGenVTables { /// arrays of pointers, with one struct element for each vtable in the vtable /// group. mlir::Type getVTableType(const clang::VTableLayout &layout); - - // /// Generate a public facing alias for the vtable and make the vtable - // either - // /// hidden or private. The alias will have the original linkage and - // visibility - // /// of the vtable. This is used for cases under the relative vtables ABI - // /// when a vtable may not be dso_local. - // void GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable, - // llvm::StringRef AliasNameRef); - - // /// Specify a global should not be instrumented with hwasan. - // void RemoveHwasanMetadata(llvm::GlobalValue *GV) const; }; } // end namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index b6e48d534790..b376c9476b05 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -409,7 +409,9 @@ void CIRRecordLowering::computeVolatileBitfields() { void CIRRecordLowering::accumulateBases() { // If we've got a primary virtual base, we need to add it with the bases. if (astRecordLayout.isPrimaryBaseVirtual()) { - llvm_unreachable("NYI"); + const CXXRecordDecl *BaseDecl = astRecordLayout.getPrimaryBase(); + members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::Base, + getStorageType(BaseDecl), BaseDecl)); } // Accumulate the non-virtual bases. diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 93ceb0acce99..0251bc53c084 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2231,6 +2231,54 @@ LogicalResult cir::VTableAddrPointOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// VTTAddrPointOp +//===----------------------------------------------------------------------===// + +LogicalResult +VTTAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { + // VTT ptr is not coming from a symbol. + if (!getName()) + return success(); + auto name = *getName(); + + // Verify that the result type underlying pointer type matches the type of + // the referenced cir.global or cir.func op. + auto op = dyn_cast_or_null( + symbolTable.lookupNearestSymbolFrom(*this, getNameAttr())); + if (!op) + return emitOpError("'") + << name << "' does not reference a valid cir.global"; + auto init = op.getInitialValue(); + if (!init) + return success(); + if (!isa(*init)) + return emitOpError("Expected array in initializer for global VTT'") + << name << "'"; + return success(); +} + +LogicalResult cir::VTTAddrPointOp::verify() { + // The operation uses either a symbol or a value to operate, but not both + if (getName() && getSymAddr()) + return emitOpError("should use either a symbol or value, but not both"); + + // If not a symbol, stick with the concrete type used for getSymAddr. + if (getSymAddr()) + return success(); + + auto resultType = getAddr().getType(); + + auto resTy = mlir::cir::PointerType::get( + getContext(), mlir::cir::PointerType::get( + getContext(), mlir::cir::VoidType::get(getContext()))); + + if (resultType != resTy) + return emitOpError("result type must be '") + << resTy << "', but provided result type is '" << resultType << "'"; + return success(); +} + //===----------------------------------------------------------------------===// // FuncOp //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp new file mode 100644 index 000000000000..eac47dd36804 --- /dev/null +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -0,0 +1,125 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +class A { +public: + int a; + virtual void v() {} +}; + +class B : public virtual A { +public: + int b; + virtual void w(); +}; + +class C : public virtual A { +public: + long c; + virtual void x() {} +}; + +class D : public B, public C { +public: + long d; + virtual void y() {} +}; + + +int main() { + B *b = new D (); + return 0; +} + +// Vtable of Class A +// CIR: cir.global linkonce_odr @_ZTV1A = #cir.vtable<{#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1A> : !cir.ptr, #cir.global_view<@_ZN1A1vEv> : !cir.ptr]> : !cir.array x 3>}> : !ty_anon_struct3 {alignment = 8 : i64} + +// Class A constructor +// CIR: cir.func linkonce_odr @_ZN1AC2Ev(%arg0: !cir.ptr +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : !cir.ptr>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: } + +// Vtable of Class D +// CIR: cir.global linkonce_odr @_ZTV1D = #cir.vtable<{#cir.const_array<[#cir.ptr<40 : i64> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1D> : !cir.ptr, #cir.global_view<@_ZN1B1wEv> : !cir.ptr, #cir.global_view<@_ZN1D1yEv> : !cir.ptr]> : !cir.array x 5>, #cir.const_array<[#cir.ptr<24 : i64> : !cir.ptr, #cir.ptr<-16 : i64> : !cir.ptr, #cir.global_view<@_ZTI1D> : !cir.ptr, #cir.global_view<@_ZN1C1xEv> : !cir.ptr]> : !cir.array x 4>, #cir.const_array<[#cir.ptr : !cir.ptr, #cir.ptr<-40 : i64> : !cir.ptr, #cir.global_view<@_ZTI1D> : !cir.ptr, #cir.global_view<@_ZN1A1vEv> : !cir.ptr]> : !cir.array x 4>}> : !ty_anon_struct4 {alignment = 8 : i64} +// VTT of class D +// CIR: cir.global linkonce_odr @_ZTT1D = #cir.const_array<[#cir.global_view<@_ZTV1D, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D0_1B, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D0_1B, [0 : i32, 1 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D16_1C, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D16_1C, [0 : i32, 1 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTV1D, [0 : i32, 2 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTV1D, [0 : i32, 1 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 7> {alignment = 8 : i64} + +// Class B constructor +// CIR: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr loc({{.*}}), %arg1: !cir.ptr> loc({{.*}})) extra(#fn_attr) { +// CIR: %{{[0-9]+}} = cir.vtt.address_point %{{[0-9]+}} : !cir.ptr>, offset = 0 -> !cir.ptr> +// CIR: %{{[0-9]+}} = cir.load align(8) %{{[0-9]+}} : !cir.ptr>, !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr, !cir.ptr> + +// CIR: %{{[0-9]+}} = cir.vtt.address_point %{{[0-9]+}} : !cir.ptr>, offset = 1 -> !cir.ptr> +// CIR: %{{[0-9]+}} = cir.load align(8) %{{[0-9]+}} : !cir.ptr>, !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr> +// CIR: %{{[0-9]+}} = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.ptr +// CIR: %{{[0-9]+}} = cir.const #cir.int<-24> : !s64i +// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr +// CIR: %{{[0-9]+}} = cir.load %{{[0-9]+}} : !cir.ptr, !s64i +// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr, !cir.ptr> +// CIR: } + +// Class C constructor +// CIR: cir.func linkonce_odr @_ZN1CC2Ev(%arg0: !cir.ptr loc({{.*}}), %arg1: !cir.ptr> loc({{.*}})) extra(#fn_attr) { +// CIR: %{{[0-9]+}} = cir.vtt.address_point %{{[0-9]+}} : !cir.ptr>, offset = 0 -> !cir.ptr> +// CIR: %{{[0-9]+}} = cir.load align(8) %{{[0-9]+}} : !cir.ptr>, !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr, !cir.ptr> + +// CIR: %{{[0-9]+}} = cir.vtt.address_point %{{[0-9]+}} : !cir.ptr>, offset = 1 -> !cir.ptr> +// CIR: %{{[0-9]+}} = cir.load align(8) %{{[0-9]+}} : !cir.ptr>, !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr> +// CIR: %{{[0-9]+}} = cir.load %{{[0-9]+}} : !cir.ptr>, !cir.ptr +// CIR: %{{[0-9]+}} = cir.const #cir.int<-24> : !s64i +// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr +// CIR: %{{[0-9]+}} = cir.load %{{[0-9]+}} : !cir.ptr, !s64i +// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr, !cir.ptr> +// CIR: } + +// Class D constructor +// CIR: cir.func linkonce_odr @_ZN1DC1Ev(%arg0: !cir.ptr loc({{.*}})) extra(#fn_attr) { +// CIR: %{{[0-9]+}} = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CIR: cir.store %arg0, %{{[0-9]+}} : !cir.ptr, !cir.ptr> +// CIR: %[[D_PTR:.*]] = cir.load %0 : !cir.ptr>, !cir.ptr +// CIR: %[[A_PTR:.*]] = cir.base_class_addr(%[[D_PTR]] : !cir.ptr nonnull) [40] -> !cir.ptr +// CIR: cir.call @_ZN1AC2Ev(%[[A_PTR]]) : (!cir.ptr) -> () + +// CIR: %[[B_PTR:.*]] = cir.base_class_addr(%[[D_PTR]] : !cir.ptr nonnull) [0] -> !cir.ptr +// CIR: %[[VTT_D_TO_B:.*]] = cir.vtt.address_point @_ZTT1D, offset = 1 -> !cir.ptr> +// CIR: cir.call @_ZN1BC2Ev(%[[B_PTR]], %[[VTT_D_TO_B]]) : (!cir.ptr, !cir.ptr>) -> () + +// CIR: %[[C_PTR:.*]] = cir.base_class_addr(%1 : !cir.ptr nonnull) [16] -> !cir.ptr +// CIR: %[[VTT_D_TO_C:.*]] = cir.vtt.address_point @_ZTT1D, offset = 3 -> !cir.ptr> +// CIR: cir.call @_ZN1CC2Ev(%[[C_PTR]], %[[VTT_D_TO_C]]) : (!cir.ptr, !cir.ptr>) -> () + +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 0, address_point_index = 3) : !cir.ptr>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 2, address_point_index = 3) : !cir.ptr>> + +// CIR: %{{[0-9]+}} = cir.const #cir.int<40> : !s64i +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr +// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 1, address_point_index = 3) : !cir.ptr>> + +// CIR: %{{[0-9]+}} = cir.const #cir.int<16> : !s64i +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr +// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: cir.return +// CIR: } \ No newline at end of file From 8e8676874dc75f47202d15864946c5714b821e4b Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 11 Oct 2024 19:41:56 -0400 Subject: [PATCH 1937/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vtrn and neon_vtrnq (#942) as title. The generated code is the same as Clang codeden except in a small discrepancy when GEP: OG generates code like this: `%6 = getelementptr inbounds <4 x i16>, ptr %retval.i, i32 1` CIR generates a bit differently: `%6 = getelementptr <4 x i16>, ptr %retval.i, i64 1` Ptr offest might be trivial because choosing i64 over i32 as index type seems to be LLVM Dialect's choice. The lack of `inbounds` keyword might be an issue as `mlir::cir::PtrStrideOp` is currently not lowering to LLVM:GEPOp with `inbounds` attribute as `mlir::cir::PtrStrideOp` itself has no `inbounds`. It's probably because there was no need for it though we do have an implementation of [`CIRGenFunction::buildCheckedInBoundsGEP` ](https://github.com/llvm/clangir/blob/10d6f4b94da7e0181a070f0265d079419d96cf78/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp#L2762). Anyway, the issue is not in the scope of this PR and should be addressed in a separate PR. If we think this is an issue, I can create another PR and probably add optional attribute to `mlir::cir::PtrStrideOp` to achieve it. In addition to lowering work, a couple of more works: 1. Did a little refactoring on variable name changing into desired CamelBack case. 2. Changed neon-misc RUN Options to be consistent with other neon test files and make test case more concise. --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 58 +- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 520 +++++++++--------- 2 files changed, 298 insertions(+), 280 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index f6dad0196e72..645ada341f5e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2973,8 +2973,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } } - mlir::cir::VectorType Ty = GetNeonType(this, Type); - if (!Ty) + mlir::cir::VectorType ty = GetNeonType(this, Type); + if (!ty) return nullptr; // Not all intrinsics handled by the common case work for AArch64 yet, so only @@ -2991,7 +2991,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, buildAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) return V; - mlir::cir::VectorType VTy = Ty; + mlir::cir::VectorType vTy = ty; llvm::SmallVector args; switch (BuiltinID) { default: @@ -3071,8 +3071,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, // https://developer.arm.com/architectures/instruction-sets/intrinsics/ return buildNeonCall( BuiltinID, *this, - {builder.getExtendedElementVectorType(Ty, true), SInt32Ty}, Ops, - "llvm.aarch64.neon.sqrshrun", Ty, getLoc(E->getExprLoc())); + {builder.getExtendedElementVectorType(ty, true), SInt32Ty}, Ops, + "llvm.aarch64.neon.sqrshrun", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqshrn_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vrshrn_n_v: @@ -3085,7 +3085,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vrnda_v: case NEON::BI__builtin_neon_vrndaq_v: { assert(!MissingFeatures::buildConstrainedFPCall()); - return buildNeonCall(BuiltinID, *this, {Ty}, Ops, "llvm.round", Ty, + return buildNeonCall(BuiltinID, *this, {ty}, Ops, "llvm.round", ty, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndih_f16: { @@ -3412,20 +3412,20 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vld1_v: case NEON::BI__builtin_neon_vld1q_v: { - return builder.createAlignedLoad(Ops[0].getLoc(), VTy, Ops[0], + return builder.createAlignedLoad(Ops[0].getLoc(), vTy, Ops[0], PtrOp0.getAlignment()); } case NEON::BI__builtin_neon_vst1_v: case NEON::BI__builtin_neon_vst1q_v: { - Ops[1] = builder.createBitcast(Ops[1], VTy); + Ops[1] = builder.createBitcast(Ops[1], vTy); (void)builder.createAlignedStore(Ops[1].getLoc(), Ops[1], Ops[0], PtrOp0.getAlignment()); return Ops[1]; } case NEON::BI__builtin_neon_vld1_lane_v: case NEON::BI__builtin_neon_vld1q_lane_v: { - Ops[1] = builder.createBitcast(Ops[1], VTy); - Ops[0] = builder.createAlignedLoad(Ops[0].getLoc(), VTy.getEltType(), + Ops[1] = builder.createBitcast(Ops[1], vTy); + Ops[0] = builder.createAlignedLoad(Ops[0].getLoc(), vTy.getEltType(), Ops[0], PtrOp0.getAlignment()); return builder.create(getLoc(E->getExprLoc()), Ops[1], Ops[0], Ops[2]); @@ -3440,7 +3440,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vst1_lane_v: case NEON::BI__builtin_neon_vst1q_lane_v: { - Ops[1] = builder.createBitcast(Ops[1], Ty); + Ops[1] = builder.createBitcast(Ops[1], ty); Ops[1] = builder.create(Ops[1].getLoc(), Ops[1], Ops[2]); (void)builder.createAlignedStore(getLoc(E->getExprLoc()), Ops[1], Ops[0], @@ -3513,7 +3513,41 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vtrn_v: case NEON::BI__builtin_neon_vtrnq_v: { - llvm_unreachable("NYI"); + // This set of neon intrinsics implement SIMD matrix transpose. + // The matrix transposed is always 2x2, and these intrincis transpose + // multiple 2x2 matrices in parallel, that is why result type is + // always 2-D matrix whose last dimension is 2. + // For example `vtrn_s16` would have: + // input 1: {0, 1, 2, 3} + // input 2; {4, 5, 6, 7} + // This basically represents two 2x2 matrices: + // [ 0, 1 ] and [ 2, 3] + // [ 4, 5 ] [ 6, 7] + // They should be simultaneously and independently transposed. + // Thus, result is : + // { {0, 4, 2, 6}, + // {1, 5, 3, 7 } } + Ops[1] = builder.createBitcast(Ops[1], ty); + Ops[2] = builder.createBitcast(Ops[2], ty); + // Adding a bitcast here as Ops[0] might be a void pointer. + mlir::Value baseAddr = + builder.createBitcast(Ops[0], builder.getPointerTo(ty)); + mlir::Value sv; + mlir::Location loc = getLoc(E->getExprLoc()); + + for (unsigned vi = 0; vi != 2; ++vi) { + llvm::SmallVector indices; + for (unsigned i = 0, e = vTy.getSize(); i != e; i += 2) { + indices.push_back(i + vi); + indices.push_back(i + e + vi); + } + mlir::cir::ConstantOp idx = builder.getConstInt(loc, SInt32Ty, vi); + mlir::Value addr = builder.create( + loc, baseAddr.getType(), baseAddr, idx); + sv = builder.createVecShuffle(loc, Ops[1], Ops[2], indices); + (void)builder.CIRBaseBuilderTy::createStore(loc, sv, addr); + } + return sv; } case NEON::BI__builtin_neon_vuzp_v: case NEON::BI__builtin_neon_vuzpq_v: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index 80afd1bf17c6..42465990244e 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -1,14 +1,20 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -emit-cir -fno-clangir-call-conv-lowering -target-feature +neon %s -o %t.cir + +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -fno-clangir-call-conv-lowering -emit-cir -o %t.cir %s // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -emit-llvm -fno-clangir-call-conv-lowering -target-feature +neon %s -o %t.ll -// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s -// This test file contains tests of AArch64 NEON intrinsics -// that are not covered by other tests. +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -fno-clangir-call-conv-lowering -emit-llvm -o - %s \ +// RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // REQUIRES: aarch64-registered-target || arm-registered-target + +// This test file contains test cases for the intrinsics that are not covered +// by the other neon test files. + #include uint8x8_t test_vset_lane_u8(uint8_t a, uint8x8_t b) { @@ -19,21 +25,9 @@ uint8x8_t test_vset_lane_u8(uint8_t a, uint8x8_t b) { // CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i loc(#loc7) // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local <8 x i8> @test_vset_lane_u8(i8 [[A:%.*]], <8 x i8> [[B:%.*]]) -// LLVM: alloca <8 x i8> -// LLVM: alloca i8 -// LLVM: [[A_ADR:%.*]] = alloca i8, i64 1, align 1 -// LLVM: [[B_ADR:%.*]] = alloca <8 x i8>, i64 1, align 8 -// LLVM: store i8 [[A]], ptr [[A_ADR]], align 1 -// LLVM: store <8 x i8> [[B]], ptr [[B_ADR]], align 8 -// LLVM: [[TMP_A0:%.*]] = load i8, ptr [[A_ADR]], align 1 -// LLVM: store i8 [[TMP_A0]], ptr [[S0:%.*]], align 1 -// LLVM: [[TMP_B0:%.*]] = load <8 x i8>, ptr [[B_ADR]], align 8 -// LLVM: store <8 x i8> [[TMP_B0]], ptr [[S1:%.*]], align 8 -// LLVM: [[INTRN_ARG0:%.*]] = load i8, ptr [[S0]], align 1 -// LLVM: [[INTRN_ARG1:%.*]] = load <8 x i8>, ptr [[S1]], align 8 -// LLVM: [[INTRN_RES:%.*]] = insertelement <8 x i8> [[INTRN_ARG1]], i8 [[INTRN_ARG0]], i32 7 -// LLVM: ret <8 x i8> {{%.*}} +// LLVM: {{.*}}test_vset_lane_u8(i8{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <8 x i8> [[B]], i8 [[A]], i32 7 +// LLVM: ret <8 x i8> [[INTRN_RES]] uint16x4_t test_vset_lane_u16(uint16_t a, uint16x4_t b) { return vset_lane_u16(a, b, 3); @@ -43,21 +37,9 @@ uint16x4_t test_vset_lane_u16(uint16_t a, uint16x4_t b) { // CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local <4 x i16> @test_vset_lane_u16(i16 [[A:%.*]], <4 x i16> [[B:%.*]]) -// LLVM: alloca <4 x i16> -// LLVM: alloca i16 -// LLVM: [[A_ADR:%.*]] = alloca i16, i64 1, align 2 -// LLVM: [[B_ADR:%.*]] = alloca <4 x i16>, i64 1, align 8 -// LLVM: store i16 [[A]], ptr [[A_ADR]], align 2 -// LLVM: store <4 x i16> [[B]], ptr [[B_ADR]], align 8 -// LLVM: [[TMP_A0:%.*]] = load i16, ptr [[A_ADR]], align 2 -// LLVM: store i16 [[TMP_A0]], ptr [[S0:%.*]], align 2 -// LLVM: [[TMP_B0:%.*]] = load <4 x i16>, ptr [[B_ADR]], align 8 -// LLVM: store <4 x i16> [[TMP_B0]], ptr [[S1:%.*]], align 8 -// LLVM: [[INTRN_ARG0:%.*]] = load i16, ptr [[S0]], align 2 -// LLVM: [[INTRN_ARG1:%.*]] = load <4 x i16>, ptr [[S1]], align 8 -// LLVM: [[INTRN_RES:%.*]] = insertelement <4 x i16> [[INTRN_ARG1]], i16 [[INTRN_ARG0]], i32 3 -// LLVM: ret <4 x i16> {{%.*}} +// LLVM: {{.*}}test_vset_lane_u16(i16{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <4 x i16> [[B]], i16 [[A]], i32 3 +// LLVM: ret <4 x i16> [[INTRN_RES]] uint32x2_t test_vset_lane_u32(uint32_t a, uint32x2_t b) { return vset_lane_u32(a, b, 1); @@ -67,24 +49,11 @@ uint32x2_t test_vset_lane_u32(uint32_t a, uint32x2_t b) { // CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local <2 x i32> @test_vset_lane_u32(i32 [[A:%.*]], <2 x i32> [[B:%.*]]) -// LLVM: alloca <2 x i32> -// LLVM: alloca i32 -// LLVM: [[A_ADR:%.*]] = alloca i32, i64 1, align 4 -// LLVM: [[B_ADR:%.*]] = alloca <2 x i32>, i64 1, align 8 -// LLVM: store i32 [[A]], ptr [[A_ADR]], align 4 -// LLVM: store <2 x i32> [[B]], ptr [[B_ADR]], align 8 -// LLVM: [[TMP_A0:%.*]] = load i32, ptr [[A_ADR]], align 4 -// LLVM: store i32 [[TMP_A0]], ptr [[S0:%.*]], align 4 -// LLVM: [[TMP_B0:%.*]] = load <2 x i32>, ptr [[B_ADR]], align 8 -// LLVM: store <2 x i32> [[TMP_B0]], ptr [[S1:%.*]], align 8 -// LLVM: [[INTRN_ARG0:%.*]] = load i32, ptr [[S0]], align 4 -// LLVM: [[INTRN_ARG1:%.*]] = load <2 x i32>, ptr [[S1]], align 8 -// LLVM: [[INTRN_RES:%.*]] = insertelement <2 x i32> [[INTRN_ARG1]], i32 [[INTRN_ARG0]], i32 1 -// LLVM: ret <2 x i32> {{%.*}} - - -int64x1_t test_vset_lane_u64(int64_t a, int64x1_t b) { +// LLVM: {{.*}}test_vset_lane_u32(i32{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <2 x i32> [[B]], i32 [[A]], i32 1 +// LLVM: ret <2 x i32> [[INTRN_RES]] + +uint64x1_t test_vset_lane_u64(uint64_t a, uint64x1_t b) { return vset_lane_u64(a, b, 0); } @@ -92,21 +61,9 @@ int64x1_t test_vset_lane_u64(int64_t a, int64x1_t b) { // CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local <1 x i64> @test_vset_lane_u64(i64 [[A:%.*]], <1 x i64> [[B:%.*]]) -// LLVM: alloca <1 x i64> -// LLVM: alloca i64 -// LLVM: [[A_ADR:%.*]] = alloca i64, i64 1, align 8 -// LLVM: [[B_ADR:%.*]] = alloca <1 x i64>, i64 1, align 8 -// LLVM: store i64 [[A]], ptr [[A_ADR]], align 8 -// LLVM: store <1 x i64> [[B]], ptr [[B_ADR]], align 8 -// LLVM: [[TMP_A0:%.*]] = load i64, ptr [[A_ADR]], align 8 -// LLVM: store i64 [[TMP_A0]], ptr [[S0:%.*]], align 8 -// LLVM: [[TMP_B0:%.*]] = load <1 x i64>, ptr [[B_ADR]], align 8 -// LLVM: store <1 x i64> [[TMP_B0]], ptr [[S1:%.*]], align 8 -// LLVM: [[INTRN_ARG0:%.*]] = load i64, ptr [[S0]], align 8 -// LLVM: [[INTRN_ARG1:%.*]] = load <1 x i64>, ptr [[S1]], align 8 -// LLVM: [[INTRN_RES:%.*]] = insertelement <1 x i64> [[INTRN_ARG1]], i64 [[INTRN_ARG0]], i32 0 -// LLVM: ret <1 x i64> {{%.*}} +// LLVM: {{.*}}test_vset_lane_u64(i64{{.*}}[[A:%.*]], <1 x i64>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <1 x i64> [[B]], i64 [[A]], i32 0 +// LLVM: ret <1 x i64> [[INTRN_RES]] float32x2_t test_vset_lane_f32(float32_t a, float32x2_t b) { return vset_lane_f32(a, b, 1); @@ -116,21 +73,9 @@ float32x2_t test_vset_lane_f32(float32_t a, float32x2_t b) { // CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local <2 x float> @test_vset_lane_f32(float [[A:%.*]], <2 x float> [[B:%.*]]) -// LLVM: alloca <2 x float> -// LLVM: alloca float -// LLVM: [[A_ADR:%.*]] = alloca float, i64 1, align 4 -// LLVM: [[B_ADR:%.*]] = alloca <2 x float>, i64 1, align 8 -// LLVM: store float [[A]], ptr [[A_ADR]], align 4 -// LLVM: store <2 x float> [[B]], ptr [[B_ADR]], align 8 -// LLVM: [[TMP_A0:%.*]] = load float, ptr [[A_ADR]], align 4 -// LLVM: store float [[TMP_A0]], ptr [[S0:%.*]], align 4 -// LLVM: [[TMP_B0:%.*]] = load <2 x float>, ptr [[B_ADR]], align 8 -// LLVM: store <2 x float> [[TMP_B0]], ptr [[S1:%.*]], align 8 -// LLVM: [[INTRN_ARG0:%.*]] = load float, ptr [[S0]], align 4 -// LLVM: [[INTRN_ARG1:%.*]] = load <2 x float>, ptr [[S1]], align 8 -// LLVM: [[INTRN_RES:%.*]] = insertelement <2 x float> [[INTRN_ARG1]], float [[INTRN_ARG0]], i32 1 -// LLVM: ret <2 x float> {{%.*}} +// LLVM: {{.*}}test_vset_lane_f32(float{{.*}}[[A:%.*]], <2 x float>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <2 x float> [[B]], float [[A]], i32 1 +// LLVM: ret <2 x float> [[INTRN_RES]] uint8x16_t test_vsetq_lane_u8(uint8_t a, uint8x16_t b) { return vsetq_lane_u8(a, b, 15); @@ -140,21 +85,9 @@ uint8x16_t test_vsetq_lane_u8(uint8_t a, uint8x16_t b) { // CIR: [[IDX:%.*]] = cir.const #cir.int<15> : !s32i // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local <16 x i8> @test_vsetq_lane_u8(i8 [[A:%.*]], <16 x i8> [[B:%.*]]) -// LLVM: alloca <16 x i8> -// LLVM: alloca i8 -// LLVM: [[A_ADR:%.*]] = alloca i8, i64 1, align 1 -// LLVM: [[B_ADR:%.*]] = alloca <16 x i8>, i64 1, align 16 -// LLVM: store i8 [[A]], ptr [[A_ADR]], align 1 -// LLVM: store <16 x i8> [[B]], ptr [[B_ADR]], align 16 -// LLVM: [[TMP_A0:%.*]] = load i8, ptr [[A_ADR]], align 1 -// LLVM: store i8 [[TMP_A0]], ptr [[S0:%.*]], align 1 -// LLVM: [[TMP_B0:%.*]] = load <16 x i8>, ptr [[B_ADR]], align 16 -// LLVM: store <16 x i8> [[TMP_B0]], ptr [[S1:%.*]], align 16 -// LLVM: [[INTRN_ARG0:%.*]] = load i8, ptr [[S0]], align 1 -// LLVM: [[INTRN_ARG1:%.*]] = load <16 x i8>, ptr [[S1]], align 16 -// LLVM: [[INTRN_RES:%.*]] = insertelement <16 x i8> [[INTRN_ARG1]], i8 [[INTRN_ARG0]], i32 15 -// LLVM: ret <16 x i8> {{%.*}} +// LLVM: {{.*}}test_vsetq_lane_u8(i8{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <16 x i8> [[B]], i8 [[A]], i32 15 +// LLVM: ret <16 x i8> [[INTRN_RES]] uint16x8_t test_vsetq_lane_u16(uint16_t a, uint16x8_t b) { return vsetq_lane_u16(a, b, 7); @@ -164,21 +97,9 @@ uint16x8_t test_vsetq_lane_u16(uint16_t a, uint16x8_t b) { // CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local <8 x i16> @test_vsetq_lane_u16(i16 [[A:%.*]], <8 x i16> [[B:%.*]]) -// LLVM: alloca <8 x i16> -// LLVM: alloca i16 -// LLVM: [[A_ADR:%.*]] = alloca i16, i64 1, align 2 -// LLVM: [[B_ADR:%.*]] = alloca <8 x i16>, i64 1, align 16 -// LLVM: store i16 [[A]], ptr [[A_ADR]], align 2 -// LLVM: store <8 x i16> [[B]], ptr [[B_ADR]], align 16 -// LLVM: [[TMP_A0:%.*]] = load i16, ptr [[A_ADR]], align 2 -// LLVM: store i16 [[TMP_A0]], ptr [[S0:%.*]], align 2 -// LLVM: [[TMP_B0:%.*]] = load <8 x i16>, ptr [[B_ADR]], align 16 -// LLVM: store <8 x i16> [[TMP_B0]], ptr [[S1:%.*]], align 16 -// LLVM: [[INTRN_ARG0:%.*]] = load i16, ptr [[S0]], align 2 -// LLVM: [[INTRN_ARG1:%.*]] = load <8 x i16>, ptr [[S1]], align 16 -// LLVM: [[INTRN_RES:%.*]] = insertelement <8 x i16> [[INTRN_ARG1]], i16 [[INTRN_ARG0]], i32 7 -// LLVM: ret <8 x i16> {{%.*}} +// LLVM: {{.*}}test_vsetq_lane_u16(i16{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <8 x i16> [[B]], i16 [[A]], i32 7 +// LLVM: ret <8 x i16> [[INTRN_RES]] uint32x4_t test_vsetq_lane_u32(uint32_t a, uint32x4_t b) { return vsetq_lane_u32(a, b, 3); @@ -188,21 +109,9 @@ uint32x4_t test_vsetq_lane_u32(uint32_t a, uint32x4_t b) { // CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local <4 x i32> @test_vsetq_lane_u32(i32 [[A:%.*]], <4 x i32> [[B:%.*]]) -// LLVM: alloca <4 x i32> -// LLVM: alloca i32 -// LLVM: [[A_ADR:%.*]] = alloca i32, i64 1, align 4 -// LLVM: [[B_ADR:%.*]] = alloca <4 x i32>, i64 1, align 16 -// LLVM: store i32 [[A]], ptr [[A_ADR]], align 4 -// LLVM: store <4 x i32> [[B]], ptr [[B_ADR]], align 16 -// LLVM: [[TMP_A0:%.*]] = load i32, ptr [[A_ADR]], align 4 -// LLVM: store i32 [[TMP_A0]], ptr [[S0:%.*]], align 4 -// LLVM: [[TMP_B0:%.*]] = load <4 x i32>, ptr [[B_ADR]], align 16 -// LLVM: store <4 x i32> [[TMP_B0]], ptr [[S1:%.*]], align 16 -// LLVM: [[INTRN_ARG0:%.*]] = load i32, ptr [[S0]], align 4 -// LLVM: [[INTRN_ARG1:%.*]] = load <4 x i32>, ptr [[S1]], align 16 -// LLVM: [[INTRN_RES:%.*]] = insertelement <4 x i32> [[INTRN_ARG1]], i32 [[INTRN_ARG0]], i32 3 -// LLVM: ret <4 x i32> {{%.*}} +// LLVM: {{.*}}test_vsetq_lane_u32(i32{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <4 x i32> [[B]], i32 [[A]], i32 3 +// LLVM: ret <4 x i32> [[INTRN_RES]] int64x2_t test_vsetq_lane_s64(int64_t a, int64x2_t b) { return vsetq_lane_s64(a, b, 1); @@ -212,21 +121,9 @@ int64x2_t test_vsetq_lane_s64(int64_t a, int64x2_t b) { // CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local <2 x i64> @test_vsetq_lane_s64(i64 [[A:%.*]], <2 x i64> [[B:%.*]]) -// LLVM: alloca <2 x i64> -// LLVM: alloca i64 -// LLVM: [[A_ADR:%.*]] = alloca i64, i64 1, align 8 -// LLVM: [[B_ADR:%.*]] = alloca <2 x i64>, i64 1, align 16 -// LLVM: store i64 [[A]], ptr [[A_ADR]], align 8 -// LLVM: store <2 x i64> [[B]], ptr [[B_ADR]], align 16 -// LLVM: [[TMP_A0:%.*]] = load i64, ptr [[A_ADR]], align 8 -// LLVM: store i64 [[TMP_A0]], ptr [[S0:%.*]], align 8 -// LLVM: [[TMP_B0:%.*]] = load <2 x i64>, ptr [[B_ADR]], align 16 -// LLVM: store <2 x i64> [[TMP_B0]], ptr [[S1:%.*]], align 16 -// LLVM: [[INTRN_ARG0:%.*]] = load i64, ptr [[S0]], align 8 -// LLVM: [[INTRN_ARG1:%.*]] = load <2 x i64>, ptr [[S1]], align 16 -// LLVM: [[INTRN_RES:%.*]] = insertelement <2 x i64> [[INTRN_ARG1]], i64 [[INTRN_ARG0]], i32 1 -// LLVM: ret <2 x i64> {{%.*}} +// LLVM: {{.*}}test_vsetq_lane_s64(i64{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <2 x i64> [[B]], i64 [[A]], i32 1 +// LLVM: ret <2 x i64> [[INTRN_RES]] float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) { return vsetq_lane_f32(a, b, 3); @@ -236,21 +133,9 @@ float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) { // CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local <4 x float> @test_vsetq_lane_f32(float [[A:%.*]], <4 x float> [[B:%.*]]) -// LLVM: alloca <4 x float> -// LLVM: alloca float -// LLVM: [[A_ADR:%.*]] = alloca float, i64 1, align 4 -// LLVM: [[B_ADR:%.*]] = alloca <4 x float>, i64 1, align 16 -// LLVM: store float [[A]], ptr [[A_ADR]], align 4 -// LLVM: store <4 x float> [[B]], ptr [[B_ADR]], align 16 -// LLVM: [[TMP_A0:%.*]] = load float, ptr [[A_ADR]], align 4 -// LLVM: store float [[TMP_A0]], ptr [[S0:%.*]], align 4 -// LLVM: [[TMP_B0:%.*]] = load <4 x float>, ptr [[B_ADR]], align 16 -// LLVM: store <4 x float> [[TMP_B0]], ptr [[S1:%.*]], align 16 -// LLVM: [[INTRN_ARG0:%.*]] = load float, ptr [[S0]], align 4 -// LLVM: [[INTRN_ARG1:%.*]] = load <4 x float>, ptr [[S1]], align 16 -// LLVM: [[INTRN_RES:%.*]] = insertelement <4 x float> [[INTRN_ARG1]], float [[INTRN_ARG0]], i32 3 -// LLVM: ret <4 x float> {{%.*}} +// LLVM: {{.*}}test_vsetq_lane_f32(float{{.*}}[[A:%.*]], <4 x float>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <4 x float> [[B]], float [[A]], i32 3 +// LLVM: ret <4 x float> [[INTRN_RES]] uint8_t test_vget_lane_u8(uint8x8_t a) { return vget_lane_u8(a, 7); @@ -260,15 +145,9 @@ uint8_t test_vget_lane_u8(uint8x8_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local i8 @test_vget_lane_u8(<8 x i8> [[ARG:%.*]]) -// LLVM: alloca <8 x i8> -// LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i8>, i64 1, align 8 -// LLVM: store <8 x i8> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <8 x i8>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <8 x i8> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <8 x i8>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <8 x i8> [[INTRN_ARG]], i32 7 -// LLVM: ret i8 {{%.*}} +// LLVM: {{.*}}test_vget_lane_u8(<8 x i8>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <8 x i8> [[ARG]], i32 7 +// LLVM: ret i8 [[RES]] uint8_t test_vgetq_lane_u8(uint8x16_t a) { return vgetq_lane_u8(a, 15); @@ -278,15 +157,9 @@ uint8_t test_vgetq_lane_u8(uint8x16_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<15> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local i8 @test_vgetq_lane_u8(<16 x i8> [[ARG:%.*]]) -// LLVM: alloca <16 x i8> -// LLVM: [[ARG_SAVE:%.*]] = alloca <16 x i8>, i64 1, align 16 -// LLVM: store <16 x i8> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <16 x i8>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <16 x i8> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <16 x i8>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <16 x i8> [[INTRN_ARG]], i32 15 -// LLVM: ret i8 {{%.*}} +// LLVM: {{.*}}test_vgetq_lane_u8(<16 x i8>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <16 x i8> [[ARG]], i32 15 +// LLVM: ret i8 [[RES]] uint16_t test_vget_lane_u16(uint16x4_t a) { return vget_lane_u16(a, 3); @@ -296,15 +169,9 @@ uint16_t test_vget_lane_u16(uint16x4_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local i16 @test_vget_lane_u16(<4 x i16> [[ARG:%.*]]) -// LLVM: alloca <4 x i16> -// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i16>, i64 1, align 8 -// LLVM: store <4 x i16> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <4 x i16>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <4 x i16> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <4 x i16>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <4 x i16> [[INTRN_ARG]], i32 3 -// LLVM: ret i16 {{%.*}} +// LLVM: {{.*}}test_vget_lane_u16(<4 x i16>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <4 x i16> [[ARG]], i32 3 +// LLVM: ret i16 [[RES]] uint16_t test_vgetq_lane_u16(uint16x8_t a) { return vgetq_lane_u16(a, 7); @@ -314,15 +181,9 @@ uint16_t test_vgetq_lane_u16(uint16x8_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<7> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local i16 @test_vgetq_lane_u16(<8 x i16> [[ARG:%.*]]) -// LLVM: alloca <8 x i16> -// LLVM: [[ARG_SAVE:%.*]] = alloca <8 x i16>, i64 1, align 16 -// LLVM: store <8 x i16> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <8 x i16>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <8 x i16> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <8 x i16>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <8 x i16> [[INTRN_ARG]], i32 7 -// LLVM: ret i16 {{%.*}} +// LLVM: {{.*}}test_vgetq_lane_u16(<8 x i16>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <8 x i16> [[ARG]], i32 7 +// LLVM: ret i16 [[RES]] uint32_t test_vget_lane_u32(uint32x2_t a) { return vget_lane_u32(a, 1); @@ -332,15 +193,9 @@ uint32_t test_vget_lane_u32(uint32x2_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local i32 @test_vget_lane_u32(<2 x i32> [[ARG:%.*]]) -// LLVM: alloca <2 x i32> -// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i32>, i64 1, align 8 -// LLVM: store <2 x i32> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <2 x i32>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <2 x i32> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <2 x i32>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <2 x i32> [[INTRN_ARG]], i32 1 -// LLVM: ret i32 {{%.*}} +// LLVM: {{.*}}test_vget_lane_u32(<2 x i32>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <2 x i32> [[ARG]], i32 1 +// LLVM: ret i32 [[RES]] uint32_t test_vgetq_lane_u32(uint32x4_t a) { return vgetq_lane_u32(a, 3); @@ -350,15 +205,9 @@ uint32_t test_vgetq_lane_u32(uint32x4_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local i32 @test_vgetq_lane_u32(<4 x i32> [[ARG:%.*]]) -// LLVM: alloca <4 x i32> -// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x i32>, i64 1, align 16 -// LLVM: store <4 x i32> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <4 x i32>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <4 x i32> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <4 x i32>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <4 x i32> [[INTRN_ARG]], i32 3 -// LLVM: ret i32 {{%.*}} +// LLVM: {{.*}}test_vgetq_lane_u32(<4 x i32>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <4 x i32> [[ARG]], i32 3 +// LLVM: ret i32 [[RES]] uint64_t test_vget_lane_u64(uint64x1_t a) { return vget_lane_u64(a, 0); @@ -368,15 +217,9 @@ uint64_t test_vget_lane_u64(uint64x1_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local i64 @test_vget_lane_u64(<1 x i64> [[ARG:%.*]]) -// LLVM: alloca <1 x i64> -// LLVM: [[ARG_SAVE:%.*]] = alloca <1 x i64>, i64 1, align 8 -// LLVM: store <1 x i64> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <1 x i64>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <1 x i64> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <1 x i64>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <1 x i64> [[INTRN_ARG]], i32 0 -// LLVM: ret i64 {{%.*}} +// LLVM: {{.*}}test_vget_lane_u64(<1 x i64>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <1 x i64> [[ARG]], i32 0 +// LLVM: ret i64 [[RES]] uint64_t test_vgetq_lane_u64(uint64x2_t a) { return vgetq_lane_u64(a, 1); @@ -386,15 +229,9 @@ uint64_t test_vgetq_lane_u64(uint64x2_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local i64 @test_vgetq_lane_u64(<2 x i64> [[ARG:%.*]]) -// LLVM: alloca <2 x i64> -// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x i64>, i64 1, align 16 -// LLVM: store <2 x i64> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <2 x i64>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <2 x i64> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <2 x i64>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <2 x i64> [[INTRN_ARG]], i32 1 -// LLVM: ret i64 {{%.*}} +// LLVM: {{.*}}test_vgetq_lane_u64(<2 x i64>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <2 x i64> [[ARG]], i32 1 +// LLVM: ret i64 [[RES]] float32_t test_vget_lane_f32(float32x2_t a) { return vget_lane_f32(a, 1); @@ -404,15 +241,9 @@ float32_t test_vget_lane_f32(float32x2_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local float @test_vget_lane_f32(<2 x float> [[ARG:%.*]]) -// LLVM: alloca <2 x float> -// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x float>, i64 1, align 8 -// LLVM: store <2 x float> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <2 x float>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <2 x float> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <2 x float>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <2 x float> [[INTRN_ARG]], i32 1 -// LLVM: ret float {{%.*}} +// LLVM: {{.*}}test_vget_lane_f32(<2 x float>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <2 x float> [[ARG]], i32 1 +// LLVM: ret float [[RES]] float64_t test_vget_lane_f64(float64x1_t a) { return vget_lane_f64(a, 0); @@ -422,15 +253,9 @@ float64_t test_vget_lane_f64(float64x1_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local double @test_vget_lane_f64(<1 x double> [[ARG:%.*]]) -// LLVM: alloca <1 x double> -// LLVM: [[ARG_SAVE:%.*]] = alloca <1 x double>, i64 1, align 8 -// LLVM: store <1 x double> [[ARG]], ptr [[ARG_SAVE]], align 8 -// LLVM: [[TMP:%.*]] = load <1 x double>, ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: store <1 x double> [[TMP]], ptr [[S0:%.*]], align 8 -// LLVM: [[INTRN_ARG:%.*]] = load <1 x double>, ptr [[S0]], align 8 -// LLVM: {{%.*}} = extractelement <1 x double> [[INTRN_ARG]], i32 0 -// LLVM: ret double {{%.*}} +// LLVM: {{.*}}test_vget_lane_f64(<1 x double>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <1 x double> [[ARG]], i32 0 +// LLVM: ret double [[RES]] float32_t test_vgetq_lane_f32(float32x4_t a) { return vgetq_lane_f32(a, 3); @@ -440,15 +265,9 @@ float32_t test_vgetq_lane_f32(float32x4_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<3> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local float @test_vgetq_lane_f32(<4 x float> [[ARG:%.*]]) -// LLVM: alloca <4 x float> -// LLVM: [[ARG_SAVE:%.*]] = alloca <4 x float>, i64 1, align 16 -// LLVM: store <4 x float> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <4 x float>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <4 x float> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <4 x float>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <4 x float> [[INTRN_ARG]], i32 3 -// LLVM: ret float {{%.*}} +// LLVM: {{.*}}test_vgetq_lane_f32(<4 x float>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <4 x float> [[ARG]], i32 3 +// LLVM: ret float [[RES]] float64_t test_vgetq_lane_f64(float64x2_t a) { return vgetq_lane_f64(a, 1); @@ -458,12 +277,177 @@ float64_t test_vgetq_lane_f64(float64x2_t a) { // CIR: [[IDX:%.*]] = cir.const #cir.int<1> : !s32i // CIR: {{%.*}} = cir.vec.extract {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: define dso_local double @test_vgetq_lane_f64(<2 x double> [[ARG:%.*]]) -// LLVM: alloca <2 x double> -// LLVM: [[ARG_SAVE:%.*]] = alloca <2 x double>, i64 1, align 16 -// LLVM: store <2 x double> [[ARG]], ptr [[ARG_SAVE]], align 16 -// LLVM: [[TMP:%.*]] = load <2 x double>, ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: store <2 x double> [[TMP]], ptr [[S0:%.*]], align 16 -// LLVM: [[INTRN_ARG:%.*]] = load <2 x double>, ptr [[S0]], align 16 -// LLVM: {{%.*}} = extractelement <2 x double> [[INTRN_ARG]], i32 1 -// LLVM: ret double {{%.*}} +// LLVM: {{.*}}test_vgetq_lane_f64(<2 x double>{{.*}}[[ARG:%.*]]) +// LLVM: [[RES:%.*]] = extractelement <2 x double> [[ARG]], i32 1 +// LLVM: ret double [[RES]] + +uint8x8x2_t test_vtrn_u8(uint8x8_t a, uint8x8_t b) { + return vtrn_u8(a, b); + + // CIR-LABEL: vtrn_u8 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<8> : !s32i, #cir.int<2> : !s32i, #cir.int<10> : !s32i, + // CIR-SAME: #cir.int<4> : !s32i, #cir.int<12> : !s32i, #cir.int<6> : !s32i, + // CIR-SAME: #cir.int<14> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<9> : !s32i, #cir.int<3> : !s32i, #cir.int<11> : !s32i, + // CIR-SAME: #cir.int<5> : !s32i, #cir.int<13> : !s32i, #cir.int<7> : !s32i, #cir.int<15> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vtrn_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VTRN:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[B]], + // LLVM-SAME: <8 x i32> + // LLVM: store <8 x i8> [[VTRN]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<8 x i8>, ptr [[RES]], i64 1 + // LLVM: [[VTRN1:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[B]], <8 x i32> + // LLVM: store <8 x i8> [[VTRN1]], ptr [[RES1]], align 8 + // LLVM: ret %struct.uint8x8x2_t {{.*}} +} + +uint16x4x2_t test_vtrn_u16(uint16x4_t a, uint16x4_t b) { + return vtrn_u16(a, b); + + // CIR-LABEL: vtrn_u16 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<4> : !s32i, #cir.int<2> : !s32i, #cir.int<6> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<5> : !s32i, #cir.int<3> : !s32i, #cir.int<7> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vtrn_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[VTRN:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[B]], + // LLVM-SAME: <4 x i32> + // LLVM: store <4 x i16> [[VTRN]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<4 x i16>, ptr [[RES]], i64 1 + // LLVM: [[VTRN1:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[B]], + // LLVM-SAME: <4 x i32> + // LLVM: store <4 x i16> [[VTRN1]], ptr [[RES1]], align 8 + // LLVM: ret %struct.uint16x4x2_t {{.*}} +} + +int32x2x2_t test_vtrn_s32(int32x2_t a, int32x2_t b) { + return vtrn_s32(a, b); + + // CIR-LABEL: vtrn_s32 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vtrn_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[VTRN:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: store <2 x i32> [[VTRN]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<2 x i32>, ptr [[RES]], i64 1 + // LLVM: [[VTRN1:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: store <2 x i32> [[VTRN1]], ptr [[RES1]], align 8 + // LLVM: ret %struct.int32x2x2_t {{.*}} +} + +uint8x16x2_t test_vtrnq_u8(uint8x16_t a, uint8x16_t b) { + return vtrnq_u8(a, b); + + // CIR-LABEL: vtrnq_u8 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<16> : !s32i, #cir.int<2> : !s32i, #cir.int<18> : !s32i, + // CIR-SAME: #cir.int<4> : !s32i, #cir.int<20> : !s32i, #cir.int<6> : !s32i, #cir.int<22> : !s32i, + // CIR-SAME: #cir.int<8> : !s32i, #cir.int<24> : !s32i, #cir.int<10> : !s32i, #cir.int<26> : !s32i, + // CIR-SAME: #cir.int<12> : !s32i, #cir.int<28> : !s32i, #cir.int<14> : !s32i, #cir.int<30> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<17> : !s32i, #cir.int<3> : !s32i, #cir.int<19> : !s32i, + // CIR-SAME: #cir.int<5> : !s32i, #cir.int<21> : !s32i, #cir.int<7> : !s32i, #cir.int<23> : !s32i, + // CIR-SAME: #cir.int<9> : !s32i, #cir.int<25> : !s32i, #cir.int<11> : !s32i, #cir.int<27> : !s32i, + // CIR-SAME: #cir.int<13> : !s32i, #cir.int<29> : !s32i, #cir.int<15> : !s32i, #cir.int<31> : !s32i] : !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vtrnq_u8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VTRN:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[B]], + // LLVM-SAME: <16 x i32> + // LLVM: store <16 x i8> [[VTRN]], ptr [[RES:%.*]], align 16 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<16 x i8>, ptr [[RES]], i64 1 + // LLVM: [[VTRN1:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[B]], + // LLVM-SAME: <16 x i32> + // LLVM: store <16 x i8> [[VTRN1]], ptr [[RES1]], align 16 + // LLVM: ret %struct.uint8x16x2_t {{.*}} +} + +int16x8x2_t test_vtrnq_s16(int16x8_t a, int16x8_t b) { + return vtrnq_s16(a, b); + + // CIR-LABEL: vtrnq_s16 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<8> : !s32i, #cir.int<2> : !s32i, #cir.int<10> : !s32i, + // CIR-SAME: #cir.int<4> : !s32i, #cir.int<12> : !s32i, #cir.int<6> : !s32i, + // CIR-SAME: #cir.int<14> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<9> : !s32i, #cir.int<3> : !s32i, #cir.int<11> : !s32i, + // CIR-SAME: #cir.int<5> : !s32i, #cir.int<13> : !s32i, #cir.int<7> : !s32i, #cir.int<15> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vtrnq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[VTRN:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[B]], + // LLVM-SAME: <8 x i32> + // LLVM: store <8 x i16> [[VTRN]], ptr [[RES:%.*]], align 16 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<8 x i16>, ptr [[RES]], i64 1 + // LLVM: [[VTRN1:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[B]], <8 x i32> + // LLVM: store <8 x i16> [[VTRN1]], ptr [[RES1]], align 16 + // LLVM: ret %struct.int16x8x2_t {{.*}} +} + +uint32x4x2_t test_vtrnq_u32(uint32x4_t a, uint32x4_t b) { + return vtrnq_u32(a, b); + + // CIR-LABEL: vtrnq_u32 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<4> : !s32i, #cir.int<2> : !s32i, #cir.int<6> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<5> : !s32i, #cir.int<3> : !s32i, #cir.int<7> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + // LLVM: ret %struct.uint32x4x2_t {{.*}} +} From 4d1fa029e67d9af282aa19ca9de601cad23b2cdc Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 11 Oct 2024 19:43:04 -0400 Subject: [PATCH 1938/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vext_v and neon_vextq_v (#951) as title. There are two highlights of the PR 1. The PR introduced a new test file to cover neon intrinsics that move data, which is a big category. This would the 5th neon test file. And we're committed to keep total number of neon test files within 6. This file uses another opt option instcombine, which makes test LLVM code more concise, and our -fclangir generated LLVM code would be identical to OG with this. It looks like OG did some instcombine optimization. 2. `getIntFromMLIRValue` helper function could be substituted by [`mlir::cir::IntAttr getConstOpIntAttr` in CIRGenAtomic.cpp](https://github.com/llvm/clangir/blob/24b24557c98d1c031572a567b658cfb6254f8a89/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp#L337). The function `mlir::cir::IntAttr getConstOpIntAttr` is doing more than `getIntFromMLIRValue`, and there is FIXME in the comment, so not sure if we should just use `mlir::cir::IntAttr getConstOpIntAttr`, either is fine with me. --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 20 ++ clang/test/CIR/CodeGen/AArch64/neon-ext-mov.c | 215 ++++++++++++++++++ 2 files changed, 235 insertions(+) create mode 100644 clang/test/CIR/CodeGen/AArch64/neon-ext-mov.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 645ada341f5e..b52c0acbda9d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2195,6 +2195,14 @@ mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf, } } +/// Get integer from a mlir::Value that is an int constant or a constant op. +static int64_t getIntValueFromConstOp(mlir::Value val) { + auto constOp = mlir::cast(val.getDefiningOp()); + return (mlir::cast(constOp.getValue())) + .getValue() + .getSExtValue(); +} + mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, @@ -2239,6 +2247,18 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( // In CIR, integral cast op supports vector of int type truncating. return builder.createIntCast(ops[0], ty); } + case NEON::BI__builtin_neon_vext_v: + case NEON::BI__builtin_neon_vextq_v: { + int cv = getIntValueFromConstOp(ops[2]); + llvm::SmallVector indices; + for (unsigned i = 0, e = vTy.getSize(); i != e; ++i) + indices.push_back(i + cv); + + ops[0] = builder.createBitcast(ops[0], ty); + ops[1] = builder.createBitcast(ops[1], ty); + return builder.createVecShuffle(getLoc(e->getExprLoc()), ops[0], ops[1], + indices); + } } // This second switch is for the intrinsics that might have a more generic diff --git a/clang/test/CIR/CodeGen/AArch64/neon-ext-mov.c b/clang/test/CIR/CodeGen/AArch64/neon-ext-mov.c new file mode 100644 index 000000000000..bbaf896e5a32 --- /dev/null +++ b/clang/test/CIR/CodeGen/AArch64/neon-ext-mov.c @@ -0,0 +1,215 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ +// RUN: -fclangir -disable-O0-optnone -fno-clangir-call-conv-lowering \ +// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ +// RUN: -fclangir -disable-O0-optnone -fno-clangir-call-conv-lowering \ +// RUN: -flax-vector-conversions=none -emit-llvm -o - %s \ +// RUN: | opt -S -passes=instcombine,mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// REQUIRES: aarch64-registered-target || arm-registered-target + +// This test file contains test cases for the intrinsics that move data between +// registers and vectors, such as mov, get, set, and ext. We dedicate this file +// to them becuase they are many. The file neon.c covers some such intrinsics +// that are not in this file. + +#include + +int8x8_t test_vext_s8(int8x8_t a, int8x8_t b) { + return vext_s8(a, b, 2); + + // CIR-LABEL: vext_s8 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<2> : !s32i, #cir.int<3> : !s32i, #cir.int<4> : !s32i, + // CIR-SAME: #cir.int<5> : !s32i, #cir.int<6> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<8> : !s32i, #cir.int<9> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vext_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[B]], + // LLVM-SAME: <8 x i32> + // LLVM: ret <8 x i8> [[RES]] +} + +int8x16_t test_vextq_s8(int8x16_t a, int8x16_t b) { + return vextq_s8(a, b, 2); + + // CIR-LABEL: vextq_s8 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<2> : !s32i, #cir.int<3> : !s32i, #cir.int<4> : !s32i, + // CIR-SAME: #cir.int<5> : !s32i, #cir.int<6> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<8> : !s32i, #cir.int<9> : !s32i, #cir.int<10> : !s32i, + // CIR-SAME: #cir.int<11> : !s32i, #cir.int<12> : !s32i, #cir.int<13> : !s32i, + // CIR-SAME: #cir.int<14> : !s32i, #cir.int<15> : !s32i, #cir.int<16> : !s32i, + // CIR-SAME: #cir.int<17> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vextq_s8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[B]], + // LLVM-SAME: <16 x i32> + // LLVM: ret <16 x i8> [[RES]] +} + +int16x4_t test_vext_s16(int16x4_t a, int16x4_t b) { + return vext_s16(a, b, 3); + + // CIR-LABEL: vext_s16 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i, + // CIR-SAME: #cir.int<6> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vext_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[B]], + // LLVM-SAME: <4 x i32> + // LLVM: ret <4 x i16> [[RES]] +} + +int16x8_t test_vextq_s16(int16x8_t a, int16x8_t b) { + return vextq_s16(a, b, 3); + + // CIR-LABEL: vextq_s16 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i, + // CIR-SAME: #cir.int<6> : !s32i, #cir.int<7> : !s32i, #cir.int<8> : !s32i, + // CIR-SAME: #cir.int<9> : !s32i, #cir.int<10> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vextq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[B]], + // LLVM-SAME: <8 x i32> + // LLVM: ret <8 x i16> [[RES]] +} + + +uint16x4_t test_vext_u16(uint16x4_t a, uint16x4_t b) { + return vext_u16(a, b, 3); + + // CIR-LABEL: vext_u16 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i, + // CIR-SAME: #cir.int<6> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vext_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[B]], + // LLVM-SAME: <4 x i32> + // LLVM: ret <4 x i16> [[RES]] +} + +uint16x8_t test_vextq_u16(uint16x8_t a, uint16x8_t b) { + return vextq_u16(a, b, 3); + + // CIR-LABEL: vextq_u16 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i, + // CIR-SAME: #cir.int<6> : !s32i, #cir.int<7> : !s32i, #cir.int<8> : !s32i, + // CIR-SAME: #cir.int<9> : !s32i, #cir.int<10> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vextq_u16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[B]], + // LLVM-SAME: <8 x i32> + // LLVM: ret <8 x i16> [[RES]] +} + +int32x2_t test_vext_s32(int32x2_t a, int32x2_t b) { + return vext_s32(a, b, 1); + + // CIR-LABEL: vext_s32 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<2> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vext_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: ret <2 x i32> [[RES]] +} + +int32x4_t test_vextq_s32(int32x4_t a, int32x4_t b) { + return vextq_s32(a, b, 1); + + // CIR-LABEL: vextq_s32 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<2> : !s32i, + // CIR-SAME: #cir.int<3> : !s32i, #cir.int<4> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vextq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> [[B]], + // LLVM-SAME: <4 x i32> + // LLVM: ret <4 x i32> [[RES]] +} + +int64x1_t test_vext_s64(int64x1_t a, int64x1_t b) { + return vext_s64(a, b, 0); + + // CIR-LABEL: vext_s64 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vext_s64(<1 x i64>{{.*}}[[A:%.*]], <1 x i64>{{.*}}[[B:%.*]]) + // LLVM: ret <1 x i64> [[A]] +} + +int64x2_t test_vextq_s64(int64x2_t a, int64x2_t b) { + return vextq_s64(a, b, 1); + + // CIR-LABEL: vextq_s64 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<2> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vextq_s64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <2 x i64> [[A]], <2 x i64> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: ret <2 x i64> [[RES]] +} + +float32x2_t test_vext_f32(float32x2_t a, float32x2_t b) { + return vext_f32(a, b, 1); + + // CIR-LABEL: vext_f32 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<2> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vext_f32(<2 x float>{{.*}}[[A:%.*]], <2 x float>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: ret <2 x float> [[RES]] +} + +float32x4_t test_vextq_f32(float32x4_t a, float32x4_t b) { + return vextq_f32(a, b, 1); + + // CIR-LABEL: vextq_f32 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<2> : !s32i, #cir.int<3> : !s32i, + // CIR-SAME: #cir.int<4> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vextq_f32(<4 x float>{{.*}}[[A:%.*]], <4 x float>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[B]], + // LLVM-SAME: <4 x i32> + // LLVM: ret <4 x float> [[RES]] +} + + +float64x1_t test_vext_f64(float64x1_t a, float64x1_t b) { + return vext_f64(a, b, 0); + + // CIR-LABEL: vext_f64 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vext_f64(<1 x double>{{.*}}[[A:%.*]], <1 x double>{{.*}}[[B:%.*]]) + // LLVM: ret <1 x double> [[A]] +} + +float64x2_t test_vextq_f64(float64x2_t a, float64x2_t b) { + return vextq_f64(a, b, 1); + + // CIR-LABEL: vextq_f64 + // CIR: {{%.*}}= cir.vec.shuffle({{%.*}}, {{%.*}} : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<2> : !s32i] : !cir.vector + + // LLVM: {{.*}}test_vextq_f64(<2 x double>{{.*}}[[A:%.*]], <2 x double>{{.*}}[[B:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <2 x double> [[A]], <2 x double> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: ret <2 x double> [[RES]] +} From 8b18cde45c1c3980e74a2646f5ab1bfa3fe7c7ca Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Mon, 14 Oct 2024 12:24:29 +0800 Subject: [PATCH 1939/2301] [CIR][Lowering] Handling Lowering of multiple dimension array correctly (#961) Close https://github.com/llvm/clangir/issues/957 the previous algorithm to convert a multiple dimension array to a tensor is: fill the value one by one and fill the zero values in conditions. And it has some problems handling the multiple dimension array as above issue shows so that the generated values are not in the same shape with the original array. the new algorithm here is, full fill the values ahead of time with the correct element size and full fill the values to different slots and we only need to maintain the index to write. I feel the new version has better performance (avoid allocation) and better readability slightly. --- clang/lib/CIR/Lowering/LoweringHelpers.cpp | 107 +++++++++------------ clang/test/CIR/Lowering/multi-array.c | 36 +++++++ 2 files changed, 81 insertions(+), 62 deletions(-) create mode 100644 clang/test/CIR/Lowering/multi-array.c diff --git a/clang/lib/CIR/Lowering/LoweringHelpers.cpp b/clang/lib/CIR/Lowering/LoweringHelpers.cpp index 06c92ae12472..3d5dae642cff 100644 --- a/clang/lib/CIR/Lowering/LoweringHelpers.cpp +++ b/clang/lib/CIR/Lowering/LoweringHelpers.cpp @@ -45,84 +45,67 @@ template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { llvm_unreachable("NYI"); } -// return the nested type and quantity of elements for cir.array type. -// e.g: for !cir.array x 1> -// it returns !s32i as return value and stores 3 to elemQuantity. -mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity) { - assert(mlir::isa(Ty) && "expected ArrayType"); - - elemQuantity = 1; - mlir::Type nestTy = Ty; - while (auto ArrTy = mlir::dyn_cast(nestTy)) { - nestTy = ArrTy.getEltType(); - elemQuantity *= ArrTy.getSize(); - } - - return nestTy; -} - -template -void fillTrailingZeros(mlir::cir::ConstArrayAttr attr, - llvm::SmallVectorImpl &values) { - auto numTrailingZeros = attr.getTrailingZerosNum(); - if (numTrailingZeros) { - auto localArrayTy = mlir::dyn_cast(attr.getType()); - assert(localArrayTy && "expected !cir.array"); - - auto nestTy = localArrayTy.getEltType(); - if (!mlir::isa(nestTy)) - values.insert(values.end(), numTrailingZeros, - getZeroInitFromType(nestTy)); - } -} - +/// \param attr the ConstArrayAttr to convert +/// \param values the output parameter, the values array to fill +/// \param currentDims the shpae of tensor we're going to convert to +/// \param dimIndex the current dimension we're processing +/// \param currentIndex the current index in the values array template -void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, - llvm::SmallVectorImpl &values) { +void convertToDenseElementsAttrImpl( + mlir::cir::ConstArrayAttr attr, llvm::SmallVectorImpl &values, + const llvm::SmallVectorImpl ¤tDims, int64_t dimIndex, + int64_t currentIndex) { if (auto stringAttr = mlir::dyn_cast(attr.getElts())) { if (auto arrayType = mlir::dyn_cast(attr.getType())) { for (auto element : stringAttr) { auto intAttr = mlir::cir::IntAttr::get(arrayType.getEltType(), element); - values.push_back(mlir::dyn_cast(intAttr).getValue()); + values[currentIndex++] = mlir::dyn_cast(intAttr).getValue(); } return; } } + dimIndex++; + std::size_t elementsSizeInCurrentDim = 1; + for (std::size_t i = dimIndex; i < currentDims.size(); i++) + elementsSizeInCurrentDim *= currentDims[i]; + auto arrayAttr = mlir::cast(attr.getElts()); for (auto eltAttr : arrayAttr) { if (auto valueAttr = mlir::dyn_cast(eltAttr)) { - values.push_back(valueAttr.getValue()); - } else if (auto subArrayAttr = - mlir::dyn_cast(eltAttr)) { - convertToDenseElementsAttrImpl(subArrayAttr, values); - if (mlir::dyn_cast(subArrayAttr.getElts())) - fillTrailingZeros(subArrayAttr, values); - } else if (auto zeroAttr = mlir::dyn_cast(eltAttr)) { - unsigned numStoredZeros = 0; - auto nestTy = - getNestedTypeAndElemQuantity(zeroAttr.getType(), numStoredZeros); - values.insert(values.end(), numStoredZeros, - getZeroInitFromType(nestTy)); - } else { - llvm_unreachable("unknown element in ConstArrayAttr"); + values[currentIndex++] = valueAttr.getValue(); + continue; } - } - // Only fill in trailing zeros at the local cir.array level where the element - // type isn't another array (for the mult-dim case). - fillTrailingZeros(attr, values); + if (auto subArrayAttr = + mlir::dyn_cast(eltAttr)) { + convertToDenseElementsAttrImpl(subArrayAttr, values, currentDims, + dimIndex, currentIndex); + currentIndex += elementsSizeInCurrentDim; + continue; + } + + if (mlir::isa(eltAttr)) + continue; + + llvm_unreachable("unknown element in ConstArrayAttr"); + } } template -mlir::DenseElementsAttr -convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, - const llvm::SmallVectorImpl &dims, - mlir::Type type) { - auto values = llvm::SmallVector{}; - convertToDenseElementsAttrImpl(attr, values); - return mlir::DenseElementsAttr::get(mlir::RankedTensorType::get(dims, type), - llvm::ArrayRef(values)); +mlir::DenseElementsAttr convertToDenseElementsAttr( + mlir::cir::ConstArrayAttr attr, const llvm::SmallVectorImpl &dims, + mlir::Type elementType, mlir::Type convertedElementType) { + unsigned vector_size = 1; + for (auto dim : dims) + vector_size *= dim; + auto values = llvm::SmallVector( + vector_size, getZeroInitFromType(elementType)); + convertToDenseElementsAttrImpl(attr, values, dims, /*currentDim=*/0, + /*initialIndex=*/0); + return mlir::DenseElementsAttr::get( + mlir::RankedTensorType::get(dims, convertedElementType), + llvm::ArrayRef(values)); } std::optional @@ -151,10 +134,10 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, converter->convertType(type)); if (mlir::isa(type)) return convertToDenseElementsAttr( - constArr, dims, converter->convertType(type)); + constArr, dims, type, converter->convertType(type)); if (mlir::isa(type)) return convertToDenseElementsAttr( - constArr, dims, converter->convertType(type)); + constArr, dims, type, converter->convertType(type)); return std::nullopt; } diff --git a/clang/test/CIR/Lowering/multi-array.c b/clang/test/CIR/Lowering/multi-array.c new file mode 100644 index 000000000000..75db845ce75f --- /dev/null +++ b/clang/test/CIR/Lowering/multi-array.c @@ -0,0 +1,36 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +unsigned char table[10][5] = +{ + {1,0}, + {7,6,5}, +}; + +// LLVM: @table = {{.*}}[10 x [5 x i8]] {{.*}}[5 x i8] c"\01\00\00\00\00", [5 x i8] c"\07\06\05\00\00", [5 x i8] zeroinitializer + +unsigned char table2[15][16] = +{ + {1,0}, + {1,1,0}, + {3,2,1,0}, + {3,2,1,1,0}, + {3,2,3,2,1,0}, + {3,0,1,3,2,5,4}, + {7,6,5,4,3,2,1,1,1,1,1,1,1,1,1}, +}; + +// LLVM: @table2 = {{.*}}[15 x [16 x i8]] {{.*}}[16 x i8] c"\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\01\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\03\02\01\00\00\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\03\02\01\01\00\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\03\02\03\02\01\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\03\00\01\03\02\05\04\00\00\00\00\00\00\00\00\00", [16 x i8] c"\07\06\05\04\03\02\01\01\01\01\01\01\01\01\01\00", [16 x i8] zeroinitializer + +unsigned char table3[15][16] = +{ + {1,1}, + {1,2,2}, + {2,2,2,2}, + {2,2,2,3,3}, + {2,2,3,3,3,3}, + {2,3,3,3,3,3,3}, + {3,3,3,3,3,3,3,4,5,6,7,8,9,10,11}, +}; + +// LLVM: @table3 = {{.*}}[15 x [16 x i8]] {{.*}}[16 x i8] c"\01\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\01\02\02\00\00\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\02\02\02\02\00\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\02\02\02\03\03\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\02\02\03\03\03\03\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\02\03\03\03\03\03\03\00\00\00\00\00\00\00\00\00", [16 x i8] c"\03\03\03\03\03\03\03\04\05\06\07\08\09\0A\0B\00", [16 x i8] zeroinitializer From 3bbbe5290494a24051e4d7d25e68a2575aaff41f Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 14 Oct 2024 13:40:41 -0400 Subject: [PATCH 1940/2301] [CIR][CIRGen][Builtin][Neon] Lower vld1_dup and vld1q_dup (#936) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 6 +- clang/test/CIR/CodeGen/AArch64/neon.c | 208 ++++++++++++++++++ 2 files changed, 213 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index b52c0acbda9d..645cfb14cc5c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3456,7 +3456,11 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vld1_dup_v: case NEON::BI__builtin_neon_vld1q_dup_v: { - llvm_unreachable("NYI"); + cir::Address ptrAddr = PtrOp0.withElementType(vTy.getEltType()); + mlir::Value val = builder.createLoad(getLoc(E->getExprLoc()), ptrAddr); + mlir::cir::VecSplatOp vecSplat = builder.create( + getLoc(E->getExprLoc()), vTy, val); + return vecSplat; } case NEON::BI__builtin_neon_vst1_lane_v: case NEON::BI__builtin_neon_vst1q_lane_v: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index a5067303dc8e..e9079a0c8397 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -17577,3 +17577,211 @@ int32x2_t test_vmovn_s64(int64x2_t a) { // LLVM: [[VMOVN_I:%.*]] = trunc <2 x i64> [[A]] to <2 x i32> // LLVM: ret <2 x i32> [[VMOVN_I]] } + +uint8x8_t test_vld1_dup_u8(uint8_t const * ptr) { + return vld1_dup_u8(ptr); +} + +// CIR-LABEL: vld1_dup_u8 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !u8i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !u8i, !cir.vector + +// LLVM: {{.*}}test_vld1_dup_u8(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i8, ptr [[PTR]], align 1 +// LLVM: [[VEC:%.*]] = insertelement <8 x i8> poison, i8 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <8 x i8> [[VEC]], <8 x i8> poison, <8 x i32> zeroinitializer + +int8x8_t test_vld1_dup_s8(int8_t const * ptr) { + return vld1_dup_s8(ptr); +} + +// CIR-LABEL: test_vld1_dup_s8 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !s8i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !s8i, !cir.vector + +// LLVM: {{.*}}test_vld1_dup_s8(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i8, ptr [[PTR]], align 1 +// LLVM: [[VEC:%.*]] = insertelement <8 x i8> poison, i8 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <8 x i8> [[VEC]], <8 x i8> poison, <8 x i32> zeroinitializer + +uint16x4_t test_vld1_dup_u16(uint16_t const * ptr) { + return vld1_dup_u16(ptr); +} + +// CIR-LABEL: test_vld1_dup_u16 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !u16i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !u16i, !cir.vector + +// LLVM: {{.*}}test_vld1_dup_u16(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i16, ptr [[PTR]], align 2 +// LLVM: [[VEC:%.*]] = insertelement <4 x i16> poison, i16 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <4 x i16> [[VEC]], <4 x i16> poison, <4 x i32> zeroinitializer + +int16x4_t test_vld1_dup_s16(int16_t const * ptr) { + return vld1_dup_s16(ptr); +} + +// CIR-LABEL: test_vld1_dup_s16 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !s16i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !s16i, !cir.vector + +// LLVM: {{.*}}test_vld1_dup_s16(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i16, ptr [[PTR]], align 2 +// LLVM: [[VEC:%.*]] = insertelement <4 x i16> poison, i16 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <4 x i16> [[VEC]], <4 x i16> poison, <4 x i32> zeroinitializer + +int32x2_t test_vld1_dup_s32(int32_t const * ptr) { + return vld1_dup_s32(ptr); +} + +// CIR-LABEL: test_vld1_dup_s32 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !s32i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !s32i, !cir.vector + +// LLVM: {{.*}}test_vld1_dup_s32(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i32, ptr [[PTR]], align 4 +// LLVM: [[VEC:%.*]] = insertelement <2 x i32> poison, i32 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <2 x i32> [[VEC]], <2 x i32> poison, <2 x i32> zeroinitializer + +int64x1_t test_vld1_dup_s64(int64_t const * ptr) { + return vld1_dup_s64(ptr); +} + +// CIR-LABEL: test_vld1_dup_s64 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !s64i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !s64i, !cir.vector + +// LLVM: {{.*}}test_vld1_dup_s64(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i64, ptr [[PTR]], align 8 +// LLVM: [[VEC:%.*]] = insertelement <1 x i64> poison, i64 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <1 x i64> [[VEC]], <1 x i64> poison, <1 x i32> zeroinitializer + +float32x2_t test_vld1_dup_f32(float32_t const * ptr) { + return vld1_dup_f32(ptr); +} + +// CIR-LABEL: test_vld1_dup_f32 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !cir.float +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !cir.float, !cir.vector + +// LLVM: {{.*}}test_vld1_dup_f32(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load float, ptr [[PTR]], align 4 +// LLVM: [[VEC:%.*]] = insertelement <2 x float> poison, float [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <2 x float> [[VEC]], <2 x float> poison, <2 x i32> zeroinitializer + +float64x1_t test_vld1_dup_f64(float64_t const * ptr) { + return vld1_dup_f64(ptr); +} + +// CIR-LABEL: test_vld1_dup_f64 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !cir.double +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !cir.double, !cir.vector + +// LLVM: {{.*}}test_vld1_dup_f64(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load double, ptr [[PTR]], align 8 +// LLVM: [[VEC:%.*]] = insertelement <1 x double> poison, double [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <1 x double> [[VEC]], <1 x double> poison, <1 x i32> zeroinitializer + +uint8x16_t test_vld1q_dup_u8(uint8_t const * ptr) { + return vld1q_dup_u8(ptr); +} + +// CIR-LABEL: test_vld1q_dup_u8 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !u8i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !u8i, !cir.vector + +// LLVM: {{.*}}test_vld1q_dup_u8(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i8, ptr [[PTR]], align 1 +// LLVM: [[VEC:%.*]] = insertelement <16 x i8> poison, i8 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <16 x i8> [[VEC]], <16 x i8> poison, <16 x i32> zeroinitializer + +int8x16_t test_vld1q_dup_s8(int8_t const * ptr) { + return vld1q_dup_s8(ptr); +} + +// CIR-LABEL: test_vld1q_dup_s8 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !s8i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !s8i, !cir.vector + +// LLVM: {{.*}}test_vld1q_dup_s8(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i8, ptr [[PTR]], align 1 +// LLVM: [[VEC:%.*]] = insertelement <16 x i8> poison, i8 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <16 x i8> [[VEC]], <16 x i8> poison, <16 x i32> zeroinitializer + +uint16x8_t test_vld1q_dup_u16(uint16_t const * ptr) { + return vld1q_dup_u16(ptr); +} + +// CIR-LABEL: test_vld1q_dup_u16 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !u16i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !u16i, !cir.vector + +// LLVM: {{.*}}test_vld1q_dup_u16(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i16, ptr [[PTR]], align 2 +// LLVM: [[VEC:%.*]] = insertelement <8 x i16> poison, i16 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <8 x i16> [[VEC]], <8 x i16> poison, <8 x i32> zeroinitializer + +int16x8_t test_vld1q_dup_s16(int16_t const * ptr) { + return vld1q_dup_s16(ptr); +} + +// CIR-LABEL: test_vld1q_dup_s16 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !s16i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !s16i, !cir.vector + +// LLVM: {{.*}}test_vld1q_dup_s16(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i16, ptr [[PTR]], align 2 +// LLVM: [[VEC:%.*]] = insertelement <8 x i16> poison, i16 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <8 x i16> [[VEC]], <8 x i16> poison, <8 x i32> zeroinitializer + +int32x4_t test_vld1q_dup_s32(int32_t const * ptr) { + return vld1q_dup_s32(ptr); +} + +// CIR-LABEL: test_vld1q_dup_s32 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !s32i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !s32i, !cir.vector + +// LLVM: {{.*}}test_vld1q_dup_s32(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i32, ptr [[PTR]], align 4 +// LLVM: [[VEC:%.*]] = insertelement <4 x i32> poison, i32 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <4 x i32> [[VEC]], <4 x i32> poison, <4 x i32> zeroinitializer + +int64x2_t test_vld1q_dup_s64(int64_t const * ptr) { + return vld1q_dup_s64(ptr); +} + +// CIR-LABEL: test_vld1q_dup_s64 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !s64i +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !s64i, !cir.vector + +// LLVM: {{.*}}test_vld1q_dup_s64(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load i64, ptr [[PTR]], align 8 +// LLVM: [[VEC:%.*]] = insertelement <2 x i64> poison, i64 [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <2 x i64> [[VEC]], <2 x i64> poison, <2 x i32> zeroinitializer + +float32x4_t test_vld1q_dup_f32(float32_t const * ptr) { + return vld1q_dup_f32(ptr); +} + +// CIR-LABEL: test_vld1q_dup_f32 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !cir.float +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !cir.float, !cir.vector + +// LLVM: {{.*}}test_vld1q_dup_f32(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load float, ptr [[PTR]], align 4 +// LLVM: [[VEC:%.*]] = insertelement <4 x float> poison, float [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <4 x float> [[VEC]], <4 x float> poison, <4 x i32> zeroinitializer + +float64x2_t test_vld1q_dup_f64(float64_t const * ptr) { + return vld1q_dup_f64(ptr); +} + +// CIR-LABEL: test_vld1q_dup_f64 +// CIR: [[VAL:%.*]] = cir.load {{%.*}} : !cir.ptr, !cir.double +// CIR: {{%.*}} = cir.vec.splat [[VAL]] : !cir.double, !cir.vector + +// LLVM: {{.*}}test_vld1q_dup_f64(ptr{{.*}}[[PTR:%.*]]) +// LLVM: [[VAL:%.*]] = load double, ptr [[PTR]], align 8 +// LLVM: [[VEC:%.*]] = insertelement <2 x double> poison, double [[VAL]], i64 0 +// LLVM: {{%.*}} = shufflevector <2 x double> [[VEC]], <2 x double> poison, <2 x i32> zeroinitializer From 01b4b504d86c7ffd606958e350e8bfae67f883b6 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 14 Oct 2024 13:42:56 -0400 Subject: [PATCH 1941/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vpadd_v and neon_vpaddq_v into llvm intrinsic (#960) This PR refactored Neon Built in code in clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp a bit to make it cleaner. Also changed RUNOption of test file clang/test/CIR/CodeGen/AArch64/neon-arith.c to make test more concise, and easy to compare against OG (to compare, just remove -fclangir from llvm gen part of RUN, and the test should still pass) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 60 ++++-- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 203 ++++++++++++++---- 2 files changed, 202 insertions(+), 61 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 645cfb14cc5c..03768a979625 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -11,6 +11,8 @@ // //===----------------------------------------------------------------------===// +#include + #include "CIRGenCXXABI.h" #include "CIRGenCall.h" #include "CIRGenFunction.h" @@ -2156,7 +2158,7 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, } } -mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf, +mlir::Value buildNeonCall(CIRGenBuilderTy &builder, llvm::SmallVector argTypes, llvm::SmallVectorImpl &args, llvm::StringRef intrinsicName, mlir::Type funcResTy, @@ -2173,7 +2175,6 @@ mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf, if (shift > 0) llvm_unreachable("Argument shift NYI"); - CIRGenBuilderTy &builder = cgf.getBuilder(); for (unsigned j = 0; j < argTypes.size(); ++j) { if (isConstrainedFPIntrinsic) { assert(!MissingFeatures::buildConstrainedFPCall()); @@ -2203,6 +2204,24 @@ static int64_t getIntValueFromConstOp(mlir::Value val) { .getSExtValue(); } +/// This function `buildCommonNeonCallPattern0` implements a common way +// to generate neon intrinsic call that has following pattern: +// 1. There is a need to cast result of the intrinsic call back to +// expression type. +// 2. Function arg types are given, not deduced from actual arg types. +static mlir::Value +buildCommonNeonCallPattern0(CIRGenFunction &cgf, std::string &intrincsName, + llvm::SmallVector argTypes, + llvm::SmallVectorImpl &ops, + mlir::Type funcResTy, const clang::CallExpr *e) { + CIRGenBuilderTy &builder = cgf.getBuilder(); + mlir::Value res = + buildNeonCall(builder, std::move(argTypes), ops, intrincsName, funcResTy, + cgf.getLoc(e->getExprLoc())); + mlir::Type resultType = cgf.ConvertType(e->getType()); + return builder.createBitcast(res, resultType); +} + mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, @@ -2267,18 +2286,25 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( default: llvm::errs() << getAArch64SIMDIntrinsicString(builtinID) << " "; llvm_unreachable("NYI"); - case NEON::BI__builtin_neon_vqadd_v: - mlir::Value res = buildNeonCall(builtinID, *this, {vTy, vTy}, ops, - (intrinicId != altLLVMIntrinsic) - ? "llvm.aarch64.neon.uqadd" - : "llvm.aarch64.neon.sqadd", - vTy, getLoc(e->getExprLoc())); - mlir::Type resultType = ConvertType(e->getType()); - // AArch64 intrinsic one-element vector type cast to - // scalar type expected by the builtin - return builder.createBitcast(res, resultType); + + case NEON::BI__builtin_neon_vpadd_v: + case NEON::BI__builtin_neon_vpaddq_v: { + std::string intrincsName = mlir::isa(vTy.getEltType()) + ? "llvm.aarch64.neon.faddp" + : "llvm.aarch64.neon.addp"; + return buildCommonNeonCallPattern0(*this, intrincsName, {vTy, vTy}, ops, + vTy, e); break; } + case NEON::BI__builtin_neon_vqadd_v: { + std::string intrincsName = (intrinicId != altLLVMIntrinsic) + ? "llvm.aarch64.neon.uqadd" + : "llvm.aarch64.neon.sqadd"; + return buildCommonNeonCallPattern0(*this, intrincsName, {vTy, vTy}, ops, + vTy, e); + break; + } + } return nullptr; } @@ -3090,9 +3116,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, // The prototype of builtin_neon_vqrshrun_n can be found at // https://developer.arm.com/architectures/instruction-sets/intrinsics/ return buildNeonCall( - BuiltinID, *this, - {builder.getExtendedElementVectorType(ty, true), SInt32Ty}, Ops, - "llvm.aarch64.neon.sqrshrun", ty, getLoc(E->getExprLoc())); + builder, {builder.getExtendedElementVectorType(ty, true), SInt32Ty}, + Ops, "llvm.aarch64.neon.sqrshrun", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqshrn_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vrshrn_n_v: @@ -3105,7 +3130,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vrnda_v: case NEON::BI__builtin_neon_vrndaq_v: { assert(!MissingFeatures::buildConstrainedFPCall()); - return buildNeonCall(BuiltinID, *this, {ty}, Ops, "llvm.round", ty, + return buildNeonCall(builder, {ty}, Ops, "llvm.round", ty, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndih_f16: { @@ -3128,8 +3153,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vrndns_f32: { mlir::Value arg0 = buildScalarExpr(E->getArg(0)); args.push_back(arg0); - return buildNeonCall(NEON::BI__builtin_neon_vrndns_f32, *this, - {arg0.getType()}, args, "llvm.roundeven.f32", + return buildNeonCall(builder, {arg0.getType()}, args, "llvm.roundeven.f32", getCIRGenModule().FloatTy, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndph_f16: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index 52d6d1a0c003..42c1fd389b17 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -1,8 +1,14 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -ffreestanding -emit-cir -fno-clangir-call-conv-lowering -target-feature +neon %s -o %t.cir +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -fno-clangir-call-conv-lowering \ +// RUN: -emit-cir -o %t.cir %s // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ -// RUN: -ffreestanding -emit-llvm -fno-clangir-call-conv-lowering -target-feature +neon %s -o %t.ll + +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -fno-clangir-call-conv-lowering \ +// RUN: -emit-llvm -o - %s \ +// RUN: | opt -S -passes=instcombine,mem2reg,simplifycfg -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // REQUIRES: aarch64-registered-target || arm-registered-target @@ -28,19 +34,9 @@ float32_t test_vrndns_f32(float32_t a) { // CIR: [[RET_VAL:%.*]] = cir.load [[RET_P]] : !cir.ptr, !cir.float // CIR: cir.return [[RET_VAL]] : !cir.float loc -// LLVM: define dso_local float @test_vrndns_f32(float [[ARG:%.*]]) -// LLVM: store float [[ARG]], ptr [[ARG_SAVE:%.*]], align 4 -// LLVM: [[P0:%.*]] = load float, ptr [[ARG_SAVE]], align 4, -// LLVM: store float [[P0]], ptr [[P0_SAVE:%.*]], align 4, -// LLVM: [[INTRIN_ARG:%.*]] = load float, ptr [[P0_SAVE]], align 4, -// LLVM: [[INTRIN_RES:%.*]] = call float @llvm.roundeven.f32(float [[INTRIN_ARG]]) -// LLVM: store float [[INTRIN_RES]], ptr [[RES_SAVE0:%.*]], align 4, -// LLVM: [[RES_COPY0:%.*]] = load float, ptr [[RES_SAVE0]], align 4, -// LLVM: store float [[RES_COPY0]], ptr [[RES_SAVE1:%.*]], align 4, -// LLVM: [[RES_COPY1:%.*]] = load float, ptr [[RES_SAVE1]], align 4, -// LLVM: store float [[RES_COPY1]], ptr [[RET_P:%.*]], align 4, -// LLVM: [[RET_VAL:%.*]] = load float, ptr [[RET_P]], align 4, -// LLVM: ret float [[RET_VAL]] +// LLVM: {{.*}}test_vrndns_f32(float{{.*}}[[ARG:%.*]]) +// LLVM: [[INTRIN_RES:%.*]] = call float @llvm.roundeven.f32(float [[ARG]]) +// LLVM: ret float [[INTRIN_RES]] float32x2_t test_vrnda_f32(float32x2_t a) { return vrnda_f32(a); @@ -62,19 +58,9 @@ float32x2_t test_vrnda_f32(float32x2_t a) { // CIR: [[RET_VAL:%.*]] = cir.load [[RET_P]] : !cir.ptr>, !cir.vector // CIR: cir.return [[RET_VAL]] : !cir.vector -// LLVM: define dso_local <2 x float> @test_vrnda_f32(<2 x float> [[ARG:%.*]]) -// LLVM: store <2 x float> [[ARG]], ptr [[ARG_SAVE:%.*]], align 8 -// LLVM: [[P0:%.*]] = load <2 x float>, ptr [[ARG_SAVE]], align 8, -// LLVM: store <2 x float> [[P0]], ptr [[P0_SAVE:%.*]], align 8, -// LLVM: [[INTRIN_ARG:%.*]] = load <2 x float>, ptr [[P0_SAVE]], align 8, -// LLVM: [[INTRIN_RES:%.*]] = call <2 x float> @llvm.round.v2f32(<2 x float> [[INTRIN_ARG]]) -// LLVM: store <2 x float> [[INTRIN_RES]], ptr [[RES_SAVE0:%.*]], align 8, -// LLVM: [[RES_COPY0:%.*]] = load <2 x float>, ptr [[RES_SAVE0]], align 8, -// LLVM: store <2 x float> [[RES_COPY0]], ptr [[RES_SAVE1:%.*]], align 8, -// LLVM: [[RES_COPY1:%.*]] = load <2 x float>, ptr [[RES_SAVE1]], align 8, -// LLVM: store <2 x float> [[RES_COPY1]], ptr [[RET_P:%.*]], align 8, -// LLVM: [[RET_VAL:%.*]] = load <2 x float>, ptr [[RET_P]], align 8, -// LLVM: ret <2 x float> [[RET_VAL]] +// LLVM: {{.*}}test_vrnda_f32(<2 x float>{{.*}}[[ARG:%.*]]) +// LLVM: [[INTRIN_RES:%.*]] = call <2 x float> @llvm.round.v2f32(<2 x float> [[ARG]]) +// LLVM: ret <2 x float> [[INTRIN_RES]] float32x4_t test_vrndaq_f32(float32x4_t a) { return vrndaq_f32(a); @@ -88,16 +74,147 @@ float32x4_t test_vrndaq_f32(float32x4_t a) { // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.round" [[INTRIN_ARG_BACK]] : (!cir.vector) -> !cir.vector // CIR: cir.return {{%.*}} : !cir.vector -// LLVM: define dso_local <4 x float> @test_vrndaq_f32(<4 x float> [[ARG:%.*]]) -// LLVM: store <4 x float> [[ARG]], ptr [[ARG_SAVE:%.*]], align 16 -// LLVM: [[P0:%.*]] = load <4 x float>, ptr [[ARG_SAVE]], align 16, -// LLVM: store <4 x float> [[P0]], ptr [[P0_SAVE:%.*]], align 16, -// LLVM: [[INTRIN_ARG:%.*]] = load <4 x float>, ptr [[P0_SAVE]], align 16, -// LLVM: [[INTRIN_RES:%.*]] = call <4 x float> @llvm.round.v4f32(<4 x float> [[INTRIN_ARG]]) -// LLVM: store <4 x float> [[INTRIN_RES]], ptr [[RES_SAVE0:%.*]], align 16, -// LLVM: [[RES_COPY0:%.*]] = load <4 x float>, ptr [[RES_SAVE0]], align 16, -// LLVM: store <4 x float> [[RES_COPY0]], ptr [[RES_SAVE1:%.*]], align 16, -// LLVM: [[RES_COPY1:%.*]] = load <4 x float>, ptr [[RES_SAVE1]], align 16, -// LLVM: store <4 x float> [[RES_COPY1]], ptr [[RET_P:%.*]], align 16, -// LLVM: [[RET_VAL:%.*]] = load <4 x float>, ptr [[RET_P]], align 16, -// LLVM: ret <4 x float> [[RET_VAL]] +// LLVM: {{.*}}test_vrndaq_f32(<4 x float>{{.*}}[[ARG:%.*]]) +// LLVM: [[INTRIN_RES:%.*]] = call <4 x float> @llvm.round.v4f32(<4 x float> [[ARG]]) +// LLVM: ret <4 x float> [[INTRIN_RES]] + +int8x8_t test_vpadd_s8(int8x8_t a, int8x8_t b) { + return vpadd_s8(a, b); +} + +// CIR-LABEL: vpadd_s8 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vpadd_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> [[A]], <8 x i8> [[B]]) +// LLVM: ret <8 x i8> [[RES]] + + +int8x16_t test_vpaddq_s8(int8x16_t a, int8x16_t b) { + return vpaddq_s8(a, b); +} + +// CIR-LABEL: vpaddq_s8 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vpaddq_s8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) +// LLVM: ret <16 x i8> [[RES]] + +uint8x8_t test_vpadd_u8(uint8x8_t a, uint8x8_t b) { + return vpadd_u8(a, b); +} + +// CIR-LABEL: vpadd_u8 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vpadd_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> [[A]], <8 x i8> [[B]]) +// LLVM: ret <8 x i8> [[RES]] + +int16x4_t test_vpadd_s16(int16x4_t a, int16x4_t b) { + return vpadd_s16(a, b); +} + +// CIR-LABEL: vpadd_s16 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector + +// LLVM: {{.*}}test_vpadd_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> [[A]], <4 x i16> [[B]]) +// LLVM: ret <4 x i16> [[RES]] + +int16x8_t test_vpaddq_s16(int16x8_t a, int16x8_t b) { + return vpaddq_s16(a, b); +} + +// CIR-LABEL: vpaddq_s16 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector + +// LLVM: {{.*}}test_vpaddq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) +// LLVM: ret <8 x i16> [[RES]] + +uint16x4_t test_vpadd_u16(uint16x4_t a, uint16x4_t b) { + return vpadd_u16(a, b); +} + +// CIR-LABEL: vpadd_u16 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector + +// LLVM: {{.*}}test_vpadd_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> [[A]], <4 x i16> [[B]]) +// LLVM: ret <4 x i16> [[RES]] + +int32x2_t test_vpadd_s32(int32x2_t a, int32x2_t b) { + return vpadd_s32(a, b); +} + +// CIR-LABEL: vpadd_s32 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector + +// LLVM: {{.*}}test_vpadd_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> [[A]], <2 x i32> [[B]]) +// LLVM: ret <2 x i32> [[RES]] + +int32x4_t test_vpaddq_s32(int32x4_t a, int32x4_t b) { + return vpaddq_s32(a, b); +} + +// CIR-LABEL: vpaddq_s32 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector + +// LLVM: {{.*}}test_vpaddq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) +// LLVM: ret <4 x i32> [[RES]] + +float32x2_t test_vpadd_f32(float32x2_t a, float32x2_t b) { + return vpadd_f32(a, b); +} + +// CIR-LABEL: vpadd_f32 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector + +// LLVM: {{.*}}test_vpadd_f32(<2 x float>{{.*}}[[A:%.*]], <2 x float>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x float> @llvm.aarch64.neon.faddp.v2f32(<2 x float> [[A]], <2 x float> [[B]]) +// LLVM: ret <2 x float> [[RES]] + +float32x4_t test_vpaddq_f32(float32x4_t a, float32x4_t b) { + return vpaddq_f32(a, b); +} + +// CIR-LABEL: vpaddq_f32 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector + +// LLVM: {{.*}}test_vpaddq_f32(<4 x float>{{.*}}[[A:%.*]], <4 x float>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x float> @llvm.aarch64.neon.faddp.v4f32(<4 x float> [[A]], <4 x float> [[B]]) +// LLVM: ret <4 x float> [[RES]] + +float64x2_t test_vpaddq_f64(float64x2_t a, float64x2_t b) { + return vpaddq_f64(a, b); +} + +// CIR-LABEL: vpaddq_f64 +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector +// CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector + +// LLVM: {{.*}}test_vpaddq_f64(<2 x double>{{.*}}[[A:%.*]], <2 x double>{{.*}}[[B:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x double> @llvm.aarch64.neon.faddp.v2f64(<2 x double> [[A]], <2 x double> [[B]]) +// LLVM: ret <2 x double> [[RES]] From cf0f9af4b6e553549aa229bfc567f576f5154309 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Tue, 15 Oct 2024 01:54:52 +0800 Subject: [PATCH 1942/2301] [CIR] [Lowering] Fix handling of multiple array for ZeroAttr (#970) This is the following up fix for the previous fix https://github.com/llvm/clangir/pull/961 See the attached new test for the reproducer. Sorry for the initial overlook. --- clang/lib/CIR/Lowering/LoweringHelpers.cpp | 4 +++- clang/test/CIR/Lowering/multi-array.c | 22 ++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Lowering/LoweringHelpers.cpp b/clang/lib/CIR/Lowering/LoweringHelpers.cpp index 3d5dae642cff..94f4d251d370 100644 --- a/clang/lib/CIR/Lowering/LoweringHelpers.cpp +++ b/clang/lib/CIR/Lowering/LoweringHelpers.cpp @@ -85,8 +85,10 @@ void convertToDenseElementsAttrImpl( continue; } - if (mlir::isa(eltAttr)) + if (mlir::isa(eltAttr)) { + currentIndex += elementsSizeInCurrentDim; continue; + } llvm_unreachable("unknown element in ConstArrayAttr"); } diff --git a/clang/test/CIR/Lowering/multi-array.c b/clang/test/CIR/Lowering/multi-array.c index 75db845ce75f..8d01028a9bf1 100644 --- a/clang/test/CIR/Lowering/multi-array.c +++ b/clang/test/CIR/Lowering/multi-array.c @@ -34,3 +34,25 @@ unsigned char table3[15][16] = }; // LLVM: @table3 = {{.*}}[15 x [16 x i8]] {{.*}}[16 x i8] c"\01\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\01\02\02\00\00\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\02\02\02\02\00\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\02\02\02\03\03\00\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\02\02\03\03\03\03\00\00\00\00\00\00\00\00\00\00", [16 x i8] c"\02\03\03\03\03\03\03\00\00\00\00\00\00\00\00\00", [16 x i8] c"\03\03\03\03\03\03\03\04\05\06\07\08\09\0A\0B\00", [16 x i8] zeroinitializer + + +unsigned char table4[][20] = +{ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 2, 2, 2, 2, 0, 0, 0, 0 } +}; +// LLVM: @table4 = {{.*}}[2 x [20 x i8]] {{.*}}[20 x i8] zeroinitializer, [20 x i8] c"\00\00\00\00\00\00\00\01\01\01\01\00\02\02\02\02\00\00\00\00"] + +unsigned char table5[][20] = +{ + { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 2, 2, 2, 2, 0, 0, 0, 0 } +}; +// LLVM: @table5 = {{.*}}[2 x [20 x i8]] {{.*}}[20 x i8] c"\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00", [20 x i8] c"\00\00\00\00\00\00\00\01\01\01\01\00\02\02\02\02\00\00\00\00"] + +unsigned char table6[][20] = +{ + { 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 2, 2, 2, 2, 0, 0, 0, 0 } +}; +// LLVM: @table6 = {{.*}}[2 x [20 x i8]] {{.*}}[20 x i8] c"\01\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00", [20 x i8] c"\00\00\00\00\00\00\00\01\01\01\01\00\02\02\02\02\00\00\00\00"] From 8ce92f22d85040e0820356c1514e2440f70ed649 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 14 Oct 2024 16:54:17 -0700 Subject: [PATCH 1943/2301] [CIR][Driver] Fix -fclangir-call-conv-lowering behavior - The flag is the default even for cc1, so make it disable two level deep. - While here, remove the unnecessary flag disable for pure `-emit-cir`. --- clang/lib/Driver/ToolChains/Clang.cpp | 5 ++-- clang/test/CIR/CodeGen/AArch64/neon-ext-mov.c | 2 +- clang/test/CIR/CodeGen/AArch64/neon-ldst.c | 2 +- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 2 +- clang/test/CIR/CodeGen/AArch64/neon.c | 2 +- .../CIR/CodeGen/OpenCL/addrspace-alloca.cl | 2 +- clang/test/CIR/CodeGen/OpenCL/array-decay.cl | 2 +- .../CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl | 8 +++---- clang/test/CIR/CodeGen/OpenCL/convergent.cl | 2 +- clang/test/CIR/CodeGen/OpenCL/global.cl | 2 +- .../OpenCL/kernel-arg-info-single-as.cl | 2 +- .../CIR/CodeGen/OpenCL/kernel-arg-info.cl | 4 ++-- .../CIR/CodeGen/OpenCL/kernel-arg-metadata.cl | 2 +- .../CIR/CodeGen/OpenCL/kernel-attributes.cl | 2 +- .../CIR/CodeGen/OpenCL/kernel-unit-attr.cl | 2 +- clang/test/CIR/CodeGen/OpenCL/nothrow.cl | 2 +- .../test/CIR/CodeGen/OpenCL/opencl-c-lang.cl | 2 +- .../test/CIR/CodeGen/OpenCL/opencl-version.cl | 4 ++-- .../CIR/CodeGen/OpenCL/spir-calling-conv.cl | 2 +- clang/test/CIR/CodeGen/OpenCL/spirv-target.cl | 2 +- .../test/CIR/CodeGen/OpenCL/static-vardecl.cl | 2 +- clang/test/CIR/CodeGen/abstract-cond.c | 2 +- clang/test/CIR/CodeGen/attributes.c | 2 +- clang/test/CIR/CodeGen/builtin-bit-cast.cpp | 2 +- clang/test/CIR/CodeGen/complex-arithmetic.c | 24 +++++++++---------- clang/test/CIR/CodeGen/compound-literal.c | 2 +- clang/test/CIR/CodeGen/dynamic-cast-exact.cpp | 2 +- clang/test/CIR/CodeGen/global-new.cpp | 4 ++-- clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp | 2 +- .../CIR/CodeGen/initlist-ptr-unsigned.cpp | 2 +- clang/test/CIR/CodeGen/multi-vtable.cpp | 2 +- clang/test/CIR/CodeGen/temporaries.cpp | 4 ++-- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 2 +- clang/test/CIR/CodeGen/var-arg-float.c | 2 +- clang/test/CIR/CodeGen/var-arg-scope.c | 4 ++-- clang/test/CIR/CodeGen/visibility-attribute.c | 2 +- clang/test/CIR/Driver/callconv.cpp | 4 ++++ 37 files changed, 60 insertions(+), 57 deletions(-) create mode 100644 clang/test/CIR/Driver/callconv.cpp diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 699d47ac2f64..4bd9104228f4 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5252,9 +5252,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_clangir_disable_passes)) CmdArgs.push_back("-clangir-disable-passes"); - if (Args.hasArg(options::OPT_fclangir_call_conv_lowering)) - CmdArgs.push_back("-fclangir-call-conv-lowering"); - + Args.addOptOutFlag(CmdArgs, options::OPT_fclangir_call_conv_lowering, + options::OPT_fno_clangir_call_conv_lowering); if (Args.hasArg(options::OPT_fclangir_mem2reg)) CmdArgs.push_back("-fclangir-mem2reg"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-ext-mov.c b/clang/test/CIR/CodeGen/AArch64/neon-ext-mov.c index bbaf896e5a32..525b0d46defa 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-ext-mov.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-ext-mov.c @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ -// RUN: -fclangir -disable-O0-optnone -fno-clangir-call-conv-lowering \ +// RUN: -fclangir -disable-O0-optnone \ // RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s diff --git a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c index 9f2e431d9a9e..7ee0423398b1 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ // RUN: -fclangir -disable-O0-optnone \ -// RUN: -flax-vector-conversions=none -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index 42465990244e..4675aee2bc27 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ // RUN: -fclangir -disable-O0-optnone \ -// RUN: -flax-vector-conversions=none -fno-clangir-call-conv-lowering -emit-cir -o %t.cir %s +// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -target-feature +neon \ diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index e9079a0c8397..2b9be7610a28 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ // RUN: -fclangir -disable-O0-optnone \ -// RUN: -flax-vector-conversions=none -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ diff --git a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl index 256da726fb8e..64bf88fa188c 100644 --- a/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl +++ b/clang/test/CIR/CodeGen/OpenCL/addrspace-alloca.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR // RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/OpenCL/array-decay.cl b/clang/test/CIR/CodeGen/OpenCL/array-decay.cl index e42bc9096a4b..cc5632b36e2d 100644 --- a/clang/test/CIR/CodeGen/OpenCL/array-decay.cl +++ b/clang/test/CIR/CodeGen/OpenCL/array-decay.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR // RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl b/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl index ad5ec6651e90..51d64e717b9f 100644 --- a/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl +++ b/clang/test/CIR/CodeGen/OpenCL/cl-uniform-wg-size.cl @@ -1,10 +1,10 @@ -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -O0 -cl-std=CL1.2 -o %t.cl12.cir %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL1.2 -o %t.cl12.cir %s // RUN: FileCheck %s -input-file=%t.cl12.cir -check-prefixes CIR,CIR-UNIFORM -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -O0 -cl-std=CL2.0 -o %t.cl20.cir %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL2.0 -o %t.cl20.cir %s // RUN: FileCheck %s -input-file=%t.cl20.cir -check-prefixes CIR,CIR-NONUNIFORM -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -O0 -cl-std=CL2.0 -cl-uniform-work-group-size -o %t.cl20.uniform1.cir %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL2.0 -cl-uniform-work-group-size -o %t.cl20.uniform1.cir %s // RUN: FileCheck %s -input-file=%t.cl20.uniform1.cir -check-prefixes CIR,CIR-UNIFORM -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -O0 -cl-std=CL2.0 -foffload-uniform-block -o %t.cl20.uniform2.cir %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -O0 -cl-std=CL2.0 -foffload-uniform-block -o %t.cl20.uniform2.cir %s // RUN: FileCheck %s -input-file=%t.cl20.uniform2.cir -check-prefixes CIR,CIR-UNIFORM // RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering -O0 -cl-std=CL1.2 -o %t.cl12.ll %s diff --git a/clang/test/CIR/CodeGen/OpenCL/convergent.cl b/clang/test/CIR/CodeGen/OpenCL/convergent.cl index 8da6d0fc51d9..ba8a57f98d04 100644 --- a/clang/test/CIR/CodeGen/OpenCL/convergent.cl +++ b/clang/test/CIR/CodeGen/OpenCL/convergent.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fclangir -triple spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -fclangir -triple spirv64-unknown-unknown -emit-cir %s -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR // RUN: %clang_cc1 -fclangir -triple spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/OpenCL/global.cl b/clang/test/CIR/CodeGen/OpenCL/global.cl index 83fe24c573cb..40a66dec00e3 100644 --- a/clang/test/CIR/CodeGen/OpenCL/global.cl +++ b/clang/test/CIR/CodeGen/OpenCL/global.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR // RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl index c72ede55d797..f07a2cc0c81e 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info-single-as.cl @@ -1,6 +1,6 @@ // Test that the kernel argument info always refers to SPIR address spaces, // even if the target has only one address space like x86_64 does. -// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -fno-clangir-call-conv-lowering -o - -triple x86_64-unknown-linux-gnu -o %t.cir +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -o - -triple x86_64-unknown-linux-gnu -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR // RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-llvm -fno-clangir-call-conv-lowering -o - -triple x86_64-unknown-linux-gnu -o %t.ll diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info.cl index d07fc1db7fb3..8162a04f97b2 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-info.cl @@ -1,7 +1,7 @@ // See also clang/test/CodeGenOpenCL/kernel-arg-info.cl -// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -fno-clangir-call-conv-lowering -o - -triple spirv64-unknown-unknown -o %t.cir +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -o - -triple spirv64-unknown-unknown -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR -// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -fno-clangir-call-conv-lowering -o - -triple spirv64-unknown-unknown -cl-kernel-arg-info -o %t.arginfo.cir +// RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-cir -o - -triple spirv64-unknown-unknown -cl-kernel-arg-info -o %t.arginfo.cir // RUN: FileCheck %s --input-file=%t.arginfo.cir --check-prefix=CIR-ARGINFO // RUN: %clang_cc1 -fclangir %s -cl-std=CL2.0 -emit-llvm -fno-clangir-call-conv-lowering -o - -triple spirv64-unknown-unknown -o %t.ll diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl index 14d8e29397c1..be8d2eb2fef7 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-arg-metadata.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -fclangir -triple spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -o %t.cir +// RUN: %clang_cc1 %s -fclangir -triple spirv64-unknown-unknown -emit-cir -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR // RUN: %clang_cc1 %s -fclangir -triple spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl index b3d6d73eb789..f8cd9e5c1ed9 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-attributes.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR // RUN: %clang_cc1 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl b/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl index 5acbc5eea395..01348013bbf0 100644 --- a/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl +++ b/clang/test/CIR/CodeGen/OpenCL/kernel-unit-attr.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR diff --git a/clang/test/CIR/CodeGen/OpenCL/nothrow.cl b/clang/test/CIR/CodeGen/OpenCL/nothrow.cl index 3adf7be62962..a45cf51ef649 100644 --- a/clang/test/CIR/CodeGen/OpenCL/nothrow.cl +++ b/clang/test/CIR/CodeGen/OpenCL/nothrow.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-cir -o %t.cir %s // RUN: FileCheck %s -input-file=%t.cir -check-prefixes CIR // RUN: %clang_cc1 -fclangir -triple=spirv64-unknown-unknown -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s // RUN: FileCheck %s -input-file=%t.ll -check-prefixes LLVM diff --git a/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl b/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl index 724ca098295b..67aeda32c2a1 100644 --- a/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl +++ b/clang/test/CIR/CodeGen/OpenCL/opencl-c-lang.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR // CIR: module{{.*}} attributes {{{.*}}cir.lang = #cir.lang diff --git a/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl b/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl index 018d7f1efe25..f64cdb917ed0 100644 --- a/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl +++ b/clang/test/CIR/CodeGen/OpenCL/opencl-version.cl @@ -1,8 +1,8 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR-CL30 // RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM-CL30 -// RUN: %clang_cc1 -cl-std=CL1.2 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL1.2 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR-CL12 // RUN: %clang_cc1 -cl-std=CL1.2 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM-CL12 diff --git a/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl b/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl index 9a6644cb09f0..fa0928720285 100644 --- a/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl +++ b/clang/test/CIR/CodeGen/OpenCL/spir-calling-conv.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fclangir %s -O0 -triple "spirv64-unknown-unknown" -emit-cir -fno-clangir-call-conv-lowering -o %t.cir +// RUN: %clang_cc1 -fclangir %s -O0 -triple "spirv64-unknown-unknown" -emit-cir -o %t.cir // RUN: FileCheck %s --input-file=%t.cir --check-prefix=CIR // RUN: %clang_cc1 -fclangir %s -O0 -triple "spirv64-unknown-unknown" -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll // RUN: FileCheck %s --input-file=%t.ll --check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl index 4dbfc5c37df1..ef8bd27076c5 100644 --- a/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl +++ b/clang/test/CIR/CodeGen/OpenCL/spirv-target.cl @@ -1,5 +1,5 @@ // See also: clang/test/CodeGenOpenCL/spirv_target.cl -// RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t_64.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t_64.cir // RUN: FileCheck --input-file=%t_64.cir %s --check-prefix=CIR-SPIRV64 // RUN: %clang_cc1 -cl-std=CL3.0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t_64.ll // RUN: FileCheck --input-file=%t_64.ll %s --check-prefix=LLVM-SPIRV64 diff --git a/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl b/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl index 8f458c5696c7..3030583a0ca6 100644 --- a/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl +++ b/clang/test/CIR/CodeGen/OpenCL/static-vardecl.cl @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.cir +// RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-cir -triple spirv64-unknown-unknown %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR // RUN: %clang_cc1 -cl-std=CL3.0 -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -triple spirv64-unknown-unknown %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/abstract-cond.c b/clang/test/CIR/CodeGen/abstract-cond.c index 4f084503a11c..dc3df811d8f4 100644 --- a/clang/test/CIR/CodeGen/abstract-cond.c +++ b/clang/test/CIR/CodeGen/abstract-cond.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s diff --git a/clang/test/CIR/CodeGen/attributes.c b/clang/test/CIR/CodeGen/attributes.c index 97117d71b935..67bb467a8c5d 100644 --- a/clang/test/CIR/CodeGen/attributes.c +++ b/clang/test/CIR/CodeGen/attributes.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o - | FileCheck %s -check-prefix=LLVM extern int __attribute__((section(".shared"))) ext; diff --git a/clang/test/CIR/CodeGen/builtin-bit-cast.cpp b/clang/test/CIR/CodeGen/builtin-bit-cast.cpp index 8747d8ec572f..c8d26106b7d9 100644 --- a/clang/test/CIR/CodeGen/builtin-bit-cast.cpp +++ b/clang/test/CIR/CodeGen/builtin-bit-cast.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir --check-prefix=CIR %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s diff --git a/clang/test/CIR/CodeGen/complex-arithmetic.c b/clang/test/CIR/CodeGen/complex-arithmetic.c index 9c6a44959237..eddedc2d3a27 100644 --- a/clang/test/CIR/CodeGen/complex-arithmetic.c +++ b/clang/test/CIR/CodeGen/complex-arithmetic.c @@ -1,31 +1,31 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIRGEN,CIRGEN-BASIC,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIRGEN,CIRGEN-BASIC,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIRGEN,CIRGEN-IMPROVED,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIRGEN,CIRGEN-IMPROVED,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIRGEN,CIRGEN-FULL,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIRGEN,CIRGEN-FULL,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIR,CIR-BASIC,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=basic -fclangir -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIR,CIR-BASIC,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=improved -fclangir -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIR,CIR-IMPROVED,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=improved -fclangir -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIR,CIR-IMPROVED,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=full -fclangir -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CLANG,CIR,CIR-FULL,CHECK %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -complex-range=full -fclangir -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir --check-prefixes=CPPLANG,CIR,CIR-FULL,CHECK %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=basic -fclangir -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s diff --git a/clang/test/CIR/CodeGen/compound-literal.c b/clang/test/CIR/CodeGen/compound-literal.c index 248b6bfa9dab..e07c30bf5466 100644 --- a/clang/test/CIR/CodeGen/compound-literal.c +++ b/clang/test/CIR/CodeGen/compound-literal.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-call-conv-lowering -Wno-unused-value -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-call-conv-lowering -Wno-unused-value -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp index aef196d7a2c7..cf5a9f9f6f64 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -fclangir -clangir-disable-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -fclangir -clangir-disable-passes -emit-cir -o %t.cir %s // RUN: FileCheck --input-file=%t.cir %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -fclangir -emit-llvm -fno-clangir-call-conv-lowering -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index c0b3eac11a58..63cf667d259e 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -1,8 +1,8 @@ -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=CIR_BEFORE // RUN: FileCheck %s -check-prefix=CIR_AFTER --input-file=%t.cir // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck %s -check-prefix=LLVM --input-file=%t.ll -// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering -fexceptions -fcxx-exceptions %s -o %t.eh.cir +// RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fexceptions -fcxx-exceptions %s -o %t.eh.cir // RUN: FileCheck %s -check-prefix=CIR_EH --input-file=%t.eh.cir // RUN: %clang_cc1 -std=c++20 -triple aarch64-none-linux-android21 -fclangir -fno-clangir-call-conv-lowering -emit-cir-flat -fno-clangir-call-conv-lowering -fexceptions -fcxx-exceptions %s -o %t.eh.flat.cir // RUN: FileCheck %s -check-prefix=CIR_FLAT_EH --input-file=%t.eh.flat.cir diff --git a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp index c28b265c4c2b..37b9a680f80a 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp index 893f2a24d008..f80ae2ec46cd 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/multi-vtable.cpp b/clang/test/CIR/CodeGen/multi-vtable.cpp index b696d6fa61de..a00e29f45109 100644 --- a/clang/test/CIR/CodeGen/multi-vtable.cpp +++ b/clang/test/CIR/CodeGen/multi-vtable.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s diff --git a/clang/test/CIR/CodeGen/temporaries.cpp b/clang/test/CIR/CodeGen/temporaries.cpp index bf72994e6726..589849bf52c3 100644 --- a/clang/test/CIR/CodeGen/temporaries.cpp +++ b/clang/test/CIR/CodeGen/temporaries.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fcxx-exceptions -fexceptions -emit-cir -fno-clangir-call-conv-lowering %s -o %t.eh.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -fcxx-exceptions -fexceptions -emit-cir %s -o %t.eh.cir // RUN: FileCheck --input-file=%t.eh.cir %s -check-prefix=CIR_EH // RUN: cir-translate %t.cir -cir-to-llvmir --disable-cc-lowering -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 002e676bbc63..7b6c80535e30 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fcxx-exceptions -fexceptions -mconstructor-aliases -fclangir -emit-cir-flat -fno-clangir-call-conv-lowering %s -o %t.flat.cir // RUN: FileCheck --input-file=%t.flat.cir --check-prefix=CIR_FLAT %s diff --git a/clang/test/CIR/CodeGen/var-arg-float.c b/clang/test/CIR/CodeGen/var-arg-float.c index e9f0881d9fa8..2b3f5099dd1b 100644 --- a/clang/test/CIR/CodeGen/var-arg-float.c +++ b/clang/test/CIR/CodeGen/var-arg-float.c @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/var-arg-scope.c b/clang/test/CIR/CodeGen/var-arg-scope.c index c586487af559..2a52f5621f37 100644 --- a/clang/test/CIR/CodeGen/var-arg-scope.c +++ b/clang/test/CIR/CodeGen/var-arg-scope.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -fno-clangir-call-conv-lowering -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=BEFORE +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s -check-prefix=AFTER // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM diff --git a/clang/test/CIR/CodeGen/visibility-attribute.c b/clang/test/CIR/CodeGen/visibility-attribute.c index baf31b5788a4..fb675fb51751 100644 --- a/clang/test/CIR/CodeGen/visibility-attribute.c +++ b/clang/test/CIR/CodeGen/visibility-attribute.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o - | FileCheck %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o - | FileCheck %s -check-prefix=LLVM extern int glob_default; diff --git a/clang/test/CIR/Driver/callconv.cpp b/clang/test/CIR/Driver/callconv.cpp new file mode 100644 index 000000000000..f857369d9215 --- /dev/null +++ b/clang/test/CIR/Driver/callconv.cpp @@ -0,0 +1,4 @@ +// RUN: %clang %s -fno-clangir-call-conv-lowering -### -c %s 2>&1 | FileCheck --check-prefix=DISABLE %s +// DISABLE: "-fno-clangir-call-conv-lowering" +// RUN: %clang %s -fclangir-call-conv-lowering -### -c %s 2>&1 | FileCheck --check-prefix=ENABLE %s +// ENABLE-NOT: "-fclangir-call-conv-lowering" From ea1fac113e80417456bf3faef580668dde8f623b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 14 Oct 2024 17:38:08 -0700 Subject: [PATCH 1944/2301] [CIR] Add more user facing messages for -fno-clangir-call-conv-lowering --- clang/include/clang/CIR/MissingFeatures.h | 11 ++- .../Transforms/TargetLowering/ABIInfoImpl.cpp | 2 +- .../TargetLowering/CIRLowerContext.cpp | 16 ++-- .../TargetLowering/CIRToCIRArgMapping.h | 10 +-- .../TargetLowering/ItaniumCXXABI.cpp | 8 +- .../Transforms/TargetLowering/LowerCall.cpp | 36 ++++----- .../TargetLowering/LowerFunction.cpp | 80 +++++++++---------- .../TargetLowering/LowerFunctionInfo.h | 2 +- .../Transforms/TargetLowering/LowerModule.cpp | 28 +++---- .../Transforms/TargetLowering/LowerTypes.cpp | 6 +- .../TargetLowering/RecordLayoutBuilder.cpp | 50 ++++++------ .../TargetLowering/Targets/AArch64.cpp | 12 +-- .../Targets/LoweringPrepareAArch64CXXABI.cpp | 16 ++-- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 2 +- .../TargetLowering/Targets/SPIR.cpp | 2 +- .../Transforms/TargetLowering/Targets/X86.cpp | 54 ++++++------- 16 files changed, 171 insertions(+), 164 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index c75edc6d94d6..0946f44991b1 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -19,12 +19,19 @@ constexpr bool cirMissingFeatureAssertionMode = true; // Change to `false` to use llvm_unreachable #define NOTE \ - " Target lowering is now required. Disable it with " \ - "-fno-clangir-call-conv-lowering." + " Target lowering is now required. To workaround use " \ + "-fno-clangir-call-conv-lowering. This flag is going to be removed at some" \ + " point." // Special assertion to be used in the target lowering library. #define cir_tl_assert(cond) assert((cond) && NOTE); +// Special +#define cir_unreachable(msg) \ + do { \ + llvm_unreachable(msg NOTE); \ + } while (0) + // Some assertions knowingly generate incorrect code. This macro allows us to // switch between using `assert` and `llvm_unreachable` for these cases. #define cir_assert_or_abort(cond, msg) \ diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index 38f9fb8ffaa4..c76808ce1086 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -49,7 +49,7 @@ Type useFirstFieldIfTransparentUnion(Type Ty) { CIRCXXABI::RecordArgABI getRecordArgABI(const StructType RT, CIRCXXABI &CXXABI) { if (::cir::MissingFeatures::typeIsCXXRecordDecl()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } return CXXABI.getRecordArgABI(RT); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index 5b5eb7602ffa..a6ae8e8a9df5 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -94,7 +94,7 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { Align = Target->getDoubleAlign(); break; } - llvm_unreachable("Unknown builtin type!"); + cir_unreachable("Unknown builtin type!"); break; } case clang::Type::Record: { @@ -108,7 +108,7 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { // Not sure if this is necessary in CIR. if (::cir::MissingFeatures::typeGetAsEnumType()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } const CIRRecordLayout &Layout = getCIRRecordLayout(RT); @@ -118,7 +118,7 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { break; } default: - llvm_unreachable("Unhandled type class"); + cir_unreachable("Unhandled type class"); } cir_tl_assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); @@ -135,7 +135,7 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { Ty = IntType::get(getMLIRContext(), 8, true); break; default: - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } Types.push_back(Ty); @@ -153,7 +153,7 @@ void CIRLowerContext::initBuiltinTypes(const clang::TargetInfo &Target, if (LangOpts.CharIsSigned) CharTy = initBuiltinType(clang::BuiltinType::Char_S); else - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } /// Convert a size in bits to a size in characters. @@ -168,7 +168,7 @@ int64_t CIRLowerContext::toBits(clang::CharUnits CharSize) const { clang::TypeInfoChars CIRLowerContext::getTypeInfoInChars(Type T) const { if (auto arrTy = dyn_cast(T)) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); clang::TypeInfo Info = getTypeInfo(T); return clang::TypeInfoChars(toCharUnitsFromBits(Info.Width), toCharUnitsFromBits(Info.Align), @@ -179,7 +179,7 @@ bool CIRLowerContext::isPromotableIntegerType(Type T) const { // HLSL doesn't promote all small integer types to int, it // just uses the rank-based promotion rules for all types. if (::cir::MissingFeatures::langOpts()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // FIXME(cir): CIR does not distinguish between char, short, etc. So we just // assume it is promotable if smaller than 32 bits. This is wrong since, for @@ -198,7 +198,7 @@ bool CIRLowerContext::isPromotableIntegerType(Type T) const { // TODO(cir): CIR doesn't know if a integer originated from an enum. Improve // CIR or add an AST query here. if (::cir::MissingFeatures::typeGetAsEnumType()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } return false; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index 664fd05ea658..9dd5f8de95a6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -69,7 +69,7 @@ class CIRToCIRArgMapping { const ::cir::ABIArgInfo &RetAI = FI.getReturnInfo(); if (RetAI.getKind() == ::cir::ABIArgInfo::Indirect) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } unsigned ArgNo = 0; @@ -84,7 +84,7 @@ class CIRToCIRArgMapping { auto &IRArgs = ArgInfo[ArgNo]; if (::cir::MissingFeatures::argumentPadding()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } switch (AI.getKind()) { @@ -94,14 +94,14 @@ class CIRToCIRArgMapping { cir_tl_assert(AI.getCoerceToType() && "Missing coerced type!!"); StructType STy = dyn_cast(AI.getCoerceToType()); if (AI.isDirect() && AI.getCanBeFlattened() && STy) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { IRArgs.NumberOfArgs = 1; } break; } default: - llvm_unreachable("Missing ABIArgInfo::Kind"); + cir_unreachable("Missing ABIArgInfo::Kind"); } if (IRArgs.NumberOfArgs > 0) { @@ -117,7 +117,7 @@ class CIRToCIRArgMapping { cir_tl_assert(ArgNo == ArgInfo.size()); if (::cir::MissingFeatures::inallocaArgs()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } TotalIRArgs = IRArgNo; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index 3cd27c35cf55..be73937e9772 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -62,7 +62,7 @@ bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { // If C++ prohibits us from making a copy, return by address. if (::cir::MissingFeatures::recordDeclCanPassInRegisters()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); return false; } @@ -84,12 +84,12 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { return new ItaniumCXXABI(LM); case clang::TargetCXXABI::Microsoft: - llvm_unreachable("Microsoft ABI is not Itanium-based"); + cir_unreachable("Microsoft ABI is not Itanium-based"); default: - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } - llvm_unreachable("bad ABI kind"); + cir_unreachable("bad ABI kind"); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index af036efef8cc..f7386b4227fb 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -38,7 +38,7 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, cir_assert_or_abort(!::cir::MissingFeatures::isVarArg(), "NYI"); if (::cir::MissingFeatures::extParamInfo()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // TODO(cir): There's some CC stuff related to no-proto functions here, but @@ -61,7 +61,7 @@ static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { } cir_tl_assert(MissingFeatures::extParamInfo()); - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } /// Arrange the LLVM function layout for a value of the given function @@ -104,9 +104,9 @@ void LowerModule::constructAttributeList(StringRef Name, CallingConv = FI.getCallingConvention(); // FIXME(cir): No-return should probably be set in CIRGen (ABI-agnostic). if (MissingFeatures::noReturn()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (MissingFeatures::csmeCall()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // TODO(cir): Implement AddAttributesFromFunctionProtoType here. // TODO(cir): Implement AddAttributesFromOMPAssumes here. @@ -153,23 +153,23 @@ void LowerModule::constructAttributeList(StringRef Name, case ABIArgInfo::Ignore: break; default: - llvm_unreachable("Missing ABIArgInfo::Kind"); + cir_unreachable("Missing ABIArgInfo::Kind"); } if (!IsThunk) { if (MissingFeatures::qualTypeIsReferenceType()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } } // Attach attributes to sret. if (MissingFeatures::sretArgs()) { - llvm_unreachable("sret is NYI"); + cir_unreachable("sret is NYI"); } // Attach attributes to inalloca arguments. if (MissingFeatures::inallocaArgs()) { - llvm_unreachable("inalloca is NYI"); + cir_unreachable("inalloca is NYI"); } // Apply `nonnull`, `dereferencable(N)` and `align N` to the `this` argument, @@ -177,7 +177,7 @@ void LowerModule::constructAttributeList(StringRef Name, // FIXME: fix this properly, https://reviews.llvm.org/D100388 if (MissingFeatures::funcDeclIsCXXMethodDecl() || MissingFeatures::inallocaArgs()) { - llvm_unreachable("`this` argument attributes are NYI"); + cir_unreachable("`this` argument attributes are NYI"); } unsigned ArgNo = 0; @@ -190,7 +190,7 @@ void LowerModule::constructAttributeList(StringRef Name, // Add attribute for padding argument, if necessary. if (IRFunctionArgs.hasPaddingArg(ArgNo)) { - llvm_unreachable("Padding argument is NYI"); + cir_unreachable("Padding argument is NYI"); } // TODO(cir): Mark noundef arguments and return values. Although this @@ -212,18 +212,18 @@ void LowerModule::constructAttributeList(StringRef Name, [[fallthrough]]; case ABIArgInfo::Direct: if (ArgNo == 0 && ::cir::MissingFeatures::chainCall()) - llvm_unreachable("ChainCall is NYI"); + cir_unreachable("ChainCall is NYI"); else if (AI.getInReg()) - llvm_unreachable("InReg attribute is NYI"); + cir_unreachable("InReg attribute is NYI"); // Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); cir_tl_assert(!::cir::MissingFeatures::noFPClass()); break; default: - llvm_unreachable("Missing ABIArgInfo::Kind"); + cir_unreachable("Missing ABIArgInfo::Kind"); } if (::cir::MissingFeatures::qualTypeIsReferenceType()) { - llvm_unreachable("Reference handling is NYI"); + cir_unreachable("Reference handling is NYI"); } // TODO(cir): Missing some swift and nocapture stuff here. @@ -243,7 +243,7 @@ void LowerModule::constructAttributeList(StringRef Name, /// definition of the given function. const LowerFunctionInfo &LowerTypes::arrangeFunctionDeclaration(FuncOp fnOp) { if (MissingFeatures::funcDeclIsCXXMethodDecl()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); cir_tl_assert(!MissingFeatures::qualifiedTypes()); FuncType FTy = fnOp.getFunctionType(); @@ -283,7 +283,7 @@ const LowerFunctionInfo &LowerTypes::arrangeFreeFunctionType(FuncType FTy) { const LowerFunctionInfo &LowerTypes::arrangeGlobalDeclaration(FuncOp fnOp) { if (MissingFeatures::funcDeclIsCXXConstructorDecl() || MissingFeatures::funcDeclIsCXXDestructorDecl()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); return arrangeFunctionDeclaration(fnOp); } @@ -316,9 +316,9 @@ LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, // Compute ABI information. if (CC == llvm::CallingConv::SPIR_KERNEL) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else if (::cir::MissingFeatures::extParamInfo()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { // NOTE(cir): This corects the initial function info data. getABIInfo().computeInfo(*FI); // FIXME(cir): Args should be set to null. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 6dbefd138002..a9dc60234b6a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -37,7 +37,7 @@ namespace { Value buildAddressAtOffset(LowerFunction &LF, Value addr, const ABIArgInfo &info) { if (unsigned offset = info.getDirectOffset()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } return addr; } @@ -50,7 +50,7 @@ Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, uint64_t DstSize, LowerFunction &CGF) { // We can't dive into a zero-element struct. if (SrcSTy.getNumElements() == 0) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); Type FirstElt = SrcSTy.getMembers()[0]; @@ -79,7 +79,7 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, Type SrcTy = Src.getType(); Type DstTy = Dst.getType(); if (SrcTy == DstTy) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // FIXME(cir): We need a better way to handle datalayout queries. @@ -97,14 +97,14 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, PointerType DstPtrTy = dyn_cast(DstTy); // TODO(cir): Implement address space. if (SrcPtrTy && DstPtrTy && !::cir::MissingFeatures::addressSpace()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // If the source and destination are integer or pointer types, just do an // extension or truncation to the desired type. if ((isa(SrcTy) || isa(SrcTy)) && (isa(DstTy) || isa(DstTy))) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } llvm::TypeSize DstSize = CGF.LM.getDataLayout().getTypeAllocSize(DstTy); @@ -115,7 +115,7 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, // Dst = Dst.withElementType(SrcTy); CGF.buildAggregateStore(Src, Dst, DstIsVolatile); } else { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } } @@ -162,7 +162,7 @@ Value createCoercedValue(Value Src, Type Ty, LowerFunction &CGF) { // extension or truncation to the desired type. if ((isa(Ty) || isa(Ty)) && (isa(SrcTy) || isa(SrcTy))) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // If load is legal, just bitcast the src pointer. @@ -178,13 +178,13 @@ Value createCoercedValue(Value Src, Type Ty, LowerFunction &CGF) { return CGF.buildAggregateBitcast(Src, Ty); } - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } Value emitAddressAtOffset(LowerFunction &LF, Value addr, const ABIArgInfo &info) { if (unsigned offset = info.getDirectOffset()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } return addr; } @@ -218,7 +218,7 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { if ((isa(Ty) || isa(Ty)) && (isa(SrcTy) || isa(SrcTy))) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // If load is legal, just bitcast the src pointer. @@ -234,7 +234,7 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { Src); } - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } } // namespace @@ -301,7 +301,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // CGFunctionInfo::ArgInfo type with subsequent argument demotion. Type Ty = {}; if (isPromoted) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); else Ty = Arg.getType(); cir_tl_assert(!::cir::MissingFeatures::evaluationKind()); @@ -334,15 +334,15 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // ways. Copy the value into a less-restricted temporary. Value V = AI; if (::cir::MissingFeatures::extParamInfo()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // Ensure the argument is the correct type. if (V.getType() != ArgI.getCoerceToType()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (isPromoted) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); ArgVals.push_back(V); @@ -374,7 +374,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, StructType STy = dyn_cast(ArgI.getCoerceToType()); if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && STy.getNumElements() > 1) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { // Simple case, just do a coerced store of the argument into the alloca. cir_tl_assert(NumIRArgs == 1); @@ -385,7 +385,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // Match to what EmitParamDecl is expecting for this type. if (::cir::MissingFeatures::evaluationKind()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { // FIXME(cir): Should we have an ParamValue abstraction like in the // original codegen? @@ -405,12 +405,12 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, break; } default: - llvm_unreachable("Unhandled ABIArgInfo::Kind"); + cir_unreachable("Unhandled ABIArgInfo::Kind"); } } if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { // FIXME(cir): In the original codegen, EmitParamDecl is called here. It // is likely that said function considers ABI details during emission, so @@ -444,7 +444,7 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { // the load, zap the store, and usually zap the alloca. // NOTE(cir): This seems like a premature optimization case. Skipping it. if (::cir::MissingFeatures::returnValueDominatingStoreOptmiization()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // Otherwise, we have to do a simple load. else { @@ -470,7 +470,7 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { break; default: - llvm_unreachable("Unhandled ABIArgInfo::Kind"); + cir_unreachable("Unhandled ABIArgInfo::Kind"); } return success(); @@ -598,7 +598,7 @@ Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, // CIRGen. CallArgList Args; if (Chain) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // NOTE(cir): Call args were already emitted in CIRGen. Skip the evaluation // order done in CIRGen and just fetch the exiting arguments here. @@ -671,7 +671,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // If we're using inalloca, insert the allocation after the stack save. // FIXME: Do this earlier rather than hacking it in here! if (StructType ArgStruct = CallInfo.getArgStruct()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), CallInfo); @@ -680,7 +680,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // If the call returns a temporary with struct return, create a temporary // alloca to hold the result, unless one is given to us. if (RetAI.isIndirect() || RetAI.isCoerceAndExpand() || RetAI.isInAlloca()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } cir_tl_assert(!::cir::MissingFeatures::swift()); @@ -697,7 +697,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, const ABIArgInfo &ArgInfo = info_it->info; if (IRFunctionArgs.hasPaddingArg(ArgNo)) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -719,23 +719,23 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!isa(I->getType())) { V = *I; } else { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } if (::cir::MissingFeatures::extParamInfo()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } if (ArgInfo.getCoerceToType() != V.getType() && isa(V.getType())) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (FirstIRArg < IRFuncTy.getNumInputs() && V.getType() != IRFuncTy.getInput(FirstIRArg)) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (::cir::MissingFeatures::undef()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); IRCallArgs[FirstIRArg] = V; break; } @@ -743,7 +743,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // FIXME: Avoid the conversion through memory if possible. Value Src = {}; if (!isa(I->getType())) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { // NOTE(cir): L/RValue stuff are left for CIRGen to handle. Src = *I; @@ -757,7 +757,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // FCAs, so we flatten them if this is safe to do for this argument. StructType STy = dyn_cast(ArgInfo.getCoerceToType()); if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { // In the simple case, just pass the coerced loaded value. cir_tl_assert(NumIRArgs == 1); @@ -768,7 +768,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // since they are a ARM-specific feature. if (::cir::MissingFeatures::undef()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); IRCallArgs[FirstIRArg] = Load; } @@ -776,7 +776,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, } default: llvm::outs() << "Missing ABIArgInfo::Kind: " << ArgInfo.getKind() << "\n"; - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } } @@ -847,11 +847,11 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // Perhaps this section should handle CIR's boolean case. Value V = newCallOp.getResult(); if (V.getType() != RetIRTy) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); return V; } default: - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } } @@ -859,7 +859,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // compatibility, and the types match, use the llvm.vector.extract // intrinsic to perform the conversion. if (::cir::MissingFeatures::vectorType()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // FIXME(cir): Use return value slot here. @@ -891,7 +891,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, } default: llvm::errs() << "Unhandled ABIArgInfo kind: " << RetAI.getKind() << "\n"; - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } }(); @@ -908,7 +908,7 @@ Value LowerFunction::getUndefRValue(Type Ty) { return nullptr; llvm::outs() << "Missing undef handler for value type: " << Ty << "\n"; - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } ::cir::TypeEvaluationKind LowerFunction::getEvaluationKind(Type type) { @@ -918,7 +918,7 @@ ::cir::TypeEvaluationKind LowerFunction::getEvaluationKind(Type type) { if (isa(type)) return ::cir::TypeEvaluationKind::TEK_Scalar; - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index 47687cfa2235..89b493ec9eb6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -154,7 +154,7 @@ class LowerFunctionInfo final } unsigned getNumRequiredArgs() const { if (isVariadic()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); return arg_size(); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 88344533fe38..2a704bc505be 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -50,10 +50,10 @@ static CIRCXXABI *createCXXABI(LowerModule &CGM) { case clang::TargetCXXABI::XL: return CreateItaniumCXXABI(CGM); case clang::TargetCXXABI::Microsoft: - llvm_unreachable("Windows ABI NYI"); + cir_unreachable("Windows ABI NYI"); } - llvm_unreachable("invalid C++ ABI kind"); + cir_unreachable("invalid C++ ABI kind"); } static std::unique_ptr @@ -66,18 +66,18 @@ createTargetLoweringInfo(LowerModule &LM) { case llvm::Triple::aarch64: { AArch64ABIKind Kind = AArch64ABIKind::AAPCS; if (Target.getABI() == "darwinpcs") - llvm_unreachable("DarwinPCS ABI NYI"); + cir_unreachable("DarwinPCS ABI NYI"); else if (Triple.isOSWindows()) - llvm_unreachable("Windows ABI NYI"); + cir_unreachable("Windows ABI NYI"); else if (Target.getABI() == "aapcs-soft") - llvm_unreachable("AAPCS-soft ABI NYI"); + cir_unreachable("AAPCS-soft ABI NYI"); return createAArch64TargetLoweringInfo(LM, Kind); } case llvm::Triple::x86_64: { switch (Triple.getOS()) { case llvm::Triple::Win32: - llvm_unreachable("Windows ABI NYI"); + cir_unreachable("Windows ABI NYI"); default: return createX86_64TargetLoweringInfo(LM, X86AVXABILevel::None); } @@ -85,7 +85,7 @@ createTargetLoweringInfo(LowerModule &LM) { case llvm::Triple::spirv64: return createSPIRVTargetLoweringInfo(LM); default: - llvm_unreachable("ABI NYI"); + cir_unreachable("ABI NYI"); } } @@ -143,29 +143,29 @@ void LowerModule::setFunctionAttributes(FuncOp oldFn, FuncOp newFn, // If we plan on emitting this inline builtin, we can't treat it as a builtin. if (MissingFeatures::funcDeclIsInlineBuiltinDeclaration()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } if (MissingFeatures::funcDeclIsReplaceableGlobalAllocationFunction()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } if (MissingFeatures::funcDeclIsCXXConstructorDecl() || MissingFeatures::funcDeclIsCXXDestructorDecl()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); else if (MissingFeatures::funcDeclIsCXXMethodDecl()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // NOTE(cir) Skipping emissions that depend on codegen options, as well as // sanitizers handling here. Do this in CIRGen. if (MissingFeatures::langOpts() && MissingFeatures::openMP()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // NOTE(cir): Skipping more things here that depend on codegen options. if (MissingFeatures::extParamInfo()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } } @@ -196,7 +196,7 @@ LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { // Set up ABI-specific function attributes. setFunctionAttributes(op, newFn, false, /*IsThunk=*/false); if (MissingFeatures::extParamInfo()) { - llvm_unreachable("ExtraAttrs are NYI"); + cir_unreachable("ExtraAttrs are NYI"); } // Is a function definition: handle the body. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index fa1e34140167..cb444283e735 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -29,7 +29,7 @@ unsigned LowerTypes::clangCallConvToLLVMCallConv(clang::CallingConv CC) { case clang::CC_C: return llvm::CallingConv::C; default: - llvm_unreachable("calling convention NYI"); + cir_unreachable("calling convention NYI"); } } @@ -53,7 +53,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { resultType = VoidType::get(getMLIRContext()); break; default: - llvm_unreachable("Missing ABIArgInfo::Kind"); + cir_unreachable("Missing ABIArgInfo::Kind"); } CIRToCIRArgMapping IRFunctionArgs(getContext(), FI, true); @@ -95,7 +95,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { break; } default: - llvm_unreachable("Missing ABIArgInfo::Kind"); + cir_unreachable("Missing ABIArgInfo::Kind"); } } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index 48855caf617a..936739e2831a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -77,7 +77,7 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() { bool EmptySubobjectMap::canPlaceFieldAtOffset(const Type Ty, clang::CharUnits Offset) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } //===-----------------------------------------------------------------------==// @@ -257,7 +257,7 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { // Honor the default struct packing maximum alignment flag. if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // mac68k alignment supersedes maximum field alignment and attribute aligned, @@ -265,16 +265,16 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { // allude to additional (more complicated) semantics, especially with regard // to bit-fields, but gcc appears not to follow that. if (::cir::MissingFeatures::declHasAlignMac68kAttr()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { if (::cir::MissingFeatures::declHasAlignNaturalAttr()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (::cir::MissingFeatures::declHasMaxFieldAlignmentAttr()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (::cir::MissingFeatures::declGetMaxAlignment()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } HandledFirstNonOverlappingEmptyField = @@ -283,7 +283,7 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { // If there is an external AST source, ask it for the various offsets. if (const auto RT = dyn_cast(Ty)) { if (::cir::MissingFeatures::astContextGetExternalSource()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } } } @@ -303,7 +303,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, Context.getTargetInfo().defaultsToAIXPowerAlignment(); bool FoundFirstNonOverlappingEmptyFieldForAIX = false; if (DefaultsToAIXPowerAlignment && !HandledFirstNonOverlappingEmptyField) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); @@ -335,15 +335,15 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, }; if (isa(D) && cast(D).getSize() == 0) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { setDeclInfo(false /* IsIncompleteArrayType */); if (::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (IsMsStruct) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } cir_tl_assert(!::cir::MissingFeatures::recordDeclIsPacked() && @@ -375,7 +375,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, clang::CharUnits PreferredAlign = FieldAlign; if (DefaultsToAIXPowerAlignment && !alignedAttrCanDecreaseAIXAlignment() && (FoundFirstNonOverlappingEmptyFieldForAIX || IsNaturalAlign)) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // The align if the field is not packed. This is to check if the attribute @@ -393,15 +393,15 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, // The maximum field alignment overrides the aligned attribute. if (!MaxFieldAlignment.isZero()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } if (!FieldPacked) FieldAlign = UnpackedFieldAlign; if (DefaultsToAIXPowerAlignment) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (FieldPacked) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } clang::CharUnits AlignTo = @@ -411,13 +411,13 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, UnpackedFieldOffset = UnpackedFieldOffset.alignTo(UnpackedFieldAlign); if (UseExternalLayout) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { if (!IsUnion && EmptySubobjects) { // Check if we can place the field at this offset. while (/*!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)*/ false) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } } } @@ -431,21 +431,21 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, Context.toBits(UnpackedFieldAlign), FieldPacked, D); if (InsertExtraPadding) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // Reserve space for this field. if (!IsOverlappingEmptyField) { // uint64_t EffectiveFieldSizeInBits = Context.toBits(EffectiveFieldSize); if (IsUnion) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); else setDataSize(FieldOffset + EffectiveFieldSize); PaddedFieldSize = std::max(PaddedFieldSize, FieldOffset + FieldSize); setSize(std::max(getSizeInBits(), getDataSizeInBits())); } else { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // Remember max struct/class ABI-specified alignment. @@ -461,7 +461,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, cir_tl_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); if (Packed && !FieldPacked && PackedFieldAlign < FieldAlign) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } void ItaniumRecordLayoutBuilder::layoutFields(const StructType D) { @@ -511,7 +511,7 @@ void ItaniumRecordLayoutBuilder::checkFieldPadding( // We let objc ivars without warning, objc interfaces generally are not used // for padding tricks. if (::cir::MissingFeatures::objCIvarDecls()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // FIXME(cir): Should the following be skiped in CIR? // Don't warn about structs created without a SourceLocation. This can @@ -578,10 +578,10 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { // mode; fortunately, that is true because we want to assign // consistently semantics to the type-traits intrinsics (or at // least as many of them as possible). - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } - llvm_unreachable("bad tail-padding use kind"); + cir_unreachable("bad tail-padding use kind"); } } // namespace @@ -602,7 +602,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { const CIRRecordLayout *NewEntry = nullptr; if (isMsLayout(*this)) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { // FIXME(cir): Add if-else separating C and C++ records. cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl()); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index 28b363664387..246f75c84e57 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -74,7 +74,7 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { case Kind::offload_generic: return 0; default: - llvm_unreachable("Unknown CIR address space for this target"); + cir_unreachable("Unknown CIR address space for this target"); } } }; @@ -92,20 +92,20 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, // Large vector types should be returned via memory. if (isa(RetTy) && getContext().getTypeSize(RetTy) > 128) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (!isAggregateTypeForABI(RetTy)) { // NOTE(cir): Skip enum handling. if (MissingFeature::fixedSizeIntType()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS() ? ABIArgInfo::getExtend(RetTy) : ABIArgInfo::getDirect()); } - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } ABIArgInfo @@ -115,13 +115,13 @@ AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, // TODO(cir): check for illegal vector types. if (MissingFeature::vectorType()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (!isAggregateTypeForABI(Ty)) { // NOTE(cir): Enum is IntType in CIR. Skip enum handling here. if (MissingFeature::fixedSizeIntType()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS() ? ABIArgInfo::getExtend(Ty) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index 1e02c9c370bd..749e9144676d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -40,12 +40,12 @@ class LoweringPrepareAArch64CXXABI : public LoweringPrepareItaniumCXXABI { mlir::Value lowerMSVAArg(cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, const cir::CIRDataLayout &datalayout) { - llvm_unreachable("MSVC ABI not supported yet"); + cir_unreachable("MSVC ABI not supported yet"); } mlir::Value lowerDarwinVAArg(cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, const cir::CIRDataLayout &datalayout) { - llvm_unreachable("Darwin ABI not supported yet"); + cir_unreachable("Darwin ABI not supported yet"); } }; } // namespace @@ -83,13 +83,13 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( auto baseTy = opResTy; if (mlir::isa(baseTy)) { - llvm_unreachable("ArrayType VAArg loweing NYI"); + cir_unreachable("ArrayType VAArg loweing NYI"); } // numRegs may not be 1 if ArrayType is supported. unsigned numRegs = 1; if (Kind == AArch64ABIKind::AAPCSSoft) { - llvm_unreachable("AAPCSSoft cir.var_arg lowering NYI"); + cir_unreachable("AAPCSSoft cir.var_arg lowering NYI"); } bool IsFPR = mlir::cir::isAnyFloatingPointType(baseTy); @@ -189,7 +189,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( if (!IsFPR && !isIndirect && tyAlign.getQuantity() > 8) { cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); - llvm_unreachable("register alignment correction NYI"); + cir_unreachable("register alignment correction NYI"); } // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. @@ -225,7 +225,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( if (isIndirect) { cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); - llvm_unreachable("indirect arg passing NYI"); + cir_unreachable("indirect arg passing NYI"); } // TODO: isHFA, numMembers and base should be query result from query @@ -247,7 +247,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // contiguously. cir_tl_assert(!isIndirect && "Homogeneous aggregates should be passed directly"); - llvm_unreachable("Homogeneous aggregates NYI"); + cir_unreachable("Homogeneous aggregates NYI"); } else { cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // TODO: slotSize should be query result about alignment. @@ -294,7 +294,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( if (!isIndirect && tyAlign.getQuantity() > 8) { // TODO: this algorithm requres casting from ptr type to int type, then // back to ptr type thus needs careful handling. NYI now. - llvm_unreachable("alignment greater than 8 NYI"); + cir_unreachable("alignment greater than 8 NYI"); } // All stack slots are multiples of 8 bytes. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index b35476225b3d..b7f5fdd9215f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -169,5 +169,5 @@ mlir::Value LoweringPrepareItaniumCXXABI::lowerVAArg( const ::cir::CIRDataLayout &datalayout) { // There is no generic cir lowering for var_arg, here we fail // so to prevent attempt of calling lowerVAArg for ItaniumCXXABI - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp index f5540e221d9d..b97cb490961e 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp @@ -57,7 +57,7 @@ class SPIRVTargetLoweringInfo : public TargetLoweringInfo { case Kind::offload_generic: return 4; default: - llvm_unreachable("Unknown CIR address space for this target"); + cir_unreachable("Unknown CIR address space for this target"); } } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 81e7c513ade7..30fb7e4f93c2 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -30,7 +30,7 @@ unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { case X86AVXABILevel::None: return 128; } - llvm_unreachable("Unknown AVXLevel"); + cir_unreachable("Unknown AVXLevel"); } /// Return true if the specified [start,end) bit range is known to either be @@ -50,7 +50,7 @@ static bool BitsContainNoUserData(Type Ty, unsigned StartBit, unsigned EndBit, return true; if (auto arrTy = llvm::dyn_cast(Ty)) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } if (auto structTy = llvm::dyn_cast(Ty)) { @@ -59,7 +59,7 @@ static bool BitsContainNoUserData(Type Ty, unsigned StartBit, unsigned EndBit, // If this is a C++ record, check the bases first. if (::cir::MissingFeatures::isCXXRecordDecl() || ::cir::MissingFeatures::getCXXRecordBases()) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // Verify that no field has data that overlaps the region of interest. Yes @@ -208,7 +208,7 @@ class X86_64TargetLoweringInfo : public TargetLoweringInfo { case Kind::offload_generic: return 0; default: - llvm_unreachable("Unknown CIR address space for this target"); + cir_unreachable("Unknown CIR address space for this target"); } } }; @@ -256,17 +256,17 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger // than eight eightbytes, ..., it has class MEMORY. if (Size > 512) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial // copy constructor or a non-trivial destructor, it is passed by invisible // reference. if (getRecordArgABI(RT, getCXXABI())) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // Assume variable sized types are passed in memory. if (::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); const auto &Layout = getContext().getCIRRecordLayout(Ty); @@ -292,7 +292,7 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, // Ignore padding bit-fields. if (BitField && !::cir::MissingFeatures::fieldDeclisUnnamedBitField()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than // eight eightbytes, or it contains unaligned fields, it has class @@ -306,11 +306,11 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, // than 128. if (Size > 128 && ((!IsUnion && Size != getContext().getTypeSize(FT)) || Size > getNativeVectorSizeForAVXABI(AVXLevel))) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // Note, skip this test for bit-fields, see below. if (!BitField && Offset % getContext().getTypeAlign(RT)) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // Classify this field. @@ -325,7 +325,7 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, // structure to be passed in memory even if unaligned, and // therefore they can straddle an eightbyte. if (BitField) { - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { classify(FT, Offset, FieldLo, FieldHi, isNamedArg); } @@ -347,7 +347,7 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, } llvm::outs() << "Missing X86 classification for non-builtin types\n"; - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } /// Return a type that will be passed by the backend in the low 8 bytes of an @@ -365,12 +365,12 @@ Type X86_64ABIInfo::GetSSETypeAtOffset(Type IRType, unsigned IROffset, Type T1 = {}; unsigned T0Size = TD.getTypeAllocSize(T0); if (SourceSize > T0Size) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); if (T1 == nullptr) { // Check if IRType is a half/bfloat + float. float type will be in // IROffset+4 due to its alignment. if (isa(T0) && SourceSize > 4) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // If we can't get a second FP type, return a simple half or float. // avx512fp16-abi.c:pr51813_2 shows it works to return float for // {float, i8} too. @@ -378,7 +378,7 @@ Type X86_64ABIInfo::GetSSETypeAtOffset(Type IRType, unsigned IROffset, return T0; } - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } /// The ABI specifies that a value should be passed in an 8-byte GPR. This @@ -507,14 +507,14 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { break; default: - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } // If a high part was specified, merge it together with the low part. It is // known to pass in the high eightbyte of the result. We do this by forming // a first class struct aggregate with the high and low part: {low, high} if (HighPart) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); return ABIArgInfo::getDirect(resType); } @@ -580,11 +580,11 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, case Class::NoClass: break; default: - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } if (HighPart) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); return ABIArgInfo::getDirect(ResType); } @@ -595,7 +595,7 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { // using __attribute__((ms_abi)). In such case to correctly emit Win64 // compatible code delegate this call to WinX86_64ABIInfo::computeInfo. if (CallingConv == llvm::CallingConv::Win64) { - llvm_unreachable("Win64 CC is NYI"); + cir_unreachable("Win64 CC is NYI"); } bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; @@ -607,7 +607,7 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { if (!::mlir::cir::classifyReturnType(getCXXABI(), FI, *this)) { if (IsRegCall || ::cir::MissingFeatures::regCall()) { - llvm_unreachable("RegCall is NYI"); + cir_unreachable("RegCall is NYI"); } else FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); } @@ -615,13 +615,13 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { // If the return value is indirect, then the hidden argument is consuming // one integer register. if (FI.getReturnInfo().isIndirect()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); else if (NeededSSE && MaxVectorWidth) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); // The chain argument effectively gives us another free register. if (::cir::MissingFeatures::chainCall()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); unsigned NumRequiredArgs = FI.getNumRequiredArgs(); // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers @@ -632,7 +632,7 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { bool IsNamedArg = ArgNo < NumRequiredArgs; if (IsRegCall && ::cir::MissingFeatures::regCall()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); else it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, NeededSSE, IsNamedArg); @@ -645,9 +645,9 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { FreeIntRegs -= NeededInt; FreeSSERegs -= NeededSSE; if (::cir::MissingFeatures::vectorType()) - llvm_unreachable("NYI"); + cir_unreachable("NYI"); } else { - llvm_unreachable("Indirect results are NYI"); + cir_unreachable("Indirect results are NYI"); } } } From 1384e8edbd153231a40db76b744914ee2675c0c7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 14 Oct 2024 20:35:06 -0700 Subject: [PATCH 1945/2301] [CIR] Make the asserts to display suggestion for -fno-clangir-call-conv-lowering While here, add more unrecheables to cover some of the current errors, so that our users can see a clear message instead of a random cast assert of sorts. This covers at least all crashes seen when removing -fno-clangir-call-conv-lowering from all tests, there are probably other things we'll find as we exercise this path. --- clang/include/clang/CIR/MissingFeatures.h | 12 ++++++++++-- .../lib/CIR/Dialect/Transforms/CallConvLowering.cpp | 4 +++- .../Transforms/TargetLowering/LowerFunction.cpp | 3 +++ 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 0946f44991b1..db13fccc343c 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -15,6 +15,8 @@ #ifndef CLANG_CIR_MISSINGFEATURES_H #define CLANG_CIR_MISSINGFEATURES_H +#include + constexpr bool cirMissingFeatureAssertionMode = true; // Change to `false` to use llvm_unreachable @@ -24,9 +26,15 @@ constexpr bool cirMissingFeatureAssertionMode = " point." // Special assertion to be used in the target lowering library. -#define cir_tl_assert(cond) assert((cond) && NOTE); +#define cir_tl_assert(cond) \ + do { \ + if (!(cond)) \ + llvm::errs() << NOTE << "\n"; \ + assert((cond)); \ + } while (0) -// Special +// Special version of cir_unreachable to give more info to the user on how +// to temporaruly disable target lowering. #define cir_unreachable(msg) \ do { \ llvm_unreachable(msg NOTE); \ diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 7026f046ce97..3382e4b7d7f7 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -51,7 +51,9 @@ struct CallConvLoweringPattern : public OpRewritePattern { continue; } - auto callOp = cast(call.getUser()); + auto callOp = dyn_cast_or_null(call.getUser()); + if (!callOp) + cir_unreachable("NYI empty callOp"); if (lowerModule->rewriteFunctionCall(callOp, op).failed()) return failure(); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index a9dc60234b6a..b66c48e62bfb 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -210,6 +210,9 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { if (isa(SrcTy) && isa(Ty)) return createBitcast(Src, Ty, LF); + auto intTy = dyn_cast(Ty); + if (intTy && !intTy.isPrimitive()) + cir_unreachable("non-primitive types NYI"); llvm::TypeSize DstSize = LF.LM.getDataLayout().getTypeAllocSize(Ty); // FIXME(cir): Do we need the EnterStructPointerForCoercedAccess routine here? From 0fa29af0d877f48c1ae613a6bf2e7e16357d810b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 14 Oct 2024 20:41:49 -0700 Subject: [PATCH 1946/2301] [CIR][NFC] Massively rename workarounds for callconv lowering These are not meant to be used by any other component, make sure it's very specific. --- clang/include/clang/CIR/MissingFeatures.h | 22 +-- .../Dialect/Transforms/CallConvLowering.cpp | 5 +- .../Transforms/TargetLowering/ABIInfo.cpp | 2 +- .../Transforms/TargetLowering/ABIInfoImpl.cpp | 8 +- .../TargetLowering/CIRLowerContext.cpp | 35 ++-- .../TargetLowering/CIRRecordLayout.cpp | 8 +- .../TargetLowering/CIRToCIRArgMapping.h | 20 +-- .../TargetLowering/ItaniumCXXABI.cpp | 14 +- .../Transforms/TargetLowering/LowerCall.cpp | 74 ++++---- .../TargetLowering/LowerFunction.cpp | 163 +++++++++--------- .../TargetLowering/LowerFunctionInfo.h | 11 +- .../Transforms/TargetLowering/LowerModule.cpp | 30 ++-- .../Transforms/TargetLowering/LowerModule.h | 2 +- .../Transforms/TargetLowering/LowerTypes.cpp | 20 +-- .../TargetLowering/RecordLayoutBuilder.cpp | 125 +++++++------- .../TargetLowering/Targets/AArch64.cpp | 20 +-- .../Targets/LoweringPrepareAArch64CXXABI.cpp | 71 ++++---- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 16 +- .../TargetLowering/Targets/SPIR.cpp | 4 +- .../Transforms/TargetLowering/Targets/X86.cpp | 105 +++++------ 20 files changed, 382 insertions(+), 373 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index db13fccc343c..18974d3286b8 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -17,37 +17,37 @@ #include -constexpr bool cirMissingFeatureAssertionMode = +constexpr bool cirCConvAssertionMode = true; // Change to `false` to use llvm_unreachable -#define NOTE \ +#define CIR_CCONV_NOTE \ " Target lowering is now required. To workaround use " \ "-fno-clangir-call-conv-lowering. This flag is going to be removed at some" \ " point." // Special assertion to be used in the target lowering library. -#define cir_tl_assert(cond) \ +#define cir_cconv_assert(cond) \ do { \ if (!(cond)) \ - llvm::errs() << NOTE << "\n"; \ + llvm::errs() << CIR_CCONV_NOTE << "\n"; \ assert((cond)); \ } while (0) -// Special version of cir_unreachable to give more info to the user on how +// Special version of cir_cconv_unreachable to give more info to the user on how // to temporaruly disable target lowering. -#define cir_unreachable(msg) \ +#define cir_cconv_unreachable(msg) \ do { \ - llvm_unreachable(msg NOTE); \ + llvm_unreachable(msg CIR_CCONV_NOTE); \ } while (0) // Some assertions knowingly generate incorrect code. This macro allows us to // switch between using `assert` and `llvm_unreachable` for these cases. -#define cir_assert_or_abort(cond, msg) \ +#define cir_cconv_assert_or_abort(cond, msg) \ do { \ - if (cirMissingFeatureAssertionMode) { \ - assert((cond) && msg NOTE); \ + if (cirCConvAssertionMode) { \ + assert((cond) && msg CIR_CCONV_NOTE); \ } else { \ - llvm_unreachable(msg NOTE); \ + llvm_unreachable(msg CIR_CCONV_NOTE); \ } \ } while (0) diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 3382e4b7d7f7..9026d5135031 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -47,13 +47,14 @@ struct CallConvLoweringPattern : public OpRewritePattern { for (auto call : calls.value()) { // FIXME(cir): Function pointers are ignored. if (isa(call.getUser())) { - cir_assert_or_abort(!::cir::MissingFeatures::ABIFuncPtr(), "NYI"); + cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABIFuncPtr(), + "NYI"); continue; } auto callOp = dyn_cast_or_null(call.getUser()); if (!callOp) - cir_unreachable("NYI empty callOp"); + cir_cconv_unreachable("NYI empty callOp"); if (lowerModule->rewriteFunctionCall(callOp, op).failed()) return failure(); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 6cb69c7eeb88..f5cb64059d32 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -37,7 +37,7 @@ bool ABIInfo::isPromotableIntegerTypeForABI(Type Ty) const { if (getContext().isPromotableIntegerType(Ty)) return true; - cir_tl_assert(!::cir::MissingFeatures::fixedWidthIntegers()); + cir_cconv_assert(!::cir::MissingFeatures::fixedWidthIntegers()); return false; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index c76808ce1086..493ddffdce3d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -26,21 +26,21 @@ bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, Type Ty = FI.getReturnType(); if (const auto RT = dyn_cast(Ty)) { - cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl()); + cir_cconv_assert(!::cir::MissingFeatures::isCXXRecordDecl()); } return CXXABI.classifyReturnType(FI); } bool isAggregateTypeForABI(Type T) { - cir_tl_assert(!::cir::MissingFeatures::functionMemberPointerType()); + cir_cconv_assert(!::cir::MissingFeatures::functionMemberPointerType()); return !LowerFunction::hasScalarEvaluationKind(T); } Type useFirstFieldIfTransparentUnion(Type Ty) { if (auto RT = dyn_cast(Ty)) { if (RT.isUnion()) - cir_assert_or_abort( + cir_cconv_assert_or_abort( !::cir::MissingFeatures::ABITransparentUnionHandling(), "NYI"); } return Ty; @@ -49,7 +49,7 @@ Type useFirstFieldIfTransparentUnion(Type Ty) { CIRCXXABI::RecordArgABI getRecordArgABI(const StructType RT, CIRCXXABI &CXXABI) { if (::cir::MissingFeatures::typeIsCXXRecordDecl()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } return CXXABI.getRecordArgABI(RT); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index a6ae8e8a9df5..c4912c651d21 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -55,8 +55,8 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { } else if (isa(T)) { typeKind = clang::Type::Record; } else { - cir_assert_or_abort(!::cir::MissingFeatures::ABIClangTypeKind(), - "Unhandled type class"); + cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABIClangTypeKind(), + "Unhandled type class"); // FIXME(cir): Completely wrong. Just here to make it non-blocking. typeKind = clang::Type::Builtin; } @@ -94,34 +94,35 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { Align = Target->getDoubleAlign(); break; } - cir_unreachable("Unknown builtin type!"); + cir_cconv_unreachable("Unknown builtin type!"); break; } case clang::Type::Record: { const auto RT = dyn_cast(T); - cir_tl_assert(!::cir::MissingFeatures::tagTypeClassAbstraction()); + cir_cconv_assert(!::cir::MissingFeatures::tagTypeClassAbstraction()); // Only handle TagTypes (names types) for now. - cir_tl_assert(RT.getName() && "Anonymous record is NYI"); + cir_cconv_assert(RT.getName() && "Anonymous record is NYI"); // NOTE(cir): Clang does some hanlding of invalid tagged declarations here. // Not sure if this is necessary in CIR. if (::cir::MissingFeatures::typeGetAsEnumType()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } const CIRRecordLayout &Layout = getCIRRecordLayout(RT); Width = toBits(Layout.getSize()); Align = toBits(Layout.getAlignment()); - cir_tl_assert(!::cir::MissingFeatures::recordDeclHasAlignmentAttr()); + cir_cconv_assert(!::cir::MissingFeatures::recordDeclHasAlignmentAttr()); break; } default: - cir_unreachable("Unhandled type class"); + cir_cconv_unreachable("Unhandled type class"); } - cir_tl_assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); + cir_cconv_assert(llvm::isPowerOf2_32(Align) && + "Alignment must be power of 2"); return clang::TypeInfo(Width, Align, AlignRequirement); } @@ -129,13 +130,13 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { Type Ty; // NOTE(cir): Clang does more stuff here. Not sure if we need to do the same. - cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_cconv_assert(!::cir::MissingFeatures::qualifiedTypes()); switch (K) { case clang::BuiltinType::Char_S: Ty = IntType::get(getMLIRContext(), 8, true); break; default: - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } Types.push_back(Ty); @@ -144,8 +145,8 @@ Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { void CIRLowerContext::initBuiltinTypes(const clang::TargetInfo &Target, const clang::TargetInfo *AuxTarget) { - cir_tl_assert((!this->Target || this->Target == &Target) && - "Incorrect target reinitialization"); + cir_cconv_assert((!this->Target || this->Target == &Target) && + "Incorrect target reinitialization"); this->Target = &Target; this->AuxTarget = AuxTarget; @@ -153,7 +154,7 @@ void CIRLowerContext::initBuiltinTypes(const clang::TargetInfo &Target, if (LangOpts.CharIsSigned) CharTy = initBuiltinType(clang::BuiltinType::Char_S); else - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } /// Convert a size in bits to a size in characters. @@ -168,7 +169,7 @@ int64_t CIRLowerContext::toBits(clang::CharUnits CharSize) const { clang::TypeInfoChars CIRLowerContext::getTypeInfoInChars(Type T) const { if (auto arrTy = dyn_cast(T)) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); clang::TypeInfo Info = getTypeInfo(T); return clang::TypeInfoChars(toCharUnitsFromBits(Info.Width), toCharUnitsFromBits(Info.Align), @@ -179,7 +180,7 @@ bool CIRLowerContext::isPromotableIntegerType(Type T) const { // HLSL doesn't promote all small integer types to int, it // just uses the rank-based promotion rules for all types. if (::cir::MissingFeatures::langOpts()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // FIXME(cir): CIR does not distinguish between char, short, etc. So we just // assume it is promotable if smaller than 32 bits. This is wrong since, for @@ -198,7 +199,7 @@ bool CIRLowerContext::isPromotableIntegerType(Type T) const { // TODO(cir): CIR doesn't know if a integer originated from an enum. Improve // CIR or add an AST query here. if (::cir::MissingFeatures::typeGetAsEnumType()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } return false; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp index 9bec714e6376..76a8f60bd549 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp @@ -38,17 +38,17 @@ CIRRecordLayout::CIRRecordLayout( FieldOffsets.insert(FieldOffsets.end(), fieldoffsets.begin(), fieldoffsets.end()); - cir_tl_assert(!PrimaryBase && "Layout for class with inheritance is NYI"); + cir_cconv_assert(!PrimaryBase && "Layout for class with inheritance is NYI"); // CXXInfo->PrimaryBase.setPointer(PrimaryBase); - cir_tl_assert(!IsPrimaryBaseVirtual && - "Layout for virtual base class is NYI"); + cir_cconv_assert(!IsPrimaryBaseVirtual && + "Layout for virtual base class is NYI"); // CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual); CXXInfo->NonVirtualSize = nonvirtualsize; CXXInfo->NonVirtualAlignment = nonvirtualalignment; CXXInfo->PreferredNVAlignment = preferrednvalignment; CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject; // FIXME(cir): Initialize base classes offsets. - cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_cconv_assert(!::cir::MissingFeatures::getCXXRecordBases()); CXXInfo->HasOwnVFPtr = hasOwnVFPtr; CXXInfo->VBPtrOffset = vbptroffset; CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index 9dd5f8de95a6..139f279385e6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -58,7 +58,7 @@ class CIRToCIRArgMapping { unsigned totalIRArgs() const { return TotalIRArgs; } bool hasPaddingArg(unsigned ArgNo) const { - cir_tl_assert(ArgNo < ArgInfo.size()); + cir_cconv_assert(ArgNo < ArgInfo.size()); return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; } @@ -69,7 +69,7 @@ class CIRToCIRArgMapping { const ::cir::ABIArgInfo &RetAI = FI.getReturnInfo(); if (RetAI.getKind() == ::cir::ABIArgInfo::Indirect) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } unsigned ArgNo = 0; @@ -77,31 +77,31 @@ class CIRToCIRArgMapping { onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); for (LowerFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; ++I, ++ArgNo) { - cir_tl_assert(I != FI.arg_end()); + cir_cconv_assert(I != FI.arg_end()); // Type ArgType = I->type; const ::cir::ABIArgInfo &AI = I->info; // Collect data about IR arguments corresponding to Clang argument ArgNo. auto &IRArgs = ArgInfo[ArgNo]; if (::cir::MissingFeatures::argumentPadding()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } switch (AI.getKind()) { case ::cir::ABIArgInfo::Extend: case ::cir::ABIArgInfo::Direct: { // FIXME(cir): handle sseregparm someday... - cir_tl_assert(AI.getCoerceToType() && "Missing coerced type!!"); + cir_cconv_assert(AI.getCoerceToType() && "Missing coerced type!!"); StructType STy = dyn_cast(AI.getCoerceToType()); if (AI.isDirect() && AI.getCanBeFlattened() && STy) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { IRArgs.NumberOfArgs = 1; } break; } default: - cir_unreachable("Missing ABIArgInfo::Kind"); + cir_cconv_unreachable("Missing ABIArgInfo::Kind"); } if (IRArgs.NumberOfArgs > 0) { @@ -114,10 +114,10 @@ class CIRToCIRArgMapping { if (IRArgNo == 1 && SwapThisWithSRet) IRArgNo++; } - cir_tl_assert(ArgNo == ArgInfo.size()); + cir_cconv_assert(ArgNo == ArgInfo.size()); if (::cir::MissingFeatures::inallocaArgs()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } TotalIRArgs = IRArgNo; @@ -126,7 +126,7 @@ class CIRToCIRArgMapping { /// Returns index of first IR argument corresponding to ArgNo, and their /// quantity. std::pair getIRArgs(unsigned ArgNo) const { - cir_tl_assert(ArgNo < ArgInfo.size()); + cir_cconv_assert(ArgNo < ArgInfo.size()); return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, ArgInfo[ArgNo].NumberOfArgs); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index be73937e9772..87a1c5061aef 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -46,9 +46,9 @@ class ItaniumCXXABI : public CIRCXXABI { // FIXME(cir): This expects a CXXRecordDecl! Not any record type. RecordArgABI getRecordArgABI(const StructType RD) const override { - cir_tl_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + cir_cconv_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); // If C++ prohibits us from making a copy, pass by address. - cir_tl_assert(!::cir::MissingFeatures::recordDeclCanPassInRegisters()); + cir_cconv_assert(!::cir::MissingFeatures::recordDeclCanPassInRegisters()); return RAA_Default; } }; @@ -62,7 +62,7 @@ bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { // If C++ prohibits us from making a copy, return by address. if (::cir::MissingFeatures::recordDeclCanPassInRegisters()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); return false; } @@ -76,7 +76,7 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { case clang::TargetCXXABI::AppleARM64: // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits // from ARMCXXABI. We'll have to follow suit. - cir_tl_assert(!::cir::MissingFeatures::appleArm64CXXABI()); + cir_cconv_assert(!::cir::MissingFeatures::appleArm64CXXABI()); return new ItaniumCXXABI(LM, /*UseARMMethodPtrABI=*/true, /*UseARMGuardVarABI=*/true); @@ -84,12 +84,12 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { return new ItaniumCXXABI(LM); case clang::TargetCXXABI::Microsoft: - cir_unreachable("Microsoft ABI is not Itanium-based"); + cir_cconv_unreachable("Microsoft ABI is not Itanium-based"); default: - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } - cir_unreachable("bad ABI kind"); + cir_cconv_unreachable("bad ABI kind"); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index f7386b4227fb..c314b6f3977c 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -23,9 +23,9 @@ const LowerFunctionInfo & arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, const OperandRange &args, const FuncType fnType, unsigned numExtraRequiredArgs, bool chainCall) { - cir_tl_assert(args.size() >= numExtraRequiredArgs); + cir_cconv_assert(args.size() >= numExtraRequiredArgs); - cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); + cir_cconv_assert(!::cir::MissingFeatures::extParamInfo()); // In most cases, there are no optional arguments. RequiredArgs required = RequiredArgs::All; @@ -35,17 +35,17 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, // FIXME(cir): Properly check if function is no-proto. if (/*IsPrototypedFunction=*/true) { if (fnType.isVarArg()) - cir_assert_or_abort(!::cir::MissingFeatures::isVarArg(), "NYI"); + cir_cconv_assert_or_abort(!::cir::MissingFeatures::isVarArg(), "NYI"); if (::cir::MissingFeatures::extParamInfo()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // TODO(cir): There's some CC stuff related to no-proto functions here, but // its skipped here since it requires CodeGen info. Maybe this information // could be embbed in the FuncOp during CIRGen. - cir_tl_assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); + cir_cconv_assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; return LT.arrangeLLVMFunctionInfo(fnType.getReturnType(), opts, fnType.getInputs(), required); @@ -60,8 +60,8 @@ static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { return; } - cir_tl_assert(MissingFeatures::extParamInfo()); - cir_unreachable("NYI"); + cir_cconv_assert(MissingFeatures::extParamInfo()); + cir_cconv_unreachable("NYI"); } /// Arrange the LLVM function layout for a value of the given function @@ -74,11 +74,11 @@ static void appendParameterTypes(SmallVectorImpl &prefix, FuncType fnTy) { static const LowerFunctionInfo & arrangeCIRFunctionInfo(LowerTypes &CGT, bool instanceMethod, SmallVectorImpl &prefix, FuncType fnTy) { - cir_tl_assert(!MissingFeatures::extParamInfo()); + cir_cconv_assert(!MissingFeatures::extParamInfo()); RequiredArgs Required = RequiredArgs::forPrototypePlus(fnTy, prefix.size()); // FIXME: Kill copy. appendParameterTypes(prefix, fnTy); - cir_tl_assert(!MissingFeatures::qualifiedTypes()); + cir_cconv_assert(!MissingFeatures::qualifiedTypes()); Type resultType = fnTy.getReturnType(); FnInfoOpts opts = @@ -104,13 +104,13 @@ void LowerModule::constructAttributeList(StringRef Name, CallingConv = FI.getCallingConvention(); // FIXME(cir): No-return should probably be set in CIRGen (ABI-agnostic). if (MissingFeatures::noReturn()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (MissingFeatures::csmeCall()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // TODO(cir): Implement AddAttributesFromFunctionProtoType here. // TODO(cir): Implement AddAttributesFromOMPAssumes here. - cir_tl_assert(!MissingFeatures::openMP()); + cir_cconv_assert(!MissingFeatures::openMP()); // TODO(cir): Skipping a bunch of AST queries here. We will need to partially // implement some of them as this section sets target-specific attributes @@ -147,29 +147,30 @@ void LowerModule::constructAttributeList(StringRef Name, [[fallthrough]]; case ABIArgInfo::Direct: if (RetAI.getInReg()) - cir_assert_or_abort(!::cir::MissingFeatures::ABIInRegAttribute(), "NYI"); - cir_tl_assert(!::cir::MissingFeatures::noFPClass()); + cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABIInRegAttribute(), + "NYI"); + cir_cconv_assert(!::cir::MissingFeatures::noFPClass()); break; case ABIArgInfo::Ignore: break; default: - cir_unreachable("Missing ABIArgInfo::Kind"); + cir_cconv_unreachable("Missing ABIArgInfo::Kind"); } if (!IsThunk) { if (MissingFeatures::qualTypeIsReferenceType()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } } // Attach attributes to sret. if (MissingFeatures::sretArgs()) { - cir_unreachable("sret is NYI"); + cir_cconv_unreachable("sret is NYI"); } // Attach attributes to inalloca arguments. if (MissingFeatures::inallocaArgs()) { - cir_unreachable("inalloca is NYI"); + cir_cconv_unreachable("inalloca is NYI"); } // Apply `nonnull`, `dereferencable(N)` and `align N` to the `this` argument, @@ -177,7 +178,7 @@ void LowerModule::constructAttributeList(StringRef Name, // FIXME: fix this properly, https://reviews.llvm.org/D100388 if (MissingFeatures::funcDeclIsCXXMethodDecl() || MissingFeatures::inallocaArgs()) { - cir_unreachable("`this` argument attributes are NYI"); + cir_cconv_unreachable("`this` argument attributes are NYI"); } unsigned ArgNo = 0; @@ -190,7 +191,7 @@ void LowerModule::constructAttributeList(StringRef Name, // Add attribute for padding argument, if necessary. if (IRFunctionArgs.hasPaddingArg(ArgNo)) { - cir_unreachable("Padding argument is NYI"); + cir_cconv_unreachable("Padding argument is NYI"); } // TODO(cir): Mark noundef arguments and return values. Although this @@ -212,22 +213,22 @@ void LowerModule::constructAttributeList(StringRef Name, [[fallthrough]]; case ABIArgInfo::Direct: if (ArgNo == 0 && ::cir::MissingFeatures::chainCall()) - cir_unreachable("ChainCall is NYI"); + cir_cconv_unreachable("ChainCall is NYI"); else if (AI.getInReg()) - cir_unreachable("InReg attribute is NYI"); + cir_cconv_unreachable("InReg attribute is NYI"); // Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); - cir_tl_assert(!::cir::MissingFeatures::noFPClass()); + cir_cconv_assert(!::cir::MissingFeatures::noFPClass()); break; default: - cir_unreachable("Missing ABIArgInfo::Kind"); + cir_cconv_unreachable("Missing ABIArgInfo::Kind"); } if (::cir::MissingFeatures::qualTypeIsReferenceType()) { - cir_unreachable("Reference handling is NYI"); + cir_cconv_unreachable("Reference handling is NYI"); } // TODO(cir): Missing some swift and nocapture stuff here. - cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); + cir_cconv_assert(!::cir::MissingFeatures::extParamInfo()); if (!Attrs.empty()) { unsigned FirstIRArg, NumIRArgs; @@ -236,24 +237,25 @@ void LowerModule::constructAttributeList(StringRef Name, newFn.setArgAttrs(FirstIRArg + i, Attrs); } } - cir_tl_assert(ArgNo == FI.arg_size()); + cir_cconv_assert(ArgNo == FI.arg_size()); } /// Arrange the argument and result information for the declaration or /// definition of the given function. const LowerFunctionInfo &LowerTypes::arrangeFunctionDeclaration(FuncOp fnOp) { if (MissingFeatures::funcDeclIsCXXMethodDecl()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); - cir_tl_assert(!MissingFeatures::qualifiedTypes()); + cir_cconv_assert(!MissingFeatures::qualifiedTypes()); FuncType FTy = fnOp.getFunctionType(); - cir_tl_assert(!MissingFeatures::CUDA()); + cir_cconv_assert(!MissingFeatures::CUDA()); // When declaring a function without a prototype, always use a // non-variadic type. if (fnOp.getNoProto()) { - cir_assert_or_abort(!::cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); + cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABINoProtoFunctions(), + "NYI"); } return arrangeFreeFunctionType(FTy); @@ -283,7 +285,7 @@ const LowerFunctionInfo &LowerTypes::arrangeFreeFunctionType(FuncType FTy) { const LowerFunctionInfo &LowerTypes::arrangeGlobalDeclaration(FuncOp fnOp) { if (MissingFeatures::funcDeclIsCXXConstructorDecl() || MissingFeatures::funcDeclIsCXXDestructorDecl()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); return arrangeFunctionDeclaration(fnOp); } @@ -300,12 +302,12 @@ const LowerFunctionInfo & LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, ArrayRef argTypes, RequiredArgs required) { - cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_cconv_assert(!::cir::MissingFeatures::qualifiedTypes()); LowerFunctionInfo *FI = nullptr; // FIXME(cir): Allow user-defined CCs (e.g. __attribute__((vectorcall))). - cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); + cir_cconv_assert(!::cir::MissingFeatures::extParamInfo()); unsigned CC = clangCallConvToLLVMCallConv(clang::CallingConv::CC_C); // Construct the function info. We co-allocate the ArgInfos. @@ -316,9 +318,9 @@ LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, // Compute ABI information. if (CC == llvm::CallingConv::SPIR_KERNEL) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else if (::cir::MissingFeatures::extParamInfo()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { // NOTE(cir): This corects the initial function info data. getABIInfo().computeInfo(*FI); // FIXME(cir): Args should be set to null. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index b66c48e62bfb..dccc52ad635d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -37,7 +37,7 @@ namespace { Value buildAddressAtOffset(LowerFunction &LF, Value addr, const ABIArgInfo &info) { if (unsigned offset = info.getDirectOffset()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } return addr; } @@ -50,7 +50,7 @@ Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, uint64_t DstSize, LowerFunction &CGF) { // We can't dive into a zero-element struct. if (SrcSTy.getNumElements() == 0) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); Type FirstElt = SrcSTy.getMembers()[0]; @@ -63,8 +63,8 @@ Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, FirstEltSize < CGF.LM.getDataLayout().getTypeStoreSize(SrcSTy)) return SrcPtr; - cir_assert_or_abort(!::cir::MissingFeatures::ABIEnterStructForCoercedAccess(), - "NYI"); + cir_cconv_assert_or_abort( + !::cir::MissingFeatures::ABIEnterStructForCoercedAccess(), "NYI"); return SrcPtr; // FIXME: This is a temporary workaround for the assertion // above. } @@ -79,17 +79,17 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, Type SrcTy = Src.getType(); Type DstTy = Dst.getType(); if (SrcTy == DstTy) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // FIXME(cir): We need a better way to handle datalayout queries. - cir_tl_assert(isa(SrcTy)); + cir_cconv_assert(isa(SrcTy)); llvm::TypeSize SrcSize = CGF.LM.getDataLayout().getTypeAllocSize(SrcTy); if (StructType DstSTy = dyn_cast(DstTy)) { Dst = enterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize.getFixedValue(), CGF); - cir_tl_assert(isa(Dst.getType())); + cir_cconv_assert(isa(Dst.getType())); DstTy = cast(Dst.getType()).getPointee(); } @@ -97,25 +97,25 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, PointerType DstPtrTy = dyn_cast(DstTy); // TODO(cir): Implement address space. if (SrcPtrTy && DstPtrTy && !::cir::MissingFeatures::addressSpace()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // If the source and destination are integer or pointer types, just do an // extension or truncation to the desired type. if ((isa(SrcTy) || isa(SrcTy)) && (isa(DstTy) || isa(DstTy))) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } llvm::TypeSize DstSize = CGF.LM.getDataLayout().getTypeAllocSize(DstTy); // If store is legal, just bitcast the src pointer. - cir_tl_assert(!::cir::MissingFeatures::vectorType()); + cir_cconv_assert(!::cir::MissingFeatures::vectorType()); if (SrcSize.getFixedValue() <= DstSize.getFixedValue()) { // Dst = Dst.withElementType(SrcTy); CGF.buildAggregateStore(Src, Dst, DstIsVolatile); } else { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } } @@ -162,7 +162,7 @@ Value createCoercedValue(Value Src, Type Ty, LowerFunction &CGF) { // extension or truncation to the desired type. if ((isa(Ty) || isa(Ty)) && (isa(SrcTy) || isa(SrcTy))) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // If load is legal, just bitcast the src pointer. @@ -178,13 +178,13 @@ Value createCoercedValue(Value Src, Type Ty, LowerFunction &CGF) { return CGF.buildAggregateBitcast(Src, Ty); } - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } Value emitAddressAtOffset(LowerFunction &LF, Value addr, const ABIArgInfo &info) { if (unsigned offset = info.getDirectOffset()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } return addr; } @@ -212,7 +212,7 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { auto intTy = dyn_cast(Ty); if (intTy && !intTy.isPrimitive()) - cir_unreachable("non-primitive types NYI"); + cir_cconv_unreachable("non-primitive types NYI"); llvm::TypeSize DstSize = LF.LM.getDataLayout().getTypeAllocSize(Ty); // FIXME(cir): Do we need the EnterStructPointerForCoercedAccess routine here? @@ -221,7 +221,7 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { if ((isa(Ty) || isa(Ty)) && (isa(SrcTy) || isa(SrcTy))) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // If load is legal, just bitcast the src pointer. @@ -237,7 +237,7 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { Src); } - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } } // namespace @@ -263,14 +263,14 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // are dealt with in CIRGen. CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), FI); - cir_tl_assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); + cir_cconv_assert(Fn.getNumArguments() == IRFunctionArgs.totalIRArgs()); // If we're using inalloca, all the memory arguments are GEPs off of the last // parameter, which is a pointer to the complete memory area. - cir_tl_assert(!::cir::MissingFeatures::inallocaArgs()); + cir_cconv_assert(!::cir::MissingFeatures::inallocaArgs()); // Name the struct return parameter. - cir_tl_assert(!::cir::MissingFeatures::sretArgs()); + cir_cconv_assert(!::cir::MissingFeatures::sretArgs()); // Track if we received the parameter as a pointer (indirect, byval, or // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it @@ -280,8 +280,8 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // FIXME(cir): non-blocking workaround for argument types that are not yet // properly handled by the ABI. - if (cirMissingFeatureAssertionMode && FI.arg_size() != Args.size()) { - cir_tl_assert(::cir::MissingFeatures::ABIParameterCoercion()); + if (cirCConvAssertionMode && FI.arg_size() != Args.size()) { + cir_cconv_assert(::cir::MissingFeatures::ABIParameterCoercion()); return success(); } @@ -289,7 +289,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // entails copying one or more LLVM IR arguments into an alloca. Don't push // any cleanups or do anything that might unwind. We do that separately, so // we can push the cleanups in the correct order for the ABI. - cir_tl_assert(FI.arg_size() == Args.size()); + cir_cconv_assert(FI.arg_size() == Args.size()); unsigned ArgNo = 0; LowerFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); for (MutableArrayRef::const_iterator i = Args.begin(), @@ -304,10 +304,10 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // CGFunctionInfo::ArgInfo type with subsequent argument demotion. Type Ty = {}; if (isPromoted) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); else Ty = Arg.getType(); - cir_tl_assert(!::cir::MissingFeatures::evaluationKind()); + cir_cconv_assert(!::cir::MissingFeatures::evaluationKind()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -323,29 +323,29 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // http://llvm.org/docs/LangRef.html#paramattrs. if (ArgI.getDirectOffset() == 0 && isa(LTy) && isa(ArgI.getCoerceToType())) { - cir_assert_or_abort(!::cir::MissingFeatures::ABIPointerParameterAttrs(), - "NYI"); + cir_cconv_assert_or_abort( + !::cir::MissingFeatures::ABIPointerParameterAttrs(), "NYI"); } // Prepare the argument value. If we have the trivial case, handle it // with no muss and fuss. if (!isa(ArgI.getCoerceToType()) && ArgI.getCoerceToType() == Ty && ArgI.getDirectOffset() == 0) { - cir_tl_assert(NumIRArgs == 1); + cir_cconv_assert(NumIRArgs == 1); // LLVM expects swifterror parameters to be used in very restricted // ways. Copy the value into a less-restricted temporary. Value V = AI; if (::cir::MissingFeatures::extParamInfo()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // Ensure the argument is the correct type. if (V.getType() != ArgI.getCoerceToType()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (isPromoted) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); ArgVals.push_back(V); @@ -358,7 +358,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, break; } - cir_tl_assert(!::cir::MissingFeatures::vectorType()); + cir_cconv_assert(!::cir::MissingFeatures::vectorType()); // Allocate original argument to be "uncoerced". // FIXME(cir): We should have a alloca op builder that does not required @@ -377,10 +377,10 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, StructType STy = dyn_cast(ArgI.getCoerceToType()); if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && STy.getNumElements() > 1) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { // Simple case, just do a coerced store of the argument into the alloca. - cir_tl_assert(NumIRArgs == 1); + cir_cconv_assert(NumIRArgs == 1); Value AI = Fn.getArgument(FirstIRArg); // TODO(cir): Set argument name in the new function. createCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); @@ -388,7 +388,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // Match to what EmitParamDecl is expecting for this type. if (::cir::MissingFeatures::evaluationKind()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { // FIXME(cir): Should we have an ParamValue abstraction like in the // original codegen? @@ -399,7 +399,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // RAUW the original argument alloca with the new one. This assumes that // the argument is used only to be stored in a alloca. Value arg = SrcFn.getArgument(ArgNo); - cir_tl_assert(arg.hasOneUse()); + cir_cconv_assert(arg.hasOneUse()); auto *firstStore = *arg.user_begin(); auto argAlloca = cast(firstStore).getAddr(); rewriter.replaceAllUsesWith(argAlloca, Alloca); @@ -408,12 +408,12 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, break; } default: - cir_unreachable("Unhandled ABIArgInfo::Kind"); + cir_cconv_unreachable("Unhandled ABIArgInfo::Kind"); } } if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { // FIXME(cir): In the original codegen, EmitParamDecl is called here. It // is likely that said function considers ABI details during emission, so @@ -447,7 +447,7 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { // the load, zap the store, and usually zap the alloca. // NOTE(cir): This seems like a premature optimization case. Skipping it. if (::cir::MissingFeatures::returnValueDominatingStoreOptmiization()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // Otherwise, we have to do a simple load. else { @@ -473,7 +473,7 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { break; default: - cir_unreachable("Unhandled ABIArgInfo::Kind"); + cir_cconv_unreachable("Unhandled ABIArgInfo::Kind"); } return success(); @@ -485,19 +485,19 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { /// focuses on the ABI-specific details. So a lot of codegen stuff is removed. LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, const LowerFunctionInfo &FnInfo) { - cir_tl_assert(newFn && "generating code for null Function"); + cir_cconv_assert(newFn && "generating code for null Function"); auto Args = oldFn.getArguments(); // Emit the ABI-specific function prologue. - cir_tl_assert(newFn.empty() && "Function already has a body"); + cir_cconv_assert(newFn.empty() && "Function already has a body"); rewriter.setInsertionPointToEnd(newFn.addEntryBlock()); if (buildFunctionProlog(FnInfo, newFn, oldFn.getArguments()).failed()) return failure(); // Ensure that old ABI-agnostic arguments uses were replaced. const auto hasNoUses = [](Value val) { return val.getUses().empty(); }; - cir_tl_assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && - "Missing RAUW?"); + cir_cconv_assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && + "Missing RAUW?"); // NOTE(cir): While the new function has the ABI-aware parameters, the old // function still has the function logic. To complete the migration, we have @@ -531,17 +531,17 @@ void LowerFunction::buildAggregateStore(Value Val, Value Dest, // Function to store a first-class aggregate into memory. We prefer to // store the elements rather than the aggregate to be more friendly to // fast-isel. - cir_tl_assert(mlir::isa(Dest.getType()) && - "Storing in a non-pointer!"); + cir_cconv_assert(mlir::isa(Dest.getType()) && + "Storing in a non-pointer!"); (void)DestIsVolatile; // Circumvent CIR's type checking. Type pointeeTy = mlir::cast(Dest.getType()).getPointee(); if (Val.getType() != pointeeTy) { // NOTE(cir): We only bitcast and store if the types have the same size. - cir_tl_assert((LM.getDataLayout().getTypeSizeInBits(Val.getType()) == - LM.getDataLayout().getTypeSizeInBits(pointeeTy)) && - "Incompatible types"); + cir_cconv_assert((LM.getDataLayout().getTypeSizeInBits(Val.getType()) == + LM.getDataLayout().getTypeSizeInBits(pointeeTy)) && + "Incompatible types"); auto loc = Val.getLoc(); Val = rewriter.create(loc, pointeeTy, CastKind::bitcast, Val); } @@ -573,7 +573,7 @@ LogicalResult LowerFunction::rewriteCallOp(CallOp op, // NOTE(cir): There is no direct way to fetch the function type from the // CallOp, so we fetch it from the source function. This assumes the // function definition has not yet been lowered. - cir_tl_assert(SrcFn && "No source function"); + cir_cconv_assert(SrcFn && "No source function"); auto fnType = SrcFn.getFunctionType(); // Rewrite the call operation to abide to the ABI calling convention. @@ -601,7 +601,7 @@ Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, // CIRGen. CallArgList Args; if (Chain) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // NOTE(cir): Call args were already emitted in CIRGen. Skip the evaluation // order done in CIRGen and just fetch the exiting arguments here. @@ -631,10 +631,11 @@ Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, // Chain calls use this same code path to add the invisible chain parameter // to the function type. if (origCallee.getNoProto() || Chain) { - cir_assert_or_abort(::cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); + cir_cconv_assert_or_abort(::cir::MissingFeatures::ABINoProtoFunctions(), + "NYI"); } - cir_tl_assert(!::cir::MissingFeatures::CUDA()); + cir_cconv_assert(!::cir::MissingFeatures::CUDA()); // TODO(cir): LLVM IR has the concept of "CallBase", which is a base class // for all types of calls. Perhaps we should have a CIR interface to mimic @@ -674,7 +675,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // If we're using inalloca, insert the allocation after the stack save. // FIXME: Do this earlier rather than hacking it in here! if (StructType ArgStruct = CallInfo.getArgStruct()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), CallInfo); @@ -683,16 +684,16 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // If the call returns a temporary with struct return, create a temporary // alloca to hold the result, unless one is given to us. if (RetAI.isIndirect() || RetAI.isCoerceAndExpand() || RetAI.isInAlloca()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } - cir_tl_assert(!::cir::MissingFeatures::swift()); + cir_cconv_assert(!::cir::MissingFeatures::swift()); // NOTE(cir): Skipping lifetime markers here. // Translate all of the arguments as necessary to match the IR lowering. - cir_tl_assert(CallInfo.arg_size() == CallArgs.size() && - "Mismatch between function signature & arguments."); + cir_cconv_assert(CallInfo.arg_size() == CallArgs.size() && + "Mismatch between function signature & arguments."); unsigned ArgNo = 0; LowerFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); for (auto I = CallArgs.begin(), E = CallArgs.end(); I != E; @@ -700,7 +701,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, const ABIArgInfo &ArgInfo = info_it->info; if (IRFunctionArgs.hasPaddingArg(ArgNo)) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -717,28 +718,28 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!isa(ArgInfo.getCoerceToType()) && ArgInfo.getCoerceToType() == info_it->type && ArgInfo.getDirectOffset() == 0) { - cir_tl_assert(NumIRArgs == 1); + cir_cconv_assert(NumIRArgs == 1); Value V; if (!isa(I->getType())) { V = *I; } else { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } if (::cir::MissingFeatures::extParamInfo()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } if (ArgInfo.getCoerceToType() != V.getType() && isa(V.getType())) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (FirstIRArg < IRFuncTy.getNumInputs() && V.getType() != IRFuncTy.getInput(FirstIRArg)) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (::cir::MissingFeatures::undef()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); IRCallArgs[FirstIRArg] = V; break; } @@ -746,7 +747,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // FIXME: Avoid the conversion through memory if possible. Value Src = {}; if (!isa(I->getType())) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { // NOTE(cir): L/RValue stuff are left for CIRGen to handle. Src = *I; @@ -760,18 +761,18 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // FCAs, so we flatten them if this is safe to do for this argument. StructType STy = dyn_cast(ArgInfo.getCoerceToType()); if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { // In the simple case, just pass the coerced loaded value. - cir_tl_assert(NumIRArgs == 1); + cir_cconv_assert(NumIRArgs == 1); Value Load = createCoercedValue(Src, ArgInfo.getCoerceToType(), *this); // FIXME(cir): We should probably handle CMSE non-secure calls here - cir_tl_assert(!::cir::MissingFeatures::cmseNonSecureCallAttr()); + cir_cconv_assert(!::cir::MissingFeatures::cmseNonSecureCallAttr()); // since they are a ARM-specific feature. if (::cir::MissingFeatures::undef()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); IRCallArgs[FirstIRArg] = Load; } @@ -779,7 +780,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, } default: llvm::outs() << "Missing ABIArgInfo::Kind: " << ArgInfo.getKind() << "\n"; - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } } @@ -792,7 +793,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // debugging stuff here. // Update the largest vector width if any arguments have vector types. - cir_tl_assert(!::cir::MissingFeatures::vectorType()); + cir_cconv_assert(!::cir::MissingFeatures::vectorType()); // Compute the calling convention and attributes. @@ -818,7 +819,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, rewriter.getAttr(rewriter.getDictionaryAttr({})); newCallOp->setAttr("extra_attrs", extraAttrs); - cir_tl_assert(!::cir::MissingFeatures::vectorType()); + cir_cconv_assert(!::cir::MissingFeatures::vectorType()); // NOTE(cir): Skipping some ObjC, tail-call, debug, and attribute stuff // here. @@ -850,11 +851,11 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // Perhaps this section should handle CIR's boolean case. Value V = newCallOp.getResult(); if (V.getType() != RetIRTy) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); return V; } default: - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } } @@ -862,13 +863,13 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // compatibility, and the types match, use the llvm.vector.extract // intrinsic to perform the conversion. if (::cir::MissingFeatures::vectorType()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // FIXME(cir): Use return value slot here. Value RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. - cir_tl_assert(!::cir::MissingFeatures::volatileTypes()); + cir_cconv_assert(!::cir::MissingFeatures::volatileTypes()); // NOTE(cir): If the function returns, there should always be a valid // return value present. Instead of setting the return value here, we @@ -876,7 +877,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!RetVal) { RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. - cir_tl_assert(::cir::MissingFeatures::volatileTypes()); + cir_cconv_assert(::cir::MissingFeatures::volatileTypes()); } // An empty record can overlap other data (if declared with @@ -894,7 +895,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, } default: llvm::errs() << "Unhandled ABIArgInfo kind: " << RetAI.getKind() << "\n"; - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } }(); @@ -911,7 +912,7 @@ Value LowerFunction::getUndefRValue(Type Ty) { return nullptr; llvm::outs() << "Missing undef handler for value type: " << Ty << "\n"; - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } ::cir::TypeEvaluationKind LowerFunction::getEvaluationKind(Type type) { @@ -921,7 +922,7 @@ ::cir::TypeEvaluationKind LowerFunction::getEvaluationKind(Type type) { if (isa(type)) return ::cir::TypeEvaluationKind::TEK_Scalar; - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index 89b493ec9eb6..394bd2b62951 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -36,7 +36,7 @@ class RequiredArgs { RequiredArgs(All_t _) : NumRequired(~0U) {} explicit RequiredArgs(unsigned n) : NumRequired(n) { - cir_tl_assert(n != ~0U); + cir_cconv_assert(n != ~0U); } /// Compute the arguments required by the given formal prototype, @@ -49,7 +49,8 @@ class RequiredArgs { if (!prototype.isVarArg()) return All; - cir_assert_or_abort(!::cir::MissingFeatures::variadicFunctions(), "NYI"); + cir_cconv_assert_or_abort(!::cir::MissingFeatures::variadicFunctions(), + "NYI"); return All; // FIXME(cir): Temporary workaround for the assertion above. } @@ -108,7 +109,7 @@ class LowerFunctionInfo final ArrayRef argTypes, RequiredArgs required) { // TODO(cir): Add assertions? - cir_tl_assert(!::cir::MissingFeatures::extParamInfo()); + cir_cconv_assert(!::cir::MissingFeatures::extParamInfo()); void *buffer = operator new(totalSizeToAlloc(argTypes.size() + 1)); LowerFunctionInfo *FI = new (buffer) LowerFunctionInfo(); @@ -149,12 +150,12 @@ class LowerFunctionInfo final unsigned arg_size() const { return NumArgs; } bool isVariadic() const { - cir_tl_assert(!::cir::MissingFeatures::variadicFunctions()); + cir_cconv_assert(!::cir::MissingFeatures::variadicFunctions()); return false; } unsigned getNumRequiredArgs() const { if (isVariadic()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); return arg_size(); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 2a704bc505be..85d08b4ce03f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -50,10 +50,10 @@ static CIRCXXABI *createCXXABI(LowerModule &CGM) { case clang::TargetCXXABI::XL: return CreateItaniumCXXABI(CGM); case clang::TargetCXXABI::Microsoft: - cir_unreachable("Windows ABI NYI"); + cir_cconv_unreachable("Windows ABI NYI"); } - cir_unreachable("invalid C++ ABI kind"); + cir_cconv_unreachable("invalid C++ ABI kind"); } static std::unique_ptr @@ -66,18 +66,18 @@ createTargetLoweringInfo(LowerModule &LM) { case llvm::Triple::aarch64: { AArch64ABIKind Kind = AArch64ABIKind::AAPCS; if (Target.getABI() == "darwinpcs") - cir_unreachable("DarwinPCS ABI NYI"); + cir_cconv_unreachable("DarwinPCS ABI NYI"); else if (Triple.isOSWindows()) - cir_unreachable("Windows ABI NYI"); + cir_cconv_unreachable("Windows ABI NYI"); else if (Target.getABI() == "aapcs-soft") - cir_unreachable("AAPCS-soft ABI NYI"); + cir_cconv_unreachable("AAPCS-soft ABI NYI"); return createAArch64TargetLoweringInfo(LM, Kind); } case llvm::Triple::x86_64: { switch (Triple.getOS()) { case llvm::Triple::Win32: - cir_unreachable("Windows ABI NYI"); + cir_cconv_unreachable("Windows ABI NYI"); default: return createX86_64TargetLoweringInfo(LM, X86AVXABILevel::None); } @@ -85,7 +85,7 @@ createTargetLoweringInfo(LowerModule &LM) { case llvm::Triple::spirv64: return createSPIRVTargetLoweringInfo(LM); default: - cir_unreachable("ABI NYI"); + cir_cconv_unreachable("ABI NYI"); } } @@ -143,29 +143,29 @@ void LowerModule::setFunctionAttributes(FuncOp oldFn, FuncOp newFn, // If we plan on emitting this inline builtin, we can't treat it as a builtin. if (MissingFeatures::funcDeclIsInlineBuiltinDeclaration()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } if (MissingFeatures::funcDeclIsReplaceableGlobalAllocationFunction()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } if (MissingFeatures::funcDeclIsCXXConstructorDecl() || MissingFeatures::funcDeclIsCXXDestructorDecl()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); else if (MissingFeatures::funcDeclIsCXXMethodDecl()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // NOTE(cir) Skipping emissions that depend on codegen options, as well as // sanitizers handling here. Do this in CIRGen. if (MissingFeatures::langOpts() && MissingFeatures::openMP()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // NOTE(cir): Skipping more things here that depend on codegen options. if (MissingFeatures::extParamInfo()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } } @@ -196,7 +196,7 @@ LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { // Set up ABI-specific function attributes. setFunctionAttributes(op, newFn, false, /*IsThunk=*/false); if (MissingFeatures::extParamInfo()) { - cir_unreachable("ExtraAttrs are NYI"); + cir_cconv_unreachable("ExtraAttrs are NYI"); } // Is a function definition: handle the body. @@ -245,7 +245,7 @@ std::unique_ptr createLowerModule(ModuleOp module, // FIXME(cir): This just uses the default language options. We need to account // for custom options. // Create context. - cir_tl_assert(!::cir::MissingFeatures::langOpts()); + cir_cconv_assert(!::cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; return std::make_unique(langOpts, module, dataLayoutStr, diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index a7f3e1fa187a..2d5e928e93f3 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -68,7 +68,7 @@ class LowerModule { // FIXME(cir): This would be in ASTContext, not CodeGenModule. clang::TargetCXXABI::Kind getCXXABIKind() const { auto kind = getTarget().getCXXABI().getKind(); - cir_tl_assert(!::cir::MissingFeatures::langOpts()); + cir_cconv_assert(!::cir::MissingFeatures::langOpts()); return kind; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index cb444283e735..ea9f51f002f6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -29,7 +29,7 @@ unsigned LowerTypes::clangCallConvToLLVMCallConv(clang::CallingConv CC) { case clang::CC_C: return llvm::CallingConv::C; default: - cir_unreachable("calling convention NYI"); + cir_cconv_unreachable("calling convention NYI"); } } @@ -53,17 +53,17 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { resultType = VoidType::get(getMLIRContext()); break; default: - cir_unreachable("Missing ABIArgInfo::Kind"); + cir_cconv_unreachable("Missing ABIArgInfo::Kind"); } CIRToCIRArgMapping IRFunctionArgs(getContext(), FI, true); SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); // Add type for sret argument. - cir_tl_assert(!::cir::MissingFeatures::sretArgs()); + cir_cconv_assert(!::cir::MissingFeatures::sretArgs()); // Add type for inalloca argument. - cir_tl_assert(!::cir::MissingFeatures::inallocaArgs()); + cir_cconv_assert(!::cir::MissingFeatures::inallocaArgs()); // Add in all of the required arguments. unsigned ArgNo = 0; @@ -72,7 +72,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { for (; it != ie; ++it, ++ArgNo) { const ABIArgInfo &ArgInfo = it->info; - cir_tl_assert(!::cir::MissingFeatures::argumentPadding()); + cir_cconv_assert(!::cir::MissingFeatures::argumentPadding()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -85,17 +85,17 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { Type argType = ArgInfo.getCoerceToType(); StructType st = dyn_cast(argType); if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { - cir_tl_assert(NumIRArgs == st.getNumElements()); + cir_cconv_assert(NumIRArgs == st.getNumElements()); for (unsigned i = 0, e = st.getNumElements(); i != e; ++i) ArgTypes[FirstIRArg + i] = st.getMembers()[i]; } else { - cir_tl_assert(NumIRArgs == 1); + cir_cconv_assert(NumIRArgs == 1); ArgTypes[FirstIRArg] = argType; } break; } default: - cir_unreachable("Missing ABIArgInfo::Kind"); + cir_cconv_unreachable("Missing ABIArgInfo::Kind"); } } @@ -117,7 +117,7 @@ mlir::Type LowerTypes::convertType(Type T) { } llvm::outs() << "Missing default ABI-specific type for " << T << "\n"; - cir_assert_or_abort(!::cir::MissingFeatures::X86DefaultABITypeConvertion(), - "NYI"); + cir_cconv_assert_or_abort( + !::cir::MissingFeatures::X86DefaultABITypeConvertion(), "NYI"); return T; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index 936739e2831a..cb5f5eff5f7d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -58,11 +58,11 @@ class EmptySubobjectMap { void EmptySubobjectMap::ComputeEmptySubobjectSizes() { // Check the bases. - cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_cconv_assert(!::cir::MissingFeatures::getCXXRecordBases()); // Check the fields. for (const auto FT : Class.getMembers()) { - cir_tl_assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_cconv_assert(!::cir::MissingFeatures::qualifiedTypes()); const auto RT = dyn_cast(FT); // We only care about record types. @@ -70,14 +70,14 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() { continue; // TODO(cir): Handle nested record types. - cir_assert_or_abort(!::cir::MissingFeatures::ABINestedRecordLayout(), - "NYI"); + cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABINestedRecordLayout(), + "NYI"); } } bool EmptySubobjectMap::canPlaceFieldAtOffset(const Type Ty, clang::CharUnits Offset) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } //===-----------------------------------------------------------------------==// @@ -207,7 +207,7 @@ class ItaniumRecordLayoutBuilder { bool isPacked, const Type Ty); clang::CharUnits getSize() const { - cir_tl_assert(Size % Context.getCharWidth() == 0); + cir_cconv_assert(Size % Context.getCharWidth() == 0); return Context.toCharUnitsFromBits(Size); } uint64_t getSizeInBits() const { return Size; } @@ -216,7 +216,7 @@ class ItaniumRecordLayoutBuilder { void setSize(uint64_t NewSize) { Size = NewSize; } clang::CharUnits getDataSize() const { - cir_tl_assert(DataSize % Context.getCharWidth() == 0); + cir_cconv_assert(DataSize % Context.getCharWidth() == 0); return Context.toCharUnitsFromBits(DataSize); } @@ -235,29 +235,29 @@ void ItaniumRecordLayoutBuilder::layout(const StructType RT) { initializeLayout(RT); // Lay out the vtable and the non-virtual bases. - cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl() && - !::cir::MissingFeatures::CXXRecordIsDynamicClass()); + cir_cconv_assert(!::cir::MissingFeatures::isCXXRecordDecl() && + !::cir::MissingFeatures::CXXRecordIsDynamicClass()); layoutFields(RT); // FIXME(cir): Handle virtual-related layouts. - cir_tl_assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_cconv_assert(!::cir::MissingFeatures::getCXXRecordBases()); - cir_tl_assert( + cir_cconv_assert( !::cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); } void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { if (const auto RT = dyn_cast(Ty)) { IsUnion = RT.isUnion(); - cir_tl_assert(!::cir::MissingFeatures::recordDeclIsMSStruct()); + cir_cconv_assert(!::cir::MissingFeatures::recordDeclIsMSStruct()); } - cir_tl_assert(!::cir::MissingFeatures::recordDeclIsPacked()); + cir_cconv_assert(!::cir::MissingFeatures::recordDeclIsPacked()); // Honor the default struct packing maximum alignment flag. if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // mac68k alignment supersedes maximum field alignment and attribute aligned, @@ -265,16 +265,16 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { // allude to additional (more complicated) semantics, especially with regard // to bit-fields, but gcc appears not to follow that. if (::cir::MissingFeatures::declHasAlignMac68kAttr()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { if (::cir::MissingFeatures::declHasAlignNaturalAttr()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (::cir::MissingFeatures::declHasMaxFieldAlignmentAttr()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (::cir::MissingFeatures::declGetMaxAlignment()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } HandledFirstNonOverlappingEmptyField = @@ -283,7 +283,7 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { // If there is an external AST source, ask it for the various offsets. if (const auto RT = dyn_cast(Ty)) { if (::cir::MissingFeatures::astContextGetExternalSource()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } } } @@ -291,8 +291,9 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { void ItaniumRecordLayoutBuilder::layoutField(const Type D, bool InsertExtraPadding) { // auto FieldClass = D.dyn_cast(); - cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && - !::cir::MissingFeatures::CXXRecordDeclIsEmptyCXX11()); + cir_cconv_assert( + !::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && + !::cir::MissingFeatures::CXXRecordDeclIsEmptyCXX11()); bool IsOverlappingEmptyField = false; // FIXME(cir): Needs more features. clang::CharUnits FieldOffset = (IsUnion || IsOverlappingEmptyField) @@ -303,10 +304,10 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, Context.getTargetInfo().defaultsToAIXPowerAlignment(); bool FoundFirstNonOverlappingEmptyFieldForAIX = false; if (DefaultsToAIXPowerAlignment && !HandledFirstNonOverlappingEmptyField) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } - cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + cir_cconv_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit; // Reset the unfilled bits. @@ -335,19 +336,19 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, }; if (isa(D) && cast(D).getSize() == 0) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { setDeclInfo(false /* IsIncompleteArrayType */); if (::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (IsMsStruct) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } - cir_tl_assert(!::cir::MissingFeatures::recordDeclIsPacked() && - !::cir::MissingFeatures::CXXRecordDeclIsPOD()); + cir_cconv_assert(!::cir::MissingFeatures::recordDeclIsPacked() && + !::cir::MissingFeatures::CXXRecordDeclIsPOD()); bool FieldPacked = false; // FIXME(cir): Needs more features. // When used as part of a typedef, or together with a 'packed' attribute, the @@ -375,7 +376,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, clang::CharUnits PreferredAlign = FieldAlign; if (DefaultsToAIXPowerAlignment && !alignedAttrCanDecreaseAIXAlignment() && (FoundFirstNonOverlappingEmptyFieldForAIX || IsNaturalAlign)) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // The align if the field is not packed. This is to check if the attribute @@ -385,7 +386,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, clang::CharUnits UnpackedFieldOffset = FieldOffset; // clang::CharUnits OriginalFieldAlign = UnpackedFieldAlign; - cir_tl_assert(!::cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); + cir_cconv_assert(!::cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); clang::CharUnits MaxAlignmentInChars = clang::CharUnits::Zero(); PackedFieldAlign = std::max(PackedFieldAlign, MaxAlignmentInChars); PreferredAlign = std::max(PreferredAlign, MaxAlignmentInChars); @@ -393,15 +394,15 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, // The maximum field alignment overrides the aligned attribute. if (!MaxFieldAlignment.isZero()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } if (!FieldPacked) FieldAlign = UnpackedFieldAlign; if (DefaultsToAIXPowerAlignment) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (FieldPacked) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } clang::CharUnits AlignTo = @@ -411,13 +412,13 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, UnpackedFieldOffset = UnpackedFieldOffset.alignTo(UnpackedFieldAlign); if (UseExternalLayout) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { if (!IsUnion && EmptySubobjects) { // Check if we can place the field at this offset. while (/*!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)*/ false) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } } } @@ -431,21 +432,21 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, Context.toBits(UnpackedFieldAlign), FieldPacked, D); if (InsertExtraPadding) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // Reserve space for this field. if (!IsOverlappingEmptyField) { // uint64_t EffectiveFieldSizeInBits = Context.toBits(EffectiveFieldSize); if (IsUnion) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); else setDataSize(FieldOffset + EffectiveFieldSize); PaddedFieldSize = std::max(PaddedFieldSize, FieldOffset + FieldSize); setSize(std::max(getSizeInBits(), getDataSizeInBits())); } else { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // Remember max struct/class ABI-specified alignment. @@ -458,19 +459,19 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, // laid out. A regular mlir::Type has not way of doing this. In fact, we will // likely need an external abstraction, as I don't think this is possible with // just the field type. - cir_tl_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + cir_cconv_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); if (Packed && !FieldPacked && PackedFieldAlign < FieldAlign) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } void ItaniumRecordLayoutBuilder::layoutFields(const StructType D) { // Layout each field, for now, just sequentially, respecting alignment. In // the future, this will need to be tweakable by targets. - cir_tl_assert(!::cir::MissingFeatures::recordDeclMayInsertExtraPadding() && - !Context.getLangOpts().SanitizeAddressFieldPadding); + cir_cconv_assert(!::cir::MissingFeatures::recordDeclMayInsertExtraPadding() && + !Context.getLangOpts().SanitizeAddressFieldPadding); bool InsertExtraPadding = false; - cir_tl_assert(!::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); + cir_cconv_assert(!::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); bool HasFlexibleArrayMember = false; for (const auto FT : D.getMembers()) { layoutField(FT, InsertExtraPadding && (FT != D.getMembers().back() || @@ -487,20 +488,20 @@ void ItaniumRecordLayoutBuilder::UpdateAlignment( return; if (NewAlignment > Alignment) { - cir_tl_assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) && - "Alignment not a power of 2"); + cir_cconv_assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) && + "Alignment not a power of 2"); Alignment = NewAlignment; } if (UnpackedNewAlignment > UnpackedAlignment) { - cir_tl_assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) && - "Alignment not a power of 2"); + cir_cconv_assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) && + "Alignment not a power of 2"); UnpackedAlignment = UnpackedNewAlignment; } if (PreferredNewAlignment > PreferredAlignment) { - cir_tl_assert(llvm::isPowerOf2_64(PreferredNewAlignment.getQuantity()) && - "Alignment not a power of 2"); + cir_cconv_assert(llvm::isPowerOf2_64(PreferredNewAlignment.getQuantity()) && + "Alignment not a power of 2"); PreferredAlignment = PreferredNewAlignment; } } @@ -511,7 +512,7 @@ void ItaniumRecordLayoutBuilder::checkFieldPadding( // We let objc ivars without warning, objc interfaces generally are not used // for padding tricks. if (::cir::MissingFeatures::objCIvarDecls()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // FIXME(cir): Should the following be skiped in CIR? // Don't warn about structs created without a SourceLocation. This can @@ -527,7 +528,7 @@ void ItaniumRecordLayoutBuilder::checkFieldPadding( PadSize = PadSize / CharBitNum; // InBits = false; } - cir_tl_assert(::cir::MissingFeatures::bitFieldPaddingDiagnostics()); + cir_cconv_assert(::cir::MissingFeatures::bitFieldPaddingDiagnostics()); } if (isPacked && Offset != UnpackedOffset) { HasPackedField = true; @@ -546,7 +547,7 @@ bool isMsLayout(const CIRLowerContext &Context) { /// of the given class (considering it as a base class) when allocating /// objects? static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { - cir_tl_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + cir_cconv_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); switch (ABI.getTailPaddingUseRules()) { case clang::TargetCXXABI::AlwaysUseTailPadding: return false; @@ -568,7 +569,7 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { // intended. // FIXME(cir): This always returns true since we can't check if a CIR record // is a POD type. - cir_tl_assert(!::cir::MissingFeatures::CXXRecordDeclIsPOD()); + cir_cconv_assert(!::cir::MissingFeatures::CXXRecordDeclIsPOD()); return true; case clang::TargetCXXABI::UseTailPaddingUnlessPOD11: @@ -578,10 +579,10 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { // mode; fortunately, that is true because we want to assign // consistently semantics to the type-traits intrinsics (or at // least as many of them as possible). - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } - cir_unreachable("bad tail-padding use kind"); + cir_cconv_unreachable("bad tail-padding use kind"); } } // namespace @@ -590,11 +591,11 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { /// (struct/union/class), which indicates its size and field position /// information. const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { - cir_tl_assert(isa(D) && "Not a record type"); + cir_cconv_assert(isa(D) && "Not a record type"); auto RT = dyn_cast(D); - cir_tl_assert(RT.isComplete() && - "Cannot get layout of forward declarations!"); + cir_cconv_assert(RT.isComplete() && + "Cannot get layout of forward declarations!"); // FIXME(cir): Use a more MLIR-based approach by using it's buitin data layout // features, such as interfaces, cacheing, and the DLTI dialect. @@ -602,10 +603,10 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { const CIRRecordLayout *NewEntry = nullptr; if (isMsLayout(*this)) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { // FIXME(cir): Add if-else separating C and C++ records. - cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl()); + cir_cconv_assert(!::cir::MissingFeatures::isCXXRecordDecl()); EmptySubobjectMap EmptySubobjects(*this, RT); ItaniumRecordLayoutBuilder Builder(*this, &EmptySubobjects); Builder.layout(RT); @@ -620,7 +621,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { skipTailPadding ? Builder.getSize() : Builder.getDataSize(); clang::CharUnits NonVirtualSize = skipTailPadding ? DataSize : Builder.NonVirtualSize; - cir_tl_assert(!::cir::MissingFeatures::CXXRecordIsDynamicClass()); + cir_cconv_assert(!::cir::MissingFeatures::CXXRecordIsDynamicClass()); // FIXME(cir): Whose responsible for freeing the allocation below? NewEntry = new CIRRecordLayout( *this, Builder.getSize(), Builder.Alignment, Builder.PreferredAlignment, @@ -635,7 +636,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { } // TODO(cir): Add option to dump the layouts. - cir_tl_assert(!::cir::MissingFeatures::cacheRecordLayouts()); + cir_cconv_assert(!::cir::MissingFeatures::cacheRecordLayouts()); return *NewEntry; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index 246f75c84e57..ed40b50188c1 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -60,7 +60,7 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { public: AArch64TargetLoweringInfo(LowerTypes <, AArch64ABIKind Kind) : TargetLoweringInfo(std::make_unique(LT, Kind)) { - cir_tl_assert(!MissingFeature::swift()); + cir_cconv_assert(!MissingFeature::swift()); } unsigned getTargetAddrSpaceFromCIRAddrSpace( @@ -74,7 +74,7 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { case Kind::offload_generic: return 0; default: - cir_unreachable("Unknown CIR address space for this target"); + cir_cconv_unreachable("Unknown CIR address space for this target"); } } }; @@ -87,25 +87,25 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, return ABIArgInfo::getIgnore(); if (const auto _ = dyn_cast(RetTy)) { - cir_assert_or_abort(!::cir::MissingFeatures::vectorType(), "NYI"); + cir_cconv_assert_or_abort(!::cir::MissingFeatures::vectorType(), "NYI"); } // Large vector types should be returned via memory. if (isa(RetTy) && getContext().getTypeSize(RetTy) > 128) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (!isAggregateTypeForABI(RetTy)) { // NOTE(cir): Skip enum handling. if (MissingFeature::fixedSizeIntType()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS() ? ABIArgInfo::getExtend(RetTy) : ABIArgInfo::getDirect()); } - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } ABIArgInfo @@ -115,21 +115,21 @@ AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, // TODO(cir): check for illegal vector types. if (MissingFeature::vectorType()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (!isAggregateTypeForABI(Ty)) { // NOTE(cir): Enum is IntType in CIR. Skip enum handling here. if (MissingFeature::fixedSizeIntType()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS() ? ABIArgInfo::getExtend(Ty) : ABIArgInfo::getDirect()); } - cir_assert_or_abort(!::cir::MissingFeatures::AArch64TypeClassification(), - "NYI"); + cir_cconv_assert_or_abort( + !::cir::MissingFeatures::AArch64TypeClassification(), "NYI"); return {}; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index 749e9144676d..b8ca5f663cc9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -40,12 +40,12 @@ class LoweringPrepareAArch64CXXABI : public LoweringPrepareItaniumCXXABI { mlir::Value lowerMSVAArg(cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, const cir::CIRDataLayout &datalayout) { - cir_unreachable("MSVC ABI not supported yet"); + cir_cconv_unreachable("MSVC ABI not supported yet"); } mlir::Value lowerDarwinVAArg(cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, const cir::CIRDataLayout &datalayout) { - cir_unreachable("Darwin ABI not supported yet"); + cir_cconv_unreachable("Darwin ABI not supported yet"); } }; } // namespace @@ -70,26 +70,26 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // Homogenous Aggregate type not supported and indirect arg // passing not supported yet. And for these supported types, // we should not have alignment greater than 8 problem. - cir_tl_assert(isSupportedType); - cir_tl_assert(!cir::MissingFeatures::classifyArgumentTypeForAArch64()); + cir_cconv_assert(isSupportedType); + cir_cconv_assert(!cir::MissingFeatures::classifyArgumentTypeForAArch64()); // indirect arg passing would expect one more level of pointer dereference. - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_cconv_assert(!cir::MissingFeatures::handleAArch64Indirect()); // false as a place holder for now, as we don't have a way to query bool isIndirect = false; - cir_tl_assert(!cir::MissingFeatures::supportgetCoerceToTypeForAArch64()); + cir_cconv_assert(!cir::MissingFeatures::supportgetCoerceToTypeForAArch64()); // we don't convert to LLVM Type here as we are lowering to CIR here. // so baseTy is the just type of the result of va_arg. // but it depends on arg type indirectness and coercion defined by ABI. auto baseTy = opResTy; if (mlir::isa(baseTy)) { - cir_unreachable("ArrayType VAArg loweing NYI"); + cir_cconv_unreachable("ArrayType VAArg loweing NYI"); } // numRegs may not be 1 if ArrayType is supported. unsigned numRegs = 1; if (Kind == AArch64ABIKind::AAPCSSoft) { - cir_unreachable("AAPCSSoft cir.var_arg lowering NYI"); + cir_cconv_unreachable("AAPCSSoft cir.var_arg lowering NYI"); } bool IsFPR = mlir::cir::isAnyFloatingPointType(baseTy); @@ -120,8 +120,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // though anyone passing 2GB of arguments, each at most 16 bytes, deserves // whatever they get). - cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); - cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_cconv_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + cir_cconv_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // One is just place holder for now, as we don't have a way to query // type size and alignment. clang::CharUnits tySize = @@ -132,7 +132,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // indirectness, type size and type alignment all // decide regSize, but they are all ABI defined // thus need ABI lowering query system. - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_cconv_assert(!cir::MissingFeatures::handleAArch64Indirect()); int regSize = isIndirect ? 8 : tySize.getQuantity(); int regTopIndex; mlir::Value regOffsP; @@ -187,9 +187,9 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we // align __gr_offs to calculate the potential address. if (!IsFPR && !isIndirect && tyAlign.getQuantity() > 8) { - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); - cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); - cir_unreachable("register alignment correction NYI"); + cir_cconv_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_cconv_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_cconv_unreachable("register alignment correction NYI"); } // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. @@ -224,20 +224,21 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( loc, castRegTop.getType(), castRegTop, regOffs); if (isIndirect) { - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); - cir_unreachable("indirect arg passing NYI"); + cir_cconv_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_cconv_unreachable("indirect arg passing NYI"); } // TODO: isHFA, numMembers and base should be query result from query uint64_t numMembers = 0; - cir_tl_assert( + cir_cconv_assert( !cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); bool isHFA = false; // though endianess can be known from datalayout, it might need an unified // ABI lowering query system to answer the question. - cir_tl_assert(!cir::MissingFeatures::supportisEndianQueryForAArch64()); + cir_cconv_assert(!cir::MissingFeatures::supportisEndianQueryForAArch64()); bool isBigEndian = datalayout.isBigEndian(); - cir_tl_assert(!cir::MissingFeatures::supportisAggregateTypeForABIAArch64()); + cir_cconv_assert( + !cir::MissingFeatures::supportisAggregateTypeForABIAArch64()); // TODO: isAggregateTypeForABI should be query result from ABI info bool isAggregateTypeForABI = false; if (isHFA && numMembers > 1) { @@ -245,11 +246,11 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // and stored 16-bytes apart regardless of size (they're notionally in qN, // qN+1, ...). We reload and store into a temporary local variable // contiguously. - cir_tl_assert(!isIndirect && - "Homogeneous aggregates should be passed directly"); - cir_unreachable("Homogeneous aggregates NYI"); + cir_cconv_assert(!isIndirect && + "Homogeneous aggregates should be passed directly"); + cir_cconv_unreachable("Homogeneous aggregates NYI"); } else { - cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_cconv_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // TODO: slotSize should be query result about alignment. clang::CharUnits slotSize = clang::CharUnits::fromQuantity(8); if (isBigEndian && !isIndirect && (isHFA || isAggregateTypeForABI) && @@ -268,12 +269,12 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // On big-endian platforms, the value will be right-aligned in its stack slot. // and we also need to think about other ABI lowering concerns listed below. - cir_tl_assert(!cir::MissingFeatures::handleBigEndian()); - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); - cir_tl_assert( + cir_cconv_assert(!cir::MissingFeatures::handleBigEndian()); + cir_cconv_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_cconv_assert( !cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); - cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); - cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_cconv_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + cir_cconv_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); builder.create(loc, mlir::ValueRange{resAsVoidP}, contBlock); @@ -287,14 +288,14 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( auto ptrDiffTy = mlir::cir::IntType::get(builder.getContext(), 64, /*signed=*/false); - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); - cir_tl_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); + cir_cconv_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_cconv_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); // Again, stack arguments may need realignment. In this case both integer and // floating-point ones might be affected. if (!isIndirect && tyAlign.getQuantity() > 8) { // TODO: this algorithm requres casting from ptr type to int type, then // back to ptr type thus needs careful handling. NYI now. - cir_unreachable("alignment greater than 8 NYI"); + cir_cconv_unreachable("alignment greater than 8 NYI"); } // All stack slots are multiples of 8 bytes. @@ -310,8 +311,8 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // which are all ABI defined thus need ABI lowering query system. // The implementation we have now supports most common cases which assumes // no indirectness, no alignment greater than 8, and little endian. - cir_tl_assert(!cir::MissingFeatures::handleBigEndian()); - cir_tl_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); + cir_cconv_assert(!cir::MissingFeatures::handleBigEndian()); + cir_cconv_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); auto stackSizeC = builder.create( loc, ptrDiffTy, @@ -343,12 +344,12 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( builder.setInsertionPoint(op); contBlock->addArgument(onStackPtr.getType(), loc); auto resP = contBlock->getArgument(0); - cir_tl_assert(mlir::isa(resP.getType())); + cir_cconv_assert(mlir::isa(resP.getType())); auto opResPTy = mlir::cir::PointerType::get(builder.getContext(), opResTy); auto castResP = builder.createBitcast(resP, opResPTy); auto res = builder.create(loc, castResP); // there would be another level of ptr dereference if indirect arg passing - cir_tl_assert(!cir::MissingFeatures::handleAArch64Indirect()); + cir_cconv_assert(!cir::MissingFeatures::handleAArch64Indirect()); if (isIndirect) { res = builder.create(loc, res.getResult()); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index b7f5fdd9215f..992786b7676d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -34,7 +34,7 @@ cir::LoweringPrepareCXXABI *cir::LoweringPrepareCXXABI::createItaniumABI() { static void buildBadCastCall(CIRBaseBuilderTy &builder, mlir::Location loc, mlir::FlatSymbolRefAttr badCastFuncRef) { // TODO(cir): set the calling convention to __cxa_bad_cast. - cir_tl_assert(!MissingFeatures::setCallingConv()); + cir_cconv_assert(!MissingFeatures::setCallingConv()); builder.createCallOp(loc, badCastFuncRef, mlir::ValueRange{}); builder.create(loc); @@ -48,7 +48,7 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, auto castInfo = op.getInfo().value(); // TODO(cir): consider address space - cir_tl_assert(!MissingFeatures::addressSpace()); + cir_cconv_assert(!MissingFeatures::addressSpace()); auto srcPtr = builder.createBitcast(srcValue, builder.getVoidPtrTy()); auto srcRtti = builder.getConstant(loc, castInfo.getSrcRtti()); @@ -59,15 +59,15 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, mlir::Value dynCastFuncArgs[4] = {srcPtr, srcRtti, destRtti, offsetHint}; // TODO(cir): set the calling convention for __dynamic_cast. - cir_tl_assert(!MissingFeatures::setCallingConv()); + cir_cconv_assert(!MissingFeatures::setCallingConv()); mlir::Value castedPtr = builder .createCallOp(loc, dynCastFuncRef, builder.getVoidPtrTy(), dynCastFuncArgs) .getResult(); - cir_tl_assert(mlir::isa(castedPtr.getType()) && - "the return value of __dynamic_cast should be a ptr"); + cir_cconv_assert(mlir::isa(castedPtr.getType()) && + "the return value of __dynamic_cast should be a ptr"); /// C++ [expr.dynamic.cast]p9: /// A failed cast to reference type throws std::bad_cast @@ -93,7 +93,7 @@ buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, bool vtableUsesRelativeLayout = op.getRelativeLayout(); // TODO(cir): consider address space in this function. - cir_tl_assert(!MissingFeatures::addressSpace()); + cir_cconv_assert(!MissingFeatures::addressSpace()); mlir::Type vtableElemTy; uint64_t vtableElemAlign; @@ -141,7 +141,7 @@ LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, auto loc = op->getLoc(); auto srcValue = op.getSrc(); - cir_tl_assert(!MissingFeatures::buildTypeCheck()); + cir_cconv_assert(!MissingFeatures::buildTypeCheck()); if (op.isRefcast()) return buildDynamicCastAfterNullCheck(builder, op); @@ -169,5 +169,5 @@ mlir::Value LoweringPrepareItaniumCXXABI::lowerVAArg( const ::cir::CIRDataLayout &datalayout) { // There is no generic cir lowering for var_arg, here we fail // so to prevent attempt of calling lowerVAArg for ItaniumCXXABI - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp index b97cb490961e..a0d48fb1f5a3 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp @@ -33,7 +33,7 @@ class SPIRVABIInfo : public ABIInfo { private: void computeInfo(LowerFunctionInfo &FI) const override { - cir_assert_or_abort(!::cir::MissingFeatures::SPIRVABI(), "NYI"); + cir_cconv_assert_or_abort(!::cir::MissingFeatures::SPIRVABI(), "NYI"); } }; @@ -57,7 +57,7 @@ class SPIRVTargetLoweringInfo : public TargetLoweringInfo { case Kind::offload_generic: return 4; default: - cir_unreachable("Unknown CIR address space for this target"); + cir_cconv_unreachable("Unknown CIR address space for this target"); } } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 30fb7e4f93c2..60ec92ca230c 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -30,7 +30,7 @@ unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { case X86AVXABILevel::None: return 128; } - cir_unreachable("Unknown AVXLevel"); + cir_cconv_unreachable("Unknown AVXLevel"); } /// Return true if the specified [start,end) bit range is known to either be @@ -50,7 +50,7 @@ static bool BitsContainNoUserData(Type Ty, unsigned StartBit, unsigned EndBit, return true; if (auto arrTy = llvm::dyn_cast(Ty)) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } if (auto structTy = llvm::dyn_cast(Ty)) { @@ -59,7 +59,7 @@ static bool BitsContainNoUserData(Type Ty, unsigned StartBit, unsigned EndBit, // If this is a C++ record, check the bases first. if (::cir::MissingFeatures::isCXXRecordDecl() || ::cir::MissingFeatures::getCXXRecordBases()) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // Verify that no field has data that overlaps the region of interest. Yes @@ -95,7 +95,8 @@ Type getFPTypeAtOffset(Type IRType, unsigned IROffset, if (IROffset == 0 && isa(IRType)) return IRType; - cir_assert_or_abort(!::cir::MissingFeatures::X86GetFPTypeAtOffset(), "NYI"); + cir_cconv_assert_or_abort(!::cir::MissingFeatures::X86GetFPTypeAtOffset(), + "NYI"); return IRType; // FIXME(cir): Temporary workaround for the assertion above. } @@ -194,7 +195,7 @@ class X86_64TargetLoweringInfo : public TargetLoweringInfo { public: X86_64TargetLoweringInfo(LowerTypes &LM, X86AVXABILevel AVXLevel) : TargetLoweringInfo(std::make_unique(LM, AVXLevel)) { - cir_tl_assert(!::cir::MissingFeatures::swift()); + cir_cconv_assert(!::cir::MissingFeatures::swift()); } unsigned getTargetAddrSpaceFromCIRAddrSpace( @@ -208,7 +209,7 @@ class X86_64TargetLoweringInfo : public TargetLoweringInfo { case Kind::offload_generic: return 0; default: - cir_unreachable("Unknown CIR address space for this target"); + cir_cconv_unreachable("Unknown CIR address space for this target"); } } }; @@ -256,17 +257,17 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger // than eight eightbytes, ..., it has class MEMORY. if (Size > 512) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial // copy constructor or a non-trivial destructor, it is passed by invisible // reference. if (getRecordArgABI(RT, getCXXABI())) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // Assume variable sized types are passed in memory. if (::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); const auto &Layout = getContext().getCIRRecordLayout(Ty); @@ -274,8 +275,8 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, Current = Class::NoClass; // If this is a C++ record, classify the bases first. - cir_tl_assert(!::cir::MissingFeatures::isCXXRecordDecl() && - !::cir::MissingFeatures::getCXXRecordBases()); + cir_cconv_assert(!::cir::MissingFeatures::isCXXRecordDecl() && + !::cir::MissingFeatures::getCXXRecordBases()); // Classify the fields one at a time, merging the results. bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <= @@ -284,15 +285,15 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, bool IsUnion = RT.isUnion() && !UseClang11Compat; // FIXME(cir): An interface to handle field declaration might be needed. - cir_tl_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + cir_cconv_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); for (auto [idx, FT] : llvm::enumerate(RT.getMembers())) { uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); - cir_tl_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + cir_cconv_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); bool BitField = false; // Ignore padding bit-fields. if (BitField && !::cir::MissingFeatures::fieldDeclisUnnamedBitField()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than // eight eightbytes, or it contains unaligned fields, it has class @@ -306,11 +307,11 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, // than 128. if (Size > 128 && ((!IsUnion && Size != getContext().getTypeSize(FT)) || Size > getNativeVectorSizeForAVXABI(AVXLevel))) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // Note, skip this test for bit-fields, see below. if (!BitField && Offset % getContext().getTypeAlign(RT)) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // Classify this field. @@ -325,7 +326,7 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, // structure to be passed in memory even if unaligned, and // therefore they can straddle an eightbyte. if (BitField) { - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { classify(FT, Offset, FieldLo, FieldHi, isNamedArg); } @@ -338,8 +339,8 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, postMerge(Size, Lo, Hi); } else { llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; - cir_assert_or_abort(!::cir::MissingFeatures::X86TypeClassification(), - "NYI"); + cir_cconv_assert_or_abort( + !::cir::MissingFeatures::X86TypeClassification(), "NYI"); } // FIXME: _Decimal32 and _Decimal64 are SSE. // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). @@ -347,7 +348,7 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, } llvm::outs() << "Missing X86 classification for non-builtin types\n"; - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } /// Return a type that will be passed by the backend in the low 8 bytes of an @@ -365,12 +366,12 @@ Type X86_64ABIInfo::GetSSETypeAtOffset(Type IRType, unsigned IROffset, Type T1 = {}; unsigned T0Size = TD.getTypeAllocSize(T0); if (SourceSize > T0Size) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); if (T1 == nullptr) { // Check if IRType is a half/bfloat + float. float type will be in // IROffset+4 due to its alignment. if (isa(T0) && SourceSize > 4) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // If we can't get a second FP type, return a simple half or float. // avx512fp16-abi.c:pr51813_2 shows it works to return float for // {float, i8} too. @@ -378,7 +379,7 @@ Type X86_64ABIInfo::GetSSETypeAtOffset(Type IRType, unsigned IROffset, return T0; } - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } /// The ABI specifies that a value should be passed in an 8-byte GPR. This @@ -402,7 +403,7 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, // returning an 8-byte unit starting with it. See if we can safely use it. if (IROffset == 0) { // Pointers and int64's always fill the 8-byte unit. - cir_tl_assert(!isa(DestTy) && "Ptrs are NYI"); + cir_cconv_assert(!isa(DestTy) && "Ptrs are NYI"); // If we have a 1/2/4-byte integer, we can use it only if the rest of the // goodness in the source type is just tail padding. This is allowed to @@ -439,8 +440,8 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); // FIXME(cir): Temporary workaround to make things non-blocking. - if (!cirMissingFeatureAssertionMode) - cir_tl_assert(TySizeInBytes != SourceOffset && "Empty field?"); + if (!cirCConvAssertionMode) + cir_cconv_assert(TySizeInBytes != SourceOffset && "Empty field?"); // It is always safe to classify this as an integer type up to i64 that // isn't larger than the structure. @@ -462,10 +463,10 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { classify(RetTy, 0, Lo, Hi, true); // Check some invariants. - cir_tl_assert((Hi != Class::Memory || Lo == Class::Memory) && - "Invalid memory classification."); - cir_tl_assert((Hi != Class::SSEUp || Lo == Class::SSE) && - "Invalid SSEUp classification."); + cir_cconv_assert((Hi != Class::Memory || Lo == Class::Memory) && + "Invalid memory classification."); + cir_cconv_assert((Hi != Class::SSEUp || Lo == Class::SSE) && + "Invalid SSEUp classification."); Type resType = {}; switch (Lo) { @@ -496,8 +497,8 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { break; default: - cir_assert_or_abort(!::cir::MissingFeatures::X86RetTypeClassification(), - "NYI"); + cir_cconv_assert_or_abort( + !::cir::MissingFeatures::X86RetTypeClassification(), "NYI"); } Type HighPart = {}; @@ -507,14 +508,14 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { break; default: - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } // If a high part was specified, merge it together with the low part. It is // known to pass in the high eightbyte of the result. We do this by forming // a first class struct aggregate with the high and low part: {low, high} if (HighPart) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); return ABIArgInfo::getDirect(resType); } @@ -531,10 +532,10 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, // Check some invariants. // FIXME: Enforce these by construction. - cir_tl_assert((Hi != Class::Memory || Lo == Class::Memory) && - "Invalid memory classification."); - cir_tl_assert((Hi != Class::SSEUp || Lo == Class::SSE) && - "Invalid SSEUp classification."); + cir_cconv_assert((Hi != Class::Memory || Lo == Class::Memory) && + "Invalid memory classification."); + cir_cconv_assert((Hi != Class::SSEUp || Lo == Class::SSE) && + "Invalid SSEUp classification."); neededInt = 0; neededSSE = 0; @@ -571,8 +572,8 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, break; } default: - cir_assert_or_abort(!::cir::MissingFeatures::X86ArgTypeClassification(), - "NYI"); + cir_cconv_assert_or_abort( + !::cir::MissingFeatures::X86ArgTypeClassification(), "NYI"); } Type HighPart = {}; @@ -580,11 +581,11 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, case Class::NoClass: break; default: - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } if (HighPart) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); return ABIArgInfo::getDirect(ResType); } @@ -595,7 +596,7 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { // using __attribute__((ms_abi)). In such case to correctly emit Win64 // compatible code delegate this call to WinX86_64ABIInfo::computeInfo. if (CallingConv == llvm::CallingConv::Win64) { - cir_unreachable("Win64 CC is NYI"); + cir_cconv_unreachable("Win64 CC is NYI"); } bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; @@ -607,7 +608,7 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { if (!::mlir::cir::classifyReturnType(getCXXABI(), FI, *this)) { if (IsRegCall || ::cir::MissingFeatures::regCall()) { - cir_unreachable("RegCall is NYI"); + cir_cconv_unreachable("RegCall is NYI"); } else FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); } @@ -615,13 +616,13 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { // If the return value is indirect, then the hidden argument is consuming // one integer register. if (FI.getReturnInfo().isIndirect()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); else if (NeededSSE && MaxVectorWidth) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); // The chain argument effectively gives us another free register. if (::cir::MissingFeatures::chainCall()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); unsigned NumRequiredArgs = FI.getNumRequiredArgs(); // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers @@ -632,7 +633,7 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { bool IsNamedArg = ArgNo < NumRequiredArgs; if (IsRegCall && ::cir::MissingFeatures::regCall()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); else it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, NeededSSE, IsNamedArg); @@ -645,9 +646,9 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { FreeIntRegs -= NeededInt; FreeSSERegs -= NeededSSE; if (::cir::MissingFeatures::vectorType()) - cir_unreachable("NYI"); + cir_cconv_unreachable("NYI"); } else { - cir_unreachable("Indirect results are NYI"); + cir_cconv_unreachable("Indirect results are NYI"); } } } @@ -676,8 +677,8 @@ X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { // Accum should never be memory (we should have returned) or // ComplexX87 (because this cannot be passed in a structure). - cir_tl_assert((Accum != Class::Memory && Accum != Class::ComplexX87) && - "Invalid accumulated classification during merge."); + cir_cconv_assert((Accum != Class::Memory && Accum != Class::ComplexX87) && + "Invalid accumulated classification during merge."); if (Accum == Field || Field == Class::NoClass) return Accum; if (Field == Class::Memory) From a639a5f4da8b4ebd5e3e300ddee87109733cd715 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Tue, 15 Oct 2024 18:54:26 -0700 Subject: [PATCH 1947/2301] [CIR][CIRGen] Implement CIRGenModule::shouldEmitFunction (#984) This is the usual copy-paste-modify from CodeGen, though I changed all the variable names to conform to our new style. All these functions should be pulled out as common helpers when we're upstream. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 107 ++++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 4 +- clang/test/CIR/CodeGen/linkage.c | 11 ++- 3 files changed, 114 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 7da61a261e21..96f32e16ddc1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -3031,9 +3031,110 @@ void CIRGenModule::Release() { // TODO: FINISH THE REST OF THIS } -bool CIRGenModule::shouldEmitFunction(GlobalDecl GD) { - // TODO: implement this -- requires defining linkage for CIR - return true; +namespace { +// TODO(cir): This should be a common helper shared with CodeGen. +struct FunctionIsDirectlyRecursive + : public ConstStmtVisitor { + const StringRef name; + const Builtin::Context &builtinCtx; + FunctionIsDirectlyRecursive(StringRef name, + const Builtin::Context &builtinCtx) + : name(name), builtinCtx(builtinCtx) {} + + bool VisitCallExpr(const CallExpr *expr) { + const FunctionDecl *func = expr->getDirectCallee(); + if (!func) + return false; + AsmLabelAttr *attr = func->getAttr(); + if (attr && name == attr->getLabel()) + return true; + unsigned builtinId = func->getBuiltinID(); + if (!builtinId || !builtinCtx.isLibFunction(builtinId)) + return false; + StringRef builtinName = builtinCtx.getName(builtinId); + if (builtinName.starts_with("__builtin_") && + name == builtinName.slice(strlen("__builtin_"), StringRef::npos)) { + return true; + } + return false; + } + + bool VisitStmt(const Stmt *stmt) { + for (const Stmt *child : stmt->children()) + if (child && this->Visit(child)) + return true; + return false; + } +}; +} // namespace + +// isTriviallyRecursive - Check if this function calls another +// decl that, because of the asm attribute or the other decl being a builtin, +// ends up pointing to itself. +// TODO(cir): This should be a common helper shared with CodeGen. +bool CIRGenModule::isTriviallyRecursive(const FunctionDecl *func) { + StringRef name; + if (getCXXABI().getMangleContext().shouldMangleDeclName(func)) { + // asm labels are a special kind of mangling we have to support. + AsmLabelAttr *attr = func->getAttr(); + if (!attr) + return false; + name = attr->getLabel(); + } else { + name = func->getName(); + } + + FunctionIsDirectlyRecursive walker(name, astCtx.BuiltinInfo); + const Stmt *body = func->getBody(); + return body ? walker.Visit(body) : false; +} + +// TODO(cir): This should be a common helper shared with CodeGen. +bool CIRGenModule::shouldEmitFunction(GlobalDecl globalDecl) { + if (getFunctionLinkage(globalDecl) != + GlobalLinkageKind::AvailableExternallyLinkage) + return true; + + const auto *func = cast(globalDecl.getDecl()); + // Inline builtins declaration must be emitted. They often are fortified + // functions. + if (func->isInlineBuiltinDeclaration()) + return true; + + if (codeGenOpts.OptimizationLevel == 0 && !func->hasAttr()) + return false; + + // We don't import function bodies from other named module units since that + // behavior may break ABI compatibility of the current unit. + if (const Module *mod = func->getOwningModule(); + mod && mod->getTopLevelModule()->isNamedModule() && + astCtx.getCurrentNamedModule() != mod->getTopLevelModule()) { + // There are practices to mark template member function as always-inline + // and mark the template as extern explicit instantiation but not give + // the definition for member function. So we have to emit the function + // from explicitly instantiation with always-inline. + // + // See https://github.com/llvm/llvm-project/issues/86893 for details. + // + // TODO: Maybe it is better to give it a warning if we call a non-inline + // function from other module units which is marked as always-inline. + if (!func->isTemplateInstantiation() || !func->hasAttr()) + return false; + } + + if (func->hasAttr()) + return false; + + if (func->hasAttr() && !func->hasAttr()) + assert(!MissingFeatures::setDLLImportDLLExport() && + "shouldEmitFunction for dllimport is NYI"); + + // PR9614. Avoid cases where the source code is lying to us. An available + // externally function should have an equivalent function somewhere else, + // but a function that calls itself through asm label/`__builtin_` trickery is + // clearly not equivalent to the real implementation. + // This happens in glibc's btowc and in some configure checks. + return !isTriviallyRecursive(func); } bool CIRGenModule::supportsCOMDAT() const { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 827e7ac82839..16f95c164712 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -665,7 +665,9 @@ class CIRGenModule : public CIRGenTypeCache { // Finalize CIR code generation. void Release(); - bool shouldEmitFunction(clang::GlobalDecl GD); + bool isTriviallyRecursive(const clang::FunctionDecl *func); + + bool shouldEmitFunction(clang::GlobalDecl globalDecl); /// Returns a pointer to a global variable representing a temporary with /// static or thread storage duration. diff --git a/clang/test/CIR/CodeGen/linkage.c b/clang/test/CIR/CodeGen/linkage.c index 1b087f43ca81..49fdb643b2cb 100644 --- a/clang/test/CIR/CodeGen/linkage.c +++ b/clang/test/CIR/CodeGen/linkage.c @@ -1,5 +1,7 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t-O0.cir +// RUN: FileCheck --input-file=%t-O0.cir %s -check-prefixes=CIR,CIR-O0 +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -O1 -o %t-O1.cir +// RUN: FileCheck --input-file=%t-O1.cir %s -check-prefixes=CIR,CIR-O1 // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM @@ -24,7 +26,8 @@ int get_var(void) { return var; } -// Should generate available_externally linkage. +// Should generate available_externally linkage when optimizing. inline int availableExternallyMethod(void) { return 0; } void callAvailableExternallyMethod(void) { availableExternallyMethod(); } -// CIR: cir.func available_externally @availableExternallyMethod +// CIR-O0-NOT: cir.func available_externally @availableExternallyMethod +// CIR-O1: cir.func available_externally @availableExternallyMethod From 25784cbd7cb1034e2c68b0883f022525d78bc104 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Tue, 15 Oct 2024 18:55:05 -0700 Subject: [PATCH 1948/2301] [CIR][CIRGen] Port 1d0bd8e51be2627f79bede54735c38b917ea04ee (#983) https://github.com/llvm/llvm-project/commit/1d0bd8e51be2627f79bede54735c38b917ea04ee moves a conditional from CodeGen to AST, and this follows suit for consistency. (Our support for the Microsoft ABI is NYI anyway; this is just to make things simpler to follow when matching up logic between CodeGen and CIRGen.) --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 96f32e16ddc1..089fae6f89a0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2130,16 +2130,6 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { if (const auto *Dtor = dyn_cast(D)) return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType()); - if (isa(D) && - cast(D)->isInheritingConstructor() && - astCtx.getTargetInfo().getCXXABI().isMicrosoft()) { - // Just like in LLVM codegen: - // Our approach to inheriting constructors is fundamentally different from - // that used by the MS ABI, so keep our inheriting constructor thunks - // internal rather than trying to pick an unambiguous mangling for them. - return mlir::cir::GlobalLinkageKind::InternalLinkage; - } - return getCIRLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false); } From 7784bdf477c567b1e104e8c0e64f2f5fc6c131ef Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 15 Oct 2024 17:18:08 -0700 Subject: [PATCH 1949/2301] [CIR][LowerToLLVM] Lower cir.vtt.address_point --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 79 ++++++++++++++----- clang/test/CIR/CodeGen/vtt.cpp | 28 ++++++- 2 files changed, 88 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e8449c6a3f8d..c15fc31b29da 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -825,6 +825,60 @@ class CIRBaseClassAddrOpLowering } }; +static mlir::Value +getValueForVTableSymbol(mlir::Operation *op, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter, + mlir::FlatSymbolRefAttr nameAttr, mlir::Type &eltType) { + auto module = op->getParentOfType(); + auto *symbol = mlir::SymbolTable::lookupSymbolIn(module, nameAttr); + if (auto llvmSymbol = dyn_cast(symbol)) { + eltType = llvmSymbol.getType(); + } else if (auto cirSymbol = dyn_cast(symbol)) { + eltType = converter->convertType(cirSymbol.getSymType()); + } + return rewriter.create( + op->getLoc(), mlir::LLVM::LLVMPointerType::get(op->getContext()), + nameAttr.getValue()); +} + +class CIRVTTAddrPointOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern< + mlir::cir::VTTAddrPointOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VTTAddrPointOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + const mlir::Type resultType = getTypeConverter()->convertType(op.getType()); + llvm::SmallVector offsets; + mlir::Type eltType; + mlir::Value llvmAddr = adaptor.getSymAddr(); + + if (op.getSymAddr()) { + if (op.getOffset() == 0) { + rewriter.replaceAllUsesWith(op, llvmAddr); + rewriter.eraseOp(op); + return mlir::success(); + } + + offsets.push_back(adaptor.getOffset()); + eltType = mlir::IntegerType::get(resultType.getContext(), 8, + mlir::IntegerType::Signless); + } else { + llvmAddr = getValueForVTableSymbol(op, rewriter, getTypeConverter(), + op.getNameAttr(), eltType); + assert(eltType && "Shouldn't ever be missing an eltType here"); + offsets.push_back(0); + offsets.push_back(adaptor.getOffset()); + } + rewriter.replaceOpWithNewOp(op, resultType, eltType, + llvmAddr, offsets, true); + return mlir::success(); + } +}; + class CIRBrCondOpLowering : public mlir::OpConversionPattern { public: @@ -3486,18 +3540,8 @@ class CIRVTableAddrPointOpLowering llvm::SmallVector offsets; mlir::Type eltType; if (!symAddr) { - // Get the vtable address point from a global variable - auto module = op->getParentOfType(); - auto *symbol = - mlir::SymbolTable::lookupSymbolIn(module, op.getNameAttr()); - if (auto llvmSymbol = dyn_cast(symbol)) { - eltType = llvmSymbol.getType(); - } else if (auto cirSymbol = dyn_cast(symbol)) { - eltType = converter->convertType(cirSymbol.getSymType()); - } - symAddr = rewriter.create( - op.getLoc(), mlir::LLVM::LLVMPointerType::get(getContext()), - *op.getName()); + symAddr = getValueForVTableSymbol(op, rewriter, getTypeConverter(), + op.getNameAttr(), eltType); offsets = llvm::SmallVector{ 0, op.getVtableIndex(), op.getAddressPointIndex()}; } else { @@ -3508,11 +3552,9 @@ class CIRVTableAddrPointOpLowering llvm::SmallVector{op.getAddressPointIndex()}; } - if (eltType) - rewriter.replaceOpWithNewOp(op, targetType, eltType, - symAddr, offsets, true); - else - llvm_unreachable("Shouldn't ever be missing an eltType here"); + assert(eltType && "Shouldn't ever be missing an eltType here"); + rewriter.replaceOpWithNewOp(op, targetType, eltType, + symAddr, offsets, true); return mlir::success(); } @@ -4087,7 +4129,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRCmpThreeWayOpLowering, CIRClearCacheOpLowering, CIRUndefOpLowering, CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, CIRFreeExceptionOpLowering, - CIRThrowOpLowering, CIRIntrinsicCallLowering, CIRBaseClassAddrOpLowering + CIRThrowOpLowering, CIRIntrinsicCallLowering, CIRBaseClassAddrOpLowering, + CIRVTTAddrPointOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index eac47dd36804..cf24bae036c8 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s class A { public: @@ -66,6 +68,17 @@ int main() { // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr, !cir.ptr> // CIR: } +// LLVM-LABEL: @_ZN1BC2Ev +// LLVM: %[[THIS_ADDR:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[VTT_ADDR:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]], align 8 +// LLVM: %[[V:.*]] = load ptr, ptr %[[VTT]], align 8 +// LLVM: store ptr %[[V]], ptr %[[THIS]], align 8 +// LLVM: getelementptr inbounds i8, ptr %[[VTT]], i32 1 +// LLVM: ret void +// LLVM: } + // Class C constructor // CIR: cir.func linkonce_odr @_ZN1CC2Ev(%arg0: !cir.ptr loc({{.*}}), %arg1: !cir.ptr> loc({{.*}})) extra(#fn_attr) { // CIR: %{{[0-9]+}} = cir.vtt.address_point %{{[0-9]+}} : !cir.ptr>, offset = 0 -> !cir.ptr> @@ -122,4 +135,17 @@ int main() { // CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: cir.return -// CIR: } \ No newline at end of file +// CIR: } + +// LLVM-LABEL: @_ZN1DC1Ev +// LLVM: %2 = alloca ptr, i64 1, align 8 +// LLVM: store ptr %0, ptr %2, align 8 +// LLVM: %[[THIS:.*]] = load ptr, ptr %2, align 8 +// LLVM: %[[BASE_A:.*]] = getelementptr i8, ptr %[[THIS]], i32 40 +// LLVM: call void @_ZN1AC2Ev(ptr %[[BASE_A]]) +// LLVM: %[[BASE_B:.*]] = getelementptr i8, ptr %[[THIS]], i32 0 +// LLVM: call void @_ZN1BC2Ev(ptr %[[BASE_B]], ptr getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i32 0, i32 1)) +// LLVM: %[[BASE_C:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 +// LLVM: call void @_ZN1CC2Ev(ptr %[[BASE_C]], ptr getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i32 0, i32 3)) +// LLVM: ret void +// LLVM: } \ No newline at end of file From 660f609408f5fd398b9972d5b6bf6e603bf1c42f Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 15 Oct 2024 19:14:57 -0700 Subject: [PATCH 1950/2301] [CIR][NFC] Rename function in a test --- clang/test/CIR/CodeGen/vtt.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index cf24bae036c8..3f51d7a22c86 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -28,9 +28,9 @@ class D : public B, public C { }; -int main() { - B *b = new D (); - return 0; +int f() { + B *b = new D (); + return 0; } // Vtable of Class A From 19ab882da99dcd2593694507698d249ad4050189 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 15 Oct 2024 19:25:48 -0700 Subject: [PATCH 1951/2301] [CIR][CIRGen] Get more vtable and dtor working --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 10 ++++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 15 ++++++- clang/test/CIR/CodeGen/vtt.cpp | 41 +++++++++++++++++-- 4 files changed, 59 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index c1f2a2480b1e..a726b6c77475 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1205,7 +1205,8 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { if (CGM.getCodeGenOpts().StrictVTablePointers && CGM.getCodeGenOpts().OptimizationLevel > 0) llvm_unreachable("NYI"); - llvm_unreachable("NYI"); + initializeVTablePointers(getLoc(Dtor->getSourceRange()), + Dtor->getParent()); } if (isTryBody) @@ -1466,7 +1467,12 @@ mlir::Value CIRGenFunction::GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, if (Delegating) { llvm_unreachable("NYI"); } else if (RD == Base) { - llvm_unreachable("NYI"); + // If the record matches the base, this is the complete ctor/dtor + // variant calling the base variant in a class with virtual bases. + assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) && + "doing no-op VTT offset in base dtor/ctor?"); + assert(!ForVirtualBase && "Can't have same class as virtual base!"); + SubVTTIndex = 0; } else { const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); CharUnits BaseOffset = ForVirtualBase ? Layout.getVBaseClassOffset(Base) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 7dded241490b..212991871dce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -52,7 +52,7 @@ commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD, // If there is an implicit parameter (e.g. VTT), emit it. if (ImplicitParam) { - llvm_unreachable("NYI"); + Args.add(RValue::get(ImplicitParam), ImplicitParamTy); } const auto *FPT = MD->getType()->castAs(); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 79d4f127ca23..b3561f345b91 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -131,7 +131,18 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { // The Itanium ABI has separate complete-object vs. base-object variants of // both constructors and destructors. if (isa(GD.getDecl())) { - llvm_unreachable("NYI"); + switch (GD.getDtorType()) { + case Dtor_Complete: + case Dtor_Deleting: + return true; + + case Dtor_Base: + return false; + + case Dtor_Comdat: + llvm_unreachable("emitting dtor comdat as function?"); + } + llvm_unreachable("bad dtor kind"); } if (isa(GD.getDecl())) { switch (GD.getCtorType()) { @@ -379,7 +390,7 @@ bool CIRGenItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { // Check if we have a base destructor. if (isa(MD) && GD.getDtorType() == Dtor_Base) - llvm_unreachable("NYI"); + return true; return false; } diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index 3f51d7a22c86..25437e295afc 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -49,7 +49,7 @@ int f() { // CIR: cir.global linkonce_odr @_ZTT1D = #cir.const_array<[#cir.global_view<@_ZTV1D, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D0_1B, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D0_1B, [0 : i32, 1 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D16_1C, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D16_1C, [0 : i32, 1 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTV1D, [0 : i32, 2 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTV1D, [0 : i32, 1 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 7> {alignment = 8 : i64} // Class B constructor -// CIR: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr loc({{.*}}), %arg1: !cir.ptr> loc({{.*}})) extra(#fn_attr) { +// CIR: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr // CIR: %{{[0-9]+}} = cir.vtt.address_point %{{[0-9]+}} : !cir.ptr>, offset = 0 -> !cir.ptr> // CIR: %{{[0-9]+}} = cir.load align(8) %{{[0-9]+}} : !cir.ptr>, !cir.ptr // CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr> @@ -80,7 +80,7 @@ int f() { // LLVM: } // Class C constructor -// CIR: cir.func linkonce_odr @_ZN1CC2Ev(%arg0: !cir.ptr loc({{.*}}), %arg1: !cir.ptr> loc({{.*}})) extra(#fn_attr) { +// CIR: cir.func linkonce_odr @_ZN1CC2Ev(%arg0: !cir.ptr // CIR: %{{[0-9]+}} = cir.vtt.address_point %{{[0-9]+}} : !cir.ptr>, offset = 0 -> !cir.ptr> // CIR: %{{[0-9]+}} = cir.load align(8) %{{[0-9]+}} : !cir.ptr>, !cir.ptr // CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr> @@ -100,7 +100,7 @@ int f() { // CIR: } // Class D constructor -// CIR: cir.func linkonce_odr @_ZN1DC1Ev(%arg0: !cir.ptr loc({{.*}})) extra(#fn_attr) { +// CIR: cir.func linkonce_odr @_ZN1DC1Ev(%arg0: !cir.ptr // CIR: %{{[0-9]+}} = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CIR: cir.store %arg0, %{{[0-9]+}} : !cir.ptr, !cir.ptr> // CIR: %[[D_PTR:.*]] = cir.load %0 : !cir.ptr>, !cir.ptr @@ -148,4 +148,37 @@ int f() { // LLVM: %[[BASE_C:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 // LLVM: call void @_ZN1CC2Ev(ptr %[[BASE_C]], ptr getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i32 0, i32 3)) // LLVM: ret void -// LLVM: } \ No newline at end of file +// LLVM: } + +namespace other { + struct A { + A(); + ~A(); + }; + + struct B : virtual A { + B(); + ~B(); + }; + + extern int foo(); + B::B() { + int x = foo(); + } + + B::~B() { + int y = foo(); + } +} + +// CIR-LABEL: cir.func @_ZN5other1BD1Ev( +// CIR-SAME: %[[VAL_0:.*]]: !cir.ptr +// CIR: %[[VAL_1:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CIR: cir.store %[[VAL_0]], %[[VAL_1]] : !cir.ptr, !cir.ptr> +// CIR: %[[VAL_2:.*]] = cir.load %[[VAL_1]] : !cir.ptr>, !cir.ptr +// CIR: %[[VAL_3:.*]] = cir.vtt.address_point @_ZTTN5other1BE, offset = 0 -> !cir.ptr> +// CIR: cir.call @_ZN5other1BD2Ev(%[[VAL_2]], %[[VAL_3]]) : (!cir.ptr, !cir.ptr>) -> () +// CIR: %[[VAL_4:.*]] = cir.base_class_addr(%[[VAL_2]] : !cir.ptr nonnull) [0] -> !cir.ptr +// CIR: cir.call @_ZN5other1AD2Ev(%[[VAL_4]]) : (!cir.ptr) -> () +// CIR: cir.return +// CIR: } \ No newline at end of file From d9f206486921e7ec522e933dd8b420418d66a62d Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 15 Oct 2024 22:42:25 -0400 Subject: [PATCH 1952/2301] [CIR][CIRGen][NFC] Consolidate RUN lines for builtin tests (#968) There is no change to testing functionality. This refacot let those files have the same Run options that is easier to maintain and extend. --- clang/test/CIR/CodeGen/builtin-assume.cpp | 2 +- clang/test/CIR/CodeGen/builtin-bits.cpp | 140 +++--- .../CodeGen/builtin-constant-evaluated.cpp | 15 +- clang/test/CIR/CodeGen/builtins-overflow.cpp | 426 +++++++++--------- 4 files changed, 292 insertions(+), 291 deletions(-) diff --git a/clang/test/CIR/CodeGen/builtin-assume.cpp b/clang/test/CIR/CodeGen/builtin-assume.cpp index da807994f4b1..6776dde7c26f 100644 --- a/clang/test/CIR/CodeGen/builtin-assume.cpp +++ b/clang/test/CIR/CodeGen/builtin-assume.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir int test_assume(int x) { diff --git a/clang/test/CIR/CodeGen/builtin-bits.cpp b/clang/test/CIR/CodeGen/builtin-bits.cpp index 6b82f75187b8..3fd23bc51b53 100644 --- a/clang/test/CIR/CodeGen/builtin-bits.cpp +++ b/clang/test/CIR/CodeGen/builtin-bits.cpp @@ -1,186 +1,186 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir int test_builtin_clrsb(int x) { return __builtin_clrsb(x); } -// CHECK: cir.func @_Z18test_builtin_clrsbi -// CHECK: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s32i) : !s32i -// CHECK: } +// CIR: cir.func @_Z18test_builtin_clrsbi +// CIR: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s32i) : !s32i +// CIR: } int test_builtin_clrsbl(long x) { return __builtin_clrsbl(x); } -// CHECK: cir.func @_Z19test_builtin_clrsbll -// CHECK: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z19test_builtin_clrsbll +// CIR: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s64i) : !s32i +// CIR: } int test_builtin_clrsbll(long long x) { return __builtin_clrsbll(x); } -// CHECK: cir.func @_Z20test_builtin_clrsbllx -// CHECK: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z20test_builtin_clrsbllx +// CIR: %{{.+}} = cir.bit.clrsb(%{{.+}} : !s64i) : !s32i +// CIR: } int test_builtin_ctzs(unsigned short x) { return __builtin_ctzs(x); } -// CHECK: cir.func @_Z17test_builtin_ctzst -// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u16i) : !s32i +// CIR: cir.func @_Z17test_builtin_ctzst +// CIR: %{{.+}} = cir.bit.ctz(%{{.+}} : !u16i) : !s32i // CHEKC: } int test_builtin_ctz(unsigned x) { return __builtin_ctz(x); } -// CHECK: cir.func @_Z16test_builtin_ctzj -// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u32i) : !s32i -// CHECK: } +// CIR: cir.func @_Z16test_builtin_ctzj +// CIR: %{{.+}} = cir.bit.ctz(%{{.+}} : !u32i) : !s32i +// CIR: } int test_builtin_ctzl(unsigned long x) { return __builtin_ctzl(x); } -// CHECK: cir.func @_Z17test_builtin_ctzlm -// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z17test_builtin_ctzlm +// CIR: %{{.+}} = cir.bit.ctz(%{{.+}} : !u64i) : !s32i +// CIR: } int test_builtin_ctzll(unsigned long long x) { return __builtin_ctzll(x); } -// CHECK: cir.func @_Z18test_builtin_ctzlly -// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z18test_builtin_ctzlly +// CIR: %{{.+}} = cir.bit.ctz(%{{.+}} : !u64i) : !s32i +// CIR: } int test_builtin_ctzg(unsigned x) { return __builtin_ctzg(x); } -// CHECK: cir.func @_Z17test_builtin_ctzgj -// CHECK: %{{.+}} = cir.bit.ctz(%{{.+}} : !u32i) : !s32i -// CHECK: } +// CIR: cir.func @_Z17test_builtin_ctzgj +// CIR: %{{.+}} = cir.bit.ctz(%{{.+}} : !u32i) : !s32i +// CIR: } int test_builtin_clzs(unsigned short x) { return __builtin_clzs(x); } -// CHECK: cir.func @_Z17test_builtin_clzst -// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u16i) : !s32i -// CHECK: } +// CIR: cir.func @_Z17test_builtin_clzst +// CIR: %{{.+}} = cir.bit.clz(%{{.+}} : !u16i) : !s32i +// CIR: } int test_builtin_clz(unsigned x) { return __builtin_clz(x); } -// CHECK: cir.func @_Z16test_builtin_clzj -// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u32i) : !s32i -// CHECK: } +// CIR: cir.func @_Z16test_builtin_clzj +// CIR: %{{.+}} = cir.bit.clz(%{{.+}} : !u32i) : !s32i +// CIR: } int test_builtin_clzl(unsigned long x) { return __builtin_clzl(x); } -// CHECK: cir.func @_Z17test_builtin_clzlm -// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z17test_builtin_clzlm +// CIR: %{{.+}} = cir.bit.clz(%{{.+}} : !u64i) : !s32i +// CIR: } int test_builtin_clzll(unsigned long long x) { return __builtin_clzll(x); } -// CHECK: cir.func @_Z18test_builtin_clzlly -// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z18test_builtin_clzlly +// CIR: %{{.+}} = cir.bit.clz(%{{.+}} : !u64i) : !s32i +// CIR: } int test_builtin_clzg(unsigned x) { return __builtin_clzg(x); } -// CHECK: cir.func @_Z17test_builtin_clzgj -// CHECK: %{{.+}} = cir.bit.clz(%{{.+}} : !u32i) : !s32i -// CHECK: } +// CIR: cir.func @_Z17test_builtin_clzgj +// CIR: %{{.+}} = cir.bit.clz(%{{.+}} : !u32i) : !s32i +// CIR: } int test_builtin_ffs(int x) { return __builtin_ffs(x); } -// CHECK: cir.func @_Z16test_builtin_ffsi -// CHECK: %{{.+}} = cir.bit.ffs(%{{.+}} : !s32i) : !s32i -// CHECK: } +// CIR: cir.func @_Z16test_builtin_ffsi +// CIR: %{{.+}} = cir.bit.ffs(%{{.+}} : !s32i) : !s32i +// CIR: } int test_builtin_ffsl(long x) { return __builtin_ffsl(x); } -// CHECK: cir.func @_Z17test_builtin_ffsll -// CHECK: %{{.+}} = cir.bit.ffs(%{{.+}} : !s64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z17test_builtin_ffsll +// CIR: %{{.+}} = cir.bit.ffs(%{{.+}} : !s64i) : !s32i +// CIR: } int test_builtin_ffsll(long long x) { return __builtin_ffsll(x); } -// CHECK: cir.func @_Z18test_builtin_ffsllx -// CHECK: %{{.+}} = cir.bit.ffs(%{{.+}} : !s64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z18test_builtin_ffsllx +// CIR: %{{.+}} = cir.bit.ffs(%{{.+}} : !s64i) : !s32i +// CIR: } int test_builtin_parity(unsigned x) { return __builtin_parity(x); } -// CHECK: cir.func @_Z19test_builtin_parityj -// CHECK: %{{.+}} = cir.bit.parity(%{{.+}} : !u32i) : !s32i -// CHECK: } +// CIR: cir.func @_Z19test_builtin_parityj +// CIR: %{{.+}} = cir.bit.parity(%{{.+}} : !u32i) : !s32i +// CIR: } int test_builtin_parityl(unsigned long x) { return __builtin_parityl(x); } -// CHECK: cir.func @_Z20test_builtin_paritylm -// CHECK: %{{.+}} = cir.bit.parity(%{{.+}} : !u64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z20test_builtin_paritylm +// CIR: %{{.+}} = cir.bit.parity(%{{.+}} : !u64i) : !s32i +// CIR: } int test_builtin_parityll(unsigned long long x) { return __builtin_parityll(x); } -// CHECK: cir.func @_Z21test_builtin_paritylly -// CHECK: %{{.+}} = cir.bit.parity(%{{.+}} : !u64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z21test_builtin_paritylly +// CIR: %{{.+}} = cir.bit.parity(%{{.+}} : !u64i) : !s32i +// CIR: } int test_builtin_popcount(unsigned x) { return __builtin_popcount(x); } -// CHECK: cir.func @_Z21test_builtin_popcountj -// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u32i) : !s32i -// CHECK: } +// CIR: cir.func @_Z21test_builtin_popcountj +// CIR: %{{.+}} = cir.bit.popcount(%{{.+}} : !u32i) : !s32i +// CIR: } int test_builtin_popcountl(unsigned long x) { return __builtin_popcountl(x); } -// CHECK: cir.func @_Z22test_builtin_popcountlm -// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z22test_builtin_popcountlm +// CIR: %{{.+}} = cir.bit.popcount(%{{.+}} : !u64i) : !s32i +// CIR: } int test_builtin_popcountll(unsigned long long x) { return __builtin_popcountll(x); } -// CHECK: cir.func @_Z23test_builtin_popcountlly -// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u64i) : !s32i -// CHECK: } +// CIR: cir.func @_Z23test_builtin_popcountlly +// CIR: %{{.+}} = cir.bit.popcount(%{{.+}} : !u64i) : !s32i +// CIR: } int test_builtin_popcountg(unsigned x) { return __builtin_popcountg(x); } -// CHECK: cir.func @_Z22test_builtin_popcountgj -// CHECK: %{{.+}} = cir.bit.popcount(%{{.+}} : !u32i) : !s32i -// CHECK: } +// CIR: cir.func @_Z22test_builtin_popcountgj +// CIR: %{{.+}} = cir.bit.popcount(%{{.+}} : !u32i) : !s32i +// CIR: } diff --git a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp index d09a60085f81..a53d85fbf55b 100644 --- a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp +++ b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp @@ -1,11 +1,12 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir auto func() { return __builtin_strcmp("", ""); - // CHECK: cir.func @_Z4funcv() - // CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} loc(#loc2) - // CHECK-NEXT: %1 = cir.const #cir.int<0> : !s32i loc(#loc7) - // CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr loc(#loc8) - // CHECK-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i loc(#loc8) - // CHECK-NEXT: cir.return %2 : !s32i loc(#loc8) + // CIR: cir.func @_Z4funcv() + // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} loc(#loc2) + // CIR-NEXT: %1 = cir.const #cir.int<0> : !s32i loc(#loc7) + // CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr loc(#loc8) + // CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i loc(#loc8) + // CIR-NEXT: cir.return %2 : !s32i loc(#loc8) } diff --git a/clang/test/CIR/CodeGen/builtins-overflow.cpp b/clang/test/CIR/CodeGen/builtins-overflow.cpp index d4652527cb56..b61a1d3f5d0b 100644 --- a/clang/test/CIR/CodeGen/builtins-overflow.cpp +++ b/clang/test/CIR/CodeGen/builtins-overflow.cpp @@ -1,364 +1,364 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir bool test_add_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) { return __builtin_add_overflow(x, y, res); } -// CHECK: cir.func @_Z32test_add_overflow_uint_uint_uintjjPj -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z32test_add_overflow_uint_uint_uintjjPj +// CIR: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CIR: } bool test_add_overflow_int_int_int(int x, int y, int *res) { return __builtin_add_overflow(x, y, res); } -// CHECK: cir.func @_Z29test_add_overflow_int_int_intiiPi -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z29test_add_overflow_int_int_intiiPi +// CIR: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CIR: } bool test_add_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) { return __builtin_add_overflow(x, y, res); } -// CHECK: cir.func @_Z38test_add_overflow_xint31_xint31_xint31DB31_S_PS_ -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> -// CHECK: } +// CIR: cir.func @_Z38test_add_overflow_xint31_xint31_xint31DB31_S_PS_ +// CIR: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CIR-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> +// CIR: } bool test_sub_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) { return __builtin_sub_overflow(x, y, res); } -// CHECK: cir.func @_Z32test_sub_overflow_uint_uint_uintjjPj -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z32test_sub_overflow_uint_uint_uintjjPj +// CIR: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CIR: } bool test_sub_overflow_int_int_int(int x, int y, int *res) { return __builtin_sub_overflow(x, y, res); } -// CHECK: cir.func @_Z29test_sub_overflow_int_int_intiiPi -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z29test_sub_overflow_int_int_intiiPi +// CIR: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CIR: } bool test_sub_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) { return __builtin_sub_overflow(x, y, res); } -// CHECK: cir.func @_Z38test_sub_overflow_xint31_xint31_xint31DB31_S_PS_ -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> -// CHECK: } +// CIR: cir.func @_Z38test_sub_overflow_xint31_xint31_xint31DB31_S_PS_ +// CIR: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CIR-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> +// CIR: } bool test_mul_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) { return __builtin_mul_overflow(x, y, res); } -// CHECK: cir.func @_Z32test_mul_overflow_uint_uint_uintjjPj -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z32test_mul_overflow_uint_uint_uintjjPj +// CIR: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CIR: } bool test_mul_overflow_int_int_int(int x, int y, int *res) { return __builtin_mul_overflow(x, y, res); } -// CHECK: cir.func @_Z29test_mul_overflow_int_int_intiiPi -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z29test_mul_overflow_int_int_intiiPi +// CIR: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CIR: } bool test_mul_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) { return __builtin_mul_overflow(x, y, res); } -// CHECK: cir.func @_Z38test_mul_overflow_xint31_xint31_xint31DB31_S_PS_ -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> -// CHECK: } +// CIR: cir.func @_Z38test_mul_overflow_xint31_xint31_xint31DB31_S_PS_ +// CIR: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CIR-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr>, !cir.int +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>>, !cir.ptr> +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : , (, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !cir.int, !cir.ptr> +// CIR: } bool test_mul_overflow_ulong_ulong_long(unsigned long x, unsigned long y, unsigned long *res) { return __builtin_mul_overflow(x, y, res); } -// CHECK: cir.func @_Z34test_mul_overflow_ulong_ulong_longmmPm -// CHECK: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !u64i, (!u64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z34test_mul_overflow_ulong_ulong_longmmPm +// CIR: %[[#LHS:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#RHS:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !u64i, (!u64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CIR: } bool test_add_overflow_uint_int_int(unsigned x, int y, int *res) { return __builtin_add_overflow(x, y, res); } -// CHECK: cir.func @_Z30test_add_overflow_uint_int_intjiPi -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[#PROM_X:]] = cir.cast(integral, %[[#X]] : !u32i), !cir.int -// CHECK-NEXT: %[[#PROM_Y:]] = cir.cast(integral, %[[#Y]] : !s32i), !cir.int -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#PROM_X]], %[[#PROM_Y]]) : , (!s32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z30test_add_overflow_uint_int_intjiPi +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[#PROM_X:]] = cir.cast(integral, %[[#X]] : !u32i), !cir.int +// CIR-NEXT: %[[#PROM_Y:]] = cir.cast(integral, %[[#Y]] : !s32i), !cir.int +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#PROM_X]], %[[#PROM_Y]]) : , (!s32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CIR: } bool test_add_overflow_volatile(int x, int y, volatile int *res) { return __builtin_add_overflow(x, y, res); } -// CHECK: cir.func @_Z26test_add_overflow_volatileiiPVi -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) -// CHECK-NEXT: cir.store volatile %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z26test_add_overflow_volatileiiPVi +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) +// CIR-NEXT: cir.store volatile %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CIR: } bool test_uadd_overflow(unsigned x, unsigned y, unsigned *res) { return __builtin_uadd_overflow(x, y, res); } -// CHECK: cir.func @_Z18test_uadd_overflowjjPj -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z18test_uadd_overflowjjPj +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CIR: } bool test_uaddl_overflow(unsigned long x, unsigned long y, unsigned long *res) { return __builtin_uaddl_overflow(x, y, res); } -// CHECK: cir.func @_Z19test_uaddl_overflowmmPm -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z19test_uaddl_overflowmmPm +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CIR: } bool test_uaddll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) { return __builtin_uaddll_overflow(x, y, res); } -// CHECK: cir.func @_Z20test_uaddll_overflowyyPy -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z20test_uaddll_overflowyyPy +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CIR: } bool test_usub_overflow(unsigned x, unsigned y, unsigned *res) { return __builtin_usub_overflow(x, y, res); } -// CHECK: cir.func @_Z18test_usub_overflowjjPj -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z18test_usub_overflowjjPj +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CIR: } bool test_usubl_overflow(unsigned long x, unsigned long y, unsigned long *res) { return __builtin_usubl_overflow(x, y, res); } -// CHECK: cir.func @_Z19test_usubl_overflowmmPm -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z19test_usubl_overflowmmPm +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CIR: } bool test_usubll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) { return __builtin_usubll_overflow(x, y, res); } -// CHECK: cir.func @_Z20test_usubll_overflowyyPy -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z20test_usubll_overflowyyPy +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CIR: } bool test_umul_overflow(unsigned x, unsigned y, unsigned *res) { return __builtin_umul_overflow(x, y, res); } -// CHECK: cir.func @_Z18test_umul_overflowjjPj -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z18test_umul_overflowjjPj +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr +// CIR: } bool test_umull_overflow(unsigned long x, unsigned long y, unsigned long *res) { return __builtin_umull_overflow(x, y, res); } -// CHECK: cir.func @_Z19test_umull_overflowmmPm -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z19test_umull_overflowmmPm +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CIR: } bool test_umulll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) { return __builtin_umulll_overflow(x, y, res); } -// CHECK: cir.func @_Z20test_umulll_overflowyyPy -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z20test_umulll_overflowyyPy +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !u64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr +// CIR: } bool test_sadd_overflow(int x, int y, int *res) { return __builtin_sadd_overflow(x, y, res); } -// CHECK: cir.func @_Z18test_sadd_overflowiiPi -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z18test_sadd_overflowiiPi +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CIR: } bool test_saddl_overflow(long x, long y, long *res) { return __builtin_saddl_overflow(x, y, res); } -// CHECK: cir.func @_Z19test_saddl_overflowllPl -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z19test_saddl_overflowllPl +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CIR: } bool test_saddll_overflow(long long x, long long y, long long *res) { return __builtin_saddll_overflow(x, y, res); } -// CHECK: cir.func @_Z20test_saddll_overflowxxPx -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z20test_saddll_overflowxxPx +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CIR: } bool test_ssub_overflow(int x, int y, int *res) { return __builtin_ssub_overflow(x, y, res); } -// CHECK: cir.func @_Z18test_ssub_overflowiiPi -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z18test_ssub_overflowiiPi +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CIR: } bool test_ssubl_overflow(long x, long y, long *res) { return __builtin_ssubl_overflow(x, y, res); } -// CHECK: cir.func @_Z19test_ssubl_overflowllPl -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z19test_ssubl_overflowllPl +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CIR: } bool test_ssubll_overflow(long long x, long long y, long long *res) { return __builtin_ssubll_overflow(x, y, res); } -// CHECK: cir.func @_Z20test_ssubll_overflowxxPx -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z20test_ssubll_overflowxxPx +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CIR: } bool test_smul_overflow(int x, int y, int *res) { return __builtin_smul_overflow(x, y, res); } -// CHECK: cir.func @_Z18test_smul_overflowiiPi -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z18test_smul_overflowiiPi +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr +// CIR: } bool test_smull_overflow(long x, long y, long *res) { return __builtin_smull_overflow(x, y, res); } -// CHECK: cir.func @_Z19test_smull_overflowllPl -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z19test_smull_overflowllPl +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CIR: } bool test_smulll_overflow(long long x, long long y, long long *res) { return __builtin_smulll_overflow(x, y, res); } -// CHECK: cir.func @_Z20test_smulll_overflowxxPx -// CHECK: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i -// CHECK-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) -// CHECK-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr -// CHECK: } +// CIR: cir.func @_Z20test_smulll_overflowxxPx +// CIR: %[[#X:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#Y:]] = cir.load %{{.+}} : !cir.ptr, !s64i +// CIR-NEXT: %[[#RES_PTR:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr +// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool) +// CIR-NEXT: cir.store %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr +// CIR: } From ec00ce0cc0a01470a43fd6326e11ec5933f535bf Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Wed, 16 Oct 2024 10:48:11 +0800 Subject: [PATCH 1953/2301] [CIR] [Lowering] care trailing zero for lowering constant array (#976) Close https://github.com/llvm/clangir/issues/975 See the attached test case for example --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 5 ++++- clang/test/CIR/Lowering/str.c | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/Lowering/str.c diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index c15fc31b29da..178a4341f5b3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2284,7 +2284,10 @@ class CIRGlobalOpLowering if (auto constArr = mlir::dyn_cast(init.value())) { if (auto attr = mlir::dyn_cast(constArr.getElts())) { - init = rewriter.getStringAttr(attr.getValue()); + llvm::SmallString<256> literal(attr.getValue()); + if (constArr.getTrailingZerosNum()) + literal.append(constArr.getTrailingZerosNum(), '\0'); + init = rewriter.getStringAttr(literal); } else if (auto attr = mlir::dyn_cast(constArr.getElts())) { // Failed to use a compact attribute as an initializer: diff --git a/clang/test/CIR/Lowering/str.c b/clang/test/CIR/Lowering/str.c new file mode 100644 index 000000000000..cc393daee884 --- /dev/null +++ b/clang/test/CIR/Lowering/str.c @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +void f(char *fmt, ...); +void test() { + f("test\0"); +} + +// LLVM: @.str = {{.*}}[6 x i8] c"test\00\00" From 205b144fa439d2ceff86e9fe8995f0e74248f6a5 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 16 Oct 2024 10:49:24 +0800 Subject: [PATCH 1954/2301] [CIR] Add support for __int128 type (#980) This PR adds initial support for the `__int128` type. The `!cir.int` type is extended to support 128-bit integer types. This PR comes with a simple test that verifies the CIRGen and LLVM lowering of `!s128i` and `!u128i` work. Resolve #953 . --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 10 ++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 + clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 8 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 5 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 8 +- clang/test/CIR/CodeGen/int128.cpp | 74 +++++++++++++++++++ clang/test/CIR/IR/invalid.cir | 4 +- 7 files changed, 98 insertions(+), 15 deletions(-) create mode 100644 clang/test/CIR/CodeGen/int128.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 1c63fcd84c67..de4bf213dc87 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -38,9 +38,11 @@ def CIR_IntType : CIR_Type<"Int", "int", [DeclareTypeInterfaceMethods]> { let summary = "Integer type with arbitrary precision up to a fixed limit"; let description = [{ - CIR type that represents C/C++ primitive integer types. - Said types are: `char`, `short`, `int`, `long`, `long long`, and their \ - unsigned variations. + CIR type that represents integer types with arbitrary precision. + + Those integer types that are directly available in C/C++ standard are called + primitive integer types. Said types are: `signed char`, `short`, `int`, + `long`, `long long`, and their unsigned variations. }]; let parameters = (ins "unsigned":$width, "bool":$isSigned); let hasCustomAssemblyFormat = 1; @@ -62,7 +64,7 @@ def CIR_IntType : CIR_Type<"Int", "int", /// Returns a minimum bitwidth of cir::IntType static unsigned minBitwidth() { return 1; } /// Returns a maximum bitwidth of cir::IntType - static unsigned maxBitwidth() { return 64; } + static unsigned maxBitwidth() { return 128; } /// Returns true if cir::IntType that represents a primitive integer type /// can be constructed from the provided bitwidth. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 089fae6f89a0..fcf6ec27c1f2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -120,6 +120,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, ::mlir::cir::IntType::get(builder.getContext(), 32, /*isSigned=*/true); SInt64Ty = ::mlir::cir::IntType::get(builder.getContext(), 64, /*isSigned=*/true); + SInt128Ty = + ::mlir::cir::IntType::get(builder.getContext(), 128, /*isSigned=*/true); // Initialize CIR unsigned integer types cache. UInt8Ty = @@ -130,6 +132,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, ::mlir::cir::IntType::get(builder.getContext(), 32, /*isSigned=*/false); UInt64Ty = ::mlir::cir::IntType::get(builder.getContext(), 64, /*isSigned=*/false); + UInt128Ty = + ::mlir::cir::IntType::get(builder.getContext(), 128, /*isSigned=*/false); VoidTy = ::mlir::cir::VoidType::get(builder.getContext()); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index e07e46be68e5..1ba8095522bd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -31,10 +31,10 @@ struct CIRGenTypeCache { /// void mlir::cir::VoidType VoidTy; - // char, int, short, long - mlir::cir::IntType SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty; - // usigned char, unsigned, unsigned short, unsigned long - mlir::cir::IntType UInt8Ty, UInt16Ty, UInt32Ty, UInt64Ty; + // char, int, short, long, __int128 + mlir::cir::IntType SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty, SInt128Ty; + // usigned char, unsigned, unsigned short, unsigned long, unsigned __int128 + mlir::cir::IntType UInt8Ty, UInt16Ty, UInt32Ty, UInt64Ty, UInt128Ty; /// half, bfloat, float, double, fp80 mlir::cir::FP16Type FP16Ty; mlir::cir::BF16Type BFloat16Ty; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 936e747d26b2..b14a0aa1e1cf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -504,9 +504,10 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; case BuiltinType::UInt128: + ResultType = CGM.UInt128Ty; + break; case BuiltinType::Int128: - assert(0 && "not implemented"); - // FIXME: ResultType = Builder.getIntegerType(128); + ResultType = CGM.SInt128Ty; break; #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index c37fe2788020..e64c139f1844 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -604,8 +604,9 @@ Type IntType::parse(mlir::AsmParser &parser) { // Fetch integer size. if (parser.parseInteger(width)) return {}; - if (width < 1 || width > 64) { - parser.emitError(loc, "expected integer width to be from 1 up to 64"); + if (width < IntType::minBitwidth() || width > IntType::maxBitwidth()) { + parser.emitError(loc, "expected integer width to be from ") + << IntType::minBitwidth() << " up to " << IntType::maxBitwidth(); return {}; } @@ -643,7 +644,8 @@ IntType::verify(llvm::function_ref emitError, if (width < IntType::minBitwidth() || width > IntType::maxBitwidth()) { emitError() << "IntType only supports widths from " - << IntType::minBitwidth() << "up to " << IntType::maxBitwidth(); + << IntType::minBitwidth() << " up to " + << IntType::maxBitwidth(); return mlir::failure(); } diff --git a/clang/test/CIR/CodeGen/int128.cpp b/clang/test/CIR/CodeGen/int128.cpp new file mode 100644 index 000000000000..97539e4317c9 --- /dev/null +++ b/clang/test/CIR/CodeGen/int128.cpp @@ -0,0 +1,74 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s + +// TODO: remove the -fno-clangir-call-conv-lowering flag when ABI lowering for +// int128 is supported. + +// CHECK-LABEL: @_Z5test1n +// LLVM-LABEL: @_Z5test1n +__int128 test1(__int128 x) { + return x; + // CHECK: cir.return %{{.+}} : !s128i + // LLVM: ret i128 %{{.+}} +} + +// CHECK-LABEL: @_Z5test2o +// LLVM-LABEL: @_Z5test2o +unsigned __int128 test2(unsigned __int128 x) { + return x; + // CHECK: cir.return %{{.+}} : !u128i + // LLVM: ret i128 %{{.+}} +} + +// CHECK-LABEL: @_Z11unary_arithn +// LLVM-LABEL: @_Z11unary_arithn +__int128 unary_arith(__int128 x) { + return ++x; + // CHECK: %{{.+}} = cir.unary(inc, %{{.+}}) : !s128i, !s128i + // LLVM: %{{.+}} = add i128 %{{.+}}, 1 +} + +// CHECK-LABEL: @_Z12binary_arithnn +// LLVM-LABEL: @_Z12binary_arithnn +__int128 binary_arith(__int128 x, __int128 y) { + return x + y; + // CHECK: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) nsw : !s128i + // LLVM: %{{.+}} = add nsw i128 %{{.+}}, %{{.+}} +} + +volatile int int_var; +volatile double double_var; + +// CHECK-LABEL: @_Z19integral_conversionn +// LLVM-LABEL: @_Z19integral_conversionn +__int128 integral_conversion(__int128 x) { + int_var = x; + // CHECK: %[[#VAL:]] = cir.cast(integral, %{{.+}} : !s128i), !s32i + // LLVM: %{{.+}} = trunc i128 %{{.+}} to i32 + + return int_var; + // CHECK: %{{.+}} = cir.cast(integral, %{{.+}} : !s32i), !s128i + // LLVM: %{{.+}} = sext i32 %{{.+}} to i128 +} + +// CHECK-LABEL: @_Z16float_conversionn +// LLVM-LABEL: @_Z16float_conversionn +__int128 float_conversion(__int128 x) { + double_var = x; + // CHECK: %[[#VAL:]] = cir.cast(int_to_float, %{{.+}} : !s128i), !cir.double + // LLVM: %{{.+}} = sitofp i128 %{{.+}} to double + + return double_var; + // CHECK: %{{.+}} = cir.cast(float_to_int, %{{.+}} : !cir.double), !s128i + // LLVM: %{{.+}} = fptosi double %{{.+}} to i128 +} + +// CHECK-LABEL: @_Z18boolean_conversionn +// LLVM-LABEL: @_Z18boolean_conversionn +bool boolean_conversion(__int128 x) { + return x; + // CHECK: %{{.+}} = cir.cast(int_to_bool, %{{.+}} : !s128i), !cir.bool + // LLVM: %{{.+}} = icmp ne i128 %{{.+}}, 0 +} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index a28569ac0b46..3283b60726c4 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -593,8 +593,8 @@ module { // // ----- module { - // expected-error@below {{expected integer width to be from 1 up to 64}} - cir.func @l0(%arg0: !cir.int) -> () { + // expected-error@below {{expected integer width to be from 1 up to 128}} + cir.func @l0(%arg0: !cir.int) -> () { cir.return } } From ee504109a516a107f48508278cf51c0435142be7 Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 15 Oct 2024 22:52:42 -0400 Subject: [PATCH 1955/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vrshrn_n to llvm intrinsic call (#982) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 7 +- clang/test/CIR/CodeGen/AArch64/neon.c | 85 ++++++++++++------- 2 files changed, 59 insertions(+), 33 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 03768a979625..c02980221ba5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3121,7 +3121,12 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vqshrn_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vrshrn_n_v: - llvm_unreachable("NYI"); + return buildNeonCall( + builder, + {builder.getExtendedElementVectorType( + vTy, mlir::cast(vTy.getEltType()).isSigned()), + SInt32Ty}, + Ops, "llvm.aarch64.neon.rshrn", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqrshrn_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vrndah_f16: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 2b9be7610a28..4acd2e8131c2 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -5900,41 +5900,62 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { // return vrshrn_n_s32(a, 9); // } -// NYI-LABEL: @test_vrshrn_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> [[VRSHRN_N]], i32 19) -// NYI: ret <2 x i32> [[VRSHRN_N1]] -// int32x2_t test_vrshrn_n_s64(int64x2_t a) { -// return vrshrn_n_s64(a, 19); -// } +int32x2_t test_vrshrn_n_s64(int64x2_t a) { + return vrshrn_n_s64(a, 19); + + // CIR-LABEL: vrshrn_n_s64 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM: {{.*}}test_vrshrn_n_s64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> [[VRSHRN_N]], i32 19) + // LLVM: ret <2 x i32> [[VRSHRN_N1]] +} -// NYI-LABEL: @test_vrshrn_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> [[VRSHRN_N]], i32 3) -// NYI: ret <8 x i8> [[VRSHRN_N1]] -// uint8x8_t test_vrshrn_n_u16(uint16x8_t a) { -// return vrshrn_n_u16(a, 3); -// } +uint8x8_t test_vrshrn_n_u16(uint16x8_t a) { + return vrshrn_n_u16(a, 3); -// NYI-LABEL: @test_vrshrn_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> [[VRSHRN_N]], i32 9) -// NYI: ret <4 x i16> [[VRSHRN_N1]] -// uint16x4_t test_vrshrn_n_u32(uint32x4_t a) { -// return vrshrn_n_u32(a, 9); -// } + // CIR-LABEL: vrshrn_n_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector -// NYI-LABEL: @test_vrshrn_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> [[VRSHRN_N]], i32 19) -// NYI: ret <2 x i32> [[VRSHRN_N1]] -// uint32x2_t test_vrshrn_n_u64(uint64x2_t a) { -// return vrshrn_n_u64(a, 19); -// } + // LLVM: {{.*}}test_vrshrn_n_u16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> [[VRSHRN_N]], i32 3) + // LLVM: ret <8 x i8> [[VRSHRN_N1]] +} + +uint16x4_t test_vrshrn_n_u32(uint32x4_t a) { + return vrshrn_n_u32(a, 9); + + // CIR-LABEL: vrshrn_n_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM: {{.*}}vrshrn_n_u32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> [[VRSHRN_N]], i32 9) + // LLVM: ret <4 x i16> [[VRSHRN_N1]] +} + +uint32x2_t test_vrshrn_n_u64(uint64x2_t a) { + return vrshrn_n_u64(a, 19); + + // CIR-LABEL: vrshrn_n_u64 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM: {{.*}}test_vrshrn_n_u64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> [[VRSHRN_N]], i32 19) + // LLVM: ret <2 x i32> [[VRSHRN_N1]] + +} // NYI-LABEL: @test_vrshrn_high_n_s16( // NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> From 0905eede7610004f9366605fb1d6f85c3f04424c Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 15 Oct 2024 22:53:07 -0400 Subject: [PATCH 1956/2301] [CIR][CIRGen][Builtin][Neon] Lower vqdmulhq_lane, vqdmulh_lane, vqrdmulhq_lane and vqrdmulh_lane (#985) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 20 ++++ clang/test/CIR/CodeGen/AArch64/neon-arith.c | 113 ++++++++++++++++++ 2 files changed, 133 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index c02980221ba5..349bc2d7884e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2278,6 +2278,26 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( return builder.createVecShuffle(getLoc(e->getExprLoc()), ops[0], ops[1], indices); } + case NEON::BI__builtin_neon_vqdmulhq_lane_v: + case NEON::BI__builtin_neon_vqdmulh_lane_v: + case NEON::BI__builtin_neon_vqrdmulhq_lane_v: + case NEON::BI__builtin_neon_vqrdmulh_lane_v: { + mlir::cir::VectorType resTy = + (builtinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || + builtinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v) + ? mlir::cir::VectorType::get(builder.getContext(), vTy.getEltType(), + vTy.getSize() * 2) + : vTy; + mlir::cir::VectorType mulVecT = + GetNeonType(this, NeonTypeFlags(neonType.getEltType(), false, + /*isQuad*/ false)); + return buildNeonCall(builder, {resTy, mulVecT, SInt32Ty}, ops, + (builtinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || + builtinID == NEON::BI__builtin_neon_vqdmulh_lane_v) + ? "llvm.aarch64.neon.sqdmulh.lane" + : "llvm.aarch64.neon.sqrdmulh.lane", + resTy, getLoc(e->getExprLoc())); + } } // This second switch is for the intrinsics that might have a more generic diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index 42c1fd389b17..ab37dded4881 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -218,3 +218,116 @@ float64x2_t test_vpaddq_f64(float64x2_t a, float64x2_t b) { // LLVM: {{.*}}test_vpaddq_f64(<2 x double>{{.*}}[[A:%.*]], <2 x double>{{.*}}[[B:%.*]]) // LLVM: [[RES:%.*]] = call <2 x double> @llvm.aarch64.neon.faddp.v2f64(<2 x double> [[A]], <2 x double> [[B]]) // LLVM: ret <2 x double> [[RES]] + +int16x4_t test_vqdmulh_lane_s16(int16x4_t a, int16x4_t v) { + return vqdmulh_lane_s16(a, v, 3); +} + +// CIR-LABEL: vqdmulh_lane_s16 +// CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: (!cir.vector, !cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqdmulh_lane_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[V:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqdmulh.lane.v4i16.v4i16 +// LLVM-SAME: (<4 x i16> [[A]], <4 x i16> [[V]], i32 3) +// LLVM: ret <4 x i16> [[RES]] + + +int32x2_t test_vqdmulh_lane_s32(int32x2_t a, int32x2_t v) { + return vqdmulh_lane_s32(a, v, 1); +} + +// CIR-LABEL: vqdmulh_lane_s32 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: (!cir.vector, !cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqdmulh_lane_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[V:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqdmulh.lane.v2i32.v2i32 +// LLVM-SAME: (<2 x i32> [[A]], <2 x i32> [[V]], i32 1) +// LLVM: ret <2 x i32> [[RES]] + +int16x8_t test_vqdmulhq_lane_s16(int16x8_t a, int16x4_t v) { + return vqdmulhq_lane_s16(a, v, 3); +} + +// CIR-LABEL: vqdmulhq_lane_s16 +// CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: (!cir.vector, !cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqdmulhq_lane_s16(<8 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[V:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqdmulh.lane.v8i16.v4i16 +// LLVM-SAME: (<8 x i16> [[A]], <4 x i16> [[V]], i32 3) +// LLVM: ret <8 x i16> [[RES]] + +int32x4_t test_vqdmulhq_lane_s32(int32x4_t a, int32x2_t v) { + return vqdmulhq_lane_s32(a, v, 1); +} + +// CIR-LABEL: vqdmulhq_lane_s32 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: (!cir.vector, !cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqdmulhq_lane_s32(<4 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[V:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmulh.lane.v4i32.v2i32 +// LLVM-SAME: (<4 x i32> [[A]], <2 x i32> [[V]], i32 1) +// LLVM: ret <4 x i32> [[RES]] + +int16x4_t test_vqrdmulh_lane_s16(int16x4_t a, int16x4_t v) { + return vqrdmulh_lane_s16(a, v, 3); +} + +// CIR-LABEL: vqrdmulh_lane_s16 +// CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR-SAME: (!cir.vector, !cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqrdmulh_lane_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[V:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.lane.v4i16.v4i16 +// LLVM-SAME: (<4 x i16> [[A]], <4 x i16> [[V]], i32 3) +// LLVM: ret <4 x i16> [[RES]] + +int16x8_t test_vqrdmulhq_lane_s16(int16x8_t a, int16x4_t v) { + return vqrdmulhq_lane_s16(a, v, 3); +} + +// CIR-LABEL: vqrdmulhq_lane_s16 +// CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR-SAME: (!cir.vector, !cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqrdmulhq_lane_s16(<8 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[V:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.lane.v8i16.v4i16 +// LLVM-SAME: (<8 x i16> [[A]], <4 x i16> [[V]], i32 3) +// LLVM: ret <8 x i16> [[RES]] + +int32x2_t test_vqrdmulh_lane_s32(int32x2_t a, int32x2_t v) { + return vqrdmulh_lane_s32(a, v, 1); +} + +// CIR-LABEL: vqrdmulh_lane_s32 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR-SAME: (!cir.vector, !cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqrdmulh_lane_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[V:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.lane.v2i32.v2i32 +// LLVM-SAME: (<2 x i32> [[A]], <2 x i32> [[V]], i32 1) +// LLVM: ret <2 x i32> [[RES]] + +int32x4_t test_vqrdmulhq_lane_s32(int32x4_t a, int32x2_t v) { + return vqrdmulhq_lane_s32(a, v, 1); +} + +// CIR-LABEL: vqrdmulhq_lane_s32 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR-SAME: (!cir.vector, !cir.vector, !s32i) -> !cir.vector + +// LLVM: {{.*}}test_vqrdmulhq_lane_s32(<4 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[V:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.lane.v4i32.v2i32 +// LLVM-SAME: (<4 x i32> [[A]], <2 x i32> [[V]], i32 1) +// LLVM: ret <4 x i32> [[RES]] From 45adae526cb3c62f03cdbf9f5a4be9dc7277998a Mon Sep 17 00:00:00 2001 From: M V V S Manoj Kumar Date: Wed, 16 Oct 2024 09:42:17 +0530 Subject: [PATCH 1957/2301] [CIR][CIRGen][Builtin][Type] Support for IEEE Quad (long double) added (in CIR + Direct to LLVM) (#966) Fixes https://github.com/llvm/clangir/issues/931 Added type definition in CIRTypes.td, created appropriate functions for the same in CIRTypes.cpp like getPreferredAlignment, getPreferredAlignment, etc. Optionally added lowering in LowerToLLVM.cpp --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 9 +++++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 7 ++-- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 3 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 23 ++++++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 ++ clang/test/CIR/CodeGen/types-IEEE-quad.c | 32 +++++++++++++++++++ 7 files changed, 72 insertions(+), 7 deletions(-) create mode 100644 clang/test/CIR/CodeGen/types-IEEE-quad.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index de4bf213dc87..c20915bbd6ef 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -173,6 +173,13 @@ def CIR_FP80 : CIR_FloatType<"FP80", "f80"> { }]; } +def CIR_FP128 : CIR_FloatType<"FP128", "f128"> { + let summary = "CIR type that represents IEEEquad 128-bit floating-point format"; + let description = [{ + Floating-point type that represents the IEEEquad 128-bit floating-point format. + }]; +} + def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { let summary = "CIR extended-precision float type"; let description = [{ @@ -195,7 +202,7 @@ def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { // Constraints -def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_FP80, CIR_LongDouble]>; +def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_FP80, CIR_FP128, CIR_LongDouble]>; def CIR_AnyIntOrFloat: AnyTypeOf<[CIR_AnyFloat, CIR_IntType]>; //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index c0345df376cd..a711589ef5d1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -402,7 +402,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { if (&format == &llvm::APFloat::x87DoubleExtended()) return mlir::cir::LongDoubleType::get(getContext(), typeCache.FP80Ty); if (&format == &llvm::APFloat::IEEEquad()) - llvm_unreachable("NYI"); + return mlir::cir::LongDoubleType::get(getContext(), typeCache.FP128Ty); if (&format == &llvm::APFloat::PPCDoubleDouble()) llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index fcf6ec27c1f2..cb96adf1e257 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -35,6 +35,7 @@ #include "mlir/IR/Verifier.h" #include "clang/AST/Expr.h" #include "clang/Basic/Cuda.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/MissingFeatures.h" #include "clang/AST/ASTConsumer.h" @@ -74,8 +75,8 @@ #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FileSystem.h" -#include "llvm/Support/raw_ostream.h" #include "llvm/Support/TimeProfiler.h" +#include "llvm/Support/raw_ostream.h" #include #include @@ -145,6 +146,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, FloatTy = ::mlir::cir::SingleType::get(builder.getContext()); DoubleTy = ::mlir::cir::DoubleType::get(builder.getContext()); FP80Ty = ::mlir::cir::FP80Type::get(builder.getContext()); + FP128Ty = ::mlir::cir::FP128Type::get(builder.getContext()); // TODO: PointerWidthInBits PointerAlignInBytes = @@ -196,8 +198,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, theModule->setAttr("cir.sob", mlir::cir::SignedOverflowBehaviorAttr::get(&context, sob)); auto lang = SourceLanguageAttr::get(&context, getCIRSourceLanguage()); - theModule->setAttr( - "cir.lang", mlir::cir::LangAttr::get(&context, lang)); + theModule->setAttr("cir.lang", mlir::cir::LangAttr::get(&context, lang)); theModule->setAttr("cir.triple", builder.getStringAttr(getTriple().str())); // Set the module name to be the name of the main file. TranslationUnitDecl // often contains invalid source locations and isn't a reliable source for the diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index 1ba8095522bd..f07c76e94760 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -17,8 +17,8 @@ #include "mlir/IR/Types.h" #include "clang/AST/CharUnits.h" #include "clang/Basic/AddressSpaces.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/MissingFeatures.h" namespace cir { @@ -41,6 +41,7 @@ struct CIRGenTypeCache { mlir::cir::SingleType FloatTy; mlir::cir::DoubleType DoubleTy; mlir::cir::FP80Type FP80Ty; + mlir::cir::FP128Type FP128Ty; /// int mlir::Type UIntTy; diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index e64c139f1844..ae50f79fff30 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -763,6 +763,27 @@ FP80Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, return 16; } +const llvm::fltSemantics &FP128Type::getFloatSemantics() const { + return llvm::APFloat::IEEEquad(); +} + +llvm::TypeSize +FP128Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return llvm::TypeSize::getFixed(16); +} + +uint64_t FP128Type::getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { + return 16; +} + +uint64_t +FP128Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { + return 16; +} + const llvm::fltSemantics &LongDoubleType::getFloatSemantics() const { return mlir::cast(getUnderlying()) .getFloatSemantics(); @@ -792,7 +813,7 @@ uint64_t LongDoubleType::getPreferredAlignment( LogicalResult LongDoubleType::verify(function_ref emitError, mlir::Type underlying) { - if (!mlir::isa(underlying)) { + if (!mlir::isa(underlying)) { emitError() << "invalid underlying type for long double"; return failure(); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 178a4341f5b3..979354a10bc5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4208,6 +4208,9 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, converter.addConversion([&](mlir::cir::FP80Type type) -> mlir::Type { return mlir::Float80Type::get(type.getContext()); }); + converter.addConversion([&](mlir::cir::FP128Type type) -> mlir::Type { + return mlir::Float128Type::get(type.getContext()); + }); converter.addConversion([&](mlir::cir::LongDoubleType type) -> mlir::Type { return converter.convertType(type.getUnderlying()); }); diff --git a/clang/test/CIR/CodeGen/types-IEEE-quad.c b/clang/test/CIR/CodeGen/types-IEEE-quad.c new file mode 100644 index 000000000000..607374ef0407 --- /dev/null +++ b/clang/test/CIR/CodeGen/types-IEEE-quad.c @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CIR %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +long double i = 0; +long double t2(long double i2) { + return i2 + i ; +} + +// CIR: cir.global external @i = #cir.fp<0.000000e+00> : !cir.long_double {alignment = 16 : i64} loc({{.*}}) +// CIR-LABEL: cir.func @t2(%arg0: !cir.long_double loc({{.*}})) -> !cir.long_double +// CIR-NEXT: %[[#I2:]] = cir.alloca !cir.long_double, !cir.ptr>, ["i2", init] {alignment = 16 : i64} +// CIR-NEXT: %[[#RETVAL:]] = cir.alloca !cir.long_double, !cir.ptr>, ["__retval"] {alignment = 16 : i64} +// CIR-NEXT: cir.store %arg0, %[[#I2]] : !cir.long_double, !cir.ptr> +// CIR-NEXT: %[[#I2_LOAD:]] = cir.load %[[#I2]] : !cir.ptr>, !cir.long_double +// CIR-NEXT: %[[#I:]] = cir.get_global @i : !cir.ptr> +// CIR-NEXT: %[[#I_LOAD:]] = cir.load %[[#I]] : !cir.ptr>, !cir.long_double +// CIR-NEXT: %[[#ADD:]] = cir.binop(add, %[[#I2_LOAD]], %[[#I_LOAD]]) : !cir.long_double +// CIR-NEXT: cir.store %[[#ADD]], %[[#RETVAL]] : !cir.long_double, !cir.ptr> +// CIR-NEXT: %[[#RETVAL_LOAD:]] = cir.load %[[#RETVAL]] : !cir.ptr>, !cir.long_double +// CIR-NEXT: cir.return %[[#RETVAL_LOAD]] : !cir.long_double + +//LLVM: @i = global fp128 0xL00000000000000000000000000000000, align 16 +//LLVM-LABEL: define dso_local fp128 @t2(fp128 noundef %i2) +//LLVM-NEXT : entry: +//LLVM-NEXT : %[[#I2_ADDR:]]= alloca fp128, align 16 +//LLVM-NEXT : store fp128 %i2, ptr %[[#I2_ADDR]], align 16 +//LLVM-NEXT : %[[#I2_LOAD:]] = load fp128, ptr %[[#I2_ADDR]], align 16 +//LLVM-NEXT : %[[#I_LOAD:]] = load fp128, ptr @i, align 16 +//LLVM-NEXT : %[[#RETVAL:]] = fadd fp128 %[[#I2_LOAD]], %[[#I_LOAD]] +//LLVM-NEXT : ret fp128 %[[#RETVAL]] From ec5e9c300b9fb270fdd0f8afd1b7bf33e53be51d Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 16 Oct 2024 12:02:30 -0700 Subject: [PATCH 1958/2301] [CIR][NFC] Fix some consistency issues with missing features --- .../Transforms/TargetLowering/Targets/AArch64.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index ed40b50188c1..b986adb46ae9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -19,7 +19,7 @@ using AArch64ABIKind = ::cir::AArch64ABIKind; using ABIArgInfo = ::cir::ABIArgInfo; -using MissingFeature = ::cir::MissingFeatures; +using MissingFeatures = ::cir::MissingFeatures; namespace mlir { namespace cir { @@ -60,7 +60,7 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { public: AArch64TargetLoweringInfo(LowerTypes <, AArch64ABIKind Kind) : TargetLoweringInfo(std::make_unique(LT, Kind)) { - cir_cconv_assert(!MissingFeature::swift()); + cir_cconv_assert(!MissingFeatures::swift()); } unsigned getTargetAddrSpaceFromCIRAddrSpace( @@ -97,7 +97,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, if (!isAggregateTypeForABI(RetTy)) { // NOTE(cir): Skip enum handling. - if (MissingFeature::fixedSizeIntType()) + if (MissingFeatures::fixedSizeIntType()) cir_cconv_unreachable("NYI"); return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS() @@ -114,13 +114,13 @@ AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, Ty = useFirstFieldIfTransparentUnion(Ty); // TODO(cir): check for illegal vector types. - if (MissingFeature::vectorType()) + if (MissingFeatures::vectorType()) cir_cconv_unreachable("NYI"); if (!isAggregateTypeForABI(Ty)) { // NOTE(cir): Enum is IntType in CIR. Skip enum handling here. - if (MissingFeature::fixedSizeIntType()) + if (MissingFeatures::fixedSizeIntType()) cir_cconv_unreachable("NYI"); return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS() From 70245f901255076352697b0e015e689667aa29c1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 16 Oct 2024 15:18:50 -0700 Subject: [PATCH 1959/2301] [CIR] Disable `-fclangir-call-conv-lowering` from default in the LLVM pipeline This is causing lots of churn. `-fclangir-call-conv-lowering` is not mature enough, assumptions are leading to crashes we cannot track with special messages, leading to not great user experience. Turn this off until we have someone dedicated to roll this out. --- clang/include/clang/Driver/Options.td | 2 +- clang/lib/Driver/ToolChains/Clang.cpp | 2 ++ clang/test/CIR/Driver/callconv.cpp | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index ff05be57a99f..3b6ea8bde75e 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3112,7 +3112,7 @@ defm clangir_analysis_only : BoolFOption<"clangir-analysis-only", // FIXME(cir): Remove this option once all pre-existing tests are compatible with // the calling convention lowering pass. defm clangir_call_conv_lowering : BoolFOption<"clangir-call-conv-lowering", - FrontendOpts<"ClangIRCallConvLowering">, DefaultTrue, + FrontendOpts<"ClangIRCallConvLowering">, DefaultFalse, PosFlag, NegFlag, BothFlags<[], [ClangOption, CC1Option], "">>; diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 4bd9104228f4..7961007757a9 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5254,6 +5254,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.addOptOutFlag(CmdArgs, options::OPT_fclangir_call_conv_lowering, options::OPT_fno_clangir_call_conv_lowering); + Args.addOptInFlag(CmdArgs, options::OPT_fclangir_call_conv_lowering, + options::OPT_fno_clangir_call_conv_lowering); if (Args.hasArg(options::OPT_fclangir_mem2reg)) CmdArgs.push_back("-fclangir-mem2reg"); diff --git a/clang/test/CIR/Driver/callconv.cpp b/clang/test/CIR/Driver/callconv.cpp index f857369d9215..3227820ad721 100644 --- a/clang/test/CIR/Driver/callconv.cpp +++ b/clang/test/CIR/Driver/callconv.cpp @@ -1,4 +1,4 @@ // RUN: %clang %s -fno-clangir-call-conv-lowering -### -c %s 2>&1 | FileCheck --check-prefix=DISABLE %s // DISABLE: "-fno-clangir-call-conv-lowering" // RUN: %clang %s -fclangir-call-conv-lowering -### -c %s 2>&1 | FileCheck --check-prefix=ENABLE %s -// ENABLE-NOT: "-fclangir-call-conv-lowering" +// ENABLE: "-fclangir-call-conv-lowering" From 46cb0971262352fc8cfd1d2b32847c9a07be5392 Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 16 Oct 2024 18:36:30 -0400 Subject: [PATCH 1960/2301] [CIR][CIRGen][Builtin] Implement builtin addressof (#987) --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 4 ++ clang/test/CIR/CodeGen/builtins.cpp | 58 +++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 clang/test/CIR/CodeGen/builtins.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index c72eb7a14e74..59dce4184c5e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1341,6 +1341,10 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(ArithResult.overflow); } + case Builtin::BIaddressof: + case Builtin::BI__addressof: + case Builtin::BI__builtin_addressof: + return RValue::get(buildLValue(E->getArg(0)).getPointer()); } // If this is an alias for a lib function (e.g. __builtin_sin), emit diff --git a/clang/test/CIR/CodeGen/builtins.cpp b/clang/test/CIR/CodeGen/builtins.cpp new file mode 100644 index 000000000000..fa7b51e88016 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtins.cpp @@ -0,0 +1,58 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -emit-llvm -fno-clangir-call-conv-lowering -o - %s \ +// RUN: | opt -S -passes=instcombine,mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// This test file is a collection of test cases for all target-independent +// builtins that are related to memory operations. + +int s; + +int *test_addressof() { + return __builtin_addressof(s); + + // CIR-LABEL: test_addressof + // CIR: [[ADDR:%.*]] = cir.get_global @s : !cir.ptr + // CIR: cir.store [[ADDR]], [[SAVE:%.*]] : !cir.ptr, !cir.ptr> + // CIR: [[RES:%.*]] = cir.load [[SAVE]] : !cir.ptr>, !cir.ptr + // CIR: cir.return [[RES]] : !cir.ptr + + // LLVM-LABEL: test_addressof + // LLVM: store ptr @s, ptr [[ADDR:%.*]], align 8 + // LLVM: [[RES:%.*]] = load ptr, ptr [[ADDR]], align 8 + // LLVM: ret ptr [[RES]] +} + +namespace std { template T *addressof(T &); } +int *test_std_addressof() { + return std::addressof(s); + + // CIR-LABEL: test_std_addressof + // CIR: [[ADDR:%.*]] = cir.get_global @s : !cir.ptr + // CIR: cir.store [[ADDR]], [[SAVE:%.*]] : !cir.ptr, !cir.ptr> + // CIR: [[RES:%.*]] = cir.load [[SAVE]] : !cir.ptr>, !cir.ptr + // CIR: cir.return [[RES]] : !cir.ptr + + // LLVM-LABEL: test_std_addressof + // LLVM: store ptr @s, ptr [[ADDR:%.*]], align 8 + // LLVM: [[RES:%.*]] = load ptr, ptr [[ADDR]], align 8 + // LLVM: ret ptr [[RES]] +} + +namespace std { template T *__addressof(T &); } +int *test_std_addressof2() { + return std::__addressof(s); + + // CIR-LABEL: test_std_addressof2 + // CIR: [[ADDR:%.*]] = cir.get_global @s : !cir.ptr + // CIR: cir.store [[ADDR]], [[SAVE:%.*]] : !cir.ptr, !cir.ptr> + // CIR: [[RES:%.*]] = cir.load [[SAVE]] : !cir.ptr>, !cir.ptr + // CIR: cir.return [[RES]] : !cir.ptr + + /// LLVM-LABEL: test_std_addressof2 + // LLVM: store ptr @s, ptr [[ADDR:%.*]], align 8 + // LLVM: [[RES:%.*]] = load ptr, ptr [[ADDR]], align 8 + // LLVM: ret ptr [[RES]] +} From b56d427d740f2b4142b68aa04f5852902db9c936 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 16 Oct 2024 16:00:55 -0700 Subject: [PATCH 1961/2301] [CIR][NFC] Improve documentation about goto's --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 61 +++++++++++++++----- 1 file changed, 45 insertions(+), 16 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 58d6f19ca123..ab89c87c3ae4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1911,6 +1911,9 @@ def BrOp : CIR_Op<"br", The `cir.br` branches unconditionally to a block. Used to represent C/C++ goto's and general block branching. + Note that for source level `goto`'s crossing scope boundaries, those are + usually represented with the "symbolic" `cir.goto` operation. + Example: ```mlir @@ -4599,26 +4602,52 @@ def SwitchFlatOp : CIR_Op<"switch.flat", [AttrSizedOperandSegments, Terminator]> //===----------------------------------------------------------------------===// def GotoOp : CIR_Op<"goto", [Terminator]> { - let description = [{ Transfers control to the specified label. + let description = [{ - Example: - ```C++ - void foo() { - goto exit; + Transfers control to the specified `label`. This requires a corresponding + `cir.label` to exist and is used by to represent source level `goto`s + that jump across region boundaries. Alternatively, `cir.br` is used to + construct goto's that don't violate such boundaries. - exit: - return; - } - ``` + `cir.goto` is completely symbolic (i.e. it "jumps" on a label that isn't + yet materialized) and should be taken into account by passes and analysis + when deciding if it's safe to make some assumptions about a given region + or basic block. - ```mlir - cir.func @foo() { - cir.goto "exit" - ^bb1: - cir.label "exit" - cir.return + Example: + ```C++ + int test(int x) { + if (x) + goto label; + { + x = 10; + label: + return x; + } } - ``` + ``` + + ```mlir + cir.scope { // REGION #1 + %2 = cir.load %0 : !cir.ptr, !s32i + %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool + cir.if %3 { + cir.goto "label" + } + } + cir.scope { // REGION #2 + %2 = cir.const #cir.int<10> : !s32i + cir.store %2, %0 : !s32i, !cir.ptr + cir.br ^bb1 + ^bb1: // pred: ^bb0 + cir.label "label" + %3 = cir.load %0 : !cir.ptr, !s32i + cir.store %3, %1 : !s32i, !cir.ptr + %4 = cir.load %1 : !cir.ptr, !s32i + cir.return %4 : !s32i + } + cir.unreachable + ``` }]; let arguments = (ins StrAttr:$label); let assemblyFormat = [{ $label attr-dict }]; From 26207bd3f985d213d4bf3f7007148546ac76a540 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 16 Oct 2024 16:59:57 -0700 Subject: [PATCH 1962/2301] [CIR][NFC] Move code around to match OG --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 82 ++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 98 ------------------------ 2 files changed, 82 insertions(+), 98 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index a726b6c77475..f12c61a2ea2f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1810,3 +1810,85 @@ void CIRGenFunction::buildCXXAggrConstructorCall( if (constantCount.use_empty()) constantCount.erase(); } + +void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, + bool ForVirtualBase, + bool Delegating, + AggValueSlot ThisAVS, + const clang::CXXConstructExpr *E) { + CallArgList Args; + Address This = ThisAVS.getAddress(); + LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace(); + QualType ThisType = D->getThisType(); + LangAS ThisAS = ThisType.getTypePtr()->getPointeeType().getAddressSpace(); + mlir::Value ThisPtr = This.getPointer(); + + assert(SlotAS == ThisAS && "This edge case NYI"); + + Args.add(RValue::get(ThisPtr), D->getThisType()); + + // In LLVM Codegen: If this is a trivial constructor, just emit what's needed. + // If this is a union copy constructor, we must emit a memcpy, because the AST + // does not model that copy. + if (isMemcpyEquivalentSpecialMember(D)) { + assert(!MissingFeatures::isMemcpyEquivalentSpecialMember()); + } + + const FunctionProtoType *FPT = D->getType()->castAs(); + EvaluationOrder Order = E->isListInitialization() + ? EvaluationOrder::ForceLeftToRight + : EvaluationOrder::Default; + + buildCallArgs(Args, FPT, E->arguments(), E->getConstructor(), + /*ParamsToSkip*/ 0, Order); + + buildCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args, + ThisAVS.mayOverlap(), E->getExprLoc(), + ThisAVS.isSanitizerChecked()); +} + +void CIRGenFunction::buildCXXConstructorCall( + const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, + bool Delegating, Address This, CallArgList &Args, + AggValueSlot::Overlap_t Overlap, SourceLocation Loc, + bool NewPointerIsChecked) { + + const auto *ClassDecl = D->getParent(); + + if (!NewPointerIsChecked) + buildTypeCheck(CIRGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), + getContext().getRecordType(ClassDecl), CharUnits::Zero()); + + // If this is a call to a trivial default constructor: + // In LLVM: do nothing. + // In CIR: emit as a regular call, other later passes should lower the + // ctor call into trivial initialization. + assert(!MissingFeatures::isTrivialCtorOrDtor()); + + if (isMemcpyEquivalentSpecialMember(D)) { + assert(!MissingFeatures::isMemcpyEquivalentSpecialMember()); + } + + bool PassPrototypeArgs = true; + + assert(!D->getInheritedConstructor() && "inheritance NYI"); + + // Insert any ABI-specific implicit constructor arguments. + CIRGenCXXABI::AddedStructorArgCounts ExtraArgs = + CGM.getCXXABI().addImplicitConstructorArgs(*this, D, Type, ForVirtualBase, + Delegating, Args); + + // Emit the call. + auto CalleePtr = CGM.getAddrOfCXXStructor(GlobalDecl(D, Type)); + const CIRGenFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall( + Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); + CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); + mlir::cir::CIRCallOpInterface C; + buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, getLoc(Loc)); + + assert(CGM.getCodeGenOpts().OptimizationLevel == 0 || + ClassDecl->isDynamicClass() || Type == Ctor_Base || + !CGM.getCodeGenOpts().StrictVTablePointers && + "vtable assumption loads NYI"); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index f4cc98277252..a78bfe322537 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -747,104 +747,6 @@ mlir::Value CIRGenFunction::createLoad(const VarDecl *VD, const char *Name) { addr.getElementType(), addr.getPointer()); } -static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) { - auto *CD = llvm::dyn_cast(D); - if (!(CD && CD->isCopyOrMoveConstructor()) && - !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator()) - return false; - - // We can emit a memcpy for a trivial copy or move constructor/assignment - if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) - return true; - - if (D->getParent()->isUnion() && D->isDefaulted()) - return true; - - return false; -} - -void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, - clang::CXXCtorType Type, - bool ForVirtualBase, - bool Delegating, - AggValueSlot ThisAVS, - const clang::CXXConstructExpr *E) { - CallArgList Args; - Address This = ThisAVS.getAddress(); - LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace(); - QualType ThisType = D->getThisType(); - LangAS ThisAS = ThisType.getTypePtr()->getPointeeType().getAddressSpace(); - mlir::Value ThisPtr = This.getPointer(); - - assert(SlotAS == ThisAS && "This edge case NYI"); - - Args.add(RValue::get(ThisPtr), D->getThisType()); - - // In LLVM Codegen: If this is a trivial constructor, just emit what's needed. - // If this is a union copy constructor, we must emit a memcpy, because the AST - // does not model that copy. - if (isMemcpyEquivalentSpecialMember(D)) { - assert(!MissingFeatures::isMemcpyEquivalentSpecialMember()); - } - - const FunctionProtoType *FPT = D->getType()->castAs(); - EvaluationOrder Order = E->isListInitialization() - ? EvaluationOrder::ForceLeftToRight - : EvaluationOrder::Default; - - buildCallArgs(Args, FPT, E->arguments(), E->getConstructor(), - /*ParamsToSkip*/ 0, Order); - - buildCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args, - ThisAVS.mayOverlap(), E->getExprLoc(), - ThisAVS.isSanitizerChecked()); -} - -void CIRGenFunction::buildCXXConstructorCall( - const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, - bool Delegating, Address This, CallArgList &Args, - AggValueSlot::Overlap_t Overlap, SourceLocation Loc, - bool NewPointerIsChecked) { - - const auto *ClassDecl = D->getParent(); - - if (!NewPointerIsChecked) - buildTypeCheck(CIRGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), - getContext().getRecordType(ClassDecl), CharUnits::Zero()); - - // If this is a call to a trivial default constructor: - // In LLVM: do nothing. - // In CIR: emit as a regular call, other later passes should lower the - // ctor call into trivial initialization. - assert(!MissingFeatures::isTrivialCtorOrDtor()); - - if (isMemcpyEquivalentSpecialMember(D)) { - assert(!MissingFeatures::isMemcpyEquivalentSpecialMember()); - } - - bool PassPrototypeArgs = true; - - assert(!D->getInheritedConstructor() && "inheritance NYI"); - - // Insert any ABI-specific implicit constructor arguments. - CIRGenCXXABI::AddedStructorArgCounts ExtraArgs = - CGM.getCXXABI().addImplicitConstructorArgs(*this, D, Type, ForVirtualBase, - Delegating, Args); - - // Emit the call. - auto CalleePtr = CGM.getAddrOfCXXStructor(GlobalDecl(D, Type)); - const CIRGenFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall( - Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); - CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); - mlir::cir::CIRCallOpInterface C; - buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, getLoc(Loc)); - - assert(CGM.getCodeGenOpts().OptimizationLevel == 0 || - ClassDecl->isDynamicClass() || Type == Ctor_Base || - !CGM.getCodeGenOpts().StrictVTablePointers && - "vtable assumption loads NYI"); -} - void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { // TODO: EmitAsanPrologueOrEpilogue(true); const auto *Ctor = cast(CurGD.getDecl()); From 70bf5c3131d9c0194e4455348e0f929a5fe7c83a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 17 Oct 2024 17:43:42 -0700 Subject: [PATCH 1963/2301] [CIR][CIRGen][NFC] Add more skeleton for handling inheritance ctors While here add some bits for ptr auth and match OG. --- clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/Address.h | 7 +++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 62 ++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 12 +++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 28 ++++++++++ clang/lib/CIR/CodeGen/CIRGenPointerAuth.cpp | 23 ++++++++ clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + 7 files changed, 130 insertions(+), 4 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenPointerAuth.cpp diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 18974d3286b8..a22112bfdf55 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -60,6 +60,7 @@ struct MissingFeatures { static bool tbaa() { return false; } static bool cleanups() { return false; } static bool emitNullabilityCheck() { return false; } + static bool ptrAuth() { return false; } // GNU vectors are done, but other kinds of vectors haven't been implemented. static bool scalableVectors() { return false; } diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index fdddf6fae500..433aa5db6b89 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -91,6 +91,13 @@ class Address { return PointerAndKnownNonNull.getPointer(); } + mlir::Value getBasePointer() const { + // TODO(cir): Remove the version above when we catchup with OG codegen on + // ptr auth. + assert(isValid() && "pointer isn't valid"); + return getPointer(); + } + /// Return the alignment of this pointer. clang::CharUnits getAlignment() const { // assert(isValid()); diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index f12c61a2ea2f..29bc5e5938c6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1811,6 +1811,30 @@ void CIRGenFunction::buildCXXAggrConstructorCall( constantCount.erase(); } +static bool canEmitDelegateCallArgs(CIRGenFunction &CGF, + const CXXConstructorDecl *Ctor, + CXXCtorType Type, CallArgList &Args) { + // We can't forward a variadic call. + if (Ctor->isVariadic()) + return false; + + if (CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { + // If the parameters are callee-cleanup, it's not safe to forward. + for (auto *P : Ctor->parameters()) + if (P->needsDestruction(CGF.getContext())) + return false; + + // Likewise if they're inalloca. + const CIRGenFunctionInfo &Info = + CGF.CGM.getTypes().arrangeCXXConstructorCall(Args, Ctor, Type, 0, 0); + if (Info.usesInAlloca()) + return false; + } + + // Anything else should be OK. + return true; +} + void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, clang::CXXCtorType Type, bool ForVirtualBase, @@ -1872,7 +1896,14 @@ void CIRGenFunction::buildCXXConstructorCall( bool PassPrototypeArgs = true; - assert(!D->getInheritedConstructor() && "inheritance NYI"); + // Check whether we can actually emit the constructor before trying to do so. + if (auto Inherited = D->getInheritedConstructor()) { + PassPrototypeArgs = getTypes().inheritingCtorHasParams(Inherited, Type); + if (PassPrototypeArgs && !canEmitDelegateCallArgs(*this, D, Type, Args)) { + llvm_unreachable("NYI"); + return; + } + } // Insert any ABI-specific implicit constructor arguments. CIRGenCXXABI::AddedStructorArgCounts ExtraArgs = @@ -1891,4 +1922,33 @@ void CIRGenFunction::buildCXXConstructorCall( ClassDecl->isDynamicClass() || Type == Ctor_Base || !CGM.getCodeGenOpts().StrictVTablePointers && "vtable assumption loads NYI"); +} + +void CIRGenFunction::buildInheritedCXXConstructorCall( + const CXXConstructorDecl *D, bool ForVirtualBase, Address This, + bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) { + CallArgList Args; + CallArg ThisArg(RValue::get(getAsNaturalPointerTo( + This, D->getThisType()->getPointeeType())), + D->getThisType()); + + // Forward the parameters. + if (InheritedFromVBase && + CGM.getTarget().getCXXABI().hasConstructorVariants()) { + llvm_unreachable("NYI"); + } else if (!CXXInheritedCtorInitExprArgs.empty()) { + // The inheriting constructor was inlined; just inject its arguments. + llvm_unreachable("NYI"); + } else { + // The inheriting constructor was not inlined. Emit delegating arguments. + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); +} + +void CIRGenFunction::buildInlinedInheritingCXXConstructorCall( + const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase, + bool Delegating, CallArgList &Args) { + llvm_unreachable("NYI"); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index aae0189a10f5..2658e391576e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -296,9 +296,7 @@ class AggExprEmitter : public StmtVisitor { } void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); void VisitCXXConstructExpr(const CXXConstructExpr *E); - void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E) { - llvm_unreachable("NYI"); - } + void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E); void VisitLambdaExpr(LambdaExpr *E); void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { ASTContext &Ctx = CGF.getContext(); @@ -1456,6 +1454,14 @@ void AggExprEmitter::VisitBinComma(const BinaryOperator *E) { Visit(E->getRHS()); } +void AggExprEmitter::VisitCXXInheritedCtorInitExpr( + const CXXInheritedCtorInitExpr *E) { + AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType()); + CGF.buildInheritedCXXConstructorCall(E->getConstructor(), + E->constructsVBase(), Slot.getAddress(), + E->inheritedFromVBase(), E); +} + //===----------------------------------------------------------------------===// // Helpers and dispatcher //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index b8adb165b548..66c62a23ebae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -655,6 +655,24 @@ class CIRGenFunction : public CIRGenTypeCache { void buildCXXConstructExpr(const clang::CXXConstructExpr *E, AggValueSlot Dest); + /// Emit a call to an inheriting constructor (that is, one that invokes a + /// constructor inherited from a base class) by inlining its definition. This + /// is necessary if the ABI does not support forwarding the arguments to the + /// base class constructor (because they're variadic or similar). + void buildInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor, + CXXCtorType CtorType, + bool ForVirtualBase, + bool Delegating, + CallArgList &Args); + + /// Emit a call to a constructor inherited from a base class, passing the + /// current constructor's arguments along unmodified (without even making + /// a copy). + void buildInheritedCXXConstructorCall(const CXXConstructorDecl *D, + bool ForVirtualBase, Address This, + bool InheritedFromVBase, + const CXXInheritedCtorInitExpr *E); + void buildCXXConstructorCall(const clang::CXXConstructorDecl *D, clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating, AggValueSlot ThisAVS, @@ -920,6 +938,12 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildCallExpr(const clang::CallExpr *E, ReturnValueSlot ReturnValue = ReturnValueSlot()); + Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy); + + mlir::Value getAsNaturalPointerTo(Address Addr, QualType PointeeType) { + return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer(); + } + mlir::Value buildRuntimeCall(mlir::Location loc, mlir::cir::FuncOp callee, ArrayRef args = {}); @@ -1937,6 +1961,10 @@ class CIRGenFunction : public CIRGenTypeCache { Destroyer *destroyer, bool checkZeroLength, bool useEHCleanup); + /// The values of function arguments to use when evaluating + /// CXXInheritedCtorInitExprs within this context. + CallArgList CXXInheritedCtorInitExprArgs; + // Points to the outermost active conditional control. This is used so that // we know if a temporary should be destroyed conditionally. ConditionalEvaluation *OutermostConditional = nullptr; diff --git a/clang/lib/CIR/CodeGen/CIRGenPointerAuth.cpp b/clang/lib/CIR/CodeGen/CIRGenPointerAuth.cpp new file mode 100644 index 000000000000..842cb361423f --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenPointerAuth.cpp @@ -0,0 +1,23 @@ +//===--- CIRGenPointerAuth.cpp - CIR generation for ptr auth --------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains common routines relating to the emission of +// pointer authentication operations. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenFunction.h" + +using namespace clang; +using namespace cir; + +Address CIRGenFunction::getAsNaturalAddressOf(Address Addr, + QualType PointeeTy) { + assert(!MissingFeatures::ptrAuth() && "NYI"); + return Addr; +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 97a8ad4f5ea8..02ac813ef732 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -34,6 +34,7 @@ add_clang_library(clangCIR CIRGenOpenCLRuntime.cpp CIRGenOpenCL.cpp CIRGenOpenMPRuntime.cpp + CIRGenPointerAuth.cpp CIRGenStmt.cpp CIRGenStmtOpenMP.cpp CIRGenTBAA.cpp From 709eb99923e260393eb375894f10991cbd864e0b Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Thu, 17 Oct 2024 19:58:05 -0700 Subject: [PATCH 1964/2301] [CIR][CIRGen] Ensure default visibility for local linkage functions (#990) LLVM's verifier enforces this, which was previously causing us to fail verification. This is a bit of a band-aid; the overall linkage and visibility setting flow needs some work to match the original. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 +++++++++-- clang/test/CIR/CodeGen/visibility-attribute.c | 15 +++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index cb96adf1e257..872e5e9e05c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2608,8 +2608,15 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl, // TODO(cir): Complete the remaining part of the function. assert(!MissingFeatures::setFunctionAttributes()); - auto decl = globalDecl.getDecl(); - func.setGlobalVisibilityAttr(getGlobalVisibilityAttrFromDecl(decl)); + + // TODO(cir): This needs a lot of work to better match CodeGen. That + // ultimately ends up in setGlobalVisibility, which already has the linkage of + // the LLVM GV (corresponding to our FuncOp) computed, so it doesn't have to + // recompute it here. This is a minimal fix for now. + if (!isLocalLinkage(getFunctionLinkage(globalDecl))) { + auto decl = globalDecl.getDecl(); + func.setGlobalVisibilityAttr(getGlobalVisibilityAttrFromDecl(decl)); + } } /// If the specified mangled name is not in the module, diff --git a/clang/test/CIR/CodeGen/visibility-attribute.c b/clang/test/CIR/CodeGen/visibility-attribute.c index fb675fb51751..07834d78910e 100644 --- a/clang/test/CIR/CodeGen/visibility-attribute.c +++ b/clang/test/CIR/CodeGen/visibility-attribute.c @@ -30,9 +30,24 @@ void __attribute__((__visibility__("protected"))) foo_protected(); // CIR: cir.func no_proto private protected @foo_protected(...) // LLVM: declare {{.*}} protected void @foo_protected(...) +static void static_foo_default() {} +// CIR: cir.func no_proto internal private @static_foo_default() +// LLVM: define internal void @static_foo_default() + +static void __attribute__((__visibility__("hidden"))) static_foo_hidden() {} +// CIR: cir.func no_proto internal private @static_foo_hidden() +// LLVM: define internal void @static_foo_hidden() + +static void __attribute__((__visibility__("protected"))) static_foo_protected() {} +// CIR: cir.func no_proto internal private @static_foo_protected() +// LLVM: define internal void @static_foo_protected() + void call_foo() { foo_default(); foo_hidden(); foo_protected(); + static_foo_default(); + static_foo_hidden(); + static_foo_protected(); } From d0d73098c4cdca93a1e33db8acd71408beeb1cc9 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 18 Oct 2024 14:30:15 -0400 Subject: [PATCH 1965/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vshl_n_v and neon_vshlq_n_v (#965) As title, but important step in this PR is to allow CIR ShiftOp to take vector of int type as input type. As result, I added a verifier to ShiftOp with 2 constraints 1. Input type either all vector or int type. This is consistent with LLVM::ShlOp, vector shift amount is expected. 2. In the spirit of C99 6.5.7.3, shift amount type must be the same as result type, the if vector type is used. (This is enforced in LLVM lowering for scalar int type). --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 14 +- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 3 + .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 32 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 17 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 28 +- clang/test/CIR/CodeGen/AArch64/neon.c | 281 +++++++++++------- clang/test/CIR/CodeGen/vectype.cpp | 18 +- clang/test/CIR/IR/cir-ops.cir | 16 + clang/test/CIR/IR/invalid.cir | 33 +- clang/test/CIR/Lowering/vectype.cpp | 18 ++ 10 files changed, 342 insertions(+), 118 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index ab89c87c3ae4..918898d898ed 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1181,15 +1181,20 @@ def ShiftOp : CIR_Op<"shift", [Pure]> { let summary = "Shift"; let description = [{ Shift `left` or `right`, according to the first operand. Second operand is - the shift target and the third the amount. + the shift target and the third the amount. Second and the thrid operand can + be either integer type or vector of integer type. However, they must be + either all vector of integer type, or all integer type. If they are vectors, + each vector element of the shift target is shifted by the corresponding + shift amount in the shift amount vector. ```mlir %7 = cir.shift(left, %1 : !u64i, %4 : !s32i) -> !u64i + %8 = cir.shift(left, %2 : !cir.vector, %3 : !cir.vector) -> !cir.vector ``` }]; - let results = (outs CIR_IntType:$result); - let arguments = (ins CIR_IntType:$value, CIR_IntType:$amount, + let results = (outs CIR_AnyIntOrVecOfInt:$result); + let arguments = (ins CIR_AnyIntOrVecOfInt:$value, CIR_AnyIntOrVecOfInt:$amount, UnitAttr:$isShiftleft); let assemblyFormat = [{ @@ -1200,8 +1205,7 @@ def ShiftOp : CIR_Op<"shift", [Pure]> { `)` `->` type($result) attr-dict }]; - // Already covered by the traits - let hasVerifier = 0; + let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index c20915bbd6ef..81b939df6b1f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -537,6 +537,9 @@ def IntegerVector : Type< ]>, "!cir.vector of !cir.int"> { } +// Constraints +def CIR_AnyIntOrVecOfInt: AnyTypeOf<[CIR_IntType, IntegerVector]>; + // Pointer to Arrays def ArrayPtr : Type< And<[ diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 349bc2d7884e..6e8c2be4c94d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2205,10 +2205,10 @@ static int64_t getIntValueFromConstOp(mlir::Value val) { } /// This function `buildCommonNeonCallPattern0` implements a common way -// to generate neon intrinsic call that has following pattern: -// 1. There is a need to cast result of the intrinsic call back to -// expression type. -// 2. Function arg types are given, not deduced from actual arg types. +/// to generate neon intrinsic call that has following pattern: +/// 1. There is a need to cast result of the intrinsic call back to +/// expression type. +/// 2. Function arg types are given, not deduced from actual arg types. static mlir::Value buildCommonNeonCallPattern0(CIRGenFunction &cgf, std::string &intrincsName, llvm::SmallVector argTypes, @@ -2222,6 +2222,23 @@ buildCommonNeonCallPattern0(CIRGenFunction &cgf, std::string &intrincsName, return builder.createBitcast(res, resultType); } +/// Build a constant shift amount vector of `vecTy` to shift a vector +/// Here `shitfVal` is a constant integer that will be splated into a +/// a const vector of `vecTy` which is the return of this function +static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, + mlir::Value shiftVal, + mlir::cir::VectorType vecTy, + mlir::Location loc, bool neg) { + int shiftAmt = getIntValueFromConstOp(shiftVal); + llvm::SmallVector vecAttr{ + vecTy.getSize(), + // ConstVectorAttr requires cir::IntAttr + mlir::cir::IntAttr::get(vecTy.getEltType(), shiftAmt)}; + mlir::cir::ConstVectorAttr constVecAttr = mlir::cir::ConstVectorAttr::get( + vecTy, mlir::ArrayAttr::get(builder.getContext(), vecAttr)); + return builder.create(loc, vecTy, constVecAttr); +} + mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, @@ -2298,6 +2315,13 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "llvm.aarch64.neon.sqrdmulh.lane", resTy, getLoc(e->getExprLoc())); } + case NEON::BI__builtin_neon_vshl_n_v: + case NEON::BI__builtin_neon_vshlq_n_v: { + mlir::Location loc = getLoc(e->getExprLoc()); + ops[1] = buildNeonShiftVector(builder, ops[1], vTy, loc, false); + return builder.create( + loc, vTy, builder.createBitcast(ops[0], vTy), ops[1], true); + } } // This second switch is for the intrinsics that might have a more generic diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 0251bc53c084..5c412597c624 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -3939,6 +3939,23 @@ LogicalResult BinOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// ShiftOp Definitions +//===----------------------------------------------------------------------===// +LogicalResult ShiftOp::verify() { + mlir::Operation *op = getOperation(); + mlir::Type resType = getResult().getType(); + bool isOp0Vec = mlir::isa(op->getOperand(0).getType()); + bool isOp1Vec = mlir::isa(op->getOperand(1).getType()); + if (isOp0Vec != isOp1Vec) + return emitOpError() << "input types cannot be one vector and one scalar"; + if (isOp1Vec && op->getOperand(1).getType() != resType) { + return emitOpError() << "shift amount must have the type of the result " + << "if it is vector shift"; + } + return mlir::success(); +} + //===----------------------------------------------------------------------===// // LabelOp Definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 979354a10bc5..0af6065829c5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2773,24 +2773,40 @@ class CIRShiftOpLowering auto cirAmtTy = mlir::dyn_cast(op.getAmount().getType()); auto cirValTy = mlir::dyn_cast(op.getValue().getType()); + + // Operands could also be vector type + auto cirAmtVTy = + mlir::dyn_cast(op.getAmount().getType()); + auto cirValVTy = + mlir::dyn_cast(op.getValue().getType()); auto llvmTy = getTypeConverter()->convertType(op.getType()); mlir::Value amt = adaptor.getAmount(); mlir::Value val = adaptor.getValue(); - assert(cirValTy && cirAmtTy && "non-integer shift is NYI"); - assert(cirValTy == op.getType() && "inconsistent operands' types NYI"); + assert(((cirValTy && cirAmtTy) || (cirAmtVTy && cirValVTy)) && + "shift input type must be integer or vector type, otherwise NYI"); + + assert((cirValTy == op.getType() || cirValVTy == op.getType()) && + "inconsistent operands' types NYI"); // Ensure shift amount is the same type as the value. Some undefined // behavior might occur in the casts below as per [C99 6.5.7.3]. - amt = getLLVMIntCast(rewriter, amt, mlir::cast(llvmTy), - !cirAmtTy.isSigned(), cirAmtTy.getWidth(), - cirValTy.getWidth()); + // Vector type shift amount needs no cast as type consistency is expected to + // be already be enforced at CIRGen. + if (cirAmtTy) + amt = getLLVMIntCast(rewriter, amt, mlir::cast(llvmTy), + !cirAmtTy.isSigned(), cirAmtTy.getWidth(), + cirValTy.getWidth()); // Lower to the proper LLVM shift operation. if (op.getIsShiftleft()) rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); else { - if (cirValTy.isUnsigned()) + bool isUnSigned = + cirValTy ? !cirValTy.isSigned() + : !mlir::cast(cirValVTy.getEltType()) + .isSigned(); + if (isUnSigned) rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); else rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 4acd2e8131c2..730a7acee887 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -4634,123 +4634,202 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { // return vmulxq_f64(a, b); // } -// NYI-LABEL: @test_vshl_n_s8( -// NYI: [[VSHL_N:%.*]] = shl <8 x i8> %a, -// NYI: ret <8 x i8> [[VSHL_N]] -// int8x8_t test_vshl_n_s8(int8x8_t a) { -// return vshl_n_s8(a, 3); -// } -// NYI-LABEL: @test_vshl_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[VSHL_N:%.*]] = shl <4 x i16> [[TMP1]], -// NYI: ret <4 x i16> [[VSHL_N]] -// int16x4_t test_vshl_n_s16(int16x4_t a) { -// return vshl_n_s16(a, 3); -// } +int8x8_t test_vshl_n_s8(int8x8_t a) { + return vshl_n_s8(a, 3); -// NYI-LABEL: @test_vshl_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[VSHL_N:%.*]] = shl <2 x i32> [[TMP1]], -// NYI: ret <2 x i32> [[VSHL_N]] -// int32x2_t test_vshl_n_s32(int32x2_t a) { -// return vshl_n_s32(a, 3); -// } + // CIR-LABEL: @test_vshl_n_s8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshl_n_s8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VSHL_N:%.*]] = shl <8 x i8> [[A]], splat (i8 3) + // LLVM: ret <8 x i8> [[VSHL_N]] +} + + +int16x4_t test_vshl_n_s16(int16x4_t a) { + return vshl_n_s16(a, 3); + + // CIR-LABEL: @test_vshl_n_s16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR-SAME: #cir.int<3> : !s16i]> + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshl_n_s16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[VSHL_N:%.*]] = shl <4 x i16> [[TMP1]], splat (i16 3) + // LLVM: ret <4 x i16> [[VSHL_N]] +} + +int32x2_t test_vshl_n_s32(int32x2_t a) { + return vshl_n_s32(a, 3); + + // CIR-LABEL: @test_vshl_n_s32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i]> + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshl_n_s32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[VSHL_N:%.*]] = shl <2 x i32> [[TMP1]], splat (i32 3) + // LLVM: ret <2 x i32> [[VSHL_N]] +} // NYI-LABEL: @test_vshlq_n_s8( // NYI: [[VSHL_N:%.*]] = shl <16 x i8> %a, // NYI: ret <16 x i8> [[VSHL_N]] -// int8x16_t test_vshlq_n_s8(int8x16_t a) { -// return vshlq_n_s8(a, 3); -// } +int8x16_t test_vshlq_n_s8(int8x16_t a) { + return vshlq_n_s8(a, 3); + + // CIR-LABEL: @test_vshlq_n_s8 + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshlq_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VSHL_N:%.*]] = shl <8 x i16> [[TMP1]], -// NYI: ret <8 x i16> [[VSHL_N]] -// int16x8_t test_vshlq_n_s16(int16x8_t a) { -// return vshlq_n_s16(a, 3); -// } + // LLVM: {{.*}}@test_vshlq_n_s8(<16 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VSHL_N:%.*]] = shl <16 x i8> [[A]], splat (i8 3) + // LLVM: ret <16 x i8> [[VSHL_N]] +} -// NYI-LABEL: @test_vshlq_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VSHL_N:%.*]] = shl <4 x i32> [[TMP1]], -// NYI: ret <4 x i32> [[VSHL_N]] -// int32x4_t test_vshlq_n_s32(int32x4_t a) { -// return vshlq_n_s32(a, 3); -// } +int16x8_t test_vshlq_n_s16(int16x8_t a) { + return vshlq_n_s16(a, 3); -// NYI-LABEL: @test_vshlq_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VSHL_N:%.*]] = shl <2 x i64> [[TMP1]], -// NYI: ret <2 x i64> [[VSHL_N]] -// int64x2_t test_vshlq_n_s64(int64x2_t a) { -// return vshlq_n_s64(a, 3); -// } + // CIR-LABEL: @test_vshlq_n_s16 + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshl_n_u8( -// NYI: [[VSHL_N:%.*]] = shl <8 x i8> %a, -// NYI: ret <8 x i8> [[VSHL_N]] -// uint8x8_t test_vshl_n_u8(uint8x8_t a) { -// return vshl_n_u8(a, 3); -// } + // LLVM: {{.*}}@test_vshlq_n_s16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VSHL_N:%.*]] = shl <8 x i16> [[TMP1]], splat (i16 3) + // LLVM: ret <8 x i16> [[VSHL_N]] +} -// NYI-LABEL: @test_vshl_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[VSHL_N:%.*]] = shl <4 x i16> [[TMP1]], -// NYI: ret <4 x i16> [[VSHL_N]] -// uint16x4_t test_vshl_n_u16(uint16x4_t a) { -// return vshl_n_u16(a, 3); -// } -// NYI-LABEL: @test_vshl_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[VSHL_N:%.*]] = shl <2 x i32> [[TMP1]], -// NYI: ret <2 x i32> [[VSHL_N]] -// uint32x2_t test_vshl_n_u32(uint32x2_t a) { -// return vshl_n_u32(a, 3); -// } +int32x4_t test_vshlq_n_s32(int32x4_t a) { + return vshlq_n_s32(a, 3); -// NYI-LABEL: @test_vshlq_n_u8( -// NYI: [[VSHL_N:%.*]] = shl <16 x i8> %a, -// NYI: ret <16 x i8> [[VSHL_N]] -// uint8x16_t test_vshlq_n_u8(uint8x16_t a) { -// return vshlq_n_u8(a, 3); -// } + // CIR-LABEL: @test_vshlq_n_s32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : + // CIR-SAME: !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i]> + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : + // CIR-SAME: !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshlq_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VSHL_N:%.*]] = shl <8 x i16> [[TMP1]], -// NYI: ret <8 x i16> [[VSHL_N]] -// uint16x8_t test_vshlq_n_u16(uint16x8_t a) { -// return vshlq_n_u16(a, 3); -// } + // LLVM: {{.*}}@test_vshlq_n_s32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VSHL_N:%.*]] = shl <4 x i32> [[TMP1]], splat (i32 3) + // LLVM: ret <4 x i32> [[VSHL_N]] +} -// NYI-LABEL: @test_vshlq_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VSHL_N:%.*]] = shl <4 x i32> [[TMP1]], -// NYI: ret <4 x i32> [[VSHL_N]] -// uint32x4_t test_vshlq_n_u32(uint32x4_t a) { -// return vshlq_n_u32(a, 3); -// } +int64x2_t test_vshlq_n_s64(int64x2_t a) { + return vshlq_n_s64(a, 3); -// NYI-LABEL: @test_vshlq_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VSHL_N:%.*]] = shl <2 x i64> [[TMP1]], -// NYI: ret <2 x i64> [[VSHL_N]] -// uint64x2_t test_vshlq_n_u64(uint64x2_t a) { -// return vshlq_n_u64(a, 3); -// } + // CIR-LABEL: @test_vshlq_n_s64 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i, #cir.int<3> : !s64i]> + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : + // CIR-SAME: !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshlq_n_s64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VSHL_N:%.*]] = shl <2 x i64> [[TMP1]], splat (i64 3) + // LLVM: ret <2 x i64> [[VSHL_N]] +} + +uint8x8_t test_vshl_n_u8(uint8x8_t a) { + return vshl_n_u8(a, 3); + + // CIR-LABEL: @test_vshl_n_u8 + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshl_n_u8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VSHL_N:%.*]] = shl <8 x i8> [[A]], splat (i8 3) + // LLVM: ret <8 x i8> [[VSHL_N]] +} + +uint16x4_t test_vshl_n_u16(uint16x4_t a) { + return vshl_n_u16(a, 3); + + // CIR-LABEL: @test_vshl_n_u16 + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshl_n_u16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[VSHL_N:%.*]] = shl <4 x i16> [[TMP1]], splat (i16 3) + // LLVM: ret <4 x i16> [[VSHL_N]] +} + +uint32x2_t test_vshl_n_u32(uint32x2_t a) { + return vshl_n_u32(a, 3); + + // CIR-LABEL: @test_vshl_n_u32 + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshl_n_u32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] + // LLVM: [[VSHL_N:%.*]] = shl <2 x i32> [[TMP1]], splat (i32 3) + // LLVM: ret <2 x i32> [[VSHL_N]] +} + +uint8x16_t test_vshlq_n_u8(uint8x16_t a) { + return vshlq_n_u8(a, 3); + + // CIR-LABEL: @test_vshlq_n_u8 + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshlq_n_u8(<16 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VSHL_N:%.*]] = shl <16 x i8> [[A]], splat (i8 3) + // LLVM: ret <16 x i8> [[VSHL_N]] +} + +uint16x8_t test_vshlq_n_u16(uint16x8_t a) { + return vshlq_n_u16(a, 3); + + // CIR-LABEL: @test_vshlq_n_u16 + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshlq_n_u16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VSHL_N:%.*]] = shl <8 x i16> [[TMP1]], splat (i16 3) + // LLVM: ret <8 x i16> [[VSHL_N]] +} + +uint32x4_t test_vshlq_n_u32(uint32x4_t a) { + return vshlq_n_u32(a, 3); + + // CIR-LABEL: @test_vshlq_n_u32 + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshlq_n_u32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] + // LLVM: [[VSHL_N:%.*]] = shl <4 x i32> [[TMP1]], splat (i32 3) + // LLVM: ret <4 x i32> [[VSHL_N]] +} + +uint64x2_t test_vshlq_n_u64(uint64x2_t a) { + return vshlq_n_u64(a, 3); + + // CIR-LABEL: @test_vshlq_n_u64 + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshlq_n_u64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] + // LLVM: [[VSHL_N:%.*]] = shl <2 x i64> [[TMP1]], splat (i64 3) + // LLVM: ret <2 x i64> [[VSHL_N]] +} // NYI-LABEL: @test_vshr_n_s8( // NYI: [[VSHR_N:%.*]] = ashr <8 x i8> %a, diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index 312a1dcba47f..be5087344fd5 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -6,7 +6,7 @@ typedef double vd2 __attribute__((vector_size(16))); typedef long long vll2 __attribute__((vector_size(16))); typedef unsigned short vus2 __attribute__((vector_size(4))); -void vector_int_test(int x) { +void vector_int_test(int x, unsigned short usx) { // Vector constant. vi4 a = { 1, 2, 3, 4 }; @@ -103,6 +103,22 @@ void vector_int_test(int x) { // CHECK: %{{[0-9]+}} = cir.vec.shuffle(%{{[0-9]+}}, %{{[0-9]+}} : !cir.vector) [#cir.int<7> : !s64i, #cir.int<5> : !s64i, #cir.int<3> : !s64i, #cir.int<1> : !s64i] : !cir.vector vi4 v = __builtin_shufflevector(a, b); // CHECK: %{{[0-9]+}} = cir.vec.shuffle.dynamic %{{[0-9]+}} : !cir.vector, %{{[0-9]+}} : !cir.vector + + // Shifts + vi4 w = a << b; + // CHECK: %{{[0-9]+}} = cir.shift(left, {{%.*}} : !cir.vector, + // CHECK-SAME: {{%.*}} : !cir.vector) -> !cir.vector + vi4 y = a >> b; + // CHECK: %{{[0-9]+}} = cir.shift( right, {{%.*}} : !cir.vector, + // CHECK-SAME: {{%.*}} : !cir.vector) -> !cir.vector + + vus2 z = { usx, usx }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : !u16i, !u16i) : !cir.vector + vus2 zamt = { 3, 4 }; + // CHECK: %{{[0-9]+}} = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<4> : !u16i]> : !cir.vector + vus2 zzz = z >> zamt; + // CHECK: %{{[0-9]+}} = cir.shift( right, {{%.*}} : !cir.vector, + // CHECK-SAME: {{%.*}} : !cir.vector) -> !cir.vector } void vector_double_test(int x, double y) { diff --git a/clang/test/CIR/IR/cir-ops.cir b/clang/test/CIR/IR/cir-ops.cir index 73a8de8c40cd..2c2d137ab92e 100644 --- a/clang/test/CIR/IR/cir-ops.cir +++ b/clang/test/CIR/IR/cir-ops.cir @@ -57,6 +57,14 @@ module { %5 = cir.objsize(%3 : , min) -> !u64i cir.return } + + cir.func @shiftvec() { + %0 = cir.alloca !cir.vector, !cir.ptr>, ["a", init] {alignment = 8 : i64} + %1 = cir.load %0 : !cir.ptr>, !cir.vector + %2 = cir.const #cir.const_vector<[#cir.int<12> : !s32i, #cir.int<12> : !s32i]> : !cir.vector + %3 = cir.shift(left, %1 : !cir.vector, %2 : !cir.vector) -> !cir.vector + cir.return + } } // CHECK: module { @@ -102,4 +110,12 @@ module { // CHECK-NEXT: cir.return // CHECK-NEXT: } +// CHECK: cir.func @shiftvec() { +// CHECK-NEXT: %0 = cir.alloca !cir.vector, !cir.ptr> +// CHECK-NEXT: %1 = cir.load %0 : !cir.ptr>, !cir.vector +// CHECK-NEXT: %2 = cir.const #cir.const_vector<[#cir.int<12> : !s32i, #cir.int<12> : !s32i]> : !cir.vector +// CHECK-NEXT: %3 = cir.shift(left, %1 : !cir.vector, %2 : !cir.vector) -> !cir.vector +// CHECK-NEXT: cir.return +// CHECK-NEXT: } + // CHECK: } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 3283b60726c4..893655d78919 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1142,7 +1142,7 @@ cir.func @bad_goto() -> () { ^bb1: cir.label "label" cir.return -} +} // ----- @@ -1328,3 +1328,34 @@ module { } +// ----- + +!s32i = !cir.int +!s16i = !cir.int +module { + cir.func @test_shift_vec() { + %0 = cir.alloca !cir.vector, !cir.ptr>, ["a", init] {alignment = 8 : i64} + %1 = cir.load %0 : !cir.ptr>, !cir.vector + %2 = cir.const #cir.int<12> : !s32i + %4 = cir.const #cir.const_vector<[#cir.int<12> : !s16i, #cir.int<12> : !s16i]> : !cir.vector + // expected-error@+1 {{'cir.shift' op input types cannot be one vector and one scalar}} + %3 = cir.shift(left, %1 : !cir.vector, %2 : !s32i) -> !cir.vector + %5 = cir.shift(left, %1 : !cir.vector, %4 : !cir.vector) -> !cir.vector + cir.return + } +} + +// ----- + +!s32i = !cir.int +!s16i = !cir.int +module { + cir.func @test_shift_vec2() { + %0 = cir.alloca !cir.vector, !cir.ptr>, ["a", init] {alignment = 8 : i64} + %1 = cir.load %0 : !cir.ptr>, !cir.vector + %4 = cir.const #cir.const_vector<[#cir.int<12> : !s16i, #cir.int<12> : !s16i]> : !cir.vector + // expected-error@+1 {{'cir.shift' op shift amount must have the type of the result if it is vector shift}} + %5 = cir.shift(left, %1 : !cir.vector, %4 : !cir.vector) -> !cir.vector + cir.return + } +} diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index 41b214634a20..ad8472abea01 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -220,6 +220,24 @@ void vector_int_test(int x) { // CHECK: %[[#svQ:]] = llvm.extractelement %[[#sv_a]][%[[#svP:]] : i32] : vector<4xi32> // CHECK: %[[#svR:]] = llvm.insertelement %[[#svQ]], %[[#svN]][%[[#svO]] : i64] : vector<4xi32> // CHECK: llvm.store %[[#svR]], %[[#sv_v:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr + + // Shifts + vi4 w = a << b; + // CHECK: %[[#T198:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T199:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %{{[0-9]+}} = llvm.shl %[[#T198]], %[[#T199]] : vector<4xi32> + vi4 y = a >> b; + // CHECK: %[[#T201:]] = llvm.load %[[#T3]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T202:]] = llvm.load %[[#T5]] {alignment = 16 : i64} : !llvm.ptr -> vector<4xi32> + // CHECK: %{{[0-9]+}} = llvm.ashr %[[#T201]], %[[#T202]] : vector<4xi32> + + vus2 z = { (unsigned short)x, (unsigned short)x }; + vus2 zamt = { 3, 4 }; + // CHECK: %[[#T219:]] = llvm.mlir.constant(dense<[3, 4]> : vector<2xi16>) : vector<2xi16> + // CHECK: llvm.store %[[#T219]], %[[#AMT_SAVE:]] {alignment = 4 : i64} : vector<2xi16> + // CHECK: %[[#T221:]] = llvm.load %[[#AMT_SAVE]] {alignment = 4 : i64} : !llvm.ptr -> vector<2xi16> + vus2 zzz = z >> zamt; + // CHECK: %{{[0-9]+}} = llvm.lshr %{{[0-9]+}}, %[[#T221]] : vector<2xi16> } void vector_double_test(int x, double y) { From 058671adf24edaa28ca80400e8041e4f87248033 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 18 Oct 2024 14:31:12 -0400 Subject: [PATCH 1966/2301] [CIR][CIRGen] Use Clang Codegen's skeleton in CIRGenFunction::buildBuiltinExpr (#967) This PR helps us to triage unimplemented builtins (that are target independent). There are unhandled builtins in CIR Codegen `[CIRGenFunction::buildBuiltinExpr](https://github.com/llvm/clangir/blob/4c446b3287895879da598e23164d338d04bced3e/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp#L305)`. And those builtins have implementation in [OG](https://github.com/llvm/clangir/blob/4c446b3287895879da598e23164d338d04bced3e/clang/lib/CodeGen/CGBuiltin.cpp#L2573). Currently, those builtins just are treated as LibraryCall or some other ways which eventually get failure, and failure messages are confusing. This PR address this problem by refactoring `CIRGenFunction::buildBuiltinExpr` to keep the same skeleton as OG counterpart `CodeGenFunction::EmitBuiltinExpr`, and add builtin name to NYI message --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 1175 ++++++++++++++++++----- 1 file changed, 955 insertions(+), 220 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 59dce4184c5e..6692e63f6b90 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -390,6 +390,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, ConstAlways = true; break; } + case Builtin::BI__builtin_fmaf16: + llvm_unreachable("Builtin::BI__builtin_fmaf16 NYI"); + break; default: break; } @@ -435,6 +438,36 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, if (GenerateIntrinsics) { switch (BuiltinIDIfNoAsmLabel) { + case Builtin::BIacos: + case Builtin::BIacosf: + case Builtin::BIacosl: + case Builtin::BI__builtin_acos: + case Builtin::BI__builtin_acosf: + case Builtin::BI__builtin_acosf16: + case Builtin::BI__builtin_acosl: + case Builtin::BI__builtin_acosf128: + llvm_unreachable("Builtin::BIacos like NYI"); + + case Builtin::BIasin: + case Builtin::BIasinf: + case Builtin::BIasinl: + case Builtin::BI__builtin_asin: + case Builtin::BI__builtin_asinf: + case Builtin::BI__builtin_asinf16: + case Builtin::BI__builtin_asinl: + case Builtin::BI__builtin_asinf128: + llvm_unreachable("Builtin::BIasin like NYI"); + + case Builtin::BIatan: + case Builtin::BIatanf: + case Builtin::BIatanl: + case Builtin::BI__builtin_atan: + case Builtin::BI__builtin_atanf: + case Builtin::BI__builtin_atanf16: + case Builtin::BI__builtin_atanl: + case Builtin::BI__builtin_atanf128: + llvm_unreachable("Builtin::BIatan like NYI"); + case Builtin::BIceil: case Builtin::BIceilf: case Builtin::BIceill: @@ -455,7 +488,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_copysignf16: case Builtin::BI__builtin_copysignf128: - llvm_unreachable("NYI"); + llvm_unreachable("BI__builtin_copysignf16 like NYI"); case Builtin::BIcos: case Builtin::BIcosf: @@ -468,6 +501,16 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); + case Builtin::BIcosh: + case Builtin::BIcoshf: + case Builtin::BIcoshl: + case Builtin::BI__builtin_cosh: + case Builtin::BI__builtin_coshf: + case Builtin::BI__builtin_coshf16: + case Builtin::BI__builtin_coshl: + case Builtin::BI__builtin_coshf128: + llvm_unreachable("Builtin::BIcosh like NYI"); + case Builtin::BIexp: case Builtin::BIexpf: case Builtin::BIexpl: @@ -490,6 +533,13 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); + case Builtin::BI__builtin_exp10: + case Builtin::BI__builtin_exp10f: + case Builtin::BI__builtin_exp10f16: + case Builtin::BI__builtin_exp10l: + case Builtin::BI__builtin_exp10f128: + llvm_unreachable("BI__builtin_exp10 like NYI"); + case Builtin::BIfabs: case Builtin::BIfabsf: case Builtin::BIfabsl: @@ -518,7 +568,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmaf16: case Builtin::BI__builtin_fmal: case Builtin::BI__builtin_fmaf128: - llvm_unreachable("NYI"); + llvm_unreachable("Builtin::BIfma like NYI"); case Builtin::BIfmax: case Builtin::BIfmaxf: @@ -531,7 +581,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmaxf16: case Builtin::BI__builtin_fmaxf128: - llvm_unreachable("NYI"); + llvm_unreachable("BI__builtin_fmaxf16 like NYI"); case Builtin::BIfmin: case Builtin::BIfminf: @@ -544,7 +594,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fminf16: case Builtin::BI__builtin_fminf128: - llvm_unreachable("NYI"); + llvm_unreachable("BI__builtin_fminf16 like NYI"); // fmod() is a special-case. It maps to the frem instruction rather than an // LLVM intrinsic. @@ -559,7 +609,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmodf16: case Builtin::BI__builtin_fmodf128: - llvm_unreachable("NYI"); + case Builtin::BI__builtin_elementwise_fmod: + llvm_unreachable("BI__builtin_fmodf16 like NYI"); case Builtin::BIlog: case Builtin::BIlogf: @@ -615,7 +666,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_powf16: case Builtin::BI__builtin_powf128: - llvm_unreachable("NYI"); + llvm_unreachable("BI__builtin_powf16 like NYI"); case Builtin::BIrint: case Builtin::BIrintf: @@ -637,6 +688,16 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_roundf128: return buildUnaryFPBuiltin(*this, *E); + case Builtin::BIroundeven: + case Builtin::BIroundevenf: + case Builtin::BIroundevenl: + case Builtin::BI__builtin_roundeven: + case Builtin::BI__builtin_roundevenf: + case Builtin::BI__builtin_roundevenf16: + case Builtin::BI__builtin_roundevenl: + case Builtin::BI__builtin_roundevenf128: + llvm_unreachable("Builtin::BIroundeven like NYI"); + case Builtin::BIsin: case Builtin::BIsinf: case Builtin::BIsinl: @@ -659,6 +720,29 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, assert(!MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); + case Builtin::BI__builtin_elementwise_sqrt: + llvm_unreachable("BI__builtin_elementwise_sqrt NYI"); + + case Builtin::BItan: + case Builtin::BItanf: + case Builtin::BItanl: + case Builtin::BI__builtin_tan: + case Builtin::BI__builtin_tanf: + case Builtin::BI__builtin_tanf16: + case Builtin::BI__builtin_tanl: + case Builtin::BI__builtin_tanf128: + llvm_unreachable("Builtin::BItan like NYI"); + + case Builtin::BItanh: + case Builtin::BItanhf: + case Builtin::BItanhl: + case Builtin::BI__builtin_tanh: + case Builtin::BI__builtin_tanhf: + case Builtin::BI__builtin_tanhf16: + case Builtin::BI__builtin_tanhl: + case Builtin::BI__builtin_tanhf128: + llvm_unreachable("Builtin::BItanh like NYI"); + case Builtin::BItrunc: case Builtin::BItruncf: case Builtin::BItruncl: @@ -679,7 +763,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, *this, *E); case Builtin::BI__builtin_lroundf128: - llvm_unreachable("NYI"); + llvm_unreachable("BI__builtin_lroundf128 NYI"); case Builtin::BIllround: case Builtin::BIllroundf: @@ -691,7 +775,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, *this, *E); case Builtin::BI__builtin_llroundf128: - llvm_unreachable("NYI"); + llvm_unreachable("BI__builtin_llroundf128 NYI"); case Builtin::BIlrint: case Builtin::BIlrintf: @@ -703,7 +787,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, *E); case Builtin::BI__builtin_lrintf128: - llvm_unreachable("NYI"); + llvm_unreachable("BI__builtin_lrintf128 NYI"); case Builtin::BIllrint: case Builtin::BIllrintf: @@ -715,7 +799,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, *this, *E); case Builtin::BI__builtin_llrintf128: - llvm_unreachable("NYI"); + llvm_unreachable("BI__builtin_llrintf128 NYI"); + + case Builtin::BI__builtin_ldexp: + case Builtin::BI__builtin_ldexpf: + case Builtin::BI__builtin_ldexpl: + case Builtin::BI__builtin_ldexpf16: + case Builtin::BI__builtin_ldexpf128: + llvm_unreachable("Builtin::BI__builtin_ldexp NYI"); default: break; @@ -726,6 +817,36 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, default: break; + case Builtin::BI__builtin___CFStringMakeConstantString: + case Builtin::BI__builtin___NSStringMakeConstantString: + llvm_unreachable("BI__builtin___CFStringMakeConstantString like NYI"); + + // C stdarg builtins. + case Builtin::BI__builtin_stdarg_start: + case Builtin::BI__builtin_va_start: + case Builtin::BI__va_start: + case Builtin::BI__builtin_va_end: { + buildVAStartEnd(BuiltinID == Builtin::BI__va_start + ? buildScalarExpr(E->getArg(0)) + : buildVAListRef(E->getArg(0)).getPointer(), + BuiltinID != Builtin::BI__builtin_va_end); + return {}; + } + case Builtin::BI__builtin_va_copy: { + auto dstPtr = buildVAListRef(E->getArg(0)).getPointer(); + auto srcPtr = buildVAListRef(E->getArg(1)).getPointer(); + builder.create(dstPtr.getLoc(), dstPtr, srcPtr); + return {}; + } + + case Builtin::BIabs: + case Builtin::BIlabs: + case Builtin::BIllabs: + case Builtin::BI__builtin_abs: + case Builtin::BI__builtin_labs: + case Builtin::BI__builtin_llabs: + llvm_unreachable("Builtin::BIabs like NYI"); + case Builtin::BI__builtin_complex: { mlir::Value Real = buildScalarExpr(E->getArg(0)); mlir::Value Imag = buildScalarExpr(E->getArg(1)); @@ -734,6 +855,18 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::getComplex(Complex); } + case Builtin::BI__builtin_conj: + case Builtin::BI__builtin_conjf: + case Builtin::BI__builtin_conjl: + case Builtin::BIconj: + case Builtin::BIconjf: + case Builtin::BIconjl: { + mlir::Value ComplexVal = buildComplexExpr(E->getArg(0)); + mlir::Value Conj = builder.createUnaryOp( + getLoc(E->getExprLoc()), mlir::cir::UnaryOpKind::Not, ComplexVal); + return RValue::getComplex(Conj); + } + case Builtin::BI__builtin_creal: case Builtin::BI__builtin_crealf: case Builtin::BI__builtin_creall: @@ -746,6 +879,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(Real); } + case Builtin::BI__builtin_preserve_access_index: + llvm_unreachable("Builtin::BI__builtin_preserve_access_index NYI"); + case Builtin::BI__builtin_cimag: case Builtin::BI__builtin_cimagf: case Builtin::BI__builtin_cimagl: @@ -758,45 +894,53 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(Real); } - case Builtin::BI__builtin_conj: - case Builtin::BI__builtin_conjf: - case Builtin::BI__builtin_conjl: - case Builtin::BIconj: - case Builtin::BIconjf: - case Builtin::BIconjl: { - mlir::Value ComplexVal = buildComplexExpr(E->getArg(0)); - mlir::Value Conj = builder.createUnaryOp( - getLoc(E->getExprLoc()), mlir::cir::UnaryOpKind::Not, ComplexVal); - return RValue::getComplex(Conj); - } + case Builtin::BI__builtin_clrsb: + case Builtin::BI__builtin_clrsbl: + case Builtin::BI__builtin_clrsbll: + return buildBuiltinBitOp(*this, E, std::nullopt); - case Builtin::BI__builtin___CFStringMakeConstantString: - case Builtin::BI__builtin___NSStringMakeConstantString: - llvm_unreachable("NYI"); + case Builtin::BI__builtin_ctzs: + case Builtin::BI__builtin_ctz: + case Builtin::BI__builtin_ctzl: + case Builtin::BI__builtin_ctzll: + case Builtin::BI__builtin_ctzg: + return buildBuiltinBitOp(*this, E, BCK_CTZPassedZero); - case Builtin::BIprintf: - if (getTarget().getTriple().isNVPTX() || - getTarget().getTriple().isAMDGCN()) { - llvm_unreachable("NYI"); - } - break; + case Builtin::BI__builtin_clzs: + case Builtin::BI__builtin_clz: + case Builtin::BI__builtin_clzl: + case Builtin::BI__builtin_clzll: + case Builtin::BI__builtin_clzg: + return buildBuiltinBitOp(*this, E, BCK_CLZPassedZero); - // C stdarg builtins. - case Builtin::BI__builtin_stdarg_start: - case Builtin::BI__builtin_va_start: - case Builtin::BI__va_start: - case Builtin::BI__builtin_va_end: { - buildVAStartEnd(BuiltinID == Builtin::BI__va_start - ? buildScalarExpr(E->getArg(0)) - : buildVAListRef(E->getArg(0)).getPointer(), - BuiltinID != Builtin::BI__builtin_va_end); - return {}; - } - case Builtin::BI__builtin_va_copy: { - auto dstPtr = buildVAListRef(E->getArg(0)).getPointer(); - auto srcPtr = buildVAListRef(E->getArg(1)).getPointer(); - builder.create(dstPtr.getLoc(), dstPtr, srcPtr); - return {}; + case Builtin::BI__builtin_ffs: + case Builtin::BI__builtin_ffsl: + case Builtin::BI__builtin_ffsll: + return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__builtin_parity: + case Builtin::BI__builtin_parityl: + case Builtin::BI__builtin_parityll: + return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__lzcnt16: + case Builtin::BI__lzcnt: + case Builtin::BI__lzcnt64: + llvm_unreachable("BI__lzcnt16 like NYI"); + + case Builtin::BI__popcnt16: + case Builtin::BI__popcnt: + case Builtin::BI__popcnt64: + case Builtin::BI__builtin_popcount: + case Builtin::BI__builtin_popcountl: + case Builtin::BI__builtin_popcountll: + case Builtin::BI__builtin_popcountg: + return buildBuiltinBitOp(*this, E, std::nullopt); + + case Builtin::BI__builtin_unpredictable: { + if (CGM.getCodeGenOpts().OptimizationLevel != 0) + assert(!MissingFeatures::insertBuiltinUnpredictable()); + return RValue::get(buildScalarExpr(E->getArg(0))); } case Builtin::BI__builtin_expect: @@ -831,11 +975,6 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(result); } - case Builtin::BI__builtin_unpredictable: { - if (CGM.getCodeGenOpts().OptimizationLevel != 0) - assert(!MissingFeatures::insertBuiltinUnpredictable()); - return RValue::get(buildScalarExpr(E->getArg(0))); - } case Builtin::BI__builtin_assume_aligned: { const Expr *ptr = E->getArg(0); @@ -875,174 +1014,11 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(nullptr); } - case Builtin::BI__builtin_prefetch: { - auto evaluateOperandAsInt = [&](const Expr *Arg) { - Expr::EvalResult Res; - [[maybe_unused]] bool EvalSucceed = - Arg->EvaluateAsInt(Res, CGM.getASTContext()); - assert(EvalSucceed && "expression should be able to evaluate as int"); - return Res.Val.getInt().getZExtValue(); - }; - - bool IsWrite = false; - if (E->getNumArgs() > 1) - IsWrite = evaluateOperandAsInt(E->getArg(1)); - - int Locality = 0; - if (E->getNumArgs() > 2) - Locality = evaluateOperandAsInt(E->getArg(2)); - - mlir::Value Address = buildScalarExpr(E->getArg(0)); - builder.create(getLoc(E->getSourceRange()), Address, - Locality, IsWrite); - return RValue::get(nullptr); - } - - case Builtin::BI__builtin___clear_cache: { - mlir::Type voidTy = mlir::cir::VoidType::get(builder.getContext()); - mlir::Value begin = - builder.createPtrBitcast(buildScalarExpr(E->getArg(0)), voidTy); - mlir::Value end = - builder.createPtrBitcast(buildScalarExpr(E->getArg(1)), voidTy); - builder.create(getLoc(E->getSourceRange()), begin, - end); - return RValue::get(nullptr); - } - - // C++ std:: builtins. - case Builtin::BImove: - case Builtin::BImove_if_noexcept: - case Builtin::BIforward: - case Builtin::BIas_const: - return RValue::get(buildLValue(E->getArg(0)).getPointer()); - case Builtin::BI__GetExceptionInfo: { - llvm_unreachable("NYI"); - } - - case Builtin::BI__fastfail: - llvm_unreachable("NYI"); - - case Builtin::BI__builtin_coro_id: - case Builtin::BI__builtin_coro_promise: - case Builtin::BI__builtin_coro_resume: - case Builtin::BI__builtin_coro_noop: - case Builtin::BI__builtin_coro_destroy: - case Builtin::BI__builtin_coro_done: - case Builtin::BI__builtin_coro_alloc: - case Builtin::BI__builtin_coro_begin: - case Builtin::BI__builtin_coro_end: - case Builtin::BI__builtin_coro_suspend: - case Builtin::BI__builtin_coro_align: - llvm_unreachable("NYI"); - - case Builtin::BI__builtin_coro_frame: { - return buildCoroutineFrame(); - } - case Builtin::BI__builtin_coro_free: - case Builtin::BI__builtin_coro_size: { - GlobalDecl gd{FD}; - mlir::Type ty = CGM.getTypes().GetFunctionType( - CGM.getTypes().arrangeGlobalDeclaration(GD)); - const auto *ND = cast(GD.getDecl()); - auto fnOp = - CGM.GetOrCreateCIRFunction(ND->getName(), ty, gd, /*ForVTable=*/false, - /*DontDefer=*/false); - fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); - return buildCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), - E, ReturnValue); - } - case Builtin::BI__builtin_dynamic_object_size: { - // Fallthrough below, assert until we have a testcase. - llvm_unreachable("NYI"); - } - case Builtin::BI__builtin_object_size: { - unsigned Type = - E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); - auto ResType = - mlir::dyn_cast(ConvertType(E->getType())); - assert(ResType && "not sure what to do?"); - - // We pass this builtin onto the optimizer so that it can figure out the - // object size in more complex cases. - bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size; - return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, - /*EmittedE=*/nullptr, IsDynamic)); - } - case Builtin::BI__builtin_unreachable: { - buildUnreachable(E->getExprLoc()); - - // We do need to preserve an insertion point. - builder.createBlock(builder.getBlock()->getParent()); - - return RValue::get(nullptr); - } - case Builtin::BI__builtin_trap: { - builder.create(getLoc(E->getExprLoc())); - - // Note that cir.trap is a terminator so we need to start a new block to - // preserve the insertion point. - builder.createBlock(builder.getBlock()->getParent()); - - return RValue::get(nullptr); - } - case Builtin::BImemcpy: - case Builtin::BI__builtin_memcpy: - case Builtin::BImempcpy: - case Builtin::BI__builtin_mempcpy: { - Address Dest = buildPointerWithAlignment(E->getArg(0)); - Address Src = buildPointerWithAlignment(E->getArg(1)); - mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); - buildNonNullArgCheck(RValue::get(Dest.getPointer()), - E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), - FD, 0); - buildNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), - E->getArg(1)->getExprLoc(), FD, 1); - builder.createMemCpy(getLoc(E->getSourceRange()), Dest.getPointer(), - Src.getPointer(), SizeVal); - if (BuiltinID == Builtin::BImempcpy || - BuiltinID == Builtin::BI__builtin_mempcpy) - llvm_unreachable("mempcpy is NYI"); - else - return RValue::get(Dest.getPointer()); - } - - case Builtin::BI__builtin_clrsb: - case Builtin::BI__builtin_clrsbl: - case Builtin::BI__builtin_clrsbll: - return buildBuiltinBitOp(*this, E, std::nullopt); - - case Builtin::BI__builtin_ctzs: - case Builtin::BI__builtin_ctz: - case Builtin::BI__builtin_ctzl: - case Builtin::BI__builtin_ctzll: - case Builtin::BI__builtin_ctzg: - return buildBuiltinBitOp(*this, E, BCK_CTZPassedZero); - - case Builtin::BI__builtin_clzs: - case Builtin::BI__builtin_clz: - case Builtin::BI__builtin_clzl: - case Builtin::BI__builtin_clzll: - case Builtin::BI__builtin_clzg: - return buildBuiltinBitOp(*this, E, BCK_CLZPassedZero); - - case Builtin::BI__builtin_ffs: - case Builtin::BI__builtin_ffsl: - case Builtin::BI__builtin_ffsll: - return buildBuiltinBitOp(*this, E, std::nullopt); - - case Builtin::BI__builtin_parity: - case Builtin::BI__builtin_parityl: - case Builtin::BI__builtin_parityll: - return buildBuiltinBitOp(*this, E, std::nullopt); + case Builtin::BI__builtin_allow_runtime_check: + llvm_unreachable("BI__builtin_allow_runtime_check NYI"); - case Builtin::BI__popcnt16: - case Builtin::BI__popcnt: - case Builtin::BI__popcnt64: - case Builtin::BI__builtin_popcount: - case Builtin::BI__builtin_popcountl: - case Builtin::BI__builtin_popcountll: - case Builtin::BI__builtin_popcountg: - return buildBuiltinBitOp(*this, E, std::nullopt); + case Builtin::BI__arithmetic_fence: + llvm_unreachable("BI__arithmetic_fence NYI"); case Builtin::BI__builtin_bswap16: case Builtin::BI__builtin_bswap32: @@ -1055,6 +1031,12 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, getLoc(E->getSourceRange()), arg)); } + case Builtin::BI__builtin_bitreverse8: + case Builtin::BI__builtin_bitreverse16: + case Builtin::BI__builtin_bitreverse32: + case Builtin::BI__builtin_bitreverse64: + llvm_unreachable("BI__builtin_bitreverse8 like NYI"); + case Builtin::BI__builtin_rotateleft8: case Builtin::BI__builtin_rotateleft16: case Builtin::BI__builtin_rotateleft32: @@ -1113,6 +1095,251 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(Result); } + case Builtin::BI__builtin_dynamic_object_size: { + // Fallthrough below, assert until we have a testcase. + llvm_unreachable("BI__builtin_dynamic_object_size NYI"); + } + case Builtin::BI__builtin_object_size: { + unsigned Type = + E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); + auto ResType = + mlir::dyn_cast(ConvertType(E->getType())); + assert(ResType && "not sure what to do?"); + + // We pass this builtin onto the optimizer so that it can figure out the + // object size in more complex cases. + bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size; + return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, + /*EmittedE=*/nullptr, IsDynamic)); + } + + case Builtin::BI__builtin_prefetch: { + auto evaluateOperandAsInt = [&](const Expr *Arg) { + Expr::EvalResult Res; + [[maybe_unused]] bool EvalSucceed = + Arg->EvaluateAsInt(Res, CGM.getASTContext()); + assert(EvalSucceed && "expression should be able to evaluate as int"); + return Res.Val.getInt().getZExtValue(); + }; + + bool IsWrite = false; + if (E->getNumArgs() > 1) + IsWrite = evaluateOperandAsInt(E->getArg(1)); + + int Locality = 0; + if (E->getNumArgs() > 2) + Locality = evaluateOperandAsInt(E->getArg(2)); + + mlir::Value Address = buildScalarExpr(E->getArg(0)); + builder.create(getLoc(E->getSourceRange()), Address, + Locality, IsWrite); + return RValue::get(nullptr); + } + case Builtin::BI__builtin_readcyclecounter: + llvm_unreachable("BI__builtin_readcyclecounter NYI"); + case Builtin::BI__builtin_readsteadycounter: + llvm_unreachable("BI__builtin_readsteadycounter NYI"); + + case Builtin::BI__builtin___clear_cache: { + mlir::Type voidTy = mlir::cir::VoidType::get(builder.getContext()); + mlir::Value begin = + builder.createPtrBitcast(buildScalarExpr(E->getArg(0)), voidTy); + mlir::Value end = + builder.createPtrBitcast(buildScalarExpr(E->getArg(1)), voidTy); + builder.create(getLoc(E->getSourceRange()), begin, + end); + return RValue::get(nullptr); + } + case Builtin::BI__builtin_trap: { + builder.create(getLoc(E->getExprLoc())); + + // Note that cir.trap is a terminator so we need to start a new block to + // preserve the insertion point. + builder.createBlock(builder.getBlock()->getParent()); + + return RValue::get(nullptr); + } + case Builtin::BI__builtin_verbose_trap: + llvm_unreachable("BI__builtin_verbose_trap NYI"); + case Builtin::BI__debugbreak: + llvm_unreachable("BI__debugbreak NYI"); + case Builtin::BI__builtin_unreachable: { + buildUnreachable(E->getExprLoc()); + + // We do need to preserve an insertion point. + builder.createBlock(builder.getBlock()->getParent()); + + return RValue::get(nullptr); + } + + case Builtin::BI__builtin_powi: + case Builtin::BI__builtin_powif: + case Builtin::BI__builtin_powil: + llvm_unreachable("BI__builtin_powi like NYI"); + + case Builtin::BI__builtin_frexp: + case Builtin::BI__builtin_frexpf: + case Builtin::BI__builtin_frexpf128: + case Builtin::BI__builtin_frexpf16: + llvm_unreachable("BI__builtin_frexp like NYI"); + + case Builtin::BI__builtin_isgreater: + case Builtin::BI__builtin_isgreaterequal: + case Builtin::BI__builtin_isless: + case Builtin::BI__builtin_islessequal: + case Builtin::BI__builtin_islessgreater: + case Builtin::BI__builtin_isunordered: + llvm_unreachable("BI__builtin_isgreater and BI__builtin_isless like NYI"); + + case Builtin::BI__builtin_isnan: + llvm_unreachable("BI__builtin_isnan NYI"); + + case Builtin::BI__builtin_issignaling: + llvm_unreachable("BI__builtin_issignaling NYI"); + + case Builtin::BI__builtin_isinf: + llvm_unreachable("BI__builtin_isinf NYI"); + + case Builtin::BIfinite: + case Builtin::BI__finite: + case Builtin::BIfinitef: + case Builtin::BI__finitef: + case Builtin::BIfinitel: + case Builtin::BI__finitel: + case Builtin::BI__builtin_isfinite: + llvm_unreachable("Builtin::BIfinite like NYI"); + + case Builtin::BI__builtin_isnormal: + llvm_unreachable("BI__builtin_isnormal NYI"); + + case Builtin::BI__builtin_issubnormal: + llvm_unreachable("BI__builtin_issubnormal NYI"); + + case Builtin::BI__builtin_iszero: + llvm_unreachable("BI__builtin_iszero NYI"); + + case Builtin::BI__builtin_isfpclass: + llvm_unreachable("BI__builtin_isfpclass NYI"); + + case Builtin::BI__builtin_nondeterministic_value: + llvm_unreachable("BI__builtin_nondeterministic_value NYI"); + + case Builtin::BI__builtin_elementwise_abs: + llvm_unreachable("BI__builtin_elementwise_abs NYI"); + + case Builtin::BI__builtin_elementwise_acos: + llvm_unreachable("BI__builtin_elementwise_acos NYI"); + case Builtin::BI__builtin_elementwise_asin: + llvm_unreachable("BI__builtin_elementwise_asin NYI"); + case Builtin::BI__builtin_elementwise_atan: + llvm_unreachable("BI__builtin_elementwise_atan NYI"); + case Builtin::BI__builtin_elementwise_atan2: + llvm_unreachable("BI__builtin_elementwise_atan2 NYI"); + case Builtin::BI__builtin_elementwise_ceil: + llvm_unreachable("BI__builtin_elementwise_ceil NYI"); + case Builtin::BI__builtin_elementwise_exp: + llvm_unreachable("BI__builtin_elementwise_exp NYI"); + case Builtin::BI__builtin_elementwise_exp2: + llvm_unreachable("BI__builtin_elementwise_exp2 NYI"); + case Builtin::BI__builtin_elementwise_log: + llvm_unreachable("BI__builtin_elementwise_log NYI"); + case Builtin::BI__builtin_elementwise_log2: + llvm_unreachable("BI__builtin_elementwise_log2 NYI"); + case Builtin::BI__builtin_elementwise_log10: + llvm_unreachable("BI__builtin_elementwise_log10 NYI"); + case Builtin::BI__builtin_elementwise_pow: + llvm_unreachable("BI__builtin_elementwise_pow NYI"); + case Builtin::BI__builtin_elementwise_bitreverse: + llvm_unreachable("BI__builtin_elementwise_bitreverse NYI"); + case Builtin::BI__builtin_elementwise_cos: + llvm_unreachable("BI__builtin_elementwise_cos NYI"); + case Builtin::BI__builtin_elementwise_cosh: + llvm_unreachable("BI__builtin_elementwise_cosh NYI"); + case Builtin::BI__builtin_elementwise_floor: + llvm_unreachable("BI__builtin_elementwise_floor NYI"); + case Builtin::BI__builtin_elementwise_popcount: + llvm_unreachable("BI__builtin_elementwise_popcount NYI"); + case Builtin::BI__builtin_elementwise_roundeven: + llvm_unreachable("BI__builtin_elementwise_roundeven NYI"); + case Builtin::BI__builtin_elementwise_round: + llvm_unreachable("BI__builtin_elementwise_round NYI"); + case Builtin::BI__builtin_elementwise_rint: + llvm_unreachable("BI__builtin_elementwise_rint NYI"); + case Builtin::BI__builtin_elementwise_nearbyint: + llvm_unreachable("BI__builtin_elementwise_nearbyint NYI"); + case Builtin::BI__builtin_elementwise_sin: + llvm_unreachable("BI__builtin_elementwise_sin NYI"); + case Builtin::BI__builtin_elementwise_sinh: + llvm_unreachable("BI__builtin_elementwise_sinh NYI"); + case Builtin::BI__builtin_elementwise_tan: + llvm_unreachable("BI__builtin_elementwise_tan NYI"); + case Builtin::BI__builtin_elementwise_tanh: + llvm_unreachable("BI__builtin_elementwise_tanh NYI"); + case Builtin::BI__builtin_elementwise_trunc: + llvm_unreachable("BI__builtin_elementwise_trunc NYI"); + case Builtin::BI__builtin_elementwise_canonicalize: + llvm_unreachable("BI__builtin_elementwise_canonicalize NYI"); + case Builtin::BI__builtin_elementwise_copysign: + llvm_unreachable("BI__builtin_elementwise_copysign NYI"); + case Builtin::BI__builtin_elementwise_fma: + llvm_unreachable("BI__builtin_elementwise_fma NYI"); + case Builtin::BI__builtin_elementwise_add_sat: + case Builtin::BI__builtin_elementwise_sub_sat: + llvm_unreachable("BI__builtin_elementwise_add/sub_sat NYI"); + + case Builtin::BI__builtin_elementwise_max: + llvm_unreachable("BI__builtin_elementwise_max NYI"); + case Builtin::BI__builtin_elementwise_min: + llvm_unreachable("BI__builtin_elementwise_min NYI"); + + case Builtin::BI__builtin_elementwise_maximum: + llvm_unreachable("BI__builtin_elementwise_maximum NYI"); + + case Builtin::BI__builtin_elementwise_minimum: + llvm_unreachable("BI__builtin_elementwise_minimum NYI"); + + case Builtin::BI__builtin_reduce_max: + llvm_unreachable("BI__builtin_reduce_max NYI"); + + case Builtin::BI__builtin_reduce_min: + llvm_unreachable("BI__builtin_reduce_min NYI"); + + case Builtin::BI__builtin_reduce_add: + llvm_unreachable("BI__builtin_reduce_add NYI"); + case Builtin::BI__builtin_reduce_mul: + llvm_unreachable("BI__builtin_reduce_mul NYI"); + case Builtin::BI__builtin_reduce_xor: + llvm_unreachable("BI__builtin_reduce_xor NYI"); + case Builtin::BI__builtin_reduce_or: + llvm_unreachable("BI__builtin_reduce_or NYI"); + case Builtin::BI__builtin_reduce_and: + llvm_unreachable("BI__builtin_reduce_and NYI"); + case Builtin::BI__builtin_reduce_maximum: + llvm_unreachable("BI__builtin_reduce_maximum NYI"); + case Builtin::BI__builtin_reduce_minimum: + llvm_unreachable("BI__builtin_reduce_minimum NYI"); + + case Builtin::BI__builtin_matrix_transpose: + llvm_unreachable("BI__builtin_matrix_transpose NYI"); + + case Builtin::BI__builtin_matrix_column_major_load: + llvm_unreachable("BI__builtin_matrix_column_major_load NYI"); + + case Builtin::BI__builtin_matrix_column_major_store: + llvm_unreachable("BI__builtin_matrix_column_major_store NYI"); + + case Builtin::BI__builtin_isinf_sign: + llvm_unreachable("BI__builtin_isinf_sign NYI"); + + case Builtin::BI__builtin_flt_rounds: + llvm_unreachable("BI__builtin_flt_rounds NYI"); + + case Builtin::BI__builtin_set_flt_rounds: + llvm_unreachable("BI__builtin_set_flt_rounds NYI"); + + case Builtin::BI__builtin_fpclassify: + llvm_unreachable("BI__builtin_fpclassify NYI"); + case Builtin::BIalloca: case Builtin::BI_alloca: case Builtin::BI__builtin_alloca_uninitialized: @@ -1157,8 +1384,118 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, builder.createBitcast(AllocaAddr, builder.getVoidPtrTy())); } + case Builtin::BI__builtin_alloca_with_align_uninitialized: + case Builtin::BI__builtin_alloca_with_align: + llvm_unreachable("BI__builtin_alloca_with_align like NYI"); + + case Builtin::BIbzero: + case Builtin::BI__builtin_bzero: + llvm_unreachable("BIbzero like NYI"); + + case Builtin::BIbcopy: + case Builtin::BI__builtin_bcopy: + llvm_unreachable("BIbcopy like NYI"); + + case Builtin::BImemcpy: + case Builtin::BI__builtin_memcpy: + case Builtin::BImempcpy: + case Builtin::BI__builtin_mempcpy: { + Address Dest = buildPointerWithAlignment(E->getArg(0)); + Address Src = buildPointerWithAlignment(E->getArg(1)); + mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); + buildNonNullArgCheck(RValue::get(Dest.getPointer()), + E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), + FD, 0); + buildNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), + E->getArg(1)->getExprLoc(), FD, 1); + builder.createMemCpy(getLoc(E->getSourceRange()), Dest.getPointer(), + Src.getPointer(), SizeVal); + if (BuiltinID == Builtin::BImempcpy || + BuiltinID == Builtin::BI__builtin_mempcpy) + llvm_unreachable("mempcpy is NYI"); + else + return RValue::get(Dest.getPointer()); + } + + case Builtin::BI__builtin_memcpy_inline: + llvm_unreachable("BI__builtin_memcpy_inline NYI"); + + case Builtin::BI__builtin_char_memchr: + llvm_unreachable("BI__builtin_char_memchr NYI"); + + case Builtin::BI__builtin___memcpy_chk: + llvm_unreachable("BI__builtin___memcpy_chk NYI"); + + case Builtin::BI__builtin_objc_memmove_collectable: + llvm_unreachable("BI__builtin_objc_memmove_collectable NYI"); + + case Builtin::BI__builtin___memmove_chk: + llvm_unreachable("BI__builtin___memmove_chk NYI"); + + case Builtin::BImemmove: + case Builtin::BI__builtin_memmove: + llvm_unreachable("BImemmove like NYI"); + + case Builtin::BImemset: + case Builtin::BI__builtin_memset: + llvm_unreachable("BImemset like NYI"); + + case Builtin::BI__builtin_memset_inline: + llvm_unreachable("BI__builtin_memset_inline NYI"); + case Builtin::BI__builtin___memset_chk: + llvm_unreachable("BI__builtin___memset_chk NYI"); + case Builtin::BI__builtin_wmemchr: + llvm_unreachable("BI__builtin_wmemchr NYI"); + case Builtin::BI__builtin_wmemcmp: + llvm_unreachable("BI__builtin_wmemcmp NYI"); + case Builtin::BI__builtin_dwarf_cfa: + llvm_unreachable("BI__builtin_dwarf_cfa NYI"); + case Builtin::BI__builtin_return_address: + llvm_unreachable("BI__builtin_return_address NYI"); + case Builtin::BI_ReturnAddress: + llvm_unreachable("BI_ReturnAddress NYI"); + case Builtin::BI__builtin_frame_address: + llvm_unreachable("BI__builtin_frame_address NYI"); + case Builtin::BI__builtin_extract_return_addr: + llvm_unreachable("BI__builtin_extract_return_addr NYI"); + case Builtin::BI__builtin_frob_return_addr: + llvm_unreachable("BI__builtin_frob_return_addr NYI"); + case Builtin::BI__builtin_dwarf_sp_column: + llvm_unreachable("BI__builtin_dwarf_sp_column NYI"); + case Builtin::BI__builtin_init_dwarf_reg_size_table: + llvm_unreachable("BI__builtin_init_dwarf_reg_size_table NYI"); + case Builtin::BI__builtin_eh_return: + llvm_unreachable("BI__builtin_eh_return NYI"); + case Builtin::BI__builtin_unwind_init: + llvm_unreachable("BI__builtin_unwind_init NYI"); + case Builtin::BI__builtin_extend_pointer: + llvm_unreachable("BI__builtin_extend_pointer NYI"); + case Builtin::BI__builtin_setjmp: + llvm_unreachable("BI__builtin_setjmp NYI"); + case Builtin::BI__builtin_longjmp: + llvm_unreachable("BI__builtin_longjmp NYI"); + case Builtin::BI__builtin_launder: + llvm_unreachable("BI__builtin_launder NYI"); + case Builtin::BI__sync_fetch_and_add: + case Builtin::BI__sync_fetch_and_sub: + case Builtin::BI__sync_fetch_and_or: + case Builtin::BI__sync_fetch_and_and: + case Builtin::BI__sync_fetch_and_xor: + case Builtin::BI__sync_fetch_and_nand: + case Builtin::BI__sync_add_and_fetch: + case Builtin::BI__sync_sub_and_fetch: + case Builtin::BI__sync_and_and_fetch: + case Builtin::BI__sync_or_and_fetch: + case Builtin::BI__sync_xor_and_fetch: + case Builtin::BI__sync_nand_and_fetch: + case Builtin::BI__sync_val_compare_and_swap: + case Builtin::BI__sync_bool_compare_and_swap: + case Builtin::BI__sync_lock_test_and_set: + case Builtin::BI__sync_lock_release: + case Builtin::BI__sync_swap: llvm_unreachable("Shouldn't make it through sema"); + case Builtin::BI__sync_fetch_and_add_1: case Builtin::BI__sync_fetch_and_add_2: case Builtin::BI__sync_fetch_and_add_4: @@ -1167,8 +1504,6 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return buildBinaryAtomic(*this, mlir::cir::AtomicFetchKind::Add, E); } - case Builtin::BI__sync_fetch_and_sub: - llvm_unreachable("Shouldn't make it through sema"); case Builtin::BI__sync_fetch_and_sub_1: case Builtin::BI__sync_fetch_and_sub_2: case Builtin::BI__sync_fetch_and_sub_4: @@ -1177,6 +1512,83 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return buildBinaryAtomic(*this, mlir::cir::AtomicFetchKind::Sub, E); } + case Builtin::BI__sync_fetch_and_or_1: + case Builtin::BI__sync_fetch_and_or_2: + case Builtin::BI__sync_fetch_and_or_4: + case Builtin::BI__sync_fetch_and_or_8: + case Builtin::BI__sync_fetch_and_or_16: + llvm_unreachable("BI__sync_fetch_and_or NYI"); + case Builtin::BI__sync_fetch_and_and_1: + case Builtin::BI__sync_fetch_and_and_2: + case Builtin::BI__sync_fetch_and_and_4: + case Builtin::BI__sync_fetch_and_and_8: + case Builtin::BI__sync_fetch_and_and_16: + llvm_unreachable("BI__sync_fetch_and_and NYI"); + case Builtin::BI__sync_fetch_and_xor_1: + case Builtin::BI__sync_fetch_and_xor_2: + case Builtin::BI__sync_fetch_and_xor_4: + case Builtin::BI__sync_fetch_and_xor_8: + case Builtin::BI__sync_fetch_and_xor_16: + llvm_unreachable("BI__sync_fetch_and_xor NYI"); + case Builtin::BI__sync_fetch_and_nand_1: + case Builtin::BI__sync_fetch_and_nand_2: + case Builtin::BI__sync_fetch_and_nand_4: + case Builtin::BI__sync_fetch_and_nand_8: + case Builtin::BI__sync_fetch_and_nand_16: + llvm_unreachable("BI__sync_fetch_and_nand NYI"); + + // Clang extensions: not overloaded yet. + case Builtin::BI__sync_fetch_and_min: + llvm_unreachable("BI__sync_fetch_and_min NYI"); + case Builtin::BI__sync_fetch_and_max: + llvm_unreachable("BI__sync_fetch_and_max NYI"); + case Builtin::BI__sync_fetch_and_umin: + llvm_unreachable("BI__sync_fetch_and_umin NYI"); + case Builtin::BI__sync_fetch_and_umax: + llvm_unreachable("BI__sync_fetch_and_umax NYI"); + + case Builtin::BI__sync_add_and_fetch_1: + case Builtin::BI__sync_add_and_fetch_2: + case Builtin::BI__sync_add_and_fetch_4: + case Builtin::BI__sync_add_and_fetch_8: + case Builtin::BI__sync_add_and_fetch_16: + llvm_unreachable("BI__sync_add_and_fetch like NYI"); + + case Builtin::BI__sync_sub_and_fetch_1: + case Builtin::BI__sync_sub_and_fetch_2: + case Builtin::BI__sync_sub_and_fetch_4: + case Builtin::BI__sync_sub_and_fetch_8: + case Builtin::BI__sync_sub_and_fetch_16: + llvm_unreachable("BI__sync_sub_and_fetch like NYI"); + + case Builtin::BI__sync_and_and_fetch_1: + case Builtin::BI__sync_and_and_fetch_2: + case Builtin::BI__sync_and_and_fetch_4: + case Builtin::BI__sync_and_and_fetch_8: + case Builtin::BI__sync_and_and_fetch_16: + llvm_unreachable("BI__sync_and_and_fetch like NYI"); + + case Builtin::BI__sync_or_and_fetch_1: + case Builtin::BI__sync_or_and_fetch_2: + case Builtin::BI__sync_or_and_fetch_4: + case Builtin::BI__sync_or_and_fetch_8: + case Builtin::BI__sync_or_and_fetch_16: + llvm_unreachable("BI__sync_or_and_fetch like NYI"); + + case Builtin::BI__sync_xor_and_fetch_1: + case Builtin::BI__sync_xor_and_fetch_2: + case Builtin::BI__sync_xor_and_fetch_4: + case Builtin::BI__sync_xor_and_fetch_8: + case Builtin::BI__sync_xor_and_fetch_16: + llvm_unreachable("BI__sync_xor_and_fetch like NYI"); + + case Builtin::BI__sync_nand_and_fetch_1: + case Builtin::BI__sync_nand_and_fetch_2: + case Builtin::BI__sync_nand_and_fetch_4: + case Builtin::BI__sync_nand_and_fetch_8: + case Builtin::BI__sync_nand_and_fetch_16: + llvm_unreachable("BI__sync_nand_and_fetch like NYI"); + case Builtin::BI__sync_val_compare_and_swap_1: case Builtin::BI__sync_val_compare_and_swap_2: case Builtin::BI__sync_val_compare_and_swap_4: @@ -1191,6 +1603,71 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_bool_compare_and_swap_16: return RValue::get(MakeAtomicCmpXchgValue(*this, E, true)); + case Builtin::BI__sync_swap_1: + case Builtin::BI__sync_swap_2: + case Builtin::BI__sync_swap_4: + case Builtin::BI__sync_swap_8: + case Builtin::BI__sync_swap_16: + llvm_unreachable("BI__sync_swap1 like NYI"); + + case Builtin::BI__sync_lock_test_and_set_1: + case Builtin::BI__sync_lock_test_and_set_2: + case Builtin::BI__sync_lock_test_and_set_4: + case Builtin::BI__sync_lock_test_and_set_8: + case Builtin::BI__sync_lock_test_and_set_16: + llvm_unreachable("BI__sync_lock_test_and_set_1 like NYI"); + + case Builtin::BI__sync_lock_release_1: + case Builtin::BI__sync_lock_release_2: + case Builtin::BI__sync_lock_release_4: + case Builtin::BI__sync_lock_release_8: + case Builtin::BI__sync_lock_release_16: + llvm_unreachable("BI__sync_lock_release_1 like NYI"); + + case Builtin::BI__sync_synchronize: + llvm_unreachable("BI__sync_synchronize NYI"); + case Builtin::BI__builtin_nontemporal_load: + llvm_unreachable("BI__builtin_nontemporal_load NYI"); + case Builtin::BI__builtin_nontemporal_store: + llvm_unreachable("BI__builtin_nontemporal_store NYI"); + case Builtin::BI__c11_atomic_is_lock_free: + llvm_unreachable("BI__c11_atomic_is_lock_free NYI"); + case Builtin::BI__atomic_is_lock_free: + llvm_unreachable("BI__atomic_is_lock_free NYI"); + case Builtin::BI__atomic_test_and_set: + llvm_unreachable("BI__atomic_test_and_set NYI"); + case Builtin::BI__atomic_clear: + llvm_unreachable("BI__atomic_clear NYI"); + + case Builtin::BI__atomic_thread_fence: + case Builtin::BI__atomic_signal_fence: + case Builtin::BI__c11_atomic_thread_fence: + case Builtin::BI__c11_atomic_signal_fence: + llvm_unreachable("BI__atomic_thread_fence like NYI"); + + case Builtin::BI__builtin_signbit: + case Builtin::BI__builtin_signbitf: + case Builtin::BI__builtin_signbitl: + llvm_unreachable("BI__builtin_signbit like NYI"); + + case Builtin::BI__warn_memset_zero_len: + llvm_unreachable("BI__warn_memset_zero_len NYI"); + case Builtin::BI__annotation: + llvm_unreachable("BI__annotation NYI"); + case Builtin::BI__builtin_annotation: + llvm_unreachable("BI__builtin_annotation NYI"); + case Builtin::BI__builtin_addcb: + case Builtin::BI__builtin_addcs: + case Builtin::BI__builtin_addc: + case Builtin::BI__builtin_addcl: + case Builtin::BI__builtin_addcll: + case Builtin::BI__builtin_subcb: + case Builtin::BI__builtin_subcs: + case Builtin::BI__builtin_subc: + case Builtin::BI__builtin_subcl: + case Builtin::BI__builtin_subcll: + llvm_unreachable("BI__builtin_addcb like NYI"); + case Builtin::BI__builtin_add_overflow: case Builtin::BI__builtin_sub_overflow: case Builtin::BI__builtin_mul_overflow: { @@ -1341,10 +1818,268 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(ArithResult.overflow); } + case Builtin::BIaddressof: case Builtin::BI__addressof: case Builtin::BI__builtin_addressof: return RValue::get(buildLValue(E->getArg(0)).getPointer()); + case Builtin::BI__builtin_function_start: + llvm_unreachable("BI__builtin_function_start NYI"); + case Builtin::BI__builtin_operator_new: + llvm_unreachable("BI__builtin_operator_new NYI"); + case Builtin::BI__builtin_operator_delete: + llvm_unreachable("BI__builtin_operator_delete NYI"); + case Builtin::BI__builtin_is_aligned: + llvm_unreachable("BI__builtin_is_aligned NYI"); + case Builtin::BI__builtin_align_up: + llvm_unreachable("BI__builtin_align_up NYI"); + case Builtin::BI__builtin_align_down: + llvm_unreachable("BI__builtin_align_down NYI"); + + case Builtin::BI__noop: + // __noop always evaluates to an integer literal zero. + llvm_unreachable("BI__noop NYI"); + case Builtin::BI__builtin_call_with_static_chain: + llvm_unreachable("BI__builtin_call_with_static_chain NYI"); + case Builtin::BI_InterlockedExchange8: + case Builtin::BI_InterlockedExchange16: + case Builtin::BI_InterlockedExchange: + case Builtin::BI_InterlockedExchangePointer: + llvm_unreachable("BI_InterlockedExchange8 like NYI"); + case Builtin::BI_InterlockedCompareExchangePointer: + case Builtin::BI_InterlockedCompareExchangePointer_nf: + llvm_unreachable("BI_InterlockedCompareExchangePointer like NYI"); + case Builtin::BI_InterlockedCompareExchange8: + case Builtin::BI_InterlockedCompareExchange16: + case Builtin::BI_InterlockedCompareExchange: + case Builtin::BI_InterlockedCompareExchange64: + llvm_unreachable("BI_InterlockedCompareExchange8 like NYI"); + case Builtin::BI_InterlockedIncrement16: + case Builtin::BI_InterlockedIncrement: + llvm_unreachable("BI_InterlockedIncrement16 like NYI"); + case Builtin::BI_InterlockedDecrement16: + case Builtin::BI_InterlockedDecrement: + llvm_unreachable("BI_InterlockedDecrement16 like NYI"); + case Builtin::BI_InterlockedAnd8: + case Builtin::BI_InterlockedAnd16: + case Builtin::BI_InterlockedAnd: + llvm_unreachable("BI_InterlockedAnd8 like NYI"); + case Builtin::BI_InterlockedExchangeAdd8: + case Builtin::BI_InterlockedExchangeAdd16: + case Builtin::BI_InterlockedExchangeAdd: + llvm_unreachable("BI_InterlockedExchangeAdd8 like NYI"); + case Builtin::BI_InterlockedExchangeSub8: + case Builtin::BI_InterlockedExchangeSub16: + case Builtin::BI_InterlockedExchangeSub: + llvm_unreachable("BI_InterlockedExchangeSub8 like NYI"); + case Builtin::BI_InterlockedOr8: + case Builtin::BI_InterlockedOr16: + case Builtin::BI_InterlockedOr: + llvm_unreachable("BI_InterlockedOr8 like NYI"); + case Builtin::BI_InterlockedXor8: + case Builtin::BI_InterlockedXor16: + case Builtin::BI_InterlockedXor: + llvm_unreachable("BI_InterlockedXor8 like NYI"); + + case Builtin::BI_bittest64: + case Builtin::BI_bittest: + case Builtin::BI_bittestandcomplement64: + case Builtin::BI_bittestandcomplement: + case Builtin::BI_bittestandreset64: + case Builtin::BI_bittestandreset: + case Builtin::BI_bittestandset64: + case Builtin::BI_bittestandset: + case Builtin::BI_interlockedbittestandreset: + case Builtin::BI_interlockedbittestandreset64: + case Builtin::BI_interlockedbittestandset64: + case Builtin::BI_interlockedbittestandset: + case Builtin::BI_interlockedbittestandset_acq: + case Builtin::BI_interlockedbittestandset_rel: + case Builtin::BI_interlockedbittestandset_nf: + case Builtin::BI_interlockedbittestandreset_acq: + case Builtin::BI_interlockedbittestandreset_rel: + case Builtin::BI_interlockedbittestandreset_nf: + llvm_unreachable("BI_bittest64 like NYI"); + + // These builtins exist to emit regular volatile loads and stores not + // affected by the -fms-volatile setting. + case Builtin::BI__iso_volatile_load8: + case Builtin::BI__iso_volatile_load16: + case Builtin::BI__iso_volatile_load32: + case Builtin::BI__iso_volatile_load64: + llvm_unreachable("BI__iso_volatile_load8 like NYI"); + case Builtin::BI__iso_volatile_store8: + case Builtin::BI__iso_volatile_store16: + case Builtin::BI__iso_volatile_store32: + case Builtin::BI__iso_volatile_store64: + llvm_unreachable("BI__iso_volatile_store8 like NYI"); + + case Builtin::BI__builtin_ptrauth_sign_constant: + llvm_unreachable("BI__builtin_ptrauth_sign_constant NYI"); + + case Builtin::BI__builtin_ptrauth_auth: + case Builtin::BI__builtin_ptrauth_auth_and_resign: + case Builtin::BI__builtin_ptrauth_blend_discriminator: + case Builtin::BI__builtin_ptrauth_sign_generic_data: + case Builtin::BI__builtin_ptrauth_sign_unauthenticated: + case Builtin::BI__builtin_ptrauth_strip: + llvm_unreachable("BI__builtin_ptrauth_auth like NYI"); + + case Builtin::BI__exception_code: + case Builtin::BI_exception_code: + llvm_unreachable("BI__exception_code like NYI"); + case Builtin::BI__exception_info: + case Builtin::BI_exception_info: + llvm_unreachable("BI__exception_info like NYI"); + case Builtin::BI__abnormal_termination: + case Builtin::BI_abnormal_termination: + llvm_unreachable("BI__abnormal_termination like NYI"); + case Builtin::BI_setjmpex: + llvm_unreachable("BI_setjmpex NYI"); + break; + case Builtin::BI_setjmp: + llvm_unreachable("BI_setjmp NYI"); + break; + + // C++ std:: builtins. + case Builtin::BImove: + case Builtin::BImove_if_noexcept: + case Builtin::BIforward: + case Builtin::BIas_const: + return RValue::get(buildLValue(E->getArg(0)).getPointer()); + case Builtin::BIforward_like: + llvm_unreachable("BIforward_like NYI"); + case Builtin::BI__GetExceptionInfo: + llvm_unreachable("BI__GetExceptionInfo NYI"); + + case Builtin::BI__fastfail: + llvm_unreachable("BI__fastfail NYI"); + + case Builtin::BI__builtin_coro_id: + case Builtin::BI__builtin_coro_promise: + case Builtin::BI__builtin_coro_resume: + case Builtin::BI__builtin_coro_noop: + case Builtin::BI__builtin_coro_destroy: + case Builtin::BI__builtin_coro_done: + case Builtin::BI__builtin_coro_alloc: + case Builtin::BI__builtin_coro_begin: + case Builtin::BI__builtin_coro_end: + case Builtin::BI__builtin_coro_suspend: + case Builtin::BI__builtin_coro_align: + llvm_unreachable("BI__builtin_coro_id like NYI"); + + case Builtin::BI__builtin_coro_frame: { + return buildCoroutineFrame(); + } + case Builtin::BI__builtin_coro_free: + case Builtin::BI__builtin_coro_size: { + GlobalDecl gd{FD}; + mlir::Type ty = CGM.getTypes().GetFunctionType( + CGM.getTypes().arrangeGlobalDeclaration(GD)); + const auto *ND = cast(GD.getDecl()); + auto fnOp = + CGM.GetOrCreateCIRFunction(ND->getName(), ty, gd, /*ForVTable=*/false, + /*DontDefer=*/false); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + return buildCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), + E, ReturnValue); + } + + case Builtin::BIread_pipe: + case Builtin::BIwrite_pipe: + llvm_unreachable("BIread_pipe and BIwrite_pipe NYI"); + + // OpenCL v2.0 s6.13.16 ,s9.17.3.5 - Built-in pipe reserve read and write + // functions + case Builtin::BIreserve_read_pipe: + case Builtin::BIreserve_write_pipe: + case Builtin::BIwork_group_reserve_read_pipe: + case Builtin::BIwork_group_reserve_write_pipe: + case Builtin::BIsub_group_reserve_read_pipe: + case Builtin::BIsub_group_reserve_write_pipe: + llvm_unreachable("BIreserve_read_pipe like NYI"); + + // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write + // functions + case Builtin::BIcommit_read_pipe: + case Builtin::BIcommit_write_pipe: + case Builtin::BIwork_group_commit_read_pipe: + case Builtin::BIwork_group_commit_write_pipe: + case Builtin::BIsub_group_commit_read_pipe: + case Builtin::BIsub_group_commit_write_pipe: + llvm_unreachable("BIcommit_read_pipe like NYI"); + // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions + case Builtin::BIget_pipe_num_packets: + case Builtin::BIget_pipe_max_packets: + llvm_unreachable("BIget_pipe_num_packets like NYI"); + + // OpenCL v2.0 s6.13.9 - Address space qualifier functions. + case Builtin::BIto_global: + case Builtin::BIto_local: + case Builtin::BIto_private: + llvm_unreachable("Builtin::BIto_global like NYI"); + + // OpenCL v2.0, s6.13.17 - Enqueue kernel function. + // Table 6.13.17.1 specifies four overload forms of enqueue_kernel. + // The code below expands the builtin call to a call to one of the following + // functions that an OpenCL runtime library will have to provide: + // __enqueue_kernel_basic + // __enqueue_kernel_varargs + // __enqueue_kernel_basic_events + // __enqueue_kernel_events_varargs + case Builtin::BIenqueue_kernel: + llvm_unreachable("BIenqueue_kernel NYI"); + // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block + // parameter. + case Builtin::BIget_kernel_work_group_size: + llvm_unreachable("BIget_kernel_work_group_size NYI"); + case Builtin::BIget_kernel_preferred_work_group_size_multiple: + llvm_unreachable("BIget_kernel_preferred_work_group_size_multiple NYI"); + + case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: + case Builtin::BIget_kernel_sub_group_count_for_ndrange: + llvm_unreachable("BIget_kernel_max_sub_group_size_for_ndrange like NYI"); + + case Builtin::BI__builtin_store_half: + case Builtin::BI__builtin_store_halff: + llvm_unreachable("BI__builtin_store_half like NYI"); + case Builtin::BI__builtin_load_half: + llvm_unreachable("BI__builtin_load_half NYI"); + case Builtin::BI__builtin_load_halff: + llvm_unreachable("BI__builtin_load_halff NYI"); + + case Builtin::BI__builtin_printf: + llvm_unreachable("BI__builtin_printf NYI"); + case Builtin::BIprintf: + if (getTarget().getTriple().isNVPTX() || + getTarget().getTriple().isAMDGCN()) { + llvm_unreachable("BIprintf NYI"); + } + break; + + case Builtin::BI__builtin_canonicalize: + case Builtin::BI__builtin_canonicalizef: + case Builtin::BI__builtin_canonicalizef16: + case Builtin::BI__builtin_canonicalizel: + llvm_unreachable("BI__builtin_canonicalize like NYI"); + + case Builtin::BI__builtin_thread_pointer: + llvm_unreachable("BI__builtin_thread_pointer NYI"); + case Builtin::BI__builtin_os_log_format: + llvm_unreachable("BI__builtin_os_log_format NYI"); + case Builtin::BI__xray_customevent: + llvm_unreachable("BI__xray_customevent NYI"); + case Builtin::BI__xray_typedevent: + llvm_unreachable("BI__xray_typedevent NYI"); + + case Builtin::BI__builtin_ms_va_start: + case Builtin::BI__builtin_ms_va_end: + llvm_unreachable("BI__builtin_ms_va_start like NYI"); + + case Builtin::BI__builtin_ms_va_copy: + llvm_unreachable("BI__builtin_ms_va_copy NYI"); + case Builtin::BI__builtin_get_device_side_mangled_name: + llvm_unreachable("BI__builtin_get_device_side_mangled_name NYI"); } // If this is an alias for a lib function (e.g. __builtin_sin), emit From 22ad88c6cf836df54bf46867779bb1cd6a2dbcc1 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 18 Oct 2024 14:45:10 -0400 Subject: [PATCH 1967/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqaddq_v, neon_vqsubq and neon_vqsub (#988) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 30 +- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 276 ++++++++++++++++++ 2 files changed, 294 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 6e8c2be4c94d..e8a802318c61 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2210,7 +2210,7 @@ static int64_t getIntValueFromConstOp(mlir::Value val) { /// expression type. /// 2. Function arg types are given, not deduced from actual arg types. static mlir::Value -buildCommonNeonCallPattern0(CIRGenFunction &cgf, std::string &intrincsName, +buildCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, llvm::SmallVector argTypes, llvm::SmallVectorImpl &ops, mlir::Type funcResTy, const clang::CallExpr *e) { @@ -2326,6 +2326,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( // This second switch is for the intrinsics that might have a more generic // codegen solution so we can use the common codegen in future. + llvm::StringRef intrincsName; switch (builtinID) { default: llvm::errs() << getAArch64SIMDIntrinsicString(builtinID) << " "; @@ -2333,22 +2334,27 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vpadd_v: case NEON::BI__builtin_neon_vpaddq_v: { - std::string intrincsName = mlir::isa(vTy.getEltType()) - ? "llvm.aarch64.neon.faddp" - : "llvm.aarch64.neon.addp"; - return buildCommonNeonCallPattern0(*this, intrincsName, {vTy, vTy}, ops, - vTy, e); + intrincsName = mlir::isa(vTy.getEltType()) + ? "llvm.aarch64.neon.faddp" + : "llvm.aarch64.neon.addp"; break; } - case NEON::BI__builtin_neon_vqadd_v: { - std::string intrincsName = (intrinicId != altLLVMIntrinsic) - ? "llvm.aarch64.neon.uqadd" - : "llvm.aarch64.neon.sqadd"; - return buildCommonNeonCallPattern0(*this, intrincsName, {vTy, vTy}, ops, - vTy, e); + case NEON::BI__builtin_neon_vqadd_v: + case NEON::BI__builtin_neon_vqaddq_v: { + intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.uqadd" + : "llvm.aarch64.neon.sqadd"; + break; + } + case NEON::BI__builtin_neon_vqsub_v: + case NEON::BI__builtin_neon_vqsubq_v: { + intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.uqsub" + : "llvm.aarch64.neon.sqsub"; break; } } + if (!intrincsName.empty()) + return buildCommonNeonCallPattern0(*this, intrincsName, {vTy, vTy}, ops, + vTy, e); return nullptr; } diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index ab37dded4881..2bfa4e89505f 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -331,3 +331,279 @@ int32x4_t test_vqrdmulhq_lane_s32(int32x4_t a, int32x2_t v) { // LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.lane.v4i32.v2i32 // LLVM-SAME: (<4 x i32> [[A]], <2 x i32> [[V]], i32 1) // LLVM: ret <4 x i32> [[RES]] + +int8x16_t test_vqaddq_s8(int8x16_t a, int8x16_t b) { + return vqaddq_s8(a, b); +} + +// CIR-LABEL: vqaddq_s8 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqaddq_s8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) +// LLVM: ret <16 x i8> [[RES]] + +uint8x16_t test_vqaddq_u8(uint8x16_t a, uint8x16_t b) { + return vqaddq_u8(a, b); +} + +// CIR-LABEL: vqaddq_u8 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqaddq_u8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.neon.uqadd.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) +// LLVM: ret <16 x i8> [[RES]] + +int16x8_t test_vqaddq_s16(int16x8_t a, int16x8_t b) { + return vqaddq_s16(a, b); +} + +// CIR-LABEL: vqaddq_s16 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqaddq_s16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) +// LLVM: ret <8 x i16> [[RES]] + +uint16x8_t test_vqaddq_u16(uint16x8_t a, uint16x8_t b) { + return vqaddq_u16(a, b); +} + +// CIR-LABEL: vqaddq_u16 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqaddq_u16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.uqadd.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) +// LLVM: ret <8 x i16> [[RES]] + +int32x4_t test_vqaddq_s32(int32x4_t a, int32x4_t b) { + return vqaddq_s32(a, b); +} + +// CIR-LABEL: vqaddq_s32 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqaddq_s32(<4 x i32>{{.*}} [[A:%.*]], <4 x i32>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) +// LLVM: ret <4 x i32> [[RES]] + +int64x2_t test_vqaddq_s64(int64x2_t a, int64x2_t b) { + return vqaddq_s64(a, b); +} + +// CIR-LABEL: vqaddq_s64 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqaddq_s64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> [[A]], <2 x i64> [[B]]) +// LLVM: ret <2 x i64> [[RES]] + +uint64x2_t test_vqaddq_u64(uint64x2_t a, uint64x2_t b) { + return vqaddq_u64(a, b); +} + +// CIR-LABEL: vqaddq_u64 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqaddq_u64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64> [[A]], <2 x i64> [[B]]) +// LLVM: ret <2 x i64> [[RES]] + +int8x8_t test_vqsub_s8(int8x8_t a, int8x8_t b) { + return vqsub_s8(a, b); +} + +// CIR-LABEL: vqsub_s8 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsub_s8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> [[A]], <8 x i8> [[B]]) +// LLVM: ret <8 x i8> [[RES]] + +uint8x8_t test_vqsub_u8(uint8x8_t a, uint8x8_t b) { + return vqsub_u8(a, b); +} + +// CIR-LABEL: vqsub_u8 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsub_u8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> [[A]], <8 x i8> [[B]]) +// LLVM: ret <8 x i8> [[RES]] + +int16x4_t test_vqsub_s16(int16x4_t a, int16x4_t b) { + return vqsub_s16(a, b); +} + +// CIR-LABEL: vqsub_s16 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsub_s16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> [[A]], <4 x i16> [[B]]) +// LLVM: ret <4 x i16> [[RES]] + +uint16x4_t test_vqsub_u16(uint16x4_t a, uint16x4_t b) { + return vqsub_u16(a, b); +} + +// CIR-LABEL: vqsub_u16 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsub_u16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> [[A]], <4 x i16> [[B]]) +// LLVM: ret <4 x i16> [[RES]] + +int32x2_t test_vqsub_s32(int32x2_t a, int32x2_t b) { + return vqsub_s32(a, b); +} + +// CIR-LABEL: vqsub_s32 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsub_s32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> [[A]], <2 x i32> [[B]]) +// LLVM: ret <2 x i32> [[RES]] + +uint32x2_t test_vqsub_u32(uint32x2_t a, uint32x2_t b) { + return vqsub_u32(a, b); +} + +// CIR-LABEL: vqsub_u32 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsub_u32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> [[A]], <2 x i32> [[B]]) +// LLVM: ret <2 x i32> [[RES]] + +int64x1_t test_vqsub_s64(int64x1_t a, int64x1_t b) { + return vqsub_s64(a, b); +} + +// CIR-LABEL: vqsub_s64 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsub_s64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqsub.v1i64(<1 x i64> [[A]], <1 x i64> [[B]]) +// LLVM: ret <1 x i64> [[RES]] + +uint64x1_t test_vqsub_u64(uint64x1_t a, uint64x1_t b) { + return vqsub_u64(a, b); +} + +// CIR-LABEL: vqsub_u64 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsub_u64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <1 x i64> @llvm.aarch64.neon.uqsub.v1i64(<1 x i64> [[A]], <1 x i64> [[B]]) +// LLVM: ret <1 x i64> [[RES]] + +int8x16_t test_vqsubq_s8(int8x16_t a, int8x16_t b) { + return vqsubq_s8(a, b); +} + +// CIR-LABEL: vqsubq_s8 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsubq_s8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) +// LLVM: ret <16 x i8> [[RES]] + +uint8x16_t test_vqsubq_u8(uint8x16_t a, uint8x16_t b) { + return vqsubq_u8(a, b); +} + +// CIR-LABEL: vqsubq_u8 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsubq_u8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) +// LLVM: ret <16 x i8> [[RES]] + +int16x8_t test_vqsubq_s16(int16x8_t a, int16x8_t b) { + return vqsubq_s16(a, b); +} + +// CIR-LABEL: vqsubq_s16 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsubq_s16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) +// LLVM: ret <8 x i16> [[RES]] + +uint16x8_t test_vqsubq_u16(uint16x8_t a, uint16x8_t b) { + return vqsubq_u16(a, b); +} + +// CIR-LABEL: vqsubq_u16 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsubq_u16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) +// LLVM: ret <8 x i16> [[RES]] + +int32x4_t test_vqsubq_s32(int32x4_t a, int32x4_t b) { + return vqsubq_s32(a, b); +} + +// CIR-LABEL: vqsubq_s32 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsubq_s32(<4 x i32>{{.*}} [[A:%.*]], <4 x i32>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) +// LLVM: ret <4 x i32> [[RES]] + +uint32x4_t test_vqsubq_u32(uint32x4_t a, uint32x4_t b) { + return vqsubq_u32(a, b); +} + +// CIR-LABEL: vqsubq_u32 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsubq_u32(<4 x i32>{{.*}} [[A:%.*]], <4 x i32>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.uqsub.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) +// LLVM: ret <4 x i32> [[RES]] + +int64x2_t test_vqsubq_s64(int64x2_t a, int64x2_t b) { + return vqsubq_s64(a, b); +} + +// CIR-LABEL: vqsubq_s64 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsubq_s64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> [[A]], <2 x i64> [[B]]) +// LLVM: ret <2 x i64> [[RES]] + +uint64x2_t test_vqsubq_u64(uint64x2_t a, uint64x2_t b) { + return vqsubq_u64(a, b); +} + +// CIR-LABEL: vqsubq_u64 +// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + +// LLVM: {{.*}}test_vqsubq_u64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]]) +// LLVM: [[RES:%.*]] = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> [[A]], <2 x i64> [[B]]) +// LLVM: ret <2 x i64> [[RES]] From 7a8e321ba0d0f73794651e27744a25acd530b5ca Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 18 Oct 2024 14:46:21 -0400 Subject: [PATCH 1968/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vmovl_v (#989) --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 38 +++++-- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 21 +++- clang/test/CIR/CodeGen/AArch64/neon.c | 100 +++++++++++------- 3 files changed, 103 insertions(+), 56 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index a711589ef5d1..5a11d79e58b6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -374,25 +374,41 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { bool isInt(mlir::Type i) { return mlir::isa(i); } mlir::cir::IntType getExtendedIntTy(mlir::cir::IntType ty, bool isSigned) { - if (isInt8Ty(ty)) { - return isSigned ? getSInt16Ty() : getUInt16Ty(); - } - if (isInt16Ty(ty)) { - return isSigned ? getSInt32Ty() : getUInt32Ty(); + switch (ty.getWidth()) { + case 8: + return isSigned ? typeCache.SInt16Ty : typeCache.UInt16Ty; + case 16: + return isSigned ? typeCache.SInt32Ty : typeCache.UInt32Ty; + case 32: + return isSigned ? typeCache.SInt64Ty : typeCache.UInt64Ty; + default: + llvm_unreachable("NYI"); } - if (isInt32Ty(ty)) { - return isSigned ? getSInt64Ty() : getUInt64Ty(); + } + + mlir::cir::IntType getTruncatedIntTy(mlir::cir::IntType ty, bool isSigned) { + switch (ty.getWidth()) { + case 16: + return isSigned ? typeCache.SInt8Ty : typeCache.UInt8Ty; + case 32: + return isSigned ? typeCache.SInt16Ty : typeCache.UInt16Ty; + case 64: + return isSigned ? typeCache.SInt32Ty : typeCache.UInt32Ty; + default: + llvm_unreachable("NYI"); } - llvm_unreachable("NYI"); } - mlir::cir::VectorType getExtendedElementVectorType(mlir::cir::VectorType vt, - bool isSigned = false) { + mlir::cir::VectorType getExtendedOrTruncatedElementVectorType( + mlir::cir::VectorType vt, bool isExtended, bool isSigned = false) { auto elementTy = mlir::dyn_cast_or_null(vt.getEltType()); assert(elementTy && "expected int vector"); return mlir::cir::VectorType::get( - getContext(), getExtendedIntTy(elementTy, isSigned), vt.getSize()); + getContext(), + isExtended ? getExtendedIntTy(elementTy, isSigned) + : getTruncatedIntTy(elementTy, isSigned), + vt.getSize()); } mlir::cir::LongDoubleType diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index e8a802318c61..12efc23b2587 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2275,9 +2275,17 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( switch (builtinID) { default: break; + case NEON::BI__builtin_neon_vmovl_v: { + mlir::cir::VectorType dTy = builder.getExtendedOrTruncatedElementVectorType( + vTy, false /* truncate */, + mlir::cast(vTy.getEltType()).isSigned()); + // This cast makes sure arg type conforms intrinsic expected arg type. + ops[0] = builder.createBitcast(ops[0], dTy); + return builder.createIntCast(ops[0], ty); + } case NEON::BI__builtin_neon_vmovn_v: { - mlir::cir::VectorType qTy = builder.getExtendedElementVectorType( - vTy, mlir::cast(vTy.getEltType()).isSigned()); + mlir::cir::VectorType qTy = builder.getExtendedOrTruncatedElementVectorType( + vTy, true, mlir::cast(vTy.getEltType()).isSigned()); ops[0] = builder.createBitcast(ops[0], qTy); // It really is truncation in this context. // In CIR, integral cast op supports vector of int type truncating. @@ -3166,15 +3174,18 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, // The prototype of builtin_neon_vqrshrun_n can be found at // https://developer.arm.com/architectures/instruction-sets/intrinsics/ return buildNeonCall( - builder, {builder.getExtendedElementVectorType(ty, true), SInt32Ty}, + builder, + {builder.getExtendedOrTruncatedElementVectorType(ty, true, true), + SInt32Ty}, Ops, "llvm.aarch64.neon.sqrshrun", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqshrn_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vrshrn_n_v: return buildNeonCall( builder, - {builder.getExtendedElementVectorType( - vTy, mlir::cast(vTy.getEltType()).isSigned()), + {builder.getExtendedOrTruncatedElementVectorType( + vTy, true /* extend */, + mlir::cast(vTy.getEltType()).isSigned()), SInt32Ty}, Ops, "llvm.aarch64.neon.rshrn", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqrshrn_n_v: diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 730a7acee887..30a1c595af6e 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -6517,51 +6517,71 @@ uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { // return vshll_high_n_u32(a, 19); // } -// NYI-LABEL: @test_vmovl_s8( -// NYI: [[VMOVL_I:%.*]] = sext <8 x i8> %a to <8 x i16> -// NYI: ret <8 x i16> [[VMOVL_I]] -// int16x8_t test_vmovl_s8(int8x8_t a) { -// return vmovl_s8(a); -// } +int16x8_t test_vmovl_s8(int8x8_t a) { + return vmovl_s8(a); -// NYI-LABEL: @test_vmovl_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[VMOVL_I:%.*]] = sext <4 x i16> %a to <4 x i32> -// NYI: ret <4 x i32> [[VMOVL_I]] -// int32x4_t test_vmovl_s16(int16x4_t a) { -// return vmovl_s16(a); -// } + // CIR-LABEL: vmovl_s8 + // CIR: {{%.*}} = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector -// NYI-LABEL: @test_vmovl_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[VMOVL_I:%.*]] = sext <2 x i32> %a to <2 x i64> -// NYI: ret <2 x i64> [[VMOVL_I]] -// int64x2_t test_vmovl_s32(int32x2_t a) { -// return vmovl_s32(a); -// } + // LLVM: {{.*}}test_vmovl_s8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVL_I:%.*]] = sext <8 x i8> [[A]] to <8 x i16> + // LLVM: ret <8 x i16> [[VMOVL_I]] +} -// NYI-LABEL: @test_vmovl_u8( -// NYI: [[VMOVL_I:%.*]] = zext <8 x i8> %a to <8 x i16> -// NYI: ret <8 x i16> [[VMOVL_I]] -// uint16x8_t test_vmovl_u8(uint8x8_t a) { -// return vmovl_u8(a); -// } +int32x4_t test_vmovl_s16(int16x4_t a) { + return vmovl_s16(a); -// NYI-LABEL: @test_vmovl_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[VMOVL_I:%.*]] = zext <4 x i16> %a to <4 x i32> -// NYI: ret <4 x i32> [[VMOVL_I]] -// uint32x4_t test_vmovl_u16(uint16x4_t a) { -// return vmovl_u16(a); -// } + // CIR-LABEL: vmovl_s16 + // CIR: {{%.*}} = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector -// NYI-LABEL: @test_vmovl_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[VMOVL_I:%.*]] = zext <2 x i32> %a to <2 x i64> -// NYI: ret <2 x i64> [[VMOVL_I]] -// uint64x2_t test_vmovl_u32(uint32x2_t a) { -// return vmovl_u32(a); -// } + // LLVM: {{.*}}test_vmovl_s16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVL_I:%.*]] = sext <4 x i16> [[A]] to <4 x i32> + // LLVM: ret <4 x i32> [[VMOVL_I]] +} + +int64x2_t test_vmovl_s32(int32x2_t a) { + return vmovl_s32(a); + + // CIR-LABEL: vmovl_s32 + // CIR: {{%.*}} = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vmovl_s32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVL_I:%.*]] = sext <2 x i32> [[A]] to <2 x i64> + // LLVM: ret <2 x i64> [[VMOVL_I]] +} + +uint16x8_t test_vmovl_u8(uint8x8_t a) { + return vmovl_u8(a); + + // CIR-LABEL: vmovl_u8 + // CIR: {{%.*}} = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vmovl_u8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVL_I:%.*]] = zext <8 x i8> [[A]] to <8 x i16> + // LLVM: ret <8 x i16> [[VMOVL_I]] +} + +uint32x4_t test_vmovl_u16(uint16x4_t a) { + return vmovl_u16(a); + + // CIR-LABEL: vmovl_u16 + // CIR: {{%.*}} = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vmovl_u16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVL_I:%.*]] = zext <4 x i16> [[A]] to <4 x i32> + // LLVM: ret <4 x i32> [[VMOVL_I]] +} + +uint64x2_t test_vmovl_u32(uint32x2_t a) { + return vmovl_u32(a); + + // CIR-LABEL: vmovl_u32 + // CIR: {{%.*}} = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vmovl_u32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[VMOVL_I:%.*]] = zext <2 x i32> [[A]] to <2 x i64> + // LLVM: ret <2 x i64> [[VMOVL_I]] +} // NYI-LABEL: @test_vmovl_high_s8( // NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> From 84be0af2d79be1b027662933fddc6204c17a52f3 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 17 Oct 2024 19:03:23 -0700 Subject: [PATCH 1969/2301] [CIR][CIRGen] Support initial cases of inheritance ctor/dtor Add more NFC skeleton while here. --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 52 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 55 +++++++++++++++++++ .../CIR/CodeGen/inheriting-constructor.cpp | 12 ++++ 3 files changed, 117 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/inheriting-constructor.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 29bc5e5938c6..4554e41030e7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1941,14 +1941,62 @@ void CIRGenFunction::buildInheritedCXXConstructorCall( llvm_unreachable("NYI"); } else { // The inheriting constructor was not inlined. Emit delegating arguments. - llvm_unreachable("NYI"); + Args.push_back(ThisArg); + const auto *OuterCtor = cast(CurCodeDecl); + assert(OuterCtor->getNumParams() == D->getNumParams()); + assert(!OuterCtor->isVariadic() && "should have been inlined"); + for (const auto *Param : OuterCtor->parameters()) { + assert(getContext().hasSameUnqualifiedType( + OuterCtor->getParamDecl(Param->getFunctionScopeIndex())->getType(), + Param->getType())); + buildDelegateCallArg(Args, Param, E->getLocation()); + + // Forward __attribute__(pass_object_size). + if (Param->hasAttr()) { + auto *POSParam = SizeArguments[Param]; + assert(POSParam && "missing pass_object_size value for forwarding"); + buildDelegateCallArg(Args, POSParam, E->getLocation()); + } + } } - llvm_unreachable("NYI"); + buildCXXConstructorCall(D, Ctor_Base, ForVirtualBase, /*Delegating*/ false, + This, Args, AggValueSlot::MayOverlap, + E->getLocation(), + /*NewPointerIsChecked*/ true); } void CIRGenFunction::buildInlinedInheritingCXXConstructorCall( const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase, bool Delegating, CallArgList &Args) { + GlobalDecl GD(Ctor, CtorType); + llvm_unreachable("NYI"); + InlinedInheritingConstructorScope Scope(*this, GD); + // TODO(cir): ApplyInlineDebugLocation + assert(!MissingFeatures::generateDebugInfo()); + RunCleanupsScope RunCleanups(*this); + + // Save the arguments to be passed to the inherited constructor. + CXXInheritedCtorInitExprArgs = Args; + + FunctionArgList Params; + QualType RetType = buildFunctionArgList(CurGD, Params); + FnRetTy = RetType; + + // Insert any ABI-specific implicit constructor arguments. + CGM.getCXXABI().addImplicitConstructorArgs(*this, Ctor, CtorType, + ForVirtualBase, Delegating, Args); + + // Emit a simplified prolog. We only need to emit the implicit params. + assert(Args.size() >= Params.size() && "too few arguments for call"); + for (unsigned I = 0, N = Args.size(); I != N; ++I) { + if (I < Params.size() && isa(Params[I])) { + const RValue &RV = + Args[I].getRValue(*this, getLoc(Ctor->getSourceRange())); + assert(!RV.isComplex() && "complex indirect params not supported"); + llvm_unreachable("NYI"); + } + } + llvm_unreachable("NYI"); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 66c62a23ebae..94f557526e52 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -2263,6 +2263,61 @@ class CIRGenFunction : public CIRGenTypeCache { LexicalScope *currLexScope = nullptr; + class InlinedInheritingConstructorScope { + public: + InlinedInheritingConstructorScope(CIRGenFunction &CGF, GlobalDecl GD) + : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl), + OldCurCodeDecl(CGF.CurCodeDecl), + OldCXXABIThisDecl(CGF.CXXABIThisDecl), + OldCXXABIThisValue(CGF.CXXABIThisValue), + OldCXXThisValue(CGF.CXXThisValue), + OldCXXABIThisAlignment(CGF.CXXABIThisAlignment), + OldCXXThisAlignment(CGF.CXXThisAlignment), + OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy), + OldCXXInheritedCtorInitExprArgs( + std::move(CGF.CXXInheritedCtorInitExprArgs)) { + CGF.CurGD = GD; + CGF.CurFuncDecl = CGF.CurCodeDecl = + cast(GD.getDecl()); + CGF.CXXABIThisDecl = nullptr; + CGF.CXXABIThisValue = nullptr; + CGF.CXXThisValue = nullptr; + CGF.CXXABIThisAlignment = CharUnits(); + CGF.CXXThisAlignment = CharUnits(); + CGF.ReturnValue = Address::invalid(); + CGF.FnRetTy = QualType(); + CGF.CXXInheritedCtorInitExprArgs.clear(); + } + ~InlinedInheritingConstructorScope() { + CGF.CurGD = OldCurGD; + CGF.CurFuncDecl = OldCurFuncDecl; + CGF.CurCodeDecl = OldCurCodeDecl; + CGF.CXXABIThisDecl = OldCXXABIThisDecl; + CGF.CXXABIThisValue = OldCXXABIThisValue; + CGF.CXXThisValue = OldCXXThisValue; + CGF.CXXABIThisAlignment = OldCXXABIThisAlignment; + CGF.CXXThisAlignment = OldCXXThisAlignment; + CGF.ReturnValue = OldReturnValue; + CGF.FnRetTy = OldFnRetTy; + CGF.CXXInheritedCtorInitExprArgs = + std::move(OldCXXInheritedCtorInitExprArgs); + } + + private: + CIRGenFunction &CGF; + GlobalDecl OldCurGD; + const Decl *OldCurFuncDecl; + const Decl *OldCurCodeDecl; + ImplicitParamDecl *OldCXXABIThisDecl; + mlir::Value OldCXXABIThisValue; + mlir::Value OldCXXThisValue; + CharUnits OldCXXABIThisAlignment; + CharUnits OldCXXThisAlignment; + Address OldReturnValue; + QualType OldFnRetTy; + CallArgList OldCXXInheritedCtorInitExprArgs; + }; + /// CIR build helpers /// ----------------- diff --git a/clang/test/CIR/CodeGen/inheriting-constructor.cpp b/clang/test/CIR/CodeGen/inheriting-constructor.cpp new file mode 100644 index 000000000000..9d3d673e6f65 --- /dev/null +++ b/clang/test/CIR/CodeGen/inheriting-constructor.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR_ITANIUM --input-file=%t.cir %s + +struct A { A(int); virtual ~A(); }; +struct B : A { using A::A; ~B(); }; +B::~B() {} + +B b(123); + +// CIR_ITANIUM-LABEL: @_ZN1BD2Ev +// CIR_ITANIUM-LABEL: @_ZN1BD1Ev +// CIR_ITANIUM-LABEL: @_ZN1BD0Ev \ No newline at end of file From 68dd3378f3ee62ff0eba020c0c96eb984cf6cebb Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 18 Oct 2024 16:59:37 -0700 Subject: [PATCH 1970/2301] [CIR][CIRGen] Null init some inheritance components --- clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 4 ++ clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 3 ++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 48 ++++++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 11 ++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 4 ++ 5 files changed, 69 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp index 27b04503d788..8f6040f8cea5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -75,4 +75,8 @@ mlir::cir::GlobalLinkageKind CIRGenCXXABI::getCXXDestructorLinkage( // Delegate back to CGM by default. return CGM.getCIRLinkageForDeclarator(Dtor, Linkage, /*IsConstantVariable=*/false); +} + +std::vector CIRGenCXXABI::getVBPtrOffsets(const CXXRecordDecl *RD) { + return std::vector(); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 83da6ad3c49d..159b0e5e8e2b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -268,6 +268,9 @@ class CIRGenCXXABI { virtual RecordArgABI getRecordArgABI(const clang::CXXRecordDecl *RD) const = 0; + /// Gets the offsets of all the virtual base pointers in a given class. + virtual std::vector getVBPtrOffsets(const CXXRecordDecl *RD); + /// Insert any ABI-specific implicit parameters into the parameter list for a /// function. This generally involves extra data for constructors and /// destructors. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 212991871dce..d1efeb0c77e1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -344,6 +344,51 @@ CIRGenFunction::buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, /*IsArrow=*/false, E->getArg(0)); } +static void buildNullBaseClassInitialization(CIRGenFunction &CGF, + Address DestPtr, + const CXXRecordDecl *Base) { + if (Base->isEmpty()) + return; + + DestPtr = DestPtr.withElementType(CGF.UInt8Ty); + + const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base); + CharUnits NVSize = Layout.getNonVirtualSize(); + + // We cannot simply zero-initialize the entire base sub-object if vbptrs are + // present, they are initialized by the most derived class before calling the + // constructor. + SmallVector, 1> Stores; + Stores.emplace_back(CharUnits::Zero(), NVSize); + + // Each store is split by the existence of a vbptr. + CharUnits VBPtrWidth = CGF.getPointerSize(); + std::vector VBPtrOffsets = + CGF.CGM.getCXXABI().getVBPtrOffsets(Base); + for (CharUnits VBPtrOffset : VBPtrOffsets) { + // Stop before we hit any virtual base pointers located in virtual bases. + if (VBPtrOffset >= NVSize) + break; + std::pair LastStore = Stores.pop_back_val(); + CharUnits LastStoreOffset = LastStore.first; + CharUnits LastStoreSize = LastStore.second; + + CharUnits SplitBeforeOffset = LastStoreOffset; + CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset; + assert(!SplitBeforeSize.isNegative() && "negative store size!"); + if (!SplitBeforeSize.isZero()) + Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize); + + CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth; + CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset; + assert(!SplitAfterSize.isNegative() && "negative store size!"); + if (!SplitAfterSize.isZero()) + Stores.emplace_back(SplitAfterOffset, SplitAfterSize); + } + + llvm_unreachable("NYI"); +} + void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest) { assert(!Dest.isIgnored() && "Must have a destination!"); @@ -362,7 +407,8 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, break; case CXXConstructionKind::VirtualBase: case CXXConstructionKind::NonVirtualBase: - llvm_unreachable("NYI"); + buildNullBaseClassInitialization(*this, Dest.getAddress(), + CD->getParent()); break; } } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index c3683b3f0fd1..68832ec2b7ba 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1957,3 +1957,14 @@ mlir::Attribute ConstantEmitter::emitNullForMemory(mlir::Location loc, assert(cstOp && "expected cir.const op"); return emitForMemory(CGM, cstOp.getValue(), T); } + +static mlir::Value buildNullConstant(CIRGenModule &CGM, + const RecordDecl *record, + bool asCompleteObject) { + llvm_unreachable("NYI"); +} + +mlir::Value +CIRGenModule::buildNullConstantForBase(const CXXRecordDecl *Record) { + return ::buildNullConstant(*this, Record, false); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 16f95c164712..7dd265f9a785 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -606,6 +606,10 @@ class CIRGenModule : public CIRGenTypeCache { /// null constant. mlir::Value buildNullConstant(QualType T, mlir::Location loc); + /// Return a null constant appropriate for zero-initializing a base class with + /// the given type. This is usually, but not always, an LLVM null constant. + mlir::Value buildNullConstantForBase(const CXXRecordDecl *Record); + mlir::Value buildMemberPointerConstant(const UnaryOperator *E); llvm::StringRef getMangledName(clang::GlobalDecl GD); From ec7ef44bb835cdb2b7dfc3e1422923eed6d4cc32 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 18 Oct 2024 17:18:22 -0700 Subject: [PATCH 1971/2301] [CIR][CIRGen][NFC] More skeleton for building constants --- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 19 ++++++- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 68 +++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- 3 files changed, 82 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index d1efeb0c77e1..4bbb3a7ed23f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -386,7 +386,24 @@ static void buildNullBaseClassInitialization(CIRGenFunction &CGF, Stores.emplace_back(SplitAfterOffset, SplitAfterSize); } - llvm_unreachable("NYI"); + // If the type contains a pointer to data member we can't memset it to zero. + // Instead, create a null constant and copy it to the destination. + // TODO: there are other patterns besides zero that we can usefully memset, + // like -1, which happens to be the pattern used by member-pointers. + // TODO: isZeroInitializable can be over-conservative in the case where a + // virtual base contains a member pointer. + // TODO(cir): `nullConstantForBase` might be better off as a value instead + // of an mlir::TypedAttr? Once this moves out of skeleton, make sure to double + // check on what's better. + mlir::Attribute nullConstantForBase = CGF.CGM.buildNullConstantForBase(Base); + if (!CGF.getBuilder().isNullValue(nullConstantForBase)) { + llvm_unreachable("NYI"); + // Otherwise, just memset the whole thing to zero. This is legal + // because in LLVM, all default initializers (other than the ones we just + // handled above) are guaranteed to have a bit pattern of all zeros. + } else { + llvm_unreachable("NYI"); + } } void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 68832ec2b7ba..c94166456974 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1958,13 +1958,71 @@ mlir::Attribute ConstantEmitter::emitNullForMemory(mlir::Location loc, return emitForMemory(CGM, cstOp.getValue(), T); } -static mlir::Value buildNullConstant(CIRGenModule &CGM, - const RecordDecl *record, - bool asCompleteObject) { - llvm_unreachable("NYI"); +static mlir::TypedAttr buildNullConstant(CIRGenModule &CGM, + const RecordDecl *record, + bool asCompleteObject) { + const CIRGenRecordLayout &layout = + CGM.getTypes().getCIRGenRecordLayout(record); + mlir::Type ty = (asCompleteObject ? layout.getCIRType() + : layout.getBaseSubobjectCIRType()); + auto structure = dyn_cast(ty); + assert(structure && "expected"); + + unsigned numElements = structure.getNumElements(); + SmallVector elements(numElements); + + auto CXXR = dyn_cast(record); + // Fill in all the bases. + if (CXXR) { + for (const auto &I : CXXR->bases()) { + if (I.isVirtual()) { + // Ignore virtual bases; if we're laying out for a complete + // object, we'll lay these out later. + continue; + } + llvm_unreachable("NYI"); + } + } + + // Fill in all the fields. + for (const auto *Field : record->fields()) { + // Fill in non-bitfields. (Bitfields always use a zero pattern, which we + // will fill in later.) + if (!Field->isBitField()) { + // TODO(cir) check for !isEmptyFieldForLayout(CGM.getContext(), Field)) + llvm_unreachable("NYI"); + } + + // For unions, stop after the first named field. + if (record->isUnion()) { + if (Field->getIdentifier()) + break; + if (const auto *FieldRD = Field->getType()->getAsRecordDecl()) + if (FieldRD->findFirstNamedDataMember()) + break; + } + } + + // Fill in the virtual bases, if we're working with the complete object. + if (CXXR && asCompleteObject) { + for ([[maybe_unused]] const auto &I : CXXR->vbases()) { + llvm_unreachable("NYI"); + } + } + + // Now go through all other fields and zero them out. + for (unsigned i = 0; i != numElements; ++i) { + if (!elements[i]) { + llvm_unreachable("NYI"); + } + } + + mlir::MLIRContext *mlirCtx = structure.getContext(); + return mlir::cir::ConstStructAttr::get( + mlirCtx, structure, mlir::ArrayAttr::get(mlirCtx, elements)); } -mlir::Value +mlir::TypedAttr CIRGenModule::buildNullConstantForBase(const CXXRecordDecl *Record) { return ::buildNullConstant(*this, Record, false); } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 7dd265f9a785..0c1f0756a947 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -608,7 +608,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Return a null constant appropriate for zero-initializing a base class with /// the given type. This is usually, but not always, an LLVM null constant. - mlir::Value buildNullConstantForBase(const CXXRecordDecl *Record); + mlir::TypedAttr buildNullConstantForBase(const CXXRecordDecl *Record); mlir::Value buildMemberPointerConstant(const UnaryOperator *E); From 9272c33d0657b2fc63cc8fec28722dfc9d1bb2c8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 18 Oct 2024 18:28:39 -0700 Subject: [PATCH 1972/2301] [CIR][CIRGen] Add missing testcase for null base class Forgot to git add in cb0cb34b7d6a8fa22e6ca54b6f9783dbf938d271 --- clang/test/CIR/CodeGen/const-baseclass.cpp | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 clang/test/CIR/CodeGen/const-baseclass.cpp diff --git a/clang/test/CIR/CodeGen/const-baseclass.cpp b/clang/test/CIR/CodeGen/const-baseclass.cpp new file mode 100644 index 000000000000..5f2669bfe23f --- /dev/null +++ b/clang/test/CIR/CodeGen/const-baseclass.cpp @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct Empty { }; +struct A { +}; + +struct B : A, Empty { + B() : A(), Empty() { } +}; + +void f() { + B b1; +} + +// CHECK-LABEL: @_ZN1BC2Ev +// CHECK: %[[A:.*]] = cir.base_class_addr({{.*}}) [0] -> !cir.ptr +// CHECK: cir.call @_ZN1AC2Ev(%[[A:.*]]) : (!cir.ptr) -> () +// CHECK: %[[BASE:.*]] = cir.base_class_addr({{.*}}) [0] -> !cir.ptr +// CHECK: cir.call @_ZN5EmptyC2Ev(%[[BASE]]) : (!cir.ptr) -> () \ No newline at end of file From d037a8f32d5e711ee1bd6d4a492a115ef3d40436 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Tue, 22 Oct 2024 02:22:41 +0800 Subject: [PATCH 1973/2301] [CIR] [CodeGen] Introduce IsFPClassOp to support builtin_isfpclass (#971) The llvm's intrinsic `llvm.is.fpclass` is used to support multiple float point builtins: https://clang.llvm.org/docs/LanguageExtensions.html#builtin-isfpclass > The `__builtin_isfpclass()` builtin is a generalization of functions > isnan, isinf, isfinite and some others defined by the C standard. It tests > if the floating-point value, specified by the first argument, falls into > any of data classes, specified by the second argument. I meant to support this by creating IntrinsicCallOp directly. But I can't make it due to https://github.com/llvm/clangir/issues/480 since the return type of the intrinsic will mismatch. So I have to create a new Op for it. But I feel it might not be too bad. At least it is more explicit and more expressive. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 33 ++++ .../include/clang/CIR/Dialect/IR/CIRTypes.td | 3 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 + clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 148 ++++++++++++++---- clang/lib/CIR/CodeGen/TargetInfo.h | 10 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 29 +++- clang/test/CIR/CodeGen/builtin-isfpclass.c | 129 +++++++++++++++ clang/test/CIR/Lowering/builtin-isfpclass.c | 125 +++++++++++++++ 8 files changed, 447 insertions(+), 35 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-isfpclass.c create mode 100644 clang/test/CIR/Lowering/builtin-isfpclass.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 918898d898ed..e9f3e9b7753a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4027,6 +4027,39 @@ def FMinOp : BinaryFPToFPBuiltinOp<"fmin", "MinNumOp">; def FModOp : BinaryFPToFPBuiltinOp<"fmod", "FRemOp">; def PowOp : BinaryFPToFPBuiltinOp<"pow", "PowOp">; +def IsFPClassOp : CIR_Op<"is_fp_class"> { + let summary = "Corresponding to the `__builtin_fpclassify` builtin function in clang"; + + let description = [{ + The `cir.is_fp_class` operation takes a floating-point value as its first + argument and a bitfield of flags as its second argument. The operation + returns a boolean value indicating whether the floating-point value + satisfies the given flags. + + The flags must be a compile time constant and the values are: + + | Bit # | floating-point class | + | -------- | ------- | + | 0 | Signaling NaN | + | 1 | Quiet NaN | + | 2 | Negative infinity | + | 3 | Negative normal | + | 4 | Negative subnormal | + | 5 | Negative zero | + | 6 | Positive zero | + | 7 | Positive subnormal | + | 8 | Positive normal | + | 9 | Positive infinity | + }]; + + let arguments = (ins CIR_AnyFloat:$src, + I32Attr:$flags); + let results = (outs CIR_BoolType:$result); + let assemblyFormat = [{ + $src `,` $flags `:` functional-type($src, $result) attr-dict + }]; +} + //===----------------------------------------------------------------------===// // Assume Operations //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 81b939df6b1f..fe136a58a4a4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -202,7 +202,8 @@ def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { // Constraints -def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_FP80, CIR_FP128, CIR_LongDouble]>; +def CIR_AnyFloat: AnyTypeOf<[CIR_Single, CIR_Double, CIR_FP80, CIR_FP128, CIR_LongDouble, + CIR_FP16, CIR_BFloat16]>; def CIR_AnyIntOrFloat: AnyTypeOf<[CIR_AnyFloat, CIR_IntType]>; //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 5a11d79e58b6..9038ee7fca81 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -584,6 +584,11 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { getAttr(t, fpVal)); } + mlir::cir::IsFPClassOp createIsFPClass(mlir::Location loc, mlir::Value src, + unsigned flags) { + return create(loc, src, flags); + } + /// Create constant nullptr for pointer-to-data-member type ty. mlir::cir::ConstantOp getNullDataMemberPtr(mlir::cir::DataMemberType ty, mlir::Location loc) { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 6692e63f6b90..3df3ff6ce1ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -46,6 +46,17 @@ static RValue buildLibraryCall(CIRGenFunction &CGF, const FunctionDecl *FD, return CGF.buildCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); } +static mlir::Value tryUseTestFPKind(CIRGenFunction &CGF, unsigned BuiltinID, + mlir::Value V) { + if (CGF.getBuilder().getIsFPConstrained() && + CGF.getBuilder().getDefaultConstrainedExcept() != fp::ebIgnore) { + if (mlir::Value Result = CGF.getTargetHooks().testFPKind( + V, BuiltinID, CGF.getBuilder(), CGF.CGM)) + return Result; + } + return nullptr; +} + template static RValue buildUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { auto Arg = CGF.buildScalarExpr(E.getArg(0)); @@ -1191,36 +1202,6 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_isunordered: llvm_unreachable("BI__builtin_isgreater and BI__builtin_isless like NYI"); - case Builtin::BI__builtin_isnan: - llvm_unreachable("BI__builtin_isnan NYI"); - - case Builtin::BI__builtin_issignaling: - llvm_unreachable("BI__builtin_issignaling NYI"); - - case Builtin::BI__builtin_isinf: - llvm_unreachable("BI__builtin_isinf NYI"); - - case Builtin::BIfinite: - case Builtin::BI__finite: - case Builtin::BIfinitef: - case Builtin::BI__finitef: - case Builtin::BIfinitel: - case Builtin::BI__finitel: - case Builtin::BI__builtin_isfinite: - llvm_unreachable("Builtin::BIfinite like NYI"); - - case Builtin::BI__builtin_isnormal: - llvm_unreachable("BI__builtin_isnormal NYI"); - - case Builtin::BI__builtin_issubnormal: - llvm_unreachable("BI__builtin_issubnormal NYI"); - - case Builtin::BI__builtin_iszero: - llvm_unreachable("BI__builtin_iszero NYI"); - - case Builtin::BI__builtin_isfpclass: - llvm_unreachable("BI__builtin_isfpclass NYI"); - case Builtin::BI__builtin_nondeterministic_value: llvm_unreachable("BI__builtin_nondeterministic_value NYI"); @@ -1328,9 +1309,6 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_matrix_column_major_store: llvm_unreachable("BI__builtin_matrix_column_major_store NYI"); - case Builtin::BI__builtin_isinf_sign: - llvm_unreachable("BI__builtin_isinf_sign NYI"); - case Builtin::BI__builtin_flt_rounds: llvm_unreachable("BI__builtin_flt_rounds NYI"); @@ -2080,6 +2058,110 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_ms_va_copy NYI"); case Builtin::BI__builtin_get_device_side_mangled_name: llvm_unreachable("BI__builtin_get_device_side_mangled_name NYI"); + + // From https://clang.llvm.org/docs/LanguageExtensions.html#builtin-isfpclass + // : + // + // The `__builtin_isfpclass()` builtin is a generalization of functions + // isnan, isinf, isfinite and some others defined by the C standard. It tests + // if the floating-point value, specified by the first argument, falls into + // any of data classes, specified by the second argument. + case Builtin::BI__builtin_isnan: { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); + mlir::Value V = buildScalarExpr(E->getArg(0)); + if (mlir::Value Result = tryUseTestFPKind(*this, BuiltinID, V)) + return RValue::get(Result); + mlir::Location Loc = getLoc(E->getBeginLoc()); + // FIXME: We should use builder.createZExt once createZExt is available. + return RValue::get(builder.createZExtOrBitCast( + Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcNan), + ConvertType(E->getType()))); + } + + case Builtin::BI__builtin_issignaling: { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); + mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Location Loc = getLoc(E->getBeginLoc()); + // FIXME: We should use builder.createZExt once createZExt is available. + return RValue::get(builder.createZExtOrBitCast( + Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcSNan), + ConvertType(E->getType()))); + } + + case Builtin::BI__builtin_isinf: { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); + mlir::Value V = buildScalarExpr(E->getArg(0)); + if (mlir::Value Result = tryUseTestFPKind(*this, BuiltinID, V)) + return RValue::get(Result); + mlir::Location Loc = getLoc(E->getBeginLoc()); + // FIXME: We should use builder.createZExt once createZExt is available. + return RValue::get(builder.createZExtOrBitCast( + Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcInf), + ConvertType(E->getType()))); + } + + case Builtin::BIfinite: + case Builtin::BI__finite: + case Builtin::BIfinitef: + case Builtin::BI__finitef: + case Builtin::BIfinitel: + case Builtin::BI__finitel: + case Builtin::BI__builtin_isfinite: { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); + mlir::Value V = buildScalarExpr(E->getArg(0)); + if (mlir::Value Result = tryUseTestFPKind(*this, BuiltinID, V)) + return RValue::get(Result); + mlir::Location Loc = getLoc(E->getBeginLoc()); + // FIXME: We should use builder.createZExt once createZExt is available. + return RValue::get(builder.createZExtOrBitCast( + Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcFinite), + ConvertType(E->getType()))); + } + + case Builtin::BI__builtin_isnormal: { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); + mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Location Loc = getLoc(E->getBeginLoc()); + // FIXME: We should use builder.createZExt once createZExt is available. + return RValue::get(builder.createZExtOrBitCast( + Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcNormal), + ConvertType(E->getType()))); + } + + case Builtin::BI__builtin_issubnormal: { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); + mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Location Loc = getLoc(E->getBeginLoc()); + // FIXME: We should use builder.createZExt once createZExt is available. + return RValue::get(builder.createZExtOrBitCast( + Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcSubnormal), + ConvertType(E->getType()))); + } + + case Builtin::BI__builtin_iszero: { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); + mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Location Loc = getLoc(E->getBeginLoc()); + // FIXME: We should use builder.createZExt once createZExt is available. + return RValue::get(builder.createZExtOrBitCast( + Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcZero), + ConvertType(E->getType()))); + } + + case Builtin::BI__builtin_isfpclass: { + Expr::EvalResult Result; + if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getASTContext())) + break; + + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); + mlir::Value V = buildScalarExpr(E->getArg(0)); + uint64_t Test = Result.Val.getInt().getLimitedValue(); + mlir::Location Loc = getLoc(E->getBeginLoc()); + + // FIXME: We should use builder.createZExt once createZExt is available. + return RValue::get(builder.createZExtOrBitCast( + Loc, builder.createIsFPClass(Loc, V, Test), ConvertType(E->getType()))); + } } // If this is an alias for a lib function (e.g. __builtin_sin), emit diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index 994fa357c864..873b2ef0fb00 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -25,6 +25,7 @@ namespace cir { class CIRGenFunction; class CIRGenModule; +class CIRGenBuilderTy; /// This class organizes various target-specific codegeneration issues, like /// target-specific attributes, builtins and so on. @@ -43,6 +44,15 @@ class TargetCIRGenInfo { return false; } + /// Performs a target specific test of a floating point value for things + /// like IsNaN, Infinity, ... Nullptr is returned if no implementation + /// exists. + virtual mlir::Value testFPKind(mlir::Value V, unsigned BuiltinID, + CIRGenBuilderTy &Builder, + CIRGenModule &CGM) const { + return {}; + } + /// Corrects the MLIR type for a given constraint and "usual" /// type. /// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0af6065829c5..08313ac9eef6 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4117,6 +4117,33 @@ class CIRThrowOpLowering } }; +class CIRIsFPClassOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::IsFPClassOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto src = adaptor.getSrc(); + auto flags = adaptor.getFlags(); + auto retTy = rewriter.getI1Type(); + + auto loc = op->getLoc(); + + auto intrinsic = + rewriter.create(loc, retTy, src, flags); + // FIMXE: CIR now will convert cir::BoolType to i8 type unconditionally. + // Remove this conversion after we fix + // https://github.com/llvm/clangir/issues/480 + auto converted = rewriter.create( + loc, rewriter.getI8Type(), intrinsic->getResult(0)); + + rewriter.replaceOp(op, converted); + return mlir::success(); + } +}; + void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter, mlir::DataLayout &dataLayout) { @@ -4149,7 +4176,7 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, CIRFreeExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, CIRBaseClassAddrOpLowering, - CIRVTTAddrPointOpLowering + CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/test/CIR/CodeGen/builtin-isfpclass.c b/clang/test/CIR/CodeGen/builtin-isfpclass.c new file mode 100644 index 000000000000..d1e295203dfd --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-isfpclass.c @@ -0,0 +1,129 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +int finite(double); + +// CHECK: cir.func {{.*}}@test_is_finite +void test_is_finite(__fp16 *H, float F, double D, long double LD) { + volatile int res; + res = __builtin_isinf(*H); + // CHECK: cir.is_fp_class %{{.*}}, 516 : (!cir.f16) -> !cir.bool + + res = __builtin_isinf(F); + // CHECK: cir.is_fp_class %{{.*}}, 516 : (!cir.float) -> !cir.bool + + res = __builtin_isinf(D); + // CHECK: cir.is_fp_class %{{.*}}, 516 : (!cir.double) -> !cir.bool + + res = __builtin_isinf(LD); + // CHECK: cir.is_fp_class %{{.*}}, 516 : (!cir.long_double) -> !cir.bool + + res = __builtin_isfinite(*H); + // CHECK: cir.is_fp_class %{{.*}}, 504 : (!cir.f16) -> !cir.bool + res = __builtin_isfinite(F); + // CHECK: cir.is_fp_class %{{.*}}, 504 : (!cir.float) -> !cir.bool + res = finite(D); + // CHECK: cir.is_fp_class %{{.*}}, 504 : (!cir.double) -> !cir.bool + + res = __builtin_isnormal(*H); + // CHECK: cir.is_fp_class %{{.*}}, 264 : (!cir.f16) -> !cir.bool + res = __builtin_isnormal(F); + // CHECK: cir.is_fp_class %{{.*}}, 264 : (!cir.float) -> !cir.bool + + res = __builtin_issubnormal(F); + // CHECK: cir.is_fp_class %{{.*}}, 144 : (!cir.float) -> !cir.bool + res = __builtin_iszero(F); + // CHECK: cir.is_fp_class %{{.*}}, 96 : (!cir.float) -> !cir.bool + res = __builtin_issignaling(F); + // CHECK: cir.is_fp_class %{{.*}}, 1 : (!cir.float) -> !cir.bool +} + +_Bool check_isfpclass_finite(float x) { + return __builtin_isfpclass(x, 504 /*Finite*/); +} + +// CHECK: cir.func {{.*}}@check_isfpclass_finite +// CHECK: cir.is_fp_class %{{.*}}, 504 : (!cir.float) + +_Bool check_isfpclass_nan_f32(float x) { + return __builtin_isfpclass(x, 3 /*NaN*/); +} + +// CHECK: cir.func {{.*}}@check_isfpclass_nan_f32 +// CHECK: cir.is_fp_class %{{.*}}, 3 : (!cir.float) + + +_Bool check_isfpclass_snan_f64(double x) { + return __builtin_isfpclass(x, 1 /*SNaN*/); +} + +// CHECK: cir.func {{.*}}@check_isfpclass_snan_f64 +// CHECK: cir.is_fp_class %{{.*}}, 1 : (!cir.double) + + +_Bool check_isfpclass_zero_f16(_Float16 x) { + return __builtin_isfpclass(x, 96 /*Zero*/); +} + +// CHECK: cir.func {{.*}}@check_isfpclass_zero_f16 +// CHECK: cir.is_fp_class %{{.*}}, 96 : (!cir.f16) + +// Update when we support FP pragma in functions and can convert BoolType in prvalue to i1. + +// _Bool check_isfpclass_finite_strict(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfpclass(x, 504 /*Finite*/); +// } +// +// _Bool check_isfpclass_nan_f32_strict(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfpclass(x, 3 /*NaN*/); +// } +// +// _Bool check_isfpclass_snan_f64_strict(double x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfpclass(x, 1 /*NaN*/); +// } +// +// _Bool check_isfpclass_zero_f16_strict(_Float16 x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfpclass(x, 96 /*Zero*/); +// } +// +// _Bool check_isnan(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isnan(x); +// } +// +// _Bool check_isinf(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isinf(x); +// } +// +// _Bool check_isfinite(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfinite(x); +// } +// +// _Bool check_isnormal(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isnormal(x); +// } +// +// typedef float __attribute__((ext_vector_type(4))) float4; +// typedef double __attribute__((ext_vector_type(4))) double4; +// typedef int __attribute__((ext_vector_type(4))) int4; +// typedef long __attribute__((ext_vector_type(4))) long4; +// +// int4 check_isfpclass_nan_v4f32(float4 x) { +// return __builtin_isfpclass(x, 3 /*NaN*/); +// } +// +// int4 check_isfpclass_nan_strict_v4f32(float4 x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfpclass(x, 3 /*NaN*/); +// } +// +// long4 check_isfpclass_nan_v4f64(double4 x) { +// return __builtin_isfpclass(x, 3 /*NaN*/); +// } diff --git a/clang/test/CIR/Lowering/builtin-isfpclass.c b/clang/test/CIR/Lowering/builtin-isfpclass.c new file mode 100644 index 000000000000..630ded117ab3 --- /dev/null +++ b/clang/test/CIR/Lowering/builtin-isfpclass.c @@ -0,0 +1,125 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s + +int finite(double); + +// CHECK: define {{.*}}@test_is_finite +void test_is_finite(__fp16 *H, float F, double D, long double LD) { + volatile int res; + res = __builtin_isinf(*H); + // CHECK: call i1 @llvm.is.fpclass.f16(half %{{.*}}, i32 516) + res = __builtin_isinf(F); + // CHECK: call i1 @llvm.is.fpclass.f32(float %{{.*}}, i32 516) + res = __builtin_isinf(D); + // CHECK: call i1 @llvm.is.fpclass.f64(double %{{.*}}, i32 516) + res = __builtin_isinf(LD); + // CHECK: call i1 @llvm.is.fpclass.f80(x86_fp80 %{{.*}}, i32 516) + + res = __builtin_isfinite(*H); + // CHECK: call i1 @llvm.is.fpclass.f16(half %{{.*}}, i32 504) + res = __builtin_isfinite(F); + // CHECK: call i1 @llvm.is.fpclass.f32(float %{{.*}}, i32 504) + res = finite(D); + // CHECK: call i1 @llvm.is.fpclass.f64(double %{{.*}}, i32 504) + + res = __builtin_isnormal(*H); + // CHECK: call i1 @llvm.is.fpclass.f16(half %{{.*}}, i32 264) + res = __builtin_isnormal(F); + // CHECK: call i1 @llvm.is.fpclass.f32(float %{{.*}}, i32 264) + + res = __builtin_issubnormal(F); + // CHECK: call i1 @llvm.is.fpclass.f32(float %{{.*}}, i32 144) + res = __builtin_iszero(F); + // CHECK: call i1 @llvm.is.fpclass.f32(float %{{.*}}, i32 96) + res = __builtin_issignaling(F); + // CHECK: call i1 @llvm.is.fpclass.f32(float %{{.*}}, i32 1) +} + +_Bool check_isfpclass_finite(float x) { + return __builtin_isfpclass(x, 504 /*Finite*/); +} + +// CHECK: define {{.*}}@check_isfpclass_finite +// CHECK: call i1 @llvm.is.fpclass.f32(float %{{.*}}, i32 504) + +_Bool check_isfpclass_nan_f32(float x) { + return __builtin_isfpclass(x, 3 /*NaN*/); +} + +// CHECK: define {{.*}}@check_isfpclass_nan_f32 +// CHECK: call i1 @llvm.is.fpclass.f32(float %{{.*}}, i32 3) + +_Bool check_isfpclass_snan_f64(double x) { + return __builtin_isfpclass(x, 1 /*SNaN*/); +} + +// CHECK: define {{.*}}@check_isfpclass_snan_f64 +// CHECK: call i1 @llvm.is.fpclass.f64(double %{{.*}}, i32 1) + + +_Bool check_isfpclass_zero_f16(_Float16 x) { + return __builtin_isfpclass(x, 96 /*Zero*/); +} + +// CHECK: define {{.*}}@check_isfpclass_zero_f16 +// CHECK: call i1 @llvm.is.fpclass.f16(half %{{.*}}, i32 96) + +// Update when we support FP pragma in functions. + +// _Bool check_isfpclass_finite_strict(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfpclass(x, 504 /*Finite*/); +// } +// +// _Bool check_isfpclass_nan_f32_strict(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfpclass(x, 3 /*NaN*/); +// } +// +// _Bool check_isfpclass_snan_f64_strict(double x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfpclass(x, 1 /*NaN*/); +// } +// +// _Bool check_isfpclass_zero_f16_strict(_Float16 x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfpclass(x, 96 /*Zero*/); +// } +// +// _Bool check_isnan(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isnan(x); +// } +// +// _Bool check_isinf(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isinf(x); +// } +// +// _Bool check_isfinite(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfinite(x); +// } +// +// _Bool check_isnormal(float x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isnormal(x); +// } +// +// typedef float __attribute__((ext_vector_type(4))) float4; +// typedef double __attribute__((ext_vector_type(4))) double4; +// typedef int __attribute__((ext_vector_type(4))) int4; +// typedef long __attribute__((ext_vector_type(4))) long4; +// +// int4 check_isfpclass_nan_v4f32(float4 x) { +// return __builtin_isfpclass(x, 3 /*NaN*/); +// } +// +// int4 check_isfpclass_nan_strict_v4f32(float4 x) { +// #pragma STDC FENV_ACCESS ON +// return __builtin_isfpclass(x, 3 /*NaN*/); +// } +// +// long4 check_isfpclass_nan_v4f64(double4 x) { +// return __builtin_isfpclass(x, 3 /*NaN*/); +// } From 8c8a71451789bfdfb3a49b51b85124b017401503 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 21 Oct 2024 12:06:48 -0700 Subject: [PATCH 1974/2301] [CIR][NFC] Move callconv tests around --- .../AArch64/basic.cpp} | 0 .../x86_64/basic.cpp} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename clang/test/CIR/{Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp => CallConvLowering/AArch64/basic.cpp} (100%) rename clang/test/CIR/{Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp => CallConvLowering/x86_64/basic.cpp} (100%) diff --git a/clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp b/clang/test/CIR/CallConvLowering/AArch64/basic.cpp similarity index 100% rename from clang/test/CIR/Transforms/Target/aarch64/aarch64-call-conv-lowering-pass.cpp rename to clang/test/CIR/CallConvLowering/AArch64/basic.cpp diff --git a/clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp b/clang/test/CIR/CallConvLowering/x86_64/basic.cpp similarity index 100% rename from clang/test/CIR/Transforms/Target/x86_64/x86_64-call-conv-lowering-pass.cpp rename to clang/test/CIR/CallConvLowering/x86_64/basic.cpp From 756bfc251f824ae4e3285450e99b2e1e5cb98c9a Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Mon, 21 Oct 2024 13:34:42 -0700 Subject: [PATCH 1975/2301] [CIR][Asm] Implement parser for cir.func annotations (#981) --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 488 ++++++++++-------- clang/test/CIR/IR/annotations.cir | 11 +- 3 files changed, 277 insertions(+), 224 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 292d24315518..8b18cae1da5b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -994,7 +994,7 @@ def VisibilityAttr : CIR_Attr<"Visibility", "visibility"> { def ExtraFuncAttr : CIR_Attr<"ExtraFuncAttributes", "extra"> { let summary = "Represents aggregated attributes for a function"; let description = [{ - This is a wrapper of dictionary attrbiute that contains extra attributes of + This is a wrapper of attribute dictionary that contains extra attributes of a function. }]; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 5c412597c624..d22572205cf5 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -16,6 +16,7 @@ #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Interfaces/CIRLoopOpInterface.h" +#include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" #include #include @@ -41,7 +42,6 @@ #include "mlir/Support/LogicalResult.h" using namespace mlir; -using namespace mlir::cir; #include "clang/CIR/Dialect/IR/CIROpsEnums.cpp.inc" #include "clang/CIR/Dialect/IR/CIROpsStructs.cpp.inc" @@ -58,7 +58,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { using OpAsmDialectInterface::OpAsmDialectInterface; AliasResult getAlias(Type type, raw_ostream &os) const final { - if (auto structType = dyn_cast(type)) { + if (auto structType = dyn_cast(type)) { StringAttr nameAttr = structType.getName(); if (!nameAttr) os << "ty_anon_" << structType.getKindAsStr(); @@ -66,7 +66,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << "ty_" << nameAttr.getValue(); return AliasResult::OverridableAlias; } - if (auto intType = dyn_cast(type)) { + if (auto intType = dyn_cast(type)) { // We only provide alias for standard integer types (i.e. integer types // whose width is divisible by 8). if (intType.getWidth() % 8 != 0) @@ -74,7 +74,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << intType.getAlias(); return AliasResult::OverridableAlias; } - if (auto voidType = dyn_cast(type)) { + if (auto voidType = dyn_cast(type)) { os << voidType.getAlias(); return AliasResult::OverridableAlias; } @@ -114,7 +114,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { /// Dialect initialization, the instance will be owned by the context. This is /// the point of registration of types and operations for the dialect. -void cir::CIRDialect::initialize() { +void mlir::cir::CIRDialect::initialize() { registerTypes(); registerAttributes(); addOperations< @@ -124,10 +124,10 @@ void cir::CIRDialect::initialize() { addInterfaces(); } -Operation *cir::CIRDialect::materializeConstant(mlir::OpBuilder &builder, - mlir::Attribute value, - mlir::Type type, - mlir::Location loc) { +Operation *mlir::cir::CIRDialect::materializeConstant(mlir::OpBuilder &builder, + mlir::Attribute value, + mlir::Type type, + mlir::Location loc) { return builder.create( loc, type, mlir::cast(value)); } @@ -152,9 +152,13 @@ namespace { template struct EnumTraits {}; #define REGISTER_ENUM_TYPE(Ty) \ - template <> struct EnumTraits { \ - static StringRef stringify(Ty value) { return stringify##Ty(value); } \ - static unsigned getMaxEnumVal() { return getMaxEnumValFor##Ty(); } \ + template <> struct EnumTraits { \ + static StringRef stringify(mlir::cir::Ty value) { \ + return stringify##Ty(value); \ + } \ + static unsigned getMaxEnumVal() { \ + return mlir::cir::getMaxEnumValFor##Ty(); \ + } \ } #define REGISTER_ENUM_TYPE_WITH_NS(NS, Ty) \ template <> struct EnumTraits { \ @@ -166,7 +170,7 @@ template struct EnumTraits {}; REGISTER_ENUM_TYPE(GlobalLinkageKind); REGISTER_ENUM_TYPE(CallingConv); -REGISTER_ENUM_TYPE_WITH_NS(sob, SignedOverflowBehavior); +REGISTER_ENUM_TYPE_WITH_NS(mlir::cir::sob, SignedOverflowBehavior); } // namespace /// Parse an enum from the keyword, or default to the provided default value. @@ -219,7 +223,7 @@ LogicalResult ensureRegionTerm(OpAsmParser &parser, Region ®ion, return parser.emitError(errLoc, "empty region must not omit terminator"); // Terminator was omited correctly: recreate it. - region.back().push_back(builder.create(eLoc)); + region.back().push_back(builder.create(eLoc)); return success(); } @@ -227,7 +231,7 @@ LogicalResult ensureRegionTerm(OpAsmParser &parser, Region ®ion, bool omitRegionTerm(mlir::Region &r) { const auto singleNonEmptyBlock = r.hasOneBlock() && !r.back().empty(); const auto yieldsNothing = [&r]() { - YieldOp y = dyn_cast(r.back().getTerminator()); + auto y = dyn_cast(r.back().getTerminator()); return y && y.getArgs().empty(); }; return singleNonEmptyBlock && yieldsNothing(); @@ -236,10 +240,10 @@ bool omitRegionTerm(mlir::Region &r) { void printVisibilityAttr(OpAsmPrinter &printer, mlir::cir::VisibilityAttr &visibility) { switch (visibility.getValue()) { - case VisibilityKind::Hidden: + case mlir::cir::VisibilityKind::Hidden: printer << "hidden"; break; - case VisibilityKind::Protected: + case mlir::cir::VisibilityKind::Protected: printer << "protected"; break; default: @@ -249,14 +253,14 @@ void printVisibilityAttr(OpAsmPrinter &printer, void parseVisibilityAttr(OpAsmParser &parser, mlir::cir::VisibilityAttr &visibility) { - VisibilityKind visibilityKind; + mlir::cir::VisibilityKind visibilityKind; if (parser.parseOptionalKeyword("hidden").succeeded()) { - visibilityKind = VisibilityKind::Hidden; + visibilityKind = mlir::cir::VisibilityKind::Hidden; } else if (parser.parseOptionalKeyword("protected").succeeded()) { - visibilityKind = VisibilityKind::Protected; + visibilityKind = mlir::cir::VisibilityKind::Protected; } else { - visibilityKind = VisibilityKind::Default; + visibilityKind = mlir::cir::VisibilityKind::Default; } visibility = @@ -302,10 +306,11 @@ static void printOmitDefaultVisibility(mlir::OpAsmPrinter &printer, // AllocaOp //===----------------------------------------------------------------------===// -void AllocaOp::build(::mlir::OpBuilder &odsBuilder, - ::mlir::OperationState &odsState, ::mlir::Type addr, - ::mlir::Type allocaType, ::llvm::StringRef name, - ::mlir::IntegerAttr alignment) { +void mlir::cir::AllocaOp::build(::mlir::OpBuilder &odsBuilder, + ::mlir::OperationState &odsState, + ::mlir::Type addr, ::mlir::Type allocaType, + ::llvm::StringRef name, + ::mlir::IntegerAttr alignment) { odsState.addAttribute(getAllocaTypeAttrName(odsState.name), ::mlir::TypeAttr::get(allocaType)); odsState.addAttribute(getNameAttrName(odsState.name), @@ -320,7 +325,7 @@ void AllocaOp::build(::mlir::OpBuilder &odsBuilder, // BreakOp //===----------------------------------------------------------------------===// -LogicalResult BreakOp::verify() { +LogicalResult mlir::cir::BreakOp::verify() { if (!getOperation()->getParentOfType() && !getOperation()->getParentOfType()) return emitOpError("must be within a loop or switch"); @@ -334,7 +339,7 @@ LogicalResult BreakOp::verify() { //===---------------------------------- // BranchOpTerminatorInterface Methods -void ConditionOp::getSuccessorRegions( +void mlir::cir::ConditionOp::getSuccessorRegions( ArrayRef operands, SmallVectorImpl ®ions) { // TODO(cir): The condition value may be folded to a constant, narrowing // down its list of possible successors. @@ -352,12 +357,12 @@ void ConditionOp::getSuccessorRegions( } MutableOperandRange -ConditionOp::getMutableSuccessorOperands(RegionBranchPoint point) { +mlir::cir::ConditionOp::getMutableSuccessorOperands(RegionBranchPoint point) { // No values are yielded to the successor region. return MutableOperandRange(getOperation(), 0, 0); } -LogicalResult ConditionOp::verify() { +LogicalResult mlir::cir::ConditionOp::verify() { if (!isa(getOperation()->getParentOp())) return emitOpError("condition must be within a conditional region"); return success(); @@ -369,19 +374,19 @@ LogicalResult ConditionOp::verify() { static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, mlir::Attribute attrType) { - if (isa(attrType)) { + if (isa(attrType)) { if (::mlir::isa<::mlir::cir::PointerType>(opType)) return success(); return op->emitOpError("nullptr expects pointer type"); } - if (isa(attrType)) { + if (isa(attrType)) { // More detailed type verifications are already done in // DataMemberAttr::verify. Don't need to repeat here. return success(); } - if (isa(attrType)) { + if (isa(attrType)) { if (::mlir::isa<::mlir::cir::StructType, ::mlir::cir::ArrayType, ::mlir::cir::ComplexType>(opType)) return success(); @@ -427,20 +432,22 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, << cast(attrType).getType() << " not supported"; } -LogicalResult ConstantOp::verify() { +LogicalResult mlir::cir::ConstantOp::verify() { // ODS already generates checks to make sure the result type is valid. We just // need to additionally check that the value's attribute type is consistent // with the result type. return checkConstantTypes(getOperation(), getType(), getValue()); } -OpFoldResult ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } +OpFoldResult mlir::cir::ConstantOp::fold(FoldAdaptor /*adaptor*/) { + return getValue(); +} //===----------------------------------------------------------------------===// // ContinueOp //===----------------------------------------------------------------------===// -LogicalResult ContinueOp::verify() { +LogicalResult mlir::cir::ContinueOp::verify() { if (!this->getOperation()->getParentOfType()) return emitOpError("must be within a loop"); return success(); @@ -450,7 +457,7 @@ LogicalResult ContinueOp::verify() { // CastOp //===----------------------------------------------------------------------===// -LogicalResult CastOp::verify() { +LogicalResult mlir::cir::CastOp::verify() { auto resType = getResult().getType(); auto srcType = getSrc().getType(); @@ -714,14 +721,14 @@ bool isIntOrBoolCast(mlir::cir::CastOp op) { kind == mlir::cir::CastKind::integral; } -Value tryFoldCastChain(CastOp op) { - CastOp head = op, tail = op; +Value tryFoldCastChain(mlir::cir::CastOp op) { + mlir::cir::CastOp head = op, tail = op; while (op) { if (!isIntOrBoolCast(op)) break; head = op; - op = dyn_cast_or_null(head.getSrc().getDefiningOp()); + op = dyn_cast_or_null(head.getSrc().getDefiningOp()); } if (head == tail) @@ -743,7 +750,7 @@ Value tryFoldCastChain(CastOp op) { return {}; } -OpFoldResult CastOp::fold(FoldAdaptor adaptor) { +OpFoldResult mlir::cir::CastOp::fold(FoldAdaptor adaptor) { if (getSrc().getType() == getResult().getType()) { switch (getKind()) { case mlir::cir::CastKind::integral: { @@ -769,7 +776,7 @@ OpFoldResult CastOp::fold(FoldAdaptor adaptor) { } static bool isBoolNot(mlir::cir::UnaryOp op) { - return isa(op.getInput().getType()) && + return isa(op.getInput().getType()) && op.getKind() == mlir::cir::UnaryOpKind::Not; } @@ -782,7 +789,7 @@ static bool isBoolNot(mlir::cir::UnaryOp op) { // ``` // // and the argument of the first one (%0) will be used instead. -OpFoldResult UnaryOp::fold(FoldAdaptor adaptor) { +OpFoldResult mlir::cir::UnaryOp::fold(FoldAdaptor adaptor) { if (isBoolNot(*this)) if (auto previous = dyn_cast_or_null(getInput().getDefiningOp())) if (isBoolNot(previous)) @@ -795,7 +802,7 @@ OpFoldResult UnaryOp::fold(FoldAdaptor adaptor) { // DynamicCastOp //===----------------------------------------------------------------------===// -LogicalResult DynamicCastOp::verify() { +LogicalResult mlir::cir::DynamicCastOp::verify() { auto resultPointeeTy = mlir::cast(getType()).getPointee(); if (!mlir::isa(resultPointeeTy)) @@ -809,7 +816,7 @@ LogicalResult DynamicCastOp::verify() { // ComplexCreateOp //===----------------------------------------------------------------------===// -LogicalResult ComplexCreateOp::verify() { +LogicalResult mlir::cir::ComplexCreateOp::verify() { if (getType().getElementTy() != getReal().getType()) { emitOpError() << "operand type of cir.complex.create does not match its result type"; @@ -819,7 +826,7 @@ LogicalResult ComplexCreateOp::verify() { return success(); } -OpFoldResult ComplexCreateOp::fold(FoldAdaptor adaptor) { +OpFoldResult mlir::cir::ComplexCreateOp::fold(FoldAdaptor adaptor) { auto real = adaptor.getReal(); auto imag = adaptor.getImag(); @@ -843,7 +850,7 @@ OpFoldResult ComplexCreateOp::fold(FoldAdaptor adaptor) { // ComplexRealOp and ComplexImagOp //===----------------------------------------------------------------------===// -LogicalResult ComplexRealOp::verify() { +LogicalResult mlir::cir::ComplexRealOp::verify() { if (getType() != getOperand().getType().getElementTy()) { emitOpError() << "cir.complex.real result type does not match operand type"; return failure(); @@ -851,7 +858,7 @@ LogicalResult ComplexRealOp::verify() { return success(); } -OpFoldResult ComplexRealOp::fold(FoldAdaptor adaptor) { +OpFoldResult mlir::cir::ComplexRealOp::fold(FoldAdaptor adaptor) { auto input = mlir::cast_if_present(adaptor.getOperand()); if (input) @@ -859,7 +866,7 @@ OpFoldResult ComplexRealOp::fold(FoldAdaptor adaptor) { return nullptr; } -LogicalResult ComplexImagOp::verify() { +LogicalResult mlir::cir::ComplexImagOp::verify() { if (getType() != getOperand().getType().getElementTy()) { emitOpError() << "cir.complex.imag result type does not match operand type"; return failure(); @@ -867,7 +874,7 @@ LogicalResult ComplexImagOp::verify() { return success(); } -OpFoldResult ComplexImagOp::fold(FoldAdaptor adaptor) { +OpFoldResult mlir::cir::ComplexImagOp::fold(FoldAdaptor adaptor) { auto input = mlir::cast_if_present(adaptor.getOperand()); if (input) @@ -879,7 +886,7 @@ OpFoldResult ComplexImagOp::fold(FoldAdaptor adaptor) { // ComplexRealPtrOp and ComplexImagPtrOp //===----------------------------------------------------------------------===// -LogicalResult ComplexRealPtrOp::verify() { +LogicalResult mlir::cir::ComplexRealPtrOp::verify() { auto resultPointeeTy = mlir::cast(getType()).getPointee(); auto operandPtrTy = @@ -896,7 +903,7 @@ LogicalResult ComplexRealPtrOp::verify() { return success(); } -LogicalResult ComplexImagPtrOp::verify() { +LogicalResult mlir::cir::ComplexImagPtrOp::verify() { auto resultPointeeTy = mlir::cast(getType()).getPointee(); auto operandPtrTy = @@ -917,7 +924,7 @@ LogicalResult ComplexImagPtrOp::verify() { // VecCreateOp //===----------------------------------------------------------------------===// -LogicalResult VecCreateOp::verify() { +LogicalResult mlir::cir::VecCreateOp::verify() { // Verify that the number of arguments matches the number of elements in the // vector, and that the type of all the arguments matches the type of the // elements in the vector. @@ -942,7 +949,7 @@ LogicalResult VecCreateOp::verify() { // VecTernaryOp //===----------------------------------------------------------------------===// -LogicalResult VecTernaryOp::verify() { +LogicalResult mlir::cir::VecTernaryOp::verify() { // Verify that the condition operand has the same number of elements as the // other operands. (The automatic verification already checked that all // operands are vector types and that the second and third operands are the @@ -960,7 +967,7 @@ LogicalResult VecTernaryOp::verify() { // VecShuffle //===----------------------------------------------------------------------===// -LogicalResult VecShuffleOp::verify() { +LogicalResult mlir::cir::VecShuffleOp::verify() { // The number of elements in the indices array must match the number of // elements in the result type. if (getIndices().size() != getResult().getType().getSize()) { @@ -987,7 +994,7 @@ LogicalResult VecShuffleOp::verify() { // VecShuffleDynamic //===----------------------------------------------------------------------===// -LogicalResult VecShuffleDynamicOp::verify() { +LogicalResult mlir::cir::VecShuffleDynamicOp::verify() { // The number of elements in the two input vectors must match. if (getVec().getType().getSize() != mlir::cast(getIndices().getType()).getSize()) { @@ -1001,8 +1008,8 @@ LogicalResult VecShuffleDynamicOp::verify() { // ReturnOp //===----------------------------------------------------------------------===// -static mlir::LogicalResult checkReturnAndFunction(ReturnOp op, - cir::FuncOp function) { +static mlir::LogicalResult checkReturnAndFunction(mlir::cir::ReturnOp op, + mlir::cir::FuncOp function) { // ReturnOps currently only have a single optional operand. if (op.getNumOperands() > 1) return op.emitOpError() << "expects at most 1 return operand"; @@ -1019,7 +1026,7 @@ static mlir::LogicalResult checkReturnAndFunction(ReturnOp op, return mlir::success(); } -mlir::LogicalResult ReturnOp::verify() { +mlir::LogicalResult mlir::cir::ReturnOp::verify() { // Returns can be present in multiple different scopes, get the // wrapping function and start from there. auto *fnOp = getOperation()->getParentOp(); @@ -1037,7 +1044,7 @@ mlir::LogicalResult ReturnOp::verify() { // ThrowOp //===----------------------------------------------------------------------===// -mlir::LogicalResult ThrowOp::verify() { +mlir::LogicalResult mlir::cir::ThrowOp::verify() { // For the no-rethrow version, it must have at least the exception pointer. if (rethrows()) return success(); @@ -1055,7 +1062,8 @@ mlir::LogicalResult ThrowOp::verify() { // IfOp //===----------------------------------------------------------------------===// -ParseResult cir::IfOp::parse(OpAsmParser &parser, OperationState &result) { +ParseResult mlir::cir::IfOp::parse(OpAsmParser &parser, + OperationState &result) { // Create the regions for 'then'. result.regions.reserve(2); Region *thenRegion = result.addRegion(); @@ -1092,7 +1100,7 @@ ParseResult cir::IfOp::parse(OpAsmParser &parser, OperationState &result) { return success(); } -void cir::IfOp::print(OpAsmPrinter &p) { +void mlir::cir::IfOp::print(OpAsmPrinter &p) { p << " " << getCondition() << " "; auto &thenRegion = this->getThenRegion(); p.printRegion(thenRegion, @@ -1119,8 +1127,8 @@ void mlir::cir::buildTerminatedBody(OpBuilder &builder, Location loc) {} /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void IfOp::getSuccessorRegions(mlir::RegionBranchPoint point, - SmallVectorImpl ®ions) { +void mlir::cir::IfOp::getSuccessorRegions( + mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // The `then` and the `else` region branch back to the parent operation. if (!point.isParent()) { regions.push_back(RegionSuccessor()); @@ -1151,10 +1159,10 @@ void IfOp::getSuccessorRegions(mlir::RegionBranchPoint point, return; } -void IfOp::build(OpBuilder &builder, OperationState &result, Value cond, - bool withElseRegion, - function_ref thenBuilder, - function_ref elseBuilder) { +void mlir::cir::IfOp::build( + OpBuilder &builder, OperationState &result, Value cond, bool withElseRegion, + function_ref thenBuilder, + function_ref elseBuilder) { assert(thenBuilder && "the builder callback for 'then' must be present"); result.addOperands(cond); @@ -1172,7 +1180,7 @@ void IfOp::build(OpBuilder &builder, OperationState &result, Value cond, elseBuilder(builder, result.location); } -LogicalResult IfOp::verify() { return success(); } +LogicalResult mlir::cir::IfOp::verify() { return success(); } //===----------------------------------------------------------------------===// // ScopeOp @@ -1183,8 +1191,8 @@ LogicalResult IfOp::verify() { return success(); } /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void ScopeOp::getSuccessorRegions(mlir::RegionBranchPoint point, - SmallVectorImpl ®ions) { +void mlir::cir::ScopeOp::getSuccessorRegions( + mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // The only region always branch back to the parent operation. if (!point.isParent()) { regions.push_back(RegionSuccessor(getODSResults(0))); @@ -1195,7 +1203,7 @@ void ScopeOp::getSuccessorRegions(mlir::RegionBranchPoint point, regions.push_back(RegionSuccessor(&getScopeRegion())); } -void ScopeOp::build( +void mlir::cir::ScopeOp::build( OpBuilder &builder, OperationState &result, function_ref scopeBuilder) { assert(scopeBuilder && "the builder callback for 'then' must be present"); @@ -1211,8 +1219,9 @@ void ScopeOp::build( result.addTypes(TypeRange{yieldTy}); } -void ScopeOp::build(OpBuilder &builder, OperationState &result, - function_ref scopeBuilder) { +void mlir::cir::ScopeOp::build( + OpBuilder &builder, OperationState &result, + function_ref scopeBuilder) { assert(scopeBuilder && "the builder callback for 'then' must be present"); OpBuilder::InsertionGuard guard(builder); Region *scopeRegion = result.addRegion(); @@ -1220,13 +1229,13 @@ void ScopeOp::build(OpBuilder &builder, OperationState &result, scopeBuilder(builder, result.location); } -LogicalResult ScopeOp::verify() { return success(); } +LogicalResult mlir::cir::ScopeOp::verify() { return success(); } //===----------------------------------------------------------------------===// // TryOp //===----------------------------------------------------------------------===// -void TryOp::build( +void mlir::cir::TryOp::build( OpBuilder &builder, OperationState &result, function_ref tryBodyBuilder, function_ref catchBuilder) { @@ -1243,29 +1252,29 @@ void TryOp::build( catchBuilder(builder, result.location, result); } -mlir::Region *TryOp::getCatchLastRegion() { +mlir::Region *mlir::cir::TryOp::getCatchLastRegion() { unsigned numCatchRegions = getCatchRegions().size(); assert(numCatchRegions && "expected at least one region"); auto &lastRegion = getCatchRegions()[numCatchRegions - 1]; return &lastRegion; } -mlir::Block *TryOp::getCatchUnwindEntryBlock() { +mlir::Block *mlir::cir::TryOp::getCatchUnwindEntryBlock() { return &getCatchLastRegion()->getBlocks().front(); } -mlir::Block *TryOp::getCatchAllEntryBlock() { +mlir::Block *mlir::cir::TryOp::getCatchAllEntryBlock() { return &getCatchLastRegion()->getBlocks().front(); } -bool TryOp::isCatchAllOnly() { +bool mlir::cir::TryOp::isCatchAllOnly() { mlir::ArrayAttr catchAttrList = getCatchTypesAttr(); return catchAttrList.size() == 1 && isa(catchAttrList[0]); } -void TryOp::getSuccessorRegions(mlir::RegionBranchPoint point, - SmallVectorImpl ®ions) { +void mlir::cir::TryOp::getSuccessorRegions( + mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // If any index all the underlying regions branch back to the parent // operation. if (!point.isParent()) { @@ -1283,7 +1292,7 @@ void TryOp::getSuccessorRegions(mlir::RegionBranchPoint point, regions.push_back(RegionSuccessor(&r)); } -void printCatchRegions(OpAsmPrinter &p, TryOp op, +void printCatchRegions(OpAsmPrinter &p, mlir::cir::TryOp op, mlir::MutableArrayRef<::mlir::Region> regions, mlir::ArrayAttr catchList) { @@ -1384,8 +1393,8 @@ ParseResult parseCatchRegions( /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void TernaryOp::getSuccessorRegions(mlir::RegionBranchPoint point, - SmallVectorImpl ®ions) { +void mlir::cir::TernaryOp::getSuccessorRegions( + mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // The `true` and the `false` region branch back to the parent operation. if (!point.isParent()) { regions.push_back(RegionSuccessor(this->getODSResults(0))); @@ -1403,9 +1412,10 @@ void TernaryOp::getSuccessorRegions(mlir::RegionBranchPoint point, return; } -void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, - function_ref trueBuilder, - function_ref falseBuilder) { +void mlir::cir::TernaryOp::build( + OpBuilder &builder, OperationState &result, Value cond, + function_ref trueBuilder, + function_ref falseBuilder) { result.addOperands(cond); OpBuilder::InsertionGuard guard(builder); Region *trueRegion = result.addRegion(); @@ -1426,7 +1436,7 @@ void TernaryOp::build(OpBuilder &builder, OperationState &result, Value cond, // SelectOp //===----------------------------------------------------------------------===// -OpFoldResult SelectOp::fold(FoldAdaptor adaptor) { +OpFoldResult mlir::cir::SelectOp::fold(FoldAdaptor adaptor) { auto condition = adaptor.getCondition(); if (condition) { auto conditionValue = mlir::cast(condition).getValue(); @@ -1448,24 +1458,28 @@ OpFoldResult SelectOp::fold(FoldAdaptor adaptor) { // BrOp //===----------------------------------------------------------------------===// -mlir::SuccessorOperands BrOp::getSuccessorOperands(unsigned index) { +mlir::SuccessorOperands mlir::cir::BrOp::getSuccessorOperands(unsigned index) { assert(index == 0 && "invalid successor index"); return mlir::SuccessorOperands(getDestOperandsMutable()); } -Block *BrOp::getSuccessorForOperands(ArrayRef) { return getDest(); } +Block *mlir::cir::BrOp::getSuccessorForOperands(ArrayRef) { + return getDest(); +} //===----------------------------------------------------------------------===// // BrCondOp //===----------------------------------------------------------------------===// -mlir::SuccessorOperands BrCondOp::getSuccessorOperands(unsigned index) { +mlir::SuccessorOperands +mlir::cir::BrCondOp::getSuccessorOperands(unsigned index) { assert(index < getNumSuccessors() && "invalid successor index"); return SuccessorOperands(index == 0 ? getDestOperandsTrueMutable() : getDestOperandsFalseMutable()); } -Block *BrCondOp::getSuccessorForOperands(ArrayRef operands) { +Block * +mlir::cir::BrCondOp::getSuccessorForOperands(ArrayRef operands) { if (IntegerAttr condAttr = dyn_cast_if_present(operands.front())) return condAttr.getValue().isOne() ? getDestTrue() : getDestFalse(); return nullptr; @@ -1556,7 +1570,7 @@ parseSwitchOp(OpAsmParser &parser, mlir::ArrayAttr caseValueList; switch (kindAttr.getValue()) { - case cir::CaseOpKind::Equal: { + case mlir::cir::CaseOpKind::Equal: { if (parser.parseComma().failed()) return mlir::failure(); int64_t val = 0; @@ -1565,8 +1579,8 @@ parseSwitchOp(OpAsmParser &parser, caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(intCondType, val)); break; } - case cir::CaseOpKind::Range: - case cir::CaseOpKind::Anyof: { + case mlir::cir::CaseOpKind::Range: + case mlir::cir::CaseOpKind::Anyof: { if (parser.parseComma().failed()) return mlir::failure(); if (parser.parseLSquare().failed()) @@ -1584,10 +1598,10 @@ parseSwitchOp(OpAsmParser &parser, return mlir::failure(); break; } - case cir::CaseOpKind::Default: { + case mlir::cir::CaseOpKind::Default: { if (parser.parseRParen().failed()) return parser.emitError(parser.getCurrentLocation(), "expected ')'"); - cases.push_back(cir::CaseAttr::get( + cases.push_back(mlir::cir::CaseAttr::get( parser.getContext(), parser.getBuilder().getArrayAttr({}), kindAttr)); return parseAndCheckRegion(); } @@ -1595,7 +1609,7 @@ parseSwitchOp(OpAsmParser &parser, caseValueList = parser.getBuilder().getArrayAttr(caseEltValueListAttr); cases.push_back( - cir::CaseAttr::get(parser.getContext(), caseValueList, kindAttr)); + mlir::cir::CaseAttr::get(parser.getContext(), caseValueList, kindAttr)); if (succeeded(parser.parseOptionalColon())) { Type caseIntTy; if (parser.parseType(caseIntTy).failed()) @@ -1632,7 +1646,7 @@ parseSwitchOp(OpAsmParser &parser, return ::mlir::success(); } -void printSwitchOp(OpAsmPrinter &p, SwitchOp op, +void printSwitchOp(OpAsmPrinter &p, mlir::cir::SwitchOp op, mlir::MutableArrayRef<::mlir::Region> regions, mlir::ArrayAttr casesAttr, mlir::Value condition, mlir::Type condType) { @@ -1650,10 +1664,12 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, for (auto &r : regions) { p << "case ("; - auto attr = cast(casesAttr[idx]); + auto attr = cast(casesAttr[idx]); auto kind = attr.getKind().getValue(); - assert((kind == CaseOpKind::Default || kind == CaseOpKind::Equal || - kind == CaseOpKind::Anyof || kind == CaseOpKind::Range) && + assert((kind == mlir::cir::CaseOpKind::Default || + kind == mlir::cir::CaseOpKind::Equal || + kind == mlir::cir::CaseOpKind::Anyof || + kind == mlir::cir::CaseOpKind::Range) && "unknown case"); // Case kind @@ -1661,22 +1677,22 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, // Case value switch (kind) { - case cir::CaseOpKind::Equal: { + case mlir::cir::CaseOpKind::Equal: { p << ", "; - auto intAttr = cast(attr.getValue()[0]); - auto intAttrTy = cast(intAttr.getType()); + auto intAttr = cast(attr.getValue()[0]); + auto intAttrTy = cast(intAttr.getType()); (intAttrTy.isSigned() ? p << intAttr.getSInt() : p << intAttr.getUInt()); break; } - case cir::CaseOpKind::Range: + case mlir::cir::CaseOpKind::Range: assert(attr.getValue().size() == 2 && "range must have two values"); // The print format of the range is the same as anyof LLVM_FALLTHROUGH; - case cir::CaseOpKind::Anyof: { + case mlir::cir::CaseOpKind::Anyof: { p << ", ["; llvm::interleaveComma(attr.getValue(), p, [&](const Attribute &a) { - auto intAttr = cast(a); - auto intAttrTy = cast(intAttr.getType()); + auto intAttr = cast(a); + auto intAttrTy = cast(intAttr.getType()); (intAttrTy.isSigned() ? p << intAttr.getSInt() : p << intAttr.getUInt()); }); @@ -1686,7 +1702,7 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, p.printType(typedAttr.getType()); break; } - case cir::CaseOpKind::Default: + case mlir::cir::CaseOpKind::Default: break; } @@ -1706,8 +1722,8 @@ void printSwitchOp(OpAsmPrinter &p, SwitchOp op, /// during the flow of control. `operands` is a set of optional attributes /// that correspond to a constant value for each operand, or null if that /// operand is not a constant. -void SwitchOp::getSuccessorRegions(mlir::RegionBranchPoint point, - SmallVectorImpl ®ions) { +void mlir::cir::SwitchOp::getSuccessorRegions( + mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // If any index all the underlying regions branch back to the parent // operation. if (!point.isParent()) { @@ -1735,13 +1751,13 @@ void SwitchOp::getSuccessorRegions(mlir::RegionBranchPoint point, regions.push_back(RegionSuccessor(&r)); } -LogicalResult SwitchOp::verify() { +LogicalResult mlir::cir::SwitchOp::verify() { if (getCases().has_value() && getCases()->size() != getNumRegions()) return emitOpError("number of cases attributes and regions must match"); return success(); } -void SwitchOp::build( +void mlir::cir::SwitchOp::build( OpBuilder &builder, OperationState &result, Value cond, function_ref switchBuilder) { assert(switchBuilder && "the builder callback for regions must be present"); @@ -1754,11 +1770,12 @@ void SwitchOp::build( // SwitchFlatOp //===----------------------------------------------------------------------===// -void SwitchFlatOp::build(OpBuilder &builder, OperationState &result, - Value value, Block *defaultDestination, - ValueRange defaultOperands, ArrayRef caseValues, - BlockRange caseDestinations, - ArrayRef caseOperands) { +void mlir::cir::SwitchFlatOp::build(OpBuilder &builder, OperationState &result, + Value value, Block *defaultDestination, + ValueRange defaultOperands, + ArrayRef caseValues, + BlockRange caseDestinations, + ArrayRef caseOperands) { std::vector caseValuesAttrs; for (auto &val : caseValues) { @@ -1788,7 +1805,7 @@ static ParseResult parseSwitchFlatOpCases( if (failed(parser.parseInteger(value))) return failure(); - values.push_back(IntAttr::get(flagType, value)); + values.push_back(mlir::cir::IntAttr::get(flagType, value)); Block *destination; SmallVector operands; @@ -1814,7 +1831,7 @@ static ParseResult parseSwitchFlatOpCases( return parser.parseRSquare(); } -static void printSwitchFlatOpCases(OpAsmPrinter &p, SwitchFlatOp op, +static void printSwitchFlatOpCases(OpAsmPrinter &p, mlir::cir::SwitchFlatOp op, Type flagType, mlir::ArrayAttr caseValues, SuccessorRange caseDestinations, OperandRangeRange caseOperands, @@ -1848,31 +1865,35 @@ static void printSwitchFlatOpCases(OpAsmPrinter &p, SwitchFlatOp op, // LoopOpInterface Methods //===----------------------------------------------------------------------===// -void DoWhileOp::getSuccessorRegions( +void mlir::cir::DoWhileOp::getSuccessorRegions( ::mlir::RegionBranchPoint point, ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> ®ions) { LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); } -::llvm::SmallVector DoWhileOp::getLoopRegions() { +::llvm::SmallVector mlir::cir::DoWhileOp::getLoopRegions() { return {&getBody()}; } -void WhileOp::getSuccessorRegions( +void mlir::cir::WhileOp::getSuccessorRegions( ::mlir::RegionBranchPoint point, ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> ®ions) { LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); } -::llvm::SmallVector WhileOp::getLoopRegions() { return {&getBody()}; } +::llvm::SmallVector mlir::cir::WhileOp::getLoopRegions() { + return {&getBody()}; +} -void ForOp::getSuccessorRegions( +void mlir::cir::ForOp::getSuccessorRegions( ::mlir::RegionBranchPoint point, ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> ®ions) { LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); } -::llvm::SmallVector ForOp::getLoopRegions() { return {&getBody()}; } +::llvm::SmallVector mlir::cir::ForOp::getLoopRegions() { + return {&getBody()}; +} //===----------------------------------------------------------------------===// // GlobalOp @@ -1890,17 +1911,19 @@ static void printConstant(OpAsmPrinter &p, Attribute value) { p.printAttribute(value); } -static ParseResult parseGlobalOpAddrSpace(OpAsmParser &p, - AddressSpaceAttr &addrSpaceAttr) { +static ParseResult +parseGlobalOpAddrSpace(OpAsmParser &p, + mlir::cir::AddressSpaceAttr &addrSpaceAttr) { return parseAddrSpaceAttribute(p, addrSpaceAttr); } -static void printGlobalOpAddrSpace(OpAsmPrinter &p, GlobalOp op, - AddressSpaceAttr addrSpaceAttr) { +static void printGlobalOpAddrSpace(OpAsmPrinter &p, mlir::cir::GlobalOp op, + mlir::cir::AddressSpaceAttr addrSpaceAttr) { printAddrSpaceAttribute(p, addrSpaceAttr); } -static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, +static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, + mlir::cir::GlobalOp op, TypeAttr type, Attribute initAttr, mlir::Region &ctorRegion, mlir::Region &dtorRegion) { @@ -1993,7 +2016,7 @@ static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, return success(); } -LogicalResult GlobalOp::verify() { +LogicalResult mlir::cir::GlobalOp::verify() { // Verify that the initial value, if present, is either a unit attribute or // an attribute CIR supports. if (getInitialValue().has_value()) { @@ -2070,12 +2093,12 @@ LogicalResult GlobalOp::verify() { return success(); } -void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, - StringRef sym_name, Type sym_type, bool isConstant, - cir::GlobalLinkageKind linkage, - cir::AddressSpaceAttr addrSpace, - function_ref ctorBuilder, - function_ref dtorBuilder) { +void mlir::cir::GlobalOp::build( + OpBuilder &odsBuilder, OperationState &odsState, StringRef sym_name, + Type sym_type, bool isConstant, cir::GlobalLinkageKind linkage, + cir::AddressSpaceAttr addrSpace, + function_ref ctorBuilder, + function_ref dtorBuilder) { odsState.addAttribute(getSymNameAttrName(odsState.name), odsBuilder.getStringAttr(sym_name)); odsState.addAttribute(getSymTypeAttrName(odsState.name), @@ -2113,8 +2136,8 @@ void GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void GlobalOp::getSuccessorRegions(mlir::RegionBranchPoint point, - SmallVectorImpl ®ions) { +void mlir::cir::GlobalOp::getSuccessorRegions( + mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // The `ctor` and `dtor` regions always branch back to the parent operation. if (!point.isParent()) { regions.push_back(RegionSuccessor()); @@ -2143,7 +2166,7 @@ void GlobalOp::getSuccessorRegions(mlir::RegionBranchPoint point, //===----------------------------------------------------------------------===// LogicalResult -GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +mlir::cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // Verify that the result type underlying pointer type matches the type of // the referenced cir.global or cir.func op. auto op = symbolTable.lookupNearestSymbolFrom(*this, getNameAttr()); @@ -2186,8 +2209,8 @@ GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // VTableAddrPointOp //===----------------------------------------------------------------------===// -LogicalResult -VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +LogicalResult mlir::cir::VTableAddrPointOp::verifySymbolUses( + SymbolTableCollection &symbolTable) { // vtable ptr is not coming from a symbol. if (!getName()) return success(); @@ -2209,7 +2232,7 @@ VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return success(); } -LogicalResult cir::VTableAddrPointOp::verify() { +LogicalResult mlir::cir::VTableAddrPointOp::verify() { // The operation uses either a symbol or a value to operate, but not both if (getName() && getSymAddr()) return emitOpError("should use either a symbol or value, but not both"); @@ -2235,8 +2258,8 @@ LogicalResult cir::VTableAddrPointOp::verify() { // VTTAddrPointOp //===----------------------------------------------------------------------===// -LogicalResult -VTTAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +LogicalResult mlir::cir::VTTAddrPointOp::verifySymbolUses( + SymbolTableCollection &symbolTable) { // VTT ptr is not coming from a symbol. if (!getName()) return success(); @@ -2258,7 +2281,7 @@ VTTAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return success(); } -LogicalResult cir::VTTAddrPointOp::verify() { +LogicalResult mlir::cir::VTTAddrPointOp::verify() { // The operation uses either a symbol or a value to operate, but not both if (getName() && getSymAddr()) return emitOpError("should use either a symbol or value, but not both"); @@ -2287,11 +2310,12 @@ LogicalResult cir::VTTAddrPointOp::verify() { /// the name of the attribute in ODS. static StringRef getLinkageAttrNameString() { return "linkage"; } -void cir::FuncOp::build(OpBuilder &builder, OperationState &result, - StringRef name, cir::FuncType type, - GlobalLinkageKind linkage, CallingConv callingConv, - ArrayRef attrs, - ArrayRef argAttrs) { +void mlir::cir::FuncOp::build(OpBuilder &builder, OperationState &result, + StringRef name, mlir::cir::FuncType type, + GlobalLinkageKind linkage, + CallingConv callingConv, + ArrayRef attrs, + ArrayRef argAttrs) { result.addRegion(); result.addAttribute(SymbolTable::getSymbolAttrName(), builder.getStringAttr(name)); @@ -2315,7 +2339,8 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, getResAttrsAttrName(result.name)); } -ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { +ParseResult mlir::cir::FuncOp::parse(OpAsmParser &parser, + OperationState &state) { llvm::SMLoc loc = parser.getCurrentLocation(); auto builtinNameAttr = getBuiltinAttrName(state.name); @@ -2336,6 +2361,9 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { if (parser.parseOptionalKeyword(noProtoNameAttr).succeeded()) state.addAttribute(noProtoNameAttr, parser.getBuilder().getUnitAttr()); + // TODO: Missing comdat + assert(!::cir::MissingFeatures::setComdat()); + // Default to external linkage if no keyword is provided. state.addAttribute(getLinkageAttrNameString(), GlobalLinkageKindAttr::get( @@ -2354,12 +2382,10 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { parseVisibilityAttr(parser, cirVisibilityAttr); state.addAttribute(visibilityNameAttr, cirVisibilityAttr); + // TODO: It is unclear whether this is printed in the pretty-printer if (parser.parseOptionalKeyword(dsolocalNameAttr).succeeded()) state.addAttribute(dsolocalNameAttr, parser.getBuilder().getUnitAttr()); - if (parser.parseOptionalKeyword(annotationsNameAttr).succeeded()) - state.addAttribute(annotationsNameAttr, parser.getBuilder().getUnitAttr()); - StringAttr nameAttr; SmallVector arguments; SmallVector resultAttrs; @@ -2397,6 +2423,15 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { state.addAttribute(getFunctionTypeAttrName(state.name), TypeAttr::get(fnType)); + { + // Parse an OptionalAttr:$annotations + mlir::ArrayAttr annotations; + // TODO: Is there a way to restrict the element type to cir.annotation? + // parseOptionalAttribute takes a type, but unclear how to use this. + if (auto oa = parser.parseOptionalAttribute(annotations); oa.has_value()) + state.addAttribute(annotationsNameAttr, annotations); + } + // If additional attributes are present, parse them. if (parser.parseOptionalAttrDictWithKeyword(state.attributes)) return failure(); @@ -2509,7 +2544,7 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { return success(); } -bool cir::FuncOp::isDeclaration() { +bool mlir::cir::FuncOp::isDeclaration() { auto aliasee = getAliasee(); if (!aliasee) return isExternal(); @@ -2521,7 +2556,7 @@ bool cir::FuncOp::isDeclaration() { return targetFn.isDeclaration(); } -::mlir::Region *cir::FuncOp::getCallableRegion() { +::mlir::Region *mlir::cir::FuncOp::getCallableRegion() { auto aliasee = getAliasee(); if (!aliasee) return isExternal() ? nullptr : &getBody(); @@ -2535,7 +2570,7 @@ ::mlir::Region *cir::FuncOp::getCallableRegion() { return targetFn.getCallableRegion(); } -void cir::FuncOp::print(OpAsmPrinter &p) { +void mlir::cir::FuncOp::print(OpAsmPrinter &p) { p << ' '; // When adding a specific keyword here, do not forget to omit it in @@ -2565,6 +2600,12 @@ void cir::FuncOp::print(OpAsmPrinter &p) { auto cirVisibilityAttr = getGlobalVisibilityAttr(); printVisibilityAttr(p, cirVisibilityAttr); + // TODO: This is a problematic space to be handled conditionally by + // printVisibilityAttr which leads often to a double space in the output. But + // it looks like from here we have also switched from adding a conditional + // trailing space to inserting a leading space, to avoid trailing space at + // EOL. + // TODO: Only use the "insert leading space everywhere". p << " "; // Print function name, signature, and control. @@ -2638,7 +2679,7 @@ void cir::FuncOp::print(OpAsmPrinter &p) { // Hook for OpTrait::FunctionLike, called after verifying that the 'type' // attribute is present. This can check for preconditions of the // getNumArguments hook not failing. -LogicalResult cir::FuncOp::verifyType() { +LogicalResult mlir::cir::FuncOp::verifyType() { auto type = getFunctionType(); if (!isa(type)) return emitOpError("requires '" + getFunctionTypeAttrName().str() + @@ -2649,7 +2690,7 @@ LogicalResult cir::FuncOp::verifyType() { return success(); } -LogicalResult cir::IntrinsicCallOp::verify() { +LogicalResult mlir::cir::IntrinsicCallOp::verify() { if (!getIntrinsicName().starts_with("llvm.")) return emitOpError() << "intrinsic name must start with 'llvm.'"; return success(); @@ -2659,7 +2700,7 @@ LogicalResult cir::IntrinsicCallOp::verify() { // - functions don't have 'common' linkage // - external functions have 'external' or 'extern_weak' linkage // - coroutine body must use at least one cir.await operation. -LogicalResult cir::FuncOp::verify() { +LogicalResult mlir::cir::FuncOp::verify() { if (getLinkage() == cir::GlobalLinkageKind::CommonLinkage) return emitOpError() << "functions cannot have '" << stringifyGlobalLinkageKind( @@ -2724,29 +2765,29 @@ LogicalResult cir::FuncOp::verify() { // CallOp //===----------------------------------------------------------------------===// -mlir::Value cir::CallOp::getIndirectCall() { +mlir::Value mlir::cir::CallOp::getIndirectCall() { assert(isIndirect()); return getOperand(0); } -mlir::Operation::operand_iterator cir::CallOp::arg_operand_begin() { +mlir::Operation::operand_iterator mlir::cir::CallOp::arg_operand_begin() { auto arg_begin = operand_begin(); if (isIndirect()) arg_begin++; return arg_begin; } -mlir::Operation::operand_iterator cir::CallOp::arg_operand_end() { +mlir::Operation::operand_iterator mlir::cir::CallOp::arg_operand_end() { return operand_end(); } /// Return the operand at index 'i', accounts for indirect call. -Value cir::CallOp::getArgOperand(unsigned i) { +Value mlir::cir::CallOp::getArgOperand(unsigned i) { if (isIndirect()) i++; return getOperand(i); } /// Return the number of operands, accounts for indirect call. -unsigned cir::CallOp::getNumArgOperands() { +unsigned mlir::cir::CallOp::getNumArgOperands() { if (isIndirect()) return this->getOperation()->getNumOperands() - 1; return this->getOperation()->getNumOperands(); @@ -2759,7 +2800,7 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { if (!fnAttr) return success(); - FuncOp fn = + mlir::cir::FuncOp fn = symbolTable.lookupNearestSymbolFrom(op, fnAttr); if (!fn) return op->emitOpError() << "'" << fnAttr.getValue() @@ -2940,7 +2981,7 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, {static_cast(continueOperands.size()), static_cast(landingPadOperands.size()), static_cast(ops.size())}), - result.getOrAddProperties() + result.getOrAddProperties() .operandSegmentSizes.begin()); if (parser.resolveOperands(continueOperands, continueTypes, continueOperandsLoc, result.operands)) @@ -3083,17 +3124,17 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, } LogicalResult -cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +mlir::cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return verifyCallCommInSymbolUses(*this, symbolTable); } -::mlir::ParseResult CallOp::parse(::mlir::OpAsmParser &parser, - ::mlir::OperationState &result) { +::mlir::ParseResult mlir::cir::CallOp::parse(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { return parseCallCommon(parser, result, getExtraAttrsAttrName(result.name)); } -void CallOp::print(::mlir::OpAsmPrinter &state) { +void mlir::cir::CallOp::print(::mlir::OpAsmPrinter &state) { mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; mlir::cir::CallingConv callingConv = getCallingConv(); mlir::UnitAttr exception = getExceptionAttr(); @@ -3105,54 +3146,56 @@ void CallOp::print(::mlir::OpAsmPrinter &state) { // TryCallOp //===----------------------------------------------------------------------===// -mlir::Value cir::TryCallOp::getIndirectCall() { +mlir::Value mlir::cir::TryCallOp::getIndirectCall() { assert(isIndirect()); return getOperand(0); } -mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_begin() { +mlir::Operation::operand_iterator mlir::cir::TryCallOp::arg_operand_begin() { auto arg_begin = operand_begin(); if (isIndirect()) arg_begin++; return arg_begin; } -mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_end() { +mlir::Operation::operand_iterator mlir::cir::TryCallOp::arg_operand_end() { return operand_end(); } /// Return the operand at index 'i', accounts for indirect call. -Value cir::TryCallOp::getArgOperand(unsigned i) { +Value mlir::cir::TryCallOp::getArgOperand(unsigned i) { if (isIndirect()) i++; return getOperand(i); } /// Return the number of operands, accounts for indirect call. -unsigned cir::TryCallOp::getNumArgOperands() { +unsigned mlir::cir::TryCallOp::getNumArgOperands() { if (isIndirect()) return this->getOperation()->getNumOperands() - 1; return this->getOperation()->getNumOperands(); } LogicalResult -cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +mlir::cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return verifyCallCommInSymbolUses(*this, symbolTable); } -::mlir::ParseResult TryCallOp::parse(::mlir::OpAsmParser &parser, - ::mlir::OperationState &result) { +::mlir::ParseResult +mlir::cir::TryCallOp::parse(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { return parseCallCommon(parser, result, getExtraAttrsAttrName(result.name), /*hasDestinationBlocks=*/true); } -void TryCallOp::print(::mlir::OpAsmPrinter &state) { +void mlir::cir::TryCallOp::print(::mlir::OpAsmPrinter &state) { mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; mlir::cir::CallingConv callingConv = getCallingConv(); printCallCommon(*this, indirectCallee, getCalleeAttr(), state, getExtraAttrs(), callingConv, {}, getCont(), getLandingPad()); } -mlir::SuccessorOperands TryCallOp::getSuccessorOperands(unsigned index) { +mlir::SuccessorOperands +mlir::cir::TryCallOp::getSuccessorOperands(unsigned index) { assert(index < getNumSuccessors() && "invalid successor index"); if (index == 0) return SuccessorOperands(getContOperandsMutable()); @@ -3167,7 +3210,7 @@ mlir::SuccessorOperands TryCallOp::getSuccessorOperands(unsigned index) { // UnaryOp //===----------------------------------------------------------------------===// -LogicalResult UnaryOp::verify() { +LogicalResult mlir::cir::UnaryOp::verify() { switch (getKind()) { case cir::UnaryOpKind::Inc: case cir::UnaryOpKind::Dec: @@ -3185,11 +3228,11 @@ LogicalResult UnaryOp::verify() { // AwaitOp //===----------------------------------------------------------------------===// -void AwaitOp::build(OpBuilder &builder, OperationState &result, - mlir::cir::AwaitKind kind, - function_ref readyBuilder, - function_ref suspendBuilder, - function_ref resumeBuilder) { +void mlir::cir::AwaitOp::build( + OpBuilder &builder, OperationState &result, mlir::cir::AwaitKind kind, + function_ref readyBuilder, + function_ref suspendBuilder, + function_ref resumeBuilder) { result.addAttribute(getKindAttrName(result.name), cir::AwaitKindAttr::get(builder.getContext(), kind)); { @@ -3219,8 +3262,8 @@ void AwaitOp::build(OpBuilder &builder, OperationState &result, /// during the flow of control. `operands` is a set of optional attributes /// that correspond to a constant value for each operand, or null if that /// operand is not a constant. -void AwaitOp::getSuccessorRegions(mlir::RegionBranchPoint point, - SmallVectorImpl ®ions) { +void mlir::cir::AwaitOp::getSuccessorRegions( + mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // If any index all the underlying regions branch back to the parent // operation. if (!point.isParent()) { @@ -3235,7 +3278,7 @@ void AwaitOp::getSuccessorRegions(mlir::RegionBranchPoint point, regions.push_back(RegionSuccessor(&this->getResume())); } -LogicalResult AwaitOp::verify() { +LogicalResult mlir::cir::AwaitOp::verify() { if (!isa(this->getReady().back().getTerminator())) return emitOpError("ready region must end with cir.condition"); return success(); @@ -3338,8 +3381,8 @@ LogicalResult mlir::cir::ConstArrayAttr::verify( return eltTypeCheck; } -::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser, - ::mlir::Type type) { +::mlir::Attribute mlir::cir::ConstArrayAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { ::mlir::FailureOr<::mlir::Type> resultTy; ::mlir::FailureOr resultVal; ::llvm::SMLoc loc = parser.getCurrentLocation(); @@ -3407,10 +3450,10 @@ ::mlir::Attribute ConstArrayAttr::parse(::mlir::AsmParser &parser, loc, parser.getContext(), resultTy.value(), resultVal.value(), zeros); } -void ConstArrayAttr::print(::mlir::AsmPrinter &printer) const { +void mlir::cir::ConstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << "<"; printer.printStrippedAttrOrType(getElts()); - if (auto zeros = getTrailingZerosNum()) + if (getTrailingZerosNum()) printer << ", trailing_zeros"; printer << ">"; } @@ -3448,8 +3491,8 @@ LogicalResult mlir::cir::ConstVectorAttr::verify( return elementTypeCheck; } -::mlir::Attribute ConstVectorAttr::parse(::mlir::AsmParser &parser, - ::mlir::Type type) { +::mlir::Attribute mlir::cir::ConstVectorAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { ::mlir::FailureOr<::mlir::Type> resultType; ::mlir::FailureOr resultValue; ::llvm::SMLoc loc = parser.getCurrentLocation(); @@ -3489,14 +3532,15 @@ ::mlir::Attribute ConstVectorAttr::parse(::mlir::AsmParser &parser, loc, parser.getContext(), resultType.value(), resultValue.value()); } -void ConstVectorAttr::print(::mlir::AsmPrinter &printer) const { +void mlir::cir::ConstVectorAttr::print(::mlir::AsmPrinter &printer) const { printer << "<"; printer.printStrippedAttrOrType(getElts()); printer << ">"; } -::mlir::Attribute SignedOverflowBehaviorAttr::parse(::mlir::AsmParser &parser, - ::mlir::Type type) { +::mlir::Attribute +mlir::cir::SignedOverflowBehaviorAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { if (parser.parseLess()) return {}; auto behavior = parseOptionalCIRKeyword( @@ -3507,7 +3551,8 @@ ::mlir::Attribute SignedOverflowBehaviorAttr::parse(::mlir::AsmParser &parser, return SignedOverflowBehaviorAttr::get(parser.getContext(), behavior); } -void SignedOverflowBehaviorAttr::print(::mlir::AsmPrinter &printer) const { +void mlir::cir::SignedOverflowBehaviorAttr::print( + ::mlir::AsmPrinter &printer) const { printer << "<"; switch (getBehavior()) { case sob::SignedOverflowBehavior::undefined: @@ -3523,7 +3568,7 @@ void SignedOverflowBehaviorAttr::print(::mlir::AsmPrinter &printer) const { printer << ">"; } -LogicalResult TypeInfoAttr::verify( +LogicalResult mlir::cir::TypeInfoAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::Type type, ::mlir::ArrayAttr typeinfoData) { @@ -3541,9 +3586,9 @@ LogicalResult TypeInfoAttr::verify( return success(); } -LogicalResult -VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, - ::mlir::Type type, ::mlir::ArrayAttr vtableData) { +LogicalResult mlir::cir::VTableAttr::verify( + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, + ::mlir::Type type, ::mlir::ArrayAttr vtableData) { auto sTy = mlir::dyn_cast_if_present(type); if (!sTy) { emitError() << "expected !cir.struct type result"; @@ -3592,7 +3637,7 @@ VTableAttr::verify(::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, // CopyOp Definitions //===----------------------------------------------------------------------===// -LogicalResult CopyOp::verify() { +LogicalResult mlir::cir::CopyOp::verify() { // A data layout is required for us to know the number of bytes to be copied. if (!getType().getPointee().hasTrait()) @@ -3608,7 +3653,7 @@ LogicalResult CopyOp::verify() { // MemCpyOp Definitions //===----------------------------------------------------------------------===// -LogicalResult MemCpyOp::verify() { +LogicalResult mlir::cir::MemCpyOp::verify() { auto voidPtr = cir::PointerType::get(getContext(), cir::VoidType::get(getContext())); @@ -3625,7 +3670,7 @@ LogicalResult MemCpyOp::verify() { // GetMemberOp Definitions //===----------------------------------------------------------------------===// -LogicalResult GetMemberOp::verify() { +LogicalResult mlir::cir::GetMemberOp::verify() { const auto recordTy = dyn_cast(getAddrTy().getPointee()); if (!recordTy) @@ -3647,7 +3692,7 @@ LogicalResult GetMemberOp::verify() { // GetRuntimeMemberOp Definitions //===----------------------------------------------------------------------===// -LogicalResult GetRuntimeMemberOp::verify() { +LogicalResult mlir::cir::GetRuntimeMemberOp::verify() { auto recordTy = cast(cast(getAddr().getType()).getPointee()); auto memberPtrTy = getMember().getType(); @@ -3669,7 +3714,7 @@ LogicalResult GetRuntimeMemberOp::verify() { // GetMethodOp Definitions //===----------------------------------------------------------------------===// -LogicalResult GetMethodOp::verify() { +LogicalResult mlir::cir::GetMethodOp::verify() { auto methodTy = getMethod().getType(); // Assume objectTy is !cir.ptr @@ -3724,7 +3769,7 @@ LogicalResult GetMethodOp::verify() { // InlineAsmOp Definitions //===----------------------------------------------------------------------===// -void cir::InlineAsmOp::print(OpAsmPrinter &p) { +void mlir::cir::InlineAsmOp::print(OpAsmPrinter &p) { p << '(' << getAsmFlavor() << ", "; p.increaseIndent(); p.printNewline(); @@ -3773,8 +3818,8 @@ void cir::InlineAsmOp::print(OpAsmPrinter &p) { p << " -> " << v.getType(); } -ParseResult cir::InlineAsmOp::parse(OpAsmParser &parser, - OperationState &result) { +ParseResult mlir::cir::InlineAsmOp::parse(OpAsmParser &parser, + OperationState &result) { llvm::SmallVector operand_attrs; llvm::SmallVector operandsGroupSizes; std::string asm_string, constraints; @@ -3904,7 +3949,7 @@ ParseResult cir::InlineAsmOp::parse(OpAsmParser &parser, // Atomic Definitions //===----------------------------------------------------------------------===// -LogicalResult AtomicFetch::verify() { +LogicalResult mlir::cir::AtomicFetch::verify() { if (getBinop() == mlir::cir::AtomicFetchKind::Add || getBinop() == mlir::cir::AtomicFetchKind::Sub) return mlir::success(); @@ -3915,7 +3960,7 @@ LogicalResult AtomicFetch::verify() { return mlir::success(); } -LogicalResult BinOp::verify() { +LogicalResult mlir::cir::BinOp::verify() { bool noWrap = getNoUnsignedWrap() || getNoSignedWrap(); if (!isa(getType()) && noWrap) @@ -3942,7 +3987,7 @@ LogicalResult BinOp::verify() { //===----------------------------------------------------------------------===// // ShiftOp Definitions //===----------------------------------------------------------------------===// -LogicalResult ShiftOp::verify() { +LogicalResult mlir::cir::ShiftOp::verify() { mlir::Operation *op = getOperation(); mlir::Type resType = getResult().getType(); bool isOp0Vec = mlir::isa(op->getOperand(0).getType()); @@ -3960,7 +4005,7 @@ LogicalResult ShiftOp::verify() { // LabelOp Definitions //===----------------------------------------------------------------------===// -LogicalResult LabelOp::verify() { +LogicalResult mlir::cir::LabelOp::verify() { auto *op = getOperation(); auto *blk = op->getBlock(); if (&blk->front() != op) @@ -3972,7 +4017,8 @@ LogicalResult LabelOp::verify() { // EhTypeIdOp //===----------------------------------------------------------------------===// -LogicalResult EhTypeIdOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +LogicalResult +mlir::cir::EhTypeIdOp::verifySymbolUses(SymbolTableCollection &symbolTable) { auto op = symbolTable.lookupNearestSymbolFrom(*this, getTypeSymAttr()); if (!isa(op)) return emitOpError("'") @@ -3984,7 +4030,7 @@ LogicalResult EhTypeIdOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // CatchParamOp //===----------------------------------------------------------------------===// -LogicalResult cir::CatchParamOp::verify() { +LogicalResult mlir::cir::CatchParamOp::verify() { if (getExceptionPtr()) { auto kind = getKind(); if (!kind || *kind != mlir::cir::CatchParamKind::begin) diff --git a/clang/test/CIR/IR/annotations.cir b/clang/test/CIR/IR/annotations.cir index c1486e35aa71..1f92b8e8e126 100644 --- a/clang/test/CIR/IR/annotations.cir +++ b/clang/test/CIR/IR/annotations.cir @@ -6,7 +6,8 @@ module attributes {cir.global_annotations = #cir], ["foo", #cir.annotation], ["bar", #cir.annotation], -["bar", #cir.annotation]]>} +["bar", #cir.annotation], +["_Z1fv", #cir.annotation]]>} { cir.global external @a = #cir.int<0> : !s32i [#cir.annotation] cir.func @foo() attributes {annotations = [#cir.annotation]} { @@ -15,13 +16,18 @@ cir.func @foo() attributes {annotations = [#cir.annotation, #cir.annotation]} { cir.return } +// Check that the pretty-printed syntax is also correctly parsed + cir.func @_Z1fv() [#cir.annotation] { + cir.return + } } // CHECK: module attributes {cir.global_annotations = #cir], // CHECK-SAME: ["foo", #cir.annotation], // CHECK-SAME: ["bar", #cir.annotation], -// CHECK-SAME: ["bar", #cir.annotation]]>} +// CHECK-SAME: ["bar", #cir.annotation], +// CHECK-SAME: ["_Z1fv", #cir.annotation]]>} // CHECK: cir.global external @a = #cir.int<0> : !s32i // CHECK-SAME: [#cir.annotation] // CHECK: cir.func @foo() @@ -29,3 +35,4 @@ cir.func @bar() attributes {annotations = [#cir.annotation, // CHECK-SAME: #cir.annotation] +// CHECK: cir.func @_Z1fv() [#cir.annotation] { From 682f7ea85c5dc82064c201ab0c0b1f365c2fa849 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 21 Oct 2024 16:38:43 -0400 Subject: [PATCH 1976/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vabd_v and neon_vabdq_v (#996) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 10 +- clang/test/CIR/CodeGen/AArch64/neon.c | 277 +++++++++++------- 2 files changed, 173 insertions(+), 114 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 12efc23b2587..52036da077d9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3139,8 +3139,14 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } case NEON::BI__builtin_neon_vabd_v: - case NEON::BI__builtin_neon_vabdq_v: - llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vabdq_v: { + llvm::StringRef name = + usgn ? "llvm.aarch64.neon.uabd" : "llvm.aarch64.neon.sabd"; + if (mlir::cir::isFPOrFPVectorTy(ty)) + name = "llvm.aarch64.neon.fabd"; + return buildNeonCall(builder, {ty, ty}, Ops, name, ty, + getLoc(E->getExprLoc())); + } case NEON::BI__builtin_neon_vpadal_v: case NEON::BI__builtin_neon_vpadalq_v: { llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 30a1c595af6e..958207dcdc66 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -842,132 +842,185 @@ // return vabaq_u32(v1, v2, v3); // } -// NYI-LABEL: @test_vabd_s8( -// NYI: [[VABD_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %v1, <8 x i8> %v2) -// NYI: ret <8 x i8> [[VABD_I]] -// int8x8_t test_vabd_s8(int8x8_t v1, int8x8_t v2) { -// return vabd_s8(v1, v2); -// } +int8x8_t test_vabd_s8(int8x8_t v1, int8x8_t v2) { + return vabd_s8(v1, v2); -// NYI-LABEL: @test_vabd_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> -// NYI: [[VABD2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %v1, <4 x i16> %v2) -// NYI: ret <4 x i16> [[VABD2_I]] -// int16x4_t test_vabd_s16(int16x4_t v1, int16x4_t v2) { -// return vabd_s16(v1, v2); -// } + // CIR-LABEL: vabd_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -// NYI-LABEL: @test_vabd_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> -// NYI: [[VABD2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %v1, <2 x i32> %v2) -// NYI: ret <2 x i32> [[VABD2_I]] -// int32x2_t test_vabd_s32(int32x2_t v1, int32x2_t v2) { -// return vabd_s32(v1, v2); -// } + // LLVM: {{.*}}test_vabd_s8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]]) + // LLVM: ret <8 x i8> [[VABD_I]] +} -// NYI-LABEL: @test_vabd_u8( -// NYI: [[VABD_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %v1, <8 x i8> %v2) -// NYI: ret <8 x i8> [[VABD_I]] -// uint8x8_t test_vabd_u8(uint8x8_t v1, uint8x8_t v2) { -// return vabd_u8(v1, v2); -// } +int16x4_t test_vabd_s16(int16x4_t v1, int16x4_t v2) { + return vabd_s16(v1, v2); -// NYI-LABEL: @test_vabd_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> -// NYI: [[VABD2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %v1, <4 x i16> %v2) -// NYI: ret <4 x i16> [[VABD2_I]] -// uint16x4_t test_vabd_u16(uint16x4_t v1, uint16x4_t v2) { -// return vabd_u16(v1, v2); -// } + // CIR-LABEL: vabd_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -// NYI-LABEL: @test_vabd_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> -// NYI: [[VABD2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %v1, <2 x i32> %v2) -// NYI: ret <2 x i32> [[VABD2_I]] -// uint32x2_t test_vabd_u32(uint32x2_t v1, uint32x2_t v2) { -// return vabd_u32(v1, v2); -// } + // LLVM: {{.*}}test_vabd_s16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]]) + // LLVM: ret <4 x i16> [[VABD_I]] +} -// NYI-LABEL: @test_vabd_f32( -// NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8> -// NYI: [[VABD2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fabd.v2f32(<2 x float> %v1, <2 x float> %v2) -// NYI: ret <2 x float> [[VABD2_I]] -// float32x2_t test_vabd_f32(float32x2_t v1, float32x2_t v2) { -// return vabd_f32(v1, v2); -// } +int32x2_t test_vabd_s32(int32x2_t v1, int32x2_t v2) { + return vabd_s32(v1, v2); -// NYI-LABEL: @test_vabdq_s8( -// NYI: [[VABD_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %v1, <16 x i8> %v2) -// NYI: ret <16 x i8> [[VABD_I]] -// int8x16_t test_vabdq_s8(int8x16_t v1, int8x16_t v2) { -// return vabdq_s8(v1, v2); -// } + // CIR-LABEL: vabd_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -// NYI-LABEL: @test_vabdq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> -// NYI: [[VABD2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %v1, <8 x i16> %v2) -// NYI: ret <8 x i16> [[VABD2_I]] -// int16x8_t test_vabdq_s16(int16x8_t v1, int16x8_t v2) { -// return vabdq_s16(v1, v2); -// } + // LLVM: {{.*}}test_vabd_s32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]]) + // LLVM: ret <2 x i32> [[VABD_I]] +} -// NYI-LABEL: @test_vabdq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> -// NYI: [[VABD2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %v1, <4 x i32> %v2) -// NYI: ret <4 x i32> [[VABD2_I]] -// int32x4_t test_vabdq_s32(int32x4_t v1, int32x4_t v2) { -// return vabdq_s32(v1, v2); -// } +uint8x8_t test_vabd_u8(uint8x8_t v1, uint8x8_t v2) { + return vabd_u8(v1, v2); -// NYI-LABEL: @test_vabdq_u8( -// NYI: [[VABD_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %v1, <16 x i8> %v2) -// NYI: ret <16 x i8> [[VABD_I]] -// uint8x16_t test_vabdq_u8(uint8x16_t v1, uint8x16_t v2) { -// return vabdq_u8(v1, v2); -// } + // CIR-LABEL: vabd_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -// NYI-LABEL: @test_vabdq_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> -// NYI: [[VABD2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %v1, <8 x i16> %v2) -// NYI: ret <8 x i16> [[VABD2_I]] -// uint16x8_t test_vabdq_u16(uint16x8_t v1, uint16x8_t v2) { -// return vabdq_u16(v1, v2); -// } + // LLVM: {{.*}}test_vabd_u8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]]) + // LLVM: ret <8 x i8> [[VABD_I]] +} -// NYI-LABEL: @test_vabdq_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> -// NYI: [[VABD2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %v1, <4 x i32> %v2) -// NYI: ret <4 x i32> [[VABD2_I]] -// uint32x4_t test_vabdq_u32(uint32x4_t v1, uint32x4_t v2) { -// return vabdq_u32(v1, v2); -// } +uint16x4_t test_vabd_u16(uint16x4_t v1, uint16x4_t v2) { + return vabd_u16(v1, v2); -// NYI-LABEL: @test_vabdq_f32( -// NYI: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> -// NYI: [[VABD2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fabd.v4f32(<4 x float> %v1, <4 x float> %v2) -// NYI: ret <4 x float> [[VABD2_I]] -// float32x4_t test_vabdq_f32(float32x4_t v1, float32x4_t v2) { -// return vabdq_f32(v1, v2); -// } + // CIR-LABEL: vabd_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -// NYI-LABEL: @test_vabdq_f64( -// NYI: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> -// NYI: [[VABD2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fabd.v2f64(<2 x double> %v1, <2 x double> %v2) -// NYI: ret <2 x double> [[VABD2_I]] -// float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) { -// return vabdq_f64(v1, v2); -// } + // LLVM: {{.*}}test_vabd_u16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]]) + // LLVM: ret <4 x i16> [[VABD_I]] +} + +uint32x2_t test_vabd_u32(uint32x2_t v1, uint32x2_t v2) { + return vabd_u32(v1, v2); + + // CIR-LABEL: vabd_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) + + // LLVM: {{.*}}test_vabd_u32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]]) + // LLVM: ret <2 x i32> [[VABD_I]] +} + +float32x2_t test_vabd_f32(float32x2_t v1, float32x2_t v2) { + return vabd_f32(v1, v2); + + // CIR-LABEL: vabd_f32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) + + // LLVM: {{.*}}test_vabd_f32(<2 x float>{{.*}}[[V1:%.*]], <2 x float>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_F:%.*]] = call <2 x float> @llvm.aarch64.neon.fabd.v2f32(<2 x float> [[V1]], <2 x float> [[V2]]) + // LLVM: ret <2 x float> [[VABD_F]] +} + +int8x16_t test_vabdq_s8(int8x16_t v1, int8x16_t v2) { + return vabdq_s8(v1, v2); + + // CIR-LABEL: vabdq_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) + + // LLVM: {{.*}}test_vabdq_s8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]]) + // LLVM: ret <16 x i8> [[VABD_I]] +} + +int16x8_t test_vabdq_s16(int16x8_t v1, int16x8_t v2) { + return vabdq_s16(v1, v2); + + // CIR-LABEL: vabdq_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) + + // LLVM: {{.*}}test_vabdq_s16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]]) + // LLVM: ret <8 x i16> [[VABD_I]] +} + +int32x4_t test_vabdq_s32(int32x4_t v1, int32x4_t v2) { + return vabdq_s32(v1, v2); + + // CIR-LABEL: vabdq_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) + + // LLVM: {{.*}}test_vabdq_s32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]]) + // LLVM: ret <4 x i32> [[VABD_I]] +} + +uint8x16_t test_vabdq_u8(uint8x16_t v1, uint8x16_t v2) { + return vabdq_u8(v1, v2); + + // CIR-LABEL: vabdq_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) + + // LLVM: {{.*}}test_vabdq_u8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]]) + // LLVM: ret <16 x i8> [[VABD_I]] +} + +uint16x8_t test_vabdq_u16(uint16x8_t v1, uint16x8_t v2) { + return vabdq_u16(v1, v2); + + // CIR-LABEL: vabdq_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) + + // LLVM: {{.*}}test_vabdq_u16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]]) + // LLVM: ret <8 x i16> [[VABD_I]] +} + +uint32x4_t test_vabdq_u32(uint32x4_t v1, uint32x4_t v2) { + return vabdq_u32(v1, v2); + + // CIR-LABEL: vabdq_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) + + // LLVM: {{.*}}test_vabdq_u32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]]) + // LLVM: ret <4 x i32> [[VABD_I]] +} + +float32x4_t test_vabdq_f32(float32x4_t v1, float32x4_t v2) { + return vabdq_f32(v1, v2); + + // CIR-LABEL: vabdq_f32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) + + // LLVM: {{.*}}test_vabdq_f32(<4 x float>{{.*}}[[V1:%.*]], <4 x float>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_F:%.*]] = call <4 x float> @llvm.aarch64.neon.fabd.v4f32(<4 x float> [[V1]], <4 x float> [[V2]]) + // LLVM: ret <4 x float> [[VABD_F]] +} + +float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) { + return vabdq_f64(v1, v2); + + // CIR-LABEL: vabdq_f64 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fabd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) + + // LLVM: {{.*}}test_vabdq_f64(<2 x double>{{.*}}[[V1:%.*]], <2 x double>{{.*}}[[V2:%.*]]) + // LLVM: [[VABD_F:%.*]] = call <2 x double> @llvm.aarch64.neon.fabd.v2f64(<2 x double> [[V1]], <2 x double> [[V2]]) + // LLVM: ret <2 x double> [[VABD_F]] +} // NYI-LABEL: @test_vbsl_s8( // NYI: [[VBSL_I:%.*]] = and <8 x i8> %v1, %v2 From 8e4fa91c1d564e6a97190f020fed3dac0471dd38 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 21 Oct 2024 16:39:42 -0400 Subject: [PATCH 1977/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vmull_v (#998) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 13 +- clang/test/CIR/CodeGen/AArch64/neon.c | 135 +++++++++++------- 2 files changed, 96 insertions(+), 52 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 52036da077d9..73f6a37aa025 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3124,8 +3124,17 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vfmad_laneq_f64: { llvm_unreachable("NYI"); } - case NEON::BI__builtin_neon_vmull_v: - llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vmull_v: { + llvm::StringRef name = + usgn ? "llvm.aarch64.neon.umull" : "llvm.aarch64.neon.smull"; + if (Type.isPoly()) + name = "llvm.aarch64.neon.pmull"; + mlir::cir::VectorType argTy = + builder.getExtendedOrTruncatedElementVectorType( + ty, false /* truncated */, !usgn); + return buildNeonCall(builder, {argTy, argTy}, Ops, name, ty, + getLoc(E->getExprLoc())); + } case NEON::BI__builtin_neon_vmax_v: case NEON::BI__builtin_neon_vmaxq_v: llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 958207dcdc66..81db52901d5c 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -8084,55 +8084,85 @@ uint64x2_t test_vmovl_u32(uint32x2_t a) { // return vabal_high_u32(a, b, c); // } -// NYI-LABEL: @test_vmull_s8( -// NYI: [[VMULL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i16> [[VMULL_I]] -// int16x8_t test_vmull_s8(int8x8_t a, int8x8_t b) { -// return vmull_s8(a, b); -// } +int16x8_t test_vmull_s8(int8x8_t a, int8x8_t b) { + return vmull_s8(a, b); -// NYI-LABEL: @test_vmull_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VMULL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %b) -// NYI: ret <4 x i32> [[VMULL2_I]] -// int32x4_t test_vmull_s16(int16x4_t a, int16x4_t b) { -// return vmull_s16(a, b); -// } + // CIR-LABEL: vmull_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vmull_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VMULL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %b) -// NYI: ret <2 x i64> [[VMULL2_I]] -// int64x2_t test_vmull_s32(int32x2_t a, int32x2_t b) { -// return vmull_s32(a, b); -// } + // LLVM: {{.*}}test_vmull_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VMULL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> [[A]], <8 x i8> [[B]]) + // LLVM: ret <8 x i16> [[VMULL_I]] +} -// NYI-LABEL: @test_vmull_u8( -// NYI: [[VMULL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i16> [[VMULL_I]] -// uint16x8_t test_vmull_u8(uint8x8_t a, uint8x8_t b) { -// return vmull_u8(a, b); -// } +int32x4_t test_vmull_s16(int16x4_t a, int16x4_t b) { + return vmull_s16(a, b); -// NYI-LABEL: @test_vmull_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VMULL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %b) -// NYI: ret <4 x i32> [[VMULL2_I]] -// uint32x4_t test_vmull_u16(uint16x4_t a, uint16x4_t b) { -// return vmull_u16(a, b); -// } + // CIR-LABEL: vmull_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vmull_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VMULL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %b) -// NYI: ret <2 x i64> [[VMULL2_I]] -// uint64x2_t test_vmull_u32(uint32x2_t a, uint32x2_t b) { -// return vmull_u32(a, b); -// } + // LLVM: {{.*}}test_vmull_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8> + // LLVM: [[VMULL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> [[A]], <4 x i16> [[B]]) + // LLVM: ret <4 x i32> [[VMULL2_I]] +} + +int64x2_t test_vmull_s32(int32x2_t a, int32x2_t b) { + return vmull_s32(a, b); + + // CIR-LABEL: vmull_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vmull_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8> + // LLVM: [[VMULL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> [[A]], <2 x i32> [[B]]) + // LLVM: ret <2 x i64> [[VMULL2_I]] +} + +uint16x8_t test_vmull_u8(uint8x8_t a, uint8x8_t b) { + return vmull_u8(a, b); + + // CIR-LABEL: vmull_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vmull_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VMULL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> [[A]], <8 x i8> [[B]]) + // LLVM: ret <8 x i16> [[VMULL_I]] +} + +uint32x4_t test_vmull_u16(uint16x4_t a, uint16x4_t b) { + return vmull_u16(a, b); + + // CIR-LABEL: vmull_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vmull_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8> + // LLVM: [[VMULL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> [[A]], <4 x i16> [[B]]) + // LLVM: ret <4 x i32> [[VMULL2_I]] +} + +uint64x2_t test_vmull_u32(uint32x2_t a, uint32x2_t b) { + return vmull_u32(a, b); + + // CIR-LABEL: vmull_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vmull_u32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8> + // LLVM: [[VMULL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> [[A]], <2 x i32> [[B]]) + // LLVM: ret <2 x i64> [[VMULL2_I]] +} // NYI-LABEL: @test_vmull_high_s8( // NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> @@ -8584,12 +8614,17 @@ uint64x2_t test_vmovl_u32(uint32x2_t a) { // return vqdmlsl_high_s32(a, b, c); // } -// NYI-LABEL: @test_vmull_p8( -// NYI: [[VMULL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i16> [[VMULL_I]] -// poly16x8_t test_vmull_p8(poly8x8_t a, poly8x8_t b) { -// return vmull_p8(a, b); -// } +poly16x8_t test_vmull_p8(poly8x8_t a, poly8x8_t b) { + return vmull_p8(a, b); + + // CIR-LABEL: vmull_p8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.pmull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vmull_p8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VMULL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> [[A]], <8 x i8> [[B]]) + // LLVM: ret <8 x i16> [[VMULL_I]] +} // NYI-LABEL: @test_vmull_high_p8( // NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> From 85263f23a266287fb626f0f5755b8817422d2ff0 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 21 Oct 2024 16:40:46 -0400 Subject: [PATCH 1978/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vrhadd_v and neon_vrhaddq_v (#999) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 7 + clang/test/CIR/CodeGen/AArch64/neon.c | 251 +++++++++++------- 2 files changed, 162 insertions(+), 96 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 73f6a37aa025..3b261904dcca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2359,6 +2359,13 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "llvm.aarch64.neon.sqsub"; break; } + case NEON::BI__builtin_neon_vrhadd_v: + case NEON::BI__builtin_neon_vrhaddq_v: { + intrincsName = (intrinicId != altLLVMIntrinsic) + ? "llvm.aarch64.neon.urhadd" + : "llvm.aarch64.neon.srhadd"; + break; + } } if (!intrincsName.empty()) return buildCommonNeonCallPattern0(*this, intrincsName, {vTy, vTy}, ops, diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 81db52901d5c..1578e00c048e 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -2784,114 +2784,173 @@ float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) { // return vhsubq_u32(v1, v2); // } -// NYI-LABEL: @test_vrhadd_s8( -// NYI: [[VRHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %v1, <8 x i8> %v2) -// NYI: ret <8 x i8> [[VRHADD_V_I]] -// int8x8_t test_vrhadd_s8(int8x8_t v1, int8x8_t v2) { -// return vrhadd_s8(v1, v2); -// } +int8x8_t test_vrhadd_s8(int8x8_t v1, int8x8_t v2) { + return vrhadd_s8(v1, v2); -// NYI-LABEL: @test_vrhadd_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> -// NYI: [[VRHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %v1, <4 x i16> %v2) -// NYI: [[VRHADD_V3_I:%.*]] = bitcast <4 x i16> [[VRHADD_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VRHADD_V2_I]] -// int16x4_t test_vrhadd_s16(int16x4_t v1, int16x4_t v2) { -// return vrhadd_s16(v1, v2); -// } + // CIR-LABEL: vrhadd_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrhadd_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> -// NYI: [[VRHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %v1, <2 x i32> %v2) -// NYI: [[VRHADD_V3_I:%.*]] = bitcast <2 x i32> [[VRHADD_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VRHADD_V2_I]] -// int32x2_t test_vrhadd_s32(int32x2_t v1, int32x2_t v2) { -// return vrhadd_s32(v1, v2); -// } + // LLVM: {{.*}}@test_vrhadd_s8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VRHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]]) + // LLVM: ret <8 x i8> [[VRHADD_V_I]] +} -// NYI-LABEL: @test_vrhadd_u8( -// NYI: [[VRHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %v1, <8 x i8> %v2) -// NYI: ret <8 x i8> [[VRHADD_V_I]] -// uint8x8_t test_vrhadd_u8(uint8x8_t v1, uint8x8_t v2) { -// return vrhadd_u8(v1, v2); -// } +int16x4_t test_vrhadd_s16(int16x4_t v1, int16x4_t v2) { + return vrhadd_s16(v1, v2); -// NYI-LABEL: @test_vrhadd_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> -// NYI: [[VRHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> %v1, <4 x i16> %v2) -// NYI: [[VRHADD_V3_I:%.*]] = bitcast <4 x i16> [[VRHADD_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VRHADD_V2_I]] -// uint16x4_t test_vrhadd_u16(uint16x4_t v1, uint16x4_t v2) { -// return vrhadd_u16(v1, v2); -// } + // CIR-LABEL: vrhadd_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrhadd_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> -// NYI: [[VRHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> %v1, <2 x i32> %v2) -// NYI: [[VRHADD_V3_I:%.*]] = bitcast <2 x i32> [[VRHADD_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VRHADD_V2_I]] -// uint32x2_t test_vrhadd_u32(uint32x2_t v1, uint32x2_t v2) { -// return vrhadd_u32(v1, v2); -// } + // LLVM: {{.*}}@test_vrhadd_s16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8> + // LLVM: [[VRHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]]) + // LLVM: [[VRHADD_V3_I:%.*]] = bitcast <4 x i16> [[VRHADD_V2_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VRHADD_V2_I]] +} -// NYI-LABEL: @test_vrhaddq_s8( -// NYI: [[VRHADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %v1, <16 x i8> %v2) -// NYI: ret <16 x i8> [[VRHADDQ_V_I]] -// int8x16_t test_vrhaddq_s8(int8x16_t v1, int8x16_t v2) { -// return vrhaddq_s8(v1, v2); -// } +int32x2_t test_vrhadd_s32(int32x2_t v1, int32x2_t v2) { + return vrhadd_s32(v1, v2); -// NYI-LABEL: @test_vrhaddq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> -// NYI: [[VRHADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %v1, <8 x i16> %v2) -// NYI: [[VRHADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VRHADDQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VRHADDQ_V2_I]] -// int16x8_t test_vrhaddq_s16(int16x8_t v1, int16x8_t v2) { -// return vrhaddq_s16(v1, v2); -// } + // CIR-LABEL: vrhadd_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrhaddq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> -// NYI: [[VRHADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %v1, <4 x i32> %v2) -// NYI: [[VRHADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VRHADDQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VRHADDQ_V2_I]] -// int32x4_t test_vrhaddq_s32(int32x4_t v1, int32x4_t v2) { -// return vrhaddq_s32(v1, v2); -// } + // LLVM: {{.*}}@test_vrhadd_s32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8> + // LLVM: [[VRHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]]) + // LLVM: [[VRHADD_V3_I:%.*]] = bitcast <2 x i32> [[VRHADD_V2_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VRHADD_V2_I]] +} -// NYI-LABEL: @test_vrhaddq_u8( -// NYI: [[VRHADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %v1, <16 x i8> %v2) -// NYI: ret <16 x i8> [[VRHADDQ_V_I]] -// uint8x16_t test_vrhaddq_u8(uint8x16_t v1, uint8x16_t v2) { -// return vrhaddq_u8(v1, v2); -// } +uint8x8_t test_vrhadd_u8(uint8x8_t v1, uint8x8_t v2) { + return vrhadd_u8(v1, v2); -// NYI-LABEL: @test_vrhaddq_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> -// NYI: [[VRHADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %v1, <8 x i16> %v2) -// NYI: [[VRHADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VRHADDQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VRHADDQ_V2_I]] -// uint16x8_t test_vrhaddq_u16(uint16x8_t v1, uint16x8_t v2) { -// return vrhaddq_u16(v1, v2); -// } + // CIR-LABEL: vrhadd_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrhaddq_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> -// NYI: [[VRHADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> %v1, <4 x i32> %v2) -// NYI: [[VRHADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VRHADDQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VRHADDQ_V2_I]] -// uint32x4_t test_vrhaddq_u32(uint32x4_t v1, uint32x4_t v2) { -// return vrhaddq_u32(v1, v2); -// } + // LLVM: {{.*}}@test_vrhadd_u8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VRHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]]) + // LLVM: ret <8 x i8> [[VRHADD_V_I]] +} + +uint16x4_t test_vrhadd_u16(uint16x4_t v1, uint16x4_t v2) { + return vrhadd_u16(v1, v2); + + // CIR-LABEL: vrhadd_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrhadd_u16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8> + // LLVM: [[VRHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]]) + // LLVM: [[VRHADD_V3_I:%.*]] = bitcast <4 x i16> [[VRHADD_V2_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VRHADD_V2_I]] +} + +uint32x2_t test_vrhadd_u32(uint32x2_t v1, uint32x2_t v2) { + return vrhadd_u32(v1, v2); + + // CIR-LABEL: vrhadd_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrhadd_u32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8> + // LLVM: [[VRHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]]) + // LLVM: [[VRHADD_V3_I:%.*]] = bitcast <2 x i32> [[VRHADD_V2_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VRHADD_V2_I]] +} + +int8x16_t test_vrhaddq_s8(int8x16_t v1, int8x16_t v2) { + return vrhaddq_s8(v1, v2); + + // CIR-LABEL: vrhaddq_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrhaddq_s8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VRHADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]]) + // LLVM: ret <16 x i8> [[VRHADDQ_V_I]] +} + +int16x8_t test_vrhaddq_s16(int16x8_t v1, int16x8_t v2) { + return vrhaddq_s16(v1, v2); + + // CIR-LABEL: vrhaddq_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrhaddq_s16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8> + // LLVM: [[VRHADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]]) + // LLVM: [[VRHADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VRHADDQ_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VRHADDQ_V2_I]] +} + +int32x4_t test_vrhaddq_s32(int32x4_t v1, int32x4_t v2) { + return vrhaddq_s32(v1, v2); + + // CIR-LABEL: vrhaddq_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrhaddq_s32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8> + // LLVM: [[VRHADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]]) + // LLVM: [[VRHADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VRHADDQ_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VRHADDQ_V2_I]] +} + +uint8x16_t test_vrhaddq_u8(uint8x16_t v1, uint8x16_t v2) { + return vrhaddq_u8(v1, v2); + + // CIR-LABEL: vrhaddq_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrhaddq_u8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VRHADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]]) + // LLVM: ret <16 x i8> [[VRHADDQ_V_I]] +} + +uint16x8_t test_vrhaddq_u16(uint16x8_t v1, uint16x8_t v2) { + return vrhaddq_u16(v1, v2); + + // CIR-LABEL: vrhaddq_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrhaddq_u16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8> + // LLVM: [[VRHADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]]) + // LLVM: [[VRHADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VRHADDQ_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VRHADDQ_V2_I]] +} + +uint32x4_t test_vrhaddq_u32(uint32x4_t v1, uint32x4_t v2) { + return vrhaddq_u32(v1, v2); + // CIR-LABEL: vrhaddq_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrhaddq_u32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8> + // LLVM: [[VRHADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]]) + // LLVM: [[VRHADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VRHADDQ_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VRHADDQ_V2_I]] +} int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { return vqadd_s8(a, b); From c56171f24497616b4b85e0dfd34b48752159de81 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 21 Oct 2024 16:41:20 -0400 Subject: [PATCH 1979/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vmin_v and neon_vminq_v (#1000) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 10 +- clang/test/CIR/CodeGen/AArch64/neon.c | 308 +++++++++++------- 2 files changed, 195 insertions(+), 123 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 3b261904dcca..1efad486b007 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3149,8 +3149,14 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } case NEON::BI__builtin_neon_vmin_v: - case NEON::BI__builtin_neon_vminq_v: - llvm_unreachable("NYI"); + case NEON::BI__builtin_neon_vminq_v: { + llvm::StringRef name = + usgn ? "llvm.aarch64.neon.umin" : "llvm.aarch64.neon.smin"; + if (mlir::cir::isFPOrFPVectorTy(ty)) + name = "llvm.aarch64.neon.fmin"; + return buildNeonCall(builder, {ty, ty}, Ops, name, ty, + getLoc(E->getExprLoc())); + } case NEON::BI__builtin_neon_vminh_f16: { llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 1578e00c048e..0e6cbec7c7c1 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -4012,132 +4012,207 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { // return vmaxq_f64(a, b); // } -// NYI-LABEL: @test_vmin_s8( -// NYI: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i8> [[VMIN_I]] -// int8x8_t test_vmin_s8(int8x8_t a, int8x8_t b) { -// return vmin_s8(a, b); -// } +int8x8_t test_vmin_s8(int8x8_t a, int8x8_t b) { + return vmin_s8(a, b); -// NYI-LABEL: @test_vmin_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: ret <4 x i16> [[VMIN2_I]] -// int16x4_t test_vmin_s16(int16x4_t a, int16x4_t b) { -// return vmin_s16(a, b); -// } + // CIR-LABEL: vmin_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vmin_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[A]], <8 x i8> [[B]]) + // LLVM: ret <8 x i8> [[VMIN_I]] +} -// NYI-LABEL: @test_vmin_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: ret <2 x i32> [[VMIN2_I]] -// int32x2_t test_vmin_s32(int32x2_t a, int32x2_t b) { -// return vmin_s32(a, b); -// } +int16x4_t test_vmin_s16(int16x4_t a, int16x4_t b) { + return vmin_s16(a, b); -// NYI-LABEL: @test_vmin_u8( -// NYI: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i8> [[VMIN_I]] -// uint8x8_t test_vmin_u8(uint8x8_t a, uint8x8_t b) { -// return vmin_u8(a, b); -// } + // CIR-LABEL: vmin_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vmin_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: ret <4 x i16> [[VMIN2_I]] -// uint16x4_t test_vmin_u16(uint16x4_t a, uint16x4_t b) { -// return vmin_u16(a, b); -// } + // LLVM: {{.*}}@test_vmin_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[A]], <4 x i16> [[B]]) + // LLVM: ret <4 x i16> [[VMIN2_I]] +} -// NYI-LABEL: @test_vmin_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: ret <2 x i32> [[VMIN2_I]] -// uint32x2_t test_vmin_u32(uint32x2_t a, uint32x2_t b) { -// return vmin_u32(a, b); -// } +int32x2_t test_vmin_s32(int32x2_t a, int32x2_t b) { + return vmin_s32(a, b); -// NYI-LABEL: @test_vmin_f32( -// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> -// NYI: [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> %a, <2 x float> %b) -// NYI: ret <2 x float> [[VMIN2_I]] -// float32x2_t test_vmin_f32(float32x2_t a, float32x2_t b) { -// return vmin_f32(a, b); -// } + // CIR-LABEL: vmin_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vminq_s8( -// NYI: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> %a, <16 x i8> %b) -// NYI: ret <16 x i8> [[VMIN_I]] -// int8x16_t test_vminq_s8(int8x16_t a, int8x16_t b) { -// return vminq_s8(a, b); -// } + // LLVM: {{.*}}@test_vmin_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[A]], <2 x i32> [[B]]) + // LLVM: ret <2 x i32> [[VMIN2_I]] +} -// NYI-LABEL: @test_vminq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> %a, <8 x i16> %b) -// NYI: ret <8 x i16> [[VMIN2_I]] -// int16x8_t test_vminq_s16(int16x8_t a, int16x8_t b) { -// return vminq_s16(a, b); -// } +uint8x8_t test_vmin_u8(uint8x8_t a, uint8x8_t b) { + return vmin_u8(a, b); -// NYI-LABEL: @test_vminq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> %a, <4 x i32> %b) -// NYI: ret <4 x i32> [[VMIN2_I]] -// int32x4_t test_vminq_s32(int32x4_t a, int32x4_t b) { -// return vminq_s32(a, b); -// } + // CIR-LABEL: vmin_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vminq_u8( -// NYI: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> %a, <16 x i8> %b) -// NYI: ret <16 x i8> [[VMIN_I]] -// uint8x16_t test_vminq_u8(uint8x16_t a, uint8x16_t b) { -// return vminq_u8(a, b); -// } + // LLVM: {{.*}}@test_vmin_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[A]], <8 x i8> [[B]]) + // LLVM: ret <8 x i8> [[VMIN_I]] +} -// NYI-LABEL: @test_vminq_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> %a, <8 x i16> %b) -// NYI: ret <8 x i16> [[VMIN2_I]] -// uint16x8_t test_vminq_u16(uint16x8_t a, uint16x8_t b) { -// return vminq_u16(a, b); -// } +uint16x4_t test_vmin_u16(uint16x4_t a, uint16x4_t b) { + return vmin_u16(a, b); -// NYI-LABEL: @test_vminq_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> %a, <4 x i32> %b) -// NYI: ret <4 x i32> [[VMIN2_I]] -// uint32x4_t test_vminq_u32(uint32x4_t a, uint32x4_t b) { -// return vminq_u32(a, b); -// } + // CIR-LABEL: vmin_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vminq_f32( -// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> -// NYI: [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> %a, <4 x float> %b) -// NYI: ret <4 x float> [[VMIN2_I]] -// float32x4_t test_vminq_f32(float32x4_t a, float32x4_t b) { -// return vminq_f32(a, b); -// } + // LLVM: {{.*}}@test_vmin_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[A]], <4 x i16> [[B]]) + // LLVM: ret <4 x i16> [[VMIN2_I]] +} -// NYI-LABEL: @test_vminq_f64( -// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> -// NYI: [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> %a, <2 x double> %b) -// NYI: ret <2 x double> [[VMIN2_I]] -// float64x2_t test_vminq_f64(float64x2_t a, float64x2_t b) { -// return vminq_f64(a, b); -// } +uint32x2_t test_vmin_u32(uint32x2_t a, uint32x2_t b) { + return vmin_u32(a, b); + + // CIR-LABEL: vmin_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vmin_u32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[A]], <2 x i32> [[B]]) + // LLVM: ret <2 x i32> [[VMIN2_I]] +} + +float32x2_t test_vmin_f32(float32x2_t a, float32x2_t b) { + return vmin_f32(a, b); + + // CIR-LABEL: vmin_f32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fmin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vmin_f32(<2 x float>{{.*}}[[A:%.*]], <2 x float>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x float> [[B]] to <8 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[A]], <2 x float> [[B]]) + // LLVM: ret <2 x float> [[VMIN2_I]] +} + +float64x1_t test_vmin_f64(float64x1_t a, float64x1_t b) { + return vmin_f64(a, b); + + // CIR-LABEL: vmin_f64 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fmin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vmin_f64(<1 x double>{{.*}}[[A:%.*]], <1 x double>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x double> [[B]] to <8 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[A]], <1 x double> [[B]]) + // LLVM: ret <1 x double> [[VMIN2_I]] +} + +int8x16_t test_vminq_s8(int8x16_t a, int8x16_t b) { + return vminq_s8(a, b); + + // CIR-LABEL: vminq_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vminq_s8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) + // LLVM: ret <16 x i8> [[VMIN_I]] +} + +int16x8_t test_vminq_s16(int16x8_t a, int16x8_t b) { + return vminq_s16(a, b); + + // CIR-LABEL: vminq_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vminq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) + // LLVM: ret <8 x i16> [[VMIN2_I]] +} + +int32x4_t test_vminq_s32(int32x4_t a, int32x4_t b) { + return vminq_s32(a, b); + + // CIR-LABEL: vminq_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vminq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[A]], <4 x i32> + // LLVM: ret <4 x i32> [[VMIN2_I]] +} + +uint8x16_t test_vminq_u8(uint8x16_t a, uint8x16_t b) { + return vminq_u8(a, b); + + // CIR-LABEL: vminq_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vminq_u8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) + // LLVM: ret <16 x i8> [[VMIN_I]] +} + +uint16x8_t test_vminq_u16(uint16x8_t a, uint16x8_t b) { + return vminq_u16(a, b); + + // CIR-LABEL: vminq_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vminq_u16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[A]], <8 x i16> + // LLVM: ret <8 x i16> [[VMIN2_I]] +} + +uint32x4_t test_vminq_u32(uint32x4_t a, uint32x4_t b) { + return vminq_u32(a, b); + + // CIR-LABEL: vminq_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vminq_u32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[A]], <4 x i32> + // LLVM: ret <4 x i32> [[VMIN2_I]] +} + +float64x2_t test_vminq_f64(float64x2_t a, float64x2_t b) { + return vminq_f64(a, b); + + // CIR-LABEL: vminq_f64 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fmin" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vminq_f64(<2 x double>{{.*}}[[A:%.*]], <2 x double>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x double> [[B]] to <16 x i8> + // LLVM: [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[A]], <2 x double> + // LLVM: ret <2 x double> [[VMIN2_I]] +} // NYI-LABEL: @test_vmaxnm_f32( // NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> @@ -17444,15 +17519,6 @@ void test_vst1q_s64(int64_t *a, int64x2_t b) { // return vmax_f64(a, b); // } -// NYI-LABEL: @test_vmin_f64( -// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> -// NYI: [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> %a, <1 x double> %b) -// NYI: ret <1 x double> [[VMIN2_I]] -// float64x1_t test_vmin_f64(float64x1_t a, float64x1_t b) { -// return vmin_f64(a, b); -// } - // NYI-LABEL: @test_vmaxnm_f64( // NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> // NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> From 74116414a9b444de651cd8d81c43b21a96d14d00 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Sat, 26 Oct 2024 05:14:25 +0300 Subject: [PATCH 1980/2301] [CIR][Transforms] Fix CallConv Function Lowering (#979) Re #958 > Consider the following code snippet `tmp.c`: > ``` > typedef struct { > int a, b; > } S; > > void foo(S s) {} > ``` > Running `bin/clang tmp.c -fclangir -Xclang -emit-llvm -Xclang -fclangir-call-conv-lowering -S -o -`, we get: > ``` > loc(fused["tmp.c":5:1, "tmp.c":5:16]): error: 'llvm.bitcast' op result #0 must be LLVM-compatible non-aggregate type, but got '!llvm.struct<"struct.S", (i32, i32)>' > ``` > We can also produce a similar error from this: > ``` > typedef struct { > int a, b; > } S; > > S init() { S s; return s; } > ``` > gives: > ``` > loc(fused["tmp.c":5:17, "tmp.c":5:24]): error: 'llvm.bitcast' op operand #0 must be LLVM-compatible non-aggregate type, but got '!llvm.struct<"struct.S", (i32, i32)>' > ``` > I've traced the errors back to `lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp` in `LowerFunction::buildAggregateStore`, `castReturnValue`, and `buildAggregateBitcast`. > > `withElementType(SrcTy)` is currently commented out/ignored in `LowerFunction.cpp`, but it is important. > > This PR adds/fixes this and updates one test. I thought [about it](https://github.com/llvm/clangir/pull/958#issuecomment-2405766921) and I understand adding `cir.bitcast` to circumvent the CIR checks, but I am not sure how we can ignore/drop the bitcast while lowering. I think we can just make the CIR casts correct. I have added a number of lowering tests to verify that the CIR is lowered properly. cc: @sitio-couto @bcardosolopes. --- .../TargetLowering/LowerFunction.cpp | 32 +++- .../CIR/CallConvLowering/AArch64/struct.c | 167 ++++++++++++++++++ .../CIR/CallConvLowering/x86_64/basic.cpp | 32 ++-- 3 files changed, 213 insertions(+), 18 deletions(-) create mode 100644 clang/test/CIR/CallConvLowering/AArch64/struct.c diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index dccc52ad635d..fee7a752d7fb 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -42,6 +42,17 @@ Value buildAddressAtOffset(LowerFunction &LF, Value addr, return addr; } +Value createCoercedBitcast(Value Src, Type DestTy, LowerFunction &CGF) { + auto destPtrTy = PointerType::get(CGF.getRewriter().getContext(), DestTy); + + if (auto load = dyn_cast(Src.getDefiningOp())) + return CGF.getRewriter().create(Src.getLoc(), destPtrTy, + CastKind::bitcast, load.getAddr()); + + return CGF.getRewriter().create(Src.getLoc(), destPtrTy, + CastKind::bitcast, Src); +} + /// Given a struct pointer that we are accessing some number of bytes out of it, /// try to gep into the struct to get at its inner goodness. Dive as deep as /// possible without entering an element with an in-memory size smaller than @@ -112,7 +123,7 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, // If store is legal, just bitcast the src pointer. cir_cconv_assert(!::cir::MissingFeatures::vectorType()); if (SrcSize.getFixedValue() <= DstSize.getFixedValue()) { - // Dst = Dst.withElementType(SrcTy); + Dst = createCoercedBitcast(Dst, SrcTy, CGF); CGF.buildAggregateStore(Src, Dst, DstIsVolatile); } else { cir_cconv_unreachable("NYI"); @@ -174,7 +185,6 @@ Value createCoercedValue(Value Src, Type Ty, LowerFunction &CGF) { // // FIXME: Assert that we aren't truncating non-padding bits when have access // to that information. - // Src = Src.withElementType(); return CGF.buildAggregateBitcast(Src, Ty); } @@ -233,8 +243,8 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { // // FIXME: Assert that we aren't truncating non-padding bits when have access // to that information. - return LF.getRewriter().create(Src.getLoc(), Ty, CastKind::bitcast, - Src); + auto Cast = createCoercedBitcast(Src, Ty, LF); + return LF.getRewriter().create(Src.getLoc(), Cast); } cir_cconv_unreachable("NYI"); @@ -550,7 +560,8 @@ void LowerFunction::buildAggregateStore(Value Val, Value Dest, } Value LowerFunction::buildAggregateBitcast(Value Val, Type DestTy) { - return rewriter.create(Val.getLoc(), DestTy, CastKind::bitcast, Val); + auto Cast = createCoercedBitcast(Val, DestTy, *this); + return rewriter.create(Val.getLoc(), Cast); } /// Rewrite a call operation to abide to the ABI calling convention. @@ -885,8 +896,15 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // actual data to store. if (dyn_cast(RetTy) && cast(RetTy).getNumElements() != 0) { - RetVal = - createCoercedValue(newCallOp.getResult(), RetVal.getType(), *this); + RetVal = newCallOp.getResult(); + + for (auto user : Caller.getOperation()->getUsers()) { + if (auto storeOp = dyn_cast(user)) { + auto DestPtr = createCoercedBitcast(storeOp.getAddr(), + RetVal.getType(), *this); + rewriter.replaceOpWithNewOp(storeOp, RetVal, DestPtr); + } + } } // NOTE(cir): No need to convert from a temp to an RValue. This is diff --git a/clang/test/CIR/CallConvLowering/AArch64/struct.c b/clang/test/CIR/CallConvLowering/AArch64/struct.c new file mode 100644 index 000000000000..f5dfd43dfcf5 --- /dev/null +++ b/clang/test/CIR/CallConvLowering/AArch64/struct.c @@ -0,0 +1,167 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -fclangir-call-conv-lowering +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +typedef struct { + int a, b; +} S; + +// CIR: cir.func @init(%arg0: !u64i +// CIR: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, [""] {alignment = 4 : i64} +// CIR: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CIR: cir.store %arg0, %[[#V1]] : !u64i, !cir.ptr +// CIR: %[[#V2:]] = cir.alloca !ty_S, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CIR: %[[#V3:]] = cir.const #cir.int<1> : !s32i +// CIR: %[[#V4:]] = cir.get_member %[[#V0]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CIR: cir.store %[[#V3]], %[[#V4]] : !s32i, !cir.ptr +// CIR: %[[#V5:]] = cir.const #cir.int<2> : !s32i +// CIR: %[[#V6:]] = cir.get_member %[[#V0]][1] {name = "b"} : !cir.ptr -> !cir.ptr +// CIR: cir.store %[[#V5]], %[[#V6]] : !s32i, !cir.ptr +// CIR: cir.copy %[[#V0]] to %[[#V2]] : !cir.ptr +// CIR: %[[#V7:]] = cir.cast(bitcast, %[[#V2]] : !cir.ptr), !cir.ptr +// CIR: %[[#V8:]] = cir.load %[[#V7]] : !cir.ptr, !u64i +// CIR: cir.return %[[#V8]] : !u64i + +// LLVM: @init(i64 %[[#V0:]]) +// LLVM: %[[#V2:]] = alloca %struct.S, i64 1, align 4 +// LLVM: store i64 %[[#V0]], ptr %[[#V2]], align 8 +// LLVM: %[[#V3:]] = alloca %struct.S, i64 1, align 4 +// LLVM: %[[#V4:]] = getelementptr %struct.S, ptr %[[#V2]], i32 0, i32 0 +// LLVM: store i32 1, ptr %[[#V4]], align 4 +// LLVM: %[[#V5:]] = getelementptr %struct.S, ptr %[[#V2]], i32 0, i32 1 +// LLVM: store i32 2, ptr %[[#V5]], align 4 +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[#V3]], ptr %[[#V2]], i32 8, i1 false) +// LLVM: %[[#V6:]] = load i64, ptr %[[#V3]], align 8 +// LLVM: ret i64 %[[#V6]] +S init(S s) { + s.a = 1; + s.b = 2; + return s; +} + +// CIR: cir.func no_proto @foo1 +// CIR: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, ["s"] +// CIR: %[[#V1:]] = cir.alloca !ty_S, !cir.ptr, ["tmp"] {alignment = 4 : i64} +// CIR: %[[#V2:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CIR: %[[#V3:]] = cir.load %[[#V2]] : !cir.ptr, !u64i +// CIR: %[[#V4:]] = cir.call @init(%[[#V3]]) : (!u64i) -> !u64i +// CIR: %[[#V5:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr), !cir.ptr +// CIR: cir.store %[[#V4]], %[[#V5]] : !u64i, !cir.ptr +// CIR: cir.copy %[[#V1]] to %[[#V0]] : !cir.ptr +// CIR: cir.return + +// LLVM: @foo1() +// LLVM: %[[#V1:]] = alloca %struct.S, i64 1, align 4 +// LLVM: %[[#V2:]] = alloca %struct.S, i64 1, align 4 +// LLVM: %[[#V3:]] = load i64, ptr %[[#V1]], align 8 +// LLVM: %[[#V4:]] = call i64 @init(i64 %[[#V3]]) +// LLVM: store i64 %[[#V4]], ptr %[[#V2]], align 8 +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[#V1]], ptr %[[#V2]], i32 8, i1 false) +void foo1() { + S s; + s = init(s); +} + +// CIR: cir.func @foo2(%arg0: !u64i +// CIR: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, [""] {alignment = 4 : i64} +// CIR: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CIR: cir.store %arg0, %[[#V1]] : !u64i, !cir.ptr +// CIR: %[[#V2:]] = cir.alloca !ty_S, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CIR: %[[#V3:]] = cir.alloca !ty_S, !cir.ptr, ["s2"] +// CIR: %[[#V4:]] = cir.alloca !ty_S, !cir.ptr, ["tmp"] {alignment = 4 : i64} +// CIR: %[[#V5:]] = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s32i}> : !ty_S +// CIR: cir.store %[[#V5]], %[[#V3]] : !ty_S, !cir.ptr +// CIR: %[[#V6:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CIR: %[[#V7:]] = cir.load %[[#V6]] : !cir.ptr, !u64i +// CIR: %[[#V8:]] = cir.call @foo2(%[[#V7]]) : (!u64i) -> !u64i +// CIR: %[[#V9:]] = cir.cast(bitcast, %[[#V4]] : !cir.ptr), !cir.ptr +// CIR: cir.store %[[#V8]], %[[#V9]] : !u64i, !cir.ptr +// CIR: cir.copy %[[#V4]] to %[[#V0]] : !cir.ptr +// CIR: cir.copy %[[#V0]] to %[[#V2]] : !cir.ptr +// CIR: %[[#V10:]] = cir.cast(bitcast, %[[#V2]] : !cir.ptr), !cir.ptr +// CIR: %[[#V11:]] = cir.load %[[#V10]] : !cir.ptr, !u64i +// CIR: cir.return %[[#V11]] : !u64i + +// LLVM: @foo2(i64 %[[#V0:]]) +// LLVM: %[[#V2:]] = alloca %struct.S, i64 1, align 4 +// LLVM: store i64 %[[#V0]], ptr %[[#V2]], align 8 +// LLVM: %[[#V3:]] = alloca %struct.S, i64 1, align 4 +// LLVM: %[[#V4:]] = alloca %struct.S, i64 1, align 4 +// LLVM: %[[#V5:]] = alloca %struct.S, i64 1, align 4 +// LLVM: store %struct.S { i32 1, i32 2 }, ptr %[[#V4]], align 4 +// LLVM: %[[#V6:]] = load i64, ptr %[[#V2]], align 8 +// LLVM: %[[#V7:]] = call i64 @foo2(i64 %[[#V6]]) +// LLVM: store i64 %[[#V7]], ptr %[[#V5]], align 8 +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[#V2]], ptr %[[#V5]], i32 8, i1 false) +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[#V3]], ptr %[[#V2]], i32 8, i1 false) +// LLVM: %[[#V8:]] = load i64, ptr %[[#V3]], align 8 +// LLVM: ret i64 %[[#V8]] +S foo2(S s1) { + S s2 = {1, 2}; + s1 = foo2(s1); + return s1; +} + +typedef struct { + char a; + char b; +} S2; + +// CIR: cir.func @init2(%arg0: !u16i +// CIR: %[[#V0:]] = cir.alloca !ty_S2_, !cir.ptr, [""] {alignment = 4 : i64} +// CIR: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CIR: cir.store %arg0, %[[#V1]] : !u16i, !cir.ptr +// CIR: %[[#V2:]] = cir.alloca !ty_S2_, !cir.ptr, ["__retval"] {alignment = 1 : i64} +// CIR: %[[#V3:]] = cir.const #cir.int<1> : !s32i +// CIR: %[[#V4:]] = cir.cast(integral, %[[#V3]] : !s32i), !s8i +// CIR: %[[#V5:]] = cir.get_member %[[#V0]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CIR: cir.store %[[#V4]], %[[#V5]] : !s8i, !cir.ptr +// CIR: %[[#V6:]] = cir.const #cir.int<2> : !s32i +// CIR: %[[#V7:]] = cir.cast(integral, %[[#V6]] : !s32i), !s8i +// CIR: %[[#V8:]] = cir.get_member %[[#V0]][1] {name = "b"} : !cir.ptr -> !cir.ptr +// CIR: cir.store %[[#V7]], %[[#V8]] : !s8i, !cir.ptr +// CIR: cir.copy %[[#V0]] to %[[#V2]] : !cir.ptr +// CIR: %[[#V9:]] = cir.cast(bitcast, %[[#V2]] : !cir.ptr), !cir.ptr +// CIR: %[[#V10:]] = cir.load %[[#V9]] : !cir.ptr, !u16i +// CIR: cir.return %[[#V10]] : !u16i + +// LLVM: @init2(i16 %[[#V0:]]) +// LLVM: %[[#V2:]] = alloca %struct.S2, i64 1, align 4 +// LLVM: store i16 %[[#V0]], ptr %[[#V2]], align 2 +// LLVM: %[[#V3:]] = alloca %struct.S2, i64 1, align 1 +// LLVM: %[[#V4:]] = getelementptr %struct.S2, ptr %[[#V2]], i32 0, i32 0 +// LLVM: store i8 1, ptr %[[#V4]], align 1 +// LLVM: %[[#V5:]] = getelementptr %struct.S2, ptr %[[#V2]], i32 0, i32 1 +// LLVM: store i8 2, ptr %[[#V5]], align 1 +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[#V3]], ptr %[[#V2]], i32 2, i1 false) +// LLVM: %[[#V6:]] = load i16, ptr %[[#V3]], align 2 +// LLVM: ret i16 %[[#V6]] +S2 init2(S2 s) { + s.a = 1; + s.b = 2; + return s; +} + +// CIR: cir.func no_proto @foo3() +// CIR: %[[#V0:]] = cir.alloca !ty_S2_, !cir.ptr, ["s"] +// CIR: %[[#V1:]] = cir.alloca !ty_S2_, !cir.ptr, ["tmp"] {alignment = 1 : i64} +// CIR: %[[#V2:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CIR: %[[#V3:]] = cir.load %[[#V2]] : !cir.ptr, !u16i +// CIR: %[[#V4:]] = cir.call @init2(%[[#V3]]) : (!u16i) -> !u16i +// CIR: %[[#V5:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr), !cir.ptr +// CIR: cir.store %[[#V4]], %[[#V5]] : !u16i, !cir.ptr +// CIR: cir.copy %[[#V1]] to %[[#V0]] : !cir.ptr +// CIR: cir.return + +// LLVM: @foo3() +// LLVM: %[[#V1:]] = alloca %struct.S2, i64 1, align 1 +// LLVM: %[[#V2:]] = alloca %struct.S2, i64 1, align 1 +// LLVM: %[[#V3:]] = load i16, ptr %[[#V1]], align 2 +// LLVM: %[[#V4:]] = call i16 @init2(i16 %[[#V3]]) +// LLVM: store i16 %[[#V4]], ptr %[[#V2]], align 2 +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[#V1]], ptr %[[#V2]], i32 2, i1 false) +void foo3() { + S2 s; + s = init2(s); +} \ No newline at end of file diff --git a/clang/test/CIR/CallConvLowering/x86_64/basic.cpp b/clang/test/CIR/CallConvLowering/x86_64/basic.cpp index a3c2d6960c39..5bef1d34f974 100644 --- a/clang/test/CIR/CallConvLowering/x86_64/basic.cpp +++ b/clang/test/CIR/CallConvLowering/x86_64/basic.cpp @@ -99,20 +99,30 @@ struct S1 { /// Validate coerced argument and cast it to the expected type. /// Cast arguments to the expected type. -// CHECK: cir.func @_Z2s12S1(%arg0: !u64i loc({{.+}})) -> !u64i -// CHECK: %[[#V0:]] = cir.alloca !ty_S1_, !cir.ptr -// CHECK: %[[#V1:]] = cir.cast(bitcast, %arg0 : !u64i), !ty_S1_ -// CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_S1_, !cir.ptr +// CHECK: %[[#V0:]] = cir.alloca !ty_S1_, !cir.ptr, [""] {alignment = 4 : i64} +// CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: cir.store %arg0, %[[#V1]] : !u64i, !cir.ptr +// CHECK: %[[#V2:]] = cir.alloca !ty_S1_, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: %[[#V3:]] = cir.alloca !ty_S1_, !cir.ptr, ["agg.tmp0"] {alignment = 4 : i64} +// CHECK: %[[#V4:]] = cir.alloca !ty_S1_, !cir.ptr, ["agg.tmp1"] {alignment = 4 : i64} S1 s1(S1 arg) { /// Cast argument and result of the function call to the expected types. - // CHECK: %[[#V9:]] = cir.cast(bitcast, %{{.+}} : !ty_S1_), !u64i - // CHECK: %[[#V10:]] = cir.call @_Z2s12S1(%[[#V9]]) : (!u64i) -> !u64i - // CHECK: %[[#V11:]] = cir.cast(bitcast, %[[#V10]] : !u64i), !ty_S1_ + // CHECK: %[[#V9:]] = cir.cast(bitcast, %[[#V3]] : !cir.ptr), !cir.ptr + // CHECK: %[[#V10:]] = cir.load %[[#V9]] : !cir.ptr, !u64i + // CHECK: %[[#V11:]] = cir.call @_Z2s12S1(%[[#V10]]) : (!u64i) -> !u64i + // CHECK: %[[#V12:]] = cir.cast(bitcast, %[[#V4]] : !cir.ptr), !cir.ptr + // CHECK: cir.store %[[#V11]], %[[#V12]] : !u64i, !cir.ptr s1({1, 2}); - // CHECK: %[[#V12:]] = cir.load %{{.+}} : !cir.ptr, !ty_S1_ - // CHECK: %[[#V13:]] = cir.cast(bitcast, %[[#V12]] : !ty_S1_), !u64i - // CHECK: cir.return %[[#V13]] : !u64i + // CHECK: %[[#V13:]] = cir.get_member %[[#V2]][0] {name = "a"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#V14:]] = cir.const #cir.int<1> : !s32i + // CHECK: cir.store %[[#V14]], %[[#V13]] : !s32i, !cir.ptr + // CHECK: %[[#V15:]] = cir.get_member %[[#V2]][1] {name = "b"} : !cir.ptr -> !cir.ptr + // CHECK: %[[#V16:]] = cir.const #cir.int<2> : !s32i + // CHECK: cir.store %[[#V16]], %[[#V15]] : !s32i, !cir.ptr + // CHECK: %[[#V17:]] = cir.cast(bitcast, %[[#V2]] : !cir.ptr), !cir.ptr + // CHECK: %[[#V18:]] = cir.load %[[#V17]] : !cir.ptr, !u64i + // CHECK: cir.return %[[#V18]] : !u64i return {1, 2}; -} +} \ No newline at end of file From 1196c24af2feebe62cdc0bd860f280420f389db5 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 25 Oct 2024 22:18:36 -0400 Subject: [PATCH 1981/2301] [CIR][CIRGen][Lowering] Use same set of annotation-related global vars for source code global and local vars (#1001) Now CIR supports annotations for both globals and locals. They all should just use the same set of annotation related globals including file name string, annotation name string, and arguments. This PR makes sure this is the case. FYI: for the test case we have, OG generates [ code ](https://godbolt.org/z/Ev5Ycoqj1), pretty much the same code except annotation variable names. This would fix the crash like > error: redefinition of symbol named '.str.annotation' > fatal error: error in backend: The pass manager failed to lower CIR to LLVMIR dialect! --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 76 ++++++++++++------- clang/test/CIR/CodeGen/annotations-var.c | 26 ++++++- 2 files changed, 71 insertions(+), 31 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 08313ac9eef6..e8177d93647f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1139,7 +1139,10 @@ struct ConvertCIRToLLVMPass } void runOnOperation() final; - void buildGlobalAnnotationsVar(); + void buildGlobalAnnotationsVar( + llvm::StringMap &stringGlobalsMap, + llvm::StringMap &argStringGlobalsMap, + llvm::MapVector &argsVarMap); virtual StringRef getArgument() const override { return "cir-flat-to-llvm"; } }; @@ -1321,13 +1324,26 @@ class CIREhInflightOpLowering class CIRAllocaLowering : public mlir::OpConversionPattern { mlir::DataLayout const &dataLayout; + // Track globals created for annotation related strings + llvm::StringMap &stringGlobalsMap; + // Track globals created for annotation arg related strings. + // They are different from annotation strings, as strings used in args + // are not in llvmMetadataSectionName, and also has aligment 1. + llvm::StringMap &argStringGlobalsMap; + // Track globals created for annotation args. + llvm::MapVector &argsVarMap; public: - CIRAllocaLowering(mlir::TypeConverter const &typeConverter, - mlir::DataLayout const &dataLayout, - mlir::MLIRContext *context) + CIRAllocaLowering( + mlir::TypeConverter const &typeConverter, + mlir::DataLayout const &dataLayout, + llvm::StringMap &stringGlobalsMap, + llvm::StringMap &argStringGlobalsMap, + llvm::MapVector &argsVarMap, + mlir::MLIRContext *context) : OpConversionPattern(typeConverter, context), - dataLayout(dataLayout) {} + dataLayout(dataLayout), stringGlobalsMap(stringGlobalsMap), + argStringGlobalsMap(argStringGlobalsMap), argsVarMap(argsVarMap) {} void buildAllocaAnnotations(mlir::LLVM::AllocaOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter, @@ -1342,15 +1358,6 @@ class CIRAllocaLowering mlir::OpBuilder varInitBuilder(module.getContext()); varInitBuilder.restoreInsertionPoint(afterAlloca); - // Track globals created for annotation related strings - llvm::StringMap stringGlobalsMap; - // Track globals created for annotation arg related strings. - // They are different from annotation strings, as strings used in args - // are not in llvmMetadataSectionName, and also has aligment 1. - llvm::StringMap argStringGlobalsMap; - // Track globals created for annotation args. - llvm::MapVector argsVarMap; - auto intrinRetTy = mlir::LLVM::LLVMVoidType::get(getContext()); constexpr const char *intrinNameAttr = "llvm.var.annotation.p0.p0"; for (mlir::Attribute entry : annotationValuesArray) { @@ -4144,11 +4151,16 @@ class CIRIsFPClassOpLowering } }; -void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, - mlir::TypeConverter &converter, - mlir::DataLayout &dataLayout) { +void populateCIRToLLVMConversionPatterns( + mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter, + mlir::DataLayout &dataLayout, + llvm::StringMap &stringGlobalsMap, + llvm::StringMap &argStringGlobalsMap, + llvm::MapVector &argsVarMap) { patterns.add(patterns.getContext()); - patterns.add(converter, dataLayout, patterns.getContext()); + patterns.add(converter, dataLayout, stringGlobalsMap, + argStringGlobalsMap, argsVarMap, + patterns.getContext()); patterns.add< CIRCmpOpLowering, CIRSelectOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, @@ -4444,7 +4456,10 @@ void collect_unreachable(mlir::Operation *parent, } } -void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar() { +void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar( + llvm::StringMap &stringGlobalsMap, + llvm::StringMap &argStringGlobalsMap, + llvm::MapVector &argsVarMap) { mlir::ModuleOp module = getOperation(); mlir::Attribute attr = module->getAttr("cir.global_annotations"); if (!attr) @@ -4491,14 +4506,6 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar() { mlir::Value result = varInitBuilder.create( moduleLoc, annoStructArrayTy); - // Track globals created for annotation related strings - llvm::StringMap stringGlobalsMap; - // Track globals created for annotation arg related strings. - // They are different from annotation strings, as strings used in args - // are not in llvmMetadataSectionName, and also has aligment 1. - llvm::StringMap argStringGlobalsMap; - // Track globals created for annotation args. - llvm::MapVector argsVarMap; int idx = 0; for (mlir::Attribute entry : annotationValuesArray) { @@ -4544,7 +4551,18 @@ void ConvertCIRToLLVMPass::runOnOperation() { mlir::RewritePatternSet patterns(&getContext()); - populateCIRToLLVMConversionPatterns(patterns, converter, dataLayout); + // Track globals created for annotation related strings + llvm::StringMap stringGlobalsMap; + // Track globals created for annotation arg related strings. + // They are different from annotation strings, as strings used in args + // are not in llvmMetadataSectionName, and also has aligment 1. + llvm::StringMap argStringGlobalsMap; + // Track globals created for annotation args. + llvm::MapVector argsVarMap; + + populateCIRToLLVMConversionPatterns(patterns, converter, dataLayout, + stringGlobalsMap, argStringGlobalsMap, + argsVarMap); mlir::populateFuncToLLVMConversionPatterns(converter, patterns); mlir::ConversionTarget target(getContext()); @@ -4600,7 +4618,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { auto dtorAttr = mlir::cast(attr); return std::make_pair(dtorAttr.getName(), dtorAttr.getPriority()); }); - buildGlobalAnnotationsVar(); + buildGlobalAnnotationsVar(stringGlobalsMap, argStringGlobalsMap, argsVarMap); } std::unique_ptr createConvertCIRToLLVMPass() { diff --git a/clang/test/CIR/CodeGen/annotations-var.c b/clang/test/CIR/CodeGen/annotations-var.c index ffd4bd9b18a4..5bb3989bc9d0 100644 --- a/clang/test/CIR/CodeGen/annotations-var.c +++ b/clang/test/CIR/CodeGen/annotations-var.c @@ -3,17 +3,39 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +// CIR-DAG: cir.global external @globalvar = #cir.int<3> : !s32i [#cir.annotation] {alignment = 4 : i64} +// CIR-DAG: cir.global external @globalvar2 = #cir.int<2> : !s32i [#cir.annotation] {alignment = 4 : i64} + // LLVM-DAG: @.str.annotation = private unnamed_addr constant [15 x i8] c"localvar_ann_0\00", section "llvm.metadata" // LLVM-DAG: @.str.1.annotation = private unnamed_addr constant [{{[0-9]+}} x i8] c"{{.*}}annotations-var.c\00", section "llvm.metadata" // LLVM-DAG: @.str.2.annotation = private unnamed_addr constant [15 x i8] c"localvar_ann_1\00", section "llvm.metadata" +// LLVM-DAG: @.str.3.annotation = private unnamed_addr constant [11 x i8] c"common_ann\00", section "llvm.metadata" +// LLVM-DAG: @.str.annotation.arg = private unnamed_addr constant [3 x i8] c"os\00", align 1 +// LLVM-DAG: @.args.annotation = private unnamed_addr constant { ptr, i32 } { ptr @.str.annotation.arg, i32 21 }, section "llvm.metadata" +// LLVM-DAG: @.str.4.annotation = private unnamed_addr constant [16 x i8] c"globalvar_ann_0\00", section "llvm.metadata" +// LLVM-DAG: @llvm.global.annotations = appending global [2 x { ptr, ptr, ptr, i32, ptr }] +// LLVM-DAG-SAME: [{ ptr, ptr, ptr, i32, ptr } { ptr @globalvar, ptr @.str.4.annotation, ptr @.str.1.annotation, i32 18, ptr null }, { ptr, ptr, ptr, i32, ptr } +// LLVM-DAG-SAME: { ptr @globalvar2, ptr @.str.3.annotation, ptr @.str.1.annotation, i32 19, ptr @.args.annotation }], section "llvm.metadata" +int globalvar __attribute__((annotate("globalvar_ann_0"))) = 3; +int globalvar2 __attribute__((annotate("common_ann", "os", 21))) = 2; void local(void) { int localvar __attribute__((annotate("localvar_ann_0"))) __attribute__((annotate("localvar_ann_1"))) = 3; + int localvar2 __attribute__((annotate("localvar_ann_0"))) = 3; + int localvar3 __attribute__((annotate("common_ann", "os", 21))) = 3; // CIR-LABEL: @local // CIR: %0 = cir.alloca !s32i, !cir.ptr, ["localvar", init] [#cir.annotation, #cir.annotation] +// CIR: %1 = cir.alloca !s32i, !cir.ptr, ["localvar2", init] [#cir.annotation] +// CIR: %2 = cir.alloca !s32i, !cir.ptr, ["localvar3", init] [#cir.annotation] + // LLVM-LABEL: @local // LLVM: %[[ALLOC:.*]] = alloca i32 -// LLVM: call void @llvm.var.annotation.p0.p0(ptr %[[ALLOC]], ptr @.str.annotation, ptr @.str.1.annotation, i32 11, ptr null) -// LLVM: call void @llvm.var.annotation.p0.p0(ptr %[[ALLOC]], ptr @.str.2.annotation, ptr @.str.1.annotation, i32 11, ptr null) +// LLVM: call void @llvm.var.annotation.p0.p0(ptr %[[ALLOC]], ptr @.str.annotation, ptr @.str.1.annotation, i32 23, ptr null) +// LLVM: call void @llvm.var.annotation.p0.p0(ptr %[[ALLOC]], ptr @.str.2.annotation, ptr @.str.1.annotation, i32 23, ptr null) +// LLVM: %[[ALLOC2:.*]] = alloca i32 +// LLVM: call void @llvm.var.annotation.p0.p0(ptr %[[ALLOC2]], ptr @.str.annotation, ptr @.str.1.annotation, i32 24, ptr null) +// LLVM: %[[ALLOC3:.*]] = alloca i32 +// LLVM: call void @llvm.var.annotation.p0.p0(ptr %[[ALLOC3]], ptr @.str.3.annotation, +// LLVM-SAME: ptr @.str.1.annotation, i32 25, ptr @.args.annotation), } From 6198692dbe984335e0b58d39645a4ed706c57cf6 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 25 Oct 2024 19:19:31 -0700 Subject: [PATCH 1982/2301] [CIR][CodeGen][NFC] Cleanup CIRGenFunction::StartFunction to match OG more (#1002) --- clang/include/clang/CIR/MissingFeatures.h | 2 + clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 107 +++++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenModule.h | 8 ++ 3 files changed, 94 insertions(+), 23 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index a22112bfdf55..6b82809a742a 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -120,6 +120,8 @@ struct MissingFeatures { static bool parameterAttributes() { return false; } static bool minLegalVectorWidthAttr() { return false; } static bool vscaleRangeAttr() { return false; } + static bool stackrealign() { return false; } + static bool zerocallusedregs() { return false; } // Coroutines static bool unhandledException() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index a78bfe322537..44e3c31ba592 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -14,6 +14,8 @@ #include "CIRGenCXXABI.h" #include "CIRGenModule.h" #include "CIRGenOpenMPRuntime.h" +#include "clang/AST/Attrs.inc" +#include "clang/Basic/CodeGenOptions.h" #include "clang/CIR/MissingFeatures.h" #include "clang/AST/ASTLambda.h" @@ -24,6 +26,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/FPEnv.h" #include "clang/Frontend/FrontendDiagnostic.h" +#include "llvm/ADT/PointerIntPair.h" #include "CIRGenTBAA.h" #include "mlir/Dialect/Func/IR/FuncOps.h" @@ -930,7 +933,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, #include "clang/Basic/Sanitizers.def" #undef SANITIZER - } while (0); + } while (false); if (D) { const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds); @@ -994,6 +997,9 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, if (SanOpts.has(SanitizerKind::ShadowCallStack)) assert(!MissingFeatures::sanitizeOther()); + if (SanOpts.has(SanitizerKind::Realtime)) + llvm_unreachable("NYI"); + // Apply fuzzing attribute to the function. if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink)) assert(!MissingFeatures::sanitizeOther()); @@ -1022,6 +1028,17 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass) SanOpts.Mask &= ~SanitizerKind::Null; + // Add pointer authentication attriburtes. + const CodeGenOptions &codeGenOptions = CGM.getCodeGenOpts(); + if (codeGenOptions.PointerAuth.ReturnAddresses) + llvm_unreachable("NYI"); + if (codeGenOptions.PointerAuth.FunctionPointers) + llvm_unreachable("NYI"); + if (codeGenOptions.PointerAuth.AuthTraps) + llvm_unreachable("NYI"); + if (codeGenOptions.PointerAuth.IndirectGotos) + llvm_unreachable("NYI"); + // Apply xray attributes to the function (as a string, for now) if (const auto *XRayAttr = D ? D->getAttr() : nullptr) { assert(!MissingFeatures::xray()); @@ -1048,6 +1065,15 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, if (Count && Offset <= Count) { llvm_unreachable("NYI"); } + // Instruct that functions for COFF/CodeView targets should start with a + // pathable instruction, but only on x86/x64. Don't forward this to ARM/ARM64 + // backends as they don't need it -- instructions on these architectures are + // always automatically patachable at runtime. + if (CGM.getCodeGenOpts().HotPatch && + getContext().getTargetInfo().getTriple().isX86() && + getContext().getTargetInfo().getTriple().getEnvironment() != + llvm::Triple::CODE16) + llvm_unreachable("NYI"); // Add no-jump-tables value. if (CGM.getCodeGenOpts().NoUseJumpTables) @@ -1070,10 +1096,28 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, if (D && D->hasAttr()) llvm_unreachable("NYI"); - if (FD && getLangOpts().OpenCL) { + if (D && D->hasAttr()) + llvm_unreachable("NYI"); + + if (D) { + // Funciton attribiutes take precedence over command line flags. + if ([[maybe_unused]] auto *a = D->getAttr()) { + llvm_unreachable("NYI"); + } else if (CGM.getCodeGenOpts().FunctionReturnThunks) + llvm_unreachable("NYI"); + } + + if (FD && (getLangOpts().OpenCL || + ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) && + getLangOpts().CUDAIsDevice))) { + // Add metadata for a kernel function. buildKernelMetadata(FD, Fn); } + if (FD && FD->hasAttr()) { + llvm_unreachable("NYI"); + } + // If we are checking function types, emit a function type signature as // prologue data. if (FD && getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) { @@ -1115,13 +1159,18 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, llvm_unreachable("NYI"); } - // TODO: stackrealign attr + if (MissingFeatures::stackrealign()) + llvm_unreachable("NYI"); + + if (FD && FD->isMain() && MissingFeatures::zerocallusedregs()) + llvm_unreachable("NYI"); mlir::Block *EntryBB = &Fn.getBlocks().front(); // TODO: allocapt insertion? probably don't need for CIR - // TODO: return value checking + if (MissingFeatures::requiresReturnValueCheck()) + llvm_unreachable("NYI"); if (getDebugInfo()) { llvm_unreachable("NYI"); @@ -1151,9 +1200,18 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // FIXME(cir): vla.c test currently crashes here. // PrologueCleanupDepth = EHStack.stable_begin(); + // Emit OpenMP specific initialization of the device functions. if (getLangOpts().OpenMP && CurCodeDecl) CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl); + if (FD && getLangOpts().HLSL) { + // Handle emitting HLSL entry functions. + if (FD->hasAttr()) { + llvm_unreachable("NYI"); + } + llvm_unreachable("NYI"); + } + // TODO: buildFunctionProlog { @@ -1250,32 +1308,35 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, } // If any of the arguments have a variably modified type, make sure to emit - // the type size. - for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e; - ++i) { - const VarDecl *VD = *i; - - // Dig out the type as written from ParmVarDecls; it's unclear whether the - // standard (C99 6.9.1p10) requires this, but we're following the - // precedent set by gcc. - QualType Ty; - if (const auto *PVD = dyn_cast(VD)) - Ty = PVD->getOriginalType(); - else - Ty = VD->getType(); - - if (Ty->isVariablyModifiedType()) - buildVariablyModifiedType(Ty); + // the type size, but only if the function is not naked. Naked functions have + // no prolog to run this evaluation. + if (!FD || !FD->hasAttr()) { + for (const VarDecl *vd : Args) { + // Dig out the type as written from ParmVarDecls; it's unclear whether the + // standard (C99 6.9.1p10) requires this, but we're following the + // precedent set by gcc. + QualType ty; + if (const auto *pvd = dyn_cast(vd)) + ty = pvd->getOriginalType(); + else + ty = vd->getType(); + + if (ty->isVariablyModifiedType()) + buildVariablyModifiedType(ty); + } } // Emit a location at the end of the prologue. if (getDebugInfo()) llvm_unreachable("NYI"); - // TODO: Do we need to handle this in two places like we do with // target-features/target-cpu? if (CurFuncDecl) - if (const auto *VecWidth = CurFuncDecl->getAttr()) + if ([[maybe_unused]] const auto *vecWidth = + CurFuncDecl->getAttr()) llvm_unreachable("NYI"); + + if (CGM.shouldEmitConvergenceTokens()) + llvm_unreachable("NYI"); } /// Return true if the current function should be instrumented with @@ -1834,4 +1895,4 @@ void CIRGenFunction::buildVarAnnotations(const VarDecl *decl, mlir::Value val) { auto allocaOp = dyn_cast_or_null(val.getDefiningOp()); assert(allocaOp && "expects available alloca"); allocaOp.setAnnotationsAttr(builder.getArrayAttr(annotations)); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 0c1f0756a947..560d9fe4a22c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -267,6 +267,14 @@ class CIRGenModule : public CIRGenTypeCache { void AddGlobalDtor(mlir::cir::FuncOp Dtor, int Priority = 65535, bool IsDtorAttrFunc = false); + // Return whether structured convergence intrinsics should be generated for + // this target. + bool shouldEmitConvergenceTokens() const { + // TODO: this shuld probably become unconditional once the controlled + // convergence becomes the norm. + return getTriple().isSPIRVLogical(); + } + /// Return the mlir::Value for the address of the given global variable. /// If Ty is non-null and if the global doesn't exist, then it will be created /// with the specified type instead of whatever the normal requested type From ed8257cb101cf2a64141f9b4125ac0e51ffc82a6 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Sat, 26 Oct 2024 10:47:38 +0800 Subject: [PATCH 1983/2301] [ClangIR][Lowering] Handle lowered array index (#1008) Previously we didn't generate the index for array correct. The previous test is incorrect already: globals-neg-index-array.c ``` // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // RUN: %clang_cc1 -x c++ -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s struct __attribute__((packed)) PackedStruct { char a1; char a2; char a3; }; struct PackedStruct packed[10]; char *packed_element = &(packed[-2].a3); // CHECK: cir.global external @packed = #cir.zero : !cir.array {alignment = 16 : i64} loc(#loc5) // CHECK: cir.global external @packed_element = #cir.global_view<@packed, [-2 : i32, 2 : i32]> // LLVM: @packed = global [10 x %struct.PackedStruct] zeroinitializer + // LLVM: @packed_element = global ptr getelementptr inbounds ([10 x %struct.PackedStruct], ptr @packed, i32 0, i32 -2, i32 2) - // LLVM: @packed_element = global ptr getelementptr inbounds ([10 x %struct.PackedStruct], ptr @packed, i32 -2, i32 2) ``` Compile it with `-fclangir -S`, we got: ``` packed: .zero 30 packed_element: .quad packed-54 ``` but the traditional pipeline shows (https://godbolt.org/z/eTj96EP1E): ``` packed: .zero 30 packed_element: .quad packed-4 ``` this may be a simple mismatch. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 5 ++++- clang/test/CIR/CodeGen/globals-neg-index-array.c | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index e8177d93647f..4183f9707efa 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -578,9 +578,12 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, if (globalAttr.getIndices()) { llvm::SmallVector indices; - if (auto stTy = dyn_cast(sourceType)) + if (auto stTy = dyn_cast(sourceType)) { if (stTy.isIdentified()) indices.push_back(0); + } else if (isa(sourceType)) { + indices.push_back(0); + } for (auto idx : globalAttr.getIndices()) { auto intAttr = dyn_cast(idx); diff --git a/clang/test/CIR/CodeGen/globals-neg-index-array.c b/clang/test/CIR/CodeGen/globals-neg-index-array.c index 7f7a80ea2c9e..62a59498ba64 100644 --- a/clang/test/CIR/CodeGen/globals-neg-index-array.c +++ b/clang/test/CIR/CodeGen/globals-neg-index-array.c @@ -17,4 +17,4 @@ char *packed_element = &(packed[-2].a3); // CHECK: cir.global external @packed = #cir.zero : !cir.array {alignment = 16 : i64} loc(#loc5) // CHECK: cir.global external @packed_element = #cir.global_view<@packed, [-2 : i32, 2 : i32]> // LLVM: @packed = global [10 x %struct.PackedStruct] zeroinitializer -// LLVM: @packed_element = global ptr getelementptr inbounds ([10 x %struct.PackedStruct], ptr @packed, i32 -2, i32 2) +// LLVM: @packed_element = global ptr getelementptr inbounds ([10 x %struct.PackedStruct], ptr @packed, i32 0, i32 -2, i32 2) From 5250fdb22927b1ba2994ba3d3c2db97a01b60ca1 Mon Sep 17 00:00:00 2001 From: MarcoCalabretta <80850988+MarcoCalabretta@users.noreply.github.com> Date: Sun, 27 Oct 2024 22:55:51 -0400 Subject: [PATCH 1984/2301] [CIR][CIRGen] Removed extra space in "cir.shift( right)" (#997) (#1009) The MLIR docs at https://mlir.llvm.org/docs/DefiningDialects/Operations/#literals specify that "An empty literal `` may be used to remove a space that is inserted implicitly after certain literal elements", so I inserted one before the `right` literal to remove the extra space that was being printed. Oddly, the bug is also fixed by inserting an empty literal _after_ the `left` literal, which leads me to believe that tablegen is inserting an implicit space after the `left` literal. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 +- clang/test/CIR/CodeGen/binassign.cpp | 2 +- clang/test/CIR/CodeGen/binop.cpp | 2 +- clang/test/CIR/CodeGen/vectype.cpp | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e9f3e9b7753a..0101b5386391 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1199,7 +1199,7 @@ def ShiftOp : CIR_Op<"shift", [Pure]> { let assemblyFormat = [{ `(` - (`left` $isShiftleft^) : (`right`)? + (`left` $isShiftleft^) : (```right`)? `,` $value `:` type($value) `,` $amount `:` type($amount) `)` `->` type($result) attr-dict diff --git a/clang/test/CIR/CodeGen/binassign.cpp b/clang/test/CIR/CodeGen/binassign.cpp index 3e09281072e2..7e6cf992ef2d 100644 --- a/clang/test/CIR/CodeGen/binassign.cpp +++ b/clang/test/CIR/CodeGen/binassign.cpp @@ -34,7 +34,7 @@ int foo(int a, int b) { // CHECK: = cir.binop(sub, // CHECK: cir.store {{.*}}[[Value]] // CHECK: = cir.load {{.*}}[[Value]] -// CHECK: = cir.shift( right +// CHECK: = cir.shift(right // CHECK: cir.store {{.*}}[[Value]] // CHECK: = cir.load {{.*}}[[Value]] // CHECK: = cir.shift(left diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index ce68d5a4e9b3..8cd70da251a5 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -19,7 +19,7 @@ void b0(int a, int b) { // CHECK: = cir.binop(rem, %9, %10) : !s32i // CHECK: = cir.binop(add, %12, %13) nsw : !s32i // CHECK: = cir.binop(sub, %15, %16) nsw : !s32i -// CHECK: = cir.shift( right, %18 : !s32i, %19 : !s32i) -> !s32i +// CHECK: = cir.shift(right, %18 : !s32i, %19 : !s32i) -> !s32i // CHECK: = cir.shift(left, %21 : !s32i, %22 : !s32i) -> !s32i // CHECK: = cir.binop(and, %24, %25) : !s32i // CHECK: = cir.binop(xor, %27, %28) : !s32i diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index be5087344fd5..df4fe6ff9459 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -109,7 +109,7 @@ void vector_int_test(int x, unsigned short usx) { // CHECK: %{{[0-9]+}} = cir.shift(left, {{%.*}} : !cir.vector, // CHECK-SAME: {{%.*}} : !cir.vector) -> !cir.vector vi4 y = a >> b; - // CHECK: %{{[0-9]+}} = cir.shift( right, {{%.*}} : !cir.vector, + // CHECK: %{{[0-9]+}} = cir.shift(right, {{%.*}} : !cir.vector, // CHECK-SAME: {{%.*}} : !cir.vector) -> !cir.vector vus2 z = { usx, usx }; @@ -117,7 +117,7 @@ void vector_int_test(int x, unsigned short usx) { vus2 zamt = { 3, 4 }; // CHECK: %{{[0-9]+}} = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<4> : !u16i]> : !cir.vector vus2 zzz = z >> zamt; - // CHECK: %{{[0-9]+}} = cir.shift( right, {{%.*}} : !cir.vector, + // CHECK: %{{[0-9]+}} = cir.shift(right, {{%.*}} : !cir.vector, // CHECK-SAME: {{%.*}} : !cir.vector) -> !cir.vector } From 6e9ba9027718b61b550ad2514362a1490a8c8622 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 28 Oct 2024 18:21:15 -0400 Subject: [PATCH 1985/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vshll_n (#1010) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 28 +++- clang/test/CIR/CodeGen/AArch64/neon.c | 138 +++++++++++------- 2 files changed, 113 insertions(+), 53 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 1efad486b007..ec75534097cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2239,6 +2239,19 @@ static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, return builder.create(loc, vecTy, constVecAttr); } +/// Build ShiftOp of vector type whose shift amount is a vector built +/// from a constant integer using `buildNeonShiftVector` function +static mlir::Value buildCommonNeonShift(CIRGenBuilderTy &builder, + mlir::Location loc, + mlir::cir::VectorType resTy, + mlir::Value shifTgt, + mlir::Value shiftAmt, bool shiftLeft, + bool negAmt = false) { + shiftAmt = buildNeonShiftVector(builder, shiftAmt, resTy, loc, negAmt); + return builder.create( + loc, resTy, builder.createBitcast(shifTgt, resTy), shiftAmt, shiftLeft); +} + mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, @@ -2326,9 +2339,18 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vshl_n_v: case NEON::BI__builtin_neon_vshlq_n_v: { mlir::Location loc = getLoc(e->getExprLoc()); - ops[1] = buildNeonShiftVector(builder, ops[1], vTy, loc, false); - return builder.create( - loc, vTy, builder.createBitcast(ops[0], vTy), ops[1], true); + return buildCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); + } + case NEON::BI__builtin_neon_vshll_n_v: { + mlir::Location loc = getLoc(e->getExprLoc()); + mlir::cir::VectorType srcTy = + builder.getExtendedOrTruncatedElementVectorType( + vTy, false /* truncate */, + mlir::cast(vTy.getEltType()).isSigned()); + ops[0] = builder.createBitcast(ops[0], srcTy); + // The following cast will be lowered to SExt or ZExt in LLVM. + ops[0] = builder.createIntCast(ops[0], vTy); + return buildCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); } } diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 0e6cbec7c7c1..7a9732b09690 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -6586,61 +6586,99 @@ uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { // return vqrshrn_high_n_u64(a, b, 19); // } -// NYI-LABEL: @test_vshll_n_s8( -// NYI: [[TMP0:%.*]] = sext <8 x i8> %a to <8 x i16> -// NYI: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], -// NYI: ret <8 x i16> [[VSHLL_N]] -// int16x8_t test_vshll_n_s8(int8x8_t a) { -// return vshll_n_s8(a, 3); -// } +int16x8_t test_vshll_n_s8(int8x8_t a) { + return vshll_n_s8(a, 3); + + // CIR-LABEL: vshll_n_s8 + // CIR: [[SHIFT_TGT:%.*]] = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(left, [[SHIFT_TGT]] : !cir.vector, [[SHIFT_AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vshll_n_s8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = sext <8 x i8> [[A]] to <8 x i16> + // LLVM: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 3) + // LLVM: ret <8 x i16> [[VSHLL_N]] +} -// NYI-LABEL: @test_vshll_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32> -// NYI: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], -// NYI: ret <4 x i32> [[VSHLL_N]] -// int32x4_t test_vshll_n_s16(int16x4_t a) { -// return vshll_n_s16(a, 9); -// } +int32x4_t test_vshll_n_s16(int16x4_t a) { + return vshll_n_s16(a, 9); -// NYI-LABEL: @test_vshll_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[TMP2:%.*]] = sext <2 x i32> [[TMP1]] to <2 x i64> -// NYI: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], -// NYI: ret <2 x i64> [[VSHLL_N]] -// int64x2_t test_vshll_n_s32(int32x2_t a) { -// return vshll_n_s32(a, 19); -// } + // CIR-LABEL: vshll_n_s16 + // CIR: [[SHIFT_TGT:%.*]] = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<9> : !s32i, #cir.int<9> : !s32i, #cir.int<9> : + // CIR-SAME: !s32i, #cir.int<9> : !s32i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(left, [[SHIFT_TGT]] : !cir.vector, [[SHIFT_AMT]] : !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshll_n_u8( -// NYI: [[TMP0:%.*]] = zext <8 x i8> %a to <8 x i16> -// NYI: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], -// NYI: ret <8 x i16> [[VSHLL_N]] -// uint16x8_t test_vshll_n_u8(uint8x8_t a) { -// return vshll_n_u8(a, 3); -// } + // LLVM: {{.*}}@test_vshll_n_s16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32> + // LLVM: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], splat (i32 9) + // LLVM: ret <4 x i32> [[VSHLL_N]] +} -// NYI-LABEL: @test_vshll_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32> -// NYI: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], -// NYI: ret <4 x i32> [[VSHLL_N]] -// uint32x4_t test_vshll_n_u16(uint16x4_t a) { -// return vshll_n_u16(a, 9); -// } +int64x2_t test_vshll_n_s32(int32x2_t a) { + return vshll_n_s32(a, 19); -// NYI-LABEL: @test_vshll_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64> -// NYI: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], -// NYI: ret <2 x i64> [[VSHLL_N]] -// uint64x2_t test_vshll_n_u32(uint32x2_t a) { -// return vshll_n_u32(a, 19); -// } + // CIR-LABEL: vshll_n_s32 + // CIR: [[SHIFT_TGT:%.*]] = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<19> : !s64i, #cir.int<19> : !s64i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(left, [[SHIFT_TGT]] : !cir.vector, [[SHIFT_AMT]] : !cir.vector) + + // LLVM: {{.*}}@test_vshll_n_s32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[TMP2:%.*]] = sext <2 x i32> [[TMP1]] to <2 x i64> + // LLVM: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], splat (i64 19) + // LLVM: ret <2 x i64> [[VSHLL_N]] +} + +uint16x8_t test_vshll_n_u8(uint8x8_t a) { + return vshll_n_u8(a, 3); + + // CIR-LABEL: vshll_n_u8 + // CIR: [[SHIFT_TGT:%.*]] = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, + // CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(left, [[SHIFT_TGT]] : !cir.vector, [[SHIFT_AMT]] : !cir.vector) + + // LLVM: {{.*}}@test_vshll_n_u8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = zext <8 x i8> [[A]] to <8 x i16> + // LLVM: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 3) +} + +uint32x4_t test_vshll_n_u16(uint16x4_t a) { + return vshll_n_u16(a, 9); + + // CIR-LABEL: vshll_n_u16 + // CIR: [[SHIFT_TGT:%.*]] = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<9> : !u32i, #cir.int<9> : !u32i, + // CIR-SAME: #cir.int<9> : !u32i, #cir.int<9> : !u32i]> : !cir.vector + + // LLVM: {{.*}}@test_vshll_n_u16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32> + // LLVM: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], splat (i32 9) + // LLVM: ret <4 x i32> [[VSHLL_N]] +} + +uint64x2_t test_vshll_n_u32(uint32x2_t a) { + return vshll_n_u32(a, 19); + + // CIR-LABEL: vshll_n_u32 + // CIR: [[SHIFT_TGT:%.*]] = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<19> : !u64i, #cir.int<19> : !u64i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(left, [[SHIFT_TGT]] : !cir.vector, [[SHIFT_AMT]] : !cir.vector) + + // LLVM: {{.*}}@test_vshll_n_u32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64> + // LLVM: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], splat (i64 19) + // LLVM: ret <2 x i64> [[VSHLL_N]] +} // NYI-LABEL: @test_vshll_high_n_s8( // NYI: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> From fd52f9742fa7751a7772fa03a3933d1dda3200ed Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Wed, 30 Oct 2024 01:47:34 +0800 Subject: [PATCH 1986/2301] [ClangIR][CIRGen] Introduce CaseOp and refactor SwitchOp (#1006) Close https://github.com/llvm/clangir/issues/522 This solves the issue we can't handle `case` in nested scopes and we can't handle if the switch body is not a compound statement. The core idea of the patch is to introduce the `cir.case` operation to the language. Then we can get the cases by traversing the body of the `cir.switch` operation easily instead of counting the regions and the attributes. Every `cir.case` operation has a region and now the `cir.switch` has only one region too. But to make the analysis and optimizations easier, I add a new concept `simple form` here. That a simple `cir.switch` operation is: all the `cir.case` operation owned by the `cir.switch` lives in the top level blocks of the `cir.switch` region and there is no other operations except the ending `cir.yield`. This solves the previous `simplified for common-case` vs `general solution` discussion in https://github.com/llvm/clangir/issues/522. After implemented this, I feel the correct answer to it is, we want a general solution for constructing and lowering the operations but we like simple and common case for analysis and optimizations. We just mixed the different phases. For other semantics, see `CIROps.td`. For lowering, we can make it generally by lower the cases one by one and finally lower the switch itself. Although this patch has 1000+ lines of changes, I feel it is relatively neat especially it erases some odd behaviors before. Tested with Spec2017's C benchmarks except 500.perlbench_r. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 210 ++++++++++--- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 137 ++++---- clang/lib/CIR/CodeGen/CIRGenFunction.h | 49 +-- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 285 +++++++++-------- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 293 +++++------------- .../Dialect/Transforms/CIRCanonicalize.cpp | 3 +- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 124 +++++--- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 14 +- clang/test/CIR/CodeGen/atomic-runtime.cpp | 142 ++++----- clang/test/CIR/CodeGen/goto.cpp | 13 +- clang/test/CIR/CodeGen/switch-gnurange.cpp | 109 ++++--- clang/test/CIR/CodeGen/switch.cpp | 178 ++++++----- clang/test/CIR/IR/invalid.cir | 14 +- clang/test/CIR/IR/switch.cir | 38 +-- clang/test/CIR/Lowering/nested-switch.cpp | 69 +++++ clang/test/CIR/Lowering/switch-while.c | 84 +++++ clang/test/CIR/Lowering/switch.cir | 106 ++++--- clang/test/CIR/Transforms/merge-cleanups.cir | 35 ++- clang/test/CIR/Transforms/switch.cir | 129 ++++---- 19 files changed, 1124 insertions(+), 908 deletions(-) create mode 100644 clang/test/CIR/Lowering/nested-switch.cpp create mode 100644 clang/test/CIR/Lowering/switch-while.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0101b5386391..d5782ebee3fe 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -665,7 +665,7 @@ def StoreOp : CIR_Op<"store", [ def ReturnOp : CIR_Op<"return", [ParentOneOf<["FuncOp", "ScopeOp", "IfOp", "SwitchOp", "DoWhileOp", - "WhileOp", "ForOp"]>, + "WhileOp", "ForOp", "CaseOp"]>, Terminator]> { let summary = "Return from function"; let description = [{ @@ -900,7 +900,7 @@ def ConditionOp : CIR_Op<"condition", [ def YieldOp : CIR_Op<"yield", [ReturnLike, Terminator, ParentOneOf<["IfOp", "ScopeOp", "SwitchOp", "WhileOp", "ForOp", "AwaitOp", "TernaryOp", "GlobalOp", "DoWhileOp", "TryOp", "ArrayCtor", - "ArrayDtor", "CallOp"]>]> { + "ArrayDtor", "CallOp", "CaseOp"]>]> { let summary = "Represents the default branching behaviour of a region"; let description = [{ The `cir.yield` operation terminates regions on different CIR operations, @@ -1819,22 +1819,38 @@ def CaseOpKind : I32EnumAttr< let cppNamespace = "::mlir::cir"; } -def CaseEltValueListAttr : - TypedArrayAttrBase { - let constBuilderCall = ?; -} +def CaseOp : CIR_Op<"case", [ + DeclareOpInterfaceMethods, + RecursivelySpeculatable, AutomaticAllocationScope]> { + let summary = "Case operation"; + let description = [{ + The `cir.case` operation represents a case within a C/C++ switch. + The `cir.case` operation must be in a `cir.switch` operation directly or indirectly. -def CaseAttr : AttrDef { - // FIXME: value should probably be optional for more clear "default" - // representation. - let parameters = (ins "ArrayAttr":$value, "CaseOpKindAttr":$kind); - let mnemonic = "case"; - let assemblyFormat = "`<` struct(params) `>`"; -} + The `cir.case` have 4 kinds: + - `equal, `: equality of the second case operand against the + condition. + - `anyof, [constant-list]`: equals to any of the values in a subsequent + following list. + - `range, [lower-bound, upper-bound]`: the condition is within the closed interval. + - `default`: any other value. + + Each case region must be explicitly terminated. + }]; + + let arguments = (ins ArrayAttr:$value, CaseOpKind:$kind); + let regions = (region AnyRegion:$caseRegion); + + let assemblyFormat = "`(` $kind `,` $value `)` $caseRegion attr-dict"; + + let hasVerifier = 1; -def CaseArrayAttr : - TypedArrayAttrBase { - let constBuilderCall = ?; + let skipDefaultBuilders = 1; + let builders = [ + OpBuilder<(ins "ArrayAttr":$value, + "CaseOpKind":$kind, + "OpBuilder::InsertPoint &":$insertPoint)> + ]; } def SwitchOp : CIR_Op<"switch", @@ -1847,45 +1863,136 @@ def SwitchOp : CIR_Op<"switch", conditionally executing multiple regions of code. The operand to an switch is an integral condition value. - A variadic list of "case" attribute operands and regions track the possible - control flow within `cir.switch`. A `case` must be in one of the following forms: - - `equal, `: equality of the second case operand against the - condition. - - `anyof, [constant-list]`: equals to any of the values in a subsequent - following list. - - `range, [lower-bound, upper-bound]`: the condition is within the closed interval. - - `default`: any other value. + The set of `cir.case` operations and their enclosing `cir.switch` + represents the semantics of a C/C++ switch statement. Users can use + `collectCases(llvm::SmallVector &cases)` to collect the `cir.case` + operation in the `cir.switch` operation easily. - Each case region must be explicitly terminated. + The `cir.case` operations doesn't have to be in the region of `cir.switch` + directly. However, when all the `cir.case` operations lives in the region + of `cir.switch` directly and there is no other operations except the ending + `cir.yield` operation in the region of `cir.switch` directly, we call the + `cir.switch` operation is in a simple form. Users can use + `bool isSimpleForm(llvm::SmallVector &cases)` member function to + detect if the `cir.switch` operation is in a simple form. The simple form + makes analysis easier to handle the `cir.switch` operation + and makes the boundary to give up pretty clear. - Examples: + To make the simple form as common as possible, CIR code generation attaches + operations corresponding to the statements that lives between top level + cases into the closest `cir.case` operation. - ```mlir - cir.switch (%b : i32) [ - case (equal, 20) { - ... - cir.yield break - }, - case (anyof, [1, 2, 3] : i32) { - ... - cir.return ... + For example, + + ``` + switch(int cond) { + case 4: + a++; + + b++; + case 5; + c++; + + ... + } + ``` + + The statement `b++` is not a sub-statement of the case statement `case 4`. + But to make the generated `cir.switch` a simple form, we will attach the + statement `b++` into the closest `cir.case` operation. So that the generated + code will be like: + + ``` + cir.switch(int cond) { + cir.case(equal, 4) { + a++; + b++; + cir.yield } - case (range, [10, 15]) { - ... - cir.yield break - }, - case (default) { - ... - cir.yield fallthrough + cir.case(equal, 5) { + c++; + cir.yield } - ] + ... + } + ``` + + For the same reason, we will hoist the case statement as the substatement + of another case statement so that they will be in the same level. For + example, + + ``` + switch(int cond) { + case 4: + default; + case 5; + a++; + ... + } + ``` + + will be generated as + + ``` + cir.switch(int cond) { + cir.case(equal, 4) { + cir.yield + } + cir.case(default) { + cir.yield + } + cir.case(equal, 5) { + a++; + cir.yield + } + ... + } + ``` + + The cir.switch might not be considered "simple" if any of the following is + true: + - There are case statements of the switch statement lives in other scopes + other than the top level compound statement scope. Note that a case + statement itself doesn't form a scope. + - The sub-statement of the switch statement is not a compound statement. + - There are codes before the first case statement. For example, + + ``` + switch(int cond) { + l: + b++; + + case 4: + a++; + break; + + case 5: + goto l; + ... + } + ``` + + the generated CIR for this non-simple switch would be: + + ``` + cir.switch(int cond) { + cir.label "l" + b++; + cir.case(4) { + a++; + cir.break + } + cir.case(5) { + goto "l" + } + cir.yield + } ``` }]; - let arguments = (ins CIR_IntType:$condition, - OptionalAttr:$cases); + let arguments = (ins CIR_IntType:$condition); - let regions = (region VariadicRegion:$regions); + let regions = (region AnyRegion:$body); let hasVerifier = 1; @@ -1897,10 +2004,19 @@ def SwitchOp : CIR_Op<"switch", let assemblyFormat = [{ custom( - $regions, $cases, $condition, type($condition) + $body, $condition, type($condition) ) attr-dict }]; + + let extraClassDeclaration = [{ + // Collect cases in the switch. + void collectCases(llvm::SmallVector &cases); + + // Check if the switch is in a simple form. If yes, collect the cases to \param cases. + // This is an expensive and need to be used with caution. + bool isSimpleForm(llvm::SmallVector &cases); + }]; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index f852af7ca979..26482f4e9fa2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -370,77 +370,46 @@ static bool isCstWeak(mlir::Value weakVal, bool &val) { // Functions that help with the creation of compiler-generated switch // statements that are used to implement non-constant memory order parameters. -// Create a new region. Create a block within the region. Add a "break" -// statement to the block. Set the builder's insertion point to before the -// "break" statement. Add the new region to the given container. -template -static void startRegion(mlir::OpBuilder &builder, RegionsCont &Regions, - mlir::Location loc) { - - Regions.push_back(std::make_unique()); - mlir::Region *Region = Regions.back().get(); - mlir::Block *Block = builder.createBlock(Region); - builder.setInsertionPointToEnd(Block); - auto Break = builder.create(loc); - builder.setInsertionPoint(Break); -} - // Create a "default:" label and add it to the given collection of case labels. // Create the region that will hold the body of the "default:" block. -template -static void buildDefaultCase(mlir::OpBuilder &builder, CaseAttrsCont &CaseAttrs, - RegionsCont &Regions, mlir::Location loc) { - - auto Context = builder.getContext(); +static void buildDefaultCase(CIRGenBuilderTy &builder, mlir::Location loc) { auto EmptyArrayAttr = builder.getArrayAttr({}); - auto DefaultKind = - mlir::cir::CaseOpKindAttr::get(Context, mlir::cir::CaseOpKind::Default); - auto DefaultAttr = - mlir::cir::CaseAttr::get(Context, EmptyArrayAttr, DefaultKind); - CaseAttrs.push_back(DefaultAttr); - startRegion(builder, Regions, loc); + mlir::OpBuilder::InsertPoint insertPoint; + builder.create( + loc, EmptyArrayAttr, mlir::cir::CaseOpKind::Default, insertPoint); + builder.restoreInsertionPoint(insertPoint); } // Create a single "case" label with the given MemOrder as its value. Add the // "case" label to the given collection of case labels. Create the region that // will hold the body of the "case" block. -template -static void -buildSingleMemOrderCase(mlir::OpBuilder &builder, CaseAttrsCont &CaseAttrs, - RegionsCont &Regions, mlir::Location loc, - mlir::Type Type, mlir::cir::MemOrder Order) { - - auto Context = builder.getContext(); +static void buildSingleMemOrderCase(CIRGenBuilderTy &builder, + mlir::Location loc, mlir::Type Type, + mlir::cir::MemOrder Order) { SmallVector OneOrder{ mlir::cir::IntAttr::get(Type, static_cast(Order))}; auto OneAttribute = builder.getArrayAttr(OneOrder); - auto CaseKind = - mlir::cir::CaseOpKindAttr::get(Context, mlir::cir::CaseOpKind::Equal); - auto CaseAttr = mlir::cir::CaseAttr::get(Context, OneAttribute, CaseKind); - CaseAttrs.push_back(CaseAttr); - startRegion(builder, Regions, loc); + mlir::OpBuilder::InsertPoint insertPoint; + builder.create(loc, OneAttribute, + mlir::cir::CaseOpKind::Equal, insertPoint); + builder.restoreInsertionPoint(insertPoint); } // Create a pair of "case" labels with the given MemOrders as their values. // Add the combined "case" attribute to the given collection of case labels. // Create the region that will hold the body of the "case" block. -template -static void buildDoubleMemOrderCase(mlir::OpBuilder &builder, - CaseAttrsCont &CaseAttrs, - RegionsCont &Regions, mlir::Location loc, - mlir::Type Type, mlir::cir::MemOrder Order1, +static void buildDoubleMemOrderCase(CIRGenBuilderTy &builder, + mlir::Location loc, mlir::Type Type, + mlir::cir::MemOrder Order1, mlir::cir::MemOrder Order2) { - - auto Context = builder.getContext(); SmallVector TwoOrders{ mlir::cir::IntAttr::get(Type, static_cast(Order1)), mlir::cir::IntAttr::get(Type, static_cast(Order2))}; auto TwoAttributes = builder.getArrayAttr(TwoOrders); - auto CaseKind = - mlir::cir::CaseOpKindAttr::get(Context, mlir::cir::CaseOpKind::Anyof); - auto CaseAttr = mlir::cir::CaseAttr::get(Context, TwoAttributes, CaseKind); - CaseAttrs.push_back(CaseAttr); - startRegion(builder, Regions, loc); + mlir::OpBuilder::InsertPoint insertPoint; + builder.create(loc, TwoAttributes, + mlir::cir::CaseOpKind::Anyof, insertPoint); + builder.restoreInsertionPoint(insertPoint); } static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, @@ -526,42 +495,48 @@ static void buildAtomicCmpXchgFailureSet( // compile-time value. CGF.getBuilder().create( FailureOrderVal.getLoc(), FailureOrderVal, - [&](mlir::OpBuilder &builder, mlir::Location loc, - mlir::OperationState &os) { - SmallVector CaseAttrs; - SmallVector, 3> Regions; + [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { + auto &builder = CGF.getBuilder(); + + mlir::Block *switchBlock = builder.getBlock(); // default: // Unsupported memory orders get generated as memory_order_relaxed, // because there is no practical way to report an error at runtime. - buildDefaultCase(builder, CaseAttrs, Regions, loc); + buildDefaultCase(builder, loc); buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder, mlir::cir::MemOrder::Relaxed, Scope); + builder.createBreak(loc); + + builder.setInsertionPointToEnd(switchBlock); // case consume: // case acquire: // memory_order_consume is not implemented and always falls back to // memory_order_acquire - buildDoubleMemOrderCase( - builder, CaseAttrs, Regions, loc, FailureOrderVal.getType(), - mlir::cir::MemOrder::Consume, mlir::cir::MemOrder::Acquire); + buildDoubleMemOrderCase(builder, loc, FailureOrderVal.getType(), + mlir::cir::MemOrder::Consume, + mlir::cir::MemOrder::Acquire); buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder, mlir::cir::MemOrder::Acquire, Scope); + builder.createBreak(loc); + + builder.setInsertionPointToEnd(switchBlock); // A failed compare-exchange is a read-only operation. So // memory_order_release and memory_order_acq_rel are not supported for // the failure memory order. They fall back to memory_order_relaxed. // case seq_cst: - buildSingleMemOrderCase(builder, CaseAttrs, Regions, loc, - FailureOrderVal.getType(), + buildSingleMemOrderCase(builder, loc, FailureOrderVal.getType(), mlir::cir::MemOrder::SequentiallyConsistent); buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder, mlir::cir::MemOrder::SequentiallyConsistent, Scope); + builder.createBreak(loc); - os.addRegions(Regions); - os.addAttribute("cases", builder.getArrayAttr(CaseAttrs)); + builder.setInsertionPointToEnd(switchBlock); + builder.createYield(loc); }); } @@ -1271,19 +1246,20 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // compile-time value. builder.create( Order.getLoc(), Order, - [&](mlir::OpBuilder &builder, mlir::Location loc, - mlir::OperationState &os) { - llvm::SmallVector CaseAttrs; - llvm::SmallVector, 6> Regions; + [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { + mlir::Block *switchBlock = builder.getBlock(); // default: // Use memory_order_relaxed for relaxed operations and for any memory // order value that is not supported. There is no good way to report // an unsupported memory order at runtime, hence the fallback to // memory_order_relaxed. - buildDefaultCase(builder, CaseAttrs, Regions, loc); + buildDefaultCase(builder, loc); buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, mlir::cir::MemOrder::Relaxed, Scope); + builder.createBreak(loc); + + builder.setInsertionPointToEnd(switchBlock); if (!IsStore) { // case consume: @@ -1291,42 +1267,49 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // memory_order_consume is not implemented; it is always treated like // memory_order_acquire. These memory orders are not valid for // write-only operations. - buildDoubleMemOrderCase(builder, CaseAttrs, Regions, loc, - Order.getType(), mlir::cir::MemOrder::Consume, + buildDoubleMemOrderCase(builder, loc, Order.getType(), + mlir::cir::MemOrder::Consume, mlir::cir::MemOrder::Acquire); buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, mlir::cir::MemOrder::Acquire, Scope); + builder.createBreak(loc); } + builder.setInsertionPointToEnd(switchBlock); + if (!IsLoad) { // case release: // memory_order_release is not valid for read-only operations. - buildSingleMemOrderCase(builder, CaseAttrs, Regions, loc, - Order.getType(), + buildSingleMemOrderCase(builder, loc, Order.getType(), mlir::cir::MemOrder::Release); buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, mlir::cir::MemOrder::Release, Scope); + builder.createBreak(loc); } + builder.setInsertionPointToEnd(switchBlock); + if (!IsLoad && !IsStore) { // case acq_rel: // memory_order_acq_rel is only valid for read-write operations. - buildSingleMemOrderCase(builder, CaseAttrs, Regions, loc, - Order.getType(), + buildSingleMemOrderCase(builder, loc, Order.getType(), mlir::cir::MemOrder::AcquireRelease); buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, mlir::cir::MemOrder::AcquireRelease, Scope); + builder.createBreak(loc); } + builder.setInsertionPointToEnd(switchBlock); + // case seq_cst: - buildSingleMemOrderCase(builder, CaseAttrs, Regions, loc, - Order.getType(), + buildSingleMemOrderCase(builder, loc, Order.getType(), mlir::cir::MemOrder::SequentiallyConsistent); buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, mlir::cir::MemOrder::SequentiallyConsistent, Scope); + builder.createBreak(loc); - os.addRegions(Regions); - os.addAttribute("cases", builder.getArrayAttr(CaseAttrs)); + builder.setInsertionPointToEnd(switchBlock); + builder.createYield(loc); }); if (RValTy->isVoidType()) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 94f557526e52..9da7b671bbb1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -478,10 +478,6 @@ class CIRGenFunction : public CIRGenTypeCache { // applies to. nullptr if there is no 'musttail' on the current statement. const clang::CallExpr *MustTailCall = nullptr; - /// The attributes of cases collected during emitting the body of a switch - /// stmt. - llvm::SmallVector, 2> caseAttrsStack; - /// The type of the condition for the emitting switch statement. llvm::SmallVector condTypeStack; @@ -1234,22 +1230,25 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Type getCIRType(const clang::QualType &type); const CaseStmt *foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, - SmallVector &caseAttrs); + mlir::ArrayAttr &value, + mlir::cir::CaseOpKind &kind); template mlir::LogicalResult buildCaseDefaultCascade(const T *stmt, mlir::Type condType, - SmallVector &caseAttrs); + mlir::ArrayAttr value, mlir::cir::CaseOpKind kind, + bool buildingTopLevelCase); mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S, mlir::Type condType, - SmallVector &caseAttrs); + bool buildingTopLevelCase); - mlir::LogicalResult - buildDefaultStmt(const clang::DefaultStmt &S, mlir::Type condType, - SmallVector &caseAttrs); + mlir::LogicalResult buildDefaultStmt(const clang::DefaultStmt &S, + mlir::Type condType, + bool buildingTopLevelCase); - mlir::LogicalResult buildSwitchCase(const clang::SwitchCase &S); + mlir::LogicalResult buildSwitchCase(const clang::SwitchCase &S, + bool buildingTopLevelCase); mlir::LogicalResult buildSwitchBody(const clang::Stmt *S); @@ -2210,13 +2209,17 @@ class CIRGenFunction : public CIRGenTypeCache { // have their own scopes but are distinct regions nonetheless. llvm::SmallVector RetBlocks; llvm::SmallVector> RetLocs; + llvm::DenseMap RetBlockInCaseIndex; + std::optional NormalRetBlockIndex; llvm::SmallVector> SwitchRegions; // There's usually only one ret block per scope, but this needs to be // get or create because of potential unreachable return statements, note // that for those, all source location maps to the first one found. mlir::Block *createRetBlock(CIRGenFunction &CGF, mlir::Location loc) { - assert((isSwitch() || RetBlocks.size() == 0) && + assert((isa_and_nonnull( + CGF.builder.getBlock()->getParentOp()) || + RetBlocks.size() == 0) && "only switches can hold more than one ret block"); // Create the cleanup block but dont hook it up around just yet. @@ -2247,12 +2250,22 @@ class CIRGenFunction : public CIRGenTypeCache { } mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) { - unsigned int regionIdx = 0; - if (isSwitch()) - regionIdx = SwitchRegions.size() - 1; - if (regionIdx >= RetBlocks.size()) - return createRetBlock(CGF, loc); - return &*RetBlocks.back(); + if (auto caseOp = dyn_cast_if_present( + CGF.builder.getBlock()->getParentOp())) { + auto iter = RetBlockInCaseIndex.find(caseOp); + if (iter != RetBlockInCaseIndex.end()) + return RetBlocks[iter->second]; + + mlir::Block *ret = createRetBlock(CGF, loc); + RetBlockInCaseIndex[caseOp] = RetBlocks.size() - 1; + return ret; + } + if (!NormalRetBlockIndex) { + mlir::Block *ret = createRetBlock(CGF, loc); + NormalRetBlockIndex = RetBlocks.size() - 1; + return ret; + } + return &*RetBlocks[*NormalRetBlockIndex]; } // Scope entry block tracking diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 4da7f2d20b88..c3c5562bcdf1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -306,7 +306,9 @@ mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, case Stmt::CaseStmtClass: case Stmt::DefaultStmtClass: - return buildSwitchCase(cast(*S)); + // If we reached here, we must not handling a switch case in the top level. + return buildSwitchCase(cast(*S), + /*buildingTopLevelCase=*/false); break; case Stmt::BreakStmtClass: @@ -618,17 +620,14 @@ mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) { return mlir::success(); } -const CaseStmt * -CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, - SmallVector &caseAttrs) { - auto *ctxt = builder.getContext(); - +const CaseStmt *CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, + mlir::Type condType, + mlir::ArrayAttr &value, + mlir::cir::CaseOpKind &kind) { const CaseStmt *caseStmt = &S; const CaseStmt *lastCase = &S; SmallVector caseEltValueListAttr; - int caseAttrCount = 0; - // Fold cascading cases whenever possible to simplify codegen a bit. while (caseStmt) { lastCase = caseStmt; @@ -640,105 +639,146 @@ CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, SmallVector rangeCaseAttr = { mlir::cir::IntAttr::get(condType, intVal), mlir::cir::IntAttr::get(condType, endVal)}; - auto caseAttr = mlir::cir::CaseAttr::get( - ctxt, builder.getArrayAttr(rangeCaseAttr), - CaseOpKindAttr::get(ctxt, mlir::cir::CaseOpKind::Range)); - caseAttrs.push_back(caseAttr); - ++caseAttrCount; - } else { - caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(condType, intVal)); + value = builder.getArrayAttr(rangeCaseAttr); + kind = mlir::cir::CaseOpKind::Range; + + // We may not be able to fold rangaes. Due to we can't present range case + // with other trivial cases now. + return caseStmt; } + caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(condType, intVal)); + caseStmt = dyn_cast_or_null(caseStmt->getSubStmt()); - } - if (!caseEltValueListAttr.empty()) { - auto caseOpKind = caseEltValueListAttr.size() > 1 - ? mlir::cir::CaseOpKind::Anyof - : mlir::cir::CaseOpKind::Equal; - auto caseAttr = mlir::cir::CaseAttr::get( - ctxt, builder.getArrayAttr(caseEltValueListAttr), - CaseOpKindAttr::get(ctxt, caseOpKind)); - caseAttrs.push_back(caseAttr); - ++caseAttrCount; + // Break early if we found ranges. We can't fold ranges due to the same + // reason above. + if (caseStmt && caseStmt->getRHS()) + break; } - assert(caseAttrCount > 0 && "there should be at least one valid case attr"); - - for (int i = 1; i < caseAttrCount; ++i) { - // If there are multiple case attributes, we need to create a new region - auto *region = currLexScope->createSwitchRegion(); - builder.createBlock(region); + if (!caseEltValueListAttr.empty()) { + value = builder.getArrayAttr(caseEltValueListAttr); + kind = caseEltValueListAttr.size() > 1 ? mlir::cir::CaseOpKind::Anyof + : mlir::cir::CaseOpKind::Equal; } return lastCase; } template -mlir::LogicalResult CIRGenFunction::buildCaseDefaultCascade( - const T *stmt, mlir::Type condType, - SmallVector &caseAttrs) { +mlir::LogicalResult +CIRGenFunction::buildCaseDefaultCascade(const T *stmt, mlir::Type condType, + mlir::ArrayAttr value, CaseOpKind kind, + bool buildingTopLevelCase) { assert((isa(stmt)) && "only case or default stmt go here"); - auto res = mlir::success(); + mlir::LogicalResult result = mlir::success(); - // Update scope information with the current region we are - // emitting code for. This is useful to allow return blocks to be - // automatically and properly placed during cleanup. - auto *region = currLexScope->createSwitchRegion(); - auto *block = builder.createBlock(region); - builder.setInsertionPointToEnd(block); + auto loc = getLoc(stmt->getBeginLoc()); + enum class SubStmtKind { Case, Default, Other }; + SubStmtKind subStmtKind = SubStmtKind::Other; auto *sub = stmt->getSubStmt(); - if (isa(sub) && isa(stmt)) { - builder.createYield(getLoc(stmt->getBeginLoc())); - res = buildDefaultStmt(*dyn_cast(sub), condType, caseAttrs); - } else if (isa(sub) && isa(stmt)) { - builder.createYield(getLoc(stmt->getBeginLoc())); - res = buildCaseStmt(*dyn_cast(sub), condType, caseAttrs); - } else { - res = buildStmt(sub, /*useCurrentScope=*/!isa(sub)); + mlir::OpBuilder::InsertPoint insertPoint; + builder.create(loc, value, kind, insertPoint); + + { + mlir::OpBuilder::InsertionGuard guardSwitch(builder); + builder.restoreInsertionPoint(insertPoint); + + if (isa(sub) && isa(stmt)) { + subStmtKind = SubStmtKind::Default; + builder.createYield(loc); + } else if (isa(sub) && isa(stmt)) { + subStmtKind = SubStmtKind::Case; + builder.createYield(loc); + } else + result = buildStmt(sub, /*useCurrentScope=*/!isa(sub)); + + insertPoint = builder.saveInsertionPoint(); } - return res; + // If the substmt is default stmt or case stmt, try to handle the special case + // to make it into the simple form. e.g. + // + // swtich () { + // case 1: + // default: + // ... + // } + // + // we prefer generating + // + // cir.switch() { + // cir.case(equal, 1) { + // cir.yield + // } + // cir.case(default) { + // ... + // } + // } + // + // than + // + // cir.switch() { + // cir.case(equal, 1) { + // cir.case(default) { + // ... + // } + // } + // } + // + // We don't need to revert this if we find the current switch can't be in + // simple form later since the conversion itself should be harmless. + if (subStmtKind == SubStmtKind::Case) + result = + buildCaseStmt(*cast(sub), condType, buildingTopLevelCase); + else if (subStmtKind == SubStmtKind::Default) + result = buildDefaultStmt(*cast(sub), condType, + buildingTopLevelCase); + else if (buildingTopLevelCase) + // If we're building a top level case, try to restore the insert point to + // the case we're building, then we can attach more random stmts to the + // case to make generating `cir.switch` operation to be a simple form. + builder.restoreInsertionPoint(insertPoint); + + return result; } -mlir::LogicalResult -CIRGenFunction::buildCaseStmt(const CaseStmt &S, mlir::Type condType, - SmallVector &caseAttrs) { - auto *caseStmt = foldCaseStmt(S, condType, caseAttrs); - return buildCaseDefaultCascade(caseStmt, condType, caseAttrs); +mlir::LogicalResult CIRGenFunction::buildCaseStmt(const CaseStmt &S, + mlir::Type condType, + bool buildingTopLevelCase) { + mlir::ArrayAttr value; + CaseOpKind kind; + auto *caseStmt = foldCaseStmt(S, condType, value, kind); + return buildCaseDefaultCascade(caseStmt, condType, value, kind, + buildingTopLevelCase); } mlir::LogicalResult CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, mlir::Type condType, - SmallVector &caseAttrs) { - auto ctxt = builder.getContext(); - - auto defAttr = mlir::cir::CaseAttr::get( - ctxt, builder.getArrayAttr({}), - CaseOpKindAttr::get(ctxt, mlir::cir::CaseOpKind::Default)); - - caseAttrs.push_back(defAttr); - return buildCaseDefaultCascade(&S, condType, caseAttrs); + bool buildingTopLevelCase) { + return buildCaseDefaultCascade(&S, condType, builder.getArrayAttr({}), + mlir::cir::CaseOpKind::Default, + buildingTopLevelCase); } -mlir::LogicalResult CIRGenFunction::buildSwitchCase(const SwitchCase &S) { - assert(!caseAttrsStack.empty() && - "build switch case without seeting case attrs"); +mlir::LogicalResult CIRGenFunction::buildSwitchCase(const SwitchCase &S, + bool buildingTopLevelCase) { assert(!condTypeStack.empty() && "build switch case without specifying the type of the condition"); if (S.getStmtClass() == Stmt::CaseStmtClass) return buildCaseStmt(cast(S), condTypeStack.back(), - caseAttrsStack.back()); + buildingTopLevelCase); if (S.getStmtClass() == Stmt::DefaultStmtClass) return buildDefaultStmt(cast(S), condTypeStack.back(), - caseAttrsStack.back()); + buildingTopLevelCase); llvm_unreachable("expect case or default stmt"); } @@ -1004,31 +1044,40 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { } mlir::LogicalResult CIRGenFunction::buildSwitchBody(const Stmt *S) { - if (auto *compoundStmt = dyn_cast(S)) { - mlir::Block *lastCaseBlock = nullptr; - auto res = mlir::success(); - for (auto *c : compoundStmt->body()) { - if (auto *switchCase = dyn_cast(c)) { - res = buildSwitchCase(*switchCase); - lastCaseBlock = builder.getBlock(); - } else if (lastCaseBlock) { - // This means it's a random stmt following up a case, just - // emit it as part of previous known case. - mlir::OpBuilder::InsertionGuard guardCase(builder); - builder.setInsertionPointToEnd(lastCaseBlock); - res = buildStmt(c, /*useCurrentScope=*/!isa(c)); - lastCaseBlock = builder.getBlock(); - } else { - llvm_unreachable("statement doesn't belong to any case region, NYI"); - } + // It is rare but legal if the switch body is not a compound stmt. e.g., + // + // switch(a) + // while(...) { + // case1 + // ... + // case2 + // ... + // } + if (!isa(S)) + return buildStmt(S, /*useCurrentScope=*/!false); + + auto *compoundStmt = cast(S); + + mlir::Block *swtichBlock = builder.getBlock(); + for (auto *c : compoundStmt->body()) { + if (auto *switchCase = dyn_cast(c)) { + builder.setInsertionPointToEnd(swtichBlock); + // Reset insert point automatically, so that we can attach following + // random stmt to the region of previous built case op to try to make + // the being generated `cir.switch` to be in simple form. + if (mlir::failed( + buildSwitchCase(*switchCase, /*buildingTopLevelCase=*/true))) + return mlir::failure(); - if (res.failed()) - break; + continue; } - return res; + + // Otherwise, just build the statements in the nearest case region. + if (mlir::failed(buildStmt(c, /*useCurrentScope=*/!isa(c)))) + return mlir::failure(); } - llvm_unreachable("switch body is not CompoundStmt, NYI"); + return mlir::success(); } mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { @@ -1037,9 +1086,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { // nothing to be done here. // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))... - auto res = mlir::success(); SwitchOp swop; - auto switchStmtBuilder = [&]() -> mlir::LogicalResult { if (S.getInit()) if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) @@ -1053,31 +1100,26 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts()) // TODO: if the switch has a condition wrapped by __builtin_unpredictable? + auto res = mlir::success(); swop = builder.create( getLoc(S.getBeginLoc()), condV, /*switchBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { currLexScope->setAsSwitch(); - caseAttrsStack.push_back({}); condTypeStack.push_back(condV.getType()); res = buildSwitchBody(S.getBody()); - os.addRegions(currLexScope->getSwitchRegions()); - os.addAttribute("cases", builder.getArrayAttr(caseAttrsStack.back())); - - caseAttrsStack.pop_back(); condTypeStack.pop_back(); }); - if (res.failed()) - return res; - return mlir::success(); + return res; }; // The switch scope contains the full source range for SwitchStmt. auto scopeLoc = getLoc(S.getSourceRange()); + auto res = mlir::success(); builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -1085,44 +1127,13 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { res = switchStmtBuilder(); }); - if (res.failed()) - return res; - - // Any block in a case region without a terminator is considered a - // fallthrough yield. In practice there shouldn't be more than one - // block without a terminator, we patch any block we see though and - // let mlir's SwitchOp verifier enforce rules. - auto terminateCaseRegion = [&](mlir::Region &r, mlir::Location loc) { - if (r.empty()) - return; - - SmallVector eraseBlocks; - unsigned numBlocks = r.getBlocks().size(); - for (auto &block : r.getBlocks()) { - // Already cleanup after return operations, which might create - // empty blocks if emitted as last stmt. - if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() && - block.hasNoSuccessors()) - eraseBlocks.push_back(&block); - - if (block.empty() || - !block.back().hasTrait()) { - mlir::OpBuilder::InsertionGuard guardCase(builder); - builder.setInsertionPointToEnd(&block); - builder.createYield(loc); - } - } - - for (auto *b : eraseBlocks) - b->erase(); - }; + llvm::SmallVector cases; + swop.collectCases(cases); + for (auto caseOp : cases) + terminateBody(builder, caseOp.getCaseRegion(), caseOp.getLoc()); + terminateBody(builder, swop.getBody(), swop.getLoc()); - // Make sure all case regions are terminated by inserting fallthroughs - // when necessary. - // FIXME: find a better way to get accurante with location here. - for (auto &r : swop.getRegions()) - terminateCaseRegion(r, swop.getLoc()); - return mlir::success(); + return res; } void CIRGenFunction::buildReturnOfRValue(mlir::Location loc, RValue RV, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d22572205cf5..d7eb3b51bc57 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1486,142 +1486,42 @@ mlir::cir::BrCondOp::getSuccessorForOperands(ArrayRef operands) { } //===----------------------------------------------------------------------===// -// SwitchOp +// CaseOp //===----------------------------------------------------------------------===// -ParseResult -parseSwitchOp(OpAsmParser &parser, - llvm::SmallVectorImpl> ®ions, - ::mlir::ArrayAttr &casesAttr, - mlir::OpAsmParser::UnresolvedOperand &cond, - mlir::Type &condType) { - mlir::cir::IntType intCondType; - SmallVector cases; - - auto parseAndCheckRegion = [&]() -> ParseResult { - // Parse region attached to case - regions.emplace_back(new Region); - Region &currRegion = *regions.back().get(); - auto parserLoc = parser.getCurrentLocation(); - if (parser.parseRegion(currRegion, /*arguments=*/{}, /*argTypes=*/{})) { - regions.clear(); - return failure(); - } - - if (currRegion.empty()) { - return parser.emitError(parser.getCurrentLocation(), - "case region shall not be empty"); - } - - if (!(currRegion.back().mightHaveTerminator() && - currRegion.back().getTerminator())) - return parser.emitError(parserLoc, - "case regions must be explicitly terminated"); - - return success(); - }; +void mlir::cir::CaseOp::getSuccessorRegions( + mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { + if (!point.isParent()) { + regions.push_back(RegionSuccessor()); + return; + } - auto parseCase = [&]() -> ParseResult { - auto loc = parser.getCurrentLocation(); - if (parser.parseKeyword("case").failed()) - return parser.emitError(loc, "expected 'case' keyword here"); + regions.push_back(RegionSuccessor(&getCaseRegion())); +} - if (parser.parseLParen().failed()) - return parser.emitError(parser.getCurrentLocation(), "expected '('"); +void mlir::cir::CaseOp::build(OpBuilder &builder, OperationState &result, + ArrayAttr value, CaseOpKind kind, + OpBuilder::InsertPoint &insertPoint) { + OpBuilder::InsertionGuard guardSwitch(builder); + result.addAttribute("value", value); + result.getOrAddProperties().kind = + ::mlir::cir::CaseOpKindAttr::get(builder.getContext(), kind); + Region *caseRegion = result.addRegion(); + builder.createBlock(caseRegion); - ::llvm::StringRef attrStr; - ::mlir::NamedAttrList attrStorage; - - // case (equal, 20) { - // ... - // 1. Get the case kind - // 2. Get the value (next in list) - - // These needs to be in sync with CIROps.td - if (parser.parseOptionalKeyword(&attrStr, - {"default", "equal", "anyof", "range"})) { - ::mlir::StringAttr attrVal; - ::mlir::OptionalParseResult parseResult = parser.parseOptionalAttribute( - attrVal, parser.getBuilder().getNoneType(), "kind", attrStorage); - if (parseResult.has_value()) { - if (failed(*parseResult)) - return ::mlir::failure(); - attrStr = attrVal.getValue(); - } - } + insertPoint = builder.saveInsertionPoint(); +} - if (attrStr.empty()) { - return parser.emitError( - loc, - "expected string or keyword containing one of the following " - "enum values for attribute 'kind' [default, equal, anyof, range]"); - } +LogicalResult mlir::cir::CaseOp::verify() { return success(); } - auto attrOptional = ::mlir::cir::symbolizeCaseOpKind(attrStr.str()); - if (!attrOptional) - return parser.emitError(loc, "invalid ") - << "kind attribute specification: \"" << attrStr << '"'; - - auto kindAttr = ::mlir::cir::CaseOpKindAttr::get( - parser.getBuilder().getContext(), attrOptional.value()); - - // `,` value or `,` [values,...] - SmallVector caseEltValueListAttr; - mlir::ArrayAttr caseValueList; - - switch (kindAttr.getValue()) { - case mlir::cir::CaseOpKind::Equal: { - if (parser.parseComma().failed()) - return mlir::failure(); - int64_t val = 0; - if (parser.parseInteger(val).failed()) - return ::mlir::failure(); - caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(intCondType, val)); - break; - } - case mlir::cir::CaseOpKind::Range: - case mlir::cir::CaseOpKind::Anyof: { - if (parser.parseComma().failed()) - return mlir::failure(); - if (parser.parseLSquare().failed()) - return mlir::failure(); - if (parser.parseCommaSeparatedList([&]() { - int64_t val = 0; - if (parser.parseInteger(val).failed()) - return ::mlir::failure(); - caseEltValueListAttr.push_back( - mlir::cir::IntAttr::get(intCondType, val)); - return ::mlir::success(); - })) - return mlir::failure(); - if (parser.parseRSquare().failed()) - return mlir::failure(); - break; - } - case mlir::cir::CaseOpKind::Default: { - if (parser.parseRParen().failed()) - return parser.emitError(parser.getCurrentLocation(), "expected ')'"); - cases.push_back(mlir::cir::CaseAttr::get( - parser.getContext(), parser.getBuilder().getArrayAttr({}), kindAttr)); - return parseAndCheckRegion(); - } - } +//===----------------------------------------------------------------------===// +// SwitchOp +//===----------------------------------------------------------------------===// - caseValueList = parser.getBuilder().getArrayAttr(caseEltValueListAttr); - cases.push_back( - mlir::cir::CaseAttr::get(parser.getContext(), caseValueList, kindAttr)); - if (succeeded(parser.parseOptionalColon())) { - Type caseIntTy; - if (parser.parseType(caseIntTy).failed()) - return parser.emitError(parser.getCurrentLocation(), "expected type"); - if (intCondType != caseIntTy) - return parser.emitError(parser.getCurrentLocation(), - "expected a match with the condition type"); - } - if (parser.parseRParen().failed()) - return parser.emitError(parser.getCurrentLocation(), "expected ')'"); - return parseAndCheckRegion(); - }; +ParseResult parseSwitchOp(OpAsmParser &parser, mlir::Region ®ions, + mlir::OpAsmParser::UnresolvedOperand &cond, + mlir::Type &condType) { + mlir::cir::IntType intCondType; if (parser.parseLParen()) return ::mlir::failure(); @@ -1635,93 +1535,26 @@ parseSwitchOp(OpAsmParser &parser, condType = intCondType; if (parser.parseRParen()) return ::mlir::failure(); - - if (parser - .parseCommaSeparatedList(OpAsmParser::Delimiter::Square, parseCase, - " in cases list") - .failed()) + if (parser.parseRegion(regions, /*arguments=*/{}, /*argTypes=*/{})) return failure(); - casesAttr = parser.getBuilder().getArrayAttr(cases); return ::mlir::success(); } void printSwitchOp(OpAsmPrinter &p, mlir::cir::SwitchOp op, - mlir::MutableArrayRef<::mlir::Region> regions, - mlir::ArrayAttr casesAttr, mlir::Value condition, + mlir::Region &bodyRegion, mlir::Value condition, mlir::Type condType) { - int idx = 0, lastIdx = regions.size() - 1; - p << "("; p << condition; p << " : "; p.printStrippedAttrOrType(condType); - p << ") ["; - // FIXME: ideally we want some extra indentation for "cases" but too - // cumbersome to pull it out now, since most handling is private. Perhaps - // better improve overall mechanism. - p.printNewline(); - for (auto &r : regions) { - p << "case ("; - - auto attr = cast(casesAttr[idx]); - auto kind = attr.getKind().getValue(); - assert((kind == mlir::cir::CaseOpKind::Default || - kind == mlir::cir::CaseOpKind::Equal || - kind == mlir::cir::CaseOpKind::Anyof || - kind == mlir::cir::CaseOpKind::Range) && - "unknown case"); - - // Case kind - p << stringifyCaseOpKind(kind); - - // Case value - switch (kind) { - case mlir::cir::CaseOpKind::Equal: { - p << ", "; - auto intAttr = cast(attr.getValue()[0]); - auto intAttrTy = cast(intAttr.getType()); - (intAttrTy.isSigned() ? p << intAttr.getSInt() : p << intAttr.getUInt()); - break; - } - case mlir::cir::CaseOpKind::Range: - assert(attr.getValue().size() == 2 && "range must have two values"); - // The print format of the range is the same as anyof - LLVM_FALLTHROUGH; - case mlir::cir::CaseOpKind::Anyof: { - p << ", ["; - llvm::interleaveComma(attr.getValue(), p, [&](const Attribute &a) { - auto intAttr = cast(a); - auto intAttrTy = cast(intAttr.getType()); - (intAttrTy.isSigned() ? p << intAttr.getSInt() - : p << intAttr.getUInt()); - }); - p << "] : "; - auto typedAttr = dyn_cast(attr.getValue()[0]); - assert(typedAttr && "this should never not have a type!"); - p.printType(typedAttr.getType()); - break; - } - case mlir::cir::CaseOpKind::Default: - break; - } + p << ")"; - p << ") "; - p.printRegion(r, /*printEntryBLockArgs=*/false, - /*printBlockTerminators=*/true); - if (idx < lastIdx) - p << ","; - p.printNewline(); - idx++; - } - p << "]"; + p << ' '; + p.printRegion(bodyRegion, /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/true); } -/// Given the region at `index`, or the parent operation if `index` is None, -/// return the successor regions. These are the regions that may be selected -/// during the flow of control. `operands` is a set of optional attributes -/// that correspond to a constant value for each operand, or null if that -/// operand is not a constant. void mlir::cir::SwitchOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // If any index all the underlying regions branch back to the parent @@ -1731,41 +1564,53 @@ void mlir::cir::SwitchOp::getSuccessorRegions( return; } - // for (auto &r : this->getRegions()) { - // If we can figure out the case stmt we are landing, this can be - // overly simplified. - // bool condition; - // if (auto condAttr = operands.front().dyn_cast_or_null()) { - // assert(0 && "not implemented"); - // (void)r; - // condition = condAttr.getValue().isOneValue(); - // Add the successor regions using the condition. - // regions.push_back(RegionSuccessor(condition ? &thenRegion() : - // elseRegion)); - // return; - // } - // } - - // If the condition isn't constant, all regions may be executed. - for (auto &r : this->getRegions()) - regions.push_back(RegionSuccessor(&r)); + regions.push_back(RegionSuccessor(&getBody())); } -LogicalResult mlir::cir::SwitchOp::verify() { - if (getCases().has_value() && getCases()->size() != getNumRegions()) - return emitOpError("number of cases attributes and regions must match"); - return success(); -} +LogicalResult mlir::cir::SwitchOp::verify() { return success(); } void mlir::cir::SwitchOp::build( OpBuilder &builder, OperationState &result, Value cond, function_ref switchBuilder) { assert(switchBuilder && "the builder callback for regions must be present"); OpBuilder::InsertionGuard guardSwitch(builder); + Region *swtichRegion = result.addRegion(); + builder.createBlock(swtichRegion); result.addOperands({cond}); switchBuilder(builder, result.location, result); } +void mlir::cir::SwitchOp::collectCases(llvm::SmallVector &cases) { + walk([&](mlir::Operation *op) { + // Don't walk in nested switch op. + if (isa(op) && op != *this) + return WalkResult::skip(); + + if (isa(op)) + cases.push_back(cast(*op)); + + return WalkResult::advance(); + }); +} + +bool mlir::cir::SwitchOp::isSimpleForm(llvm::SmallVector &cases) { + collectCases(cases); + + if (getBody().empty()) + return false; + + if (!isa(getBody().front().back())) + return false; + + if (!llvm::all_of(getBody().front(), + [](Operation &op) { return isa(op); })) + return false; + + return llvm::all_of(cases, [this](CaseOp op) { + return op->getParentOfType() == *this; + }); +} + //===----------------------------------------------------------------------===// // SwitchFlatOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp index e2cac79c2c70..b5096722f42d 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp @@ -74,7 +74,8 @@ struct RemoveEmptySwitch : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; LogicalResult match(SwitchOp op) const final { - return success(op.getRegions().empty()); + return success(op.getBody().empty() || + isa(op.getBody().front().front())); } void rewrite(SwitchOp op, PatternRewriter &rewriter) const final { diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 7fd7cc6f21fc..bebb6c6770de 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -686,16 +686,48 @@ class CIRSwitchOpFlattening mlir::LogicalResult matchAndRewrite(mlir::cir::SwitchOp op, mlir::PatternRewriter &rewriter) const override { + llvm::SmallVector cases; + op.collectCases(cases); + // Empty switch statement: just erase it. - if (!op.getCases().has_value() || op.getCases()->empty()) { + if (cases.empty()) { rewriter.eraseOp(op); return mlir::success(); } - // Create exit block. - rewriter.setInsertionPointAfter(op); - auto *exitBlock = - rewriter.splitBlock(rewriter.getBlock(), rewriter.getInsertionPoint()); + // Create exit block from the next node of cir.switch op. + auto *exitBlock = rewriter.splitBlock(rewriter.getBlock(), + op->getNextNode()->getIterator()); + + // We lower cir.switch op in the following process: + // 1. Inline the region from the switch op after switch op. + // 2. Traverse each cir.case op: + // a. Record the entry block, block arguments and condition for every + // case. b. Inline the case region after the case op. + // 3. Replace the empty cir.switch.op with the new cir.switchflat op by the + // recorded block and conditions. + + // inline everything from switch body between the switch op and the exit + // block. + { + mlir::cir::YieldOp switchYield = nullptr; + // Clear switch operation. + for (auto &block : llvm::make_early_inc_range(op.getBody().getBlocks())) + if (auto yieldOp = dyn_cast(block.getTerminator())) + switchYield = yieldOp; + + assert(!op.getBody().empty()); + mlir::Block *originalBlock = op->getBlock(); + mlir::Block *swopBlock = + rewriter.splitBlock(originalBlock, op->getIterator()); + rewriter.inlineRegionBefore(op.getBody(), exitBlock); + + if (switchYield) + rewriteYieldOp(rewriter, switchYield, exitBlock); + + rewriter.setInsertionPointToEnd(originalBlock); + rewriter.create(op.getLoc(), swopBlock); + } // Allocate required data structures (disconsider default case in // vectors). @@ -711,55 +743,36 @@ class CIRSwitchOpFlattening mlir::Block *defaultDestination = exitBlock; mlir::ValueRange defaultOperands = exitBlock->getArguments(); - // Track fallthrough between cases. - mlir::cir::YieldOp fallthroughYieldOp = nullptr; - // Digest the case statements values and bodies. - for (size_t i = 0; i < op.getCases()->size(); ++i) { - auto ®ion = op.getRegion(i); - auto caseAttr = cast(op.getCases()->getValue()[i]); + for (auto caseOp : cases) { + mlir::Region ®ion = caseOp.getCaseRegion(); // Found default case: save destination and operands. - switch (caseAttr.getKind().getValue()) { + switch (caseOp.getKind()) { case mlir::cir::CaseOpKind::Default: defaultDestination = ®ion.front(); - defaultOperands = region.getArguments(); + defaultOperands = defaultDestination->getArguments(); break; case mlir::cir::CaseOpKind::Range: - assert(caseAttr.getValue().size() == 2 && + assert(caseOp.getValue().size() == 2 && "Case range should have 2 case value"); rangeValues.push_back( - {cast(caseAttr.getValue()[0]).getValue(), - cast(caseAttr.getValue()[1]).getValue()}); + {cast(caseOp.getValue()[0]).getValue(), + cast(caseOp.getValue()[1]).getValue()}); rangeDestinations.push_back(®ion.front()); - rangeOperands.push_back(region.getArguments()); + rangeOperands.push_back(rangeDestinations.back()->getArguments()); break; case mlir::cir::CaseOpKind::Anyof: case mlir::cir::CaseOpKind::Equal: // AnyOf cases kind can have multiple values, hence the loop below. - for (auto &value : caseAttr.getValue()) { + for (auto &value : caseOp.getValue()) { caseValues.push_back(cast(value).getValue()); - caseOperands.push_back(region.getArguments()); caseDestinations.push_back(®ion.front()); + caseOperands.push_back(caseDestinations.back()->getArguments()); } break; } - // Previous case is a fallthrough: branch it to this case. - if (fallthroughYieldOp) { - rewriteYieldOp(rewriter, fallthroughYieldOp, ®ion.front()); - fallthroughYieldOp = nullptr; - } - - for (auto &blk : region.getBlocks()) { - if (blk.getNumSuccessors()) - continue; - - // Handle switch-case yields. - if (auto yieldOp = dyn_cast(blk.getTerminator())) - fallthroughYieldOp = yieldOp; - } - // Handle break statements. walkRegionSkipping( region, [&](mlir::Operation *op) { @@ -770,14 +783,45 @@ class CIRSwitchOpFlattening return mlir::WalkResult::skip(); }); - // Extract region contents before erasing the switch op. - rewriter.inlineRegionBefore(region, exitBlock); + // Track fallthrough in cases. + for (auto &blk : region.getBlocks()) { + if (blk.getNumSuccessors()) + continue; + + if (auto yieldOp = dyn_cast(blk.getTerminator())) { + mlir::Operation *nextOp = caseOp->getNextNode(); + assert(nextOp && "caseOp is not expected to be the last op"); + mlir::Block *oldBlock = nextOp->getBlock(); + mlir::Block *newBlock = + rewriter.splitBlock(oldBlock, nextOp->getIterator()); + rewriter.setInsertionPointToEnd(oldBlock); + rewriter.create(nextOp->getLoc(), mlir::ValueRange(), + newBlock); + rewriteYieldOp(rewriter, yieldOp, newBlock); + } + } + + mlir::Block *oldBlock = caseOp->getBlock(); + mlir::Block *newBlock = + rewriter.splitBlock(oldBlock, caseOp->getIterator()); + + mlir::Block &entryBlock = caseOp.getCaseRegion().front(); + rewriter.inlineRegionBefore(caseOp.getCaseRegion(), newBlock); + + // Create a branch to the entry of the inlined region. + rewriter.setInsertionPointToEnd(oldBlock); + rewriter.create(caseOp.getLoc(), &entryBlock); } - // Last case is a fallthrough: branch it to exit. - if (fallthroughYieldOp) { - rewriteYieldOp(rewriter, fallthroughYieldOp, exitBlock); - fallthroughYieldOp = nullptr; + // Remove all cases since we've inlined the regions. + for (auto caseOp : cases) { + mlir::Block *caseBlock = caseOp->getBlock(); + // Erase the block with no predecessors here to make the generated code + // simpler a little bit. + if (caseBlock->hasNoPredecessors()) + rewriter.eraseBlock(caseBlock); + else + rewriter.eraseOp(caseOp); } for (size_t index = 0; index < rangeValues.size(); ++index) { diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 99398bba908f..a781c9287c0f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -791,8 +791,11 @@ void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { // See checkIf for additional explanations. SmallVector pmapOps; - // If there are no regions, pmap is the same. - if (switchOp.getRegions().empty()) + // If there are no regions, return early pmap is the same. + // TODO: if the switch is not in a simple form, return early now and try to + // see if we can handle more complex form in future. + llvm::SmallVector cases; + if (!switchOp.isSimpleForm(cases)) return; auto isCaseFallthroughTerminated = [&](Region &r) { @@ -807,8 +810,7 @@ void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { return true; }; - auto regions = switchOp.getRegions(); - for (unsigned regionCurrent = 0, regionPastEnd = regions.size(); + for (unsigned regionCurrent = 0, regionPastEnd = cases.size(); regionCurrent != regionPastEnd; ++regionCurrent) { // Intentional pmap copy, basis to start new path. PMapType locaCasePmap = getPmap(); @@ -823,8 +825,8 @@ void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { // Note that for 'if' regions we use checkRegionWithScope, since // there are lexical scopes associated with each region, this is // not the case for switch's. - checkRegion(regions[idx]); - if (!isCaseFallthroughTerminated(regions[idx])) + checkRegion(cases[idx].getRegion()); + if (!isCaseFallthroughTerminated(cases[idx].getRegion())) break; idx++; } diff --git a/clang/test/CIR/CodeGen/atomic-runtime.cpp b/clang/test/CIR/CodeGen/atomic-runtime.cpp index dfe74a9e77c9..28220ee0f5e6 100644 --- a/clang/test/CIR/CodeGen/atomic-runtime.cpp +++ b/clang/test/CIR/CodeGen/atomic-runtime.cpp @@ -16,23 +16,23 @@ int runtime_load(int *ptr, int order) { // CHECK: %[[ptr:.*]] = cir.load %[[ptr_var:.*]] : !cir.ptr>, !cir.ptr // CHECK: %[[order:.*]] = cir.load %[[order_var:.*]] : !cir.ptr, !s32i -// CHECK: cir.switch (%[[order]] : !s32i) [ -// CHECK: case (default) { +// CHECK: cir.switch (%[[order]] : !s32i) { +// CHECK: cir.case(default, []) { // CHECK: %[[T8:.*]] = cir.load atomic(relaxed) %[[ptr]] : !cir.ptr, !s32i // CHECK: cir.store %[[T8]], %[[temp_var:.*]] : !s32i, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: } +// CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[T8:.*]] = cir.load atomic(acquire) %[[ptr]] : !cir.ptr, !s32i // CHECK: cir.store %[[T8]], %[[temp_var]] : !s32i, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 5) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[T8:.*]] = cir.load atomic(seq_cst) %[[ptr]] : !cir.ptr, !s32i // CHECK: cir.store %[[T8]], %[[temp_var]] : !s32i, !cir.ptr // CHECK: cir.break // CHECK: } -// CHECK: ] +// CHECK: } void atomic_store_n(int* ptr, int val, int order) { __atomic_store_n(ptr, val, order); @@ -42,23 +42,23 @@ void atomic_store_n(int* ptr, int val, int order) { // CHECK: %[[order:.*]] = cir.load %[[order_var:.*]] : !cir.ptr, !s32i // CHECK: %[[val:.*]] = cir.load %[[val_var:.*]] : !cir.ptr, !s32i // CHECK: cir.store %[[val]], %[[temp_var:.*]] : !s32i, !cir.ptr -// CHECK: cir.switch (%[[order]] : !s32i) [ -// CHECK: case (default) { +// CHECK: cir.switch (%[[order]] : !s32i) { +// CHECK: cir.case(default, []) { // CHECK: %[[T7:.*]] = cir.load %[[temp_var:.*]] : !cir.ptr, !s32i // CHECK: cir.store atomic(relaxed) %[[T7]], %[[ptr]] : !s32i, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 3) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<3> : !s32i]) { // CHECK: %[[T7:.*]] = cir.load %[[temp_var:.*]] : !cir.ptr, !s32i // CHECK: cir.store atomic(release) %[[T7]], %[[ptr]] : !s32i, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 5) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[T7:.*]] = cir.load %[[temp_var:.*]] : !cir.ptr, !s32i // CHECK: cir.store atomic(seq_cst) %[[T7]], %[[ptr]] : !s32i, !cir.ptr // CHECK: cir.break // CHECK: } -// CHECK: ] +// CHECK: } int atomic_exchange_n(int* ptr, int val, int order) { return __atomic_exchange_n(ptr, val, order); @@ -68,38 +68,38 @@ int atomic_exchange_n(int* ptr, int val, int order) { // CHECK: %[[order:.*]] = cir.load %[[order_var:.*]] : !cir.ptr, !s32i // CHECK: %[[val:.*]] = cir.load %[[val_var:.*]] : !cir.ptr, !s32i // CHECK: cir.store %[[val]], %[[temp_var:.*]] : !s32i, !cir.ptr -// CHECK: cir.switch (%[[order]] : !s32i) [ -// CHECK: case (default) { +// CHECK: cir.switch (%[[order]] : !s32i) { +// CHECK: cir.case(default, []) { // CHECK: %[[T11:.*]] = cir.load %[[temp_var]] : !cir.ptr, !s32i // CHECK: %[[T12:.*]] = cir.atomic.xchg(%[[ptr]] : !cir.ptr, %[[T11]] : !s32i, relaxed) : !s32i // CHECK: cir.store %[[T12]], %[[result:.*]] : !s32i, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: } +// CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[T11:.*]] = cir.load %[[temp_var]] : !cir.ptr, !s32i // CHECK: %[[T12:.*]] = cir.atomic.xchg(%[[ptr]] : !cir.ptr, %[[T11]] : !s32i, acquire) : !s32i // CHECK: cir.store %[[T12]], %[[result]] : !s32i, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 3) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<3> : !s32i]) { // CHECK: %[[T11:.*]] = cir.load %[[temp_var]] : !cir.ptr, !s32i // CHECK: %[[T12:.*]] = cir.atomic.xchg(%[[ptr]] : !cir.ptr, %[[T11]] : !s32i, release) : !s32i // CHECK: cir.store %[[T12]], %[[result]] : !s32i, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 4) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<4> : !s32i]) { // CHECK: %[[T11:.*]] = cir.load %[[temp_var]] : !cir.ptr, !s32i // CHECK: %[[T12:.*]] = cir.atomic.xchg(%[[ptr]] : !cir.ptr, %[[T11]] : !s32i, acq_rel) : !s32i // CHECK: cir.store %[[T12]], %[[result]] : !s32i, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 5) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[T11:.*]] = cir.load %[[temp_var]] : !cir.ptr, !s32i // CHECK: %[[T12:.*]] = cir.atomic.xchg(%[[ptr]] : !cir.ptr, %[[T11]] : !s32i, seq_cst) : !s32i // CHECK: cir.store %[[T12]], %[[result]] : !s32i, !cir.ptr // CHECK: cir.break // CHECK: } -// CHECK: ] +// CHECK: } bool atomic_compare_exchange_n(int* ptr, int* expected, int desired, int success, int failure) { @@ -114,10 +114,10 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.store %[[T11]], %[[desired_var:.*]] : !s32i, !cir.ptr // CHECK: %[[failure:.*]] = cir.load %[[T4:.*]] : !cir.ptr, !s32i // CHECK: %[[T13:.*]] = cir.const #false -// CHECK: cir.switch (%[[success]] : !s32i) [ -// CHECK: case (default) { -// CHECK: cir.switch (%[[failure]] : !s32i) [ -// CHECK: case (default) { +// CHECK: cir.switch (%[[success]] : !s32i) { +// CHECK: cir.case(default, []) { +// CHECK: cir.switch (%[[failure]] : !s32i) { +// CHECK: cir.case(default, []) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = relaxed) : (!s32i, !cir.bool) @@ -127,8 +127,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: } // CHECK: cir.store %cmp, %[[result_var:.*]] : !cir.bool, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: } +// CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = acquire) : (!s32i, !cir.bool) @@ -138,8 +138,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: } // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 5) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = seq_cst) : (!s32i, !cir.bool) @@ -150,12 +150,12 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break // CHECK: } -// CHECK: ] +// CHECK: } // CHECK: cir.break -// CHECK: }, -// CHECK: case (anyof, [1, 2] : !s32i) { -// CHECK: cir.switch (%[[failure]] : !s32i) [ -// CHECK: case (default) { +// CHECK: } +// CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { +// CHECK: cir.switch (%[[failure]] : !s32i) { +// CHECK: cir.case(default, []) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = relaxed) : (!s32i, !cir.bool) @@ -165,8 +165,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: } // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: } +// CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = acquire) : (!s32i, !cir.bool) @@ -176,8 +176,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: } // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 5) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = seq_cst) : (!s32i, !cir.bool) @@ -188,12 +188,12 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break // CHECK: } -// CHECK: ] +// CHECK: } // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 3) { -// CHECK: cir.switch (%[[failure]] : !s32i) [ -// CHECK: case (default) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<3> : !s32i]) +// CHECK: cir.switch (%[[failure]] : !s32i) { +// CHECK: cir.case(default, []) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = relaxed) : (!s32i, !cir.bool) @@ -203,8 +203,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: } // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: } +// CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = acquire) : (!s32i, !cir.bool) @@ -214,8 +214,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: } // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 5) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = seq_cst) : (!s32i, !cir.bool) @@ -226,12 +226,12 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break // CHECK: } -// CHECK: ] +// CHECK: } // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 4) { -// CHECK: cir.switch (%[[failure]] : !s32i) [ -// CHECK: case (default) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<4> : !s32i]) { +// CHECK: cir.switch (%[[failure]] : !s32i) { +// CHECK: cir.case(default, []) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = relaxed) : (!s32i, !cir.bool) @@ -241,8 +241,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: } // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: } +// CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = acquire) : (!s32i, !cir.bool) @@ -252,8 +252,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: } // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 5) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = seq_cst) : (!s32i, !cir.bool) @@ -264,12 +264,12 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break // CHECK: } -// CHECK: ] +// CHECK: } // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 5) { -// CHECK: cir.switch (%[[failure]] : !s32i) [ -// CHECK: case (default) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { +// CHECK: cir.switch (%[[failure]] : !s32i) { +// CHECK: cir.case(default, []) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = relaxed) : (!s32i, !cir.bool) @@ -279,8 +279,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: } // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (anyof, [1, 2] : !s32i) { +// CHECK: } +// CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = acquire) : (!s32i, !cir.bool) @@ -290,8 +290,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: } // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break -// CHECK: }, -// CHECK: case (equal, 5) { +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i // CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = seq_cst) : (!s32i, !cir.bool) @@ -302,8 +302,8 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.store %cmp, %[[result_var]] : !cir.bool, !cir.ptr // CHECK: cir.break // CHECK: } -// CHECK: ] +// CHECK: } // CHECK: cir.break // CHECK: } -// CHECK: ] +// CHECK: } diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 6b9b64d175a9..aa7547d306df 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -303,7 +303,7 @@ extern "C" void multiple_non_case(int v) { // NOFLAT: cir.func @multiple_non_case // NOFLAT: cir.switch -// NOFLAT: case (default) +// NOFLAT: cir.case(default, []) { // NOFLAT: cir.call @action1() // NOFLAT: cir.br ^[[BB1:[a-zA-Z0-9]+]] // NOFLAT: ^[[BB1]]: @@ -326,13 +326,12 @@ extern "C" void case_follow_label(int v) { // NOFLAT: cir.func @case_follow_label // NOFLAT: cir.switch -// NOFLAT: case (equal, 1) +// NOFLAT: cir.case(equal, [#cir.int<1> : !s32i]) { // NOFLAT: cir.label "label" -// NOFLAT: cir.yield -// NOFLAT: case (equal, 2) +// NOFLAT: cir.case(equal, [#cir.int<2> : !s32i]) { // NOFLAT: cir.call @action1() // NOFLAT: cir.break -// NOFLAT: case (default) +// NOFLAT: cir.case(default, []) { // NOFLAT: cir.call @action2() // NOFLAT: cir.goto "label" @@ -351,10 +350,10 @@ extern "C" void default_follow_label(int v) { // NOFLAT: cir.func @default_follow_label // NOFLAT: cir.switch -// NOFLAT: case (anyof, [1, 2] : !s32i) +// NOFLAT: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // NOFLAT: cir.call @action1() // NOFLAT: cir.break // NOFLAT: cir.label "label" -// NOFLAT: case (default) +// NOFLAT: cir.case(default, []) { // NOFLAT: cir.call @action2() // NOFLAT: cir.goto "label" diff --git a/clang/test/CIR/CodeGen/switch-gnurange.cpp b/clang/test/CIR/CodeGen/switch-gnurange.cpp index f48a32506252..b6f03cb6d4b0 100644 --- a/clang/test/CIR/CodeGen/switch-gnurange.cpp +++ b/clang/test/CIR/CodeGen/switch-gnurange.cpp @@ -22,25 +22,18 @@ int sw1(enum letter c) { // CIR: cir.func @_Z3sw16letter // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (range, [0, 2] : !s32i) { -// CIR-NEXT: cir.yield -// CIR-NEXT: }, -// CIR-NEXT: case (range, [4, 5] : !s32i) { -// CIR-NEXT: cir.yield -// CIR-NEXT: }, -// CIR-NEXT: case (range, [6, 10] : !s32i) { -// CIR-NEXT: cir.yield -// CIR-NEXT: }, -// CIR-NEXT: case (equal, 3) { -// CIR-NEXT: cir.int<1> -// CIR: cir.return -// CIR-NEXT: }, -// CIR-NEXT: case (default) { +// CIR-NEXT: cir.case(range, [#cir.int<0> : !s32i, #cir.int<2> : !s32i]) { +// CIR-NEXT: cir.case(equal, [#cir.int<3> : !s32i]) { +// CIR-NEXT: cir.case(range, [#cir.int<4> : !s32i, #cir.int<5> : !s32i]) { +// CIR-NEXT: cir.case(range, [#cir.int<6> : !s32i, #cir.int<10> : !s32i]) { +// CIR: cir.int<1> +// CIR: cir.return +// CIR: cir.yield +// CIR: cir.yield +// CIR: cir.yield +// CIR: cir.case(default, []) { // CIR-NEXT: cir.int<0> // CIR: cir.return -// CIR-NEXT: } -// CIR-NEXT: ] -// CIR-NEXT: } // LLVM: @_Z3sw16letter // LLVM: switch i32 %[[C:[0-9]+]], label %[[DEFAULT:[0-9]+]] [ @@ -57,12 +50,12 @@ int sw1(enum letter c) { // LLVM-NEXT: i32 10, label %[[CASE_6_10]] // LLVM-NEXT: ] // LLVM: [[CASE_0_2]]: +// LLVM: br label %[[CASE_3]] +// LLVM: [[CASE_3]]: // LLVM: br label %[[CASE_4_5]] // LLVM: [[CASE_4_5]]: // LLVM: br label %[[CASE_6_10]] // LLVM: [[CASE_6_10]]: -// LLVM: br label %[[CASE_3]] -// LLVM: [[CASE_3]]: // LLVM: store i32 1 // LLVM: ret // LLVM: [[DEFAULT]]: @@ -83,14 +76,13 @@ int sw2(enum letter c) { // CIR: cir.func @_Z3sw26letter // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (range, [0, 2] : !s32i) { +// CIR-NEXT: cir.case(range, [#cir.int<0> : !s32i, #cir.int<2> : !s32i]) { +// CIR: cir.case(range, [#cir.int<10> : !s32i, #cir.int<0> : !s32i]) { // CIR: cir.return -// CIR-NEXT: }, -// CIR-NEXT: case (default) { +// CIR-NEXT: } +// CIR: cir.case(default, []) { // CIR: cir.return // CIR-NEXT: } -// CIR-NEXT: ] -// CIR-NEXT: } // LLVM: @_Z3sw26letter // LLVM: switch i32 %[[C:[0-9]+]], label %[[DEFAULT:[0-9]+]] [ @@ -99,6 +91,8 @@ int sw2(enum letter c) { // LLVM-NEXT: i32 2, label %[[CASE]] // LLVM-NEXT: ] // LLVM: [[CASE]]: +// LLVM: br label %[[IMPL:[0-9]+]] +// LLVM: [[IMPL]]: // LLVM: store i32 1 // LLVM: ret // LLVM: [[DEFAULT]]: @@ -126,24 +120,22 @@ void sw3(enum letter c) { // CIR: cir.func @_Z3sw36letter // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (range, [0, 2] : !s32i) { +// CIR-NEXT: cir.case(range, [#cir.int<0> : !s32i, #cir.int<2> : !s32i]) { // CIR-NEXT: cir.int<1> // CIR: cir.break -// CIR-NEXT: }, -// CIR-NEXT: case (range, [3, 5] : !s32i) { +// CIR-NEXT: } +// CIR: cir.case(range, [#cir.int<3> : !s32i, #cir.int<5> : !s32i]) { // CIR-NEXT: cir.int<2> // CIR: cir.break -// CIR-NEXT: }, -// CIR-NEXT: case (range, [6, 8] : !s32i) { +// CIR-NEXT: } +// CIR: cir.case(range, [#cir.int<6> : !s32i, #cir.int<8> : !s32i]) { // CIR-NEXT: cir.int<3> // CIR: cir.break -// CIR-NEXT: }, -// CIR-NEXT: case (range, [9, 10] : !s32i) { +// CIR-NEXT: } +// CIR: cir.case(range, [#cir.int<9> : !s32i, #cir.int<10> : !s32i]) { // CIR-NEXT: cir.int<4> // CIR: cir.break // CIR-NEXT: } -// CIR-NEXT: ] -// CIR-NEXT: } // LLVM: @_Z3sw36letter // LLVM: switch i32 %[[C:[0-9]+]], label %[[DEFAULT:[0-9]+]] [ @@ -188,18 +180,19 @@ void sw4(int x) { // CIR: cir.func @_Z3sw4i // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (range, [66, 233] : !s32i) { +// CIR-NEXT: cir.case(range, [#cir.int<66> : !s32i, #cir.int<233> : !s32i]) { // CIR-NEXT: cir.break -// CIR-NEXT: }, -// CIR-NEXT: case (range, [-50, 50] : !s32i) { +// CIR-NEXT: } +// CIR: cir.case(range, [#cir.int<-50> : !s32i, #cir.int<50> : !s32i]) { // CIR-NEXT: cir.break // CIR-NEXT: } -// CIR-NEXT: ] -// CIR-NEXT: } + // LLVM: @_Z3sw4i // LLVM: switch i32 %[[X:[0-9]+]], label %[[JUDGE_NEG50_50:[0-9]+]] [ // LLVM-NEXT: ] +// LLVM: [[UNREACHABLE_BB:[0-9]+]]: {{.*}} No predecessors! +// LLVM-NEXT: br label // LLVM: [[CASE_66_233:[0-9]+]]: // LLVM-NEXT: br label %[[EPILOG:[0-9]+]] // LLVM: [[CASE_NEG50_50:[0-9]+]]: @@ -228,18 +221,21 @@ void sw5(int x) { // CIR: cir.func @_Z3sw5i // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (range, [100, -100] : !s32i) { +// CIR-NEXT: cir.case(range, [#cir.int<100> : !s32i, #cir.int<-100> : !s32i]) { // CIR-NEXT: cir.int<1> // CIR: cir.yield // CIR-NEXT: } -// CIR-NEXT: ] // LLVM: @_Z3sw5i // LLVM: switch i32 %[[X:[0-9]+]], label %[[EPILOG:[0-9]+]] [ // LLVM-NEXT: ] +// LLVM: [[UNREACHABLE_BB:[0-9]+]]: {{.*}} No predecessors! +// LLVM-NEXT: br label // LLVM: [[CASE_100_NEG100:[0-9]+]]: // LLVM-NEXT: store i32 1, ptr %[[Y:[0-9]+]] -// LLVM-NEXT: br label %[[EPILOG]] +// LLVM-NEXT: br label %[[EPILOG_PRED:.+]] +// LLVM: [[EPILOG_PRED:[0-9]+]]: +// LLVM-NEXT: br label %[[EPILOG]] // LLVM: [[EPILOG]]: // LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] // LLVM: [[EPILOG_END]]: @@ -256,22 +252,23 @@ void sw6(int x) { // CIR: cir.func @_Z3sw6i // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (range, [-2147483648, 2147483647] : !s32i) { +// CIR-NEXT: cir.case(range, [#cir.int<-2147483648> : !s32i, #cir.int<2147483647> : !s32i]) { // CIR-NEXT: cir.int<1> // CIR: cir.yield // CIR-NEXT: } -// CIR-NEXT: ] // LLVM: @_Z3sw6i // LLVM: switch i32 %[[X:[0-9]+]], label %[[DEFAULT:[0-9]+]] [ // LLVM-NEXT: ] +// LLVM: [[UNREACHABLE_BB:[0-9]+]]: {{.*}} No predecessors! +// LLVM-NEXT: br label // LLVM: [[CASE_MIN_MAX:[0-9]+]]: // LLVM-NEXT: store i32 1, ptr %[[Y:[0-9]+]] -// LLVM-NEXT: br label %[[EPILOG:[0-9]+]] +// LLVM-NEXT: br label // LLVM: [[DEFAULT]]: // LLVM-NEXT: %[[DIFF:[0-9]+]] = sub i32 %[[X]], -2147483648 // LLVM-NEXT: %[[DIFF_CMP:[0-9]+]] = icmp ule i32 %[[DIFF]], -1 -// LLVM-NEXT: br i1 %[[DIFF_CMP]], label %[[CASE_MIN_MAX]], label %[[EPILOG]] +// LLVM-NEXT: br i1 %[[DIFF_CMP]], label %[[CASE_MIN_MAX]], label %[[EPILOG:[0-9]+]] // LLVM: [[EPILOG]]: // LLVM-NEXT: br label %[[EPILOG_END:[0-9]+]] // LLVM: [[EPILOG_END]]: @@ -297,22 +294,22 @@ void sw7(int x) { // CIR: cir.func @_Z3sw7i // CIR: cir.scope { // CIR: cir.switch -// CIR-NEXT: case (equal, 0) { +// CIR-NEXT: cir.case(equal, [#cir.int<0> : !s32i]) { // CIR-NEXT: cir.break -// CIR-NEXT: }, -// CIR-NEXT: case (range, [100, 200] : !s32i) { +// CIR-NEXT: } +// CIR-NEXT: cir.case(range, [#cir.int<100> : !s32i, #cir.int<200> : !s32i]) { // CIR-NEXT: cir.break -// CIR-NEXT: }, -// CIR-NEXT: case (equal, 1) { +// CIR-NEXT: } +// CIR-NEXT: cir.case(equal, [#cir.int<1> : !s32i]) { // CIR-NEXT: cir.break -// CIR-NEXT: }, -// CIR-NEXT: case (range, [300, 400] : !s32i) { +// CIR-NEXT: } +// CIR-NEXT: cir.case(range, [#cir.int<300> : !s32i, #cir.int<400> : !s32i]) { // CIR-NEXT: cir.break -// CIR-NEXT: }, -// CIR-NEXT: case (default) { +// CIR-NEXT: } +// CIR-NEXT: cir.case(default, []) { // CIR-NEXT: cir.break -// CIR-NEXT: }, -// CIR-NEXT: case (range, [500, 600] : !s32i) { +// CIR-NEXT: } +// CIR-NEXT: cir.case(range, [#cir.int<500> : !s32i, #cir.int<600> : !s32i]) { // CIR-NEXT: cir.break // CIR-NEXT: } diff --git a/clang/test/CIR/CodeGen/switch.cpp b/clang/test/CIR/CodeGen/switch.cpp index 74b1312fd229..3433c7e97e16 100644 --- a/clang/test/CIR/CodeGen/switch.cpp +++ b/clang/test/CIR/CodeGen/switch.cpp @@ -16,30 +16,15 @@ void sw1(int a) { } } // CHECK: cir.func @_Z3sw1i -// CHECK: cir.switch (%3 : !s32i) [ -// CHECK-NEXT: case (equal, 0) { -// CHECK-NEXT: %4 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) nsw : !s32i -// CHECK-NEXT: cir.store %6, %1 : !s32i, !cir.ptr -// CHECK-NEXT: cir.break -// CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 1) { -// CHECK-NEXT: cir.break -// CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 2) { -// CHECK-NEXT: cir.scope { -// CHECK-NEXT: %4 = cir.alloca !s32i, !cir.ptr, ["yolo", init] -// CHECK-NEXT: %5 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %6 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %7 = cir.binop(add, %5, %6) nsw : !s32i -// CHECK-NEXT: cir.store %7, %1 : !s32i, !cir.ptr -// CHECK-NEXT: %8 = cir.const #cir.int<100> : !s32i -// CHECK-NEXT: cir.store %8, %4 : !s32i, !cir.ptr -// CHECK-NEXT: cir.break -// CHECK-NEXT: } -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } +// CHECK: cir.switch (%3 : !s32i) { +// CHECK-NEXT: cir.case(equal, [#cir.int<0> : !s32i]) { +// CHECK: cir.break +// CHECK: cir.case(equal, [#cir.int<1> : !s32i]) { +// CHECK-NEXT: cir.break +// CHECK: cir.case(equal, [#cir.int<2> : !s32i]) { +// CHECK: cir.scope { +// CHECK: cir.alloca !s32i, !cir.ptr, ["yolo", init] +// CHECK: cir.break void sw2(int a) { switch (int yolo = 2; a) { @@ -55,8 +40,8 @@ void sw2(int a) { // CHECK: cir.scope { // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["yolo", init] // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["fomo", init] -// CHECK: cir.switch (%4 : !s32i) [ -// CHECK-NEXT: case (equal, 3) { +// CHECK: cir.switch (%4 : !s32i) { +// CHECK-NEXT: cir.case(equal, [#cir.int<3> : !s32i]) { // CHECK-NEXT: %5 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store %5, %2 : !s32i, !cir.ptr @@ -70,11 +55,12 @@ void sw3(int a) { // CHECK: cir.func @_Z3sw3i // CHECK: cir.scope { // CHECK-NEXT: %1 = cir.load %0 : !cir.ptr, !s32i -// CHECK-NEXT: cir.switch (%1 : !s32i) [ -// CHECK-NEXT: case (default) { +// CHECK-NEXT: cir.switch (%1 : !s32i) { +// CHECK-NEXT: cir.case(default, []) { // CHECK-NEXT: cir.break // CHECK-NEXT: } -// CHECK-NEXT: ] +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } int sw4(int a) { switch (a) { @@ -88,8 +74,8 @@ int sw4(int a) { } // CHECK: cir.func @_Z3sw4i -// CHECK: cir.switch (%4 : !s32i) [ -// CHECK-NEXT: case (equal, 42) { +// CHECK: cir.switch (%4 : !s32i) { +// CHECK-NEXT: cir.case(equal, [#cir.int<42> : !s32i]) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.const #cir.int<3> : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr @@ -97,14 +83,15 @@ int sw4(int a) { // CHECK-NEXT: cir.return %6 : !s32i // CHECK-NEXT: } // CHECK-NEXT: cir.yield -// CHECK-NEXT: }, -// CHECK-NEXT: case (default) { +// CHECK-NEXT: } +// CHECK-NEXT: cir.case(default, []) { // CHECK-NEXT: %5 = cir.const #cir.int<2> : !s32i // CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr // CHECK-NEXT: %6 = cir.load %1 : !cir.ptr, !s32i // CHECK-NEXT: cir.return %6 : !s32i // CHECK-NEXT: } -// CHECK-NEXT: ] +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } void sw5(int a) { switch (a) { @@ -113,9 +100,12 @@ void sw5(int a) { } // CHECK: cir.func @_Z3sw5i -// CHECK: cir.switch (%1 : !s32i) [ -// CHECK-NEXT: case (equal, 1) { +// CHECK: cir.switch (%1 : !s32i) { +// CHECK-NEXT: cir.case(equal, [#cir.int<1> : !s32i]) { // CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } void sw6(int a) { switch (a) { @@ -131,11 +121,11 @@ void sw6(int a) { } // CHECK: cir.func @_Z3sw6i -// CHECK: cir.switch (%1 : !s32i) [ -// CHECK-NEXT: case (anyof, [0, 1, 2] : !s32i) { +// CHECK: cir.switch (%1 : !s32i) { +// CHECK-NEXT: cir.case(anyof, [#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK-NEXT: cir.break -// CHECK-NEXT: }, -// CHECK-NEXT: case (anyof, [3, 4, 5] : !s32i) { +// CHECK-NEXT: } +// CHECK-NEXT: cir.case(anyof, [#cir.int<3> : !s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i]) { // CHECK-NEXT: cir.break // CHECK-NEXT: } @@ -153,10 +143,10 @@ void sw7(int a) { } // CHECK: cir.func @_Z3sw7i -// CHECK: case (anyof, [0, 1, 2] : !s32i) { +// CHECK: cir.case(anyof, [#cir.int<0> : !s32i, #cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK-NEXT: cir.yield -// CHECK-NEXT: }, -// CHECK-NEXT: case (anyof, [3, 4, 5] : !s32i) { +// CHECK-NEXT: } +// CHECK-NEXT: cir.case(anyof, [#cir.int<3> : !s32i, #cir.int<4> : !s32i, #cir.int<5> : !s32i]) { // CHECK-NEXT: cir.break // CHECK-NEXT: } @@ -172,13 +162,13 @@ void sw8(int a) { } //CHECK: cir.func @_Z3sw8i -//CHECK: case (equal, 3) +//CHECK: cir.case(equal, [#cir.int<3> : !s32i]) { //CHECK-NEXT: cir.break -//CHECK-NEXT: }, -//CHECK-NEXT: case (equal, 4) { +//CHECK-NEXT: } +//CHECK-NEXT: cir.case(equal, [#cir.int<4> : !s32i]) { //CHECK-NEXT: cir.yield //CHECK-NEXT: } -//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.case(default, []) { //CHECK-NEXT: cir.break //CHECK-NEXT: } @@ -194,13 +184,13 @@ void sw9(int a) { } //CHECK: cir.func @_Z3sw9i -//CHECK: case (equal, 3) { +//CHECK: cir.case(equal, [#cir.int<3> : !s32i]) { //CHECK-NEXT: cir.break //CHECK-NEXT: } -//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.case(default, []) { //CHECK-NEXT: cir.yield //CHECK-NEXT: } -//CHECK: case (equal, 4) +//CHECK-NEXT: cir.case(equal, [#cir.int<4> : !s32i]) { //CHECK-NEXT: cir.break //CHECK-NEXT: } @@ -217,16 +207,16 @@ void sw10(int a) { } //CHECK: cir.func @_Z4sw10i -//CHECK: case (equal, 3) +//CHECK: cir.case(equal, [#cir.int<3> : !s32i]) { //CHECK-NEXT: cir.break -//CHECK-NEXT: }, -//CHECK-NEXT: case (equal, 4) { +//CHECK-NEXT: } +//CHECK-NEXT: cir.case(equal, [#cir.int<4> : !s32i]) { //CHECK-NEXT: cir.yield //CHECK-NEXT: } -//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.case(default, []) { //CHECK-NEXT: cir.yield //CHECK-NEXT: } -//CHECK-NEXT: case (equal, 5) { +//CHECK-NEXT: cir.case(equal, [#cir.int<5> : !s32i]) { //CHECK-NEXT: cir.break //CHECK-NEXT: } @@ -245,16 +235,16 @@ void sw11(int a) { } //CHECK: cir.func @_Z4sw11i -//CHECK: case (equal, 3) +//CHECK: cir.case(equal, [#cir.int<3> : !s32i]) { //CHECK-NEXT: cir.break -//CHECK-NEXT: }, -//CHECK-NEXT: case (anyof, [4, 5] : !s32i) { +//CHECK-NEXT: } +//CHECK-NEXT: cir.case(anyof, [#cir.int<4> : !s32i, #cir.int<5> : !s32i]) { //CHECK-NEXT: cir.yield //CHECK-NEXT: } -//CHECK-NEXT: case (default) { +//CHECK-NEXT: cir.case(default, []) { //CHECK-NEXT: cir.yield //CHECK-NEXT: } -//CHECK-NEXT: case (anyof, [6, 7] : !s32i) { +//CHECK-NEXT: cir.case(anyof, [#cir.int<6> : !s32i, #cir.int<7> : !s32i]) { //CHECK-NEXT: cir.break //CHECK-NEXT: } @@ -270,7 +260,7 @@ void sw12(int a) { // CHECK: cir.func @_Z4sw12i // CHECK: cir.scope { // CHECK: cir.switch -// CHECK-NEXT: case (equal, 3) { +// CHECK-NEXT: cir.case(equal, [#cir.int<3> : !s32i]) { // CHECK-NEXT: cir.return // CHECK-NEXT: ^bb1: // no predecessors // CHECK-NEXT: cir.break @@ -289,16 +279,16 @@ void sw13(int a, int b) { // CHECK: cir.func @_Z4sw13ii // CHECK: cir.scope { // CHECK: cir.switch -// CHECK-NEXT: case (equal, 1) { +// CHECK-NEXT: cir.case(equal, [#cir.int<1> : !s32i]) { // CHECK-NEXT: cir.scope { // CHECK: cir.switch -// CHECK-NEXT: case (equal, 2) { +// CHECK-NEXT: cir.case(equal, [#cir.int<2> : !s32i]) { // CHECK-NEXT: cir.break // CHECK-NEXT: } -// CHECK-NEXT: ] +// CHECK-NEXT: cir.yield // CHECK-NEXT: } -// CHECK-NEXT: cir.yield // CHECK-NEXT: } +// CHECK: cir.yield // CHECK: } // CHECK: cir.return @@ -315,17 +305,18 @@ void fallthrough(int x) { // CHECK: cir.func @_Z11fallthroughi // CHECK: cir.scope { -// CHECK: cir.switch (%1 : !s32i) [ -// CHECK-NEXT: case (equal, 1) { +// CHECK: cir.switch (%1 : !s32i) { +// CHECK-NEXT: cir.case(equal, [#cir.int<1> : !s32i]) { // CHECK-NEXT: cir.yield -// CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 2) { +// CHECK-NEXT: } +// CHECK-NEXT: cir.case(equal, [#cir.int<2> : !s32i]) { // CHECK-NEXT: cir.break -// CHECK-NEXT: }, -// CHECK-NEXT: case (default) { +// CHECK-NEXT: } +// CHECK-NEXT: cir.case(default, []) { // CHECK-NEXT: cir.break // CHECK-NEXT: } -// CHECK-NEXT: ] +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: } int unreachable_after_break_1(int a) { @@ -342,8 +333,49 @@ int unreachable_after_break_1(int a) { } // CHECK: cir.func @_Z25unreachable_after_break_1i -// CHECK: case (equal, 42) { +// CHECK: cir.case(equal, [#cir.int<42> : !s32i]) { // CHECK: cir.break // CHECK: ^bb1: // no predecessors // CHECK: cir.goto "exit" // CHECK: } + +int nested_switch(int a) { + switch (int b = 1; a) { + case 0: + b = b + 1; + case 1: + return b; + case 2: { + b = b + 1; + if (a > 1000) { + case 9: + b += a; + } + if (a > 500) { + case 7: + return a + b; + } + break; + } + } + + return 0; +} + +// CHECK: cir.switch (%6 : !s32i) { +// CHECK: cir.case(equal, [#cir.int<0> : !s32i]) { +// CHECK: cir.yield +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<1> : !s32i]) { +// CHECK: cir.return +// CHECK: } +// CHECK: cir.case(equal, [#cir.int<2> : !s32i]) { +// CHECK: cir.scope { +// CHECK: cir.scope { +// CHECK: cir.if +// CHECK: cir.case(equal, [#cir.int<9> : !s32i]) { +// CHECK: cir.yield +// CHECK: cir.scope { +// CHECK: cir.if +// CHECK: cir.case(equal, [#cir.int<7> : !s32i]) { +// CHECK: cir.return diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 893655d78919..215cf1d870f1 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -75,11 +75,11 @@ cir.func @yieldcontinue() { !s32i = !cir.int cir.func @s0() { %1 = cir.const #cir.int<2> : !s32i - cir.switch (%1 : !s32i) [ - case (equal, 5) { // expected-error {{custom op 'cir.switch' case regions must be explicitly terminated}} - %2 = cir.const #cir.int<3> : !s32i + cir.switch (%1 : !s32i) { + cir.case (equal, [#cir.int<5> : !s32i]) { + %2 = cir.const #cir.int<3> : !s32i // expected-error {{block with no terminator}} } - ] + } cir.return } @@ -88,10 +88,10 @@ cir.func @s0() { !s32i = !cir.int cir.func @s1() { %1 = cir.const #cir.int<2> : !s32i - cir.switch (%1 : !s32i) [ - case (equal, 5) { + cir.switch (%1 : !s32i) { + cir.case (equal, [#cir.int<5> : !s32i]) { // expected-error {{block with no terminator}} } - ] // expected-error {{case region shall not be empty}} + } cir.return } diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index b5c0c9cafb6c..0bdc9c1e7e89 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -3,34 +3,36 @@ cir.func @s0() { %1 = cir.const #cir.int<2> : !s32i - cir.switch (%1 : !s32i) [ - case (default) { + cir.switch (%1 : !s32i) { + cir.case (default, []) { cir.return - }, - case (equal, 3) { + } + cir.case (equal, [#cir.int<3> : !s32i]) { cir.yield - }, - case (anyof, [6, 7, 8] : !s32i) { + } + cir.case (anyof, [#cir.int<6> : !s32i, #cir.int<7> : !s32i, #cir.int<8> : !s32i]) { cir.break - }, - case (equal, 5 : !s32i) { + } + cir.case (equal, [#cir.int<5> : !s32i]) { cir.yield } - ] + cir.yield + } cir.return } -// CHECK: cir.switch (%0 : !s32i) [ -// CHECK-NEXT: case (default) { +// CHECK: cir.switch (%0 : !s32i) { +// CHECK-NEXT: cir.case(default, []) { // CHECK-NEXT: cir.return -// CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 3) { +// CHECK-NEXT: } +// CHECK-NEXT: cir.case(equal, [#cir.int<3> : !s32i]) { // CHECK-NEXT: cir.yield -// CHECK-NEXT: }, -// CHECK-NEXT: case (anyof, [6, 7, 8] : !s32i) { +// CHECK-NEXT: } +// CHECK-NEXT: cir.case(anyof, [#cir.int<6> : !s32i, #cir.int<7> : !s32i, #cir.int<8> : !s32i]) { // CHECK-NEXT: cir.break -// CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 5) { +// CHECK-NEXT: } +// CHECK-NEXT: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK-NEXT: cir.yield // CHECK-NEXT: } -// CHECK-NEXT: ] +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } diff --git a/clang/test/CIR/Lowering/nested-switch.cpp b/clang/test/CIR/Lowering/nested-switch.cpp new file mode 100644 index 000000000000..5f6961a84018 --- /dev/null +++ b/clang/test/CIR/Lowering/nested-switch.cpp @@ -0,0 +1,69 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s + +int nested_switch(int a) { + switch (int b = 1; a) { + case 0: + b = b + 1; + case 1: + return b; + case 2: { + b = b + 1; + if (a > 1000) { + case 9: + b += a; + } + if (a > 500) { + case 7: + return a + b; + } + break; + } + } + + return 0; +} + +// CHECK: define {{.*}}@_Z13nested_switchi( +// CHECK: switch i32 %6, label %[[DEFAULT_BB:[0-9]+]] [ +// CHECK: i32 0, label %[[ZERO_BB:[0-9]+]] +// CHECK: i32 1, label %[[ONE_BB:[0-9]+]] +// CHECK: i32 2, label %[[TWO_BB:[0-9]+]] +// CHECK: i32 9, label %[[NINE_BB:[0-9]+]] +// CHECK: i32 7, label %[[SEVEN_BB:[0-9]+]] +// CHECK: ] +// +// CHECK: [[ZERO_BB]]: +// CHECK: add {{.*}}, 1 +// CHECK: br label %[[ONE_BB]] +// +// CHECK: [[ONE_BB]]: +// CHECK: ret +// +// CHECK: [[TWO_BB]]: +// CHECK: add {{.*}}, 1 +// CHECK: br label %[[IF_BB:[0-9]+]] +// +// CHECK: [[IF_BB]]: +// CHECK: %[[CMP:.+]] = icmp sgt i32 %{{.*}}, 1000 +// CHECK: br i1 %[[CMP]], label %[[IF_TRUE_BB:[0-9]+]], label %[[IF_FALSE_BB:[0-9]+]] +// +// CHECK: [[IF_TRUE_BB]]: +// CHECK: br label %[[NINE_BB]] +// +// CHECK: [[NINE_BB]]: +// CHECK: %[[A_VALUE:.+]] = load i32 +// CHECK: %[[B_VALUE:.+]] = load i32 +// CHECK: add nsw i32 %[[B_VALUE]], %[[A_VALUE]] +// +// CHECK: %[[CMP2:.+]] = icmp sgt i32 %{{.*}}, 500 +// CHECK: br i1 %[[CMP2]], label %[[IF2_TRUE_BB:[0-9]+]], label %[[IF2_FALSE_BB:[0-9]+]] +// +// CHECK: [[IF2_TRUE_BB]]: +// CHECK: br label %[[SEVEN_BB]] +// +// CHECK: [[SEVEN_BB]]: +// CHECK: ret +// +// CHECK: [[DEFAULT_BB]]: +// CHECK: ret diff --git a/clang/test/CIR/Lowering/switch-while.c b/clang/test/CIR/Lowering/switch-while.c new file mode 100644 index 000000000000..ed8d177323f1 --- /dev/null +++ b/clang/test/CIR/Lowering/switch-while.c @@ -0,0 +1,84 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s +void func100(); +int f(int a, int cond) { + int b = 1; + switch (a) + while (1) { + b++; + + default: + if (cond) + return a; + + a = a + b; + + case 2: + a++; + + case 3: + continue; + + case 5: + break; + + case 100: + func100(); + } + + return a; +} + +// CHECK: switch i32 %[[A:.+]], label %[[DEFAULT_BB:.+]] [ +// CHECK: i32 2, label %[[TWO_BB:.+]] +// CHECK: i32 3, label %[[THREE_BB:.+]] +// CHECK: i32 5, label %[[FIVE_BB:.+]] +// CHECK: i32 100, label %[[HUNDRED_BB:.+]] +// CHECK: ], +// +// CHECK: [[UNREACHABLE_BB:.+]]: {{.*}}; No predecessors! +// +// CHECK: [[LOOP_ENTRY:.+]]: +// CHECK: br label %[[LOOP_HEADER:.+]], +// +// CHECK: [[LOOP_HEADER]]: +// CHECK: add i32 %{{.*}}, 1 +// CHECK: br label %[[DEFAULT_BB:.+]], +// +// CHECK: [[DEFAULT_BB]]: +// CHECK: br label %[[IF_BB:.+]], +// +// CHECK: [[IF_BB]]: +// CHECK: %[[CMP:.+]] = icmp ne i32 %[[COND:.+]], 0 +// CHECK: br i1 %[[CMP]], label %[[IF_TRUE_BB:.+]], label %[[IF_FALSE_BB:.+]], +// +// CHECK: [[IF_TRUE_BB]]: +// CHECK: ret +// +// CHECK: [[IF_FALSE_BB]]: +// CHECK: %[[V1:.+]] = load i32 +// CHECK: %[[V2:.+]] = load i32 +// CHECK: add nsw i32 %[[V1]], %[[V2]] +// +// CHECK: [[TWO_BB]]: +// CHECK: add i32 %{{.*}}, 1 +// CHECK: br label %[[FALLTHOUGH_BB:.+]], +// +// CHECK: [[FALLTHOUGH_BB]]: +// CHECK: br label %[[LOOP_HEADER]], +// +// CHECK: [[FIVE_BB]]: +// CHECK: br label %[[LOOP_EXIT_BB:.+]], +// +// CHECK: [[HUNDRED_BB]]: +// CHECK: call {{.*}}@func100() +// CHECK: br label %[[CONTINUE_BB:.+]], +// +// CHECK: [[CONTINUE_BB]]: +// CHECK: br label %[[LOOP_HEADER]] +// +// CHECK: [[LOOP_EXIT_BB]]: +// CHECK: br label %[[RET_BB:.+]], +// +// CHECK: [[RET_BB]]: +// CHECK: ret diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index dee8e98db858..81cc6efdc92d 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -7,101 +7,107 @@ module { cir.func @shouldLowerSwitchWithDefault(%arg0: !s8i) { - cir.switch (%arg0 : !s8i) [ + cir.switch (%arg0 : !s8i) { // CHECK: llvm.switch %arg0 : i8, ^bb[[#DEFAULT:]] [ // CHECK: 1: ^bb[[#CASE1:]] // CHECK: ] - case (equal, 1) { + cir.case (equal, [#cir.int<1> : !s8i]) { cir.break - }, + } // CHECK: ^bb[[#CASE1]]: // CHECK: llvm.br ^bb[[#EXIT:]] - case (default) { + cir.case (default, []) { cir.break } // CHECK: ^bb[[#DEFAULT]]: // CHECK: llvm.br ^bb[[#EXIT]] - ] + cir.yield + } // CHECK: ^bb[[#EXIT]]: cir.return } cir.func @shouldLowerSwitchWithoutDefault(%arg0: !s32i) { - cir.switch (%arg0 : !s32i) [ + cir.switch (%arg0 : !s32i) { // Default block is the exit block: // CHECK: llvm.switch %arg0 : i32, ^bb[[#EXIT:]] [ // CHECK: 1: ^bb[[#CASE1:]] // CHECK: ] - case (equal, 1) { + cir.case (equal, [#cir.int<1> : !s32i]) { cir.break } // CHECK: ^bb[[#CASE1]]: // CHECK: llvm.br ^bb[[#EXIT]] - ] + cir.yield + } // CHECK: ^bb[[#EXIT]]: cir.return } cir.func @shouldLowerSwitchWithImplicitFallthrough(%arg0: !s64i) { - cir.switch (%arg0 : !s64i) [ + cir.switch (%arg0 : !s64i) { // CHECK: llvm.switch %arg0 : i64, ^bb[[#EXIT:]] [ // CHECK: 1: ^bb[[#CASE1N2:]], // CHECK: 2: ^bb[[#CASE1N2]] // CHECK: ] - case (anyof, [1, 2] : !s64i) { // case 1 and 2 use same region + cir.case (anyof, [#cir.int<1> : !s64i, #cir.int<2> : !s64i]) { // case 1 and 2 use same region cir.break } // CHECK: ^bb[[#CASE1N2]]: // CHECK: llvm.br ^bb[[#EXIT]] - ] + cir.yield + } // CHECK: ^bb[[#EXIT]]: cir.return } cir.func @shouldLowerSwitchWithExplicitFallthrough(%arg0: !s64i) { - cir.switch (%arg0 : !s64i) [ + cir.switch (%arg0 : !s64i) { // CHECK: llvm.switch %arg0 : i64, ^bb[[#EXIT:]] [ // CHECK: 1: ^bb[[#CASE1:]], // CHECK: 2: ^bb[[#CASE2:]] // CHECK: ] - case (equal, 1 : !s64i) { // case 1 has its own region + cir.case (equal, [#cir.int<1> : !s64i]) { // case 1 has its own region cir.yield // fallthrough to case 2 - }, + } // CHECK: ^bb[[#CASE1]]: // CHECK: llvm.br ^bb[[#CASE2]] - case (equal, 2 : !s64i) { + cir.case (equal, [#cir.int<2> : !s64i]) { cir.break } // CHECK: ^bb[[#CASE2]]: // CHECK: llvm.br ^bb[[#EXIT]] - ] + cir.yield + } // CHECK: ^bb[[#EXIT]]: cir.return } cir.func @shouldLowerSwitchWithFallthroughToExit(%arg0: !s64i) { - cir.switch (%arg0 : !s64i) [ + cir.switch (%arg0 : !s64i) { // CHECK: llvm.switch %arg0 : i64, ^bb[[#EXIT:]] [ // CHECK: 1: ^bb[[#CASE1:]] // CHECK: ] - case (equal, 1 : !s64i) { + cir.case (equal, [#cir.int<1> : !s64i]) { cir.yield // fallthrough to exit } // CHECK: ^bb[[#CASE1]]: // CHECK: llvm.br ^bb[[#EXIT]] - ] + cir.yield + } // CHECK: ^bb[[#EXIT]]: cir.return } cir.func @shouldDropEmptySwitch(%arg0: !s64i) { - cir.switch (%arg0 : !s64i) [ - ] + cir.switch (%arg0 : !s64i) { + cir.yield + } // CHECK-NOT: llvm.switch cir.return } @@ -111,28 +117,27 @@ module { cir.store %arg0, %0 : !s32i, !cir.ptr cir.scope { %1 = cir.load %0 : !cir.ptr, !s32i - cir.switch (%1 : !s32i) [ - case (equal, 3) { + cir.switch (%1 : !s32i) { + cir.case (equal, [#cir.int<3> : !s32i]) { cir.return ^bb1: // no predecessors cir.break } - ] + cir.yield + } } cir.return } // CHECK: llvm.func @shouldLowerMultiBlockCase // CHECK: ^bb1: // pred: ^bb0 - // CHECK: llvm.switch {{.*}} : i32, ^bb4 [ - // CHECK: 3: ^bb2 + // CHECK: llvm.switch {{.*}} : i32, ^[[DEFAULT_BB:.+]] [ + // CHECK: 3: ^[[DIRECTLY_RET_BB:.+]] // CHECK: ] - // CHECK: ^bb2: // pred: ^bb1 + // CHECK: ^[[DIRECTLY_RET_BB]]: // CHECK: llvm.return - // CHECK: ^bb3: // no predecessors - // CHECK: llvm.br ^bb4 - // CHECK: ^bb4: // 2 preds: ^bb1, ^bb3 - // CHECK: llvm.br ^bb5 - // CHECK: ^bb5: // pred: ^bb4 + // CHECK: ^[[DEFAULT_BB:.+]]: + // CHECK: llvm.br ^[[RET_BB:.+]] + // CHECK: ^[[RET_BB:.+]]: // pred: ^[[DEFAULT_BB:.+]] // CHECK: llvm.return // CHECK: } @@ -144,8 +149,8 @@ module { cir.store %arg1, %1 : !s32i, !cir.ptr cir.scope { %5 = cir.load %0 : !cir.ptr, !s32i - cir.switch (%5 : !s32i) [ - case (equal, 0) { + cir.switch (%5 : !s32i) { + cir.case (equal, [#cir.int<0> : !s32i]) { cir.scope { %6 = cir.load %1 : !cir.ptr, !s32i %7 = cir.const #cir.int<0> : !s32i @@ -157,7 +162,8 @@ module { } cir.break } - ] + cir.yield + } } %3 = cir.const #cir.int<3> : !s32i cir.store %3, %2 : !s32i, !cir.ptr @@ -165,21 +171,21 @@ module { cir.return %4 : !s32i } // CHECK: llvm.func @shouldLowerNestedBreak - // CHECK: llvm.switch %6 : i32, ^bb7 [ - // CHECK: 0: ^bb2 + // CHECK: llvm.switch %6 : i32, ^[[DEFAULT_BB:.+]] [ + // CHECK: 0: ^[[ZERO_BB:.+]] // CHECK: ] - // CHECK: ^bb2: // pred: ^bb1 - // CHECK: llvm.br ^bb3 - // CHECK: ^bb3: // pred: ^bb2 - // CHECK: llvm.cond_br {{%.*}}, ^bb4, ^bb5 - // CHECK: ^bb4: // pred: ^bb3 - // CHECK: llvm.br ^bb7 - // CHECK: ^bb5: // pred: ^bb3 - // CHECK: llvm.br ^bb6 - // CHECK: ^bb6: // pred: ^bb5 - // CHECK: llvm.br ^bb7 - // CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 - // CHECK: llvm.br ^bb8 - // CHECK: ^bb8: // pred: ^bb7 + // CHECK: ^[[ZERO_BB]]: + // CHECK: llvm.br ^[[ZERO_BB_SUCC:.+]] + // CHECK: ^[[ZERO_BB_SUCC]]: // pred: ^[[ZERO_BB:]] + // CHECK: llvm.cond_br {{%.*}}, ^[[DEFAULT_BB_PRED1:.+]], ^[[DEFAULT_BB_PRED12:.+]] + // CHECK: ^[[DEFAULT_BB_PRED1]]: // pred: ^[[ZERO_BB_SUCC]] + // CHECK: llvm.br ^[[DEFAULT_BB]] + // CHECK: ^[[DEFAULT_BB_PRED12]]: // pred: ^[[ZERO_BB_SUCC]] + // CHECK: llvm.br ^[[DEFAULT_BB_PRED1:.+]] + // CHECK: ^[[DEFAULT_BB_PRED1]]: // pred: ^[[DEFAULT_BB_PRED12]] + // CHECK: llvm.br ^[[DEFAULT_BB]] + // CHECK: ^[[DEFAULT_BB]]: + // CHECK: llvm.br ^[[RET_BB:.+]] + // CHECK: ^[[RET_BB]]: // pred: ^[[DEFAULT_BB]] // CHECK: llvm.return } diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index c9d927b7cae7..715c7525b94d 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -15,8 +15,8 @@ module { %3 = cir.const #cir.int<1> : !s32i cir.store %3, %2 : !s32i, !cir.ptr %4 = cir.load %0 : !cir.ptr, !s32i - cir.switch (%4 : !s32i) [ - case (equal, 0 : !s32i) { + cir.switch (%4 : !s32i) { + cir.case (equal, [#cir.int<0> : !s32i]) { %5 = cir.load %2 : !cir.ptr, !s32i %6 = cir.const #cir.int<1> : !s32i %7 = cir.binop(add, %5, %6) : !s32i @@ -24,8 +24,8 @@ module { cir.br ^bb1 ^bb1: // pred: ^bb0 cir.return - }, - case (equal, 1 : !s32i) { + } + cir.case (equal, [#cir.int<1> : !s32i]) { cir.scope { cir.scope { %5 = cir.load %1 : !cir.ptr, !s32i @@ -40,8 +40,8 @@ module { cir.break } cir.yield - }, - case (equal, 2 : !s32i) { + } + cir.case (equal, [#cir.int<2> : !s32i]) { cir.scope { %5 = cir.alloca !s32i, !cir.ptr, ["yolo", init] {alignment = 4 : i64} %6 = cir.load %2 : !cir.ptr, !s32i @@ -56,20 +56,21 @@ module { } cir.yield } - ] + cir.yield + } } cir.return } -// CHECK: cir.switch (%4 : !s32i) [ -// CHECK-NEXT: case (equal, 0) { +// CHECK: cir.switch (%4 : !s32i) { +// CHECK-NEXT: cir.case(equal, [#cir.int<0> : !s32i]) { // CHECK-NEXT: %5 = cir.load %2 : !cir.ptr, !s32i // CHECK-NEXT: %6 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %7 = cir.binop(add, %5, %6) : !s32i // CHECK-NEXT: cir.store %7, %2 : !s32i, !cir.ptr // CHECK-NEXT: cir.return -// CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 1) { +// CHECK-NEXT: } +// CHECK-NEXT: cir.case(equal, [#cir.int<1> : !s32i]) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.load %1 : !cir.ptr, !s32i @@ -82,8 +83,8 @@ module { // CHECK-NEXT: cir.break // CHECK-NEXT: } // CHECK-NEXT: cir.yield -// CHECK-NEXT: }, -// CHECK-NEXT: case (equal, 2) { +// CHECK-NEXT: } +// CHECK-NEXT: cir.case(equal, [#cir.int<2> : !s32i]) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %5 = cir.alloca !s32i, !cir.ptr, ["yolo", init] {alignment = 4 : i64} // CHECK-NEXT: %6 = cir.load %2 : !cir.ptr, !s32i @@ -96,7 +97,8 @@ module { // CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } -// CHECK-NEXT: ] +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // Should remove empty scopes. cir.func @removeEmptyScope() { @@ -110,8 +112,9 @@ module { // Should remove empty switch-case statements. cir.func @removeEmptySwitch(%arg0: !s32i) { // CHECK: cir.func @removeEmptySwitch - cir.switch (%arg0 : !s32i) [ - ] + cir.switch (%arg0 : !s32i) { + cir.yield + } // CHECK-NOT: cir.switch cir.return // CHECK: cir.return diff --git a/clang/test/CIR/Transforms/switch.cir b/clang/test/CIR/Transforms/switch.cir index 177dfc98c8af..f7cc8fb31196 100644 --- a/clang/test/CIR/Transforms/switch.cir +++ b/clang/test/CIR/Transforms/switch.cir @@ -6,14 +6,15 @@ module { cir.func @shouldFlatSwitchWithDefault(%arg0: !s8i) { - cir.switch (%arg0 : !s8i) [ - case (equal, 1) { + cir.switch (%arg0 : !s8i) { + cir.case (equal, [#cir.int<1> : !s8i]) { cir.break - }, - case (default) { + } + cir.case (default, []) { cir.break } - ] + cir.yield + } cir.return } // CHECK: cir.func @shouldFlatSwitchWithDefault(%arg0: !s8i) { @@ -21,19 +22,20 @@ module { // CHECK: 1: ^bb[[#CASE1:]] // CHECK: ] // CHECK: ^bb[[#CASE1]]: -// CHECK: cir.br ^bb3 -// CHECK: ^bb[[#DEFAULT]]: // CHECK: cir.br ^bb[[#EXIT:]] +// CHECK: ^bb[[#DEFAULT]]: +// CHECK: cir.br ^bb[[#EXIT]] // CHECK: ^bb[[#EXIT]]: // CHECK: cir.return // CHECK: } cir.func @shouldFlatSwitchWithoutDefault(%arg0: !s32i) { - cir.switch (%arg0 : !s32i) [ - case (equal, 1) { + cir.switch (%arg0 : !s32i) { + cir.case (equal, [#cir.int<1> : !s32i]) { cir.break } - ] + cir.yield + } cir.return } // CHECK: cir.func @shouldFlatSwitchWithoutDefault(%arg0: !s32i) { @@ -48,11 +50,12 @@ module { cir.func @shouldFlatSwitchWithImplicitFallthrough(%arg0: !s64i) { - cir.switch (%arg0 : !s64i) [ - case (anyof, [1, 2] : !s64i) { + cir.switch (%arg0 : !s64i) { + cir.case (anyof, [#cir.int<1> : !s64i, #cir.int<2> : !s64i]) { cir.break } - ] + cir.yield + } cir.return } // CHECK: cir.func @shouldFlatSwitchWithImplicitFallthrough(%arg0: !s64i) { @@ -69,14 +72,15 @@ module { cir.func @shouldFlatSwitchWithExplicitFallthrough(%arg0: !s64i) { - cir.switch (%arg0 : !s64i) [ - case (equal, 1 : !s64i) { // case 1 has its own region + cir.switch (%arg0 : !s64i) { + cir.case (equal, [#cir.int<1> : !s64i]) { // case 1 has its own region cir.yield // fallthrough to case 2 - }, - case (equal, 2 : !s64i) { + } + cir.case (equal, [#cir.int<2> : !s64i]) { cir.break } - ] + cir.yield + } cir.return } // CHECK: cir.func @shouldFlatSwitchWithExplicitFallthrough(%arg0: !s64i) { @@ -93,11 +97,12 @@ module { // CHECK: } cir.func @shouldFlatSwitchWithFallthroughToExit(%arg0: !s64i) { - cir.switch (%arg0 : !s64i) [ - case (equal, 1 : !s64i) { + cir.switch (%arg0 : !s64i) { + cir.case (equal, [#cir.int<1> : !s64i]) { cir.yield // fallthrough to exit } - ] + cir.yield + } cir.return } // CHECK: cir.func @shouldFlatSwitchWithFallthroughToExit(%arg0: !s64i) { @@ -111,8 +116,9 @@ module { // CHECK: } cir.func @shouldDropEmptySwitch(%arg0: !s64i) { - cir.switch (%arg0 : !s64i) [ - ] + cir.switch (%arg0 : !s64i) { + cir.yield + } // CHECK-NOT: llvm.switch cir.return } @@ -125,13 +131,14 @@ module { cir.store %arg0, %0 : !s32i, !cir.ptr cir.scope { %1 = cir.load %0 : !cir.ptr, !s32i - cir.switch (%1 : !s32i) [ - case (equal, 3) { + cir.switch (%1 : !s32i) { + cir.case (equal, [#cir.int<3> : !s32i]) { cir.return ^bb1: // no predecessors cir.break } - ] + cir.yield + } } cir.return } @@ -142,16 +149,14 @@ module { // CHECK: cir.br ^bb1 // CHECK: ^bb1: // pred: ^bb0 // CHECK: %1 = cir.load %0 : !cir.ptr, !s32i -// CHECK: cir.switch.flat %1 : !s32i, ^bb4 [ -// CHECK: 3: ^bb2 +// CHECK: cir.switch.flat %1 : !s32i, ^bb[[#DEFAULT:]] [ +// CHECK: 3: ^bb[[#BB1:]] // CHECK: ] -// CHECK: ^bb2: // pred: ^bb1 +// CHECK: ^bb[[#BB1]]: // CHECK: cir.return -// CHECK: ^bb3: // no predecessors -// CHECK: cir.br ^bb4 -// CHECK: ^bb4: // 2 preds: ^bb1, ^bb3 -// CHECK: cir.br ^bb5 -// CHECK: ^bb5: // pred: ^bb4 +// CHECK: ^bb[[#DEFAULT]]: +// CHECK: cir.br ^bb[[#RET_BB:]] +// CHECK: ^bb[[#RET_BB]]: // pred: ^bb[[#DEFAULT]] // CHECK: cir.return // CHECK: } @@ -164,8 +169,8 @@ module { cir.store %arg1, %1 : !s32i, !cir.ptr cir.scope { %5 = cir.load %0 : !cir.ptr, !s32i - cir.switch (%5 : !s32i) [ - case (equal, 0) { + cir.switch (%5 : !s32i) { + cir.case (equal, [#cir.int<0> : !s32i]) { cir.scope { %6 = cir.load %1 : !cir.ptr, !s32i %7 = cir.const #cir.int<0> : !s32i @@ -177,7 +182,8 @@ module { } cir.break } - ] + cir.yield + } } %3 = cir.const #cir.int<3> : !s32i cir.store %3, %2 : !s32i, !cir.ptr @@ -185,23 +191,23 @@ module { cir.return %4 : !s32i } // CHECK: cir.func @shouldFlatNestedBreak(%arg0: !s32i, %arg1: !s32i) -> !s32i { -// CHECK: cir.switch.flat %3 : !s32i, ^bb7 [ -// CHECK: 0: ^bb2 +// CHECK: cir.switch.flat %3 : !s32i, ^bb[[#DEFAULT_BB:]] [ +// CHECK: 0: ^bb[[#BB1:]] // CHECK: ] -// CHECK: ^bb2: // pred: ^bb1 -// CHECK: cir.br ^bb3 -// CHECK: ^bb3: // pred: ^bb2 -// CHECK: cir.brcond {{%.*}} ^bb4, ^bb5 -// CHECK: ^bb4: // pred: ^bb3 -// CHECK: cir.br ^bb7 -// CHECK: ^bb5: // pred: ^bb3 -// CHECK: cir.br ^bb6 -// CHECK: ^bb6: // pred: ^bb5 -// CHECK: cir.br ^bb7 -// CHECK: ^bb7: // 3 preds: ^bb1, ^bb4, ^bb6 -// CHECK: cir.br ^bb8 -// CHECK: ^bb8: // pred: ^bb7 -// CHECK: cir.return %9 : !s32i +// CHECK: ^bb[[#BB1]]: +// CHECK: cir.br ^bb[[#COND_BB:]] +// CHECK: ^bb[[#COND_BB]]: +// CHECK: cir.brcond {{%.*}} ^bb[[#TRUE_BB:]], ^bb[[#FALSE_BB:]] +// CHECK: ^bb[[#TRUE_BB]]: +// CHECK: cir.br ^bb[[#DEFAULT_BB]] +// CHECK: ^bb[[#FALSE_BB]]: +// CHECK: cir.br ^bb[[#PRED_BB:]] +// CHECK: ^bb[[#PRED_BB]]: +// CHECK: cir.br ^bb[[#DEFAULT_BB]] +// CHECK: ^bb[[#DEFAULT_BB]]: +// CHECK: cir.br ^bb[[#RET_BB:]] +// CHECK: ^bb[[#RET_BB]]: +// CHECK: cir.return // CHECK: } @@ -214,23 +220,24 @@ module { cir.store %3, %2 : !s32i, !cir.ptr cir.scope { %6 = cir.load %0 : !cir.ptr, !s32i - cir.switch (%6 : !s32i) [ - case (equal, -100) { + cir.switch (%6 : !s32i) { + cir.case (equal, [#cir.int<-100> : !s32i]) { %7 = cir.const #cir.int<1> : !s32i cir.store %7, %2 : !s32i, !cir.ptr cir.break - }, - case (range, [1, 100] : !s32i) { + } + cir.case (range, [#cir.int<1> : !s32i, #cir.int<100> : !s32i]) { %7 = cir.const #cir.int<2> : !s32i cir.store %7, %2 : !s32i, !cir.ptr cir.break - }, - case (default) { + } + cir.case (default, []) { %7 = cir.const #cir.int<3> : !s32i cir.store %7, %2 : !s32i, !cir.ptr cir.break } - ] + cir.yield + } } %4 = cir.load %2 : !cir.ptr, !s32i cir.store %4, %1 : !s32i, !cir.ptr @@ -241,6 +248,8 @@ module { // CHECK: cir.switch.flat %[[X:[0-9]+]] : !s32i, ^[[JUDGE_RANGE:bb[0-9]+]] [ // CHECK-NEXT: -100: ^[[CASE_EQUAL:bb[0-9]+]] // CHECK-NEXT: ] +// CHECK-NEXT: ^[[UNRACHABLE_BB:.+]]: // no predecessors +// CHECK-NEXT: cir.br ^[[CASE_EQUAL]] // CHECK-NEXT: ^[[CASE_EQUAL]]: // CHECK-NEXT: cir.int<1> // CHECK-NEXT: cir.store From 06a3039cafb2cb40826e11215e7010140a7a9fe6 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 29 Oct 2024 23:24:30 +0300 Subject: [PATCH 1987/2301] [CIR][ABI][AArch64][Lowering] Initial support for return of struct types (#1004) This PR adds a support for return values of a struct type. There are two cases that are not covered by this PR and will be added later. --- clang/include/clang/CIR/MissingFeatures.h | 1 + .../TargetLowering/Targets/AArch64.cpp | 34 +++++++++- .../AArch64/aarch64-cc-structs.c | 62 +++++++++++++++++++ 3 files changed, 96 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 6b82809a742a..db74ee63ee8e 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -230,6 +230,7 @@ struct MissingFeatures { static bool shouldInstrumentFunction() { return false; } static bool xray() { return false; } static bool buildConstrainedFPCall() { return false; } + static bool emitEmptyRecordCheck() { return false; } // Inline assembly static bool asmGoto() { return false; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index b986adb46ae9..586f4a3d22e1 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -5,7 +5,6 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// - #include "clang/CIR/Target/AArch64.h" #include "ABIInfoImpl.h" #include "LowerFunctionInfo.h" @@ -105,6 +104,39 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, : ABIArgInfo::getDirect()); } + uint64_t Size = getContext().getTypeSize(RetTy); + cir_cconv_assert(!::cir::MissingFeatures::emitEmptyRecordCheck()); + cir_cconv_assert( + !::cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); + + // Aggregates <= 16 bytes are returned directly in registers or on the stack. + if (Size <= 128) { + if (Size <= 64 && !getDataLayout().isBigEndian()) { + // Composite types are returned in lower bits of a 64-bit register for LE, + // and in higher bits for BE. However, integer types are always returned + // in lower bits for both LE and BE, and they are not rounded up to + // 64-bits. We can skip rounding up of composite types for LE, but not for + // BE, otherwise composite types will be indistinguishable from integer + // types. + return ABIArgInfo::getDirect( + mlir::cir::IntType::get(LT.getMLIRContext(), Size, false)); + } + + unsigned Alignment = getContext().getTypeAlign(RetTy); + Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes + + // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. + // For aggregates with 16-byte alignment, we use i128. + if (Alignment < 128 && Size == 128) { + mlir::Type baseTy = + mlir::cir::IntType::get(LT.getMLIRContext(), 64, false); + return ABIArgInfo::getDirect( + mlir::cir::ArrayType::get(LT.getMLIRContext(), baseTy, Size / 64)); + } + + cir_cconv_unreachable("NYI"); + } + cir_cconv_unreachable("NYI"); } diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c new file mode 100644 index 000000000000..884580305282 --- /dev/null +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -0,0 +1,62 @@ +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +#include + +typedef struct { + short a; +} LT_64; + +typedef struct { + int64_t a; +} EQ_64; + +typedef struct { + int64_t a; + int b; +} LT_128; + +typedef struct { + int64_t a; + int64_t b; +} EQ_128; + +// CHECK: cir.func {{.*@ret_lt_64}}() -> !u16i +// CHECK: %[[#V0:]] = cir.alloca !ty_LT_64_, !cir.ptr, ["__retval"] +// CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V2:]] = cir.load %[[#V1]] : !cir.ptr, !u16i +// CHECK: cir.return %[[#V2]] : !u16i +LT_64 ret_lt_64() { + LT_64 x; + return x; +} + +// CHECK: cir.func {{.*@ret_eq_64}}() -> !u64i +// CHECK: %[[#V0:]] = cir.alloca !ty_EQ_64_, !cir.ptr, ["__retval"] +// CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V2:]] = cir.load %[[#V1]] : !cir.ptr, !u64i +// CHECK: cir.return %[[#V2]] : !u64i +EQ_64 ret_eq_64() { + EQ_64 x; + return x; +} + +// CHECK: cir.func {{.*@ret_lt_128}}() -> !cir.array +// CHECK: %[[#V0:]] = cir.alloca !ty_LT_128_, !cir.ptr, ["__retval"] +// CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr> +// CHECK: %[[#V2:]] = cir.load %[[#V1]] : !cir.ptr>, !cir.array +// CHECK: cir.return %[[#V2]] : !cir.array +LT_128 ret_lt_128() { + LT_128 x; + return x; +} + +// CHECK: cir.func {{.*@ret_eq_128}}() -> !cir.array +// CHECK: %[[#V0:]] = cir.alloca !ty_EQ_128_, !cir.ptr, ["__retval"] +// CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr> +// CHECK: %[[#V2:]] = cir.load %[[#V1]] : !cir.ptr>, !cir.array +// CHECK: cir.return %[[#V2]] : !cir.array +EQ_128 ret_eq_128() { + EQ_128 x; + return x; +} From d4161857cdf8621b5915e7ff34c23ba90fa43829 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 29 Oct 2024 23:24:37 +0300 Subject: [PATCH 1988/2301] [CIR][Lowering] Supports varargs in the CallingConvention pass (#1005) This PR adds several copy-pasted lines and a small test and now var args seems to work in the calling convention pass --- .../Transforms/TargetLowering/LowerCall.cpp | 9 +++++++-- .../TargetLowering/LowerFunctionInfo.h | 18 ++++++++---------- .../test/CIR/CallConvLowering/x86_64/varargs.c | 17 +++++++++++++++++ 3 files changed, 32 insertions(+), 12 deletions(-) create mode 100644 clang/test/CIR/CallConvLowering/x86_64/varargs.c diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index c314b6f3977c..1f59e5094d18 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -47,8 +47,13 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, cir_cconv_assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; - return LT.arrangeLLVMFunctionInfo(fnType.getReturnType(), opts, - fnType.getInputs(), required); + + SmallVector argTypes; + for (const auto &a : args) + argTypes.push_back(a.getType()); + + return LT.arrangeLLVMFunctionInfo(fnType.getReturnType(), opts, argTypes, + required); } /// Adds the formal parameters in FPT to the given prefix. If any parameter in diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index 394bd2b62951..8da01bc23ada 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -49,12 +49,15 @@ class RequiredArgs { if (!prototype.isVarArg()) return All; - cir_cconv_assert_or_abort(!::cir::MissingFeatures::variadicFunctions(), - "NYI"); - return All; // FIXME(cir): Temporary workaround for the assertion above. + return RequiredArgs(prototype.getNumInputs() + additional); } bool allowsOptionalArgs() const { return NumRequired != ~0U; } + + unsigned getNumRequiredArgs() const { + assert(allowsOptionalArgs()); + return NumRequired; + } }; // Implementation detail of LowerFunctionInfo, factored out so it can be @@ -149,14 +152,9 @@ class LowerFunctionInfo final unsigned arg_size() const { return NumArgs; } - bool isVariadic() const { - cir_cconv_assert(!::cir::MissingFeatures::variadicFunctions()); - return false; - } + bool isVariadic() const { return Required.allowsOptionalArgs(); } unsigned getNumRequiredArgs() const { - if (isVariadic()) - cir_cconv_unreachable("NYI"); - return arg_size(); + return isVariadic() ? Required.getNumRequiredArgs() : arg_size(); } Type getReturnType() const { return getArgsBuffer()[0].type; } diff --git a/clang/test/CIR/CallConvLowering/x86_64/varargs.c b/clang/test/CIR/CallConvLowering/x86_64/varargs.c new file mode 100644 index 000000000000..fdc505aa6c5f --- /dev/null +++ b/clang/test/CIR/CallConvLowering/x86_64/varargs.c @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir + +int printf(const char *str, ...); + +// CHECK: cir.func {{.*@bar}} +// CHECK: %[[#V1:]] = cir.alloca !s32i, !cir.ptr, ["a", init] +// CHECK: %[[#V2:]] = cir.alloca !s32i, !cir.ptr, ["b", init] +// CHECK: cir.store %arg0, %[[#V0]] : !s32i, !cir.ptr +// CHECK: cir.store %arg1, %[[#V1]] : !s32i, !cir.ptr +// CHECK: %[[#V2:]] = cir.get_global @".str" : !cir.ptr> +// CHECK: %[[#V3:]] = cir.cast(array_to_ptrdecay, %[[#V2]] : !cir.ptr>), !cir.ptr +// CHECK: %[[#V4:]] = cir.load %[[#V1]] : !cir.ptr, !s32i +// CHECK: %[[#V5:]] = cir.load %[[#V2]] : !cir.ptr, !s32i +// CHECK: %[[#V6:]] = cir.call @printf(%[[#V3]], %[[#V4]], %[[#V5]]) : (!cir.ptr, !s32i, !s32i) -> !s32i +void bar(int a, int b) { + printf("%d %d\n", a, b); +} From 0847f5dfa85b59378b947a22558649fd26afc5d5 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 29 Oct 2024 21:54:55 -0400 Subject: [PATCH 1989/2301] [CIR][CodeGen][NFC] Add some missing guards for unreachable Reviewers: smeenai Reviewed By: smeenai Pull Request: https://github.com/llvm/clangir/pull/1022 --- clang/include/clang/CIR/MissingFeatures.h | 2 ++ clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 3 +++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 10 +++++++++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 ++ 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index db74ee63ee8e..04f9cee7a2db 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -401,6 +401,8 @@ struct MissingFeatures { // This Itanium bit is currently being skipped in cir. static bool itaniumRecordLayoutBuilderFinishLayout() { return false; } + + static bool mustProgress() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 1f6692ef8163..886c8ace7ef3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -679,6 +679,9 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, else if (D.hasAttr()) llvm_unreachable("llvm.compiler.used metadata is NYI"); + if (CGM.getCodeGenOpts().KeepPersistentStorageVariables) + llvm_unreachable("NYI"); + // From traditional codegen: // We may have to cast the constant because of the initializer // mismatch above. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 44e3c31ba592..0f0f3de66441 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -699,8 +699,16 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, if (Body && isa_and_nonnull(Body)) llvm::append_range(FnArgs, FD->parameters()); + // Ensure that the function adheres to the forward progress guarantee, which + // is required by certain optimizations. + // In C++11 and up, the attribute will be removed if the body contains a + // trivial empty loop. + if (MissingFeatures::mustProgress()) + llvm_unreachable("NYI"); + // Generate the body of the function. // TODO: PGO.assignRegionCounters + assert(!MissingFeatures::shouldInstrumentFunction()); if (isa(FD)) buildDestructorBody(Args); else if (isa(FD)) @@ -751,7 +759,7 @@ mlir::Value CIRGenFunction::createLoad(const VarDecl *VD, const char *Name) { } void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { - // TODO: EmitAsanPrologueOrEpilogue(true); + assert(!MissingFeatures::emitAsanPrologueOrEpilogue()); const auto *Ctor = cast(CurGD.getDecl()); auto CtorType = CurGD.getCtorType(); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 872e5e9e05c5..5a8715ee2246 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2434,6 +2434,8 @@ mlir::cir::FuncOp CIRGenModule::createRuntimeFunction( if (AssumeConvergent) { llvm_unreachable("NYI"); } + if (Local) + llvm_unreachable("NYI"); auto entry = GetOrCreateCIRFunction(Name, Ty, GlobalDecl(), /*ForVtable=*/false); From c2bb0f34dc49ab86adb3c78157be4ebd96f1d764 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 30 Oct 2024 13:08:20 -0400 Subject: [PATCH 1990/2301] [CIR][CodeGen] Store the old CIRGenFunction when popping to a new one We diverge from CodeGen here by delaying the function emission that happens for a global variable. However, due to situations where a global can be emitted while building out a function the old CGF might not be invalid. So we need to store it here just in case. Reviewers: bcardosolopes, smeenai Reviewed By: smeenai Pull Request: https://github.com/llvm/clangir/pull/1023 --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 543ba8b7cfda..4d8174aaa3d1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -18,6 +18,7 @@ #include "clang/AST/GlobalDecl.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/SaveAndRestore.h" #include using namespace clang; @@ -337,7 +338,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, assert(varDecl && " Expected a global declaration!"); CIRGenFunction cgf{*this, builder, true}; - CurCGF = &cgf; + llvm::SaveAndRestore savedCGF(CurCGF, &cgf); CurCGF->CurFn = addr; CIRGenFunction::SourceLocRAIIObject fnLoc{cgf, @@ -421,6 +422,4 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, builder.create(addr->getLoc()); } } - - CurCGF = nullptr; } From 19f89dd98dd0a16df2709ffa16ae7d483fd418c7 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 30 Oct 2024 13:09:01 -0400 Subject: [PATCH 1991/2301] [CIR][CodeGen][NFC] Implement a missing function This was declared but never implemented. Upon first usage in a later commit this fails to link. Reviewers: bcardosolopes, smeenai Reviewed By: smeenai Pull Request: https://github.com/llvm/clangir/pull/1024 --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 90fbdc6277e0..f9fc641cee21 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -707,4 +707,15 @@ void EHScopeStack::pushTerminate() { char *Buffer = allocate(EHTerminateScope::getSize()); new (Buffer) EHTerminateScope(InnermostEHScope); InnermostEHScope = stable_begin(); -} \ No newline at end of file +} + +bool EHScopeStack::containsOnlyLifetimeMarkers( + EHScopeStack::stable_iterator old) const { + for (EHScopeStack::iterator it = begin(); stabilize(it) != old; it++) { + EHCleanupScope *cleanup = dyn_cast(&*it); + if (!cleanup || !cleanup->isLifetimeMarker()) + return false; + } + + return true; +} From 4ad28d29375e2a9d073f37d136b71f9b9e95f022 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 30 Oct 2024 21:14:36 +0300 Subject: [PATCH 1992/2301] [CIR][ABI][Lowering] Supports function pointers in the calling convention lowering pass (#1003) This PR adds initial function pointers support for the calling convention lowering pass. This is a suggestion, so any other ideas are welcome. Several ideas was described in the #995 and basically what I'm trying to do is to generate a clean CIR code without additional `bitcast` operations for function pointers and without mix of lowered and initial function types. Looks like we can not just lower the function type and cast the value since too many operations are involved. For instance, for the next simple code: ``` typedef struct { int a; } S; typedef int (*myfptr)(S); int foo(S s) { return 42 + s.a; } void bar() { myfptr a = foo; } ``` we get the next CIR for the function `bar` , before the calling convention lowering pass: ``` cir.func no_proto @bar() extra(#fn_attr) { %0 = cir.alloca !cir.ptr>, !cir.ptr>>, ["a", init] %1 = cir.get_global @foo : !cir.ptr> cir.store %1, %0 : !cir.ptr>, !cir.ptr>> cir.return } ``` As one can see, first three operations depend on the function type. Once `foo` is lowered, we need to fix `GetGlobalOp`: otherwise the code will fail with the verification error since actual `foo` type (lowered) differs from the one currently expected by the `GetGlobalOp`. First idea would just rewrite only the `GetGlobalOp` and insert a bitcast after, so both `AllocaOp` and `StoreOp` would work witth proper types. Once the code will be more complex, we will need to take care about possible use cases, e.g. if we use arrays, we will need to track array accesses to it as well in order to insert this bitcast every time the array element is needed. One workaround I can think of: we fix the `GetGlobalOp` type and cast from the lowered type to the initial, and cast back before the actual call happens - but it doesn't sound as a good and clean approach (from my point of view, of course). So I suggest to use type converter and rewrite any operation that may deal with function pointers and make sure it has a proper type, and we don't have any unlowered function type in the program after the calling convention lowering pass. I added lowering for `AllocaOp`, `GetGlobalOp`, and split the lowering for `FuncOp` (former `CallConvLoweringPattern`) and lower `CallOp` separately. Frankly speaking, I tried to implement a pattern for each operation, but for some reasons the tests are not passed for windows and macOs in this case - something weird happens inside `applyPatternsAndFold` function. I suspect it's due to two different rewriters used - one in the `LoweringModule` and one in the mentioned function. So I decided to follow the same approach as it's done for the `LoweringPrepare` pass and don't involve this complex rewriting framework. Next I will add a type converter for the struct type, patterns for `ConstantOp` (for const arrays and `GlobalViewAttr`) In the end of the day we'll have (at least I hope so) a clean CIR code without any bitcasts for function pointers. cc @sitio-couto @bcardosolopes --- .../Dialect/Transforms/CallConvLowering.cpp | 126 +++++++++++------- .../test/CIR/CallConvLowering/x86_64/fptrs.c | 18 +++ 2 files changed, 94 insertions(+), 50 deletions(-) create mode 100644 clang/test/CIR/CallConvLowering/x86_64/fptrs.c diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 9026d5135031..9cdd734ef2a5 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -5,12 +5,12 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// - #include "TargetLowering/LowerModule.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Pass/Pass.h" +#include "mlir/Transforms/DialectConversion.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/MissingFeatures.h" @@ -23,50 +23,93 @@ namespace mlir { namespace cir { -//===----------------------------------------------------------------------===// -// Rewrite Patterns -//===----------------------------------------------------------------------===// - -struct CallConvLoweringPattern : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; +FuncType getFuncPointerTy(mlir::Type typ) { + if (auto ptr = dyn_cast(typ)) + return dyn_cast(ptr.getPointee()); + return {}; +} - LogicalResult matchAndRewrite(FuncOp op, - PatternRewriter &rewriter) const final { - llvm::TimeTraceScope scope("Call Conv Lowering Pass", op.getSymName().str()); +bool isFuncPointerTy(mlir::Type typ) { return (bool)getFuncPointerTy(typ); } - const auto module = op->getParentOfType(); +struct CallConvLowering { - auto modOp = op->getParentOfType(); - std::unique_ptr lowerModule = - createLowerModule(modOp, rewriter); + CallConvLowering(ModuleOp module) + : rewriter(module.getContext()), + lowerModule(createLowerModule(module, rewriter)) {} - // Rewrite function calls before definitions. This should be done before - // lowering the definition. + void lower(FuncOp op) { + // Fail the pass on unimplemented function users + const auto module = op->getParentOfType(); auto calls = op.getSymbolUses(module); if (calls.has_value()) { for (auto call : calls.value()) { - // FIXME(cir): Function pointers are ignored. - if (isa(call.getUser())) { + if (auto g = dyn_cast(call.getUser())) + rewriteGetGlobalOp(g); + else if (auto c = dyn_cast(call.getUser())) + lowerDirectCallOp(c, op); + else { cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABIFuncPtr(), "NYI"); - continue; } - - auto callOp = dyn_cast_or_null(call.getUser()); - if (!callOp) - cir_cconv_unreachable("NYI empty callOp"); - if (lowerModule->rewriteFunctionCall(callOp, op).failed()) - return failure(); } } - // TODO(cir): Instead of re-emmiting every load and store, bitcast arguments - // and return values to their ABI-specific counterparts when possible. - if (lowerModule->rewriteFunctionDefinition(op).failed()) - return failure(); + op.walk([&](CallOp c) { + if (c.isIndirect()) + lowerIndirectCallOp(c); + }); - return success(); + lowerModule->rewriteFunctionDefinition(op); } + +private: + FuncType convert(FuncType t) { + auto &typs = lowerModule->getTypes(); + return typs.getFunctionType(typs.arrangeFreeFunctionType(t)); + } + + mlir::Type convert(mlir::Type t) { + if (auto fTy = getFuncPointerTy(t)) + return PointerType::get(rewriter.getContext(), convert(fTy)); + return t; + } + + void bitcast(Value src, Type newTy) { + if (src.getType() != newTy) { + auto cast = + rewriter.create(src.getLoc(), newTy, CastKind::bitcast, src); + rewriter.replaceAllUsesExcept(src, cast, cast); + } + } + + void rewriteGetGlobalOp(GetGlobalOp op) { + auto resTy = op.getResult().getType(); + if (isFuncPointerTy(resTy)) { + rewriter.setInsertionPoint(op); + auto newOp = rewriter.replaceOpWithNewOp(op, convert(resTy), + op.getName()); + rewriter.setInsertionPointAfter(newOp); + bitcast(newOp, resTy); + } + } + + void lowerDirectCallOp(CallOp op, FuncOp callee) { + lowerModule->rewriteFunctionCall(op, callee); + } + + void lowerIndirectCallOp(CallOp op) { + cir_cconv_assert(op.isIndirect()); + + rewriter.setInsertionPoint(op); + auto typ = op.getIndirectCall().getType(); + if (isFuncPointerTy(typ)) { + cir_cconv_unreachable("Indirect calls NYI"); + } + } + +private: + mlir::PatternRewriter rewriter; + std::unique_ptr lowerModule; }; //===----------------------------------------------------------------------===// @@ -81,27 +124,10 @@ struct CallConvLoweringPass StringRef getArgument() const override { return "cir-call-conv-lowering"; }; }; -void populateCallConvLoweringPassPatterns(RewritePatternSet &patterns) { - patterns.add(patterns.getContext()); -} - void CallConvLoweringPass::runOnOperation() { - - // Collect rewrite patterns. - RewritePatternSet patterns(&getContext()); - populateCallConvLoweringPassPatterns(patterns); - - // Collect operations to be considered by the pass. - SmallVector ops; - getOperation()->walk([&](FuncOp op) { ops.push_back(op); }); - - // Configure rewrite to ignore new ops created during the pass. - GreedyRewriteConfig config; - config.strictMode = GreedyRewriteStrictness::ExistingOps; - - // Apply patterns. - if (failed(applyOpPatternsGreedily(ops, std::move(patterns), config))) - signalPassFailure(); + auto module = dyn_cast(getOperation()); + CallConvLowering cc(module); + module.walk([&](FuncOp op) { cc.lower(op); }); } } // namespace cir diff --git a/clang/test/CIR/CallConvLowering/x86_64/fptrs.c b/clang/test/CIR/CallConvLowering/x86_64/fptrs.c new file mode 100644 index 000000000000..47111165d049 --- /dev/null +++ b/clang/test/CIR/CallConvLowering/x86_64/fptrs.c @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -fclangir-call-conv-lowering %s -o - | FileCheck %s + +typedef struct { + int a; +} S; + +typedef int (*myfptr)(S); + +int foo(S s) { return 42 + s.a; } + +// CHECK: cir.func {{.*@bar}} +// CHECK: %[[#V0:]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["a", init] +// CHECK: %[[#V1:]] = cir.get_global @foo : !cir.ptr> +// CHECK: %[[#V2:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr>), !cir.ptr> +// CHECK: cir.store %[[#V2]], %[[#V0]] : !cir.ptr>, !cir.ptr>> +void bar() { + myfptr a = foo; +} From 04c1dca022e91d321c61af9f09f4885578b0d502 Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 30 Oct 2024 14:27:59 -0400 Subject: [PATCH 1993/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqmovun_v (#1012) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 23 +++++++++-- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 41 +++++++++++++++++++ 2 files changed, 60 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index ec75534097cd..a3244a0e5d71 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2215,6 +2215,13 @@ buildCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, llvm::SmallVectorImpl &ops, mlir::Type funcResTy, const clang::CallExpr *e) { CIRGenBuilderTy &builder = cgf.getBuilder(); + if (argTypes.empty()) { + // The most common arg types is {funcResTy, funcResTy} for neon intrinsic + // functions. Thus, it is as default so call site does not need to + // provide it. Every neon intrinsic function has at least one argument, + // Thus empty argTypes really just means {funcResTy, funcResTy}. + argTypes = {funcResTy, funcResTy}; + } mlir::Value res = buildNeonCall(builder, std::move(argTypes), ops, intrincsName, funcResTy, cgf.getLoc(e->getExprLoc())); @@ -2357,6 +2364,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( // This second switch is for the intrinsics that might have a more generic // codegen solution so we can use the common codegen in future. llvm::StringRef intrincsName; + llvm::SmallVector argTypes; switch (builtinID) { default: llvm::errs() << getAArch64SIMDIntrinsicString(builtinID) << " "; @@ -2388,11 +2396,18 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "llvm.aarch64.neon.srhadd"; break; } + case NEON::BI__builtin_neon_vqmovun_v: { + intrincsName = "llvm.aarch64.neon.sqxtun"; + argTypes.push_back(builder.getExtendedOrTruncatedElementVectorType( + vTy, true /* extended */, true /* signed */)); + break; } - if (!intrincsName.empty()) - return buildCommonNeonCallPattern0(*this, intrincsName, {vTy, vTy}, ops, - vTy, e); - return nullptr; + } + + if (intrincsName.empty()) + return nullptr; + return buildCommonNeonCallPattern0(*this, intrincsName, argTypes, ops, vTy, + e); } mlir::Value diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index 4675aee2bc27..b1c22af112c5 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -451,3 +451,44 @@ uint32x4x2_t test_vtrnq_u32(uint32x4_t a, uint32x4_t b) { // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> // LLVM: ret %struct.uint32x4x2_t {{.*}} } + +uint8x8_t test_vqmovun_s16(int16x8_t a) { + return vqmovun_s16(a); + + // CIR-LABEL: vqmovun_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqxtun" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqmovun_s16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VQMOVUN_V1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> [[A]]) + // LLVM: ret <8 x i8> [[VQMOVUN_V1_I]] +} + +uint16x4_t test_vqmovun_s32(int32x4_t a) { + return vqmovun_s32(a); + + // CIR-LABEL: vqmovun_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqxtun" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqmovun_s32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VQMOVUN_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> [[A]]) + // LLVM: [[VQMOVUN_V2_I:%.*]] = bitcast <4 x i16> [[VQMOVUN_V1_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VQMOVUN_V1_I]] +} + +uint32x2_t test_vqmovun_s64(int64x2_t a) { + return vqmovun_s64(a); + + // CIR-LABEL: vqmovun_s64 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqxtun" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqmovun_s64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VQMOVUN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> [[A]]) + // LLVM: [[VQMOVUN_V2_I:%.*]] = bitcast <2 x i32> [[VQMOVUN_V1_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VQMOVUN_V1_I]] +} From f54bc2ab355a45d15c98ff0413849e09d8260b50 Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 30 Oct 2024 14:39:01 -0400 Subject: [PATCH 1994/2301] [CIR][CIRGen] Enable comdat for static variables (#1015) --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 2 +- clang/test/CIR/CodeGen/static-vars.cpp | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 886c8ace7ef3..5646d028ad86 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -492,7 +492,7 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, GV.setAlignment(getASTContext().getDeclAlign(&D).getAsAlign().value()); if (supportsCOMDAT() && GV.isWeakForLinker()) - llvm_unreachable("COMDAT globals are NYI"); + GV.setComdat(true); if (D.getTLSKind()) llvm_unreachable("TLS mode is NYI"); diff --git a/clang/test/CIR/CodeGen/static-vars.cpp b/clang/test/CIR/CodeGen/static-vars.cpp index c1c65bea0748..e0c405521e5d 100644 --- a/clang/test/CIR/CodeGen/static-vars.cpp +++ b/clang/test/CIR/CodeGen/static-vars.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t1.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t1.ll %s void func1(void) { // Should lower default-initialized static vars. @@ -35,3 +37,13 @@ void func2(void) { static float j; // CHECK-DAG: cir.global "private" internal dsolocal @_ZZ5func2vE1j = #cir.fp<0.000000e+00> : !cir.float } + +// CHECK-DAG: cir.global linkonce_odr comdat @_ZZ4testvE1c = #cir.int<0> : !s32i + +// LLVM-DAG: $_ZZ4testvE1c = comdat any +// LLVM-DAG: @_ZZ4testvE1c = linkonce_odr global i32 0, comdat, align 4 + +inline void test() { static int c; } +// CHECK-LABEL: @_Z4testv +// CHECK: {{%.*}} = cir.get_global @_ZZ4testvE1c : !cir.ptr +void foo() { test(); } From fc29c087ed61f87ecd24d649f7b729acd96bac77 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Wed, 30 Oct 2024 11:40:09 -0700 Subject: [PATCH 1995/2301] [CIR][CIRGen] Fix "definition with same mangled name" error (#1016) We had some incorrect logic when creating functions and getting their address which resulted in spurious "definition with the same mangled name" errors. Fix that logic to match original CodeGen, which also fixes these errors. It's expected that decls can appear in the deferred decl list multiple times, and CodeGen has to guard against that. In the case that triggered the error, both `CIRGenerator::HandleInlineFunctionDefinition` and CIRGenModule were deferring the declaration. Something else I discovered here is that we emit these functions in the opposite order as regular CodeGen: https://godbolt.org/z/4PrKG7h9b. That might be a meaningful difference worth investigating further. Fixes https://github.com/llvm/clangir/issues/991 --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 26 +++++++++----------- clang/test/CIR/CodeGen/same-mangled-name.cpp | 15 +++++++++++ 2 files changed, 27 insertions(+), 14 deletions(-) create mode 100644 clang/test/CIR/CodeGen/same-mangled-name.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 5a8715ee2246..cde22e4d7c9b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -607,20 +607,17 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, auto Ty = getTypes().GetFunctionType(FI); // Get or create the prototype for the function. - // if (!V || (V.getValueType() != Ty)) - // TODO(cir): Figure out what to do here? llvm uses a GlobalValue for the - // FuncOp in mlir - Op = GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/true, - ForDefinition); - - auto globalVal = dyn_cast_or_null(Op); - if (globalVal && !globalVal.isDeclaration()) { - // Already emitted. + auto Fn = dyn_cast_if_present(Op); + if (!Fn || Fn.getFunctionType() != Ty) + Fn = GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/true, + ForDefinition); + + // Already emitted. + if (!Fn.isDeclaration()) return; - } - auto Fn = cast(Op); + setFunctionLinkage(GD, Fn); - setGVProperties(Op, D); + setGVProperties(Fn, D); // TODO(cir): MaubeHandleStaticInExternC // TODO(cir): maybeSetTrivialComdat // TODO(cir): setLLVMFunctionFEnvAttributes @@ -633,7 +630,7 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, } CurCGF = nullptr; - setNonAliasAttributes(GD, Op); + setNonAliasAttributes(GD, Fn); setCIRFunctionAttributesForDefinition(D, Fn); if (const ConstructorAttr *CA = D->getAttr()) @@ -2672,7 +2669,8 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // CHeck that GD is not yet in DiagnosedConflictingDefinitions is required // to make sure that we issue and error only once. if (lookupRepresentativeDecl(MangledName, OtherGD) && - (GD.getCanonicalDecl().getDecl()) && + (GD.getCanonicalDecl().getDecl() != + OtherGD.getCanonicalDecl().getDecl()) && DiagnosedConflictingDefinitions.insert(GD).second) { getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name) << MangledName; diff --git a/clang/test/CIR/CodeGen/same-mangled-name.cpp b/clang/test/CIR/CodeGen/same-mangled-name.cpp new file mode 100644 index 000000000000..fe6255cd3640 --- /dev/null +++ b/clang/test/CIR/CodeGen/same-mangled-name.cpp @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +// This would previously emit a "definition with same mangled name as another +// definition" error: https://github.com/llvm/clangir/issues/991. +namespace N { +struct S { + // CHECK: cir.func linkonce_odr @_ZN1N1S3fooEv({{.*}} { + void foo() {} +}; + +// CHECK: cir.func @_ZN1N1fEv() {{.*}} { +// CHECK: cir.call @_ZN1N1S3fooEv( +void f() { S().foo(); } +} // namespace N From 995d78bd15a6fa2b91043ddf79dd17bda49505dc Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 30 Oct 2024 14:56:27 -0400 Subject: [PATCH 1996/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vrshr_n and vrshrq_n to llvm intrinsics (#1020) In this PR, also changed `buildNeonShiftVector` to allow it generates negative shift values. When the shift value is negative, the shift amount vector is not used in any ShiftOp of IR (as they don't need sign to know shift direction), instead, it is just input argument to shift intrinsic function call. --- clang/include/clang/CIR/MissingFeatures.h | 3 - .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 88 +++-- clang/test/CIR/CodeGen/AArch64/neon.c | 337 ++++++++++++------ 3 files changed, 286 insertions(+), 142 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 04f9cee7a2db..91c87dcc04a3 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -172,9 +172,6 @@ struct MissingFeatures { static bool volatileTypes() { return false; } static bool syncScopeID() { return false; } - // AArch64 Neon builtin related. - static bool buildNeonShiftVector() { return false; } - // ABIInfo queries. static bool useTargetLoweringABIInfo() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index a3244a0e5d71..df061eed5b79 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2158,6 +2158,43 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, } } +/// Given a vector of unsigned int type `vecTy`, return a vector type of +/// signed int type with the same element type width and vector size. +static mlir::cir::VectorType getSignedVectorType(CIRGenBuilderTy &builder, + mlir::cir::VectorType vecTy) { + auto elemTy = mlir::cast(vecTy.getEltType()); + elemTy = builder.getSIntNTy(elemTy.getWidth()); + return mlir::cir::VectorType::get(builder.getContext(), elemTy, + vecTy.getSize()); +} + +/// Get integer from a mlir::Value that is an int constant or a constant op. +static int64_t getIntValueFromConstOp(mlir::Value val) { + auto constOp = mlir::cast(val.getDefiningOp()); + return (mlir::cast(constOp.getValue())) + .getValue() + .getSExtValue(); +} + +/// Build a constant shift amount vector of `vecTy` to shift a vector +/// Here `shitfVal` is a constant integer that will be splated into a +/// a const vector of `vecTy` which is the return of this function +static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, + mlir::Value shiftVal, + mlir::cir::VectorType vecTy, + mlir::Location loc, bool neg) { + int shiftAmt = getIntValueFromConstOp(shiftVal); + if (neg) + shiftAmt = -shiftAmt; + llvm::SmallVector vecAttr{ + vecTy.getSize(), + // ConstVectorAttr requires cir::IntAttr + mlir::cir::IntAttr::get(vecTy.getEltType(), shiftAmt)}; + mlir::cir::ConstVectorAttr constVecAttr = mlir::cir::ConstVectorAttr::get( + vecTy, mlir::ArrayAttr::get(builder.getContext(), vecAttr)); + return builder.create(loc, vecTy, constVecAttr); +} + mlir::Value buildNeonCall(CIRGenBuilderTy &builder, llvm::SmallVector argTypes, llvm::SmallVectorImpl &args, @@ -2170,17 +2207,15 @@ mlir::Value buildNeonCall(CIRGenBuilderTy &builder, assert(!MissingFeatures::buildConstrainedFPCall()); if (isConstrainedFPIntrinsic) llvm_unreachable("isConstrainedFPIntrinsic NYI"); - // TODO: Remove the following unreachable and call it in the loop once - // there is an implementation of buildNeonShiftVector - if (shift > 0) - llvm_unreachable("Argument shift NYI"); for (unsigned j = 0; j < argTypes.size(); ++j) { if (isConstrainedFPIntrinsic) { assert(!MissingFeatures::buildConstrainedFPCall()); } if (shift > 0 && shift == j) { - assert(!MissingFeatures::buildNeonShiftVector()); + args[j] = buildNeonShiftVector( + builder, args[j], mlir::cast(argTypes[j]), loc, + rightshift); } else { args[j] = builder.createBitcast(args[j], argTypes[j]); } @@ -2188,20 +2223,11 @@ mlir::Value buildNeonCall(CIRGenBuilderTy &builder, if (isConstrainedFPIntrinsic) { assert(!MissingFeatures::buildConstrainedFPCall()); return nullptr; - } else { - return builder - .create( - loc, builder.getStringAttr(intrinsicName), funcResTy, args) - .getResult(); } -} - -/// Get integer from a mlir::Value that is an int constant or a constant op. -static int64_t getIntValueFromConstOp(mlir::Value val) { - auto constOp = mlir::cast(val.getDefiningOp()); - return (mlir::cast(constOp.getValue())) - .getValue() - .getSExtValue(); + return builder + .create( + loc, builder.getStringAttr(intrinsicName), funcResTy, args) + .getResult(); } /// This function `buildCommonNeonCallPattern0` implements a common way @@ -2229,23 +2255,6 @@ buildCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, return builder.createBitcast(res, resultType); } -/// Build a constant shift amount vector of `vecTy` to shift a vector -/// Here `shitfVal` is a constant integer that will be splated into a -/// a const vector of `vecTy` which is the return of this function -static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, - mlir::Value shiftVal, - mlir::cir::VectorType vecTy, - mlir::Location loc, bool neg) { - int shiftAmt = getIntValueFromConstOp(shiftVal); - llvm::SmallVector vecAttr{ - vecTy.getSize(), - // ConstVectorAttr requires cir::IntAttr - mlir::cir::IntAttr::get(vecTy.getEltType(), shiftAmt)}; - mlir::cir::ConstVectorAttr constVecAttr = mlir::cir::ConstVectorAttr::get( - vecTy, mlir::ArrayAttr::get(builder.getContext(), vecAttr)); - return builder.create(loc, vecTy, constVecAttr); -} - /// Build ShiftOp of vector type whose shift amount is a vector built /// from a constant integer using `buildNeonShiftVector` function static mlir::Value buildCommonNeonShift(CIRGenBuilderTy &builder, @@ -2343,6 +2352,15 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "llvm.aarch64.neon.sqrdmulh.lane", resTy, getLoc(e->getExprLoc())); } + case NEON::BI__builtin_neon_vrshr_n_v: + case NEON::BI__builtin_neon_vrshrq_n_v: { + return buildNeonCall( + builder, {vTy, isUnsigned ? getSignedVectorType(builder, vTy) : vTy}, + ops, isUnsigned ? "llvm.aarch64.neon.urshl" : "llvm.aarch64.neon.srshl", + vTy, getLoc(e->getExprLoc()), false, /* not fp constrained op*/ + 1, /* second arg is shift amount */ + true /* rightshift */); + } case NEON::BI__builtin_neon_vshl_n_v: case NEON::BI__builtin_neon_vshlq_n_v: { mlir::Location loc = getLoc(e->getExprLoc()); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 7a9732b09690..aeb2bc808f27 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -5288,123 +5288,252 @@ uint64x2_t test_vshlq_n_u64(uint64x2_t a) { // return vsraq_n_u64(a, b, 3); // } -// NYI-LABEL: @test_vrshr_n_s8( -// NYI: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %a, <8 x i8> ) -// NYI: ret <8 x i8> [[VRSHR_N]] -// int8x8_t test_vrshr_n_s8(int8x8_t a) { -// return vrshr_n_s8(a, 3); -// } +int8x8_t test_vrshr_n_s8(int8x8_t a) { + return vrshr_n_s8(a, 3); + + // CIR-LABEL: vrshr_n_s8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s8i, #cir.int<-3> : !s8i, + // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, + // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrshr_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> ) -// NYI: ret <4 x i16> [[VRSHR_N1]] -// int16x4_t test_vrshr_n_s16(int16x4_t a) { -// return vrshr_n_s16(a, 3); -// } + // LLVM: {{.*}}@test_vrshr_n_s8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> [[A]], <8 x i8> splat (i8 -3)) + // LLVM: ret <8 x i8> [[VRSHR_N]] +} -// NYI-LABEL: @test_vrshr_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> ) -// NYI: ret <2 x i32> [[VRSHR_N1]] -// int32x2_t test_vrshr_n_s32(int32x2_t a) { -// return vrshr_n_s32(a, 3); -// } +uint8x8_t test_vrshr_n_u8(uint8x8_t a) { + return vrshr_n_u8(a, 3); -// NYI-LABEL: @test_vrshrq_n_s8( -// NYI: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %a, <16 x i8> ) -// NYI: ret <16 x i8> [[VRSHR_N]] -// int8x16_t test_vrshrq_n_s8(int8x16_t a) { -// return vrshrq_n_s8(a, 3); -// } + // CIR-LABEL: vrshr_n_u8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s8i, #cir.int<-3> : !s8i, + // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, + // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrshrq_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> ) -// NYI: ret <8 x i16> [[VRSHR_N1]] -// int16x8_t test_vrshrq_n_s16(int16x8_t a) { -// return vrshrq_n_s16(a, 3); -// } + // LLVM: {{.*}}@test_vrshr_n_u8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> [[A]], <8 x i8> splat (i8 -3)) + // LLVM: ret <8 x i8> [[VRSHR_N]] +} -// NYI-LABEL: @test_vrshrq_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> ) -// NYI: ret <4 x i32> [[VRSHR_N1]] -// int32x4_t test_vrshrq_n_s32(int32x4_t a) { -// return vrshrq_n_s32(a, 3); -// } +int16x4_t test_vrshr_n_s16(int16x4_t a) { + return vrshr_n_s16(a, 3); -// NYI-LABEL: @test_vrshrq_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> ) -// NYI: ret <2 x i64> [[VRSHR_N1]] -// int64x2_t test_vrshrq_n_s64(int64x2_t a) { -// return vrshrq_n_s64(a, 3); -// } + // CIR-LABEL: vrshr_n_s16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, + // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrshr_n_u8( -// NYI: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %a, <8 x i8> ) -// NYI: ret <8 x i8> [[VRSHR_N]] -// uint8x8_t test_vrshr_n_u8(uint8x8_t a) { -// return vrshr_n_u8(a, 3); -// } + // LLVM: {{.*}}@test_vrshr_n_s16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> splat (i16 -3)) + // LLVM: ret <4 x i16> [[VRSHR_N1]] +} -// NYI-LABEL: @test_vrshr_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> ) -// NYI: ret <4 x i16> [[VRSHR_N1]] -// uint16x4_t test_vrshr_n_u16(uint16x4_t a) { -// return vrshr_n_u16(a, 3); -// } +uint16x4_t test_vrshr_n_u16(uint16x4_t a) { + return vrshr_n_u16(a, 3); -// NYI-LABEL: @test_vrshr_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> ) -// NYI: ret <2 x i32> [[VRSHR_N1]] -// uint32x2_t test_vrshr_n_u32(uint32x2_t a) { -// return vrshr_n_u32(a, 3); -// } + // CIR-LABEL: vrshr_n_u16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, + // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrshrq_n_u8( -// NYI: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %a, <16 x i8> ) -// NYI: ret <16 x i8> [[VRSHR_N]] -// uint8x16_t test_vrshrq_n_u8(uint8x16_t a) { -// return vrshrq_n_u8(a, 3); -// } + // LLVM: {{.*}}@test_vrshr_n_u16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> splat (i16 -3)) + // LLVM: ret <4 x i16> [[VRSHR_N1]] +} -// NYI-LABEL: @test_vrshrq_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> ) -// NYI: ret <8 x i16> [[VRSHR_N1]] -// uint16x8_t test_vrshrq_n_u16(uint16x8_t a) { -// return vrshrq_n_u16(a, 3); -// } +int32x2_t test_vrshr_n_s32(int32x2_t a) { + return vrshr_n_s32(a, 3); -// NYI-LABEL: @test_vrshrq_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> ) -// NYI: ret <4 x i32> [[VRSHR_N1]] -// uint32x4_t test_vrshrq_n_u32(uint32x4_t a) { -// return vrshrq_n_u32(a, 3); -// } + // CIR-LABEL: vrshr_n_s32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrshrq_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> ) -// NYI: ret <2 x i64> [[VRSHR_N1]] -// uint64x2_t test_vrshrq_n_u64(uint64x2_t a) { -// return vrshrq_n_u64(a, 3); -// } + // LLVM: {{.*}}@test_vrshr_n_s32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> splat (i32 -3)) + // LLVM: ret <2 x i32> [[VRSHR_N1]] +} + +uint32x2_t test_vrshr_n_u32(uint32x2_t a) { + return vrshr_n_u32(a, 3); + + // CIR-LABEL: vrshr_n_u32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshr_n_u32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> splat (i32 -3)) + // LLVM: ret <2 x i32> [[VRSHR_N1]] +} + +int64x1_t test_vrshr_n_s64(int64x1_t a) { + return vrshr_n_s64(a, 3); + + // CIR-LABEL: vrshr_n_s64 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshr_n_s64(<1 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> splat (i64 -3)) + // LLVM: ret <1 x i64> [[VRSHR_N1]] +} + +uint64x1_t test_vrshr_n_u64(uint64x1_t a) { + return vrshr_n_u64(a, 3); + + // CIR-LABEL: vrshr_n_u64 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshr_n_u64(<1 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> splat (i64 -3)) + // LLVM: ret <1 x i64> [[VRSHR_N1]] +} + +int8x16_t test_vrshrq_n_s8(int8x16_t a) { + return vrshrq_n_s8(a, 3); + + // CIR-LABEL: vrshrq_n_s8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, + // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, + // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, + // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshrq_n_s8(<16 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> [[A]], <16 x i8> splat (i8 -3)) + // LLVM: ret <16 x i8> [[VRSHR_N]] +} + +uint8x16_t test_vrshrq_n_u8(uint8x16_t a) { + return vrshrq_n_u8(a, 3); + + // CIR-LABEL: vrshrq_n_u8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, + // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, + // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, + // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshrq_n_u8(<16 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> [[A]], <16 x i8> splat (i8 -3)) + // LLVM: ret <16 x i8> [[VRSHR_N]] +} + +int16x8_t test_vrshrq_n_s16(int16x8_t a) { + return vrshrq_n_s16(a, 3); + + // CIR-LABEL: vrshrq_n_s16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, + // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshrq_n_s16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> splat (i16 -3)) + // LLVM: ret <8 x i16> [[VRSHR_N1]] +} + +uint16x8_t test_vrshrq_n_u16(uint16x8_t a) { + return vrshrq_n_u16(a, 3); + + // CIR-LABEL: vrshrq_n_u16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, + // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshrq_n_u16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> splat (i16 -3)) + // LLVM: ret <8 x i16> [[VRSHR_N1]] +} + +int32x4_t test_vrshrq_n_s32(int32x4_t a) { + return vrshrq_n_s32(a, 3); + + // CIR-LABEL: vrshrq_n_s32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i, #cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshrq_n_s32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> splat (i32 -3)) + // LLVM: ret <4 x i32> [[VRSHR_N1]] +} + +uint32x4_t test_vrshrq_n_u32(uint32x4_t a) { + return vrshrq_n_u32(a, 3); + + // CIR-LABEL: vrshrq_n_u32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i, + // CIR-SAME: #cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshrq_n_u32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> splat (i32 -3)) + // LLVM: ret <4 x i32> [[VRSHR_N1]] +} + +int64x2_t test_vrshrq_n_s64(int64x2_t a) { + return vrshrq_n_s64(a, 3); + + // CIR-LABEL: vrshrq_n_s64 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i, #cir.int<-3> : !s64i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshrq_n_s64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> splat (i64 -3)) + // LLVM: ret <2 x i64> [[VRSHR_N1]] +} + +uint64x2_t test_vrshrq_n_u64(uint64x2_t a) { + return vrshrq_n_u64(a, 3); + + // CIR-LABEL: vrshrq_n_u64 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i, #cir.int<-3> : !s64i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vrshrq_n_u64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> splat (i64 -3)) + // LLVM: ret <2 x i64> [[VRSHR_N1]] +} // NYI-LABEL: @test_vrsra_n_s8( // NYI: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %b, <8 x i8> ) From 8565855d595715a1d7cb4474e4b652d8eb8dd1da Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 31 Oct 2024 03:34:40 +0800 Subject: [PATCH 1997/2301] [CIR][Dialect][NFC] Fix double whitespaces in `cir.func` assembly (#1028) This PR fixes the notorious double whitespaces introduced by visibility attribute, for `cir.func` only. It uses "leading whitespace" for every print. And the printing of visibility attr is properly guarded by a check of `!isDefault()`. Double whitespaces in test files are removed. --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 31 ++++++++----------- .../CodeGen/attribute-annotate-multiple.cpp | 4 +-- clang/test/CIR/CodeGen/goto.cpp | 6 ++-- clang/test/CIR/CodeGen/temporaries.cpp | 2 +- clang/test/CIR/CodeGen/vtt.cpp | 2 +- clang/test/CIR/Lowering/loops-with-break.cir | 2 +- clang/test/CIR/Transforms/ternary-fold.cpp | 2 +- 7 files changed, 22 insertions(+), 27 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d7eb3b51bc57..20592b495dcb 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2416,44 +2416,39 @@ ::mlir::Region *mlir::cir::FuncOp::getCallableRegion() { } void mlir::cir::FuncOp::print(OpAsmPrinter &p) { - p << ' '; - // When adding a specific keyword here, do not forget to omit it in // printFunctionAttributes below or there will be a syntax error when // parsing if (getBuiltin()) - p << "builtin "; + p << " builtin"; if (getCoroutine()) - p << "coroutine "; + p << " coroutine"; if (getLambda()) - p << "lambda "; + p << " lambda"; if (getNoProto()) - p << "no_proto "; + p << " no_proto"; if (getComdat()) - p << "comdat "; + p << " comdat"; if (getLinkage() != GlobalLinkageKind::ExternalLinkage) - p << stringifyGlobalLinkageKind(getLinkage()) << ' '; + p << ' ' << stringifyGlobalLinkageKind(getLinkage()); auto vis = getVisibility(); if (vis != mlir::SymbolTable::Visibility::Public) - p << vis << " "; + p << ' ' << vis; auto cirVisibilityAttr = getGlobalVisibilityAttr(); - printVisibilityAttr(p, cirVisibilityAttr); - // TODO: This is a problematic space to be handled conditionally by - // printVisibilityAttr which leads often to a double space in the output. But - // it looks like from here we have also switched from adding a conditional - // trailing space to inserting a leading space, to avoid trailing space at - // EOL. - // TODO: Only use the "insert leading space everywhere". - p << " "; + if (!cirVisibilityAttr.isDefault()) { + p << ' '; + printVisibilityAttr(p, cirVisibilityAttr); + } // Print function name, signature, and control. + p << ' '; p.printSymbolName(getSymName()); auto fnType = getFunctionType(); SmallVector resultTypes; @@ -2466,7 +2461,7 @@ void mlir::cir::FuncOp::print(OpAsmPrinter &p) { p, *this, fnType.getInputs(), fnType.isVarArg(), {}); if (mlir::ArrayAttr annotations = getAnnotationsAttr()) { - p << " "; + p << ' '; p.printAttribute(annotations); } diff --git a/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp b/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp index ff970f3919f4..9c360d6bda02 100644 --- a/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp +++ b/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp @@ -27,9 +27,9 @@ void bar() __attribute__((annotate("withargfunc", "os", 22))) { // BEFORE: cir.global external @tile = #cir.int<7> : !s32i // BEFORE-SAME: #cir.annotation] -// BEFORE: cir.func @_Z3fooi(%arg0: !s32i) [#cir.annotation, +// BEFORE: cir.func @_Z3fooi(%arg0: !s32i) [#cir.annotation, // BEFORE-SAME: #cir.annotation] -// BEFORE: cir.func @_Z3barv() [#cir.annotation] +// BEFORE: cir.func @_Z3barv() [#cir.annotation] // AFTER: module {{.*}}attribute-annotate-multiple.cpp" attributes diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index aa7547d306df..840b6227696c 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -284,7 +284,7 @@ void foo() { } } -// NOFLAT: cir.func @_Z3foov() +// NOFLAT: cir.func @_Z3foov() // NOFLAT: cir.scope { // NOFLAT: cir.label "label" // NOFLAT: %0 = cir.alloca !ty_S, !cir.ptr, ["agg.tmp0"] @@ -324,7 +324,7 @@ extern "C" void case_follow_label(int v) { } } -// NOFLAT: cir.func @case_follow_label +// NOFLAT: cir.func @case_follow_label // NOFLAT: cir.switch // NOFLAT: cir.case(equal, [#cir.int<1> : !s32i]) { // NOFLAT: cir.label "label" @@ -348,7 +348,7 @@ extern "C" void default_follow_label(int v) { } } -// NOFLAT: cir.func @default_follow_label +// NOFLAT: cir.func @default_follow_label // NOFLAT: cir.switch // NOFLAT: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // NOFLAT: cir.call @action1() diff --git a/clang/test/CIR/CodeGen/temporaries.cpp b/clang/test/CIR/CodeGen/temporaries.cpp index 589849bf52c3..2fbbe03b2370 100644 --- a/clang/test/CIR/CodeGen/temporaries.cpp +++ b/clang/test/CIR/CodeGen/temporaries.cpp @@ -17,7 +17,7 @@ void f() { // CIR: cir.func private @_ZN1EC1Ev(!cir.ptr) extra(#fn_attr) // CIR-NEXT: cir.func private @_ZN1EntEv(!cir.ptr) -> !ty_E // CIR-NEXT: cir.func private @_ZN1ED1Ev(!cir.ptr) extra(#fn_attr) -// CIR-NEXT: cir.func @_Z1fv() extra(#fn_attr1) { +// CIR-NEXT: cir.func @_Z1fv() extra(#fn_attr1) { // CIR-NEXT: cir.scope { // CIR-NEXT: %[[ONE:[0-9]+]] = cir.alloca !ty_E, !cir.ptr, ["agg.tmp.ensured"] {alignment = 1 : i64} // CIR-NEXT: %[[TWO:[0-9]+]] = cir.alloca !ty_E, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index 25437e295afc..797e94475ede 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -171,7 +171,7 @@ namespace other { } } -// CIR-LABEL: cir.func @_ZN5other1BD1Ev( +// CIR-LABEL: cir.func @_ZN5other1BD1Ev( // CIR-SAME: %[[VAL_0:.*]]: !cir.ptr // CIR: %[[VAL_1:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CIR: cir.store %[[VAL_0]], %[[VAL_1]] : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index 34b6bfd7618e..6a7ef3e8c023 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -166,7 +166,7 @@ module { // [...] // CHECK: } - cir.func @testWhile() { + cir.func @testWhile() { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %1 = cir.const #cir.int<0> : !s32i cir.store %1, %0 : !s32i, !cir.ptr diff --git a/clang/test/CIR/Transforms/ternary-fold.cpp b/clang/test/CIR/Transforms/ternary-fold.cpp index 69934da793df..350502e8b9b7 100644 --- a/clang/test/CIR/Transforms/ternary-fold.cpp +++ b/clang/test/CIR/Transforms/ternary-fold.cpp @@ -33,7 +33,7 @@ int test2(bool cond) { return cond ? x : y; } -// CIR-BEFORE: cir.func @_Z5test2b +// CIR-BEFORE: cir.func @_Z5test2b // CIR-BEFORE: %[[#COND:]] = cir.load %{{.+}} : !cir.ptr, !cir.bool // CIR-BEFORE-NEXT: %{{.+}} = cir.ternary(%[[#COND]], true { // CIR-BEFORE-NEXT: %[[#A:]] = cir.const #cir.int<1> : !s32i From 59d933fceef5e9269a3f0900af97516b387fcf59 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Thu, 31 Oct 2024 03:41:54 +0800 Subject: [PATCH 1998/2301] [CIR][CIRGen] Add support for abs (#1011) This patch introduces support for the abs family of built-in functions (abs, labs, llabs). --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 20 +++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 30 ++++++++++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 16 ++++++++- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 25 ++++++++++---- clang/test/CIR/CodeGen/libc.c | 34 +++++++++++++++++++ clang/test/CIR/Lowering/ThroughMLIR/abs.cir | 23 +++++++++++++ 6 files changed, 137 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/abs.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index d5782ebee3fe..30a7cc198424 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4121,6 +4121,26 @@ def SinOp : UnaryFPToFPBuiltinOp<"sin", "SinOp">; def SqrtOp : UnaryFPToFPBuiltinOp<"sqrt", "SqrtOp">; def TruncOp : UnaryFPToFPBuiltinOp<"trunc", "FTruncOp">; +def AbsOp : CIR_Op<"abs", [Pure, SameOperandsAndResultType]> { + let arguments = (ins PrimitiveSInt:$src, UnitAttr:$poison); + let results = (outs PrimitiveSInt:$result); + let summary = [{ + libc builtin equivalent abs, labs, llabs + + The `poison` argument indicate whether the result value + is a poison value if the first argument is statically or + dynamically an INT_MIN value. + + Example: + + ```mlir + %0 = cir.const #cir.int<-42> : s32i + %1 = cir.abs %0 poison : s32i + ``` + }]; + let assemblyFormat = "$src ( `poison` $poison^ )? `:` type($src) attr-dict"; +} + class BinaryFPToFPBuiltinOp : CIR_Op { let summary = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 3df3ff6ce1ca..d83ecd3d73f9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -855,9 +855,33 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIllabs: case Builtin::BI__builtin_abs: case Builtin::BI__builtin_labs: - case Builtin::BI__builtin_llabs: - llvm_unreachable("Builtin::BIabs like NYI"); - + case Builtin::BI__builtin_llabs: { + bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow); + auto Arg = buildScalarExpr(E->getArg(0)); + mlir::Value Result; + switch (getLangOpts().getSignedOverflowBehavior()) { + case LangOptions::SOB_Defined: { + auto Call = getBuilder().create( + getLoc(E->getExprLoc()), Arg.getType(), Arg, false); + Result = Call->getResult(0); + break; + } + case LangOptions::SOB_Undefined: { + if (!SanitizeOverflow) { + auto Call = getBuilder().create( + getLoc(E->getExprLoc()), Arg.getType(), Arg, true); + Result = Call->getResult(0); + break; + } + llvm_unreachable("BI__builtin_abs with LangOptions::SOB_Undefined when " + "SanitizeOverflow is true"); + } + [[fallthrough]]; + case LangOptions::SOB_Trapping: + llvm_unreachable("BI__builtin_abs with LangOptions::SOB_Trapping"); + } + return RValue::get(Result); + } case Builtin::BI__builtin_complex: { mlir::Value Real = buildScalarExpr(E->getArg(0)); mlir::Value Imag = buildScalarExpr(E->getArg(1)); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 4183f9707efa..cccf538c4b08 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4153,6 +4153,20 @@ class CIRIsFPClassOpLowering return mlir::success(); } }; +class CIRAbsOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AbsOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto resTy = this->getTypeConverter()->convertType(op.getType()); + auto absOp = rewriter.create( + op.getLoc(), resTy, adaptor.getOperands()[0], adaptor.getPoison()); + rewriter.replaceOp(op, absOp); + return mlir::success(); + } +}; void populateCIRToLLVMConversionPatterns( mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter, @@ -4191,7 +4205,7 @@ void populateCIRToLLVMConversionPatterns( CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, CIRFreeExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, CIRBaseClassAddrOpLowering, - CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering + CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 1b3bd7f8f5d8..7238edbbc407 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -46,8 +46,8 @@ #include "mlir/Transforms/DialectConversion.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/LowerToMLIR.h" +#include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/Passes.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Sequence.h" @@ -244,6 +244,17 @@ class CIRFAbsOpLowering : public mlir::OpConversionPattern { return mlir::LogicalResult::success(); } }; +class CIRAbsOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AbsOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); + return mlir::LogicalResult::success(); + } +}; class CIRFloorOpLowering : public mlir::OpConversionPattern { @@ -1324,12 +1335,12 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, CIRCosOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRCastOpLowering, CIRPtrStrideOpLowering, CIRSqrtOpLowering, CIRCeilOpLowering, CIRExp2OpLowering, CIRExpOpLowering, CIRFAbsOpLowering, - CIRFloorOpLowering, CIRLog10OpLowering, CIRLog2OpLowering, - CIRLogOpLowering, CIRRoundOpLowering, CIRPtrStrideOpLowering, - CIRSinOpLowering, CIRShiftOpLowering, CIRBitClzOpLowering, - CIRBitCtzOpLowering, CIRBitPopcountOpLowering, CIRBitClrsbOpLowering, - CIRBitFfsOpLowering, CIRBitParityOpLowering, CIRIfOpLowering, - CIRVectorCreateLowering, CIRVectorInsertLowering, + CIRAbsOpLowering, CIRFloorOpLowering, CIRLog10OpLowering, + CIRLog2OpLowering, CIRLogOpLowering, CIRRoundOpLowering, + CIRPtrStrideOpLowering, CIRSinOpLowering, CIRShiftOpLowering, + CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitPopcountOpLowering, + CIRBitClrsbOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, + CIRIfOpLowering, CIRVectorCreateLowering, CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering>(converter, patterns.getContext()); } diff --git a/clang/test/CIR/CodeGen/libc.c b/clang/test/CIR/CodeGen/libc.c index f6cf6a8e50e6..12a35a379803 100644 --- a/clang/test/CIR/CodeGen/libc.c +++ b/clang/test/CIR/CodeGen/libc.c @@ -1,5 +1,12 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -fwrapv +// RUN: FileCheck --check-prefix=CIR_NO_POSION --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -fwrapv +// RUN: FileCheck --check-prefix=LLVM_NO_POSION --input-file=%t.ll %s // Should generate CIR's builtin memcpy op. void *memcpy(void *, const void *, unsigned long); @@ -19,3 +26,30 @@ float testFabsf(float x) { return fabsf(x); // CHECK: cir.fabs %{{.+}} : !cir.float } + +int abs(int); +int testAbs(int x) { + return abs(x); + // CHECK: cir.abs %{{.+}} poison : !s32i + // LLVM: %{{.+}} = call i32 @llvm.abs.i32(i32 %{{.+}}, i1 true) + // CIR_NO_POSION: cir.abs %{{.+}} : !s32i + // LLVM_NO_POSION: %{{.+}} = call i32 @llvm.abs.i32(i32 %{{.+}}, i1 false) +} + +long labs(long); +long testLabs(long x) { + return labs(x); + // CHECK: cir.abs %{{.+}} poison : !s64i + // LLVM: %{{.+}} = call i64 @llvm.abs.i64(i64 %{{.+}}, i1 true) + // CIR_NO_POSION: cir.abs %{{.+}} : !s64i + // LLVM_NO_POSION: %{{.+}} = call i64 @llvm.abs.i64(i64 %{{.+}}, i1 false) +} + +long long llabs(long long); +long long testLlabs(long long x) { + return llabs(x); + // CHECK: cir.abs %{{.+}} poison : !s64i + // LLVM: %{{.+}} = call i64 @llvm.abs.i64(i64 %{{.+}}, i1 true) + // CIR_NO_POSION: cir.abs %{{.+}} : !s64i + // LLVM_NO_POSION: %{{.+}} = call i64 @llvm.abs.i64(i64 %{{.+}}, i1 false) +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/ThroughMLIR/abs.cir b/clang/test/CIR/Lowering/ThroughMLIR/abs.cir new file mode 100644 index 000000000000..e5a4dd4d095f --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/abs.cir @@ -0,0 +1,23 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir +!s32i = !cir.int +!s64i = !cir.int +module { + cir.func @foo() { + %0 = cir.const #cir.int<-1> : !s32i + %1 = cir.const #cir.int<-2> : !s64i + %4 = cir.abs %0 : !s32i + %5 = cir.abs %1 : !s64i + cir.return + } +} + +// CHECK: module { +// CHECK-NEXT: func.func @foo() { +// CHECK-NEXT: %[[C0:.+]] = arith.constant -1 : i32 +// CHECK-NEXT: %[[C1:.+]] = arith.constant -2 : i64 +// CHECK-NEXT: %{{.+}} = math.absi %[[C0]] : i32 +// CHECK-NEXT: %{{.+}} = math.absi %[[C1]] : i64 +// CHECK-NEXT: return +// CHECK-NEXT: } +// CHECK-NEXT: } From 702ad3149bfe34c26d8ed3bc68ba9bdfebf2f27b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 29 Oct 2024 16:30:20 -0700 Subject: [PATCH 1999/2301] [CIR][Lowering] Transform cir.store of const arrays into cir.copy Add lowering prepare logic to lower stores to cir.copy. This bring LLVM lowering closer to OG and turns out the rest of the compiler understands memcpys better and generate better assembly code for at least arm64 and x86_64. Note that current lowering to memcpy is only using i32 intrinsic version, this PR does not touch that code and that will be addressed in another PR. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 19 ++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 19 ------ .../Dialect/Transforms/LoweringPrepare.cpp | 66 ++++++++++++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 1 + clang/test/CIR/CodeGen/array-init.c | 10 +-- clang/test/CIR/CodeGen/const-array.c | 8 +-- clang/test/CIR/Lowering/array-init.c | 14 ++-- clang/test/CIR/Lowering/store-memcpy.cpp | 21 ++++++ 8 files changed, 124 insertions(+), 34 deletions(-) create mode 100644 clang/test/CIR/Lowering/store-memcpy.cpp diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index bd4c60bb1a61..1fa64cfe7fc6 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -386,6 +386,25 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createAlloca(loc, addrType, type, name, alignmentIntAttr); } + mlir::Value createGetGlobal(mlir::cir::GlobalOp global, + bool threadLocal = false) { + return create( + global.getLoc(), + getPointerTo(global.getSymType(), global.getAddrSpaceAttr()), + global.getName(), threadLocal); + } + + /// Create a copy with inferred length. + mlir::cir::CopyOp createCopy(mlir::Value dst, mlir::Value src, + bool isVolatile = false) { + return create(dst.getLoc(), dst, src, isVolatile); + } + + mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, + mlir::Value src, mlir::Value len) { + return create(loc, dst, src, len); + } + mlir::Value createSub(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, bool hasNSW = false) { auto op = create(lhs.getLoc(), lhs.getType(), diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 9038ee7fca81..d419cf18fa18 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -613,12 +613,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { // -------------------------- // - /// Create a copy with inferred length. - mlir::cir::CopyOp createCopy(mlir::Value dst, mlir::Value src, - bool isVolatile = false) { - return create(dst.getLoc(), dst, src, isVolatile); - } - /// Create a break operation. mlir::cir::BreakOp createBreak(mlir::Location loc) { return create(loc); @@ -629,11 +623,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc); } - mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, - mlir::Value src, mlir::Value len) { - return create(loc, dst, src, len); - } - mlir::Value createNeg(mlir::Value value) { if (auto intTy = mlir::dyn_cast(value.getType())) { @@ -764,14 +753,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { addrSpace); } - mlir::Value createGetGlobal(mlir::cir::GlobalOp global, - bool threadLocal = false) { - return create( - global.getLoc(), - getPointerTo(global.getSymType(), global.getAddrSpaceAttr()), - global.getName(), threadLocal); - } - mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType, mlir::Value addr, mlir::Type storageType, const CIRGenBitFieldInfo &info, diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index ba19c6ec4069..b11a028cbc2f 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -82,6 +82,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerStdFindOp(StdFindOp op); void lowerIterBeginOp(IterBeginOp op); void lowerIterEndOp(IterEndOp op); + void lowerToMemCpy(StoreOp op); void lowerArrayDtor(ArrayDtor op); void lowerArrayCtor(ArrayCtor op); @@ -112,6 +113,10 @@ struct LoweringPreparePass : public LoweringPrepareBase { mlir::cir::GlobalLinkageKind Linkage = mlir::cir::GlobalLinkageKind::ExternalLinkage); + /// Track the current number of global array string count for when the symbol + /// has an empty name, and prevent collisions. + uint64_t annonGlobalConstArrayCount = 0; + /// /// AST related /// ----------- @@ -1029,6 +1034,61 @@ void LoweringPreparePass::lowerArrayDtor(ArrayDtor op) { lowerArrayDtorCtorIntoLoop(builder, op, eltTy, op.getAddr(), arrayLen); } +static std::string getGlobalVarNameForConstString(mlir::cir::StoreOp op, + uint64_t &cnt) { + llvm::SmallString<64> finalName; + llvm::raw_svector_ostream Out(finalName); + + Out << "__const."; + if (auto fnOp = op->getParentOfType()) { + Out << fnOp.getSymNameAttr().getValue() << "."; + } else { + Out << "module."; + } + + auto allocaOp = + dyn_cast_or_null(op.getAddr().getDefiningOp()); + if (allocaOp && !allocaOp.getName().empty()) + Out << allocaOp.getName(); + else + Out << cnt++; + return finalName.c_str(); +} + +void LoweringPreparePass::lowerToMemCpy(StoreOp op) { + // Now that basic filter is done, do more checks before proceding with the + // transformation. + auto cstOp = + dyn_cast_if_present(op.getValue().getDefiningOp()); + if (!cstOp) + return; + + if (!isa(cstOp.getValue())) + return; + CIRBaseBuilderTy builder(getContext()); + + // Create a global which is initialized with the attribute that is either a + // constant array or struct. + assert(!::cir::MissingFeatures::unnamedAddr() && "NYI"); + builder.setInsertionPointToStart(&theModule.getBodyRegion().front()); + std::string globalName = + getGlobalVarNameForConstString(op, annonGlobalConstArrayCount); + mlir::cir::GlobalOp globalCst = buildRuntimeVariable( + builder, globalName, op.getLoc(), op.getValue().getType(), + mlir::cir::GlobalLinkageKind::PrivateLinkage); + globalCst.setInitialValueAttr(cstOp.getValue()); + globalCst.setConstant(true); + + // Transform the store into a cir.copy. + builder.setInsertionPointAfter(op.getOperation()); + mlir::cir::CopyOp memCpy = + builder.createCopy(op.getAddr(), builder.createGetGlobal(globalCst)); + op->replaceAllUsesWith(memCpy); + op->erase(); + if (cstOp->getResult(0).getUsers().empty()) + cstOp->erase(); +} + void LoweringPreparePass::lowerArrayCtor(ArrayCtor op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op.getOperation()); @@ -1122,6 +1182,10 @@ void LoweringPreparePass::runOnOp(Operation *op) { lowerArrayCtor(arrayCtor); } else if (auto arrayDtor = dyn_cast(op)) { lowerArrayDtor(arrayDtor); + } else if (auto storeOp = dyn_cast(op)) { + mlir::Type valTy = storeOp.getValue().getType(); + if (isa(valTy) || isa(valTy)) + lowerToMemCpy(storeOp); } else if (auto fnOp = dyn_cast(op)) { if (auto globalCtor = fnOp.getGlobalCtorAttr()) { globalCtorList.push_back(globalCtor); @@ -1145,7 +1209,7 @@ void LoweringPreparePass::runOnOperation() { op->walk([&](Operation *op) { if (isa(op)) + ArrayCtor, ArrayDtor, mlir::cir::FuncOp, StoreOp>(op)) opsToTransform.push_back(op); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index cccf538c4b08..7193e410e298 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1479,6 +1479,7 @@ class CIRStoreLowering : public mlir::OpConversionPattern { auto ordering = getLLVMMemOrder(memorder); auto alignOpt = op.getAlignment(); unsigned alignment = 0; + if (!alignOpt) { const auto llvmTy = getTypeConverter()->convertType(op.getValue().getType()); diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c index 13999f24a45d..aa4c7f7ea2d0 100644 --- a/clang/test/CIR/CodeGen/array-init.c +++ b/clang/test/CIR/CodeGen/array-init.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s +// CHECK-DAG: cir.global "private" constant cir_private @__const.foo.bar = #cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array typedef struct { int a; long b; @@ -29,14 +30,14 @@ void buz(int x) { void foo() { double bar[] = {9,8,7}; } +// CHECK-LABEL: @foo +// CHECK: %[[DST:.*]] = cir.alloca !cir.array, !cir.ptr>, ["bar"] +// CHECK: %[[SRC:.*]] = cir.get_global @__const.foo.bar : !cir.ptr> +// CHECK: cir.copy %[[SRC]] to %[[DST]] : !cir.ptr> -// CHECK: %0 = cir.alloca !cir.array, !cir.ptr>, ["bar"] {alignment = 16 : i64} -// CHECK-NEXT: %1 = cir.const #cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array -// CHECK-NEXT: cir.store %1, %0 : !cir.array, !cir.ptr> void bar(int a, int b, int c) { int arr[] = {a,b,c}; } - // CHECK: cir.func @bar // CHECK: [[ARR:%.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 4 : i64} // CHECK-NEXT: cir.store %arg0, [[A:%.*]] : !s32i, !cir.ptr @@ -56,7 +57,6 @@ void bar(int a, int b, int c) { void zero_init(int x) { int arr[3] = {x}; } - // CHECK: cir.func @zero_init // CHECK: [[VAR_ALLOC:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK: %1 = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/const-array.c b/clang/test/CIR/CodeGen/const-array.c index 0020d47d9fc3..dd9a68ea2d37 100644 --- a/clang/test/CIR/CodeGen/const-array.c +++ b/clang/test/CIR/CodeGen/const-array.c @@ -12,7 +12,7 @@ void foo() { int a[10] = {1}; } -// CHECK: cir.func {{.*@foo}} -// CHECK: %0 = cir.alloca !cir.array, !cir.ptr>, ["a"] {alignment = 16 : i64} -// CHECK: %1 = cir.const #cir.const_array<[#cir.int<1> : !s32i], trailing_zeros> : !cir.array -// CHECK: cir.store %1, %0 : !cir.array, !cir.ptr> +// CHECK-LABEL: @foo() +// CHECK: %[[ADDR:.*]] = cir.alloca !cir.array, !cir.ptr>, ["a"] +// CHECK: %[[SRC:.*]] = cir.get_global @__const.foo.a : !cir.ptr> +// CHECK: cir.copy %[[SRC]] to %[[ADDR]] : !cir.ptr> \ No newline at end of file diff --git a/clang/test/CIR/Lowering/array-init.c b/clang/test/CIR/Lowering/array-init.c index 0b9a19b5c9ba..dd61ba3234c5 100644 --- a/clang/test/CIR/Lowering/array-init.c +++ b/clang/test/CIR/Lowering/array-init.c @@ -1,7 +1,9 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// LLVM: charInit1.ar = internal global [4 x [4 x i8]] {{.*}}4 x i8] c"aa\00\00", [4 x i8] c"aa\00\00", [4 x i8] c"aa\00\00", [4 x i8] c"aa\00\00"], align 16 +// LLVM-DAG: @__const.charInit3.arr +// LLVM-DAG: @__const.charInit2.arr +// LLVM-DAG: @charInit1.ar = internal global [4 x [4 x i8]] {{.*}}4 x i8] c"aa\00\00", [4 x i8] c"aa\00\00", [4 x i8] c"aa\00\00", [4 x i8] c"aa\00\00"], align 16 char charInit1() { static char ar[][4] = {"aa", "aa", "aa", "aa"}; return ar[0][0]; @@ -14,14 +16,16 @@ void zeroInit() { int a[3] = {0, 0, 0}; } -// LLVM: %1 = alloca [4 x [1 x i8]], i64 1, align 1 -// LLVM: store [4 x [1 x i8]] {{.*}}1 x i8] c"a", [1 x i8] c"b", [1 x i8] c"c", [1 x i8] c"d"], ptr %1, align 1 +// LLVM: %[[PTR:.*]] = alloca [4 x [1 x i8]], i64 1, align 1 +// FIXME: OG uses @llvm.memcpy.p0.p0.i64 +// LLVM: void @llvm.memcpy.p0.p0.i32(ptr %[[PTR]], ptr @__const.charInit2.arr, i32 4, i1 false) void charInit2() { char arr[4][1] = {"a", "b", "c", "d"}; } -// LLVM: %1 = alloca [4 x [2 x i8]], i64 1, align 1 -// LLVM: store [4 x [2 x i8]] {{.*}}2 x i8] c"ab", [2 x i8] c"cd", [2 x i8] c"ef", [2 x i8] c"gh"], ptr %1, align 1 +// LLVM: %[[PTR:.*]] = alloca [4 x [2 x i8]], i64 1, align 1 +// FIXME: OG uses @llvm.memcpy.p0.p0.i64 +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[PTR]], ptr @__const.charInit3.arr, i32 8, i1 false), !dbg !16 void charInit3() { char arr[4][2] = {"ab", "cd", "ef", "gh"}; } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/store-memcpy.cpp b/clang/test/CIR/Lowering/store-memcpy.cpp new file mode 100644 index 000000000000..f53bb3ea5d61 --- /dev/null +++ b/clang/test/CIR/Lowering/store-memcpy.cpp @@ -0,0 +1,21 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t2.cir 2>&1 | FileCheck -check-prefix=AFTER %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +void foo() { + char s1[] = "Hello"; +} +// AFTER-DAG: cir.global "private" constant cir_private @__const._Z3foov.s1 = #cir.const_array<"Hello\00" : !cir.array> : !cir.array +// AFTER: @_Z3foov +// AFTER: %[[S1:.*]] = cir.alloca !cir.array, !cir.ptr>, ["s1"] +// AFTER: %[[HELLO:.*]] = cir.get_global @__const._Z3foov.s1 : !cir.ptr> +// AFTER: cir.copy %[[HELLO]] to %[[S1]] : !cir.ptr> +// AFTER: cir.return +// AFTER: } + +// LLVM: @__const._Z3foov.s1 = private constant [6 x i8] c"Hello\00" +// LLVM: @_Z3foov() +// LLVM: %[[S1:.*]] = alloca [6 x i8], i64 1, align 1 +// FIXME: LLVM OG uses @llvm.memcpy.p0.p0.i64 +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[S1]], ptr @__const._Z3foov.s1, i32 6, i1 false) +// LLVM: ret void \ No newline at end of file From 0bf6a4cec0178e4bb9d0128088df5a19f02a3e94 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Wed, 30 Oct 2024 16:57:17 -0700 Subject: [PATCH 2000/2301] [CIR][CIRGen] Fix typo in test check POSION -> POISON --- clang/test/CIR/CodeGen/libc.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/clang/test/CIR/CodeGen/libc.c b/clang/test/CIR/CodeGen/libc.c index 12a35a379803..526a72427722 100644 --- a/clang/test/CIR/CodeGen/libc.c +++ b/clang/test/CIR/CodeGen/libc.c @@ -4,9 +4,9 @@ // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -fwrapv -// RUN: FileCheck --check-prefix=CIR_NO_POSION --input-file=%t.cir %s +// RUN: FileCheck --check-prefix=CIR_NO_POISON --input-file=%t.cir %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -fwrapv -// RUN: FileCheck --check-prefix=LLVM_NO_POSION --input-file=%t.ll %s +// RUN: FileCheck --check-prefix=LLVM_NO_POISON --input-file=%t.ll %s // Should generate CIR's builtin memcpy op. void *memcpy(void *, const void *, unsigned long); @@ -32,8 +32,8 @@ int testAbs(int x) { return abs(x); // CHECK: cir.abs %{{.+}} poison : !s32i // LLVM: %{{.+}} = call i32 @llvm.abs.i32(i32 %{{.+}}, i1 true) - // CIR_NO_POSION: cir.abs %{{.+}} : !s32i - // LLVM_NO_POSION: %{{.+}} = call i32 @llvm.abs.i32(i32 %{{.+}}, i1 false) + // CIR_NO_POISON: cir.abs %{{.+}} : !s32i + // LLVM_NO_POISON: %{{.+}} = call i32 @llvm.abs.i32(i32 %{{.+}}, i1 false) } long labs(long); @@ -41,8 +41,8 @@ long testLabs(long x) { return labs(x); // CHECK: cir.abs %{{.+}} poison : !s64i // LLVM: %{{.+}} = call i64 @llvm.abs.i64(i64 %{{.+}}, i1 true) - // CIR_NO_POSION: cir.abs %{{.+}} : !s64i - // LLVM_NO_POSION: %{{.+}} = call i64 @llvm.abs.i64(i64 %{{.+}}, i1 false) + // CIR_NO_POISON: cir.abs %{{.+}} : !s64i + // LLVM_NO_POISON: %{{.+}} = call i64 @llvm.abs.i64(i64 %{{.+}}, i1 false) } long long llabs(long long); @@ -50,6 +50,6 @@ long long testLlabs(long long x) { return llabs(x); // CHECK: cir.abs %{{.+}} poison : !s64i // LLVM: %{{.+}} = call i64 @llvm.abs.i64(i64 %{{.+}}, i1 true) - // CIR_NO_POSION: cir.abs %{{.+}} : !s64i - // LLVM_NO_POSION: %{{.+}} = call i64 @llvm.abs.i64(i64 %{{.+}}, i1 false) -} \ No newline at end of file + // CIR_NO_POISON: cir.abs %{{.+}} : !s64i + // LLVM_NO_POISON: %{{.+}} = call i64 @llvm.abs.i64(i64 %{{.+}}, i1 false) +} From b9c5a2757529437b7fcb95e039c13e6d0fcf36ff Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Wed, 30 Oct 2024 17:04:16 -0700 Subject: [PATCH 2001/2301] [CIR][CIRGen] Fix swapped parameters in test Clang recognizes the function anyway, but this is an obvious error. --- clang/test/CIR/CodeGen/libc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/libc.c b/clang/test/CIR/CodeGen/libc.c index 526a72427722..b5bea609b5b2 100644 --- a/clang/test/CIR/CodeGen/libc.c +++ b/clang/test/CIR/CodeGen/libc.c @@ -10,7 +10,7 @@ // Should generate CIR's builtin memcpy op. void *memcpy(void *, const void *, unsigned long); -void testMemcpy(void *src, const void *dst, unsigned long size) { +void testMemcpy(void *dst, const void *src, unsigned long size) { memcpy(dst, src, size); // CHECK: cir.libc.memcpy %{{.+}} bytes from %{{.+}} to %{{.+}} : !u64i, !cir.ptr -> !cir.ptr } From e69bda62493698ab9bff0ca0bc8ca5d33121f06c Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 31 Oct 2024 16:58:29 -0400 Subject: [PATCH 2002/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vtst_v and neon_vtstq_v (#1013) In addition, this PR enables ZeroAttr of vector type so that CIR can generate a vector initialized with all zero values. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 3 +- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 11 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 2 +- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 296 ++++++++++++++++++ 4 files changed, 310 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index d419cf18fa18..61e80a68f0ee 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -603,7 +603,8 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { // TODO: dispatch creation for primitive types. assert((mlir::isa(ty) || - mlir::isa(ty)) && + mlir::isa(ty) || + mlir::isa(ty)) && "NYI for other types"); return create(loc, ty, getZeroAttr(ty)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index df061eed5b79..e920dcd2425b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2377,6 +2377,17 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( ops[0] = builder.createIntCast(ops[0], vTy); return buildCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); } + case NEON::BI__builtin_neon_vtst_v: + case NEON::BI__builtin_neon_vtstq_v: { + mlir::Location loc = getLoc(e->getExprLoc()); + ops[0] = builder.createBitcast(ops[0], ty); + ops[1] = builder.createBitcast(ops[1], ty); + ops[0] = builder.createAnd(ops[0], ops[1]); + // Note that during LLVM Lowering, result of `VecCmpOp` is sign extended, + // matching traditional codegen behavior. + return builder.create( + loc, ty, mlir::cir::CmpOpKind::ne, ops[0], builder.getZero(loc, ty)); + } } // This second switch is for the intrinsics that might have a more generic diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 20592b495dcb..e7d282ee6c43 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -388,7 +388,7 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, if (isa(attrType)) { if (::mlir::isa<::mlir::cir::StructType, ::mlir::cir::ArrayType, - ::mlir::cir::ComplexType>(opType)) + ::mlir::cir::ComplexType, ::mlir::cir::VectorType>(opType)) return success(); return op->emitOpError("zero expects struct or array type"); } diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index b1c22af112c5..62a78324bad8 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -492,3 +492,299 @@ uint32x2_t test_vqmovun_s64(int64x2_t a) { // LLVM: [[VQMOVUN_V2_I:%.*]] = bitcast <2 x i32> [[VQMOVUN_V1_I]] to <8 x i8> // LLVM: ret <2 x i32> [[VQMOVUN_V1_I]] } + +uint8x8_t test_vtst_s8(int8x8_t v1, int8x8_t v2) { + return vtst_s8(v1, v2); + + // CIR-LABEL: vtst_s8 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) : !cir.vector + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtst_s8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = and <8 x i8> [[V1]], [[V2]] + // LLVM: [[TMP1:%.*]] = icmp ne <8 x i8> [[TMP0]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i8> + // LLVM: ret <8 x i8> [[VTST_I]] +} + +uint8x8_t test_vtst_u8(uint8x8_t v1, uint8x8_t v2) { + return vtst_u8(v1, v2); + + // CIR-LABEL: vtst_u8 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) : !cir.vector + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtst_u8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = and <8 x i8> [[V1]], [[V2]] + // LLVM: [[TMP1:%.*]] = icmp ne <8 x i8> [[TMP0]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i8> + // LLVM: ret <8 x i8> [[VTST_I]] +} + +uint16x4_t test_vtst_s16(int16x4_t v1, int16x4_t v2) { + return vtst_s16(v1, v2); + + // CIR-LABEL: vtst_s16 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) : !cir.vector + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtst_s16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = and <4 x i16> [[V1]], [[V2]] + // LLVM: [[TMP3:%.*]] = icmp ne <4 x i16> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i16> + // LLVM: ret <4 x i16> [[VTST_I]] +} + +uint16x4_t test_vtst_u16(uint16x4_t v1, uint16x4_t v2) { + return vtst_u16(v1, v2); + + // CIR-LABEL: vtst_u16 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) : !cir.vector + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtst_u16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = and <4 x i16> [[V1]], [[V2]] + // LLVM: [[TMP3:%.*]] = icmp ne <4 x i16> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i16> + // LLVM: ret <4 x i16> [[VTST_I]] +} + +uint32x2_t test_vtst_s32(int32x2_t v1, int32x2_t v2) { + return vtst_s32(v1, v2); + + // CIR-LABEL: vtst_s32 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) : !cir.vector + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtst_s32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = and <2 x i32> [[V1]], [[V2]] + // LLVM: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i32> + // LLVM: ret <2 x i32> [[VTST_I]] +} + +uint32x2_t test_vtst_u32(uint32x2_t v1, uint32x2_t v2) { + return vtst_u32(v1, v2); + + // CIR-LABEL: vtst_u32 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) : !cir.vector + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtst_u32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = and <2 x i32> [[V1]], [[V2]] + // LLVM: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i32> + // LLVM: ret <2 x i32> [[VTST_I]] +} + +uint64x1_t test_vtst_s64(int64x1_t a, int64x1_t b) { + return vtst_s64(a, b); + + // CIR-LABEL: vtst_s64 + // CIR: [[A:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[B:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[A]], [[B]]) : !cir.vector + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtst_s64(<1 x i64>{{.*}}[[A:%.*]], <1 x i64>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = and <1 x i64> [[A]], [[B]] + // LLVM: [[TMP3:%.*]] = icmp ne <1 x i64> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64> + // LLVM: ret <1 x i64> [[VTST_I]] +} + +uint64x1_t test_vtst_u64(uint64x1_t a, uint64x1_t b) { + return vtst_u64(a, b); + + // CIR-LABEL: vtst_u64 + // CIR: [[A:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[B:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[A]], [[B]]) : !cir.vector + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtst_u64(<1 x i64>{{.*}}[[A:%.*]], <1 x i64>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = and <1 x i64> [[A]], [[B]] + // LLVM: [[TMP3:%.*]] = icmp ne <1 x i64> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64> + // LLVM: ret <1 x i64> [[VTST_I]] +} + +uint8x16_t test_vtstq_s8(int8x16_t v1, int8x16_t v2) { + return vtstq_s8(v1, v2); + + // CIR-LABEL: vtstq_s8 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) : !cir.vector + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtstq_s8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = and <16 x i8> [[V1]], [[V2]] + // LLVM: [[TMP1:%.*]] = icmp ne <16 x i8> [[TMP0]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <16 x i1> [[TMP1]] to <16 x i8> + // LLVM: ret <16 x i8> [[VTST_I]] +} + +uint8x16_t test_vtstq_u8(uint8x16_t v1, uint8x16_t v2) { + return vtstq_u8(v1, v2); + + // CIR-LABEL: vtstq_u8 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) : !cir.vector + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtstq_u8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = and <16 x i8> [[V1]], [[V2]] + // LLVM: [[TMP1:%.*]] = icmp ne <16 x i8> [[TMP0]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <16 x i1> [[TMP1]] to <16 x i8> + // LLVM: ret <16 x i8> [[VTST_I]] +} + +uint16x8_t test_vtstq_s16(int16x8_t v1, int16x8_t v2) { + return vtstq_s16(v1, v2); + + // CIR-LABEL: vtstq_s16 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtstq_s16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8> + // LLVM: [[TMP2:%.*]] = and <8 x i16> [[V1]], [[V2]] + // LLVM: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16> + // LLVM: ret <8 x i16> [[VTST_I]] +} + +uint16x8_t test_vtstq_u16(uint16x8_t v1, uint16x8_t v2) { + return vtstq_u16(v1, v2); + + // CIR-LABEL: vtstq_u16 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtstq_u16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8> + // LLVM: [[TMP2:%.*]] = and <8 x i16> [[V1]], [[V2]] + // LLVM: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16> + // LLVM: ret <8 x i16> [[VTST_I]] +} + +uint32x4_t test_vtstq_s32(int32x4_t v1, int32x4_t v2) { + return vtstq_s32(v1, v2); + + // CIR-LABEL: vtstq_s32 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtstq_s32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8> + // LLVM: [[TMP2:%.*]] = and <4 x i32> [[V1]], [[V2]] + // LLVM: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32> + // LLVM: ret <4 x i32> [[VTST_I]] +} + +uint32x4_t test_vtstq_u32(uint32x4_t v1, uint32x4_t v2) { + return vtstq_u32(v1, v2); + + // CIR-LABEL: vtstq_u32 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtstq_u32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8> + // LLVM: [[TMP2:%.*]] = and <4 x i32> [[V1]], [[V2]] + // LLVM: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32> + // LLVM: ret <4 x i32> [[VTST_I]] +} + +uint64x2_t test_vtstq_s64(int64x2_t v1, int64x2_t v2) { + return vtstq_s64(v1, v2); + + // CIR-LABEL: vtstq_s64 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtstq_s64(<2 x i64>{{.*}}[[V1:%.*]], <2 x i64>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[V2]] to <16 x i8> + // LLVM: [[TMP2:%.*]] = and <2 x i64> [[V1]], [[V2]] + // LLVM: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64> + // LLVM: ret <2 x i64> [[VTST_I]] +} + +uint64x2_t test_vtstq_u64(uint64x2_t v1, uint64x2_t v2) { + return vtstq_u64(v1, v2); + + // CIR-LABEL: vtstq_u64 + // CIR: [[V1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[V2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AND:%.*]] = cir.binop(and, [[V1]], [[V2]]) + // CIR: [[ZERO_VEC:%.*]] = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.vec.cmp(ne, [[AND]], [[ZERO_VEC]]) : !cir.vector, !cir.vector + + // LLVM: {{.*}}test_vtstq_u64(<2 x i64>{{.*}}[[V1:%.*]], <2 x i64>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[V2]] to <16 x i8> + // LLVM: [[TMP2:%.*]] = and <2 x i64> [[V1]], [[V2]] + // LLVM: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer + // LLVM: [[VTST_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64> + // LLVM: ret <2 x i64> [[VTST_I]] +} From dfe0f3cd1aeaa373788fcb5a25fed3727dc8ac3a Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Fri, 1 Nov 2024 05:02:29 +0800 Subject: [PATCH 2003/2301] [CIR][CIRGen] Add support for memmove (#1019) due to the issue described in https://github.com/llvm/clangir/issues/1018, the MLIR lowering for `memmove` has been excluded in this patch. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 47 ++++++++++++++----- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 10 ++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 16 +++++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 17 ------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 17 ++++++- clang/test/CIR/CodeGen/libc.c | 8 ++++ clang/test/CIR/IR/invalid.cir | 19 ++++++-- 7 files changed, 97 insertions(+), 37 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 30a7cc198424..534d4f056ce2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3927,13 +3927,22 @@ def CopyOp : CIR_Op<"copy", } //===----------------------------------------------------------------------===// -// MemCpyOp +// MemCpyOp && MemMoveOp //===----------------------------------------------------------------------===// -def MemCpyOp : CIR_Op<"libc.memcpy"> { - let arguments = (ins Arg:$dst, - Arg:$src, - PrimitiveInt:$len); +class CIR_MemCpyOp: CIR_Op]> { + let arguments = (ins Arg:$dst, + Arg:$src, + PrimitiveUInt:$len); + let hasVerifier = 0; + + let extraClassDeclaration = [{ + /// Returns the byte length type. + mlir::cir::IntType getLenTy() { return getLen().getType(); } + }]; +} + +def MemCpyOp : CIR_MemCpyOp<"libc.memcpy"> { let summary = "Equivalent to libc's `memcpy`"; let description = [{ Given two CIR pointers, `src` and `dst`, `cir.libc.memcpy` will copy `len` @@ -3956,17 +3965,29 @@ def MemCpyOp : CIR_Op<"libc.memcpy"> { $len `bytes` `from` $src `to` $dst attr-dict `:` type($len) `` `,` qualified(type($src)) `->` qualified(type($dst)) }]; - let hasVerifier = 1; +} - let extraClassDeclaration = [{ - /// Returns the data source pointer type. - mlir::cir::PointerType getSrcTy() { return getSrc().getType(); } +def MemMoveOp : CIR_MemCpyOp<"libc.memmove"> { + let summary = "Equivalent to libc's `memmove`"; + let description = [{ + Given two CIR pointers, `src` and `dst`, `cir.libc.memmove` will copy `len` + bytes from the memory pointed by `src` to the memory pointed by `dst`. - /// Returns the data destination pointer type. - mlir::cir::PointerType getDstTy() { return getDst().getType(); } + similiar to `cir.libc.memcpy` but accounts for overlapping memory. - /// Returns the byte length type. - mlir::cir::IntType getLenTy() { return getLen().getType(); } + Examples: + + ```mlir + // Copying 2 bytes from one array to a struct: + %2 = cir.const #cir.int<2> : !u32i + cir.libc.memmove %2 bytes from %arr to %struct : !cir.ptr, !u64i + ``` + }]; + + + let assemblyFormat = [{ + $len `bytes` `from` $src `to` $dst attr-dict + `:` qualified(type($dst)) `,` type($len) }]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 61e80a68f0ee..68abf7c728dc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -624,6 +624,16 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc); } + mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, + mlir::Value src, mlir::Value len) { + return create(loc, dst, src, len); + } + + mlir::cir::MemMoveOp createMemMove(mlir::Location loc, mlir::Value dst, + mlir::Value src, mlir::Value len) { + return create(loc, dst, src, len); + } + mlir::Value createNeg(mlir::Value value) { if (auto intTy = mlir::dyn_cast(value.getType())) { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index d83ecd3d73f9..cf72072e1fb0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1435,9 +1435,19 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin___memmove_chk NYI"); case Builtin::BImemmove: - case Builtin::BI__builtin_memmove: - llvm_unreachable("BImemmove like NYI"); - + case Builtin::BI__builtin_memmove: { + Address Dest = buildPointerWithAlignment(E->getArg(0)); + Address Src = buildPointerWithAlignment(E->getArg(1)); + mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); + buildNonNullArgCheck(RValue::get(Dest.getPointer()), + E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), + FD, 0); + buildNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), + E->getArg(1)->getExprLoc(), FD, 1); + builder.createMemMove(getLoc(E->getSourceRange()), Dest.getPointer(), + Src.getPointer(), SizeVal); + return RValue::get(Dest.getPointer()); + } case Builtin::BImemset: case Builtin::BI__builtin_memset: llvm_unreachable("BImemset like NYI"); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index e7d282ee6c43..d449fea2882c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -3489,23 +3489,6 @@ LogicalResult mlir::cir::CopyOp::verify() { return mlir::success(); } -//===----------------------------------------------------------------------===// -// MemCpyOp Definitions -//===----------------------------------------------------------------------===// - -LogicalResult mlir::cir::MemCpyOp::verify() { - auto voidPtr = - cir::PointerType::get(getContext(), cir::VoidType::get(getContext())); - - if (!getLenTy().isUnsigned()) - return emitError() << "memcpy length must be an unsigned integer"; - - if (getSrcTy() != voidPtr || getDstTy() != voidPtr) - return emitError() << "memcpy src and dst must be void pointers"; - - return mlir::success(); -} - //===----------------------------------------------------------------------===// // GetMemberOp Definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7193e410e298..c8ae9d4e107a 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -713,6 +713,20 @@ class CIRMemCpyOpLowering return mlir::success(); } }; +class CIRMemMoveOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::MemMoveOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLen(), + /*isVolatile=*/false); + return mlir::success(); + } +}; static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, mlir::Value llvmSrc, mlir::Type llvmDstIntTy, @@ -4206,7 +4220,8 @@ void populateCIRToLLVMConversionPatterns( CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, CIRFreeExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, CIRBaseClassAddrOpLowering, - CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering + CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering, + CIRMemMoveOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/test/CIR/CodeGen/libc.c b/clang/test/CIR/CodeGen/libc.c index b5bea609b5b2..d1246d3a1447 100644 --- a/clang/test/CIR/CodeGen/libc.c +++ b/clang/test/CIR/CodeGen/libc.c @@ -15,6 +15,14 @@ void testMemcpy(void *dst, const void *src, unsigned long size) { // CHECK: cir.libc.memcpy %{{.+}} bytes from %{{.+}} to %{{.+}} : !u64i, !cir.ptr -> !cir.ptr } +// Should generate CIR's builtin memmove op. +void *memmove(void *, const void *, unsigned long); +void testMemmove(void *src, const void *dst, unsigned long size) { + memmove(dst, src, size); + // CHECK: cir.libc.memmove %{{.+}} bytes from %{{.+}} to %{{.+}} : !cir.ptr, !u64i + // LLVM: call void @llvm.memmove.{{.+}}.i64(ptr %{{.+}}, ptr %{{.+}}, i64 %{{.+}}, i1 false), +} + double fabs(double); double testFabs(double x) { return fabs(x); diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 215cf1d870f1..f3fe81db5dd9 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -715,7 +715,7 @@ module { module { // Should not memcpy with invalid length type. cir.func @invalid_memcpy_len(%arg0 : !cir.ptr, %arg1 : !s8i) { - // expected-error@+1 {{memcpy length must be an unsigned integer}} + // expected-error@+1 {{'cir.libc.memcpy' op operand #2 must be primitive unsigned int, but got '!cir.int'}} cir.libc.memcpy %arg1 bytes from %arg0 to %arg0 : !s8i, !cir.ptr -> !cir.ptr cir.return } @@ -727,13 +727,26 @@ module { !u32i = !cir.int module { // Should not memcpy non-void pointers. - cir.func @invalid_memcpy_len(%arg0 : !cir.ptr, %arg1 : !u32i) { - // expected-error@+1 {{memcpy src and dst must be void pointers}} + cir.func @invalid_memcpy_pointer_0(%arg0 : !cir.ptr, %arg1 : !u32i) { + // expected-error@+1 {{'cir.libc.memcpy' op operand #0 must be void*, but got '!cir.ptr>'}} cir.libc.memcpy %arg1 bytes from %arg0 to %arg0 : !u32i, !cir.ptr -> !cir.ptr cir.return } } +// ----- + +!s8i = !cir.int +!u32i = !cir.int +module { + // Should not memcpy non-void pointers. + cir.func @invalid_memcpy_pointer_1(%arg0 : !cir.ptr, %arg1 : !cir.ptr, %arg2 : !u32i) { + // expected-error@+1 {{'cir.libc.memcpy' op operand #1 must be void*, but got '!cir.ptr>'}} + cir.libc.memcpy %arg2 bytes from %arg1 to %arg0 : !u32i, !cir.ptr -> !cir.ptr + cir.return + } +} + // ----- !s8i = !cir.int !ty_Init = !cir.struct From b1a6b7b97e8b84e6e2077c54356c807e7f436c30 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 1 Nov 2024 00:05:45 +0300 Subject: [PATCH 2004/2301] [CIR][ABI][AArch64] support for return struct types greater than 128 bits (#1027) This PR adds a support for the return values of struct types > 128 bits in size. As usually, lot's of copy-pasted lines from the original codegen, except the `AllocaOp` replacement for the return value. --- clang/include/clang/CIR/ABIArgInfo.h | 40 ++++++++++++++++ clang/include/clang/CIR/MissingFeatures.h | 1 + .../Transforms/TargetLowering/ABIInfo.cpp | 7 +++ .../Transforms/TargetLowering/ABIInfo.h | 4 ++ .../TargetLowering/CIRToCIRArgMapping.h | 19 +++++++- .../Transforms/TargetLowering/LowerCall.cpp | 2 + .../TargetLowering/LowerFunction.cpp | 48 ++++++++++++++++++- .../Transforms/TargetLowering/LowerTypes.cpp | 7 ++- .../TargetLowering/Targets/AArch64.cpp | 2 +- .../AArch64/aarch64-cc-structs.c | 13 +++++ 10 files changed, 138 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/ABIArgInfo.h b/clang/include/clang/CIR/ABIArgInfo.h index d330b2c3e24d..582feae157b2 100644 --- a/clang/include/clang/CIR/ABIArgInfo.h +++ b/clang/include/clang/CIR/ABIArgInfo.h @@ -103,6 +103,9 @@ class ABIArgInfo { bool InReg : 1; // isDirect() || isExtend() || isIndirect() bool CanBeFlattened : 1; // isDirect() bool SignExt : 1; // isExtend() + bool IndirectByVal : 1; // isIndirect() + bool IndirectRealign : 1; // isIndirect() + bool SRetAfterThis : 1; // isIndirect() bool canHavePaddingType() const { return isDirect() || isExtend() || isIndirect() || isIndirectAliased() || @@ -195,6 +198,43 @@ class ABIArgInfo { static ABIArgInfo getIgnore() { return ABIArgInfo(Ignore); } + static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true, + bool Realign = false, + mlir::Type Padding = nullptr) { + auto AI = ABIArgInfo(Indirect); + AI.setIndirectAlign(Alignment); + AI.setIndirectByVal(ByVal); + AI.setIndirectRealign(Realign); + AI.setSRetAfterThis(false); + AI.setPaddingType(Padding); + return AI; + } + + void setIndirectAlign(unsigned align) { + assert((isIndirect() || isIndirectAliased()) && "Invalid kind!"); + IndirectAttr.Align = align; + } + + void setIndirectByVal(bool IBV) { + assert(isIndirect() && "Invalid kind!"); + IndirectByVal = IBV; + } + + void setIndirectRealign(bool IR) { + assert((isIndirect() || isIndirectAliased()) && "Invalid kind!"); + IndirectRealign = IR; + } + + void setSRetAfterThis(bool AfterThis) { + assert(isIndirect() && "Invalid kind!"); + SRetAfterThis = AfterThis; + } + + bool isSRetAfterThis() const { + assert(isIndirect() && "Invalid kind!"); + return SRetAfterThis; + } + Kind getKind() const { return TheKind; } bool isDirect() const { return TheKind == Direct; } bool isInAlloca() const { return TheKind == InAlloca; } diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 91c87dcc04a3..9b1eb419f3e0 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -268,6 +268,7 @@ struct MissingFeatures { static bool ABIParameterCoercion() { return false; } static bool ABIPointerParameterAttrs() { return false; } static bool ABITransparentUnionHandling() { return false; } + static bool ABIPotentialArgAccess() { return false; } //-- Missing AST queries diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index f5cb64059d32..7ff24be12b35 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -42,5 +42,12 @@ bool ABIInfo::isPromotableIntegerTypeForABI(Type Ty) const { return false; } +::cir::ABIArgInfo ABIInfo::getNaturalAlignIndirect(mlir::Type Ty, bool ByVal, + bool Realign, + mlir::Type Padding) const { + return ::cir::ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty), ByVal, + Realign, Padding); +} + } // namespace cir } // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h index bbcd906e849a..0b67d84570ea 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h @@ -50,6 +50,10 @@ class ABIInfo { // Implement the Type::IsPromotableIntegerType for ABI specific needs. The // only difference is that this considers bit-precise integer types as well. bool isPromotableIntegerTypeForABI(Type Ty) const; + + ::cir::ABIArgInfo getNaturalAlignIndirect(mlir::Type Ty, bool ByVal = true, + bool Realign = false, + mlir::Type Padding = {}) const; }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index 139f279385e6..05c853b875c4 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -29,6 +29,7 @@ namespace cir { /// LoweringFunctionInfo should be passed to actual CIR function. class CIRToCIRArgMapping { static const unsigned InvalidIndex = ~0U; + unsigned SRetArgNo; unsigned TotalIRArgs; /// Arguments of CIR function corresponding to single CIR argument. @@ -51,7 +52,8 @@ class CIRToCIRArgMapping { public: CIRToCIRArgMapping(const CIRLowerContext &context, const LowerFunctionInfo &FI, bool onlyRequiredArgs = false) - : ArgInfo(onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { + : SRetArgNo(InvalidIndex), + ArgInfo(onlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { construct(context, FI, onlyRequiredArgs); }; @@ -69,7 +71,8 @@ class CIRToCIRArgMapping { const ::cir::ABIArgInfo &RetAI = FI.getReturnInfo(); if (RetAI.getKind() == ::cir::ABIArgInfo::Indirect) { - cir_cconv_unreachable("NYI"); + SwapThisWithSRet = RetAI.isSRetAfterThis(); + SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; } unsigned ArgNo = 0; @@ -100,6 +103,11 @@ class CIRToCIRArgMapping { } break; } + case ::cir::ABIArgInfo::Indirect: + case ::cir::ABIArgInfo::IndirectAliased: + IRArgs.NumberOfArgs = 1; + break; + default: cir_cconv_unreachable("Missing ABIArgInfo::Kind"); } @@ -130,6 +138,13 @@ class CIRToCIRArgMapping { return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, ArgInfo[ArgNo].NumberOfArgs); } + + bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } + + unsigned getSRetArgNo() const { + assert(hasSRetArg()); + return SRetArgNo; + } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 1f59e5094d18..54fe89838e82 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -157,6 +157,8 @@ void LowerModule::constructAttributeList(StringRef Name, cir_cconv_assert(!::cir::MissingFeatures::noFPClass()); break; case ABIArgInfo::Ignore: + case ABIArgInfo::Indirect: + cir_cconv_assert(!::cir::MissingFeatures::ABIPotentialArgAccess()); break; default: cir_cconv_unreachable("Missing ABIArgInfo::Kind"); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index fee7a752d7fb..f36522b187fc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -10,7 +10,6 @@ // are adapted to operate on the CIR dialect, however. // //===----------------------------------------------------------------------===// - #include "LowerFunction.h" #include "CIRToCIRArgMapping.h" #include "LowerCall.h" @@ -433,6 +432,23 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, return success(); } +mlir::cir::AllocaOp findAlloca(Operation *op) { + if (!op) + return {}; + + if (auto al = dyn_cast(op)) { + return al; + } else if (auto ret = dyn_cast(op)) { + auto vals = ret.getInput(); + if (vals.size() == 1) + return findAlloca(vals[0].getDefiningOp()); + } else if (auto load = dyn_cast(op)) { + return findAlloca(load.getAddr().getDefiningOp()); + } + + return {}; +} + LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { // NOTE(cir): no-return, naked, and no result functions should be handled in // CIRGen. @@ -446,6 +462,27 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { case ABIArgInfo::Ignore: break; + case ABIArgInfo::Indirect: { + Value RVAddr = {}; + CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), FI, true); + if (IRFunctionArgs.hasSRetArg()) { + auto &entry = NewFn.getBody().front(); + RVAddr = entry.getArgument(IRFunctionArgs.getSRetArgNo()); + } + + if (RVAddr) { + mlir::PatternRewriter::InsertionGuard guard(rewriter); + NewFn->walk([&](ReturnOp ret) { + if (auto al = findAlloca(ret)) { + rewriter.replaceAllUsesWith(al.getResult(), RVAddr); + rewriter.eraseOp(al); + rewriter.replaceOpWithNewOp(ret); + } + }); + } + break; + } + case ABIArgInfo::Extend: case ABIArgInfo::Direct: // FIXME(cir): Should we call ConvertType(RetTy) here? @@ -517,6 +554,15 @@ LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, Block *srcBlock = &oldFn.getBody().front(); Block *dstBlock = &newFn.getBody().front(); + // Ensure both blocks have the same number of arguments in order to + // safely merge them. + CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), FnInfo, true); + if (IRFunctionArgs.hasSRetArg()) { + auto dstIndex = IRFunctionArgs.getSRetArgNo(); + auto retArg = dstBlock->getArguments()[dstIndex]; + srcBlock->insertArgument(dstIndex, retArg.getType(), retArg.getLoc()); + } + // Migrate function body to new ABI-aware function. rewriter.inlineRegionBefore(oldFn.getBody(), newFn.getBody(), newFn.getBody().end()); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index ea9f51f002f6..8ed553a8f7d2 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -50,6 +50,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { resultType = retAI.getCoerceToType(); break; case ::cir::ABIArgInfo::Ignore: + case ::cir::ABIArgInfo::Indirect: resultType = VoidType::get(getMLIRContext()); break; default: @@ -60,7 +61,11 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); // Add type for sret argument. - cir_cconv_assert(!::cir::MissingFeatures::sretArgs()); + if (IRFunctionArgs.hasSRetArg()) { + mlir::Type ret = FI.getReturnType(); + ArgTypes[IRFunctionArgs.getSRetArgNo()] = + mlir::cir::PointerType::get(getMLIRContext(), ret); + } // Add type for inalloca argument. cir_cconv_assert(!::cir::MissingFeatures::inallocaArgs()); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index 586f4a3d22e1..b4e02d8e08fb 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -137,7 +137,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, cir_cconv_unreachable("NYI"); } - cir_cconv_unreachable("NYI"); + return getNaturalAlignIndirect(RetTy); } ABIArgInfo diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index 884580305282..649811a2265a 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -21,6 +21,12 @@ typedef struct { int64_t b; } EQ_128; +typedef struct { + int64_t a; + int64_t b; + int64_t c; +} GT_128; + // CHECK: cir.func {{.*@ret_lt_64}}() -> !u16i // CHECK: %[[#V0:]] = cir.alloca !ty_LT_64_, !cir.ptr, ["__retval"] // CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr @@ -60,3 +66,10 @@ EQ_128 ret_eq_128() { EQ_128 x; return x; } + +// CHECK: cir.func {{.*@ret_gt_128}}(%arg0: !cir.ptr +// CHECK-NOT: cir.return {{%.*}} +GT_128 ret_gt_128() { + GT_128 x; + return x; +} From 456e7b92497f3810921f80c6fc0e82c2c52da90e Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 1 Nov 2024 00:21:40 +0300 Subject: [PATCH 2005/2301] [CIR][ABI][Lowering] Supports call by function pointer in the calling convention lowering pass (#1034) This PR adds a support for calls by function pointers. @sitio-couto I think would be great if you'll also take a look --- .../Dialect/Transforms/CallConvLowering.cpp | 2 +- .../TargetLowering/LowerFunction.cpp | 31 ++++++++++++++++--- .../Transforms/TargetLowering/LowerModule.h | 2 +- .../test/CIR/CallConvLowering/x86_64/fptrs.c | 29 +++++++++++++++++ 4 files changed, 57 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index 9cdd734ef2a5..f29c67210288 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -103,7 +103,7 @@ struct CallConvLowering { rewriter.setInsertionPoint(op); auto typ = op.getIndirectCall().getType(); if (isFuncPointerTy(typ)) { - cir_cconv_unreachable("Indirect calls NYI"); + lowerModule->rewriteFunctionCall(op); } } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index f36522b187fc..a575e1ee2161 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -630,8 +630,16 @@ LogicalResult LowerFunction::rewriteCallOp(CallOp op, // NOTE(cir): There is no direct way to fetch the function type from the // CallOp, so we fetch it from the source function. This assumes the // function definition has not yet been lowered. - cir_cconv_assert(SrcFn && "No source function"); - auto fnType = SrcFn.getFunctionType(); + + FuncType fnType; + if (SrcFn) { + fnType = SrcFn.getFunctionType(); + } else if (op.isIndirect()) { + if (auto ptrTy = dyn_cast(op.getIndirectCall().getType())) + fnType = dyn_cast(ptrTy.getPointee()); + } + + cir_cconv_assert(fnType && "No callee function type"); // Rewrite the call operation to abide to the ABI calling convention. auto Ret = rewriteCallOp(fnType, SrcFn, op, retValSlot); @@ -687,7 +695,7 @@ Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, // // Chain calls use this same code path to add the invisible chain parameter // to the function type. - if (origCallee.getNoProto() || Chain) { + if ((origCallee && origCallee.getNoProto()) || Chain) { cir_cconv_assert_or_abort(::cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); } @@ -870,8 +878,21 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // NOTE(cir): We don't know if the callee was already lowered, so we only // fetch the name from the callee, while the return type is fetch from the // lowering types manager. - CallOp newCallOp = rewriter.create( - loc, Caller.getCalleeAttr(), IRFuncTy.getReturnType(), IRCallArgs); + + CallOp newCallOp; + + if (Caller.isIndirect()) { + rewriter.setInsertionPoint(Caller); + auto val = Caller.getIndirectCall(); + auto ptrTy = PointerType::get(val.getContext(), IRFuncTy); + auto callee = + rewriter.create(val.getLoc(), ptrTy, CastKind::bitcast, val); + newCallOp = rewriter.create(loc, callee, IRFuncTy, IRCallArgs); + } else { + newCallOp = rewriter.create(loc, Caller.getCalleeAttr(), + IRFuncTy.getReturnType(), IRCallArgs); + } + auto extraAttrs = rewriter.getAttr(rewriter.getDictionaryAttr({})); newCallOp->setAttr("extra_attrs", extraAttrs); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index 2d5e928e93f3..07e74fe4dc51 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -96,7 +96,7 @@ class LowerModule { LogicalResult rewriteFunctionDefinition(FuncOp op); // Rewrite CIR CallOp to match the target ABI. - LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp); + LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp = {}); }; std::unique_ptr createLowerModule(ModuleOp module, diff --git a/clang/test/CIR/CallConvLowering/x86_64/fptrs.c b/clang/test/CIR/CallConvLowering/x86_64/fptrs.c index 47111165d049..f2a7538919c2 100644 --- a/clang/test/CIR/CallConvLowering/x86_64/fptrs.c +++ b/clang/test/CIR/CallConvLowering/x86_64/fptrs.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat -fclangir-call-conv-lowering %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fclangir-call-conv-lowering %s -o -| FileCheck %s -check-prefix=LLVM typedef struct { int a; @@ -16,3 +17,31 @@ int foo(S s) { return 42 + s.a; } void bar() { myfptr a = foo; } + +// CHECK: cir.func {{.*@baz}}(%arg0: !s32i +// CHECK: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, [""] {alignment = 4 : i64} +// CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: cir.store %arg0, %[[#V1]] : !s32i, !cir.ptr +// CHECK: %[[#V2:]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["a", init] +// CHECK: %[[#V3:]] = cir.get_global @foo : !cir.ptr> +// CHECK: %[[#V4:]] = cir.cast(bitcast, %[[#V3]] : !cir.ptr>), !cir.ptr> +// CHECK: cir.store %[[#V4]], %[[#V2]] : !cir.ptr>, !cir.ptr>> +// CHECK: %[[#V5:]] = cir.load %[[#V2]] : !cir.ptr>>, !cir.ptr> +// CHECK: %[[#V6:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V7:]] = cir.load %[[#V6]] : !cir.ptr, !s32i +// CHECK: %[[#V8:]] = cir.cast(bitcast, %[[#V5]] : !cir.ptr>), !cir.ptr> +// CHECK: %[[#V9:]] = cir.call %[[#V8]](%[[#V7]]) : (!cir.ptr>, !s32i) -> !s32i + +// LLVM: define dso_local void @baz(i32 %0) +// LLVM: %[[#V1:]] = alloca %struct.S, i64 1 +// LLVM: store i32 %0, ptr %[[#V1]] +// LLVM: %[[#V2:]] = alloca ptr, i64 1 +// LLVM: store ptr @foo, ptr %[[#V2]] +// LLVM: %[[#V3:]] = load ptr, ptr %[[#V2]] +// LLVM: %[[#V4:]] = load i32, ptr %[[#V1]] +// LLVM: %[[#V5:]] = call i32 %[[#V3]](i32 %[[#V4]]) + +void baz(S s) { + myfptr a = foo; + a(s); +} From 1ced7fdcc8d5f4dada769373f1f575792bd01ede Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 31 Oct 2024 17:24:44 -0400 Subject: [PATCH 2006/2301] [CIR][CIRGen][Builtin] Support BI__builtin_operator_new and BI__builtin_operator_delete (#1035) The added test cases are from [OG's counterpart](https://github.com/llvm/clangir/blob/f9c5477ee10c9bc005ffbfe698691cc02193ea81/clang/test/CodeGenCXX/builtin-operator-new-delete.cpp#L7). I changed run option to -std=c++17 to support [std::align_val_t](https://en.cppreference.com/w/cpp/memory/new/align_val_t) --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 7 +++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 17 +++++++++ clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 ++ clang/test/CIR/CodeGen/new-null.cpp | 49 +++++++++++++++++++++++-- 4 files changed, 71 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index cf72072e1fb0..c97f0e382cc1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1838,9 +1838,12 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_function_start: llvm_unreachable("BI__builtin_function_start NYI"); case Builtin::BI__builtin_operator_new: - llvm_unreachable("BI__builtin_operator_new NYI"); + return buildBuiltinNewDeleteCall( + E->getCallee()->getType()->castAs(), E, false); case Builtin::BI__builtin_operator_delete: - llvm_unreachable("BI__builtin_operator_delete NYI"); + buildBuiltinNewDeleteCall( + E->getCallee()->getType()->castAs(), E, true); + return RValue::get(nullptr); case Builtin::BI__builtin_is_aligned: llvm_unreachable("BI__builtin_is_aligned NYI"); case Builtin::BI__builtin_align_up: diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 4bbb3a7ed23f..860c81cf2a9b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -1175,6 +1175,23 @@ static RValue buildNewDeleteCall(CIRGenFunction &CGF, return RV; } +RValue CIRGenFunction::buildBuiltinNewDeleteCall(const FunctionProtoType *type, + const CallExpr *theCall, + bool isDelete) { + CallArgList args; + buildCallArgs(args, type, theCall->arguments()); + // Find the allocation or deallocation function that we're calling. + ASTContext &ctx = getContext(); + DeclarationName name = + ctx.DeclarationNames.getCXXOperatorName(isDelete ? OO_Delete : OO_New); + + for (auto *decl : ctx.getTranslationUnitDecl()->lookup(name)) + if (auto *fd = dyn_cast(decl)) + if (ctx.hasSameType(fd->getType(), QualType(type, 0))) + return buildNewDeleteCall(*this, fd, type, args); + llvm_unreachable("predeclared global operator new/delete is missing"); +} + void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, mlir::Value Ptr, QualType DeleteTy, mlir::Value NumElements, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9da7b671bbb1..7623b52c3695 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -729,6 +729,9 @@ class CIRGenFunction : public CIRGenTypeCache { QualType DeleteTy, mlir::Value NumElements = nullptr, CharUnits CookieSize = CharUnits()); + RValue buildBuiltinNewDeleteCall(const FunctionProtoType *type, + const CallExpr *theCallExpr, bool isDelete); + mlir::Value buildDynamicCast(Address ThisAddr, const CXXDynamicCastExpr *DCE); mlir::Value createLoad(const clang::VarDecl *VD, const char *Name); diff --git a/clang/test/CIR/CodeGen/new-null.cpp b/clang/test/CIR/CodeGen/new-null.cpp index e0fdc7484c1b..773e2c63f85d 100644 --- a/clang/test/CIR/CodeGen/new-null.cpp +++ b/clang/test/CIR/CodeGen/new-null.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-linux-gnu %s -fclangir -emit-cir -o %t.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu %s -fclangir -emit-cir -o %t.cir // RUN: FileCheck --input-file=%t.cir -check-prefix=CIR %s -// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-linux-gnu %s -fclangir -emit-llvm -o %t.ll +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu %s -fclangir -emit-llvm -o %t.ll // RUN: FileCheck --input-file=%t.ll -check-prefix=LLVM %s // TODO: This file is inspired by clang/test/CodeGenCXX/new.cpp, add all tests from it. @@ -16,6 +16,8 @@ void *operator new[](size_t); namespace std { struct nothrow_t {}; + enum class align_val_t : size_t { __zero = 0, + __max = (size_t)-1 }; } std::nothrow_t nothrow; @@ -76,4 +78,45 @@ namespace test15 { void test0b(void *p) { new (p, true) A(); } -} \ No newline at end of file +} + +extern "C" void test_basic() { + __builtin_operator_delete(__builtin_operator_new(4)); + // CIR-LABEL: cir.func @test_basic + // CIR: [[P:%.*]] = cir.call @_Znwm({{%.*}}) : (!u64i) -> !cir.ptr + // CIR: cir.call @_ZdlPv([[P]]) : (!cir.ptr) -> () + // CIR: cir.return + + // LLVM-LABEL: define{{.*}} void @test_basic() + // LLVM: [[P:%.*]] = call ptr @_Znwm(i64 4) + // LLVM: call void @_ZdlPv(ptr [[P]]) + // LLVM: ret void +} + +extern "C" void test_aligned_alloc() { + __builtin_operator_delete(__builtin_operator_new(4, std::align_val_t(4)), std::align_val_t(4)); + + // CIR-LABEL: cir.func @test_aligned_alloc + // CIR: [[P:%.*]] = cir.call @_ZnwmSt11align_val_t({{%.*}}, {{%.*}}) : (!u64i, !u64i) -> !cir.ptr + // CIR: cir.call @_ZdlPvSt11align_val_t([[P]], {{%.*}}) : (!cir.ptr, !u64i) -> () + // CIR: cir.return + + // LLVM-LABEL: define{{.*}} void @test_aligned_alloc() + // LLVM: [[P:%.*]] = call ptr @_ZnwmSt11align_val_t(i64 4, i64 4) + // LLVM: call void @_ZdlPvSt11align_val_t(ptr [[P]], i64 4) + // LLVM: ret void +} + +extern "C" void test_sized_delete() { + __builtin_operator_delete(__builtin_operator_new(4), 4); + + // CIR-LABEL: cir.func @test_sized_delete + // CIR: [[P:%.*]] = cir.call @_Znwm({{%.*}}) : (!u64i) -> !cir.ptr + // CIR: cir.call @_ZdlPvm([[P]], {{%.*}}) : (!cir.ptr, !u64i) -> () + // CIR: cir.return + + // LLVM-LABEL: define{{.*}} void @test_sized_delete() + // LLVM: [[P:%.*]] = call ptr @_Znwm(i64 4) + // LLVM: call void @_ZdlPvm(ptr [[P]], i64 4) + // LLVM: ret void +} From 397f98515ed649d9e1e5ae05a1ffb2a0cf14d464 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 1 Nov 2024 05:26:23 +0800 Subject: [PATCH 2007/2301] [CIR][ABI][Lowering] Add CCLower support for int128 on x86_64 (#1036) This PR adds calling convention lowering support for the int128 type on x86_64. This is a follow up on #953 . --- .../TargetLowering/CIRToCIRArgMapping.h | 2 +- .../TargetLowering/LowerFunction.cpp | 52 ++++++++++++- .../Transforms/TargetLowering/Targets/X86.cpp | 74 ++++++++++++++++++- .../CIR/CallConvLowering/x86_64/int128.cpp | 54 ++++++++++++++ 4 files changed, 175 insertions(+), 7 deletions(-) create mode 100644 clang/test/CIR/CallConvLowering/x86_64/int128.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index 05c853b875c4..12b45a56e881 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -97,7 +97,7 @@ class CIRToCIRArgMapping { cir_cconv_assert(AI.getCoerceToType() && "Missing coerced type!!"); StructType STy = dyn_cast(AI.getCoerceToType()); if (AI.isDirect() && AI.getCanBeFlattened() && STy) { - cir_cconv_unreachable("NYI"); + IRArgs.NumberOfArgs = STy.getNumElements(); } else { IRArgs.NumberOfArgs = 1; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index a575e1ee2161..440a0a129108 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -369,6 +369,12 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, cir_cconv_assert(!::cir::MissingFeatures::vectorType()); + StructType STy = dyn_cast(ArgI.getCoerceToType()); + if (ArgI.isDirect() && !ArgI.getCanBeFlattened() && STy && + STy.getNumElements() > 1) { + cir_cconv_unreachable("NYI"); + } + // Allocate original argument to be "uncoerced". // FIXME(cir): We should have a alloca op builder that does not required // the pointer type to be explicitly passed. @@ -383,10 +389,45 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // Fast-isel and the optimizer generally like scalar values better than // FCAs, so we flatten them if this is safe to do for this argument. - StructType STy = dyn_cast(ArgI.getCoerceToType()); if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && STy.getNumElements() > 1) { - cir_cconv_unreachable("NYI"); + auto ptrType = cast(Ptr.getType()); + llvm::TypeSize structSize = + LM.getTypes().getDataLayout().getTypeAllocSize(STy); + llvm::TypeSize ptrElementSize = + LM.getTypes().getDataLayout().getTypeAllocSize( + ptrType.getPointee()); + if (structSize.isScalable()) { + cir_cconv_unreachable("NYI"); + } else { + uint64_t srcSize = structSize.getFixedValue(); + uint64_t dstSize = ptrElementSize.getFixedValue(); + + Value addrToStoreInto; + if (srcSize <= dstSize) { + addrToStoreInto = rewriter.create( + Ptr.getLoc(), PointerType::get(STy, ptrType.getAddrSpace()), + CastKind::bitcast, Ptr); + } else { + cir_cconv_unreachable("NYI"); + } + + assert(STy.getNumElements() == NumIRArgs); + for (unsigned i = 0, e = STy.getNumElements(); i != e; ++i) { + Value ai = Fn.getArgument(FirstIRArg + i); + Type elementTy = STy.getMembers()[i]; + Value eltPtr = rewriter.create( + ai.getLoc(), + PointerType::get(elementTy, ptrType.getAddrSpace()), + addrToStoreInto, + /*name=*/"", /*index=*/i); + rewriter.create(ai.getLoc(), ai, eltPtr); + } + + if (srcSize > dstSize) { + cir_cconv_unreachable("NYI"); + } + } } else { // Simple case, just do a coerced store of the argument into the alloca. cir_cconv_assert(NumIRArgs == 1); @@ -567,8 +608,13 @@ LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, rewriter.inlineRegionBefore(oldFn.getBody(), newFn.getBody(), newFn.getBody().end()); + // The block arguments of srcBlock are the old function's arguments. At this + // point, all old arguments should be replaced with the lowered values. + // Thus we could safely remove all the block arguments on srcBlock here. + srcBlock->eraseArguments(0, srcBlock->getNumArguments()); + // Merge entry blocks to ensure correct branching. - rewriter.mergeBlocks(srcBlock, dstBlock, newFn.getArguments()); + rewriter.mergeBlocks(srcBlock, dstBlock); // FIXME(cir): What about saving parameters for corotines? Should we do // something about it in this pass? If the change with the calling diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 60ec92ca230c..b50702a5ee68 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -234,7 +234,12 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, if (/*isBuitinType=*/true) { if (isa(Ty)) { Current = Class::NoClass; - } else if (isa(Ty)) { + } else if (auto IntTy = dyn_cast(Ty)) { + if (IntTy.getWidth() == 128) { + Lo = Class::Integer; + Hi = Class::Integer; + return; + } // FIXME(cir): Clang's BuiltinType::Kind allow comparisons (GT, LT, etc). // We should implement this in CIR to simplify the conditions below. @@ -456,6 +461,50 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, std::min(TySizeInBytes - SourceOffset, 8U) * 8, isSigned); } +/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally +/// be used as elements of a two register pair to pass or return, return a +/// first class aggregate to represent them. For example, if the low part of +/// a by-value argument should be passed as i32* and the high part as float, +/// return {i32*, float}. +static mlir::Type GetX86_64ByValArgumentPair(mlir::Type lo, mlir::Type hi, + const ::cir::CIRDataLayout &td) { + // In order to correctly satisfy the ABI, we need to the high part to start + // at offset 8. If the high and low parts we inferred are both 4-byte types + // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have + // the second element at offset 8. Check for this: + unsigned loSize = (unsigned)td.getTypeAllocSize(lo); + llvm::Align highAlign = td.getABITypeAlign(hi); + unsigned highStart = llvm::alignTo(loSize, highAlign); + assert(highStart != 0 && highStart <= 8 && "Invalid x86-64 argument pair!"); + + // To handle this, we have to increase the size of the low part so that the + // second element will start at an 8 byte offset. We can't increase the size + // of the second element because it might make us access off the end of the + // struct. + if (highStart != 8) { + // There are usually two sorts of types the ABI generation code can produce + // for the low part of a pair that aren't 8 bytes in size: half, float or + // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and + // NaCl). + // Promote these to a larger type. + if (isa(lo)) + lo = DoubleType::get(lo.getContext()); + else { + assert((isa(lo)) && "Invalid/unknown lo type"); + // TODO(cir): does the sign of the int64 type matter here? + lo = IntType::get(lo.getContext(), 64, true); + } + } + + auto result = StructType::get(lo.getContext(), {lo, hi}, /*packed=*/false, + StructType::Struct); + + // Verify that the second element is at an 8-byte offset. + assert(td.getStructLayout(result)->getElementOffset(1) == 8 && + "Invalid x86-64 argument pair!"); + return result; +} + ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the // classification algorithm. @@ -507,6 +556,12 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { case Class::NoClass: break; + case Class::Integer: + HighPart = GetINTEGERTypeAtOffset(RetTy, 8, RetTy, 8); + if (Lo == Class::NoClass) // Return HighPart at offset 8 in memory. + return ABIArgInfo::getDirect(HighPart, 8); + break; + default: cir_cconv_unreachable("NYI"); } @@ -515,7 +570,7 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { // known to pass in the high eightbyte of the result. We do this by forming // a first class struct aggregate with the high and low part: {low, high} if (HighPart) - cir_cconv_unreachable("NYI"); + resType = GetX86_64ByValArgumentPair(resType, HighPart, getDataLayout()); return ABIArgInfo::getDirect(resType); } @@ -580,12 +635,25 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, switch (Hi) { case Class::NoClass: break; + + case Class::Integer: + ++neededInt; + // Pick an 8-byte type based on the preferred type. + HighPart = GetINTEGERTypeAtOffset(Ty, 8, Ty, 8); + + if (Lo == Class::NoClass) // Pass HighPart at offset 8 in memory. + return ABIArgInfo::getDirect(HighPart, 8); + break; + default: cir_cconv_unreachable("NYI"); } + // If a high part was specified, merge it together with the low part. It is + // known to pass in the high eightbyte of the result. We do this by forming a + // first class struct aggregate with the high and low part: {low, high} if (HighPart) - cir_cconv_unreachable("NYI"); + ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); return ABIArgInfo::getDirect(ResType); } diff --git a/clang/test/CIR/CallConvLowering/x86_64/int128.cpp b/clang/test/CIR/CallConvLowering/x86_64/int128.cpp new file mode 100644 index 000000000000..a8ca701331f7 --- /dev/null +++ b/clang/test/CIR/CallConvLowering/x86_64/int128.cpp @@ -0,0 +1,54 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +// CHECK: ![[I128_STRUCT:.+]] = !cir.struct + +// CHECK: @_Z5test1nn(%[[ARG0:.+]]: !s64i loc({{.+}}), %[[ARG1:.+]]: !s64i loc({{.+}}), %[[ARG2:.+]]: !s64i loc({{.+}}), %[[ARG3:.+]]: !s64i loc({{.+}})) -> ![[I128_STRUCT]] +// LLVM: define dso_local { i64, i64 } @_Z5test1nn(i64 %[[#A_LO:]], i64 %[[#A_HI:]], i64 %[[#B_LO:]], i64 %[[#B_HI:]]) +__int128 test1(__int128 a, __int128 b) { + // CHECK: %[[#SLOT_A:]] = cir.alloca !s128i, !cir.ptr + // CHECK-NEXT: %[[#SLOT_A2:]] = cir.cast(bitcast, %[[#SLOT_A]] : !cir.ptr), !cir.ptr + // CHECK-NEXT: %[[#SLOT_A_LO:]] = cir.get_member %[[#SLOT_A2]][0] {name = ""} : !cir.ptr -> !cir.ptr + // CHECK-NEXT: cir.store %[[ARG0]], %[[#SLOT_A_LO]] : !s64i, !cir.ptr + // CHECK-NEXT: %[[#SLOT_A_HI:]] = cir.get_member %[[#SLOT_A2]][1] {name = ""} : !cir.ptr -> !cir.ptr + // CHECK-NEXT: cir.store %arg1, %[[#SLOT_A_HI]] : !s64i, !cir.ptr + // CHECK-NEXT: %[[#SLOT_B:]] = cir.alloca !s128i, !cir.ptr + // CHECK-NEXT: %[[#SLOT_B2:]] = cir.cast(bitcast, %[[#SLOT_B]] : !cir.ptr), !cir.ptr + // CHECK-NEXT: %[[#SLOT_B_LO:]] = cir.get_member %[[#SLOT_B2]][0] {name = ""} : !cir.ptr -> !cir.ptr + // CHECK-NEXT: cir.store %arg2, %[[#SLOT_B_LO]] : !s64i, !cir.ptr + // CHECK-NEXT: %[[#SLOT_B_HI:]] = cir.get_member %[[#SLOT_B2]][1] {name = ""} : !cir.ptr -> !cir.ptr + // CHECK-NEXT: cir.store %arg3, %[[#SLOT_B_HI]] : !s64i, !cir.ptr + // CHECK-NEXT: %[[#SLOT_RET:]] = cir.alloca !s128i, !cir.ptr, ["__retval"] + + // LLVM: %[[#A_SLOT:]] = alloca i128, i64 1, align 4 + // LLVM-NEXT: %[[#A_SLOT_LO:]] = getelementptr { i64, i64 }, ptr %[[#A_SLOT]], i32 0, i32 0 + // LLVM-NEXT: store i64 %[[#A_LO]], ptr %[[#A_SLOT_LO]], align 8 + // LLVM-NEXT: %[[#A_SLOT_HI:]] = getelementptr { i64, i64 }, ptr %[[#A_SLOT]], i32 0, i32 1 + // LLVM-NEXT: store i64 %[[#A_HI]], ptr %[[#A_SLOT_HI]], align 8 + // LLVM-NEXT: %[[#B_SLOT:]] = alloca i128, i64 1, align 4 + // LLVM-NEXT: %[[#B_SLOT_LO:]] = getelementptr { i64, i64 }, ptr %[[#B_SLOT]], i32 0, i32 0 + // LLVM-NEXT: store i64 %[[#B_LO]], ptr %[[#B_SLOT_LO]], align 8 + // LLVM-NEXT: %[[#B_SLOT_HI:]] = getelementptr { i64, i64 }, ptr %[[#B_SLOT]], i32 0, i32 1 + // LLVM-NEXT: store i64 %[[#B_HI]], ptr %[[#B_SLOT_HI]], align 8 + // LLVM-NEXT: %[[#RET_SLOT:]] = alloca i128, i64 1, align 16 + + return a + b; + // CHECK: %[[#A:]] = cir.load %[[#SLOT_A]] : !cir.ptr, !s128i + // CHECK-NEXT: %[[#B:]] = cir.load %[[#SLOT_B]] : !cir.ptr, !s128i + // CHECK-NEXT: %[[#SUM:]] = cir.binop(add, %[[#A]], %[[#B]]) nsw : !s128i + // CHECK-NEXT: cir.store %[[#SUM]], %[[#SLOT_RET]] : !s128i, !cir.ptr + + // LLVM: %[[#A:]] = load i128, ptr %5, align 16 + // LLVM-NEXT: %[[#B:]] = load i128, ptr %8, align 16 + // LLVM-NEXT: %[[#SUM:]] = add nsw i128 %[[#A]], %[[#B]] + // LLVM-NEXT: store i128 %[[#SUM]], ptr %[[#RET_SLOT]], align 16 + + // CHECK: %[[#SLOT_RET2:]] = cir.cast(bitcast, %[[#SLOT_RET]] : !cir.ptr), !cir.ptr + // CHECK-NEXT: %[[#RET:]] = cir.load %[[#SLOT_RET2]] : !cir.ptr, ![[I128_STRUCT]] + // CHECK-NEXT: cir.return %[[#RET]] : ![[I128_STRUCT]] + + // LLVM: %[[#RET:]] = load { i64, i64 }, ptr %[[#RET_SLOT]], align 8 + // LLVM-NEXT: ret { i64, i64 } %[[#RET]] +} From 26e5b741c0be85e2ae089a30b9048ad0880a15b0 Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 31 Oct 2024 17:26:56 -0400 Subject: [PATCH 2008/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqshlu_n and neon_vqshluq_n (#1037) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 33 ++-- clang/test/CIR/CodeGen/AArch64/neon.c | 153 ++++++++++++------ 2 files changed, 125 insertions(+), 61 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index e920dcd2425b..a36a6c7389f1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2158,12 +2158,15 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, } } -/// Given a vector of unsigned int type `vecTy`, return a vector type of -/// signed int type with the same element type width and vector size. -static mlir::cir::VectorType getSignedVectorType(CIRGenBuilderTy &builder, - mlir::cir::VectorType vecTy) { +/// Given a vector of int type `vecTy`, return a vector type of +/// int type with the same element type width, different signedness, +/// and the same vector size. +static mlir::cir::VectorType +getSignChangedVectorType(CIRGenBuilderTy &builder, + mlir::cir::VectorType vecTy) { auto elemTy = mlir::cast(vecTy.getEltType()); - elemTy = builder.getSIntNTy(elemTy.getWidth()); + elemTy = elemTy.isSigned() ? builder.getUIntNTy(elemTy.getWidth()) + : builder.getSIntNTy(elemTy.getWidth()); return mlir::cir::VectorType::get(builder.getContext(), elemTy, vecTy.getSize()); } @@ -2352,13 +2355,25 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "llvm.aarch64.neon.sqrdmulh.lane", resTy, getLoc(e->getExprLoc())); } + case NEON::BI__builtin_neon_vqshlu_n_v: + case NEON::BI__builtin_neon_vqshluq_n_v: { + // These intrinsics expect signed vector type as input, but + // return unsigned vector type. + mlir::cir::VectorType srcTy = getSignChangedVectorType(builder, vTy); + return buildNeonCall( + builder, {srcTy, srcTy}, ops, "llvm.aarch64.neon.sqshlu", vTy, + getLoc(e->getExprLoc()), false, /* not fp constrained op */ + 1, /* second arg is shift amount */ + false /* leftshift */); + } case NEON::BI__builtin_neon_vrshr_n_v: case NEON::BI__builtin_neon_vrshrq_n_v: { return buildNeonCall( - builder, {vTy, isUnsigned ? getSignedVectorType(builder, vTy) : vTy}, - ops, isUnsigned ? "llvm.aarch64.neon.urshl" : "llvm.aarch64.neon.srshl", - vTy, getLoc(e->getExprLoc()), false, /* not fp constrained op*/ - 1, /* second arg is shift amount */ + builder, + {vTy, isUnsigned ? getSignChangedVectorType(builder, vTy) : vTy}, ops, + isUnsigned ? "llvm.aarch64.neon.urshl" : "llvm.aarch64.neon.srshl", vTy, + getLoc(e->getExprLoc()), false, /* not fp constrained op*/ + 1, /* second arg is shift amount */ true /* rightshift */); } case NEON::BI__builtin_neon_vshl_n_v: diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index aeb2bc808f27..e136c92c615e 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -6035,64 +6035,113 @@ uint64x2_t test_vrshrq_n_u64(uint64x2_t a) { // return vsliq_n_p16(a, b, 15); // } -// NYI-LABEL: @test_vqshlu_n_s8( -// NYI: [[VQSHLU_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> %a, <8 x i8> ) -// NYI: ret <8 x i8> [[VQSHLU_N]] -// uint8x8_t test_vqshlu_n_s8(int8x8_t a) { -// return vqshlu_n_s8(a, 3); -// } +uint8x8_t test_vqshlu_n_s8(int8x8_t a) { + return vqshlu_n_s8(a, 3); -// NYI-LABEL: @test_vqshlu_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[VQSHLU_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[VQSHLU_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> [[VQSHLU_N]], <4 x i16> ) -// NYI: ret <4 x i16> [[VQSHLU_N1]] -// uint16x4_t test_vqshlu_n_s16(int16x4_t a) { -// return vqshlu_n_s16(a, 3); -// } + // CIR-LABEL: vqshlu_n_s8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vqshlu_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[VQSHLU_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[VQSHLU_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32> [[VQSHLU_N]], <2 x i32> ) -// NYI: ret <2 x i32> [[VQSHLU_N1]] -// uint32x2_t test_vqshlu_n_s32(int32x2_t a) { -// return vqshlu_n_s32(a, 3); -// } + // LLVM: {{.*}}@test_vqshlu_n_s8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VQSHLU_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> [[A]], <8 x i8> splat (i8 3)) + // LLVM: ret <8 x i8> [[VQSHLU_N]] +} -// NYI-LABEL: @test_vqshluq_n_s8( -// NYI: [[VQSHLU_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8> %a, <16 x i8> ) -// NYI: ret <16 x i8> [[VQSHLU_N]] -// uint8x16_t test_vqshluq_n_s8(int8x16_t a) { -// return vqshluq_n_s8(a, 3); -// } +uint16x4_t test_vqshlu_n_s16(int16x4_t a) { + return vqshlu_n_s16(a, 3); -// NYI-LABEL: @test_vqshluq_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[VQSHLU_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VQSHLU_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16> [[VQSHLU_N]], <8 x i16> ) -// NYI: ret <8 x i16> [[VQSHLU_N1]] -// uint16x8_t test_vqshluq_n_s16(int16x8_t a) { -// return vqshluq_n_s16(a, 3); -// } + // CIR-LABEL: vqshlu_n_s16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR-SAME:#cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vqshluq_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[VQSHLU_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VQSHLU_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32> [[VQSHLU_N]], <4 x i32> ) -// NYI: ret <4 x i32> [[VQSHLU_N1]] -// uint32x4_t test_vqshluq_n_s32(int32x4_t a) { -// return vqshluq_n_s32(a, 3); -// } + // LLVM: {{.*}}@test_vqshlu_n_s16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[VQSHLU_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[VQSHLU_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> [[VQSHLU_N]], <4 x i16> splat (i16 3)) + // LLVM: ret <4 x i16> [[VQSHLU_N1]] +} -// NYI-LABEL: @test_vqshluq_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[VQSHLU_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VQSHLU_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64> [[VQSHLU_N]], <2 x i64> ) -// NYI: ret <2 x i64> [[VQSHLU_N1]] -// uint64x2_t test_vqshluq_n_s64(int64x2_t a) { -// return vqshluq_n_s64(a, 3); -// } +uint32x2_t test_vqshlu_n_s32(int32x2_t a) { + return vqshlu_n_s32(a, 3); + + // CIR-LABEL: vqshlu_n_s32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqshlu_n_s32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[VQSHLU_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[VQSHLU_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32> [[VQSHLU_N]], <2 x i32> splat (i32 3)) +} + +uint8x16_t test_vqshluq_n_s8(int8x16_t a) { + return vqshluq_n_s8(a, 3); + + // CIR-LABEL: vqshluq_n_s8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqshluq_n_s8(<16 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VQSHLUQ_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8> [[A]], <16 x i8> splat (i8 3)) + // LLVM: ret <16 x i8> [[VQSHLUQ_N]] +} + +uint16x8_t test_vqshluq_n_s16(int16x8_t a) { + return vqshluq_n_s16(a, 3); + + // CIR-LABEL: vqshluq_n_s16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqshluq_n_s16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VQSHLUQ_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VQSHLUQ_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16> [[VQSHLUQ_N]], <8 x i16> splat (i16 3)) + // LLVM: ret <8 x i16> [[VQSHLUQ_N1]] +} + +uint32x4_t test_vqshluq_n_s32(int32x4_t a) { + return vqshluq_n_s32(a, 3); + + // CIR-LABEL: vqshluq_n_s32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i, + // CIR-SAME: #cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqshluq_n_s32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VQSHLUQ_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VQSHLUQ_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32> [[VQSHLUQ_N]], <4 x i32> splat (i32 3)) + // LLVM: ret <4 x i32> [[VQSHLUQ_N1]] +} + +uint64x2_t test_vqshluq_n_s64(int64x2_t a) { + return vqshluq_n_s64(a, 3); + + // CIR-LABEL: vqshluq_n_s64 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i, #cir.int<3> : !s64i]> : !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqshluq_n_s64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VQSHLUQ_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VQSHLUQ_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64> [[VQSHLUQ_N]], <2 x i64> splat (i64 3)) + // LLVM: ret <2 x i64> [[VQSHLUQ_N1]] +} // NYI-LABEL: @test_vshrn_n_s16( // NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> From 387891537a0af98eb55788f7e62496ba830189de Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 31 Oct 2024 16:08:27 -0700 Subject: [PATCH 2009/2301] [CIR][CIRGen] Support more member init variations --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 12 ++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 10 ----- clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 5 +++ clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 4 ++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 7 +++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 12 ++++-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 7 ++++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenValue.h | 4 ++ clang/test/CIR/CodeGen/member-init-struct.cpp | 40 +++++++++++++++++++ 11 files changed, 90 insertions(+), 16 deletions(-) create mode 100644 clang/test/CIR/CodeGen/member-init-struct.cpp diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 1fa64cfe7fc6..49f1256db284 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -115,6 +115,16 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return getPointerTo(::mlir::cir::VoidType::get(getContext()), cirAS); } + mlir::cir::MethodAttr getMethodAttr(mlir::cir::MethodType ty, + mlir::cir::FuncOp methodFuncOp) { + auto methodFuncSymbolRef = mlir::FlatSymbolRefAttr::get(methodFuncOp); + return mlir::cir::MethodAttr::get(ty, methodFuncSymbolRef); + } + + mlir::cir::MethodAttr getNullMethodAttr(mlir::cir::MethodType ty) { + return mlir::cir::MethodAttr::get(ty); + } + mlir::cir::BoolAttr getCIRBoolAttr(bool state) { return mlir::cir::BoolAttr::get(getContext(), getBoolTy(), state); } @@ -142,6 +152,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return getConstNullPtrAttr(ptrTy); if (auto structTy = mlir::dyn_cast(ty)) return getZeroAttr(structTy); + if (auto methodTy = mlir::dyn_cast(ty)) + return getNullMethodAttr(methodTy); if (mlir::isa(ty)) { return getCIRBoolAttr(false); } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 68abf7c728dc..2cbefce453ab 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -251,16 +251,6 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return mlir::cir::DataMemberAttr::get(getContext(), ty, std::nullopt); } - mlir::cir::MethodAttr getMethodAttr(mlir::cir::MethodType ty, - mlir::cir::FuncOp methodFuncOp) { - auto methodFuncSymbolRef = mlir::FlatSymbolRefAttr::get(methodFuncOp); - return mlir::cir::MethodAttr::get(ty, methodFuncSymbolRef); - } - - mlir::cir::MethodAttr getNullMethodAttr(mlir::cir::MethodType ty) { - return mlir::cir::MethodAttr::get(ty); - } - // TODO(cir): Once we have CIR float types, replace this by something like a // NullableValueInterface to allow for type-independent queries. bool isNullValue(mlir::Attribute attr) const { diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp index 8f6040f8cea5..e4b38b99dbfd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -79,4 +79,9 @@ mlir::cir::GlobalLinkageKind CIRGenCXXABI::getCXXDestructorLinkage( std::vector CIRGenCXXABI::getVBPtrOffsets(const CXXRecordDecl *RD) { return std::vector(); +} + +bool CIRGenCXXABI::isZeroInitializable(const MemberPointerType *MPT) { + // Fake answer. + return true; } \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 159b0e5e8e2b..1b0dca9512d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -268,6 +268,10 @@ class CIRGenCXXABI { virtual RecordArgABI getRecordArgABI(const clang::CXXRecordDecl *RD) const = 0; + /// Return true if the given member pointer can be zero-initialized + /// (in the C++ sense) with an LLVM zeroinitializer. + virtual bool isZeroInitializable(const MemberPointerType *MPT); + /// Gets the offsets of all the virtual base pointers in a given class. virtual std::vector getVBPtrOffsets(const CXXRecordDecl *RD); diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 4554e41030e7..cc43ae370c4a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -205,7 +205,12 @@ static void buildLValueForAnyFieldInitialization(CIRGenFunction &CGF, LValue &LHS) { FieldDecl *Field = MemberInit->getAnyMember(); if (MemberInit->isIndirectMemberInitializer()) { - llvm_unreachable("NYI"); + // If we are initializing an anonymous union field, drill down to the field. + IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember(); + for (const auto *I : IndirectField->chain()) { + auto *fd = cast(I); + LHS = CGF.buildLValueForFieldInitialization(LHS, fd, fd->getName()); + } } else { LHS = CGF.buildLValueForFieldInitialization(LHS, Field, Field->getName()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 3ead6e02d6f5..f78fb5912723 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2649,7 +2649,7 @@ RValue CIRGenFunction::convertTempToRValue(Address addr, clang::QualType type, case TEK_Complex: llvm_unreachable("NYI"); case TEK_Aggregate: - llvm_unreachable("NYI"); + return lvalue.asAggregateRValue(); case TEK_Scalar: return RValue::get(buildLoadOfScalar(lvalue, loc)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 2658e391576e..e7456dae5530 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -282,9 +282,7 @@ class AggExprEmitter : public StmtVisitor { llvm::Value *outerBegin = nullptr) { llvm_unreachable("NYI"); } - void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { - llvm_unreachable("NYI"); - } + void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E); void VisitNoInitExpr(NoInitExpr *E) { llvm_unreachable("NYI"); } void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { CIRGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE); @@ -1462,6 +1460,14 @@ void AggExprEmitter::VisitCXXInheritedCtorInitExpr( E->inheritedFromVBase(), E); } +void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { + QualType T = E->getType(); + mlir::Location loc = CGF.getLoc(E->getSourceRange()); + AggValueSlot Slot = EnsureSlot(loc, T); + buildNullInitializationToLValue(loc, + CGF.makeAddrLValue(Slot.getAddress(), T)); +} + //===----------------------------------------------------------------------===// // Helpers and dispatcher //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index b3561f345b91..efe193861b08 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -122,6 +122,7 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { } bool classifyReturnType(CIRGenFunctionInfo &FI) const override; + bool isZeroInitializable(const MemberPointerType *MPT) override; AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, @@ -2633,3 +2634,9 @@ CIRGenItaniumCXXABI::buildVirtualMethodAttr(mlir::cir::MethodType MethodTy, return mlir::cir::MethodAttr::get(MethodTy, VTableOffset); } + +/// The Itanium ABI requires non-zero initialization only for data +/// member pointers, for which '0' is a valid offset. +bool CIRGenItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) { + return MPT->isMemberFunctionPointer(); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index b14a0aa1e1cf..61c32abf7409 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -1,4 +1,5 @@ #include "CIRGenTypes.h" +#include "CIRGenCXXABI.h" #include "CIRGenCall.h" #include "CIRGenFunctionInfo.h" #include "CIRGenModule.h" @@ -920,7 +921,7 @@ bool CIRGenTypes::isZeroInitializable(QualType T) { // We have to ask the ABI about member pointers. if (const MemberPointerType *MPT = T->getAs()) - llvm_unreachable("NYI"); + return TheCXXABI.isZeroInitializable(MPT); // Everything else is okay. return true; diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 50a925eabdbd..79193c39a6a3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -397,6 +397,10 @@ class LValue { tbaaInfo); return R; } + + RValue asAggregateRValue() const { + return RValue::getAggregate(getAddress(), isVolatileQualified()); + } }; /// An aggregate value slot. diff --git a/clang/test/CIR/CodeGen/member-init-struct.cpp b/clang/test/CIR/CodeGen/member-init-struct.cpp new file mode 100644 index 000000000000..169577a98a36 --- /dev/null +++ b/clang/test/CIR/CodeGen/member-init-struct.cpp @@ -0,0 +1,40 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +struct A {int a;}; +struct B {float a;}; +struct C { + union { + A a; + B b[10]; + }; + int c; + int d[10]; + void (C::*e)(); + C() : a(), c(), d(), e() {} + C(A x) : a(x) {} + C(void (C::*x)(), int y) : b(), c(y), e(x) {} +}; + +// CHECK-LABEL: cir.global external @x = #cir.zero : !ty_A +A x; +C a, b(x), c(0, 2); + +// CHECK-LABEL: @_ZN1CC2Ev +// CHECK: %[[VAL_1:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: cir.store %{{.*}}, %[[VAL_1]] : !cir.ptr, !cir.ptr> +// CHECK: %[[VAL_2:.*]] = cir.load %[[VAL_1]] : !cir.ptr>, !cir.ptr +// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][0] {name = ""} : !cir.ptr -> !cir.ptr +// CHECK: %[[VAL_4:.*]] = cir.get_member %[[VAL_3]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CHECK: %[[VAL_5:.*]] = cir.const {{.*}} : !ty_A +// CHECK: cir.store %[[VAL_5]], %[[VAL_4]] : !ty_A, !cir.ptr +// CHECK: cir.call @_ZN1AC1Ev(%[[VAL_4]]) : (!cir.ptr) -> () +// CHECK: %[[VAL_6:.*]] = cir.get_member %[[VAL_2]][1] {name = "c"} : !cir.ptr -> !cir.ptr +// CHECK: %[[VAL_7:.*]] = cir.const {{.*}}<0> : !s32i +// CHECK: cir.store %[[VAL_7]], %[[VAL_6]] : !s32i, !cir.ptr +// CHECK: %[[VAL_8:.*]] = cir.get_member %[[VAL_2]][2] {name = "d"} : !cir.ptr -> !cir.ptr> +// CHECK: %[[VAL_9:.*]] = cir.const {{.*}} : !cir.array +// CHECK: cir.store %[[VAL_9]], %[[VAL_8]] : !cir.array, !cir.ptr> +// CHECK: %[[VAL_10:.*]] = cir.get_member %[[VAL_2]][4] {name = "e"} : !cir.ptr -> !cir.ptr in !ty_C>> +// CHECK: %[[VAL_11:.*]] = cir.const #cir.method : !cir.method in !ty_C> +// CHECK: cir.store %[[VAL_11]], %[[VAL_10]] : !cir.method in !ty_C>, !cir.ptr in !ty_C>> +// CHECK: cir.return \ No newline at end of file From a9c6240cb82f3c414519ffe2213c45ffa1d42e18 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Fri, 1 Nov 2024 15:28:17 -0400 Subject: [PATCH 2010/2301] [Driver] Bring back -emit-cir as a frontend option Upstream accepted this being a ClangOption but it got lost in the rebase. Bring it back here. --- clang/include/clang/Driver/Options.td | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 3b6ea8bde75e..6722cb60bafa 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3117,7 +3117,7 @@ defm clangir_call_conv_lowering : BoolFOption<"clangir-call-conv-lowering", NegFlag, BothFlags<[], [ClangOption, CC1Option], "">>; -def emit_cir : Flag<["-"], "emit-cir">, Visibility<[CC1Option]>, +def emit_cir : Flag<["-"], "emit-cir">, Visibility<[ClangOption, CC1Option]>, Group, HelpText<"Build ASTs and then lower to ClangIR, emit the .cir file">; def emit_cir_only : Flag<["-"], "emit-cir-only">, HelpText<"Build ASTs and convert to CIR, discarding output">; From dbbd21aa62ba6ad5f1e963bc9e94174c02324c24 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Fri, 1 Nov 2024 15:51:49 -0700 Subject: [PATCH 2011/2301] [CIR] Add a cir.undef attr CodeGen sometimes emits undef constants directly, e.g. when initializing an empty struct (https://godbolt.org/z/68od33aa8). We want to match this behavior, so we need a cir.undef attr to represent the constant. This change implements the lowering for the new op, which matches how cir.zero is lowered. A follow-up will change CIRGen to use it. It also replaces UndefOf with a ConstantOp of UndefAttr to avoid redundancy. Pull Request resolved: https://github.com/llvm/clangir/pull/993 --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 15 +++++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 7 --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 ++ clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp | 4 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 58 +++++++++---------- clang/lib/CIR/Lowering/LoweringHelpers.cpp | 2 +- clang/test/CIR/IR/invalid.cir | 6 ++ clang/test/CIR/Lowering/const.cir | 2 + clang/test/CIR/Lowering/globals.cir | 7 +++ 9 files changed, 68 insertions(+), 39 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 8b18cae1da5b..058e335928aa 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -106,6 +106,21 @@ def ZeroAttr : CIR_Attr<"Zero", "zero", [TypedAttrInterface]> { let assemblyFormat = [{}]; } +//===----------------------------------------------------------------------===// +// UndefAttr +//===----------------------------------------------------------------------===// + +def UndefAttr : CIR_Attr<"Undef", "undef", [TypedAttrInterface]> { + let summary = "Represent an undef constant"; + let description = [{ + The UndefAttr represents an undef constant, corresponding to LLVM's notion + of undef. + }]; + + let parameters = (ins AttributeSelfTypeParameter<"">:$type); + let assemblyFormat = [{}]; +} + //===----------------------------------------------------------------------===// // ConstArrayAttr //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 534d4f056ce2..db98d272ee2c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4993,11 +4993,4 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", let hasVerifier = 0; } -def UndefOp : CIR_Op<"undef", [Pure]> { - let summary = "Creates an undefined value of CIR dialect type."; - let description = [{ `cir.undef` is similar to the one in the LLVM IR dialect }]; - let results = (outs AnyType:$res); - let assemblyFormat = "attr-dict `:` type($res)"; -} - #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d449fea2882c..c517a080d9f9 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -393,6 +393,12 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return op->emitOpError("zero expects struct or array type"); } + if (isa(attrType)) { + if (!::mlir::isa<::mlir::cir::VoidType>(opType)) + return success(); + return op->emitOpError("undef expects non-void type"); + } + if (mlir::isa(attrType)) { if (!mlir::isa(opType)) return op->emitOpError("result type (") diff --git a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp index 2ced31cbbad8..d49a4613ec86 100644 --- a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp @@ -15,6 +15,7 @@ #include "mlir/IR/PatternMatch.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Interfaces/MemorySlotInterfaces.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/STLExtras.h" @@ -41,7 +42,8 @@ llvm::SmallVector cir::AllocaOp::getPromotableSlots() { Value cir::AllocaOp::getDefaultValue(const MemorySlot &slot, OpBuilder &builder) { - return builder.create(getLoc(), slot.elemType); + return builder.create( + getLoc(), slot.elemType, builder.getAttr(slot.elemType)); } void cir::AllocaOp::handleBlockArgument(const MemorySlot &slot, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index c8ae9d4e107a..0c98e2786773 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -418,6 +418,16 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ZeroAttr zeroAttr, loc, converter->convertType(zeroAttr.getType())); } +/// UndefAttr visitor. +inline mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::UndefAttr undefAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); + return rewriter.create( + loc, converter->convertType(undefAttr.getType())); +} + /// ConstStruct visitor. mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ConstStructAttr constStruct, @@ -629,6 +639,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, return lowerCirAttrAsValue(parentOp, boolAttr, rewriter, converter); if (const auto zeroAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, zeroAttr, rewriter, converter); + if (const auto undefAttr = mlir::dyn_cast(attr)) + return lowerCirAttrAsValue(parentOp, undefAttr, rewriter, converter); if (const auto globalAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, globalAttr, rewriter, converter); if (const auto vtableAttr = mlir::dyn_cast(attr)) @@ -1620,7 +1632,8 @@ class CIRConstantLowering // Fetch operation constant array initializer. auto constArr = mlir::dyn_cast(op.getValue()); - if (!constArr && !isa(op.getValue())) + if (!constArr && + !isa(op.getValue())) return op.emitError() << "array does not have a constant initializer"; std::optional denseAttr; @@ -1652,8 +1665,9 @@ class CIRConstantLowering return mlir::success(); } else if (auto strTy = mlir::dyn_cast(op.getType())) { - if (auto zero = mlir::dyn_cast(op.getValue())) { - auto initVal = lowerCirAttrAsValue(op, zero, rewriter, typeConverter); + auto attr = op.getValue(); + if (mlir::isa(attr)) { + auto initVal = lowerCirAttrAsValue(op, attr, rewriter, typeConverter); rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); @@ -2341,11 +2355,11 @@ class CIRGlobalOpLowering } else if (auto boolAttr = mlir::dyn_cast(init.value())) { init = rewriter.getBoolAttr(boolAttr.getValue()); - } else if (isa( - init.value())) { - // TODO(cir): once LLVM's dialect has a proper zeroinitializer attribute - // this should be updated. For now, we use a custom op to initialize - // globals to zero. + } else if (isa(init.value())) { + // TODO(cir): once LLVM's dialect has proper equivalent attributes this + // should be updated. For now, we use a custom op to initialize globals + // to the appropriate value. setupRegionInitializedLLVMGlobalOp(op, rewriter); auto value = lowerCirAttrAsValue(op, init.value(), rewriter, typeConverter); @@ -2383,7 +2397,7 @@ class CIRGlobalOpLowering lowerCirAttrAsValue(op, typeinfoAttr, rewriter, typeConverter)); return mlir::success(); } else { - op.emitError() << "usupported initializer '" << init.value() << "'"; + op.emitError() << "unsupported initializer '" << init.value() << "'"; return mlir::failure(); } @@ -3966,21 +3980,6 @@ class CIRClearCacheOpLowering } }; -class CIRUndefOpLowering - : public mlir::OpConversionPattern { - - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::UndefOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto typ = getTypeConverter()->convertType(op.getRes().getType()); - - rewriter.replaceOpWithNewOp(op, typ); - return mlir::success(); - } -}; - class CIREhTypeIdOpLowering : public mlir::OpConversionPattern { public: @@ -4216,12 +4215,11 @@ void populateCIRToLLVMConversionPatterns( CIRStackSaveLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, - CIRCmpThreeWayOpLowering, CIRClearCacheOpLowering, CIRUndefOpLowering, - CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, - CIRAllocExceptionOpLowering, CIRFreeExceptionOpLowering, - CIRThrowOpLowering, CIRIntrinsicCallLowering, CIRBaseClassAddrOpLowering, - CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering, - CIRMemMoveOpLowering + CIRCmpThreeWayOpLowering, CIRClearCacheOpLowering, CIREhTypeIdOpLowering, + CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, + CIRFreeExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, + CIRBaseClassAddrOpLowering, CIRVTTAddrPointOpLowering, + CIRIsFPClassOpLowering, CIRAbsOpLowering, CIRMemMoveOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/lib/CIR/Lowering/LoweringHelpers.cpp b/clang/lib/CIR/Lowering/LoweringHelpers.cpp index 94f4d251d370..98d5158ea716 100644 --- a/clang/lib/CIR/Lowering/LoweringHelpers.cpp +++ b/clang/lib/CIR/Lowering/LoweringHelpers.cpp @@ -85,7 +85,7 @@ void convertToDenseElementsAttrImpl( continue; } - if (mlir::isa(eltAttr)) { + if (mlir::isa(eltAttr)) { currentIndex += elementsSizeInCurrentDim; continue; } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index f3fe81db5dd9..9df6e0c858fb 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -386,6 +386,12 @@ module { // ----- +module { + cir.global external @v = #cir.undef : !cir.void // expected-error {{undef expects non-void type}} +} + +// ----- + !s32i = !cir.int cir.func @vec_op_size() { %0 = cir.const #cir.int<1> : !s32i diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 4bb234c56995..43e635226000 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -15,6 +15,8 @@ module { // CHECK: llvm.mlir.constant(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) : !llvm.array<2 x f32> %4 = cir.const #cir.zero : !cir.array // CHECK: llvm.mlir.zero : !llvm.array<3 x i32> + %5 = cir.const #cir.undef : !cir.array + // CHECK: llvm.mlir.undef : !llvm.array<3 x i32> cir.return } diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 482ee8490fca..6290ca19c1e0 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -150,6 +150,13 @@ module { cir.global common @comm = #cir.int<0> : !s32i // MLIR: llvm.mlir.global common @comm(0 : i32) {addr_space = 0 : i32} : i32 + cir.global external @undefStruct = #cir.undef : !ty_Bar + // MLIR: llvm.mlir.global external @undefStruct() + // MLIR: %0 = llvm.mlir.undef : !llvm.struct<"struct.Bar", (i32, i8)> + // MLIR: llvm.return %0 : !llvm.struct<"struct.Bar", (i32, i8)> + // MLIR: } + // LLVM: @undefStruct = global %struct.Bar undef + cir.global "private" internal @Handlers = #cir.const_array<[#cir.const_struct<{#cir.global_view<@myfun> : !cir.ptr>}> : !ty_anon2E1_]> : !cir.array cir.func internal private @myfun(%arg0: !s32i) { %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} From 153921d04d625beb73139a45dfd1a7a752c154a6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 31 Oct 2024 17:16:05 -0700 Subject: [PATCH 2012/2301] [CIR][CIRGen] Scalar emission for casts of base to derived --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 61 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 12 ++++ clang/lib/CIR/CodeGen/CIRGenClass.cpp | 37 ++++++++++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 20 +++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 6 ++ clang/lib/CIR/CodeGen/CIRGenModule.h | 7 +++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 39 +++++++++++- clang/test/CIR/CodeGen/derived-cast.cpp | 45 ++++++++++++++ 8 files changed, 219 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/derived-cast.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index db98d272ee2c..8c326bb25cc4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3142,7 +3142,7 @@ def VecShuffleDynamicOp : CIR_Op<"vec.shuffle.dynamic", } //===----------------------------------------------------------------------===// -// BaseClassAddr +// BaseClassAddr & BaseClassAddr //===----------------------------------------------------------------------===// def BaseClassAddrOp : CIR_Op<"base_class_addr"> { @@ -3182,8 +3182,63 @@ def BaseClassAddrOp : CIR_Op<"base_class_addr"> { `)` `[` $offset `]` `->` qualified(type($base_addr)) attr-dict }]; - // FIXME: add verifier. - // Check whether both src/dst pointee's are compatible. + // The validity of the relationship of derived and base cannot + // yet be verified, currently not worth adding such a feature + // just for this. + let hasVerifier = 0; +} + +def DerivedClassAddrOp : CIR_Op<"derived_class_addr"> { + let summary = "Get the derived class address for a class/struct"; + let description = [{ + The `cir.derived_class_addr` operaration gets the address of a particular + derived class given a non-virtual base class pointer. The offset in bytes + of the base class must be passed in, similar to `cir.base_class_addr`, but + going into the other direction (In the itanium ABI this means lowering to + a negative offset). + + The operation contains a flag for whether or not the operand may be nullptr. + That depends on the context and cannot be known by the operation, and that + information affects how the operation is lowered. + + Example: + ```c++ + class A { int a; }; + class B { int b; + public: + A *getAsA(); + }; + class X : public A, public B { int x; }; + + A *B::getAsA() { + return static_cast(this); + ``` + + leads to + ```mlir + %2 = cir.load %0 : !cir.ptr>, !cir.ptr + %3 = cir.derived_class_addr(%2 : !cir.ptr nonnull) [4] -> !cir.ptr + %4 = cir.base_class_addr(%3 : !cir.ptr) [0] -> !cir.ptr + cir.return %4 + ``` + }]; + + let arguments = (ins + Arg:$base_addr, + IndexAttr:$offset, UnitAttr:$assume_not_null); + + let results = (outs Res:$derived_addr); + + let assemblyFormat = [{ + `(` + $base_addr `:` qualified(type($base_addr)) + (`nonnull` $assume_not_null^)? + `)` `[` $offset `]` `->` qualified(type($derived_addr)) attr-dict + }]; + + // The validity of the relationship of derived and base cannot + // yet be verified, currently not worth adding such a feature + // just for this. let hasVerifier = 0; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 2cbefce453ab..9844ef32a15b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -707,6 +707,18 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return Address(baseAddr, ptrTy, addr.getAlignment()); } + cir::Address createDerivedClassAddr(mlir::Location loc, cir::Address addr, + mlir::Type destType, unsigned offset, + bool assumeNotNull) { + if (destType == addr.getElementType()) + return addr; + + auto ptrTy = getPointerTo(destType); + auto derivedAddr = create( + loc, ptrTy, addr.getPointer(), mlir::APInt(64, offset), assumeNotNull); + return Address(derivedAddr, ptrTy, addr.getAlignment()); + } + mlir::Value createVTTAddrPoint(mlir::Location loc, mlir::Type retTy, mlir::Value addr, uint64_t offset) { return create( diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index cc43ae370c4a..bbcf98905684 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1503,6 +1503,37 @@ mlir::Value CIRGenFunction::GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, } } +CharUnits CIRGenModule::getNonVirtualBaseClassOffset( + const CXXRecordDecl *classDecl, CastExpr::path_const_iterator pathBegin, + CastExpr::path_const_iterator pathEnd) { + assert(pathBegin != pathEnd && "Base path should not be empty!"); + + CharUnits Offset = + computeNonVirtualBaseClassOffset(classDecl, pathBegin, pathEnd); + return Offset; +} + +Address CIRGenFunction::getAddressOfDerivedClass( + Address baseAddr, const CXXRecordDecl *derived, + CastExpr::path_const_iterator pathBegin, + CastExpr::path_const_iterator pathEnd, bool nullCheckValue) { + assert(pathBegin != pathEnd && "Base path should not be empty!"); + + QualType derivedTy = + getContext().getCanonicalType(getContext().getTagDeclType(derived)); + mlir::Type derivedValueTy = ConvertType(derivedTy); + CharUnits nonVirtualOffset = + CGM.getNonVirtualBaseClassOffset(derived, pathBegin, pathEnd); + + // Note that in OG, no offset (nonVirtualOffset.getQuantity() == 0) means it + // just gives the address back. In CIR a `cir.derived_class` is created and + // made into a nop later on during lowering. + return builder.createDerivedClassAddr(getLoc(derived->getSourceRange()), + baseAddr, derivedValueTy, + nonVirtualOffset.getQuantity(), + /*assumeNotNull=*/not nullCheckValue); +} + Address CIRGenFunction::getAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, @@ -1553,7 +1584,11 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, /*assumeNotNull=*/not NullCheckValue); } - // Conversion to a virtual base. cir.base_class_addr can't handle this. + if (sanitizePerformTypeCheck()) { + assert(!MissingFeatures::sanitizeOther()); + } + + // Conversion to a virtual base. cir.base_class_addr can't handle this. // Generate the code to look up the address in the virtual table. llvm_unreachable("NYI: Cast to virtual base class"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 6d2e956731ca..560b0be47b4c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1696,8 +1696,24 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } return V; } - case CK_BaseToDerived: - llvm_unreachable("NYI"); + case CK_BaseToDerived: { + const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl(); + assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"); + Address Base = CGF.buildPointerWithAlignment(E); + Address Derived = CGF.getAddressOfDerivedClass( + Base, DerivedClassDecl, CE->path_begin(), CE->path_end(), + CGF.shouldNullCheckClassCastValue(CE)); + + // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is + // performed and the object is not of the derived type. + if (CGF.sanitizePerformTypeCheck()) + assert(!MissingFeatures::sanitizeOther()); + + if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast)) + assert(!MissingFeatures::sanitizeOther()); + + return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType()); + } case CK_DerivedToBase: { // The EmitPointerWithAlignment path does this fine; just discard // the alignment. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 7623b52c3695..e3bc68124e90 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1631,6 +1631,12 @@ class CIRGenFunction : public CIRGenTypeCache { CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc); + Address getAddressOfDerivedClass(Address baseAddr, + const CXXRecordDecl *derived, + CastExpr::path_const_iterator pathBegin, + CastExpr::path_const_iterator pathEnd, + bool nullCheckValue); + /// Emit code for the start of a function. /// \param Loc The location to be associated with the function. /// \param StartLoc The location of the function body. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 560d9fe4a22c..a4380bde009d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -298,6 +298,13 @@ class CIRGenModule : public CIRGenTypeCache { CastExpr::path_const_iterator Start, CastExpr::path_const_iterator End); + /// Returns the offset from a derived class to a class. Returns null if the + /// offset is 0. + CharUnits + getNonVirtualBaseClassOffset(const CXXRecordDecl *classDecl, + CastExpr::path_const_iterator pathBegin, + CastExpr::path_const_iterator pathEnd); + /// Get the CIR attributes and calling convention to use for a particular /// function type. /// diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0c98e2786773..bfe3da45f069 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -854,6 +854,40 @@ class CIRBaseClassAddrOpLowering } }; +class CIRDerivedClassAddrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern< + mlir::cir::DerivedClassAddrOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::DerivedClassAddrOp derivedClassOp, + OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + const auto resultType = + getTypeConverter()->convertType(derivedClassOp.getType()); + mlir::Value baseAddr = adaptor.getBaseAddr(); + int64_t offsetVal = adaptor.getOffset().getZExtValue() * -1; + llvm::SmallVector offset = {offsetVal}; + mlir::Type byteType = mlir::IntegerType::get(resultType.getContext(), 8, + mlir::IntegerType::Signless); + if (derivedClassOp.getAssumeNotNull()) { + rewriter.replaceOpWithNewOp( + derivedClassOp, resultType, byteType, baseAddr, offset); + } else { + auto loc = derivedClassOp.getLoc(); + mlir::Value isNull = rewriter.create( + loc, mlir::LLVM::ICmpPredicate::eq, baseAddr, + rewriter.create(loc, baseAddr.getType())); + mlir::Value adjusted = rewriter.create( + loc, resultType, byteType, baseAddr, offset); + rewriter.replaceOpWithNewOp(derivedClassOp, isNull, + baseAddr, adjusted); + } + return mlir::success(); + } +}; + static mlir::Value getValueForVTableSymbol(mlir::Operation *op, mlir::ConversionPatternRewriter &rewriter, @@ -4218,8 +4252,9 @@ void populateCIRToLLVMConversionPatterns( CIRCmpThreeWayOpLowering, CIRClearCacheOpLowering, CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, CIRFreeExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, - CIRBaseClassAddrOpLowering, CIRVTTAddrPointOpLowering, - CIRIsFPClassOpLowering, CIRAbsOpLowering, CIRMemMoveOpLowering + CIRBaseClassAddrOpLowering, CIRDerivedClassAddrOpLowering, + CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering, + CIRMemMoveOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/test/CIR/CodeGen/derived-cast.cpp b/clang/test/CIR/CodeGen/derived-cast.cpp new file mode 100644 index 000000000000..3c89deb50c4a --- /dev/null +++ b/clang/test/CIR/CodeGen/derived-cast.cpp @@ -0,0 +1,45 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +class A { + int a; +}; + +class B { + int b; +public: + A *getAsA(); +}; + +class X : public A, public B { + int x; +}; + +A *B::getAsA() { + return static_cast(this); + + // CHECK-LABEL: define{{.*}} ptr @_ZN1B6getAsAEv + // CHECK: %[[THIS:.*]] = load ptr, ptr + // CHECK-NEXT: getelementptr inbounds i8, ptr %[[THIS]], i64 -4 +} + +// CIR-LABEL: @_ZN1B6getAsAEv +// CIR: %[[VAL_1:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CIR: %[[VAL_2:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} +// CIR: %[[VAL_3:.*]] = cir.load %[[VAL_1]] : !cir.ptr>, !cir.ptr +// CIR: %[[VAL_4:.*]] = cir.derived_class_addr(%[[VAL_3]] : !cir.ptr nonnull) [4] -> !cir.ptr +// CIR: %[[VAL_5:.*]] = cir.base_class_addr(%[[VAL_4]] : !cir.ptr) [0] -> !cir.ptr +// CIR: cir.store %[[VAL_5]], %[[VAL_2]] : !cir.ptr, !cir.ptr> +// CIR: %[[VAL_6:.*]] = cir.load %[[VAL_2]] : !cir.ptr>, !cir.ptr +// CIR: cir.return %[[VAL_6]] : !cir.ptr + +// LLVM-LABEL: @_ZN1B6getAsAEv +// LLVM: %[[VAL_1:.*]] = alloca ptr, i64 1, align 8 +// LLVM: store ptr %[[VAL_2:.*]], ptr %[[VAL_0:.*]], align 8 +// LLVM: %[[VAL_3:.*]] = load ptr, ptr %[[VAL_0]], align 8 +// LLVM: %[[VAL_4:.*]] = getelementptr i8, ptr %[[VAL_3]], i32 -4 +// LLVM: %[[VAL_5:.*]] = icmp eq ptr %[[VAL_4]], null, +// LLVM: %[[VAL_6:.*]] = getelementptr i8, ptr %[[VAL_4]], i32 0, +// LLVM: %[[VAL_7:.*]] = select i1 %[[VAL_5]], ptr %[[VAL_4]], ptr %[[VAL_6]], \ No newline at end of file From 21d794258c4a366de1eaf6d5fbf545abd5981d13 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Fri, 1 Nov 2024 15:51:49 -0700 Subject: [PATCH 2013/2301] [CIR][CIRGen] Fix const codegen for empty struct If an empty struct has a non-trivial constexpr constructor, CodeGen emits an undef constant. CIRGen was previously emitting an empty attribute, which got interpreted as constant evaluation failing, resulting in a global variable initializer being emitted. Change to undef to match CodeGen. https://godbolt.org/z/7M9EnEddx has the comparison between current CIRGen vs. original CodeGen; it should match after this lands. Pull Request resolved: https://github.com/llvm/clangir/pull/994 --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 3 ++- clang/test/CIR/CodeGen/struct.cpp | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index c94166456974..c8fa52b70fae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -346,7 +346,8 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( ConstantAggregateBuilderUtils Utils(CGM); if (Elems.empty()) - return {}; + return mlir::cir::UndefAttr::get(CGM.getBuilder().getContext(), DesiredTy); + auto Offset = [&](size_t I) { return Offsets[I] - StartOffset; }; // If we want an array type, see if all the elements are the same type and diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 3fa7a8ff7600..91acb833a706 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -123,6 +123,14 @@ struct A simpleConstInit = {1}; struct A arrConstInit[1] = {{1}}; // CHECK: cir.global external @arrConstInit = #cir.const_array<[#cir.const_struct<{#cir.int<1> : !s32i}> : !ty_A]> : !cir.array +// Should globally const-initialize empty structs with a non-trivial constexpr +// constructor (as undef, to match existing clang CodeGen behavior). +struct NonTrivialConstexprConstructor { + constexpr NonTrivialConstexprConstructor() {} +} nonTrivialConstexprConstructor; +// CHECK: cir.global external @nonTrivialConstexprConstructor = #cir.undef : !ty_NonTrivialConstexprConstructor {alignment = 1 : i64} +// CHECK-NOT: @__cxx_global_var_init + // Should locally copy struct members. void shouldLocallyCopyStructAssignments(void) { struct A a = { 3 }; From 82386671a745eba793876828e9fd8869dfccf083 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 1 Nov 2024 20:58:25 -0400 Subject: [PATCH 2014/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vhadd_v and neon_vhaddq_v (#1038) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 6 + clang/test/CIR/CodeGen/AArch64/neon.c | 252 +++++++++++------- 2 files changed, 162 insertions(+), 96 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index a36a6c7389f1..63fe69320721 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2440,6 +2440,12 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "llvm.aarch64.neon.srhadd"; break; } + case NEON::BI__builtin_neon_vhadd_v: + case NEON::BI__builtin_neon_vhaddq_v: { + intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.uhadd" + : "llvm.aarch64.neon.shadd"; + break; + } case NEON::BI__builtin_neon_vqmovun_v: { intrincsName = "llvm.aarch64.neon.sqxtun"; argTypes.push_back(builder.getExtendedOrTruncatedElementVectorType( diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index e136c92c615e..36f9bc337e51 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -2568,113 +2568,173 @@ float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) { // return vcltq_f64(v1, v2); // } -// NYI-LABEL: @test_vhadd_s8( -// NYI: [[VHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %v1, <8 x i8> %v2) -// NYI: ret <8 x i8> [[VHADD_V_I]] -// int8x8_t test_vhadd_s8(int8x8_t v1, int8x8_t v2) { -// return vhadd_s8(v1, v2); -// } +int8x8_t test_vhadd_s8(int8x8_t v1, int8x8_t v2) { + return vhadd_s8(v1, v2); -// NYI-LABEL: @test_vhadd_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> -// NYI: [[VHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %v1, <4 x i16> %v2) -// NYI: [[VHADD_V3_I:%.*]] = bitcast <4 x i16> [[VHADD_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VHADD_V2_I]] -// int16x4_t test_vhadd_s16(int16x4_t v1, int16x4_t v2) { -// return vhadd_s16(v1, v2); -// } + // CIR-LABEL: vhadd_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vhadd_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> -// NYI: [[VHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %v1, <2 x i32> %v2) -// NYI: [[VHADD_V3_I:%.*]] = bitcast <2 x i32> [[VHADD_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VHADD_V2_I]] -// int32x2_t test_vhadd_s32(int32x2_t v1, int32x2_t v2) { -// return vhadd_s32(v1, v2); -// } + // LLVM: {{.*}}test_vhadd_s8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]]) + // LLVM: ret <8 x i8> [[VHADD_V_I]] +} -// NYI-LABEL: @test_vhadd_u8( -// NYI: [[VHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %v1, <8 x i8> %v2) -// NYI: ret <8 x i8> [[VHADD_V_I]] -// uint8x8_t test_vhadd_u8(uint8x8_t v1, uint8x8_t v2) { -// return vhadd_u8(v1, v2); -// } +int16x4_t test_vhadd_s16(int16x4_t v1, int16x4_t v2) { + return vhadd_s16(v1, v2); -// NYI-LABEL: @test_vhadd_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> -// NYI: [[VHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %v1, <4 x i16> %v2) -// NYI: [[VHADD_V3_I:%.*]] = bitcast <4 x i16> [[VHADD_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VHADD_V2_I]] -// uint16x4_t test_vhadd_u16(uint16x4_t v1, uint16x4_t v2) { -// return vhadd_u16(v1, v2); -// } + // CIR-LABEL: vhadd_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vhadd_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> -// NYI: [[VHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> %v1, <2 x i32> %v2) -// NYI: [[VHADD_V3_I:%.*]] = bitcast <2 x i32> [[VHADD_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VHADD_V2_I]] -// uint32x2_t test_vhadd_u32(uint32x2_t v1, uint32x2_t v2) { -// return vhadd_u32(v1, v2); -// } + // LLVM: {{.*}}test_vhadd_s16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8> + // LLVM: [[VHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]]) + // LLVM: [[VHADD_V3_I:%.*]] = bitcast <4 x i16> [[VHADD_V2_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VHADD_V2_I]] +} -// NYI-LABEL: @test_vhaddq_s8( -// NYI: [[VHADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %v1, <16 x i8> %v2) -// NYI: ret <16 x i8> [[VHADDQ_V_I]] -// int8x16_t test_vhaddq_s8(int8x16_t v1, int8x16_t v2) { -// return vhaddq_s8(v1, v2); -// } +int32x2_t test_vhadd_s32(int32x2_t v1, int32x2_t v2) { + return vhadd_s32(v1, v2); -// NYI-LABEL: @test_vhaddq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> -// NYI: [[VHADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %v1, <8 x i16> %v2) -// NYI: [[VHADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VHADDQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VHADDQ_V2_I]] -// int16x8_t test_vhaddq_s16(int16x8_t v1, int16x8_t v2) { -// return vhaddq_s16(v1, v2); -// } + // CIR-LABEL: vhadd_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vhaddq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> -// NYI: [[VHADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %v1, <4 x i32> %v2) -// NYI: [[VHADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VHADDQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VHADDQ_V2_I]] -// int32x4_t test_vhaddq_s32(int32x4_t v1, int32x4_t v2) { -// return vhaddq_s32(v1, v2); -// } + // LLVM: {{.*}}test_vhadd_s32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8> + // LLVM: [[VHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]]) + // LLVM: [[VHADD_V3_I:%.*]] = bitcast <2 x i32> [[VHADD_V2_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VHADD_V2_I]] +} -// NYI-LABEL: @test_vhaddq_u8( -// NYI: [[VHADDQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %v1, <16 x i8> %v2) -// NYI: ret <16 x i8> [[VHADDQ_V_I]] -// uint8x16_t test_vhaddq_u8(uint8x16_t v1, uint8x16_t v2) { -// return vhaddq_u8(v1, v2); -// } +uint8x8_t test_vhadd_u8(uint8x8_t v1, uint8x8_t v2) { + return vhadd_u8(v1, v2); -// NYI-LABEL: @test_vhaddq_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> -// NYI: [[VHADDQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %v1, <8 x i16> %v2) -// NYI: [[VHADDQ_V3_I:%.*]] = bitcast <8 x i16> [[VHADDQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VHADDQ_V2_I]] -// uint16x8_t test_vhaddq_u16(uint16x8_t v1, uint16x8_t v2) { -// return vhaddq_u16(v1, v2); -// } + // CIR-LABEL: vhadd_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vhaddq_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> -// NYI: [[VHADDQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> %v1, <4 x i32> %v2) -// NYI: [[VHADDQ_V3_I:%.*]] = bitcast <4 x i32> [[VHADDQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VHADDQ_V2_I]] -// uint32x4_t test_vhaddq_u32(uint32x4_t v1, uint32x4_t v2) { -// return vhaddq_u32(v1, v2); -// } + // LLVM: {{.*}}test_vhadd_u8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VHADD_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]]) + // LLVM: ret <8 x i8> [[VHADD_V_I]] +} + +uint16x4_t test_vhadd_u16(uint16x4_t v1, uint16x4_t v2) { + return vhadd_u16(v1, v2); + + // CIR-LABEL: vhadd_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vhadd_u16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8> + // LLVM: [[VHADD_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]]) + // LLVM: [[VHADD_V3_I:%.*]] = bitcast <4 x i16> [[VHADD_V2_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VHADD_V2_I]] +} + +uint32x2_t test_vhadd_u32(uint32x2_t v1, uint32x2_t v2) { + return vhadd_u32(v1, v2); + + // CIR-LABEL: vhadd_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vhadd_u32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8> + // LLVM: [[VHADD_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]]) + // LLVM: [[VHADD_V3_I:%.*]] = bitcast <2 x i32> [[VHADD_V2_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VHADD_V2_I]] +} + +int8x16_t test_vhaddq_s8(int8x16_t v1, int8x16_t v2) { + return vhaddq_s8(v1, v2); + + // CIR-LABEL: vhaddq_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vhaddq_s8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VHADD_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]]) + // LLVM: ret <16 x i8> [[VHADD_V_I]] +} + +int16x8_t test_vhaddq_s16(int16x8_t v1, int16x8_t v2) { + return vhaddq_s16(v1, v2); + + // CIR-LABEL: vhaddq_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vhaddq_s16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8> + // LLVM: [[VHADD_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]]) + // LLVM: [[VHADD_V3_I:%.*]] = bitcast <8 x i16> [[VHADD_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VHADD_V2_I]] +} + +int32x4_t test_vhaddq_s32(int32x4_t v1, int32x4_t v2) { + return vhaddq_s32(v1, v2); + + // CIR-LABEL: vhaddq_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vhaddq_s32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8> + // LLVM: [[VHADD_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]]) + // LLVM: [[VHADD_V3_I:%.*]] = bitcast <4 x i32> [[VHADD_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VHADD_V2_I]] +} + +uint8x16_t test_vhaddq_u8(uint8x16_t v1, uint8x16_t v2) { + return vhaddq_u8(v1, v2); + + // CIR-LABEL: vhaddq_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vhaddq_u8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) + // LLVM: [[VHADD_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]]) + // LLVM: ret <16 x i8> [[VHADD_V_I]] +} + +uint16x8_t test_vhaddq_u16(uint16x8_t v1, uint16x8_t v2) { + return vhaddq_u16(v1, v2); + + // CIR-LABEL: vhaddq_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vhaddq_u16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8> + // LLVM: [[VHADD_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]]) + // LLVM: [[VHADD_V3_I:%.*]] = bitcast <8 x i16> [[VHADD_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VHADD_V2_I]] +} + +uint32x4_t test_vhaddq_u32(uint32x4_t v1, uint32x4_t v2) { + return vhaddq_u32(v1, v2); + + // CIR-LABEL: vhaddq_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vhaddq_u32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8> + // LLVM: [[VHADD_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]]) + // LLVM: [[VHADD_V3_I:%.*]] = bitcast <4 x i32> [[VHADD_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VHADD_V2_I]] +} // NYI-LABEL: @test_vhsub_s8( // NYI: [[VHSUB_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8> %v1, <8 x i8> %v2) From 8f3b212303427355bf3b0bdeabbd5e85b68403db Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 1 Nov 2024 20:58:54 -0400 Subject: [PATCH 2015/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vshrn_n_v (#1040) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 10 ++ clang/test/CIR/CodeGen/AArch64/neon.c | 153 +++++++++++------- 2 files changed, 109 insertions(+), 54 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 63fe69320721..ce25c8b59b43 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2392,6 +2392,16 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( ops[0] = builder.createIntCast(ops[0], vTy); return buildCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); } + case NEON::BI__builtin_neon_vshrn_n_v: { + mlir::Location loc = getLoc(e->getExprLoc()); + mlir::cir::VectorType srcTy = + builder.getExtendedOrTruncatedElementVectorType( + vTy, true /* extended */, + mlir::cast(vTy.getEltType()).isSigned()); + ops[0] = builder.createBitcast(ops[0], srcTy); + ops[0] = buildCommonNeonShift(builder, loc, srcTy, ops[0], ops[1], false); + return builder.createIntCast(ops[0], vTy); + } case NEON::BI__builtin_neon_vtst_v: case NEON::BI__builtin_neon_vtstq_v: { mlir::Location loc = getLoc(e->getExprLoc()); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 36f9bc337e51..5ad33629b10e 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -6203,65 +6203,110 @@ uint64x2_t test_vqshluq_n_s64(int64x2_t a) { // LLVM: ret <2 x i64> [[VQSHLUQ_N1]] } -// NYI-LABEL: @test_vshrn_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[TMP2:%.*]] = ashr <8 x i16> [[TMP1]], -// NYI: [[VSHRN_N:%.*]] = trunc <8 x i16> [[TMP2]] to <8 x i8> -// NYI: ret <8 x i8> [[VSHRN_N]] -// int8x8_t test_vshrn_n_s16(int16x8_t a) { -// return vshrn_n_s16(a, 3); -// } +int8x8_t test_vshrn_n_s16(int16x8_t a) { + return vshrn_n_s16(a, 3); -// NYI-LABEL: @test_vshrn_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[TMP2:%.*]] = ashr <4 x i32> [[TMP1]], -// NYI: [[VSHRN_N:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16> -// NYI: ret <4 x i16> [[VSHRN_N]] -// int16x4_t test_vshrn_n_s32(int32x4_t a) { -// return vshrn_n_s32(a, 9); -// } + // CIR-LABEL: vshrn_n_s16 + // CIR: [[TGT:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector + // CIR: [[RES:%.*]] = cir.shift(right, [[TGT]] : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[RES]] : !cir.vector), !cir.vector -// NYI-LABEL: @test_vshrn_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[TMP2:%.*]] = ashr <2 x i64> [[TMP1]], -// NYI: [[VSHRN_N:%.*]] = trunc <2 x i64> [[TMP2]] to <2 x i32> -// NYI: ret <2 x i32> [[VSHRN_N]] -// int32x2_t test_vshrn_n_s64(int64x2_t a) { -// return vshrn_n_s64(a, 19); -// } + // LLVM: {{.*}}@test_vshrn_n_s16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[TMP2:%.*]] = ashr <8 x i16> [[TMP1]], splat (i16 3) + // LLVM: [[VSHRN_N:%.*]] = trunc <8 x i16> [[TMP2]] to <8 x i8> + // LLVM: ret <8 x i8> [[VSHRN_N]] +} -// NYI-LABEL: @test_vshrn_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[TMP2:%.*]] = lshr <8 x i16> [[TMP1]], -// NYI: [[VSHRN_N:%.*]] = trunc <8 x i16> [[TMP2]] to <8 x i8> -// NYI: ret <8 x i8> [[VSHRN_N]] -// uint8x8_t test_vshrn_n_u16(uint16x8_t a) { -// return vshrn_n_u16(a, 3); -// } +int16x4_t test_vshrn_n_s32(int32x4_t a) { + return vshrn_n_s32(a, 9); -// NYI-LABEL: @test_vshrn_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[TMP2:%.*]] = lshr <4 x i32> [[TMP1]], -// NYI: [[VSHRN_N:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16> -// NYI: ret <4 x i16> [[VSHRN_N]] -// uint16x4_t test_vshrn_n_u32(uint32x4_t a) { -// return vshrn_n_u32(a, 9); -// } + // CIR-LABEL: vshrn_n_s32 + // CIR: [[TGT:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<9> : !s32i, #cir.int<9> : !s32i, + // CIR-SAME: #cir.int<9> : !s32i, #cir.int<9> : !s32i]> : !cir.vector + // CIR: [[RES:%.*]] = cir.shift(right, [[TGT]] : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[RES]] : !cir.vector), !cir.vector -// NYI-LABEL: @test_vshrn_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[TMP2:%.*]] = lshr <2 x i64> [[TMP1]], -// NYI: [[VSHRN_N:%.*]] = trunc <2 x i64> [[TMP2]] to <2 x i32> -// NYI: ret <2 x i32> [[VSHRN_N]] -// uint32x2_t test_vshrn_n_u64(uint64x2_t a) { -// return vshrn_n_u64(a, 19); -// } + // LLVM: {{.*}}@test_vshrn_n_s32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[TMP2:%.*]] = ashr <4 x i32> [[TMP1]], splat (i32 9) + // LLVM: [[VSHRN_N:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16> + // LLVM: ret <4 x i16> [[VSHRN_N]] +} + +int32x2_t test_vshrn_n_s64(int64x2_t a) { + return vshrn_n_s64(a, 19); + + // CIR-LABEL: vshrn_n_s64 + // CIR: [[TGT:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<19> : !s64i, #cir.int<19> : !s64i]> : !cir.vector + // CIR: [[RES:%.*]] = cir.shift(right, [[TGT]] : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[RES]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}@test_vshrn_n_s64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[TMP2:%.*]] = ashr <2 x i64> [[TMP1]], splat (i64 19) + // LLVM: [[VSHRN_N:%.*]] = trunc <2 x i64> [[TMP2]] to <2 x i32> + // LLVM: ret <2 x i32> [[VSHRN_N]] +} + +uint8x8_t test_vshrn_n_u16(uint16x8_t a) { + return vshrn_n_u16(a, 3); + + // CIR-LABEL: vshrn_n_u16 + // CIR: [[TGT:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, + // CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i]> : !cir.vector + // CIR: [[RES:%.*]] = cir.shift(right, [[TGT]] : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[RES]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}@test_vshrn_n_u16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[TMP2:%.*]] = lshr <8 x i16> [[TMP1]], splat (i16 3) + // LLVM: [[VSHRN_N:%.*]] = trunc <8 x i16> [[TMP2]] to <8 x i8> + // LLVM: ret <8 x i8> [[VSHRN_N]] +} + +uint16x4_t test_vshrn_n_u32(uint32x4_t a) { + return vshrn_n_u32(a, 9); + + // CIR-LABEL: vshrn_n_u32 + // CIR: [[TGT:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<9> : !u32i, #cir.int<9> : !u32i, + // CIR-SAME: #cir.int<9> : !u32i, #cir.int<9> : !u32i]> : !cir.vector + // CIR: [[RES:%.*]] = cir.shift(right, [[TGT]] : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[RES]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}@test_vshrn_n_u32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[TMP2:%.*]] = lshr <4 x i32> [[TMP1]], splat (i32 9) + // LLVM: [[VSHRN_N:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16> + // LLVM: ret <4 x i16> [[VSHRN_N]] +} + +uint32x2_t test_vshrn_n_u64(uint64x2_t a) { + return vshrn_n_u64(a, 19); + + // CIR-LABEL: vshrn_n_u64 + // CIR: [[TGT:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<19> : !u64i, #cir.int<19> : !u64i]> : !cir.vector + // CIR: [[RES:%.*]] = cir.shift(right, [[TGT]] : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.cast(integral, [[RES]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}@test_vshrn_n_u64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[TMP2:%.*]] = lshr <2 x i64> [[TMP1]], splat (i64 19) + // LLVM: [[VSHRN_N:%.*]] = trunc <2 x i64> [[TMP2]] to <2 x i32> +} // NYI-LABEL: @test_vshrn_high_n_s16( // NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> From 51c97820d191376cfd1c97045cdd7716acafd13b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 1 Nov 2024 17:54:10 -0700 Subject: [PATCH 2016/2301] [CIR][NFC] Remove leftovers from previous commit --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 +- clang/test/CIR/CodeGen/derived-cast.cpp | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8c326bb25cc4..15f9cb5b0eba 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3142,7 +3142,7 @@ def VecShuffleDynamicOp : CIR_Op<"vec.shuffle.dynamic", } //===----------------------------------------------------------------------===// -// BaseClassAddr & BaseClassAddr +// BaseClassAddr & DerivedClassAddrOp //===----------------------------------------------------------------------===// def BaseClassAddrOp : CIR_Op<"base_class_addr"> { diff --git a/clang/test/CIR/CodeGen/derived-cast.cpp b/clang/test/CIR/CodeGen/derived-cast.cpp index 3c89deb50c4a..c65bbe2dcc38 100644 --- a/clang/test/CIR/CodeGen/derived-cast.cpp +++ b/clang/test/CIR/CodeGen/derived-cast.cpp @@ -19,10 +19,6 @@ class X : public A, public B { A *B::getAsA() { return static_cast(this); - - // CHECK-LABEL: define{{.*}} ptr @_ZN1B6getAsAEv - // CHECK: %[[THIS:.*]] = load ptr, ptr - // CHECK-NEXT: getelementptr inbounds i8, ptr %[[THIS]], i64 -4 } // CIR-LABEL: @_ZN1B6getAsAEv From 7c849de2a9876edf01dd11c4f7a08d442a1e6d9e Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 1 Nov 2024 18:29:12 -0700 Subject: [PATCH 2017/2301] [CIR][LowerToLLVM] Fold base address computation if offset is zero --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 ++++++ clang/test/CIR/CodeGen/derived-cast.cpp | 5 ++--- clang/test/CIR/CodeGen/vtt.cpp | 3 +-- clang/test/CIR/Lowering/derived-to-base.cpp | 17 ++++++++--------- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index bfe3da45f069..51483a160c2b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -837,6 +837,12 @@ class CIRBaseClassAddrOpLowering adaptor.getOffset().getZExtValue()}; mlir::Type byteType = mlir::IntegerType::get(resultType.getContext(), 8, mlir::IntegerType::Signless); + if (adaptor.getOffset().getZExtValue() == 0) { + rewriter.replaceOpWithNewOp( + baseClassOp, resultType, adaptor.getDerivedAddr()); + return mlir::success(); + } + if (baseClassOp.getAssumeNotNull()) { rewriter.replaceOpWithNewOp( baseClassOp, resultType, byteType, derivedAddr, offset); diff --git a/clang/test/CIR/CodeGen/derived-cast.cpp b/clang/test/CIR/CodeGen/derived-cast.cpp index c65bbe2dcc38..28109f553a5e 100644 --- a/clang/test/CIR/CodeGen/derived-cast.cpp +++ b/clang/test/CIR/CodeGen/derived-cast.cpp @@ -36,6 +36,5 @@ A *B::getAsA() { // LLVM: store ptr %[[VAL_2:.*]], ptr %[[VAL_0:.*]], align 8 // LLVM: %[[VAL_3:.*]] = load ptr, ptr %[[VAL_0]], align 8 // LLVM: %[[VAL_4:.*]] = getelementptr i8, ptr %[[VAL_3]], i32 -4 -// LLVM: %[[VAL_5:.*]] = icmp eq ptr %[[VAL_4]], null, -// LLVM: %[[VAL_6:.*]] = getelementptr i8, ptr %[[VAL_4]], i32 0, -// LLVM: %[[VAL_7:.*]] = select i1 %[[VAL_5]], ptr %[[VAL_4]], ptr %[[VAL_6]], \ No newline at end of file +// LLVM-NOT: select i1 +// LLVM: ret ptr \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index 797e94475ede..16203276c544 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -143,8 +143,7 @@ int f() { // LLVM: %[[THIS:.*]] = load ptr, ptr %2, align 8 // LLVM: %[[BASE_A:.*]] = getelementptr i8, ptr %[[THIS]], i32 40 // LLVM: call void @_ZN1AC2Ev(ptr %[[BASE_A]]) -// LLVM: %[[BASE_B:.*]] = getelementptr i8, ptr %[[THIS]], i32 0 -// LLVM: call void @_ZN1BC2Ev(ptr %[[BASE_B]], ptr getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i32 0, i32 1)) +// LLVM: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i32 0, i32 1)) // LLVM: %[[BASE_C:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 // LLVM: call void @_ZN1CC2Ev(ptr %[[BASE_C]], ptr getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i32 0, i32 3)) // LLVM: ret void diff --git a/clang/test/CIR/Lowering/derived-to-base.cpp b/clang/test/CIR/Lowering/derived-to-base.cpp index ef02ed0639b0..6e29c16c6608 100644 --- a/clang/test/CIR/Lowering/derived-to-base.cpp +++ b/clang/test/CIR/Lowering/derived-to-base.cpp @@ -8,21 +8,20 @@ void test_multi_base() { Derived d; Base2& bref = d; // no null check needed - // LLVM: %7 = getelementptr i8, ptr %1, i32 4 + // LLVM: getelementptr i8, ptr %[[D:.*]], i32 4 Base2* bptr = &d; // has null pointer check - // LLVM: %8 = icmp eq ptr %1, null - // LLVM: %9 = getelementptr i8, ptr %1, i32 4 - // LLVM: %10 = select i1 %8, ptr %1, ptr %9 + // LLVM: %[[CHECK:.*]] = icmp eq ptr %[[D]], null + // LLVM: %[[BPTR:.*]] = getelementptr i8, ptr %[[D]], i32 4 + // LLVM: select i1 %[[CHECK]], ptr %[[D]], ptr %[[BPTR]] int a = d.a; - // LLVM: %11 = getelementptr i8, ptr %1, i32 0 - // LLVM: %12 = getelementptr %struct.Base1, ptr %11, i32 0, i32 0 + // LLVM: getelementptr %struct.Base1, ptr %[[D]], i32 0, i32 0 int b = d.b; - // LLVM: %14 = getelementptr i8, ptr %1, i32 4 - // LLVM: %15 = getelementptr %struct.Base2, ptr %14, i32 0, i32 0 + // LLVM: %[[BASE2_OFFSET:.*]] = getelementptr i8, ptr %[[D]], i32 4 + // LLVM: %[[BASE2:.*]] = getelementptr %struct.Base2, ptr %[[BASE2_OFFSET]], i32 0, i32 0 int c = d.c; - // LLVM: %17 = getelementptr %struct.Derived, ptr %1, i32 0, i32 2 + // LLVM: getelementptr %struct.Derived, ptr %[[D]], i32 0, i32 2 } From d2fcfecc2deca4c12cccf2891c8da7280cda43f5 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 1 Nov 2024 23:51:16 -0400 Subject: [PATCH 2018/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vshlq_v (#1042) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 5 + clang/test/CIR/CodeGen/AArch64/neon.c | 172 +++++++++++------- 2 files changed, 111 insertions(+), 66 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index ce25c8b59b43..8d860c16d6b8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2450,6 +2450,11 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "llvm.aarch64.neon.srhadd"; break; } + case NEON::BI__builtin_neon_vshlq_v: { + intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.ushl" + : "llvm.aarch64.neon.sshl"; + break; + } case NEON::BI__builtin_neon_vhadd_v: case NEON::BI__builtin_neon_vhaddq_v: { intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.uhadd" diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 5ad33629b10e..5a8b5969be47 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -3405,79 +3405,119 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { // return vshl_u64(a, b); // } -// NYI-LABEL: @test_vshlq_s8( -// NYI: [[VSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %a, <16 x i8> %b) -// NYI: ret <16 x i8> [[VSHLQ_V_I]] -// int8x16_t test_vshlq_s8(int8x16_t a, int8x16_t b) { -// return vshlq_s8(a, b); -// } +int8x16_t test_vshlq_s8(int8x16_t a, int8x16_t b) { + return vshlq_s8(a, b); -// NYI-LABEL: @test_vshlq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sshl.v8i16(<8 x i16> %a, <8 x i16> %b) -// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VSHLQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VSHLQ_V2_I]] -// int16x8_t test_vshlq_s16(int16x8_t a, int16x8_t b) { -// return vshlq_s16(a, b); -// } + // CIR-LABEL: vshlq_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshlq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %a, <4 x i32> %b) -// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VSHLQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VSHLQ_V2_I]] -// int32x4_t test_vshlq_s32(int32x4_t a, int32x4_t b) { -// return vshlq_s32(a, b); -// } + // LLVM: {{.*}}test_vshlq_s8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) + // LLVM: ret <16 x i8> [[VSHLQ_V_I]] +} -// NYI-LABEL: @test_vshlq_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> -// NYI: [[VSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> %a, <2 x i64> %b) -// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VSHLQ_V2_I]] to <16 x i8> -// NYI: ret <2 x i64> [[VSHLQ_V2_I]] -// int64x2_t test_vshlq_s64(int64x2_t a, int64x2_t b) { -// return vshlq_s64(a, b); -// } +int16x8_t test_vshlq_s16(int16x8_t a, int16x8_t b) { + return vshlq_s16(a, b); -// NYI-LABEL: @test_vshlq_u8( -// NYI: [[VSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.ushl.v16i8(<16 x i8> %a, <16 x i8> %b) -// NYI: ret <16 x i8> [[VSHLQ_V_I]] -// uint8x16_t test_vshlq_u8(uint8x16_t a, int8x16_t b) { -// return vshlq_u8(a, b); -// } + // CIR-LABEL: vshlq_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshlq_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> %a, <8 x i16> %b) -// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VSHLQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VSHLQ_V2_I]] -// uint16x8_t test_vshlq_u16(uint16x8_t a, int16x8_t b) { -// return vshlq_u16(a, b); -// } + // LLVM: {{.*}}test_vshlq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sshl.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) + // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VSHLQ_V2_I]] +} -// NYI-LABEL: @test_vshlq_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> %a, <4 x i32> %b) -// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VSHLQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VSHLQ_V2_I]] -// uint32x4_t test_vshlq_u32(uint32x4_t a, int32x4_t b) { -// return vshlq_u32(a, b); -// } +int32x4_t test_vshlq_s32(int32x4_t a, int32x4_t b) { + return vshlq_s32(a, b); -// NYI-LABEL: @test_vshlq_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> -// NYI: [[VSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.ushl.v2i64(<2 x i64> %a, <2 x i64> %b) -// NYI: [[VSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VSHLQ_V2_I]] to <16 x i8> -// NYI: ret <2 x i64> [[VSHLQ_V2_I]] -// uint64x2_t test_vshlq_u64(uint64x2_t a, int64x2_t b) { -// return vshlq_u64(a, b); -// } + // CIR-LABEL: vshlq_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshlq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) + // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VSHLQ_V2_I]] +} + +int64x2_t test_vshlq_s64(int64x2_t a, int64x2_t b) { + return vshlq_s64(a, b); + + // CIR-LABEL: vshlq_s64 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshlq_s64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> [[A]], <2 x i64> [[B]]) + // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <2 x i64> [[VSHLQ_V2_I]] +} + +uint8x16_t test_vshlq_u8(uint8x16_t a, int8x16_t b) { + return vshlq_u8(a, b); + + // CIR-LABEL: vshlq_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshlq_u8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.ushl.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) + // LLVM: ret <16 x i8> [[VSHLQ_V_I]] +} + +uint16x8_t test_vshlq_u16(uint16x8_t a, int16x8_t b) { + return vshlq_u16(a, b); + + // CIR-LABEL: vshlq_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshlq_u16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) + // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VSHLQ_V2_I]] +} + +uint32x4_t test_vshlq_u32(uint32x4_t a, int32x4_t b) { + return vshlq_u32(a, b); + + // CIR-LABEL: vshlq_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshlq_u32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) + // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VSHLQ_V2_I]] +} + +uint64x2_t test_vshlq_u64(uint64x2_t a, int64x2_t b) { + return vshlq_u64(a, b); + + // CIR-LABEL: vshlq_u64 + // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshlq_u64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.ushl.v2i64(<2 x i64> [[A]], <2 x i64> [[B]]) + // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <2 x i64> [[VSHLQ_V2_I]] +} // NYI-LABEL: @test_vqshl_s8( // NYI: [[VQSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %a, <8 x i8> %b) From c0f5bf968cf99922199d46bbee9a9a6b88762598 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Mon, 4 Nov 2024 09:21:30 -0800 Subject: [PATCH 2019/2301] Remove disablement of Wdeprecated-declarations --- clang/CMakeLists.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt index b5a8d465e2d4..b79e570667b2 100644 --- a/clang/CMakeLists.txt +++ b/clang/CMakeLists.txt @@ -25,8 +25,6 @@ list(INSERT CMAKE_MODULE_PATH 0 include(GNUInstallDirs) include(GetDarwinLinkerVersion) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations") - if(CLANG_BUILT_STANDALONE) set(CMAKE_CXX_STANDARD 17 CACHE STRING "C++ standard to conform to") set(CMAKE_CXX_STANDARD_REQUIRED YES) From 58a3223ec09a2c7c12cb3387256b4d8b1df2223f Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Tue, 5 Nov 2024 02:18:39 +0800 Subject: [PATCH 2020/2301] [CIR][CIRGen] Add support for memset (#1026) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 30 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 6 ++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 13 ++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 19 +++++++++++- clang/test/CIR/CodeGen/libc.c | 8 +++++ 5 files changed, 73 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 15f9cb5b0eba..700d9769231d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4045,6 +4045,36 @@ def MemMoveOp : CIR_MemCpyOp<"libc.memmove"> { `:` qualified(type($dst)) `,` type($len) }]; } +//===----------------------------------------------------------------------===// +// MemSetOp +//===----------------------------------------------------------------------===// + +def MemSetOp : CIR_Op<"libc.memset"> { + let arguments = (ins Arg:$dst, + SInt32:$val, + PrimitiveUInt:$len); + let summary = "Equivalent to libc's `memset`"; + let description = [{ + Given the CIR pointer, `dst`, `cir.libc.memset` will set the first `len` + bytes of the memory pointed by `dst` to the specified `val`. + + Examples: + + ```mlir + // Set 2 bytes from a struct to 0: + %2 = cir.const #cir.int<2> : !u32i + %3 = cir.const #cir.int<0> : !u32i + %zero = cir.cast(integral, %3 : !s32i), !u8i + cir.libc.memset %2 bytes from %struct set to %zero : !cir.ptr, !s32i, !u64i + ``` + }]; + + let assemblyFormat = [{ + $len `bytes` `from` $dst `set` `to` $val attr-dict + `:` qualified(type($dst)) `,` type($val) `,` type($len) + }]; + let hasVerifier = 0; +} //===----------------------------------------------------------------------===// // MemChrOp diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 9844ef32a15b..a940a7b4dc1e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -624,6 +624,12 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return create(loc, dst, src, len); } + mlir::cir::MemSetOp createMemSet(mlir::Location loc, mlir::Value dst, + mlir::Value val, mlir::Value len) { + val = createIntCast(val, mlir::cir::IntType::get(getContext(), 32, true)); + return create(loc, dst, val, len); + } + mlir::Value createNeg(mlir::Value value) { if (auto intTy = mlir::dyn_cast(value.getType())) { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index c97f0e382cc1..ac663170cd22 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1449,8 +1449,17 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(Dest.getPointer()); } case Builtin::BImemset: - case Builtin::BI__builtin_memset: - llvm_unreachable("BImemset like NYI"); + case Builtin::BI__builtin_memset: { + Address Dest = buildPointerWithAlignment(E->getArg(0)); + mlir::Value ByteVal = buildScalarExpr(E->getArg(1)); + mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); + buildNonNullArgCheck(RValue::get(Dest.getPointer()), + E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), + FD, 0); + builder.createMemSet(getLoc(E->getSourceRange()), Dest.getPointer(), + ByteVal, SizeVal); + return RValue::get(Dest.getPointer()); + } case Builtin::BI__builtin_memset_inline: llvm_unreachable("BI__builtin_memset_inline NYI"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 51483a160c2b..7b10bb166252 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -740,6 +740,23 @@ class CIRMemMoveOpLowering } }; +class CIRMemsetOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + mlir::LogicalResult + matchAndRewrite(mlir::cir::MemSetOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto converted = rewriter.create( + op.getLoc(), mlir::IntegerType::get(op.getContext(), 8), + adaptor.getVal()); + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), converted, adaptor.getLen(), + /*isVolatile=*/false); + return mlir::success(); + } +}; + static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, mlir::Value llvmSrc, mlir::Type llvmDstIntTy, bool isUnsigned, uint64_t cirSrcWidth, @@ -4260,7 +4277,7 @@ void populateCIRToLLVMConversionPatterns( CIRFreeExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, CIRBaseClassAddrOpLowering, CIRDerivedClassAddrOpLowering, CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering, - CIRMemMoveOpLowering + CIRMemMoveOpLowering, CIRMemsetOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/test/CIR/CodeGen/libc.c b/clang/test/CIR/CodeGen/libc.c index d1246d3a1447..c11d3bc764e5 100644 --- a/clang/test/CIR/CodeGen/libc.c +++ b/clang/test/CIR/CodeGen/libc.c @@ -23,6 +23,14 @@ void testMemmove(void *src, const void *dst, unsigned long size) { // LLVM: call void @llvm.memmove.{{.+}}.i64(ptr %{{.+}}, ptr %{{.+}}, i64 %{{.+}}, i1 false), } +// Should generate CIR's builtin memset op. +void *memset(void *, int, unsigned long); +void testMemset(void *dst, int val, unsigned long size) { + memset(dst, val, size); + // CHECK: cir.libc.memset %{{.+}} bytes from %{{.+}} set to %{{.+}} : !cir.ptr, !s32i, !u64i + // LLVM: call void @llvm.memset.{{.+}}.i64(ptr %{{.+}}, i8 %{{.+}}, i64 %{{.+}}, i1 false) +} + double fabs(double); double testFabs(double x) { return fabs(x); From 970580e7c102e70ff57703a2f43c8cd49598eaa4 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 4 Nov 2024 14:40:25 -0500 Subject: [PATCH 2021/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vshr_n_v and neon_vshrq_n_v (#1045) Note in the test file, `test_vshrq_n_s32_32` and `test_vshr_n_u16_16` are addition to what traditional clang code gen already has. They tested the case where shift amount is the same as element size ( compiler errors if shift amount is greater than elem size). OG didn't test that case here, but [has somewhat tested elsewhere](https://github.com/llvm/clangir/blob/3d16a0f8499c43497a18a46d838313ab4deeadea/clang/test/CodeGen/aarch64-neon-shifts.c#L23) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 56 ++- clang/test/CIR/CodeGen/AArch64/neon.c | 371 ++++++++++++------ 2 files changed, 292 insertions(+), 135 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 8d860c16d6b8..5b68f31797d5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2198,6 +2198,45 @@ static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, return builder.create(loc, vecTy, constVecAttr); } +/// Build ShiftOp of vector type whose shift amount is a vector built +/// from a constant integer using `buildNeonShiftVector` function +static mlir::Value buildCommonNeonShift(CIRGenBuilderTy &builder, + mlir::Location loc, + mlir::cir::VectorType resTy, + mlir::Value shifTgt, + mlir::Value shiftAmt, bool shiftLeft, + bool negAmt = false) { + shiftAmt = buildNeonShiftVector(builder, shiftAmt, resTy, loc, negAmt); + return builder.create( + loc, resTy, builder.createBitcast(shifTgt, resTy), shiftAmt, shiftLeft); +} + +/// Right-shift a vector by a constant. +static mlir::Value buildNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec, + mlir::Value shiftVal, + mlir::cir::VectorType vecTy, bool usgn, + mlir::Location loc) { + CIRGenBuilderTy &builder = cgf.getBuilder(); + int64_t shiftAmt = getIntValueFromConstOp(shiftVal); + int eltSize = cgf.CGM.getDataLayout().getTypeSizeInBits(vecTy.getEltType()); + + shiftVec = builder.createBitcast(shiftVec, vecTy); + // lshr/ashr are undefined when the shift amount is equal to the vector + // element size. + if (shiftAmt == eltSize) { + if (usgn) { + // Right-shifting an unsigned value by its size yields 0. + return builder.getZero(loc, vecTy); + } + // Right-shifting a signed value by its size is equivalent + // to a shift of size-1. + --shiftAmt; + shiftVal = builder.getConstInt(loc, vecTy.getEltType(), shiftAmt); + } + return buildCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal, + false /* right shift */); +} + mlir::Value buildNeonCall(CIRGenBuilderTy &builder, llvm::SmallVector argTypes, llvm::SmallVectorImpl &args, @@ -2258,19 +2297,6 @@ buildCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, return builder.createBitcast(res, resultType); } -/// Build ShiftOp of vector type whose shift amount is a vector built -/// from a constant integer using `buildNeonShiftVector` function -static mlir::Value buildCommonNeonShift(CIRGenBuilderTy &builder, - mlir::Location loc, - mlir::cir::VectorType resTy, - mlir::Value shifTgt, - mlir::Value shiftAmt, bool shiftLeft, - bool negAmt = false) { - shiftAmt = buildNeonShiftVector(builder, shiftAmt, resTy, loc, negAmt); - return builder.create( - loc, resTy, builder.createBitcast(shifTgt, resTy), shiftAmt, shiftLeft); -} - mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, @@ -2402,6 +2428,10 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( ops[0] = buildCommonNeonShift(builder, loc, srcTy, ops[0], ops[1], false); return builder.createIntCast(ops[0], vTy); } + case NEON::BI__builtin_neon_vshr_n_v: + case NEON::BI__builtin_neon_vshrq_n_v: + return buildNeonRShiftImm(*this, ops[0], ops[1], vTy, isUnsigned, + getLoc(e->getExprLoc())); case NEON::BI__builtin_neon_vtst_v: case NEON::BI__builtin_neon_vtstq_v: { mlir::Location loc = getLoc(e->getExprLoc()); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 5a8b5969be47..09b881782193 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -5118,123 +5118,268 @@ uint64x2_t test_vshlq_n_u64(uint64x2_t a) { // LLVM: ret <2 x i64> [[VSHL_N]] } -// NYI-LABEL: @test_vshr_n_s8( -// NYI: [[VSHR_N:%.*]] = ashr <8 x i8> %a, -// NYI: ret <8 x i8> [[VSHR_N]] -// int8x8_t test_vshr_n_s8(int8x8_t a) { -// return vshr_n_s8(a, 3); -// } +int8x8_t test_vshr_n_s8(int8x8_t a) { + return vshr_n_s8(a, 3); -// NYI-LABEL: @test_vshr_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[VSHR_N:%.*]] = ashr <4 x i16> [[TMP1]], -// NYI: ret <4 x i16> [[VSHR_N]] -// int16x4_t test_vshr_n_s16(int16x4_t a) { -// return vshr_n_s16(a, 3); -// } + // CIR-LABEL: vshr_n_s8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshr_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[VSHR_N:%.*]] = ashr <2 x i32> [[TMP1]], -// NYI: ret <2 x i32> [[VSHR_N]] -// int32x2_t test_vshr_n_s32(int32x2_t a) { -// return vshr_n_s32(a, 3); -// } + // LLVM: {{.*}}test_vshr_n_s8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VSHR_N:%.*]] = ashr <8 x i8> [[A]], splat (i8 3) + // LLVM: ret <8 x i8> [[VSHR_N]] +} -// NYI-LABEL: @test_vshrq_n_s8( -// NYI: [[VSHR_N:%.*]] = ashr <16 x i8> %a, -// NYI: ret <16 x i8> [[VSHR_N]] -// int8x16_t test_vshrq_n_s8(int8x16_t a) { -// return vshrq_n_s8(a, 3); -// } +int16x4_t test_vshr_n_s16(int16x4_t a) { + return vshr_n_s16(a, 3); -// NYI-LABEL: @test_vshrq_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VSHR_N:%.*]] = ashr <8 x i16> [[TMP1]], -// NYI: ret <8 x i16> [[VSHR_N]] -// int16x8_t test_vshrq_n_s16(int16x8_t a) { -// return vshrq_n_s16(a, 3); -// } + // CIR-LABEL: vshr_n_s16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshrq_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VSHR_N:%.*]] = ashr <4 x i32> [[TMP1]], -// NYI: ret <4 x i32> [[VSHR_N]] -// int32x4_t test_vshrq_n_s32(int32x4_t a) { -// return vshrq_n_s32(a, 3); -// } + // LLVM: {{.*}}test_vshr_n_s16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[VSHR_N:%.*]] = ashr <4 x i16> [[TMP1]], splat (i16 3) + // LLVM: ret <4 x i16> [[VSHR_N]] +} -// NYI-LABEL: @test_vshrq_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VSHR_N:%.*]] = ashr <2 x i64> [[TMP1]], -// NYI: ret <2 x i64> [[VSHR_N]] -// int64x2_t test_vshrq_n_s64(int64x2_t a) { -// return vshrq_n_s64(a, 3); -// } +int32x2_t test_vshr_n_s32(int32x2_t a) { + return vshr_n_s32(a, 3); -// NYI-LABEL: @test_vshr_n_u8( -// NYI: [[VSHR_N:%.*]] = lshr <8 x i8> %a, -// NYI: ret <8 x i8> [[VSHR_N]] -// uint8x8_t test_vshr_n_u8(uint8x8_t a) { -// return vshr_n_u8(a, 3); -// } + // CIR-LABEL: vshr_n_s32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshr_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[VSHR_N:%.*]] = lshr <4 x i16> [[TMP1]], -// NYI: ret <4 x i16> [[VSHR_N]] -// uint16x4_t test_vshr_n_u16(uint16x4_t a) { -// return vshr_n_u16(a, 3); -// } + // LLVM: {{.*}}test_vshr_n_s32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[VSHR_N:%.*]] = ashr <2 x i32> [[TMP1]], splat (i32 3) + // LLVM: ret <2 x i32> [[VSHR_N]] +} -// NYI-LABEL: @test_vshr_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[VSHR_N:%.*]] = lshr <2 x i32> [[TMP1]], -// NYI: ret <2 x i32> [[VSHR_N]] -// uint32x2_t test_vshr_n_u32(uint32x2_t a) { -// return vshr_n_u32(a, 3); -// } +int64x1_t test_vshr_n_s64(int64x1_t a) { + return vshr_n_s64(a, 3); -// NYI-LABEL: @test_vshrq_n_u8( -// NYI: [[VSHR_N:%.*]] = lshr <16 x i8> %a, -// NYI: ret <16 x i8> [[VSHR_N]] -// uint8x16_t test_vshrq_n_u8(uint8x16_t a) { -// return vshrq_n_u8(a, 3); -// } + // CIR-LABEL: vshr_n_s64 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshrq_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VSHR_N:%.*]] = lshr <8 x i16> [[TMP1]], -// NYI: ret <8 x i16> [[VSHR_N]] -// uint16x8_t test_vshrq_n_u16(uint16x8_t a) { -// return vshrq_n_u16(a, 3); -// } + // LLVM: {{.*}}test_vshr_n_s64(<1 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[VSHR_N:%.*]] = ashr <1 x i64> [[TMP1]], splat (i64 3) + // LLVM: ret <1 x i64> [[VSHR_N]] +} -// NYI-LABEL: @test_vshrq_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VSHR_N:%.*]] = lshr <4 x i32> [[TMP1]], -// NYI: ret <4 x i32> [[VSHR_N]] -// uint32x4_t test_vshrq_n_u32(uint32x4_t a) { -// return vshrq_n_u32(a, 3); -// } +int8x16_t test_vshrq_n_s8(int8x16_t a) { + return vshrq_n_s8(a, 3); -// NYI-LABEL: @test_vshrq_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VSHR_N:%.*]] = lshr <2 x i64> [[TMP1]], -// NYI: ret <2 x i64> [[VSHR_N]] -// uint64x2_t test_vshrq_n_u64(uint64x2_t a) { -// return vshrq_n_u64(a, 3); -// } + // CIR-LABEL: vshrq_n_s8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshrq_n_s8(<16 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VSHR_N:%.*]] = ashr <16 x i8> [[A]], splat (i8 3) + // LLVM: ret <16 x i8> [[VSHR_N]] +} + +int16x8_t test_vshrq_n_s16(int16x8_t a) { + return vshrq_n_s16(a, 3); + + // CIR-LABEL: vshrq_n_s16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR-SAME: #cir.int<3> : !s16i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshrq_n_s16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VSHR_N:%.*]] = ashr <8 x i16> [[TMP1]], splat (i16 3) + // LLVM: ret <8 x i16> [[VSHR_N]] +} + +int32x4_t test_vshrq_n_s32(int32x4_t a) { + return vshrq_n_s32(a, 3); + + // CIR-LABEL: vshrq_n_s32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i, + // CIR-SAME: #cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshrq_n_s32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VSHR_N:%.*]] = ashr <4 x i32> [[TMP1]], splat (i32 3) + // LLVM: ret <4 x i32> [[VSHR_N]] +} + +// Vector lashr/ashr are undefined when the shift amount is equal to the vector +// element size. Thus in code gen, for singed input, we make the shift amount +// one less than the vector element size. +int32x4_t test_vshrq_n_s32_32(int32x4_t a) { + return vshrq_n_s32(a, 32); + + // CIR-LABEL: vshrq_n_s32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<31> : !s32i, #cir.int<31> : !s32i, + // CIR-SAME: #cir.int<31> : !s32i, #cir.int<31> : !s32i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshrq_n_s32_32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VSHR_N:%.*]] = ashr <4 x i32> [[TMP1]], splat (i32 31) + // LLVM: ret <4 x i32> [[VSHR_N]] +} + +int64x2_t test_vshrq_n_s64(int64x2_t a) { + return vshrq_n_s64(a, 3); + + // CIR-LABEL: vshrq_n_s64 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i, #cir.int<3> : !s64i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshrq_n_s64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VSHR_N:%.*]] = ashr <2 x i64> [[TMP1]], splat (i64 3) + // LLVM: ret <2 x i64> [[VSHR_N]] +} + +uint8x8_t test_vshr_n_u8(uint8x8_t a) { + return vshr_n_u8(a, 3); + + // CIR-LABEL: vshr_n_u8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, + // CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshr_n_u8(<8 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VSHR_N:%.*]] = lshr <8 x i8> [[A]], splat (i8 3) + // LLVM: ret <8 x i8> [[VSHR_N]] +} + +uint16x4_t test_vshr_n_u16(uint16x4_t a) { + return vshr_n_u16(a, 3); + + // CIR-LABEL: vshr_n_u16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i, + // CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshr_n_u16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[VSHR_N:%.*]] = lshr <4 x i16> [[TMP1]], splat (i16 3) + // LLVM: ret <4 x i16> [[VSHR_N]] +} + +// Vector lashr/ashr are undefined when the shift amount is equal to the vector +// element size. Thus in code gen, for unsinged input, return zero vector. +uint16x4_t test_vshr_n_u16_16(uint16x4_t a) { + return vshr_n_u16(a, 16); + + // CIR-LABEL: vshr_n_u16 + // CIR: {{%.*}} = cir.const #cir.int<16> : !s32i + // CIR: {{%.*}} = cir.const #cir.zero : !cir.vector + // CIR-NOT: cir.shift + + // LLVM: {{.*}}test_vshr_n_u16_16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: ret <4 x i16> zeroinitializer +} + +uint32x2_t test_vshr_n_u32(uint32x2_t a) { + return vshr_n_u32(a, 3); + + // CIR-LABEL: vshr_n_u32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u32i, #cir.int<3> : !u32i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshr_n_u32(<2 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[VSHR_N:%.*]] = lshr <2 x i32> [[TMP1]], splat (i32 3) + // LLVM: ret <2 x i32> [[VSHR_N]] +} + +uint64x1_t test_vshr_n_u64(uint64x1_t a) { + return vshr_n_u64(a, 1); + + // CIR-LABEL: vshr_n_u64 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<1> : !u64i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshr_n_u64(<1 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[VSHR_N:%.*]] = lshr <1 x i64> [[TMP1]], splat (i64 1) + // LLVM: ret <1 x i64> [[VSHR_N]] +} + +uint8x16_t test_vshrq_n_u8(uint8x16_t a) { + return vshrq_n_u8(a, 3); + + // CIR-LABEL: vshrq_n_u8 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, + // CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, + // CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, + // CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i]> : !cir.vector + + // LLVM: {{.*}}test_vshrq_n_u8(<16 x i8>{{.*}}[[A:%.*]]) + // LLVM: [[VSHR_N:%.*]] = lshr <16 x i8> [[A]], splat (i8 3) + // LLVM: ret <16 x i8> [[VSHR_N]] +} + +uint16x8_t test_vshrq_n_u16(uint16x8_t a) { + return vshrq_n_u16(a, 3); + + // CIR-LABEL: vshrq_n_u16 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, + // CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, + // CIR-SAME: #cir.int<3> : !u16i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshrq_n_u16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VSHR_N:%.*]] = lshr <8 x i16> [[TMP1]], splat (i16 3) + // LLVM: ret <8 x i16> [[VSHR_N]] +} + +uint32x4_t test_vshrq_n_u32(uint32x4_t a) { + return vshrq_n_u32(a, 3); + + // CIR-LABEL: vshrq_n_u32 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u32i, #cir.int<3> : !u32i, + // CIR-SAME: #cir.int<3> : !u32i, #cir.int<3> : !u32i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshrq_n_u32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VSHR_N:%.*]] = lshr <4 x i32> [[TMP1]], splat (i32 3) + // LLVM: ret <4 x i32> [[VSHR_N]] +} + +uint64x2_t test_vshrq_n_u64(uint64x2_t a) { + return vshrq_n_u64(a, 3); + + // CIR-LABEL: vshrq_n_u64 + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u64i, #cir.int<3> : !u64i]> : !cir.vector + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshrq_n_u64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VSHR_N:%.*]] = lshr <2 x i64> [[TMP1]], splat (i64 3) + // LLVM: ret <2 x i64> [[VSHR_N]] +} // NYI-LABEL: @test_vsra_n_s8( // NYI: [[VSRA_N:%.*]] = ashr <8 x i8> %b, @@ -14288,15 +14433,6 @@ void test_vst1q_s64(int64_t *a, int64x2_t b) { // return (int64_t)vshrd_n_s64(a, 1); // } -// NYI-LABEL: @test_vshr_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> -// NYI: [[VSHR_N:%.*]] = ashr <1 x i64> [[TMP1]], -// NYI: ret <1 x i64> [[VSHR_N]] -// int64x1_t test_vshr_n_s64(int64x1_t a) { -// return vshr_n_s64(a, 1); -// } - // NYI-LABEL: @test_vshrd_n_u64( // NYI: ret i64 0 // uint64_t test_vshrd_n_u64(uint64_t a) { @@ -14310,15 +14446,6 @@ void test_vst1q_s64(int64_t *a, int64x2_t b) { // return vshrd_n_u64(a, 64); // } -// NYI-LABEL: @test_vshr_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> -// NYI: [[VSHR_N:%.*]] = lshr <1 x i64> [[TMP1]], -// NYI: ret <1 x i64> [[VSHR_N]] -// uint64x1_t test_vshr_n_u64(uint64x1_t a) { -// return vshr_n_u64(a, 1); -// } - // NYI-LABEL: @test_vrshrd_n_s64( // NYI: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 %a, i64 -63) // NYI: ret i64 [[VRSHR_N]] From ccd8de461e8dc8e69c6e617c73c66334dbf1f1b4 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Tue, 5 Nov 2024 03:50:54 +0800 Subject: [PATCH 2022/2301] [ClangIR][CIRGen] Handle nested union in arrays of struct (#1007) Reproducer: ``` struct nested { union { const char *single; const char *const *multi; } output; }; static const char * const test[] = { "test", }; const struct nested data[] = { { { .multi = test, }, }, { { .single = "hello", }, }, }; ``` ClangIR now failed to recognize `data` as an array since it failed to recognize the initializer for union. This comes from a fundamental difference between CIR and LLVM IR. In LLVM IR, the union is simply a struct with the largest member. So it is fine to have only one init element. But in CIR, the union has the information for all members. So if we only pass a single init element, we may be in trouble. We solve the problem by appending placeholder attribute for the uninitialized fields. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 15 +++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 3 ++ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 29 ++++++++++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 9 +++++ clang/test/CIR/CodeGen/nested-union-array.c | 33 +++++++++++++++++++ clang/test/CIR/CodeGen/union-init.c | 13 ++++---- 6 files changed, 94 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/nested-union-array.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 058e335928aa..a6a27006f357 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -92,6 +92,21 @@ def CIR_BoolAttr : CIR_Attr<"Bool", "bool", [TypedAttrInterface]> { }]; } +//===----------------------------------------------------------------------===// +// InactiveUnionFieldAttr +//===----------------------------------------------------------------------===// + +def InactiveUnionFieldAttr : CIR_Attr<"InactiveUnionField", "inactive_field", [TypedAttrInterface]> { + let summary = "Attribute to represent an uninitialized field for a union."; + let description = [{ + The InactiveUnionFieldAttr is used to represent an uninitialized field + for a union. + }]; + + let parameters = (ins AttributeSelfTypeParameter<"">:$type); + let assemblyFormat = [{}]; +} + //===----------------------------------------------------------------------===// // ZeroAttr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index a940a7b4dc1e..22e3d56bd78b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -300,6 +300,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return true; } + if (mlir::isa(attr)) + return true; + llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index c8fa52b70fae..377b3770cae6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -378,7 +378,32 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( CharUnits AlignedSize = Size.alignTo(Align); bool Packed = false; - ArrayRef UnpackedElems = Elems; + ArrayRef UnpackedElems; + + // Fill the init elements for union. This comes from a fundamental + // difference between CIR and LLVM IR. In LLVM IR, the union is simply a + // struct with the largest member. So it is fine to have only one init + // element. But in CIR, the union has the information for all members. So if + // we only pass a single init element, we may be in trouble. We solve the + // problem by appending placeholder attribute for the uninitialized fields. + if (auto desired = dyn_cast(DesiredTy); + desired && desired.isUnion() && + Elems.size() != desired.getNumElements()) { + llvm::SmallVector UnionElemsStorage; + + for (auto elemTy : desired.getMembers()) { + if (auto Ty = mlir::dyn_cast(Elems.back()); + Ty && Ty.getType() == elemTy) + UnionElemsStorage.push_back(Elems.back()); + else + UnionElemsStorage.push_back(mlir::cir::InactiveUnionFieldAttr::get( + CGM.getBuilder().getContext(), elemTy)); + } + + UnpackedElems = UnionElemsStorage; + } else + UnpackedElems = Elems; + llvm::SmallVector UnpackedElemStorage; if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) { NaturalLayout = false; @@ -386,7 +411,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( } else if (DesiredSize > AlignedSize) { // The natural layout would be too small. Add padding to fix it. (This // is ignored if we choose a packed layout.) - UnpackedElemStorage.assign(Elems.begin(), Elems.end()); + UnpackedElemStorage.assign(UnpackedElems.begin(), UnpackedElems.end()); UnpackedElemStorage.push_back(Utils.getPadding(DesiredSize - Size)); UnpackedElems = UnpackedElemStorage; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7b10bb166252..acb2596bfd1d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -439,6 +439,15 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, // Iteratively lower each constant element of the struct. for (auto [idx, elt] : llvm::enumerate(constStruct.getMembers())) { + if (auto constStructType = + dyn_cast(constStruct.getType()); + constStructType && constStructType.isUnion()) { + if (isa(elt)) + continue; + + idx = 0; + } + mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); result = rewriter.create(loc, result, init, idx); } diff --git a/clang/test/CIR/CodeGen/nested-union-array.c b/clang/test/CIR/CodeGen/nested-union-array.c new file mode 100644 index 000000000000..7684e3c951c7 --- /dev/null +++ b/clang/test/CIR/CodeGen/nested-union-array.c @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +struct nested +{ + union { + const char *single; + const char *const *multi; + } output; +}; +static const char * const test[] = { + "test", +}; +const struct nested data[] = +{ + { + { + .multi = test, + }, + }, + { + { + .single = "hello", + }, + }, +}; + +// CIR: ![[ANON_TY:.+]] = !cir.struct, !cir.ptr> +// CIR: ![[NESTED_TY:.+]] = !cir.struct, #cir.global_view<@test> : !cir.ptr>}> : ![[ANON_TY]]}> : ![[NESTED_TY:.+]], #cir.const_struct<{#cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr, #cir.inactive_field : !cir.ptr>}> : ![[ANON_TY]]}> : ![[NESTED_TY:.+]]]> : !cir.array +// LLVM: @data = constant [2 x {{.*}}] diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index 122999de23c2..1d0886644b2b 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -12,10 +12,12 @@ void foo(int x) { A a = {.x = x}; } -// CHECK-DAG: ![[anon0:.*]] = !cir.struct -// CHECK-DAG: ![[anon:.*]] = !cir.struct +// CHECK-DAG: ![[TY_U:.*]] = !cir.struct +// CHECK-DAG: ![[anon0:.*]] = !cir.struct // CHECK-DAG: #[[bfi_y:.*]] = #cir.bitfield_info +// CHECK-DAG: ![[TY_A:.*]] = !cir.struct // CHECK-DAG: ![[anon1:.*]] = !cir.struct} // CHECK-LABEL: cir.func @foo( @@ -32,7 +34,7 @@ void foo(int x) { // CHECK: cir.return union { int i; float f; } u = { }; -// CHECK: cir.global external @u = #cir.zero : ![[anon]] +// CHECK: cir.global external @u = #cir.zero : ![[TY_u]] unsigned is_little(void) { const union { @@ -43,9 +45,8 @@ unsigned is_little(void) { } // CHECK: cir.func @is_little -// CHECK: %[[VAL_1:.*]] = cir.get_global @is_little.one : !cir.ptr -// CHECK: %[[VAL_2:.*]] = cir.cast(bitcast, %[[VAL_1]] : !cir.ptr), !cir.ptr -// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = "c"} : !cir.ptr -> !cir.ptr> +// CHECK: %[[VAL_1:.*]] = cir.get_global @is_little.one : !cir.ptr +// CHECK: %[[VAL_2:.*]] = cir.get_member %[[VAL_1]][1] {name = "c"} : !cir.ptr -> !cir.ptr> typedef union { int x; From 5194a3d42ad7404f981a4618268c185852eef6b5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 4 Nov 2024 13:18:31 -0800 Subject: [PATCH 2023/2301] [CIR][NFC] Fix warnings post rebase and some pre-existing ones --- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 2 ++ clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 8 ++++++++ .../CIR/Dialect/Transforms/CallConvLowering.cpp | 14 +++++++++----- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 0f0f3de66441..a319046b6901 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -85,6 +85,7 @@ TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { llvm_unreachable("non-canonical or dependent type in IR-generation"); case Type::ArrayParameter: + case Type::HLSLAttributedResource: llvm_unreachable("NYI"); case Type::Auto: diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index efe193861b08..c6bd049e9224 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1545,6 +1545,7 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, switch (Ty->getTypeClass()) { case Type::ArrayParameter: + case Type::HLSLAttributedResource: llvm_unreachable("NYI"); #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) @@ -1920,6 +1921,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( switch (Ty->getTypeClass()) { case Type::ArrayParameter: + case Type::HLSLAttributedResource: llvm_unreachable("NYI"); #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 61c32abf7409..afb7349705f7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -387,12 +387,20 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { llvm_unreachable("Non-canonical or dependent types aren't possible."); case Type::ArrayParameter: + case Type::HLSLAttributedResource: llvm_unreachable("NYI"); case Type::Builtin: { switch (cast(Ty)->getKind()) { case BuiltinType::HLSLResource: llvm_unreachable("NYI"); + case BuiltinType::SveMFloat8: + case BuiltinType::SveMFloat8x2: + case BuiltinType::SveMFloat8x3: + case BuiltinType::SveMFloat8x4: + case BuiltinType::MFloat8: + case BuiltinType::MFloat8x8: + case BuiltinType::MFloat8x16: case BuiltinType::SveBoolx2: case BuiltinType::SveBoolx4: case BuiltinType::SveCount: diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index f29c67210288..d9247a834070 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -59,7 +59,8 @@ struct CallConvLowering { lowerIndirectCallOp(c); }); - lowerModule->rewriteFunctionDefinition(op); + if (lowerModule->rewriteFunctionDefinition(op).failed()) + op.emitError("Unable to rewrite function definition"); } private: @@ -94,7 +95,8 @@ struct CallConvLowering { } void lowerDirectCallOp(CallOp op, FuncOp callee) { - lowerModule->rewriteFunctionCall(op, callee); + if (lowerModule->rewriteFunctionCall(op, callee).failed()) + op.emitError("Unable to rewrite function call"); } void lowerIndirectCallOp(CallOp op) { @@ -102,9 +104,11 @@ struct CallConvLowering { rewriter.setInsertionPoint(op); auto typ = op.getIndirectCall().getType(); - if (isFuncPointerTy(typ)) { - lowerModule->rewriteFunctionCall(op); - } + if (!isFuncPointerTy(typ)) + return; + + if (lowerModule->rewriteFunctionCall(op).failed()) + op.emitError("Unable to rewrite function call"); } private: From 4d65b0321f4f2228c74ddfcd551d343051e1842e Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Mon, 4 Nov 2024 15:03:51 -0800 Subject: [PATCH 2024/2301] [CIR][CIRGen] Support __builtin___memcpy_chk (#1032) This is just the usual adaption from CodeGen. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 19 +++++++- clang/test/CIR/CodeGen/builtins-memory.c | 56 ++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtins-memory.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index ac663170cd22..06eda8a527ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1425,8 +1425,23 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_char_memchr: llvm_unreachable("BI__builtin_char_memchr NYI"); - case Builtin::BI__builtin___memcpy_chk: - llvm_unreachable("BI__builtin___memcpy_chk NYI"); + case Builtin::BI__builtin___memcpy_chk: { + // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. + Expr::EvalResult sizeResult, dstSizeResult; + if (!E->getArg(2)->EvaluateAsInt(sizeResult, CGM.getASTContext()) || + !E->getArg(3)->EvaluateAsInt(dstSizeResult, CGM.getASTContext())) + break; + llvm::APSInt size = sizeResult.Val.getInt(); + llvm::APSInt dstSize = dstSizeResult.Val.getInt(); + if (size.ugt(dstSize)) + break; + Address dest = buildPointerWithAlignment(E->getArg(0)); + Address src = buildPointerWithAlignment(E->getArg(1)); + auto loc = getLoc(E->getSourceRange()); + ConstantOp sizeOp = builder.getConstInt(loc, size); + builder.createMemCpy(loc, dest.getPointer(), src.getPointer(), sizeOp); + return RValue::get(dest.getPointer()); + } case Builtin::BI__builtin_objc_memmove_collectable: llvm_unreachable("BI__builtin_objc_memmove_collectable NYI"); diff --git a/clang/test/CIR/CodeGen/builtins-memory.c b/clang/test/CIR/CodeGen/builtins-memory.c new file mode 100644 index 000000000000..51be880d9cb7 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtins-memory.c @@ -0,0 +1,56 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir + +typedef __SIZE_TYPE__ size_t; +void test_memcpy_chk(void *dest, const void *src, size_t n) { + // CIR-LABEL: cir.func @test_memcpy_chk + // CIR: %[[#DEST:]] = cir.alloca {{.*}} ["dest", init] + // CIR: %[[#SRC:]] = cir.alloca {{.*}} ["src", init] + // CIR: %[[#N:]] = cir.alloca {{.*}} ["n", init] + + // An unchecked memcpy should be emitted when the count and buffer size are + // constants and the count is less than or equal to the buffer size. + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<8> + // CIR: cir.libc.memcpy %[[#COUNT]] bytes from %[[#SRC_LOAD]] to %[[#DEST_LOAD]] + __builtin___memcpy_chk(dest, src, 8, 10); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<10> + // CIR: cir.libc.memcpy %[[#COUNT]] bytes from %[[#SRC_LOAD]] to %[[#DEST_LOAD]] + __builtin___memcpy_chk(dest, src, 10, 10); + + // __memcpy_chk should be called when the count is greater than the buffer + // size, or when either the count or buffer size isn't a constant. + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<10> + // CIR: %[[#SIZE:]] = cir.const #cir.int<8> + // CIR: cir.call @__memcpy_chk(%[[#DEST_LOAD]], %[[#SRC_LOAD]], %[[#COUNT]], %[[#SIZE]]) + __builtin___memcpy_chk(dest, src, 10lu, 8lu); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#N_LOAD:]] = cir.load %[[#N]] + // CIR: %[[#SIZE:]] = cir.const #cir.int<10> + // CIR: cir.call @__memcpy_chk(%[[#DEST_LOAD]], %[[#SRC_LOAD]], %[[#N_LOAD]], %[[#SIZE]]) + __builtin___memcpy_chk(dest, src, n, 10lu); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<10> + // CIR: %[[#N_LOAD:]] = cir.load %[[#N]] + // CIR: cir.call @__memcpy_chk(%[[#DEST_LOAD]], %[[#SRC_LOAD]], %[[#COUNT]], %[[#N_LOAD]]) + __builtin___memcpy_chk(dest, src, 10lu, n); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#N_LOAD1:]] = cir.load %[[#N]] + // CIR: %[[#N_LOAD2:]] = cir.load %[[#N]] + // CIR: cir.call @__memcpy_chk(%[[#DEST_LOAD]], %[[#SRC_LOAD]], %[[#N_LOAD1]], %[[#N_LOAD2]]) + __builtin___memcpy_chk(dest, src, n, n); +} From bb3d1acb769a58084fb1e9679b4486e7cfe33b46 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Fri, 1 Nov 2024 17:17:43 -0700 Subject: [PATCH 2025/2301] [CIR][CIRGen] Support __builtin___memset_chk (#1053) This follows the same implementation as CodeGen. https://github.com/llvm/clangir/issues/1051 tracks potentially switching to a different strategy in the future. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 19 ++++++++- clang/test/CIR/CodeGen/builtins-memory.c | 53 ++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 06eda8a527ca..4a1b64f9cf4d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1478,8 +1478,23 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_memset_inline: llvm_unreachable("BI__builtin_memset_inline NYI"); - case Builtin::BI__builtin___memset_chk: - llvm_unreachable("BI__builtin___memset_chk NYI"); + case Builtin::BI__builtin___memset_chk: { + // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2. + Expr::EvalResult sizeResult, dstSizeResult; + if (!E->getArg(2)->EvaluateAsInt(sizeResult, CGM.getASTContext()) || + !E->getArg(3)->EvaluateAsInt(dstSizeResult, CGM.getASTContext())) + break; + llvm::APSInt size = sizeResult.Val.getInt(); + llvm::APSInt dstSize = dstSizeResult.Val.getInt(); + if (size.ugt(dstSize)) + break; + Address dest = buildPointerWithAlignment(E->getArg(0)); + mlir::Value byteVal = buildScalarExpr(E->getArg(1)); + auto loc = getLoc(E->getSourceRange()); + ConstantOp sizeOp = builder.getConstInt(loc, size); + builder.createMemSet(loc, dest.getPointer(), byteVal, sizeOp); + return RValue::get(dest.getPointer()); + } case Builtin::BI__builtin_wmemchr: llvm_unreachable("BI__builtin_wmemchr NYI"); case Builtin::BI__builtin_wmemcmp: diff --git a/clang/test/CIR/CodeGen/builtins-memory.c b/clang/test/CIR/CodeGen/builtins-memory.c index 51be880d9cb7..439b82e98d33 100644 --- a/clang/test/CIR/CodeGen/builtins-memory.c +++ b/clang/test/CIR/CodeGen/builtins-memory.c @@ -54,3 +54,56 @@ void test_memcpy_chk(void *dest, const void *src, size_t n) { // CIR: cir.call @__memcpy_chk(%[[#DEST_LOAD]], %[[#SRC_LOAD]], %[[#N_LOAD1]], %[[#N_LOAD2]]) __builtin___memcpy_chk(dest, src, n, n); } + +void test_memset_chk(void *dest, int ch, size_t n) { + // CIR-LABEL: cir.func @test_memset_chk + // CIR: %[[#DEST:]] = cir.alloca {{.*}} ["dest", init] + // CIR: %[[#CH:]] = cir.alloca {{.*}} ["ch", init] + // CIR: %[[#N:]] = cir.alloca {{.*}} ["n", init] + + // An unchecked memset should be emitted when the count and buffer size are + // constants and the count is less than or equal to the buffer size. + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#CH_LOAD:]] = cir.load %[[#CH]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<8> + // CIR: cir.libc.memset %[[#COUNT]] bytes from %[[#DEST_LOAD]] set to %[[#CH_LOAD]] + __builtin___memset_chk(dest, ch, 8, 10); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#CH_LOAD:]] = cir.load %[[#CH]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<10> + // CIR: cir.libc.memset %[[#COUNT]] bytes from %[[#DEST_LOAD]] set to %[[#CH_LOAD]] + __builtin___memset_chk(dest, ch, 10, 10); + + // __memset_chk should be called when the count is greater than the buffer + // size, or when either the count or buffer size isn't a constant. + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#CH_LOAD:]] = cir.load %[[#CH]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<10> + // CIR: %[[#SIZE:]] = cir.const #cir.int<8> + // CIR: cir.call @__memset_chk(%[[#DEST_LOAD]], %[[#CH_LOAD]], %[[#COUNT]], %[[#SIZE]]) + __builtin___memset_chk(dest, ch, 10lu, 8lu); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#CH_LOAD:]] = cir.load %[[#CH]] + // CIR: %[[#N_LOAD:]] = cir.load %[[#N]] + // CIR: %[[#SIZE:]] = cir.const #cir.int<10> + // CIR: cir.call @__memset_chk(%[[#DEST_LOAD]], %[[#CH_LOAD]], %[[#N_LOAD]], %[[#SIZE]]) + __builtin___memset_chk(dest, ch, n, 10lu); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#CH_LOAD:]] = cir.load %[[#CH]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<10> + // CIR: %[[#N_LOAD:]] = cir.load %[[#N]] + // CIR: cir.call @__memset_chk(%[[#DEST_LOAD]], %[[#CH_LOAD]], %[[#COUNT]], %[[#N_LOAD]]) + __builtin___memset_chk(dest, ch, 10lu, n); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#CH_LOAD:]] = cir.load %[[#CH]] + // CIR: %[[#N_LOAD1:]] = cir.load %[[#N]] + // CIR: %[[#N_LOAD2:]] = cir.load %[[#N]] + // CIR: cir.call @__memset_chk(%[[#DEST_LOAD]], %[[#CH_LOAD]], %[[#N_LOAD1]], %[[#N_LOAD2]]) + __builtin___memset_chk(dest, ch, n, n); +} From e1f0663595131a1b021b183ff776c382305a2031 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Mon, 4 Nov 2024 16:02:25 -0800 Subject: [PATCH 2026/2301] [CIR][CIRGen] Emit required vtables (#1054) We were missing an override for this previously and thus not emitting vtables when key functions were defined. --- clang/include/clang/CIR/CIRGenerator.h | 1 + clang/lib/CIR/CodeGen/CIRGenModule.h | 2 ++ clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 10 ++++++++++ clang/lib/CIR/CodeGen/CIRGenerator.cpp | 7 +++++++ clang/test/CIR/CodeGen/vtable-emission.cpp | 15 +++++++++++++++ 5 files changed, 35 insertions(+) create mode 100644 clang/test/CIR/CodeGen/vtable-emission.cpp diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index c0712de63313..85fed2c926d8 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -90,6 +90,7 @@ class CIRGenerator : public clang::ASTConsumer { void HandleTagDeclRequiredDefinition(const clang::TagDecl *D) override; void HandleCXXStaticMemberVarInstantiation(clang::VarDecl *D) override; void CompleteTentativeDefinition(clang::VarDecl *D) override; + void HandleVTable(clang::CXXRecordDecl *rd) override; mlir::ModuleOp getModule(); std::unique_ptr takeContext() { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index a4380bde009d..6daf0a2fbeaf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -345,6 +345,8 @@ class CIRGenModule : public CIRGenTypeCache { void buildDeferredVTables(); bool shouldOpportunisticallyEmitVTables(); + void buildVTable(CXXRecordDecl *rd); + void setDSOLocal(mlir::cir::CIRGlobalValueInterface GV) const; /// Return the appropriate linkage for the vtable, VTT, and type information diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 450dda5fa1be..a565adc9703a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -150,6 +150,16 @@ void CIRGenModule::buildDeferredVTables() { DeferredVTables.clear(); } +/// This is a callback from Sema to tell us that a particular vtable is +/// required to be emitted in this translation unit. +/// +/// This is only called for vtables that _must_ be emitted (mainly due to key +/// functions). For weak vtables, CodeGen tracks when they are needed and +/// emits them as-needed. +void CIRGenModule::buildVTable(CXXRecordDecl *rd) { + VTables.GenerateClassData(rd); +} + void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) { assert(!MissingFeatures::generateDebugInfo()); diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 4d6a6c6c5d84..758dee16103c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -190,3 +190,10 @@ void CIRGenerator::CompleteTentativeDefinition(VarDecl *D) { CGM->buildTentativeDefinition(D); } + +void CIRGenerator::HandleVTable(CXXRecordDecl *rd) { + if (Diags.hasErrorOccurred()) + return; + + CGM->buildVTable(rd); +} diff --git a/clang/test/CIR/CodeGen/vtable-emission.cpp b/clang/test/CIR/CodeGen/vtable-emission.cpp new file mode 100644 index 000000000000..f63a9fe3cd97 --- /dev/null +++ b/clang/test/CIR/CodeGen/vtable-emission.cpp @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct S { + virtual void key(); + virtual void nonKey() {} +}; + +void S::key() {} + +// The definition of the key function should result in the vtable being emitted. +// CHECK: cir.global external @_ZTV1S = #cir.vtable + +// The reference from the vtable should result in nonKey being emitted. +// CHECK: cir.func linkonce_odr @_ZN1S6nonKeyEv({{.*}} { From e5f439863d41780f88a2e0e9f6266194a90c6ffa Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 5 Nov 2024 13:43:36 -0500 Subject: [PATCH 2027/2301] [CIR][CodeGen][NFC] Add `getMLIRContext` to CIRGenModule Upstream review of a PR requested that we be more explicit with differentiating things from MLIR to similarly named things from clang AST/LLVM/etc. So add an MLIRContext getter that we should start using. Reviewers: bcardosolopes Reviewed By: bcardosolopes Pull Request: https://github.com/llvm/clangir/pull/1047 --- clang/lib/CIR/CodeGen/CIRAsm.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 8 +- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 26 ++--- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 13 +-- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 7 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 19 ++- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 16 +-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 + clang/lib/CIR/CodeGen/CIRGenModule.cpp | 110 +++++++++--------- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 +- clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp | 8 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 4 +- 16 files changed, 119 insertions(+), 122 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index e88eb1da098f..252f20b186b7 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -212,7 +212,7 @@ std::pair CIRGenFunction::buildAsmInputLValue( uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); if ((Size <= 64 && llvm::isPowerOf2_64(Size)) || getTargetHooks().isScalarizableAsmOperand(*this, Ty)) { - Ty = mlir::cir::IntType::get(builder.getContext(), Size, false); + Ty = mlir::cir::IntType::get(&getMLIRContext(), Size, false); return {builder.createLoad(getLoc(Loc), InputValue.getAddress().withElementType(Ty)), @@ -434,7 +434,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { if (RequiresCast) { unsigned Size = getContext().getTypeSize(QTy); - Ty = mlir::cir::IntType::get(builder.getContext(), Size, false); + Ty = mlir::cir::IntType::get(&getMLIRContext(), Size, false); } ResultRegTypes.push_back(Ty); // If this output is tied to an input, and if the input is larger, then @@ -657,7 +657,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { assert(cast(op.getType()).getPointee() == typ && "element type differs from pointee type!"); - operandAttrs.push_back(mlir::UnitAttr::get(builder.getContext())); + operandAttrs.push_back(mlir::UnitAttr::get(&getMLIRContext())); } else { // We need to add an attribute for every arg since later, during // the lowering to LLVM IR the attributes will be assigned to the diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 4a1b64f9cf4d..e33c5bc03dbd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1001,7 +1001,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Probability.convert(llvm::APFloat::IEEEdouble(), llvm::RoundingMode::Dynamic, &LoseInfo); ProbAttr = mlir::FloatAttr::get( - mlir::Float64Type::get(builder.getContext()), Probability); + mlir::Float64Type::get(&getMLIRContext()), Probability); } auto result = builder.create( @@ -1176,7 +1176,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_readsteadycounter NYI"); case Builtin::BI__builtin___clear_cache: { - mlir::Type voidTy = mlir::cir::VoidType::get(builder.getContext()); + mlir::Type voidTy = mlir::cir::VoidType::get(&getMLIRContext()); mlir::Value begin = builder.createPtrBitcast(buildScalarExpr(E->getArg(0)), voidTy); mlir::Value end = @@ -1743,7 +1743,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo}); auto EncompassingCIRTy = mlir::cir::IntType::get( - builder.getContext(), EncompassingInfo.Width, EncompassingInfo.Signed); + &getMLIRContext(), EncompassingInfo.Width, EncompassingInfo.Signed); auto ResultCIRTy = mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); @@ -2034,7 +2034,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, auto fnOp = CGM.GetOrCreateCIRFunction(ND->getName(), ty, gd, /*ForVTable=*/false, /*DontDefer=*/false); - fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); return buildCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), E, ReturnValue); } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 5b68f31797d5..55e428ae380e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2368,7 +2368,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( mlir::cir::VectorType resTy = (builtinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || builtinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v) - ? mlir::cir::VectorType::get(builder.getContext(), vTy.getEltType(), + ? mlir::cir::VectorType::get(&getMLIRContext(), vTy.getEltType(), vTy.getSize() * 2) : vTy; mlir::cir::VectorType mulVecT = @@ -3055,73 +3055,73 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vget_lane_i8: case NEON::BI__builtin_neon_vdupb_lane_i8: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt8Ty, 8)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt8Ty, 8)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i8: case NEON::BI__builtin_neon_vdupb_laneq_i8: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt8Ty, 16)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt8Ty, 16)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i16: case NEON::BI__builtin_neon_vduph_lane_i16: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt16Ty, 4)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt16Ty, 4)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i16: case NEON::BI__builtin_neon_vduph_laneq_i16: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt16Ty, 8)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt16Ty, 8)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i32: case NEON::BI__builtin_neon_vdups_lane_i32: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt32Ty, 2)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt32Ty, 2)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_f32: case NEON::BI__builtin_neon_vdups_lane_f32: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), FloatTy, 2)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), FloatTy, 2)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i32: case NEON::BI__builtin_neon_vdups_laneq_i32: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt32Ty, 4)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt32Ty, 4)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i64: case NEON::BI__builtin_neon_vdupd_lane_i64: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt64Ty, 1)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt64Ty, 1)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vdupd_lane_f64: case NEON::BI__builtin_neon_vget_lane_f64: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), DoubleTy, 1)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), DoubleTy, 1)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i64: case NEON::BI__builtin_neon_vdupd_laneq_i64: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), UInt64Ty, 2)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt64Ty, 2)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_f32: case NEON::BI__builtin_neon_vdups_laneq_f32: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), FloatTy, 4)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), FloatTy, 4)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_f64: case NEON::BI__builtin_neon_vdupd_laneq_f64: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(builder.getContext(), DoubleTy, 2)); + Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), DoubleTy, 2)); return builder.create( getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vaddh_f16: diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 4d8174aaa3d1..52399e8e128c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -344,8 +344,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, CIRGenFunction::SourceLocRAIIObject fnLoc{cgf, getLoc(varDecl->getLocation())}; - addr.setAstAttr( - mlir::cir::ASTVarDeclAttr::get(builder.getContext(), varDecl)); + addr.setAstAttr(mlir::cir::ASTVarDeclAttr::get(&getMLIRContext(), varDecl)); if (ty->isReferenceType()) { mlir::OpBuilder::InsertionGuard guard(builder); diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 5051488cf2ee..0c3234a6519e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -382,7 +382,7 @@ void CIRGenModule::constructAttributeList(StringRef Name, if (TargetDecl) { if (TargetDecl->hasAttr()) { - auto nu = mlir::cir::NoThrowAttr::get(builder.getContext()); + auto nu = mlir::cir::NoThrowAttr::get(&getMLIRContext()); funcAttrs.set(nu.getMnemonic(), nu); } @@ -434,12 +434,11 @@ void CIRGenModule::constructAttributeList(StringRef Name, } if (TargetDecl->hasAttr()) { - auto cirKernelAttr = - mlir::cir::OpenCLKernelAttr::get(builder.getContext()); + auto cirKernelAttr = mlir::cir::OpenCLKernelAttr::get(&getMLIRContext()); funcAttrs.set(cirKernelAttr.getMnemonic(), cirKernelAttr); auto uniformAttr = mlir::cir::OpenCLKernelUniformWorkGroupSizeAttr::get( - builder.getContext()); + &getMLIRContext()); if (getLangOpts().OpenCLVersion <= 120) { // OpenCL v1.2 Work groups are always uniform funcAttrs.set(uniformAttr.getMnemonic(), uniformAttr); @@ -779,7 +778,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, CannotThrow = true; } else { // Otherwise, nounwind call sites will never throw. - auto noThrowAttr = mlir::cir::NoThrowAttr::get(builder.getContext()); + auto noThrowAttr = mlir::cir::NoThrowAttr::get(&getMLIRContext()); CannotThrow = Attrs.getNamed(noThrowAttr.getMnemonic()).has_value(); if (auto fptr = dyn_cast(CalleePtr)) @@ -825,7 +824,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, } auto extraFnAttrs = mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), Attrs.getDictionary(builder.getContext())); + &getMLIRContext(), Attrs.getDictionary(&getMLIRContext())); mlir::cir::CIRCallOpInterface callLikeOp = buildCallLikeOp( *this, callLoc, indirectFuncTy, indirectFuncVal, directFuncOp, @@ -833,7 +832,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, if (E) callLikeOp->setAttr( - "ast", mlir::cir::ASTCallExprAttr::get(builder.getContext(), *E)); + "ast", mlir::cir::ASTCallExprAttr::get(&getMLIRContext(), *E)); if (callOrTryCall) *callOrTryCall = callLikeOp; diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 5aece0476abd..7d31cd8cbb4e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -175,7 +175,7 @@ mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, int32Ty), /*FD=*/nullptr); assert(fnOp && "should always succeed"); - fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); } else fnOp = cast(builtin); @@ -197,7 +197,7 @@ CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { mlir::cir::FuncType::get({int32Ty}, boolTy), /*FD=*/nullptr); assert(fnOp && "should always succeed"); - fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); } else fnOp = cast(builtin); @@ -218,7 +218,7 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, mlir::cir::FuncType::get({int32Ty, VoidPtrTy}, VoidPtrTy), /*FD=*/nullptr); assert(fnOp && "should always succeed"); - fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); } else fnOp = cast(builtin); @@ -239,7 +239,7 @@ mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, mlir::cir::FuncType::get({VoidPtrTy, boolTy}, boolTy), /*FD=*/nullptr); assert(fnOp && "should always succeed"); - fnOp.setBuiltinAttr(mlir::UnitAttr::get(builder.getContext())); + fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); } else fnOp = cast(builtin); @@ -254,7 +254,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { auto Fn = dyn_cast(CurFn); assert(Fn && "other callables NYI"); - Fn.setCoroutineAttr(mlir::UnitAttr::get(builder.getContext())); + Fn.setCoroutineAttr(mlir::UnitAttr::get(&getMLIRContext())); auto coroId = buildCoroIDBuiltinCall(openCurlyLoc, nullPtrCst); createCoroData(*this, CurCoro, coroId); diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 5646d028ad86..da420ce5e539 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -337,7 +337,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { assert(allocaOp && "Address should come straight out of the alloca"); if (!allocaOp.use_empty()) - allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext())); + allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext())); return; } @@ -595,7 +595,7 @@ mlir::cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( // Given those constraints, thread in the GetGlobalOp and update it // directly. GVAddr.getAddr().setType( - mlir::cir::PointerType::get(builder.getContext(), Init.getType())); + mlir::cir::PointerType::get(&getMLIRContext(), Init.getType())); OldGV->erase(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 9d04947234e3..df07da9cd8eb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -708,7 +708,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { assert(!(hasCatchAll && hasFilter)); if (hasCatchAll) { // Attach the catch_all region. Can't coexist with an unwind one. - auto catchAll = mlir::cir::CatchAllAttr::get(builder.getContext()); + auto catchAll = mlir::cir::CatchAllAttr::get(&getMLIRContext()); clauses.push_back(catchAll); // If we have an EH filter, we need to add those handlers in the @@ -729,13 +729,12 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { // If there's no catch_all, attach the unwind region. This needs to be the // last region in the TryOp operation catch list. if (!hasCatchAll) { - auto catchUnwind = mlir::cir::CatchUnwindAttr::get(builder.getContext()); + auto catchUnwind = mlir::cir::CatchUnwindAttr::get(&getMLIRContext()); clauses.push_back(catchUnwind); } // Add final array of clauses into TryOp. - tryOp.setCatchTypesAttr( - mlir::ArrayAttr::get(builder.getContext(), clauses)); + tryOp.setCatchTypesAttr(mlir::ArrayAttr::get(&getMLIRContext(), clauses)); } // In traditional LLVM codegen. this tells the backend how to generate the diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index f78fb5912723..32d476890b5c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -631,7 +631,7 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, Address addr, const VarDecl *VD = currVarDecl; assert(VD && "VarDecl expected"); if (VD->hasInit()) - SrcAlloca.setInitAttr(mlir::UnitAttr::get(builder.getContext())); + SrcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext())); } assert(currSrcLoc && "must pass in source location"); @@ -1286,7 +1286,7 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { // Tag 'load' with deref attribute. if (auto loadOp = dyn_cast<::mlir::cir::LoadOp>(Addr.getPointer().getDefiningOp())) { - loadOp.setIsDerefAttr(mlir::UnitAttr::get(builder.getContext())); + loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext())); } LValue LV = LValue::makeAddr(Addr, T, BaseInfo); @@ -1497,15 +1497,14 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, // get non-variadic function type CalleeTy = mlir::cir::FuncType::get(CalleeTy.getInputs(), CalleeTy.getReturnType(), false); - auto CalleePtrTy = - mlir::cir::PointerType::get(builder.getContext(), CalleeTy); + auto CalleePtrTy = mlir::cir::PointerType::get(&getMLIRContext(), CalleeTy); auto *Fn = Callee.getFunctionPointer(); mlir::Value Addr; if (auto funcOp = llvm::dyn_cast(Fn)) { Addr = builder.create( getLoc(E->getSourceRange()), - mlir::cir::PointerType::get(builder.getContext(), + mlir::cir::PointerType::get(&getMLIRContext(), funcOp.getFunctionType()), funcOp.getSymName()); } else { @@ -2675,7 +2674,7 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, // one fused location that has either 2 or 4 total locations, depending // on else's availability. auto getStmtLoc = [this](const Stmt &s) { - return mlir::FusedLoc::get(builder.getContext(), + return mlir::FusedLoc::get(&getMLIRContext(), {getLoc(s.getSourceRange().getBegin()), getLoc(s.getSourceRange().getEnd())}); }; @@ -2716,7 +2715,7 @@ mlir::cir::IfOp CIRGenFunction::buildIfOnBoolExpr( SmallVector ifLocs{thenLoc}; if (elseLoc) ifLocs.push_back(*elseLoc); - auto loc = mlir::FusedLoc::get(builder.getContext(), ifLocs); + auto loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs); // Emit the code with the fully general case. mlir::Value condV = buildOpOnBoolExpr(loc, cond); @@ -2825,7 +2824,7 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, /*var type*/ ty, name, alignIntAttr, arraySize); if (currVarDecl) { auto alloca = cast(addr.getDefiningOp()); - alloca.setAstAttr(ASTVarDeclAttr::get(builder.getContext(), currVarDecl)); + alloca.setAstAttr(ASTVarDeclAttr::get(&getMLIRContext(), currVarDecl)); } } return addr; @@ -2909,8 +2908,8 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile, auto Ptr = addr.getPointer(); if (mlir::isa(ElemTy)) { - ElemTy = mlir::cir::IntType::get(builder.getContext(), 8, true); - auto ElemPtrTy = mlir::cir::PointerType::get(builder.getContext(), ElemTy); + ElemTy = mlir::cir::IntType::get(&getMLIRContext(), 8, true); + auto ElemPtrTy = mlir::cir::PointerType::get(&getMLIRContext(), ElemTy); Ptr = builder.create(loc, ElemPtrTy, mlir::cir::CastKind::bitcast, Ptr); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 377b3770cae6..f621035d02a6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -2051,4 +2051,4 @@ static mlir::TypedAttr buildNullConstant(CIRGenModule &CGM, mlir::TypedAttr CIRGenModule::buildNullConstantForBase(const CXXRecordDecl *Record) { return ::buildNullConstant(*this, Record, false); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index a319046b6901..03029c4fc664 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -164,7 +164,7 @@ mlir::Location CIRGenFunction::getLoc(SourceRange SLoc) { mlir::Location E = getLoc(SLoc.getEnd()); SmallVector locs = {B, E}; mlir::Attribute metadata; - return mlir::FusedLoc::get(locs, metadata, builder.getContext()); + return mlir::FusedLoc::get(locs, metadata, &getMLIRContext()); } else if (currSrcLoc) { return *currSrcLoc; } @@ -176,7 +176,7 @@ mlir::Location CIRGenFunction::getLoc(SourceRange SLoc) { mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) { SmallVector locs = {lhs, rhs}; mlir::Attribute metadata; - return mlir::FusedLoc::get(locs, metadata, builder.getContext()); + return mlir::FusedLoc::get(locs, metadata, &getMLIRContext()); } /// Return true if the statement contains a label in it. If @@ -311,9 +311,9 @@ mlir::LogicalResult CIRGenFunction::declare(const Decl *var, QualType ty, addr = buildAlloca(namedVar->getName(), ty, loc, alignment); auto allocaOp = cast(addr.getDefiningOp()); if (isParam) - allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext())); + allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext())); if (ty->isReferenceType() || ty.isConstQualified()) - allocaOp.setConstantAttr(mlir::UnitAttr::get(builder.getContext())); + allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext())); symbolTable.insert(var, addr); return mlir::success(); @@ -331,9 +331,9 @@ mlir::LogicalResult CIRGenFunction::declare(Address addr, const Decl *var, addrVal = addr.getPointer(); auto allocaOp = cast(addrVal.getDefiningOp()); if (isParam) - allocaOp.setInitAttr(mlir::UnitAttr::get(builder.getContext())); + allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext())); if (ty->isReferenceType() || ty.isConstQualified()) - allocaOp.setConstantAttr(mlir::UnitAttr::get(builder.getContext())); + allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext())); symbolTable.insert(var, addrVal); return mlir::success(); @@ -682,7 +682,7 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, auto FnBeginLoc = bSrcLoc.isValid() ? getLoc(bSrcLoc) : unknownLoc; auto FnEndLoc = eSrcLoc.isValid() ? getLoc(eSrcLoc) : unknownLoc; const auto fusedLoc = - mlir::FusedLoc::get(builder.getContext(), {FnBeginLoc, FnEndLoc}); + mlir::FusedLoc::get(&getMLIRContext(), {FnBeginLoc, FnEndLoc}); SourceLocRAIIObject fnLoc{*this, Loc.isValid() ? getLoc(Loc) : unknownLoc}; assert(Fn.isDeclaration() && "Function already has body?"); @@ -1277,7 +1277,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // We're in a lambda. auto Fn = dyn_cast(CurFn); assert(Fn && "other callables NYI"); - Fn.setLambdaAttr(mlir::UnitAttr::get(builder.getContext())); + Fn.setLambdaAttr(mlir::UnitAttr::get(&getMLIRContext())); // Figure out the captures. MD->getParent()->getCaptureFields(LambdaCaptureFields, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index e3bc68124e90..48b89e70ceda 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -31,6 +31,7 @@ #include "clang/Basic/TargetInfo.h" #include "clang/CIR/TypeEvaluationKind.h" +#include "mlir/IR/MLIRContext.h" #include "mlir/IR/TypeRange.h" #include "mlir/IR/Value.h" #include "mlir/Support/LogicalResult.h" @@ -577,6 +578,7 @@ class CIRGenFunction : public CIRGenTypeCache { CIRGenTypes &getTypes() const { return CGM.getTypes(); } const TargetInfo &getTarget() const { return CGM.getTarget(); } + mlir::MLIRContext &getMLIRContext() { return CGM.getMLIRContext(); } const TargetCIRGenInfo &getTargetHooks() const { return CGM.getTargetCIRGenInfo(); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index cde22e4d7c9b..4bb3ffb03680 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -113,40 +113,38 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, VTables{*this}, openMPRuntime(new CIRGenOpenMPRuntime(*this)) { // Initialize CIR signed integer types cache. - SInt8Ty = - ::mlir::cir::IntType::get(builder.getContext(), 8, /*isSigned=*/true); + SInt8Ty = ::mlir::cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true); SInt16Ty = - ::mlir::cir::IntType::get(builder.getContext(), 16, /*isSigned=*/true); + ::mlir::cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/true); SInt32Ty = - ::mlir::cir::IntType::get(builder.getContext(), 32, /*isSigned=*/true); + ::mlir::cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/true); SInt64Ty = - ::mlir::cir::IntType::get(builder.getContext(), 64, /*isSigned=*/true); + ::mlir::cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true); SInt128Ty = - ::mlir::cir::IntType::get(builder.getContext(), 128, /*isSigned=*/true); + ::mlir::cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true); // Initialize CIR unsigned integer types cache. - UInt8Ty = - ::mlir::cir::IntType::get(builder.getContext(), 8, /*isSigned=*/false); + UInt8Ty = ::mlir::cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false); UInt16Ty = - ::mlir::cir::IntType::get(builder.getContext(), 16, /*isSigned=*/false); + ::mlir::cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false); UInt32Ty = - ::mlir::cir::IntType::get(builder.getContext(), 32, /*isSigned=*/false); + ::mlir::cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false); UInt64Ty = - ::mlir::cir::IntType::get(builder.getContext(), 64, /*isSigned=*/false); + ::mlir::cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false); UInt128Ty = - ::mlir::cir::IntType::get(builder.getContext(), 128, /*isSigned=*/false); + ::mlir::cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/false); - VoidTy = ::mlir::cir::VoidType::get(builder.getContext()); + VoidTy = ::mlir::cir::VoidType::get(&getMLIRContext()); // Initialize CIR pointer types cache. - VoidPtrTy = ::mlir::cir::PointerType::get(builder.getContext(), VoidTy); + VoidPtrTy = ::mlir::cir::PointerType::get(&getMLIRContext(), VoidTy); - FP16Ty = ::mlir::cir::FP16Type::get(builder.getContext()); - BFloat16Ty = ::mlir::cir::BF16Type::get(builder.getContext()); - FloatTy = ::mlir::cir::SingleType::get(builder.getContext()); - DoubleTy = ::mlir::cir::DoubleType::get(builder.getContext()); - FP80Ty = ::mlir::cir::FP80Type::get(builder.getContext()); - FP128Ty = ::mlir::cir::FP128Type::get(builder.getContext()); + FP16Ty = ::mlir::cir::FP16Type::get(&getMLIRContext()); + BFloat16Ty = ::mlir::cir::BF16Type::get(&getMLIRContext()); + FloatTy = ::mlir::cir::SingleType::get(&getMLIRContext()); + DoubleTy = ::mlir::cir::DoubleType::get(&getMLIRContext()); + FP80Ty = ::mlir::cir::FP80Type::get(&getMLIRContext()); + FP128Ty = ::mlir::cir::FP128Type::get(&getMLIRContext()); // TODO: PointerWidthInBits PointerAlignInBytes = @@ -156,14 +154,14 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, .getQuantity(); // TODO: SizeSizeInBytes // TODO: IntAlignInBytes - UCharTy = ::mlir::cir::IntType::get(builder.getContext(), + UCharTy = ::mlir::cir::IntType::get(&getMLIRContext(), astCtx.getTargetInfo().getCharWidth(), /*isSigned=*/false); - UIntTy = ::mlir::cir::IntType::get(builder.getContext(), + UIntTy = ::mlir::cir::IntType::get(&getMLIRContext(), astCtx.getTargetInfo().getIntWidth(), /*isSigned=*/false); UIntPtrTy = ::mlir::cir::IntType::get( - builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth(), + &getMLIRContext(), astCtx.getTargetInfo().getMaxPointerWidth(), /*isSigned=*/false); UInt8PtrTy = builder.getPointerTo(UInt8Ty); UInt8PtrPtrTy = builder.getPointerTo(UInt8PtrTy); @@ -173,7 +171,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, CIRAllocaAddressSpace = getTargetCIRGenInfo().getCIRAllocaAddressSpace(); PtrDiffTy = ::mlir::cir::IntType::get( - builder.getContext(), astCtx.getTargetInfo().getMaxPointerWidth(), + &getMLIRContext(), astCtx.getTargetInfo().getMaxPointerWidth(), /*isSigned=*/true); if (langOpts.OpenCL) { @@ -652,7 +650,7 @@ void CIRGenModule::AddGlobalCtor(mlir::cir::FuncOp Ctor, int Priority) { // // FIXME(from traditional LLVM): Type coercion of void()* types. Ctor->setAttr(Ctor.getGlobalCtorAttrName(), - mlir::cir::GlobalCtorAttr::get(builder.getContext(), + mlir::cir::GlobalCtorAttr::get(&getMLIRContext(), Ctor.getName(), Priority)); } @@ -668,7 +666,7 @@ void CIRGenModule::AddGlobalDtor(mlir::cir::FuncOp Dtor, int Priority, // FIXME(from traditional LLVM): Type coercion of void()* types. Dtor->setAttr(Dtor.getGlobalDtorAttrName(), - mlir::cir::GlobalDtorAttr::get(builder.getContext(), + mlir::cir::GlobalDtorAttr::get(&getMLIRContext(), Dtor.getName(), Priority)); } @@ -807,7 +805,7 @@ void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, if (auto GGO = dyn_cast(Use.getUser())) { auto UseOpResultValue = GGO.getAddr(); UseOpResultValue.setType( - mlir::cir::PointerType::get(builder.getContext(), NewTy)); + mlir::cir::PointerType::get(&getMLIRContext(), NewTy)); } } } @@ -878,7 +876,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, if (D && !D->hasAttr()) { auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage; Entry.setLinkageAttr( - mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), LT)); + mlir::cir::GlobalLinkageKindAttr::get(&getMLIRContext(), LT)); mlir::SymbolTable::setSymbolVisibility(Entry, getMLIRVisibility(Entry)); } } @@ -1242,8 +1240,8 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // TODO(cir): pointer to array decay. Should this be modeled explicitly in // CIR? if (arrayTy) - InitType = mlir::cir::PointerType::get(builder.getContext(), - arrayTy.getEltType()); + InitType = + mlir::cir::PointerType::get(&getMLIRContext(), arrayTy.getEltType()); } else { assert(mlir::isa(Init) && "This should have a type"); auto TypedInitAttr = mlir::cast(Init); @@ -1462,7 +1460,7 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { for (uint64_t i = 0; i < arraySize; ++i) elements.push_back(mlir::cir::IntAttr::get(arrayEltTy, elementValues[i])); - auto elementsAttr = mlir::ArrayAttr::get(builder.getContext(), elements); + auto elementsAttr = mlir::ArrayAttr::get(&getMLIRContext(), elements); return builder.getConstArray(elementsAttr, arrayTy); } @@ -1572,7 +1570,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, auto ArrayTy = mlir::dyn_cast(GV.getSymType()); assert(ArrayTy && "String literal must be array"); auto PtrTy = - mlir::cir::PointerType::get(builder.getContext(), ArrayTy.getEltType()); + mlir::cir::PointerType::get(&getMLIRContext(), ArrayTy.getEltType()); return builder.getGlobalViewAttr(PtrTy, GV); } @@ -1980,10 +1978,10 @@ mlir::cir::VisibilityAttr CIRGenModule::getGlobalVisibilityAttrFromDecl(const Decl *decl) { const clang::VisibilityAttr *VA = decl->getAttr(); mlir::cir::VisibilityAttr cirVisibility = - mlir::cir::VisibilityAttr::get(builder.getContext()); + mlir::cir::VisibilityAttr::get(&getMLIRContext()); if (VA) { cirVisibility = mlir::cir::VisibilityAttr::get( - builder.getContext(), + &getMLIRContext(), getGlobalVisibilityKindFromClangVisibility(VA->getVisibility())); } return cirVisibility; @@ -2110,7 +2108,7 @@ void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( dyn_cast(Use.getUser())) { // Replace type getGlobalOp.getAddr().setType(mlir::cir::PointerType::get( - builder.getContext(), NewFn.getFunctionType())); + &getMLIRContext(), NewFn.getFunctionType())); } else { llvm_unreachable("NIY"); } @@ -2401,7 +2399,7 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, f = builder.create(loc, name, Ty); if (FD) - f.setAstAttr(makeFuncDeclAttr(FD, builder.getContext())); + f.setAstAttr(makeFuncDeclAttr(FD, &getMLIRContext())); if (FD && !FD->hasPrototype()) f.setNoProtoAttr(builder.getUnitAttr()); @@ -2411,13 +2409,13 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, // A declaration gets private visibility by default, but external linkage // as the default linkage. f.setLinkageAttr(mlir::cir::GlobalLinkageKindAttr::get( - builder.getContext(), mlir::cir::GlobalLinkageKind::ExternalLinkage)); + &getMLIRContext(), mlir::cir::GlobalLinkageKind::ExternalLinkage)); mlir::SymbolTable::setSymbolVisibility( f, mlir::SymbolTable::Visibility::Private); // Initialize with empty dict of extra attributes. f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), builder.getDictionaryAttr({}))); + &getMLIRContext(), builder.getDictionaryAttr({}))); if (!curCGF) theModule.push_back(f); @@ -2493,7 +2491,7 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, mlir::NamedAttrList attrs{f.getExtraAttrs().getElements().getValue()}; if (!hasUnwindExceptions(getLangOpts())) { - auto attr = mlir::cir::NoThrowAttr::get(builder.getContext()); + auto attr = mlir::cir::NoThrowAttr::get(&getMLIRContext()); attrs.set(attr.getMnemonic(), attr); } @@ -2503,23 +2501,23 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, // disabled, mark the function as noinline. if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { auto attr = mlir::cir::InlineAttr::get( - builder.getContext(), mlir::cir::InlineKind::AlwaysInline); + &getMLIRContext(), mlir::cir::InlineKind::AlwaysInline); attrs.set(attr.getMnemonic(), attr); } } else if (decl->hasAttr()) { // Add noinline if the function isn't always_inline. - auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + auto attr = mlir::cir::InlineAttr::get(&getMLIRContext(), mlir::cir::InlineKind::NoInline); attrs.set(attr.getMnemonic(), attr); } else if (decl->hasAttr()) { // (noinline wins over always_inline, and we can't specify both in IR) - auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + auto attr = mlir::cir::InlineAttr::get(&getMLIRContext(), mlir::cir::InlineKind::AlwaysInline); attrs.set(attr.getMnemonic(), attr); } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { // If we're not inlining, then force everything that isn't always_inline // to carry an explicit noinline attribute. - auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + auto attr = mlir::cir::InlineAttr::get(&getMLIRContext(), mlir::cir::InlineKind::NoInline); attrs.set(attr.getMnemonic(), attr); } else { @@ -2538,11 +2536,11 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, return any_of(Pattern->redecls(), CheckRedeclForInline); }; if (CheckForInline(cast(decl))) { - auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + auto attr = mlir::cir::InlineAttr::get(&getMLIRContext(), mlir::cir::InlineKind::InlineHint); attrs.set(attr.getMnemonic(), attr); } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyHintInlining) { - auto attr = mlir::cir::InlineAttr::get(builder.getContext(), + auto attr = mlir::cir::InlineAttr::get(&getMLIRContext(), mlir::cir::InlineKind::NoInline); attrs.set(attr.getMnemonic(), attr); } @@ -2559,17 +2557,17 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, } if (ShouldAddOptNone) { - auto optNoneAttr = mlir::cir::OptNoneAttr::get(builder.getContext()); + auto optNoneAttr = mlir::cir::OptNoneAttr::get(&getMLIRContext()); attrs.set(optNoneAttr.getMnemonic(), optNoneAttr); // OptimizeNone implies noinline; we should not be inlining such functions. auto noInlineAttr = mlir::cir::InlineAttr::get( - builder.getContext(), mlir::cir::InlineKind::NoInline); + &getMLIRContext(), mlir::cir::InlineKind::NoInline); attrs.set(noInlineAttr.getMnemonic(), noInlineAttr); } f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), attrs.getDictionary(builder.getContext()))); + &getMLIRContext(), attrs.getDictionary(&getMLIRContext()))); } void CIRGenModule::setCIRFunctionAttributes(GlobalDecl GD, @@ -2584,7 +2582,7 @@ void CIRGenModule::setCIRFunctionAttributes(GlobalDecl GD, constructAttributeList(func.getName(), info, GD, PAL, callingConv, /*AttrOnCallSite=*/false, isThunk); func.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), PAL.getDictionary(builder.getContext()))); + &getMLIRContext(), PAL.getDictionary(&getMLIRContext()))); // TODO(cir): Check X86_VectorCall incompatibility with WinARM64EC @@ -2820,13 +2818,13 @@ mlir::Location CIRGenModule::getLoc(SourceRange SLoc) { mlir::Location E = getLoc(SLoc.getEnd()); SmallVector locs = {B, E}; mlir::Attribute metadata; - return mlir::FusedLoc::get(locs, metadata, builder.getContext()); + return mlir::FusedLoc::get(locs, metadata, &getMLIRContext()); } mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { SmallVector locs = {lhs, rhs}; mlir::Attribute metadata; - return mlir::FusedLoc::get(locs, metadata, builder.getContext()); + return mlir::FusedLoc::get(locs, metadata, &getMLIRContext()); } void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { @@ -2945,7 +2943,7 @@ void CIRGenModule::buildDefaultMethods() { } mlir::IntegerAttr CIRGenModule::getSize(CharUnits size) { - return builder.getSizeFromCharUnits(builder.getContext(), size); + return builder.getSizeFromCharUnits(&getMLIRContext(), size); } mlir::Operation * @@ -3265,7 +3263,7 @@ mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( // Set up extra information and add to the module GV.setLinkageAttr( - mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), Linkage)); + mlir::cir::GlobalLinkageKindAttr::get(&getMLIRContext(), Linkage)); mlir::SymbolTable::setSymbolVisibility(GV, CIRGenModule::getMLIRVisibility(GV)); @@ -3456,7 +3454,7 @@ LangAS CIRGenModule::getGlobalVarAddressSpace(const VarDecl *D) { mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(const AnnotateAttr *attr) { ArrayRef exprs = {attr->args_begin(), attr->args_size()}; if (exprs.empty()) { - return mlir::ArrayAttr::get(builder.getContext(), {}); + return mlir::ArrayAttr::get(&getMLIRContext(), {}); } llvm::FoldingSetNodeID id; for (Expr *e : exprs) { @@ -3479,7 +3477,7 @@ mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(const AnnotateAttr *attr) { const auto &ap = ce.getAPValueResult(); if (ap.isInt()) { args.push_back(mlir::IntegerAttr::get( - mlir::IntegerType::get(builder.getContext(), + mlir::IntegerType::get(&getMLIRContext(), ap.getInt().getBitWidth()), ap.getInt())); } else { @@ -3498,7 +3496,7 @@ mlir::cir::AnnotationAttr CIRGenModule::buildAnnotateAttr(const clang::AnnotateAttr *aa) { mlir::StringAttr annoGV = builder.getStringAttr(aa->getAnnotation()); mlir::ArrayAttr args = buildAnnotationArgs(aa); - return mlir::cir::AnnotationAttr::get(builder.getContext(), annoGV, args); + return mlir::cir::AnnotationAttr::get(&getMLIRContext(), annoGV, args); } void CIRGenModule::addGlobalAnnotations(const ValueDecl *d, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 6daf0a2fbeaf..c2728a172473 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -167,6 +167,7 @@ class CIRGenModule : public CIRGenTypeCache { } CIRGenCXXABI &getCXXABI() const { return *ABI; } + mlir::MLIRContext &getMLIRContext() { return *builder.getContext(); } /// ------- /// Handling globals @@ -728,7 +729,7 @@ class CIRGenModule : public CIRGenTypeCache { void setFunctionLinkage(GlobalDecl GD, mlir::cir::FuncOp f) { auto L = getFunctionLinkage(GD); f.setLinkageAttr( - mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), L)); + mlir::cir::GlobalLinkageKindAttr::get(&getMLIRContext(), L)); mlir::SymbolTable::setSymbolVisibility(f, getMLIRVisibilityFromCIRLinkage(L)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp index 6c2e7542fbbb..9af4272ad679 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp @@ -179,7 +179,7 @@ void CIRGenModule::genKernelArgMetadata(mlir::cir::FuncOp Fn, auto oldValue = items.set(value.getMnemonic(), value); if (oldValue != value) { Fn.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), builder.getDictionaryAttr(items))); + &getMLIRContext(), builder.getDictionaryAttr(items))); } } else { if (shouldEmitArgName) @@ -242,12 +242,12 @@ void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, attrs.append(Fn.getExtraAttrs().getElements()); auto kernelMetadataAttr = OpenCLKernelMetadataAttr::get( - builder.getContext(), workGroupSizeHintAttr, reqdWorkGroupSizeAttr, + &getMLIRContext(), workGroupSizeHintAttr, reqdWorkGroupSizeAttr, vecTypeHintAttr, vecTypeHintSignedness, intelReqdSubGroupSizeAttr); attrs.append(kernelMetadataAttr.getMnemonic(), kernelMetadataAttr); Fn.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( - builder.getContext(), attrs.getDictionary(builder.getContext()))); + &getMLIRContext(), attrs.getDictionary(&getMLIRContext()))); } void CIRGenModule::buildOpenCLMetadata() { @@ -259,7 +259,7 @@ void CIRGenModule::buildOpenCLMetadata() { unsigned minor = (version % 100) / 10; auto clVersionAttr = - mlir::cir::OpenCLVersionAttr::get(builder.getContext(), major, minor); + mlir::cir::OpenCLVersionAttr::get(&getMLIRContext(), major, minor); theModule->setAttr("cir.cl.version", clVersionAttr); } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index afb7349705f7..2263cca6c4cc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -418,7 +418,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; case BuiltinType::Bool: - ResultType = ::mlir::cir::BoolType::get(Builder.getContext()); + ResultType = ::mlir::cir::BoolType::get(&getMLIRContext()); break; // Signed types. @@ -443,7 +443,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::SatShortAccum: case BuiltinType::SatShortFract: ResultType = - mlir::cir::IntType::get(Builder.getContext(), Context.getTypeSize(T), + mlir::cir::IntType::get(&getMLIRContext(), Context.getTypeSize(T), /*isSigned=*/true); break; // Unsigned types. From 6bedbeade7b8ffcf41a92bb5545636251cfd3600 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 5 Nov 2024 13:48:20 -0500 Subject: [PATCH 2028/2301] [CIR][Dialect][NFC] Add some helpers to LoadOp These are just missing getters/setters that should be there already. They are in use in a patch coming up. I'm splitting them out here for reviewability. Reviewers: bcardosolopes Pull Request: https://github.com/llvm/clangir/pull/1021 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 18 +++ clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 128 +++++++++++-------- 3 files changed, 97 insertions(+), 51 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 700d9769231d..2b36931de22d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -599,6 +599,13 @@ def LoadOp : CIR_Op<"load", [ $addr `:` qualified(type($addr)) `,` type($result) attr-dict }]; + let extraClassDeclaration = [{ + // TODO(CIR): The final interface here should include an argument for the + // SyncScope::ID. + // This should be used over the ODS generated setMemOrder. + void setAtomic(mlir::cir::MemOrder order); + }]; + // FIXME: add verifier. } @@ -656,6 +663,13 @@ def StoreOp : CIR_Op<"store", [ $value `,` $addr attr-dict `:` type($value) `,` qualified(type($addr)) }]; + let extraClassDeclaration = [{ + // TODO(CIR): The final interface here should include an argument for the + // SyncScope::ID. + // This should be used over the ODS generated setMemOrder. + void setAtomic(mlir::cir::MemOrder order); + }]; + // FIXME: add verifier. } @@ -2411,8 +2425,12 @@ def GlobalOp : CIR_Op<"global", bool hasAvailableExternallyLinkage() { return mlir::cir::isAvailableExternallyLinkage(getLinkage()); } + bool hasInternalLinkage() { + return mlir::cir::isInternalLinkage(getLinkage()); + } /// Whether the definition of this global may be replaced at link time. bool isWeakForLinker() { return cir::isWeakForLinker(getLinkage()); } + bool isDSOLocal() { return getDsolocal(); } }]; let skipDefaultBuilders = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 26482f4e9fa2..ea887b8ff67c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -1462,7 +1462,7 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, MO = mlir::cir::MemOrder::Release; // Initializations don't need to be atomic. if (!isInit) - store.setMemOrder(MO); + store.setAtomic(MO); // Other decoration. if (IsVolatile) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index c517a080d9f9..6f842d489d0f 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -49,6 +49,7 @@ using namespace mlir; #include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "clang/CIR/Interfaces/CIROpInterfaces.h" +#include //===----------------------------------------------------------------------===// // CIR Dialect @@ -476,28 +477,28 @@ LogicalResult mlir::cir::CastOp::verify() { } switch (getKind()) { - case cir::CastKind::int_to_bool: { + case mlir::cir::CastKind::int_to_bool: { if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; return success(); } - case cir::CastKind::ptr_to_bool: { + case mlir::cir::CastKind::ptr_to_bool: { if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.ptr type for source"; return success(); } - case cir::CastKind::integral: { + case mlir::cir::CastKind::integral: { if (!mlir::isa(resType)) return emitOpError() << "requires !cir.int type for result"; if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; return success(); } - case cir::CastKind::array_to_ptrdecay: { + case mlir::cir::CastKind::array_to_ptrdecay: { auto arrayPtrTy = mlir::dyn_cast(srcType); auto flatPtrTy = mlir::dyn_cast(resType); if (!arrayPtrTy || !flatPtrTy) @@ -518,7 +519,7 @@ LogicalResult mlir::cir::CastOp::verify() { << "requires same type for array element and pointee result"; return success(); } - case cir::CastKind::bitcast: { + case mlir::cir::CastKind::bitcast: { // Allow bitcast of structs for calling conventions. if (isa(srcType) || isa(resType)) return success(); @@ -543,62 +544,62 @@ LogicalResult mlir::cir::CastOp::verify() { << "requires !cir.ptr or !cir.vector type for source and result"; return success(); } - case cir::CastKind::floating: { + case mlir::cir::CastKind::floating: { if (!mlir::isa(srcType) || !mlir::isa(resType)) return emitOpError() << "requires !cir.float type for source and result"; return success(); } - case cir::CastKind::float_to_int: { + case mlir::cir::CastKind::float_to_int: { if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.float type for source"; if (!mlir::dyn_cast(resType)) return emitOpError() << "requires !cir.int type for result"; return success(); } - case cir::CastKind::int_to_ptr: { + case mlir::cir::CastKind::int_to_ptr: { if (!mlir::dyn_cast(srcType)) return emitOpError() << "requires !cir.int type for source"; if (!mlir::dyn_cast(resType)) return emitOpError() << "requires !cir.ptr type for result"; return success(); } - case cir::CastKind::ptr_to_int: { + case mlir::cir::CastKind::ptr_to_int: { if (!mlir::dyn_cast(srcType)) return emitOpError() << "requires !cir.ptr type for source"; if (!mlir::dyn_cast(resType)) return emitOpError() << "requires !cir.int type for result"; return success(); } - case cir::CastKind::float_to_bool: { + case mlir::cir::CastKind::float_to_bool: { if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.float type for source"; if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; return success(); } - case cir::CastKind::bool_to_int: { + case mlir::cir::CastKind::bool_to_int: { if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.bool type for source"; if (!mlir::isa(resType)) return emitOpError() << "requires !cir.int type for result"; return success(); } - case cir::CastKind::int_to_float: { + case mlir::cir::CastKind::int_to_float: { if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; if (!mlir::isa(resType)) return emitOpError() << "requires !cir.float type for result"; return success(); } - case cir::CastKind::bool_to_float: { + case mlir::cir::CastKind::bool_to_float: { if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.bool type for source"; if (!mlir::isa(resType)) return emitOpError() << "requires !cir.float type for result"; return success(); } - case cir::CastKind::address_space: { + case mlir::cir::CastKind::address_space: { auto srcPtrTy = mlir::dyn_cast(srcType); auto resPtrTy = mlir::dyn_cast(resType); if (!srcPtrTy || !resPtrTy) @@ -607,7 +608,7 @@ LogicalResult mlir::cir::CastOp::verify() { return emitOpError() << "requires two types differ in addrspace only"; return success(); } - case cir::CastKind::float_to_complex: { + case mlir::cir::CastKind::float_to_complex: { if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.float type for source"; auto resComplexTy = mlir::dyn_cast(resType); @@ -617,7 +618,7 @@ LogicalResult mlir::cir::CastOp::verify() { return emitOpError() << "requires source type match result element type"; return success(); } - case cir::CastKind::int_to_complex: { + case mlir::cir::CastKind::int_to_complex: { if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; auto resComplexTy = mlir::dyn_cast(resType); @@ -627,7 +628,7 @@ LogicalResult mlir::cir::CastOp::verify() { return emitOpError() << "requires source type match result element type"; return success(); } - case cir::CastKind::float_complex_to_real: { + case mlir::cir::CastKind::float_complex_to_real: { auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy) return emitOpError() << "requires !cir.complex type for source"; @@ -637,7 +638,7 @@ LogicalResult mlir::cir::CastOp::verify() { return emitOpError() << "requires source element type match result type"; return success(); } - case cir::CastKind::int_complex_to_real: { + case mlir::cir::CastKind::int_complex_to_real: { auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy) return emitOpError() << "requires !cir.complex type for source"; @@ -647,7 +648,7 @@ LogicalResult mlir::cir::CastOp::verify() { return emitOpError() << "requires source element type match result type"; return success(); } - case cir::CastKind::float_complex_to_bool: { + case mlir::cir::CastKind::float_complex_to_bool: { auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy || !mlir::isa(srcComplexTy.getElementTy())) @@ -657,7 +658,7 @@ LogicalResult mlir::cir::CastOp::verify() { return emitOpError() << "requires !cir.bool type for result"; return success(); } - case cir::CastKind::int_complex_to_bool: { + case mlir::cir::CastKind::int_complex_to_bool: { auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy || !mlir::isa(srcComplexTy.getElementTy())) @@ -667,7 +668,7 @@ LogicalResult mlir::cir::CastOp::verify() { return emitOpError() << "requires !cir.bool type for result"; return success(); } - case cir::CastKind::float_complex: { + case mlir::cir::CastKind::float_complex: { auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy || !mlir::isa(srcComplexTy.getElementTy())) @@ -680,7 +681,7 @@ LogicalResult mlir::cir::CastOp::verify() { << "requires !cir.complex type for result"; return success(); } - case cir::CastKind::float_complex_to_int_complex: { + case mlir::cir::CastKind::float_complex_to_int_complex: { auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy || !mlir::isa(srcComplexTy.getElementTy())) @@ -692,7 +693,7 @@ LogicalResult mlir::cir::CastOp::verify() { return emitOpError() << "requires !cir.complex type for result"; return success(); } - case cir::CastKind::int_complex: { + case mlir::cir::CastKind::int_complex: { auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy || !mlir::isa(srcComplexTy.getElementTy())) @@ -703,7 +704,7 @@ LogicalResult mlir::cir::CastOp::verify() { return emitOpError() << "requires !cir.complex type for result"; return success(); } - case cir::CastKind::int_complex_to_float_complex: { + case mlir::cir::CastKind::int_complex_to_float_complex: { auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy || !mlir::isa(srcComplexTy.getElementTy())) @@ -926,6 +927,30 @@ LogicalResult mlir::cir::ComplexImagPtrOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// LoadOp +//===----------------------------------------------------------------------===// + +// TODO(CIR): The final interface here should include an argument for the +// SyncScope::ID. +void mlir::cir::LoadOp::setAtomic(mlir::cir::MemOrder order) { + setMemOrder(order); + if (::cir::MissingFeatures::syncScopeID()) + llvm_unreachable("NYI"); +} + +//===----------------------------------------------------------------------===// +// StoreOp +//===----------------------------------------------------------------------===// + +// TODO(CIR): The final interface here should include an argument for the +// SyncScope::ID. +void mlir::cir::StoreOp::setAtomic(mlir::cir::MemOrder order) { + setMemOrder(order); + if (::cir::MissingFeatures::syncScopeID()) + llvm_unreachable("NYI"); +} + //===----------------------------------------------------------------------===// // VecCreateOp //===----------------------------------------------------------------------===// @@ -1036,11 +1061,11 @@ mlir::LogicalResult mlir::cir::ReturnOp::verify() { // Returns can be present in multiple different scopes, get the // wrapping function and start from there. auto *fnOp = getOperation()->getParentOp(); - while (!isa(fnOp)) + while (!isa(fnOp)) fnOp = fnOp->getParentOp(); // Make sure return types match function return type. - if (checkReturnAndFunction(*this, cast(fnOp)).failed()) + if (checkReturnAndFunction(*this, cast(fnOp)).failed()) return failure(); return success(); @@ -1946,8 +1971,8 @@ LogicalResult mlir::cir::GlobalOp::verify() { void mlir::cir::GlobalOp::build( OpBuilder &odsBuilder, OperationState &odsState, StringRef sym_name, - Type sym_type, bool isConstant, cir::GlobalLinkageKind linkage, - cir::AddressSpaceAttr addrSpace, + Type sym_type, bool isConstant, mlir::cir::GlobalLinkageKind linkage, + mlir::cir::AddressSpaceAttr addrSpace, function_ref ctorBuilder, function_ref dtorBuilder) { odsState.addAttribute(getSymNameAttrName(odsState.name), @@ -1959,7 +1984,7 @@ void mlir::cir::GlobalOp::build( odsBuilder.getUnitAttr()); ::mlir::cir::GlobalLinkageKindAttr linkageAttr = - cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage); + mlir::cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage); odsState.addAttribute(getLinkageAttrName(odsState.name), linkageAttr); if (addrSpace) @@ -2527,7 +2552,7 @@ void mlir::cir::FuncOp::print(OpAsmPrinter &p) { // getNumArguments hook not failing. LogicalResult mlir::cir::FuncOp::verifyType() { auto type = getFunctionType(); - if (!isa(type)) + if (!isa(type)) return emitOpError("requires '" + getFunctionTypeAttrName().str() + "' attribute of function type"); if (!getNoProto() && type.isVarArg() && type.getNumInputs() == 0) @@ -2550,19 +2575,20 @@ LogicalResult mlir::cir::FuncOp::verify() { if (getLinkage() == cir::GlobalLinkageKind::CommonLinkage) return emitOpError() << "functions cannot have '" << stringifyGlobalLinkageKind( - cir::GlobalLinkageKind::CommonLinkage) + mlir::cir::GlobalLinkageKind::CommonLinkage) << "' linkage"; if (isExternal()) { - if (getLinkage() != cir::GlobalLinkageKind::ExternalLinkage && - getLinkage() != cir::GlobalLinkageKind::ExternalWeakLinkage) - return emitOpError() << "external functions must have '" - << stringifyGlobalLinkageKind( - cir::GlobalLinkageKind::ExternalLinkage) - << "' or '" - << stringifyGlobalLinkageKind( - cir::GlobalLinkageKind::ExternalWeakLinkage) - << "' linkage"; + if (getLinkage() != mlir::cir::GlobalLinkageKind::ExternalLinkage && + getLinkage() != mlir::cir::GlobalLinkageKind::ExternalWeakLinkage) + return emitOpError() + << "external functions must have '" + << stringifyGlobalLinkageKind( + mlir::cir::GlobalLinkageKind::ExternalLinkage) + << "' or '" + << stringifyGlobalLinkageKind( + mlir::cir::GlobalLinkageKind::ExternalWeakLinkage) + << "' linkage"; return success(); } @@ -3058,11 +3084,11 @@ mlir::cir::TryCallOp::getSuccessorOperands(unsigned index) { LogicalResult mlir::cir::UnaryOp::verify() { switch (getKind()) { - case cir::UnaryOpKind::Inc: - case cir::UnaryOpKind::Dec: - case cir::UnaryOpKind::Plus: - case cir::UnaryOpKind::Minus: - case cir::UnaryOpKind::Not: + case mlir::cir::UnaryOpKind::Inc: + case mlir::cir::UnaryOpKind::Dec: + case mlir::cir::UnaryOpKind::Plus: + case mlir::cir::UnaryOpKind::Minus: + case mlir::cir::UnaryOpKind::Not: // Nothing to verify. return success(); } @@ -3079,8 +3105,9 @@ void mlir::cir::AwaitOp::build( function_ref readyBuilder, function_ref suspendBuilder, function_ref resumeBuilder) { - result.addAttribute(getKindAttrName(result.name), - cir::AwaitKindAttr::get(builder.getContext(), kind)); + result.addAttribute( + getKindAttrName(result.name), + mlir::cir::AwaitKindAttr::get(builder.getContext(), kind)); { OpBuilder::InsertionGuard guard(builder); Region *readyRegion = result.addRegion(); @@ -3192,7 +3219,7 @@ LogicalResult mlir::cir::ConstArrayAttr::verify( if (auto strAttr = mlir::dyn_cast(attr)) { mlir::cir::ArrayType at = mlir::cast(type); - auto intTy = mlir::dyn_cast(at.getEltType()); + auto intTy = mlir::dyn_cast(at.getEltType()); // TODO: add CIR type for char. if (!intTy || intTy.getWidth() != 8) { @@ -3309,8 +3336,9 @@ LogicalResult mlir::cir::ConstVectorAttr::verify( ::mlir::Type type, mlir::ArrayAttr arrayAttr) { if (!mlir::isa(type)) { - return emitError() - << "type of cir::ConstVectorAttr is not a cir::VectorType: " << type; + return emitError() << "type of mlir::cir::ConstVectorAttr is not a " + "mlir::cir::VectorType: " + << type; } auto vecType = mlir::cast(type); From 19c27aae6ed26e531f522be3aa28a9428544fa29 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 5 Nov 2024 21:53:53 +0300 Subject: [PATCH 2029/2301] [CIR][ABI][AArch64][Lowering] Initial support for passing struct types (#1041) This PR adds a support for some basic cases for struct types passed by value. The hardest part probably is `createCoercedStore` function, which I rewrote significantly in order to make it closer to the orignal codegen. --- .../TargetLowering/LowerFunction.cpp | 121 +++++++++++++----- .../TargetLowering/Targets/AArch64.cpp | 31 ++++- .../AArch64/aarch64-cc-structs.c | 46 ++++++- 3 files changed, 162 insertions(+), 36 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 440a0a129108..1b638411cc59 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -79,6 +79,64 @@ Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, // above. } +/// Convert a value Val to the specific Ty where both +/// are either integers or pointers. This does a truncation of the value if it +/// is too large or a zero extension if it is too small. +/// +/// This behaves as if the value were coerced through memory, so on big-endian +/// targets the high bits are preserved in a truncation, while little-endian +/// targets preserve the low bits. +static Value coerceIntOrPtrToIntOrPtr(Value val, Type typ, LowerFunction &CGF) { + if (val.getType() == typ) + return val; + + auto &bld = CGF.getRewriter(); + + if (isa(val.getType())) { + // If this is Pointer->Pointer avoid conversion to and from int. + if (isa(typ)) + return bld.create(val.getLoc(), typ, CastKind::bitcast, val); + + // Convert the pointer to an integer so we can play with its width. + val = bld.create(val.getLoc(), typ, CastKind::ptr_to_int, val); + } + + auto dstIntTy = typ; + if (isa(dstIntTy)) + cir_cconv_unreachable("NYI"); + + if (val.getType() != dstIntTy) { + const auto &layout = CGF.LM.getDataLayout(); + if (layout.isBigEndian()) { + // Preserve the high bits on big-endian targets. + // That is what memory coercion does. + uint64_t srcSize = layout.getTypeSizeInBits(val.getType()); + uint64_t dstSize = layout.getTypeSizeInBits(dstIntTy); + uint64_t diff = srcSize > dstSize ? srcSize - dstSize : dstSize - srcSize; + auto loc = val.getLoc(); + if (srcSize > dstSize) { + auto intAttr = IntAttr::get(val.getType(), diff); + auto amount = bld.create(loc, intAttr); + val = bld.create(loc, val.getType(), val, amount, false); + val = bld.create(loc, dstIntTy, CastKind::integral, val); + } else { + val = bld.create(loc, dstIntTy, CastKind::integral, val); + auto intAttr = IntAttr::get(val.getType(), diff); + auto amount = bld.create(loc, intAttr); + val = bld.create(loc, val.getType(), val, amount, true); + } + } else { + // Little-endian targets preserve the low bits. No shifts required. + val = bld.create(val.getLoc(), dstIntTy, CastKind::integral, val); + } + } + + if (isa(typ)) + val = bld.create(val.getLoc(), typ, CastKind::int_to_ptr, val); + + return val; +} + /// Create a store to \param Dst from \param Src where the source and /// destination may have different types. /// @@ -92,38 +150,39 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, cir_cconv_unreachable("NYI"); } - // FIXME(cir): We need a better way to handle datalayout queries. - cir_cconv_assert(isa(SrcTy)); llvm::TypeSize SrcSize = CGF.LM.getDataLayout().getTypeAllocSize(SrcTy); - - if (StructType DstSTy = dyn_cast(DstTy)) { - Dst = enterStructPointerForCoercedAccess(Dst, DstSTy, - SrcSize.getFixedValue(), CGF); - cir_cconv_assert(isa(Dst.getType())); - DstTy = cast(Dst.getType()).getPointee(); - } - - PointerType SrcPtrTy = dyn_cast(SrcTy); - PointerType DstPtrTy = dyn_cast(DstTy); - // TODO(cir): Implement address space. - if (SrcPtrTy && DstPtrTy && !::cir::MissingFeatures::addressSpace()) { - cir_cconv_unreachable("NYI"); - } - - // If the source and destination are integer or pointer types, just do an - // extension or truncation to the desired type. - if ((isa(SrcTy) || isa(SrcTy)) && - (isa(DstTy) || isa(DstTy))) { - cir_cconv_unreachable("NYI"); - } - - llvm::TypeSize DstSize = CGF.LM.getDataLayout().getTypeAllocSize(DstTy); - - // If store is legal, just bitcast the src pointer. - cir_cconv_assert(!::cir::MissingFeatures::vectorType()); - if (SrcSize.getFixedValue() <= DstSize.getFixedValue()) { - Dst = createCoercedBitcast(Dst, SrcTy, CGF); - CGF.buildAggregateStore(Src, Dst, DstIsVolatile); + auto dstPtrTy = dyn_cast(DstTy); + + if (dstPtrTy) + if (auto dstSTy = dyn_cast(dstPtrTy.getPointee())) + if (SrcTy != dstSTy) + Dst = enterStructPointerForCoercedAccess(Dst, dstSTy, + SrcSize.getFixedValue(), CGF); + + auto &layout = CGF.LM.getDataLayout(); + llvm::TypeSize DstSize = dstPtrTy + ? layout.getTypeAllocSize(dstPtrTy.getPointee()) + : layout.getTypeAllocSize(DstTy); + + if (SrcSize.isScalable() || SrcSize <= DstSize) { + if (isa(SrcTy) && dstPtrTy && + isa(dstPtrTy.getPointee()) && + SrcSize == layout.getTypeAllocSize(dstPtrTy.getPointee())) { + cir_cconv_unreachable("NYI"); + } else if (auto STy = dyn_cast(SrcTy)) { + cir_cconv_unreachable("NYI"); + } else { + Dst = createCoercedBitcast(Dst, SrcTy, CGF); + CGF.buildAggregateStore(Src, Dst, DstIsVolatile); + } + } else if (isa(SrcTy)) { + auto &bld = CGF.getRewriter(); + auto *ctxt = CGF.LM.getMLIRContext(); + auto dstIntTy = IntType::get(ctxt, DstSize.getFixedValue() * 8, false); + Src = coerceIntOrPtrToIntOrPtr(Src, dstIntTy, CGF); + auto ptrTy = PointerType::get(ctxt, dstIntTy); + auto addr = bld.create(Dst.getLoc(), ptrTy, CastKind::bitcast, Dst); + bld.create(Dst.getLoc(), Src, addr); } else { cir_cconv_unreachable("NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index b4e02d8e08fb..7ecf1399367e 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -160,9 +160,34 @@ AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, : ABIArgInfo::getDirect()); } - cir_cconv_assert_or_abort( - !::cir::MissingFeatures::AArch64TypeClassification(), "NYI"); - return {}; + uint64_t Size = getContext().getTypeSize(Ty); + const Type Base = nullptr; + + // Aggregates <= 16 bytes are passed directly in registers or on the stack. + if (Size <= 128) { + unsigned Alignment; + if (Kind == AArch64ABIKind::AAPCS) { + Alignment = getContext().getTypeAlign(Ty); + Alignment = Alignment < 128 ? 64 : 128; + } else { + Alignment = std::max( + getContext().getTypeAlign(Ty), + (unsigned)getTarget().getPointerWidth(clang::LangAS::Default)); + } + Size = llvm::alignTo(Size, Alignment); + + // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. + // For aggregates with 16-byte alignment, we use i128. + Type baseTy = + mlir::cir::IntType::get(LT.getMLIRContext(), Alignment, false); + auto argTy = Size == Alignment + ? baseTy + : mlir::cir::ArrayType::get(LT.getMLIRContext(), baseTy, + Size / Alignment); + return ABIArgInfo::getDirect(argTy); + } + + cir_cconv_unreachable("NYI"); } std::unique_ptr diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index 649811a2265a..bc2f1d37d729 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-cir-flat -fclangir-call-conv-lowering %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-llvm -fclangir-call-conv-lowering %s -o -| FileCheck %s -check-prefix=LLVM #include @@ -73,3 +73,45 @@ GT_128 ret_gt_128() { GT_128 x; return x; } + +// CHECK: cir.func {{.*@pass_lt_64}}(%arg0: !u64 +// CHECK: %[[#V0:]] = cir.alloca !ty_LT_64_, !cir.ptr +// CHECK: %[[#V1:]] = cir.cast(integral, %arg0 : !u64i), !u16i +// CHECK: %[[#V2:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: cir.store %[[#V1]], %[[#V2]] : !u16i, !cir.ptr + +// LLVM: void @pass_lt_64(i64 %0) +// LLVM: %[[#V1:]] = alloca %struct.LT_64, i64 1, align 4 +// LLVM: %[[#V2:]] = trunc i64 %0 to i16 +// LLVM: store i16 %[[#V2]], ptr %[[#V1]], align 2 +void pass_lt_64(LT_64 s) {} + +// CHECK: cir.func {{.*@pass_eq_64}}(%arg0: !u64i +// CHECK: %[[#V0:]] = cir.alloca !ty_EQ_64_, !cir.ptr +// CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: cir.store %arg0, %[[#V1]] : !u64i, !cir.ptr + +// LLVM: void @pass_eq_64(i64 %0) +// LLVM: %[[#V1:]] = alloca %struct.EQ_64, i64 1, align 4 +// LLVM: store i64 %0, ptr %[[#V1]], align 8 +void pass_eq_64(EQ_64 s) {} + +// CHECK: cir.func {{.*@pass_lt_128}}(%arg0: !cir.array +// CHECK: %[[#V0:]] = cir.alloca !ty_LT_128_, !cir.ptr +// CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr> +// CHECK: cir.store %arg0, %[[#V1]] : !cir.array, !cir.ptr> + +// LLVM: void @pass_lt_128([2 x i64] %0) +// LLVM: %[[#V1:]] = alloca %struct.LT_128, i64 1, align 4 +// LLVM: store [2 x i64] %0, ptr %[[#V1]], align 8 +void pass_lt_128(LT_128 s) {} + +// CHECK: cir.func {{.*@pass_eq_128}}(%arg0: !cir.array +// CHECK: %[[#V0:]] = cir.alloca !ty_EQ_128_, !cir.ptr +// CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr> +// CHECK: cir.store %arg0, %[[#V1]] : !cir.array, !cir.ptr> + +// LLVM: void @pass_eq_128([2 x i64] %0) +// LLVM: %[[#V1]] = alloca %struct.EQ_128, i64 1, align 4 +// LLVM: store [2 x i64] %0, ptr %[[#V1]], align 8 +void pass_eq_128(EQ_128 s) {} From 4b63bfa05a271f4e75ecc2897617f55c6102b628 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 5 Nov 2024 21:57:00 +0300 Subject: [PATCH 2030/2301] [CIR][ABI][AArch64] convers aarch64_be return struct case (#1049) This PR adds a support return struct as a value for one missed case for AArch64 big endian arch --- .../TargetLowering/Targets/AArch64.cpp | 3 ++- .../AArch64/aarch64_be-cc-structs.c | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CallConvLowering/AArch64/aarch64_be-cc-structs.c diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index 7ecf1399367e..e28766985995 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -134,7 +134,8 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, mlir::cir::ArrayType::get(LT.getMLIRContext(), baseTy, Size / 64)); } - cir_cconv_unreachable("NYI"); + return ABIArgInfo::getDirect( + IntType::get(LT.getMLIRContext(), Size, false)); } return getNaturalAlignIndirect(RetTy); diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64_be-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64_be-cc-structs.c new file mode 100644 index 000000000000..226f415572a8 --- /dev/null +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64_be-cc-structs.c @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -triple aarch64_be-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +typedef struct { + int a; + int b; +} __attribute__((alligned (4))) S; + +// CHECK: cir.func {{.*@init}}() -> !u64i +// CHECK: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V2:]] = cir.load %[[#V1]] : !cir.ptr, !u64i +// CHECK: cir.return %[[#V2]] : !u64i +S init() { + S s; + return s; +} From 9ef9412aca7123afc6772d4985db34ee97a82b70 Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 5 Nov 2024 13:58:26 -0500 Subject: [PATCH 2031/2301] [CIR][CIRGen][Builtin][NFC] Refactor IntrinsicCallOp (#1056) `IntrinsicCallOp` is now named `LLVMIntrinsicCallOp` to better reflect its purpose. And now In CIR, we do not have "llvm" prefix which will be added later during LLVMLowering. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 7 +- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 77 +++---- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 - .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 9 +- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 90 ++++---- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 6 +- clang/test/CIR/CodeGen/AArch64/neon.c | 214 +++++++++--------- clang/test/CIR/CodeGen/builtin-arm-ldrex.c | 14 +- clang/test/CIR/IR/invalid-llvm-intrinsic.cir | 11 - 9 files changed, 207 insertions(+), 227 deletions(-) delete mode 100644 clang/test/CIR/IR/invalid-llvm-intrinsic.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 2b36931de22d..0a7c7222968d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3445,11 +3445,11 @@ def FuncOp : CIR_Op<"func", [ } //===----------------------------------------------------------------------===// -// IntrinsicCallOp +// LLVMIntrinsicCallOp //===----------------------------------------------------------------------===// -def IntrinsicCallOp : CIR_Op<"llvm.intrinsic"> { - let summary = "Call to intrinsic functions that is not defined in CIR"; +def LLVMIntrinsicCallOp : CIR_Op<"llvm.intrinsic"> { + let summary = "Call to llvm intrinsic functions that is not defined in CIR"; let description = [{ `cir.llvm.intrinsic` operation represents a call-like expression which has return type and arguments that maps directly to a llvm intrinsic. @@ -3476,7 +3476,6 @@ def IntrinsicCallOp : CIR_Op<"llvm.intrinsic"> { }]>, ]; - let hasVerifier = 1; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 55e428ae380e..d3b1add144fc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2125,7 +2125,7 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, CIRGenFunction &cgf) { StringRef intrinsicName; if (builtinID == clang::AArch64::BI__builtin_arm_ldrex) { - intrinsicName = "llvm.aarch64.ldxr"; + intrinsicName = "aarch64.ldxr"; } else { llvm_unreachable("Unknown builtinID"); } @@ -2139,8 +2139,9 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, // which can be found under LLVM IR directory. mlir::Type funcResTy = builder.getSInt64Ty(); mlir::Location loc = cgf.getLoc(clangCallExpr->getExprLoc()); - mlir::cir::IntrinsicCallOp op = builder.create( - loc, builder.getStringAttr(intrinsicName), funcResTy, loadAddr); + mlir::cir::LLVMIntrinsicCallOp op = + builder.create( + loc, builder.getStringAttr(intrinsicName), funcResTy, loadAddr); mlir::Value res = op.getResult(); // Convert result type to the expected type. @@ -2267,7 +2268,7 @@ mlir::Value buildNeonCall(CIRGenBuilderTy &builder, return nullptr; } return builder - .create( + .create( loc, builder.getStringAttr(intrinsicName), funcResTy, args) .getResult(); } @@ -2377,8 +2378,8 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( return buildNeonCall(builder, {resTy, mulVecT, SInt32Ty}, ops, (builtinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || builtinID == NEON::BI__builtin_neon_vqdmulh_lane_v) - ? "llvm.aarch64.neon.sqdmulh.lane" - : "llvm.aarch64.neon.sqrdmulh.lane", + ? "aarch64.neon.sqdmulh.lane" + : "aarch64.neon.sqrdmulh.lane", resTy, getLoc(e->getExprLoc())); } case NEON::BI__builtin_neon_vqshlu_n_v: @@ -2386,18 +2387,18 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( // These intrinsics expect signed vector type as input, but // return unsigned vector type. mlir::cir::VectorType srcTy = getSignChangedVectorType(builder, vTy); - return buildNeonCall( - builder, {srcTy, srcTy}, ops, "llvm.aarch64.neon.sqshlu", vTy, - getLoc(e->getExprLoc()), false, /* not fp constrained op */ - 1, /* second arg is shift amount */ - false /* leftshift */); + return buildNeonCall(builder, {srcTy, srcTy}, ops, "aarch64.neon.sqshlu", + vTy, getLoc(e->getExprLoc()), + false, /* not fp constrained op */ + 1, /* second arg is shift amount */ + false /* leftshift */); } case NEON::BI__builtin_neon_vrshr_n_v: case NEON::BI__builtin_neon_vrshrq_n_v: { return buildNeonCall( builder, {vTy, isUnsigned ? getSignChangedVectorType(builder, vTy) : vTy}, ops, - isUnsigned ? "llvm.aarch64.neon.urshl" : "llvm.aarch64.neon.srshl", vTy, + isUnsigned ? "aarch64.neon.urshl" : "aarch64.neon.srshl", vTy, getLoc(e->getExprLoc()), false, /* not fp constrained op*/ 1, /* second arg is shift amount */ true /* rightshift */); @@ -2438,7 +2439,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( ops[0] = builder.createBitcast(ops[0], ty); ops[1] = builder.createBitcast(ops[1], ty); ops[0] = builder.createAnd(ops[0], ops[1]); - // Note that during LLVM Lowering, result of `VecCmpOp` is sign extended, + // Note that during vmVM Lowering, result of `VecCmpOp` is sign extended, // matching traditional codegen behavior. return builder.create( loc, ty, mlir::cir::CmpOpKind::ne, ops[0], builder.getZero(loc, ty)); @@ -2457,42 +2458,41 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vpadd_v: case NEON::BI__builtin_neon_vpaddq_v: { intrincsName = mlir::isa(vTy.getEltType()) - ? "llvm.aarch64.neon.faddp" - : "llvm.aarch64.neon.addp"; + ? "aarch64.neon.faddp" + : "aarch64.neon.addp"; break; } case NEON::BI__builtin_neon_vqadd_v: case NEON::BI__builtin_neon_vqaddq_v: { - intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.uqadd" - : "llvm.aarch64.neon.sqadd"; + intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.uqadd" + : "aarch64.neon.sqadd"; break; } case NEON::BI__builtin_neon_vqsub_v: case NEON::BI__builtin_neon_vqsubq_v: { - intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.uqsub" - : "llvm.aarch64.neon.sqsub"; + intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.uqsub" + : "aarch64.neon.sqsub"; break; } case NEON::BI__builtin_neon_vrhadd_v: case NEON::BI__builtin_neon_vrhaddq_v: { - intrincsName = (intrinicId != altLLVMIntrinsic) - ? "llvm.aarch64.neon.urhadd" - : "llvm.aarch64.neon.srhadd"; + intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.urhadd" + : "aarch64.neon.srhadd"; break; } case NEON::BI__builtin_neon_vshlq_v: { - intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.ushl" - : "llvm.aarch64.neon.sshl"; + intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.ushl" + : "aarch64.neon.sshl"; break; } case NEON::BI__builtin_neon_vhadd_v: case NEON::BI__builtin_neon_vhaddq_v: { - intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.uhadd" - : "llvm.aarch64.neon.shadd"; + intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.uhadd" + : "aarch64.neon.shadd"; break; } case NEON::BI__builtin_neon_vqmovun_v: { - intrincsName = "llvm.aarch64.neon.sqxtun"; + intrincsName = "aarch64.neon.sqxtun"; argTypes.push_back(builder.getExtendedOrTruncatedElementVectorType( vTy, true /* extended */, true /* signed */)); break; @@ -3264,10 +3264,9 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } case NEON::BI__builtin_neon_vmull_v: { - llvm::StringRef name = - usgn ? "llvm.aarch64.neon.umull" : "llvm.aarch64.neon.smull"; + llvm::StringRef name = usgn ? "aarch64.neon.umull" : "aarch64.neon.smull"; if (Type.isPoly()) - name = "llvm.aarch64.neon.pmull"; + name = "aarch64.neon.pmull"; mlir::cir::VectorType argTy = builder.getExtendedOrTruncatedElementVectorType( ty, false /* truncated */, !usgn); @@ -3282,10 +3281,9 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vmin_v: case NEON::BI__builtin_neon_vminq_v: { - llvm::StringRef name = - usgn ? "llvm.aarch64.neon.umin" : "llvm.aarch64.neon.smin"; + llvm::StringRef name = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin"; if (mlir::cir::isFPOrFPVectorTy(ty)) - name = "llvm.aarch64.neon.fmin"; + name = "aarch64.neon.fmin"; return buildNeonCall(builder, {ty, ty}, Ops, name, ty, getLoc(E->getExprLoc())); } @@ -3294,10 +3292,9 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vabd_v: case NEON::BI__builtin_neon_vabdq_v: { - llvm::StringRef name = - usgn ? "llvm.aarch64.neon.uabd" : "llvm.aarch64.neon.sabd"; + llvm::StringRef name = usgn ? "aarch64.neon.uabd" : "aarch64.neon.sabd"; if (mlir::cir::isFPOrFPVectorTy(ty)) - name = "llvm.aarch64.neon.fabd"; + name = "aarch64.neon.fabd"; return buildNeonCall(builder, {ty, ty}, Ops, name, ty, getLoc(E->getExprLoc())); } @@ -3337,7 +3334,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, builder, {builder.getExtendedOrTruncatedElementVectorType(ty, true, true), SInt32Ty}, - Ops, "llvm.aarch64.neon.sqrshrun", ty, getLoc(E->getExprLoc())); + Ops, "aarch64.neon.sqrshrun", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqshrn_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vrshrn_n_v: @@ -3347,7 +3344,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, vTy, true /* extend */, mlir::cast(vTy.getEltType()).isSigned()), SInt32Ty}, - Ops, "llvm.aarch64.neon.rshrn", ty, getLoc(E->getExprLoc())); + Ops, "aarch64.neon.rshrn", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqrshrn_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vrndah_f16: { @@ -3356,7 +3353,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vrnda_v: case NEON::BI__builtin_neon_vrndaq_v: { assert(!MissingFeatures::buildConstrainedFPCall()); - return buildNeonCall(builder, {ty}, Ops, "llvm.round", ty, + return buildNeonCall(builder, {ty}, Ops, "round", ty, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndih_f16: { @@ -3379,7 +3376,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vrndns_f32: { mlir::Value arg0 = buildScalarExpr(E->getArg(0)); args.push_back(arg0); - return buildNeonCall(builder, {arg0.getType()}, args, "llvm.roundeven.f32", + return buildNeonCall(builder, {arg0.getType()}, args, "roundeven.f32", getCIRGenModule().FloatTy, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndph_f16: { diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 6f842d489d0f..0dd68f57e7dd 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2561,12 +2561,6 @@ LogicalResult mlir::cir::FuncOp::verifyType() { return success(); } -LogicalResult mlir::cir::IntrinsicCallOp::verify() { - if (!getIntrinsicName().starts_with("llvm.")) - return emitOpError() << "intrinsic name must start with 'llvm.'"; - return success(); -} - // Verifies linkage types // - functions don't have 'common' linkage // - external functions have 'external' or 'extern_weak' linkage diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index acb2596bfd1d..ea84cef8ed3e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2982,12 +2982,13 @@ static mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp( } class CIRIntrinsicCallLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern< + mlir::cir::LLVMIntrinsicCallOp>::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::IntrinsicCallOp op, OpAdaptor adaptor, + matchAndRewrite(mlir::cir::LLVMIntrinsicCallOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Type llvmResTy = getTypeConverter()->convertType(op->getResultTypes()[0]); @@ -3004,7 +3005,7 @@ class CIRIntrinsicCallLowering // TODO(cir): MLIR LLVM dialect should handle this part as CIR has no way // to set LLVM IR attribute. assert(!::cir::MissingFeatures::llvmIntrinsicElementTypeSupport()); - replaceOpWithCallLLVMIntrinsicOp(rewriter, op, name, llvmResTy, + replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm." + name, llvmResTy, adaptor.getOperands()); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index 2bfa4e89505f..5df338e81ab6 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -23,7 +23,7 @@ float32_t test_vrndns_f32(float32_t a) { // CIR: cir.func internal private @vrndns_f32(%arg0: !cir.float {{.*}}) -> !cir.float // CIR: cir.store %arg0, [[ARG_SAVE:%.*]] : !cir.float, !cir.ptr // CIR: [[INTRIN_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr, !cir.float -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.roundeven.f32" [[INTRIN_ARG]] : (!cir.float) +// CIR: {{%.*}} = cir.llvm.intrinsic "roundeven.f32" [[INTRIN_ARG]] : (!cir.float) // CIR: cir.return {{%.*}} : !cir.float // CIR-LABEL: test_vrndns_f32 @@ -47,7 +47,7 @@ float32x2_t test_vrnda_f32(float32x2_t a) { // CIR: [[INTRIN_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr>, !cir.vector // CIR: [[INTRIN_ARG_CAST:%.*]] = cir.cast(bitcast, [[INTRIN_ARG]] : !cir.vector), !cir.vector // CIR: [[INTRIN_ARG_BACK:%.*]] = cir.cast(bitcast, [[INTRIN_ARG_CAST]] : !cir.vector), !cir.vector -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.round" [[INTRIN_ARG_BACK]] : (!cir.vector) -> !cir.vector +// CIR: {{%.*}} = cir.llvm.intrinsic "round" [[INTRIN_ARG_BACK]] : (!cir.vector) -> !cir.vector // CIR: cir.return {{%.*}} : !cir.vector // CIR-LABEL: test_vrnda_f32 @@ -71,7 +71,7 @@ float32x4_t test_vrndaq_f32(float32x4_t a) { // CIR: [[INTRIN_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr>, !cir.vector // CIR: [[INTRIN_ARG_CAST:%.*]] = cir.cast(bitcast, [[INTRIN_ARG]] : !cir.vector), !cir.vector // CIR: [[INTRIN_ARG_BACK:%.*]] = cir.cast(bitcast, [[INTRIN_ARG_CAST]] : !cir.vector), !cir.vector -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.round" [[INTRIN_ARG_BACK]] : (!cir.vector) -> !cir.vector +// CIR: {{%.*}} = cir.llvm.intrinsic "round" [[INTRIN_ARG_BACK]] : (!cir.vector) -> !cir.vector // CIR: cir.return {{%.*}} : !cir.vector // LLVM: {{.*}}test_vrndaq_f32(<4 x float>{{.*}}[[ARG:%.*]]) @@ -83,7 +83,7 @@ int8x8_t test_vpadd_s8(int8x8_t a, int8x8_t b) { } // CIR-LABEL: vpadd_s8 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vpadd_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) @@ -96,7 +96,7 @@ int8x16_t test_vpaddq_s8(int8x16_t a, int8x16_t b) { } // CIR-LABEL: vpaddq_s8 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vpaddq_s8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) @@ -108,7 +108,7 @@ uint8x8_t test_vpadd_u8(uint8x8_t a, uint8x8_t b) { } // CIR-LABEL: vpadd_u8 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vpadd_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) @@ -120,7 +120,7 @@ int16x4_t test_vpadd_s16(int16x4_t a, int16x4_t b) { } // CIR-LABEL: vpadd_s16 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector @@ -133,7 +133,7 @@ int16x8_t test_vpaddq_s16(int16x8_t a, int16x8_t b) { } // CIR-LABEL: vpaddq_s16 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector @@ -146,7 +146,7 @@ uint16x4_t test_vpadd_u16(uint16x4_t a, uint16x4_t b) { } // CIR-LABEL: vpadd_u16 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector @@ -159,7 +159,7 @@ int32x2_t test_vpadd_s32(int32x2_t a, int32x2_t b) { } // CIR-LABEL: vpadd_s32 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector @@ -172,7 +172,7 @@ int32x4_t test_vpaddq_s32(int32x4_t a, int32x4_t b) { } // CIR-LABEL: vpaddq_s32 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector @@ -185,7 +185,7 @@ float32x2_t test_vpadd_f32(float32x2_t a, float32x2_t b) { } // CIR-LABEL: vpadd_f32 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector @@ -198,7 +198,7 @@ float32x4_t test_vpaddq_f32(float32x4_t a, float32x4_t b) { } // CIR-LABEL: vpaddq_f32 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector @@ -211,7 +211,7 @@ float64x2_t test_vpaddq_f64(float64x2_t a, float64x2_t b) { } // CIR-LABEL: vpaddq_f64 -// CIR: [[RES:%.*]] = cir.llvm.intrinsic "llvm.aarch64.neon.addp" {{%.*}}, {{%.*}} : +// CIR: [[RES:%.*]] = cir.llvm.intrinsic "aarch64.neon.addp" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // CIR: {{%.*}} = cir.cast(bitcast, [[RES]] : !cir.vector), !cir.vector @@ -225,7 +225,7 @@ int16x4_t test_vqdmulh_lane_s16(int16x4_t a, int16x4_t v) { // CIR-LABEL: vqdmulh_lane_s16 // CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : // CIR: (!cir.vector, !cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vqdmulh_lane_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[V:%.*]]) @@ -240,7 +240,7 @@ int32x2_t test_vqdmulh_lane_s32(int32x2_t a, int32x2_t v) { // CIR-LABEL: vqdmulh_lane_s32 // CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : // CIR: (!cir.vector, !cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vqdmulh_lane_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[V:%.*]]) @@ -254,7 +254,7 @@ int16x8_t test_vqdmulhq_lane_s16(int16x8_t a, int16x4_t v) { // CIR-LABEL: vqdmulhq_lane_s16 // CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : // CIR: (!cir.vector, !cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vqdmulhq_lane_s16(<8 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[V:%.*]]) @@ -268,7 +268,7 @@ int32x4_t test_vqdmulhq_lane_s32(int32x4_t a, int32x2_t v) { // CIR-LABEL: vqdmulhq_lane_s32 // CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : // CIR: (!cir.vector, !cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vqdmulhq_lane_s32(<4 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[V:%.*]]) @@ -282,7 +282,7 @@ int16x4_t test_vqrdmulh_lane_s16(int16x4_t a, int16x4_t v) { // CIR-LABEL: vqrdmulh_lane_s16 // CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : // CIR-SAME: (!cir.vector, !cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vqrdmulh_lane_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[V:%.*]]) @@ -296,7 +296,7 @@ int16x8_t test_vqrdmulhq_lane_s16(int16x8_t a, int16x4_t v) { // CIR-LABEL: vqrdmulhq_lane_s16 // CIR: [[LANE:%.*]] = cir.const #cir.int<3> : !s32i -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : // CIR-SAME: (!cir.vector, !cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vqrdmulhq_lane_s16(<8 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[V:%.*]]) @@ -310,7 +310,7 @@ int32x2_t test_vqrdmulh_lane_s32(int32x2_t a, int32x2_t v) { // CIR-LABEL: vqrdmulh_lane_s32 // CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : // CIR-SAME: (!cir.vector, !cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vqrdmulh_lane_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[V:%.*]]) @@ -324,7 +324,7 @@ int32x4_t test_vqrdmulhq_lane_s32(int32x4_t a, int32x2_t v) { // CIR-LABEL: vqrdmulhq_lane_s32 // CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrdmulh.lane" {{%.*}}, {{%.*}}, [[LANE]] : // CIR-SAME: (!cir.vector, !cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vqrdmulhq_lane_s32(<4 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[V:%.*]]) @@ -337,7 +337,7 @@ int8x16_t test_vqaddq_s8(int8x16_t a, int8x16_t b) { } // CIR-LABEL: vqaddq_s8 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqaddq_s8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]]) @@ -349,7 +349,7 @@ uint8x16_t test_vqaddq_u8(uint8x16_t a, uint8x16_t b) { } // CIR-LABEL: vqaddq_u8 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqaddq_u8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]]) @@ -361,7 +361,7 @@ int16x8_t test_vqaddq_s16(int16x8_t a, int16x8_t b) { } // CIR-LABEL: vqaddq_s16 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqaddq_s16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]]) @@ -373,7 +373,7 @@ uint16x8_t test_vqaddq_u16(uint16x8_t a, uint16x8_t b) { } // CIR-LABEL: vqaddq_u16 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqaddq_u16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]]) @@ -385,7 +385,7 @@ int32x4_t test_vqaddq_s32(int32x4_t a, int32x4_t b) { } // CIR-LABEL: vqaddq_s32 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqaddq_s32(<4 x i32>{{.*}} [[A:%.*]], <4 x i32>{{.*}} [[B:%.*]]) @@ -397,7 +397,7 @@ int64x2_t test_vqaddq_s64(int64x2_t a, int64x2_t b) { } // CIR-LABEL: vqaddq_s64 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqaddq_s64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]]) @@ -409,7 +409,7 @@ uint64x2_t test_vqaddq_u64(uint64x2_t a, uint64x2_t b) { } // CIR-LABEL: vqaddq_u64 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqaddq_u64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]]) @@ -421,7 +421,7 @@ int8x8_t test_vqsub_s8(int8x8_t a, int8x8_t b) { } // CIR-LABEL: vqsub_s8 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsub_s8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]]) @@ -433,7 +433,7 @@ uint8x8_t test_vqsub_u8(uint8x8_t a, uint8x8_t b) { } // CIR-LABEL: vqsub_u8 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsub_u8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]]) @@ -445,7 +445,7 @@ int16x4_t test_vqsub_s16(int16x4_t a, int16x4_t b) { } // CIR-LABEL: vqsub_s16 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsub_s16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]]) @@ -457,7 +457,7 @@ uint16x4_t test_vqsub_u16(uint16x4_t a, uint16x4_t b) { } // CIR-LABEL: vqsub_u16 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsub_u16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]]) @@ -469,7 +469,7 @@ int32x2_t test_vqsub_s32(int32x2_t a, int32x2_t b) { } // CIR-LABEL: vqsub_s32 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsub_s32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]]) @@ -481,7 +481,7 @@ uint32x2_t test_vqsub_u32(uint32x2_t a, uint32x2_t b) { } // CIR-LABEL: vqsub_u32 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsub_u32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]]) @@ -493,7 +493,7 @@ int64x1_t test_vqsub_s64(int64x1_t a, int64x1_t b) { } // CIR-LABEL: vqsub_s64 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsub_s64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]]) @@ -505,7 +505,7 @@ uint64x1_t test_vqsub_u64(uint64x1_t a, uint64x1_t b) { } // CIR-LABEL: vqsub_u64 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsub_u64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]]) @@ -517,7 +517,7 @@ int8x16_t test_vqsubq_s8(int8x16_t a, int8x16_t b) { } // CIR-LABEL: vqsubq_s8 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsubq_s8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]]) @@ -529,7 +529,7 @@ uint8x16_t test_vqsubq_u8(uint8x16_t a, uint8x16_t b) { } // CIR-LABEL: vqsubq_u8 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsubq_u8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]]) @@ -541,7 +541,7 @@ int16x8_t test_vqsubq_s16(int16x8_t a, int16x8_t b) { } // CIR-LABEL: vqsubq_s16 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsubq_s16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]]) @@ -553,7 +553,7 @@ uint16x8_t test_vqsubq_u16(uint16x8_t a, uint16x8_t b) { } // CIR-LABEL: vqsubq_u16 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsubq_u16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]]) @@ -565,7 +565,7 @@ int32x4_t test_vqsubq_s32(int32x4_t a, int32x4_t b) { } // CIR-LABEL: vqsubq_s32 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsubq_s32(<4 x i32>{{.*}} [[A:%.*]], <4 x i32>{{.*}} [[B:%.*]]) @@ -577,7 +577,7 @@ uint32x4_t test_vqsubq_u32(uint32x4_t a, uint32x4_t b) { } // CIR-LABEL: vqsubq_u32 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsubq_u32(<4 x i32>{{.*}} [[A:%.*]], <4 x i32>{{.*}} [[B:%.*]]) @@ -589,7 +589,7 @@ int64x2_t test_vqsubq_s64(int64x2_t a, int64x2_t b) { } // CIR-LABEL: vqsubq_s64 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsubq_s64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]]) @@ -601,7 +601,7 @@ uint64x2_t test_vqsubq_u64(uint64x2_t a, uint64x2_t b) { } // CIR-LABEL: vqsubq_u64 -// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} : +// CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqsub" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqsubq_u64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]]) diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index 62a78324bad8..abbf2510b466 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -456,7 +456,7 @@ uint8x8_t test_vqmovun_s16(int16x8_t a) { return vqmovun_s16(a); // CIR-LABEL: vqmovun_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqxtun" {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqxtun" {{%.*}} : // CIR-SAME: (!cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqmovun_s16(<8 x i16>{{.*}}[[A:%.*]]) @@ -469,7 +469,7 @@ uint16x4_t test_vqmovun_s32(int32x4_t a) { return vqmovun_s32(a); // CIR-LABEL: vqmovun_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqxtun" {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqxtun" {{%.*}} : // CIR-SAME: (!cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqmovun_s32(<4 x i32>{{.*}}[[A:%.*]]) @@ -483,7 +483,7 @@ uint32x2_t test_vqmovun_s64(int64x2_t a) { return vqmovun_s64(a); // CIR-LABEL: vqmovun_s64 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqxtun" {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqxtun" {{%.*}} : // CIR-SAME: (!cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqmovun_s64(<2 x i64>{{.*}}[[A:%.*]]) diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 09b881782193..246bdfea61c5 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -846,7 +846,7 @@ int8x8_t test_vabd_s8(int8x8_t v1, int8x8_t v2) { return vabd_s8(v1, v2); // CIR-LABEL: vabd_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_s8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -858,7 +858,7 @@ int16x4_t test_vabd_s16(int16x4_t v1, int16x4_t v2) { return vabd_s16(v1, v2); // CIR-LABEL: vabd_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_s16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -870,7 +870,7 @@ int32x2_t test_vabd_s32(int32x2_t v1, int32x2_t v2) { return vabd_s32(v1, v2); // CIR-LABEL: vabd_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_s32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -882,7 +882,7 @@ uint8x8_t test_vabd_u8(uint8x8_t v1, uint8x8_t v2) { return vabd_u8(v1, v2); // CIR-LABEL: vabd_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_u8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -894,7 +894,7 @@ uint16x4_t test_vabd_u16(uint16x4_t v1, uint16x4_t v2) { return vabd_u16(v1, v2); // CIR-LABEL: vabd_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_u16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -906,7 +906,7 @@ uint32x2_t test_vabd_u32(uint32x2_t v1, uint32x2_t v2) { return vabd_u32(v1, v2); // CIR-LABEL: vabd_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_u32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -918,7 +918,7 @@ float32x2_t test_vabd_f32(float32x2_t v1, float32x2_t v2) { return vabd_f32(v1, v2); // CIR-LABEL: vabd_f32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.fabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_f32(<2 x float>{{.*}}[[V1:%.*]], <2 x float>{{.*}}[[V2:%.*]]) @@ -930,7 +930,7 @@ int8x16_t test_vabdq_s8(int8x16_t v1, int8x16_t v2) { return vabdq_s8(v1, v2); // CIR-LABEL: vabdq_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabdq_s8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) @@ -942,7 +942,7 @@ int16x8_t test_vabdq_s16(int16x8_t v1, int16x8_t v2) { return vabdq_s16(v1, v2); // CIR-LABEL: vabdq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabdq_s16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) @@ -954,7 +954,7 @@ int32x4_t test_vabdq_s32(int32x4_t v1, int32x4_t v2) { return vabdq_s32(v1, v2); // CIR-LABEL: vabdq_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabdq_s32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) @@ -966,7 +966,7 @@ uint8x16_t test_vabdq_u8(uint8x16_t v1, uint8x16_t v2) { return vabdq_u8(v1, v2); // CIR-LABEL: vabdq_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabdq_u8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) @@ -978,7 +978,7 @@ uint16x8_t test_vabdq_u16(uint16x8_t v1, uint16x8_t v2) { return vabdq_u16(v1, v2); // CIR-LABEL: vabdq_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabdq_u16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) @@ -990,7 +990,7 @@ uint32x4_t test_vabdq_u32(uint32x4_t v1, uint32x4_t v2) { return vabdq_u32(v1, v2); // CIR-LABEL: vabdq_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabdq_u32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) @@ -1002,7 +1002,7 @@ float32x4_t test_vabdq_f32(float32x4_t v1, float32x4_t v2) { return vabdq_f32(v1, v2); // CIR-LABEL: vabdq_f32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.fabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabdq_f32(<4 x float>{{.*}}[[V1:%.*]], <4 x float>{{.*}}[[V2:%.*]]) @@ -1014,7 +1014,7 @@ float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) { return vabdq_f64(v1, v2); // CIR-LABEL: vabdq_f64 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.fabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabdq_f64(<2 x double>{{.*}}[[V1:%.*]], <2 x double>{{.*}}[[V2:%.*]]) @@ -2572,7 +2572,7 @@ int8x8_t test_vhadd_s8(int8x8_t v1, int8x8_t v2) { return vhadd_s8(v1, v2); // CIR-LABEL: vhadd_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_s8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -2584,7 +2584,7 @@ int16x4_t test_vhadd_s16(int16x4_t v1, int16x4_t v2) { return vhadd_s16(v1, v2); // CIR-LABEL: vhadd_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_s16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -2599,7 +2599,7 @@ int32x2_t test_vhadd_s32(int32x2_t v1, int32x2_t v2) { return vhadd_s32(v1, v2); // CIR-LABEL: vhadd_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_s32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -2614,7 +2614,7 @@ uint8x8_t test_vhadd_u8(uint8x8_t v1, uint8x8_t v2) { return vhadd_u8(v1, v2); // CIR-LABEL: vhadd_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_u8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -2626,7 +2626,7 @@ uint16x4_t test_vhadd_u16(uint16x4_t v1, uint16x4_t v2) { return vhadd_u16(v1, v2); // CIR-LABEL: vhadd_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_u16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -2641,7 +2641,7 @@ uint32x2_t test_vhadd_u32(uint32x2_t v1, uint32x2_t v2) { return vhadd_u32(v1, v2); // CIR-LABEL: vhadd_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_u32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -2656,7 +2656,7 @@ int8x16_t test_vhaddq_s8(int8x16_t v1, int8x16_t v2) { return vhaddq_s8(v1, v2); // CIR-LABEL: vhaddq_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_s8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) @@ -2668,7 +2668,7 @@ int16x8_t test_vhaddq_s16(int16x8_t v1, int16x8_t v2) { return vhaddq_s16(v1, v2); // CIR-LABEL: vhaddq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_s16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) @@ -2683,7 +2683,7 @@ int32x4_t test_vhaddq_s32(int32x4_t v1, int32x4_t v2) { return vhaddq_s32(v1, v2); // CIR-LABEL: vhaddq_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_s32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) @@ -2698,7 +2698,7 @@ uint8x16_t test_vhaddq_u8(uint8x16_t v1, uint8x16_t v2) { return vhaddq_u8(v1, v2); // CIR-LABEL: vhaddq_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_u8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) @@ -2710,7 +2710,7 @@ uint16x8_t test_vhaddq_u16(uint16x8_t v1, uint16x8_t v2) { return vhaddq_u16(v1, v2); // CIR-LABEL: vhaddq_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_u16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) @@ -2725,7 +2725,7 @@ uint32x4_t test_vhaddq_u32(uint32x4_t v1, uint32x4_t v2) { return vhaddq_u32(v1, v2); // CIR-LABEL: vhaddq_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_u32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) @@ -2848,7 +2848,7 @@ int8x8_t test_vrhadd_s8(int8x8_t v1, int8x8_t v2) { return vrhadd_s8(v1, v2); // CIR-LABEL: vrhadd_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_s8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -2860,7 +2860,7 @@ int16x4_t test_vrhadd_s16(int16x4_t v1, int16x4_t v2) { return vrhadd_s16(v1, v2); // CIR-LABEL: vrhadd_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_s16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -2875,7 +2875,7 @@ int32x2_t test_vrhadd_s32(int32x2_t v1, int32x2_t v2) { return vrhadd_s32(v1, v2); // CIR-LABEL: vrhadd_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_s32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -2890,7 +2890,7 @@ uint8x8_t test_vrhadd_u8(uint8x8_t v1, uint8x8_t v2) { return vrhadd_u8(v1, v2); // CIR-LABEL: vrhadd_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_u8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -2902,7 +2902,7 @@ uint16x4_t test_vrhadd_u16(uint16x4_t v1, uint16x4_t v2) { return vrhadd_u16(v1, v2); // CIR-LABEL: vrhadd_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_u16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -2917,7 +2917,7 @@ uint32x2_t test_vrhadd_u32(uint32x2_t v1, uint32x2_t v2) { return vrhadd_u32(v1, v2); // CIR-LABEL: vrhadd_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_u32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -2932,7 +2932,7 @@ int8x16_t test_vrhaddq_s8(int8x16_t v1, int8x16_t v2) { return vrhaddq_s8(v1, v2); // CIR-LABEL: vrhaddq_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_s8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) @@ -2944,7 +2944,7 @@ int16x8_t test_vrhaddq_s16(int16x8_t v1, int16x8_t v2) { return vrhaddq_s16(v1, v2); // CIR-LABEL: vrhaddq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_s16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) @@ -2959,7 +2959,7 @@ int32x4_t test_vrhaddq_s32(int32x4_t v1, int32x4_t v2) { return vrhaddq_s32(v1, v2); // CIR-LABEL: vrhaddq_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_s32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) @@ -2974,7 +2974,7 @@ uint8x16_t test_vrhaddq_u8(uint8x16_t v1, uint8x16_t v2) { return vrhaddq_u8(v1, v2); // CIR-LABEL: vrhaddq_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_u8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) @@ -2986,7 +2986,7 @@ uint16x8_t test_vrhaddq_u16(uint16x8_t v1, uint16x8_t v2) { return vrhaddq_u16(v1, v2); // CIR-LABEL: vrhaddq_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_u16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) @@ -3001,7 +3001,7 @@ uint32x4_t test_vrhaddq_u32(uint32x4_t v1, uint32x4_t v2) { return vrhaddq_u32(v1, v2); // CIR-LABEL: vrhaddq_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_u32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) @@ -3015,7 +3015,7 @@ uint32x4_t test_vrhaddq_u32(uint32x4_t v1, uint32x4_t v2) { int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { return vqadd_s8(a, b); // CIR-LABEL: vqadd_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_s8( @@ -3026,7 +3026,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { int16x4_t test_vqadd_s16(int16x4_t a, int16x4_t b) { return vqadd_s16(a, b); // CIR-LABEL: vqadd_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_s16( @@ -3040,7 +3040,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { int32x2_t test_vqadd_s32(int32x2_t a, int32x2_t b) { return vqadd_s32(a, b); // CIR-LABEL: vqadd_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_s32( @@ -3054,7 +3054,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { int64x1_t test_vqadd_s64(int64x1_t a, int64x1_t b) { return vqadd_s64(a, b); // CIR-LABEL: vqadd_s64 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_s64( @@ -3068,7 +3068,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { uint8x8_t test_vqadd_u8(uint8x8_t a, uint8x8_t b) { return vqadd_u8(a, b); // CIR-LABEL: vqadd_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_u8( @@ -3079,7 +3079,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { uint16x4_t test_vqadd_u16(uint16x4_t a, uint16x4_t b) { return vqadd_u16(a, b); // CIR-LABEL: vqadd_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_u16( @@ -3090,7 +3090,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { uint32x2_t test_vqadd_u32(uint32x2_t a, uint32x2_t b) { return vqadd_u32(a, b); // CIR-LABEL: vqadd_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_u32( @@ -3101,7 +3101,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { uint64x1_t test_vqadd_u64(uint64x1_t a, uint64x1_t b) { return vqadd_u64(a, b); // CIR-LABEL: vqadd_u64 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_u64( @@ -3409,7 +3409,7 @@ int8x16_t test_vshlq_s8(int8x16_t a, int8x16_t b) { return vshlq_s8(a, b); // CIR-LABEL: vshlq_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) @@ -3421,7 +3421,7 @@ int16x8_t test_vshlq_s16(int16x8_t a, int16x8_t b) { return vshlq_s16(a, b); // CIR-LABEL: vshlq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) @@ -3436,7 +3436,7 @@ int32x4_t test_vshlq_s32(int32x4_t a, int32x4_t b) { return vshlq_s32(a, b); // CIR-LABEL: vshlq_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) @@ -3451,7 +3451,7 @@ int64x2_t test_vshlq_s64(int64x2_t a, int64x2_t b) { return vshlq_s64(a, b); // CIR-LABEL: vshlq_s64 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]]) @@ -3466,7 +3466,7 @@ uint8x16_t test_vshlq_u8(uint8x16_t a, int8x16_t b) { return vshlq_u8(a, b); // CIR-LABEL: vshlq_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) @@ -3478,7 +3478,7 @@ uint16x8_t test_vshlq_u16(uint16x8_t a, int16x8_t b) { return vshlq_u16(a, b); // CIR-LABEL: vshlq_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) @@ -3493,7 +3493,7 @@ uint32x4_t test_vshlq_u32(uint32x4_t a, int32x4_t b) { return vshlq_u32(a, b); // CIR-LABEL: vshlq_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) @@ -3508,7 +3508,7 @@ uint64x2_t test_vshlq_u64(uint64x2_t a, int64x2_t b) { return vshlq_u64(a, b); // CIR-LABEL: vshlq_u64 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]]) @@ -4116,7 +4116,7 @@ int8x8_t test_vmin_s8(int8x8_t a, int8x8_t b) { return vmin_s8(a, b); // CIR-LABEL: vmin_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vmin_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) @@ -4128,7 +4128,7 @@ int16x4_t test_vmin_s16(int16x4_t a, int16x4_t b) { return vmin_s16(a, b); // CIR-LABEL: vmin_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vmin_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) @@ -4142,7 +4142,7 @@ int32x2_t test_vmin_s32(int32x2_t a, int32x2_t b) { return vmin_s32(a, b); // CIR-LABEL: vmin_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vmin_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) @@ -4156,7 +4156,7 @@ uint8x8_t test_vmin_u8(uint8x8_t a, uint8x8_t b) { return vmin_u8(a, b); // CIR-LABEL: vmin_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vmin_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) @@ -4168,7 +4168,7 @@ uint16x4_t test_vmin_u16(uint16x4_t a, uint16x4_t b) { return vmin_u16(a, b); // CIR-LABEL: vmin_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vmin_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) @@ -4182,7 +4182,7 @@ uint32x2_t test_vmin_u32(uint32x2_t a, uint32x2_t b) { return vmin_u32(a, b); // CIR-LABEL: vmin_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vmin_u32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) @@ -4196,7 +4196,7 @@ float32x2_t test_vmin_f32(float32x2_t a, float32x2_t b) { return vmin_f32(a, b); // CIR-LABEL: vmin_f32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fmin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.fmin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vmin_f32(<2 x float>{{.*}}[[A:%.*]], <2 x float>{{.*}}[[B:%.*]]) @@ -4210,7 +4210,7 @@ float64x1_t test_vmin_f64(float64x1_t a, float64x1_t b) { return vmin_f64(a, b); // CIR-LABEL: vmin_f64 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fmin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.fmin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vmin_f64(<1 x double>{{.*}}[[A:%.*]], <1 x double>{{.*}}[[B:%.*]]) @@ -4224,7 +4224,7 @@ int8x16_t test_vminq_s8(int8x16_t a, int8x16_t b) { return vminq_s8(a, b); // CIR-LABEL: vminq_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vminq_s8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) @@ -4236,7 +4236,7 @@ int16x8_t test_vminq_s16(int16x8_t a, int16x8_t b) { return vminq_s16(a, b); // CIR-LABEL: vminq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vminq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) @@ -4250,7 +4250,7 @@ int32x4_t test_vminq_s32(int32x4_t a, int32x4_t b) { return vminq_s32(a, b); // CIR-LABEL: vminq_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vminq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) @@ -4264,7 +4264,7 @@ uint8x16_t test_vminq_u8(uint8x16_t a, uint8x16_t b) { return vminq_u8(a, b); // CIR-LABEL: vminq_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vminq_u8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) @@ -4276,7 +4276,7 @@ uint16x8_t test_vminq_u16(uint16x8_t a, uint16x8_t b) { return vminq_u16(a, b); // CIR-LABEL: vminq_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vminq_u16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) @@ -4290,7 +4290,7 @@ uint32x4_t test_vminq_u32(uint32x4_t a, uint32x4_t b) { return vminq_u32(a, b); // CIR-LABEL: vminq_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vminq_u32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) @@ -4304,7 +4304,7 @@ float64x2_t test_vminq_f64(float64x2_t a, float64x2_t b) { return vminq_f64(a, b); // CIR-LABEL: vminq_f64 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.fmin" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.fmin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vminq_f64(<2 x double>{{.*}}[[A:%.*]], <2 x double>{{.*}}[[B:%.*]]) @@ -5540,7 +5540,7 @@ int8x8_t test_vrshr_n_s8(int8x8_t a) { // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_s8(<8 x i8>{{.*}}[[A:%.*]]) @@ -5555,7 +5555,7 @@ uint8x8_t test_vrshr_n_u8(uint8x8_t a) { // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_u8(<8 x i8>{{.*}}[[A:%.*]]) @@ -5569,7 +5569,7 @@ int16x4_t test_vrshr_n_s16(int16x4_t a) { // CIR-LABEL: vrshr_n_s16 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_s16(<4 x i16>{{.*}}[[A:%.*]]) @@ -5585,7 +5585,7 @@ uint16x4_t test_vrshr_n_u16(uint16x4_t a) { // CIR-LABEL: vrshr_n_u16 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_u16(<4 x i16>{{.*}}[[A:%.*]]) @@ -5600,7 +5600,7 @@ int32x2_t test_vrshr_n_s32(int32x2_t a) { // CIR-LABEL: vrshr_n_s32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_s32(<2 x i32>{{.*}}[[A:%.*]]) @@ -5615,7 +5615,7 @@ uint32x2_t test_vrshr_n_u32(uint32x2_t a) { // CIR-LABEL: vrshr_n_u32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_u32(<2 x i32>{{.*}}[[A:%.*]]) @@ -5630,7 +5630,7 @@ int64x1_t test_vrshr_n_s64(int64x1_t a) { // CIR-LABEL: vrshr_n_s64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_s64(<1 x i64>{{.*}}[[A:%.*]]) @@ -5645,7 +5645,7 @@ uint64x1_t test_vrshr_n_u64(uint64x1_t a) { // CIR-LABEL: vrshr_n_u64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_u64(<1 x i64>{{.*}}[[A:%.*]]) @@ -5663,7 +5663,7 @@ int8x16_t test_vrshrq_n_s8(int8x16_t a) { // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_s8(<16 x i8>{{.*}}[[A:%.*]]) @@ -5679,7 +5679,7 @@ uint8x16_t test_vrshrq_n_u8(uint8x16_t a) { // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_u8(<16 x i8>{{.*}}[[A:%.*]]) @@ -5693,7 +5693,7 @@ int16x8_t test_vrshrq_n_s16(int16x8_t a) { // CIR-LABEL: vrshrq_n_s16 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_s16(<8 x i16>{{.*}}[[A:%.*]]) @@ -5709,7 +5709,7 @@ uint16x8_t test_vrshrq_n_u16(uint16x8_t a) { // CIR-LABEL: vrshrq_n_u16 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_u16(<8 x i16>{{.*}}[[A:%.*]]) @@ -5724,7 +5724,7 @@ int32x4_t test_vrshrq_n_s32(int32x4_t a) { // CIR-LABEL: vrshrq_n_s32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i, #cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_s32(<4 x i32>{{.*}}[[A:%.*]]) @@ -5740,7 +5740,7 @@ uint32x4_t test_vrshrq_n_u32(uint32x4_t a) { // CIR-LABEL: vrshrq_n_u32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i, // CIR-SAME: #cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_u32(<4 x i32>{{.*}}[[A:%.*]]) @@ -5755,7 +5755,7 @@ int64x2_t test_vrshrq_n_s64(int64x2_t a) { // CIR-LABEL: vrshrq_n_s64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i, #cir.int<-3> : !s64i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_s64(<2 x i64>{{.*}}[[A:%.*]]) @@ -5770,7 +5770,7 @@ uint64x2_t test_vrshrq_n_u64(uint64x2_t a) { // CIR-LABEL: vrshrq_n_u64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i, #cir.int<-3> : !s64i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_u64(<2 x i64>{{.*}}[[A:%.*]]) @@ -6286,7 +6286,7 @@ uint8x8_t test_vqshlu_n_s8(int8x8_t a) { // CIR-LABEL: vqshlu_n_s8 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, // CIR: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshlu_n_s8(<8 x i8>{{.*}}[[A:%.*]]) @@ -6300,7 +6300,7 @@ uint16x4_t test_vqshlu_n_s16(int16x4_t a) { // CIR-LABEL: vqshlu_n_s16 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, // CIR-SAME:#cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshlu_n_s16(<4 x i16>{{.*}}[[A:%.*]]) @@ -6315,7 +6315,7 @@ uint32x2_t test_vqshlu_n_s32(int32x2_t a) { // CIR-LABEL: vqshlu_n_s32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshlu_n_s32(<2 x i32>{{.*}}[[A:%.*]]) @@ -6332,7 +6332,7 @@ uint8x16_t test_vqshluq_n_s8(int8x16_t a) { // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshluq_n_s8(<16 x i8>{{.*}}[[A:%.*]]) @@ -6347,7 +6347,7 @@ uint16x8_t test_vqshluq_n_s16(int16x8_t a) { // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshluq_n_s16(<8 x i16>{{.*}}[[A:%.*]]) @@ -6363,7 +6363,7 @@ uint32x4_t test_vqshluq_n_s32(int32x4_t a) { // CIR-LABEL: vqshluq_n_s32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i, // CIR-SAME: #cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshluq_n_s32(<4 x i32>{{.*}}[[A:%.*]]) @@ -6378,7 +6378,7 @@ uint64x2_t test_vqshluq_n_s64(int64x2_t a) { // CIR-LABEL: vqshluq_n_s64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i, #cir.int<3> : !s64i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshluq_n_s64(<2 x i64>{{.*}}[[A:%.*]]) @@ -6638,7 +6638,7 @@ int32x2_t test_vrshrn_n_s64(int64x2_t a) { return vrshrn_n_s64(a, 19); // CIR-LABEL: vrshrn_n_s64 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vrshrn_n_s64(<2 x i64>{{.*}}[[A:%.*]]) @@ -6652,7 +6652,7 @@ uint8x8_t test_vrshrn_n_u16(uint16x8_t a) { return vrshrn_n_u16(a, 3); // CIR-LABEL: vrshrn_n_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vrshrn_n_u16(<8 x i16>{{.*}}[[A:%.*]]) @@ -6666,7 +6666,7 @@ uint16x4_t test_vrshrn_n_u32(uint32x4_t a) { return vrshrn_n_u32(a, 9); // CIR-LABEL: vrshrn_n_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}vrshrn_n_u32(<4 x i32>{{.*}}[[A:%.*]]) @@ -6680,7 +6680,7 @@ uint32x2_t test_vrshrn_n_u64(uint64x2_t a) { return vrshrn_n_u64(a, 19); // CIR-LABEL: vrshrn_n_u64 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vrshrn_n_u64(<2 x i64>{{.*}}[[A:%.*]]) @@ -6756,7 +6756,7 @@ uint8x8_t test_vqrshrun_n_s16(int16x8_t a) { // CIR-LABEL: test_vqrshrun_n_s16 // CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<3> : !s32i // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM-LABEL: @test_vqrshrun_n_s16( @@ -6771,7 +6771,7 @@ uint16x4_t test_vqrshrun_n_s32(int32x4_t a) { // CIR-LABEL: test_vqrshrun_n_s32 // CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<9> : !s32i // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM-LABEL: @test_vqrshrun_n_s32( @@ -6786,7 +6786,7 @@ uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { // CIR-LABEL: test_vqrshrun_n_s64 // CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<19> : !s32i // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM-LABEL: @test_vqrshrun_n_s64( @@ -8728,7 +8728,7 @@ int16x8_t test_vmull_s8(int8x8_t a, int8x8_t b) { return vmull_s8(a, b); // CIR-LABEL: vmull_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) @@ -8740,7 +8740,7 @@ int32x4_t test_vmull_s16(int16x4_t a, int16x4_t b) { return vmull_s16(a, b); // CIR-LABEL: vmull_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) @@ -8754,7 +8754,7 @@ int64x2_t test_vmull_s32(int32x2_t a, int32x2_t b) { return vmull_s32(a, b); // CIR-LABEL: vmull_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.smull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) @@ -8768,7 +8768,7 @@ uint16x8_t test_vmull_u8(uint8x8_t a, uint8x8_t b) { return vmull_u8(a, b); // CIR-LABEL: vmull_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) @@ -8780,7 +8780,7 @@ uint32x4_t test_vmull_u16(uint16x4_t a, uint16x4_t b) { return vmull_u16(a, b); // CIR-LABEL: vmull_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) @@ -8794,7 +8794,7 @@ uint64x2_t test_vmull_u32(uint32x2_t a, uint32x2_t b) { return vmull_u32(a, b); // CIR-LABEL: vmull_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.umull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_u32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) @@ -9258,7 +9258,7 @@ poly16x8_t test_vmull_p8(poly8x8_t a, poly8x8_t b) { return vmull_p8(a, b); // CIR-LABEL: vmull_p8 - // CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.pmull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.pmull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_p8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) diff --git a/clang/test/CIR/CodeGen/builtin-arm-ldrex.c b/clang/test/CIR/CodeGen/builtin-arm-ldrex.c index 974a370185eb..ba9bbb171bb7 100644 --- a/clang/test/CIR/CodeGen/builtin-arm-ldrex.c +++ b/clang/test/CIR/CodeGen/builtin-arm-ldrex.c @@ -9,31 +9,31 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) { // CIR-LABEL: @test_ldrex int sum = 0; sum += __builtin_arm_ldrex(addr); -// CIR: [[INTRES0:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[INTRES0:%.*]] = cir.llvm.intrinsic "aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i // CIR: [[CAST0:%.*]] = cir.cast(integral, [[INTRES0]] : !s64i), !s8i // CIR: [[CAST_I32:%.*]] = cir.cast(integral, [[CAST0]] : !s8i), !s32i sum += __builtin_arm_ldrex((short *)addr); -// CIR: [[INTRES1:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[INTRES1:%.*]] = cir.llvm.intrinsic "aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i // CIR: [[CAST1:%.*]] = cir.cast(integral, [[INTRES1]] : !s64i), !s16i // CIR: [[CAST_I16:%.*]] = cir.cast(integral, [[CAST1]] : !s16i), !s32i sum += __builtin_arm_ldrex((int *)addr); -// CIR: [[INTRES2:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[INTRES2:%.*]] = cir.llvm.intrinsic "aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i // CIR: [[CAST2:%.*]] = cir.cast(integral, [[INTRES2]] : !s64i), !s32i sum += __builtin_arm_ldrex((long long *)addr); -// CIR: [[INTRES3:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[INTRES3:%.*]] = cir.llvm.intrinsic "aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i sum += __builtin_arm_ldrex(addr64); -// CIR: [[INTRES4:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[INTRES4:%.*]] = cir.llvm.intrinsic "aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i sum += *__builtin_arm_ldrex((int **)addr); -// CIR: [[INTRES5:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr>) -> !s64i +// CIR: [[INTRES5:%.*]] = cir.llvm.intrinsic "aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr>) -> !s64i sum += __builtin_arm_ldrex((struct twoFldT **)addr)->a; -// CIR: [[INTRES6:%.*]] = cir.llvm.intrinsic "llvm.aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr>) -> !s64i +// CIR: [[INTRES6:%.*]] = cir.llvm.intrinsic "aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr>) -> !s64i // CIR: [[CAST3:%.*]] = cir.cast(int_to_ptr, [[INTRES6]] : !s64i), !cir.ptr // CIR: [[MEMBER_A:%.*]] = cir.get_member [[CAST3]][0] {name = "a"} : !cir.ptr -> !cir.ptr diff --git a/clang/test/CIR/IR/invalid-llvm-intrinsic.cir b/clang/test/CIR/IR/invalid-llvm-intrinsic.cir deleted file mode 100644 index 38b53a4e1b30..000000000000 --- a/clang/test/CIR/IR/invalid-llvm-intrinsic.cir +++ /dev/null @@ -1,11 +0,0 @@ -// Test attempt to construct ill-formed global annotations -// RUN: cir-opt %s -verify-diagnostics - -!s32i = !cir.int -!s64i = !cir.int -cir.func @foo() { - %a = cir.alloca !s32i, !cir.ptr, ["a"] {alignment = 4 : i64} - // expected-error @below {{'cir.llvm.intrinsic' op intrinsic name must start with 'llvm.'}} - %i = cir.llvm.intrinsic "ll.aarch64.ldxr" %a : (!cir.ptr) -> !s64i - cir.return -} From 299c54ba05b4aab137d5a16557d847c0fb51160c Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 6 Nov 2024 13:35:33 -0500 Subject: [PATCH 2032/2301] [CIR][CIRGen][Builtin] Support __builtin_char_memchr (#1050) This should fix NYI like `BI__builtin_char_memchr NYI UNREACHABLE executed at clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp:1402` The test is from [OG](https://github.com/llvm/clangir/blob/3ef67c19917ad26ed8b19d4d13a43458a952fddb/clang/test/CodeGenCXX/builtins.cpp#L64) see builtin's prototype [char *__builtin_char_memchr(const char *haystack, int needle, size_t size); ](https://clang.llvm.org/docs/LanguageExtensions.html#string-builtins) --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 11 ++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 48 +++++++++++++++---- clang/test/CIR/CodeGen/builtins.cpp | 19 ++++++++ 3 files changed, 68 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index e33c5bc03dbd..cbdb9bd09fc7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1423,7 +1423,16 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_memcpy_inline NYI"); case Builtin::BI__builtin_char_memchr: - llvm_unreachable("BI__builtin_char_memchr NYI"); + case Builtin::BI__builtin_memchr: { + Address srcPtr = buildPointerWithAlignment(E->getArg(0)); + mlir::Value src = + builder.createBitcast(srcPtr.getPointer(), builder.getVoidPtrTy()); + mlir::Value pattern = buildScalarExpr(E->getArg(1)); + mlir::Value len = buildScalarExpr(E->getArg(2)); + mlir::Value res = + builder.create(getLoc(E->getExprLoc()), src, pattern, len); + return RValue::get(res); + } case Builtin::BI__builtin___memcpy_chk: { // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index ea84cef8ed3e..899be15a2b39 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -734,6 +734,34 @@ class CIRMemCpyOpLowering return mlir::success(); } }; + +class CIRMemChrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::MemChrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + llvm::SmallVector arguments; + const mlir::TypeConverter *converter = getTypeConverter(); + mlir::Type srcTy = converter->convertType(op.getSrc().getType()); + mlir::Type patternTy = converter->convertType(op.getPattern().getType()); + mlir::Type lenTy = converter->convertType(op.getLen().getType()); + auto fnTy = + mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {srcTy, patternTy, lenTy}, + /*isVarArg=*/false); + llvm::StringRef fnName = "memchr"; + getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); + rewriter.replaceOpWithNewOp( + op, mlir::TypeRange{llvmPtrTy}, fnName, + mlir::ValueRange{adaptor.getSrc(), adaptor.getPattern(), + adaptor.getLen()}); + return mlir::success(); + } +}; + class CIRMemMoveOpLowering : public mlir::OpConversionPattern { public: @@ -4234,6 +4262,7 @@ class CIRIsFPClassOpLowering return mlir::success(); } }; + class CIRAbsOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -4275,15 +4304,16 @@ void populateCIRToLLVMConversionPatterns( CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, - CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, - CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, - CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, - CIRVectorShuffleIntsLowering, CIRVectorShuffleVecLowering, - CIRStackSaveLowering, CIRUnreachableLowering, CIRTrapLowering, - CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, - CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, - CIRCmpThreeWayOpLowering, CIRClearCacheOpLowering, CIREhTypeIdOpLowering, - CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, + CIRMemCpyOpLowering, CIRMemChrOpLowering, CIRFAbsOpLowering, + CIRExpectOpLowering, CIRVTableAddrPointOpLowering, + CIRVectorCreateLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, + CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, + CIRVectorShuffleVecLowering, CIRStackSaveLowering, CIRUnreachableLowering, + CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, + CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, + CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, + CIRClearCacheOpLowering, CIREhTypeIdOpLowering, CIRCatchParamOpLowering, + CIRResumeOpLowering, CIRAllocExceptionOpLowering, CIRFreeExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, CIRBaseClassAddrOpLowering, CIRDerivedClassAddrOpLowering, CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering, diff --git a/clang/test/CIR/CodeGen/builtins.cpp b/clang/test/CIR/CodeGen/builtins.cpp index fa7b51e88016..202ae61f0db2 100644 --- a/clang/test/CIR/CodeGen/builtins.cpp +++ b/clang/test/CIR/CodeGen/builtins.cpp @@ -56,3 +56,22 @@ int *test_std_addressof2() { // LLVM: [[RES:%.*]] = load ptr, ptr [[ADDR]], align 8 // LLVM: ret ptr [[RES]] } + +extern "C" char* test_memchr(const char arg[32]) { + return __builtin_char_memchr(arg, 123, 32); + + // CIR-LABEL: test_memchr + // CIR: [[PATTERN:%.*]] = cir.const #cir.int<123> : !s32i + // CIR: [[LEN:%.*]] = cir.const #cir.int<32> : !s32i + // CIR: [[LEN_U64:%.*]] = cir.cast(integral, [[LEN]] : !s32i), !u64i + // CIR: {{%.*}} = cir.libc.memchr({{%.*}}, [[PATTERN]], [[LEN_U64]]) + + // LLVM: {{.*}}@test_memchr(ptr{{.*}}[[ARG:%.*]]) + // LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8 + // LLVM: store ptr [[ARG]], ptr [[TMP0]], align 8 + // LLVM: [[SRC:%.*]] = load ptr, ptr [[TMP0]], align 8 + // LLVM: [[RES:%.*]] = call ptr @memchr(ptr [[SRC]], i32 123, i64 32) + // LLVM: store ptr [[RES]], ptr [[RET_P:%.*]], align 8 + // LLVM: [[RET:%.*]] = load ptr, ptr [[RET_P]], align 8 + // LLVM: ret ptr [[RET]] +} From ebe5c32d44a8eb52d5b45d0355b6d9a454205673 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Thu, 7 Nov 2024 02:36:05 +0800 Subject: [PATCH 2033/2301] [CIR] fix getTypeSizeInBits with fp80 and fp128 (#1058) fix https://github.com/llvm/clangir/issues/1057 --------- Co-authored-by: Bruno Cardoso Lopes Co-authored-by: Sirui Mu --- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index ae50f79fff30..b1e234e1dcdc 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -749,7 +749,10 @@ const llvm::fltSemantics &FP80Type::getFloatSemantics() const { llvm::TypeSize FP80Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, mlir::DataLayoutEntryListRef params) const { - return llvm::TypeSize::getFixed(16); + // The size of FP80Type should be 16 bytes, or 128 bits. The lower 80 bits + // take part in the value representation and the higher 48 bits are just + // paddings. + return llvm::TypeSize::getFixed(128); } uint64_t FP80Type::getABIAlignment(const mlir::DataLayout &dataLayout, @@ -770,7 +773,7 @@ const llvm::fltSemantics &FP128Type::getFloatSemantics() const { llvm::TypeSize FP128Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, mlir::DataLayoutEntryListRef params) const { - return llvm::TypeSize::getFixed(16); + return llvm::TypeSize::getFixed(128); } uint64_t FP128Type::getABIAlignment(const mlir::DataLayout &dataLayout, From a196a9de00996aa002d73405abfde9b0a8052d31 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 6 Nov 2024 21:38:16 +0300 Subject: [PATCH 2034/2301] [CIR][ABI][Lowering] covers return struct case with coercion through memory (#1059) This PR covers one more case for return values of struct type, where `memcpy` is emitted. --- .../TargetLowering/LowerFunction.cpp | 59 +++++++++++++------ .../AArch64/aarch64-cc-structs.c | 26 ++++++++ 2 files changed, 68 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 1b638411cc59..77aef630caf5 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -257,6 +257,23 @@ Value emitAddressAtOffset(LowerFunction &LF, Value addr, return addr; } +mlir::cir::AllocaOp findAlloca(Operation *op) { + if (!op) + return {}; + + if (auto al = dyn_cast(op)) { + return al; + } else if (auto ret = dyn_cast(op)) { + auto vals = ret.getInput(); + if (vals.size() == 1) + return findAlloca(vals[0].getDefiningOp()); + } else if (auto load = dyn_cast(op)) { + return findAlloca(load.getAddr().getDefiningOp()); + } + + return {}; +} + /// After the calling convention is lowered, an ABI-agnostic type might have to /// be loaded back to its ABI-aware couterpart so it may be returned. If they /// differ, we have to do a coerced load. A coerced load, which means to load a @@ -305,6 +322,31 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { return LF.getRewriter().create(Src.getLoc(), Cast); } + // Otherwise do coercion through memory. + if (auto addr = findAlloca(Src.getDefiningOp())) { + auto &rewriter = LF.getRewriter(); + auto *ctxt = LF.LM.getMLIRContext(); + auto ptrTy = PointerType::get(ctxt, Ty); + auto voidPtr = PointerType::get(ctxt, mlir::cir::VoidType::get(ctxt)); + + // insert alloca near the previuos one + auto point = rewriter.saveInsertionPoint(); + rewriter.setInsertionPointAfter(addr); + auto align = LF.LM.getDataLayout().getABITypeAlign(Ty); + auto alignAttr = rewriter.getI64IntegerAttr(align.value()); + auto tmp = + rewriter.create(Src.getLoc(), ptrTy, Ty, "tmp", alignAttr); + rewriter.restoreInsertionPoint(point); + + auto srcVoidPtr = createBitcast(addr, voidPtr, LF); + auto dstVoidPtr = createBitcast(tmp, voidPtr, LF); + auto i64Ty = IntType::get(ctxt, 64, false); + auto len = rewriter.create( + Src.getLoc(), IntAttr::get(i64Ty, SrcSize.getFixedValue())); + rewriter.create(Src.getLoc(), dstVoidPtr, srcVoidPtr, len); + return rewriter.create(Src.getLoc(), tmp.getResult()); + } + cir_cconv_unreachable("NYI"); } @@ -532,23 +574,6 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, return success(); } -mlir::cir::AllocaOp findAlloca(Operation *op) { - if (!op) - return {}; - - if (auto al = dyn_cast(op)) { - return al; - } else if (auto ret = dyn_cast(op)) { - auto vals = ret.getInput(); - if (vals.size() == 1) - return findAlloca(vals[0].getDefiningOp()); - } else if (auto load = dyn_cast(op)) { - return findAlloca(load.getAddr().getDefiningOp()); - } - - return {}; -} - LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { // NOTE(cir): no-return, naked, and no result functions should be handled in // CIRGen. diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index bc2f1d37d729..245e28d834b1 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -74,6 +74,32 @@ GT_128 ret_gt_128() { return x; } +typedef struct { + int a; + int b; + int c; +} S; + +// CHECK: cir.func {{.*@retS}}() -> !cir.array +// CHECK: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: %[[#V1:]] = cir.alloca !cir.array, !cir.ptr>, ["tmp"] {alignment = 8 : i64} +// CHECK: %[[#V2:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V3:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr>), !cir.ptr +// CHECK: %[[#V4:]] = cir.const #cir.int<12> : !u64i +// CHECK: cir.libc.memcpy %[[#V4]] bytes from %[[#V2]] to %[[#V3]] : !u64i, !cir.ptr -> !cir.ptr +// CHECK: %[[#V5:]] = cir.load %[[#V1]] : !cir.ptr>, !cir.array +// CHECK: cir.return %[[#V5]] : !cir.array + +// LLVM: [2 x i64] @retS() +// LLVM: %[[#V1:]] = alloca %struct.S, i64 1, align 4 +// LLVM: %[[#V2:]] = alloca [2 x i64], i64 1, align 8 +// LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#V2]], ptr %[[#V1]], i64 12, i1 false) +// LLVM: %[[#V3:]] = load [2 x i64], ptr %[[#V2]], align 8 +// LLVM: ret [2 x i64] %[[#V3]] +S retS() { + S s; + return s; +} // CHECK: cir.func {{.*@pass_lt_64}}(%arg0: !u64 // CHECK: %[[#V0:]] = cir.alloca !ty_LT_64_, !cir.ptr // CHECK: %[[#V1:]] = cir.cast(integral, %arg0 : !u64i), !u16i From aa07457f2ab1e74a6d623e8f527ebc6ea0e2853d Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 6 Nov 2024 13:38:45 -0500 Subject: [PATCH 2035/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqrdmulh_v and neon_vqrdmulhq_v (#1063) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 5 + clang/test/CIR/CodeGen/AArch64/neon.c | 92 +++++++++++-------- 2 files changed, 61 insertions(+), 36 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index d3b1add144fc..fd465ff78a37 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2468,6 +2468,11 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "aarch64.neon.sqadd"; break; } + case NEON::BI__builtin_neon_vqrdmulh_v: + case NEON::BI__builtin_neon_vqrdmulhq_v: { + intrincsName = "aarch64.neon.sqrdmulh"; + break; + } case NEON::BI__builtin_neon_vqsub_v: case NEON::BI__builtin_neon_vqsubq_v: { intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.uqsub" diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 246bdfea61c5..231d2154cb9b 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -4854,45 +4854,65 @@ float64x2_t test_vminq_f64(float64x2_t a, float64x2_t b) { // return vqdmulhq_s32(a, b); // } -// NYI-LABEL: @test_vqrdmulh_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VQRDMULH_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: [[VQRDMULH_V3_I:%.*]] = bitcast <4 x i16> [[VQRDMULH_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VQRDMULH_V2_I]] -// int16x4_t test_vqrdmulh_s16(int16x4_t a, int16x4_t b) { -// return vqrdmulh_s16(a, b); -// } +int16x4_t test_vqrdmulh_s16(int16x4_t a, int16x4_t b) { + return vqrdmulh_s16(a, b); -// NYI-LABEL: @test_vqrdmulh_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VQRDMULH_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: [[VQRDMULH_V3_I:%.*]] = bitcast <2 x i32> [[VQRDMULH_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VQRDMULH_V2_I]] -// int32x2_t test_vqrdmulh_s32(int32x2_t a, int32x2_t b) { -// return vqrdmulh_s32(a, b); -// } + // CIR-LABEL: vqrdmulh_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrdmulh" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vqrdmulhq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VQRDMULHQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %a, <8 x i16> %b) -// NYI: [[VQRDMULHQ_V3_I:%.*]] = bitcast <8 x i16> [[VQRDMULHQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VQRDMULHQ_V2_I]] -// int16x8_t test_vqrdmulhq_s16(int16x8_t a, int16x8_t b) { -// return vqrdmulhq_s16(a, b); -// } + // LLVM: {{.*}}test_vqrdmulh_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8> + // LLVM: [[VQRDMULH_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[A]], <4 x i16> [[B]]) + // LLVM: [[VQRDMULH_V3_I:%.*]] = bitcast <4 x i16> [[VQRDMULH_V2_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VQRDMULH_V2_I]] +} -// NYI-LABEL: @test_vqrdmulhq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VQRDMULHQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %a, <4 x i32> %b) -// NYI: [[VQRDMULHQ_V3_I:%.*]] = bitcast <4 x i32> [[VQRDMULHQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VQRDMULHQ_V2_I]] -// int32x4_t test_vqrdmulhq_s32(int32x4_t a, int32x4_t b) { -// return vqrdmulhq_s32(a, b); -// } +int32x2_t test_vqrdmulh_s32(int32x2_t a, int32x2_t b) { + return vqrdmulh_s32(a, b); + + // CIR-LABEL: vqrdmulh_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrdmulh" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqrdmulh_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8> + // LLVM: [[VQRDMULH_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> [[A]], <2 x i32> [[B]]) + // LLVM: [[VQRDMULH_V3_I:%.*]] = bitcast <2 x i32> [[VQRDMULH_V2_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VQRDMULH_V2_I]] +} + +int16x8_t test_vqrdmulhq_s16(int16x8_t a, int16x8_t b) { + return vqrdmulhq_s16(a, b); + + // CIR-LABEL: vqrdmulhq_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrdmulh" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqrdmulhq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8> + // LLVM: [[VQRDMULHQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) + // LLVM: [[VQRDMULHQ_V3_I:%.*]] = bitcast <8 x i16> [[VQRDMULHQ_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VQRDMULHQ_V2_I]] +} + +int32x4_t test_vqrdmulhq_s32(int32x4_t a, int32x4_t b) { + return vqrdmulhq_s32(a, b); + + // CIR-LABEL: vqrdmulhq_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrdmulh" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqrdmulhq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8> + // LLVM: [[VQRDMULHQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) + // LLVM: [[VQRDMULHQ_V3_I:%.*]] = bitcast <4 x i32> [[VQRDMULHQ_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VQRDMULHQ_V2_I]] +} // NYI-LABEL: @test_vmulx_f32( // NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> From 874102ef7f39ebec11abab464a62882ebd91bfa4 Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 6 Nov 2024 13:39:13 -0500 Subject: [PATCH 2036/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vpaddl and neon_vpaddlq (#1064) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 20 +++ clang/test/CIR/CodeGen/AArch64/neon-arith.c | 132 ++++++++++++++++++ 2 files changed, 152 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index fd465ff78a37..5586c40c8e48 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2172,6 +2172,16 @@ getSignChangedVectorType(CIRGenBuilderTy &builder, vecTy.getSize()); } +static mlir::cir::VectorType +getHalfEltSizeTwiceNumElemsVecType(CIRGenBuilderTy &builder, + mlir::cir::VectorType vecTy) { + auto elemTy = mlir::cast(vecTy.getEltType()); + elemTy = elemTy.isSigned() ? builder.getSIntNTy(elemTy.getWidth() / 2) + : builder.getUIntNTy(elemTy.getWidth() / 2); + return mlir::cir::VectorType::get(builder.getContext(), elemTy, + vecTy.getSize() * 2); +} + /// Get integer from a mlir::Value that is an int constant or a constant op. static int64_t getIntValueFromConstOp(mlir::Value val) { auto constOp = mlir::cast(val.getDefiningOp()); @@ -2350,6 +2360,16 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( // In CIR, integral cast op supports vector of int type truncating. return builder.createIntCast(ops[0], ty); } + case NEON::BI__builtin_neon_vpaddl_v: + case NEON::BI__builtin_neon_vpaddlq_v: { + // The source operand type has twice as many elements of half the size. + mlir::cir::VectorType narrowTy = + getHalfEltSizeTwiceNumElemsVecType(builder, vTy); + return buildNeonCall(builder, {narrowTy}, ops, + isUnsigned ? "aarch64.neon.uaddlp" + : "aarch64.neon.saddlp", + vTy, getLoc(e->getExprLoc())); + } case NEON::BI__builtin_neon_vext_v: case NEON::BI__builtin_neon_vextq_v: { int cv = getIntValueFromConstOp(ops[2]); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index 5df338e81ab6..96125b44b3bb 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -607,3 +607,135 @@ uint64x2_t test_vqsubq_u64(uint64x2_t a, uint64x2_t b) { // LLVM: {{.*}}test_vqsubq_u64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]]) // LLVM: [[RES:%.*]] = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> [[A]], <2 x i64> [[B]]) // LLVM: ret <2 x i64> [[RES]] + +int16x4_t test_vpaddl_s8(int8x8_t a) { + return vpaddl_s8(a); + + // CIR-LABEL: vpaddl_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddl_s8(<8 x i8>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> [[A]]) + // LLVM: ret <4 x i16> [[VPADDL1_I]] +} + +int32x2_t test_vpaddl_s16(int16x4_t a) { + return vpaddl_s16(a); + + // CIR-LABEL: vpaddl_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddl_s16(<4 x i16>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> [[A]]) + // LLVM: ret <2 x i32> [[VPADDL1_I]] +} + +int64x1_t test_vpaddl_s32(int32x2_t a) { + return vpaddl_s32(a); + + // CIR-LABEL: vpaddl_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddl_s32(<2 x i32>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.saddlp.v1i64.v2i32(<2 x i32> [[A]]) + // LLVM: ret <1 x i64> [[VPADDL1_I]] +} + +uint16x4_t test_vpaddl_u8(uint8x8_t a) { + return vpaddl_u8(a); + + // CIR-LABEL: vpaddl_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddl_u8(<8 x i8>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> [[A]]) + // LLVM: ret <4 x i16> [[VPADDL1_I]] +} + +uint32x2_t test_vpaddl_u16(uint16x4_t a) { + return vpaddl_u16(a); + + // CIR-LABEL: vpaddl_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddl_u16(<4 x i16>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> [[A]]) + // LLVM: ret <2 x i32> [[VPADDL1_I]] +} + +uint64x1_t test_vpaddl_u32(uint32x2_t a) { + return vpaddl_u32(a); + + // CIR-LABEL: vpaddl_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddl_u32(<2 x i32>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> [[A]]) + // LLVM: ret <1 x i64> [[VPADDL1_I]] +} + +int16x8_t test_vpaddlq_s8(int8x16_t a) { + return vpaddlq_s8(a); + + // CIR-LABEL: vpaddlq_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddlq_s8(<16 x i8>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> [[A]]) + // LLVM: ret <8 x i16> [[VPADDL1_I]] +} + +int32x4_t test_vpaddlq_s16(int16x8_t a) { + return vpaddlq_s16(a); + + // CIR-LABEL: vpaddlq_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddlq_s16(<8 x i16>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> [[A]]) + // LLVM: ret <4 x i32> [[VPADDL1_I]] +} + +int64x2_t test_vpaddlq_s32(int32x4_t a) { + return vpaddlq_s32(a); + + // CIR-LABEL: vpaddlq_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddlq_s32(<4 x i32>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> [[A]]) + // LLVM: ret <2 x i64> [[VPADDL1_I]] +} + +uint16x8_t test_vpaddlq_u8(uint8x16_t a) { + return vpaddlq_u8(a); + + // CIR-LABEL: vpaddlq_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddlq_u8(<16 x i8>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> [[A]]) + // LLVM: ret <8 x i16> [[VPADDL1_I]] +} + +uint32x4_t test_vpaddlq_u16(uint16x8_t a) { + return vpaddlq_u16(a); + + // CIR-LABEL: vpaddlq_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddlq_u16(<8 x i16>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> [[A]]) + // LLVM: ret <4 x i32> [[VPADDL1_I]] +} + +uint64x2_t test_vpaddlq_u32(uint32x4_t a) { + return vpaddlq_u32(a); + + // CIR-LABEL: vpaddlq_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vpaddlq_u32(<4 x i32>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[VPADDL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> [[A]]) + // LLVM: ret <2 x i64> [[VPADDL1_I]] +} From cd152c20e629ae7be68426033dc5955c98210ab9 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 7 Nov 2024 02:41:31 +0800 Subject: [PATCH 2037/2301] [CIR][Lowering] Add LLVM lowering support for cir.assume (#1066) This PR adds LLVMIR lowering support for `cir.assume`, `cir.assume.aligned`, and `cir.assume.separate_storage`. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 61 +++++++++++++++++++ clang/test/CIR/CodeGen/builtin-assume.cpp | 21 +++++++ 2 files changed, 82 insertions(+) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 899be15a2b39..98d8e4de46d4 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3039,6 +3039,66 @@ class CIRIntrinsicCallLowering } }; +class CIRAssumeLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AssumeOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto cond = rewriter.create( + op.getLoc(), rewriter.getI1Type(), adaptor.getPredicate()); + rewriter.replaceOpWithNewOp(op, cond); + return mlir::success(); + } +}; + +class CIRAssumeAlignedLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AssumeAlignedOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + SmallVector opBundleArgs{adaptor.getPointer()}; + + auto alignment = rewriter.create( + op.getLoc(), rewriter.getI64Type(), op.getAlignment()); + opBundleArgs.push_back(alignment); + + if (mlir::Value offset = adaptor.getOffset()) + opBundleArgs.push_back(offset); + + auto cond = rewriter.create( + op.getLoc(), rewriter.getI1Type(), 1); + rewriter.create(op.getLoc(), cond, "align", + opBundleArgs); + rewriter.replaceAllUsesWith(op, op.getPointer()); + rewriter.eraseOp(op); + + return mlir::success(); + } +}; + +class CIRAssumeSepStorageLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::AssumeSepStorageOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto cond = rewriter.create( + op.getLoc(), rewriter.getI1Type(), 1); + rewriter.replaceOpWithNewOp( + op, cond, "separate_storage", + mlir::ValueRange{adaptor.getPtr1(), adaptor.getPtr2()}); + return mlir::success(); + } +}; + static mlir::Value createLLVMBitOp(mlir::Location loc, const llvm::Twine &llvmIntrinBaseName, mlir::Type resultTy, mlir::Value operand, @@ -4315,6 +4375,7 @@ void populateCIRToLLVMConversionPatterns( CIRClearCacheOpLowering, CIREhTypeIdOpLowering, CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, CIRFreeExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, + CIRAssumeLowering, CIRAssumeAlignedLowering, CIRAssumeSepStorageLowering, CIRBaseClassAddrOpLowering, CIRDerivedClassAddrOpLowering, CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering, CIRMemMoveOpLowering, CIRMemsetOpLowering diff --git a/clang/test/CIR/CodeGen/builtin-assume.cpp b/clang/test/CIR/CodeGen/builtin-assume.cpp index 6776dde7c26f..88e8ad11565e 100644 --- a/clang/test/CIR/CodeGen/builtin-assume.cpp +++ b/clang/test/CIR/CodeGen/builtin-assume.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck %s --check-prefix=LLVM --input-file=%t.ll int test_assume(int x) { __builtin_assume(x > 0); @@ -13,6 +15,10 @@ int test_assume(int x) { // CIR-NEXT: cir.assume %[[#cond]] : !cir.bool // CIR: } +// LLVM: @_Z11test_assumei +// LLVM: %[[#cond:]] = trunc i8 %{{.+}} to i1 +// LLVM-NEXT: call void @llvm.assume(i1 %[[#cond]]) + int test_assume_aligned(int *ptr) { int *aligned = (int *)__builtin_assume_aligned(ptr, 8); return *aligned; @@ -26,6 +32,11 @@ int test_assume_aligned(int *ptr) { // CIR-NEXT: %{{.+}} = cir.load %[[#aligned2]] : !cir.ptr, !s32i // CIR: } +// LLVM: @_Z19test_assume_alignedPi +// LLVM: %[[#ptr:]] = load ptr, ptr %{{.+}}, align 8 +// LLVM-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr %[[#ptr]], i64 8) ] +// LLVM-NEXT: store ptr %[[#ptr]], ptr %{{.+}}, align 8 + int test_assume_aligned_offset(int *ptr) { int *aligned = (int *)__builtin_assume_aligned(ptr, 8, 4); return *aligned; @@ -41,6 +52,11 @@ int test_assume_aligned_offset(int *ptr) { // CIR-NEXT: %{{.+}} = cir.load %[[#aligned2]] : !cir.ptr, !s32i // CIR: } +// LLVM: @_Z26test_assume_aligned_offsetPi +// LLVM: %[[#ptr:]] = load ptr, ptr %{{.+}}, align 8 +// LLVM-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr %[[#ptr]], i64 8, i64 4) ] +// LLVM-NEXT: store ptr %[[#ptr]], ptr %{{.+}}, align 8 + int test_separate_storage(int *p1, int *p2) { __builtin_assume_separate_storage(p1, p2); return *p1 + *p2; @@ -53,3 +69,8 @@ int test_separate_storage(int *p1, int *p2) { // CIR-NEXT: %[[#p2_voidptr:]] = cir.cast(bitcast, %[[#p2]] : !cir.ptr), !cir.ptr // CIR-NEXT: cir.assume.separate_storage %[[#p1_voidptr]], %[[#p2_voidptr]] : !cir.ptr // CIR: } + +// LLVM: @_Z21test_separate_storagePiS_ +// LLVM: %[[#ptr1:]] = load ptr, ptr %{{.+}}, align 8 +// LLVM-NEXT: %[[#ptr2:]] = load ptr, ptr %{{.+}}, align 8 +// LLVM-NEXT: call void @llvm.assume(i1 true) [ "separate_storage"(ptr %[[#ptr1]], ptr %[[#ptr2]]) ] From c73d5efdf760c0c761da43316ebdcf3f8a7ea73f Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 7 Nov 2024 02:42:25 +0800 Subject: [PATCH 2038/2301] [CIR][CodeGen] Fix the default linkage of string literals (#1067) In OG CodeGen, string literals has `private` linkage as default (marked by `cir_private` in CIR assembly). But CIR uses `internal`, which is probably an ancient typo. This PR keeps align with it and thus modifies the existing test files. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/test/CIR/CodeGen/array.cpp | 2 +- clang/test/CIR/CodeGen/globals.cpp | 4 ++-- clang/test/CIR/CodeGen/hello.c | 2 +- clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp | 8 ++++---- clang/test/CIR/CodeGen/wide-string.cpp | 8 ++++---- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 4bb3ffb03680..6881003c4604 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1551,7 +1551,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, !getLangOpts().WritableStrings) { assert(0 && "not implemented"); } else { - LT = mlir::cir::GlobalLinkageKind::InternalLinkage; + LT = mlir::cir::GlobalLinkageKind::PrivateLinkage; GlobalVariableName = Name; } diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index b0807755cfec..4504b58c7078 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -40,7 +40,7 @@ void local_stringlit() { const char *s = "whatnow"; } -// CHECK: cir.global "private" constant internal dsolocal @".str" = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" constant cir_private dsolocal @".str" = #cir.const_array<"whatnow\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.func @_Z15local_stringlitv() // CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["s", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.get_global @".str" : !cir.ptr> diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 4df6dface2c6..19edbbf22491 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -49,10 +49,10 @@ int use_func() { return func(); } // CHECK-NEXT: cir.global external @rgb = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<233> : !u8i, #cir.int<33> : !u8i]> : !cir.array // CHECK-NEXT: cir.global external @alpha = #cir.const_array<"abc\00" : !cir.array> : !cir.array -// CHECK-NEXT: cir.global "private" constant internal dsolocal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global "private" constant cir_private dsolocal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global external @s = #cir.global_view<@".str"> : !cir.ptr -// CHECK-NEXT: cir.global "private" constant internal dsolocal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global "private" constant cir_private dsolocal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global external @s1 = #cir.global_view<@".str1"> : !cir.ptr // CHECK-NEXT: cir.global external @s2 = #cir.global_view<@".str"> : !cir.ptr diff --git a/clang/test/CIR/CodeGen/hello.c b/clang/test/CIR/CodeGen/hello.c index 3eff7227943c..8fb49131784c 100644 --- a/clang/test/CIR/CodeGen/hello.c +++ b/clang/test/CIR/CodeGen/hello.c @@ -8,7 +8,7 @@ int main (void) { } // CHECK: cir.func private @printf(!cir.ptr, ...) -> !s32i -// CHECK: cir.global "private" constant internal dsolocal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global "private" constant cir_private dsolocal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.func @main() -> !s32i // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK: %1 = cir.get_global @printf : !cir.ptr, ...)>> diff --git a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp index 37b9a680f80a..e245d24122cf 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp @@ -21,8 +21,8 @@ void test() { // CIR: cir.store %arg0, [[LOCAL]] : [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]> // CIR: cir.return -// CIR: cir.global "private" constant internal dsolocal [[STR_XY:@.*]] = #cir.const_array<"xy\00" : !cir.array> : !cir.array -// CIR: cir.global "private" constant internal dsolocal [[STR_UV:@.*]] = #cir.const_array<"uv\00" : !cir.array> : !cir.array +// CIR: cir.global "private" constant cir_private dsolocal [[STR_XY:@.*]] = #cir.const_array<"xy\00" : !cir.array> : !cir.array +// CIR: cir.global "private" constant cir_private dsolocal [[STR_UV:@.*]] = #cir.const_array<"uv\00" : !cir.array> : !cir.array // CIR: cir.func @_ZSt4testv() // CIR: cir.scope { @@ -53,8 +53,8 @@ void test() { // LLVM: %"class.std::initializer_list" = type { ptr, ptr } -// LLVM: @.str = internal constant [3 x i8] c"xy\00" -// LLVM: @.str1 = internal constant [3 x i8] c"uv\00" +// LLVM: @.str = private constant [3 x i8] c"xy\00" +// LLVM: @.str1 = private constant [3 x i8] c"uv\00" // LLVM: define linkonce_odr void @_ZSt1fIPKcEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG0:%.*]]) // LLVM: [[LOCAL_PTR:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, diff --git a/clang/test/CIR/CodeGen/wide-string.cpp b/clang/test/CIR/CodeGen/wide-string.cpp index b02380041ce1..3e3292510d9f 100644 --- a/clang/test/CIR/CodeGen/wide-string.cpp +++ b/clang/test/CIR/CodeGen/wide-string.cpp @@ -5,22 +5,22 @@ const char16_t *test_utf16() { return u"你好世界"; } -// CHECK: cir.global "private" constant internal dsolocal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u16i, #cir.int<22909> : !u16i, #cir.int<19990> : !u16i, #cir.int<30028> : !u16i, #cir.int<0> : !u16i]> : !cir.array +// CHECK: cir.global "private" constant cir_private dsolocal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u16i, #cir.int<22909> : !u16i, #cir.int<19990> : !u16i, #cir.int<30028> : !u16i, #cir.int<0> : !u16i]> : !cir.array const char32_t *test_utf32() { return U"你好世界"; } -// CHECK: cir.global "private" constant internal dsolocal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u32i, #cir.int<22909> : !u32i, #cir.int<19990> : !u32i, #cir.int<30028> : !u32i, #cir.int<0> : !u32i]> : !cir.array +// CHECK: cir.global "private" constant cir_private dsolocal @{{.+}} = #cir.const_array<[#cir.int<20320> : !u32i, #cir.int<22909> : !u32i, #cir.int<19990> : !u32i, #cir.int<30028> : !u32i, #cir.int<0> : !u32i]> : !cir.array const char16_t *test_zero16() { return u"\0\0\0\0"; } -// CHECK: cir.global "private" constant internal dsolocal @{{.+}} = #cir.zero : !cir.array +// CHECK: cir.global "private" constant cir_private dsolocal @{{.+}} = #cir.zero : !cir.array const char32_t *test_zero32() { return U"\0\0\0\0"; } -// CHECK: cir.global "private" constant internal dsolocal @{{.+}} = #cir.zero : !cir.array +// CHECK: cir.global "private" constant cir_private dsolocal @{{.+}} = #cir.zero : !cir.array From f0f29cb0e14437a68d9ef74606bc5cee328df500 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 6 Nov 2024 21:51:20 +0300 Subject: [PATCH 2039/2301] [CIR][ABI][AArch64][Lowering] support for passing struct types > 128 bits (#1068) This PR adds a partial support for so-called indirect function arguments for struct types with size > 128 bits for aarch64. #### Couple words about the implementation The hard part is that it's not one-to-one copy from the original codegen, but the code is inspired by it of course. In the original codegen there is no much job is done for the indirect arguments inside the loop in the `EmitFunctionProlog`, and additional alloca is added in the end, in the call for `EmitParamDecl` function. In our case, for the indirect argument (which is a pointer) we replace the original alloca with a new one, and store the pointer in there. And replace all the uses of the old alloca with the load from the new one, i.e. in both cases users works with the pointer to a structure. Also, I added several missed features in the `constructAttributeList` for indirect arguments, but didn't preserve the original code structure, so let me know if I need to do it. --- clang/include/clang/CIR/ABIArgInfo.h | 10 +++++ clang/include/clang/CIR/MissingFeatures.h | 3 ++ .../Transforms/TargetLowering/LowerCall.cpp | 8 ++++ .../TargetLowering/LowerFunction.cpp | 45 +++++++++++++++++++ .../Transforms/TargetLowering/LowerTypes.cpp | 6 +++ .../TargetLowering/Targets/AArch64.cpp | 2 +- .../AArch64/aarch64-cc-structs.c | 11 +++++ 7 files changed, 84 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/ABIArgInfo.h b/clang/include/clang/CIR/ABIArgInfo.h index 582feae157b2..818d3b62f13f 100644 --- a/clang/include/clang/CIR/ABIArgInfo.h +++ b/clang/include/clang/CIR/ABIArgInfo.h @@ -215,11 +215,21 @@ class ABIArgInfo { IndirectAttr.Align = align; } + bool getIndirectByVal() const { + assert(isIndirect() && "Invalid kind!"); + return IndirectByVal; + } + void setIndirectByVal(bool IBV) { assert(isIndirect() && "Invalid kind!"); IndirectByVal = IBV; } + bool getIndirectRealign() const { + assert((isIndirect() || isIndirectAliased()) && "Invalid kind!"); + return IndirectRealign; + } + void setIndirectRealign(bool IR) { assert((isIndirect() || isIndirectAliased()) && "Invalid kind!"); IndirectRealign = IR; diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 9b1eb419f3e0..412ac4385f4b 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -269,6 +269,9 @@ struct MissingFeatures { static bool ABIPointerParameterAttrs() { return false; } static bool ABITransparentUnionHandling() { return false; } static bool ABIPotentialArgAccess() { return false; } + static bool ABIByValAttribute() { return false; } + static bool ABIAlignmentAttribute() { return false; } + static bool ABINoAliasAttribute() { return false; } //-- Missing AST queries diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 54fe89838e82..23c6c85a9723 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -226,6 +226,14 @@ void LowerModule::constructAttributeList(StringRef Name, // Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); cir_cconv_assert(!::cir::MissingFeatures::noFPClass()); break; + case ABIArgInfo::Indirect: { + cir_cconv_assert(!::cir::MissingFeatures::ABIInRegAttribute()); + cir_cconv_assert(!::cir::MissingFeatures::ABIByValAttribute()); + cir_cconv_assert(!::cir::MissingFeatures::ABINoAliasAttribute()); + cir_cconv_assert(!::cir::MissingFeatures::ABIAlignmentAttribute()); + cir_cconv_assert(!::cir::MissingFeatures::ABIPotentialArgAccess()); + break; + } default: cir_cconv_unreachable("Missing ABIArgInfo::Kind"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 77aef630caf5..69bb78283610 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -558,6 +558,51 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, rewriter.eraseOp(argAlloca.getDefiningOp()); break; } + case ABIArgInfo::Indirect: { + auto AI = Fn.getArgument(FirstIRArg); + if (!hasScalarEvaluationKind(Ty)) { + // Aggregates and complex variables are accessed by reference. All we + // need to do is realign the value, if requested. Also, if the address + // may be aliased, copy it to ensure that the parameter variable is + // mutable and has a unique adress, as C requires. + if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) { + cir_cconv_unreachable("NYI"); + } else { + // Inspired by EmitParamDecl, which is called in the end of + // EmitFunctionProlog in the original codegen + cir_cconv_assert(!ArgI.getIndirectByVal() && + "For truly ABI indirect arguments"); + + auto ptrTy = rewriter.getType(Arg.getType()); + Value arg = SrcFn.getArgument(ArgNo); + cir_cconv_assert(arg.hasOneUse()); + auto *firstStore = *arg.user_begin(); + auto argAlloca = cast(firstStore).getAddr(); + + rewriter.setInsertionPoint(argAlloca.getDefiningOp()); + auto align = LM.getDataLayout().getABITypeAlign(ptrTy); + auto alignAttr = rewriter.getI64IntegerAttr(align.value()); + auto newAlloca = rewriter.create( + Fn.getLoc(), rewriter.getType(ptrTy), ptrTy, + /*name=*/StringRef(""), + /*alignment=*/alignAttr); + + rewriter.create(newAlloca.getLoc(), AI, + newAlloca.getResult()); + auto load = rewriter.create(newAlloca.getLoc(), + newAlloca.getResult()); + + rewriter.replaceAllUsesWith(argAlloca, load); + rewriter.eraseOp(firstStore); + rewriter.eraseOp(argAlloca.getDefiningOp()); + + ArgVals.push_back(AI); + } + } else { + cir_cconv_unreachable("NYI"); + } + break; + } default: cir_cconv_unreachable("Unhandled ABIArgInfo::Kind"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index 8ed553a8f7d2..5a3382dc40e6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -99,6 +99,12 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { } break; } + case ABIArgInfo::Indirect: { + mlir::Type argType = (FI.arg_begin() + ArgNo)->type; + ArgTypes[FirstIRArg] = + mlir::cir::PointerType::get(getMLIRContext(), argType); + break; + } default: cir_cconv_unreachable("Missing ABIArgInfo::Kind"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index e28766985995..74de795ed622 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -188,7 +188,7 @@ AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, return ABIArgInfo::getDirect(argTy); } - cir_cconv_unreachable("NYI"); + return getNaturalAlignIndirect(Ty, /*ByVal=*/false); } std::unique_ptr diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index 245e28d834b1..eb1899840713 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -141,3 +141,14 @@ void pass_lt_128(LT_128 s) {} // LLVM: %[[#V1]] = alloca %struct.EQ_128, i64 1, align 4 // LLVM: store [2 x i64] %0, ptr %[[#V1]], align 8 void pass_eq_128(EQ_128 s) {} + +// CHECK: cir.func @pass_gt_128(%arg0: !cir.ptr +// CHECK: %[[#V0:]] = cir.alloca !cir.ptr, !cir.ptr>, [""] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %[[#V0]] : !cir.ptr, !cir.ptr> +// CHECK: %[[#V1:]] = cir.load %[[#V0]] : !cir.ptr>, !cir.ptr + +// LLVM: void @pass_gt_128(ptr %0) +// LLVM: %[[#V1:]] = alloca ptr, i64 1, align 8 +// LLVM: store ptr %0, ptr %[[#V1]], align 8 +// LLVM: %[[#V2:]] = load ptr, ptr %[[#V1]], align 8 +void pass_gt_128(GT_128 s) {} From 056fe30b1d8c2d11a7f8c2de4dabac471f0522b0 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 4 Nov 2024 15:54:43 -0800 Subject: [PATCH 2040/2301] [CIR][CIRGen] Add aliases for virtual dtors calls Note that we lack two pieces of support for aliases in LLVM IR dialect globals: the `alias` keyword and function types `void (ptr)`, this needs to be done before we can nail this for good, but it's outside the scope of this commit. The behavior is slightly different under -O1, which will be addressed next. --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 31 ++++++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 +- clang/test/CIR/CodeGen/ctor-alias.cpp | 2 +- .../CIR/CodeGen/virtual-destructor-calls.cpp | 81 +++++++++++++++++++ 5 files changed, 117 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/CodeGen/virtual-destructor-calls.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 52399e8e128c..aee027113ddf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -123,13 +123,13 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // Check if we have it already. StringRef MangledName = getMangledName(AliasDecl); auto Entry = getGlobalValue(MangledName); - auto globalValue = dyn_cast(Entry); + auto globalValue = + dyn_cast_or_null(Entry); if (Entry && globalValue && !globalValue.isDeclaration()) return false; if (Replacements.count(MangledName)) return false; - assert(globalValue && "only knows how to handle GlobalValue"); [[maybe_unused]] auto AliasValueType = getTypes().GetFunctionType(AliasDecl); // Find the referent. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 860c81cf2a9b..2aa2f283ab71 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -258,7 +258,8 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( DevirtualizedMethod ? DevirtualizedMethod : MD; const CIRGenFunctionInfo *FInfo = nullptr; if (const auto *Dtor = dyn_cast(CalleeDecl)) - llvm_unreachable("NYI"); + FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration( + GlobalDecl(Dtor, Dtor_Complete)); else FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl); @@ -295,7 +296,33 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( bool useVirtualCall = CanUseVirtualCall && !DevirtualizedMethod; if (const auto *dtor = dyn_cast(CalleeDecl)) { - llvm_unreachable("NYI"); + assert(CE->arg_begin() == CE->arg_end() && + "Destructor shouldn't have explicit parameters"); + assert(ReturnValue.isNull() && "Destructor shouldn't have return value"); + if (useVirtualCall) { + llvm_unreachable("NYI"); + } else { + GlobalDecl globalDecl(dtor, Dtor_Complete); + CIRGenCallee Callee; + if (getLangOpts().AppleKext && dtor->isVirtual() && HasQualifier) + llvm_unreachable("NYI"); + else if (!DevirtualizedMethod) + Callee = CIRGenCallee::forDirect( + CGM.getAddrOfCXXStructor(globalDecl, FInfo, Ty), globalDecl); + else { + Callee = CIRGenCallee::forDirect(CGM.GetAddrOfFunction(globalDecl, Ty), + globalDecl); + } + + QualType thisTy = + IsArrow ? Base->getType()->getPointeeType() : Base->getType(); + // CIRGen does not pass CallOrInvoke here (different from OG LLVM codegen) + // because in practice it always null even in OG. + buildCXXDestructorCall(globalDecl, Callee, This.getPointer(), thisTy, + /*ImplicitParam=*/nullptr, + /*ImplicitParamTy=*/QualType(), CE); + } + return RValue::get(nullptr); } // FIXME: Uses of 'MD' past this point need to be audited. We may need to use diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 6881003c4604..be6b11d0b367 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2144,8 +2144,11 @@ void CIRGenModule::buildAliasForGlobal(StringRef mangledName, mangledName, aliasee.getFunctionType(), aliasFD); alias.setAliasee(aliasee.getName()); alias.setLinkage(linkage); + // Declarations cannot have public MLIR visibility, just mark them private + // but this really should have no meaning since CIR should not be using + // this information to derive linkage information. mlir::SymbolTable::setSymbolVisibility( - alias, getMLIRVisibilityFromCIRLinkage(linkage)); + alias, mlir::SymbolTable::Visibility::Private); // Alias constructors and destructors are always unnamed_addr. assert(!MissingFeatures::unnamedAddr()); diff --git a/clang/test/CIR/CodeGen/ctor-alias.cpp b/clang/test/CIR/CodeGen/ctor-alias.cpp index 3739ecef1cce..8cca664a5b3c 100644 --- a/clang/test/CIR/CodeGen/ctor-alias.cpp +++ b/clang/test/CIR/CodeGen/ctor-alias.cpp @@ -37,4 +37,4 @@ B::B() { // CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: cir.return // CHECK: } -// CHECK: cir.func @_ZN1BC1Ev(!cir.ptr) alias(@_ZN1BC2Ev) \ No newline at end of file +// CHECK: cir.func private @_ZN1BC1Ev(!cir.ptr) alias(@_ZN1BC2Ev) \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp new file mode 100644 index 000000000000..79d4d0b96ffc --- /dev/null +++ b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp @@ -0,0 +1,81 @@ + +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -std=c++20 -mconstructor-aliases -O0 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -std=c++20 -mconstructor-aliases -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// PREV: %clang_cc1 -emit-llvm %s -o - -triple=x86_64-apple-darwin10 -mconstructor-aliases -O1 -disable-llvm-passes | FileCheck %s + +struct Member { + ~Member(); +}; + +struct A { + virtual ~A(); +}; + +struct B : A { + Member m; + virtual ~B(); +}; + +// Base dtor: actually calls A's base dtor. +// CIR: cir.func @_ZN1BD2Ev +// CIR: cir.call @_ZN6MemberD1Ev +// CIR: cir.call @_ZN1AD2Ev +// LLVM: define{{.*}} void @_ZN1BD2Ev(ptr +// LLVM: call void @_ZN6MemberD1Ev +// LLVM: call void @_ZN1AD2Ev + +// Complete dtor: just an alias because there are no virtual bases. +// CIR: cir.func private @_ZN1BD1Ev(!cir.ptr) alias(@_ZN1BD2Ev) +// FIXME: LLVM output should be: @_ZN1BD1Ev ={{.*}} unnamed_addr alias {{.*}} @_ZN1BD2Ev +// LLVM: declare {{.*}} dso_local void @_ZN1BD1Ev(ptr) + +// Deleting dtor: defers to the complete dtor. +// LLVM: define{{.*}} void @_ZN1BD0Ev(ptr +// LLVM: call void @_ZN1BD1Ev +// LLVM: call void @_ZdlPv + +// (aliases from C) +// FIXME: this should be an alias declaration. +// CIR: cir.func @_ZN1CD2Ev(%arg0: !cir.ptr{{.*}})) {{.*}} { +// CIR: cir.func private @_ZN1CD1Ev(!cir.ptr) alias(@_ZN1CD2Ev) + +// FIXME: LLVM output should be: @_ZN1CD2Ev ={{.*}} unnamed_addr alias {{.*}} @_ZN1BD2Ev +// LLVM: define dso_local void @_ZN1CD2Ev(ptr +// FIXME: LLVM output should be: @_ZN1CD1Ev ={{.*}} unnamed_addr alias {{.*}} @_ZN1CD2Ev +// LLVM: declare {{.*}} dso_local void @_ZN1CD1Ev(ptr) + +B::~B() { } + +struct C : B { + ~C(); +}; + +C::~C() { } + +// Complete dtor: just an alias (checked above). + +// Deleting dtor: defers to the complete dtor. +// CIR: cir.func @_ZN1CD0Ev +// CIR: cir.call @_ZN1CD1Ev +// CIR: cir.call @_ZdlPvm +// LLVM: define{{.*}} void @_ZN1CD0Ev(ptr +// LLVM: call void @_ZN1CD1Ev +// LLVM: call void @_ZdlPv + +// Base dtor: just an alias to B's base dtor. + +namespace PR12798 { + // A qualified call to a base class destructor should not undergo virtual + // dispatch. Template instantiation used to lose the qualifier. + struct A { virtual ~A(); }; + template void f(T *p) { p->A::~A(); } + + // CIR: cir.func weak_odr @_ZN7PR127981fINS_1AEEEvPT_ + // CIR: cir.call @_ZN7PR127981AD1Ev + // LLVM: define {{.*}} @_ZN7PR127981fINS_1AEEEvPT_( + // LLVM: call void @_ZN7PR127981AD1Ev( + template void f(A*); +} From 13be7319a101a2022dfdfafdd6d4676bad5a62a4 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 6 Nov 2024 15:30:53 -0800 Subject: [PATCH 2041/2301] [CIR][NFC] Fix unused warning --- .../CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index 74de795ed622..e1e6d098965a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -162,7 +162,6 @@ AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, } uint64_t Size = getContext().getTypeSize(Ty); - const Type Base = nullptr; // Aggregates <= 16 bytes are passed directly in registers or on the stack. if (Size <= 128) { From d8e1ad3a5c7029efe6d9dd4467e6bb797043937d Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 8 Nov 2024 02:56:23 +0800 Subject: [PATCH 2042/2301] [CIR][CodeGen] Use the same SSA name as OG's for string literals (#1073) This PR changes the naming format of string literals from `.str1` to `.str.1`, making it easier to reuse the existing testcases of OG CodeGen. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/test/CIR/CodeGen/dtors-scopes.cpp | 2 +- clang/test/CIR/CodeGen/globals.cpp | 4 ++-- clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp | 4 ++-- clang/test/CIR/CodeGen/predefined.cpp | 4 ++-- clang/test/CIR/IR/global.cir | 8 ++++---- clang/test/CIR/IR/invalid.cir | 2 +- clang/test/CIR/Lowering/globals.cir | 4 ++-- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index be6b11d0b367..7d5fee92adc0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1536,7 +1536,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, SmallString<256> StringNameBuffer = Name; llvm::raw_svector_ostream Out(StringNameBuffer); if (StringLiteralCnt) - Out << StringLiteralCnt; + Out << '.' << StringLiteralCnt; Name = Out.str(); StringLiteralCnt++; diff --git a/clang/test/CIR/CodeGen/dtors-scopes.cpp b/clang/test/CIR/CodeGen/dtors-scopes.cpp index 6d363f0254bf..c9bdb1dd2da8 100644 --- a/clang/test/CIR/CodeGen/dtors-scopes.cpp +++ b/clang/test/CIR/CodeGen/dtors-scopes.cpp @@ -24,7 +24,7 @@ void dtor1() { // DTOR_BODY: cir.func linkonce_odr @_ZN1CD2Ev{{.*}}{ // DTOR_BODY: %2 = cir.get_global @printf -// DTOR_BODY: %3 = cir.get_global @".str2" +// DTOR_BODY: %3 = cir.get_global @".str.2" // DTOR_BODY: %4 = cir.cast(array_to_ptrdecay, %3 // DTOR_BODY: %5 = cir.call @printf(%4) // DTOR_BODY: cir.return diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index 19edbbf22491..ca8161b1cb8f 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -52,8 +52,8 @@ int use_func() { return func(); } // CHECK-NEXT: cir.global "private" constant cir_private dsolocal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK-NEXT: cir.global external @s = #cir.global_view<@".str"> : !cir.ptr -// CHECK-NEXT: cir.global "private" constant cir_private dsolocal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK-NEXT: cir.global external @s1 = #cir.global_view<@".str1"> : !cir.ptr +// CHECK-NEXT: cir.global "private" constant cir_private dsolocal @".str.1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK-NEXT: cir.global external @s1 = #cir.global_view<@".str.1"> : !cir.ptr // CHECK-NEXT: cir.global external @s2 = #cir.global_view<@".str"> : !cir.ptr diff --git a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp index e245d24122cf..6808c5a89e33 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp @@ -54,7 +54,7 @@ void test() { // LLVM: %"class.std::initializer_list" = type { ptr, ptr } // LLVM: @.str = private constant [3 x i8] c"xy\00" -// LLVM: @.str1 = private constant [3 x i8] c"uv\00" +// LLVM: @.str.1 = private constant [3 x i8] c"uv\00" // LLVM: define linkonce_odr void @_ZSt1fIPKcEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG0:%.*]]) // LLVM: [[LOCAL_PTR:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, @@ -70,7 +70,7 @@ void test() { // LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr ptr, ptr [[ELEM_ARRAY_PTR]], i32 0, // LLVM: store ptr @.str, ptr [[PTR_FIRST_ELEM]], align 8, // LLVM: [[PTR_SECOND_ELEM:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 1, -// LLVM: store ptr @.str1, ptr [[PTR_SECOND_ELEM]], align 8, +// LLVM: store ptr @.str.1, ptr [[PTR_SECOND_ELEM]], align 8, // LLVM: [[INIT_START_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0, // LLVM: [[INIT_END_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1, // LLVM: [[ELEM_ARRAY_END:%.*]] = getelementptr [2 x ptr], ptr [[ELEM_ARRAY_PTR]], i64 2, diff --git a/clang/test/CIR/CodeGen/predefined.cpp b/clang/test/CIR/CodeGen/predefined.cpp index b5ec86d41aff..08cafd4cbb29 100644 --- a/clang/test/CIR/CodeGen/predefined.cpp +++ b/clang/test/CIR/CodeGen/predefined.cpp @@ -13,9 +13,9 @@ void m() { // CHECK: %0 = cir.get_global @".str" : !cir.ptr> // CHECK: %1 = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr // CHECK: %2 = cir.const #cir.int<79> : !s32i -// CHECK: %3 = cir.get_global @".str1" : !cir.ptr> +// CHECK: %3 = cir.get_global @".str.1" : !cir.ptr> // CHECK: %4 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr -// CHECK: %5 = cir.get_global @".str2" : !cir.ptr> +// CHECK: %5 = cir.get_global @".str.2" : !cir.ptr> // CHECK: %6 = cir.cast(array_to_ptrdecay, %5 : !cir.ptr>), !cir.ptr // CHECK: cir.call @__assert2(%1, %2, %4, %6) : (!cir.ptr, !s32i, !cir.ptr, !cir.ptr) -> () // CHECK: cir.return diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index cb75684886af..ae08062b1c43 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -11,8 +11,8 @@ module { cir.global external @rgb2 = #cir.const_struct<{#cir.int<0> : !s8i, #cir.int<5> : !s64i, #cir.ptr : !cir.ptr}> : !cir.struct}> cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} cir.global "private" internal @c : !s32i - cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} - cir.global external @s = #cir.global_view<@".str2"> : !cir.ptr + cir.global "private" constant internal @".str.2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s = #cir.global_view<@".str.2"> : !cir.ptr cir.func @use_global() { %0 = cir.get_global @a : !cir.ptr cir.return @@ -80,8 +80,8 @@ module { // CHECK: cir.global external @b = #cir.const_array<"example\00" : !cir.array> // CHECK: cir.global "private" constant internal @".str" : !cir.array {alignment = 1 : i64} // CHECK: cir.global "private" internal @c : !s32i -// CHECK: cir.global "private" constant internal @".str2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} -// CHECK: cir.global external @s = #cir.global_view<@".str2"> : !cir.ptr +// CHECK: cir.global "private" constant internal @".str.2" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} +// CHECK: cir.global external @s = #cir.global_view<@".str.2"> : !cir.ptr // CHECK: cir.func @use_global() diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 9df6e0c858fb..7b6424abcefc 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -352,7 +352,7 @@ module { module { // expected-error@+1 {{expected type declaration for string literal}} - cir.global "private" constant external @".str2" = #cir.const_array<"example\00"> {alignment = 1 : i64} + cir.global "private" constant external @".str.2" = #cir.const_array<"example\00"> {alignment = 1 : i64} } // ----- diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 6290ca19c1e0..765544e8c125 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -41,8 +41,8 @@ module { // MLIR: %0 = llvm.mlir.addressof @a : !llvm.ptr // MLIR: llvm.return %0 : !llvm.ptr // MLIR: } - cir.global "private" constant internal @".str1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} - cir.global external @s1 = #cir.global_view<@".str1"> : !cir.ptr + cir.global "private" constant internal @".str.1" = #cir.const_array<"example1\00" : !cir.array> : !cir.array {alignment = 1 : i64} + cir.global external @s1 = #cir.global_view<@".str.1"> : !cir.ptr cir.global external @s2 = #cir.global_view<@".str"> : !cir.ptr cir.func @_Z10use_globalv() { %0 = cir.alloca !s32i, !cir.ptr, ["li", init] {alignment = 4 : i64} From fe01d5a93289f38b74c8e5d6c36eaac61bb98357 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Thu, 7 Nov 2024 13:35:03 -0800 Subject: [PATCH 2043/2301] [CIR] Make X86ArgClass an `enum class` (#1080) It's currently polluting the `cir` namespace with very generic symbols like `Integer` and `Memory`, which is pretty confusing. `X86_64ABIInfo` already has `Class` alias for `X86ArgClass`, so we can use that alias to qualify all uses. --- clang/include/clang/CIR/Target/x86.h | 2 +- clang/lib/CIR/CodeGen/TargetInfo.cpp | 50 +++++++++++++++------------- 2 files changed, 28 insertions(+), 24 deletions(-) diff --git a/clang/include/clang/CIR/Target/x86.h b/clang/include/clang/CIR/Target/x86.h index 08c6cae7b94f..1116415bda75 100644 --- a/clang/include/clang/CIR/Target/x86.h +++ b/clang/include/clang/CIR/Target/x86.h @@ -23,7 +23,7 @@ enum class X86AVXABILevel { }; // Possible argument classifications according to the x86 ABI documentation. -enum X86ArgClass { +enum class X86ArgClass { Integer = 0, SSE, SSEUp, diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 9ef7531406b6..561531ce2636 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -379,8 +379,10 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, // Check some invariants // FIXME: Enforce these by construction. - assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); - assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); + assert((Hi != Class::Memory || Lo == Class::Memory) && + "Invalid memory classification."); + assert((Hi != Class::SSEUp || Lo == Class::SSE) && + "Invalid SSEUp classification."); neededInt = 0; neededSSE = 0; @@ -391,7 +393,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next available // register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 and %r9 is used. - case Integer: + case Class::Integer: ++neededInt; // Pick an 8-byte type based on the preferred type. @@ -399,7 +401,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, // If we have a sign or zero extended integer, make sure to return Extend so // that the parameter gets the right LLVM IR attributes. - if (Hi == NoClass && mlir::isa(ResType)) { + if (Hi == Class::NoClass && mlir::isa(ResType)) { assert(!Ty->getAs() && "NYI"); if (Ty->isSignedIntegerOrEnumerationType() && isPromotableIntegerTypeForABI(Ty)) @@ -411,7 +413,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next available SSE // register is used, the registers are taken in the order from %xmm0 to // %xmm7. - case SSE: { + case Class::SSE: { mlir::Type CIRType = CGT.ConvertType(Ty); ResType = GetSSETypeAtOffset(CIRType, 0, Ty, 0); ++neededSSE; @@ -423,7 +425,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, switch (Hi) { default: assert(false && "NYI"); - case NoClass: + case Class::NoClass: break; } @@ -453,23 +455,23 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo, // shouldn't be passed in registers for example, so there is no chance they // can straddle an eightbyte. Verify & simplify. - Lo = Hi = NoClass; + Lo = Hi = Class::NoClass; Class &Current = OffsetBase < 64 ? Lo : Hi; - Current = Memory; + Current = Class::Memory; if (const auto *BT = Ty->getAs()) { BuiltinType::Kind k = BT->getKind(); if (k == BuiltinType::Void) { - Current = NoClass; + Current = Class::NoClass; } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { assert(false && "NYI"); - Lo = Integer; - Hi = Integer; + Lo = Class::Integer; + Hi = Class::Integer; } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { - Current = Integer; + Current = Class::Integer; } else if (k == BuiltinType::Float || k == BuiltinType::Double || k == BuiltinType::Float16) { - Current = SSE; + Current = Class::SSE; } else if (k == BuiltinType::LongDouble) { assert(false && "NYI"); } else @@ -482,7 +484,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo, assert(!Ty->getAs() && "Enums NYI"); if (Ty->hasPointerRepresentation()) { - Current = Integer; + Current = Class::Integer; return; } @@ -508,27 +510,29 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); // Check some invariants. - assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); - assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); + assert((Hi != Class::Memory || Lo == Class::Memory) && + "Invalid memory classification."); + assert((Hi != Class::SSEUp || Lo == Class::SSE) && + "Invalid SSEUp classification."); mlir::Type ResType = nullptr; - assert(Lo == NoClass || Lo == Integer || - Lo == SSE && "Only NoClass and Integer supported so far"); + assert(Lo == Class::NoClass || Lo == Class::Integer || + Lo == Class::SSE && "Only NoClass and Integer supported so far"); switch (Lo) { - case NoClass: - assert(Hi == NoClass && "Only NoClass supported so far for Hi"); + case Class::NoClass: + assert(Hi == Class::NoClass && "Only NoClass supported so far for Hi"); return ABIArgInfo::getIgnore(); // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next available // register of the sequence %rax, %rdx is used. - case Integer: + case Class::Integer: ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); // If we have a sign or zero extended integer, make sure to return Extend so // that the parameter gets the right LLVM IR attributes. // TODO: extend the above consideration to MLIR - if (Hi == NoClass && mlir::isa(ResType)) { + if (Hi == Class::NoClass && mlir::isa(ResType)) { // Treat an enum type as its underlying type. if (const auto *EnumTy = RetTy->getAs()) RetTy = EnumTy->getDecl()->getIntegerType(); @@ -542,7 +546,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next available SSE // register of the sequence %xmm0, %xmm1 is used. - case SSE: + case Class::SSE: ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); break; From b15d9ade7583e47f4d2edc569c5c06d73aef0eb8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Nov 2024 14:28:26 -0800 Subject: [PATCH 2044/2301] [CIR][CIRGen] Fix some alias issues under -O1 and above Note that there are still missing pieces, which will be incrementally addressed. --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 12 +++++++++--- clang/test/CIR/CodeGen/virtual-destructor-calls.cpp | 7 ++++++- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index aee027113ddf..2cf867482149 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -174,7 +174,7 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { llvm_unreachable("NYI"); // Create the alias with no name. - buildAliasForGlobal("", Entry, AliasDecl, Aliasee, Linkage); + buildAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage); return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 7d5fee92adc0..7a65329325f0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2139,9 +2139,15 @@ void CIRGenModule::buildAliasForGlobal(StringRef mangledName, mlir::cir::GlobalLinkageKind linkage) { auto *aliasFD = dyn_cast(aliasGD.getDecl()); assert(aliasFD && "expected FunctionDecl"); - auto alias = - createCIRFunction(getLoc(aliasGD.getDecl()->getSourceRange()), - mangledName, aliasee.getFunctionType(), aliasFD); + + // The aliasee function type is different from the alias one, this difference + // is specific to CIR because in LLVM the ptr types are already erased at this + // point. + auto &fnInfo = getTypes().arrangeCXXStructorDeclaration(aliasGD); + auto fnType = getTypes().GetFunctionType(fnInfo); + + auto alias = createCIRFunction(getLoc(aliasGD.getDecl()->getSourceRange()), + mangledName, fnType, aliasFD); alias.setAliasee(aliasee.getName()); alias.setLinkage(linkage); // Declarations cannot have public MLIR visibility, just mark them private diff --git a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp index 79d4d0b96ffc..44b732b11098 100644 --- a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp +++ b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp @@ -1,6 +1,8 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -std=c++20 -mconstructor-aliases -O0 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -std=c++20 -mconstructor-aliases -O1 -fclangir -emit-cir %s -o %t-o1.cir +// RUN: FileCheck --check-prefix=CIR_O1 --input-file=%t-o1.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -std=c++20 -mconstructor-aliases -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s @@ -38,8 +40,11 @@ struct B : A { // LLVM: call void @_ZdlPv // (aliases from C) -// FIXME: this should be an alias declaration. +// FIXME: this should be an alias declaration even in -O0 // CIR: cir.func @_ZN1CD2Ev(%arg0: !cir.ptr{{.*}})) {{.*}} { +// CIR_O1-NOT: cir.func @_ZN1CD2Ev(%arg0: !cir.ptr{{.*}})) {{.*}} { +// CIR_O1: cir.func private @_ZN1CD2Ev(!cir.ptr) alias(@_ZN1BD2Ev) + // CIR: cir.func private @_ZN1CD1Ev(!cir.ptr) alias(@_ZN1CD2Ev) // FIXME: LLVM output should be: @_ZN1CD2Ev ={{.*}} unnamed_addr alias {{.*}} @_ZN1BD2Ev From 8fc5d4b582ae802df19f9edb856041c81315fb18 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Nov 2024 14:45:17 -0800 Subject: [PATCH 2045/2301] [CIR][NFC] Add more checks and notes for virtual-destructor-calls.cpp test --- clang/test/CIR/CodeGen/virtual-destructor-calls.cpp | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp index 44b732b11098..df0ac0914e76 100644 --- a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp +++ b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp @@ -6,7 +6,9 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -std=c++20 -mconstructor-aliases -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s -// PREV: %clang_cc1 -emit-llvm %s -o - -triple=x86_64-apple-darwin10 -mconstructor-aliases -O1 -disable-llvm-passes | FileCheck %s +// FIXME: LLVM IR dialect does not yet support function ptr globals, which precludes +// a lot of the proper semantics for properly representing alias functions in LLVM +// (see the note on LLVM_O1 below). struct Member { ~Member(); @@ -42,15 +44,20 @@ struct B : A { // (aliases from C) // FIXME: this should be an alias declaration even in -O0 // CIR: cir.func @_ZN1CD2Ev(%arg0: !cir.ptr{{.*}})) {{.*}} { +// CIR: cir.func private @_ZN1CD1Ev(!cir.ptr) alias(@_ZN1CD2Ev) + // CIR_O1-NOT: cir.func @_ZN1CD2Ev(%arg0: !cir.ptr{{.*}})) {{.*}} { // CIR_O1: cir.func private @_ZN1CD2Ev(!cir.ptr) alias(@_ZN1BD2Ev) - -// CIR: cir.func private @_ZN1CD1Ev(!cir.ptr) alias(@_ZN1CD2Ev) +// FIXME: LLVM alias directly to @_ZN1BD2Ev instead of through @_ZN1CD2Ev +// CIR_O1: cir.func private @_ZN1CD1Ev(!cir.ptr) alias(@_ZN1CD2Ev) // FIXME: LLVM output should be: @_ZN1CD2Ev ={{.*}} unnamed_addr alias {{.*}} @_ZN1BD2Ev // LLVM: define dso_local void @_ZN1CD2Ev(ptr // FIXME: LLVM output should be: @_ZN1CD1Ev ={{.*}} unnamed_addr alias {{.*}} @_ZN1CD2Ev // LLVM: declare {{.*}} dso_local void @_ZN1CD1Ev(ptr) +// FIXME: note that LLVM_O1 cannot be tested because the canocalizers running +// on top of LLVM IR dialect delete _ZN1CD2Ev in its current form (a function +// declaration) since its not used in the TU. B::~B() { } From 1a52c818ecfad5378c98d22e3a6b929e4da3b3a8 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Nov 2024 17:10:24 -0800 Subject: [PATCH 2046/2301] [CIR][NFC] Update test comment Just verified this is actually done by some LLVM optimization, not by the frontend emitting directly, so this is a non-goal now, since CIR can also use LLVM opts to do the same once we have real global alias. --- clang/test/CIR/CodeGen/virtual-destructor-calls.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp index df0ac0914e76..7a7ba70669d8 100644 --- a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp +++ b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp @@ -48,7 +48,6 @@ struct B : A { // CIR_O1-NOT: cir.func @_ZN1CD2Ev(%arg0: !cir.ptr{{.*}})) {{.*}} { // CIR_O1: cir.func private @_ZN1CD2Ev(!cir.ptr) alias(@_ZN1BD2Ev) -// FIXME: LLVM alias directly to @_ZN1BD2Ev instead of through @_ZN1CD2Ev // CIR_O1: cir.func private @_ZN1CD1Ev(!cir.ptr) alias(@_ZN1CD2Ev) // FIXME: LLVM output should be: @_ZN1CD2Ev ={{.*}} unnamed_addr alias {{.*}} @_ZN1BD2Ev From 5d1abbe3e63cf3ab0ef979ddf1673edbb84a2e30 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 7 Nov 2024 17:15:28 -0800 Subject: [PATCH 2047/2301] [CIR][NFC] More comments removed for ir differences Also verified this does not apply anymore, we match -O0. The only remaing part is to lower to proper LLVM globals once LLVM IR dialect gets the global alias support. --- clang/test/CIR/CodeGen/virtual-destructor-calls.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp index 7a7ba70669d8..78f5866d48a7 100644 --- a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp +++ b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp @@ -42,7 +42,6 @@ struct B : A { // LLVM: call void @_ZdlPv // (aliases from C) -// FIXME: this should be an alias declaration even in -O0 // CIR: cir.func @_ZN1CD2Ev(%arg0: !cir.ptr{{.*}})) {{.*}} { // CIR: cir.func private @_ZN1CD1Ev(!cir.ptr) alias(@_ZN1CD2Ev) From e6753e4c4af5ff504d2c1d386ac8924d40689310 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Sat, 9 Nov 2024 02:20:58 +0800 Subject: [PATCH 2048/2301] Revert "[CIR] fix getTypeSizeInBits with fp80 and fp128" (#1089) Reverts llvm/clangir#1058 --- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index b1e234e1dcdc..ae50f79fff30 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -749,10 +749,7 @@ const llvm::fltSemantics &FP80Type::getFloatSemantics() const { llvm::TypeSize FP80Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, mlir::DataLayoutEntryListRef params) const { - // The size of FP80Type should be 16 bytes, or 128 bits. The lower 80 bits - // take part in the value representation and the higher 48 bits are just - // paddings. - return llvm::TypeSize::getFixed(128); + return llvm::TypeSize::getFixed(16); } uint64_t FP80Type::getABIAlignment(const mlir::DataLayout &dataLayout, @@ -773,7 +770,7 @@ const llvm::fltSemantics &FP128Type::getFloatSemantics() const { llvm::TypeSize FP128Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, mlir::DataLayoutEntryListRef params) const { - return llvm::TypeSize::getFixed(128); + return llvm::TypeSize::getFixed(16); } uint64_t FP128Type::getABIAlignment(const mlir::DataLayout &dataLayout, From baec4d1535bca4fbbafcfffbc35378bc5f462383 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 8 Nov 2024 13:22:18 -0500 Subject: [PATCH 2049/2301] [CIR][CIRGen][Builtin] Support __builtin_return_address (#1046) test case is from [Traditional Clang CodeGen test file](https://github.com/llvm/clangir/blob/723e78afb5ae4fbd000269a057410913ade3ef44/clang/test/CodeGen/2004-02-13-BuiltinFrameReturnAddress.c#L5) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 31 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 10 ++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 26 ++++++++++++++-- clang/test/CIR/CodeGen/builtins.cpp | 11 +++++++ clang/test/CIR/IR/builtins.cir | 14 +++++++++ 5 files changed, 88 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/IR/builtins.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0a7c7222968d..e29547a6ed6c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4125,6 +4125,37 @@ def MemChrOp : CIR_Op<"libc.memchr"> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// ReturnAddrOp +//===----------------------------------------------------------------------===// + +def ReturnAddrOp : CIR_Op<"return_address"> { + let arguments = (ins UInt32:$level); + let summary = "The return address of the current function, or of one of its callers"; + let results = (outs Res:$result); + + let description = [{ + Represents call to builtin function ` __builtin_return_address` in CIR. + This builtin function returns the return address of the current function, + or of one of its callers. + The `level` argument is number of frames to scan up the call stack. + For instance, value of 0 yields the return address of the current function, + value of 1 yields the return address of the caller of the current function, + and so forth. + + Examples: + + ```mlir + %p = return_address(%level) -> !cir.ptr + ``` + }]; + + let assemblyFormat = [{ + `(` $level `)` attr-dict + }]; + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // StdFindOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index cbdb9bd09fc7..00bc27ef6087 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1510,8 +1510,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_wmemcmp NYI"); case Builtin::BI__builtin_dwarf_cfa: llvm_unreachable("BI__builtin_dwarf_cfa NYI"); - case Builtin::BI__builtin_return_address: - llvm_unreachable("BI__builtin_return_address NYI"); + case Builtin::BI__builtin_return_address: { + mlir::Location loc = getLoc(E->getExprLoc()); + mlir::Attribute levelAttr = ConstantEmitter(*this).emitAbstract( + E->getArg(0), E->getArg(0)->getType()); + int64_t level = mlir::cast(levelAttr).getSInt(); + return RValue::get(builder.create( + loc, builder.getUInt32(level, loc))); + } case Builtin::BI_ReturnAddress: llvm_unreachable("BI_ReturnAddress NYI"); case Builtin::BI__builtin_frame_address: diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 98d8e4de46d4..65912b81fd9d 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4116,6 +4116,21 @@ class CIRCmpThreeWayOpLowering } }; +class CIRReturnAddrOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ReturnAddrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm.returnaddress", + llvmPtrTy, adaptor.getOperands()); + return mlir::success(); + } +}; + class CIRClearCacheOpLowering : public mlir::OpConversionPattern { public: @@ -4371,9 +4386,16 @@ void populateCIRToLLVMConversionPatterns( CIRVectorShuffleVecLowering, CIRStackSaveLowering, CIRUnreachableLowering, CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, + CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, CIRMemCpyOpLowering, + CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, + CIRVectorCreateLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, + CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, + CIRVectorShuffleVecLowering, CIRStackSaveLowering, CIRUnreachableLowering, + CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, + CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, - CIRClearCacheOpLowering, CIREhTypeIdOpLowering, CIRCatchParamOpLowering, - CIRResumeOpLowering, CIRAllocExceptionOpLowering, + CIRReturnAddrOpLowering, CIRClearCacheOpLowering, CIREhTypeIdOpLowering, + CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, CIRFreeExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, CIRAssumeLowering, CIRAssumeAlignedLowering, CIRAssumeSepStorageLowering, CIRBaseClassAddrOpLowering, CIRDerivedClassAddrOpLowering, diff --git a/clang/test/CIR/CodeGen/builtins.cpp b/clang/test/CIR/CodeGen/builtins.cpp index 202ae61f0db2..971fc09e07d9 100644 --- a/clang/test/CIR/CodeGen/builtins.cpp +++ b/clang/test/CIR/CodeGen/builtins.cpp @@ -75,3 +75,14 @@ extern "C" char* test_memchr(const char arg[32]) { // LLVM: [[RET:%.*]] = load ptr, ptr [[RET_P]], align 8 // LLVM: ret ptr [[RET]] } + +extern "C" void *test_return_address(void) { + return __builtin_return_address(1); + + // CIR-LABEL: test_return_address + // [[ARG:%.*]] = cir.const #cir.int<1> : !u32i + // {{%.*}} = cir.return_address([[ARG]]) + + // LLVM-LABEL: @test_return_address + // LLVM: {{%.*}} = call ptr @llvm.returnaddress(i32 1) +} diff --git a/clang/test/CIR/IR/builtins.cir b/clang/test/CIR/IR/builtins.cir new file mode 100644 index 000000000000..86c0c57825de --- /dev/null +++ b/clang/test/CIR/IR/builtins.cir @@ -0,0 +1,14 @@ +// RUN: cir-opt %s | cir-opt | FileCheck %s +!u32i = !cir.int + +module { + cir.func @test1() { + %0 = cir.const #cir.int<1> : !u32i + %1 = cir.return_address(%0) + cir.return + } + // CHECK: cir.func @test1() + // CHECK: %0 = cir.const #cir.int<1> : !u32i + // CHECK: %1 = cir.return_address(%0) + // CHECK: cir.return +} From fe3236512d956e9a6313e814b69430ef6f4fb0da Mon Sep 17 00:00:00 2001 From: orbiri Date: Fri, 8 Nov 2024 20:43:00 +0200 Subject: [PATCH 2050/2301] [CIR] Extend support for floating point attributes (#572) This commit extends the support for floating point attributes parsing by using the new `AsmParser::parseFloat(fltSemnatics, APFloat&)` interface. As a drive-by, this commit also harmonizes the cir.fp print/parse namespace usage, and adds the constraint of supporting only "CIRFPType"s for cir.fp in tablegen instead of verifying it manually in the parsing logic. --- This commit is based on top of a to-be-upstreamed commit which extends the upstream MLIR float type parsing. Upstream parsing of float type has full capability only through parsing the Builtin Dialect's `FloatAttr`. Thos commit exposes the same capabilities to downstream users. --- This PR should resolve (at least) `GCC-C-execute-ieee-fp-cmp-2` and `GCC-C-execute-ieee-fp-cmp-4`, paving the way to other `GCC-C-execute-ieee-*` tests passing from the SingleSource suite. It resolves #559 . --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 25 ++++-- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 52 ++++------- clang/test/CIR/IR/attribute.cir | 25 ++++++ clang/test/CIR/IR/float.cir | 90 +++++++++++++++++++ clang/test/CIR/IR/invalid.cir | 59 ++++++++++++ clang/test/CIR/Lowering/class.cir | 2 +- clang/test/CIR/Lowering/struct.cir | 2 +- 7 files changed, 209 insertions(+), 46 deletions(-) create mode 100644 clang/test/CIR/IR/attribute.cir create mode 100644 clang/test/CIR/IR/float.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index a6a27006f357..a81ac5037caa 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -285,13 +285,20 @@ def FPAttr : CIR_Attr<"FP", "fp", [TypedAttrInterface]> { let summary = "An attribute containing a floating-point value"; let description = [{ An fp attribute is a literal attribute that represents a floating-point - value of the specified floating-point type. + value of the specified floating-point type. Supporting only CIR FP types. }]; - let parameters = (ins AttributeSelfTypeParameter<"">:$type, "APFloat":$value); + let parameters = (ins + AttributeSelfTypeParameter<"", "::mlir::cir::CIRFPTypeInterface">:$type, + APFloatParameter<"">:$value + ); let builders = [ AttrBuilderWithInferredContext<(ins "Type":$type, "const APFloat &":$value), [{ - return $_get(type.getContext(), type, value); + return $_get(type.getContext(), mlir::cast(type), value); + }]>, + AttrBuilder<(ins "Type":$type, + "const APFloat &":$value), [{ + return $_get($_ctxt, mlir::cast(type), value); }]>, ]; let extraClassDeclaration = [{ @@ -319,7 +326,7 @@ def ComplexAttr : CIR_Attr<"Complex", "complex", [TypedAttrInterface]> { contains values of the same CIR type. }]; - let parameters = (ins + let parameters = (ins AttributeSelfTypeParameter<"", "mlir::cir::ComplexType">:$type, "mlir::TypedAttr":$real, "mlir::TypedAttr":$imag); @@ -820,7 +827,7 @@ def AddressSpaceAttr : CIR_Attr<"AddressSpace", "addrspace"> { let extraClassDeclaration = [{ static constexpr char kTargetKeyword[] = "}]#targetASCase.symbol#[{"; static constexpr int32_t kFirstTargetASValue = }]#targetASCase.value#[{; - + bool isLang() const; bool isTarget() const; unsigned getTargetValue() const; @@ -980,7 +987,7 @@ def ASTCallExprAttr : AST<"CallExpr", "call.expr", // VisibilityAttr //===----------------------------------------------------------------------===// -def VK_Default : I32EnumAttrCase<"Default", 1, "default">; +def VK_Default : I32EnumAttrCase<"Default", 1, "default">; def VK_Hidden : I32EnumAttrCase<"Hidden", 2, "hidden">; def VK_Protected : I32EnumAttrCase<"Protected", 3, "protected">; @@ -1013,7 +1020,7 @@ def VisibilityAttr : CIR_Attr<"Visibility", "visibility"> { bool isDefault() const { return getValue() == VisibilityKind::Default; }; bool isHidden() const { return getValue() == VisibilityKind::Hidden; }; bool isProtected() const { return getValue() == VisibilityKind::Protected; }; - }]; + }]; } @@ -1160,7 +1167,7 @@ def AnnotationAttr : CIR_Attr<"Annotation", "annotation"> { let parameters = (ins "StringAttr":$name, "ArrayAttr":$args); - let assemblyFormat = "`<` struct($name, $args) `>`"; + let assemblyFormat = "`<` struct($name, $args) `>`"; let extraClassDeclaration = [{ bool isNoArgs() const { return getArgs().empty(); }; @@ -1187,7 +1194,7 @@ def GlobalAnnotationValuesAttr : CIR_Attr<"GlobalAnnotationValues", void *c __attribute__((annotate("noargvar"))); void foo(int i) __attribute__((annotate("noargfunc"))) {} ``` - After CIR lowering prepare pass, compiler generates a + After CIR lowering prepare pass, compiler generates a `GlobalAnnotationValuesAttr` like the following: ``` #cir &value, mlir::Type ty); + mlir::FailureOr &value, + mlir::cir::CIRFPTypeInterface fpType); static mlir::ParseResult parseConstPtr(mlir::AsmParser &parser, mlir::IntegerAttr &value); @@ -313,50 +314,31 @@ LogicalResult IntAttr::verify(function_ref emitError, // FPAttr definitions //===----------------------------------------------------------------------===// -static void printFloatLiteral(mlir::AsmPrinter &p, llvm::APFloat value, - mlir::Type ty) { +static void printFloatLiteral(AsmPrinter &p, APFloat value, Type ty) { p << value; } -static mlir::ParseResult -parseFloatLiteral(mlir::AsmParser &parser, - mlir::FailureOr &value, mlir::Type ty) { - double rawValue; - if (parser.parseFloat(rawValue)) { - return parser.emitError(parser.getCurrentLocation(), - "expected floating-point value"); - } - - auto losesInfo = false; - value.emplace(rawValue); +static ParseResult parseFloatLiteral(AsmParser &parser, + FailureOr &value, + CIRFPTypeInterface fpType) { - auto tyFpInterface = dyn_cast(ty); - if (!tyFpInterface) { - // Parsing of the current floating-point literal has succeeded, but the - // given attribute type is invalid. This error will be reported later when - // the attribute is being verified. - return success(); - } + APFloat parsedValue(0.0); + if (parser.parseFloat(fpType.getFloatSemantics(), parsedValue)) + return failure(); - value->convert(tyFpInterface.getFloatSemantics(), - llvm::RoundingMode::TowardZero, &losesInfo); + value.emplace(parsedValue); return success(); } -cir::FPAttr cir::FPAttr::getZero(mlir::Type type) { - return get( - type, APFloat::getZero( - mlir::cast(type).getFloatSemantics())); +FPAttr FPAttr::getZero(Type type) { + return get(type, + APFloat::getZero( + mlir::cast(type).getFloatSemantics())); } -LogicalResult cir::FPAttr::verify(function_ref emitError, - Type type, APFloat value) { - auto fltTypeInterface = mlir::dyn_cast(type); - if (!fltTypeInterface) { - emitError() << "expected floating-point type"; - return failure(); - } - if (APFloat::SemanticsToEnum(fltTypeInterface.getFloatSemantics()) != +LogicalResult FPAttr::verify(function_ref emitError, + CIRFPTypeInterface fpType, APFloat value) { + if (APFloat::SemanticsToEnum(fpType.getFloatSemantics()) != APFloat::SemanticsToEnum(value.getSemantics())) { emitError() << "floating-point semantics mismatch"; return failure(); diff --git a/clang/test/CIR/IR/attribute.cir b/clang/test/CIR/IR/attribute.cir new file mode 100644 index 000000000000..4c9d4083ad4a --- /dev/null +++ b/clang/test/CIR/IR/attribute.cir @@ -0,0 +1,25 @@ +// RUN: cir-opt %s -split-input-file -allow-unregistered-dialect -verify-diagnostics | FileCheck %s + +cir.func @float_attrs_pass() { + "test.float_attrs"() { + // CHECK: float_attr = #cir.fp<2.000000e+00> : !cir.float + float_attr = #cir.fp<2.> : !cir.float + } : () -> () + "test.float_attrs"() { + // CHECK: float_attr = #cir.fp<-2.000000e+00> : !cir.float + float_attr = #cir.fp<-2.> : !cir.float + } : () -> () + "test.float_attrs"() { + // CHECK: float_attr = #cir.fp<2.000000e+00> : !cir.double + float_attr = #cir.fp<2.> : !cir.double + } : () -> () + "test.float_attrs"() { + // CHECK: float_attr = #cir.fp<2.000000e+00> : !cir.long_double + float_attr = #cir.fp<2.> : !cir.long_double + } : () -> () + "test.float_attrs"() { + // CHECK: float_attr = #cir.fp<2.000000e+00> : !cir.long_double + float_attr = #cir.fp<2.> : !cir.long_double + } : () -> () + cir.return +} \ No newline at end of file diff --git a/clang/test/CIR/IR/float.cir b/clang/test/CIR/IR/float.cir new file mode 100644 index 000000000000..1be52c339ad5 --- /dev/null +++ b/clang/test/CIR/IR/float.cir @@ -0,0 +1,90 @@ +// RUN: cir-opt %s | FileCheck %s + +// Adapted from mlir/test/IR/parser.mlir + +// CHECK-LABEL: @f32_special_values +cir.func @f32_special_values() { + // F32 signaling NaNs. + // CHECK: cir.const #cir.fp<0x7F800001> : !cir.float + %0 = cir.const #cir.fp<0x7F800001> : !cir.float + // CHECK: cir.const #cir.fp<0x7FBFFFFF> : !cir.float + %1 = cir.const #cir.fp<0x7FBFFFFF> : !cir.float + + // F32 quiet NaNs. + // CHECK: cir.const #cir.fp<0x7FC00000> : !cir.float + %2 = cir.const #cir.fp<0x7FC00000> : !cir.float + // CHECK: cir.const #cir.fp<0xFFFFFFFF> : !cir.float + %3 = cir.const #cir.fp<0xFFFFFFFF> : !cir.float + + // F32 positive infinity. + // CHECK: cir.const #cir.fp<0x7F800000> : !cir.float + %4 = cir.const #cir.fp<0x7F800000> : !cir.float + // F32 negative infinity. + // CHECK: cir.const #cir.fp<0xFF800000> : !cir.float + %5 = cir.const #cir.fp<0xFF800000> : !cir.float + + cir.return +} + +// CHECK-LABEL: @f64_special_values +cir.func @f64_special_values() { + // F64 signaling NaNs. + // CHECK: cir.const #cir.fp<0x7FF0000000000001> : !cir.double + %0 = cir.const #cir.fp<0x7FF0000000000001> : !cir.double + // CHECK: cir.const #cir.fp<0x7FF8000000000000> : !cir.double + %1 = cir.const #cir.fp<0x7FF8000000000000> : !cir.double + + // F64 quiet NaNs. + // CHECK: cir.const #cir.fp<0x7FF0000001000000> : !cir.double + %2 = cir.const #cir.fp<0x7FF0000001000000> : !cir.double + // CHECK: cir.const #cir.fp<0xFFF0000001000000> : !cir.double + %3 = cir.const #cir.fp<0xFFF0000001000000> : !cir.double + + // F64 positive infinity. + // CHECK: cir.const #cir.fp<0x7FF0000000000000> : !cir.double + %4 = cir.const #cir.fp<0x7FF0000000000000> : !cir.double + // F64 negative infinity. + // CHECK: cir.const #cir.fp<0xFFF0000000000000> : !cir.double + %5 = cir.const #cir.fp<0xFFF0000000000000> : !cir.double + + // Check that values that can't be represented with the default format, use + // hex instead. + // CHECK: cir.const #cir.fp<0xC1CDC00000000000> : !cir.double + %6 = cir.const #cir.fp<0xC1CDC00000000000> : !cir.double + + cir.return +} + +// CHECK-LABEL: @f80_special_values +cir.func @f80_special_values() { + // F80 signaling NaNs. + // CHECK: cir.const #cir.fp<0x7FFFE000000000000001> : !cir.long_double + %0 = cir.const #cir.fp<0x7FFFE000000000000001> : !cir.long_double + // CHECK: cir.const #cir.fp<0x7FFFB000000000000011> : !cir.long_double + %1 = cir.const #cir.fp<0x7FFFB000000000000011> : !cir.long_double + + // F80 quiet NaNs. + // CHECK: cir.const #cir.fp<0x7FFFC000000000100000> : !cir.long_double + %2 = cir.const #cir.fp<0x7FFFC000000000100000> : !cir.long_double + // CHECK: cir.const #cir.fp<0x7FFFE000000001000000> : !cir.long_double + %3 = cir.const #cir.fp<0x7FFFE000000001000000> : !cir.long_double + + // F80 positive infinity. + // CHECK: cir.const #cir.fp<0x7FFF8000000000000000> : !cir.long_double + %4 = cir.const #cir.fp<0x7FFF8000000000000000> : !cir.long_double + // F80 negative infinity. + // CHECK: cir.const #cir.fp<0xFFFF8000000000000000> : !cir.long_double + %5 = cir.const #cir.fp<0xFFFF8000000000000000> : !cir.long_double + + cir.return +} + +// We want to print floats in exponential notation with 6 significant digits, +// but it may lead to precision loss when parsing back, in which case we print +// the decimal form instead. +// CHECK-LABEL: @f32_potential_precision_loss() +cir.func @f32_potential_precision_loss() { + // CHECK: cir.const #cir.fp<1.23697901> : !cir.float + %0 = cir.const #cir.fp<1.23697901> : !cir.float + cir.return +} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 7b6424abcefc..6acb9592246a 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1378,3 +1378,62 @@ module { cir.return } } +// ----- + +// Type of the attribute must be a CIR floating point type + +// expected-error @below {{invalid kind of type specified}} +cir.global external @f = #cir.fp<0.5> : !cir.int + +// ----- + +// Value must be a floating point literal or integer literal + +// expected-error @below {{expected floating point literal}} +cir.global external @f = #cir.fp<"blabla"> : !cir.float + +// ----- + +// Integer value must be in the width of the floating point type + +// expected-error @below {{hexadecimal float constant out of range for type}} +cir.global external @f = #cir.fp<0x7FC000000> : !cir.float + +// ----- + +// Integer value must be in the width of the floating point type + +// expected-error @below {{hexadecimal float constant out of range for type}} +cir.global external @f = #cir.fp<0x7FC000007FC0000000> : !cir.double + +// ----- + +// Integer value must be in the width of the floating point type + +// expected-error @below {{hexadecimal float constant out of range for type}} +cir.global external @f = #cir.fp<0x7FC0000007FC0000007FC000000> : !cir.long_double + +// ----- + +// Long double with `double` semnatics should have a value that fits in a double. + +// CHECK: cir.global external @f = #cir.fp<0x7FC000007FC000000000> : !cir.long_double +cir.global external @f = #cir.fp<0x7FC000007FC000000000> : !cir.long_double + +// expected-error @below {{hexadecimal float constant out of range for type}} +cir.global external @f = #cir.fp<0x7FC000007FC000000000> : !cir.long_double + +// ----- + +// Verify no need for type inside the attribute + +// expected-error @below {{expected '>'}} +cir.global external @f = #cir.fp<0x7FC00000 : !cir.float> : !cir.float + +// ----- + +// Verify literal must be hex or float + +// expected-error @below {{unexpected decimal integer literal for a floating point value}} +// expected-note @below {{add a trailing dot to make the literal a float}} +cir.global external @f = #cir.fp<42> : !cir.float diff --git a/clang/test/CIR/Lowering/class.cir b/clang/test/CIR/Lowering/class.cir index dd028f4c3b7d..4f0c25151179 100644 --- a/clang/test/CIR/Lowering/class.cir +++ b/clang/test/CIR/Lowering/class.cir @@ -44,7 +44,7 @@ module { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"class.S1", (i32, f32, ptr)> // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 // CHECK: %2 = llvm.insertvalue %1, %0[0] : !llvm.struct<"class.S1", (i32, f32, ptr)> - // CHECK: %3 = llvm.mlir.constant(0.099999994 : f32) : f32 + // CHECK: %3 = llvm.mlir.constant(1.000000e-01 : f32) : f32 // CHECK: %4 = llvm.insertvalue %3, %2[1] : !llvm.struct<"class.S1", (i32, f32, ptr)> // CHECK: %5 = llvm.mlir.zero : !llvm.ptr // CHECK: %6 = llvm.insertvalue %5, %4[2] : !llvm.struct<"class.S1", (i32, f32, ptr)> diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index a1a3d352c8a1..c89a58a9772e 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -44,7 +44,7 @@ module { // CHECK: %0 = llvm.mlir.undef : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: %1 = llvm.mlir.constant(1 : i32) : i32 // CHECK: %2 = llvm.insertvalue %1, %0[0] : !llvm.struct<"struct.S1", (i32, f32, ptr)> - // CHECK: %3 = llvm.mlir.constant(0.099999994 : f32) : f32 + // CHECK: %3 = llvm.mlir.constant(1.000000e-01 : f32) : f32 // CHECK: %4 = llvm.insertvalue %3, %2[1] : !llvm.struct<"struct.S1", (i32, f32, ptr)> // CHECK: %5 = llvm.mlir.zero : !llvm.ptr // CHECK: %6 = llvm.insertvalue %5, %4[2] : !llvm.struct<"struct.S1", (i32, f32, ptr)> From abbf84708c11ab5e2c9f7e4b3f40ca4cee52e2bc Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 8 Nov 2024 13:44:02 -0500 Subject: [PATCH 2051/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqmovn_v (#1071) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 10 +++ clang/test/CIR/CodeGen/AArch64/neon-misc.c | 72 +++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 5586c40c8e48..12616d43fc7f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2516,6 +2516,16 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "aarch64.neon.shadd"; break; } + + case NEON::BI__builtin_neon_vqmovn_v: { + intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.uqxtn" + : "aarch64.neon.sqxtn"; + argTypes.push_back(builder.getExtendedOrTruncatedElementVectorType( + vTy, true /* extended */, + mlir::cast(vTy.getEltType()).isSigned())); + break; + } + case NEON::BI__builtin_neon_vqmovun_v: { intrincsName = "aarch64.neon.sqxtun"; argTypes.push_back(builder.getExtendedOrTruncatedElementVectorType( diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index abbf2510b466..2869a95fe884 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -788,3 +788,75 @@ uint64x2_t test_vtstq_u64(uint64x2_t v1, uint64x2_t v2) { // LLVM: [[VTST_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64> // LLVM: ret <2 x i64> [[VTST_I]] } + +int8x8_t test_vqmovn_s16(int16x8_t a) { + return vqmovn_s16(a); + + // CIR-LABEL: vqmovn_s16 + // {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqxtn" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqmovn_s16(<8 x i16>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VQMOVN_V1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> [[A]]) + // LLVM: ret <8 x i8> [[VQMOVN_V1_I]] +} + +int16x4_t test_vqmovn_s32(int32x4_t a) { + return vqmovn_s32(a); + + // CIR-LABEL: vqmovn_s32 + // {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqxtn" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqmovn_s32(<4 x i32>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VQMOVN_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> [[A]]) + // LLVM: ret <4 x i16> [[VQMOVN_V1_I]] +} + +int32x2_t test_vqmovn_s64(int64x2_t a) { + return vqmovn_s64(a); + + // CIR-LABEL: vqmovn_s64 + // {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqxtn" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqmovn_s64(<2 x i64>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VQMOVN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> [[A]]) + // LLVM: ret <2 x i32> [[VQMOVN_V1_I]] +} + +uint8x8_t test_vqmovn_u16(uint16x8_t a) { + return vqmovn_u16(a); + + // CIR-LABEL: vqmovn_u16 + // {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqxtn" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqmovn_u16(<8 x i16>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VQMOVN_V1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> [[A]]) + // LLVM: ret <8 x i8> [[VQMOVN_V1_I]] +} + +uint16x4_t test_vqmovn_u32(uint32x4_t a) { + return vqmovn_u32(a); + + // CIR-LABEL: vqmovn_u32 + // {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqxtn" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqmovn_u32(<4 x i32>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VQMOVN_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> [[A]]) + // LLVM: ret <4 x i16> [[VQMOVN_V1_I]] +} + +uint32x2_t test_vqmovn_u64(uint64x2_t a) { + return vqmovn_u64(a); + + // CIR-LABEL: vqmovn_u64 + // {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqxtn" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vqmovn_u64(<2 x i64>{{.*}}[[A:%[a-z0-9]+]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VQMOVN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> [[A]]) + // LLVM: ret <2 x i32> [[VQMOVN_V1_I]] +} From 7dd16786130965916c8142a9c5bc4c3131e45e5b Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 8 Nov 2024 13:44:27 -0500 Subject: [PATCH 2052/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vaesmcq_u8 (#1072) The test case is from [clang/test/CodeGen/neon-crypto.c](https://github.com/llvm/clangir/blob/dbf320e5c3db0410566ae561067c595308870bad/clang/test/CodeGen/neon-crypto.c#L28) Need a dedicated test file as RunOptions has crypto related target features. --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 6 ++++- clang/test/CIR/CodeGen/AArch64/neon-crypto.c | 25 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/AArch64/neon-crypto.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 12616d43fc7f..8d9f30eae085 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2474,7 +2474,11 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( default: llvm::errs() << getAArch64SIMDIntrinsicString(builtinID) << " "; llvm_unreachable("NYI"); - + case NEON::BI__builtin_neon_vaesmcq_u8: { + intrincsName = "aarch64.crypto.aesmc"; + argTypes.push_back(vTy); + break; + } case NEON::BI__builtin_neon_vpadd_v: case NEON::BI__builtin_neon_vpaddq_v: { intrincsName = mlir::isa(vTy.getEltType()) diff --git a/clang/test/CIR/CodeGen/AArch64/neon-crypto.c b/clang/test/CIR/CodeGen/AArch64/neon-crypto.c new file mode 100644 index 000000000000..7b2f7be0efa4 --- /dev/null +++ b/clang/test/CIR/CodeGen/AArch64/neon-crypto.c @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ +// RUN: -target-feature +sha2 -target-feature +aes \ +// RUN: -disable-O0-optnone -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ +// RUN: -target-feature +sha2 -target-feature +aes \ +// RUN: -disable-O0-optnone -emit-llvm -o - %s \ +// RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// REQUIRES: aarch64-registered-target || arm-registered-target + +#include + +uint8x16_t test_vaesmcq_u8(uint8x16_t data) { + return vaesmcq_u8(data); + + // CIR-LABEL: vaesmcq_u8 + // {{%.*}} = cir.llvm.intrinsic "aarch64.crypto.aesmc" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}vaesmcq_u8(<16 x i8>{{.*}}[[DATA:%.*]]) + // LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> [[DATA]]) + // LLVM: ret <16 x i8> [[RES]] +} From d815f580bae37e8c2a0533eabc53f6322dcd28ec Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Fri, 8 Nov 2024 10:46:20 -0800 Subject: [PATCH 2053/2301] [CIR][CIRGen] Move CIRGen types into clang::CIRGen (#1082) https://github.com/llvm/clangir/issues/1025 explains why we want to move the CIR dialect from the `mlir::cir` to the `cir` namespace. To avoid overloading the `cir` namespace too much afterwards, move all symbols whose equivalents live inside the `clang::CodeGen` namespace to a new `clang::CIRGen` namespace, so that we match the original CodeGen's structure more closely. There's some symbols that live under `clang/include/clang/CIR` whose equivalents live in `clang/lib/CodeGen` and are in the `clang::CodeGen` namespace. We have these symbols in a common location since they're also used by lowering, so I've also left them in the `cir` namespace. Those symbols are: - AArch64ABIKind - ABIArgInfo - FnInfoOpts - TypeEvaluationKind - X86AVXABILevel This is a pretty large PR out of necessity. To make it slightly more reviewable, I've split it out into three commits (which will be squashed together when the PR lands): - The first commit manually switches places to the `clang::CIRGen` namespace. This has to be manual because we only want to move things selectively. - The second commit adjusts namespace prefixes to make builds work. I ran https://gist.github.com/smeenai/f4dd441fb61c53e835c4e6057f8c322f to make this change. The script is idempotent, and I added substitutions one at a time and reviewed each one afterwards (using `git diff --color-words=.`) to ensure only intended changes were being made. - The third commit runs `git clang-format`. Because I went one-by-one with all my substitutions and checked each one afterwards, I'm pretty confident in the validity of all the changes (despite the size of the PR). --- clang/include/clang/CIR/CIRGenerator.h | 8 +- clang/lib/CIR/CodeGen/ABIInfo.h | 5 +- clang/lib/CIR/CodeGen/Address.h | 4 +- clang/lib/CIR/CodeGen/CIRAsm.cpp | 20 +-- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 32 ++--- clang/lib/CIR/CodeGen/CIRGenBuilder.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 54 ++++---- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 40 +++--- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 16 +-- clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 16 +-- clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 6 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 78 +++++------ clang/lib/CIR/CodeGen/CIRGenCall.h | 4 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 58 ++++---- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 8 +- clang/lib/CIR/CodeGen/CIRGenCleanup.h | 4 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenCstEmitter.h | 4 +- clang/lib/CIR/CodeGen/CIRGenDebugInfo.h | 4 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 52 +++---- clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 12 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 130 +++++++++--------- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 42 +++--- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 24 ++-- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 18 +-- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 55 ++++---- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 126 ++++++++--------- clang/lib/CIR/CodeGen/CIRGenFunction.h | 26 ++-- clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h | 12 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 72 +++++----- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 86 ++++++------ clang/lib/CIR/CodeGen/CIRGenModule.h | 7 +- clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h | 4 +- clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp | 20 +-- clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h | 4 +- clang/lib/CIR/CodeGen/CIRGenPointerAuth.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 4 +- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 52 +++---- clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenTBAA.h | 4 +- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 4 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 12 +- clang/lib/CIR/CodeGen/CIRGenTypes.h | 10 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenVTables.h | 4 +- clang/lib/CIR/CodeGen/CIRGenValue.h | 4 +- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 4 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 6 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 2 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 10 +- clang/lib/CIR/CodeGen/ConstantInitFuture.h | 17 +-- clang/lib/CIR/CodeGen/EHScopeStack.h | 4 +- clang/lib/CIR/CodeGen/TargetInfo.cpp | 86 ++++++------ clang/lib/CIR/CodeGen/TargetInfo.h | 4 +- 60 files changed, 671 insertions(+), 666 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 85fed2c926d8..78e6055ca993 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -34,12 +34,14 @@ namespace clang { class ASTContext; class DeclGroupRef; class FunctionDecl; -} // namespace clang -namespace cir { +namespace CIRGen { class CIRGenModule; class CIRGenTypes; +} // namespace CIRGen +} // namespace clang +namespace cir { class CIRGenerator : public clang::ASTConsumer { virtual void anchor(); clang::DiagnosticsEngine &Diags; @@ -70,7 +72,7 @@ class CIRGenerator : public clang::ASTConsumer { protected: std::unique_ptr mlirCtx; - std::unique_ptr CGM; + std::unique_ptr CGM; private: llvm::SmallVector DeferredInlineMemberFuncDefs; diff --git a/clang/lib/CIR/CodeGen/ABIInfo.h b/clang/lib/CIR/CodeGen/ABIInfo.h index a4cd7a5a666c..6ac37bb01350 100644 --- a/clang/lib/CIR/CodeGen/ABIInfo.h +++ b/clang/lib/CIR/CodeGen/ABIInfo.h @@ -11,9 +11,8 @@ #include "clang/AST/Type.h" -namespace cir { +namespace clang::CIRGen { -class ABIArgInfo; class CIRGenCXXABI; class CIRGenFunctionInfo; class CIRGenTypes; @@ -42,6 +41,6 @@ class ABIInfo { bool isPromotableIntegerTypeForABI(clang::QualType Ty) const; }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 433aa5db6b89..db5878b2fec2 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -21,7 +21,7 @@ #include "mlir/IR/Value.h" -namespace cir { +namespace clang::CIRGen { // Indicates whether a pointer is known not to be null. enum KnownNonNull_t { NotKnownNonNull, KnownNonNull }; @@ -143,6 +143,6 @@ class Address { } }; -} // namespace cir +} // namespace clang::CIRGen #endif // LLVM_CLANG_LIB_CIR_ADDRESS_H diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 252f20b186b7..781a74c67a21 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -5,8 +5,8 @@ #include "TargetInfo.h" #include "clang/CIR/MissingFeatures.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; using namespace mlir::cir; static bool isAggregateType(mlir::Type typ) { @@ -285,7 +285,7 @@ static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S, mlir::Type TruncTy = ResultTruncRegTypes[i]; if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) { - assert(!MissingFeatures::asmLLVMAssume()); + assert(!cir::MissingFeatures::asmLLVMAssume()); } // If the result type of the LLVM IR asm doesn't match the result type of @@ -311,7 +311,7 @@ static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S, } else if (isa(TruncTy)) { Tmp = Builder.createIntCast(Tmp, TruncTy); } else if (false /*TruncTy->isVectorTy()*/) { - assert(!MissingFeatures::asmVectorType()); + assert(!cir::MissingFeatures::asmVectorType()); } } @@ -468,7 +468,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { } // Update largest vector width for any vector types. - assert(!MissingFeatures::asmVectorType()); + assert(!cir::MissingFeatures::asmVectorType()); } else { Address DestAddr = Dest.getAddress(); @@ -504,7 +504,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { Arg = builder.createBitcast(Arg, AdjTy); // Update largest vector width for any vector types. - assert(!MissingFeatures::asmVectorType()); + assert(!cir::MissingFeatures::asmVectorType()); // Only tie earlyclobber physregs. if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber())) @@ -521,7 +521,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX) // to the return value slot. Only do this when returning in registers. if (isa(&S)) { - const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); + const cir::ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); if (RetAI.isDirect() || RetAI.isExtend()) { // Make a fake lvalue for the return value slot. LValue ReturnSlot = makeAddrLValue(ReturnValue, FnRetTy); @@ -593,7 +593,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { << InputExpr->getType() << InputConstraint; // Update largest vector width for any vector types. - assert(!MissingFeatures::asmVectorType()); + assert(!cir::MissingFeatures::asmVectorType()); ArgTypes.push_back(Arg.getType()); ArgElemTypes.push_back(ArgElemType); @@ -636,11 +636,11 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { HasSideEffect, inferFlavor(CGM, S), mlir::ArrayAttr()); if (false /*IsGCCAsmGoto*/) { - assert(!MissingFeatures::asmGoto()); + assert(!cir::MissingFeatures::asmGoto()); } else if (HasUnwindClobber) { - assert(!MissingFeatures::asmUnwindClobber()); + assert(!cir::MissingFeatures::asmUnwindClobber()); } else { - assert(!MissingFeatures::asmMemoryEffects()); + assert(!cir::MissingFeatures::asmMemoryEffects()); mlir::Value result; if (IA.getNumResults()) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index ea887b8ff67c..6e1c05949a33 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -30,8 +30,8 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Value.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; namespace { class AtomicInfo { @@ -42,7 +42,7 @@ class AtomicInfo { uint64_t ValueSizeInBits; CharUnits AtomicAlign; CharUnits ValueAlign; - TypeEvaluationKind EvaluationKind; + cir::TypeEvaluationKind EvaluationKind; bool UseLibcall; LValue LVal; CIRGenBitFieldInfo BFI; @@ -51,7 +51,7 @@ class AtomicInfo { public: AtomicInfo(CIRGenFunction &CGF, LValue &lvalue, mlir::Location l) : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0), - EvaluationKind(TEK_Scalar), UseLibcall(true), loc(l) { + EvaluationKind(cir::TEK_Scalar), UseLibcall(true), loc(l) { assert(!lvalue.isGlobalReg()); ASTContext &C = CGF.getContext(); if (lvalue.isSimple()) { @@ -102,7 +102,7 @@ class AtomicInfo { CharUnits getAtomicAlignment() const { return AtomicAlign; } uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; } uint64_t getValueSizeInBits() const { return ValueSizeInBits; } - TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; } + cir::TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; } bool shouldUseLibcall() const { return UseLibcall; } const LValue &getAtomicLValue() const { return LVal; } mlir::Value getAtomicPointer() const { @@ -287,13 +287,13 @@ bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const { switch (getEvaluationKind()) { // For scalars and complexes, check whether the store size of the // type uses the full size. - case TEK_Scalar: + case cir::TEK_Scalar: return !isFullSizeType(CGF.CGM, ty, AtomicSizeInBits); - case TEK_Complex: + case cir::TEK_Complex: llvm_unreachable("NYI"); // Padding in structs has an undefined bit pattern. User beware. - case TEK_Aggregate: + case cir::TEK_Aggregate: return false; } llvm_unreachable("bad evaluation kind"); @@ -545,7 +545,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, mlir::Value IsWeak, mlir::Value FailureOrder, uint64_t Size, mlir::cir::MemOrder Order, uint8_t Scope) { - assert(!MissingFeatures::syncScopeID()); + assert(!cir::MissingFeatures::syncScopeID()); StringRef Op; auto &builder = CGF.getBuilder(); @@ -592,7 +592,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__scoped_atomic_load: { auto *load = builder.createLoad(loc, Ptr).getDefiningOp(); // FIXME(cir): add scope information. - assert(!MissingFeatures::syncScopeID()); + assert(!cir::MissingFeatures::syncScopeID()); load->setAttr("mem_order", orderAttr); if (E->isVolatile()) load->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); @@ -618,7 +618,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__scoped_atomic_store_n: { auto loadVal1 = builder.createLoad(loc, Val1); // FIXME(cir): add scope information. - assert(!MissingFeatures::syncScopeID()); + assert(!cir::MissingFeatures::syncScopeID()); builder.createStore(loc, loadVal1, Ptr, E->isVolatile(), /*alignment=*/mlir::IntegerAttr{}, orderAttr); return; @@ -791,7 +791,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, // LLVM atomic instructions always have synch scope. If clang atomic // expression has no scope operand, use default LLVM synch scope. if (!ScopeModel) { - assert(!MissingFeatures::syncScopeID()); + assert(!cir::MissingFeatures::syncScopeID()); buildAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, Order, /*FIXME(cir): LLVM default scope*/ 1); return; @@ -799,7 +799,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, // Handle constant scope. if (getConstOpIntAttr(Scope)) { - assert(!MissingFeatures::syncScopeID()); + assert(!cir::MissingFeatures::syncScopeID()); llvm_unreachable("NYI"); return; } @@ -1469,7 +1469,7 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, store.setIsVolatile(true); // DecorateInstructionWithTBAA - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); return; } @@ -1480,18 +1480,18 @@ void CIRGenFunction::buildAtomicInit(Expr *init, LValue dest) { AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange())); switch (atomics.getEvaluationKind()) { - case TEK_Scalar: { + case cir::TEK_Scalar: { mlir::Value value = buildScalarExpr(init); atomics.emitCopyIntoMemory(RValue::get(value)); return; } - case TEK_Complex: { + case cir::TEK_Complex: { llvm_unreachable("NYI"); return; } - case TEK_Aggregate: { + case cir::TEK_Aggregate: { // Fix up the destination if the initializer isn't an expression // of atomic type. llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp index 13ec20d8eda2..e9fd05a71633 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// #include "CIRGenBuilder.h" -namespace cir { +using namespace clang::CIRGen; mlir::Value CIRGenBuilderTy::maybeBuildArrayDecay(mlir::Location loc, mlir::Value arrayPtr, @@ -67,4 +67,3 @@ mlir::cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, return create(loc, intTy, mlir::cir::IntAttr::get(t, C)); } -} // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 22e3d56bd78b..d58de4cb5d76 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -43,14 +43,14 @@ #include #include -namespace cir { +namespace clang::CIRGen { class CIRGenFunction; -class CIRGenBuilderTy : public CIRBaseBuilderTy { +class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { const CIRGenTypeCache &typeCache; bool IsFPConstrained = false; - fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict; + cir::fp::ExceptionBehavior DefaultConstrainedExcept = cir::fp::ebStrict; llvm::RoundingMode DefaultConstrainedRounding = llvm::RoundingMode::Dynamic; llvm::StringMap GlobalsVersioning; @@ -96,10 +96,10 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } /// Set the exception handling to be used with constrained floating point - void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) { + void setDefaultConstrainedExcept(cir::fp::ExceptionBehavior NewExcept) { #ifndef NDEBUG std::optional ExceptStr = - convertExceptionBehaviorToStr(NewExcept); + cir::convertExceptionBehaviorToStr(NewExcept); assert(ExceptStr && "Garbage strict exception behavior!"); #endif DefaultConstrainedExcept = NewExcept; @@ -109,14 +109,14 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { void setDefaultConstrainedRounding(llvm::RoundingMode NewRounding) { #ifndef NDEBUG std::optional RoundingStr = - convertRoundingModeToStr(NewRounding); + cir::convertRoundingModeToStr(NewRounding); assert(RoundingStr && "Garbage strict rounding mode!"); #endif DefaultConstrainedRounding = NewRounding; } /// Get the exception handling used with constrained floating point - fp::ExceptionBehavior getDefaultConstrainedExcept() { + cir::fp::ExceptionBehavior getDefaultConstrainedExcept() { return DefaultConstrainedExcept; } @@ -422,7 +422,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special // type so it's a bit more clear and C++ idiomatic. auto fnTy = mlir::cir::FuncType::get({}, getUInt32Ty(), isVarArg); - assert(!MissingFeatures::isVarArg()); + assert(!cir::MissingFeatures::isVarArg()); return getPointerTo(getPointerTo(fnTy)); } @@ -657,30 +657,30 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { } mlir::Value createFSub(mlir::Value lhs, mlir::Value rhs) { - assert(!MissingFeatures::metaDataNode()); + assert(!cir::MissingFeatures::metaDataNode()); if (IsFPConstrained) llvm_unreachable("Constrained FP NYI"); - assert(!MissingFeatures::foldBinOpFMF()); + assert(!cir::MissingFeatures::foldBinOpFMF()); return create(lhs.getLoc(), mlir::cir::BinOpKind::Sub, lhs, rhs); } mlir::Value createFAdd(mlir::Value lhs, mlir::Value rhs) { - assert(!MissingFeatures::metaDataNode()); + assert(!cir::MissingFeatures::metaDataNode()); if (IsFPConstrained) llvm_unreachable("Constrained FP NYI"); - assert(!MissingFeatures::foldBinOpFMF()); + assert(!cir::MissingFeatures::foldBinOpFMF()); return create(lhs.getLoc(), mlir::cir::BinOpKind::Add, lhs, rhs); } mlir::Value createFMul(mlir::Value lhs, mlir::Value rhs) { - assert(!MissingFeatures::metaDataNode()); + assert(!cir::MissingFeatures::metaDataNode()); if (IsFPConstrained) llvm_unreachable("Constrained FP NYI"); - assert(!MissingFeatures::foldBinOpFMF()); + assert(!cir::MissingFeatures::foldBinOpFMF()); return create(lhs.getLoc(), mlir::cir::BinOpKind::Mul, lhs, rhs); } @@ -697,16 +697,16 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value createDynCastToVoid(mlir::Location loc, mlir::Value src, bool vtableUseRelativeLayout) { // TODO(cir): consider address space here. - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); auto destTy = getVoidPtrTy(); return create( loc, destTy, mlir::cir::DynamicCastKind::ptr, src, mlir::cir::DynamicCastInfoAttr{}, vtableUseRelativeLayout); } - cir::Address createBaseClassAddr(mlir::Location loc, cir::Address addr, - mlir::Type destType, unsigned offset, - bool assumeNotNull) { + Address createBaseClassAddr(mlir::Location loc, Address addr, + mlir::Type destType, unsigned offset, + bool assumeNotNull) { if (destType == addr.getElementType()) return addr; @@ -716,9 +716,9 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { return Address(baseAddr, ptrTy, addr.getAlignment()); } - cir::Address createDerivedClassAddr(mlir::Location loc, cir::Address addr, - mlir::Type destType, unsigned offset, - bool assumeNotNull) { + Address createDerivedClassAddr(mlir::Location loc, Address addr, + mlir::Type destType, unsigned offset, + bool assumeNotNull) { if (destType == addr.getElementType()) return addr; @@ -833,8 +833,8 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { /// Cast the element type of the given address to a different type, /// preserving information like the alignment. - cir::Address createElementBitCast(mlir::Location loc, cir::Address addr, - mlir::Type destType) { + Address createElementBitCast(mlir::Location loc, Address addr, + mlir::Type destType) { if (destType == addr.getElementType()) return addr; @@ -869,7 +869,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value ptr, llvm::MaybeAlign align) { // TODO: make sure callsites shouldn't be really passing volatile. - assert(!MissingFeatures::volatileLoadOrStore()); + assert(!cir::MissingFeatures::volatileLoadOrStore()); return createAlignedLoad(loc, ty, ptr, align, /*isVolatile=*/false); } @@ -942,7 +942,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { // but currently some parts of Clang AST, which we don't want to touch just // yet, return them. void computeGlobalViewIndicesFromFlatOffset( - int64_t Offset, mlir::Type Ty, CIRDataLayout Layout, + int64_t Offset, mlir::Type Ty, cir::CIRDataLayout Layout, llvm::SmallVectorImpl &Indices) { if (!Offset) return; @@ -1046,7 +1046,7 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::cast(memberPtr.getType()); // TODO(cir): consider address space. - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); auto resultTy = getPointerTo(memberPtrTy.getMemberTy()); return create(loc, resultTy, objectPtr, @@ -1067,5 +1067,5 @@ class CIRGenBuilderTy : public CIRBaseBuilderTy { mlir::Type eltTy); }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 00bc27ef6087..55ec6977410f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -34,8 +34,8 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "llvm/Support/ErrorHandling.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; using namespace mlir::cir; using namespace llvm; @@ -49,7 +49,7 @@ static RValue buildLibraryCall(CIRGenFunction &CGF, const FunctionDecl *FD, static mlir::Value tryUseTestFPKind(CIRGenFunction &CGF, unsigned BuiltinID, mlir::Value V) { if (CGF.getBuilder().getIsFPConstrained() && - CGF.getBuilder().getDefaultConstrainedExcept() != fp::ebIgnore) { + CGF.getBuilder().getDefaultConstrainedExcept() != cir::fp::ebIgnore) { if (mlir::Value Result = CGF.getTargetHooks().testFPKind( V, BuiltinID, CGF.getBuilder(), CGF.CGM)) return Result; @@ -509,7 +509,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_cosf16: case Builtin::BI__builtin_cosl: case Builtin::BI__builtin_cosf128: - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIcosh: @@ -530,7 +530,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_expf16: case Builtin::BI__builtin_expl: case Builtin::BI__builtin_expf128: - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIexp2: @@ -541,7 +541,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_exp2f16: case Builtin::BI__builtin_exp2l: case Builtin::BI__builtin_exp2f128: - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BI__builtin_exp10: @@ -615,7 +615,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmod: case Builtin::BI__builtin_fmodf: case Builtin::BI__builtin_fmodl: - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); return buildBinaryFPBuiltin(*this, *E); case Builtin::BI__builtin_fmodf16: @@ -631,7 +631,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_logf16: case Builtin::BI__builtin_logl: case Builtin::BI__builtin_logf128: - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlog10: @@ -642,7 +642,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log10f16: case Builtin::BI__builtin_log10l: case Builtin::BI__builtin_log10f128: - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlog2: @@ -653,7 +653,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log2f16: case Builtin::BI__builtin_log2l: case Builtin::BI__builtin_log2f128: - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BInearbyint: @@ -671,7 +671,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_pow: case Builtin::BI__builtin_powf: case Builtin::BI__builtin_powl: - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); return RValue::get( buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); @@ -717,7 +717,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sinf16: case Builtin::BI__builtin_sinl: case Builtin::BI__builtin_sinf128: - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BIsqrt: @@ -728,7 +728,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sqrtf16: case Builtin::BI__builtin_sqrtl: case Builtin::BI__builtin_sqrtf128: - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); return buildUnaryFPBuiltin(*this, *E); case Builtin::BI__builtin_elementwise_sqrt: @@ -974,7 +974,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_unpredictable: { if (CGM.getCodeGenOpts().OptimizationLevel != 0) - assert(!MissingFeatures::insertBuiltinUnpredictable()); + assert(!cir::MissingFeatures::insertBuiltinUnpredictable()); return RValue::get(buildScalarExpr(E->getArg(0))); } @@ -1373,7 +1373,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // default (e.g. in C / C++ auto vars are in the generic address space). At // the AST level this is handled within CreateTempAlloca et al., but for the // builtin / dynamic alloca we have to handle it here. - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); auto AAS = getCIRAllocaAddressSpace(); auto EAS = builder.getAddrSpaceAttr( E->getType()->getPointeeType().getAddressSpace()); @@ -2301,21 +2301,21 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force // ReturnValue to be non-null, so that the target-specific emission code can // always just emit into it. - TypeEvaluationKind EvalKind = getEvaluationKind(E->getType()); - if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) { + cir::TypeEvaluationKind EvalKind = getEvaluationKind(E->getType()); + if (EvalKind == cir::TEK_Aggregate && ReturnValue.isNull()) { llvm_unreachable("NYI"); } // Now see if we can emit a target-specific builtin. if (auto V = buildTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { switch (EvalKind) { - case TEK_Scalar: + case cir::TEK_Scalar: if (mlir::isa(V.getType())) return RValue::get(nullptr); return RValue::get(V); - case TEK_Aggregate: + case cir::TEK_Aggregate: llvm_unreachable("NYI"); - case TEK_Complex: + case cir::TEK_Complex: llvm_unreachable("No current target builtin returns complex"); } llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr"); @@ -2336,7 +2336,7 @@ mlir::Value CIRGenFunction::buildCheckedArgForBuiltin(const Expr *E, if (!SanOpts.has(SanitizerKind::Builtin)) return value; - assert(!MissingFeatures::sanitizerBuiltin()); + assert(!cir::MissingFeatures::sanitizerBuiltin()); llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 8d9f30eae085..11fa4e2f94a4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -36,8 +36,8 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/ErrorHandling.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; using namespace mlir::cir; using namespace llvm; @@ -2257,13 +2257,13 @@ mlir::Value buildNeonCall(CIRGenBuilderTy &builder, unsigned shift = 0, bool rightshift = false) { // TODO: Consider removing the following unreachable when we have // buildConstrainedFPCall feature implemented - assert(!MissingFeatures::buildConstrainedFPCall()); + assert(!cir::MissingFeatures::buildConstrainedFPCall()); if (isConstrainedFPIntrinsic) llvm_unreachable("isConstrainedFPIntrinsic NYI"); for (unsigned j = 0; j < argTypes.size(); ++j) { if (isConstrainedFPIntrinsic) { - assert(!MissingFeatures::buildConstrainedFPCall()); + assert(!cir::MissingFeatures::buildConstrainedFPCall()); } if (shift > 0 && shift == j) { args[j] = buildNeonShiftVector( @@ -2274,7 +2274,7 @@ mlir::Value buildNeonCall(CIRGenBuilderTy &builder, } } if (isConstrainedFPIntrinsic) { - assert(!MissingFeatures::buildConstrainedFPCall()); + assert(!cir::MissingFeatures::buildConstrainedFPCall()); return nullptr; } return builder @@ -2311,8 +2311,8 @@ buildCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, - llvm::SmallVectorImpl &ops, cir::Address ptrOp0, - cir::Address ptrOp1, llvm::Triple::ArchType arch) { + llvm::SmallVectorImpl &ops, Address ptrOp0, Address ptrOp1, + llvm::Triple::ArchType arch) { // Get the last argument, which specifies the vector type. const clang::Expr *arg = e->getArg(e->getNumArgs() - 1); std::optional neonTypeConst = @@ -3391,7 +3391,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vrnda_v: case NEON::BI__builtin_neon_vrndaq_v: { - assert(!MissingFeatures::buildConstrainedFPCall()); + assert(!cir::MissingFeatures::buildConstrainedFPCall()); return buildNeonCall(builder, {ty}, Ops, "round", ty, getLoc(E->getExprLoc())); } @@ -3742,7 +3742,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vld1_dup_v: case NEON::BI__builtin_neon_vld1q_dup_v: { - cir::Address ptrAddr = PtrOp0.withElementType(vTy.getEltType()); + Address ptrAddr = PtrOp0.withElementType(vTy.getEltType()); mlir::Value val = builder.createLoad(getLoc(E->getExprLoc()), ptrAddr); mlir::cir::VecSplatOp vecSplat = builder.create( getLoc(E->getExprLoc()), vTy, val); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp index d6e23a9f0a25..ffcaae1eb5b0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp @@ -27,11 +27,11 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/ErrorHandling.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; using namespace mlir::cir; mlir::Value CIRGenFunction::buildX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E) { llvm_unreachable("NYI"); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 2cf867482149..f77b61e51a56 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -22,7 +22,7 @@ #include using namespace clang; -using namespace cir; +using namespace clang::CIRGen; /// Try to emit a base destructor as an alias to its primary /// base-class destructor. @@ -39,7 +39,7 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // an alias, unless this class owns no members. if (getCodeGenOpts().SanitizeMemoryUseAfterDtor && !D->getParent()->field_empty()) - assert(!MissingFeatures::sanitizeDtor()); + assert(!cir::MissingFeatures::sanitizeDtor()); // If the destructor doesn't have a trivial body, we have to emit it // separately. @@ -192,17 +192,17 @@ static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, const Expr *Init = D->getInit(); switch (CIRGenFunction::getEvaluationKind(type)) { - case TEK_Aggregate: + case cir::TEK_Aggregate: CGF.buildAggExpr( Init, AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); return; - case TEK_Scalar: + case cir::TEK_Scalar: CGF.buildScalarInit(Init, CGF.getLoc(D->getLocation()), lv, false); return; - case TEK_Complex: + case cir::TEK_Complex: llvm_unreachable("complext evaluation NYI"); } } @@ -254,7 +254,7 @@ static void buildDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) { if (Record && (CanRegisterDestructor || UsingExternalHelper)) { assert(!D->getTLSKind() && "TLS NYI"); assert(!Record->hasTrivialDestructor()); - assert(!MissingFeatures::openCLCXX()); + assert(!cir::MissingFeatures::openCLCXX()); CXXDestructorDecl *Dtor = Record->getDestructor(); // In LLVM OG codegen this is done in registerGlobalDtor, but CIRGen // relies on LoweringPrepare for further decoupling, so build the @@ -304,7 +304,7 @@ void CIRGenFunction::buildInvariantStart([[maybe_unused]] CharUnits Size) { if (!CGM.getCodeGenOpts().OptimizationLevel) return; - assert(!MissingFeatures::createInvariantIntrinsic()); + assert(!cir::MissingFeatures::createInvariantIntrinsic()); } void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, @@ -329,7 +329,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, // For example, in the above CUDA code, the static local variable s has a // "shared" address space qualifier, but the constructor of StructWithCtor // expects "this" in the "generic" address space. - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && varDecl->hasAttr()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp index e4b38b99dbfd..8ee041423e00 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -18,8 +18,8 @@ #include "clang/AST/Mangle.h" #include "clang/AST/RecordLayout.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; CIRGenCXXABI::~CIRGenCXXABI() {} @@ -84,4 +84,4 @@ std::vector CIRGenCXXABI::getVBPtrOffsets(const CXXRecordDecl *RD) { bool CIRGenCXXABI::isZeroInitializable(const MemberPointerType *MPT) { // Fake answer. return true; -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 1b0dca9512d0..dae488656474 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -22,7 +22,7 @@ #include "mlir/IR/Attributes.h" #include "clang/AST/Mangle.h" -namespace cir { +namespace clang::CIRGen { class CIRGenFunction; class CIRGenFunctionInfo; @@ -30,7 +30,7 @@ class CIRGenFunctionInfo; /// Implements C++ ABI-specific code generation functions. class CIRGenCXXABI { protected: - cir::CIRGenModule &CGM; + CIRGenModule &CGM; std::unique_ptr MangleCtx; CIRGenCXXABI(CIRGenModule &CGM) @@ -354,6 +354,6 @@ class CIRGenCXXABI { /// Creates and Itanium-family ABI CIRGenCXXABI *CreateCIRGenItaniumCXXABI(CIRGenModule &CGM); -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 0c3234a6519e..ca4904d60693 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -35,8 +35,8 @@ #include "mlir/IR/Types.h" #include "clang/CIR/MissingFeatures.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; CIRGenFunctionInfo *CIRGenFunctionInfo::create( mlir::cir::CallingConv cirCC, bool instanceMethod, bool chainCall, @@ -139,16 +139,16 @@ void ClangToCIRArgMapping::construct(const ASTContext &Context, bool OnlyRequiredArgs) { unsigned CIRArgNo = 0; bool SwapThisWithSRet = false; - const ABIArgInfo &RetAI = FI.getReturnInfo(); + const cir::ABIArgInfo &RetAI = FI.getReturnInfo(); - assert(RetAI.getKind() != ABIArgInfo::Indirect && "NYI"); + assert(RetAI.getKind() != cir::ABIArgInfo::Indirect && "NYI"); unsigned ArgNo = 0; unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); for (CIRGenFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; ++I, ++ArgNo) { assert(I != FI.arg_end()); - const ABIArgInfo &AI = I->info; + const cir::ABIArgInfo &AI = I->info; // Collect data about CIR arguments corresponding to Clang argument ArgNo. auto &CIRArgs = ArgInfo[ArgNo]; @@ -157,15 +157,15 @@ void ClangToCIRArgMapping::construct(const ASTContext &Context, switch (AI.getKind()) { default: llvm_unreachable("NYI"); - case ABIArgInfo::Extend: - case ABIArgInfo::Direct: { + case cir::ABIArgInfo::Extend: + case cir::ABIArgInfo::Direct: { // Postpone splitting structs into elements since this makes it way // more complicated for analysis to obtain information on the original // arguments. // // TODO(cir): a LLVM lowering prepare pass should break this down into // the appropriated pieces. - assert(!MissingFeatures::constructABIArgDirectExtend()); + assert(!cir::MissingFeatures::constructABIArgDirectExtend()); CIRArgs.NumberOfArgs = 1; break; } @@ -206,16 +206,16 @@ mlir::cir::FuncType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { assert(Inserted && "Recursively being processed?"); mlir::Type resultType = nullptr; - const ABIArgInfo &retAI = FI.getReturnInfo(); + const cir::ABIArgInfo &retAI = FI.getReturnInfo(); switch (retAI.getKind()) { - case ABIArgInfo::Ignore: + case cir::ABIArgInfo::Ignore: // TODO(CIR): This should probably be the None type from the builtin // dialect. resultType = nullptr; break; - case ABIArgInfo::Extend: - case ABIArgInfo::Direct: + case cir::ABIArgInfo::Extend: + case cir::ABIArgInfo::Direct: resultType = retAI.getCoerceToType(); break; @@ -245,8 +245,8 @@ mlir::cir::FuncType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { switch (ArgInfo.getKind()) { default: llvm_unreachable("NYI"); - case ABIArgInfo::Extend: - case ABIArgInfo::Direct: { + case cir::ABIArgInfo::Extend: + case cir::ABIArgInfo::Direct: { mlir::Type argType = ArgInfo.getCoerceToType(); // TODO: handle the test against llvm::StructType from codegen assert(NumCIRArgs == 1); @@ -306,7 +306,7 @@ void CIRGenFunction::buildAggregateStore(mlir::Value Val, Address Dest, } static Address emitAddressAtOffset(CIRGenFunction &CGF, Address addr, - const ABIArgInfo &info) { + const cir::ABIArgInfo &info) { if (unsigned offset = info.getDirectOffset()) { llvm_unreachable("NYI"); } @@ -456,7 +456,7 @@ void CIRGenModule::constructAttributeList(StringRef Name, if (TargetDecl->hasAttr() && getLangOpts().OffloadUniformBlock) - assert(!MissingFeatures::CUDA()); + assert(!cir::MissingFeatures::CUDA()); if (TargetDecl->hasAttr()) ; @@ -592,7 +592,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // Some architectures (such as x86-64) have the ABI changed based on // attribute-target/features. Give them a chance to diagnose. - assert(!MissingFeatures::checkFunctionCallABI()); + assert(!cir::MissingFeatures::checkFunctionCallABI()); } // TODO: add DNEBUG code @@ -615,7 +615,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // When passing arguments using temporary allocas, we need to add the // appropriate lifetime markers. This vector keeps track of all the lifetime // markers that need to be ended right after the call. - assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); + assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); // Translate all of the arguments as necessary to match the CIR lowering. assert(CallInfo.arg_size() == CallArgs.size() && @@ -624,7 +624,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, CIRGenFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); I != E; ++I, ++info_it, ++ArgNo) { - const ABIArgInfo &ArgInfo = info_it->info; + const cir::ABIArgInfo &ArgInfo = info_it->info; // Insert a padding argument to ensure proper alignment. assert(!CIRFunctionArgs.hasPaddingArg(ArgNo) && "Padding args NYI"); @@ -633,7 +633,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, std::tie(FirstCIRArg, NumCIRArgs) = CIRFunctionArgs.getCIRArgs(ArgNo); switch (ArgInfo.getKind()) { - case ABIArgInfo::Direct: { + case cir::ABIArgInfo::Direct: { if (!mlir::isa(ArgInfo.getCoerceToType()) && ArgInfo.getCoerceToType() == convertType(info_it->type) && ArgInfo.getDirectOffset() == 0) { @@ -866,11 +866,11 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // Extract the return value. RValue ret = [&] { switch (RetAI.getKind()) { - case ABIArgInfo::Direct: { + case cir::ABIArgInfo::Direct: { mlir::Type RetCIRTy = convertType(RetTy); if (RetAI.getCoerceToType() == RetCIRTy && RetAI.getDirectOffset() == 0) { switch (getEvaluationKind(RetTy)) { - case TEK_Aggregate: { + case cir::TEK_Aggregate: { Address DestPtr = ReturnValue.getValue(); bool DestIsVolatile = ReturnValue.isVolatile(); @@ -886,7 +886,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, buildAggregateStore(Results[0], DestPtr, DestIsVolatile); return RValue::getAggregate(DestPtr); } - case TEK_Scalar: { + case cir::TEK_Scalar: { // If the argument doesn't match, perform a bitcast to coerce it. This // can happen due to trivial type mismatches. auto Results = theCall->getOpResults(); @@ -902,7 +902,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, } } - case ABIArgInfo::Ignore: + case cir::ABIArgInfo::Ignore: // If we are ignoring an argument that had a result, make sure to // construct the appropriate return value for our caller. return GetUndefRValue(RetTy); @@ -928,7 +928,7 @@ mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, mlir::cir::FuncOp callee, ArrayRef args) { // TODO(cir): set the calling convention to this runtime call. - assert(!MissingFeatures::setCallingConv()); + assert(!cir::MissingFeatures::setCallingConv()); auto call = builder.createCallOp(loc, callee, args); assert(call->getNumResults() <= 1 && @@ -1210,7 +1210,7 @@ CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { CanQualType resultType = Context.VoidTy; (void)resultType; - return arrangeCIRFunctionInfo(resultType, FnInfoOpts::IsInstanceMethod, + return arrangeCIRFunctionInfo(resultType, cir::FnInfoOpts::IsInstanceMethod, argTypes, extInfo, paramInfos, required); } @@ -1235,7 +1235,7 @@ CanQualType CIRGenTypes::DeriveThisType(const CXXRecordDecl *RD, /// Arrange the CIR function layout for a value of the given function type, on /// top of any implicit parameters already stored. static const CIRGenFunctionInfo & -arrangeCIRFunctionInfo(CIRGenTypes &CGT, FnInfoOpts instanceMethod, +arrangeCIRFunctionInfo(CIRGenTypes &CGT, cir::FnInfoOpts instanceMethod, SmallVectorImpl &prefix, CanQual FTP) { SmallVector paramInfos; @@ -1253,7 +1253,7 @@ arrangeCIRFunctionInfo(CIRGenTypes &CGT, FnInfoOpts instanceMethod, const CIRGenFunctionInfo & CIRGenTypes::arrangeFreeFunctionType(CanQual FTP) { SmallVector argTypes; - return ::arrangeCIRFunctionInfo(*this, FnInfoOpts::None, argTypes, FTP); + return ::arrangeCIRFunctionInfo(*this, cir::FnInfoOpts::None, argTypes, FTP); } /// Arrange the argument and result information for a value of the given @@ -1263,7 +1263,7 @@ CIRGenTypes::arrangeFreeFunctionType(CanQual FTNP) { // When translating an unprototyped function type, always use a // variadic type. return arrangeCIRFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), - FnInfoOpts::None, std::nullopt, + cir::FnInfoOpts::None, std::nullopt, FTNP->getExtInfo(), {}, RequiredArgs(0)); } @@ -1314,7 +1314,7 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXConstructorCall( // which never have param info. assert(!FPT->hasExtParameterInfos() && "NYI"); - return arrangeCIRFunctionInfo(ResultType, FnInfoOpts::IsInstanceMethod, + return arrangeCIRFunctionInfo(ResultType, cir::FnInfoOpts::IsInstanceMethod, ArgTypes, Info, ParamInfos, Required); } @@ -1396,9 +1396,9 @@ static const CIRGenFunctionInfo & arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, - FnInfoOpts chainCall) { + cir::FnInfoOpts chainCall) { assert(args.size() >= numExtraRequiredArgs); - assert((chainCall != FnInfoOpts::IsChainCall) && "Chain call NYI"); + assert((chainCall != cir::FnInfoOpts::IsChainCall) && "Chain call NYI"); llvm::SmallVector paramInfos; @@ -1415,7 +1415,7 @@ arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, args.size()); } else if (llvm::isa(fnType)) { - assert(!MissingFeatures::targetCodeGenInfoIsProtoCallVariadic()); + assert(!cir::MissingFeatures::targetCodeGenInfoIsProtoCallVariadic()); required = RequiredArgs(args.size()); } @@ -1465,8 +1465,8 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXMethodCall( auto info = proto->getExtInfo(); return arrangeCIRFunctionInfo(GetReturnType(proto->getReturnType()), - FnInfoOpts::IsInstanceMethod, argTypes, info, - paramInfos, required); + cir::FnInfoOpts::IsInstanceMethod, argTypes, + info, paramInfos, required); } /// Figure out the rules for calling a function with the given formal type using @@ -1477,7 +1477,7 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeFreeFunctionCall( assert(!ChainCall && "ChainCall NYI"); return arrangeFreeFunctionLikeCall( *this, CGM, args, fnType, ChainCall ? 1 : 0, - ChainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None); + ChainCall ? cir::FnInfoOpts::IsChainCall : cir::FnInfoOpts::None); } /// Set calling convention for CUDA/HIP kernel. @@ -1524,7 +1524,7 @@ CIRGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, argTypes.push_back(DeriveThisType(RD, MD)); return ::arrangeCIRFunctionInfo( - *this, FnInfoOpts::IsChainCall, argTypes, + *this, cir::FnInfoOpts::IsChainCall, argTypes, FTP->getCanonicalTypeUnqualified().getAs()); } @@ -1544,9 +1544,9 @@ CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { // When declaring a function without a prototype, always use a non-variadic // type. if (CanQual noProto = FTy.getAs()) { - return arrangeCIRFunctionInfo(noProto->getReturnType(), FnInfoOpts::None, - std::nullopt, noProto->getExtInfo(), {}, - RequiredArgs::All); + return arrangeCIRFunctionInfo(noProto->getReturnType(), + cir::FnInfoOpts::None, std::nullopt, + noProto->getExtInfo(), {}, RequiredArgs::All); } return arrangeFreeFunctionType(FTy.castAs()); diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index ea8e9e546352..16a61c9c537d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -25,7 +25,7 @@ #include "mlir/IR/BuiltinOps.h" -namespace cir { +namespace clang::CIRGen { class CIRGenFunction; /// Abstract information about a function or function prototype. @@ -290,6 +290,6 @@ class ReturnValueSlot { Address getAddress() const { return Addr; } }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index bbcf98905684..7fb6a6a0645e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -20,7 +20,7 @@ #include "clang/CIR/MissingFeatures.h" using namespace clang; -using namespace cir; +using namespace clang::CIRGen; /// Checks whether the given constructor is a valid subject for the /// complete-to-base constructor delgation optimization, i.e. emitting the @@ -277,7 +277,7 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { if (!MemcpyableCtor) return false; - assert(!MissingFeatures::fieldMemcpyizerBuildMemcpy()); + assert(!cir::MissingFeatures::fieldMemcpyizerBuildMemcpy()); return false; } @@ -740,11 +740,11 @@ void CIRGenFunction::initializeVTablePointer(mlir::Location loc, // // vtable field is derived from `this` pointer, therefore they should be in // the same addr space. - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); VTableField = builder.createElementBitCast(loc, VTableField, VTableAddressPoint.getType()); builder.createStore(loc, VTableAddressPoint, VTableField); - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); } void CIRGenFunction::initializeVTablePointers(mlir::Location loc, @@ -851,17 +851,17 @@ void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init) { QualType FieldType = Field->getType(); switch (getEvaluationKind(FieldType)) { - case TEK_Scalar: + case cir::TEK_Scalar: if (LHS.isSimple()) { buildExprAsInit(Init, Field, LHS, false); } else { llvm_unreachable("NYI"); } break; - case TEK_Complex: + case cir::TEK_Complex: llvm_unreachable("NYI"); break; - case TEK_Aggregate: { + case cir::TEK_Aggregate: { AggValueSlot Slot = AggValueSlot::forLValue( LHS, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, getOverlapForFieldInit(Field), @@ -877,7 +877,7 @@ void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, // constructor. QualType::DestructionKind dtorKind = FieldType.isDestructedType(); (void)dtorKind; - if (MissingFeatures::cleanups()) + if (cir::MissingFeatures::cleanups()) llvm_unreachable("NYI"); } @@ -924,7 +924,7 @@ void CIRGenFunction::buildImplicitAssignmentOperatorBody( // LexicalScope Scope(*this, RootCS->getSourceRange()); // FIXME(cir): add all of the below under a new scope. - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); AssignmentMemcpyizer AM(*this, AssignOp, Args); for (auto *I : RootCS->body()) AM.emitAssignment(I); @@ -945,7 +945,7 @@ void CIRGenFunction::buildForwardingCallToLambda( QualType resultType = FPT->getReturnType(); ReturnValueSlot returnSlot; if (!resultType->isVoidType() && - calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect && + calleeFnInfo.getReturnInfo().getKind() == cir::ABIArgInfo::Indirect && !hasScalarEvaluationKind(calleeFnInfo.getReturnType())) { llvm_unreachable("NYI"); } @@ -1137,7 +1137,7 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { Stmt *Body = Dtor->getBody(); if (Body) - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); // The call to operator delete in a deleting destructor happens // outside of the function-try-block, which means it's always @@ -1162,7 +1162,7 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { llvm_unreachable("NYI"); // EnterCXXTryStmt(*cast(Body), true); } - if (MissingFeatures::emitAsanPrologueOrEpilogue()) + if (cir::MissingFeatures::emitAsanPrologueOrEpilogue()) llvm_unreachable("NYI"); // Enter the epilogue cleanups. @@ -1325,7 +1325,7 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && SanOpts.has(SanitizerKind::Memory) && ClassDecl->getNumVBases() && ClassDecl->isPolymorphic()) - assert(!MissingFeatures::sanitizeDtor()); + assert(!cir::MissingFeatures::sanitizeDtor()); // We push them in the forward order so that they'll be popped in // the reverse order. @@ -1337,7 +1337,7 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, // Under SanitizeMemoryUseAfterDtor, poison the trivial base class // memory. For non-trival base classes the same is done in the class // destructor. - assert(!MissingFeatures::sanitizeDtor()); + assert(!cir::MissingFeatures::sanitizeDtor()); } else { EHStack.pushCleanup(NormalAndEHCleanup, BaseClassDecl, /*BaseIsVirtual*/ true); @@ -1353,7 +1353,7 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && SanOpts.has(SanitizerKind::Memory) && !ClassDecl->getNumVBases() && ClassDecl->isPolymorphic()) - assert(!MissingFeatures::sanitizeDtor()); + assert(!cir::MissingFeatures::sanitizeDtor()); // Destroy non-virtual bases. for (const auto &Base : ClassDecl->bases()) { @@ -1366,7 +1366,7 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, if (BaseClassDecl->hasTrivialDestructor()) { if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && SanOpts.has(SanitizerKind::Memory) && !BaseClassDecl->isEmpty()) - assert(!MissingFeatures::sanitizeDtor()); + assert(!cir::MissingFeatures::sanitizeDtor()); } else { EHStack.pushCleanup(NormalAndEHCleanup, BaseClassDecl, /*BaseIsVirtual*/ false); @@ -1377,12 +1377,12 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, // invoked, and before the base class destructor runs, is invalid. bool SanitizeFields = CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && SanOpts.has(SanitizerKind::Memory); - assert(!MissingFeatures::sanitizeDtor()); + assert(!cir::MissingFeatures::sanitizeDtor()); // Destroy direct fields. for (const auto *Field : ClassDecl->fields()) { if (SanitizeFields) - assert(!MissingFeatures::sanitizeDtor()); + assert(!cir::MissingFeatures::sanitizeDtor()); QualType type = Field->getType(); QualType::DestructionKind dtorKind = type.isDestructedType(); @@ -1400,7 +1400,7 @@ void CIRGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, } if (SanitizeFields) - assert(!MissingFeatures::sanitizeDtor()); + assert(!cir::MissingFeatures::sanitizeDtor()); } namespace { @@ -1571,7 +1571,7 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, // Get the base pointer type. auto BaseValueTy = convertType((PathEnd[-1])->getType()); - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); // If there is no virtual base, use cir.base_class_addr. It takes care of // the adjustment and the null pointer check. @@ -1585,7 +1585,7 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, } if (sanitizePerformTypeCheck()) { - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); } // Conversion to a virtual base. cir.base_class_addr can't handle this. @@ -1651,11 +1651,11 @@ mlir::Value CIRGenFunction::getVTablePtr(mlir::Location Loc, Address This, const CXXRecordDecl *RD) { Address VTablePtrSrc = builder.createElementBitCast(Loc, This, VTableTy); auto VTable = builder.createLoad(Loc, VTablePtrSrc); - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); if (CGM.getCodeGenOpts().OptimizationLevel > 0 && CGM.getCodeGenOpts().StrictVTablePointers) { - assert(!MissingFeatures::createInvariantGroup()); + assert(!cir::MissingFeatures::createInvariantGroup()); } return VTable; @@ -1664,7 +1664,7 @@ mlir::Value CIRGenFunction::getVTablePtr(mlir::Location Loc, Address This, Address CIRGenFunction::buildCXXMemberDataPointerAddress( const Expr *E, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo) { - assert(!MissingFeatures::cxxABI()); + assert(!cir::MissingFeatures::cxxABI()); auto op = builder.createGetIndirectMember(getLoc(E->getSourceRange()), base.getPointer(), memberPtr); @@ -1896,7 +1896,7 @@ void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, // If this is a union copy constructor, we must emit a memcpy, because the AST // does not model that copy. if (isMemcpyEquivalentSpecialMember(D)) { - assert(!MissingFeatures::isMemcpyEquivalentSpecialMember()); + assert(!cir::MissingFeatures::isMemcpyEquivalentSpecialMember()); } const FunctionProtoType *FPT = D->getType()->castAs(); @@ -1928,10 +1928,10 @@ void CIRGenFunction::buildCXXConstructorCall( // In LLVM: do nothing. // In CIR: emit as a regular call, other later passes should lower the // ctor call into trivial initialization. - assert(!MissingFeatures::isTrivialCtorOrDtor()); + assert(!cir::MissingFeatures::isTrivialCtorOrDtor()); if (isMemcpyEquivalentSpecialMember(D)) { - assert(!MissingFeatures::isMemcpyEquivalentSpecialMember()); + assert(!cir::MissingFeatures::isMemcpyEquivalentSpecialMember()); } bool PassPrototypeArgs = true; @@ -2013,7 +2013,7 @@ void CIRGenFunction::buildInlinedInheritingCXXConstructorCall( llvm_unreachable("NYI"); InlinedInheritingConstructorScope Scope(*this, GD); // TODO(cir): ApplyInlineDebugLocation - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); RunCleanupsScope RunCleanups(*this); // Save the arguments to be passed to the inherited constructor. @@ -2039,4 +2039,4 @@ void CIRGenFunction::buildInlinedInheritingCXXConstructorCall( } llvm_unreachable("NYI"); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index f9fc641cee21..679d9a9399f7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -21,8 +21,8 @@ #include "CIRGenCleanup.h" #include "CIRGenFunction.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; using namespace mlir::cir; //===----------------------------------------------------------------------===// @@ -38,7 +38,7 @@ mlir::cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc, // Remove this once we go for making sure unreachable code is // well modeled (or not). assert(builder.getInsertionBlock() && "not yet implemented"); - assert(!MissingFeatures::ehStack()); + assert(!cir::MissingFeatures::ehStack()); // Insert a branch: to the cleanup block (unsolved) or to the already // materialized label. Keep track of unsolved goto's. @@ -260,7 +260,7 @@ static void buildCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, // If there's an active flag, load it and skip the cleanup if it's // false. - cir::CIRGenBuilderTy &builder = CGF.getBuilder(); + CIRGenBuilderTy &builder = CGF.getBuilder(); mlir::Location loc = CGF.currSrcLoc ? *CGF.currSrcLoc : builder.getUnknownLoc(); @@ -502,7 +502,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // FIXME(cir): LLVM traditional codegen tries to simplify some of the // codegen here. Once we are further down with EH support revisit whether we // need to this during lowering. - assert(!MissingFeatures::simplifyCleanupEntry()); + assert(!cir::MissingFeatures::simplifyCleanupEntry()); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h index 76547ceebfe4..87fefe34e103 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -24,7 +24,7 @@ namespace clang { class FunctionDecl; } -namespace cir { +namespace clang::CIRGen { class CIRGenModule; class CIRGenFunction; @@ -665,6 +665,6 @@ struct EHPersonality { bool isMSVCXXPersonality() const { return this == &MSVC_CxxFrameHandler3; } }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 7d31cd8cbb4e..eed9019fdbbb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -17,9 +17,9 @@ #include "llvm/ADT/ScopeExit.h" using namespace clang; -using namespace cir; +using namespace clang::CIRGen; -struct cir::CGCoroData { +struct clang::CIRGen::CGCoroData { // What is the current await expression kind and how many // await/yield expressions were encountered so far. // These are used to generate pretty labels for await expressions in LLVM IR. @@ -296,7 +296,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { // FIXME(cir): create a new scope to copy out the params? // LLVM create scope cleanups here, but might be due to the use // of many basic blocks? - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); ParamReferenceReplacerRAII ParamReplacer(LocalDeclMap); // Create mapping between parameters and copy-params for coroutine @@ -306,7 +306,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { "ParamMoves and FnArgs should be the same size for coroutine " "function"); // For zipping the arg map into debug info. - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); // Create parameter copies. We do it before creating a promise, since an // evolution of coroutine TS may allow promise constructor to observe @@ -347,7 +347,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { // FIXME(cir): wrap buildBodyAndFallthrough with try/catch bits. if (S.getExceptionHandler()) - assert(!MissingFeatures::unhandledException() && "NYI"); + assert(!cir::MissingFeatures::unhandledException() && "NYI"); if (buildBodyAndFallthrough(*this, S, S.getBody(), currLexScope).failed()) return mlir::failure(); diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h index d95529e50f4a..dd5700668c59 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h @@ -21,7 +21,7 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" -namespace cir { +namespace clang::CIRGen { class ConstantEmitter { public: @@ -162,6 +162,6 @@ class ConstantEmitter { mlir::Attribute validateAndPopAbstract(mlir::Attribute C, AbstractState save); }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenDebugInfo.h b/clang/lib/CIR/CodeGen/CIRGenDebugInfo.h index 9aa503bf07e5..56a8d9946db1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDebugInfo.h +++ b/clang/lib/CIR/CodeGen/CIRGenDebugInfo.h @@ -13,8 +13,8 @@ #ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENDEBUGINFO_H #define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENDEBUGINFO_H -namespace cir { +namespace clang::CIRGen { class CIRGenDebugInfo {}; -} // namespace cir +} // namespace clang::CIRGen #endif // LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENDEBUGINFO_H diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index da420ce5e539..d68dceabbf22 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -29,8 +29,8 @@ #include "llvm/Support/ErrorHandling.h" #include -using namespace cir; using namespace clang; +using namespace clang::CIRGen; CIRGenFunction::AutoVarEmission CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, @@ -53,8 +53,8 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, if (Ty->isVariablyModifiedType()) buildVariablyModifiedType(Ty); - assert(!MissingFeatures::generateDebugInfo()); - assert(!MissingFeatures::cxxABI()); + assert(!cir::MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::cxxABI()); Address address = Address::invalid(); Address allocaAddr = Address::invalid(); @@ -152,7 +152,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, } // TODO: what about emitting lifetime markers for MSVC catch parameters? // TODO: something like @llvm.lifetime.start/end here? revisit this later. - assert(!MissingFeatures::shouldEmitLifetimeMarkers()); + assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers()); } } else { // not openmp nor constant sized type bool VarAllocated = false; @@ -189,20 +189,20 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, // If we have debug info enabled, properly describe the VLA dimensions for // this type by registering the vla size expression for each of the // dimensions. - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); } emission.Addr = address; setAddrOfLocalVar(&D, emission.Addr); // Emit debug info for local var declaration. - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); if (D.hasAttr()) buildVarAnnotations(&D, address.emitRawPointer()); // TODO(cir): in LLVM this calls @llvm.lifetime.end. - assert(!MissingFeatures::shouldEmitLifetimeMarkers()); + assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers()); return emission; } @@ -231,12 +231,12 @@ static void emitStoresForConstant(CIRGenModule &CGM, const VarDecl &D, uint64_t ConstantSize = layout.getTypeAllocSize(Ty); if (!ConstantSize) return; - assert(!MissingFeatures::addAutoInitAnnotation()); - assert(!MissingFeatures::vectorConstants()); - assert(!MissingFeatures::shouldUseBZeroPlusStoresToInitialize()); - assert(!MissingFeatures::shouldUseMemSetToInitialize()); - assert(!MissingFeatures::shouldSplitConstantStore()); - assert(!MissingFeatures::shouldCreateMemCpyFromGlobal()); + assert(!cir::MissingFeatures::addAutoInitAnnotation()); + assert(!cir::MissingFeatures::vectorConstants()); + assert(!cir::MissingFeatures::shouldUseBZeroPlusStoresToInitialize()); + assert(!cir::MissingFeatures::shouldUseMemSetToInitialize()); + assert(!cir::MissingFeatures::shouldSplitConstantStore()); + assert(!cir::MissingFeatures::shouldCreateMemCpyFromGlobal()); // In CIR we want to emit a store for the whole thing, later lowering // prepare to LLVM should unwrap this into the best policy (see asserts // above). @@ -284,7 +284,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { // Check whether this is a byref variable that's potentially // captured and moved by its own initializer. If so, we'll need to // emit the initializer first, then copy into the variable. - assert(!MissingFeatures::capturedByInit() && "NYI"); + assert(!cir::MissingFeatures::capturedByInit() && "NYI"); // Note: constexpr already initializes everything correctly. LangOptions::TrivialAutoVarInitKind trivialAutoVarInit = @@ -528,7 +528,7 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, // never defer them. assert(isa(DC) && "unexpected parent code decl"); } - if (GD.getDecl() && MissingFeatures::openMP()) { + if (GD.getDecl() && cir::MissingFeatures::openMP()) { // Disable emission of the parent function for the OpenMP device codegen. llvm_unreachable("OpenMP is NYI"); } @@ -585,9 +585,9 @@ mlir::cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( GV.setGlobalVisibilityAttr(OldGV.getGlobalVisibilityAttr()); GV.setInitialValueAttr(Init); GV.setTlsModelAttr(OldGV.getTlsModelAttr()); - assert(!MissingFeatures::setDSOLocal()); - assert(!MissingFeatures::setComdat()); - assert(!MissingFeatures::addressSpaceInGlobalVar()); + assert(!cir::MissingFeatures::setDSOLocal()); + assert(!cir::MissingFeatures::setComdat()); + assert(!cir::MissingFeatures::addressSpaceInGlobalVar()); // Normally this should be done with a call to CGM.replaceGlobal(OldGV, GV), // but since at this point the current block hasn't been really attached, @@ -692,7 +692,7 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment); CGM.setStaticLocalDeclAddress(&D, var); - assert(!MissingFeatures::reportGlobalToASan()); + assert(!cir::MissingFeatures::reportGlobalToASan()); // Emit global variable debug descriptor for static vars. auto *DI = getDebugInfo(); @@ -712,14 +712,14 @@ void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS, void CIRGenFunction::buildScalarInit(const Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit) { Qualifiers::ObjCLifetime lifetime = Qualifiers::ObjCLifetime::OCL_None; - assert(!MissingFeatures::objCLifetime()); + assert(!cir::MissingFeatures::objCLifetime()); if (!lifetime) { SourceLocRAIIObject Loc{*this, loc}; mlir::Value value = buildScalarExpr(init); if (capturedByInit) llvm_unreachable("NYI"); - assert(!MissingFeatures::emitNullabilityCheck()); + assert(!cir::MissingFeatures::emitNullabilityCheck()); buildStoreThroughLValue(RValue::get(value), lvalue, true); return; } @@ -743,10 +743,10 @@ void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, return; } switch (CIRGenFunction::getEvaluationKind(type)) { - case TEK_Scalar: + case cir::TEK_Scalar: buildScalarInit(init, getLoc(D->getSourceRange()), lvalue); return; - case TEK_Complex: { + case cir::TEK_Complex: { mlir::Value complex = buildComplexExpr(init); if (capturedByInit) llvm_unreachable("NYI"); @@ -754,7 +754,7 @@ void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, /*init*/ true); return; } - case TEK_Aggregate: + case cir::TEK_Aggregate: assert(!type->isAtomicType() && "NYI"); AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap; if (isa(D)) @@ -865,7 +865,7 @@ void CIRGenFunction::buildDecl(const Decl &D) { case Decl::Using: // using X; [C++] case Decl::UsingEnum: // using enum X; [C++] case Decl::UsingDirective: // using namespace X; [C++] - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); return; case Decl::UsingPack: assert(0 && "Not implemented"); @@ -891,7 +891,7 @@ void CIRGenFunction::buildDecl(const Decl &D) { case Decl::TypeAlias: { // using X = int; [C++0x] QualType Ty = cast(D).getUnderlyingType(); if (auto *DI = getDebugInfo()) - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); if (Ty->isVariablyModifiedType()) buildVariablyModifiedType(Ty); return; diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp index d50866853377..8e89095aecff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp @@ -17,8 +17,8 @@ #include "clang/Basic/LangOptions.h" using namespace clang; +using namespace clang::CIRGen; using namespace mlir::cir; -using namespace cir; void CIRGenModule::buildCXXGlobalInitFunc() { while (!CXXGlobalInits.empty() && !CXXGlobalInits.back()) diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index df07da9cd8eb..b7b6ba81b907 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -29,8 +29,8 @@ #include "mlir/IR/Value.h" #include "llvm/Support/SaveAndRestore.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; const EHPersonality EHPersonality::GNU_C = {"__gcc_personality_v0", nullptr}; const EHPersonality EHPersonality::GNU_C_SJLJ = {"__gcc_personality_sj0", @@ -215,7 +215,7 @@ struct FreeException final : EHScopeStack::Cleanup { FreeException(mlir::Value exn) : exn(exn) {} void Emit(CIRGenFunction &CGF, Flags flags) override { // OG LLVM codegen emits a no unwind call, CIR emits an operation. - cir::CIRGenBuilderTy &builder = CGF.getBuilder(); + CIRGenBuilderTy &builder = CGF.getBuilder(); mlir::Location loc = CGF.currSrcLoc ? *CGF.currSrcLoc : builder.getUnknownLoc(); builder.create( @@ -435,7 +435,7 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, assert(typeValue && "fell into catch-all case!"); // Check for address space mismatch: if (typeValue->getType() != // argTy) - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); bool nextIsEnd = false; // If this is the last handler, we're at the end, and the next @@ -566,7 +566,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { CGM.getCXXABI().emitBeginCatch(*this, C); // Emit the PGO counter increment. - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); // Perform the body of the catch. (void)buildStmt(C->getHandlerBlock(), /*useCurrentScope=*/true); @@ -601,7 +601,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { llvm_unreachable("NYI"); } - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); } /// Check whether this is a non-EH scope, i.e. a scope which doesn't @@ -645,7 +645,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { if (!catches || catches.empty()) { // Save the current CIR generation state. mlir::OpBuilder::InsertionGuard guard(builder); - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); // Traditional LLVM codegen creates the lpad basic block, extract // values, landing pad instructions, etc. diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 32d476890b5c..30c8f5e33230 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -39,8 +39,8 @@ #include "mlir/IR/Operation.h" #include "mlir/IR/Value.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; using namespace mlir::cir; static mlir::cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, @@ -137,7 +137,7 @@ static Address buildPointerWithAlignment(const Expr *expr, CE->getSubExpr()->getType()->getAs()) { if (PtrTy->getPointeeType()->isVoidType()) break; - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); LValueBaseInfo innerBaseInfo; Address addr = cgf.buildPointerWithAlignment( @@ -146,7 +146,7 @@ static Address buildPointerWithAlignment(const Expr *expr, *baseInfo = innerBaseInfo; if (isa(CE)) { - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); LValueBaseInfo TargetTypeBaseInfo; CharUnits Align = cgf.CGM.getNaturalPointeeTypeAlignment( @@ -173,7 +173,7 @@ static Address buildPointerWithAlignment(const Expr *expr, addr = cgf.getBuilder().createElementBitCast( cgf.getLoc(expr->getSourceRange()), addr, ElemTy); if (CE->getCastKind() == CK_AddressSpaceConversion) { - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); llvm_unreachable("NYI"); } return addr; @@ -189,7 +189,7 @@ static Address buildPointerWithAlignment(const Expr *expr, // TODO: Support accesses to members of base classes in TBAA. For now, we // conservatively pretend that the complete object is of the base class // type. - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); Address Addr = cgf.buildPointerWithAlignment(CE->getSubExpr(), baseInfo); auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); return cgf.getAddressOfBaseClass( @@ -211,7 +211,7 @@ static Address buildPointerWithAlignment(const Expr *expr, LValue LV = cgf.buildLValue(UO->getSubExpr()); if (baseInfo) *baseInfo = LV.getBaseInfo(); - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); return LV.getAddress(); } } @@ -290,7 +290,7 @@ LValue CIRGenFunction::buildLValueForBitField(LValue base, QualType fieldType = field->getType().withCVRQualifiers(base.getVRQualifiers()); - assert(!MissingFeatures::tbaa() && "NYI TBAA for bit fields"); + assert(!cir::MissingFeatures::tbaa() && "NYI TBAA for bit fields"); LValueBaseInfo fieldBaseInfo(BaseInfo.getAlignmentSource()); return LValue::MakeBitfield(Addr, info, fieldType, fieldBaseInfo, TBAAAccessInfo()); @@ -309,15 +309,15 @@ LValue CIRGenFunction::buildLValueForField(LValue base, const RecordDecl *rec = field->getParent(); AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource)); - if (MissingFeatures::tbaa() || rec->hasAttr() || + if (cir::MissingFeatures::tbaa() || rec->hasAttr() || FieldType->isVectorType()) { - assert(!MissingFeatures::tbaa() && "NYI"); + assert(!cir::MissingFeatures::tbaa() && "NYI"); } else if (rec->isUnion()) { - assert(!MissingFeatures::tbaa() && "NYI"); + assert(!cir::MissingFeatures::tbaa() && "NYI"); } else { // If no base type been assigned for the base access, then try to generate // one for this base lvalue. - assert(!MissingFeatures::tbaa() && "NYI"); + assert(!cir::MissingFeatures::tbaa() && "NYI"); } Address addr = base.getAddress(); @@ -342,11 +342,11 @@ LValue CIRGenFunction::buildLValueForField(LValue base, hasAnyVptr(FieldType, getContext())) // Because unions can easily skip invariant.barriers, we need to add // a barrier every time CXXRecord field with vptr is referenced. - assert(!MissingFeatures::createInvariantGroup()); + assert(!cir::MissingFeatures::createInvariantGroup()); if (IsInPreservedAIRegion || (getDebugInfo() && rec->hasAttr())) { - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); } if (FieldType->isReferenceType()) @@ -368,7 +368,7 @@ LValue CIRGenFunction::buildLValueForField(LValue base, // If this is a reference field, load the reference right now. if (FieldType->isReferenceType()) { - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); LValue RefLVal = makeAddrLValue(addr, FieldType, FieldBaseInfo); if (RecordCVR & Qualifiers::Volatile) RefLVal.getQuals().addVolatile(); @@ -390,7 +390,7 @@ LValue CIRGenFunction::buildLValueForField(LValue base, if (field->hasAttr()) llvm_unreachable("NYI"); - if (MissingFeatures::tbaa()) + if (cir::MissingFeatures::tbaa()) // Next line should take a TBAA object llvm_unreachable("NYI"); LValue LV = makeAddrLValue(addr, FieldType, FieldBaseInfo); @@ -426,7 +426,7 @@ LValue CIRGenFunction::buildLValueForFieldInitialization( LValueBaseInfo BaseInfo = Base.getBaseInfo(); AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource)); - assert(!MissingFeatures::tbaa() && "NYI"); + assert(!cir::MissingFeatures::tbaa() && "NYI"); return makeAddrLValue(V, FieldType, FieldBaseInfo); } @@ -482,7 +482,7 @@ static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { bool IsPredefinedLibFunction = CGM.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID); bool HasAttributeNoBuiltin = false; - assert(!MissingFeatures::attributeNoBuiltin() && "NYI"); + assert(!cir::MissingFeatures::attributeNoBuiltin() && "NYI"); // bool HasAttributeNoBuiltin = // CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) || // CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins); @@ -641,7 +641,7 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, Address addr, llvm_unreachable("NYI"); } - if (MissingFeatures::tbaa()) + if (cir::MissingFeatures::tbaa()) llvm_unreachable("NYI"); } @@ -738,7 +738,7 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, auto field = builder.createGetBitfield(getLoc(Loc), resLTy, ptr.getPointer(), ptr.getElementType(), info, LV.isVolatile(), useVolatile); - assert(!MissingFeatures::emitScalarRangeCheck() && "NYI"); + assert(!cir::MissingFeatures::emitScalarRangeCheck() && "NYI"); return RValue::get(field); } @@ -919,7 +919,7 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, assert(0 && "NYI"); else LV = CGF.makeAddrLValue(Addr, T, AlignmentSource::Decl); - assert(!MissingFeatures::setObjCGCLValueClass() && "NYI"); + assert(!cir::MissingFeatures::setObjCGCLValueClass() && "NYI"); return LV; } @@ -975,12 +975,12 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { VD = VD->getCanonicalDecl(); if (auto *FD = LambdaCaptureFields.lookup(VD)) return buildCapturedFieldLValue(*this, FD, CXXABIThisValue); - assert(!MissingFeatures::CGCapturedStmtInfo() && "NYI"); + assert(!cir::MissingFeatures::CGCapturedStmtInfo() && "NYI"); // TODO[OpenMP]: Find the appropiate captured variable value and return // it. // TODO[OpenMP]: Set non-temporal information in the captured LVal. // LLVM codegen: - assert(!MissingFeatures::openMP()); + assert(!cir::MissingFeatures::openMP()); // Address addr = GetAddrOfBlockDecl(VD); // return MakeAddrLValue(addr, T, AlignmentSource::Decl); } @@ -1053,15 +1053,15 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { bool NonGCable = isLocalStorage && !VD->getType()->isReferenceType() && !isBlockByref; - if (NonGCable && MissingFeatures::setNonGC()) { + if (NonGCable && cir::MissingFeatures::setNonGC()) { llvm_unreachable("garbage collection is NYI"); } bool isImpreciseLifetime = (isLocalStorage && !VD->hasAttr()); - if (isImpreciseLifetime && MissingFeatures::ARC()) + if (isImpreciseLifetime && cir::MissingFeatures::ARC()) llvm_unreachable("imprecise lifetime is NYI"); - assert(!MissingFeatures::setObjCGCLValueClass()); + assert(!cir::MissingFeatures::setObjCGCLValueClass()); // Statics are defined as globals, so they are not include in the function's // symbol table. @@ -1076,7 +1076,7 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { // Emit debuginfo for the function declaration if the target wants to. if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); return LV; } @@ -1120,7 +1120,7 @@ CIRGenFunction::buildPointerToDataMemberBinaryExpr(const BinaryOperator *E) { LValueBaseInfo baseInfo; // TODO(cir): add TBAA - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); auto memberAddr = buildCXXMemberDataPointerAddress(E, baseAddr, memberPtr, memberPtrTy, &baseInfo); @@ -1138,7 +1138,7 @@ CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { // it. LValueBaseInfo BaseInfo; // TODO(cir): Support TBAA - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); Address Ptr = buildPointerWithAlignment(E->getBase(), &BaseInfo); const auto *PT = E->getBase()->getType()->castAs(); base = makeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo); @@ -1208,7 +1208,7 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { // evaluated first just in case the variable gets moved by the RHS. switch (CIRGenFunction::getEvaluationKind(E->getType())) { - case TEK_Scalar: { + case cir::TEK_Scalar: { assert(E->getLHS()->getType().getObjCLifetime() == clang::Qualifiers::ObjCLifetime::OCL_None && "not implemented"); @@ -1229,9 +1229,9 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { return LV; } - case TEK_Complex: + case cir::TEK_Complex: return buildComplexAssignmentLValue(E); - case TEK_Aggregate: + case cir::TEK_Aggregate: assert(0 && "not implemented"); } llvm_unreachable("bad evaluation kind"); @@ -1315,7 +1315,7 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { ? buildAddrOfRealComponent(Loc, LV.getAddress(), LV.getType()) : buildAddrOfImagComponent(Loc, LV.getAddress(), LV.getType())); // TODO(cir): TBAA info. - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); LValue ElemLV = makeAddrLValue(Component, T, LV.getBaseInfo()); ElemLV.getQuals().addQualifiers(LV.getQuals()); return ElemLV; @@ -1342,11 +1342,11 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot, bool ignoreResult) { switch (CIRGenFunction::getEvaluationKind(E->getType())) { - case TEK_Scalar: + case cir::TEK_Scalar: return RValue::get(buildScalarExpr(E)); - case TEK_Complex: + case cir::TEK_Complex: return RValue::getComplex(buildComplexExpr(E)); - case TEK_Aggregate: { + case cir::TEK_Aggregate: { if (!ignoreResult && aggSlot.isIgnored()) aggSlot = CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), getCounterAggTmpAsString()); @@ -1386,18 +1386,18 @@ RValue CIRGenFunction::GetUndefRValue(QualType ty) { return RValue::get(nullptr); switch (getEvaluationKind(ty)) { - case TEK_Complex: { + case cir::TEK_Complex: { llvm_unreachable("NYI"); } // If this is a use of an undefined aggregate type, the aggregate must have // an identifiable address. Just because the contents of the value are // undefined doesn't mean that the address can't be taken and compared. - case TEK_Aggregate: { + case cir::TEK_Aggregate: { llvm_unreachable("NYI"); } - case TEK_Scalar: + case cir::TEK_Scalar: llvm_unreachable("NYI"); } llvm_unreachable("bad evaluation kind"); @@ -1491,8 +1491,8 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, // Chain calls use the same code path to add the inviisble chain parameter to // the function type. if (isa(FnType) || Chain) { - assert(!MissingFeatures::chainCall()); - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::chainCall()); + assert(!cir::MissingFeatures::addressSpace()); auto CalleeTy = getTypes().GetFunctionType(FnInfo); // get non-variadic function type CalleeTy = mlir::cir::FuncType::get(CalleeTy.getInputs(), @@ -1570,7 +1570,7 @@ Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); if (BaseInfo) *BaseInfo = LV.getBaseInfo(); - assert(!MissingFeatures::tbaa() && "NYI"); + assert(!cir::MissingFeatures::tbaa() && "NYI"); mlir::Value ptr = CGM.getBuilder().maybeBuildArrayDecay( CGM.getLoc(E->getSourceRange()), Addr.getPointer(), @@ -1666,7 +1666,7 @@ buildArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, // TODO(cir): LLVM codegen emits in bound gep check here, is there anything // that would enhance tracking this later in CIR? if (inbounds) - assert(!MissingFeatures::emitCheckedInBoundsGEP() && "NYI"); + assert(!cir::MissingFeatures::emitCheckedInBoundsGEP() && "NYI"); return CGM.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx, shouldDecay); } @@ -1766,7 +1766,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, llvm_unreachable("extvector subscript is NYI"); } - assert(!MissingFeatures::tbaa() && "TBAA is NYI"); + assert(!cir::MissingFeatures::tbaa() && "TBAA is NYI"); LValueBaseInfo EltBaseInfo; Address Addr = Address::invalid(); if (const VariableArrayType *vla = @@ -1818,11 +1818,11 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, E->getBase()); EltBaseInfo = ArrayLV.getBaseInfo(); // TODO(cir): EltTBAAInfo - assert(!MissingFeatures::tbaa() && "TBAA is NYI"); + assert(!cir::MissingFeatures::tbaa() && "TBAA is NYI"); } else { // The base must be a pointer; emit it with an estimate of its alignment. // TODO(cir): EltTBAAInfo - assert(!MissingFeatures::tbaa() && "TBAA is NYI"); + assert(!cir::MissingFeatures::tbaa() && "TBAA is NYI"); Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo); auto Idx = EmitIdxAfterBase(/*Promote*/ true); QualType ptrType = E->getBase()->getType(); @@ -1981,7 +1981,7 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { // TODO: Support accesses to members of base classes in TBAA. For now, we // conservatively pretend that the complete object is of the base class // type. - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); return makeAddrLValue(Base, E->getType(), LV.getBaseInfo()); } case CK_ToUnion: @@ -2000,7 +2000,7 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { auto DestAS = builder.getAddrSpaceAttr(E->getType().getAddressSpace()); mlir::Value V = getTargetHooks().performAddrSpaceCast( *this, LV.getPointer(), SrcAS, DestAS, ConvertType(DestTy)); - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); return makeAddrLValue(Address(V, getTypes().convertTypeForMem(E->getType()), LV.getAddress().getAlignment()), E->getType(), LV.getBaseInfo()); @@ -2101,7 +2101,7 @@ LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { NamedDecl *ND = E->getMemberDecl(); if (auto *Field = dyn_cast(ND)) { LValue LV = buildLValueForField(BaseLV, Field); - assert(!MissingFeatures::setObjCGCLValueClass() && "NYI"); + assert(!cir::MissingFeatures::setObjCGCLValueClass() && "NYI"); if (getLangOpts().OpenMP) { // If the member was explicitly marked as nontemporal, mark it as // nontemporal. If the base lvalue is marked as nontemporal, mark access @@ -2136,11 +2136,11 @@ void CIRGenFunction::buildAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInit) { // FIXME: This function should take an LValue as an argument. switch (getEvaluationKind(E->getType())) { - case TEK_Complex: + case cir::TEK_Complex: assert(0 && "NYI"); return; - case TEK_Aggregate: { + case cir::TEK_Aggregate: { buildAggExpr(E, AggValueSlot::forAddr(Location, Quals, AggValueSlot::IsDestructed_t(IsInit), AggValueSlot::DoesNotNeedGCBarriers, @@ -2149,7 +2149,7 @@ void CIRGenFunction::buildAnyExprToMem(const Expr *E, Address Location, return; } - case TEK_Scalar: { + case cir::TEK_Scalar: { RValue RV = RValue::get(buildScalarExpr(E)); LValue LV = makeAddrLValue(Location, E->getType()); buildStoreThroughLValue(RV, LV); @@ -2306,7 +2306,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( } else { switch (M->getStorageDuration()) { case SD_Automatic: - assert(!MissingFeatures::shouldEmitLifetimeMarkers()); + assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers()); break; case SD_FullExpression: { @@ -2383,7 +2383,7 @@ std::optional HandleConditionalOperatorLValueSimpleCase( if (!CGF.ContainsLabel(Dead)) { // If the true case is live, we need to track its region. if (CondExprBool) { - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); } // If a throw expression we emit it and return an undefined lvalue // because it can't be used. @@ -2457,7 +2457,8 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, *this, loc, b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - assert(!MissingFeatures::incrementProfileCounter()); + assert( + !cir::MissingFeatures::incrementProfileCounter()); eval.begin(CGF); Info.LHS = BranchGenFunc(CGF, trueExpr); auto lhs = Info.LHS->getPointer(); @@ -2478,7 +2479,8 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, *this, loc, b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - assert(!MissingFeatures::incrementProfileCounter()); + assert( + !cir::MissingFeatures::incrementProfileCounter()); eval.begin(CGF); Info.RHS = BranchGenFunc(CGF, falseExpr); auto rhs = Info.RHS->getPointer(); @@ -2531,7 +2533,7 @@ LValue CIRGenFunction::buildConditionalOperatorLValue( AlignmentSource alignSource = std::max(Info.LHS->getBaseInfo().getAlignmentSource(), Info.RHS->getBaseInfo().getAlignmentSource()); - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource)); } else { llvm_unreachable("NYI"); @@ -2645,11 +2647,11 @@ RValue CIRGenFunction::convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc) { LValue lvalue = makeAddrLValue(addr, type, AlignmentSource::Decl); switch (getEvaluationKind(type)) { - case TEK_Complex: + case cir::TEK_Complex: llvm_unreachable("NYI"); - case TEK_Aggregate: + case cir::TEK_Aggregate: return lvalue.asAggregateRValue(); - case TEK_Scalar: + case cir::TEK_Scalar: return RValue::get(buildLoadOfScalar(lvalue, loc)); } llvm_unreachable("NYI"); @@ -2741,7 +2743,7 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, // This should be done in CIR prior to LLVM lowering, if we do now // we can make CIR based diagnostics misleading. // cir.ternary(!x, t, f) -> cir.ternary(x, f, t) - assert(!MissingFeatures::shouldReverseUnaryCondOnBoolExpr()); + assert(!cir::MissingFeatures::shouldReverseUnaryCondOnBoolExpr()); } if (const ConditionalOperator *CondOp = dyn_cast(cond)) { @@ -2777,7 +2779,7 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, // Don't bother if not optimizing because that metadata would not be used. auto *Call = dyn_cast(cond->IgnoreImpCasts()); if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { - assert(!MissingFeatures::insertBuiltinUnpredictable()); + assert(!cir::MissingFeatures::insertBuiltinUnpredictable()); } // Emit the code with the fully general case. @@ -2920,8 +2922,8 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile, llvm_unreachable("NYI"); } - assert(!MissingFeatures::tbaa() && "NYI"); - assert(!MissingFeatures::emitScalarRangeCheck() && "NYI"); + assert(!cir::MissingFeatures::tbaa() && "NYI"); + assert(!cir::MissingFeatures::emitScalarRangeCheck() && "NYI"); return buildFromMemory(Load, ty); } @@ -2974,7 +2976,7 @@ Address CIRGenFunction::buildLoadOfReference(LValue refLVal, mlir::Location loc, refLVal.getAddress().getPointer()); // TODO(cir): DecorateInstructionWithTBAA relevant for us? - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); QualType pointeeType = refLVal.getType()->getPointeeType(); CharUnits align = diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index e7456dae5530..d8bd131c278d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -26,8 +26,8 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; namespace { @@ -208,7 +208,7 @@ class AggExprEmitter : public StmtVisitor { void VisitCallExpr(const CallExpr *E); void VisitStmtExpr(const StmtExpr *E) { - assert(!MissingFeatures::stmtExprEvaluation() && "NYI"); + assert(!cir::MissingFeatures::stmtExprEvaluation() && "NYI"); CGF.buildCompoundStmt(*E->getSubStmt(), /*getLast=*/true, Dest); } @@ -235,7 +235,7 @@ class AggExprEmitter : public StmtVisitor { // do an atomic copy. if (lhs.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(lhs)) { - assert(!MissingFeatures::atomicTypes()); + assert(!cir::MissingFeatures::atomicTypes()); return; } @@ -246,7 +246,7 @@ class AggExprEmitter : public StmtVisitor { // A non-volatile aggregate destination might have volatile member. if (!lhsSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType())) - assert(!MissingFeatures::atomicTypes()); + assert(!cir::MissingFeatures::atomicTypes()); CGF.buildAggExpr(E->getRHS(), lhsSlot); @@ -386,7 +386,7 @@ void AggExprEmitter::buildAggLoadOfLValue(const Expr *E) { // If the type of the l-value is atomic, then do an atomic load. if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV) || - MissingFeatures::atomicTypes()) + cir::MissingFeatures::atomicTypes()) llvm_unreachable("atomic load is NYI"); buildFinalDestCopy(E->getType(), LV); @@ -411,7 +411,7 @@ void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src, // Copy non-trivial C structs here. if (Dest.isVolatile()) - assert(!MissingFeatures::volatileTypes()); + assert(!cir::MissingFeatures::volatileTypes()); if (SrcValueKind == EVK_RValue) { if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) { @@ -594,7 +594,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, [&](mlir::OpBuilder &b, mlir::Location loc) { auto currentElement = builder.createLoad(loc, tmpAddr); - if (MissingFeatures::cleanups()) + if (cir::MissingFeatures::cleanups()) llvm_unreachable("NYI"); // Emit the actual filler expression. @@ -831,17 +831,17 @@ void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) { } switch (CGF.getEvaluationKind(type)) { - case TEK_Complex: + case cir::TEK_Complex: llvm_unreachable("NYI"); return; - case TEK_Aggregate: + case cir::TEK_Aggregate: CGF.buildAggExpr( E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, AggValueSlot::MayOverlap, Dest.isZeroed())); return; - case TEK_Scalar: + case cir::TEK_Scalar: if (LV.isSimple()) { CGF.buildScalarInit(E, CGF.getLoc(E->getSourceRange()), LV); } else { @@ -887,7 +887,7 @@ void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { } void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { - if (MissingFeatures::cleanups()) + if (cir::MissingFeatures::cleanups()) llvm_unreachable("NYI"); auto &builder = CGF.getBuilder(); @@ -1002,7 +1002,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { // If we're loading from a volatile type, force the destination // into existence. if (E->getSubExpr()->getType().isVolatileQualified() || - MissingFeatures::volatileTypes()) { + cir::MissingFeatures::volatileTypes()) { bool Destruct = !Dest.isExternallyDestructed() && E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct; @@ -1119,14 +1119,14 @@ void AggExprEmitter::withReturnValueSlot( (RequiresDestruction && !Dest.getAddress().isValid()); Address RetAddr = Address::invalid(); - assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); + assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); if (!UseTemp) { RetAddr = Dest.getAddress(); } else { RetAddr = CGF.CreateMemTemp(RetTy, CGF.getLoc(E->getSourceRange()), "tmp", &RetAddr); - assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); + assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); } RValue Src = @@ -1143,7 +1143,7 @@ void AggExprEmitter::withReturnValueSlot( // If there's no dtor to run, the copy was the last use of our temporary. // Since we're not guaranteed to be in an ExprWithCleanups, clean up // eagerly. - assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); + assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); } } @@ -1345,7 +1345,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( LValue LV = CGF.buildLValueForFieldInitialization(DestLV, field, field->getName()); // We never generate write-barries for initialized fields. - assert(!MissingFeatures::setNonGC()); + assert(!cir::MissingFeatures::setNonGC()); if (curInitIndex < NumInitElements) { // Store the initializer into the field. @@ -1401,7 +1401,7 @@ void AggExprEmitter::VisitAbstractConditionalOperator( // Bind the common expression if necessary. CIRGenFunction::OpaqueValueMapping binding(CGF, E); CIRGenFunction::ConditionalEvaluation eval(CGF); - assert(!MissingFeatures::getProfileCount()); + assert(!cir::MissingFeatures::getProfileCount()); // Save whether the destination's lifetime is externally managed. bool isExternallyDestructed = Dest.isExternallyDestructed(); @@ -1418,7 +1418,7 @@ void AggExprEmitter::VisitAbstractConditionalOperator( CIRGenFunction::LexicalScope lexScope{CGF, loc, builder.getInsertionBlock()}; Dest.setExternallyDestructed(isExternallyDestructed); - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); Visit(E->getTrueExpr()); } eval.end(CGF); @@ -1435,7 +1435,7 @@ void AggExprEmitter::VisitAbstractConditionalOperator( // with us, and we can safely emit the RHS into the same slot, but // we shouldn't claim that it's already being destructed. Dest.setExternallyDestructed(isExternallyDestructed); - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); Visit(E->getFalseExpr()); } eval.end(CGF); @@ -1444,7 +1444,7 @@ void AggExprEmitter::VisitAbstractConditionalOperator( if (destructNonTrivialCStruct) llvm_unreachable("NYI"); - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); } void AggExprEmitter::VisitBinComma(const BinaryOperator *E) { @@ -1715,7 +1715,7 @@ void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty, // Determine the metadata to describe the position of any padding in this // memcpy, as well as the TBAA tags for the members of the struct, in case // the optimizer wishes to expand it in to scalar memory operations. - if (CGM.getCodeGenOpts().NewStructPathTBAA || MissingFeatures::tbaa()) + if (CGM.getCodeGenOpts().NewStructPathTBAA || cir::MissingFeatures::tbaa()) llvm_unreachable("TBAA is NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 2aa2f283ab71..6d5a20cbd4ea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -19,8 +19,8 @@ #include -using namespace cir; using namespace clang; +using namespace clang::CIRGen; namespace { struct MemberCallInfo { @@ -183,7 +183,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( // one or the one of the full expression, we would have to build // a derived-to-base cast to compute the correct this pointer, but // we don't have support for that yet, so do a virtual call. - assert(!MissingFeatures::buildDerivedToBaseCastForDevirt()); + assert(!cir::MissingFeatures::buildDerivedToBaseCastForDevirt()); DevirtualizedMethod = nullptr; } } @@ -218,7 +218,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( LValue This; if (IsArrow) { LValueBaseInfo BaseInfo; - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); Address ThisValue = buildPointerWithAlignment(Base, &BaseInfo); This = makeAddrLValue(ThisValue, Base->getType(), BaseInfo); } else { @@ -284,7 +284,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( SkippedChecks.set(SanitizerKind::Null, true); } - if (MissingFeatures::buildTypeCheck()) + if (cir::MissingFeatures::buildTypeCheck()) llvm_unreachable("NYI"); // C++ [class.virtual]p12: @@ -726,14 +726,14 @@ static void StoreAnyExprIntoOneUnit(CIRGenFunction &CGF, const Expr *Init, AggValueSlot::Overlap_t MayOverlap) { // FIXME: Refactor with buildExprAsInit. switch (CGF.getEvaluationKind(AllocType)) { - case TEK_Scalar: + case cir::TEK_Scalar: CGF.buildScalarInit(Init, CGF.getLoc(Init->getSourceRange()), CGF.makeAddrLValue(NewPtr, AllocType), false); return; - case TEK_Complex: + case cir::TEK_Complex: llvm_unreachable("NYI"); return; - case TEK_Aggregate: { + case cir::TEK_Aggregate: { AggValueSlot Slot = AggValueSlot::forAddr( NewPtr, AllocType.getQualifiers(), AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, @@ -750,7 +750,7 @@ static void buildNewInitializer(CIRGenFunction &CGF, const CXXNewExpr *E, QualType ElementType, mlir::Type ElementTy, Address NewPtr, mlir::Value NumElements, mlir::Value AllocSizeWithoutCookie) { - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); if (E->isArray()) { llvm_unreachable("NYI"); } else if (const Expr *Init = E->getInitializer()) { @@ -872,7 +872,7 @@ static bool EmitObjectDelete(CIRGenFunction &CGF, const CXXDeleteExpr *DE, // In traditional LLVM codegen null checks are emitted to save a delete call. // In CIR we optimize for size by default, the null check should be added into // this function callers. - assert(!MissingFeatures::emitNullCheckForDeleteCalls()); + assert(!cir::MissingFeatures::emitNullCheckForDeleteCalls()); CGF.PopCleanupBlock(); return false; @@ -892,7 +892,7 @@ void CIRGenFunction::buildCXXDeleteExpr(const CXXDeleteExpr *E) { // // CIR note: emit the code size friendly by default for now, such as mentioned // in `EmitObjectDelete`. - assert(!MissingFeatures::emitNullCheckForDeleteCalls()); + assert(!cir::MissingFeatures::emitNullCheckForDeleteCalls()); QualType DeleteTy = E->getDestroyedType(); // A destroying operator delete overrides the entire operation of the @@ -998,7 +998,7 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { buildNewDeleteCall(*this, allocator, allocatorType, allocatorArgs); // Set !heapallocsite metadata on the call to operator new. - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); // If this was a call to a global replaceable allocation function that does // not take an alignment argument, the allocator is known to produce storage @@ -1198,7 +1198,7 @@ static RValue buildNewDeleteCall(CIRGenFunction &CGF, /// to a replaceable global allocation function. /// /// We model such elidable calls with the 'builtin' attribute. - assert(!MissingFeatures::attributeBuiltin()); + assert(!cir::MissingFeatures::attributeBuiltin()); return RV; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 7472b039649b..2113539bda98 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -10,8 +10,8 @@ #include "clang/AST/StmtVisitor.h" #include "llvm/Support/ErrorHandling.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; namespace { @@ -54,7 +54,7 @@ class ComplexExprEmitter : public StmtVisitor { //===--------------------------------------------------------------------===// mlir::Value Visit(Expr *E) { - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); return StmtVisitor::Visit(E); } @@ -489,7 +489,7 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, case CK_FloatingRealToComplex: case CK_IntegralRealToComplex: { - assert(!MissingFeatures::CGFPOptionsRAII()); + assert(!cir::MissingFeatures::CGFPOptionsRAII()); return buildScalarToComplexCast(CGF.buildScalarExpr(Op), Op->getType(), DestTy, Op->getExprLoc()); } @@ -498,7 +498,7 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, case CK_FloatingComplexToIntegralComplex: case CK_IntegralComplexCast: case CK_IntegralComplexToFloatingComplex: { - assert(!MissingFeatures::CGFPOptionsRAII()); + assert(!cir::MissingFeatures::CGFPOptionsRAII()); return buildComplexToComplexCast(Visit(Op), Op->getType(), DestTy, Op->getExprLoc()); } @@ -657,7 +657,7 @@ LValue ComplexExprEmitter::buildCompoundAssignLValue( BinOpInfo OpInfo{CGF.getLoc(E->getExprLoc())}; OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); - assert(!MissingFeatures::CGFPOptionsRAII()); + assert(!cir::MissingFeatures::CGFPOptionsRAII()); // Load the RHS and LHS operands. // __block variables need to have the rhs evaluated first, plus this should @@ -771,12 +771,12 @@ mlir::Value ComplexExprEmitter::buildCompoundAssign( } mlir::Value ComplexExprEmitter::buildBinAdd(const BinOpInfo &Op) { - assert(!MissingFeatures::CGFPOptionsRAII()); + assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexAdd(Op.Loc, Op.LHS, Op.RHS); } mlir::Value ComplexExprEmitter::buildBinSub(const BinOpInfo &Op) { - assert(!MissingFeatures::CGFPOptionsRAII()); + assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexSub(Op.Loc, Op.LHS, Op.RHS); } @@ -797,14 +797,14 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) { } mlir::Value ComplexExprEmitter::buildBinMul(const BinOpInfo &Op) { - assert(!MissingFeatures::CGFPOptionsRAII()); + assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexMul( Op.Loc, Op.LHS, Op.RHS, getComplexRangeAttr(Op.FPFeatures.getComplexRange()), FPHasBeenPromoted); } mlir::Value ComplexExprEmitter::buildBinDiv(const BinOpInfo &Op) { - assert(!MissingFeatures::CGFPOptionsRAII()); + assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexDiv( Op.Loc, Op.LHS, Op.RHS, getComplexRangeAttr(Op.FPFeatures.getComplexRange()), FPHasBeenPromoted); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index f621035d02a6..ce27635e3f44 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -34,7 +34,7 @@ #include using namespace clang; -using namespace cir; +using namespace clang::CIRGen; //===----------------------------------------------------------------------===// // ConstantAggregateBuilder @@ -51,7 +51,7 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, struct ConstantAggregateBuilderUtils { CIRGenModule &CGM; - CIRDataLayout dataLayout; + cir::CIRDataLayout dataLayout; ConstantAggregateBuilderUtils(CIRGenModule &CGM) : CGM(CGM), dataLayout{CGM.getModule()} {} @@ -1295,7 +1295,7 @@ class ConstantLValueEmitter /// Return GEP-like value offset mlir::ArrayAttr getOffset(mlir::Type Ty) { auto Offset = Value.getLValueOffset().getQuantity(); - CIRDataLayout Layout(CGM.getModule()); + cir::CIRDataLayout Layout(CGM.getModule()); SmallVector Idx; CGM.getBuilder().computeGlobalViewIndicesFromFlatOffset(Offset, Ty, Layout, Idx); @@ -1872,7 +1872,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, Desired, mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Elts)); } case APValue::MemberPointer: { - assert(!MissingFeatures::cxxABI()); + assert(!cir::MissingFeatures::cxxABI()); const ValueDecl *memberDecl = Value.getMemberPointerDecl(); assert(!Value.isMemberPointerToDerivedMember() && "NYI"); @@ -1924,7 +1924,7 @@ mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) { } mlir::Value CIRGenModule::buildMemberPointerConstant(const UnaryOperator *E) { - assert(!MissingFeatures::cxxABI()); + assert(!cir::MissingFeatures::cxxABI()); auto loc = getLoc(E->getSourceRange()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 560b0be47b4c..c13d95e69cc4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -29,8 +29,8 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Value.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; namespace { @@ -60,7 +60,7 @@ struct BinOpInfo { return true; llvm::APInt Result; - assert(!MissingFeatures::mayHaveIntegerOverflow()); + assert(!cir::MissingFeatures::mayHaveIntegerOverflow()); llvm_unreachable("NYI"); return false; } @@ -284,7 +284,7 @@ class ScalarExprEmitter : public StmtVisitor { // Do we need anything like TestAndClearIgnoreResultAssign()? if (E->getBase()->getType()->isVectorType()) { - assert(!MissingFeatures::scalableVectors() && + assert(!cir::MissingFeatures::scalableVectors() && "NYI: index into scalable vector"); // Subscript of vector type. This is handled differently, with a custom // operation. @@ -360,7 +360,7 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitCallExpr(const CallExpr *E); mlir::Value VisitStmtExpr(StmtExpr *E) { - assert(!MissingFeatures::stmtExprEvaluation() && "NYI"); + assert(!cir::MissingFeatures::stmtExprEvaluation() && "NYI"); Address retAlloca = CGF.buildCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType()); if (!retAlloca.isValid()) @@ -493,14 +493,14 @@ class ScalarExprEmitter : public StmtVisitor { } else { value = builder.create(loc, value.getType(), value, amt); - assert(!MissingFeatures::emitCheckedInBoundsGEP()); + assert(!cir::MissingFeatures::emitCheckedInBoundsGEP()); } } } else if (type->isVectorType()) { llvm_unreachable("no vector inc/dec yet"); } else if (type->isRealFloatingType()) { // TODO(cir): CGFPOptionsRAII - assert(!MissingFeatures::CGFPOptionsRAII()); + assert(!cir::MissingFeatures::CGFPOptionsRAII()); if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { @@ -848,7 +848,7 @@ class ScalarExprEmitter : public StmtVisitor { Result.Opcode = E->getOpcode(); Result.Loc = E->getSourceRange(); // TODO: Result.FPFeatures - assert(!MissingFeatures::getFPFeaturesInEffect()); + assert(!cir::MissingFeatures::getFPFeaturesInEffect()); Result.E = E; return Result; } @@ -1499,7 +1499,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { // LLVM we shall take VLA's, division by element size, etc. // // See more in `EmitSub` in CGExprScalar.cpp. - assert(!MissingFeatures::llvmLoweringPtrDiffConsidersPointee()); + assert(!cir::MissingFeatures::llvmLoweringPtrDiffConsidersPointee()); return Builder.create(CGF.getLoc(Ops.Loc), CGF.PtrDiffTy, Ops.LHS, Ops.RHS); } @@ -1614,7 +1614,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { LValue DestLVal = CGF.makeAddrLValue(DestAddr, DestTy); if (Kind == CK_LValueToRValueBitCast) - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); return buildLoadOfLValue(DestLVal, CE->getExprLoc()); } @@ -1626,7 +1626,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { auto Src = Visit(const_cast(E)); mlir::Type DstTy = CGF.convertType(DestTy); - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { llvm_unreachable("NYI"); } @@ -1636,17 +1636,17 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } // Update heapallocsite metadata when there is an explicit pointer cast. - assert(!MissingFeatures::addHeapAllocSiteMetadata()); + assert(!cir::MissingFeatures::addHeapAllocSiteMetadata()); // If Src is a fixed vector and Dst is a scalable vector, and both have the // same element type, use the llvm.vector.insert intrinsic to perform the // bitcast. - assert(!MissingFeatures::scalableVectors()); + assert(!cir::MissingFeatures::scalableVectors()); // If Src is a scalable vector and Dst is a fixed vector, and both have the // same element type, use the llvm.vector.extract intrinsic to perform the // bitcast. - assert(!MissingFeatures::scalableVectors()); + assert(!cir::MissingFeatures::scalableVectors()); // Perform VLAT <-> VLST bitcast through memory. // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics @@ -1654,7 +1654,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // need to keep this around for bitcasts between VLAT <-> VLST where // the element types of the vectors are not the same, until we figure // out a better way of doing these casts. - assert(!MissingFeatures::scalableVectors()); + assert(!cir::MissingFeatures::scalableVectors()); return CGF.getBuilder().createBitcast(CGF.getLoc(E->getSourceRange()), Src, DstTy); @@ -1707,10 +1707,10 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is // performed and the object is not of the derived type. if (CGF.sanitizePerformTypeCheck()) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType()); } @@ -1741,7 +1741,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { if (MustVisitNullValue(E)) CGF.buildIgnoredExpr(E); - assert(!MissingFeatures::cxxABI()); + assert(!cir::MissingFeatures::cxxABI()); const MemberPointerType *MPT = CE->getType()->getAs(); if (MPT->isMemberFunctionPointerType()) { @@ -1894,14 +1894,14 @@ mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *E) { return buildLoadOfLValue(E); auto V = CGF.buildCallExpr(E).getScalarVal(); - assert(!MissingFeatures::buildLValueAlignmentAssumption()); + assert(!cir::MissingFeatures::buildLValueAlignmentAssumption()); return V; } mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { // TODO(cir): Folding all this constants sound like work for MLIR optimizers, // keep assertion for now. - assert(!MissingFeatures::tryEmitAsConstant()); + assert(!cir::MissingFeatures::tryEmitAsConstant()); Expr::EvalResult Result; if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) { llvm::APSInt Value = Result.Val.getInt(); @@ -1971,8 +1971,9 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { llvm_unreachable("NYI"); if (E->getType()->isVectorType()) { - assert(!MissingFeatures::scalableVectors() && "NYI: scalable vector init"); - assert(!MissingFeatures::vectorConstants() && "NYI: vector constants"); + assert(!cir::MissingFeatures::scalableVectors() && + "NYI: scalable vector init"); + assert(!cir::MissingFeatures::vectorConstants() && "NYI: vector constants"); auto VectorType = mlir::dyn_cast(CGF.getCIRType(E->getType())); SmallVector Elements; @@ -2458,7 +2459,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( // If the dead side doesn't have labels we need, just emit the Live part. if (!CGF.ContainsLabel(dead)) { if (CondExprBool) - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); auto Result = Visit(live); // If the live part is a throw expression, it acts like it has a void @@ -2496,7 +2497,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) { bool lhsIsVoid = false; auto condV = CGF.evaluateExprAsBool(condExpr); - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); return builder .create( @@ -2556,7 +2557,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); eval.begin(CGF); auto lhs = Visit(lhsExpr); eval.end(CGF); @@ -2576,7 +2577,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::incrementProfileCounter()); eval.begin(CGF); auto rhs = Visit(rhsExpr); eval.end(CGF); @@ -2777,7 +2778,7 @@ mlir::Value ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { QualType Ty = VE->getType(); if (Ty->isVariablyModifiedType()) - assert(!MissingFeatures::variablyModifiedTypeEmission() && "NYI"); + assert(!cir::MissingFeatures::variablyModifiedTypeEmission() && "NYI"); Address ArgValue = Address::invalid(); mlir::Value Val = CGF.buildVAArg(VE, ArgValue); @@ -2837,6 +2838,6 @@ mlir::Value CIRGenFunction::buildCheckedInBoundsGEP( // TODO(cir): the unreachable code below hides a substantial amount of code // from the original codegen related with pointer overflow sanitizer. - assert(MissingFeatures::pointerOverflowSanitizer()); + assert(cir::MissingFeatures::pointerOverflowSanitizer()); llvm_unreachable("pointer overflow sanitizer NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 03029c4fc664..155aceed7f2f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -32,8 +32,8 @@ #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Support/LogicalResult.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; using namespace mlir::cir; CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, @@ -54,14 +54,14 @@ CIRGenFunction::~CIRGenFunction() { "missed to deactivate a cleanup"); // TODO(cir): set function is finished. - assert(!MissingFeatures::openMPRuntime()); + assert(!cir::MissingFeatures::openMPRuntime()); // If we have an OpenMPIRBuilder we want to finalize functions (incl. // outlining etc) at some point. Doing it once the function codegen is done // seems to be a reasonable spot. We do it here, as opposed to the deletion // time of the CodeGenModule, because we have to ensure the IR has not yet // been "emitted" to the outside, thus, modifications are still sensible. - assert(!MissingFeatures::openMPRuntime()); + assert(!cir::MissingFeatures::openMPRuntime()); } clang::ASTContext &CIRGenFunction::getContext() const { @@ -72,7 +72,7 @@ mlir::Type CIRGenFunction::ConvertType(QualType T) { return CGM.getTypes().ConvertType(T); } -TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { +cir::TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { type = type.getCanonicalType(); while (true) { switch (type->getTypeClass()) { @@ -108,11 +108,11 @@ TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { case Type::ObjCObjectPointer: case Type::Pipe: case Type::BitInt: - return TEK_Scalar; + return cir::TEK_Scalar; // Complexes. case Type::Complex: - return TEK_Complex; + return cir::TEK_Complex; // Arrays, records, and Objective-C objects. case Type::ConstantArray: @@ -121,7 +121,7 @@ TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { case Type::Record: case Type::ObjCObject: case Type::ObjCInterface: - return TEK_Aggregate; + return cir::TEK_Aggregate; // We operate on atomic values according to their underlying type. case Type::Atomic: @@ -281,11 +281,13 @@ void CIRGenFunction::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, // Count the implicit return. if (!endsWithReturn(CurFuncDecl)) ++NumReturnExprs; - } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) { + } else if (CurFnInfo->getReturnInfo().getKind() == + cir::ABIArgInfo::Indirect) { // TODO(CIR): Consider this implementation in CIRtoLLVM llvm_unreachable("NYI"); // TODO(CIR): Consider this implementation in CIRtoLLVM - } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca) { + } else if (CurFnInfo->getReturnInfo().getKind() == + cir::ABIArgInfo::InAlloca) { llvm_unreachable("NYI"); } else { auto addr = buildAlloca("__retval", ty, loc, alignment); @@ -349,7 +351,7 @@ void CIRGenFunction::LexicalScope::cleanup() { auto applyCleanup = [&]() { if (PerformCleanup) { // ApplyDebugLocation - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); ForceCleanup(); } }; @@ -479,7 +481,7 @@ void CIRGenFunction::LexicalScope::buildImplicitReturn() { FD->getASTContext(), FD->getReturnType()); if (CGF.SanOpts.has(SanitizerKind::Return)) { - assert(!MissingFeatures::sanitizerReturn()); + assert(!cir::MissingFeatures::sanitizerReturn()); llvm_unreachable("NYI"); } else if (shouldEmitUnreachable) { if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { @@ -525,7 +527,7 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // instructions will get the location of the return statements and // all will be fine. if (auto *DI = getDebugInfo()) - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); // Pop any cleanups that might have been associated with the // parameters. Do this in whatever block we're currently in; it's @@ -536,7 +538,7 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // Make sure the line table doesn't jump back into the body for // the ret after it's been at EndLoc. if (auto *DI = getDebugInfo()) - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); // FIXME(cir): vla.c test currently crashes here. // PopCleanupBlocks(PrologueCleanupDepth); } @@ -547,41 +549,41 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // this as part of LexicalScope instead, given CIR might have multiple // blocks with `cir.return`. if (ShouldInstrumentFunction()) { - assert(!MissingFeatures::shouldInstrumentFunction() && "NYI"); + assert(!cir::MissingFeatures::shouldInstrumentFunction() && "NYI"); } // Emit debug descriptor for function end. if (auto *DI = getDebugInfo()) - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); // Reset the debug location to that of the simple 'return' expression, if any // rather than that of the end of the function's scope '}'. - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); - assert(!MissingFeatures::emitFunctionEpilog() && "NYI"); - assert(!MissingFeatures::emitEndEHSpec() && "NYI"); + assert(!cir::MissingFeatures::emitFunctionEpilog() && "NYI"); + assert(!cir::MissingFeatures::emitEndEHSpec() && "NYI"); // FIXME(cir): vla.c test currently crashes here. // assert(EHStack.empty() && "did not remove all scopes from cleanup stack!"); // If someone did an indirect goto, emit the indirect goto block at the end of // the function. - assert(!MissingFeatures::indirectBranch() && "NYI"); + assert(!cir::MissingFeatures::indirectBranch() && "NYI"); // If some of our locals escaped, insert a call to llvm.localescape in the // entry block. - assert(!MissingFeatures::escapedLocals() && "NYI"); + assert(!cir::MissingFeatures::escapedLocals() && "NYI"); // If someone took the address of a label but never did an indirect goto, we // made a zero entry PHI node, which is illegal, zap it now. - assert(!MissingFeatures::indirectBranch() && "NYI"); + assert(!cir::MissingFeatures::indirectBranch() && "NYI"); // CIRGen doesn't need to emit EHResumeBlock, TerminateLandingPad, // TerminateHandler, UnreachableBlock, TerminateFunclets, NormalCleanupDest // here because the basic blocks aren't shared. - assert(!MissingFeatures::emitDeclMetadata() && "NYI"); - assert(!MissingFeatures::deferredReplacements() && "NYI"); + assert(!cir::MissingFeatures::emitDeclMetadata() && "NYI"); + assert(!cir::MissingFeatures::deferredReplacements() && "NYI"); // Add the min-legal-vector-width attribute. This contains the max width from: // 1. min-vector-width attribute used in the source program. @@ -590,10 +592,10 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // 4. Width of vector arguments and return types for this function. // 5. Width of vector arguments and return types for functions called by // this function. - assert(!MissingFeatures::minLegalVectorWidthAttr() && "NYI"); + assert(!cir::MissingFeatures::minLegalVectorWidthAttr() && "NYI"); // Add vscale_range attribute if appropriate. - assert(!MissingFeatures::vscaleRangeAttr() && "NYI"); + assert(!cir::MissingFeatures::vscaleRangeAttr() && "NYI"); // In traditional LLVM codegen, if clang generated an unreachable return // block, it'd be deleted now. Same for unused ret allocas from ReturnValue @@ -631,7 +633,7 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // Check if we should generate debug info for this function. if (FD->hasAttr()) { - assert(!MissingFeatures::noDebugInfo()); + assert(!cir::MissingFeatures::noDebugInfo()); } // The function might not have a body if we're generating thunks for a @@ -664,12 +666,12 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // tests when the time comes, but CIR should be intrinsically scope // accurate, so no need to tie coroutines to such markers. if (isa(Body)) - assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); + assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); // Initialize helper which will detect jumps which can cause invalid // lifetime markers. if (ShouldEmitLifetimeMarkers) - assert(!MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); + assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers() && "NYI"); } // Create a scope in the symbol table to hold variable declarations. @@ -704,12 +706,12 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // is required by certain optimizations. // In C++11 and up, the attribute will be removed if the body contains a // trivial empty loop. - if (MissingFeatures::mustProgress()) + if (cir::MissingFeatures::mustProgress()) llvm_unreachable("NYI"); // Generate the body of the function. // TODO: PGO.assignRegionCounters - assert(!MissingFeatures::shouldInstrumentFunction()); + assert(!cir::MissingFeatures::shouldInstrumentFunction()); if (isa(FD)) buildDestructorBody(Args); else if (isa(FD)) @@ -748,7 +750,7 @@ CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, // If we haven't marked the function nothrow through other means, do a quick // pass now to see if we can. - assert(!MissingFeatures::tryMarkNoThrow()); + assert(!cir::MissingFeatures::tryMarkNoThrow()); return Fn; } @@ -760,7 +762,7 @@ mlir::Value CIRGenFunction::createLoad(const VarDecl *VD, const char *Name) { } void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { - assert(!MissingFeatures::emitAsanPrologueOrEpilogue()); + assert(!cir::MissingFeatures::emitAsanPrologueOrEpilogue()); const auto *Ctor = cast(CurGD.getDecl()); auto CtorType = CurGD.getCtorType(); @@ -832,7 +834,7 @@ LValue CIRGenFunction::MakeNaturalAlignAddrLValue(mlir::Value val, QualType ty) { LValueBaseInfo baseInfo; TBAAAccessInfo tbaaInfo; - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); CharUnits alignment = CGM.getNaturalTypeAlignment(ty, &baseInfo, &tbaaInfo); Address addr(val, getTypes().convertTypeForMem(ty), alignment); return LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); @@ -969,49 +971,49 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // TODO(cir): set llvm::Attribute::NoSanitizeBounds if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); // TODO(cir): set llvm::Attribute::NoSanitizeCoverage if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage()) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); // Some passes need the non-negated no_sanitize attribute. Pass them on. if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) { // TODO(cir): set no_sanitize_thread if (no_sanitize_mask & SanitizerKind::Thread) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); } } if (ShouldSkipSanitizerInstrumentation()) { - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); } else { // Apply sanitizer attributes to the function. if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); if (SanOpts.has(SanitizerKind::MemtagStack)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); if (SanOpts.has(SanitizerKind::Thread)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); if (SanOpts.has(SanitizerKind::NumericalStability)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); } if (SanOpts.has(SanitizerKind::SafeStack)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); if (SanOpts.has(SanitizerKind::ShadowCallStack)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); if (SanOpts.has(SanitizerKind::Realtime)) llvm_unreachable("NYI"); // Apply fuzzing attribute to the function. if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink)) - assert(!MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::sanitizeOther()); // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize, // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time. @@ -1050,17 +1052,17 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // Apply xray attributes to the function (as a string, for now) if (const auto *XRayAttr = D ? D->getAttr() : nullptr) { - assert(!MissingFeatures::xray()); + assert(!cir::MissingFeatures::xray()); } else { - assert(!MissingFeatures::xray()); + assert(!cir::MissingFeatures::xray()); } if (ShouldXRayInstrumentFunction()) { - assert(!MissingFeatures::xray()); + assert(!cir::MissingFeatures::xray()); } if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) { - assert(!MissingFeatures::getProfileCount()); + assert(!cir::MissingFeatures::getProfileCount()); } unsigned Count, Offset; @@ -1168,17 +1170,17 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, llvm_unreachable("NYI"); } - if (MissingFeatures::stackrealign()) + if (cir::MissingFeatures::stackrealign()) llvm_unreachable("NYI"); - if (FD && FD->isMain() && MissingFeatures::zerocallusedregs()) + if (FD && FD->isMain() && cir::MissingFeatures::zerocallusedregs()) llvm_unreachable("NYI"); mlir::Block *EntryBB = &Fn.getBlocks().front(); // TODO: allocapt insertion? probably don't need for CIR - if (MissingFeatures::requiresReturnValueCheck()) + if (cir::MissingFeatures::requiresReturnValueCheck()) llvm_unreachable("NYI"); if (getDebugInfo()) { @@ -1205,7 +1207,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, llvm_unreachable("NYI"); } - assert(!MissingFeatures::emitStartEHSpec() && "NYI"); + assert(!cir::MissingFeatures::emitStartEHSpec() && "NYI"); // FIXME(cir): vla.c test currently crashes here. // PrologueCleanupDepth = EHStack.stable_begin(); @@ -1249,7 +1251,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // TODO: this should live in `buildFunctionProlog` bool isPromoted = isa(paramVar) && cast(paramVar)->isKNRPromoted(); - assert(!MissingFeatures::constructABIArgDirectExtend()); + assert(!cir::MissingFeatures::constructABIArgDirectExtend()); if (isPromoted) paramVal = emitArgumentDemotion(*this, paramVar, paramVal); @@ -1312,7 +1314,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, MD->getParent()->getLambdaCaptureDefault() == LCD_None) SkippedChecks.set(SanitizerKind::Null, true); - assert(!MissingFeatures::buildTypeCheck() && "NYI"); + assert(!cir::MissingFeatures::buildTypeCheck() && "NYI"); } } @@ -1509,7 +1511,7 @@ void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper( return; // TODO(cir): create guard to restore fast math configurations. - assert(!MissingFeatures::fastMathGuard()); + assert(!cir::MissingFeatures::fastMathGuard()); llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode(); // TODO(cir): override rounding behaviour once FM configs are guarded. @@ -1519,17 +1521,17 @@ void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper( // TODO(cir): override exception behaviour once FM configs are guarded. // TODO(cir): override FP flags once FM configs are guarded. - assert(!MissingFeatures::fastMathFlags()); + assert(!cir::MissingFeatures::fastMathFlags()); assert((CGF.CurFuncDecl == nullptr || CGF.builder.getIsFPConstrained() || isa(CGF.CurFuncDecl) || isa(CGF.CurFuncDecl) || - (NewExceptionBehavior == fp::ebIgnore && + (NewExceptionBehavior == cir::fp::ebIgnore && NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) && "FPConstrained should be enabled on entire function"); // TODO(cir): mark CIR function with fast math attributes. - assert(!MissingFeatures::fastMathFuncAttributes()); + assert(!cir::MissingFeatures::fastMathFuncAttributes()); } CIRGenFunction::CIRGenFPOptionsRAII::~CIRGenFPOptionsRAII() { @@ -1561,7 +1563,7 @@ bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *CE) { void CIRGenFunction::buildDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init) { - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); } Address CIRGenFunction::buildVAListRef(const Expr *E) { @@ -1773,7 +1775,7 @@ void CIRGenFunction::buildVariablyModifiedType(QualType type) { mlir::Value &entry = VLASizeMap[sizeExpr]; if (!entry) { mlir::Value size = buildScalarExpr(sizeExpr); - assert(!MissingFeatures::sanitizeVLABound()); + assert(!cir::MissingFeatures::sanitizeVLABound()); // Always zexting here would be wrong if it weren't // undefined behavior to have a negative bound. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 48b89e70ceda..1e92a7bc029b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -51,7 +51,7 @@ class ScalarExprEmitter; class AggExprEmitter; } // namespace -namespace cir { +namespace clang::CIRGen { struct CGCoroData; @@ -158,7 +158,7 @@ class CIRGenFunction : public CIRGenTypeCache { // Work around an extremely aggressive peephole optimization in // EmitScalarConversion which assumes that all other uses of a // value are extant. - assert(!MissingFeatures::peepholeProtection() && "NYI"); + assert(!cir::MissingFeatures::peepholeProtection() && "NYI"); return data; } @@ -172,7 +172,7 @@ class CIRGenFunction : public CIRGenTypeCache { CGF.OpaqueLValues.erase(OpaqueValue); } else { CGF.OpaqueRValues.erase(OpaqueValue); - assert(!MissingFeatures::peepholeProtection() && "NYI"); + assert(!cir::MissingFeatures::peepholeProtection() && "NYI"); } } }; @@ -508,7 +508,7 @@ class CIRGenFunction : public CIRGenTypeCache { void ConstructorHelper(clang::FPOptions FPFeatures); CIRGenFunction &CGF; clang::FPOptions OldFPFeatures; - fp::ExceptionBehavior OldExcept; + cir::fp::ExceptionBehavior OldExcept; llvm::RoundingMode OldRounding; }; clang::FPOptions CurFPFeatures; @@ -560,15 +560,15 @@ class CIRGenFunction : public CIRGenTypeCache { return ConvertType(getContext().getTypeDeclType(T)); } - /// Return the TypeEvaluationKind of QualType \c T. - static TypeEvaluationKind getEvaluationKind(clang::QualType T); + /// Return the cir::TypeEvaluationKind of QualType \c T. + static cir::TypeEvaluationKind getEvaluationKind(clang::QualType T); static bool hasScalarEvaluationKind(clang::QualType T) { - return getEvaluationKind(T) == TEK_Scalar; + return getEvaluationKind(T) == cir::TEK_Scalar; } static bool hasAggregateEvaluationKind(clang::QualType T) { - return getEvaluationKind(T) == TEK_Aggregate; + return getEvaluationKind(T) == cir::TEK_Aggregate; } CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, @@ -1008,8 +1008,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value buildCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, - llvm::SmallVectorImpl &ops, cir::Address ptrOp0, - cir::Address ptrOp1, llvm::Triple::ArchType arch); + llvm::SmallVectorImpl &ops, Address ptrOp0, Address ptrOp1, + llvm::Triple::ArchType arch); mlir::Value buildAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, @@ -1058,7 +1058,7 @@ class CIRGenFunction : public CIRGenTypeCache { bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; } /// Returns true inside SEH __try blocks. - bool isSEHTryScope() const { return MissingFeatures::isSEHTryScope(); } + bool isSEHTryScope() const { return cir::MissingFeatures::isSEHTryScope(); } mlir::Operation *CurrentFuncletPad = nullptr; @@ -2145,7 +2145,7 @@ class CIRGenFunction : public CIRGenTypeCache { ~LexicalScope() { // EmitLexicalBlockEnd - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); // If we should perform a cleanup, force them now. Note that // this ends the cleanup scope before rescoping any labels. cleanup(); @@ -2548,6 +2548,6 @@ template <> struct DominatingValue { } }; -} // namespace cir +} // namespace clang::CIRGen #endif // LLVM_CLANG_LIB_CIR_CIRGENFUNCTION_H diff --git a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h index a07f62fe28d7..c93fa188f717 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h @@ -22,11 +22,11 @@ #include "llvm/ADT/FoldingSet.h" #include "llvm/Support/TrailingObjects.h" -namespace cir { +namespace clang::CIRGen { struct CIRGenFunctionInfoArgInfo { clang::CanQualType type; - ABIArgInfo info; + cir::ABIArgInfo info; }; /// A class for recording the number of arguments that a function signature @@ -264,8 +264,10 @@ class CIRGenFunctionInfo final clang::CanQualType getReturnType() const { return getArgsBuffer()[0].type; } - ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; } - const ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; } + cir::ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; } + const cir::ABIArgInfo &getReturnInfo() const { + return getArgsBuffer()[0].info; + } bool isChainCall() const { return ChainCall; } @@ -281,6 +283,6 @@ class CIRGenFunctionInfo final bool usesInAlloca() const { return ArgStruct; } }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index c6bd049e9224..dd79a1b7aaff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -30,11 +30,11 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "llvm/Support/ErrorHandling.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; namespace { -class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { +class CIRGenItaniumCXXABI : public CIRGenCXXABI { /// All the vtables which have been defined. llvm::DenseMap VTables; @@ -44,7 +44,7 @@ class CIRGenItaniumCXXABI : public cir::CIRGenCXXABI { bool Use32BitVTableOffsetABI; ItaniumMangleContext &getMangleContext() { - return cast(cir::CIRGenCXXABI::getMangleContext()); + return cast(CIRGenCXXABI::getMangleContext()); } bool isVTableHidden(const CXXRecordDecl *RD) const { @@ -396,14 +396,14 @@ bool CIRGenItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { return false; } -CIRGenCXXABI *cir::CreateCIRGenItaniumCXXABI(CIRGenModule &CGM) { +CIRGenCXXABI *clang::CIRGen::CreateCIRGenItaniumCXXABI(CIRGenModule &CGM) { switch (CGM.getASTContext().getCXXABIKind()) { case TargetCXXABI::GenericItanium: case TargetCXXABI::GenericAArch64: case TargetCXXABI::AppleARM64: // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits // from ARMCXXABI. We'll have to follow suit. - assert(!MissingFeatures::appleArm64CXXABI()); + assert(!cir::MissingFeatures::appleArm64CXXABI()); return new CIRGenItaniumCXXABI(CGM); default: @@ -733,8 +733,8 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, } // Scalars and complexes. - TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); - if (TEK != TEK_Aggregate) { + cir::TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); + if (TEK != cir::TEK_Aggregate) { // Notes for LLVM lowering: // If the catch type is a pointer type, __cxa_begin_catch returns // the pointer by value. @@ -745,7 +745,7 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, case Qualifiers::OCL_Strong: llvm_unreachable("NYI"); // arc retain non block: - assert(!MissingFeatures::ARC()); + assert(!cir::MissingFeatures::ARC()); [[fallthrough]]; case Qualifiers::OCL_None: @@ -758,7 +758,7 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, case Qualifiers::OCL_Weak: llvm_unreachable("NYI"); // arc init weak: - assert(!MissingFeatures::ARC()); + assert(!cir::MissingFeatures::ARC()); return; } llvm_unreachable("bad ownership qualifier!"); @@ -770,15 +770,15 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, LValue srcLV = CGF.MakeNaturalAlignAddrLValue(catchParam, CatchType); LValue destLV = CGF.makeAddrLValue(ParamAddr, CatchType); switch (TEK) { - case TEK_Complex: + case cir::TEK_Complex: llvm_unreachable("NYI"); return; - case TEK_Scalar: { + case cir::TEK_Scalar: { auto exnLoad = CGF.buildLoadOfScalar(srcLV, catchParam.getLoc()); CGF.buildStoreOfScalar(exnLoad, destLV, /*init*/ true); return; } - case TEK_Aggregate: + case cir::TEK_Aggregate: llvm_unreachable("evaluation kind filtered out!"); } llvm_unreachable("bad evaluation kind"); @@ -882,7 +882,7 @@ CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, mlir::cir::GlobalLinkageKind::ExternalLinkage, getContext().toCharUnitsFromBits(PAlign)); // LLVM codegen handles unnamedAddr - assert(!MissingFeatures::unnamedAddr()); + assert(!cir::MissingFeatures::unnamedAddr()); // In MS C++ if you have a class with virtual functions in which you are using // selective member import/export, then all virtual functions must be exported @@ -1511,8 +1511,8 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(mlir::Location loc, // Give the type_info object and name the formal visibility of the // type itself. - assert(!MissingFeatures::hiddenVisibility()); - assert(!MissingFeatures::protectedVisibility()); + assert(!cir::MissingFeatures::hiddenVisibility()); + assert(!cir::MissingFeatures::protectedVisibility()); mlir::SymbolTable::Visibility symVisibility; if (mlir::cir::isLocalLinkage(Linkage)) // If the linkage is local, only default visibility makes sense. @@ -1523,7 +1523,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(mlir::Location loc, else symVisibility = CIRGenModule::getCIRVisibility(Ty->getVisibility()); - assert(!MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::setDLLStorageClass()); return BuildTypeInfo(loc, Ty, Linkage, symVisibility); } @@ -1656,7 +1656,7 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, CGM.getBuilder().getUInt8PtrTy()); } - if (MissingFeatures::setDSOLocal()) + if (cir::MissingFeatures::setDSOLocal()) llvm_unreachable("NYI"); // The vtable address point is 2. @@ -1896,7 +1896,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( mlir::Location loc, QualType Ty, mlir::cir::GlobalLinkageKind Linkage, mlir::SymbolTable::Visibility Visibility) { auto &builder = CGM.getBuilder(); - assert(!MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::setDLLStorageClass()); // Add the vtable pointer. BuildVTablePointer(loc, cast(Ty)); @@ -2013,7 +2013,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( break; } - assert(!MissingFeatures::setDLLImportDLLExport()); + assert(!cir::MissingFeatures::setDLLImportDLLExport()); auto init = builder.getTypeInfo(builder.getArrayAttr(Fields)); SmallString<256> Name; @@ -2044,7 +2044,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( } if (CGM.supportsCOMDAT() && mlir::cir::isWeakForLinker(GV.getLinkage())) { - assert(!MissingFeatures::setComdat()); + assert(!cir::MissingFeatures::setComdat()); llvm_unreachable("NYI"); } @@ -2068,16 +2068,16 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( // object and the type_info name be uniqued when weakly emitted. // TODO(cir): setup other bits for TypeName - assert(!MissingFeatures::setDLLStorageClass()); - assert(!MissingFeatures::setPartition()); - assert(!MissingFeatures::setDSOLocal()); + assert(!cir::MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::setPartition()); + assert(!cir::MissingFeatures::setDSOLocal()); mlir::SymbolTable::setSymbolVisibility( TypeName, CIRGenModule::getMLIRVisibility(TypeName)); // TODO(cir): setup other bits for GV - assert(!MissingFeatures::setDLLStorageClass()); - assert(!MissingFeatures::setPartition()); - assert(!MissingFeatures::setDSOLocal()); + assert(!cir::MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::setPartition()); + assert(!cir::MissingFeatures::setDSOLocal()); CIRGenModule::setInitializer(GV, init); return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), GV); @@ -2113,7 +2113,7 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, VTable.setLinkage(Linkage); if (CGM.supportsCOMDAT() && mlir::cir::isWeakForLinker(Linkage)) { - assert(!MissingFeatures::setComdat()); + assert(!cir::MissingFeatures::setComdat()); } // Set the right visibility. @@ -2150,7 +2150,7 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, if (isDeclarationForLinker) { llvm_unreachable("NYI"); assert(CGM.getCodeGenOpts().WholeProgramVTables); - assert(!MissingFeatures::addCompilerUsedGlobal()); + assert(!cir::MissingFeatures::addCompilerUsedGlobal()); } } @@ -2286,7 +2286,7 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, mlir::FlatSymbolRefAttr dtor{}; if (const RecordType *recordTy = clangThrowType->getAs()) { CXXRecordDecl *rec = cast(recordTy->getDecl()); - assert(!MissingFeatures::isTrivialCtorOrDtor()); + assert(!cir::MissingFeatures::isTrivialCtorOrDtor()); if (!rec->hasTrivialDestructor()) { CXXDestructorDecl *dtorD = rec->getDestructor(); dtor = mlir::FlatSymbolRefAttr::get( @@ -2336,7 +2336,7 @@ static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { // Prototype: void __cxa_bad_cast(); // TODO(cir): set the calling convention of the runtime function. - assert(!MissingFeatures::setCallingConv()); + assert(!cir::MissingFeatures::setCallingConv()); mlir::cir::FuncType FTy = CGF.getBuilder().getFuncType({}, CGF.getBuilder().getVoidTy()); @@ -2345,7 +2345,7 @@ static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { static void buildCallToBadCast(CIRGenFunction &CGF, mlir::Location loc) { // TODO(cir): set the calling convention to the runtime function. - assert(!MissingFeatures::setCallingConv()); + assert(!cir::MissingFeatures::setCallingConv()); CGF.buildRuntimeCall(loc, getBadCastFn(CGF)); CGF.getBuilder().create(loc); @@ -2421,7 +2421,7 @@ static mlir::cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { // TODO(cir): mark the function as nowind readonly. // TODO(cir): set the calling convention of the runtime function. - assert(!MissingFeatures::setCallingConv()); + assert(!cir::MissingFeatures::setCallingConv()); mlir::cir::FuncType FTy = CGF.getBuilder().getFuncType( {VoidPtrTy, RTTIPtrTy, RTTIPtrTy, PtrDiffTy}, VoidPtrTy); @@ -2513,7 +2513,7 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, ABI.getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl); // TODO(cir): handle address space here. - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); mlir::Type VPtrTy = ExpectedVPtr.getType(); mlir::Type VPtrPtrTy = CGF.getBuilder().getPointerTo(VPtrTy); Address SrcVPtrPtr( @@ -2522,7 +2522,7 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, mlir::Value SrcVPtr = CGF.getBuilder().createLoad(Loc, SrcVPtrPtr); // TODO(cir): decorate SrcVPtr with TBAA info. - assert(!MissingFeatures::tbaa()); + assert(!cir::MissingFeatures::tbaa()); mlir::Value Success = CGF.getBuilder().createCompare( Loc, mlir::cir::CmpOpKind::eq, SrcVPtr, ExpectedVPtr); @@ -2532,7 +2532,7 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, return CGF.getBuilder().createBitcast(Src.getPointer(), DestCIRTy); // TODO(cir): handle address space here. - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); mlir::Type U8PtrTy = CGF.getBuilder().getPointerTo(CGF.getBuilder().getUInt8Ty()); @@ -2641,4 +2641,4 @@ CIRGenItaniumCXXABI::buildVirtualMethodAttr(mlir::cir::MethodType MethodTy, /// member pointers, for which '0' is a valid offset. bool CIRGenItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) { return MPT->isMemberFunctionPointer(); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 7a65329325f0..cdf3f4b93ac0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -82,8 +82,8 @@ #include using namespace mlir::cir; -using namespace cir; using namespace clang; +using namespace clang::CIRGen; using llvm::cast; using llvm::dyn_cast; @@ -388,7 +388,7 @@ static bool shouldAssumeDSOLocal(const CIRGenModule &CGM, // DLLImport explicitly marks the GV as external. // so it shouldn't be dso_local // But we don't have the info set now - assert(!MissingFeatures::setDLLImportDLLExport()); + assert(!cir::MissingFeatures::setDLLImportDLLExport()); const llvm::Triple &TT = CGM.getTriple(); const auto &CGOpts = CGM.getCodeGenOpts(); @@ -506,15 +506,15 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { if (langOpts.OpenMP) { // If this is OpenMP, check if it is legal to emit this global normally. if (openMPRuntime && openMPRuntime->emitTargetGlobal(GD)) { - assert(!MissingFeatures::openMPRuntime()); + assert(!cir::MissingFeatures::openMPRuntime()); return; } if (auto *DRD = dyn_cast(Global)) { - assert(!MissingFeatures::openMP()); + assert(!cir::MissingFeatures::openMP()); return; } if (auto *DMD = dyn_cast(Global)) { - assert(!MissingFeatures::openMP()); + assert(!cir::MissingFeatures::openMP()); return; } } @@ -721,10 +721,10 @@ void CIRGenModule::setCommonAttributes(GlobalDecl GD, mlir::Operation *GV) { if (isa_and_nonnull(D)) setGVProperties(GV, dyn_cast(D)); else - assert(!MissingFeatures::setDefaultVisibility()); + assert(!cir::MissingFeatures::setDefaultVisibility()); if (D && D->hasAttr()) - assert(!MissingFeatures::addUsedOrCompilerUsedGlobal()); + assert(!cir::MissingFeatures::addUsedOrCompilerUsedGlobal()); if (const auto *VD = dyn_cast_if_present(D); VD && @@ -733,7 +733,7 @@ void CIRGenModule::setCommonAttributes(GlobalDecl GD, mlir::Operation *GV) { VD->getStorageDuration() == SD_Thread)) || (codeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static && VD->getType().isConstQualified()))) - assert(!MissingFeatures::addUsedOrCompilerUsedGlobal()); + assert(!cir::MissingFeatures::addUsedOrCompilerUsedGlobal()); } void CIRGenModule::setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GO) { @@ -744,40 +744,40 @@ void CIRGenModule::setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GO) { auto GV = llvm::dyn_cast_or_null(GO); if (GV) { if (D->hasAttr()) - assert(!MissingFeatures::addUsedGlobal()); + assert(!cir::MissingFeatures::addUsedGlobal()); if (auto *SA = D->getAttr()) - assert(!MissingFeatures::addSectionAttributes()); + assert(!cir::MissingFeatures::addSectionAttributes()); if (auto *SA = D->getAttr()) - assert(!MissingFeatures::addSectionAttributes()); + assert(!cir::MissingFeatures::addSectionAttributes()); if (auto *SA = D->getAttr()) - assert(!MissingFeatures::addSectionAttributes()); + assert(!cir::MissingFeatures::addSectionAttributes()); if (auto *SA = D->getAttr()) - assert(!MissingFeatures::addSectionAttributes()); + assert(!cir::MissingFeatures::addSectionAttributes()); } auto F = llvm::dyn_cast_or_null(GO); if (F) { if (D->hasAttr()) - assert(!MissingFeatures::addUsedGlobal()); + assert(!cir::MissingFeatures::addUsedGlobal()); if (auto *SA = D->getAttr()) if (!D->getAttr()) - assert(!MissingFeatures::setSectionForFuncOp()); + assert(!cir::MissingFeatures::setSectionForFuncOp()); - assert(!MissingFeatures::updateCPUAndFeaturesAttributes()); + assert(!cir::MissingFeatures::updateCPUAndFeaturesAttributes()); } if (const auto *CSA = D->getAttr()) { - assert(!MissingFeatures::setSectionForFuncOp()); + assert(!cir::MissingFeatures::setSectionForFuncOp()); if (GV) GV.setSection(CSA->getName()); if (F) - assert(!MissingFeatures::setSectionForFuncOp()); + assert(!cir::MissingFeatures::setSectionForFuncOp()); } else if (const auto *SA = D->getAttr()) if (GV) GV.setSection(SA->getName()); if (F) - assert(!MissingFeatures::setSectionForFuncOp()); + assert(!cir::MissingFeatures::setSectionForFuncOp()); } - assert(!MissingFeatures::setTargetAttributes()); + assert(!cir::MissingFeatures::setTargetAttributes()); } void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, @@ -884,7 +884,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // Handle dropped DLL attributes. if (D && !D->hasAttr() && !D->hasAttr()) - assert(!MissingFeatures::setDLLStorageClass() && "NYI"); + assert(!cir::MissingFeatures::setDLLStorageClass() && "NYI"); if (langOpts.OpenMP && !langOpts.OpenMPSimd && D) getOpenMPRuntime().registerTargetGlobalVariable(D, Entry); @@ -1334,11 +1334,11 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, GV.setVisibility(getMLIRVisibilityFromCIRLinkage(Linkage)); // TODO(cir): handle DLL storage classes in CIR? if (D->hasAttr()) - assert(!MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::setDLLStorageClass()); else if (D->hasAttr()) - assert(!MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::setDLLStorageClass()); else - assert(!MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::setDLLStorageClass()); if (Linkage == mlir::cir::GlobalLinkageKind::CommonLinkage) { // common vars aren't constant even if declared const. @@ -1372,8 +1372,8 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // TODO(cir): sanitizers (reportGlobalToASan) and global variable debug // information. - assert(!MissingFeatures::sanitizeOther()); - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::sanitizeOther()); + assert(!cir::MissingFeatures::generateDebugInfo()); } void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { @@ -1765,7 +1765,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { break; case Decl::ClassTemplateSpecialization: { // const auto *Spec = cast(decl); - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); } [[fallthrough]]; case Decl::CXXRecord: { @@ -1792,7 +1792,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { case Decl::UsingEnum: // using enum X; [C++] case Decl::NamespaceAlias: case Decl::UsingDirective: // using namespace X; [C++] - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); break; case Decl::CXXConstructor: getCXXABI().buildCXXConstructors(cast(decl)); @@ -1813,7 +1813,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { case Decl::TypeAlias: // using foo = bar; [C++11] case Decl::Record: case Decl::Enum: - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); break; } } @@ -2081,9 +2081,9 @@ void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( return; // TODO(cir): this RAUW ignores the features below. - assert(!MissingFeatures::exceptions() && "Call vs Invoke NYI"); - assert(!MissingFeatures::parameterAttributes()); - assert(!MissingFeatures::operandBundles()); + assert(!cir::MissingFeatures::exceptions() && "Call vs Invoke NYI"); + assert(!cir::MissingFeatures::parameterAttributes()); + assert(!cir::MissingFeatures::operandBundles()); assert(OldFn->getAttrs().size() > 1 && "Attribute forwarding NYI"); // Mark new function as originated from a no-proto declaration. @@ -2157,7 +2157,7 @@ void CIRGenModule::buildAliasForGlobal(StringRef mangledName, alias, mlir::SymbolTable::Visibility::Private); // Alias constructors and destructors are always unnamed_addr. - assert(!MissingFeatures::unnamedAddr()); + assert(!cir::MissingFeatures::unnamedAddr()); // Switch any previous uses to the alias. if (op) { @@ -2354,11 +2354,11 @@ void CIRGenModule::buildTentativeDefinition(const VarDecl *D) { void CIRGenModule::setGlobalVisibility(mlir::Operation *GV, const NamedDecl *D) const { - assert(!MissingFeatures::setGlobalVisibility()); + assert(!cir::MissingFeatures::setGlobalVisibility()); } void CIRGenModule::setDSOLocal(mlir::Operation *Op) const { - assert(!MissingFeatures::setDSOLocal()); + assert(!cir::MissingFeatures::setDSOLocal()); if (auto globalValue = dyn_cast(Op)) { setDSOLocal(globalValue); } @@ -2366,7 +2366,7 @@ void CIRGenModule::setDSOLocal(mlir::Operation *Op) const { void CIRGenModule::setGVProperties(mlir::Operation *Op, const NamedDecl *D) const { - assert(!MissingFeatures::setDLLImportDLLExport()); + assert(!cir::MissingFeatures::setDLLImportDLLExport()); setGVPropertiesAux(Op, D); } @@ -2374,7 +2374,7 @@ void CIRGenModule::setGVPropertiesAux(mlir::Operation *Op, const NamedDecl *D) const { setGlobalVisibility(Op, D); setDSOLocal(Op); - assert(!MissingFeatures::setPartition()); + assert(!cir::MissingFeatures::setPartition()); } bool CIRGenModule::lookupRepresentativeDecl(StringRef MangledName, @@ -2613,7 +2613,7 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl, } // TODO(cir): Complete the remaining part of the function. - assert(!MissingFeatures::setFunctionAttributes()); + assert(!cir::MissingFeatures::setFunctionAttributes()); // TODO(cir): This needs a lot of work to better match CodeGen. That // ultimately ends up in setGlobalVisibility, which already has the linkage of @@ -2808,7 +2808,7 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( } // TODO(cir): Might need bitcast to different address space. - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); return F; } @@ -3135,7 +3135,7 @@ bool CIRGenModule::shouldEmitFunction(GlobalDecl globalDecl) { return false; if (func->hasAttr() && !func->hasAttr()) - assert(!MissingFeatures::setDLLImportDLLExport() && + assert(!cir::MissingFeatures::setDLLImportDLLExport() && "shouldEmitFunction for dllimport is NYI"); // PR9614. Avoid cases where the source code is lying to us. An available @@ -3158,7 +3158,7 @@ void CIRGenModule::maybeSetTrivialComdat(const Decl &d, mlir::Operation *op) { globalOp.setComdat(true); // Keep it as missing feature as we need to implement comdat for FuncOp. // in the future. - assert(!MissingFeatures::setComdat() && "NYI"); + assert(!cir::MissingFeatures::setComdat() && "NYI"); } bool CIRGenModule::isInNoSanitizeList(SanitizerMask Kind, mlir::cir::FuncOp Fn, @@ -3230,7 +3230,7 @@ void CIRGenModule::buildExplicitCastExprType(const ExplicitCastExpr *E, if (CGF && E->getType()->isVariablyModifiedType()) llvm_unreachable("NYI"); - assert(!MissingFeatures::generateDebugInfo() && "NYI"); + assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); } void CIRGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { @@ -3347,7 +3347,7 @@ mlir::cir::GlobalOp CIRGenModule::getOrInsertGlobal( // If the variable exists but has the wrong type, return a bitcast to the // right type. auto GVTy = GV.getSymType(); - assert(!MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); auto PTy = builder.getPointerTo(Ty); if (GVTy != PTy) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index c2728a172473..5094370c9d2f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -44,8 +44,7 @@ #include "mlir/IR/MLIRContext.h" #include "mlir/IR/Value.h" -using namespace clang; -namespace cir { +namespace clang::CIRGen { class CIRGenFunction; class CIRGenCXXABI; @@ -160,7 +159,7 @@ class CIRGenModule : public CIRGenTypeCache { CIRGenTypes &getTypes() { return genTypes; } const clang::LangOptions &getLangOpts() const { return langOpts; } CIRGenFunction *getCurrCIRGenFun() const { return CurCGF; } - const CIRDataLayout getDataLayout() const { + const cir::CIRDataLayout getDataLayout() const { // FIXME(cir): instead of creating a CIRDataLayout every time, set it as an // attribute for the CIRModule class. return {theModule}; @@ -853,6 +852,6 @@ class CIRGenModule : public CIRGenTypeCache { /// Those annotations are emitted during lowering to the LLVM code. void addGlobalAnnotations(const ValueDecl *d, mlir::Operation *gv); }; -} // namespace cir +} // namespace clang::CIRGen #endif // LLVM_CLANG_LIB_CODEGEN_CIRGENMODULE_H diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp index 9af4272ad679..116255b36f26 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp @@ -13,8 +13,8 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; // Returns the address space id that should be produced to the // kernel_arg_addr_space metadata. This is always fixed to the ids @@ -71,7 +71,7 @@ void CIRGenModule::genKernelArgMetadata(mlir::cir::FuncOp Fn, SmallVector argNames; // OpenCL image and pipe types require special treatments for some metadata - assert(!MissingFeatures::openCLBuiltinTypes()); + assert(!cir::MissingFeatures::openCLBuiltinTypes()); if (FD && CGF) for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) { diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp index 863caf8629d2..a8e0f7dbeb5f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp @@ -18,7 +18,7 @@ #include "clang/CIR/Dialect/IR/CIROpsEnums.h" using namespace clang; -using namespace cir; +using namespace clang::CIRGen; CIRGenOpenCLRuntime::~CIRGenOpenCLRuntime() {} diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h index 891b5bb5fb79..f08ed0bf31e8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h @@ -21,7 +21,7 @@ class VarDecl; } // namespace clang -namespace cir { +namespace clang::CIRGen { class CIRGenFunction; class CIRGenModule; @@ -41,6 +41,6 @@ class CIRGenOpenCLRuntime { const clang::VarDecl &D); }; -} // namespace cir +} // namespace clang::CIRGen #endif // LLVM_CLANG_LIB_CIR_CIRGENOPENCLRUNTIME_H diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp index fa2cc5a174b3..382291fddfea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp @@ -14,42 +14,42 @@ #include "CIRGenFunction.h" #include "CIRGenModule.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; CIRGenOpenMPRuntime::CIRGenOpenMPRuntime(CIRGenModule &CGM) : CGM(CGM) {} Address CIRGenOpenMPRuntime::getAddressOfLocalVariable(CIRGenFunction &CGF, const VarDecl *VD) { - assert(!MissingFeatures::openMPRuntime()); + assert(!cir::MissingFeatures::openMPRuntime()); return Address::invalid(); } void CIRGenOpenMPRuntime::checkAndEmitLastprivateConditional( CIRGenFunction &CGF, const Expr *LHS) { - assert(!MissingFeatures::openMPRuntime()); + assert(!cir::MissingFeatures::openMPRuntime()); return; } void CIRGenOpenMPRuntime::registerTargetGlobalVariable( const clang::VarDecl *VD, mlir::cir::GlobalOp globalOp) { - assert(!MissingFeatures::openMPRuntime()); + assert(!cir::MissingFeatures::openMPRuntime()); return; } void CIRGenOpenMPRuntime::emitDeferredTargetDecls() const { - assert(!MissingFeatures::openMPRuntime()); + assert(!cir::MissingFeatures::openMPRuntime()); return; } void CIRGenOpenMPRuntime::emitFunctionProlog(CIRGenFunction &CGF, const clang::Decl *D) { - assert(!MissingFeatures::openMPRuntime()); + assert(!cir::MissingFeatures::openMPRuntime()); return; } bool CIRGenOpenMPRuntime::emitTargetGlobal(clang::GlobalDecl &GD) { - assert(!MissingFeatures::openMPRuntime()); + assert(!cir::MissingFeatures::openMPRuntime()); return false; } @@ -70,14 +70,14 @@ void CIRGenOpenMPRuntime::emitTaskWaitCall(CIRGenBuilderTy &builder, } else { llvm_unreachable("NYI"); } - assert(!MissingFeatures::openMPRegionInfo()); + assert(!cir::MissingFeatures::openMPRegionInfo()); } void CIRGenOpenMPRuntime::emitBarrierCall(CIRGenBuilderTy &builder, CIRGenFunction &CGF, mlir::Location Loc) { - assert(!MissingFeatures::openMPRegionInfo()); + assert(!cir::MissingFeatures::openMPRegionInfo()); if (CGF.CGM.getLangOpts().OpenMPIRBuilder) { builder.create(Loc); @@ -103,5 +103,5 @@ void CIRGenOpenMPRuntime::emitTaskyieldCall(CIRGenBuilderTy &builder, llvm_unreachable("NYI"); } - assert(!MissingFeatures::openMPRegionInfo()); + assert(!cir::MissingFeatures::openMPRegionInfo()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h index 15a47eddd58c..8c1c4f4a19f7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h +++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h @@ -35,7 +35,7 @@ class GlobalDecl; class VarDecl; } // namespace clang -namespace cir { +namespace clang::CIRGen { class CIRGenModule; class CIRGenFunction; @@ -108,6 +108,6 @@ class CIRGenOpenMPRuntime { protected: CIRGenModule &CGM; }; -} // namespace cir +} // namespace clang::CIRGen #endif // LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENOPENMPRUNTIME_H diff --git a/clang/lib/CIR/CodeGen/CIRGenPointerAuth.cpp b/clang/lib/CIR/CodeGen/CIRGenPointerAuth.cpp index 842cb361423f..77451800b388 100644 --- a/clang/lib/CIR/CodeGen/CIRGenPointerAuth.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenPointerAuth.cpp @@ -14,10 +14,10 @@ #include "CIRGenFunction.h" using namespace clang; -using namespace cir; +using namespace clang::CIRGen; Address CIRGenFunction::getAsNaturalAddressOf(Address Addr, QualType PointeeTy) { - assert(!MissingFeatures::ptrAuth() && "NYI"); + assert(!cir::MissingFeatures::ptrAuth() && "NYI"); return Addr; -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index 16a8a1e2894e..a7bb94effd8d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -14,7 +14,7 @@ #include "llvm/Support/raw_ostream.h" -namespace cir { +namespace clang::CIRGen { /// Structure with information about how a bitfield should be accessed. This is /// very similar to what LLVM codegen does, once CIR evolves it's possible we @@ -205,6 +205,6 @@ class CIRGenRecordLayout { } }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index c3c5562bcdf1..8d39419a2b97 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -20,8 +20,8 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/ErrorHandling.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; using namespace mlir::cir; Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S, @@ -83,7 +83,7 @@ Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast, } void CIRGenFunction::buildStopPoint(const Stmt *S) { - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); } // Build CIR for a statement. useCurrentScope should be true if no @@ -425,11 +425,11 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { // TODO(cir): there is still an empty cir.scope generated by the caller. return mlir::success(); } - assert(!MissingFeatures::constantFoldsToSimpleInteger()); + assert(!cir::MissingFeatures::constantFoldsToSimpleInteger()); } - assert(!MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); - assert(!MissingFeatures::incrementProfileCounter()); + assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); + assert(!cir::MissingFeatures::incrementProfileCounter()); return buildIfOnBoolExpr(S.getCond(), S.getThen(), S.getElse()); }; @@ -461,7 +461,7 @@ mlir::LogicalResult CIRGenFunction::buildDeclStmt(const DeclStmt &S) { } mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { - assert(!MissingFeatures::requiresReturnValueCheck()); + assert(!cir::MissingFeatures::requiresReturnValueCheck()); auto loc = getLoc(S.getSourceRange()); // Emit the result value, even if unused, to evaluate the side effects. @@ -479,7 +479,7 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { auto handleReturnVal = [&]() { if (getContext().getLangOpts().ElideConstructors && S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) { - assert(!MissingFeatures::openMP()); + assert(!cir::MissingFeatures::openMP()); // Apply the named return value optimization for this return statement, // which means doing nothing: the appropriate result has already been // constructed into the NRVO variable. @@ -504,16 +504,16 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { } else { mlir::Value V = nullptr; switch (CIRGenFunction::getEvaluationKind(RV->getType())) { - case TEK_Scalar: + case cir::TEK_Scalar: V = buildScalarExpr(RV); builder.CIRBaseBuilderTy::createStore(loc, V, *FnRetAlloca); break; - case TEK_Complex: + case cir::TEK_Complex: buildComplexExprIntoLValue(RV, makeAddrLValue(ReturnValue, RV->getType()), /*isInit*/ true); break; - case TEK_Aggregate: + case cir::TEK_Aggregate: buildAggExpr( RV, AggValueSlot::forAddr( ReturnValue, Qualifiers(), AggValueSlot::IsDestructed, @@ -802,19 +802,19 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, if (buildStmt(S.getEndStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - assert(!MissingFeatures::loopInfoStack()); + assert(!cir::MissingFeatures::loopInfoStack()); // From LLVM: if there are any cleanups between here and the loop-exit // scope, create a block to stage a loop exit along. // We probably already do the right thing because of ScopeOp, but make // sure we handle all cases. - assert(!MissingFeatures::requiresCleanups()); + assert(!cir::MissingFeatures::requiresCleanups()); forOp = builder.createFor( getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - assert(!MissingFeatures::createProfileWeightsForLoop()); - assert(!MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); + assert(!cir::MissingFeatures::createProfileWeightsForLoop()); + assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal = evaluateExprAsBool(S.getCond()); builder.createCondition(condVal); }, @@ -869,19 +869,19 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { if (S.getInit()) if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - assert(!MissingFeatures::loopInfoStack()); + assert(!cir::MissingFeatures::loopInfoStack()); // From LLVM: if there are any cleanups between here and the loop-exit // scope, create a block to stage a loop exit along. // We probably already do the right thing because of ScopeOp, but make // sure we handle all cases. - assert(!MissingFeatures::requiresCleanups()); + assert(!cir::MissingFeatures::requiresCleanups()); forOp = builder.createFor( getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - assert(!MissingFeatures::createProfileWeightsForLoop()); - assert(!MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); + assert(!cir::MissingFeatures::createProfileWeightsForLoop()); + assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal; if (S.getCond()) { // If the for statement has a condition scope, @@ -944,19 +944,19 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { // TODO: pass in array of attributes. auto doStmtBuilder = [&]() -> mlir::LogicalResult { auto loopRes = mlir::success(); - assert(!MissingFeatures::loopInfoStack()); + assert(!cir::MissingFeatures::loopInfoStack()); // From LLVM: if there are any cleanups between here and the loop-exit // scope, create a block to stage a loop exit along. // We probably already do the right thing because of ScopeOp, but make // sure we handle all cases. - assert(!MissingFeatures::requiresCleanups()); + assert(!cir::MissingFeatures::requiresCleanups()); doWhileOp = builder.createDoWhile( getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - assert(!MissingFeatures::createProfileWeightsForLoop()); - assert(!MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); + assert(!cir::MissingFeatures::createProfileWeightsForLoop()); + assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); // C99 6.8.5p2/p4: The first substatement is executed if the // expression compares unequal to 0. The condition must be a // scalar type. @@ -994,19 +994,19 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // TODO: pass in array of attributes. auto whileStmtBuilder = [&]() -> mlir::LogicalResult { auto loopRes = mlir::success(); - assert(!MissingFeatures::loopInfoStack()); + assert(!cir::MissingFeatures::loopInfoStack()); // From LLVM: if there are any cleanups between here and the loop-exit // scope, create a block to stage a loop exit along. // We probably already do the right thing because of ScopeOp, but make // sure we handle all cases. - assert(!MissingFeatures::requiresCleanups()); + assert(!cir::MissingFeatures::requiresCleanups()); whileOp = builder.createWhile( getLoc(S.getSourceRange()), /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - assert(!MissingFeatures::createProfileWeightsForLoop()); - assert(!MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); + assert(!cir::MissingFeatures::createProfileWeightsForLoop()); + assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); mlir::Value condVal; // If the for statement has a condition scope, // emit the local variable declaration. diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp index 0c996156f71e..999d5be7ba3b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp @@ -28,8 +28,8 @@ #include "mlir/IR/Value.h" #include "mlir/Support/LogicalResult.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; using namespace mlir::omp; static void buildDependences(const OMPExecutableDirective &S, @@ -118,4 +118,4 @@ CIRGenFunction::buildOMPBarrierDirective(const OMPBarrierDirective &S) { CGM.getOpenMPRuntime().emitBarrierCall(builder, *this, getLoc(S.getSourceRange())); return res; -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h index ab5ac9b575c0..2b33f0da16d4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.h +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.h @@ -14,7 +14,7 @@ #ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENTBAA_H #define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENTBAA_H -namespace cir { +namespace clang::CIRGen { // TBAAAccessInfo - Describes a memory access in terms of TBAA. struct TBAAAccessInfo {}; @@ -23,6 +23,6 @@ struct TBAAAccessInfo {}; /// lowering AST types to LLVM types. class CIRGenTBAA {}; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index f07c76e94760..e8ce46e409c0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -21,7 +21,7 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/MissingFeatures.h" -namespace cir { +namespace clang::CIRGen { /// This structure provides a set of types that are commonly used /// during IR emission. It's initialized once in CodeGenModule's @@ -130,6 +130,6 @@ struct CIRGenTypeCache { } }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 2263cca6c4cc..1e1263ae9756 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -23,7 +23,7 @@ #include "llvm/Support/ErrorHandling.h" using namespace clang; -using namespace cir; +using namespace clang::CIRGen; mlir::cir::CallingConv CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { @@ -773,14 +773,14 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( - CanQualType resultType, FnInfoOpts opts, + CanQualType resultType, cir::FnInfoOpts opts, llvm::ArrayRef argTypes, FunctionType::ExtInfo info, llvm::ArrayRef paramInfos, RequiredArgs required) { assert(llvm::all_of(argTypes, [](CanQualType T) { return T.isCanonicalAsParam(); })); - bool instanceMethod = opts == FnInfoOpts::IsInstanceMethod; - bool chainCall = opts == FnInfoOpts::IsChainCall; + bool instanceMethod = opts == cir::FnInfoOpts::IsInstanceMethod; + bool chainCall = opts == cir::FnInfoOpts::IsChainCall; // Lookup or create unique function info. llvm::FoldingSetNodeID ID; @@ -817,7 +817,7 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( // Loop over all of the computed argument and return value info. If any of // them are direct or extend without a specified coerce type, specify the // default now. - ABIArgInfo &retInfo = FI->getReturnInfo(); + cir::ABIArgInfo &retInfo = FI->getReturnInfo(); if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) retInfo.setCoerceToType(ConvertType(FI->getReturnType())); @@ -861,7 +861,7 @@ void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { } // If necessary, provide the full definition of a type only used with a // declaration so far. - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 3bb5bafb194d..16df1bc99ee8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -67,7 +67,7 @@ class StructType; } // namespace cir } // namespace mlir -namespace cir { +namespace clang::CIRGen { class CallArgList; class CIRGenCXXABI; class CIRGenModule; @@ -78,7 +78,7 @@ class CIRGenBuilderTy; /// AST types to CIR types. class CIRGenTypes { clang::ASTContext &Context; - cir::CIRGenBuilderTy &Builder; + CIRGenBuilderTy &Builder; CIRGenModule &CGM; const clang::TargetInfo &Target; CIRGenCXXABI &TheCXXABI; @@ -118,7 +118,7 @@ class CIRGenTypes { CIRGenTypes(CIRGenModule &cgm); ~CIRGenTypes(); - cir::CIRGenBuilderTy &getBuilder() const { return Builder; } + CIRGenBuilderTy &getBuilder() const { return Builder; } CIRGenModule &getModule() const { return CGM; } /// Utility to check whether a function type can be converted to a CIR type @@ -270,12 +270,12 @@ class CIRGenTypes { /// /// \param argTypes - must all actually be canonical as params const CIRGenFunctionInfo &arrangeCIRFunctionInfo( - clang::CanQualType returnType, FnInfoOpts opts, + clang::CanQualType returnType, cir::FnInfoOpts opts, llvm::ArrayRef argTypes, clang::FunctionType::ExtInfo info, llvm::ArrayRef paramInfos, RequiredArgs args); }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index a565adc9703a..1f12fed80243 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -30,7 +30,7 @@ #include using namespace clang; -using namespace cir; +using namespace clang::CIRGen; CIRGenVTables::CIRGenVTables(CIRGenModule &CGM) : CGM(CGM), VTContext(CGM.getASTContext().getVTableContext()) {} @@ -161,7 +161,7 @@ void CIRGenModule::buildVTable(CXXRecordDecl *rd) { } void CIRGenVTables::GenerateClassData(const CXXRecordDecl *RD) { - assert(!MissingFeatures::generateDebugInfo()); + assert(!cir::MissingFeatures::generateDebugInfo()); if (RD->getNumVBases()) CGM.getCXXABI().emitVirtualInheritanceTables(RD); @@ -248,7 +248,7 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, CGM.getBuilder().getFuncType({}, CGM.getBuilder().getVoidTy()); mlir::cir::FuncOp fnPtr = CGM.createRuntimeFunction(fnTy, name); // LLVM codegen handles unnamedAddr - assert(!MissingFeatures::unnamedAddr()); + assert(!cir::MissingFeatures::unnamedAddr()); return fnPtr; }; @@ -371,7 +371,7 @@ mlir::cir::GlobalOp CIRGenVTables::generateConstructionVTable( Loc, Name, VTType, Linkage, CharUnits::fromQuantity(Align)); // V-tables are always unnamed_addr. - assert(!MissingFeatures::unnamedAddr() && "NYI"); + assert(!cir::MissingFeatures::unnamedAddr() && "NYI"); auto RTTI = CGM.getAddrOfRTTIDescriptor( Loc, CGM.getASTContext().getTagDeclType(Base.getBase())); @@ -652,7 +652,7 @@ void CIRGenVTables::buildVTTDefinition(mlir::cir::GlobalOp VTT, CIRGenModule::getMLIRVisibility(VTT)); if (CGM.supportsCOMDAT() && VTT.isWeakForLinker()) { - assert(!MissingFeatures::setComdat()); + assert(!cir::MissingFeatures::setComdat()); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index d439284de679..8440fdbafcfb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -26,7 +26,7 @@ namespace clang { class CXXRecordDecl; } -namespace cir { +namespace clang::CIRGen { class CIRGenModule; class CIRGenVTables { @@ -123,5 +123,5 @@ class CIRGenVTables { mlir::Type getVTableType(const clang::VTableLayout &layout); }; -} // end namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 79193c39a6a3..e795fe97a51d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -27,7 +27,7 @@ #include "mlir/IR/Value.h" -namespace cir { +namespace clang::CIRGen { /// This trivial value class is used to represent the result of an /// expression that is evaluated. It can be one of three things: either a @@ -551,6 +551,6 @@ class AggValueSlot { } }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 758dee16103c..f5ea438dae38 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -60,8 +60,8 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); - CGM = std::make_unique(*mlirCtx.get(), astCtx, codeGenOpts, - Diags); + CGM = std::make_unique(*mlirCtx.get(), astCtx, + codeGenOpts, Diags); auto mod = CGM->getModule(); auto layout = llvm::DataLayout(astCtx.getTargetInfo().getDataLayoutString()); setMLIRDataLayout(mod, layout); diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index b376c9476b05..4614aa717d9c 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -18,7 +18,7 @@ using namespace llvm; using namespace clang; -using namespace cir; +using namespace clang::CIRGen; namespace { /// The CIRRecordLowering is responsible for lowering an ASTRecordLayout to a @@ -215,7 +215,7 @@ struct CIRRecordLowering final { llvm::DenseMap bitFields; llvm::DenseMap nonVirtualBases; llvm::DenseMap virtualBases; - CIRDataLayout dataLayout; + cir::CIRDataLayout dataLayout; bool IsZeroInitializable : 1; bool IsZeroInitializableAsBase : 1; bool isPacked : 1; @@ -402,7 +402,7 @@ void CIRRecordLowering::computeVolatileBitfields() { return; for ([[maybe_unused]] auto &I : bitFields) { - assert(!MissingFeatures::armComputeVolatileBitfields()); + assert(!cir::MissingFeatures::armComputeVolatileBitfields()); } } diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp index 522f59adff60..086c4ece6b3d 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -16,7 +16,7 @@ #include "CIRGenModule.h" using namespace clang; -using namespace cir; +using namespace clang::CIRGen; ConstantInitBuilderBase::ConstantInitBuilderBase(CIRGenModule &CGM) : CGM(CGM), builder(CGM.getBuilder()) {} diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h index d78584f42e71..8f1852cdfe27 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -26,9 +26,7 @@ #include #include -using namespace clang; - -namespace cir { +namespace clang::CIRGen { class CIRGenModule; @@ -563,7 +561,7 @@ class ConstantArrayBuilder // The use of explicit qualification is a GCC workaround. template - friend class cir::ConstantAggregateBuilderTemplateBase; + friend class ConstantAggregateBuilderTemplateBase; ConstantArrayBuilder(ConstantInitBuilder &builder, ConstantAggregateBuilderBase *parent, mlir::Type eltTy) @@ -578,7 +576,7 @@ class ConstantStructBuilder // The use of explicit qualification is a GCC workaround. template - friend class cir::ConstantAggregateBuilderTemplateBase; + friend class ConstantAggregateBuilderTemplateBase; ConstantStructBuilder(ConstantInitBuilder &builder, ConstantAggregateBuilderBase *parent, @@ -586,6 +584,6 @@ class ConstantStructBuilder : ConstantStructBuilderTemplateBase(builder, parent, structTy) {} }; -} // end namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/ConstantInitFuture.h b/clang/lib/CIR/CodeGen/ConstantInitFuture.h index 97631d5da88c..3801c0dec199 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitFuture.h +++ b/clang/lib/CIR/CodeGen/ConstantInitFuture.h @@ -24,13 +24,14 @@ // Forward-declare ConstantInitBuilderBase and give it a // PointerLikeTypeTraits specialization so that we can safely use it // in a PointerUnion below. -namespace cir { +namespace clang::CIRGen { class ConstantInitBuilderBase; -} // namespace cir +} // namespace clang::CIRGen namespace llvm { -template <> struct PointerLikeTypeTraits<::cir::ConstantInitBuilderBase *> { - using T = ::cir::ConstantInitBuilderBase *; +template <> +struct PointerLikeTypeTraits { + using T = clang::CIRGen::ConstantInitBuilderBase *; static inline void *getAsVoidPointer(T p) { return p; } static inline T getFromVoidPointer(void *p) { return static_cast(p); } @@ -38,7 +39,7 @@ template <> struct PointerLikeTypeTraits<::cir::ConstantInitBuilderBase *> { }; } // namespace llvm -namespace cir { +namespace clang::CIRGen { /// A "future" for a completed constant initializer, which can be passed /// around independently of any sub-builders (but not the original parent). @@ -81,12 +82,12 @@ class ConstantInitFuture { llvm::PointerLikeTypeTraits::NumLowBitsAvailable; }; -} // namespace cir +} // namespace clang::CIRGen namespace llvm { -template <> struct PointerLikeTypeTraits<::cir::ConstantInitFuture> { - using T = ::cir::ConstantInitFuture; +template <> struct PointerLikeTypeTraits { + using T = clang::CIRGen::ConstantInitFuture; static inline void *getAsVoidPointer(T future) { return future.getOpaqueValue(); diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h index 5ab356df319f..550af12cd7fa 100644 --- a/clang/lib/CIR/CodeGen/EHScopeStack.h +++ b/clang/lib/CIR/CodeGen/EHScopeStack.h @@ -23,7 +23,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" -namespace cir { +namespace clang::CIRGen { class CIRGenFunction; @@ -416,6 +416,6 @@ class EHScopeStack { void clearFixups() { BranchFixups.clear(); } }; -} // namespace cir +} // namespace clang::CIRGen #endif diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 561531ce2636..7ca3baf67d65 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -8,8 +8,8 @@ #include "clang/CIR/MissingFeatures.h" #include "clang/CIR/Target/x86.h" -using namespace cir; using namespace clang; +using namespace clang::CIRGen; static bool testIfIsVoidTy(QualType Ty) { const auto *BT = Ty->getAs(); @@ -44,9 +44,9 @@ class DefaultABIInfo : public ABIInfo { virtual ~DefaultABIInfo() = default; - ABIArgInfo classifyReturnType(QualType RetTy) const { + cir::ABIArgInfo classifyReturnType(QualType RetTy) const { if (RetTy->isVoidType()) - return ABIArgInfo::getIgnore(); + return cir::ABIArgInfo::getIgnore(); if (isAggregateTypeForABI(RetTy)) llvm_unreachable("NYI"); @@ -58,11 +58,12 @@ class DefaultABIInfo : public ABIInfo { if (const auto *EIT = RetTy->getAs()) llvm_unreachable("NYI"); - return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) - : ABIArgInfo::getDirect()); + return (isPromotableIntegerTypeForABI(RetTy) + ? cir::ABIArgInfo::getExtend(RetTy) + : cir::ABIArgInfo::getDirect()); } - ABIArgInfo classifyArgumentType(QualType Ty) const { + cir::ABIArgInfo classifyArgumentType(QualType Ty) const { Ty = useFirstFieldIfTransparentUnion(Ty); if (isAggregateTypeForABI(Ty)) { @@ -76,8 +77,8 @@ class DefaultABIInfo : public ABIInfo { if (const auto *EIT = Ty->getAs()) llvm_unreachable("NYI"); - return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) - : ABIArgInfo::getDirect()); + return (isPromotableIntegerTypeForABI(Ty) ? cir::ABIArgInfo::getExtend(Ty) + : cir::ABIArgInfo::getDirect()); } void computeInfo(CIRGenFunctionInfo &FI) const override { @@ -114,9 +115,9 @@ class AArch64ABIInfo : public ABIInfo { ABIKind getABIKind() const { return Kind; } bool isDarwinPCS() const { return Kind == DarwinPCS; } - ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const; - ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic, - unsigned CallingConvention) const; + cir::ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const; + cir::ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic, + unsigned CallingConvention) const; void computeInfo(CIRGenFunctionInfo &FI) const override { // Top leevl CIR has unlimited arguments and return types. Lowering for ABI @@ -126,15 +127,15 @@ class AArch64ABIInfo : public ABIInfo { ie = FI.arg_end(); it != ie; ++it) { if (testIfIsVoidTy(it->type)) - it->info = ABIArgInfo::getIgnore(); + it->info = cir::ABIArgInfo::getIgnore(); else - it->info = ABIArgInfo::getDirect(CGT.ConvertType(it->type)); + it->info = cir::ABIArgInfo::getDirect(CGT.ConvertType(it->type)); } auto RetTy = FI.getReturnType(); if (testIfIsVoidTy(RetTy)) - FI.getReturnInfo() = ABIArgInfo::getIgnore(); + FI.getReturnInfo() = cir::ABIArgInfo::getIgnore(); else - FI.getReturnInfo() = ABIArgInfo::getDirect(CGT.ConvertType(RetTy)); + FI.getReturnInfo() = cir::ABIArgInfo::getDirect(CGT.ConvertType(RetTy)); return; } @@ -158,7 +159,7 @@ namespace { using X86AVXABILevel = ::cir::X86AVXABILevel; class X86_64ABIInfo : public ABIInfo { - using Class = X86ArgClass; + using Class = cir::X86ArgClass; // X86AVXABILevel AVXLevel; // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 64-bit @@ -204,11 +205,11 @@ class X86_64ABIInfo : public ABIInfo { clang::QualType SourceTy, unsigned SourceOffset) const; - ABIArgInfo classifyReturnType(QualType RetTy) const; + cir::ABIArgInfo classifyReturnType(QualType RetTy) const; - ABIArgInfo classifyArgumentType(clang::QualType Ty, unsigned freeIntRegs, - unsigned &neededInt, unsigned &neededSSE, - bool isNamedArg) const; + cir::ABIArgInfo classifyArgumentType(clang::QualType Ty, unsigned freeIntRegs, + unsigned &neededInt, unsigned &neededSSE, + bool isNamedArg) const; mlir::Type GetINTEGERTypeAtOffset(mlir::Type CIRType, unsigned CIROffset, QualType SourceTy, @@ -219,7 +220,7 @@ class X86_64ABIInfo : public ABIInfo { /// /// \param freeIntRegs - The number of free integer registers remaining /// available. - ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; + cir::ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; }; class X86_64TargetCIRGenInfo : public TargetCIRGenInfo { @@ -263,21 +264,20 @@ class SPIRVABIInfo : public CommonSPIRABIInfo { } private: - ABIArgInfo classifyKernelArgumentType(QualType Ty) const { + cir::ABIArgInfo classifyKernelArgumentType(QualType Ty) const { assert(!getContext().getLangOpts().CUDAIsDevice && "NYI"); return classifyArgumentType(Ty); } }; } // namespace -namespace cir { -void computeSPIRKernelABIInfo(CIRGenModule &CGM, CIRGenFunctionInfo &FI) { +void clang::CIRGen::computeSPIRKernelABIInfo(CIRGenModule &CGM, + CIRGenFunctionInfo &FI) { if (CGM.getTarget().getTriple().isSPIRV()) SPIRVABIInfo(CGM.getTypes()).computeInfo(FI); else CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI); } -} // namespace cir namespace { @@ -320,8 +320,8 @@ CIRGenCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); } clang::ASTContext &ABIInfo::getContext() const { return CGT.getContext(); } -ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, - unsigned freeIntRegs) const { +cir::ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, + unsigned freeIntRegs) const { assert(false && "NYI"); } @@ -332,15 +332,15 @@ void X86_64ABIInfo::computeInfo(CIRGenFunctionInfo &FI) const { for (CIRGenFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); it != ie; ++it) { if (testIfIsVoidTy(it->type)) - it->info = ABIArgInfo::getIgnore(); + it->info = cir::ABIArgInfo::getIgnore(); else - it->info = ABIArgInfo::getDirect(CGT.ConvertType(it->type)); + it->info = cir::ABIArgInfo::getDirect(CGT.ConvertType(it->type)); } auto RetTy = FI.getReturnType(); if (testIfIsVoidTy(RetTy)) - FI.getReturnInfo() = ABIArgInfo::getIgnore(); + FI.getReturnInfo() = cir::ABIArgInfo::getIgnore(); else - FI.getReturnInfo() = ABIArgInfo::getDirect(CGT.ConvertType(RetTy)); + FI.getReturnInfo() = cir::ABIArgInfo::getDirect(CGT.ConvertType(RetTy)); } /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in @@ -367,11 +367,11 @@ mlir::Type X86_64ABIInfo::GetINTEGERTypeAtOffset(mlir::Type CIRType, return CIRType; } -ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, - unsigned int freeIntRegs, - unsigned int &neededInt, - unsigned int &neededSSE, - bool isNamedArg) const { +cir::ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, + unsigned int freeIntRegs, + unsigned int &neededInt, + unsigned int &neededSSE, + bool isNamedArg) const { Ty = useFirstFieldIfTransparentUnion(Ty); X86_64ABIInfo::Class Lo, Hi; @@ -405,7 +405,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, assert(!Ty->getAs() && "NYI"); if (Ty->isSignedIntegerOrEnumerationType() && isPromotableIntegerTypeForABI(Ty)) - return ABIArgInfo::getExtend(Ty); + return cir::ABIArgInfo::getExtend(Ty); } break; @@ -431,7 +431,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, assert(!HighPart && "NYI"); - return ABIArgInfo::getDirect(ResType); + return cir::ABIArgInfo::getDirect(ResType); } ABIInfo::~ABIInfo() {} @@ -503,7 +503,7 @@ mlir::Type X86_64ABIInfo::GetSSETypeAtOffset(mlir::Type CIRType, return CIRType; } -ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { +cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the classification // algorithm. X86_64ABIInfo::Class Lo, Hi; @@ -522,7 +522,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { switch (Lo) { case Class::NoClass: assert(Hi == Class::NoClass && "Only NoClass supported so far for Hi"); - return ABIArgInfo::getIgnore(); + return cir::ABIArgInfo::getIgnore(); // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next available // register of the sequence %rax, %rdx is used. @@ -539,7 +539,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isIntegralOrEnumerationType() && isPromotableIntegerTypeForABI(RetTy)) { - return ABIArgInfo::getExtend(RetTy); + return cir::ABIArgInfo::getExtend(RetTy); } } break; @@ -559,11 +559,11 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { if (HighPart) assert(false && "NYI"); - return ABIArgInfo::getDirect(ResType); + return cir::ABIArgInfo::getDirect(ResType); } clang::LangAS -TargetCIRGenInfo::getGlobalVarAddressSpace(cir::CIRGenModule &CGM, +TargetCIRGenInfo::getGlobalVarAddressSpace(CIRGenModule &CGM, const clang::VarDecl *D) const { assert(!CGM.getLangOpts().OpenCL && !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index 873b2ef0fb00..7eb07c093833 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -21,7 +21,7 @@ #include -namespace cir { +namespace clang::CIRGen { class CIRGenFunction; class CIRGenModule; @@ -118,6 +118,6 @@ class TargetCIRGenInfo { void computeSPIRKernelABIInfo(CIRGenModule &CGM, CIRGenFunctionInfo &FI); -} // namespace cir +} // namespace clang::CIRGen #endif From 6ddac78457f1ea76917a4d3aa816bac54c2f541f Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Fri, 8 Nov 2024 21:51:40 +0300 Subject: [PATCH 2054/2301] [CIR][ABI][AArch64][Lowering] Support unions (#1075) As the title says, this PR adds support for unions for AArch64 lowering. The idea is basically the same as the [original](https://github.com/llvm/clangir/blob/dbf320e5c3db0410566ae561067c595308870bad/clang/lib/AST/RecordLayoutBuilder.cpp#L2111) codegen, and I added a couple of tests. --- .../TargetLowering/RecordLayoutBuilder.cpp | 4 +- .../test/CIR/CallConvLowering/AArch64/union.c | 41 +++++++++++++++++++ 2 files changed, 43 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CallConvLowering/AArch64/union.c diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index cb5f5eff5f7d..6b3229ac2ea6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -437,9 +437,9 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, // Reserve space for this field. if (!IsOverlappingEmptyField) { - // uint64_t EffectiveFieldSizeInBits = Context.toBits(EffectiveFieldSize); + uint64_t EffectiveFieldSizeInBits = Context.toBits(EffectiveFieldSize); if (IsUnion) - cir_cconv_unreachable("NYI"); + setDataSize(std::max(getDataSizeInBits(), EffectiveFieldSizeInBits)); else setDataSize(FieldOffset + EffectiveFieldSize); diff --git a/clang/test/CIR/CallConvLowering/AArch64/union.c b/clang/test/CIR/CallConvLowering/AArch64/union.c new file mode 100644 index 000000000000..4f622f0215c3 --- /dev/null +++ b/clang/test/CIR/CallConvLowering/AArch64/union.c @@ -0,0 +1,41 @@ +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -fclangir-call-conv-lowering +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +// CIR: !ty_U = !cir.struct +// LLVM: %union.U = type { i32 } +typedef union { + int a, b, c; +} U; + +// CIR: cir.func @foo(%arg0: !u64i +// CIR: %[[#V0:]] = cir.alloca !ty_U, !cir.ptr, [""] {alignment = 4 : i64} +// CIR: %[[#V1:]] = cir.cast(integral, %arg0 : !u64i), !u32i +// CIR: %[[#V2:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CIR: cir.store %[[#V1]], %[[#V2]] : !u32i, !cir.ptr +// CIR: cir.return + +// LLVM: void @foo(i64 %[[#V0:]] +// LLVM: %[[#V2:]] = alloca %union.U, i64 1, align 4 +// LLVM: %[[#V3:]] = trunc i64 %[[#V0]] to i32 +// LLVM: store i32 %[[#V3]], ptr %[[#V2]], align 4 +// LLVM: ret void +void foo(U u) {} + +// CIR: cir.func no_proto @init() -> !u32i +// CIR: %[[#V0:]] = cir.alloca !ty_U, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CIR: %[[#V1:]] = cir.load %[[#V0]] : !cir.ptr, !ty_U +// CIR: %[[#V2:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CIR: %[[#V3:]] = cir.load %[[#V2]] : !cir.ptr, !u32i +// CIR: cir.return %[[#V3]] : !u32i + +// LLVM: i32 @init() +// LLVM: %[[#V1:]] = alloca %union.U, i64 1, align 4 +// LLVM: %[[#V2:]] = load %union.U, ptr %[[#V1]], align 4 +// LLVM: %[[#V3:]] = load i32, ptr %[[#V1]], align 4 +// LLVM: ret i32 %[[#V3]] +U init() { + U u; + return u; +} \ No newline at end of file From 51b2e5299b454f3ba46b804cb5655cc092871c69 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Fri, 8 Nov 2024 10:55:59 -0800 Subject: [PATCH 2055/2301] [CIR] Merge the mlir::cir namespace into cir (#1084) https://github.com/llvm/clangir/issues/1025 explains why we want to move the CIR dialect from the `mlir::cir` to the `cir` namespace. This is a large PR, and I've split it out into four commits (that'll be squashed when landing). The first commit changes `mlir::cir` to `cir` everywhere. This was originally done mechanically with: ``` find clang \( -name '*.h' -o -name '*.cpp' -o -name '*.td' \) -print0 | xargs -0 perl -pi -e 's/mlir::cir/cir/g' find clang \( -name '*.h' -o -name '*.cpp' \) -print0 | xargs -0 perl -pi -e 's/::cir/cir/g' find clang \( -name '*.h' -o -name '*.cpp' \) -print0 | xargs -0 perl -0777 -pi -e 's/namespace mlir \{\nnamespace cir \{/namespace cir {/g' find clang \( -name '*.h' -o -name '*.cpp' \) -print0 | xargs -0 perl -0777 -pi -e 's!\} // namespace cir\n\} // namespace mlir!} // namespace cir!g' ``` It then required some manual fixups to address edge cases. Code that lived under `mlir::cir` could refer to the `mlir` namespace without qualification, but after the namespace change, we need to explicitly qualify all our usages. This is done in the second commit via https://gist.github.com/smeenai/996200fd45ad123bbf22b412d59479b6, which is an idempotent script to add all qualifications. I added cases to the script one at a time and reviewed each change afterwards to ensure we were only making the intended modifications, so I feel pretty confident in the end result. I also removed `using namespace llvm` from some headers to avoid conflicts, which in turn required adding some `llvm::` qualifiers as well. The third commit fixes a test, since an error message now contains the mlir namespace. Similar tests in flang also have the namespace in their error messages, so this is an expected change. The fourth change runs `git clang-format`. Unfortunately, that doesn't work for TableGen files, so we'll have a few instances of undesirable formatting left there. I'll look into fixing that as a follow-up. I validated the end result by examining the symbols in the built Clang binary. There's nothing in the `mlir::cir` namespace anymore. https://gist.github.com/smeenai/8438fd01588109fcdbde5c8652781dc0 had the symbols which lived in `cir` and should have moved to `clang::CIRGen`, and I validated that all the symbols were moved, with the exceptions noted in https://github.com/llvm/clangir/pull/1082 and the duplicated symbols noted in https://github.com/llvm/clangir/issues/1025. --- clang/include/clang/CIR/ABIArgInfo.h | 8 +- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 472 +++--- clang/include/clang/CIR/Dialect/IR/CIRAttrs.h | 2 - .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 134 +- .../clang/CIR/Dialect/IR/CIRDataLayout.h | 12 +- .../include/clang/CIR/Dialect/IR/CIRDialect.h | 12 +- .../clang/CIR/Dialect/IR/CIRDialect.td | 16 +- .../clang/CIR/Dialect/IR/CIROpenCLAttrs.td | 28 +- clang/include/clang/CIR/Dialect/IR/CIROps.td | 290 ++-- .../clang/CIR/Dialect/IR/CIROpsEnums.h | 14 +- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 89 +- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 108 +- .../clang/CIR/Dialect/IR/CIRTypesDetails.h | 32 +- clang/include/clang/CIR/Dialect/Passes.h | 4 +- clang/include/clang/CIR/Dialect/Passes.td | 2 +- .../clang/CIR/Interfaces/ASTAttrInterfaces.h | 4 - .../clang/CIR/Interfaces/ASTAttrInterfaces.td | 16 +- .../CIR/Interfaces/CIRFPTypeInterface.td | 2 +- .../clang/CIR/Interfaces/CIRLoopOpInterface.h | 2 - .../CIR/Interfaces/CIRLoopOpInterface.td | 10 +- .../clang/CIR/Interfaces/CIROpInterfaces.h | 4 - .../clang/CIR/Interfaces/CIROpInterfaces.td | 12 +- clang/include/clang/CIR/LoweringHelpers.h | 9 +- clang/lib/CIR/CodeGen/Address.h | 13 +- clang/lib/CIR/CodeGen/CIRAsm.cpp | 36 +- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 240 ++-- clang/lib/CIR/CodeGen/CIRGenBuilder.cpp | 40 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 515 ++++--- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 217 ++- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 265 ++-- clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 37 +- clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 19 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 107 +- clang/lib/CIR/CodeGen/CIRGenCall.h | 6 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 27 +- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 26 +- clang/lib/CIR/CodeGen/CIRGenCleanup.h | 2 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 97 +- clang/lib/CIR/CodeGen/CIRGenCstEmitter.h | 4 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 39 +- clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 42 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 205 ++- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 49 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 28 +- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 65 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 116 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 409 +++--- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 49 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 136 +- clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h | 16 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 217 ++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 468 +++--- clang/lib/CIR/CodeGen/CIRGenModule.h | 189 ++- clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp | 15 +- clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h | 2 +- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 14 +- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 119 +- clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 40 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 49 +- clang/lib/CIR/CodeGen/CIRGenTypes.h | 15 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 95 +- clang/lib/CIR/CodeGen/CIRGenVTables.h | 14 +- clang/lib/CIR/CodeGen/CIRGenValue.h | 2 +- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 2 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 32 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 25 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 69 +- clang/lib/CIR/CodeGen/ConstantInitFuture.h | 2 +- clang/lib/CIR/CodeGen/EHScopeStack.h | 2 +- clang/lib/CIR/CodeGen/TargetInfo.cpp | 27 +- clang/lib/CIR/CodeGen/TargetInfo.h | 10 +- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 35 +- clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 31 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 1004 +++++++------ clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp | 3 +- clang/lib/CIR/Dialect/IR/CIROpenCLAttrs.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 85 +- .../Dialect/Transforms/CIRCanonicalize.cpp | 4 +- .../CIR/Dialect/Transforms/CIRSimplify.cpp | 31 +- .../Dialect/Transforms/CallConvLowering.cpp | 26 +- clang/lib/CIR/Dialect/Transforms/DropAST.cpp | 2 +- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 303 ++-- .../lib/CIR/Dialect/Transforms/GotoSolver.cpp | 16 +- .../CIR/Dialect/Transforms/HoistAllocas.cpp | 12 +- .../Dialect/Transforms/IdiomRecognizer.cpp | 16 +- clang/lib/CIR/Dialect/Transforms/LibOpt.cpp | 10 +- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 87 +- .../Dialect/Transforms/LoweringPrepare.cpp | 419 +++--- .../Transforms/LoweringPrepareCXXABI.h | 7 +- .../Transforms/LoweringPrepareItaniumCXXABI.h | 4 +- clang/lib/CIR/Dialect/Transforms/PassDetail.h | 8 +- .../lib/CIR/Dialect/Transforms/SCFPrepare.cpp | 12 +- .../lib/CIR/Dialect/Transforms/StdHelpers.cpp | 4 +- clang/lib/CIR/Dialect/Transforms/StdHelpers.h | 2 - .../Transforms/TargetLowering/ABIInfo.cpp | 18 +- .../Transforms/TargetLowering/ABIInfo.h | 12 +- .../Transforms/TargetLowering/ABIInfoImpl.cpp | 20 +- .../Transforms/TargetLowering/ABIInfoImpl.h | 6 +- .../Transforms/TargetLowering/CIRCXXABI.cpp | 2 - .../Transforms/TargetLowering/CIRCXXABI.h | 9 +- .../TargetLowering/CIRLowerContext.cpp | 53 +- .../TargetLowering/CIRLowerContext.h | 30 +- .../TargetLowering/CIRRecordLayout.cpp | 10 +- .../TargetLowering/CIRRecordLayout.h | 11 +- .../TargetLowering/CIRToCIRArgMapping.h | 22 +- .../TargetLowering/ItaniumCXXABI.cpp | 16 +- .../Transforms/TargetLowering/LowerCall.cpp | 58 +- .../Transforms/TargetLowering/LowerCall.h | 8 +- .../TargetLowering/LowerFunction.cpp | 333 ++--- .../Transforms/TargetLowering/LowerFunction.h | 53 +- .../TargetLowering/LowerFunctionInfo.h | 20 +- .../Transforms/TargetLowering/LowerModule.cpp | 43 +- .../Transforms/TargetLowering/LowerModule.h | 41 +- .../Transforms/TargetLowering/LowerTypes.cpp | 33 +- .../Transforms/TargetLowering/LowerTypes.h | 28 +- .../TargetLowering/RecordLayoutBuilder.cpp | 69 +- .../Transforms/TargetLowering/TargetInfo.cpp | 2 - .../Transforms/TargetLowering/TargetInfo.h | 8 +- .../TargetLowering/TargetLoweringInfo.h | 4 +- .../TargetLowering/Targets/AArch64.cpp | 49 +- .../Targets/LoweringPrepareAArch64CXXABI.cpp | 106 +- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 31 +- .../TargetLowering/Targets/SPIR.cpp | 12 +- .../Transforms/TargetLowering/Targets/X86.cpp | 164 +-- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 +- .../lib/CIR/Interfaces/ASTAttrInterfaces.cpp | 2 +- .../lib/CIR/Interfaces/CIRFPTypeInterface.cpp | 2 +- .../lib/CIR/Interfaces/CIRLoopOpInterface.cpp | 16 +- clang/lib/CIR/Interfaces/CIROpInterfaces.cpp | 8 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 1259 ++++++++--------- .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 37 +- .../Lowering/DirectToLLVM/LoweringHelpers.h | 4 +- clang/lib/CIR/Lowering/LoweringHelpers.cpp | 42 +- .../ThroughMLIR/LowerCIRLoopToSCF.cpp | 86 +- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 426 +++--- .../Lowering/ThroughMLIR/LowerToMLIRHelpers.h | 11 +- .../ExecuteCompilerInvocation.cpp | 31 +- clang/test/CIR/CodeGen/bf16-ops.c | 1 - clang/test/CIR/IR/invalid-annotations.cir | 2 +- clang/tools/cir-lsp-server/cir-lsp-server.cpp | 2 +- clang/tools/cir-opt/cir-opt.cpp | 8 +- clang/utils/TableGen/CIRLoweringEmitter.cpp | 6 +- 148 files changed, 5212 insertions(+), 5639 deletions(-) diff --git a/clang/include/clang/CIR/ABIArgInfo.h b/clang/include/clang/CIR/ABIArgInfo.h index 818d3b62f13f..b3c3d68b9572 100644 --- a/clang/include/clang/CIR/ABIArgInfo.h +++ b/clang/include/clang/CIR/ABIArgInfo.h @@ -167,8 +167,7 @@ class ABIArgInfo { } static ABIArgInfo getZeroExtend(mlir::Type Ty, mlir::Type T = nullptr) { // NOTE(cir): Enumerations are IntTypes in CIR. - assert(mlir::isa(Ty) || - mlir::isa(Ty)); + assert(mlir::isa(Ty) || mlir::isa(Ty)); auto AI = ABIArgInfo(Extend); AI.setCoerceToType(T); AI.setPaddingType(nullptr); @@ -190,9 +189,8 @@ class ABIArgInfo { // NOTE(cir): The original can apply this method on both integers and // enumerations, but in CIR, these two types are one and the same. Booleans // will also fall into this category, but they have their own type. - if (mlir::isa(Ty) && - mlir::cast(Ty).isSigned()) - return getSignExtend(mlir::cast(Ty), T); + if (mlir::isa(Ty) && mlir::cast(Ty).isSigned()) + return getSignExtend(mlir::cast(Ty), T); return getZeroExtend(Ty, T); } diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 49f1256db284..76a4c7174f25 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -43,118 +43,105 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { CIRBaseBuilderTy(mlir::MLIRContext &C) : mlir::OpBuilder(&C) {} mlir::Value getConstAPSInt(mlir::Location loc, const llvm::APSInt &val) { - auto ty = mlir::cir::IntType::get(getContext(), val.getBitWidth(), - val.isSigned()); - return create(loc, ty, - getAttr(ty, val)); + auto ty = + cir::IntType::get(getContext(), val.getBitWidth(), val.isSigned()); + return create(loc, ty, getAttr(ty, val)); } mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, const llvm::APInt &val) { - return create(loc, typ, - getAttr(typ, val)); + return create(loc, typ, getAttr(typ, val)); } - mlir::cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr) { - return create(loc, attr.getType(), attr); + cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr) { + return create(loc, attr.getType(), attr); } // Creates constant null value for integral type ty. - mlir::cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { - return create(loc, ty, getZeroInitAttr(ty)); + cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc) { + return create(loc, ty, getZeroInitAttr(ty)); } - mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { - return create(loc, getBoolTy(), - getCIRBoolAttr(state)); - } - mlir::cir::ConstantOp getFalse(mlir::Location loc) { - return getBool(false, loc); - } - mlir::cir::ConstantOp getTrue(mlir::Location loc) { - return getBool(true, loc); + cir::ConstantOp getBool(bool state, mlir::Location loc) { + return create(loc, getBoolTy(), getCIRBoolAttr(state)); } + cir::ConstantOp getFalse(mlir::Location loc) { return getBool(false, loc); } + cir::ConstantOp getTrue(mlir::Location loc) { return getBool(true, loc); } - mlir::cir::BoolType getBoolTy() { - return ::mlir::cir::BoolType::get(getContext()); - } + cir::BoolType getBoolTy() { return cir::BoolType::get(getContext()); } - mlir::cir::VoidType getVoidTy() { - return ::mlir::cir::VoidType::get(getContext()); - } + cir::VoidType getVoidTy() { return cir::VoidType::get(getContext()); } - mlir::cir::IntType getUIntNTy(int N) { - return mlir::cir::IntType::get(getContext(), N, false); + cir::IntType getUIntNTy(int N) { + return cir::IntType::get(getContext(), N, false); } - mlir::cir::IntType getSIntNTy(int N) { - return mlir::cir::IntType::get(getContext(), N, true); + cir::IntType getSIntNTy(int N) { + return cir::IntType::get(getContext(), N, true); } - mlir::cir::AddressSpaceAttr getAddrSpaceAttr(clang::LangAS langAS) { + cir::AddressSpaceAttr getAddrSpaceAttr(clang::LangAS langAS) { if (langAS == clang::LangAS::Default) return {}; - return mlir::cir::AddressSpaceAttr::get(getContext(), langAS); + return cir::AddressSpaceAttr::get(getContext(), langAS); } - mlir::cir::PointerType getPointerTo(mlir::Type ty, - mlir::cir::AddressSpaceAttr cirAS = {}) { - return mlir::cir::PointerType::get(getContext(), ty, cirAS); + cir::PointerType getPointerTo(mlir::Type ty, + cir::AddressSpaceAttr cirAS = {}) { + return cir::PointerType::get(getContext(), ty, cirAS); } - mlir::cir::PointerType getPointerTo(mlir::Type ty, clang::LangAS langAS) { + cir::PointerType getPointerTo(mlir::Type ty, clang::LangAS langAS) { return getPointerTo(ty, getAddrSpaceAttr(langAS)); } - mlir::cir::PointerType - getVoidPtrTy(clang::LangAS langAS = clang::LangAS::Default) { - return getPointerTo(::mlir::cir::VoidType::get(getContext()), langAS); + cir::PointerType getVoidPtrTy(clang::LangAS langAS = clang::LangAS::Default) { + return getPointerTo(cir::VoidType::get(getContext()), langAS); } - mlir::cir::PointerType getVoidPtrTy(mlir::cir::AddressSpaceAttr cirAS) { - return getPointerTo(::mlir::cir::VoidType::get(getContext()), cirAS); + cir::PointerType getVoidPtrTy(cir::AddressSpaceAttr cirAS) { + return getPointerTo(cir::VoidType::get(getContext()), cirAS); } - mlir::cir::MethodAttr getMethodAttr(mlir::cir::MethodType ty, - mlir::cir::FuncOp methodFuncOp) { + cir::MethodAttr getMethodAttr(cir::MethodType ty, cir::FuncOp methodFuncOp) { auto methodFuncSymbolRef = mlir::FlatSymbolRefAttr::get(methodFuncOp); - return mlir::cir::MethodAttr::get(ty, methodFuncSymbolRef); + return cir::MethodAttr::get(ty, methodFuncSymbolRef); } - mlir::cir::MethodAttr getNullMethodAttr(mlir::cir::MethodType ty) { - return mlir::cir::MethodAttr::get(ty); + cir::MethodAttr getNullMethodAttr(cir::MethodType ty) { + return cir::MethodAttr::get(ty); } - mlir::cir::BoolAttr getCIRBoolAttr(bool state) { - return mlir::cir::BoolAttr::get(getContext(), getBoolTy(), state); + cir::BoolAttr getCIRBoolAttr(bool state) { + return cir::BoolAttr::get(getContext(), getBoolTy(), state); } mlir::TypedAttr getZeroAttr(mlir::Type t) { - return mlir::cir::ZeroAttr::get(getContext(), t); + return cir::ZeroAttr::get(getContext(), t); } mlir::TypedAttr getZeroInitAttr(mlir::Type ty) { - if (mlir::isa(ty)) - return mlir::cir::IntAttr::get(ty, 0); - if (auto fltType = mlir::dyn_cast(ty)) - return mlir::cir::FPAttr::getZero(fltType); - if (auto fltType = mlir::dyn_cast(ty)) - return mlir::cir::FPAttr::getZero(fltType); - if (auto fltType = mlir::dyn_cast(ty)) - return mlir::cir::FPAttr::getZero(fltType); - if (auto fltType = mlir::dyn_cast(ty)) - return mlir::cir::FPAttr::getZero(fltType); - if (auto complexType = mlir::dyn_cast(ty)) + if (mlir::isa(ty)) + return cir::IntAttr::get(ty, 0); + if (auto fltType = mlir::dyn_cast(ty)) + return cir::FPAttr::getZero(fltType); + if (auto fltType = mlir::dyn_cast(ty)) + return cir::FPAttr::getZero(fltType); + if (auto fltType = mlir::dyn_cast(ty)) + return cir::FPAttr::getZero(fltType); + if (auto fltType = mlir::dyn_cast(ty)) + return cir::FPAttr::getZero(fltType); + if (auto complexType = mlir::dyn_cast(ty)) return getZeroAttr(complexType); - if (auto arrTy = mlir::dyn_cast(ty)) + if (auto arrTy = mlir::dyn_cast(ty)) return getZeroAttr(arrTy); - if (auto ptrTy = mlir::dyn_cast(ty)) + if (auto ptrTy = mlir::dyn_cast(ty)) return getConstNullPtrAttr(ptrTy); - if (auto structTy = mlir::dyn_cast(ty)) + if (auto structTy = mlir::dyn_cast(ty)) return getZeroAttr(structTy); - if (auto methodTy = mlir::dyn_cast(ty)) + if (auto methodTy = mlir::dyn_cast(ty)) return getNullMethodAttr(methodTy); - if (mlir::isa(ty)) { + if (mlir::isa(ty)) { return getCIRBoolAttr(false); } llvm_unreachable("Zero initializer for given type is NYI"); @@ -167,9 +154,9 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { intAttr = mlir::IntegerAttr::get( mlir::IntegerType::get(ptr.getContext(), 64), alignment); - return create(loc, ptr, /*isDeref=*/false, isVolatile, - /*alignment=*/intAttr, - /*mem_order=*/mlir::cir::MemOrderAttr{}); + return create(loc, ptr, /*isDeref=*/false, isVolatile, + /*alignment=*/intAttr, + /*mem_order=*/cir::MemOrderAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr, @@ -178,51 +165,49 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } mlir::Value createNot(mlir::Value value) { - return create(value.getLoc(), value.getType(), - mlir::cir::UnaryOpKind::Not, value); + return create(value.getLoc(), value.getType(), + cir::UnaryOpKind::Not, value); } - mlir::cir::CmpOp createCompare(mlir::Location loc, mlir::cir::CmpOpKind kind, - mlir::Value lhs, mlir::Value rhs) { - return create(loc, getBoolTy(), kind, lhs, rhs); + cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, + mlir::Value lhs, mlir::Value rhs) { + return create(loc, getBoolTy(), kind, lhs, rhs); } mlir::Value createIsNaN(mlir::Location loc, mlir::Value operand) { - return createCompare(loc, mlir::cir::CmpOpKind::ne, operand, operand); + return createCompare(loc, cir::CmpOpKind::ne, operand, operand); } - mlir::Value createUnaryOp(mlir::Location loc, mlir::cir::UnaryOpKind kind, + mlir::Value createUnaryOp(mlir::Location loc, cir::UnaryOpKind kind, mlir::Value operand) { - return create(loc, kind, operand); + return create(loc, kind, operand); } - mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + mlir::Value createBinop(mlir::Value lhs, cir::BinOpKind kind, const llvm::APInt &rhs) { - return create( - lhs.getLoc(), lhs.getType(), kind, lhs, - getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); + return create(lhs.getLoc(), lhs.getType(), kind, lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); } - mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + mlir::Value createBinop(mlir::Value lhs, cir::BinOpKind kind, mlir::Value rhs) { - return create(lhs.getLoc(), lhs.getType(), kind, lhs, - rhs); + return create(lhs.getLoc(), lhs.getType(), kind, lhs, rhs); } mlir::Value createBinop(mlir::Location loc, mlir::Value lhs, - mlir::cir::BinOpKind kind, mlir::Value rhs) { - return create(loc, lhs.getType(), kind, lhs, rhs); + cir::BinOpKind kind, mlir::Value rhs) { + return create(loc, lhs.getType(), kind, lhs, rhs); } mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, bool isShiftLeft) { - return create( - lhs.getLoc(), lhs.getType(), lhs, - getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); + return create(lhs.getLoc(), lhs.getType(), lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), + isShiftLeft); } mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) { - auto width = mlir::dyn_cast(lhs.getType()).getWidth(); + auto width = mlir::dyn_cast(lhs.getType()).getWidth(); auto shift = llvm::APInt(width, bits); return createShift(lhs, shift, isShiftLeft); } @@ -238,36 +223,36 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, unsigned bits) { auto val = llvm::APInt::getLowBitsSet(size, bits); - auto typ = mlir::cir::IntType::get(getContext(), size, false); + auto typ = cir::IntType::get(getContext(), size, false); return getConstAPInt(loc, typ, val); } mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) { auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); - return createBinop(lhs, mlir::cir::BinOpKind::And, val); + return createBinop(lhs, cir::BinOpKind::And, val); } mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { - return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); + return createBinop(lhs, cir::BinOpKind::And, rhs); } mlir::Value createAnd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) { - return createBinop(loc, lhs, mlir::cir::BinOpKind::And, rhs); + return createBinop(loc, lhs, cir::BinOpKind::And, rhs); } mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) { auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); - return createBinop(lhs, mlir::cir::BinOpKind::Or, val); + return createBinop(lhs, cir::BinOpKind::Or, val); } mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { - return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); + return createBinop(lhs, cir::BinOpKind::Or, rhs); } mlir::Value createMul(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, bool hasNSW = false) { - auto op = create(lhs.getLoc(), lhs.getType(), - mlir::cir::BinOpKind::Mul, lhs, rhs); + auto op = create(lhs.getLoc(), lhs.getType(), + cir::BinOpKind::Mul, lhs, rhs); if (hasNUW) op.setNoUnsignedWrap(true); if (hasNSW) @@ -283,15 +268,15 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Value createMul(mlir::Value lhs, llvm::APInt rhs) { auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); - return createBinop(lhs, mlir::cir::BinOpKind::Mul, val); + return createBinop(lhs, cir::BinOpKind::Mul, val); } mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue) { assert(trueValue.getType() == falseValue.getType() && "trueValue and falseValue should have the same type"); - return create(loc, trueValue.getType(), condition, - trueValue, falseValue); + return create(loc, trueValue.getType(), condition, trueValue, + falseValue); } mlir::Value createLogicalAnd(mlir::Location loc, mlir::Value lhs, @@ -306,77 +291,69 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Value createComplexCreate(mlir::Location loc, mlir::Value real, mlir::Value imag) { - auto resultComplexTy = - mlir::cir::ComplexType::get(getContext(), real.getType()); - return create(loc, resultComplexTy, real, imag); + auto resultComplexTy = cir::ComplexType::get(getContext(), real.getType()); + return create(loc, resultComplexTy, real, imag); } mlir::Value createComplexReal(mlir::Location loc, mlir::Value operand) { - auto operandTy = mlir::cast(operand.getType()); - return create(loc, operandTy.getElementTy(), - operand); + auto operandTy = mlir::cast(operand.getType()); + return create(loc, operandTy.getElementTy(), operand); } mlir::Value createComplexImag(mlir::Location loc, mlir::Value operand) { - auto operandTy = mlir::cast(operand.getType()); - return create(loc, operandTy.getElementTy(), - operand); + auto operandTy = mlir::cast(operand.getType()); + return create(loc, operandTy.getElementTy(), operand); } mlir::Value createComplexBinOp(mlir::Location loc, mlir::Value lhs, - mlir::cir::ComplexBinOpKind kind, - mlir::Value rhs, - mlir::cir::ComplexRangeKind range, - bool promoted) { - return create(loc, kind, lhs, rhs, range, - promoted); + cir::ComplexBinOpKind kind, mlir::Value rhs, + cir::ComplexRangeKind range, bool promoted) { + return create(loc, kind, lhs, rhs, range, promoted); } mlir::Value createComplexAdd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) { - return createBinop(loc, lhs, mlir::cir::BinOpKind::Add, rhs); + return createBinop(loc, lhs, cir::BinOpKind::Add, rhs); } mlir::Value createComplexSub(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) { - return createBinop(loc, lhs, mlir::cir::BinOpKind::Sub, rhs); + return createBinop(loc, lhs, cir::BinOpKind::Sub, rhs); } mlir::Value createComplexMul(mlir::Location loc, mlir::Value lhs, - mlir::Value rhs, - mlir::cir::ComplexRangeKind range, + mlir::Value rhs, cir::ComplexRangeKind range, bool promoted) { - return createComplexBinOp(loc, lhs, mlir::cir::ComplexBinOpKind::Mul, rhs, - range, promoted); + return createComplexBinOp(loc, lhs, cir::ComplexBinOpKind::Mul, rhs, range, + promoted); } mlir::Value createComplexDiv(mlir::Location loc, mlir::Value lhs, - mlir::Value rhs, - mlir::cir::ComplexRangeKind range, + mlir::Value rhs, cir::ComplexRangeKind range, bool promoted) { - return createComplexBinOp(loc, lhs, mlir::cir::ComplexBinOpKind::Div, rhs, - range, promoted); + return createComplexBinOp(loc, lhs, cir::ComplexBinOpKind::Div, rhs, range, + promoted); } - mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, - mlir::Value dst, bool _volatile = false, - ::mlir::IntegerAttr align = {}, - ::mlir::cir::MemOrderAttr order = {}) { - if (mlir::cast(dst.getType()).getPointee() != + cir::StoreOp createStore(mlir::Location loc, mlir::Value val, mlir::Value dst, + bool _volatile = false, + ::mlir::IntegerAttr align = {}, + cir::MemOrderAttr order = {}) { + if (mlir::cast(dst.getType()).getPointee() != val.getType()) dst = createPtrBitcast(dst, val.getType()); - return create(loc, val, dst, _volatile, align, order); + return create(loc, val, dst, _volatile, align, order); } - mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize) { - return create(loc, addrType, type, name, alignment, - dynAllocSize); + return create(loc, addrType, type, name, alignment, + dynAllocSize); } - mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, clang::CharUnits alignment, mlir::Value dynAllocSize) { @@ -385,42 +362,41 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { dynAllocSize); } - mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment) { - return create(loc, addrType, type, name, alignment); + return create(loc, addrType, type, name, alignment); } - mlir::Value createAlloca(mlir::Location loc, mlir::cir::PointerType addrType, + mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, clang::CharUnits alignment) { auto alignmentIntAttr = getSizeFromCharUnits(getContext(), alignment); return createAlloca(loc, addrType, type, name, alignmentIntAttr); } - mlir::Value createGetGlobal(mlir::cir::GlobalOp global, - bool threadLocal = false) { - return create( + mlir::Value createGetGlobal(cir::GlobalOp global, bool threadLocal = false) { + return create( global.getLoc(), getPointerTo(global.getSymType(), global.getAddrSpaceAttr()), global.getName(), threadLocal); } /// Create a copy with inferred length. - mlir::cir::CopyOp createCopy(mlir::Value dst, mlir::Value src, - bool isVolatile = false) { - return create(dst.getLoc(), dst, src, isVolatile); + cir::CopyOp createCopy(mlir::Value dst, mlir::Value src, + bool isVolatile = false) { + return create(dst.getLoc(), dst, src, isVolatile); } - mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, - mlir::Value src, mlir::Value len) { - return create(loc, dst, src, len); + cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, + mlir::Value src, mlir::Value len) { + return create(loc, dst, src, len); } mlir::Value createSub(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, bool hasNSW = false) { - auto op = create(lhs.getLoc(), lhs.getType(), - mlir::cir::BinOpKind::Sub, lhs, rhs); + auto op = create(lhs.getLoc(), lhs.getType(), + cir::BinOpKind::Sub, lhs, rhs); if (hasNUW) op.setNoUnsignedWrap(true); if (hasNSW) @@ -438,8 +414,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Value createAdd(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, bool hasNSW = false) { - auto op = create(lhs.getLoc(), lhs.getType(), - mlir::cir::BinOpKind::Add, lhs, rhs); + auto op = create(lhs.getLoc(), lhs.getType(), + cir::BinOpKind::Add, lhs, rhs); if (hasNUW) op.setNoUnsignedWrap(true); if (hasNSW) @@ -460,10 +436,10 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { }; BinOpOverflowResults createBinOpOverflowOp(mlir::Location loc, - mlir::cir::IntType resultTy, - mlir::cir::BinOpOverflowKind kind, + cir::IntType resultTy, + cir::BinOpOverflowKind kind, mlir::Value lhs, mlir::Value rhs) { - auto op = create(loc, resultTy, kind, lhs, rhs); + auto op = create(loc, resultTy, kind, lhs, rhs); return {op.getResult(), op.getOverflow()}; } @@ -471,14 +447,14 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { // Cast/Conversion Operators //===--------------------------------------------------------------------===// - mlir::Value createCast(mlir::Location loc, mlir::cir::CastKind kind, + mlir::Value createCast(mlir::Location loc, cir::CastKind kind, mlir::Value src, mlir::Type newTy) { if (newTy == src.getType()) return src; - return create(loc, newTy, kind, src); + return create(loc, newTy, kind, src); } - mlir::Value createCast(mlir::cir::CastKind kind, mlir::Value src, + mlir::Value createCast(cir::CastKind kind, mlir::Value src, mlir::Type newTy) { if (newTy == src.getType()) return src; @@ -486,33 +462,31 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } mlir::Value createIntCast(mlir::Value src, mlir::Type newTy) { - return createCast(mlir::cir::CastKind::integral, src, newTy); + return createCast(cir::CastKind::integral, src, newTy); } mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy) { - return createCast(mlir::cir::CastKind::int_to_ptr, src, newTy); + return createCast(cir::CastKind::int_to_ptr, src, newTy); } mlir::Value createGetMemberOp(mlir::Location &loc, mlir::Value structPtr, const char *fldName, unsigned idx) { - assert(mlir::isa(structPtr.getType())); + assert(mlir::isa(structPtr.getType())); auto structBaseTy = - mlir::cast(structPtr.getType()).getPointee(); - assert(mlir::isa(structBaseTy)); - auto fldTy = - mlir::cast(structBaseTy).getMembers()[idx]; - auto fldPtrTy = ::mlir::cir::PointerType::get(getContext(), fldTy); - return create(loc, fldPtrTy, structPtr, fldName, - idx); + mlir::cast(structPtr.getType()).getPointee(); + assert(mlir::isa(structBaseTy)); + auto fldTy = mlir::cast(structBaseTy).getMembers()[idx]; + auto fldPtrTy = cir::PointerType::get(getContext(), fldTy); + return create(loc, fldPtrTy, structPtr, fldName, idx); } mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy) { - return createCast(mlir::cir::CastKind::ptr_to_int, src, newTy); + return createCast(cir::CastKind::ptr_to_int, src, newTy); } mlir::Value createPtrToBoolCast(mlir::Value v) { - return createCast(mlir::cir::CastKind::ptr_to_bool, v, getBoolTy()); + return createCast(cir::CastKind::ptr_to_bool, v, getBoolTy()); } // TODO(cir): the following function was introduced to keep in sync with LLVM @@ -525,35 +499,33 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { if (srcTy == newTy) return src; - if (mlir::isa(srcTy) && - mlir::isa(newTy)) + if (mlir::isa(srcTy) && mlir::isa(newTy)) return createBoolToInt(src, newTy); llvm_unreachable("unhandled extension cast"); } mlir::Value createBoolToInt(mlir::Value src, mlir::Type newTy) { - return createCast(mlir::cir::CastKind::bool_to_int, src, newTy); + return createCast(cir::CastKind::bool_to_int, src, newTy); } mlir::Value createBitcast(mlir::Value src, mlir::Type newTy) { - return createCast(mlir::cir::CastKind::bitcast, src, newTy); + return createCast(cir::CastKind::bitcast, src, newTy); } mlir::Value createBitcast(mlir::Location loc, mlir::Value src, mlir::Type newTy) { - return createCast(loc, mlir::cir::CastKind::bitcast, src, newTy); + return createCast(loc, cir::CastKind::bitcast, src, newTy); } mlir::Value createPtrBitcast(mlir::Value src, mlir::Type newPointeeTy) { - assert(mlir::isa(src.getType()) && - "expected ptr src"); + assert(mlir::isa(src.getType()) && "expected ptr src"); return createBitcast(src, getPointerTo(newPointeeTy)); } mlir::Value createAddrSpaceCast(mlir::Location loc, mlir::Value src, mlir::Type newTy) { - return createCast(loc, mlir::cir::CastKind::address_space, src, newTy); + return createCast(loc, cir::CastKind::address_space, src, newTy); } mlir::Value createAddrSpaceCast(mlir::Value src, mlir::Type newTy) { @@ -571,7 +543,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) { auto last = std::find_if(block->rbegin(), block->rend(), [](mlir::Operation &op) { - return mlir::isa(&op); + return mlir::isa(&op); }); if (last != block->rend()) @@ -582,7 +554,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::IntegerAttr getSizeFromCharUnits(mlir::MLIRContext *ctx, clang::CharUnits size) { - // Note that mlir::IntegerType is used instead of mlir::cir::IntType here + // Note that mlir::IntegerType is used instead of cir::IntType here // because we don't need sign information for this to be useful, so keep // it simple. return mlir::IntegerAttr::get(mlir::IntegerType::get(ctx, 64), @@ -590,101 +562,99 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } /// Create a do-while operation. - mlir::cir::DoWhileOp createDoWhile( + cir::DoWhileOp createDoWhile( mlir::Location loc, llvm::function_ref condBuilder, llvm::function_ref bodyBuilder) { - return create(loc, condBuilder, bodyBuilder); + return create(loc, condBuilder, bodyBuilder); } /// Create a while operation. - mlir::cir::WhileOp createWhile( + cir::WhileOp createWhile( mlir::Location loc, llvm::function_ref condBuilder, llvm::function_ref bodyBuilder) { - return create(loc, condBuilder, bodyBuilder); + return create(loc, condBuilder, bodyBuilder); } /// Create a for operation. - mlir::cir::ForOp createFor( + cir::ForOp createFor( mlir::Location loc, llvm::function_ref condBuilder, llvm::function_ref bodyBuilder, llvm::function_ref stepBuilder) { - return create(loc, condBuilder, bodyBuilder, stepBuilder); + return create(loc, condBuilder, bodyBuilder, stepBuilder); } mlir::TypedAttr getConstPtrAttr(mlir::Type t, int64_t v) { auto val = mlir::IntegerAttr::get(mlir::IntegerType::get(t.getContext(), 64), v); - return mlir::cir::ConstPtrAttr::get( - getContext(), mlir::cast(t), val); + return cir::ConstPtrAttr::get(getContext(), mlir::cast(t), + val); } mlir::TypedAttr getConstNullPtrAttr(mlir::Type t) { - assert(mlir::isa(t) && "expected cir.ptr"); + assert(mlir::isa(t) && "expected cir.ptr"); return getConstPtrAttr(t, 0); } // Creates constant nullptr for pointer type ty. - mlir::cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { + cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { assert(!MissingFeatures::targetCodeGenInfoGetNullPointer()); - return create(loc, ty, getConstPtrAttr(ty, 0)); + return create(loc, ty, getConstPtrAttr(ty, 0)); } /// Create a loop condition. - mlir::cir::ConditionOp createCondition(mlir::Value condition) { - return create(condition.getLoc(), condition); + cir::ConditionOp createCondition(mlir::Value condition) { + return create(condition.getLoc(), condition); } /// Create a yield operation. - mlir::cir::YieldOp createYield(mlir::Location loc, - mlir::ValueRange value = {}) { - return create(loc, value); + cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value = {}) { + return create(loc, value); } - mlir::cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, - mlir::Value stride) { - return create(loc, base.getType(), base, stride); + cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, + mlir::Value stride) { + return create(loc, base.getType(), base, stride); } - mlir::cir::CallOp - createCallOp(mlir::Location loc, - mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), - mlir::Type returnType = mlir::cir::VoidType(), - mlir::ValueRange operands = mlir::ValueRange(), - mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, - mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + cir::CallOp createCallOp(mlir::Location loc, + mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), + mlir::Type returnType = cir::VoidType(), + mlir::ValueRange operands = mlir::ValueRange(), + cir::CallingConv callingConv = cir::CallingConv::C, + cir::ExtraFuncAttributesAttr extraFnAttr = {}) { - mlir::cir::CallOp callOp = create( - loc, callee, returnType, operands, callingConv); + cir::CallOp callOp = + create(loc, callee, returnType, operands, callingConv); if (extraFnAttr) { callOp->setAttr("extra_attrs", extraFnAttr); } else { mlir::NamedAttrList empty; callOp->setAttr("extra_attrs", - mlir::cir::ExtraFuncAttributesAttr::get( + cir::ExtraFuncAttributesAttr::get( getContext(), empty.getDictionary(getContext()))); } return callOp; } - mlir::cir::CallOp - createCallOp(mlir::Location loc, mlir::cir::FuncOp callee, - mlir::ValueRange operands = mlir::ValueRange(), - mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, - mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + cir::CallOp createCallOp(mlir::Location loc, cir::FuncOp callee, + mlir::ValueRange operands = mlir::ValueRange(), + cir::CallingConv callingConv = cir::CallingConv::C, + cir::ExtraFuncAttributesAttr extraFnAttr = {}) { return createCallOp(loc, mlir::SymbolRefAttr::get(callee), callee.getFunctionType().getReturnType(), operands, callingConv, extraFnAttr); } - mlir::cir::CallOp createIndirectCallOp( - mlir::Location loc, mlir::Value ind_target, mlir::cir::FuncType fn_type, - mlir::ValueRange operands = mlir::ValueRange(), - mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, - mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + cir::CallOp + createIndirectCallOp(mlir::Location loc, mlir::Value ind_target, + cir::FuncType fn_type, + mlir::ValueRange operands = mlir::ValueRange(), + cir::CallingConv callingConv = cir::CallingConv::C, + cir::ExtraFuncAttributesAttr extraFnAttr = {}) { llvm::SmallVector resOperands({ind_target}); resOperands.append(operands.begin(), operands.end()); @@ -693,48 +663,49 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { resOperands, callingConv, extraFnAttr); } - mlir::cir::CallOp - createCallOp(mlir::Location loc, mlir::SymbolRefAttr callee, - mlir::ValueRange operands = mlir::ValueRange(), - mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, - mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { - return createCallOp(loc, callee, mlir::cir::VoidType(), operands, - callingConv, extraFnAttr); - } - - mlir::cir::CallOp createTryCallOp( - mlir::Location loc, mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), - mlir::Type returnType = mlir::cir::VoidType(), - mlir::ValueRange operands = mlir::ValueRange(), - mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, - mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { - mlir::cir::CallOp tryCallOp = - create(loc, callee, returnType, operands, - callingConv, /*exception=*/getUnitAttr()); + cir::CallOp createCallOp(mlir::Location loc, mlir::SymbolRefAttr callee, + mlir::ValueRange operands = mlir::ValueRange(), + cir::CallingConv callingConv = cir::CallingConv::C, + cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + return createCallOp(loc, callee, cir::VoidType(), operands, callingConv, + extraFnAttr); + } + + cir::CallOp + createTryCallOp(mlir::Location loc, + mlir::SymbolRefAttr callee = mlir::SymbolRefAttr(), + mlir::Type returnType = cir::VoidType(), + mlir::ValueRange operands = mlir::ValueRange(), + cir::CallingConv callingConv = cir::CallingConv::C, + cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + cir::CallOp tryCallOp = + create(loc, callee, returnType, operands, callingConv, + /*exception=*/getUnitAttr()); if (extraFnAttr) { tryCallOp->setAttr("extra_attrs", extraFnAttr); } else { mlir::NamedAttrList empty; tryCallOp->setAttr("extra_attrs", - mlir::cir::ExtraFuncAttributesAttr::get( + cir::ExtraFuncAttributesAttr::get( getContext(), empty.getDictionary(getContext()))); } return tryCallOp; } - mlir::cir::CallOp createTryCallOp( - mlir::Location loc, mlir::cir::FuncOp callee, mlir::ValueRange operands, - mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C, - mlir::cir::ExtraFuncAttributesAttr extraFnAttr = {}) { + cir::CallOp + createTryCallOp(mlir::Location loc, cir::FuncOp callee, + mlir::ValueRange operands, + cir::CallingConv callingConv = cir::CallingConv::C, + cir::ExtraFuncAttributesAttr extraFnAttr = {}) { return createTryCallOp(loc, mlir::SymbolRefAttr::get(callee), callee.getFunctionType().getReturnType(), operands, callingConv, extraFnAttr); } - mlir::cir::CallOp createIndirectTryCallOp( - mlir::Location loc, mlir::Value ind_target, mlir::cir::FuncType fn_type, - mlir::ValueRange operands, - mlir::cir::CallingConv callingConv = mlir::cir::CallingConv::C) { + cir::CallOp + createIndirectTryCallOp(mlir::Location loc, mlir::Value ind_target, + cir::FuncType fn_type, mlir::ValueRange operands, + cir::CallingConv callingConv = cir::CallingConv::C) { llvm::SmallVector resOperands({ind_target}); resOperands.append(operands.begin(), operands.end()); return createTryCallOp(loc, mlir::SymbolRefAttr(), fn_type.getReturnType(), @@ -750,13 +721,12 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Value objectPtr) { // Build the callee function type. auto methodFuncTy = - mlir::cast(method.getType()).getMemberFuncTy(); + mlir::cast(method.getType()).getMemberFuncTy(); auto methodFuncInputTypes = methodFuncTy.getInputs(); - auto objectPtrTy = mlir::cast(objectPtr.getType()); - auto objectPtrAddrSpace = - mlir::cast_if_present( - objectPtrTy.getAddrSpace()); + auto objectPtrTy = mlir::cast(objectPtr.getType()); + auto objectPtrAddrSpace = mlir::cast_if_present( + objectPtrTy.getAddrSpace()); auto adjustedThisTy = getVoidPtrTy(objectPtrAddrSpace); llvm::SmallVector calleeFuncInputTypes{adjustedThisTy}; @@ -769,8 +739,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { assert(!MissingFeatures::addressSpace()); auto calleeTy = getPointerTo(calleeFuncTy); - auto op = create(loc, calleeTy, adjustedThisTy, - method, objectPtr); + auto op = create(loc, calleeTy, adjustedThisTy, method, + objectPtr); return {op.getCallee(), op.getAdjustedThis()}; } }; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h index 5961f77629b5..7e60de084265 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.h @@ -35,13 +35,11 @@ class VarDecl; class RecordDecl; } // namespace clang -namespace mlir { namespace cir { class ArrayType; class StructType; class BoolType; } // namespace cir -} // namespace mlir #define GET_ATTRDEF_CLASSES #include "clang/CIR/Dialect/IR/CIROpsAttributes.h.inc" diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index a81ac5037caa..c931b9376287 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -48,7 +48,7 @@ def OpenCLC : I32EnumAttrCase<"OpenCLC", 3, "opencl_c">; def SourceLanguage : I32EnumAttr<"SourceLanguage", "Source language", [ C, CXX, OpenCLC ]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def LangAttr : CIR_Attr<"Lang", "lang"> { @@ -84,7 +84,7 @@ def CIR_BoolAttr : CIR_Attr<"Bool", "bool", [TypedAttrInterface]> { }]; let parameters = (ins AttributeSelfTypeParameter< - "", "mlir::cir::BoolType">:$type, + "", "cir::BoolType">:$type, "bool":$value); let assemblyFormat = [{ @@ -147,16 +147,16 @@ def ConstArrayAttr : CIR_Attr<"ConstArray", "const_array", [TypedAttrInterface]> }]; let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "Attribute":$elts, + "mlir::Attribute":$elts, "int":$trailingZerosNum); // Define a custom builder for the type; that removes the need to pass // in an MLIRContext instance, as it can be infered from the `type`. let builders = [ - AttrBuilderWithInferredContext<(ins "mlir::cir::ArrayType":$type, - "Attribute":$elts), [{ + AttrBuilderWithInferredContext<(ins "cir::ArrayType":$type, + "mlir::Attribute":$elts), [{ int zeros = 0; - auto typeSize = mlir::cast(type).getSize(); + auto typeSize = mlir::cast(type).getSize(); if (auto str = mlir::dyn_cast(elts)) zeros = typeSize - str.size(); else @@ -190,13 +190,13 @@ def ConstVectorAttr : CIR_Attr<"ConstVector", "const_vector", }]; let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "ArrayAttr":$elts); + "mlir::ArrayAttr":$elts); // Define a custom builder for the type; that removes the need to pass in an // MLIRContext instance, as it can be inferred from the `type`. let builders = [ - AttrBuilderWithInferredContext<(ins "mlir::cir::VectorType":$type, - "ArrayAttr":$elts), [{ + AttrBuilderWithInferredContext<(ins "cir::VectorType":$type, + "mlir::ArrayAttr":$elts), [{ return $_get(type.getContext(), type, elts); }]> ]; @@ -229,11 +229,11 @@ def ConstStructAttr : CIR_Attr<"ConstStruct", "const_struct", }]; let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "ArrayAttr":$members); + "mlir::ArrayAttr":$members); let builders = [ - AttrBuilderWithInferredContext<(ins "mlir::cir::StructType":$type, - "ArrayAttr":$members), [{ + AttrBuilderWithInferredContext<(ins "cir::StructType":$type, + "mlir::ArrayAttr":$members), [{ return $_get(type.getContext(), type, members); }]> ]; @@ -255,13 +255,13 @@ def IntAttr : CIR_Attr<"Int", "int", [TypedAttrInterface]> { An integer attribute is a literal attribute that represents an integral value of the specified integer type. }]; - let parameters = (ins AttributeSelfTypeParameter<"">:$type, "APInt":$value); + let parameters = (ins AttributeSelfTypeParameter<"">:$type, "llvm::APInt":$value); let builders = [ - AttrBuilderWithInferredContext<(ins "Type":$type, - "const APInt &":$value), [{ + AttrBuilderWithInferredContext<(ins "mlir::Type":$type, + "const llvm::APInt &":$value), [{ return $_get(type.getContext(), type, value); }]>, - AttrBuilderWithInferredContext<(ins "Type":$type, "int64_t":$value), [{ + AttrBuilderWithInferredContext<(ins "mlir::Type":$type, "int64_t":$value), [{ IntType intType = mlir::cast(type); mlir::APInt apValue(intType.getWidth(), value, intType.isSigned()); return $_get(intType.getContext(), intType, apValue); @@ -288,16 +288,16 @@ def FPAttr : CIR_Attr<"FP", "fp", [TypedAttrInterface]> { value of the specified floating-point type. Supporting only CIR FP types. }]; let parameters = (ins - AttributeSelfTypeParameter<"", "::mlir::cir::CIRFPTypeInterface">:$type, + AttributeSelfTypeParameter<"", "::cir::CIRFPTypeInterface">:$type, APFloatParameter<"">:$value ); let builders = [ - AttrBuilderWithInferredContext<(ins "Type":$type, - "const APFloat &":$value), [{ + AttrBuilderWithInferredContext<(ins "mlir::Type":$type, + "const llvm::APFloat &":$value), [{ return $_get(type.getContext(), mlir::cast(type), value); }]>, - AttrBuilder<(ins "Type":$type, - "const APFloat &":$value), [{ + AttrBuilder<(ins "mlir::Type":$type, + "const llvm::APFloat &":$value), [{ return $_get($_ctxt, mlir::cast(type), value); }]>, ]; @@ -326,12 +326,12 @@ def ComplexAttr : CIR_Attr<"Complex", "complex", [TypedAttrInterface]> { contains values of the same CIR type. }]; - let parameters = (ins - AttributeSelfTypeParameter<"", "mlir::cir::ComplexType">:$type, + let parameters = (ins + AttributeSelfTypeParameter<"", "cir::ComplexType">:$type, "mlir::TypedAttr":$real, "mlir::TypedAttr":$imag); let builders = [ - AttrBuilderWithInferredContext<(ins "mlir::cir::ComplexType":$type, + AttrBuilderWithInferredContext<(ins "cir::ComplexType":$type, "mlir::TypedAttr":$real, "mlir::TypedAttr":$imag), [{ return $_get(type.getContext(), type, real, imag); @@ -352,19 +352,19 @@ def ComplexAttr : CIR_Attr<"Complex", "complex", [TypedAttrInterface]> { def ConstPtrAttr : CIR_Attr<"ConstPtr", "ptr", [TypedAttrInterface]> { let summary = "Holds a constant pointer value"; let parameters = (ins - AttributeSelfTypeParameter<"", "::mlir::cir::PointerType">:$type, + AttributeSelfTypeParameter<"", "::cir::PointerType">:$type, "mlir::IntegerAttr":$value); let description = [{ A pointer attribute is a literal attribute that represents an integral value of a pointer type. }]; let builders = [ - AttrBuilderWithInferredContext<(ins "Type":$type, "mlir::IntegerAttr":$value), [{ - return $_get(type.getContext(), mlir::cast(type), value); + AttrBuilderWithInferredContext<(ins "mlir::Type":$type, "mlir::IntegerAttr":$value), [{ + return $_get(type.getContext(), mlir::cast(type), value); }]>, - AttrBuilder<(ins "Type":$type, + AttrBuilder<(ins "mlir::Type":$type, "mlir::IntegerAttr":$value), [{ - return $_get($_ctxt, mlir::cast(type), value); + return $_get($_ctxt, mlir::cast(type), value); }]>, ]; let extraClassDeclaration = [{ @@ -387,7 +387,7 @@ def CmpOrdering : I32EnumAttr< "CmpOrdering", "three-way comparison ordering kind", [CmpOrdering_Strong, CmpOrdering_Partial] > { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def CmpThreeWayInfoAttr : CIR_Attr<"CmpThreeWayInfo", "cmp3way_info"> { @@ -446,7 +446,7 @@ def DataMemberAttr : CIR_Attr<"DataMember", "data_member", [TypedAttrInterface]> { let summary = "Holds a constant data member pointer value"; let parameters = (ins AttributeSelfTypeParameter< - "", "mlir::cir::DataMemberType">:$type, + "", "cir::DataMemberType">:$type, OptionalParameter< "std::optional">:$member_index); let description = [{ @@ -501,21 +501,21 @@ def MethodAttr : CIR_Attr<"Method", "method", [TypedAttrInterface]> { }]; let parameters = (ins AttributeSelfTypeParameter< - "", "mlir::cir::MethodType">:$type, + "", "cir::MethodType">:$type, OptionalParameter< - "std::optional">:$symbol, + "std::optional">:$symbol, OptionalParameter< "std::optional">:$vtable_offset); let builders = [ - AttrBuilderWithInferredContext<(ins "mlir::cir::MethodType":$type), [{ + AttrBuilderWithInferredContext<(ins "cir::MethodType":$type), [{ return $_get(type.getContext(), type, std::nullopt, std::nullopt); }]>, - AttrBuilderWithInferredContext<(ins "mlir::cir::MethodType":$type, - "FlatSymbolRefAttr":$symbol), [{ + AttrBuilderWithInferredContext<(ins "cir::MethodType":$type, + "mlir::FlatSymbolRefAttr":$symbol), [{ return $_get(type.getContext(), type, symbol, std::nullopt); }]>, - AttrBuilderWithInferredContext<(ins "mlir::cir::MethodType":$type, + AttrBuilderWithInferredContext<(ins "cir::MethodType":$type, "uint64_t":$vtable_offset), [{ return $_get(type.getContext(), type, std::nullopt, vtable_offset); }]>, @@ -572,13 +572,13 @@ def GlobalViewAttr : CIR_Attr<"GlobalView", "global_view", [TypedAttrInterface]> }]; let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "FlatSymbolRefAttr":$symbol, - OptionalParameter<"ArrayAttr">:$indices); + "mlir::FlatSymbolRefAttr":$symbol, + OptionalParameter<"mlir::ArrayAttr">:$indices); let builders = [ - AttrBuilderWithInferredContext<(ins "Type":$type, - "FlatSymbolRefAttr":$symbol, - CArg<"ArrayAttr", "{}">:$indices), [{ + AttrBuilderWithInferredContext<(ins "mlir::Type":$type, + "mlir::FlatSymbolRefAttr":$symbol, + CArg<"mlir::ArrayAttr", "{}">:$indices), [{ return $_get(type.getContext(), type, symbol, indices); }]> ]; @@ -623,7 +623,7 @@ def TypeInfoAttr : CIR_Attr<"TypeInfo", "typeinfo", [TypedAttrInterface]> { "mlir::ArrayAttr":$data); let builders = [ - AttrBuilderWithInferredContext<(ins "Type":$type, + AttrBuilderWithInferredContext<(ins "mlir::Type":$type, "mlir::ArrayAttr":$data), [{ return $_get(type.getContext(), type, data); }]> @@ -662,11 +662,11 @@ def VTableAttr : CIR_Attr<"VTable", "vtable", [TypedAttrInterface]> { // `vtable_data` is const struct with one element, containing an array of // vtable information. let parameters = (ins AttributeSelfTypeParameter<"">:$type, - "ArrayAttr":$vtable_data); + "mlir::ArrayAttr":$vtable_data); let builders = [ - AttrBuilderWithInferredContext<(ins "Type":$type, - "ArrayAttr":$vtable_data), [{ + AttrBuilderWithInferredContext<(ins "mlir::Type":$type, + "mlir::ArrayAttr":$vtable_data), [{ return $_get(type.getContext(), type, vtable_data); }]> ]; @@ -739,15 +739,15 @@ def DynamicCastInfoAttr let parameters = (ins GlobalViewAttr:$srcRtti, GlobalViewAttr:$destRtti, - "FlatSymbolRefAttr":$runtimeFunc, - "FlatSymbolRefAttr":$badCastFunc, + "mlir::FlatSymbolRefAttr":$runtimeFunc, + "mlir::FlatSymbolRefAttr":$badCastFunc, IntAttr:$offsetHint); let builders = [ AttrBuilderWithInferredContext<(ins "GlobalViewAttr":$srcRtti, "GlobalViewAttr":$destRtti, - "FlatSymbolRefAttr":$runtimeFunc, - "FlatSymbolRefAttr":$badCastFunc, + "mlir::FlatSymbolRefAttr":$runtimeFunc, + "mlir::FlatSymbolRefAttr":$badCastFunc, "IntAttr":$offsetHint), [{ return $_get(srcRtti.getContext(), srcRtti, destRtti, runtimeFunc, badCastFunc, offsetHint); @@ -809,7 +809,7 @@ def AddressSpaceAttr : CIR_Attr<"AddressSpace", "addrspace"> { }]> ]; - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; // The following codes implement these conversions: // clang::LangAS -> int32_t <-> text-form CIR @@ -939,10 +939,10 @@ class AST traits = []> // Nothing to print besides the mnemonics. } - LogicalResult $cppClass::verify( + llvm::LogicalResult $cppClass::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, }] # clang_name # [{ decl) { - return success(); + return mlir::success(); } }]; } @@ -994,7 +994,7 @@ def VK_Protected : I32EnumAttrCase<"Protected", 3, "protected">; def VisibilityKind : I32EnumAttr<"VisibilityKind", "C/C++ visibility", [ VK_Default, VK_Hidden, VK_Protected ]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def VisibilityAttr : CIR_Attr<"Visibility", "visibility"> { @@ -1035,7 +1035,7 @@ def ExtraFuncAttr : CIR_Attr<"ExtraFuncAttributes", "extra"> { a function. }]; - let parameters = (ins "DictionaryAttr":$elements); + let parameters = (ins "mlir::DictionaryAttr":$elements); let assemblyFormat = [{ `(` $elements `)` }]; @@ -1049,7 +1049,7 @@ def InlineHint : I32EnumAttrCase<"InlineHint", 3, "hint">; def InlineKind : I32EnumAttr<"InlineKind", "inlineKind", [ NoInline, AlwaysInline, InlineHint ]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def InlineAttr : CIR_Attr<"Inline", "inline"> { @@ -1089,16 +1089,16 @@ class CIR_GlobalCtorDtor` }]; let builders = [ - AttrBuilder<(ins "StringRef":$name, + AttrBuilder<(ins "llvm::StringRef":$name, CArg<"int", "65536">:$priority), [{ - return $_get($_ctxt, StringAttr::get($_ctxt, name), priority); + return $_get($_ctxt, mlir::StringAttr::get($_ctxt, name), priority); }]> ]; let extraClassDeclaration = [{ @@ -1122,8 +1122,8 @@ def BitfieldInfoAttr : CIR_Attr<"BitfieldInfo", "bitfield_info"> { Holds the next information about bitfields: name, storage type, a bitfield size and position in the storage, if the bitfield is signed or not. }]; - let parameters = (ins "StringAttr":$name, - "Type":$storage_type, + let parameters = (ins "mlir::StringAttr":$name, + "mlir::Type":$storage_type, "uint64_t":$size, "uint64_t":$offset, "bool":$is_signed); @@ -1131,13 +1131,13 @@ def BitfieldInfoAttr : CIR_Attr<"BitfieldInfo", "bitfield_info"> { let assemblyFormat = "`<` struct($name, $storage_type, $size, $offset, $is_signed) `>`"; let builders = [ - AttrBuilder<(ins "StringRef":$name, - "Type":$storage_type, + AttrBuilder<(ins "llvm::StringRef":$name, + "mlir::Type":$storage_type, "uint64_t":$size, "uint64_t":$offset, "bool":$is_signed ), [{ - return $_get($_ctxt, StringAttr::get($_ctxt, name), storage_type, size, offset, is_signed); + return $_get($_ctxt, mlir::StringAttr::get($_ctxt, name), storage_type, size, offset, is_signed); }]> ]; } @@ -1164,8 +1164,8 @@ def AnnotationAttr : CIR_Attr<"Annotation", "annotation"> { }]; // The parameter args is empty when there is no arg. - let parameters = (ins "StringAttr":$name, - "ArrayAttr":$args); + let parameters = (ins "mlir::StringAttr":$name, + "mlir::ArrayAttr":$args); let assemblyFormat = "`<` struct($name, $args) `>`"; @@ -1205,7 +1205,7 @@ def GlobalAnnotationValuesAttr : CIR_Attr<"GlobalAnnotationValues", ``` }]; - let parameters = (ins "ArrayAttr":$annotations); + let parameters = (ins "mlir::ArrayAttr":$annotations); let assemblyFormat = [{ $annotations }]; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h index 3d6379dcd23d..89a724594081 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h @@ -53,7 +53,7 @@ class CIRDataLayout { /// struct, its size, and the offsets of its fields. /// /// Note that this information is lazily cached. - const StructLayout *getStructLayout(mlir::cir::StructType Ty) const; + const StructLayout *getStructLayout(cir::StructType Ty) const; /// Internal helper method that returns requested alignment for type. llvm::Align getAlignment(mlir::Type Ty, bool abiOrPref) const; @@ -93,7 +93,7 @@ class CIRDataLayout { } llvm::TypeSize getPointerTypeSizeInBits(mlir::Type Ty) const { - assert(mlir::isa(Ty) && + assert(mlir::isa(Ty) && "This should only be called with a pointer type"); return layout.getTypeSizeInBits(Ty); } @@ -101,9 +101,9 @@ class CIRDataLayout { llvm::TypeSize getTypeSizeInBits(mlir::Type Ty) const; mlir::Type getIntPtrType(mlir::Type Ty) const { - assert(mlir::isa(Ty) && "Expected pointer type"); - auto IntTy = mlir::cir::IntType::get(Ty.getContext(), - getPointerTypeSizeInBits(Ty), false); + assert(mlir::isa(Ty) && "Expected pointer type"); + auto IntTy = + cir::IntType::get(Ty.getContext(), getPointerTypeSizeInBits(Ty), false); return IntTy; } }; @@ -153,7 +153,7 @@ class StructLayout final private: friend class CIRDataLayout; // Only DataLayout can create this class - StructLayout(mlir::cir::StructType ST, const CIRDataLayout &DL); + StructLayout(cir::StructType ST, const CIRDataLayout &DL); size_t numTrailingObjects(OverloadToken) const { return NumElements; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h index d59b4ede3091..be928d1ee19b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.h @@ -55,7 +55,7 @@ template class SameFirstOperandAndResultType : public TraitBase { public: - static LogicalResult verifyTrait(Operation *op) { + static llvm::LogicalResult verifyTrait(Operation *op) { return impl::verifySameFirstOperandAndResultType(op); } }; @@ -67,7 +67,7 @@ template class SameSecondOperandAndResultType : public TraitBase { public: - static LogicalResult verifyTrait(Operation *op) { + static llvm::LogicalResult verifyTrait(Operation *op) { return impl::verifySameSecondOperandAndResultType(op); } }; @@ -79,19 +79,19 @@ template class SameFirstSecondOperandAndResultType : public TraitBase { public: - static LogicalResult verifyTrait(Operation *op) { + static llvm::LogicalResult verifyTrait(Operation *op) { return impl::verifySameFirstSecondOperandAndResultType(op); } }; } // namespace OpTrait +} // namespace mlir + namespace cir { -void buildTerminatedBody(OpBuilder &builder, Location loc); +void buildTerminatedBody(mlir::OpBuilder &builder, mlir::Location loc); } // namespace cir -} // namespace mlir - #define GET_OP_CLASSES #include "clang/CIR/Dialect/IR/CIROps.h.inc" diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index fc87df7c86a2..ddf5bdfe5ce7 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -22,7 +22,7 @@ def CIR_Dialect : Dialect { let summary = "A high-level dialect for analyzing and optimizing Clang " "supported languages"; - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; let useDefaultAttributePrinterParser = 0; let useDefaultTypePrinterParser = 0; @@ -32,19 +32,19 @@ def CIR_Dialect : Dialect { let extraClassDeclaration = [{ // Names of CIR parameter attributes. - static StringRef getSExtAttrName() { return "cir.signext"; } - static StringRef getZExtAttrName() { return "cir.zeroext"; } + static llvm::StringRef getSExtAttrName() { return "cir.signext"; } + static llvm::StringRef getZExtAttrName() { return "cir.zeroext"; } void registerAttributes(); void registerTypes(); - Type parseType(DialectAsmParser &parser) const override; - void printType(Type type, DialectAsmPrinter &printer) const override; + mlir::Type parseType(mlir::DialectAsmParser &parser) const override; + void printType(mlir::Type type, mlir::DialectAsmPrinter &printer) const override; - Attribute parseAttribute(DialectAsmParser &parser, - Type type) const override; + mlir::Attribute parseAttribute(mlir::DialectAsmParser &parser, + mlir::Type type) const override; - void printAttribute(Attribute attr, DialectAsmPrinter &os) const override; + void printAttribute(mlir::Attribute attr, mlir::DialectAsmPrinter &os) const override; }]; } diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td index a6932c8ca178..b07a8cced975 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROpenCLAttrs.td @@ -57,11 +57,11 @@ def OpenCLKernelMetadataAttr }]; let parameters = (ins - OptionalParameter<"ArrayAttr">:$work_group_size_hint, - OptionalParameter<"ArrayAttr">:$reqd_work_group_size, - OptionalParameter<"TypeAttr">:$vec_type_hint, + OptionalParameter<"mlir::ArrayAttr">:$work_group_size_hint, + OptionalParameter<"mlir::ArrayAttr">:$reqd_work_group_size, + OptionalParameter<"mlir::TypeAttr">:$vec_type_hint, OptionalParameter<"std::optional">:$vec_type_hint_signedness, - OptionalParameter<"IntegerAttr">:$intel_reqd_sub_group_size + OptionalParameter<"mlir::IntegerAttr">:$intel_reqd_sub_group_size ); let assemblyFormat = "`<` struct(params) `>`"; @@ -76,14 +76,14 @@ def OpenCLKernelMetadataAttr let extraClassDefinition = [{ std::optional $cppClass::isSignedHint(mlir::Type hintQTy) { // Only types in CIR carry signedness - if (!mlir::isa(hintQTy.getDialect())) + if (!mlir::isa(hintQTy.getDialect())) return std::nullopt; // See also clang::CodeGen::CodeGenFunction::EmitKernelMetadata - auto hintEltQTy = mlir::dyn_cast(hintQTy); + auto hintEltQTy = mlir::dyn_cast(hintQTy); auto isCIRSignedIntType = [](mlir::Type t) { - return mlir::isa(t) && - mlir::cast(t).isSigned(); + return mlir::isa(t) && + mlir::cast(t).isSigned(); }; return isCIRSignedIntType(hintQTy) || (hintEltQTy && isCIRSignedIntType(hintEltQTy.getEltType())); @@ -134,12 +134,12 @@ def OpenCLKernelArgMetadataAttr }]; let parameters = (ins - "ArrayAttr":$addr_space, - "ArrayAttr":$access_qual, - "ArrayAttr":$type, - "ArrayAttr":$base_type, - "ArrayAttr":$type_qual, - OptionalParameter<"ArrayAttr">:$name + "mlir::ArrayAttr":$addr_space, + "mlir::ArrayAttr":$access_qual, + "mlir::ArrayAttr":$type, + "mlir::ArrayAttr":$base_type, + "mlir::ArrayAttr":$type_qual, + OptionalParameter<"mlir::ArrayAttr">:$name ); let assemblyFormat = "`<` struct(params) `>`"; diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e29547a6ed6c..2cf9b6889c82 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -58,12 +58,12 @@ include "mlir/IR/CommonAttrConstraints.td" // following: // // class CIRFooOpLowering -// : public mlir::OpConversionPattern { +// : public mlir::OpConversionPattern { // public: -// using OpConversionPattern::OpConversionPattern; +// using OpConversionPattern::OpConversionPattern; // // mlir::LogicalResult matchAndRewrite( -// mlir::cir::FooOp op, +// cir::FooOp op, // OpAdaptor adaptor, // mlir::ConversionPatternRewriter &rewriter) const override { // rewriter.replaceOpWithNewOp( @@ -136,7 +136,7 @@ def CastKind : I32EnumAttr< CK_IntegralComplexToBoolean, CK_FloatComplexCast, CK_FloatComplexToIntegralComplex, CK_IntegralComplexCast, CK_IntegralComplexToFloatComplex]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def CastOp : CIR_Op<"cast", @@ -205,7 +205,7 @@ def DCK_RefCast : I32EnumAttrCase<"ref", 2>; def DynamicCastKind : I32EnumAttr< "DynamicCastKind", "dynamic cast kind", [DCK_PtrCast, DCK_RefCast]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def DynamicCastOp : CIR_Op<"dyn_cast"> { @@ -262,7 +262,7 @@ def DynamicCastOp : CIR_Op<"dyn_cast"> { let extraClassDeclaration = [{ /// Determine whether this operation models reference casting in C++. bool isRefcast() { - return getKind() == ::mlir::cir::DynamicCastKind::ref; + return getKind() == ::cir::DynamicCastKind::ref; } /// Determine whether this operation represents a dynamic cast to a void @@ -286,7 +286,7 @@ def SizeInfoType : I32EnumAttr< "SizeInfoType", "size info type", [SizeInfoTypeMin, SizeInfoTypeMax]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def ObjSizeOp : CIR_Op<"objsize", [Pure]> { @@ -320,7 +320,7 @@ def PtrDiffOp : CIR_Op<"ptr_diff", [Pure, SameTypeOperands]> { let summary = "Pointer subtraction arithmetic"; let description = [{ `cir.ptr_diff` performs a subtraction between two pointer types with the - same element type and produces a `mlir::cir::IntType` result. + same element type and produces a `cir::IntType` result. Note that the result considers the pointer size according to the ABI for the pointee sizes, e.g. the subtraction between two `!cir.ptr` might @@ -371,7 +371,7 @@ def PtrStrideOp : CIR_Op<"ptr_stride", let extraClassDeclaration = [{ // Get type pointed by the base pointer. mlir::Type getElementTy() { - return mlir::cast(getBase().getType()).getPointee(); + return mlir::cast(getBase().getType()).getPointee(); } }]; @@ -412,7 +412,7 @@ def ConstantOp : CIR_Op<"const", let extraClassDeclaration = [{ bool isNullPtr() { - if (const auto ptrAttr = mlir::dyn_cast(getValue())) + if (const auto ptrAttr = mlir::dyn_cast(getValue())) return ptrAttr.isNullValue(); return false; } @@ -437,7 +437,7 @@ def MemOrder : I32EnumAttr< "Memory order according to C++11 memory model", [MemOrderRelaxed, MemOrderConsume, MemOrderAcquire, MemOrderRelease, MemOrderAcqRel, MemOrderSeqCst]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } //===----------------------------------------------------------------------===// @@ -506,15 +506,15 @@ def AllocaOp : CIR_Op<"alloca", [ let skipDefaultBuilders = 1; let builders = [ - OpBuilder<(ins "Type":$addr, "Type":$allocaType, - "StringRef":$name, - "IntegerAttr":$alignment)>, - - OpBuilder<(ins "Type":$addr, - "Type":$allocaType, - "StringRef":$name, - "IntegerAttr":$alignment, - "Value":$dynAllocSize), + OpBuilder<(ins "mlir::Type":$addr, "mlir::Type":$allocaType, + "llvm::StringRef":$name, + "mlir::IntegerAttr":$alignment)>, + + OpBuilder<(ins "mlir::Type":$addr, + "mlir::Type":$allocaType, + "llvm::StringRef":$name, + "mlir::IntegerAttr":$alignment, + "mlir::Value":$dynAllocSize), [{ if (dynAllocSize) $_state.addOperands(dynAllocSize); @@ -524,7 +524,7 @@ def AllocaOp : CIR_Op<"alloca", [ let extraClassDeclaration = [{ // Whether the alloca input type is a pointer. - bool isPointerType() { return ::mlir::isa<::mlir::cir::PointerType>(getAllocaType()); } + bool isPointerType() { return ::mlir::isa<::cir::PointerType>(getAllocaType()); } bool isDynamic() { return (bool)getDynAllocSize(); } }]; @@ -603,7 +603,7 @@ def LoadOp : CIR_Op<"load", [ // TODO(CIR): The final interface here should include an argument for the // SyncScope::ID. // This should be used over the ODS generated setMemOrder. - void setAtomic(mlir::cir::MemOrder order); + void setAtomic(cir::MemOrder order); }]; // FIXME: add verifier. @@ -644,7 +644,7 @@ def StoreOp : CIR_Op<"store", [ }]; let builders = [ - OpBuilder<(ins "Value":$value, "Value":$addr), [{ + OpBuilder<(ins "mlir::Value":$value, "mlir::Value":$addr), [{ $_state.addOperands({value, addr}); }]> ]; @@ -667,7 +667,7 @@ def StoreOp : CIR_Op<"store", [ // TODO(CIR): The final interface here should include an argument for the // SyncScope::ID. // This should be used over the ODS generated setMemOrder. - void setAtomic(mlir::cir::MemOrder order); + void setAtomic(cir::MemOrder order); }]; // FIXME: add verifier. @@ -761,10 +761,10 @@ def IfOp : CIR_Op<"if", let skipDefaultBuilders = 1; let builders = [ - OpBuilder<(ins "Value":$cond, "bool":$withElseRegion, - CArg<"function_ref", + OpBuilder<(ins "mlir::Value":$cond, "bool":$withElseRegion, + CArg<"llvm::function_ref", "buildTerminatedBody">:$thenBuilder, - CArg<"function_ref", + CArg<"llvm::function_ref", "nullptr">:$elseBuilder)> ]; } @@ -805,9 +805,9 @@ def TernaryOp : CIR_Op<"ternary", let skipDefaultBuilders = 1; let builders = [ - OpBuilder<(ins "Value":$cond, - "function_ref":$trueBuilder, - "function_ref":$falseBuilder) + OpBuilder<(ins "mlir::Value":$cond, + "llvm::function_ref":$trueBuilder, + "llvm::function_ref":$falseBuilder) > ]; @@ -1072,9 +1072,9 @@ def ScopeOp : CIR_Op<"scope", [ let builders = [ // Scopes for yielding values. OpBuilder<(ins - "function_ref":$scopeBuilder)>, + "llvm::function_ref":$scopeBuilder)>, // Scopes without yielding values. - OpBuilder<(ins "function_ref":$scopeBuilder)> + OpBuilder<(ins "llvm::function_ref":$scopeBuilder)> ]; } @@ -1097,7 +1097,7 @@ def UnaryOpKind : I32EnumAttr< UnaryOpKind_Minus, UnaryOpKind_Not, ]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } // FIXME: Pure won't work when we add overloading. @@ -1148,7 +1148,7 @@ def BinOpKind : I32EnumAttr< BinOpKind_Add, BinOpKind_Sub, BinOpKind_And, BinOpKind_Xor, BinOpKind_Or]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } // FIXME: Pure won't work when we add overloading. @@ -1238,7 +1238,7 @@ def CmpOpKind : I32EnumAttr< "compare operation kind", [CmpOpKind_LT, CmpOpKind_LE, CmpOpKind_GT, CmpOpKind_GE, CmpOpKind_EQ, CmpOpKind_NE]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } // FIXME: Pure might not work when we add overloading. @@ -1276,7 +1276,7 @@ def BinOpOverflowKind : I32EnumAttr< "BinOpOverflowKind", "checked binary arithmetic operation kind", [BinOpKind_Add, BinOpKind_Sub, BinOpKind_Mul]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def BinOpOverflowOp : CIR_Op<"binop.overflow", [Pure, SameTypeOperands]> { @@ -1316,11 +1316,11 @@ def BinOpOverflowOp : CIR_Op<"binop.overflow", [Pure, SameTypeOperands]> { }]; let builders = [ - OpBuilder<(ins "mlir::cir::IntType":$resultTy, - "mlir::cir::BinOpOverflowKind":$kind, + OpBuilder<(ins "cir::IntType":$resultTy, + "cir::BinOpOverflowKind":$kind, "mlir::Value":$lhs, "mlir::Value":$rhs), [{ - auto overflowTy = mlir::cir::BoolType::get($_builder.getContext()); + auto overflowTy = cir::BoolType::get($_builder.getContext()); build($_builder, $_state, resultTy, overflowTy, kind, lhs, rhs); }]> ]; @@ -1473,7 +1473,7 @@ def ComplexBinOpKind : I32EnumAttr< "ComplexBinOpKind", "complex number binary operation kind", [BinOpKind_Mul, BinOpKind_Div]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def ComplexRangeKind_Full : I32EnumAttrCase<"Full", 1, "full">; @@ -1488,7 +1488,7 @@ def ComplexRangeKind : I32EnumAttr< [ComplexRangeKind_Full, ComplexRangeKind_Improved, ComplexRangeKind_Promoted, ComplexRangeKind_Basic, ComplexRangeKind_None]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def ComplexBinOp : CIR_Op<"complex.binop", @@ -1807,12 +1807,12 @@ def CmpThreeWayOp : CIR_Op<"cmp3way", [Pure, SameTypeOperands]> { let extraClassDeclaration = [{ /// Determine whether this three-way comparison produces a strong ordering. bool isStrongOrdering() { - return getInfo().getOrdering() == mlir::cir::CmpOrdering::Strong; + return getInfo().getOrdering() == cir::CmpOrdering::Strong; } /// Determine whether this three-way comparison compares integral operands. bool isIntegralComparison() { - return mlir::isa(getLhs().getType()); + return mlir::isa(getLhs().getType()); } }]; } @@ -1830,7 +1830,7 @@ def CaseOpKind : I32EnumAttr< "CaseOpKind", "case kind", [CaseOpKind_DT, CaseOpKind_EQ, CaseOpKind_AO, CaseOpKind_RG]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def CaseOp : CIR_Op<"case", [ @@ -1861,9 +1861,9 @@ def CaseOp : CIR_Op<"case", [ let skipDefaultBuilders = 1; let builders = [ - OpBuilder<(ins "ArrayAttr":$value, + OpBuilder<(ins "mlir::ArrayAttr":$value, "CaseOpKind":$kind, - "OpBuilder::InsertPoint &":$insertPoint)> + "mlir::OpBuilder::InsertPoint &":$insertPoint)> ]; } @@ -2012,8 +2012,8 @@ def SwitchOp : CIR_Op<"switch", let skipDefaultBuilders = 1; let builders = [ - OpBuilder<(ins "Value":$condition, - "function_ref":$switchBuilder)> + OpBuilder<(ins "mlir::Value":$condition, + "llvm::function_ref":$switchBuilder)> ]; let assemblyFormat = [{ @@ -2059,8 +2059,8 @@ def BrOp : CIR_Op<"br", }]; let builders = [ - OpBuilder<(ins "Block *":$dest, - CArg<"ValueRange", "{}">:$destOperands), [{ + OpBuilder<(ins "mlir::Block *":$dest, + CArg<"mlir::ValueRange", "{}">:$destOperands), [{ $_state.addSuccessors(dest); $_state.addOperands(destOperands); }]> @@ -2099,9 +2099,9 @@ def BrCondOp : CIR_Op<"brcond", }]; let builders = [ - OpBuilder<(ins "Value":$cond, "Block *":$destTrue, "Block *":$destFalse, - CArg<"ValueRange", "{}">:$destOperandsTrue, - CArg<"ValueRange", "{}">:$destOperandsFalse), [{ + OpBuilder<(ins "mlir::Value":$cond, "mlir::Block *":$destTrue, "mlir::Block *":$destFalse, + CArg<"mlir::ValueRange", "{}">:$destOperandsTrue, + CArg<"mlir::ValueRange", "{}">:$destOperandsFalse), [{ build($_builder, $_state, cond, destOperandsTrue, destOperandsFalse, destTrue, destFalse); }]> @@ -2131,9 +2131,9 @@ class WhileOpBase : CIR_Op":$condBuilder, - "function_ref":$bodyBuilder), [{ - OpBuilder::InsertionGuard guard($_builder); + OpBuilder<(ins "llvm::function_ref":$condBuilder, + "llvm::function_ref":$bodyBuilder), [{ + mlir::OpBuilder::InsertionGuard guard($_builder); $_builder.createBlock($_state.addRegion()); }] # !if(isWhile, [{ condBuilder($_builder, $_state.location); @@ -2177,7 +2177,7 @@ def DoWhileOp : WhileOpBase<"do"> { let assemblyFormat = " $body `while` $cond attr-dict"; let extraClassDeclaration = [{ - Region &getEntry() { return getBody(); } + mlir::Region &getEntry() { return getBody(); } }]; let description = [{ @@ -2238,10 +2238,10 @@ def ForOp : CIR_Op<"for", [LoopOpInterface, NoRegionArguments]> { }]; let builders = [ - OpBuilder<(ins "function_ref":$condBuilder, - "function_ref":$bodyBuilder, - "function_ref":$stepBuilder), [{ - OpBuilder::InsertionGuard guard($_builder); + OpBuilder<(ins "llvm::function_ref":$condBuilder, + "llvm::function_ref":$bodyBuilder, + "llvm::function_ref":$stepBuilder), [{ + mlir::OpBuilder::InsertionGuard guard($_builder); // Build condition region. $_builder.createBlock($_state.addRegion()); @@ -2258,9 +2258,9 @@ def ForOp : CIR_Op<"for", [LoopOpInterface, NoRegionArguments]> { ]; let extraClassDeclaration = [{ - Region *maybeGetStep() { return &getStep(); } - llvm::SmallVector getRegionsInExecutionOrder() { - return llvm::SmallVector{&getCond(), &getBody(), &getStep()}; + mlir::Region *maybeGetStep() { return &getStep(); } + llvm::SmallVector getRegionsInExecutionOrder() { + return llvm::SmallVector{&getCond(), &getBody(), &getStep()}; } }]; } @@ -2319,7 +2319,7 @@ def GlobalLinkageKind : I32EnumAttr< Global_InternalLinkage, Global_PrivateLinkage, Global_ExternalWeakLinkage, Global_CommonLinkage ]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def SOB_Undefined : I32EnumAttrCase<"undefined", 1>; @@ -2330,7 +2330,7 @@ def SignedOverflowBehaviorEnum : I32EnumAttr< "SignedOverflowBehavior", "the behavior for signed overflow", [SOB_Undefined, SOB_Defined, SOB_Trapping]> { - let cppNamespace = "::mlir::cir::sob"; + let cppNamespace = "::cir::sob"; } /// Definition of TLS related kinds. @@ -2347,7 +2347,7 @@ def TLSModel : I32EnumAttr< "TLS_Model", "TLS model", [TLS_GeneralDynamic, TLS_LocalDynamic, TLS_InitialExec, TLS_LocalExec]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def GlobalOp : CIR_Op<"global", @@ -2423,10 +2423,10 @@ def GlobalOp : CIR_Op<"global", } bool hasInitializer() { return !isDeclaration(); } bool hasAvailableExternallyLinkage() { - return mlir::cir::isAvailableExternallyLinkage(getLinkage()); + return cir::isAvailableExternallyLinkage(getLinkage()); } bool hasInternalLinkage() { - return mlir::cir::isInternalLinkage(getLinkage()); + return cir::isInternalLinkage(getLinkage()); } /// Whether the definition of this global may be replaced at link time. bool isWeakForLinker() { return cir::isWeakForLinker(getLinkage()); } @@ -2437,16 +2437,16 @@ def GlobalOp : CIR_Op<"global", let builders = [ OpBuilder<(ins // MLIR's default visibility is public. - "StringRef":$sym_name, - "Type":$sym_type, + "llvm::StringRef":$sym_name, + "mlir::Type":$sym_type, CArg<"bool", "false">:$isConstant, // CIR defaults to external linkage. CArg<"cir::GlobalLinkageKind", "cir::GlobalLinkageKind::ExternalLinkage">:$linkage, CArg<"cir::AddressSpaceAttr", "{}">:$addrSpace, - CArg<"function_ref", + CArg<"llvm::function_ref", "nullptr">:$ctorBuilder, - CArg<"function_ref", + CArg<"llvm::function_ref", "nullptr">:$dtorBuilder) > ]; @@ -2660,11 +2660,11 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> { $src`:`type($src) `)` attr-dict `->` type($result) }]; let builders = [ - OpBuilder<(ins "Type":$type, - "Value":$addr, - "Type":$storage_type, - "Value":$src, - "StringRef":$name, + OpBuilder<(ins "mlir::Type":$type, + "mlir::Value":$addr, + "mlir::Type":$storage_type, + "mlir::Value":$src, + "llvm::StringRef":$name, "unsigned":$size, "unsigned":$offset, "bool":$is_signed, @@ -2738,10 +2738,10 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> { qualified(type($addr)) `)` `->` type($result) }]; let builders = [ - OpBuilder<(ins "Type":$type, - "Value":$addr, - "Type":$storage_type, - "StringRef":$name, + OpBuilder<(ins "mlir::Type":$type, + "mlir::Value":$addr, + "mlir::Type":$storage_type, + "llvm::StringRef":$name, "unsigned":$size, "unsigned":$offset, "bool":$is_signed, @@ -2795,8 +2795,8 @@ def GetMemberOp : CIR_Op<"get_member"> { }]; let builders = [ - OpBuilder<(ins "Type":$type, - "Value":$value, + OpBuilder<(ins "mlir::Type":$type, + "mlir::Value":$value, "llvm::StringRef":$name, "unsigned":$index), [{ @@ -2810,11 +2810,11 @@ def GetMemberOp : CIR_Op<"get_member"> { uint64_t getIndex() { return getIndexAttr().getZExtValue(); } /// Return the record type pointed by the base pointer. - mlir::cir::PointerType getAddrTy() { return getAddr().getType(); } + cir::PointerType getAddrTy() { return getAddr().getType(); } /// Return the result type. - mlir::cir::PointerType getResultTy() { - return mlir::cast(getResult().getType()); + cir::PointerType getResultTy() { + return mlir::cast(getResult().getType()); } }]; @@ -3274,7 +3274,7 @@ def CallingConv : I32EnumAttr< "CallingConv", "calling convention", [CC_C, CC_SpirKernel, CC_SpirFunction]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def FuncOp : CIR_Op<"func", [ @@ -3384,11 +3384,11 @@ def FuncOp : CIR_Op<"func", [ let skipDefaultBuilders = 1; let builders = [OpBuilder<(ins - "StringRef":$name, "FuncType":$type, + "llvm::StringRef":$name, "FuncType":$type, CArg<"GlobalLinkageKind", "GlobalLinkageKind::ExternalLinkage">:$linkage, CArg<"CallingConv", "CallingConv::C">:$callingConv, - CArg<"ArrayRef", "{}">:$attrs, - CArg<"ArrayRef", "{}">:$argAttrs) + CArg<"llvm::ArrayRef", "{}">:$attrs, + CArg<"llvm::ArrayRef", "{}">:$argAttrs) >]; let extraClassDeclaration = [{ @@ -3399,7 +3399,7 @@ def FuncOp : CIR_Op<"func", [ /// Returns the results types that the callable region produces when /// executed. - ArrayRef getCallableResults() { + llvm::ArrayRef getCallableResults() { if (::llvm::isa(getFunctionType().getReturnType())) return {}; return getFunctionType().getReturnTypes(); @@ -3418,16 +3418,16 @@ def FuncOp : CIR_Op<"func", [ } /// Returns the argument types of this function. - ArrayRef getArgumentTypes() { return getFunctionType().getInputs(); } + llvm::ArrayRef getArgumentTypes() { return getFunctionType().getInputs(); } /// Returns the result types of this function. - ArrayRef getResultTypes() { return getFunctionType().getReturnTypes(); } + llvm::ArrayRef getResultTypes() { return getFunctionType().getReturnTypes(); } /// Hook for OpTrait::FunctionOpInterfaceTrait, called after verifying that /// the 'type' attribute is present and checks if it holds a function type. /// Ensures getType, getNumFuncArguments, and getNumFuncResults can be /// called safely. - LogicalResult verifyType(); + llvm::LogicalResult verifyType(); //===------------------------------------------------------------------===// // SymbolOpInterface Methods @@ -3436,7 +3436,7 @@ def FuncOp : CIR_Op<"func", [ bool isDeclaration(); bool hasAvailableExternallyLinkage() { - return mlir::cir::isAvailableExternallyLinkage(getLinkage()); + return cir::isAvailableExternallyLinkage(getLinkage()); } }]; @@ -3468,7 +3468,7 @@ def LLVMIntrinsicCallOp : CIR_Op<"llvm.intrinsic"> { let builders = [ OpBuilder<(ins "mlir::StringAttr":$intrinsic_name, "mlir::Type":$resType, - CArg<"ValueRange", "{}">:$operands), [{ + CArg<"mlir::ValueRange", "{}">:$operands), [{ $_state.addAttribute("intrinsic_name", intrinsic_name); $_state.addOperands(operands); if (resType) @@ -3489,17 +3489,17 @@ class CIR_CallOp extra_traits = []> : DeclareOpInterfaceMethods])> { let extraClassDeclaration = [{ /// Get the argument operands to the called function. - OperandRange getArgOperands() { + mlir::OperandRange getArgOperands() { return {arg_operand_begin(), arg_operand_end()}; } - MutableOperandRange getArgOperandsMutable() { + mlir::MutableOperandRange getArgOperandsMutable() { llvm_unreachable("NYI"); } /// Return the callee of this operation - CallInterfaceCallable getCallableForCallee() { - return (*this)->getAttrOfType("callee"); + mlir::CallInterfaceCallable getCallableForCallee() { + return (*this)->getAttrOfType("callee"); } /// Set the callee for this operation. @@ -3571,10 +3571,10 @@ def CallOp : CIR_CallOp<"call", [NoRegionArguments]> { let skipDefaultBuilders = 1; let builders = [ - OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType, - CArg<"ValueRange", "{}">:$operands, + OpBuilder<(ins "mlir::SymbolRefAttr":$callee, "mlir::Type":$resType, + CArg<"mlir::ValueRange", "{}">:$operands, CArg<"CallingConv", "CallingConv::C">:$callingConv, - CArg<"UnitAttr", "{}">:$exception), [{ + CArg<"mlir::UnitAttr", "{}">:$exception), [{ $_state.addOperands(operands); if (callee) $_state.addAttribute("callee", callee); @@ -3587,11 +3587,11 @@ def CallOp : CIR_CallOp<"call", [NoRegionArguments]> { // Create region placeholder for potential cleanups. $_state.addRegion(); }]>, - OpBuilder<(ins "Value":$ind_target, + OpBuilder<(ins "mlir::Value":$ind_target, "FuncType":$fn_type, - CArg<"ValueRange", "{}">:$operands, + CArg<"mlir::ValueRange", "{}">:$operands, CArg<"CallingConv", "CallingConv::C">:$callingConv, - CArg<"UnitAttr", "{}">:$exception), [{ + CArg<"mlir::UnitAttr", "{}">:$exception), [{ $_state.addOperands(ValueRange{ind_target}); $_state.addOperands(operands); if (!fn_type.isVoid()) @@ -3635,11 +3635,11 @@ def TryCallOp : CIR_CallOp<"try_call", let skipDefaultBuilders = 1; let builders = [ - OpBuilder<(ins "SymbolRefAttr":$callee, "mlir::Type":$resType, - "Block *":$cont, "Block *":$landing_pad, - CArg<"ValueRange", "{}">:$operands, - CArg<"ValueRange", "{}">:$contOperands, - CArg<"ValueRange", "{}">:$landingPadOperands, + OpBuilder<(ins "mlir::SymbolRefAttr":$callee, "mlir::Type":$resType, + "mlir::Block *":$cont, "mlir::Block *":$landing_pad, + CArg<"mlir::ValueRange", "{}">:$operands, + CArg<"mlir::ValueRange", "{}">:$contOperands, + CArg<"mlir::ValueRange", "{}">:$landingPadOperands, CArg<"CallingConv", "CallingConv::C">:$callingConv), [{ $_state.addOperands(operands); if (callee) @@ -3663,12 +3663,12 @@ def TryCallOp : CIR_CallOp<"try_call", $_state.addSuccessors(cont); $_state.addSuccessors(landing_pad); }]>, - OpBuilder<(ins "Value":$ind_target, + OpBuilder<(ins "mlir::Value":$ind_target, "FuncType":$fn_type, - "Block *":$cont, "Block *":$landing_pad, - CArg<"ValueRange", "{}">:$operands, - CArg<"ValueRange", "{}">:$contOperands, - CArg<"ValueRange", "{}">:$landingPadOperands, + "mlir::Block *":$cont, "mlir::Block *":$landing_pad, + CArg<"mlir::ValueRange", "{}">:$operands, + CArg<"mlir::ValueRange", "{}">:$contOperands, + CArg<"mlir::ValueRange", "{}">:$landingPadOperands, CArg<"CallingConv", "CallingConv::C">:$callingConv), [{ ::llvm::SmallVector finalCallOperands({ind_target}); finalCallOperands.append(operands.begin(), operands.end()); @@ -3709,7 +3709,7 @@ def AwaitKind : I32EnumAttr< "AwaitKind", "await kind", [AK_Initial, AK_User, AK_Yield, AK_Final]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def AwaitOp : CIR_Op<"await", @@ -3783,12 +3783,12 @@ def AwaitOp : CIR_Op<"await", let skipDefaultBuilders = 1; let builders = [ OpBuilder<(ins - "mlir::cir::AwaitKind":$kind, - CArg<"function_ref", + "cir::AwaitKind":$kind, + CArg<"llvm::function_ref", "nullptr">:$readyBuilder, - CArg<"function_ref", + CArg<"llvm::function_ref", "nullptr">:$suspendBuilder, - CArg<"function_ref", + CArg<"llvm::function_ref", "nullptr">:$resumeBuilder )> ]; @@ -3858,8 +3858,8 @@ def TryOp : CIR_Op<"try", let hasVerifier = 0; let builders = [ OpBuilder<(ins - "function_ref":$tryBuilder, - "function_ref" + "llvm::function_ref":$tryBuilder, + "llvm::function_ref" :$catchBuilder)>, ]; } @@ -3874,7 +3874,7 @@ def CatchParamKind : I32EnumAttr< "CatchParamKind", "Designate limits for begin/end of catch param handling", [CatchParamBegin, CatchParamEnd]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def CatchParamOp : CIR_Op<"catch_param"> { @@ -3901,8 +3901,8 @@ def CatchParamOp : CIR_Op<"catch_param"> { }]; let extraClassDeclaration = [{ - bool isBegin() { return getKind() == mlir::cir::CatchParamKind::begin; } - bool isEnd() { return getKind() == mlir::cir::CatchParamKind::end; } + bool isBegin() { return getKind() == cir::CatchParamKind::begin; } + bool isEnd() { return getKind() == cir::CatchParamKind::end; } }]; let hasVerifier = 1; @@ -3989,11 +3989,11 @@ def CopyOp : CIR_Op<"copy", let extraClassDeclaration = [{ /// Returns the pointer type being copied. - mlir::cir::PointerType getType() { return getSrc().getType(); } + cir::PointerType getType() { return getSrc().getType(); } /// Returns the number of bytes to be copied. unsigned getLength() { - return DataLayout::closest(*this).getTypeSize(getType().getPointee()); + return mlir::DataLayout::closest(*this).getTypeSize(getType().getPointee()); } }]; } @@ -4010,7 +4010,7 @@ class CIR_MemCpyOp: CIR_Op { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { @@ -4727,12 +4727,12 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> { ); let builders = [OpBuilder<(ins - "ArrayRef":$asm_operands, - "StringRef":$asm_string, - "StringRef":$constraints, + "llvm::ArrayRef":$asm_operands, + "llvm::StringRef":$asm_string, + "llvm::StringRef":$constraints, "bool":$side_effects, "AsmFlavor":$asm_flavor, - "ArrayRef":$operand_attrs + "llvm::ArrayRef":$operand_attrs )> ]; @@ -4839,10 +4839,10 @@ class CIR_ArrayInitDestroy : CIR_Op { let builders = [ OpBuilder<(ins "mlir::Value":$addr, - "function_ref":$regionBuilder), [{ + "llvm::function_ref":$regionBuilder), [{ assert(regionBuilder && "builder callback expected"); - OpBuilder::InsertionGuard guard($_builder); - Region *r = $_state.addRegion(); + mlir::OpBuilder::InsertionGuard guard($_builder); + mlir::Region *r = $_state.addRegion(); $_state.addOperands(ValueRange{addr}); $_builder.createBlock(r); regionBuilder($_builder, $_state.location); @@ -4915,12 +4915,12 @@ def SwitchFlatOp : CIR_Op<"switch.flat", [AttrSizedOperandSegments, Terminator]> }]; let builders = [ - OpBuilder<(ins "Value":$condition, - "Block *":$defaultDestination, - "ValueRange":$defaultOperands, - CArg<"ArrayRef", "{}">:$caseValues, - CArg<"BlockRange", "{}">:$caseDestinations, - CArg<"ArrayRef", "{}">:$caseOperands)> + OpBuilder<(ins "mlir::Value":$condition, + "mlir::Block *":$defaultDestination, + "mlir::ValueRange":$defaultOperands, + CArg<"llvm::ArrayRef", "{}">:$caseValues, + CArg<"mlir::BlockRange", "{}">:$caseDestinations, + CArg<"llvm::ArrayRef", "{}">:$caseOperands)> ]; } @@ -5011,7 +5011,7 @@ def AtomicFetchKind : I32EnumAttr< "Binary opcode for atomic fetch operations", [Atomic_Add, Atomic_Sub, Atomic_And, Atomic_Xor, Atomic_Or, Atomic_Nand, Atomic_Max, Atomic_Min]> { - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; } def AtomicFetch : CIR_Op<"atomic.fetch", diff --git a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h index 06851947f24c..802d517f9202 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h +++ b/clang/include/clang/CIR/Dialect/IR/CIROpsEnums.h @@ -17,7 +17,6 @@ #include "mlir/IR/BuiltinAttributes.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h.inc" -namespace mlir { namespace cir { static bool isExternalLinkage(GlobalLinkageKind Linkage) { @@ -115,19 +114,18 @@ LLVM_ATTRIBUTE_UNUSED static bool isValidLinkage(GlobalLinkageKind L) { isLinkOnceLinkage(L); } -bool operator<(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; -bool operator>(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; -bool operator<=(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; -bool operator>=(mlir::cir::MemOrder, mlir::cir::MemOrder) = delete; +bool operator<(cir::MemOrder, cir::MemOrder) = delete; +bool operator>(cir::MemOrder, cir::MemOrder) = delete; +bool operator<=(cir::MemOrder, cir::MemOrder) = delete; +bool operator>=(cir::MemOrder, cir::MemOrder) = delete; // Validate an integral value which isn't known to fit within the enum's range // is a valid AtomicOrderingCABI. template inline bool isValidCIRAtomicOrderingCABI(Int I) { - return (Int)mlir::cir::MemOrder::Relaxed <= I && - I <= (Int)mlir::cir::MemOrder::SequentiallyConsistent; + return (Int)cir::MemOrder::Relaxed <= I && + I <= (Int)cir::MemOrder::SequentiallyConsistent; } } // namespace cir -} // namespace mlir #endif // MLIR_DIALECT_CIR_CIROPSENUMS_H_ diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 9736b3cd575e..4e9902792eca 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -28,7 +28,6 @@ // The base type for all RecordDecls. //===----------------------------------------------------------------------===// -namespace mlir { namespace cir { namespace detail { @@ -71,59 +70,63 @@ struct StructTypeStorage; /// "Node">>}> /// ``` class StructType - : public Type::TypeBase { + : public mlir::Type::TypeBase< + StructType, mlir::Type, detail::StructTypeStorage, + mlir::DataLayoutTypeInterface::Trait, mlir::TypeTrait::IsMutable> { // FIXME(cir): migrate this type to Tablegen once mutable types are supported. public: using Base::Base; using Base::getChecked; using Base::verifyInvariants; - static constexpr StringLiteral name = "cir.struct"; + static constexpr llvm::StringLiteral name = "cir.struct"; enum RecordKind : uint32_t { Class, Union, Struct }; /// Create a identified and complete struct type. - static StructType get(MLIRContext *context, ArrayRef members, - StringAttr name, bool packed, RecordKind kind, + static StructType get(mlir::MLIRContext *context, + llvm::ArrayRef members, + mlir::StringAttr name, bool packed, RecordKind kind, ASTRecordDeclInterface ast = {}); - static StructType getChecked(function_ref emitError, - MLIRContext *context, ArrayRef members, - StringAttr name, bool packed, RecordKind kind, - ASTRecordDeclInterface ast = {}); + static StructType + getChecked(llvm::function_ref emitError, + mlir::MLIRContext *context, llvm::ArrayRef members, + mlir::StringAttr name, bool packed, RecordKind kind, + ASTRecordDeclInterface ast = {}); /// Create a identified and incomplete struct type. - static StructType get(MLIRContext *context, StringAttr name, RecordKind kind); - static StructType getChecked(function_ref emitError, - MLIRContext *context, StringAttr name, - RecordKind kind); + static StructType get(mlir::MLIRContext *context, mlir::StringAttr name, + RecordKind kind); + static StructType + getChecked(llvm::function_ref emitError, + mlir::MLIRContext *context, mlir::StringAttr name, + RecordKind kind); /// Create a anonymous struct type (always complete). - static StructType get(MLIRContext *context, ArrayRef members, - bool packed, RecordKind kind, - ASTRecordDeclInterface ast = {}); - static StructType getChecked(function_ref emitError, - MLIRContext *context, ArrayRef members, - bool packed, RecordKind kind, - ASTRecordDeclInterface ast = {}); + static StructType get(mlir::MLIRContext *context, + llvm::ArrayRef members, bool packed, + RecordKind kind, ASTRecordDeclInterface ast = {}); + static StructType + getChecked(llvm::function_ref emitError, + mlir::MLIRContext *context, llvm::ArrayRef members, + bool packed, RecordKind kind, ASTRecordDeclInterface ast = {}); /// Validate the struct about to be constructed. - static LogicalResult - verifyInvariants(function_ref emitError, - ArrayRef members, StringAttr name, bool incomplete, - bool packed, StructType::RecordKind kind, + static llvm::LogicalResult + verifyInvariants(llvm::function_ref emitError, + llvm::ArrayRef members, mlir::StringAttr name, + bool incomplete, bool packed, StructType::RecordKind kind, ASTRecordDeclInterface ast); // Parse/print methods. - static constexpr StringLiteral getMnemonic() { return {"struct"}; } - static Type parse(AsmParser &odsParser); - void print(AsmPrinter &odsPrinter) const; + static constexpr llvm::StringLiteral getMnemonic() { return {"struct"}; } + static mlir::Type parse(mlir::AsmParser &odsParser); + void print(mlir::AsmPrinter &odsPrinter) const; // Accessors ASTRecordDeclInterface getAst() const; - ArrayRef getMembers() const; - StringAttr getName() const; + llvm::ArrayRef getMembers() const; + mlir::StringAttr getName() const; StructType::RecordKind getKind() const; bool getIncomplete() const; bool getPacked() const; @@ -137,7 +140,7 @@ class StructType bool isIncomplete() const; // Utilities - Type getLargestMember(const DataLayout &dataLayout) const; + mlir::Type getLargestMember(const mlir::DataLayout &dataLayout) const; size_t getNumElements() const { return getMembers().size(); }; std::string getKindAsStr() { switch (getKind()) { @@ -154,17 +157,18 @@ class StructType } /// Complete the struct type by mutating its members and attributes. - void complete(ArrayRef members, bool packed, + void complete(llvm::ArrayRef members, bool packed, ASTRecordDeclInterface ast = {}); /// DataLayoutTypeInterface methods. - llvm::TypeSize getTypeSizeInBits(const DataLayout &dataLayout, - DataLayoutEntryListRef params) const; - uint64_t getABIAlignment(const DataLayout &dataLayout, - DataLayoutEntryListRef params) const; - uint64_t getPreferredAlignment(const DataLayout &dataLayout, - DataLayoutEntryListRef params) const; - uint64_t getElementOffset(const DataLayout &dataLayout, unsigned idx) const; + llvm::TypeSize getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const; + uint64_t getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const; + uint64_t getPreferredAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const; + uint64_t getElementOffset(const mlir::DataLayout &dataLayout, + unsigned idx) const; bool isLayoutIdentical(const StructType &other); @@ -174,14 +178,13 @@ class StructType // from CIRAttrs.h. The implementation operates in terms of StructLayoutAttr // instead. mutable mlir::Attribute layoutInfo; - bool isPadded(const DataLayout &dataLayout) const; - void computeSizeAndAlignment(const DataLayout &dataLayout) const; + bool isPadded(const mlir::DataLayout &dataLayout) const; + void computeSizeAndAlignment(const mlir::DataLayout &dataLayout) const; }; bool isAnyFloatingPointType(mlir::Type t); bool isFPOrFPVectorTy(mlir::Type); } // namespace cir -} // namespace mlir mlir::ParseResult parseAddrSpaceAttribute(mlir::AsmParser &p, mlir::Attribute &addrSpaceAttr); diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index fe136a58a4a4..4317aaf3bb01 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -80,12 +80,12 @@ def CIR_IntType : CIR_Type<"Int", "int", // Unsigned integer type of a specific width. class UInt : Type($_self)">, - CPred<"::mlir::cast<::mlir::cir::IntType>($_self).isUnsigned()">, - CPred<"::mlir::cast<::mlir::cir::IntType>($_self).getWidth() == " # width> - ]>, width # "-bit unsigned integer", "::mlir::cir::IntType">, + CPred<"::mlir::isa<::cir::IntType>($_self)">, + CPred<"::mlir::cast<::cir::IntType>($_self).isUnsigned()">, + CPred<"::mlir::cast<::cir::IntType>($_self).getWidth() == " # width> + ]>, width # "-bit unsigned integer", "::cir::IntType">, BuildableType< - "mlir::cir::IntType::get($_builder.getContext(), " + "cir::IntType::get($_builder.getContext(), " # width # ", /*isSigned=*/false)"> { int bitwidth = width; } @@ -99,12 +99,12 @@ def UInt64 : UInt<64>; // Signed integer type of a specific width. class SInt : Type($_self)">, - CPred<"::mlir::cast<::mlir::cir::IntType>($_self).isSigned()">, - CPred<"::mlir::cast<::mlir::cir::IntType>($_self).getWidth() == " # width> - ]>, width # "-bit signed integer", "::mlir::cir::IntType">, + CPred<"::mlir::isa<::cir::IntType>($_self)">, + CPred<"::mlir::cast<::cir::IntType>($_self).isSigned()">, + CPred<"::mlir::cast<::cir::IntType>($_self).getWidth() == " # width> + ]>, width # "-bit signed integer", "::cir::IntType">, BuildableType< - "mlir::cir::IntType::get($_builder.getContext(), " + "cir::IntType::get($_builder.getContext(), " # width # ", /*isSigned=*/true)"> { int bitwidth = width; } @@ -117,13 +117,13 @@ def SInt64 : SInt<64>; def PrimitiveUInt : AnyTypeOf<[UInt8, UInt16, UInt32, UInt64], "primitive unsigned int", - "::mlir::cir::IntType">; + "::cir::IntType">; def PrimitiveSInt : AnyTypeOf<[SInt8, SInt16, SInt32, SInt64], "primitive signed int", - "::mlir::cir::IntType">; + "::cir::IntType">; def PrimitiveInt : AnyTypeOf<[UInt8, UInt16, UInt32, UInt64, SInt8, SInt16, SInt32, SInt64], - "primitive int", "::mlir::cir::IntType">; + "primitive int", "::cir::IntType">; //===----------------------------------------------------------------------===// // FloatType @@ -276,7 +276,7 @@ def CIR_PointerType : CIR_Type<"Pointer", "ptr", let extraClassDeclaration = [{ bool isVoidPtr() const { - return mlir::isa(getPointee()); + return mlir::isa(getPointee()); } }]; } @@ -296,7 +296,7 @@ def CIR_DataMemberType : CIR_Type<"DataMember", "data_member", }]; let parameters = (ins "mlir::Type":$memberTy, - "mlir::cir::StructType":$clsTy); + "cir::StructType":$clsTy); let assemblyFormat = [{ `<` $memberTy `in` $clsTy `>` @@ -381,7 +381,7 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { ``` }]; - let parameters = (ins ArrayRefParameter<"Type">:$inputs, "Type":$returnType, + let parameters = (ins ArrayRefParameter<"mlir::Type">:$inputs, "mlir::Type":$returnType, "bool":$varArg); let assemblyFormat = [{ `<` $returnType ` ` `(` custom($inputs, $varArg) `>` @@ -389,7 +389,7 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { let builders = [ TypeBuilderWithInferredContext<(ins - "ArrayRef":$inputs, "Type":$returnType, + "llvm::ArrayRef":$inputs, "mlir::Type":$returnType, CArg<"bool", "false">:$isVarArg), [{ return $_get(returnType.getContext(), inputs, returnType, isVarArg); }]> @@ -400,21 +400,21 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { bool isVarArg() const { return getVarArg(); } /// Returns the `i`th input operand type. Asserts if out of bounds. - Type getInput(unsigned i) const { return getInputs()[i]; } + mlir::Type getInput(unsigned i) const { return getInputs()[i]; } /// Returns the number of arguments to the function. unsigned getNumInputs() const { return getInputs().size(); } /// Returns the result type of the function as an ArrayRef, enabling better /// integration with generic MLIR utilities. - ArrayRef getReturnTypes() const; + llvm::ArrayRef getReturnTypes() const; /// Returns whether the function is returns void. bool isVoid() const; /// Returns a clone of this function type with the given argument /// and result types. - FuncType clone(TypeRange inputs, TypeRange results) const; + FuncType clone(mlir::TypeRange inputs, mlir::TypeRange results) const; }]; } @@ -430,8 +430,8 @@ def CIR_MethodType : CIR_Type<"Method", "method", of this type is ABI-dependent. }]; - let parameters = (ins "mlir::cir::FuncType":$memberFuncTy, - "mlir::cir::StructType":$clsTy); + let parameters = (ins "cir::FuncType":$memberFuncTy, + "cir::StructType":$clsTy); let assemblyFormat = [{ `<` qualified($memberFuncTy) `in` $clsTy `>` @@ -478,62 +478,62 @@ def CIR_VoidType : CIR_Type<"Void", "void"> { // Pointer to void def VoidPtr : Type< And<[ - CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, - CPred<"::mlir::isa<::mlir::cir::VoidType>(" - "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())">, + CPred<"::mlir::isa<::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::cir::VoidType>(" + "::mlir::cast<::cir::PointerType>($_self).getPointee())">, ]>, "void*">, BuildableType< - "mlir::cir::PointerType::get($_builder.getContext()," - "mlir::cir::VoidType::get($_builder.getContext()))"> { + "cir::PointerType::get($_builder.getContext()," + "cir::VoidType::get($_builder.getContext()))"> { } // Pointer to a primitive int, float or double def PrimitiveIntOrFPPtr : Type< And<[ - CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, - CPred<"::mlir::isa<::mlir::cir::IntType, ::mlir::cir::SingleType," - "::mlir::cir::DoubleType>(" - "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())">, + CPred<"::mlir::isa<::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::cir::IntType, ::cir::SingleType," + "::cir::DoubleType>(" + "::mlir::cast<::cir::PointerType>($_self).getPointee())">, ]>, "{int,void}*"> { } def ComplexPtr : Type< And<[ - CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, - CPred<"::mlir::isa<::mlir::cir::ComplexType>(" - "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())">, + CPred<"::mlir::isa<::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::cir::ComplexType>(" + "::mlir::cast<::cir::PointerType>($_self).getPointee())">, ]>, "!cir.complex*"> { } // Pointer to struct def StructPtr : Type< And<[ - CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, - CPred<"::mlir::isa<::mlir::cir::StructType>(" - "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())"> + CPred<"::mlir::isa<::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::cir::StructType>(" + "::mlir::cast<::cir::PointerType>($_self).getPointee())"> ]>, "!cir.struct*"> { } // Pointer to exception info def ExceptionPtr : Type< And<[ - CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, - CPred<"::mlir::isa<::mlir::cir::ExceptionInfoType>(" - "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())"> + CPred<"::mlir::isa<::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::cir::ExceptionInfoType>(" + "::mlir::cast<::cir::PointerType>($_self).getPointee())"> ]>, "!cir.eh_info*">, BuildableType< - "mlir::cir::PointerType::get($_builder.getContext()," - "mlir::cir::ExceptionInfoType::get($_builder.getContext()))"> { + "cir::PointerType::get($_builder.getContext()," + "cir::ExceptionInfoType::get($_builder.getContext()))"> { } // Vector of integral type def IntegerVector : Type< And<[ - CPred<"::mlir::isa<::mlir::cir::VectorType>($_self)">, - CPred<"::mlir::isa<::mlir::cir::IntType>(" - "::mlir::cast<::mlir::cir::VectorType>($_self).getEltType())">, - CPred<"::mlir::cast<::mlir::cir::IntType>(" - "::mlir::cast<::mlir::cir::VectorType>($_self).getEltType())" + CPred<"::mlir::isa<::cir::VectorType>($_self)">, + CPred<"::mlir::isa<::cir::IntType>(" + "::mlir::cast<::cir::VectorType>($_self).getEltType())">, + CPred<"::mlir::cast<::cir::IntType>(" + "::mlir::cast<::cir::VectorType>($_self).getEltType())" ".isPrimitive()"> ]>, "!cir.vector of !cir.int"> { } @@ -544,18 +544,18 @@ def CIR_AnyIntOrVecOfInt: AnyTypeOf<[CIR_IntType, IntegerVector]>; // Pointer to Arrays def ArrayPtr : Type< And<[ - CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, - CPred<"::mlir::isa<::mlir::cir::ArrayType>(" - "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())">, + CPred<"::mlir::isa<::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::cir::ArrayType>(" + "::mlir::cast<::cir::PointerType>($_self).getPointee())">, ]>, "!cir.ptr"> { } // Pointer to functions def FuncPtr : Type< And<[ - CPred<"::mlir::isa<::mlir::cir::PointerType>($_self)">, - CPred<"::mlir::isa<::mlir::cir::FuncType>(" - "::mlir::cast<::mlir::cir::PointerType>($_self).getPointee())">, + CPred<"::mlir::isa<::cir::PointerType>($_self)">, + CPred<"::mlir::isa<::cir::FuncType>(" + "::mlir::cast<::cir::PointerType>($_self).getPointee())">, ]>, "!cir.ptr"> { } @@ -563,7 +563,7 @@ def FuncPtr : Type< // StructType (defined in cpp files) //===----------------------------------------------------------------------===// -def CIR_StructType : Type($_self)">, +def CIR_StructType : Type($_self)">, "CIR struct type">; //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h index 5eba4ac460a7..f97a4afe5a4c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h @@ -18,7 +18,6 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/ADT/Hashing.h" -namespace mlir { namespace cir { namespace detail { @@ -27,30 +26,31 @@ namespace detail { //===----------------------------------------------------------------------===// /// Type storage for CIR record types. -struct StructTypeStorage : public TypeStorage { +struct StructTypeStorage : public mlir::TypeStorage { struct KeyTy { - ArrayRef members; - StringAttr name; + llvm::ArrayRef members; + mlir::StringAttr name; bool incomplete; bool packed; StructType::RecordKind kind; ASTRecordDeclInterface ast; - KeyTy(ArrayRef members, StringAttr name, bool incomplete, bool packed, - StructType::RecordKind kind, ASTRecordDeclInterface ast) + KeyTy(llvm::ArrayRef members, mlir::StringAttr name, + bool incomplete, bool packed, StructType::RecordKind kind, + ASTRecordDeclInterface ast) : members(members), name(name), incomplete(incomplete), packed(packed), kind(kind), ast(ast) {} }; - ArrayRef members; - StringAttr name; + llvm::ArrayRef members; + mlir::StringAttr name; bool incomplete; bool packed; StructType::RecordKind kind; ASTRecordDeclInterface ast; - StructTypeStorage(ArrayRef members, StringAttr name, bool incomplete, - bool packed, StructType::RecordKind kind, + StructTypeStorage(llvm::ArrayRef members, mlir::StringAttr name, + bool incomplete, bool packed, StructType::RecordKind kind, ASTRecordDeclInterface ast) : members(members), name(name), incomplete(incomplete), packed(packed), kind(kind), ast(ast) {} @@ -74,7 +74,7 @@ struct StructTypeStorage : public TypeStorage { key.ast); } - static StructTypeStorage *construct(TypeStorageAllocator &allocator, + static StructTypeStorage *construct(mlir::TypeStorageAllocator &allocator, const KeyTy &key) { return new (allocator.allocate()) StructTypeStorage(allocator.copyInto(key.members), key.name, @@ -87,11 +87,12 @@ struct StructTypeStorage : public TypeStorage { /// mutations. Anonymous structs are always complete and cannot be mutated. /// This method does not fail if a mutation of a complete struct does not /// change the struct. - LogicalResult mutate(TypeStorageAllocator &allocator, ArrayRef members, - bool packed, ASTRecordDeclInterface ast) { + llvm::LogicalResult mutate(mlir::TypeStorageAllocator &allocator, + llvm::ArrayRef members, bool packed, + ASTRecordDeclInterface ast) { // Anonymous structs cannot mutate. if (!name) - return failure(); + return llvm::failure(); // Mutation of complete structs are allowed if they change nothing. if (!incomplete) @@ -104,12 +105,11 @@ struct StructTypeStorage : public TypeStorage { this->ast = ast; incomplete = false; - return success(); + return llvm::success(); } }; } // namespace detail } // namespace cir -} // namespace mlir #endif // CIR_DIALECT_IR_CIRTYPESDETAILS_H diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index 41086e36748e..d6cd4831a6af 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -22,8 +22,8 @@ namespace mlir { std::unique_ptr createLifetimeCheckPass(); std::unique_ptr createLifetimeCheckPass(clang::ASTContext *astCtx); -std::unique_ptr createLifetimeCheckPass(ArrayRef remark, - ArrayRef hist, +std::unique_ptr createLifetimeCheckPass(llvm::ArrayRef remark, + llvm::ArrayRef hist, unsigned hist_limit, clang::ASTContext *astCtx); std::unique_ptr createCIRCanonicalizePass(); diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index d1383fb48109..4a8d2bfa9672 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -188,7 +188,7 @@ def CallConvLowering : Pass<"cir-call-conv-lowering"> { to properly lower CIR functions to LLVM IR. }]; let constructor = "mlir::createCallConvLoweringPass()"; - let dependentDialects = ["mlir::cir::CIRDialect"]; + let dependentDialects = ["cir::CIRDialect"]; } #endif // MLIR_DIALECT_CIR_PASSES diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h index e2f1e16eb511..cafb5e071e26 100644 --- a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h +++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.h @@ -15,19 +15,16 @@ #include "clang/AST/DeclTemplate.h" #include "clang/AST/Mangle.h" -namespace mlir { namespace cir { mlir::Attribute makeFuncDeclAttr(const clang::Decl *decl, mlir::MLIRContext *ctx); } // namespace cir -} // namespace mlir /// Include the generated interface declarations. #include "clang/CIR/Interfaces/ASTAttrInterfaces.h.inc" -namespace mlir { namespace cir { template bool hasAttr(ASTDeclInterface decl) { @@ -40,6 +37,5 @@ template bool hasAttr(ASTDeclInterface decl) { } } // namespace cir -} // namespace mlir #endif // MLIR_INTERFACES_CIR_AST_ATAR_INTERFACES_H_ diff --git a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td index fc162c11f42c..33f3cffed030 100644 --- a/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/ASTAttrInterfaces.td @@ -11,7 +11,7 @@ include "mlir/IR/OpBase.td" -let cppNamespace = "::mlir::cir" in { +let cppNamespace = "::cir" in { def ASTDeclInterface : AttrInterface<"ASTDeclInterface"> { let methods = [ InterfaceMethod<"", "bool", "hasOwnerAttr", (ins), [{}], @@ -93,14 +93,14 @@ let cppNamespace = "::mlir::cir" in { let methods = [ InterfaceMethod<"", "bool", "isCopyAssignmentOperator", (ins), [{}], /*defaultImplementation=*/ [{ - if (auto decl = dyn_cast($_attr.getAst())) + if (auto decl = mlir::dyn_cast($_attr.getAst())) return decl->isCopyAssignmentOperator(); return false; }] >, InterfaceMethod<"", "bool", "isMoveAssignmentOperator", (ins), [{}], /*defaultImplementation=*/ [{ - if (auto decl = dyn_cast($_attr.getAst())) + if (auto decl = mlir::dyn_cast($_attr.getAst())) return decl->isMoveAssignmentOperator(); return false; }] @@ -190,7 +190,7 @@ let cppNamespace = "::mlir::cir" in { } def AnyASTFunctionDeclAttr : Attr< - CPred<"::mlir::isa<::mlir::cir::ASTFunctionDeclInterface>($_self)">, + CPred<"::mlir::isa<::cir::ASTFunctionDeclInterface>($_self)">, "AST Function attribute"> { let storageType = "::mlir::Attribute"; let returnType = "::mlir::Attribute"; @@ -210,11 +210,11 @@ let cppNamespace = "::mlir::cir" in { auto callee = $_attr.getAst()->getCallee(); if (!callee) return false; - auto *ice = dyn_cast(callee); + auto *ice = mlir::dyn_cast(callee); if (!ice) return false; - auto *dre = dyn_cast_or_null(ice->getSubExpr()); + auto *dre = mlir::dyn_cast_or_null(ice->getSubExpr()); if (!dre) return false; auto qual = dre->getQualifier(); @@ -248,7 +248,7 @@ let cppNamespace = "::mlir::cir" in { InterfaceMethod<"", "bool", "isMemberCallTo", (ins "llvm::StringRef":$fn), [{}], /*defaultImplementation=*/ [{ - auto memberCall = dyn_cast($_attr.getAst()); + auto memberCall = mlir::dyn_cast($_attr.getAst()); if (!memberCall) return false; auto methodDecl = memberCall->getMethodDecl(); @@ -277,6 +277,6 @@ let cppNamespace = "::mlir::cir" in { } -} // namespace mlir::cir +} // namespace cir #endif // MLIR_CIR_INTERFACES_AST_ATTR_INTERFACES diff --git a/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td index 7438c8be52d9..f1a9f9b36c9e 100644 --- a/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td +++ b/clang/include/clang/CIR/Interfaces/CIRFPTypeInterface.td @@ -15,7 +15,7 @@ def CIRFPTypeInterface : TypeInterface<"CIRFPTypeInterface"> { let description = [{ Contains helper functions to query properties about a floating-point type. }]; - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; let methods = [ InterfaceMethod<[{ diff --git a/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h index 2e8a0c8e8a94..012ca9c6a6f1 100644 --- a/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h +++ b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.h @@ -19,7 +19,6 @@ #include "mlir/Interfaces/ControlFlowInterfaces.h" #include "mlir/Interfaces/LoopLikeInterface.h" -namespace mlir { namespace cir { namespace detail { @@ -28,7 +27,6 @@ ::mlir::LogicalResult verifyLoopOpInterface(::mlir::Operation *op); } // namespace detail } // namespace cir -} // namespace mlir /// Include the tablegen'd interface declarations. #include "clang/CIR/Interfaces/CIRLoopOpInterface.h.inc" diff --git a/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td index bac30dac3d82..8fd4a321b396 100644 --- a/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td +++ b/clang/include/clang/CIR/Interfaces/CIRLoopOpInterface.td @@ -21,7 +21,7 @@ def LoopOpInterface : OpInterface<"LoopOpInterface", [ Contains helper functions to query properties and perform transformations on a loop. }]; - let cppNamespace = "::mlir::cir"; + let cppNamespace = "::cir"; let methods = [ InterfaceMethod<[{ @@ -71,11 +71,11 @@ def LoopOpInterface : OpInterface<"LoopOpInterface", [ }], /*retTy=*/"mlir::WalkResult", /*methodName=*/"walkBodySkippingNestedLoops", - /*args=*/(ins "::llvm::function_ref":$callback), + /*args=*/(ins "::llvm::function_ref":$callback), /*methodBody=*/"", /*defaultImplementation=*/[{ - return $_op.getBody().template walk([&](Operation *op) { - if (isa(op)) + return $_op.getBody().template walk([&](mlir::Operation *op) { + if (mlir::isa(op)) return mlir::WalkResult::skip(); return callback(op); }); @@ -86,7 +86,7 @@ def LoopOpInterface : OpInterface<"LoopOpInterface", [ let extraClassDeclaration = [{ /// Generic method to retrieve the successors of a LoopOpInterface operation. static void getLoopOpSuccessorRegions( - ::mlir::cir::LoopOpInterface op, ::mlir::RegionBranchPoint point, + ::cir::LoopOpInterface op, ::mlir::RegionBranchPoint point, ::mlir::SmallVectorImpl<::mlir::RegionSuccessor> ®ions); }]; diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h index 2cd4d9e42524..86064619af7d 100644 --- a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.h @@ -19,15 +19,11 @@ #include "clang/AST/Mangle.h" #include "clang/CIR/Dialect/IR/CIROpsEnums.h" -namespace mlir { namespace cir {} // namespace cir -} // namespace mlir /// Include the generated interface declarations. #include "clang/CIR/Interfaces/CIROpInterfaces.h.inc" -namespace mlir { namespace cir {} // namespace cir -} // namespace mlir #endif // MLIR_INTERFACES_CIR_OP_H_ diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td index fd9c20687c3a..445a558debda 100644 --- a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td @@ -13,7 +13,7 @@ include "mlir/IR/OpBase.td" include "mlir/IR/SymbolInterfaces.td" include "mlir/Interfaces/CallInterfaces.td" -let cppNamespace = "::mlir::cir" in { +let cppNamespace = "::cir" in { // The CIRCallOpInterface must be used instead of CallOpInterface when looking // at arguments and other bits of CallOp. This creates a level of abstraction // that's useful for handling indirect calls and other details. @@ -36,7 +36,7 @@ let cppNamespace = "::mlir::cir" in { "unsigned", "getNumArgOperands", (ins)>, InterfaceMethod< "Return the calling convention of the call operation", - "mlir::cir::CallingConv", "getCallingConv", (ins)>, + "cir::CallingConv", "getCallingConv", (ins)>, ]; } @@ -51,19 +51,19 @@ let cppNamespace = "::mlir::cir" in { InterfaceMethod<"", "bool", "hasLocalLinkage", (ins), [{}], /*defaultImplementation=*/[{ - return mlir::cir::isLocalLinkage($_op.getLinkage()); + return cir::isLocalLinkage($_op.getLinkage()); }] >, InterfaceMethod<"", "bool", "hasExternalWeakLinkage", (ins), [{}], /*defaultImplementation=*/[{ - return mlir::cir::isExternalWeakLinkage($_op.getLinkage()); + return cir::isExternalWeakLinkage($_op.getLinkage()); }] >, InterfaceMethod<"", "bool", "isExternalLinkage", (ins), [{}], /*defaultImplementation=*/[{ - return mlir::cir::isExternalLinkage($_op.getLinkage()); + return cir::isExternalLinkage($_op.getLinkage()); }] >, InterfaceMethod<"", @@ -93,6 +93,6 @@ let cppNamespace = "::mlir::cir" in { }]; } -} // namespace mlir::cir +} // namespace cir #endif // MLIR_CIR_OP_INTERFACES diff --git a/clang/include/clang/CIR/LoweringHelpers.h b/clang/include/clang/CIR/LoweringHelpers.h index 01b9b4301c3a..771a382591fa 100644 --- a/clang/include/clang/CIR/LoweringHelpers.h +++ b/clang/include/clang/CIR/LoweringHelpers.h @@ -18,8 +18,7 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" mlir::DenseElementsAttr -convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, - mlir::Type type); +convertStringAttrToDenseElementsAttr(cir::ConstArrayAttr attr, mlir::Type type); template StorageTy getZeroInitFromType(mlir::Type Ty); template <> mlir::APInt getZeroInitFromType(mlir::Type Ty); @@ -28,16 +27,16 @@ template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty); mlir::Type getNestedTypeAndElemQuantity(mlir::Type Ty, unsigned &elemQuantity); template -void convertToDenseElementsAttrImpl(mlir::cir::ConstArrayAttr attr, +void convertToDenseElementsAttrImpl(cir::ConstArrayAttr attr, llvm::SmallVectorImpl &values); template mlir::DenseElementsAttr -convertToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, +convertToDenseElementsAttr(cir::ConstArrayAttr attr, const llvm::SmallVectorImpl &dims, mlir::Type type); std::optional -lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, +lowerConstArrayAttr(cir::ConstArrayAttr constArr, const mlir::TypeConverter *converter); #endif diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index db5878b2fec2..7643f9b87992 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -42,7 +42,7 @@ class Address { KnownNonNull_t IsKnownNonNull = NotKnownNonNull) : PointerAndKnownNonNull(pointer, IsKnownNonNull), ElementType(elementType), Alignment(alignment) { - assert(mlir::isa(pointer.getType()) && + assert(mlir::isa(pointer.getType()) && "Expected cir.ptr type"); assert(pointer && "Pointer cannot be null"); @@ -50,10 +50,9 @@ class Address { assert(!alignment.isZero() && "Alignment cannot be zero"); } Address(mlir::Value pointer, clang::CharUnits alignment) - : Address( - pointer, - mlir::cast(pointer.getType()).getPointee(), - alignment) { + : Address(pointer, + mlir::cast(pointer.getType()).getPointee(), + alignment) { assert((!alignment.isZero() || pointer == nullptr) && "creating valid address with invalid alignment"); @@ -113,8 +112,8 @@ class Address { } /// Return the type of the pointer value. - mlir::cir::PointerType getType() const { - return mlir::cast(getPointer().getType()); + cir::PointerType getType() const { + return mlir::cast(getPointer().getType()); } mlir::Type getElementType() const { diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 781a74c67a21..39a2ee8192d7 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -7,10 +7,10 @@ using namespace clang; using namespace clang::CIRGen; -using namespace mlir::cir; +using namespace cir; static bool isAggregateType(mlir::Type typ) { - return isa(typ); + return isa(typ); } static AsmFlavor inferFlavor(const CIRGenModule &cgm, const AsmStmt &S) { @@ -212,7 +212,7 @@ std::pair CIRGenFunction::buildAsmInputLValue( uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); if ((Size <= 64 && llvm::isPowerOf2_64(Size)) || getTargetHooks().isScalarizableAsmOperand(*this, Ty)) { - Ty = mlir::cir::IntType::get(&getMLIRContext(), Size, false); + Ty = cir::IntType::get(&getMLIRContext(), Size, false); return {builder.createLoad(getLoc(Loc), InputValue.getAddress().withElementType(Ty)), @@ -296,19 +296,19 @@ static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S, // a pointer. if (mlir::isa(TruncTy)) Tmp = Builder.createFloatingCast(Tmp, TruncTy); - else if (isa(TruncTy) && - isa(Tmp.getType())) { + else if (isa(TruncTy) && + isa(Tmp.getType())) { uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy); Tmp = Builder.createIntCast( - Tmp, mlir::cir::IntType::get(CTX, (unsigned)ResSize, false)); + Tmp, cir::IntType::get(CTX, (unsigned)ResSize, false)); Tmp = Builder.createIntToPtr(Tmp, TruncTy); - } else if (isa(Tmp.getType()) && - isa(TruncTy)) { + } else if (isa(Tmp.getType()) && + isa(TruncTy)) { uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp.getType()); Tmp = Builder.createPtrToInt( - Tmp, mlir::cir::IntType::get(CTX, (unsigned)TmpSize, false)); + Tmp, cir::IntType::get(CTX, (unsigned)TmpSize, false)); Tmp = Builder.createIntCast(Tmp, TruncTy); - } else if (isa(TruncTy)) { + } else if (isa(TruncTy)) { Tmp = Builder.createIntCast(Tmp, TruncTy); } else if (false /*TruncTy->isVectorTy()*/) { assert(!cir::MissingFeatures::asmVectorType()); @@ -434,7 +434,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { if (RequiresCast) { unsigned Size = getContext().getTypeSize(QTy); - Ty = mlir::cir::IntType::get(&getMLIRContext(), Size, false); + Ty = cir::IntType::get(&getMLIRContext(), Size, false); } ResultRegTypes.push_back(Ty); // If this output is tied to an input, and if the input is larger, then @@ -570,12 +570,12 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { if (getContext().getTypeSize(OutputType) > getContext().getTypeSize(InputTy)) { // Use ptrtoint as appropriate so that we can do our extension. - if (isa(Arg.getType())) + if (isa(Arg.getType())) Arg = builder.createPtrToInt(Arg, UIntPtrTy); mlir::Type OutputTy = convertType(OutputType); - if (isa(OutputTy)) + if (isa(OutputTy)) Arg = builder.createIntCast(Arg, OutputTy); - else if (isa(OutputTy)) + else if (isa(OutputTy)) Arg = builder.createIntCast(Arg, UIntPtrTy); else if (isa(OutputTy)) Arg = builder.createFloatingCast(Arg, OutputTy); @@ -631,7 +631,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { operands.push_back(InArgs); operands.push_back(InOutArgs); - auto IA = builder.create( + auto IA = builder.create( getLoc(S.getAsmLoc()), ResultType, operands, AsmString, Constraints, HasSideEffect, inferFlavor(CGM, S), mlir::ArrayAttr()); @@ -652,9 +652,9 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { for (auto typ : ArgElemTypes) { if (typ) { auto op = Args[i++]; - assert(mlir::isa(op.getType()) && + assert(mlir::isa(op.getType()) && "pointer type expected"); - assert(cast(op.getType()).getPointee() == typ && + assert(cast(op.getType()).getPointee() == typ && "element type differs from pointee type!"); operandAttrs.push_back(mlir::UnitAttr::get(&getMLIRContext())); @@ -675,7 +675,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { RegResults.push_back(result); } else if (ResultRegTypes.size() > 1) { auto alignment = CharUnits::One(); - auto sname = cast(ResultType).getName(); + auto sname = cast(ResultType).getName(); auto dest = buildAlloca(sname, ResultType, getLoc(S.getAsmLoc()), alignment, false); auto addr = Address(dest, alignment); diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 6e1c05949a33..e2958d9450fe 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -300,7 +300,7 @@ bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const { } Address AtomicInfo::castToAtomicIntPointer(Address addr) const { - auto intTy = mlir::dyn_cast(addr.getElementType()); + auto intTy = mlir::dyn_cast(addr.getElementType()); // Don't bother with int casts if the integer size is the same. if (intTy && intTy.getWidth() == AtomicSizeInBits) return addr; @@ -334,14 +334,14 @@ Address AtomicInfo::CreateTempAlloca() const { // of casts if necessary. // // FIXME(cir): figure out warning issue and move this to CIRBaseBuilder.h -static mlir::cir::IntAttr getConstOpIntAttr(mlir::Value v) { +static cir::IntAttr getConstOpIntAttr(mlir::Value v) { mlir::Operation *op = v.getDefiningOp(); - mlir::cir::IntAttr constVal; - while (auto c = dyn_cast(op)) + cir::IntAttr constVal; + while (auto c = dyn_cast(op)) op = c.getOperand().getDefiningOp(); - if (auto c = dyn_cast(op)) { - if (mlir::isa(c.getType())) - constVal = mlir::cast(c.getValue()); + if (auto c = dyn_cast(op)) { + if (mlir::isa(c.getType())) + constVal = mlir::cast(c.getValue()); } return constVal; } @@ -352,15 +352,15 @@ static mlir::cir::IntAttr getConstOpIntAttr(mlir::Value v) { // false. static bool isCstWeak(mlir::Value weakVal, bool &val) { mlir::Operation *op = weakVal.getDefiningOp(); - while (auto c = dyn_cast(op)) { + while (auto c = dyn_cast(op)) { op = c.getOperand().getDefiningOp(); } - if (auto c = dyn_cast(op)) { - if (mlir::isa(c.getType())) { - val = mlir::cast(c.getValue()).getUInt() != 0; + if (auto c = dyn_cast(op)) { + if (mlir::isa(c.getType())) { + val = mlir::cast(c.getValue()).getUInt() != 0; return true; - } else if (mlir::isa(c.getType())) { - val = mlir::cast(c.getValue()).getValue(); + } else if (mlir::isa(c.getType())) { + val = mlir::cast(c.getValue()).getValue(); return true; } } @@ -375,8 +375,8 @@ static bool isCstWeak(mlir::Value weakVal, bool &val) { static void buildDefaultCase(CIRGenBuilderTy &builder, mlir::Location loc) { auto EmptyArrayAttr = builder.getArrayAttr({}); mlir::OpBuilder::InsertPoint insertPoint; - builder.create( - loc, EmptyArrayAttr, mlir::cir::CaseOpKind::Default, insertPoint); + builder.create(loc, EmptyArrayAttr, cir::CaseOpKind::Default, + insertPoint); builder.restoreInsertionPoint(insertPoint); } @@ -385,13 +385,13 @@ static void buildDefaultCase(CIRGenBuilderTy &builder, mlir::Location loc) { // will hold the body of the "case" block. static void buildSingleMemOrderCase(CIRGenBuilderTy &builder, mlir::Location loc, mlir::Type Type, - mlir::cir::MemOrder Order) { + cir::MemOrder Order) { SmallVector OneOrder{ - mlir::cir::IntAttr::get(Type, static_cast(Order))}; + cir::IntAttr::get(Type, static_cast(Order))}; auto OneAttribute = builder.getArrayAttr(OneOrder); mlir::OpBuilder::InsertPoint insertPoint; - builder.create(loc, OneAttribute, - mlir::cir::CaseOpKind::Equal, insertPoint); + builder.create(loc, OneAttribute, cir::CaseOpKind::Equal, + insertPoint); builder.restoreInsertionPoint(insertPoint); } @@ -400,40 +400,39 @@ static void buildSingleMemOrderCase(CIRGenBuilderTy &builder, // Create the region that will hold the body of the "case" block. static void buildDoubleMemOrderCase(CIRGenBuilderTy &builder, mlir::Location loc, mlir::Type Type, - mlir::cir::MemOrder Order1, - mlir::cir::MemOrder Order2) { + cir::MemOrder Order1, + cir::MemOrder Order2) { SmallVector TwoOrders{ - mlir::cir::IntAttr::get(Type, static_cast(Order1)), - mlir::cir::IntAttr::get(Type, static_cast(Order2))}; + cir::IntAttr::get(Type, static_cast(Order1)), + cir::IntAttr::get(Type, static_cast(Order2))}; auto TwoAttributes = builder.getArrayAttr(TwoOrders); mlir::OpBuilder::InsertPoint insertPoint; - builder.create(loc, TwoAttributes, - mlir::cir::CaseOpKind::Anyof, insertPoint); + builder.create(loc, TwoAttributes, cir::CaseOpKind::Anyof, + insertPoint); builder.restoreInsertionPoint(insertPoint); } static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, uint64_t Size, - mlir::cir::MemOrder SuccessOrder, - mlir::cir::MemOrder FailureOrder, + cir::MemOrder SuccessOrder, + cir::MemOrder FailureOrder, llvm::SyncScope::ID Scope) { auto &builder = CGF.getBuilder(); auto loc = CGF.getLoc(E->getSourceRange()); auto Expected = builder.createLoad(loc, Val1); auto Desired = builder.createLoad(loc, Val2); auto boolTy = builder.getBoolTy(); - auto cmpxchg = builder.create( + auto cmpxchg = builder.create( loc, Expected.getType(), boolTy, Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder); cmpxchg.setIsVolatile(E->isVolatile()); cmpxchg.setWeak(IsWeak); auto cmp = builder.createNot(cmpxchg.getCmp()); - builder.create( + builder.create( loc, cmp, false, [&](mlir::OpBuilder &, mlir::Location) { - auto ptrTy = - mlir::cast(Val1.getPointer().getType()); + auto ptrTy = mlir::cast(Val1.getPointer().getType()); if (Val1.getElementType() != ptrTy.getPointee()) { Val1 = Val1.withPointer(builder.createPtrBitcast( Val1.getPointer(), Val1.getElementType())); @@ -453,30 +452,30 @@ static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, static void buildAtomicCmpXchgFailureSet( CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, mlir::Value FailureOrderVal, uint64_t Size, - mlir::cir::MemOrder SuccessOrder, llvm::SyncScope::ID Scope) { + cir::MemOrder SuccessOrder, llvm::SyncScope::ID Scope) { - mlir::cir::MemOrder FailureOrder; + cir::MemOrder FailureOrder; if (auto ordAttr = getConstOpIntAttr(FailureOrderVal)) { // We should not ever get to a case where the ordering isn't a valid CABI // value, but it's hard to enforce that in general. auto ord = ordAttr.getUInt(); - if (!mlir::cir::isValidCIRAtomicOrderingCABI(ord)) { - FailureOrder = mlir::cir::MemOrder::Relaxed; + if (!cir::isValidCIRAtomicOrderingCABI(ord)) { + FailureOrder = cir::MemOrder::Relaxed; } else { - switch ((mlir::cir::MemOrder)ord) { - case mlir::cir::MemOrder::Relaxed: + switch ((cir::MemOrder)ord) { + case cir::MemOrder::Relaxed: // 31.7.2.18: "The failure argument shall not be memory_order_release // nor memory_order_acq_rel". Fallback to monotonic. - case mlir::cir::MemOrder::Release: - case mlir::cir::MemOrder::AcquireRelease: - FailureOrder = mlir::cir::MemOrder::Relaxed; + case cir::MemOrder::Release: + case cir::MemOrder::AcquireRelease: + FailureOrder = cir::MemOrder::Relaxed; break; - case mlir::cir::MemOrder::Consume: - case mlir::cir::MemOrder::Acquire: - FailureOrder = mlir::cir::MemOrder::Acquire; + case cir::MemOrder::Consume: + case cir::MemOrder::Acquire: + FailureOrder = cir::MemOrder::Acquire; break; - case mlir::cir::MemOrder::SequentiallyConsistent: - FailureOrder = mlir::cir::MemOrder::SequentiallyConsistent; + case cir::MemOrder::SequentiallyConsistent: + FailureOrder = cir::MemOrder::SequentiallyConsistent; break; } } @@ -493,7 +492,7 @@ static void buildAtomicCmpXchgFailureSet( // can't handle a runtime value; all memory orders must be hard coded. // Generate a "switch" statement that converts the runtime value into a // compile-time value. - CGF.getBuilder().create( + CGF.getBuilder().create( FailureOrderVal.getLoc(), FailureOrderVal, [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { auto &builder = CGF.getBuilder(); @@ -505,7 +504,7 @@ static void buildAtomicCmpXchgFailureSet( // because there is no practical way to report an error at runtime. buildDefaultCase(builder, loc); buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, - SuccessOrder, mlir::cir::MemOrder::Relaxed, Scope); + SuccessOrder, cir::MemOrder::Relaxed, Scope); builder.createBreak(loc); builder.setInsertionPointToEnd(switchBlock); @@ -515,10 +514,9 @@ static void buildAtomicCmpXchgFailureSet( // memory_order_consume is not implemented and always falls back to // memory_order_acquire buildDoubleMemOrderCase(builder, loc, FailureOrderVal.getType(), - mlir::cir::MemOrder::Consume, - mlir::cir::MemOrder::Acquire); + cir::MemOrder::Consume, cir::MemOrder::Acquire); buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, - SuccessOrder, mlir::cir::MemOrder::Acquire, Scope); + SuccessOrder, cir::MemOrder::Acquire, Scope); builder.createBreak(loc); builder.setInsertionPointToEnd(switchBlock); @@ -529,10 +527,10 @@ static void buildAtomicCmpXchgFailureSet( // case seq_cst: buildSingleMemOrderCase(builder, loc, FailureOrderVal.getType(), - mlir::cir::MemOrder::SequentiallyConsistent); + cir::MemOrder::SequentiallyConsistent); buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, - SuccessOrder, - mlir::cir::MemOrder::SequentiallyConsistent, Scope); + SuccessOrder, cir::MemOrder::SequentiallyConsistent, + Scope); builder.createBreak(loc); builder.setInsertionPointToEnd(switchBlock); @@ -543,15 +541,14 @@ static void buildAtomicCmpXchgFailureSet( static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, Address Ptr, Address Val1, Address Val2, mlir::Value IsWeak, mlir::Value FailureOrder, - uint64_t Size, mlir::cir::MemOrder Order, - uint8_t Scope) { + uint64_t Size, cir::MemOrder Order, uint8_t Scope) { assert(!cir::MissingFeatures::syncScopeID()); StringRef Op; auto &builder = CGF.getBuilder(); auto loc = CGF.getLoc(E->getSourceRange()); - auto orderAttr = mlir::cir::MemOrderAttr::get(builder.getContext(), Order); - mlir::cir::AtomicFetchKindAttr fetchAttr; + auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), Order); + cir::AtomicFetchKindAttr fetchAttr; bool fetchFirst = true; switch (E->getOp()) { @@ -599,8 +596,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, // TODO(cir): this logic should be part of createStore, but doing so // currently breaks CodeGen/union.cpp and CodeGen/union.cpp. - auto ptrTy = - mlir::cast(Dest.getPointer().getType()); + auto ptrTy = mlir::cast(Dest.getPointer().getType()); if (Dest.getElementType() != ptrTy.getPointee()) { Dest = Dest.withPointer( builder.createPtrBitcast(Dest.getPointer(), Dest.getElementType())); @@ -631,7 +627,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_exchange: case AtomicExpr::AO__scoped_atomic_exchange_n: case AtomicExpr::AO__scoped_atomic_exchange: - Op = mlir::cir::AtomicXchg::getOperationName(); + Op = cir::AtomicXchg::getOperationName(); break; case AtomicExpr::AO__atomic_add_fetch: @@ -643,9 +639,9 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__opencl_atomic_fetch_add: case AtomicExpr::AO__atomic_fetch_add: case AtomicExpr::AO__scoped_atomic_fetch_add: - Op = mlir::cir::AtomicFetch::getOperationName(); - fetchAttr = mlir::cir::AtomicFetchKindAttr::get( - builder.getContext(), mlir::cir::AtomicFetchKind::Add); + Op = cir::AtomicFetch::getOperationName(); + fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(), + cir::AtomicFetchKind::Add); break; case AtomicExpr::AO__atomic_sub_fetch: @@ -657,9 +653,9 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__opencl_atomic_fetch_sub: case AtomicExpr::AO__atomic_fetch_sub: case AtomicExpr::AO__scoped_atomic_fetch_sub: - Op = mlir::cir::AtomicFetch::getOperationName(); - fetchAttr = mlir::cir::AtomicFetchKindAttr::get( - builder.getContext(), mlir::cir::AtomicFetchKind::Sub); + Op = cir::AtomicFetch::getOperationName(); + fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(), + cir::AtomicFetchKind::Sub); break; case AtomicExpr::AO__atomic_min_fetch: @@ -671,9 +667,9 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__opencl_atomic_fetch_min: case AtomicExpr::AO__atomic_fetch_min: case AtomicExpr::AO__scoped_atomic_fetch_min: - Op = mlir::cir::AtomicFetch::getOperationName(); - fetchAttr = mlir::cir::AtomicFetchKindAttr::get( - builder.getContext(), mlir::cir::AtomicFetchKind::Min); + Op = cir::AtomicFetch::getOperationName(); + fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(), + cir::AtomicFetchKind::Min); break; case AtomicExpr::AO__atomic_max_fetch: @@ -685,9 +681,9 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__opencl_atomic_fetch_max: case AtomicExpr::AO__atomic_fetch_max: case AtomicExpr::AO__scoped_atomic_fetch_max: - Op = mlir::cir::AtomicFetch::getOperationName(); - fetchAttr = mlir::cir::AtomicFetchKindAttr::get( - builder.getContext(), mlir::cir::AtomicFetchKind::Max); + Op = cir::AtomicFetch::getOperationName(); + fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(), + cir::AtomicFetchKind::Max); break; case AtomicExpr::AO__atomic_and_fetch: @@ -699,9 +695,9 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__opencl_atomic_fetch_and: case AtomicExpr::AO__atomic_fetch_and: case AtomicExpr::AO__scoped_atomic_fetch_and: - Op = mlir::cir::AtomicFetch::getOperationName(); - fetchAttr = mlir::cir::AtomicFetchKindAttr::get( - builder.getContext(), mlir::cir::AtomicFetchKind::And); + Op = cir::AtomicFetch::getOperationName(); + fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(), + cir::AtomicFetchKind::And); break; case AtomicExpr::AO__atomic_or_fetch: @@ -713,9 +709,9 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__opencl_atomic_fetch_or: case AtomicExpr::AO__atomic_fetch_or: case AtomicExpr::AO__scoped_atomic_fetch_or: - Op = mlir::cir::AtomicFetch::getOperationName(); - fetchAttr = mlir::cir::AtomicFetchKindAttr::get( - builder.getContext(), mlir::cir::AtomicFetchKind::Or); + Op = cir::AtomicFetch::getOperationName(); + fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(), + cir::AtomicFetchKind::Or); break; case AtomicExpr::AO__atomic_xor_fetch: @@ -727,9 +723,9 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__opencl_atomic_fetch_xor: case AtomicExpr::AO__atomic_fetch_xor: case AtomicExpr::AO__scoped_atomic_fetch_xor: - Op = mlir::cir::AtomicFetch::getOperationName(); - fetchAttr = mlir::cir::AtomicFetchKindAttr::get( - builder.getContext(), mlir::cir::AtomicFetchKind::Xor); + Op = cir::AtomicFetch::getOperationName(); + fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(), + cir::AtomicFetchKind::Xor); break; case AtomicExpr::AO__atomic_nand_fetch: @@ -739,9 +735,9 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__c11_atomic_fetch_nand: case AtomicExpr::AO__atomic_fetch_nand: case AtomicExpr::AO__scoped_atomic_fetch_nand: - Op = mlir::cir::AtomicFetch::getOperationName(); - fetchAttr = mlir::cir::AtomicFetchKindAttr::get( - builder.getContext(), mlir::cir::AtomicFetchKind::Nand); + Op = cir::AtomicFetch::getOperationName(); + fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(), + cir::AtomicFetchKind::Nand); break; } @@ -758,14 +754,14 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, RMWI->setAttr("mem_order", orderAttr); if (E->isVolatile()) RMWI->setAttr("is_volatile", mlir::UnitAttr::get(builder.getContext())); - if (fetchFirst && Op == mlir::cir::AtomicFetch::getOperationName()) + if (fetchFirst && Op == cir::AtomicFetch::getOperationName()) RMWI->setAttr("fetch_first", mlir::UnitAttr::get(builder.getContext())); auto Result = RMWI->getResult(0); // TODO(cir): this logic should be part of createStore, but doing so currently // breaks CodeGen/union.cpp and CodeGen/union.cpp. - auto ptrTy = mlir::cast(Dest.getPointer().getType()); + auto ptrTy = mlir::cast(Dest.getPointer().getType()); if (Dest.getElementType() != ptrTy.getPointee()) { Dest = Dest.withPointer( builder.createPtrBitcast(Dest.getPointer(), Dest.getElementType())); @@ -784,7 +780,7 @@ static RValue buildAtomicLibcall(CIRGenFunction &CGF, StringRef fnName, static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, Address Ptr, Address Val1, Address Val2, mlir::Value IsWeak, mlir::Value FailureOrder, - uint64_t Size, mlir::cir::MemOrder Order, + uint64_t Size, cir::MemOrder Order, mlir::Value Scope) { auto ScopeModel = Expr->getScopeModel(); @@ -1202,34 +1198,34 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // We should not ever get to a case where the ordering isn't a valid CABI // value, but it's hard to enforce that in general. auto ord = ordAttr.getUInt(); - if (mlir::cir::isValidCIRAtomicOrderingCABI(ord)) { - switch ((mlir::cir::MemOrder)ord) { - case mlir::cir::MemOrder::Relaxed: + if (cir::isValidCIRAtomicOrderingCABI(ord)) { + switch ((cir::MemOrder)ord) { + case cir::MemOrder::Relaxed: buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - mlir::cir::MemOrder::Relaxed, Scope); + cir::MemOrder::Relaxed, Scope); break; - case mlir::cir::MemOrder::Consume: - case mlir::cir::MemOrder::Acquire: + case cir::MemOrder::Consume: + case cir::MemOrder::Acquire: if (IsStore) break; // Avoid crashing on code with undefined behavior buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - mlir::cir::MemOrder::Acquire, Scope); + cir::MemOrder::Acquire, Scope); break; - case mlir::cir::MemOrder::Release: + case cir::MemOrder::Release: if (IsLoad) break; // Avoid crashing on code with undefined behavior buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - mlir::cir::MemOrder::Release, Scope); + cir::MemOrder::Release, Scope); break; - case mlir::cir::MemOrder::AcquireRelease: + case cir::MemOrder::AcquireRelease: if (IsLoad || IsStore) break; // Avoid crashing on code with undefined behavior buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - mlir::cir::MemOrder::AcquireRelease, Scope); + cir::MemOrder::AcquireRelease, Scope); break; - case mlir::cir::MemOrder::SequentiallyConsistent: + case cir::MemOrder::SequentiallyConsistent: buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - mlir::cir::MemOrder::SequentiallyConsistent, Scope); + cir::MemOrder::SequentiallyConsistent, Scope); break; } } @@ -1244,7 +1240,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // can't handle runtime memory orders; the memory order must be hard coded. // Generate a "switch" statement that converts a runtime value into a // compile-time value. - builder.create( + builder.create( Order.getLoc(), Order, [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) { mlir::Block *switchBlock = builder.getBlock(); @@ -1256,7 +1252,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // memory_order_relaxed. buildDefaultCase(builder, loc); buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - mlir::cir::MemOrder::Relaxed, Scope); + cir::MemOrder::Relaxed, Scope); builder.createBreak(loc); builder.setInsertionPointToEnd(switchBlock); @@ -1268,10 +1264,10 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // memory_order_acquire. These memory orders are not valid for // write-only operations. buildDoubleMemOrderCase(builder, loc, Order.getType(), - mlir::cir::MemOrder::Consume, - mlir::cir::MemOrder::Acquire); + cir::MemOrder::Consume, + cir::MemOrder::Acquire); buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, - Size, mlir::cir::MemOrder::Acquire, Scope); + Size, cir::MemOrder::Acquire, Scope); builder.createBreak(loc); } @@ -1281,9 +1277,9 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // case release: // memory_order_release is not valid for read-only operations. buildSingleMemOrderCase(builder, loc, Order.getType(), - mlir::cir::MemOrder::Release); + cir::MemOrder::Release); buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, - Size, mlir::cir::MemOrder::Release, Scope); + Size, cir::MemOrder::Release, Scope); builder.createBreak(loc); } @@ -1293,9 +1289,9 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // case acq_rel: // memory_order_acq_rel is only valid for read-write operations. buildSingleMemOrderCase(builder, loc, Order.getType(), - mlir::cir::MemOrder::AcquireRelease); + cir::MemOrder::AcquireRelease); buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, - Size, mlir::cir::MemOrder::AcquireRelease, Scope); + Size, cir::MemOrder::AcquireRelease, Scope); builder.createBreak(loc); } @@ -1303,9 +1299,9 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // case seq_cst: buildSingleMemOrderCase(builder, loc, Order.getType(), - mlir::cir::MemOrder::SequentiallyConsistent); + cir::MemOrder::SequentiallyConsistent); buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - mlir::cir::MemOrder::SequentiallyConsistent, Scope); + cir::MemOrder::SequentiallyConsistent, Scope); builder.createBreak(loc); builder.setInsertionPointToEnd(switchBlock); @@ -1321,11 +1317,11 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue lvalue, bool isInit) { bool IsVolatile = lvalue.isVolatileQualified(); - mlir::cir::MemOrder MO; + cir::MemOrder MO; if (lvalue.getType()->isAtomicType()) { - MO = mlir::cir::MemOrder::SequentiallyConsistent; + MO = cir::MemOrder::SequentiallyConsistent; } else { - MO = mlir::cir::MemOrder::Release; + MO = cir::MemOrder::Release; IsVolatile = true; } return buildAtomicStore(rvalue, lvalue, MO, IsVolatile, isInit); @@ -1337,9 +1333,9 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue lvalue, /// floating point operands. TODO: Allow compare-and-exchange and FP - see /// comment in CIRGenAtomicExpandPass.cpp. static bool shouldCastToInt(mlir::Type ValTy, bool CmpXchg) { - if (mlir::cir::isAnyFloatingPointType(ValTy)) - return isa(ValTy) || CmpXchg; - return !isa(ValTy) && !isa(ValTy); + if (cir::isAnyFloatingPointType(ValTy)) + return isa(ValTy) || CmpXchg; + return !isa(ValTy) && !isa(ValTy); } mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue RVal) const { @@ -1420,7 +1416,7 @@ mlir::Value AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const { /// type*; this means that for aggregate r-values, it should include /// storage for any padding that was necessary. void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, - mlir::cir::MemOrder MO, bool IsVolatile, + cir::MemOrder MO, bool IsVolatile, bool isInit) { // If this is an aggregate r-value, it should agree in type except // maybe for address-space qualification. @@ -1456,10 +1452,10 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, } auto store = builder.createStore(loc, ValToStore, Addr); - if (MO == mlir::cir::MemOrder::Acquire) - MO = mlir::cir::MemOrder::Relaxed; // Monotonic - else if (MO == mlir::cir::MemOrder::AcquireRelease) - MO = mlir::cir::MemOrder::Release; + if (MO == cir::MemOrder::Acquire) + MO = cir::MemOrder::Relaxed; // Monotonic + else if (MO == cir::MemOrder::AcquireRelease) + MO = cir::MemOrder::Release; // Initializations don't need to be atomic. if (!isInit) store.setAtomic(MO); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp index e9fd05a71633..a50cefe34c79 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp @@ -12,19 +12,16 @@ using namespace clang::CIRGen; mlir::Value CIRGenBuilderTy::maybeBuildArrayDecay(mlir::Location loc, mlir::Value arrayPtr, mlir::Type eltTy) { - auto arrayPtrTy = - ::mlir::dyn_cast<::mlir::cir::PointerType>(arrayPtr.getType()); + auto arrayPtrTy = ::mlir::dyn_cast(arrayPtr.getType()); assert(arrayPtrTy && "expected pointer type"); - auto arrayTy = - ::mlir::dyn_cast<::mlir::cir::ArrayType>(arrayPtrTy.getPointee()); + auto arrayTy = ::mlir::dyn_cast(arrayPtrTy.getPointee()); if (arrayTy) { - auto addrSpace = ::mlir::cast_if_present<::mlir::cir::AddressSpaceAttr>( + auto addrSpace = ::mlir::cast_if_present( arrayPtrTy.getAddrSpace()); - mlir::cir::PointerType flatPtrTy = - getPointerTo(arrayTy.getEltType(), addrSpace); - return create( - loc, flatPtrTy, mlir::cir::CastKind::array_to_ptrdecay, arrayPtr); + cir::PointerType flatPtrTy = getPointerTo(arrayTy.getEltType(), addrSpace); + return create(loc, flatPtrTy, cir::CastKind::array_to_ptrdecay, + arrayPtr); } assert(arrayPtrTy.getPointee() == eltTy && @@ -41,29 +38,28 @@ mlir::Value CIRGenBuilderTy::getArrayElement(mlir::Location arrayLocBegin, if (shouldDecay) basePtr = maybeBuildArrayDecay(arrayLocBegin, arrayPtr, eltTy); mlir::Type flatPtrTy = basePtr.getType(); - return create(arrayLocEnd, flatPtrTy, basePtr, idx); + return create(arrayLocEnd, flatPtrTy, basePtr, idx); } -mlir::cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, - llvm::APSInt intVal) { +cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, + llvm::APSInt intVal) { bool isSigned = intVal.isSigned(); auto width = intVal.getBitWidth(); - mlir::cir::IntType t = isSigned ? getSIntNTy(width) : getUIntNTy(width); + cir::IntType t = isSigned ? getSIntNTy(width) : getUIntNTy(width); return getConstInt(loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); } -mlir::cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, - llvm::APInt intVal) { +cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, + llvm::APInt intVal) { auto width = intVal.getBitWidth(); - mlir::cir::IntType t = getUIntNTy(width); + cir::IntType t = getUIntNTy(width); return getConstInt(loc, t, intVal.getZExtValue()); } -mlir::cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, - mlir::Type t, uint64_t C) { - auto intTy = mlir::dyn_cast(t); - assert(intTy && "expected mlir::cir::IntType"); - return create(loc, intTy, - mlir::cir::IntAttr::get(t, C)); +cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, mlir::Type t, + uint64_t C) { + auto intTy = mlir::dyn_cast(t); + assert(intTy && "expected cir::IntType"); + return create(loc, intTy, cir::IntAttr::get(t, C)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index d58de4cb5d76..b9022309cec8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -132,18 +132,18 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { /// Get constant address of a global variable as an MLIR attribute. /// This wrapper infers the attribute type through the global op. - mlir::cir::GlobalViewAttr getGlobalViewAttr(mlir::cir::GlobalOp globalOp, - mlir::ArrayAttr indices = {}) { + cir::GlobalViewAttr getGlobalViewAttr(cir::GlobalOp globalOp, + mlir::ArrayAttr indices = {}) { auto type = getPointerTo(globalOp.getSymType()); return getGlobalViewAttr(type, globalOp, indices); } /// Get constant address of a global variable as an MLIR attribute. - mlir::cir::GlobalViewAttr getGlobalViewAttr(mlir::cir::PointerType type, - mlir::cir::GlobalOp globalOp, - mlir::ArrayAttr indices = {}) { + cir::GlobalViewAttr getGlobalViewAttr(cir::PointerType type, + cir::GlobalOp globalOp, + mlir::ArrayAttr indices = {}) { auto symbol = mlir::FlatSymbolRefAttr::get(globalOp.getSymNameAttr()); - return mlir::cir::GlobalViewAttr::get(type, symbol, indices); + return cir::GlobalViewAttr::get(type, symbol, indices); } mlir::Attribute getString(llvm::StringRef str, mlir::Type eltTy, @@ -154,34 +154,33 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { // If the string is full of null bytes, emit a #cir.zero rather than // a #cir.const_array. if (lastNonZeroPos == llvm::StringRef::npos) { - auto arrayTy = mlir::cir::ArrayType::get(getContext(), eltTy, finalSize); + auto arrayTy = cir::ArrayType::get(getContext(), eltTy, finalSize); return getZeroAttr(arrayTy); } // We will use trailing zeros only if there are more than one zero // at the end int trailingZerosNum = finalSize > lastNonZeroPos + 2 ? finalSize - lastNonZeroPos - 1 : 0; - auto truncatedArrayTy = mlir::cir::ArrayType::get( - getContext(), eltTy, finalSize - trailingZerosNum); - auto fullArrayTy = - mlir::cir::ArrayType::get(getContext(), eltTy, finalSize); - return mlir::cir::ConstArrayAttr::get( + auto truncatedArrayTy = + cir::ArrayType::get(getContext(), eltTy, finalSize - trailingZerosNum); + auto fullArrayTy = cir::ArrayType::get(getContext(), eltTy, finalSize); + return cir::ConstArrayAttr::get( getContext(), fullArrayTy, mlir::StringAttr::get(str.drop_back(trailingZerosNum), truncatedArrayTy), trailingZerosNum); } - mlir::cir::ConstArrayAttr getConstArray(mlir::Attribute attrs, - mlir::cir::ArrayType arrayTy) { - return mlir::cir::ConstArrayAttr::get(arrayTy, attrs); + cir::ConstArrayAttr getConstArray(mlir::Attribute attrs, + cir::ArrayType arrayTy) { + return cir::ConstArrayAttr::get(arrayTy, attrs); } mlir::Attribute getConstStructOrZeroAttr(mlir::ArrayAttr arrayAttr, bool packed = false, mlir::Type type = {}) { llvm::SmallVector members; - auto structTy = mlir::dyn_cast(type); + auto structTy = mlir::dyn_cast(type); assert(structTy && "expected cir.struct"); // Collect members and check if they are all zero. @@ -194,19 +193,19 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { // Struct type not specified: create anon struct type from members. if (!structTy) - structTy = getType(members, packed, - mlir::cir::StructType::Struct, - /*ast=*/nullptr); + structTy = + getType(members, packed, cir::StructType::Struct, + /*ast=*/nullptr); // Return zero or anonymous constant struct. if (isZero) - return mlir::cir::ZeroAttr::get(getContext(), structTy); - return mlir::cir::ConstStructAttr::get(structTy, arrayAttr); + return cir::ZeroAttr::get(getContext(), structTy); + return cir::ConstStructAttr::get(structTy, arrayAttr); } - mlir::cir::ConstStructAttr getAnonConstStruct(mlir::ArrayAttr arrayAttr, - bool packed = false, - mlir::Type ty = {}) { + cir::ConstStructAttr getAnonConstStruct(mlir::ArrayAttr arrayAttr, + bool packed = false, + mlir::Type ty = {}) { llvm::SmallVector members; for (auto &f : arrayAttr) { auto ta = mlir::dyn_cast(f); @@ -217,59 +216,58 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { if (!ty) ty = getAnonStructTy(members, packed); - auto sTy = mlir::dyn_cast(ty); + auto sTy = mlir::dyn_cast(ty); assert(sTy && "expected struct type"); - return mlir::cir::ConstStructAttr::get(sTy, arrayAttr); + return cir::ConstStructAttr::get(sTy, arrayAttr); } - mlir::cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) { + cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) { auto anonStruct = getAnonConstStruct(fieldsAttr); - return mlir::cir::TypeInfoAttr::get(anonStruct.getType(), fieldsAttr); + return cir::TypeInfoAttr::get(anonStruct.getType(), fieldsAttr); } - mlir::cir::CmpThreeWayInfoAttr getCmpThreeWayInfoStrongOrdering( + cir::CmpThreeWayInfoAttr getCmpThreeWayInfoStrongOrdering( const llvm::APSInt <, const llvm::APSInt &eq, const llvm::APSInt >) { - return mlir::cir::CmpThreeWayInfoAttr::get( - getContext(), lt.getSExtValue(), eq.getSExtValue(), gt.getSExtValue()); + return cir::CmpThreeWayInfoAttr::get(getContext(), lt.getSExtValue(), + eq.getSExtValue(), gt.getSExtValue()); } - mlir::cir::CmpThreeWayInfoAttr getCmpThreeWayInfoPartialOrdering( + cir::CmpThreeWayInfoAttr getCmpThreeWayInfoPartialOrdering( const llvm::APSInt <, const llvm::APSInt &eq, const llvm::APSInt >, const llvm::APSInt &unordered) { - return mlir::cir::CmpThreeWayInfoAttr::get( - getContext(), lt.getSExtValue(), eq.getSExtValue(), gt.getSExtValue(), - unordered.getSExtValue()); + return cir::CmpThreeWayInfoAttr::get(getContext(), lt.getSExtValue(), + eq.getSExtValue(), gt.getSExtValue(), + unordered.getSExtValue()); } - mlir::cir::DataMemberAttr getDataMemberAttr(mlir::cir::DataMemberType ty, - unsigned memberIndex) { - return mlir::cir::DataMemberAttr::get(getContext(), ty, memberIndex); + cir::DataMemberAttr getDataMemberAttr(cir::DataMemberType ty, + unsigned memberIndex) { + return cir::DataMemberAttr::get(getContext(), ty, memberIndex); } - mlir::cir::DataMemberAttr - getNullDataMemberAttr(mlir::cir::DataMemberType ty) { - return mlir::cir::DataMemberAttr::get(getContext(), ty, std::nullopt); + cir::DataMemberAttr getNullDataMemberAttr(cir::DataMemberType ty) { + return cir::DataMemberAttr::get(getContext(), ty, std::nullopt); } // TODO(cir): Once we have CIR float types, replace this by something like a // NullableValueInterface to allow for type-independent queries. bool isNullValue(mlir::Attribute attr) const { - if (mlir::isa(attr)) + if (mlir::isa(attr)) return true; - if (const auto ptrVal = mlir::dyn_cast(attr)) + if (const auto ptrVal = mlir::dyn_cast(attr)) return ptrVal.isNullValue(); - if (mlir::isa(attr)) + if (mlir::isa(attr)) return false; // TODO(cir): introduce char type in CIR and check for that instead. - if (const auto intVal = mlir::dyn_cast(attr)) + if (const auto intVal = mlir::dyn_cast(attr)) return intVal.isNullValue(); - if (const auto boolVal = mlir::dyn_cast(attr)) + if (const auto boolVal = mlir::dyn_cast(attr)) return !boolVal.getValue(); - if (auto fpAttr = mlir::dyn_cast(attr)) { + if (auto fpAttr = mlir::dyn_cast(attr)) { auto fpVal = fpAttr.getValue(); bool ignored; llvm::APFloat FV(+0.0); @@ -278,8 +276,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return FV.bitwiseIsEqual(fpVal); } - if (const auto structVal = - mlir::dyn_cast(attr)) { + if (const auto structVal = mlir::dyn_cast(attr)) { for (const auto elt : structVal.getMembers()) { // FIXME(cir): the struct's ID should not be considered a member. if (mlir::isa(elt)) @@ -290,7 +287,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return true; } - if (const auto arrayVal = mlir::dyn_cast(attr)) { + if (const auto arrayVal = mlir::dyn_cast(attr)) { if (mlir::isa(arrayVal.getElts())) return false; for (const auto elt : mlir::cast(arrayVal.getElts())) { @@ -300,7 +297,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return true; } - if (mlir::isa(attr)) + if (mlir::isa(attr)) return true; llvm_unreachable("NYI"); @@ -310,7 +307,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { // Type helpers // ------------ // - mlir::cir::IntType getUIntNTy(int N) { + cir::IntType getUIntNTy(int N) { switch (N) { case 8: return getUInt8Ty(); @@ -321,11 +318,11 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { case 64: return getUInt64Ty(); default: - return mlir::cir::IntType::get(getContext(), N, false); + return cir::IntType::get(getContext(), N, false); } } - mlir::cir::IntType getSIntNTy(int N) { + cir::IntType getSIntNTy(int N) { switch (N) { case 8: return getSInt8Ty(); @@ -336,21 +333,21 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { case 64: return getSInt64Ty(); default: - return mlir::cir::IntType::get(getContext(), N, true); + return cir::IntType::get(getContext(), N, true); } } - mlir::cir::VoidType getVoidTy() { return typeCache.VoidTy; } + cir::VoidType getVoidTy() { return typeCache.VoidTy; } - mlir::cir::IntType getSInt8Ty() { return typeCache.SInt8Ty; } - mlir::cir::IntType getSInt16Ty() { return typeCache.SInt16Ty; } - mlir::cir::IntType getSInt32Ty() { return typeCache.SInt32Ty; } - mlir::cir::IntType getSInt64Ty() { return typeCache.SInt64Ty; } + cir::IntType getSInt8Ty() { return typeCache.SInt8Ty; } + cir::IntType getSInt16Ty() { return typeCache.SInt16Ty; } + cir::IntType getSInt32Ty() { return typeCache.SInt32Ty; } + cir::IntType getSInt64Ty() { return typeCache.SInt64Ty; } - mlir::cir::IntType getUInt8Ty() { return typeCache.UInt8Ty; } - mlir::cir::IntType getUInt16Ty() { return typeCache.UInt16Ty; } - mlir::cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; } - mlir::cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; } + cir::IntType getUInt8Ty() { return typeCache.UInt8Ty; } + cir::IntType getUInt16Ty() { return typeCache.UInt16Ty; } + cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; } + cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; } bool isInt8Ty(mlir::Type i) { return i == typeCache.UInt8Ty || i == typeCache.SInt8Ty; @@ -364,9 +361,9 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { bool isInt64Ty(mlir::Type i) { return i == typeCache.UInt64Ty || i == typeCache.SInt64Ty; } - bool isInt(mlir::Type i) { return mlir::isa(i); } + bool isInt(mlir::Type i) { return mlir::isa(i); } - mlir::cir::IntType getExtendedIntTy(mlir::cir::IntType ty, bool isSigned) { + cir::IntType getExtendedIntTy(cir::IntType ty, bool isSigned) { switch (ty.getWidth()) { case 8: return isSigned ? typeCache.SInt16Ty : typeCache.UInt16Ty; @@ -379,7 +376,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { } } - mlir::cir::IntType getTruncatedIntTy(mlir::cir::IntType ty, bool isSigned) { + cir::IntType getTruncatedIntTy(cir::IntType ty, bool isSigned) { switch (ty.getWidth()) { case 16: return isSigned ? typeCache.SInt8Ty : typeCache.UInt8Ty; @@ -392,26 +389,25 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { } } - mlir::cir::VectorType getExtendedOrTruncatedElementVectorType( - mlir::cir::VectorType vt, bool isExtended, bool isSigned = false) { - auto elementTy = - mlir::dyn_cast_or_null(vt.getEltType()); + cir::VectorType + getExtendedOrTruncatedElementVectorType(cir::VectorType vt, bool isExtended, + bool isSigned = false) { + auto elementTy = mlir::dyn_cast_or_null(vt.getEltType()); assert(elementTy && "expected int vector"); - return mlir::cir::VectorType::get( - getContext(), - isExtended ? getExtendedIntTy(elementTy, isSigned) - : getTruncatedIntTy(elementTy, isSigned), - vt.getSize()); + return cir::VectorType::get(getContext(), + isExtended + ? getExtendedIntTy(elementTy, isSigned) + : getTruncatedIntTy(elementTy, isSigned), + vt.getSize()); } - mlir::cir::LongDoubleType - getLongDoubleTy(const llvm::fltSemantics &format) const { + cir::LongDoubleType getLongDoubleTy(const llvm::fltSemantics &format) const { if (&format == &llvm::APFloat::IEEEdouble()) - return mlir::cir::LongDoubleType::get(getContext(), typeCache.DoubleTy); + return cir::LongDoubleType::get(getContext(), typeCache.DoubleTy); if (&format == &llvm::APFloat::x87DoubleExtended()) - return mlir::cir::LongDoubleType::get(getContext(), typeCache.FP80Ty); + return cir::LongDoubleType::get(getContext(), typeCache.FP80Ty); if (&format == &llvm::APFloat::IEEEquad()) - return mlir::cir::LongDoubleType::get(getContext(), typeCache.FP128Ty); + return cir::LongDoubleType::get(getContext(), typeCache.FP128Ty); if (&format == &llvm::APFloat::PPCDoubleDouble()) llvm_unreachable("NYI"); @@ -421,47 +417,46 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { mlir::Type getVirtualFnPtrType(bool isVarArg = false) { // FIXME: replay LLVM codegen for now, perhaps add a vtable ptr special // type so it's a bit more clear and C++ idiomatic. - auto fnTy = mlir::cir::FuncType::get({}, getUInt32Ty(), isVarArg); + auto fnTy = cir::FuncType::get({}, getUInt32Ty(), isVarArg); assert(!cir::MissingFeatures::isVarArg()); return getPointerTo(getPointerTo(fnTy)); } - mlir::cir::FuncType getFuncType(llvm::ArrayRef params, - mlir::Type retTy, bool isVarArg = false) { - return mlir::cir::FuncType::get(params, retTy, isVarArg); + cir::FuncType getFuncType(llvm::ArrayRef params, mlir::Type retTy, + bool isVarArg = false) { + return cir::FuncType::get(params, retTy, isVarArg); } // Fetch the type representing a pointer to unsigned int values. - mlir::cir::PointerType getUInt8PtrTy(unsigned AddrSpace = 0) { + cir::PointerType getUInt8PtrTy(unsigned AddrSpace = 0) { return typeCache.UInt8PtrTy; } - mlir::cir::PointerType getUInt32PtrTy(unsigned AddrSpace = 0) { - return mlir::cir::PointerType::get(getContext(), typeCache.UInt32Ty); + cir::PointerType getUInt32PtrTy(unsigned AddrSpace = 0) { + return cir::PointerType::get(getContext(), typeCache.UInt32Ty); } /// Get a CIR anonymous struct type. - mlir::cir::StructType - getAnonStructTy(llvm::ArrayRef members, bool packed = false, - const clang::RecordDecl *ast = nullptr) { - mlir::cir::ASTRecordDeclAttr astAttr = nullptr; - auto kind = mlir::cir::StructType::RecordKind::Struct; + cir::StructType getAnonStructTy(llvm::ArrayRef members, + bool packed = false, + const clang::RecordDecl *ast = nullptr) { + cir::ASTRecordDeclAttr astAttr = nullptr; + auto kind = cir::StructType::RecordKind::Struct; if (ast) { - astAttr = getAttr(ast); + astAttr = getAttr(ast); kind = getRecordKind(ast->getTagKind()); } - return getType(members, packed, kind, astAttr); + return getType(members, packed, kind, astAttr); } /// Get a CIR record kind from a AST declaration tag. - mlir::cir::StructType::RecordKind - getRecordKind(const clang::TagTypeKind kind) { + cir::StructType::RecordKind getRecordKind(const clang::TagTypeKind kind) { switch (kind) { case clang::TagTypeKind::Struct: - return mlir::cir::StructType::Struct; + return cir::StructType::Struct; case clang::TagTypeKind::Union: - return mlir::cir::StructType::Union; + return cir::StructType::Union; case clang::TagTypeKind::Class: - return mlir::cir::StructType::Class; + return cir::StructType::Class; case clang::TagTypeKind::Interface: llvm_unreachable("interface records are NYI"); case clang::TagTypeKind::Enum: @@ -470,33 +465,33 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { } /// Get a incomplete CIR struct type. - mlir::cir::StructType getIncompleteStructTy(llvm::StringRef name, - const clang::RecordDecl *ast) { + cir::StructType getIncompleteStructTy(llvm::StringRef name, + const clang::RecordDecl *ast) { const auto nameAttr = getStringAttr(name); - auto kind = mlir::cir::StructType::RecordKind::Struct; + auto kind = cir::StructType::RecordKind::Struct; if (ast) kind = getRecordKind(ast->getTagKind()); - return getType(nameAttr, kind); + return getType(nameAttr, kind); } /// Get a CIR named struct type. /// /// If a struct already exists and is complete, but the client tries to fetch /// it with a different set of attributes, this method will crash. - mlir::cir::StructType getCompleteStructTy(llvm::ArrayRef members, - llvm::StringRef name, bool packed, - const clang::RecordDecl *ast) { + cir::StructType getCompleteStructTy(llvm::ArrayRef members, + llvm::StringRef name, bool packed, + const clang::RecordDecl *ast) { const auto nameAttr = getStringAttr(name); - mlir::cir::ASTRecordDeclAttr astAttr = nullptr; - auto kind = mlir::cir::StructType::RecordKind::Struct; + cir::ASTRecordDeclAttr astAttr = nullptr; + auto kind = cir::StructType::RecordKind::Struct; if (ast) { - astAttr = getAttr(ast); + astAttr = getAttr(ast); kind = getRecordKind(ast->getTagKind()); } // Create or get the struct. - auto type = getType(members, nameAttr, packed, kind, - astAttr); + auto type = + getType(members, nameAttr, packed, kind, astAttr); // Complete an incomplete struct or ensure the existing complete struct // matches the requested attributes. @@ -505,7 +500,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return type; } - mlir::cir::StructType + cir::StructType getCompleteStructType(mlir::ArrayAttr fields, bool packed = false, llvm::StringRef name = "", const clang::RecordDecl *ast = nullptr) { @@ -521,17 +516,16 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return getCompleteStructTy(members, name, packed, ast); } - mlir::cir::ArrayType getArrayType(mlir::Type eltType, unsigned size) { - return mlir::cir::ArrayType::get(getContext(), eltType, size); + cir::ArrayType getArrayType(mlir::Type eltType, unsigned size) { + return cir::ArrayType::get(getContext(), eltType, size); } bool isSized(mlir::Type ty) { - if (mlir::isa(ty)) + if (mlir::isa(ty)) return true; - if (mlir::isa(ty)) { - return isSized(mlir::cast(ty).getEltType()); + if (mlir::isa(ty)) { + return isSized(mlir::cast(ty).getEltType()); } assert(0 && "Unimplemented size for type"); return false; @@ -541,65 +535,61 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { // Constant creation helpers // ------------------------- // - mlir::cir::ConstantOp getSInt32(int32_t c, mlir::Location loc) { + cir::ConstantOp getSInt32(int32_t c, mlir::Location loc) { auto sInt32Ty = getSInt32Ty(); - return create(loc, sInt32Ty, - mlir::cir::IntAttr::get(sInt32Ty, c)); + return create(loc, sInt32Ty, + cir::IntAttr::get(sInt32Ty, c)); } - mlir::cir::ConstantOp getUInt32(uint32_t C, mlir::Location loc) { + cir::ConstantOp getUInt32(uint32_t C, mlir::Location loc) { auto uInt32Ty = getUInt32Ty(); - return create(loc, uInt32Ty, - mlir::cir::IntAttr::get(uInt32Ty, C)); + return create(loc, uInt32Ty, + cir::IntAttr::get(uInt32Ty, C)); } - mlir::cir::ConstantOp getSInt64(uint64_t C, mlir::Location loc) { + cir::ConstantOp getSInt64(uint64_t C, mlir::Location loc) { auto sInt64Ty = getSInt64Ty(); - return create(loc, sInt64Ty, - mlir::cir::IntAttr::get(sInt64Ty, C)); + return create(loc, sInt64Ty, + cir::IntAttr::get(sInt64Ty, C)); } - mlir::cir::ConstantOp getUInt64(uint64_t C, mlir::Location loc) { + cir::ConstantOp getUInt64(uint64_t C, mlir::Location loc) { auto uInt64Ty = getUInt64Ty(); - return create(loc, uInt64Ty, - mlir::cir::IntAttr::get(uInt64Ty, C)); + return create(loc, uInt64Ty, + cir::IntAttr::get(uInt64Ty, C)); } - mlir::cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal); + cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal); - mlir::cir::ConstantOp getConstInt(mlir::Location loc, llvm::APInt intVal); + cir::ConstantOp getConstInt(mlir::Location loc, llvm::APInt intVal); - mlir::cir::ConstantOp getConstInt(mlir::Location loc, mlir::Type t, - uint64_t C); + cir::ConstantOp getConstInt(mlir::Location loc, mlir::Type t, uint64_t C); - mlir::cir::ConstantOp getConstFP(mlir::Location loc, mlir::Type t, - llvm::APFloat fpVal) { - assert((mlir::isa(t)) && - "expected mlir::cir::SingleType or mlir::cir::DoubleType"); - return create(loc, t, - getAttr(t, fpVal)); + cir::ConstantOp getConstFP(mlir::Location loc, mlir::Type t, + llvm::APFloat fpVal) { + assert((mlir::isa(t)) && + "expected cir::SingleType or cir::DoubleType"); + return create(loc, t, getAttr(t, fpVal)); } - mlir::cir::IsFPClassOp createIsFPClass(mlir::Location loc, mlir::Value src, - unsigned flags) { - return create(loc, src, flags); + cir::IsFPClassOp createIsFPClass(mlir::Location loc, mlir::Value src, + unsigned flags) { + return create(loc, src, flags); } /// Create constant nullptr for pointer-to-data-member type ty. - mlir::cir::ConstantOp getNullDataMemberPtr(mlir::cir::DataMemberType ty, - mlir::Location loc) { - return create(loc, ty, getNullDataMemberAttr(ty)); + cir::ConstantOp getNullDataMemberPtr(cir::DataMemberType ty, + mlir::Location loc) { + return create(loc, ty, getNullDataMemberAttr(ty)); } - mlir::cir::ConstantOp getNullMethodPtr(mlir::cir::MethodType ty, - mlir::Location loc) { - return create(loc, ty, getNullMethodAttr(ty)); + cir::ConstantOp getNullMethodPtr(cir::MethodType ty, mlir::Location loc) { + return create(loc, ty, getNullMethodAttr(ty)); } - mlir::cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { + cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) { // TODO: dispatch creation for primitive types. - assert((mlir::isa(ty) || - mlir::isa(ty) || - mlir::isa(ty)) && + assert((mlir::isa(ty) || mlir::isa(ty) || + mlir::isa(ty)) && "NYI for other types"); - return create(loc, ty, getZeroAttr(ty)); + return create(loc, ty, getZeroAttr(ty)); } // @@ -608,39 +598,39 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { // /// Create a break operation. - mlir::cir::BreakOp createBreak(mlir::Location loc) { - return create(loc); + cir::BreakOp createBreak(mlir::Location loc) { + return create(loc); } /// Create a continue operation. - mlir::cir::ContinueOp createContinue(mlir::Location loc) { - return create(loc); + cir::ContinueOp createContinue(mlir::Location loc) { + return create(loc); } - mlir::cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, - mlir::Value src, mlir::Value len) { - return create(loc, dst, src, len); + cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, + mlir::Value src, mlir::Value len) { + return create(loc, dst, src, len); } - mlir::cir::MemMoveOp createMemMove(mlir::Location loc, mlir::Value dst, - mlir::Value src, mlir::Value len) { - return create(loc, dst, src, len); + cir::MemMoveOp createMemMove(mlir::Location loc, mlir::Value dst, + mlir::Value src, mlir::Value len) { + return create(loc, dst, src, len); } - mlir::cir::MemSetOp createMemSet(mlir::Location loc, mlir::Value dst, - mlir::Value val, mlir::Value len) { - val = createIntCast(val, mlir::cir::IntType::get(getContext(), 32, true)); - return create(loc, dst, val, len); + cir::MemSetOp createMemSet(mlir::Location loc, mlir::Value dst, + mlir::Value val, mlir::Value len) { + val = createIntCast(val, cir::IntType::get(getContext(), 32, true)); + return create(loc, dst, val, len); } mlir::Value createNeg(mlir::Value value) { - if (auto intTy = mlir::dyn_cast(value.getType())) { + if (auto intTy = mlir::dyn_cast(value.getType())) { // Source is a unsigned integer: first cast it to signed. if (intTy.isUnsigned()) value = createIntCast(value, getSIntNTy(intTy.getWidth())); - return create(value.getLoc(), value.getType(), - mlir::cir::UnaryOpKind::Minus, value); + return create(value.getLoc(), value.getType(), + cir::UnaryOpKind::Minus, value); } llvm_unreachable("negation for the given type is NYI"); @@ -652,8 +642,8 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { if (getIsFPConstrained()) llvm_unreachable("constrainedfp NYI"); - return create(v.getLoc(), destType, - mlir::cir::CastKind::floating, v); + return create(v.getLoc(), destType, cir::CastKind::floating, + v); } mlir::Value createFSub(mlir::Value lhs, mlir::Value rhs) { @@ -662,8 +652,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { llvm_unreachable("Constrained FP NYI"); assert(!cir::MissingFeatures::foldBinOpFMF()); - return create(lhs.getLoc(), mlir::cir::BinOpKind::Sub, - lhs, rhs); + return create(lhs.getLoc(), cir::BinOpKind::Sub, lhs, rhs); } mlir::Value createFAdd(mlir::Value lhs, mlir::Value rhs) { @@ -672,8 +661,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { llvm_unreachable("Constrained FP NYI"); assert(!cir::MissingFeatures::foldBinOpFMF()); - return create(lhs.getLoc(), mlir::cir::BinOpKind::Add, - lhs, rhs); + return create(lhs.getLoc(), cir::BinOpKind::Add, lhs, rhs); } mlir::Value createFMul(mlir::Value lhs, mlir::Value rhs) { assert(!cir::MissingFeatures::metaDataNode()); @@ -681,17 +669,16 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { llvm_unreachable("Constrained FP NYI"); assert(!cir::MissingFeatures::foldBinOpFMF()); - return create(lhs.getLoc(), mlir::cir::BinOpKind::Mul, - lhs, rhs); + return create(lhs.getLoc(), cir::BinOpKind::Mul, lhs, rhs); } mlir::Value createDynCast(mlir::Location loc, mlir::Value src, - mlir::cir::PointerType destType, bool isRefCast, - mlir::cir::DynamicCastInfoAttr info) { - auto castKind = isRefCast ? mlir::cir::DynamicCastKind::ref - : mlir::cir::DynamicCastKind::ptr; - return create(loc, destType, castKind, src, info, - /*relative_layout=*/false); + cir::PointerType destType, bool isRefCast, + cir::DynamicCastInfoAttr info) { + auto castKind = + isRefCast ? cir::DynamicCastKind::ref : cir::DynamicCastKind::ptr; + return create(loc, destType, castKind, src, info, + /*relative_layout=*/false); } mlir::Value createDynCastToVoid(mlir::Location loc, mlir::Value src, @@ -699,9 +686,9 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { // TODO(cir): consider address space here. assert(!cir::MissingFeatures::addressSpace()); auto destTy = getVoidPtrTy(); - return create( - loc, destTy, mlir::cir::DynamicCastKind::ptr, src, - mlir::cir::DynamicCastInfoAttr{}, vtableUseRelativeLayout); + return create(loc, destTy, cir::DynamicCastKind::ptr, + src, cir::DynamicCastInfoAttr{}, + vtableUseRelativeLayout); } Address createBaseClassAddr(mlir::Location loc, Address addr, @@ -711,7 +698,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return addr; auto ptrTy = getPointerTo(destType); - auto baseAddr = create( + auto baseAddr = create( loc, ptrTy, addr.getPointer(), mlir::APInt(64, offset), assumeNotNull); return Address(baseAddr, ptrTy, addr.getAlignment()); } @@ -723,21 +710,20 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return addr; auto ptrTy = getPointerTo(destType); - auto derivedAddr = create( + auto derivedAddr = create( loc, ptrTy, addr.getPointer(), mlir::APInt(64, offset), assumeNotNull); return Address(derivedAddr, ptrTy, addr.getAlignment()); } mlir::Value createVTTAddrPoint(mlir::Location loc, mlir::Type retTy, mlir::Value addr, uint64_t offset) { - return create( - loc, retTy, mlir::FlatSymbolRefAttr{}, addr, offset); + return create(loc, retTy, mlir::FlatSymbolRefAttr{}, + addr, offset); } mlir::Value createVTTAddrPoint(mlir::Location loc, mlir::Type retTy, mlir::FlatSymbolRefAttr sym, uint64_t offset) { - return create(loc, retTy, sym, mlir::Value{}, - offset); + return create(loc, retTy, sym, mlir::Value{}, offset); } // FIXME(cir): CIRGenBuilder class should have an attribute with a reference @@ -745,25 +731,23 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { // FIXME(cir): Track a list of globals, or at least the last one inserted, so // that we can insert globals in the same order they are defined by CIRGen. - [[nodiscard]] mlir::cir::GlobalOp + [[nodiscard]] cir::GlobalOp createGlobal(mlir::ModuleOp module, mlir::Location loc, mlir::StringRef name, - mlir::Type type, bool isConst, - mlir::cir::GlobalLinkageKind linkage, - mlir::cir::AddressSpaceAttr addrSpace = {}) { + mlir::Type type, bool isConst, cir::GlobalLinkageKind linkage, + cir::AddressSpaceAttr addrSpace = {}) { mlir::OpBuilder::InsertionGuard guard(*this); setInsertionPointToStart(module.getBody()); - return create(loc, name, type, isConst, linkage, - addrSpace); + return create(loc, name, type, isConst, linkage, addrSpace); } /// Creates a versioned global variable. If the symbol is already taken, an ID /// will be appended to the symbol. The returned global must always be queried /// for its name so it can be referenced correctly. - [[nodiscard]] mlir::cir::GlobalOp + [[nodiscard]] cir::GlobalOp createVersionedGlobal(mlir::ModuleOp module, mlir::Location loc, mlir::StringRef name, mlir::Type type, bool isConst, - mlir::cir::GlobalLinkageKind linkage, - mlir::cir::AddressSpaceAttr addrSpace = {}) { + cir::GlobalLinkageKind linkage, + cir::AddressSpaceAttr addrSpace = {}) { // Create a unique name if the given name is already taken. std::string uniqueName; if (unsigned version = GlobalsVersioning[name.str()]++) @@ -780,9 +764,9 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { const CIRGenBitFieldInfo &info, bool isLvalueVolatile, bool useVolatile) { auto offset = useVolatile ? info.VolatileOffset : info.Offset; - return create(loc, resultType, addr, storageType, - info.Name, info.Size, offset, - info.IsSigned, isLvalueVolatile); + return create(loc, resultType, addr, storageType, + info.Name, info.Size, offset, + info.IsSigned, isLvalueVolatile); } mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType, @@ -790,25 +774,24 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { mlir::Value src, const CIRGenBitFieldInfo &info, bool isLvalueVolatile, bool useVolatile) { auto offset = useVolatile ? info.VolatileOffset : info.Offset; - return create( - loc, resultType, dstAddr, storageType, src, info.Name, info.Size, - offset, info.IsSigned, isLvalueVolatile); + return create(loc, resultType, dstAddr, storageType, + src, info.Name, info.Size, offset, + info.IsSigned, isLvalueVolatile); } /// Create a pointer to a record member. mlir::Value createGetMember(mlir::Location loc, mlir::Type result, mlir::Value base, llvm::StringRef name, unsigned index) { - return create(loc, result, base, name, index); + return create(loc, result, base, name, index); } /// Create a cir.complex.real_ptr operation that derives a pointer to the real /// part of the complex value pointed to by the specified pointer value. mlir::Value createRealPtr(mlir::Location loc, mlir::Value value) { - auto srcPtrTy = mlir::cast(value.getType()); - auto srcComplexTy = - mlir::cast(srcPtrTy.getPointee()); - return create( + auto srcPtrTy = mlir::cast(value.getType()); + auto srcComplexTy = mlir::cast(srcPtrTy.getPointee()); + return create( loc, getPointerTo(srcComplexTy.getElementTy()), value); } @@ -820,10 +803,9 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { /// imaginary part of the complex value pointed to by the specified pointer /// value. mlir::Value createImagPtr(mlir::Location loc, mlir::Value value) { - auto srcPtrTy = mlir::cast(value.getType()); - auto srcComplexTy = - mlir::cast(srcPtrTy.getPointee()); - return create( + auto srcPtrTy = mlir::cast(value.getType()); + auto srcComplexTy = mlir::cast(srcPtrTy.getPointee()); + return create( loc, getPointerTo(srcComplexTy.getElementTy()), value); } @@ -845,22 +827,21 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { mlir::Value createLoad(mlir::Location loc, Address addr, bool isVolatile = false) { - auto ptrTy = - mlir::dyn_cast(addr.getPointer().getType()); + auto ptrTy = mlir::dyn_cast(addr.getPointer().getType()); if (addr.getElementType() != ptrTy.getPointee()) addr = addr.withPointer( createPtrBitcast(addr.getPointer(), addr.getElementType())); - return create( + return create( loc, addr.getElementType(), addr.getPointer(), /*isDeref=*/false, /*is_volatile=*/isVolatile, /*alignment=*/mlir::IntegerAttr{}, - /*mem_order=*/mlir::cir::MemOrderAttr{}); + /*mem_order=*/cir::MemOrderAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value ptr, llvm::MaybeAlign align, bool isVolatile) { - if (ty != mlir::cast(ptr.getType()).getPointee()) + if (ty != mlir::cast(ptr.getType()).getPointee()) ptr = createPtrBitcast(ptr, ty); uint64_t alignment = align ? align->value() : 0; return CIRBaseBuilderTy::createLoad(loc, ptr, isVolatile, alignment); @@ -879,53 +860,51 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return createAlignedLoad(loc, ty, addr, align.getAsAlign()); } - mlir::cir::StoreOp createStore(mlir::Location loc, mlir::Value val, - Address dst, bool _volatile = false, - ::mlir::IntegerAttr align = {}, - ::mlir::cir::MemOrderAttr order = {}) { + cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst, + bool _volatile = false, + ::mlir::IntegerAttr align = {}, + cir::MemOrderAttr order = {}) { return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), _volatile, align, order); } - mlir::cir::StoreOp createFlagStore(mlir::Location loc, bool val, - mlir::Value dst) { + cir::StoreOp createFlagStore(mlir::Location loc, bool val, mlir::Value dst) { auto flag = getBool(val, loc); return CIRBaseBuilderTy::createStore(loc, flag, dst); } - mlir::cir::VecShuffleOp + cir::VecShuffleOp createVecShuffle(mlir::Location loc, mlir::Value vec1, mlir::Value vec2, llvm::ArrayRef maskAttrs) { - auto vecType = mlir::cast(vec1.getType()); - auto resultTy = mlir::cir::VectorType::get( - getContext(), vecType.getEltType(), maskAttrs.size()); - return CIRBaseBuilderTy::create( + auto vecType = mlir::cast(vec1.getType()); + auto resultTy = cir::VectorType::get(getContext(), vecType.getEltType(), + maskAttrs.size()); + return CIRBaseBuilderTy::create( loc, resultTy, vec1, vec2, getArrayAttr(maskAttrs)); } - mlir::cir::VecShuffleOp createVecShuffle(mlir::Location loc, mlir::Value vec1, - mlir::Value vec2, - llvm::ArrayRef mask) { + cir::VecShuffleOp createVecShuffle(mlir::Location loc, mlir::Value vec1, + mlir::Value vec2, + llvm::ArrayRef mask) { llvm::SmallVector maskAttrs; for (int32_t idx : mask) { - maskAttrs.push_back(mlir::cir::IntAttr::get(getSInt32Ty(), idx)); + maskAttrs.push_back(cir::IntAttr::get(getSInt32Ty(), idx)); } return createVecShuffle(loc, vec1, vec2, maskAttrs); } - mlir::cir::VecShuffleOp createVecShuffle(mlir::Location loc, mlir::Value vec1, - llvm::ArrayRef mask) { + cir::VecShuffleOp createVecShuffle(mlir::Location loc, mlir::Value vec1, + llvm::ArrayRef mask) { // FIXME(cir): Support use cir.vec.shuffle with single vec // Workaround: pass Vec as both vec1 and vec2 return createVecShuffle(loc, vec1, vec1, mask); } - mlir::cir::StoreOp + cir::StoreOp createAlignedStore(mlir::Location loc, mlir::Value val, mlir::Value dst, clang::CharUnits align = clang::CharUnits::One(), - bool _volatile = false, - ::mlir::cir::MemOrderAttr order = {}) { + bool _volatile = false, cir::MemOrderAttr order = {}) { llvm::MaybeAlign mayAlign = align.getAsAlign(); mlir::IntegerAttr alignAttr; if (mayAlign) { @@ -958,13 +937,13 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return {DivRet, ModRet}; }; - if (auto ArrayTy = mlir::dyn_cast(Ty)) { + if (auto ArrayTy = mlir::dyn_cast(Ty)) { int64_t EltSize = Layout.getTypeAllocSize(ArrayTy.getEltType()); SubType = ArrayTy.getEltType(); auto const [Index, NewOffset] = getIndexAndNewOffset(Offset, EltSize); Indices.push_back(Index); Offset = NewOffset; - } else if (auto StructTy = mlir::dyn_cast(Ty)) { + } else if (auto StructTy = mlir::dyn_cast(Ty)) { auto Elts = StructTy.getMembers(); int64_t Pos = 0; for (size_t I = 0; I < Elts.size(); ++I) { @@ -989,20 +968,19 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { computeGlobalViewIndicesFromFlatOffset(Offset, SubType, Layout, Indices); } - mlir::cir::StackSaveOp createStackSave(mlir::Location loc, mlir::Type ty) { - return create(loc, ty); + cir::StackSaveOp createStackSave(mlir::Location loc, mlir::Type ty) { + return create(loc, ty); } - mlir::cir::StackRestoreOp createStackRestore(mlir::Location loc, - mlir::Value v) { - return create(loc, v); + cir::StackRestoreOp createStackRestore(mlir::Location loc, mlir::Value v) { + return create(loc, v); } // TODO(cir): Change this to hoist alloca to the parent *scope* instead. /// Move alloca operation to the parent region. - void hoistAllocaToParentRegion(mlir::cir::AllocaOp alloca) { + void hoistAllocaToParentRegion(cir::AllocaOp alloca) { auto &block = alloca->getParentOp()->getParentRegion()->front(); - const auto allocas = block.getOps(); + const auto allocas = block.getOps(); if (allocas.empty()) { alloca->moveBefore(&block, block.begin()); } else { @@ -1010,20 +988,20 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { } } - mlir::cir::CmpThreeWayOp - createThreeWayCmpStrong(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, - const llvm::APSInt <Res, const llvm::APSInt &eqRes, - const llvm::APSInt >Res) { + cir::CmpThreeWayOp createThreeWayCmpStrong(mlir::Location loc, + mlir::Value lhs, mlir::Value rhs, + const llvm::APSInt <Res, + const llvm::APSInt &eqRes, + const llvm::APSInt >Res) { assert(ltRes.getBitWidth() == eqRes.getBitWidth() && ltRes.getBitWidth() == gtRes.getBitWidth() && "the three comparison results must have the same bit width"); auto cmpResultTy = getSIntNTy(ltRes.getBitWidth()); auto infoAttr = getCmpThreeWayInfoStrongOrdering(ltRes, eqRes, gtRes); - return create(loc, cmpResultTy, lhs, rhs, - infoAttr); + return create(loc, cmpResultTy, lhs, rhs, infoAttr); } - mlir::cir::CmpThreeWayOp + cir::CmpThreeWayOp createThreeWayCmpPartial(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, const llvm::APSInt <Res, const llvm::APSInt &eqRes, const llvm::APSInt >Res, @@ -1035,22 +1013,19 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { auto cmpResultTy = getSIntNTy(ltRes.getBitWidth()); auto infoAttr = getCmpThreeWayInfoPartialOrdering(ltRes, eqRes, gtRes, unorderedRes); - return create(loc, cmpResultTy, lhs, rhs, - infoAttr); + return create(loc, cmpResultTy, lhs, rhs, infoAttr); } - mlir::cir::GetRuntimeMemberOp createGetIndirectMember(mlir::Location loc, - mlir::Value objectPtr, - mlir::Value memberPtr) { - auto memberPtrTy = - mlir::cast(memberPtr.getType()); + cir::GetRuntimeMemberOp createGetIndirectMember(mlir::Location loc, + mlir::Value objectPtr, + mlir::Value memberPtr) { + auto memberPtrTy = mlir::cast(memberPtr.getType()); // TODO(cir): consider address space. assert(!cir::MissingFeatures::addressSpace()); auto resultTy = getPointerTo(memberPtrTy.getMemberTy()); - return create(loc, resultTy, objectPtr, - memberPtr); + return create(loc, resultTy, objectPtr, memberPtr); } /// Create a cir.ptr_stride operation to get access to an array element. diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 55ec6977410f..7575972eb6a2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -36,7 +36,7 @@ using namespace clang; using namespace clang::CIRGen; -using namespace mlir::cir; +using namespace cir; using namespace llvm; static RValue buildLibraryCall(CIRGenFunction &CGF, const FunctionDecl *FD, @@ -196,10 +196,10 @@ EncompassingIntegerType(ArrayRef Types) { /// Emit the conversions required to turn the given value into an /// integer of the given size. static mlir::Value buildToInt(CIRGenFunction &CGF, mlir::Value v, QualType t, - mlir::cir::IntType intType) { + cir::IntType intType) { v = CGF.buildToMemory(v, t); - if (isa(v.getType())) + if (isa(v.getType())) return CGF.getBuilder().createPtrToInt(v, intType); assert(v.getType() == intType); @@ -210,7 +210,7 @@ static mlir::Value buildFromInt(CIRGenFunction &CGF, mlir::Value v, QualType t, mlir::Type resultType) { v = CGF.buildFromMemory(v, t); - if (isa(resultType)) + if (isa(resultType)) return CGF.getBuilder().createIntToPtr(v, resultType); assert(v.getType() == resultType); @@ -221,7 +221,7 @@ static Address checkAtomicAlignment(CIRGenFunction &CGF, const CallExpr *E) { ASTContext &ctx = CGF.getContext(); Address ptr = CGF.buildPointerWithAlignment(E->getArg(0)); unsigned bytes = - isa(ptr.getElementType()) + isa(ptr.getElementType()) ? ctx.getTypeSizeInChars(ctx.VoidPtrTy).getQuantity() : CGF.CGM.getDataLayout().getTypeSizeInBits(ptr.getElementType()) / 8; unsigned align = ptr.getAlignment().getQuantity(); @@ -236,11 +236,9 @@ static Address checkAtomicAlignment(CIRGenFunction &CGF, const CallExpr *E) { /// Utility to insert an atomic instruction based on Intrinsic::ID /// and the expression node. -static mlir::Value -makeBinaryAtomicValue(CIRGenFunction &cgf, mlir::cir::AtomicFetchKind kind, - const CallExpr *expr, - mlir::cir::MemOrder ordering = - mlir::cir::MemOrder::SequentiallyConsistent) { +static mlir::Value makeBinaryAtomicValue( + CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr, + cir::MemOrder ordering = cir::MemOrder::SequentiallyConsistent) { QualType typ = expr->getType(); @@ -260,15 +258,14 @@ makeBinaryAtomicValue(CIRGenFunction &cgf, mlir::cir::AtomicFetchKind kind, mlir::Type valueType = val.getType(); val = buildToInt(cgf, val, typ, intType); - auto rmwi = builder.create( + auto rmwi = builder.create( cgf.getLoc(expr->getSourceRange()), destAddr.emitRawPointer(), val, kind, ordering, false, /* is volatile */ true); /* fetch first */ return buildFromInt(cgf, rmwi->getResult(0), typ, valueType); } -static RValue buildBinaryAtomic(CIRGenFunction &CGF, - mlir::cir::AtomicFetchKind kind, +static RValue buildBinaryAtomic(CIRGenFunction &CGF, cir::AtomicFetchKind kind, const CallExpr *E) { return RValue::get(makeBinaryAtomicValue(CGF, kind, E)); } @@ -289,11 +286,11 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, auto newVal = buildToInt(cgf, cgf.buildScalarExpr(expr->getArg(2)), typ, intType); - auto op = builder.create( + auto op = builder.create( cgf.getLoc(expr->getSourceRange()), cmpVal.getType(), builder.getBoolTy(), destAddr.getPointer(), cmpVal, newVal, - mlir::cir::MemOrder::SequentiallyConsistent, - mlir::cir::MemOrder::SequentiallyConsistent); + cir::MemOrder::SequentiallyConsistent, + cir::MemOrder::SequentiallyConsistent); return returnBool ? op.getResult(1) : op.getResult(0); } @@ -306,8 +303,8 @@ RValue CIRGenFunction::buildRotate(const CallExpr *E, bool IsRotateRight) { // result, but the CIR ops uses the same type for all values. auto ty = src.getType(); shiftAmt = builder.createIntCast(shiftAmt, ty); - auto r = builder.create(getLoc(E->getSourceRange()), src, - shiftAmt); + auto r = + builder.create(getLoc(E->getSourceRange()), src, shiftAmt); if (!IsRotateRight) r->setAttr("left", mlir::UnitAttr::get(src.getContext())); return RValue::get(r); @@ -487,7 +484,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_ceilf16: case Builtin::BI__builtin_ceill: case Builtin::BI__builtin_ceilf128: - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIcopysign: case Builtin::BIcopysignf: @@ -495,7 +492,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_copysign: case Builtin::BI__builtin_copysignf: case Builtin::BI__builtin_copysignl: - return buildBinaryFPBuiltin(*this, *E); + return buildBinaryFPBuiltin(*this, *E); case Builtin::BI__builtin_copysignf16: case Builtin::BI__builtin_copysignf128: @@ -510,7 +507,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_cosl: case Builtin::BI__builtin_cosf128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIcosh: case Builtin::BIcoshf: @@ -531,7 +528,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_expl: case Builtin::BI__builtin_expf128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIexp2: case Builtin::BIexp2f: @@ -542,7 +539,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_exp2l: case Builtin::BI__builtin_exp2f128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BI__builtin_exp10: case Builtin::BI__builtin_exp10f: @@ -559,7 +556,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fabsf16: case Builtin::BI__builtin_fabsl: case Builtin::BI__builtin_fabsf128: - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIfloor: case Builtin::BIfloorf: @@ -569,7 +566,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_floorf16: case Builtin::BI__builtin_floorl: case Builtin::BI__builtin_floorf128: - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIfma: case Builtin::BIfmaf: @@ -588,7 +585,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmaxf: case Builtin::BI__builtin_fmaxl: return RValue::get( - buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); + buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); case Builtin::BI__builtin_fmaxf16: case Builtin::BI__builtin_fmaxf128: @@ -601,7 +598,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fminf: case Builtin::BI__builtin_fminl: return RValue::get( - buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); + buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); case Builtin::BI__builtin_fminf16: case Builtin::BI__builtin_fminf128: @@ -616,7 +613,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmodf: case Builtin::BI__builtin_fmodl: assert(!cir::MissingFeatures::fastMathFlags()); - return buildBinaryFPBuiltin(*this, *E); + return buildBinaryFPBuiltin(*this, *E); case Builtin::BI__builtin_fmodf16: case Builtin::BI__builtin_fmodf128: @@ -632,7 +629,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_logl: case Builtin::BI__builtin_logf128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlog10: case Builtin::BIlog10f: @@ -643,7 +640,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log10l: case Builtin::BI__builtin_log10f128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlog2: case Builtin::BIlog2f: @@ -654,7 +651,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log2l: case Builtin::BI__builtin_log2f128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BInearbyint: case Builtin::BInearbyintf: @@ -663,7 +660,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_nearbyintf: case Builtin::BI__builtin_nearbyintl: case Builtin::BI__builtin_nearbyintf128: - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIpow: case Builtin::BIpowf: @@ -673,7 +670,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_powl: assert(!cir::MissingFeatures::fastMathFlags()); return RValue::get( - buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); + buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); case Builtin::BI__builtin_powf16: case Builtin::BI__builtin_powf128: @@ -687,7 +684,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_rintf16: case Builtin::BI__builtin_rintl: case Builtin::BI__builtin_rintf128: - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIround: case Builtin::BIroundf: @@ -697,7 +694,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_roundf16: case Builtin::BI__builtin_roundl: case Builtin::BI__builtin_roundf128: - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIroundeven: case Builtin::BIroundevenf: @@ -718,7 +715,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sinl: case Builtin::BI__builtin_sinf128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIsqrt: case Builtin::BIsqrtf: @@ -729,7 +726,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sqrtl: case Builtin::BI__builtin_sqrtf128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BI__builtin_elementwise_sqrt: llvm_unreachable("BI__builtin_elementwise_sqrt NYI"); @@ -762,7 +759,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_truncf16: case Builtin::BI__builtin_truncl: case Builtin::BI__builtin_truncf128: - return buildUnaryFPBuiltin(*this, *E); + return buildUnaryFPBuiltin(*this, *E); case Builtin::BIlround: case Builtin::BIlroundf: @@ -770,8 +767,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_lround: case Builtin::BI__builtin_lroundf: case Builtin::BI__builtin_lroundl: - return buildUnaryMaybeConstrainedFPToIntBuiltin( - *this, *E); + return buildUnaryMaybeConstrainedFPToIntBuiltin(*this, *E); case Builtin::BI__builtin_lroundf128: llvm_unreachable("BI__builtin_lroundf128 NYI"); @@ -782,8 +778,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_llround: case Builtin::BI__builtin_llroundf: case Builtin::BI__builtin_llroundl: - return buildUnaryMaybeConstrainedFPToIntBuiltin( - *this, *E); + return buildUnaryMaybeConstrainedFPToIntBuiltin(*this, + *E); case Builtin::BI__builtin_llroundf128: llvm_unreachable("BI__builtin_llroundf128 NYI"); @@ -794,8 +790,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_lrint: case Builtin::BI__builtin_lrintf: case Builtin::BI__builtin_lrintl: - return buildUnaryMaybeConstrainedFPToIntBuiltin(*this, - *E); + return buildUnaryMaybeConstrainedFPToIntBuiltin(*this, *E); case Builtin::BI__builtin_lrintf128: llvm_unreachable("BI__builtin_lrintf128 NYI"); @@ -806,8 +801,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_llrint: case Builtin::BI__builtin_llrintf: case Builtin::BI__builtin_llrintl: - return buildUnaryMaybeConstrainedFPToIntBuiltin( - *this, *E); + return buildUnaryMaybeConstrainedFPToIntBuiltin(*this, *E); case Builtin::BI__builtin_llrintf128: llvm_unreachable("BI__builtin_llrintf128 NYI"); @@ -846,7 +840,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_va_copy: { auto dstPtr = buildVAListRef(E->getArg(0)).getPointer(); auto srcPtr = buildVAListRef(E->getArg(1)).getPointer(); - builder.create(dstPtr.getLoc(), dstPtr, srcPtr); + builder.create(dstPtr.getLoc(), dstPtr, srcPtr); return {}; } @@ -861,15 +855,15 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, mlir::Value Result; switch (getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: { - auto Call = getBuilder().create( - getLoc(E->getExprLoc()), Arg.getType(), Arg, false); + auto Call = getBuilder().create(getLoc(E->getExprLoc()), + Arg.getType(), Arg, false); Result = Call->getResult(0); break; } case LangOptions::SOB_Undefined: { if (!SanitizeOverflow) { - auto Call = getBuilder().create( - getLoc(E->getExprLoc()), Arg.getType(), Arg, true); + auto Call = getBuilder().create(getLoc(E->getExprLoc()), + Arg.getType(), Arg, true); Result = Call->getResult(0); break; } @@ -897,8 +891,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIconjf: case Builtin::BIconjl: { mlir::Value ComplexVal = buildComplexExpr(E->getArg(0)); - mlir::Value Conj = builder.createUnaryOp( - getLoc(E->getExprLoc()), mlir::cir::UnaryOpKind::Not, ComplexVal); + mlir::Value Conj = builder.createUnaryOp(getLoc(E->getExprLoc()), + cir::UnaryOpKind::Not, ComplexVal); return RValue::getComplex(Conj); } @@ -932,31 +926,31 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_clrsb: case Builtin::BI__builtin_clrsbl: case Builtin::BI__builtin_clrsbll: - return buildBuiltinBitOp(*this, E, std::nullopt); + return buildBuiltinBitOp(*this, E, std::nullopt); case Builtin::BI__builtin_ctzs: case Builtin::BI__builtin_ctz: case Builtin::BI__builtin_ctzl: case Builtin::BI__builtin_ctzll: case Builtin::BI__builtin_ctzg: - return buildBuiltinBitOp(*this, E, BCK_CTZPassedZero); + return buildBuiltinBitOp(*this, E, BCK_CTZPassedZero); case Builtin::BI__builtin_clzs: case Builtin::BI__builtin_clz: case Builtin::BI__builtin_clzl: case Builtin::BI__builtin_clzll: case Builtin::BI__builtin_clzg: - return buildBuiltinBitOp(*this, E, BCK_CLZPassedZero); + return buildBuiltinBitOp(*this, E, BCK_CLZPassedZero); case Builtin::BI__builtin_ffs: case Builtin::BI__builtin_ffsl: case Builtin::BI__builtin_ffsll: - return buildBuiltinBitOp(*this, E, std::nullopt); + return buildBuiltinBitOp(*this, E, std::nullopt); case Builtin::BI__builtin_parity: case Builtin::BI__builtin_parityl: case Builtin::BI__builtin_parityll: - return buildBuiltinBitOp(*this, E, std::nullopt); + return buildBuiltinBitOp(*this, E, std::nullopt); case Builtin::BI__lzcnt16: case Builtin::BI__lzcnt: @@ -970,7 +964,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_popcountl: case Builtin::BI__builtin_popcountll: case Builtin::BI__builtin_popcountg: - return buildBuiltinBitOp(*this, E, std::nullopt); + return buildBuiltinBitOp(*this, E, std::nullopt); case Builtin::BI__builtin_unpredictable: { if (CGM.getCodeGenOpts().OptimizationLevel != 0) @@ -1004,9 +998,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, mlir::Float64Type::get(&getMLIRContext()), Probability); } - auto result = builder.create( - getLoc(E->getSourceRange()), ArgValue.getType(), ArgValue, - ExpectedValue, ProbAttr); + auto result = builder.create(getLoc(E->getSourceRange()), + ArgValue.getType(), ArgValue, + ExpectedValue, ProbAttr); return RValue::get(result); } @@ -1019,7 +1013,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, mlir::Attribute alignmentAttr = ConstantEmitter(*this).emitAbstract( E->getArg(1), E->getArg(1)->getType()); - std::int64_t alignment = cast(alignmentAttr).getSInt(); + std::int64_t alignment = cast(alignmentAttr).getSInt(); ptrValue = buildAlignmentAssumption(ptrValue, ptr, ptr->getExprLoc(), builder.getI64IntegerAttr(alignment), @@ -1033,7 +1027,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(nullptr); mlir::Value argValue = buildScalarExpr(E->getArg(0)); - builder.create(getLoc(E->getExprLoc()), argValue); + builder.create(getLoc(E->getExprLoc()), argValue); return RValue::get(nullptr); } @@ -1044,8 +1038,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, mlir::Value value0 = buildScalarExpr(arg0); mlir::Value value1 = buildScalarExpr(arg1); - builder.create(getLoc(E->getExprLoc()), - value0, value1); + builder.create(getLoc(E->getExprLoc()), value0, + value1); return RValue::get(nullptr); } @@ -1062,8 +1056,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI_byteswap_ulong: case Builtin::BI_byteswap_uint64: { auto arg = buildScalarExpr(E->getArg(0)); - return RValue::get(builder.create( - getLoc(E->getSourceRange()), arg)); + return RValue::get( + builder.create(getLoc(E->getSourceRange()), arg)); } case Builtin::BI__builtin_bitreverse8: @@ -1107,14 +1101,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // inlining. return RValue::get( builder.getConstInt(getLoc(E->getSourceRange()), - mlir::cast(ResultType), 0)); + mlir::cast(ResultType), 0)); if (Arg->HasSideEffects(getContext())) // The argument is unevaluated, so be conservative if it might have // side-effects. return RValue::get( builder.getConstInt(getLoc(E->getSourceRange()), - mlir::cast(ResultType), 0)); + mlir::cast(ResultType), 0)); mlir::Value ArgValue = buildScalarExpr(Arg); if (ArgType->isObjCObjectPointerType()) @@ -1123,7 +1117,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, ArgType = CGM.getASTContext().getObjCIdType(); ArgValue = builder.createBitcast(ArgValue, ConvertType(ArgType)); - mlir::Value Result = builder.create( + mlir::Value Result = builder.create( getLoc(E->getSourceRange()), ArgValue); if (Result.getType() != ResultType) Result = builder.createBoolToInt(Result, ResultType); @@ -1137,8 +1131,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_object_size: { unsigned Type = E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); - auto ResType = - mlir::dyn_cast(ConvertType(E->getType())); + auto ResType = mlir::dyn_cast(ConvertType(E->getType())); assert(ResType && "not sure what to do?"); // We pass this builtin onto the optimizer so that it can figure out the @@ -1166,8 +1159,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Locality = evaluateOperandAsInt(E->getArg(2)); mlir::Value Address = buildScalarExpr(E->getArg(0)); - builder.create(getLoc(E->getSourceRange()), Address, - Locality, IsWrite); + builder.create(getLoc(E->getSourceRange()), Address, + Locality, IsWrite); return RValue::get(nullptr); } case Builtin::BI__builtin_readcyclecounter: @@ -1176,17 +1169,16 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_readsteadycounter NYI"); case Builtin::BI__builtin___clear_cache: { - mlir::Type voidTy = mlir::cir::VoidType::get(&getMLIRContext()); + mlir::Type voidTy = cir::VoidType::get(&getMLIRContext()); mlir::Value begin = builder.createPtrBitcast(buildScalarExpr(E->getArg(0)), voidTy); mlir::Value end = builder.createPtrBitcast(buildScalarExpr(E->getArg(1)), voidTy); - builder.create(getLoc(E->getSourceRange()), begin, - end); + builder.create(getLoc(E->getSourceRange()), begin, end); return RValue::get(nullptr); } case Builtin::BI__builtin_trap: { - builder.create(getLoc(E->getExprLoc())); + builder.create(getLoc(E->getExprLoc())); // Note that cir.trap is a terminator so we need to start a new block to // preserve the insertion point. @@ -1514,9 +1506,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, mlir::Location loc = getLoc(E->getExprLoc()); mlir::Attribute levelAttr = ConstantEmitter(*this).emitAbstract( E->getArg(0), E->getArg(0)->getType()); - int64_t level = mlir::cast(levelAttr).getSInt(); - return RValue::get(builder.create( - loc, builder.getUInt32(level, loc))); + int64_t level = mlir::cast(levelAttr).getSInt(); + return RValue::get( + builder.create(loc, builder.getUInt32(level, loc))); } case Builtin::BI_ReturnAddress: llvm_unreachable("BI_ReturnAddress NYI"); @@ -1567,7 +1559,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_fetch_and_add_4: case Builtin::BI__sync_fetch_and_add_8: case Builtin::BI__sync_fetch_and_add_16: { - return buildBinaryAtomic(*this, mlir::cir::AtomicFetchKind::Add, E); + return buildBinaryAtomic(*this, cir::AtomicFetchKind::Add, E); } case Builtin::BI__sync_fetch_and_sub_1: @@ -1575,7 +1567,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_fetch_and_sub_4: case Builtin::BI__sync_fetch_and_sub_8: case Builtin::BI__sync_fetch_and_sub_16: { - return buildBinaryAtomic(*this, mlir::cir::AtomicFetchKind::Sub, E); + return buildBinaryAtomic(*this, cir::AtomicFetchKind::Sub, E); } case Builtin::BI__sync_fetch_and_or_1: @@ -1757,10 +1749,10 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, WidthAndSignedness EncompassingInfo = EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo}); - auto EncompassingCIRTy = mlir::cir::IntType::get( + auto EncompassingCIRTy = cir::IntType::get( &getMLIRContext(), EncompassingInfo.Width, EncompassingInfo.Signed); auto ResultCIRTy = - mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); + mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); mlir::Value Left = buildScalarExpr(LeftArg); mlir::Value Right = buildScalarExpr(RightArg); @@ -1768,25 +1760,25 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // Extend each operand to the encompassing type, if necessary. if (Left.getType() != EncompassingCIRTy) - Left = builder.createCast(mlir::cir::CastKind::integral, Left, - EncompassingCIRTy); + Left = + builder.createCast(cir::CastKind::integral, Left, EncompassingCIRTy); if (Right.getType() != EncompassingCIRTy) - Right = builder.createCast(mlir::cir::CastKind::integral, Right, - EncompassingCIRTy); + Right = + builder.createCast(cir::CastKind::integral, Right, EncompassingCIRTy); // Perform the operation on the extended values. - mlir::cir::BinOpOverflowKind OpKind; + cir::BinOpOverflowKind OpKind; switch (BuiltinID) { default: llvm_unreachable("Unknown overflow builtin id."); case Builtin::BI__builtin_add_overflow: - OpKind = mlir::cir::BinOpOverflowKind::Add; + OpKind = cir::BinOpOverflowKind::Add; break; case Builtin::BI__builtin_sub_overflow: - OpKind = mlir::cir::BinOpOverflowKind::Sub; + OpKind = cir::BinOpOverflowKind::Sub; break; case Builtin::BI__builtin_mul_overflow: - OpKind = mlir::cir::BinOpOverflowKind::Mul; + OpKind = cir::BinOpOverflowKind::Mul; break; } @@ -1838,7 +1830,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Address ResultPtr = buildPointerWithAlignment(ResultArg); // Decide which of the arithmetic operation we are lowering to: - mlir::cir::BinOpOverflowKind ArithKind; + cir::BinOpOverflowKind ArithKind; switch (BuiltinID) { default: llvm_unreachable("Unknown overflow builtin id."); @@ -1848,7 +1840,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sadd_overflow: case Builtin::BI__builtin_saddl_overflow: case Builtin::BI__builtin_saddll_overflow: - ArithKind = mlir::cir::BinOpOverflowKind::Add; + ArithKind = cir::BinOpOverflowKind::Add; break; case Builtin::BI__builtin_usub_overflow: case Builtin::BI__builtin_usubl_overflow: @@ -1856,7 +1848,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_ssub_overflow: case Builtin::BI__builtin_ssubl_overflow: case Builtin::BI__builtin_ssubll_overflow: - ArithKind = mlir::cir::BinOpOverflowKind::Sub; + ArithKind = cir::BinOpOverflowKind::Sub; break; case Builtin::BI__builtin_umul_overflow: case Builtin::BI__builtin_umull_overflow: @@ -1864,14 +1856,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_smul_overflow: case Builtin::BI__builtin_smull_overflow: case Builtin::BI__builtin_smulll_overflow: - ArithKind = mlir::cir::BinOpOverflowKind::Mul; + ArithKind = cir::BinOpOverflowKind::Mul; break; } clang::QualType ResultQTy = ResultArg->getType()->castAs()->getPointeeType(); auto ResultCIRTy = - mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); + mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); auto Loc = getLoc(E->getSourceRange()); auto ArithResult = @@ -2310,7 +2302,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, if (auto V = buildTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { switch (EvalKind) { case cir::TEK_Scalar: - if (mlir::isa(V.getType())) + if (mlir::isa(V.getType())) return RValue::get(nullptr); return RValue::get(V); case cir::TEK_Aggregate: @@ -2415,9 +2407,9 @@ void CIRGenFunction::buildVAStartEnd(mlir::Value ArgValue, bool IsStart) { // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this // early, defer to LLVM lowering. if (IsStart) - builder.create(ArgValue.getLoc(), ArgValue); + builder.create(ArgValue.getLoc(), ArgValue); else - builder.create(ArgValue.getLoc(), ArgValue); + builder.create(ArgValue.getLoc(), ArgValue); } /// Checks if using the result of __builtin_object_size(p, @p From) in place of @@ -2439,7 +2431,7 @@ static bool areBOSTypesCompatible(int From, int To) { /// and we wouldn't otherwise try to reference a pass_object_size parameter, /// we'll call `cir.objsize` on EmittedE, rather than emitting E. mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, - mlir::cir::IntType ResType, + cir::IntType ResType, mlir::Value EmittedE, bool IsDynamic) { // We need to reference an argument if the pointer is a parameter with the @@ -2468,23 +2460,22 @@ mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, llvm_unreachable("NYI"); auto Ptr = EmittedE ? EmittedE : buildScalarExpr(E); - assert(mlir::isa(Ptr.getType()) && + assert(mlir::isa(Ptr.getType()) && "Non-pointer passed to __builtin_object_size?"); // LLVM intrinsics (which CIR lowers to at some point, only supports 0 // and 2, account for that right now. - mlir::cir::SizeInfoType sizeInfoTy = ((Type & 2) != 0) - ? mlir::cir::SizeInfoType::min - : mlir::cir::SizeInfoType::max; + cir::SizeInfoType sizeInfoTy = + ((Type & 2) != 0) ? cir::SizeInfoType::min : cir::SizeInfoType::max; // TODO(cir): Heads up for LLVM lowering, For GCC compatibility, // __builtin_object_size treat NULL as unknown size. - return builder.create( - getLoc(E->getSourceRange()), ResType, Ptr, sizeInfoTy, IsDynamic); + return builder.create(getLoc(E->getSourceRange()), ResType, + Ptr, sizeInfoTy, IsDynamic); } mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize( - const Expr *E, unsigned Type, mlir::cir::IntType ResType, - mlir::Value EmittedE, bool IsDynamic) { + const Expr *E, unsigned Type, cir::IntType ResType, mlir::Value EmittedE, + bool IsDynamic) { uint64_t ObjectSize; if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic); @@ -2493,8 +2484,8 @@ mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize( /// Given a builtin id for a function like "__builtin_fabsf", return a Function* /// for "fabsf". -mlir::cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *FD, - unsigned BuiltinID) { +cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *FD, + unsigned BuiltinID) { assert(astCtx.BuiltinInfo.isLibFunction(BuiltinID)); // Get the name, skip over the __builtin_ prefix (if necessary). diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 11fa4e2f94a4..02f6f4673002 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -38,7 +38,7 @@ using namespace clang; using namespace clang::CIRGen; -using namespace mlir::cir; +using namespace cir; using namespace llvm; enum { @@ -1899,25 +1899,24 @@ findARMVectorIntrinsicInMap(ArrayRef IntrinsicMap, return nullptr; } -static mlir::cir::VectorType GetNeonType(CIRGenFunction *CGF, - NeonTypeFlags TypeFlags, - bool HasLegalHalfType = true, - bool V1Ty = false, - bool AllowBFloatArgsAndRet = true) { +static cir::VectorType GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, + bool HasLegalHalfType = true, + bool V1Ty = false, + bool AllowBFloatArgsAndRet = true) { int IsQuad = TypeFlags.isQuad(); switch (TypeFlags.getEltType()) { case NeonTypeFlags::Int8: case NeonTypeFlags::Poly8: - return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), - TypeFlags.isUnsigned() ? CGF->UInt8Ty - : CGF->SInt8Ty, - V1Ty ? 1 : (8 << IsQuad)); + return cir::VectorType::get(CGF->getBuilder().getContext(), + TypeFlags.isUnsigned() ? CGF->UInt8Ty + : CGF->SInt8Ty, + V1Ty ? 1 : (8 << IsQuad)); case NeonTypeFlags::Int16: case NeonTypeFlags::Poly16: - return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), - TypeFlags.isUnsigned() ? CGF->UInt16Ty - : CGF->SInt16Ty, - V1Ty ? 1 : (4 << IsQuad)); + return cir::VectorType::get(CGF->getBuilder().getContext(), + TypeFlags.isUnsigned() ? CGF->UInt16Ty + : CGF->SInt16Ty, + V1Ty ? 1 : (4 << IsQuad)); case NeonTypeFlags::BFloat16: if (AllowBFloatArgsAndRet) llvm_unreachable("NYI"); @@ -1929,29 +1928,29 @@ static mlir::cir::VectorType GetNeonType(CIRGenFunction *CGF, else llvm_unreachable("NYI"); case NeonTypeFlags::Int32: - return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), - TypeFlags.isUnsigned() ? CGF->UInt32Ty - : CGF->SInt32Ty, - V1Ty ? 1 : (2 << IsQuad)); + return cir::VectorType::get(CGF->getBuilder().getContext(), + TypeFlags.isUnsigned() ? CGF->UInt32Ty + : CGF->SInt32Ty, + V1Ty ? 1 : (2 << IsQuad)); case NeonTypeFlags::Int64: case NeonTypeFlags::Poly64: - return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), - TypeFlags.isUnsigned() ? CGF->UInt64Ty - : CGF->SInt64Ty, - V1Ty ? 1 : (1 << IsQuad)); + return cir::VectorType::get(CGF->getBuilder().getContext(), + TypeFlags.isUnsigned() ? CGF->UInt64Ty + : CGF->SInt64Ty, + V1Ty ? 1 : (1 << IsQuad)); case NeonTypeFlags::Poly128: // FIXME: i128 and f128 doesn't get fully support in Clang and llvm. // There is a lot of i128 and f128 API missing. // so we use v16i8 to represent poly128 and get pattern matched. llvm_unreachable("NYI"); case NeonTypeFlags::Float32: - return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), - CGF->getCIRGenModule().FloatTy, - V1Ty ? 1 : (2 << IsQuad)); + return cir::VectorType::get(CGF->getBuilder().getContext(), + CGF->getCIRGenModule().FloatTy, + V1Ty ? 1 : (2 << IsQuad)); case NeonTypeFlags::Float64: - return mlir::cir::VectorType::get(CGF->getBuilder().getContext(), - CGF->getCIRGenModule().DoubleTy, - V1Ty ? 1 : (1 << IsQuad)); + return cir::VectorType::get(CGF->getBuilder().getContext(), + CGF->getCIRGenModule().DoubleTy, + V1Ty ? 1 : (1 << IsQuad)); } llvm_unreachable("Unknown vector element type!"); } @@ -2006,7 +2005,7 @@ static mlir::Value buildAArch64TblBuiltinExpr(CIRGenFunction &CGF, // Determine the type of this overloaded NEON intrinsic. NeonTypeFlags Type = Result->getZExtValue(); - mlir::cir::VectorType Ty = GetNeonType(&CGF, Type); + cir::VectorType Ty = GetNeonType(&CGF, Type); if (!Ty) return nullptr; @@ -2139,19 +2138,18 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, // which can be found under LLVM IR directory. mlir::Type funcResTy = builder.getSInt64Ty(); mlir::Location loc = cgf.getLoc(clangCallExpr->getExprLoc()); - mlir::cir::LLVMIntrinsicCallOp op = - builder.create( - loc, builder.getStringAttr(intrinsicName), funcResTy, loadAddr); + cir::LLVMIntrinsicCallOp op = builder.create( + loc, builder.getStringAttr(intrinsicName), funcResTy, loadAddr); mlir::Value res = op.getResult(); // Convert result type to the expected type. - if (mlir::isa(realResTy)) { + if (mlir::isa(realResTy)) { return builder.createIntToPtr(res, realResTy); } - mlir::cir::IntType intResTy = + cir::IntType intResTy = builder.getSIntNTy(cgf.CGM.getDataLayout().getTypeSizeInBits(realResTy)); mlir::Value intCastRes = builder.createIntCast(res, intResTy); - if (mlir::isa(realResTy)) { + if (mlir::isa(realResTy)) { return builder.createIntCast(intCastRes, realResTy); } else { // Above cases should cover most situations and we have test coverage. @@ -2162,30 +2160,28 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, /// Given a vector of int type `vecTy`, return a vector type of /// int type with the same element type width, different signedness, /// and the same vector size. -static mlir::cir::VectorType -getSignChangedVectorType(CIRGenBuilderTy &builder, - mlir::cir::VectorType vecTy) { - auto elemTy = mlir::cast(vecTy.getEltType()); +static cir::VectorType getSignChangedVectorType(CIRGenBuilderTy &builder, + cir::VectorType vecTy) { + auto elemTy = mlir::cast(vecTy.getEltType()); elemTy = elemTy.isSigned() ? builder.getUIntNTy(elemTy.getWidth()) : builder.getSIntNTy(elemTy.getWidth()); - return mlir::cir::VectorType::get(builder.getContext(), elemTy, - vecTy.getSize()); + return cir::VectorType::get(builder.getContext(), elemTy, vecTy.getSize()); } -static mlir::cir::VectorType +static cir::VectorType getHalfEltSizeTwiceNumElemsVecType(CIRGenBuilderTy &builder, - mlir::cir::VectorType vecTy) { - auto elemTy = mlir::cast(vecTy.getEltType()); + cir::VectorType vecTy) { + auto elemTy = mlir::cast(vecTy.getEltType()); elemTy = elemTy.isSigned() ? builder.getSIntNTy(elemTy.getWidth() / 2) : builder.getUIntNTy(elemTy.getWidth() / 2); - return mlir::cir::VectorType::get(builder.getContext(), elemTy, - vecTy.getSize() * 2); + return cir::VectorType::get(builder.getContext(), elemTy, + vecTy.getSize() * 2); } /// Get integer from a mlir::Value that is an int constant or a constant op. static int64_t getIntValueFromConstOp(mlir::Value val) { - auto constOp = mlir::cast(val.getDefiningOp()); - return (mlir::cast(constOp.getValue())) + auto constOp = mlir::cast(val.getDefiningOp()); + return (mlir::cast(constOp.getValue())) .getValue() .getSExtValue(); } @@ -2195,7 +2191,7 @@ static int64_t getIntValueFromConstOp(mlir::Value val) { /// a const vector of `vecTy` which is the return of this function static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, mlir::Value shiftVal, - mlir::cir::VectorType vecTy, + cir::VectorType vecTy, mlir::Location loc, bool neg) { int shiftAmt = getIntValueFromConstOp(shiftVal); if (neg) @@ -2203,29 +2199,29 @@ static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, llvm::SmallVector vecAttr{ vecTy.getSize(), // ConstVectorAttr requires cir::IntAttr - mlir::cir::IntAttr::get(vecTy.getEltType(), shiftAmt)}; - mlir::cir::ConstVectorAttr constVecAttr = mlir::cir::ConstVectorAttr::get( + cir::IntAttr::get(vecTy.getEltType(), shiftAmt)}; + cir::ConstVectorAttr constVecAttr = cir::ConstVectorAttr::get( vecTy, mlir::ArrayAttr::get(builder.getContext(), vecAttr)); - return builder.create(loc, vecTy, constVecAttr); + return builder.create(loc, vecTy, constVecAttr); } /// Build ShiftOp of vector type whose shift amount is a vector built /// from a constant integer using `buildNeonShiftVector` function static mlir::Value buildCommonNeonShift(CIRGenBuilderTy &builder, mlir::Location loc, - mlir::cir::VectorType resTy, + cir::VectorType resTy, mlir::Value shifTgt, mlir::Value shiftAmt, bool shiftLeft, bool negAmt = false) { shiftAmt = buildNeonShiftVector(builder, shiftAmt, resTy, loc, negAmt); - return builder.create( + return builder.create( loc, resTy, builder.createBitcast(shifTgt, resTy), shiftAmt, shiftLeft); } /// Right-shift a vector by a constant. static mlir::Value buildNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec, mlir::Value shiftVal, - mlir::cir::VectorType vecTy, bool usgn, + cir::VectorType vecTy, bool usgn, mlir::Location loc) { CIRGenBuilderTy &builder = cgf.getBuilder(); int64_t shiftAmt = getIntValueFromConstOp(shiftVal); @@ -2266,9 +2262,9 @@ mlir::Value buildNeonCall(CIRGenBuilderTy &builder, assert(!cir::MissingFeatures::buildConstrainedFPCall()); } if (shift > 0 && shift == j) { - args[j] = buildNeonShiftVector( - builder, args[j], mlir::cast(argTypes[j]), loc, - rightshift); + args[j] = buildNeonShiftVector(builder, args[j], + mlir::cast(argTypes[j]), + loc, rightshift); } else { args[j] = builder.createBitcast(args[j], argTypes[j]); } @@ -2278,7 +2274,7 @@ mlir::Value buildNeonCall(CIRGenBuilderTy &builder, return nullptr; } return builder - .create( + .create( loc, builder.getStringAttr(intrinsicName), funcResTy, args) .getResult(); } @@ -2329,8 +2325,8 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( const bool allowBFloatArgsAndRet = getTargetHooks().getABIInfo().allowBFloatArgsAndRet(); - mlir::cir::VectorType vTy = GetNeonType(this, neonType, hasLegalHalfType, - false, allowBFloatArgsAndRet); + cir::VectorType vTy = GetNeonType(this, neonType, hasLegalHalfType, false, + allowBFloatArgsAndRet); mlir::Type ty = vTy; if (!ty) return nullptr; @@ -2345,16 +2341,16 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( default: break; case NEON::BI__builtin_neon_vmovl_v: { - mlir::cir::VectorType dTy = builder.getExtendedOrTruncatedElementVectorType( + cir::VectorType dTy = builder.getExtendedOrTruncatedElementVectorType( vTy, false /* truncate */, - mlir::cast(vTy.getEltType()).isSigned()); + mlir::cast(vTy.getEltType()).isSigned()); // This cast makes sure arg type conforms intrinsic expected arg type. ops[0] = builder.createBitcast(ops[0], dTy); return builder.createIntCast(ops[0], ty); } case NEON::BI__builtin_neon_vmovn_v: { - mlir::cir::VectorType qTy = builder.getExtendedOrTruncatedElementVectorType( - vTy, true, mlir::cast(vTy.getEltType()).isSigned()); + cir::VectorType qTy = builder.getExtendedOrTruncatedElementVectorType( + vTy, true, mlir::cast(vTy.getEltType()).isSigned()); ops[0] = builder.createBitcast(ops[0], qTy); // It really is truncation in this context. // In CIR, integral cast op supports vector of int type truncating. @@ -2363,8 +2359,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vpaddl_v: case NEON::BI__builtin_neon_vpaddlq_v: { // The source operand type has twice as many elements of half the size. - mlir::cir::VectorType narrowTy = - getHalfEltSizeTwiceNumElemsVecType(builder, vTy); + cir::VectorType narrowTy = getHalfEltSizeTwiceNumElemsVecType(builder, vTy); return buildNeonCall(builder, {narrowTy}, ops, isUnsigned ? "aarch64.neon.uaddlp" : "aarch64.neon.saddlp", @@ -2386,13 +2381,13 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vqdmulh_lane_v: case NEON::BI__builtin_neon_vqrdmulhq_lane_v: case NEON::BI__builtin_neon_vqrdmulh_lane_v: { - mlir::cir::VectorType resTy = + cir::VectorType resTy = (builtinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || builtinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v) - ? mlir::cir::VectorType::get(&getMLIRContext(), vTy.getEltType(), - vTy.getSize() * 2) + ? cir::VectorType::get(&getMLIRContext(), vTy.getEltType(), + vTy.getSize() * 2) : vTy; - mlir::cir::VectorType mulVecT = + cir::VectorType mulVecT = GetNeonType(this, NeonTypeFlags(neonType.getEltType(), false, /*isQuad*/ false)); return buildNeonCall(builder, {resTy, mulVecT, SInt32Ty}, ops, @@ -2406,7 +2401,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vqshluq_n_v: { // These intrinsics expect signed vector type as input, but // return unsigned vector type. - mlir::cir::VectorType srcTy = getSignChangedVectorType(builder, vTy); + cir::VectorType srcTy = getSignChangedVectorType(builder, vTy); return buildNeonCall(builder, {srcTy, srcTy}, ops, "aarch64.neon.sqshlu", vTy, getLoc(e->getExprLoc()), false, /* not fp constrained op */ @@ -2430,10 +2425,9 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( } case NEON::BI__builtin_neon_vshll_n_v: { mlir::Location loc = getLoc(e->getExprLoc()); - mlir::cir::VectorType srcTy = - builder.getExtendedOrTruncatedElementVectorType( - vTy, false /* truncate */, - mlir::cast(vTy.getEltType()).isSigned()); + cir::VectorType srcTy = builder.getExtendedOrTruncatedElementVectorType( + vTy, false /* truncate */, + mlir::cast(vTy.getEltType()).isSigned()); ops[0] = builder.createBitcast(ops[0], srcTy); // The following cast will be lowered to SExt or ZExt in LLVM. ops[0] = builder.createIntCast(ops[0], vTy); @@ -2441,10 +2435,9 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( } case NEON::BI__builtin_neon_vshrn_n_v: { mlir::Location loc = getLoc(e->getExprLoc()); - mlir::cir::VectorType srcTy = - builder.getExtendedOrTruncatedElementVectorType( - vTy, true /* extended */, - mlir::cast(vTy.getEltType()).isSigned()); + cir::VectorType srcTy = builder.getExtendedOrTruncatedElementVectorType( + vTy, true /* extended */, + mlir::cast(vTy.getEltType()).isSigned()); ops[0] = builder.createBitcast(ops[0], srcTy); ops[0] = buildCommonNeonShift(builder, loc, srcTy, ops[0], ops[1], false); return builder.createIntCast(ops[0], vTy); @@ -2461,8 +2454,8 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( ops[0] = builder.createAnd(ops[0], ops[1]); // Note that during vmVM Lowering, result of `VecCmpOp` is sign extended, // matching traditional codegen behavior. - return builder.create( - loc, ty, mlir::cir::CmpOpKind::ne, ops[0], builder.getZero(loc, ty)); + return builder.create(loc, ty, cir::CmpOpKind::ne, ops[0], + builder.getZero(loc, ty)); } } @@ -2526,7 +2519,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "aarch64.neon.sqxtn"; argTypes.push_back(builder.getExtendedOrTruncatedElementVectorType( vTy, true /* extended */, - mlir::cast(vTy.getEltType()).isSigned())); + mlir::cast(vTy.getEltType()).isSigned())); break; } @@ -3076,8 +3069,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vsetq_lane_i64: case NEON::BI__builtin_neon_vsetq_lane_f32: Ops.push_back(buildScalarExpr(E->getArg(2))); - return builder.create(getLoc(E->getExprLoc()), - Ops[1], Ops[0], Ops[2]); + return builder.create(getLoc(E->getExprLoc()), Ops[1], + Ops[0], Ops[2]); case NEON::BI__builtin_neon_vset_lane_bf16: case NEON::BI__builtin_neon_vsetq_lane_bf16: // No support for now as no real/test case for them @@ -3094,75 +3087,75 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vget_lane_i8: case NEON::BI__builtin_neon_vdupb_lane_i8: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt8Ty, 8)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), UInt8Ty, 8)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i8: case NEON::BI__builtin_neon_vdupb_laneq_i8: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt8Ty, 16)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), UInt8Ty, 16)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i16: case NEON::BI__builtin_neon_vduph_lane_i16: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt16Ty, 4)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), UInt16Ty, 4)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i16: case NEON::BI__builtin_neon_vduph_laneq_i16: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt16Ty, 8)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), UInt16Ty, 8)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i32: case NEON::BI__builtin_neon_vdups_lane_i32: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt32Ty, 2)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), UInt32Ty, 2)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_f32: case NEON::BI__builtin_neon_vdups_lane_f32: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), FloatTy, 2)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), FloatTy, 2)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i32: case NEON::BI__builtin_neon_vdups_laneq_i32: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt32Ty, 4)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), UInt32Ty, 4)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i64: case NEON::BI__builtin_neon_vdupd_lane_i64: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt64Ty, 1)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), UInt64Ty, 1)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vdupd_lane_f64: case NEON::BI__builtin_neon_vget_lane_f64: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), DoubleTy, 1)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), DoubleTy, 1)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i64: case NEON::BI__builtin_neon_vdupd_laneq_i64: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), UInt64Ty, 2)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), UInt64Ty, 2)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_f32: case NEON::BI__builtin_neon_vdups_laneq_f32: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), FloatTy, 4)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), FloatTy, 4)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_f64: case NEON::BI__builtin_neon_vdupd_laneq_f64: Ops[0] = builder.createBitcast( - Ops[0], mlir::cir::VectorType::get(&getMLIRContext(), DoubleTy, 2)); - return builder.create( - getLoc(E->getExprLoc()), Ops[0], buildScalarExpr(E->getArg(1))); + Ops[0], cir::VectorType::get(&getMLIRContext(), DoubleTy, 2)); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + buildScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vaddh_f16: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vsubh_f16: @@ -3255,7 +3248,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } } - mlir::cir::VectorType ty = GetNeonType(this, Type); + cir::VectorType ty = GetNeonType(this, Type); if (!ty) return nullptr; @@ -3273,7 +3266,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, buildAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) return V; - mlir::cir::VectorType vTy = ty; + cir::VectorType vTy = ty; llvm::SmallVector args; switch (BuiltinID) { default: @@ -3306,9 +3299,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::StringRef name = usgn ? "aarch64.neon.umull" : "aarch64.neon.smull"; if (Type.isPoly()) name = "aarch64.neon.pmull"; - mlir::cir::VectorType argTy = - builder.getExtendedOrTruncatedElementVectorType( - ty, false /* truncated */, !usgn); + cir::VectorType argTy = builder.getExtendedOrTruncatedElementVectorType( + ty, false /* truncated */, !usgn); return buildNeonCall(builder, {argTy, argTy}, Ops, name, ty, getLoc(E->getExprLoc())); } @@ -3321,7 +3313,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vmin_v: case NEON::BI__builtin_neon_vminq_v: { llvm::StringRef name = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin"; - if (mlir::cir::isFPOrFPVectorTy(ty)) + if (cir::isFPOrFPVectorTy(ty)) name = "aarch64.neon.fmin"; return buildNeonCall(builder, {ty, ty}, Ops, name, ty, getLoc(E->getExprLoc())); @@ -3332,7 +3324,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vabd_v: case NEON::BI__builtin_neon_vabdq_v: { llvm::StringRef name = usgn ? "aarch64.neon.uabd" : "aarch64.neon.sabd"; - if (mlir::cir::isFPOrFPVectorTy(ty)) + if (cir::isFPOrFPVectorTy(ty)) name = "aarch64.neon.fabd"; return buildNeonCall(builder, {ty, ty}, Ops, name, ty, getLoc(E->getExprLoc())); @@ -3381,7 +3373,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, builder, {builder.getExtendedOrTruncatedElementVectorType( vTy, true /* extend */, - mlir::cast(vTy.getEltType()).isSigned()), + mlir::cast(vTy.getEltType()).isSigned()), SInt32Ty}, Ops, "aarch64.neon.rshrn", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqrshrn_n_v: @@ -3733,8 +3725,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, Ops[1] = builder.createBitcast(Ops[1], vTy); Ops[0] = builder.createAlignedLoad(Ops[0].getLoc(), vTy.getEltType(), Ops[0], PtrOp0.getAlignment()); - return builder.create(getLoc(E->getExprLoc()), - Ops[1], Ops[0], Ops[2]); + return builder.create(getLoc(E->getExprLoc()), Ops[1], + Ops[0], Ops[2]); } case NEON::BI__builtin_neon_vldap1_lane_s64: case NEON::BI__builtin_neon_vldap1q_lane_s64: { @@ -3744,15 +3736,14 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vld1q_dup_v: { Address ptrAddr = PtrOp0.withElementType(vTy.getEltType()); mlir::Value val = builder.createLoad(getLoc(E->getExprLoc()), ptrAddr); - mlir::cir::VecSplatOp vecSplat = builder.create( - getLoc(E->getExprLoc()), vTy, val); + cir::VecSplatOp vecSplat = + builder.create(getLoc(E->getExprLoc()), vTy, val); return vecSplat; } case NEON::BI__builtin_neon_vst1_lane_v: case NEON::BI__builtin_neon_vst1q_lane_v: { Ops[1] = builder.createBitcast(Ops[1], ty); - Ops[1] = builder.create(Ops[1].getLoc(), Ops[1], - Ops[2]); + Ops[1] = builder.create(Ops[1].getLoc(), Ops[1], Ops[2]); (void)builder.createAlignedStore(getLoc(E->getExprLoc()), Ops[1], Ops[0], PtrOp0.getAlignment()); return Ops[1]; @@ -3851,8 +3842,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, indices.push_back(i + vi); indices.push_back(i + e + vi); } - mlir::cir::ConstantOp idx = builder.getConstInt(loc, SInt32Ty, vi); - mlir::Value addr = builder.create( + cir::ConstantOp idx = builder.getConstInt(loc, SInt32Ty, vi); + mlir::Value addr = builder.create( loc, baseAddr.getType(), baseAddr, idx); sv = builder.createVecShuffle(loc, Ops[1], Ops[2], indices); (void)builder.CIRBaseBuilderTy::createStore(loc, sv, addr); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp index ffcaae1eb5b0..76fe5315009e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp @@ -29,7 +29,7 @@ using namespace clang; using namespace clang::CIRGen; -using namespace mlir::cir; +using namespace cir; mlir::Value CIRGenFunction::buildX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E) { diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index f77b61e51a56..beadfbb26a23 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -115,7 +115,7 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { auto Linkage = getFunctionLinkage(AliasDecl); // We can't use an alias if the linkage is not valid for one. - if (!mlir::cir::isValidLinkage(Linkage)) + if (!cir::isValidLinkage(Linkage)) return true; auto TargetLinkage = getFunctionLinkage(TargetDecl); @@ -123,8 +123,7 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // Check if we have it already. StringRef MangledName = getMangledName(AliasDecl); auto Entry = getGlobalValue(MangledName); - auto globalValue = - dyn_cast_or_null(Entry); + auto globalValue = dyn_cast_or_null(Entry); if (Entry && globalValue && !globalValue.isDeclaration()) return false; if (Replacements.count(MangledName)) @@ -133,14 +132,13 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { [[maybe_unused]] auto AliasValueType = getTypes().GetFunctionType(AliasDecl); // Find the referent. - auto Aliasee = cast(GetAddrOfGlobal(TargetDecl)); - auto AliaseeGV = dyn_cast_or_null( + auto Aliasee = cast(GetAddrOfGlobal(TargetDecl)); + auto AliaseeGV = dyn_cast_or_null( GetAddrOfGlobal(TargetDecl)); // Instead of creating as alias to a linkonce_odr, replace all of the uses // of the aliasee. - if (mlir::cir::isDiscardableIfUnused(Linkage) && - !(TargetLinkage == - mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage && + if (cir::isDiscardableIfUnused(Linkage) && + !(TargetLinkage == cir::GlobalLinkageKind::AvailableExternallyLinkage && TargetDecl.getDecl()->hasAttr())) { // FIXME: An extern template instantiation will create functions with // linkage "AvailableExternally". In libc++, some classes also define @@ -155,7 +153,7 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // COFF. A COFF weak external alias cannot satisfy a normal undefined // symbol reference from another TU. The other TU must also mark the // referenced symbol as weak, which we cannot rely on. - if (mlir::cir::isWeakForLinker(Linkage) && getTriple().isOSBinFormatCOFF()) { + if (cir::isWeakForLinker(Linkage) && getTriple().isOSBinFormatCOFF()) { llvm_unreachable("NYI"); } @@ -170,7 +168,7 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // different COMDATs in different TUs. Another option would be to // output the alias both for weak_odr and linkonce_odr, but that // requires explicit comdat support in the IL. - if (mlir::cir::isWeakForLinker(TargetLinkage)) + if (cir::isWeakForLinker(TargetLinkage)) llvm_unreachable("NYI"); // Create the alias with no name. @@ -250,7 +248,7 @@ static void buildDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) { // generated elsewhere which uses atexit instead, and it takes the destructor // directly. auto UsingExternalHelper = CGM.getCodeGenOpts().CXAAtExit; - mlir::cir::FuncOp fnOp; + cir::FuncOp fnOp; if (Record && (CanRegisterDestructor || UsingExternalHelper)) { assert(!D->getTLSKind() && "TLS NYI"); assert(!Record->hasTrivialDestructor()); @@ -273,7 +271,7 @@ static void buildDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) { CGM.getCXXABI().registerGlobalDtor(CGF, D, fnOp, nullptr); } -mlir::cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { +cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { const auto &FnInfo = getTypes().arrangeCXXStructorDeclaration(GD); auto Fn = getAddrOfCXXStructor(GD, &FnInfo, /*FnType=*/nullptr, /*DontDefer=*/true, ForDefinition); @@ -308,7 +306,7 @@ void CIRGenFunction::buildInvariantStart([[maybe_unused]] CharUnits Size) { } void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, - mlir::cir::GlobalOp addr, + cir::GlobalOp addr, bool performInit) { const Expr *init = varDecl->getInit(); QualType ty = varDecl->getType(); @@ -344,7 +342,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, CIRGenFunction::SourceLocRAIIObject fnLoc{cgf, getLoc(varDecl->getLocation())}; - addr.setAstAttr(mlir::cir::ASTVarDeclAttr::get(&getMLIRContext(), varDecl)); + addr.setAstAttr(cir::ASTVarDeclAttr::get(&getMLIRContext(), varDecl)); if (ty->isReferenceType()) { mlir::OpBuilder::InsertionGuard guard(builder); @@ -365,18 +363,17 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, mlir::Operation *rvalueDefOp = rv.getScalarVal().getDefiningOp(); if (rvalueDefOp && rvalueDefOp->getBlock()) { mlir::Block *rvalSrcBlock = rvalueDefOp->getBlock(); - if (!rvalSrcBlock->empty() && - isa(rvalSrcBlock->back())) { + if (!rvalSrcBlock->empty() && isa(rvalSrcBlock->back())) { auto &front = rvalSrcBlock->front(); getGlobal.getDefiningOp()->moveBefore(&front); - auto yield = cast(rvalSrcBlock->back()); + auto yield = cast(rvalSrcBlock->back()); builder.setInsertionPoint(yield); } } cgf.buildStoreOfScalar(rv.getScalarVal(), declAddr, false, ty); } builder.setInsertionPointToEnd(block); - builder.create(addr->getLoc()); + builder.create(addr->getLoc()); } else { bool needsDtor = varDecl->needsDestruction(getASTContext()) == QualType::DK_cxx_destructor; @@ -395,7 +392,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, getASTContext().getDeclAlign(varDecl)); buildDeclInit(cgf, varDecl, declAddr); builder.setInsertionPointToEnd(block); - builder.create(addr->getLoc()); + builder.create(addr->getLoc()); } if (isConstantStorage) { @@ -418,7 +415,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, // Don't confuse lexical cleanup. builder.clearInsertionPoint(); } else - builder.create(addr->getLoc()); + builder.create(addr->getLoc()); } } } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp index 8ee041423e00..585dd78bab34 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -70,7 +70,7 @@ void CIRGenCXXABI::buildThisParam(CIRGenFunction &CGF, } } -mlir::cir::GlobalLinkageKind CIRGenCXXABI::getCXXDestructorLinkage( +cir::GlobalLinkageKind CIRGenCXXABI::getCXXDestructorLinkage( GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const { // Delegate back to CGM by default. return CGM.getCIRLinkageForDeclarator(Dtor, Linkage, diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index dae488656474..587a1ce9c880 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -179,7 +179,7 @@ class CIRGenCXXABI { /// \param Dtor - a function taking a single pointer argument /// \param Addr - a pointer to pass to the destructor function. virtual void registerGlobalDtor(CIRGenFunction &CGF, const VarDecl *D, - mlir::cir::FuncOp dtor, mlir::Value Addr) = 0; + cir::FuncOp dtor, mlir::Value Addr) = 0; virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, FunctionArgList &Args) const = 0; @@ -188,8 +188,8 @@ class CIRGenCXXABI { /// Get the address of the vtable for the given record decl which should be /// used for the vptr at the given offset in RD. - virtual mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, - CharUnits VPtrOffset) = 0; + virtual cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, + CharUnits VPtrOffset) = 0; /// Build a virtual function pointer in the ABI-specific way. virtual CIRGenCallee getVirtualFunctionPointer(CIRGenFunction &CGF, @@ -227,7 +227,7 @@ class CIRGenCXXABI { virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, CXXDtorType DT) const = 0; - virtual mlir::cir::GlobalLinkageKind + virtual cir::GlobalLinkageKind getCXXDestructorLinkage(GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const; @@ -244,10 +244,10 @@ class CIRGenCXXABI { const CXXRecordDecl *NearestVBase) = 0; /// Gets the pure virtual member call function. - virtual StringRef getPureVirtualCallName() = 0; + virtual llvm::StringRef getPureVirtualCallName() = 0; /// Gets the deleted virtual member call name. - virtual StringRef getDeletedVirtualCallName() = 0; + virtual llvm::StringRef getDeletedVirtualCallName() = 0; /// Specify how one should pass an argument of a record type. enum class RecordArgABI { @@ -343,12 +343,11 @@ class CIRGenCXXABI { virtual mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, QualType DestRecordTy, - mlir::cir::PointerType DestCIRTy, + cir::PointerType DestCIRTy, bool isRefCast, Address Src) = 0; - virtual mlir::cir::MethodAttr - buildVirtualMethodAttr(mlir::cir::MethodType MethodTy, - const CXXMethodDecl *MD) = 0; + virtual cir::MethodAttr buildVirtualMethodAttr(cir::MethodType MethodTy, + const CXXMethodDecl *MD) = 0; }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index ca4904d60693..64b4c2f0957f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -39,7 +39,7 @@ using namespace clang; using namespace clang::CIRGen; CIRGenFunctionInfo *CIRGenFunctionInfo::create( - mlir::cir::CallingConv cirCC, bool instanceMethod, bool chainCall, + cir::CallingConv cirCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &info, llvm::ArrayRef paramInfos, CanQualType resultType, llvm::ArrayRef argTypes, RequiredArgs required) { @@ -195,12 +195,12 @@ static bool hasInAllocaArgs(CIRGenModule &CGM, CallingConv ExplicitCC, return false; } -mlir::cir::FuncType CIRGenTypes::GetFunctionType(GlobalDecl GD) { +cir::FuncType CIRGenTypes::GetFunctionType(GlobalDecl GD) { const CIRGenFunctionInfo &FI = arrangeGlobalDeclaration(GD); return GetFunctionType(FI); } -mlir::cir::FuncType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { +cir::FuncType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { bool Inserted = FunctionsBeingProcessed.insert(&FI).second; (void)Inserted; assert(Inserted && "Recursively being processed?"); @@ -260,12 +260,12 @@ mlir::cir::FuncType CIRGenTypes::GetFunctionType(const CIRGenFunctionInfo &FI) { (void)Erased; assert(Erased && "Not in set?"); - return mlir::cir::FuncType::get( - ArgTypes, (resultType ? resultType : Builder.getVoidTy()), - FI.isVariadic()); + return cir::FuncType::get(ArgTypes, + (resultType ? resultType : Builder.getVoidTy()), + FI.isVariadic()); } -mlir::cir::FuncType CIRGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { +cir::FuncType CIRGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { const CXXMethodDecl *MD = cast(GD.getDecl()); const FunctionProtoType *FPT = MD->getType()->getAs(); @@ -322,7 +322,7 @@ static void AddAttributesFromFunctionProtoType(CIRGenBuilderTy &builder, if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && FPT->isNothrow()) { - auto nu = mlir::cir::NoThrowAttr::get(builder.getContext()); + auto nu = cir::NoThrowAttr::get(builder.getContext()); FuncAttrs.set(nu.getMnemonic(), nu); } } @@ -348,7 +348,7 @@ void CIRGenModule::constructAttributeList(StringRef Name, const CIRGenFunctionInfo &FI, CIRGenCalleeInfo CalleeInfo, mlir::NamedAttrList &funcAttrs, - mlir::cir::CallingConv &callingConv, + cir::CallingConv &callingConv, bool AttrOnCallSite, bool IsThunk) { // Implementation Disclaimer // @@ -382,7 +382,7 @@ void CIRGenModule::constructAttributeList(StringRef Name, if (TargetDecl) { if (TargetDecl->hasAttr()) { - auto nu = mlir::cir::NoThrowAttr::get(&getMLIRContext()); + auto nu = cir::NoThrowAttr::get(&getMLIRContext()); funcAttrs.set(nu.getMnemonic(), nu); } @@ -434,11 +434,11 @@ void CIRGenModule::constructAttributeList(StringRef Name, } if (TargetDecl->hasAttr()) { - auto cirKernelAttr = mlir::cir::OpenCLKernelAttr::get(&getMLIRContext()); + auto cirKernelAttr = cir::OpenCLKernelAttr::get(&getMLIRContext()); funcAttrs.set(cirKernelAttr.getMnemonic(), cirKernelAttr); - auto uniformAttr = mlir::cir::OpenCLKernelUniformWorkGroupSizeAttr::get( - &getMLIRContext()); + auto uniformAttr = + cir::OpenCLKernelUniformWorkGroupSizeAttr::get(&getMLIRContext()); if (getLangOpts().OpenCLVersion <= 120) { // OpenCL v1.2 Work groups are always uniform funcAttrs.set(uniformAttr.getMnemonic(), uniformAttr); @@ -465,24 +465,22 @@ void CIRGenModule::constructAttributeList(StringRef Name, getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, funcAttrs); } -static mlir::cir::CIRCallOpInterface -buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, - mlir::cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal, - mlir::cir::FuncOp directFuncOp, - SmallVectorImpl &CIRCallArgs, bool isInvoke, - mlir::cir::CallingConv callingConv, - mlir::cir::ExtraFuncAttributesAttr extraFnAttrs) { +static cir::CIRCallOpInterface buildCallLikeOp( + CIRGenFunction &CGF, mlir::Location callLoc, cir::FuncType indirectFuncTy, + mlir::Value indirectFuncVal, cir::FuncOp directFuncOp, + SmallVectorImpl &CIRCallArgs, bool isInvoke, + cir::CallingConv callingConv, cir::ExtraFuncAttributesAttr extraFnAttrs) { auto &builder = CGF.getBuilder(); auto getOrCreateSurroundingTryOp = [&]() { // In OG, we build the landing pad for this scope. In CIR, we emit a // synthetic cir.try because this didn't come from codegenerating from a // try/catch in C++. assert(CGF.currLexScope && "expected scope"); - mlir::cir::TryOp op = CGF.currLexScope->getClosestTryParent(); + cir::TryOp op = CGF.currLexScope->getClosestTryParent(); if (op) return op; - op = builder.create( + op = builder.create( *CGF.currSrcLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) {}, // Don't emit the code right away for catch clauses, for @@ -517,9 +515,9 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, assert(builder.getInsertionBlock() && "expected valid basic block"); } - mlir::cir::CallOp callOpWithExceptions; + cir::CallOp callOpWithExceptions; // TODO(cir): Set calling convention for `cir.try_call`. - assert(callingConv == mlir::cir::CallingConv::C && "NYI"); + assert(callingConv == cir::CallingConv::C && "NYI"); if (indirectFuncTy) { callOpWithExceptions = builder.createIndirectTryCallOp( callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs); @@ -535,7 +533,7 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, CGF.callWithExceptionCtx = nullptr; if (tryOp.getSynthetic()) { - builder.create(tryOp.getLoc()); + builder.create(tryOp.getLoc()); builder.restoreInsertionPoint(ip); } return callOpWithExceptions; @@ -544,10 +542,10 @@ buildCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, assert(builder.getInsertionBlock() && "expected valid basic block"); if (indirectFuncTy) { // TODO(cir): Set calling convention for indirect calls. - assert(callingConv == mlir::cir::CallingConv::C && "NYI"); - return builder.createIndirectCallOp( - callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs, - mlir::cir::CallingConv::C, extraFnAttrs); + assert(callingConv == cir::CallingConv::C && "NYI"); + return builder.createIndirectCallOp(callLoc, indirectFuncVal, + indirectFuncTy, CIRCallArgs, + cir::CallingConv::C, extraFnAttrs); } return builder.createCallOp(callLoc, directFuncOp, CIRCallArgs, callingConv, extraFnAttrs); @@ -557,7 +555,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, - mlir::cir::CIRCallOpInterface *callOrTryCall, + cir::CIRCallOpInterface *callOrTryCall, bool IsMustTail, mlir::Location loc, std::optional E) { auto builder = CGM.getBuilder(); @@ -570,7 +568,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, QualType RetTy = CallInfo.getReturnType(); const auto &RetAI = CallInfo.getReturnInfo(); - mlir::cir::FuncType CIRFuncTy = getTypes().GetFunctionType(CallInfo); + cir::FuncType CIRFuncTy = getTypes().GetFunctionType(CallInfo); const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); // This is not always tied to a FunctionDecl (e.g. builtins that are xformed @@ -634,7 +632,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, switch (ArgInfo.getKind()) { case cir::ABIArgInfo::Direct: { - if (!mlir::isa(ArgInfo.getCoerceToType()) && + if (!mlir::isa(ArgInfo.getCoerceToType()) && ArgInfo.getCoerceToType() == convertType(info_it->type) && ArgInfo.getDirectOffset() == 0) { assert(NumCIRArgs == 1); @@ -648,7 +646,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // We might have to widen integers, but we should never truncate. if (ArgInfo.getCoerceToType() != V.getType() && - mlir::isa(V.getType())) + mlir::isa(V.getType())) llvm_unreachable("NYI"); // If the argument doesn't match, perform a bitcast to coerce it. This @@ -675,7 +673,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // Fast-isel and the optimizer generally like scalar values better than // FCAs, so we flatten them if this is safe to do for this argument. - auto STy = dyn_cast(ArgInfo.getCoerceToType()); + auto STy = dyn_cast(ArgInfo.getCoerceToType()); if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { auto SrcTy = Src.getElementType(); // FIXME(cir): get proper location for each argument. @@ -741,10 +739,10 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // Compute the calling convention and attributes. mlir::NamedAttrList Attrs; StringRef FnName; - if (auto calleeFnOp = dyn_cast(CalleePtr)) + if (auto calleeFnOp = dyn_cast(CalleePtr)) FnName = calleeFnOp.getName(); - mlir::cir::CallingConv callingConv; + cir::CallingConv callingConv; CGM.constructAttributeList(FnName, CallInfo, Callee.getAbstractInfo(), Attrs, callingConv, /*AttrOnCallSite=*/true, @@ -778,10 +776,10 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, CannotThrow = true; } else { // Otherwise, nounwind call sites will never throw. - auto noThrowAttr = mlir::cir::NoThrowAttr::get(&getMLIRContext()); + auto noThrowAttr = cir::NoThrowAttr::get(&getMLIRContext()); CannotThrow = Attrs.getNamed(noThrowAttr.getMnemonic()).has_value(); - if (auto fptr = dyn_cast(CalleePtr)) + if (auto fptr = dyn_cast(CalleePtr)) if (fptr.getExtraAttrs().getElements().contains( noThrowAttr.getMnemonic())) CannotThrow = true; @@ -795,44 +793,43 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, // TODO: alignment attributes auto callLoc = loc; - mlir::cir::CIRCallOpInterface theCall = [&]() { - mlir::cir::FuncType indirectFuncTy; + cir::CIRCallOpInterface theCall = [&]() { + cir::FuncType indirectFuncTy; mlir::Value indirectFuncVal; - mlir::cir::FuncOp directFuncOp; + cir::FuncOp directFuncOp; - if (auto fnOp = dyn_cast(CalleePtr)) { + if (auto fnOp = dyn_cast(CalleePtr)) { directFuncOp = fnOp; - } else if (auto getGlobalOp = dyn_cast(CalleePtr)) { + } else if (auto getGlobalOp = dyn_cast(CalleePtr)) { // FIXME(cir): This peephole optimization to avoids indirect calls for // builtins. This should be fixed in the builting declaration instead by // not emitting an unecessary get_global in the first place. auto *globalOp = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), getGlobalOp.getName()); assert(getGlobalOp && "undefined global function"); - directFuncOp = llvm::dyn_cast(globalOp); + directFuncOp = llvm::dyn_cast(globalOp); assert(directFuncOp && "operation is not a function"); } else { [[maybe_unused]] auto resultTypes = CalleePtr->getResultTypes(); [[maybe_unused]] auto FuncPtrTy = - mlir::dyn_cast(resultTypes.front()); - assert(FuncPtrTy && - mlir::isa(FuncPtrTy.getPointee()) && + mlir::dyn_cast(resultTypes.front()); + assert(FuncPtrTy && mlir::isa(FuncPtrTy.getPointee()) && "expected pointer to function"); indirectFuncTy = CIRFuncTy; indirectFuncVal = CalleePtr->getResult(0); } - auto extraFnAttrs = mlir::cir::ExtraFuncAttributesAttr::get( + auto extraFnAttrs = cir::ExtraFuncAttributesAttr::get( &getMLIRContext(), Attrs.getDictionary(&getMLIRContext())); - mlir::cir::CIRCallOpInterface callLikeOp = buildCallLikeOp( + cir::CIRCallOpInterface callLikeOp = buildCallLikeOp( *this, callLoc, indirectFuncTy, indirectFuncVal, directFuncOp, CIRCallArgs, isInvoke, callingConv, extraFnAttrs); if (E) - callLikeOp->setAttr( - "ast", mlir::cir::ASTCallExprAttr::get(&getMLIRContext(), *E)); + callLikeOp->setAttr("ast", + cir::ASTCallExprAttr::get(&getMLIRContext(), *E)); if (callOrTryCall) *callOrTryCall = callLikeOp; @@ -925,7 +922,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, } mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, - mlir::cir::FuncOp callee, + cir::FuncOp callee, ArrayRef args) { // TODO(cir): set the calling convention to this runtime call. assert(!cir::MissingFeatures::setCallingConv()); @@ -1580,7 +1577,7 @@ mlir::Value CIRGenFunction::buildVAArg(VAArgExpr *VE, Address &VAListAddr) { auto loc = CGM.getLoc(VE->getExprLoc()); auto type = ConvertType(VE->getType()); auto vaList = buildVAListRef(VE->getSubExpr()).getPointer(); - return builder.create(loc, type, vaList); + return builder.create(loc, type, vaList); } static void getTrivialDefaultFunctionAttributes( @@ -1595,7 +1592,7 @@ static void getTrivialDefaultFunctionAttributes( // applied around them). LLVM will remove this attribute where it safely // can. - auto convgt = mlir::cir::ConvergentAttr::get(CGM.getBuilder().getContext()); + auto convgt = cir::ConvergentAttr::get(CGM.getBuilder().getContext()); funcAttrs.set(convgt.getMnemonic(), convgt); } @@ -1605,7 +1602,7 @@ static void getTrivialDefaultFunctionAttributes( if ((langOpts.CUDA && langOpts.CUDAIsDevice) || langOpts.SYCLIsDevice) llvm_unreachable("NYI"); if (langOpts.OpenCL) { - auto noThrow = mlir::cir::NoThrowAttr::get(CGM.getBuilder().getContext()); + auto noThrow = cir::NoThrowAttr::get(CGM.getBuilder().getContext()); funcAttrs.set(noThrow.getMnemonic(), noThrow); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 16a61c9c537d..4ffce395753b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -71,7 +71,7 @@ class CIRGenCallee { const clang::CallExpr *CE; clang::GlobalDecl MD; Address Addr; - mlir::cir::FuncType FTy; + cir::FuncType FTy; }; SpecialKind KindOrFunctionPointer; @@ -158,7 +158,7 @@ class CIRGenCallee { static CIRGenCallee forVirtual(const clang::CallExpr *CE, clang::GlobalDecl MD, Address Addr, - mlir::cir::FuncType FTy) { + cir::FuncType FTy) { CIRGenCallee result(SpecialKind::Virtual); result.VirtualInfo.CE = CE; result.VirtualInfo.MD = MD; @@ -180,7 +180,7 @@ class CIRGenCallee { assert(isVirtual()); return VirtualInfo.Addr; } - mlir::cir::FuncType getVirtualFunctionType() const { + cir::FuncType getVirtualFunctionType() const { assert(isVirtual()); return VirtualInfo.FTy; } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 7fb6a6a0645e..96044f0e52d6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -671,7 +671,7 @@ static Address ApplyNonVirtualAndVirtualOffset( nonVirtualOffset.getQuantity()); if (virtualOffset) { baseOffset = CGF.getBuilder().createBinop( - virtualOffset, mlir::cir::BinOpKind::Add, baseOffset); + virtualOffset, cir::BinOpKind::Add, baseOffset); } } else { baseOffset = virtualOffset; @@ -682,11 +682,11 @@ static Address ApplyNonVirtualAndVirtualOffset( mlir::Value ptr = addr.getPointer(); mlir::Type charPtrType = CGF.CGM.UInt8PtrTy; - mlir::Value charPtr = CGF.getBuilder().createCast( - mlir::cir::CastKind::bitcast, ptr, charPtrType); - mlir::Value adjusted = CGF.getBuilder().create( + mlir::Value charPtr = + CGF.getBuilder().createCast(cir::CastKind::bitcast, ptr, charPtrType); + mlir::Value adjusted = CGF.getBuilder().create( loc, charPtrType, charPtr, baseOffset); - ptr = CGF.getBuilder().createCast(mlir::cir::CastKind::bitcast, adjusted, + ptr = CGF.getBuilder().createCast(cir::CastKind::bitcast, adjusted, ptr.getType()); // If we have a virtual component, the alignment of the result will @@ -1127,7 +1127,7 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { if (DtorType != Dtor_Base && Dtor->getParent()->isAbstract()) { SourceLocation Loc = Dtor->hasBody() ? Dtor->getBody()->getBeginLoc() : Dtor->getLocation(); - builder.create(getLoc(Loc)); + builder.create(getLoc(Loc)); // The corresponding clang/CodeGen logic clears the insertion point here, // but MLIR's builder requires a valid insertion point, so we create a dummy // block (since the trap is a block terminator). @@ -1774,11 +1774,9 @@ void CIRGenFunction::buildCXXAggrConstructorCall( // llvm::BranchInst *zeroCheckBranch = nullptr; // Optimize for a constant count. - auto constantCount = - dyn_cast(numElements.getDefiningOp()); + auto constantCount = dyn_cast(numElements.getDefiningOp()); if (constantCount) { - auto constIntAttr = - mlir::dyn_cast(constantCount.getValue()); + auto constIntAttr = mlir::dyn_cast(constantCount.getValue()); // Just skip out if the constant count is zero. if (constIntAttr && constIntAttr.getUInt() == 0) return; @@ -1787,8 +1785,7 @@ void CIRGenFunction::buildCXXAggrConstructorCall( llvm_unreachable("NYI"); } - auto arrayTy = - mlir::dyn_cast(arrayBase.getElementType()); + auto arrayTy = mlir::dyn_cast(arrayBase.getElementType()); assert(arrayTy && "expected array type"); auto elementType = arrayTy.getEltType(); auto ptrToElmType = builder.getPointerTo(elementType); @@ -1829,7 +1826,7 @@ void CIRGenFunction::buildCXXAggrConstructorCall( } // Wmit the constructor call that will execute for every array element. - builder.create( + builder.create( *currSrcLoc, arrayBase.getPointer(), [&](mlir::OpBuilder &b, mlir::Location loc) { auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc); @@ -1843,7 +1840,7 @@ void CIRGenFunction::buildCXXAggrConstructorCall( buildCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false, /*Delegating=*/false, currAVS, E); - builder.create(loc); + builder.create(loc); }); } @@ -1955,7 +1952,7 @@ void CIRGenFunction::buildCXXConstructorCall( const CIRGenFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall( Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); - mlir::cir::CIRCallOpInterface C; + cir::CIRCallOpInterface C; buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, getLoc(Loc)); assert(CGM.getCodeGenOpts().OptimizationLevel == 0 || diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 679d9a9399f7..96dce5e2960f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -23,7 +23,7 @@ using namespace clang; using namespace clang::CIRGen; -using namespace mlir::cir; +using namespace cir; //===----------------------------------------------------------------------===// // CIRGenFunction cleanup related @@ -33,8 +33,8 @@ using namespace mlir::cir; /// or with the labeled blocked if already solved. /// /// Track on scope basis, goto's we need to fix later. -mlir::cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc, - JumpDest Dest) { +cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc, + JumpDest Dest) { // Remove this once we go for making sure unreachable code is // well modeled (or not). assert(builder.getInsertionBlock() && "not yet implemented"); @@ -266,11 +266,11 @@ static void buildCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, if (ActiveFlag.isValid()) { mlir::Value isActive = builder.createLoad(loc, ActiveFlag); - builder.create(loc, isActive, false, - [&](mlir::OpBuilder &b, mlir::Location) { - emitCleanup(); - builder.createYield(loc); - }); + builder.create(loc, isActive, false, + [&](mlir::OpBuilder &b, mlir::Location) { + emitCleanup(); + builder.createYield(loc); + }); } else { emitCleanup(); } @@ -323,8 +323,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { FallthroughSource->mightHaveTerminator() && FallthroughSource->getTerminator(); bool HasPrebranchedFallthrough = - HasTerminator && - !isa(FallthroughSource->getTerminator()); + HasTerminator && !isa(FallthroughSource->getTerminator()); // If this is a normal cleanup, then having a prebranched // fallthrough implies that the fallthrough source unconditionally @@ -423,8 +422,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // Emit the EH cleanup if required. if (RequiresEHCleanup) { - mlir::cir::TryOp tryOp = - ehEntry->getParentOp()->getParentOfType(); + cir::TryOp tryOp = ehEntry->getParentOp()->getParentOfType(); auto *nextAction = getEHDispatchBlock(EHParent, tryOp); (void)nextAction; @@ -476,8 +474,8 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // If nextAction is an EH resume block, also update all try locations // for these "to-patch" blocks with the appropriate resume content. if (nextAction == ehResumeBlock) { - if (auto tryToPatch = currYield->getParentOp() - ->getParentOfType()) { + if (auto tryToPatch = + currYield->getParentOp()->getParentOfType()) { mlir::Block *resumeBlockToPatch = tryToPatch.getCatchUnwindEntryBlock(); buildEHResumeBlock(/*isCleanup=*/true, resumeBlockToPatch, diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h index 87fefe34e103..20ed42f3adbb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -414,7 +414,7 @@ class alignas(8) EHCleanupScope : public EHScope { bool used = false; // Records a potentially unused instruction to be erased later. - void add(mlir::cir::AllocaOp allocaOp) { auxAllocas.push_back(allocaOp); } + void add(cir::AllocaOp allocaOp) { auxAllocas.push_back(allocaOp); } // Mark all recorded instructions as used. These will not be erased later. void markUsed() { diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index eed9019fdbbb..86366f6bfa15 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -23,11 +23,11 @@ struct clang::CIRGen::CGCoroData { // What is the current await expression kind and how many // await/yield expressions were encountered so far. // These are used to generate pretty labels for await expressions in LLVM IR. - mlir::cir::AwaitKind CurrentAwaitKind = mlir::cir::AwaitKind::init; + cir::AwaitKind CurrentAwaitKind = cir::AwaitKind::init; // Stores the __builtin_coro_id emitted in the function so that we can supply // it as the first argument to other builtins. - mlir::cir::CallOp CoroId = nullptr; + cir::CallOp CoroId = nullptr; // Stores the result of __builtin_coro_begin call. mlir::Value CoroBegin = nullptr; @@ -51,7 +51,7 @@ CIRGenFunction::CGCoroInfo::~CGCoroInfo() {} static void createCoroData(CIRGenFunction &CGF, CIRGenFunction::CGCoroInfo &CurCoro, - mlir::cir::CallOp CoroId) { + cir::CallOp CoroId) { if (CurCoro.Data) { llvm_unreachable("EmitCoroutineBodyStatement called twice?"); @@ -158,8 +158,8 @@ buildBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S, return mlir::success(); } -mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, - mlir::Value nullPtr) { +cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, + mlir::Value nullPtr) { auto int32Ty = builder.getUInt32Ty(); auto &TI = CGM.getASTContext().getTargetInfo(); @@ -167,81 +167,79 @@ mlir::cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroId); - mlir::cir::FuncOp fnOp; + cir::FuncOp fnOp; if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroId, - mlir::cir::FuncType::get({int32Ty, VoidPtrTy, VoidPtrTy, VoidPtrTy}, - int32Ty), + cir::FuncType::get({int32Ty, VoidPtrTy, VoidPtrTy, VoidPtrTy}, int32Ty), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); } else - fnOp = cast(builtin); + fnOp = cast(builtin); return builder.createCallOp(loc, fnOp, mlir::ValueRange{builder.getUInt32(NewAlign, loc), nullPtr, nullPtr, nullPtr}); } -mlir::cir::CallOp -CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { +cir::CallOp CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { auto boolTy = builder.getBoolTy(); auto int32Ty = builder.getUInt32Ty(); mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroAlloc); - mlir::cir::FuncOp fnOp; + cir::FuncOp fnOp; if (!builtin) { fnOp = CGM.createCIRFunction(loc, CGM.builtinCoroAlloc, - mlir::cir::FuncType::get({int32Ty}, boolTy), + cir::FuncType::get({int32Ty}, boolTy), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); } else - fnOp = cast(builtin); + fnOp = cast(builtin); return builder.createCallOp( loc, fnOp, mlir::ValueRange{CurCoro.Data->CoroId.getResult()}); } -mlir::cir::CallOp +cir::CallOp CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr) { auto int32Ty = builder.getUInt32Ty(); mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroBegin); - mlir::cir::FuncOp fnOp; + cir::FuncOp fnOp; if (!builtin) { fnOp = CGM.createCIRFunction( loc, CGM.builtinCoroBegin, - mlir::cir::FuncType::get({int32Ty, VoidPtrTy}, VoidPtrTy), + cir::FuncType::get({int32Ty, VoidPtrTy}, VoidPtrTy), /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); } else - fnOp = cast(builtin); + fnOp = cast(builtin); return builder.createCallOp( loc, fnOp, mlir::ValueRange{CurCoro.Data->CoroId.getResult(), coroframeAddr}); } -mlir::cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, - mlir::Value nullPtr) { +cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, + mlir::Value nullPtr) { auto boolTy = builder.getBoolTy(); mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroEnd); - mlir::cir::FuncOp fnOp; + cir::FuncOp fnOp; if (!builtin) { - fnOp = CGM.createCIRFunction( - loc, CGM.builtinCoroEnd, - mlir::cir::FuncType::get({VoidPtrTy, boolTy}, boolTy), - /*FD=*/nullptr); + fnOp = + CGM.createCIRFunction(loc, CGM.builtinCoroEnd, + cir::FuncType::get({VoidPtrTy, boolTy}, boolTy), + /*FD=*/nullptr); assert(fnOp && "should always succeed"); fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); } else - fnOp = cast(builtin); + fnOp = cast(builtin); return builder.createCallOp( loc, fnOp, mlir::ValueRange{nullPtr, builder.getBool(false, loc)}); @@ -252,7 +250,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { auto openCurlyLoc = getLoc(S.getBeginLoc()); auto nullPtrCst = builder.getNullPtr(VoidPtrTy, openCurlyLoc); - auto Fn = dyn_cast(CurFn); + auto Fn = dyn_cast(CurFn); assert(Fn && "other callables NYI"); Fn.setCoroutineAttr(mlir::UnitAttr::get(&getMLIRContext())); auto coroId = buildCoroIDBuiltinCall(openCurlyLoc, nullPtrCst); @@ -272,20 +270,20 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { auto storeAddr = coroFrame.getPointer(); builder.CIRBaseBuilderTy::createStore(openCurlyLoc, nullPtrCst, storeAddr); - builder.create(openCurlyLoc, coroAlloc.getResult(), - /*withElseRegion=*/false, - /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - builder.CIRBaseBuilderTy::createStore( - loc, buildScalarExpr(S.getAllocate()), - storeAddr); - builder.create(loc); - }); + builder.create(openCurlyLoc, coroAlloc.getResult(), + /*withElseRegion=*/false, + /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + builder.CIRBaseBuilderTy::createStore( + loc, buildScalarExpr(S.getAllocate()), + storeAddr); + builder.create(loc); + }); CurCoro.Data->CoroBegin = buildCoroBeginBuiltinCall( openCurlyLoc, - builder.create(openCurlyLoc, allocaTy, storeAddr)) + builder.create(openCurlyLoc, allocaTy, storeAddr)) .getResult(); // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided. @@ -339,11 +337,11 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { } // FIXME(cir): EHStack.pushCleanup(EHCleanup); - CurCoro.Data->CurrentAwaitKind = mlir::cir::AwaitKind::init; + CurCoro.Data->CurrentAwaitKind = cir::AwaitKind::init; if (buildStmt(S.getInitSuspendStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - CurCoro.Data->CurrentAwaitKind = mlir::cir::AwaitKind::user; + CurCoro.Data->CurrentAwaitKind = cir::AwaitKind::user; // FIXME(cir): wrap buildBodyAndFallthrough with try/catch bits. if (S.getExceptionHandler()) @@ -362,7 +360,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { const bool CanFallthrough = currLexScope->hasCoreturn(); const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0; if (CanFallthrough || HasCoreturns) { - CurCoro.Data->CurrentAwaitKind = mlir::cir::AwaitKind::final; + CurCoro.Data->CurrentAwaitKind = cir::AwaitKind::final; { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPoint(CurCoro.Data->FinalSuspendInsPoint); @@ -409,7 +407,7 @@ struct LValueOrRValue { } // namespace static LValueOrRValue buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, - CoroutineSuspendExpr const &S, mlir::cir::AwaitKind Kind, + CoroutineSuspendExpr const &S, cir::AwaitKind Kind, AggValueSlot aggSlot, bool ignoreResult, mlir::Block *scopeParentBlock, mlir::Value &tmpResumeRValAddr, bool forLValue) { @@ -423,7 +421,7 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, auto UnbindOnExit = llvm::make_scope_exit([&] { Binder.unbind(CGF); }); auto &builder = CGF.getBuilder(); - [[maybe_unused]] auto awaitOp = builder.create( + [[maybe_unused]] auto awaitOp = builder.create( CGF.getLoc(S.getSourceRange()), Kind, /*readyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -447,7 +445,7 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, } // Signals the parent that execution flows to next region. - builder.create(loc); + builder.create(loc); }, /*resumeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -455,7 +453,7 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, // function is marked as 'noexcept', we avoid generating this additional // IR. CXXTryStmt *TryStmt = nullptr; - if (Coro.ExceptionHandler && Kind == mlir::cir::AwaitKind::init && + if (Coro.ExceptionHandler && Kind == cir::AwaitKind::init && memberCallExpressionCanThrow(S.getResumeExpr())) { llvm_unreachable("NYI"); } @@ -485,7 +483,7 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, } // Returns control back to parent. - builder.create(loc); + builder.create(loc); }); assert(awaitBuild.succeeded() && "Should know how to codegen"); @@ -494,7 +492,7 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, static RValue buildSuspendExpr(CIRGenFunction &CGF, const CoroutineSuspendExpr &E, - mlir::cir::AwaitKind kind, AggValueSlot aggSlot, + cir::AwaitKind kind, AggValueSlot aggSlot, bool ignoreResult) { RValue rval; auto scopeLoc = CGF.getLoc(E.getSourceRange()); @@ -519,7 +517,7 @@ static RValue buildSuspendExpr(CIRGenFunction &CGF, return rval; if (rval.isScalar()) { - rval = RValue::get(CGF.getBuilder().create( + rval = RValue::get(CGF.getBuilder().create( scopeLoc, rval.getScalarVal().getType(), tmpResumeRValAddr)); } else if (rval.isAggregate()) { // This is probably already handled via AggSlot, remove this assertion @@ -541,7 +539,7 @@ RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, RValue CIRGenFunction::buildCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot, bool ignoreResult) { - return buildSuspendExpr(*this, E, mlir::cir::AwaitKind::yield, aggSlot, + return buildSuspendExpr(*this, E, cir::AwaitKind::yield, aggSlot, ignoreResult); } @@ -564,8 +562,7 @@ mlir::LogicalResult CIRGenFunction::buildCoreturnStmt(CoreturnStmt const &S) { // scope cleanup handling. auto loc = getLoc(S.getSourceRange()); auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); - CurCoro.Data->FinalSuspendInsPoint = - builder.create(loc, retBlock); + CurCoro.Data->FinalSuspendInsPoint = builder.create(loc, retBlock); // Insert the new block to continue codegen after branch to ret block, // this will likely be an empty block. diff --git a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h index dd5700668c59..e0a27a2c2f82 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h +++ b/clang/lib/CIR/CodeGen/CIRGenCstEmitter.h @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// A helper class for emitting expressions and values as mlir::cir::ConstantOp +// A helper class for emitting expressions and values as cir::ConstantOp // and as initializers for global variables. // // Note: this is based on LLVM's codegen in ConstantEmitter.h, reusing this @@ -79,7 +79,7 @@ class ConstantEmitter { mlir::Attribute emitForInitializer(const APValue &value, LangAS destAddrSpace, QualType destType); - void finalize(mlir::cir::GlobalOp global); + void finalize(cir::GlobalOp global); // All of the "abstract" emission methods below permit the emission to // be immediately discarded without finalizing anything. Therefore, they diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index d68dceabbf22..5ed32d800bbb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -92,7 +92,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, (!NRVO && !D.isEscapingByref() && CGM.isTypeConstant(Ty, /*ExcludeCtor=*/true, /*ExcludeDtor=*/false))) { - buildStaticVarDecl(D, mlir::cir::GlobalLinkageKind::InternalLinkage); + buildStaticVarDecl(D, cir::GlobalLinkageKind::InternalLinkage); // Signal this condition to later callbacks. emission.Addr = Address::invalid(); @@ -314,7 +314,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { // frequently return an empty Attribute, to signal we want to codegen // some trivial ctor calls and whatnots. constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D); - if (constant && !mlir::isa(constant) && + if (constant && !mlir::isa(constant) && (trivialAutoVarInit != LangOptions::TrivialAutoVarInitKind::Uninitialized)) { llvm_unreachable("NYI"); @@ -333,7 +333,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { // out of it while trying to build the expression, mark it as such. auto addr = lv.getAddress().getPointer(); assert(addr && "Should have an address"); - auto allocaOp = dyn_cast_or_null(addr.getDefiningOp()); + auto allocaOp = dyn_cast_or_null(addr.getDefiningOp()); assert(allocaOp && "Address should come straight out of the alloca"); if (!allocaOp.use_empty()) @@ -452,14 +452,14 @@ static std::string getStaticDeclName(CIRGenModule &CGM, const VarDecl &D) { // TODO(cir): LLVM uses a Constant base class. Maybe CIR could leverage an // interface for all constants? -mlir::cir::GlobalOp +cir::GlobalOp CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, - mlir::cir::GlobalLinkageKind Linkage) { + cir::GlobalLinkageKind Linkage) { // In general, we don't always emit static var decls once before we reference // them. It is possible to reference them before emitting the function that // contains them, and it is possible to emit the containing function multiple // times. - if (mlir::cir::GlobalOp ExistingGV = StaticLocalDeclMap[&D]) + if (cir::GlobalOp ExistingGV = StaticLocalDeclMap[&D]) return ExistingGV; QualType Ty = D.getType(); @@ -473,7 +473,7 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, Name = getStaticDeclName(*this, D); mlir::Type LTy = getTypes().convertTypeForMem(Ty); - mlir::cir::AddressSpaceAttr AS = + cir::AddressSpaceAttr AS = builder.getAddrSpaceAttr(getGlobalVarAddressSpace(&D)); // OpenCL variables in local address space and CUDA shared @@ -484,7 +484,7 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, else if (Ty.getAddressSpace() != LangAS::opencl_local) Init = builder.getZeroInitAttr(getTypes().ConvertType(Ty)); - mlir::cir::GlobalOp GV = builder.createVersionedGlobal( + cir::GlobalOp GV = builder.createVersionedGlobal( getModule(), getLoc(D.getLocation()), Name, LTy, false, Linkage, AS); // TODO(cir): infer visibility from linkage in global op builder. GV.setVisibility(getMLIRVisibilityFromCIRLinkage(Linkage)); @@ -539,8 +539,8 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, /// Add the initializer for 'D' to the global variable that has already been /// created for it. If the initializer has a different type than GV does, this /// may free GV and return a different one. Otherwise it just returns GV. -mlir::cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( - const VarDecl &D, mlir::cir::GlobalOp GV, mlir::cir::GetGlobalOp GVAddr) { +cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( + const VarDecl &D, cir::GlobalOp GV, cir::GetGlobalOp GVAddr) { ConstantEmitter emitter(*this); mlir::TypedAttr Init = mlir::dyn_cast(emitter.tryEmitForInitializer(D)); @@ -575,7 +575,7 @@ mlir::cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( // because some types, like unions, can't be completely represented // in the LLVM type system.) if (GV.getSymType() != Init.getType()) { - mlir::cir::GlobalOp OldGV = GV; + cir::GlobalOp OldGV = GV; GV = builder.createGlobal(CGM.getModule(), getLoc(D.getSourceRange()), OldGV.getName(), Init.getType(), OldGV.getConstant(), GV.getLinkage()); @@ -595,7 +595,7 @@ mlir::cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( // Given those constraints, thread in the GetGlobalOp and update it // directly. GVAddr.getAddr().setType( - mlir::cir::PointerType::get(&getMLIRContext(), Init.getType())); + cir::PointerType::get(&getMLIRContext(), Init.getType())); OldGV->erase(); } @@ -619,7 +619,7 @@ mlir::cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( } void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, - mlir::cir::GlobalLinkageKind Linkage) { + cir::GlobalLinkageKind Linkage) { // Check to see if we already have a global variable for this // declaration. This can happen when double-emitting function // bodies, e.g. with complete and base constructors. @@ -627,7 +627,7 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, // TODO(cir): we should have a way to represent global ops as values without // having to emit a get global op. Sometimes these emissions are not used. auto addr = getBuilder().createGetGlobal(globalOp); - auto getAddrOp = mlir::cast(addr.getDefiningOp()); + auto getAddrOp = mlir::cast(addr.getDefiningOp()); CharUnits alignment = getContext().getDeclAlign(&D); @@ -1112,7 +1112,7 @@ void CIRGenFunction::buildArrayDestroy(mlir::Value begin, mlir::Value end, auto ptrToElmType = builder.getPointerTo(cirElementType); // Emit the dtor call that will execute for every array element. - builder.create( + builder.create( *currSrcLoc, begin, [&](mlir::OpBuilder &b, mlir::Location loc) { auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc); Address curAddr = Address(arg, ptrToElmType, elementAlign); @@ -1127,7 +1127,7 @@ void CIRGenFunction::buildArrayDestroy(mlir::Value begin, mlir::Value end, if (useEHCleanup) PopCleanupBlock(); - builder.create(loc); + builder.create(loc); }); } @@ -1157,10 +1157,9 @@ void CIRGenFunction::emitDestroy(Address addr, QualType type, bool checkZeroLength = true; // But if the array length is constant, we can suppress that. - auto constantCount = dyn_cast(length.getDefiningOp()); + auto constantCount = dyn_cast(length.getDefiningOp()); if (constantCount) { - auto constIntAttr = - mlir::dyn_cast(constantCount.getValue()); + auto constIntAttr = mlir::dyn_cast(constantCount.getValue()); // ...and if it's constant zero, we can just skip the entire thing. if (constIntAttr && constIntAttr.getUInt() == 0) return; @@ -1274,7 +1273,7 @@ void CIRGenFunction::pushDestroyAndDeferDeactivation( CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray) { mlir::Operation *flag = - builder.create(builder.getUnknownLoc()); + builder.create(builder.getUnknownLoc()); pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray); DeferredDeactivationCleanupStack.push_back({EHStack.stable_begin(), flag}); } diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp index 8e89095aecff..007a5a3b2932 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp @@ -18,7 +18,7 @@ using namespace clang; using namespace clang::CIRGen; -using namespace mlir::cir; +using namespace cir; void CIRGenModule::buildCXXGlobalInitFunc() { while (!CXXGlobalInits.empty() && !CXXGlobalInits.back()) @@ -32,7 +32,7 @@ void CIRGenModule::buildCXXGlobalInitFunc() { } void CIRGenModule::buildCXXGlobalVarDeclInitFunc(const VarDecl *D, - mlir::cir::GlobalOp Addr, + cir::GlobalOp Addr, bool PerformInit) { // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__, // __constant__ and __shared__ variables defined in namespace scope, diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index b7b6ba81b907..84a4176b36e9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -218,7 +218,7 @@ struct FreeException final : EHScopeStack::Cleanup { CIRGenBuilderTy &builder = CGF.getBuilder(); mlir::Location loc = CGF.currSrcLoc ? *CGF.currSrcLoc : builder.getUnknownLoc(); - builder.create( + builder.create( loc, builder.createBitcast(exn, builder.getVoidPtrTy())); } }; @@ -274,16 +274,16 @@ void CIRGenFunction::buildEHResumeBlock(bool isCleanup, // FIXME(cir): upon testcase // this should just add the // 'rethrow' attribute to - // mlir::cir::ResumeOp below. + // cir::ResumeOp below. llvm_unreachable("NYI"); } - getBuilder().create(loc, mlir::Value{}, mlir::Value{}); + getBuilder().create(loc, mlir::Value{}, mlir::Value{}); getBuilder().restoreInsertionPoint(ip); } mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup, - mlir::cir::TryOp tryOp) { + cir::TryOp tryOp) { if (ehResumeBlock) return ehResumeBlock; @@ -302,18 +302,19 @@ mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { mlir::OpBuilder::InsertPoint scopeIP; // Create a scope to hold try local storage for catch params. - [[maybe_unused]] auto s = builder.create( - loc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - scopeIP = getBuilder().saveInsertionPoint(); - }); + [[maybe_unused]] auto s = + builder.create(loc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + scopeIP = + getBuilder().saveInsertionPoint(); + }); auto r = mlir::success(); { mlir::OpBuilder::InsertionGuard guard(getBuilder()); getBuilder().restoreInsertionPoint(scopeIP); r = buildCXXTryStmtUnderScope(S); - getBuilder().create(loc); + getBuilder().create(loc); } return r; } @@ -345,7 +346,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { // don't populate right away. Reserve some space to store the exception // info but don't emit the bulk right away, for now only make sure the // scope returns the exception information. - auto tryOp = builder.create( + auto tryOp = builder.create( tryLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { beginInsertTryBody = getBuilder().saveInsertionPoint(); @@ -403,7 +404,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { /// It is an invariant that the dispatch block already exists. static void buildCatchDispatchBlock(CIRGenFunction &CGF, EHCatchScope &catchScope, - mlir::cir::TryOp tryOp) { + cir::TryOp tryOp) { if (EHPersonality::get(CGF).isWasmPersonality()) llvm_unreachable("NYI"); if (EHPersonality::get(CGF).usesFuncletPads()) @@ -459,8 +460,7 @@ static void buildCatchDispatchBlock(CIRGenFunction &CGF, } } -void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S, - mlir::cir::TryOp tryOp, +void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &S, cir::TryOp tryOp, bool IsFnTryBlock) { unsigned NumHandlers = S.getNumHandlers(); EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers); @@ -501,7 +501,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { unsigned NumHandlers = S.getNumHandlers(); EHCatchScope &CatchScope = cast(*EHStack.begin()); assert(CatchScope.getNumHandlers() == NumHandlers); - mlir::cir::TryOp tryOp = currLexScope->getTry(); + cir::TryOp tryOp = currLexScope->getTry(); // If the catch was not required, bail out now. if (!CatchScope.hasEHBranches()) { @@ -620,7 +620,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { llvm_unreachable("Invalid EHScope Kind!"); } -mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { +mlir::Operation *CIRGenFunction::buildLandingPad(cir::TryOp tryOp) { assert(EHStack.requiresLandingPad()); assert(!CGM.getLangOpts().IgnoreExceptions && "LandingPad should not be emitted when -fignore-exceptions are in " @@ -708,7 +708,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { assert(!(hasCatchAll && hasFilter)); if (hasCatchAll) { // Attach the catch_all region. Can't coexist with an unwind one. - auto catchAll = mlir::cir::CatchAllAttr::get(&getMLIRContext()); + auto catchAll = cir::CatchAllAttr::get(&getMLIRContext()); clauses.push_back(catchAll); // If we have an EH filter, we need to add those handlers in the @@ -729,7 +729,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { // If there's no catch_all, attach the unwind region. This needs to be the // last region in the TryOp operation catch list. if (!hasCatchAll) { - auto catchUnwind = mlir::cir::CatchUnwindAttr::get(&getMLIRContext()); + auto catchUnwind = cir::CatchUnwindAttr::get(&getMLIRContext()); clauses.push_back(catchUnwind); } @@ -754,7 +754,7 @@ mlir::Operation *CIRGenFunction::buildLandingPad(mlir::cir::TryOp tryOp) { // getCachedEHDispatchBlock to infer state. mlir::Block * CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si, - mlir::cir::TryOp tryOp) { + cir::TryOp tryOp) { if (EHPersonality::get(*this).usesFuncletPads()) llvm_unreachable("NYI"); @@ -774,7 +774,7 @@ CIRGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si, // - Update the map to enqueue new dispatchBlock to also get a cleanup. See // code at the end of the function. mlir::Operation *parentOp = dispatchBlock->getParentOp(); - if (tryOp != parentOp->getParentOfType()) { + if (tryOp != parentOp->getParentOfType()) { originalBlock = dispatchBlock; dispatchBlock = nullptr; } @@ -864,7 +864,7 @@ bool CIRGenFunction::isInvokeDest() { return true; } -mlir::Operation *CIRGenFunction::getInvokeDestImpl(mlir::cir::TryOp tryOp) { +mlir::Operation *CIRGenFunction::getInvokeDestImpl(cir::TryOp tryOp) { assert(EHStack.requiresLandingPad()); assert(!EHStack.empty()); assert(isInvokeDest()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 30c8f5e33230..43cab7480c73 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -41,10 +41,9 @@ using namespace clang; using namespace clang::CIRGen; -using namespace mlir::cir; +using namespace cir; -static mlir::cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, - GlobalDecl GD) { +static cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); if (FD->hasAttr()) { @@ -75,7 +74,7 @@ static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, auto fieldType = CGF.convertType(field->getType()); auto fieldPtr = - mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fieldType); + cir::PointerType::get(CGF.getBuilder().getContext(), fieldType); // For most cases fieldName is the same as field->getName() but for lambdas, // which do not currently carry the name, so it can be passed down from the // CaptureStmt. @@ -249,8 +248,7 @@ Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base, if (index == 0) return base.getAddress(); auto loc = getLoc(field->getLocation()); - auto fieldPtr = - mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); + auto fieldPtr = cir::PointerType::get(getBuilder().getContext(), fieldType); auto sea = getBuilder().createGetMember(loc, fieldPtr, base.getPointer(), field->getName(), index); return Address(sea, CharUnits::One()); @@ -489,7 +487,7 @@ static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { // When directing calling an inline builtin, call it through it's mangled // name to make it clear it's not the actual builtin. - auto Fn = cast(CGF.CurFn); + auto Fn = cast(CGF.CurFn); if (Fn.getName() != FDInlineName && onlyHasInlineBuiltinDeclaration(FD)) { assert(0 && "NYI"); } @@ -604,9 +602,9 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, Address addr, mlir::Type SrcTy = value.getType(); if (const auto *ClangVecTy = ty->getAs()) { - auto VecTy = dyn_cast(SrcTy); // TODO(CIR): this has fallen out of date with codegen llvm_unreachable("NYI: Special treatment of 3-element vector store"); + // auto VecTy = dyn_cast(SrcTy); // if (!CGM.getCodeGenOpts().PreserveVec3Type && // ClangVecTy->getNumElements() == 3) { // // Handle vec3 special. @@ -614,8 +612,7 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, Address addr, // // Our source is a vec3, do a shuffle vector to make it a vec4. // value = builder.createVecShuffle(value.getLoc(), value, // ArrayRef{0, 1, 2, -1}); - // SrcTy = mlir::cir::VectorType::get(VecTy.getContext(), - // VecTy.getEltType(), 4); + // SrcTy = cir::VectorType::get(VecTy.getContext(), VecTy.getEltType(), 4); // } // if (addr.getElementType() != SrcTy) { // addr = addr.withElementType(SrcTy); @@ -626,7 +623,7 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, Address addr, // Update the alloca with more info on initialization. assert(addr.getPointer() && "expected pointer to exist"); auto SrcAlloca = - dyn_cast_or_null(addr.getPointer().getDefiningOp()); + dyn_cast_or_null(addr.getPointer().getDefiningOp()); if (currVarDecl && SrcAlloca) { const VarDecl *VD = currVarDecl; assert(VD && "VarDecl expected"); @@ -671,8 +668,8 @@ RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { if (LV.isVectorElt()) { auto load = builder.createLoad(getLoc(Loc), LV.getVectorAddress()); - return RValue::get(builder.create( - getLoc(Loc), load, LV.getVectorIdx())); + return RValue::get(builder.create(getLoc(Loc), load, + LV.getVectorIdx())); } if (LV.isExtVectorElt()) { @@ -697,7 +694,7 @@ RValue CIRGenFunction::buildLoadOfExtVectorElementLValue(LValue LV) { // HLSL allows treating scalars as one-element vectors. Converting the scalar // IR value to a vector here allows the rest of codegen to behave as normal. - if (getLangOpts().HLSL && !mlir::isa(Vec.getType())) { + if (getLangOpts().HLSL && !mlir::isa(Vec.getType())) { llvm_unreachable("HLSL NYI"); } @@ -708,9 +705,9 @@ RValue CIRGenFunction::buildLoadOfExtVectorElementLValue(LValue LV) { const auto *ExprVT = LV.getType()->getAs(); if (!ExprVT) { int64_t InIdx = getAccessedFieldNo(0, Elts); - mlir::cir::ConstantOp Elt = + cir::ConstantOp Elt = builder.getConstInt(loc, builder.getSInt64Ty(), InIdx); - return RValue::get(builder.create(loc, Vec, Elt)); + return RValue::get(builder.create(loc, Vec, Elt)); } // Always use shuffle vector to try to retain the original program structure @@ -750,7 +747,7 @@ void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src, // To support this we need to handle the case where the destination address is // a scalar. Address DstAddr = Dst.getExtVectorAddress(); - if (!mlir::isa(DstAddr.getElementType())) { + if (!mlir::isa(DstAddr.getElementType())) { llvm_unreachable("HLSL NYI"); } @@ -764,7 +761,7 @@ void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src, if (const clang::VectorType *VTy = Dst.getType()->getAs()) { unsigned NumSrcElts = VTy->getNumElements(); - unsigned NumDstElts = cast(Vec.getType()).getSize(); + unsigned NumDstElts = cast(Vec.getType()).getSize(); if (NumDstElts == NumSrcElts) { // Use shuffle vector is the src and destination are the same number of // elements and restore the vector mask since it is on the side it will be @@ -809,7 +806,7 @@ void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src, unsigned InIdx = getAccessedFieldNo(0, Elts); auto Elt = builder.getSInt64(InIdx, loc); - Vec = builder.create(loc, Vec, SrcVal, Elt); + Vec = builder.create(loc, Vec, SrcVal, Elt); } builder.createStore(loc, Vec, Dst.getExtVectorAddress(), @@ -823,8 +820,8 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst, // Read/modify/write the vector, inserting the new element mlir::Location loc = Dst.getVectorPointer().getLoc(); mlir::Value Vector = builder.createLoad(loc, Dst.getVectorAddress()); - Vector = builder.create( - loc, Vector, Src.getScalarVal(), Dst.getVectorIdx()); + Vector = builder.create(loc, Vector, Src.getScalarVal(), + Dst.getVectorIdx()); builder.createStore(loc, Vector, Dst.getVectorAddress()); return; } @@ -900,9 +897,9 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, auto V = CGF.CGM.getAddrOfGlobalVar(VD); auto RealVarTy = CGF.getTypes().convertTypeForMem(VD->getType()); - mlir::cir::PointerType realPtrTy = CGF.getBuilder().getPointerTo( - RealVarTy, cast_if_present( - cast(V.getType()).getAddrSpace())); + cir::PointerType realPtrTy = CGF.getBuilder().getPointerTo( + RealVarTy, cast_if_present( + cast(V.getType()).getAddrSpace())); if (realPtrTy != V.getType()) V = CGF.getBuilder().createBitcast(V.getLoc(), V, realPtrTy); @@ -938,17 +935,17 @@ static LValue buildFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E, CharUnits align = CGF.getContext().getDeclAlign(FD); mlir::Type fnTy = funcOp.getFunctionType(); - auto ptrTy = mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fnTy); - mlir::Value addr = CGF.getBuilder().create( + auto ptrTy = cir::PointerType::get(CGF.getBuilder().getContext(), fnTy); + mlir::Value addr = CGF.getBuilder().create( loc, ptrTy, funcOp.getSymName()); if (funcOp.getFunctionType() != CGF.CGM.getTypes().ConvertType(FD->getType())) { fnTy = CGF.CGM.getTypes().ConvertType(FD->getType()); - ptrTy = mlir::cir::PointerType::get(CGF.getBuilder().getContext(), fnTy); + ptrTy = cir::PointerType::get(CGF.getBuilder().getContext(), fnTy); - addr = CGF.getBuilder().create( - addr.getLoc(), ptrTy, mlir::cir::CastKind::bitcast, addr); + addr = CGF.getBuilder().create(addr.getLoc(), ptrTy, + cir::CastKind::bitcast, addr); } return CGF.makeAddrLValue(Address(addr, fnTy, align), E->getType(), @@ -1012,7 +1009,7 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { // Otherwise, it might be static local we haven't emitted yet for some // reason; most likely, because it's in an outer function. else if (VD->isStaticLocal()) { - mlir::cir::GlobalOp var = CGM.getOrCreateStaticVarDecl( + cir::GlobalOp var = CGM.getOrCreateStaticVarDecl( *VD, CGM.getCIRLinkageVarDefinition(VD, /*IsConstant=*/false)); addr = Address(builder.createGetGlobal(var), convertType(VD->getType()), getContext().getDeclAlign(VD)); @@ -1285,7 +1282,7 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { // Tag 'load' with deref attribute. if (auto loadOp = - dyn_cast<::mlir::cir::LoadOp>(Addr.getPointer().getDefiningOp())) { + dyn_cast(Addr.getPointer().getDefiningOp())) { loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext())); } @@ -1302,7 +1299,7 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { // __real is valid on scalars. This is a faster way of testing that. // __imag can only produce an rvalue on scalars. if (E->getOpcode() == UO_Real && - !mlir::isa(LV.getAddress().getElementType())) { + !mlir::isa(LV.getAddress().getElementType())) { assert(E->getSubExpr()->getType()->isArithmeticType()); return LV; } @@ -1495,17 +1492,16 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, assert(!cir::MissingFeatures::addressSpace()); auto CalleeTy = getTypes().GetFunctionType(FnInfo); // get non-variadic function type - CalleeTy = mlir::cir::FuncType::get(CalleeTy.getInputs(), - CalleeTy.getReturnType(), false); - auto CalleePtrTy = mlir::cir::PointerType::get(&getMLIRContext(), CalleeTy); + CalleeTy = cir::FuncType::get(CalleeTy.getInputs(), + CalleeTy.getReturnType(), false); + auto CalleePtrTy = cir::PointerType::get(&getMLIRContext(), CalleeTy); auto *Fn = Callee.getFunctionPointer(); mlir::Value Addr; - if (auto funcOp = llvm::dyn_cast(Fn)) { - Addr = builder.create( + if (auto funcOp = llvm::dyn_cast(Fn)) { + Addr = builder.create( getLoc(E->getSourceRange()), - mlir::cir::PointerType::get(&getMLIRContext(), - funcOp.getFunctionType()), + cir::PointerType::get(&getMLIRContext(), funcOp.getFunctionType()), funcOp.getSymName()); } else { Addr = Fn->getResult(0); @@ -1518,7 +1514,7 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, assert(!CGM.getLangOpts().HIP && "HIP NYI"); assert(!MustTailCall && "Must tail NYI"); - mlir::cir::CIRCallOpInterface callOP; + cir::CIRCallOpInterface callOP; RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, &callOP, E == MustTailCall, getLoc(E->getExprLoc()), E); @@ -1548,18 +1544,17 @@ Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, // If the array type was an incomplete type, we need to make sure // the decay ends up being the right type. auto lvalueAddrTy = - mlir::dyn_cast(Addr.getPointer().getType()); + mlir::dyn_cast(Addr.getPointer().getType()); assert(lvalueAddrTy && "expected pointer"); if (E->getType()->isVariableArrayType()) return Addr; - auto pointeeTy = - mlir::dyn_cast(lvalueAddrTy.getPointee()); + auto pointeeTy = mlir::dyn_cast(lvalueAddrTy.getPointee()); assert(pointeeTy && "expected array"); mlir::Type arrayTy = convertType(E->getType()); - assert(mlir::isa(arrayTy) && "expected array"); + assert(mlir::isa(arrayTy) && "expected array"); assert(pointeeTy == arrayTy); // The result of this decay conversion points to an array element within the @@ -1635,7 +1630,7 @@ static bool isPreserveAIArrayBase(CIRGenFunction &CGF, const Expr *ArrayBase) { static mlir::IntegerAttr getConstantIndexOrNull(mlir::Value idx) { // TODO(cir): should we consider using MLIRs IndexType instead of IntegerAttr? - if (auto constantOp = dyn_cast(idx.getDefiningOp())) + if (auto constantOp = dyn_cast(idx.getDefiningOp())) return mlir::dyn_cast(constantOp.getValue()); return {}; } @@ -1740,8 +1735,8 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, llvm_unreachable("array bounds sanitizer is NYI"); // Extend or truncate the index type to 32 or 64-bits. - auto ptrTy = mlir::dyn_cast(Idx.getType()); - if (Promote && ptrTy && mlir::isa(ptrTy.getPointee())) + auto ptrTy = mlir::dyn_cast(Idx.getType()); + if (Promote && ptrTy && mlir::isa(ptrTy.getPointee())) llvm_unreachable("index type cast is NYI"); return Idx; @@ -1779,8 +1774,8 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // The element count here is the total number of non-VLA elements. mlir::Value numElements = getVLASize(vla).NumElts; - Idx = builder.createCast(mlir::cir::CastKind::integral, Idx, - numElements.getType()); + Idx = + builder.createCast(cir::CastKind::integral, Idx, numElements.getType()); Idx = builder.createMul(Idx, numElements); QualType ptrType = E->getBase()->getType(); @@ -1848,15 +1843,15 @@ LValue CIRGenFunction::buildStringLiteralLValue(const StringLiteral *E) { auto cstGlobal = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), sym); assert(cstGlobal && "Expected global"); - auto g = dyn_cast(cstGlobal); + auto g = dyn_cast(cstGlobal); assert(g && "unaware of other symbol providers"); - auto ptrTy = mlir::cir::PointerType::get(CGM.getBuilder().getContext(), - g.getSymType()); + auto ptrTy = + cir::PointerType::get(CGM.getBuilder().getContext(), g.getSymType()); assert(g.getAlignment() && "expected alignment for string literal"); auto align = *g.getAlignment(); - auto addr = builder.create( - getLoc(E->getSourceRange()), ptrTy, g.getSymName()); + auto addr = builder.create(getLoc(E->getSourceRange()), + ptrTy, g.getSymName()); return makeAddrLValue( Address(addr, g.getSymType(), CharUnits::fromQuantity(align)), E->getType(), AlignmentSource::Decl); @@ -2180,11 +2175,11 @@ static Address createReferenceTemporary(CIRGenFunction &CGF, // The temporary memory should be created in the same scope as the extending // declaration of the temporary materialization expression. - mlir::cir::AllocaOp extDeclAlloca; + cir::AllocaOp extDeclAlloca; if (const clang::ValueDecl *extDecl = M->getExtendingDecl()) { auto extDeclAddrIter = CGF.LocalDeclMap.find(extDecl); if (extDeclAddrIter != CGF.LocalDeclMap.end()) { - extDeclAlloca = dyn_cast_if_present( + extDeclAlloca = dyn_cast_if_present( extDeclAddrIter->second.getDefiningOp()); } } @@ -2196,8 +2191,8 @@ static Address createReferenceTemporary(CIRGenFunction &CGF, } case SD_Thread: case SD_Static: { - auto a = mlir::cast( - CGF.CGM.getAddrOfGlobalTemporary(M, Inner)); + auto a = + mlir::cast(CGF.CGM.getAddrOfGlobalTemporary(M, Inner)); auto f = CGF.CGM.getBuilder().createGetGlobal(a); assert(a.getAlignment().has_value() && "This should always have an alignment"); @@ -2238,7 +2233,7 @@ static void pushTemporaryCleanup(CIRGenFunction &CGF, switch (M->getStorageDuration()) { case SD_Static: case SD_Thread: { - mlir::cir::FuncOp cleanupFn; + cir::FuncOp cleanupFn; mlir::Value cleanupArg; if (E->getType()->isArrayType()) { llvm_unreachable("SD_Static|SD_Thread + array types not implemented"); @@ -2299,8 +2294,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( Address Alloca = Address::invalid(); Address Object = createReferenceTemporary(*this, M, E, &Alloca); - if (auto Var = - dyn_cast(Object.getPointer().getDefiningOp())) { + if (auto Var = dyn_cast(Object.getPointer().getDefiningOp())) { // TODO(cir): add something akin to stripPointerCasts() to ptr above assert(0 && "NYI"); } else { @@ -2440,17 +2434,17 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, builder.restoreInsertionPoint(toInsert); // Block does not return: build empty yield. - if (mlir::isa(yieldTy)) { - builder.create(loc); + if (mlir::isa(yieldTy)) { + builder.create(loc); } else { // Block returns: set null yield value. mlir::Value op0 = builder.getNullValue(yieldTy, loc); - builder.create(loc, op0); + builder.create(loc, op0); } } }; Info.Result = builder - .create( + .create( loc, condV, /*trueBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { CIRGenFunction::LexicalScope lexScope{ @@ -2466,7 +2460,7 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, if (lhs) { yieldTy = lhs.getType(); - b.create(loc, lhs); + b.create(loc, lhs); return; } // If LHS or RHS is a throw or void expression we need @@ -2488,7 +2482,7 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, if (rhs) { yieldTy = rhs.getType(); - b.create(loc, rhs); + b.create(loc, rhs); } else { // If LHS or RHS is a throw or void expression we // need to patch arms as to properly match yield @@ -2578,7 +2572,7 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { LValue LV; auto scopeLoc = getLoc(E->getSourceRange()); - [[maybe_unused]] auto scope = builder.create( + [[maybe_unused]] auto scope = builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { CIRGenFunction::LexicalScope lexScope{*this, loc, @@ -2707,7 +2701,7 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, /// Emit an `if` on a boolean condition, filling `then` and `else` into /// appropriated regions. -mlir::cir::IfOp CIRGenFunction::buildIfOnBoolExpr( +cir::IfOp CIRGenFunction::buildIfOnBoolExpr( const clang::Expr *cond, llvm::function_ref thenBuilder, mlir::Location thenLoc, @@ -2721,9 +2715,9 @@ mlir::cir::IfOp CIRGenFunction::buildIfOnBoolExpr( // Emit the code with the fully general case. mlir::Value condV = buildOpOnBoolExpr(loc, cond); - return builder.create(loc, condV, elseLoc.has_value(), - /*thenBuilder=*/thenBuilder, - /*elseBuilder=*/elseBuilder); + return builder.create(loc, condV, elseLoc.has_value(), + /*thenBuilder=*/thenBuilder, + /*elseBuilder=*/elseBuilder); } /// TODO(cir): PGO data @@ -2753,16 +2747,16 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, auto ternaryOpRes = builder - .create( + .create( loc, condV, /*thenBuilder=*/ [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) { auto lhs = buildScalarExpr(trueExpr); - b.create(loc, lhs); + b.create(loc, lhs); }, /*elseBuilder=*/ [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) { auto rhs = buildScalarExpr(falseExpr); - b.create(loc, rhs); + b.create(loc, rhs); }) .getResult(); @@ -2798,9 +2792,9 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, // a surrounding cir.scope, make sure the alloca ends up in the surrounding // scope instead. This is necessary in order to guarantee all SSA values are // reachable during cleanups. - if (auto tryOp = llvm::dyn_cast_if_present( - entryBlock->getParentOp())) { - if (auto scopeOp = llvm::dyn_cast(tryOp->getParentOp())) + if (auto tryOp = + llvm::dyn_cast_if_present(entryBlock->getParentOp())) { + if (auto scopeOp = llvm::dyn_cast(tryOp->getParentOp())) entryBlock = &scopeOp.getRegion().front(); } @@ -2825,7 +2819,7 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy, /*var type*/ ty, name, alignIntAttr, arraySize); if (currVarDecl) { - auto alloca = cast(addr.getDefiningOp()); + auto alloca = cast(addr.getDefiningOp()); alloca.setAstAttr(ASTVarDeclAttr::get(&getMLIRContext(), currVarDecl)); } } @@ -2889,15 +2883,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile, if (const auto *ClangVecTy = ty->getAs()) { // Handle vectors of size 3 like size 4 for better performance. - const auto VTy = cast(ElemTy); + const auto VTy = cast(ElemTy); // TODO(CIR): this has fallen out of sync with codegen llvm_unreachable("NYI: Special treatment of 3-element vector store"); // if (!CGM.getCodeGenOpts().PreserveVec3Type && // ClangVecTy->getNumElements() == 3) { // auto loc = addr.getPointer().getLoc(); - // auto vec4Ty = - // mlir::cir::VectorType::get(VTy.getContext(), VTy.getEltType(), 4); + // auto vec4Ty = cir::VectorType::get(VTy.getContext(), VTy.getEltType(), 4); // Address Cast = addr.withElementType(vec4Ty); // // Now load value. // mlir::Value V = builder.createLoad(loc, Cast); @@ -2909,11 +2902,11 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile, } auto Ptr = addr.getPointer(); - if (mlir::isa(ElemTy)) { - ElemTy = mlir::cir::IntType::get(&getMLIRContext(), 8, true); - auto ElemPtrTy = mlir::cir::PointerType::get(&getMLIRContext(), ElemTy); - Ptr = builder.create(loc, ElemPtrTy, - mlir::cir::CastKind::bitcast, Ptr); + if (mlir::isa(ElemTy)) { + ElemTy = cir::IntType::get(&getMLIRContext(), 8, true); + auto ElemPtrTy = cir::PointerType::get(&getMLIRContext(), ElemTy); + Ptr = builder.create(loc, ElemPtrTy, cir::CastKind::bitcast, + Ptr); } mlir::Value Load = builder.CIRBaseBuilderTy::createLoad(loc, Ptr, isVolatile); @@ -2971,9 +2964,9 @@ Address CIRGenFunction::buildLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo, TBAAAccessInfo *pointeeTBAAInfo) { assert(!refLVal.isVolatile() && "NYI"); - mlir::cir::LoadOp load = builder.create( - loc, refLVal.getAddress().getElementType(), - refLVal.getAddress().getPointer()); + cir::LoadOp load = + builder.create(loc, refLVal.getAddress().getElementType(), + refLVal.getAddress().getPointer()); // TODO(cir): DecorateInstructionWithTBAA relevant for us? assert(!cir::MissingFeatures::tbaa()); @@ -2996,7 +2989,7 @@ LValue CIRGenFunction::buildLoadOfReferenceLValue(LValue RefLVal, void CIRGenFunction::buildUnreachable(SourceLocation Loc) { if (SanOpts.has(SanitizerKind::Unreachable)) llvm_unreachable("NYI"); - builder.create(getLoc(Loc)); + builder.create(getLoc(Loc)); } //===----------------------------------------------------------------------===// @@ -3062,29 +3055,31 @@ Address CIRGenFunction::CreateTempAlloca(mlir::Type Ty, CharUnits Align, /// This creates an alloca and inserts it into the entry block if \p ArraySize /// is nullptr, otherwise inserts it at the current insertion point of the /// builder. -mlir::cir::AllocaOp -CIRGenFunction::CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, - const Twine &Name, mlir::Value ArraySize, - bool insertIntoFnEntryBlock) { - return cast(buildAlloca(Name.str(), Ty, Loc, CharUnits(), - insertIntoFnEntryBlock, - ArraySize) - .getDefiningOp()); +cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty, + mlir::Location Loc, + const Twine &Name, + mlir::Value ArraySize, + bool insertIntoFnEntryBlock) { + return cast(buildAlloca(Name.str(), Ty, Loc, CharUnits(), + insertIntoFnEntryBlock, ArraySize) + .getDefiningOp()); } /// This creates an alloca and inserts it into the provided insertion point -mlir::cir::AllocaOp CIRGenFunction::CreateTempAlloca( - mlir::Type Ty, mlir::Location Loc, const Twine &Name, - mlir::OpBuilder::InsertPoint ip, mlir::Value ArraySize) { +cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty, + mlir::Location Loc, + const Twine &Name, + mlir::OpBuilder::InsertPoint ip, + mlir::Value ArraySize) { assert(ip.isSet() && "Insertion point is not set"); - return cast( + return cast( buildAlloca(Name.str(), Ty, Loc, CharUnits(), ip, ArraySize) .getDefiningOp()); } /// Just like CreateTempAlloca above, but place the alloca into the function /// entry basic block instead. -mlir::cir::AllocaOp CIRGenFunction::CreateTempAllocaInFnEntryBlock( +cir::AllocaOp CIRGenFunction::CreateTempAllocaInFnEntryBlock( mlir::Type Ty, mlir::Location Loc, const Twine &Name, mlir::Value ArraySize) { return CreateTempAlloca(Ty, Loc, Name, ArraySize, @@ -3252,7 +3247,7 @@ mlir::Value CIRGenFunction::buildScalarConstant( LValue CIRGenFunction::buildPredefinedLValue(const PredefinedExpr *E) { const auto *SL = E->getFunctionName(); assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); - auto Fn = dyn_cast(CurFn); + auto Fn = dyn_cast(CurFn); assert(Fn && "other callables NYI"); StringRef FnName = Fn.getName(); if (FnName.starts_with("\01")) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index d8bd131c278d..2218838ac7d6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -149,9 +149,9 @@ class AggExprEmitter : public StmtVisitor { void buildCopy(QualType type, const AggValueSlot &dest, const AggValueSlot &src); - void buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, - QualType ArrayQTy, Expr *ExprToVisit, - ArrayRef Args, Expr *ArrayFiller); + void buildArrayInit(Address DestPtr, cir::ArrayType AType, QualType ArrayQTy, + Expr *ExprToVisit, ArrayRef Args, + Expr *ArrayFiller); AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) { if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T)) @@ -470,7 +470,7 @@ static bool isTrivialFiller(Expr *E) { return false; } -void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, +void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType, QualType ArrayQTy, Expr *ExprToVisit, ArrayRef Args, Expr *ArrayFiller) { uint64_t NumInitElements = Args.size(); @@ -483,15 +483,15 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, QualType elementPtrType = CGF.getContext().getPointerType(elementType); auto cirElementType = CGF.convertType(elementType); - auto cirAddrSpace = mlir::cast_if_present( + auto cirAddrSpace = mlir::cast_if_present( DestPtr.getType().getAddrSpace()); auto cirElementPtrType = CGF.getBuilder().getPointerTo(cirElementType, cirAddrSpace); auto loc = CGF.getLoc(ExprToVisit->getSourceRange()); // Cast from cir.ptr to cir.ptr - auto begin = CGF.getBuilder().create( - loc, cirElementPtrType, mlir::cir::CastKind::array_to_ptrdecay, + auto begin = CGF.getBuilder().create( + loc, cirElementPtrType, cir::CastKind::array_to_ptrdecay, DestPtr.getPointer()); CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); @@ -524,11 +524,11 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, for (uint64_t i = 0; i != NumInitElements; ++i) { if (i == 1) one = CGF.getBuilder().getConstInt( - loc, mlir::cast(CGF.PtrDiffTy), 1); + loc, mlir::cast(CGF.PtrDiffTy), 1); // Advance to the next element. if (i > 0) { - element = CGF.getBuilder().create( + element = CGF.getBuilder().create( loc, cirElementPtrType, element, one); // Tell the cleanup that it needs to destroy up to this @@ -559,10 +559,10 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, // Advance to the start of the rest of the array. if (NumInitElements) { - auto one = builder.getConstInt( - loc, mlir::cast(CGF.PtrDiffTy), 1); - element = builder.create(loc, cirElementPtrType, - element, one); + auto one = + builder.getConstInt(loc, mlir::cast(CGF.PtrDiffTy), 1); + element = builder.create(loc, cirElementPtrType, + element, one); assert(!endOfInit.isValid() && "destructed types NIY"); } @@ -576,8 +576,8 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, // Compute the end of array auto numArrayElementsConst = builder.getConstInt( - loc, mlir::cast(CGF.PtrDiffTy), NumArrayElements); - mlir::Value end = builder.create( + loc, mlir::cast(CGF.PtrDiffTy), NumArrayElements); + mlir::Value end = builder.create( loc, cirElementPtrType, begin, numArrayElementsConst); builder.createDoWhile( @@ -586,8 +586,8 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, [&](mlir::OpBuilder &b, mlir::Location loc) { auto currentElement = builder.createLoad(loc, tmpAddr); mlir::Type boolTy = CGF.getCIRType(CGF.getContext().BoolTy); - auto cmp = builder.create( - loc, boolTy, mlir::cir::CmpOpKind::ne, currentElement, end); + auto cmp = builder.create(loc, boolTy, cir::CmpOpKind::ne, + currentElement, end); builder.createCondition(cmp); }, /*bodyBuilder=*/ @@ -611,8 +611,8 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, mlir::cir::ArrayType AType, // Advance pointer and store them to temporary variable auto one = builder.getConstInt( - loc, mlir::cast(CGF.PtrDiffTy), 1); - auto nextElement = builder.create( + loc, mlir::cast(CGF.PtrDiffTy), 1); + auto nextElement = builder.create( loc, cirElementPtrType, currentElement, one); CGF.buildStoreThroughLValue(RValue::get(nextElement), tmpLV); @@ -893,11 +893,10 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { auto &builder = CGF.getBuilder(); auto scopeLoc = CGF.getLoc(E->getSourceRange()); mlir::OpBuilder::InsertPoint scopeBegin; - builder.create( - scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - scopeBegin = b.saveInsertionPoint(); - }); + builder.create(scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + scopeBegin = b.saveInsertionPoint(); + }); { mlir::OpBuilder::InsertionGuard guard(builder); @@ -1249,7 +1248,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( // Handle initialization of an array. if (ExprToVisit->getType()->isConstantArrayType()) { - auto AType = cast(Dest.getAddress().getElementType()); + auto AType = cast(Dest.getAddress().getElementType()); buildArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit, InitExprs, ArrayFiller); return; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 6d5a20cbd4ea..136480f9e277 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -560,7 +560,7 @@ static mlir::Value buildCXXNewAllocSize(CIRGenFunction &CGF, CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type); sizeWithoutCookie = CGF.getBuilder().getConstant( CGF.getLoc(e->getSourceRange()), - mlir::cir::IntAttr::get(CGF.SizeTy, typeSize.getQuantity())); + cir::IntAttr::get(CGF.SizeTy, typeSize.getQuantity())); return sizeWithoutCookie; } @@ -1034,14 +1034,14 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { if (nullCheck) { mlir::Value nullPtr = builder.getNullPtr(allocation.getPointer().getType(), loc); - nullCmpResult = builder.createCompare(loc, mlir::cir::CmpOpKind::ne, + nullCmpResult = builder.createCompare(loc, cir::CmpOpKind::ne, allocation.getPointer(), nullPtr); preIfBody = builder.saveInsertionPoint(); - builder.create(loc, nullCmpResult, - /*withElseRegion=*/false, - [&](mlir::OpBuilder &, mlir::Location) { - ifBody = builder.saveInsertionPoint(); - }); + builder.create(loc, nullCmpResult, + /*withElseRegion=*/false, + [&](mlir::OpBuilder &, mlir::Location) { + ifBody = builder.saveInsertionPoint(); + }); postIfBody = builder.saveInsertionPoint(); } @@ -1071,7 +1071,7 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { allocatorArgs); operatorDeleteCleanup = EHStack.stable_begin(); cleanupDominator = - builder.create(getLoc(E->getSourceRange())) + builder.create(getLoc(E->getSourceRange())) .getOperation(); } @@ -1143,7 +1143,7 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { // Reset insertion point to resume back to post ifOp. if (postIfBody.isSet()) { - builder.create(loc); + builder.create(loc); builder.restoreInsertionPoint(postIfBody); } } @@ -1185,7 +1185,7 @@ static RValue buildNewDeleteCall(CIRGenFunction &CGF, const FunctionDecl *CalleeDecl, const FunctionProtoType *CalleeType, const CallArgList &Args) { - mlir::cir::CIRCallOpInterface CallOrTryCall; + cir::CIRCallOpInterface CallOrTryCall; auto CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl); CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl)); @@ -1248,7 +1248,7 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, if (Params.Size) { QualType SizeType = *ParamTypeIt++; CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy); - assert(SizeTy && "expected mlir::cir::IntType"); + assert(SizeTy && "expected cir::IntType"); auto Size = builder.getConstInt(*currSrcLoc, ConvertType(SizeType), DeleteTypeSize.getQuantity()); @@ -1263,7 +1263,7 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, if (!CookieSize.isZero()) { // Uncomment upon adding testcase. // builder.createBinop( - // Size, mlir::cir::BinOpKind::Add, + // Size, cir::BinOpKind::Add, // builder.getConstInt(*currSrcLoc, SizeTy, // CookieSize.getQuantity())); llvm_unreachable("NYI"); @@ -1293,7 +1293,7 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF, mlir::Location Loc, QualType DestTy) { mlir::Type DestCIRTy = CGF.ConvertType(DestTy); - assert(mlir::isa(DestCIRTy) && + assert(mlir::isa(DestCIRTy) && "result of dynamic_cast should be a ptr"); mlir::Value NullPtrValue = CGF.getBuilder().getNullPtr(DestCIRTy, Loc); @@ -1346,7 +1346,7 @@ mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, if (DCE->isAlwaysNull()) return buildDynamicCastToNull(*this, loc, destTy); - auto destCirTy = mlir::cast(ConvertType(destTy)); + auto destCirTy = mlir::cast(ConvertType(destTy)); return CGM.getCXXABI().buildDynamicCast(*this, loc, srcRecordTy, destRecordTy, destCirTy, isRefCast, ThisAddr); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 2113539bda98..61b1979f0ebf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -377,15 +377,15 @@ mlir::Value ComplexExprEmitter::buildComplexToComplexCast(mlir::Value Val, QualType SrcElemTy = SrcType->castAs()->getElementType(); QualType DestElemTy = DestType->castAs()->getElementType(); - mlir::cir::CastKind CastOpKind; + cir::CastKind CastOpKind; if (SrcElemTy->isFloatingType() && DestElemTy->isFloatingType()) - CastOpKind = mlir::cir::CastKind::float_complex; + CastOpKind = cir::CastKind::float_complex; else if (SrcElemTy->isFloatingType() && DestElemTy->isIntegerType()) - CastOpKind = mlir::cir::CastKind::float_complex_to_int_complex; + CastOpKind = cir::CastKind::float_complex_to_int_complex; else if (SrcElemTy->isIntegerType() && DestElemTy->isFloatingType()) - CastOpKind = mlir::cir::CastKind::int_complex_to_float_complex; + CastOpKind = cir::CastKind::int_complex_to_float_complex; else if (SrcElemTy->isIntegerType() && DestElemTy->isIntegerType()) - CastOpKind = mlir::cir::CastKind::int_complex; + CastOpKind = cir::CastKind::int_complex; else llvm_unreachable("unexpected src type or dest type"); @@ -397,11 +397,11 @@ mlir::Value ComplexExprEmitter::buildScalarToComplexCast(mlir::Value Val, QualType SrcType, QualType DestType, SourceLocation Loc) { - mlir::cir::CastKind CastOpKind; + cir::CastKind CastOpKind; if (SrcType->isFloatingType()) - CastOpKind = mlir::cir::CastKind::float_to_complex; + CastOpKind = cir::CastKind::float_to_complex; else if (SrcType->isIntegerType()) - CastOpKind = mlir::cir::CastKind::int_to_complex; + CastOpKind = cir::CastKind::int_to_complex; else llvm_unreachable("unexpected src type"); @@ -548,7 +548,7 @@ mlir::Value ComplexExprEmitter::VisitPlus(const UnaryOperator *E, Op = Visit(E->getSubExpr()); return Builder.createUnaryOp(CGF.getLoc(E->getExprLoc()), - mlir::cir::UnaryOpKind::Plus, Op); + cir::UnaryOpKind::Plus, Op); } mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E, @@ -571,13 +571,13 @@ mlir::Value ComplexExprEmitter::VisitMinus(const UnaryOperator *E, Op = Visit(E->getSubExpr()); return Builder.createUnaryOp(CGF.getLoc(E->getExprLoc()), - mlir::cir::UnaryOpKind::Minus, Op); + cir::UnaryOpKind::Minus, Op); } mlir::Value ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) { mlir::Value Op = Visit(E->getSubExpr()); return Builder.createUnaryOp(CGF.getLoc(E->getExprLoc()), - mlir::cir::UnaryOpKind::Not, Op); + cir::UnaryOpKind::Not, Op); } ComplexExprEmitter::BinOpInfo @@ -780,19 +780,19 @@ mlir::Value ComplexExprEmitter::buildBinSub(const BinOpInfo &Op) { return CGF.getBuilder().createComplexSub(Op.Loc, Op.LHS, Op.RHS); } -static mlir::cir::ComplexRangeKind +static cir::ComplexRangeKind getComplexRangeAttr(LangOptions::ComplexRangeKind range) { switch (range) { case LangOptions::CX_Full: - return mlir::cir::ComplexRangeKind::Full; + return cir::ComplexRangeKind::Full; case LangOptions::CX_Improved: - return mlir::cir::ComplexRangeKind::Improved; + return cir::ComplexRangeKind::Improved; case LangOptions::CX_Promoted: - return mlir::cir::ComplexRangeKind::Promoted; + return cir::ComplexRangeKind::Promoted; case LangOptions::CX_Basic: - return mlir::cir::ComplexRangeKind::Basic; + return cir::ComplexRangeKind::Basic; case LangOptions::CX_None: - return mlir::cir::ComplexRangeKind::None; + return cir::ComplexRangeKind::None; } } @@ -831,20 +831,20 @@ LValue ComplexExprEmitter::buildBinAssignLValue(const BinaryOperator *E, mlir::Value ComplexExprEmitter::VisitImaginaryLiteral(const ImaginaryLiteral *IL) { auto Loc = CGF.getLoc(IL->getExprLoc()); - auto Ty = mlir::cast(CGF.getCIRType(IL->getType())); + auto Ty = mlir::cast(CGF.getCIRType(IL->getType())); auto ElementTy = Ty.getElementTy(); mlir::TypedAttr RealValueAttr; mlir::TypedAttr ImagValueAttr; - if (mlir::isa(ElementTy)) { + if (mlir::isa(ElementTy)) { auto ImagValue = cast(IL->getSubExpr())->getValue(); - RealValueAttr = mlir::cir::IntAttr::get(ElementTy, 0); - ImagValueAttr = mlir::cir::IntAttr::get(ElementTy, ImagValue); - } else if (mlir::isa(ElementTy)) { + RealValueAttr = cir::IntAttr::get(ElementTy, 0); + ImagValueAttr = cir::IntAttr::get(ElementTy, ImagValue); + } else if (mlir::isa(ElementTy)) { auto ImagValue = cast(IL->getSubExpr())->getValue(); - RealValueAttr = mlir::cir::FPAttr::get( + RealValueAttr = cir::FPAttr::get( ElementTy, llvm::APFloat::getZero(ImagValue.getSemantics())); - ImagValueAttr = mlir::cir::FPAttr::get(ElementTy, ImagValue); + ImagValueAttr = cir::FPAttr::get(ElementTy, ImagValue); } else llvm_unreachable("unexpected complex element type"); @@ -876,21 +876,19 @@ mlir::Value CIRGenFunction::buildPromotedComplexExpr(const Expr *E, mlir::Value CIRGenFunction::buildPromotedValue(mlir::Value result, QualType PromotionType) { - assert(mlir::isa( - mlir::cast(result.getType()) - .getElementTy()) && + assert(mlir::isa( + mlir::cast(result.getType()).getElementTy()) && "integral complex will never be promoted"); - return builder.createCast(mlir::cir::CastKind::float_complex, result, + return builder.createCast(cir::CastKind::float_complex, result, ConvertType(PromotionType)); } mlir::Value CIRGenFunction::buildUnPromotedValue(mlir::Value result, QualType UnPromotionType) { - assert(mlir::isa( - mlir::cast(result.getType()) - .getElementTy()) && + assert(mlir::isa( + mlir::cast(result.getType()).getElementTy()) && "integral complex will never be promoted"); - return builder.createCast(mlir::cir::CastKind::float_complex, result, + return builder.createCast(cir::CastKind::float_complex, result, ConvertType(UnPromotionType)); } @@ -967,8 +965,7 @@ mlir::Value CIRGenFunction::buildComplexPrePostIncDec(const UnaryOperator *E, mlir::Value InVal = buildLoadOfComplex(LV, E->getExprLoc()); auto Loc = getLoc(E->getExprLoc()); - auto OpKind = - isInc ? mlir::cir::UnaryOpKind::Inc : mlir::cir::UnaryOpKind::Dec; + auto OpKind = isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; mlir::Value IncVal = builder.createUnaryOp(Loc, OpKind, InVal); // Store the updated result through the lvalue. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index ce27635e3f44..d4d031158d90 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -230,7 +230,7 @@ bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits, if (WantedBits == CharWidth) { // Got a full byte: just add it directly. - add(mlir::cir::IntAttr::get(charTy, BitsThisChar), OffsetInChars, + add(cir::IntAttr::get(charTy, BitsThisChar), OffsetInChars, AllowOverwrite); } else { // Partial byte: update the existing integer if there is one. If we @@ -257,17 +257,16 @@ bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits, bool isNull = false; if (*FirstElemToUpdate < Elems.size()) { auto firstEltToUpdate = - dyn_cast(Elems[*FirstElemToUpdate]); + dyn_cast(Elems[*FirstElemToUpdate]); isNull = firstEltToUpdate && firstEltToUpdate.isNullValue(); } if (*FirstElemToUpdate == *LastElemToUpdate || isNull) { // All existing bits are either zero or undef. - add(CGM.getBuilder().getAttr(charTy, BitsThisChar), + add(CGM.getBuilder().getAttr(charTy, BitsThisChar), OffsetInChars, /*AllowOverwrite*/ true); } else { - mlir::cir::IntAttr CI = - dyn_cast(Elems[*FirstElemToUpdate]); + cir::IntAttr CI = dyn_cast(Elems[*FirstElemToUpdate]); // In order to perform a partial update, we need the existing bitwise // value, which we can only extract for a constant int. // auto *CI = dyn_cast(ToUpdate); @@ -280,7 +279,7 @@ bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits, "unexpectedly overwriting bitfield"); BitsThisChar |= (CI.getValue() & ~UpdateMask); Elems[*FirstElemToUpdate] = - CGM.getBuilder().getAttr(charTy, BitsThisChar); + CGM.getBuilder().getAttr(charTy, BitsThisChar); } } @@ -346,13 +345,13 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( ConstantAggregateBuilderUtils Utils(CGM); if (Elems.empty()) - return mlir::cir::UndefAttr::get(CGM.getBuilder().getContext(), DesiredTy); + return cir::UndefAttr::get(CGM.getBuilder().getContext(), DesiredTy); auto Offset = [&](size_t I) { return Offsets[I] - StartOffset; }; // If we want an array type, see if all the elements are the same type and // appropriately spaced. - if (auto aty = mlir::dyn_cast(DesiredTy)) { + if (auto aty = mlir::dyn_cast(DesiredTy)) { llvm_unreachable("NYI"); } @@ -386,7 +385,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( // element. But in CIR, the union has the information for all members. So if // we only pass a single init element, we may be in trouble. We solve the // problem by appending placeholder attribute for the uninitialized fields. - if (auto desired = dyn_cast(DesiredTy); + if (auto desired = dyn_cast(DesiredTy); desired && desired.isUnion() && Elems.size() != desired.getNumElements()) { llvm::SmallVector UnionElemsStorage; @@ -396,7 +395,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( Ty && Ty.getType() == elemTy) UnionElemsStorage.push_back(Elems.back()); else - UnionElemsStorage.push_back(mlir::cir::InactiveUnionFieldAttr::get( + UnionElemsStorage.push_back(cir::InactiveUnionFieldAttr::get( CGM.getBuilder().getContext(), elemTy)); } @@ -454,7 +453,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( Packed ? PackedElems : UnpackedElems); auto strType = builder.getCompleteStructType(arrAttr, Packed); - if (auto desired = dyn_cast(DesiredTy)) + if (auto desired = dyn_cast(DesiredTy)) if (desired.isLayoutIdentical(strType)) strType = desired; @@ -528,7 +527,7 @@ class ConstStructBuilder { bool AllowOverwrite = false); bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, - mlir::cir::IntAttr InitExpr, bool AllowOverwrite = false); + cir::IntAttr InitExpr, bool AllowOverwrite = false); bool Build(InitListExpr *ILE, bool AllowOverwrite); bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase, @@ -554,8 +553,7 @@ bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars, } bool ConstStructBuilder::AppendBitField(const FieldDecl *Field, - uint64_t FieldOffset, - mlir::cir::IntAttr CI, + uint64_t FieldOffset, cir::IntAttr CI, bool AllowOverwrite) { const auto &RL = CGM.getTypes().getCIRGenRecordLayout(Field->getParent()); const auto &Info = RL.getBitFieldInfo(Field); @@ -707,7 +705,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) { AllowOverwrite = true; } else { // Otherwise we have a bitfield. - if (auto constInt = dyn_cast(EltInit)) { + if (auto constInt = dyn_cast(EltInit)) { if (!AppendBitField(Field, Layout.getFieldOffset(FieldNo), constInt, AllowOverwrite)) return false; @@ -1068,8 +1066,8 @@ class ConstExprEmitter } mlir::Attribute EmitVectorInitialization(InitListExpr *ILE, QualType T) { - mlir::cir::VectorType VecTy = - mlir::cast(CGM.getTypes().ConvertType(T)); + cir::VectorType VecTy = + mlir::cast(CGM.getTypes().ConvertType(T)); unsigned NumElements = VecTy.getSize(); unsigned NumInits = ILE->getNumInits(); assert(NumElements >= NumInits && "Too many initializers for a vector"); @@ -1086,7 +1084,7 @@ class ConstExprEmitter for (unsigned i = NumInits; i < NumElements; ++i) { Elts.push_back(CGM.getBuilder().getZeroInitAttr(VecTy.getEltType())); } - return mlir::cir::ConstVectorAttr::get( + return cir::ConstVectorAttr::get( VecTy, mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Elts)); } @@ -1197,8 +1195,8 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, return builder.getConstArray( mlir::ArrayAttr::get(builder.getContext(), Eles), - mlir::cir::ArrayType::get(builder.getContext(), CommonElementType, - ArrayBound)); + cir::ArrayType::get(builder.getContext(), CommonElementType, + ArrayBound)); // TODO(cir): If all the elements had the same type up to the trailing // zeroes, emit a struct of two arrays (the nonzero data and the // zeroinitializer). Use DesiredType to get the element type. @@ -1218,8 +1216,8 @@ buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, return builder.getConstArray( mlir::ArrayAttr::get(builder.getContext(), Eles), - mlir::cir::ArrayType::get(builder.getContext(), CommonElementType, - ArrayBound)); + cir::ArrayType::get(builder.getContext(), CommonElementType, + ArrayBound)); } SmallVector Eles; @@ -1247,7 +1245,7 @@ struct ConstantLValue { /*implicit*/ ConstantLValue(mlir::Value value, bool hasOffsetApplied = false) : Value(value), HasOffsetApplied(hasOffsetApplied) {} - /*implicit*/ ConstantLValue(mlir::cir::GlobalViewAttr address) + /*implicit*/ ConstantLValue(cir::GlobalViewAttr address) : Value(address), HasOffsetApplied(false) {} ConstantLValue(std::nullptr_t) : ConstantLValue({}, false) {} @@ -1318,13 +1316,12 @@ class ConstantLValueEmitter // Handle attribute constant LValues. if (auto Attr = mlir::dyn_cast(C.Value)) { - if (auto GV = mlir::dyn_cast(Attr)) { - auto baseTy = - mlir::cast(GV.getType()).getPointee(); + if (auto GV = mlir::dyn_cast(Attr)) { + auto baseTy = mlir::cast(GV.getType()).getPointee(); auto destTy = CGM.getTypes().convertTypeForMem(DestType); assert(!GV.getIndices() && "Global view is already indexed"); - return mlir::cir::GlobalViewAttr::get(destTy, GV.getSymbol(), - getOffset(baseTy)); + return cir::GlobalViewAttr::get(destTy, GV.getSymbol(), + getOffset(baseTy)); } llvm_unreachable("Unsupported attribute type to offset"); } @@ -1347,7 +1344,7 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { // non-zero null pointer and addrspace casts that aren't trivially // represented in LLVM IR. auto destTy = CGM.getTypes().convertTypeForMem(DestType); - assert(mlir::isa(destTy)); + assert(mlir::isa(destTy)); // If there's no base at all, this is a null or absolute pointer, // possibly cast back to an integer type. @@ -1370,7 +1367,7 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { // Convert to the appropriate type; this could be an lvalue for // an integer. FIXME: performAddrSpaceCast - if (mlir::isa(destTy)) { + if (mlir::isa(destTy)) { if (value.is()) return value.get(); llvm_unreachable("NYI"); @@ -1383,7 +1380,7 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { /// bitcast to pointer type. mlir::Attribute ConstantLValueEmitter::tryEmitAbsolute(mlir::Type destTy) { // If we're producing a pointer, this is easy. - auto destPtrTy = mlir::dyn_cast(destTy); + auto destPtrTy = mlir::dyn_cast(destTy); assert(destPtrTy && "expected !cir.ptr type"); return CGM.getBuilder().getConstPtrAttr( destPtrTy, Value.getLValueOffset().getQuantity()); @@ -1404,7 +1401,7 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) { auto fop = CGM.GetAddrOfFunction(FD); auto builder = CGM.getBuilder(); auto ctxt = builder.getContext(); - return mlir::cir::GlobalViewAttr::get( + return cir::GlobalViewAttr::get( builder.getPointerTo(fop.getFunctionType()), mlir::FlatSymbolRefAttr::get(ctxt, fop.getSymNameAttr())); } @@ -1454,7 +1451,7 @@ tryEmitGlobalCompoundLiteral(ConstantEmitter &emitter, CGM.getTypes().convertTypeForMem(E->getType()), E->getType().isConstantStorage(CGM.getASTContext(), false, false)); GV.setInitialValueAttr(C); - GV.setLinkage(mlir::cir::GlobalLinkageKind::InternalLinkage); + GV.setLinkage(cir::GlobalLinkageKind::InternalLinkage); CharUnits Align = CGM.getASTContext().getTypeAlignInChars(E->getType()); GV.setAlignment(Align.getAsAlign().value()); @@ -1532,7 +1529,7 @@ ConstantLValue ConstantLValueEmitter::VisitMaterializeTemporaryExpr( mlir::Operation *globalTemp = CGM.getAddrOfGlobalTemporary(expr, inner); CIRGenBuilderTy builder = CGM.getBuilder(); return ConstantLValue( - builder.getGlobalViewAttr(mlir::cast(globalTemp))); + builder.getGlobalViewAttr(mlir::cast(globalTemp))); } //===----------------------------------------------------------------------===// @@ -1572,7 +1569,7 @@ mlir::Attribute ConstantEmitter::emitForInitializer(const APValue &value, return c; } -void ConstantEmitter::finalize(mlir::cir::GlobalOp global) { +void ConstantEmitter::finalize(cir::GlobalOp global) { assert(InitializedNonAbstract && "finalizing emitter that was used for abstract emission?"); assert(!Finalized && "finalizing emitter multiple times"); @@ -1623,9 +1620,8 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { // assignments and whatnots). Since this is for globals shouldn't // be a problem for the near future. if (CD->isTrivial() && CD->isDefaultConstructor()) - return mlir::cir::ZeroAttr::get( - CGM.getBuilder().getContext(), - CGM.getTypes().ConvertType(D.getType())); + return cir::ZeroAttr::get(CGM.getBuilder().getContext(), + CGM.getTypes().ConvertType(D.getType())); } } InConstantContext = D.hasConstantInitialization(); @@ -1727,8 +1723,8 @@ mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, assert(innerSize < outerSize && "emitted over-large constant for atomic"); auto &builder = CGM.getBuilder(); auto zeroArray = builder.getZeroInitAttr( - mlir::cir::ArrayType::get(builder.getContext(), builder.getUInt8Ty(), - (outerSize - innerSize) / 8)); + cir::ArrayType::get(builder.getContext(), builder.getUInt8Ty(), + (outerSize - innerSize) / 8)); SmallVector anonElts = {C, zeroArray}; auto arrAttr = mlir::ArrayAttr::get(builder.getContext(), anonElts); return builder.getAnonConstStruct(arrAttr, false); @@ -1736,7 +1732,7 @@ mlir::Attribute ConstantEmitter::emitForMemory(CIRGenModule &CGM, // Zero-extend bool. auto typed = mlir::dyn_cast(C); - if (typed && mlir::isa(typed.getType())) { + if (typed && mlir::isa(typed.getType())) { // Already taken care given that bool values coming from // integers only carry true/false. } @@ -1785,10 +1781,10 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, assert(0 && "not implemented"); case APValue::Int: { mlir::Type ty = CGM.getCIRType(DestType); - if (mlir::isa(ty)) + if (mlir::isa(ty)) return builder.getCIRBoolAttr(Value.getInt().getZExtValue()); - assert(mlir::isa(ty) && "expected integral type"); - return CGM.getBuilder().getAttr(ty, Value.getInt()); + assert(mlir::isa(ty) && "expected integral type"); + return CGM.getBuilder().getAttr(ty, Value.getInt()); } case APValue::Float: { const llvm::APFloat &Init = Value.getFloat(); @@ -1798,9 +1794,9 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, assert(0 && "not implemented"); else { mlir::Type ty = CGM.getCIRType(DestType); - assert(mlir::isa(ty) && + assert(mlir::isa(ty) && "expected floating-point type"); - return CGM.getBuilder().getAttr(ty, Init); + return CGM.getBuilder().getAttr(ty, Init); } } case APValue::Array: { @@ -1867,8 +1863,8 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, Elts.push_back(C); } auto Desired = - mlir::cast(CGM.getTypes().ConvertType(DestType)); - return mlir::cir::ConstVectorAttr::get( + mlir::cast(CGM.getTypes().ConvertType(DestType)); + return cir::ConstVectorAttr::get( Desired, mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Elts)); } case APValue::MemberPointer: { @@ -1880,8 +1876,8 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, if (const auto *memberFuncDecl = dyn_cast(memberDecl)) assert(0 && "not implemented"); - auto cirTy = mlir::cast( - CGM.getTypes().ConvertType(DestType)); + auto cirTy = + mlir::cast(CGM.getTypes().ConvertType(DestType)); const auto *fieldDecl = cast(memberDecl); return builder.getDataMemberAttr(cirTy, fieldDecl->getFieldIndex()); @@ -1932,21 +1928,21 @@ mlir::Value CIRGenModule::buildMemberPointerConstant(const UnaryOperator *E) { // A member function pointer. if (const auto *methodDecl = dyn_cast(decl)) { - auto ty = mlir::cast(getCIRType(E->getType())); + auto ty = mlir::cast(getCIRType(E->getType())); if (methodDecl->isVirtual()) - return builder.create( + return builder.create( loc, ty, getCXXABI().buildVirtualMethodAttr(ty, methodDecl)); auto methodFuncOp = GetAddrOfFunction(methodDecl); - return builder.create( + return builder.create( loc, ty, builder.getMethodAttr(ty, methodFuncOp)); } - auto ty = mlir::cast(getCIRType(E->getType())); + auto ty = mlir::cast(getCIRType(E->getType())); // Otherwise, a member data pointer. const auto *fieldDecl = cast(decl); - return builder.create( + return builder.create( loc, ty, builder.getDataMemberAttr(ty, fieldDecl->getFieldIndex())); } @@ -1978,8 +1974,8 @@ mlir::Attribute ConstantEmitter::emitAbstract(SourceLocation loc, mlir::Attribute ConstantEmitter::emitNullForMemory(mlir::Location loc, CIRGenModule &CGM, QualType T) { - auto cstOp = dyn_cast( - CGM.buildNullConstant(T, loc).getDefiningOp()); + auto cstOp = + dyn_cast(CGM.buildNullConstant(T, loc).getDefiningOp()); assert(cstOp && "expected cir.const op"); return emitForMemory(CGM, cstOp.getValue(), T); } @@ -1991,7 +1987,7 @@ static mlir::TypedAttr buildNullConstant(CIRGenModule &CGM, CGM.getTypes().getCIRGenRecordLayout(record); mlir::Type ty = (asCompleteObject ? layout.getCIRType() : layout.getBaseSubobjectCIRType()); - auto structure = dyn_cast(ty); + auto structure = dyn_cast(ty); assert(structure && "expected"); unsigned numElements = structure.getNumElements(); @@ -2044,8 +2040,8 @@ static mlir::TypedAttr buildNullConstant(CIRGenModule &CGM, } mlir::MLIRContext *mlirCtx = structure.getContext(); - return mlir::cir::ConstStructAttr::get( - mlirCtx, structure, mlir::ArrayAttr::get(mlirCtx, elements)); + return cir::ConstStructAttr::get(mlirCtx, structure, + mlir::ArrayAttr::get(mlirCtx, elements)); } mlir::TypedAttr diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index c13d95e69cc4..6763bbccd089 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -54,8 +54,8 @@ struct BinOpInfo { /// Check if the binop can result in integer overflow. bool mayHaveIntegerOverflow() const { // Without constant input, we can't rule out overflow. - auto LHSCI = dyn_cast(LHS.getDefiningOp()); - auto RHSCI = dyn_cast(RHS.getDefiningOp()); + auto LHSCI = dyn_cast(LHS.getDefiningOp()); + auto RHSCI = dyn_cast(RHS.getDefiningOp()); if (!LHSCI || !RHSCI) return true; @@ -173,9 +173,9 @@ class ScalarExprEmitter : public StmtVisitor { // Leaves. mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { mlir::Type Ty = CGF.getCIRType(E->getType()); - return Builder.create( + return Builder.create( CGF.getLoc(E->getExprLoc()), Ty, - Builder.getAttr(Ty, E->getValue())); + Builder.getAttr(Ty, E->getValue())); } mlir::Value VisitFixedPointLiteral(const FixedPointLiteral *E) { @@ -183,24 +183,24 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitFloatingLiteral(const FloatingLiteral *E) { mlir::Type Ty = CGF.getCIRType(E->getType()); - assert(mlir::isa(Ty) && + assert(mlir::isa(Ty) && "expect floating-point type"); - return Builder.create( + return Builder.create( CGF.getLoc(E->getExprLoc()), Ty, - Builder.getAttr(Ty, E->getValue())); + Builder.getAttr(Ty, E->getValue())); } mlir::Value VisitCharacterLiteral(const CharacterLiteral *E) { mlir::Type Ty = CGF.getCIRType(E->getType()); auto loc = CGF.getLoc(E->getExprLoc()); - auto init = mlir::cir::IntAttr::get(Ty, E->getValue()); - return Builder.create(loc, Ty, init); + auto init = cir::IntAttr::get(Ty, E->getValue()); + return Builder.create(loc, Ty, init); } mlir::Value VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) { llvm_unreachable("NYI"); } mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { mlir::Type Ty = CGF.getCIRType(E->getType()); - return Builder.create( + return Builder.create( CGF.getLoc(E->getExprLoc()), Ty, Builder.getCIRBoolAttr(E->getValue())); } @@ -290,7 +290,7 @@ class ScalarExprEmitter : public StmtVisitor { // operation. mlir::Value VecValue = Visit(E->getBase()); mlir::Value IndexValue = Visit(E->getIdx()); - return CGF.builder.create( + return CGF.builder.create( CGF.getLoc(E->getSourceRange()), VecValue, IndexValue); } @@ -306,7 +306,7 @@ class ScalarExprEmitter : public StmtVisitor { // The undocumented form of __builtin_shufflevector. mlir::Value InputVec = Visit(E->getExpr(0)); mlir::Value IndexVec = Visit(E->getExpr(1)); - return CGF.builder.create( + return CGF.builder.create( CGF.getLoc(E->getSourceRange()), InputVec, IndexVec); } else { // The documented form of __builtin_shufflevector, where the indices are @@ -316,13 +316,13 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value Vec2 = Visit(E->getExpr(1)); SmallVector Indices; for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { - Indices.push_back(mlir::cir::IntAttr::get( - CGF.builder.getSInt64Ty(), - E->getExpr(i) - ->EvaluateKnownConstInt(CGF.getContext()) - .getSExtValue())); + Indices.push_back( + cir::IntAttr::get(CGF.builder.getSInt64Ty(), + E->getExpr(i) + ->EvaluateKnownConstInt(CGF.getContext()) + .getSExtValue())); } - return CGF.builder.create( + return CGF.builder.create( CGF.getLoc(E->getSourceRange()), CGF.getCIRType(E->getType()), Vec1, Vec2, CGF.builder.getArrayAttr(Indices)); } @@ -371,7 +371,7 @@ class ScalarExprEmitter : public StmtVisitor { // direclty in the parent scope removing the need to hoist it. assert(retAlloca.getDefiningOp() && "expected a alloca op"); CGF.getBuilder().hoistAllocaToParentRegion( - cast(retAlloca.getDefiningOp())); + cast(retAlloca.getDefiningOp())); return CGF.buildLoadOfScalar(CGF.makeAddrLValue(retAlloca, E->getType()), E->getExprLoc()); @@ -421,9 +421,9 @@ class ScalarExprEmitter : public StmtVisitor { // An interesting aspect of this is that increment is always true. // Decrement does not have this property. if (isInc && type->isBooleanType()) { - value = Builder.create( - CGF.getLoc(E->getExprLoc()), CGF.getCIRType(type), - Builder.getCIRBoolAttr(true)); + value = Builder.create(CGF.getLoc(E->getExprLoc()), + CGF.getCIRType(type), + Builder.getCIRBoolAttr(true)); } else if (type->isIntegerType()) { QualType promotedType; bool canPerformLossyDemotionCheck = false; @@ -440,9 +440,8 @@ class ScalarExprEmitter : public StmtVisitor { // TODO(cir): Currently, we store bitwidths in CIR types only for // integers. This might also be required for other types. - auto srcCirTy = mlir::dyn_cast(ConvertType(type)); - auto promotedCirTy = - mlir::dyn_cast(ConvertType(type)); + auto srcCirTy = mlir::dyn_cast(ConvertType(type)); + auto promotedCirTy = mlir::dyn_cast(ConvertType(type)); assert(srcCirTy && promotedCirTy && "Expected integer type"); assert( @@ -467,8 +466,8 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable( "unsigned integer overflow sanitized inc/dec not implemented"); } else { - auto Kind = E->isIncrementOp() ? mlir::cir::UnaryOpKind::Inc - : mlir::cir::UnaryOpKind::Dec; + auto Kind = + E->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; // NOTE(CIR): clang calls CreateAdd but folds this to a unary op value = buildUnaryOp(E, Kind, input); } @@ -488,11 +487,11 @@ class ScalarExprEmitter : public StmtVisitor { auto &builder = CGF.getBuilder(); auto amt = builder.getSInt32(amount, loc); if (CGF.getLangOpts().isSignedOverflowDefined()) { - value = builder.create(loc, value.getType(), - value, amt); + value = builder.create(loc, value.getType(), value, + amt); } else { - value = builder.create(loc, value.getType(), - value, amt); + value = builder.create(loc, value.getType(), value, + amt); assert(!cir::MissingFeatures::emitCheckedInBoundsGEP()); } } @@ -509,17 +508,15 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("cast via llvm.convert.from.fp16 is NYI"); } else { value = Builder.createCast(CGF.getLoc(E->getExprLoc()), - mlir::cir::CastKind::floating, input, + cir::CastKind::floating, input, CGF.CGM.FloatTy); } } - if (mlir::isa( - value.getType())) { + if (mlir::isa(value.getType())) { // Create the inc/dec operation. // NOTE(CIR): clang calls CreateAdd but folds this to a unary op - auto kind = - (isInc ? mlir::cir::UnaryOpKind::Inc : mlir::cir::UnaryOpKind::Dec); + auto kind = (isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec); value = buildUnaryOp(E, kind, value); } else { // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or @@ -530,20 +527,20 @@ class ScalarExprEmitter : public StmtVisitor { const llvm::fltSemantics *FS; // Don't use getFloatTypeSemantics because Half isn't // necessarily represented using the "half" LLVM type. - if (mlir::isa(value.getType())) + if (mlir::isa(value.getType())) FS = &CGF.getTarget().getLongDoubleFormat(); - else if (mlir::isa(value.getType())) + else if (mlir::isa(value.getType())) FS = &CGF.getTarget().getHalfFormat(); - else if (mlir::isa(value.getType())) + else if (mlir::isa(value.getType())) FS = &CGF.getTarget().getBFloat16Format(); else llvm_unreachable("fp128 / ppc_fp128 NYI"); F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored); auto loc = CGF.getLoc(E->getExprLoc()); - auto amt = Builder.getConstant( - loc, mlir::cir::FPAttr::get(value.getType(), F)); - value = Builder.createBinop(value, mlir::cir::BinOpKind::Add, amt); + auto amt = + Builder.getConstant(loc, cir::FPAttr::get(value.getType(), F)); + value = Builder.createBinop(value, cir::BinOpKind::Add, amt); } if (type->isHalfType() && @@ -552,7 +549,7 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("cast via llvm.convert.to.fp16 is NYI"); } else { value = Builder.createCast(CGF.getLoc(E->getExprLoc()), - mlir::cir::CastKind::floating, value, + cir::CastKind::floating, value, input.getType()); } } @@ -587,8 +584,8 @@ class ScalarExprEmitter : public StmtVisitor { bool IsInc) { // NOTE(CIR): The SignedOverflowBehavior is attached to the global ModuleOp // and the nsw behavior is handled during lowering. - auto Kind = E->isIncrementOp() ? mlir::cir::UnaryOpKind::Inc - : mlir::cir::UnaryOpKind::Dec; + auto Kind = + E->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: return buildUnaryOp(E, Kind, InVal); @@ -641,7 +638,7 @@ class ScalarExprEmitter : public StmtVisitor { else operand = Visit(E->getSubExpr()); - return buildUnaryOp(E, mlir::cir::UnaryOpKind::Plus, operand); + return buildUnaryOp(E, cir::UnaryOpKind::Plus, operand); } mlir::Value VisitUnaryMinus(const UnaryOperator *E, @@ -666,13 +663,13 @@ class ScalarExprEmitter : public StmtVisitor { // NOTE: LLVM codegen will lower this directly to either a FNeg // or a Sub instruction. In CIR this will be handled later in LowerToLLVM. - return buildUnaryOp(E, mlir::cir::UnaryOpKind::Minus, operand); + return buildUnaryOp(E, cir::UnaryOpKind::Minus, operand); } mlir::Value VisitUnaryNot(const UnaryOperator *E) { TestAndClearIgnoreResultAssign(); mlir::Value op = Visit(E->getSubExpr()); - return buildUnaryOp(E, mlir::cir::UnaryOpKind::Not, op); + return buildUnaryOp(E, cir::UnaryOpKind::Not, op); } mlir::Value VisitUnaryLNot(const UnaryOperator *E); @@ -688,9 +685,9 @@ class ScalarExprEmitter : public StmtVisitor { return Visit(E->getSubExpr()); } - mlir::Value buildUnaryOp(const UnaryOperator *E, mlir::cir::UnaryOpKind kind, + mlir::Value buildUnaryOp(const UnaryOperator *E, cir::UnaryOpKind kind, mlir::Value input) { - return Builder.create( + return Builder.create( CGF.getLoc(E->getSourceRange().getBegin()), input.getType(), kind, input); } @@ -916,23 +913,23 @@ class ScalarExprEmitter : public StmtVisitor { QualType LHSTy = E->getLHS()->getType(); QualType RHSTy = E->getRHS()->getType(); - auto ClangCmpToCIRCmp = [](auto ClangCmp) -> mlir::cir::CmpOpKind { + auto ClangCmpToCIRCmp = [](auto ClangCmp) -> cir::CmpOpKind { switch (ClangCmp) { case BO_LT: - return mlir::cir::CmpOpKind::lt; + return cir::CmpOpKind::lt; case BO_GT: - return mlir::cir::CmpOpKind::gt; + return cir::CmpOpKind::gt; case BO_LE: - return mlir::cir::CmpOpKind::le; + return cir::CmpOpKind::le; case BO_GE: - return mlir::cir::CmpOpKind::ge; + return cir::CmpOpKind::ge; case BO_EQ: - return mlir::cir::CmpOpKind::eq; + return cir::CmpOpKind::eq; case BO_NE: - return mlir::cir::CmpOpKind::ne; + return cir::CmpOpKind::ne; default: llvm_unreachable("unsupported comparison kind"); - return mlir::cir::CmpOpKind(-1); + return cir::CmpOpKind(-1); } }; @@ -951,10 +948,10 @@ class ScalarExprEmitter : public StmtVisitor { } else { // Other kinds of vectors. Element-wise comparison returning // a vector. - mlir::cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); - return Builder.create( - CGF.getLoc(BOInfo.Loc), CGF.getCIRType(BOInfo.FullType), Kind, - BOInfo.LHS, BOInfo.RHS); + cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); + return Builder.create(CGF.getLoc(BOInfo.Loc), + CGF.getCIRType(BOInfo.FullType), + Kind, BOInfo.LHS, BOInfo.RHS); } } if (BOInfo.isFixedPointOp()) { @@ -965,15 +962,15 @@ class ScalarExprEmitter : public StmtVisitor { // Unsigned integers and pointers. if (CGF.CGM.getCodeGenOpts().StrictVTablePointers && - mlir::isa(LHS.getType()) && - mlir::isa(RHS.getType())) { + mlir::isa(LHS.getType()) && + mlir::isa(RHS.getType())) { llvm_unreachable("NYI"); } - mlir::cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); - return Builder.create(CGF.getLoc(BOInfo.Loc), - CGF.getCIRType(BOInfo.FullType), - Kind, BOInfo.LHS, BOInfo.RHS); + cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); + return Builder.create(CGF.getLoc(BOInfo.Loc), + CGF.getCIRType(BOInfo.FullType), Kind, + BOInfo.LHS, BOInfo.RHS); } } else { // Complex Comparison: can only be an equality comparison. assert(0 && "not implemented"); @@ -985,8 +982,8 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value buildFloatToBoolConversion(mlir::Value src, mlir::Location loc) { auto boolTy = Builder.getBoolTy(); - return Builder.create( - loc, boolTy, mlir::cir::CastKind::float_to_bool, src); + return Builder.create(loc, boolTy, + cir::CastKind::float_to_bool, src); } mlir::Value buildIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) { @@ -996,8 +993,8 @@ class ScalarExprEmitter : public StmtVisitor { // TODO: optimize this common case here or leave it for later // CIR passes? mlir::Type boolTy = CGF.getCIRType(CGF.getContext().BoolTy); - return Builder.create( - loc, boolTy, mlir::cir::CastKind::int_to_bool, srcVal); + return Builder.create(loc, boolTy, cir::CastKind::int_to_bool, + srcVal); } /// Convert the specified expression value to a boolean (!cir.bool) truth @@ -1015,7 +1012,7 @@ class ScalarExprEmitter : public StmtVisitor { if (SrcType->isIntegerType()) return buildIntToBoolConversion(Src, loc); - assert(::mlir::isa<::mlir::cir::PointerType>(Src.getType())); + assert(::mlir::isa(Src.getType())); return buildPointerToBoolConversion(Src, SrcType); } @@ -1061,7 +1058,7 @@ class ScalarExprEmitter : public StmtVisitor { if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { // Cast to FP using the intrinsic if the half type itself isn't supported. - if (mlir::isa(DstTy)) { + if (mlir::isa(DstTy)) { if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) llvm_unreachable("cast via llvm.convert.from.fp16 is NYI"); } else { @@ -1071,8 +1068,8 @@ class ScalarExprEmitter : public StmtVisitor { if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { llvm_unreachable("cast via llvm.convert.from.fp16 is NYI"); } else { - Src = Builder.createCast( - CGF.getLoc(Loc), mlir::cir::CastKind::floating, Src, CGF.FloatTy); + Src = Builder.createCast(CGF.getLoc(Loc), cir::CastKind::floating, + Src, CGF.FloatTy); } SrcType = CGF.getContext().FloatTy; SrcTy = CGF.FloatTy; @@ -1090,13 +1087,13 @@ class ScalarExprEmitter : public StmtVisitor { // Handle pointer conversions next: pointers can only be converted to/from // other pointers and integers. Check for pointer types in terms of LLVM, as // some native types (like Obj-C id) may map to a pointer type. - if (auto DstPT = dyn_cast(DstTy)) { + if (auto DstPT = dyn_cast(DstTy)) { llvm_unreachable("NYI"); } - if (isa(SrcTy)) { + if (isa(SrcTy)) { // Must be an ptr to int cast. - assert(isa(DstTy) && "not ptr->int?"); + assert(isa(DstTy) && "not ptr->int?"); return Builder.createPtrToInt(Src, DstTy); } @@ -1132,14 +1129,14 @@ class ScalarExprEmitter : public StmtVisitor { if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { // Make sure we cast in a single step if from another FP type. - if (mlir::isa(SrcTy)) { + if (mlir::isa(SrcTy)) { // Use the intrinsic if the half type itself isn't supported // (as opposed to operations on half, available with NativeHalfType). if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) llvm_unreachable("cast via llvm.convert.to.fp16 is NYI"); // If the half type is supported, just use an fptrunc. - return Builder.createCast(CGF.getLoc(Loc), - mlir::cir::CastKind::floating, Src, DstTy); + return Builder.createCast(CGF.getLoc(Loc), cir::CastKind::floating, Src, + DstTy); } DstTy = CGF.FloatTy; } @@ -1150,8 +1147,8 @@ class ScalarExprEmitter : public StmtVisitor { if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { llvm_unreachable("cast via llvm.convert.to.fp16 is NYI"); } else { - Res = Builder.createCast(CGF.getLoc(Loc), mlir::cir::CastKind::floating, - Res, ResTy); + Res = Builder.createCast(CGF.getLoc(Loc), cir::CastKind::floating, Res, + ResTy); } } @@ -1267,7 +1264,7 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, Expr *indexOperand = expr->getRHS(); // In a subtraction, the LHS is always the pointer. - if (!isSubtraction && !mlir::isa(pointer.getType())) { + if (!isSubtraction && !mlir::isa(pointer.getType())) { std::swap(pointer, index); std::swap(pointerOperand, indexOperand); } @@ -1322,12 +1319,12 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, // multiply. We suppress this if overflow is not undefined behavior. mlir::Type elemTy = CGF.convertTypeForMem(vla->getElementType()); - index = CGF.getBuilder().createCast(mlir::cir::CastKind::integral, index, + index = CGF.getBuilder().createCast(cir::CastKind::integral, index, numElements.getType()); index = CGF.getBuilder().createMul(index, numElements); if (CGF.getLangOpts().isSignedOverflowDefined()) { - pointer = CGF.getBuilder().create( + pointer = CGF.getBuilder().create( CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index); } else { pointer = CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned, @@ -1345,7 +1342,7 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, elemTy = CGF.convertTypeForMem(elementType); if (CGF.getLangOpts().isSignedOverflowDefined()) - return CGF.getBuilder().create( + return CGF.getBuilder().create( CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index); return CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned, @@ -1377,7 +1374,7 @@ mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) { !CanElideOverflowCheck(CGF.getContext(), Ops)) llvm_unreachable("NYI"); - if (mlir::cir::isFPOrFPVectorTy(Ops.LHS.getType())) { + if (cir::isFPOrFPVectorTy(Ops.LHS.getType())) { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); return Builder.createFMul(Ops.LHS, Ops.RHS); } @@ -1385,24 +1382,24 @@ mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) { if (Ops.isFixedPointOp()) llvm_unreachable("NYI"); - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), - mlir::cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.getCIRType(Ops.FullType), + cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildDiv(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), - mlir::cir::BinOpKind::Div, Ops.LHS, Ops.RHS); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.getCIRType(Ops.FullType), + cir::BinOpKind::Div, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildRem(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), - mlir::cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.getCIRType(Ops.FullType), + cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { - if (mlir::isa(Ops.LHS.getType()) || - mlir::isa(Ops.RHS.getType())) + if (mlir::isa(Ops.LHS.getType()) || + mlir::isa(Ops.RHS.getType())) return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/false); if (Ops.CompType->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { @@ -1430,7 +1427,7 @@ mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { !CanElideOverflowCheck(CGF.getContext(), Ops)) llvm_unreachable("NYI"); - if (mlir::cir::isFPOrFPVectorTy(Ops.LHS.getType())) { + if (cir::isFPOrFPVectorTy(Ops.LHS.getType())) { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); return Builder.createFAdd(Ops.LHS, Ops.RHS); } @@ -1438,14 +1435,14 @@ mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { if (Ops.isFixedPointOp()) llvm_unreachable("NYI"); - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), - mlir::cir::BinOpKind::Add, Ops.LHS, Ops.RHS); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.getCIRType(Ops.FullType), + cir::BinOpKind::Add, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { // The LHS is always a pointer if either side is. - if (!mlir::isa(Ops.LHS.getType())) { + if (!mlir::isa(Ops.LHS.getType())) { if (Ops.CompType->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: { @@ -1473,7 +1470,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { !CanElideOverflowCheck(CGF.getContext(), Ops)) llvm_unreachable("NYI"); - if (mlir::cir::isFPOrFPVectorTy(Ops.LHS.getType())) { + if (cir::isFPOrFPVectorTy(Ops.LHS.getType())) { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); return Builder.createFSub(Ops.LHS, Ops.RHS); } @@ -1481,14 +1478,14 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { if (Ops.isFixedPointOp()) llvm_unreachable("NYI"); - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), - mlir::cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.getCIRType(Ops.FullType), + cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); } // If the RHS is not a pointer, then we have normal pointer // arithmetic. - if (!mlir::isa(Ops.RHS.getType())) + if (!mlir::isa(Ops.RHS.getType())) return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/true); // Otherwise, this is a pointer subtraction @@ -1500,8 +1497,8 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { // // See more in `EmitSub` in CGExprScalar.cpp. assert(!cir::MissingFeatures::llvmLoweringPtrDiffConsidersPointee()); - return Builder.create(CGF.getLoc(Ops.Loc), - CGF.PtrDiffTy, Ops.LHS, Ops.RHS); + return Builder.create(CGF.getLoc(Ops.Loc), CGF.PtrDiffTy, + Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { @@ -1527,13 +1524,13 @@ mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { if (CGF.getLangOpts().OpenCL) llvm_unreachable("NYI"); else if ((SanitizeBase || SanitizeExponent) && - mlir::isa(Ops.LHS.getType())) { + mlir::isa(Ops.LHS.getType())) { llvm_unreachable("NYI"); } - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), Ops.LHS, Ops.RHS, - CGF.getBuilder().getUnitAttr()); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.getCIRType(Ops.FullType), Ops.LHS, + Ops.RHS, CGF.getBuilder().getUnitAttr()); } mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) { @@ -1549,30 +1546,30 @@ mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) { if (CGF.getLangOpts().OpenCL) llvm_unreachable("NYI"); else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && - mlir::isa(Ops.LHS.getType())) { + mlir::isa(Ops.LHS.getType())) { llvm_unreachable("NYI"); } // Note that we don't need to distinguish unsigned treatment at this // point since it will be handled later by LLVM lowering. - return Builder.create( + return Builder.create( CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildAnd(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), - mlir::cir::BinOpKind::And, Ops.LHS, Ops.RHS); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.getCIRType(Ops.FullType), + cir::BinOpKind::And, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildXor(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), - mlir::cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.getCIRType(Ops.FullType), + cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::buildOr(const BinOpInfo &Ops) { - return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), - mlir::cir::BinOpKind::Or, Ops.LHS, Ops.RHS); + return Builder.create(CGF.getLoc(Ops.Loc), + CGF.getCIRType(Ops.FullType), + cir::BinOpKind::Or, Ops.LHS, Ops.RHS); } // Emit code for an explicit or implicit cast. Implicit @@ -1745,11 +1742,11 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { const MemberPointerType *MPT = CE->getType()->getAs(); if (MPT->isMemberFunctionPointerType()) { - auto Ty = mlir::cast(CGF.getCIRType(DestTy)); + auto Ty = mlir::cast(CGF.getCIRType(DestTy)); return Builder.getNullMethodPtr(Ty, CGF.getLoc(E->getExprLoc())); } - auto Ty = mlir::cast(CGF.getCIRType(DestTy)); + auto Ty = mlir::cast(CGF.getCIRType(DestTy)); return Builder.getNullDataMemberPtr(Ty, CGF.getLoc(E->getExprLoc())); } case CK_ReinterpretMemberPointer: @@ -1797,8 +1794,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // correct CIR conversion. auto MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestCIRTy); auto MiddleVal = Builder.createCast(E->getType()->isBooleanType() - ? mlir::cir::CastKind::bool_to_int - : mlir::cir::CastKind::integral, + ? cir::CastKind::bool_to_int + : cir::CastKind::integral, Src, MiddleTy); if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) @@ -1821,7 +1818,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_VectorSplat: { // Create a vector object and fill all elements with the same scalar value. assert(DestTy->isVectorType() && "CK_VectorSplat to non-vector type"); - return CGF.getBuilder().create( + return CGF.getBuilder().create( CGF.getLoc(E->getSourceRange()), CGF.getCIRType(DestTy), Visit(E)); } case CK_FixedPointCast: @@ -1934,14 +1931,14 @@ mlir::Value CIRGenFunction::buildComplexToScalarConversion(mlir::Value Src, auto ComplexElemTy = SrcTy->castAs()->getElementType(); if (DstTy->isBooleanType()) { auto Kind = ComplexElemTy->isFloatingType() - ? mlir::cir::CastKind::float_complex_to_bool - : mlir::cir::CastKind::int_complex_to_bool; + ? cir::CastKind::float_complex_to_bool + : cir::CastKind::int_complex_to_bool; return builder.createCast(getLoc(Loc), Kind, Src, ConvertType(DstTy)); } auto Kind = ComplexElemTy->isFloatingType() - ? mlir::cir::CastKind::float_complex_to_real - : mlir::cir::CastKind::int_complex_to_real; + ? cir::CastKind::float_complex_to_real + : cir::CastKind::int_complex_to_real; auto Real = builder.createCast(getLoc(Loc), Kind, Src, ConvertType(ComplexElemTy)); return buildScalarConversion(Real, ComplexElemTy, DstTy, Loc); @@ -1975,21 +1972,21 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { "NYI: scalable vector init"); assert(!cir::MissingFeatures::vectorConstants() && "NYI: vector constants"); auto VectorType = - mlir::dyn_cast(CGF.getCIRType(E->getType())); + mlir::dyn_cast(CGF.getCIRType(E->getType())); SmallVector Elements; for (Expr *init : E->inits()) { Elements.push_back(Visit(init)); } // Zero-initialize any remaining values. if (NumInitElements < VectorType.getSize()) { - mlir::Value ZeroValue = CGF.getBuilder().create( + mlir::Value ZeroValue = CGF.getBuilder().create( CGF.getLoc(E->getSourceRange()), VectorType.getEltType(), CGF.getBuilder().getZeroInitAttr(VectorType.getEltType())); for (uint64_t i = NumInitElements; i < VectorType.getSize(); ++i) { Elements.push_back(ZeroValue); } } - return CGF.getBuilder().create( + return CGF.getBuilder().create( CGF.getLoc(E->getSourceRange()), VectorType, Elements); } @@ -2017,9 +2014,9 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { // ZExt result to the expr type. auto dstTy = ConvertType(E->getType()); - if (mlir::isa(dstTy)) + if (mlir::isa(dstTy)) return Builder.createBoolToInt(boolVal, dstTy); - if (mlir::isa(dstTy)) + if (mlir::isa(dstTy)) return boolVal; llvm_unreachable("destination type for logical-not unary operator is NYI"); @@ -2075,37 +2072,36 @@ mlir::Value ScalarExprEmitter::buildScalarCast( llvm_unreachable("Obsolete code. Don't use mlir::IntegerType with CIR."); mlir::Type FullDstTy = DstTy; - if (mlir::isa(SrcTy) && - mlir::isa(DstTy)) { + if (mlir::isa(SrcTy) && mlir::isa(DstTy)) { // Use the element types of the vectors to figure out the CastKind. - SrcTy = mlir::dyn_cast(SrcTy).getEltType(); - DstTy = mlir::dyn_cast(DstTy).getEltType(); + SrcTy = mlir::dyn_cast(SrcTy).getEltType(); + DstTy = mlir::dyn_cast(DstTy).getEltType(); } - assert(!mlir::isa(SrcTy) && - !mlir::isa(DstTy) && + assert(!mlir::isa(SrcTy) && + !mlir::isa(DstTy) && "buildScalarCast given a vector type and a non-vector type"); - std::optional CastKind; + std::optional CastKind; - if (mlir::isa(SrcTy)) { + if (mlir::isa(SrcTy)) { if (Opts.TreatBooleanAsSigned) llvm_unreachable("NYI: signed bool"); if (CGF.getBuilder().isInt(DstTy)) { - CastKind = mlir::cir::CastKind::bool_to_int; - } else if (mlir::isa(DstTy)) { - CastKind = mlir::cir::CastKind::bool_to_float; + CastKind = cir::CastKind::bool_to_int; + } else if (mlir::isa(DstTy)) { + CastKind = cir::CastKind::bool_to_float; } else { llvm_unreachable("Internal error: Cast to unexpected type"); } } else if (CGF.getBuilder().isInt(SrcTy)) { if (CGF.getBuilder().isInt(DstTy)) { - CastKind = mlir::cir::CastKind::integral; - } else if (mlir::isa(DstTy)) { - CastKind = mlir::cir::CastKind::int_to_float; + CastKind = cir::CastKind::integral; + } else if (mlir::isa(DstTy)) { + CastKind = cir::CastKind::int_to_float; } else { llvm_unreachable("Internal error: Cast to unexpected type"); } - } else if (mlir::isa(SrcTy)) { + } else if (mlir::isa(SrcTy)) { if (CGF.getBuilder().isInt(DstTy)) { // If we can't recognize overflow as undefined behavior, assume that // overflow saturates. This protects against normal optimizations if we @@ -2114,8 +2110,8 @@ mlir::Value ScalarExprEmitter::buildScalarCast( llvm_unreachable("NYI"); if (Builder.getIsFPConstrained()) llvm_unreachable("NYI"); - CastKind = mlir::cir::CastKind::float_to_int; - } else if (mlir::isa(DstTy)) { + CastKind = cir::CastKind::float_to_int; + } else if (mlir::isa(DstTy)) { // TODO: split this to createFPExt/createFPTrunc return Builder.createFloatingCast(Src, FullDstTy); } else { @@ -2126,8 +2122,7 @@ mlir::Value ScalarExprEmitter::buildScalarCast( } assert(CastKind.has_value() && "Internal error: CastKind not set."); - return Builder.create(Src.getLoc(), FullDstTy, *CastKind, - Src); + return Builder.create(Src.getLoc(), FullDstTy, *CastKind, Src); } LValue @@ -2258,19 +2253,19 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( mlir::Value ScalarExprEmitter::buildComplexToScalarConversion( mlir::Location Loc, mlir::Value V, CastKind Kind, QualType DestTy) { - mlir::cir::CastKind CastOpKind; + cir::CastKind CastOpKind; switch (Kind) { case CK_FloatingComplexToReal: - CastOpKind = mlir::cir::CastKind::float_complex_to_real; + CastOpKind = cir::CastKind::float_complex_to_real; break; case CK_IntegralComplexToReal: - CastOpKind = mlir::cir::CastKind::int_complex_to_real; + CastOpKind = cir::CastKind::int_complex_to_real; break; case CK_FloatingComplexToBoolean: - CastOpKind = mlir::cir::CastKind::float_complex_to_bool; + CastOpKind = cir::CastKind::float_complex_to_bool; break; case CK_IntegralComplexToBoolean: - CastOpKind = mlir::cir::CastKind::int_complex_to_bool; + CastOpKind = cir::CastKind::int_complex_to_bool; break; default: llvm_unreachable("invalid complex-to-scalar cast kind"); @@ -2349,14 +2344,14 @@ mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { auto scopeLoc = CGF.getLoc(E->getSourceRange()); auto &builder = CGF.builder; - auto scope = builder.create( + auto scope = builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) { CIRGenFunction::LexicalScope lexScope{CGF, loc, builder.getInsertionBlock()}; auto scopeYieldVal = Visit(E->getSubExpr()); if (scopeYieldVal) { - builder.create(loc, scopeYieldVal); + builder.create(loc, scopeYieldVal); yieldTy = scopeYieldVal.getType(); } }); @@ -2486,8 +2481,8 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( mlir::Value condValue = Visit(condExpr); mlir::Value lhsValue = Visit(lhsExpr); mlir::Value rhsValue = Visit(rhsExpr); - return builder.create(loc, condValue, lhsValue, - rhsValue); + return builder.create(loc, condValue, lhsValue, + rhsValue); } // If this is a really simple expression (like x ? 4 : 5), emit this as a @@ -2500,7 +2495,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( assert(!cir::MissingFeatures::incrementProfileCounter()); return builder - .create( + .create( loc, condV, /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { auto lhs = Visit(lhsExpr); @@ -2508,7 +2503,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( lhs = builder.getNullValue(CGF.VoidTy, loc); lhsIsVoid = true; } - builder.create(loc, lhs); + builder.create(loc, lhs); }, /*elseBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { @@ -2517,7 +2512,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( assert(!rhs && "lhs and rhs types must match"); rhs = builder.getNullValue(CGF.VoidTy, loc); } - builder.create(loc, rhs); + builder.create(loc, rhs); }) .getResult(); } @@ -2540,17 +2535,17 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( builder.restoreInsertionPoint(toInsert); // Block does not return: build empty yield. - if (mlir::isa(yieldTy)) { - builder.create(loc); + if (mlir::isa(yieldTy)) { + builder.create(loc); } else { // Block returns: set null yield value. mlir::Value op0 = builder.getNullValue(yieldTy, loc); - builder.create(loc, op0); + builder.create(loc, op0); } } }; return builder - .create( + .create( loc, condV, /*trueBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { CIRGenFunction::LexicalScope lexScope{CGF, loc, @@ -2564,7 +2559,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( if (lhs) { yieldTy = lhs.getType(); - b.create(loc, lhs); + b.create(loc, lhs); return; } // If LHS or RHS is a throw or void expression we need to patch arms @@ -2584,7 +2579,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( if (rhs) { yieldTy = rhs.getType(); - b.create(loc, rhs); + b.create(loc, rhs); } else { // If LHS or RHS is a throw or void expression we need to patch // arms as to properly match yield types. @@ -2634,45 +2629,43 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { CIRGenFunction::ConditionalEvaluation eval(CGF); mlir::Value LHSCondV = CGF.evaluateExprAsBool(E->getLHS()); - auto ResOp = Builder.create( + auto ResOp = Builder.create( Loc, LHSCondV, /*trueBuilder=*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { CIRGenFunction::LexicalScope LexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); mlir::Value RHSCondV = CGF.evaluateExprAsBool(E->getRHS()); - auto res = B.create( + auto res = B.create( Loc, RHSCondV, /*trueBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { CIRGenFunction::LexicalScope lexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - auto res = B.create( + auto res = B.create( Loc, Builder.getBoolTy(), - Builder.getAttr(Builder.getBoolTy(), - true)); - B.create(Loc, res.getRes()); + Builder.getAttr(Builder.getBoolTy(), true)); + B.create(Loc, res.getRes()); }, /*falseBuilder*/ [&](mlir::OpBuilder &b, mlir::Location Loc) { CIRGenFunction::LexicalScope lexScope{CGF, Loc, b.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - auto res = b.create( + auto res = b.create( Loc, Builder.getBoolTy(), - Builder.getAttr(Builder.getBoolTy(), - false)); - b.create(Loc, res.getRes()); + Builder.getAttr(Builder.getBoolTy(), false)); + b.create(Loc, res.getRes()); }); - B.create(Loc, res.getResult()); + B.create(Loc, res.getResult()); }, /*falseBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { CIRGenFunction::LexicalScope lexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - auto res = B.create( + auto res = B.create( Loc, Builder.getBoolTy(), - Builder.getAttr(Builder.getBoolTy(), false)); - B.create(Loc, res.getRes()); + Builder.getAttr(Builder.getBoolTy(), false)); + B.create(Loc, res.getRes()); }); return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp.getResult(), ResTy); } @@ -2702,7 +2695,7 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { } // 1 || RHS: If it is safe, just elide the RHS, and return 1/true. if (!CGF.ContainsLabel(E->getRHS())) { - if (auto intTy = mlir::dyn_cast(ResTy)) + if (auto intTy = mlir::dyn_cast(ResTy)) return Builder.getConstInt(Loc, intTy, 1); else return Builder.getBool(true, Loc); @@ -2712,22 +2705,22 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { CIRGenFunction::ConditionalEvaluation eval(CGF); mlir::Value LHSCondV = CGF.evaluateExprAsBool(E->getLHS()); - auto ResOp = Builder.create( + auto ResOp = Builder.create( Loc, LHSCondV, /*trueBuilder=*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { CIRGenFunction::LexicalScope lexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - auto res = B.create( + auto res = B.create( Loc, Builder.getBoolTy(), - Builder.getAttr(Builder.getBoolTy(), true)); - B.create(Loc, res.getRes()); + Builder.getAttr(Builder.getBoolTy(), true)); + B.create(Loc, res.getRes()); }, /*falseBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { CIRGenFunction::LexicalScope LexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); mlir::Value RHSCondV = CGF.evaluateExprAsBool(E->getRHS()); - auto res = B.create( + auto res = B.create( Loc, RHSCondV, /*trueBuilder*/ [&](mlir::OpBuilder &B, mlir::Location Loc) { SmallVector Locs; @@ -2742,11 +2735,10 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { CIRGenFunction::LexicalScope lexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - auto res = B.create( + auto res = B.create( Loc, Builder.getBoolTy(), - Builder.getAttr(Builder.getBoolTy(), - true)); - B.create(Loc, res.getRes()); + Builder.getAttr(Builder.getBoolTy(), true)); + B.create(Loc, res.getRes()); }, /*falseBuilder*/ [&](mlir::OpBuilder &b, mlir::Location Loc) { @@ -2762,13 +2754,12 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { CIRGenFunction::LexicalScope lexScope{CGF, Loc, B.getInsertionBlock()}; CGF.currLexScope->setAsTernary(); - auto res = b.create( + auto res = b.create( Loc, Builder.getBoolTy(), - Builder.getAttr(Builder.getBoolTy(), - false)); - b.create(Loc, res.getRes()); + Builder.getAttr(Builder.getBoolTy(), false)); + b.create(Loc, res.getRes()); }); - B.create(Loc, res.getResult()); + B.create(Loc, res.getResult()); }); return Builder.createZExtOrBitCast(ResOp.getLoc(), ResOp.getResult(), ResTy); @@ -2829,8 +2820,8 @@ mlir::Value CIRGenFunction::buildCheckedInBoundsGEP( bool SignedIndices, bool IsSubtraction, SourceLocation Loc) { mlir::Type PtrTy = Ptr.getType(); assert(IdxList.size() == 1 && "multi-index ptr arithmetic NYI"); - mlir::Value GEPVal = builder.create( - CGM.getLoc(Loc), PtrTy, Ptr, IdxList[0]); + mlir::Value GEPVal = + builder.create(CGM.getLoc(Loc), PtrTy, Ptr, IdxList[0]); // If the pointer overflow sanitizer isn't enabled, do nothing. if (!SanOpts.has(SanitizerKind::PointerOverflow)) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 155aceed7f2f..3523ca861e47 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -34,7 +34,7 @@ using namespace clang; using namespace clang::CIRGen; -using namespace mlir::cir; +using namespace cir; CIRGenFunction::CIRGenFunction(CIRGenModule &CGM, CIRGenBuilderTy &builder, bool suppressNewContext) @@ -311,7 +311,7 @@ mlir::LogicalResult CIRGenFunction::declare(const Decl *var, QualType ty, assert(!symbolTable.count(var) && "not supposed to be available just yet"); addr = buildAlloca(namedVar->getName(), ty, loc, alignment); - auto allocaOp = cast(addr.getDefiningOp()); + auto allocaOp = cast(addr.getDefiningOp()); if (isParam) allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext())); if (ty->isReferenceType() || ty.isConstQualified()) @@ -331,7 +331,7 @@ mlir::LogicalResult CIRGenFunction::declare(Address addr, const Decl *var, assert(!symbolTable.count(var) && "not supposed to be available just yet"); addrVal = addr.getPointer(); - auto allocaOp = cast(addrVal.getDefiningOp()); + auto allocaOp = cast(addrVal.getDefiningOp()); if (isParam) allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext())); if (ty->isReferenceType() || ty.isConstQualified()) @@ -442,12 +442,11 @@ void CIRGenFunction::LexicalScope::cleanup() { insertCleanupAndLeave(currBlock); } -mlir::cir::ReturnOp -CIRGenFunction::LexicalScope::buildReturn(mlir::Location loc) { +cir::ReturnOp CIRGenFunction::LexicalScope::buildReturn(mlir::Location loc) { auto &builder = CGF.getBuilder(); // If we are on a coroutine, add the coro_end builtin call. - auto Fn = dyn_cast(CGF.CurFn); + auto Fn = dyn_cast(CGF.CurFn); assert(Fn && "other callables NYI"); if (Fn.getCoroutine()) CGF.buildCoroEndBuiltinCall( @@ -485,14 +484,14 @@ void CIRGenFunction::LexicalScope::buildImplicitReturn() { llvm_unreachable("NYI"); } else if (shouldEmitUnreachable) { if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { - builder.create(localScope->EndLoc); + builder.create(localScope->EndLoc); builder.clearInsertionPoint(); return; } } if (CGF.SanOpts.has(SanitizerKind::Return) || shouldEmitUnreachable) { - builder.create(localScope->EndLoc); + builder.create(localScope->EndLoc); builder.clearInsertionPoint(); return; } @@ -501,7 +500,7 @@ void CIRGenFunction::LexicalScope::buildImplicitReturn() { (void)buildReturn(localScope->EndLoc); } -mlir::cir::TryOp CIRGenFunction::LexicalScope::getClosestTryParent() { +cir::TryOp CIRGenFunction::LexicalScope::getClosestTryParent() { auto *scope = this; while (scope) { if (scope->isTry()) @@ -601,9 +600,8 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // block, it'd be deleted now. Same for unused ret allocas from ReturnValue } -mlir::cir::FuncOp -CIRGenFunction::generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, - const CIRGenFunctionInfo &FnInfo) { +cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, + const CIRGenFunctionInfo &FnInfo) { assert(Fn && "generating code for a null function"); const auto FD = cast(GD.getDecl()); CurGD = GD; @@ -900,19 +898,18 @@ static mlir::Value emitArgumentDemotion(CIRGenFunction &CGF, const VarDecl *var, if (value.getType() == ty) return value; - assert( - (isa(ty) || mlir::cir::isAnyFloatingPointType(ty)) && - "unexpected promotion type"); + assert((isa(ty) || cir::isAnyFloatingPointType(ty)) && + "unexpected promotion type"); - if (isa(ty)) + if (isa(ty)) return CGF.getBuilder().CIRBaseBuilderTy::createIntCast(value, ty); - return CGF.getBuilder().CIRBaseBuilderTy::createCast( - mlir::cir::CastKind::floating, value, ty); + return CGF.getBuilder().CIRBaseBuilderTy::createCast(cir::CastKind::floating, + value, ty); } void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, - mlir::cir::FuncOp Fn, + cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc, @@ -1277,7 +1274,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, const auto *MD = cast(D); if (MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call) { // We're in a lambda. - auto Fn = dyn_cast(CurFn); + auto Fn = dyn_cast(CurFn); assert(Fn && "other callables NYI"); Fn.setLambdaAttr(mlir::UnitAttr::get(&getMLIRContext())); @@ -1845,16 +1842,14 @@ CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, // llvm::ArrayType *llvmArrayType = // dyn_cast(addr.getElementType()); - auto cirArrayType = - mlir::dyn_cast(addr.getElementType()); + auto cirArrayType = mlir::dyn_cast(addr.getElementType()); while (cirArrayType) { assert(isa(arrayType)); countFromCLAs *= cirArrayType.getSize(); eltType = arrayType->getElementType(); - cirArrayType = - mlir::dyn_cast(cirArrayType.getEltType()); + cirArrayType = mlir::dyn_cast(cirArrayType.getEltType()); arrayType = getContext().getAsArrayType(arrayType->getElementType()); assert((!cirArrayType || arrayType) && @@ -1884,8 +1879,8 @@ mlir::Value CIRGenFunction::buildAlignmentAssumption( mlir::Value offsetValue) { if (SanOpts.has(SanitizerKind::Alignment)) llvm_unreachable("NYI"); - return builder.create( - getLoc(assumptionLoc), ptrValue, alignment, offsetValue); + return builder.create(getLoc(assumptionLoc), ptrValue, + alignment, offsetValue); } mlir::Value CIRGenFunction::buildAlignmentAssumption( @@ -1903,7 +1898,7 @@ void CIRGenFunction::buildVarAnnotations(const VarDecl *decl, mlir::Value val) { for (const auto *annot : decl->specific_attrs()) { annotations.push_back(CGM.buildAnnotateAttr(annot)); } - auto allocaOp = dyn_cast_or_null(val.getDefiningOp()); + auto allocaOp = dyn_cast_or_null(val.getDefiningOp()); assert(allocaOp && "expects available alloca"); allocaOp.setAnnotationsAttr(builder.getArrayAttr(annotations)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 1e92a7bc029b..0185b5370642 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -105,7 +105,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// Add OpenCL kernel arg metadata and the kernel attribute metadata to /// the function metadata. - void buildKernelMetadata(const FunctionDecl *FD, mlir::cir::FuncOp Fn); + void buildKernelMetadata(const FunctionDecl *FD, cir::FuncOp Fn); public: /// A non-RAII class containing all the information about a bound @@ -195,11 +195,12 @@ class CIRGenFunction : public CIRGenTypeCache { OpaqueValueMapping(CIRGenFunction &CGF, const AbstractConditionalOperator *op) : CGF(CGF) { - if (isa(op)) + if (mlir::isa(op)) // Leave Data empty. return; - const BinaryConditionalOperator *e = cast(op); + const BinaryConditionalOperator *e = + mlir::cast(op); Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(), e->getCommon()); } @@ -490,7 +491,7 @@ class CIRGenFunction : public CIRGenTypeCache { const CIRGenModule &getCIRGenModule() const { return CGM; } mlir::Block *getCurFunctionEntryBlock() { - auto Fn = dyn_cast(CurFn); + auto Fn = mlir::dyn_cast(CurFn); assert(Fn && "other callables NYI"); return &Fn.getRegion().front(); } @@ -832,10 +833,10 @@ class CIRGenFunction : public CIRGenTypeCache { VlaSizePair getVLASize(QualType vla); mlir::Value emitBuiltinObjectSize(const Expr *E, unsigned Type, - mlir::cir::IntType ResType, - mlir::Value EmittedE, bool IsDynamic); + cir::IntType ResType, mlir::Value EmittedE, + bool IsDynamic); mlir::Value evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, - mlir::cir::IntType ResType, + cir::IntType ResType, mlir::Value EmittedE, bool IsDynamic); @@ -920,13 +921,13 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, - mlir::cir::CIRCallOpInterface *callOrTryCall, - bool IsMustTail, mlir::Location loc, + cir::CIRCallOpInterface *callOrTryCall, bool IsMustTail, + mlir::Location loc, std::optional E = std::nullopt); RValue buildCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, - mlir::cir::CIRCallOpInterface *callOrTryCall = nullptr, + cir::CIRCallOpInterface *callOrTryCall = nullptr, bool IsMustTail = false) { assert(currSrcLoc && "source location must have been set"); return buildCall(CallInfo, Callee, ReturnValue, Args, callOrTryCall, @@ -945,8 +946,8 @@ class CIRGenFunction : public CIRGenTypeCache { return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer(); } - mlir::Value buildRuntimeCall(mlir::Location loc, mlir::cir::FuncOp callee, - ArrayRef args = {}); + mlir::Value buildRuntimeCall(mlir::Location loc, cir::FuncOp callee, + llvm::ArrayRef args = {}); void buildInvariantStart(CharUnits Size); @@ -980,13 +981,11 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S); mlir::LogicalResult buildCoreturnStmt(const CoreturnStmt &S); - mlir::cir::CallOp buildCoroIDBuiltinCall(mlir::Location loc, - mlir::Value nullPtr); - mlir::cir::CallOp buildCoroAllocBuiltinCall(mlir::Location loc); - mlir::cir::CallOp buildCoroBeginBuiltinCall(mlir::Location loc, - mlir::Value coroframeAddr); - mlir::cir::CallOp buildCoroEndBuiltinCall(mlir::Location loc, - mlir::Value nullPtr); + cir::CallOp buildCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr); + cir::CallOp buildCoroAllocBuiltinCall(mlir::Location loc); + cir::CallOp buildCoroBeginBuiltinCall(mlir::Location loc, + mlir::Value coroframeAddr); + cir::CallOp buildCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr); RValue buildCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), @@ -1027,8 +1026,9 @@ class CIRGenFunction : public CIRGenTypeCache { // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. - mlir::LogicalResult buildStmt(const clang::Stmt *S, bool useCurrentScope, - ArrayRef Attrs = std::nullopt); + mlir::LogicalResult + buildStmt(const clang::Stmt *S, bool useCurrentScope, + llvm::ArrayRef Attrs = std::nullopt); mlir::LogicalResult buildSimpleStmt(const clang::Stmt *S, bool useCurrentScope); @@ -1038,12 +1038,12 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildDoStmt(const clang::DoStmt &S); mlir::LogicalResult buildCXXForRangeStmt(const CXXForRangeStmt &S, - ArrayRef Attrs = std::nullopt); + llvm::ArrayRef Attrs = std::nullopt); mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); mlir::LogicalResult buildCXXTryStmtUnderScope(const clang::CXXTryStmt &S); mlir::LogicalResult buildCXXTryStmt(const clang::CXXTryStmt &S); - void enterCXXTryStmt(const CXXTryStmt &S, mlir::cir::TryOp catchOp, + void enterCXXTryStmt(const CXXTryStmt &S, cir::TryOp catchOp, bool IsFnTryBlock = false); void exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); @@ -1171,7 +1171,7 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS); - mlir::cir::IfOp buildIfOnBoolExpr( + cir::IfOp buildIfOnBoolExpr( const clang::Expr *cond, llvm::function_ref thenBuilder, mlir::Location thenLoc, @@ -1205,7 +1205,7 @@ class CIRGenFunction : public CIRGenTypeCache { bool isReference() const { return ValueAndIsReference.getInt(); } LValue getReferenceLValue(CIRGenFunction &CGF, Expr *refExpr) const { assert(isReference()); - // create(loc, ty, getZeroAttr(ty)); + // create(loc, ty, getZeroAttr(ty)); // CGF.getBuilder().const // return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(), // refExpr->getType()); @@ -1235,13 +1235,12 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Type getCIRType(const clang::QualType &type); const CaseStmt *foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, - mlir::ArrayAttr &value, - mlir::cir::CaseOpKind &kind); + mlir::ArrayAttr &value, cir::CaseOpKind &kind); template mlir::LogicalResult buildCaseDefaultCascade(const T *stmt, mlir::Type condType, - mlir::ArrayAttr value, mlir::cir::CaseOpKind kind, + mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase); mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S, @@ -1257,8 +1256,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult buildSwitchBody(const clang::Stmt *S); - mlir::cir::FuncOp generateCode(clang::GlobalDecl GD, mlir::cir::FuncOp Fn, - const CIRGenFunctionInfo &FnInfo); + cir::FuncOp generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, + const CIRGenFunctionInfo &FnInfo); clang::QualType buildFunctionArgList(clang::GlobalDecl GD, FunctionArgList &Args); @@ -1343,7 +1342,7 @@ class CIRGenFunction : public CIRGenTypeCache { void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, mlir::Value &Result); - mlir::cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); + cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is /// nonnull, if 1\p LHS is marked _Nonnull. @@ -1356,7 +1355,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// \p IsSubtraction indicates whether the expression used to form the GEP /// is a subtraction. mlir::Value buildCheckedInBoundsGEP(mlir::Type ElemTy, mlir::Value Ptr, - ArrayRef IdxList, + llvm::ArrayRef IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc); @@ -1434,12 +1433,11 @@ class CIRGenFunction : public CIRGenTypeCache { /// inside a function, including static vars etc. void buildVarDecl(const clang::VarDecl &D); - mlir::cir::GlobalOp - addInitializerToStaticVarDecl(const VarDecl &D, mlir::cir::GlobalOp GV, - mlir::cir::GetGlobalOp GVAddr); + cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &D, + cir::GlobalOp GV, + cir::GetGlobalOp GVAddr); - void buildStaticVarDecl(const VarDecl &D, - mlir::cir::GlobalLinkageKind Linkage); + void buildStaticVarDecl(const VarDecl &D, cir::GlobalLinkageKind Linkage); /// Perform the usual unary conversions on the specified /// expression and compare the result against zero, returning an Int1Ty value. @@ -1643,7 +1641,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// \param Loc The location to be associated with the function. /// \param StartLoc The location of the function body. void StartFunction(clang::GlobalDecl GD, clang::QualType RetTy, - mlir::cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, + cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo, const FunctionArgList &Args, clang::SourceLocation Loc, clang::SourceLocation StartLoc); @@ -1700,7 +1698,7 @@ class CIRGenFunction : public CIRGenTypeCache { RValue buildAtomicExpr(AtomicExpr *E); void buildAtomicStore(RValue rvalue, LValue lvalue, bool isInit); - void buildAtomicStore(RValue rvalue, LValue lvalue, mlir::cir::MemOrder MO, + void buildAtomicStore(RValue rvalue, LValue lvalue, cir::MemOrder MO, bool IsVolatile, bool isInit); void buildAtomicInit(Expr *init, LValue dest); @@ -1782,7 +1780,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// TODO(cir): this could be a common AST helper between LLVM / CIR. bool hasVolatileMember(QualType T) { if (const RecordType *RT = T->getAs()) { - const RecordDecl *RD = cast(RT->getDecl()); + const RecordDecl *RD = mlir::cast(RT->getDecl()); return RD->hasVolatileMember(); } return false; @@ -1830,13 +1828,13 @@ class CIRGenFunction : public CIRGenTypeCache { }; /// Emits try/catch information for the current EH stack. - mlir::cir::CallOp callWithExceptionCtx = nullptr; - mlir::Operation *buildLandingPad(mlir::cir::TryOp tryOp); + cir::CallOp callWithExceptionCtx = nullptr; + mlir::Operation *buildLandingPad(cir::TryOp tryOp); void buildEHResumeBlock(bool isCleanup, mlir::Block *ehResumeBlock, mlir::Location loc); - mlir::Block *getEHResumeBlock(bool isCleanup, mlir::cir::TryOp tryOp); + mlir::Block *getEHResumeBlock(bool isCleanup, cir::TryOp tryOp); mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope, - mlir::cir::TryOp tryOp); + cir::TryOp tryOp); /// Unified block containing a call to cir.resume mlir::Block *ehResumeBlock = nullptr; llvm::DenseMap cleanupsToPatch; @@ -1845,8 +1843,8 @@ class CIRGenFunction : public CIRGenTypeCache { /// parameters. EHScopeStack::stable_iterator PrologueCleanupDepth; - mlir::Operation *getInvokeDestImpl(mlir::cir::TryOp tryOp); - mlir::Operation *getInvokeDest(mlir::cir::TryOp tryOp) { + mlir::Operation *getInvokeDestImpl(cir::TryOp tryOp); + mlir::Operation *getInvokeDest(cir::TryOp tryOp) { if (!EHStack.requiresLandingPad()) return nullptr; // Return the respective cir.try, this can be used to compute @@ -2102,7 +2100,7 @@ class CIRGenFunction : public CIRGenTypeCache { LexicalScope *ParentScope = nullptr; // Holds actual value for ScopeKind::Try - mlir::cir::TryOp tryOp = nullptr; + cir::TryOp tryOp = nullptr; // FIXME: perhaps we can use some info encoded in operations. enum Kind { @@ -2173,16 +2171,16 @@ class CIRGenFunction : public CIRGenTypeCache { bool isSwitch() { return ScopeKind == Kind::Switch; } bool isTernary() { return ScopeKind == Kind::Ternary; } bool isTry() { return ScopeKind == Kind::Try; } - mlir::cir::TryOp getTry() { + cir::TryOp getTry() { assert(isTry()); return tryOp; } - mlir::cir::TryOp getClosestTryParent(); + cir::TryOp getClosestTryParent(); void setAsGlobalInit() { ScopeKind = Kind::GlobalInit; } void setAsSwitch() { ScopeKind = Kind::Switch; } void setAsTernary() { ScopeKind = Kind::Ternary; } - void setAsTry(mlir::cir::TryOp op) { + void setAsTry(cir::TryOp op) { ScopeKind = Kind::Try; tryOp = op; } @@ -2220,7 +2218,7 @@ class CIRGenFunction : public CIRGenTypeCache { // have their own scopes but are distinct regions nonetheless. llvm::SmallVector RetBlocks; llvm::SmallVector> RetLocs; - llvm::DenseMap RetBlockInCaseIndex; + llvm::DenseMap RetBlockInCaseIndex; std::optional NormalRetBlockIndex; llvm::SmallVector> SwitchRegions; @@ -2228,7 +2226,7 @@ class CIRGenFunction : public CIRGenTypeCache { // get or create because of potential unreachable return statements, note // that for those, all source location maps to the first one found. mlir::Block *createRetBlock(CIRGenFunction &CGF, mlir::Location loc) { - assert((isa_and_nonnull( + assert((isa_and_nonnull( CGF.builder.getBlock()->getParentOp()) || RetBlocks.size() == 0) && "only switches can hold more than one ret block"); @@ -2241,7 +2239,7 @@ class CIRGenFunction : public CIRGenTypeCache { return b; } - mlir::cir::ReturnOp buildReturn(mlir::Location loc); + cir::ReturnOp buildReturn(mlir::Location loc); void buildImplicitReturn(); public: @@ -2261,7 +2259,7 @@ class CIRGenFunction : public CIRGenTypeCache { } mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) { - if (auto caseOp = dyn_cast_if_present( + if (auto caseOp = mlir::dyn_cast_if_present( CGF.builder.getBlock()->getParentOp())) { auto iter = RetBlockInCaseIndex.find(caseOp); if (iter != RetBlockInCaseIndex.end()) @@ -2302,7 +2300,7 @@ class CIRGenFunction : public CIRGenTypeCache { std::move(CGF.CXXInheritedCtorInitExprArgs)) { CGF.CurGD = GD; CGF.CurFuncDecl = CGF.CurCodeDecl = - cast(GD.getDecl()); + mlir::cast(GD.getDecl()); CGF.CXXABIThisDecl = nullptr; CGF.CXXABIThisValue = nullptr; CGF.CXXThisValue = nullptr; @@ -2373,18 +2371,18 @@ class CIRGenFunction : public CIRGenTypeCache { /// /// The cast is not performaed in CreateTempAllocaWithoutCast. This is /// more efficient if the caller knows that the address will not be exposed. - mlir::cir::AllocaOp CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, - const Twine &Name = "tmp", - mlir::Value ArraySize = nullptr, - bool insertIntoFnEntryBlock = false); - mlir::cir::AllocaOp - CreateTempAllocaInFnEntryBlock(mlir::Type Ty, mlir::Location Loc, + cir::AllocaOp CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, + const Twine &Name = "tmp", + mlir::Value ArraySize = nullptr, + bool insertIntoFnEntryBlock = false); + cir::AllocaOp CreateTempAllocaInFnEntryBlock(mlir::Type Ty, + mlir::Location Loc, + const Twine &Name = "tmp", + mlir::Value ArraySize = nullptr); + cir::AllocaOp CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, const Twine &Name = "tmp", + mlir::OpBuilder::InsertPoint ip = {}, mlir::Value ArraySize = nullptr); - mlir::cir::AllocaOp CreateTempAlloca(mlir::Type Ty, mlir::Location Loc, - const Twine &Name = "tmp", - mlir::OpBuilder::InsertPoint ip = {}, - mlir::Value ArraySize = nullptr); Address CreateTempAlloca(mlir::Type Ty, CharUnits align, mlir::Location Loc, const Twine &Name = "tmp", mlir::Value ArraySize = nullptr, @@ -2450,13 +2448,13 @@ struct DominatingCIRValue { if (!currBlock->isEntryBlock() || !definingOp->getParentOp()) return false; - if (auto fnOp = definingOp->getParentOfType()) { + if (auto fnOp = definingOp->getParentOfType()) { if (&fnOp.getBody().front() == currBlock) return true; return false; } - if (auto globalOp = definingOp->getParentOfType()) { + if (auto globalOp = definingOp->getParentOfType()) { assert(globalOp.getNumRegions() == 2 && "other regions NYI"); if (&globalOp.getCtorRegion().front() == currBlock) return true; @@ -2495,10 +2493,10 @@ inline mlir::Value DominatingCIRValue::restore(CIRGenFunction &CGF, return value.getPointer(); // Otherwise, it should be an alloca instruction, as set up in save(). - auto alloca = cast(value.getPointer().getDefiningOp()); + auto alloca = mlir::cast(value.getPointer().getDefiningOp()); mlir::Value val = CGF.getBuilder().createAlignedLoad( alloca.getLoc(), alloca.getType(), alloca); - mlir::cir::LoadOp loadOp = cast(val.getDefiningOp()); + cir::LoadOp loadOp = mlir::cast(val.getDefiningOp()); loadOp.setAlignment(alloca.getAlignment()); return val; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h index c93fa188f717..3f442bad2e61 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunctionInfo.h @@ -88,11 +88,11 @@ class CIRGenFunctionInfo final typedef clang::FunctionProtoType::ExtParameterInfo ExtParameterInfo; /// The cir::CallingConv to use for this function (as specified by the user). - mlir::cir::CallingConv CallingConvention : 8; + cir::CallingConv CallingConvention : 8; /// The cir::CallingConv to actually use for this function, which may depend /// on the ABI. - mlir::cir::CallingConv EffectiveCallingConvention : 8; + cir::CallingConv EffectiveCallingConvention : 8; /// The clang::CallingConv that this was originally created with. unsigned ASTCallingConvention : 6; @@ -129,7 +129,7 @@ class CIRGenFunctionInfo final /// TODO: think about modeling this properly, this is just a dumb subsitution /// for now since we arent supporting anything other than arguments in /// registers atm - mlir::cir::StructType *ArgStruct; + cir::StructType *ArgStruct; unsigned ArgStructAlign : 31; unsigned HasExtParameterInfos : 1; @@ -150,7 +150,7 @@ class CIRGenFunctionInfo final CIRGenFunctionInfo() : Required(RequiredArgs::All) {} public: - static CIRGenFunctionInfo *create(mlir::cir::CallingConv cirCC, bool instanceMethod, + static CIRGenFunctionInfo *create(cir::CallingConv cirCC, bool instanceMethod, bool chainCall, const clang::FunctionType::ExtInfo &extInfo, llvm::ArrayRef paramInfos, @@ -252,13 +252,11 @@ class CIRGenFunctionInfo final /// getCallingConvention - Return the user specified calling convention, which /// has been translated into a CIR CC. - mlir::cir::CallingConv getCallingConvention() const { - return CallingConvention; - } + cir::CallingConv getCallingConvention() const { return CallingConvention; } /// getEffectiveCallingConvention - Return the actual calling convention to /// use, which may depend on the ABI. - mlir::cir::CallingConv getEffectiveCallingConvention() const { + cir::CallingConv getEffectiveCallingConvention() const { return EffectiveCallingConvention; } @@ -277,7 +275,7 @@ class CIRGenFunctionInfo final return isVariadic() ? getRequiredArgs().getNumRequiredArgs() : arg_size(); } - mlir::cir::StructType *getArgStruct() const { return ArgStruct; } + cir::StructType *getArgStruct() const { return ArgStruct; } /// Return true if this function uses inalloca arguments. bool usesInAlloca() const { return ArgStruct; } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index dd79a1b7aaff..4c73215432db 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -36,7 +36,7 @@ using namespace clang::CIRGen; namespace { class CIRGenItaniumCXXABI : public CIRGenCXXABI { /// All the vtables which have been defined. - llvm::DenseMap VTables; + llvm::DenseMap VTables; protected: bool UseARMMethodPtrABI; @@ -81,10 +81,10 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl()); auto *op = CGM.getGlobalValue(Name); - if (auto globalOp = dyn_cast_or_null(op)) + if (auto globalOp = dyn_cast_or_null(op)) llvm_unreachable("NYI"); - if (auto funcOp = dyn_cast_or_null(op)) { + if (auto funcOp = dyn_cast_or_null(op)) { // This checks if virtual inline function has already been emitted. // Note that it is possible that this inline function would be emitted // after trying to emit vtable speculatively. Because of this we do @@ -186,14 +186,13 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { bool Delegating, Address This, QualType ThisTy) override; void registerGlobalDtor(CIRGenFunction &CGF, const VarDecl *D, - mlir::cir::FuncOp dtor, mlir::Value Addr) override; + cir::FuncOp dtor, mlir::Value Addr) override; virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) override; virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) override; CatchTypeInfo getAddrOfCXXCatchHandlerType(mlir::Location loc, QualType Ty, QualType CatchHandlerType) override { - auto rtti = - dyn_cast(getAddrOfRTTIDescriptor(loc, Ty)); + auto rtti = dyn_cast(getAddrOfRTTIDescriptor(loc, Ty)); assert(rtti && "expected GlobalViewAttr"); return CatchTypeInfo{rtti, 0}; } @@ -201,8 +200,8 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { void emitBeginCatch(CIRGenFunction &CGF, const CXXCatchStmt *C) override; bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; - mlir::cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, - CharUnits VPtrOffset) override; + cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *RD, + CharUnits VPtrOffset) override; CIRGenCallee getVirtualFunctionPointer(CIRGenFunction &CGF, GlobalDecl GD, Address This, mlir::Type Ty, SourceLocation Loc) override; @@ -319,12 +318,11 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { // function that clang CodeGen has. mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, QualType DestRecordTy, - mlir::cir::PointerType DestCIRTy, bool isRefCast, + cir::PointerType DestCIRTy, bool isRefCast, Address Src) override; - mlir::cir::MethodAttr - buildVirtualMethodAttr(mlir::cir::MethodType MethodTy, - const CXXMethodDecl *MD) override; + cir::MethodAttr buildVirtualMethodAttr(cir::MethodType MethodTy, + const CXXMethodDecl *MD) override; /**************************** RTTI Uniqueness ******************************/ protected: @@ -352,8 +350,7 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { /// Return the required visibility status for the given type and linkage in /// the current ABI. RTTIUniquenessKind - classifyRTTIUniqueness(QualType CanTy, - mlir::cir::GlobalLinkageKind Linkage) const; + classifyRTTIUniqueness(QualType CanTy, cir::GlobalLinkageKind Linkage) const; friend class CIRGenItaniumRTTIBuilder; }; } // namespace @@ -464,14 +461,14 @@ static StructorCIRGen getCIRGenToUse(CIRGenModule &CGM, auto Linkage = CGM.getFunctionLinkage(AliasDecl); (void)Linkage; - if (mlir::cir::isDiscardableIfUnused(Linkage)) + if (cir::isDiscardableIfUnused(Linkage)) return StructorCIRGen::RAUW; // FIXME: Should we allow available_externally aliases? - if (!mlir::cir::isValidLinkage(Linkage)) + if (!cir::isValidLinkage(Linkage)) return StructorCIRGen::RAUW; - if (mlir::cir::isWeakForLinker(Linkage)) { + if (cir::isWeakForLinker(Linkage)) { // Only ELF and wasm support COMDATs with arbitrary names (C5/D5). if (CGM.getTarget().getTriple().isOSBinFormatELF() || CGM.getTarget().getTriple().isOSBinFormatWasm()) @@ -489,18 +486,16 @@ static void emitConstructorDestructorAlias(CIRGenModule &CGM, // Does this function alias already exists? StringRef MangledName = CGM.getMangledName(AliasDecl); - auto globalValue = dyn_cast_or_null( + auto globalValue = dyn_cast_or_null( CGM.getGlobalValue(MangledName)); if (globalValue && !globalValue.isDeclaration()) { return; } - auto Entry = - dyn_cast_or_null(CGM.getGlobalValue(MangledName)); + auto Entry = dyn_cast_or_null(CGM.getGlobalValue(MangledName)); // Retrieve aliasee info. - auto Aliasee = - dyn_cast_or_null(CGM.GetAddrOfGlobal(TargetDecl)); + auto Aliasee = dyn_cast_or_null(CGM.GetAddrOfGlobal(TargetDecl)); assert(Aliasee && "expected cir.func"); // Populate actual alias. @@ -685,7 +680,7 @@ struct CallEndCatch final : EHScopeStack::Cleanup { // here. For CIR, just let it pass since the cleanup is going // to be emitted on a later pass when lowering the catch region. // CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM)); - CGF.getBuilder().create(*CGF.currSrcLoc); + CGF.getBuilder().create(*CGF.currSrcLoc); return; } @@ -693,7 +688,7 @@ struct CallEndCatch final : EHScopeStack::Cleanup { // here. For CIR, just let it pass since the cleanup is going // to be emitted on a later pass when lowering the catch region. // CGF.EmitRuntimeCallOrTryCall(getEndCatchFn(CGF.CGM)); - CGF.getBuilder().create(*CGF.currSrcLoc); + CGF.getBuilder().create(*CGF.currSrcLoc); } }; } // namespace @@ -707,7 +702,7 @@ struct CallEndCatch final : EHScopeStack::Cleanup { /// \param EndMightThrow - true if __cxa_end_catch might throw static mlir::Value CallBeginCatch(CIRGenFunction &CGF, mlir::Type ParamTy, bool EndMightThrow) { - auto catchParam = CGF.getBuilder().create( + auto catchParam = CGF.getBuilder().create( CGF.getBuilder().getUnknownLoc(), ParamTy, nullptr, nullptr); CGF.EHStack.pushCleanup( @@ -831,10 +826,9 @@ void CIRGenItaniumCXXABI::emitBeginCatch(CIRGenFunction &CGF, auto getCatchParamAllocaIP = [&]() { auto currIns = CGF.getBuilder().saveInsertionPoint(); auto currParent = currIns.getBlock()->getParentOp(); - mlir::Operation *scopeLikeOp = - currParent->getParentOfType(); + mlir::Operation *scopeLikeOp = currParent->getParentOfType(); if (!scopeLikeOp) - scopeLikeOp = currParent->getParentOfType(); + scopeLikeOp = currParent->getParentOfType(); assert(scopeLikeOp && "unknown outermost scope-like parent"); assert(scopeLikeOp->getNumRegions() == 1 && "expected single region"); @@ -851,11 +845,10 @@ void CIRGenItaniumCXXABI::emitBeginCatch(CIRGenFunction &CGF, CGF.buildAutoVarCleanups(var); } -mlir::cir::GlobalOp -CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, - CharUnits VPtrOffset) { +cir::GlobalOp CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, + CharUnits VPtrOffset) { assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets"); - mlir::cir::GlobalOp &vtable = VTables[RD]; + cir::GlobalOp &vtable = VTables[RD]; if (vtable) return vtable; @@ -879,7 +872,7 @@ CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, vtable = CGM.createOrReplaceCXXRuntimeVariable( CGM.getLoc(RD->getSourceRange()), Name, VTableType, - mlir::cir::GlobalLinkageKind::ExternalLinkage, + cir::GlobalLinkageKind::ExternalLinkage, getContext().toCharUnitsFromBits(PAlign)); // LLVM codegen handles unnamedAddr assert(!cir::MissingFeatures::unnamedAddr()); @@ -920,11 +913,10 @@ CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer( } else { VTable = CGF.getBuilder().createBitcast( loc, VTable, CGF.getBuilder().getPointerTo(TyPtr)); - auto VTableSlotPtr = - CGF.getBuilder().create( - loc, CGF.getBuilder().getPointerTo(TyPtr), - ::mlir::FlatSymbolRefAttr{}, VTable, - /*vtable_index=*/0, VTableIndex); + auto VTableSlotPtr = CGF.getBuilder().create( + loc, CGF.getBuilder().getPointerTo(TyPtr), + ::mlir::FlatSymbolRefAttr{}, VTable, + /*vtable_index=*/0, VTableIndex); VFuncLoad = CGF.getBuilder().createAlignedLoad(loc, TyPtr, VTableSlotPtr, CGF.getPointerAlign()); } @@ -982,7 +974,7 @@ CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base, auto &builder = CGM.getBuilder(); auto vtablePtrTy = builder.getVirtualFnPtrType(/*isVarArg=*/false); - return builder.create( + return builder.create( CGM.getLoc(VTableClass->getSourceRange()), vtablePtrTy, mlir::FlatSymbolRefAttr::get(vtable.getSymNameAttr()), mlir::Value{}, AddressPoint.VTableIndex, AddressPoint.AddressPointIndex); @@ -1075,8 +1067,8 @@ class CIRGenItaniumRTTIBuilder { SmallVector Fields; // Returns the mangled type name of the given type. - mlir::cir::GlobalOp GetAddrOfTypeName(mlir::Location loc, QualType Ty, - mlir::cir::GlobalLinkageKind Linkage); + cir::GlobalOp GetAddrOfTypeName(mlir::Location loc, QualType Ty, + cir::GlobalLinkageKind Linkage); // /// Returns the constant for the RTTI // /// descriptor of the given type. @@ -1161,7 +1153,7 @@ class CIRGenItaniumRTTIBuilder { /// Build the RTTI type info struct for the given type. mlir::Attribute BuildTypeInfo(mlir::Location loc, QualType Ty, - mlir::cir::GlobalLinkageKind Linkage, + cir::GlobalLinkageKind Linkage, mlir::SymbolTable::Visibility Visibility); }; } // namespace @@ -1429,8 +1421,8 @@ static bool CanUseSingleInheritance(const CXXRecordDecl *RD) { /// Return the linkage that the type info and type info name constants /// should have for the given type. -static mlir::cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &CGM, - QualType Ty) { +static cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &CGM, + QualType Ty) { // Itanium C++ ABI 2.9.5p7: // In addition, it and all of the intermediate abi::__pointer_type_info // structs in the chain down to the abi::__class_type_info for the @@ -1441,13 +1433,13 @@ static mlir::cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &CGM, // complete class RTTI (because the latter need not exist), possibly by // making it a local static object. if (ContainsIncompleteClassType(Ty)) - return mlir::cir::GlobalLinkageKind::InternalLinkage; + return cir::GlobalLinkageKind::InternalLinkage; switch (Ty->getLinkage()) { case Linkage::None: case Linkage::Internal: case Linkage::UniqueExternal: - return mlir::cir::GlobalLinkageKind::InternalLinkage; + return cir::GlobalLinkageKind::InternalLinkage; case Linkage::VisibleNone: case Linkage::Module: @@ -1455,16 +1447,16 @@ static mlir::cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &CGM, // RTTI is not enabled, which means that this type info struct is going // to be used for exception handling. Give it linkonce_odr linkage. if (!CGM.getLangOpts().RTTI) - return mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage; + return cir::GlobalLinkageKind::LinkOnceODRLinkage; if (const RecordType *Record = dyn_cast(Ty)) { const CXXRecordDecl *RD = cast(Record->getDecl()); if (RD->hasAttr()) - return mlir::cir::GlobalLinkageKind::WeakODRLinkage; + return cir::GlobalLinkageKind::WeakODRLinkage; if (CGM.getTriple().isWindowsItaniumEnvironment()) if (RD->hasAttr() && ShouldUseExternalRTTIDescriptor(CGM, Ty)) - return mlir::cir::GlobalLinkageKind::ExternalLinkage; + return cir::GlobalLinkageKind::ExternalLinkage; // MinGW always uses LinkOnceODRLinkage for type info. if (RD->isDynamicClass() && !CGM.getASTContext() .getTargetInfo() @@ -1473,7 +1465,7 @@ static mlir::cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &CGM, return CGM.getVTableLinkage(RD); } - return mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage; + return cir::GlobalLinkageKind::LinkOnceODRLinkage; case Linkage::Invalid: llvm_unreachable("Invalid linkage!"); } @@ -1491,7 +1483,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(mlir::Location loc, llvm::raw_svector_ostream Out(Name); CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); - auto OldGV = dyn_cast_or_null( + auto OldGV = dyn_cast_or_null( mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), Name)); if (OldGV && !OldGV.isDeclaration()) { @@ -1514,7 +1506,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo(mlir::Location loc, assert(!cir::MissingFeatures::hiddenVisibility()); assert(!cir::MissingFeatures::protectedVisibility()); mlir::SymbolTable::Visibility symVisibility; - if (mlir::cir::isLocalLinkage(Linkage)) + if (cir::isLocalLinkage(Linkage)) // If the linkage is local, only default visibility makes sense. symVisibility = mlir::SymbolTable::Visibility::Public; else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) == @@ -1646,7 +1638,7 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, break; } - mlir::cir::GlobalOp VTable{}; + cir::GlobalOp VTable{}; // Check if the alias exists. If it doesn't, then get or create the global. if (CGM.getItaniumVTableContext().isRelativeLayout()) @@ -1675,8 +1667,9 @@ void CIRGenItaniumRTTIBuilder::BuildVTablePointer(mlir::Location loc, Fields.push_back(field); } -mlir::cir::GlobalOp CIRGenItaniumRTTIBuilder::GetAddrOfTypeName( - mlir::Location loc, QualType Ty, mlir::cir::GlobalLinkageKind Linkage) { +cir::GlobalOp +CIRGenItaniumRTTIBuilder::GetAddrOfTypeName(mlir::Location loc, QualType Ty, + cir::GlobalLinkageKind Linkage) { auto &builder = CGM.getBuilder(); SmallString<256> Name; llvm::raw_svector_ostream Out(Name); @@ -1693,7 +1686,7 @@ mlir::cir::GlobalOp CIRGenItaniumRTTIBuilder::GetAddrOfTypeName( // builder.getString can return a #cir.zero if the string given to it only // contains null bytes. However, type names cannot be full of null bytes. // So cast Init to a ConstArrayAttr should be safe. - auto InitStr = cast(Init); + auto InitStr = cast(Init); auto GV = CGM.createOrReplaceCXXRuntimeVariable(loc, Name, InitStr.getType(), Linkage, Align); @@ -1784,12 +1777,12 @@ void CIRGenItaniumRTTIBuilder::BuildVMIClassTypeInfo(mlir::Location loc, // structure, which may be referenced by using the __flags_masks // enumeration. These flags refer to both direct and indirect bases. unsigned Flags = ComputeVMIClassTypeInfoFlags(RD); - Fields.push_back(mlir::cir::IntAttr::get(UnsignedIntLTy, Flags)); + Fields.push_back(cir::IntAttr::get(UnsignedIntLTy, Flags)); // Itanium C++ ABI 2.9.5p6c: // __base_count is a word with the number of direct proper base class // descriptions that follow. - Fields.push_back(mlir::cir::IntAttr::get(UnsignedIntLTy, RD->getNumBases())); + Fields.push_back(cir::IntAttr::get(UnsignedIntLTy, RD->getNumBases())); if (!RD->getNumBases()) return; @@ -1856,7 +1849,7 @@ void CIRGenItaniumRTTIBuilder::BuildVMIClassTypeInfo(mlir::Location loc, if (Base.getAccessSpecifier() == AS_public) OffsetFlags |= BCTI_Public; - Fields.push_back(mlir::cir::IntAttr::get(OffsetFlagsLTy, OffsetFlags)); + Fields.push_back(cir::IntAttr::get(OffsetFlagsLTy, OffsetFlags)); } } @@ -1870,7 +1863,7 @@ CIRGenItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(mlir::Location loc, auto &builder = CGM.getBuilder(); // Look for an existing global. - auto GV = dyn_cast_or_null( + auto GV = dyn_cast_or_null( mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), Name)); if (!GV) { @@ -1893,7 +1886,7 @@ CIRGenItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(mlir::Location loc, } mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( - mlir::Location loc, QualType Ty, mlir::cir::GlobalLinkageKind Linkage, + mlir::Location loc, QualType Ty, cir::GlobalLinkageKind Linkage, mlir::SymbolTable::Visibility Visibility) { auto &builder = CGM.getBuilder(); assert(!cir::MissingFeatures::setDLLStorageClass()); @@ -2021,9 +2014,9 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); // Create new global and search for an existing global. - auto OldGV = dyn_cast_or_null( + auto OldGV = dyn_cast_or_null( mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), Name)); - mlir::cir::GlobalOp GV = + cir::GlobalOp GV = CIRGenModule::createGlobalOp(CGM, loc, Name, init.getType(), /*isConstant=*/true); @@ -2043,7 +2036,7 @@ mlir::Attribute CIRGenItaniumRTTIBuilder::BuildTypeInfo( OldGV->erase(); } - if (CGM.supportsCOMDAT() && mlir::cir::isWeakForLinker(GV.getLinkage())) { + if (CGM.supportsCOMDAT() && cir::isWeakForLinker(GV.getLinkage())) { assert(!cir::MissingFeatures::setComdat()); llvm_unreachable("NYI"); } @@ -2106,13 +2099,13 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, auto components = builder.beginStruct(); CGVT.createVTableInitializer(components, VTLayout, RTTI, - mlir::cir::isLocalLinkage(Linkage)); + cir::isLocalLinkage(Linkage)); components.finishAndSetAsInitializer(VTable, /*forVtable=*/true); // Set the correct linkage. VTable.setLinkage(Linkage); - if (CGM.supportsCOMDAT() && mlir::cir::isWeakForLinker(Linkage)) { + if (CGM.supportsCOMDAT() && cir::isWeakForLinker(Linkage)) { assert(!cir::MissingFeatures::setComdat()); } @@ -2132,8 +2125,7 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, // EmitFundamentalRTTIDescriptors(RD); } - auto VTableAsGlobalValue = - dyn_cast(*VTable); + auto VTableAsGlobalValue = dyn_cast(*VTable); assert(VTableAsGlobalValue && "VTable must support CIRGlobalValueInterface"); bool isDeclarationForLinker = VTableAsGlobalValue.isDeclarationForLinker(); // Always emit type metadata on non-available_externally definitions, and on @@ -2169,13 +2161,13 @@ void CIRGenItaniumCXXABI::emitVirtualInheritanceTables( /// given type? CIRGenItaniumCXXABI::RTTIUniquenessKind CIRGenItaniumCXXABI::classifyRTTIUniqueness( - QualType CanTy, mlir::cir::GlobalLinkageKind Linkage) const { + QualType CanTy, cir::GlobalLinkageKind Linkage) const { if (shouldRTTIBeUnique()) return RUK_Unique; // It's only necessary for linkonce_odr or weak_odr linkage. - if (Linkage != mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage && - Linkage != mlir::cir::GlobalLinkageKind::WeakODRLinkage) + if (Linkage != cir::GlobalLinkageKind::LinkOnceODRLinkage && + Linkage != cir::GlobalLinkageKind::WeakODRLinkage) return RUK_Unique; // It's only necessary with default visibility. @@ -2183,13 +2175,13 @@ CIRGenItaniumCXXABI::classifyRTTIUniqueness( return RUK_Unique; // If we're not required to publish this symbol, hide it. - if (Linkage == mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage) + if (Linkage == cir::GlobalLinkageKind::LinkOnceODRLinkage) return RUK_NonUniqueHidden; // If we're required to publish this symbol, as we might be under an // explicit instantiation, leave it with default visibility but // enable string-comparisons. - assert(Linkage == mlir::cir::GlobalLinkageKind::WeakODRLinkage); + assert(Linkage == cir::GlobalLinkageKind::WeakODRLinkage); return RUK_NonUniqueVisible; } @@ -2212,8 +2204,7 @@ void CIRGenItaniumCXXABI::buildDestructorCall( } void CIRGenItaniumCXXABI::registerGlobalDtor(CIRGenFunction &CGF, - const VarDecl *D, - mlir::cir::FuncOp dtor, + const VarDecl *D, cir::FuncOp dtor, mlir::Value Addr) { if (D->isNoDestroy(CGM.getASTContext())) return; @@ -2257,8 +2248,8 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, // Defer computing allocation size to some later lowering pass. auto exceptionPtr = builder - .create( - subExprLoc, throwTy, builder.getI64IntegerAttr(typeSize)) + .create(subExprLoc, throwTy, + builder.getI64IntegerAttr(typeSize)) .getAddr(); // Build expression and store its result into exceptionPtr. @@ -2266,7 +2257,7 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, CGF.buildAnyExprToExn(E->getSubExpr(), Address(exceptionPtr, exnAlign)); // Get the RTTI symbol address. - auto typeInfo = mlir::dyn_cast_if_present( + auto typeInfo = mlir::dyn_cast_if_present( CGM.getAddrOfRTTIDescriptor(subExprLoc, clangThrowType, /*ForEH=*/true)); assert(typeInfo && "expected GlobalViewAttr typeinfo"); @@ -2301,9 +2292,8 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, // Now throw the exception. mlir::Location loc = CGF.getLoc(E->getSourceRange()); - builder.create(loc, exceptionPtr, typeInfo.getSymbol(), - dtor); - builder.create(loc); + builder.create(loc, exceptionPtr, typeInfo.getSymbol(), dtor); + builder.create(loc); } mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset( @@ -2315,7 +2305,7 @@ mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset( BaseClassDecl); mlir::Value OffsetVal = CGF.getBuilder().getSInt64(VBaseOffsetOffset.getQuantity(), loc); - auto VBaseOffsetPtr = CGF.getBuilder().create( + auto VBaseOffsetPtr = CGF.getBuilder().create( loc, VTablePtr.getType(), VTablePtr, OffsetVal); // vbase.offset.ptr @@ -2332,13 +2322,13 @@ mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset( return VBaseOffset; } -static mlir::cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { +static cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { // Prototype: void __cxa_bad_cast(); // TODO(cir): set the calling convention of the runtime function. assert(!cir::MissingFeatures::setCallingConv()); - mlir::cir::FuncType FTy = + cir::FuncType FTy = CGF.getBuilder().getFuncType({}, CGF.getBuilder().getVoidTy()); return CGF.CGM.createRuntimeFunction(FTy, "__cxa_bad_cast"); } @@ -2348,7 +2338,7 @@ static void buildCallToBadCast(CIRGenFunction &CGF, mlir::Location loc) { assert(!cir::MissingFeatures::setCallingConv()); CGF.buildRuntimeCall(loc, getBadCastFn(CGF)); - CGF.getBuilder().create(loc); + CGF.getBuilder().create(loc); CGF.getBuilder().clearInsertionPoint(); } @@ -2407,7 +2397,7 @@ static CharUnits computeOffsetHint(ASTContext &Context, return Offset; } -static mlir::cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { +static cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { // Prototype: // void *__dynamic_cast(const void *sub, // global_as const abi::__class_type_info *src, @@ -2423,7 +2413,7 @@ static mlir::cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { // TODO(cir): set the calling convention of the runtime function. assert(!cir::MissingFeatures::setCallingConv()); - mlir::cir::FuncType FTy = CGF.getBuilder().getFuncType( + cir::FuncType FTy = CGF.getBuilder().getFuncType( {VoidPtrTy, RTTIPtrTy, RTTIPtrTy, PtrDiffTy}, VoidPtrTy); return CGF.CGM.createRuntimeFunction(FTy, "__dynamic_cast"); } @@ -2440,7 +2430,7 @@ static Address buildDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc, static mlir::Value buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, - QualType DestRecordTy, mlir::cir::PointerType DestCIRTy, + QualType DestRecordTy, cir::PointerType DestCIRTy, bool IsRefCast, Address Src) { // Find all the inheritance paths from SrcRecordTy to DestRecordTy. const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); @@ -2524,8 +2514,8 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, // TODO(cir): decorate SrcVPtr with TBAA info. assert(!cir::MissingFeatures::tbaa()); - mlir::Value Success = CGF.getBuilder().createCompare( - Loc, mlir::cir::CmpOpKind::eq, SrcVPtr, ExpectedVPtr); + mlir::Value Success = CGF.getBuilder().createCompare(Loc, cir::CmpOpKind::eq, + SrcVPtr, ExpectedVPtr); auto buildCastResult = [&] { if (Offset->isZero()) @@ -2540,23 +2530,22 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, Loc, CGF.getBuilder().getUInt64Ty(), Offset->getQuantity()); mlir::Value SrcU8Ptr = CGF.getBuilder().createBitcast(Src.getPointer(), U8PtrTy); - mlir::Value ResultU8Ptr = CGF.getBuilder().create( + mlir::Value ResultU8Ptr = CGF.getBuilder().create( Loc, U8PtrTy, SrcU8Ptr, StrideToApply); return CGF.getBuilder().createBitcast(ResultU8Ptr, DestCIRTy); }; if (IsRefCast) { mlir::Value Failed = CGF.getBuilder().createNot(Success); - CGF.getBuilder().create( - Loc, Failed, /*withElseRegion=*/false, - [&](mlir::OpBuilder &, mlir::Location) { - buildCallToBadCast(CGF, Loc); - }); + CGF.getBuilder().create(Loc, Failed, /*withElseRegion=*/false, + [&](mlir::OpBuilder &, mlir::Location) { + buildCallToBadCast(CGF, Loc); + }); return buildCastResult(); } return CGF.getBuilder() - .create( + .create( Loc, Success, [&](mlir::OpBuilder &, mlir::Location) { auto Result = buildCastResult(); @@ -2570,12 +2559,13 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, .getResult(); } -static mlir::cir::DynamicCastInfoAttr -buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc, - QualType SrcRecordTy, QualType DestRecordTy) { - auto srcRtti = mlir::cast( +static cir::DynamicCastInfoAttr buildDynamicCastInfo(CIRGenFunction &CGF, + mlir::Location Loc, + QualType SrcRecordTy, + QualType DestRecordTy) { + auto srcRtti = mlir::cast( CGF.CGM.getAddrOfRTTIDescriptor(Loc, SrcRecordTy)); - auto destRtti = mlir::cast( + auto destRtti = mlir::cast( CGF.CGM.getAddrOfRTTIDescriptor(Loc, DestRecordTy)); auto runtimeFuncOp = getItaniumDynamicCastFn(CGF); @@ -2588,17 +2578,18 @@ buildDynamicCastInfo(CIRGenFunction &CGF, mlir::Location Loc, auto offsetHint = computeOffsetHint(CGF.getContext(), srcDecl, destDecl); mlir::Type ptrdiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); - auto offsetHintAttr = - mlir::cir::IntAttr::get(ptrdiffTy, offsetHint.getQuantity()); + auto offsetHintAttr = cir::IntAttr::get(ptrdiffTy, offsetHint.getQuantity()); - return mlir::cir::DynamicCastInfoAttr::get(srcRtti, destRtti, runtimeFuncRef, - badCastFuncRef, offsetHintAttr); + return cir::DynamicCastInfoAttr::get(srcRtti, destRtti, runtimeFuncRef, + badCastFuncRef, offsetHintAttr); } -mlir::Value CIRGenItaniumCXXABI::buildDynamicCast( - CIRGenFunction &CGF, mlir::Location Loc, QualType SrcRecordTy, - QualType DestRecordTy, mlir::cir::PointerType DestCIRTy, bool isRefCast, - Address Src) { +mlir::Value CIRGenItaniumCXXABI::buildDynamicCast(CIRGenFunction &CGF, + mlir::Location Loc, + QualType SrcRecordTy, + QualType DestRecordTy, + cir::PointerType DestCIRTy, + bool isRefCast, Address Src) { bool isCastToVoid = DestRecordTy.isNull(); assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference"); @@ -2617,8 +2608,8 @@ mlir::Value CIRGenItaniumCXXABI::buildDynamicCast( isRefCast, castInfo); } -mlir::cir::MethodAttr -CIRGenItaniumCXXABI::buildVirtualMethodAttr(mlir::cir::MethodType MethodTy, +cir::MethodAttr +CIRGenItaniumCXXABI::buildVirtualMethodAttr(cir::MethodType MethodTy, const CXXMethodDecl *MD) { assert(MD->isVirtual() && "only deal with virtual member functions"); @@ -2634,7 +2625,7 @@ CIRGenItaniumCXXABI::buildVirtualMethodAttr(mlir::cir::MethodType MethodTy, VTableOffset = Index * PointerWidth.getQuantity(); } - return mlir::cir::MethodAttr::get(MethodTy, VTableOffset); + return cir::MethodAttr::get(MethodTy, VTableOffset); } /// The Itanium ABI requires non-zero initialization only for data diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index cdf3f4b93ac0..c8fecd3f20ee 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -81,7 +81,7 @@ #include #include -using namespace mlir::cir; +using namespace cir; using namespace clang; using namespace clang::CIRGen; @@ -113,38 +113,30 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, VTables{*this}, openMPRuntime(new CIRGenOpenMPRuntime(*this)) { // Initialize CIR signed integer types cache. - SInt8Ty = ::mlir::cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true); - SInt16Ty = - ::mlir::cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/true); - SInt32Ty = - ::mlir::cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/true); - SInt64Ty = - ::mlir::cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true); - SInt128Ty = - ::mlir::cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true); + SInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true); + SInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/true); + SInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/true); + SInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true); + SInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true); // Initialize CIR unsigned integer types cache. - UInt8Ty = ::mlir::cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false); - UInt16Ty = - ::mlir::cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false); - UInt32Ty = - ::mlir::cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false); - UInt64Ty = - ::mlir::cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false); - UInt128Ty = - ::mlir::cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/false); - - VoidTy = ::mlir::cir::VoidType::get(&getMLIRContext()); + UInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false); + UInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false); + UInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false); + UInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false); + UInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/false); + + VoidTy = cir::VoidType::get(&getMLIRContext()); // Initialize CIR pointer types cache. - VoidPtrTy = ::mlir::cir::PointerType::get(&getMLIRContext(), VoidTy); + VoidPtrTy = cir::PointerType::get(&getMLIRContext(), VoidTy); - FP16Ty = ::mlir::cir::FP16Type::get(&getMLIRContext()); - BFloat16Ty = ::mlir::cir::BF16Type::get(&getMLIRContext()); - FloatTy = ::mlir::cir::SingleType::get(&getMLIRContext()); - DoubleTy = ::mlir::cir::DoubleType::get(&getMLIRContext()); - FP80Ty = ::mlir::cir::FP80Type::get(&getMLIRContext()); - FP128Ty = ::mlir::cir::FP128Type::get(&getMLIRContext()); + FP16Ty = cir::FP16Type::get(&getMLIRContext()); + BFloat16Ty = cir::BF16Type::get(&getMLIRContext()); + FloatTy = cir::SingleType::get(&getMLIRContext()); + DoubleTy = cir::DoubleType::get(&getMLIRContext()); + FP80Ty = cir::FP80Type::get(&getMLIRContext()); + FP128Ty = cir::FP128Type::get(&getMLIRContext()); // TODO: PointerWidthInBits PointerAlignInBytes = @@ -154,15 +146,15 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, .getQuantity(); // TODO: SizeSizeInBytes // TODO: IntAlignInBytes - UCharTy = ::mlir::cir::IntType::get(&getMLIRContext(), - astCtx.getTargetInfo().getCharWidth(), - /*isSigned=*/false); - UIntTy = ::mlir::cir::IntType::get(&getMLIRContext(), - astCtx.getTargetInfo().getIntWidth(), - /*isSigned=*/false); - UIntPtrTy = ::mlir::cir::IntType::get( - &getMLIRContext(), astCtx.getTargetInfo().getMaxPointerWidth(), - /*isSigned=*/false); + UCharTy = cir::IntType::get(&getMLIRContext(), + astCtx.getTargetInfo().getCharWidth(), + /*isSigned=*/false); + UIntTy = + cir::IntType::get(&getMLIRContext(), astCtx.getTargetInfo().getIntWidth(), + /*isSigned=*/false); + UIntPtrTy = cir::IntType::get(&getMLIRContext(), + astCtx.getTargetInfo().getMaxPointerWidth(), + /*isSigned=*/false); UInt8PtrTy = builder.getPointerTo(UInt8Ty); UInt8PtrPtrTy = builder.getPointerTo(UInt8PtrTy); AllocaInt8PtrTy = UInt8PtrTy; @@ -170,15 +162,15 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // TODO: ConstGlobalsPtrTy CIRAllocaAddressSpace = getTargetCIRGenInfo().getCIRAllocaAddressSpace(); - PtrDiffTy = ::mlir::cir::IntType::get( - &getMLIRContext(), astCtx.getTargetInfo().getMaxPointerWidth(), - /*isSigned=*/true); + PtrDiffTy = cir::IntType::get(&getMLIRContext(), + astCtx.getTargetInfo().getMaxPointerWidth(), + /*isSigned=*/true); if (langOpts.OpenCL) { createOpenCLRuntime(); } - mlir::cir::sob::SignedOverflowBehavior sob; + cir::sob::SignedOverflowBehavior sob; switch (langOpts.getSignedOverflowBehavior()) { case clang::LangOptions::SignedOverflowBehaviorTy::SOB_Defined: sob = sob::SignedOverflowBehavior::defined; @@ -194,9 +186,9 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // FIXME(cir): Implement a custom CIR Module Op and attributes to leverage // MLIR features. theModule->setAttr("cir.sob", - mlir::cir::SignedOverflowBehaviorAttr::get(&context, sob)); + cir::SignedOverflowBehaviorAttr::get(&context, sob)); auto lang = SourceLanguageAttr::get(&context, getCIRSourceLanguage()); - theModule->setAttr("cir.lang", mlir::cir::LangAttr::get(&context, lang)); + theModule->setAttr("cir.lang", cir::LangAttr::get(&context, lang)); theModule->setAttr("cir.triple", builder.getStringAttr(getTriple().str())); // Set the module name to be the name of the main file. TranslationUnitDecl // often contains invalid source locations and isn't a reliable source for the @@ -430,7 +422,7 @@ static bool shouldAssumeDSOLocal(const CIRGenModule &CGM, // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set // dso_local on the function if using a local alias is preferable (can avoid // PLT indirection). - if (!(isa(GV) && GV.canBenefitFromLocalAlias())) { + if (!(isa(GV) && GV.canBenefitFromLocalAlias())) { return false; } return !(CGM.getLangOpts().SemanticInterposition || @@ -457,7 +449,7 @@ static bool shouldAssumeDSOLocal(const CIRGenModule &CGM, // executable, a copy relocation will be needed at link time. dso_local is // excluded for thread-local variables because they generally don't support // copy relocations. - if (auto gv = dyn_cast(GV.getOperation())) + if (auto gv = dyn_cast(GV.getOperation())) if (!gv.getTlsModelAttr()) return true; @@ -467,8 +459,7 @@ static bool shouldAssumeDSOLocal(const CIRGenModule &CGM, // needed at link time. -fno-direct-access-external-data can avoid the // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as // it could just cause trouble without providing perceptible benefits. - if (isa(GV) && !CGOpts.NoPLT && - RM == llvm::Reloc::Static) + if (isa(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static) return true; } @@ -605,7 +596,7 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, auto Ty = getTypes().GetFunctionType(FI); // Get or create the prototype for the function. - auto Fn = dyn_cast_if_present(Op); + auto Fn = dyn_cast_if_present(Op); if (!Fn || Fn.getFunctionType() != Ty) Fn = GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/true, ForDefinition); @@ -641,7 +632,7 @@ void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, } /// Track functions to be called before main() runs. -void CIRGenModule::AddGlobalCtor(mlir::cir::FuncOp Ctor, int Priority) { +void CIRGenModule::AddGlobalCtor(cir::FuncOp Ctor, int Priority) { // FIXME(cir): handle LexOrder and Associated data upon testcases. // // Traditional LLVM codegen directly adds the function to the list of global @@ -649,13 +640,13 @@ void CIRGenModule::AddGlobalCtor(mlir::cir::FuncOp Ctor, int Priority) { // global list is created in LoweringPrepare. // // FIXME(from traditional LLVM): Type coercion of void()* types. - Ctor->setAttr(Ctor.getGlobalCtorAttrName(), - mlir::cir::GlobalCtorAttr::get(&getMLIRContext(), - Ctor.getName(), Priority)); + Ctor->setAttr( + Ctor.getGlobalCtorAttrName(), + cir::GlobalCtorAttr::get(&getMLIRContext(), Ctor.getName(), Priority)); } /// Add a function to the list that will be called when the module is unloaded. -void CIRGenModule::AddGlobalDtor(mlir::cir::FuncOp Dtor, int Priority, +void CIRGenModule::AddGlobalDtor(cir::FuncOp Dtor, int Priority, bool IsDtorAttrFunc) { assert(IsDtorAttrFunc && "NYI"); if (codeGenOpts.RegisterGlobalDtorsWithAtExit && @@ -665,9 +656,9 @@ void CIRGenModule::AddGlobalDtor(mlir::cir::FuncOp Dtor, int Priority, } // FIXME(from traditional LLVM): Type coercion of void()* types. - Dtor->setAttr(Dtor.getGlobalDtorAttrName(), - mlir::cir::GlobalDtorAttr::get(&getMLIRContext(), - Dtor.getName(), Priority)); + Dtor->setAttr( + Dtor.getGlobalDtorAttrName(), + cir::GlobalDtorAttr::get(&getMLIRContext(), Dtor.getName(), Priority)); } mlir::Operation *CIRGenModule::getGlobalValue(StringRef Name) { @@ -682,11 +673,13 @@ mlir::Value CIRGenModule::getGlobalValue(const Decl *D) { return CurCGF->symbolTable.lookup(D); } -mlir::cir::GlobalOp CIRGenModule::createGlobalOp( - CIRGenModule &cgm, mlir::Location loc, StringRef name, mlir::Type t, - bool isConstant, mlir::cir::AddressSpaceAttr addrSpace, - mlir::Operation *insertPoint, mlir::cir::GlobalLinkageKind linkage) { - mlir::cir::GlobalOp g; +cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &cgm, + mlir::Location loc, StringRef name, + mlir::Type t, bool isConstant, + cir::AddressSpaceAttr addrSpace, + mlir::Operation *insertPoint, + cir::GlobalLinkageKind linkage) { + cir::GlobalOp g; auto &builder = cgm.getBuilder(); { mlir::OpBuilder::InsertionGuard guard(builder); @@ -699,8 +692,8 @@ mlir::cir::GlobalOp CIRGenModule::createGlobalOp( if (curCGF) builder.setInsertionPoint(curCGF->CurFn); - g = builder.create(loc, name, t, isConstant, linkage, - addrSpace); + g = builder.create(loc, name, t, isConstant, linkage, + addrSpace); if (!curCGF) { if (insertPoint) cgm.getModule().insert(insertPoint, g); @@ -741,7 +734,7 @@ void CIRGenModule::setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GO) { setCommonAttributes(GD, GO); if (D) { - auto GV = llvm::dyn_cast_or_null(GO); + auto GV = llvm::dyn_cast_or_null(GO); if (GV) { if (D->hasAttr()) assert(!cir::MissingFeatures::addUsedGlobal()); @@ -754,7 +747,7 @@ void CIRGenModule::setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GO) { if (auto *SA = D->getAttr()) assert(!cir::MissingFeatures::addSectionAttributes()); } - auto F = llvm::dyn_cast_or_null(GO); + auto F = llvm::dyn_cast_or_null(GO); if (F) { if (D->hasAttr()) assert(!cir::MissingFeatures::addUsedGlobal()); @@ -780,15 +773,14 @@ void CIRGenModule::setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GO) { assert(!cir::MissingFeatures::setTargetAttributes()); } -void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, - mlir::cir::GlobalOp New) { +void CIRGenModule::replaceGlobal(cir::GlobalOp Old, cir::GlobalOp New) { assert(Old.getSymName() == New.getSymName() && "symbol names must match"); // If the types does not match, update all references to Old to the new type. auto OldTy = Old.getSymType(); auto NewTy = New.getSymType(); - mlir::cir::AddressSpaceAttr oldAS = Old.getAddrSpaceAttr(); - mlir::cir::AddressSpaceAttr newAS = New.getAddrSpaceAttr(); + cir::AddressSpaceAttr oldAS = Old.getAddrSpaceAttr(); + cir::AddressSpaceAttr newAS = New.getAddrSpaceAttr(); // TODO(cir): If the AS differs, we should also update all references. if (oldAS != newAS) { llvm_unreachable("NYI"); @@ -798,14 +790,13 @@ void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, if (OldSymUses.has_value()) { for (auto Use : *OldSymUses) { auto *UserOp = Use.getUser(); - assert((isa(UserOp) || - isa(UserOp)) && + assert((isa(UserOp) || isa(UserOp)) && "GlobalOp symbol user is neither a GetGlobalOp nor a GlobalOp"); - if (auto GGO = dyn_cast(Use.getUser())) { + if (auto GGO = dyn_cast(Use.getUser())) { auto UseOpResultValue = GGO.getAddr(); UseOpResultValue.setType( - mlir::cir::PointerType::get(&getMLIRContext(), NewTy)); + cir::PointerType::get(&getMLIRContext(), NewTy)); } } } @@ -815,16 +806,16 @@ void CIRGenModule::replaceGlobal(mlir::cir::GlobalOp Old, Old.erase(); } -mlir::cir::TLS_Model CIRGenModule::GetDefaultCIRTLSModel() const { +cir::TLS_Model CIRGenModule::GetDefaultCIRTLSModel() const { switch (getCodeGenOpts().getDefaultTLSModel()) { case CodeGenOptions::GeneralDynamicTLSModel: - return mlir::cir::TLS_Model::GeneralDynamic; + return cir::TLS_Model::GeneralDynamic; case CodeGenOptions::LocalDynamicTLSModel: - return mlir::cir::TLS_Model::LocalDynamic; + return cir::TLS_Model::LocalDynamic; case CodeGenOptions::InitialExecTLSModel: - return mlir::cir::TLS_Model::InitialExec; + return cir::TLS_Model::InitialExec; case CodeGenOptions::LocalExecTLSModel: - return mlir::cir::TLS_Model::LocalExec; + return cir::TLS_Model::LocalExec; } llvm_unreachable("Invalid TLS model!"); } @@ -839,7 +830,7 @@ void CIRGenModule::setTLSMode(mlir::Operation *Op, const VarDecl &D) const { llvm_unreachable("NYI"); } - auto global = dyn_cast(Op); + auto global = dyn_cast(Op); assert(global && "NYI for other operations"); global.setTlsModel(TLM); } @@ -858,25 +849,25 @@ void CIRGenModule::setTLSMode(mlir::Operation *Op, const VarDecl &D) const { /// 3. If IsForDefinition is true, it is guaranteed that an actual global with /// type Ty will be returned, not conversion of a variable with the same /// mangled name but some other type. -mlir::cir::GlobalOp +cir::GlobalOp CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, LangAS langAS, const VarDecl *D, ForDefinition_t IsForDefinition) { // Lookup the entry, lazily creating it if necessary. - mlir::cir::GlobalOp Entry; + cir::GlobalOp Entry; if (auto *V = getGlobalValue(MangledName)) { - assert(isa(V) && "only supports GlobalOp for now"); - Entry = dyn_cast_or_null(V); + assert(isa(V) && "only supports GlobalOp for now"); + Entry = dyn_cast_or_null(V); } - mlir::cir::AddressSpaceAttr cirAS = builder.getAddrSpaceAttr(langAS); + cir::AddressSpaceAttr cirAS = builder.getAddrSpaceAttr(langAS); if (Entry) { auto entryCIRAS = Entry.getAddrSpaceAttr(); if (WeakRefReferences.erase(Entry)) { if (D && !D->hasAttr()) { - auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage; + auto LT = cir::GlobalLinkageKind::ExternalLinkage; Entry.setLinkageAttr( - mlir::cir::GlobalLinkageKindAttr::get(&getMLIRContext(), LT)); + cir::GlobalLinkageKindAttr::get(&getMLIRContext(), LT)); mlir::SymbolTable::setSymbolVisibility(Entry, getMLIRVisibility(Entry)); } } @@ -1019,7 +1010,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, return GV; } -mlir::cir::GlobalOp +cir::GlobalOp CIRGenModule::getOrCreateCIRGlobal(const VarDecl *D, mlir::Type Ty, ForDefinition_t IsForDefinition) { assert(D->hasGlobalStorage() && "Not a global variable"); @@ -1048,11 +1039,11 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *D, mlir::Type Ty, bool tlsAccess = D->getTLSKind() != VarDecl::TLS_None; auto g = getOrCreateCIRGlobal(D, Ty, IsForDefinition); auto ptrTy = builder.getPointerTo(g.getSymType(), g.getAddrSpaceAttr()); - return builder.create( - getLoc(D->getSourceRange()), ptrTy, g.getSymName(), tlsAccess); + return builder.create(getLoc(D->getSourceRange()), ptrTy, + g.getSymName(), tlsAccess); } -mlir::cir::GlobalViewAttr +cir::GlobalViewAttr CIRGenModule::getAddrOfGlobalVarAttr(const VarDecl *D, mlir::Type Ty, ForDefinition_t IsForDefinition) { assert(D->hasGlobalStorage() && "Not a global variable"); @@ -1071,17 +1062,17 @@ mlir::Operation *CIRGenModule::getWeakRefReference(const ValueDecl *VD) { // See if there is already something with the target's name in the module. mlir::Operation *Entry = getGlobalValue(AA->getAliasee()); if (Entry) { - assert((isa(Entry) || isa(Entry)) && + assert((isa(Entry) || isa(Entry)) && "weak ref should be against a global variable or function"); return Entry; } mlir::Type DeclTy = getTypes().convertTypeForMem(VD->getType()); - if (mlir::isa(DeclTy)) { + if (mlir::isa(DeclTy)) { auto F = GetOrCreateCIRFunction(AA->getAliasee(), DeclTy, GlobalDecl(cast(VD)), /*ForVtable=*/false); - F.setLinkage(mlir::cir::GlobalLinkageKind::ExternalWeakLinkage); + F.setLinkage(cir::GlobalLinkageKind::ExternalWeakLinkage); WeakRefReferences.insert(F); return F; } @@ -1093,7 +1084,7 @@ mlir::Operation *CIRGenModule::getWeakRefReference(const ValueDecl *VD) { /// helper betweem CIR and LLVM codegen. template void CIRGenModule::maybeHandleStaticInExternC(const SomeDecl *D, - mlir::cir::GlobalOp GV) { + cir::GlobalOp GV) { if (!getLangOpts().CPlusPlus) return; @@ -1233,15 +1224,14 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // away SymbolRefAttr. if (auto symAttr = mlir::dyn_cast(Init)) { auto cstGlobal = mlir::SymbolTable::lookupSymbolIn(theModule, symAttr); - assert(isa(cstGlobal) && + assert(isa(cstGlobal) && "unaware of other symbol providers"); - auto g = cast(cstGlobal); - auto arrayTy = mlir::dyn_cast(g.getSymType()); + auto g = cast(cstGlobal); + auto arrayTy = mlir::dyn_cast(g.getSymType()); // TODO(cir): pointer to array decay. Should this be modeled explicitly in // CIR? if (arrayTy) - InitType = - mlir::cir::PointerType::get(&getMLIRContext(), arrayTy.getEltType()); + InitType = cir::PointerType::get(&getMLIRContext(), arrayTy.getEltType()); } else { assert(mlir::isa(Init) && "This should have a type"); auto TypedInitAttr = mlir::cast(Init); @@ -1276,7 +1266,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, addGlobalAnnotations(D, GV); // Set CIR's linkage type as appropriate. - mlir::cir::GlobalLinkageKind Linkage = + cir::GlobalLinkageKind Linkage = getCIRLinkageVarDefinition(D, /*IsConstant=*/false); // TODO(cir): @@ -1340,7 +1330,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, else assert(!cir::MissingFeatures::setDLLStorageClass()); - if (Linkage == mlir::cir::GlobalLinkageKind::CommonLinkage) { + if (Linkage == cir::GlobalLinkageKind::CommonLinkage) { // common vars aren't constant even if declared const. GV.setConstant(false); // Tentative definition of global variables may be initialized with @@ -1349,7 +1339,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // explicit section therefore cannot have non-zero initial value. auto Initializer = GV.getInitialValue(); if (Initializer && !getBuilder().isNullValue(*Initializer)) - GV.setLinkage(mlir::cir::GlobalLinkageKind::WeakAnyLinkage); + GV.setLinkage(cir::GlobalLinkageKind::WeakAnyLinkage); } setNonAliasAttributes(D, GV); @@ -1431,11 +1421,11 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { return builder.getString(Str, eltTy, finalSize); } - auto arrayTy = mlir::dyn_cast( - getTypes().ConvertType(E->getType())); + auto arrayTy = + mlir::dyn_cast(getTypes().ConvertType(E->getType())); assert(arrayTy && "string literals must be emitted as an array type"); - auto arrayEltTy = mlir::dyn_cast(arrayTy.getEltType()); + auto arrayEltTy = mlir::dyn_cast(arrayTy.getEltType()); assert(arrayEltTy && "string literal elements must be emitted as integral type"); @@ -1458,7 +1448,7 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { SmallVector elements; elements.reserve(arraySize); for (uint64_t i = 0; i < arraySize; ++i) - elements.push_back(mlir::cir::IntAttr::get(arrayEltTy, elementValues[i])); + elements.push_back(cir::IntAttr::get(arrayEltTy, elementValues[i])); auto elementsAttr = mlir::ArrayAttr::get(&getMLIRContext(), elements); return builder.getConstArray(elementsAttr, arrayTy); @@ -1486,9 +1476,9 @@ LangAS CIRGenModule::getLangTempAllocaAddressSpace() const { return LangAS::Default; } -static mlir::cir::GlobalOp +static cir::GlobalOp generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, - mlir::cir::GlobalLinkageKind LT, CIRGenModule &CGM, + cir::GlobalLinkageKind LT, CIRGenModule &CGM, StringRef GlobalName, CharUnits Alignment) { unsigned AddrSpace = CGM.getASTContext().getTargetAddressSpace( CGM.getGlobalConstantAddressSpace()); @@ -1503,7 +1493,7 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, // Set up extra information and add to the module GV.setAlignmentAttr(CGM.getSize(Alignment)); GV.setLinkageAttr( - mlir::cir::GlobalLinkageKindAttr::get(CGM.getBuilder().getContext(), LT)); + cir::GlobalLinkageKindAttr::get(CGM.getBuilder().getContext(), LT)); CIRGenModule::setInitializer(GV, C); // TODO(cir) assert(!cir::MissingFeatures::threadLocal() && "NYI"); @@ -1517,7 +1507,7 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, } /// Return a pointer to a constant array for the given string literal. -mlir::cir::GlobalViewAttr +cir::GlobalViewAttr CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name) { CharUnits Alignment = @@ -1525,7 +1515,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, mlir::Attribute C = getConstantArrayFromStringLiteral(S); - mlir::cir::GlobalOp GV; + cir::GlobalOp GV; if (!getLangOpts().WritableStrings && ConstantStringMap.count(C)) { GV = ConstantStringMap[C]; // The bigger alignment always wins. @@ -1542,7 +1532,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, SmallString<256> MangledNameBuffer; StringRef GlobalVariableName; - auto LT = mlir::cir::GlobalLinkageKind::ExternalLinkage; + auto LT = cir::GlobalLinkageKind::ExternalLinkage; // Mangle the string literal if that's how the ABI merges duplicate strings. // Don't do it if they are writable, since we don't want writes in one TU to @@ -1551,7 +1541,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, !getLangOpts().WritableStrings) { assert(0 && "not implemented"); } else { - LT = mlir::cir::GlobalLinkageKind::PrivateLinkage; + LT = cir::GlobalLinkageKind::PrivateLinkage; GlobalVariableName = Name; } @@ -1567,10 +1557,9 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, assert(!cir::MissingFeatures::reportGlobalToASan() && "NYI"); } - auto ArrayTy = mlir::dyn_cast(GV.getSymType()); + auto ArrayTy = mlir::dyn_cast(GV.getSymType()); assert(ArrayTy && "String literal must be array"); - auto PtrTy = - mlir::cir::PointerType::get(&getMLIRContext(), ArrayTy.getEltType()); + auto PtrTy = cir::PointerType::get(&getMLIRContext(), ArrayTy.getEltType()); return builder.getGlobalViewAttr(PtrTy, GV); } @@ -1666,9 +1655,8 @@ CIRGenModule::getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *expr, } // Create a global variable for this lifetime-extended temporary. - mlir::cir::GlobalLinkageKind linkage = - getCIRLinkageVarDefinition(varDecl, false); - if (linkage == mlir::cir::GlobalLinkageKind::ExternalLinkage) { + cir::GlobalLinkageKind linkage = getCIRLinkageVarDefinition(varDecl, false); + if (linkage == cir::GlobalLinkageKind::ExternalLinkage) { const VarDecl *initVD; if (varDecl->isStaticDataMember() && varDecl->getAnyInitializer(initVD) && isa(initVD->getLexicalDeclContext())) { @@ -1678,7 +1666,7 @@ CIRGenModule::getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *expr, } else { // There is no need for this temporary to have external linkage if the // VarDecl has external linkage. - linkage = mlir::cir::GlobalLinkageKind::InternalLinkage; + linkage = cir::GlobalLinkageKind::InternalLinkage; } } auto targetAS = builder.getAddrSpaceAttr(addrSpace); @@ -1921,7 +1909,7 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context, return false; } -void CIRGenModule::setInitializer(mlir::cir::GlobalOp &global, +void CIRGenModule::setInitializer(cir::GlobalOp &global, mlir::Attribute value) { // Recompute visibility when updating initializer. global.setInitialValueAttr(value); @@ -1930,7 +1918,7 @@ void CIRGenModule::setInitializer(mlir::cir::GlobalOp &global, } mlir::SymbolTable::Visibility -CIRGenModule::getMLIRVisibility(mlir::cir::GlobalOp op) { +CIRGenModule::getMLIRVisibility(cir::GlobalOp op) { // MLIR doesn't accept public symbols declarations (only // definitions). if (op.isDeclaration()) @@ -1938,19 +1926,19 @@ CIRGenModule::getMLIRVisibility(mlir::cir::GlobalOp op) { return getMLIRVisibilityFromCIRLinkage(op.getLinkage()); } -mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibilityFromCIRLinkage( - mlir::cir::GlobalLinkageKind GLK) { +mlir::SymbolTable::Visibility +CIRGenModule::getMLIRVisibilityFromCIRLinkage(cir::GlobalLinkageKind GLK) { switch (GLK) { - case mlir::cir::GlobalLinkageKind::InternalLinkage: - case mlir::cir::GlobalLinkageKind::PrivateLinkage: + case cir::GlobalLinkageKind::InternalLinkage: + case cir::GlobalLinkageKind::PrivateLinkage: return mlir::SymbolTable::Visibility::Private; - case mlir::cir::GlobalLinkageKind::ExternalLinkage: - case mlir::cir::GlobalLinkageKind::ExternalWeakLinkage: - case mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage: - case mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage: - case mlir::cir::GlobalLinkageKind::CommonLinkage: - case mlir::cir::GlobalLinkageKind::WeakAnyLinkage: - case mlir::cir::GlobalLinkageKind::WeakODRLinkage: + case cir::GlobalLinkageKind::ExternalLinkage: + case cir::GlobalLinkageKind::ExternalWeakLinkage: + case cir::GlobalLinkageKind::LinkOnceODRLinkage: + case cir::GlobalLinkageKind::AvailableExternallyLinkage: + case cir::GlobalLinkageKind::CommonLinkage: + case cir::GlobalLinkageKind::WeakAnyLinkage: + case cir::GlobalLinkageKind::WeakODRLinkage: return mlir::SymbolTable::Visibility::Public; default: { llvm::errs() << "visibility not implemented for '" @@ -1961,8 +1949,7 @@ mlir::SymbolTable::Visibility CIRGenModule::getMLIRVisibilityFromCIRLinkage( llvm_unreachable("linkage should be handled above!"); } -mlir::cir::VisibilityKind -CIRGenModule::getGlobalVisibilityKindFromClangVisibility( +cir::VisibilityKind CIRGenModule::getGlobalVisibilityKindFromClangVisibility( clang::VisibilityAttr::VisibilityType visibility) { switch (visibility) { case clang::VisibilityAttr::VisibilityType::Default: @@ -1974,39 +1961,39 @@ CIRGenModule::getGlobalVisibilityKindFromClangVisibility( } } -mlir::cir::VisibilityAttr +cir::VisibilityAttr CIRGenModule::getGlobalVisibilityAttrFromDecl(const Decl *decl) { const clang::VisibilityAttr *VA = decl->getAttr(); - mlir::cir::VisibilityAttr cirVisibility = - mlir::cir::VisibilityAttr::get(&getMLIRContext()); + cir::VisibilityAttr cirVisibility = + cir::VisibilityAttr::get(&getMLIRContext()); if (VA) { - cirVisibility = mlir::cir::VisibilityAttr::get( + cirVisibility = cir::VisibilityAttr::get( &getMLIRContext(), getGlobalVisibilityKindFromClangVisibility(VA->getVisibility())); } return cirVisibility; } -mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( +cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) { if (Linkage == GVA_Internal) - return mlir::cir::GlobalLinkageKind::InternalLinkage; + return cir::GlobalLinkageKind::InternalLinkage; if (D->hasAttr()) { if (IsConstantVariable) - return mlir::cir::GlobalLinkageKind::WeakODRLinkage; + return cir::GlobalLinkageKind::WeakODRLinkage; else - return mlir::cir::GlobalLinkageKind::WeakAnyLinkage; + return cir::GlobalLinkageKind::WeakAnyLinkage; } if (const auto *FD = D->getAsFunction()) if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally) - return mlir::cir::GlobalLinkageKind::LinkOnceAnyLinkage; + return cir::GlobalLinkageKind::LinkOnceAnyLinkage; // We are guaranteed to have a strong definition somewhere else, // so we can use available_externally linkage. if (Linkage == GVA_AvailableExternally) - return mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; + return cir::GlobalLinkageKind::AvailableExternallyLinkage; // Note that Apple's kernel linker doesn't support symbol // coalescing, so we need to avoid linkonce and weak linkages there. @@ -2021,8 +2008,8 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( // definition is dependable. if (Linkage == GVA_DiscardableODR) return !astCtx.getLangOpts().AppleKext - ? mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage - : mlir::cir::GlobalLinkageKind::InternalLinkage; + ? cir::GlobalLinkageKind::LinkOnceODRLinkage + : cir::GlobalLinkageKind::InternalLinkage; // An explicit instantiation of a template has weak linkage, since // explicit instantiations can occur in multiple translation units @@ -2036,13 +2023,13 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( // therefore we need to follow the normal linkage paradigm. if (Linkage == GVA_StrongODR) { if (getLangOpts().AppleKext) - return mlir::cir::GlobalLinkageKind::ExternalLinkage; + return cir::GlobalLinkageKind::ExternalLinkage; if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice && !getLangOpts().GPURelocatableDeviceCode) return D->hasAttr() - ? mlir::cir::GlobalLinkageKind::ExternalLinkage - : mlir::cir::GlobalLinkageKind::InternalLinkage; - return mlir::cir::GlobalLinkageKind::WeakODRLinkage; + ? cir::GlobalLinkageKind::ExternalLinkage + : cir::GlobalLinkageKind::InternalLinkage; + return cir::GlobalLinkageKind::WeakODRLinkage; } // C++ doesn't have tentative definitions and thus cannot have common @@ -2050,18 +2037,18 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( if (!getLangOpts().CPlusPlus && isa(D) && !isVarDeclStrongDefinition(astCtx, *this, cast(D), getCodeGenOpts().NoCommon)) - return mlir::cir::GlobalLinkageKind::CommonLinkage; + return cir::GlobalLinkageKind::CommonLinkage; // selectany symbols are externally visible, so use weak instead of // linkonce. MSVC optimizes away references to const selectany globals, so // all definitions should be the same and ODR linkage should be used. // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx if (D->hasAttr()) - return mlir::cir::GlobalLinkageKind::WeakODRLinkage; + return cir::GlobalLinkageKind::WeakODRLinkage; // Otherwise, we have strong external linkage. assert(Linkage == GVA_StrongExternal); - return mlir::cir::GlobalLinkageKind::ExternalLinkage; + return cir::GlobalLinkageKind::ExternalLinkage; } /// This function is called when we implement a function with no prototype, e.g. @@ -2073,10 +2060,10 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( /// won't inline them. Instcombine normally deletes these calls, but it isn't /// run at -O0. void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( - mlir::Operation *Old, mlir::cir::FuncOp NewFn) { + mlir::Operation *Old, cir::FuncOp NewFn) { // If we're redefining a global as a function, don't transform it. - auto OldFn = dyn_cast(Old); + auto OldFn = dyn_cast(Old); if (!OldFn) return; @@ -2094,7 +2081,7 @@ void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( for (auto Use : SymUses.value()) { mlir::OpBuilder::InsertionGuard guard(builder); - if (auto noProtoCallOp = dyn_cast(Use.getUser())) { + if (auto noProtoCallOp = dyn_cast(Use.getUser())) { builder.setInsertionPoint(noProtoCallOp); // Patch call type with the real function type. @@ -2104,25 +2091,24 @@ void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( // Replace old no proto call with fixed call. noProtoCallOp.replaceAllUsesWith(realCallOp); noProtoCallOp.erase(); - } else if (auto getGlobalOp = - dyn_cast(Use.getUser())) { + } else if (auto getGlobalOp = dyn_cast(Use.getUser())) { // Replace type - getGlobalOp.getAddr().setType(mlir::cir::PointerType::get( - &getMLIRContext(), NewFn.getFunctionType())); + getGlobalOp.getAddr().setType( + cir::PointerType::get(&getMLIRContext(), NewFn.getFunctionType())); } else { llvm_unreachable("NIY"); } } } -mlir::cir::GlobalLinkageKind +cir::GlobalLinkageKind CIRGenModule::getCIRLinkageVarDefinition(const VarDecl *VD, bool IsConstant) { assert(!IsConstant && "constant variables NYI"); GVALinkage Linkage = astCtx.GetGVALinkageForVariable(VD); return getCIRLinkageForDeclarator(VD, Linkage, IsConstant); } -mlir::cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { +cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { const auto *D = cast(GD.getDecl()); GVALinkage Linkage = astCtx.GetGVALinkageForFunction(D); @@ -2135,8 +2121,8 @@ mlir::cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { void CIRGenModule::buildAliasForGlobal(StringRef mangledName, mlir::Operation *op, GlobalDecl aliasGD, - mlir::cir::FuncOp aliasee, - mlir::cir::GlobalLinkageKind linkage) { + cir::FuncOp aliasee, + cir::GlobalLinkageKind linkage) { auto *aliasFD = dyn_cast(aliasGD.getDecl()); assert(aliasFD && "expected FunctionDecl"); @@ -2181,12 +2167,9 @@ bool CIRGenModule::verifyModule() { return mlir::verify(theModule).succeeded(); } -std::pair -CIRGenModule::getAddrAndTypeOfCXXStructor(GlobalDecl GD, - const CIRGenFunctionInfo *FnInfo, - mlir::cir::FuncType FnType, - bool Dontdefer, - ForDefinition_t IsForDefinition) { +std::pair CIRGenModule::getAddrAndTypeOfCXXStructor( + GlobalDecl GD, const CIRGenFunctionInfo *FnInfo, cir::FuncType FnType, + bool Dontdefer, ForDefinition_t IsForDefinition) { auto *MD = cast(GD.getDecl()); if (isa(MD)) { @@ -2211,10 +2194,9 @@ CIRGenModule::getAddrAndTypeOfCXXStructor(GlobalDecl GD, return {FnType, Fn}; } -mlir::cir::FuncOp -CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, - bool ForVTable, bool DontDefer, - ForDefinition_t IsForDefinition) { +cir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, + bool ForVTable, bool DontDefer, + ForDefinition_t IsForDefinition) { assert(!cast(GD.getDecl())->isConsteval() && "consteval function should never be emitted"); @@ -2332,13 +2314,13 @@ void CIRGenModule::buildTentativeDefinition(const VarDecl *D) { // so, getGlobalValue might be better of returining a global value interface // that alows use to manage different globals value types transparently. if (GV) - assert(isa(GV) && + assert(isa(GV) && "tentative definition can only be built from a cir.global_op"); // We already have a definition, not declaration, with the same mangled name. // Emitting of declaration is not required (and actually overwrites emitted // definition). - if (GV && !dyn_cast(GV).isDeclaration()) + if (GV && !dyn_cast(GV).isDeclaration()) return; // If we have not seen a reference to this variable yet, place it into the @@ -2359,7 +2341,7 @@ void CIRGenModule::setGlobalVisibility(mlir::Operation *GV, void CIRGenModule::setDSOLocal(mlir::Operation *Op) const { assert(!cir::MissingFeatures::setDSOLocal()); - if (auto globalValue = dyn_cast(Op)) { + if (auto globalValue = dyn_cast(Op)) { setDSOLocal(globalValue); } } @@ -2386,10 +2368,9 @@ bool CIRGenModule::lookupRepresentativeDecl(StringRef MangledName, return true; } -mlir::cir::FuncOp -CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, - mlir::cir::FuncType Ty, - const clang::FunctionDecl *FD) { +cir::FuncOp CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, + cir::FuncType Ty, + const clang::FunctionDecl *FD) { // At the point we need to create the function, the insertion point // could be anywhere (e.g. callsite). Do not rely on whatever it might // be, properly save, find the appropriate place and restore. @@ -2405,7 +2386,7 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, if (curCGF) builder.setInsertionPoint(curCGF->CurFn); - f = builder.create(loc, name, Ty); + f = builder.create(loc, name, Ty); if (FD) f.setAstAttr(makeFuncDeclAttr(FD, &getMLIRContext())); @@ -2417,13 +2398,13 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, // A declaration gets private visibility by default, but external linkage // as the default linkage. - f.setLinkageAttr(mlir::cir::GlobalLinkageKindAttr::get( - &getMLIRContext(), mlir::cir::GlobalLinkageKind::ExternalLinkage)); + f.setLinkageAttr(cir::GlobalLinkageKindAttr::get( + &getMLIRContext(), cir::GlobalLinkageKind::ExternalLinkage)); mlir::SymbolTable::setSymbolVisibility( f, mlir::SymbolTable::Visibility::Private); // Initialize with empty dict of extra attributes. - f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + f.setExtraAttrsAttr(cir::ExtraFuncAttributesAttr::get( &getMLIRContext(), builder.getDictionaryAttr({}))); if (!curCGF) @@ -2432,9 +2413,10 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name, return f; } -mlir::cir::FuncOp CIRGenModule::createRuntimeFunction( - mlir::cir::FuncType Ty, StringRef Name, mlir::ArrayAttr, - [[maybe_unused]] bool Local, bool AssumeConvergent) { +cir::FuncOp CIRGenModule::createRuntimeFunction(cir::FuncType Ty, + StringRef Name, mlir::ArrayAttr, + [[maybe_unused]] bool Local, + bool AssumeConvergent) { if (AssumeConvergent) { llvm_unreachable("NYI"); } @@ -2500,7 +2482,7 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, mlir::NamedAttrList attrs{f.getExtraAttrs().getElements().getValue()}; if (!hasUnwindExceptions(getLangOpts())) { - auto attr = mlir::cir::NoThrowAttr::get(&getMLIRContext()); + auto attr = cir::NoThrowAttr::get(&getMLIRContext()); attrs.set(attr.getMnemonic(), attr); } @@ -2509,25 +2491,25 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, // explicitly marked as alwaysinline for semantic reasons, and inlining is // disabled, mark the function as noinline. if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { - auto attr = mlir::cir::InlineAttr::get( - &getMLIRContext(), mlir::cir::InlineKind::AlwaysInline); + auto attr = cir::InlineAttr::get(&getMLIRContext(), + cir::InlineKind::AlwaysInline); attrs.set(attr.getMnemonic(), attr); } } else if (decl->hasAttr()) { // Add noinline if the function isn't always_inline. - auto attr = mlir::cir::InlineAttr::get(&getMLIRContext(), - mlir::cir::InlineKind::NoInline); + auto attr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); attrs.set(attr.getMnemonic(), attr); } else if (decl->hasAttr()) { // (noinline wins over always_inline, and we can't specify both in IR) - auto attr = mlir::cir::InlineAttr::get(&getMLIRContext(), - mlir::cir::InlineKind::AlwaysInline); + auto attr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::AlwaysInline); attrs.set(attr.getMnemonic(), attr); } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { // If we're not inlining, then force everything that isn't always_inline // to carry an explicit noinline attribute. - auto attr = mlir::cir::InlineAttr::get(&getMLIRContext(), - mlir::cir::InlineKind::NoInline); + auto attr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); attrs.set(attr.getMnemonic(), attr); } else { // Otherwise, propagate the inline hint attribute and potentially use its @@ -2545,12 +2527,12 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, return any_of(Pattern->redecls(), CheckRedeclForInline); }; if (CheckForInline(cast(decl))) { - auto attr = mlir::cir::InlineAttr::get(&getMLIRContext(), - mlir::cir::InlineKind::InlineHint); + auto attr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::InlineHint); attrs.set(attr.getMnemonic(), attr); } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyHintInlining) { - auto attr = mlir::cir::InlineAttr::get(&getMLIRContext(), - mlir::cir::InlineKind::NoInline); + auto attr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); attrs.set(attr.getMnemonic(), attr); } } @@ -2566,31 +2548,30 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, } if (ShouldAddOptNone) { - auto optNoneAttr = mlir::cir::OptNoneAttr::get(&getMLIRContext()); + auto optNoneAttr = cir::OptNoneAttr::get(&getMLIRContext()); attrs.set(optNoneAttr.getMnemonic(), optNoneAttr); // OptimizeNone implies noinline; we should not be inlining such functions. - auto noInlineAttr = mlir::cir::InlineAttr::get( - &getMLIRContext(), mlir::cir::InlineKind::NoInline); + auto noInlineAttr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); attrs.set(noInlineAttr.getMnemonic(), noInlineAttr); } - f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + f.setExtraAttrsAttr(cir::ExtraFuncAttributesAttr::get( &getMLIRContext(), attrs.getDictionary(&getMLIRContext()))); } void CIRGenModule::setCIRFunctionAttributes(GlobalDecl GD, const CIRGenFunctionInfo &info, - mlir::cir::FuncOp func, - bool isThunk) { + cir::FuncOp func, bool isThunk) { // TODO(cir): More logic of constructAttributeList is needed. - mlir::cir::CallingConv callingConv; + cir::CallingConv callingConv; // Initialize PAL with existing attributes to merge attributes. mlir::NamedAttrList PAL{func.getExtraAttrs().getElements().getValue()}; constructAttributeList(func.getName(), info, GD, PAL, callingConv, /*AttrOnCallSite=*/false, isThunk); - func.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + func.setExtraAttrsAttr(cir::ExtraFuncAttributesAttr::get( &getMLIRContext(), PAL.getDictionary(&getMLIRContext()))); // TODO(cir): Check X86_VectorCall incompatibility with WinARM64EC @@ -2599,7 +2580,7 @@ void CIRGenModule::setCIRFunctionAttributes(GlobalDecl GD, } void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl, - mlir::cir::FuncOp func, + cir::FuncOp func, bool isIncompleteFunction, bool isThunk) { // NOTE(cir): Original CodeGen checks if this is an intrinsic. In CIR we @@ -2632,7 +2613,7 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl, /// /// If D is non-null, it specifies a decl that corresponded to this. This is /// used to set the attributes on the function when it is first created. -mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( +cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( StringRef MangledName, mlir::Type Ty, GlobalDecl GD, bool ForVTable, bool DontDefer, bool IsThunk, ForDefinition_t IsForDefinition, mlir::ArrayAttr ExtraAttrs) { @@ -2655,7 +2636,7 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // Lookup the entry, lazily creating it if necessary. mlir::Operation *Entry = getGlobalValue(MangledName); if (Entry) { - assert(isa(Entry) && + assert(isa(Entry) && "not implemented, only supports FuncOp for now"); if (WeakRefReferences.erase(Entry)) { @@ -2670,7 +2651,7 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // If there are two attempts to define the same mangled name, issue an // error. - auto Fn = cast(Entry); + auto Fn = cast(Entry); if (IsForDefinition && Fn && !Fn.isDeclaration()) { GlobalDecl OtherGD; // CHeck that GD is not yet in DiagnosedConflictingDefinitions is required @@ -2703,9 +2684,9 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( // set attributes. bool IsIncompleteFunction = false; - mlir::cir::FuncType FTy; - if (mlir::isa(Ty)) { - FTy = mlir::cast(Ty); + cir::FuncType FTy; + if (mlir::isa(Ty)) { + FTy = mlir::cast(Ty); } else { assert(false && "NYI"); // FTy = mlir::FunctionType::get(VoidTy, false); @@ -2731,7 +2712,7 @@ mlir::cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( assert(SymbolOp && "Expected a symbol-defining operation"); // TODO(cir): When can this symbol be something other than a function? - assert(isa(Entry) && "NYI"); + assert(isa(Entry) && "NYI"); // This might be an implementation of a function without a prototype, in // which case, try to do special replacement of calls which match the new @@ -2866,14 +2847,14 @@ void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { // redefinition). Just ignore those cases. // TODO: Not sure what to map this to for MLIR auto globalValueOp = Op; - if (auto Gv = dyn_cast(Op)) { + if (auto Gv = dyn_cast(Op)) { auto *result = mlir::SymbolTable::lookupSymbolIn(getModule(), Gv.getNameAttr()); globalValueOp = result; } if (auto cirGlobalValue = - dyn_cast(globalValueOp)) { + dyn_cast(globalValueOp)) { if (!cirGlobalValue.isDeclaration()) return; } @@ -3153,7 +3134,7 @@ bool CIRGenModule::supportsCOMDAT() const { void CIRGenModule::maybeSetTrivialComdat(const Decl &d, mlir::Operation *op) { if (!shouldBeInCOMDAT(*this, d)) return; - auto globalOp = dyn_cast_or_null(op); + auto globalOp = dyn_cast_or_null(op); if (globalOp) globalOp.setComdat(true); // Keep it as missing feature as we need to implement comdat for FuncOp. @@ -3161,7 +3142,7 @@ void CIRGenModule::maybeSetTrivialComdat(const Decl &d, mlir::Operation *op) { assert(!cir::MissingFeatures::setComdat() && "NYI"); } -bool CIRGenModule::isInNoSanitizeList(SanitizerMask Kind, mlir::cir::FuncOp Fn, +bool CIRGenModule::isInNoSanitizeList(SanitizerMask Kind, cir::FuncOp Fn, SourceLocation Loc) const { const auto &NoSanitizeL = getASTContext().getNoSanitizeList(); // NoSanitize by function name. @@ -3209,9 +3190,9 @@ void CIRGenModule::applyReplacements() { auto *Entry = getGlobalValue(MangledName); if (!Entry) continue; - assert(isa(Entry) && "expected function"); - auto OldF = cast(Entry); - auto NewF = dyn_cast(Replacement); + assert(isa(Entry) && "expected function"); + auto OldF = cast(Entry); + auto NewF = dyn_cast(Replacement); assert(NewF && "not implemented"); // Replace old with new, but keep the old order. @@ -3248,11 +3229,11 @@ void CIRGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { buildTopLevelDecl(VD); } -mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( +cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( mlir::Location loc, StringRef Name, mlir::Type Ty, - mlir::cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment) { - mlir::cir::GlobalOp OldGV{}; - auto GV = dyn_cast_or_null( + cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment) { + cir::GlobalOp OldGV{}; + auto GV = dyn_cast_or_null( mlir::SymbolTable::lookupSymbolIn(getModule(), Name)); if (GV) { @@ -3272,7 +3253,7 @@ mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( // Set up extra information and add to the module GV.setLinkageAttr( - mlir::cir::GlobalLinkageKindAttr::get(&getMLIRContext(), Linkage)); + cir::GlobalLinkageKindAttr::get(&getMLIRContext(), Linkage)); mlir::SymbolTable::setSymbolVisibility(GV, CIRGenModule::getMLIRVisibility(GV)); @@ -3286,7 +3267,7 @@ mlir::cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( OldGV->erase(); } - if (supportsCOMDAT() && mlir::cir::isWeakForLinker(Linkage) && + if (supportsCOMDAT() && cir::isWeakForLinker(Linkage) && !GV.hasAvailableExternallyLinkage()) { GV.setComdat(true); } @@ -3303,7 +3284,7 @@ bool CIRGenModule::shouldOpportunisticallyEmitVTables() { } void CIRGenModule::buildVTableTypeMetadata(const CXXRecordDecl *RD, - mlir::cir::GlobalOp VTable, + cir::GlobalOp VTable, const VTableLayout &VTLayout) { if (!getCodeGenOpts().LTOUnit) return; @@ -3334,11 +3315,11 @@ mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc, /// with a constantexpr cast to the right type. /// 3. Finally, if the existing global is the correct declaration, return the /// existing global. -mlir::cir::GlobalOp CIRGenModule::getOrInsertGlobal( +cir::GlobalOp CIRGenModule::getOrInsertGlobal( mlir::Location loc, StringRef Name, mlir::Type Ty, - llvm::function_ref CreateGlobalCallback) { + llvm::function_ref CreateGlobalCallback) { // See if we have a definition for the specified global already. - auto GV = dyn_cast_or_null(getGlobalValue(Name)); + auto GV = dyn_cast_or_null(getGlobalValue(Name)); if (!GV) { GV = CreateGlobalCallback(); } @@ -3358,9 +3339,8 @@ mlir::cir::GlobalOp CIRGenModule::getOrInsertGlobal( } // Overload to construct a global variable using its constructor's defaults. -mlir::cir::GlobalOp CIRGenModule::getOrInsertGlobal(mlir::Location loc, - StringRef Name, - mlir::Type Ty) { +cir::GlobalOp CIRGenModule::getOrInsertGlobal(mlir::Location loc, + StringRef Name, mlir::Type Ty) { return getOrInsertGlobal(loc, Name, Ty, [&] { return CIRGenModule::createGlobalOp(*this, loc, Name, builder.getPointerTo(Ty)); @@ -3417,9 +3397,9 @@ void CIRGenModule::ErrorUnsupported(const Decl *D, const char *Type) { getDiags().Report(astCtx.getFullLoc(D->getLocation()), DiagID) << Msg; } -mlir::cir::SourceLanguage CIRGenModule::getCIRSourceLanguage() { +cir::SourceLanguage CIRGenModule::getCIRSourceLanguage() { using ClangStd = clang::LangStandard; - using CIRLang = mlir::cir::SourceLanguage; + using CIRLang = cir::SourceLanguage; auto opts = getLangOpts(); if (opts.OpenCL && !opts.OpenCLCPlusPlus) @@ -3501,11 +3481,11 @@ mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(const AnnotateAttr *attr) { return lookup; } -mlir::cir::AnnotationAttr +cir::AnnotationAttr CIRGenModule::buildAnnotateAttr(const clang::AnnotateAttr *aa) { mlir::StringAttr annoGV = builder.getStringAttr(aa->getAnnotation()); mlir::ArrayAttr args = buildAnnotationArgs(aa); - return mlir::cir::AnnotationAttr::get(&getMLIRContext(), annoGV, args); + return cir::AnnotationAttr::get(&getMLIRContext(), annoGV, args); } void CIRGenModule::addGlobalAnnotations(const ValueDecl *d, @@ -3516,9 +3496,9 @@ void CIRGenModule::addGlobalAnnotations(const ValueDecl *d, llvm::SmallVector annotations; for (auto *i : d->specific_attrs()) annotations.push_back(buildAnnotateAttr(i)); - if (auto global = dyn_cast(gv)) + if (auto global = dyn_cast(gv)) global.setAnnotationsAttr(builder.getArrayAttr(annotations)); - else if (auto func = dyn_cast(gv)) + else if (auto func = dyn_cast(gv)) func.setAnnotationsAttr(builder.getArrayAttr(annotations)); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 5094370c9d2f..77abb80bbc77 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -144,7 +144,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Store deferred function annotations so they can be emitted at the end with /// most up to date ValueDecl that will have all the inherited annotations. - llvm::DenseMap deferredAnnotations; + llvm::DenseMap deferredAnnotations; llvm::DenseMap materializedGlobalTemporaryMap; @@ -186,7 +186,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Track whether the CIRGenModule is currently building an initializer /// for a global (e.g. as opposed to a regular cir.func). - mlir::cir::GlobalOp globalOpContext = nullptr; + cir::GlobalOp globalOpContext = nullptr; /// When a C++ decl with an initializer is deferred, null is /// appended to CXXGlobalInits, and the index of that null is placed @@ -205,37 +205,36 @@ class CIRGenModule : public CIRGenTypeCache { /// extern "C" linkage specification, prepare to emit an alias for it /// to the expected name. template - void maybeHandleStaticInExternC(const SomeDecl *D, mlir::cir::GlobalOp GV); + void maybeHandleStaticInExternC(const SomeDecl *D, cir::GlobalOp GV); /// Tell the consumer that this variable has been instantiated. void HandleCXXStaticMemberVarInstantiation(VarDecl *VD); - llvm::DenseMap StaticLocalDeclMap; - llvm::DenseMap Globals; - mlir::Operation *getGlobalValue(StringRef Ref); + llvm::DenseMap StaticLocalDeclMap; + llvm::DenseMap Globals; + mlir::Operation *getGlobalValue(llvm::StringRef Ref); mlir::Value getGlobalValue(const clang::Decl *D); /// If the specified mangled name is not in the module, create and return an /// mlir::GlobalOp value - mlir::cir::GlobalOp - getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, LangAS AddrSpace, - const VarDecl *D, + cir::GlobalOp + getOrCreateCIRGlobal(llvm::StringRef MangledName, mlir::Type Ty, + LangAS AddrSpace, const VarDecl *D, ForDefinition_t IsForDefinition = NotForDefinition); - mlir::cir::GlobalOp getStaticLocalDeclAddress(const VarDecl *D) { + cir::GlobalOp getStaticLocalDeclAddress(const VarDecl *D) { return StaticLocalDeclMap[D]; } - void setStaticLocalDeclAddress(const VarDecl *D, mlir::cir::GlobalOp C) { + void setStaticLocalDeclAddress(const VarDecl *D, cir::GlobalOp C) { StaticLocalDeclMap[D] = C; } - mlir::cir::GlobalOp - getOrCreateStaticVarDecl(const VarDecl &D, - mlir::cir::GlobalLinkageKind Linkage); + cir::GlobalOp getOrCreateStaticVarDecl(const VarDecl &D, + cir::GlobalLinkageKind Linkage); - mlir::cir::GlobalOp getOrCreateCIRGlobal(const VarDecl *D, mlir::Type Ty, - ForDefinition_t IsForDefinition); + cir::GlobalOp getOrCreateCIRGlobal(const VarDecl *D, mlir::Type Ty, + ForDefinition_t IsForDefinition); /// TODO(cir): once we have cir.module, add this as a convenience method /// there instead of here. @@ -246,25 +245,23 @@ class CIRGenModule : public CIRGenTypeCache { /// with a constantexpr cast to the right type. /// 3. Finally, if the existing global is the correct declaration, return /// the existing global. - mlir::cir::GlobalOp getOrInsertGlobal( - mlir::Location loc, StringRef Name, mlir::Type Ty, - llvm::function_ref CreateGlobalCallback); + cir::GlobalOp + getOrInsertGlobal(mlir::Location loc, llvm::StringRef Name, mlir::Type Ty, + llvm::function_ref CreateGlobalCallback); // Overload to construct a global variable using its constructor's defaults. - mlir::cir::GlobalOp getOrInsertGlobal(mlir::Location loc, StringRef Name, - mlir::Type Ty); + cir::GlobalOp getOrInsertGlobal(mlir::Location loc, llvm::StringRef Name, + mlir::Type Ty); - static mlir::cir::GlobalOp - createGlobalOp(CIRGenModule &cgm, mlir::Location loc, StringRef name, - mlir::Type t, bool isConstant = false, - mlir::cir::AddressSpaceAttr addrSpace = {}, - mlir::Operation *insertPoint = nullptr, - mlir::cir::GlobalLinkageKind linkage = - mlir::cir::GlobalLinkageKind::ExternalLinkage); + static cir::GlobalOp createGlobalOp( + CIRGenModule &cgm, mlir::Location loc, llvm::StringRef name, mlir::Type t, + bool isConstant = false, cir::AddressSpaceAttr addrSpace = {}, + mlir::Operation *insertPoint = nullptr, + cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage); // FIXME: Hardcoding priority here is gross. - void AddGlobalCtor(mlir::cir::FuncOp Ctor, int Priority = 65535); - void AddGlobalDtor(mlir::cir::FuncOp Dtor, int Priority = 65535, + void AddGlobalCtor(cir::FuncOp Ctor, int Priority = 65535); + void AddGlobalDtor(cir::FuncOp Dtor, int Priority = 65535, bool IsDtorAttrFunc = false); // Return whether structured convergence intrinsics should be generated for @@ -286,7 +283,7 @@ class CIRGenModule : public CIRGenTypeCache { ForDefinition_t IsForDefinition = NotForDefinition); /// Return the mlir::GlobalViewAttr for the address of the given global. - mlir::cir::GlobalViewAttr + cir::GlobalViewAttr getAddrOfGlobalVarAttr(const VarDecl *D, mlir::Type Ty = {}, ForDefinition_t IsForDefinition = NotForDefinition); @@ -314,22 +311,23 @@ class CIRGenModule : public CIRGenTypeCache { /// constructed for. If valid, the attributes applied to this decl may /// contribute to the function attributes and calling convention. /// \param Attrs [out] - On return, the attribute list to use. - void constructAttributeList(StringRef Name, const CIRGenFunctionInfo &Info, + void constructAttributeList(llvm::StringRef Name, + const CIRGenFunctionInfo &Info, CIRGenCalleeInfo CalleeInfo, mlir::NamedAttrList &Attrs, - mlir::cir::CallingConv &callingConv, + cir::CallingConv &callingConv, bool AttrOnCallSite, bool IsThunk); /// Helper function for getDefaultFunctionAttributes. Builds a set of function /// attributes which can be simply added to a function. - void getTrivialDefaultFunctionAttributes(StringRef name, bool hasOptnone, - bool attrOnCallSite, + void getTrivialDefaultFunctionAttributes(llvm::StringRef name, + bool hasOptnone, bool attrOnCallSite, mlir::NamedAttrList &funcAttrs); /// Helper function for constructAttributeList and /// addDefaultFunctionDefinitionAttributes. Builds a set of function /// attributes to add to a function with the given properties. - void getDefaultFunctionAttributes(StringRef name, bool hasOptnone, + void getDefaultFunctionAttributes(llvm::StringRef name, bool hasOptnone, bool attrOnCallSite, mlir::NamedAttrList &funcAttrs); @@ -337,9 +335,9 @@ class CIRGenModule : public CIRGenTypeCache { /// different type already exists then a new variable with the right type /// will be created and all uses of the old variable will be replaced with a /// bitcast to the new variable. - mlir::cir::GlobalOp createOrReplaceCXXRuntimeVariable( - mlir::Location loc, StringRef Name, mlir::Type Ty, - mlir::cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment); + cir::GlobalOp createOrReplaceCXXRuntimeVariable( + mlir::Location loc, llvm::StringRef Name, mlir::Type Ty, + cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment); /// Emit any vtables which we deferred and still have a use for. void buildDeferredVTables(); @@ -347,15 +345,14 @@ class CIRGenModule : public CIRGenTypeCache { void buildVTable(CXXRecordDecl *rd); - void setDSOLocal(mlir::cir::CIRGlobalValueInterface GV) const; + void setDSOLocal(cir::CIRGlobalValueInterface GV) const; /// Return the appropriate linkage for the vtable, VTT, and type information /// of the given class. - mlir::cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *RD); + cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *RD); /// Emit type metadata for the given vtable using the given layout. - void buildVTableTypeMetadata(const CXXRecordDecl *RD, - mlir::cir::GlobalOp VTable, + void buildVTableTypeMetadata(const CXXRecordDecl *RD, cir::GlobalOp VTable, const VTableLayout &VTLayout); /// Get the address of the RTTI descriptor for the given type. @@ -375,16 +372,16 @@ class CIRGenModule : public CIRGenTypeCache { llvm_unreachable("unknown visibility!"); } - llvm::DenseMap ConstantStringMap; + llvm::DenseMap ConstantStringMap; /// Return a constant array for the given string. mlir::Attribute getConstantArrayFromStringLiteral(const StringLiteral *E); /// Return a global symbol reference to a constant array for the given string /// literal. - mlir::cir::GlobalViewAttr + cir::GlobalViewAttr getAddrOfConstantStringFromLiteral(const StringLiteral *S, - StringRef Name = ".str"); + llvm::StringRef Name = ".str"); unsigned StringLiteralCnt = 0; unsigned CompoundLitaralCnt = 0; @@ -485,10 +482,11 @@ class CIRGenModule : public CIRGenTypeCache { const CXXRecordDecl *Derived, const CXXRecordDecl *VBase); - mlir::cir::FuncOp getAddrOfCXXStructor( - clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, - mlir::cir::FuncType FnType = nullptr, bool DontDefer = false, - ForDefinition_t IsForDefinition = NotForDefinition) { + cir::FuncOp + getAddrOfCXXStructor(clang::GlobalDecl GD, + const CIRGenFunctionInfo *FnInfo = nullptr, + cir::FuncType FnType = nullptr, bool DontDefer = false, + ForDefinition_t IsForDefinition = NotForDefinition) { return getAddrAndTypeOfCXXStructor(GD, FnInfo, FnType, DontDefer, IsForDefinition) @@ -533,9 +531,9 @@ class CIRGenModule : public CIRGenTypeCache { DefaultMethodsToEmit.emplace_back(GD); } - std::pair getAddrAndTypeOfCXXStructor( + std::pair getAddrAndTypeOfCXXStructor( clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, - mlir::cir::FuncType FnType = nullptr, bool Dontdefer = false, + cir::FuncType FnType = nullptr, bool Dontdefer = false, ForDefinition_t IsForDefinition = NotForDefinition); void buildTopLevelDecl(clang::Decl *decl); @@ -547,9 +545,9 @@ class CIRGenModule : public CIRGenTypeCache { bool tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D); - void buildAliasForGlobal(StringRef mangledName, mlir::Operation *op, - GlobalDecl aliasGD, mlir::cir::FuncOp aliasee, - mlir::cir::GlobalLinkageKind linkage); + void buildAliasForGlobal(llvm::StringRef mangledName, mlir::Operation *op, + GlobalDecl aliasGD, cir::FuncOp aliasee, + cir::GlobalLinkageKind linkage); mlir::Type getCIRType(const clang::QualType &type); @@ -566,7 +564,7 @@ class CIRGenModule : public CIRGenTypeCache { void setTLSMode(mlir::Operation *Op, const VarDecl &D) const; /// Get TLS mode from CodeGenOptions. - mlir::cir::TLS_Model GetDefaultCIRTLSModel() const; + cir::TLS_Model GetDefaultCIRTLSModel() const; /// Replace the present global `Old` with the given global `New`. Their symbol /// names must match; their types can be different. Usages of the old global @@ -574,7 +572,7 @@ class CIRGenModule : public CIRGenTypeCache { /// /// This function will erase the old global. This function will NOT insert the /// new global into the module. - void replaceGlobal(mlir::cir::GlobalOp Old, mlir::cir::GlobalOp New); + void replaceGlobal(cir::GlobalOp Old, cir::GlobalOp New); /// Determine whether the definition must be emitted; if this returns \c /// false, the definition can be emitted lazily if it's used. @@ -585,7 +583,7 @@ class CIRGenModule : public CIRGenTypeCache { bool MayDropFunctionReturn(const clang::ASTContext &Context, clang::QualType ReturnType); - bool isInNoSanitizeList(clang::SanitizerMask Kind, mlir::cir::FuncOp Fn, + bool isInNoSanitizeList(clang::SanitizerMask Kind, cir::FuncOp Fn, clang::SourceLocation) const; /// Determine whether the definition can be emitted eagerly, or should be @@ -599,7 +597,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Return the address of the given function. If Ty is non-null, then this /// function will use the specified type if it has to create it. // TODO: this is a bit weird as `GetAddr` given we give back a FuncOp? - mlir::cir::FuncOp + cir::FuncOp GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty = nullptr, bool ForVTable = false, bool Dontdefer = false, ForDefinition_t IsForDefinition = NotForDefinition); @@ -637,17 +635,17 @@ class CIRGenModule : public CIRGenTypeCache { void UpdateCompletedType(const clang::TagDecl *TD); /// Set function attributes for a function declaration. - void setFunctionAttributes(GlobalDecl GD, mlir::cir::FuncOp F, + void setFunctionAttributes(GlobalDecl GD, cir::FuncOp F, bool IsIncompleteFunction, bool IsThunk); /// Set the CIR function attributes (sext, zext, etc). void setCIRFunctionAttributes(GlobalDecl GD, const CIRGenFunctionInfo &info, - mlir::cir::FuncOp func, bool isThunk); + cir::FuncOp func, bool isThunk); /// Set the CIR function attributes which only apply to a function /// definition. void setCIRFunctionAttributesForDefinition(const Decl *decl, - mlir::cir::FuncOp func); + cir::FuncOp func); void buildGlobalDefinition(clang::GlobalDecl D, mlir::Operation *Op = nullptr); @@ -656,10 +654,10 @@ class CIRGenModule : public CIRGenTypeCache { bool IsTentative = false); /// Emit the function that initializes the specified global - void buildCXXGlobalVarDeclInit(const VarDecl *varDecl, - mlir::cir::GlobalOp addr, bool performInit); + void buildCXXGlobalVarDeclInit(const VarDecl *varDecl, cir::GlobalOp addr, + bool performInit); - void buildCXXGlobalVarDeclInitFunc(const VarDecl *D, mlir::cir::GlobalOp Addr, + void buildCXXGlobalVarDeclInitFunc(const VarDecl *D, cir::GlobalOp Addr, bool PerformInit); void addDeferredVTable(const CXXRecordDecl *RD) { @@ -699,7 +697,7 @@ class CIRGenModule : public CIRGenTypeCache { // Produce code for this constructor/destructor. This method doesn't try to // apply any ABI rules about which other constructors/destructors are needed // or if they are alias to each other. - mlir::cir::FuncOp codegenCXXStructor(clang::GlobalDecl GD); + cir::FuncOp codegenCXXStructor(clang::GlobalDecl GD); bool lookupRepresentativeDecl(llvm::StringRef MangledName, clang::GlobalDecl &Result) const; @@ -713,39 +711,37 @@ class CIRGenModule : public CIRGenTypeCache { /// Visibility and Linkage /// ------- - static void setInitializer(mlir::cir::GlobalOp &op, mlir::Attribute value); + static void setInitializer(cir::GlobalOp &op, mlir::Attribute value); static mlir::SymbolTable::Visibility - getMLIRVisibilityFromCIRLinkage(mlir::cir::GlobalLinkageKind GLK); - static mlir::cir::VisibilityKind getGlobalVisibilityKindFromClangVisibility( + getMLIRVisibilityFromCIRLinkage(cir::GlobalLinkageKind GLK); + static cir::VisibilityKind getGlobalVisibilityKindFromClangVisibility( clang::VisibilityAttr::VisibilityType visibility); - mlir::cir::VisibilityAttr getGlobalVisibilityAttrFromDecl(const Decl *decl); - static mlir::SymbolTable::Visibility - getMLIRVisibility(mlir::cir::GlobalOp op); - mlir::cir::GlobalLinkageKind getFunctionLinkage(GlobalDecl GD); - mlir::cir::GlobalLinkageKind - getCIRLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage, - bool IsConstantVariable); - void setFunctionLinkage(GlobalDecl GD, mlir::cir::FuncOp f) { + cir::VisibilityAttr getGlobalVisibilityAttrFromDecl(const Decl *decl); + static mlir::SymbolTable::Visibility getMLIRVisibility(cir::GlobalOp op); + cir::GlobalLinkageKind getFunctionLinkage(GlobalDecl GD); + cir::GlobalLinkageKind getCIRLinkageForDeclarator(const DeclaratorDecl *D, + GVALinkage Linkage, + bool IsConstantVariable); + void setFunctionLinkage(GlobalDecl GD, cir::FuncOp f) { auto L = getFunctionLinkage(GD); - f.setLinkageAttr( - mlir::cir::GlobalLinkageKindAttr::get(&getMLIRContext(), L)); + f.setLinkageAttr(cir::GlobalLinkageKindAttr::get(&getMLIRContext(), L)); mlir::SymbolTable::setSymbolVisibility(f, getMLIRVisibilityFromCIRLinkage(L)); } - mlir::cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *VD, - bool IsConstant); + cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *VD, + bool IsConstant); - void addReplacement(StringRef Name, mlir::Operation *Op); + void addReplacement(llvm::StringRef Name, mlir::Operation *Op); mlir::Location getLocForFunction(const clang::FunctionDecl *FD); void ReplaceUsesOfNonProtoTypeWithRealFunction(mlir::Operation *Old, - mlir::cir::FuncOp NewFn); + cir::FuncOp NewFn); // TODO: CodeGen also passes an AttributeList here. We'll have to match that // in CIR - mlir::cir::FuncOp + cir::FuncOp GetOrCreateCIRFunction(llvm::StringRef MangledName, mlir::Type Ty, clang::GlobalDecl D, bool ForVTable, bool DontDefer = false, bool IsThunk = false, @@ -753,14 +749,13 @@ class CIRGenModule : public CIRGenTypeCache { mlir::ArrayAttr ExtraAttrs = {}); // Effectively create the CIR instruction, properly handling insertion // points. - mlir::cir::FuncOp createCIRFunction(mlir::Location loc, StringRef name, - mlir::cir::FuncType Ty, - const clang::FunctionDecl *FD); + cir::FuncOp createCIRFunction(mlir::Location loc, llvm::StringRef name, + cir::FuncType Ty, + const clang::FunctionDecl *FD); - mlir::cir::FuncOp createRuntimeFunction(mlir::cir::FuncType Ty, - StringRef Name, mlir::ArrayAttr = {}, - bool Local = false, - bool AssumeConvergent = false); + cir::FuncOp createRuntimeFunction(cir::FuncType Ty, llvm::StringRef Name, + mlir::ArrayAttr = {}, bool Local = false, + bool AssumeConvergent = false); /// Emit type info if type of an expression is a variably modified /// type. Also emit proper debug info for cast types. @@ -774,11 +769,10 @@ class CIRGenModule : public CIRGenTypeCache { /// Given a builtin id for a function like "__builtin_fabsf", return a /// Function* for "fabsf". - mlir::cir::FuncOp getBuiltinLibFunction(const FunctionDecl *FD, - unsigned BuiltinID); + cir::FuncOp getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID); /// Emit a general error that something can't be done. - void Error(SourceLocation loc, StringRef error); + void Error(SourceLocation loc, llvm::StringRef error); /// Print out an error that codegen doesn't support the specified stmt yet. void ErrorUnsupported(const Stmt *S, const char *Type); @@ -812,8 +806,7 @@ class CIRGenModule : public CIRGenTypeCache { /// \param FN is a pointer to IR function being generated. /// \param FD is a pointer to function declaration if any. /// \param CGF is a pointer to CIRGenFunction that generates this function. - void genKernelArgMetadata(mlir::cir::FuncOp FN, - const FunctionDecl *FD = nullptr, + void genKernelArgMetadata(cir::FuncOp FN, const FunctionDecl *FD = nullptr, CIRGenFunction *CGF = nullptr); /// Emits OpenCL specific Metadata e.g. OpenCL version. @@ -823,7 +816,7 @@ class CIRGenModule : public CIRGenTypeCache { /// information for a given GlobalValue. Notice that a GlobalValue could /// have multiple annotations, and this function creates attribute for /// one of them. - mlir::cir::AnnotationAttr buildAnnotateAttr(const clang::AnnotateAttr *aa); + cir::AnnotationAttr buildAnnotateAttr(const clang::AnnotateAttr *aa); private: // An ordered map of canonical GlobalDecls to their mangled names. @@ -838,7 +831,7 @@ class CIRGenModule : public CIRGenTypeCache { void setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GV); /// Map source language used to a CIR attribute. - mlir::cir::SourceLanguage getCIRSourceLanguage(); + cir::SourceLanguage getCIRSourceLanguage(); /// Emit all the global annotations. /// This actually only emits annotations for deffered declarations of diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp index 116255b36f26..6247cf6b5c2a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp @@ -41,8 +41,7 @@ static unsigned ArgInfoAddressSpace(LangAS AS) { } } -void CIRGenModule::genKernelArgMetadata(mlir::cir::FuncOp Fn, - const FunctionDecl *FD, +void CIRGenModule::genKernelArgMetadata(cir::FuncOp Fn, const FunctionDecl *FD, CIRGenFunction *CGF) { assert(((FD && CGF) || (!FD && !CGF)) && "Incorrect use - FD and CGF should either be both null or not!"); @@ -170,7 +169,7 @@ void CIRGenModule::genKernelArgMetadata(mlir::cir::FuncOp Fn, resArgNames = builder.getArrayAttr(argNames); // Update the function's extra attributes with the kernel argument metadata. - auto value = mlir::cir::OpenCLKernelArgMetadataAttr::get( + auto value = cir::OpenCLKernelArgMetadataAttr::get( Fn.getContext(), builder.getI32ArrayAttr(addressQuals), builder.getArrayAttr(accessQuals), builder.getArrayAttr(argTypeNames), builder.getArrayAttr(argBaseTypeNames), @@ -178,7 +177,7 @@ void CIRGenModule::genKernelArgMetadata(mlir::cir::FuncOp Fn, mlir::NamedAttrList items{Fn.getExtraAttrs().getElements().getValue()}; auto oldValue = items.set(value.getMnemonic(), value); if (oldValue != value) { - Fn.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + Fn.setExtraAttrsAttr(cir::ExtraFuncAttributesAttr::get( &getMLIRContext(), builder.getDictionaryAttr(items))); } } else { @@ -188,7 +187,7 @@ void CIRGenModule::genKernelArgMetadata(mlir::cir::FuncOp Fn, } void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, - mlir::cir::FuncOp Fn) { + cir::FuncOp Fn) { if (!FD->hasAttr() && !FD->hasAttr()) return; @@ -197,7 +196,7 @@ void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, if (!getLangOpts().OpenCL) return; - using mlir::cir::OpenCLKernelMetadataAttr; + using cir::OpenCLKernelMetadataAttr; mlir::ArrayAttr workGroupSizeHintAttr, reqdWorkGroupSizeAttr; mlir::TypeAttr vecTypeHintAttr; @@ -246,7 +245,7 @@ void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, vecTypeHintAttr, vecTypeHintSignedness, intelReqdSubGroupSizeAttr); attrs.append(kernelMetadataAttr.getMnemonic(), kernelMetadataAttr); - Fn.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + Fn.setExtraAttrsAttr(cir::ExtraFuncAttributesAttr::get( &getMLIRContext(), attrs.getDictionary(&getMLIRContext()))); } @@ -259,7 +258,7 @@ void CIRGenModule::buildOpenCLMetadata() { unsigned minor = (version % 100) / 10; auto clVersionAttr = - mlir::cir::OpenCLVersionAttr::get(&getMLIRContext(), major, minor); + cir::OpenCLVersionAttr::get(&getMLIRContext(), major, minor); theModule->setAttr("cir.cl.version", clVersionAttr); } diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp index a8e0f7dbeb5f..34207f74089b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp @@ -24,6 +24,5 @@ CIRGenOpenCLRuntime::~CIRGenOpenCLRuntime() {} void CIRGenOpenCLRuntime::buildWorkGroupLocalVarDecl(CIRGenFunction &CGF, const VarDecl &D) { - return CGF.buildStaticVarDecl(D, - mlir::cir::GlobalLinkageKind::InternalLinkage); + return CGF.buildStaticVarDecl(D, cir::GlobalLinkageKind::InternalLinkage); } diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp index 382291fddfea..526567f31715 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.cpp @@ -31,8 +31,8 @@ void CIRGenOpenMPRuntime::checkAndEmitLastprivateConditional( return; } -void CIRGenOpenMPRuntime::registerTargetGlobalVariable( - const clang::VarDecl *VD, mlir::cir::GlobalOp globalOp) { +void CIRGenOpenMPRuntime::registerTargetGlobalVariable(const clang::VarDecl *VD, + cir::GlobalOp globalOp) { assert(!cir::MissingFeatures::openMPRuntime()); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h index 8c1c4f4a19f7..74320337e212 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h +++ b/clang/lib/CIR/CodeGen/CIRGenOpenMPRuntime.h @@ -81,7 +81,7 @@ class CIRGenOpenMPRuntime { /// Checks if the provided global decl \a GD is a declare target variable and /// registers it when emitting code for the host. virtual void registerTargetGlobalVariable(const clang::VarDecl *VD, - mlir::cir::GlobalOp globalOp); + cir::GlobalOp globalOp); /// Emit deferred declare target variables marked for deferred emission. void emitDeferredTargetDecls() const; diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index a7bb94effd8d..19d48b886af3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -132,11 +132,11 @@ class CIRGenRecordLayout { private: /// The CIR type corresponding to this record layout; used when laying it out /// as a complete object. - mlir::cir::StructType CompleteObjectType; + cir::StructType CompleteObjectType; /// The CIR type for the non-virtual part of this record layout; used when /// laying it out as a base subobject. - mlir::cir::StructType BaseSubobjectType; + cir::StructType BaseSubobjectType; /// Map from (non-bit-field) struct field to the corresponding cir struct type /// field no. This info is populated by the record builder. @@ -165,8 +165,8 @@ class CIRGenRecordLayout { bool IsZeroInitializableAsBase : 1; public: - CIRGenRecordLayout(mlir::cir::StructType CompleteObjectType, - mlir::cir::StructType BaseSubobjectType, + CIRGenRecordLayout(cir::StructType CompleteObjectType, + cir::StructType BaseSubobjectType, bool IsZeroInitializable, bool IsZeroInitializableAsBase) : CompleteObjectType(CompleteObjectType), BaseSubobjectType(BaseSubobjectType), @@ -175,13 +175,11 @@ class CIRGenRecordLayout { /// Return the "complete object" LLVM type associated with /// this record. - mlir::cir::StructType getCIRType() const { return CompleteObjectType; } + cir::StructType getCIRType() const { return CompleteObjectType; } /// Return the "base subobject" LLVM type associated with /// this record. - mlir::cir::StructType getBaseSubobjectCIRType() const { - return BaseSubobjectType; - } + cir::StructType getBaseSubobjectCIRType() const { return BaseSubobjectType; } /// Return cir::StructType element number that corresponds to the field FD. unsigned getCIRFieldNo(const clang::FieldDecl *FD) const { diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 8d39419a2b97..1a29affa0df3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -22,7 +22,7 @@ using namespace clang; using namespace clang::CIRGen; -using namespace mlir::cir; +using namespace cir; Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S, bool getLast, @@ -72,7 +72,7 @@ Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast, // Add local scope to track new declared variables. SymTableScopeTy varScope(symbolTable); auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( + builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) { LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; @@ -437,7 +437,7 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); // The if scope contains the full source range for IfStmt. auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( + builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()}; @@ -533,11 +533,10 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { // dispatched by `handleReturnVal()` might needs to manipulate blocks and // look into parents, which are all unlinked. mlir::OpBuilder::InsertPoint scopeBody; - builder.create( - scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - scopeBody = b.saveInsertionPoint(); - }); + builder.create(scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + scopeBody = b.saveInsertionPoint(); + }); { mlir::OpBuilder::InsertionGuard guard(builder); builder.restoreInsertionPoint(scopeBody); @@ -567,8 +566,8 @@ mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { // info support just yet, look at this again once we have it. assert(builder.getInsertionBlock() && "not yet implemented"); - builder.create(getLoc(S.getSourceRange()), - S.getLabel()->getName()); + builder.create(getLoc(S.getSourceRange()), + S.getLabel()->getName()); // A goto marks the end of a block, create a new one for codegen after // buildGotoStmt can resume building in that block. @@ -594,7 +593,7 @@ mlir::LogicalResult CIRGenFunction::buildLabel(const LabelDecl *D) { } builder.setInsertionPointToEnd(labelBlock); - builder.create(getLoc(D->getSourceRange()), D->getName()); + builder.create(getLoc(D->getSourceRange()), D->getName()); builder.setInsertionPointToEnd(labelBlock); // FIXME: emit debug info for labels, incrementProfileCounter @@ -623,7 +622,7 @@ mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) { const CaseStmt *CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, mlir::ArrayAttr &value, - mlir::cir::CaseOpKind &kind) { + cir::CaseOpKind &kind) { const CaseStmt *caseStmt = &S; const CaseStmt *lastCase = &S; SmallVector caseEltValueListAttr; @@ -637,17 +636,17 @@ const CaseStmt *CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, if (auto *rhs = caseStmt->getRHS()) { auto endVal = rhs->EvaluateKnownConstInt(getContext()); SmallVector rangeCaseAttr = { - mlir::cir::IntAttr::get(condType, intVal), - mlir::cir::IntAttr::get(condType, endVal)}; + cir::IntAttr::get(condType, intVal), + cir::IntAttr::get(condType, endVal)}; value = builder.getArrayAttr(rangeCaseAttr); - kind = mlir::cir::CaseOpKind::Range; + kind = cir::CaseOpKind::Range; // We may not be able to fold rangaes. Due to we can't present range case // with other trivial cases now. return caseStmt; } - caseEltValueListAttr.push_back(mlir::cir::IntAttr::get(condType, intVal)); + caseEltValueListAttr.push_back(cir::IntAttr::get(condType, intVal)); caseStmt = dyn_cast_or_null(caseStmt->getSubStmt()); @@ -659,8 +658,8 @@ const CaseStmt *CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, if (!caseEltValueListAttr.empty()) { value = builder.getArrayAttr(caseEltValueListAttr); - kind = caseEltValueListAttr.size() > 1 ? mlir::cir::CaseOpKind::Anyof - : mlir::cir::CaseOpKind::Equal; + kind = caseEltValueListAttr.size() > 1 ? cir::CaseOpKind::Anyof + : cir::CaseOpKind::Equal; } return lastCase; @@ -763,7 +762,7 @@ mlir::LogicalResult CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, mlir::Type condType, bool buildingTopLevelCase) { return buildCaseDefaultCascade(&S, condType, builder.getArrayAttr({}), - mlir::cir::CaseOpKind::Default, + cir::CaseOpKind::Default, buildingTopLevelCase); } @@ -786,7 +785,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchCase(const SwitchCase &S, mlir::LogicalResult CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef ForAttrs) { - mlir::cir::ForOp forOp; + cir::ForOp forOp; // TODO(cir): pass in array of attributes. auto forStmtBuilder = [&]() -> mlir::LogicalResult { @@ -842,15 +841,16 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, auto res = mlir::success(); auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( - scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - // Create a cleanup scope for the condition variable cleanups. - // Logical equivalent from LLVM codegn for - // LexicalScope ConditionScope(*this, S.getSourceRange())... - LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - res = forStmtBuilder(); - }); + builder.create(scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // Create a cleanup scope for the condition + // variable cleanups. Logical equivalent from + // LLVM codegn for LexicalScope + // ConditionScope(*this, S.getSourceRange())... + LexicalScope lexScope{ + *this, loc, builder.getInsertionBlock()}; + res = forStmtBuilder(); + }); if (res.failed()) return res; @@ -860,7 +860,7 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, } mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { - mlir::cir::ForOp forOp; + cir::ForOp forOp; // TODO: pass in array of attributes. auto forStmtBuilder = [&]() -> mlir::LogicalResult { @@ -893,10 +893,9 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { // scalar type. condVal = evaluateExprAsBool(S.getCond()); } else { - auto boolTy = mlir::cir::BoolType::get(b.getContext()); - condVal = b.create( - loc, boolTy, - mlir::cir::BoolAttr::get(b.getContext(), boolTy, true)); + auto boolTy = cir::BoolType::get(b.getContext()); + condVal = b.create( + loc, boolTy, cir::BoolAttr::get(b.getContext(), boolTy, true)); } builder.createCondition(condVal); }, @@ -924,12 +923,12 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { auto res = mlir::success(); auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( - scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - res = forStmtBuilder(); - }); + builder.create(scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + LexicalScope lexScope{ + *this, loc, builder.getInsertionBlock()}; + res = forStmtBuilder(); + }); if (res.failed()) return res; @@ -939,7 +938,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { } mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { - mlir::cir::DoWhileOp doWhileOp; + cir::DoWhileOp doWhileOp; // TODO: pass in array of attributes. auto doStmtBuilder = [&]() -> mlir::LogicalResult { @@ -974,12 +973,12 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { auto res = mlir::success(); auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( - scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - res = doStmtBuilder(); - }); + builder.create(scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + LexicalScope lexScope{ + *this, loc, builder.getInsertionBlock()}; + res = doStmtBuilder(); + }); if (res.failed()) return res; @@ -989,7 +988,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { } mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { - mlir::cir::WhileOp whileOp; + cir::WhileOp whileOp; // TODO: pass in array of attributes. auto whileStmtBuilder = [&]() -> mlir::LogicalResult { @@ -1029,12 +1028,12 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { auto res = mlir::success(); auto scopeLoc = getLoc(S.getSourceRange()); - builder.create( - scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - res = whileStmtBuilder(); - }); + builder.create(scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + LexicalScope lexScope{ + *this, loc, builder.getInsertionBlock()}; + res = whileStmtBuilder(); + }); if (res.failed()) return res; @@ -1120,12 +1119,12 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { // The switch scope contains the full source range for SwitchStmt. auto scopeLoc = getLoc(S.getSourceRange()); auto res = mlir::success(); - builder.create( - scopeLoc, /*scopeBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - res = switchStmtBuilder(); - }); + builder.create(scopeLoc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + LexicalScope lexScope{ + *this, loc, builder.getInsertionBlock()}; + res = switchStmtBuilder(); + }); llvm::SmallVector cases; swop.collectCases(cases); diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp index 999d5be7ba3b..b865046828c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp @@ -76,7 +76,7 @@ CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) { mlir::OpBuilder::InsertionGuard guardCase(builder); builder.setInsertionPointToEnd(&block); // Create a scope for the OpenMP region. - builder.create( + builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()}; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index e8ce46e409c0..e625efb40dc0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -30,18 +30,18 @@ struct CIRGenTypeCache { CIRGenTypeCache() {} /// void - mlir::cir::VoidType VoidTy; + cir::VoidType VoidTy; // char, int, short, long, __int128 - mlir::cir::IntType SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty, SInt128Ty; + cir::IntType SInt8Ty, SInt16Ty, SInt32Ty, SInt64Ty, SInt128Ty; // usigned char, unsigned, unsigned short, unsigned long, unsigned __int128 - mlir::cir::IntType UInt8Ty, UInt16Ty, UInt32Ty, UInt64Ty, UInt128Ty; + cir::IntType UInt8Ty, UInt16Ty, UInt32Ty, UInt64Ty, UInt128Ty; /// half, bfloat, float, double, fp80 - mlir::cir::FP16Type FP16Ty; - mlir::cir::BF16Type BFloat16Ty; - mlir::cir::SingleType FloatTy; - mlir::cir::DoubleType DoubleTy; - mlir::cir::FP80Type FP80Ty; - mlir::cir::FP128Type FP128Ty; + cir::FP16Type FP16Ty; + cir::BF16Type BFloat16Ty; + cir::SingleType FloatTy; + cir::DoubleType DoubleTy; + cir::FP80Type FP80Ty; + cir::FP128Type FP128Ty; /// int mlir::Type UIntTy; @@ -58,29 +58,29 @@ struct CIRGenTypeCache { mlir::Type PtrDiffTy; /// void* in address space 0 - mlir::cir::PointerType VoidPtrTy; - mlir::cir::PointerType UInt8PtrTy; + cir::PointerType VoidPtrTy; + cir::PointerType UInt8PtrTy; /// void** in address space 0 union { - mlir::cir::PointerType VoidPtrPtrTy; - mlir::cir::PointerType UInt8PtrPtrTy; + cir::PointerType VoidPtrPtrTy; + cir::PointerType UInt8PtrPtrTy; }; /// void* in alloca address space union { - mlir::cir::PointerType AllocaVoidPtrTy; - mlir::cir::PointerType AllocaInt8PtrTy; + cir::PointerType AllocaVoidPtrTy; + cir::PointerType AllocaInt8PtrTy; }; /// void* in default globals address space // union { - // mlir::cir::PointerType GlobalsVoidPtrTy; - // mlir::cir::PointerType GlobalsInt8PtrTy; + // cir::PointerType GlobalsVoidPtrTy; + // cir::PointerType GlobalsInt8PtrTy; // }; /// void* in the address space for constant globals - // mlir::cir::PointerType ConstGlobalsPtrTy; + // cir::PointerType ConstGlobalsPtrTy; /// The size and alignment of the builtin C type 'int'. This comes /// up enough in various ABI lowering tasks to be worth pre-computing. @@ -110,7 +110,7 @@ struct CIRGenTypeCache { // unsigned char SizeAlignInBytes; // }; - mlir::cir::AddressSpaceAttr CIRAllocaAddressSpace; + cir::AddressSpaceAttr CIRAllocaAddressSpace; // clang::CharUnits getSizeSize() const { // return clang::CharUnits::fromQuantity(SizeSizeInBytes); @@ -125,7 +125,7 @@ struct CIRGenTypeCache { return clang::CharUnits::fromQuantity(PointerAlignInBytes); } - mlir::cir::AddressSpaceAttr getCIRAllocaAddressSpace() const { + cir::AddressSpaceAttr getCIRAllocaAddressSpace() const { return CIRAllocaAddressSpace; } }; diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 1e1263ae9756..5483a0f805a5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -25,15 +25,15 @@ using namespace clang; using namespace clang::CIRGen; -mlir::cir::CallingConv +cir::CallingConv CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { switch (CC) { case CC_C: - return mlir::cir::CallingConv::C; + return cir::CallingConv::C; case CC_OpenCLKernel: return CGM.getTargetCIRGenInfo().getOpenCLKernelCallingConv(); case CC_SpirFunction: - return mlir::cir::CallingConv::SpirFunction; + return cir::CallingConv::SpirFunction; default: llvm_unreachable("No other calling conventions implemented."); } @@ -98,7 +98,7 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl, /// Return true if the specified type is already completely laid out. bool CIRGenTypes::isRecordLayoutComplete(const Type *Ty) const { - llvm::DenseMap::const_iterator I = + llvm::DenseMap::const_iterator I = recordDeclTypes.find(Ty); return I != recordDeclTypes.end() && I->second.isComplete(); } @@ -189,7 +189,7 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // TagDecl's are not necessarily unique, instead use the (clang) type // connected to the decl. const auto *key = Context.getTagDeclType(RD).getTypePtr(); - mlir::cir::StructType entry = recordDeclTypes[key]; + cir::StructType entry = recordDeclTypes[key]; // Handle forward decl / incomplete types. if (!entry) { @@ -418,7 +418,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { break; case BuiltinType::Bool: - ResultType = ::mlir::cir::BoolType::get(&getMLIRContext()); + ResultType = cir::BoolType::get(&getMLIRContext()); break; // Signed types. @@ -442,9 +442,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::SatLongFract: case BuiltinType::SatShortAccum: case BuiltinType::SatShortFract: - ResultType = - mlir::cir::IntType::get(&getMLIRContext(), Context.getTypeSize(T), - /*isSigned=*/true); + ResultType = cir::IntType::get(&getMLIRContext(), Context.getTypeSize(T), + /*isSigned=*/true); break; // Unsigned types. case BuiltinType::Char16: @@ -471,8 +470,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::SatUShortAccum: case BuiltinType::SatUShortFract: ResultType = - mlir::cir::IntType::get(Builder.getContext(), Context.getTypeSize(T), - /*isSigned=*/false); + cir::IntType::get(Builder.getContext(), Context.getTypeSize(T), + /*isSigned=*/false); break; case BuiltinType::Float16: @@ -617,7 +616,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case Type::Complex: { const ComplexType *CT = cast(Ty); auto ElementTy = ConvertType(CT->getElementType()); - ResultType = ::mlir::cir::ComplexType::get(Builder.getContext(), ElementTy); + ResultType = cir::ComplexType::get(Builder.getContext(), ElementTy); break; } case Type::LValueReference: @@ -675,16 +674,16 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { // FIXME: In LLVM, "lower arrays of undefined struct type to arrays of // i8 just to have a concrete type". Not sure this makes sense in CIR yet. assert(Builder.isSized(EltTy) && "not implemented"); - ResultType = ::mlir::cir::ArrayType::get(Builder.getContext(), EltTy, - A->getSize().getZExtValue()); + ResultType = cir::ArrayType::get(Builder.getContext(), EltTy, + A->getSize().getZExtValue()); break; } case Type::ExtVector: case Type::Vector: { const VectorType *V = cast(Ty); auto ElementType = convertTypeForMem(V->getElementType()); - ResultType = ::mlir::cir::VectorType::get(Builder.getContext(), ElementType, - V->getNumElements()); + ResultType = cir::VectorType::get(Builder.getContext(), ElementType, + V->getNumElements()); break; } case Type::ConstantMatrix: { @@ -729,15 +728,15 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { const auto *MPT = cast(Ty); auto memberTy = ConvertType(MPT->getPointeeType()); - auto clsTy = mlir::cast( - ConvertType(QualType(MPT->getClass(), 0))); + auto clsTy = + mlir::cast(ConvertType(QualType(MPT->getClass(), 0))); if (MPT->isMemberDataPointer()) ResultType = - mlir::cir::DataMemberType::get(Builder.getContext(), memberTy, clsTy); + cir::DataMemberType::get(Builder.getContext(), memberTy, clsTy); else { - auto memberFuncTy = mlir::cast(memberTy); + auto memberFuncTy = mlir::cast(memberTy); ResultType = - mlir::cir::MethodType::get(Builder.getContext(), memberFuncTy, clsTy); + cir::MethodType::get(Builder.getContext(), memberFuncTy, clsTy); } break; } @@ -760,8 +759,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } case Type::BitInt: { const auto *bitIntTy = cast(Ty); - ResultType = mlir::cir::IntType::get( - Builder.getContext(), bitIntTy->getNumBits(), bitIntTy->isSigned()); + ResultType = cir::IntType::get(Builder.getContext(), bitIntTy->getNumBits(), + bitIntTy->isSigned()); break; } } @@ -792,7 +791,7 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( if (FI) return *FI; - mlir::cir::CallingConv CC = ClangCallConvToCIRCallConv(info.getCC()); + cir::CallingConv CC = ClangCallConvToCIRCallConv(info.getCC()); // Construction the function info. We co-allocate the ArgInfos. FI = CIRGenFunctionInfo::create(CC, instanceMethod, chainCall, info, @@ -804,7 +803,7 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( assert(inserted && "Recursively being processed?"); // Compute ABI information. - if (CC == mlir::cir::CallingConv::SpirKernel) { + if (CC == cir::CallingConv::SpirKernel) { // Force target independent argument handling for the host visible // kernel functions. computeSPIRKernelABIInfo(CGM, *FI); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 16df1bc99ee8..fc59befb9501 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -62,10 +62,11 @@ class GlobalDecl; namespace mlir { class Type; +} // namespace mlir + namespace cir { class StructType; } // namespace cir -} // namespace mlir namespace clang::CIRGen { class CallArgList; @@ -92,7 +93,7 @@ class CIRGenTypes { CIRGenRecordLayouts; /// Contains the CIR type for any converted RecordDecl - llvm::DenseMap recordDeclTypes; + llvm::DenseMap recordDeclTypes; /// Hold memoized CIRGenFunctionInfo results llvm::FoldingSet FunctionInfos; @@ -127,7 +128,7 @@ class CIRGenTypes { bool isFuncParamTypeConvertible(clang::QualType Ty); /// Convert clang calling convention to CIR calling convention. - mlir::cir::CallingConv ClangCallConvToCIRCallConv(clang::CallingConv CC); + cir::CallingConv ClangCallConvToCIRCallConv(clang::CallingConv CC); /// Derives the 'this' type for CIRGen purposes, i.e. ignoring method CVR /// qualification. @@ -169,7 +170,7 @@ class CIRGenTypes { mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl); std::unique_ptr - computeRecordLayout(const clang::RecordDecl *D, mlir::cir::StructType *Ty); + computeRecordLayout(const clang::RecordDecl *D, cir::StructType *Ty); std::string getRecordTypeName(const clang::RecordDecl *, llvm::StringRef suffix); @@ -189,14 +190,14 @@ class CIRGenTypes { mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); /// Get the CIR function type for \arg Info. - mlir::cir::FuncType GetFunctionType(const CIRGenFunctionInfo &Info); + cir::FuncType GetFunctionType(const CIRGenFunctionInfo &Info); - mlir::cir::FuncType GetFunctionType(clang::GlobalDecl GD); + cir::FuncType GetFunctionType(clang::GlobalDecl GD); /// Get the LLVM function type for use in a vtable, given a CXXMethodDecl. If /// the method to has an incomplete return type, and/or incomplete argument /// types, this will return the opaque type. - mlir::cir::FuncType GetFunctionTypeForVTable(clang::GlobalDecl GD); + cir::FuncType GetFunctionTypeForVTable(clang::GlobalDecl GD); // The arrangement methods are split into three families: // - those meant to drive the signature and prologue/epilogue diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 1f12fed80243..cec319e41046 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -59,7 +59,7 @@ mlir::Type CIRGenVTables::getVTableType(const VTableLayout &layout) { auto componentType = getVTableComponentType(); for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) tys.push_back( - mlir::cir::ArrayType::get(ctx, componentType, layout.getVTableSize(i))); + cir::ArrayType::get(ctx, componentType, layout.getVTableSize(i))); // FIXME(cir): should VTableLayout be encoded like we do for some // AST nodes? @@ -212,8 +212,8 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, // vtableHasLocalLinkage, // /*isCompleteDtor=*/false); } else { - assert((mlir::isa(rtti) || - mlir::isa(rtti)) && + assert((mlir::isa(rtti) || + mlir::isa(rtti)) && "expected GlobalViewAttr or ConstPtrAttr"); return builder.add(rtti); } @@ -227,7 +227,7 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, llvm_unreachable("NYI"); } - auto getSpecialVirtualFn = [&](StringRef name) -> mlir::cir::FuncOp { + auto getSpecialVirtualFn = [&](StringRef name) -> cir::FuncOp { // FIXME(PR43094): When merging comdat groups, lld can select a local // symbol as the signature symbol even though it cannot be accessed // outside that symbol's TU. The relative vtables ABI would make @@ -244,15 +244,15 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, CGM.getTriple().isNVPTX()) llvm_unreachable("NYI"); - mlir::cir::FuncType fnTy = + cir::FuncType fnTy = CGM.getBuilder().getFuncType({}, CGM.getBuilder().getVoidTy()); - mlir::cir::FuncOp fnPtr = CGM.createRuntimeFunction(fnTy, name); + cir::FuncOp fnPtr = CGM.createRuntimeFunction(fnTy, name); // LLVM codegen handles unnamedAddr assert(!cir::MissingFeatures::unnamedAddr()); return fnPtr; }; - mlir::cir::FuncOp fnPtr; + cir::FuncOp fnPtr; if (cast(GD.getDecl())->isPureVirtual()) { // Pure virtual member functions. if (!PureVirtualFn) @@ -286,7 +286,7 @@ void CIRGenVTables::addVTableComponent(ConstantArrayBuilder &builder, if (useRelativeLayout()) { llvm_unreachable("NYI"); } else { - return builder.add(mlir::cir::GlobalViewAttr::get( + return builder.add(cir::GlobalViewAttr::get( CGM.getBuilder().getUInt8PtrTy(), mlir::FlatSymbolRefAttr::get(fnPtr.getSymNameAttr()))); } @@ -328,10 +328,9 @@ void CIRGenVTables::createVTableInitializer(ConstantStructBuilder &builder, } } -mlir::cir::GlobalOp CIRGenVTables::generateConstructionVTable( +cir::GlobalOp CIRGenVTables::generateConstructionVTable( const CXXRecordDecl *RD, const BaseSubobject &Base, bool BaseIsVirtual, - mlir::cir::GlobalLinkageKind Linkage, - VTableAddressPointsMapTy &AddressPoints) { + cir::GlobalLinkageKind Linkage, VTableAddressPointsMapTy &AddressPoints) { if (CGM.getModuleDebugInfo()) llvm_unreachable("NYI"); @@ -360,8 +359,8 @@ mlir::cir::GlobalOp CIRGenVTables::generateConstructionVTable( // emitting an available_externally VTT, we provide references to an internal // linkage construction vtable. The ABI only requires complete-object vtables // to be the same for all instances of a type, not construction vtables. - if (Linkage == mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage) - Linkage = mlir::cir::GlobalLinkageKind::InternalLinkage; + if (Linkage == cir::GlobalLinkageKind::AvailableExternallyLinkage) + Linkage = cir::GlobalLinkageKind::InternalLinkage; auto Align = CGM.getDataLayout().getABITypeAlign(VTType); auto Loc = CGM.getLoc(RD->getSourceRange()); @@ -380,7 +379,7 @@ mlir::cir::GlobalOp CIRGenVTables::generateConstructionVTable( ConstantInitBuilder builder(CGM); auto components = builder.beginStruct(); createVTableInitializer(components, *VTLayout, RTTI, - mlir::cir::isLocalLinkage(VTable.getLinkage())); + cir::isLocalLinkage(VTable.getLinkage())); components.finishAndSetAsInitializer(VTable); // Set properties only after the initializer has been set to ensure that the @@ -400,10 +399,9 @@ mlir::cir::GlobalOp CIRGenVTables::generateConstructionVTable( /// Compute the required linkage of the vtable for the given class. /// /// Note that we only call this at the end of the translation unit. -mlir::cir::GlobalLinkageKind -CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { +cir::GlobalLinkageKind CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { if (!RD->isExternallyVisible()) - return mlir::cir::GlobalLinkageKind::InternalLinkage; + return cir::GlobalLinkageKind::InternalLinkage; // We're at the end of the translation unit, so the current key // function is fully correct. @@ -424,24 +422,24 @@ CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { "Shouldn't query vtable linkage without key function, " "optimizations, or debug info"); if (!def && codeGenOpts.OptimizationLevel > 0) - return mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; + return cir::GlobalLinkageKind::AvailableExternallyLinkage; if (keyFunction->isInlined()) return !astCtx.getLangOpts().AppleKext - ? mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage - : mlir::cir::GlobalLinkageKind::InternalLinkage; + ? cir::GlobalLinkageKind::LinkOnceODRLinkage + : cir::GlobalLinkageKind::InternalLinkage; - return mlir::cir::GlobalLinkageKind::ExternalLinkage; + return cir::GlobalLinkageKind::ExternalLinkage; case TSK_ImplicitInstantiation: return !astCtx.getLangOpts().AppleKext - ? mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage - : mlir::cir::GlobalLinkageKind::InternalLinkage; + ? cir::GlobalLinkageKind::LinkOnceODRLinkage + : cir::GlobalLinkageKind::InternalLinkage; case TSK_ExplicitInstantiationDefinition: return !astCtx.getLangOpts().AppleKext - ? mlir::cir::GlobalLinkageKind::WeakODRLinkage - : mlir::cir::GlobalLinkageKind::InternalLinkage; + ? cir::GlobalLinkageKind::WeakODRLinkage + : cir::GlobalLinkageKind::InternalLinkage; case TSK_ExplicitInstantiationDeclaration: llvm_unreachable("Should not have been asked to emit this"); @@ -451,19 +449,18 @@ CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { // -fapple-kext mode does not support weak linkage, so we must use // internal linkage. if (astCtx.getLangOpts().AppleKext) - return mlir::cir::GlobalLinkageKind::InternalLinkage; + return cir::GlobalLinkageKind::InternalLinkage; - auto DiscardableODRLinkage = mlir::cir::GlobalLinkageKind::LinkOnceODRLinkage; - auto NonDiscardableODRLinkage = mlir::cir::GlobalLinkageKind::WeakODRLinkage; + auto DiscardableODRLinkage = cir::GlobalLinkageKind::LinkOnceODRLinkage; + auto NonDiscardableODRLinkage = cir::GlobalLinkageKind::WeakODRLinkage; if (RD->hasAttr()) { // Cannot discard exported vtables. DiscardableODRLinkage = NonDiscardableODRLinkage; } else if (RD->hasAttr()) { // Imported vtables are available externally. - DiscardableODRLinkage = - mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; + DiscardableODRLinkage = cir::GlobalLinkageKind::AvailableExternallyLinkage; NonDiscardableODRLinkage = - mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage; + cir::GlobalLinkageKind::AvailableExternallyLinkage; } switch (RD->getTemplateSpecializationKind()) { @@ -478,9 +475,9 @@ CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { if (getTarget().getCXXABI().isMicrosoft()) return DiscardableODRLinkage; auto r = shouldEmitAvailableExternallyVTable(*this, RD) - ? mlir::cir::GlobalLinkageKind::AvailableExternallyLinkage - : mlir::cir::GlobalLinkageKind::ExternalLinkage; - assert(r == mlir::cir::GlobalLinkageKind::ExternalLinkage && + ? cir::GlobalLinkageKind::AvailableExternallyLinkage + : cir::GlobalLinkageKind::ExternalLinkage; + assert(r == cir::GlobalLinkageKind::ExternalLinkage && "available external NYI"); return r; } @@ -492,11 +489,10 @@ CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { llvm_unreachable("Invalid TemplateSpecializationKind!"); } -mlir::cir::GlobalOp +cir::GlobalOp getAddrOfVTTVTable(CIRGenVTables &CGVT, CIRGenModule &CGM, const CXXRecordDecl *MostDerivedClass, - const VTTVTable &vtable, - mlir::cir::GlobalLinkageKind linkage, + const VTTVTable &vtable, cir::GlobalLinkageKind linkage, VTableLayout::AddressPointsMapTy &addressPoints) { if (vtable.getBase() == MostDerivedClass) { assert(vtable.getBaseOffset().isZero() && @@ -509,7 +505,7 @@ getAddrOfVTTVTable(CIRGenVTables &CGVT, CIRGenModule &CGM, addressPoints); } -mlir::cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *RD) { +cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *RD) { assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT"); SmallString<256> OutName; @@ -523,15 +519,14 @@ mlir::cir::GlobalOp CIRGenVTables::getAddrOfVTT(const CXXRecordDecl *RD) { VTTBuilder Builder(CGM.getASTContext(), RD, /*GenerateDefinition=*/false); - auto ArrayType = mlir::cir::ArrayType::get(CGM.getBuilder().getContext(), - CGM.getBuilder().getUInt8PtrTy(), - Builder.getVTTComponents().size()); + auto ArrayType = cir::ArrayType::get(CGM.getBuilder().getContext(), + CGM.getBuilder().getUInt8PtrTy(), + Builder.getVTTComponents().size()); auto Align = CGM.getDataLayout().getABITypeAlign(CGM.getBuilder().getUInt8PtrTy()); auto VTT = CGM.createOrReplaceCXXRuntimeVariable( CGM.getLoc(RD->getSourceRange()), Name, ArrayType, - mlir::cir::GlobalLinkageKind::ExternalLinkage, - CharUnits::fromQuantity(Align)); + cir::GlobalLinkageKind::ExternalLinkage, CharUnits::fromQuantity(Align)); CGM.setGVProperties(VTT, RD); return VTT; } @@ -590,16 +585,16 @@ uint64_t CIRGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, } /// Emit the definition of the given vtable. -void CIRGenVTables::buildVTTDefinition(mlir::cir::GlobalOp VTT, - mlir::cir::GlobalLinkageKind Linkage, +void CIRGenVTables::buildVTTDefinition(cir::GlobalOp VTT, + cir::GlobalLinkageKind Linkage, const CXXRecordDecl *RD) { VTTBuilder Builder(CGM.getASTContext(), RD, /*GenerateDefinition=*/true); - auto ArrayType = mlir::cir::ArrayType::get(CGM.getBuilder().getContext(), - CGM.getBuilder().getUInt8PtrTy(), - Builder.getVTTComponents().size()); + auto ArrayType = cir::ArrayType::get(CGM.getBuilder().getContext(), + CGM.getBuilder().getUInt8PtrTy(), + Builder.getVTTComponents().size()); - SmallVector VTables; + SmallVector VTables; SmallVector VTableAddressPoints; for (const VTTVTable *i = Builder.getVTTVTables().begin(), *e = Builder.getVTTVTables().end(); @@ -614,7 +609,7 @@ void CIRGenVTables::buildVTTDefinition(mlir::cir::GlobalOp VTT, *e = Builder.getVTTComponents().end(); i != e; ++i) { const VTTVTable &VTTVT = Builder.getVTTVTables()[i->VTableIndex]; - mlir::cir::GlobalOp VTable = VTables[i->VTableIndex]; + cir::GlobalOp VTable = VTables[i->VTableIndex]; VTableLayout::AddressPointLocation AddressPoint; if (VTTVT.getBase() == RD) { // Just get the address point for the regular vtable. diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index 8440fdbafcfb..4b2247dc9fc8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -52,10 +52,10 @@ class CIRGenVTables { SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices; /// Cache for the pure virtual member call function. - mlir::cir::FuncOp PureVirtualFn = nullptr; + cir::FuncOp PureVirtualFn = nullptr; /// Cache for the deleted virtual member call function. - mlir::cir::FuncOp DeletedVirtualFn = nullptr; + cir::FuncOp DeletedVirtualFn = nullptr; void addVTableComponent(ConstantArrayBuilder &builder, const VTableLayout &layout, unsigned componentIndex, @@ -93,18 +93,16 @@ class CIRGenVTables { BaseSubobject Base); /// Generate a construction vtable for the given base subobject. - mlir::cir::GlobalOp + cir::GlobalOp generateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base, - bool BaseIsVirtual, - mlir::cir::GlobalLinkageKind Linkage, + bool BaseIsVirtual, cir::GlobalLinkageKind Linkage, VTableAddressPointsMapTy &AddressPoints); /// Get the address of the VTT for the given record decl. - mlir::cir::GlobalOp getAddrOfVTT(const CXXRecordDecl *RD); + cir::GlobalOp getAddrOfVTT(const CXXRecordDecl *RD); /// Emit the definition of the given vtable. - void buildVTTDefinition(mlir::cir::GlobalOp VTT, - mlir::cir::GlobalLinkageKind Linkage, + void buildVTTDefinition(cir::GlobalOp VTT, cir::GlobalLinkageKind Linkage, const CXXRecordDecl *RD); /// Emit the associated thunks for the given global decl. diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index e795fe97a51d..fc2f650eaed6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -299,7 +299,7 @@ class LValue { LValue R; R.LVType = Simple; - assert(mlir::cast(address.getPointer().getType())); + assert(mlir::cast(address.getPointer().getType())); R.V = address.getPointer(); R.ElementType = address.getElementType(); R.Initialize(type, qs, address.getAlignment(), baseInfo, tbaaInfo); diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index f5ea438dae38..24143185691e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -56,7 +56,7 @@ void CIRGenerator::Initialize(ASTContext &astCtx) { mlirCtx = std::make_unique(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); - mlirCtx->getOrLoadDialect(); + mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); mlirCtx->getOrLoadDialect(); diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 4614aa717d9c..373e0a735622 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -134,17 +134,17 @@ struct CIRRecordLowering final { } mlir::Type getCharType() { - return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(), - astContext.getCharWidth(), - /*isSigned=*/false); + return cir::IntType::get(&cirGenTypes.getMLIRContext(), + astContext.getCharWidth(), + /*isSigned=*/false); } - /// Wraps mlir::cir::IntType with some implicit arguments. + /// Wraps cir::IntType with some implicit arguments. mlir::Type getUIntNType(uint64_t NumBits) { unsigned AlignedBits = llvm::PowerOf2Ceil(NumBits); AlignedBits = std::max(8u, AlignedBits); - return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(), AlignedBits, - /*isSigned=*/false); + return cir::IntType::get(&cirGenTypes.getMLIRContext(), AlignedBits, + /*isSigned=*/false); } mlir::Type getByteArrayType(CharUnits numberOfChars) { @@ -152,8 +152,8 @@ struct CIRRecordLowering final { mlir::Type type = getCharType(); return numberOfChars == CharUnits::One() ? type - : mlir::cir::ArrayType::get(type.getContext(), type, - numberOfChars.getQuantity()); + : cir::ArrayType::get(type.getContext(), type, + numberOfChars.getQuantity()); } // This is different from LLVM traditional codegen because CIRGen uses arrays @@ -161,12 +161,12 @@ struct CIRRecordLowering final { // structures support. mlir::Type getBitfieldStorageType(unsigned numBits) { unsigned alignedBits = llvm::alignTo(numBits, astContext.getCharWidth()); - if (mlir::cir::IntType::isValidPrimitiveIntBitwidth(alignedBits)) { + if (cir::IntType::isValidPrimitiveIntBitwidth(alignedBits)) { return builder.getUIntNTy(alignedBits); } else { mlir::Type type = getCharType(); - return mlir::cir::ArrayType::get(type.getContext(), type, - alignedBits / astContext.getCharWidth()); + return cir::ArrayType::get(type.getContext(), type, + alignedBits / astContext.getCharWidth()); } } @@ -690,14 +690,13 @@ void CIRRecordLowering::insertPadding() { } std::unique_ptr -CIRGenTypes::computeRecordLayout(const RecordDecl *D, - mlir::cir::StructType *Ty) { +CIRGenTypes::computeRecordLayout(const RecordDecl *D, cir::StructType *Ty) { CIRRecordLowering builder(*this, D, /*packed=*/false); assert(Ty->isIncomplete() && "recomputing record layout?"); builder.lower(/*nonVirtualBaseType=*/false); // If we're in C++, compute the base subobject type. - mlir::cir::StructType BaseTy; + cir::StructType BaseTy; if (llvm::isa(D) && !D->isUnion() && !D->hasAttr()) { BaseTy = *Ty; @@ -720,12 +719,11 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, // Fill in the struct *after* computing the base type. Filling in the body // signifies that the type is no longer opaque and record layout is complete, // but we may need to recursively layout D while laying D out as a base type. - auto astAttr = mlir::cir::ASTRecordDeclAttr::get(Ty->getContext(), D); + auto astAttr = cir::ASTRecordDeclAttr::get(Ty->getContext(), D); Ty->complete(builder.fieldTypes, builder.isPacked, astAttr); auto RL = std::make_unique( - Ty ? *Ty : mlir::cir::StructType{}, - BaseTy ? BaseTy : mlir::cir::StructType{}, + Ty ? *Ty : cir::StructType{}, BaseTy ? BaseTy : cir::StructType{}, (bool)builder.IsZeroInitializable, (bool)builder.IsZeroInitializableAsBase); diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp index 086c4ece6b3d..4652670425ee 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -40,7 +40,7 @@ void ConstantInitFuture::abandon() { Data = nullptr; } -void ConstantInitFuture::installInGlobal(mlir::cir::GlobalOp GV) { +void ConstantInitFuture::installInGlobal(cir::GlobalOp GV) { assert(Data && "installing null future"); if (Data.is()) { CIRGenModule::setInitializer(GV, Data.get()); @@ -69,10 +69,9 @@ inline ConstantInitFuture::ConstantInitFuture(ConstantInitBuilderBase *builder) assert(builder->Buffer[0] != nullptr); } -mlir::cir::GlobalOp ConstantInitBuilderBase::createGlobal( +cir::GlobalOp ConstantInitBuilderBase::createGlobal( mlir::Attribute initializer, const llvm::Twine &name, CharUnits alignment, - bool constant, mlir::cir::GlobalLinkageKind linkage, - unsigned addressSpace) { + bool constant, cir::GlobalLinkageKind linkage, unsigned addressSpace) { llvm_unreachable("NYI"); // auto GV = // new llvm::GlobalVariable(CGM.getModule(), initializer->getType(), @@ -86,14 +85,14 @@ mlir::cir::GlobalOp ConstantInitBuilderBase::createGlobal( } void ConstantInitBuilderBase::setGlobalInitializer( - mlir::cir::GlobalOp GV, mlir::Attribute initializer) { + cir::GlobalOp GV, mlir::Attribute initializer) { CIRGenModule::setInitializer(GV, initializer); if (!SelfReferences.empty()) resolveSelfReferences(GV); } -void ConstantInitBuilderBase::resolveSelfReferences(mlir::cir::GlobalOp GV) { +void ConstantInitBuilderBase::resolveSelfReferences(cir::GlobalOp GV) { llvm_unreachable("NYI"); // for (auto &entry : SelfReferences) { // mlir::Attribute resolvedReference = @@ -129,14 +128,14 @@ void ConstantAggregateBuilderBase::addSize(CharUnits size) { } mlir::Attribute -ConstantAggregateBuilderBase::getRelativeOffset(mlir::cir::IntType offsetType, +ConstantAggregateBuilderBase::getRelativeOffset(cir::IntType offsetType, mlir::Attribute target) { return getRelativeOffsetToPosition(offsetType, target, Builder.Buffer.size() - Begin); } mlir::Attribute ConstantAggregateBuilderBase::getRelativeOffsetToPosition( - mlir::cir::IntType offsetType, mlir::Attribute target, size_t position) { + cir::IntType offsetType, mlir::Attribute target, size_t position) { llvm_unreachable("NYI"); // // Compute the address of the relative-address slot. // auto base = getAddrOfPosition(offsetType, position); @@ -273,9 +272,9 @@ ConstantAggregateBuilderBase::getOffsetFromGlobalTo(size_t end) const { // FIXME(cir): ideally we should use CIRGenBuilder for both static function // bellow by threading ConstantAggregateBuilderBase through // ConstantAggregateBuilderBase. -static mlir::cir::ConstArrayAttr getConstArray(mlir::Attribute attrs, - mlir::cir::ArrayType arrayTy) { - return mlir::cir::ConstArrayAttr::get(arrayTy, attrs); +static cir::ConstArrayAttr getConstArray(mlir::Attribute attrs, + cir::ArrayType arrayTy) { + return cir::ConstArrayAttr::get(arrayTy, attrs); } mlir::Attribute ConstantAggregateBuilderBase::finishArray(mlir::Type eltTy) { @@ -295,14 +294,14 @@ mlir::Attribute ConstantAggregateBuilderBase::finishArray(mlir::Type eltTy) { auto constant = getConstArray( mlir::ArrayAttr::get(eltTy.getContext(), elts), - mlir::cir::ArrayType::get(eltTy.getContext(), eltTy, elts.size())); + cir::ArrayType::get(eltTy.getContext(), eltTy, elts.size())); buffer.erase(buffer.begin() + Begin, buffer.end()); return constant; } mlir::Attribute ConstantAggregateBuilderBase::finishStruct(mlir::MLIRContext *ctx, - mlir::cir::StructType ty) { + cir::StructType ty) { markFinished(); auto &buffer = getBuffer(); diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h index 8f1852cdfe27..10335e89c1a0 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -51,10 +51,10 @@ class CIRGenModule; /// /*constant*/ true); class ConstantInitBuilderBase { struct SelfReference { - mlir::cir::GlobalOp Dummy; + cir::GlobalOp Dummy; llvm::SmallVector Indices; - SelfReference(mlir::cir::GlobalOp dummy) : Dummy(dummy) {} + SelfReference(cir::GlobalOp dummy) : Dummy(dummy) {} }; CIRGenModule &CGM; CIRGenBuilderTy &builder; @@ -75,19 +75,17 @@ class ConstantInitBuilderBase { } private: - mlir::cir::GlobalOp - createGlobal(mlir::Attribute initializer, const llvm::Twine &name, - CharUnits alignment, bool constant = false, - mlir::cir::GlobalLinkageKind linkage = - mlir::cir::GlobalLinkageKind::InternalLinkage, - unsigned addressSpace = 0); + cir::GlobalOp createGlobal( + mlir::Attribute initializer, const llvm::Twine &name, CharUnits alignment, + bool constant = false, + cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::InternalLinkage, + unsigned addressSpace = 0); ConstantInitFuture createFuture(mlir::Attribute initializer); - void setGlobalInitializer(mlir::cir::GlobalOp GV, - mlir::Attribute initializer); + void setGlobalInitializer(cir::GlobalOp GV, mlir::Attribute initializer); - void resolveSelfReferences(mlir::cir::GlobalOp GV); + void resolveSelfReferences(cir::GlobalOp GV); void abandon(size_t newEnd); }; @@ -188,16 +186,16 @@ class ConstantAggregateBuilderBase { void addSize(CharUnits size); /// Add an integer value of a specific type. - void addInt(mlir::cir::IntType intTy, uint64_t value, bool isSigned = false) { + void addInt(cir::IntType intTy, uint64_t value, bool isSigned = false) { add(mlir::IntegerAttr::get(intTy, llvm::APInt{intTy.getWidth(), value, isSigned})); } /// Add a pointer of a specific type. - void addPointer(mlir::cir::PointerType ptrTy, uint64_t value) { + void addPointer(cir::PointerType ptrTy, uint64_t value) { auto val = mlir::IntegerAttr::get( mlir::IntegerType::get(ptrTy.getContext(), 64), value); - add(mlir::cir::ConstPtrAttr::get(ptrTy.getContext(), ptrTy, val)); + add(cir::ConstPtrAttr::get(ptrTy.getContext(), ptrTy, val)); } /// Add a bitcast of a value to a specific type. @@ -219,15 +217,15 @@ class ConstantAggregateBuilderBase { /// in the current linkage unit. The offset will have the given /// integer type, which must be no wider than intptr_t. Some /// targets may not fully support this operation. - void addRelativeOffset(mlir::cir::IntType type, mlir::Attribute target) { + void addRelativeOffset(cir::IntType type, mlir::Attribute target) { llvm_unreachable("NYI"); // add(getRelativeOffset(type, target)); } /// Same as addRelativeOffset(), but instead relative to an element in this /// aggregate, identified by its index. - void addRelativeOffsetToPosition(mlir::cir::IntType type, - mlir::Attribute target, size_t position) { + void addRelativeOffsetToPosition(cir::IntType type, mlir::Attribute target, + size_t position) { llvm_unreachable("NYI"); // add(getRelativeOffsetToPosition(type, target, position)); } @@ -236,7 +234,7 @@ class ConstantAggregateBuilderBase { /// constant offset. This is primarily useful when the relative /// offset is known to be a multiple of (say) four and therefore /// the tag can be used to express an extra two bits of information. - void addTaggedRelativeOffset(mlir::cir::IntType type, mlir::Attribute address, + void addTaggedRelativeOffset(cir::IntType type, mlir::Attribute address, unsigned tag) { llvm_unreachable("NYI"); // mlir::Attribute offset = @@ -287,9 +285,8 @@ class ConstantAggregateBuilderBase { PlaceholderPosition addPlaceholderWithSize(mlir::Type expectedType); /// Fill a previously-added placeholder. - void fillPlaceholderWithInt(PlaceholderPosition position, - mlir::cir::IntType type, uint64_t value, - bool isSigned = false) { + void fillPlaceholderWithInt(PlaceholderPosition position, cir::IntType type, + uint64_t value, bool isSigned = false) { llvm_unreachable("NYI"); // fillPlaceholder(position, llvm::ConstantInt::get(type, value, isSigned)); } @@ -328,16 +325,16 @@ class ConstantAggregateBuilderBase { protected: mlir::Attribute finishArray(mlir::Type eltTy); mlir::Attribute finishStruct(mlir::MLIRContext *ctx, - mlir::cir::StructType structTy); + cir::StructType structTy); private: void getGEPIndicesTo(llvm::SmallVectorImpl &indices, size_t position) const; - mlir::Attribute getRelativeOffset(mlir::cir::IntType offsetType, + mlir::Attribute getRelativeOffset(cir::IntType offsetType, mlir::Attribute target); - mlir::Attribute getRelativeOffsetToPosition(mlir::cir::IntType offsetType, + mlir::Attribute getRelativeOffsetToPosition(cir::IntType offsetType, mlir::Attribute target, size_t position); @@ -367,7 +364,7 @@ class ConstantAggregateBuilderTemplateBase return ArrayBuilder(static_cast(this->Builder), this, eltTy); } - StructBuilder beginStruct(mlir::cir::StructType ty = nullptr) { + StructBuilder beginStruct(cir::StructType ty = nullptr) { return StructBuilder(static_cast(this->Builder), this, ty); } @@ -389,8 +386,7 @@ class ConstantAggregateBuilderTemplateBase /// directly on a ConstantInitBuilder, finish the array/struct and /// create a global variable with it as the initializer. template - mlir::cir::GlobalOp finishAndCreateGlobal(mlir::MLIRContext *ctx, - As &&...args) { + cir::GlobalOp finishAndCreateGlobal(mlir::MLIRContext *ctx, As &&...args) { assert(!this->Parent && "finishing non-root builder"); return this->Builder.createGlobal(asImpl().finishImpl(ctx), std::forward(args)...); @@ -399,16 +395,15 @@ class ConstantAggregateBuilderTemplateBase /// Given that this builder was created by beginning an array or struct /// directly on a ConstantInitBuilder, finish the array/struct and /// set it as the initializer of the given global variable. - void finishAndSetAsInitializer(mlir::cir::GlobalOp global, - bool forVTable = false) { + void finishAndSetAsInitializer(cir::GlobalOp global, bool forVTable = false) { assert(!this->Parent && "finishing non-root builder"); mlir::Attribute init = asImpl().finishImpl(global.getContext()); - auto initCSA = mlir::dyn_cast(init); + auto initCSA = mlir::dyn_cast(init); assert(initCSA && "expected #cir.const_struct attribute to represent vtable data"); return this->Builder.setGlobalInitializer( - global, forVTable ? mlir::cir::VTableAttr::get(initCSA.getType(), - initCSA.getMembers()) + global, forVTable ? cir::VTableAttr::get(initCSA.getType(), + initCSA.getMembers()) : init); } @@ -475,14 +470,14 @@ class ConstantStructBuilderTemplateBase using AggregateBuilderBase = typename Traits::AggregateBuilderBase; private: - mlir::cir::StructType StructTy; + cir::StructType StructTy; template friend class ConstantAggregateBuilderTemplateBase; protected: ConstantStructBuilderTemplateBase(InitBuilder &builder, AggregateBuilderBase *parent, - mlir::cir::StructType structTy) + cir::StructType structTy) : super(builder, parent), StructTy(structTy) { if (structTy) { llvm_unreachable("NYI"); @@ -495,7 +490,7 @@ class ConstantStructBuilderTemplateBase /// Use the given type for the struct if its element count is correct. /// Don't add more elements after calling this. - void suggestType(mlir::cir::StructType structTy) { + void suggestType(cir::StructType structTy) { if (this->size() == structTy.getNumElements()) { StructTy = structTy; } @@ -529,7 +524,7 @@ class ConstantInitBuilderTemplateBase : public ConstantInitBuilderBase { return ArrayBuilder(static_cast(*this), nullptr, eltTy); } - StructBuilder beginStruct(mlir::cir::StructType structTy = nullptr) { + StructBuilder beginStruct(cir::StructType structTy = nullptr) { return StructBuilder(static_cast(*this), nullptr, structTy); } }; @@ -580,7 +575,7 @@ class ConstantStructBuilder ConstantStructBuilder(ConstantInitBuilder &builder, ConstantAggregateBuilderBase *parent, - mlir::cir::StructType structTy) + cir::StructType structTy) : ConstantStructBuilderTemplateBase(builder, parent, structTy) {} }; diff --git a/clang/lib/CIR/CodeGen/ConstantInitFuture.h b/clang/lib/CIR/CodeGen/ConstantInitFuture.h index 3801c0dec199..1bb89fbbb15a 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitFuture.h +++ b/clang/lib/CIR/CodeGen/ConstantInitFuture.h @@ -70,7 +70,7 @@ class ConstantInitFuture { /// Install the initializer into a global variable. This cannot /// be called multiple times. - void installInGlobal(mlir::cir::GlobalOp global); + void installInGlobal(cir::GlobalOp global); void *getOpaqueValue() const { return Data.getOpaqueValue(); } static ConstantInitFuture getFromOpaqueValue(void *value) { diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h index 550af12cd7fa..e235c9ec3685 100644 --- a/clang/lib/CIR/CodeGen/EHScopeStack.h +++ b/clang/lib/CIR/CodeGen/EHScopeStack.h @@ -67,7 +67,7 @@ template struct DominatingValue : InvariantValue {}; template ::value || std::is_base_of::value) && - !std::is_base_of::value && + !std::is_base_of::value && !std::is_base_of::value> struct DominatingPointer; template struct DominatingPointer : InvariantValue {}; diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 7ca3baf67d65..855a0208d8d4 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -156,7 +156,7 @@ class AArch64TargetCIRGenInfo : public TargetCIRGenInfo { namespace { /// The AVX ABI leel for X86 targets. -using X86AVXABILevel = ::cir::X86AVXABILevel; +using X86AVXABILevel = cir::X86AVXABILevel; class X86_64ABIInfo : public ABIInfo { using Class = cir::X86ArgClass; @@ -247,7 +247,7 @@ class SPIRVABIInfo : public CommonSPIRABIInfo { void computeInfo(CIRGenFunctionInfo &FI) const override { // The logic is same as in DefaultABIInfo with an exception on the kernel // arguments handling. - mlir::cir::CallingConv CC = FI.getCallingConvention(); + cir::CallingConv CC = FI.getCallingConvention(); bool cxxabiHit = getCXXABI().classifyReturnType(FI); assert(!cxxabiHit && "C++ ABI not considered"); @@ -255,7 +255,7 @@ class SPIRVABIInfo : public CommonSPIRABIInfo { FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) { - if (CC == mlir::cir::CallingConv::SpirKernel) { + if (CC == cir::CallingConv::SpirKernel) { I.info = classifyKernelArgumentType(I.type); } else { I.info = classifyArgumentType(I.type); @@ -286,14 +286,14 @@ class CommonSPIRTargetCIRGenInfo : public TargetCIRGenInfo { CommonSPIRTargetCIRGenInfo(std::unique_ptr ABIInfo) : TargetCIRGenInfo(std::move(ABIInfo)) {} - mlir::cir::AddressSpaceAttr getCIRAllocaAddressSpace() const override { - return mlir::cir::AddressSpaceAttr::get( + cir::AddressSpaceAttr getCIRAllocaAddressSpace() const override { + return cir::AddressSpaceAttr::get( &getABIInfo().CGT.getMLIRContext(), - mlir::cir::AddressSpaceAttr::Kind::offload_private); + cir::AddressSpaceAttr::Kind::offload_private); } - mlir::cir::CallingConv getOpenCLKernelCallingConv() const override { - return mlir::cir::CallingConv::SpirKernel; + cir::CallingConv getOpenCLKernelCallingConv() const override { + return cir::CallingConv::SpirKernel; } }; @@ -401,7 +401,7 @@ cir::ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, // If we have a sign or zero extended integer, make sure to return Extend so // that the parameter gets the right LLVM IR attributes. - if (Hi == Class::NoClass && mlir::isa(ResType)) { + if (Hi == Class::NoClass && mlir::isa(ResType)) { assert(!Ty->getAs() && "NYI"); if (Ty->isSignedIntegerOrEnumerationType() && isPromotableIntegerTypeForABI(Ty)) @@ -532,7 +532,7 @@ cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { // If we have a sign or zero extended integer, make sure to return Extend so // that the parameter gets the right LLVM IR attributes. // TODO: extend the above consideration to MLIR - if (Hi == Class::NoClass && mlir::isa(ResType)) { + if (Hi == Class::NoClass && mlir::isa(ResType)) { // Treat an enum type as its underlying type. if (const auto *EnumTy = RetTy->getAs()) RetTy = EnumTy->getDecl()->getIntegerType(); @@ -572,12 +572,11 @@ TargetCIRGenInfo::getGlobalVarAddressSpace(CIRGenModule &CGM, } mlir::Value TargetCIRGenInfo::performAddrSpaceCast( - CIRGenFunction &CGF, mlir::Value Src, mlir::cir::AddressSpaceAttr SrcAddr, - mlir::cir::AddressSpaceAttr DestAddr, mlir::Type DestTy, - bool IsNonNull) const { + CIRGenFunction &CGF, mlir::Value Src, cir::AddressSpaceAttr SrcAddr, + cir::AddressSpaceAttr DestAddr, mlir::Type DestTy, bool IsNonNull) const { // Since target may map different address spaces in AST to the same address // space, an address space conversion may end up as a bitcast. - if (auto globalOp = Src.getDefiningOp()) + if (auto globalOp = Src.getDefiningOp()) llvm_unreachable("Global ops addrspace cast NYI"); // Try to preserve the source's name to make IR more readable. return CGF.getBuilder().createAddrSpaceCast(Src, DestTy); diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index 7eb07c093833..98e660eec748 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -80,7 +80,7 @@ class TargetCIRGenInfo { const clang::VarDecl *D) const; /// Get the CIR address space for alloca. - virtual mlir::cir::AddressSpaceAttr getCIRAllocaAddressSpace() const { + virtual cir::AddressSpaceAttr getCIRAllocaAddressSpace() const { // Return the null attribute, which means the target does not care about the // alloca address space. return {}; @@ -93,13 +93,13 @@ class TargetCIRGenInfo { /// \param DestTy is the destination pointer type. /// \param IsNonNull is the flag indicating \p V is known to be non null. virtual mlir::Value performAddrSpaceCast(CIRGenFunction &CGF, mlir::Value V, - mlir::cir::AddressSpaceAttr SrcAddr, - mlir::cir::AddressSpaceAttr DestAddr, + cir::AddressSpaceAttr SrcAddr, + cir::AddressSpaceAttr DestAddr, mlir::Type DestTy, bool IsNonNull = false) const; /// Get CIR calling convention for OpenCL kernel. - virtual mlir::cir::CallingConv getOpenCLKernelCallingConv() const { + virtual cir::CallingConv getOpenCLKernelCallingConv() const { // OpenCL kernels are called via an explicit runtime API with arguments // set with clSetKernelArg(), not as normal sub-functions. // Return SPIR_KERNEL by default as the kernel calling convention to @@ -110,7 +110,7 @@ class TargetCIRGenInfo { // clSetKernelArg() might break depending on the target-specific // conventions; different targets might split structs passed as values // to multiple function arguments etc. - return mlir::cir::CallingConv::SpirKernel; + return cir::CallingConv::SpirKernel; } virtual ~TargetCIRGenInfo() {} diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index d383fc4a0a90..08d8d601b1ad 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -43,7 +43,7 @@ static void printFloatLiteral(mlir::AsmPrinter &p, llvm::APFloat value, static mlir::ParseResult parseFloatLiteral(mlir::AsmParser &parser, mlir::FailureOr &value, - mlir::cir::CIRFPTypeInterface fpType); + cir::CIRFPTypeInterface fpType); static mlir::ParseResult parseConstPtr(mlir::AsmParser &parser, mlir::IntegerAttr &value); @@ -54,13 +54,12 @@ static void printConstPtr(mlir::AsmPrinter &p, mlir::IntegerAttr value); #include "clang/CIR/Dialect/IR/CIROpsAttributes.cpp.inc" using namespace mlir; -using namespace mlir::cir; +using namespace cir; //===----------------------------------------------------------------------===// // CIR AST Attr helpers //===----------------------------------------------------------------------===// -namespace mlir { namespace cir { mlir::Attribute makeFuncDeclAttr(const clang::Decl *decl, @@ -88,7 +87,6 @@ mlir::Attribute makeFuncDeclAttr(const clang::Decl *decl, } } // namespace cir -} // namespace mlir //===----------------------------------------------------------------------===// // General CIR parsing / printing @@ -97,7 +95,7 @@ mlir::Attribute makeFuncDeclAttr(const clang::Decl *decl, Attribute CIRDialect::parseAttribute(DialectAsmParser &parser, Type type) const { llvm::SMLoc typeLoc = parser.getCurrentLocation(); - StringRef mnemonic; + llvm::StringRef mnemonic; Attribute genAttr; OptionalParseResult parseResult = generatedAttributeParser(parser, &mnemonic, type, genAttr); @@ -121,7 +119,7 @@ static void printStructMembers(mlir::AsmPrinter &printer, static ParseResult parseStructMembers(mlir::AsmParser &parser, mlir::ArrayAttr &members) { - SmallVector elts; + llvm::SmallVector elts; auto delimiter = AsmParser::Delimiter::Braces; auto result = parser.parseCommaSeparatedList(delimiter, [&]() { @@ -142,7 +140,7 @@ static ParseResult parseStructMembers(mlir::AsmParser &parser, LogicalResult ConstStructAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, mlir::Type type, ArrayAttr members) { - auto sTy = mlir::dyn_cast_if_present(type); + auto sTy = mlir::dyn_cast_if_present(type); if (!sTy) { emitError() << "expected !cir.struct type"; return failure(); @@ -352,8 +350,8 @@ LogicalResult FPAttr::verify(function_ref emitError, //===----------------------------------------------------------------------===// LogicalResult ComplexAttr::verify(function_ref emitError, - mlir::cir::ComplexType type, - mlir::TypedAttr real, mlir::TypedAttr imag) { + cir::ComplexType type, mlir::TypedAttr real, + mlir::TypedAttr imag) { auto elemTy = type.getElementTy(); if (real.getType() != elemTy) { emitError() << "type of the real part does not match the complex type"; @@ -425,7 +423,7 @@ CmpThreeWayInfoAttr::verify(function_ref emitError, LogicalResult DataMemberAttr::verify(function_ref emitError, - mlir::cir::DataMemberType ty, + cir::DataMemberType ty, std::optional memberIndex) { if (!memberIndex.has_value()) { // DataMemberAttr without a given index represents a null value. @@ -462,7 +460,7 @@ DataMemberAttr::verify(function_ref emitError, LogicalResult MethodAttr::verify(function_ref<::mlir::InFlightDiagnostic()> emitError, - mlir::cir::MethodType type, + cir::MethodType type, std::optional symbol, std::optional vtable_offset) { if (symbol.has_value() && vtable_offset.has_value()) { @@ -475,7 +473,7 @@ MethodAttr::verify(function_ref<::mlir::InFlightDiagnostic()> emitError, } Attribute MethodAttr::parse(AsmParser &parser, Type odsType) { - auto ty = mlir::cast(odsType); + auto ty = mlir::cast(odsType); if (parser.parseLess()) return {}; @@ -558,7 +556,7 @@ LogicalResult GlobalAnnotationValuesAttr::verify( "global op or func it annotates"; return failure(); } - auto annoPart = ::mlir::dyn_cast(annoEntry[1]); + auto annoPart = ::mlir::dyn_cast(annoEntry[1]); if (!annoPart) { emitError() << "The second element of GlobalAnnotationValuesAttr" "annotations array element must be of " @@ -586,18 +584,17 @@ std::string DynamicCastInfoAttr::getAlias() const { } LogicalResult DynamicCastInfoAttr::verify( - function_ref emitError, - mlir::cir::GlobalViewAttr srcRtti, mlir::cir::GlobalViewAttr destRtti, - mlir::FlatSymbolRefAttr runtimeFunc, mlir::FlatSymbolRefAttr badCastFunc, - mlir::cir::IntAttr offsetHint) { + function_ref emitError, cir::GlobalViewAttr srcRtti, + cir::GlobalViewAttr destRtti, mlir::FlatSymbolRefAttr runtimeFunc, + mlir::FlatSymbolRefAttr badCastFunc, cir::IntAttr offsetHint) { auto isRttiPtr = [](mlir::Type ty) { // RTTI pointers are !cir.ptr. - auto ptrTy = mlir::dyn_cast(ty); + auto ptrTy = mlir::dyn_cast(ty); if (!ptrTy) return false; - auto pointeeIntTy = mlir::dyn_cast(ptrTy.getPointee()); + auto pointeeIntTy = mlir::dyn_cast(ptrTy.getPointee()); if (!pointeeIntTy) return false; diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp index 86584ac67e18..d1b17ad6cf39 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp @@ -9,7 +9,7 @@ using namespace cir; // Support for StructLayout //===----------------------------------------------------------------------===// -StructLayout::StructLayout(mlir::cir::StructType ST, const CIRDataLayout &DL) +StructLayout::StructLayout(cir::StructType ST, const CIRDataLayout &DL) : StructSize(llvm::TypeSize::getFixed(0)) { assert(!ST.isIncomplete() && "Cannot get layout of opaque structs"); IsPadded = false; @@ -18,10 +18,10 @@ StructLayout::StructLayout(mlir::cir::StructType ST, const CIRDataLayout &DL) // Loop over each of the elements, placing them in memory. for (unsigned i = 0, e = NumElements; i != e; ++i) { mlir::Type Ty = ST.getMembers()[i]; - if (i == 0 && ::cir::MissingFeatures::typeIsScalableType()) + if (i == 0 && cir::MissingFeatures::typeIsScalableType()) llvm_unreachable("Scalable types are not yet supported in CIR"); - assert(!::cir::MissingFeatures::recordDeclIsPacked() && + assert(!cir::MissingFeatures::recordDeclIsPacked() && "Cannot identify packed structs"); const llvm::Align TyAlign = ST.getPacked() ? llvm::Align(1) : DL.getABITypeAlign(Ty); @@ -92,7 +92,7 @@ unsigned StructLayout::getElementContainingOffset(uint64_t FixedOffset) const { namespace { class StructLayoutMap { - using LayoutInfoTy = llvm::DenseMap; + using LayoutInfoTy = llvm::DenseMap; LayoutInfoTy LayoutInfo; public: @@ -105,9 +105,7 @@ class StructLayoutMap { } } - StructLayout *&operator[](mlir::cir::StructType STy) { - return LayoutInfo[STy]; - } + StructLayout *&operator[](cir::StructType STy) { return LayoutInfo[STy]; } }; } // namespace @@ -144,8 +142,7 @@ void CIRDataLayout::clear() { LayoutMap = nullptr; } -const StructLayout * -CIRDataLayout::getStructLayout(mlir::cir::StructType Ty) const { +const StructLayout *CIRDataLayout::getStructLayout(cir::StructType Ty) const { if (!LayoutMap) LayoutMap = new StructLayoutMap(); @@ -178,18 +175,18 @@ CIRDataLayout::getStructLayout(mlir::cir::StructType Ty) const { */ llvm::Align CIRDataLayout::getAlignment(mlir::Type Ty, bool abiOrPref) const { - if (llvm::isa(Ty)) { + if (llvm::isa(Ty)) { // Packed structure types always have an ABI alignment of one. - if (::cir::MissingFeatures::recordDeclIsPacked() && abiOrPref) + if (cir::MissingFeatures::recordDeclIsPacked() && abiOrPref) llvm_unreachable("NYI"); - auto stTy = llvm::dyn_cast(Ty); + auto stTy = llvm::dyn_cast(Ty); if (stTy && stTy.getPacked() && abiOrPref) return llvm::Align(1); // Get the layout annotation... which is lazily created on demand. const StructLayout *Layout = - getStructLayout(llvm::cast(Ty)); + getStructLayout(llvm::cast(Ty)); const llvm::Align Align = abiOrPref ? StructAlignment.ABIAlign : StructAlignment.PrefAlign; return std::max(Align, Layout->getAlignment()); @@ -197,7 +194,7 @@ llvm::Align CIRDataLayout::getAlignment(mlir::Type Ty, bool abiOrPref) const { // FIXME(cir): This does not account for differnt address spaces, and relies // on CIR's data layout to give the proper alignment. - assert(!::cir::MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); // Fetch type alignment from MLIR's data layout. unsigned align = abiOrPref ? layout.getTypeABIAlignment(Ty) @@ -208,10 +205,10 @@ llvm::Align CIRDataLayout::getAlignment(mlir::Type Ty, bool abiOrPref) const { // The implementation of this method is provided inline as it is particularly // well suited to constant folding when called on a specific Type subclass. llvm::TypeSize CIRDataLayout::getTypeSizeInBits(mlir::Type Ty) const { - assert(!::cir::MissingFeatures::typeIsSized() && + assert(!cir::MissingFeatures::typeIsSized() && "Cannot getTypeInfo() on a type that is unsized!"); - if (auto structTy = llvm::dyn_cast(Ty)) { + if (auto structTy = llvm::dyn_cast(Ty)) { // FIXME(cir): CIR struct's data layout implementation doesn't do a good job // of handling unions particularities. We should have a separate union type. @@ -229,7 +226,7 @@ llvm::TypeSize CIRDataLayout::getTypeSizeInBits(mlir::Type Ty) const { // FIXME(cir): This does not account for different address spaces, and relies // on CIR's data layout to give the proper ABI-specific type width. - assert(!::cir::MissingFeatures::addressSpace()); + assert(!cir::MissingFeatures::addressSpace()); return llvm::TypeSize::getFixed(layout.getTypeSizeInBits(Ty)); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 0dd68f57e7dd..5724b7213e5a 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -59,7 +59,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { using OpAsmDialectInterface::OpAsmDialectInterface; AliasResult getAlias(Type type, raw_ostream &os) const final { - if (auto structType = dyn_cast(type)) { + if (auto structType = dyn_cast(type)) { StringAttr nameAttr = structType.getName(); if (!nameAttr) os << "ty_anon_" << structType.getKindAsStr(); @@ -67,7 +67,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << "ty_" << nameAttr.getValue(); return AliasResult::OverridableAlias; } - if (auto intType = dyn_cast(type)) { + if (auto intType = dyn_cast(type)) { // We only provide alias for standard integer types (i.e. integer types // whose width is divisible by 8). if (intType.getWidth() % 8 != 0) @@ -75,7 +75,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << intType.getAlias(); return AliasResult::OverridableAlias; } - if (auto voidType = dyn_cast(type)) { + if (auto voidType = dyn_cast(type)) { os << voidType.getAlias(); return AliasResult::OverridableAlias; } @@ -84,26 +84,25 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { } AliasResult getAlias(Attribute attr, raw_ostream &os) const final { - if (auto boolAttr = mlir::dyn_cast(attr)) { + if (auto boolAttr = mlir::dyn_cast(attr)) { os << (boolAttr.getValue() ? "true" : "false"); return AliasResult::FinalAlias; } - if (auto bitfield = mlir::dyn_cast(attr)) { + if (auto bitfield = mlir::dyn_cast(attr)) { os << "bfi_" << bitfield.getName().str(); return AliasResult::FinalAlias; } if (auto extraFuncAttr = - mlir::dyn_cast(attr)) { + mlir::dyn_cast(attr)) { os << "fn_attr"; return AliasResult::FinalAlias; } if (auto cmpThreeWayInfoAttr = - mlir::dyn_cast(attr)) { + mlir::dyn_cast(attr)) { os << cmpThreeWayInfoAttr.getAlias(); return AliasResult::FinalAlias; } - if (auto dynCastInfoAttr = - mlir::dyn_cast(attr)) { + if (auto dynCastInfoAttr = mlir::dyn_cast(attr)) { os << dynCastInfoAttr.getAlias(); return AliasResult::FinalAlias; } @@ -115,7 +114,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { /// Dialect initialization, the instance will be owned by the context. This is /// the point of registration of types and operations for the dialect. -void mlir::cir::CIRDialect::initialize() { +void cir::CIRDialect::initialize() { registerTypes(); registerAttributes(); addOperations< @@ -125,12 +124,12 @@ void mlir::cir::CIRDialect::initialize() { addInterfaces(); } -Operation *mlir::cir::CIRDialect::materializeConstant(mlir::OpBuilder &builder, - mlir::Attribute value, - mlir::Type type, - mlir::Location loc) { - return builder.create( - loc, type, mlir::cast(value)); +Operation *cir::CIRDialect::materializeConstant(mlir::OpBuilder &builder, + mlir::Attribute value, + mlir::Type type, + mlir::Location loc) { + return builder.create(loc, type, + mlir::cast(value)); } //===----------------------------------------------------------------------===// @@ -141,7 +140,7 @@ Operation *mlir::cir::CIRDialect::materializeConstant(mlir::OpBuilder &builder, // position of the parsed keyword in the list. If none of the keywords from the // list is parsed, returns -1. static int parseOptionalKeywordAlternative(AsmParser &parser, - ArrayRef keywords) { + ArrayRef keywords) { for (auto en : llvm::enumerate(keywords)) { if (succeeded(parser.parseOptionalKeyword(en.value()))) return en.index(); @@ -153,17 +152,15 @@ namespace { template struct EnumTraits {}; #define REGISTER_ENUM_TYPE(Ty) \ - template <> struct EnumTraits { \ - static StringRef stringify(mlir::cir::Ty value) { \ + template <> struct EnumTraits { \ + static llvm::StringRef stringify(cir::Ty value) { \ return stringify##Ty(value); \ } \ - static unsigned getMaxEnumVal() { \ - return mlir::cir::getMaxEnumValFor##Ty(); \ - } \ + static unsigned getMaxEnumVal() { return cir::getMaxEnumValFor##Ty(); } \ } #define REGISTER_ENUM_TYPE_WITH_NS(NS, Ty) \ template <> struct EnumTraits { \ - static StringRef stringify(NS::Ty value) { \ + static llvm::StringRef stringify(NS::Ty value) { \ return NS::stringify##Ty(value); \ } \ static unsigned getMaxEnumVal() { return NS::getMaxEnumValFor##Ty(); } \ @@ -171,7 +168,7 @@ template struct EnumTraits {}; REGISTER_ENUM_TYPE(GlobalLinkageKind); REGISTER_ENUM_TYPE(CallingConv); -REGISTER_ENUM_TYPE_WITH_NS(mlir::cir::sob, SignedOverflowBehavior); +REGISTER_ENUM_TYPE_WITH_NS(cir::sob, SignedOverflowBehavior); } // namespace /// Parse an enum from the keyword, or default to the provided default value. @@ -180,7 +177,7 @@ REGISTER_ENUM_TYPE_WITH_NS(mlir::cir::sob, SignedOverflowBehavior); /// TODO: teach other places in this file to use this function. template static RetTy parseOptionalCIRKeyword(AsmParser &parser, EnumTy defaultValue) { - SmallVector names; + llvm::SmallVector names; for (unsigned i = 0, e = EnumTraits::getMaxEnumVal(); i <= e; ++i) names.push_back(EnumTraits::stringify(static_cast(i))); @@ -193,7 +190,7 @@ static RetTy parseOptionalCIRKeyword(AsmParser &parser, EnumTy defaultValue) { /// Parse an enum from the keyword, return failure if the keyword is not found. template static ParseResult parseCIRKeyword(AsmParser &parser, RetTy &result) { - SmallVector names; + llvm::SmallVector names; for (unsigned i = 0, e = EnumTraits::getMaxEnumVal(); i <= e; ++i) names.push_back(EnumTraits::stringify(static_cast(i))); @@ -224,7 +221,7 @@ LogicalResult ensureRegionTerm(OpAsmParser &parser, Region ®ion, return parser.emitError(errLoc, "empty region must not omit terminator"); // Terminator was omited correctly: recreate it. - region.back().push_back(builder.create(eLoc)); + region.back().push_back(builder.create(eLoc)); return success(); } @@ -232,19 +229,19 @@ LogicalResult ensureRegionTerm(OpAsmParser &parser, Region ®ion, bool omitRegionTerm(mlir::Region &r) { const auto singleNonEmptyBlock = r.hasOneBlock() && !r.back().empty(); const auto yieldsNothing = [&r]() { - auto y = dyn_cast(r.back().getTerminator()); + auto y = dyn_cast(r.back().getTerminator()); return y && y.getArgs().empty(); }; return singleNonEmptyBlock && yieldsNothing(); } void printVisibilityAttr(OpAsmPrinter &printer, - mlir::cir::VisibilityAttr &visibility) { + cir::VisibilityAttr &visibility) { switch (visibility.getValue()) { - case mlir::cir::VisibilityKind::Hidden: + case cir::VisibilityKind::Hidden: printer << "hidden"; break; - case mlir::cir::VisibilityKind::Protected: + case cir::VisibilityKind::Protected: printer << "protected"; break; default: @@ -252,20 +249,18 @@ void printVisibilityAttr(OpAsmPrinter &printer, } } -void parseVisibilityAttr(OpAsmParser &parser, - mlir::cir::VisibilityAttr &visibility) { - mlir::cir::VisibilityKind visibilityKind; +void parseVisibilityAttr(OpAsmParser &parser, cir::VisibilityAttr &visibility) { + cir::VisibilityKind visibilityKind; if (parser.parseOptionalKeyword("hidden").succeeded()) { - visibilityKind = mlir::cir::VisibilityKind::Hidden; + visibilityKind = cir::VisibilityKind::Hidden; } else if (parser.parseOptionalKeyword("protected").succeeded()) { - visibilityKind = mlir::cir::VisibilityKind::Protected; + visibilityKind = cir::VisibilityKind::Protected; } else { - visibilityKind = mlir::cir::VisibilityKind::Default; + visibilityKind = cir::VisibilityKind::Default; } - visibility = - mlir::cir::VisibilityAttr::get(parser.getContext(), visibilityKind); + visibility = cir::VisibilityAttr::get(parser.getContext(), visibilityKind); } //===----------------------------------------------------------------------===// @@ -283,7 +278,7 @@ static mlir::ParseResult parseOmittedTerminatorRegion(mlir::OpAsmParser &parser, } static void printOmittedTerminatorRegion(mlir::OpAsmPrinter &printer, - mlir::cir::ScopeOp &op, + cir::ScopeOp &op, mlir::Region ®ion) { printer.printRegion(region, /*printEntryBlockArgs=*/false, @@ -292,14 +287,14 @@ static void printOmittedTerminatorRegion(mlir::OpAsmPrinter &printer, static mlir::ParseResult parseOmitDefaultVisibility(mlir::OpAsmParser &parser, - mlir::cir::VisibilityAttr &visibility) { + cir::VisibilityAttr &visibility) { parseVisibilityAttr(parser, visibility); return success(); } static void printOmitDefaultVisibility(mlir::OpAsmPrinter &printer, - mlir::cir::GlobalOp &op, - mlir::cir::VisibilityAttr visibility) { + cir::GlobalOp &op, + cir::VisibilityAttr visibility) { printVisibilityAttr(printer, visibility); } @@ -307,11 +302,10 @@ static void printOmitDefaultVisibility(mlir::OpAsmPrinter &printer, // AllocaOp //===----------------------------------------------------------------------===// -void mlir::cir::AllocaOp::build(::mlir::OpBuilder &odsBuilder, - ::mlir::OperationState &odsState, - ::mlir::Type addr, ::mlir::Type allocaType, - ::llvm::StringRef name, - ::mlir::IntegerAttr alignment) { +void cir::AllocaOp::build(::mlir::OpBuilder &odsBuilder, + ::mlir::OperationState &odsState, ::mlir::Type addr, + ::mlir::Type allocaType, ::llvm::StringRef name, + ::mlir::IntegerAttr alignment) { odsState.addAttribute(getAllocaTypeAttrName(odsState.name), ::mlir::TypeAttr::get(allocaType)); odsState.addAttribute(getNameAttrName(odsState.name), @@ -326,7 +320,7 @@ void mlir::cir::AllocaOp::build(::mlir::OpBuilder &odsBuilder, // BreakOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::BreakOp::verify() { +LogicalResult cir::BreakOp::verify() { if (!getOperation()->getParentOfType() && !getOperation()->getParentOfType()) return emitOpError("must be within a loop or switch"); @@ -340,7 +334,7 @@ LogicalResult mlir::cir::BreakOp::verify() { //===---------------------------------- // BranchOpTerminatorInterface Methods -void mlir::cir::ConditionOp::getSuccessorRegions( +void cir::ConditionOp::getSuccessorRegions( ArrayRef operands, SmallVectorImpl ®ions) { // TODO(cir): The condition value may be folded to a constant, narrowing // down its list of possible successors. @@ -358,12 +352,12 @@ void mlir::cir::ConditionOp::getSuccessorRegions( } MutableOperandRange -mlir::cir::ConditionOp::getMutableSuccessorOperands(RegionBranchPoint point) { +cir::ConditionOp::getMutableSuccessorOperands(RegionBranchPoint point) { // No values are yielded to the successor region. return MutableOperandRange(getOperation(), 0, 0); } -LogicalResult mlir::cir::ConditionOp::verify() { +LogicalResult cir::ConditionOp::verify() { if (!isa(getOperation()->getParentOp())) return emitOpError("condition must be within a conditional region"); return success(); @@ -375,40 +369,39 @@ LogicalResult mlir::cir::ConditionOp::verify() { static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, mlir::Attribute attrType) { - if (isa(attrType)) { - if (::mlir::isa<::mlir::cir::PointerType>(opType)) + if (isa(attrType)) { + if (::mlir::isa(opType)) return success(); return op->emitOpError("nullptr expects pointer type"); } - if (isa(attrType)) { + if (isa(attrType)) { // More detailed type verifications are already done in // DataMemberAttr::verify. Don't need to repeat here. return success(); } - if (isa(attrType)) { - if (::mlir::isa<::mlir::cir::StructType, ::mlir::cir::ArrayType, - ::mlir::cir::ComplexType, ::mlir::cir::VectorType>(opType)) + if (isa(attrType)) { + if (::mlir::isa(opType)) return success(); return op->emitOpError("zero expects struct or array type"); } - if (isa(attrType)) { - if (!::mlir::isa<::mlir::cir::VoidType>(opType)) + if (isa(attrType)) { + if (!::mlir::isa(opType)) return success(); return op->emitOpError("undef expects non-void type"); } - if (mlir::isa(attrType)) { - if (!mlir::isa(opType)) + if (mlir::isa(attrType)) { + if (!mlir::isa(opType)) return op->emitOpError("result type (") << opType << ") must be '!cir.bool' for '" << attrType << "'"; return success(); } - if (mlir::isa( - attrType)) { + if (mlir::isa(attrType)) { auto at = cast(attrType); if (at.getType() != opType) { return op->emitOpError("result type (") @@ -419,19 +412,19 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, } if (isa(attrType)) { - if (::mlir::isa<::mlir::cir::PointerType>(opType)) + if (::mlir::isa(opType)) return success(); return op->emitOpError("symbolref expects pointer type"); } - if (mlir::isa(attrType) || - mlir::isa(attrType) || - mlir::isa(attrType) || - mlir::isa(attrType) || - mlir::isa(attrType) || - mlir::isa(attrType)) + if (mlir::isa(attrType) || + mlir::isa(attrType) || + mlir::isa(attrType) || + mlir::isa(attrType) || + mlir::isa(attrType) || + mlir::isa(attrType)) return success(); - if (mlir::isa(attrType)) + if (mlir::isa(attrType)) return success(); assert(isa(attrType) && "What else could we be looking at here?"); @@ -439,14 +432,14 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, << cast(attrType).getType() << " not supported"; } -LogicalResult mlir::cir::ConstantOp::verify() { +LogicalResult cir::ConstantOp::verify() { // ODS already generates checks to make sure the result type is valid. We just // need to additionally check that the value's attribute type is consistent // with the result type. return checkConstantTypes(getOperation(), getType(), getValue()); } -OpFoldResult mlir::cir::ConstantOp::fold(FoldAdaptor /*adaptor*/) { +OpFoldResult cir::ConstantOp::fold(FoldAdaptor /*adaptor*/) { return getValue(); } @@ -454,7 +447,7 @@ OpFoldResult mlir::cir::ConstantOp::fold(FoldAdaptor /*adaptor*/) { // ContinueOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::ContinueOp::verify() { +LogicalResult cir::ContinueOp::verify() { if (!this->getOperation()->getParentOfType()) return emitOpError("must be within a loop"); return success(); @@ -464,43 +457,43 @@ LogicalResult mlir::cir::ContinueOp::verify() { // CastOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::CastOp::verify() { +LogicalResult cir::CastOp::verify() { auto resType = getResult().getType(); auto srcType = getSrc().getType(); - if (mlir::isa(srcType) && - mlir::isa(resType)) { + if (mlir::isa(srcType) && + mlir::isa(resType)) { // Use the element type of the vector to verify the cast kind. (Except for // bitcast, see below.) - srcType = mlir::dyn_cast(srcType).getEltType(); - resType = mlir::dyn_cast(resType).getEltType(); + srcType = mlir::dyn_cast(srcType).getEltType(); + resType = mlir::dyn_cast(resType).getEltType(); } switch (getKind()) { - case mlir::cir::CastKind::int_to_bool: { - if (!mlir::isa(resType)) + case cir::CastKind::int_to_bool: { + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; - if (!mlir::isa(srcType)) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; return success(); } - case mlir::cir::CastKind::ptr_to_bool: { - if (!mlir::isa(resType)) + case cir::CastKind::ptr_to_bool: { + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; - if (!mlir::isa(srcType)) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.ptr type for source"; return success(); } - case mlir::cir::CastKind::integral: { - if (!mlir::isa(resType)) + case cir::CastKind::integral: { + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.int type for result"; - if (!mlir::isa(srcType)) + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; return success(); } - case mlir::cir::CastKind::array_to_ptrdecay: { - auto arrayPtrTy = mlir::dyn_cast(srcType); - auto flatPtrTy = mlir::dyn_cast(resType); + case cir::CastKind::array_to_ptrdecay: { + auto arrayPtrTy = mlir::dyn_cast(srcType); + auto flatPtrTy = mlir::dyn_cast(resType); if (!arrayPtrTy || !flatPtrTy) return emitOpError() << "requires !cir.ptr type for source and result"; @@ -509,8 +502,7 @@ LogicalResult mlir::cir::CastOp::verify() { << "requires same address space for source and result"; } - auto arrayTy = - mlir::dyn_cast(arrayPtrTy.getPointee()); + auto arrayTy = mlir::dyn_cast(arrayPtrTy.getPointee()); if (!arrayTy) return emitOpError() << "requires !cir.array pointee"; @@ -519,14 +511,14 @@ LogicalResult mlir::cir::CastOp::verify() { << "requires same type for array element and pointee result"; return success(); } - case mlir::cir::CastKind::bitcast: { + case cir::CastKind::bitcast: { // Allow bitcast of structs for calling conventions. if (isa(srcType) || isa(resType)) return success(); // Handle the pointer types first. - auto srcPtrTy = mlir::dyn_cast(srcType); - auto resPtrTy = mlir::dyn_cast(resType); + auto srcPtrTy = mlir::dyn_cast(srcType); + auto resPtrTy = mlir::dyn_cast(resType); if (srcPtrTy && resPtrTy) { if (srcPtrTy.getAddrSpace() != resPtrTy.getAddrSpace()) { @@ -538,180 +530,175 @@ LogicalResult mlir::cir::CastOp::verify() { // This is the only cast kind where we don't want vector types to decay // into the element type. - if ((!mlir::isa(getSrc().getType()) || - !mlir::isa(getResult().getType()))) + if ((!mlir::isa(getSrc().getType()) || + !mlir::isa(getResult().getType()))) return emitOpError() << "requires !cir.ptr or !cir.vector type for source and result"; return success(); } - case mlir::cir::CastKind::floating: { - if (!mlir::isa(srcType) || - !mlir::isa(resType)) + case cir::CastKind::floating: { + if (!mlir::isa(srcType) || + !mlir::isa(resType)) return emitOpError() << "requires !cir.float type for source and result"; return success(); } - case mlir::cir::CastKind::float_to_int: { - if (!mlir::isa(srcType)) + case cir::CastKind::float_to_int: { + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.float type for source"; - if (!mlir::dyn_cast(resType)) + if (!mlir::dyn_cast(resType)) return emitOpError() << "requires !cir.int type for result"; return success(); } - case mlir::cir::CastKind::int_to_ptr: { - if (!mlir::dyn_cast(srcType)) + case cir::CastKind::int_to_ptr: { + if (!mlir::dyn_cast(srcType)) return emitOpError() << "requires !cir.int type for source"; - if (!mlir::dyn_cast(resType)) + if (!mlir::dyn_cast(resType)) return emitOpError() << "requires !cir.ptr type for result"; return success(); } - case mlir::cir::CastKind::ptr_to_int: { - if (!mlir::dyn_cast(srcType)) + case cir::CastKind::ptr_to_int: { + if (!mlir::dyn_cast(srcType)) return emitOpError() << "requires !cir.ptr type for source"; - if (!mlir::dyn_cast(resType)) + if (!mlir::dyn_cast(resType)) return emitOpError() << "requires !cir.int type for result"; return success(); } - case mlir::cir::CastKind::float_to_bool: { - if (!mlir::isa(srcType)) + case cir::CastKind::float_to_bool: { + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.float type for source"; - if (!mlir::isa(resType)) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; return success(); } - case mlir::cir::CastKind::bool_to_int: { - if (!mlir::isa(srcType)) + case cir::CastKind::bool_to_int: { + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.bool type for source"; - if (!mlir::isa(resType)) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.int type for result"; return success(); } - case mlir::cir::CastKind::int_to_float: { - if (!mlir::isa(srcType)) + case cir::CastKind::int_to_float: { + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; - if (!mlir::isa(resType)) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.float type for result"; return success(); } - case mlir::cir::CastKind::bool_to_float: { - if (!mlir::isa(srcType)) + case cir::CastKind::bool_to_float: { + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.bool type for source"; - if (!mlir::isa(resType)) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.float type for result"; return success(); } - case mlir::cir::CastKind::address_space: { - auto srcPtrTy = mlir::dyn_cast(srcType); - auto resPtrTy = mlir::dyn_cast(resType); + case cir::CastKind::address_space: { + auto srcPtrTy = mlir::dyn_cast(srcType); + auto resPtrTy = mlir::dyn_cast(resType); if (!srcPtrTy || !resPtrTy) return emitOpError() << "requires !cir.ptr type for source and result"; if (srcPtrTy.getPointee() != resPtrTy.getPointee()) return emitOpError() << "requires two types differ in addrspace only"; return success(); } - case mlir::cir::CastKind::float_to_complex: { - if (!mlir::isa(srcType)) + case cir::CastKind::float_to_complex: { + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.float type for source"; - auto resComplexTy = mlir::dyn_cast(resType); + auto resComplexTy = mlir::dyn_cast(resType); if (!resComplexTy) return emitOpError() << "requires !cir.complex type for result"; if (srcType != resComplexTy.getElementTy()) return emitOpError() << "requires source type match result element type"; return success(); } - case mlir::cir::CastKind::int_to_complex: { - if (!mlir::isa(srcType)) + case cir::CastKind::int_to_complex: { + if (!mlir::isa(srcType)) return emitOpError() << "requires !cir.int type for source"; - auto resComplexTy = mlir::dyn_cast(resType); + auto resComplexTy = mlir::dyn_cast(resType); if (!resComplexTy) return emitOpError() << "requires !cir.complex type for result"; if (srcType != resComplexTy.getElementTy()) return emitOpError() << "requires source type match result element type"; return success(); } - case mlir::cir::CastKind::float_complex_to_real: { - auto srcComplexTy = mlir::dyn_cast(srcType); + case cir::CastKind::float_complex_to_real: { + auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy) return emitOpError() << "requires !cir.complex type for source"; - if (!mlir::isa(resType)) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.float type for result"; if (srcComplexTy.getElementTy() != resType) return emitOpError() << "requires source element type match result type"; return success(); } - case mlir::cir::CastKind::int_complex_to_real: { - auto srcComplexTy = mlir::dyn_cast(srcType); + case cir::CastKind::int_complex_to_real: { + auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy) return emitOpError() << "requires !cir.complex type for source"; - if (!mlir::isa(resType)) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.int type for result"; if (srcComplexTy.getElementTy() != resType) return emitOpError() << "requires source element type match result type"; return success(); } - case mlir::cir::CastKind::float_complex_to_bool: { - auto srcComplexTy = mlir::dyn_cast(srcType); + case cir::CastKind::float_complex_to_bool: { + auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy || - !mlir::isa(srcComplexTy.getElementTy())) + !mlir::isa(srcComplexTy.getElementTy())) return emitOpError() << "requires !cir.complex type for source"; - if (!mlir::isa(resType)) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; return success(); } - case mlir::cir::CastKind::int_complex_to_bool: { - auto srcComplexTy = mlir::dyn_cast(srcType); - if (!srcComplexTy || - !mlir::isa(srcComplexTy.getElementTy())) + case cir::CastKind::int_complex_to_bool: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy || !mlir::isa(srcComplexTy.getElementTy())) return emitOpError() << "requires !cir.complex type for source"; - if (!mlir::isa(resType)) + if (!mlir::isa(resType)) return emitOpError() << "requires !cir.bool type for result"; return success(); } - case mlir::cir::CastKind::float_complex: { - auto srcComplexTy = mlir::dyn_cast(srcType); + case cir::CastKind::float_complex: { + auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy || - !mlir::isa(srcComplexTy.getElementTy())) + !mlir::isa(srcComplexTy.getElementTy())) return emitOpError() << "requires !cir.complex type for source"; - auto resComplexTy = mlir::dyn_cast(resType); + auto resComplexTy = mlir::dyn_cast(resType); if (!resComplexTy || - !mlir::isa(resComplexTy.getElementTy())) + !mlir::isa(resComplexTy.getElementTy())) return emitOpError() << "requires !cir.complex type for result"; return success(); } - case mlir::cir::CastKind::float_complex_to_int_complex: { - auto srcComplexTy = mlir::dyn_cast(srcType); + case cir::CastKind::float_complex_to_int_complex: { + auto srcComplexTy = mlir::dyn_cast(srcType); if (!srcComplexTy || - !mlir::isa(srcComplexTy.getElementTy())) + !mlir::isa(srcComplexTy.getElementTy())) return emitOpError() << "requires !cir.complex type for source"; - auto resComplexTy = mlir::dyn_cast(resType); - if (!resComplexTy || - !mlir::isa(resComplexTy.getElementTy())) + auto resComplexTy = mlir::dyn_cast(resType); + if (!resComplexTy || !mlir::isa(resComplexTy.getElementTy())) return emitOpError() << "requires !cir.complex type for result"; return success(); } - case mlir::cir::CastKind::int_complex: { - auto srcComplexTy = mlir::dyn_cast(srcType); - if (!srcComplexTy || - !mlir::isa(srcComplexTy.getElementTy())) + case cir::CastKind::int_complex: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy || !mlir::isa(srcComplexTy.getElementTy())) return emitOpError() << "requires !cir.complex type for source"; - auto resComplexTy = mlir::dyn_cast(resType); - if (!resComplexTy || - !mlir::isa(resComplexTy.getElementTy())) + auto resComplexTy = mlir::dyn_cast(resType); + if (!resComplexTy || !mlir::isa(resComplexTy.getElementTy())) return emitOpError() << "requires !cir.complex type for result"; return success(); } - case mlir::cir::CastKind::int_complex_to_float_complex: { - auto srcComplexTy = mlir::dyn_cast(srcType); - if (!srcComplexTy || - !mlir::isa(srcComplexTy.getElementTy())) + case cir::CastKind::int_complex_to_float_complex: { + auto srcComplexTy = mlir::dyn_cast(srcType); + if (!srcComplexTy || !mlir::isa(srcComplexTy.getElementTy())) return emitOpError() << "requires !cir.complex type for source"; - auto resComplexTy = mlir::dyn_cast(resType); + auto resComplexTy = mlir::dyn_cast(resType); if (!resComplexTy || - !mlir::isa(resComplexTy.getElementTy())) + !mlir::isa(resComplexTy.getElementTy())) return emitOpError() << "requires !cir.complex type for result"; return success(); @@ -721,21 +708,20 @@ LogicalResult mlir::cir::CastOp::verify() { llvm_unreachable("Unknown CastOp kind?"); } -bool isIntOrBoolCast(mlir::cir::CastOp op) { +bool isIntOrBoolCast(cir::CastOp op) { auto kind = op.getKind(); - return kind == mlir::cir::CastKind::bool_to_int || - kind == mlir::cir::CastKind::int_to_bool || - kind == mlir::cir::CastKind::integral; + return kind == cir::CastKind::bool_to_int || + kind == cir::CastKind::int_to_bool || kind == cir::CastKind::integral; } -Value tryFoldCastChain(mlir::cir::CastOp op) { - mlir::cir::CastOp head = op, tail = op; +Value tryFoldCastChain(cir::CastOp op) { + cir::CastOp head = op, tail = op; while (op) { if (!isIntOrBoolCast(op)) break; head = op; - op = dyn_cast_or_null(head.getSrc().getDefiningOp()); + op = dyn_cast_or_null(head.getSrc().getDefiningOp()); } if (head == tail) @@ -743,36 +729,36 @@ Value tryFoldCastChain(mlir::cir::CastOp op) { // if bool_to_int -> ... -> int_to_bool: take the bool // as we had it was before all casts - if (head.getKind() == mlir::cir::CastKind::bool_to_int && - tail.getKind() == mlir::cir::CastKind::int_to_bool) + if (head.getKind() == cir::CastKind::bool_to_int && + tail.getKind() == cir::CastKind::int_to_bool) return head.getSrc(); // if int_to_bool -> ... -> int_to_bool: take the result // of the first one, as no other casts (and ext casts as well) // don't change the first result - if (head.getKind() == mlir::cir::CastKind::int_to_bool && - tail.getKind() == mlir::cir::CastKind::int_to_bool) + if (head.getKind() == cir::CastKind::int_to_bool && + tail.getKind() == cir::CastKind::int_to_bool) return head.getResult(); return {}; } -OpFoldResult mlir::cir::CastOp::fold(FoldAdaptor adaptor) { +OpFoldResult cir::CastOp::fold(FoldAdaptor adaptor) { if (getSrc().getType() == getResult().getType()) { switch (getKind()) { - case mlir::cir::CastKind::integral: { + case cir::CastKind::integral: { // TODO: for sign differences, it's possible in certain conditions to // create a new attribute that's capable of representing the source. - SmallVector foldResults; + llvm::SmallVector foldResults; auto foldOrder = getSrc().getDefiningOp()->fold(foldResults); if (foldOrder.succeeded() && foldResults[0].is()) return foldResults[0].get(); return {}; } - case mlir::cir::CastKind::bitcast: - case mlir::cir::CastKind::address_space: - case mlir::cir::CastKind::float_complex: - case mlir::cir::CastKind::int_complex: { + case cir::CastKind::bitcast: + case cir::CastKind::address_space: + case cir::CastKind::float_complex: + case cir::CastKind::int_complex: { return getSrc(); } default: @@ -782,9 +768,9 @@ OpFoldResult mlir::cir::CastOp::fold(FoldAdaptor adaptor) { return tryFoldCastChain(*this); } -static bool isBoolNot(mlir::cir::UnaryOp op) { - return isa(op.getInput().getType()) && - op.getKind() == mlir::cir::UnaryOpKind::Not; +static bool isBoolNot(cir::UnaryOp op) { + return isa(op.getInput().getType()) && + op.getKind() == cir::UnaryOpKind::Not; } // This folder simplifies the sequential boolean not operations. @@ -796,7 +782,7 @@ static bool isBoolNot(mlir::cir::UnaryOp op) { // ``` // // and the argument of the first one (%0) will be used instead. -OpFoldResult mlir::cir::UnaryOp::fold(FoldAdaptor adaptor) { +OpFoldResult cir::UnaryOp::fold(FoldAdaptor adaptor) { if (isBoolNot(*this)) if (auto previous = dyn_cast_or_null(getInput().getDefiningOp())) if (isBoolNot(previous)) @@ -809,10 +795,9 @@ OpFoldResult mlir::cir::UnaryOp::fold(FoldAdaptor adaptor) { // DynamicCastOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::DynamicCastOp::verify() { - auto resultPointeeTy = - mlir::cast(getType()).getPointee(); - if (!mlir::isa(resultPointeeTy)) +LogicalResult cir::DynamicCastOp::verify() { + auto resultPointeeTy = mlir::cast(getType()).getPointee(); + if (!mlir::isa(resultPointeeTy)) return emitOpError() << "cir.dyn_cast must produce a void ptr or struct ptr"; @@ -823,7 +808,7 @@ LogicalResult mlir::cir::DynamicCastOp::verify() { // ComplexCreateOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::ComplexCreateOp::verify() { +LogicalResult cir::ComplexCreateOp::verify() { if (getType().getElementTy() != getReal().getType()) { emitOpError() << "operand type of cir.complex.create does not match its result type"; @@ -833,7 +818,7 @@ LogicalResult mlir::cir::ComplexCreateOp::verify() { return success(); } -OpFoldResult mlir::cir::ComplexCreateOp::fold(FoldAdaptor adaptor) { +OpFoldResult cir::ComplexCreateOp::fold(FoldAdaptor adaptor) { auto real = adaptor.getReal(); auto imag = adaptor.getImag(); @@ -848,16 +833,15 @@ OpFoldResult mlir::cir::ComplexCreateOp::fold(FoldAdaptor adaptor) { assert(realAttr.getType() == imagAttr.getType() && "real part and imag part should be of the same type"); - auto complexTy = - mlir::cir::ComplexType::get(getContext(), realAttr.getType()); - return mlir::cir::ComplexAttr::get(complexTy, realAttr, imagAttr); + auto complexTy = cir::ComplexType::get(getContext(), realAttr.getType()); + return cir::ComplexAttr::get(complexTy, realAttr, imagAttr); } //===----------------------------------------------------------------------===// // ComplexRealOp and ComplexImagOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::ComplexRealOp::verify() { +LogicalResult cir::ComplexRealOp::verify() { if (getType() != getOperand().getType().getElementTy()) { emitOpError() << "cir.complex.real result type does not match operand type"; return failure(); @@ -865,15 +849,14 @@ LogicalResult mlir::cir::ComplexRealOp::verify() { return success(); } -OpFoldResult mlir::cir::ComplexRealOp::fold(FoldAdaptor adaptor) { - auto input = - mlir::cast_if_present(adaptor.getOperand()); +OpFoldResult cir::ComplexRealOp::fold(FoldAdaptor adaptor) { + auto input = mlir::cast_if_present(adaptor.getOperand()); if (input) return input.getReal(); return nullptr; } -LogicalResult mlir::cir::ComplexImagOp::verify() { +LogicalResult cir::ComplexImagOp::verify() { if (getType() != getOperand().getType().getElementTy()) { emitOpError() << "cir.complex.imag result type does not match operand type"; return failure(); @@ -881,9 +864,8 @@ LogicalResult mlir::cir::ComplexImagOp::verify() { return success(); } -OpFoldResult mlir::cir::ComplexImagOp::fold(FoldAdaptor adaptor) { - auto input = - mlir::cast_if_present(adaptor.getOperand()); +OpFoldResult cir::ComplexImagOp::fold(FoldAdaptor adaptor) { + auto input = mlir::cast_if_present(adaptor.getOperand()); if (input) return input.getImag(); return nullptr; @@ -893,13 +875,11 @@ OpFoldResult mlir::cir::ComplexImagOp::fold(FoldAdaptor adaptor) { // ComplexRealPtrOp and ComplexImagPtrOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::ComplexRealPtrOp::verify() { - auto resultPointeeTy = - mlir::cast(getType()).getPointee(); - auto operandPtrTy = - mlir::cast(getOperand().getType()); +LogicalResult cir::ComplexRealPtrOp::verify() { + auto resultPointeeTy = mlir::cast(getType()).getPointee(); + auto operandPtrTy = mlir::cast(getOperand().getType()); auto operandPointeeTy = - mlir::cast(operandPtrTy.getPointee()); + mlir::cast(operandPtrTy.getPointee()); if (resultPointeeTy != operandPointeeTy.getElementTy()) { emitOpError() @@ -910,13 +890,11 @@ LogicalResult mlir::cir::ComplexRealPtrOp::verify() { return success(); } -LogicalResult mlir::cir::ComplexImagPtrOp::verify() { - auto resultPointeeTy = - mlir::cast(getType()).getPointee(); - auto operandPtrTy = - mlir::cast(getOperand().getType()); +LogicalResult cir::ComplexImagPtrOp::verify() { + auto resultPointeeTy = mlir::cast(getType()).getPointee(); + auto operandPtrTy = mlir::cast(getOperand().getType()); auto operandPointeeTy = - mlir::cast(operandPtrTy.getPointee()); + mlir::cast(operandPtrTy.getPointee()); if (resultPointeeTy != operandPointeeTy.getElementTy()) { emitOpError() @@ -933,9 +911,9 @@ LogicalResult mlir::cir::ComplexImagPtrOp::verify() { // TODO(CIR): The final interface here should include an argument for the // SyncScope::ID. -void mlir::cir::LoadOp::setAtomic(mlir::cir::MemOrder order) { +void cir::LoadOp::setAtomic(cir::MemOrder order) { setMemOrder(order); - if (::cir::MissingFeatures::syncScopeID()) + if (cir::MissingFeatures::syncScopeID()) llvm_unreachable("NYI"); } @@ -945,9 +923,9 @@ void mlir::cir::LoadOp::setAtomic(mlir::cir::MemOrder order) { // TODO(CIR): The final interface here should include an argument for the // SyncScope::ID. -void mlir::cir::StoreOp::setAtomic(mlir::cir::MemOrder order) { +void cir::StoreOp::setAtomic(cir::MemOrder order) { setMemOrder(order); - if (::cir::MissingFeatures::syncScopeID()) + if (cir::MissingFeatures::syncScopeID()) llvm_unreachable("NYI"); } @@ -955,7 +933,7 @@ void mlir::cir::StoreOp::setAtomic(mlir::cir::MemOrder order) { // VecCreateOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::VecCreateOp::verify() { +LogicalResult cir::VecCreateOp::verify() { // Verify that the number of arguments matches the number of elements in the // vector, and that the type of all the arguments matches the type of the // elements in the vector. @@ -980,12 +958,12 @@ LogicalResult mlir::cir::VecCreateOp::verify() { // VecTernaryOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::VecTernaryOp::verify() { +LogicalResult cir::VecTernaryOp::verify() { // Verify that the condition operand has the same number of elements as the // other operands. (The automatic verification already checked that all // operands are vector types and that the second and third operands are the // same type.) - if (mlir::cast(getCond().getType()).getSize() != + if (mlir::cast(getCond().getType()).getSize() != getVec1().getType().getSize()) { return emitOpError() << ": the number of elements in " << getCond().getType() << " and " @@ -998,7 +976,7 @@ LogicalResult mlir::cir::VecTernaryOp::verify() { // VecShuffle //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::VecShuffleOp::verify() { +LogicalResult cir::VecShuffleOp::verify() { // The number of elements in the indices array must match the number of // elements in the result type. if (getIndices().size() != getResult().getType().getSize()) { @@ -1012,10 +990,9 @@ LogicalResult mlir::cir::VecShuffleOp::verify() { << " and " << getResult().getType() << " don't match"; } // The indices must all be integer constants - if (not std::all_of(getIndices().begin(), getIndices().end(), - [](mlir::Attribute attr) { - return mlir::isa(attr); - })) { + if (not std::all_of( + getIndices().begin(), getIndices().end(), + [](mlir::Attribute attr) { return mlir::isa(attr); })) { return emitOpError() << "all index values must be integers"; } return success(); @@ -1025,10 +1002,10 @@ LogicalResult mlir::cir::VecShuffleOp::verify() { // VecShuffleDynamic //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::VecShuffleDynamicOp::verify() { +LogicalResult cir::VecShuffleDynamicOp::verify() { // The number of elements in the two input vectors must match. if (getVec().getType().getSize() != - mlir::cast(getIndices().getType()).getSize()) { + mlir::cast(getIndices().getType()).getSize()) { return emitOpError() << ": the number of elements in " << getVec().getType() << " and " << getIndices().getType() << " don't match"; } @@ -1039,8 +1016,8 @@ LogicalResult mlir::cir::VecShuffleDynamicOp::verify() { // ReturnOp //===----------------------------------------------------------------------===// -static mlir::LogicalResult checkReturnAndFunction(mlir::cir::ReturnOp op, - mlir::cir::FuncOp function) { +static mlir::LogicalResult checkReturnAndFunction(cir::ReturnOp op, + cir::FuncOp function) { // ReturnOps currently only have a single optional operand. if (op.getNumOperands() > 1) return op.emitOpError() << "expects at most 1 return operand"; @@ -1048,7 +1025,7 @@ static mlir::LogicalResult checkReturnAndFunction(mlir::cir::ReturnOp op, // Ensure returned type matches the function signature. auto expectedTy = function.getFunctionType().getReturnType(); auto actualTy = - (op.getNumOperands() == 0 ? mlir::cir::VoidType::get(op.getContext()) + (op.getNumOperands() == 0 ? cir::VoidType::get(op.getContext()) : op.getOperand(0).getType()); if (actualTy != expectedTy) return op.emitOpError() << "returns " << actualTy @@ -1057,15 +1034,15 @@ static mlir::LogicalResult checkReturnAndFunction(mlir::cir::ReturnOp op, return mlir::success(); } -mlir::LogicalResult mlir::cir::ReturnOp::verify() { +mlir::LogicalResult cir::ReturnOp::verify() { // Returns can be present in multiple different scopes, get the // wrapping function and start from there. auto *fnOp = getOperation()->getParentOp(); - while (!isa(fnOp)) + while (!isa(fnOp)) fnOp = fnOp->getParentOp(); // Make sure return types match function return type. - if (checkReturnAndFunction(*this, cast(fnOp)).failed()) + if (checkReturnAndFunction(*this, cast(fnOp)).failed()) return failure(); return success(); @@ -1075,7 +1052,7 @@ mlir::LogicalResult mlir::cir::ReturnOp::verify() { // ThrowOp //===----------------------------------------------------------------------===// -mlir::LogicalResult mlir::cir::ThrowOp::verify() { +mlir::LogicalResult cir::ThrowOp::verify() { // For the no-rethrow version, it must have at least the exception pointer. if (rethrows()) return success(); @@ -1093,8 +1070,7 @@ mlir::LogicalResult mlir::cir::ThrowOp::verify() { // IfOp //===----------------------------------------------------------------------===// -ParseResult mlir::cir::IfOp::parse(OpAsmParser &parser, - OperationState &result) { +ParseResult cir::IfOp::parse(OpAsmParser &parser, OperationState &result) { // Create the regions for 'then'. result.regions.reserve(2); Region *thenRegion = result.addRegion(); @@ -1102,7 +1078,7 @@ ParseResult mlir::cir::IfOp::parse(OpAsmParser &parser, auto &builder = parser.getBuilder(); OpAsmParser::UnresolvedOperand cond; - Type boolType = ::mlir::cir::BoolType::get(builder.getContext()); + Type boolType = cir::BoolType::get(builder.getContext()); if (parser.parseOperand(cond) || parser.resolveOperand(cond, boolType, result.operands)) @@ -1131,7 +1107,7 @@ ParseResult mlir::cir::IfOp::parse(OpAsmParser &parser, return success(); } -void mlir::cir::IfOp::print(OpAsmPrinter &p) { +void cir::IfOp::print(OpAsmPrinter &p) { p << " " << getCondition() << " "; auto &thenRegion = this->getThenRegion(); p.printRegion(thenRegion, @@ -1151,15 +1127,15 @@ void mlir::cir::IfOp::print(OpAsmPrinter &p) { } /// Default callback for IfOp builders. Inserts nothing for now. -void mlir::cir::buildTerminatedBody(OpBuilder &builder, Location loc) {} +void cir::buildTerminatedBody(OpBuilder &builder, Location loc) {} /// Given the region at `index`, or the parent operation if `index` is None, /// return the successor regions. These are the regions that may be selected /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void mlir::cir::IfOp::getSuccessorRegions( - mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { +void cir::IfOp::getSuccessorRegions(mlir::RegionBranchPoint point, + SmallVectorImpl ®ions) { // The `then` and the `else` region branch back to the parent operation. if (!point.isParent()) { regions.push_back(RegionSuccessor()); @@ -1190,10 +1166,10 @@ void mlir::cir::IfOp::getSuccessorRegions( return; } -void mlir::cir::IfOp::build( - OpBuilder &builder, OperationState &result, Value cond, bool withElseRegion, - function_ref thenBuilder, - function_ref elseBuilder) { +void cir::IfOp::build(OpBuilder &builder, OperationState &result, Value cond, + bool withElseRegion, + function_ref thenBuilder, + function_ref elseBuilder) { assert(thenBuilder && "the builder callback for 'then' must be present"); result.addOperands(cond); @@ -1211,7 +1187,7 @@ void mlir::cir::IfOp::build( elseBuilder(builder, result.location); } -LogicalResult mlir::cir::IfOp::verify() { return success(); } +LogicalResult cir::IfOp::verify() { return success(); } //===----------------------------------------------------------------------===// // ScopeOp @@ -1222,7 +1198,7 @@ LogicalResult mlir::cir::IfOp::verify() { return success(); } /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void mlir::cir::ScopeOp::getSuccessorRegions( +void cir::ScopeOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // The only region always branch back to the parent operation. if (!point.isParent()) { @@ -1234,7 +1210,7 @@ void mlir::cir::ScopeOp::getSuccessorRegions( regions.push_back(RegionSuccessor(&getScopeRegion())); } -void mlir::cir::ScopeOp::build( +void cir::ScopeOp::build( OpBuilder &builder, OperationState &result, function_ref scopeBuilder) { assert(scopeBuilder && "the builder callback for 'then' must be present"); @@ -1250,7 +1226,7 @@ void mlir::cir::ScopeOp::build( result.addTypes(TypeRange{yieldTy}); } -void mlir::cir::ScopeOp::build( +void cir::ScopeOp::build( OpBuilder &builder, OperationState &result, function_ref scopeBuilder) { assert(scopeBuilder && "the builder callback for 'then' must be present"); @@ -1260,13 +1236,13 @@ void mlir::cir::ScopeOp::build( scopeBuilder(builder, result.location); } -LogicalResult mlir::cir::ScopeOp::verify() { return success(); } +LogicalResult cir::ScopeOp::verify() { return success(); } //===----------------------------------------------------------------------===// // TryOp //===----------------------------------------------------------------------===// -void mlir::cir::TryOp::build( +void cir::TryOp::build( OpBuilder &builder, OperationState &result, function_ref tryBodyBuilder, function_ref catchBuilder) { @@ -1283,28 +1259,27 @@ void mlir::cir::TryOp::build( catchBuilder(builder, result.location, result); } -mlir::Region *mlir::cir::TryOp::getCatchLastRegion() { +mlir::Region *cir::TryOp::getCatchLastRegion() { unsigned numCatchRegions = getCatchRegions().size(); assert(numCatchRegions && "expected at least one region"); auto &lastRegion = getCatchRegions()[numCatchRegions - 1]; return &lastRegion; } -mlir::Block *mlir::cir::TryOp::getCatchUnwindEntryBlock() { +mlir::Block *cir::TryOp::getCatchUnwindEntryBlock() { return &getCatchLastRegion()->getBlocks().front(); } -mlir::Block *mlir::cir::TryOp::getCatchAllEntryBlock() { +mlir::Block *cir::TryOp::getCatchAllEntryBlock() { return &getCatchLastRegion()->getBlocks().front(); } -bool mlir::cir::TryOp::isCatchAllOnly() { +bool cir::TryOp::isCatchAllOnly() { mlir::ArrayAttr catchAttrList = getCatchTypesAttr(); - return catchAttrList.size() == 1 && - isa(catchAttrList[0]); + return catchAttrList.size() == 1 && isa(catchAttrList[0]); } -void mlir::cir::TryOp::getSuccessorRegions( +void cir::TryOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // If any index all the underlying regions branch back to the parent // operation. @@ -1323,7 +1298,7 @@ void mlir::cir::TryOp::getSuccessorRegions( regions.push_back(RegionSuccessor(&r)); } -void printCatchRegions(OpAsmPrinter &p, mlir::cir::TryOp op, +void printCatchRegions(OpAsmPrinter &p, cir::TryOp op, mlir::MutableArrayRef<::mlir::Region> regions, mlir::ArrayAttr catchList) { @@ -1334,7 +1309,7 @@ void printCatchRegions(OpAsmPrinter &p, mlir::cir::TryOp op, llvm::interleaveComma(catchList, p, [&](const Attribute &a) { auto exRtti = a; - if (mlir::isa(a)) { + if (mlir::isa(a)) { p.printAttribute(a); p << " "; } else if (!exRtti) { @@ -1355,7 +1330,7 @@ ParseResult parseCatchRegions( OpAsmParser &parser, llvm::SmallVectorImpl> ®ions, ::mlir::ArrayAttr &catchersAttr) { - SmallVector catchList; + llvm::SmallVector catchList; auto parseAndCheckRegion = [&]() -> ParseResult { // Parse region attached to catch @@ -1424,7 +1399,7 @@ ParseResult parseCatchRegions( /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void mlir::cir::TernaryOp::getSuccessorRegions( +void cir::TernaryOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // The `true` and the `false` region branch back to the parent operation. if (!point.isParent()) { @@ -1443,7 +1418,7 @@ void mlir::cir::TernaryOp::getSuccessorRegions( return; } -void mlir::cir::TernaryOp::build( +void cir::TernaryOp::build( OpBuilder &builder, OperationState &result, Value cond, function_ref trueBuilder, function_ref falseBuilder) { @@ -1467,10 +1442,10 @@ void mlir::cir::TernaryOp::build( // SelectOp //===----------------------------------------------------------------------===// -OpFoldResult mlir::cir::SelectOp::fold(FoldAdaptor adaptor) { +OpFoldResult cir::SelectOp::fold(FoldAdaptor adaptor) { auto condition = adaptor.getCondition(); if (condition) { - auto conditionValue = mlir::cast(condition).getValue(); + auto conditionValue = mlir::cast(condition).getValue(); return conditionValue ? getTrueValue() : getFalseValue(); } @@ -1489,12 +1464,12 @@ OpFoldResult mlir::cir::SelectOp::fold(FoldAdaptor adaptor) { // BrOp //===----------------------------------------------------------------------===// -mlir::SuccessorOperands mlir::cir::BrOp::getSuccessorOperands(unsigned index) { +mlir::SuccessorOperands cir::BrOp::getSuccessorOperands(unsigned index) { assert(index == 0 && "invalid successor index"); return mlir::SuccessorOperands(getDestOperandsMutable()); } -Block *mlir::cir::BrOp::getSuccessorForOperands(ArrayRef) { +Block *cir::BrOp::getSuccessorForOperands(ArrayRef) { return getDest(); } @@ -1502,15 +1477,13 @@ Block *mlir::cir::BrOp::getSuccessorForOperands(ArrayRef) { // BrCondOp //===----------------------------------------------------------------------===// -mlir::SuccessorOperands -mlir::cir::BrCondOp::getSuccessorOperands(unsigned index) { +mlir::SuccessorOperands cir::BrCondOp::getSuccessorOperands(unsigned index) { assert(index < getNumSuccessors() && "invalid successor index"); return SuccessorOperands(index == 0 ? getDestOperandsTrueMutable() : getDestOperandsFalseMutable()); } -Block * -mlir::cir::BrCondOp::getSuccessorForOperands(ArrayRef operands) { +Block *cir::BrCondOp::getSuccessorForOperands(ArrayRef operands) { if (IntegerAttr condAttr = dyn_cast_if_present(operands.front())) return condAttr.getValue().isOne() ? getDestTrue() : getDestFalse(); return nullptr; @@ -1520,7 +1493,7 @@ mlir::cir::BrCondOp::getSuccessorForOperands(ArrayRef operands) { // CaseOp //===----------------------------------------------------------------------===// -void mlir::cir::CaseOp::getSuccessorRegions( +void cir::CaseOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { if (!point.isParent()) { regions.push_back(RegionSuccessor()); @@ -1530,20 +1503,20 @@ void mlir::cir::CaseOp::getSuccessorRegions( regions.push_back(RegionSuccessor(&getCaseRegion())); } -void mlir::cir::CaseOp::build(OpBuilder &builder, OperationState &result, - ArrayAttr value, CaseOpKind kind, - OpBuilder::InsertPoint &insertPoint) { +void cir::CaseOp::build(OpBuilder &builder, OperationState &result, + ArrayAttr value, CaseOpKind kind, + OpBuilder::InsertPoint &insertPoint) { OpBuilder::InsertionGuard guardSwitch(builder); result.addAttribute("value", value); result.getOrAddProperties().kind = - ::mlir::cir::CaseOpKindAttr::get(builder.getContext(), kind); + cir::CaseOpKindAttr::get(builder.getContext(), kind); Region *caseRegion = result.addRegion(); builder.createBlock(caseRegion); insertPoint = builder.saveInsertionPoint(); } -LogicalResult mlir::cir::CaseOp::verify() { return success(); } +LogicalResult cir::CaseOp::verify() { return success(); } //===----------------------------------------------------------------------===// // SwitchOp @@ -1552,7 +1525,7 @@ LogicalResult mlir::cir::CaseOp::verify() { return success(); } ParseResult parseSwitchOp(OpAsmParser &parser, mlir::Region ®ions, mlir::OpAsmParser::UnresolvedOperand &cond, mlir::Type &condType) { - mlir::cir::IntType intCondType; + cir::IntType intCondType; if (parser.parseLParen()) return ::mlir::failure(); @@ -1572,9 +1545,8 @@ ParseResult parseSwitchOp(OpAsmParser &parser, mlir::Region ®ions, return ::mlir::success(); } -void printSwitchOp(OpAsmPrinter &p, mlir::cir::SwitchOp op, - mlir::Region &bodyRegion, mlir::Value condition, - mlir::Type condType) { +void printSwitchOp(OpAsmPrinter &p, cir::SwitchOp op, mlir::Region &bodyRegion, + mlir::Value condition, mlir::Type condType) { p << "("; p << condition; p << " : "; @@ -1586,7 +1558,7 @@ void printSwitchOp(OpAsmPrinter &p, mlir::cir::SwitchOp op, /*printBlockTerminators=*/true); } -void mlir::cir::SwitchOp::getSuccessorRegions( +void cir::SwitchOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // If any index all the underlying regions branch back to the parent // operation. @@ -1598,9 +1570,9 @@ void mlir::cir::SwitchOp::getSuccessorRegions( regions.push_back(RegionSuccessor(&getBody())); } -LogicalResult mlir::cir::SwitchOp::verify() { return success(); } +LogicalResult cir::SwitchOp::verify() { return success(); } -void mlir::cir::SwitchOp::build( +void cir::SwitchOp::build( OpBuilder &builder, OperationState &result, Value cond, function_ref switchBuilder) { assert(switchBuilder && "the builder callback for regions must be present"); @@ -1611,20 +1583,20 @@ void mlir::cir::SwitchOp::build( switchBuilder(builder, result.location, result); } -void mlir::cir::SwitchOp::collectCases(llvm::SmallVector &cases) { +void cir::SwitchOp::collectCases(llvm::SmallVector &cases) { walk([&](mlir::Operation *op) { // Don't walk in nested switch op. - if (isa(op) && op != *this) + if (isa(op) && op != *this) return WalkResult::skip(); - if (isa(op)) - cases.push_back(cast(*op)); + if (isa(op)) + cases.push_back(cast(*op)); return WalkResult::advance(); }); } -bool mlir::cir::SwitchOp::isSimpleForm(llvm::SmallVector &cases) { +bool cir::SwitchOp::isSimpleForm(llvm::SmallVector &cases) { collectCases(cases); if (getBody().empty()) @@ -1646,16 +1618,16 @@ bool mlir::cir::SwitchOp::isSimpleForm(llvm::SmallVector &cases) { // SwitchFlatOp //===----------------------------------------------------------------------===// -void mlir::cir::SwitchFlatOp::build(OpBuilder &builder, OperationState &result, - Value value, Block *defaultDestination, - ValueRange defaultOperands, - ArrayRef caseValues, - BlockRange caseDestinations, - ArrayRef caseOperands) { +void cir::SwitchFlatOp::build(OpBuilder &builder, OperationState &result, + Value value, Block *defaultDestination, + ValueRange defaultOperands, + ArrayRef caseValues, + BlockRange caseDestinations, + ArrayRef caseOperands) { std::vector caseValuesAttrs; for (auto &val : caseValues) { - caseValuesAttrs.push_back(mlir::cir::IntAttr::get(value.getType(), val)); + caseValuesAttrs.push_back(cir::IntAttr::get(value.getType(), val)); } auto attrs = ArrayAttr::get(builder.getContext(), caseValuesAttrs); @@ -1668,24 +1640,25 @@ void mlir::cir::SwitchFlatOp::build(OpBuilder &builder, OperationState &result, static ParseResult parseSwitchFlatOpCases( OpAsmParser &parser, Type flagType, mlir::ArrayAttr &caseValues, SmallVectorImpl &caseDestinations, - SmallVectorImpl> &caseOperands, - SmallVectorImpl> &caseOperandTypes) { + SmallVectorImpl> + &caseOperands, + SmallVectorImpl> &caseOperandTypes) { if (failed(parser.parseLSquare())) return failure(); if (succeeded(parser.parseOptionalRSquare())) return success(); - SmallVector values; + llvm::SmallVector values; auto parseCase = [&]() { int64_t value = 0; if (failed(parser.parseInteger(value))) return failure(); - values.push_back(mlir::cir::IntAttr::get(flagType, value)); + values.push_back(cir::IntAttr::get(flagType, value)); Block *destination; - SmallVector operands; - SmallVector operandTypes; + llvm::SmallVector operands; + llvm::SmallVector operandTypes; if (parser.parseColon() || parser.parseSuccessor(destination)) return failure(); if (!parser.parseOptionalLParen()) { @@ -1707,7 +1680,7 @@ static ParseResult parseSwitchFlatOpCases( return parser.parseRSquare(); } -static void printSwitchFlatOpCases(OpAsmPrinter &p, mlir::cir::SwitchFlatOp op, +static void printSwitchFlatOpCases(OpAsmPrinter &p, cir::SwitchFlatOp op, Type flagType, mlir::ArrayAttr caseValues, SuccessorRange caseDestinations, OperandRangeRange caseOperands, @@ -1725,7 +1698,7 @@ static void printSwitchFlatOpCases(OpAsmPrinter &p, mlir::cir::SwitchFlatOp op, [&](auto i) { p << " "; mlir::Attribute a = std::get<0>(i); - p << mlir::cast(a).getValue(); + p << mlir::cast(a).getValue(); p << ": "; p.printSuccessorAndUseList(std::get<1>(i), caseOperands[index++]); }, @@ -1741,33 +1714,33 @@ static void printSwitchFlatOpCases(OpAsmPrinter &p, mlir::cir::SwitchFlatOp op, // LoopOpInterface Methods //===----------------------------------------------------------------------===// -void mlir::cir::DoWhileOp::getSuccessorRegions( +void cir::DoWhileOp::getSuccessorRegions( ::mlir::RegionBranchPoint point, ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> ®ions) { LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); } -::llvm::SmallVector mlir::cir::DoWhileOp::getLoopRegions() { +::llvm::SmallVector cir::DoWhileOp::getLoopRegions() { return {&getBody()}; } -void mlir::cir::WhileOp::getSuccessorRegions( +void cir::WhileOp::getSuccessorRegions( ::mlir::RegionBranchPoint point, ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> ®ions) { LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); } -::llvm::SmallVector mlir::cir::WhileOp::getLoopRegions() { +::llvm::SmallVector cir::WhileOp::getLoopRegions() { return {&getBody()}; } -void mlir::cir::ForOp::getSuccessorRegions( +void cir::ForOp::getSuccessorRegions( ::mlir::RegionBranchPoint point, ::llvm::SmallVectorImpl<::mlir::RegionSuccessor> ®ions) { LoopOpInterface::getLoopOpSuccessorRegions(*this, point, regions); } -::llvm::SmallVector mlir::cir::ForOp::getLoopRegions() { +::llvm::SmallVector cir::ForOp::getLoopRegions() { return {&getBody()}; } @@ -1788,18 +1761,16 @@ static void printConstant(OpAsmPrinter &p, Attribute value) { } static ParseResult -parseGlobalOpAddrSpace(OpAsmParser &p, - mlir::cir::AddressSpaceAttr &addrSpaceAttr) { +parseGlobalOpAddrSpace(OpAsmParser &p, cir::AddressSpaceAttr &addrSpaceAttr) { return parseAddrSpaceAttribute(p, addrSpaceAttr); } -static void printGlobalOpAddrSpace(OpAsmPrinter &p, mlir::cir::GlobalOp op, - mlir::cir::AddressSpaceAttr addrSpaceAttr) { +static void printGlobalOpAddrSpace(OpAsmPrinter &p, cir::GlobalOp op, + cir::AddressSpaceAttr addrSpaceAttr) { printAddrSpaceAttribute(p, addrSpaceAttr); } -static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, - mlir::cir::GlobalOp op, +static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, cir::GlobalOp op, TypeAttr type, Attribute initAttr, mlir::Region &ctorRegion, mlir::Region &dtorRegion) { @@ -1892,7 +1863,7 @@ static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, return success(); } -LogicalResult mlir::cir::GlobalOp::verify() { +LogicalResult cir::GlobalOp::verify() { // Verify that the initial value, if present, is either a unit attribute or // an attribute CIR supports. if (getInitialValue().has_value()) { @@ -1969,10 +1940,10 @@ LogicalResult mlir::cir::GlobalOp::verify() { return success(); } -void mlir::cir::GlobalOp::build( - OpBuilder &odsBuilder, OperationState &odsState, StringRef sym_name, - Type sym_type, bool isConstant, mlir::cir::GlobalLinkageKind linkage, - mlir::cir::AddressSpaceAttr addrSpace, +void cir::GlobalOp::build( + OpBuilder &odsBuilder, OperationState &odsState, llvm::StringRef sym_name, + Type sym_type, bool isConstant, cir::GlobalLinkageKind linkage, + cir::AddressSpaceAttr addrSpace, function_ref ctorBuilder, function_ref dtorBuilder) { odsState.addAttribute(getSymNameAttrName(odsState.name), @@ -1983,8 +1954,8 @@ void mlir::cir::GlobalOp::build( odsState.addAttribute(getConstantAttrName(odsState.name), odsBuilder.getUnitAttr()); - ::mlir::cir::GlobalLinkageKindAttr linkageAttr = - mlir::cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage); + cir::GlobalLinkageKindAttr linkageAttr = + cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage); odsState.addAttribute(getLinkageAttrName(odsState.name), linkageAttr); if (addrSpace) @@ -2002,9 +1973,8 @@ void mlir::cir::GlobalOp::build( dtorBuilder(odsBuilder, odsState.location); } - odsState.addAttribute( - getGlobalVisibilityAttrName(odsState.name), - mlir::cir::VisibilityAttr::get(odsBuilder.getContext())); + odsState.addAttribute(getGlobalVisibilityAttrName(odsState.name), + cir::VisibilityAttr::get(odsBuilder.getContext())); } /// Given the region at `index`, or the parent operation if `index` is None, @@ -2012,7 +1982,7 @@ void mlir::cir::GlobalOp::build( /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void mlir::cir::GlobalOp::getSuccessorRegions( +void cir::GlobalOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // The `ctor` and `dtor` regions always branch back to the parent operation. if (!point.isParent()) { @@ -2042,7 +2012,7 @@ void mlir::cir::GlobalOp::getSuccessorRegions( //===----------------------------------------------------------------------===// LogicalResult -mlir::cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // Verify that the result type underlying pointer type matches the type of // the referenced cir.global or cir.func op. auto op = symbolTable.lookupNearestSymbolFrom(*this, getNameAttr()); @@ -2052,7 +2022,7 @@ mlir::cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { << "' does not reference a valid cir.global or cir.func"; mlir::Type symTy; - mlir::cir::AddressSpaceAttr symAddrSpace{}; + cir::AddressSpaceAttr symAddrSpace{}; if (auto g = dyn_cast(op)) { symTy = g.getSymType(); symAddrSpace = g.getAddrSpaceAttr(); @@ -2085,8 +2055,8 @@ mlir::cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // VTableAddrPointOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::VTableAddrPointOp::verifySymbolUses( - SymbolTableCollection &symbolTable) { +LogicalResult +cir::VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // vtable ptr is not coming from a symbol. if (!getName()) return success(); @@ -2102,13 +2072,13 @@ LogicalResult mlir::cir::VTableAddrPointOp::verifySymbolUses( auto init = op.getInitialValue(); if (!init) return success(); - if (!isa(*init)) + if (!isa(*init)) return emitOpError("Expected #cir.vtable in initializer for global '") << name << "'"; return success(); } -LogicalResult mlir::cir::VTableAddrPointOp::verify() { +LogicalResult cir::VTableAddrPointOp::verify() { // The operation uses either a symbol or a value to operate, but not both if (getName() && getSymAddr()) return emitOpError("should use either a symbol or value, but not both"); @@ -2118,11 +2088,11 @@ LogicalResult mlir::cir::VTableAddrPointOp::verify() { return success(); auto resultType = getAddr().getType(); - auto intTy = mlir::cir::IntType::get(getContext(), 32, /*isSigned=*/false); - auto fnTy = mlir::cir::FuncType::get({}, intTy); + auto intTy = cir::IntType::get(getContext(), 32, /*isSigned=*/false); + auto fnTy = cir::FuncType::get({}, intTy); - auto resTy = mlir::cir::PointerType::get( - getContext(), mlir::cir::PointerType::get(getContext(), fnTy)); + auto resTy = cir::PointerType::get(getContext(), + cir::PointerType::get(getContext(), fnTy)); if (resultType != resTy) return emitOpError("result type must be '") @@ -2134,8 +2104,8 @@ LogicalResult mlir::cir::VTableAddrPointOp::verify() { // VTTAddrPointOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::VTTAddrPointOp::verifySymbolUses( - SymbolTableCollection &symbolTable) { +LogicalResult +cir::VTTAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // VTT ptr is not coming from a symbol. if (!getName()) return success(); @@ -2151,13 +2121,13 @@ LogicalResult mlir::cir::VTTAddrPointOp::verifySymbolUses( auto init = op.getInitialValue(); if (!init) return success(); - if (!isa(*init)) + if (!isa(*init)) return emitOpError("Expected array in initializer for global VTT'") << name << "'"; return success(); } -LogicalResult mlir::cir::VTTAddrPointOp::verify() { +LogicalResult cir::VTTAddrPointOp::verify() { // The operation uses either a symbol or a value to operate, but not both if (getName() && getSymAddr()) return emitOpError("should use either a symbol or value, but not both"); @@ -2168,9 +2138,9 @@ LogicalResult mlir::cir::VTTAddrPointOp::verify() { auto resultType = getAddr().getType(); - auto resTy = mlir::cir::PointerType::get( - getContext(), mlir::cir::PointerType::get( - getContext(), mlir::cir::VoidType::get(getContext()))); + auto resTy = cir::PointerType::get( + getContext(), + cir::PointerType::get(getContext(), cir::VoidType::get(getContext()))); if (resultType != resTy) return emitOpError("result type must be '") @@ -2184,14 +2154,13 @@ LogicalResult mlir::cir::VTTAddrPointOp::verify() { /// Returns the name used for the linkage attribute. This *must* correspond to /// the name of the attribute in ODS. -static StringRef getLinkageAttrNameString() { return "linkage"; } - -void mlir::cir::FuncOp::build(OpBuilder &builder, OperationState &result, - StringRef name, mlir::cir::FuncType type, - GlobalLinkageKind linkage, - CallingConv callingConv, - ArrayRef attrs, - ArrayRef argAttrs) { +static llvm::StringRef getLinkageAttrNameString() { return "linkage"; } + +void cir::FuncOp::build(OpBuilder &builder, OperationState &result, + llvm::StringRef name, cir::FuncType type, + GlobalLinkageKind linkage, CallingConv callingConv, + ArrayRef attrs, + ArrayRef argAttrs) { result.addRegion(); result.addAttribute(SymbolTable::getSymbolAttrName(), builder.getStringAttr(name)); @@ -2203,7 +2172,7 @@ void mlir::cir::FuncOp::build(OpBuilder &builder, OperationState &result, result.addAttribute(getCallingConvAttrName(result.name), CallingConvAttr::get(builder.getContext(), callingConv)); result.addAttribute(getGlobalVisibilityAttrName(result.name), - mlir::cir::VisibilityAttr::get(builder.getContext())); + cir::VisibilityAttr::get(builder.getContext())); result.attributes.append(attrs.begin(), attrs.end()); if (argAttrs.empty()) @@ -2215,8 +2184,7 @@ void mlir::cir::FuncOp::build(OpBuilder &builder, OperationState &result, getResAttrsAttrName(result.name)); } -ParseResult mlir::cir::FuncOp::parse(OpAsmParser &parser, - OperationState &state) { +ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { llvm::SMLoc loc = parser.getCurrentLocation(); auto builtinNameAttr = getBuiltinAttrName(state.name); @@ -2238,7 +2206,7 @@ ParseResult mlir::cir::FuncOp::parse(OpAsmParser &parser, state.addAttribute(noProtoNameAttr, parser.getBuilder().getUnitAttr()); // TODO: Missing comdat - assert(!::cir::MissingFeatures::setComdat()); + assert(!cir::MissingFeatures::setComdat()); // Default to external linkage if no keyword is provided. state.addAttribute(getLinkageAttrNameString(), @@ -2254,7 +2222,7 @@ ParseResult mlir::cir::FuncOp::parse(OpAsmParser &parser, parser.getBuilder().getStringAttr(visAttrStr)); } - mlir::cir::VisibilityAttr cirVisibilityAttr; + cir::VisibilityAttr cirVisibilityAttr; parseVisibilityAttr(parser, cirVisibilityAttr); state.addAttribute(visibilityNameAttr, cirVisibilityAttr); @@ -2263,10 +2231,10 @@ ParseResult mlir::cir::FuncOp::parse(OpAsmParser &parser, state.addAttribute(dsolocalNameAttr, parser.getBuilder().getUnitAttr()); StringAttr nameAttr; - SmallVector arguments; - SmallVector resultAttrs; - SmallVector argTypes; - SmallVector resultTypes; + llvm::SmallVector arguments; + llvm::SmallVector resultAttrs; + llvm::SmallVector argTypes; + llvm::SmallVector resultTypes; auto &builder = parser.getBuilder(); // Parse the name as a symbol. @@ -2289,11 +2257,11 @@ ParseResult mlir::cir::FuncOp::parse(OpAsmParser &parser, // Fetch return type or set it to void if empty/ommited. mlir::Type returnType = - (resultTypes.empty() ? mlir::cir::VoidType::get(builder.getContext()) + (resultTypes.empty() ? cir::VoidType::get(builder.getContext()) : resultTypes.front()); // Build the function type. - auto fnType = mlir::cir::FuncType::get(argTypes, returnType, isVariadic); + auto fnType = cir::FuncType::get(argTypes, returnType, isVariadic); if (!fnType) return failure(); state.addAttribute(getFunctionTypeAttrName(state.name), @@ -2347,7 +2315,7 @@ ParseResult mlir::cir::FuncOp::parse(OpAsmParser &parser, CallingConvAttr::get(parser.getContext(), callConv)); auto parseGlobalDtorCtor = - [&](StringRef keyword, + [&](llvm::StringRef keyword, llvm::function_ref prio)> createAttr) -> mlir::LogicalResult { if (::mlir::succeeded(parser.parseOptionalKeyword(keyword))) { @@ -2370,21 +2338,19 @@ ParseResult mlir::cir::FuncOp::parse(OpAsmParser &parser, }; if (parseGlobalDtorCtor("global_ctor", [&](std::optional prio) { - mlir::cir::GlobalCtorAttr globalCtorAttr = - prio ? mlir::cir::GlobalCtorAttr::get(builder.getContext(), - nameAttr, *prio) - : mlir::cir::GlobalCtorAttr::get(builder.getContext(), - nameAttr); + cir::GlobalCtorAttr globalCtorAttr = + prio ? cir::GlobalCtorAttr::get(builder.getContext(), nameAttr, + *prio) + : cir::GlobalCtorAttr::get(builder.getContext(), nameAttr); state.addAttribute(getGlobalCtorAttrName(state.name), globalCtorAttr); }).failed()) return failure(); if (parseGlobalDtorCtor("global_dtor", [&](std::optional prio) { - mlir::cir::GlobalDtorAttr globalDtorAttr = - prio ? mlir::cir::GlobalDtorAttr::get(builder.getContext(), - nameAttr, *prio) - : mlir::cir::GlobalDtorAttr::get(builder.getContext(), - nameAttr); + cir::GlobalDtorAttr globalDtorAttr = + prio ? cir::GlobalDtorAttr::get(builder.getContext(), nameAttr, + *prio) + : cir::GlobalDtorAttr::get(builder.getContext(), nameAttr); state.addAttribute(getGlobalDtorAttrName(state.name), globalDtorAttr); }).failed()) return failure(); @@ -2399,7 +2365,7 @@ ParseResult mlir::cir::FuncOp::parse(OpAsmParser &parser, return failure(); } else { NamedAttrList empty; - extraAttrs = mlir::cir::ExtraFuncAttributesAttr::get( + extraAttrs = cir::ExtraFuncAttributesAttr::get( builder.getContext(), empty.getDictionary(builder.getContext())); } state.addAttribute(getExtraAttrsAttrName(state.name), extraAttrs); @@ -2420,19 +2386,19 @@ ParseResult mlir::cir::FuncOp::parse(OpAsmParser &parser, return success(); } -bool mlir::cir::FuncOp::isDeclaration() { +bool cir::FuncOp::isDeclaration() { auto aliasee = getAliasee(); if (!aliasee) return isExternal(); auto *modOp = getOperation()->getParentOp(); - auto targetFn = dyn_cast_or_null( + auto targetFn = dyn_cast_or_null( mlir::SymbolTable::lookupSymbolIn(modOp, *aliasee)); assert(targetFn && "expected aliasee to exist"); return targetFn.isDeclaration(); } -::mlir::Region *mlir::cir::FuncOp::getCallableRegion() { +::mlir::Region *cir::FuncOp::getCallableRegion() { auto aliasee = getAliasee(); if (!aliasee) return isExternal() ? nullptr : &getBody(); @@ -2440,13 +2406,13 @@ ::mlir::Region *mlir::cir::FuncOp::getCallableRegion() { // Note that we forward the region from the original aliasee // function. auto *modOp = getOperation()->getParentOp(); - auto targetFn = dyn_cast_or_null( + auto targetFn = dyn_cast_or_null( mlir::SymbolTable::lookupSymbolIn(modOp, *aliasee)); assert(targetFn && "expected aliasee to exist"); return targetFn.getCallableRegion(); } -void mlir::cir::FuncOp::print(OpAsmPrinter &p) { +void cir::FuncOp::print(OpAsmPrinter &p) { // When adding a specific keyword here, do not forget to omit it in // printFunctionAttributes below or there will be a syntax error when // parsing @@ -2482,7 +2448,7 @@ void mlir::cir::FuncOp::print(OpAsmPrinter &p) { p << ' '; p.printSymbolName(getSymName()); auto fnType = getFunctionType(); - SmallVector resultTypes; + llvm::SmallVector resultTypes; if (!fnType.isVoid()) function_interface_impl::printFunctionSignature( p, *this, fnType.getInputs(), fnType.isVarArg(), @@ -2550,9 +2516,9 @@ void mlir::cir::FuncOp::print(OpAsmPrinter &p) { // Hook for OpTrait::FunctionLike, called after verifying that the 'type' // attribute is present. This can check for preconditions of the // getNumArguments hook not failing. -LogicalResult mlir::cir::FuncOp::verifyType() { +LogicalResult cir::FuncOp::verifyType() { auto type = getFunctionType(); - if (!isa(type)) + if (!isa(type)) return emitOpError("requires '" + getFunctionTypeAttrName().str() + "' attribute of function type"); if (!getNoProto() && type.isVarArg() && type.getNumInputs() == 0) @@ -2565,24 +2531,23 @@ LogicalResult mlir::cir::FuncOp::verifyType() { // - functions don't have 'common' linkage // - external functions have 'external' or 'extern_weak' linkage // - coroutine body must use at least one cir.await operation. -LogicalResult mlir::cir::FuncOp::verify() { +LogicalResult cir::FuncOp::verify() { if (getLinkage() == cir::GlobalLinkageKind::CommonLinkage) return emitOpError() << "functions cannot have '" << stringifyGlobalLinkageKind( - mlir::cir::GlobalLinkageKind::CommonLinkage) + cir::GlobalLinkageKind::CommonLinkage) << "' linkage"; if (isExternal()) { - if (getLinkage() != mlir::cir::GlobalLinkageKind::ExternalLinkage && - getLinkage() != mlir::cir::GlobalLinkageKind::ExternalWeakLinkage) - return emitOpError() - << "external functions must have '" - << stringifyGlobalLinkageKind( - mlir::cir::GlobalLinkageKind::ExternalLinkage) - << "' or '" - << stringifyGlobalLinkageKind( - mlir::cir::GlobalLinkageKind::ExternalWeakLinkage) - << "' linkage"; + if (getLinkage() != cir::GlobalLinkageKind::ExternalLinkage && + getLinkage() != cir::GlobalLinkageKind::ExternalWeakLinkage) + return emitOpError() << "external functions must have '" + << stringifyGlobalLinkageKind( + cir::GlobalLinkageKind::ExternalLinkage) + << "' or '" + << stringifyGlobalLinkageKind( + cir::GlobalLinkageKind::ExternalWeakLinkage) + << "' linkage"; return success(); } @@ -2610,9 +2575,9 @@ LogicalResult mlir::cir::FuncOp::verify() { std::set gotos; getOperation()->walk([&](mlir::Operation *op) { - if (auto lab = dyn_cast(op)) { + if (auto lab = dyn_cast(op)) { labels.emplace(lab.getLabel()); - } else if (auto goTo = dyn_cast(op)) { + } else if (auto goTo = dyn_cast(op)) { gotos.emplace(goTo.getLabel()); } }); @@ -2631,29 +2596,29 @@ LogicalResult mlir::cir::FuncOp::verify() { // CallOp //===----------------------------------------------------------------------===// -mlir::Value mlir::cir::CallOp::getIndirectCall() { +mlir::Value cir::CallOp::getIndirectCall() { assert(isIndirect()); return getOperand(0); } -mlir::Operation::operand_iterator mlir::cir::CallOp::arg_operand_begin() { +mlir::Operation::operand_iterator cir::CallOp::arg_operand_begin() { auto arg_begin = operand_begin(); if (isIndirect()) arg_begin++; return arg_begin; } -mlir::Operation::operand_iterator mlir::cir::CallOp::arg_operand_end() { +mlir::Operation::operand_iterator cir::CallOp::arg_operand_end() { return operand_end(); } /// Return the operand at index 'i', accounts for indirect call. -Value mlir::cir::CallOp::getArgOperand(unsigned i) { +Value cir::CallOp::getArgOperand(unsigned i) { if (isIndirect()) i++; return getOperand(i); } /// Return the number of operands, accounts for indirect call. -unsigned mlir::cir::CallOp::getNumArgOperands() { +unsigned cir::CallOp::getNumArgOperands() { if (isIndirect()) return this->getOperation()->getNumOperands() - 1; return this->getOperation()->getNumOperands(); @@ -2666,12 +2631,11 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { if (!fnAttr) return success(); - mlir::cir::FuncOp fn = - symbolTable.lookupNearestSymbolFrom(op, fnAttr); + cir::FuncOp fn = symbolTable.lookupNearestSymbolFrom(op, fnAttr); if (!fn) return op->emitOpError() << "'" << fnAttr.getValue() << "' does not reference a valid function"; - auto callIf = dyn_cast(op); + auto callIf = dyn_cast(op); assert(callIf && "expected CIR call interface to be always available"); // Verify that the operand and result types match the callee. Note that @@ -2847,7 +2811,7 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, {static_cast(continueOperands.size()), static_cast(landingPadOperands.size()), static_cast(ops.size())}), - result.getOrAddProperties() + result.getOrAddProperties() .operandSegmentSizes.begin()); if (parser.resolveOperands(continueOperands, continueTypes, continueOperandsLoc, result.operands)) @@ -2861,12 +2825,12 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, if (parser.parseOptionalKeyword("cc").succeeded()) { if (parser.parseLParen().failed()) return failure(); - mlir::cir::CallingConv callingConv; - if (parseCIRKeyword(parser, callingConv).failed()) + cir::CallingConv callingConv; + if (parseCIRKeyword(parser, callingConv).failed()) return failure(); if (parser.parseRParen().failed()) return failure(); - result.addAttribute("calling_conv", mlir::cir::CallingConvAttr::get( + result.addAttribute("calling_conv", cir::CallingConvAttr::get( builder.getContext(), callingConv)); } @@ -2880,7 +2844,7 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, return failure(); } else { NamedAttrList empty; - extraAttrs = mlir::cir::ExtraFuncAttributesAttr::get( + extraAttrs = cir::ExtraFuncAttributesAttr::get( builder.getContext(), empty.getDictionary(builder.getContext())); } result.addAttribute(extraAttrsAttrName, extraAttrs); @@ -2900,17 +2864,14 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, return ::mlir::success(); } -void printCallCommon(Operation *op, mlir::Value indirectCallee, - mlir::FlatSymbolRefAttr flatSym, - ::mlir::OpAsmPrinter &state, - ::mlir::cir::ExtraFuncAttributesAttr extraAttrs, - ::mlir::cir::CallingConv callingConv, - ::mlir::UnitAttr exception = {}, - mlir::Block *cont = nullptr, - mlir::Block *landingPad = nullptr) { +void printCallCommon( + Operation *op, mlir::Value indirectCallee, mlir::FlatSymbolRefAttr flatSym, + ::mlir::OpAsmPrinter &state, cir::ExtraFuncAttributesAttr extraAttrs, + cir::CallingConv callingConv, ::mlir::UnitAttr exception = {}, + mlir::Block *cont = nullptr, mlir::Block *landingPad = nullptr) { state << ' '; - auto callLikeOp = mlir::cast(op); + auto callLikeOp = mlir::cast(op); auto ops = callLikeOp.getArgOperands(); if (exception) @@ -2928,7 +2889,7 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, if (cont) { assert(landingPad && "expected two successors"); - auto tryCall = dyn_cast(op); + auto tryCall = dyn_cast(op); assert(tryCall && "regular calls do not branch"); state << ' ' << tryCall.getCont(); if (!tryCall.getContOperands().empty()) { @@ -2965,7 +2926,7 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, state << ' '; state.printFunctionalType(op->getOperands().getTypes(), op->getResultTypes()); - if (callingConv != mlir::cir::CallingConv::C) { + if (callingConv != cir::CallingConv::C) { state << " cc("; state << stringifyCallingConv(callingConv); state << ")"; @@ -2980,7 +2941,7 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, // If exception is present and there are cleanups, this should be latest thing // present (after all attributes, etc). if (exception) { - auto call = dyn_cast(op); + auto call = dyn_cast(op); assert(call && "expected regular call"); if (!call.getCleanup().empty()) { state << " cleanup "; @@ -2990,19 +2951,19 @@ void printCallCommon(Operation *op, mlir::Value indirectCallee, } LogicalResult -mlir::cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +cir::CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return verifyCallCommInSymbolUses(*this, symbolTable); } -::mlir::ParseResult mlir::cir::CallOp::parse(::mlir::OpAsmParser &parser, - ::mlir::OperationState &result) { +::mlir::ParseResult cir::CallOp::parse(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { return parseCallCommon(parser, result, getExtraAttrsAttrName(result.name)); } -void mlir::cir::CallOp::print(::mlir::OpAsmPrinter &state) { +void cir::CallOp::print(::mlir::OpAsmPrinter &state) { mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; - mlir::cir::CallingConv callingConv = getCallingConv(); + cir::CallingConv callingConv = getCallingConv(); mlir::UnitAttr exception = getExceptionAttr(); printCallCommon(*this, indirectCallee, getCalleeAttr(), state, getExtraAttrs(), callingConv, exception); @@ -3012,56 +2973,54 @@ void mlir::cir::CallOp::print(::mlir::OpAsmPrinter &state) { // TryCallOp //===----------------------------------------------------------------------===// -mlir::Value mlir::cir::TryCallOp::getIndirectCall() { +mlir::Value cir::TryCallOp::getIndirectCall() { assert(isIndirect()); return getOperand(0); } -mlir::Operation::operand_iterator mlir::cir::TryCallOp::arg_operand_begin() { +mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_begin() { auto arg_begin = operand_begin(); if (isIndirect()) arg_begin++; return arg_begin; } -mlir::Operation::operand_iterator mlir::cir::TryCallOp::arg_operand_end() { +mlir::Operation::operand_iterator cir::TryCallOp::arg_operand_end() { return operand_end(); } /// Return the operand at index 'i', accounts for indirect call. -Value mlir::cir::TryCallOp::getArgOperand(unsigned i) { +Value cir::TryCallOp::getArgOperand(unsigned i) { if (isIndirect()) i++; return getOperand(i); } /// Return the number of operands, accounts for indirect call. -unsigned mlir::cir::TryCallOp::getNumArgOperands() { +unsigned cir::TryCallOp::getNumArgOperands() { if (isIndirect()) return this->getOperation()->getNumOperands() - 1; return this->getOperation()->getNumOperands(); } LogicalResult -mlir::cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +cir::TryCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) { return verifyCallCommInSymbolUses(*this, symbolTable); } -::mlir::ParseResult -mlir::cir::TryCallOp::parse(::mlir::OpAsmParser &parser, - ::mlir::OperationState &result) { +::mlir::ParseResult cir::TryCallOp::parse(::mlir::OpAsmParser &parser, + ::mlir::OperationState &result) { return parseCallCommon(parser, result, getExtraAttrsAttrName(result.name), /*hasDestinationBlocks=*/true); } -void mlir::cir::TryCallOp::print(::mlir::OpAsmPrinter &state) { +void cir::TryCallOp::print(::mlir::OpAsmPrinter &state) { mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; - mlir::cir::CallingConv callingConv = getCallingConv(); + cir::CallingConv callingConv = getCallingConv(); printCallCommon(*this, indirectCallee, getCalleeAttr(), state, getExtraAttrs(), callingConv, {}, getCont(), getLandingPad()); } -mlir::SuccessorOperands -mlir::cir::TryCallOp::getSuccessorOperands(unsigned index) { +mlir::SuccessorOperands cir::TryCallOp::getSuccessorOperands(unsigned index) { assert(index < getNumSuccessors() && "invalid successor index"); if (index == 0) return SuccessorOperands(getContOperandsMutable()); @@ -3076,13 +3035,13 @@ mlir::cir::TryCallOp::getSuccessorOperands(unsigned index) { // UnaryOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::UnaryOp::verify() { +LogicalResult cir::UnaryOp::verify() { switch (getKind()) { - case mlir::cir::UnaryOpKind::Inc: - case mlir::cir::UnaryOpKind::Dec: - case mlir::cir::UnaryOpKind::Plus: - case mlir::cir::UnaryOpKind::Minus: - case mlir::cir::UnaryOpKind::Not: + case cir::UnaryOpKind::Inc: + case cir::UnaryOpKind::Dec: + case cir::UnaryOpKind::Plus: + case cir::UnaryOpKind::Minus: + case cir::UnaryOpKind::Not: // Nothing to verify. return success(); } @@ -3094,14 +3053,13 @@ LogicalResult mlir::cir::UnaryOp::verify() { // AwaitOp //===----------------------------------------------------------------------===// -void mlir::cir::AwaitOp::build( - OpBuilder &builder, OperationState &result, mlir::cir::AwaitKind kind, +void cir::AwaitOp::build( + OpBuilder &builder, OperationState &result, cir::AwaitKind kind, function_ref readyBuilder, function_ref suspendBuilder, function_ref resumeBuilder) { - result.addAttribute( - getKindAttrName(result.name), - mlir::cir::AwaitKindAttr::get(builder.getContext(), kind)); + result.addAttribute(getKindAttrName(result.name), + cir::AwaitKindAttr::get(builder.getContext(), kind)); { OpBuilder::InsertionGuard guard(builder); Region *readyRegion = result.addRegion(); @@ -3129,7 +3087,7 @@ void mlir::cir::AwaitOp::build( /// during the flow of control. `operands` is a set of optional attributes /// that correspond to a constant value for each operand, or null if that /// operand is not a constant. -void mlir::cir::AwaitOp::getSuccessorRegions( +void cir::AwaitOp::getSuccessorRegions( mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { // If any index all the underlying regions branch back to the parent // operation. @@ -3145,7 +3103,7 @@ void mlir::cir::AwaitOp::getSuccessorRegions( regions.push_back(RegionSuccessor(&this->getResume())); } -LogicalResult mlir::cir::AwaitOp::verify() { +LogicalResult cir::AwaitOp::verify() { if (!isa(this->getReady().back().getTerminator())) return emitOpError("ready region must end with cir.condition"); return success(); @@ -3204,7 +3162,7 @@ mlir::OpTrait::impl::verifySameFirstSecondOperandAndResultType(Operation *op) { // FIXME: move all of these to CIRAttrs.cpp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::ConstArrayAttr::verify( +LogicalResult cir::ConstArrayAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::Type type, Attribute attr, int trailingZerosNum) { @@ -3212,8 +3170,8 @@ LogicalResult mlir::cir::ConstArrayAttr::verify( return emitError() << "constant array expects ArrayAttr or StringAttr"; if (auto strAttr = mlir::dyn_cast(attr)) { - mlir::cir::ArrayType at = mlir::cast(type); - auto intTy = mlir::dyn_cast(at.getEltType()); + cir::ArrayType at = mlir::cast(type); + auto intTy = mlir::dyn_cast(at.getEltType()); // TODO: add CIR type for char. if (!intTy || intTy.getWidth() != 8) { @@ -3248,8 +3206,8 @@ LogicalResult mlir::cir::ConstArrayAttr::verify( return eltTypeCheck; } -::mlir::Attribute mlir::cir::ConstArrayAttr::parse(::mlir::AsmParser &parser, - ::mlir::Type type) { +::mlir::Attribute cir::ConstArrayAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { ::mlir::FailureOr<::mlir::Type> resultTy; ::mlir::FailureOr resultVal; ::llvm::SMLoc loc = parser.getCurrentLocation(); @@ -3297,8 +3255,7 @@ ::mlir::Attribute mlir::cir::ConstArrayAttr::parse(::mlir::AsmParser &parser, auto zeros = 0; if (parser.parseOptionalComma().succeeded()) { if (parser.parseOptionalKeyword("trailing_zeros").succeeded()) { - auto typeSize = - mlir::cast(resultTy.value()).getSize(); + auto typeSize = mlir::cast(resultTy.value()).getSize(); auto elts = resultVal.value(); if (auto str = mlir::dyn_cast(elts)) zeros = typeSize - str.size(); @@ -3317,7 +3274,7 @@ ::mlir::Attribute mlir::cir::ConstArrayAttr::parse(::mlir::AsmParser &parser, loc, parser.getContext(), resultTy.value(), resultVal.value(), zeros); } -void mlir::cir::ConstArrayAttr::print(::mlir::AsmPrinter &printer) const { +void cir::ConstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << "<"; printer.printStrippedAttrOrType(getElts()); if (getTrailingZerosNum()) @@ -3325,16 +3282,16 @@ void mlir::cir::ConstArrayAttr::print(::mlir::AsmPrinter &printer) const { printer << ">"; } -LogicalResult mlir::cir::ConstVectorAttr::verify( +LogicalResult cir::ConstVectorAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::Type type, mlir::ArrayAttr arrayAttr) { - if (!mlir::isa(type)) { - return emitError() << "type of mlir::cir::ConstVectorAttr is not a " - "mlir::cir::VectorType: " + if (!mlir::isa(type)) { + return emitError() << "type of cir::ConstVectorAttr is not a " + "cir::VectorType: " << type; } - auto vecType = mlir::cast(type); + auto vecType = mlir::cast(type); // Do the number of elements match? if (vecType.getSize() != arrayAttr.size()) { @@ -3359,8 +3316,8 @@ LogicalResult mlir::cir::ConstVectorAttr::verify( return elementTypeCheck; } -::mlir::Attribute mlir::cir::ConstVectorAttr::parse(::mlir::AsmParser &parser, - ::mlir::Type type) { +::mlir::Attribute cir::ConstVectorAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { ::mlir::FailureOr<::mlir::Type> resultType; ::mlir::FailureOr resultValue; ::llvm::SMLoc loc = parser.getCurrentLocation(); @@ -3400,27 +3357,26 @@ ::mlir::Attribute mlir::cir::ConstVectorAttr::parse(::mlir::AsmParser &parser, loc, parser.getContext(), resultType.value(), resultValue.value()); } -void mlir::cir::ConstVectorAttr::print(::mlir::AsmPrinter &printer) const { +void cir::ConstVectorAttr::print(::mlir::AsmPrinter &printer) const { printer << "<"; printer.printStrippedAttrOrType(getElts()); printer << ">"; } ::mlir::Attribute -mlir::cir::SignedOverflowBehaviorAttr::parse(::mlir::AsmParser &parser, - ::mlir::Type type) { +cir::SignedOverflowBehaviorAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { if (parser.parseLess()) return {}; auto behavior = parseOptionalCIRKeyword( - parser, mlir::cir::sob::SignedOverflowBehavior::undefined); + parser, cir::sob::SignedOverflowBehavior::undefined); if (parser.parseGreater()) return {}; return SignedOverflowBehaviorAttr::get(parser.getContext(), behavior); } -void mlir::cir::SignedOverflowBehaviorAttr::print( - ::mlir::AsmPrinter &printer) const { +void cir::SignedOverflowBehaviorAttr::print(::mlir::AsmPrinter &printer) const { printer << "<"; switch (getBehavior()) { case sob::SignedOverflowBehavior::undefined: @@ -3436,12 +3392,11 @@ void mlir::cir::SignedOverflowBehaviorAttr::print( printer << ">"; } -LogicalResult mlir::cir::TypeInfoAttr::verify( +LogicalResult cir::TypeInfoAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::Type type, ::mlir::ArrayAttr typeinfoData) { - if (mlir::cir::ConstStructAttr::verify(emitError, type, typeinfoData) - .failed()) + if (cir::ConstStructAttr::verify(emitError, type, typeinfoData).failed()) return failure(); for (auto &member : typeinfoData) { @@ -3454,10 +3409,10 @@ LogicalResult mlir::cir::TypeInfoAttr::verify( return success(); } -LogicalResult mlir::cir::VTableAttr::verify( +LogicalResult cir::VTableAttr::verify( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::Type type, ::mlir::ArrayAttr vtableData) { - auto sTy = mlir::dyn_cast_if_present(type); + auto sTy = mlir::dyn_cast_if_present(type); if (!sTy) { emitError() << "expected !cir.struct type result"; return failure(); @@ -3469,16 +3424,14 @@ LogicalResult mlir::cir::VTableAttr::verify( for (size_t i = 0; i < sTy.getMembers().size(); ++i) { - auto arrayTy = mlir::dyn_cast(sTy.getMembers()[i]); - auto constArrayAttr = - mlir::dyn_cast(vtableData[i]); + auto arrayTy = mlir::dyn_cast(sTy.getMembers()[i]); + auto constArrayAttr = mlir::dyn_cast(vtableData[i]); if (!arrayTy || !constArrayAttr) { emitError() << "expected struct type with one array element"; return failure(); } - if (mlir::cir::ConstStructAttr::verify(emitError, type, vtableData) - .failed()) + if (cir::ConstStructAttr::verify(emitError, type, vtableData).failed()) return failure(); LogicalResult eltTypeCheck = success(); @@ -3505,7 +3458,7 @@ LogicalResult mlir::cir::VTableAttr::verify( // CopyOp Definitions //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::CopyOp::verify() { +LogicalResult cir::CopyOp::verify() { // A data layout is required for us to know the number of bytes to be copied. if (!getType().getPointee().hasTrait()) @@ -3521,7 +3474,7 @@ LogicalResult mlir::cir::CopyOp::verify() { // GetMemberOp Definitions //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::GetMemberOp::verify() { +LogicalResult cir::GetMemberOp::verify() { const auto recordTy = dyn_cast(getAddrTy().getPointee()); if (!recordTy) @@ -3543,7 +3496,7 @@ LogicalResult mlir::cir::GetMemberOp::verify() { // GetRuntimeMemberOp Definitions //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::GetRuntimeMemberOp::verify() { +LogicalResult cir::GetRuntimeMemberOp::verify() { auto recordTy = cast(cast(getAddr().getType()).getPointee()); auto memberPtrTy = getMember().getType(); @@ -3565,11 +3518,11 @@ LogicalResult mlir::cir::GetRuntimeMemberOp::verify() { // GetMethodOp Definitions //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::GetMethodOp::verify() { +LogicalResult cir::GetMethodOp::verify() { auto methodTy = getMethod().getType(); // Assume objectTy is !cir.ptr - auto objectPtrTy = mlir::cast(getObject().getType()); + auto objectPtrTy = mlir::cast(getObject().getType()); auto objectTy = objectPtrTy.getPointee(); if (methodTy.getClsTy() != objectTy) { @@ -3578,8 +3531,8 @@ LogicalResult mlir::cir::GetMethodOp::verify() { } // Assume methodFuncTy is !cir.func - auto calleePtrTy = mlir::cast(getCallee().getType()); - auto calleeTy = mlir::cast(calleePtrTy.getPointee()); + auto calleePtrTy = mlir::cast(getCallee().getType()); + auto calleeTy = mlir::cast(calleePtrTy.getPointee()); auto methodFuncTy = methodTy.getMemberFuncTy(); // We verify at here that calleeTy is !cir.func, !Args)> @@ -3600,10 +3553,9 @@ LogicalResult mlir::cir::GetMethodOp::verify() { return mlir::failure(); } - auto calleeThisArgPtrTy = - mlir::dyn_cast(calleeArgsTy[0]); + auto calleeThisArgPtrTy = mlir::dyn_cast(calleeArgsTy[0]); if (!calleeThisArgPtrTy || - !mlir::isa(calleeThisArgPtrTy.getPointee())) { + !mlir::isa(calleeThisArgPtrTy.getPointee())) { emitError() << "the first parameter of callee must be a void pointer"; return mlir::failure(); } @@ -3620,7 +3572,7 @@ LogicalResult mlir::cir::GetMethodOp::verify() { // InlineAsmOp Definitions //===----------------------------------------------------------------------===// -void mlir::cir::InlineAsmOp::print(OpAsmPrinter &p) { +void cir::InlineAsmOp::print(OpAsmPrinter &p) { p << '(' << getAsmFlavor() << ", "; p.increaseIndent(); p.printNewline(); @@ -3669,8 +3621,8 @@ void mlir::cir::InlineAsmOp::print(OpAsmPrinter &p) { p << " -> " << v.getType(); } -ParseResult mlir::cir::InlineAsmOp::parse(OpAsmParser &parser, - OperationState &result) { +ParseResult cir::InlineAsmOp::parse(OpAsmParser &parser, + OperationState &result) { llvm::SmallVector operand_attrs; llvm::SmallVector operandsGroupSizes; std::string asm_string, constraints; @@ -3800,35 +3752,35 @@ ParseResult mlir::cir::InlineAsmOp::parse(OpAsmParser &parser, // Atomic Definitions //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::AtomicFetch::verify() { - if (getBinop() == mlir::cir::AtomicFetchKind::Add || - getBinop() == mlir::cir::AtomicFetchKind::Sub) +LogicalResult cir::AtomicFetch::verify() { + if (getBinop() == cir::AtomicFetchKind::Add || + getBinop() == cir::AtomicFetchKind::Sub) return mlir::success(); - if (!mlir::isa(getVal().getType())) + if (!mlir::isa(getVal().getType())) return emitError() << "only operates on integer values"; return mlir::success(); } -LogicalResult mlir::cir::BinOp::verify() { +LogicalResult cir::BinOp::verify() { bool noWrap = getNoUnsignedWrap() || getNoSignedWrap(); - if (!isa(getType()) && noWrap) + if (!isa(getType()) && noWrap) return emitError() << "only operations on integer values may have nsw/nuw flags"; - bool noWrapOps = getKind() == mlir::cir::BinOpKind::Add || - getKind() == mlir::cir::BinOpKind::Sub || - getKind() == mlir::cir::BinOpKind::Mul; + bool noWrapOps = getKind() == cir::BinOpKind::Add || + getKind() == cir::BinOpKind::Sub || + getKind() == cir::BinOpKind::Mul; if (noWrap && !noWrapOps) return emitError() << "The nsw/nuw flags are applicable to opcodes: 'add', " "'sub' and 'mul'"; - bool complexOps = getKind() == mlir::cir::BinOpKind::Add || - getKind() == mlir::cir::BinOpKind::Sub; - if (isa(getType()) && !complexOps) + bool complexOps = + getKind() == cir::BinOpKind::Add || getKind() == cir::BinOpKind::Sub; + if (isa(getType()) && !complexOps) return emitError() << "cir.binop can only represent 'add' and 'sub' on complex numbers"; @@ -3838,11 +3790,11 @@ LogicalResult mlir::cir::BinOp::verify() { //===----------------------------------------------------------------------===// // ShiftOp Definitions //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::ShiftOp::verify() { +LogicalResult cir::ShiftOp::verify() { mlir::Operation *op = getOperation(); mlir::Type resType = getResult().getType(); - bool isOp0Vec = mlir::isa(op->getOperand(0).getType()); - bool isOp1Vec = mlir::isa(op->getOperand(1).getType()); + bool isOp0Vec = mlir::isa(op->getOperand(0).getType()); + bool isOp1Vec = mlir::isa(op->getOperand(1).getType()); if (isOp0Vec != isOp1Vec) return emitOpError() << "input types cannot be one vector and one scalar"; if (isOp1Vec && op->getOperand(1).getType() != resType) { @@ -3856,7 +3808,7 @@ LogicalResult mlir::cir::ShiftOp::verify() { // LabelOp Definitions //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::LabelOp::verify() { +LogicalResult cir::LabelOp::verify() { auto *op = getOperation(); auto *blk = op->getBlock(); if (&blk->front() != op) @@ -3869,7 +3821,7 @@ LogicalResult mlir::cir::LabelOp::verify() { //===----------------------------------------------------------------------===// LogicalResult -mlir::cir::EhTypeIdOp::verifySymbolUses(SymbolTableCollection &symbolTable) { +cir::EhTypeIdOp::verifySymbolUses(SymbolTableCollection &symbolTable) { auto op = symbolTable.lookupNearestSymbolFrom(*this, getTypeSymAttr()); if (!isa(op)) return emitOpError("'") @@ -3881,14 +3833,14 @@ mlir::cir::EhTypeIdOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // CatchParamOp //===----------------------------------------------------------------------===// -LogicalResult mlir::cir::CatchParamOp::verify() { +LogicalResult cir::CatchParamOp::verify() { if (getExceptionPtr()) { auto kind = getKind(); - if (!kind || *kind != mlir::cir::CatchParamKind::begin) + if (!kind || *kind != cir::CatchParamKind::begin) return emitOpError("needs 'begin' to work with exception pointer"); return success(); } - if (!getKind() && !(*this)->getParentOfType()) + if (!getKind() && !(*this)->getParentOfType()) return emitOpError("without 'kind' requires 'cir.try' surrounding scope"); return success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp index d49a4613ec86..e75b30b1c1c3 100644 --- a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp @@ -150,8 +150,7 @@ DeletionKind cir::CopyOp::removeBlockingUses( const DataLayout &dataLayout) { if (loadsFrom(slot)) builder.create(getLoc(), reachingDefinition, getDst(), false, - mlir::IntegerAttr{}, - mlir::cir::MemOrderAttr()); + mlir::IntegerAttr{}, cir::MemOrderAttr()); return DeletionKind::Delete; } diff --git a/clang/lib/CIR/Dialect/IR/CIROpenCLAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIROpenCLAttrs.cpp index e16aad6d6867..a31f210addbd 100644 --- a/clang/lib/CIR/Dialect/IR/CIROpenCLAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIROpenCLAttrs.cpp @@ -22,7 +22,7 @@ #include "llvm/ADT/TypeSwitch.h" using namespace mlir; -using namespace mlir::cir; +using namespace cir; //===----------------------------------------------------------------------===// // OpenCLKernelMetadataAttr definitions diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index ae50f79fff30..e579fe4c2f0c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -60,7 +60,7 @@ static void printPointerAddrSpace(mlir::AsmPrinter &p, #include "clang/CIR/Dialect/IR/CIROpsTypes.cpp.inc" using namespace mlir; -using namespace mlir::cir; +using namespace cir; //===----------------------------------------------------------------------===// // General CIR parsing / printing @@ -68,7 +68,7 @@ using namespace mlir::cir; Type CIRDialect::parseType(DialectAsmParser &parser) const { llvm::SMLoc typeLoc = parser.getCurrentLocation(); - StringRef mnemonic; + llvm::StringRef mnemonic; Type genType; // Try to parse as a tablegen'd type. @@ -118,7 +118,7 @@ void BoolType::print(mlir::AsmPrinter &printer) const {} Type StructType::getLargestMember(const ::mlir::DataLayout &dataLayout) const { if (!layoutInfo) computeSizeAndAlignment(dataLayout); - return mlir::cast(layoutInfo).getLargestMember(); + return mlir::cast(layoutInfo).getLargestMember(); } Type StructType::parse(mlir::AsmParser &parser) { @@ -186,7 +186,7 @@ Type StructType::parse(mlir::AsmParser &parser) { // Parse optional AST attribute. This is just a formality for now, since CIR // cannot yet read serialized AST. - mlir::cir::ASTRecordDeclAttr ast = nullptr; + cir::ASTRecordDeclAttr ast = nullptr; parser.parseOptionalAttribute(ast); if (parser.parseGreater()) @@ -264,8 +264,7 @@ void StructType::print(mlir::AsmPrinter &printer) const { mlir::LogicalResult StructType::verifyInvariants( llvm::function_ref emitError, llvm::ArrayRef members, mlir::StringAttr name, bool incomplete, - bool packed, mlir::cir::StructType::RecordKind kind, - ASTRecordDeclInterface ast) { + bool packed, cir::StructType::RecordKind kind, ASTRecordDeclInterface ast) { if (name && name.getValue().empty()) { emitError() << "identified structs cannot have an empty name"; return mlir::failure(); @@ -331,7 +330,7 @@ bool StructType::getIncomplete() const { return getImpl()->incomplete; } bool StructType::getPacked() const { return getImpl()->packed; } -mlir::cir::StructType::RecordKind StructType::getKind() const { +cir::StructType::RecordKind StructType::getKind() const { return getImpl()->kind; } @@ -438,20 +437,20 @@ ArrayType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, return dataLayout.getTypePreferredAlignment(getEltType()); } -llvm::TypeSize mlir::cir::VectorType::getTypeSizeInBits( +llvm::TypeSize cir::VectorType::getTypeSizeInBits( const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { return llvm::TypeSize::getFixed(getSize() * dataLayout.getTypeSizeInBits(getEltType())); } -uint64_t mlir::cir::VectorType::getABIAlignment( - const ::mlir::DataLayout &dataLayout, - ::mlir::DataLayoutEntryListRef params) const { +uint64_t +cir::VectorType::getABIAlignment(const ::mlir::DataLayout &dataLayout, + ::mlir::DataLayoutEntryListRef params) const { return llvm::NextPowerOf2(dataLayout.getTypeSizeInBits(*this)); } -uint64_t mlir::cir::VectorType::getPreferredAlignment( +uint64_t cir::VectorType::getPreferredAlignment( const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { return llvm::NextPowerOf2(dataLayout.getTypeSizeInBits(*this)); @@ -463,7 +462,7 @@ StructType::getTypeSizeInBits(const ::mlir::DataLayout &dataLayout, if (!layoutInfo) computeSizeAndAlignment(dataLayout); return llvm::TypeSize::getFixed( - mlir::cast(layoutInfo).getSize() * 8); + mlir::cast(layoutInfo).getSize() * 8); } uint64_t @@ -471,7 +470,7 @@ StructType::getABIAlignment(const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { if (!layoutInfo) computeSizeAndAlignment(dataLayout); - return mlir::cast(layoutInfo).getAlignment(); + return mlir::cast(layoutInfo).getAlignment(); } uint64_t @@ -483,7 +482,7 @@ StructType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, bool StructType::isPadded(const ::mlir::DataLayout &dataLayout) const { if (!layoutInfo) computeSizeAndAlignment(dataLayout); - return mlir::cast(layoutInfo).getPadded(); + return mlir::cast(layoutInfo).getPadded(); } uint64_t StructType::getElementOffset(const ::mlir::DataLayout &dataLayout, @@ -491,8 +490,7 @@ uint64_t StructType::getElementOffset(const ::mlir::DataLayout &dataLayout, assert(idx < getMembers().size() && "access not valid"); if (!layoutInfo) computeSizeAndAlignment(dataLayout); - auto offsets = - mlir::cast(layoutInfo).getOffsets(); + auto offsets = mlir::cast(layoutInfo).getOffsets(); auto intAttr = mlir::cast(offsets[idx]); return intAttr.getInt(); } @@ -512,7 +510,7 @@ void StructType::computeSizeAndAlignment( auto members = getMembers(); mlir::Type largestMember; unsigned largestMemberSize = 0; - SmallVector memberOffsets; + llvm::SmallVector memberOffsets; // Loop over each of the elements, placing them in memory. memberOffsets.reserve(numElements); @@ -567,9 +565,9 @@ void StructType::computeSizeAndAlignment( } auto offsets = mlir::ArrayAttr::get(getContext(), memberOffsets); - layoutInfo = mlir::cir::StructLayoutAttr::get( - getContext(), structSize, structAlignment.value(), isPadded, - largestMember, offsets); + layoutInfo = cir::StructLayoutAttr::get(getContext(), structSize, + structAlignment.value(), isPadded, + largestMember, offsets); } //===----------------------------------------------------------------------===// @@ -785,7 +783,7 @@ FP128Type::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, } const llvm::fltSemantics &LongDoubleType::getFloatSemantics() const { - return mlir::cast(getUnderlying()) + return mlir::cast(getUnderlying()) .getFloatSemantics(); } @@ -825,20 +823,20 @@ LongDoubleType::verify(function_ref emitError, // Floating-point type helpers //===----------------------------------------------------------------------===// -bool mlir::cir::isAnyFloatingPointType(mlir::Type t) { - return isa(t); +bool cir::isAnyFloatingPointType(mlir::Type t) { + return isa(t); } //===----------------------------------------------------------------------===// // Floating-point and Float-point Vecotr type helpers //===----------------------------------------------------------------------===// -bool mlir::cir::isFPOrFPVectorTy(mlir::Type t) { +bool cir::isFPOrFPVectorTy(mlir::Type t) { - if (isa(t)) { + if (isa(t)) { return isAnyFloatingPointType( - mlir::dyn_cast(t).getEltType()); + mlir::dyn_cast(t).getEltType()); } return isAnyFloatingPointType(t); } @@ -847,11 +845,10 @@ bool mlir::cir::isFPOrFPVectorTy(mlir::Type t) { // ComplexType Definitions //===----------------------------------------------------------------------===// -mlir::LogicalResult mlir::cir::ComplexType::verify( +mlir::LogicalResult cir::ComplexType::verify( llvm::function_ref emitError, mlir::Type elementTy) { - if (!mlir::isa( - elementTy)) { + if (!mlir::isa(elementTy)) { emitError() << "element type of !cir.complex must be either a " "floating-point type or an integer type"; return failure(); @@ -860,9 +857,9 @@ mlir::LogicalResult mlir::cir::ComplexType::verify( return success(); } -llvm::TypeSize mlir::cir::ComplexType::getTypeSizeInBits( - const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { +llvm::TypeSize +cir::ComplexType::getTypeSizeInBits(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { // C17 6.2.5p13: // Each complex type has the same representation and alignment requirements // as an array type containing exactly two elements of the corresponding @@ -872,9 +869,9 @@ llvm::TypeSize mlir::cir::ComplexType::getTypeSizeInBits( return dataLayout.getTypeSizeInBits(elementTy) * 2; } -uint64_t mlir::cir::ComplexType::getABIAlignment( - const mlir::DataLayout &dataLayout, - mlir::DataLayoutEntryListRef params) const { +uint64_t +cir::ComplexType::getABIAlignment(const mlir::DataLayout &dataLayout, + mlir::DataLayoutEntryListRef params) const { // C17 6.2.5p13: // Each complex type has the same representation and alignment requirements // as an array type containing exactly two elements of the corresponding @@ -884,7 +881,7 @@ uint64_t mlir::cir::ComplexType::getABIAlignment( return dataLayout.getTypeABIAlignment(elementTy); } -uint64_t mlir::cir::ComplexType::getPreferredAlignment( +uint64_t cir::ComplexType::getPreferredAlignment( const ::mlir::DataLayout &dataLayout, ::mlir::DataLayoutEntryListRef params) const { // C17 6.2.5p13: @@ -964,10 +961,10 @@ static mlir::Type getMethodLayoutType(mlir::MLIRContext *ctx) { // following struct: struct { fnptr_t, ptrdiff_t }, where fnptr_t is a // function pointer type. // TODO: consider member function pointer layout in other ABIs - auto voidPtrTy = mlir::cir::PointerType::get(mlir::cir::VoidType::get(ctx)); + auto voidPtrTy = cir::PointerType::get(cir::VoidType::get(ctx)); mlir::Type fields[2]{voidPtrTy, voidPtrTy}; - return mlir::cir::StructType::get(ctx, fields, /*packed=*/false, - mlir::cir::StructType::Struct); + return cir::StructType::get(ctx, fields, /*packed=*/false, + cir::StructType::Struct); } llvm::TypeSize @@ -995,7 +992,7 @@ MethodType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, mlir::LogicalResult PointerType::verify(llvm::function_ref emitError, mlir::Type pointee, mlir::Attribute addrSpace) { - if (addrSpace && !mlir::isa(addrSpace)) { + if (addrSpace && !mlir::isa(addrSpace)) { emitError() << "unexpected addrspace attribute type"; return mlir::failure(); } @@ -1004,7 +1001,7 @@ PointerType::verify(llvm::function_ref emitError, mlir::ParseResult parseAddrSpaceAttribute(mlir::AsmParser &p, mlir::Attribute &addrSpaceAttr) { - using mlir::cir::AddressSpaceAttr; + using cir::AddressSpaceAttr; auto attrLoc = p.getCurrentLocation(); llvm::StringRef addrSpaceKind; @@ -1037,7 +1034,7 @@ mlir::ParseResult parseAddrSpaceAttribute(mlir::AsmParser &p, void printAddrSpaceAttribute(mlir::AsmPrinter &p, mlir::Attribute rawAddrSpaceAttr) { - using mlir::cir::AddressSpaceAttr; + using cir::AddressSpaceAttr; auto addrSpaceAttr = mlir::cast(rawAddrSpaceAttr); if (addrSpaceAttr.isTarget()) { p << AddressSpaceAttr::kTargetKeyword << "<" diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp index b5096722f42d..316a39b762e6 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp @@ -41,7 +41,7 @@ struct RemoveRedundantBranches : public OpRewritePattern { Block *block = op.getOperation()->getBlock(); Block *dest = op.getDest(); - if (isa(dest->front())) + if (isa(dest->front())) return failure(); // Single edge between blocks: merge it. @@ -167,7 +167,7 @@ void CIRCanonicalizePass::runOnOperation() { populateCIRCanonicalizePatterns(patterns); // Collect operations to apply patterns. - SmallVector ops; + llvm::SmallVector ops; getOperation()->walk([&](Operation *op) { // CastOp here is to perform a manual `fold` in // applyOpPatternsAndFold diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp index 225da527b736..4cc0021ee287 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp @@ -60,10 +60,10 @@ struct SimplifyTernary final : public OpRewritePattern { !isSimpleTernaryBranch(op.getFalseRegion())) return mlir::failure(); - mlir::cir::YieldOp trueBranchYieldOp = mlir::cast( - op.getTrueRegion().front().getTerminator()); - mlir::cir::YieldOp falseBranchYieldOp = mlir::cast( - op.getFalseRegion().front().getTerminator()); + cir::YieldOp trueBranchYieldOp = + mlir::cast(op.getTrueRegion().front().getTerminator()); + cir::YieldOp falseBranchYieldOp = + mlir::cast(op.getFalseRegion().front().getTerminator()); auto trueValue = trueBranchYieldOp.getArgs()[0]; auto falseValue = falseBranchYieldOp.getArgs()[0]; @@ -71,8 +71,8 @@ struct SimplifyTernary final : public OpRewritePattern { rewriter.inlineBlockBefore(&op.getFalseRegion().front(), op); rewriter.eraseOp(trueBranchYieldOp); rewriter.eraseOp(falseBranchYieldOp); - rewriter.replaceOpWithNewOp(op, op.getCond(), - trueValue, falseValue); + rewriter.replaceOpWithNewOp(op, op.getCond(), trueValue, + falseValue); return mlir::success(); } @@ -96,8 +96,8 @@ struct SimplifyTernary final : public OpRewritePattern { // Check whether the region/block contains a cir.const followed by a // cir.yield that yields the value. - auto yieldOp = mlir::cast(onlyBlock.getTerminator()); - auto yieldValueDefOp = mlir::dyn_cast_if_present( + auto yieldOp = mlir::cast(onlyBlock.getTerminator()); + auto yieldValueDefOp = mlir::dyn_cast_if_present( yieldOp.getArgs()[0].getDefiningOp()); return yieldValueDefOp && yieldValueDefOp->getBlock() == &onlyBlock; } @@ -111,16 +111,15 @@ struct SimplifySelect : public OpRewritePattern { mlir::Operation *trueValueOp = op.getTrueValue().getDefiningOp(); mlir::Operation *falseValueOp = op.getFalseValue().getDefiningOp(); auto trueValueConstOp = - mlir::dyn_cast_if_present(trueValueOp); + mlir::dyn_cast_if_present(trueValueOp); auto falseValueConstOp = - mlir::dyn_cast_if_present(falseValueOp); + mlir::dyn_cast_if_present(falseValueOp); if (!trueValueConstOp || !falseValueConstOp) return mlir::failure(); - auto trueValue = - mlir::dyn_cast(trueValueConstOp.getValue()); + auto trueValue = mlir::dyn_cast(trueValueConstOp.getValue()); auto falseValue = - mlir::dyn_cast(falseValueConstOp.getValue()); + mlir::dyn_cast(falseValueConstOp.getValue()); if (!trueValue || !falseValue) return mlir::failure(); @@ -133,8 +132,8 @@ struct SimplifySelect : public OpRewritePattern { // cir.select if %0 then #false else #true -> cir.unary not %0 if (!trueValue.getValue() && falseValue.getValue()) { - rewriter.replaceOpWithNewOp( - op, mlir::cir::UnaryOpKind::Not, op.getCondition()); + rewriter.replaceOpWithNewOp(op, cir::UnaryOpKind::Not, + op.getCondition()); return mlir::success(); } @@ -167,7 +166,7 @@ void CIRSimplifyPass::runOnOperation() { populateMergeCleanupPatterns(patterns); // Collect operations to apply patterns. - SmallVector ops; + llvm::SmallVector ops; getOperation()->walk([&](Operation *op) { if (isa(op)) ops.push_back(op); diff --git a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp index d9247a834070..e736f76591dd 100644 --- a/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CallConvLowering.cpp @@ -20,12 +20,11 @@ #include "llvm/Support/TimeProfiler.h" -namespace mlir { namespace cir { FuncType getFuncPointerTy(mlir::Type typ) { - if (auto ptr = dyn_cast(typ)) - return dyn_cast(ptr.getPointee()); + if (auto ptr = mlir::dyn_cast(typ)) + return mlir::dyn_cast(ptr.getPointee()); return {}; } @@ -33,7 +32,7 @@ bool isFuncPointerTy(mlir::Type typ) { return (bool)getFuncPointerTy(typ); } struct CallConvLowering { - CallConvLowering(ModuleOp module) + CallConvLowering(mlir::ModuleOp module) : rewriter(module.getContext()), lowerModule(createLowerModule(module, rewriter)) {} @@ -43,13 +42,12 @@ struct CallConvLowering { auto calls = op.getSymbolUses(module); if (calls.has_value()) { for (auto call : calls.value()) { - if (auto g = dyn_cast(call.getUser())) + if (auto g = mlir::dyn_cast(call.getUser())) rewriteGetGlobalOp(g); - else if (auto c = dyn_cast(call.getUser())) + else if (auto c = mlir::dyn_cast(call.getUser())) lowerDirectCallOp(c, op); else { - cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABIFuncPtr(), - "NYI"); + cir_cconv_assert_or_abort(!cir::MissingFeatures::ABIFuncPtr(), "NYI"); } } } @@ -75,7 +73,7 @@ struct CallConvLowering { return t; } - void bitcast(Value src, Type newTy) { + void bitcast(mlir::Value src, mlir::Type newTy) { if (src.getType() != newTy) { auto cast = rewriter.create(src.getLoc(), newTy, CastKind::bitcast, src); @@ -125,18 +123,22 @@ struct CallConvLoweringPass using CallConvLoweringBase::CallConvLoweringBase; void runOnOperation() override; - StringRef getArgument() const override { return "cir-call-conv-lowering"; }; + llvm::StringRef getArgument() const override { + return "cir-call-conv-lowering"; + }; }; void CallConvLoweringPass::runOnOperation() { - auto module = dyn_cast(getOperation()); + auto module = mlir::dyn_cast(getOperation()); CallConvLowering cc(module); module.walk([&](FuncOp op) { cc.lower(op); }); } } // namespace cir -std::unique_ptr createCallConvLoweringPass() { +namespace mlir { + +std::unique_ptr createCallConvLoweringPass() { return std::make_unique(); } diff --git a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp index b8745cdf0c2f..716412c0f6d8 100644 --- a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp +++ b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp @@ -33,7 +33,7 @@ void DropASTPass::runOnOperation() { op->walk([&](Operation *op) { if (auto alloca = dyn_cast(op)) { alloca.removeAstAttr(); - auto ty = mlir::dyn_cast(alloca.getAllocaType()); + auto ty = mlir::dyn_cast(alloca.getAllocaType()); if (!ty) return; ty.dropAst(); diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index bebb6c6770de..d0ea2ec985d2 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -20,7 +20,7 @@ #include "clang/CIR/Dialect/Passes.h" using namespace mlir; -using namespace mlir::cir; +using namespace cir; namespace { @@ -30,7 +30,7 @@ void lowerTerminator(mlir::Operation *op, mlir::Block *dest, assert(op->hasTrait() && "not a terminator"); mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp(op, dest); + rewriter.replaceOpWithNewOp(op, dest); } /// Walks a region while skipping operations of type `Ops`. This ensures the @@ -56,7 +56,7 @@ struct CIRIfFlattening : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::IfOp ifOp, + matchAndRewrite(cir::IfOp ifOp, mlir::PatternRewriter &rewriter) const override { mlir::OpBuilder::InsertionGuard guard(rewriter); auto loc = ifOp.getLoc(); @@ -78,9 +78,9 @@ struct CIRIfFlattening : public OpRewritePattern { rewriter.setInsertionPointToEnd(thenAfterBody); if (auto thenYieldOp = - dyn_cast(thenAfterBody->getTerminator())) { - rewriter.replaceOpWithNewOp( - thenYieldOp, thenYieldOp.getArgs(), continueBlock); + dyn_cast(thenAfterBody->getTerminator())) { + rewriter.replaceOpWithNewOp(thenYieldOp, thenYieldOp.getArgs(), + continueBlock); } rewriter.setInsertionPointToEnd(continueBlock); @@ -97,14 +97,14 @@ struct CIRIfFlattening : public OpRewritePattern { } rewriter.setInsertionPointToEnd(currentBlock); - rewriter.create(loc, ifOp.getCondition(), - thenBeforeBody, elseBeforeBody); + rewriter.create(loc, ifOp.getCondition(), thenBeforeBody, + elseBeforeBody); if (!emptyElse) { rewriter.setInsertionPointToEnd(elseAfterBody); if (auto elseYieldOp = - dyn_cast(elseAfterBody->getTerminator())) { - rewriter.replaceOpWithNewOp( + dyn_cast(elseAfterBody->getTerminator())) { + rewriter.replaceOpWithNewOp( elseYieldOp, elseYieldOp.getArgs(), continueBlock); } } @@ -114,12 +114,12 @@ struct CIRIfFlattening : public OpRewritePattern { } }; -class CIRScopeOpFlattening : public mlir::OpRewritePattern { +class CIRScopeOpFlattening : public mlir::OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using OpRewritePattern::OpRewritePattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ScopeOp scopeOp, + matchAndRewrite(cir::ScopeOp scopeOp, mlir::PatternRewriter &rewriter) const override { mlir::OpBuilder::InsertionGuard guard(rewriter); auto loc = scopeOp.getLoc(); @@ -152,15 +152,14 @@ class CIRScopeOpFlattening : public mlir::OpRewritePattern { // auto stackSaveOp = rewriter.create( // loc, mlir::LLVM::LLVMPointerType::get( // mlir::IntegerType::get(scopeOp.getContext(), 8))); - rewriter.create(loc, mlir::ValueRange(), beforeBody); + rewriter.create(loc, mlir::ValueRange(), beforeBody); // Replace the scopeop return with a branch that jumps out of the body. // Stack restore before leaving the body region. rewriter.setInsertionPointToEnd(afterBody); - if (auto yieldOp = - dyn_cast(afterBody->getTerminator())) { - rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getArgs(), - continueBlock); + if (auto yieldOp = dyn_cast(afterBody->getTerminator())) { + rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getArgs(), + continueBlock); } // TODO(cir): stackrestore? @@ -172,9 +171,9 @@ class CIRScopeOpFlattening : public mlir::OpRewritePattern { } }; -class CIRTryOpFlattening : public mlir::OpRewritePattern { +class CIRTryOpFlattening : public mlir::OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using OpRewritePattern::OpRewritePattern; mlir::Block *buildTypeCase(mlir::PatternRewriter &rewriter, mlir::Region &r, mlir::Block *afterTry, @@ -207,19 +206,19 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { mlir::Value exceptionPtr = entryBlock->addArgument(exceptionPtrTy, paramOp.getLoc()); - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( paramOp, catchType, exceptionPtr, - mlir::cir::CatchParamKindAttr::get(rewriter.getContext(), - mlir::cir::CatchParamKind::begin)); + cir::CatchParamKindAttr::get(rewriter.getContext(), + cir::CatchParamKind::begin)); rewriter.setInsertionPoint(yieldOp); - rewriter.create( + rewriter.create( catchLoc, mlir::Type{}, nullptr, - mlir::cir::CatchParamKindAttr::get(rewriter.getContext(), - mlir::cir::CatchParamKind::end)); + cir::CatchParamKindAttr::get(rewriter.getContext(), + cir::CatchParamKind::end)); rewriter.setInsertionPointToEnd(yieldOp->getBlock()); - rewriter.replaceOpWithNewOp(yieldOp, afterTry); + rewriter.replaceOpWithNewOp(yieldOp, afterTry); return entryBlock; } @@ -227,10 +226,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { mlir::Block *unwindBlock) const { assert(&r.front() == &r.back() && "only one block expected"); rewriter.mergeBlocks(&r.back(), unwindBlock); - auto resume = dyn_cast(unwindBlock->getTerminator()); + auto resume = dyn_cast(unwindBlock->getTerminator()); assert(resume && "expected 'cir.resume'"); rewriter.setInsertionPointToEnd(unwindBlock); - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( resume, unwindBlock->getArgument(0), unwindBlock->getArgument(1)); } @@ -262,27 +261,27 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { rewriter.setInsertionPointAfterValue(catchResult); auto catchType = catchResult.getType(); mlir::Location catchLoc = paramOp.getLoc(); - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( paramOp, catchType, exceptionPtr, - mlir::cir::CatchParamKindAttr::get(rewriter.getContext(), - mlir::cir::CatchParamKind::begin)); + cir::CatchParamKindAttr::get(rewriter.getContext(), + cir::CatchParamKind::begin)); rewriter.setInsertionPoint(yieldOp); - rewriter.create( + rewriter.create( catchLoc, mlir::Type{}, nullptr, - mlir::cir::CatchParamKindAttr::get(rewriter.getContext(), - mlir::cir::CatchParamKind::end)); + cir::CatchParamKindAttr::get(rewriter.getContext(), + cir::CatchParamKind::end)); rewriter.setInsertionPointToEnd(yieldOp->getBlock()); - rewriter.replaceOpWithNewOp(yieldOp, afterTry); + rewriter.replaceOpWithNewOp(yieldOp, afterTry); } - mlir::ArrayAttr collectTypeSymbols(mlir::cir::TryOp tryOp) const { + mlir::ArrayAttr collectTypeSymbols(cir::TryOp tryOp) const { mlir::ArrayAttr caseAttrList = tryOp.getCatchTypesAttr(); llvm::SmallVector symbolList; for (mlir::Attribute caseAttr : caseAttrList) { - auto typeIdGlobal = dyn_cast(caseAttr); + auto typeIdGlobal = dyn_cast(caseAttr); if (!typeIdGlobal) continue; symbolList.push_back(typeIdGlobal.getSymbol()); @@ -294,16 +293,16 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { return mlir::ArrayAttr::get(caseAttrList.getContext(), symbolList); } - void buildLandingPad(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, + void buildLandingPad(cir::TryOp tryOp, mlir::PatternRewriter &rewriter, mlir::Block *beforeCatch, mlir::Block *landingPadBlock, mlir::Block *catchDispatcher, - SmallVectorImpl &callsToRewrite, + SmallVectorImpl &callsToRewrite, unsigned callIdx, bool tryOnlyHasCatchAll, mlir::Type exceptionPtrType, mlir::Type typeIdType) const { rewriter.setInsertionPointToEnd(landingPadBlock); mlir::ArrayAttr symlist = collectTypeSymbols(tryOp); - auto inflightEh = rewriter.create( + auto inflightEh = rewriter.create( tryOp.getLoc(), exceptionPtrType, typeIdType, tryOp.getCleanup() ? mlir::UnitAttr::get(tryOp.getContext()) : nullptr, symlist); @@ -311,11 +310,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto exceptionPtr = inflightEh.getExceptionPtr(); // Time to emit cleanup's. - mlir::cir::CallOp callOp = callsToRewrite[callIdx]; + cir::CallOp callOp = callsToRewrite[callIdx]; if (!callOp.getCleanup().empty()) { mlir::Block *cleanupBlock = &callOp.getCleanup().getBlocks().back(); - auto cleanupYield = - cast(cleanupBlock->getTerminator()); + auto cleanupYield = cast(cleanupBlock->getTerminator()); cleanupYield->erase(); rewriter.mergeBlocks(cleanupBlock, landingPadBlock); rewriter.setInsertionPointToEnd(landingPadBlock); @@ -324,23 +322,23 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Branch out to the catch clauses dispatcher. assert(catchDispatcher->getNumArguments() >= 1 && "expected at least one argument in place"); - SmallVector dispatcherInitOps = {exceptionPtr}; + llvm::SmallVector dispatcherInitOps = {exceptionPtr}; if (!tryOnlyHasCatchAll) { assert(catchDispatcher->getNumArguments() == 2 && "expected two arguments in place"); dispatcherInitOps.push_back(selector); } - rewriter.create(tryOp.getLoc(), catchDispatcher, - dispatcherInitOps); + rewriter.create(tryOp.getLoc(), catchDispatcher, + dispatcherInitOps); return; } - mlir::Block * - buildLandingPads(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, - mlir::Block *beforeCatch, mlir::Block *afterTry, - SmallVectorImpl &callsToRewrite, - SmallVectorImpl &landingPads, - bool tryOnlyHasCatchAll) const { + mlir::Block *buildLandingPads(cir::TryOp tryOp, + mlir::PatternRewriter &rewriter, + mlir::Block *beforeCatch, mlir::Block *afterTry, + SmallVectorImpl &callsToRewrite, + SmallVectorImpl &landingPads, + bool tryOnlyHasCatchAll) const { unsigned numCalls = callsToRewrite.size(); // Create the first landing pad block and a placeholder for the initial // catch dispatcher (which will be the common destination for every new @@ -351,9 +349,9 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // For the dispatcher, already add the block arguments and prepare the // proper types the landing pad should use to jump to. mlir::Block *dispatcher = rewriter.createBlock(afterTry); - auto exceptionPtrType = mlir::cir::PointerType::get( - mlir::cir::VoidType::get(rewriter.getContext())); - auto typeIdType = mlir::cir::IntType::get(getContext(), 32, false); + auto exceptionPtrType = + cir::PointerType::get(cir::VoidType::get(rewriter.getContext())); + auto typeIdType = cir::IntType::get(getContext(), 32, false); dispatcher->addArgument(exceptionPtrType, tryOp.getLoc()); if (!tryOnlyHasCatchAll) dispatcher->addArgument(typeIdType, tryOp.getLoc()); @@ -370,25 +368,24 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { return dispatcher; } - mlir::Block *buildCatch(mlir::cir::TryOp tryOp, - mlir::PatternRewriter &rewriter, + mlir::Block *buildCatch(cir::TryOp tryOp, mlir::PatternRewriter &rewriter, mlir::Block *afterTry, mlir::Block *dispatcher, - SmallVectorImpl &callsToRewrite, + SmallVectorImpl &callsToRewrite, mlir::Attribute catchAttr, mlir::Attribute nextCatchAttr, mlir::Region &catchRegion) const { mlir::Location loc = tryOp.getLoc(); mlir::Block *nextDispatcher = nullptr; - if (auto typeIdGlobal = dyn_cast(catchAttr)) { + if (auto typeIdGlobal = dyn_cast(catchAttr)) { auto *previousDispatcher = dispatcher; auto typeId = - rewriter.create(loc, typeIdGlobal.getSymbol()); + rewriter.create(loc, typeIdGlobal.getSymbol()); auto ehPtr = previousDispatcher->getArgument(0); auto ehSel = previousDispatcher->getArgument(1); - auto match = rewriter.create( - loc, mlir::cir::BoolType::get(rewriter.getContext()), - mlir::cir::CmpOpKind::eq, ehSel, typeId); + auto match = rewriter.create( + loc, cir::BoolType::get(rewriter.getContext()), cir::CmpOpKind::eq, + ehSel, typeId); mlir::Block *typeCatchBlock = buildTypeCase(rewriter, catchRegion, afterTry, ehPtr.getType()); @@ -398,17 +395,16 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Next dispatcher gets by default both exception ptr and selector info, // but on a catch all we don't need selector info. nextDispatcher->addArgument(ehPtr.getType(), loc); - SmallVector nextDispatchOps = {ehPtr}; - if (!isa(nextCatchAttr)) { + llvm::SmallVector nextDispatchOps = {ehPtr}; + if (!isa(nextCatchAttr)) { nextDispatcher->addArgument(ehSel.getType(), loc); nextDispatchOps.push_back(ehSel); } - rewriter.create( - loc, match, typeCatchBlock, nextDispatcher, mlir::ValueRange{ehPtr}, - nextDispatchOps); + rewriter.create(loc, match, typeCatchBlock, nextDispatcher, + mlir::ValueRange{ehPtr}, nextDispatchOps); rewriter.setInsertionPointToEnd(nextDispatcher); - } else if (auto catchAll = dyn_cast(catchAttr)) { + } else if (auto catchAll = dyn_cast(catchAttr)) { // In case the catch(...) is all we got, `dispatcher` shall be // non-empty. assert(dispatcher->getArguments().size() == 1 && @@ -416,8 +412,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { auto ehPtr = dispatcher->getArgument(0); buildAllCase(rewriter, catchRegion, afterTry, dispatcher, ehPtr); // Do not update `nextDispatcher`, no more business in try/catch - } else if (auto catchUnwind = - dyn_cast(catchAttr)) { + } else if (auto catchUnwind = dyn_cast(catchAttr)) { assert(dispatcher->getArguments().size() == 2 && "expected two block argument"); buildUnwindCase(rewriter, catchRegion, dispatcher); @@ -426,17 +421,17 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { return nextDispatcher; } - void buildCatchers(mlir::cir::TryOp tryOp, mlir::PatternRewriter &rewriter, + void buildCatchers(cir::TryOp tryOp, mlir::PatternRewriter &rewriter, mlir::Block *afterBody, mlir::Block *afterTry, - SmallVectorImpl &callsToRewrite, + SmallVectorImpl &callsToRewrite, SmallVectorImpl &landingPads) const { // Replace the tryOp return with a branch that jumps out of the body. rewriter.setInsertionPointToEnd(afterBody); - auto tryBodyYield = cast(afterBody->getTerminator()); + auto tryBodyYield = cast(afterBody->getTerminator()); mlir::Block *beforeCatch = rewriter.getInsertionBlock(); rewriter.setInsertionPointToEnd(beforeCatch); - rewriter.replaceOpWithNewOp(tryBodyYield, afterTry); + rewriter.replaceOpWithNewOp(tryBodyYield, afterTry); // Start the landing pad by getting the inflight exception information. mlir::Block *nextDispatcher = @@ -463,7 +458,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { assert(!nextDispatcher && "last dispatch expected to be nullptr"); } - mlir::Block *buildTryBody(mlir::cir::TryOp tryOp, + mlir::Block *buildTryBody(cir::TryOp tryOp, mlir::PatternRewriter &rewriter) const { auto loc = tryOp.getLoc(); // Split the current block before the TryOp to create the inlining @@ -478,12 +473,12 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Branch into the body of the region. rewriter.setInsertionPointToEnd(beforeTryScopeBlock); - rewriter.create(loc, mlir::ValueRange(), beforeBody); + rewriter.create(loc, mlir::ValueRange(), beforeBody); return afterTry; } mlir::LogicalResult - matchAndRewrite(mlir::cir::TryOp tryOp, + matchAndRewrite(cir::TryOp tryOp, mlir::PatternRewriter &rewriter) const override { mlir::OpBuilder::InsertionGuard guard(rewriter); auto *afterBody = &tryOp.getTryRegion().back(); @@ -496,10 +491,10 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { // Grab the collection of `cir.call exception`s to rewrite to // `cir.try_call`. - SmallVector callsToRewrite; + llvm::SmallVector callsToRewrite; tryOp.getTryRegion().walk([&](CallOp op) { // Only grab calls within immediate closest TryOp scope. - if (op->getParentOfType() != tryOp) + if (op->getParentOfType() != tryOp) return; if (!op.getException()) return; @@ -510,7 +505,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { mlir::Block *afterTry = buildTryBody(tryOp, rewriter); // Build catchers. - SmallVector landingPads; + llvm::SmallVector landingPads; buildCatchers(tryOp, rewriter, afterBody, afterTry, callsToRewrite, landingPads); rewriter.eraseOp(tryOp); @@ -523,8 +518,8 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { mlir::Block *callBlock = callOp->getBlock(); mlir::Block *cont = rewriter.splitBlock(callBlock, mlir::Block::iterator(callOp)); - mlir::cir::ExtraFuncAttributesAttr extraAttrs = callOp.getExtraAttrs(); - std::optional ast = callOp.getAst(); + cir::ExtraFuncAttributesAttr extraAttrs = callOp.getExtraAttrs(); + std::optional ast = callOp.getAst(); mlir::FlatSymbolRefAttr symbol; if (!callOp.isIndirect()) @@ -533,7 +528,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { mlir::Type resTy = nullptr; if (callOp.getNumResults() > 0) resTy = callOp.getResult().getType(); - auto tryCall = rewriter.replaceOpWithNewOp( + auto tryCall = rewriter.replaceOpWithNewOp( callOp, symbol, resTy, cont, landingPads[callIdx], callOp.getOperands()); tryCall.setExtraAttrsAttr(extraAttrs); @@ -543,7 +538,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { } // Quick block cleanup: no indirection to the post try block. - auto brOp = dyn_cast(afterTry->getTerminator()); + auto brOp = dyn_cast(afterTry->getTerminator()); if (brOp && brOp.getDest()->hasNoPredecessors()) { mlir::Block *srcBlock = brOp.getDest(); rewriter.eraseOp(brOp); @@ -554,22 +549,22 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { }; class CIRLoopOpInterfaceFlattening - : public mlir::OpInterfaceRewritePattern { + : public mlir::OpInterfaceRewritePattern { public: using mlir::OpInterfaceRewritePattern< - mlir::cir::LoopOpInterface>::OpInterfaceRewritePattern; + cir::LoopOpInterface>::OpInterfaceRewritePattern; - inline void lowerConditionOp(mlir::cir::ConditionOp op, mlir::Block *body, + inline void lowerConditionOp(cir::ConditionOp op, mlir::Block *body, mlir::Block *exit, mlir::PatternRewriter &rewriter) const { mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp(op, op.getCondition(), - body, exit); + rewriter.replaceOpWithNewOp(op, op.getCondition(), body, + exit); } mlir::LogicalResult - matchAndRewrite(mlir::cir::LoopOpInterface op, + matchAndRewrite(cir::LoopOpInterface op, mlir::PatternRewriter &rewriter) const final { // Setup CFG blocks. auto *entry = rewriter.getInsertionBlock(); @@ -580,10 +575,10 @@ class CIRLoopOpInterfaceFlattening // Setup loop entry branch. rewriter.setInsertionPointToEnd(entry); - rewriter.create(op.getLoc(), &op.getEntry().front()); + rewriter.create(op.getLoc(), &op.getEntry().front()); // Branch from condition region to body or exit. - auto conditionOp = cast(cond->getTerminator()); + auto conditionOp = cast(cond->getTerminator()); lowerConditionOp(conditionOp, body, exit, rewriter); // TODO(cir): Remove the walks below. It visits operations unnecessarily, @@ -593,7 +588,7 @@ class CIRLoopOpInterfaceFlattening // Lower continue statements. mlir::Block *dest = (step ? step : cond); op.walkBodySkippingNestedLoops([&](mlir::Operation *op) { - if (!isa(op)) + if (!isa(op)) return mlir::WalkResult::advance(); lowerTerminator(op, dest, rewriter); @@ -601,9 +596,9 @@ class CIRLoopOpInterfaceFlattening }); // Lower break statements. - walkRegionSkipping( + walkRegionSkipping( op.getBody(), [&](mlir::Operation *op) { - if (!isa(op)) + if (!isa(op)) return mlir::WalkResult::advance(); lowerTerminator(op, exit, rewriter); @@ -612,14 +607,14 @@ class CIRLoopOpInterfaceFlattening // Lower optional body region yield. for (auto &blk : op.getBody().getBlocks()) { - auto bodyYield = dyn_cast(blk.getTerminator()); + auto bodyYield = dyn_cast(blk.getTerminator()); if (bodyYield) lowerTerminator(bodyYield, (step ? step : cond), rewriter); } // Lower mandatory step region yield. if (step) - lowerTerminator(cast(step->getTerminator()), cond, + lowerTerminator(cast(step->getTerminator()), cond, rewriter); // Move region contents out of the loop op. @@ -633,58 +628,56 @@ class CIRLoopOpInterfaceFlattening } }; -class CIRSwitchOpFlattening - : public mlir::OpRewritePattern { +class CIRSwitchOpFlattening : public mlir::OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using OpRewritePattern::OpRewritePattern; inline void rewriteYieldOp(mlir::PatternRewriter &rewriter, - mlir::cir::YieldOp yieldOp, + cir::YieldOp yieldOp, mlir::Block *destination) const { rewriter.setInsertionPoint(yieldOp); - rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getOperands(), - destination); + rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getOperands(), + destination); } // Return the new defaultDestination block. - Block *condBrToRangeDestination(mlir::cir::SwitchOp op, + Block *condBrToRangeDestination(cir::SwitchOp op, mlir::PatternRewriter &rewriter, mlir::Block *rangeDestination, mlir::Block *defaultDestination, APInt lowerBound, APInt upperBound) const { assert(lowerBound.sle(upperBound) && "Invalid range"); auto resBlock = rewriter.createBlock(defaultDestination); - auto sIntType = mlir::cir::IntType::get(op.getContext(), 32, true); - auto uIntType = mlir::cir::IntType::get(op.getContext(), 32, false); + auto sIntType = cir::IntType::get(op.getContext(), 32, true); + auto uIntType = cir::IntType::get(op.getContext(), 32, false); - auto rangeLength = rewriter.create( + auto rangeLength = rewriter.create( op.getLoc(), sIntType, - mlir::cir::IntAttr::get(op.getContext(), sIntType, - upperBound - lowerBound)); + cir::IntAttr::get(op.getContext(), sIntType, upperBound - lowerBound)); - auto lowerBoundValue = rewriter.create( + auto lowerBoundValue = rewriter.create( op.getLoc(), sIntType, - mlir::cir::IntAttr::get(op.getContext(), sIntType, lowerBound)); - auto diffValue = rewriter.create( - op.getLoc(), sIntType, mlir::cir::BinOpKind::Sub, op.getCondition(), - lowerBoundValue); + cir::IntAttr::get(op.getContext(), sIntType, lowerBound)); + auto diffValue = + rewriter.create(op.getLoc(), sIntType, cir::BinOpKind::Sub, + op.getCondition(), lowerBoundValue); // Use unsigned comparison to check if the condition is in the range. - auto uDiffValue = rewriter.create( + auto uDiffValue = rewriter.create( op.getLoc(), uIntType, CastKind::integral, diffValue); - auto uRangeLength = rewriter.create( + auto uRangeLength = rewriter.create( op.getLoc(), uIntType, CastKind::integral, rangeLength); - auto cmpResult = rewriter.create( - op.getLoc(), mlir::cir::BoolType::get(op.getContext()), - mlir::cir::CmpOpKind::le, uDiffValue, uRangeLength); - rewriter.create(op.getLoc(), cmpResult, - rangeDestination, defaultDestination); + auto cmpResult = rewriter.create( + op.getLoc(), cir::BoolType::get(op.getContext()), cir::CmpOpKind::le, + uDiffValue, uRangeLength); + rewriter.create(op.getLoc(), cmpResult, rangeDestination, + defaultDestination); return resBlock; } mlir::LogicalResult - matchAndRewrite(mlir::cir::SwitchOp op, + matchAndRewrite(cir::SwitchOp op, mlir::PatternRewriter &rewriter) const override { llvm::SmallVector cases; op.collectCases(cases); @@ -710,10 +703,10 @@ class CIRSwitchOpFlattening // inline everything from switch body between the switch op and the exit // block. { - mlir::cir::YieldOp switchYield = nullptr; + cir::YieldOp switchYield = nullptr; // Clear switch operation. for (auto &block : llvm::make_early_inc_range(op.getBody().getBlocks())) - if (auto yieldOp = dyn_cast(block.getTerminator())) + if (auto yieldOp = dyn_cast(block.getTerminator())) switchYield = yieldOp; assert(!op.getBody().empty()); @@ -726,7 +719,7 @@ class CIRSwitchOpFlattening rewriteYieldOp(rewriter, switchYield, exitBlock); rewriter.setInsertionPointToEnd(originalBlock); - rewriter.create(op.getLoc(), swopBlock); + rewriter.create(op.getLoc(), swopBlock); } // Allocate required data structures (disconsider default case in @@ -749,24 +742,24 @@ class CIRSwitchOpFlattening // Found default case: save destination and operands. switch (caseOp.getKind()) { - case mlir::cir::CaseOpKind::Default: + case cir::CaseOpKind::Default: defaultDestination = ®ion.front(); defaultOperands = defaultDestination->getArguments(); break; - case mlir::cir::CaseOpKind::Range: + case cir::CaseOpKind::Range: assert(caseOp.getValue().size() == 2 && "Case range should have 2 case value"); rangeValues.push_back( - {cast(caseOp.getValue()[0]).getValue(), - cast(caseOp.getValue()[1]).getValue()}); + {cast(caseOp.getValue()[0]).getValue(), + cast(caseOp.getValue()[1]).getValue()}); rangeDestinations.push_back(®ion.front()); rangeOperands.push_back(rangeDestinations.back()->getArguments()); break; - case mlir::cir::CaseOpKind::Anyof: - case mlir::cir::CaseOpKind::Equal: + case cir::CaseOpKind::Anyof: + case cir::CaseOpKind::Equal: // AnyOf cases kind can have multiple values, hence the loop below. for (auto &value : caseOp.getValue()) { - caseValues.push_back(cast(value).getValue()); + caseValues.push_back(cast(value).getValue()); caseDestinations.push_back(®ion.front()); caseOperands.push_back(caseDestinations.back()->getArguments()); } @@ -774,9 +767,9 @@ class CIRSwitchOpFlattening } // Handle break statements. - walkRegionSkipping( + walkRegionSkipping( region, [&](mlir::Operation *op) { - if (!isa(op)) + if (!isa(op)) return mlir::WalkResult::advance(); lowerTerminator(op, exitBlock, rewriter); @@ -788,15 +781,15 @@ class CIRSwitchOpFlattening if (blk.getNumSuccessors()) continue; - if (auto yieldOp = dyn_cast(blk.getTerminator())) { + if (auto yieldOp = dyn_cast(blk.getTerminator())) { mlir::Operation *nextOp = caseOp->getNextNode(); assert(nextOp && "caseOp is not expected to be the last op"); mlir::Block *oldBlock = nextOp->getBlock(); mlir::Block *newBlock = rewriter.splitBlock(oldBlock, nextOp->getIterator()); rewriter.setInsertionPointToEnd(oldBlock); - rewriter.create(nextOp->getLoc(), mlir::ValueRange(), - newBlock); + rewriter.create(nextOp->getLoc(), mlir::ValueRange(), + newBlock); rewriteYieldOp(rewriter, yieldOp, newBlock); } } @@ -810,7 +803,7 @@ class CIRSwitchOpFlattening // Create a branch to the entry of the inlined region. rewriter.setInsertionPointToEnd(oldBlock); - rewriter.create(caseOp.getLoc(), &entryBlock); + rewriter.create(caseOp.getLoc(), &entryBlock); } // Remove all cases since we've inlined the regions. @@ -853,42 +846,41 @@ class CIRSwitchOpFlattening // Set switch op to branch to the newly created blocks. rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( op, op.getCondition(), defaultDestination, defaultOperands, caseValues, caseDestinations, caseOperands); return mlir::success(); } }; -class CIRTernaryOpFlattening - : public mlir::OpRewritePattern { +class CIRTernaryOpFlattening : public mlir::OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using OpRewritePattern::OpRewritePattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::TernaryOp op, + matchAndRewrite(cir::TernaryOp op, mlir::PatternRewriter &rewriter) const override { auto loc = op->getLoc(); auto *condBlock = rewriter.getInsertionBlock(); auto opPosition = rewriter.getInsertionPoint(); auto *remainingOpsBlock = rewriter.splitBlock(condBlock, opPosition); - SmallVector locs; + llvm::SmallVector locs; // Ternary result is optional, make sure to populate the location only // when relevant. if (op->getResultTypes().size()) locs.push_back(loc); auto *continueBlock = rewriter.createBlock(remainingOpsBlock, op->getResultTypes(), locs); - rewriter.create(loc, remainingOpsBlock); + rewriter.create(loc, remainingOpsBlock); auto &trueRegion = op.getTrueRegion(); auto *trueBlock = &trueRegion.front(); mlir::Operation *trueTerminator = trueRegion.back().getTerminator(); rewriter.setInsertionPointToEnd(&trueRegion.back()); - auto trueYieldOp = dyn_cast(trueTerminator); + auto trueYieldOp = dyn_cast(trueTerminator); - rewriter.replaceOpWithNewOp( - trueYieldOp, trueYieldOp.getArgs(), continueBlock); + rewriter.replaceOpWithNewOp(trueYieldOp, trueYieldOp.getArgs(), + continueBlock); rewriter.inlineRegionBefore(trueRegion, continueBlock); auto *falseBlock = continueBlock; @@ -897,14 +889,13 @@ class CIRTernaryOpFlattening falseBlock = &falseRegion.front(); mlir::Operation *falseTerminator = falseRegion.back().getTerminator(); rewriter.setInsertionPointToEnd(&falseRegion.back()); - auto falseYieldOp = dyn_cast(falseTerminator); - rewriter.replaceOpWithNewOp( - falseYieldOp, falseYieldOp.getArgs(), continueBlock); + auto falseYieldOp = dyn_cast(falseTerminator); + rewriter.replaceOpWithNewOp(falseYieldOp, falseYieldOp.getArgs(), + continueBlock); rewriter.inlineRegionBefore(falseRegion, continueBlock); rewriter.setInsertionPointToEnd(condBlock); - rewriter.create(loc, op.getCond(), trueBlock, - falseBlock); + rewriter.create(loc, op.getCond(), trueBlock, falseBlock); rewriter.replaceOp(op, continueBlock->getArguments()); @@ -925,7 +916,7 @@ void FlattenCFGPass::runOnOperation() { populateFlattenCFGPatterns(patterns); // Collect operations to apply patterns. - SmallVector ops; + llvm::SmallVector ops; getOperation()->walk([&](Operation *op) { if (isa(op)) ops.push_back(op); diff --git a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp index 56e2308272ed..c46f89e87d12 100644 --- a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp +++ b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp @@ -10,7 +10,7 @@ #include "llvm/Support/TimeProfiler.h" using namespace mlir; -using namespace mlir::cir; +using namespace cir; namespace { @@ -20,17 +20,17 @@ struct GotoSolverPass : public GotoSolverBase { void runOnOperation() override; }; -static void process(mlir::cir::FuncOp func) { +static void process(cir::FuncOp func) { mlir::OpBuilder rewriter(func.getContext()); std::map labels; - std::vector gotos; + std::vector gotos; func.getBody().walk([&](mlir::Operation *op) { - if (auto lab = dyn_cast(op)) { + if (auto lab = dyn_cast(op)) { labels.emplace(lab.getLabel().str(), lab->getBlock()); lab.erase(); - } else if (auto goTo = dyn_cast(op)) { + } else if (auto goTo = dyn_cast(op)) { gotos.push_back(goTo); } }); @@ -39,15 +39,15 @@ static void process(mlir::cir::FuncOp func) { mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint(goTo); auto dest = labels[goTo.getLabel().str()]; - rewriter.create(goTo.getLoc(), dest); + rewriter.create(goTo.getLoc(), dest); goTo.erase(); } } void GotoSolverPass::runOnOperation() { llvm::TimeTraceScope scope("Goto Solver"); - SmallVector ops; - getOperation()->walk([&](mlir::cir::FuncOp op) { process(op); }); + llvm::SmallVector ops; + getOperation()->walk([&](cir::FuncOp op) { process(op); }); } } // namespace diff --git a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp index bafdca89e481..003e5425ebaa 100644 --- a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp +++ b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp @@ -18,7 +18,7 @@ #include "llvm/Support/TimeProfiler.h" using namespace mlir; -using namespace mlir::cir; +using namespace cir; namespace { @@ -28,14 +28,14 @@ struct HoistAllocasPass : public HoistAllocasBase { void runOnOperation() override; }; -static void process(mlir::cir::FuncOp func) { +static void process(cir::FuncOp func) { if (func.getRegion().empty()) return; // Hoist all static allocas to the entry block. mlir::Block &entryBlock = func.getRegion().front(); - llvm::SmallVector allocas; - func.getBody().walk([&](mlir::cir::AllocaOp alloca) { + llvm::SmallVector allocas; + func.getBody().walk([&](cir::AllocaOp alloca) { if (alloca->getBlock() == &entryBlock) return; // Don't hoist allocas with dynamic alloca size. @@ -54,8 +54,8 @@ static void process(mlir::cir::FuncOp func) { void HoistAllocasPass::runOnOperation() { llvm::TimeTraceScope scope("Hoist Allocas"); - SmallVector ops; - getOperation()->walk([&](mlir::cir::FuncOp op) { process(op); }); + llvm::SmallVector ops; + getOperation()->walk([&](cir::FuncOp op) { process(op); }); } } // namespace diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index f160239d460d..39a1ac4ef5ce 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -28,7 +28,7 @@ using cir::CIRBaseBuilderTy; using namespace mlir; -using namespace mlir::cir; +using namespace cir; namespace { @@ -49,7 +49,7 @@ struct IdiomRecognizerPass : public IdiomRecognizerBase { unsigned val = None; bool isOptionsParsed = false; - void parseOptions(ArrayRef remarks) { + void parseOptions(ArrayRef remarks) { if (isOptionsParsed) return; @@ -63,7 +63,7 @@ struct IdiomRecognizerPass : public IdiomRecognizerBase { } void parseOptions(IdiomRecognizerPass &pass) { - SmallVector remarks; + llvm::SmallVector remarks; for (auto &r : pass.remarksList) remarks.push_back(r); @@ -103,7 +103,7 @@ bool IdiomRecognizerPass::raiseStdFind(CallOp call) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(call.getOperation()); - auto findOp = builder.create( + auto findOp = builder.create( call.getLoc(), call.getResult().getType(), call.getCalleeAttr(), call.getOperand(0), call.getOperand(1), call.getOperand(2)); @@ -117,7 +117,7 @@ static bool isIteratorLikeType(mlir::Type t) { // in which case we could look at ASTRecordDeclInterface for more // information. auto pTy = dyn_cast(t); - if (!pTy || !mlir::isa(pTy.getPointee())) + if (!pTy || !mlir::isa(pTy.getPointee())) return false; return true; } @@ -153,13 +153,13 @@ bool IdiomRecognizerPass::raiseIteratorBeginEnd(CallOp call) { if (callExprAttr.isIteratorBeginCall()) { if (opts.emitRemarkFoundCalls()) emitRemark(call.getLoc()) << "found call to begin() iterator"; - iterOp = builder.create( + iterOp = builder.create( call.getLoc(), call.getResult().getType(), call.getCalleeAttr(), call.getOperand(0)); } else if (callExprAttr.isIteratorEndCall()) { if (opts.emitRemarkFoundCalls()) emitRemark(call.getLoc()) << "found call to end() iterator"; - iterOp = builder.create( + iterOp = builder.create( call.getLoc(), call.getResult().getType(), call.getCalleeAttr(), call.getOperand(0)); } else { @@ -186,7 +186,7 @@ void IdiomRecognizerPass::runOnOperation() { if (isa<::mlir::ModuleOp>(op)) theModule = cast<::mlir::ModuleOp>(op); - SmallVector callsToTransform; + llvm::SmallVector callsToTransform; op->walk([&](CallOp callOp) { // Process call operations diff --git a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp index b936157a1e9f..30719a3d60f9 100644 --- a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp @@ -28,7 +28,7 @@ using cir::CIRBaseBuilderTy; using namespace mlir; -using namespace mlir::cir; +using namespace cir; namespace { @@ -47,7 +47,7 @@ struct LibOptPass : public LibOptBase { unsigned val = None; bool isOptionsParsed = false; - void parseOptions(ArrayRef remarks) { + void parseOptions(ArrayRef remarks) { if (isOptionsParsed) return; @@ -61,7 +61,7 @@ struct LibOptPass : public LibOptBase { } void parseOptions(LibOptPass &pass) { - SmallVector remarks; + llvm::SmallVector remarks; for (auto &r : pass.remarksList) remarks.push_back(r); @@ -188,7 +188,7 @@ void LibOptPass::xformStdFindIntoMemchr(StdFindOp findOp) { findOp.getLoc(), memchrOp0, memchrOp1, builder.create( findOp.getLoc(), uInt64Ty, - mlir::cir::IntAttr::get(uInt64Ty, staticSize))); + cir::IntAttr::get(uInt64Ty, staticSize))); } } return builder.create( @@ -228,7 +228,7 @@ void LibOptPass::runOnOperation() { if (isa<::mlir::ModuleOp>(op)) theModule = cast<::mlir::ModuleOp>(op); - SmallVector stdFindToTransform; + llvm::SmallVector stdFindToTransform; op->walk([&](StdFindOp findOp) { stdFindToTransform.push_back(findOp); }); for (auto c : stdFindToTransform) diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index a781c9287c0f..368c36b48946 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -59,7 +59,7 @@ struct LifetimeCheckPass : public LifetimeCheckBase { mlir::Location loc, unsigned nestLevel); void updatePointsTo(mlir::Value addr, mlir::Value data, mlir::Location loc); void updatePointsToForConstStruct(mlir::Value addr, - mlir::cir::ConstStructAttr value, + cir::ConstStructAttr value, mlir::Location loc); void updatePointsToForZeroStruct(mlir::Value addr, StructType sTy, mlir::Location loc); @@ -133,8 +133,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { unsigned histLimit = 1; bool isOptionsParsed = false; - void parseOptions(ArrayRef remarks, ArrayRef hist, - unsigned hist_limit) { + void parseOptions(ArrayRef remarks, + ArrayRef hist, unsigned hist_limit) { if (isOptionsParsed) return; @@ -157,8 +157,8 @@ struct LifetimeCheckPass : public LifetimeCheckBase { } void parseOptions(LifetimeCheckPass &pass) { - SmallVector remarks; - SmallVector hists; + llvm::SmallVector remarks; + llvm::SmallVector hists; for (auto &r : pass.remarksList) remarks.push_back(r); @@ -671,8 +671,8 @@ void LifetimeCheckPass::checkLoop(LoopOpInterface loopOp) { // { /*body*/ } // // See checkIf for additional explanations. - SmallVector pmapOps; - SmallVector regionsToCheck; + llvm::SmallVector pmapOps; + llvm::SmallVector regionsToCheck; auto setupLoopRegionsToCheck = [&](bool isSubsequentTaken = false) { regionsToCheck = loopOp.getRegionsInExecutionOrder(); @@ -733,7 +733,7 @@ void LifetimeCheckPass::checkAwait(AwaitOp awaitOp) { // // FIXME: use branch interface here and only tackle // the necessary regions. - SmallVector pmapOps; + llvm::SmallVector pmapOps; for (auto r : awaitOp.getRegions()) { PMapType regionPmap = getPmap(); @@ -789,7 +789,7 @@ void LifetimeCheckPass::checkSwitch(SwitchOp switchOp) { // else {/*3*/}. // // See checkIf for additional explanations. - SmallVector pmapOps; + llvm::SmallVector pmapOps; // If there are no regions, return early pmap is the same. // TODO: if the switch is not in a simple form, return early now and try to @@ -845,7 +845,7 @@ void LifetimeCheckPass::checkIf(IfOp ifOp) { // // To that intent the pmap is copied out before checking each region and // pmap(ifOp) computed after analysing both paths. - SmallVector pmapOps; + llvm::SmallVector pmapOps; { PMapType localThenPmap = getPmap(); @@ -869,9 +869,9 @@ void LifetimeCheckPass::checkIf(IfOp ifOp) { } template bool isStructAndHasAttr(mlir::Type ty) { - if (!mlir::isa(ty)) + if (!mlir::isa(ty)) return false; - return hasAttr(mlir::cast(ty).getAst()); + return hasAttr(mlir::cast(ty).getAst()); } static bool isOwnerType(mlir::Type ty) { @@ -900,15 +900,15 @@ static bool isOwnerType(mlir::Type ty) { return isStructAndHasAttr(ty); } -static bool containsPointerElts(mlir::cir::StructType s) { +static bool containsPointerElts(cir::StructType s) { auto members = s.getMembers(); return std::any_of(members.begin(), members.end(), [](mlir::Type t) { - return mlir::isa(t); + return mlir::isa(t); }); } static bool isAggregateType(LifetimeCheckPass *pass, mlir::Type agg) { - auto t = mlir::dyn_cast(agg); + auto t = mlir::dyn_cast(agg); if (!t) return false; // Lambdas have their special handling, and shall not be considered as @@ -958,7 +958,7 @@ static bool isPointerType(mlir::Type t) { // library headers, the following well- known standard types are treated as-if // annotated as Pointers, in addition to raw pointers and references: ref- // erence_wrapper, and vector::reference. - if (mlir::isa(t)) + if (mlir::isa(t)) return true; return isStructAndHasAttr(t); } @@ -1019,14 +1019,14 @@ void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr, break; // Map values for members to it's index in the aggregate. - auto members = mlir::cast(t).getMembers(); - SmallVector fieldVals; + auto members = mlir::cast(t).getMembers(); + llvm::SmallVector fieldVals; fieldVals.assign(members.size(), {}); // Go through uses of the alloca via `cir.struct_element_addr`, and // track only the fields that are actually used. std::for_each(addr.use_begin(), addr.use_end(), [&](mlir::OpOperand &use) { - auto op = dyn_cast(use.getOwner()); + auto op = dyn_cast(use.getOwner()); if (!op) return; @@ -1036,8 +1036,7 @@ void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr, if (eltAddr.use_empty()) return; - auto eltTy = - mlir::cast(eltAddr.getType()).getPointee(); + auto eltTy = mlir::cast(eltAddr.getType()).getPointee(); // Classify exploded types. Keep alloca original location. classifyAndInitTypeCategories(eltAddr, eltTy, loc, ++nestLevel); @@ -1091,10 +1090,10 @@ void LifetimeCheckPass::checkCoroTaskStore(StoreOp storeOp) { // Bind values that are coming from alloca's (like %arg0 above) to the // pset of %task - this effectively leads to some invalidation of %task // when %arg0 finishes its lifetime at the end of the enclosing cir.scope. - if (auto call = dyn_cast(taskTmp.getDefiningOp())) { + if (auto call = dyn_cast(taskTmp.getDefiningOp())) { bool potentialTaintedTask = false; for (auto arg : call.getArgOperands()) { - auto alloca = dyn_cast(arg.getDefiningOp()); + auto alloca = dyn_cast(arg.getDefiningOp()); if (alloca && currScope->localValues.count(alloca)) { getPmap()[taskAddr].insert(State::getLocalValue(alloca)); potentialTaintedTask = true; @@ -1113,10 +1112,9 @@ void LifetimeCheckPass::checkCoroTaskStore(StoreOp storeOp) { mlir::Value LifetimeCheckPass::getLambdaFromMemberAccess(mlir::Value addr) { auto op = addr.getDefiningOp(); // FIXME: we likely want to consider more indirections here... - if (!isa(op)) + if (!isa(op)) return nullptr; - auto allocaOp = - dyn_cast(op->getOperand(0).getDefiningOp()); + auto allocaOp = dyn_cast(op->getOperand(0).getDefiningOp()); if (!allocaOp || !isLambdaType(allocaOp.getAllocaType())) return nullptr; return allocaOp; @@ -1126,7 +1124,7 @@ void LifetimeCheckPass::checkLambdaCaptureStore(StoreOp storeOp) { auto localByRefAddr = storeOp.getValue(); auto lambdaCaptureAddr = storeOp.getAddr(); - if (!isa_and_nonnull(localByRefAddr.getDefiningOp())) + if (!isa_and_nonnull(localByRefAddr.getDefiningOp())) return; auto lambdaAddr = getLambdaFromMemberAccess(lambdaCaptureAddr); if (!lambdaAddr) @@ -1136,8 +1134,9 @@ void LifetimeCheckPass::checkLambdaCaptureStore(StoreOp storeOp) { getPmap()[lambdaAddr].insert(State::getLocalValue(localByRefAddr)); } -void LifetimeCheckPass::updatePointsToForConstStruct( - mlir::Value addr, mlir::cir::ConstStructAttr value, mlir::Location loc) { +void LifetimeCheckPass::updatePointsToForConstStruct(mlir::Value addr, + cir::ConstStructAttr value, + mlir::Location loc) { assert(aggregates.count(addr) && "expected association with aggregate"); int memberIdx = 0; for (auto &attr : value.getMembers()) { @@ -1145,8 +1144,8 @@ void LifetimeCheckPass::updatePointsToForConstStruct( assert(ta && "expected typed attribute"); auto fieldAddr = aggregates[addr][memberIdx]; // Unseen fields are not tracked. - if (fieldAddr && mlir::isa(ta.getType())) { - assert(mlir::isa(ta) && + if (fieldAddr && mlir::isa(ta.getType())) { + assert(mlir::isa(ta) && "other than null not implemented"); markPsetNull(fieldAddr, loc); } @@ -1162,7 +1161,7 @@ void LifetimeCheckPass::updatePointsToForZeroStruct(mlir::Value addr, for (auto &t : sTy.getMembers()) { auto fieldAddr = aggregates[addr][memberIdx]; // Unseen fields are not tracked. - if (fieldAddr && mlir::isa(t)) { + if (fieldAddr && mlir::isa(t)) { markPsetNull(fieldAddr, loc); } memberIdx++; @@ -1219,12 +1218,12 @@ void LifetimeCheckPass::updatePointsTo(mlir::Value addr, mlir::Value data, // individual exploded fields. if (aggregates.count(addr)) { if (auto constStruct = - mlir::dyn_cast(cstOp.getValue())) { + mlir::dyn_cast(cstOp.getValue())) { updatePointsToForConstStruct(addr, constStruct, loc); return; } - if (auto zero = mlir::dyn_cast(cstOp.getValue())) { + if (auto zero = mlir::dyn_cast(cstOp.getValue())) { if (auto zeroStructTy = dyn_cast(zero.getType())) { updatePointsToForZeroStruct(addr, zeroStructTy, loc); return; @@ -1344,7 +1343,7 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, } case InvalidStyle::EndOfScope: { if (tasks.count(histKey)) { - StringRef resource = "resource"; + llvm::StringRef resource = "resource"; if (auto allocaOp = dyn_cast(info.val->getDefiningOp())) { if (isLambdaType(allocaOp.getAllocaType())) resource = "lambda"; @@ -1354,7 +1353,7 @@ void LifetimeCheckPass::emitInvalidHistory(mlir::InFlightDiagnostic &D, D.attachNote(info.loc) << "at the end of scope or full-expression"; } else if (derefStyle == DerefStyle::RetLambda) { assert(currFunc && "expected function"); - StringRef parent = currFunc->getLambda() ? "lambda" : "function"; + llvm::StringRef parent = currFunc->getLambda() ? "lambda" : "function"; D.attachNote(info.val->getLoc()) << "declared here but invalid after enclosing " << parent << " ends"; @@ -1446,7 +1445,7 @@ void LifetimeCheckPass::checkPointerDeref(mlir::Value addr, mlir::Location loc, emitPsetRemark(); } -static FuncOp getCalleeFromSymbol(ModuleOp mod, StringRef name) { +static FuncOp getCalleeFromSymbol(ModuleOp mod, llvm::StringRef name) { auto global = mlir::SymbolTable::lookupSymbolIn(mod, name); assert(global && "expected to find symbol for function"); return dyn_cast(global); @@ -1455,7 +1454,7 @@ static FuncOp getCalleeFromSymbol(ModuleOp mod, StringRef name) { static const ASTCXXMethodDeclInterface getMethod(ModuleOp mod, CallOp callOp) { if (!callOp.getCallee()) return nullptr; - StringRef name = *callOp.getCallee(); + llvm::StringRef name = *callOp.getCallee(); auto method = getCalleeFromSymbol(mod, name); if (!method || method.getBuiltin()) return nullptr; @@ -1734,7 +1733,7 @@ bool LifetimeCheckPass::isLambdaType(mlir::Type ty) { return IsLambdaTyCache[ty]; IsLambdaTyCache[ty] = false; - auto taskTy = mlir::dyn_cast(ty); + auto taskTy = mlir::dyn_cast(ty); if (!taskTy) return false; if (taskTy.getAst().isLambda()) @@ -1749,7 +1748,7 @@ bool LifetimeCheckPass::isTaskType(mlir::Value taskVal) { return IsTaskTyCache[ty]; bool result = [&] { - auto taskTy = mlir::dyn_cast(taskVal.getType()); + auto taskTy = mlir::dyn_cast(taskVal.getType()); if (!taskTy) return false; return taskTy.getAst().hasPromiseType(); @@ -1885,10 +1884,10 @@ std::unique_ptr mlir::createLifetimeCheckPass(clang::ASTContext *astCtx) { return std::move(lifetime); } -std::unique_ptr mlir::createLifetimeCheckPass(ArrayRef remark, - ArrayRef hist, - unsigned hist_limit, - clang::ASTContext *astCtx) { +std::unique_ptr +mlir::createLifetimeCheckPass(ArrayRef remark, + ArrayRef hist, + unsigned hist_limit, clang::ASTContext *astCtx) { auto lifetime = std::make_unique(); lifetime->setASTContext(astCtx); lifetime->opts.parseOptions(remark, hist, hist_limit); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index b11a028cbc2f..031c3b3b4b40 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -32,7 +32,7 @@ using cir::CIRBaseBuilderTy; using namespace mlir; -using namespace mlir::cir; +using namespace cir; static SmallString<128> getTransformedFileName(ModuleOp theModule) { SmallString<128> FileName; @@ -101,17 +101,15 @@ struct LoweringPreparePass : public LoweringPrepareBase { /// Build attribute of global annotation values void buildGlobalAnnotationValues(); - FuncOp - buildRuntimeFunction(mlir::OpBuilder &builder, llvm::StringRef name, - mlir::Location loc, mlir::cir::FuncType type, - mlir::cir::GlobalLinkageKind linkage = - mlir::cir::GlobalLinkageKind::ExternalLinkage); + FuncOp buildRuntimeFunction( + mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, + cir::FuncType type, + cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage); - GlobalOp - buildRuntimeVariable(mlir::OpBuilder &Builder, llvm::StringRef Name, - mlir::Location Loc, mlir::Type type, - mlir::cir::GlobalLinkageKind Linkage = - mlir::cir::GlobalLinkageKind::ExternalLinkage); + GlobalOp buildRuntimeVariable( + mlir::OpBuilder &Builder, llvm::StringRef Name, mlir::Location Loc, + mlir::Type type, + cir::GlobalLinkageKind Linkage = cir::GlobalLinkageKind::ExternalLinkage); /// Track the current number of global array string count for when the symbol /// has an empty name, and prevent collisions. @@ -122,14 +120,14 @@ struct LoweringPreparePass : public LoweringPrepareBase { /// ----------- clang::ASTContext *astCtx; - std::shared_ptr<::cir::LoweringPrepareCXXABI> cxxABI; + std::shared_ptr cxxABI; void setASTContext(clang::ASTContext *c) { astCtx = c; auto abiStr = c->getTargetInfo().getABI(); switch (c->getCXXABIKind()) { case clang::TargetCXXABI::GenericItanium: - cxxABI.reset(::cir::LoweringPrepareCXXABI::createItaniumABI()); + cxxABI.reset(cir::LoweringPrepareCXXABI::createItaniumABI()); break; case clang::TargetCXXABI::GenericAArch64: case clang::TargetCXXABI::AppleARM64: @@ -138,11 +136,11 @@ struct LoweringPreparePass : public LoweringPrepareBase { // query system. assert(abiStr == "aapcs" || abiStr == "darwinpcs" || abiStr == "aapcs-soft"); - cxxABI.reset(::cir::LoweringPrepareCXXABI::createAArch64ABI( + cxxABI.reset(cir::LoweringPrepareCXXABI::createAArch64ABI( abiStr == "aapcs" - ? ::cir::AArch64ABIKind::AAPCS - : (abiStr == "darwinpccs" ? ::cir::AArch64ABIKind::DarwinPCS - : ::cir::AArch64ABIKind::AAPCSSoft))); + ? cir::AArch64ABIKind::AAPCS + : (abiStr == "darwinpccs" ? cir::AArch64ABIKind::DarwinPCS + : cir::AArch64ABIKind::AAPCSSoft))); break; default: llvm_unreachable("NYI"); @@ -157,23 +155,23 @@ struct LoweringPreparePass : public LoweringPrepareBase { llvm::SmallVector dynamicInitializers; /// List of ctors to be called before main() - SmallVector globalCtorList; + llvm::SmallVector globalCtorList; /// List of dtors to be called when unloading module. - SmallVector globalDtorList; + llvm::SmallVector globalDtorList; /// List of annotations in the module - SmallVector globalAnnotations; + llvm::SmallVector globalAnnotations; }; } // namespace GlobalOp LoweringPreparePass::buildRuntimeVariable( mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, - mlir::Type type, mlir::cir::GlobalLinkageKind linkage) { + mlir::Type type, cir::GlobalLinkageKind linkage) { GlobalOp g = dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( theModule, StringAttr::get(theModule->getContext(), name))); if (!g) { - g = builder.create(loc, name, type); + g = builder.create(loc, name, type); g.setLinkageAttr( - mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage)); + cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage)); mlir::SymbolTable::setSymbolVisibility( g, mlir::SymbolTable::Visibility::Private); } @@ -182,17 +180,17 @@ GlobalOp LoweringPreparePass::buildRuntimeVariable( FuncOp LoweringPreparePass::buildRuntimeFunction( mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc, - mlir::cir::FuncType type, mlir::cir::GlobalLinkageKind linkage) { + cir::FuncType type, cir::GlobalLinkageKind linkage) { FuncOp f = dyn_cast_or_null(SymbolTable::lookupNearestSymbolFrom( theModule, StringAttr::get(theModule->getContext(), name))); if (!f) { - f = builder.create(loc, name, type); + f = builder.create(loc, name, type); f.setLinkageAttr( - mlir::cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage)); + cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage)); mlir::SymbolTable::setSymbolVisibility( f, mlir::SymbolTable::Visibility::Private); mlir::NamedAttrList attrs; - f.setExtraAttrsAttr(mlir::cir::ExtraFuncAttributesAttr::get( + f.setExtraAttrsAttr(cir::ExtraFuncAttributesAttr::get( builder.getContext(), attrs.getDictionary(builder.getContext()))); } return f; @@ -212,11 +210,10 @@ FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { // Create a variable initialization function. CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointAfter(op); - auto voidTy = ::mlir::cir::VoidType::get(builder.getContext()); - auto fnType = mlir::cir::FuncType::get({}, voidTy); - FuncOp f = - buildRuntimeFunction(builder, fnName, op.getLoc(), fnType, - mlir::cir::GlobalLinkageKind::InternalLinkage); + auto voidTy = cir::VoidType::get(builder.getContext()); + auto fnType = cir::FuncType::get({}, voidTy); + FuncOp f = buildRuntimeFunction(builder, fnName, op.getLoc(), fnType, + cir::GlobalLinkageKind::InternalLinkage); // Move over the initialzation code of the ctor region. auto &block = op.getCtorRegion().front(); @@ -236,8 +233,8 @@ FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { // Look for the destructor call in dtorBlock auto &dtorBlock = dtorRegion.front(); - mlir::cir::CallOp dtorCall; - for (auto op : reverse(dtorBlock.getOps())) { + cir::CallOp dtorCall; + for (auto op : reverse(dtorBlock.getOps())) { dtorCall = op; break; } @@ -249,16 +246,14 @@ FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { // Create a runtime helper function: // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); - auto voidPtrTy = - ::mlir::cir::PointerType::get(builder.getContext(), voidTy); - auto voidFnTy = mlir::cir::FuncType::get({voidPtrTy}, voidTy); - auto voidFnPtrTy = - ::mlir::cir::PointerType::get(builder.getContext(), voidFnTy); + auto voidPtrTy = cir::PointerType::get(builder.getContext(), voidTy); + auto voidFnTy = cir::FuncType::get({voidPtrTy}, voidTy); + auto voidFnPtrTy = cir::PointerType::get(builder.getContext(), voidFnTy); auto HandlePtrTy = - mlir::cir::PointerType::get(builder.getContext(), Handle.getSymType()); - auto fnAtExitType = mlir::cir::FuncType::get( - {voidFnPtrTy, voidPtrTy, HandlePtrTy}, - mlir::cir::VoidType::get(builder.getContext())); + cir::PointerType::get(builder.getContext(), Handle.getSymType()); + auto fnAtExitType = + cir::FuncType::get({voidFnPtrTy, voidPtrTy, HandlePtrTy}, + cir::VoidType::get(builder.getContext())); const char *nameAtExit = "__cxa_atexit"; FuncOp fnAtExit = buildRuntimeFunction(builder, nameAtExit, op.getLoc(), fnAtExitType); @@ -267,18 +262,18 @@ FuncOp LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(GlobalOp op) { // &__dso_handle) builder.setInsertionPointAfter(dtorCall); mlir::Value args[3]; - auto dtorPtrTy = mlir::cir::PointerType::get(builder.getContext(), - dtorFunc.getFunctionType()); + auto dtorPtrTy = + cir::PointerType::get(builder.getContext(), dtorFunc.getFunctionType()); // dtorPtrTy - args[0] = builder.create( - dtorCall.getLoc(), dtorPtrTy, dtorFunc.getSymName()); - args[0] = builder.create( - dtorCall.getLoc(), voidFnPtrTy, mlir::cir::CastKind::bitcast, args[0]); - args[1] = builder.create(dtorCall.getLoc(), voidPtrTy, - mlir::cir::CastKind::bitcast, - dtorCall.getArgOperand(0)); - args[2] = builder.create( - Handle.getLoc(), HandlePtrTy, Handle.getSymName()); + args[0] = builder.create(dtorCall.getLoc(), dtorPtrTy, + dtorFunc.getSymName()); + args[0] = builder.create(dtorCall.getLoc(), voidFnPtrTy, + cir::CastKind::bitcast, args[0]); + args[1] = builder.create(dtorCall.getLoc(), voidPtrTy, + cir::CastKind::bitcast, + dtorCall.getArgOperand(0)); + args[2] = builder.create(Handle.getLoc(), HandlePtrTy, + Handle.getSymName()); builder.createCallOp(dtorCall.getLoc(), fnAtExit, args); dtorCall->erase(); entryBB->getOperations().splice(entryBB->end(), dtorBlock.getOperations(), @@ -305,11 +300,11 @@ static void canonicalizeIntrinsicThreeWayCmp(CIRBaseBuilderTy &builder, } auto canonicalizedCmpInfo = - mlir::cir::CmpThreeWayInfoAttr::get(builder.getContext(), -1, 0, 1); + cir::CmpThreeWayInfoAttr::get(builder.getContext(), -1, 0, 1); mlir::Value result = builder - .create(loc, op.getType(), op.getLhs(), - op.getRhs(), canonicalizedCmpInfo) + .create(loc, op.getType(), op.getLhs(), + op.getRhs(), canonicalizedCmpInfo) .getResult(); auto compareAndYield = [&](mlir::Value input, int64_t test, @@ -317,12 +312,12 @@ static void canonicalizeIntrinsicThreeWayCmp(CIRBaseBuilderTy &builder, // Create a conditional branch that tests whether `input` is equal to // `test`. If `input` is equal to `test`, yield `yield`. Otherwise, yield // `input` as is. - auto testValue = builder.getConstant( - loc, mlir::cir::IntAttr::get(input.getType(), test)); - auto yieldValue = builder.getConstant( - loc, mlir::cir::IntAttr::get(input.getType(), yield)); + auto testValue = + builder.getConstant(loc, cir::IntAttr::get(input.getType(), test)); + auto yieldValue = + builder.getConstant(loc, cir::IntAttr::get(input.getType(), yield)); auto eqToTest = - builder.createCompare(loc, mlir::cir::CmpOpKind::eq, input, testValue); + builder.createCompare(loc, cir::CmpOpKind::eq, input, testValue); return builder.createSelect(loc, eqToTest, yieldValue, input); }; @@ -342,7 +337,7 @@ static void canonicalizeIntrinsicThreeWayCmp(CIRBaseBuilderTy &builder, void LoweringPreparePass::lowerVAArgOp(VAArgOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPoint(op); - ::cir::CIRDataLayout datalayout(theModule); + cir::CIRDataLayout datalayout(theModule); auto res = cxxABI->lowerVAArg(builder, op, datalayout); if (res) { @@ -354,7 +349,7 @@ void LoweringPreparePass::lowerVAArgOp(VAArgOp op) { void LoweringPreparePass::lowerUnaryOp(UnaryOp op) { auto ty = op.getType(); - if (!mlir::isa(ty)) + if (!mlir::isa(ty)) return; auto loc = op.getLoc(); @@ -371,22 +366,22 @@ void LoweringPreparePass::lowerUnaryOp(UnaryOp op) { mlir::Value resultReal; mlir::Value resultImag; switch (opKind) { - case mlir::cir::UnaryOpKind::Inc: - case mlir::cir::UnaryOpKind::Dec: + case cir::UnaryOpKind::Inc: + case cir::UnaryOpKind::Dec: resultReal = builder.createUnaryOp(loc, opKind, operandReal); resultImag = operandImag; break; - case mlir::cir::UnaryOpKind::Plus: - case mlir::cir::UnaryOpKind::Minus: + case cir::UnaryOpKind::Plus: + case cir::UnaryOpKind::Minus: resultReal = builder.createUnaryOp(loc, opKind, operandReal); resultImag = builder.createUnaryOp(loc, opKind, operandImag); break; - case mlir::cir::UnaryOpKind::Not: + case cir::UnaryOpKind::Not: resultReal = operandReal; resultImag = - builder.createUnaryOp(loc, mlir::cir::UnaryOpKind::Minus, operandImag); + builder.createUnaryOp(loc, cir::UnaryOpKind::Minus, operandImag); break; } @@ -397,13 +392,12 @@ void LoweringPreparePass::lowerUnaryOp(UnaryOp op) { void LoweringPreparePass::lowerBinOp(BinOp op) { auto ty = op.getType(); - if (!mlir::isa(ty)) + if (!mlir::isa(ty)) return; auto loc = op.getLoc(); auto opKind = op.getKind(); - assert((opKind == mlir::cir::BinOpKind::Add || - opKind == mlir::cir::BinOpKind::Sub) && + assert((opKind == cir::BinOpKind::Add || opKind == cir::BinOpKind::Sub) && "invalid binary op kind on complex numbers"); CIRBaseBuilderTy builder(getContext()); @@ -441,18 +435,18 @@ static mlir::Value lowerComplexToScalarCast(MLIRContext &ctx, CastOp op) { auto src = op.getSrc(); - if (!mlir::isa(op.getType())) + if (!mlir::isa(op.getType())) return builder.createComplexReal(op.getLoc(), src); // Complex cast to bool: (bool)(a+bi) => (bool)a || (bool)b auto srcReal = builder.createComplexReal(op.getLoc(), src); auto srcImag = builder.createComplexImag(op.getLoc(), src); - mlir::cir::CastKind elemToBoolKind; - if (op.getKind() == mlir::cir::CastKind::float_complex_to_bool) - elemToBoolKind = mlir::cir::CastKind::float_to_bool; - else if (op.getKind() == mlir::cir::CastKind::int_complex_to_bool) - elemToBoolKind = mlir::cir::CastKind::int_to_bool; + cir::CastKind elemToBoolKind; + if (op.getKind() == cir::CastKind::float_complex_to_bool) + elemToBoolKind = cir::CastKind::float_to_bool; + else if (op.getKind() == cir::CastKind::int_complex_to_bool) + elemToBoolKind = cir::CastKind::int_to_bool; else llvm_unreachable("invalid complex to bool cast kind"); @@ -472,24 +466,24 @@ static mlir::Value lowerComplexToComplexCast(MLIRContext &ctx, CastOp op) { auto src = op.getSrc(); auto dstComplexElemTy = - mlir::cast(op.getType()).getElementTy(); + mlir::cast(op.getType()).getElementTy(); auto srcReal = builder.createComplexReal(op.getLoc(), src); auto srcImag = builder.createComplexReal(op.getLoc(), src); - mlir::cir::CastKind scalarCastKind; + cir::CastKind scalarCastKind; switch (op.getKind()) { - case mlir::cir::CastKind::float_complex: - scalarCastKind = mlir::cir::CastKind::floating; + case cir::CastKind::float_complex: + scalarCastKind = cir::CastKind::floating; break; - case mlir::cir::CastKind::float_complex_to_int_complex: - scalarCastKind = mlir::cir::CastKind::float_to_int; + case cir::CastKind::float_complex_to_int_complex: + scalarCastKind = cir::CastKind::float_to_int; break; - case mlir::cir::CastKind::int_complex: - scalarCastKind = mlir::cir::CastKind::integral; + case cir::CastKind::int_complex: + scalarCastKind = cir::CastKind::integral; break; - case mlir::cir::CastKind::int_complex_to_float_complex: - scalarCastKind = mlir::cir::CastKind::int_to_float; + case cir::CastKind::int_complex_to_float_complex: + scalarCastKind = cir::CastKind::int_to_float; break; default: llvm_unreachable("invalid complex to complex cast kind"); @@ -505,22 +499,22 @@ static mlir::Value lowerComplexToComplexCast(MLIRContext &ctx, CastOp op) { void LoweringPreparePass::lowerCastOp(CastOp op) { mlir::Value loweredValue; switch (op.getKind()) { - case mlir::cir::CastKind::float_to_complex: - case mlir::cir::CastKind::int_to_complex: + case cir::CastKind::float_to_complex: + case cir::CastKind::int_to_complex: loweredValue = lowerScalarToComplexCast(getContext(), op); break; - case mlir::cir::CastKind::float_complex_to_real: - case mlir::cir::CastKind::int_complex_to_real: - case mlir::cir::CastKind::float_complex_to_bool: - case mlir::cir::CastKind::int_complex_to_bool: + case cir::CastKind::float_complex_to_real: + case cir::CastKind::int_complex_to_real: + case cir::CastKind::float_complex_to_bool: + case cir::CastKind::int_complex_to_bool: loweredValue = lowerComplexToScalarCast(getContext(), op); break; - case mlir::cir::CastKind::float_complex: - case mlir::cir::CastKind::float_complex_to_int_complex: - case mlir::cir::CastKind::int_complex: - case mlir::cir::CastKind::int_complex_to_float_complex: + case cir::CastKind::float_complex: + case cir::CastKind::float_complex_to_int_complex: + case cir::CastKind::int_complex: + case cir::CastKind::int_complex_to_float_complex: loweredValue = lowerComplexToComplexCast(getContext(), op); break; @@ -535,16 +529,16 @@ void LoweringPreparePass::lowerCastOp(CastOp op) { static mlir::Value buildComplexBinOpLibCall( LoweringPreparePass &pass, CIRBaseBuilderTy &builder, llvm::StringRef (*libFuncNameGetter)(llvm::APFloat::Semantics), - mlir::Location loc, mlir::cir::ComplexType ty, mlir::Value lhsReal, + mlir::Location loc, cir::ComplexType ty, mlir::Value lhsReal, mlir::Value lhsImag, mlir::Value rhsReal, mlir::Value rhsImag) { - auto elementTy = mlir::cast(ty.getElementTy()); + auto elementTy = mlir::cast(ty.getElementTy()); auto libFuncName = libFuncNameGetter( llvm::APFloat::SemanticsToEnum(elementTy.getFloatSemantics())); llvm::SmallVector libFuncInputTypes(4, elementTy); - auto libFuncTy = mlir::cir::FuncType::get(libFuncInputTypes, ty); + auto libFuncTy = cir::FuncType::get(libFuncInputTypes, ty); - mlir::cir::FuncOp libFunc; + cir::FuncOp libFunc; { mlir::OpBuilder::InsertionGuard ipGuard{builder}; builder.setInsertionPointToStart(pass.theModule.getBody()); @@ -598,32 +592,31 @@ getComplexDivLibCallName(llvm::APFloat::Semantics semantics) { static mlir::Value lowerComplexMul(LoweringPreparePass &pass, CIRBaseBuilderTy &builder, - mlir::Location loc, - mlir::cir::ComplexBinOp op, + mlir::Location loc, cir::ComplexBinOp op, mlir::Value lhsReal, mlir::Value lhsImag, mlir::Value rhsReal, mlir::Value rhsImag) { // (a+bi) * (c+di) = (ac-bd) + (ad+bc)i auto resultRealLhs = - builder.createBinop(lhsReal, mlir::cir::BinOpKind::Mul, rhsReal); + builder.createBinop(lhsReal, cir::BinOpKind::Mul, rhsReal); auto resultRealRhs = - builder.createBinop(lhsImag, mlir::cir::BinOpKind::Mul, rhsImag); + builder.createBinop(lhsImag, cir::BinOpKind::Mul, rhsImag); auto resultImagLhs = - builder.createBinop(lhsReal, mlir::cir::BinOpKind::Mul, rhsImag); + builder.createBinop(lhsReal, cir::BinOpKind::Mul, rhsImag); auto resultImagRhs = - builder.createBinop(lhsImag, mlir::cir::BinOpKind::Mul, rhsReal); - auto resultReal = builder.createBinop( - resultRealLhs, mlir::cir::BinOpKind::Sub, resultRealRhs); - auto resultImag = builder.createBinop( - resultImagLhs, mlir::cir::BinOpKind::Add, resultImagRhs); + builder.createBinop(lhsImag, cir::BinOpKind::Mul, rhsReal); + auto resultReal = + builder.createBinop(resultRealLhs, cir::BinOpKind::Sub, resultRealRhs); + auto resultImag = + builder.createBinop(resultImagLhs, cir::BinOpKind::Add, resultImagRhs); auto algebraicResult = builder.createComplexCreate(loc, resultReal, resultImag); auto ty = op.getType(); auto range = op.getRange(); - if (mlir::isa(ty.getElementTy()) || - range == mlir::cir::ComplexRangeKind::Basic || - range == mlir::cir::ComplexRangeKind::Improved || - range == mlir::cir::ComplexRangeKind::Promoted) + if (mlir::isa(ty.getElementTy()) || + range == cir::ComplexRangeKind::Basic || + range == cir::ComplexRangeKind::Improved || + range == cir::ComplexRangeKind::Promoted) return algebraicResult; // Check whether the real part and the imaginary part of the result are both @@ -634,7 +627,7 @@ static mlir::Value lowerComplexMul(LoweringPreparePass &pass, auto resultRealAndImagAreNaN = builder.createLogicalAnd(loc, resultRealIsNaN, resultImagIsNaN); return builder - .create( + .create( loc, resultRealAndImagAreNaN, [&](mlir::OpBuilder &, mlir::Location) { auto libCallResult = buildComplexBinOpLibCall( @@ -658,23 +651,18 @@ buildAlgebraicComplexDiv(CIRBaseBuilderTy &builder, mlir::Location loc, auto &c = rhsReal; auto &d = rhsImag; - auto ac = builder.createBinop(loc, a, mlir::cir::BinOpKind::Mul, c); // a*c - auto bd = builder.createBinop(loc, b, mlir::cir::BinOpKind::Mul, d); // b*d - auto cc = builder.createBinop(loc, c, mlir::cir::BinOpKind::Mul, c); // c*c - auto dd = builder.createBinop(loc, d, mlir::cir::BinOpKind::Mul, d); // d*d - auto acbd = - builder.createBinop(loc, ac, mlir::cir::BinOpKind::Add, bd); // ac+bd - auto ccdd = - builder.createBinop(loc, cc, mlir::cir::BinOpKind::Add, dd); // cc+dd - auto resultReal = - builder.createBinop(loc, acbd, mlir::cir::BinOpKind::Div, ccdd); + auto ac = builder.createBinop(loc, a, cir::BinOpKind::Mul, c); // a*c + auto bd = builder.createBinop(loc, b, cir::BinOpKind::Mul, d); // b*d + auto cc = builder.createBinop(loc, c, cir::BinOpKind::Mul, c); // c*c + auto dd = builder.createBinop(loc, d, cir::BinOpKind::Mul, d); // d*d + auto acbd = builder.createBinop(loc, ac, cir::BinOpKind::Add, bd); // ac+bd + auto ccdd = builder.createBinop(loc, cc, cir::BinOpKind::Add, dd); // cc+dd + auto resultReal = builder.createBinop(loc, acbd, cir::BinOpKind::Div, ccdd); - auto bc = builder.createBinop(loc, b, mlir::cir::BinOpKind::Mul, c); // b*c - auto ad = builder.createBinop(loc, a, mlir::cir::BinOpKind::Mul, d); // a*d - auto bcad = - builder.createBinop(loc, bc, mlir::cir::BinOpKind::Sub, ad); // bc-ad - auto resultImag = - builder.createBinop(loc, bcad, mlir::cir::BinOpKind::Div, ccdd); + auto bc = builder.createBinop(loc, b, cir::BinOpKind::Mul, c); // b*c + auto ad = builder.createBinop(loc, a, cir::BinOpKind::Mul, d); // a*d + auto bcad = builder.createBinop(loc, bc, cir::BinOpKind::Sub, ad); // bc-ad + auto resultImag = builder.createBinop(loc, bcad, cir::BinOpKind::Div, ccdd); return builder.createComplexCreate(loc, resultReal, resultImag); } @@ -709,52 +697,47 @@ buildRangeReductionComplexDiv(CIRBaseBuilderTy &builder, mlir::Location loc, auto &d = rhsImag; auto trueBranchBuilder = [&](mlir::OpBuilder &, mlir::Location) { - auto r = builder.createBinop(loc, d, mlir::cir::BinOpKind::Div, - c); // r := d / c - auto rd = builder.createBinop(loc, r, mlir::cir::BinOpKind::Mul, d); // r*d - auto tmp = builder.createBinop(loc, c, mlir::cir::BinOpKind::Add, + auto r = builder.createBinop(loc, d, cir::BinOpKind::Div, + c); // r := d / c + auto rd = builder.createBinop(loc, r, cir::BinOpKind::Mul, d); // r*d + auto tmp = builder.createBinop(loc, c, cir::BinOpKind::Add, rd); // tmp := c + r*d - auto br = builder.createBinop(loc, b, mlir::cir::BinOpKind::Mul, r); // b*r - auto abr = - builder.createBinop(loc, a, mlir::cir::BinOpKind::Add, br); // a + b*r - auto e = builder.createBinop(loc, abr, mlir::cir::BinOpKind::Div, tmp); + auto br = builder.createBinop(loc, b, cir::BinOpKind::Mul, r); // b*r + auto abr = builder.createBinop(loc, a, cir::BinOpKind::Add, br); // a + b*r + auto e = builder.createBinop(loc, abr, cir::BinOpKind::Div, tmp); - auto ar = builder.createBinop(loc, a, mlir::cir::BinOpKind::Mul, r); // a*r - auto bar = - builder.createBinop(loc, b, mlir::cir::BinOpKind::Sub, ar); // b - a*r - auto f = builder.createBinop(loc, bar, mlir::cir::BinOpKind::Div, tmp); + auto ar = builder.createBinop(loc, a, cir::BinOpKind::Mul, r); // a*r + auto bar = builder.createBinop(loc, b, cir::BinOpKind::Sub, ar); // b - a*r + auto f = builder.createBinop(loc, bar, cir::BinOpKind::Div, tmp); auto result = builder.createComplexCreate(loc, e, f); builder.createYield(loc, result); }; auto falseBranchBuilder = [&](mlir::OpBuilder &, mlir::Location) { - auto r = builder.createBinop(loc, c, mlir::cir::BinOpKind::Div, - d); // r := c / d - auto rc = builder.createBinop(loc, r, mlir::cir::BinOpKind::Mul, c); // r*c - auto tmp = builder.createBinop(loc, d, mlir::cir::BinOpKind::Add, + auto r = builder.createBinop(loc, c, cir::BinOpKind::Div, + d); // r := c / d + auto rc = builder.createBinop(loc, r, cir::BinOpKind::Mul, c); // r*c + auto tmp = builder.createBinop(loc, d, cir::BinOpKind::Add, rc); // tmp := d + r*c - auto ar = builder.createBinop(loc, a, mlir::cir::BinOpKind::Mul, r); // a*r - auto arb = - builder.createBinop(loc, ar, mlir::cir::BinOpKind::Add, b); // a*r + b - auto e = builder.createBinop(loc, arb, mlir::cir::BinOpKind::Div, tmp); + auto ar = builder.createBinop(loc, a, cir::BinOpKind::Mul, r); // a*r + auto arb = builder.createBinop(loc, ar, cir::BinOpKind::Add, b); // a*r + b + auto e = builder.createBinop(loc, arb, cir::BinOpKind::Div, tmp); - auto br = builder.createBinop(loc, b, mlir::cir::BinOpKind::Mul, r); // b*r - auto bra = - builder.createBinop(loc, br, mlir::cir::BinOpKind::Sub, a); // b*r - a - auto f = builder.createBinop(loc, bra, mlir::cir::BinOpKind::Div, tmp); + auto br = builder.createBinop(loc, b, cir::BinOpKind::Mul, r); // b*r + auto bra = builder.createBinop(loc, br, cir::BinOpKind::Sub, a); // b*r - a + auto f = builder.createBinop(loc, bra, cir::BinOpKind::Div, tmp); auto result = builder.createComplexCreate(loc, e, f); builder.createYield(loc, result); }; - auto cFabs = builder.create(loc, c); - auto dFabs = builder.create(loc, d); - auto cmpResult = - builder.createCompare(loc, mlir::cir::CmpOpKind::ge, cFabs, dFabs); - auto ternary = builder.create( + auto cFabs = builder.create(loc, c); + auto dFabs = builder.create(loc, d); + auto cmpResult = builder.createCompare(loc, cir::CmpOpKind::ge, cFabs, dFabs); + auto ternary = builder.create( loc, cmpResult, trueBranchBuilder, falseBranchBuilder); return ternary.getResult(); @@ -762,18 +745,17 @@ buildRangeReductionComplexDiv(CIRBaseBuilderTy &builder, mlir::Location loc, static mlir::Value lowerComplexDiv(LoweringPreparePass &pass, CIRBaseBuilderTy &builder, - mlir::Location loc, - mlir::cir::ComplexBinOp op, + mlir::Location loc, cir::ComplexBinOp op, mlir::Value lhsReal, mlir::Value lhsImag, mlir::Value rhsReal, mlir::Value rhsImag) { auto ty = op.getType(); - if (mlir::isa(ty.getElementTy())) { + if (mlir::isa(ty.getElementTy())) { auto range = op.getRange(); - if (range == mlir::cir::ComplexRangeKind::Improved || - (range == mlir::cir::ComplexRangeKind::Promoted && !op.getPromoted())) + if (range == cir::ComplexRangeKind::Improved || + (range == cir::ComplexRangeKind::Promoted && !op.getPromoted())) return buildRangeReductionComplexDiv(builder, loc, lhsReal, lhsImag, rhsReal, rhsImag); - if (range == mlir::cir::ComplexRangeKind::Full) + if (range == cir::ComplexRangeKind::Full) return buildComplexBinOpLibCall(pass, builder, &getComplexDivLibCallName, loc, ty, lhsReal, lhsImag, rhsReal, rhsImag); @@ -796,7 +778,7 @@ void LoweringPreparePass::lowerComplexBinOp(ComplexBinOp op) { auto rhsImag = builder.createComplexImag(loc, rhs); mlir::Value loweredResult; - if (op.getKind() == mlir::cir::ComplexBinOpKind::Mul) + if (op.getKind() == cir::ComplexBinOpKind::Mul) loweredResult = lowerComplexMul(*this, builder, loc, op, lhsReal, lhsImag, rhsReal, rhsImag); else @@ -832,8 +814,8 @@ void LoweringPreparePass::lowerThreeWayCmpOp(CmpThreeWayOp op) { auto cmpInfo = op.getInfo(); auto buildCmpRes = [&](int64_t value) -> mlir::Value { - return builder.create( - loc, op.getType(), mlir::cir::IntAttr::get(op.getType(), value)); + return builder.create( + loc, op.getType(), cir::IntAttr::get(op.getType(), value)); }; auto ltRes = buildCmpRes(cmpInfo.getLt()); auto eqRes = buildCmpRes(cmpInfo.getEq()); @@ -841,8 +823,7 @@ void LoweringPreparePass::lowerThreeWayCmpOp(CmpThreeWayOp op) { auto buildCmp = [&](CmpOpKind kind) -> mlir::Value { auto ty = BoolType::get(&getContext()); - return builder.create(loc, ty, kind, op.getLhs(), - op.getRhs()); + return builder.create(loc, ty, kind, op.getLhs(), op.getRhs()); }; auto buildSelect = [&](mlir::Value condition, mlir::Value trueResult, mlir::Value falseResult) -> mlir::Value { @@ -915,7 +896,7 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { for (auto &f : dynamicInitializers) { // TODO: handle globals with a user-specified initialzation priority. - auto ctorAttr = mlir::cir::GlobalCtorAttr::get(&getContext(), f.getName()); + auto ctorAttr = cir::GlobalCtorAttr::get(&getContext(), f.getName()); globalCtorList.push_back(ctorAttr); } @@ -939,11 +920,10 @@ void LoweringPreparePass::buildCXXGlobalInitFunc() { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPointToEnd(&theModule.getBodyRegion().back()); - auto fnType = mlir::cir::FuncType::get( - {}, mlir::cir::VoidType::get(builder.getContext())); - FuncOp f = - buildRuntimeFunction(builder, fnName, theModule.getLoc(), fnType, - mlir::cir::GlobalLinkageKind::ExternalLinkage); + auto fnType = + cir::FuncType::get({}, cir::VoidType::get(builder.getContext())); + FuncOp f = buildRuntimeFunction(builder, fnName, theModule.getLoc(), fnType, + cir::GlobalLinkageKind::ExternalLinkage); builder.setInsertionPointToStart(f.addEntryBlock()); for (auto &f : dynamicInitializers) { builder.createCallOp(f.getLoc(), f); @@ -973,14 +953,14 @@ static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, // TODO: instead of fixed integer size, create alias for PtrDiffTy and unify // with CIRGen stuff. auto ptrDiffTy = - mlir::cir::IntType::get(builder.getContext(), 64, /*signed=*/false); - auto numArrayElementsConst = builder.create( - loc, ptrDiffTy, mlir::cir::IntAttr::get(ptrDiffTy, arrayLen)); + cir::IntType::get(builder.getContext(), 64, /*signed=*/false); + auto numArrayElementsConst = builder.create( + loc, ptrDiffTy, cir::IntAttr::get(ptrDiffTy, arrayLen)); - auto begin = builder.create( - loc, eltTy, mlir::cir::CastKind::array_to_ptrdecay, arrayAddr); - mlir::Value end = builder.create( - loc, eltTy, begin, numArrayElementsConst); + auto begin = builder.create( + loc, eltTy, cir::CastKind::array_to_ptrdecay, arrayAddr); + mlir::Value end = builder.create(loc, eltTy, begin, + numArrayElementsConst); auto tmpAddr = builder.createAlloca( loc, /*addr type*/ builder.getPointerTo(eltTy), @@ -991,29 +971,29 @@ static void lowerArrayDtorCtorIntoLoop(CIRBaseBuilderTy &builder, loc, /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto currentElement = b.create(loc, eltTy, tmpAddr); - mlir::Type boolTy = mlir::cir::BoolType::get(b.getContext()); - auto cmp = builder.create( - loc, boolTy, mlir::cir::CmpOpKind::eq, currentElement, end); + auto currentElement = b.create(loc, eltTy, tmpAddr); + mlir::Type boolTy = cir::BoolType::get(b.getContext()); + auto cmp = builder.create(loc, boolTy, cir::CmpOpKind::eq, + currentElement, end); builder.createCondition(cmp); }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - auto currentElement = b.create(loc, eltTy, tmpAddr); + auto currentElement = b.create(loc, eltTy, tmpAddr); CallOp ctorCall; op->walk([&](CallOp c) { ctorCall = c; }); assert(ctorCall && "expected ctor call"); - auto one = builder.create( - loc, ptrDiffTy, mlir::cir::IntAttr::get(ptrDiffTy, 1)); + auto one = builder.create( + loc, ptrDiffTy, cir::IntAttr::get(ptrDiffTy, 1)); ctorCall->moveAfter(one); ctorCall->setOperand(0, currentElement); // Advance pointer and store them to temporary variable - auto nextElement = builder.create( - loc, eltTy, currentElement, one); + auto nextElement = + builder.create(loc, eltTy, currentElement, one); builder.createStore(loc, nextElement, tmpAddr); builder.createYield(loc); }); @@ -1027,27 +1007,26 @@ void LoweringPreparePass::lowerArrayDtor(ArrayDtor op) { builder.setInsertionPointAfter(op.getOperation()); auto eltTy = op->getRegion(0).getArgument(0).getType(); - auto arrayLen = mlir::cast( - mlir::cast(op.getAddr().getType()) - .getPointee()) - .getSize(); + auto arrayLen = + mlir::cast( + mlir::cast(op.getAddr().getType()).getPointee()) + .getSize(); lowerArrayDtorCtorIntoLoop(builder, op, eltTy, op.getAddr(), arrayLen); } -static std::string getGlobalVarNameForConstString(mlir::cir::StoreOp op, +static std::string getGlobalVarNameForConstString(cir::StoreOp op, uint64_t &cnt) { llvm::SmallString<64> finalName; llvm::raw_svector_ostream Out(finalName); Out << "__const."; - if (auto fnOp = op->getParentOfType()) { + if (auto fnOp = op->getParentOfType()) { Out << fnOp.getSymNameAttr().getValue() << "."; } else { Out << "module."; } - auto allocaOp = - dyn_cast_or_null(op.getAddr().getDefiningOp()); + auto allocaOp = dyn_cast_or_null(op.getAddr().getDefiningOp()); if (allocaOp && !allocaOp.getName().empty()) Out << allocaOp.getName(); else @@ -1059,29 +1038,29 @@ void LoweringPreparePass::lowerToMemCpy(StoreOp op) { // Now that basic filter is done, do more checks before proceding with the // transformation. auto cstOp = - dyn_cast_if_present(op.getValue().getDefiningOp()); + dyn_cast_if_present(op.getValue().getDefiningOp()); if (!cstOp) return; - if (!isa(cstOp.getValue())) + if (!isa(cstOp.getValue())) return; CIRBaseBuilderTy builder(getContext()); // Create a global which is initialized with the attribute that is either a // constant array or struct. - assert(!::cir::MissingFeatures::unnamedAddr() && "NYI"); + assert(!cir::MissingFeatures::unnamedAddr() && "NYI"); builder.setInsertionPointToStart(&theModule.getBodyRegion().front()); std::string globalName = getGlobalVarNameForConstString(op, annonGlobalConstArrayCount); - mlir::cir::GlobalOp globalCst = buildRuntimeVariable( + cir::GlobalOp globalCst = buildRuntimeVariable( builder, globalName, op.getLoc(), op.getValue().getType(), - mlir::cir::GlobalLinkageKind::PrivateLinkage); + cir::GlobalLinkageKind::PrivateLinkage); globalCst.setInitialValueAttr(cstOp.getValue()); globalCst.setConstant(true); // Transform the store into a cir.copy. builder.setInsertionPointAfter(op.getOperation()); - mlir::cir::CopyOp memCpy = + cir::CopyOp memCpy = builder.createCopy(op.getAddr(), builder.createGetGlobal(globalCst)); op->replaceAllUsesWith(memCpy); op->erase(); @@ -1094,10 +1073,10 @@ void LoweringPreparePass::lowerArrayCtor(ArrayCtor op) { builder.setInsertionPointAfter(op.getOperation()); auto eltTy = op->getRegion(0).getArgument(0).getType(); - auto arrayLen = mlir::cast( - mlir::cast(op.getAddr().getType()) - .getPointee()) - .getSize(); + auto arrayLen = + mlir::cast( + mlir::cast(op.getAddr().getType()).getPointee()) + .getSize(); lowerArrayDtorCtorIntoLoop(builder, op, eltTy, op.getAddr(), arrayLen); } @@ -1139,7 +1118,7 @@ void LoweringPreparePass::addGlobalAnnotations(mlir::Operation *op, auto globalValue = cast(op); mlir::StringAttr globalValueName = globalValue.getNameAttr(); for (auto &annot : annotations) { - SmallVector entryArray = {globalValueName, annot}; + llvm::SmallVector entryArray = {globalValueName, annot}; globalAnnotations.push_back( mlir::ArrayAttr::get(theModule.getContext(), entryArray)); } @@ -1151,7 +1130,7 @@ void LoweringPreparePass::buildGlobalAnnotationValues() { mlir::ArrayAttr annotationValueArray = mlir::ArrayAttr::get(theModule.getContext(), globalAnnotations); theModule->setAttr("cir.global_annotations", - mlir::cir::GlobalAnnotationValuesAttr::get( + cir::GlobalAnnotationValuesAttr::get( theModule.getContext(), annotationValueArray)); } @@ -1184,9 +1163,9 @@ void LoweringPreparePass::runOnOp(Operation *op) { lowerArrayDtor(arrayDtor); } else if (auto storeOp = dyn_cast(op)) { mlir::Type valTy = storeOp.getValue().getType(); - if (isa(valTy) || isa(valTy)) + if (isa(valTy) || isa(valTy)) lowerToMemCpy(storeOp); - } else if (auto fnOp = dyn_cast(op)) { + } else if (auto fnOp = dyn_cast(op)) { if (auto globalCtor = fnOp.getGlobalCtorAttr()) { globalCtorList.push_back(globalCtor); } else if (auto globalDtor = fnOp.getGlobalDtorAttr()) { @@ -1204,12 +1183,12 @@ void LoweringPreparePass::runOnOperation() { theModule = cast<::mlir::ModuleOp>(op); } - SmallVector opsToTransform; + llvm::SmallVector opsToTransform; op->walk([&](Operation *op) { if (isa(op)) + ArrayCtor, ArrayDtor, cir::FuncOp, StoreOp>(op)) opsToTransform.push_back(op); }); diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h index 42e8917b43b6..47c63fae7d7b 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h @@ -27,16 +27,15 @@ namespace cir { class LoweringPrepareCXXABI { public: static LoweringPrepareCXXABI *createItaniumABI(); - static LoweringPrepareCXXABI *createAArch64ABI(::cir::AArch64ABIKind k); + static LoweringPrepareCXXABI *createAArch64ABI(cir::AArch64ABIKind k); - virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, - mlir::cir::VAArgOp op, + virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) = 0; virtual ~LoweringPrepareCXXABI() {} virtual mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, clang::ASTContext &astCtx, - mlir::cir::DynamicCastOp op) = 0; + cir::DynamicCastOp op) = 0; }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h index 1dbef0d24ddd..58b2a5e3915d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h @@ -18,7 +18,7 @@ class LoweringPrepareItaniumCXXABI : public cir::LoweringPrepareCXXABI { public: mlir::Value lowerDynamicCast(cir::CIRBaseBuilderTy &builder, clang::ASTContext &astCtx, - mlir::cir::DynamicCastOp op) override; - mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, + cir::DynamicCastOp op) override; + mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) override; }; diff --git a/clang/lib/CIR/Dialect/Transforms/PassDetail.h b/clang/lib/CIR/Dialect/Transforms/PassDetail.h index 2fdcfbda61e5..697b24eafcd8 100644 --- a/clang/lib/CIR/Dialect/Transforms/PassDetail.h +++ b/clang/lib/CIR/Dialect/Transforms/PassDetail.h @@ -12,15 +12,15 @@ #include "mlir/IR/Dialect.h" #include "mlir/Pass/Pass.h" +namespace cir { +class CIRDialect; +} // namespace cir + namespace mlir { // Forward declaration from Dialect.h template void registerDialect(DialectRegistry ®istry); -namespace cir { -class CIRDialect; -} // namespace cir - #define GEN_PASS_CLASSES #include "clang/CIR/Dialect/Passes.h.inc" diff --git a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp index 19ebf75a1c2c..6a46c4bad600 100644 --- a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp @@ -76,8 +76,8 @@ struct canonicalizeIVtoCmpLHS : public OpRewritePattern { void replaceWithNewCmpOp(CmpOp oldCmp, CmpOpKind newKind, Value lhs, Value rhs, PatternRewriter &rewriter) const { rewriter.setInsertionPointAfter(oldCmp.getOperation()); - auto newCmp = rewriter.create( - oldCmp.getLoc(), oldCmp.getType(), newKind, lhs, rhs); + auto newCmp = rewriter.create(oldCmp.getLoc(), oldCmp.getType(), + newKind, lhs, rhs); oldCmp->replaceAllUsesWith(newCmp); oldCmp->erase(); } @@ -149,7 +149,7 @@ struct hoistLoopInvariantInCondBlock : public OpRewritePattern { // Return true for loop invariant operation and push it to initOps. bool isLoopInvariantOp(Operation *op, ForOp forOp, - SmallVector &initOps) const { + llvm::SmallVector &initOps) const { if (!op) return false; if (isa(op) || isLoopInvariantLoad(op, forOp)) { @@ -162,7 +162,7 @@ struct hoistLoopInvariantInCondBlock : public OpRewritePattern { initOps)) { initOps.push_back(op); return true; - } else if (isa(op) && + } else if (isa(op) && isLoopInvariantOp(op->getOperand(0).getDefiningOp(), forOp, initOps)) { initOps.push_back(op); @@ -189,7 +189,7 @@ struct hoistLoopInvariantInCondBlock : public OpRewritePattern { Value cmpRhs = loopCmp.getRhs(); auto defOp = cmpRhs.getDefiningOp(); - SmallVector initOps; + llvm::SmallVector initOps; // Collect loop invariant operations and move them before forOp. if (isLoopInvariantOp(defOp, forOp, initOps)) { for (auto op : initOps) @@ -225,7 +225,7 @@ void SCFPreparePass::runOnOperation() { populateSCFPreparePatterns(patterns); // Collect operations to apply patterns. - SmallVector ops; + llvm::SmallVector ops; getOperation()->walk([&](Operation *op) { // CastOp here is to perform a manual `fold` in // applyOpPatternsAndFold diff --git a/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp b/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp index 2fbccfc7946a..19807738505e 100644 --- a/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp +++ b/clang/lib/CIR/Dialect/Transforms/StdHelpers.cpp @@ -8,11 +8,10 @@ #include "StdHelpers.h" -namespace mlir { namespace cir { bool isStdArrayType(mlir::Type t) { - auto sTy = dyn_cast(t); + auto sTy = mlir::dyn_cast(t); if (!sTy) return false; auto recordDecl = sTy.getAst(); @@ -29,4 +28,3 @@ bool isStdArrayType(mlir::Type t) { } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/StdHelpers.h b/clang/lib/CIR/Dialect/Transforms/StdHelpers.h index 302272feb6bb..245e329fb1bd 100644 --- a/clang/lib/CIR/Dialect/Transforms/StdHelpers.h +++ b/clang/lib/CIR/Dialect/Transforms/StdHelpers.h @@ -25,12 +25,10 @@ #ifndef DIALECT_CIR_TRANSFORMS_STDHELPERS_H_ #define DIALECT_CIR_TRANSFORMS_STDHELPERS_H_ -namespace mlir { namespace cir { bool isStdArrayType(mlir::Type t); } // namespace cir -} // namespace mlir #endif diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp index 7ff24be12b35..f94643bfff20 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.cpp @@ -17,7 +17,6 @@ #include "LowerTypes.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" -namespace mlir { namespace cir { // Pin the vtable to this file. @@ -29,25 +28,24 @@ CIRLowerContext &ABIInfo::getContext() const { return LT.getContext(); } const clang::TargetInfo &ABIInfo::getTarget() const { return LT.getTarget(); } -const ::cir::CIRDataLayout &ABIInfo::getDataLayout() const { +const cir::CIRDataLayout &ABIInfo::getDataLayout() const { return LT.getDataLayout(); } -bool ABIInfo::isPromotableIntegerTypeForABI(Type Ty) const { +bool ABIInfo::isPromotableIntegerTypeForABI(mlir::Type Ty) const { if (getContext().isPromotableIntegerType(Ty)) return true; - cir_cconv_assert(!::cir::MissingFeatures::fixedWidthIntegers()); + cir_cconv_assert(!cir::MissingFeatures::fixedWidthIntegers()); return false; } -::cir::ABIArgInfo ABIInfo::getNaturalAlignIndirect(mlir::Type Ty, bool ByVal, - bool Realign, - mlir::Type Padding) const { - return ::cir::ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty), ByVal, - Realign, Padding); +cir::ABIArgInfo ABIInfo::getNaturalAlignIndirect(mlir::Type Ty, bool ByVal, + bool Realign, + mlir::Type Padding) const { + return cir::ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty), ByVal, + Realign, Padding); } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h index 0b67d84570ea..434070fd8157 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h @@ -20,7 +20,6 @@ #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "llvm/IR/CallingConv.h" -namespace mlir { namespace cir { // Forward declarations. @@ -43,20 +42,19 @@ class ABIInfo { const clang::TargetInfo &getTarget() const; - const ::cir::CIRDataLayout &getDataLayout() const; + const cir::CIRDataLayout &getDataLayout() const; virtual void computeInfo(LowerFunctionInfo &FI) const = 0; // Implement the Type::IsPromotableIntegerType for ABI specific needs. The // only difference is that this considers bit-precise integer types as well. - bool isPromotableIntegerTypeForABI(Type Ty) const; + bool isPromotableIntegerTypeForABI(mlir::Type Ty) const; - ::cir::ABIArgInfo getNaturalAlignIndirect(mlir::Type Ty, bool ByVal = true, - bool Realign = false, - mlir::Type Padding = {}) const; + cir::ABIArgInfo getNaturalAlignIndirect(mlir::Type Ty, bool ByVal = true, + bool Realign = false, + mlir::Type Padding = {}) const; }; } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFO_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index 493ddffdce3d..2c92be20bd41 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -18,41 +18,39 @@ #include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" -namespace mlir { namespace cir { bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, const ABIInfo &Info) { - Type Ty = FI.getReturnType(); + mlir::Type Ty = FI.getReturnType(); - if (const auto RT = dyn_cast(Ty)) { - cir_cconv_assert(!::cir::MissingFeatures::isCXXRecordDecl()); + if (const auto RT = mlir::dyn_cast(Ty)) { + cir_cconv_assert(!cir::MissingFeatures::isCXXRecordDecl()); } return CXXABI.classifyReturnType(FI); } -bool isAggregateTypeForABI(Type T) { - cir_cconv_assert(!::cir::MissingFeatures::functionMemberPointerType()); +bool isAggregateTypeForABI(mlir::Type T) { + cir_cconv_assert(!cir::MissingFeatures::functionMemberPointerType()); return !LowerFunction::hasScalarEvaluationKind(T); } -Type useFirstFieldIfTransparentUnion(Type Ty) { - if (auto RT = dyn_cast(Ty)) { +mlir::Type useFirstFieldIfTransparentUnion(mlir::Type Ty) { + if (auto RT = mlir::dyn_cast(Ty)) { if (RT.isUnion()) cir_cconv_assert_or_abort( - !::cir::MissingFeatures::ABITransparentUnionHandling(), "NYI"); + !cir::MissingFeatures::ABITransparentUnionHandling(), "NYI"); } return Ty; } CIRCXXABI::RecordArgABI getRecordArgABI(const StructType RT, CIRCXXABI &CXXABI) { - if (::cir::MissingFeatures::typeIsCXXRecordDecl()) { + if (cir::MissingFeatures::typeIsCXXRecordDecl()) { cir_cconv_unreachable("NYI"); } return CXXABI.getRecordArgABI(RT); } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h index 9e45bc4e0ecc..df1cd2d0fe0d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h @@ -18,21 +18,19 @@ #include "CIRCXXABI.h" #include "LowerFunctionInfo.h" -namespace mlir { namespace cir { bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, const ABIInfo &Info); -bool isAggregateTypeForABI(Type T); +bool isAggregateTypeForABI(mlir::Type T); /// Pass transparent unions as if they were the type of the first element. Sema /// should ensure that all elements of the union have the same "machine type". -Type useFirstFieldIfTransparentUnion(Type Ty); +mlir::Type useFirstFieldIfTransparentUnion(mlir::Type Ty); CIRCXXABI::RecordArgABI getRecordArgABI(const StructType RT, CIRCXXABI &CXXABI); } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_ABIINFOIMPL_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.cpp index 8c483469f1ce..86cf7ebdc8f5 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.cpp @@ -13,10 +13,8 @@ #include "CIRCXXABI.h" -namespace mlir { namespace cir { CIRCXXABI::~CIRCXXABI() {} } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index 42e666999005..a980f76f012d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -20,7 +20,6 @@ #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Target/AArch64.h" -namespace mlir { namespace cir { // Forward declarations. @@ -66,7 +65,6 @@ class CIRCXXABI { CIRCXXABI *CreateItaniumCXXABI(LowerModule &CGM); } // namespace cir -} // namespace mlir // FIXME(cir): Merge this into the CIRCXXABI class above. To do so, this code // should be updated to follow some level of codegen parity. @@ -75,16 +73,15 @@ namespace cir { class LoweringPrepareCXXABI { public: static LoweringPrepareCXXABI *createItaniumABI(); - static LoweringPrepareCXXABI *createAArch64ABI(::cir::AArch64ABIKind k); + static LoweringPrepareCXXABI *createAArch64ABI(cir::AArch64ABIKind k); - virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, - mlir::cir::VAArgOp op, + virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) = 0; virtual ~LoweringPrepareCXXABI() {} virtual mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, clang::ASTContext &astCtx, - mlir::cir::DynamicCastOp op) = 0; + cir::DynamicCastOp op) = 0; }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index c4912c651d21..c6960d411b93 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -20,15 +20,15 @@ #include "llvm/Support/ErrorHandling.h" #include -namespace mlir { namespace cir { -CIRLowerContext::CIRLowerContext(ModuleOp module, clang::LangOptions LOpts) +CIRLowerContext::CIRLowerContext(mlir::ModuleOp module, + clang::LangOptions LOpts) : MLIRCtx(module.getContext()), LangOpts(LOpts) {} CIRLowerContext::~CIRLowerContext() {} -clang::TypeInfo CIRLowerContext::getTypeInfo(Type T) const { +clang::TypeInfo CIRLowerContext::getTypeInfo(mlir::Type T) const { // TODO(cir): Memoize type info. clang::TypeInfo TI = getTypeInfoImpl(T); @@ -41,7 +41,7 @@ clang::TypeInfo CIRLowerContext::getTypeInfo(Type T) const { /// FIXME: Pointers into different addr spaces could have different sizes and /// alignment requirements: getPointerInfo should take an AddrSpace, this /// should take a QualType, &c. -clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { +clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const mlir::Type T) const { uint64_t Width = 0; unsigned Align = 8; clang::AlignRequirementKind AlignRequirement = @@ -50,12 +50,12 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { // TODO(cir): We should implement a better way to identify type kinds and use // builting data layout interface for this. auto typeKind = clang::Type::Builtin; - if (isa(T)) { + if (mlir::isa(T)) { typeKind = clang::Type::Builtin; - } else if (isa(T)) { + } else if (mlir::isa(T)) { typeKind = clang::Type::Record; } else { - cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABIClangTypeKind(), + cir_cconv_assert_or_abort(!cir::MissingFeatures::ABIClangTypeKind(), "Unhandled type class"); // FIXME(cir): Completely wrong. Just here to make it non-blocking. typeKind = clang::Type::Builtin; @@ -71,7 +71,7 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { // current level of CIR. switch (typeKind) { case clang::Type::Builtin: { - if (auto intTy = dyn_cast(T)) { + if (auto intTy = mlir::dyn_cast(T)) { // NOTE(cir): This assumes int types are already ABI-specific. // FIXME(cir): Use data layout interface here instead. Width = intTy.getWidth(); @@ -79,17 +79,17 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { Align = std::ceil((float)Width / 8) * 8; break; } - if (auto boolTy = dyn_cast(T)) { + if (auto boolTy = mlir::dyn_cast(T)) { Width = Target->getFloatWidth(); Align = Target->getFloatAlign(); break; } - if (auto floatTy = dyn_cast(T)) { + if (auto floatTy = mlir::dyn_cast(T)) { Width = Target->getFloatWidth(); Align = Target->getFloatAlign(); break; } - if (auto doubleTy = dyn_cast(T)) { + if (auto doubleTy = mlir::dyn_cast(T)) { Width = Target->getDoubleWidth(); Align = Target->getDoubleAlign(); break; @@ -98,8 +98,8 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { break; } case clang::Type::Record: { - const auto RT = dyn_cast(T); - cir_cconv_assert(!::cir::MissingFeatures::tagTypeClassAbstraction()); + const auto RT = mlir::dyn_cast(T); + cir_cconv_assert(!cir::MissingFeatures::tagTypeClassAbstraction()); // Only handle TagTypes (names types) for now. cir_cconv_assert(RT.getName() && "Anonymous record is NYI"); @@ -107,14 +107,14 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { // NOTE(cir): Clang does some hanlding of invalid tagged declarations here. // Not sure if this is necessary in CIR. - if (::cir::MissingFeatures::typeGetAsEnumType()) { + if (cir::MissingFeatures::typeGetAsEnumType()) { cir_cconv_unreachable("NYI"); } const CIRRecordLayout &Layout = getCIRRecordLayout(RT); Width = toBits(Layout.getSize()); Align = toBits(Layout.getAlignment()); - cir_cconv_assert(!::cir::MissingFeatures::recordDeclHasAlignmentAttr()); + cir_cconv_assert(!cir::MissingFeatures::recordDeclHasAlignmentAttr()); break; } default: @@ -126,11 +126,11 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const Type T) const { return clang::TypeInfo(Width, Align, AlignRequirement); } -Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { - Type Ty; +mlir::Type CIRLowerContext::initBuiltinType(clang::BuiltinType::Kind K) { + mlir::Type Ty; // NOTE(cir): Clang does more stuff here. Not sure if we need to do the same. - cir_cconv_assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_cconv_assert(!cir::MissingFeatures::qualifiedTypes()); switch (K) { case clang::BuiltinType::Char_S: Ty = IntType::get(getMLIRContext(), 8, true); @@ -167,8 +167,8 @@ int64_t CIRLowerContext::toBits(clang::CharUnits CharSize) const { return CharSize.getQuantity() * getCharWidth(); } -clang::TypeInfoChars CIRLowerContext::getTypeInfoInChars(Type T) const { - if (auto arrTy = dyn_cast(T)) +clang::TypeInfoChars CIRLowerContext::getTypeInfoInChars(mlir::Type T) const { + if (auto arrTy = mlir::dyn_cast(T)) cir_cconv_unreachable("NYI"); clang::TypeInfo Info = getTypeInfo(T); return clang::TypeInfoChars(toCharUnitsFromBits(Info.Width), @@ -176,21 +176,21 @@ clang::TypeInfoChars CIRLowerContext::getTypeInfoInChars(Type T) const { Info.AlignRequirement); } -bool CIRLowerContext::isPromotableIntegerType(Type T) const { +bool CIRLowerContext::isPromotableIntegerType(mlir::Type T) const { // HLSL doesn't promote all small integer types to int, it // just uses the rank-based promotion rules for all types. - if (::cir::MissingFeatures::langOpts()) + if (cir::MissingFeatures::langOpts()) cir_cconv_unreachable("NYI"); // FIXME(cir): CIR does not distinguish between char, short, etc. So we just // assume it is promotable if smaller than 32 bits. This is wrong since, for // example, Char32 is promotable. Improve CIR or add an AST query here. - if (auto intTy = dyn_cast(T)) { - return cast(T).getWidth() < 32; + if (auto intTy = mlir::dyn_cast(T)) { + return mlir::cast(T).getWidth() < 32; } // Bool are also handled here for codegen parity. - if (auto boolTy = dyn_cast(T)) { + if (auto boolTy = mlir::dyn_cast(T)) { return true; } @@ -198,7 +198,7 @@ bool CIRLowerContext::isPromotableIntegerType(Type T) const { // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). // TODO(cir): CIR doesn't know if a integer originated from an enum. Improve // CIR or add an AST query here. - if (::cir::MissingFeatures::typeGetAsEnumType()) { + if (cir::MissingFeatures::typeGetAsEnumType()) { cir_cconv_unreachable("NYI"); } @@ -206,4 +206,3 @@ bool CIRLowerContext::isPromotableIntegerType(Type T) const { } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h index 5a87f71c2bdc..e178eeccc642 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h @@ -23,7 +23,6 @@ #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" -namespace mlir { namespace cir { // FIXME(cir): Most of this is type-related information that should already be @@ -31,15 +30,15 @@ namespace cir { class CIRLowerContext : public llvm::RefCountedBase { private: - mutable SmallVector Types; + mutable llvm::SmallVector Types; - clang::TypeInfo getTypeInfoImpl(const Type T) const; + clang::TypeInfo getTypeInfoImpl(const mlir::Type T) const; const clang::TargetInfo *Target = nullptr; const clang::TargetInfo *AuxTarget = nullptr; /// MLIR context to be used when creating types. - MLIRContext *MLIRCtx; + mlir::MLIRContext *MLIRCtx; /// The language options used to create the AST associated with /// this ASTContext object. @@ -49,10 +48,10 @@ class CIRLowerContext : public llvm::RefCountedBase { // Built-in Types //===--------------------------------------------------------------------===// - Type CharTy; + mlir::Type CharTy; public: - CIRLowerContext(ModuleOp module, clang::LangOptions LOpts); + CIRLowerContext(mlir::ModuleOp module, clang::LangOptions LOpts); CIRLowerContext(const CIRLowerContext &) = delete; CIRLowerContext &operator=(const CIRLowerContext &) = delete; ~CIRLowerContext(); @@ -67,24 +66,24 @@ class CIRLowerContext : public llvm::RefCountedBase { const clang::TargetInfo *AuxTarget = nullptr); private: - Type initBuiltinType(clang::BuiltinType::Kind K); + mlir::Type initBuiltinType(clang::BuiltinType::Kind K); public: const clang::TargetInfo &getTargetInfo() const { return *Target; } const clang::LangOptions &getLangOpts() const { return LangOpts; } - MLIRContext *getMLIRContext() const { return MLIRCtx; } + mlir::MLIRContext *getMLIRContext() const { return MLIRCtx; } //===--------------------------------------------------------------------===// // Type Sizing and Analysis //===--------------------------------------------------------------------===// /// Get the size and alignment of the specified complete type in bits. - clang::TypeInfo getTypeInfo(Type T) const; + clang::TypeInfo getTypeInfo(mlir::Type T) const; /// Return the size of the specified (complete) type \p T, in bits. - uint64_t getTypeSize(Type T) const { return getTypeInfo(T).Width; } + uint64_t getTypeSize(mlir::Type T) const { return getTypeInfo(T).Width; } /// Return the size of the character type, in bits. // FIXME(cir): Refactor types and properly implement DataLayout interface in @@ -97,27 +96,26 @@ class CIRLowerContext : public llvm::RefCountedBase { /// Convert a size in characters to a size in bits. int64_t toBits(clang::CharUnits CharSize) const; - clang::CharUnits getTypeSizeInChars(Type T) const { + clang::CharUnits getTypeSizeInChars(mlir::Type T) const { // FIXME(cir): We should query MLIR's Datalayout here instead. return getTypeInfoInChars(T).Width; } /// Return the ABI-specified alignment of a (complete) type \p T, in /// bits. - unsigned getTypeAlign(Type T) const { return getTypeInfo(T).Align; } + unsigned getTypeAlign(mlir::Type T) const { return getTypeInfo(T).Align; } - clang::TypeInfoChars getTypeInfoInChars(Type T) const; + clang::TypeInfoChars getTypeInfoInChars(mlir::Type T) const; /// More type predicates useful for type checking/promotion - bool isPromotableIntegerType(Type T) const; // C99 6.3.1.1p2 + bool isPromotableIntegerType(mlir::Type T) const; // C99 6.3.1.1p2 /// Get or compute information about the layout of the specified /// record (struct/union/class) \p D, which indicates its size and field /// position information. - const CIRRecordLayout &getCIRRecordLayout(const Type D) const; + const CIRRecordLayout &getCIRRecordLayout(const mlir::Type D) const; }; } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRLowerContext_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp index 76a8f60bd549..c42ed58a30d9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.cpp @@ -14,7 +14,6 @@ #include "CIRRecordLayout.h" #include "clang/CIR/MissingFeatures.h" -namespace mlir { namespace cir { // Constructor for C++ records. @@ -23,11 +22,11 @@ CIRRecordLayout::CIRRecordLayout( clang::CharUnits alignment, clang::CharUnits preferredAlignment, clang::CharUnits unadjustedAlignment, clang::CharUnits requiredAlignment, bool hasOwnVFPtr, bool hasExtendableVFPtr, clang::CharUnits vbptroffset, - clang::CharUnits datasize, ArrayRef fieldoffsets, + clang::CharUnits datasize, llvm::ArrayRef fieldoffsets, clang::CharUnits nonvirtualsize, clang::CharUnits nonvirtualalignment, clang::CharUnits preferrednvalignment, - clang::CharUnits SizeOfLargestEmptySubobject, const Type PrimaryBase, - bool IsPrimaryBaseVirtual, const Type BaseSharingVBPtr, + clang::CharUnits SizeOfLargestEmptySubobject, const mlir::Type PrimaryBase, + bool IsPrimaryBaseVirtual, const mlir::Type BaseSharingVBPtr, bool EndsWithZeroSizedObject, bool LeadsWithZeroSizedBase) : Size(size), DataSize(datasize), Alignment(alignment), PreferredAlignment(preferredAlignment), @@ -48,7 +47,7 @@ CIRRecordLayout::CIRRecordLayout( CXXInfo->PreferredNVAlignment = preferrednvalignment; CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject; // FIXME(cir): Initialize base classes offsets. - cir_cconv_assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_cconv_assert(!cir::MissingFeatures::getCXXRecordBases()); CXXInfo->HasOwnVFPtr = hasOwnVFPtr; CXXInfo->VBPtrOffset = vbptroffset; CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr; @@ -59,4 +58,3 @@ CIRRecordLayout::CIRRecordLayout( } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h index b282f32f8a9d..f42d9758bca1 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRRecordLayout.h @@ -20,7 +20,6 @@ #include #include -namespace mlir { namespace cir { class CIRLowerContext; @@ -110,12 +109,13 @@ class CIRRecordLayout { clang::CharUnits alignment, clang::CharUnits preferredAlignment, clang::CharUnits unadjustedAlignment, clang::CharUnits requiredAlignment, bool hasOwnVFPtr, bool hasExtendableVFPtr, clang::CharUnits vbptroffset, - clang::CharUnits datasize, ArrayRef fieldoffsets, + clang::CharUnits datasize, llvm::ArrayRef fieldoffsets, clang::CharUnits nonvirtualsize, clang::CharUnits nonvirtualalignment, clang::CharUnits preferrednvalignment, - clang::CharUnits SizeOfLargestEmptySubobject, const Type PrimaryBase, - bool IsPrimaryBaseVirtual, const Type BaseSharingVBPtr, - bool EndsWithZeroSizedObject, bool LeadsWithZeroSizedBase); + clang::CharUnits SizeOfLargestEmptySubobject, + const mlir::Type PrimaryBase, bool IsPrimaryBaseVirtual, + const mlir::Type BaseSharingVBPtr, bool EndsWithZeroSizedObject, + bool LeadsWithZeroSizedBase); ~CIRRecordLayout() = default; @@ -133,6 +133,5 @@ class CIRRecordLayout { }; } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRRECORDLAYOUT_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h index 12b45a56e881..1310505da536 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRToCIRArgMapping.h @@ -22,7 +22,6 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/Support/ErrorHandling.h" -namespace mlir { namespace cir { /// Encapsulates information about the way function arguments from @@ -68,9 +67,9 @@ class CIRToCIRArgMapping { bool onlyRequiredArgs = false) { unsigned IRArgNo = 0; bool SwapThisWithSRet = false; - const ::cir::ABIArgInfo &RetAI = FI.getReturnInfo(); + const cir::ABIArgInfo &RetAI = FI.getReturnInfo(); - if (RetAI.getKind() == ::cir::ABIArgInfo::Indirect) { + if (RetAI.getKind() == cir::ABIArgInfo::Indirect) { SwapThisWithSRet = RetAI.isSRetAfterThis(); SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; } @@ -82,20 +81,20 @@ class CIRToCIRArgMapping { ArgNo < NumArgs; ++I, ++ArgNo) { cir_cconv_assert(I != FI.arg_end()); // Type ArgType = I->type; - const ::cir::ABIArgInfo &AI = I->info; + const cir::ABIArgInfo &AI = I->info; // Collect data about IR arguments corresponding to Clang argument ArgNo. auto &IRArgs = ArgInfo[ArgNo]; - if (::cir::MissingFeatures::argumentPadding()) { + if (cir::MissingFeatures::argumentPadding()) { cir_cconv_unreachable("NYI"); } switch (AI.getKind()) { - case ::cir::ABIArgInfo::Extend: - case ::cir::ABIArgInfo::Direct: { + case cir::ABIArgInfo::Extend: + case cir::ABIArgInfo::Direct: { // FIXME(cir): handle sseregparm someday... cir_cconv_assert(AI.getCoerceToType() && "Missing coerced type!!"); - StructType STy = dyn_cast(AI.getCoerceToType()); + StructType STy = mlir::dyn_cast(AI.getCoerceToType()); if (AI.isDirect() && AI.getCanBeFlattened() && STy) { IRArgs.NumberOfArgs = STy.getNumElements(); } else { @@ -103,8 +102,8 @@ class CIRToCIRArgMapping { } break; } - case ::cir::ABIArgInfo::Indirect: - case ::cir::ABIArgInfo::IndirectAliased: + case cir::ABIArgInfo::Indirect: + case cir::ABIArgInfo::IndirectAliased: IRArgs.NumberOfArgs = 1; break; @@ -124,7 +123,7 @@ class CIRToCIRArgMapping { } cir_cconv_assert(ArgNo == ArgInfo.size()); - if (::cir::MissingFeatures::inallocaArgs()) { + if (cir::MissingFeatures::inallocaArgs()) { cir_cconv_unreachable("NYI"); } @@ -148,6 +147,5 @@ class CIRToCIRArgMapping { }; } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRTOCIRARGMAPPING_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index 87a1c5061aef..deb4053dc682 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -24,7 +24,6 @@ #include "LowerModule.h" #include "llvm/Support/ErrorHandling.h" -namespace mlir { namespace cir { namespace { @@ -46,9 +45,9 @@ class ItaniumCXXABI : public CIRCXXABI { // FIXME(cir): This expects a CXXRecordDecl! Not any record type. RecordArgABI getRecordArgABI(const StructType RD) const override { - cir_cconv_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + cir_cconv_assert(!cir::MissingFeatures::recordDeclIsCXXDecl()); // If C++ prohibits us from making a copy, pass by address. - cir_cconv_assert(!::cir::MissingFeatures::recordDeclCanPassInRegisters()); + cir_cconv_assert(!cir::MissingFeatures::recordDeclCanPassInRegisters()); return RAA_Default; } }; @@ -56,12 +55,12 @@ class ItaniumCXXABI : public CIRCXXABI { } // namespace bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { - const StructType RD = dyn_cast(FI.getReturnType()); + const StructType RD = mlir::dyn_cast(FI.getReturnType()); if (!RD) return false; // If C++ prohibits us from making a copy, return by address. - if (::cir::MissingFeatures::recordDeclCanPassInRegisters()) + if (cir::MissingFeatures::recordDeclCanPassInRegisters()) cir_cconv_unreachable("NYI"); return false; @@ -76,7 +75,7 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { case clang::TargetCXXABI::AppleARM64: // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits // from ARMCXXABI. We'll have to follow suit. - cir_cconv_assert(!::cir::MissingFeatures::appleArm64CXXABI()); + cir_cconv_assert(!cir::MissingFeatures::appleArm64CXXABI()); return new ItaniumCXXABI(LM, /*UseARMMethodPtrABI=*/true, /*UseARMGuardVarABI=*/true); @@ -93,14 +92,13 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { } } // namespace cir -} // namespace mlir // FIXME(cir): Merge this into the CIRCXXABI class above. class LoweringPrepareItaniumCXXABI : public cir::LoweringPrepareCXXABI { public: mlir::Value lowerDynamicCast(cir::CIRBaseBuilderTy &builder, clang::ASTContext &astCtx, - mlir::cir::DynamicCastOp op) override; - mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, + cir::DynamicCastOp op) override; + mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) override; }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 23c6c85a9723..1978f0497c85 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -9,11 +9,11 @@ #include "llvm/Support/ErrorHandling.h" using namespace mlir; -using namespace mlir::cir; +using namespace cir; -using ABIArgInfo = ::cir::ABIArgInfo; -using FnInfoOpts = ::cir::FnInfoOpts; -using MissingFeatures = ::cir::MissingFeatures; +using ABIArgInfo = cir::ABIArgInfo; +using FnInfoOpts = cir::FnInfoOpts; +using MissingFeatures = cir::MissingFeatures; namespace { @@ -25,7 +25,7 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, unsigned numExtraRequiredArgs, bool chainCall) { cir_cconv_assert(args.size() >= numExtraRequiredArgs); - cir_cconv_assert(!::cir::MissingFeatures::extParamInfo()); + cir_cconv_assert(!cir::MissingFeatures::extParamInfo()); // In most cases, there are no optional arguments. RequiredArgs required = RequiredArgs::All; @@ -35,9 +35,9 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, // FIXME(cir): Properly check if function is no-proto. if (/*IsPrototypedFunction=*/true) { if (fnType.isVarArg()) - cir_cconv_assert_or_abort(!::cir::MissingFeatures::isVarArg(), "NYI"); + cir_cconv_assert_or_abort(!cir::MissingFeatures::isVarArg(), "NYI"); - if (::cir::MissingFeatures::extParamInfo()) + if (cir::MissingFeatures::extParamInfo()) cir_cconv_unreachable("NYI"); } @@ -45,10 +45,10 @@ arrangeFreeFunctionLikeCall(LowerTypes <, LowerModule &LM, // its skipped here since it requires CodeGen info. Maybe this information // could be embbed in the FuncOp during CIRGen. - cir_cconv_assert(!::cir::MissingFeatures::chainCall() && !chainCall && "NYI"); + cir_cconv_assert(!cir::MissingFeatures::chainCall() && !chainCall && "NYI"); FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; - SmallVector argTypes; + llvm::SmallVector argTypes; for (const auto &a : args) argTypes.push_back(a.getType()); @@ -97,7 +97,7 @@ arrangeCIRFunctionInfo(LowerTypes &CGT, bool instanceMethod, /// /// NOTE(cir): Partially copies CodeGenModule::ConstructAttributeList, but /// focuses on ABI/Target-related attributes. -void LowerModule::constructAttributeList(StringRef Name, +void LowerModule::constructAttributeList(llvm::StringRef Name, const LowerFunctionInfo &FI, FuncOp CalleeInfo, FuncOp newFn, unsigned &CallingConv, @@ -152,13 +152,13 @@ void LowerModule::constructAttributeList(StringRef Name, [[fallthrough]]; case ABIArgInfo::Direct: if (RetAI.getInReg()) - cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABIInRegAttribute(), + cir_cconv_assert_or_abort(!cir::MissingFeatures::ABIInRegAttribute(), "NYI"); - cir_cconv_assert(!::cir::MissingFeatures::noFPClass()); + cir_cconv_assert(!cir::MissingFeatures::noFPClass()); break; case ABIArgInfo::Ignore: case ABIArgInfo::Indirect: - cir_cconv_assert(!::cir::MissingFeatures::ABIPotentialArgAccess()); + cir_cconv_assert(!cir::MissingFeatures::ABIPotentialArgAccess()); break; default: cir_cconv_unreachable("Missing ABIArgInfo::Kind"); @@ -194,7 +194,7 @@ void LowerModule::constructAttributeList(StringRef Name, I != E; ++I, ++ArgNo) { // Type ParamType = I->type; const ABIArgInfo &AI = I->info; - SmallVector Attrs; + llvm::SmallVector Attrs; // Add attribute for padding argument, if necessary. if (IRFunctionArgs.hasPaddingArg(ArgNo)) { @@ -219,31 +219,31 @@ void LowerModule::constructAttributeList(StringRef Name, rewriter.getNamedAttr("cir.zeroext", rewriter.getUnitAttr())); [[fallthrough]]; case ABIArgInfo::Direct: - if (ArgNo == 0 && ::cir::MissingFeatures::chainCall()) + if (ArgNo == 0 && cir::MissingFeatures::chainCall()) cir_cconv_unreachable("ChainCall is NYI"); else if (AI.getInReg()) cir_cconv_unreachable("InReg attribute is NYI"); // Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); - cir_cconv_assert(!::cir::MissingFeatures::noFPClass()); + cir_cconv_assert(!cir::MissingFeatures::noFPClass()); break; case ABIArgInfo::Indirect: { - cir_cconv_assert(!::cir::MissingFeatures::ABIInRegAttribute()); - cir_cconv_assert(!::cir::MissingFeatures::ABIByValAttribute()); - cir_cconv_assert(!::cir::MissingFeatures::ABINoAliasAttribute()); - cir_cconv_assert(!::cir::MissingFeatures::ABIAlignmentAttribute()); - cir_cconv_assert(!::cir::MissingFeatures::ABIPotentialArgAccess()); + cir_cconv_assert(!cir::MissingFeatures::ABIInRegAttribute()); + cir_cconv_assert(!cir::MissingFeatures::ABIByValAttribute()); + cir_cconv_assert(!cir::MissingFeatures::ABINoAliasAttribute()); + cir_cconv_assert(!cir::MissingFeatures::ABIAlignmentAttribute()); + cir_cconv_assert(!cir::MissingFeatures::ABIPotentialArgAccess()); break; } default: cir_cconv_unreachable("Missing ABIArgInfo::Kind"); } - if (::cir::MissingFeatures::qualTypeIsReferenceType()) { + if (cir::MissingFeatures::qualTypeIsReferenceType()) { cir_cconv_unreachable("Reference handling is NYI"); } // TODO(cir): Missing some swift and nocapture stuff here. - cir_cconv_assert(!::cir::MissingFeatures::extParamInfo()); + cir_cconv_assert(!cir::MissingFeatures::extParamInfo()); if (!Attrs.empty()) { unsigned FirstIRArg, NumIRArgs; @@ -269,7 +269,7 @@ const LowerFunctionInfo &LowerTypes::arrangeFunctionDeclaration(FuncOp fnOp) { // When declaring a function without a prototype, always use a // non-variadic type. if (fnOp.getNoProto()) { - cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABINoProtoFunctions(), + cir_cconv_assert_or_abort(!cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); } @@ -290,7 +290,7 @@ LowerTypes::arrangeFreeFunctionCall(const OperandRange args, /// Arrange the argument and result information for the declaration or /// definition of the given function. const LowerFunctionInfo &LowerTypes::arrangeFreeFunctionType(FuncType FTy) { - SmallVector argTypes; + llvm::SmallVector argTypes; return ::arrangeCIRFunctionInfo(*this, /*instanceMethod=*/false, argTypes, FTy); } @@ -317,12 +317,12 @@ const LowerFunctionInfo & LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, ArrayRef argTypes, RequiredArgs required) { - cir_cconv_assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_cconv_assert(!cir::MissingFeatures::qualifiedTypes()); LowerFunctionInfo *FI = nullptr; // FIXME(cir): Allow user-defined CCs (e.g. __attribute__((vectorcall))). - cir_cconv_assert(!::cir::MissingFeatures::extParamInfo()); + cir_cconv_assert(!cir::MissingFeatures::extParamInfo()); unsigned CC = clangCallConvToLLVMCallConv(clang::CallingConv::CC_C); // Construct the function info. We co-allocate the ArgInfos. @@ -334,7 +334,7 @@ LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, // Compute ABI information. if (CC == llvm::CallingConv::SPIR_KERNEL) { cir_cconv_unreachable("NYI"); - } else if (::cir::MissingFeatures::extParamInfo()) { + } else if (cir::MissingFeatures::extParamInfo()) { cir_cconv_unreachable("NYI"); } else { // NOTE(cir): This corects the initial function info data. @@ -344,7 +344,7 @@ LowerTypes::arrangeLLVMFunctionInfo(Type resultType, FnInfoOpts opts, // Loop over all of the computed argument and return value info. If any of // them are direct or extend without a specified coerce type, specify the // default now. - ::cir::ABIArgInfo &retInfo = FI->getReturnInfo(); + cir::ABIArgInfo &retInfo = FI->getReturnInfo(); if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) retInfo.setCoerceToType(convertType(FI->getReturnType())); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h index b579f96fb436..4cb7607e2355 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.h @@ -16,7 +16,6 @@ #include "mlir/IR/Value.h" -namespace mlir { namespace cir { /// Contains the address where the return value of a function can be stored, and @@ -24,7 +23,7 @@ namespace cir { class ReturnValueSlot { // FIXME(cir): We should be able to query this directly from CIR at some // point. This class can then be removed. - Value Addr = {}; + mlir::Value Addr = {}; // Return value slot flags unsigned IsVolatile : 1; @@ -34,19 +33,18 @@ class ReturnValueSlot { public: ReturnValueSlot() : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {} - ReturnValueSlot(Value Addr, bool IsVolatile, bool IsUnused = false, + ReturnValueSlot(mlir::Value Addr, bool IsVolatile, bool IsUnused = false, bool IsExternallyDestructed = false) : Addr(Addr), IsVolatile(IsVolatile), IsUnused(IsUnused), IsExternallyDestructed(IsExternallyDestructed) {} bool isNull() const { return !Addr; } bool isVolatile() const { return IsVolatile; } - Value getValue() const { return Addr; } + mlir::Value getValue() const { return Addr; } bool isUnused() const { return IsUnused; } bool isExternallyDestructed() const { return IsExternallyDestructed; } }; } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERCALL_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 69bb78283610..d2a7e83e7020 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -26,25 +26,25 @@ #include "clang/CIR/TypeEvaluationKind.h" #include "llvm/Support/ErrorHandling.h" -using ABIArgInfo = ::cir::ABIArgInfo; +using ABIArgInfo = cir::ABIArgInfo; -namespace mlir { namespace cir { namespace { -Value buildAddressAtOffset(LowerFunction &LF, Value addr, - const ABIArgInfo &info) { +mlir::Value buildAddressAtOffset(LowerFunction &LF, mlir::Value addr, + const ABIArgInfo &info) { if (unsigned offset = info.getDirectOffset()) { cir_cconv_unreachable("NYI"); } return addr; } -Value createCoercedBitcast(Value Src, Type DestTy, LowerFunction &CGF) { +mlir::Value createCoercedBitcast(mlir::Value Src, mlir::Type DestTy, + LowerFunction &CGF) { auto destPtrTy = PointerType::get(CGF.getRewriter().getContext(), DestTy); - if (auto load = dyn_cast(Src.getDefiningOp())) + if (auto load = mlir::dyn_cast(Src.getDefiningOp())) return CGF.getRewriter().create(Src.getLoc(), destPtrTy, CastKind::bitcast, load.getAddr()); @@ -56,13 +56,15 @@ Value createCoercedBitcast(Value Src, Type DestTy, LowerFunction &CGF) { /// try to gep into the struct to get at its inner goodness. Dive as deep as /// possible without entering an element with an in-memory size smaller than /// DstSize. -Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, - uint64_t DstSize, LowerFunction &CGF) { +mlir::Value enterStructPointerForCoercedAccess(mlir::Value SrcPtr, + StructType SrcSTy, + uint64_t DstSize, + LowerFunction &CGF) { // We can't dive into a zero-element struct. if (SrcSTy.getNumElements() == 0) cir_cconv_unreachable("NYI"); - Type FirstElt = SrcSTy.getMembers()[0]; + mlir::Type FirstElt = SrcSTy.getMembers()[0]; // If the first elt is at least as large as what we're looking for, or if the // first element is the same size as the whole struct, we can enter it. The @@ -74,7 +76,7 @@ Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, return SrcPtr; cir_cconv_assert_or_abort( - !::cir::MissingFeatures::ABIEnterStructForCoercedAccess(), "NYI"); + !cir::MissingFeatures::ABIEnterStructForCoercedAccess(), "NYI"); return SrcPtr; // FIXME: This is a temporary workaround for the assertion // above. } @@ -86,15 +88,16 @@ Value enterStructPointerForCoercedAccess(Value SrcPtr, StructType SrcSTy, /// This behaves as if the value were coerced through memory, so on big-endian /// targets the high bits are preserved in a truncation, while little-endian /// targets preserve the low bits. -static Value coerceIntOrPtrToIntOrPtr(Value val, Type typ, LowerFunction &CGF) { +static mlir::Value coerceIntOrPtrToIntOrPtr(mlir::Value val, mlir::Type typ, + LowerFunction &CGF) { if (val.getType() == typ) return val; auto &bld = CGF.getRewriter(); - if (isa(val.getType())) { + if (mlir::isa(val.getType())) { // If this is Pointer->Pointer avoid conversion to and from int. - if (isa(typ)) + if (mlir::isa(typ)) return bld.create(val.getLoc(), typ, CastKind::bitcast, val); // Convert the pointer to an integer so we can play with its width. @@ -102,7 +105,7 @@ static Value coerceIntOrPtrToIntOrPtr(Value val, Type typ, LowerFunction &CGF) { } auto dstIntTy = typ; - if (isa(dstIntTy)) + if (mlir::isa(dstIntTy)) cir_cconv_unreachable("NYI"); if (val.getType() != dstIntTy) { @@ -131,7 +134,7 @@ static Value coerceIntOrPtrToIntOrPtr(Value val, Type typ, LowerFunction &CGF) { } } - if (isa(typ)) + if (mlir::isa(typ)) val = bld.create(val.getLoc(), typ, CastKind::int_to_ptr, val); return val; @@ -142,19 +145,19 @@ static Value coerceIntOrPtrToIntOrPtr(Value val, Type typ, LowerFunction &CGF) { /// /// This safely handles the case when the src type is larger than the /// destination type; the upper bits of the src will be lost. -void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, +void createCoercedStore(mlir::Value Src, mlir::Value Dst, bool DstIsVolatile, LowerFunction &CGF) { - Type SrcTy = Src.getType(); - Type DstTy = Dst.getType(); + mlir::Type SrcTy = Src.getType(); + mlir::Type DstTy = Dst.getType(); if (SrcTy == DstTy) { cir_cconv_unreachable("NYI"); } llvm::TypeSize SrcSize = CGF.LM.getDataLayout().getTypeAllocSize(SrcTy); - auto dstPtrTy = dyn_cast(DstTy); + auto dstPtrTy = mlir::dyn_cast(DstTy); if (dstPtrTy) - if (auto dstSTy = dyn_cast(dstPtrTy.getPointee())) + if (auto dstSTy = mlir::dyn_cast(dstPtrTy.getPointee())) if (SrcTy != dstSTy) Dst = enterStructPointerForCoercedAccess(Dst, dstSTy, SrcSize.getFixedValue(), CGF); @@ -165,17 +168,17 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, : layout.getTypeAllocSize(DstTy); if (SrcSize.isScalable() || SrcSize <= DstSize) { - if (isa(SrcTy) && dstPtrTy && - isa(dstPtrTy.getPointee()) && + if (mlir::isa(SrcTy) && dstPtrTy && + mlir::isa(dstPtrTy.getPointee()) && SrcSize == layout.getTypeAllocSize(dstPtrTy.getPointee())) { cir_cconv_unreachable("NYI"); - } else if (auto STy = dyn_cast(SrcTy)) { + } else if (auto STy = mlir::dyn_cast(SrcTy)) { cir_cconv_unreachable("NYI"); } else { Dst = createCoercedBitcast(Dst, SrcTy, CGF); CGF.buildAggregateStore(Src, Dst, DstIsVolatile); } - } else if (isa(SrcTy)) { + } else if (mlir::isa(SrcTy)) { auto &bld = CGF.getRewriter(); auto *ctxt = CGF.LM.getMLIRContext(); auto dstIntTy = IntType::get(ctxt, DstSize.getFixedValue() * 8, false); @@ -189,7 +192,7 @@ void createCoercedStore(Value Src, Value Dst, bool DstIsVolatile, } // FIXME(cir): Create a custom rewriter class to abstract this away. -Value createBitcast(Value Src, Type Ty, LowerFunction &LF) { +mlir::Value createBitcast(mlir::Value Src, mlir::Type Ty, LowerFunction &LF) { return LF.getRewriter().create(Src.getLoc(), Ty, CastKind::bitcast, Src); } @@ -205,21 +208,22 @@ Value createBitcast(Value Src, Type Ty, LowerFunction &LF) { /// since CIR's type checker wouldn't allow it. Instead, it casts the existing /// ABI-agnostic value to it's ABI-aware counterpart. Nevertheless, we should /// try to follow the same logic as the original codegen for correctness. -Value createCoercedValue(Value Src, Type Ty, LowerFunction &CGF) { - Type SrcTy = Src.getType(); +mlir::Value createCoercedValue(mlir::Value Src, mlir::Type Ty, + LowerFunction &CGF) { + mlir::Type SrcTy = Src.getType(); // If SrcTy and Ty are the same, just reuse the exising load. if (SrcTy == Ty) return Src; // If it is the special boolean case, simply bitcast it. - if ((isa(SrcTy) && isa(Ty)) || - (isa(SrcTy) && isa(Ty))) + if ((mlir::isa(SrcTy) && mlir::isa(Ty)) || + (mlir::isa(SrcTy) && mlir::isa(Ty))) return createBitcast(Src, Ty, CGF); llvm::TypeSize DstSize = CGF.LM.getDataLayout().getTypeAllocSize(Ty); - if (auto SrcSTy = dyn_cast(SrcTy)) { + if (auto SrcSTy = mlir::dyn_cast(SrcTy)) { Src = enterStructPointerForCoercedAccess(Src, SrcSTy, DstSize.getFixedValue(), CGF); SrcTy = Src.getType(); @@ -229,8 +233,8 @@ Value createCoercedValue(Value Src, Type Ty, LowerFunction &CGF) { // If the source and destination are integer or pointer types, just do an // extension or truncation to the desired type. - if ((isa(Ty) || isa(Ty)) && - (isa(SrcTy) || isa(SrcTy))) { + if ((mlir::isa(Ty) || mlir::isa(Ty)) && + (mlir::isa(SrcTy) || mlir::isa(SrcTy))) { cir_cconv_unreachable("NYI"); } @@ -249,25 +253,25 @@ Value createCoercedValue(Value Src, Type Ty, LowerFunction &CGF) { cir_cconv_unreachable("NYI"); } -Value emitAddressAtOffset(LowerFunction &LF, Value addr, - const ABIArgInfo &info) { +mlir::Value emitAddressAtOffset(LowerFunction &LF, mlir::Value addr, + const ABIArgInfo &info) { if (unsigned offset = info.getDirectOffset()) { cir_cconv_unreachable("NYI"); } return addr; } -mlir::cir::AllocaOp findAlloca(Operation *op) { +cir::AllocaOp findAlloca(mlir::Operation *op) { if (!op) return {}; - if (auto al = dyn_cast(op)) { + if (auto al = mlir::dyn_cast(op)) { return al; - } else if (auto ret = dyn_cast(op)) { + } else if (auto ret = mlir::dyn_cast(op)) { auto vals = ret.getInput(); if (vals.size() == 1) return findAlloca(vals[0].getDefiningOp()); - } else if (auto load = dyn_cast(op)) { + } else if (auto load = mlir::dyn_cast(op)) { return findAlloca(load.getAddr().getDefiningOp()); } @@ -284,18 +288,18 @@ mlir::cir::AllocaOp findAlloca(Operation *op) { /// However, instead of emitting the load, it emits a cast. /// /// FIXME(cir): Improve parity with the original codegen. -Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { - Type SrcTy = Src.getType(); +mlir::Value castReturnValue(mlir::Value Src, mlir::Type Ty, LowerFunction &LF) { + mlir::Type SrcTy = Src.getType(); // If SrcTy and Ty are the same, nothing to do. if (SrcTy == Ty) return Src; // If is the special boolean case, simply bitcast it. - if (isa(SrcTy) && isa(Ty)) + if (mlir::isa(SrcTy) && mlir::isa(Ty)) return createBitcast(Src, Ty, LF); - auto intTy = dyn_cast(Ty); + auto intTy = mlir::dyn_cast(Ty); if (intTy && !intTy.isPrimitive()) cir_cconv_unreachable("non-primitive types NYI"); llvm::TypeSize DstSize = LF.LM.getDataLayout().getTypeAllocSize(Ty); @@ -304,8 +308,8 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { llvm::TypeSize SrcSize = LF.LM.getDataLayout().getTypeAllocSize(SrcTy); - if ((isa(Ty) || isa(Ty)) && - (isa(SrcTy) || isa(SrcTy))) { + if ((mlir::isa(Ty) || mlir::isa(Ty)) && + (mlir::isa(SrcTy) || mlir::isa(SrcTy))) { cir_cconv_unreachable("NYI"); } @@ -327,7 +331,7 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { auto &rewriter = LF.getRewriter(); auto *ctxt = LF.LM.getMLIRContext(); auto ptrTy = PointerType::get(ctxt, Ty); - auto voidPtr = PointerType::get(ctxt, mlir::cir::VoidType::get(ctxt)); + auto voidPtr = PointerType::get(ctxt, cir::VoidType::get(ctxt)); // insert alloca near the previuos one auto point = rewriter.saveInsertionPoint(); @@ -353,12 +357,12 @@ Value castReturnValue(Value Src, Type Ty, LowerFunction &LF) { } // namespace // FIXME(cir): Pass SrcFn and NewFn around instead of having then as attributes. -LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, +LowerFunction::LowerFunction(LowerModule &LM, mlir::PatternRewriter &rewriter, FuncOp srcFn, FuncOp newFn) : Target(LM.getTarget()), rewriter(rewriter), SrcFn(srcFn), NewFn(newFn), LM(LM) {} -LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, +LowerFunction::LowerFunction(LowerModule &LM, mlir::PatternRewriter &rewriter, FuncOp srcFn, CallOp callOp) : Target(LM.getTarget()), rewriter(rewriter), SrcFn(srcFn), callOp(callOp), LM(LM) {} @@ -366,9 +370,9 @@ LowerFunction::LowerFunction(LowerModule &LM, PatternRewriter &rewriter, /// This method has partial parity with CodeGenFunction::EmitFunctionProlog from /// the original codegen. However, it focuses on the ABI-specific details. On /// top of that, it is also responsible for rewriting the original function. -LogicalResult -LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, - MutableArrayRef Args) { +llvm::LogicalResult LowerFunction::buildFunctionProlog( + const LowerFunctionInfo &FI, FuncOp Fn, + llvm::MutableArrayRef Args) { // NOTE(cir): Skipping naked and implicit-return-zero functions here. These // are dealt with in CIRGen. @@ -377,22 +381,22 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // If we're using inalloca, all the memory arguments are GEPs off of the last // parameter, which is a pointer to the complete memory area. - cir_cconv_assert(!::cir::MissingFeatures::inallocaArgs()); + cir_cconv_assert(!cir::MissingFeatures::inallocaArgs()); // Name the struct return parameter. - cir_cconv_assert(!::cir::MissingFeatures::sretArgs()); + cir_cconv_assert(!cir::MissingFeatures::sretArgs()); // Track if we received the parameter as a pointer (indirect, byval, or // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it // into a local alloca for us. - SmallVector ArgVals; + llvm::SmallVector ArgVals; ArgVals.reserve(Args.size()); // FIXME(cir): non-blocking workaround for argument types that are not yet // properly handled by the ABI. if (cirCConvAssertionMode && FI.arg_size() != Args.size()) { - cir_cconv_assert(::cir::MissingFeatures::ABIParameterCoercion()); - return success(); + cir_cconv_assert(cir::MissingFeatures::ABIParameterCoercion()); + return llvm::success(); } // Create a pointer value for every parameter declaration. This usually @@ -402,22 +406,23 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, cir_cconv_assert(FI.arg_size() == Args.size()); unsigned ArgNo = 0; LowerFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); - for (MutableArrayRef::const_iterator i = Args.begin(), - e = Args.end(); + for (llvm::MutableArrayRef::const_iterator + i = Args.begin(), + e = Args.end(); i != e; ++i, ++info_it, ++ArgNo) { - const Value Arg = *i; + const mlir::Value Arg = *i; const ABIArgInfo &ArgI = info_it->info; - bool isPromoted = ::cir::MissingFeatures::varDeclIsKNRPromoted(); + bool isPromoted = cir::MissingFeatures::varDeclIsKNRPromoted(); // We are converting from ABIArgInfo type to VarDecl type directly, unless // the parameter is promoted. In this case we convert to // CGFunctionInfo::ArgInfo type with subsequent argument demotion. - Type Ty = {}; + mlir::Type Ty = {}; if (isPromoted) cir_cconv_unreachable("NYI"); else Ty = Arg.getType(); - cir_cconv_assert(!::cir::MissingFeatures::evaluationKind()); + cir_cconv_assert(!cir::MissingFeatures::evaluationKind()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -426,27 +431,27 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, case ABIArgInfo::Extend: case ABIArgInfo::Direct: { auto AI = Fn.getArgument(FirstIRArg); - Type LTy = Arg.getType(); + mlir::Type LTy = Arg.getType(); // Prepare parameter attributes. So far, only attributes for pointer // parameters are prepared. See // http://llvm.org/docs/LangRef.html#paramattrs. - if (ArgI.getDirectOffset() == 0 && isa(LTy) && - isa(ArgI.getCoerceToType())) { + if (ArgI.getDirectOffset() == 0 && mlir::isa(LTy) && + mlir::isa(ArgI.getCoerceToType())) { cir_cconv_assert_or_abort( - !::cir::MissingFeatures::ABIPointerParameterAttrs(), "NYI"); + !cir::MissingFeatures::ABIPointerParameterAttrs(), "NYI"); } // Prepare the argument value. If we have the trivial case, handle it // with no muss and fuss. - if (!isa(ArgI.getCoerceToType()) && + if (!mlir::isa(ArgI.getCoerceToType()) && ArgI.getCoerceToType() == Ty && ArgI.getDirectOffset() == 0) { cir_cconv_assert(NumIRArgs == 1); // LLVM expects swifterror parameters to be used in very restricted // ways. Copy the value into a less-restricted temporary. - Value V = AI; - if (::cir::MissingFeatures::extParamInfo()) { + mlir::Value V = AI; + if (cir::MissingFeatures::extParamInfo()) { cir_cconv_unreachable("NYI"); } @@ -461,16 +466,16 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // NOTE(cir): Here we have a trivial case, which means we can just // replace all uses of the original argument with the new one. - Value oldArg = SrcFn.getArgument(ArgNo); - Value newArg = Fn.getArgument(FirstIRArg); + mlir::Value oldArg = SrcFn.getArgument(ArgNo); + mlir::Value newArg = Fn.getArgument(FirstIRArg); rewriter.replaceAllUsesWith(oldArg, newArg); break; } - cir_cconv_assert(!::cir::MissingFeatures::vectorType()); + cir_cconv_assert(!cir::MissingFeatures::vectorType()); - StructType STy = dyn_cast(ArgI.getCoerceToType()); + StructType STy = mlir::dyn_cast(ArgI.getCoerceToType()); if (ArgI.isDirect() && !ArgI.getCanBeFlattened() && STy && STy.getNumElements() > 1) { cir_cconv_unreachable("NYI"); @@ -483,16 +488,16 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // proper alignment for the given type being allocated. auto Alloca = rewriter.create( Fn.getLoc(), rewriter.getType(Ty), Ty, - /*name=*/StringRef(""), + /*name=*/llvm::StringRef(""), /*alignment=*/rewriter.getI64IntegerAttr(4)); - Value Ptr = buildAddressAtOffset(*this, Alloca.getResult(), ArgI); + mlir::Value Ptr = buildAddressAtOffset(*this, Alloca.getResult(), ArgI); // Fast-isel and the optimizer generally like scalar values better than // FCAs, so we flatten them if this is safe to do for this argument. if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && STy.getNumElements() > 1) { - auto ptrType = cast(Ptr.getType()); + auto ptrType = mlir::cast(Ptr.getType()); llvm::TypeSize structSize = LM.getTypes().getDataLayout().getTypeAllocSize(STy); llvm::TypeSize ptrElementSize = @@ -504,7 +509,7 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, uint64_t srcSize = structSize.getFixedValue(); uint64_t dstSize = ptrElementSize.getFixedValue(); - Value addrToStoreInto; + mlir::Value addrToStoreInto; if (srcSize <= dstSize) { addrToStoreInto = rewriter.create( Ptr.getLoc(), PointerType::get(STy, ptrType.getAddrSpace()), @@ -515,9 +520,9 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, assert(STy.getNumElements() == NumIRArgs); for (unsigned i = 0, e = STy.getNumElements(); i != e; ++i) { - Value ai = Fn.getArgument(FirstIRArg + i); - Type elementTy = STy.getMembers()[i]; - Value eltPtr = rewriter.create( + mlir::Value ai = Fn.getArgument(FirstIRArg + i); + mlir::Type elementTy = STy.getMembers()[i]; + mlir::Value eltPtr = rewriter.create( ai.getLoc(), PointerType::get(elementTy, ptrType.getAddrSpace()), addrToStoreInto, @@ -532,13 +537,13 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, } else { // Simple case, just do a coerced store of the argument into the alloca. cir_cconv_assert(NumIRArgs == 1); - Value AI = Fn.getArgument(FirstIRArg); + mlir::Value AI = Fn.getArgument(FirstIRArg); // TODO(cir): Set argument name in the new function. createCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); } // Match to what EmitParamDecl is expecting for this type. - if (::cir::MissingFeatures::evaluationKind()) { + if (cir::MissingFeatures::evaluationKind()) { cir_cconv_unreachable("NYI"); } else { // FIXME(cir): Should we have an ParamValue abstraction like in the @@ -549,10 +554,10 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // NOTE(cir): Once we have uncoerced the argument, we should be able to // RAUW the original argument alloca with the new one. This assumes that // the argument is used only to be stored in a alloca. - Value arg = SrcFn.getArgument(ArgNo); + mlir::Value arg = SrcFn.getArgument(ArgNo); cir_cconv_assert(arg.hasOneUse()); auto *firstStore = *arg.user_begin(); - auto argAlloca = cast(firstStore).getAddr(); + auto argAlloca = mlir::cast(firstStore).getAddr(); rewriter.replaceAllUsesWith(argAlloca, Alloca); rewriter.eraseOp(firstStore); rewriter.eraseOp(argAlloca.getDefiningOp()); @@ -574,17 +579,17 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, "For truly ABI indirect arguments"); auto ptrTy = rewriter.getType(Arg.getType()); - Value arg = SrcFn.getArgument(ArgNo); + mlir::Value arg = SrcFn.getArgument(ArgNo); cir_cconv_assert(arg.hasOneUse()); auto *firstStore = *arg.user_begin(); - auto argAlloca = cast(firstStore).getAddr(); + auto argAlloca = mlir::cast(firstStore).getAddr(); rewriter.setInsertionPoint(argAlloca.getDefiningOp()); auto align = LM.getDataLayout().getABITypeAlign(ptrTy); auto alignAttr = rewriter.getI64IntegerAttr(align.value()); auto newAlloca = rewriter.create( Fn.getLoc(), rewriter.getType(ptrTy), ptrTy, - /*name=*/StringRef(""), + /*name=*/llvm::StringRef(""), /*alignment=*/alignAttr); rewriter.create(newAlloca.getLoc(), AI, @@ -616,15 +621,16 @@ LowerFunction::buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, // we migth have to add a counter part here. Currently, it is not needed. } - return success(); + return llvm::success(); } -LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { +llvm::LogicalResult +LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { // NOTE(cir): no-return, naked, and no result functions should be handled in // CIRGen. - Value RV = {}; - Type RetTy = FI.getReturnType(); + mlir::Value RV = {}; + mlir::Type RetTy = FI.getReturnType(); const ABIArgInfo &RetAI = FI.getReturnInfo(); switch (RetAI.getKind()) { @@ -633,7 +639,7 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { break; case ABIArgInfo::Indirect: { - Value RVAddr = {}; + mlir::Value RVAddr = {}; CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), FI, true); if (IRFunctionArgs.hasSRetArg()) { auto &entry = NewFn.getBody().front(); @@ -663,7 +669,7 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { // If there is a dominating store to ReturnValue, we can elide // the load, zap the store, and usually zap the alloca. // NOTE(cir): This seems like a premature optimization case. Skipping it. - if (::cir::MissingFeatures::returnValueDominatingStoreOptmiization()) { + if (cir::MissingFeatures::returnValueDominatingStoreOptmiization()) { cir_cconv_unreachable("NYI"); } // Otherwise, we have to do a simple load. @@ -672,7 +678,7 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { // for us and there is no casting necessary to conform to the ABI. The // zero-extension is enforced by the return value's attribute. Just // early exit. - return success(); + return llvm::success(); } } else { // NOTE(cir): Unlike the original codegen, CIR may have multiple return @@ -693,15 +699,16 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { cir_cconv_unreachable("Unhandled ABIArgInfo::Kind"); } - return success(); + return llvm::success(); } /// Generate code for a function based on the ABI-specific information. /// /// This method has partial parity with CodeGenFunction::GenerateCode, but it /// focuses on the ABI-specific details. So a lot of codegen stuff is removed. -LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, - const LowerFunctionInfo &FnInfo) { +llvm::LogicalResult +LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, + const LowerFunctionInfo &FnInfo) { cir_cconv_assert(newFn && "generating code for null Function"); auto Args = oldFn.getArguments(); @@ -709,10 +716,10 @@ LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, cir_cconv_assert(newFn.empty() && "Function already has a body"); rewriter.setInsertionPointToEnd(newFn.addEntryBlock()); if (buildFunctionProlog(FnInfo, newFn, oldFn.getArguments()).failed()) - return failure(); + return llvm::failure(); // Ensure that old ABI-agnostic arguments uses were replaced. - const auto hasNoUses = [](Value val) { return val.getUses().empty(); }; + const auto hasNoUses = [](mlir::Value val) { return val.getUses().empty(); }; cir_cconv_assert(std::all_of(Args.begin(), Args.end(), hasNoUses) && "Missing RAUW?"); @@ -721,8 +728,8 @@ LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, // to move the old function body to the new function. // Backup references to entry blocks. - Block *srcBlock = &oldFn.getBody().front(); - Block *dstBlock = &newFn.getBody().front(); + mlir::Block *srcBlock = &oldFn.getBody().front(); + mlir::Block *dstBlock = &newFn.getBody().front(); // Ensure both blocks have the same number of arguments in order to // safely merge them. @@ -751,12 +758,12 @@ LogicalResult LowerFunction::generateCode(FuncOp oldFn, FuncOp newFn, // Emit the standard function epilogue. if (buildFunctionEpilog(FnInfo).failed()) - return failure(); + return llvm::failure(); - return success(); + return llvm::success(); } -void LowerFunction::buildAggregateStore(Value Val, Value Dest, +void LowerFunction::buildAggregateStore(mlir::Value Val, mlir::Value Dest, bool DestIsVolatile) { // In LLVM codegen: // Function to store a first-class aggregate into memory. We prefer to @@ -767,7 +774,7 @@ void LowerFunction::buildAggregateStore(Value Val, Value Dest, (void)DestIsVolatile; // Circumvent CIR's type checking. - Type pointeeTy = mlir::cast(Dest.getType()).getPointee(); + mlir::Type pointeeTy = mlir::cast(Dest.getType()).getPointee(); if (Val.getType() != pointeeTy) { // NOTE(cir): We only bitcast and store if the types have the same size. cir_cconv_assert((LM.getDataLayout().getTypeSizeInBits(Val.getType()) == @@ -780,7 +787,8 @@ void LowerFunction::buildAggregateStore(Value Val, Value Dest, rewriter.create(Val.getLoc(), Val, Dest); } -Value LowerFunction::buildAggregateBitcast(Value Val, Type DestTy) { +mlir::Value LowerFunction::buildAggregateBitcast(mlir::Value Val, + mlir::Type DestTy) { auto Cast = createCoercedBitcast(Val, DestTy, *this); return rewriter.create(Val.getLoc(), Cast); } @@ -790,8 +798,8 @@ Value LowerFunction::buildAggregateBitcast(Value Val, Type DestTy) { /// FIXME(cir): This method has partial parity to CodeGenFunction's /// EmitCallEpxr method defined in CGExpr.cpp. This could likely be /// removed in favor of a more direct approach. -LogicalResult LowerFunction::rewriteCallOp(CallOp op, - ReturnValueSlot retValSlot) { +llvm::LogicalResult LowerFunction::rewriteCallOp(CallOp op, + ReturnValueSlot retValSlot) { // TODO(cir): Check if BlockCall, CXXMemberCall, CUDAKernelCall, or // CXXOperatorMember require special handling here. These should be handled @@ -810,8 +818,9 @@ LogicalResult LowerFunction::rewriteCallOp(CallOp op, if (SrcFn) { fnType = SrcFn.getFunctionType(); } else if (op.isIndirect()) { - if (auto ptrTy = dyn_cast(op.getIndirectCall().getType())) - fnType = dyn_cast(ptrTy.getPointee()); + if (auto ptrTy = + mlir::dyn_cast(op.getIndirectCall().getType())) + fnType = mlir::dyn_cast(ptrTy.getPointee()); } cir_cconv_assert(fnType && "No callee function type"); @@ -825,7 +834,7 @@ LogicalResult LowerFunction::rewriteCallOp(CallOp op, // Erase original ABI-agnostic call. rewriter.eraseOp(op); - return success(); + return llvm::success(); } /// Rewrite a call operation to abide to the ABI calling convention. @@ -833,9 +842,10 @@ LogicalResult LowerFunction::rewriteCallOp(CallOp op, /// FIXME(cir): This method has partial parity to CodeGenFunction's EmitCall /// method defined in CGExpr.cpp. This could likely be removed in favor of a /// more direct approach since most of the code here is exclusively CodeGen. -Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, - CallOp callOp, ReturnValueSlot retValSlot, - Value Chain) { +mlir::Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, + CallOp callOp, + ReturnValueSlot retValSlot, + mlir::Value Chain) { // NOTE(cir): Skip a bunch of function pointer stuff and AST declaration // asserts. Also skip sanitizers, as these should likely be handled at // CIRGen. @@ -871,17 +881,17 @@ Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, // Chain calls use this same code path to add the invisible chain parameter // to the function type. if ((origCallee && origCallee.getNoProto()) || Chain) { - cir_cconv_assert_or_abort(::cir::MissingFeatures::ABINoProtoFunctions(), + cir_cconv_assert_or_abort(cir::MissingFeatures::ABINoProtoFunctions(), "NYI"); } - cir_cconv_assert(!::cir::MissingFeatures::CUDA()); + cir_cconv_assert(!cir::MissingFeatures::CUDA()); // TODO(cir): LLVM IR has the concept of "CallBase", which is a base class // for all types of calls. Perhaps we should have a CIR interface to mimic // this class. CallOp CallOrInvoke = {}; - Value CallResult = + mlir::Value CallResult = rewriteCallOp(FnInfo, origCallee, callOp, retValSlot, Args, CallOrInvoke, /*isMustTail=*/false, callOp.getLoc()); @@ -893,17 +903,18 @@ Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, // NOTE(cir): This method has partial parity to CodeGenFunction's EmitCall // method in CGCall.cpp. When incrementing it, use the original codegen as a // reference: add ABI-specific stuff and skip codegen stuff. -Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, - FuncOp Callee, CallOp Caller, - ReturnValueSlot ReturnValue, - CallArgList &CallArgs, CallOp CallOrInvoke, - bool isMustTail, Location loc) { +mlir::Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, + FuncOp Callee, CallOp Caller, + ReturnValueSlot ReturnValue, + CallArgList &CallArgs, + CallOp CallOrInvoke, bool isMustTail, + mlir::Location loc) { // FIXME: We no longer need the types from CallArgs; lift up and simplify. // Handle struct-return functions by passing a pointer to the // location that we would like to return into. - Type RetTy = CallInfo.getReturnType(); // ABI-agnostic type. - const ::cir::ABIArgInfo &RetAI = CallInfo.getReturnInfo(); + mlir::Type RetTy = CallInfo.getReturnType(); // ABI-agnostic type. + const cir::ABIArgInfo &RetAI = CallInfo.getReturnInfo(); FuncType IRFuncTy = LM.getTypes().getFunctionType(CallInfo); @@ -919,7 +930,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, } CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), CallInfo); - SmallVector IRCallArgs(IRFunctionArgs.totalIRArgs()); + llvm::SmallVector IRCallArgs(IRFunctionArgs.totalIRArgs()); // If the call returns a temporary with struct return, create a temporary // alloca to hold the result, unless one is given to us. @@ -927,7 +938,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, cir_cconv_unreachable("NYI"); } - cir_cconv_assert(!::cir::MissingFeatures::swift()); + cir_cconv_assert(!cir::MissingFeatures::swift()); // NOTE(cir): Skipping lifetime markers here. @@ -950,43 +961,43 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, case ABIArgInfo::Extend: case ABIArgInfo::Direct: { - if (isa(info_it->type)) { + if (mlir::isa(info_it->type)) { IRCallArgs[FirstIRArg] = *I; break; } - if (!isa(ArgInfo.getCoerceToType()) && + if (!mlir::isa(ArgInfo.getCoerceToType()) && ArgInfo.getCoerceToType() == info_it->type && ArgInfo.getDirectOffset() == 0) { cir_cconv_assert(NumIRArgs == 1); - Value V; - if (!isa(I->getType())) { + mlir::Value V; + if (!mlir::isa(I->getType())) { V = *I; } else { cir_cconv_unreachable("NYI"); } - if (::cir::MissingFeatures::extParamInfo()) { + if (cir::MissingFeatures::extParamInfo()) { cir_cconv_unreachable("NYI"); } if (ArgInfo.getCoerceToType() != V.getType() && - isa(V.getType())) + mlir::isa(V.getType())) cir_cconv_unreachable("NYI"); if (FirstIRArg < IRFuncTy.getNumInputs() && V.getType() != IRFuncTy.getInput(FirstIRArg)) cir_cconv_unreachable("NYI"); - if (::cir::MissingFeatures::undef()) + if (cir::MissingFeatures::undef()) cir_cconv_unreachable("NYI"); IRCallArgs[FirstIRArg] = V; break; } // FIXME: Avoid the conversion through memory if possible. - Value Src = {}; - if (!isa(I->getType())) { + mlir::Value Src = {}; + if (!mlir::isa(I->getType())) { cir_cconv_unreachable("NYI"); } else { // NOTE(cir): L/RValue stuff are left for CIRGen to handle. @@ -999,19 +1010,20 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // Fast-isel and the optimizer generally like scalar values better than // FCAs, so we flatten them if this is safe to do for this argument. - StructType STy = dyn_cast(ArgInfo.getCoerceToType()); + StructType STy = mlir::dyn_cast(ArgInfo.getCoerceToType()); if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { cir_cconv_unreachable("NYI"); } else { // In the simple case, just pass the coerced loaded value. cir_cconv_assert(NumIRArgs == 1); - Value Load = createCoercedValue(Src, ArgInfo.getCoerceToType(), *this); + mlir::Value Load = + createCoercedValue(Src, ArgInfo.getCoerceToType(), *this); // FIXME(cir): We should probably handle CMSE non-secure calls here - cir_cconv_assert(!::cir::MissingFeatures::cmseNonSecureCallAttr()); + cir_cconv_assert(!cir::MissingFeatures::cmseNonSecureCallAttr()); // since they are a ARM-specific feature. - if (::cir::MissingFeatures::undef()) + if (cir::MissingFeatures::undef()) cir_cconv_unreachable("NYI"); IRCallArgs[FirstIRArg] = Load; } @@ -1033,7 +1045,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // debugging stuff here. // Update the largest vector width if any arguments have vector types. - cir_cconv_assert(!::cir::MissingFeatures::vectorType()); + cir_cconv_assert(!cir::MissingFeatures::vectorType()); // Compute the calling convention and attributes. @@ -1072,7 +1084,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, rewriter.getAttr(rewriter.getDictionaryAttr({})); newCallOp->setAttr("extra_attrs", extraAttrs); - cir_cconv_assert(!::cir::MissingFeatures::vectorType()); + cir_cconv_assert(!cir::MissingFeatures::vectorType()); // NOTE(cir): Skipping some ObjC, tail-call, debug, and attribute stuff // here. @@ -1083,26 +1095,26 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // writebacks here. These should be handled in CIRGen, I think. // Convert return value from ABI-agnostic to ABI-aware. - Value Ret = [&] { + mlir::Value Ret = [&] { // NOTE(cir): CIRGen already handled the emission of the return value. We // need only to handle the ABI-specific to ABI-agnostic cast here. switch (RetAI.getKind()) { - case ::cir::ABIArgInfo::Ignore: + case cir::ABIArgInfo::Ignore: // If we are ignoring an argument that had a result, make sure to // construct the appropriate return value for our caller. return getUndefRValue(RetTy); case ABIArgInfo::Extend: case ABIArgInfo::Direct: { - Type RetIRTy = RetTy; + mlir::Type RetIRTy = RetTy; if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { switch (getEvaluationKind(RetTy)) { - case ::cir::TypeEvaluationKind::TEK_Scalar: { + case cir::TypeEvaluationKind::TEK_Scalar: { // If the argument doesn't match, perform a bitcast to coerce it. // This can happen due to trivial type mismatches. NOTE(cir): // Perhaps this section should handle CIR's boolean case. - Value V = newCallOp.getResult(); + mlir::Value V = newCallOp.getResult(); if (V.getType() != RetIRTy) cir_cconv_unreachable("NYI"); return V; @@ -1115,14 +1127,14 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // If coercing a fixed vector from a scalable vector for ABI // compatibility, and the types match, use the llvm.vector.extract // intrinsic to perform the conversion. - if (::cir::MissingFeatures::vectorType()) { + if (cir::MissingFeatures::vectorType()) { cir_cconv_unreachable("NYI"); } // FIXME(cir): Use return value slot here. - Value RetVal = callOp.getResult(); + mlir::Value RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. - cir_cconv_assert(!::cir::MissingFeatures::volatileTypes()); + cir_cconv_assert(!cir::MissingFeatures::volatileTypes()); // NOTE(cir): If the function returns, there should always be a valid // return value present. Instead of setting the return value here, we @@ -1130,18 +1142,18 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (!RetVal) { RetVal = callOp.getResult(); // TODO(cir): Check for volatile return values. - cir_cconv_assert(::cir::MissingFeatures::volatileTypes()); + cir_cconv_assert(cir::MissingFeatures::volatileTypes()); } // An empty record can overlap other data (if declared with // no_unique_address); omit the store for such types - as there is no // actual data to store. - if (dyn_cast(RetTy) && - cast(RetTy).getNumElements() != 0) { + if (mlir::dyn_cast(RetTy) && + mlir::cast(RetTy).getNumElements() != 0) { RetVal = newCallOp.getResult(); for (auto user : Caller.getOperation()->getUsers()) { - if (auto storeOp = dyn_cast(user)) { + if (auto storeOp = mlir::dyn_cast(user)) { auto DestPtr = createCoercedBitcast(storeOp.getAddr(), RetVal.getType(), *this); rewriter.replaceOpWithNewOp(storeOp, RetVal, DestPtr); @@ -1167,23 +1179,22 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // NOTE(cir): This method has partial parity to CodeGenFunction's // GetUndefRValue defined in CGExpr.cpp. -Value LowerFunction::getUndefRValue(Type Ty) { - if (isa(Ty)) +mlir::Value LowerFunction::getUndefRValue(mlir::Type Ty) { + if (mlir::isa(Ty)) return nullptr; llvm::outs() << "Missing undef handler for value type: " << Ty << "\n"; cir_cconv_unreachable("NYI"); } -::cir::TypeEvaluationKind LowerFunction::getEvaluationKind(Type type) { +cir::TypeEvaluationKind LowerFunction::getEvaluationKind(mlir::Type type) { // FIXME(cir): Implement type classes for CIR types. - if (isa(type)) - return ::cir::TypeEvaluationKind::TEK_Aggregate; - if (isa(type)) - return ::cir::TypeEvaluationKind::TEK_Scalar; + if (mlir::isa(type)) + return cir::TypeEvaluationKind::TEK_Aggregate; + if (mlir::isa(type)) + return cir::TypeEvaluationKind::TEK_Scalar; cir_cconv_unreachable("NYI"); } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h index bd46bcdd1d8b..4300cdb4a99b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.h @@ -23,10 +23,9 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/TypeEvaluationKind.h" -namespace mlir { namespace cir { -using CallArgList = SmallVector; +using CallArgList = llvm::SmallVector; class LowerFunction { LowerFunction(const LowerFunction &) = delete; @@ -36,72 +35,74 @@ class LowerFunction { const clang::TargetInfo &Target; - PatternRewriter &rewriter; + mlir::PatternRewriter &rewriter; FuncOp SrcFn; // Original ABI-agnostic function. FuncOp NewFn; // New ABI-aware function. CallOp callOp; // Call operation to be lowered. public: /// Builder for lowering calling convention of a function definition. - LowerFunction(LowerModule &LM, PatternRewriter &rewriter, FuncOp srcFn, + LowerFunction(LowerModule &LM, mlir::PatternRewriter &rewriter, FuncOp srcFn, FuncOp newFn); /// Builder for lowering calling convention of a call operation. - LowerFunction(LowerModule &LM, PatternRewriter &rewriter, FuncOp srcFn, + LowerFunction(LowerModule &LM, mlir::PatternRewriter &rewriter, FuncOp srcFn, CallOp callOp); ~LowerFunction() = default; LowerModule &LM; // Per-module state. - PatternRewriter &getRewriter() const { return rewriter; } + mlir::PatternRewriter &getRewriter() const { return rewriter; } const clang::TargetInfo &getTarget() const { return Target; } // Build ABI/Target-specific function prologue. - LogicalResult buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, - MutableArrayRef Args); + llvm::LogicalResult + buildFunctionProlog(const LowerFunctionInfo &FI, FuncOp Fn, + llvm::MutableArrayRef Args); // Build ABI/Target-specific function epilogue. - LogicalResult buildFunctionEpilog(const LowerFunctionInfo &FI); + llvm::LogicalResult buildFunctionEpilog(const LowerFunctionInfo &FI); // Parity with CodeGenFunction::GenerateCode. Keep in mind that several // sections in the original function are focused on codegen unrelated to the // ABI. Such sections are handled in CIR's codegen, not here. - LogicalResult generateCode(FuncOp oldFn, FuncOp newFn, - const LowerFunctionInfo &FnInfo); + llvm::LogicalResult generateCode(FuncOp oldFn, FuncOp newFn, + const LowerFunctionInfo &FnInfo); // Emit the most simple cir.store possible (e.g. a store for a whole // struct), which can later be broken down in other CIR levels (or prior // to dialect codegen). - void buildAggregateStore(Value Val, Value Dest, bool DestIsVolatile); + void buildAggregateStore(mlir::Value Val, mlir::Value Dest, + bool DestIsVolatile); // Emit a simple bitcast for a coerced aggregate type to convert it from an // ABI-agnostic to an ABI-aware type. - Value buildAggregateBitcast(Value Val, Type DestTy); + mlir::Value buildAggregateBitcast(mlir::Value Val, mlir::Type DestTy); /// Rewrite a call operation to abide to the ABI calling convention. - LogicalResult rewriteCallOp(CallOp op, - ReturnValueSlot retValSlot = ReturnValueSlot()); - Value rewriteCallOp(FuncType calleeTy, FuncOp origCallee, CallOp callOp, - ReturnValueSlot retValSlot, Value Chain = nullptr); - Value rewriteCallOp(const LowerFunctionInfo &CallInfo, FuncOp Callee, - CallOp Caller, ReturnValueSlot ReturnValue, - CallArgList &CallArgs, CallOp CallOrInvoke, - bool isMustTail, Location loc); + llvm::LogicalResult + rewriteCallOp(CallOp op, ReturnValueSlot retValSlot = ReturnValueSlot()); + mlir::Value rewriteCallOp(FuncType calleeTy, FuncOp origCallee, CallOp callOp, + ReturnValueSlot retValSlot, + mlir::Value Chain = nullptr); + mlir::Value rewriteCallOp(const LowerFunctionInfo &CallInfo, FuncOp Callee, + CallOp Caller, ReturnValueSlot ReturnValue, + CallArgList &CallArgs, CallOp CallOrInvoke, + bool isMustTail, mlir::Location loc); /// Get an appropriate 'undef' value for the given type. - Value getUndefRValue(Type Ty); + mlir::Value getUndefRValue(mlir::Type Ty); /// Return the TypeEvaluationKind of Type \c T. - static ::cir::TypeEvaluationKind getEvaluationKind(Type T); + static cir::TypeEvaluationKind getEvaluationKind(mlir::Type T); - static bool hasScalarEvaluationKind(Type T) { - return getEvaluationKind(T) == ::cir::TypeEvaluationKind::TEK_Scalar; + static bool hasScalarEvaluationKind(mlir::Type T) { + return getEvaluationKind(T) == cir::TypeEvaluationKind::TEK_Scalar; } }; } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERFUNCTION_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h index 8da01bc23ada..6b0823c6d7c1 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunctionInfo.h @@ -21,7 +21,6 @@ #include "llvm/ADT/ArrayRef.h" #include "llvm/Support/TrailingObjects.h" -namespace mlir { namespace cir { /// A class for recording the number of arguments that a function @@ -64,7 +63,7 @@ class RequiredArgs { // named in the TrailingObjects base class of CGFunctionInfo. struct LowerFunctionInfoArgInfo { mlir::Type type; // Original ABI-agnostic type. - ::cir::ABIArgInfo info; // ABI-specific information. + cir::ABIArgInfo info; // ABI-specific information. }; // FIXME(cir): We could likely encode this information within CIR/MLIR, allowing @@ -108,11 +107,11 @@ class LowerFunctionInfo final public: static LowerFunctionInfo *create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, - Type resultType, - ArrayRef argTypes, + mlir::Type resultType, + llvm::ArrayRef argTypes, RequiredArgs required) { // TODO(cir): Add assertions? - cir_cconv_assert(!::cir::MissingFeatures::extParamInfo()); + cir_cconv_assert(!cir::MissingFeatures::extParamInfo()); void *buffer = operator new(totalSizeToAlloc(argTypes.size() + 1)); LowerFunctionInfo *FI = new (buffer) LowerFunctionInfo(); @@ -141,8 +140,8 @@ class LowerFunctionInfo final typedef const ArgInfo *const_arg_iterator; typedef ArgInfo *arg_iterator; - MutableArrayRef arguments() { - return MutableArrayRef(arg_begin(), NumArgs); + llvm::MutableArrayRef arguments() { + return llvm::MutableArrayRef(arg_begin(), NumArgs); } const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; } @@ -157,10 +156,10 @@ class LowerFunctionInfo final return isVariadic() ? Required.getNumRequiredArgs() : arg_size(); } - Type getReturnType() const { return getArgsBuffer()[0].type; } + mlir::Type getReturnType() const { return getArgsBuffer()[0].type; } - ::cir::ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; } - const ::cir::ABIArgInfo &getReturnInfo() const { + cir::ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; } + const cir::ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; } @@ -173,6 +172,5 @@ class LowerFunctionInfo final }; } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERFUNCTIONINFO_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 85d08b4ce03f..8e4ac59a9363 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -29,11 +29,10 @@ #include "clang/CIR/Target/AArch64.h" #include "llvm/Support/ErrorHandling.h" -using MissingFeatures = ::cir::MissingFeatures; -using AArch64ABIKind = ::cir::AArch64ABIKind; -using X86AVXABILevel = ::cir::X86AVXABILevel; +using MissingFeatures = cir::MissingFeatures; +using AArch64ABIKind = cir::AArch64ABIKind; +using X86AVXABILevel = cir::X86AVXABILevel; -namespace mlir { namespace cir { static CIRCXXABI *createCXXABI(LowerModule &CGM) { @@ -89,10 +88,10 @@ createTargetLoweringInfo(LowerModule &LM) { } } -LowerModule::LowerModule(clang::LangOptions opts, ModuleOp &module, - StringAttr DL, +LowerModule::LowerModule(clang::LangOptions opts, mlir::ModuleOp &module, + mlir::StringAttr DL, std::unique_ptr target, - PatternRewriter &rewriter) + mlir::PatternRewriter &rewriter) : context(module, opts), module(module), Target(std::move(target)), ABI(createCXXABI(*this)), types(*this, DL.getValue()), rewriter(rewriter) { @@ -173,7 +172,7 @@ void LowerModule::setFunctionAttributes(FuncOp oldFn, FuncOp newFn, /// /// This method is based on CodeGenModule::EmitGlobalFunctionDefinition but it /// considerably simplified as it tries to remove any CodeGen related code. -LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { +llvm::LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint(op); @@ -187,7 +186,7 @@ LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { // here, as they are mostly codegen logic. // Create a new function with the ABI-specific types. - FuncOp newFn = cast(rewriter.cloneWithoutRegions(op)); + FuncOp newFn = mlir::cast(rewriter.cloneWithoutRegions(op)); newFn.setType(Ty); // NOTE(cir): The clone above will preserve any existing attributes. If there @@ -204,15 +203,16 @@ LogicalResult LowerModule::rewriteFunctionDefinition(FuncOp op) { if (LowerFunction(*this, rewriter, op, newFn) .generateCode(op, newFn, FI) .failed()) - return failure(); + return llvm::failure(); } // Erase original ABI-agnostic function. rewriter.eraseOp(op); - return success(); + return llvm::success(); } -LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { +llvm::LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, + FuncOp funcOp) { mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint(callOp); @@ -220,24 +220,24 @@ LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, FuncOp funcOp) { if (LowerFunction(*this, rewriter, funcOp, callOp) .rewriteCallOp(callOp) .failed()) - return failure(); + return llvm::failure(); - return success(); + return llvm::success(); } // TODO: not to create it every time -std::unique_ptr createLowerModule(ModuleOp module, - PatternRewriter &rewriter) { - assert(module->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName()) && +std::unique_ptr +createLowerModule(mlir::ModuleOp module, mlir::PatternRewriter &rewriter) { + assert(module->getAttr(mlir::LLVM::LLVMDialect::getDataLayoutAttrName()) && "Missing data layout attribute"); // Fetch the LLVM data layout string. - auto dataLayoutStr = cast( - module->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName())); + auto dataLayoutStr = mlir::cast( + module->getAttr(mlir::LLVM::LLVMDialect::getDataLayoutAttrName())); // Fetch target information. llvm::Triple triple( - cast(module->getAttr("cir.triple")).getValue()); + mlir::cast(module->getAttr("cir.triple")).getValue()); clang::TargetOptions targetOptions; targetOptions.Triple = triple.str(); auto targetInfo = clang::targets::AllocateTarget(triple, targetOptions); @@ -245,7 +245,7 @@ std::unique_ptr createLowerModule(ModuleOp module, // FIXME(cir): This just uses the default language options. We need to account // for custom options. // Create context. - cir_cconv_assert(!::cir::MissingFeatures::langOpts()); + cir_cconv_assert(!cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; return std::make_unique(langOpts, module, dataLayoutStr, @@ -253,4 +253,3 @@ std::unique_ptr createLowerModule(ModuleOp module, } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index 07e74fe4dc51..16f5a099cf63 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -28,24 +28,23 @@ #include "clang/CIR/MissingFeatures.h" #include -namespace mlir { namespace cir { class LowerModule { CIRLowerContext context; - ModuleOp module; + mlir::ModuleOp module; const std::unique_ptr Target; mutable std::unique_ptr TheTargetCodeGenInfo; std::unique_ptr ABI; LowerTypes types; - PatternRewriter &rewriter; + mlir::PatternRewriter &rewriter; public: - LowerModule(clang::LangOptions opts, ModuleOp &module, StringAttr DL, - std::unique_ptr target, - PatternRewriter &rewriter); + LowerModule(clang::LangOptions opts, mlir::ModuleOp &module, + mlir::StringAttr DL, std::unique_ptr target, + mlir::PatternRewriter &rewriter); ~LowerModule() = default; // Trivial getters. @@ -53,10 +52,10 @@ class LowerModule { CIRLowerContext &getContext() { return context; } CIRCXXABI &getCXXABI() const { return *ABI; } const clang::TargetInfo &getTarget() const { return *Target; } - MLIRContext *getMLIRContext() { return module.getContext(); } - ModuleOp &getModule() { return module; } + mlir::MLIRContext *getMLIRContext() { return module.getContext(); } + mlir::ModuleOp &getModule() { return module; } - const ::cir::CIRDataLayout &getDataLayout() const { + const cir::CIRDataLayout &getDataLayout() const { return types.getDataLayout(); } @@ -68,12 +67,12 @@ class LowerModule { // FIXME(cir): This would be in ASTContext, not CodeGenModule. clang::TargetCXXABI::Kind getCXXABIKind() const { auto kind = getTarget().getCXXABI().getKind(); - cir_cconv_assert(!::cir::MissingFeatures::langOpts()); + cir_cconv_assert(!cir::MissingFeatures::langOpts()); return kind; } void - constructAttributeList(StringRef Name, const LowerFunctionInfo &FI, + constructAttributeList(llvm::StringRef Name, const LowerFunctionInfo &FI, FuncOp CalleeInfo, // TODO(cir): Implement CalleeInfo? FuncOp newFn, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk); @@ -86,23 +85,23 @@ class LowerModule { bool IsIncompleteFunction, bool IsThunk); // Create a CIR FuncOp with with the given signature. - FuncOp createCIRFunction( - StringRef MangledName, FuncType Ty, FuncOp D, bool ForVTable, - bool DontDefer = false, bool IsThunk = false, - ArrayRef = {}, // TODO(cir): __attribute__(()) stuff. - bool IsForDefinition = false); + FuncOp createCIRFunction(llvm::StringRef MangledName, FuncType Ty, FuncOp D, + bool ForVTable, bool DontDefer = false, + bool IsThunk = false, + llvm::ArrayRef = + {}, // TODO(cir): __attribute__(()) stuff. + bool IsForDefinition = false); // Rewrite CIR FuncOp to match the target ABI. - LogicalResult rewriteFunctionDefinition(FuncOp op); + llvm::LogicalResult rewriteFunctionDefinition(FuncOp op); // Rewrite CIR CallOp to match the target ABI. - LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp = {}); + llvm::LogicalResult rewriteFunctionCall(CallOp callOp, FuncOp funcOp = {}); }; -std::unique_ptr createLowerModule(ModuleOp module, - PatternRewriter &rewriter); +std::unique_ptr createLowerModule(mlir::ModuleOp module, + mlir::PatternRewriter &rewriter); } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERMODULE_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index 5a3382dc40e6..f4b9c73cf43f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -20,9 +20,9 @@ #include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" -using namespace ::mlir::cir; +using namespace cir; -using ABIArgInfo = ::cir::ABIArgInfo; +using ABIArgInfo = cir::ABIArgInfo; unsigned LowerTypes::clangCallConvToLLVMCallConv(clang::CallingConv CC) { switch (CC) { @@ -33,7 +33,7 @@ unsigned LowerTypes::clangCallConvToLLVMCallConv(clang::CallingConv CC) { } } -LowerTypes::LowerTypes(LowerModule &LM, StringRef DLString) +LowerTypes::LowerTypes(LowerModule &LM, llvm::StringRef DLString) : LM(LM), context(LM.getContext()), Target(LM.getTarget()), CXXABI(LM.getCXXABI()), TheABIInfo(LM.getTargetLoweringInfo().getABIInfo()), @@ -43,14 +43,14 @@ LowerTypes::LowerTypes(LowerModule &LM, StringRef DLString) FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { mlir::Type resultType = {}; - const ::cir::ABIArgInfo &retAI = FI.getReturnInfo(); + const cir::ABIArgInfo &retAI = FI.getReturnInfo(); switch (retAI.getKind()) { case ABIArgInfo::Extend: case ABIArgInfo::Direct: resultType = retAI.getCoerceToType(); break; - case ::cir::ABIArgInfo::Ignore: - case ::cir::ABIArgInfo::Indirect: + case cir::ABIArgInfo::Ignore: + case cir::ABIArgInfo::Indirect: resultType = VoidType::get(getMLIRContext()); break; default: @@ -58,17 +58,17 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { } CIRToCIRArgMapping IRFunctionArgs(getContext(), FI, true); - SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); + llvm::SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); // Add type for sret argument. if (IRFunctionArgs.hasSRetArg()) { mlir::Type ret = FI.getReturnType(); ArgTypes[IRFunctionArgs.getSRetArgNo()] = - mlir::cir::PointerType::get(getMLIRContext(), ret); + cir::PointerType::get(getMLIRContext(), ret); } // Add type for inalloca argument. - cir_cconv_assert(!::cir::MissingFeatures::inallocaArgs()); + cir_cconv_assert(!cir::MissingFeatures::inallocaArgs()); // Add in all of the required arguments. unsigned ArgNo = 0; @@ -77,7 +77,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { for (; it != ie; ++it, ++ArgNo) { const ABIArgInfo &ArgInfo = it->info; - cir_cconv_assert(!::cir::MissingFeatures::argumentPadding()); + cir_cconv_assert(!cir::MissingFeatures::argumentPadding()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -87,8 +87,8 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { case ABIArgInfo::Direct: { // Fast-isel and the optimizer generally like scalar values better than // FCAs, so we flatten them if this is safe to do for this argument. - Type argType = ArgInfo.getCoerceToType(); - StructType st = dyn_cast(argType); + mlir::Type argType = ArgInfo.getCoerceToType(); + StructType st = mlir::dyn_cast(argType); if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { cir_cconv_assert(NumIRArgs == st.getNumElements()); for (unsigned i = 0, e = st.getNumElements(); i != e; ++i) @@ -101,8 +101,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { } case ABIArgInfo::Indirect: { mlir::Type argType = (FI.arg_begin() + ArgNo)->type; - ArgTypes[FirstIRArg] = - mlir::cir::PointerType::get(getMLIRContext(), argType); + ArgTypes[FirstIRArg] = cir::PointerType::get(getMLIRContext(), argType); break; } default: @@ -114,7 +113,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { } /// Convert a CIR type to its ABI-specific default form. -mlir::Type LowerTypes::convertType(Type T) { +mlir::Type LowerTypes::convertType(mlir::Type T) { /// NOTE(cir): It the original codegen this method is used to get the default /// LLVM IR representation for a given AST type. When a the ABI-specific /// function info sets a nullptr for a return or argument type, the default @@ -123,12 +122,12 @@ mlir::Type LowerTypes::convertType(Type T) { /// It's kept here for codegen parity's sake. // Certain CIR types are already ABI-specific, so we just return them. - if (isa(T)) { + if (mlir::isa(T)) { return T; } llvm::outs() << "Missing default ABI-specific type for " << T << "\n"; cir_cconv_assert_or_abort( - !::cir::MissingFeatures::X86DefaultABITypeConvertion(), "NYI"); + !cir::MissingFeatures::X86DefaultABITypeConvertion(), "NYI"); return T; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h index d6f20941544f..751f95e67efd 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -24,7 +24,6 @@ #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/FnInfoOpts.h" -namespace mlir { namespace cir { // Forward declarations. @@ -46,22 +45,22 @@ class LowerTypes { const ABIInfo &TheABIInfo; // Used to build types and other MLIR operations. - MLIRContext *mlirContext; + mlir::MLIRContext *mlirContext; - ::cir::CIRDataLayout DL; + cir::CIRDataLayout DL; const ABIInfo &getABIInfo() const { return TheABIInfo; } public: - LowerTypes(LowerModule &LM, StringRef DLString); + LowerTypes(LowerModule &LM, llvm::StringRef DLString); ~LowerTypes() = default; - const ::cir::CIRDataLayout &getDataLayout() const { return DL; } + const cir::CIRDataLayout &getDataLayout() const { return DL; } LowerModule &getLM() const { return LM; } CIRCXXABI &getCXXABI() const { return CXXABI; } CIRLowerContext &getContext() { return context; } const clang::TargetInfo &getTarget() const { return Target; } - MLIRContext *getMLIRContext() { return mlirContext; } + mlir::MLIRContext *getMLIRContext() { return mlirContext; } /// Convert clang calling convention to LLVM callilng convention. unsigned clangCallConvToLLVMCallConv(clang::CallingConv CC); @@ -70,9 +69,9 @@ class LowerTypes { /// C function pointer type. /// FIXME(cir): Does the "free function" concept makes sense here? const LowerFunctionInfo &arrangeFunctionDeclaration(FuncOp fnOp); - const LowerFunctionInfo &arrangeFreeFunctionCall(const OperandRange args, - const FuncType fnType, - bool chainCall); + const LowerFunctionInfo & + arrangeFreeFunctionCall(const mlir::OperandRange args, const FuncType fnType, + bool chainCall); const LowerFunctionInfo &arrangeFreeFunctionType(FuncType FTy); const LowerFunctionInfo &arrangeGlobalDeclaration(FuncOp fnOp); @@ -85,19 +84,18 @@ class LowerTypes { /// \param opts - Options to control the arrangement. /// \param argTypes - ABI-agnostic CIR argument types. /// \param required - Information about required/optional arguments. - const LowerFunctionInfo &arrangeLLVMFunctionInfo(Type resultType, - ::cir::FnInfoOpts opts, - ArrayRef argTypes, - RequiredArgs required); + const LowerFunctionInfo & + arrangeLLVMFunctionInfo(mlir::Type resultType, cir::FnInfoOpts opts, + llvm::ArrayRef argTypes, + RequiredArgs required); /// Return the ABI-specific function type for a CIR function type. FuncType getFunctionType(const LowerFunctionInfo &FI); /// Convert a CIR type to its ABI-specific default form. - Type convertType(Type T); + mlir::Type convertType(mlir::Type T); }; } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_LOWERTYPES_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index 6b3229ac2ea6..627f3b048817 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -18,7 +18,7 @@ #include "clang/CIR/MissingFeatures.h" using namespace mlir; -using namespace mlir::cir; +using namespace cir; namespace { @@ -58,11 +58,11 @@ class EmptySubobjectMap { void EmptySubobjectMap::ComputeEmptySubobjectSizes() { // Check the bases. - cir_cconv_assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_cconv_assert(!cir::MissingFeatures::getCXXRecordBases()); // Check the fields. for (const auto FT : Class.getMembers()) { - cir_cconv_assert(!::cir::MissingFeatures::qualifiedTypes()); + cir_cconv_assert(!cir::MissingFeatures::qualifiedTypes()); const auto RT = dyn_cast(FT); // We only care about record types. @@ -70,7 +70,7 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() { continue; // TODO(cir): Handle nested record types. - cir_cconv_assert_or_abort(!::cir::MissingFeatures::ABINestedRecordLayout(), + cir_cconv_assert_or_abort(!cir::MissingFeatures::ABINestedRecordLayout(), "NYI"); } } @@ -87,7 +87,7 @@ bool EmptySubobjectMap::canPlaceFieldAtOffset(const Type Ty, class ItaniumRecordLayoutBuilder { protected: // FIXME(cir): Remove this and make the appropriate fields public. - friend class mlir::cir::CIRLowerContext; + friend class cir::CIRLowerContext; const CIRLowerContext &Context; @@ -108,7 +108,7 @@ class ItaniumRecordLayoutBuilder { /// \brief The maximum of the alignments of top-level members. clang::CharUnits UnadjustedAlignment; - SmallVector FieldOffsets; + llvm::SmallVector FieldOffsets; /// Whether the external AST source has provided a layout for this /// record. @@ -235,25 +235,25 @@ void ItaniumRecordLayoutBuilder::layout(const StructType RT) { initializeLayout(RT); // Lay out the vtable and the non-virtual bases. - cir_cconv_assert(!::cir::MissingFeatures::isCXXRecordDecl() && - !::cir::MissingFeatures::CXXRecordIsDynamicClass()); + cir_cconv_assert(!cir::MissingFeatures::isCXXRecordDecl() && + !cir::MissingFeatures::CXXRecordIsDynamicClass()); layoutFields(RT); // FIXME(cir): Handle virtual-related layouts. - cir_cconv_assert(!::cir::MissingFeatures::getCXXRecordBases()); + cir_cconv_assert(!cir::MissingFeatures::getCXXRecordBases()); cir_cconv_assert( - !::cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); + !cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); } void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { if (const auto RT = dyn_cast(Ty)) { IsUnion = RT.isUnion(); - cir_cconv_assert(!::cir::MissingFeatures::recordDeclIsMSStruct()); + cir_cconv_assert(!cir::MissingFeatures::recordDeclIsMSStruct()); } - cir_cconv_assert(!::cir::MissingFeatures::recordDeclIsPacked()); + cir_cconv_assert(!cir::MissingFeatures::recordDeclIsPacked()); // Honor the default struct packing maximum alignment flag. if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) { @@ -264,16 +264,16 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { // and forces all structures to have 2-byte alignment. The IBM docs on it // allude to additional (more complicated) semantics, especially with regard // to bit-fields, but gcc appears not to follow that. - if (::cir::MissingFeatures::declHasAlignMac68kAttr()) { + if (cir::MissingFeatures::declHasAlignMac68kAttr()) { cir_cconv_unreachable("NYI"); } else { - if (::cir::MissingFeatures::declHasAlignNaturalAttr()) + if (cir::MissingFeatures::declHasAlignNaturalAttr()) cir_cconv_unreachable("NYI"); - if (::cir::MissingFeatures::declHasMaxFieldAlignmentAttr()) + if (cir::MissingFeatures::declHasMaxFieldAlignmentAttr()) cir_cconv_unreachable("NYI"); - if (::cir::MissingFeatures::declGetMaxAlignment()) + if (cir::MissingFeatures::declGetMaxAlignment()) cir_cconv_unreachable("NYI"); } @@ -282,7 +282,7 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { // If there is an external AST source, ask it for the various offsets. if (const auto RT = dyn_cast(Ty)) { - if (::cir::MissingFeatures::astContextGetExternalSource()) { + if (cir::MissingFeatures::astContextGetExternalSource()) { cir_cconv_unreachable("NYI"); } } @@ -291,9 +291,8 @@ void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { void ItaniumRecordLayoutBuilder::layoutField(const Type D, bool InsertExtraPadding) { // auto FieldClass = D.dyn_cast(); - cir_cconv_assert( - !::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && - !::cir::MissingFeatures::CXXRecordDeclIsEmptyCXX11()); + cir_cconv_assert(!cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping() && + !cir::MissingFeatures::CXXRecordDeclIsEmptyCXX11()); bool IsOverlappingEmptyField = false; // FIXME(cir): Needs more features. clang::CharUnits FieldOffset = (IsUnion || IsOverlappingEmptyField) @@ -307,7 +306,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, cir_cconv_unreachable("NYI"); } - cir_cconv_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + cir_cconv_assert(!cir::MissingFeatures::fieldDeclIsBitfield()); uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit; // Reset the unfilled bits. @@ -340,15 +339,15 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, } else { setDeclInfo(false /* IsIncompleteArrayType */); - if (::cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping()) + if (cir::MissingFeatures::fieldDeclIsPotentiallyOverlapping()) cir_cconv_unreachable("NYI"); if (IsMsStruct) cir_cconv_unreachable("NYI"); } - cir_cconv_assert(!::cir::MissingFeatures::recordDeclIsPacked() && - !::cir::MissingFeatures::CXXRecordDeclIsPOD()); + cir_cconv_assert(!cir::MissingFeatures::recordDeclIsPacked() && + !cir::MissingFeatures::CXXRecordDeclIsPOD()); bool FieldPacked = false; // FIXME(cir): Needs more features. // When used as part of a typedef, or together with a 'packed' attribute, the @@ -386,7 +385,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, clang::CharUnits UnpackedFieldOffset = FieldOffset; // clang::CharUnits OriginalFieldAlign = UnpackedFieldAlign; - cir_cconv_assert(!::cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); + cir_cconv_assert(!cir::MissingFeatures::fieldDeclGetMaxFieldAlignment()); clang::CharUnits MaxAlignmentInChars = clang::CharUnits::Zero(); PackedFieldAlign = std::max(PackedFieldAlign, MaxAlignmentInChars); PreferredAlign = std::max(PreferredAlign, MaxAlignmentInChars); @@ -459,7 +458,7 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, // laid out. A regular mlir::Type has not way of doing this. In fact, we will // likely need an external abstraction, as I don't think this is possible with // just the field type. - cir_cconv_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + cir_cconv_assert(!cir::MissingFeatures::fieldDeclAbstraction()); if (Packed && !FieldPacked && PackedFieldAlign < FieldAlign) cir_cconv_unreachable("NYI"); @@ -468,10 +467,10 @@ void ItaniumRecordLayoutBuilder::layoutField(const Type D, void ItaniumRecordLayoutBuilder::layoutFields(const StructType D) { // Layout each field, for now, just sequentially, respecting alignment. In // the future, this will need to be tweakable by targets. - cir_cconv_assert(!::cir::MissingFeatures::recordDeclMayInsertExtraPadding() && + cir_cconv_assert(!cir::MissingFeatures::recordDeclMayInsertExtraPadding() && !Context.getLangOpts().SanitizeAddressFieldPadding); bool InsertExtraPadding = false; - cir_cconv_assert(!::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); + cir_cconv_assert(!cir::MissingFeatures::recordDeclHasFlexibleArrayMember()); bool HasFlexibleArrayMember = false; for (const auto FT : D.getMembers()) { layoutField(FT, InsertExtraPadding && (FT != D.getMembers().back() || @@ -511,7 +510,7 @@ void ItaniumRecordLayoutBuilder::checkFieldPadding( unsigned UnpackedAlign, bool isPacked, const Type Ty) { // We let objc ivars without warning, objc interfaces generally are not used // for padding tricks. - if (::cir::MissingFeatures::objCIvarDecls()) + if (cir::MissingFeatures::objCIvarDecls()) cir_cconv_unreachable("NYI"); // FIXME(cir): Should the following be skiped in CIR? @@ -528,7 +527,7 @@ void ItaniumRecordLayoutBuilder::checkFieldPadding( PadSize = PadSize / CharBitNum; // InBits = false; } - cir_cconv_assert(::cir::MissingFeatures::bitFieldPaddingDiagnostics()); + cir_cconv_assert(cir::MissingFeatures::bitFieldPaddingDiagnostics()); } if (isPacked && Offset != UnpackedOffset) { HasPackedField = true; @@ -547,7 +546,7 @@ bool isMsLayout(const CIRLowerContext &Context) { /// of the given class (considering it as a base class) when allocating /// objects? static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { - cir_cconv_assert(!::cir::MissingFeatures::recordDeclIsCXXDecl()); + cir_cconv_assert(!cir::MissingFeatures::recordDeclIsCXXDecl()); switch (ABI.getTailPaddingUseRules()) { case clang::TargetCXXABI::AlwaysUseTailPadding: return false; @@ -569,7 +568,7 @@ static bool mustSkipTailPadding(clang::TargetCXXABI ABI, const StructType RD) { // intended. // FIXME(cir): This always returns true since we can't check if a CIR record // is a POD type. - cir_cconv_assert(!::cir::MissingFeatures::CXXRecordDeclIsPOD()); + cir_cconv_assert(!cir::MissingFeatures::CXXRecordDeclIsPOD()); return true; case clang::TargetCXXABI::UseTailPaddingUnlessPOD11: @@ -606,7 +605,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { cir_cconv_unreachable("NYI"); } else { // FIXME(cir): Add if-else separating C and C++ records. - cir_cconv_assert(!::cir::MissingFeatures::isCXXRecordDecl()); + cir_cconv_assert(!cir::MissingFeatures::isCXXRecordDecl()); EmptySubobjectMap EmptySubobjects(*this, RT); ItaniumRecordLayoutBuilder Builder(*this, &EmptySubobjects); Builder.layout(RT); @@ -621,7 +620,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { skipTailPadding ? Builder.getSize() : Builder.getDataSize(); clang::CharUnits NonVirtualSize = skipTailPadding ? DataSize : Builder.NonVirtualSize; - cir_cconv_assert(!::cir::MissingFeatures::CXXRecordIsDynamicClass()); + cir_cconv_assert(!cir::MissingFeatures::CXXRecordIsDynamicClass()); // FIXME(cir): Whose responsible for freeing the allocation below? NewEntry = new CIRRecordLayout( *this, Builder.getSize(), Builder.Alignment, Builder.PreferredAlignment, @@ -636,7 +635,7 @@ const CIRRecordLayout &CIRLowerContext::getCIRRecordLayout(const Type D) const { } // TODO(cir): Add option to dump the layouts. - cir_cconv_assert(!::cir::MissingFeatures::cacheRecordLayouts()); + cir_cconv_assert(!cir::MissingFeatures::cacheRecordLayouts()); return *NewEntry; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.cpp index 2502f8f0dfcb..d4f81d6fd80d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.cpp @@ -1,6 +1,5 @@ #include "TargetLoweringInfo.h" -namespace mlir { namespace cir { TargetLoweringInfo::TargetLoweringInfo(std::unique_ptr Info) @@ -9,4 +8,3 @@ TargetLoweringInfo::TargetLoweringInfo(std::unique_ptr Info) TargetLoweringInfo::~TargetLoweringInfo() = default; } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h index 4350458eeed2..8184c4f0afc2 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h @@ -19,21 +19,17 @@ #include "clang/CIR/Target/AArch64.h" #include "clang/CIR/Target/x86.h" -namespace mlir { namespace cir { std::unique_ptr -createX86_64TargetLoweringInfo(LowerModule &CGM, - ::cir::X86AVXABILevel AVXLevel); +createX86_64TargetLoweringInfo(LowerModule &CGM, cir::X86AVXABILevel AVXLevel); std::unique_ptr -createAArch64TargetLoweringInfo(LowerModule &CGM, - ::cir::AArch64ABIKind AVXLevel); +createAArch64TargetLoweringInfo(LowerModule &CGM, cir::AArch64ABIKind AVXLevel); std::unique_ptr createSPIRVTargetLoweringInfo(LowerModule &CGM); } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_TARGETINFO_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h index 4be2db10c1dd..8d33ef2e5dca 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetLoweringInfo.h @@ -20,7 +20,6 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" -namespace mlir { namespace cir { class TargetLoweringInfo { @@ -33,10 +32,9 @@ class TargetLoweringInfo { const ABIInfo &getABIInfo() const { return *Info; } virtual unsigned getTargetAddrSpaceFromCIRAddrSpace( - mlir::cir::AddressSpaceAttr addressSpaceAttr) const = 0; + cir::AddressSpaceAttr addressSpaceAttr) const = 0; }; } // namespace cir -} // namespace mlir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_TARGETLOWERINGINFO_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp index e1e6d098965a..cac197fae1bc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/AArch64.cpp @@ -16,11 +16,10 @@ #include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" -using AArch64ABIKind = ::cir::AArch64ABIKind; -using ABIArgInfo = ::cir::ABIArgInfo; -using MissingFeatures = ::cir::MissingFeatures; +using AArch64ABIKind = cir::AArch64ABIKind; +using ABIArgInfo = cir::ABIArgInfo; +using MissingFeatures = cir::MissingFeatures; -namespace mlir { namespace cir { //===----------------------------------------------------------------------===// @@ -40,12 +39,12 @@ class AArch64ABIInfo : public ABIInfo { AArch64ABIKind getABIKind() const { return Kind; } bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; } - ABIArgInfo classifyReturnType(Type RetTy, bool IsVariadic) const; - ABIArgInfo classifyArgumentType(Type RetTy, bool IsVariadic, + ABIArgInfo classifyReturnType(mlir::Type RetTy, bool IsVariadic) const; + ABIArgInfo classifyArgumentType(mlir::Type RetTy, bool IsVariadic, unsigned CallingConvention) const; void computeInfo(LowerFunctionInfo &FI) const override { - if (!::mlir::cir::classifyReturnType(getCXXABI(), FI, *this)) + if (!cir::classifyReturnType(getCXXABI(), FI, *this)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic()); @@ -63,8 +62,8 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { } unsigned getTargetAddrSpaceFromCIRAddrSpace( - mlir::cir::AddressSpaceAttr addressSpaceAttr) const override { - using Kind = mlir::cir::AddressSpaceAttr::Kind; + cir::AddressSpaceAttr addressSpaceAttr) const override { + using Kind = cir::AddressSpaceAttr::Kind; switch (addressSpaceAttr.getValue()) { case Kind::offload_private: case Kind::offload_local: @@ -80,17 +79,17 @@ class AArch64TargetLoweringInfo : public TargetLoweringInfo { } // namespace -ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, +ABIArgInfo AArch64ABIInfo::classifyReturnType(mlir::Type RetTy, bool IsVariadic) const { - if (isa(RetTy)) + if (mlir::isa(RetTy)) return ABIArgInfo::getIgnore(); - if (const auto _ = dyn_cast(RetTy)) { - cir_cconv_assert_or_abort(!::cir::MissingFeatures::vectorType(), "NYI"); + if (const auto _ = mlir::dyn_cast(RetTy)) { + cir_cconv_assert_or_abort(!cir::MissingFeatures::vectorType(), "NYI"); } // Large vector types should be returned via memory. - if (isa(RetTy) && getContext().getTypeSize(RetTy) > 128) + if (mlir::isa(RetTy) && getContext().getTypeSize(RetTy) > 128) cir_cconv_unreachable("NYI"); if (!isAggregateTypeForABI(RetTy)) { @@ -105,9 +104,9 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, } uint64_t Size = getContext().getTypeSize(RetTy); - cir_cconv_assert(!::cir::MissingFeatures::emitEmptyRecordCheck()); + cir_cconv_assert(!cir::MissingFeatures::emitEmptyRecordCheck()); cir_cconv_assert( - !::cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); + !cir::MissingFeatures::supportisHomogeneousAggregateQueryForAArch64()); // Aggregates <= 16 bytes are returned directly in registers or on the stack. if (Size <= 128) { @@ -119,7 +118,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, // BE, otherwise composite types will be indistinguishable from integer // types. return ABIArgInfo::getDirect( - mlir::cir::IntType::get(LT.getMLIRContext(), Size, false)); + cir::IntType::get(LT.getMLIRContext(), Size, false)); } unsigned Alignment = getContext().getTypeAlign(RetTy); @@ -128,10 +127,9 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. // For aggregates with 16-byte alignment, we use i128. if (Alignment < 128 && Size == 128) { - mlir::Type baseTy = - mlir::cir::IntType::get(LT.getMLIRContext(), 64, false); + mlir::Type baseTy = cir::IntType::get(LT.getMLIRContext(), 64, false); return ABIArgInfo::getDirect( - mlir::cir::ArrayType::get(LT.getMLIRContext(), baseTy, Size / 64)); + cir::ArrayType::get(LT.getMLIRContext(), baseTy, Size / 64)); } return ABIArgInfo::getDirect( @@ -142,7 +140,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(Type RetTy, } ABIArgInfo -AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, +AArch64ABIInfo::classifyArgumentType(mlir::Type Ty, bool IsVariadic, unsigned CallingConvention) const { Ty = useFirstFieldIfTransparentUnion(Ty); @@ -178,12 +176,12 @@ AArch64ABIInfo::classifyArgumentType(Type Ty, bool IsVariadic, // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. // For aggregates with 16-byte alignment, we use i128. - Type baseTy = - mlir::cir::IntType::get(LT.getMLIRContext(), Alignment, false); + mlir::Type baseTy = + cir::IntType::get(LT.getMLIRContext(), Alignment, false); auto argTy = Size == Alignment ? baseTy - : mlir::cir::ArrayType::get(LT.getMLIRContext(), baseTy, - Size / Alignment); + : cir::ArrayType::get(LT.getMLIRContext(), baseTy, + Size / Alignment); return ABIArgInfo::getDirect(argTy); } @@ -196,4 +194,3 @@ createAArch64TargetLoweringInfo(LowerModule &CGM, AArch64ABIKind Kind) { } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index b8ca5f663cc9..ec47a929cb34 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -28,22 +28,19 @@ namespace { class LoweringPrepareAArch64CXXABI : public LoweringPrepareItaniumCXXABI { public: LoweringPrepareAArch64CXXABI(AArch64ABIKind k) : Kind(k) {} - mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, + mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) override; private: AArch64ABIKind Kind; - mlir::Value lowerAAPCSVAArg(cir::CIRBaseBuilderTy &builder, - mlir::cir::VAArgOp op, + mlir::Value lowerAAPCSVAArg(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout); bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; } - mlir::Value lowerMSVAArg(cir::CIRBaseBuilderTy &builder, - mlir::cir::VAArgOp op, + mlir::Value lowerMSVAArg(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) { cir_cconv_unreachable("MSVC ABI not supported yet"); } - mlir::Value lowerDarwinVAArg(cir::CIRBaseBuilderTy &builder, - mlir::cir::VAArgOp op, + mlir::Value lowerDarwinVAArg(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) { cir_cconv_unreachable("Darwin ABI not supported yet"); } @@ -56,16 +53,15 @@ cir::LoweringPrepareCXXABI::createAArch64ABI(AArch64ABIKind k) { } mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( - cir::CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, + cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) { auto loc = op->getLoc(); auto valist = op->getOperand(0); auto opResTy = op.getType(); // front end should not produce non-scalar type of VAArgOp bool isSupportedType = - mlir::isa(opResTy); + mlir::isa(opResTy); // Homogenous Aggregate type not supported and indirect arg // passing not supported yet. And for these supported types, @@ -82,7 +78,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // but it depends on arg type indirectness and coercion defined by ABI. auto baseTy = opResTy; - if (mlir::isa(baseTy)) { + if (mlir::isa(baseTy)) { cir_cconv_unreachable("ArrayType VAArg loweing NYI"); } // numRegs may not be 1 if ArrayType is supported. @@ -91,7 +87,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( if (Kind == AArch64ABIKind::AAPCSSoft) { cir_cconv_unreachable("AAPCSSoft cir.var_arg lowering NYI"); } - bool IsFPR = mlir::cir::isAnyFloatingPointType(baseTy); + bool IsFPR = cir::isAnyFloatingPointType(baseTy); // The AArch64 va_list type and handling is specified in the Procedure Call // Standard, section B.4: @@ -136,18 +132,18 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( int regSize = isIndirect ? 8 : tySize.getQuantity(); int regTopIndex; mlir::Value regOffsP; - mlir::cir::LoadOp regOffs; + cir::LoadOp regOffs; builder.restoreInsertionPoint(curInsertionP); // 3 is the field number of __gr_offs, 4 is the field number of __vr_offs if (!IsFPR) { regOffsP = builder.createGetMemberOp(loc, valist, "gr_offs", 3); - regOffs = builder.create(loc, regOffsP); + regOffs = builder.create(loc, regOffsP); regTopIndex = 1; regSize = llvm::alignTo(regSize, 8); } else { regOffsP = builder.createGetMemberOp(loc, valist, "vr_offs", 4); - regOffs = builder.create(loc, regOffsP); + regOffs = builder.create(loc, regOffsP); regTopIndex = 2; regSize = 16 * numRegs; } @@ -160,12 +156,11 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // argument. We don't want to keep updating regOffs (in case it overflows, // though anyone passing 2GB of arguments, each at most 16 bytes, deserves // whatever they get). - auto zeroValue = builder.create( - loc, regOffs.getType(), mlir::cir::IntAttr::get(regOffs.getType(), 0)); - auto usingStack = builder.create( - loc, boolTy, mlir::cir::CmpOpKind::ge, regOffs, zeroValue); - builder.create(loc, usingStack, onStackBlock, - maybeRegBlock); + auto zeroValue = builder.create( + loc, regOffs.getType(), cir::IntAttr::get(regOffs.getType(), 0)); + auto usingStack = builder.create(loc, boolTy, cir::CmpOpKind::ge, + regOffs, zeroValue); + builder.create(loc, usingStack, onStackBlock, maybeRegBlock); auto contBlock = currentBlock->splitBlock(op); // now contBlock should be the block after onStackBlock in CFG. @@ -196,17 +191,16 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( // The fact that this is done unconditionally reflects the fact that // allocating an argument to the stack also uses up all the remaining // registers of the appropriate kind. - auto regSizeValue = builder.create( - loc, regOffs.getType(), - mlir::cir::IntAttr::get(regOffs.getType(), regSize)); - auto newOffset = builder.create( - loc, regOffs.getType(), mlir::cir::BinOpKind::Add, regOffs, regSizeValue); + auto regSizeValue = builder.create( + loc, regOffs.getType(), cir::IntAttr::get(regOffs.getType(), regSize)); + auto newOffset = builder.create( + loc, regOffs.getType(), cir::BinOpKind::Add, regOffs, regSizeValue); builder.createStore(loc, newOffset, regOffsP); // Now we're in a position to decide whether this argument really was in // registers or not. - auto inRegs = builder.create( - loc, boolTy, mlir::cir::CmpOpKind::le, newOffset, zeroValue); - builder.create(loc, inRegs, inRegBlock, onStackBlock); + auto inRegs = builder.create(loc, boolTy, cir::CmpOpKind::le, + newOffset, zeroValue); + builder.create(loc, inRegs, inRegBlock, onStackBlock); //======================================= // Argument was in registers @@ -216,12 +210,12 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( builder.setInsertionPointToEnd(inRegBlock); auto regTopP = builder.createGetMemberOp( loc, valist, IsFPR ? "vr_top" : "gr_top", regTopIndex); - auto regTop = builder.create(loc, regTopP); + auto regTop = builder.create(loc, regTopP); auto i8Ty = mlir::IntegerType::get(builder.getContext(), 8); - auto i8PtrTy = mlir::cir::PointerType::get(builder.getContext(), i8Ty); + auto i8PtrTy = cir::PointerType::get(builder.getContext(), i8Ty); auto castRegTop = builder.createBitcast(regTop, i8PtrTy); - auto resAsInt8P = builder.create( - loc, castRegTop.getType(), castRegTop, regOffs); + auto resAsInt8P = builder.create(loc, castRegTop.getType(), + castRegTop, regOffs); if (isIndirect) { cir_cconv_assert(!cir::MissingFeatures::handleAArch64Indirect()); @@ -256,12 +250,12 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( if (isBigEndian && !isIndirect && (isHFA || isAggregateTypeForABI) && tySize < slotSize) { clang::CharUnits offset = slotSize - tySize; - auto offsetConst = builder.create( + auto offsetConst = builder.create( loc, regOffs.getType(), - mlir::cir::IntAttr::get(regOffs.getType(), offset.getQuantity())); + cir::IntAttr::get(regOffs.getType(), offset.getQuantity())); - resAsInt8P = builder.create( - loc, castRegTop.getType(), resAsInt8P, offsetConst); + resAsInt8P = builder.create(loc, castRegTop.getType(), + resAsInt8P, offsetConst); } } @@ -276,7 +270,7 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( cir_cconv_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); cir_cconv_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); - builder.create(loc, mlir::ValueRange{resAsVoidP}, contBlock); + builder.create(loc, mlir::ValueRange{resAsVoidP}, contBlock); //======================================= // Argument was on the stack @@ -284,9 +278,9 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( builder.setInsertionPointToEnd(onStackBlock); auto stackP = builder.createGetMemberOp(loc, valist, "stack", 0); - auto onStackPtr = builder.create(loc, stackP); + auto onStackPtr = builder.create(loc, stackP); auto ptrDiffTy = - mlir::cir::IntType::get(builder.getContext(), 64, /*signed=*/false); + cir::IntType::get(builder.getContext(), 64, /*signed=*/false); cir_cconv_assert(!cir::MissingFeatures::handleAArch64Indirect()); cir_cconv_assert(!cir::MissingFeatures::supportTyAlignQueryForAArch64()); @@ -314,51 +308,47 @@ mlir::Value LoweringPrepareAArch64CXXABI::lowerAAPCSVAArg( cir_cconv_assert(!cir::MissingFeatures::handleBigEndian()); cir_cconv_assert(!cir::MissingFeatures::supportTySizeQueryForAArch64()); - auto stackSizeC = builder.create( - loc, ptrDiffTy, - mlir::cir::IntAttr::get(ptrDiffTy, stackSize.getQuantity())); + auto stackSizeC = builder.create( + loc, ptrDiffTy, cir::IntAttr::get(ptrDiffTy, stackSize.getQuantity())); auto castStack = builder.createBitcast(onStackPtr, i8PtrTy); // Write the new value of __stack for the next call to va_arg - auto newStackAsi8Ptr = builder.create( + auto newStackAsi8Ptr = builder.create( loc, castStack.getType(), castStack, stackSizeC); auto newStack = builder.createBitcast(newStackAsi8Ptr, onStackPtr.getType()); builder.createStore(loc, newStack, stackP); if (isBigEndian && !isAggregateTypeForABI && tySize < stackSlotSize) { clang::CharUnits offset = stackSlotSize - tySize; - auto offsetConst = builder.create( - loc, ptrDiffTy, - mlir::cir::IntAttr::get(ptrDiffTy, offset.getQuantity())); - auto offsetStackAsi8Ptr = builder.create( + auto offsetConst = builder.create( + loc, ptrDiffTy, cir::IntAttr::get(ptrDiffTy, offset.getQuantity())); + auto offsetStackAsi8Ptr = builder.create( loc, castStack.getType(), castStack, offsetConst); auto onStackPtrBE = builder.createBitcast(offsetStackAsi8Ptr, onStackPtr.getType()); - builder.create(loc, mlir::ValueRange{onStackPtrBE}, - contBlock); + builder.create(loc, mlir::ValueRange{onStackPtrBE}, contBlock); } else { - builder.create(loc, mlir::ValueRange{onStackPtr}, - contBlock); + builder.create(loc, mlir::ValueRange{onStackPtr}, contBlock); } // generate additional instructions for end block builder.setInsertionPoint(op); contBlock->addArgument(onStackPtr.getType(), loc); auto resP = contBlock->getArgument(0); - cir_cconv_assert(mlir::isa(resP.getType())); - auto opResPTy = mlir::cir::PointerType::get(builder.getContext(), opResTy); + cir_cconv_assert(mlir::isa(resP.getType())); + auto opResPTy = cir::PointerType::get(builder.getContext(), opResTy); auto castResP = builder.createBitcast(resP, opResPTy); - auto res = builder.create(loc, castResP); + auto res = builder.create(loc, castResP); // there would be another level of ptr dereference if indirect arg passing cir_cconv_assert(!cir::MissingFeatures::handleAArch64Indirect()); if (isIndirect) { - res = builder.create(loc, res.getResult()); + res = builder.create(loc, res.getResult()); } return res.getResult(); } mlir::Value LoweringPrepareAArch64CXXABI::lowerVAArg(cir::CIRBaseBuilderTy &builder, - mlir::cir::VAArgOp op, + cir::VAArgOp op, const cir::CIRDataLayout &datalayout) { return Kind == AArch64ABIKind::Win64 ? lowerMSVAArg(builder, op, datalayout) : isDarwinPCS() ? lowerDarwinVAArg(builder, op, datalayout) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index 992786b7676d..f94553c58112 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -37,12 +37,12 @@ static void buildBadCastCall(CIRBaseBuilderTy &builder, mlir::Location loc, cir_cconv_assert(!MissingFeatures::setCallingConv()); builder.createCallOp(loc, badCastFuncRef, mlir::ValueRange{}); - builder.create(loc); + builder.create(loc); builder.clearInsertionPoint(); } static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, - mlir::cir::DynamicCastOp op) { + cir::DynamicCastOp op) { auto loc = op->getLoc(); auto srcValue = op.getSrc(); auto castInfo = op.getInfo().value(); @@ -66,7 +66,7 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, dynCastFuncArgs) .getResult(); - cir_cconv_assert(mlir::isa(castedPtr.getType()) && + cir_cconv_assert(mlir::isa(castedPtr.getType()) && "the return value of __dynamic_cast should be a ptr"); /// C++ [expr.dynamic.cast]p9: @@ -74,7 +74,7 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, if (op.isRefcast()) { // Emit a cir.if that checks the casted value. mlir::Value castedValueIsNull = builder.createPtrIsNull(castedPtr); - builder.create( + builder.create( loc, castedValueIsNull, false, [&](mlir::OpBuilder &, mlir::Location) { buildBadCastCall(builder, loc, castInfo.getBadCastFunc()); }); @@ -88,7 +88,7 @@ static mlir::Value buildDynamicCastAfterNullCheck(CIRBaseBuilderTy &builder, static mlir::Value buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, clang::ASTContext &astCtx, - mlir::cir::DynamicCastOp op) { + cir::DynamicCastOp op) { auto loc = op.getLoc(); bool vtableUsesRelativeLayout = op.getRelativeLayout(); @@ -106,8 +106,8 @@ buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, auto ptrdiffTyIsSigned = clang::TargetInfo::isTypeSigned(ptrdiffTy); auto ptrdiffTyWidth = targetInfo.getTypeWidth(ptrdiffTy); - vtableElemTy = mlir::cir::IntType::get(builder.getContext(), ptrdiffTyWidth, - ptrdiffTyIsSigned); + vtableElemTy = cir::IntType::get(builder.getContext(), ptrdiffTyWidth, + ptrdiffTyIsSigned); vtableElemAlign = llvm::divideCeil(targetInfo.getPointerAlign(clang::LangAS::Default), 8); } @@ -118,7 +118,7 @@ buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, auto vtablePtrPtr = builder.createBitcast(op.getSrc(), builder.getPointerTo(vtablePtrTy)); auto vtablePtr = builder.createLoad(loc, vtablePtrPtr); - auto offsetToTopSlotPtr = builder.create( + auto offsetToTopSlotPtr = builder.create( loc, vtablePtrTy, mlir::FlatSymbolRefAttr{}, vtablePtr, /*vtable_index=*/0, -2ULL); auto offsetToTop = @@ -128,8 +128,8 @@ buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, // Cast the input pointer to a uint8_t* to allow pointer arithmetic. auto u8PtrTy = builder.getPointerTo(builder.getUIntNTy(8)); auto srcBytePtr = builder.createBitcast(op.getSrc(), u8PtrTy); - auto dstBytePtr = builder.create( - loc, u8PtrTy, srcBytePtr, offsetToTop); + auto dstBytePtr = + builder.create(loc, u8PtrTy, srcBytePtr, offsetToTop); // Cast the result to a void*. return builder.createBitcast(dstBytePtr, builder.getVoidPtrTy()); } @@ -137,7 +137,7 @@ buildDynamicCastToVoidAfterNullCheck(CIRBaseBuilderTy &builder, mlir::Value LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, clang::ASTContext &astCtx, - mlir::cir::DynamicCastOp op) { + cir::DynamicCastOp op) { auto loc = op->getLoc(); auto srcValue = op.getSrc(); @@ -148,7 +148,7 @@ LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, auto srcValueIsNotNull = builder.createPtrToBoolCast(srcValue); return builder - .create( + .create( loc, srcValueIsNotNull, [&](mlir::OpBuilder &, mlir::Location) { mlir::Value castedValue = @@ -164,9 +164,10 @@ LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, .getResult(); } -mlir::Value LoweringPrepareItaniumCXXABI::lowerVAArg( - CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op, - const ::cir::CIRDataLayout &datalayout) { +mlir::Value +LoweringPrepareItaniumCXXABI::lowerVAArg(CIRBaseBuilderTy &builder, + cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) { // There is no generic cir lowering for var_arg, here we fail // so to prevent attempt of calling lowerVAArg for ItaniumCXXABI cir_cconv_unreachable("NYI"); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp index a0d48fb1f5a3..deffd8d27a05 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/SPIR.cpp @@ -15,10 +15,9 @@ #include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" -using ABIArgInfo = ::cir::ABIArgInfo; -using MissingFeature = ::cir::MissingFeatures; +using ABIArgInfo = cir::ABIArgInfo; +using MissingFeature = cir::MissingFeatures; -namespace mlir { namespace cir { //===----------------------------------------------------------------------===// @@ -33,7 +32,7 @@ class SPIRVABIInfo : public ABIInfo { private: void computeInfo(LowerFunctionInfo &FI) const override { - cir_cconv_assert_or_abort(!::cir::MissingFeatures::SPIRVABI(), "NYI"); + cir_cconv_assert_or_abort(!cir::MissingFeatures::SPIRVABI(), "NYI"); } }; @@ -43,8 +42,8 @@ class SPIRVTargetLoweringInfo : public TargetLoweringInfo { : TargetLoweringInfo(std::make_unique(LT)) {} unsigned getTargetAddrSpaceFromCIRAddrSpace( - mlir::cir::AddressSpaceAttr addressSpaceAttr) const override { - using Kind = mlir::cir::AddressSpaceAttr::Kind; + cir::AddressSpaceAttr addressSpaceAttr) const override { + using Kind = cir::AddressSpaceAttr::Kind; switch (addressSpaceAttr.getValue()) { case Kind::offload_private: return 0; @@ -70,4 +69,3 @@ createSPIRVTargetLoweringInfo(LowerModule &lowerModule) { } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index b50702a5ee68..3d590b3d499b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -12,10 +12,9 @@ #include "llvm/Support/ErrorHandling.h" #include -using X86AVXABILevel = ::cir::X86AVXABILevel; -using ABIArgInfo = ::cir::ABIArgInfo; +using X86AVXABILevel = cir::X86AVXABILevel; +using ABIArgInfo = cir::ABIArgInfo; -namespace mlir { namespace cir { namespace { @@ -40,8 +39,8 @@ unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { /// one of the two halves in the INTEGER class. /// /// It is conservatively correct to return false. -static bool BitsContainNoUserData(Type Ty, unsigned StartBit, unsigned EndBit, - CIRLowerContext &Context) { +static bool BitsContainNoUserData(mlir::Type Ty, unsigned StartBit, + unsigned EndBit, CIRLowerContext &Context) { // If the bytes being queried are off the end of the type, there is no user // data hiding here. This handles analysis of builtins, vectors and other // types that don't contain interesting padding. @@ -57,8 +56,8 @@ static bool BitsContainNoUserData(Type Ty, unsigned StartBit, unsigned EndBit, const CIRRecordLayout &Layout = Context.getCIRRecordLayout(Ty); // If this is a C++ record, check the bases first. - if (::cir::MissingFeatures::isCXXRecordDecl() || - ::cir::MissingFeatures::getCXXRecordBases()) { + if (cir::MissingFeatures::isCXXRecordDecl() || + cir::MissingFeatures::getCXXRecordBases()) { cir_cconv_unreachable("NYI"); } @@ -90,12 +89,12 @@ static bool BitsContainNoUserData(Type Ty, unsigned StartBit, unsigned EndBit, } /// Return a floating point type at the specified offset. -Type getFPTypeAtOffset(Type IRType, unsigned IROffset, - const ::cir::CIRDataLayout &TD) { - if (IROffset == 0 && isa(IRType)) +mlir::Type getFPTypeAtOffset(mlir::Type IRType, unsigned IROffset, + const cir::CIRDataLayout &TD) { + if (IROffset == 0 && mlir::isa(IRType)) return IRType; - cir_cconv_assert_or_abort(!::cir::MissingFeatures::X86GetFPTypeAtOffset(), + cir_cconv_assert_or_abort(!cir::MissingFeatures::X86GetFPTypeAtOffset(), "NYI"); return IRType; // FIXME(cir): Temporary workaround for the assertion above. } @@ -103,7 +102,7 @@ Type getFPTypeAtOffset(Type IRType, unsigned IROffset, } // namespace class X86_64ABIInfo : public ABIInfo { - using Class = ::cir::X86ArgClass; + using Class = cir::X86ArgClass; /// Implement the X86_64 ABI merging algorithm. /// @@ -158,14 +157,16 @@ class X86_64ABIInfo : public ABIInfo { /// /// If the \arg Lo class is ComplexX87, then the \arg Hi class will /// also be ComplexX87. - void classify(Type T, uint64_t OffsetBase, Class &Lo, Class &Hi, + void classify(mlir::Type T, uint64_t OffsetBase, Class &Lo, Class &Hi, bool isNamedArg, bool IsRegCall = false) const; - Type GetSSETypeAtOffset(Type IRType, unsigned IROffset, Type SourceTy, - unsigned SourceOffset) const; + mlir::Type GetSSETypeAtOffset(mlir::Type IRType, unsigned IROffset, + mlir::Type SourceTy, + unsigned SourceOffset) const; - Type GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, Type SourceTy, - unsigned SourceOffset) const; + mlir::Type GetINTEGERTypeAtOffset(mlir::Type DestTy, unsigned IROffset, + mlir::Type SourceTy, + unsigned SourceOffset) const; /// The 0.98 ABI revision clarified a lot of ambiguities, /// unfortunately in ways that were not always consistent with @@ -182,9 +183,9 @@ class X86_64ABIInfo : public ABIInfo { X86_64ABIInfo(LowerTypes &CGT, X86AVXABILevel AVXLevel) : ABIInfo(CGT), AVXLevel(AVXLevel) {} - ::cir::ABIArgInfo classifyReturnType(Type RetTy) const; + cir::ABIArgInfo classifyReturnType(mlir::Type RetTy) const; - ABIArgInfo classifyArgumentType(Type Ty, unsigned freeIntRegs, + ABIArgInfo classifyArgumentType(mlir::Type Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, bool isNamedArg, bool IsRegCall) const; @@ -195,12 +196,12 @@ class X86_64TargetLoweringInfo : public TargetLoweringInfo { public: X86_64TargetLoweringInfo(LowerTypes &LM, X86AVXABILevel AVXLevel) : TargetLoweringInfo(std::make_unique(LM, AVXLevel)) { - cir_cconv_assert(!::cir::MissingFeatures::swift()); + cir_cconv_assert(!cir::MissingFeatures::swift()); } unsigned getTargetAddrSpaceFromCIRAddrSpace( - mlir::cir::AddressSpaceAttr addressSpaceAttr) const override { - using Kind = mlir::cir::AddressSpaceAttr::Kind; + cir::AddressSpaceAttr addressSpaceAttr) const override { + using Kind = cir::AddressSpaceAttr::Kind; switch (addressSpaceAttr.getValue()) { case Kind::offload_private: case Kind::offload_local: @@ -214,8 +215,8 @@ class X86_64TargetLoweringInfo : public TargetLoweringInfo { } }; -void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, - bool isNamedArg, bool IsRegCall) const { +void X86_64ABIInfo::classify(mlir::Type Ty, uint64_t OffsetBase, Class &Lo, + Class &Hi, bool isNamedArg, bool IsRegCall) const { // FIXME: This code can be simplified by introducing a simple value class // for Class pairs with appropriate constructor methods for the various // situations. @@ -232,9 +233,9 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, // FIXME(cir): There's currently no direct way to identify if a type is a // builtin. if (/*isBuitinType=*/true) { - if (isa(Ty)) { + if (mlir::isa(Ty)) { Current = Class::NoClass; - } else if (auto IntTy = dyn_cast(Ty)) { + } else if (auto IntTy = mlir::dyn_cast(Ty)) { if (IntTy.getWidth() == 128) { Lo = Class::Integer; Hi = Class::Integer; @@ -245,18 +246,18 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, // We should implement this in CIR to simplify the conditions below. // Hence, Comparisons below might not be truly equivalent to the ones in // Clang. - if (isa(Ty)) { + if (mlir::isa(Ty)) { Current = Class::Integer; } return; - } else if (isa(Ty) || isa(Ty)) { + } else if (mlir::isa(Ty) || mlir::isa(Ty)) { Current = Class::SSE; return; - } else if (isa(Ty)) { + } else if (mlir::isa(Ty)) { Current = Class::Integer; - } else if (const auto RT = dyn_cast(Ty)) { + } else if (const auto RT = mlir::dyn_cast(Ty)) { uint64_t Size = getContext().getTypeSize(Ty); // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger @@ -271,7 +272,7 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, cir_cconv_unreachable("NYI"); // Assume variable sized types are passed in memory. - if (::cir::MissingFeatures::recordDeclHasFlexibleArrayMember()) + if (cir::MissingFeatures::recordDeclHasFlexibleArrayMember()) cir_cconv_unreachable("NYI"); const auto &Layout = getContext().getCIRRecordLayout(Ty); @@ -280,8 +281,8 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, Current = Class::NoClass; // If this is a C++ record, classify the bases first. - cir_cconv_assert(!::cir::MissingFeatures::isCXXRecordDecl() && - !::cir::MissingFeatures::getCXXRecordBases()); + cir_cconv_assert(!cir::MissingFeatures::isCXXRecordDecl() && + !cir::MissingFeatures::getCXXRecordBases()); // Classify the fields one at a time, merging the results. bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <= @@ -290,14 +291,14 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, bool IsUnion = RT.isUnion() && !UseClang11Compat; // FIXME(cir): An interface to handle field declaration might be needed. - cir_cconv_assert(!::cir::MissingFeatures::fieldDeclAbstraction()); + cir_cconv_assert(!cir::MissingFeatures::fieldDeclAbstraction()); for (auto [idx, FT] : llvm::enumerate(RT.getMembers())) { uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); - cir_cconv_assert(!::cir::MissingFeatures::fieldDeclIsBitfield()); + cir_cconv_assert(!cir::MissingFeatures::fieldDeclIsBitfield()); bool BitField = false; // Ignore padding bit-fields. - if (BitField && !::cir::MissingFeatures::fieldDeclisUnnamedBitField()) + if (BitField && !cir::MissingFeatures::fieldDeclisUnnamedBitField()) cir_cconv_unreachable("NYI"); // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than @@ -344,8 +345,8 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, postMerge(Size, Lo, Hi); } else { llvm::outs() << "Missing X86 classification for type " << Ty << "\n"; - cir_cconv_assert_or_abort( - !::cir::MissingFeatures::X86TypeClassification(), "NYI"); + cir_cconv_assert_or_abort(!cir::MissingFeatures::X86TypeClassification(), + "NYI"); } // FIXME: _Decimal32 and _Decimal64 are SSE. // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). @@ -358,24 +359,25 @@ void X86_64ABIInfo::classify(Type Ty, uint64_t OffsetBase, Class &Lo, Class &Hi, /// Return a type that will be passed by the backend in the low 8 bytes of an /// XMM register, corresponding to the SSE class. -Type X86_64ABIInfo::GetSSETypeAtOffset(Type IRType, unsigned IROffset, - Type SourceTy, - unsigned SourceOffset) const { - const ::cir::CIRDataLayout &TD = getDataLayout(); +mlir::Type X86_64ABIInfo::GetSSETypeAtOffset(mlir::Type IRType, + unsigned IROffset, + mlir::Type SourceTy, + unsigned SourceOffset) const { + const cir::CIRDataLayout &TD = getDataLayout(); unsigned SourceSize = (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset; - Type T0 = getFPTypeAtOffset(IRType, IROffset, TD); - if (!T0 || isa(T0)) + mlir::Type T0 = getFPTypeAtOffset(IRType, IROffset, TD); + if (!T0 || mlir::isa(T0)) return T0; // NOTE(cir): Not sure if this is correct. - Type T1 = {}; + mlir::Type T1 = {}; unsigned T0Size = TD.getTypeAllocSize(T0); if (SourceSize > T0Size) cir_cconv_unreachable("NYI"); if (T1 == nullptr) { // Check if IRType is a half/bfloat + float. float type will be in // IROffset+4 due to its alignment. - if (isa(T0) && SourceSize > 4) + if (mlir::isa(T0) && SourceSize > 4) cir_cconv_unreachable("NYI"); // If we can't get a second FP type, return a simple half or float. // avx512fp16-abi.c:pr51813_2 shows it works to return float for @@ -401,14 +403,15 @@ Type X86_64ABIInfo::GetSSETypeAtOffset(Type IRType, unsigned IROffset, /// is an offset into this that we're processing (which is always either 0 or /// 8). /// -Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, - Type SourceTy, - unsigned SourceOffset) const { +mlir::Type X86_64ABIInfo::GetINTEGERTypeAtOffset(mlir::Type DestTy, + unsigned IROffset, + mlir::Type SourceTy, + unsigned SourceOffset) const { // If we're dealing with an un-offset CIR type, then it means that we're // returning an 8-byte unit starting with it. See if we can safely use it. if (IROffset == 0) { // Pointers and int64's always fill the 8-byte unit. - cir_cconv_assert(!isa(DestTy) && "Ptrs are NYI"); + cir_cconv_assert(!mlir::isa(DestTy) && "Ptrs are NYI"); // If we have a 1/2/4-byte integer, we can use it only if the rest of the // goodness in the source type is just tail padding. This is allowed to @@ -416,7 +419,7 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, // struct{double,int,int} because we wouldn't return the second int. We // have to do this analysis on the source type because we can't depend on // unions being lowered a specific way etc. - if (auto intTy = dyn_cast(DestTy)) { + if (auto intTy = mlir::dyn_cast(DestTy)) { if (intTy.getWidth() == 8 || intTy.getWidth() == 16 || intTy.getWidth() == 32) { unsigned BitWidth = intTy.getWidth(); @@ -427,9 +430,9 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, } } - if (auto RT = dyn_cast(DestTy)) { + if (auto RT = mlir::dyn_cast(DestTy)) { // If this is a struct, recurse into the field at the specified offset. - const ::cir::StructLayout *SL = getDataLayout().getStructLayout(RT); + const cir::StructLayout *SL = getDataLayout().getStructLayout(RT); if (IROffset < SL->getSizeInBytes()) { unsigned FieldIdx = SL->getElementContainingOffset(IROffset); IROffset -= SL->getElementOffset(FieldIdx); @@ -455,7 +458,7 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, // this might not make a difference in practice. For now, we just preserve the // sign as is to avoid unecessary bitcasts. bool isSigned = false; - if (auto intTy = dyn_cast(SourceTy)) + if (auto intTy = mlir::dyn_cast(SourceTy)) isSigned = intTy.isSigned(); return IntType::get(LT.getMLIRContext(), std::min(TySizeInBytes - SourceOffset, 8U) * 8, isSigned); @@ -467,7 +470,7 @@ Type X86_64ABIInfo::GetINTEGERTypeAtOffset(Type DestTy, unsigned IROffset, /// a by-value argument should be passed as i32* and the high part as float, /// return {i32*, float}. static mlir::Type GetX86_64ByValArgumentPair(mlir::Type lo, mlir::Type hi, - const ::cir::CIRDataLayout &td) { + const cir::CIRDataLayout &td) { // In order to correctly satisfy the ABI, we need to the high part to start // at offset 8. If the high and low parts we inferred are both 4-byte types // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have @@ -487,10 +490,11 @@ static mlir::Type GetX86_64ByValArgumentPair(mlir::Type lo, mlir::Type hi, // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and // NaCl). // Promote these to a larger type. - if (isa(lo)) + if (mlir::isa(lo)) lo = DoubleType::get(lo.getContext()); else { - assert((isa(lo)) && "Invalid/unknown lo type"); + assert((mlir::isa(lo)) && + "Invalid/unknown lo type"); // TODO(cir): does the sign of the int64 type matter here? lo = IntType::get(lo.getContext(), 64, true); } @@ -505,7 +509,7 @@ static mlir::Type GetX86_64ByValArgumentPair(mlir::Type lo, mlir::Type hi, return result; } -::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { +cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(mlir::Type RetTy) const { // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the // classification algorithm. X86_64ABIInfo::Class Lo, Hi; @@ -517,7 +521,7 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { cir_cconv_assert((Hi != Class::SSEUp || Lo == Class::SSE) && "Invalid SSEUp classification."); - Type resType = {}; + mlir::Type resType = {}; switch (Lo) { case Class::NoClass: if (Hi == Class::NoClass) @@ -529,12 +533,13 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { // If we have a sign or zero extended integer, make sure to return Extend // so that the parameter gets the right LLVM IR attributes. - if (Hi == Class::NoClass && isa(resType)) { + if (Hi == Class::NoClass && mlir::isa(resType)) { // NOTE(cir): We skip enum types handling here since CIR represents // enums directly as their unerlying integer types. NOTE(cir): For some // reason, Clang does not set the coerce type here and delays it to // arrangeLLVMFunctionInfo. We do the same to keep parity. - if (isa(RetTy) && isPromotableIntegerTypeForABI(RetTy)) + if (mlir::isa(RetTy) && + isPromotableIntegerTypeForABI(RetTy)) return ABIArgInfo::getExtend(RetTy); } break; @@ -546,11 +551,11 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { break; default: - cir_cconv_assert_or_abort( - !::cir::MissingFeatures::X86RetTypeClassification(), "NYI"); + cir_cconv_assert_or_abort(!cir::MissingFeatures::X86RetTypeClassification(), + "NYI"); } - Type HighPart = {}; + mlir::Type HighPart = {}; switch (Hi) { case Class::NoClass: @@ -575,11 +580,9 @@ ::cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(Type RetTy) const { return ABIArgInfo::getDirect(resType); } -ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, - unsigned &neededInt, - unsigned &neededSSE, - bool isNamedArg, - bool IsRegCall = false) const { +ABIArgInfo X86_64ABIInfo::classifyArgumentType( + mlir::Type Ty, unsigned freeIntRegs, unsigned &neededInt, + unsigned &neededSSE, bool isNamedArg, bool IsRegCall = false) const { Ty = useFirstFieldIfTransparentUnion(Ty); X86_64ABIInfo::Class Lo, Hi; @@ -594,7 +597,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, neededInt = 0; neededSSE = 0; - Type ResType = {}; + mlir::Type ResType = {}; switch (Lo) { // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 @@ -607,12 +610,12 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, // If we have a sign or zero extended integer, make sure to return Extend // so that the parameter gets the right LLVM IR attributes. - if (Hi == Class::NoClass && isa(ResType)) { + if (Hi == Class::NoClass && mlir::isa(ResType)) { // NOTE(cir): We skip enum types handling here since CIR represents // enums directly as their unerlying integer types. NOTE(cir): For some // reason, Clang does not set the coerce type here and delays it to // arrangeLLVMFunctionInfo. We do the same to keep parity. - if (isa(Ty) && isPromotableIntegerTypeForABI(Ty)) + if (mlir::isa(Ty) && isPromotableIntegerTypeForABI(Ty)) return ABIArgInfo::getExtend(Ty); } @@ -627,11 +630,11 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(Type Ty, unsigned freeIntRegs, break; } default: - cir_cconv_assert_or_abort( - !::cir::MissingFeatures::X86ArgTypeClassification(), "NYI"); + cir_cconv_assert_or_abort(!cir::MissingFeatures::X86ArgTypeClassification(), + "NYI"); } - Type HighPart = {}; + mlir::Type HighPart = {}; switch (Hi) { case Class::NoClass: break; @@ -674,8 +677,8 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { unsigned FreeSSERegs = IsRegCall ? 16 : 8; unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0; - if (!::mlir::cir::classifyReturnType(getCXXABI(), FI, *this)) { - if (IsRegCall || ::cir::MissingFeatures::regCall()) { + if (!cir::classifyReturnType(getCXXABI(), FI, *this)) { + if (IsRegCall || cir::MissingFeatures::regCall()) { cir_cconv_unreachable("RegCall is NYI"); } else FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); @@ -689,7 +692,7 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { cir_cconv_unreachable("NYI"); // The chain argument effectively gives us another free register. - if (::cir::MissingFeatures::chainCall()) + if (cir::MissingFeatures::chainCall()) cir_cconv_unreachable("NYI"); unsigned NumRequiredArgs = FI.getNumRequiredArgs(); @@ -700,7 +703,7 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { it != ie; ++it, ++ArgNo) { bool IsNamedArg = ArgNo < NumRequiredArgs; - if (IsRegCall && ::cir::MissingFeatures::regCall()) + if (IsRegCall && cir::MissingFeatures::regCall()) cir_cconv_unreachable("NYI"); else it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, @@ -713,7 +716,7 @@ void X86_64ABIInfo::computeInfo(LowerFunctionInfo &FI) const { if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { FreeIntRegs -= NeededInt; FreeSSERegs -= NeededSSE; - if (::cir::MissingFeatures::vectorType()) + if (cir::MissingFeatures::vectorType()) cir_cconv_unreachable("NYI"); } else { cir_cconv_unreachable("Indirect results are NYI"); @@ -801,4 +804,3 @@ createX86_64TargetLoweringInfo(LowerModule &LM, X86AVXABILevel AVXLevel) { } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index d97a415e3b12..8e321fde177b 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -428,7 +428,7 @@ void CIRGenAction::ExecuteAction() { if (!mainFile) return; - mlirContext->getOrLoadDialect(); + mlirContext->getOrLoadDialect(); mlirContext->getOrLoadDialect(); mlirContext->getOrLoadDialect(); diff --git a/clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp b/clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp index a3f525dd65a3..d23e902bd827 100644 --- a/clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp +++ b/clang/lib/CIR/Interfaces/ASTAttrInterfaces.cpp @@ -9,7 +9,7 @@ #include "llvm/ADT/SmallVector.h" -using namespace mlir::cir; +using namespace cir; /// Include the generated type qualifiers interfaces. #include "clang/CIR/Interfaces/ASTAttrInterfaces.cpp.inc" diff --git a/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp b/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp index 6062a39be7fa..5bf692628396 100644 --- a/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp +++ b/clang/lib/CIR/Interfaces/CIRFPTypeInterface.cpp @@ -8,7 +8,7 @@ #include "clang/CIR/Interfaces/CIRFPTypeInterface.h" -using namespace mlir::cir; +using namespace cir; /// Include the generated interfaces. #include "clang/CIR/Interfaces/CIRFPTypeInterface.cpp.inc" diff --git a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp index 8b1708fa815c..085e1f6c245d 100644 --- a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp +++ b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp @@ -12,12 +12,11 @@ #include "clang/CIR/Interfaces/CIRLoopOpInterface.cpp.inc" #include "llvm/Support/ErrorHandling.h" -namespace mlir { namespace cir { void LoopOpInterface::getLoopOpSuccessorRegions( - LoopOpInterface op, RegionBranchPoint point, - SmallVectorImpl ®ions) { + LoopOpInterface op, mlir::RegionBranchPoint point, + llvm::SmallVectorImpl ®ions) { assert(point.isParent() || point.getRegionOrNull()); // Branching to first region: go to condition or body (do-while). @@ -26,7 +25,7 @@ void LoopOpInterface::getLoopOpSuccessorRegions( } // Branching from condition: go to body or exit. else if (&op.getCond() == point.getRegionOrNull()) { - regions.emplace_back(RegionSuccessor(op->getResults())); + regions.emplace_back(mlir::RegionSuccessor(op->getResults())); regions.emplace_back(&op.getBody(), op.getBody().getArguments()); } // Branching from body: go to step (for) or condition. @@ -44,14 +43,13 @@ void LoopOpInterface::getLoopOpSuccessorRegions( } /// Verify invariants of the LoopOpInterface. -LogicalResult detail::verifyLoopOpInterface(Operation *op) { +llvm::LogicalResult detail::verifyLoopOpInterface(mlir::Operation *op) { // FIXME: fix this so the conditionop isn't requiring MLIRCIR - // auto loopOp = cast(op); - // if (!isa(loopOp.getCond().back().getTerminator())) + // auto loopOp = mlir::cast(op); + // if (!mlir::isa(loopOp.getCond().back().getTerminator())) // return op->emitOpError( // "expected condition region to terminate with 'cir.condition'"); - return success(); + return llvm::success(); } } // namespace cir -} // namespace mlir diff --git a/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp b/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp index 93ab428d5f13..879a2d2e5f59 100644 --- a/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp +++ b/clang/lib/CIR/Interfaces/CIROpInterfaces.cpp @@ -10,7 +10,7 @@ #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "llvm/ADT/SmallVector.h" -using namespace mlir::cir; +using namespace cir; /// Include the generated type qualifiers interfaces. #include "clang/CIR/Interfaces/CIROpInterfaces.cpp.inc" @@ -18,13 +18,13 @@ using namespace mlir::cir; #include "clang/CIR/MissingFeatures.h" bool CIRGlobalValueInterface::hasDefaultVisibility() { - assert(!::cir::MissingFeatures::hiddenVisibility()); - assert(!::cir::MissingFeatures::protectedVisibility()); + assert(!cir::MissingFeatures::hiddenVisibility()); + assert(!cir::MissingFeatures::protectedVisibility()); return isPublic() || isPrivate(); } bool CIRGlobalValueInterface::canBenefitFromLocalAlias() { - assert(!::cir::MissingFeatures::supportIFuncAttr()); + assert(!cir::MissingFeatures::supportIFuncAttr()); // hasComdat here should be isDeduplicateComdat, but as far as clang codegen // is concerned, there is no case for Comdat::NoDeduplicate as all comdat // would be Comdat::Any or Comdat::Largest (in the case of MS ABI). And CIRGen diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 65912b81fd9d..82cdb0e21666 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -98,9 +98,9 @@ void walkRegionSkipping(mlir::Region ®ion, } /// Convert from a CIR comparison kind to an LLVM IR integral comparison kind. -mlir::LLVM::ICmpPredicate -convertCmpKindToICmpPredicate(mlir::cir::CmpOpKind kind, bool isSigned) { - using CIR = mlir::cir::CmpOpKind; +mlir::LLVM::ICmpPredicate convertCmpKindToICmpPredicate(cir::CmpOpKind kind, + bool isSigned) { + using CIR = cir::CmpOpKind; using LLVMICmp = mlir::LLVM::ICmpPredicate; switch (kind) { case CIR::eq: @@ -121,9 +121,8 @@ convertCmpKindToICmpPredicate(mlir::cir::CmpOpKind kind, bool isSigned) { /// Convert from a CIR comparison kind to an LLVM IR floating-point comparison /// kind. -mlir::LLVM::FCmpPredicate -convertCmpKindToFCmpPredicate(mlir::cir::CmpOpKind kind) { - using CIR = mlir::cir::CmpOpKind; +mlir::LLVM::FCmpPredicate convertCmpKindToFCmpPredicate(cir::CmpOpKind kind) { + using CIR = cir::CmpOpKind; using LLVMFCmp = mlir::LLVM::FCmpPredicate; switch (kind) { case CIR::eq: @@ -145,20 +144,20 @@ convertCmpKindToFCmpPredicate(mlir::cir::CmpOpKind kind) { /// If the given type is a vector type, return the vector's element type. /// Otherwise return the given type unchanged. mlir::Type elementTypeIfVector(mlir::Type type) { - if (auto VecType = mlir::dyn_cast(type)) { + if (auto VecType = mlir::dyn_cast(type)) { return VecType.getEltType(); } return type; } mlir::LLVM::Visibility -lowerCIRVisibilityToLLVMVisibility(mlir::cir::VisibilityKind visibilityKind) { +lowerCIRVisibilityToLLVMVisibility(cir::VisibilityKind visibilityKind) { switch (visibilityKind) { - case mlir::cir::VisibilityKind::Default: + case cir::VisibilityKind::Default: return ::mlir::LLVM::Visibility::Default; - case mlir::cir::VisibilityKind::Hidden: + case cir::VisibilityKind::Hidden: return ::mlir::LLVM::Visibility::Hidden; - case mlir::cir::VisibilityKind::Protected: + case cir::VisibilityKind::Protected: return ::mlir::LLVM::Visibility::Protected; } } @@ -288,7 +287,7 @@ mlir::LLVM::GlobalOp getOrCreateAnnotationArgsVar( /// arguments. void lowerAnnotationValue( mlir::Location &localLoc, mlir::Location annotLoc, - mlir::cir::AnnotationAttr annotation, mlir::ModuleOp &module, + cir::AnnotationAttr annotation, mlir::ModuleOp &module, mlir::OpBuilder &varInitBuilder, mlir::OpBuilder &globalVarBuilder, llvm::StringMap &stringGlobalsMap, llvm::StringMap &argStringGlobalsMap, @@ -362,7 +361,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, /// IntAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::IntAttr intAttr, +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::IntAttr intAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto loc = parentOp->getLoc(); @@ -372,7 +371,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::IntAttr intAttr, /// BoolAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::BoolAttr boolAttr, +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::BoolAttr boolAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto loc = parentOp->getLoc(); @@ -382,7 +381,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::BoolAttr boolAttr, /// ConstPtrAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ConstPtrAttr ptrAttr, +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstPtrAttr ptrAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto loc = parentOp->getLoc(); @@ -400,7 +399,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ConstPtrAttr ptrAttr, /// FPAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::FPAttr fltAttr, +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::FPAttr fltAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto loc = parentOp->getLoc(); @@ -410,7 +409,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::FPAttr fltAttr, /// ZeroAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ZeroAttr zeroAttr, +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ZeroAttr zeroAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto loc = parentOp->getLoc(); @@ -420,7 +419,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::ZeroAttr zeroAttr, /// UndefAttr visitor. inline mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::UndefAttr undefAttr, +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::UndefAttr undefAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto loc = parentOp->getLoc(); @@ -430,7 +429,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::cir::UndefAttr undefAttr, /// ConstStruct visitor. mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - mlir::cir::ConstStructAttr constStruct, + cir::ConstStructAttr constStruct, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(constStruct.getType()); @@ -439,10 +438,9 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, // Iteratively lower each constant element of the struct. for (auto [idx, elt] : llvm::enumerate(constStruct.getMembers())) { - if (auto constStructType = - dyn_cast(constStruct.getType()); + if (auto constStructType = dyn_cast(constStruct.getType()); constStructType && constStructType.isUnion()) { - if (isa(elt)) + if (isa(elt)) continue; idx = 0; @@ -457,7 +455,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, // VTableAttr visitor. mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - mlir::cir::VTableAttr vtableArr, + cir::VTableAttr vtableArr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(vtableArr.getType()); @@ -474,7 +472,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, // TypeInfoAttr visitor. mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - mlir::cir::TypeInfoAttr typeinfoArr, + cir::TypeInfoAttr typeinfoArr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(typeinfoArr.getType()); @@ -491,7 +489,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, // ConstArrayAttr visitor mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - mlir::cir::ConstArrayAttr constArr, + cir::ConstArrayAttr constArr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(constArr.getType()); @@ -519,7 +517,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, // would be a global constant that is memcopied. else if (auto strAttr = mlir::dyn_cast(constArr.getElts())) { - auto arrayTy = mlir::dyn_cast(strAttr.getType()); + auto arrayTy = mlir::dyn_cast(strAttr.getType()); assert(arrayTy && "String attribute must have an array type"); auto eltTy = arrayTy.getEltType(); for (auto [idx, elt] : llvm::enumerate(strAttr)) { @@ -537,7 +535,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, // ConstVectorAttr visitor. mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - mlir::cir::ConstVectorAttr constVec, + cir::ConstVectorAttr constVec, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(constVec.getType()); @@ -545,11 +543,10 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, SmallVector mlirValues; for (auto elementAttr : constVec.getElts()) { mlir::Attribute mlirAttr; - if (auto intAttr = mlir::dyn_cast(elementAttr)) { + if (auto intAttr = mlir::dyn_cast(elementAttr)) { mlirAttr = rewriter.getIntegerAttr( converter->convertType(intAttr.getType()), intAttr.getValue()); - } else if (auto floatAttr = - mlir::dyn_cast(elementAttr)) { + } else if (auto floatAttr = mlir::dyn_cast(elementAttr)) { mlirAttr = rewriter.getFloatAttr( converter->convertType(floatAttr.getType()), floatAttr.getValue()); } else { @@ -566,7 +563,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, // GlobalViewAttr visitor. mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - mlir::cir::GlobalViewAttr globalAttr, + cir::GlobalViewAttr globalAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { auto module = parentOp->getParentOfType(); @@ -577,13 +574,13 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, if (auto llvmSymbol = dyn_cast(sourceSymbol)) { sourceType = llvmSymbol.getType(); symName = llvmSymbol.getSymName(); - } else if (auto cirSymbol = dyn_cast(sourceSymbol)) { + } else if (auto cirSymbol = dyn_cast(sourceSymbol)) { sourceType = converter->convertType(cirSymbol.getSymType()); symName = cirSymbol.getSymName(); } else if (auto llvmFun = dyn_cast(sourceSymbol)) { sourceType = llvmFun.getFunctionType(); symName = llvmFun.getSymName(); - } else if (auto fun = dyn_cast(sourceSymbol)) { + } else if (auto fun = dyn_cast(sourceSymbol)) { sourceType = converter->convertType(fun.getFunctionType()); symName = fun.getSymName(); } else { @@ -615,7 +612,7 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, indices, true); } - auto ptrTy = mlir::dyn_cast(globalAttr.getType()); + auto ptrTy = mlir::dyn_cast(globalAttr.getType()); assert(ptrTy && "Expecting pointer type in GlobalViewAttr"); auto llvmEltTy = converter->convertType(ptrTy.getPointee()); @@ -632,29 +629,29 @@ inline mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { - if (const auto intAttr = mlir::dyn_cast(attr)) + if (const auto intAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, intAttr, rewriter, converter); - if (const auto fltAttr = mlir::dyn_cast(attr)) + if (const auto fltAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, fltAttr, rewriter, converter); - if (const auto ptrAttr = mlir::dyn_cast(attr)) + if (const auto ptrAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, ptrAttr, rewriter, converter); - if (const auto constStruct = mlir::dyn_cast(attr)) + if (const auto constStruct = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, constStruct, rewriter, converter); - if (const auto constArr = mlir::dyn_cast(attr)) + if (const auto constArr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, constArr, rewriter, converter); - if (const auto constVec = mlir::dyn_cast(attr)) + if (const auto constVec = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, constVec, rewriter, converter); - if (const auto boolAttr = mlir::dyn_cast(attr)) + if (const auto boolAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, boolAttr, rewriter, converter); - if (const auto zeroAttr = mlir::dyn_cast(attr)) + if (const auto zeroAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, zeroAttr, rewriter, converter); - if (const auto undefAttr = mlir::dyn_cast(attr)) + if (const auto undefAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, undefAttr, rewriter, converter); - if (const auto globalAttr = mlir::dyn_cast(attr)) + if (const auto globalAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, globalAttr, rewriter, converter); - if (const auto vtableAttr = mlir::dyn_cast(attr)) + if (const auto vtableAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, vtableAttr, rewriter, converter); - if (const auto typeinfoAttr = mlir::dyn_cast(attr)) + if (const auto typeinfoAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, typeinfoAttr, rewriter, converter); llvm_unreachable("unhandled attribute type"); @@ -662,8 +659,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, //===----------------------------------------------------------------------===// -mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { - using CIR = mlir::cir::GlobalLinkageKind; +mlir::LLVM::Linkage convertLinkage(cir::GlobalLinkageKind linkage) { + using CIR = cir::GlobalLinkageKind; using LLVM = mlir::LLVM::Linkage; switch (linkage) { @@ -690,8 +687,8 @@ mlir::LLVM::Linkage convertLinkage(mlir::cir::GlobalLinkageKind linkage) { }; } -mlir::LLVM::CConv convertCallingConv(mlir::cir::CallingConv callinvConv) { - using CIR = mlir::cir::CallingConv; +mlir::LLVM::CConv convertCallingConv(cir::CallingConv callinvConv) { + using CIR = cir::CallingConv; using LLVM = mlir::LLVM::CConv; switch (callinvConv) { @@ -705,12 +702,12 @@ mlir::LLVM::CConv convertCallingConv(mlir::cir::CallingConv callinvConv) { llvm_unreachable("Unknown calling convention"); } -class CIRCopyOpLowering : public mlir::OpConversionPattern { +class CIRCopyOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CopyOp op, OpAdaptor adaptor, + matchAndRewrite(cir::CopyOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { const mlir::Value length = rewriter.create( op.getLoc(), rewriter.getI32Type(), op.getLength()); @@ -720,13 +717,12 @@ class CIRCopyOpLowering : public mlir::OpConversionPattern { } }; -class CIRMemCpyOpLowering - : public mlir::OpConversionPattern { +class CIRMemCpyOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::MemCpyOp op, OpAdaptor adaptor, + matchAndRewrite(cir::MemCpyOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp( op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLen(), @@ -735,13 +731,12 @@ class CIRMemCpyOpLowering } }; -class CIRMemChrOpLowering - : public mlir::OpConversionPattern { +class CIRMemChrOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::MemChrOp op, OpAdaptor adaptor, + matchAndRewrite(cir::MemChrOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); llvm::SmallVector arguments; @@ -762,13 +757,12 @@ class CIRMemChrOpLowering } }; -class CIRMemMoveOpLowering - : public mlir::OpConversionPattern { +class CIRMemMoveOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::MemMoveOp op, OpAdaptor adaptor, + matchAndRewrite(cir::MemMoveOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp( op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLen(), @@ -777,12 +771,11 @@ class CIRMemMoveOpLowering } }; -class CIRMemsetOpLowering - : public mlir::OpConversionPattern { +class CIRMemsetOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::MemSetOp op, OpAdaptor adaptor, + matchAndRewrite(cir::MemSetOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto converted = rewriter.create( op.getLoc(), mlir::IntegerType::get(op.getContext(), 8), @@ -813,12 +806,12 @@ static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, } class CIRPtrStrideOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::PtrStrideOp ptrStrideOp, OpAdaptor adaptor, + matchAndRewrite(cir::PtrStrideOp ptrStrideOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto *tc = getTypeConverter(); const auto resultTy = tc->convertType(ptrStrideOp.getType()); @@ -844,10 +837,10 @@ class CIRPtrStrideOpLowering // before it. To achieve that, look at unary minus, which already got // lowered to "sub 0, x". auto sub = dyn_cast(indexOp); - auto unary = dyn_cast_if_present( + auto unary = dyn_cast_if_present( ptrStrideOp.getStride().getDefiningOp()); bool rewriteSub = - unary && unary.getKind() == mlir::cir::UnaryOpKind::Minus && sub; + unary && unary.getKind() == cir::UnaryOpKind::Minus && sub; if (rewriteSub) index = indexOp->getOperand(1); @@ -876,13 +869,12 @@ class CIRPtrStrideOpLowering }; class CIRBaseClassAddrOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern< - mlir::cir::BaseClassAddrOp>::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BaseClassAddrOp baseClassOp, OpAdaptor adaptor, + matchAndRewrite(cir::BaseClassAddrOp baseClassOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { const auto resultType = getTypeConverter()->convertType(baseClassOp.getType()); @@ -915,14 +907,12 @@ class CIRBaseClassAddrOpLowering }; class CIRDerivedClassAddrOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern< - mlir::cir::DerivedClassAddrOp>::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::DerivedClassAddrOp derivedClassOp, - OpAdaptor adaptor, + matchAndRewrite(cir::DerivedClassAddrOp derivedClassOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { const auto resultType = getTypeConverter()->convertType(derivedClassOp.getType()); @@ -957,7 +947,7 @@ getValueForVTableSymbol(mlir::Operation *op, auto *symbol = mlir::SymbolTable::lookupSymbolIn(module, nameAttr); if (auto llvmSymbol = dyn_cast(symbol)) { eltType = llvmSymbol.getType(); - } else if (auto cirSymbol = dyn_cast(symbol)) { + } else if (auto cirSymbol = dyn_cast(symbol)) { eltType = converter->convertType(cirSymbol.getSymType()); } return rewriter.create( @@ -966,13 +956,12 @@ getValueForVTableSymbol(mlir::Operation *op, } class CIRVTTAddrPointOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern< - mlir::cir::VTTAddrPointOp>::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VTTAddrPointOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VTTAddrPointOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { const mlir::Type resultType = getTypeConverter()->convertType(op.getType()); llvm::SmallVector offsets; @@ -1002,13 +991,12 @@ class CIRVTTAddrPointOpLowering } }; -class CIRBrCondOpLowering - : public mlir::OpConversionPattern { +class CIRBrCondOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BrCondOp brOp, OpAdaptor adaptor, + matchAndRewrite(cir::BrCondOp brOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Value i1Condition; @@ -1040,16 +1028,16 @@ class CIRBrCondOpLowering } }; -class CIRCastOpLowering : public mlir::OpConversionPattern { +class CIRCastOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; inline mlir::Type convertTy(mlir::Type ty) const { return getTypeConverter()->convertType(ty); } mlir::LogicalResult - matchAndRewrite(mlir::cir::CastOp castOp, OpAdaptor adaptor, + matchAndRewrite(cir::CastOp castOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // For arithmetic conversions, LLVM IR uses the same instruction to convert // both individual scalars and entire vectors. This lowering pass handles @@ -1058,8 +1046,8 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto src = adaptor.getSrc(); switch (castOp.getKind()) { - case mlir::cir::CastKind::array_to_ptrdecay: { - const auto ptrTy = mlir::cast(castOp.getType()); + case cir::CastKind::array_to_ptrdecay: { + const auto ptrTy = mlir::cast(castOp.getType()); auto sourceValue = adaptor.getOperands().front(); auto targetType = convertTy(ptrTy); auto elementTy = convertTy(ptrTy.getPointee()); @@ -1068,31 +1056,31 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { castOp, targetType, elementTy, sourceValue, offset); break; } - case mlir::cir::CastKind::int_to_bool: { - auto zero = rewriter.create( + case cir::CastKind::int_to_bool: { + auto zero = rewriter.create( src.getLoc(), castOp.getSrc().getType(), - mlir::cir::IntAttr::get(castOp.getSrc().getType(), 0)); - rewriter.replaceOpWithNewOp( - castOp, mlir::cir::BoolType::get(getContext()), - mlir::cir::CmpOpKind::ne, castOp.getSrc(), zero); + cir::IntAttr::get(castOp.getSrc().getType(), 0)); + rewriter.replaceOpWithNewOp( + castOp, cir::BoolType::get(getContext()), cir::CmpOpKind::ne, + castOp.getSrc(), zero); break; } - case mlir::cir::CastKind::integral: { + case cir::CastKind::integral: { auto srcType = castOp.getSrc().getType(); auto dstType = castOp.getResult().getType(); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstType = getTypeConverter()->convertType(dstType); - mlir::cir::IntType srcIntType = - mlir::cast(elementTypeIfVector(srcType)); - mlir::cir::IntType dstIntType = - mlir::cast(elementTypeIfVector(dstType)); + cir::IntType srcIntType = + mlir::cast(elementTypeIfVector(srcType)); + cir::IntType dstIntType = + mlir::cast(elementTypeIfVector(dstType)); rewriter.replaceOp( castOp, getLLVMIntCast(rewriter, llvmSrcVal, llvmDstType, srcIntType.isUnsigned(), srcIntType.getWidth(), dstIntType.getWidth())); break; } - case mlir::cir::CastKind::floating: { + case cir::CastKind::floating: { auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(castOp.getResult().getType()); @@ -1100,13 +1088,13 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto srcTy = elementTypeIfVector(castOp.getSrc().getType()); auto dstTy = elementTypeIfVector(castOp.getResult().getType()); - if (!mlir::isa(dstTy) || - !mlir::isa(srcTy)) + if (!mlir::isa(dstTy) || + !mlir::isa(srcTy)) return castOp.emitError() << "NYI cast from " << srcTy << " to " << dstTy; auto getFloatWidth = [](mlir::Type ty) -> unsigned { - return mlir::cast(ty).getWidth(); + return mlir::cast(ty).getWidth(); }; if (getFloatWidth(srcTy) > getFloatWidth(dstTy)) @@ -1117,24 +1105,24 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } - case mlir::cir::CastKind::int_to_ptr: { - auto dstTy = mlir::cast(castOp.getType()); + case cir::CastKind::int_to_ptr: { + auto dstTy = mlir::cast(castOp.getType()); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); return mlir::success(); } - case mlir::cir::CastKind::ptr_to_int: { - auto dstTy = mlir::cast(castOp.getType()); + case cir::CastKind::ptr_to_int: { + auto dstTy = mlir::cast(castOp.getType()); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); return mlir::success(); } - case mlir::cir::CastKind::float_to_bool: { - auto dstTy = mlir::cast(castOp.getType()); + case cir::CastKind::float_to_bool: { + auto dstTy = mlir::cast(castOp.getType()); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); auto kind = mlir::LLVM::FCmpPredicate::une; @@ -1151,8 +1139,8 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { cmpResult); return mlir::success(); } - case mlir::cir::CastKind::bool_to_int: { - auto dstTy = mlir::cast(castOp.getType()); + case cir::CastKind::bool_to_int: { + auto dstTy = mlir::cast(castOp.getType()); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmSrcTy = mlir::cast(llvmSrcVal.getType()); auto llvmDstTy = @@ -1165,7 +1153,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } - case mlir::cir::CastKind::bool_to_float: { + case cir::CastKind::bool_to_float: { auto dstTy = castOp.getType(); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); @@ -1173,11 +1161,11 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } - case mlir::cir::CastKind::int_to_float: { + case cir::CastKind::int_to_float: { auto dstTy = castOp.getType(); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); - if (mlir::cast( + if (mlir::cast( elementTypeIfVector(castOp.getSrc().getType())) .isSigned()) rewriter.replaceOpWithNewOp(castOp, llvmDstTy, @@ -1187,11 +1175,11 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } - case mlir::cir::CastKind::float_to_int: { + case cir::CastKind::float_to_int: { auto dstTy = castOp.getType(); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); - if (mlir::cast( + if (mlir::cast( elementTypeIfVector(castOp.getResult().getType())) .isSigned()) rewriter.replaceOpWithNewOp(castOp, llvmDstTy, @@ -1201,7 +1189,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } - case mlir::cir::CastKind::bitcast: { + case cir::CastKind::bitcast: { auto dstTy = castOp.getType(); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); @@ -1209,19 +1197,19 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { llvmSrcVal); return mlir::success(); } - case mlir::cir::CastKind::ptr_to_bool: { + case cir::CastKind::ptr_to_bool: { auto zero = mlir::IntegerAttr::get(mlir::IntegerType::get(getContext(), 64), 0); - auto null = rewriter.create( + auto null = rewriter.create( src.getLoc(), castOp.getSrc().getType(), - mlir::cir::ConstPtrAttr::get(getContext(), castOp.getSrc().getType(), - zero)); - rewriter.replaceOpWithNewOp( - castOp, mlir::cir::BoolType::get(getContext()), - mlir::cir::CmpOpKind::ne, castOp.getSrc(), null); + cir::ConstPtrAttr::get(getContext(), castOp.getSrc().getType(), + zero)); + rewriter.replaceOpWithNewOp( + castOp, cir::BoolType::get(getContext()), cir::CmpOpKind::ne, + castOp.getSrc(), null); break; } - case mlir::cir::CastKind::address_space: { + case cir::CastKind::address_space: { auto dstTy = castOp.getType(); auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); @@ -1239,13 +1227,12 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } }; -class CIRReturnLowering - : public mlir::OpConversionPattern { +class CIRReturnLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ReturnOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ReturnOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getOperands()); @@ -1279,7 +1266,7 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, mlir::Block *landingPadBlock = nullptr) { llvm::SmallVector llvmResults; auto cirResults = op->getResultTypes(); - auto callIf = cast(op); + auto callIf = cast(op); if (converter->convertTypes(cirResults, llvmResults).failed()) return mlir::failure(); @@ -1298,9 +1285,9 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, assert(op->getOperands().size() && "operands list must no be empty for the indirect call"); auto typ = op->getOperands().front().getType(); - assert(isa(typ) && "expected pointer type"); - auto ptyp = dyn_cast(typ); - auto ftyp = dyn_cast(ptyp.getPointee()); + assert(isa(typ) && "expected pointer type"); + auto ptyp = dyn_cast(typ); + auto ftyp = dyn_cast(ptyp.getPointee()); assert(ftyp && "expected a pointer to a function as the first operand"); llvmFnTy = cast(converter->convertType(ftyp)); } @@ -1318,12 +1305,12 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, return mlir::success(); } -class CIRCallLowering : public mlir::OpConversionPattern { +class CIRCallLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CallOp op, OpAdaptor adaptor, + matchAndRewrite(cir::CallOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { return rewriteToCallOrInvoke(op.getOperation(), adaptor.getOperands(), rewriter, getTypeConverter(), @@ -1331,15 +1318,14 @@ class CIRCallLowering : public mlir::OpConversionPattern { } }; -class CIRTryCallLowering - : public mlir::OpConversionPattern { +class CIRTryCallLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::TryCallOp op, OpAdaptor adaptor, + matchAndRewrite(cir::TryCallOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - if (op.getCallingConv() != mlir::cir::CallingConv::C) { + if (op.getCallingConv() != cir::CallingConv::C) { return op.emitError( "non-C calling convention is not implemented for try_call"); } @@ -1362,12 +1348,12 @@ getLLVMLandingPadStructTy(mlir::ConversionPatternRewriter &rewriter) { } class CIREhInflightOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::EhInflightOp op, OpAdaptor adaptor, + matchAndRewrite(cir::EhInflightOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Location loc = op.getLoc(); auto llvmLandingPadStructTy = getLLVMLandingPadStructTy(rewriter); @@ -1444,8 +1430,7 @@ class CIREhInflightOpLowering } }; -class CIRAllocaLowering - : public mlir::OpConversionPattern { +class CIRAllocaLowering : public mlir::OpConversionPattern { mlir::DataLayout const &dataLayout; // Track globals created for annotation related strings llvm::StringMap &stringGlobalsMap; @@ -1464,7 +1449,7 @@ class CIRAllocaLowering llvm::StringMap &argStringGlobalsMap, llvm::MapVector &argsVarMap, mlir::MLIRContext *context) - : OpConversionPattern(typeConverter, context), + : OpConversionPattern(typeConverter, context), dataLayout(dataLayout), stringGlobalsMap(stringGlobalsMap), argStringGlobalsMap(argStringGlobalsMap), argsVarMap(argsVarMap) {} @@ -1486,7 +1471,7 @@ class CIRAllocaLowering for (mlir::Attribute entry : annotationValuesArray) { SmallVector intrinsicArgs; intrinsicArgs.push_back(op.getRes()); - auto annot = cast(entry); + auto annot = cast(entry); lowerAnnotationValue(loc, loc, annot, module, varInitBuilder, globalVarBuilder, stringGlobalsMap, argStringGlobalsMap, argsVarMap, intrinsicArgs); @@ -1497,7 +1482,7 @@ class CIRAllocaLowering } mlir::LogicalResult - matchAndRewrite(mlir::cir::AllocaOp op, OpAdaptor adaptor, + matchAndRewrite(cir::AllocaOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Value size = op.isDynamic() @@ -1540,31 +1525,31 @@ class CIRAllocaLowering }; static mlir::LLVM::AtomicOrdering -getLLVMMemOrder(std::optional &memorder) { +getLLVMMemOrder(std::optional &memorder) { if (!memorder) return mlir::LLVM::AtomicOrdering::not_atomic; switch (*memorder) { - case mlir::cir::MemOrder::Relaxed: + case cir::MemOrder::Relaxed: return mlir::LLVM::AtomicOrdering::monotonic; - case mlir::cir::MemOrder::Consume: - case mlir::cir::MemOrder::Acquire: + case cir::MemOrder::Consume: + case cir::MemOrder::Acquire: return mlir::LLVM::AtomicOrdering::acquire; - case mlir::cir::MemOrder::Release: + case cir::MemOrder::Release: return mlir::LLVM::AtomicOrdering::release; - case mlir::cir::MemOrder::AcquireRelease: + case cir::MemOrder::AcquireRelease: return mlir::LLVM::AtomicOrdering::acq_rel; - case mlir::cir::MemOrder::SequentiallyConsistent: + case cir::MemOrder::SequentiallyConsistent: return mlir::LLVM::AtomicOrdering::seq_cst; } llvm_unreachable("unknown memory order"); } -class CIRLoadLowering : public mlir::OpConversionPattern { +class CIRLoadLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::LoadOp op, OpAdaptor adaptor, + matchAndRewrite(cir::LoadOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { const auto llvmTy = getTypeConverter()->convertType(op.getResult().getType()); @@ -1588,12 +1573,12 @@ class CIRLoadLowering : public mlir::OpConversionPattern { } }; -class CIRStoreLowering : public mlir::OpConversionPattern { +class CIRStoreLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::StoreOp op, OpAdaptor adaptor, + matchAndRewrite(cir::StoreOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto memorder = op.getMemOrder(); auto ordering = getLLVMMemOrder(memorder); @@ -1618,17 +1603,17 @@ class CIRStoreLowering : public mlir::OpConversionPattern { } }; -bool hasTrailingZeros(mlir::cir::ConstArrayAttr attr) { +bool hasTrailingZeros(cir::ConstArrayAttr attr) { auto array = mlir::dyn_cast(attr.getElts()); return attr.hasTrailingZeros() || (array && std::count_if(array.begin(), array.end(), [](auto elt) { - auto ar = dyn_cast(elt); + auto ar = dyn_cast(elt); return ar && hasTrailingZeros(ar); })); } static mlir::Attribute -lowerDataMemberAttr(mlir::ModuleOp moduleOp, mlir::cir::DataMemberAttr attr, +lowerDataMemberAttr(mlir::ModuleOp moduleOp, cir::DataMemberAttr attr, const mlir::TypeConverter &typeConverter) { mlir::DataLayout layout{moduleOp}; @@ -1649,85 +1634,82 @@ lowerDataMemberAttr(mlir::ModuleOp moduleOp, mlir::cir::DataMemberAttr attr, return mlir::IntegerAttr::get(underlyingIntTy, memberOffset); } -class CIRConstantLowering - : public mlir::OpConversionPattern { +class CIRConstantLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ConstantOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ConstantOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Attribute attr = op.getValue(); - if (mlir::isa(op.getType())) { - int value = - (op.getValue() == - mlir::cir::BoolAttr::get( - getContext(), ::mlir::cir::BoolType::get(getContext()), true)); + if (mlir::isa(op.getType())) { + int value = (op.getValue() == + cir::BoolAttr::get(getContext(), + cir::BoolType::get(getContext()), true)); attr = rewriter.getIntegerAttr(typeConverter->convertType(op.getType()), value); - } else if (mlir::isa(op.getType())) { + } else if (mlir::isa(op.getType())) { attr = rewriter.getIntegerAttr( typeConverter->convertType(op.getType()), - mlir::cast(op.getValue()).getValue()); - } else if (mlir::isa(op.getType())) { + mlir::cast(op.getValue()).getValue()); + } else if (mlir::isa(op.getType())) { attr = rewriter.getFloatAttr( typeConverter->convertType(op.getType()), - mlir::cast(op.getValue()).getValue()); + mlir::cast(op.getValue()).getValue()); } else if (auto complexTy = - mlir::dyn_cast(op.getType())) { - auto complexAttr = mlir::cast(op.getValue()); + mlir::dyn_cast(op.getType())) { + auto complexAttr = mlir::cast(op.getValue()); auto complexElemTy = complexTy.getElementTy(); auto complexElemLLVMTy = typeConverter->convertType(complexElemTy); mlir::Attribute components[2]; - if (mlir::isa(complexElemTy)) { + if (mlir::isa(complexElemTy)) { components[0] = rewriter.getIntegerAttr( complexElemLLVMTy, - mlir::cast(complexAttr.getReal()).getValue()); + mlir::cast(complexAttr.getReal()).getValue()); components[1] = rewriter.getIntegerAttr( complexElemLLVMTy, - mlir::cast(complexAttr.getImag()).getValue()); + mlir::cast(complexAttr.getImag()).getValue()); } else { components[0] = rewriter.getFloatAttr( complexElemLLVMTy, - mlir::cast(complexAttr.getReal()).getValue()); + mlir::cast(complexAttr.getReal()).getValue()); components[1] = rewriter.getFloatAttr( complexElemLLVMTy, - mlir::cast(complexAttr.getImag()).getValue()); + mlir::cast(complexAttr.getImag()).getValue()); } attr = rewriter.getArrayAttr(components); - } else if (mlir::isa(op.getType())) { + } else if (mlir::isa(op.getType())) { // Optimize with dedicated LLVM op for null pointers. - if (mlir::isa(op.getValue())) { - if (mlir::cast(op.getValue()).isNullValue()) { + if (mlir::isa(op.getValue())) { + if (mlir::cast(op.getValue()).isNullValue()) { rewriter.replaceOpWithNewOp( op, typeConverter->convertType(op.getType())); return mlir::success(); } } // Lower GlobalViewAttr to llvm.mlir.addressof - if (auto gv = mlir::dyn_cast(op.getValue())) { + if (auto gv = mlir::dyn_cast(op.getValue())) { auto newOp = lowerCirAttrAsValue(op, gv, rewriter, getTypeConverter()); rewriter.replaceOp(op, newOp); return mlir::success(); } attr = op.getValue(); - } else if (mlir::isa(op.getType())) { - auto dataMember = mlir::cast(op.getValue()); + } else if (mlir::isa(op.getType())) { + auto dataMember = mlir::cast(op.getValue()); attr = lowerDataMemberAttr(op->getParentOfType(), dataMember, *typeConverter); } // TODO(cir): constant arrays are currently just pushed into the stack using // the store instruction, instead of being stored as global variables and // then memcopyied into the stack (as done in Clang). - else if (auto arrTy = mlir::dyn_cast(op.getType())) { + else if (auto arrTy = mlir::dyn_cast(op.getType())) { // Fetch operation constant array initializer. - auto constArr = mlir::dyn_cast(op.getValue()); - if (!constArr && - !isa(op.getValue())) + auto constArr = mlir::dyn_cast(op.getValue()); + if (!constArr && !isa(op.getValue())) return op.emitError() << "array does not have a constant initializer"; std::optional denseAttr; @@ -1747,7 +1729,7 @@ class CIRConstantLowering return mlir::success(); } } else if (const auto structAttr = - mlir::dyn_cast(op.getValue())) { + mlir::dyn_cast(op.getValue())) { // TODO(cir): this diverges from traditional lowering. Normally the // initializer would be a global constant that is memcopied. Here we just // define a local constant with llvm.undef that will be stored into the @@ -1757,10 +1739,9 @@ class CIRConstantLowering rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); - } else if (auto strTy = - mlir::dyn_cast(op.getType())) { + } else if (auto strTy = mlir::dyn_cast(op.getType())) { auto attr = op.getValue(); - if (mlir::isa(attr)) { + if (mlir::isa(attr)) { auto initVal = lowerCirAttrAsValue(op, attr, rewriter, typeConverter); rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); @@ -1770,7 +1751,7 @@ class CIRConstantLowering return op.emitError() << "unsupported lowering for struct constant type " << op.getType(); } else if (const auto vecTy = - mlir::dyn_cast(op.getType())) { + mlir::dyn_cast(op.getType())) { rewriter.replaceOp(op, lowerCirAttrAsValue(op, op.getValue(), rewriter, getTypeConverter())); return mlir::success(); @@ -1785,16 +1766,16 @@ class CIRConstantLowering }; class CIRVectorCreateLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VecCreateOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VecCreateOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Start with an 'undef' value for the vector. Then 'insertelement' for // each of the vector elements. - auto vecTy = mlir::dyn_cast(op.getType()); + auto vecTy = mlir::dyn_cast(op.getType()); assert(vecTy && "result type of cir.vec.create op is not VectorType"); auto llvmTy = typeConverter->convertType(vecTy); auto loc = op.getLoc(); @@ -1812,28 +1793,27 @@ class CIRVectorCreateLowering } }; -class CIRVectorCmpOpLowering - : public mlir::OpConversionPattern { +class CIRVectorCmpOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VecCmpOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VecCmpOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - assert(mlir::isa(op.getType()) && - mlir::isa(op.getLhs().getType()) && - mlir::isa(op.getRhs().getType()) && + assert(mlir::isa(op.getType()) && + mlir::isa(op.getLhs().getType()) && + mlir::isa(op.getRhs().getType()) && "Vector compare with non-vector type"); // LLVM IR vector comparison returns a vector of i1. This one-bit vector // must be sign-extended to the correct result type. auto elementType = elementTypeIfVector(op.getLhs().getType()); mlir::Value bitResult; - if (auto intType = mlir::dyn_cast(elementType)) { + if (auto intType = mlir::dyn_cast(elementType)) { bitResult = rewriter.create( op.getLoc(), convertCmpKindToICmpPredicate(op.getKind(), intType.isSigned()), adaptor.getLhs(), adaptor.getRhs()); - } else if (mlir::isa(elementType)) { + } else if (mlir::isa(elementType)) { bitResult = rewriter.create( op.getLoc(), convertCmpKindToFCmpPredicate(op.getKind()), adaptor.getLhs(), adaptor.getRhs()); @@ -1847,19 +1827,19 @@ class CIRVectorCmpOpLowering }; class CIRVectorSplatLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VecSplatOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VecSplatOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Vector splat can be implemented with an `insertelement` and a // `shufflevector`, which is better than an `insertelement` for each // element in the vector. Start with an undef vector. Insert the value into // the first element. Then use a `shufflevector` with a mask of all 0 to // fill out the entire vector with that value. - auto vecTy = mlir::dyn_cast(op.getType()); + auto vecTy = mlir::dyn_cast(op.getType()); assert(vecTy && "result type of cir.vec.splat op is not VectorType"); auto llvmTy = typeConverter->convertType(vecTy); auto loc = op.getLoc(); @@ -1878,17 +1858,17 @@ class CIRVectorSplatLowering }; class CIRVectorTernaryLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VecTernaryOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VecTernaryOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - assert(mlir::isa(op.getType()) && - mlir::isa(op.getCond().getType()) && - mlir::isa(op.getVec1().getType()) && - mlir::isa(op.getVec2().getType()) && + assert(mlir::isa(op.getType()) && + mlir::isa(op.getCond().getType()) && + mlir::isa(op.getVec1().getType()) && + mlir::isa(op.getVec2().getType()) && "Vector ternary op with non-vector type"); // Convert `cond` into a vector of i1, then use that in a `select` op. mlir::Value bitVec = rewriter.create( @@ -1903,23 +1883,22 @@ class CIRVectorTernaryLowering }; class CIRVectorShuffleIntsLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VecShuffleOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VecShuffleOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // LLVM::ShuffleVectorOp takes an ArrayRef of int for the list of indices. // Convert the ClangIR ArrayAttr of IntAttr constants into a // SmallVector. SmallVector indices; - std::transform(op.getIndices().begin(), op.getIndices().end(), - std::back_inserter(indices), [](mlir::Attribute intAttr) { - return mlir::cast(intAttr) - .getValue() - .getSExtValue(); - }); + std::transform( + op.getIndices().begin(), op.getIndices().end(), + std::back_inserter(indices), [](mlir::Attribute intAttr) { + return mlir::cast(intAttr).getValue().getSExtValue(); + }); rewriter.replaceOpWithNewOp( op, adaptor.getVec1(), adaptor.getVec2(), indices); return mlir::success(); @@ -1927,13 +1906,12 @@ class CIRVectorShuffleIntsLowering }; class CIRVectorShuffleVecLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern< - mlir::cir::VecShuffleDynamicOp>::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VecShuffleDynamicOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VecShuffleDynamicOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // LLVM IR does not have an operation that corresponds to this form of // the built-in. @@ -1950,7 +1928,7 @@ class CIRVectorShuffleVecLowering mlir::Type llvmIndexType = getTypeConverter()->convertType( elementTypeIfVector(op.getIndices().getType())); uint64_t numElements = - mlir::cast(op.getVec().getType()).getSize(); + mlir::cast(op.getVec().getType()).getSize(); mlir::Value maskValue = rewriter.create( loc, llvmIndexType, mlir::IntegerAttr::get(llvmIndexType, numElements - 1)); @@ -1981,13 +1959,12 @@ class CIRVectorShuffleVecLowering } }; -class CIRVAStartLowering - : public mlir::OpConversionPattern { +class CIRVAStartLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VAStartOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VAStartOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); auto vaList = rewriter.create( @@ -1997,12 +1974,12 @@ class CIRVAStartLowering } }; -class CIRVAEndLowering : public mlir::OpConversionPattern { +class CIRVAEndLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VAEndOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VAEndOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); auto vaList = rewriter.create( @@ -2012,13 +1989,12 @@ class CIRVAEndLowering : public mlir::OpConversionPattern { } }; -class CIRVACopyLowering - : public mlir::OpConversionPattern { +class CIRVACopyLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VACopyOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VACopyOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); auto dstList = rewriter.create( @@ -2030,20 +2006,20 @@ class CIRVACopyLowering } }; -class CIRVAArgLowering : public mlir::OpConversionPattern { +class CIRVAArgLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VAArgOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VAArgOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { return op.emitError("cir.vaarg lowering is NYI"); } }; -class CIRFuncLowering : public mlir::OpConversionPattern { +class CIRFuncLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; /// Returns the name used for the linkage attribute. This *must* correspond /// to the name of the attribute in ODS. @@ -2054,7 +2030,7 @@ class CIRFuncLowering : public mlir::OpConversionPattern { /// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out /// argument attributes. void - lowerFuncAttributes(mlir::cir::FuncOp func, bool filterArgAndResAttrs, + lowerFuncAttributes(cir::FuncOp func, bool filterArgAndResAttrs, SmallVectorImpl &result) const { for (auto attr : func->getAttrs()) { if (attr.getName() == mlir::SymbolTable::getSymbolAttrName() || @@ -2082,9 +2058,9 @@ class CIRFuncLowering : public mlir::OpConversionPattern { /// Here we lower possible OpenCLKernelMetadataAttr to use the converted type. void lowerFuncOpenCLKernelMetadata(mlir::NamedAttribute &extraAttrsEntry) const { - const auto attrKey = mlir::cir::OpenCLKernelMetadataAttr::getMnemonic(); + const auto attrKey = cir::OpenCLKernelMetadataAttr::getMnemonic(); auto oldExtraAttrs = - cast(extraAttrsEntry.getValue()); + cast(extraAttrsEntry.getValue()); if (!oldExtraAttrs.getElements().contains(attrKey)) return; @@ -2092,11 +2068,11 @@ class CIRFuncLowering : public mlir::OpConversionPattern { for (auto entry : oldExtraAttrs.getElements()) { if (entry.getName() == attrKey) { auto clKernelMetadata = - cast(entry.getValue()); + cast(entry.getValue()); if (auto vecTypeHint = clKernelMetadata.getVecTypeHint()) { auto newType = typeConverter->convertType(vecTypeHint.getValue()); auto newTypeHint = mlir::TypeAttr::get(newType); - auto newCLKMAttr = mlir::cir::OpenCLKernelMetadataAttr::get( + auto newCLKMAttr = cir::OpenCLKernelMetadataAttr::get( getContext(), clKernelMetadata.getWorkGroupSizeHint(), clKernelMetadata.getReqdWorkGroupSize(), newTypeHint, clKernelMetadata.getVecTypeHintSignedness(), @@ -2106,12 +2082,12 @@ class CIRFuncLowering : public mlir::OpConversionPattern { } newExtraAttrs.push_back(entry); } - extraAttrsEntry.setValue(mlir::cir::ExtraFuncAttributesAttr::get( + extraAttrsEntry.setValue(cir::ExtraFuncAttributesAttr::get( getContext(), newExtraAttrs.getDictionary(getContext()))); } mlir::LogicalResult - matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, + matchAndRewrite(cir::FuncOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto fnType = op.getFunctionType(); @@ -2170,12 +2146,12 @@ class CIRFuncLowering : public mlir::OpConversionPattern { }; class CIRGetGlobalOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::GetGlobalOp op, OpAdaptor adaptor, + matchAndRewrite(cir::GetGlobalOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // FIXME(cir): Premature DCE to avoid lowering stuff we're not using. // CIRGen should mitigate this and not emit the get_global. @@ -2201,12 +2177,12 @@ class CIRGetGlobalOpLowering }; class CIRComplexCreateOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ComplexCreateOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ComplexCreateOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto complexLLVMTy = getTypeConverter()->convertType(op.getResult().getType()); @@ -2227,12 +2203,12 @@ class CIRComplexCreateOpLowering }; class CIRComplexRealOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ComplexRealOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ComplexRealOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto resultLLVMTy = getTypeConverter()->convertType(op.getResult().getType()); @@ -2244,12 +2220,12 @@ class CIRComplexRealOpLowering }; class CIRComplexImagOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ComplexImagOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ComplexImagOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto resultLLVMTy = getTypeConverter()->convertType(op.getResult().getType()); @@ -2261,15 +2237,14 @@ class CIRComplexImagOpLowering }; class CIRComplexRealPtrOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ComplexRealPtrOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ComplexRealPtrOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto operandTy = - mlir::cast(op.getOperand().getType()); + auto operandTy = mlir::cast(op.getOperand().getType()); auto resultLLVMTy = getTypeConverter()->convertType(op.getResult().getType()); auto elementLLVMTy = @@ -2285,15 +2260,14 @@ class CIRComplexRealPtrOpLowering }; class CIRComplexImagPtrOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ComplexImagPtrOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ComplexImagPtrOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto operandTy = - mlir::cast(op.getOperand().getType()); + auto operandTy = mlir::cast(op.getOperand().getType()); auto resultLLVMTy = getTypeConverter()->convertType(op.getResult().getType()); auto elementLLVMTy = @@ -2309,18 +2283,18 @@ class CIRComplexImagPtrOpLowering }; class CIRSwitchFlatOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::SwitchFlatOp op, OpAdaptor adaptor, + matchAndRewrite(cir::SwitchFlatOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { llvm::SmallVector caseValues; if (op.getCaseValues()) { for (auto val : op.getCaseValues()) { - auto intAttr = dyn_cast(val); + auto intAttr = dyn_cast(val); caseValues.push_back(intAttr.getValue()); } } @@ -2345,18 +2319,17 @@ class CIRSwitchFlatOpLowering } }; -class CIRGlobalOpLowering - : public mlir::OpConversionPattern { +class CIRGlobalOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; // Get addrspace by converting a pointer type. // TODO: The approach here is a little hacky. We should access the target info // directly to convert the address space of global op, similar to what we do // for type converter. - unsigned getGlobalOpTargetAddrSpace(mlir::cir::GlobalOp op) const { - auto tempPtrTy = mlir::cir::PointerType::get(getContext(), op.getSymType(), - op.getAddrSpaceAttr()); + unsigned getGlobalOpTargetAddrSpace(cir::GlobalOp op) const { + auto tempPtrTy = cir::PointerType::get(getContext(), op.getSymType(), + op.getAddrSpaceAttr()); return cast( typeConverter->convertType(tempPtrTy)) .getAddressSpace(); @@ -2365,7 +2338,7 @@ class CIRGlobalOpLowering /// Replace CIR global with a region initialized LLVM global and update /// insertion point to the end of the initializer block. inline void setupRegionInitializedLLVMGlobalOp( - mlir::cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const { + cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const { const auto llvmType = getTypeConverter()->convertType(op.getSymType()); SmallVector attributes; auto newGlobalOp = rewriter.replaceOpWithNewOp( @@ -2380,7 +2353,7 @@ class CIRGlobalOpLowering } mlir::LogicalResult - matchAndRewrite(mlir::cir::GlobalOp op, OpAdaptor adaptor, + matchAndRewrite(cir::GlobalOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Fetch required values to create LLVM op. @@ -2414,8 +2387,7 @@ class CIRGlobalOpLowering } // Initializer is a constant array: convert it to a compatible llvm init. - if (auto constArr = - mlir::dyn_cast(init.value())) { + if (auto constArr = mlir::dyn_cast(init.value())) { if (auto attr = mlir::dyn_cast(constArr.getElts())) { llvm::SmallString<256> literal(attr.getValue()); if (constArr.getTrailingZerosNum()) @@ -2438,19 +2410,18 @@ class CIRGlobalOpLowering << constArr.getElts(); return mlir::failure(); } - } else if (auto fltAttr = mlir::dyn_cast(init.value())) { + } else if (auto fltAttr = mlir::dyn_cast(init.value())) { // Initializer is a constant floating-point number: convert to MLIR // builtin constant. init = rewriter.getFloatAttr(llvmType, fltAttr.getValue()); } // Initializer is a constant integer: convert to MLIR builtin constant. - else if (auto intAttr = mlir::dyn_cast(init.value())) { + else if (auto intAttr = mlir::dyn_cast(init.value())) { init = rewriter.getIntegerAttr(llvmType, intAttr.getValue()); - } else if (auto boolAttr = - mlir::dyn_cast(init.value())) { + } else if (auto boolAttr = mlir::dyn_cast(init.value())) { init = rewriter.getBoolAttr(boolAttr.getValue()); - } else if (isa(init.value())) { + } else if (isa( + init.value())) { // TODO(cir): once LLVM's dialect has proper equivalent attributes this // should be updated. For now, we use a custom op to initialize globals // to the appropriate value. @@ -2460,31 +2431,30 @@ class CIRGlobalOpLowering rewriter.create(loc, value); return mlir::success(); } else if (auto dataMemberAttr = - mlir::dyn_cast(init.value())) { + mlir::dyn_cast(init.value())) { init = lowerDataMemberAttr(op->getParentOfType(), dataMemberAttr, *typeConverter); } else if (const auto structAttr = - mlir::dyn_cast(init.value())) { + mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( op->getLoc(), lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter)); return mlir::success(); - } else if (auto attr = - mlir::dyn_cast(init.value())) { + } else if (auto attr = mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( loc, lowerCirAttrAsValue(op, attr, rewriter, typeConverter)); return mlir::success(); } else if (const auto vtableAttr = - mlir::dyn_cast(init.value())) { + mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( op->getLoc(), lowerCirAttrAsValue(op, vtableAttr, rewriter, typeConverter)); return mlir::success(); } else if (const auto typeinfoAttr = - mlir::dyn_cast(init.value())) { + mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( op->getLoc(), @@ -2530,26 +2500,25 @@ class CIRGlobalOpLowering } }; -class CIRUnaryOpLowering - : public mlir::OpConversionPattern { +class CIRUnaryOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::UnaryOp op, OpAdaptor adaptor, + matchAndRewrite(cir::UnaryOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { assert(op.getType() == op.getInput().getType() && "Unary operation's operand type and result type are different"); mlir::Type type = op.getType(); mlir::Type elementType = elementTypeIfVector(type); - bool IsVector = mlir::isa(type); + bool IsVector = mlir::isa(type); auto llvmType = getTypeConverter()->convertType(type); auto loc = op.getLoc(); // Integer unary operations: + - ~ ++ -- - if (mlir::isa(elementType)) { + if (mlir::isa(elementType)) { switch (op.getKind()) { - case mlir::cir::UnaryOpKind::Inc: { + case cir::UnaryOpKind::Inc: { assert(!IsVector && "++ not allowed on vector types"); auto One = rewriter.create( loc, llvmType, mlir::IntegerAttr::get(llvmType, 1)); @@ -2557,7 +2526,7 @@ class CIRUnaryOpLowering adaptor.getInput(), One); return mlir::success(); } - case mlir::cir::UnaryOpKind::Dec: { + case cir::UnaryOpKind::Dec: { assert(!IsVector && "-- not allowed on vector types"); auto One = rewriter.create( loc, llvmType, mlir::IntegerAttr::get(llvmType, 1)); @@ -2565,11 +2534,11 @@ class CIRUnaryOpLowering adaptor.getInput(), One); return mlir::success(); } - case mlir::cir::UnaryOpKind::Plus: { + case cir::UnaryOpKind::Plus: { rewriter.replaceOp(op, adaptor.getInput()); return mlir::success(); } - case mlir::cir::UnaryOpKind::Minus: { + case cir::UnaryOpKind::Minus: { mlir::Value Zero; if (IsVector) Zero = rewriter.create(loc, llvmType); @@ -2580,7 +2549,7 @@ class CIRUnaryOpLowering adaptor.getInput()); return mlir::success(); } - case mlir::cir::UnaryOpKind::Not: { + case cir::UnaryOpKind::Not: { // bit-wise compliment operator, implemented as an XOR with -1. mlir::Value MinusOne; if (IsVector) { @@ -2592,8 +2561,7 @@ class CIRUnaryOpLowering loc, llvmElementType, mlir::IntegerAttr::get(llvmElementType, -1)); MinusOne = rewriter.create(loc, llvmType); - auto NumElements = - mlir::dyn_cast(type).getSize(); + auto NumElements = mlir::dyn_cast(type).getSize(); for (uint64_t i = 0; i < NumElements; ++i) { mlir::Value indexValue = rewriter.create( loc, rewriter.getI64Type(), i); @@ -2612,9 +2580,9 @@ class CIRUnaryOpLowering } // Floating point unary operations: + - ++ -- - if (mlir::isa(elementType)) { + if (mlir::isa(elementType)) { switch (op.getKind()) { - case mlir::cir::UnaryOpKind::Inc: { + case cir::UnaryOpKind::Inc: { assert(!IsVector && "++ not allowed on vector types"); auto oneAttr = rewriter.getFloatAttr(llvmType, 1.0); auto oneConst = @@ -2623,7 +2591,7 @@ class CIRUnaryOpLowering adaptor.getInput()); return mlir::success(); } - case mlir::cir::UnaryOpKind::Dec: { + case cir::UnaryOpKind::Dec: { assert(!IsVector && "-- not allowed on vector types"); auto negOneAttr = rewriter.getFloatAttr(llvmType, -1.0); auto negOneConst = @@ -2632,10 +2600,10 @@ class CIRUnaryOpLowering op, llvmType, negOneConst, adaptor.getInput()); return mlir::success(); } - case mlir::cir::UnaryOpKind::Plus: + case cir::UnaryOpKind::Plus: rewriter.replaceOp(op, adaptor.getInput()); return mlir::success(); - case mlir::cir::UnaryOpKind::Minus: { + case cir::UnaryOpKind::Minus: { rewriter.replaceOpWithNewOp(op, llvmType, adaptor.getInput()); return mlir::success(); @@ -2648,9 +2616,9 @@ class CIRUnaryOpLowering // Boolean unary operations: ! only. (For all others, the operand has // already been promoted to int.) - if (mlir::isa(elementType)) { + if (mlir::isa(elementType)) { switch (op.getKind()) { - case mlir::cir::UnaryOpKind::Not: + case cir::UnaryOpKind::Not: assert(!IsVector && "NYI: op! on vector mask"); rewriter.replaceOpWithNewOp( op, llvmType, adaptor.getInput(), @@ -2665,9 +2633,9 @@ class CIRUnaryOpLowering // Pointer unary operations: + only. (++ and -- of pointers are implemented // with cir.ptr_stride, not cir.unary.) - if (mlir::isa(elementType)) { + if (mlir::isa(elementType)) { switch (op.getKind()) { - case mlir::cir::UnaryOpKind::Plus: + case cir::UnaryOpKind::Plus: rewriter.replaceOp(op, adaptor.getInput()); return mlir::success(); default: @@ -2681,10 +2649,9 @@ class CIRUnaryOpLowering } }; -class CIRBinOpLowering : public mlir::OpConversionPattern { +class CIRBinOpLowering : public mlir::OpConversionPattern { - mlir::LLVM::IntegerOverflowFlags - getIntOverflowFlag(mlir::cir::BinOp op) const { + mlir::LLVM::IntegerOverflowFlags getIntOverflowFlag(cir::BinOp op) const { if (op.getNoUnsignedWrap()) return mlir::LLVM::IntegerOverflowFlags::nuw; @@ -2695,16 +2662,16 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { } public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BinOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BinOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { assert((op.getLhs().getType() == op.getRhs().getType()) && "inconsistent operands' types not supported yet"); mlir::Type type = op.getRhs().getType(); - assert((mlir::isa(type)) && + assert((mlir::isa( + type)) && "operand type not supported yet"); auto llvmTy = getTypeConverter()->convertType(op.getType()); @@ -2714,29 +2681,29 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { type = elementTypeIfVector(type); switch (op.getKind()) { - case mlir::cir::BinOpKind::Add: - if (mlir::isa(type)) + case cir::BinOpKind::Add: + if (mlir::isa(type)) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; - case mlir::cir::BinOpKind::Sub: - if (mlir::isa(type)) + case cir::BinOpKind::Sub: + if (mlir::isa(type)) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; - case mlir::cir::BinOpKind::Mul: - if (mlir::isa(type)) + case cir::BinOpKind::Mul: + if (mlir::isa(type)) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; - case mlir::cir::BinOpKind::Div: - if (auto ty = mlir::dyn_cast(type)) { + case cir::BinOpKind::Div: + if (auto ty = mlir::dyn_cast(type)) { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else @@ -2744,8 +2711,8 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { } else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; - case mlir::cir::BinOpKind::Rem: - if (auto ty = mlir::dyn_cast(type)) { + case cir::BinOpKind::Rem: + if (auto ty = mlir::dyn_cast(type)) { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else @@ -2753,13 +2720,13 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { } else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; - case mlir::cir::BinOpKind::And: + case cir::BinOpKind::And: rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; - case mlir::cir::BinOpKind::Or: + case cir::BinOpKind::Or: rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; - case mlir::cir::BinOpKind::Xor: + case cir::BinOpKind::Xor: rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; } @@ -2769,12 +2736,12 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { }; class CIRBinOpOverflowOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BinOpOverflowOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BinOpOverflowOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto loc = op.getLoc(); auto arithKind = op.getKind(); @@ -2850,7 +2817,7 @@ class CIRBinOpOverflowOpLowering } private: - static std::string getLLVMIntrinName(mlir::cir::BinOpOverflowKind opKind, + static std::string getLLVMIntrinName(cir::BinOpOverflowKind opKind, bool isSigned, unsigned width) { // The intrinsic name is `@llvm.{s|u}{opKind}.with.overflow.i{width}` @@ -2862,13 +2829,13 @@ class CIRBinOpOverflowOpLowering name.push_back('u'); switch (opKind) { - case mlir::cir::BinOpOverflowKind::Add: + case cir::BinOpOverflowKind::Add: name.append("add."); break; - case mlir::cir::BinOpOverflowKind::Sub: + case cir::BinOpOverflowKind::Sub: name.append("sub."); break; - case mlir::cir::BinOpOverflowKind::Mul: + case cir::BinOpOverflowKind::Mul: name.append("mul."); break; } @@ -2885,8 +2852,7 @@ class CIRBinOpOverflowOpLowering }; static EncompassedTypeInfo - computeEncompassedTypeWidth(mlir::cir::IntType operandTy, - mlir::cir::IntType resultTy) { + computeEncompassedTypeWidth(cir::IntType operandTy, cir::IntType resultTy) { auto sign = operandTy.getIsSigned() || resultTy.getIsSigned(); auto width = std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()), @@ -2895,23 +2861,19 @@ class CIRBinOpOverflowOpLowering } }; -class CIRShiftOpLowering - : public mlir::OpConversionPattern { +class CIRShiftOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ShiftOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ShiftOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto cirAmtTy = - mlir::dyn_cast(op.getAmount().getType()); - auto cirValTy = mlir::dyn_cast(op.getValue().getType()); + auto cirAmtTy = mlir::dyn_cast(op.getAmount().getType()); + auto cirValTy = mlir::dyn_cast(op.getValue().getType()); // Operands could also be vector type - auto cirAmtVTy = - mlir::dyn_cast(op.getAmount().getType()); - auto cirValVTy = - mlir::dyn_cast(op.getValue().getType()); + auto cirAmtVTy = mlir::dyn_cast(op.getAmount().getType()); + auto cirValVTy = mlir::dyn_cast(op.getValue().getType()); auto llvmTy = getTypeConverter()->convertType(op.getType()); mlir::Value amt = adaptor.getAmount(); mlir::Value val = adaptor.getValue(); @@ -2936,9 +2898,9 @@ class CIRShiftOpLowering rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); else { bool isUnSigned = - cirValTy ? !cirValTy.isSigned() - : !mlir::cast(cirValVTy.getEltType()) - .isSigned(); + cirValTy + ? !cirValTy.isSigned() + : !mlir::cast(cirValVTy.getEltType()).isSigned(); if (isUnSigned) rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); else @@ -2949,28 +2911,28 @@ class CIRShiftOpLowering } }; -class CIRCmpOpLowering : public mlir::OpConversionPattern { +class CIRCmpOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CmpOp cmpOp, OpAdaptor adaptor, + matchAndRewrite(cir::CmpOp cmpOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto type = cmpOp.getLhs().getType(); mlir::Value llResult; // Lower to LLVM comparison op. - if (auto intTy = mlir::dyn_cast(type)) { + if (auto intTy = mlir::dyn_cast(type)) { auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), intTy.isSigned()); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (auto ptrTy = mlir::dyn_cast(type)) { + } else if (auto ptrTy = mlir::dyn_cast(type)) { auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), /* isSigned=*/false); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (mlir::isa(type)) { + } else if (mlir::isa(type)) { auto kind = convertCmpKindToFCmpPredicate(cmpOp.getKind()); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); @@ -3010,13 +2972,12 @@ static mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp( } class CIRIntrinsicCallLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern< - mlir::cir::LLVMIntrinsicCallOp>::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::LLVMIntrinsicCallOp op, OpAdaptor adaptor, + matchAndRewrite(cir::LLVMIntrinsicCallOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Type llvmResTy = getTypeConverter()->convertType(op->getResultTypes()[0]); @@ -3032,20 +2993,19 @@ class CIRIntrinsicCallLowering // %3 = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) %2) // TODO(cir): MLIR LLVM dialect should handle this part as CIR has no way // to set LLVM IR attribute. - assert(!::cir::MissingFeatures::llvmIntrinsicElementTypeSupport()); + assert(!cir::MissingFeatures::llvmIntrinsicElementTypeSupport()); replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm." + name, llvmResTy, adaptor.getOperands()); return mlir::success(); } }; -class CIRAssumeLowering - : public mlir::OpConversionPattern { +class CIRAssumeLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::AssumeOp op, OpAdaptor adaptor, + matchAndRewrite(cir::AssumeOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto cond = rewriter.create( op.getLoc(), rewriter.getI1Type(), adaptor.getPredicate()); @@ -3055,12 +3015,12 @@ class CIRAssumeLowering }; class CIRAssumeAlignedLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::AssumeAlignedOp op, OpAdaptor adaptor, + matchAndRewrite(cir::AssumeAlignedOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { SmallVector opBundleArgs{adaptor.getPointer()}; @@ -3083,12 +3043,12 @@ class CIRAssumeAlignedLowering }; class CIRAssumeSepStorageLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::AssumeSepStorageOp op, OpAdaptor adaptor, + matchAndRewrite(cir::AssumeSepStorageOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto cond = rewriter.create( op.getLoc(), rewriter.getI1Type(), 1); @@ -3132,12 +3092,12 @@ static mlir::Value createLLVMBitOp(mlir::Location loc, } class CIRBitClrsbOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BitClrsbOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BitClrsbOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto zero = rewriter.create( op.getLoc(), adaptor.getInput().getType(), 0); @@ -3167,18 +3127,17 @@ class CIRBitClrsbOpLowering } }; -class CIRObjSizeOpLowering - : public mlir::OpConversionPattern { +class CIRObjSizeOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ObjSizeOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ObjSizeOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto llvmResTy = getTypeConverter()->convertType(op.getType()); auto loc = op->getLoc(); - mlir::cir::SizeInfoType kindInfo = op.getKind(); + cir::SizeInfoType kindInfo = op.getKind(); auto falseValue = rewriter.create( loc, rewriter.getI1Type(), false); auto trueValue = rewriter.create( @@ -3187,21 +3146,20 @@ class CIRObjSizeOpLowering replaceOpWithCallLLVMIntrinsicOp( rewriter, op, "llvm.objectsize", llvmResTy, mlir::ValueRange{adaptor.getPtr(), - kindInfo == mlir::cir::SizeInfoType::max ? falseValue - : trueValue, + kindInfo == cir::SizeInfoType::max ? falseValue + : trueValue, trueValue, op.getDynamic() ? trueValue : falseValue}); return mlir::LogicalResult::success(); } }; -class CIRBitClzOpLowering - : public mlir::OpConversionPattern { +class CIRBitClzOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BitClzOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BitClzOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto resTy = getTypeConverter()->convertType(op.getType()); auto llvmOp = @@ -3212,13 +3170,12 @@ class CIRBitClzOpLowering } }; -class CIRBitCtzOpLowering - : public mlir::OpConversionPattern { +class CIRBitCtzOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BitCtzOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BitCtzOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto resTy = getTypeConverter()->convertType(op.getType()); auto llvmOp = @@ -3229,13 +3186,12 @@ class CIRBitCtzOpLowering } }; -class CIRBitFfsOpLowering - : public mlir::OpConversionPattern { +class CIRBitFfsOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BitFfsOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BitFfsOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto resTy = getTypeConverter()->convertType(op.getType()); auto ctz = @@ -3263,12 +3219,12 @@ class CIRBitFfsOpLowering }; class CIRBitParityOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BitParityOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BitParityOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto resTy = getTypeConverter()->convertType(op.getType()); auto popcnt = @@ -3285,12 +3241,12 @@ class CIRBitParityOpLowering }; class CIRBitPopcountOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BitPopcountOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BitPopcountOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto resTy = getTypeConverter()->convertType(op.getType()); auto llvmOp = @@ -3301,30 +3257,30 @@ class CIRBitPopcountOpLowering } }; -static mlir::LLVM::AtomicOrdering getLLVMAtomicOrder(mlir::cir::MemOrder memo) { +static mlir::LLVM::AtomicOrdering getLLVMAtomicOrder(cir::MemOrder memo) { switch (memo) { - case mlir::cir::MemOrder::Relaxed: + case cir::MemOrder::Relaxed: return mlir::LLVM::AtomicOrdering::monotonic; - case mlir::cir::MemOrder::Consume: - case mlir::cir::MemOrder::Acquire: + case cir::MemOrder::Consume: + case cir::MemOrder::Acquire: return mlir::LLVM::AtomicOrdering::acquire; - case mlir::cir::MemOrder::Release: + case cir::MemOrder::Release: return mlir::LLVM::AtomicOrdering::release; - case mlir::cir::MemOrder::AcquireRelease: + case cir::MemOrder::AcquireRelease: return mlir::LLVM::AtomicOrdering::acq_rel; - case mlir::cir::MemOrder::SequentiallyConsistent: + case cir::MemOrder::SequentiallyConsistent: return mlir::LLVM::AtomicOrdering::seq_cst; } llvm_unreachable("shouldn't get here"); } class CIRAtomicCmpXchgLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::AtomicCmpXchg op, OpAdaptor adaptor, + matchAndRewrite(cir::AtomicCmpXchg op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto expected = adaptor.getExpected(); auto desired = adaptor.getDesired(); @@ -3351,12 +3307,12 @@ class CIRAtomicCmpXchgLowering }; class CIRAtomicXchgLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::AtomicXchg op, OpAdaptor adaptor, + matchAndRewrite(cir::AtomicXchg op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // FIXME: add syncscope. auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); @@ -3368,11 +3324,11 @@ class CIRAtomicXchgLowering }; class CIRAtomicFetchLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; - mlir::Value buildPostOp(mlir::cir::AtomicFetch op, OpAdaptor adaptor, + mlir::Value buildPostOp(cir::AtomicFetch op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal, bool isInt) const { SmallVector atomicOperands = {rmwVal, adaptor.getVal()}; @@ -3384,12 +3340,12 @@ class CIRAtomicFetchLowering ->getResult(0); } - mlir::Value buildMinMaxPostOp(mlir::cir::AtomicFetch op, OpAdaptor adaptor, + mlir::Value buildMinMaxPostOp(cir::AtomicFetch op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal, bool isSigned) const { auto loc = op.getLoc(); mlir::LLVM::ICmpPredicate pred; - if (op.getBinop() == mlir::cir::AtomicFetchKind::Max) { + if (op.getBinop() == cir::AtomicFetchKind::Max) { pred = isSigned ? mlir::LLVM::ICmpPredicate::sgt : mlir::LLVM::ICmpPredicate::ugt; } else { // Min @@ -3404,56 +3360,54 @@ class CIRAtomicFetchLowering adaptor.getVal()); } - llvm::StringLiteral getLLVMBinop(mlir::cir::AtomicFetchKind k, - bool isInt) const { + llvm::StringLiteral getLLVMBinop(cir::AtomicFetchKind k, bool isInt) const { switch (k) { - case mlir::cir::AtomicFetchKind::Add: + case cir::AtomicFetchKind::Add: return isInt ? mlir::LLVM::AddOp::getOperationName() : mlir::LLVM::FAddOp::getOperationName(); - case mlir::cir::AtomicFetchKind::Sub: + case cir::AtomicFetchKind::Sub: return isInt ? mlir::LLVM::SubOp::getOperationName() : mlir::LLVM::FSubOp::getOperationName(); - case mlir::cir::AtomicFetchKind::And: + case cir::AtomicFetchKind::And: return mlir::LLVM::AndOp::getOperationName(); - case mlir::cir::AtomicFetchKind::Xor: + case cir::AtomicFetchKind::Xor: return mlir::LLVM::XOrOp::getOperationName(); - case mlir::cir::AtomicFetchKind::Or: + case cir::AtomicFetchKind::Or: return mlir::LLVM::OrOp::getOperationName(); - case mlir::cir::AtomicFetchKind::Nand: + case cir::AtomicFetchKind::Nand: // There's no nand binop in LLVM, this is later fixed with a not. return mlir::LLVM::AndOp::getOperationName(); - case mlir::cir::AtomicFetchKind::Max: - case mlir::cir::AtomicFetchKind::Min: + case cir::AtomicFetchKind::Max: + case cir::AtomicFetchKind::Min: llvm_unreachable("handled in buildMinMaxPostOp"); } llvm_unreachable("Unknown atomic fetch opcode"); } - mlir::LLVM::AtomicBinOp getLLVMAtomicBinOp(mlir::cir::AtomicFetchKind k, - bool isInt, + mlir::LLVM::AtomicBinOp getLLVMAtomicBinOp(cir::AtomicFetchKind k, bool isInt, bool isSignedInt) const { switch (k) { - case mlir::cir::AtomicFetchKind::Add: + case cir::AtomicFetchKind::Add: return isInt ? mlir::LLVM::AtomicBinOp::add : mlir::LLVM::AtomicBinOp::fadd; - case mlir::cir::AtomicFetchKind::Sub: + case cir::AtomicFetchKind::Sub: return isInt ? mlir::LLVM::AtomicBinOp::sub : mlir::LLVM::AtomicBinOp::fsub; - case mlir::cir::AtomicFetchKind::And: + case cir::AtomicFetchKind::And: return mlir::LLVM::AtomicBinOp::_and; - case mlir::cir::AtomicFetchKind::Xor: + case cir::AtomicFetchKind::Xor: return mlir::LLVM::AtomicBinOp::_xor; - case mlir::cir::AtomicFetchKind::Or: + case cir::AtomicFetchKind::Or: return mlir::LLVM::AtomicBinOp::_or; - case mlir::cir::AtomicFetchKind::Nand: + case cir::AtomicFetchKind::Nand: return mlir::LLVM::AtomicBinOp::nand; - case mlir::cir::AtomicFetchKind::Max: { + case cir::AtomicFetchKind::Max: { if (!isInt) return mlir::LLVM::AtomicBinOp::fmax; return isSignedInt ? mlir::LLVM::AtomicBinOp::max : mlir::LLVM::AtomicBinOp::umax; } - case mlir::cir::AtomicFetchKind::Min: { + case cir::AtomicFetchKind::Min: { if (!isInt) return mlir::LLVM::AtomicBinOp::fmin; return isSignedInt ? mlir::LLVM::AtomicBinOp::min @@ -3464,15 +3418,14 @@ class CIRAtomicFetchLowering } mlir::LogicalResult - matchAndRewrite(mlir::cir::AtomicFetch op, OpAdaptor adaptor, + matchAndRewrite(cir::AtomicFetch op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { bool isInt, isSignedInt = false; // otherwise it's float. - if (auto intTy = - mlir::dyn_cast(op.getVal().getType())) { + if (auto intTy = mlir::dyn_cast(op.getVal().getType())) { isInt = true; isSignedInt = intTy.isSigned(); - } else if (mlir::isa( + } else if (mlir::isa( op.getVal().getType())) isInt = false; else { @@ -3488,15 +3441,15 @@ class CIRAtomicFetchLowering mlir::Value result = rmwVal.getRes(); if (!op.getFetchFirst()) { - if (op.getBinop() == mlir::cir::AtomicFetchKind::Max || - op.getBinop() == mlir::cir::AtomicFetchKind::Min) + if (op.getBinop() == cir::AtomicFetchKind::Max || + op.getBinop() == cir::AtomicFetchKind::Min) result = buildMinMaxPostOp(op, adaptor, rewriter, rmwVal.getRes(), isSignedInt); else result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt); // Compensate lack of nand binop in LLVM IR. - if (op.getBinop() == mlir::cir::AtomicFetchKind::Nand) { + if (op.getBinop() == cir::AtomicFetchKind::Nand) { auto negOne = rewriter.create( op.getLoc(), result.getType(), -1); result = @@ -3510,12 +3463,12 @@ class CIRAtomicFetchLowering }; class CIRByteswapOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ByteswapOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ByteswapOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Note that LLVM intrinsic calls to @llvm.bswap.i* have the same type as // the operand. @@ -3532,13 +3485,12 @@ class CIRByteswapOpLowering } }; -class CIRRotateOpLowering - : public mlir::OpConversionPattern { +class CIRRotateOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::RotateOp op, OpAdaptor adaptor, + matchAndRewrite(cir::RotateOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Note that LLVM intrinsic calls to @llvm.fsh{r,l}.i* have the same type as // the operand. @@ -3553,22 +3505,20 @@ class CIRRotateOpLowering } }; -class CIRSelectOpLowering - : public mlir::OpConversionPattern { +class CIRSelectOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::SelectOp op, OpAdaptor adaptor, + matchAndRewrite(cir::SelectOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto getConstantBool = [](mlir::Value value) -> std::optional { - auto definingOp = mlir::dyn_cast_if_present( - value.getDefiningOp()); + auto definingOp = + mlir::dyn_cast_if_present(value.getDefiningOp()); if (!definingOp) return std::nullopt; - auto constValue = - mlir::dyn_cast(definingOp.getValue()); + auto constValue = mlir::dyn_cast(definingOp.getValue()); if (!constValue) return std::nullopt; @@ -3580,7 +3530,7 @@ class CIRSelectOpLowering // - select %0, true, %1 => or %0, %1 auto trueValue = op.getTrueValue(); auto falseValue = op.getFalseValue(); - if (mlir::isa(trueValue.getType())) { + if (mlir::isa(trueValue.getType())) { if (std::optional falseValueBool = getConstantBool(falseValue); falseValueBool.has_value() && !*falseValueBool) { // select %0, %1, false => and %0, %1 @@ -3607,12 +3557,12 @@ class CIRSelectOpLowering } }; -class CIRBrOpLowering : public mlir::OpConversionPattern { +class CIRBrOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BrOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BrOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getOperands(), op.getDest()); @@ -3621,21 +3571,21 @@ class CIRBrOpLowering : public mlir::OpConversionPattern { }; class CIRGetMemberOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::GetMemberOp op, OpAdaptor adaptor, + matchAndRewrite(cir::GetMemberOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto llResTy = getTypeConverter()->convertType(op.getType()); const auto structTy = - mlir::cast(op.getAddrTy().getPointee()); + mlir::cast(op.getAddrTy().getPointee()); assert(structTy && "expected struct type"); switch (structTy.getKind()) { - case mlir::cir::StructType::Struct: - case mlir::cir::StructType::Class: { + case cir::StructType::Struct: + case cir::StructType::Class: { // Since the base address is a pointer to an aggregate, the first offset // is always zero. The second offset tell us which member it will access. llvm::SmallVector offset{0, op.getIndex()}; @@ -3644,7 +3594,7 @@ class CIRGetMemberOpLowering adaptor.getAddr(), offset); return mlir::success(); } - case mlir::cir::StructType::Union: + case cir::StructType::Union: // Union members share the address space, so we just need a bitcast to // conform to type-checking. rewriter.replaceOpWithNewOp(op, llResTy, @@ -3655,13 +3605,12 @@ class CIRGetMemberOpLowering }; class CIRGetRuntimeMemberOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern< - mlir::cir::GetRuntimeMemberOp>::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::GetRuntimeMemberOp op, OpAdaptor adaptor, + matchAndRewrite(cir::GetRuntimeMemberOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto llvmResTy = getTypeConverter()->convertType(op.getType()); auto llvmElementTy = mlir::IntegerType::get(op.getContext(), 8); @@ -3672,23 +3621,22 @@ class CIRGetRuntimeMemberOpLowering } }; -class CIRPtrDiffOpLowering - : public mlir::OpConversionPattern { +class CIRPtrDiffOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; uint64_t getTypeSize(mlir::Type type, mlir::Operation &op) const { mlir::DataLayout layout(op.getParentOfType()); // For LLVM purposes we treat void as u8. - if (isa(type)) - type = mlir::cir::IntType::get(type.getContext(), 8, /*isSigned=*/false); + if (isa(type)) + type = cir::IntType::get(type.getContext(), 8, /*isSigned=*/false); return llvm::divideCeil(layout.getTypeSizeInBits(type), 8); } mlir::LogicalResult - matchAndRewrite(mlir::cir::PtrDiffOp op, OpAdaptor adaptor, + matchAndRewrite(cir::PtrDiffOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto dstTy = mlir::cast(op.getType()); + auto dstTy = mlir::cast(op.getType()); auto llvmDstTy = getTypeConverter()->convertType(dstTy); auto lhs = rewriter.create(op.getLoc(), llvmDstTy, @@ -3699,7 +3647,7 @@ class CIRPtrDiffOpLowering auto diff = rewriter.create(op.getLoc(), llvmDstTy, lhs, rhs); - auto ptrTy = mlir::cast(op.getLhs().getType()); + auto ptrTy = mlir::cast(op.getLhs().getType()); auto typeSize = getTypeSize(ptrTy.getPointee(), *op); // Avoid silly division by 1. @@ -3720,13 +3668,12 @@ class CIRPtrDiffOpLowering } }; -class CIRExpectOpLowering - : public mlir::OpConversionPattern { +class CIRExpectOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ExpectOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ExpectOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { std::optional prob = op.getProb(); if (!prob) @@ -3740,12 +3687,12 @@ class CIRExpectOpLowering }; class CIRVTableAddrPointOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VTableAddrPointOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VTableAddrPointOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { const auto *converter = getTypeConverter(); auto targetType = converter->convertType(op.getType()); @@ -3774,12 +3721,12 @@ class CIRVTableAddrPointOpLowering }; class CIRStackSaveLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::StackSaveOp op, OpAdaptor adaptor, + matchAndRewrite(cir::StackSaveOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto ptrTy = getTypeConverter()->convertType(op.getType()); rewriter.replaceOpWithNewOp(op, ptrTy); @@ -3791,24 +3738,24 @@ class CIRStackSaveLowering #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" class CIRUnreachableLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::UnreachableOp op, OpAdaptor adaptor, + matchAndRewrite(cir::UnreachableOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op); return mlir::success(); } }; -class CIRTrapLowering : public mlir::OpConversionPattern { +class CIRTrapLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::TrapOp op, OpAdaptor adaptor, + matchAndRewrite(cir::TrapOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto loc = op->getLoc(); rewriter.eraseOp(op); @@ -3825,19 +3772,19 @@ class CIRTrapLowering : public mlir::OpConversionPattern { }; class CIRInlineAsmOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::InlineAsmOp op, OpAdaptor adaptor, + matchAndRewrite(cir::InlineAsmOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Type llResTy; if (op.getNumResults()) llResTy = getTypeConverter()->convertType(op.getType(0)); auto dialect = op.getAsmFlavor(); - auto llDialect = dialect == mlir::cir::AsmFlavor::x86_att + auto llDialect = dialect == cir::AsmFlavor::x86_att ? mlir::LLVM::AsmDialect::AD_ATT : mlir::LLVM::AsmDialect::AD_Intel; @@ -3869,7 +3816,7 @@ class CIRInlineAsmOpLowering } std::vector attrs; - auto typ = cast(cirOperands[i].getType()); + auto typ = cast(cirOperands[i].getType()); auto typAttr = mlir::TypeAttr::get( getTypeConverter()->convertType(typ.getPointee())); @@ -3889,13 +3836,12 @@ class CIRInlineAsmOpLowering } }; -class CIRPrefetchLowering - : public mlir::OpConversionPattern { +class CIRPrefetchLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::PrefetchOp op, OpAdaptor adaptor, + matchAndRewrite(cir::PrefetchOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp( op, adaptor.getAddr(), adaptor.getIsWrite(), adaptor.getLocality(), @@ -3905,12 +3851,12 @@ class CIRPrefetchLowering }; class CIRSetBitfieldLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::SetBitfieldOp op, OpAdaptor adaptor, + matchAndRewrite(cir::SetBitfieldOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint(op); @@ -3923,9 +3869,9 @@ class CIRSetBitfieldLowering unsigned storageSize = 0; - if (auto arTy = mlir::dyn_cast(storageType)) + if (auto arTy = mlir::dyn_cast(storageType)) storageSize = arTy.getSize() * 8; - else if (auto intTy = mlir::dyn_cast(storageType)) + else if (auto intTy = mlir::dyn_cast(storageType)) storageSize = intTy.getWidth(); else llvm_unreachable( @@ -3981,12 +3927,12 @@ class CIRSetBitfieldLowering }; class CIRGetBitfieldLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::GetBitfieldOp op, OpAdaptor adaptor, + matchAndRewrite(cir::GetBitfieldOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::OpBuilder::InsertionGuard guard(rewriter); @@ -3999,9 +3945,9 @@ class CIRGetBitfieldLowering auto context = storageType.getContext(); unsigned storageSize = 0; - if (auto arTy = mlir::dyn_cast(storageType)) + if (auto arTy = mlir::dyn_cast(storageType)) storageSize = arTy.getSize() * 8; - else if (auto intTy = mlir::dyn_cast(storageType)) + else if (auto intTy = mlir::dyn_cast(storageType)) storageSize = intTy.getWidth(); else llvm_unreachable( @@ -4036,12 +3982,12 @@ class CIRGetBitfieldLowering }; class CIRIsConstantOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::IsConstantOp op, OpAdaptor adaptor, + matchAndRewrite(cir::IsConstantOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // FIXME(cir): llvm.intr.is.constant returns i1 value but the LLVM Lowering // expects that cir.bool type will be lowered as i8 type. @@ -4055,13 +4001,12 @@ class CIRIsConstantOpLowering }; class CIRCmpThreeWayOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern< - mlir::cir::CmpThreeWayOp>::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CmpThreeWayOp op, OpAdaptor adaptor, + matchAndRewrite(cir::CmpThreeWayOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { if (!op.isIntegralComparison() || !op.isStrongOrdering()) { op.emitError() << "unsupported three-way comparison type"; @@ -4072,7 +4017,7 @@ class CIRCmpThreeWayOpLowering assert(cmpInfo.getLt() == -1 && cmpInfo.getEq() == 0 && cmpInfo.getGt() == 1); - auto operandTy = mlir::cast(op.getLhs().getType()); + auto operandTy = mlir::cast(op.getLhs().getType()); auto resultTy = op.getType(); auto llvmIntrinsicName = getLLVMIntrinsicName( operandTy.isSigned(), operandTy.getWidth(), resultTy.getWidth()); @@ -4117,12 +4062,12 @@ class CIRCmpThreeWayOpLowering }; class CIRReturnAddrOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ReturnAddrOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ReturnAddrOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm.returnaddress", @@ -4132,12 +4077,12 @@ class CIRReturnAddrOpLowering }; class CIRClearCacheOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ClearCacheOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ClearCacheOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto begin = adaptor.getBegin(); auto end = adaptor.getEnd(); @@ -4151,12 +4096,12 @@ class CIRClearCacheOpLowering }; class CIREhTypeIdOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::EhTypeIdOp op, OpAdaptor adaptor, + matchAndRewrite(cir::EhTypeIdOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Value addrOp = rewriter.create( op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), @@ -4170,12 +4115,12 @@ class CIREhTypeIdOpLowering }; class CIRCatchParamOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CatchParamOp op, OpAdaptor adaptor, + matchAndRewrite(cir::CatchParamOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { if (op.isBegin()) { // Get or create `declare ptr @__cxa_begin_catch(ptr)` @@ -4204,13 +4149,12 @@ class CIRCatchParamOpLowering } }; -class CIRResumeOpLowering - : public mlir::OpConversionPattern { +class CIRResumeOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ResumeOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ResumeOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // %lpad.val = insertvalue { ptr, i32 } poison, ptr %exception_ptr, 0 // %lpad.val2 = insertvalue { ptr, i32 } %lpad.val, i32 %selector, 1 @@ -4232,12 +4176,12 @@ class CIRResumeOpLowering }; class CIRAllocExceptionOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::AllocExceptionOp op, OpAdaptor adaptor, + matchAndRewrite(cir::AllocExceptionOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Get or create `declare ptr @__cxa_allocate_exception(i64)` StringRef fnName = "__cxa_allocate_exception"; @@ -4255,12 +4199,12 @@ class CIRAllocExceptionOpLowering }; class CIRFreeExceptionOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::FreeExceptionOp op, OpAdaptor adaptor, + matchAndRewrite(cir::FreeExceptionOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Get or create `declare void @__cxa_free_exception(ptr)` StringRef fnName = "__cxa_free_exception"; @@ -4275,13 +4219,12 @@ class CIRFreeExceptionOpLowering } }; -class CIRThrowOpLowering - : public mlir::OpConversionPattern { +class CIRThrowOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ThrowOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ThrowOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Get or create `declare void @__cxa_throw(ptr, ptr, ptr)` StringRef fnName = "__cxa_throw"; @@ -4312,12 +4255,12 @@ class CIRThrowOpLowering }; class CIRIsFPClassOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::IsFPClassOp op, OpAdaptor adaptor, + matchAndRewrite(cir::IsFPClassOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto src = adaptor.getSrc(); auto flags = adaptor.getFlags(); @@ -4338,12 +4281,12 @@ class CIRIsFPClassOpLowering } }; -class CIRAbsOpLowering : public mlir::OpConversionPattern { +class CIRAbsOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::AbsOp op, OpAdaptor adaptor, + matchAndRewrite(cir::AbsOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto resTy = this->getTypeConverter()->convertType(op.getType()); auto absOp = rewriter.create( @@ -4409,85 +4352,84 @@ void populateCIRToLLVMConversionPatterns( namespace { -std::unique_ptr -prepareLowerModule(mlir::ModuleOp module) { +std::unique_ptr prepareLowerModule(mlir::ModuleOp module) { mlir::PatternRewriter rewriter{module->getContext()}; // If the triple is not present, e.g. CIR modules parsed from text, we // cannot init LowerModule properly. - assert(!::cir::MissingFeatures::makeTripleAlwaysPresent()); + assert(!cir::MissingFeatures::makeTripleAlwaysPresent()); if (!module->hasAttr("cir.triple")) return {}; - return mlir::cir::createLowerModule(module, rewriter); + return cir::createLowerModule(module, rewriter); } // FIXME: change the type of lowerModule to `LowerModule &` to have better // lambda capturing experience. Also blocked by makeTripleAlwaysPresent. void prepareTypeConverter(mlir::LLVMTypeConverter &converter, mlir::DataLayout &dataLayout, - mlir::cir::LowerModule *lowerModule) { - converter.addConversion([&, lowerModule]( - mlir::cir::PointerType type) -> mlir::Type { - // Drop pointee type since LLVM dialect only allows opaque pointers. - - auto addrSpace = - mlir::cast_if_present(type.getAddrSpace()); - // Null addrspace attribute indicates the default addrspace. - if (!addrSpace) - return mlir::LLVM::LLVMPointerType::get(type.getContext()); - - assert(lowerModule && "CIR AS map is not available"); - // Pass through target addrspace and map CIR addrspace to LLVM addrspace by - // querying the target info. - unsigned targetAS = - addrSpace.isTarget() - ? addrSpace.getTargetValue() - : lowerModule->getTargetLoweringInfo() - .getTargetAddrSpaceFromCIRAddrSpace(addrSpace); - - return mlir::LLVM::LLVMPointerType::get(type.getContext(), targetAS); - }); - converter.addConversion([&](mlir::cir::DataMemberType type) -> mlir::Type { + cir::LowerModule *lowerModule) { + converter.addConversion( + [&, lowerModule](cir::PointerType type) -> mlir::Type { + // Drop pointee type since LLVM dialect only allows opaque pointers. + + auto addrSpace = + mlir::cast_if_present(type.getAddrSpace()); + // Null addrspace attribute indicates the default addrspace. + if (!addrSpace) + return mlir::LLVM::LLVMPointerType::get(type.getContext()); + + assert(lowerModule && "CIR AS map is not available"); + // Pass through target addrspace and map CIR addrspace to LLVM addrspace + // by querying the target info. + unsigned targetAS = + addrSpace.isTarget() + ? addrSpace.getTargetValue() + : lowerModule->getTargetLoweringInfo() + .getTargetAddrSpaceFromCIRAddrSpace(addrSpace); + + return mlir::LLVM::LLVMPointerType::get(type.getContext(), targetAS); + }); + converter.addConversion([&](cir::DataMemberType type) -> mlir::Type { return mlir::IntegerType::get(type.getContext(), dataLayout.getTypeSizeInBits(type)); }); - converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { + converter.addConversion([&](cir::ArrayType type) -> mlir::Type { auto ty = converter.convertType(type.getEltType()); return mlir::LLVM::LLVMArrayType::get(ty, type.getSize()); }); - converter.addConversion([&](mlir::cir::VectorType type) -> mlir::Type { + converter.addConversion([&](cir::VectorType type) -> mlir::Type { auto ty = converter.convertType(type.getEltType()); return mlir::LLVM::getFixedVectorType(ty, type.getSize()); }); - converter.addConversion([&](mlir::cir::BoolType type) -> mlir::Type { + converter.addConversion([&](cir::BoolType type) -> mlir::Type { return mlir::IntegerType::get(type.getContext(), 8, mlir::IntegerType::Signless); }); - converter.addConversion([&](mlir::cir::IntType type) -> mlir::Type { + converter.addConversion([&](cir::IntType type) -> mlir::Type { // LLVM doesn't work with signed types, so we drop the CIR signs here. return mlir::IntegerType::get(type.getContext(), type.getWidth()); }); - converter.addConversion([&](mlir::cir::SingleType type) -> mlir::Type { + converter.addConversion([&](cir::SingleType type) -> mlir::Type { return mlir::Float32Type::get(type.getContext()); }); - converter.addConversion([&](mlir::cir::DoubleType type) -> mlir::Type { + converter.addConversion([&](cir::DoubleType type) -> mlir::Type { return mlir::Float64Type::get(type.getContext()); }); - converter.addConversion([&](mlir::cir::FP80Type type) -> mlir::Type { + converter.addConversion([&](cir::FP80Type type) -> mlir::Type { return mlir::Float80Type::get(type.getContext()); }); - converter.addConversion([&](mlir::cir::FP128Type type) -> mlir::Type { + converter.addConversion([&](cir::FP128Type type) -> mlir::Type { return mlir::Float128Type::get(type.getContext()); }); - converter.addConversion([&](mlir::cir::LongDoubleType type) -> mlir::Type { + converter.addConversion([&](cir::LongDoubleType type) -> mlir::Type { return converter.convertType(type.getUnderlying()); }); - converter.addConversion([&](mlir::cir::FP16Type type) -> mlir::Type { + converter.addConversion([&](cir::FP16Type type) -> mlir::Type { return mlir::Float16Type::get(type.getContext()); }); - converter.addConversion([&](mlir::cir::BF16Type type) -> mlir::Type { - return mlir::Float16Type::get(type.getContext()); + converter.addConversion([&](cir::BF16Type type) -> mlir::Type { + return mlir::BFloat16Type::get(type.getContext()); }); - converter.addConversion([&](mlir::cir::ComplexType type) -> mlir::Type { + converter.addConversion([&](cir::ComplexType type) -> mlir::Type { // A complex type is lowered to an LLVM struct that contains the real and // imaginary part as data fields. mlir::Type elementTy = converter.convertType(type.getElementTy()); @@ -4495,7 +4437,7 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, return mlir::LLVM::LLVMStructType::getLiteral(type.getContext(), structFields); }); - converter.addConversion([&](mlir::cir::FuncType type) -> mlir::Type { + converter.addConversion([&](cir::FuncType type) -> mlir::Type { auto result = converter.convertType(type.getReturnType()); llvm::SmallVector arguments; if (converter.convertTypes(type.getInputs(), arguments).failed()) @@ -4503,19 +4445,19 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, auto varArg = type.isVarArg(); return mlir::LLVM::LLVMFunctionType::get(result, arguments, varArg); }); - converter.addConversion([&](mlir::cir::StructType type) -> mlir::Type { + converter.addConversion([&](cir::StructType type) -> mlir::Type { // FIXME(cir): create separate unions, struct, and classes types. // Convert struct members. llvm::SmallVector llvmMembers; switch (type.getKind()) { - case mlir::cir::StructType::Class: + case cir::StructType::Class: // TODO(cir): This should be properly validated. - case mlir::cir::StructType::Struct: + case cir::StructType::Struct: for (auto ty : type.getMembers()) llvmMembers.push_back(converter.convertType(ty)); break; // Unions are lowered as only the largest member. - case mlir::cir::StructType::Union: { + case cir::StructType::Union: { auto largestMember = type.getLargestMember(dataLayout); if (largestMember) llvmMembers.push_back(converter.convertType(largestMember)); @@ -4538,7 +4480,7 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, return llvmStruct; }); - converter.addConversion([&](mlir::cir::VoidType type) -> mlir::Type { + converter.addConversion([&](cir::VoidType type) -> mlir::Type { return mlir::LLVM::LLVMVoidType::get(type.getContext()); }); } @@ -4677,7 +4619,7 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar( if (!attr) return; if (auto globalAnnotValues = - mlir::dyn_cast(attr)) { + mlir::dyn_cast(attr)) { auto annotationValuesArray = mlir::dyn_cast(globalAnnotValues.getAnnotations()); if (!annotationValuesArray || annotationValuesArray.empty()) @@ -4734,8 +4676,8 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar( moduleLoc, annoPtrTy, globalValueName); vals.push_back(globalValueFld->getResult(0)); - mlir::cir::AnnotationAttr annot = - mlir::cast(annotValue[1]); + cir::AnnotationAttr annot = + mlir::cast(annotValue[1]); lowerAnnotationValue(moduleLoc, globalValue->getLoc(), annot, module, varInitBuilder, globalVarBuilder, stringGlobalsMap, argStringGlobalsMap, argsVarMap, vals); @@ -4757,8 +4699,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { auto module = getOperation(); mlir::DataLayout dataLayout(module); mlir::LLVMTypeConverter converter(&getContext()); - std::unique_ptr lowerModule = - prepareLowerModule(module); + std::unique_ptr lowerModule = prepareLowerModule(module); prepareTypeConverter(converter, dataLayout, lowerModule.get()); mlir::RewritePatternSet patterns(&getContext()); @@ -4778,7 +4719,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { mlir::populateFuncToLLVMConversionPatterns(converter, patterns); mlir::ConversionTarget target(getContext()); - using namespace mlir::cir; + using namespace cir; // clang-format off target.addLegalOp(); // clang-format on target.addLegalDialect(); - target.addIllegalDialect(); // Allow operations that will be lowered directly to LLVM IR. @@ -4813,23 +4754,23 @@ void ConvertCIRToLLVMPass::runOnOperation() { signalPassFailure(); // Emit the llvm.global_ctors array. - buildCtorDtorList( - module, "cir.global_ctors", "llvm.global_ctors", - [](mlir::Attribute attr) { - assert(mlir::isa(attr) && - "must be a GlobalCtorAttr"); - auto ctorAttr = mlir::cast(attr); - return std::make_pair(ctorAttr.getName(), ctorAttr.getPriority()); - }); + buildCtorDtorList(module, "cir.global_ctors", "llvm.global_ctors", + [](mlir::Attribute attr) { + assert(mlir::isa(attr) && + "must be a GlobalCtorAttr"); + auto ctorAttr = mlir::cast(attr); + return std::make_pair(ctorAttr.getName(), + ctorAttr.getPriority()); + }); // Emit the llvm.global_dtors array. - buildCtorDtorList( - module, "cir.global_dtors", "llvm.global_dtors", - [](mlir::Attribute attr) { - assert(mlir::isa(attr) && - "must be a GlobalDtorAttr"); - auto dtorAttr = mlir::cast(attr); - return std::make_pair(dtorAttr.getName(), dtorAttr.getPriority()); - }); + buildCtorDtorList(module, "cir.global_dtors", "llvm.global_dtors", + [](mlir::Attribute attr) { + assert(mlir::isa(attr) && + "must be a GlobalDtorAttr"); + auto dtorAttr = mlir::cast(attr); + return std::make_pair(dtorAttr.getName(), + dtorAttr.getPriority()); + }); buildGlobalAnnotationsVar(stringGlobalsMap, argStringGlobalsMap, argsVarMap); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index 7b520ab2d72e..ca0b498f9f2f 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -68,8 +68,8 @@ class CIRDialectLLVMIRTranslationInterface llvm::Module *llvmModule = moduleTranslation.getLLVMModule(); llvm::LLVMContext &llvmContext = llvmModule->getContext(); - if (auto openclVersionAttr = mlir::dyn_cast( - attribute.getValue())) { + if (auto openclVersionAttr = + mlir::dyn_cast(attribute.getValue())) { auto *int32Ty = llvm::IntegerType::get(llvmContext, 32); llvm::Metadata *oclVerElts[] = { llvm::ConstantAsMetadata::get( @@ -92,11 +92,11 @@ class CIRDialectLLVMIRTranslationInterface mlir::LLVM::ModuleTranslation &moduleTranslation) const { llvm::Function *llvmFunc = moduleTranslation.lookupFunction(func.getName()); llvm::LLVMContext &llvmCtx = moduleTranslation.getLLVMContext(); - if (auto extraAttr = mlir::dyn_cast( + if (auto extraAttr = mlir::dyn_cast( attribute.getValue())) { for (auto attr : extraAttr.getElements()) { if (auto inlineAttr = - mlir::dyn_cast(attr.getValue())) { + mlir::dyn_cast(attr.getValue())) { if (inlineAttr.isNoInline()) llvmFunc->addFnAttr(llvm::Attribute::NoInline); else if (inlineAttr.isAlwaysInline()) @@ -105,28 +105,27 @@ class CIRDialectLLVMIRTranslationInterface llvmFunc->addFnAttr(llvm::Attribute::InlineHint); else llvm_unreachable("Unknown inline kind"); - } else if (mlir::dyn_cast(attr.getValue())) { + } else if (mlir::dyn_cast(attr.getValue())) { llvmFunc->addFnAttr(llvm::Attribute::OptimizeNone); - } else if (mlir::dyn_cast(attr.getValue())) { + } else if (mlir::dyn_cast(attr.getValue())) { llvmFunc->addFnAttr(llvm::Attribute::NoUnwind); - } else if (mlir::dyn_cast(attr.getValue())) { + } else if (mlir::dyn_cast(attr.getValue())) { llvmFunc->addFnAttr(llvm::Attribute::Convergent); - } else if (mlir::dyn_cast( - attr.getValue())) { + } else if (mlir::dyn_cast(attr.getValue())) { const auto uniformAttrName = - mlir::cir::OpenCLKernelUniformWorkGroupSizeAttr::getMnemonic(); + cir::OpenCLKernelUniformWorkGroupSizeAttr::getMnemonic(); const bool isUniform = extraAttr.getElements().getNamed(uniformAttrName).has_value(); auto attrs = llvmFunc->getAttributes().addFnAttribute( llvmCtx, "uniform-work-group-size", isUniform ? "true" : "false"); llvmFunc->setAttributes(attrs); } else if (auto clKernelMetadata = - mlir::dyn_cast( + mlir::dyn_cast( attr.getValue())) { emitOpenCLKernelMetadata(clKernelMetadata, llvmFunc, moduleTranslation); } else if (auto clArgMetadata = - mlir::dyn_cast( + mlir::dyn_cast( attr.getValue())) { emitOpenCLKernelArgMetadata(clArgMetadata, func.getNumArguments(), llvmFunc, moduleTranslation); @@ -139,8 +138,7 @@ class CIRDialectLLVMIRTranslationInterface } void emitOpenCLKernelMetadata( - mlir::cir::OpenCLKernelMetadataAttr clKernelMetadata, - llvm::Function *llvmFunc, + cir::OpenCLKernelMetadataAttr clKernelMetadata, llvm::Function *llvmFunc, mlir::LLVM::ModuleTranslation &moduleTranslation) const { auto &vmCtx = moduleTranslation.getLLVMContext(); @@ -192,7 +190,7 @@ class CIRDialectLLVMIRTranslationInterface } void emitOpenCLKernelArgMetadata( - mlir::cir::OpenCLKernelArgMetadataAttr clArgMetadata, unsigned numArgs, + cir::OpenCLKernelArgMetadataAttr clArgMetadata, unsigned numArgs, llvm::Function *llvmFunc, mlir::LLVM::ModuleTranslation &moduleTranslation) const { auto &vmCtx = moduleTranslation.getLLVMContext(); @@ -266,11 +264,10 @@ class CIRDialectLLVMIRTranslationInterface }; void registerCIRDialectTranslation(mlir::DialectRegistry ®istry) { - registry.insert(); - registry.addExtension( - +[](mlir::MLIRContext *ctx, mlir::cir::CIRDialect *dialect) { - dialect->addInterfaces(); - }); + registry.insert(); + registry.addExtension(+[](mlir::MLIRContext *ctx, cir::CIRDialect *dialect) { + dialect->addInterfaces(); + }); } void registerCIRDialectTranslation(mlir::MLIRContext &context) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h b/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h index 46de5dfc7634..0bf68b47189c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LoweringHelpers.h @@ -13,12 +13,10 @@ #include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" -using namespace llvm; - mlir::Value createIntCast(mlir::OpBuilder &bld, mlir::Value src, mlir::IntegerType dstTy, bool isSigned = false) { auto srcTy = src.getType(); - assert(isa(srcTy)); + assert(mlir::isa(srcTy)); auto srcWidth = mlir::cast(srcTy).getWidth(); auto dstWidth = mlir::cast(dstTy).getWidth(); diff --git a/clang/lib/CIR/Lowering/LoweringHelpers.cpp b/clang/lib/CIR/Lowering/LoweringHelpers.cpp index 98d5158ea716..26c2945105cc 100644 --- a/clang/lib/CIR/Lowering/LoweringHelpers.cpp +++ b/clang/lib/CIR/Lowering/LoweringHelpers.cpp @@ -12,14 +12,14 @@ #include "clang/CIR/LoweringHelpers.h" mlir::DenseElementsAttr -convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, +convertStringAttrToDenseElementsAttr(cir::ConstArrayAttr attr, mlir::Type type) { auto values = llvm::SmallVector{}; auto stringAttr = mlir::dyn_cast(attr.getElts()); assert(stringAttr && "expected string attribute here"); for (auto element : stringAttr) values.push_back({8, (uint64_t)element}); - auto arrayTy = mlir::dyn_cast(attr.getType()); + auto arrayTy = mlir::dyn_cast(attr.getType()); assert(arrayTy && "String attribute must have an array type"); if (arrayTy.getSize() != stringAttr.size()) llvm_unreachable("array type of the length not equal to that of the string " @@ -30,17 +30,17 @@ convertStringAttrToDenseElementsAttr(mlir::cir::ConstArrayAttr attr, } template <> mlir::APInt getZeroInitFromType(mlir::Type Ty) { - assert(mlir::isa(Ty) && "expected int type"); - auto IntTy = mlir::cast(Ty); + assert(mlir::isa(Ty) && "expected int type"); + auto IntTy = mlir::cast(Ty); return mlir::APInt::getZero(IntTy.getWidth()); } template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { - assert((mlir::isa(Ty)) && + assert((mlir::isa(Ty)) && "only float and double supported"); - if (Ty.isF32() || mlir::isa(Ty)) + if (Ty.isF32() || mlir::isa(Ty)) return mlir::APFloat(0.f); - if (Ty.isF64() || mlir::isa(Ty)) + if (Ty.isF64() || mlir::isa(Ty)) return mlir::APFloat(0.0); llvm_unreachable("NYI"); } @@ -52,13 +52,13 @@ template <> mlir::APFloat getZeroInitFromType(mlir::Type Ty) { /// \param currentIndex the current index in the values array template void convertToDenseElementsAttrImpl( - mlir::cir::ConstArrayAttr attr, llvm::SmallVectorImpl &values, + cir::ConstArrayAttr attr, llvm::SmallVectorImpl &values, const llvm::SmallVectorImpl ¤tDims, int64_t dimIndex, int64_t currentIndex) { if (auto stringAttr = mlir::dyn_cast(attr.getElts())) { - if (auto arrayType = mlir::dyn_cast(attr.getType())) { + if (auto arrayType = mlir::dyn_cast(attr.getType())) { for (auto element : stringAttr) { - auto intAttr = mlir::cir::IntAttr::get(arrayType.getEltType(), element); + auto intAttr = cir::IntAttr::get(arrayType.getEltType(), element); values[currentIndex++] = mlir::dyn_cast(intAttr).getValue(); } return; @@ -77,15 +77,14 @@ void convertToDenseElementsAttrImpl( continue; } - if (auto subArrayAttr = - mlir::dyn_cast(eltAttr)) { + if (auto subArrayAttr = mlir::dyn_cast(eltAttr)) { convertToDenseElementsAttrImpl(subArrayAttr, values, currentDims, dimIndex, currentIndex); currentIndex += elementsSizeInCurrentDim; continue; } - if (mlir::isa(eltAttr)) { + if (mlir::isa(eltAttr)) { currentIndex += elementsSizeInCurrentDim; continue; } @@ -96,7 +95,7 @@ void convertToDenseElementsAttrImpl( template mlir::DenseElementsAttr convertToDenseElementsAttr( - mlir::cir::ConstArrayAttr attr, const llvm::SmallVectorImpl &dims, + cir::ConstArrayAttr attr, const llvm::SmallVectorImpl &dims, mlir::Type elementType, mlir::Type convertedElementType) { unsigned vector_size = 1; for (auto dim : dims) @@ -111,7 +110,7 @@ mlir::DenseElementsAttr convertToDenseElementsAttr( } std::optional -lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, +lowerConstArrayAttr(cir::ConstArrayAttr constArr, const mlir::TypeConverter *converter) { // Ensure ConstArrayAttr has a type. @@ -119,14 +118,13 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, assert(typedConstArr && "cir::ConstArrayAttr is not a mlir::TypedAttr"); // Ensure ConstArrayAttr type is a ArrayType. - auto cirArrayType = - mlir::dyn_cast(typedConstArr.getType()); + auto cirArrayType = mlir::dyn_cast(typedConstArr.getType()); assert(cirArrayType && "cir::ConstArrayAttr is not a cir::ArrayType"); // Is a ConstArrayAttr with an cir::ArrayType: fetch element type. mlir::Type type = cirArrayType; auto dims = llvm::SmallVector{}; - while (auto arrayType = mlir::dyn_cast(type)) { + while (auto arrayType = mlir::dyn_cast(type)) { dims.push_back(arrayType.getSize()); type = arrayType.getEltType(); } @@ -134,11 +132,11 @@ lowerConstArrayAttr(mlir::cir::ConstArrayAttr constArr, if (mlir::isa(constArr.getElts())) return convertStringAttrToDenseElementsAttr(constArr, converter->convertType(type)); - if (mlir::isa(type)) - return convertToDenseElementsAttr( + if (mlir::isa(type)) + return convertToDenseElementsAttr( constArr, dims, type, converter->convertType(type)); - if (mlir::isa(type)) - return convertToDenseElementsAttr( + if (mlir::isa(type)) + return convertToDenseElementsAttr( constArr, dims, type, converter->convertType(type)); return std::nullopt; diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp index 1b6eba94c5ea..16252e1058cd 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp @@ -36,7 +36,7 @@ namespace cir { class SCFLoop { public: - SCFLoop(mlir::cir::ForOp op, mlir::ConversionPatternRewriter *rewriter) + SCFLoop(cir::ForOp op, mlir::ConversionPatternRewriter *rewriter) : forOp(op), rewriter(rewriter) {} int64_t getStep() { return step; } @@ -44,7 +44,7 @@ class SCFLoop { mlir::Value getUpperBound() { return upperBound; } int64_t findStepAndIV(mlir::Value &addr); - mlir::cir::CmpOp findCmpOp(); + cir::CmpOp findCmpOp(); mlir::Value findIVInitValue(); void analysis(); @@ -52,8 +52,8 @@ class SCFLoop { void transferToSCFForOp(); private: - mlir::cir::ForOp forOp; - mlir::cir::CmpOp cmpOp; + cir::ForOp forOp; + cir::CmpOp cmpOp; mlir::Value IVAddr, lowerBound = nullptr, upperBound = nullptr; mlir::ConversionPatternRewriter *rewriter; int64_t step = 0; @@ -61,33 +61,33 @@ class SCFLoop { class SCFWhileLoop { public: - SCFWhileLoop(mlir::cir::WhileOp op, mlir::cir::WhileOp::Adaptor adaptor, + SCFWhileLoop(cir::WhileOp op, cir::WhileOp::Adaptor adaptor, mlir::ConversionPatternRewriter *rewriter) : whileOp(op), adaptor(adaptor), rewriter(rewriter) {} void transferToSCFWhileOp(); private: - mlir::cir::WhileOp whileOp; - mlir::cir::WhileOp::Adaptor adaptor; + cir::WhileOp whileOp; + cir::WhileOp::Adaptor adaptor; mlir::ConversionPatternRewriter *rewriter; }; class SCFDoLoop { public: - SCFDoLoop(mlir::cir::DoWhileOp op, mlir::cir::DoWhileOp::Adaptor adaptor, + SCFDoLoop(cir::DoWhileOp op, cir::DoWhileOp::Adaptor adaptor, mlir::ConversionPatternRewriter *rewriter) : DoOp(op), adaptor(adaptor), rewriter(rewriter) {} void transferToSCFWhileOp(); private: - mlir::cir::DoWhileOp DoOp; - mlir::cir::DoWhileOp::Adaptor adaptor; + cir::DoWhileOp DoOp; + cir::DoWhileOp::Adaptor adaptor; mlir::ConversionPatternRewriter *rewriter; }; -static int64_t getConstant(mlir::cir::ConstantOp op) { +static int64_t getConstant(cir::ConstantOp op) { auto attr = op->getAttrs().front().getValue(); - const auto IntAttr = mlir::dyn_cast(attr); + const auto IntAttr = mlir::dyn_cast(attr); return IntAttr.getValue().getSExtValue(); } @@ -100,28 +100,28 @@ int64_t SCFLoop::findStepAndIV(mlir::Value &addr) { mlir::Value IV = nullptr; // Try to match "IV load addr; ++IV; store IV, addr" to find step. for (mlir::Operation &op : *stepBlock) - if (auto loadOp = dyn_cast(op)) { + if (auto loadOp = dyn_cast(op)) { addr = loadOp.getAddr(); IV = loadOp.getResult(); - } else if (auto cop = dyn_cast(op)) { + } else if (auto cop = dyn_cast(op)) { if (step) llvm_unreachable( "Not support multiple constant in step calculation yet"); step = getConstant(cop); - } else if (auto bop = dyn_cast(op)) { + } else if (auto bop = dyn_cast(op)) { if (bop.getLhs() != IV) llvm_unreachable("Find BinOp not operate on IV"); - if (bop.getKind() != mlir::cir::BinOpKind::Add) + if (bop.getKind() != cir::BinOpKind::Add) llvm_unreachable( "Not support BinOp other than Add in step calculation yet"); - } else if (auto uop = dyn_cast(op)) { + } else if (auto uop = dyn_cast(op)) { if (uop.getInput() != IV) llvm_unreachable("Find UnaryOp not operate on IV"); - if (uop.getKind() == mlir::cir::UnaryOpKind::Inc) + if (uop.getKind() == cir::UnaryOpKind::Inc) step = 1; - else if (uop.getKind() == mlir::cir::UnaryOpKind::Dec) + else if (uop.getKind() == cir::UnaryOpKind::Dec) llvm_unreachable("Not support decrement step yet"); - } else if (auto storeOp = dyn_cast(op)) { + } else if (auto storeOp = dyn_cast(op)) { assert(storeOp.getAddr() == addr && "Can't find IV when lowering ForOp"); } assert(step && "Can't find step when lowering ForOp"); @@ -132,7 +132,7 @@ int64_t SCFLoop::findStepAndIV(mlir::Value &addr) { static bool isIVLoad(mlir::Operation *op, mlir::Value IVAddr) { if (!op) return false; - if (isa(op)) { + if (isa(op)) { if (!op->getOperand(0)) return false; if (op->getOperand(0) == IVAddr) @@ -141,15 +141,15 @@ static bool isIVLoad(mlir::Operation *op, mlir::Value IVAddr) { return false; } -mlir::cir::CmpOp SCFLoop::findCmpOp() { +cir::CmpOp SCFLoop::findCmpOp() { cmpOp = nullptr; for (auto *user : IVAddr.getUsers()) { if (user->getParentRegion() != &forOp.getCond()) continue; - if (auto loadOp = dyn_cast(*user)) { + if (auto loadOp = dyn_cast(*user)) { if (!loadOp->hasOneUse()) continue; - if (auto op = dyn_cast(*loadOp->user_begin())) { + if (auto op = dyn_cast(*loadOp->user_begin())) { cmpOp = op; break; } @@ -159,7 +159,7 @@ mlir::cir::CmpOp SCFLoop::findCmpOp() { llvm_unreachable("Can't find loop CmpOp"); auto type = cmpOp.getLhs().getType(); - if (!mlir::isa(type)) + if (!mlir::isa(type)) llvm_unreachable("Non-integer type IV is not supported"); auto lhsDefOp = cmpOp.getLhs().getDefiningOp(); @@ -168,8 +168,8 @@ mlir::cir::CmpOp SCFLoop::findCmpOp() { if (!isIVLoad(lhsDefOp, IVAddr)) llvm_unreachable("cmpOp LHS is not IV"); - if (cmpOp.getKind() != mlir::cir::CmpOpKind::le && - cmpOp.getKind() != mlir::cir::CmpOpKind::lt) + if (cmpOp.getKind() != cir::CmpOpKind::le && + cmpOp.getKind() != cir::CmpOpKind::lt) llvm_unreachable("Not support lowering other than le or lt comparison"); return cmpOp; @@ -211,9 +211,9 @@ void SCFLoop::analysis() { if (step > 0) { lowerBound = IVInit; - if (cmpOp.getKind() == mlir::cir::CmpOpKind::lt) + if (cmpOp.getKind() == cir::CmpOpKind::lt) upperBound = IVEndBound; - else if (cmpOp.getKind() == mlir::cir::CmpOpKind::le) + else if (cmpOp.getKind() == cir::CmpOpKind::le) upperBound = plusConstant(IVEndBound, cmpOp.getLoc(), 1); } assert(lowerBound && "can't find loop lower bound"); @@ -233,8 +233,7 @@ void SCFLoop::transferToSCFForOp() { rewriter->inlineBlockBefore(&forOp.getBody().front(), scfForOp.getBody(), scfForOp.getBody()->end(), bbArg); scfForOp->walk([&](mlir::Operation *op) { - if (isa(op) || isa(op) || - isa(op)) + if (isa(op) || isa(op) || isa(op)) llvm_unreachable( "Not support lowering loop with break, continue or if yet"); // Replace the IV usage to scf loop induction variable. @@ -285,12 +284,12 @@ void SCFDoLoop::transferToSCFWhileOp() { afterBuilder); } -class CIRForOpLowering : public mlir::OpConversionPattern { +class CIRForOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ForOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ForOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { SCFLoop loop(op, &rewriter); loop.analysis(); @@ -300,13 +299,12 @@ class CIRForOpLowering : public mlir::OpConversionPattern { } }; -class CIRWhileOpLowering - : public mlir::OpConversionPattern { +class CIRWhileOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::WhileOp op, OpAdaptor adaptor, + matchAndRewrite(cir::WhileOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { SCFWhileLoop loop(op, adaptor, &rewriter); loop.transferToSCFWhileOp(); @@ -315,12 +313,12 @@ class CIRWhileOpLowering } }; -class CIRDoOpLowering : public mlir::OpConversionPattern { +class CIRDoOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::DoWhileOp op, OpAdaptor adaptor, + matchAndRewrite(cir::DoWhileOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { SCFDoLoop loop(op, adaptor, &rewriter); loop.transferToSCFWhileOp(); @@ -330,11 +328,11 @@ class CIRDoOpLowering : public mlir::OpConversionPattern { }; class CIRConditionOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ConditionOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ConditionOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto *parentOp = op->getParentOp(); return llvm::TypeSwitch(parentOp) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 7238edbbc407..4dca01b70043 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -60,13 +60,12 @@ using namespace llvm; namespace cir { -class CIRReturnLowering - : public mlir::OpConversionPattern { +class CIRReturnLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ReturnOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ReturnOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getOperands()); @@ -89,12 +88,12 @@ struct ConvertCIRToMLIRPass virtual StringRef getArgument() const override { return "cir-to-mlir"; } }; -class CIRCallOpLowering : public mlir::OpConversionPattern { +class CIRCallOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CallOp op, OpAdaptor adaptor, + matchAndRewrite(cir::CallOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { SmallVector types; if (mlir::failed( @@ -106,13 +105,12 @@ class CIRCallOpLowering : public mlir::OpConversionPattern { } }; -class CIRAllocaOpLowering - : public mlir::OpConversionPattern { +class CIRAllocaOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::AllocaOp op, OpAdaptor adaptor, + matchAndRewrite(cir::AllocaOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto type = adaptor.getAllocaType(); auto mlirType = getTypeConverter()->convertType(type); @@ -166,12 +164,12 @@ static void eraseIfSafe(mlir::Value oldAddr, mlir::Value newAddr, } } -class CIRLoadOpLowering : public mlir::OpConversionPattern { +class CIRLoadOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::LoadOp op, OpAdaptor adaptor, + matchAndRewrite(cir::LoadOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Value base; SmallVector indices; @@ -186,13 +184,12 @@ class CIRLoadOpLowering : public mlir::OpConversionPattern { } }; -class CIRStoreOpLowering - : public mlir::OpConversionPattern { +class CIRStoreOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::StoreOp op, OpAdaptor adaptor, + matchAndRewrite(cir::StoreOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { mlir::Value base; SmallVector indices; @@ -209,150 +206,145 @@ class CIRStoreOpLowering } }; -class CIRCosOpLowering : public mlir::OpConversionPattern { +class CIRCosOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CosOp op, OpAdaptor adaptor, + matchAndRewrite(cir::CosOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRSqrtOpLowering : public mlir::OpConversionPattern { +class CIRSqrtOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::SqrtOp op, OpAdaptor adaptor, + matchAndRewrite(cir::SqrtOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRFAbsOpLowering : public mlir::OpConversionPattern { +class CIRFAbsOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::FAbsOp op, OpAdaptor adaptor, + matchAndRewrite(cir::FAbsOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRAbsOpLowering : public mlir::OpConversionPattern { +class CIRAbsOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::AbsOp op, OpAdaptor adaptor, + matchAndRewrite(cir::AbsOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRFloorOpLowering - : public mlir::OpConversionPattern { +class CIRFloorOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::FloorOp op, OpAdaptor adaptor, + matchAndRewrite(cir::FloorOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRCeilOpLowering : public mlir::OpConversionPattern { +class CIRCeilOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CeilOp op, OpAdaptor adaptor, + matchAndRewrite(cir::CeilOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRLog10OpLowering - : public mlir::OpConversionPattern { +class CIRLog10OpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::Log10Op op, OpAdaptor adaptor, + matchAndRewrite(cir::Log10Op op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRLogOpLowering : public mlir::OpConversionPattern { +class CIRLogOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::LogOp op, OpAdaptor adaptor, + matchAndRewrite(cir::LogOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRLog2OpLowering : public mlir::OpConversionPattern { +class CIRLog2OpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::Log2Op op, OpAdaptor adaptor, + matchAndRewrite(cir::Log2Op op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRRoundOpLowering - : public mlir::OpConversionPattern { +class CIRRoundOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::RoundOp op, OpAdaptor adaptor, + matchAndRewrite(cir::RoundOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRExpOpLowering : public mlir::OpConversionPattern { +class CIRExpOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ExpOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ExpOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRShiftOpLowering - : public mlir::OpConversionPattern { +class CIRShiftOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ShiftOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ShiftOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto cirAmtTy = - mlir::dyn_cast(op.getAmount().getType()); - auto cirValTy = mlir::dyn_cast(op.getValue().getType()); + auto cirAmtTy = mlir::dyn_cast(op.getAmount().getType()); + auto cirValTy = mlir::dyn_cast(op.getValue().getType()); auto mlirTy = getTypeConverter()->convertType(op.getType()); mlir::Value amt = adaptor.getAmount(); mlir::Value val = adaptor.getValue(); @@ -378,24 +370,24 @@ class CIRShiftOpLowering } }; -class CIRExp2OpLowering : public mlir::OpConversionPattern { +class CIRExp2OpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::Exp2Op op, OpAdaptor adaptor, + matchAndRewrite(cir::Exp2Op op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); } }; -class CIRSinOpLowering : public mlir::OpConversionPattern { +class CIRSinOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::SinOp op, OpAdaptor adaptor, + matchAndRewrite(cir::SinOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, adaptor.getSrc()); return mlir::LogicalResult::success(); @@ -422,19 +414,19 @@ class CIRBitOpLowering : public mlir::OpConversionPattern { }; using CIRBitClzOpLowering = - CIRBitOpLowering; + CIRBitOpLowering; using CIRBitCtzOpLowering = - CIRBitOpLowering; + CIRBitOpLowering; using CIRBitPopcountOpLowering = - CIRBitOpLowering; + CIRBitOpLowering; class CIRBitClrsbOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BitClrsbOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BitClrsbOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto inputTy = adaptor.getInput().getType(); auto zero = getConst(rewriter, op.getLoc(), inputTy, 0); @@ -465,13 +457,12 @@ class CIRBitClrsbOpLowering } }; -class CIRBitFfsOpLowering - : public mlir::OpConversionPattern { +class CIRBitFfsOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BitFfsOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BitFfsOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto resTy = getTypeConverter()->convertType(op.getType()); auto inputTy = adaptor.getInput().getType(); @@ -500,12 +491,12 @@ class CIRBitFfsOpLowering }; class CIRBitParityOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BitParityOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BitParityOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto resTy = getTypeConverter()->convertType(op.getType()); auto count = @@ -520,9 +511,9 @@ class CIRBitParityOpLowering }; class CIRConstantOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; private: // This code is in a separate function rather than part of matchAndRewrite @@ -536,7 +527,7 @@ class CIRConstantOpLowering "Can't lower a non-typed attribute"); auto mlirType = getTypeConverter()->convertType( mlir::cast(cirAttr).getType()); - if (auto vecAttr = mlir::dyn_cast(cirAttr)) { + if (auto vecAttr = mlir::dyn_cast(cirAttr)) { assert(mlir::isa(mlirType) && "MLIR type for CIR vector attribute is not mlir::VectorType"); assert(mlir::isa(mlirType) && @@ -548,11 +539,11 @@ class CIRConstantOpLowering } return mlir::DenseElementsAttr::get( mlir::cast(mlirType), mlirValues); - } else if (auto boolAttr = mlir::dyn_cast(cirAttr)) { + } else if (auto boolAttr = mlir::dyn_cast(cirAttr)) { return rewriter.getIntegerAttr(mlirType, boolAttr.getValue()); - } else if (auto floatAttr = mlir::dyn_cast(cirAttr)) { + } else if (auto floatAttr = mlir::dyn_cast(cirAttr)) { return rewriter.getFloatAttr(mlirType, floatAttr.getValue()); - } else if (auto intAttr = mlir::dyn_cast(cirAttr)) { + } else if (auto intAttr = mlir::dyn_cast(cirAttr)) { return rewriter.getIntegerAttr(mlirType, intAttr.getValue()); } else { llvm_unreachable("NYI: unsupported attribute kind lowering to MLIR"); @@ -562,7 +553,7 @@ class CIRConstantOpLowering public: mlir::LogicalResult - matchAndRewrite(mlir::cir::ConstantOp op, OpAdaptor adaptor, + matchAndRewrite(cir::ConstantOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp( op, getTypeConverter()->convertType(op.getType()), @@ -571,12 +562,12 @@ class CIRConstantOpLowering } }; -class CIRFuncOpLowering : public mlir::OpConversionPattern { +class CIRFuncOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::FuncOp op, OpAdaptor adaptor, + matchAndRewrite(cir::FuncOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto fnType = op.getFunctionType(); @@ -608,41 +599,40 @@ class CIRFuncOpLowering : public mlir::OpConversionPattern { } }; -class CIRUnaryOpLowering - : public mlir::OpConversionPattern { +class CIRUnaryOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::UnaryOp op, OpAdaptor adaptor, + matchAndRewrite(cir::UnaryOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto input = adaptor.getInput(); auto type = getTypeConverter()->convertType(op.getType()); switch (op.getKind()) { - case mlir::cir::UnaryOpKind::Inc: { + case cir::UnaryOpKind::Inc: { auto One = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); rewriter.replaceOpWithNewOp(op, type, input, One); break; } - case mlir::cir::UnaryOpKind::Dec: { + case cir::UnaryOpKind::Dec: { auto One = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, 1)); rewriter.replaceOpWithNewOp(op, type, input, One); break; } - case mlir::cir::UnaryOpKind::Plus: { + case cir::UnaryOpKind::Plus: { rewriter.replaceOp(op, op.getInput()); break; } - case mlir::cir::UnaryOpKind::Minus: { + case cir::UnaryOpKind::Minus: { auto Zero = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, 0)); rewriter.replaceOpWithNewOp(op, type, Zero, input); break; } - case mlir::cir::UnaryOpKind::Not: { + case cir::UnaryOpKind::Not: { auto MinusOne = rewriter.create( op.getLoc(), type, mlir::IntegerAttr::get(type, -1)); rewriter.replaceOpWithNewOp(op, type, MinusOne, @@ -655,12 +645,12 @@ class CIRUnaryOpLowering } }; -class CIRBinOpLowering : public mlir::OpConversionPattern { +class CIRBinOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BinOp op, OpAdaptor adaptor, + matchAndRewrite(cir::BinOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { assert((adaptor.getLhs().getType() == adaptor.getRhs().getType()) && "inconsistent operands' types not supported yet"); @@ -671,37 +661,37 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { "operand type not supported yet"); auto type = op.getLhs().getType(); - if (auto VecType = mlir::dyn_cast(type)) { + if (auto VecType = mlir::dyn_cast(type)) { type = VecType.getEltType(); } switch (op.getKind()) { - case mlir::cir::BinOpKind::Add: - if (mlir::isa(type)) + case cir::BinOpKind::Add: + if (mlir::isa(type)) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; - case mlir::cir::BinOpKind::Sub: - if (mlir::isa(type)) + case cir::BinOpKind::Sub: + if (mlir::isa(type)) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; - case mlir::cir::BinOpKind::Mul: - if (mlir::isa(type)) + case cir::BinOpKind::Mul: + if (mlir::isa(type)) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); else rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; - case mlir::cir::BinOpKind::Div: - if (auto ty = mlir::dyn_cast(type)) { + case cir::BinOpKind::Div: + if (auto ty = mlir::dyn_cast(type)) { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); @@ -712,8 +702,8 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; - case mlir::cir::BinOpKind::Rem: - if (auto ty = mlir::dyn_cast(type)) { + case cir::BinOpKind::Rem: + if (auto ty = mlir::dyn_cast(type)) { if (ty.isUnsigned()) rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); @@ -724,15 +714,15 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; - case mlir::cir::BinOpKind::And: + case cir::BinOpKind::And: rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; - case mlir::cir::BinOpKind::Or: + case cir::BinOpKind::Or: rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; - case mlir::cir::BinOpKind::Xor: + case cir::BinOpKind::Xor: rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; @@ -742,26 +732,26 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { } }; -class CIRCmpOpLowering : public mlir::OpConversionPattern { +class CIRCmpOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::CmpOp op, OpAdaptor adaptor, + matchAndRewrite(cir::CmpOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto type = op.getLhs().getType(); mlir::Value mlirResult; - if (auto ty = mlir::dyn_cast(type)) { + if (auto ty = mlir::dyn_cast(type)) { auto kind = convertCmpKindToCmpIPredicate(op.getKind(), ty.isSigned()); mlirResult = rewriter.create( op.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (auto ty = mlir::dyn_cast(type)) { + } else if (auto ty = mlir::dyn_cast(type)) { auto kind = convertCmpKindToCmpFPredicate(op.getKind()); mlirResult = rewriter.create( op.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (auto ty = mlir::dyn_cast(type)) { + } else if (auto ty = mlir::dyn_cast(type)) { llvm_unreachable("pointer comparison not supported yet"); } else { return op.emitError() << "unsupported type for CmpOp: " << type; @@ -778,24 +768,23 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { } }; -class CIRBrOpLowering : public mlir::OpRewritePattern { +class CIRBrOpLowering : public mlir::OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using OpRewritePattern::OpRewritePattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BrOp op, + matchAndRewrite(cir::BrOp op, mlir::PatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(op, op.getDest()); return mlir::LogicalResult::success(); } }; -class CIRScopeOpLowering - : public mlir::OpConversionPattern { - using mlir::OpConversionPattern::OpConversionPattern; +class CIRScopeOpLowering : public mlir::OpConversionPattern { + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::ScopeOp scopeOp, OpAdaptor adaptor, + matchAndRewrite(cir::ScopeOp scopeOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Empty scope: just remove it. if (scopeOp.getRegion().empty()) { @@ -826,12 +815,11 @@ class CIRScopeOpLowering } }; -struct CIRBrCondOpLowering - : public mlir::OpConversionPattern { - using mlir::OpConversionPattern::OpConversionPattern; +struct CIRBrCondOpLowering : public mlir::OpConversionPattern { + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::BrCondOp brOp, OpAdaptor adaptor, + matchAndRewrite(cir::BrCondOp brOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto condition = adaptor.getCond(); @@ -846,13 +834,12 @@ struct CIRBrCondOpLowering } }; -class CIRTernaryOpLowering - : public mlir::OpConversionPattern { +class CIRTernaryOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::TernaryOp op, OpAdaptor adaptor, + matchAndRewrite(cir::TernaryOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.setInsertionPoint(op); auto condition = adaptor.getCond(); @@ -877,12 +864,11 @@ class CIRTernaryOpLowering } }; -class CIRYieldOpLowering - : public mlir::OpConversionPattern { +class CIRYieldOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::YieldOp op, OpAdaptor adaptor, + matchAndRewrite(cir::YieldOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto *parentOp = op->getParentOp(); return llvm::TypeSwitch(parentOp) @@ -895,12 +881,12 @@ class CIRYieldOpLowering } }; -class CIRIfOpLowering : public mlir::OpConversionPattern { +class CIRIfOpLowering : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::IfOp ifop, OpAdaptor adaptor, + matchAndRewrite(cir::IfOp ifop, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto condition = adaptor.getCondition(); auto i1Condition = rewriter.create( @@ -920,12 +906,11 @@ class CIRIfOpLowering : public mlir::OpConversionPattern { } }; -class CIRGlobalOpLowering - : public mlir::OpConversionPattern { +class CIRGlobalOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::GlobalOp op, OpAdaptor adaptor, + matchAndRewrite(cir::GlobalOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { auto moduleOp = op->getParentOfType(); if (!moduleOp) @@ -949,15 +934,13 @@ class CIRGlobalOpLowering mlir::Attribute initialValue = mlir::Attribute(); std::optional init = op.getInitialValue(); if (init.has_value()) { - if (auto constArr = - mlir::dyn_cast(init.value())) { + if (auto constArr = mlir::dyn_cast(init.value())) { init = lowerConstArrayAttr(constArr, getTypeConverter()); if (init.has_value()) initialValue = init.value(); else llvm_unreachable("GlobalOp lowering array with initial value fail"); - } else if (auto constArr = - mlir::dyn_cast(init.value())) { + } else if (auto constArr = mlir::dyn_cast(init.value())) { if (memrefType.getShape().size()) { auto elementType = memrefType.getElementType(); auto rtt = @@ -980,16 +963,13 @@ class CIRGlobalOpLowering } else llvm_unreachable("GlobalOp lowering unsuppored type"); } - } else if (auto intAttr = - mlir::dyn_cast(init.value())) { + } else if (auto intAttr = mlir::dyn_cast(init.value())) { auto rtt = mlir::RankedTensorType::get({}, convertedType); initialValue = mlir::DenseIntElementsAttr::get(rtt, intAttr.getValue()); - } else if (auto fltAttr = - mlir::dyn_cast(init.value())) { + } else if (auto fltAttr = mlir::dyn_cast(init.value())) { auto rtt = mlir::RankedTensorType::get({}, convertedType); initialValue = mlir::DenseFPElementsAttr::get(rtt, fltAttr.getValue()); - } else if (auto boolAttr = - mlir::dyn_cast(init.value())) { + } else if (auto boolAttr = mlir::dyn_cast(init.value())) { auto rtt = mlir::RankedTensorType::get({}, convertedType); initialValue = mlir::DenseIntElementsAttr::get(rtt, (char)boolAttr.getValue()); @@ -1013,12 +993,12 @@ class CIRGlobalOpLowering }; class CIRGetGlobalOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::GetGlobalOp op, OpAdaptor adaptor, + matchAndRewrite(cir::GetGlobalOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // FIXME(cir): Premature DCE to avoid lowering stuff we're not using. // CIRGen should mitigate this and not emit the get_global. @@ -1035,14 +1015,14 @@ class CIRGetGlobalOpLowering }; class CIRVectorCreateLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VecCreateOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VecCreateOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto vecTy = mlir::dyn_cast(op.getType()); + auto vecTy = mlir::dyn_cast(op.getType()); assert(vecTy && "result type of cir.vec.create op is not VectorType"); auto elementTy = typeConverter->convertType(vecTy.getEltType()); auto loc = op.getLoc(); @@ -1065,12 +1045,12 @@ class CIRVectorCreateLowering }; class CIRVectorInsertLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VecInsertOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VecInsertOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp( op, adaptor.getValue(), adaptor.getVec(), adaptor.getIndex()); @@ -1079,12 +1059,12 @@ class CIRVectorInsertLowering }; class CIRVectorExtractLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VecExtractOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VecExtractOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp( op, adaptor.getVec(), adaptor.getIndex()); @@ -1092,27 +1072,26 @@ class CIRVectorExtractLowering } }; -class CIRVectorCmpOpLowering - : public mlir::OpConversionPattern { +class CIRVectorCmpOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::VecCmpOp op, OpAdaptor adaptor, + matchAndRewrite(cir::VecCmpOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - assert(mlir::isa(op.getType()) && - mlir::isa(op.getLhs().getType()) && - mlir::isa(op.getRhs().getType()) && + assert(mlir::isa(op.getType()) && + mlir::isa(op.getLhs().getType()) && + mlir::isa(op.getRhs().getType()) && "Vector compare with non-vector type"); auto elementType = - mlir::cast(op.getLhs().getType()).getEltType(); + mlir::cast(op.getLhs().getType()).getEltType(); mlir::Value bitResult; - if (auto intType = mlir::dyn_cast(elementType)) { + if (auto intType = mlir::dyn_cast(elementType)) { bitResult = rewriter.create( op.getLoc(), convertCmpKindToCmpIPredicate(op.getKind(), intType.isSigned()), adaptor.getLhs(), adaptor.getRhs()); - } else if (mlir::isa(elementType)) { + } else if (mlir::isa(elementType)) { bitResult = rewriter.create( op.getLoc(), convertCmpKindToCmpFPredicate(op.getKind()), adaptor.getLhs(), adaptor.getRhs()); @@ -1125,22 +1104,22 @@ class CIRVectorCmpOpLowering } }; -class CIRCastOpLowering : public mlir::OpConversionPattern { +class CIRCastOpLowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; + using OpConversionPattern::OpConversionPattern; inline mlir::Type convertTy(mlir::Type ty) const { return getTypeConverter()->convertType(ty); } mlir::LogicalResult - matchAndRewrite(mlir::cir::CastOp op, OpAdaptor adaptor, + matchAndRewrite(cir::CastOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - if (isa(op.getSrc().getType())) + if (isa(op.getSrc().getType())) llvm_unreachable("CastOp lowering for vector type is not supported yet"); auto src = adaptor.getSrc(); auto dstType = op.getResult().getType(); - using CIR = mlir::cir::CastKind; + using CIR = cir::CastKind; switch (op.getKind()) { case CIR::array_to_ptrdecay: { auto newDstType = mlir::cast(convertTy(dstType)); @@ -1149,18 +1128,18 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case CIR::int_to_bool: { - auto zero = rewriter.create( + auto zero = rewriter.create( src.getLoc(), op.getSrc().getType(), - mlir::cir::IntAttr::get(op.getSrc().getType(), 0)); - rewriter.replaceOpWithNewOp( - op, mlir::cir::BoolType::get(getContext()), mlir::cir::CmpOpKind::ne, - op.getSrc(), zero); + cir::IntAttr::get(op.getSrc().getType(), 0)); + rewriter.replaceOpWithNewOp( + op, cir::BoolType::get(getContext()), cir::CmpOpKind::ne, op.getSrc(), + zero); return mlir::success(); } case CIR::integral: { auto newDstType = convertTy(dstType); auto srcType = op.getSrc().getType(); - mlir::cir::IntType srcIntType = mlir::cast(srcType); + cir::IntType srcIntType = mlir::cast(srcType); auto newOp = createIntCast(rewriter, src, newDstType, srcIntType.isSigned()); rewriter.replaceOp(op, newOp); @@ -1171,12 +1150,12 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { auto srcTy = op.getSrc().getType(); auto dstTy = op.getResult().getType(); - if (!mlir::isa(dstTy) || - !mlir::isa(srcTy)) + if (!mlir::isa(dstTy) || + !mlir::isa(srcTy)) return op.emitError() << "NYI cast from " << srcTy << " to " << dstTy; auto getFloatWidth = [](mlir::Type ty) -> unsigned { - return mlir::cast(ty).getWidth(); + return mlir::cast(ty).getWidth(); }; if (getFloatWidth(srcTy) > getFloatWidth(dstTy)) @@ -1186,7 +1165,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case CIR::float_to_bool: { - auto dstTy = mlir::cast(op.getType()); + auto dstTy = mlir::cast(op.getType()); auto newDstType = convertTy(dstTy); auto kind = mlir::arith::CmpFPredicate::UNE; @@ -1202,7 +1181,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case CIR::bool_to_int: { - auto dstTy = mlir::cast(op.getType()); + auto dstTy = mlir::cast(op.getType()); auto newDstType = mlir::cast(convertTy(dstTy)); auto newOp = createIntCast(rewriter, src, newDstType); rewriter.replaceOp(op, newOp); @@ -1217,7 +1196,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { case CIR::int_to_float: { auto dstTy = op.getType(); auto newDstType = convertTy(dstTy); - if (mlir::cast(op.getSrc().getType()).isSigned()) + if (mlir::cast(op.getSrc().getType()).isSigned()) rewriter.replaceOpWithNewOp(op, newDstType, src); else rewriter.replaceOpWithNewOp(op, newDstType, src); @@ -1226,7 +1205,7 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { case CIR::float_to_int: { auto dstTy = op.getType(); auto newDstType = convertTy(dstTy); - if (mlir::cast(op.getResult().getType()).isSigned()) + if (mlir::cast(op.getResult().getType()).isSigned()) rewriter.replaceOpWithNewOp(op, newDstType, src); else rewriter.replaceOpWithNewOp(op, newDstType, src); @@ -1240,20 +1219,20 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { }; class CIRPtrStrideOpLowering - : public mlir::OpConversionPattern { + : public mlir::OpConversionPattern { public: - using mlir::OpConversionPattern::OpConversionPattern; + using mlir::OpConversionPattern::OpConversionPattern; // Return true if PtrStrideOp is produced by cast with array_to_ptrdecay kind // and they are in the same block. - inline bool isCastArrayToPtrConsumer(mlir::cir::PtrStrideOp op) const { + inline bool isCastArrayToPtrConsumer(cir::PtrStrideOp op) const { auto defOp = op->getOperand(0).getDefiningOp(); if (!defOp) return false; - auto castOp = dyn_cast(defOp); + auto castOp = dyn_cast(defOp); if (!castOp) return false; - if (castOp.getKind() != mlir::cir::CastKind::array_to_ptrdecay) + if (castOp.getKind() != cir::CastKind::array_to_ptrdecay) return false; if (!castOp->hasOneUse()) return false; @@ -1264,18 +1243,16 @@ class CIRPtrStrideOpLowering // Return true if all the PtrStrideOp users are load, store or cast // with array_to_ptrdecay kind and they are in the same block. - inline bool - isLoadStoreOrCastArrayToPtrProduer(mlir::cir::PtrStrideOp op) const { + inline bool isLoadStoreOrCastArrayToPtrProduer(cir::PtrStrideOp op) const { if (op.use_empty()) return false; for (auto *user : op->getUsers()) { if (!op->isBeforeInBlock(user)) return false; - if (isa(*user) || isa(*user)) + if (isa(*user) || isa(*user)) continue; - auto castOp = dyn_cast(*user); - if (castOp && - (castOp.getKind() == mlir::cir::CastKind::array_to_ptrdecay)) + auto castOp = dyn_cast(*user); + if (castOp && (castOp.getKind() == cir::CastKind::array_to_ptrdecay)) continue; return false; } @@ -1296,7 +1273,7 @@ class CIRPtrStrideOpLowering // only been used to propogate %base and %stride to memref.load/store and // should be erased after the conversion. mlir::LogicalResult - matchAndRewrite(mlir::cir::PtrStrideOp op, OpAdaptor adaptor, + matchAndRewrite(cir::PtrStrideOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { if (!isCastArrayToPtrConsumer(op)) return mlir::failure(); @@ -1347,12 +1324,12 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, static mlir::TypeConverter prepareTypeConverter() { mlir::TypeConverter converter; - converter.addConversion([&](mlir::cir::PointerType type) -> mlir::Type { + converter.addConversion([&](cir::PointerType type) -> mlir::Type { auto ty = converter.convertType(type.getPointee()); // FIXME: The pointee type might not be converted (e.g. struct) if (!ty) return nullptr; - if (isa(type.getPointee())) + if (isa(type.getPointee())) return ty; return mlir::MemRefType::get({}, ty); }); @@ -1360,33 +1337,32 @@ static mlir::TypeConverter prepareTypeConverter() { [&](mlir::IntegerType type) -> mlir::Type { return type; }); converter.addConversion( [&](mlir::FloatType type) -> mlir::Type { return type; }); - converter.addConversion( - [&](mlir::cir::VoidType type) -> mlir::Type { return {}; }); - converter.addConversion([&](mlir::cir::IntType type) -> mlir::Type { + converter.addConversion([&](cir::VoidType type) -> mlir::Type { return {}; }); + converter.addConversion([&](cir::IntType type) -> mlir::Type { // arith dialect ops doesn't take signed integer -- drop cir sign here return mlir::IntegerType::get( type.getContext(), type.getWidth(), mlir::IntegerType::SignednessSemantics::Signless); }); - converter.addConversion([&](mlir::cir::BoolType type) -> mlir::Type { + converter.addConversion([&](cir::BoolType type) -> mlir::Type { return mlir::IntegerType::get(type.getContext(), 8); }); - converter.addConversion([&](mlir::cir::SingleType type) -> mlir::Type { + converter.addConversion([&](cir::SingleType type) -> mlir::Type { return mlir::Float32Type::get(type.getContext()); }); - converter.addConversion([&](mlir::cir::DoubleType type) -> mlir::Type { + converter.addConversion([&](cir::DoubleType type) -> mlir::Type { return mlir::Float64Type::get(type.getContext()); }); - converter.addConversion([&](mlir::cir::FP80Type type) -> mlir::Type { + converter.addConversion([&](cir::FP80Type type) -> mlir::Type { return mlir::Float80Type::get(type.getContext()); }); - converter.addConversion([&](mlir::cir::LongDoubleType type) -> mlir::Type { + converter.addConversion([&](cir::LongDoubleType type) -> mlir::Type { return converter.convertType(type.getUnderlying()); }); - converter.addConversion([&](mlir::cir::ArrayType type) -> mlir::Type { + converter.addConversion([&](cir::ArrayType type) -> mlir::Type { SmallVector shape; mlir::Type curType = type; - while (auto arrayType = dyn_cast(curType)) { + while (auto arrayType = dyn_cast(curType)) { shape.push_back(arrayType.getSize()); curType = arrayType.getEltType(); } @@ -1396,7 +1372,7 @@ static mlir::TypeConverter prepareTypeConverter() { return nullptr; return mlir::MemRefType::get(shape, elementType); }); - converter.addConversion([&](mlir::cir::VectorType type) -> mlir::Type { + converter.addConversion([&](cir::VectorType type) -> mlir::Type { auto ty = converter.convertType(type.getEltType()); return mlir::VectorType::get(type.getSize(), ty); }); @@ -1421,7 +1397,7 @@ void ConvertCIRToMLIRPass::runOnOperation() { mlir::memref::MemRefDialect, mlir::func::FuncDialect, mlir::scf::SCFDialect, mlir::cf::ControlFlowDialect, mlir::math::MathDialect, mlir::vector::VectorDialect>(); - target.addIllegalDialect(); + target.addIllegalDialect(); if (failed(applyPartialConversion(module, target, std::move(patterns)))) signalPassFailure(); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h b/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h index 46d2bd7fc2a1..beb80e41b57f 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerToMLIRHelpers.h @@ -38,9 +38,9 @@ mlir::Value createIntCast(mlir::ConversionPatternRewriter &rewriter, return rewriter.create(loc, dstTy, src); } -mlir::arith::CmpIPredicate -convertCmpKindToCmpIPredicate(mlir::cir::CmpOpKind kind, bool isSigned) { - using CIR = mlir::cir::CmpOpKind; +mlir::arith::CmpIPredicate convertCmpKindToCmpIPredicate(cir::CmpOpKind kind, + bool isSigned) { + using CIR = cir::CmpOpKind; using arithCmpI = mlir::arith::CmpIPredicate; switch (kind) { case CIR::eq: @@ -59,9 +59,8 @@ convertCmpKindToCmpIPredicate(mlir::cir::CmpOpKind kind, bool isSigned) { llvm_unreachable("Unknown CmpOpKind"); } -mlir::arith::CmpFPredicate -convertCmpKindToCmpFPredicate(mlir::cir::CmpOpKind kind) { - using CIR = mlir::cir::CmpOpKind; +mlir::arith::CmpFPredicate convertCmpKindToCmpFPredicate(cir::CmpOpKind kind) { + using CIR = cir::CmpOpKind; using arithCmpF = mlir::arith::CmpFPredicate; switch (kind) { case CIR::eq: diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 40a96a38a8d5..8dc375c28b85 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -76,26 +76,29 @@ CreateFrontendBaseAction(CompilerInstance &CI) { case EmitAssembly: #if CLANG_ENABLE_CIR if (UseCIR) - return std::make_unique<::cir::EmitAssemblyAction>(); + return std::make_unique(); if (CIRAnalysisOnly) - return std::make_unique<::cir::AnalysisOnlyAndEmitAssemblyAction>(); + return std::make_unique(); #endif return std::make_unique(); case EmitBC: { #if CLANG_ENABLE_CIR if (UseCIR) - return std::make_unique<::cir::EmitBCAction>(); + return std::make_unique(); if (CIRAnalysisOnly) - return std::make_unique<::cir::AnalysisOnlyAndEmitBCAction>(); + return std::make_unique(); #endif return std::make_unique(); } #if CLANG_ENABLE_CIR - case EmitCIR: return std::make_unique<::cir::EmitCIRAction>(); + case EmitCIR: + return std::make_unique(); case EmitCIRFlat: - return std::make_unique<::cir::EmitCIRFlatAction>(); - case EmitCIROnly: return std::make_unique<::cir::EmitCIROnlyAction>(); - case EmitMLIR: return std::make_unique<::cir::EmitMLIRAction>(); + return std::make_unique(); + case EmitCIROnly: + return std::make_unique(); + case EmitMLIR: + return std::make_unique(); #else case EmitCIR: case EmitCIRFlat: @@ -106,32 +109,32 @@ CreateFrontendBaseAction(CompilerInstance &CI) { case EmitLLVM: { #if CLANG_ENABLE_CIR if (UseCIR) - return std::make_unique<::cir::EmitLLVMAction>(); + return std::make_unique(); if (CIRAnalysisOnly) - return std::make_unique<::cir::AnalysisOnlyAndEmitLLVMAction>(); + return std::make_unique(); #endif return std::make_unique(); } case EmitLLVMOnly: { #if CLANG_ENABLE_CIR if (CIRAnalysisOnly) - return std::make_unique<::cir::AnalysisOnlyAndEmitLLVMOnlyAction>(); + return std::make_unique(); #endif return std::make_unique(); } case EmitCodeGenOnly: { #if CLANG_ENABLE_CIR if (CIRAnalysisOnly) - return std::make_unique<::cir::AnalysisOnlyAndEmitLLVMOnlyAction>(); + return std::make_unique(); #endif return std::make_unique(); } case EmitObj: { #if CLANG_ENABLE_CIR if (UseCIR) - return std::make_unique<::cir::EmitObjAction>(); + return std::make_unique(); if (CIRAnalysisOnly) - return std::make_unique<::cir::AnalysisOnlyAndEmitObjAction>(); + return std::make_unique(); #endif return std::make_unique(); } diff --git a/clang/test/CIR/CodeGen/bf16-ops.c b/clang/test/CIR/CodeGen/bf16-ops.c index 08086eefc874..479be9980546 100644 --- a/clang/test/CIR/CodeGen/bf16-ops.c +++ b/clang/test/CIR/CodeGen/bf16-ops.c @@ -6,7 +6,6 @@ // RUN: FileCheck --input-file=%t.ll --check-prefix=NONATIVE-LLVM %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -target-feature +fullbf16 -fclangir -emit-llvm -o %t.ll %s // RUN: FileCheck --input-file=%t.ll --check-prefix=NATIVE-LLVM %s -// XFAIL: * volatile unsigned test; volatile int i0; diff --git a/clang/test/CIR/IR/invalid-annotations.cir b/clang/test/CIR/IR/invalid-annotations.cir index d7de2d5c5602..2a4fa79bd284 100644 --- a/clang/test/CIR/IR/invalid-annotations.cir +++ b/clang/test/CIR/IR/invalid-annotations.cir @@ -3,7 +3,7 @@ // expected-error @below {{invalid kind of attribute specified}} -// expected-error @below {{failed to parse AnnotationAttr parameter 'name' which is to be a `StringAttr`}} +// expected-error @below {{failed to parse AnnotationAttr parameter 'name' which is to be a `mlir::StringAttr`}} cir.global external @a = #cir.ptr : !cir.ptr [#cir.annotation] // ----- diff --git a/clang/tools/cir-lsp-server/cir-lsp-server.cpp b/clang/tools/cir-lsp-server/cir-lsp-server.cpp index bd823c13a42e..0c9398c428d3 100644 --- a/clang/tools/cir-lsp-server/cir-lsp-server.cpp +++ b/clang/tools/cir-lsp-server/cir-lsp-server.cpp @@ -15,6 +15,6 @@ int main(int argc, char **argv) { mlir::DialectRegistry registry; mlir::registerAllDialects(registry); - registry.insert(); + registry.insert(); return failed(mlir::MlirLspServerMain(argc, argv, registry)); } diff --git a/clang/tools/cir-opt/cir-opt.cpp b/clang/tools/cir-opt/cir-opt.cpp index a51b3a602baa..2c242f9d2db1 100644 --- a/clang/tools/cir-opt/cir-opt.cpp +++ b/clang/tools/cir-opt/cir-opt.cpp @@ -41,10 +41,10 @@ struct CIRToLLVMPipelineOptions int main(int argc, char **argv) { // TODO: register needed MLIR passes for CIR? mlir::DialectRegistry registry; - registry.insert(); + registry + .insert(); ::mlir::registerPass([]() -> std::unique_ptr<::mlir::Pass> { return cir::createConvertMLIRToLLVMPass(); diff --git a/clang/utils/TableGen/CIRLoweringEmitter.cpp b/clang/utils/TableGen/CIRLoweringEmitter.cpp index 3e5456e7e692..84b5ceea998e 100644 --- a/clang/utils/TableGen/CIRLoweringEmitter.cpp +++ b/clang/utils/TableGen/CIRLoweringEmitter.cpp @@ -21,14 +21,14 @@ void GenerateLowering(const Record *Operation) { std::string LLVMOp = Operation->getValueAsString("llvmOp").str(); ClassDefinitions += "class CIR" + Name + - "Lowering : public mlir::OpConversionPattern { public: - using OpConversionPattern::OpConversionPattern; mlir::LogicalResult - matchAndRewrite(mlir::cir::)C++" + + matchAndRewrite(cir::)C++" + Name + " op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) " "const " From c345b03cc538ddda30ece69ce59d9d86753cd100 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Fri, 8 Nov 2024 12:10:50 -0800 Subject: [PATCH 2056/2301] [CIR] Merge two copies of CIRGenAction.h (#1085) We have both clang/include/clang/CIRFrontendAction/CIRGenAction.h and clang/include/clang/cir/FrontendAction/CIRGenAction.h, which is a historical artifact. The latter is what's being upstreamed, so merge the former into it to avoid any confusion. --- .../{CIRFrontendAction => CIR/FrontendAction}/CIRGenAction.h | 3 ++- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 +- clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) rename clang/include/clang/{CIRFrontendAction => CIR/FrontendAction}/CIRGenAction.h (99%) diff --git a/clang/include/clang/CIRFrontendAction/CIRGenAction.h b/clang/include/clang/CIR/FrontendAction/CIRGenAction.h similarity index 99% rename from clang/include/clang/CIRFrontendAction/CIRGenAction.h rename to clang/include/clang/CIR/FrontendAction/CIRGenAction.h index 13c2a4381573..6618fbc54261 100644 --- a/clang/include/clang/CIRFrontendAction/CIRGenAction.h +++ b/clang/include/clang/CIR/FrontendAction/CIRGenAction.h @@ -129,7 +129,8 @@ class EmitObjAction : public CIRGenAction { EmitObjAction(mlir::MLIRContext *mlirCtx = nullptr); }; -// Used for -fclangir-analysis-only: use CIR analysis but still use original LLVM codegen path +// Used for -fclangir-analysis-only: use CIR analysis but still use original +// LLVM codegen path class AnalysisOnlyActionBase : public clang::CodeGenAction { virtual void anchor(); diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 8e321fde177b..cc236abe0047 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// -#include "clang/CIRFrontendAction/CIRGenAction.h" +#include "clang/CIR/FrontendAction/CIRGenAction.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/IR/BuiltinOps.h" diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp index 8dc375c28b85..4a54d90a6591 100644 --- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp +++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp @@ -37,7 +37,7 @@ #include "mlir/IR/MLIRContext.h" #include "mlir/Pass/PassManager.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIRFrontendAction/CIRGenAction.h" +#include "clang/CIR/FrontendAction/CIRGenAction.h" #endif using namespace clang; From 25fb28bbf214f4445ea51e402299fee5b98cd1d1 Mon Sep 17 00:00:00 2001 From: Congcong Cai Date: Sat, 9 Nov 2024 04:46:07 +0800 Subject: [PATCH 2057/2301] [CIR][NFC] Expand doc on `cir.scope` terminators (#1078) Fixed: #265 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 2cf9b6889c82..02f30833f3db 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1056,6 +1056,7 @@ def ScopeOp : CIR_Op<"scope", [ } ``` + The blocks can be terminated by `cir.yield`, `cir.return` or `cir.throw`. If `cir.scope` yields no value, the `cir.yield` can be left out, and will be inserted implicitly. }]; From adf1b7dcf8e5df91a1bcf8180b956527e12a9b14 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 8 Nov 2024 15:50:45 -0500 Subject: [PATCH 2058/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqdmulhq_v, neon_vqdmulh_v (#1079) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 5 + clang/test/CIR/CodeGen/AArch64/neon.c | 92 +++++++++++-------- 2 files changed, 61 insertions(+), 36 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 02f6f4673002..71bc955f8cfd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2485,6 +2485,11 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( : "aarch64.neon.sqadd"; break; } + case NEON::BI__builtin_neon_vqdmulh_v: + case NEON::BI__builtin_neon_vqdmulhq_v: { + intrincsName = "aarch64.neon.sqdmulh"; + break; + } case NEON::BI__builtin_neon_vqrdmulh_v: case NEON::BI__builtin_neon_vqrdmulhq_v: { intrincsName = "aarch64.neon.sqrdmulh"; diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 231d2154cb9b..a32e98803ef9 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -4814,45 +4814,65 @@ float64x2_t test_vminq_f64(float64x2_t a, float64x2_t b) { // return vpaddq_f64(a, b); // } -// NYI-LABEL: @test_vqdmulh_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VQDMULH_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: [[VQDMULH_V3_I:%.*]] = bitcast <4 x i16> [[VQDMULH_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VQDMULH_V2_I]] -// int16x4_t test_vqdmulh_s16(int16x4_t a, int16x4_t b) { -// return vqdmulh_s16(a, b); -// } +int16x4_t test_vqdmulh_s16(int16x4_t a, int16x4_t b) { + return vqdmulh_s16(a, b); -// NYI-LABEL: @test_vqdmulh_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VQDMULH_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: [[VQDMULH_V3_I:%.*]] = bitcast <2 x i32> [[VQDMULH_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VQDMULH_V2_I]] -// int32x2_t test_vqdmulh_s32(int32x2_t a, int32x2_t b) { -// return vqdmulh_s32(a, b); -// } + // CIR-LABEL: vqdmulh_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vqdmulhq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VQDMULHQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %a, <8 x i16> %b) -// NYI: [[VQDMULHQ_V3_I:%.*]] = bitcast <8 x i16> [[VQDMULHQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VQDMULHQ_V2_I]] -// int16x8_t test_vqdmulhq_s16(int16x8_t a, int16x8_t b) { -// return vqdmulhq_s16(a, b); -// } + // LLVM: {{.*}}test_vqdmulh_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8> + // LLVM: [[VQDMULH_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> [[A]], <4 x i16> [[B]]) + // LLVM: [[VQDMULH_V3_I:%.*]] = bitcast <4 x i16> [[VQDMULH_V2_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VQDMULH_V2_I]] +} -// NYI-LABEL: @test_vqdmulhq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VQDMULHQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %a, <4 x i32> %b) -// NYI: [[VQDMULHQ_V3_I:%.*]] = bitcast <4 x i32> [[VQDMULHQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VQDMULHQ_V2_I]] -// int32x4_t test_vqdmulhq_s32(int32x4_t a, int32x4_t b) { -// return vqdmulhq_s32(a, b); -// } +int32x2_t test_vqdmulh_s32(int32x2_t a, int32x2_t b) { + return vqdmulh_s32(a, b); + + // CIR-LABEL: vqdmulh_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqdmulh_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8> + // LLVM: [[VQDMULH_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> [[A]], <2 x i32> [[B]]) + // LLVM: [[VQDMULH_V3_I:%.*]] = bitcast <2 x i32> [[VQDMULH_V2_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VQDMULH_V2_I]] +} + +int16x8_t test_vqdmulhq_s16(int16x8_t a, int16x8_t b) { + return vqdmulhq_s16(a, b); + + // CIR-LABEL: vqdmulhq_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqdmulhq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8> + // LLVM: [[VQDMULH_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) + // LLVM: [[VQDMULH_V3_I:%.*]] = bitcast <8 x i16> [[VQDMULH_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VQDMULH_V2_I]] +} + +int32x4_t test_vqdmulhq_s32(int32x4_t a, int32x4_t b) { + return vqdmulhq_s32(a, b); + + // CIR-LABEL: vqdmulhq_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqdmulhq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8> + // LLVM: [[VQDMULH_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) + // LLVM: [[VQDMULH_V3_I:%.*]] = bitcast <4 x i32> [[VQDMULH_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VQDMULH_V2_I]] +} int16x4_t test_vqrdmulh_s16(int16x4_t a, int16x4_t b) { return vqrdmulh_s16(a, b); From 337da0c3e0141a9d87e7cb233d28e972dbe27e18 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Fri, 8 Nov 2024 16:35:19 -0800 Subject: [PATCH 2059/2301] [CIR][CIRGen] Change buildX functions to emitX (#1093) The buildX naming convention originated when the CIRGen implementation was planned to be substantially different from original CodeGen. CIRGen is now a much closer adaption of CodeGen, and the emitX to buildX renaming just makes things more confusing, since CodeGen also has some helper functions whose names start with build or Build, so it's not immediately clear which CodeGen function corresponds to a CIRGen buildX function. Rename the buildX functions back to emitX to fix this. This diff was generated mostly mechanically. I searched for all buildX functions in CIRGen and all emitX or buildX functions in CodeGen: ``` rg '\b[Bb]uild[A-Z][A-Za-z0-9_]*\b' clang/lib/CIR/CodeGen -Io | sort -u -o /tmp/buildfuncs rg '\b([Ee]mit|[Bb]uild)[A-Z][A-Za-z0-9_]*\b' clang/lib/CodeGen -Io | sort -u -o /tmp/emitfuncs ``` I used a simple Python script to find corresponding functions: https://gist.github.com/smeenai/02be7ced8564cef5518df72606ec7b19. https://gist.github.com/smeenai/6ffd67be4249c8cebdd7fa99cfa4f13c is the resulting list of correspondences. This isn't 100% accurate because it's not accounting for the files that the functions are present in, but that's pretty unlikely to matter here, so I kept it simple. The buildX functions in CIRGen which correspond to an emitX function in CodeGen should be changed, and the ones which correspond to a BuildX function in CodeGen should not be changed. That leaves some functions without any correspondences, which required a judgement call. I scanned through all those functions, and buildVirtualMethodAttr was the only one that seemed like it shouldn't be changed to emit. I performed the replacement as follows: ``` funcs="$(awk '(/-> [Ee]/ || !/->/) && !/buildVirtualMethodAttr/ { print substr($1, 6) }' /tmp/corrfuncs | paste -sd '|')" find clang/include/clang/CIR clang/lib/CIR/{CodeGen,FrontendAction} \( -name '*.h' -o -name '*.cpp' \) -print0 | \ xargs -0 perl -pi -e "s/\bbuild($funcs)\\b/emit\\1/g" ``` The mechanical changes are in the first commit of this PR. There was a manual fixup required for a token pasting macro in CIRGenExprScalar.cpp, which is the second commit. I then ran `git clang-format`, which is the third commit. (They'll be squashed together when the PR is committed.) --- clang/include/clang/CIR/CIRGenerator.h | 6 +- clang/include/clang/CIR/MissingFeatures.h | 10 +- clang/lib/CIR/CodeGen/CIRAsm.cpp | 58 +- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 212 +++-- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 361 ++++---- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 232 +++-- clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 44 +- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 35 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 76 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 230 +++-- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 24 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 116 +-- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 103 ++- clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenException.cpp | 45 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 658 +++++++------- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 211 +++-- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 251 +++--- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 283 +++--- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 42 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 445 ++++----- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 115 ++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 841 +++++++++--------- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 147 +-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 134 +-- clang/lib/CIR/CodeGen/CIRGenModule.h | 71 +- clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h | 4 +- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 258 +++--- clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp | 14 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 14 +- clang/lib/CIR/CodeGen/CIRGenVTables.h | 6 +- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 12 +- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 2 +- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 +- 37 files changed, 2524 insertions(+), 2564 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 78e6055ca993..f4c30a5e892b 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -66,7 +66,7 @@ class CIRGenerator : public clang::ASTConsumer { ~HandlingTopLevelDeclRAII() { unsigned Level = --Self.HandlingTopLevelDecls; if (Level == 0 && EmitDeferred) - Self.buildDeferredDecls(); + Self.emitDeferredDecls(); } }; @@ -101,8 +101,8 @@ class CIRGenerator : public clang::ASTConsumer { bool verifyModule(); - void buildDeferredDecls(); - void buildDefaultMethods(); + void emitDeferredDecls(); + void emitDefaultMethods(); }; } // namespace cir diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 412ac4385f4b..afc1e6b4f148 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -54,9 +54,9 @@ constexpr bool cirCConvAssertionMode = namespace cir { struct MissingFeatures { - // TODO(CIR): Implement the CIRGenFunction::buildTypeCheck method that handles + // TODO(CIR): Implement the CIRGenFunction::emitTypeCheck method that handles // sanitizer related type check features - static bool buildTypeCheck() { return false; } + static bool emitTypeCheck() { return false; } static bool tbaa() { return false; } static bool cleanups() { return false; } static bool emitNullabilityCheck() { return false; } @@ -128,8 +128,8 @@ struct MissingFeatures { // Missing Emissions static bool variablyModifiedTypeEmission() { return false; } - static bool buildLValueAlignmentAssumption() { return false; } - static bool buildDerivedToBaseCastForDevirt() { return false; } + static bool emitLValueAlignmentAssumption() { return false; } + static bool emitDerivedToBaseCastForDevirt() { return false; } static bool emitFunctionEpilog() { return false; } // References related stuff @@ -226,7 +226,7 @@ struct MissingFeatures { static bool deferredReplacements() { return false; } static bool shouldInstrumentFunction() { return false; } static bool xray() { return false; } - static bool buildConstrainedFPCall() { return false; } + static bool emitConstrainedFPCall() { return false; } static bool emitEmptyRecordCheck() { return false; } // Inline assembly diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 39a2ee8192d7..a20e75e07423 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -200,13 +200,13 @@ static void collectInOutConstrainsInfos(const CIRGenFunction &cgf, } } -std::pair CIRGenFunction::buildAsmInputLValue( +std::pair CIRGenFunction::emitAsmInputLValue( const TargetInfo::ConstraintInfo &Info, LValue InputValue, QualType InputType, std::string &ConstraintStr, SourceLocation Loc) { if (Info.allowsRegister() || !Info.allowsMemory()) { if (hasScalarEvaluationKind(InputType)) - return {buildLoadOfLValue(InputValue, Loc).getScalarVal(), mlir::Type()}; + return {emitLoadOfLValue(InputValue, Loc).getScalarVal(), mlir::Type()}; mlir::Type Ty = convertType(InputType); uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); @@ -226,9 +226,9 @@ std::pair CIRGenFunction::buildAsmInputLValue( } std::pair -CIRGenFunction::buildAsmInput(const TargetInfo::ConstraintInfo &Info, - const Expr *InputExpr, - std::string &ConstraintStr) { +CIRGenFunction::emitAsmInput(const TargetInfo::ConstraintInfo &Info, + const Expr *InputExpr, + std::string &ConstraintStr) { auto loc = getLoc(InputExpr->getExprLoc()); // If this can't be a register or memory, i.e., has to be a constant @@ -251,23 +251,23 @@ CIRGenFunction::buildAsmInput(const TargetInfo::ConstraintInfo &Info, if (Info.allowsRegister() || !Info.allowsMemory()) if (CIRGenFunction::hasScalarEvaluationKind(InputExpr->getType())) - return {buildScalarExpr(InputExpr), mlir::Type()}; + return {emitScalarExpr(InputExpr), mlir::Type()}; if (InputExpr->getStmtClass() == Expr::CXXThisExprClass) - return {buildScalarExpr(InputExpr), mlir::Type()}; + return {emitScalarExpr(InputExpr), mlir::Type()}; InputExpr = InputExpr->IgnoreParenNoopCasts(getContext()); - LValue Dest = buildLValue(InputExpr); - return buildAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, - InputExpr->getExprLoc()); + LValue Dest = emitLValue(InputExpr); + return emitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, + InputExpr->getExprLoc()); } -static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S, - const llvm::ArrayRef RegResults, - const llvm::ArrayRef ResultRegTypes, - const llvm::ArrayRef ResultTruncRegTypes, - const llvm::ArrayRef ResultRegDests, - const llvm::ArrayRef ResultRegQualTys, - const llvm::BitVector &ResultTypeRequiresCast, - const llvm::BitVector &ResultRegIsFlagReg) { +static void emitAsmStores(CIRGenFunction &CGF, const AsmStmt &S, + const llvm::ArrayRef RegResults, + const llvm::ArrayRef ResultRegTypes, + const llvm::ArrayRef ResultTruncRegTypes, + const llvm::ArrayRef ResultRegDests, + const llvm::ArrayRef ResultRegQualTys, + const llvm::BitVector &ResultTypeRequiresCast, + const llvm::BitVector &ResultRegIsFlagReg) { CIRGenBuilderTy &Builder = CGF.getBuilder(); CIRGenModule &CGM = CGF.CGM; auto CTX = Builder.getContext(); @@ -337,11 +337,11 @@ static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S, Dest = CGF.makeAddrLValue(A, Ty); } - CGF.buildStoreThroughLValue(RValue::get(Tmp), Dest); + CGF.emitStoreThroughLValue(RValue::get(Tmp), Dest); } } -mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { +mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &S) { // Assemble the final asm string. std::string AsmString = S.generateAsmString(getContext()); @@ -405,7 +405,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg); OutputConstraints.push_back(OutputConstraint); - LValue Dest = buildLValue(OutExpr); + LValue Dest = emitLValue(OutExpr); if (!Constraints.empty()) Constraints += ','; @@ -496,8 +496,8 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { mlir::Value Arg; mlir::Type ArgElemType; std::tie(Arg, ArgElemType) = - buildAsmInputLValue(Info, Dest, InputExpr->getType(), - InOutConstraints, InputExpr->getExprLoc()); + emitAsmInputLValue(Info, Dest, InputExpr->getType(), InOutConstraints, + InputExpr->getExprLoc()); if (mlir::Type AdjTy = getTargetHooks().adjustInlineAsmType( *this, OutputConstraint, Arg.getType())) @@ -555,7 +555,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { std::string ReplaceConstraint(InputConstraint); mlir::Value Arg; mlir::Type ArgElemType; - std::tie(Arg, ArgElemType) = buildAsmInput(Info, InputExpr, Constraints); + std::tie(Arg, ArgElemType) = emitAsmInput(Info, InputExpr, Constraints); // If this input argument is tied to a larger output result, extend the // input to be the same size as the output. The LLVM backend wants to see @@ -676,8 +676,8 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { } else if (ResultRegTypes.size() > 1) { auto alignment = CharUnits::One(); auto sname = cast(ResultType).getName(); - auto dest = buildAlloca(sname, ResultType, getLoc(S.getAsmLoc()), - alignment, false); + auto dest = emitAlloca(sname, ResultType, getLoc(S.getAsmLoc()), + alignment, false); auto addr = Address(dest, alignment); builder.createStore(getLoc(S.getAsmLoc()), result, addr); @@ -692,9 +692,9 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) { } } - buildAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes, - ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast, - ResultRegIsFlagReg); + emitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes, + ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast, + ResultRegIsFlagReg); return mlir::success(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index e2958d9450fe..41fcd60179d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -261,11 +261,11 @@ class AtomicInfo { // This function emits any expression (scalar, complex, or aggregate) // into a temporary alloca. -static Address buildValToTemp(CIRGenFunction &CGF, Expr *E) { +static Address emitValToTemp(CIRGenFunction &CGF, Expr *E) { Address DeclPtr = CGF.CreateMemTemp( E->getType(), CGF.getLoc(E->getSourceRange()), ".atomictmp"); - CGF.buildAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(), - /*Init*/ true); + CGF.emitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(), + /*Init*/ true); return DeclPtr; } @@ -372,7 +372,7 @@ static bool isCstWeak(mlir::Value weakVal, bool &val) { // Create a "default:" label and add it to the given collection of case labels. // Create the region that will hold the body of the "default:" block. -static void buildDefaultCase(CIRGenBuilderTy &builder, mlir::Location loc) { +static void emitDefaultCase(CIRGenBuilderTy &builder, mlir::Location loc) { auto EmptyArrayAttr = builder.getArrayAttr({}); mlir::OpBuilder::InsertPoint insertPoint; builder.create(loc, EmptyArrayAttr, cir::CaseOpKind::Default, @@ -383,9 +383,8 @@ static void buildDefaultCase(CIRGenBuilderTy &builder, mlir::Location loc) { // Create a single "case" label with the given MemOrder as its value. Add the // "case" label to the given collection of case labels. Create the region that // will hold the body of the "case" block. -static void buildSingleMemOrderCase(CIRGenBuilderTy &builder, - mlir::Location loc, mlir::Type Type, - cir::MemOrder Order) { +static void emitSingleMemOrderCase(CIRGenBuilderTy &builder, mlir::Location loc, + mlir::Type Type, cir::MemOrder Order) { SmallVector OneOrder{ cir::IntAttr::get(Type, static_cast(Order))}; auto OneAttribute = builder.getArrayAttr(OneOrder); @@ -398,10 +397,9 @@ static void buildSingleMemOrderCase(CIRGenBuilderTy &builder, // Create a pair of "case" labels with the given MemOrders as their values. // Add the combined "case" attribute to the given collection of case labels. // Create the region that will hold the body of the "case" block. -static void buildDoubleMemOrderCase(CIRGenBuilderTy &builder, - mlir::Location loc, mlir::Type Type, - cir::MemOrder Order1, - cir::MemOrder Order2) { +static void emitDoubleMemOrderCase(CIRGenBuilderTy &builder, mlir::Location loc, + mlir::Type Type, cir::MemOrder Order1, + cir::MemOrder Order2) { SmallVector TwoOrders{ cir::IntAttr::get(Type, static_cast(Order1)), cir::IntAttr::get(Type, static_cast(Order2))}; @@ -412,12 +410,12 @@ static void buildDoubleMemOrderCase(CIRGenBuilderTy &builder, builder.restoreInsertionPoint(insertPoint); } -static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, - Address Dest, Address Ptr, Address Val1, - Address Val2, uint64_t Size, - cir::MemOrder SuccessOrder, - cir::MemOrder FailureOrder, - llvm::SyncScope::ID Scope) { +static void emitAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, + Address Dest, Address Ptr, Address Val1, + Address Val2, uint64_t Size, + cir::MemOrder SuccessOrder, + cir::MemOrder FailureOrder, + llvm::SyncScope::ID Scope) { auto &builder = CGF.getBuilder(); auto loc = CGF.getLoc(E->getSourceRange()); auto Expected = builder.createLoad(loc, Val1); @@ -442,14 +440,14 @@ static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, }); // Update the memory at Dest with Cmp's value. - CGF.buildStoreOfScalar(cmpxchg.getCmp(), - CGF.makeAddrLValue(Dest, E->getType())); + CGF.emitStoreOfScalar(cmpxchg.getCmp(), + CGF.makeAddrLValue(Dest, E->getType())); } /// Given an ordering required on success, emit all possible cmpxchg /// instructions to cope with the provided (but possibly only dynamically known) /// FailureOrder. -static void buildAtomicCmpXchgFailureSet( +static void emitAtomicCmpXchgFailureSet( CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, mlir::Value FailureOrderVal, uint64_t Size, cir::MemOrder SuccessOrder, llvm::SyncScope::ID Scope) { @@ -483,8 +481,8 @@ static void buildAtomicCmpXchgFailureSet( // success argument". This condition has been lifted and the only // precondition is 31.7.2.18. Effectively treat this as a DR and skip // language version checks. - buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, - SuccessOrder, FailureOrder, Scope); + emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder, + FailureOrder, Scope); return; } @@ -502,9 +500,9 @@ static void buildAtomicCmpXchgFailureSet( // default: // Unsupported memory orders get generated as memory_order_relaxed, // because there is no practical way to report an error at runtime. - buildDefaultCase(builder, loc); - buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, - SuccessOrder, cir::MemOrder::Relaxed, Scope); + emitDefaultCase(builder, loc); + emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, + SuccessOrder, cir::MemOrder::Relaxed, Scope); builder.createBreak(loc); builder.setInsertionPointToEnd(switchBlock); @@ -513,10 +511,10 @@ static void buildAtomicCmpXchgFailureSet( // case acquire: // memory_order_consume is not implemented and always falls back to // memory_order_acquire - buildDoubleMemOrderCase(builder, loc, FailureOrderVal.getType(), - cir::MemOrder::Consume, cir::MemOrder::Acquire); - buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, - SuccessOrder, cir::MemOrder::Acquire, Scope); + emitDoubleMemOrderCase(builder, loc, FailureOrderVal.getType(), + cir::MemOrder::Consume, cir::MemOrder::Acquire); + emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, + SuccessOrder, cir::MemOrder::Acquire, Scope); builder.createBreak(loc); builder.setInsertionPointToEnd(switchBlock); @@ -526,11 +524,11 @@ static void buildAtomicCmpXchgFailureSet( // the failure memory order. They fall back to memory_order_relaxed. // case seq_cst: - buildSingleMemOrderCase(builder, loc, FailureOrderVal.getType(), - cir::MemOrder::SequentiallyConsistent); - buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, - SuccessOrder, cir::MemOrder::SequentiallyConsistent, - Scope); + emitSingleMemOrderCase(builder, loc, FailureOrderVal.getType(), + cir::MemOrder::SequentiallyConsistent); + emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, + SuccessOrder, cir::MemOrder::SequentiallyConsistent, + Scope); builder.createBreak(loc); builder.setInsertionPointToEnd(switchBlock); @@ -538,10 +536,10 @@ static void buildAtomicCmpXchgFailureSet( }); } -static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, - Address Ptr, Address Val1, Address Val2, - mlir::Value IsWeak, mlir::Value FailureOrder, - uint64_t Size, cir::MemOrder Order, uint8_t Scope) { +static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, + Address Ptr, Address Val1, Address Val2, + mlir::Value IsWeak, mlir::Value FailureOrder, + uint64_t Size, cir::MemOrder Order, uint8_t Scope) { assert(!cir::MissingFeatures::syncScopeID()); StringRef Op; @@ -559,8 +557,8 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__c11_atomic_compare_exchange_strong: case AtomicExpr::AO__hip_atomic_compare_exchange_strong: case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: - buildAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2, - FailureOrder, Size, Order, Scope); + emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2, + FailureOrder, Size, Order, Scope); return; case AtomicExpr::AO__c11_atomic_compare_exchange_weak: case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: @@ -573,8 +571,8 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__scoped_atomic_compare_exchange_n: { bool weakVal; if (isCstWeak(IsWeak, weakVal)) { - buildAtomicCmpXchgFailureSet(CGF, E, weakVal, Dest, Ptr, Val1, Val2, - FailureOrder, Size, Order, Scope); + emitAtomicCmpXchgFailureSet(CGF, E, weakVal, Dest, Ptr, Val1, Val2, + FailureOrder, Size, Order, Scope); } else { llvm_unreachable("NYI"); } @@ -769,27 +767,27 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, builder.createStore(loc, Result, Dest); } -static RValue buildAtomicLibcall(CIRGenFunction &CGF, StringRef fnName, - QualType resultType, CallArgList &args) { +static RValue emitAtomicLibcall(CIRGenFunction &CGF, StringRef fnName, + QualType resultType, CallArgList &args) { [[maybe_unused]] const CIRGenFunctionInfo &fnInfo = CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args); [[maybe_unused]] auto fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo); llvm_unreachable("NYI"); } -static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, - Address Ptr, Address Val1, Address Val2, - mlir::Value IsWeak, mlir::Value FailureOrder, - uint64_t Size, cir::MemOrder Order, - mlir::Value Scope) { +static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, + Address Ptr, Address Val1, Address Val2, + mlir::Value IsWeak, mlir::Value FailureOrder, + uint64_t Size, cir::MemOrder Order, + mlir::Value Scope) { auto ScopeModel = Expr->getScopeModel(); // LLVM atomic instructions always have synch scope. If clang atomic // expression has no scope operand, use default LLVM synch scope. if (!ScopeModel) { assert(!cir::MissingFeatures::syncScopeID()); - buildAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, - Order, /*FIXME(cir): LLVM default scope*/ 1); + emitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, + Order, /*FIXME(cir): LLVM default scope*/ 1); return; } @@ -804,7 +802,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest, llvm_unreachable("NYI"); } -RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { +RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *E) { QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); QualType MemTy = AtomicTy; if (const AtomicType *AT = AtomicTy->getAs()) @@ -814,12 +812,12 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { Address Val1 = Address::invalid(); Address Val2 = Address::invalid(); Address Dest = Address::invalid(); - Address Ptr = buildPointerWithAlignment(E->getPtr()); + Address Ptr = emitPointerWithAlignment(E->getPtr()); if (E->getOp() == AtomicExpr::AO__c11_atomic_init || E->getOp() == AtomicExpr::AO__opencl_atomic_init) { LValue lvalue = makeAddrLValue(Ptr, AtomicTy); - buildAtomicInit(E->getVal1(), lvalue); + emitAtomicInit(E->getVal1(), lvalue); return RValue::get(nullptr); } @@ -842,8 +840,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity(); } - auto Order = buildScalarExpr(E->getOrder()); - auto Scope = E->getScopeModel() ? buildScalarExpr(E->getScope()) : nullptr; + auto Order = emitScalarExpr(E->getOrder()); + auto Scope = E->getScopeModel() ? emitScalarExpr(E->getScope()) : nullptr; bool ShouldCastToIntPtrTy = true; switch (E->getOp()) { @@ -860,18 +858,18 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__atomic_load: case AtomicExpr::AO__scoped_atomic_load: - Dest = buildPointerWithAlignment(E->getVal1()); + Dest = emitPointerWithAlignment(E->getVal1()); break; case AtomicExpr::AO__atomic_store: case AtomicExpr::AO__scoped_atomic_store: - Val1 = buildPointerWithAlignment(E->getVal1()); + Val1 = emitPointerWithAlignment(E->getVal1()); break; case AtomicExpr::AO__atomic_exchange: case AtomicExpr::AO__scoped_atomic_exchange: - Val1 = buildPointerWithAlignment(E->getVal1()); - Dest = buildPointerWithAlignment(E->getVal2()); + Val1 = emitPointerWithAlignment(E->getVal1()); + Dest = emitPointerWithAlignment(E->getVal2()); break; case AtomicExpr::AO__atomic_compare_exchange: @@ -884,18 +882,18 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: case AtomicExpr::AO__scoped_atomic_compare_exchange: case AtomicExpr::AO__scoped_atomic_compare_exchange_n: - Val1 = buildPointerWithAlignment(E->getVal1()); + Val1 = emitPointerWithAlignment(E->getVal1()); if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange || E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange) - Val2 = buildPointerWithAlignment(E->getVal2()); + Val2 = emitPointerWithAlignment(E->getVal2()); else - Val2 = buildValToTemp(*this, E->getVal2()); - OrderFail = buildScalarExpr(E->getOrderFail()); + Val2 = emitValToTemp(*this, E->getVal2()); + OrderFail = emitScalarExpr(E->getOrderFail()); if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n || E->getOp() == AtomicExpr::AO__atomic_compare_exchange || E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n || E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange) { - IsWeak = buildScalarExpr(E->getWeak()); + IsWeak = emitScalarExpr(E->getWeak()); } break; @@ -970,7 +968,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__scoped_atomic_xor_fetch: case AtomicExpr::AO__scoped_atomic_store_n: case AtomicExpr::AO__scoped_atomic_exchange_n: - Val1 = buildValToTemp(*this, E->getVal1()); + Val1 = emitValToTemp(*this, E->getVal1()); break; } @@ -1164,7 +1162,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { } [[maybe_unused]] RValue Res = - buildAtomicLibcall(*this, LibCallName, RetTy, Args); + emitAtomicLibcall(*this, LibCallName, RetTy, Args); // The value is returned directly from the libcall. if (E->isCmpXChg()) { llvm_unreachable("NYI"); @@ -1201,31 +1199,31 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { if (cir::isValidCIRAtomicOrderingCABI(ord)) { switch ((cir::MemOrder)ord) { case cir::MemOrder::Relaxed: - buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - cir::MemOrder::Relaxed, Scope); + emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + cir::MemOrder::Relaxed, Scope); break; case cir::MemOrder::Consume: case cir::MemOrder::Acquire: if (IsStore) break; // Avoid crashing on code with undefined behavior - buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - cir::MemOrder::Acquire, Scope); + emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + cir::MemOrder::Acquire, Scope); break; case cir::MemOrder::Release: if (IsLoad) break; // Avoid crashing on code with undefined behavior - buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - cir::MemOrder::Release, Scope); + emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + cir::MemOrder::Release, Scope); break; case cir::MemOrder::AcquireRelease: if (IsLoad || IsStore) break; // Avoid crashing on code with undefined behavior - buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - cir::MemOrder::AcquireRelease, Scope); + emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + cir::MemOrder::AcquireRelease, Scope); break; case cir::MemOrder::SequentiallyConsistent: - buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - cir::MemOrder::SequentiallyConsistent, Scope); + emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + cir::MemOrder::SequentiallyConsistent, Scope); break; } } @@ -1250,9 +1248,9 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // order value that is not supported. There is no good way to report // an unsupported memory order at runtime, hence the fallback to // memory_order_relaxed. - buildDefaultCase(builder, loc); - buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - cir::MemOrder::Relaxed, Scope); + emitDefaultCase(builder, loc); + emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + cir::MemOrder::Relaxed, Scope); builder.createBreak(loc); builder.setInsertionPointToEnd(switchBlock); @@ -1263,11 +1261,11 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { // memory_order_consume is not implemented; it is always treated like // memory_order_acquire. These memory orders are not valid for // write-only operations. - buildDoubleMemOrderCase(builder, loc, Order.getType(), - cir::MemOrder::Consume, - cir::MemOrder::Acquire); - buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, - Size, cir::MemOrder::Acquire, Scope); + emitDoubleMemOrderCase(builder, loc, Order.getType(), + cir::MemOrder::Consume, + cir::MemOrder::Acquire); + emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + cir::MemOrder::Acquire, Scope); builder.createBreak(loc); } @@ -1276,10 +1274,10 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { if (!IsLoad) { // case release: // memory_order_release is not valid for read-only operations. - buildSingleMemOrderCase(builder, loc, Order.getType(), - cir::MemOrder::Release); - buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, - Size, cir::MemOrder::Release, Scope); + emitSingleMemOrderCase(builder, loc, Order.getType(), + cir::MemOrder::Release); + emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + cir::MemOrder::Release, Scope); builder.createBreak(loc); } @@ -1288,20 +1286,20 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { if (!IsLoad && !IsStore) { // case acq_rel: // memory_order_acq_rel is only valid for read-write operations. - buildSingleMemOrderCase(builder, loc, Order.getType(), - cir::MemOrder::AcquireRelease); - buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, - Size, cir::MemOrder::AcquireRelease, Scope); + emitSingleMemOrderCase(builder, loc, Order.getType(), + cir::MemOrder::AcquireRelease); + emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + cir::MemOrder::AcquireRelease, Scope); builder.createBreak(loc); } builder.setInsertionPointToEnd(switchBlock); // case seq_cst: - buildSingleMemOrderCase(builder, loc, Order.getType(), - cir::MemOrder::SequentiallyConsistent); - buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, - cir::MemOrder::SequentiallyConsistent, Scope); + emitSingleMemOrderCase(builder, loc, Order.getType(), + cir::MemOrder::SequentiallyConsistent); + emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, + cir::MemOrder::SequentiallyConsistent, Scope); builder.createBreak(loc); builder.setInsertionPointToEnd(switchBlock); @@ -1314,8 +1312,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) { RValTy, E->getExprLoc()); } -void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue lvalue, - bool isInit) { +void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue lvalue, + bool isInit) { bool IsVolatile = lvalue.isVolatileQualified(); cir::MemOrder MO; if (lvalue.getType()->isAtomicType()) { @@ -1324,7 +1322,7 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue lvalue, MO = cir::MemOrder::Release; IsVolatile = true; } - return buildAtomicStore(rvalue, lvalue, MO, IsVolatile, isInit); + return emitAtomicStore(rvalue, lvalue, MO, IsVolatile, isInit); } /// Return true if \param ValTy is a type that should be casted to integer @@ -1390,7 +1388,7 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const { // Okay, store the rvalue in. if (rvalue.isScalar()) { - CGF.buildStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true); + CGF.emitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true); } else { llvm_unreachable("NYI"); } @@ -1401,7 +1399,7 @@ mlir::Value AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const { // through memory. Floats get casted if needed by AtomicExpandPass. if (auto Value = getScalarRValValueOrNull(RVal)) { if (!shouldCastToInt(Value.getType(), CmpXchg)) { - return CGF.buildToMemory(Value, ValueTy); + return CGF.emitToMemory(Value, ValueTy); } else { llvm_unreachable("NYI"); } @@ -1415,9 +1413,9 @@ mlir::Value AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const { /// Note that the r-value is expected to be an r-value *of the atomic /// type*; this means that for aggregate r-values, it should include /// storage for any padding that was necessary. -void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, - cir::MemOrder MO, bool IsVolatile, - bool isInit) { +void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, + cir::MemOrder MO, bool IsVolatile, + bool isInit) { // If this is an aggregate r-value, it should agree in type except // maybe for address-space qualification. auto loc = dest.getPointer().getLoc(); @@ -1472,12 +1470,12 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest, llvm_unreachable("NYI"); } -void CIRGenFunction::buildAtomicInit(Expr *init, LValue dest) { +void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) { AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange())); switch (atomics.getEvaluationKind()) { case cir::TEK_Scalar: { - mlir::Value value = buildScalarExpr(init); + mlir::Value value = emitScalarExpr(init); atomics.emitCopyIntoMemory(RValue::get(value)); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 7575972eb6a2..49674ff2780c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -39,11 +39,10 @@ using namespace clang::CIRGen; using namespace cir; using namespace llvm; -static RValue buildLibraryCall(CIRGenFunction &CGF, const FunctionDecl *FD, - const CallExpr *E, - mlir::Operation *calleeValue) { +static RValue emitLibraryCall(CIRGenFunction &CGF, const FunctionDecl *FD, + const CallExpr *E, mlir::Operation *calleeValue) { auto callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(FD)); - return CGF.buildCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); + return CGF.emitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); } static mlir::Value tryUseTestFPKind(CIRGenFunction &CGF, unsigned BuiltinID, @@ -58,8 +57,8 @@ static mlir::Value tryUseTestFPKind(CIRGenFunction &CGF, unsigned BuiltinID, } template -static RValue buildUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { - auto Arg = CGF.buildScalarExpr(E.getArg(0)); +static RValue emitUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { + auto Arg = CGF.emitScalarExpr(E.getArg(0)); CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, &E); if (CGF.getBuilder().getIsFPConstrained()) @@ -71,10 +70,10 @@ static RValue buildUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { } template -static RValue buildUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &CGF, - const CallExpr &E) { +static RValue emitUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &CGF, + const CallExpr &E) { auto ResultType = CGF.ConvertType(E.getType()); - auto Src = CGF.buildScalarExpr(E.getArg(0)); + auto Src = CGF.emitScalarExpr(E.getArg(0)); if (CGF.getBuilder().getIsFPConstrained()) llvm_unreachable("constraint FP operations are NYI"); @@ -84,9 +83,9 @@ static RValue buildUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &CGF, } template -static RValue buildBinaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { - auto Arg0 = CGF.buildScalarExpr(E.getArg(0)); - auto Arg1 = CGF.buildScalarExpr(E.getArg(1)); +static RValue emitBinaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { + auto Arg0 = CGF.emitScalarExpr(E.getArg(0)); + auto Arg1 = CGF.emitScalarExpr(E.getArg(1)); auto Loc = CGF.getLoc(E.getExprLoc()); auto Ty = CGF.ConvertType(E.getType()); @@ -96,10 +95,10 @@ static RValue buildBinaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { } template -static mlir::Value buildBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &CGF, - const CallExpr &E) { - auto Arg0 = CGF.buildScalarExpr(E.getArg(0)); - auto Arg1 = CGF.buildScalarExpr(E.getArg(1)); +static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &CGF, + const CallExpr &E) { + auto Arg0 = CGF.emitScalarExpr(E.getArg(0)); + auto Arg1 = CGF.emitScalarExpr(E.getArg(1)); auto Loc = CGF.getLoc(E.getExprLoc()); auto Ty = CGF.ConvertType(E.getType()); @@ -115,13 +114,13 @@ static mlir::Value buildBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &CGF, template static RValue -buildBuiltinBitOp(CIRGenFunction &CGF, const CallExpr *E, - std::optional CK) { +emitBuiltinBitOp(CIRGenFunction &CGF, const CallExpr *E, + std::optional CK) { mlir::Value arg; if (CK.has_value()) - arg = CGF.buildCheckedArgForBuiltin(E->getArg(0), *CK); + arg = CGF.emitCheckedArgForBuiltin(E->getArg(0), *CK); else - arg = CGF.buildScalarExpr(E->getArg(0)); + arg = CGF.emitScalarExpr(E->getArg(0)); auto resultTy = CGF.ConvertType(E->getType()); auto op = @@ -195,9 +194,9 @@ EncompassingIntegerType(ArrayRef Types) { /// Emit the conversions required to turn the given value into an /// integer of the given size. -static mlir::Value buildToInt(CIRGenFunction &CGF, mlir::Value v, QualType t, - cir::IntType intType) { - v = CGF.buildToMemory(v, t); +static mlir::Value emitToInt(CIRGenFunction &CGF, mlir::Value v, QualType t, + cir::IntType intType) { + v = CGF.emitToMemory(v, t); if (isa(v.getType())) return CGF.getBuilder().createPtrToInt(v, intType); @@ -206,9 +205,9 @@ static mlir::Value buildToInt(CIRGenFunction &CGF, mlir::Value v, QualType t, return v; } -static mlir::Value buildFromInt(CIRGenFunction &CGF, mlir::Value v, QualType t, - mlir::Type resultType) { - v = CGF.buildFromMemory(v, t); +static mlir::Value emitFromInt(CIRGenFunction &CGF, mlir::Value v, QualType t, + mlir::Type resultType) { + v = CGF.emitFromMemory(v, t); if (isa(resultType)) return CGF.getBuilder().createIntToPtr(v, resultType); @@ -219,7 +218,7 @@ static mlir::Value buildFromInt(CIRGenFunction &CGF, mlir::Value v, QualType t, static Address checkAtomicAlignment(CIRGenFunction &CGF, const CallExpr *E) { ASTContext &ctx = CGF.getContext(); - Address ptr = CGF.buildPointerWithAlignment(E->getArg(0)); + Address ptr = CGF.emitPointerWithAlignment(E->getArg(0)); unsigned bytes = isa(ptr.getElementType()) ? ctx.getTypeSizeInChars(ctx.VoidPtrTy).getQuantity() @@ -254,19 +253,19 @@ static mlir::Value makeBinaryAtomicValue( expr->getArg(0)->getType()->getPointeeType()->isUnsignedIntegerType() ? builder.getUIntNTy(cgf.getContext().getTypeSize(typ)) : builder.getSIntNTy(cgf.getContext().getTypeSize(typ)); - mlir::Value val = cgf.buildScalarExpr(expr->getArg(1)); + mlir::Value val = cgf.emitScalarExpr(expr->getArg(1)); mlir::Type valueType = val.getType(); - val = buildToInt(cgf, val, typ, intType); + val = emitToInt(cgf, val, typ, intType); auto rmwi = builder.create( cgf.getLoc(expr->getSourceRange()), destAddr.emitRawPointer(), val, kind, ordering, false, /* is volatile */ true); /* fetch first */ - return buildFromInt(cgf, rmwi->getResult(0), typ, valueType); + return emitFromInt(cgf, rmwi->getResult(0), typ, valueType); } -static RValue buildBinaryAtomic(CIRGenFunction &CGF, cir::AtomicFetchKind kind, - const CallExpr *E) { +static RValue emitBinaryAtomic(CIRGenFunction &CGF, cir::AtomicFetchKind kind, + const CallExpr *E) { return RValue::get(makeBinaryAtomicValue(CGF, kind, E)); } @@ -281,10 +280,10 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, expr->getArg(0)->getType()->getPointeeType()->isUnsignedIntegerType() ? builder.getUIntNTy(cgf.getContext().getTypeSize(typ)) : builder.getSIntNTy(cgf.getContext().getTypeSize(typ)); - auto cmpVal = cgf.buildScalarExpr(expr->getArg(1)); - cmpVal = buildToInt(cgf, cmpVal, typ, intType); + auto cmpVal = cgf.emitScalarExpr(expr->getArg(1)); + cmpVal = emitToInt(cgf, cmpVal, typ, intType); auto newVal = - buildToInt(cgf, cgf.buildScalarExpr(expr->getArg(2)), typ, intType); + emitToInt(cgf, cgf.emitScalarExpr(expr->getArg(2)), typ, intType); auto op = builder.create( cgf.getLoc(expr->getSourceRange()), cmpVal.getType(), builder.getBoolTy(), @@ -295,9 +294,9 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, return returnBool ? op.getResult(1) : op.getResult(0); } -RValue CIRGenFunction::buildRotate(const CallExpr *E, bool IsRotateRight) { - auto src = buildScalarExpr(E->getArg(0)); - auto shiftAmt = buildScalarExpr(E->getArg(1)); +RValue CIRGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) { + auto src = emitScalarExpr(E->getArg(0)); + auto shiftAmt = emitScalarExpr(E->getArg(1)); // The builtin's shift arg may have a different type than the source arg and // result, but the CIR ops uses the same type for all values. @@ -310,9 +309,9 @@ RValue CIRGenFunction::buildRotate(const CallExpr *E, bool IsRotateRight) { return RValue::get(r); } -RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, - const CallExpr *E, - ReturnValueSlot ReturnValue) { +RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, + const CallExpr *E, + ReturnValueSlot ReturnValue) { const FunctionDecl *FD = GD.getDecl()->getAsFunction(); // See if we can constant fold this builtin. If so, don't emit it at all. @@ -484,7 +483,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_ceilf16: case Builtin::BI__builtin_ceill: case Builtin::BI__builtin_ceilf128: - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIcopysign: case Builtin::BIcopysignf: @@ -492,7 +491,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_copysign: case Builtin::BI__builtin_copysignf: case Builtin::BI__builtin_copysignl: - return buildBinaryFPBuiltin(*this, *E); + return emitBinaryFPBuiltin(*this, *E); case Builtin::BI__builtin_copysignf16: case Builtin::BI__builtin_copysignf128: @@ -507,7 +506,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_cosl: case Builtin::BI__builtin_cosf128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIcosh: case Builtin::BIcoshf: @@ -528,7 +527,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_expl: case Builtin::BI__builtin_expf128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIexp2: case Builtin::BIexp2f: @@ -539,7 +538,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_exp2l: case Builtin::BI__builtin_exp2f128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BI__builtin_exp10: case Builtin::BI__builtin_exp10f: @@ -556,7 +555,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fabsf16: case Builtin::BI__builtin_fabsl: case Builtin::BI__builtin_fabsf128: - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIfloor: case Builtin::BIfloorf: @@ -566,7 +565,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_floorf16: case Builtin::BI__builtin_floorl: case Builtin::BI__builtin_floorf128: - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIfma: case Builtin::BIfmaf: @@ -585,7 +584,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmaxf: case Builtin::BI__builtin_fmaxl: return RValue::get( - buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); + emitBinaryMaybeConstrainedFPBuiltin(*this, *E)); case Builtin::BI__builtin_fmaxf16: case Builtin::BI__builtin_fmaxf128: @@ -598,7 +597,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fminf: case Builtin::BI__builtin_fminl: return RValue::get( - buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); + emitBinaryMaybeConstrainedFPBuiltin(*this, *E)); case Builtin::BI__builtin_fminf16: case Builtin::BI__builtin_fminf128: @@ -613,7 +612,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmodf: case Builtin::BI__builtin_fmodl: assert(!cir::MissingFeatures::fastMathFlags()); - return buildBinaryFPBuiltin(*this, *E); + return emitBinaryFPBuiltin(*this, *E); case Builtin::BI__builtin_fmodf16: case Builtin::BI__builtin_fmodf128: @@ -629,7 +628,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_logl: case Builtin::BI__builtin_logf128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIlog10: case Builtin::BIlog10f: @@ -640,7 +639,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log10l: case Builtin::BI__builtin_log10f128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIlog2: case Builtin::BIlog2f: @@ -651,7 +650,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log2l: case Builtin::BI__builtin_log2f128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BInearbyint: case Builtin::BInearbyintf: @@ -660,7 +659,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_nearbyintf: case Builtin::BI__builtin_nearbyintl: case Builtin::BI__builtin_nearbyintf128: - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIpow: case Builtin::BIpowf: @@ -670,7 +669,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_powl: assert(!cir::MissingFeatures::fastMathFlags()); return RValue::get( - buildBinaryMaybeConstrainedFPBuiltin(*this, *E)); + emitBinaryMaybeConstrainedFPBuiltin(*this, *E)); case Builtin::BI__builtin_powf16: case Builtin::BI__builtin_powf128: @@ -684,7 +683,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_rintf16: case Builtin::BI__builtin_rintl: case Builtin::BI__builtin_rintf128: - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIround: case Builtin::BIroundf: @@ -694,7 +693,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_roundf16: case Builtin::BI__builtin_roundl: case Builtin::BI__builtin_roundf128: - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIroundeven: case Builtin::BIroundevenf: @@ -715,7 +714,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sinl: case Builtin::BI__builtin_sinf128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIsqrt: case Builtin::BIsqrtf: @@ -726,7 +725,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sqrtl: case Builtin::BI__builtin_sqrtf128: assert(!cir::MissingFeatures::fastMathFlags()); - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BI__builtin_elementwise_sqrt: llvm_unreachable("BI__builtin_elementwise_sqrt NYI"); @@ -759,7 +758,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_truncf16: case Builtin::BI__builtin_truncl: case Builtin::BI__builtin_truncf128: - return buildUnaryFPBuiltin(*this, *E); + return emitUnaryFPBuiltin(*this, *E); case Builtin::BIlround: case Builtin::BIlroundf: @@ -767,7 +766,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_lround: case Builtin::BI__builtin_lroundf: case Builtin::BI__builtin_lroundl: - return buildUnaryMaybeConstrainedFPToIntBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPToIntBuiltin(*this, *E); case Builtin::BI__builtin_lroundf128: llvm_unreachable("BI__builtin_lroundf128 NYI"); @@ -778,8 +777,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_llround: case Builtin::BI__builtin_llroundf: case Builtin::BI__builtin_llroundl: - return buildUnaryMaybeConstrainedFPToIntBuiltin(*this, - *E); + return emitUnaryMaybeConstrainedFPToIntBuiltin(*this, *E); case Builtin::BI__builtin_llroundf128: llvm_unreachable("BI__builtin_llroundf128 NYI"); @@ -790,7 +788,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_lrint: case Builtin::BI__builtin_lrintf: case Builtin::BI__builtin_lrintl: - return buildUnaryMaybeConstrainedFPToIntBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPToIntBuiltin(*this, *E); case Builtin::BI__builtin_lrintf128: llvm_unreachable("BI__builtin_lrintf128 NYI"); @@ -801,7 +799,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_llrint: case Builtin::BI__builtin_llrintf: case Builtin::BI__builtin_llrintl: - return buildUnaryMaybeConstrainedFPToIntBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPToIntBuiltin(*this, *E); case Builtin::BI__builtin_llrintf128: llvm_unreachable("BI__builtin_llrintf128 NYI"); @@ -831,15 +829,15 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_va_start: case Builtin::BI__va_start: case Builtin::BI__builtin_va_end: { - buildVAStartEnd(BuiltinID == Builtin::BI__va_start - ? buildScalarExpr(E->getArg(0)) - : buildVAListRef(E->getArg(0)).getPointer(), - BuiltinID != Builtin::BI__builtin_va_end); + emitVAStartEnd(BuiltinID == Builtin::BI__va_start + ? emitScalarExpr(E->getArg(0)) + : emitVAListRef(E->getArg(0)).getPointer(), + BuiltinID != Builtin::BI__builtin_va_end); return {}; } case Builtin::BI__builtin_va_copy: { - auto dstPtr = buildVAListRef(E->getArg(0)).getPointer(); - auto srcPtr = buildVAListRef(E->getArg(1)).getPointer(); + auto dstPtr = emitVAListRef(E->getArg(0)).getPointer(); + auto srcPtr = emitVAListRef(E->getArg(1)).getPointer(); builder.create(dstPtr.getLoc(), dstPtr, srcPtr); return {}; } @@ -851,7 +849,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_labs: case Builtin::BI__builtin_llabs: { bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow); - auto Arg = buildScalarExpr(E->getArg(0)); + auto Arg = emitScalarExpr(E->getArg(0)); mlir::Value Result; switch (getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: { @@ -877,8 +875,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(Result); } case Builtin::BI__builtin_complex: { - mlir::Value Real = buildScalarExpr(E->getArg(0)); - mlir::Value Imag = buildScalarExpr(E->getArg(1)); + mlir::Value Real = emitScalarExpr(E->getArg(0)); + mlir::Value Imag = emitScalarExpr(E->getArg(1)); mlir::Value Complex = builder.createComplexCreate(getLoc(E->getExprLoc()), Real, Imag); return RValue::getComplex(Complex); @@ -890,7 +888,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIconj: case Builtin::BIconjf: case Builtin::BIconjl: { - mlir::Value ComplexVal = buildComplexExpr(E->getArg(0)); + mlir::Value ComplexVal = emitComplexExpr(E->getArg(0)); mlir::Value Conj = builder.createUnaryOp(getLoc(E->getExprLoc()), cir::UnaryOpKind::Not, ComplexVal); return RValue::getComplex(Conj); @@ -902,7 +900,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIcreal: case Builtin::BIcrealf: case Builtin::BIcreall: { - mlir::Value ComplexVal = buildComplexExpr(E->getArg(0)); + mlir::Value ComplexVal = emitComplexExpr(E->getArg(0)); mlir::Value Real = builder.createComplexReal(getLoc(E->getExprLoc()), ComplexVal); return RValue::get(Real); @@ -917,7 +915,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIcimag: case Builtin::BIcimagf: case Builtin::BIcimagl: { - mlir::Value ComplexVal = buildComplexExpr(E->getArg(0)); + mlir::Value ComplexVal = emitComplexExpr(E->getArg(0)); mlir::Value Real = builder.createComplexImag(getLoc(E->getExprLoc()), ComplexVal); return RValue::get(Real); @@ -926,31 +924,31 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_clrsb: case Builtin::BI__builtin_clrsbl: case Builtin::BI__builtin_clrsbll: - return buildBuiltinBitOp(*this, E, std::nullopt); + return emitBuiltinBitOp(*this, E, std::nullopt); case Builtin::BI__builtin_ctzs: case Builtin::BI__builtin_ctz: case Builtin::BI__builtin_ctzl: case Builtin::BI__builtin_ctzll: case Builtin::BI__builtin_ctzg: - return buildBuiltinBitOp(*this, E, BCK_CTZPassedZero); + return emitBuiltinBitOp(*this, E, BCK_CTZPassedZero); case Builtin::BI__builtin_clzs: case Builtin::BI__builtin_clz: case Builtin::BI__builtin_clzl: case Builtin::BI__builtin_clzll: case Builtin::BI__builtin_clzg: - return buildBuiltinBitOp(*this, E, BCK_CLZPassedZero); + return emitBuiltinBitOp(*this, E, BCK_CLZPassedZero); case Builtin::BI__builtin_ffs: case Builtin::BI__builtin_ffsl: case Builtin::BI__builtin_ffsll: - return buildBuiltinBitOp(*this, E, std::nullopt); + return emitBuiltinBitOp(*this, E, std::nullopt); case Builtin::BI__builtin_parity: case Builtin::BI__builtin_parityl: case Builtin::BI__builtin_parityll: - return buildBuiltinBitOp(*this, E, std::nullopt); + return emitBuiltinBitOp(*this, E, std::nullopt); case Builtin::BI__lzcnt16: case Builtin::BI__lzcnt: @@ -964,18 +962,18 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_popcountl: case Builtin::BI__builtin_popcountll: case Builtin::BI__builtin_popcountg: - return buildBuiltinBitOp(*this, E, std::nullopt); + return emitBuiltinBitOp(*this, E, std::nullopt); case Builtin::BI__builtin_unpredictable: { if (CGM.getCodeGenOpts().OptimizationLevel != 0) assert(!cir::MissingFeatures::insertBuiltinUnpredictable()); - return RValue::get(buildScalarExpr(E->getArg(0))); + return RValue::get(emitScalarExpr(E->getArg(0))); } case Builtin::BI__builtin_expect: case Builtin::BI__builtin_expect_with_probability: { - auto ArgValue = buildScalarExpr(E->getArg(0)); - auto ExpectedValue = buildScalarExpr(E->getArg(1)); + auto ArgValue = emitScalarExpr(E->getArg(0)); + auto ExpectedValue = emitScalarExpr(E->getArg(1)); // Don't generate cir.expect on -O0 as the backend won't use it for // anything. Note, we still IRGen ExpectedValue because it could have @@ -1007,17 +1005,17 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_assume_aligned: { const Expr *ptr = E->getArg(0); - mlir::Value ptrValue = buildScalarExpr(ptr); + mlir::Value ptrValue = emitScalarExpr(ptr); mlir::Value offsetValue = - (E->getNumArgs() > 2) ? buildScalarExpr(E->getArg(2)) : nullptr; + (E->getNumArgs() > 2) ? emitScalarExpr(E->getArg(2)) : nullptr; mlir::Attribute alignmentAttr = ConstantEmitter(*this).emitAbstract( E->getArg(1), E->getArg(1)->getType()); std::int64_t alignment = cast(alignmentAttr).getSInt(); - ptrValue = buildAlignmentAssumption(ptrValue, ptr, ptr->getExprLoc(), - builder.getI64IntegerAttr(alignment), - offsetValue); + ptrValue = emitAlignmentAssumption(ptrValue, ptr, ptr->getExprLoc(), + builder.getI64IntegerAttr(alignment), + offsetValue); return RValue::get(ptrValue); } @@ -1026,7 +1024,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, if (E->getArg(0)->HasSideEffects(getContext())) return RValue::get(nullptr); - mlir::Value argValue = buildScalarExpr(E->getArg(0)); + mlir::Value argValue = emitScalarExpr(E->getArg(0)); builder.create(getLoc(E->getExprLoc()), argValue); return RValue::get(nullptr); } @@ -1035,8 +1033,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const Expr *arg0 = E->getArg(0); const Expr *arg1 = E->getArg(1); - mlir::Value value0 = buildScalarExpr(arg0); - mlir::Value value1 = buildScalarExpr(arg1); + mlir::Value value0 = emitScalarExpr(arg0); + mlir::Value value1 = emitScalarExpr(arg1); builder.create(getLoc(E->getExprLoc()), value0, value1); @@ -1055,7 +1053,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI_byteswap_ushort: case Builtin::BI_byteswap_ulong: case Builtin::BI_byteswap_uint64: { - auto arg = buildScalarExpr(E->getArg(0)); + auto arg = emitScalarExpr(E->getArg(0)); return RValue::get( builder.create(getLoc(E->getSourceRange()), arg)); } @@ -1075,7 +1073,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI_rotl: case Builtin::BI_lrotl: case Builtin::BI_rotl64: - return buildRotate(E, false); + return emitRotate(E, false); case Builtin::BI__builtin_rotateright8: case Builtin::BI__builtin_rotateright16: @@ -1086,7 +1084,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI_rotr: case Builtin::BI_lrotr: case Builtin::BI_rotr64: - return buildRotate(E, true); + return emitRotate(E, true); case Builtin::BI__builtin_constant_p: { mlir::Type ResultType = ConvertType(E->getType()); @@ -1110,7 +1108,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, builder.getConstInt(getLoc(E->getSourceRange()), mlir::cast(ResultType), 0)); - mlir::Value ArgValue = buildScalarExpr(Arg); + mlir::Value ArgValue = emitScalarExpr(Arg); if (ArgType->isObjCObjectPointerType()) // Convert Objective-C objects to id because we cannot distinguish between // LLVM types for Obj-C classes as they are opaque. @@ -1158,7 +1156,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, if (E->getNumArgs() > 2) Locality = evaluateOperandAsInt(E->getArg(2)); - mlir::Value Address = buildScalarExpr(E->getArg(0)); + mlir::Value Address = emitScalarExpr(E->getArg(0)); builder.create(getLoc(E->getSourceRange()), Address, Locality, IsWrite); return RValue::get(nullptr); @@ -1171,9 +1169,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin___clear_cache: { mlir::Type voidTy = cir::VoidType::get(&getMLIRContext()); mlir::Value begin = - builder.createPtrBitcast(buildScalarExpr(E->getArg(0)), voidTy); + builder.createPtrBitcast(emitScalarExpr(E->getArg(0)), voidTy); mlir::Value end = - builder.createPtrBitcast(buildScalarExpr(E->getArg(1)), voidTy); + builder.createPtrBitcast(emitScalarExpr(E->getArg(1)), voidTy); builder.create(getLoc(E->getSourceRange()), begin, end); return RValue::get(nullptr); } @@ -1191,7 +1189,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__debugbreak: llvm_unreachable("BI__debugbreak NYI"); case Builtin::BI__builtin_unreachable: { - buildUnreachable(E->getExprLoc()); + emitUnreachable(E->getExprLoc()); // We do need to preserve an insertion point. builder.createBlock(builder.getBlock()->getParent()); @@ -1339,7 +1337,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_alloca_uninitialized: case Builtin::BI__builtin_alloca: { // Get alloca size input - mlir::Value Size = buildScalarExpr(E->getArg(0)); + mlir::Value Size = emitScalarExpr(E->getArg(0)); // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__. const TargetInfo &TI = getContext().getTargetInfo(); @@ -1394,14 +1392,13 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_memcpy: case Builtin::BImempcpy: case Builtin::BI__builtin_mempcpy: { - Address Dest = buildPointerWithAlignment(E->getArg(0)); - Address Src = buildPointerWithAlignment(E->getArg(1)); - mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); - buildNonNullArgCheck(RValue::get(Dest.getPointer()), - E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), - FD, 0); - buildNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), - E->getArg(1)->getExprLoc(), FD, 1); + Address Dest = emitPointerWithAlignment(E->getArg(0)); + Address Src = emitPointerWithAlignment(E->getArg(1)); + mlir::Value SizeVal = emitScalarExpr(E->getArg(2)); + emitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), + E->getArg(0)->getExprLoc(), FD, 0); + emitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), + E->getArg(1)->getExprLoc(), FD, 1); builder.createMemCpy(getLoc(E->getSourceRange()), Dest.getPointer(), Src.getPointer(), SizeVal); if (BuiltinID == Builtin::BImempcpy || @@ -1416,11 +1413,11 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_char_memchr: case Builtin::BI__builtin_memchr: { - Address srcPtr = buildPointerWithAlignment(E->getArg(0)); + Address srcPtr = emitPointerWithAlignment(E->getArg(0)); mlir::Value src = builder.createBitcast(srcPtr.getPointer(), builder.getVoidPtrTy()); - mlir::Value pattern = buildScalarExpr(E->getArg(1)); - mlir::Value len = buildScalarExpr(E->getArg(2)); + mlir::Value pattern = emitScalarExpr(E->getArg(1)); + mlir::Value len = emitScalarExpr(E->getArg(2)); mlir::Value res = builder.create(getLoc(E->getExprLoc()), src, pattern, len); return RValue::get(res); @@ -1436,8 +1433,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm::APSInt dstSize = dstSizeResult.Val.getInt(); if (size.ugt(dstSize)) break; - Address dest = buildPointerWithAlignment(E->getArg(0)); - Address src = buildPointerWithAlignment(E->getArg(1)); + Address dest = emitPointerWithAlignment(E->getArg(0)); + Address src = emitPointerWithAlignment(E->getArg(1)); auto loc = getLoc(E->getSourceRange()); ConstantOp sizeOp = builder.getConstInt(loc, size); builder.createMemCpy(loc, dest.getPointer(), src.getPointer(), sizeOp); @@ -1452,26 +1449,24 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BImemmove: case Builtin::BI__builtin_memmove: { - Address Dest = buildPointerWithAlignment(E->getArg(0)); - Address Src = buildPointerWithAlignment(E->getArg(1)); - mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); - buildNonNullArgCheck(RValue::get(Dest.getPointer()), - E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), - FD, 0); - buildNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), - E->getArg(1)->getExprLoc(), FD, 1); + Address Dest = emitPointerWithAlignment(E->getArg(0)); + Address Src = emitPointerWithAlignment(E->getArg(1)); + mlir::Value SizeVal = emitScalarExpr(E->getArg(2)); + emitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), + E->getArg(0)->getExprLoc(), FD, 0); + emitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), + E->getArg(1)->getExprLoc(), FD, 1); builder.createMemMove(getLoc(E->getSourceRange()), Dest.getPointer(), Src.getPointer(), SizeVal); return RValue::get(Dest.getPointer()); } case Builtin::BImemset: case Builtin::BI__builtin_memset: { - Address Dest = buildPointerWithAlignment(E->getArg(0)); - mlir::Value ByteVal = buildScalarExpr(E->getArg(1)); - mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); - buildNonNullArgCheck(RValue::get(Dest.getPointer()), - E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), - FD, 0); + Address Dest = emitPointerWithAlignment(E->getArg(0)); + mlir::Value ByteVal = emitScalarExpr(E->getArg(1)); + mlir::Value SizeVal = emitScalarExpr(E->getArg(2)); + emitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), + E->getArg(0)->getExprLoc(), FD, 0); builder.createMemSet(getLoc(E->getSourceRange()), Dest.getPointer(), ByteVal, SizeVal); return RValue::get(Dest.getPointer()); @@ -1489,8 +1484,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm::APSInt dstSize = dstSizeResult.Val.getInt(); if (size.ugt(dstSize)) break; - Address dest = buildPointerWithAlignment(E->getArg(0)); - mlir::Value byteVal = buildScalarExpr(E->getArg(1)); + Address dest = emitPointerWithAlignment(E->getArg(0)); + mlir::Value byteVal = emitScalarExpr(E->getArg(1)); auto loc = getLoc(E->getSourceRange()); ConstantOp sizeOp = builder.getConstInt(loc, size); builder.createMemSet(loc, dest.getPointer(), byteVal, sizeOp); @@ -1559,7 +1554,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_fetch_and_add_4: case Builtin::BI__sync_fetch_and_add_8: case Builtin::BI__sync_fetch_and_add_16: { - return buildBinaryAtomic(*this, cir::AtomicFetchKind::Add, E); + return emitBinaryAtomic(*this, cir::AtomicFetchKind::Add, E); } case Builtin::BI__sync_fetch_and_sub_1: @@ -1567,7 +1562,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_fetch_and_sub_4: case Builtin::BI__sync_fetch_and_sub_8: case Builtin::BI__sync_fetch_and_sub_16: { - return buildBinaryAtomic(*this, cir::AtomicFetchKind::Sub, E); + return emitBinaryAtomic(*this, cir::AtomicFetchKind::Sub, E); } case Builtin::BI__sync_fetch_and_or_1: @@ -1754,9 +1749,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, auto ResultCIRTy = mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); - mlir::Value Left = buildScalarExpr(LeftArg); - mlir::Value Right = buildScalarExpr(RightArg); - Address ResultPtr = buildPointerWithAlignment(ResultArg); + mlir::Value Left = emitScalarExpr(LeftArg); + mlir::Value Right = emitScalarExpr(RightArg); + Address ResultPtr = emitPointerWithAlignment(ResultArg); // Extend each operand to the encompassing type, if necessary. if (Left.getType() != EncompassingCIRTy) @@ -1798,7 +1793,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // Finally, store the result using the pointer. bool isVolatile = ResultArg->getType()->getPointeeType().isVolatileQualified(); - builder.createStore(Loc, buildToMemory(ArithResult.result, ResultQTy), + builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy), ResultPtr, isVolatile); return RValue::get(ArithResult.overflow); @@ -1823,11 +1818,11 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_smull_overflow: case Builtin::BI__builtin_smulll_overflow: { // Scalarize our inputs. - mlir::Value X = buildScalarExpr(E->getArg(0)); - mlir::Value Y = buildScalarExpr(E->getArg(1)); + mlir::Value X = emitScalarExpr(E->getArg(0)); + mlir::Value Y = emitScalarExpr(E->getArg(1)); const clang::Expr *ResultArg = E->getArg(2); - Address ResultPtr = buildPointerWithAlignment(ResultArg); + Address ResultPtr = emitPointerWithAlignment(ResultArg); // Decide which of the arithmetic operation we are lowering to: cir::BinOpOverflowKind ArithKind; @@ -1871,7 +1866,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, bool isVolatile = ResultArg->getType()->getPointeeType().isVolatileQualified(); - builder.createStore(Loc, buildToMemory(ArithResult.result, ResultQTy), + builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy), ResultPtr, isVolatile); return RValue::get(ArithResult.overflow); @@ -1880,14 +1875,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIaddressof: case Builtin::BI__addressof: case Builtin::BI__builtin_addressof: - return RValue::get(buildLValue(E->getArg(0)).getPointer()); + return RValue::get(emitLValue(E->getArg(0)).getPointer()); case Builtin::BI__builtin_function_start: llvm_unreachable("BI__builtin_function_start NYI"); case Builtin::BI__builtin_operator_new: - return buildBuiltinNewDeleteCall( + return emitBuiltinNewDeleteCall( E->getCallee()->getType()->castAs(), E, false); case Builtin::BI__builtin_operator_delete: - buildBuiltinNewDeleteCall( + emitBuiltinNewDeleteCall( E->getCallee()->getType()->castAs(), E, true); return RValue::get(nullptr); case Builtin::BI__builtin_is_aligned: @@ -2007,7 +2002,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BImove_if_noexcept: case Builtin::BIforward: case Builtin::BIas_const: - return RValue::get(buildLValue(E->getArg(0)).getPointer()); + return RValue::get(emitLValue(E->getArg(0)).getPointer()); case Builtin::BIforward_like: llvm_unreachable("BIforward_like NYI"); case Builtin::BI__GetExceptionInfo: @@ -2030,7 +2025,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_coro_id like NYI"); case Builtin::BI__builtin_coro_frame: { - return buildCoroutineFrame(); + return emitCoroutineFrame(); } case Builtin::BI__builtin_coro_free: case Builtin::BI__builtin_coro_size: { @@ -2042,8 +2037,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, CGM.GetOrCreateCIRFunction(ND->getName(), ty, gd, /*ForVTable=*/false, /*DontDefer=*/false); fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); - return buildCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), - E, ReturnValue); + return emitCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), E, + ReturnValue); } case Builtin::BIread_pipe: @@ -2151,7 +2146,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // any of data classes, specified by the second argument. case Builtin::BI__builtin_isnan: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); if (mlir::Value Result = tryUseTestFPKind(*this, BuiltinID, V)) return RValue::get(Result); mlir::Location Loc = getLoc(E->getBeginLoc()); @@ -2163,7 +2158,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_issignaling: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); mlir::Location Loc = getLoc(E->getBeginLoc()); // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( @@ -2173,7 +2168,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_isinf: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); if (mlir::Value Result = tryUseTestFPKind(*this, BuiltinID, V)) return RValue::get(Result); mlir::Location Loc = getLoc(E->getBeginLoc()); @@ -2191,7 +2186,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__finitel: case Builtin::BI__builtin_isfinite: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); if (mlir::Value Result = tryUseTestFPKind(*this, BuiltinID, V)) return RValue::get(Result); mlir::Location Loc = getLoc(E->getBeginLoc()); @@ -2203,7 +2198,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_isnormal: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); mlir::Location Loc = getLoc(E->getBeginLoc()); // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( @@ -2213,7 +2208,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_issubnormal: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); mlir::Location Loc = getLoc(E->getBeginLoc()); // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( @@ -2223,7 +2218,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_iszero: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); mlir::Location Loc = getLoc(E->getBeginLoc()); // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( @@ -2237,7 +2232,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, break; CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); uint64_t Test = Result.Val.getInt().getLimitedValue(); mlir::Location Loc = getLoc(E->getBeginLoc()); @@ -2251,14 +2246,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // the call using the normal call path, but using the unmangled // version of the function name. if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) - return buildLibraryCall(*this, FD, E, - CGM.getBuiltinLibFunction(FD, BuiltinID)); + return emitLibraryCall(*this, FD, E, + CGM.getBuiltinLibFunction(FD, BuiltinID)); // If this is a predefined lib function (e.g. malloc), emit the call // using exactly the normal call path. if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) - return buildLibraryCall(*this, FD, E, - buildScalarExpr(E->getCallee()).getDefiningOp()); + return emitLibraryCall(*this, FD, E, + emitScalarExpr(E->getCallee()).getDefiningOp()); // Check that a call to a target specific builtin has the correct target // features. @@ -2299,7 +2294,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, } // Now see if we can emit a target-specific builtin. - if (auto V = buildTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { + if (auto V = emitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { switch (EvalKind) { case cir::TEK_Scalar: if (mlir::isa(V.getType())) @@ -2319,12 +2314,12 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return GetUndefRValue(E->getType()); } -mlir::Value CIRGenFunction::buildCheckedArgForBuiltin(const Expr *E, - BuiltinCheckKind Kind) { +mlir::Value CIRGenFunction::emitCheckedArgForBuiltin(const Expr *E, + BuiltinCheckKind Kind) { assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) && "Unsupported builtin check kind"); - auto value = buildScalarExpr(E); + auto value = emitScalarExpr(E); if (!SanOpts.has(SanitizerKind::Builtin)) return value; @@ -2332,11 +2327,11 @@ mlir::Value CIRGenFunction::buildCheckedArgForBuiltin(const Expr *E, llvm_unreachable("NYI"); } -static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, - unsigned BuiltinID, - const CallExpr *E, - ReturnValueSlot ReturnValue, - llvm::Triple::ArchType Arch) { +static mlir::Value emitTargetArchBuiltinExpr(CIRGenFunction *CGF, + unsigned BuiltinID, + const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch) { // When compiling in HipStdPar mode we have to be conservative in rejecting // target specific features in the FE, and defer the possible error to the // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is @@ -2356,13 +2351,13 @@ static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, case llvm::Triple::aarch64: case llvm::Triple::aarch64_32: case llvm::Triple::aarch64_be: - return CGF->buildAArch64BuiltinExpr(BuiltinID, E, ReturnValue, Arch); + return CGF->emitAArch64BuiltinExpr(BuiltinID, E, ReturnValue, Arch); case llvm::Triple::bpfeb: case llvm::Triple::bpfel: llvm_unreachable("NYI"); case llvm::Triple::x86: case llvm::Triple::x86_64: - return CGF->buildX86BuiltinExpr(BuiltinID, E); + return CGF->emitX86BuiltinExpr(BuiltinID, E); case llvm::Triple::ppc: case llvm::Triple::ppcle: case llvm::Triple::ppc64: @@ -2389,21 +2384,21 @@ static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, } } -mlir::Value -CIRGenFunction::buildTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, - ReturnValueSlot ReturnValue) { +mlir::Value CIRGenFunction::emitTargetBuiltinExpr(unsigned BuiltinID, + const CallExpr *E, + ReturnValueSlot ReturnValue) { if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) { assert(getContext().getAuxTargetInfo() && "Missing aux target info"); - return buildTargetArchBuiltinExpr( + return emitTargetArchBuiltinExpr( this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E, ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch()); } - return buildTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue, - getTarget().getTriple().getArch()); + return emitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue, + getTarget().getTriple().getArch()); } -void CIRGenFunction::buildVAStartEnd(mlir::Value ArgValue, bool IsStart) { +void CIRGenFunction::emitVAStartEnd(mlir::Value ArgValue, bool IsStart) { // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this // early, defer to LLVM lowering. if (IsStart) @@ -2448,8 +2443,8 @@ mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, auto DIter = LocalDeclMap.find(D); assert(DIter != LocalDeclMap.end()); - return buildLoadOfScalar(DIter->second, /*Volatile=*/false, - getContext().getSizeType(), E->getBeginLoc()); + return emitLoadOfScalar(DIter->second, /*Volatile=*/false, + getContext().getSizeType(), E->getBeginLoc()); } } @@ -2459,7 +2454,7 @@ mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) llvm_unreachable("NYI"); - auto Ptr = EmittedE ? EmittedE : buildScalarExpr(E); + auto Ptr = EmittedE ? EmittedE : emitScalarExpr(E); assert(mlir::isa(Ptr.getType()) && "Non-pointer passed to __builtin_object_size?"); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 71bc955f8cfd..46874b871f87 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -1955,11 +1955,11 @@ static cir::VectorType GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, llvm_unreachable("Unknown vector element type!"); } -static mlir::Value buildAArch64TblBuiltinExpr(CIRGenFunction &CGF, - unsigned BuiltinID, - const CallExpr *E, - SmallVectorImpl &Ops, - llvm::Triple::ArchType Arch) { +static mlir::Value emitAArch64TblBuiltinExpr(CIRGenFunction &CGF, + unsigned BuiltinID, + const CallExpr *E, + SmallVectorImpl &Ops, + llvm::Triple::ArchType Arch) { unsigned int Int = 0; [[maybe_unused]] const char *s = nullptr; @@ -2082,16 +2082,16 @@ static mlir::Value buildAArch64TblBuiltinExpr(CIRGenFunction &CGF, llvm_unreachable("NYI"); } -mlir::Value CIRGenFunction::buildAArch64SMEBuiltinExpr(unsigned BuiltinID, - const CallExpr *E) { +mlir::Value CIRGenFunction::emitAArch64SMEBuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { auto *Builtin = findARMVectorIntrinsicInMap(AArch64SMEIntrinsicMap, BuiltinID, AArch64SMEIntrinsicsProvenSorted); (void)Builtin; llvm_unreachable("NYI"); } -mlir::Value CIRGenFunction::buildAArch64SVEBuiltinExpr(unsigned BuiltinID, - const CallExpr *E) { +mlir::Value CIRGenFunction::emitAArch64SVEBuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 && BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64_x4) { llvm_unreachable("NYI"); @@ -2102,12 +2102,12 @@ mlir::Value CIRGenFunction::buildAArch64SVEBuiltinExpr(unsigned BuiltinID, llvm_unreachable("NYI"); } -mlir::Value CIRGenFunction::buildScalarOrConstFoldImmArg(unsigned ICEArguments, - unsigned Idx, - const CallExpr *E) { +mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned ICEArguments, + unsigned Idx, + const CallExpr *E) { mlir::Value Arg = {}; if ((ICEArguments & (1 << Idx)) == 0) { - Arg = buildScalarExpr(E->getArg(Idx)); + Arg = emitScalarExpr(E->getArg(Idx)); } else { // If this is required to be a constant, constant fold it so that we // know that the generated intrinsic gets a ConstantInt. @@ -2119,9 +2119,9 @@ mlir::Value CIRGenFunction::buildScalarOrConstFoldImmArg(unsigned ICEArguments, return Arg; } -static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, - const CallExpr *clangCallExpr, - CIRGenFunction &cgf) { +static mlir::Value emitArmLdrexNon128Intrinsic(unsigned int builtinID, + const CallExpr *clangCallExpr, + CIRGenFunction &cgf) { StringRef intrinsicName; if (builtinID == clang::AArch64::BI__builtin_arm_ldrex) { intrinsicName = "aarch64.ldxr"; @@ -2129,7 +2129,7 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, llvm_unreachable("Unknown builtinID"); } // Argument - mlir::Value loadAddr = cgf.buildScalarExpr(clangCallExpr->getArg(0)); + mlir::Value loadAddr = cgf.emitScalarExpr(clangCallExpr->getArg(0)); // Get Instrinc call CIRGenBuilderTy &builder = cgf.getBuilder(); QualType clangResTy = clangCallExpr->getType(); @@ -2189,10 +2189,10 @@ static int64_t getIntValueFromConstOp(mlir::Value val) { /// Build a constant shift amount vector of `vecTy` to shift a vector /// Here `shitfVal` is a constant integer that will be splated into a /// a const vector of `vecTy` which is the return of this function -static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, - mlir::Value shiftVal, - cir::VectorType vecTy, - mlir::Location loc, bool neg) { +static mlir::Value emitNeonShiftVector(CIRGenBuilderTy &builder, + mlir::Value shiftVal, + cir::VectorType vecTy, + mlir::Location loc, bool neg) { int shiftAmt = getIntValueFromConstOp(shiftVal); if (neg) shiftAmt = -shiftAmt; @@ -2206,23 +2206,21 @@ static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, } /// Build ShiftOp of vector type whose shift amount is a vector built -/// from a constant integer using `buildNeonShiftVector` function -static mlir::Value buildCommonNeonShift(CIRGenBuilderTy &builder, - mlir::Location loc, - cir::VectorType resTy, - mlir::Value shifTgt, - mlir::Value shiftAmt, bool shiftLeft, - bool negAmt = false) { - shiftAmt = buildNeonShiftVector(builder, shiftAmt, resTy, loc, negAmt); +/// from a constant integer using `emitNeonShiftVector` function +static mlir::Value +emitCommonNeonShift(CIRGenBuilderTy &builder, mlir::Location loc, + cir::VectorType resTy, mlir::Value shifTgt, + mlir::Value shiftAmt, bool shiftLeft, bool negAmt = false) { + shiftAmt = emitNeonShiftVector(builder, shiftAmt, resTy, loc, negAmt); return builder.create( loc, resTy, builder.createBitcast(shifTgt, resTy), shiftAmt, shiftLeft); } /// Right-shift a vector by a constant. -static mlir::Value buildNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec, - mlir::Value shiftVal, - cir::VectorType vecTy, bool usgn, - mlir::Location loc) { +static mlir::Value emitNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec, + mlir::Value shiftVal, + cir::VectorType vecTy, bool usgn, + mlir::Location loc) { CIRGenBuilderTy &builder = cgf.getBuilder(); int64_t shiftAmt = getIntValueFromConstOp(shiftVal); int eltSize = cgf.CGM.getDataLayout().getTypeSizeInBits(vecTy.getEltType()); @@ -2240,37 +2238,37 @@ static mlir::Value buildNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec, --shiftAmt; shiftVal = builder.getConstInt(loc, vecTy.getEltType(), shiftAmt); } - return buildCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal, - false /* right shift */); + return emitCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal, + false /* right shift */); } -mlir::Value buildNeonCall(CIRGenBuilderTy &builder, - llvm::SmallVector argTypes, - llvm::SmallVectorImpl &args, - llvm::StringRef intrinsicName, mlir::Type funcResTy, - mlir::Location loc, - bool isConstrainedFPIntrinsic = false, - unsigned shift = 0, bool rightshift = false) { +mlir::Value emitNeonCall(CIRGenBuilderTy &builder, + llvm::SmallVector argTypes, + llvm::SmallVectorImpl &args, + llvm::StringRef intrinsicName, mlir::Type funcResTy, + mlir::Location loc, + bool isConstrainedFPIntrinsic = false, + unsigned shift = 0, bool rightshift = false) { // TODO: Consider removing the following unreachable when we have - // buildConstrainedFPCall feature implemented - assert(!cir::MissingFeatures::buildConstrainedFPCall()); + // emitConstrainedFPCall feature implemented + assert(!cir::MissingFeatures::emitConstrainedFPCall()); if (isConstrainedFPIntrinsic) llvm_unreachable("isConstrainedFPIntrinsic NYI"); for (unsigned j = 0; j < argTypes.size(); ++j) { if (isConstrainedFPIntrinsic) { - assert(!cir::MissingFeatures::buildConstrainedFPCall()); + assert(!cir::MissingFeatures::emitConstrainedFPCall()); } if (shift > 0 && shift == j) { - args[j] = buildNeonShiftVector(builder, args[j], - mlir::cast(argTypes[j]), - loc, rightshift); + args[j] = emitNeonShiftVector(builder, args[j], + mlir::cast(argTypes[j]), + loc, rightshift); } else { args[j] = builder.createBitcast(args[j], argTypes[j]); } } if (isConstrainedFPIntrinsic) { - assert(!cir::MissingFeatures::buildConstrainedFPCall()); + assert(!cir::MissingFeatures::emitConstrainedFPCall()); return nullptr; } return builder @@ -2279,16 +2277,16 @@ mlir::Value buildNeonCall(CIRGenBuilderTy &builder, .getResult(); } -/// This function `buildCommonNeonCallPattern0` implements a common way +/// This function `emitCommonNeonCallPattern0` implements a common way /// to generate neon intrinsic call that has following pattern: /// 1. There is a need to cast result of the intrinsic call back to /// expression type. /// 2. Function arg types are given, not deduced from actual arg types. static mlir::Value -buildCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, - llvm::SmallVector argTypes, - llvm::SmallVectorImpl &ops, - mlir::Type funcResTy, const clang::CallExpr *e) { +emitCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, + llvm::SmallVector argTypes, + llvm::SmallVectorImpl &ops, + mlir::Type funcResTy, const clang::CallExpr *e) { CIRGenBuilderTy &builder = cgf.getBuilder(); if (argTypes.empty()) { // The most common arg types is {funcResTy, funcResTy} for neon intrinsic @@ -2298,13 +2296,13 @@ buildCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, argTypes = {funcResTy, funcResTy}; } mlir::Value res = - buildNeonCall(builder, std::move(argTypes), ops, intrincsName, funcResTy, - cgf.getLoc(e->getExprLoc())); + emitNeonCall(builder, std::move(argTypes), ops, intrincsName, funcResTy, + cgf.getLoc(e->getExprLoc())); mlir::Type resultType = cgf.ConvertType(e->getType()); return builder.createBitcast(res, resultType); } -mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( +mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, llvm::SmallVectorImpl &ops, Address ptrOp0, Address ptrOp1, @@ -2360,10 +2358,10 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vpaddlq_v: { // The source operand type has twice as many elements of half the size. cir::VectorType narrowTy = getHalfEltSizeTwiceNumElemsVecType(builder, vTy); - return buildNeonCall(builder, {narrowTy}, ops, - isUnsigned ? "aarch64.neon.uaddlp" - : "aarch64.neon.saddlp", - vTy, getLoc(e->getExprLoc())); + return emitNeonCall(builder, {narrowTy}, ops, + isUnsigned ? "aarch64.neon.uaddlp" + : "aarch64.neon.saddlp", + vTy, getLoc(e->getExprLoc())); } case NEON::BI__builtin_neon_vext_v: case NEON::BI__builtin_neon_vextq_v: { @@ -2390,27 +2388,27 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( cir::VectorType mulVecT = GetNeonType(this, NeonTypeFlags(neonType.getEltType(), false, /*isQuad*/ false)); - return buildNeonCall(builder, {resTy, mulVecT, SInt32Ty}, ops, - (builtinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || - builtinID == NEON::BI__builtin_neon_vqdmulh_lane_v) - ? "aarch64.neon.sqdmulh.lane" - : "aarch64.neon.sqrdmulh.lane", - resTy, getLoc(e->getExprLoc())); + return emitNeonCall(builder, {resTy, mulVecT, SInt32Ty}, ops, + (builtinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || + builtinID == NEON::BI__builtin_neon_vqdmulh_lane_v) + ? "aarch64.neon.sqdmulh.lane" + : "aarch64.neon.sqrdmulh.lane", + resTy, getLoc(e->getExprLoc())); } case NEON::BI__builtin_neon_vqshlu_n_v: case NEON::BI__builtin_neon_vqshluq_n_v: { // These intrinsics expect signed vector type as input, but // return unsigned vector type. cir::VectorType srcTy = getSignChangedVectorType(builder, vTy); - return buildNeonCall(builder, {srcTy, srcTy}, ops, "aarch64.neon.sqshlu", - vTy, getLoc(e->getExprLoc()), - false, /* not fp constrained op */ - 1, /* second arg is shift amount */ - false /* leftshift */); + return emitNeonCall(builder, {srcTy, srcTy}, ops, "aarch64.neon.sqshlu", + vTy, getLoc(e->getExprLoc()), + false, /* not fp constrained op */ + 1, /* second arg is shift amount */ + false /* leftshift */); } case NEON::BI__builtin_neon_vrshr_n_v: case NEON::BI__builtin_neon_vrshrq_n_v: { - return buildNeonCall( + return emitNeonCall( builder, {vTy, isUnsigned ? getSignChangedVectorType(builder, vTy) : vTy}, ops, isUnsigned ? "aarch64.neon.urshl" : "aarch64.neon.srshl", vTy, @@ -2421,7 +2419,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vshl_n_v: case NEON::BI__builtin_neon_vshlq_n_v: { mlir::Location loc = getLoc(e->getExprLoc()); - return buildCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); + return emitCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); } case NEON::BI__builtin_neon_vshll_n_v: { mlir::Location loc = getLoc(e->getExprLoc()); @@ -2431,7 +2429,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( ops[0] = builder.createBitcast(ops[0], srcTy); // The following cast will be lowered to SExt or ZExt in LLVM. ops[0] = builder.createIntCast(ops[0], vTy); - return buildCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); + return emitCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); } case NEON::BI__builtin_neon_vshrn_n_v: { mlir::Location loc = getLoc(e->getExprLoc()); @@ -2439,13 +2437,13 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( vTy, true /* extended */, mlir::cast(vTy.getEltType()).isSigned()); ops[0] = builder.createBitcast(ops[0], srcTy); - ops[0] = buildCommonNeonShift(builder, loc, srcTy, ops[0], ops[1], false); + ops[0] = emitCommonNeonShift(builder, loc, srcTy, ops[0], ops[1], false); return builder.createIntCast(ops[0], vTy); } case NEON::BI__builtin_neon_vshr_n_v: case NEON::BI__builtin_neon_vshrq_n_v: - return buildNeonRShiftImm(*this, ops[0], ops[1], vTy, isUnsigned, - getLoc(e->getExprLoc())); + return emitNeonRShiftImm(*this, ops[0], ops[1], vTy, isUnsigned, + getLoc(e->getExprLoc())); case NEON::BI__builtin_neon_vtst_v: case NEON::BI__builtin_neon_vtstq_v: { mlir::Location loc = getLoc(e->getExprLoc()); @@ -2538,21 +2536,20 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( if (intrincsName.empty()) return nullptr; - return buildCommonNeonCallPattern0(*this, intrincsName, argTypes, ops, vTy, - e); + return emitCommonNeonCallPattern0(*this, intrincsName, argTypes, ops, vTy, e); } mlir::Value -CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, - ReturnValueSlot ReturnValue, - llvm::Triple::ArchType Arch) { +CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch) { if (BuiltinID >= clang::AArch64::FirstSVEBuiltin && BuiltinID <= clang::AArch64::LastSVEBuiltin) - return buildAArch64SVEBuiltinExpr(BuiltinID, E); + return emitAArch64SVEBuiltinExpr(BuiltinID, E); if (BuiltinID >= clang::AArch64::FirstSMEBuiltin && BuiltinID <= clang::AArch64::LastSMEBuiltin) - return buildAArch64SMEBuiltinExpr(BuiltinID, E); + return emitAArch64SMEBuiltinExpr(BuiltinID, E); if (BuiltinID == Builtin::BI__builtin_cpu_supports) llvm_unreachable("NYI"); @@ -2681,7 +2678,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex || BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) { - return buildArmLdrexNon128Intrinsic(BuiltinID, E, *this); + return emitArmLdrexNon128Intrinsic(BuiltinID, E, *this); } if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex || @@ -2900,12 +2897,12 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vstl1q_lane_s64: // Get the alignment for the argument in addition to the value; // we'll use it later. - PtrOp0 = buildPointerWithAlignment(E->getArg(0)); + PtrOp0 = emitPointerWithAlignment(E->getArg(0)); Ops.push_back(PtrOp0.emitRawPointer()); continue; } } - Ops.push_back(buildScalarOrConstFoldImmArg(ICEArguments, i, E)); + Ops.push_back(emitScalarOrConstFoldImmArg(ICEArguments, i, E)); } auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap); @@ -3073,7 +3070,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vsetq_lane_i32: case NEON::BI__builtin_neon_vsetq_lane_i64: case NEON::BI__builtin_neon_vsetq_lane_f32: - Ops.push_back(buildScalarExpr(E->getArg(2))); + Ops.push_back(emitScalarExpr(E->getArg(2))); return builder.create(getLoc(E->getExprLoc()), Ops[1], Ops[0], Ops[2]); case NEON::BI__builtin_neon_vset_lane_bf16: @@ -3094,73 +3091,73 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt8Ty, 8)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i8: case NEON::BI__builtin_neon_vdupb_laneq_i8: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt8Ty, 16)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i16: case NEON::BI__builtin_neon_vduph_lane_i16: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt16Ty, 4)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i16: case NEON::BI__builtin_neon_vduph_laneq_i16: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt16Ty, 8)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i32: case NEON::BI__builtin_neon_vdups_lane_i32: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt32Ty, 2)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_f32: case NEON::BI__builtin_neon_vdups_lane_f32: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), FloatTy, 2)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i32: case NEON::BI__builtin_neon_vdups_laneq_i32: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt32Ty, 4)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i64: case NEON::BI__builtin_neon_vdupd_lane_i64: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt64Ty, 1)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vdupd_lane_f64: case NEON::BI__builtin_neon_vget_lane_f64: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), DoubleTy, 1)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i64: case NEON::BI__builtin_neon_vdupd_laneq_i64: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt64Ty, 2)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_f32: case NEON::BI__builtin_neon_vdups_laneq_f32: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), FloatTy, 4)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_f64: case NEON::BI__builtin_neon_vdupd_laneq_f64: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), DoubleTy, 2)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vaddh_f16: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vsubh_f16: @@ -3262,13 +3259,12 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, AArch64SIMDIntrinsicsProvenSorted); if (Builtin) - return buildCommonNeonBuiltinExpr( + return emitCommonNeonBuiltinExpr( Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, Builtin->NameHint, Builtin->TypeModifier, E, Ops, /*never use addresses*/ Address::invalid(), Address::invalid(), Arch); - if (mlir::Value V = - buildAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) + if (mlir::Value V = emitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) return V; cir::VectorType vTy = ty; @@ -3306,8 +3302,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, name = "aarch64.neon.pmull"; cir::VectorType argTy = builder.getExtendedOrTruncatedElementVectorType( ty, false /* truncated */, !usgn); - return buildNeonCall(builder, {argTy, argTy}, Ops, name, ty, - getLoc(E->getExprLoc())); + return emitNeonCall(builder, {argTy, argTy}, Ops, name, ty, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vmax_v: case NEON::BI__builtin_neon_vmaxq_v: @@ -3320,8 +3316,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::StringRef name = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin"; if (cir::isFPOrFPVectorTy(ty)) name = "aarch64.neon.fmin"; - return buildNeonCall(builder, {ty, ty}, Ops, name, ty, - getLoc(E->getExprLoc())); + return emitNeonCall(builder, {ty, ty}, Ops, name, ty, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vminh_f16: { llvm_unreachable("NYI"); @@ -3331,8 +3327,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::StringRef name = usgn ? "aarch64.neon.uabd" : "aarch64.neon.sabd"; if (cir::isFPOrFPVectorTy(ty)) name = "aarch64.neon.fabd"; - return buildNeonCall(builder, {ty, ty}, Ops, name, ty, - getLoc(E->getExprLoc())); + return emitNeonCall(builder, {ty, ty}, Ops, name, ty, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vpadal_v: case NEON::BI__builtin_neon_vpadalq_v: { @@ -3366,7 +3362,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vqrshrun_n_v: // The prototype of builtin_neon_vqrshrun_n can be found at // https://developer.arm.com/architectures/instruction-sets/intrinsics/ - return buildNeonCall( + return emitNeonCall( builder, {builder.getExtendedOrTruncatedElementVectorType(ty, true, true), SInt32Ty}, @@ -3374,7 +3370,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vqshrn_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vrshrn_n_v: - return buildNeonCall( + return emitNeonCall( builder, {builder.getExtendedOrTruncatedElementVectorType( vTy, true /* extend */, @@ -3388,9 +3384,9 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vrnda_v: case NEON::BI__builtin_neon_vrndaq_v: { - assert(!cir::MissingFeatures::buildConstrainedFPCall()); - return buildNeonCall(builder, {ty}, Ops, "round", ty, - getLoc(E->getExprLoc())); + assert(!cir::MissingFeatures::emitConstrainedFPCall()); + return emitNeonCall(builder, {ty}, Ops, "round", ty, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndih_f16: { llvm_unreachable("NYI"); @@ -3410,10 +3406,10 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } case NEON::BI__builtin_neon_vrndns_f32: { - mlir::Value arg0 = buildScalarExpr(E->getArg(0)); + mlir::Value arg0 = emitScalarExpr(E->getArg(0)); args.push_back(arg0); - return buildNeonCall(builder, {arg0.getType()}, args, "roundeven.f32", - getCIRGenModule().FloatTy, getLoc(E->getExprLoc())); + return emitNeonCall(builder, {arg0.getType()}, args, "roundeven.f32", + getCIRGenModule().FloatTy, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndph_f16: { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp index 76fe5315009e..0cd8f09f6da3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp @@ -31,7 +31,7 @@ using namespace clang; using namespace clang::CIRGen; using namespace cir; -mlir::Value CIRGenFunction::buildX86BuiltinExpr(unsigned BuiltinID, - const CallExpr *E) { +mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index beadfbb26a23..7668ef3dd1b7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -172,18 +172,18 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { llvm_unreachable("NYI"); // Create the alias with no name. - buildAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage); + emitAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage); return false; } -static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, - Address DeclPtr) { +static void emitDeclInit(CIRGenFunction &CGF, const VarDecl *D, + Address DeclPtr) { assert((D->hasGlobalStorage() || (D->hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) && "VarDecl must have global or local (in the case of OpenCL) storage!"); assert(!D->getType()->isReferenceType() && - "Should not call buildDeclInit on a reference!"); + "Should not call emitDeclInit on a reference!"); QualType type = D->getType(); LValue lv = CGF.makeAddrLValue(DeclPtr, type); @@ -191,21 +191,21 @@ static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, const Expr *Init = D->getInit(); switch (CIRGenFunction::getEvaluationKind(type)) { case cir::TEK_Aggregate: - CGF.buildAggExpr( - Init, AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, - AggValueSlot::DoesNotOverlap)); + CGF.emitAggExpr(Init, + AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, + AggValueSlot::DoesNotOverlap)); return; case cir::TEK_Scalar: - CGF.buildScalarInit(Init, CGF.getLoc(D->getLocation()), lv, false); + CGF.emitScalarInit(Init, CGF.getLoc(D->getLocation()), lv, false); return; case cir::TEK_Complex: llvm_unreachable("complext evaluation NYI"); } } -static void buildDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) { +static void emitDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) { // Honor __attribute__((no_destroy)) and bail instead of attempting // to emit a reference to a possibly nonexistent destructor, which // in turn can cause a crash. This will result in a global constructor @@ -292,12 +292,12 @@ cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { /// Emit code to cause the variable at the given address to be considered as /// constant from this point onwards. -static void buildDeclInvariant(CIRGenFunction &CGF, const VarDecl *D) { - return CGF.buildInvariantStart( +static void emitDeclInvariant(CIRGenFunction &CGF, const VarDecl *D) { + return CGF.emitInvariantStart( CGF.getContext().getTypeSizeInChars(D->getType())); } -void CIRGenFunction::buildInvariantStart([[maybe_unused]] CharUnits Size) { +void CIRGenFunction::emitInvariantStart([[maybe_unused]] CharUnits Size) { // Do not emit the intrinsic if we're not optimizing. if (!CGM.getCodeGenOpts().OptimizationLevel) return; @@ -305,9 +305,9 @@ void CIRGenFunction::buildInvariantStart([[maybe_unused]] CharUnits Size) { assert(!cir::MissingFeatures::createInvariantIntrinsic()); } -void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, - cir::GlobalOp addr, - bool performInit) { +void CIRGenModule::emitCXXGlobalVarDeclInit(const VarDecl *varDecl, + cir::GlobalOp addr, + bool performInit) { const Expr *init = varDecl->getInit(); QualType ty = varDecl->getType(); @@ -357,7 +357,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, getASTContext().getDeclAlign(varDecl)); assert(performInit && "cannot have constant initializer which needs " "destruction for reference"); - RValue rv = cgf.buildReferenceBindingToExpr(init); + RValue rv = cgf.emitReferenceBindingToExpr(init); { mlir::OpBuilder::InsertionGuard guard(builder); mlir::Operation *rvalueDefOp = rv.getScalarVal().getDefiningOp(); @@ -370,7 +370,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, builder.setInsertionPoint(yield); } } - cgf.buildStoreOfScalar(rv.getScalarVal(), declAddr, false, ty); + cgf.emitStoreOfScalar(rv.getScalarVal(), declAddr, false, ty); } builder.setInsertionPointToEnd(block); builder.create(addr->getLoc()); @@ -390,7 +390,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, builder.setInsertionPointToStart(block); Address declAddr(getAddrOfGlobalVar(varDecl), getASTContext().getDeclAlign(varDecl)); - buildDeclInit(cgf, varDecl, declAddr); + emitDeclInit(cgf, varDecl, declAddr); builder.setInsertionPointToEnd(block); builder.create(addr->getLoc()); } @@ -398,7 +398,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, if (isConstantStorage) { // TODO: this leads to a missing feature in the moment, probably also need // a LexicalScope to be inserted here. - buildDeclInvariant(cgf, varDecl); + emitDeclInvariant(cgf, varDecl); } else { // If not constant storage we'll emit this regardless of NeedsDtor value. mlir::OpBuilder::InsertionGuard guard(builder); @@ -408,7 +408,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, lexScope.setAsGlobalInit(); builder.setInsertionPointToStart(block); - buildDeclDestroy(cgf, varDecl); + emitDeclDestroy(cgf, varDecl); builder.setInsertionPointToEnd(block); if (block->empty()) { block->erase(); diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 587a1ce9c880..0a0c1bef4242 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -95,8 +95,8 @@ class CIRGenCXXABI { clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating) = 0; /// Emit the ABI-specific prolog for the function - virtual void buildInstanceFunctionProlog(SourceLocation Loc, - CIRGenFunction &CGF) = 0; + virtual void emitInstanceFunctionProlog(SourceLocation Loc, + CIRGenFunction &CGF) = 0; /// Get the type of the implicit "this" parameter used by a method. May return /// zero if no specific type is applicable, e.g. if the ABI expects the "this" @@ -162,16 +162,15 @@ class CIRGenCXXABI { bool Delegating) = 0; /// Emit constructor variants required by this ABI. - virtual void buildCXXConstructors(const clang::CXXConstructorDecl *D) = 0; + virtual void emitCXXConstructors(const clang::CXXConstructorDecl *D) = 0; /// Emit dtor variants required by this ABI. - virtual void buildCXXDestructors(const clang::CXXDestructorDecl *D) = 0; + virtual void emitCXXDestructors(const clang::CXXDestructorDecl *D) = 0; /// Emit the destructor call. - virtual void buildDestructorCall(CIRGenFunction &CGF, - const CXXDestructorDecl *DD, - CXXDtorType Type, bool ForVirtualBase, - bool Delegating, Address This, - QualType ThisTy) = 0; + virtual void emitDestructorCall(CIRGenFunction &CGF, + const CXXDestructorDecl *DD, CXXDtorType Type, + bool ForVirtualBase, bool Delegating, + Address This, QualType ThisTy) = 0; /// Emit code to force the execution of a destructor during global /// teardown. The default implementation of this uses atexit. @@ -328,23 +327,23 @@ class CIRGenCXXABI { /// Emit a single constructor/destructor with the gien type from a C++ /// constructor Decl. - virtual void buildCXXStructor(clang::GlobalDecl GD) = 0; + virtual void emitCXXStructor(clang::GlobalDecl GD) = 0; - virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) = 0; - virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) = 0; + virtual void emitRethrow(CIRGenFunction &CGF, bool isNoReturn) = 0; + virtual void emitThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) = 0; - virtual void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) = 0; + virtual void emitBadCastCall(CIRGenFunction &CGF, mlir::Location loc) = 0; virtual mlir::Value getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &CGF, Address This, const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl) = 0; - virtual mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, - QualType SrcRecordTy, - QualType DestRecordTy, - cir::PointerType DestCIRTy, - bool isRefCast, Address Src) = 0; + virtual mlir::Value emitDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, + QualType DestRecordTy, + cir::PointerType DestCIRTy, + bool isRefCast, Address Src) = 0; virtual cir::MethodAttr buildVirtualMethodAttr(cir::MethodType MethodTy, const CXXMethodDecl *MD) = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 64b4c2f0957f..a8e06467e08d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -287,8 +287,8 @@ CIRGenCallee CIRGenCallee::prepareConcreteCallee(CIRGenFunction &CGF) const { return *this; } -void CIRGenFunction::buildAggregateStore(mlir::Value Val, Address Dest, - bool DestIsVolatile) { +void CIRGenFunction::emitAggregateStore(mlir::Value Val, Address Dest, + bool DestIsVolatile) { // In LLVM codegen: // Function to store a first-class aggregate into memory. We prefer to // store the elements rather than the aggregate to be more friendly to @@ -465,7 +465,7 @@ void CIRGenModule::constructAttributeList(StringRef Name, getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, funcAttrs); } -static cir::CIRCallOpInterface buildCallLikeOp( +static cir::CIRCallOpInterface emitCallLikeOp( CIRGenFunction &CGF, mlir::Location callLoc, cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal, cir::FuncOp directFuncOp, SmallVectorImpl &CIRCallArgs, bool isInvoke, @@ -486,7 +486,7 @@ static cir::CIRCallOpInterface buildCallLikeOp( // Don't emit the code right away for catch clauses, for // now create the regions and consume the try scope result. // Note that clauses are later populated in - // CIRGenFunction::buildLandingPad. + // CIRGenFunction::emitLandingPad. [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &result) { // Since this didn't come from an explicit try, we only need one @@ -551,13 +551,13 @@ static cir::CIRCallOpInterface buildCallLikeOp( extraFnAttrs); } -RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, - const CIRGenCallee &Callee, - ReturnValueSlot ReturnValue, - const CallArgList &CallArgs, - cir::CIRCallOpInterface *callOrTryCall, - bool IsMustTail, mlir::Location loc, - std::optional E) { +RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &CallInfo, + const CIRGenCallee &Callee, + ReturnValueSlot ReturnValue, + const CallArgList &CallArgs, + cir::CIRCallOpInterface *callOrTryCall, + bool IsMustTail, mlir::Location loc, + std::optional E) { auto builder = CGM.getBuilder(); // FIXME: We no longer need the types from CallArgs; lift up and simplify @@ -823,7 +823,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, auto extraFnAttrs = cir::ExtraFuncAttributesAttr::get( &getMLIRContext(), Attrs.getDictionary(&getMLIRContext())); - cir::CIRCallOpInterface callLikeOp = buildCallLikeOp( + cir::CIRCallOpInterface callLikeOp = emitCallLikeOp( *this, callLoc, indirectFuncTy, indirectFuncVal, directFuncOp, CIRCallArgs, isInvoke, callingConv, extraFnAttrs); @@ -880,7 +880,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, assert(Results.size() <= 1 && "multiple returns NYI"); SourceLocRAIIObject Loc{*this, callLoc}; - buildAggregateStore(Results[0], DestPtr, DestIsVolatile); + emitAggregateStore(Results[0], DestPtr, DestIsVolatile); return RValue::getAggregate(DestPtr); } case cir::TEK_Scalar: { @@ -921,9 +921,9 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, return ret; } -mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, - cir::FuncOp callee, - ArrayRef args) { +mlir::Value CIRGenFunction::emitRuntimeCall(mlir::Location loc, + cir::FuncOp callee, + ArrayRef args) { // TODO(cir): set the calling convention to this runtime call. assert(!cir::MissingFeatures::setCallingConv()); @@ -937,8 +937,8 @@ mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, return call->getResult(0); } -void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, - QualType type) { +void CIRGenFunction::emitCallArg(CallArgList &args, const Expr *E, + QualType type) { // TODO: Add the DisableDebugLocationUpdates helper assert(!dyn_cast(E) && "NYI"); @@ -947,7 +947,7 @@ void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, if (E->isGLValue()) { assert(E->getObjectKind() == OK_Ordinary); - return args.add(buildReferenceBindingToExpr(E), type); + return args.add(emitReferenceBindingToExpr(E), type); } bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); @@ -962,13 +962,13 @@ void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, if (HasAggregateEvalKind && isa(E) && cast(E)->getCastKind() == CK_LValueToRValue) { - LValue L = buildLValue(cast(E)->getSubExpr()); + LValue L = emitLValue(cast(E)->getSubExpr()); assert(L.isSimple()); args.addUncopiedAggregate(L, type); return; } - args.add(buildAnyExprToTemp(E), type); + args.add(emitAnyExprToTemp(E), type); } QualType CIRGenFunction::getVarArgType(const Expr *Arg) { @@ -989,19 +989,19 @@ QualType CIRGenFunction::getVarArgType(const Expr *Arg) { return Arg->getType(); } -/// Similar to buildAnyExpr(), however, the result will always be accessible +/// Similar to emitAnyExpr(), however, the result will always be accessible /// even if no aggregate location is provided. -RValue CIRGenFunction::buildAnyExprToTemp(const Expr *E) { +RValue CIRGenFunction::emitAnyExprToTemp(const Expr *E) { AggValueSlot AggSlot = AggValueSlot::ignored(); if (hasAggregateEvaluationKind(E->getType())) AggSlot = CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), getCounterAggTmpAsString()); - return buildAnyExpr(E, AggSlot); + return emitAnyExpr(E, AggSlot); } -void CIRGenFunction::buildCallArgs( +void CIRGenFunction::emitCallArgs( CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range ArgRange, AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { @@ -1076,11 +1076,11 @@ void CIRGenFunction::buildCallArgs( assert(!isa(*Arg) && "NYI"); assert(!isa_and_nonnull(AC.getDecl()) && "NYI"); - buildCallArg(Args, *Arg, ArgTypes[Idx]); + emitCallArg(Args, *Arg, ArgTypes[Idx]); // In particular, we depend on it being the last arg in Args, and the // objectsize bits depend on there only being one arg if !LeftToRight. assert(InitialArgSize + 1 == Args.size() && - "The code below depends on only adding one arg per buildCallArg"); + "The code below depends on only adding one arg per emitCallArg"); (void)InitialArgSize; // Since pointer argument are never emitted as LValue, it is safe to emit // non-null argument check for r-value only. @@ -1343,11 +1343,11 @@ static bool isInAllocaArgument(CIRGenCXXABI &ABI, QualType type) { ABI.getRecordArgABI(RD) == CIRGenCXXABI::RecordArgABI::DirectInMemory; } -void CIRGenFunction::buildDelegateCallArg(CallArgList &args, - const VarDecl *param, - SourceLocation loc) { +void CIRGenFunction::emitDelegateCallArg(CallArgList &args, + const VarDecl *param, + SourceLocation loc) { // StartFunction converted the ABI-lowered parameter(s) into a local alloca. - // We need to turn that into an r-value suitable for buildCall + // We need to turn that into an r-value suitable for emitCall Address local = GetAddrOfLocalVar(param); QualType type = param->getType(); @@ -1553,15 +1553,15 @@ RValue CallArg::getRValue(CIRGenFunction &CGF, mlir::Location loc) const { if (!HasLV) return RV; LValue Copy = CGF.makeAddrLValue(CGF.CreateMemTemp(Ty, loc), Ty); - CGF.buildAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, - LV.isVolatile()); + CGF.emitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, + LV.isVolatile()); IsUsed = true; return RValue::getAggregate(Copy.getAddress()); } -void CIRGenFunction::buildNonNullArgCheck(RValue RV, QualType ArgType, - SourceLocation ArgLoc, - AbstractCallee AC, unsigned ParmNum) { +void CIRGenFunction::emitNonNullArgCheck(RValue RV, QualType ArgType, + SourceLocation ArgLoc, + AbstractCallee AC, unsigned ParmNum) { if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || SanOpts.has(SanitizerKind::NullabilityArg))) return; @@ -1572,11 +1572,11 @@ void CIRGenFunction::buildNonNullArgCheck(RValue RV, QualType ArgType, // FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. We // need to decide how to handle va_arg target-specific codegen. -mlir::Value CIRGenFunction::buildVAArg(VAArgExpr *VE, Address &VAListAddr) { +mlir::Value CIRGenFunction::emitVAArg(VAArgExpr *VE, Address &VAListAddr) { assert(!VE->isMicrosoftABI() && "NYI"); auto loc = CGM.getLoc(VE->getExprLoc()); auto type = ConvertType(VE->getType()); - auto vaList = buildVAListRef(VE->getSubExpr()).getPointer(); + auto vaList = emitVAListRef(VE->getSubExpr()).getPointer(); return builder.create(loc, type, vaList); } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 96044f0e52d6..d3b96e5beedf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -141,7 +141,7 @@ class FieldMemcpyizer { return MemcpySize; } - void buildMemcpy() { + void emitMemcpy() { // Give the subclass a chance to bail out if it feels the memcpy isn't worth // it (e.g. Hasn't aggregated enough data). if (!FirstField) { @@ -158,7 +158,7 @@ class FieldMemcpyizer { const CXXRecordDecl *ClassDecl; private: - void buildMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) { + void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) { llvm_unreachable("NYI"); } @@ -200,27 +200,27 @@ class FieldMemcpyizer { unsigned LastAddedFieldIndex; }; -static void buildLValueForAnyFieldInitialization(CIRGenFunction &CGF, - CXXCtorInitializer *MemberInit, - LValue &LHS) { +static void emitLValueForAnyFieldInitialization(CIRGenFunction &CGF, + CXXCtorInitializer *MemberInit, + LValue &LHS) { FieldDecl *Field = MemberInit->getAnyMember(); if (MemberInit->isIndirectMemberInitializer()) { // If we are initializing an anonymous union field, drill down to the field. IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember(); for (const auto *I : IndirectField->chain()) { auto *fd = cast(I); - LHS = CGF.buildLValueForFieldInitialization(LHS, fd, fd->getName()); + LHS = CGF.emitLValueForFieldInitialization(LHS, fd, fd->getName()); } } else { - LHS = CGF.buildLValueForFieldInitialization(LHS, Field, Field->getName()); + LHS = CGF.emitLValueForFieldInitialization(LHS, Field, Field->getName()); } } -static void buildMemberInitializer(CIRGenFunction &CGF, - const CXXRecordDecl *ClassDecl, - CXXCtorInitializer *MemberInit, - const CXXConstructorDecl *Constructor, - FunctionArgList &Args) { +static void emitMemberInitializer(CIRGenFunction &CGF, + const CXXRecordDecl *ClassDecl, + CXXCtorInitializer *MemberInit, + const CXXConstructorDecl *Constructor, + FunctionArgList &Args) { // TODO: ApplyDebugLocation assert(MemberInit->isAnyMemberInitializer() && "Mush have member initializer!"); @@ -241,7 +241,7 @@ static void buildMemberInitializer(CIRGenFunction &CGF, else LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); - buildLValueForAnyFieldInitialization(CGF, MemberInit, LHS); + emitLValueForAnyFieldInitialization(CGF, MemberInit, LHS); // Special case: If we are in a copy or move constructor, and we are copying // an array off PODs or classes with tirival copy constructors, ignore the AST @@ -255,7 +255,7 @@ static void buildMemberInitializer(CIRGenFunction &CGF, llvm_unreachable("NYI"); } - CGF.buildInitializerForField(Field, LHS, MemberInit->getInit()); + CGF.emitInitializerForField(Field, LHS, MemberInit->getInit()); } class ConstructorMemcpyizer : public FieldMemcpyizer { @@ -296,13 +296,13 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { AggregatedInits.push_back(MemberInit); addMemcpyableField(MemberInit->getMember()); } else { - buildAggregatedInits(); - buildMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit, - ConstructorDecl, Args); + emitAggregatedInits(); + emitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit, + ConstructorDecl, Args); } } - void buildAggregatedInits() { + void emitAggregatedInits() { if (AggregatedInits.size() <= 1) { // This memcpy is too small to be worthwhile. Fall back on default // codegen. @@ -314,7 +314,7 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { } pushEHDestructors(); - buildMemcpy(); + emitMemcpy(); AggregatedInits.clear(); } @@ -331,12 +331,12 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { if (!CGF.needsEHCleanup(dtorKind)) continue; LValue FieldLHS = LHS; - buildLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS); + emitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS); CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType); } } - void finish() { buildAggregatedInits(); } + void finish() { emitAggregatedInits(); } private: const CXXConstructorDecl *ConstructorDecl; @@ -435,7 +435,7 @@ class AssignmentMemcpyizer : public FieldMemcpyizer { AggregatedStmts.push_back(S); } else { emitAggregatedStmts(); - if (CGF.buildStmt(S, /*useCurrentScope=*/true).failed()) + if (CGF.emitStmt(S, /*useCurrentScope=*/true).failed()) llvm_unreachable("Should not get here!"); } } @@ -444,14 +444,13 @@ class AssignmentMemcpyizer : public FieldMemcpyizer { if (AggregatedStmts.size() <= 1) { if (!AggregatedStmts.empty()) { CopyingValueRepresentation CVR(CGF); - if (CGF.buildStmt(AggregatedStmts[0], /*useCurrentScope=*/true) - .failed()) + if (CGF.emitStmt(AggregatedStmts[0], /*useCurrentScope=*/true).failed()) llvm_unreachable("Should not get here!"); } reset(); } - buildMemcpy(); + emitMemcpy(); AggregatedStmts.clear(); } @@ -486,8 +485,8 @@ struct CallBaseDtor final : EHScopeStack::Cleanup { Address Addr = CGF.getAddressOfDirectBaseInCompleteClass( *CGF.currSrcLoc, CGF.LoadCXXThisAddress(), DerivedClass, BaseClass, BaseIsVirtual); - CGF.buildCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, - /*Delegating=*/false, Addr, ThisTy); + CGF.emitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, + /*Delegating=*/false, Addr, ThisTy); } }; @@ -540,9 +539,9 @@ Address CIRGenFunction::getAddressOfDirectBaseInCompleteClass( /*assume_not_null=*/true); } -static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, - const CXXRecordDecl *ClassDecl, - CXXCtorInitializer *BaseInit) { +static void emitBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, + const CXXRecordDecl *ClassDecl, + CXXCtorInitializer *BaseInit) { assert(BaseInit->isBaseInitializer() && "Must have base initializer!"); Address ThisPtr = CGF.LoadCXXThisAddress(); @@ -568,7 +567,7 @@ static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, CGF.getOverlapForBaseInit(ClassDecl, BaseClassDecl, isBaseVirtual)); - CGF.buildAggExpr(BaseInit->getInit(), AggSlot); + CGF.emitAggExpr(BaseInit->getInit(), AggSlot); if (CGF.CGM.getLangOpts().Exceptions && !BaseClassDecl->hasTrivialDestructor()) @@ -578,11 +577,11 @@ static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, /// This routine generates necessary code to initialize base classes and /// non-static data members belonging to this constructor. -void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, - CXXCtorType CtorType, - FunctionArgList &Args) { +void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *CD, + CXXCtorType CtorType, + FunctionArgList &Args) { if (CD->isDelegatingConstructor()) - return buildDelegatingCXXConstructorCall(CD, Args); + return emitDelegatingCXXConstructorCall(CD, Args); const CXXRecordDecl *ClassDecl = CD->getParent(); @@ -617,7 +616,7 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, CGM.getCodeGenOpts().OptimizationLevel > 0 && isInitializerOfDynamicClass(*B)) llvm_unreachable("NYI"); - buildBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); + emitBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); } if (BaseCtorContinueBB) { @@ -632,7 +631,7 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, CGM.getCodeGenOpts().OptimizationLevel > 0 && isInitializerOfDynamicClass(*B)) llvm_unreachable("NYI"); - buildBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); + emitBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); } CXXThisValue = OldThis; @@ -847,13 +846,13 @@ Address CIRGenFunction::LoadCXXThisAddress() { return Address(LoadCXXThis(), CXXThisAlignment); } -void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, - Expr *Init) { +void CIRGenFunction::emitInitializerForField(FieldDecl *Field, LValue LHS, + Expr *Init) { QualType FieldType = Field->getType(); switch (getEvaluationKind(FieldType)) { case cir::TEK_Scalar: if (LHS.isSimple()) { - buildExprAsInit(Init, Field, LHS, false); + emitExprAsInit(Init, Field, LHS, false); } else { llvm_unreachable("NYI"); } @@ -868,7 +867,7 @@ void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, AggValueSlot::IsNotZeroed, // Checks are made by the code that calls constructor. AggValueSlot::IsSanitizerChecked); - buildAggExpr(Init, Slot); + emitAggExpr(Init, Slot); break; } } @@ -881,7 +880,7 @@ void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, llvm_unreachable("NYI"); } -void CIRGenFunction::buildDelegateCXXConstructorCall( +void CIRGenFunction::emitDelegateCXXConstructorCall( const CXXConstructorDecl *Ctor, CXXCtorType CtorType, const FunctionArgList &Args, SourceLocation Loc) { CallArgList DelegateArgs; @@ -904,17 +903,16 @@ void CIRGenFunction::buildDelegateCXXConstructorCall( for (; I != E; ++I) { const VarDecl *param = *I; // FIXME: per-argument source location - buildDelegateCallArg(DelegateArgs, param, Loc); + emitDelegateCallArg(DelegateArgs, param, Loc); } - buildCXXConstructorCall(Ctor, CtorType, /*ForVirtualBase=*/false, - /*Delegating=*/true, This, DelegateArgs, - AggValueSlot::MayOverlap, Loc, - /*NewPointerIsChecked=*/true); + emitCXXConstructorCall(Ctor, CtorType, /*ForVirtualBase=*/false, + /*Delegating=*/true, This, DelegateArgs, + AggValueSlot::MayOverlap, Loc, + /*NewPointerIsChecked=*/true); } -void CIRGenFunction::buildImplicitAssignmentOperatorBody( - FunctionArgList &Args) { +void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) { const CXXMethodDecl *AssignOp = cast(CurGD.getDecl()); const Stmt *RootS = AssignOp->getBody(); assert(isa(RootS) && @@ -931,7 +929,7 @@ void CIRGenFunction::buildImplicitAssignmentOperatorBody( AM.finish(); } -void CIRGenFunction::buildForwardingCallToLambda( +void CIRGenFunction::emitForwardingCallToLambda( const CXXMethodDecl *callOperator, CallArgList &callArgs) { // Get the address of the call operator. const auto &calleeFnInfo = @@ -956,19 +954,19 @@ void CIRGenFunction::buildForwardingCallToLambda( // Now emit our call. auto callee = CIRGenCallee::forDirect(calleePtr, GlobalDecl(callOperator)); - RValue RV = buildCall(calleeFnInfo, callee, returnSlot, callArgs); + RValue RV = emitCall(calleeFnInfo, callee, returnSlot, callArgs); // If necessary, copy the returned value into the slot. if (!resultType->isVoidType() && returnSlot.isNull()) { if (getLangOpts().ObjCAutoRefCount && resultType->isObjCRetainableType()) llvm_unreachable("NYI"); - buildReturnOfRValue(*currSrcLoc, RV, resultType); + emitReturnOfRValue(*currSrcLoc, RV, resultType); } else { llvm_unreachable("NYI"); } } -void CIRGenFunction::buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { +void CIRGenFunction::emitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { const CXXRecordDecl *Lambda = MD->getParent(); // Start building arguments for forwarding call @@ -982,7 +980,7 @@ void CIRGenFunction::buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { // Add the rest of the parameters. for (auto *Param : MD->parameters()) - buildDelegateCallArg(CallArgs, Param, Param->getBeginLoc()); + emitDelegateCallArg(CallArgs, Param, Param->getBeginLoc()); const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator(); // For a generic lambda, find the corresponding call operator specialization @@ -998,10 +996,10 @@ void CIRGenFunction::buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { assert(CorrespondingCallOpSpecialization); CallOp = cast(CorrespondingCallOpSpecialization); } - buildForwardingCallToLambda(CallOp, CallArgs); + emitForwardingCallToLambda(CallOp, CallArgs); } -void CIRGenFunction::buildLambdaStaticInvokeBody(const CXXMethodDecl *MD) { +void CIRGenFunction::emitLambdaStaticInvokeBody(const CXXMethodDecl *MD) { if (MD->isVariadic()) { // Codgen for LLVM doesn't emit code for this as well, it says: // FIXME: Making this work correctly is nasty because it requires either @@ -1010,7 +1008,7 @@ void CIRGenFunction::buildLambdaStaticInvokeBody(const CXXMethodDecl *MD) { llvm_unreachable("NYI"); } - buildLambdaDelegatingInvokeBody(MD); + emitLambdaDelegatingInvokeBody(MD); } void CIRGenFunction::destroyCXXObject(CIRGenFunction &CGF, Address addr, @@ -1022,8 +1020,8 @@ void CIRGenFunction::destroyCXXObject(CIRGenFunction &CGF, Address addr, // dtors which shall be removed on later CIR passes. However, only remove this // assertion once we get a testcase to exercise this path. assert(!dtor->isTrivial()); - CGF.buildCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false, - /*Delegating=*/false, addr, type); + CGF.emitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false, + /*Delegating=*/false, addr, type); } static bool FieldHasTrivialDestructorBody(ASTContext &Context, @@ -1115,7 +1113,7 @@ static bool CanSkipVTablePointerInitialization(CIRGenFunction &CGF, } /// Emits the body of the current destructor. -void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { +void CIRGenFunction::emitDestructorBody(FunctionArgList &Args) { const CXXDestructorDecl *Dtor = cast(CurGD.getDecl()); CXXDtorType DtorType = CurGD.getDtorType(); @@ -1148,9 +1146,8 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { EnterDtorCleanups(Dtor, Dtor_Deleting); if (HaveInsertPoint()) { QualType ThisTy = Dtor->getFunctionObjectParameterType(); - buildCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, - /*Delegating=*/false, LoadCXXThisAddress(), - ThisTy); + emitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, + /*Delegating=*/false, LoadCXXThisAddress(), ThisTy); } return; } @@ -1188,9 +1185,8 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { if (!isTryBody) { QualType ThisTy = Dtor->getFunctionObjectParameterType(); - buildCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, - /*Delegating=*/false, LoadCXXThisAddress(), - ThisTy); + emitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, + /*Delegating=*/false, LoadCXXThisAddress(), ThisTy); break; } @@ -1217,7 +1213,7 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) { if (isTryBody) llvm_unreachable("NYI"); else if (Body) - (void)buildStmt(Body, /*useCurrentScope=*/true); + (void)emitStmt(Body, /*useCurrentScope=*/true); else { assert(Dtor->isImplicit() && "bodyless dtor not implicit"); // nothing to do besides what's in the epilogue @@ -1242,7 +1238,7 @@ namespace { [[maybe_unused]] mlir::Value LoadThisForDtorDelete(CIRGenFunction &CGF, const CXXDestructorDecl *DD) { if (Expr *ThisArg = DD->getOperatorDeleteThisArg()) - return CGF.buildScalarExpr(ThisArg); + return CGF.emitScalarExpr(ThisArg); return CGF.LoadCXXThis(); } @@ -1253,9 +1249,9 @@ struct CallDtorDelete final : EHScopeStack::Cleanup { void Emit(CIRGenFunction &CGF, Flags flags) override { const CXXDestructorDecl *Dtor = cast(CGF.CurCodeDecl); const CXXRecordDecl *ClassDecl = Dtor->getParent(); - CGF.buildDeleteCall(Dtor->getOperatorDelete(), - LoadThisForDtorDelete(CGF, Dtor), - CGF.getContext().getTagDeclType(ClassDecl)); + CGF.emitDeleteCall(Dtor->getOperatorDelete(), + LoadThisForDtorDelete(CGF, Dtor), + CGF.getContext().getTagDeclType(ClassDecl)); } }; } // namespace @@ -1276,7 +1272,7 @@ class DestroyField final : public EHScopeStack::Cleanup { Address thisValue = CGF.LoadCXXThisAddress(); QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent()); LValue ThisLV = CGF.makeAddrLValue(thisValue, RecordTy); - LValue LV = CGF.buildLValueForField(ThisLV, field); + LValue LV = CGF.emitLValueForField(ThisLV, field); assert(LV.isSimple()); CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer, @@ -1417,13 +1413,13 @@ struct CallDelegatingCtorDtor final : EHScopeStack::Cleanup { // We are calling the destructor from within the constructor. // Therefore, "this" should have the expected type. QualType ThisTy = Dtor->getFunctionObjectParameterType(); - CGF.buildCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false, - /*Delegating=*/true, Addr, ThisTy); + CGF.emitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false, + /*Delegating=*/true, Addr, ThisTy); } }; } // end anonymous namespace -void CIRGenFunction::buildDelegatingCXXConstructorCall( +void CIRGenFunction::emitDelegatingCXXConstructorCall( const CXXConstructorDecl *Ctor, const FunctionArgList &Args) { assert(Ctor->isDelegatingConstructor()); @@ -1436,7 +1432,7 @@ void CIRGenFunction::buildDelegatingCXXConstructorCall( // Checks are made by the code that calls constructor. AggValueSlot::IsSanitizerChecked); - buildAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot); + emitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot); const CXXRecordDecl *ClassDecl = Ctor->getParent(); if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) { @@ -1448,13 +1444,12 @@ void CIRGenFunction::buildDelegatingCXXConstructorCall( } } -void CIRGenFunction::buildCXXDestructorCall(const CXXDestructorDecl *DD, - CXXDtorType Type, - bool ForVirtualBase, - bool Delegating, Address This, - QualType ThisTy) { - CGM.getCXXABI().buildDestructorCall(*this, DD, Type, ForVirtualBase, - Delegating, This, ThisTy); +void CIRGenFunction::emitCXXDestructorCall(const CXXDestructorDecl *DD, + CXXDtorType Type, + bool ForVirtualBase, bool Delegating, + Address This, QualType ThisTy) { + CGM.getCXXABI().emitDestructorCall(*this, DD, Type, ForVirtualBase, + Delegating, This, ThisTy); } mlir::Value CIRGenFunction::GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, @@ -1633,9 +1628,9 @@ bool CIRGenFunction::shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) { TypeName); } -void CIRGenFunction::buildTypeMetadataCodeForVCall(const CXXRecordDecl *RD, - mlir::Value VTable, - SourceLocation Loc) { +void CIRGenFunction::emitTypeMetadataCodeForVCall(const CXXRecordDecl *RD, + mlir::Value VTable, + SourceLocation Loc) { if (SanOpts.has(SanitizerKind::CFIVCall)) { llvm_unreachable("NYI"); } else if (CGM.getCodeGenOpts().WholeProgramVTables && @@ -1661,7 +1656,7 @@ mlir::Value CIRGenFunction::getVTablePtr(mlir::Location Loc, Address This, return VTable; } -Address CIRGenFunction::buildCXXMemberDataPointerAddress( +Address CIRGenFunction::emitCXXMemberDataPointerAddress( const Expr *E, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo) { assert(!cir::MissingFeatures::cxxABI()); @@ -1744,14 +1739,14 @@ CIRGenModule::getVBaseAlignment(CharUnits actualDerivedAlign, /// \param arrayBegin an arrayType* /// \param zeroInitialize true if each element should be /// zero-initialized before it is constructed -void CIRGenFunction::buildCXXAggrConstructorCall( +void CIRGenFunction::emitCXXAggrConstructorCall( const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *E, bool NewPointerIsChecked, bool zeroInitialize) { QualType elementType; - auto numElements = buildArrayLength(arrayType, elementType, arrayBegin); - buildCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, - NewPointerIsChecked, zeroInitialize); + auto numElements = emitArrayLength(arrayType, elementType, arrayBegin); + emitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, + NewPointerIsChecked, zeroInitialize); } /// Emit a loop to call a particular constructor for each of several members @@ -1763,7 +1758,7 @@ void CIRGenFunction::buildCXXAggrConstructorCall( /// \param arrayBase a T*, where T is the type constructed by ctor /// \param zeroInitialize true if each element should be /// zero-initialized before it is constructed -void CIRGenFunction::buildCXXAggrConstructorCall( +void CIRGenFunction::emitCXXAggrConstructorCall( const CXXConstructorDecl *ctor, mlir::Value numElements, Address arrayBase, const CXXConstructExpr *E, bool NewPointerIsChecked, bool zeroInitialize) { // It's legal for numElements to be zero. This can happen both @@ -1837,9 +1832,9 @@ void CIRGenFunction::buildCXXAggrConstructorCall( AggValueSlot::DoesNotOverlap, AggValueSlot::IsNotZeroed, NewPointerIsChecked ? AggValueSlot::IsSanitizerChecked : AggValueSlot::IsNotSanitizerChecked); - buildCXXConstructorCall(ctor, Ctor_Complete, - /*ForVirtualBase=*/false, - /*Delegating=*/false, currAVS, E); + emitCXXConstructorCall(ctor, Ctor_Complete, + /*ForVirtualBase=*/false, + /*Delegating=*/false, currAVS, E); builder.create(loc); }); } @@ -1872,12 +1867,12 @@ static bool canEmitDelegateCallArgs(CIRGenFunction &CGF, return true; } -void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, - clang::CXXCtorType Type, - bool ForVirtualBase, - bool Delegating, - AggValueSlot ThisAVS, - const clang::CXXConstructExpr *E) { +void CIRGenFunction::emitCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, + bool ForVirtualBase, + bool Delegating, + AggValueSlot ThisAVS, + const clang::CXXConstructExpr *E) { CallArgList Args; Address This = ThisAVS.getAddress(); LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace(); @@ -1901,15 +1896,15 @@ void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, ? EvaluationOrder::ForceLeftToRight : EvaluationOrder::Default; - buildCallArgs(Args, FPT, E->arguments(), E->getConstructor(), - /*ParamsToSkip*/ 0, Order); + emitCallArgs(Args, FPT, E->arguments(), E->getConstructor(), + /*ParamsToSkip*/ 0, Order); - buildCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args, - ThisAVS.mayOverlap(), E->getExprLoc(), - ThisAVS.isSanitizerChecked()); + emitCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args, + ThisAVS.mayOverlap(), E->getExprLoc(), + ThisAVS.isSanitizerChecked()); } -void CIRGenFunction::buildCXXConstructorCall( +void CIRGenFunction::emitCXXConstructorCall( const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, Address This, CallArgList &Args, AggValueSlot::Overlap_t Overlap, SourceLocation Loc, @@ -1918,8 +1913,8 @@ void CIRGenFunction::buildCXXConstructorCall( const auto *ClassDecl = D->getParent(); if (!NewPointerIsChecked) - buildTypeCheck(CIRGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), - getContext().getRecordType(ClassDecl), CharUnits::Zero()); + emitTypeCheck(CIRGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), + getContext().getRecordType(ClassDecl), CharUnits::Zero()); // If this is a call to a trivial default constructor: // In LLVM: do nothing. @@ -1953,7 +1948,7 @@ void CIRGenFunction::buildCXXConstructorCall( Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); cir::CIRCallOpInterface C; - buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, getLoc(Loc)); + emitCall(Info, Callee, ReturnValueSlot(), Args, &C, false, getLoc(Loc)); assert(CGM.getCodeGenOpts().OptimizationLevel == 0 || ClassDecl->isDynamicClass() || Type == Ctor_Base || @@ -1961,7 +1956,7 @@ void CIRGenFunction::buildCXXConstructorCall( "vtable assumption loads NYI"); } -void CIRGenFunction::buildInheritedCXXConstructorCall( +void CIRGenFunction::emitInheritedCXXConstructorCall( const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) { CallArgList Args; @@ -1986,24 +1981,23 @@ void CIRGenFunction::buildInheritedCXXConstructorCall( assert(getContext().hasSameUnqualifiedType( OuterCtor->getParamDecl(Param->getFunctionScopeIndex())->getType(), Param->getType())); - buildDelegateCallArg(Args, Param, E->getLocation()); + emitDelegateCallArg(Args, Param, E->getLocation()); // Forward __attribute__(pass_object_size). if (Param->hasAttr()) { auto *POSParam = SizeArguments[Param]; assert(POSParam && "missing pass_object_size value for forwarding"); - buildDelegateCallArg(Args, POSParam, E->getLocation()); + emitDelegateCallArg(Args, POSParam, E->getLocation()); } } } - buildCXXConstructorCall(D, Ctor_Base, ForVirtualBase, /*Delegating*/ false, - This, Args, AggValueSlot::MayOverlap, - E->getLocation(), - /*NewPointerIsChecked*/ true); + emitCXXConstructorCall(D, Ctor_Base, ForVirtualBase, /*Delegating*/ false, + This, Args, AggValueSlot::MayOverlap, E->getLocation(), + /*NewPointerIsChecked*/ true); } -void CIRGenFunction::buildInlinedInheritingCXXConstructorCall( +void CIRGenFunction::emitInlinedInheritingCXXConstructorCall( const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase, bool Delegating, CallArgList &Args) { GlobalDecl GD(Ctor, CtorType); diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 96dce5e2960f..4e0a305a502c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -33,8 +33,8 @@ using namespace cir; /// or with the labeled blocked if already solved. /// /// Track on scope basis, goto's we need to fix later. -cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc, - JumpDest Dest) { +cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc, + JumpDest Dest) { // Remove this once we go for making sure unreachable code is // well modeled (or not). assert(builder.getInsertionBlock() && "not yet implemented"); @@ -47,8 +47,8 @@ cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc, } /// Emits all the code to cause the given temporary to be cleaned up. -void CIRGenFunction::buildCXXTemporary(const CXXTemporary *Temporary, - QualType TempType, Address Ptr) { +void CIRGenFunction::emitCXXTemporary(const CXXTemporary *Temporary, + QualType TempType, Address Ptr) { pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject, /*useEHCleanup*/ true); } @@ -248,9 +248,9 @@ static void destroyOptimisticNormalEntry(CIRGenFunction &CGF, llvm_unreachable("NYI"); } -static void buildCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, - EHScopeStack::Cleanup::Flags flags, - Address ActiveFlag) { +static void emitCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, + EHScopeStack::Cleanup::Flags flags, + Address ActiveFlag) { auto emitCleanup = [&]() { // Ask the cleanup to emit itself. assert(CGF.HaveInsertPoint() && "expected insertion point"); @@ -409,7 +409,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { destroyOptimisticNormalEntry(*this, Scope); EHStack.popCleanup(); Scope.markEmitted(); - buildCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); + emitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); // Otherwise, the best approach is to thread everything through // the cleanup block and then try to clean up after ourselves. @@ -454,7 +454,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { auto yield = cast(ehEntry->getTerminator()); builder.setInsertionPoint(yield); - buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag); + emitCleanup(*this, Fn, cleanupFlags, EHActiveFlag); } if (CPI) @@ -478,12 +478,12 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { currYield->getParentOp()->getParentOfType()) { mlir::Block *resumeBlockToPatch = tryToPatch.getCatchUnwindEntryBlock(); - buildEHResumeBlock(/*isCleanup=*/true, resumeBlockToPatch, - tryToPatch.getLoc()); + emitEHResumeBlock(/*isCleanup=*/true, resumeBlockToPatch, + tryToPatch.getLoc()); } } - buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag); + emitCleanup(*this, Fn, cleanupFlags, EHActiveFlag); currBlock = blockToPatch; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 86366f6bfa15..6b6ed53faafa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -123,12 +123,12 @@ struct ParamReferenceReplacerRAII { } // namespace // Emit coroutine intrinsic and patch up arguments of the token type. -RValue CIRGenFunction::buildCoroutineIntrinsic(const CallExpr *E, - unsigned int IID) { +RValue CIRGenFunction::emitCoroutineIntrinsic(const CallExpr *E, + unsigned int IID) { llvm_unreachable("NYI"); } -RValue CIRGenFunction::buildCoroutineFrame() { +RValue CIRGenFunction::emitCoroutineFrame() { if (CurCoro.Data && CurCoro.Data->CoroBegin) { return RValue::get(CurCoro.Data->CoroBegin); } @@ -136,10 +136,10 @@ RValue CIRGenFunction::buildCoroutineFrame() { } static mlir::LogicalResult -buildBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S, - Stmt *Body, - const CIRGenFunction::LexicalScope *currLexScope) { - if (CGF.buildStmt(Body, /*useCurrentScope=*/true).failed()) +emitBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S, + Stmt *Body, + const CIRGenFunction::LexicalScope *currLexScope) { + if (CGF.emitStmt(Body, /*useCurrentScope=*/true).failed()) return mlir::failure(); // Note that LLVM checks CanFallthrough by looking into the availability // of the insert block which is kinda brittle and unintuitive, seems to be @@ -152,14 +152,14 @@ buildBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S, const bool CanFallthrough = !currLexScope->hasCoreturn(); if (CanFallthrough) if (Stmt *OnFallthrough = S.getFallthroughHandler()) - if (CGF.buildStmt(OnFallthrough, /*useCurrentScope=*/true).failed()) + if (CGF.emitStmt(OnFallthrough, /*useCurrentScope=*/true).failed()) return mlir::failure(); return mlir::success(); } -cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, - mlir::Value nullPtr) { +cir::CallOp CIRGenFunction::emitCoroIDBuiltinCall(mlir::Location loc, + mlir::Value nullPtr) { auto int32Ty = builder.getUInt32Ty(); auto &TI = CGM.getASTContext().getTargetInfo(); @@ -183,7 +183,7 @@ cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc, nullPtr, nullPtr, nullPtr}); } -cir::CallOp CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { +cir::CallOp CIRGenFunction::emitCoroAllocBuiltinCall(mlir::Location loc) { auto boolTy = builder.getBoolTy(); auto int32Ty = builder.getUInt32Ty(); @@ -204,8 +204,8 @@ cir::CallOp CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) { } cir::CallOp -CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, - mlir::Value coroframeAddr) { +CIRGenFunction::emitCoroBeginBuiltinCall(mlir::Location loc, + mlir::Value coroframeAddr) { auto int32Ty = builder.getUInt32Ty(); mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroBegin); @@ -225,8 +225,8 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc, mlir::ValueRange{CurCoro.Data->CoroId.getResult(), coroframeAddr}); } -cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, - mlir::Value nullPtr) { +cir::CallOp CIRGenFunction::emitCoroEndBuiltinCall(mlir::Location loc, + mlir::Value nullPtr) { auto boolTy = builder.getBoolTy(); mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroEnd); @@ -246,19 +246,19 @@ cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc, } mlir::LogicalResult -CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { +CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &S) { auto openCurlyLoc = getLoc(S.getBeginLoc()); auto nullPtrCst = builder.getNullPtr(VoidPtrTy, openCurlyLoc); auto Fn = dyn_cast(CurFn); assert(Fn && "other callables NYI"); Fn.setCoroutineAttr(mlir::UnitAttr::get(&getMLIRContext())); - auto coroId = buildCoroIDBuiltinCall(openCurlyLoc, nullPtrCst); + auto coroId = emitCoroIDBuiltinCall(openCurlyLoc, nullPtrCst); createCoroData(*this, CurCoro, coroId); // Backend is allowed to elide memory allocations, to help it, emit // auto mem = coro.alloc() ? 0 : ... allocation code ...; - auto coroAlloc = buildCoroAllocBuiltinCall(openCurlyLoc); + auto coroAlloc = emitCoroAllocBuiltinCall(openCurlyLoc); // Initialize address of coroutine frame to null auto astVoidPtrTy = CGM.getASTContext().VoidPtrTy; @@ -275,13 +275,13 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { builder.CIRBaseBuilderTy::createStore( - loc, buildScalarExpr(S.getAllocate()), + loc, emitScalarExpr(S.getAllocate()), storeAddr); builder.create(loc); }); CurCoro.Data->CoroBegin = - buildCoroBeginBuiltinCall( + emitCoroBeginBuiltinCall( openCurlyLoc, builder.create(openCurlyLoc, allocaTy, storeAddr)) .getResult(); @@ -310,12 +310,12 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { // evolution of coroutine TS may allow promise constructor to observe // parameter copies. for (auto *PM : S.getParamMoves()) { - if (buildStmt(PM, /*useCurrentScope=*/true).failed()) + if (emitStmt(PM, /*useCurrentScope=*/true).failed()) return mlir::failure(); ParamReplacer.addCopy(cast(PM)); } - if (buildStmt(S.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); // ReturnValue should be valid as long as the coroutine's return type @@ -331,22 +331,22 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { // otherwise the call to get_return_object wouldn't be in front // of initial_suspend. if (ReturnValue.isValid()) { - buildAnyExprToMem(S.getReturnValue(), ReturnValue, - S.getReturnValue()->getType().getQualifiers(), - /*IsInit*/ true); + emitAnyExprToMem(S.getReturnValue(), ReturnValue, + S.getReturnValue()->getType().getQualifiers(), + /*IsInit*/ true); } // FIXME(cir): EHStack.pushCleanup(EHCleanup); CurCoro.Data->CurrentAwaitKind = cir::AwaitKind::init; - if (buildStmt(S.getInitSuspendStmt(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInitSuspendStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); CurCoro.Data->CurrentAwaitKind = cir::AwaitKind::user; - // FIXME(cir): wrap buildBodyAndFallthrough with try/catch bits. + // FIXME(cir): wrap emitBodyAndFallthrough with try/catch bits. if (S.getExceptionHandler()) assert(!cir::MissingFeatures::unhandledException() && "NYI"); - if (buildBodyAndFallthrough(*this, S, S.getBody(), currLexScope).failed()) + if (emitBodyAndFallthrough(*this, S, S.getBody(), currLexScope).failed()) return mlir::failure(); // Note that LLVM checks CanFallthrough by looking into the availability @@ -364,7 +364,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPoint(CurCoro.Data->FinalSuspendInsPoint); - if (buildStmt(S.getFinalSuspendStmt(), /*useCurrentScope=*/true) + if (emitStmt(S.getFinalSuspendStmt(), /*useCurrentScope=*/true) .failed()) return mlir::failure(); } @@ -406,11 +406,11 @@ struct LValueOrRValue { }; } // namespace static LValueOrRValue -buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, - CoroutineSuspendExpr const &S, cir::AwaitKind Kind, - AggValueSlot aggSlot, bool ignoreResult, - mlir::Block *scopeParentBlock, - mlir::Value &tmpResumeRValAddr, bool forLValue) { +emitSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, + CoroutineSuspendExpr const &S, cir::AwaitKind Kind, + AggValueSlot aggSlot, bool ignoreResult, + mlir::Block *scopeParentBlock, + mlir::Value &tmpResumeRValAddr, bool forLValue) { auto *E = S.getCommonExpr(); auto awaitBuild = mlir::success(); @@ -435,7 +435,7 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, // to LLVM dialect (or some other MLIR dialect) // A invalid suspendRet indicates "void returning await_suspend" - auto suspendRet = CGF.buildScalarExpr(S.getSuspendExpr()); + auto suspendRet = CGF.emitScalarExpr(S.getSuspendExpr()); // Veto suspension if requested by bool returning await_suspend. if (suspendRet) { @@ -461,14 +461,14 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, // FIXME(cir): the alloca for the resume expr should be placed in the // enclosing cir.scope instead. if (forLValue) - awaitRes.LV = CGF.buildLValue(S.getResumeExpr()); + awaitRes.LV = CGF.emitLValue(S.getResumeExpr()); else { awaitRes.RV = - CGF.buildAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult); + CGF.emitAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult); if (!awaitRes.RV.isIgnored()) { // Create the alloca in the block before the scope wrapping // cir.await. - tmpResumeRValAddr = CGF.buildAlloca( + tmpResumeRValAddr = CGF.emitAlloca( "__coawait_resume_rval", awaitRes.RV.getScalarVal().getType(), loc, CharUnits::One(), builder.getBestAllocaInsertPoint(scopeParentBlock)); @@ -490,10 +490,10 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro, return awaitRes; } -static RValue buildSuspendExpr(CIRGenFunction &CGF, - const CoroutineSuspendExpr &E, - cir::AwaitKind kind, AggValueSlot aggSlot, - bool ignoreResult) { +static RValue emitSuspendExpr(CIRGenFunction &CGF, + const CoroutineSuspendExpr &E, + cir::AwaitKind kind, AggValueSlot aggSlot, + bool ignoreResult) { RValue rval; auto scopeLoc = CGF.getLoc(E.getSourceRange()); @@ -508,9 +508,9 @@ static RValue buildSuspendExpr(CIRGenFunction &CGF, // No need to explicitly wrap this into a scope since the AST already uses a // ExprWithCleanups, which will wrap this into a cir.scope anyways. - rval = buildSuspendExpression(CGF, *CGF.CurCoro.Data, E, kind, aggSlot, - ignoreResult, currEntryBlock, tmpResumeRValAddr, - /*forLValue*/ false) + rval = emitSuspendExpression(CGF, *CGF.CurCoro.Data, E, kind, aggSlot, + ignoreResult, currEntryBlock, tmpResumeRValAddr, + /*forLValue*/ false) .RV; if (ignoreResult || rval.isIgnored()) @@ -529,21 +529,21 @@ static RValue buildSuspendExpr(CIRGenFunction &CGF, return rval; } -RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, - AggValueSlot aggSlot, - bool ignoreResult) { - return buildSuspendExpr(*this, E, CurCoro.Data->CurrentAwaitKind, aggSlot, - ignoreResult); +RValue CIRGenFunction::emitCoawaitExpr(const CoawaitExpr &E, + AggValueSlot aggSlot, + bool ignoreResult) { + return emitSuspendExpr(*this, E, CurCoro.Data->CurrentAwaitKind, aggSlot, + ignoreResult); } -RValue CIRGenFunction::buildCoyieldExpr(const CoyieldExpr &E, - AggValueSlot aggSlot, - bool ignoreResult) { - return buildSuspendExpr(*this, E, cir::AwaitKind::yield, aggSlot, - ignoreResult); +RValue CIRGenFunction::emitCoyieldExpr(const CoyieldExpr &E, + AggValueSlot aggSlot, + bool ignoreResult) { + return emitSuspendExpr(*this, E, cir::AwaitKind::yield, aggSlot, + ignoreResult); } -mlir::LogicalResult CIRGenFunction::buildCoreturnStmt(CoreturnStmt const &S) { +mlir::LogicalResult CIRGenFunction::emitCoreturnStmt(CoreturnStmt const &S) { ++CurCoro.Data->CoreturnCount; currLexScope->setCoreturn(); @@ -553,9 +553,9 @@ mlir::LogicalResult CIRGenFunction::buildCoreturnStmt(CoreturnStmt const &S) { // with a void expression for side effects. // FIXME(cir): add scope // RunCleanupsScope cleanupScope(*this); - buildIgnoredExpr(RV); + emitIgnoredExpr(RV); } - if (buildStmt(S.getPromiseCall(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getPromiseCall(), /*useCurrentScope=*/true).failed()) return mlir::failure(); // Create a new return block (if not existent) and add a branch to // it. The actual return instruction is only inserted during current diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 5ed32d800bbb..e6bcb0d6bf04 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -33,8 +33,8 @@ using namespace clang; using namespace clang::CIRGen; CIRGenFunction::AutoVarEmission -CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, - mlir::OpBuilder::InsertPoint ip) { +CIRGenFunction::emitAutoVarAlloca(const VarDecl &D, + mlir::OpBuilder::InsertPoint ip) { QualType Ty = D.getType(); assert( Ty.getAddressSpace() == LangAS::Default || @@ -51,7 +51,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, // If the type is variably-modified, emit all the VLA sizes for it. if (Ty->isVariablyModifiedType()) - buildVariablyModifiedType(Ty); + emitVariablyModifiedType(Ty); assert(!cir::MissingFeatures::generateDebugInfo()); assert(!cir::MissingFeatures::cxxABI()); @@ -92,7 +92,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, (!NRVO && !D.isEscapingByref() && CGM.isTypeConstant(Ty, /*ExcludeCtor=*/true, /*ExcludeDtor=*/false))) { - buildStaticVarDecl(D, cir::GlobalLinkageKind::InternalLinkage); + emitStaticVarDecl(D, cir::GlobalLinkageKind::InternalLinkage); // Signal this condition to later callbacks. emission.Addr = Address::invalid(); @@ -199,7 +199,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, assert(!cir::MissingFeatures::generateDebugInfo()); if (D.hasAttr()) - buildVarAnnotations(&D, address.emitRawPointer()); + emitVarAnnotations(&D, address.emitRawPointer()); // TODO(cir): in LLVM this calls @llvm.lifetime.end. assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers()); @@ -254,7 +254,7 @@ static void emitStoresForConstant(CIRGenModule &CGM, const VarDecl &D, builder.createStore(loc, builder.getConstant(loc, constant), addr); } -void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { +void CIRGenFunction::emitAutoVarInit(const AutoVarEmission &emission) { assert(emission.Variable && "emission was not valid!"); // If this was emitted as a global constant, we're done. @@ -328,7 +328,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { if (!constant || isa(Init)) { initializeWhatIsTechnicallyUninitialized(Loc); LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); - buildExprAsInit(Init, &D, lv); + emitExprAsInit(Init, &D, lv); // In case lv has uses it means we indeed initialized something // out of it while trying to build the expression, mark it as such. auto addr = lv.getAddress().getPointer(); @@ -350,7 +350,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { assert(Init && "expected initializer"); auto initLoc = getLoc(Init->getSourceRange()); lv.setNonGC(true); - return buildStoreThroughLValue( + return emitStoreThroughLValue( RValue::get(builder.getConstant(initLoc, typedConstant)), lv); } @@ -358,7 +358,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) { typedConstant, /*IsAutoInit=*/false); } -void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) { +void CIRGenFunction::emitAutoVarCleanups(const AutoVarEmission &emission) { assert(emission.Variable && "emission was not valid!"); // If this was emitted as a global constant, we're done. @@ -372,7 +372,7 @@ void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) { // Check the type for a cleanup. if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext())) - buildAutoVarTypeCleanup(emission, dtorKind); + emitAutoVarTypeCleanup(emission, dtorKind); // In GC mode, honor objc_precise_lifetime. if (getContext().getLangOpts().getGC() != LangOptions::NonGC && @@ -389,13 +389,13 @@ void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) { /// Emit code and set up symbol table for a variable declaration with auto, /// register, or no storage class specifier. These turn into simple stack /// objects, globals depending on target. -void CIRGenFunction::buildAutoVarDecl(const VarDecl &D) { - AutoVarEmission emission = buildAutoVarAlloca(D); - buildAutoVarInit(emission); - buildAutoVarCleanups(emission); +void CIRGenFunction::emitAutoVarDecl(const VarDecl &D) { + AutoVarEmission emission = emitAutoVarAlloca(D); + emitAutoVarInit(emission); + emitAutoVarCleanups(emission); } -void CIRGenFunction::buildVarDecl(const VarDecl &D) { +void CIRGenFunction::emitVarDecl(const VarDecl &D) { if (D.hasExternalStorage()) { // Don't emit it now, allow it to be emitted lazily on its first use. return; @@ -415,16 +415,16 @@ void CIRGenFunction::buildVarDecl(const VarDecl &D) { // some variables even if we can constant-evaluate them because // we can't guarantee every translation unit will constant-evaluate them. - return buildStaticVarDecl(D, Linkage); + return emitStaticVarDecl(D, Linkage); } if (D.getType().getAddressSpace() == LangAS::opencl_local) - return CGM.getOpenCLRuntime().buildWorkGroupLocalVarDecl(*this, D); + return CGM.getOpenCLRuntime().emitWorkGroupLocalVarDecl(*this, D); assert(D.hasLocalStorage()); CIRGenFunction::VarDeclContext varDeclCtx{*this, &D}; - return buildAutoVarDecl(D); + return emitAutoVarDecl(D); } static std::string getStaticDeclName(CIRGenModule &CGM, const VarDecl &D) { @@ -618,8 +618,8 @@ cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( return GV; } -void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, - cir::GlobalLinkageKind Linkage) { +void CIRGenFunction::emitStaticVarDecl(const VarDecl &D, + cir::GlobalLinkageKind Linkage) { // Check to see if we already have a global variable for this // declaration. This can happen when double-emitting function // bodies, e.g. with complete and base constructors. @@ -701,34 +701,34 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, } } -void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS, - SourceLocation Loc) { +void CIRGenFunction::emitNullabilityCheck(LValue LHS, mlir::Value RHS, + SourceLocation Loc) { if (!SanOpts.has(SanitizerKind::NullabilityAssign)) return; llvm_unreachable("NYI"); } -void CIRGenFunction::buildScalarInit(const Expr *init, mlir::Location loc, - LValue lvalue, bool capturedByInit) { +void CIRGenFunction::emitScalarInit(const Expr *init, mlir::Location loc, + LValue lvalue, bool capturedByInit) { Qualifiers::ObjCLifetime lifetime = Qualifiers::ObjCLifetime::OCL_None; assert(!cir::MissingFeatures::objCLifetime()); if (!lifetime) { SourceLocRAIIObject Loc{*this, loc}; - mlir::Value value = buildScalarExpr(init); + mlir::Value value = emitScalarExpr(init); if (capturedByInit) llvm_unreachable("NYI"); assert(!cir::MissingFeatures::emitNullabilityCheck()); - buildStoreThroughLValue(RValue::get(value), lvalue, true); + emitStoreThroughLValue(RValue::get(value), lvalue, true); return; } llvm_unreachable("NYI"); } -void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, - LValue lvalue, bool capturedByInit) { +void CIRGenFunction::emitExprAsInit(const Expr *init, const ValueDecl *D, + LValue lvalue, bool capturedByInit) { SourceLocRAIIObject Loc{*this, getLoc(init->getSourceRange())}; if (capturedByInit) llvm_unreachable("NYI"); @@ -736,22 +736,22 @@ void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, QualType type = D->getType(); if (type->isReferenceType()) { - RValue rvalue = buildReferenceBindingToExpr(init); + RValue rvalue = emitReferenceBindingToExpr(init); if (capturedByInit) llvm_unreachable("NYI"); - buildStoreThroughLValue(rvalue, lvalue); + emitStoreThroughLValue(rvalue, lvalue); return; } switch (CIRGenFunction::getEvaluationKind(type)) { case cir::TEK_Scalar: - buildScalarInit(init, getLoc(D->getSourceRange()), lvalue); + emitScalarInit(init, getLoc(D->getSourceRange()), lvalue); return; case cir::TEK_Complex: { - mlir::Value complex = buildComplexExpr(init); + mlir::Value complex = emitComplexExpr(init); if (capturedByInit) llvm_unreachable("NYI"); - buildStoreOfComplex(getLoc(init->getExprLoc()), complex, lvalue, - /*init*/ true); + emitStoreOfComplex(getLoc(init->getExprLoc()), complex, lvalue, + /*init*/ true); return; } case cir::TEK_Aggregate: @@ -764,16 +764,16 @@ void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, else assert(false && "Only VarDecl implemented so far"); // TODO: how can we delay here if D is captured by its initializer? - buildAggExpr(init, - AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, Overlap)); + emitAggExpr(init, + AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, Overlap)); return; } llvm_unreachable("bad evaluation kind"); } -void CIRGenFunction::buildDecl(const Decl &D) { +void CIRGenFunction::emitDecl(const Decl &D) { switch (D.getKind()) { case Decl::ImplicitConceptSpecialization: case Decl::HLSLBuffer: @@ -875,11 +875,11 @@ void CIRGenFunction::buildDecl(const Decl &D) { const VarDecl &VD = cast(D); assert(VD.isLocalVarDecl() && "Should not see file-scope variables inside a function!"); - buildVarDecl(VD); + emitVarDecl(VD); if (auto *DD = dyn_cast(&VD)) for (auto *B : DD->bindings()) if (auto *HD = B->getHoldingVar()) - buildVarDecl(*HD); + emitVarDecl(*HD); return; } @@ -893,7 +893,7 @@ void CIRGenFunction::buildDecl(const Decl &D) { if (auto *DI = getDebugInfo()) assert(!cir::MissingFeatures::generateDebugInfo()); if (Ty->isVariablyModifiedType()) - buildVariablyModifiedType(Ty); + emitVariablyModifiedType(Ty); return; } } @@ -1095,12 +1095,11 @@ void CIRGenFunction::pushRegularPartialArrayCleanup(mlir::Value arrayBegin, /// \param useEHCleanup - whether to push an EH cleanup to destroy /// the remaining elements in case the destruction of a single /// element throws -void CIRGenFunction::buildArrayDestroy(mlir::Value begin, mlir::Value end, - QualType elementType, - CharUnits elementAlign, - Destroyer *destroyer, - bool checkZeroLength, - bool useEHCleanup) { +void CIRGenFunction::emitArrayDestroy(mlir::Value begin, mlir::Value end, + QualType elementType, + CharUnits elementAlign, + Destroyer *destroyer, + bool checkZeroLength, bool useEHCleanup) { assert(!elementType->isArrayType()); if (checkZeroLength) { llvm_unreachable("NYI"); @@ -1148,7 +1147,7 @@ void CIRGenFunction::emitDestroy(Address addr, QualType type, if (!arrayType) return destroyer(*this, addr, type); - auto length = buildArrayLength(arrayType, type, addr); + auto length = emitArrayLength(arrayType, type, addr); CharUnits elementAlign = addr.getAlignment().alignmentOfArrayElement( getContext().getTypeSizeInChars(type)); @@ -1170,8 +1169,8 @@ void CIRGenFunction::emitDestroy(Address addr, QualType type, auto begin = addr.getPointer(); mlir::Value end; // Use this for future non-constant counts. - buildArrayDestroy(begin, end, type, elementAlign, destroyer, checkZeroLength, - useEHCleanupForArray); + emitArrayDestroy(begin, end, type, elementAlign, destroyer, checkZeroLength, + useEHCleanupForArray); if (constantCount.use_empty()) constantCount.erase(); } @@ -1196,7 +1195,7 @@ void CIRGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) { } /// Enter a destroy cleanup for the given local variable. -void CIRGenFunction::buildAutoVarTypeCleanup( +void CIRGenFunction::emitAutoVarTypeCleanup( const CIRGenFunction::AutoVarEmission &emission, QualType::DestructionKind dtorKind) { assert(dtorKind != QualType::DK_none); diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp index 007a5a3b2932..0b9fa80536de 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp @@ -20,7 +20,7 @@ using namespace clang; using namespace clang::CIRGen; using namespace cir; -void CIRGenModule::buildCXXGlobalInitFunc() { +void CIRGenModule::emitCXXGlobalInitFunc() { while (!CXXGlobalInits.empty() && !CXXGlobalInits.back()) CXXGlobalInits.pop_back(); @@ -31,9 +31,9 @@ void CIRGenModule::buildCXXGlobalInitFunc() { assert(0 && "NYE"); } -void CIRGenModule::buildCXXGlobalVarDeclInitFunc(const VarDecl *D, - cir::GlobalOp Addr, - bool PerformInit) { +void CIRGenModule::emitCXXGlobalVarDeclInitFunc(const VarDecl *D, + cir::GlobalOp Addr, + bool PerformInit) { // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__, // __constant__ and __shared__ variables defined in namespace scope, // that are of class type, cannot have a non-empty constructor. All @@ -49,5 +49,5 @@ void CIRGenModule::buildCXXGlobalVarDeclInitFunc(const VarDecl *D, if (I != DelayedCXXInitPosition.end() && I->second == ~0U) return; - buildCXXGlobalVarDeclInit(D, Addr, PerformInit); + emitCXXGlobalVarDeclInit(D, Addr, PerformInit); } diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 84a4176b36e9..b7a10fb4ef96 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -191,16 +191,16 @@ const EHPersonality &EHPersonality::get(CIRGenFunction &CGF) { return get(CGF.CGM, dyn_cast_or_null(FD)); } -void CIRGenFunction::buildCXXThrowExpr(const CXXThrowExpr *E) { +void CIRGenFunction::emitCXXThrowExpr(const CXXThrowExpr *E) { if (const Expr *SubExpr = E->getSubExpr()) { QualType ThrowType = SubExpr->getType(); if (ThrowType->isObjCObjectPointerType()) { llvm_unreachable("NYI"); } else { - CGM.getCXXABI().buildThrow(*this, E); + CGM.getCXXABI().emitThrow(*this, E); } } else { - CGM.getCXXABI().buildRethrow(*this, /*isNoReturn=*/true); + CGM.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true); } // In LLVM codegen the expression emitters expect to leave this @@ -225,10 +225,10 @@ struct FreeException final : EHScopeStack::Cleanup { } // end anonymous namespace // Emits an exception expression into the given location. This -// differs from buildAnyExprToMem only in that, if a final copy-ctor +// differs from emitAnyExprToMem only in that, if a final copy-ctor // call is required, an exception within that copy ctor causes // std::terminate to be invoked. -void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { +void CIRGenFunction::emitAnyExprToExn(const Expr *e, Address addr) { // Make sure the exception object is cleaned up if there's an // exception during initialization. pushFullExprCleanup(EHCleanup, addr.getPointer()); @@ -247,8 +247,8 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { // evaluated but before the exception is caught. But the best way // to handle that is to teach EmitAggExpr to do the final copy // differently if it can't be elided. - buildAnyExprToMem(e, typedAddr, e->getType().getQualifiers(), - /*IsInit*/ true); + emitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(), + /*IsInit*/ true); // Deactivate the cleanup block. auto op = typedAddr.getPointer().getDefiningOp(); @@ -257,9 +257,9 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) { DeactivateCleanupBlock(cleanup, op); } -void CIRGenFunction::buildEHResumeBlock(bool isCleanup, - mlir::Block *ehResumeBlock, - mlir::Location loc) { +void CIRGenFunction::emitEHResumeBlock(bool isCleanup, + mlir::Block *ehResumeBlock, + mlir::Location loc) { auto ip = getBuilder().saveInsertionPoint(); getBuilder().setInsertionPointToStart(ehResumeBlock); @@ -293,11 +293,11 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup, if (!ehResumeBlock->empty()) return ehResumeBlock; - buildEHResumeBlock(isCleanup, ehResumeBlock, tryOp.getLoc()); + emitEHResumeBlock(isCleanup, ehResumeBlock, tryOp.getLoc()); return ehResumeBlock; } -mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { +mlir::LogicalResult CIRGenFunction::emitCXXTryStmt(const CXXTryStmt &S) { auto loc = getLoc(S.getSourceRange()); mlir::OpBuilder::InsertPoint scopeIP; @@ -313,14 +313,14 @@ mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) { { mlir::OpBuilder::InsertionGuard guard(getBuilder()); getBuilder().restoreInsertionPoint(scopeIP); - r = buildCXXTryStmtUnderScope(S); + r = emitCXXTryStmtUnderScope(S); getBuilder().create(loc); } return r; } mlir::LogicalResult -CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { +CIRGenFunction::emitCXXTryStmtUnderScope(const CXXTryStmt &S) { const llvm::Triple &T = getTarget().getTriple(); // If we encounter a try statement on in an OpenMP target region offloaded to // a GPU, we treat it as a basic block. @@ -354,7 +354,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { // Don't emit the code right away for catch clauses, for // now create the regions and consume the try scope result. // Note that clauses are later populated in - // CIRGenFunction::buildLandingPad. + // CIRGenFunction::emitLandingPad. [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &result) { mlir::OpBuilder::InsertionGuard guard(b); @@ -384,7 +384,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { { CIRGenFunction::LexicalScope tryBodyScope{ *this, loc, getBuilder().getInsertionBlock()}; - if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed()) return mlir::failure(); } } @@ -402,9 +402,8 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) { /// Emit the structure of the dispatch block for the given catch scope. /// It is an invariant that the dispatch block already exists. -static void buildCatchDispatchBlock(CIRGenFunction &CGF, - EHCatchScope &catchScope, - cir::TryOp tryOp) { +static void emitCatchDispatchBlock(CIRGenFunction &CGF, + EHCatchScope &catchScope, cir::TryOp tryOp) { if (EHPersonality::get(CGF).isWasmPersonality()) llvm_unreachable("NYI"); if (EHPersonality::get(CGF).usesFuncletPads()) @@ -522,7 +521,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { } // Emit the structure of the EH dispatch for this catch. - buildCatchDispatchBlock(*this, CatchScope, tryOp); + emitCatchDispatchBlock(*this, CatchScope, tryOp); // Copy the handler blocks off before we pop the EH stack. Emitting // the handlers might scribble on this memory. @@ -569,7 +568,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { assert(!cir::MissingFeatures::incrementProfileCounter()); // Perform the body of the catch. - (void)buildStmt(C->getHandlerBlock(), /*useCurrentScope=*/true); + (void)emitStmt(C->getHandlerBlock(), /*useCurrentScope=*/true); // [except.handle]p11: // The currently handled exception is rethrown if control @@ -620,7 +619,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { llvm_unreachable("Invalid EHScope Kind!"); } -mlir::Operation *CIRGenFunction::buildLandingPad(cir::TryOp tryOp) { +mlir::Operation *CIRGenFunction::emitLandingPad(cir::TryOp tryOp) { assert(EHStack.requiresLandingPad()); assert(!CGM.getLangOpts().IgnoreExceptions && "LandingPad should not be emitted when -fignore-exceptions are in " @@ -881,7 +880,7 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl(cir::TryOp tryOp) { llvm::errs() << "PersonalityFn: " << Personality.PersonalityFn << "\n"; llvm_unreachable("NYI"); } else { - LP = buildLandingPad(tryOp); + LP = emitLandingPad(tryOp); } assert(LP); diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 43cab7480c73..ba2d4b1f185f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -43,7 +43,7 @@ using namespace clang; using namespace clang::CIRGen; using namespace cir; -static cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { +static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); if (FD->hasAttr()) { @@ -56,17 +56,17 @@ static cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) { return V; } -static Address buildPreserveStructAccess(CIRGenFunction &CGF, LValue base, - Address addr, const FieldDecl *field) { +static Address emitPreserveStructAccess(CIRGenFunction &CGF, LValue base, + Address addr, const FieldDecl *field) { llvm_unreachable("NYI"); } /// Get the address of a zero-sized field within a record. The resulting address /// doesn't necessarily have the right type. -static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, - const FieldDecl *field, - llvm::StringRef fieldName, - unsigned fieldIndex) { +static Address emitAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, + const FieldDecl *field, + llvm::StringRef fieldName, + unsigned fieldIndex) { if (field->isZeroSize(CGF.getContext())) llvm_unreachable("NYI"); @@ -112,11 +112,11 @@ static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { return false; } -static Address buildPointerWithAlignment(const Expr *expr, - LValueBaseInfo *baseInfo, - TBAAAccessInfo *tbaaInfo, - KnownNonNull_t isKnownNonNull, - CIRGenFunction &cgf) { +static Address emitPointerWithAlignment(const Expr *expr, + LValueBaseInfo *baseInfo, + TBAAAccessInfo *tbaaInfo, + KnownNonNull_t isKnownNonNull, + CIRGenFunction &cgf) { // We allow this with ObjC object pointers because of fragile ABIs. assert(expr->getType()->isPointerType() || expr->getType()->isObjCObjectPointerType()); @@ -125,7 +125,7 @@ static Address buildPointerWithAlignment(const Expr *expr, // Casts: if (const CastExpr *CE = dyn_cast(expr)) { if (const auto *ECE = dyn_cast(CE)) - cgf.CGM.buildExplicitCastExprType(ECE, &cgf); + cgf.CGM.emitExplicitCastExprType(ECE, &cgf); switch (CE->getCastKind()) { // Non-converting casts (but not C's implicit conversion from void*). @@ -139,7 +139,7 @@ static Address buildPointerWithAlignment(const Expr *expr, assert(!cir::MissingFeatures::tbaa()); LValueBaseInfo innerBaseInfo; - Address addr = cgf.buildPointerWithAlignment( + Address addr = cgf.emitPointerWithAlignment( CE->getSubExpr(), &innerBaseInfo, tbaaInfo, isKnownNonNull); if (baseInfo) *baseInfo = innerBaseInfo; @@ -181,7 +181,7 @@ static Address buildPointerWithAlignment(const Expr *expr, // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo. case CK_ArrayToPointerDecay: - return cgf.buildArrayToPointerDecay(CE->getSubExpr()); + return cgf.emitArrayToPointerDecay(CE->getSubExpr()); case CK_UncheckedDerivedToBase: case CK_DerivedToBase: { @@ -189,7 +189,7 @@ static Address buildPointerWithAlignment(const Expr *expr, // conservatively pretend that the complete object is of the base class // type. assert(!cir::MissingFeatures::tbaa()); - Address Addr = cgf.buildPointerWithAlignment(CE->getSubExpr(), baseInfo); + Address Addr = cgf.emitPointerWithAlignment(CE->getSubExpr(), baseInfo); auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); return cgf.getAddressOfBaseClass( Addr, Derived, CE->path_begin(), CE->path_end(), @@ -207,7 +207,7 @@ static Address buildPointerWithAlignment(const Expr *expr, if (const UnaryOperator *UO = dyn_cast(expr)) { // TODO(cir): maybe we should use cir.unary for pointers here instead. if (UO->getOpcode() == UO_AddrOf) { - LValue LV = cgf.buildLValue(UO->getSubExpr()); + LValue LV = cgf.emitLValue(UO->getSubExpr()); if (baseInfo) *baseInfo = LV.getBaseInfo(); assert(!cir::MissingFeatures::tbaa()); @@ -232,7 +232,7 @@ static Address buildPointerWithAlignment(const Expr *expr, // Otherwise, use the alignment of the type. return cgf.makeNaturalAddressForPointer( - cgf.buildScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(), + cgf.emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(), /*ForPointeeType=*/true, baseInfo, tbaaInfo, isKnownNonNull); } @@ -264,8 +264,8 @@ static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base, .isVolatileQualified(); } -LValue CIRGenFunction::buildLValueForBitField(LValue base, - const FieldDecl *field) { +LValue CIRGenFunction::emitLValueForBitField(LValue base, + const FieldDecl *field) { LValueBaseInfo BaseInfo = base.getBaseInfo(); const RecordDecl *rec = field->getParent(); @@ -294,12 +294,11 @@ LValue CIRGenFunction::buildLValueForBitField(LValue base, TBAAAccessInfo()); } -LValue CIRGenFunction::buildLValueForField(LValue base, - const FieldDecl *field) { +LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) { LValueBaseInfo BaseInfo = base.getBaseInfo(); if (field->isBitField()) - return buildLValueForBitField(base, field); + return emitLValueForBitField(base, field); // Fields of may-alias structures are may-alais themselves. // FIXME: this hould get propagated down through anonymous structs and unions. @@ -334,7 +333,7 @@ LValue CIRGenFunction::buildLValueForField(LValue base, unsigned fieldIndex = field->getFieldIndex(); if (CGM.LambdaFieldToName.count(field)) fieldName = CGM.LambdaFieldToName[field]; - addr = buildAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); + addr = emitAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); if (CGM.getCodeGenOpts().StrictVTablePointers && hasAnyVptr(FieldType, getContext())) @@ -358,10 +357,10 @@ LValue CIRGenFunction::buildLValueForField(LValue base, if (CGM.LambdaFieldToName.count(field)) fieldName = CGM.LambdaFieldToName[field]; - addr = buildAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); + addr = emitAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); } else // Remember the original struct field index - addr = buildPreserveStructAccess(*this, base, addr, field); + addr = emitPreserveStructAccess(*this, base, addr, field); } // If this is a reference field, load the reference right now. @@ -370,8 +369,8 @@ LValue CIRGenFunction::buildLValueForField(LValue base, LValue RefLVal = makeAddrLValue(addr, FieldType, FieldBaseInfo); if (RecordCVR & Qualifiers::Volatile) RefLVal.getQuals().addVolatile(); - addr = buildLoadOfReference(RefLVal, getLoc(field->getSourceRange()), - &FieldBaseInfo); + addr = emitLoadOfReference(RefLVal, getLoc(field->getSourceRange()), + &FieldBaseInfo); // Qualifiers on the struct don't apply to the referencee. RecordCVR = 0; @@ -401,18 +400,18 @@ LValue CIRGenFunction::buildLValueForField(LValue base, return LV; } -LValue CIRGenFunction::buildLValueForFieldInitialization( +LValue CIRGenFunction::emitLValueForFieldInitialization( LValue Base, const clang::FieldDecl *Field, llvm::StringRef FieldName) { QualType FieldType = Field->getType(); if (!FieldType->isReferenceType()) - return buildLValueForField(Base, Field); + return emitLValueForField(Base, Field); auto &layout = CGM.getTypes().getCIRGenRecordLayout(Field->getParent()); unsigned FieldIndex = layout.getCIRFieldNo(Field); - Address V = buildAddrOfFieldStorage(*this, Base.getAddress(), Field, - FieldName, FieldIndex); + Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field, FieldName, + FieldIndex); // Make sure that the address is pointing to the right type. auto memTy = getTypes().convertTypeForMem(FieldType); @@ -428,8 +427,7 @@ LValue CIRGenFunction::buildLValueForFieldInitialization( return makeAddrLValue(V, FieldType, FieldBaseInfo); } -LValue -CIRGenFunction::buildCompoundLiteralLValue(const CompoundLiteralExpr *E) { +LValue CIRGenFunction::emitCompoundLiteralLValue(const CompoundLiteralExpr *E) { if (E->isFileScope()) { llvm_unreachable("NYI"); } @@ -443,8 +441,8 @@ CIRGenFunction::buildCompoundLiteralLValue(const CompoundLiteralExpr *E) { const Expr *InitExpr = E->getInitializer(); LValue Result = makeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl); - buildAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), - /*Init*/ true); + emitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), + /*Init*/ true); // Block-scope compound literals are destroyed at the end of the enclosing // scope in C. @@ -465,7 +463,7 @@ static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) { return true; } -static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { +static CIRGenCallee emitDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); if (auto builtinID = FD->getBuiltinID()) { @@ -502,7 +500,7 @@ static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { return CIRGenCallee::forBuiltin(builtinID, FD); } - auto CalleePtr = buildFunctionDeclPointer(CGM, GD); + auto CalleePtr = emitFunctionDeclPointer(CGM, GD); assert(!CGM.getLangOpts().CUDA && "NYI"); @@ -524,25 +522,25 @@ bool CIRGenFunction::hasBooleanRepresentation(QualType Ty) { return false; } -CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { +CIRGenCallee CIRGenFunction::emitCallee(const clang::Expr *E) { E = E->IgnoreParens(); // Look through function-to-pointer decay. if (const auto *ICE = dyn_cast(E)) { if (ICE->getCastKind() == CK_FunctionToPointerDecay || ICE->getCastKind() == CK_BuiltinFnToFnPtr) { - return buildCallee(ICE->getSubExpr()); + return emitCallee(ICE->getSubExpr()); } // Resolve direct calls. } else if (const auto *DRE = dyn_cast(E)) { const auto *FD = dyn_cast(DRE->getDecl()); assert(FD && "DeclRef referring to FunctionDecl only thing supported so far"); - return buildDirectCallee(CGM, FD); + return emitDirectCallee(CGM, FD); } else if (auto ME = dyn_cast(E)) { if (auto FD = dyn_cast(ME->getMemberDecl())) { - buildIgnoredExpr(ME->getBase()); - return buildDirectCallee(CGM, FD); + emitIgnoredExpr(ME->getBase()); + return emitDirectCallee(CGM, FD); } } @@ -553,11 +551,11 @@ CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { mlir::Value calleePtr; QualType functionType; if (auto ptrType = E->getType()->getAs()) { - calleePtr = buildScalarExpr(E); + calleePtr = emitScalarExpr(E); functionType = ptrType->getPointeeType(); } else { functionType = E->getType(); - calleePtr = buildLValue(E).getPointer(); + calleePtr = emitLValue(E).getPointer(); } assert(functionType->isFunctionType()); @@ -573,30 +571,30 @@ CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { assert(false && "Nothing else supported yet!"); } -mlir::Value CIRGenFunction::buildToMemory(mlir::Value Value, QualType Ty) { +mlir::Value CIRGenFunction::emitToMemory(mlir::Value Value, QualType Ty) { // Bool has a different representation in memory than in registers. return Value; } -void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue) { +void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue) { // TODO: constant matrix type, no init, non temporal, TBAA - buildStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), lvalue.getBaseInfo(), - lvalue.getTBAAInfo(), false, false); + emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), lvalue.getBaseInfo(), + lvalue.getTBAAInfo(), false, false); } -void CIRGenFunction::buildStoreOfScalar(mlir::Value value, Address addr, - bool isVolatile, QualType ty, - LValueBaseInfo baseInfo, - TBAAAccessInfo tbaaInfo, bool isInit, - bool isNontemporal) { - value = buildToMemory(value, ty); +void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr, + bool isVolatile, QualType ty, + LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, bool isInit, + bool isNontemporal) { + value = emitToMemory(value, ty); LValue atomicLValue = LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); if (ty->isAtomicType() || (!isInit && LValueIsSuitableForInlineAtomic(atomicLValue))) { - buildAtomicStore(RValue::get(value), atomicLValue, isInit); + emitAtomicStore(RValue::get(value), atomicLValue, isInit); return; } @@ -642,29 +640,29 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, Address addr, llvm_unreachable("NYI"); } -void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, - bool isInit) { +void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue, + bool isInit) { if (lvalue.getType()->isConstantMatrixType()) { llvm_unreachable("NYI"); } - buildStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), lvalue.getBaseInfo(), - lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); + emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), lvalue.getBaseInfo(), + lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); } /// Given an expression that represents a value lvalue, this /// method emits the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. -RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { +RValue CIRGenFunction::emitLoadOfLValue(LValue LV, SourceLocation Loc) { assert(!LV.getType()->isFunctionType()); assert(!(LV.getType()->isConstantMatrixType()) && "not implemented"); if (LV.isBitField()) - return buildLoadOfBitfieldLValue(LV, Loc); + return emitLoadOfBitfieldLValue(LV, Loc); if (LV.isSimple()) - return RValue::get(buildLoadOfScalar(LV, Loc)); + return RValue::get(emitLoadOfScalar(LV, Loc)); if (LV.isVectorElt()) { auto load = builder.createLoad(getLoc(Loc), LV.getVectorAddress()); @@ -673,7 +671,7 @@ RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { } if (LV.isExtVectorElt()) { - return buildLoadOfExtVectorElementLValue(LV); + return emitLoadOfExtVectorElementLValue(LV); } llvm_unreachable("NYI"); @@ -688,7 +686,7 @@ int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx, // If this is a reference to a subset of the elements of a vector, create an // appropriate shufflevector. -RValue CIRGenFunction::buildLoadOfExtVectorElementLValue(LValue LV) { +RValue CIRGenFunction::emitLoadOfExtVectorElementLValue(LValue LV) { mlir::Location loc = LV.getExtVectorPointer().getLoc(); mlir::Value Vec = builder.createLoad(loc, LV.getExtVectorAddress()); @@ -721,8 +719,7 @@ RValue CIRGenFunction::buildLoadOfExtVectorElementLValue(LValue LV) { return RValue::get(Vec); } -RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, - SourceLocation Loc) { +RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc) { const CIRGenBitFieldInfo &info = LV.getBitFieldInfo(); // Get the output type. @@ -739,8 +736,8 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, return RValue::get(field); } -void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src, - LValue Dst) { +void CIRGenFunction::emitStoreThroughExtVectorComponentLValue(RValue Src, + LValue Dst) { mlir::Location loc = Dst.getExtVectorPointer().getLoc(); // HLSL allows storing to scalar values through ExtVector component LValues. @@ -813,8 +810,8 @@ void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src, Dst.isVolatileQualified()); } -void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst, - bool isInit) { +void CIRGenFunction::emitStoreThroughLValue(RValue Src, LValue Dst, + bool isInit) { if (!Dst.isSimple()) { if (Dst.isVectorElt()) { // Read/modify/write the vector, inserting the new element @@ -827,11 +824,11 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst, } if (Dst.isExtVectorElt()) - return buildStoreThroughExtVectorComponentLValue(Src, Dst); + return emitStoreThroughExtVectorComponentLValue(Src, Dst); assert(Dst.isBitField() && "NIY LValue type"); mlir::Value result; - return buildStoreThroughBitfieldLValue(Src, Dst, result); + return emitStoreThroughBitfieldLValue(Src, Dst, result); } assert(Dst.isSimple() && "only implemented simple"); @@ -849,11 +846,11 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst, } assert(Src.isScalar() && "Can't emit an agg store with this method"); - buildStoreOfScalar(Src.getScalarVal(), Dst, isInit); + emitStoreOfScalar(Src.getScalarVal(), Dst, isInit); } -void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, - mlir::Value &Result) { +void CIRGenFunction::emitStoreThroughBitfieldLValue(RValue Src, LValue Dst, + mlir::Value &Result) { // According to the AACPS: // When a volatile bit-field is written, and its container does not overlap // with any non-bit-field member, its container must be read exactly once @@ -878,8 +875,8 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, Src.getScalarVal(), info, Dst.isVolatileQualified(), useVolatile); } -static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, - const VarDecl *VD) { +static LValue emitGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, + const VarDecl *VD) { QualType T = E->getType(); // If it's thread_local, emit a call to its wrapper function instead. @@ -920,17 +917,17 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, return LV; } -static LValue buildCapturedFieldLValue(CIRGenFunction &CGF, const FieldDecl *FD, - mlir::Value ThisValue) { +static LValue emitCapturedFieldLValue(CIRGenFunction &CGF, const FieldDecl *FD, + mlir::Value ThisValue) { QualType TagType = CGF.getContext().getTagDeclType(FD->getParent()); LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType); - return CGF.buildLValueForField(LV, FD); + return CGF.emitLValueForField(LV, FD); } -static LValue buildFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E, - GlobalDecl GD) { +static LValue emitFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E, + GlobalDecl GD) { const FunctionDecl *FD = cast(GD.getDecl()); - auto funcOp = buildFunctionDeclPointer(CGF.CGM, GD); + auto funcOp = emitFunctionDeclPointer(CGF.CGM, GD); auto loc = CGF.getLoc(E->getSourceRange()); CharUnits align = CGF.getContext().getDeclAlign(FD); @@ -952,7 +949,7 @@ static LValue buildFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E, AlignmentSource::Decl); } -LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { +LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *E) { const NamedDecl *ND = E->getDecl(); QualType T = E->getType(); @@ -971,7 +968,7 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { if (E->refersToEnclosingVariableOrCapture()) { VD = VD->getCanonicalDecl(); if (auto *FD = LambdaCaptureFields.lookup(VD)) - return buildCapturedFieldLValue(*this, FD, CXXABIThisValue); + return emitCapturedFieldLValue(*this, FD, CXXABIThisValue); assert(!cir::MissingFeatures::CGCapturedStmtInfo() && "NYI"); // TODO[OpenMP]: Find the appropiate captured variable value and return // it. @@ -997,7 +994,7 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { if (const auto *VD = dyn_cast(ND)) { // Check if this is a global variable if (VD->hasLinkage() || VD->isStaticDataMember()) - return buildGlobalVarDeclLValue(*this, E, VD); + return emitGlobalVarDeclLValue(*this, E, VD); Address addr = Address::invalid(); @@ -1036,8 +1033,8 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { // Drill into reference types. LValue LV = VD->getType()->isReferenceType() - ? buildLoadOfReferenceLValue(addr, getLoc(E->getSourceRange()), - VD->getType(), AlignmentSource::Decl) + ? emitLoadOfReferenceLValue(addr, getLoc(E->getSourceRange()), + VD->getType(), AlignmentSource::Decl) : makeAddrLValue(addr, T, AlignmentSource::Decl); // Statics are defined as globals, so they are not include in the function's @@ -1069,7 +1066,7 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { } if (const auto *FD = dyn_cast(ND)) { - LValue LV = buildFunctionDeclLValue(*this, E, FD); + LValue LV = emitFunctionDeclLValue(*this, E, FD); // Emit debuginfo for the function declaration if the target wants to. if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) @@ -1084,9 +1081,9 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { if (const auto *BD = dyn_cast(ND)) { if (E->refersToEnclosingVariableOrCapture()) { auto *FD = LambdaCaptureFields.lookup(BD); - return buildCapturedFieldLValue(*this, FD, CXXABIThisValue); + return emitCapturedFieldLValue(*this, FD, CXXABIThisValue); } - return buildLValue(BD->getBinding()); + return emitLValue(BD->getBinding()); } // We can form DeclRefExprs naming GUID declarations when reconstituting @@ -1101,31 +1098,30 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { } LValue -CIRGenFunction::buildPointerToDataMemberBinaryExpr(const BinaryOperator *E) { +CIRGenFunction::emitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { assert((E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) && "unexpected binary operator opcode"); auto baseAddr = Address::invalid(); if (E->getOpcode() == BO_PtrMemD) - baseAddr = buildLValue(E->getLHS()).getAddress(); + baseAddr = emitLValue(E->getLHS()).getAddress(); else - baseAddr = buildPointerWithAlignment(E->getLHS()); + baseAddr = emitPointerWithAlignment(E->getLHS()); const auto *memberPtrTy = E->getRHS()->getType()->castAs(); - auto memberPtr = buildScalarExpr(E->getRHS()); + auto memberPtr = emitScalarExpr(E->getRHS()); LValueBaseInfo baseInfo; // TODO(cir): add TBAA assert(!cir::MissingFeatures::tbaa()); - auto memberAddr = buildCXXMemberDataPointerAddress(E, baseAddr, memberPtr, - memberPtrTy, &baseInfo); + auto memberAddr = emitCXXMemberDataPointerAddress(E, baseAddr, memberPtr, + memberPtrTy, &baseInfo); return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo); } -LValue -CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { +LValue CIRGenFunction::emitExtVectorElementExpr(const ExtVectorElementExpr *E) { // Emit the base vector as an l-value. LValue base; @@ -1136,7 +1132,7 @@ CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { LValueBaseInfo BaseInfo; // TODO(cir): Support TBAA assert(!cir::MissingFeatures::tbaa()); - Address Ptr = buildPointerWithAlignment(E->getBase(), &BaseInfo); + Address Ptr = emitPointerWithAlignment(E->getBase(), &BaseInfo); const auto *PT = E->getBase()->getType()->castAs(); base = makeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo); base.getQuals().removeObjCGCAttr(); @@ -1144,12 +1140,12 @@ CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), // emit the base as an lvalue. assert(E->getBase()->getType()->isVectorType()); - base = buildLValue(E->getBase()); + base = emitLValue(E->getBase()); } else { // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. assert(E->getBase()->getType()->isVectorType() && "Result must be a vector"); - mlir::Value Vec = buildScalarExpr(E->getBase()); + mlir::Value Vec = emitScalarExpr(E->getBase()); // Store the vector to memory (because LValue wants an address). QualType BaseTy = E->getBase()->getType(); @@ -1189,15 +1185,15 @@ CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { base.getBaseInfo(), base.getTBAAInfo()); } -LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { +LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *E) { // Comma expressions just emit their LHS then their RHS as an l-value. if (E->getOpcode() == BO_Comma) { - buildIgnoredExpr(E->getLHS()); - return buildLValue(E->getRHS()); + emitIgnoredExpr(E->getLHS()); + return emitLValue(E->getRHS()); } if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) - return buildPointerToDataMemberBinaryExpr(E); + return emitPointerToDataMemberBinaryExpr(E); assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); @@ -1210,15 +1206,15 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { clang::Qualifiers::ObjCLifetime::OCL_None && "not implemented"); - RValue RV = buildAnyExpr(E->getRHS()); - LValue LV = buildLValue(E->getLHS()); + RValue RV = emitAnyExpr(E->getRHS()); + LValue LV = emitLValue(E->getLHS()); SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; if (LV.isBitField()) { mlir::Value result; - buildStoreThroughBitfieldLValue(RV, LV, result); + emitStoreThroughBitfieldLValue(RV, LV, result); } else { - buildStoreThroughLValue(RV, LV); + emitStoreThroughLValue(RV, LV); } if (getLangOpts().OpenMP) CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this, @@ -1227,7 +1223,7 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { } case cir::TEK_Complex: - return buildComplexAssignmentLValue(E); + return emitComplexAssignmentLValue(E); case cir::TEK_Aggregate: assert(0 && "not implemented"); } @@ -1236,11 +1232,11 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. -Address CIRGenFunction::buildPointerWithAlignment( +Address CIRGenFunction::emitPointerWithAlignment( const Expr *expr, LValueBaseInfo *baseInfo, TBAAAccessInfo *tbaaInfo, KnownNonNull_t isKnownNonNull) { - Address addr = ::buildPointerWithAlignment(expr, baseInfo, tbaaInfo, - isKnownNonNull, *this); + Address addr = ::emitPointerWithAlignment(expr, baseInfo, tbaaInfo, + isKnownNonNull, *this); if (isKnownNonNull && !addr.isKnownNonNull()) addr.setKnownNonNull(); return addr; @@ -1258,15 +1254,15 @@ mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *E) { SourceLocation Loc = E->getExprLoc(); // TODO(cir): CGFPOptionsRAII for FP stuff. if (!E->getType()->isAnyComplexType()) - return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc); + return emitScalarConversion(emitScalarExpr(E), E->getType(), BoolTy, Loc); llvm_unreachable("complex to scalar not implemented"); } -LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { +LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *E) { // __extension__ doesn't affect lvalue-ness. if (E->getOpcode() == UO_Extension) - return buildLValue(E->getSubExpr()); + return emitLValue(E->getSubExpr()); QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); switch (E->getOpcode()) { @@ -1278,7 +1274,7 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { LValueBaseInfo BaseInfo; // TODO: add TBAAInfo - Address Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo); + Address Addr = emitPointerWithAlignment(E->getSubExpr(), &BaseInfo); // Tag 'load' with deref attribute. if (auto loadOp = @@ -1293,7 +1289,7 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { } case UO_Real: case UO_Imag: { - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); assert(LV.isSimple() && "real/imag on non-ordinary l-value"); // __real is valid on scalars. This is a faster way of testing that. @@ -1309,8 +1305,8 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { auto Loc = getLoc(E->getExprLoc()); Address Component = (E->getOpcode() == UO_Real - ? buildAddrOfRealComponent(Loc, LV.getAddress(), LV.getType()) - : buildAddrOfImagComponent(Loc, LV.getAddress(), LV.getType())); + ? emitAddrOfRealComponent(Loc, LV.getAddress(), LV.getType()) + : emitAddrOfImagComponent(Loc, LV.getAddress(), LV.getType())); // TODO(cir): TBAA info. assert(!cir::MissingFeatures::tbaa()); LValue ElemLV = makeAddrLValue(Component, T, LV.getBaseInfo()); @@ -1321,12 +1317,12 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { case UO_PreDec: { bool isInc = E->isIncrementOp(); bool isPre = E->isPrefix(); - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); if (E->getType()->isAnyComplexType()) { - buildComplexPrePostIncDec(E, LV, isInc, true /*isPre*/); + emitComplexPrePostIncDec(E, LV, isInc, true /*isPre*/); } else { - buildScalarPrePostIncDec(E, LV, isInc, isPre); + emitScalarPrePostIncDec(E, LV, isInc, isPre); } return LV; @@ -1336,46 +1332,46 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { /// Emit code to compute the specified expression which /// can have any type. The result is returned as an RValue struct. -RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot, - bool ignoreResult) { +RValue CIRGenFunction::emitAnyExpr(const Expr *E, AggValueSlot aggSlot, + bool ignoreResult) { switch (CIRGenFunction::getEvaluationKind(E->getType())) { case cir::TEK_Scalar: - return RValue::get(buildScalarExpr(E)); + return RValue::get(emitScalarExpr(E)); case cir::TEK_Complex: - return RValue::getComplex(buildComplexExpr(E)); + return RValue::getComplex(emitComplexExpr(E)); case cir::TEK_Aggregate: { if (!ignoreResult && aggSlot.isIgnored()) aggSlot = CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), getCounterAggTmpAsString()); - buildAggExpr(E, aggSlot); + emitAggExpr(E, aggSlot); return aggSlot.asRValue(); } } llvm_unreachable("bad evaluation kind"); } -RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, - ReturnValueSlot ReturnValue) { +RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *E, + ReturnValueSlot ReturnValue) { assert(!E->getCallee()->getType()->isBlockPointerType() && "ObjC Blocks NYI"); if (const auto *CE = dyn_cast(E)) - return buildCXXMemberCallExpr(CE, ReturnValue); + return emitCXXMemberCallExpr(CE, ReturnValue); assert(!dyn_cast(E) && "CUDA NYI"); if (const auto *CE = dyn_cast(E)) if (const CXXMethodDecl *MD = dyn_cast_or_null(CE->getCalleeDecl())) - return buildCXXOperatorMemberCallExpr(CE, MD, ReturnValue); + return emitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); - CIRGenCallee callee = buildCallee(E->getCallee()); + CIRGenCallee callee = emitCallee(E->getCallee()); if (callee.isBuiltin()) - return buildBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), E, - ReturnValue); + return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), E, + ReturnValue); assert(!callee.isPsuedoDestructor() && "NYI"); - return buildCall(E->getCallee()->getType(), callee, E, ReturnValue); + return emitCall(E->getCallee()->getType(), callee, E, ReturnValue); } RValue CIRGenFunction::GetUndefRValue(QualType ty) { @@ -1400,18 +1396,18 @@ RValue CIRGenFunction::GetUndefRValue(QualType ty) { llvm_unreachable("bad evaluation kind"); } -LValue CIRGenFunction::buildStmtExprLValue(const StmtExpr *E) { +LValue CIRGenFunction::emitStmtExprLValue(const StmtExpr *E) { // Can only get l-value for message expression returning aggregate type - RValue RV = buildAnyExprToTemp(E); + RValue RV = emitAnyExprToTemp(E); return makeAddrLValue(RV.getAggregateAddress(), E->getType(), AlignmentSource::Decl); } -RValue CIRGenFunction::buildCall(clang::QualType CalleeType, - const CIRGenCallee &OrigCallee, - const clang::CallExpr *E, - ReturnValueSlot ReturnValue, - mlir::Value Chain) { +RValue CIRGenFunction::emitCall(clang::QualType CalleeType, + const CIRGenCallee &OrigCallee, + const clang::CallExpr *E, + ReturnValueSlot ReturnValue, + mlir::Value Chain) { // Get the actual function type. The callee type will always be a pointer to // function type or a block pointer type. assert(CalleeType->isFunctionPointerType() && @@ -1463,8 +1459,8 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, } } - buildCallArgs(Args, dyn_cast(FnType), E->arguments(), - E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); + emitCallArgs(Args, dyn_cast(FnType), E->arguments(), + E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); const CIRGenFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( Args, FnType, /*ChainCall=*/Chain.getAsOpaquePointer()); @@ -1515,8 +1511,8 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, assert(!MustTailCall && "Must tail NYI"); cir::CIRCallOpInterface callOP; - RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, &callOP, - E == MustTailCall, getLoc(E->getExprLoc()), E); + RValue Call = emitCall(FnInfo, Callee, ReturnValue, Args, &callOP, + E == MustTailCall, getLoc(E->getExprLoc()), E); assert(!getDebugInfo() && "Debug Info NYI"); @@ -1524,21 +1520,21 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, } /// Emit code to compute the specified expression, ignoring the result. -void CIRGenFunction::buildIgnoredExpr(const Expr *E) { +void CIRGenFunction::emitIgnoredExpr(const Expr *E) { if (E->isPRValue()) - return (void)buildAnyExpr(E, AggValueSlot::ignored(), true); + return (void)emitAnyExpr(E, AggValueSlot::ignored(), true); // Just emit it as an l-value and drop the result. - buildLValue(E); + emitLValue(E); } -Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, - LValueBaseInfo *BaseInfo) { +Address CIRGenFunction::emitArrayToPointerDecay(const Expr *E, + LValueBaseInfo *BaseInfo) { assert(E->getType()->isArrayType() && "Array to pointer decay must have array source type!"); // Expressions of array type can't be bitfields or vector elements. - LValue LV = buildLValue(E); + LValue LV = emitLValue(E); Address Addr = LV.getAddress(); // If the array type was an incomplete type, we need to make sure @@ -1650,11 +1646,11 @@ static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, } static mlir::Value -buildArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, - mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, - ArrayRef indices, bool inbounds, - bool signedIndices, bool shouldDecay, - const llvm::Twine &name = "arrayidx") { +emitArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, + mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, + ArrayRef indices, bool inbounds, + bool signedIndices, bool shouldDecay, + const llvm::Twine &name = "arrayidx") { assert(indices.size() == 1 && "cannot handle multiple indices yet"); auto idx = indices.back(); auto &CGM = CGF.getCIRGenModule(); @@ -1675,7 +1671,7 @@ static QualType getFixedSizeElementType(const ASTContext &ctx, return eltType; } -static Address buildArraySubscriptPtr( +static Address emitArraySubscriptPtr( CIRGenFunction &CGF, mlir::Location beginLoc, mlir::Location endLoc, Address addr, ArrayRef indices, QualType eltType, bool inbounds, bool signedIndices, mlir::Location loc, bool shouldDecay, @@ -1696,9 +1692,9 @@ static Address buildArraySubscriptPtr( auto LastIndex = getConstantIndexOrNull(indices.back()); if (!LastIndex || (!CGF.IsInPreservedAIRegion && !isPreserveAIArrayBase(CGF, Base))) { - eltPtr = buildArraySubscriptPtr(CGF, beginLoc, endLoc, addr.getPointer(), - addr.getElementType(), indices, inbounds, - signedIndices, shouldDecay, name); + eltPtr = emitArraySubscriptPtr(CGF, beginLoc, endLoc, addr.getPointer(), + addr.getElementType(), indices, inbounds, + signedIndices, shouldDecay, name); } else { // assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); // assert(indices.size() == 1 && "cannot handle multiple indices yet"); @@ -1713,18 +1709,18 @@ static Address buildArraySubscriptPtr( return Address(eltPtr, CGF.getTypes().convertTypeForMem(eltType), eltAlign); } -LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, - bool Accessed) { +LValue CIRGenFunction::emitArraySubscriptExpr(const ArraySubscriptExpr *E, + bool Accessed) { // The index must always be an integer, which is not an aggregate. Emit it // in lexical order (this complexity is, sadly, required by C++17). mlir::Value IdxPre = - (E->getLHS() == E->getIdx()) ? buildScalarExpr(E->getIdx()) : nullptr; + (E->getLHS() == E->getIdx()) ? emitScalarExpr(E->getIdx()) : nullptr; bool SignedIndices = false; auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> mlir::Value { mlir::Value Idx = IdxPre; if (E->getLHS() != E->getIdx()) { assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS"); - Idx = buildScalarExpr(E->getIdx()); + Idx = emitScalarExpr(E->getIdx()); } QualType IdxTy = E->getIdx()->getType(); @@ -1747,7 +1743,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // with this subscript. if (E->getBase()->getType()->isVectorType() && !isa(E->getBase())) { - LValue lhs = buildLValue(E->getBase()); + LValue lhs = emitLValue(E->getBase()); auto index = EmitIdxAfterBase(/*Promote=*/false); return LValue::MakeVectorElt(lhs.getAddress(), index, E->getBase()->getType(), lhs.getBaseInfo(), @@ -1769,7 +1765,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // The base must be a pointer, which is not an aggregate. Emit // it. It needs to be emitted first in case it's what captures // the VLA bounds. - Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo); + Addr = emitPointerWithAlignment(E->getBase(), &EltBaseInfo); auto Idx = EmitIdxAfterBase(/*Promote*/ true); // The element count here is the total number of non-VLA elements. @@ -1779,7 +1775,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, Idx = builder.createMul(Idx, numElements); QualType ptrType = E->getBase()->getType(); - Addr = buildArraySubscriptPtr( + Addr = emitArraySubscriptPtr( *this, CGM.getLoc(E->getBeginLoc()), CGM.getLoc(E->getEndLoc()), Addr, {Idx}, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/false, @@ -1798,14 +1794,14 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // For simple multidimensional array indexing, set the 'accessed' flag // for better bounds-checking of the base expression. if (const auto *ASE = dyn_cast(Array)) - ArrayLV = buildArraySubscriptExpr(ASE, /*Accessed=*/true); + ArrayLV = emitArraySubscriptExpr(ASE, /*Accessed=*/true); else - ArrayLV = buildLValue(Array); + ArrayLV = emitLValue(Array); auto Idx = EmitIdxAfterBase(/*Promote=*/true); // Propagate the alignment from the array itself to the result. QualType arrayType = Array->getType(); - Addr = buildArraySubscriptPtr( + Addr = emitArraySubscriptPtr( *this, CGM.getLoc(Array->getBeginLoc()), CGM.getLoc(Array->getEndLoc()), ArrayLV.getAddress(), {Idx}, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, @@ -1818,10 +1814,10 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // The base must be a pointer; emit it with an estimate of its alignment. // TODO(cir): EltTBAAInfo assert(!cir::MissingFeatures::tbaa() && "TBAA is NYI"); - Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo); + Addr = emitPointerWithAlignment(E->getBase(), &EltBaseInfo); auto Idx = EmitIdxAfterBase(/*Promote*/ true); QualType ptrType = E->getBase()->getType(); - Addr = buildArraySubscriptPtr( + Addr = emitArraySubscriptPtr( *this, CGM.getLoc(E->getBeginLoc()), CGM.getLoc(E->getEndLoc()), Addr, Idx, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/false, @@ -1837,7 +1833,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, return LV; } -LValue CIRGenFunction::buildStringLiteralLValue(const StringLiteral *E) { +LValue CIRGenFunction::emitStringLiteralLValue(const StringLiteral *E) { auto sym = CGM.getAddrOfConstantStringFromLiteral(E).getSymbol(); auto cstGlobal = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), sym); @@ -1863,7 +1859,7 @@ LValue CIRGenFunction::buildStringLiteralLValue(const StringLiteral *E) { /// we need the address of an aggregate in order to access one of its members. /// This can happen for all the reasons that casts are permitted with aggregate /// result, including noop aggregate casts, and cast from scalar to union. -LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { +LValue CIRGenFunction::emitCastLValue(const CastExpr *E) { switch (E->getCastKind()) { case CK_HLSLArrayRValue: case CK_HLSLVectorTruncation: @@ -1927,10 +1923,10 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { assert(0 && "NYI"); case CK_Dynamic: { - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); Address V = LV.getAddress(); const auto *DCE = cast(E); - return MakeNaturalAlignAddrLValue(buildDynamicCast(V, DCE), E->getType()); + return MakeNaturalAlignAddrLValue(emitDynamicCast(V, DCE), E->getType()); } case CK_ConstructorConversion: @@ -1938,12 +1934,12 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { case CK_CPointerToObjCPointerCast: case CK_BlockPointerToObjCPointerCast: case CK_LValueToRValue: - return buildLValue(E->getSubExpr()); + return emitLValue(E->getSubExpr()); case CK_NoOp: { // CK_NoOp can model a qualification conversion, which can remove an array // bound and change the IR type. - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); // Propagate the volatile qualifier to LValue, if exists in E. if (E->changesVolatileQualification()) llvm_unreachable("NYI"); @@ -1965,7 +1961,7 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { E->getSubExpr()->getType()->castAs(); auto *DerivedClassDecl = cast(DerivedClassTy->getDecl()); - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); Address This = LV.getAddress(); // Perform the derived-to-base conversion @@ -1988,7 +1984,7 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { assert(0 && "NYI"); } case CK_AddressSpaceConversion: { - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); QualType DestTy = getContext().getPointerType(E->getType()); auto SrcAS = builder.getAddrSpaceAttr(E->getSubExpr()->getType().getAddressSpace()); @@ -2023,12 +2019,12 @@ static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &CGF, return nullptr; } -LValue CIRGenFunction::buildCheckedLValue(const Expr *E, TypeCheckKind TCK) { +LValue CIRGenFunction::emitCheckedLValue(const Expr *E, TypeCheckKind TCK) { LValue LV; if (SanOpts.has(SanitizerKind::ArrayBounds) && isa(E)) assert(0 && "not implemented"); else - LV = buildLValue(E); + LV = emitLValue(E); if (!isa(E) && !LV.isBitField() && LV.isSimple()) { SanitizerSet SkippedChecks; if (const auto *ME = dyn_cast(E)) { @@ -2038,8 +2034,8 @@ LValue CIRGenFunction::buildCheckedLValue(const Expr *E, TypeCheckKind TCK) { if (IsBaseCXXThis || isa(ME->getBase())) SkippedChecks.set(SanitizerKind::Null, true); } - buildTypeCheck(TCK, E->getExprLoc(), LV.getPointer(), E->getType(), - LV.getAlignment(), SkippedChecks); + emitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(), E->getType(), + LV.getAlignment(), SkippedChecks); } return LV; } @@ -2068,10 +2064,10 @@ bool CIRGenFunction::isWrappedCXXThis(const Expr *Obj) { return true; } -LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { +LValue CIRGenFunction::emitMemberExpr(const MemberExpr *E) { if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) { - buildIgnoredExpr(E->getBase()); - return buildDeclRefLValue(DRE); + emitIgnoredExpr(E->getBase()); + return emitDeclRefLValue(DRE); } Expr *BaseExpr = E->getBase(); @@ -2079,7 +2075,7 @@ LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { LValue BaseLV; if (E->isArrow()) { LValueBaseInfo BaseInfo; - Address Addr = buildPointerWithAlignment(BaseExpr, &BaseInfo); + Address Addr = emitPointerWithAlignment(BaseExpr, &BaseInfo); QualType PtrTy = BaseExpr->getType()->getPointeeType(); SanitizerSet SkippedChecks; bool IsBaseCXXThis = isWrappedCXXThis(BaseExpr); @@ -2087,15 +2083,15 @@ LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { SkippedChecks.set(SanitizerKind::Alignment, true); if (IsBaseCXXThis || isa(BaseExpr)) SkippedChecks.set(SanitizerKind::Null, true); - buildTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, - /*Alignment=*/CharUnits::Zero(), SkippedChecks); + emitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, + /*Alignment=*/CharUnits::Zero(), SkippedChecks); BaseLV = makeAddrLValue(Addr, PtrTy, BaseInfo); } else - BaseLV = buildCheckedLValue(BaseExpr, TCK_MemberAccess); + BaseLV = emitCheckedLValue(BaseExpr, TCK_MemberAccess); NamedDecl *ND = E->getMemberDecl(); if (auto *Field = dyn_cast(ND)) { - LValue LV = buildLValueForField(BaseLV, Field); + LValue LV = emitLValueForField(BaseLV, Field); assert(!cir::MissingFeatures::setObjCGCLValueClass() && "NYI"); if (getLangOpts().OpenMP) { // If the member was explicitly marked as nontemporal, mark it as @@ -2112,8 +2108,8 @@ LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { llvm_unreachable("Unhandled member declaration!"); } -LValue CIRGenFunction::buildCallExprLValue(const CallExpr *E) { - RValue RV = buildCallExpr(E); +LValue CIRGenFunction::emitCallExprLValue(const CallExpr *E) { + RValue RV = emitCallExpr(E); if (!RV.isScalar()) return makeAddrLValue(RV.getAggregateAddress(), E->getType(), @@ -2127,8 +2123,8 @@ LValue CIRGenFunction::buildCallExprLValue(const CallExpr *E) { } /// Evaluate an expression into a given memory location. -void CIRGenFunction::buildAnyExprToMem(const Expr *E, Address Location, - Qualifiers Quals, bool IsInit) { +void CIRGenFunction::emitAnyExprToMem(const Expr *E, Address Location, + Qualifiers Quals, bool IsInit) { // FIXME: This function should take an LValue as an argument. switch (getEvaluationKind(E->getType())) { case cir::TEK_Complex: @@ -2136,18 +2132,18 @@ void CIRGenFunction::buildAnyExprToMem(const Expr *E, Address Location, return; case cir::TEK_Aggregate: { - buildAggExpr(E, AggValueSlot::forAddr(Location, Quals, - AggValueSlot::IsDestructed_t(IsInit), - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsAliased_t(!IsInit), - AggValueSlot::MayOverlap)); + emitAggExpr(E, AggValueSlot::forAddr(Location, Quals, + AggValueSlot::IsDestructed_t(IsInit), + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsAliased_t(!IsInit), + AggValueSlot::MayOverlap)); return; } case cir::TEK_Scalar: { - RValue RV = RValue::get(buildScalarExpr(E)); + RValue RV = RValue::get(emitScalarExpr(E)); LValue LV = makeAddrLValue(Location, E->getType()); - buildStoreThroughLValue(RV, LV); + emitStoreThroughLValue(RV, LV); return; } } @@ -2264,7 +2260,7 @@ static void pushTemporaryCleanup(CIRGenFunction &CGF, } } -LValue CIRGenFunction::buildMaterializeTemporaryExpr( +LValue CIRGenFunction::emitMaterializeTemporaryExpr( const MaterializeTemporaryExpr *M) { const Expr *E = M->getSubExpr(); @@ -2272,7 +2268,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( !cast(M->getExtendingDecl())->isARCPseudoStrong()) && "Reference should never be pseudo-strong!"); - // FIXME: ideally this would use buildAnyExprToMem, however, we cannot do so + // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so // as that will cause the lifetime adjustment to be lost for ARC auto ownership = M->getType().getObjCLifetime(); if (ownership != Qualifiers::OCL_None && @@ -2285,7 +2281,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments); for (const auto &Ignored : CommaLHSs) - buildIgnoredExpr(Ignored); + emitIgnoredExpr(Ignored); if (const auto *opaque = dyn_cast(E)) assert(0 && "NYI"); @@ -2314,7 +2310,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( break; } - buildAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true); + emitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true); } pushTemporaryCleanup(*this, M, E, Object); @@ -2329,7 +2325,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( return makeAddrLValue(Object, M->getType(), AlignmentSource::Decl); } -LValue CIRGenFunction::buildOpaqueValueLValue(const OpaqueValueExpr *e) { +LValue CIRGenFunction::emitOpaqueValueLValue(const OpaqueValueExpr *e) { assert(OpaqueValueMappingData::shouldBindAsLValue(e)); return getOrCreateOpaqueLValueMapping(e); } @@ -2345,7 +2341,7 @@ CIRGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) { return it->second; assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted"); - return buildLValue(e->getSourceExpr()); + return emitLValue(e->getSourceExpr()); } RValue @@ -2359,7 +2355,7 @@ CIRGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) { return it->second; assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted"); - return buildAnyExpr(e->getSourceExpr()); + return emitAnyExpr(e->getSourceExpr()); } namespace { @@ -2384,7 +2380,7 @@ std::optional HandleConditionalOperatorLValueSimpleCase( if (auto *ThrowExpr = dyn_cast(Live->IgnoreParens())) { llvm_unreachable("NYI"); } - return CGF.buildLValue(Live); + return CGF.emitLValue(Live); } } return std::nullopt; @@ -2394,21 +2390,21 @@ std::optional HandleConditionalOperatorLValueSimpleCase( /// Emit the operand of a glvalue conditional operator. This is either a glvalue /// or a (possibly-parenthesized) throw-expression. If this is a throw, no /// LValue is returned and the current block has been terminated. -static std::optional buildLValueOrThrowExpression(CIRGenFunction &CGF, - const Expr *Operand) { +static std::optional emitLValueOrThrowExpression(CIRGenFunction &CGF, + const Expr *Operand) { if (auto *ThrowExpr = dyn_cast(Operand->IgnoreParens())) { llvm_unreachable("NYI"); } - return CGF.buildLValue(Operand); + return CGF.emitLValue(Operand); } // Create and generate the 3 blocks for a conditional operator. // Leaves the 'current block' in the continuation basic block. template CIRGenFunction::ConditionalInfo -CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, - const FuncTy &BranchGenFunc) { +CIRGenFunction::emitConditionalBlocks(const AbstractConditionalOperator *E, + const FuncTy &BranchGenFunc) { ConditionalInfo Info; auto &CGF = *this; ConditionalEvaluation eval(CGF); @@ -2417,7 +2413,7 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, auto *trueExpr = E->getTrueExpr(); auto *falseExpr = E->getFalseExpr(); - mlir::Value condV = CGF.buildOpOnBoolExpr(loc, E->getCond()); + mlir::Value condV = CGF.emitOpOnBoolExpr(loc, E->getCond()); SmallVector insertPoints{}; mlir::Type yieldTy{}; @@ -2496,13 +2492,13 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, return Info; } -LValue CIRGenFunction::buildConditionalOperatorLValue( +LValue CIRGenFunction::emitConditionalOperatorLValue( const AbstractConditionalOperator *expr) { if (!expr->isGLValue()) { // ?: here should be an aggregate. assert(hasAggregateEvaluationKind(expr->getType()) && "Unexpected conditional operator!"); - return buildAggExprToLValue(expr); + return emitAggExprToLValue(expr); } OpaqueValueMapping binding(*this, expr); @@ -2511,8 +2507,8 @@ LValue CIRGenFunction::buildConditionalOperatorLValue( return *Res; ConditionalInfo Info = - buildConditionalBlocks(expr, [](CIRGenFunction &CGF, const Expr *E) { - return buildLValueOrThrowExpression(CGF, E); + emitConditionalBlocks(expr, [](CIRGenFunction &CGF, const Expr *E) { + return emitLValueOrThrowExpression(CGF, E); }); if ((Info.LHS && !Info.LHS->isSimple()) || @@ -2537,7 +2533,7 @@ LValue CIRGenFunction::buildConditionalOperatorLValue( /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. -LValue CIRGenFunction::buildLValue(const Expr *E) { +LValue CIRGenFunction::emitLValue(const Expr *E) { // FIXME: ApplyDebugLocation DL(*this, E); switch (E->getStmtClass()) { default: { @@ -2546,27 +2542,26 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { assert(0 && "not implemented"); } case Expr::ConditionalOperatorClass: - return buildConditionalOperatorLValue(cast(E)); + return emitConditionalOperatorLValue(cast(E)); case Expr::ArraySubscriptExprClass: - return buildArraySubscriptExpr(cast(E)); + return emitArraySubscriptExpr(cast(E)); case Expr::ExtVectorElementExprClass: - return buildExtVectorElementExpr(cast(E)); + return emitExtVectorElementExpr(cast(E)); case Expr::BinaryOperatorClass: - return buildBinaryOperatorLValue(cast(E)); + return emitBinaryOperatorLValue(cast(E)); case Expr::CompoundAssignOperatorClass: { QualType Ty = E->getType(); if (const AtomicType *AT = Ty->getAs()) assert(0 && "not yet implemented"); if (!Ty->isAnyComplexType()) - return buildCompoundAssignmentLValue(cast(E)); - return buildComplexCompoundAssignmentLValue( - cast(E)); + return emitCompoundAssignmentLValue(cast(E)); + return emitComplexCompoundAssignmentLValue(cast(E)); } case Expr::CallExprClass: case Expr::CXXMemberCallExprClass: case Expr::CXXOperatorCallExprClass: case Expr::UserDefinedLiteralClass: - return buildCallExprLValue(cast(E)); + return emitCallExprLValue(cast(E)); case Expr::ExprWithCleanupsClass: { const auto *cleanups = cast(E); LValue LV; @@ -2578,7 +2573,7 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { CIRGenFunction::LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - LV = buildLValue(cleanups->getSubExpr()); + LV = emitLValue(cleanups->getSubExpr()); if (LV.isSimple()) { // Defend against branches out of gnu statement expressions // surrounded by cleanups. @@ -2595,19 +2590,19 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { return LV; } case Expr::ParenExprClass: - return buildLValue(cast(E)->getSubExpr()); + return emitLValue(cast(E)->getSubExpr()); case Expr::DeclRefExprClass: - return buildDeclRefLValue(cast(E)); + return emitDeclRefLValue(cast(E)); case Expr::UnaryOperatorClass: - return buildUnaryOpLValue(cast(E)); + return emitUnaryOpLValue(cast(E)); case Expr::StringLiteralClass: - return buildStringLiteralLValue(cast(E)); + return emitStringLiteralLValue(cast(E)); case Expr::MemberExprClass: - return buildMemberExpr(cast(E)); + return emitMemberExpr(cast(E)); case Expr::CompoundLiteralExprClass: - return buildCompoundLiteralLValue(cast(E)); + return emitCompoundLiteralLValue(cast(E)); case Expr::PredefinedExprClass: - return buildPredefinedLValue(cast(E)); + return emitPredefinedLValue(cast(E)); case Expr::CXXFunctionalCastExprClass: case Expr::CXXReinterpretCastExprClass: case Expr::CXXConstCastExprClass: @@ -2615,22 +2610,22 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { case Expr::ObjCBridgedCastExprClass: emitError(getLoc(E->getExprLoc()), "l-value not implemented for '") << E->getStmtClassName() << "'"; - assert(0 && "Use buildCastLValue below, remove me when adding testcase"); + assert(0 && "Use emitCastLValue below, remove me when adding testcase"); case Expr::CStyleCastExprClass: case Expr::CXXStaticCastExprClass: case Expr::CXXDynamicCastExprClass: case Expr::ImplicitCastExprClass: - return buildCastLValue(cast(E)); + return emitCastLValue(cast(E)); case Expr::OpaqueValueExprClass: - return buildOpaqueValueLValue(cast(E)); + return emitOpaqueValueLValue(cast(E)); case Expr::MaterializeTemporaryExprClass: - return buildMaterializeTemporaryExpr(cast(E)); + return emitMaterializeTemporaryExpr(cast(E)); case Expr::ObjCPropertyRefExprClass: llvm_unreachable("cannot emit a property reference directly"); case Expr::StmtExprClass: - return buildStmtExprLValue(cast(E)); + return emitStmtExprLValue(cast(E)); } return LValue::makeAddr(Address::invalid(), E->getType()); @@ -2646,7 +2641,7 @@ RValue CIRGenFunction::convertTempToRValue(Address addr, clang::QualType type, case cir::TEK_Aggregate: return lvalue.asAggregateRValue(); case cir::TEK_Scalar: - return RValue::get(buildLoadOfScalar(lvalue, loc)); + return RValue::get(emitLoadOfScalar(lvalue, loc)); } llvm_unreachable("NYI"); } @@ -2663,9 +2658,9 @@ bool CIRGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) { /// Emit an `if` on a boolean condition, filling `then` and `else` into /// appropriated regions. -mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, - const Stmt *thenS, - const Stmt *elseS) { +mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond, + const Stmt *thenS, + const Stmt *elseS) { // Attempt to be more accurate as possible with IfOp location, generate // one fused location that has either 2 or 4 total locations, depending // on else's availability. @@ -2680,18 +2675,18 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, elseLoc = getStmtLoc(*elseS); mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success(); - buildIfOnBoolExpr( + emitIfOnBoolExpr( cond, /*thenBuilder=*/ [&](mlir::OpBuilder &, mlir::Location) { LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()}; - resThen = buildStmt(thenS, /*useCurrentScope=*/true); + resThen = emitStmt(thenS, /*useCurrentScope=*/true); }, thenLoc, /*elseBuilder=*/ [&](mlir::OpBuilder &, mlir::Location) { assert(elseLoc && "Invalid location for elseS."); LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()}; - resElse = buildStmt(elseS, /*useCurrentScope=*/true); + resElse = emitStmt(elseS, /*useCurrentScope=*/true); }, elseLoc); @@ -2701,7 +2696,7 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond, /// Emit an `if` on a boolean condition, filling `then` and `else` into /// appropriated regions. -cir::IfOp CIRGenFunction::buildIfOnBoolExpr( +cir::IfOp CIRGenFunction::emitIfOnBoolExpr( const clang::Expr *cond, llvm::function_ref thenBuilder, mlir::Location thenLoc, @@ -2714,7 +2709,7 @@ cir::IfOp CIRGenFunction::buildIfOnBoolExpr( auto loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs); // Emit the code with the fully general case. - mlir::Value condV = buildOpOnBoolExpr(loc, cond); + mlir::Value condV = emitOpOnBoolExpr(loc, cond); return builder.create(loc, condV, elseLoc.has_value(), /*thenBuilder=*/thenBuilder, /*elseBuilder=*/elseBuilder); @@ -2722,8 +2717,8 @@ cir::IfOp CIRGenFunction::buildIfOnBoolExpr( /// TODO(cir): PGO data /// TODO(cir): see EmitBranchOnBoolExpr for extra ideas). -mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, - const Expr *cond) { +mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc, + const Expr *cond) { // TODO(CIR): scoped ApplyDebugLocation DL(*this, Cond); // TODO(CIR): __builtin_unpredictable and profile counts? cond = cond->IgnoreParens(); @@ -2743,25 +2738,25 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, if (const ConditionalOperator *CondOp = dyn_cast(cond)) { auto *trueExpr = CondOp->getTrueExpr(); auto *falseExpr = CondOp->getFalseExpr(); - mlir::Value condV = buildOpOnBoolExpr(loc, CondOp->getCond()); + mlir::Value condV = emitOpOnBoolExpr(loc, CondOp->getCond()); auto ternaryOpRes = builder .create( loc, condV, /*thenBuilder=*/ [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) { - auto lhs = buildScalarExpr(trueExpr); + auto lhs = emitScalarExpr(trueExpr); b.create(loc, lhs); }, /*elseBuilder=*/ [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) { - auto rhs = buildScalarExpr(falseExpr); + auto rhs = emitScalarExpr(falseExpr); b.create(loc, rhs); }) .getResult(); - return buildScalarConversion(ternaryOpRes, CondOp->getType(), - getContext().BoolTy, CondOp->getExprLoc()); + return emitScalarConversion(ternaryOpRes, CondOp->getType(), + getContext().BoolTy, CondOp->getExprLoc()); } if (const CXXThrowExpr *Throw = dyn_cast(cond)) { @@ -2780,10 +2775,10 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, return evaluateExprAsBool(cond); } -mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, - mlir::Location loc, CharUnits alignment, - bool insertIntoFnEntryBlock, - mlir::Value arraySize) { +mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty, + mlir::Location loc, CharUnits alignment, + bool insertIntoFnEntryBlock, + mlir::Value arraySize) { mlir::Block *entryBlock = insertIntoFnEntryBlock ? getCurFunctionEntryBlock() : currLexScope->getEntryBlock(); @@ -2798,14 +2793,14 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, entryBlock = &scopeOp.getRegion().front(); } - return buildAlloca(name, ty, loc, alignment, - builder.getBestAllocaInsertPoint(entryBlock), arraySize); + return emitAlloca(name, ty, loc, alignment, + builder.getBestAllocaInsertPoint(entryBlock), arraySize); } -mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, - mlir::Location loc, CharUnits alignment, - mlir::OpBuilder::InsertPoint ip, - mlir::Value arraySize) { +mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty, + mlir::Location loc, CharUnits alignment, + mlir::OpBuilder::InsertPoint ip, + mlir::Value arraySize) { // CIR uses its own alloca AS rather than follow the target data layout like // original CodeGen. The data layout awareness should be done in the lowering // pass instead. @@ -2826,29 +2821,29 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, return addr; } -mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty, - mlir::Location loc, CharUnits alignment, - bool insertIntoFnEntryBlock, - mlir::Value arraySize) { - return buildAlloca(name, getCIRType(ty), loc, alignment, - insertIntoFnEntryBlock, arraySize); +mlir::Value CIRGenFunction::emitAlloca(StringRef name, QualType ty, + mlir::Location loc, CharUnits alignment, + bool insertIntoFnEntryBlock, + mlir::Value arraySize) { + return emitAlloca(name, getCIRType(ty), loc, alignment, + insertIntoFnEntryBlock, arraySize); } -mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, - SourceLocation loc) { - return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), getLoc(loc), lvalue.getBaseInfo(), - lvalue.getTBAAInfo(), lvalue.isNontemporal()); +mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue, + SourceLocation loc) { + return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), getLoc(loc), lvalue.getBaseInfo(), + lvalue.getTBAAInfo(), lvalue.isNontemporal()); } -mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, - mlir::Location loc) { - return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), loc, lvalue.getBaseInfo(), - lvalue.getTBAAInfo(), lvalue.isNontemporal()); +mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue, + mlir::Location loc) { + return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), loc, lvalue.getBaseInfo(), + lvalue.getTBAAInfo(), lvalue.isNontemporal()); } -mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) { +mlir::Value CIRGenFunction::emitFromMemory(mlir::Value Value, QualType Ty) { if (!Ty->isBooleanType() && hasBooleanRepresentation(Ty)) { llvm_unreachable("NIY"); } @@ -2856,22 +2851,21 @@ mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) { return Value; } -mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile, - QualType ty, SourceLocation loc, - LValueBaseInfo baseInfo, - TBAAAccessInfo tbaaInfo, - bool isNontemporal) { - return buildLoadOfScalar(addr, isVolatile, ty, getLoc(loc), baseInfo, - tbaaInfo, isNontemporal); +mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile, + QualType ty, SourceLocation loc, + LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, + bool isNontemporal) { + return emitLoadOfScalar(addr, isVolatile, ty, getLoc(loc), baseInfo, tbaaInfo, + isNontemporal); } -mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile, - QualType ty, mlir::Location loc, - LValueBaseInfo baseInfo, - TBAAAccessInfo tbaaInfo, - bool isNontemporal) { +mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile, + QualType ty, mlir::Location loc, + LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, + bool isNontemporal) { // TODO(CIR): this has fallen out of sync with codegen - // Atomic operations have to be done on integral types LValue atomicLValue = LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); @@ -2895,9 +2889,9 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile, // // Now load value. // mlir::Value V = builder.createLoad(loc, Cast); - // // Shuffle vector to get vec3. - // V = builder.createVecShuffle(loc, V, ArrayRef{0, 1, 2}); - // return buildFromMemory(V, ty); + // // Shuffle vector to get vec3. + // V = builder.createVecShuffle(loc, V, ArrayRef{0, 1, 2}); + // return emitFromMemory(V, ty); // } } @@ -2918,18 +2912,18 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile, assert(!cir::MissingFeatures::tbaa() && "NYI"); assert(!cir::MissingFeatures::emitScalarRangeCheck() && "NYI"); - return buildFromMemory(Load, ty); + return emitFromMemory(Load, ty); } // Note: this function also emit constructor calls to support a MSVC extensions // allowing explicit constructor function call. -RValue CIRGenFunction::buildCXXMemberCallExpr(const CXXMemberCallExpr *CE, - ReturnValueSlot ReturnValue) { +RValue CIRGenFunction::emitCXXMemberCallExpr(const CXXMemberCallExpr *CE, + ReturnValueSlot ReturnValue) { const Expr *callee = CE->getCallee()->IgnoreParens(); if (isa(callee)) - return buildCXXMemberPointerCallExpr(CE, ReturnValue); + return emitCXXMemberPointerCallExpr(CE, ReturnValue); const auto *ME = cast(callee); const auto *MD = cast(ME->getMemberDecl()); @@ -2943,13 +2937,13 @@ RValue CIRGenFunction::buildCXXMemberCallExpr(const CXXMemberCallExpr *CE, bool IsArrow = ME->isArrow(); const Expr *Base = ME->getBase(); - return buildCXXMemberOrOperatorMemberCallExpr( + return emitCXXMemberOrOperatorMemberCallExpr( CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base); } -RValue CIRGenFunction::buildReferenceBindingToExpr(const Expr *E) { +RValue CIRGenFunction::emitReferenceBindingToExpr(const Expr *E) { // Emit the expression as an lvalue. - LValue LV = buildLValue(E); + LValue LV = emitLValue(E); assert(LV.isSimple()); auto Value = LV.getPointer(); @@ -2960,9 +2954,9 @@ RValue CIRGenFunction::buildReferenceBindingToExpr(const Expr *E) { return RValue::get(Value); } -Address CIRGenFunction::buildLoadOfReference(LValue refLVal, mlir::Location loc, - LValueBaseInfo *pointeeBaseInfo, - TBAAAccessInfo *pointeeTBAAInfo) { +Address CIRGenFunction::emitLoadOfReference(LValue refLVal, mlir::Location loc, + LValueBaseInfo *pointeeBaseInfo, + TBAAAccessInfo *pointeeTBAAInfo) { assert(!refLVal.isVolatile() && "NYI"); cir::LoadOp load = builder.create(loc, refLVal.getAddress().getElementType(), @@ -2978,15 +2972,15 @@ Address CIRGenFunction::buildLoadOfReference(LValue refLVal, mlir::Location loc, return Address(load, getTypes().convertTypeForMem(pointeeType), align); } -LValue CIRGenFunction::buildLoadOfReferenceLValue(LValue RefLVal, - mlir::Location Loc) { +LValue CIRGenFunction::emitLoadOfReferenceLValue(LValue RefLVal, + mlir::Location Loc) { LValueBaseInfo PointeeBaseInfo; - Address PointeeAddr = buildLoadOfReference(RefLVal, Loc, &PointeeBaseInfo); + Address PointeeAddr = emitLoadOfReference(RefLVal, Loc, &PointeeBaseInfo); return makeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(), PointeeBaseInfo); } -void CIRGenFunction::buildUnreachable(SourceLocation Loc) { +void CIRGenFunction::emitUnreachable(SourceLocation Loc) { if (SanOpts.has(SanitizerKind::Unreachable)) llvm_unreachable("NYI"); builder.create(getLoc(Loc)); @@ -3060,8 +3054,8 @@ cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty, const Twine &Name, mlir::Value ArraySize, bool insertIntoFnEntryBlock) { - return cast(buildAlloca(Name.str(), Ty, Loc, CharUnits(), - insertIntoFnEntryBlock, ArraySize) + return cast(emitAlloca(Name.str(), Ty, Loc, CharUnits(), + insertIntoFnEntryBlock, ArraySize) .getDefiningOp()); } @@ -3073,7 +3067,7 @@ cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty, mlir::Value ArraySize) { assert(ip.isSet() && "Insertion point is not set"); return cast( - buildAlloca(Name.str(), Ty, Loc, CharUnits(), ip, ArraySize) + emitAlloca(Name.str(), Ty, Loc, CharUnits(), ip, ArraySize) .getDefiningOp()); } @@ -3216,10 +3210,10 @@ CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) { // This should probably fire even for if (isa(value)) { if (!getContext().DeclMustBeEmitted(cast(value))) - buildDeclRefExprDbgValue(refExpr, result.Val); + emitDeclRefExprDbgValue(refExpr, result.Val); } else { assert(isa(value)); - buildDeclRefExprDbgValue(refExpr, result.Val); + emitDeclRefExprDbgValue(refExpr, result.Val); } // If we emitted a reference constant, we need to dereference that. @@ -3234,17 +3228,17 @@ CIRGenFunction::tryEmitAsConstant(const MemberExpr *ME) { llvm_unreachable("NYI"); } -mlir::Value CIRGenFunction::buildScalarConstant( +mlir::Value CIRGenFunction::emitScalarConstant( const CIRGenFunction::ConstantEmission &Constant, Expr *E) { assert(Constant && "not a constant"); if (Constant.isReference()) - return buildLoadOfLValue(Constant.getReferenceLValue(*this, E), - E->getExprLoc()) + return emitLoadOfLValue(Constant.getReferenceLValue(*this, E), + E->getExprLoc()) .getScalarVal(); return builder.getConstant(getLoc(E->getSourceRange()), Constant.getValue()); } -LValue CIRGenFunction::buildPredefinedLValue(const PredefinedExpr *E) { +LValue CIRGenFunction::emitPredefinedLValue(const PredefinedExpr *E) { const auto *SL = E->getFunctionName(); assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); auto Fn = dyn_cast(CurFn); @@ -3259,5 +3253,5 @@ LValue CIRGenFunction::buildPredefinedLValue(const PredefinedExpr *E) { llvm_unreachable("NYI"); } - return buildStringLiteralLValue(SL); + return emitStringLiteralLValue(SL); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 2218838ac7d6..f13cb8600f9a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -135,23 +135,23 @@ class AggExprEmitter : public StmtVisitor { /// Given an expression with aggregate type that represents a value lvalue, /// this method emits the address of the lvalue, then loads the result into /// DestPtr. - void buildAggLoadOfLValue(const Expr *E); + void emitAggLoadOfLValue(const Expr *E); enum ExprValueKind { EVK_RValue, EVK_NonRValue }; /// Perform the final copy to DestPtr, if desired. - void buildFinalDestCopy(QualType type, RValue src); + void emitFinalDestCopy(QualType type, RValue src); /// Perform the final copy to DestPtr, if desired. SrcIsRValue is true if /// source comes from an RValue. - void buildFinalDestCopy(QualType type, const LValue &src, - ExprValueKind SrcValueKind = EVK_NonRValue); - void buildCopy(QualType type, const AggValueSlot &dest, - const AggValueSlot &src); + void emitFinalDestCopy(QualType type, const LValue &src, + ExprValueKind SrcValueKind = EVK_NonRValue); + void emitCopy(QualType type, const AggValueSlot &dest, + const AggValueSlot &src); - void buildArrayInit(Address DestPtr, cir::ArrayType AType, QualType ArrayQTy, - Expr *ExprToVisit, ArrayRef Args, - Expr *ArrayFiller); + void emitArrayInit(Address DestPtr, cir::ArrayType AType, QualType ArrayQTy, + Expr *ExprToVisit, ArrayRef Args, + Expr *ArrayFiller); AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) { if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T)) @@ -182,7 +182,7 @@ class AggExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } void VisitCoawaitExpr(CoawaitExpr *E) { - CGF.buildCoawaitExpr(*E, Dest, IsResultUnused); + CGF.emitCoawaitExpr(*E, Dest, IsResultUnused); } void VisitCoyieldExpr(CoyieldExpr *E) { llvm_unreachable("NYI"); } void VisitUnaryCoawait(UnaryOperator *E) { llvm_unreachable("NYI"); } @@ -193,13 +193,13 @@ class AggExprEmitter : public StmtVisitor { void VisitConstantExpr(ConstantExpr *E) { llvm_unreachable("NYI"); } // l-values - void VisitDeclRefExpr(DeclRefExpr *E) { buildAggLoadOfLValue(E); } - void VisitMemberExpr(MemberExpr *E) { buildAggLoadOfLValue(E); } - void VisitUnaryDeref(UnaryOperator *E) { buildAggLoadOfLValue(E); } + void VisitDeclRefExpr(DeclRefExpr *E) { emitAggLoadOfLValue(E); } + void VisitMemberExpr(MemberExpr *E) { emitAggLoadOfLValue(E); } + void VisitUnaryDeref(UnaryOperator *E) { emitAggLoadOfLValue(E); } void VisitStringLiteral(StringLiteral *E) { llvm_unreachable("NYI"); } void VisitCompoundLiteralExpr(CompoundLiteralExpr *E); void VisitArraySubscriptExpr(ArraySubscriptExpr *E) { - buildAggLoadOfLValue(E); + emitAggLoadOfLValue(E); } void VisitPredefinedExpr(const PredefinedExpr *E) { llvm_unreachable("NYI"); } @@ -209,7 +209,7 @@ class AggExprEmitter : public StmtVisitor { void VisitStmtExpr(const StmtExpr *E) { assert(!cir::MissingFeatures::stmtExprEvaluation() && "NYI"); - CGF.buildCompoundStmt(*E->getSubStmt(), /*getLast=*/true, Dest); + CGF.emitCompoundStmt(*E->getSubStmt(), /*getLast=*/true, Dest); } void VisitBinaryOperator(const BinaryOperator *E) { llvm_unreachable("NYI"); } @@ -229,7 +229,7 @@ class AggExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } - LValue lhs = CGF.buildLValue(E->getLHS()); + LValue lhs = CGF.emitLValue(E->getLHS()); // If we have an atomic type, evaluate into the destination and then // do an atomic copy. @@ -248,10 +248,10 @@ class AggExprEmitter : public StmtVisitor { if (!lhsSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType())) assert(!cir::MissingFeatures::atomicTypes()); - CGF.buildAggExpr(E->getRHS(), lhsSlot); + CGF.emitAggExpr(E->getRHS(), lhsSlot); // Copy into the destination if the assignment isn't ignored. - buildFinalDestCopy(E->getType(), lhs); + emitFinalDestCopy(E->getType(), lhs); if (!Dest.isIgnored() && !Dest.isExternallyDestructed() && E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct) @@ -302,7 +302,7 @@ class AggExprEmitter : public StmtVisitor { CGF, CGF.getLoc(E->getSourceRange())}; // Emit an array containing the elements. The array is externally // destructed if the std::initializer_list object is. - LValue Array = CGF.buildLValue(E->getSubExpr()); + LValue Array = CGF.emitLValue(E->getSubExpr()); assert(Array.isSimple() && "initializer_list array not a simple lvalue"); Address ArrayPtr = Array.getAddress(); @@ -321,9 +321,9 @@ class AggExprEmitter : public StmtVisitor { AggValueSlot Dest = EnsureSlot(loc, E->getType()); LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), E->getType()); LValue Start = - CGF.buildLValueForFieldInitialization(DestLV, *Field, Field->getName()); + CGF.emitLValueForFieldInitialization(DestLV, *Field, Field->getName()); mlir::Value ArrayStart = ArrayPtr.emitRawPointer(); - CGF.buildStoreThroughLValue(RValue::get(ArrayStart), Start); + CGF.emitStoreThroughLValue(RValue::get(ArrayStart), Start); ++Field; assert(Field != Record->field_end() && "Expected std::initializer_list to have two fields"); @@ -335,10 +335,10 @@ class AggExprEmitter : public StmtVisitor { mlir::Value Size = sizeOp.getRes(); Builder.getUIntNTy(ArrayType->getSizeBitWidth()); LValue EndOrLength = - CGF.buildLValueForFieldInitialization(DestLV, *Field, Field->getName()); + CGF.emitLValueForFieldInitialization(DestLV, *Field, Field->getName()); if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) { // Length. - CGF.buildStoreThroughLValue(RValue::get(Size), EndOrLength); + CGF.emitStoreThroughLValue(RValue::get(Size), EndOrLength); } else { // End pointer. assert(Field->getType()->isPointerType() && @@ -349,7 +349,7 @@ class AggExprEmitter : public StmtVisitor { auto ArrayEnd = Builder.getArrayElement(loc, loc, ArrayPtr.getPointer(), ArrayPtr.getElementType(), Size, false); - CGF.buildStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); + CGF.emitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); } assert(++Field == Record->field_end() && "Expected std::initializer_list to only have two fields"); @@ -367,9 +367,9 @@ class AggExprEmitter : public StmtVisitor { void VisitVAArgExpr(VAArgExpr *E) { llvm_unreachable("NYI"); } - void buildInitializationToLValue(Expr *E, LValue LV); + void emitInitializationToLValue(Expr *E, LValue LV); - void buildNullInitializationToLValue(mlir::Location loc, LValue Address); + void emitNullInitializationToLValue(mlir::Location loc, LValue Address); void VisitCXXThrowExpr(const CXXThrowExpr *E) { llvm_unreachable("NYI"); } void VisitAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); } }; @@ -381,27 +381,27 @@ class AggExprEmitter : public StmtVisitor { /// Given an expression with aggregate type that represents a value lvalue, this /// method emits the address of the lvalue, then loads the result into DestPtr. -void AggExprEmitter::buildAggLoadOfLValue(const Expr *E) { - LValue LV = CGF.buildLValue(E); +void AggExprEmitter::emitAggLoadOfLValue(const Expr *E) { + LValue LV = CGF.emitLValue(E); // If the type of the l-value is atomic, then do an atomic load. if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV) || cir::MissingFeatures::atomicTypes()) llvm_unreachable("atomic load is NYI"); - buildFinalDestCopy(E->getType(), LV); + emitFinalDestCopy(E->getType(), LV); } /// Perform the final copy to DestPtr, if desired. -void AggExprEmitter::buildFinalDestCopy(QualType type, RValue src) { +void AggExprEmitter::emitFinalDestCopy(QualType type, RValue src) { assert(src.isAggregate() && "value must be aggregate value!"); LValue srcLV = CGF.makeAddrLValue(src.getAggregateAddress(), type); - buildFinalDestCopy(type, srcLV, EVK_RValue); + emitFinalDestCopy(type, srcLV, EVK_RValue); } /// Perform the final copy to DestPtr, if desired. -void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src, - ExprValueKind SrcValueKind) { +void AggExprEmitter::emitFinalDestCopy(QualType type, const LValue &src, + ExprValueKind SrcValueKind) { // If Dest is ignored, then we're evaluating an aggregate expression // in a context that doesn't care about the result. Note that loads // from volatile l-values force the existence of a non-ignored @@ -425,15 +425,15 @@ void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src, AggValueSlot srcAgg = AggValueSlot::forLValue( src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased, AggValueSlot::MayOverlap); - buildCopy(type, Dest, srcAgg); + emitCopy(type, Dest, srcAgg); } /// Perform a copy from the source into the destination. /// /// \param type - the type of the aggregate being copied; qualifiers are /// ignored -void AggExprEmitter::buildCopy(QualType type, const AggValueSlot &dest, - const AggValueSlot &src) { +void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest, + const AggValueSlot &src) { if (dest.requiresGCollection()) llvm_unreachable("garbage collection is NYI"); @@ -442,8 +442,8 @@ void AggExprEmitter::buildCopy(QualType type, const AggValueSlot &dest, // the two sides. LValue DestLV = CGF.makeAddrLValue(dest.getAddress(), type); LValue SrcLV = CGF.makeAddrLValue(src.getAddress(), type); - CGF.buildAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(), - dest.isVolatile() || src.isVolatile()); + CGF.emitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(), + dest.isVolatile() || src.isVolatile()); } // FIXME(cir): This function could be shared with traditional LLVM codegen @@ -470,9 +470,9 @@ static bool isTrivialFiller(Expr *E) { return false; } -void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType, - QualType ArrayQTy, Expr *ExprToVisit, - ArrayRef Args, Expr *ArrayFiller) { +void AggExprEmitter::emitArrayInit(Address DestPtr, cir::ArrayType AType, + QualType ArrayQTy, Expr *ExprToVisit, + ArrayRef Args, Expr *ArrayFiller) { uint64_t NumInitElements = Args.size(); uint64_t NumArrayElements = AType.getSize(); @@ -539,7 +539,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType, LValue elementLV = CGF.makeAddrLValue( Address(element, cirElementType, elementAlign), elementType); - buildInitializationToLValue(Args[i], elementLV); + emitInitializationToLValue(Args[i], elementLV); } // Check whether there's a non-trivial array-fill expression. @@ -572,7 +572,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType, auto tmpAddr = CGF.CreateTempAlloca( cirElementPtrType, CGF.getPointerAlign(), loc, "arrayinit.temp"); LValue tmpLV = CGF.makeAddrLValue(tmpAddr, elementPtrType); - CGF.buildStoreThroughLValue(RValue::get(element), tmpLV); + CGF.emitStoreThroughLValue(RValue::get(element), tmpLV); // Compute the end of array auto numArrayElementsConst = builder.getConstInt( @@ -602,9 +602,9 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType, Address(currentElement, cirElementType, elementAlign), elementType); if (ArrayFiller) - buildInitializationToLValue(ArrayFiller, elementLV); + emitInitializationToLValue(ArrayFiller, elementLV); else - buildNullInitializationToLValue(loc, elementLV); + emitNullInitializationToLValue(loc, elementLV); // Tell the EH cleanup that we finished with the last element. assert(!endOfInit.isValid() && "destructed types NIY"); @@ -614,7 +614,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType, loc, mlir::cast(CGF.PtrDiffTy), 1); auto nextElement = builder.create( loc, cirElementPtrType, currentElement, one); - CGF.buildStoreThroughLValue(RValue::get(nextElement), tmpLV); + CGF.emitStoreThroughLValue(RValue::get(nextElement), tmpLV); builder.createYield(loc); }); @@ -777,8 +777,8 @@ static bool isSimpleZero(const Expr *E, CIRGenFunction &CGF) { return false; } -void AggExprEmitter::buildNullInitializationToLValue(mlir::Location loc, - LValue lv) { +void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc, + LValue lv) { QualType type = lv.getType(); // If the destination slot is already zeroed out before the aggregate is @@ -788,25 +788,25 @@ void AggExprEmitter::buildNullInitializationToLValue(mlir::Location loc, if (CGF.hasScalarEvaluationKind(type)) { // For non-aggregates, we can store the appropriate null constant. - auto null = CGF.CGM.buildNullConstant(type, loc); + auto null = CGF.CGM.emitNullConstant(type, loc); // Note that the following is not equivalent to // EmitStoreThroughBitfieldLValue for ARC types. if (lv.isBitField()) { mlir::Value result; - CGF.buildStoreThroughBitfieldLValue(RValue::get(null), lv, result); + CGF.emitStoreThroughBitfieldLValue(RValue::get(null), lv, result); } else { assert(lv.isSimple()); - CGF.buildStoreOfScalar(null, lv, /* isInitialization */ true); + CGF.emitStoreOfScalar(null, lv, /* isInitialization */ true); } } else { // There's a potential optimization opportunity in combining // memsets; that would be easy for arrays, but relatively // difficult for structures with the current code. - CGF.buildNullInitialization(loc, lv.getAddress(), lv.getType()); + CGF.emitNullInitialization(loc, lv.getAddress(), lv.getType()); } } -void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) { +void AggExprEmitter::emitInitializationToLValue(Expr *E, LValue LV) { QualType type = LV.getType(); // FIXME: Ignore result? // FIXME: Are initializers affected by volatile? @@ -821,13 +821,13 @@ void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) { if (isa(E) || isa(E)) { auto loc = E->getSourceRange().isValid() ? CGF.getLoc(E->getSourceRange()) : *CGF.currSrcLoc; - return buildNullInitializationToLValue(loc, LV); + return emitNullInitializationToLValue(loc, LV); } else if (isa(E)) { // Do nothing. return; } else if (type->isReferenceType()) { - RValue RV = CGF.buildReferenceBindingToExpr(E); - return CGF.buildStoreThroughLValue(RV, LV); + RValue RV = CGF.emitReferenceBindingToExpr(E); + return CGF.emitStoreThroughLValue(RV, LV); } switch (CGF.getEvaluationKind(type)) { @@ -835,7 +835,7 @@ void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) { llvm_unreachable("NYI"); return; case cir::TEK_Aggregate: - CGF.buildAggExpr( + CGF.emitAggExpr( E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, @@ -843,9 +843,9 @@ void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) { return; case cir::TEK_Scalar: if (LV.isSimple()) { - CGF.buildScalarInit(E, CGF.getLoc(E->getSourceRange()), LV); + CGF.emitScalarInit(E, CGF.getLoc(E->getSourceRange()), LV); } else { - CGF.buildStoreThroughLValue(RValue::get(CGF.buildScalarExpr(E)), LV); + CGF.emitStoreThroughLValue(RValue::get(CGF.emitScalarExpr(E)), LV); } return; } @@ -859,14 +859,14 @@ void AggExprEmitter::VisitMaterializeTemporaryExpr( void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType()); - CGF.buildCXXConstructExpr(E, Slot); + CGF.emitCXXConstructExpr(E, Slot); } void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { if (Dest.isPotentiallyAliased() && E->getType().isPODType(CGF.getContext())) { // For a POD type, just emit a load of the lvalue + a copy, because our // compound literal might alias the destination. - buildAggLoadOfLValue(E); + emitAggLoadOfLValue(E); return; } @@ -879,7 +879,7 @@ void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { if (Destruct) Slot.setExternallyDestructed(); - CGF.buildAggExpr(E->getInitializer(), Slot); + CGF.emitAggExpr(E->getInitializer(), Slot); if (Destruct) if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) @@ -934,12 +934,12 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { // Emit initialization LValue LV = - CGF.buildLValueForFieldInitialization(SlotLV, *CurField, fieldName); + CGF.emitLValueForFieldInitialization(SlotLV, *CurField, fieldName); if (CurField->hasCapturedVLAType()) { llvm_unreachable("NYI"); } - buildInitializationToLValue(captureInit, LV); + emitInitializationToLValue(captureInit, LV); // Push a destructor if necessary. if (QualType::DestructionKind DtorKind = @@ -954,16 +954,16 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { void AggExprEmitter::VisitCastExpr(CastExpr *E) { if (const auto *ECE = dyn_cast(E)) - CGF.CGM.buildExplicitCastExprType(ECE, &CGF); + CGF.CGM.emitExplicitCastExprType(ECE, &CGF); switch (E->getCastKind()) { case CK_LValueToRValueBitCast: { if (Dest.isIgnored()) { - CGF.buildAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), - /*ignoreResult=*/true); + CGF.emitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), + /*ignoreResult=*/true); break; } - LValue SourceLV = CGF.buildLValue(E->getSubExpr()); + LValue SourceLV = CGF.emitLValue(E->getSubExpr()); Address SourceAddress = SourceLV.getAddress(); Address DestAddress = Dest.getAddress(); @@ -984,16 +984,16 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { case CK_ToUnion: { // Evaluate even if the destination is ignored. if (Dest.isIgnored()) { - CGF.buildAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), - /*ignoreResult=*/true); + CGF.emitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), + /*ignoreResult=*/true); break; } // GCC union extension QualType Ty = E->getSubExpr()->getType(); Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty)); - buildInitializationToLValue(E->getSubExpr(), - CGF.makeAddrLValue(CastPtr, Ty)); + emitInitializationToLValue(E->getSubExpr(), + CGF.makeAddrLValue(CastPtr, Ty)); break; } @@ -1099,7 +1099,7 @@ void AggExprEmitter::VisitCallExpr(const CallExpr *E) { } withReturnValueSlot( - E, [&](ReturnValueSlot Slot) { return CGF.buildCallExpr(E, Slot); }); + E, [&](ReturnValueSlot Slot) { return CGF.emitCallExpr(E, Slot); }); } void AggExprEmitter::withReturnValueSlot( @@ -1136,7 +1136,7 @@ void AggExprEmitter::withReturnValueSlot( return; assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer()); - buildFinalDestCopy(E->getType(), Src); + emitFinalDestCopy(E->getType(), Src); if (!RequiresDestruction) { // If there's no dtor to run, the copy was the last use of our temporary. @@ -1166,8 +1166,8 @@ void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) { if (E->getType()->isAnyComplexType()) llvm_unreachable("NYI"); - auto LHS = CGF.buildAnyExpr(E->getLHS()).getScalarVal(); - auto RHS = CGF.buildAnyExpr(E->getRHS()).getScalarVal(); + auto LHS = CGF.emitAnyExpr(E->getLHS()).getScalarVal(); + auto RHS = CGF.emitAnyExpr(E->getRHS()).getScalarVal(); mlir::Value ResultScalar; if (ArgTy->isNullPtrType()) { @@ -1196,9 +1196,9 @@ void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) { // Emit the address of the first (and only) field in the comparison category // type, and initialize it from the constant integer value produced above. const FieldDecl *ResultField = *CmpInfo.Record->field_begin(); - LValue FieldLV = CGF.buildLValueForFieldInitialization( - DestLV, ResultField, ResultField->getName()); - CGF.buildStoreThroughLValue(RValue::get(ResultScalar), FieldLV); + LValue FieldLV = CGF.emitLValueForFieldInitialization(DestLV, ResultField, + ResultField->getName()); + CGF.emitStoreThroughLValue(RValue::get(ResultScalar), FieldLV); // All done! The result is in the Dest slot. } @@ -1249,8 +1249,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( // Handle initialization of an array. if (ExprToVisit->getType()->isConstantArrayType()) { auto AType = cast(Dest.getAddress().getElementType()); - buildArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), - ExprToVisit, InitExprs, ArrayFiller); + emitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit, + InitExprs, ArrayFiller); return; } else if (ExprToVisit->getType()->isVariableArrayType()) { llvm_unreachable("variable arrays NYI"); @@ -1311,14 +1311,14 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( FieldDecl *Field = InitializedFieldInUnion; LValue FieldLoc = - CGF.buildLValueForFieldInitialization(DestLV, Field, Field->getName()); + CGF.emitLValueForFieldInitialization(DestLV, Field, Field->getName()); if (NumInitElements) { // Store the initializer into the field - buildInitializationToLValue(InitExprs[0], FieldLoc); + emitInitializationToLValue(InitExprs[0], FieldLoc); } else { // Default-initialize to null. - buildNullInitializationToLValue(CGF.getLoc(ExprToVisit->getSourceRange()), - FieldLoc); + emitNullInitializationToLValue(CGF.getLoc(ExprToVisit->getSourceRange()), + FieldLoc); } return; @@ -1342,7 +1342,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( CGF.getTypes().isZeroInitializable(ExprToVisit->getType())) break; LValue LV = - CGF.buildLValueForFieldInitialization(DestLV, field, field->getName()); + CGF.emitLValueForFieldInitialization(DestLV, field, field->getName()); // We never generate write-barries for initialized fields. assert(!cir::MissingFeatures::setNonGC()); @@ -1350,11 +1350,11 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( // Store the initializer into the field. CIRGenFunction::SourceLocRAIIObject loc{ CGF, CGF.getLoc(record->getSourceRange())}; - buildInitializationToLValue(InitExprs[curInitIndex++], LV); + emitInitializationToLValue(InitExprs[curInitIndex++], LV); } else { // We're out of initializers; default-initialize to null - buildNullInitializationToLValue(CGF.getLoc(ExprToVisit->getSourceRange()), - LV); + emitNullInitializationToLValue(CGF.getLoc(ExprToVisit->getSourceRange()), + LV); } // Push a destructor if necessary. @@ -1389,7 +1389,7 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { // Push that destructor we promised. if (!wasExternallyDestructed) - CGF.buildCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress()); + CGF.emitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress()); } void AggExprEmitter::VisitAbstractConditionalOperator( @@ -1409,7 +1409,7 @@ void AggExprEmitter::VisitAbstractConditionalOperator( E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct; isExternallyDestructed |= destructNonTrivialCStruct; - CGF.buildIfOnBoolExpr( + CGF.emitIfOnBoolExpr( E->getCond(), /*thenBuilder=*/ [&](mlir::OpBuilder &, mlir::Location) { eval.begin(CGF); @@ -1447,24 +1447,23 @@ void AggExprEmitter::VisitAbstractConditionalOperator( } void AggExprEmitter::VisitBinComma(const BinaryOperator *E) { - CGF.buildIgnoredExpr(E->getLHS()); + CGF.emitIgnoredExpr(E->getLHS()); Visit(E->getRHS()); } void AggExprEmitter::VisitCXXInheritedCtorInitExpr( const CXXInheritedCtorInitExpr *E) { AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType()); - CGF.buildInheritedCXXConstructorCall(E->getConstructor(), - E->constructsVBase(), Slot.getAddress(), - E->inheritedFromVBase(), E); + CGF.emitInheritedCXXConstructorCall(E->getConstructor(), E->constructsVBase(), + Slot.getAddress(), + E->inheritedFromVBase(), E); } void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { QualType T = E->getType(); mlir::Location loc = CGF.getLoc(E->getSourceRange()); AggValueSlot Slot = EnsureSlot(loc, T); - buildNullInitializationToLValue(loc, - CGF.makeAddrLValue(Slot.getAddress(), T)); + emitNullInitializationToLValue(loc, CGF.makeAddrLValue(Slot.getAddress(), T)); } //===----------------------------------------------------------------------===// @@ -1599,7 +1598,7 @@ AggValueSlot::Overlap_t CIRGenFunction::getOverlapForBaseInit( return AggValueSlot::MayOverlap; } -void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { +void CIRGenFunction::emitAggExpr(const Expr *E, AggValueSlot Slot) { assert(E && CIRGenFunction::hasAggregateEvaluationKind(E->getType()) && "Invalid aggregate expression to emit"); assert((Slot.getAddress().isValid() || Slot.isIgnored()) && @@ -1611,9 +1610,9 @@ void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) { AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast(E)); } -void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty, - AggValueSlot::Overlap_t MayOverlap, - bool isVolatile) { +void CIRGenFunction::emitAggregateCopy(LValue Dest, LValue Src, QualType Ty, + AggValueSlot::Overlap_t MayOverlap, + bool isVolatile) { // TODO(cir): this function needs improvements, commented code for now since // this will be touched again soon. assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex"); @@ -1737,13 +1736,13 @@ CIRGenFunction::getOverlapForFieldInit(const FieldDecl *FD) { return AggValueSlot::MayOverlap; } -LValue CIRGenFunction::buildAggExprToLValue(const Expr *E) { +LValue CIRGenFunction::emitAggExprToLValue(const Expr *E) { assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!"); Address Temp = CreateMemTemp(E->getType(), getLoc(E->getSourceRange())); LValue LV = makeAddrLValue(Temp, E->getType()); - buildAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, - AggValueSlot::DoesNotOverlap)); + emitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, + AggValueSlot::DoesNotOverlap)); return LV; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 136480f9e277..be3ec6071def 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -30,10 +30,10 @@ struct MemberCallInfo { }; } // namespace -static RValue buildNewDeleteCall(CIRGenFunction &CGF, - const FunctionDecl *CalleeDecl, - const FunctionProtoType *CalleeType, - const CallArgList &Args); +static RValue emitNewDeleteCall(CIRGenFunction &CGF, + const FunctionDecl *CalleeDecl, + const FunctionProtoType *CalleeType, + const CallArgList &Args); static MemberCallInfo commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD, @@ -68,8 +68,8 @@ commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD, } else if (CE) { // Special case: skip first argument of CXXOperatorCall (it is "this"). unsigned ArgsToSkip = isa(CE) ? 1 : 0; - CGF.buildCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip), - CE->getDirectCallee()); + CGF.emitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip), + CE->getDirectCallee()); } else { assert( FPT->getNumParams() == 0 && @@ -79,7 +79,7 @@ commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD, return {required, PrefixSize}; } -RValue CIRGenFunction::buildCXXMemberOrOperatorCall( +RValue CIRGenFunction::emitCXXMemberOrOperatorCall( const CXXMethodDecl *MD, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, mlir::Value This, mlir::Value ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE, CallArgList *RtlArgs) { @@ -92,8 +92,8 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorCall( Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize); assert((CE || currSrcLoc) && "expected source location"); mlir::Location loc = CE ? getLoc(CE->getExprLoc()) : *currSrcLoc; - return buildCall(FnInfo, Callee, ReturnValue, Args, nullptr, - CE && CE == MustTailCall, loc, CE); + return emitCall(FnInfo, Callee, ReturnValue, Args, nullptr, + CE && CE == MustTailCall, loc, CE); } // TODO(cir): this can be shared with LLVM codegen @@ -106,8 +106,8 @@ static CXXRecordDecl *getCXXRecord(const Expr *E) { } RValue -CIRGenFunction::buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, - ReturnValueSlot ReturnValue) { +CIRGenFunction::emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, + ReturnValueSlot ReturnValue) { const BinaryOperator *BO = cast(E->getCallee()->IgnoreParens()); const Expr *BaseExpr = BO->getLHS(); @@ -119,15 +119,15 @@ CIRGenFunction::buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, // Emit the 'this' pointer. Address This = Address::invalid(); if (BO->getOpcode() == BO_PtrMemI) - This = buildPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull); + This = emitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull); else - This = buildLValue(BaseExpr).getAddress(); + This = emitLValue(BaseExpr).getAddress(); - buildTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(), - QualType(MPT->getClass(), 0)); + emitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(), + QualType(MPT->getClass(), 0)); // Get the member function pointer. - mlir::Value MemFnPtr = buildScalarExpr(MemFnExpr); + mlir::Value MemFnPtr = emitScalarExpr(MemFnExpr); // Resolve the member function pointer to the actual callee and adjust the // "this" pointer for call. @@ -138,19 +138,19 @@ CIRGenFunction::buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, // Prepare the call arguments. CallArgList ArgsList; ArgsList.add(RValue::get(AdjustedThis), getContext().VoidPtrTy); - buildCallArgs(ArgsList, FPT, E->arguments()); + emitCallArgs(ArgsList, FPT, E->arguments()); RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1); // Build the call. CIRGenCallee Callee(FPT, CalleePtr.getDefiningOp()); - return buildCall(CGM.getTypes().arrangeCXXMethodCall(ArgsList, FPT, required, - /*PrefixSize=*/0), - Callee, ReturnValue, ArgsList, nullptr, E == MustTailCall, - Loc); + return emitCall(CGM.getTypes().arrangeCXXMethodCall(ArgsList, FPT, required, + /*PrefixSize=*/0), + Callee, ReturnValue, ArgsList, nullptr, E == MustTailCall, + Loc); } -RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( +RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr( const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, const Expr *Base) { @@ -183,7 +183,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( // one or the one of the full expression, we would have to build // a derived-to-base cast to compute the correct this pointer, but // we don't have support for that yet, so do a virtual call. - assert(!cir::MissingFeatures::buildDerivedToBaseCastForDevirt()); + assert(!cir::MissingFeatures::emitDerivedToBaseCastForDevirt()); DevirtualizedMethod = nullptr; } } @@ -206,12 +206,12 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( // See further note on TrivialAssignment, we don't handle this during // codegen, differently than LLVM, which early optimizes like this: // if (TrivialAssignment) { - // TrivialAssignmentRHS = buildLValue(CE->getArg(1)); + // TrivialAssignmentRHS = emitLValue(CE->getArg(1)); // } else { RtlArgs = &RtlArgStorage; - buildCallArgs(*RtlArgs, MD->getType()->castAs(), - drop_begin(CE->arguments(), 1), CE->getDirectCallee(), - /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft); + emitCallArgs(*RtlArgs, MD->getType()->castAs(), + drop_begin(CE->arguments(), 1), CE->getDirectCallee(), + /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft); } } @@ -219,10 +219,10 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( if (IsArrow) { LValueBaseInfo BaseInfo; assert(!cir::MissingFeatures::tbaa()); - Address ThisValue = buildPointerWithAlignment(Base, &BaseInfo); + Address ThisValue = emitPointerWithAlignment(Base, &BaseInfo); This = makeAddrLValue(ThisValue, Base->getType(), BaseInfo); } else { - This = buildLValue(Base); + This = emitLValue(Base); } if (const CXXConstructorDecl *Ctor = dyn_cast(MD)) { @@ -244,8 +244,8 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( // We don't early optimize like LLVM does: // LValue RHS = isa(CE) ? TrivialAssignmentRHS // : - // buildLValue(*CE->arg_begin()); - // buildAggregateAssign(This, RHS, CE->getType()); + // emitLValue(*CE->arg_begin()); + // emitAggregateAssign(This, RHS, CE->getType()); // return RValue::get(This.getPointer()); } else { assert(MD->getParent()->mayInsertExtraPadding() && @@ -284,7 +284,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( SkippedChecks.set(SanitizerKind::Null, true); } - if (cir::MissingFeatures::buildTypeCheck()) + if (cir::MissingFeatures::emitTypeCheck()) llvm_unreachable("NYI"); // C++ [class.virtual]p12: @@ -318,9 +318,9 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( IsArrow ? Base->getType()->getPointeeType() : Base->getType(); // CIRGen does not pass CallOrInvoke here (different from OG LLVM codegen) // because in practice it always null even in OG. - buildCXXDestructorCall(globalDecl, Callee, This.getPointer(), thisTy, - /*ImplicitParam=*/nullptr, - /*ImplicitParamTy=*/QualType(), CE); + emitCXXDestructorCall(globalDecl, Callee, This.getPointer(), thisTy, + /*ImplicitParam=*/nullptr, + /*ImplicitParamTy=*/QualType(), CE); } return RValue::get(nullptr); } @@ -355,25 +355,25 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr( This.setAddress(NewThisAddr); } - return buildCXXMemberOrOperatorCall( + return emitCXXMemberOrOperatorCall( CalleeDecl, Callee, ReturnValue, This.getPointer(), /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs); } RValue -CIRGenFunction::buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, - const CXXMethodDecl *MD, - ReturnValueSlot ReturnValue) { +CIRGenFunction::emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, + const CXXMethodDecl *MD, + ReturnValueSlot ReturnValue) { assert(MD->isInstance() && "Trying to emit a member call expr on a static method!"); - return buildCXXMemberOrOperatorMemberCallExpr( + return emitCXXMemberOrOperatorMemberCallExpr( E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr, /*IsArrow=*/false, E->getArg(0)); } -static void buildNullBaseClassInitialization(CIRGenFunction &CGF, - Address DestPtr, - const CXXRecordDecl *Base) { +static void emitNullBaseClassInitialization(CIRGenFunction &CGF, + Address DestPtr, + const CXXRecordDecl *Base) { if (Base->isEmpty()) return; @@ -422,7 +422,7 @@ static void buildNullBaseClassInitialization(CIRGenFunction &CGF, // TODO(cir): `nullConstantForBase` might be better off as a value instead // of an mlir::TypedAttr? Once this moves out of skeleton, make sure to double // check on what's better. - mlir::Attribute nullConstantForBase = CGF.CGM.buildNullConstantForBase(Base); + mlir::Attribute nullConstantForBase = CGF.CGM.emitNullConstantForBase(Base); if (!CGF.getBuilder().isNullValue(nullConstantForBase)) { llvm_unreachable("NYI"); // Otherwise, just memset the whole thing to zero. This is legal @@ -433,8 +433,8 @@ static void buildNullBaseClassInitialization(CIRGenFunction &CGF, } } -void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, - AggValueSlot Dest) { +void CIRGenFunction::emitCXXConstructExpr(const CXXConstructExpr *E, + AggValueSlot Dest) { assert(!Dest.isIgnored() && "Must have a destination!"); const auto *CD = E->getConstructor(); @@ -446,13 +446,13 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, switch (E->getConstructionKind()) { case CXXConstructionKind::Delegating: case CXXConstructionKind::Complete: - buildNullInitialization(getLoc(E->getSourceRange()), Dest.getAddress(), - E->getType()); + emitNullInitialization(getLoc(E->getSourceRange()), Dest.getAddress(), + E->getType()); break; case CXXConstructionKind::VirtualBase: case CXXConstructionKind::NonVirtualBase: - buildNullBaseClassInitialization(*this, Dest.getAddress(), - CD->getParent()); + emitNullBaseClassInitialization(*this, Dest.getAddress(), + CD->getParent()); break; } } @@ -475,13 +475,13 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, assert(SrcObj->isTemporaryObject(getContext(), CD->getParent())); assert( getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType())); - buildAggExpr(SrcObj, Dest); + emitAggExpr(SrcObj, Dest); return; } if (const ArrayType *arrayType = getContext().getAsArrayType(E->getType())) { - buildCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E, - Dest.isSanitizerChecked()); + emitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E, + Dest.isSanitizerChecked()); } else { clang::CXXCtorType Type = Ctor_Complete; bool ForVirtualBase = false; @@ -504,7 +504,7 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E, break; } - buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); + emitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E); } } @@ -549,11 +549,10 @@ static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) { return Params; } -static mlir::Value buildCXXNewAllocSize(CIRGenFunction &CGF, - const CXXNewExpr *e, - unsigned minElements, - mlir::Value &numElements, - mlir::Value &sizeWithoutCookie) { +static mlir::Value emitCXXNewAllocSize(CIRGenFunction &CGF, const CXXNewExpr *e, + unsigned minElements, + mlir::Value &numElements, + mlir::Value &sizeWithoutCookie) { QualType type = e->getAllocatedType(); if (!e->isArray()) { @@ -655,7 +654,7 @@ class CallDeleteDuringNew final : public EHScopeStack::Cleanup { } // Call 'operator delete'. - buildNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs); + emitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs); } }; } // namespace @@ -724,11 +723,11 @@ static void EnterNewDeleteCleanup(CIRGenFunction &CGF, const CXXNewExpr *E, static void StoreAnyExprIntoOneUnit(CIRGenFunction &CGF, const Expr *Init, QualType AllocType, Address NewPtr, AggValueSlot::Overlap_t MayOverlap) { - // FIXME: Refactor with buildExprAsInit. + // FIXME: Refactor with emitExprAsInit. switch (CGF.getEvaluationKind(AllocType)) { case cir::TEK_Scalar: - CGF.buildScalarInit(Init, CGF.getLoc(Init->getSourceRange()), - CGF.makeAddrLValue(NewPtr, AllocType), false); + CGF.emitScalarInit(Init, CGF.getLoc(Init->getSourceRange()), + CGF.makeAddrLValue(NewPtr, AllocType), false); return; case cir::TEK_Complex: llvm_unreachable("NYI"); @@ -739,17 +738,17 @@ static void StoreAnyExprIntoOneUnit(CIRGenFunction &CGF, const Expr *Init, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, MayOverlap, AggValueSlot::IsNotZeroed, AggValueSlot::IsSanitizerChecked); - CGF.buildAggExpr(Init, Slot); + CGF.emitAggExpr(Init, Slot); return; } } llvm_unreachable("bad evaluation kind"); } -static void buildNewInitializer(CIRGenFunction &CGF, const CXXNewExpr *E, - QualType ElementType, mlir::Type ElementTy, - Address NewPtr, mlir::Value NumElements, - mlir::Value AllocSizeWithoutCookie) { +static void emitNewInitializer(CIRGenFunction &CGF, const CXXNewExpr *E, + QualType ElementType, mlir::Type ElementTy, + Address NewPtr, mlir::Value NumElements, + mlir::Value AllocSizeWithoutCookie) { assert(!cir::MissingFeatures::generateDebugInfo()); if (E->isArray()) { llvm_unreachable("NYI"); @@ -785,7 +784,7 @@ struct CallObjectDelete final : EHScopeStack::Cleanup { : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {} void Emit(CIRGenFunction &CGF, Flags flags) override { - CGF.buildDeleteCall(OperatorDelete, Ptr, ElementType); + CGF.emitDeleteCall(OperatorDelete, Ptr, ElementType); } }; } // namespace @@ -800,8 +799,8 @@ static bool EmitObjectDelete(CIRGenFunction &CGF, const CXXDeleteExpr *DE, // dynamic type, the static type shall be a base class of the dynamic type // of the object to be deleted and the static type shall have a virtual // destructor or the behavior is undefined. - CGF.buildTypeCheck(CIRGenFunction::TCK_MemberCall, DE->getExprLoc(), - Ptr.getPointer(), ElementType); + CGF.emitTypeCheck(CIRGenFunction::TCK_MemberCall, DE->getExprLoc(), + Ptr.getPointer(), ElementType); const FunctionDecl *OperatorDelete = DE->getOperatorDelete(); assert(!OperatorDelete->isDestroyingOperatorDelete()); @@ -878,9 +877,9 @@ static bool EmitObjectDelete(CIRGenFunction &CGF, const CXXDeleteExpr *DE, return false; } -void CIRGenFunction::buildCXXDeleteExpr(const CXXDeleteExpr *E) { +void CIRGenFunction::emitCXXDeleteExpr(const CXXDeleteExpr *E) { const Expr *Arg = E->getArgument(); - Address Ptr = buildPointerWithAlignment(Arg); + Address Ptr = emitPointerWithAlignment(Arg); // Null check the pointer. // @@ -918,7 +917,7 @@ void CIRGenFunction::buildCXXDeleteExpr(const CXXDeleteExpr *E) { } } -mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { +mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *E) { // The element type being allocated. QualType allocType = getContext().getBaseElementType(E->getAllocatedType()); @@ -940,7 +939,7 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { mlir::Value numElements = nullptr; mlir::Value allocSizeWithoutCookie = nullptr; - mlir::Value allocSize = buildCXXNewAllocSize( + mlir::Value allocSize = emitCXXNewAllocSize( *this, E, minElements, numElements, allocSizeWithoutCookie); CharUnits allocAlign = getContext().getTypeAlignInChars(allocType); @@ -954,7 +953,7 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { const Expr *arg = *E->placement_arguments().begin(); LValueBaseInfo BaseInfo; - allocation = buildPointerWithAlignment(arg, &BaseInfo); + allocation = emitPointerWithAlignment(arg, &BaseInfo); // The pointer expression will, in many cases, be an opaque void*. // In these cases, discard the computed alignment and use the @@ -989,13 +988,13 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { } // FIXME: Why do we not pass a CalleeDecl here? - buildCallArgs(allocatorArgs, allocatorType, E->placement_arguments(), - /*AC*/ - AbstractCallee(), - /*ParamsToSkip*/ - ParamsToSkip); + emitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(), + /*AC*/ + AbstractCallee(), + /*ParamsToSkip*/ + ParamsToSkip); RValue RV = - buildNewDeleteCall(*this, allocator, allocatorType, allocatorArgs); + emitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs); // Set !heapallocsite metadata on the call to operator new. assert(!cir::MissingFeatures::generateDebugInfo()); @@ -1116,13 +1115,13 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { // we'll null check the wrong pointer here. SanitizerSet SkippedChecks; SkippedChecks.set(SanitizerKind::Null, nullCheck); - buildTypeCheck(CIRGenFunction::TCK_ConstructorCall, - E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(), - result.getPointer(), allocType, result.getAlignment(), - SkippedChecks, numElements); + emitTypeCheck(CIRGenFunction::TCK_ConstructorCall, + E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(), + result.getPointer(), allocType, result.getAlignment(), + SkippedChecks, numElements); - buildNewInitializer(*this, E, allocType, elementTy, result, numElements, - allocSizeWithoutCookie); + emitNewInitializer(*this, E, allocType, elementTy, result, numElements, + allocSizeWithoutCookie); auto resultPtr = result.getPointer(); if (E->isArray()) { llvm_unreachable("NYI"); @@ -1151,12 +1150,12 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) { return resultPtr; } -RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor, - const CIRGenCallee &Callee, - mlir::Value This, QualType ThisTy, - mlir::Value ImplicitParam, - QualType ImplicitParamTy, - const CallExpr *CE) { +RValue CIRGenFunction::emitCXXDestructorCall(GlobalDecl Dtor, + const CIRGenCallee &Callee, + mlir::Value This, QualType ThisTy, + mlir::Value ImplicitParam, + QualType ImplicitParamTy, + const CallExpr *CE) { const CXXMethodDecl *DtorDecl = cast(Dtor.getDecl()); assert(!ThisTy.isNull()); @@ -1173,25 +1172,25 @@ RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor, commonBuildCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam, ImplicitParamTy, CE, Args, nullptr); assert((CE || Dtor.getDecl()) && "expected source location provider"); - return buildCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee, - ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall, - CE ? getLoc(CE->getExprLoc()) - : getLoc(Dtor.getDecl()->getSourceRange())); + return emitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee, + ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall, + CE ? getLoc(CE->getExprLoc()) + : getLoc(Dtor.getDecl()->getSourceRange())); } /// Emit a call to an operator new or operator delete function, as implicitly /// created by new-expressions and delete-expressions. -static RValue buildNewDeleteCall(CIRGenFunction &CGF, - const FunctionDecl *CalleeDecl, - const FunctionProtoType *CalleeType, - const CallArgList &Args) { +static RValue emitNewDeleteCall(CIRGenFunction &CGF, + const FunctionDecl *CalleeDecl, + const FunctionProtoType *CalleeType, + const CallArgList &Args) { cir::CIRCallOpInterface CallOrTryCall; auto CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl); CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl)); - RValue RV = CGF.buildCall(CGF.CGM.getTypes().arrangeFreeFunctionCall( - Args, CalleeType, /*ChainCall=*/false), - Callee, ReturnValueSlot(), Args, &CallOrTryCall); + RValue RV = CGF.emitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall( + Args, CalleeType, /*ChainCall=*/false), + Callee, ReturnValueSlot(), Args, &CallOrTryCall); /// C++1y [expr.new]p10: /// [In a new-expression,] an implementation is allowed to omit a call @@ -1202,11 +1201,11 @@ static RValue buildNewDeleteCall(CIRGenFunction &CGF, return RV; } -RValue CIRGenFunction::buildBuiltinNewDeleteCall(const FunctionProtoType *type, - const CallExpr *theCall, - bool isDelete) { +RValue CIRGenFunction::emitBuiltinNewDeleteCall(const FunctionProtoType *type, + const CallExpr *theCall, + bool isDelete) { CallArgList args; - buildCallArgs(args, type, theCall->arguments()); + emitCallArgs(args, type, theCall->arguments()); // Find the allocation or deallocation function that we're calling. ASTContext &ctx = getContext(); DeclarationName name = @@ -1215,14 +1214,14 @@ RValue CIRGenFunction::buildBuiltinNewDeleteCall(const FunctionProtoType *type, for (auto *decl : ctx.getTranslationUnitDecl()->lookup(name)) if (auto *fd = dyn_cast(decl)) if (ctx.hasSameType(fd->getType(), QualType(type, 0))) - return buildNewDeleteCall(*this, fd, type, args); + return emitNewDeleteCall(*this, fd, type, args); llvm_unreachable("predeclared global operator new/delete is missing"); } -void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, - mlir::Value Ptr, QualType DeleteTy, - mlir::Value NumElements, - CharUnits CookieSize) { +void CIRGenFunction::emitDeleteCall(const FunctionDecl *DeleteFD, + mlir::Value Ptr, QualType DeleteTy, + mlir::Value NumElements, + CharUnits CookieSize) { assert((!NumElements && CookieSize.isZero()) || DeleteFD->getOverloadedOperator() == OO_Array_Delete); @@ -1281,7 +1280,7 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, "unknown parameter to usual delete function"); // Emit the call to delete. - buildNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs); + emitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs); // If call argument lowering didn't use the destroying_delete_t alloca, // remove it again. @@ -1290,8 +1289,8 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD, } } -static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF, - mlir::Location Loc, QualType DestTy) { +static mlir::Value emitDynamicCastToNull(CIRGenFunction &CGF, + mlir::Location Loc, QualType DestTy) { mlir::Type DestCIRTy = CGF.ConvertType(DestTy); assert(mlir::isa(DestCIRTy) && "result of dynamic_cast should be a ptr"); @@ -1302,7 +1301,7 @@ static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF, auto *CurrentRegion = CGF.getBuilder().getBlock()->getParent(); /// C++ [expr.dynamic.cast]p9: /// A failed cast to reference type throws std::bad_cast - CGF.CGM.getCXXABI().buildBadCastCall(CGF, Loc); + CGF.CGM.getCXXABI().emitBadCastCall(CGF, Loc); // The call to bad_cast will terminate the current block. Create a new block // to hold any follow up code. @@ -1312,11 +1311,11 @@ static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF, return NullPtrValue; } -mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, - const CXXDynamicCastExpr *DCE) { +mlir::Value CIRGenFunction::emitDynamicCast(Address ThisAddr, + const CXXDynamicCastExpr *DCE) { auto loc = getLoc(DCE->getSourceRange()); - CGM.buildExplicitCastExprType(DCE, this); + CGM.emitExplicitCastExprType(DCE, this); QualType destTy = DCE->getTypeAsWritten(); QualType srcTy = DCE->getSubExpr()->getType(); @@ -1340,13 +1339,13 @@ mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr, } assert(srcRecordTy->isRecordType() && "source type must be a record type!"); - buildTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(), - srcRecordTy); + emitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(), + srcRecordTy); if (DCE->isAlwaysNull()) - return buildDynamicCastToNull(*this, loc, destTy); + return emitDynamicCastToNull(*this, loc, destTy); auto destCirTy = mlir::cast(ConvertType(destTy)); - return CGM.getCXXABI().buildDynamicCast(*this, loc, srcRecordTy, destRecordTy, - destCirTy, isRefCast, ThisAddr); + return CGM.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy, + destCirTy, isRefCast, ThisAddr); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 61b1979f0ebf..df4aab399cfc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -31,23 +31,23 @@ class ComplexExprEmitter : public StmtVisitor { /// Given an expression with complex type that represents a value l-value, /// this method emits the address of the l-value, then loads and returns the /// result. - mlir::Value buildLoadOfLValue(const Expr *E) { - return buildLoadOfLValue(CGF.buildLValue(E), E->getExprLoc()); + mlir::Value emitLoadOfLValue(const Expr *E) { + return emitLoadOfLValue(CGF.emitLValue(E), E->getExprLoc()); } - mlir::Value buildLoadOfLValue(LValue LV, SourceLocation Loc); + mlir::Value emitLoadOfLValue(LValue LV, SourceLocation Loc); /// EmitStoreOfComplex - Store the specified real/imag parts into the /// specified value pointer. - void buildStoreOfComplex(mlir::Location Loc, mlir::Value Val, LValue LV, - bool isInit); + void emitStoreOfComplex(mlir::Location Loc, mlir::Value Val, LValue LV, + bool isInit); /// Emit a cast from complex value Val to DestType. - mlir::Value buildComplexToComplexCast(mlir::Value Val, QualType SrcType, - QualType DestType, SourceLocation Loc); - /// Emit a cast from scalar value Val to DestType. - mlir::Value buildScalarToComplexCast(mlir::Value Val, QualType SrcType, + mlir::Value emitComplexToComplexCast(mlir::Value Val, QualType SrcType, QualType DestType, SourceLocation Loc); + /// Emit a cast from scalar value Val to DestType. + mlir::Value emitScalarToComplexCast(mlir::Value Val, QualType SrcType, + QualType DestType, SourceLocation Loc); //===--------------------------------------------------------------------===// // Visitor Methods @@ -89,8 +89,8 @@ class ComplexExprEmitter : public StmtVisitor { Expr *E) { assert(Constant && "not a constant"); if (Constant.isReference()) - return buildLoadOfLValue(Constant.getReferenceLValue(CGF, E), - E->getExprLoc()); + return emitLoadOfLValue(Constant.getReferenceLValue(CGF, E), + E->getExprLoc()); auto valueAttr = Constant.getValue(); return Builder.getConstant(CGF.getLoc(E->getSourceRange()), valueAttr); @@ -100,7 +100,7 @@ class ComplexExprEmitter : public StmtVisitor { mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { if (CIRGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) return emitConstant(Constant, E); - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); } mlir::Value VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { llvm_unreachable("NYI"); @@ -120,13 +120,13 @@ class ComplexExprEmitter : public StmtVisitor { // FIXME: CompoundLiteralExpr - mlir::Value buildCast(CastKind CK, Expr *Op, QualType DestTy); + mlir::Value emitCast(CastKind CK, Expr *Op, QualType DestTy); mlir::Value VisitImplicitCastExpr(ImplicitCastExpr *E) { // Unlike for scalars, we don't have to worry about function->ptr demotion // here. if (E->changesVolatileQualification()) - return buildLoadOfLValue(E); - return buildCast(E->getCastKind(), E->getSubExpr(), E->getType()); + return emitLoadOfLValue(E); + return emitCast(E->getCastKind(), E->getSubExpr(), E->getType()); } mlir::Value VisitCastExpr(CastExpr *E); mlir::Value VisitCallExpr(const CallExpr *E); @@ -189,22 +189,22 @@ class ComplexExprEmitter : public StmtVisitor { FPOptions FPFeatures{}; }; - BinOpInfo buildBinOps(const BinaryOperator *E, - QualType PromotionTy = QualType()); - mlir::Value buildPromoted(const Expr *E, QualType PromotionTy); - mlir::Value buildPromotedComplexOperand(const Expr *E, QualType PromotionTy); + BinOpInfo emitBinOps(const BinaryOperator *E, + QualType PromotionTy = QualType()); + mlir::Value emitPromoted(const Expr *E, QualType PromotionTy); + mlir::Value emitPromotedComplexOperand(const Expr *E, QualType PromotionTy); - LValue buildCompoundAssignLValue( + LValue emitCompoundAssignLValue( const CompoundAssignOperator *E, mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &), RValue &Val); - mlir::Value buildCompoundAssign( + mlir::Value emitCompoundAssign( const CompoundAssignOperator *E, mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &)); - mlir::Value buildBinAdd(const BinOpInfo &Op); - mlir::Value buildBinSub(const BinOpInfo &Op); - mlir::Value buildBinMul(const BinOpInfo &Op); - mlir::Value buildBinDiv(const BinOpInfo &Op); + mlir::Value emitBinAdd(const BinOpInfo &Op); + mlir::Value emitBinSub(const BinOpInfo &Op); + mlir::Value emitBinMul(const BinOpInfo &Op); + mlir::Value emitBinDiv(const BinOpInfo &Op); QualType HigherPrecisionTypeForComplexArithmetic(QualType ElementType, bool IsDivOpCode) { @@ -254,9 +254,9 @@ class ComplexExprEmitter : public StmtVisitor { QualType promotionTy = getPromotionType( \ E->getType(), \ (E->getOpcode() == BinaryOperatorKind::BO_Div) ? true : false); \ - mlir::Value result = buildBin##OP(buildBinOps(E, promotionTy)); \ + mlir::Value result = emitBin##OP(emitBinOps(E, promotionTy)); \ if (!promotionTy.isNull()) \ - result = CGF.buildUnPromotedValue(result, E->getType()); \ + result = CGF.emitUnPromotedValue(result, E->getType()); \ return result; \ } @@ -272,16 +272,16 @@ class ComplexExprEmitter : public StmtVisitor { // Compound assignments. mlir::Value VisitBinAddAssign(const CompoundAssignOperator *E) { - return buildCompoundAssign(E, &ComplexExprEmitter::buildBinAdd); + return emitCompoundAssign(E, &ComplexExprEmitter::emitBinAdd); } mlir::Value VisitBinSubAssign(const CompoundAssignOperator *E) { - return buildCompoundAssign(E, &ComplexExprEmitter::buildBinSub); + return emitCompoundAssign(E, &ComplexExprEmitter::emitBinSub); } mlir::Value VisitBinMulAssign(const CompoundAssignOperator *E) { - return buildCompoundAssign(E, &ComplexExprEmitter::buildBinMul); + return emitCompoundAssign(E, &ComplexExprEmitter::emitBinMul); } mlir::Value VisitBinDivAssign(const CompoundAssignOperator *E) { - return buildCompoundAssign(E, &ComplexExprEmitter::buildBinDiv); + return emitCompoundAssign(E, &ComplexExprEmitter::emitBinDiv); } // GCC rejects rem/and/or/xor for integer complex. @@ -289,10 +289,10 @@ class ComplexExprEmitter : public StmtVisitor { // No comparisons produce a complex result. - LValue buildBinAssignLValue(const BinaryOperator *E, mlir::Value &Val); + LValue emitBinAssignLValue(const BinaryOperator *E, mlir::Value &Val); mlir::Value VisitBinAssign(const BinaryOperator *E) { mlir::Value Val; - LValue LV = buildBinAssignLValue(E, Val); + LValue LV = emitBinAssignLValue(E, Val); // The result of an assignment in C is the assigned r-value. if (!CGF.getLangOpts().CPlusPlus) @@ -303,7 +303,7 @@ class ComplexExprEmitter : public StmtVisitor { if (!LV.isVolatileQualified()) return Val; - return buildLoadOfLValue(LV, E->getExprLoc()); + return emitLoadOfLValue(LV, E->getExprLoc()); }; mlir::Value VisitBinComma(const BinaryOperator *E) { llvm_unreachable("NYI"); @@ -345,8 +345,8 @@ static mlir::Value createComplexFromReal(CIRGenBuilderTy &builder, return builder.createComplexCreate(loc, real, imag); } -mlir::Value ComplexExprEmitter::buildLoadOfLValue(LValue LV, - SourceLocation Loc) { +mlir::Value ComplexExprEmitter::emitLoadOfLValue(LValue LV, + SourceLocation Loc) { assert(LV.isSimple() && "non-simple complex l-value?"); if (LV.getType()->isAtomicType()) llvm_unreachable("NYI"); @@ -355,9 +355,8 @@ mlir::Value ComplexExprEmitter::buildLoadOfLValue(LValue LV, return Builder.createLoad(CGF.getLoc(Loc), SrcPtr, LV.isVolatileQualified()); } -void ComplexExprEmitter::buildStoreOfComplex(mlir::Location Loc, - mlir::Value Val, LValue LV, - bool isInit) { +void ComplexExprEmitter::emitStoreOfComplex(mlir::Location Loc, mlir::Value Val, + LValue LV, bool isInit) { if (LV.getType()->isAtomicType() || (!isInit && CGF.LValueIsSuitableForInlineAtomic(LV))) llvm_unreachable("NYI"); @@ -366,10 +365,10 @@ void ComplexExprEmitter::buildStoreOfComplex(mlir::Location Loc, Builder.createStore(Loc, Val, DestAddr, LV.isVolatileQualified()); } -mlir::Value ComplexExprEmitter::buildComplexToComplexCast(mlir::Value Val, - QualType SrcType, - QualType DestType, - SourceLocation Loc) { +mlir::Value ComplexExprEmitter::emitComplexToComplexCast(mlir::Value Val, + QualType SrcType, + QualType DestType, + SourceLocation Loc) { if (SrcType == DestType) return Val; @@ -393,10 +392,10 @@ mlir::Value ComplexExprEmitter::buildComplexToComplexCast(mlir::Value Val, CGF.ConvertType(DestType)); } -mlir::Value ComplexExprEmitter::buildScalarToComplexCast(mlir::Value Val, - QualType SrcType, - QualType DestType, - SourceLocation Loc) { +mlir::Value ComplexExprEmitter::emitScalarToComplexCast(mlir::Value Val, + QualType SrcType, + QualType DestType, + SourceLocation Loc) { cir::CastKind CastOpKind; if (SrcType->isFloatingType()) CastOpKind = cir::CastKind::float_to_complex; @@ -409,8 +408,8 @@ mlir::Value ComplexExprEmitter::buildScalarToComplexCast(mlir::Value Val, CGF.ConvertType(DestType)); } -mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, - QualType DestTy) { +mlir::Value ComplexExprEmitter::emitCast(CastKind CK, Expr *Op, + QualType DestTy) { switch (CK) { case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!"); @@ -490,8 +489,8 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, case CK_FloatingRealToComplex: case CK_IntegralRealToComplex: { assert(!cir::MissingFeatures::CGFPOptionsRAII()); - return buildScalarToComplexCast(CGF.buildScalarExpr(Op), Op->getType(), - DestTy, Op->getExprLoc()); + return emitScalarToComplexCast(CGF.emitScalarExpr(Op), Op->getType(), + DestTy, Op->getExprLoc()); } case CK_FloatingComplexCast: @@ -499,8 +498,8 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, case CK_IntegralComplexCast: case CK_IntegralComplexToFloatingComplex: { assert(!cir::MissingFeatures::CGFPOptionsRAII()); - return buildComplexToComplexCast(Visit(Op), Op->getType(), DestTy, - Op->getExprLoc()); + return emitComplexToComplexCast(Visit(Op), Op->getType(), DestTy, + Op->getExprLoc()); } } @@ -509,23 +508,23 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, mlir::Value ComplexExprEmitter::VisitCastExpr(CastExpr *E) { if (const auto *ECE = dyn_cast(E)) - CGF.CGM.buildExplicitCastExprType(ECE, &CGF); + CGF.CGM.emitExplicitCastExprType(ECE, &CGF); if (E->changesVolatileQualification()) - return buildLoadOfLValue(E); - return buildCast(E->getCastKind(), E->getSubExpr(), E->getType()); + return emitLoadOfLValue(E); + return emitCast(E->getCastKind(), E->getSubExpr(), E->getType()); } mlir::Value ComplexExprEmitter::VisitCallExpr(const CallExpr *E) { if (E->getCallReturnType(CGF.getContext())->isReferenceType()) - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); - return CGF.buildCallExpr(E).getComplexVal(); + return CGF.emitCallExpr(E).getComplexVal(); } mlir::Value ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre) { - LValue LV = CGF.buildLValue(E->getSubExpr()); - return CGF.buildComplexPrePostIncDec(E, LV, isInc, isPre); + LValue LV = CGF.emitLValue(E->getSubExpr()); + return CGF.emitComplexPrePostIncDec(E, LV, isInc, isPre); } mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *E, @@ -535,7 +534,7 @@ mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *E, : PromotionType; mlir::Value result = VisitPlus(E, promotionTy); if (!promotionTy.isNull()) - return CGF.buildUnPromotedValue(result, E->getSubExpr()->getType()); + return CGF.emitUnPromotedValue(result, E->getSubExpr()->getType()); return result; } @@ -543,7 +542,7 @@ mlir::Value ComplexExprEmitter::VisitPlus(const UnaryOperator *E, QualType PromotionType) { mlir::Value Op; if (!PromotionType.isNull()) - Op = CGF.buildPromotedComplexExpr(E->getSubExpr(), PromotionType); + Op = CGF.emitPromotedComplexExpr(E->getSubExpr(), PromotionType); else Op = Visit(E->getSubExpr()); @@ -558,7 +557,7 @@ mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E, : PromotionType; mlir::Value result = VisitMinus(E, promotionTy); if (!promotionTy.isNull()) - return CGF.buildUnPromotedValue(result, E->getSubExpr()->getType()); + return CGF.emitUnPromotedValue(result, E->getSubExpr()->getType()); return result; } @@ -566,7 +565,7 @@ mlir::Value ComplexExprEmitter::VisitMinus(const UnaryOperator *E, QualType PromotionType) { mlir::Value Op; if (!PromotionType.isNull()) - Op = CGF.buildPromotedComplexExpr(E->getSubExpr(), PromotionType); + Op = CGF.emitPromotedComplexExpr(E->getSubExpr(), PromotionType); else Op = Visit(E->getSubExpr()); @@ -581,11 +580,11 @@ mlir::Value ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) { } ComplexExprEmitter::BinOpInfo -ComplexExprEmitter::buildBinOps(const BinaryOperator *E, QualType PromotionTy) { +ComplexExprEmitter::emitBinOps(const BinaryOperator *E, QualType PromotionTy) { BinOpInfo Ops{CGF.getLoc(E->getExprLoc())}; - Ops.LHS = buildPromotedComplexOperand(E->getLHS(), PromotionTy); - Ops.RHS = buildPromotedComplexOperand(E->getRHS(), PromotionTy); + Ops.LHS = emitPromotedComplexOperand(E->getLHS(), PromotionTy); + Ops.RHS = emitPromotedComplexOperand(E->getRHS(), PromotionTy); if (!PromotionTy.isNull()) Ops.Ty = PromotionTy; else @@ -594,14 +593,14 @@ ComplexExprEmitter::buildBinOps(const BinaryOperator *E, QualType PromotionTy) { return Ops; } -mlir::Value ComplexExprEmitter::buildPromoted(const Expr *E, - QualType PromotionTy) { +mlir::Value ComplexExprEmitter::emitPromoted(const Expr *E, + QualType PromotionTy) { E = E->IgnoreParens(); if (const auto *BO = dyn_cast(E)) { switch (BO->getOpcode()) { #define HANDLE_BINOP(OP) \ case BO_##OP: \ - return buildBin##OP(buildBinOps(BO, PromotionTy)); + return emitBin##OP(emitBinOps(BO, PromotionTy)); HANDLE_BINOP(Add) HANDLE_BINOP(Sub) HANDLE_BINOP(Mul) @@ -622,16 +621,16 @@ mlir::Value ComplexExprEmitter::buildPromoted(const Expr *E, } auto result = Visit(const_cast(E)); if (!PromotionTy.isNull()) - return CGF.buildPromotedValue(result, PromotionTy); + return CGF.emitPromotedValue(result, PromotionTy); return result; } mlir::Value -ComplexExprEmitter::buildPromotedComplexOperand(const Expr *E, - QualType PromotionTy) { +ComplexExprEmitter::emitPromotedComplexOperand(const Expr *E, + QualType PromotionTy) { if (E->getType()->isAnyComplexType()) { if (!PromotionTy.isNull()) - return CGF.buildPromotedComplexExpr(E, PromotionTy); + return CGF.emitPromotedComplexExpr(E, PromotionTy); return Visit(const_cast(E)); } @@ -639,15 +638,15 @@ ComplexExprEmitter::buildPromotedComplexOperand(const Expr *E, if (!PromotionTy.isNull()) { QualType ComplexElementTy = PromotionTy->castAs()->getElementType(); - Real = CGF.buildPromotedScalarExpr(E, ComplexElementTy); + Real = CGF.emitPromotedScalarExpr(E, ComplexElementTy); } else - Real = CGF.buildScalarExpr(E); + Real = CGF.emitScalarExpr(E); return createComplexFromReal(CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), Real); } -LValue ComplexExprEmitter::buildCompoundAssignLValue( +LValue ComplexExprEmitter::emitCompoundAssignLValue( const CompoundAssignOperator *E, mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &), RValue &Val) { QualType LHSTy = E->getLHS()->getType(); @@ -676,19 +675,19 @@ LValue ComplexExprEmitter::buildCompoundAssignLValue( if (!PromotionTypeRHS.isNull()) OpInfo.RHS = createComplexFromReal( CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), - CGF.buildPromotedScalarExpr(E->getRHS(), PromotionTypeRHS)); + CGF.emitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS)); else { assert(CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, E->getRHS()->getType())); OpInfo.RHS = createComplexFromReal(CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), - CGF.buildScalarExpr(E->getRHS())); + CGF.emitScalarExpr(E->getRHS())); } } else { if (!PromotionTypeRHS.isNull()) { OpInfo.RHS = createComplexFromReal( CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), - CGF.buildPromotedComplexExpr(E->getRHS(), PromotionTypeRHS)); + CGF.emitPromotedComplexExpr(E->getRHS(), PromotionTypeRHS)); } else { assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty, E->getRHS()->getType())); @@ -696,20 +695,20 @@ LValue ComplexExprEmitter::buildCompoundAssignLValue( } } - LValue LHS = CGF.buildLValue(E->getLHS()); + LValue LHS = CGF.emitLValue(E->getLHS()); // Load from the l-value and convert it. SourceLocation Loc = E->getExprLoc(); QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType()); if (LHSTy->isAnyComplexType()) { - mlir::Value LHSVal = buildLoadOfLValue(LHS, Loc); + mlir::Value LHSVal = emitLoadOfLValue(LHS, Loc); if (!PromotionTypeLHS.isNull()) OpInfo.LHS = - buildComplexToComplexCast(LHSVal, LHSTy, PromotionTypeLHS, Loc); + emitComplexToComplexCast(LHSVal, LHSTy, PromotionTypeLHS, Loc); else - OpInfo.LHS = buildComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc); + OpInfo.LHS = emitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc); } else { - mlir::Value LHSVal = CGF.buildLoadOfScalar(LHS, Loc); + mlir::Value LHSVal = CGF.emitLoadOfScalar(LHS, Loc); // For floating point real operands we can directly pass the scalar form // to the binary operator emission and potentially get more efficient code. if (LHSTy->isRealFloatingType()) { @@ -719,17 +718,17 @@ LValue ComplexExprEmitter::buildCompoundAssignLValue( cast(PromotionTypeLHS)->getElementType(); if (!CGF.getContext().hasSameUnqualifiedType(PromotedComplexElementTy, PromotionTypeLHS)) - LHSVal = CGF.buildScalarConversion(LHSVal, LHSTy, - PromotedComplexElementTy, Loc); + LHSVal = CGF.emitScalarConversion(LHSVal, LHSTy, + PromotedComplexElementTy, Loc); } else { if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy)) LHSVal = - CGF.buildScalarConversion(LHSVal, LHSTy, ComplexElementTy, Loc); + CGF.emitScalarConversion(LHSVal, LHSTy, ComplexElementTy, Loc); } OpInfo.LHS = createComplexFromReal(CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), LHSVal); } else { - OpInfo.LHS = buildScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc); + OpInfo.LHS = emitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc); } } @@ -739,25 +738,25 @@ LValue ComplexExprEmitter::buildCompoundAssignLValue( // Truncate the result and store it into the LHS lvalue. if (LHSTy->isAnyComplexType()) { mlir::Value ResVal = - buildComplexToComplexCast(Result, OpInfo.Ty, LHSTy, Loc); - buildStoreOfComplex(CGF.getLoc(E->getExprLoc()), ResVal, LHS, - /*isInit*/ false); + emitComplexToComplexCast(Result, OpInfo.Ty, LHSTy, Loc); + emitStoreOfComplex(CGF.getLoc(E->getExprLoc()), ResVal, LHS, + /*isInit*/ false); Val = RValue::getComplex(ResVal); } else { mlir::Value ResVal = - CGF.buildComplexToScalarConversion(Result, OpInfo.Ty, LHSTy, Loc); - CGF.buildStoreOfScalar(ResVal, LHS, /*isInit*/ false); + CGF.emitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy, Loc); + CGF.emitStoreOfScalar(ResVal, LHS, /*isInit*/ false); Val = RValue::get(ResVal); } return LHS; } -mlir::Value ComplexExprEmitter::buildCompoundAssign( +mlir::Value ComplexExprEmitter::emitCompoundAssign( const CompoundAssignOperator *E, mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &)) { RValue Val; - LValue LV = buildCompoundAssignLValue(E, Func, Val); + LValue LV = emitCompoundAssignLValue(E, Func, Val); // The result of an assignment in C is the assigned r-value. if (!CGF.getLangOpts().CPlusPlus) @@ -767,15 +766,15 @@ mlir::Value ComplexExprEmitter::buildCompoundAssign( if (!LV.isVolatileQualified()) return Val.getComplexVal(); - return buildLoadOfLValue(LV, E->getExprLoc()); + return emitLoadOfLValue(LV, E->getExprLoc()); } -mlir::Value ComplexExprEmitter::buildBinAdd(const BinOpInfo &Op) { +mlir::Value ComplexExprEmitter::emitBinAdd(const BinOpInfo &Op) { assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexAdd(Op.Loc, Op.LHS, Op.RHS); } -mlir::Value ComplexExprEmitter::buildBinSub(const BinOpInfo &Op) { +mlir::Value ComplexExprEmitter::emitBinSub(const BinOpInfo &Op) { assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexSub(Op.Loc, Op.LHS, Op.RHS); } @@ -796,22 +795,22 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) { } } -mlir::Value ComplexExprEmitter::buildBinMul(const BinOpInfo &Op) { +mlir::Value ComplexExprEmitter::emitBinMul(const BinOpInfo &Op) { assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexMul( Op.Loc, Op.LHS, Op.RHS, getComplexRangeAttr(Op.FPFeatures.getComplexRange()), FPHasBeenPromoted); } -mlir::Value ComplexExprEmitter::buildBinDiv(const BinOpInfo &Op) { +mlir::Value ComplexExprEmitter::emitBinDiv(const BinOpInfo &Op) { assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexDiv( Op.Loc, Op.LHS, Op.RHS, getComplexRangeAttr(Op.FPFeatures.getComplexRange()), FPHasBeenPromoted); } -LValue ComplexExprEmitter::buildBinAssignLValue(const BinaryOperator *E, - mlir::Value &Val) { +LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *E, + mlir::Value &Val) { assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), E->getRHS()->getType()) && "Invalid assignment"); @@ -820,10 +819,10 @@ LValue ComplexExprEmitter::buildBinAssignLValue(const BinaryOperator *E, Val = Visit(E->getRHS()); // Compute the address to store into. - LValue LHS = CGF.buildLValue(E->getLHS()); + LValue LHS = CGF.emitLValue(E->getLHS()); // Store the result value into the LHS lvalue. - buildStoreOfComplex(CGF.getLoc(E->getExprLoc()), Val, LHS, /*isInit*/ false); + emitStoreOfComplex(CGF.getLoc(E->getExprLoc()), Val, LHS, /*isInit*/ false); return LHS; } @@ -855,8 +854,8 @@ ComplexExprEmitter::VisitImaginaryLiteral(const ImaginaryLiteral *IL) { mlir::Value ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) { if (E->getNumInits() == 2) { - mlir::Value Real = CGF.buildScalarExpr(E->getInit(0)); - mlir::Value Imag = CGF.buildScalarExpr(E->getInit(1)); + mlir::Value Real = CGF.emitScalarExpr(E->getInit(0)); + mlir::Value Imag = CGF.emitScalarExpr(E->getInit(1)); return Builder.createComplexCreate(CGF.getLoc(E->getExprLoc()), Real, Imag); } @@ -869,13 +868,13 @@ mlir::Value ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) { return Builder.getZero(CGF.getLoc(E->getExprLoc()), CGF.ConvertType(Ty)); } -mlir::Value CIRGenFunction::buildPromotedComplexExpr(const Expr *E, - QualType PromotionType) { - return ComplexExprEmitter(*this).buildPromoted(E, PromotionType); +mlir::Value CIRGenFunction::emitPromotedComplexExpr(const Expr *E, + QualType PromotionType) { + return ComplexExprEmitter(*this).emitPromoted(E, PromotionType); } -mlir::Value CIRGenFunction::buildPromotedValue(mlir::Value result, - QualType PromotionType) { +mlir::Value CIRGenFunction::emitPromotedValue(mlir::Value result, + QualType PromotionType) { assert(mlir::isa( mlir::cast(result.getType()).getElementTy()) && "integral complex will never be promoted"); @@ -883,8 +882,8 @@ mlir::Value CIRGenFunction::buildPromotedValue(mlir::Value result, ConvertType(PromotionType)); } -mlir::Value CIRGenFunction::buildUnPromotedValue(mlir::Value result, - QualType UnPromotionType) { +mlir::Value CIRGenFunction::emitUnPromotedValue(mlir::Value result, + QualType UnPromotionType) { assert(mlir::isa( mlir::cast(result.getType()).getElementTy()) && "integral complex will never be promoted"); @@ -892,43 +891,43 @@ mlir::Value CIRGenFunction::buildUnPromotedValue(mlir::Value result, ConvertType(UnPromotionType)); } -mlir::Value CIRGenFunction::buildComplexExpr(const Expr *E) { +mlir::Value CIRGenFunction::emitComplexExpr(const Expr *E) { assert(E && getComplexType(E->getType()) && "Invalid complex expression to emit"); return ComplexExprEmitter(*this).Visit(const_cast(E)); } -void CIRGenFunction::buildComplexExprIntoLValue(const Expr *E, LValue dest, - bool isInit) { +void CIRGenFunction::emitComplexExprIntoLValue(const Expr *E, LValue dest, + bool isInit) { assert(E && getComplexType(E->getType()) && "Invalid complex expression to emit"); ComplexExprEmitter Emitter(*this); mlir::Value Val = Emitter.Visit(const_cast(E)); - Emitter.buildStoreOfComplex(getLoc(E->getExprLoc()), Val, dest, isInit); + Emitter.emitStoreOfComplex(getLoc(E->getExprLoc()), Val, dest, isInit); } -void CIRGenFunction::buildStoreOfComplex(mlir::Location Loc, mlir::Value V, - LValue dest, bool isInit) { - ComplexExprEmitter(*this).buildStoreOfComplex(Loc, V, dest, isInit); +void CIRGenFunction::emitStoreOfComplex(mlir::Location Loc, mlir::Value V, + LValue dest, bool isInit) { + ComplexExprEmitter(*this).emitStoreOfComplex(Loc, V, dest, isInit); } -Address CIRGenFunction::buildAddrOfRealComponent(mlir::Location loc, - Address addr, - QualType complexType) { +Address CIRGenFunction::emitAddrOfRealComponent(mlir::Location loc, + Address addr, + QualType complexType) { return builder.createRealPtr(loc, addr); } -Address CIRGenFunction::buildAddrOfImagComponent(mlir::Location loc, - Address addr, - QualType complexType) { +Address CIRGenFunction::emitAddrOfImagComponent(mlir::Location loc, + Address addr, + QualType complexType) { return builder.createImagPtr(loc, addr); } -LValue CIRGenFunction::buildComplexAssignmentLValue(const BinaryOperator *E) { +LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *E) { assert(E->getOpcode() == BO_Assign); mlir::Value Val; // ignored - LValue LVal = ComplexExprEmitter(*this).buildBinAssignLValue(E, Val); + LValue LVal = ComplexExprEmitter(*this).emitBinAssignLValue(E, Val); if (getLangOpts().OpenMP) llvm_unreachable("NYI"); return LVal; @@ -940,36 +939,36 @@ using CompoundFunc = static CompoundFunc getComplexOp(BinaryOperatorKind Op) { switch (Op) { case BO_MulAssign: - return &ComplexExprEmitter::buildBinMul; + return &ComplexExprEmitter::emitBinMul; case BO_DivAssign: - return &ComplexExprEmitter::buildBinDiv; + return &ComplexExprEmitter::emitBinDiv; case BO_SubAssign: - return &ComplexExprEmitter::buildBinSub; + return &ComplexExprEmitter::emitBinSub; case BO_AddAssign: - return &ComplexExprEmitter::buildBinAdd; + return &ComplexExprEmitter::emitBinAdd; default: llvm_unreachable("unexpected complex compound assignment"); } } -LValue CIRGenFunction::buildComplexCompoundAssignmentLValue( +LValue CIRGenFunction::emitComplexCompoundAssignmentLValue( const CompoundAssignOperator *E) { CompoundFunc Op = getComplexOp(E->getOpcode()); RValue Val; - return ComplexExprEmitter(*this).buildCompoundAssignLValue(E, Op, Val); + return ComplexExprEmitter(*this).emitCompoundAssignLValue(E, Op, Val); } -mlir::Value CIRGenFunction::buildComplexPrePostIncDec(const UnaryOperator *E, - LValue LV, bool isInc, - bool isPre) { - mlir::Value InVal = buildLoadOfComplex(LV, E->getExprLoc()); +mlir::Value CIRGenFunction::emitComplexPrePostIncDec(const UnaryOperator *E, + LValue LV, bool isInc, + bool isPre) { + mlir::Value InVal = emitLoadOfComplex(LV, E->getExprLoc()); auto Loc = getLoc(E->getExprLoc()); auto OpKind = isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; mlir::Value IncVal = builder.createUnaryOp(Loc, OpKind, InVal); // Store the updated result through the lvalue. - buildStoreOfComplex(Loc, IncVal, LV, /*init*/ false); + emitStoreOfComplex(Loc, IncVal, LV, /*init*/ false); if (getLangOpts().OpenMP) llvm_unreachable("NYI"); @@ -978,6 +977,6 @@ mlir::Value CIRGenFunction::buildComplexPrePostIncDec(const UnaryOperator *E, return isPre ? IncVal : InVal; } -mlir::Value CIRGenFunction::buildLoadOfComplex(LValue src, SourceLocation loc) { - return ComplexExprEmitter(*this).buildLoadOfLValue(src, loc); +mlir::Value CIRGenFunction::emitLoadOfComplex(LValue src, SourceLocation loc) { + return ComplexExprEmitter(*this).emitLoadOfLValue(src, loc); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index d4d031158d90..ae42f2ff411a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -44,10 +44,10 @@ namespace { class ConstExprEmitter; static mlir::Attribute -buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, - mlir::Type CommonElementType, unsigned ArrayBound, - SmallVectorImpl &Elements, - mlir::TypedAttr Filler); +emitArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, + mlir::Type CommonElementType, unsigned ArrayBound, + SmallVectorImpl &Elements, + mlir::TypedAttr Filler); struct ConstantAggregateBuilderUtils { CIRGenModule &CGM; @@ -905,7 +905,7 @@ class ConstExprEmitter mlir::Attribute VisitCastExpr(CastExpr *E, QualType destType) { if (const auto *ECE = dyn_cast(E)) - CGM.buildExplicitCastExprType(ECE, Emitter.CGF); + CGM.emitExplicitCastExprType(ECE, Emitter.CGF); Expr *subExpr = E->getSubExpr(); switch (E->getCastKind()) { @@ -1057,8 +1057,8 @@ class ConstExprEmitter auto typedFiller = llvm::dyn_cast_or_null(Filler); if (Filler && !typedFiller) llvm_unreachable("We shouldn't be receiving untyped attrs here"); - return buildArrayConstant(CGM, desiredType, CommonElementType, NumElements, - Elts, typedFiller); + return emitArrayConstant(CGM, desiredType, CommonElementType, NumElements, + Elts, typedFiller); } mlir::Attribute EmitRecordInitialization(InitListExpr *ILE, QualType T) { @@ -1163,10 +1163,10 @@ class ConstExprEmitter }; static mlir::Attribute -buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, - mlir::Type CommonElementType, unsigned ArrayBound, - SmallVectorImpl &Elements, - mlir::TypedAttr Filler) { +emitArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, + mlir::Type CommonElementType, unsigned ArrayBound, + SmallVectorImpl &Elements, + mlir::TypedAttr Filler) { auto &builder = CGM.getBuilder(); // Figure out how long the initial prefix of non-zero elements is. @@ -1847,8 +1847,8 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, if (Filler && !typedFiller) llvm_unreachable("this should always be typed"); - return buildArrayConstant(CGM, Desired, CommonElementType, NumElements, - Elts, typedFiller); + return emitArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts, + typedFiller); } case APValue::Vector: { const QualType ElementType = @@ -1896,7 +1896,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, llvm_unreachable("Unknown APValue kind"); } -mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) { +mlir::Value CIRGenModule::emitNullConstant(QualType T, mlir::Location loc) { if (T->getAs()) { return builder.getNullPtr(getTypes().convertTypeForMem(T), loc); } @@ -1919,7 +1919,7 @@ mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) { return {}; } -mlir::Value CIRGenModule::buildMemberPointerConstant(const UnaryOperator *E) { +mlir::Value CIRGenModule::emitMemberPointerConstant(const UnaryOperator *E) { assert(!cir::MissingFeatures::cxxABI()); auto loc = getLoc(E->getSourceRange()); @@ -1975,14 +1975,14 @@ mlir::Attribute ConstantEmitter::emitNullForMemory(mlir::Location loc, CIRGenModule &CGM, QualType T) { auto cstOp = - dyn_cast(CGM.buildNullConstant(T, loc).getDefiningOp()); + dyn_cast(CGM.emitNullConstant(T, loc).getDefiningOp()); assert(cstOp && "expected cir.const op"); return emitForMemory(CGM, cstOp.getValue(), T); } -static mlir::TypedAttr buildNullConstant(CIRGenModule &CGM, - const RecordDecl *record, - bool asCompleteObject) { +static mlir::TypedAttr emitNullConstant(CIRGenModule &CGM, + const RecordDecl *record, + bool asCompleteObject) { const CIRGenRecordLayout &layout = CGM.getTypes().getCIRGenRecordLayout(record); mlir::Type ty = (asCompleteObject ? layout.getCIRType() @@ -2045,6 +2045,6 @@ static mlir::TypedAttr buildNullConstant(CIRGenModule &CGM, } mlir::TypedAttr -CIRGenModule::buildNullConstantForBase(const CXXRecordDecl *Record) { - return ::buildNullConstant(*this, Record, false); +CIRGenModule::emitNullConstantForBase(const CXXRecordDecl *Record) { + return ::emitNullConstant(*this, Record, false); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 6763bbccd089..6c4441ba0a1c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -108,26 +108,26 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Type ConvertType(QualType T) { return CGF.ConvertType(T); } - LValue buildLValue(const Expr *E) { return CGF.buildLValue(E); } - LValue buildCheckedLValue(const Expr *E, CIRGenFunction::TypeCheckKind TCK) { - return CGF.buildCheckedLValue(E, TCK); + LValue emitLValue(const Expr *E) { return CGF.emitLValue(E); } + LValue emitCheckedLValue(const Expr *E, CIRGenFunction::TypeCheckKind TCK) { + return CGF.emitCheckedLValue(E, TCK); } - mlir::Value buildComplexToScalarConversion(mlir::Location Loc, mlir::Value V, - CastKind Kind, QualType DestTy); + mlir::Value emitComplexToScalarConversion(mlir::Location Loc, mlir::Value V, + CastKind Kind, QualType DestTy); /// Emit a value that corresponds to null for the given type. - mlir::Value buildNullValue(QualType Ty, mlir::Location loc); + mlir::Value emitNullValue(QualType Ty, mlir::Location loc); - mlir::Value buildPromotedValue(mlir::Value result, QualType PromotionType) { + mlir::Value emitPromotedValue(mlir::Value result, QualType PromotionType) { return Builder.createFloatingCast(result, ConvertType(PromotionType)); } - mlir::Value buildUnPromotedValue(mlir::Value result, QualType ExprType) { + mlir::Value emitUnPromotedValue(mlir::Value result, QualType ExprType) { return Builder.createFloatingCast(result, ConvertType(ExprType)); } - mlir::Value buildPromoted(const Expr *E, QualType PromotionType); + mlir::Value emitPromoted(const Expr *E, QualType PromotionType); //===--------------------------------------------------------------------===// // Visitor Methods @@ -161,10 +161,10 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitCoawaitExpr(CoawaitExpr *S) { - return CGF.buildCoawaitExpr(*S).getScalarVal(); + return CGF.emitCoawaitExpr(*S).getScalarVal(); } mlir::Value VisitCoyieldExpr(CoyieldExpr *S) { - return CGF.buildCoyieldExpr(*S).getScalarVal(); + return CGF.emitCoyieldExpr(*S).getScalarVal(); } mlir::Value VisitUnaryCoawait(const UnaryOperator *E) { llvm_unreachable("NYI"); @@ -208,7 +208,7 @@ class ScalarExprEmitter : public StmtVisitor { if (E->getType()->isVoidType()) return nullptr; - return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange())); + return emitNullValue(E->getType(), CGF.getLoc(E->getSourceRange())); } mlir::Value VisitGNUNullExpr(const GNUNullExpr *E) { llvm_unreachable("NYI"); @@ -246,22 +246,22 @@ class ScalarExprEmitter : public StmtVisitor { } /// Emits the address of the l-value, then loads and returns the result. - mlir::Value buildLoadOfLValue(const Expr *E) { - LValue LV = CGF.buildLValue(E); + mlir::Value emitLoadOfLValue(const Expr *E) { + LValue LV = CGF.emitLValue(E); // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); - return CGF.buildLoadOfLValue(LV, E->getExprLoc()).getScalarVal(); + return CGF.emitLoadOfLValue(LV, E->getExprLoc()).getScalarVal(); } - mlir::Value buildLoadOfLValue(LValue LV, SourceLocation Loc) { - return CGF.buildLoadOfLValue(LV, Loc).getScalarVal(); + mlir::Value emitLoadOfLValue(LValue LV, SourceLocation Loc) { + return CGF.emitLoadOfLValue(LV, Loc).getScalarVal(); } // l-values mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { if (CIRGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) { - return CGF.buildScalarConstant(Constant, E); + return CGF.emitScalarConstant(Constant, E); } - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); } mlir::Value VisitObjCSelectorExpr(ObjCSelectorExpr *E) { @@ -295,7 +295,7 @@ class ScalarExprEmitter : public StmtVisitor { } // Just load the lvalue formed by the subscript expression. - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); } mlir::Value VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) { @@ -330,18 +330,16 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *E) { // __builtin_convertvector is an element-wise cast, and is implemented as a // regular cast. The back end handles casts of vectors correctly. - return buildScalarConversion(Visit(E->getSrcExpr()), - E->getSrcExpr()->getType(), E->getType(), - E->getSourceRange().getBegin()); + return emitScalarConversion(Visit(E->getSrcExpr()), + E->getSrcExpr()->getType(), E->getType(), + E->getSourceRange().getBegin()); } - mlir::Value VisitExtVectorElementExpr(Expr *E) { - return buildLoadOfLValue(E); - } + mlir::Value VisitExtVectorElementExpr(Expr *E) { return emitLoadOfLValue(E); } mlir::Value VisitMemberExpr(MemberExpr *E); mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); } mlir::Value VisitInitListExpr(InitListExpr *E); @@ -351,7 +349,7 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { - return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange())); + return emitNullValue(E->getType(), CGF.getLoc(E->getSourceRange())); } mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *E) { return VisitCastExpr(E); @@ -362,7 +360,7 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitStmtExpr(StmtExpr *E) { assert(!cir::MissingFeatures::stmtExprEvaluation() && "NYI"); Address retAlloca = - CGF.buildCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType()); + CGF.emitCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType()); if (!retAlloca.isValid()) return {}; @@ -373,29 +371,29 @@ class ScalarExprEmitter : public StmtVisitor { CGF.getBuilder().hoistAllocaToParentRegion( cast(retAlloca.getDefiningOp())); - return CGF.buildLoadOfScalar(CGF.makeAddrLValue(retAlloca, E->getType()), - E->getExprLoc()); + return CGF.emitLoadOfScalar(CGF.makeAddrLValue(retAlloca, E->getType()), + E->getExprLoc()); } // Unary Operators. mlir::Value VisitUnaryPostDec(const UnaryOperator *E) { - LValue LV = buildLValue(E->getSubExpr()); - return buildScalarPrePostIncDec(E, LV, false, false); + LValue LV = emitLValue(E->getSubExpr()); + return emitScalarPrePostIncDec(E, LV, false, false); } mlir::Value VisitUnaryPostInc(const UnaryOperator *E) { - LValue LV = buildLValue(E->getSubExpr()); - return buildScalarPrePostIncDec(E, LV, true, false); + LValue LV = emitLValue(E->getSubExpr()); + return emitScalarPrePostIncDec(E, LV, true, false); } mlir::Value VisitUnaryPreDec(const UnaryOperator *E) { - LValue LV = buildLValue(E->getSubExpr()); - return buildScalarPrePostIncDec(E, LV, false, true); + LValue LV = emitLValue(E->getSubExpr()); + return emitScalarPrePostIncDec(E, LV, false, true); } mlir::Value VisitUnaryPreInc(const UnaryOperator *E) { - LValue LV = buildLValue(E->getSubExpr()); - return buildScalarPrePostIncDec(E, LV, true, true); + LValue LV = emitLValue(E->getSubExpr()); + return emitScalarPrePostIncDec(E, LV, true, true); } - mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV, - bool isInc, bool isPre) { + mlir::Value emitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, + bool isInc, bool isPre) { assert(!CGF.getLangOpts().OpenMP && "Not implemented"); QualType type = E->getSubExpr()->getType(); @@ -407,7 +405,7 @@ class ScalarExprEmitter : public StmtVisitor { if (const AtomicType *atomicTy = type->getAs()) { llvm_unreachable("no atomics inc/dec yet"); } else { - value = buildLoadOfLValue(LV, E->getExprLoc()); + value = emitLoadOfLValue(LV, E->getExprLoc()); input = value; } @@ -460,7 +458,7 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable( "perform lossy demotion case for inc/dec not implemented yet"); } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) { - value = buildIncDecConsiderOverflowBehavior(E, value, isInc); + value = emitIncDecConsiderOverflowBehavior(E, value, isInc); } else if (E->canOverflow() && type->isUnsignedIntegerType() && CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) { llvm_unreachable( @@ -469,7 +467,7 @@ class ScalarExprEmitter : public StmtVisitor { auto Kind = E->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; // NOTE(CIR): clang calls CreateAdd but folds this to a unary op - value = buildUnaryOp(E, Kind, input); + value = emitUnaryOp(E, Kind, input); } // Next most common: pointer increment. } else if (const PointerType *ptr = type->getAs()) { @@ -517,7 +515,7 @@ class ScalarExprEmitter : public StmtVisitor { // Create the inc/dec operation. // NOTE(CIR): clang calls CreateAdd but folds this to a unary op auto kind = (isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec); - value = buildUnaryOp(E, kind, value); + value = emitUnaryOp(E, kind, value); } else { // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or // __float128. Convert from float. @@ -570,34 +568,34 @@ class ScalarExprEmitter : public StmtVisitor { // Store the updated result through the lvalue if (LV.isBitField()) - CGF.buildStoreThroughBitfieldLValue(RValue::get(value), LV, value); + CGF.emitStoreThroughBitfieldLValue(RValue::get(value), LV, value); else - CGF.buildStoreThroughLValue(RValue::get(value), LV); + CGF.emitStoreThroughLValue(RValue::get(value), LV); // If this is a postinc, return the value read from memory, otherwise use // the updated value. return isPre ? value : input; } - mlir::Value buildIncDecConsiderOverflowBehavior(const UnaryOperator *E, - mlir::Value InVal, - bool IsInc) { + mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *E, + mlir::Value InVal, + bool IsInc) { // NOTE(CIR): The SignedOverflowBehavior is attached to the global ModuleOp // and the nsw behavior is handled during lowering. auto Kind = E->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: - return buildUnaryOp(E, Kind, InVal); + return emitUnaryOp(E, Kind, InVal); case LangOptions::SOB_Undefined: if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) - return buildUnaryOp(E, Kind, InVal); + return emitUnaryOp(E, Kind, InVal); llvm_unreachable( "inc/dec overflow behavior SOB_Undefined not implemented yet"); break; case LangOptions::SOB_Trapping: if (!E->canOverflow()) - return buildUnaryOp(E, Kind, InVal); + return emitUnaryOp(E, Kind, InVal); llvm_unreachable( "inc/dec overflow behavior SOB_Trapping not implemented yet"); break; @@ -606,15 +604,15 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) { if (llvm::isa(E->getType())) - return CGF.CGM.buildMemberPointerConstant(E); + return CGF.CGM.emitMemberPointerConstant(E); - return CGF.buildLValue(E->getSubExpr()).getPointer(); + return CGF.emitLValue(E->getSubExpr()).getPointer(); } mlir::Value VisitUnaryDeref(const UnaryOperator *E) { if (E->getType()->isVoidType()) return Visit(E->getSubExpr()); // the actual value should be unused - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); } mlir::Value VisitUnaryPlus(const UnaryOperator *E, QualType PromotionType = QualType()) { @@ -623,7 +621,7 @@ class ScalarExprEmitter : public StmtVisitor { : PromotionType; auto result = VisitPlus(E, promotionTy); if (result && !promotionTy.isNull()) - return buildUnPromotedValue(result, E->getType()); + return emitUnPromotedValue(result, E->getType()); return result; } @@ -634,11 +632,11 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value operand; if (!PromotionType.isNull()) - operand = CGF.buildPromotedScalarExpr(E->getSubExpr(), PromotionType); + operand = CGF.emitPromotedScalarExpr(E->getSubExpr(), PromotionType); else operand = Visit(E->getSubExpr()); - return buildUnaryOp(E, cir::UnaryOpKind::Plus, operand); + return emitUnaryOp(E, cir::UnaryOpKind::Plus, operand); } mlir::Value VisitUnaryMinus(const UnaryOperator *E, @@ -648,7 +646,7 @@ class ScalarExprEmitter : public StmtVisitor { : PromotionType; auto result = VisitMinus(E, promotionTy); if (result && !promotionTy.isNull()) - return buildUnPromotedValue(result, E->getType()); + return emitUnPromotedValue(result, E->getType()); return result; } @@ -657,19 +655,19 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value operand; if (!PromotionType.isNull()) - operand = CGF.buildPromotedScalarExpr(E->getSubExpr(), PromotionType); + operand = CGF.emitPromotedScalarExpr(E->getSubExpr(), PromotionType); else operand = Visit(E->getSubExpr()); // NOTE: LLVM codegen will lower this directly to either a FNeg // or a Sub instruction. In CIR this will be handled later in LowerToLLVM. - return buildUnaryOp(E, cir::UnaryOpKind::Minus, operand); + return emitUnaryOp(E, cir::UnaryOpKind::Minus, operand); } mlir::Value VisitUnaryNot(const UnaryOperator *E) { TestAndClearIgnoreResultAssign(); mlir::Value op = Visit(E->getSubExpr()); - return buildUnaryOp(E, cir::UnaryOpKind::Not, op); + return emitUnaryOp(E, cir::UnaryOpKind::Not, op); } mlir::Value VisitUnaryLNot(const UnaryOperator *E); @@ -685,8 +683,8 @@ class ScalarExprEmitter : public StmtVisitor { return Visit(E->getSubExpr()); } - mlir::Value buildUnaryOp(const UnaryOperator *E, cir::UnaryOpKind kind, - mlir::Value input) { + mlir::Value emitUnaryOp(const UnaryOperator *E, cir::UnaryOpKind kind, + mlir::Value input) { return Builder.create( CGF.getLoc(E->getSourceRange().getBegin()), input.getType(), kind, input); @@ -710,10 +708,10 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitExprWithCleanups(ExprWithCleanups *E); mlir::Value VisitCXXNewExpr(const CXXNewExpr *E) { - return CGF.buildCXXNewExpr(E); + return CGF.emitCXXNewExpr(E); } mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *E) { - CGF.buildCXXDeleteExpr(E); + CGF.emitCXXDeleteExpr(E); return {}; } mlir::Value VisitTypeTraitExpr(const TypeTraitExpr *E) { @@ -736,10 +734,10 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) { - return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange())); + return emitNullValue(E->getType(), CGF.getLoc(E->getSourceRange())); } mlir::Value VisitCXXThrowExpr(CXXThrowExpr *E) { - CGF.buildCXXThrowExpr(E); + CGF.emitCXXThrowExpr(E); return nullptr; } mlir::Value VisitCXXNoexceptExpr(CXXNoexceptExpr *E) { @@ -747,7 +745,7 @@ class ScalarExprEmitter : public StmtVisitor { } /// Perform a pointer to boolean conversion. - mlir::Value buildPointerToBoolConversion(mlir::Value V, QualType QT) { + mlir::Value emitPointerToBoolConversion(mlir::Value V, QualType QT) { // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM. // We might want to have a separate pass for these types of conversions. return CGF.getBuilder().createPtrToBoolCast(V); @@ -755,7 +753,7 @@ class ScalarExprEmitter : public StmtVisitor { // Comparisons. #define VISITCOMP(CODE) \ - mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); } + mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); } VISITCOMP(LT) VISITCOMP(GT) VISITCOMP(LE) @@ -768,17 +766,17 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitBinLAnd(const BinaryOperator *B); mlir::Value VisitBinLOr(const BinaryOperator *B); mlir::Value VisitBinComma(const BinaryOperator *E) { - CGF.buildIgnoredExpr(E->getLHS()); + CGF.emitIgnoredExpr(E->getLHS()); // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen. return Visit(E->getRHS()); } mlir::Value VisitBinPtrMemD(const BinaryOperator *E) { - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); } mlir::Value VisitBinPtrMemI(const BinaryOperator *E) { - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); } mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { @@ -803,7 +801,7 @@ class ScalarExprEmitter : public StmtVisitor { } mlir::Value VisitAsTypeExpr(AsTypeExpr *E) { llvm_unreachable("NYI"); } mlir::Value VisitAtomicExpr(AtomicExpr *E) { - return CGF.buildAtomicExpr(E).getScalarVal(); + return CGF.emitAtomicExpr(E).getScalarVal(); } // Emit a conversion from the specified type to the specified destination @@ -825,15 +823,15 @@ class ScalarExprEmitter : public StmtVisitor { EmitImplicitIntegerSignChangeChecks( SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {} }; - mlir::Value buildScalarCast(mlir::Value Src, QualType SrcType, - QualType DstType, mlir::Type SrcTy, - mlir::Type DstTy, ScalarConversionOpts Opts); + mlir::Value emitScalarCast(mlir::Value Src, QualType SrcType, + QualType DstType, mlir::Type SrcTy, + mlir::Type DstTy, ScalarConversionOpts Opts); - BinOpInfo buildBinOps(const BinaryOperator *E, - QualType PromotionType = QualType()) { + BinOpInfo emitBinOps(const BinaryOperator *E, + QualType PromotionType = QualType()) { BinOpInfo Result; - Result.LHS = CGF.buildPromotedScalarExpr(E->getLHS(), PromotionType); - Result.RHS = CGF.buildPromotedScalarExpr(E->getRHS(), PromotionType); + Result.LHS = CGF.emitPromotedScalarExpr(E->getLHS(), PromotionType); + Result.RHS = CGF.emitPromotedScalarExpr(E->getRHS(), PromotionType); if (!PromotionType.isNull()) Result.FullType = PromotionType; else @@ -850,24 +848,24 @@ class ScalarExprEmitter : public StmtVisitor { return Result; } - mlir::Value buildMul(const BinOpInfo &Ops); - mlir::Value buildDiv(const BinOpInfo &Ops); - mlir::Value buildRem(const BinOpInfo &Ops); - mlir::Value buildAdd(const BinOpInfo &Ops); - mlir::Value buildSub(const BinOpInfo &Ops); - mlir::Value buildShl(const BinOpInfo &Ops); - mlir::Value buildShr(const BinOpInfo &Ops); - mlir::Value buildAnd(const BinOpInfo &Ops); - mlir::Value buildXor(const BinOpInfo &Ops); - mlir::Value buildOr(const BinOpInfo &Ops); + mlir::Value emitMul(const BinOpInfo &Ops); + mlir::Value emitDiv(const BinOpInfo &Ops); + mlir::Value emitRem(const BinOpInfo &Ops); + mlir::Value emitAdd(const BinOpInfo &Ops); + mlir::Value emitSub(const BinOpInfo &Ops); + mlir::Value emitShl(const BinOpInfo &Ops); + mlir::Value emitShr(const BinOpInfo &Ops); + mlir::Value emitAnd(const BinOpInfo &Ops); + mlir::Value emitXor(const BinOpInfo &Ops); + mlir::Value emitOr(const BinOpInfo &Ops); - LValue buildCompoundAssignLValue( + LValue emitCompoundAssignLValue( const CompoundAssignOperator *E, mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &), mlir::Value &Result); mlir::Value - buildCompoundAssign(const CompoundAssignOperator *E, - mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &)); + emitCompoundAssign(const CompoundAssignOperator *E, + mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &)); // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM // codegen. @@ -887,13 +885,13 @@ class ScalarExprEmitter : public StmtVisitor { #define HANDLEBINOP(OP) \ mlir::Value VisitBin##OP(const BinaryOperator *E) { \ QualType promotionTy = getPromotionType(E->getType()); \ - auto result = build##OP(buildBinOps(E, promotionTy)); \ + auto result = emit##OP(emitBinOps(E, promotionTy)); \ if (result && !promotionTy.isNull()) \ - result = buildUnPromotedValue(result, E->getType()); \ + result = emitUnPromotedValue(result, E->getType()); \ return result; \ } \ mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *E) { \ - return buildCompoundAssign(E, &ScalarExprEmitter::build##OP); \ + return emitCompoundAssign(E, &ScalarExprEmitter::emit##OP); \ } HANDLEBINOP(Mul) @@ -908,7 +906,7 @@ class ScalarExprEmitter : public StmtVisitor { HANDLEBINOP(Or) #undef HANDLEBINOP - mlir::Value buildCmp(const BinaryOperator *E) { + mlir::Value emitCmp(const BinaryOperator *E) { mlir::Value Result; QualType LHSTy = E->getLHS()->getType(); QualType RHSTy = E->getRHS()->getType(); @@ -936,7 +934,7 @@ class ScalarExprEmitter : public StmtVisitor { if (const MemberPointerType *MPT = LHSTy->getAs()) { assert(0 && "not implemented"); } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { - BinOpInfo BOInfo = buildBinOps(E); + BinOpInfo BOInfo = emitBinOps(E); mlir::Value LHS = BOInfo.LHS; mlir::Value RHS = BOInfo.RHS; @@ -976,17 +974,17 @@ class ScalarExprEmitter : public StmtVisitor { assert(0 && "not implemented"); } - return buildScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), - E->getExprLoc()); + return emitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), + E->getExprLoc()); } - mlir::Value buildFloatToBoolConversion(mlir::Value src, mlir::Location loc) { + mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) { auto boolTy = Builder.getBoolTy(); return Builder.create(loc, boolTy, cir::CastKind::float_to_bool, src); } - mlir::Value buildIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) { + mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) { // Because of the type rules of C, we often end up computing a // logical value, then zero extending it to int, then wanting it // as a logical value again. @@ -999,21 +997,21 @@ class ScalarExprEmitter : public StmtVisitor { /// Convert the specified expression value to a boolean (!cir.bool) truth /// value. This is equivalent to "Val != 0". - mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType, - mlir::Location loc) { + mlir::Value emitConversionToBool(mlir::Value Src, QualType SrcType, + mlir::Location loc) { assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); if (SrcType->isRealFloatingType()) - return buildFloatToBoolConversion(Src, loc); + return emitFloatToBoolConversion(Src, loc); if (auto *MPT = llvm::dyn_cast(SrcType)) assert(0 && "not implemented"); if (SrcType->isIntegerType()) - return buildIntToBoolConversion(Src, loc); + return emitIntToBoolConversion(Src, loc); assert(::mlir::isa(Src.getType())); - return buildPointerToBoolConversion(Src, SrcType); + return emitPointerToBoolConversion(Src, SrcType); } /// Emit a conversion from the specified type to the specified destination @@ -1021,11 +1019,11 @@ class ScalarExprEmitter : public StmtVisitor { /// TODO: do we need ScalarConversionOpts here? Should be done in another /// pass. mlir::Value - buildScalarConversion(mlir::Value Src, QualType SrcType, QualType DstType, - SourceLocation Loc, - ScalarConversionOpts Opts = ScalarConversionOpts()) { + emitScalarConversion(mlir::Value Src, QualType SrcType, QualType DstType, + SourceLocation Loc, + ScalarConversionOpts Opts = ScalarConversionOpts()) { // All conversions involving fixed point types should be handled by the - // buildFixedPoint family functions. This is done to prevent bloating up + // emitFixedPoint family functions. This is done to prevent bloating up // this function more, and although fixed point numbers are represented by // integers, we do not want to follow any logic that assumes they should be // treated as integers. @@ -1050,7 +1048,7 @@ class ScalarExprEmitter : public StmtVisitor { // Handle conversions to bool first, they are special: comparisons against // 0. if (DstType->isBooleanType()) - return buildConversionToBool(Src, SrcType, CGF.getLoc(Loc)); + return emitConversionToBool(Src, SrcType, CGF.getLoc(Loc)); mlir::Type DstTy = ConvertType(DstType); @@ -1141,7 +1139,7 @@ class ScalarExprEmitter : public StmtVisitor { DstTy = CGF.FloatTy; } - Res = buildScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); + Res = emitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); if (DstTy != ResTy) { if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { @@ -1166,17 +1164,17 @@ class ScalarExprEmitter : public StmtVisitor { /// Emit the computation of the specified expression of scalar type, /// ignoring the result. -mlir::Value CIRGenFunction::buildScalarExpr(const Expr *E) { +mlir::Value CIRGenFunction::emitScalarExpr(const Expr *E) { assert(E && hasScalarEvaluationKind(E->getType()) && "Invalid scalar expression to emit"); return ScalarExprEmitter(*this, builder).Visit(const_cast(E)); } -mlir::Value CIRGenFunction::buildPromotedScalarExpr(const Expr *E, - QualType PromotionType) { +mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *E, + QualType PromotionType) { if (!PromotionType.isNull()) - return ScalarExprEmitter(*this, builder).buildPromoted(E, PromotionType); + return ScalarExprEmitter(*this, builder).emitPromoted(E, PromotionType); return ScalarExprEmitter(*this, builder).Visit(const_cast(E)); } @@ -1251,9 +1249,9 @@ static std::optional getUnwidenedIntegerType(const ASTContext &Ctx, } /// Emit pointer + index arithmetic. -static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, - const BinOpInfo &op, - bool isSubtraction) { +static mlir::Value emitPointerArithmetic(CIRGenFunction &CGF, + const BinOpInfo &op, + bool isSubtraction) { // Must have binary (not unary) expr here. Unary pointer // increment/decrement doesn't use this path. const BinaryOperator *expr = cast(op.E); @@ -1327,8 +1325,8 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, pointer = CGF.getBuilder().create( CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index); } else { - pointer = CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned, - isSubtraction, op.E->getExprLoc()); + pointer = CGF.emitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, + isSubtraction, op.E->getExprLoc()); } return pointer; } @@ -1345,11 +1343,11 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, return CGF.getBuilder().create( CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index); - return CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned, - isSubtraction, op.E->getExprLoc()); + return CGF.emitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, + isSubtraction, op.E->getExprLoc()); } -mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) { +mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &Ops) { if (Ops.CompType->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: @@ -1386,21 +1384,21 @@ mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) { CGF.getCIRType(Ops.FullType), cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); } -mlir::Value ScalarExprEmitter::buildDiv(const BinOpInfo &Ops) { +mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), cir::BinOpKind::Div, Ops.LHS, Ops.RHS); } -mlir::Value ScalarExprEmitter::buildRem(const BinOpInfo &Ops) { +mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); } -mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { +mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &Ops) { if (mlir::isa(Ops.LHS.getType()) || mlir::isa(Ops.RHS.getType())) - return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/false); + return emitPointerArithmetic(CGF, Ops, /*isSubtraction=*/false); if (Ops.CompType->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: @@ -1440,7 +1438,7 @@ mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) { cir::BinOpKind::Add, Ops.LHS, Ops.RHS); } -mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { +mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &Ops) { // The LHS is always a pointer if either side is. if (!mlir::isa(Ops.LHS.getType())) { if (Ops.CompType->isSignedIntegerOrEnumerationType()) { @@ -1486,7 +1484,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { // If the RHS is not a pointer, then we have normal pointer // arithmetic. if (!mlir::isa(Ops.RHS.getType())) - return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/true); + return emitPointerArithmetic(CGF, Ops, /*isSubtraction=*/true); // Otherwise, this is a pointer subtraction @@ -1501,7 +1499,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) { Ops.LHS, Ops.RHS); } -mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { +mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &Ops) { // TODO: This misses out on the sanitizer check below. if (Ops.isFixedPointOp()) llvm_unreachable("NYI"); @@ -1533,7 +1531,7 @@ mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) { Ops.RHS, CGF.getBuilder().getUnitAttr()); } -mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) { +mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &Ops) { // TODO: This misses out on the sanitizer check below. if (Ops.isFixedPointOp()) llvm_unreachable("NYI"); @@ -1556,17 +1554,17 @@ mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) { CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), Ops.LHS, Ops.RHS); } -mlir::Value ScalarExprEmitter::buildAnd(const BinOpInfo &Ops) { +mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), cir::BinOpKind::And, Ops.LHS, Ops.RHS); } -mlir::Value ScalarExprEmitter::buildXor(const BinOpInfo &Ops) { +mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); } -mlir::Value ScalarExprEmitter::buildOr(const BinOpInfo &Ops) { +mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), cir::BinOpKind::Or, Ops.LHS, Ops.RHS); @@ -1598,7 +1596,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_LValueBitCast: case CK_ObjCObjectLValueCast: case CK_LValueToRValueBitCast: { - LValue SourceLVal = CGF.buildLValue(E); + LValue SourceLVal = CGF.emitLValue(E); Address SourceAddr = SourceLVal.getAddress(); mlir::Type DestElemTy = CGF.convertTypeForMem(DestTy); @@ -1613,7 +1611,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { if (Kind == CK_LValueToRValueBitCast) assert(!cir::MissingFeatures::tbaa()); - return buildLoadOfLValue(DestLVal, CE->getExprLoc()); + return emitLoadOfLValue(DestLVal, CE->getExprLoc()); } case CK_CPointerToObjCPointerCast: @@ -1665,7 +1663,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // eliminate the useless instructions emitted during translating E. if (Result.HasSideEffects) Visit(E); - return CGF.CGM.buildNullConstant(DestTy, CGF.getLoc(E->getExprLoc())); + return CGF.CGM.emitNullConstant(DestTy, CGF.getLoc(E->getExprLoc())); } // Since target may map different address spaces in AST to the same address // space, an address space conversion may end up as a bitcast. @@ -1696,7 +1694,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_BaseToDerived: { const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl(); assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"); - Address Base = CGF.buildPointerWithAlignment(E); + Address Base = CGF.emitPointerWithAlignment(E); Address Derived = CGF.getAddressOfDerivedClass( Base, DerivedClassDecl, CE->path_begin(), CE->path_end(), CGF.shouldNullCheckClassCastValue(CE)); @@ -1714,17 +1712,17 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_DerivedToBase: { // The EmitPointerWithAlignment path does this fine; just discard // the alignment. - return CGF.buildPointerWithAlignment(CE).getPointer(); + return CGF.emitPointerWithAlignment(CE).getPointer(); } case CK_Dynamic: { - Address V = CGF.buildPointerWithAlignment(E); + Address V = CGF.emitPointerWithAlignment(E); const auto *DCE = cast(CE); - return CGF.buildDynamicCast(V, DCE); + return CGF.emitDynamicCast(V, DCE); } case CK_ArrayToPointerDecay: - return CGF.buildArrayToPointerDecay(E).getPointer(); + return CGF.emitArrayToPointerDecay(E).getPointer(); case CK_FunctionToPointerDecay: - return buildLValue(E).getPointer(); + return emitLValue(E).getPointer(); case CK_NullToPointer: { // FIXME: use MustVisitNullValue(E) and evaluate expr. @@ -1736,7 +1734,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_NullToMemberPointer: { if (MustVisitNullValue(E)) - CGF.buildIgnoredExpr(E); + CGF.emitIgnoredExpr(E); assert(!cir::MissingFeatures::cxxABI()); @@ -1810,7 +1808,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { return Builder.createPtrToInt(Visit(E), ConvertType(DestTy)); } case CK_ToVoid: { - CGF.buildIgnoredExpr(E); + CGF.emitIgnoredExpr(E); return nullptr; } case CK_MatrixCast: @@ -1836,8 +1834,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { if (!ICE->isPartOfExplicitCast()) Opts = ScalarConversionOpts(CGF.SanOpts); } - return buildScalarConversion(Visit(E), E->getType(), DestTy, - CE->getExprLoc(), Opts); + return emitScalarConversion(Visit(E), E->getType(), DestTy, + CE->getExprLoc(), Opts); } case CK_IntegralToFloating: @@ -1848,29 +1846,29 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { if (Kind == CK_FixedPointToFloating || Kind == CK_FloatingToFixedPoint) llvm_unreachable("Fixed point casts are NYI."); CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, CE); - return buildScalarConversion(Visit(E), E->getType(), DestTy, - CE->getExprLoc()); + return emitScalarConversion(Visit(E), E->getType(), DestTy, + CE->getExprLoc()); } case CK_BooleanToSignedIntegral: llvm_unreachable("NYI"); case CK_IntegralToBoolean: { - return buildIntToBoolConversion(Visit(E), CGF.getLoc(CE->getSourceRange())); + return emitIntToBoolConversion(Visit(E), CGF.getLoc(CE->getSourceRange())); } case CK_PointerToBoolean: - return buildPointerToBoolConversion(Visit(E), E->getType()); + return emitPointerToBoolConversion(Visit(E), E->getType()); case CK_FloatingToBoolean: - return buildFloatToBoolConversion(Visit(E), CGF.getLoc(E->getExprLoc())); + return emitFloatToBoolConversion(Visit(E), CGF.getLoc(E->getExprLoc())); case CK_MemberPointerToBoolean: llvm_unreachable("NYI"); case CK_FloatingComplexToReal: case CK_IntegralComplexToReal: case CK_FloatingComplexToBoolean: case CK_IntegralComplexToBoolean: { - mlir::Value V = CGF.buildComplexExpr(E); - return buildComplexToScalarConversion(CGF.getLoc(CE->getExprLoc()), V, Kind, - DestTy); + mlir::Value V = CGF.emitComplexExpr(E); + return emitComplexToScalarConversion(CGF.getLoc(CE->getExprLoc()), V, Kind, + DestTy); } case CK_ZeroToOCLOpaqueType: llvm_unreachable("NYI"); @@ -1888,10 +1886,10 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *E) { if (E->getCallReturnType(CGF.getContext())->isReferenceType()) - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); - auto V = CGF.buildCallExpr(E).getScalarVal(); - assert(!cir::MissingFeatures::buildLValueAlignmentAssumption()); + auto V = CGF.emitCallExpr(E).getScalarVal(); + assert(!cir::MissingFeatures::emitLValueAlignmentAssumption()); return V; } @@ -1902,29 +1900,28 @@ mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { Expr::EvalResult Result; if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) { llvm::APSInt Value = Result.Val.getInt(); - CGF.buildIgnoredExpr(E->getBase()); + CGF.emitIgnoredExpr(E->getBase()); return Builder.getConstInt(CGF.getLoc(E->getExprLoc()), Value); } - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); } /// Emit a conversion from the specified type to the specified destination /// type, both of which are CIR scalar types. -mlir::Value CIRGenFunction::buildScalarConversion(mlir::Value Src, - QualType SrcTy, - QualType DstTy, - SourceLocation Loc) { +mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value Src, + QualType SrcTy, QualType DstTy, + SourceLocation Loc) { assert(CIRGenFunction::hasScalarEvaluationKind(SrcTy) && CIRGenFunction::hasScalarEvaluationKind(DstTy) && "Invalid scalar expression to emit"); return ScalarExprEmitter(*this, builder) - .buildScalarConversion(Src, SrcTy, DstTy, Loc); + .emitScalarConversion(Src, SrcTy, DstTy, Loc); } -mlir::Value CIRGenFunction::buildComplexToScalarConversion(mlir::Value Src, - QualType SrcTy, - QualType DstTy, - SourceLocation Loc) { +mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value Src, + QualType SrcTy, + QualType DstTy, + SourceLocation Loc) { assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) && "Invalid complex -> scalar conversion"); @@ -1941,7 +1938,7 @@ mlir::Value CIRGenFunction::buildComplexToScalarConversion(mlir::Value Src, : cir::CastKind::int_complex_to_real; auto Real = builder.createCast(getLoc(Loc), Kind, Src, ConvertType(ComplexElemTy)); - return buildScalarConversion(Real, ComplexElemTy, DstTy, Loc); + return emitScalarConversion(Real, ComplexElemTy, DstTy, Loc); } /// If the specified expression does not fold @@ -1992,7 +1989,7 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { if (NumInitElements == 0) { // C++11 value-initialization for the scalar. - return buildNullValue(E->getType(), CGF.getLoc(E->getExprLoc())); + return emitNullValue(E->getType(), CGF.getLoc(E->getExprLoc())); } return Visit(E->getInit(0)); @@ -2031,7 +2028,7 @@ mlir::Value ScalarExprEmitter::VisitReal(const UnaryOperator *E) { // Note that we have to ask E because Op might be an l-value that // this won't work for, e.g. an Obj-C property. if (E->isGLValue()) - return CGF.buildLoadOfLValue(CGF.buildLValue(E), E->getExprLoc()) + return CGF.emitLoadOfLValue(CGF.emitLValue(E), E->getExprLoc()) .getScalarVal(); // Otherwise, calculate and project. llvm_unreachable("NYI"); @@ -2049,7 +2046,7 @@ mlir::Value ScalarExprEmitter::VisitImag(const UnaryOperator *E) { // Note that we have to ask E because Op might be an l-value that // this won't work for, e.g. an Obj-C property. if (E->isGLValue()) - return CGF.buildLoadOfLValue(CGF.buildLValue(E), E->getExprLoc()) + return CGF.emitLoadOfLValue(CGF.emitLValue(E), E->getExprLoc()) .getScalarVal(); // Otherwise, calculate and project. llvm_unreachable("NYI"); @@ -2062,9 +2059,11 @@ mlir::Value ScalarExprEmitter::VisitImag(const UnaryOperator *E) { // floating-point. Conversions involving other types are handled elsewhere. // Conversion to bool is handled elsewhere because that's a comparison against // zero, not a simple cast. This handles both individual scalars and vectors. -mlir::Value ScalarExprEmitter::buildScalarCast( - mlir::Value Src, QualType SrcType, QualType DstType, mlir::Type SrcTy, - mlir::Type DstTy, ScalarConversionOpts Opts) { +mlir::Value ScalarExprEmitter::emitScalarCast(mlir::Value Src, QualType SrcType, + QualType DstType, + mlir::Type SrcTy, + mlir::Type DstTy, + ScalarConversionOpts Opts) { assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && "Internal error: matrix types not handled by this function."); if (mlir::isa(SrcTy) || @@ -2079,7 +2078,7 @@ mlir::Value ScalarExprEmitter::buildScalarCast( } assert(!mlir::isa(SrcTy) && !mlir::isa(DstTy) && - "buildScalarCast given a vector type and a non-vector type"); + "emitScalarCast given a vector type and a non-vector type"); std::optional CastKind; @@ -2126,14 +2125,14 @@ mlir::Value ScalarExprEmitter::buildScalarCast( } LValue -CIRGenFunction::buildCompoundAssignmentLValue(const CompoundAssignOperator *E) { +CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *E) { ScalarExprEmitter Scalar(*this, builder); mlir::Value Result; switch (E->getOpcode()) { #define COMPOUND_OP(Op) \ case BO_##Op##Assign: \ - return Scalar.buildCompoundAssignLValue(E, &ScalarExprEmitter::build##Op, \ - Result) + return Scalar.emitCompoundAssignLValue(E, &ScalarExprEmitter::emit##Op, \ + Result) COMPOUND_OP(Mul); COMPOUND_OP(Div); COMPOUND_OP(Rem); @@ -2174,7 +2173,7 @@ CIRGenFunction::buildCompoundAssignmentLValue(const CompoundAssignOperator *E) { llvm_unreachable("Unhandled compound assignment operator"); } -LValue ScalarExprEmitter::buildCompoundAssignLValue( +LValue ScalarExprEmitter::emitCompoundAssignLValue( const CompoundAssignOperator *E, mlir::Value (ScalarExprEmitter::*Func)(const BinOpInfo &), mlir::Value &Result) { @@ -2195,7 +2194,7 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType()); if (!PromotionTypeRHS.isNull()) - OpInfo.RHS = CGF.buildPromotedScalarExpr(E->getRHS(), PromotionTypeRHS); + OpInfo.RHS = CGF.emitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS); else OpInfo.RHS = Visit(E->getRHS()); @@ -2210,40 +2209,40 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( OpInfo.Loc = E->getSourceRange(); // Load/convert the LHS - LValue LHSLV = CGF.buildLValue(E->getLHS()); + LValue LHSLV = CGF.emitLValue(E->getLHS()); if (const AtomicType *atomicTy = LHSTy->getAs()) { assert(0 && "not implemented"); } - OpInfo.LHS = buildLoadOfLValue(LHSLV, E->getExprLoc()); + OpInfo.LHS = emitLoadOfLValue(LHSLV, E->getExprLoc()); CIRGenFunction::SourceLocRAIIObject sourceloc{ CGF, CGF.getLoc(E->getSourceRange())}; SourceLocation Loc = E->getExprLoc(); if (!PromotionTypeLHS.isNull()) - OpInfo.LHS = buildScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS, - E->getExprLoc()); + OpInfo.LHS = emitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS, + E->getExprLoc()); else - OpInfo.LHS = buildScalarConversion(OpInfo.LHS, LHSTy, - E->getComputationLHSType(), Loc); + OpInfo.LHS = emitScalarConversion(OpInfo.LHS, LHSTy, + E->getComputationLHSType(), Loc); // Expand the binary operator. Result = (this->*Func)(OpInfo); // Convert the result back to the LHS type, // potentially with Implicit Conversion sanitizer check. - Result = buildScalarConversion(Result, PromotionTypeCR, LHSTy, Loc, - ScalarConversionOpts(CGF.SanOpts)); + Result = emitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc, + ScalarConversionOpts(CGF.SanOpts)); // Store the result value into the LHS lvalue. Bit-fields are handled // specially because the result is altered by the store, i.e., [C99 6.5.16p1] // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHSLV.isBitField()) - CGF.buildStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, Result); + CGF.emitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, Result); else - CGF.buildStoreThroughLValue(RValue::get(Result), LHSLV); + CGF.emitStoreThroughLValue(RValue::get(Result), LHSLV); if (CGF.getLangOpts().OpenMP) CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, @@ -2251,8 +2250,10 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( return LHSLV; } -mlir::Value ScalarExprEmitter::buildComplexToScalarConversion( - mlir::Location Loc, mlir::Value V, CastKind Kind, QualType DestTy) { +mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location Loc, + mlir::Value V, + CastKind Kind, + QualType DestTy) { cir::CastKind CastOpKind; switch (Kind) { case CK_FloatingComplexToReal: @@ -2274,18 +2275,18 @@ mlir::Value ScalarExprEmitter::buildComplexToScalarConversion( return Builder.createCast(Loc, CastOpKind, V, CGF.ConvertType(DestTy)); } -mlir::Value ScalarExprEmitter::buildNullValue(QualType Ty, mlir::Location loc) { - return CGF.buildFromMemory(CGF.CGM.buildNullConstant(Ty, loc), Ty); +mlir::Value ScalarExprEmitter::emitNullValue(QualType Ty, mlir::Location loc) { + return CGF.emitFromMemory(CGF.CGM.emitNullConstant(Ty, loc), Ty); } -mlir::Value ScalarExprEmitter::buildPromoted(const Expr *E, - QualType PromotionType) { +mlir::Value ScalarExprEmitter::emitPromoted(const Expr *E, + QualType PromotionType) { E = E->IgnoreParens(); if (const auto *BO = dyn_cast(E)) { switch (BO->getOpcode()) { #define HANDLE_BINOP(OP) \ case BO_##OP: \ - return build##OP(buildBinOps(BO, PromotionType)); + return emit##OP(emitBinOps(BO, PromotionType)); HANDLE_BINOP(Add) HANDLE_BINOP(Sub) HANDLE_BINOP(Mul) @@ -2310,19 +2311,19 @@ mlir::Value ScalarExprEmitter::buildPromoted(const Expr *E, auto result = Visit(const_cast(E)); if (result) { if (!PromotionType.isNull()) - return buildPromotedValue(result, PromotionType); - return buildUnPromotedValue(result, E->getType()); + return emitPromotedValue(result, PromotionType); + return emitUnPromotedValue(result, E->getType()); } return result; } -mlir::Value ScalarExprEmitter::buildCompoundAssign( +mlir::Value ScalarExprEmitter::emitCompoundAssign( const CompoundAssignOperator *E, mlir::Value (ScalarExprEmitter::*Func)(const BinOpInfo &)) { bool Ignore = TestAndClearIgnoreResultAssign(); mlir::Value RHS; - LValue LHS = buildCompoundAssignLValue(E, Func, RHS); + LValue LHS = emitCompoundAssignLValue(E, Func, RHS); // If the result is clearly ignored, return now. if (Ignore) @@ -2337,7 +2338,7 @@ mlir::Value ScalarExprEmitter::buildCompoundAssign( return RHS; // Otherwise, reload the value. - return buildLoadOfLValue(LHS, E->getExprLoc()); + return emitLoadOfLValue(LHS, E->getExprLoc()); } mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { @@ -2381,19 +2382,19 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // __block variables need to have the rhs evaluated first, plus this should // improve codegen just a little. RHS = Visit(E->getRHS()); - LHS = buildCheckedLValue(E->getLHS(), CIRGenFunction::TCK_Store); + LHS = emitCheckedLValue(E->getLHS(), CIRGenFunction::TCK_Store); // Store the value into the LHS. Bit-fields are handled specially because // the result is altered by the store, i.e., [C99 6.5.16p1] // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHS.isBitField()) { - CGF.buildStoreThroughBitfieldLValue(RValue::get(RHS), LHS, RHS); + CGF.emitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, RHS); } else { - CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); + CGF.emitNullabilityCheck(LHS, RHS, E->getExprLoc()); CIRGenFunction::SourceLocRAIIObject loc{CGF, CGF.getLoc(E->getSourceRange())}; - CGF.buildStoreThroughLValue(RValue::get(RHS), LHS); + CGF.emitStoreThroughLValue(RValue::get(RHS), LHS); } } @@ -2410,7 +2411,7 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { return RHS; // Otherwise, reload the value. - return buildLoadOfLValue(LHS, E->getExprLoc()); + return emitLoadOfLValue(LHS, E->getExprLoc()); } /// Return true if the specified expression is cheap enough and side-effect-free @@ -2517,7 +2518,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( .getResult(); } - mlir::Value condV = CGF.buildOpOnBoolExpr(loc, condExpr); + mlir::Value condV = CGF.emitOpOnBoolExpr(loc, condExpr); CIRGenFunction::ConditionalEvaluation eval(CGF); SmallVector insertPoints{}; mlir::Type yieldTy{}; @@ -2591,11 +2592,11 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( .getResult(); } -mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, - LValue LV, bool isInc, - bool isPre) { +mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *E, + LValue LV, bool isInc, + bool isPre) { return ScalarExprEmitter(*this, builder) - .buildScalarPrePostIncDec(E, LV, isInc, isPre); + .emitScalarPrePostIncDec(E, LV, isInc, isPre); } mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { @@ -2772,7 +2773,7 @@ mlir::Value ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { assert(!cir::MissingFeatures::variablyModifiedTypeEmission() && "NYI"); Address ArgValue = Address::invalid(); - mlir::Value Val = CGF.buildVAArg(VE, ArgValue); + mlir::Value Val = CGF.emitVAArg(VE, ArgValue); return Val; } @@ -2788,11 +2789,11 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( if (E->isArgumentType()) { // sizeof(type) - make sure to emit the VLA size. - CGF.buildVariablyModifiedType(TypeToSize); + CGF.emitVariablyModifiedType(TypeToSize); } else { // C99 6.5.3.4p2: If the argument is an expression of type // VLA, it is evaluated. - CGF.buildIgnoredExpr(E->getArgumentExpr()); + CGF.emitIgnoredExpr(E->getArgumentExpr()); } auto VlaSize = CGF.getVLASize(VAT); @@ -2815,7 +2816,7 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( E->EvaluateKnownConstInt(CGF.getContext())); } -mlir::Value CIRGenFunction::buildCheckedInBoundsGEP( +mlir::Value CIRGenFunction::emitCheckedInBoundsGEP( mlir::Type ElemTy, mlir::Value Ptr, ArrayRef IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc) { mlir::Type PtrTy = Ptr.getType(); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 3523ca861e47..b31a4ba325ae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -219,12 +219,11 @@ bool CIRGenFunction::sanitizePerformTypeCheck() const { SanOpts.has(SanitizerKind::Vptr); } -void CIRGenFunction::buildTypeCheck(TypeCheckKind TCK, - clang::SourceLocation Loc, mlir::Value V, - clang::QualType Type, - clang::CharUnits Alignment, - clang::SanitizerSet SkippedChecks, - std::optional ArraySize) { +void CIRGenFunction::emitTypeCheck(TypeCheckKind TCK, clang::SourceLocation Loc, + mlir::Value V, clang::QualType Type, + clang::CharUnits Alignment, + clang::SanitizerSet SkippedChecks, + std::optional ArraySize) { if (!sanitizePerformTypeCheck()) return; @@ -271,8 +270,8 @@ static bool endsWithReturn(const Decl *F) { return false; } -void CIRGenFunction::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, - CharUnits alignment) { +void CIRGenFunction::emitAndUpdateRetAlloca(QualType ty, mlir::Location loc, + CharUnits alignment) { if (ty->isVoidType()) { // Void type; nothing to return. @@ -290,7 +289,7 @@ void CIRGenFunction::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, cir::ABIArgInfo::InAlloca) { llvm_unreachable("NYI"); } else { - auto addr = buildAlloca("__retval", ty, loc, alignment); + auto addr = emitAlloca("__retval", ty, loc, alignment); FnRetAlloca = addr; ReturnValue = Address(addr, alignment); @@ -310,7 +309,7 @@ mlir::LogicalResult CIRGenFunction::declare(const Decl *var, QualType ty, assert(namedVar && "Needs a named decl"); assert(!symbolTable.count(var) && "not supposed to be available just yet"); - addr = buildAlloca(namedVar->getName(), ty, loc, alignment); + addr = emitAlloca(namedVar->getName(), ty, loc, alignment); auto allocaOp = cast(addr.getDefiningOp()); if (isParam) allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext())); @@ -364,7 +363,7 @@ void CIRGenFunction::LexicalScope::cleanup() { builder.setInsertionPointToEnd(retBlock); mlir::Location retLoc = *localScope->getRetLocs()[curLoc]; curLoc++; - (void)buildReturn(retLoc); + (void)emitReturn(retLoc); } auto insertCleanupAndLeave = [&](mlir::Block *InsPt) { @@ -385,7 +384,7 @@ void CIRGenFunction::LexicalScope::cleanup() { } if (localScope->Depth == 0) { - buildImplicitReturn(); + emitImplicitReturn(); return; } @@ -442,15 +441,15 @@ void CIRGenFunction::LexicalScope::cleanup() { insertCleanupAndLeave(currBlock); } -cir::ReturnOp CIRGenFunction::LexicalScope::buildReturn(mlir::Location loc) { +cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) { auto &builder = CGF.getBuilder(); // If we are on a coroutine, add the coro_end builtin call. auto Fn = dyn_cast(CGF.CurFn); assert(Fn && "other callables NYI"); if (Fn.getCoroutine()) - CGF.buildCoroEndBuiltinCall( - loc, builder.getNullPtr(builder.getVoidPtrTy(), loc)); + CGF.emitCoroEndBuiltinCall(loc, + builder.getNullPtr(builder.getVoidPtrTy(), loc)); if (CGF.FnRetCIRTy.has_value()) { // If there's anything to return, load it first. @@ -460,7 +459,7 @@ cir::ReturnOp CIRGenFunction::LexicalScope::buildReturn(mlir::Location loc) { return builder.create(loc); } -void CIRGenFunction::LexicalScope::buildImplicitReturn() { +void CIRGenFunction::LexicalScope::emitImplicitReturn() { auto &builder = CGF.getBuilder(); auto *localScope = CGF.currLexScope; @@ -497,7 +496,7 @@ void CIRGenFunction::LexicalScope::buildImplicitReturn() { } } - (void)buildReturn(localScope->EndLoc); + (void)emitReturn(localScope->EndLoc); } cir::TryOp CIRGenFunction::LexicalScope::getClosestTryParent() { @@ -711,9 +710,9 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, // TODO: PGO.assignRegionCounters assert(!cir::MissingFeatures::shouldInstrumentFunction()); if (isa(FD)) - buildDestructorBody(Args); + emitDestructorBody(Args); else if (isa(FD)) - buildConstructorBody(Args); + emitConstructorBody(Args); else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice && FD->hasAttr()) llvm_unreachable("NYI"); @@ -722,15 +721,15 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, // The lambda static invoker function is special, because it forwards or // clones the body of the function call operator (but is actually // static). - buildLambdaStaticInvokeBody(cast(FD)); + emitLambdaStaticInvokeBody(cast(FD)); } else if (FD->isDefaulted() && isa(FD) && (cast(FD)->isCopyAssignmentOperator() || cast(FD)->isMoveAssignmentOperator())) { // Implicit copy-assignment gets the same special treatment as implicit // copy-constructors. - buildImplicitAssignmentOperatorBody(Args); + emitImplicitAssignmentOperatorBody(Args); } else if (Body) { - if (mlir::failed(buildFunctionBody(Body))) { + if (mlir::failed(emitFunctionBody(Body))) { Fn.erase(); return nullptr; } @@ -759,7 +758,7 @@ mlir::Value CIRGenFunction::createLoad(const VarDecl *VD, const char *Name) { addr.getElementType(), addr.getPointer()); } -void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { +void CIRGenFunction::emitConstructorBody(FunctionArgList &Args) { assert(!cir::MissingFeatures::emitAsanPrologueOrEpilogue()); const auto *Ctor = cast(CurGD.getDecl()); auto CtorType = CurGD.getCtorType(); @@ -772,7 +771,7 @@ void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { // optimization. if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) && CGM.getTarget().getCXXABI().hasConstructorVariants()) { - buildDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getEndLoc()); + emitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getEndLoc()); return; } @@ -794,7 +793,7 @@ void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { // complete ctor and then delegate to the base ctor. // Emit the constructor prologue, i.e. the base and member initializers. - buildCtorPrologue(Ctor, CtorType, Args); + emitCtorPrologue(Ctor, CtorType, Args); // Emit the body of the statement. if (IsTryBody) @@ -802,7 +801,7 @@ void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { else { // TODO: propagate this result via mlir::logical result. Just unreachable // now just to have it handled. - if (mlir::failed(buildStmt(Body, true))) + if (mlir::failed(emitStmt(Body, true))) llvm_unreachable("NYI"); } @@ -886,7 +885,7 @@ static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) { return true; } -/// TODO: this should live in `buildFunctionProlog` +/// TODO: this should live in `emitFunctionProlog` /// An argument came in as a promoted argument; demote it back to its /// declared type. static mlir::Value emitArgumentDemotion(CIRGenFunction &CGF, const VarDecl *var, @@ -1119,7 +1118,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) && getLangOpts().CUDAIsDevice))) { // Add metadata for a kernel function. - buildKernelMetadata(FD, Fn); + emitKernelMetadata(FD, Fn); } if (FD && FD->hasAttr()) { @@ -1134,7 +1133,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // If we're checking nullability, we need to know whether we can check the // return value. Initialize the falg to 'true' and refine it in - // buildParmDecl. + // emitParmDecl. if (SanOpts.has(SanitizerKind::NullabilityReturn)) { llvm_unreachable("NYI"); } @@ -1220,7 +1219,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, llvm_unreachable("NYI"); } - // TODO: buildFunctionProlog + // TODO: emitFunctionProlog { // Set the insertion point in the builder to the beginning of the @@ -1228,7 +1227,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // operations in this function. builder.setInsertionPointToStart(EntryBB); - // TODO: this should live in `buildFunctionProlog + // TODO: this should live in `emitFunctionProlog // Declare all the function arguments in the symbol table. for (const auto nameValue : llvm::zip(Args, EntryBB->getArguments())) { auto *paramVar = std::get<0>(nameValue); @@ -1245,7 +1244,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, auto address = Address(addr, alignment); setAddrOfLocalVar(paramVar, address); - // TODO: this should live in `buildFunctionProlog` + // TODO: this should live in `emitFunctionProlog` bool isPromoted = isa(paramVar) && cast(paramVar)->isKNRPromoted(); assert(!cir::MissingFeatures::constructABIArgDirectExtend()); @@ -1264,12 +1263,12 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // When the current function is not void, create an address to store the // result value. if (FnRetCIRTy.has_value()) - buildAndUpdateRetAlloca(FnRetQualTy, FnEndLoc, - CGM.getNaturalTypeAlignment(FnRetQualTy)); + emitAndUpdateRetAlloca(FnRetQualTy, FnEndLoc, + CGM.getNaturalTypeAlignment(FnRetQualTy)); } if (D && isa(D) && cast(D)->isInstance()) { - CGM.getCXXABI().buildInstanceFunctionProlog(Loc, *this); + CGM.getCXXABI().emitInstanceFunctionProlog(Loc, *this); const auto *MD = cast(D); if (MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call) { @@ -1311,7 +1310,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, MD->getParent()->getLambdaCaptureDefault() == LCD_None) SkippedChecks.set(SanitizerKind::Null, true); - assert(!cir::MissingFeatures::buildTypeCheck() && "NYI"); + assert(!cir::MissingFeatures::emitTypeCheck() && "NYI"); } } @@ -1330,7 +1329,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, ty = vd->getType(); if (ty->isVariablyModifiedType()) - buildVariablyModifiedType(ty); + emitVariablyModifiedType(ty); } } // Emit a location at the end of the prologue. @@ -1358,7 +1357,7 @@ bool CIRGenFunction::ShouldInstrumentFunction() { llvm_unreachable("NYI"); } -mlir::LogicalResult CIRGenFunction::buildFunctionBody(const clang::Stmt *Body) { +mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *Body) { // TODO: incrementProfileCounter(Body); // We start with function level scope for variables. @@ -1366,9 +1365,9 @@ mlir::LogicalResult CIRGenFunction::buildFunctionBody(const clang::Stmt *Body) { auto result = mlir::LogicalResult::success(); if (const CompoundStmt *S = dyn_cast(Body)) - buildCompoundStmtWithoutScope(*S); + emitCompoundStmtWithoutScope(*S); else - result = buildStmt(Body, /*useCurrentScope*/ true); + result = emitStmt(Body, /*useCurrentScope*/ true); // This is checked after emitting the function body so we know if there are // any permitted infinite loops. @@ -1435,8 +1434,8 @@ std::string CIRGenFunction::getCounterRefTmpAsString() { return getVersionedTmpName("ref.tmp", CounterRefTmp++); } -void CIRGenFunction::buildNullInitialization(mlir::Location loc, - Address DestPtr, QualType Ty) { +void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address DestPtr, + QualType Ty) { // Ignore empty classes in C++. if (getLangOpts().CPlusPlus) { if (const RecordType *RT = Ty->getAs()) { @@ -1558,15 +1557,15 @@ bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *CE) { return true; } -void CIRGenFunction::buildDeclRefExprDbgValue(const DeclRefExpr *E, - const APValue &Init) { +void CIRGenFunction::emitDeclRefExprDbgValue(const DeclRefExpr *E, + const APValue &Init) { assert(!cir::MissingFeatures::generateDebugInfo()); } -Address CIRGenFunction::buildVAListRef(const Expr *E) { +Address CIRGenFunction::emitVAListRef(const Expr *E) { if (getContext().getBuiltinVaListType()->isArrayType()) - return buildPointerWithAlignment(E); - return buildLValue(E).getAddress(); + return emitPointerWithAlignment(E); + return emitLValue(E).getAddress(); } // Emits an error if we don't have a valid set of target features for the @@ -1683,7 +1682,7 @@ CIRGenFunction::getVLASize(const VariableArrayType *type) { // TODO(cir): most part of this function can be shared between CIRGen // and traditional LLVM codegen -void CIRGenFunction::buildVariablyModifiedType(QualType type) { +void CIRGenFunction::emitVariablyModifiedType(QualType type) { assert(type->isVariablyModifiedType() && "Must pass variably modified type to EmitVLASizes!"); @@ -1771,7 +1770,7 @@ void CIRGenFunction::buildVariablyModifiedType(QualType type) { // e.g. with a typedef and a pointer to it. mlir::Value &entry = VLASizeMap[sizeExpr]; if (!entry) { - mlir::Value size = buildScalarExpr(sizeExpr); + mlir::Value size = emitScalarExpr(sizeExpr); assert(!cir::MissingFeatures::sanitizeVLABound()); // Always zexting here would be wrong if it weren't @@ -1809,7 +1808,7 @@ void CIRGenFunction::buildVariablyModifiedType(QualType type) { case Type::TypeOfExpr: // Stop walking: emit typeof expression. - buildIgnoredExpr(cast(ty)->getUnderlyingExpr()); + emitIgnoredExpr(cast(ty)->getUnderlyingExpr()); return; case Type::Atomic: @@ -1826,8 +1825,8 @@ void CIRGenFunction::buildVariablyModifiedType(QualType type) { /// Computes the length of an array in elements, as well as the base /// element type and a properly-typed first element pointer. mlir::Value -CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, - QualType &baseType, Address &addr) { +CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType, + QualType &baseType, Address &addr) { const auto *arrayType = origArrayType; // If it's a VLA, we have to load the stored size. Note that @@ -1873,7 +1872,7 @@ CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, return numElements; } -mlir::Value CIRGenFunction::buildAlignmentAssumption( +mlir::Value CIRGenFunction::emitAlignmentAssumption( mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, mlir::IntegerAttr alignment, mlir::Value offsetValue) { @@ -1883,20 +1882,20 @@ mlir::Value CIRGenFunction::buildAlignmentAssumption( alignment, offsetValue); } -mlir::Value CIRGenFunction::buildAlignmentAssumption( +mlir::Value CIRGenFunction::emitAlignmentAssumption( mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc, mlir::IntegerAttr alignment, mlir::Value offsetValue) { QualType ty = expr->getType(); SourceLocation loc = expr->getExprLoc(); - return buildAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment, - offsetValue); + return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment, + offsetValue); } -void CIRGenFunction::buildVarAnnotations(const VarDecl *decl, mlir::Value val) { +void CIRGenFunction::emitVarAnnotations(const VarDecl *decl, mlir::Value val) { assert(decl->hasAttr() && "no annotate attribute"); llvm::SmallVector annotations; for (const auto *annot : decl->specific_attrs()) { - annotations.push_back(CGM.buildAnnotateAttr(annot)); + annotations.push_back(CGM.emitAnnotateAttr(annot)); } auto allocaOp = dyn_cast_or_null(val.getDefiningOp()); assert(allocaOp && "expects available alloca"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 0185b5370642..8d4fabeff642 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -105,7 +105,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// Add OpenCL kernel arg metadata and the kernel attribute metadata to /// the function metadata. - void buildKernelMetadata(const FunctionDecl *FD, cir::FuncOp Fn); + void emitKernelMetadata(const FunctionDecl *FD, cir::FuncOp Fn); public: /// A non-RAII class containing all the information about a bound @@ -137,8 +137,8 @@ class CIRGenFunction : public CIRGenTypeCache { static OpaqueValueMappingData bind(CIRGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e) { if (shouldBindAsLValue(ov)) - return bind(CGF, ov, CGF.buildLValue(e)); - return bind(CGF, ov, CGF.buildAnyExpr(e)); + return bind(CGF, ov, CGF.emitLValue(e)); + return bind(CGF, ov, CGF.emitAnyExpr(e)); } static OpaqueValueMappingData @@ -252,22 +252,22 @@ class CIRGenFunction : public CIRGenTypeCache { public: // FIXME(cir): move this to CIRGenBuider.h - mlir::Value buildAlloca(llvm::StringRef name, clang::QualType ty, - mlir::Location loc, clang::CharUnits alignment, - bool insertIntoFnEntryBlock = false, - mlir::Value arraySize = nullptr); - mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, - mlir::Location loc, clang::CharUnits alignment, - bool insertIntoFnEntryBlock = false, - mlir::Value arraySize = nullptr); - mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, - mlir::Location loc, clang::CharUnits alignment, - mlir::OpBuilder::InsertPoint ip, - mlir::Value arraySize = nullptr); + mlir::Value emitAlloca(llvm::StringRef name, clang::QualType ty, + mlir::Location loc, clang::CharUnits alignment, + bool insertIntoFnEntryBlock = false, + mlir::Value arraySize = nullptr); + mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, + mlir::Location loc, clang::CharUnits alignment, + bool insertIntoFnEntryBlock = false, + mlir::Value arraySize = nullptr); + mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, + mlir::Location loc, clang::CharUnits alignment, + mlir::OpBuilder::InsertPoint ip, + mlir::Value arraySize = nullptr); private: - void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, - clang::CharUnits alignment); + void emitAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, + clang::CharUnits alignment); // Track current variable initialization (if there's one) const clang::VarDecl *currVarDecl = nullptr; @@ -438,12 +438,12 @@ class CIRGenFunction : public CIRGenTypeCache { LambdaCaptureFields; clang::FieldDecl *LambdaThisCaptureField = nullptr; - void buildForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator, - CallArgList &CallArgs); - void buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD); - void buildLambdaStaticInvokeBody(const CXXMethodDecl *MD); + void emitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator, + CallArgList &CallArgs); + void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD); + void emitLambdaStaticInvokeBody(const CXXMethodDecl *MD); - LValue buildPredefinedLValue(const PredefinedExpr *E); + LValue emitPredefinedLValue(const PredefinedExpr *E); /// When generating code for a C++ member function, this will /// hold the implicit 'this' declaration. @@ -596,7 +596,7 @@ class CIRGenFunction : public CIRGenTypeCache { CIRGenDebugInfo *getDebugInfo() { return debugInfo; } - void buildReturnOfRValue(mlir::Location loc, RValue RV, QualType Ty); + void emitReturnOfRValue(mlir::Location loc, RValue RV, QualType Ty); /// Set the address of a local variable. void setAddrOfLocalVar(const clang::VarDecl *VD, Address Addr) { @@ -618,131 +618,130 @@ class CIRGenFunction : public CIRGenTypeCache { } /// Whether any type-checking sanitizers are enabled. If \c false, calls to - /// buildTypeCheck can be skipped. + /// emitTypeCheck can be skipped. bool sanitizePerformTypeCheck() const; - void buildTypeCheck(TypeCheckKind TCK, clang::SourceLocation Loc, - mlir::Value V, clang::QualType Type, - clang::CharUnits Alignment = clang::CharUnits::Zero(), - clang::SanitizerSet SkippedChecks = clang::SanitizerSet(), - std::optional ArraySize = std::nullopt); + void emitTypeCheck(TypeCheckKind TCK, clang::SourceLocation Loc, + mlir::Value V, clang::QualType Type, + clang::CharUnits Alignment = clang::CharUnits::Zero(), + clang::SanitizerSet SkippedChecks = clang::SanitizerSet(), + std::optional ArraySize = std::nullopt); - void buildAggExpr(const clang::Expr *E, AggValueSlot Slot); + void emitAggExpr(const clang::Expr *E, AggValueSlot Slot); /// Emit the computation of the specified expression of complex type, /// returning the result. - mlir::Value buildComplexExpr(const Expr *E); + mlir::Value emitComplexExpr(const Expr *E); - void buildComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit); + void emitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit); - void buildStoreOfComplex(mlir::Location Loc, mlir::Value V, LValue dest, - bool isInit); + void emitStoreOfComplex(mlir::Location Loc, mlir::Value V, LValue dest, + bool isInit); - Address buildAddrOfRealComponent(mlir::Location loc, Address complex, - QualType complexType); - Address buildAddrOfImagComponent(mlir::Location loc, Address complex, - QualType complexType); + Address emitAddrOfRealComponent(mlir::Location loc, Address complex, + QualType complexType); + Address emitAddrOfImagComponent(mlir::Location loc, Address complex, + QualType complexType); - LValue buildComplexAssignmentLValue(const BinaryOperator *E); - LValue buildComplexCompoundAssignmentLValue(const CompoundAssignOperator *E); + LValue emitComplexAssignmentLValue(const BinaryOperator *E); + LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E); /// Emits a reference binding to the passed in expression. - RValue buildReferenceBindingToExpr(const Expr *E); + RValue emitReferenceBindingToExpr(const Expr *E); - LValue buildCastLValue(const CastExpr *E); + LValue emitCastLValue(const CastExpr *E); - void buildCXXConstructExpr(const clang::CXXConstructExpr *E, - AggValueSlot Dest); + void emitCXXConstructExpr(const clang::CXXConstructExpr *E, + AggValueSlot Dest); /// Emit a call to an inheriting constructor (that is, one that invokes a /// constructor inherited from a base class) by inlining its definition. This /// is necessary if the ABI does not support forwarding the arguments to the /// base class constructor (because they're variadic or similar). - void buildInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor, - CXXCtorType CtorType, - bool ForVirtualBase, - bool Delegating, - CallArgList &Args); + void emitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor, + CXXCtorType CtorType, + bool ForVirtualBase, + bool Delegating, + CallArgList &Args); /// Emit a call to a constructor inherited from a base class, passing the /// current constructor's arguments along unmodified (without even making /// a copy). - void buildInheritedCXXConstructorCall(const CXXConstructorDecl *D, - bool ForVirtualBase, Address This, - bool InheritedFromVBase, - const CXXInheritedCtorInitExpr *E); - - void buildCXXConstructorCall(const clang::CXXConstructorDecl *D, - clang::CXXCtorType Type, bool ForVirtualBase, - bool Delegating, AggValueSlot ThisAVS, - const clang::CXXConstructExpr *E); - - void buildCXXConstructorCall(const clang::CXXConstructorDecl *D, - clang::CXXCtorType Type, bool ForVirtualBase, - bool Delegating, Address This, CallArgList &Args, - AggValueSlot::Overlap_t Overlap, - clang::SourceLocation Loc, - bool NewPointerIsChecked); - - RValue buildCXXMemberOrOperatorCall( + void emitInheritedCXXConstructorCall(const CXXConstructorDecl *D, + bool ForVirtualBase, Address This, + bool InheritedFromVBase, + const CXXInheritedCtorInitExpr *E); + + void emitCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, bool ForVirtualBase, + bool Delegating, AggValueSlot ThisAVS, + const clang::CXXConstructExpr *E); + + void emitCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, bool ForVirtualBase, + bool Delegating, Address This, CallArgList &Args, + AggValueSlot::Overlap_t Overlap, + clang::SourceLocation Loc, + bool NewPointerIsChecked); + + RValue emitCXXMemberOrOperatorCall( const clang::CXXMethodDecl *Method, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, mlir::Value This, mlir::Value ImplicitParam, clang::QualType ImplicitParamTy, const clang::CallExpr *E, CallArgList *RtlArgs); - RValue buildCXXMemberCallExpr(const clang::CXXMemberCallExpr *E, - ReturnValueSlot ReturnValue); - RValue buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, - ReturnValueSlot ReturnValue); - RValue buildCXXMemberOrOperatorMemberCallExpr( + RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *E, + ReturnValueSlot ReturnValue); + RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, + ReturnValueSlot ReturnValue); + RValue emitCXXMemberOrOperatorMemberCallExpr( const clang::CallExpr *CE, const clang::CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, clang::NestedNameSpecifier *Qualifier, bool IsArrow, const clang::Expr *Base); - RValue buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, - const CXXMethodDecl *MD, - ReturnValueSlot ReturnValue); - void buildNullInitialization(mlir::Location loc, Address DestPtr, - QualType Ty); + RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, + const CXXMethodDecl *MD, + ReturnValueSlot ReturnValue); + void emitNullInitialization(mlir::Location loc, Address DestPtr, QualType Ty); bool shouldNullCheckClassCastValue(const CastExpr *CE); - void buildCXXTemporary(const CXXTemporary *Temporary, QualType TempType, - Address Ptr); - mlir::Value buildCXXNewExpr(const CXXNewExpr *E); - void buildCXXDeleteExpr(const CXXDeleteExpr *E); + void emitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, + Address Ptr); + mlir::Value emitCXXNewExpr(const CXXNewExpr *E); + void emitCXXDeleteExpr(const CXXDeleteExpr *E); - void buildCXXAggrConstructorCall(const CXXConstructorDecl *D, - const clang::ArrayType *ArrayTy, - Address ArrayPtr, const CXXConstructExpr *E, - bool NewPointerIsChecked, - bool ZeroInitialization = false); + void emitCXXAggrConstructorCall(const CXXConstructorDecl *D, + const clang::ArrayType *ArrayTy, + Address ArrayPtr, const CXXConstructExpr *E, + bool NewPointerIsChecked, + bool ZeroInitialization = false); - void buildCXXAggrConstructorCall(const CXXConstructorDecl *ctor, - mlir::Value numElements, Address arrayBase, - const CXXConstructExpr *E, - bool NewPointerIsChecked, - bool zeroInitialize); + void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, + mlir::Value numElements, Address arrayBase, + const CXXConstructExpr *E, + bool NewPointerIsChecked, + bool zeroInitialize); /// Compute the length of an array, even if it's a VLA, and drill down to the /// base element type. - mlir::Value buildArrayLength(const clang::ArrayType *arrayType, - QualType &baseType, Address &addr); + mlir::Value emitArrayLength(const clang::ArrayType *arrayType, + QualType &baseType, Address &addr); - void buildDeleteCall(const FunctionDecl *DeleteFD, mlir::Value Ptr, - QualType DeleteTy, mlir::Value NumElements = nullptr, - CharUnits CookieSize = CharUnits()); + void emitDeleteCall(const FunctionDecl *DeleteFD, mlir::Value Ptr, + QualType DeleteTy, mlir::Value NumElements = nullptr, + CharUnits CookieSize = CharUnits()); - RValue buildBuiltinNewDeleteCall(const FunctionProtoType *type, - const CallExpr *theCallExpr, bool isDelete); + RValue emitBuiltinNewDeleteCall(const FunctionProtoType *type, + const CallExpr *theCallExpr, bool isDelete); - mlir::Value buildDynamicCast(Address ThisAddr, const CXXDynamicCastExpr *DCE); + mlir::Value emitDynamicCast(Address ThisAddr, const CXXDynamicCastExpr *DCE); mlir::Value createLoad(const clang::VarDecl *VD, const char *Name); - mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV, + mlir::Value emitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, + bool isInc, bool isPre); + mlir::Value emitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre); - mlir::Value buildComplexPrePostIncDec(const UnaryOperator *E, LValue LV, - bool isInc, bool isPre); // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. @@ -794,16 +793,16 @@ class CIRGenFunction : public CIRGenTypeCache { // Build a "reference" to a va_list; this is either the address or the value // of the expression, depending on how va_list is defined. - Address buildVAListRef(const Expr *E); + Address emitVAListRef(const Expr *E); /// Emits a CIR variable-argument operation, either /// \c cir.va.start or \c cir.va.end. /// /// \param ArgValue A reference to the \c va_list as emitted by either - /// \c buildVAListRef or \c buildMSVAListRef. + /// \c emitVAListRef or \c emitMSVAListRef. /// /// \param IsStart If \c true, emits \c cir.va.start, otherwise \c cir.va.end. - void buildVAStartEnd(mlir::Value ArgValue, bool IsStart); + void emitVAStartEnd(mlir::Value ArgValue, bool IsStart); /// Generate code to get an argument from the passed in pointer /// and update it accordingly. @@ -811,12 +810,12 @@ class CIRGenFunction : public CIRGenTypeCache { /// \param VE The \c VAArgExpr for which to generate code. /// /// \param VAListAddr Receives a reference to the \c va_list as emitted by - /// either \c buildVAListRef or \c buildMSVAListRef. + /// either \c emitVAListRef or \c emitMSVAListRef. /// /// \returns SSA value with the argument. - mlir::Value buildVAArg(VAArgExpr *VE, Address &VAListAddr); + mlir::Value emitVAArg(VAArgExpr *VE, Address &VAListAddr); - void buildVariablyModifiedType(QualType Ty); + void emitVariablyModifiedType(QualType Ty); struct VlaSizePair { mlir::Value NumElts; @@ -828,7 +827,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// Returns an MLIR value that corresponds to the size, /// in non-variably-sized elements, of a variable length array type, /// plus that largest non-variably-sized element type. Assumes that - /// the type has already been emitted with buildVariablyModifiedType. + /// the type has already been emitted with emitVariablyModifiedType. VlaSizePair getVLASize(const VariableArrayType *vla); VlaSizePair getVLASize(QualType vla); @@ -843,61 +842,58 @@ class CIRGenFunction : public CIRGenTypeCache { /// Given an expression that represents a value lvalue, this method emits /// the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. - RValue buildLoadOfLValue(LValue LV, SourceLocation Loc); - mlir::Value buildLoadOfScalar(Address addr, bool isVolatile, - clang::QualType ty, clang::SourceLocation loc, - LValueBaseInfo baseInfo, - TBAAAccessInfo tbaaInfo, - bool isNontemporal = false); - mlir::Value buildLoadOfScalar(Address addr, bool isVolatile, - clang::QualType ty, mlir::Location loc, - LValueBaseInfo baseInfo, - TBAAAccessInfo tbaaInfo, - bool isNontemporal = false); + RValue emitLoadOfLValue(LValue LV, SourceLocation Loc); + mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, + clang::QualType ty, clang::SourceLocation loc, + LValueBaseInfo baseInfo, TBAAAccessInfo tbaaInfo, + bool isNontemporal = false); + mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, + clang::QualType ty, mlir::Location loc, + LValueBaseInfo baseInfo, TBAAAccessInfo tbaaInfo, + bool isNontemporal = false); int64_t getAccessedFieldNo(unsigned idx, const mlir::ArrayAttr elts); - RValue buildLoadOfExtVectorElementLValue(LValue LV); + RValue emitLoadOfExtVectorElementLValue(LValue LV); - void buildStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst); + void emitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst); - RValue buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc); + RValue emitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc); /// Load a scalar value from an address, taking care to appropriately convert /// from the memory representation to CIR value representation. - mlir::Value buildLoadOfScalar(Address addr, bool isVolatile, - clang::QualType ty, clang::SourceLocation loc, - AlignmentSource source = AlignmentSource::Type, - bool isNontemporal = false) { - return buildLoadOfScalar(addr, isVolatile, ty, loc, LValueBaseInfo(source), - CGM.getTBAAAccessInfo(ty), isNontemporal); + mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, + clang::QualType ty, clang::SourceLocation loc, + AlignmentSource source = AlignmentSource::Type, + bool isNontemporal = false) { + return emitLoadOfScalar(addr, isVolatile, ty, loc, LValueBaseInfo(source), + CGM.getTBAAAccessInfo(ty), isNontemporal); } /// Load a scalar value from an address, taking care to appropriately convert /// form the memory representation to the CIR value representation. The /// l-value must be a simple l-value. - mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); - mlir::Value buildLoadOfScalar(LValue lvalue, mlir::Location Loc); + mlir::Value emitLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); + mlir::Value emitLoadOfScalar(LValue lvalue, mlir::Location Loc); /// Load a complex number from the specified l-value. - mlir::Value buildLoadOfComplex(LValue src, SourceLocation loc); + mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc); - Address buildLoadOfReference(LValue refLVal, mlir::Location loc, - LValueBaseInfo *pointeeBaseInfo = nullptr, - TBAAAccessInfo *pointeeTBAAInfo = nullptr); - LValue buildLoadOfReferenceLValue(LValue RefLVal, mlir::Location Loc); + Address emitLoadOfReference(LValue refLVal, mlir::Location loc, + LValueBaseInfo *pointeeBaseInfo = nullptr, + TBAAAccessInfo *pointeeTBAAInfo = nullptr); + LValue emitLoadOfReferenceLValue(LValue RefLVal, mlir::Location Loc); LValue - buildLoadOfReferenceLValue(Address RefAddr, mlir::Location Loc, - QualType RefTy, - AlignmentSource Source = AlignmentSource::Type) { + emitLoadOfReferenceLValue(Address RefAddr, mlir::Location Loc, QualType RefTy, + AlignmentSource Source = AlignmentSource::Type) { LValue RefLVal = makeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source)); - return buildLoadOfReferenceLValue(RefLVal, Loc); + return emitLoadOfReferenceLValue(RefLVal, Loc); } - void buildImplicitAssignmentOperatorBody(FunctionArgList &Args); + void emitImplicitAssignmentOperatorBody(FunctionArgList &Args); - void buildAggregateStore(mlir::Value Val, Address Dest, bool DestIsVolatile); + void emitAggregateStore(mlir::Value Val, Address Dest, bool DestIsVolatile); - void buildCallArgs( + void emitCallArgs( CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range ArgRange, AbstractCallee AC = AbstractCallee(), unsigned ParamsToSkip = 0, @@ -906,39 +902,39 @@ class CIRGenFunction : public CIRGenTypeCache { void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl); void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl); - LValue buildStmtExprLValue(const StmtExpr *E); + LValue emitStmtExprLValue(const StmtExpr *E); - LValue buildPointerToDataMemberBinaryExpr(const BinaryOperator *E); + LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *E); /// TODO: Add TBAAAccessInfo - Address buildCXXMemberDataPointerAddress( + Address emitCXXMemberDataPointerAddress( const Expr *E, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo); /// Generate a call of the given function, expecting the given /// result type, and using the given argument list which specifies both the /// LLVM arguments and the types they were derived from. - RValue buildCall(const CIRGenFunctionInfo &CallInfo, - const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, - const CallArgList &Args, - cir::CIRCallOpInterface *callOrTryCall, bool IsMustTail, - mlir::Location loc, - std::optional E = std::nullopt); - RValue buildCall(const CIRGenFunctionInfo &CallInfo, - const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, - const CallArgList &Args, - cir::CIRCallOpInterface *callOrTryCall = nullptr, - bool IsMustTail = false) { + RValue emitCall(const CIRGenFunctionInfo &CallInfo, + const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, + const CallArgList &Args, + cir::CIRCallOpInterface *callOrTryCall, bool IsMustTail, + mlir::Location loc, + std::optional E = std::nullopt); + RValue emitCall(const CIRGenFunctionInfo &CallInfo, + const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, + const CallArgList &Args, + cir::CIRCallOpInterface *callOrTryCall = nullptr, + bool IsMustTail = false) { assert(currSrcLoc && "source location must have been set"); - return buildCall(CallInfo, Callee, ReturnValue, Args, callOrTryCall, - IsMustTail, *currSrcLoc, std::nullopt); + return emitCall(CallInfo, Callee, ReturnValue, Args, callOrTryCall, + IsMustTail, *currSrcLoc, std::nullopt); } - RValue buildCall(clang::QualType FnType, const CIRGenCallee &Callee, - const clang::CallExpr *E, ReturnValueSlot returnValue, - mlir::Value Chain = nullptr); + RValue emitCall(clang::QualType FnType, const CIRGenCallee &Callee, + const clang::CallExpr *E, ReturnValueSlot returnValue, + mlir::Value Chain = nullptr); - RValue buildCallExpr(const clang::CallExpr *E, - ReturnValueSlot ReturnValue = ReturnValueSlot()); + RValue emitCallExpr(const clang::CallExpr *E, + ReturnValueSlot ReturnValue = ReturnValueSlot()); Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy); @@ -946,26 +942,26 @@ class CIRGenFunction : public CIRGenTypeCache { return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer(); } - mlir::Value buildRuntimeCall(mlir::Location loc, cir::FuncOp callee, - llvm::ArrayRef args = {}); + mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee, + llvm::ArrayRef args = {}); - void buildInvariantStart(CharUnits Size); + void emitInvariantStart(CharUnits Size); /// Create a check for a function parameter that may potentially be /// declared as non-null. - void buildNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, - AbstractCallee AC, unsigned ParmNum); + void emitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, + AbstractCallee AC, unsigned ParmNum); - void buildCallArg(CallArgList &args, const clang::Expr *E, - clang::QualType ArgType); + void emitCallArg(CallArgList &args, const clang::Expr *E, + clang::QualType ArgType); - LValue buildCallExprLValue(const CallExpr *E); + LValue emitCallExprLValue(const CallExpr *E); - /// Similarly to buildAnyExpr(), however, the result will always be accessible + /// Similarly to emitAnyExpr(), however, the result will always be accessible /// even if no aggregate location is provided. - RValue buildAnyExprToTemp(const clang::Expr *E); + RValue emitAnyExprToTemp(const clang::Expr *E); - CIRGenCallee buildCallee(const clang::Expr *E); + CIRGenCallee emitCallee(const clang::Expr *E); void finishFunction(SourceLocation EndLoc); @@ -973,87 +969,87 @@ class CIRGenFunction : public CIRGenTypeCache { /// result is returned as an RValue struct. If this is an aggregate /// expression, the aggloc/agglocvolatile arguments indicate where the result /// should be returned. - RValue buildAnyExpr(const clang::Expr *E, - AggValueSlot aggSlot = AggValueSlot::ignored(), - bool ignoreResult = false); - - mlir::LogicalResult buildFunctionBody(const clang::Stmt *Body); - mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S); - mlir::LogicalResult buildCoreturnStmt(const CoreturnStmt &S); - - cir::CallOp buildCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr); - cir::CallOp buildCoroAllocBuiltinCall(mlir::Location loc); - cir::CallOp buildCoroBeginBuiltinCall(mlir::Location loc, - mlir::Value coroframeAddr); - cir::CallOp buildCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr); - - RValue buildCoawaitExpr(const CoawaitExpr &E, - AggValueSlot aggSlot = AggValueSlot::ignored(), - bool ignoreResult = false); - RValue buildCoyieldExpr(const CoyieldExpr &E, - AggValueSlot aggSlot = AggValueSlot::ignored(), - bool ignoreResult = false); - RValue buildCoroutineIntrinsic(const CallExpr *E, unsigned int IID); - RValue buildCoroutineFrame(); + RValue emitAnyExpr(const clang::Expr *E, + AggValueSlot aggSlot = AggValueSlot::ignored(), + bool ignoreResult = false); + + mlir::LogicalResult emitFunctionBody(const clang::Stmt *Body); + mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &S); + mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &S); + + cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr); + cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc); + cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, + mlir::Value coroframeAddr); + cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr); + + RValue emitCoawaitExpr(const CoawaitExpr &E, + AggValueSlot aggSlot = AggValueSlot::ignored(), + bool ignoreResult = false); + RValue emitCoyieldExpr(const CoyieldExpr &E, + AggValueSlot aggSlot = AggValueSlot::ignored(), + bool ignoreResult = false); + RValue emitCoroutineIntrinsic(const CallExpr *E, unsigned int IID); + RValue emitCoroutineFrame(); enum class MSVCIntrin; - mlir::Value buildARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, - ReturnValueSlot ReturnValue, - llvm::Triple::ArchType Arch); - mlir::Value buildARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, - ReturnValueSlot ReturnValue, - llvm::Triple::ArchType Arch); - mlir::Value buildCommonNeonBuiltinExpr( + mlir::Value emitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch); + mlir::Value emitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch); + mlir::Value emitCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, llvm::SmallVectorImpl &ops, Address ptrOp0, Address ptrOp1, llvm::Triple::ArchType arch); - mlir::Value buildAlignmentAssumption(mlir::Value ptrValue, QualType ty, - SourceLocation loc, - SourceLocation assumptionLoc, - mlir::IntegerAttr alignment, - mlir::Value offsetValue = nullptr); + mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, + SourceLocation loc, + SourceLocation assumptionLoc, + mlir::IntegerAttr alignment, + mlir::Value offsetValue = nullptr); - mlir::Value buildAlignmentAssumption(mlir::Value ptrValue, const Expr *expr, - SourceLocation assumptionLoc, - mlir::IntegerAttr alignment, - mlir::Value offsetValue = nullptr); + mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr, + SourceLocation assumptionLoc, + mlir::IntegerAttr alignment, + mlir::Value offsetValue = nullptr); /// Build a debug stoppoint if we are emitting debug info. - void buildStopPoint(const Stmt *S); + void emitStopPoint(const Stmt *S); // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. mlir::LogicalResult - buildStmt(const clang::Stmt *S, bool useCurrentScope, - llvm::ArrayRef Attrs = std::nullopt); + emitStmt(const clang::Stmt *S, bool useCurrentScope, + llvm::ArrayRef Attrs = std::nullopt); - mlir::LogicalResult buildSimpleStmt(const clang::Stmt *S, - bool useCurrentScope); + mlir::LogicalResult emitSimpleStmt(const clang::Stmt *S, + bool useCurrentScope); - mlir::LogicalResult buildForStmt(const clang::ForStmt &S); - mlir::LogicalResult buildWhileStmt(const clang::WhileStmt &S); - mlir::LogicalResult buildDoStmt(const clang::DoStmt &S); + mlir::LogicalResult emitForStmt(const clang::ForStmt &S); + mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &S); + mlir::LogicalResult emitDoStmt(const clang::DoStmt &S); mlir::LogicalResult - buildCXXForRangeStmt(const CXXForRangeStmt &S, - llvm::ArrayRef Attrs = std::nullopt); - mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S); + emitCXXForRangeStmt(const CXXForRangeStmt &S, + llvm::ArrayRef Attrs = std::nullopt); + mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &S); - mlir::LogicalResult buildCXXTryStmtUnderScope(const clang::CXXTryStmt &S); - mlir::LogicalResult buildCXXTryStmt(const clang::CXXTryStmt &S); + mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &S); + mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &S); void enterCXXTryStmt(const CXXTryStmt &S, cir::TryOp catchOp, bool IsFnTryBlock = false); void exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); - Address buildCompoundStmt(const clang::CompoundStmt &S, bool getLast = false, - AggValueSlot slot = AggValueSlot::ignored()); + Address emitCompoundStmt(const clang::CompoundStmt &S, bool getLast = false, + AggValueSlot slot = AggValueSlot::ignored()); Address - buildCompoundStmtWithoutScope(const clang::CompoundStmt &S, - bool getLast = false, - AggValueSlot slot = AggValueSlot::ignored()); + emitCompoundStmtWithoutScope(const clang::CompoundStmt &S, + bool getLast = false, + AggValueSlot slot = AggValueSlot::ignored()); GlobalDecl CurSEHParent; bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; } @@ -1073,12 +1069,12 @@ class CIRGenFunction : public CIRGenTypeCache { /// Emit code to compute the specified expression, /// ignoring the result. - void buildIgnoredExpr(const clang::Expr *E); + void emitIgnoredExpr(const clang::Expr *E); - LValue buildArraySubscriptExpr(const clang::ArraySubscriptExpr *E, - bool Accessed = false); + LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *E, + bool Accessed = false); - mlir::LogicalResult buildDeclStmt(const clang::DeclStmt &S); + mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &S); /// Determine whether a return value slot may overlap some other object. AggValueSlot::Overlap_t getOverlapForReturnValue() { @@ -1100,53 +1096,52 @@ class CIRGenFunction : public CIRGenTypeCache { /// addressed later. RValue GetUndefRValue(clang::QualType Ty); - mlir::Value buildFromMemory(mlir::Value Value, clang::QualType Ty); + mlir::Value emitFromMemory(mlir::Value Value, clang::QualType Ty); mlir::Type convertType(clang::QualType T); - mlir::LogicalResult buildAsmStmt(const clang::AsmStmt &S); + mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &S); std::pair - buildAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue, - QualType InputType, std::string &ConstraintStr, - SourceLocation Loc); + emitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue, + QualType InputType, std::string &ConstraintStr, + SourceLocation Loc); std::pair - buildAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr, - std::string &ConstraintStr); + emitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr, + std::string &ConstraintStr); - mlir::LogicalResult buildIfStmt(const clang::IfStmt &S); + mlir::LogicalResult emitIfStmt(const clang::IfStmt &S); - mlir::LogicalResult buildReturnStmt(const clang::ReturnStmt &S); + mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &S); - mlir::LogicalResult buildGotoStmt(const clang::GotoStmt &S); + mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &S); - mlir::LogicalResult buildLabel(const clang::LabelDecl *D); - mlir::LogicalResult buildLabelStmt(const clang::LabelStmt &S); + mlir::LogicalResult emitLabel(const clang::LabelDecl *D); + mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &S); - mlir::LogicalResult buildAttributedStmt(const AttributedStmt &S); + mlir::LogicalResult emitAttributedStmt(const AttributedStmt &S); - mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S); - mlir::LogicalResult buildContinueStmt(const clang::ContinueStmt &S); + mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &S); + mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &S); // OpenMP gen functions: - mlir::LogicalResult buildOMPParallelDirective(const OMPParallelDirective &S); - mlir::LogicalResult buildOMPTaskwaitDirective(const OMPTaskwaitDirective &S); - mlir::LogicalResult - buildOMPTaskyieldDirective(const OMPTaskyieldDirective &S); - mlir::LogicalResult buildOMPBarrierDirective(const OMPBarrierDirective &S); + mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &S); + mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &S); + mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &S); + mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &S); - LValue buildOpaqueValueLValue(const OpaqueValueExpr *e); + LValue emitOpaqueValueLValue(const OpaqueValueExpr *e); /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. - LValue buildLValue(const clang::Expr *E); + LValue emitLValue(const clang::Expr *E); - void buildDecl(const clang::Decl &D); + void emitDecl(const clang::Decl &D); /// Emit local annotations for the local variable V, declared by D. - void buildVarAnnotations(const VarDecl *decl, mlir::Value val); + void emitVarAnnotations(const VarDecl *decl, mlir::Value val); /// If the specified expression does not fold to a constant, or if it does but /// contains a label, return false. If it constant folds return true and set @@ -1168,20 +1163,19 @@ class CIRGenFunction : public CIRGenTypeCache { /// times we expect the condition to evaluate to true based on PGO data. We /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr /// for extra ideas). - mlir::LogicalResult buildIfOnBoolExpr(const clang::Expr *cond, - const clang::Stmt *thenS, - const clang::Stmt *elseS); - cir::IfOp buildIfOnBoolExpr( + mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, + const clang::Stmt *thenS, + const clang::Stmt *elseS); + cir::IfOp emitIfOnBoolExpr( const clang::Expr *cond, llvm::function_ref thenBuilder, mlir::Location thenLoc, llvm::function_ref elseBuilder, std::optional elseLoc = {}); - mlir::Value buildTernaryOnBoolExpr(const clang::Expr *cond, - mlir::Location loc, - const clang::Stmt *thenS, - const clang::Stmt *elseS); - mlir::Value buildOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond); + mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc, + const clang::Stmt *thenS, + const clang::Stmt *elseS); + mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond); class ConstantEmission { // Cannot use mlir::TypedAttr directly here because of bit availability. @@ -1223,14 +1217,14 @@ class CIRGenFunction : public CIRGenTypeCache { /// Emit the computation of the specified expression of scalar type, /// ignoring the result. - mlir::Value buildScalarExpr(const clang::Expr *E); - mlir::Value buildScalarConstant(const ConstantEmission &Constant, Expr *E); + mlir::Value emitScalarExpr(const clang::Expr *E); + mlir::Value emitScalarConstant(const ConstantEmission &Constant, Expr *E); - mlir::Value buildPromotedComplexExpr(const Expr *E, QualType PromotionType); - mlir::Value buildPromotedScalarExpr(const clang::Expr *E, - QualType PromotionType); - mlir::Value buildPromotedValue(mlir::Value result, QualType PromotionType); - mlir::Value buildUnPromotedValue(mlir::Value result, QualType PromotionType); + mlir::Value emitPromotedComplexExpr(const Expr *E, QualType PromotionType); + mlir::Value emitPromotedScalarExpr(const clang::Expr *E, + QualType PromotionType); + mlir::Value emitPromotedValue(mlir::Value result, QualType PromotionType); + mlir::Value emitUnPromotedValue(mlir::Value result, QualType PromotionType); mlir::Type getCIRType(const clang::QualType &type); @@ -1238,23 +1232,23 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::ArrayAttr &value, cir::CaseOpKind &kind); template - mlir::LogicalResult - buildCaseDefaultCascade(const T *stmt, mlir::Type condType, - mlir::ArrayAttr value, cir::CaseOpKind kind, - bool buildingTopLevelCase); - - mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S, - mlir::Type condType, - bool buildingTopLevelCase); + mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, + mlir::ArrayAttr value, + cir::CaseOpKind kind, + bool buildingTopLevelCase); - mlir::LogicalResult buildDefaultStmt(const clang::DefaultStmt &S, - mlir::Type condType, - bool buildingTopLevelCase); + mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &S, + mlir::Type condType, + bool buildingTopLevelCase); - mlir::LogicalResult buildSwitchCase(const clang::SwitchCase &S, + mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &S, + mlir::Type condType, bool buildingTopLevelCase); - mlir::LogicalResult buildSwitchBody(const clang::Stmt *S); + mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &S, + bool buildingTopLevelCase); + + mlir::LogicalResult emitSwitchBody(const clang::Stmt *S); cir::FuncOp generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo); @@ -1305,86 +1299,85 @@ class CIRGenFunction : public CIRGenTypeCache { } }; - LValue buildMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); + LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); /// Emit the alloca and debug information for a /// local variable. Does not emit initialization or destruction. - AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D, - mlir::OpBuilder::InsertPoint = {}); - - void buildAutoVarInit(const AutoVarEmission &emission); - void buildAutoVarCleanups(const AutoVarEmission &emission); - void buildAutoVarTypeCleanup(const AutoVarEmission &emission, - clang::QualType::DestructionKind dtorKind); - - void buildStoreOfScalar(mlir::Value value, LValue lvalue); - void buildStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, - clang::QualType ty, LValueBaseInfo baseInfo, - TBAAAccessInfo tbaaInfo, bool isInit = false, - bool isNontemporal = false); - void buildStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, - QualType ty, - AlignmentSource source = AlignmentSource::Type, - bool isInit = false, bool isNontemporal = false) { - buildStoreOfScalar(value, addr, isVolatile, ty, LValueBaseInfo(source), - CGM.getTBAAAccessInfo(ty), isInit, isNontemporal); + AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &D, + mlir::OpBuilder::InsertPoint = {}); + + void emitAutoVarInit(const AutoVarEmission &emission); + void emitAutoVarCleanups(const AutoVarEmission &emission); + void emitAutoVarTypeCleanup(const AutoVarEmission &emission, + clang::QualType::DestructionKind dtorKind); + + void emitStoreOfScalar(mlir::Value value, LValue lvalue); + void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, + clang::QualType ty, LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, bool isInit = false, + bool isNontemporal = false); + void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, + QualType ty, + AlignmentSource source = AlignmentSource::Type, + bool isInit = false, bool isNontemporal = false) { + emitStoreOfScalar(value, addr, isVolatile, ty, LValueBaseInfo(source), + CGM.getTBAAAccessInfo(ty), isInit, isNontemporal); } - void buildStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit); + void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit); - mlir::Value buildToMemory(mlir::Value Value, clang::QualType Ty); - void buildDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init); + mlir::Value emitToMemory(mlir::Value Value, clang::QualType Ty); + void emitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init); /// Store the specified rvalue into the specified /// lvalue, where both are guaranteed to the have the same type, and that type /// is 'Ty'. - void buildStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false); + void emitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false); - void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, - mlir::Value &Result); + void emitStoreThroughBitfieldLValue(RValue Src, LValue Dst, + mlir::Value &Result); - cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); + cir::BrOp emitBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is /// nonnull, if 1\p LHS is marked _Nonnull. - void buildNullabilityCheck(LValue LHS, mlir::Value RHS, - clang::SourceLocation Loc); + void emitNullabilityCheck(LValue LHS, mlir::Value RHS, + clang::SourceLocation Loc); /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to /// detect undefined behavior when the pointer overflow sanitizer is enabled. /// \p SignedIndices indicates whether any of the GEP indices are signed. /// \p IsSubtraction indicates whether the expression used to form the GEP /// is a subtraction. - mlir::Value buildCheckedInBoundsGEP(mlir::Type ElemTy, mlir::Value Ptr, - llvm::ArrayRef IdxList, - bool SignedIndices, bool IsSubtraction, - SourceLocation Loc); - - void buildScalarInit(const clang::Expr *init, mlir::Location loc, - LValue lvalue, bool capturedByInit = false); - - LValue buildDeclRefLValue(const clang::DeclRefExpr *E); - LValue buildExtVectorElementExpr(const ExtVectorElementExpr *E); - LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E); - LValue buildCompoundAssignmentLValue(const clang::CompoundAssignOperator *E); - LValue buildUnaryOpLValue(const clang::UnaryOperator *E); - LValue buildStringLiteralLValue(const StringLiteral *E); - RValue buildBuiltinExpr(const clang::GlobalDecl GD, unsigned BuiltinID, - const clang::CallExpr *E, - ReturnValueSlot ReturnValue); - RValue buildRotate(const CallExpr *E, bool IsRotateRight); - mlir::Value buildTargetBuiltinExpr(unsigned BuiltinID, - const clang::CallExpr *E, - ReturnValueSlot ReturnValue); + mlir::Value emitCheckedInBoundsGEP(mlir::Type ElemTy, mlir::Value Ptr, + llvm::ArrayRef IdxList, + bool SignedIndices, bool IsSubtraction, + SourceLocation Loc); + + void emitScalarInit(const clang::Expr *init, mlir::Location loc, + LValue lvalue, bool capturedByInit = false); + + LValue emitDeclRefLValue(const clang::DeclRefExpr *E); + LValue emitExtVectorElementExpr(const ExtVectorElementExpr *E); + LValue emitBinaryOperatorLValue(const clang::BinaryOperator *E); + LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *E); + LValue emitUnaryOpLValue(const clang::UnaryOperator *E); + LValue emitStringLiteralLValue(const StringLiteral *E); + RValue emitBuiltinExpr(const clang::GlobalDecl GD, unsigned BuiltinID, + const clang::CallExpr *E, ReturnValueSlot ReturnValue); + RValue emitRotate(const CallExpr *E, bool IsRotateRight); + mlir::Value emitTargetBuiltinExpr(unsigned BuiltinID, + const clang::CallExpr *E, + ReturnValueSlot ReturnValue); // Target specific builtin emission - mlir::Value buildScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, - const CallExpr *E); - mlir::Value buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, - ReturnValueSlot ReturnValue, - llvm::Triple::ArchType Arch); - mlir::Value buildAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E); - mlir::Value buildAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E); - mlir::Value buildX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E); + mlir::Value emitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, + const CallExpr *E); + mlir::Value emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch); + mlir::Value emitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E); + mlir::Value emitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E); + mlir::Value emitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E); /// Given an expression with a pointer type, emit the value and compute our /// best estimate of the alignment of the pointee. @@ -1403,13 +1396,12 @@ class CIRGenFunction : public CIRGenTypeCache { /// reasonable to just ignore the returned alignment when it isn't from an /// explicit source. Address - buildPointerWithAlignment(const clang::Expr *expr, - LValueBaseInfo *baseInfo = nullptr, - TBAAAccessInfo *tbaaInfo = nullptr, - KnownNonNull_t isKnownNonNull = NotKnownNonNull); + emitPointerWithAlignment(const clang::Expr *expr, + LValueBaseInfo *baseInfo = nullptr, + TBAAAccessInfo *tbaaInfo = nullptr, + KnownNonNull_t isKnownNonNull = NotKnownNonNull); - LValue - buildConditionalOperatorLValue(const AbstractConditionalOperator *expr); + LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr); /// Emit an expression as an initializer for an object (variable, field, etc.) /// at the given location. The expression is not necessarily the normal @@ -1421,39 +1413,39 @@ class CIRGenFunction : public CIRGenTypeCache { /// \param lvalue the lvalue to initialize /// \param capturedByInit true if \p D is a __block variable whose address is /// potentially changed by the initializer - void buildExprAsInit(const clang::Expr *init, const clang::ValueDecl *D, - LValue lvalue, bool capturedByInit = false); + void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *D, + LValue lvalue, bool capturedByInit = false); /// Emit code and set up symbol table for a variable declaration with auto, /// register, or no storage class specifier. These turn into simple stack /// objects, globals depending on target. - void buildAutoVarDecl(const clang::VarDecl &D); + void emitAutoVarDecl(const clang::VarDecl &D); /// This method handles emission of any variable declaration /// inside a function, including static vars etc. - void buildVarDecl(const clang::VarDecl &D); + void emitVarDecl(const clang::VarDecl &D); cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &D, cir::GlobalOp GV, cir::GetGlobalOp GVAddr); - void buildStaticVarDecl(const VarDecl &D, cir::GlobalLinkageKind Linkage); + void emitStaticVarDecl(const VarDecl &D, cir::GlobalLinkageKind Linkage); /// Perform the usual unary conversions on the specified /// expression and compare the result against zero, returning an Int1Ty value. mlir::Value evaluateExprAsBool(const clang::Expr *E); - void buildCtorPrologue(const clang::CXXConstructorDecl *CD, - clang::CXXCtorType Type, FunctionArgList &Args); - void buildConstructorBody(FunctionArgList &Args); - void buildDestructorBody(FunctionArgList &Args); - void buildCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, - bool ForVirtualBase, bool Delegating, - Address This, QualType ThisTy); - RValue buildCXXDestructorCall(GlobalDecl Dtor, const CIRGenCallee &Callee, - mlir::Value This, QualType ThisTy, - mlir::Value ImplicitParam, - QualType ImplicitParamTy, const CallExpr *E); + void emitCtorPrologue(const clang::CXXConstructorDecl *CD, + clang::CXXCtorType Type, FunctionArgList &Args); + void emitConstructorBody(FunctionArgList &Args); + void emitDestructorBody(FunctionArgList &Args); + void emitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, + bool ForVirtualBase, bool Delegating, Address This, + QualType ThisTy); + RValue emitCXXDestructorCall(GlobalDecl Dtor, const CIRGenCallee &Callee, + mlir::Value This, QualType ThisTy, + mlir::Value ImplicitParam, + QualType ImplicitParamTy, const CallExpr *E); /// Enter the cleanups necessary to complete the given phase of destruction /// for a destructor. The end result should call destructors on members and @@ -1521,8 +1513,8 @@ class CIRGenFunction : public CIRGenTypeCache { /// If whole-program virtual table optimization is enabled, emit an assumption /// that VTable is a member of RD's type identifier. Or, if vptr CFI is /// enabled, emit a check that VTable is a member of RD's type identifier. - void buildTypeMetadataCodeForVCall(const CXXRecordDecl *RD, - mlir::Value VTable, SourceLocation Loc); + void emitTypeMetadataCodeForVCall(const CXXRecordDecl *RD, mlir::Value VTable, + SourceLocation Loc); /// Return the VTT parameter that should be passed to a base /// constructor/destructor with virtual bases. @@ -1647,15 +1639,14 @@ class CIRGenFunction : public CIRGenTypeCache { /// Emit a conversion from the specified type to the specified destination /// type, both of which are CIR scalar types. - mlir::Value buildScalarConversion(mlir::Value Src, clang::QualType SrcTy, - clang::QualType DstTy, - clang::SourceLocation Loc); + mlir::Value emitScalarConversion(mlir::Value Src, clang::QualType SrcTy, + clang::QualType DstTy, + clang::SourceLocation Loc); /// Emit a conversion from the specified complex type to the specified /// destination type, where the destination type is an LLVM scalar type. - mlir::Value buildComplexToScalarConversion(mlir::Value Src, QualType SrcTy, - QualType DstTy, - SourceLocation Loc); + mlir::Value emitComplexToScalarConversion(mlir::Value Src, QualType SrcTy, + QualType DstTy, SourceLocation Loc); LValue makeAddrLValue(Address addr, clang::QualType ty, LValueBaseInfo baseInfo) { @@ -1674,18 +1665,18 @@ class CIRGenFunction : public CIRGenTypeCache { void initializeVTablePointer(mlir::Location loc, const VPtr &Vptr); AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD); - LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); - LValue buildLValueForBitField(LValue base, const FieldDecl *field); + LValue emitLValueForField(LValue Base, const clang::FieldDecl *Field); + LValue emitLValueForBitField(LValue base, const FieldDecl *field); - /// Like buildLValueForField, excpet that if the Field is a reference, this + /// Like emitLValueForField, excpet that if the Field is a reference, this /// will return the address of the reference and not the address of the value /// stored in the reference. - LValue buildLValueForFieldInitialization(LValue Base, - const clang::FieldDecl *Field, - llvm::StringRef FieldName); + LValue emitLValueForFieldInitialization(LValue Base, + const clang::FieldDecl *Field, + llvm::StringRef FieldName); - void buildInitializerForField(clang::FieldDecl *Field, LValue LHS, - clang::Expr *Init); + void emitInitializerForField(clang::FieldDecl *Field, LValue LHS, + clang::Expr *Init); /// Determine whether the given initializer is trivial in the sense /// that it requires no code to be generated. @@ -1694,13 +1685,13 @@ class CIRGenFunction : public CIRGenTypeCache { // TODO: this can also be abstrated into common AST helpers bool hasBooleanRepresentation(clang::QualType Ty); - void buildCXXThrowExpr(const CXXThrowExpr *E); + void emitCXXThrowExpr(const CXXThrowExpr *E); - RValue buildAtomicExpr(AtomicExpr *E); - void buildAtomicStore(RValue rvalue, LValue lvalue, bool isInit); - void buildAtomicStore(RValue rvalue, LValue lvalue, cir::MemOrder MO, - bool IsVolatile, bool isInit); - void buildAtomicInit(Expr *init, LValue dest); + RValue emitAtomicExpr(AtomicExpr *E); + void emitAtomicStore(RValue rvalue, LValue lvalue, bool isInit); + void emitAtomicStore(RValue rvalue, LValue lvalue, cir::MemOrder MO, + bool IsVolatile, bool isInit); + void emitAtomicInit(Expr *init, LValue dest); /// Return the address of a local variable. Address GetAddrOfLocalVar(const clang::VarDecl *VD) { @@ -1724,23 +1715,23 @@ class CIRGenFunction : public CIRGenTypeCache { /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts. static bool isWrappedCXXThis(const clang::Expr *E); - void buildDelegateCXXConstructorCall(const clang::CXXConstructorDecl *Ctor, - clang::CXXCtorType CtorType, - const FunctionArgList &Args, - clang::SourceLocation Loc); + void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *Ctor, + clang::CXXCtorType CtorType, + const FunctionArgList &Args, + clang::SourceLocation Loc); // It's important not to confuse this and the previous function. Delegating // constructors are the C++11 feature. The constructor delegate optimization // is used to reduce duplication in the base and complete constructors where // they are substantially the same. - void buildDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor, - const FunctionArgList &Args); + void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor, + const FunctionArgList &Args); /// We are performing a delegate call; that is, the current function is /// delegating to another one. Produce a r-value suitable for passing the /// given parameter. - void buildDelegateCallArg(CallArgList &args, const clang::VarDecl *param, - clang::SourceLocation loc); + void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param, + clang::SourceLocation loc); /// Return true if the current function should not be instrumented with /// sanitizers. @@ -1752,18 +1743,18 @@ class CIRGenFunction : public CIRGenTypeCache { bool ShouldInstrumentFunction(); /// TODO(cir): add TBAAAccessInfo - Address buildArrayToPointerDecay(const Expr *Array, - LValueBaseInfo *BaseInfo = nullptr); + Address emitArrayToPointerDecay(const Expr *Array, + LValueBaseInfo *BaseInfo = nullptr); /// Emits the code necessary to evaluate an arbitrary expression into the /// given memory location. - void buildAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, - bool IsInitializer); - void buildAnyExprToExn(const Expr *E, Address Addr); + void emitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, + bool IsInitializer); + void emitAnyExprToExn(const Expr *E, Address Addr); - LValue buildCheckedLValue(const Expr *E, TypeCheckKind TCK); - LValue buildMemberExpr(const MemberExpr *E); - LValue buildCompoundLiteralLValue(const CompoundLiteralExpr *E); + LValue emitCheckedLValue(const Expr *E, TypeCheckKind TCK); + LValue emitMemberExpr(const MemberExpr *E); + LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *E); /// Specifies which type of sanitizer check to apply when handling a /// particular builtin. @@ -1774,7 +1765,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// Emits an argument for a call to a builtin. If the builtin sanitizer is /// enabled, a runtime check specified by \p Kind is also emitted. - mlir::Value buildCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind); + mlir::Value emitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind); /// returns true if aggregate type has a volatile member. /// TODO(cir): this could be a common AST helper between LLVM / CIR. @@ -1787,12 +1778,12 @@ class CIRGenFunction : public CIRGenTypeCache { } /// Emit an aggregate assignment. - void buildAggregateAssign(LValue Dest, LValue Src, QualType EltTy) { + void emitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) { bool IsVolatile = hasVolatileMember(EltTy); - buildAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile); + emitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile); } - LValue buildAggExprToLValue(const Expr *E); + LValue emitAggExprToLValue(const Expr *E); /// Emit an aggregate copy. /// @@ -1801,13 +1792,13 @@ class CIRGenFunction : public CIRGenTypeCache { /// \param MayOverlap Whether the tail padding of the destination might be /// occupied by some other object. More efficient code can often be /// generated if not. - void buildAggregateCopy(LValue Dest, LValue Src, QualType EltTy, - AggValueSlot::Overlap_t MayOverlap, - bool isVolatile = false); + void emitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, + AggValueSlot::Overlap_t MayOverlap, + bool isVolatile = false); /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime /// checking is enabled. Otherwise, just emit an unreachable instruction. - void buildUnreachable(SourceLocation Loc); + void emitUnreachable(SourceLocation Loc); /// /// Cleanups @@ -1829,9 +1820,9 @@ class CIRGenFunction : public CIRGenTypeCache { /// Emits try/catch information for the current EH stack. cir::CallOp callWithExceptionCtx = nullptr; - mlir::Operation *buildLandingPad(cir::TryOp tryOp); - void buildEHResumeBlock(bool isCleanup, mlir::Block *ehResumeBlock, - mlir::Location loc); + mlir::Operation *emitLandingPad(cir::TryOp tryOp); + void emitEHResumeBlock(bool isCleanup, mlir::Block *ehResumeBlock, + mlir::Location loc); mlir::Block *getEHResumeBlock(bool isCleanup, cir::TryOp tryOp); mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope, cir::TryOp tryOp); @@ -1928,8 +1919,8 @@ class CIRGenFunction : public CIRGenTypeCache { }; template - ConditionalInfo buildConditionalBlocks(const AbstractConditionalOperator *E, - const FuncTy &BranchGenFunc); + ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *E, + const FuncTy &BranchGenFunc); // Return true if we're currently emitting one branch or the other of a // conditional expression. @@ -1964,10 +1955,10 @@ class CIRGenFunction : public CIRGenTypeCache { void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray); - void buildArrayDestroy(mlir::Value begin, mlir::Value end, - QualType elementType, CharUnits elementAlign, - Destroyer *destroyer, bool checkZeroLength, - bool useEHCleanup); + void emitArrayDestroy(mlir::Value begin, mlir::Value end, + QualType elementType, CharUnits elementAlign, + Destroyer *destroyer, bool checkZeroLength, + bool useEHCleanup); /// The values of function arguments to use when evaluating /// CXXInheritedCtorInitExprs within this context. @@ -2239,8 +2230,8 @@ class CIRGenFunction : public CIRGenTypeCache { return b; } - cir::ReturnOp buildReturn(mlir::Location loc); - void buildImplicitReturn(); + cir::ReturnOp emitReturn(mlir::Location loc); + void emitImplicitReturn(); public: llvm::ArrayRef getRetBlocks() { return RetBlocks; } diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 4c73215432db..5f00189ef90c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -167,8 +167,8 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { return false; } - void buildInstanceFunctionProlog(SourceLocation Loc, - CIRGenFunction &CGF) override; + void emitInstanceFunctionProlog(SourceLocation Loc, + CIRGenFunction &CGF) override; void addImplicitStructorParams(CIRGenFunction &CGF, QualType &ResTy, FunctionArgList &Params) override; @@ -178,17 +178,17 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { CXXDtorType Type, bool ForVirtualBase, bool Delegating) override; - void buildCXXConstructors(const clang::CXXConstructorDecl *D) override; - void buildCXXDestructors(const clang::CXXDestructorDecl *D) override; - void buildCXXStructor(clang::GlobalDecl GD) override; - void buildDestructorCall(CIRGenFunction &CGF, const CXXDestructorDecl *DD, - CXXDtorType Type, bool ForVirtualBase, - bool Delegating, Address This, - QualType ThisTy) override; + void emitCXXConstructors(const clang::CXXConstructorDecl *D) override; + void emitCXXDestructors(const clang::CXXDestructorDecl *D) override; + void emitCXXStructor(clang::GlobalDecl GD) override; + void emitDestructorCall(CIRGenFunction &CGF, const CXXDestructorDecl *DD, + CXXDtorType Type, bool ForVirtualBase, + bool Delegating, Address This, + QualType ThisTy) override; void registerGlobalDtor(CIRGenFunction &CGF, const VarDecl *D, cir::FuncOp dtor, mlir::Value Addr) override; - virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) override; - virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) override; + virtual void emitRethrow(CIRGenFunction &CGF, bool isNoReturn) override; + virtual void emitThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) override; CatchTypeInfo getAddrOfCXXCatchHandlerType(mlir::Location loc, QualType Ty, QualType CatchHandlerType) override { @@ -304,7 +304,7 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { return Args.size() - 1; } - void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) override; + void emitBadCastCall(CIRGenFunction &CGF, mlir::Location loc) override; mlir::Value getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &CGF, @@ -316,10 +316,10 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { // expressions are lowered to `cir.dyn_cast` ops instead of calls to runtime // functions. So during CIRGen we don't need the `emitDynamicCastCall` // function that clang CodeGen has. - mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, - QualType SrcRecordTy, QualType DestRecordTy, - cir::PointerType DestCIRTy, bool isRefCast, - Address Src) override; + mlir::Value emitDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, QualType DestRecordTy, + cir::PointerType DestCIRTy, bool isRefCast, + Address Src) override; cir::MethodAttr buildVirtualMethodAttr(cir::MethodType MethodTy, const CXXMethodDecl *MD) override; @@ -499,10 +499,10 @@ static void emitConstructorDestructorAlias(CIRGenModule &CGM, assert(Aliasee && "expected cir.func"); // Populate actual alias. - CGM.buildAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage); + CGM.emitAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage); } -void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) { +void CIRGenItaniumCXXABI::emitCXXStructor(GlobalDecl GD) { auto *MD = cast(GD.getDecl()); auto *CD = dyn_cast(MD); const CXXDestructorDecl *DD = CD ? nullptr : cast(MD); @@ -594,8 +594,8 @@ void CIRGenCXXABI::setCXXABIThisValue(CIRGenFunction &CGF, CGF.CXXABIThisValue = ThisPtr; } -void CIRGenItaniumCXXABI::buildInstanceFunctionProlog(SourceLocation Loc, - CIRGenFunction &CGF) { +void CIRGenItaniumCXXABI::emitInstanceFunctionProlog(SourceLocation Loc, + CIRGenFunction &CGF) { // Naked functions have no prolog. if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr()) llvm_unreachable("NYI"); @@ -624,36 +624,36 @@ void CIRGenItaniumCXXABI::buildInstanceFunctionProlog(SourceLocation Loc, llvm_unreachable("NYI"); } -void CIRGenItaniumCXXABI::buildCXXConstructors(const CXXConstructorDecl *D) { +void CIRGenItaniumCXXABI::emitCXXConstructors(const CXXConstructorDecl *D) { // Just make sure we're in sync with TargetCXXABI. assert(CGM.getTarget().getCXXABI().hasConstructorVariants()); // The constructor used for constructing this as a base class; // ignores virtual bases. - CGM.buildGlobal(GlobalDecl(D, Ctor_Base)); + CGM.emitGlobal(GlobalDecl(D, Ctor_Base)); // The constructor used for constructing this as a complete class; // constructs the virtual bases, then calls the base constructor. if (!D->getParent()->isAbstract()) { // We don't need to emit the complete ctro if the class is abstract. - CGM.buildGlobal(GlobalDecl(D, Ctor_Complete)); + CGM.emitGlobal(GlobalDecl(D, Ctor_Complete)); } } -void CIRGenItaniumCXXABI::buildCXXDestructors(const CXXDestructorDecl *D) { +void CIRGenItaniumCXXABI::emitCXXDestructors(const CXXDestructorDecl *D) { // The destructor used for destructing this as a base class; ignores // virtual bases. - CGM.buildGlobal(GlobalDecl(D, Dtor_Base)); + CGM.emitGlobal(GlobalDecl(D, Dtor_Base)); // The destructor used for destructing this as a most-derived class; // call the base destructor and then destructs any virtual bases. - CGM.buildGlobal(GlobalDecl(D, Dtor_Complete)); + CGM.emitGlobal(GlobalDecl(D, Dtor_Complete)); // The destructor in a virtual table is always a 'deleting' // destructor, which calls the complete destructor and then uses the // appropriate operator delete. if (D->isVirtual()) - CGM.buildGlobal(GlobalDecl(D, Dtor_Deleting)); + CGM.emitGlobal(GlobalDecl(D, Dtor_Deleting)); } namespace { @@ -769,8 +769,8 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam, llvm_unreachable("NYI"); return; case cir::TEK_Scalar: { - auto exnLoad = CGF.buildLoadOfScalar(srcLV, catchParam.getLoc()); - CGF.buildStoreOfScalar(exnLoad, destLV, /*init*/ true); + auto exnLoad = CGF.emitLoadOfScalar(srcLV, catchParam.getLoc()); + CGF.emitStoreOfScalar(exnLoad, destLV, /*init*/ true); return; } case cir::TEK_Aggregate: @@ -839,10 +839,10 @@ void CIRGenItaniumCXXABI::emitBeginCatch(CIRGenFunction &CGF, // Emit the local. Make sure the alloca's superseed the current scope, since // these are going to be consumed by `cir.catch`, which is not within the // current scope. - auto var = CGF.buildAutoVarAlloca(*CatchParam, getCatchParamAllocaIP()); + auto var = CGF.emitAutoVarAlloca(*CatchParam, getCatchParamAllocaIP()); InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc()); // FIXME(cir): double check cleanups here are happening in the right blocks. - CGF.buildAutoVarCleanups(var); + CGF.emitAutoVarCleanups(var); } cir::GlobalOp CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, @@ -905,7 +905,7 @@ CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer( if (CGF.shouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) { llvm_unreachable("NYI"); } else { - CGF.buildTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc); + CGF.emitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc); mlir::Value VFuncLoad; if (CGM.getItaniumVTableContext().isRelativeLayout()) { @@ -2135,7 +2135,7 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT, // defined in headers but with a strong definition only in a shared // library. if (!isDeclarationForLinker || CGM.getCodeGenOpts().WholeProgramVTables) { - CGM.buildVTableTypeMetadata(RD, VTable, VTLayout); + CGM.emitVTableTypeMetadata(RD, VTable, VTLayout); // For available_externally definitions, add the vtable to // @llvm.compiler.used so that it isn't deleted before whole program // analysis. @@ -2154,7 +2154,7 @@ void CIRGenItaniumCXXABI::emitVirtualInheritanceTables( const CXXRecordDecl *RD) { CIRGenVTables &VTables = CGM.getVTables(); auto VTT = VTables.getAddrOfVTT(RD); - VTables.buildVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD); + VTables.emitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD); } /// What sort of uniqueness rules should we use for the RTTI for the @@ -2185,7 +2185,7 @@ CIRGenItaniumCXXABI::classifyRTTIUniqueness( return RUK_NonUniqueVisible; } -void CIRGenItaniumCXXABI::buildDestructorCall( +void CIRGenItaniumCXXABI::emitDestructorCall( CIRGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy) { GlobalDecl GD(DD, Type); @@ -2199,8 +2199,8 @@ void CIRGenItaniumCXXABI::buildDestructorCall( else Callee = CIRGenCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD); - CGF.buildCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy, - nullptr); + CGF.emitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy, + nullptr); } void CIRGenItaniumCXXABI::registerGlobalDtor(CIRGenFunction &CGF, @@ -2227,13 +2227,13 @@ mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam( return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating); } -void CIRGenItaniumCXXABI::buildRethrow(CIRGenFunction &CGF, bool isNoReturn) { +void CIRGenItaniumCXXABI::emitRethrow(CIRGenFunction &CGF, bool isNoReturn) { // void __cxa_rethrow(); llvm_unreachable("NYI"); } -void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, - const CXXThrowExpr *E) { +void CIRGenItaniumCXXABI::emitThrow(CIRGenFunction &CGF, + const CXXThrowExpr *E) { // This differs a bit from LLVM codegen, CIR has native operations for some // cxa functions, and defers allocation size computation, always pass the dtor // symbol, etc. CIRGen also does not use getAllocateExceptionFn / getThrowFn. @@ -2254,7 +2254,7 @@ void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF, // Build expression and store its result into exceptionPtr. CharUnits exnAlign = CGF.getContext().getExnObjectAlignment(); - CGF.buildAnyExprToExn(E->getSubExpr(), Address(exceptionPtr, exnAlign)); + CGF.emitAnyExprToExn(E->getSubExpr(), Address(exceptionPtr, exnAlign)); // Get the RTTI symbol address. auto typeInfo = mlir::dyn_cast_if_present( @@ -2333,18 +2333,18 @@ static cir::FuncOp getBadCastFn(CIRGenFunction &CGF) { return CGF.CGM.createRuntimeFunction(FTy, "__cxa_bad_cast"); } -static void buildCallToBadCast(CIRGenFunction &CGF, mlir::Location loc) { +static void emitCallToBadCast(CIRGenFunction &CGF, mlir::Location loc) { // TODO(cir): set the calling convention to the runtime function. assert(!cir::MissingFeatures::setCallingConv()); - CGF.buildRuntimeCall(loc, getBadCastFn(CGF)); + CGF.emitRuntimeCall(loc, getBadCastFn(CGF)); CGF.getBuilder().create(loc); CGF.getBuilder().clearInsertionPoint(); } -void CIRGenItaniumCXXABI::buildBadCastCall(CIRGenFunction &CGF, - mlir::Location loc) { - buildCallToBadCast(CGF, loc); +void CIRGenItaniumCXXABI::emitBadCastCall(CIRGenFunction &CGF, + mlir::Location loc) { + emitCallToBadCast(CGF, loc); } static CharUnits computeOffsetHint(ASTContext &Context, @@ -2418,8 +2418,8 @@ static cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { return CGF.CGM.createRuntimeFunction(FTy, "__dynamic_cast"); } -static Address buildDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc, - QualType SrcRecordTy, Address Src) { +static Address emitDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, Address Src) { auto vtableUsesRelativeLayout = CGF.CGM.getItaniumVTableContext().isRelativeLayout(); auto ptr = CGF.getBuilder().createDynCastToVoid(Loc, Src.getPointer(), @@ -2427,11 +2427,12 @@ static Address buildDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc, return Address{ptr, Src.getAlignment()}; } -static mlir::Value -buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, - mlir::Location Loc, QualType SrcRecordTy, - QualType DestRecordTy, cir::PointerType DestCIRTy, - bool IsRefCast, Address Src) { +static mlir::Value emitExactDynamicCast(CIRGenItaniumCXXABI &ABI, + CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, + QualType DestRecordTy, + cir::PointerType DestCIRTy, + bool IsRefCast, Address Src) { // Find all the inheritance paths from SrcRecordTy to DestRecordTy. const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl(); @@ -2472,7 +2473,7 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, // object and see if it's a DestDecl. Note that the most-derived object // must be at least as aligned as this base class subobject, and must // have a vptr at offset 0. - Src = buildDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src); + Src = emitDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src); SrcDecl = DestDecl; Offset = CharUnits::Zero(); break; @@ -2484,7 +2485,7 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, mlir::Value NullPtrValue = CGF.getBuilder().getNullPtr(DestCIRTy, Loc); if (IsRefCast) { auto *CurrentRegion = CGF.getBuilder().getBlock()->getParent(); - buildCallToBadCast(CGF, Loc); + emitCallToBadCast(CGF, Loc); // The call to bad_cast will terminate the block. Create a new block to // hold any follow up code. @@ -2517,7 +2518,7 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, mlir::Value Success = CGF.getBuilder().createCompare(Loc, cir::CmpOpKind::eq, SrcVPtr, ExpectedVPtr); - auto buildCastResult = [&] { + auto emitCastResult = [&] { if (Offset->isZero()) return CGF.getBuilder().createBitcast(Src.getPointer(), DestCIRTy); @@ -2539,16 +2540,16 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, mlir::Value Failed = CGF.getBuilder().createNot(Success); CGF.getBuilder().create(Loc, Failed, /*withElseRegion=*/false, [&](mlir::OpBuilder &, mlir::Location) { - buildCallToBadCast(CGF, Loc); + emitCallToBadCast(CGF, Loc); }); - return buildCastResult(); + return emitCastResult(); } return CGF.getBuilder() .create( Loc, Success, [&](mlir::OpBuilder &, mlir::Location) { - auto Result = buildCastResult(); + auto Result = emitCastResult(); CGF.getBuilder().createYield(Loc, Result); }, [&](mlir::OpBuilder &, mlir::Location) { @@ -2559,10 +2560,10 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF, .getResult(); } -static cir::DynamicCastInfoAttr buildDynamicCastInfo(CIRGenFunction &CGF, - mlir::Location Loc, - QualType SrcRecordTy, - QualType DestRecordTy) { +static cir::DynamicCastInfoAttr emitDynamicCastInfo(CIRGenFunction &CGF, + mlir::Location Loc, + QualType SrcRecordTy, + QualType DestRecordTy) { auto srcRtti = mlir::cast( CGF.CGM.getAddrOfRTTIDescriptor(Loc, SrcRecordTy)); auto destRtti = mlir::cast( @@ -2584,26 +2585,26 @@ static cir::DynamicCastInfoAttr buildDynamicCastInfo(CIRGenFunction &CGF, badCastFuncRef, offsetHintAttr); } -mlir::Value CIRGenItaniumCXXABI::buildDynamicCast(CIRGenFunction &CGF, - mlir::Location Loc, - QualType SrcRecordTy, - QualType DestRecordTy, - cir::PointerType DestCIRTy, - bool isRefCast, Address Src) { +mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &CGF, + mlir::Location Loc, + QualType SrcRecordTy, + QualType DestRecordTy, + cir::PointerType DestCIRTy, + bool isRefCast, Address Src) { bool isCastToVoid = DestRecordTy.isNull(); assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference"); if (isCastToVoid) - return buildDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src).getPointer(); + return emitDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src).getPointer(); // If the destination is effectively final, the cast succeeds if and only // if the dynamic type of the pointer is exactly the destination type. if (DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() && CGF.CGM.getCodeGenOpts().OptimizationLevel > 0) - return buildExactDynamicCast(*this, CGF, Loc, SrcRecordTy, DestRecordTy, - DestCIRTy, isRefCast, Src); + return emitExactDynamicCast(*this, CGF, Loc, SrcRecordTy, DestRecordTy, + DestCIRTy, isRefCast, Src); - auto castInfo = buildDynamicCastInfo(CGF, Loc, SrcRecordTy, DestRecordTy); + auto castInfo = emitDynamicCastInfo(CGF, Loc, SrcRecordTy, DestRecordTy); return CGF.getBuilder().createDynCast(Loc, Src.getPointer(), DestCIRTy, isRefCast, castInfo); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c8fecd3f20ee..a1b1e9293c48 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -474,7 +474,7 @@ void CIRGenModule::setDSOLocal(CIRGlobalValueInterface GV) const { GV.setDSOLocal(shouldAssumeDSOLocal(*this, GV)); } -void CIRGenModule::buildGlobal(GlobalDecl GD) { +void CIRGenModule::emitGlobal(GlobalDecl GD) { llvm::TimeTraceScope scope("build CIR Global", [&]() -> std::string { auto *ND = dyn_cast(GD.getDecl()); if (!ND) @@ -560,7 +560,7 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { // to benefit from cache locality. if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) { // Emit the definition if it can't be deferred. - buildGlobalDefinition(GD); + emitGlobalDefinition(GD); return; } @@ -587,8 +587,8 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) { } } -void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD, - mlir::Operation *Op) { +void CIRGenModule::emitGlobalFunctionDefinition(GlobalDecl GD, + mlir::Operation *Op) { auto const *D = cast(GD.getDecl()); // Compute the function info and CIR type. @@ -1113,8 +1113,8 @@ void CIRGenModule::maybeHandleStaticInExternC(const SomeDecl *D, assert(0 && "not implemented"); } -void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, - bool IsTentative) { +void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *D, + bool IsTentative) { // TODO(cir): // OpenCL global variables of sampler type are translated to function calls, // therefore no need to be translated. @@ -1356,7 +1356,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, // Emit the initializer function if necessary. if (NeedsGlobalCtor || NeedsGlobalDtor) { globalOpContext = GV; - buildCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor); + emitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor); globalOpContext = nullptr; } @@ -1366,7 +1366,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D, assert(!cir::MissingFeatures::generateDebugInfo()); } -void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { +void CIRGenModule::emitGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { const auto *D = cast(GD.getDecl()); if (const auto *FD = dyn_cast(D)) { // At -O0, don't generate CIR for functions with available_externally @@ -1378,29 +1378,29 @@ void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) { // Make sure to emit the definition(s) before we emit the thunks. This is // necessary for the generation of certain thunks. if (isa(Method) || isa(Method)) - ABI->buildCXXStructor(GD); + ABI->emitCXXStructor(GD); else if (FD->isMultiVersion()) llvm_unreachable("NYI"); else - buildGlobalFunctionDefinition(GD, Op); + emitGlobalFunctionDefinition(GD, Op); if (Method->isVirtual()) - getVTables().buildThunks(GD); + getVTables().emitThunks(GD); return; } if (FD->isMultiVersion()) llvm_unreachable("NYI"); - buildGlobalFunctionDefinition(GD, Op); + emitGlobalFunctionDefinition(GD, Op); return; } if (const auto *VD = dyn_cast(D)) { - return buildGlobalVarDefinition(VD, !VD->hasDefinition()); + return emitGlobalVarDefinition(VD, !VD->hasDefinition()); } - llvm_unreachable("Invalid argument to buildGlobalDefinition()"); + llvm_unreachable("Invalid argument to emitGlobalDefinition()"); } mlir::Attribute @@ -1564,7 +1564,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, return builder.getGlobalViewAttr(PtrTy, GV); } -void CIRGenModule::buildDeclContext(const DeclContext *DC) { +void CIRGenModule::emitDeclContext(const DeclContext *DC) { for (auto *I : DC->decls()) { // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope // are themselves considered "top-level", so EmitTopLevelDecl on an @@ -1574,17 +1574,17 @@ void CIRGenModule::buildDeclContext(const DeclContext *DC) { if (auto *OID = dyn_cast(I)) llvm_unreachable("NYI"); - buildTopLevelDecl(I); + emitTopLevelDecl(I); } } -void CIRGenModule::buildLinkageSpec(const LinkageSpecDecl *LSD) { +void CIRGenModule::emitLinkageSpec(const LinkageSpecDecl *LSD) { if (LSD->getLanguage() != LinkageSpecLanguageIDs::C && LSD->getLanguage() != LinkageSpecLanguageIDs::CXX) { llvm_unreachable("unsupported linkage spec"); return; } - buildDeclContext(LSD); + emitDeclContext(LSD); } mlir::Operation * @@ -1704,7 +1704,7 @@ CIRGenModule::getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *expr, } // Emit code for a single top level declaration. -void CIRGenModule::buildTopLevelDecl(Decl *decl) { +void CIRGenModule::emitTopLevelDecl(Decl *decl) { // Ignore dependent declarations if (decl->isTemplated()) return; @@ -1716,7 +1716,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { switch (decl->getKind()) { default: - llvm::errs() << "buildTopLevelDecl codegen for decl kind '" + llvm::errs() << "emitTopLevelDecl codegen for decl kind '" << decl->getDeclKindName() << "' not implemented\n"; assert(false && "Not yet implemented"); @@ -1727,13 +1727,13 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { for (DeclContext::decl_iterator D = TU->decls_begin(), DEnd = TU->decls_end(); D != DEnd; ++D) - buildTopLevelDecl(*D); + emitTopLevelDecl(*D); return; } case Decl::Var: case Decl::Decomposition: case Decl::VarTemplateSpecialization: - buildGlobal(cast(decl)); + emitGlobal(cast(decl)); assert(!isa(decl) && "not implemented"); // if (auto *DD = dyn_cast(decl)) // for (auto *B : DD->bindings()) @@ -1744,12 +1744,12 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { case Decl::CXXConversion: case Decl::CXXMethod: case Decl::Function: - buildGlobal(cast(decl)); + emitGlobal(cast(decl)); assert(!codeGenOpts.CoverageMapping && "Coverage Mapping NYI"); break; // C++ Decls case Decl::Namespace: - buildDeclContext(cast(decl)); + emitDeclContext(cast(decl)); break; case Decl::ClassTemplateSpecialization: { // const auto *Spec = cast(decl); @@ -1761,7 +1761,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { // TODO: Handle debug info as CodeGenModule.cpp does for (auto *childDecl : crd->decls()) if (isa(childDecl) || isa(childDecl)) - buildTopLevelDecl(childDecl); + emitTopLevelDecl(childDecl); break; } // No code generation needed. @@ -1783,10 +1783,10 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); break; case Decl::CXXConstructor: - getCXXABI().buildCXXConstructors(cast(decl)); + getCXXABI().emitCXXConstructors(cast(decl)); break; case Decl::CXXDestructor: - getCXXABI().buildCXXDestructors(cast(decl)); + getCXXABI().emitCXXDestructors(cast(decl)); break; case Decl::StaticAssert: @@ -1794,7 +1794,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) { break; case Decl::LinkageSpec: - buildLinkageSpec(cast(decl)); + emitLinkageSpec(cast(decl)); break; case Decl::Typedef: @@ -2119,10 +2119,10 @@ cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { return getCIRLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false); } -void CIRGenModule::buildAliasForGlobal(StringRef mangledName, - mlir::Operation *op, GlobalDecl aliasGD, - cir::FuncOp aliasee, - cir::GlobalLinkageKind linkage) { +void CIRGenModule::emitAliasForGlobal(StringRef mangledName, + mlir::Operation *op, GlobalDecl aliasGD, + cir::FuncOp aliasee, + cir::GlobalLinkageKind linkage) { auto *aliasFD = dyn_cast(aliasGD.getDecl()); assert(aliasFD && "expected FunctionDecl"); @@ -2303,7 +2303,7 @@ StringRef CIRGenModule::getMangledName(GlobalDecl GD) { return MangledDeclNames[CanonicalGD] = Result.first->first(); } -void CIRGenModule::buildTentativeDefinition(const VarDecl *D) { +void CIRGenModule::emitTentativeDefinition(const VarDecl *D) { assert(!D->getInit() && "Cannot emit definite definitions here!"); StringRef MangledName = getMangledName(D); @@ -2331,7 +2331,7 @@ void CIRGenModule::buildTentativeDefinition(const VarDecl *D) { } // The tentative definition is the only definition. - buildGlobalVarDefinition(D); + emitGlobalVarDefinition(D); } void CIRGenModule::setGlobalVisibility(mlir::Operation *GV, @@ -2817,7 +2817,7 @@ mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { return mlir::FusedLoc::get(locs, metadata, &getMLIRContext()); } -void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { +void CIRGenModule::emitGlobalDecl(clang::GlobalDecl &D) { // We should call GetAddrOfGlobal with IsForDefinition set to true in order // to get a Value with exactly the type we need, not something that might // have been created for another decl with the same mangled name but @@ -2865,10 +2865,10 @@ void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { return; // Otherwise, emit the definition and move on to the next one. - buildGlobalDefinition(D, Op); + emitGlobalDefinition(D, Op); } -void CIRGenModule::buildDeferred(unsigned recursionLimit) { +void CIRGenModule::emitDeferred(unsigned recursionLimit) { // Emit deferred declare target declarations if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) getOpenMPRuntime().emitDeferredTargetDecls(); @@ -2878,7 +2878,7 @@ void CIRGenModule::buildDeferred(unsigned recursionLimit) { // static function, iterate until no changes are made. if (!DeferredVTables.empty()) { - buildDeferredVTables(); + emitDeferredVTables(); // Emitting a vtable doesn't directly cause more vtables to // become deferred, although it can cause functions to be @@ -2897,7 +2897,7 @@ void CIRGenModule::buildDeferred(unsigned recursionLimit) { if (DeferredDeclsToEmit.empty()) return; - // Grab the list of decls to emit. If buildGlobalDefinition schedules more + // Grab the list of decls to emit. If emitGlobalDefinition schedules more // work, it will not interfere with this. std::vector CurDeclsToEmit; CurDeclsToEmit.swap(DeferredDeclsToEmit); @@ -2913,23 +2913,23 @@ void CIRGenModule::buildDeferred(unsigned recursionLimit) { continue; } - buildGlobalDecl(D); + emitGlobalDecl(D); // If we found out that we need to emit more decls, do that recursively. // This has the advantage that the decls are emitted in a DFS and related // ones are close together, which is convenient for testing. if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) { - buildDeferred(recursionLimit); + emitDeferred(recursionLimit); assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty()); } } } -void CIRGenModule::buildDefaultMethods() { +void CIRGenModule::emitDefaultMethods() { // Differently from DeferredDeclsToEmit, there's no recurrent use of // DefaultMethodsToEmit, so use it directly for emission. for (auto &D : DefaultMethodsToEmit) - buildGlobalDecl(D); + emitGlobalDecl(D); } mlir::IntegerAttr CIRGenModule::getSize(CharUnits size) { @@ -2964,32 +2964,32 @@ CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { } void CIRGenModule::Release() { - buildDeferred(getCodeGenOpts().ClangIRBuildDeferredThreshold); - // TODO: buildVTablesOpportunistically(); + emitDeferred(getCodeGenOpts().ClangIRBuildDeferredThreshold); + // TODO: emitVTablesOpportunistically(); // TODO: applyGlobalValReplacements(); applyReplacements(); // TODO: checkAliases(); - // TODO: buildMultiVersionFunctions(); - buildCXXGlobalInitFunc(); - // TODO: buildCXXGlobalCleanUpFunc(); + // TODO: emitMultiVersionFunctions(); + emitCXXGlobalInitFunc(); + // TODO: emitCXXGlobalCleanUpFunc(); // TODO: registerGlobalDtorsWithAtExit(); - // TODO: buildCXXThreadLocalInitFunc(); + // TODO: emitCXXThreadLocalInitFunc(); // TODO: ObjCRuntime if (astCtx.getLangOpts().CUDA) { llvm_unreachable("NYI"); } // TODO: OpenMPRuntime // TODO: PGOReader - // TODO: buildCtorList(GlobalCtors); + // TODO: emitCtorList(GlobalCtors); // TODO: builtCtorList(GlobalDtors); - buildGlobalAnnotations(); - // TODO: buildDeferredUnusedCoverageMappings(); + emitGlobalAnnotations(); + // TODO: emitDeferredUnusedCoverageMappings(); // TODO: CIRGenPGO // TODO: CoverageMapping if (getCodeGenOpts().SanitizeCfiCrossDso) { llvm_unreachable("NYI"); } - // TODO: buildAtAvailableLinkGuard(); + // TODO: emitAtAvailableLinkGuard(); if (astCtx.getTargetInfo().getTriple().isWasm() && !astCtx.getTargetInfo().getTriple().isOSEmscripten()) { llvm_unreachable("NYI"); @@ -3001,18 +3001,18 @@ void CIRGenModule::Release() { llvm_unreachable("NYI"); } - // TODO: buildLLVMUsed(); + // TODO: emitLLVMUsed(); // TODO: SanStats if (getCodeGenOpts().Autolink) { - // TODO: buildModuleLinkOptions + // TODO: emitModuleLinkOptions } // Emit OpenCL specific module metadata: OpenCL/SPIR version. if (langOpts.CUDAIsDevice && getTriple().isSPIRV()) llvm_unreachable("CUDA SPIR-V NYI"); if (langOpts.OpenCL) { - buildOpenCLMetadata(); + emitOpenCLMetadata(); // Emit SPIR version. if (getTriple().isSPIR()) llvm_unreachable("SPIR target NYI"); @@ -3205,8 +3205,8 @@ void CIRGenModule::applyReplacements() { } } -void CIRGenModule::buildExplicitCastExprType(const ExplicitCastExpr *E, - CIRGenFunction *CGF) { +void CIRGenModule::emitExplicitCastExprType(const ExplicitCastExpr *E, + CIRGenFunction *CGF) { // Bind VLAs in the cast type. if (CGF && E->getType()->isVariablyModifiedType()) llvm_unreachable("NYI"); @@ -3226,7 +3226,7 @@ void CIRGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { llvm_unreachable("NYI"); } - buildTopLevelDecl(VD); + emitTopLevelDecl(VD); } cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( @@ -3283,9 +3283,9 @@ bool CIRGenModule::shouldOpportunisticallyEmitVTables() { return codeGenOpts.OptimizationLevel > 0; } -void CIRGenModule::buildVTableTypeMetadata(const CXXRecordDecl *RD, - cir::GlobalOp VTable, - const VTableLayout &VTLayout) { +void CIRGenModule::emitVTableTypeMetadata(const CXXRecordDecl *RD, + cir::GlobalOp VTable, + const VTableLayout &VTLayout) { if (!getCodeGenOpts().LTOUnit) return; llvm_unreachable("NYI"); @@ -3440,7 +3440,7 @@ LangAS CIRGenModule::getGlobalVarAddressSpace(const VarDecl *D) { return getTargetCIRGenInfo().getGlobalVarAddressSpace(*this, D); } -mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(const AnnotateAttr *attr) { +mlir::ArrayAttr CIRGenModule::emitAnnotationArgs(const AnnotateAttr *attr) { ArrayRef exprs = {attr->args_begin(), attr->args_size()}; if (exprs.empty()) { return mlir::ArrayAttr::get(&getMLIRContext(), {}); @@ -3482,9 +3482,9 @@ mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(const AnnotateAttr *attr) { } cir::AnnotationAttr -CIRGenModule::buildAnnotateAttr(const clang::AnnotateAttr *aa) { +CIRGenModule::emitAnnotateAttr(const clang::AnnotateAttr *aa) { mlir::StringAttr annoGV = builder.getStringAttr(aa->getAnnotation()); - mlir::ArrayAttr args = buildAnnotationArgs(aa); + mlir::ArrayAttr args = emitAnnotationArgs(aa); return cir::AnnotationAttr::get(&getMLIRContext(), annoGV, args); } @@ -3495,14 +3495,14 @@ void CIRGenModule::addGlobalAnnotations(const ValueDecl *d, "annotation only on globals"); llvm::SmallVector annotations; for (auto *i : d->specific_attrs()) - annotations.push_back(buildAnnotateAttr(i)); + annotations.push_back(emitAnnotateAttr(i)); if (auto global = dyn_cast(gv)) global.setAnnotationsAttr(builder.getArrayAttr(annotations)); else if (auto func = dyn_cast(gv)) func.setAnnotationsAttr(builder.getArrayAttr(annotations)); } -void CIRGenModule::buildGlobalAnnotations() { +void CIRGenModule::emitGlobalAnnotations() { for (const auto &[mangledName, vd] : deferredAnnotations) { mlir::Operation *gv = getGlobalValue(mangledName); if (gv) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 77abb80bbc77..961a999990b6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -112,7 +112,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Holds the OpenMP runtime std::unique_ptr openMPRuntime; - /// Per-function codegen information. Updated everytime buildCIR is called + /// Per-function codegen information. Updated everytime emitCIR is called /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; @@ -182,7 +182,7 @@ class CIRGenModule : public CIRGenTypeCache { std::vector CXXGlobalInits; /// Emit the function that initializes C++ globals. - void buildCXXGlobalInitFunc(); + void emitCXXGlobalInitFunc(); /// Track whether the CIRGenModule is currently building an initializer /// for a global (e.g. as opposed to a regular cir.func). @@ -340,10 +340,10 @@ class CIRGenModule : public CIRGenTypeCache { cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment); /// Emit any vtables which we deferred and still have a use for. - void buildDeferredVTables(); + void emitDeferredVTables(); bool shouldOpportunisticallyEmitVTables(); - void buildVTable(CXXRecordDecl *rd); + void emitVTable(CXXRecordDecl *rd); void setDSOLocal(cir::CIRGlobalValueInterface GV) const; @@ -352,8 +352,8 @@ class CIRGenModule : public CIRGenTypeCache { cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *RD); /// Emit type metadata for the given vtable using the given layout. - void buildVTableTypeMetadata(const CXXRecordDecl *RD, cir::GlobalOp VTable, - const VTableLayout &VTLayout); + void emitVTableTypeMetadata(const CXXRecordDecl *RD, cir::GlobalOp VTable, + const VTableLayout &VTLayout); /// Get the address of the RTTI descriptor for the given type. mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty, @@ -536,18 +536,18 @@ class CIRGenModule : public CIRGenTypeCache { cir::FuncType FnType = nullptr, bool Dontdefer = false, ForDefinition_t IsForDefinition = NotForDefinition); - void buildTopLevelDecl(clang::Decl *decl); - void buildLinkageSpec(const LinkageSpecDecl *D); + void emitTopLevelDecl(clang::Decl *decl); + void emitLinkageSpec(const LinkageSpecDecl *D); /// Emit code for a single global function or var decl. Forward declarations /// are emitted lazily. - void buildGlobal(clang::GlobalDecl D); + void emitGlobal(clang::GlobalDecl D); bool tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D); - void buildAliasForGlobal(llvm::StringRef mangledName, mlir::Operation *op, - GlobalDecl aliasGD, cir::FuncOp aliasee, - cir::GlobalLinkageKind linkage); + void emitAliasForGlobal(llvm::StringRef mangledName, mlir::Operation *op, + GlobalDecl aliasGD, cir::FuncOp aliasee, + cir::GlobalLinkageKind linkage); mlir::Type getCIRType(const clang::QualType &type); @@ -614,22 +614,22 @@ class CIRGenModule : public CIRGenTypeCache { } // C++ related functions. - void buildDeclContext(const DeclContext *DC); + void emitDeclContext(const DeclContext *DC); /// Return the result of value-initializing the given type, i.e. a null /// expression of the given type. This is usually, but not always, an LLVM /// null constant. - mlir::Value buildNullConstant(QualType T, mlir::Location loc); + mlir::Value emitNullConstant(QualType T, mlir::Location loc); /// Return a null constant appropriate for zero-initializing a base class with /// the given type. This is usually, but not always, an LLVM null constant. - mlir::TypedAttr buildNullConstantForBase(const CXXRecordDecl *Record); + mlir::TypedAttr emitNullConstantForBase(const CXXRecordDecl *Record); - mlir::Value buildMemberPointerConstant(const UnaryOperator *E); + mlir::Value emitMemberPointerConstant(const UnaryOperator *E); llvm::StringRef getMangledName(clang::GlobalDecl GD); - void buildTentativeDefinition(const VarDecl *D); + void emitTentativeDefinition(const VarDecl *D); // Make sure that this type is translated. void UpdateCompletedType(const clang::TagDecl *TD); @@ -647,18 +647,17 @@ class CIRGenModule : public CIRGenTypeCache { void setCIRFunctionAttributesForDefinition(const Decl *decl, cir::FuncOp func); - void buildGlobalDefinition(clang::GlobalDecl D, - mlir::Operation *Op = nullptr); - void buildGlobalFunctionDefinition(clang::GlobalDecl D, mlir::Operation *Op); - void buildGlobalVarDefinition(const clang::VarDecl *D, - bool IsTentative = false); + void emitGlobalDefinition(clang::GlobalDecl D, mlir::Operation *Op = nullptr); + void emitGlobalFunctionDefinition(clang::GlobalDecl D, mlir::Operation *Op); + void emitGlobalVarDefinition(const clang::VarDecl *D, + bool IsTentative = false); /// Emit the function that initializes the specified global - void buildCXXGlobalVarDeclInit(const VarDecl *varDecl, cir::GlobalOp addr, - bool performInit); + void emitCXXGlobalVarDeclInit(const VarDecl *varDecl, cir::GlobalOp addr, + bool performInit); - void buildCXXGlobalVarDeclInitFunc(const VarDecl *D, cir::GlobalOp Addr, - bool PerformInit); + void emitCXXGlobalVarDeclInitFunc(const VarDecl *D, cir::GlobalOp Addr, + bool PerformInit); void addDeferredVTable(const CXXRecordDecl *RD) { DeferredVTables.push_back(RD); @@ -671,13 +670,13 @@ class CIRGenModule : public CIRGenTypeCache { std::nullptr_t getModuleDebugInfo() { return nullptr; } /// Emit any needed decls for which code generation was deferred. - void buildDeferred(unsigned recursionLimit); + void emitDeferred(unsigned recursionLimit); - /// Helper for `buildDeferred` to apply actual codegen. - void buildGlobalDecl(clang::GlobalDecl &D); + /// Helper for `emitDeferred` to apply actual codegen. + void emitGlobalDecl(clang::GlobalDecl &D); /// Build default methods not emitted before this point. - void buildDefaultMethods(); + void emitDefaultMethods(); const llvm::Triple &getTriple() const { return target.getTriple(); } @@ -759,8 +758,8 @@ class CIRGenModule : public CIRGenTypeCache { /// Emit type info if type of an expression is a variably modified /// type. Also emit proper debug info for cast types. - void buildExplicitCastExprType(const ExplicitCastExpr *E, - CIRGenFunction *CGF = nullptr); + void emitExplicitCastExprType(const ExplicitCastExpr *E, + CIRGenFunction *CGF = nullptr); static constexpr const char *builtinCoroId = "__builtin_coro_id"; static constexpr const char *builtinCoroAlloc = "__builtin_coro_alloc"; @@ -810,13 +809,13 @@ class CIRGenModule : public CIRGenTypeCache { CIRGenFunction *CGF = nullptr); /// Emits OpenCL specific Metadata e.g. OpenCL version. - void buildOpenCLMetadata(); + void emitOpenCLMetadata(); /// Create cir::AnnotationAttr which contains the annotation /// information for a given GlobalValue. Notice that a GlobalValue could /// have multiple annotations, and this function creates attribute for /// one of them. - cir::AnnotationAttr buildAnnotateAttr(const clang::AnnotateAttr *aa); + cir::AnnotationAttr emitAnnotateAttr(const clang::AnnotateAttr *aa); private: // An ordered map of canonical GlobalDecls to their mangled names. @@ -836,10 +835,10 @@ class CIRGenModule : public CIRGenTypeCache { /// Emit all the global annotations. /// This actually only emits annotations for deffered declarations of /// functions, because global variables need no deffred emission. - void buildGlobalAnnotations(); + void emitGlobalAnnotations(); /// Emit additional args of the annotation. - mlir::ArrayAttr buildAnnotationArgs(const clang::AnnotateAttr *attr); + mlir::ArrayAttr emitAnnotationArgs(const clang::AnnotateAttr *attr); /// Add global annotations for a global value. /// Those annotations are emitted during lowering to the LLVM code. diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp index 6247cf6b5c2a..d11126940935 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp @@ -186,8 +186,8 @@ void CIRGenModule::genKernelArgMetadata(cir::FuncOp Fn, const FunctionDecl *FD, } } -void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, - cir::FuncOp Fn) { +void CIRGenFunction::emitKernelMetadata(const FunctionDecl *FD, + cir::FuncOp Fn) { if (!FD->hasAttr() && !FD->hasAttr()) return; @@ -249,7 +249,7 @@ void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, &getMLIRContext(), attrs.getDictionary(&getMLIRContext()))); } -void CIRGenModule::buildOpenCLMetadata() { +void CIRGenModule::emitOpenCLMetadata() { // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the // opencl.ocl.version named metadata node. // C++ for OpenCL has a distinct mapping for versions compatibile with OpenCL. diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp index 34207f74089b..4f4433b49bea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp @@ -22,7 +22,7 @@ using namespace clang::CIRGen; CIRGenOpenCLRuntime::~CIRGenOpenCLRuntime() {} -void CIRGenOpenCLRuntime::buildWorkGroupLocalVarDecl(CIRGenFunction &CGF, - const VarDecl &D) { - return CGF.buildStaticVarDecl(D, cir::GlobalLinkageKind::InternalLinkage); +void CIRGenOpenCLRuntime::emitWorkGroupLocalVarDecl(CIRGenFunction &CGF, + const VarDecl &D) { + return CGF.emitStaticVarDecl(D, cir::GlobalLinkageKind::InternalLinkage); } diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h index f08ed0bf31e8..252a810f2061 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h @@ -37,8 +37,8 @@ class CIRGenOpenCLRuntime { /// Emit the IR required for a work-group-local variable declaration, and add /// an entry to CGF's LocalDeclMap for D. The base class does this using /// CIRGenFunction::EmitStaticVarDecl to emit an internal global for D. - virtual void buildWorkGroupLocalVarDecl(CIRGenFunction &CGF, - const clang::VarDecl &D); + virtual void emitWorkGroupLocalVarDecl(CIRGenFunction &CGF, + const clang::VarDecl &D); }; } // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 1a29affa0df3..8dff466cecd8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -24,9 +24,9 @@ using namespace clang; using namespace clang::CIRGen; using namespace cir; -Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S, - bool getLast, - AggValueSlot slot) { +Address CIRGenFunction::emitCompoundStmtWithoutScope(const CompoundStmt &S, + bool getLast, + AggValueSlot slot) { const Stmt *ExprResult = S.getStmtExprResult(); assert((!getLast || (getLast && ExprResult)) && "If getLast is true then the CompoundStmt must have a StmtExprResult"); @@ -47,17 +47,17 @@ Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S, const Expr *E = cast(ExprResult); QualType exprTy = E->getType(); if (hasAggregateEvaluationKind(exprTy)) { - buildAggExpr(E, slot); + emitAggExpr(E, slot); } else { // We can't return an RValue here because there might be cleanups at // the end of the StmtExpr. Because of that, we have to emit the result // here into a temporary alloca. retAlloca = CreateMemTemp(exprTy, getLoc(E->getSourceRange())); - buildAnyExprToMem(E, retAlloca, Qualifiers(), - /*IsInit*/ false); + emitAnyExprToMem(E, retAlloca, Qualifiers(), + /*IsInit*/ false); } } else { - if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed()) + if (emitStmt(CurStmt, /*useCurrentScope=*/false).failed()) llvm_unreachable("failed to build statement"); } } @@ -65,8 +65,8 @@ Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S, return retAlloca; } -Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast, - AggValueSlot slot) { +Address CIRGenFunction::emitCompoundStmt(const CompoundStmt &S, bool getLast, + AggValueSlot slot) { Address retAlloca = Address::invalid(); // Add local scope to track new declared variables. @@ -76,22 +76,22 @@ Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast, scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) { LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - retAlloca = buildCompoundStmtWithoutScope(S, getLast, slot); + retAlloca = emitCompoundStmtWithoutScope(S, getLast, slot); }); return retAlloca; } -void CIRGenFunction::buildStopPoint(const Stmt *S) { +void CIRGenFunction::emitStopPoint(const Stmt *S) { assert(!cir::MissingFeatures::generateDebugInfo()); } // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. -mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, - bool useCurrentScope, - ArrayRef Attrs) { - if (mlir::succeeded(buildSimpleStmt(S, useCurrentScope))) +mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *S, + bool useCurrentScope, + ArrayRef Attrs) { + if (mlir::succeeded(emitSimpleStmt(S, useCurrentScope))) return mlir::success(); if (getContext().getLangOpts().OpenMP && @@ -132,7 +132,7 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, mlir::Block *incoming = builder.getInsertionBlock(); assert(incoming && "expression emission must have an insertion point"); - buildIgnoredExpr(cast(S)); + emitIgnoredExpr(cast(S)); mlir::Block *outgoing = builder.getInsertionBlock(); assert(outgoing && "expression emission cleared block!"); @@ -141,52 +141,52 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, } case Stmt::IfStmtClass: - if (buildIfStmt(cast(*S)).failed()) + if (emitIfStmt(cast(*S)).failed()) return mlir::failure(); break; case Stmt::SwitchStmtClass: - if (buildSwitchStmt(cast(*S)).failed()) + if (emitSwitchStmt(cast(*S)).failed()) return mlir::failure(); break; case Stmt::ForStmtClass: - if (buildForStmt(cast(*S)).failed()) + if (emitForStmt(cast(*S)).failed()) return mlir::failure(); break; case Stmt::WhileStmtClass: - if (buildWhileStmt(cast(*S)).failed()) + if (emitWhileStmt(cast(*S)).failed()) return mlir::failure(); break; case Stmt::DoStmtClass: - if (buildDoStmt(cast(*S)).failed()) + if (emitDoStmt(cast(*S)).failed()) return mlir::failure(); break; case Stmt::CoroutineBodyStmtClass: - return buildCoroutineBody(cast(*S)); + return emitCoroutineBody(cast(*S)); case Stmt::CoreturnStmtClass: - return buildCoreturnStmt(cast(*S)); + return emitCoreturnStmt(cast(*S)); case Stmt::CXXTryStmtClass: - return buildCXXTryStmt(cast(*S)); + return emitCXXTryStmt(cast(*S)); case Stmt::CXXForRangeStmtClass: - return buildCXXForRangeStmt(cast(*S), Attrs); + return emitCXXForRangeStmt(cast(*S), Attrs); case Stmt::IndirectGotoStmtClass: case Stmt::ReturnStmtClass: // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. case Stmt::GCCAsmStmtClass: case Stmt::MSAsmStmtClass: - return buildAsmStmt(cast(*S)); + return emitAsmStmt(cast(*S)); // OMP directives: case Stmt::OMPParallelDirectiveClass: - return buildOMPParallelDirective(cast(*S)); + return emitOMPParallelDirective(cast(*S)); case Stmt::OMPTaskwaitDirectiveClass: - return buildOMPTaskwaitDirective(cast(*S)); + return emitOMPTaskwaitDirective(cast(*S)); case Stmt::OMPTaskyieldDirectiveClass: - return buildOMPTaskyieldDirective(cast(*S)); + return emitOMPTaskyieldDirective(cast(*S)); case Stmt::OMPBarrierDirectiveClass: - return buildOMPBarrierDirective(cast(*S)); + return emitOMPBarrierDirective(cast(*S)); // Unsupported AST nodes: case Stmt::CapturedStmtClass: case Stmt::ObjCAtTryStmtClass: @@ -281,41 +281,41 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, - bool useCurrentScope) { +mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *S, + bool useCurrentScope) { switch (S->getStmtClass()) { default: return mlir::failure(); case Stmt::DeclStmtClass: - return buildDeclStmt(cast(*S)); + return emitDeclStmt(cast(*S)); case Stmt::CompoundStmtClass: - useCurrentScope ? buildCompoundStmtWithoutScope(cast(*S)) - : buildCompoundStmt(cast(*S)); + useCurrentScope ? emitCompoundStmtWithoutScope(cast(*S)) + : emitCompoundStmt(cast(*S)); break; case Stmt::ReturnStmtClass: - return buildReturnStmt(cast(*S)); + return emitReturnStmt(cast(*S)); case Stmt::GotoStmtClass: - return buildGotoStmt(cast(*S)); + return emitGotoStmt(cast(*S)); case Stmt::ContinueStmtClass: - return buildContinueStmt(cast(*S)); + return emitContinueStmt(cast(*S)); case Stmt::NullStmtClass: break; case Stmt::LabelStmtClass: - return buildLabelStmt(cast(*S)); + return emitLabelStmt(cast(*S)); case Stmt::CaseStmtClass: case Stmt::DefaultStmtClass: // If we reached here, we must not handling a switch case in the top level. - return buildSwitchCase(cast(*S), - /*buildingTopLevelCase=*/false); + return emitSwitchCase(cast(*S), + /*buildingTopLevelCase=*/false); break; case Stmt::BreakStmtClass: - return buildBreakStmt(cast(*S)); + return emitBreakStmt(cast(*S)); case Stmt::AttributedStmtClass: - return buildAttributedStmt(cast(*S)); + return emitAttributedStmt(cast(*S)); case Stmt::SEHLeaveStmtClass: llvm::errs() << "CIR codegen for '" << S->getStmtClassName() @@ -326,18 +326,18 @@ mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildLabelStmt(const clang::LabelStmt &S) { - if (buildLabel(S.getDecl()).failed()) +mlir::LogicalResult CIRGenFunction::emitLabelStmt(const clang::LabelStmt &S) { + if (emitLabel(S.getDecl()).failed()) return mlir::failure(); // IsEHa: not implemented. assert(!(getContext().getLangOpts().EHAsynch && S.isSideEntry())); - return buildStmt(S.getSubStmt(), /* useCurrentScope */ true); + return emitStmt(S.getSubStmt(), /* useCurrentScope */ true); } mlir::LogicalResult -CIRGenFunction::buildAttributedStmt(const AttributedStmt &S) { +CIRGenFunction::emitAttributedStmt(const AttributedStmt &S) { for (const auto *A : S.getAttrs()) { switch (A->getKind()) { case attr::NoMerge: @@ -350,7 +350,7 @@ CIRGenFunction::buildAttributedStmt(const AttributedStmt &S) { } } - return buildStmt(S.getSubStmt(), true, S.getAttrs()); + return emitStmt(S.getSubStmt(), true, S.getAttrs()); } // Add terminating yield on body regions (loops, ...) in case there are @@ -382,7 +382,7 @@ static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r, b->erase(); } -mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { +mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &S) { mlir::LogicalResult res = mlir::success(); // The else branch of a consteval if statement is always the only branch // that can be runtime evaluated. @@ -398,14 +398,14 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { // compares unequal to 0. The condition must be a scalar type. auto ifStmtBuilder = [&]() -> mlir::LogicalResult { if (S.isConsteval()) - return buildStmt(ConstevalExecuted, /*useCurrentScope=*/true); + return emitStmt(ConstevalExecuted, /*useCurrentScope=*/true); if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); + emitDecl(*S.getConditionVariable()); // During LLVM codegen, if the condition constant folds and can be elided, // it tries to avoid emitting the condition and the dead arm of the if/else. @@ -420,7 +420,7 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { // in this lambda like in Clang but postponed to other MLIR // passes. if (const Stmt *Executed = CondConstant ? S.getThen() : S.getElse()) - return buildStmt(Executed, /*useCurrentScope=*/true); + return emitStmt(Executed, /*useCurrentScope=*/true); // There is nothing to execute at runtime. // TODO(cir): there is still an empty cir.scope generated by the caller. return mlir::success(); @@ -430,7 +430,7 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); assert(!cir::MissingFeatures::incrementProfileCounter()); - return buildIfOnBoolExpr(S.getCond(), S.getThen(), S.getElse()); + return emitIfOnBoolExpr(S.getCond(), S.getThen(), S.getElse()); }; // TODO: Add a new scoped symbol table. @@ -447,20 +447,20 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { return res; } -mlir::LogicalResult CIRGenFunction::buildDeclStmt(const DeclStmt &S) { +mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &S) { if (!builder.getInsertionBlock()) { CGM.emitError("Seems like this is unreachable code, what should we do?"); return mlir::failure(); } for (const auto *I : S.decls()) { - buildDecl(*I); + emitDecl(*I); } return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { +mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &S) { assert(!cir::MissingFeatures::requiresReturnValueCheck()); auto loc = getLoc(S.getSourceRange()); @@ -492,29 +492,29 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { // Make sure not to return anything, but evaluate the expression // for side effects. if (RV) { - buildAnyExpr(RV); + emitAnyExpr(RV); } } else if (!RV) { // Do nothing (return value is left uninitialized) } else if (FnRetTy->isReferenceType()) { // If this function returns a reference, take the address of the // expression rather than the value. - RValue Result = buildReferenceBindingToExpr(RV); + RValue Result = emitReferenceBindingToExpr(RV); builder.createStore(loc, Result.getScalarVal(), ReturnValue); } else { mlir::Value V = nullptr; switch (CIRGenFunction::getEvaluationKind(RV->getType())) { case cir::TEK_Scalar: - V = buildScalarExpr(RV); + V = emitScalarExpr(RV); builder.CIRBaseBuilderTy::createStore(loc, V, *FnRetAlloca); break; case cir::TEK_Complex: - buildComplexExprIntoLValue(RV, - makeAddrLValue(ReturnValue, RV->getType()), - /*isInit*/ true); + emitComplexExprIntoLValue(RV, + makeAddrLValue(ReturnValue, RV->getType()), + /*isInit*/ true); break; case cir::TEK_Aggregate: - buildAggExpr( + emitAggExpr( RV, AggValueSlot::forAddr( ReturnValue, Qualifiers(), AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, @@ -559,7 +559,7 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { +mlir::LogicalResult CIRGenFunction::emitGotoStmt(const GotoStmt &S) { // FIXME: LLVM codegen inserts emit stop point here for debug info // sake when the insertion point is available, but doesn't do // anything special when there isn't. We haven't implemented debug @@ -570,7 +570,7 @@ mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { S.getLabel()->getName()); // A goto marks the end of a block, create a new one for codegen after - // buildGotoStmt can resume building in that block. + // emitGotoStmt can resume building in that block. // Insert the new block to continue codegen after goto. builder.createBlock(builder.getBlock()->getParent()); @@ -578,7 +578,7 @@ mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildLabel(const LabelDecl *D) { +mlir::LogicalResult CIRGenFunction::emitLabel(const LabelDecl *D) { // Create a new block to tag with a label and add a branch from // the current one to it. If the block is empty just call attach it // to this label. @@ -601,7 +601,7 @@ mlir::LogicalResult CIRGenFunction::buildLabel(const LabelDecl *D) { } mlir::LogicalResult -CIRGenFunction::buildContinueStmt(const clang::ContinueStmt &S) { +CIRGenFunction::emitContinueStmt(const clang::ContinueStmt &S) { builder.createContinue(getLoc(S.getContinueLoc())); // Insert the new block to continue codegen after the continue statement. @@ -610,7 +610,7 @@ CIRGenFunction::buildContinueStmt(const clang::ContinueStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) { +mlir::LogicalResult CIRGenFunction::emitBreakStmt(const clang::BreakStmt &S) { builder.createBreak(getLoc(S.getBreakLoc())); // Insert the new block to continue codegen after the break statement. @@ -667,9 +667,9 @@ const CaseStmt *CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, template mlir::LogicalResult -CIRGenFunction::buildCaseDefaultCascade(const T *stmt, mlir::Type condType, - mlir::ArrayAttr value, CaseOpKind kind, - bool buildingTopLevelCase) { +CIRGenFunction::emitCaseDefaultCascade(const T *stmt, mlir::Type condType, + mlir::ArrayAttr value, CaseOpKind kind, + bool buildingTopLevelCase) { assert((isa(stmt)) && "only case or default stmt go here"); @@ -696,7 +696,7 @@ CIRGenFunction::buildCaseDefaultCascade(const T *stmt, mlir::Type condType, subStmtKind = SubStmtKind::Case; builder.createYield(loc); } else - result = buildStmt(sub, /*useCurrentScope=*/!isa(sub)); + result = emitStmt(sub, /*useCurrentScope=*/!isa(sub)); insertPoint = builder.saveInsertionPoint(); } @@ -734,11 +734,10 @@ CIRGenFunction::buildCaseDefaultCascade(const T *stmt, mlir::Type condType, // We don't need to revert this if we find the current switch can't be in // simple form later since the conversion itself should be harmless. if (subStmtKind == SubStmtKind::Case) - result = - buildCaseStmt(*cast(sub), condType, buildingTopLevelCase); + result = emitCaseStmt(*cast(sub), condType, buildingTopLevelCase); else if (subStmtKind == SubStmtKind::Default) - result = buildDefaultStmt(*cast(sub), condType, - buildingTopLevelCase); + result = emitDefaultStmt(*cast(sub), condType, + buildingTopLevelCase); else if (buildingTopLevelCase) // If we're building a top level case, try to restore the insert point to // the case we're building, then we can attach more random stmts to the @@ -748,43 +747,42 @@ CIRGenFunction::buildCaseDefaultCascade(const T *stmt, mlir::Type condType, return result; } -mlir::LogicalResult CIRGenFunction::buildCaseStmt(const CaseStmt &S, - mlir::Type condType, - bool buildingTopLevelCase) { +mlir::LogicalResult CIRGenFunction::emitCaseStmt(const CaseStmt &S, + mlir::Type condType, + bool buildingTopLevelCase) { mlir::ArrayAttr value; CaseOpKind kind; auto *caseStmt = foldCaseStmt(S, condType, value, kind); - return buildCaseDefaultCascade(caseStmt, condType, value, kind, - buildingTopLevelCase); + return emitCaseDefaultCascade(caseStmt, condType, value, kind, + buildingTopLevelCase); } -mlir::LogicalResult -CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, mlir::Type condType, - bool buildingTopLevelCase) { - return buildCaseDefaultCascade(&S, condType, builder.getArrayAttr({}), - cir::CaseOpKind::Default, - buildingTopLevelCase); +mlir::LogicalResult CIRGenFunction::emitDefaultStmt(const DefaultStmt &S, + mlir::Type condType, + bool buildingTopLevelCase) { + return emitCaseDefaultCascade(&S, condType, builder.getArrayAttr({}), + cir::CaseOpKind::Default, buildingTopLevelCase); } -mlir::LogicalResult CIRGenFunction::buildSwitchCase(const SwitchCase &S, - bool buildingTopLevelCase) { +mlir::LogicalResult CIRGenFunction::emitSwitchCase(const SwitchCase &S, + bool buildingTopLevelCase) { assert(!condTypeStack.empty() && "build switch case without specifying the type of the condition"); if (S.getStmtClass() == Stmt::CaseStmtClass) - return buildCaseStmt(cast(S), condTypeStack.back(), - buildingTopLevelCase); + return emitCaseStmt(cast(S), condTypeStack.back(), + buildingTopLevelCase); if (S.getStmtClass() == Stmt::DefaultStmtClass) - return buildDefaultStmt(cast(S), condTypeStack.back(), - buildingTopLevelCase); + return emitDefaultStmt(cast(S), condTypeStack.back(), + buildingTopLevelCase); llvm_unreachable("expect case or default stmt"); } mlir::LogicalResult -CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, - ArrayRef ForAttrs) { +CIRGenFunction::emitCXXForRangeStmt(const CXXForRangeStmt &S, + ArrayRef ForAttrs) { cir::ForOp forOp; // TODO(cir): pass in array of attributes. @@ -792,13 +790,13 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, auto loopRes = mlir::success(); // Evaluate the first pieces before the loop. if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - if (buildStmt(S.getRangeStmt(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getRangeStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - if (buildStmt(S.getBeginStmt(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getBeginStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - if (buildStmt(S.getEndStmt(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getEndStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); assert(!cir::MissingFeatures::loopInfoStack()); @@ -823,16 +821,16 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, // In C++ the scope of the init-statement and the scope of // statement are one and the same. bool useCurrentScope = true; - if (buildStmt(S.getLoopVarStmt(), useCurrentScope).failed()) + if (emitStmt(S.getLoopVarStmt(), useCurrentScope).failed()) loopRes = mlir::failure(); - if (buildStmt(S.getBody(), useCurrentScope).failed()) + if (emitStmt(S.getBody(), useCurrentScope).failed()) loopRes = mlir::failure(); - buildStopPoint(&S); + emitStopPoint(&S); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { if (S.getInc()) - if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInc(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); builder.createYield(loc); }); @@ -859,7 +857,7 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { +mlir::LogicalResult CIRGenFunction::emitForStmt(const ForStmt &S) { cir::ForOp forOp; // TODO: pass in array of attributes. @@ -867,7 +865,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { auto loopRes = mlir::success(); // Evaluate the first part before the loop. if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); assert(!cir::MissingFeatures::loopInfoStack()); // From LLVM: if there are any cleanups between here and the loop-exit @@ -887,7 +885,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { // If the for statement has a condition scope, // emit the local variable declaration. if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); + emitDecl(*S.getConditionVariable()); // C99 6.8.5p2/p4: The first substatement is executed if the // expression compares unequal to 0. The condition must be a // scalar type. @@ -907,14 +905,14 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { // nested within the scope of init-statement. bool useCurrentScope = CGM.getASTContext().getLangOpts().CPlusPlus ? true : false; - if (buildStmt(S.getBody(), useCurrentScope).failed()) + if (emitStmt(S.getBody(), useCurrentScope).failed()) loopRes = mlir::failure(); - buildStopPoint(&S); + emitStopPoint(&S); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { if (S.getInc()) - if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInc(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); builder.createYield(loc); }); @@ -937,7 +935,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { +mlir::LogicalResult CIRGenFunction::emitDoStmt(const DoStmt &S) { cir::DoWhileOp doWhileOp; // TODO: pass in array of attributes. @@ -964,9 +962,9 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getBody(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); - buildStopPoint(&S); + emitStopPoint(&S); }); return loopRes; }; @@ -987,7 +985,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { +mlir::LogicalResult CIRGenFunction::emitWhileStmt(const WhileStmt &S) { cir::WhileOp whileOp; // TODO: pass in array of attributes. @@ -1010,7 +1008,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // If the for statement has a condition scope, // emit the local variable declaration. if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); + emitDecl(*S.getConditionVariable()); // C99 6.8.5p2/p4: The first substatement is executed if the // expression compares unequal to 0. The condition must be a // scalar type. @@ -1019,9 +1017,9 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getBody(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); - buildStopPoint(&S); + emitStopPoint(&S); }); return loopRes; }; @@ -1042,7 +1040,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildSwitchBody(const Stmt *S) { +mlir::LogicalResult CIRGenFunction::emitSwitchBody(const Stmt *S) { // It is rare but legal if the switch body is not a compound stmt. e.g., // // switch(a) @@ -1053,7 +1051,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchBody(const Stmt *S) { // ... // } if (!isa(S)) - return buildStmt(S, /*useCurrentScope=*/!false); + return emitStmt(S, /*useCurrentScope=*/!false); auto *compoundStmt = cast(S); @@ -1065,21 +1063,21 @@ mlir::LogicalResult CIRGenFunction::buildSwitchBody(const Stmt *S) { // random stmt to the region of previous built case op to try to make // the being generated `cir.switch` to be in simple form. if (mlir::failed( - buildSwitchCase(*switchCase, /*buildingTopLevelCase=*/true))) + emitSwitchCase(*switchCase, /*buildingTopLevelCase=*/true))) return mlir::failure(); continue; } // Otherwise, just build the statements in the nearest case region. - if (mlir::failed(buildStmt(c, /*useCurrentScope=*/!isa(c)))) + if (mlir::failed(emitStmt(c, /*useCurrentScope=*/!isa(c)))) return mlir::failure(); } return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { +mlir::LogicalResult CIRGenFunction::emitSwitchStmt(const SwitchStmt &S) { // TODO: LLVM codegen does some early optimization to fold the condition and // only emit live cases. CIR should use MLIR to achieve similar things, // nothing to be done here. @@ -1088,13 +1086,13 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { SwitchOp swop; auto switchStmtBuilder = [&]() -> mlir::LogicalResult { if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); + emitDecl(*S.getConditionVariable()); - mlir::Value condV = buildScalarExpr(S.getCond()); + mlir::Value condV = emitScalarExpr(S.getCond()); // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts()) // TODO: if the switch has a condition wrapped by __builtin_unpredictable? @@ -1108,7 +1106,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { condTypeStack.push_back(condV.getType()); - res = buildSwitchBody(S.getBody()); + res = emitSwitchBody(S.getBody()); condTypeStack.pop_back(); }); @@ -1135,16 +1133,16 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { return res; } -void CIRGenFunction::buildReturnOfRValue(mlir::Location loc, RValue RV, - QualType Ty) { +void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue RV, + QualType Ty) { if (RV.isScalar()) { builder.createStore(loc, RV.getScalarVal(), ReturnValue); } else if (RV.isAggregate()) { LValue Dest = makeAddrLValue(ReturnValue, Ty); LValue Src = makeAddrLValue(RV.getAggregateAddress(), Ty); - buildAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue()); + emitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue()); } else { llvm_unreachable("NYI"); } - buildBranchThroughCleanup(loc, ReturnBlock()); + emitBranchThroughCleanup(loc, ReturnBlock()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp index b865046828c9..5494268e9606 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp @@ -67,7 +67,7 @@ static void buildDependences(const OMPExecutableDirective &S, } mlir::LogicalResult -CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) { +CIRGenFunction::emitOMPParallelDirective(const OMPParallelDirective &S) { mlir::LogicalResult res = mlir::success(); auto scopeLoc = getLoc(S.getSourceRange()); // Create a `omp.parallel` op. @@ -81,9 +81,9 @@ CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) { [&](mlir::OpBuilder &b, mlir::Location loc) { LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()}; // Emit the body of the region. - if (buildStmt(S.getCapturedStmt(OpenMPDirectiveKind::OMPD_parallel) - ->getCapturedStmt(), - /*useCurrentScope=*/true) + if (emitStmt(S.getCapturedStmt(OpenMPDirectiveKind::OMPD_parallel) + ->getCapturedStmt(), + /*useCurrentScope=*/true) .failed()) res = mlir::failure(); }); @@ -93,7 +93,7 @@ CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) { } mlir::LogicalResult -CIRGenFunction::buildOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { +CIRGenFunction::emitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { mlir::LogicalResult res = mlir::success(); OMPTaskDataTy Data; buildDependences(S, Data); @@ -103,7 +103,7 @@ CIRGenFunction::buildOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { return res; } mlir::LogicalResult -CIRGenFunction::buildOMPTaskyieldDirective(const OMPTaskyieldDirective &S) { +CIRGenFunction::emitOMPTaskyieldDirective(const OMPTaskyieldDirective &S) { mlir::LogicalResult res = mlir::success(); // Creation of an omp.taskyield operation CGM.getOpenMPRuntime().emitTaskyieldCall(builder, *this, @@ -112,7 +112,7 @@ CIRGenFunction::buildOMPTaskyieldDirective(const OMPTaskyieldDirective &S) { } mlir::LogicalResult -CIRGenFunction::buildOMPBarrierDirective(const OMPBarrierDirective &S) { +CIRGenFunction::emitOMPBarrierDirective(const OMPBarrierDirective &S) { mlir::LogicalResult res = mlir::success(); // Creation of an omp.barrier operation CGM.getOpenMPRuntime().emitBarrierCall(builder, *this, diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index cec319e41046..932dd4bebeed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -131,7 +131,7 @@ static bool shouldEmitVTableAtEndOfTranslationUnit(CIRGenModule &CGM, /// Given that at some point we emitted a reference to one or more /// vtables, and that we are now at the end of the translation unit, /// decide whether we should emit them. -void CIRGenModule::buildDeferredVTables() { +void CIRGenModule::emitDeferredVTables() { #ifndef NDEBUG // Remember the size of DeferredVTables, because we're going to assume // that this entire operation doesn't modify it. @@ -156,7 +156,7 @@ void CIRGenModule::buildDeferredVTables() { /// This is only called for vtables that _must_ be emitted (mainly due to key /// functions). For weak vtables, CodeGen tracks when they are needed and /// emits them as-needed. -void CIRGenModule::buildVTable(CXXRecordDecl *rd) { +void CIRGenModule::emitVTable(CXXRecordDecl *rd) { VTables.GenerateClassData(rd); } @@ -387,7 +387,7 @@ cir::GlobalOp CIRGenVTables::generateConstructionVTable( assert(!VTable.isDeclaration() && "Shouldn't set properties on declaration"); CGM.setGVProperties(VTable, RD); - CGM.buildVTableTypeMetadata(RD, VTable, *VTLayout.get()); + CGM.emitVTableTypeMetadata(RD, VTable, *VTLayout.get()); if (UsingRelativeLayout) { llvm_unreachable("NYI"); @@ -585,9 +585,9 @@ uint64_t CIRGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, } /// Emit the definition of the given vtable. -void CIRGenVTables::buildVTTDefinition(cir::GlobalOp VTT, - cir::GlobalLinkageKind Linkage, - const CXXRecordDecl *RD) { +void CIRGenVTables::emitVTTDefinition(cir::GlobalOp VTT, + cir::GlobalLinkageKind Linkage, + const CXXRecordDecl *RD) { VTTBuilder Builder(CGM.getASTContext(), RD, /*GenerateDefinition=*/true); auto ArrayType = cir::ArrayType::get(CGM.getBuilder().getContext(), @@ -651,7 +651,7 @@ void CIRGenVTables::buildVTTDefinition(cir::GlobalOp VTT, } } -void CIRGenVTables::buildThunks(GlobalDecl GD) { +void CIRGenVTables::emitThunks(GlobalDecl GD) { const CXXMethodDecl *MD = cast(GD.getDecl())->getCanonicalDecl(); diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index 4b2247dc9fc8..639eb370ca0e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -102,11 +102,11 @@ class CIRGenVTables { cir::GlobalOp getAddrOfVTT(const CXXRecordDecl *RD); /// Emit the definition of the given vtable. - void buildVTTDefinition(cir::GlobalOp VTT, cir::GlobalLinkageKind Linkage, - const CXXRecordDecl *RD); + void emitVTTDefinition(cir::GlobalOp VTT, cir::GlobalLinkageKind Linkage, + const CXXRecordDecl *RD); /// Emit the associated thunks for the given global decl. - void buildThunks(GlobalDecl GD); + void emitThunks(GlobalDecl GD); /// Generate all the class data required to be generated upon definition of a /// KeyFunction. This includes the vtable, the RTTI data structure (if RTTI diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 24143185691e..0266e893909a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -82,7 +82,7 @@ bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef D) { HandlingTopLevelDeclRAII HandlingDecl(*this); for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { - CGM->buildTopLevelDecl(*I); + CGM->emitTopLevelDecl(*I); } return true; @@ -125,9 +125,9 @@ void CIRGenerator::HandleInlineFunctionDefinition(FunctionDecl *D) { CGM->AddDeferredUnusedCoverageMapping(D); } -void CIRGenerator::buildDefaultMethods() { CGM->buildDefaultMethods(); } +void CIRGenerator::emitDefaultMethods() { CGM->emitDefaultMethods(); } -void CIRGenerator::buildDeferredDecls() { +void CIRGenerator::emitDeferredDecls() { if (DeferredInlineMemberFuncDefs.empty()) return; @@ -136,7 +136,7 @@ void CIRGenerator::buildDeferredDecls() { // invoked if AST inspection results in declarations being added. HandlingTopLevelDeclRAII HandlingDecls(*this); for (unsigned I = 0; I != DeferredInlineMemberFuncDefs.size(); ++I) - CGM->buildTopLevelDecl(DeferredInlineMemberFuncDefs[I]); + CGM->emitTopLevelDecl(DeferredInlineMemberFuncDefs[I]); DeferredInlineMemberFuncDefs.clear(); } @@ -188,12 +188,12 @@ void CIRGenerator::CompleteTentativeDefinition(VarDecl *D) { if (Diags.hasErrorOccurred()) return; - CGM->buildTentativeDefinition(D); + CGM->emitTentativeDefinition(D); } void CIRGenerator::HandleVTable(CXXRecordDecl *rd) { if (Diags.hasErrorOccurred()) return; - CGM->buildVTable(rd); + CGM->emitVTable(rd); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index f94553c58112..bba759494e3b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -141,7 +141,7 @@ LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, auto loc = op->getLoc(); auto srcValue = op.getSrc(); - cir_cconv_assert(!MissingFeatures::buildTypeCheck()); + cir_cconv_assert(!MissingFeatures::emitTypeCheck()); if (op.isRefcast()) return buildDynamicCastAfterNullCheck(builder, op); diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index cc236abe0047..62f7c664495e 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -265,7 +265,7 @@ class CIRGenConsumer : public clang::ASTConsumer { if (outputStream && mlirMod) { // Emit remaining defaulted C++ methods if (!feOptions.ClangIRDisableEmitCXXDefault) - gen->buildDefaultMethods(); + gen->emitDefaultMethods(); // FIXME: we cannot roundtrip prettyForm=true right now. mlir::OpPrintingFlags flags; From a28811a8e16871cb6fdd6f5b7e35c13ce90c3480 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Sat, 9 Nov 2024 15:44:55 -0500 Subject: [PATCH 2060/2301] [CIR][NFC] Move LoweringPrepare into CIRGen (#1092) Move LP into CIRGen and give it a handle on the CIRGenModule. A lot of code has been duplicated from CIRGen into cir/Dialect/Transforms in order to let LP live there, but with more necessary CIRGen features (e.g. EH scope and cleanups) going to be used in LP it doesn't make sense to keep it separate. Add this patch that just refactors LoweringPrepare into the CIRGen directory and give it a handle on the CGM. --- clang/include/clang/CIR/CIRGenerator.h | 1 + clang/include/clang/CIR/CIRToCIRPasses.h | 17 +++++++++------- clang/include/clang/CIR/Dialect/Passes.h | 10 ++++++++-- clang/include/clang/CIR/Dialect/Passes.td | 4 ++-- .../Transforms/LoweringPrepareCXXABI.h | 0 .../Transforms/LoweringPrepareItaniumCXXABI.h | 0 .../CIR/Dialect/Transforms/PassDetail.h | 0 clang/lib/CIR/CodeGen/CIRPasses.cpp | 15 +++++++------- clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + .../LoweringPrepare.cpp | 13 +++++++++--- .../Dialect/Transforms/CIRCanonicalize.cpp | 2 +- .../CIR/Dialect/Transforms/CIRSimplify.cpp | 2 +- .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 - clang/lib/CIR/Dialect/Transforms/DropAST.cpp | 2 +- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 2 +- .../lib/CIR/Dialect/Transforms/GotoSolver.cpp | 4 ++-- .../CIR/Dialect/Transforms/HoistAllocas.cpp | 4 ++-- .../Dialect/Transforms/IdiomRecognizer.cpp | 2 +- clang/lib/CIR/Dialect/Transforms/LibOpt.cpp | 2 +- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 3 +-- .../lib/CIR/Dialect/Transforms/SCFPrepare.cpp | 2 +- clang/lib/CIR/Dialect/Transforms/StdHelpers.h | 2 +- .../Targets/LoweringPrepareAArch64CXXABI.cpp | 2 +- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 2 +- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 20 ++++++++++--------- clang/lib/FrontendTool/CMakeLists.txt | 1 + 26 files changed, 67 insertions(+), 47 deletions(-) rename clang/{lib => include/clang}/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h (100%) rename clang/{lib => include/clang}/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h (100%) rename clang/{lib => include/clang}/CIR/Dialect/Transforms/PassDetail.h (100%) rename clang/lib/CIR/{Dialect/Transforms => CodeGen}/LoweringPrepare.cpp (99%) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index f4c30a5e892b..52738824bef4 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -98,6 +98,7 @@ class CIRGenerator : public clang::ASTConsumer { std::unique_ptr takeContext() { return std::move(mlirCtx); }; + clang::CIRGen::CIRGenModule &getCGM() { return *CGM; } bool verifyModule(); diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index 4ad4aeebb22e..02a2795b94eb 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -18,7 +18,10 @@ namespace clang { class ASTContext; -} +namespace CIRGen { +class CIRGenModule; +} // namespace CIRGen +} // namespace clang namespace mlir { class MLIRContext; @@ -30,12 +33,12 @@ namespace cir { // Run set of cleanup/prepare/etc passes CIR <-> CIR. mlir::LogicalResult runCIRToCIRPasses( mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, - llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, - llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, - llvm::StringRef libOptOpts, std::string &passOptParsingFailure, - bool enableCIRSimplify, bool flattenCIR, bool emitMLIR, - bool enableCallConvLowering, bool enableMem2reg); + clang::CIRGen::CIRGenModule &cgm, clang::ASTContext &astCtx, + bool enableVerifier, bool enableLifetime, llvm::StringRef lifetimeOpts, + bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, + bool enableLibOpt, llvm::StringRef libOptOpts, + std::string &passOptParsingFailure, bool enableCIRSimplify, bool flattenCIR, + bool emitMLIR, bool enableCallConvLowering, bool enableMem2reg); } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index d6cd4831a6af..ca1cc40353e6 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -17,7 +17,11 @@ namespace clang { class ASTContext; -} +namespace CIRGen { +class CIRGenModule; +} // namespace CIRGen +} // namespace clang + namespace mlir { std::unique_ptr createLifetimeCheckPass(); @@ -31,7 +35,9 @@ std::unique_ptr createCIRSimplifyPass(); std::unique_ptr createDropASTPass(); std::unique_ptr createSCFPreparePass(); std::unique_ptr createLoweringPreparePass(); -std::unique_ptr createLoweringPreparePass(clang::ASTContext *astCtx); +std::unique_ptr +createLoweringPreparePass(clang::ASTContext *astCtx, + clang::CIRGen::CIRGenModule &cgm); std::unique_ptr createIdiomRecognizerPass(); std::unique_ptr createIdiomRecognizerPass(clang::ASTContext *astCtx); std::unique_ptr createLibOptPass(); diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 4a8d2bfa9672..1ea41cdd34a2 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -109,7 +109,7 @@ def SCFPrepare : Pass<"cir-mlir-scf-prepare"> { def HoistAllocas : Pass<"cir-hoist-allocas"> { let summary = "Hoist allocas to the entry of the function"; - let description = [{ + let description = [{ This pass hoist all non-dynamic allocas to the entry of the function. This is helpful for later code generation. }]; @@ -119,7 +119,7 @@ def HoistAllocas : Pass<"cir-hoist-allocas"> { def FlattenCFG : Pass<"cir-flatten-cfg"> { let summary = "Produces flatten cfg"; - let description = [{ + let description = [{ This pass transforms CIR and inline all the nested regions. Thus, the next post condtions are met after the pass applied: - there is not any nested region in a function body diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/include/clang/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h similarity index 100% rename from clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h rename to clang/include/clang/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h b/clang/include/clang/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h similarity index 100% rename from clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h rename to clang/include/clang/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h diff --git a/clang/lib/CIR/Dialect/Transforms/PassDetail.h b/clang/include/clang/CIR/Dialect/Transforms/PassDetail.h similarity index 100% rename from clang/lib/CIR/Dialect/Transforms/PassDetail.h rename to clang/include/clang/CIR/Dialect/Transforms/PassDetail.h diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 65b43cfc6ffd..5cbbf0cd7477 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -13,6 +13,7 @@ #include "clang/AST/ASTContext.h" #include "clang/CIR/Dialect/Passes.h" +#include "CIRGenModule.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" @@ -24,12 +25,12 @@ namespace cir { mlir::LogicalResult runCIRToCIRPasses( mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, - llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, - llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, - llvm::StringRef libOptOpts, std::string &passOptParsingFailure, - bool enableCIRSimplify, bool flattenCIR, bool emitMLIR, - bool enableCallConvLowering, bool enableMem2Reg) { + clang::CIRGen::CIRGenModule &cgm, clang::ASTContext &astCtx, + bool enableVerifier, bool enableLifetime, llvm::StringRef lifetimeOpts, + bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, + bool enableLibOpt, llvm::StringRef libOptOpts, + std::string &passOptParsingFailure, bool enableCIRSimplify, bool flattenCIR, + bool emitMLIR, bool enableCallConvLowering, bool enableMem2Reg) { llvm::TimeTraceScope scope("CIR To CIR Passes"); @@ -73,7 +74,7 @@ mlir::LogicalResult runCIRToCIRPasses( if (enableCIRSimplify) pm.addPass(mlir::createCIRSimplifyPass()); - pm.addPass(mlir::createLoweringPreparePass(&astCtx)); + pm.addPass(mlir::createLoweringPreparePass(&astCtx, cgm)); if (flattenCIR || enableMem2Reg) mlir::populateCIRPreLoweringPasses(pm, enableCallConvLowering); diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 02ac813ef732..e7d406c07f19 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -44,6 +44,7 @@ add_clang_library(clangCIR CIRPasses.cpp CIRRecordLayoutBuilder.cpp ConstantInitBuilder.cpp + LoweringPrepare.cpp TargetInfo.cpp DEPENDS diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/CodeGen/LoweringPrepare.cpp similarity index 99% rename from clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp rename to clang/lib/CIR/CodeGen/LoweringPrepare.cpp index 031c3b3b4b40..8cd63127a3a0 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/CodeGen/LoweringPrepare.cpp @@ -6,8 +6,8 @@ // //===----------------------------------------------------------------------===// -#include "LoweringPrepareCXXABI.h" -#include "PassDetail.h" +#include "CIRGenModule.h" + #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" #include "clang/AST/ASTContext.h" @@ -19,6 +19,8 @@ #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/SmallVector.h" @@ -121,6 +123,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { clang::ASTContext *astCtx; std::shared_ptr cxxABI; + clang::CIRGen::CIRGenModule *cgm; void setASTContext(clang::ASTContext *c) { astCtx = c; @@ -147,6 +150,8 @@ struct LoweringPreparePass : public LoweringPrepareBase { } } + void setCGM(clang::CIRGen::CIRGenModule &cgm) { this->cgm = &cgm; } + /// Tracks current module. ModuleOp theModule; @@ -1205,8 +1210,10 @@ std::unique_ptr mlir::createLoweringPreparePass() { } std::unique_ptr -mlir::createLoweringPreparePass(clang::ASTContext *astCtx) { +mlir::createLoweringPreparePass(clang::ASTContext *astCtx, + clang::CIRGen::CIRGenModule &cgm) { auto pass = std::make_unique(); pass->setASTContext(astCtx); + pass->setCGM(cgm); return std::move(pass); } diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp index 316a39b762e6..4f65353705bc 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp @@ -6,7 +6,6 @@ // //===----------------------------------------------------------------------===// -#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Block.h" #include "mlir/IR/Operation.h" @@ -16,6 +15,7 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" using namespace mlir; using namespace cir; diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp index 4cc0021ee287..1dc2004c3192 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp @@ -6,7 +6,6 @@ // //===----------------------------------------------------------------------===// -#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Block.h" #include "mlir/IR/Operation.h" @@ -16,6 +15,7 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "llvm/ADT/SmallVector.h" using namespace mlir; diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index 76ac0cbf1c8d..a51e9c1a76da 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -2,7 +2,6 @@ add_subdirectory(TargetLowering) add_clang_library(MLIRCIRTransforms LifetimeCheck.cpp - LoweringPrepare.cpp CIRCanonicalize.cpp CIRSimplify.cpp DropAST.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp index 716412c0f6d8..ebbf85fa0026 100644 --- a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp +++ b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp @@ -8,10 +8,10 @@ #include "clang/CIR/Dialect/Passes.h" -#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "clang/AST/ASTContext.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SmallSet.h" diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index d0ea2ec985d2..fcff0f276f2b 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -10,7 +10,6 @@ // function region. // //===----------------------------------------------------------------------===// -#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" @@ -18,6 +17,7 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" using namespace mlir; using namespace cir; diff --git a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp index c46f89e87d12..1e0125cdc17a 100644 --- a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp +++ b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp @@ -1,4 +1,3 @@ -#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" @@ -6,6 +5,7 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "llvm/Support/TimeProfiler.h" @@ -54,4 +54,4 @@ void GotoSolverPass::runOnOperation() { std::unique_ptr mlir::createGotoSolverPass() { return std::make_unique(); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp index 003e5425ebaa..21168073ed95 100644 --- a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp +++ b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp @@ -6,7 +6,6 @@ // //===----------------------------------------------------------------------===// -#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" @@ -14,6 +13,7 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "llvm/Support/TimeProfiler.h" @@ -62,4 +62,4 @@ void HoistAllocasPass::runOnOperation() { std::unique_ptr mlir::createHoistAllocasPass() { return std::make_unique(); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index 39a1ac4ef5ce..4a5d32049373 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -6,7 +6,6 @@ // //===----------------------------------------------------------------------===// -#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" @@ -16,6 +15,7 @@ #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" diff --git a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp index 30719a3d60f9..2c60381b46b1 100644 --- a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp @@ -6,7 +6,6 @@ // //===----------------------------------------------------------------------===// -#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" @@ -16,6 +15,7 @@ #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 368c36b48946..00be93128d1b 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -6,8 +6,6 @@ // //===----------------------------------------------------------------------===// -#include "PassDetail.h" - #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" @@ -15,6 +13,7 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "clang/CIR/Interfaces/CIRLoopOpInterface.h" #include "llvm/ADT/SetOperations.h" diff --git a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp index 6a46c4bad600..565550e80dc3 100644 --- a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp @@ -6,12 +6,12 @@ // //===----------------------------------------------------------------------===// -#include "PassDetail.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" using namespace mlir; using namespace cir; diff --git a/clang/lib/CIR/Dialect/Transforms/StdHelpers.h b/clang/lib/CIR/Dialect/Transforms/StdHelpers.h index 245e329fb1bd..cccb093d67d9 100644 --- a/clang/lib/CIR/Dialect/Transforms/StdHelpers.h +++ b/clang/lib/CIR/Dialect/Transforms/StdHelpers.h @@ -6,7 +6,6 @@ // //===----------------------------------------------------------------------===// -#include "PassDetail.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" #include "clang/AST/ASTContext.h" @@ -14,6 +13,7 @@ #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" +#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index ec47a929cb34..5db2746e6896 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -14,10 +14,10 @@ // TODO(cir): Refactor this to follow some level of codegen parity. -#include "../LoweringPrepareItaniumCXXABI.h" #include "clang/AST/CharUnits.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h" #include "clang/CIR/MissingFeatures.h" #include diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index bba759494e3b..07e3e2e4b4f7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -14,7 +14,7 @@ // TODO(cir): Refactor this to follow some level of codegen parity. -#include "../LoweringPrepareItaniumCXXABI.h" +#include "clang/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 62f7c664495e..b34b479228e0 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -187,6 +187,7 @@ class CIRGenConsumer : public clang::ASTConsumer { auto mlirMod = gen->getModule(); auto mlirCtx = gen->takeContext(); + auto &cgm = gen->getCGM(); auto setupCIRPipelineAndExecute = [&] { // Sanitize passes options. MLIR uses spaces between pass options @@ -205,15 +206,16 @@ class CIRGenConsumer : public clang::ASTConsumer { // Setup and run CIR pipeline. std::string passOptParsingFailure; - if (runCIRToCIRPasses( - mlirMod, mlirCtx.get(), C, !feOptions.ClangIRDisableCIRVerifier, - feOptions.ClangIRLifetimeCheck, lifetimeOpts, - feOptions.ClangIRIdiomRecognizer, idiomRecognizerOpts, - feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, - codeGenOptions.OptimizationLevel > 0, - action == CIRGenAction::OutputType::EmitCIRFlat, - action == CIRGenAction::OutputType::EmitMLIR, enableCCLowering, - feOptions.ClangIREnableMem2Reg) + if (runCIRToCIRPasses(mlirMod, mlirCtx.get(), cgm, C, + !feOptions.ClangIRDisableCIRVerifier, + feOptions.ClangIRLifetimeCheck, lifetimeOpts, + feOptions.ClangIRIdiomRecognizer, + idiomRecognizerOpts, feOptions.ClangIRLibOpt, + libOptOpts, passOptParsingFailure, + codeGenOptions.OptimizationLevel > 0, + action == CIRGenAction::OutputType::EmitCIRFlat, + action == CIRGenAction::OutputType::EmitMLIR, + enableCCLowering, feOptions.ClangIREnableMem2Reg) .failed()) { if (!passOptParsingFailure.empty()) diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index 6dae1455010c..be31fb9628d2 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -16,6 +16,7 @@ set(deps) if(CLANG_ENABLE_CIR) list(APPEND link_libs + clangCIR clangCIRFrontendAction MLIRCIRTransforms MLIRIR From b5bc92bbfcd0095fe104c407b6aa97bb0ab4dcdd Mon Sep 17 00:00:00 2001 From: 7mile Date: Sun, 10 Nov 2024 04:45:22 +0800 Subject: [PATCH 2061/2301] [CIR][CodeGen][LowerToLLVM] String literals for OpenCL (#1091) This PR supports string literals in OpenCL end to end, making it possible to use `printf`. This involves two changes: * In CIRGen, ensure we create the global symbol for string literals with correct `constant` address space. * In LowerToLLVM, make the lowering of `GlobalViewAttr` aware of the upstream address space. Other proper refactors are also applied. Two test cases from OG CodeGen are reused. `str_literals.cl` is the primary test, while `printf.cl` is the bonus one. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 13 +++-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 12 ++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 42 ++++++++------ clang/test/CIR/CodeGen/OpenCL/printf.cl | 55 +++++++++++++++++++ clang/test/CIR/CodeGen/OpenCL/str_literals.cl | 23 ++++++++ clang/test/CodeGenOpenCL/printf.cl | 1 + 7 files changed, 120 insertions(+), 31 deletions(-) create mode 100644 clang/test/CIR/CodeGen/OpenCL/printf.cl create mode 100644 clang/test/CIR/CodeGen/OpenCL/str_literals.cl diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 76a4c7174f25..c8b9a0a03cbf 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -375,13 +375,17 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return createAlloca(loc, addrType, type, name, alignmentIntAttr); } - mlir::Value createGetGlobal(cir::GlobalOp global, bool threadLocal = false) { + mlir::Value createGetGlobal(mlir::Location loc, cir::GlobalOp global, + bool threadLocal = false) { return create( - global.getLoc(), - getPointerTo(global.getSymType(), global.getAddrSpaceAttr()), + loc, getPointerTo(global.getSymType(), global.getAddrSpaceAttr()), global.getName(), threadLocal); } + mlir::Value createGetGlobal(cir::GlobalOp global, bool threadLocal = false) { + return createGetGlobal(global.getLoc(), global, threadLocal); + } + /// Create a copy with inferred length. cir::CopyOp createCopy(mlir::Value dst, mlir::Value src, bool isVolatile = false) { @@ -547,8 +551,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { }); if (last != block->rend()) - return OpBuilder::InsertPoint(block, - ++mlir::Block::iterator(&*last)); + return OpBuilder::InsertPoint(block, ++mlir::Block::iterator(&*last)); return OpBuilder::InsertPoint(block, block->begin()); }; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index ba2d4b1f185f..fc0dbfa4d751 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -1842,12 +1842,9 @@ LValue CIRGenFunction::emitStringLiteralLValue(const StringLiteral *E) { auto g = dyn_cast(cstGlobal); assert(g && "unaware of other symbol providers"); - auto ptrTy = - cir::PointerType::get(CGM.getBuilder().getContext(), g.getSymType()); assert(g.getAlignment() && "expected alignment for string literal"); auto align = *g.getAlignment(); - auto addr = builder.create(getLoc(E->getSourceRange()), - ptrTy, g.getSymName()); + auto addr = builder.createGetGlobal(getLoc(E->getSourceRange()), g); return makeAddrLValue( Address(addr, g.getSymType(), CharUnits::fromQuantity(align)), E->getType(), AlignmentSource::Decl); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index a1b1e9293c48..939d4dc40433 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1480,15 +1480,14 @@ static cir::GlobalOp generateStringLiteral(mlir::Location loc, mlir::TypedAttr C, cir::GlobalLinkageKind LT, CIRGenModule &CGM, StringRef GlobalName, CharUnits Alignment) { - unsigned AddrSpace = CGM.getASTContext().getTargetAddressSpace( - CGM.getGlobalConstantAddressSpace()); - assert((AddrSpace == 0 && !cir::MissingFeatures::addressSpaceInGlobalVar()) && - "NYI"); + cir::AddressSpaceAttr addrSpaceAttr = + CGM.getBuilder().getAddrSpaceAttr(CGM.getGlobalConstantAddressSpace()); // Create a global variable for this string // FIXME(cir): check for insertion point in module level. auto GV = CIRGenModule::createGlobalOp(CGM, loc, GlobalName, C.getType(), - !CGM.getLangOpts().WritableStrings); + !CGM.getLangOpts().WritableStrings, + addrSpaceAttr); // Set up extra information and add to the module GV.setAlignmentAttr(CGM.getSize(Alignment)); @@ -1559,7 +1558,8 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, auto ArrayTy = mlir::dyn_cast(GV.getSymType()); assert(ArrayTy && "String literal must be array"); - auto PtrTy = cir::PointerType::get(&getMLIRContext(), ArrayTy.getEltType()); + auto PtrTy = + getBuilder().getPointerTo(ArrayTy.getEltType(), GV.getAddrSpaceAttr()); return builder.getGlobalViewAttr(PtrTy, GV); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 82cdb0e21666..1d2fb1036b36 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -347,6 +347,19 @@ void lowerAnnotationValue( } } +// Get addrspace by converting a pointer type. +// TODO: The approach here is a little hacky. We should access the target info +// directly to convert the address space of global op, similar to what we do +// for type converter. +unsigned getGlobalOpTargetAddrSpace(mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter, + cir::GlobalOp op) { + auto tempPtrTy = cir::PointerType::get(rewriter.getContext(), op.getSymType(), + op.getAddrSpaceAttr()); + return cast(converter->convertType(tempPtrTy)) + .getAddressSpace(); +} + } // namespace //===----------------------------------------------------------------------===// @@ -568,28 +581,36 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, const mlir::TypeConverter *converter) { auto module = parentOp->getParentOfType(); mlir::Type sourceType; + unsigned sourceAddrSpace = 0; llvm::StringRef symName; auto *sourceSymbol = mlir::SymbolTable::lookupSymbolIn(module, globalAttr.getSymbol()); if (auto llvmSymbol = dyn_cast(sourceSymbol)) { sourceType = llvmSymbol.getType(); symName = llvmSymbol.getSymName(); + sourceAddrSpace = llvmSymbol.getAddrSpace(); } else if (auto cirSymbol = dyn_cast(sourceSymbol)) { sourceType = converter->convertType(cirSymbol.getSymType()); symName = cirSymbol.getSymName(); + sourceAddrSpace = + getGlobalOpTargetAddrSpace(rewriter, converter, cirSymbol); } else if (auto llvmFun = dyn_cast(sourceSymbol)) { sourceType = llvmFun.getFunctionType(); symName = llvmFun.getSymName(); + sourceAddrSpace = 0; } else if (auto fun = dyn_cast(sourceSymbol)) { sourceType = converter->convertType(fun.getFunctionType()); symName = fun.getSymName(); + sourceAddrSpace = 0; } else { llvm_unreachable("Unexpected GlobalOp type"); } auto loc = parentOp->getLoc(); mlir::Value addrOp = rewriter.create( - loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), symName); + loc, + mlir::LLVM::LLVMPointerType::get(rewriter.getContext(), sourceAddrSpace), + symName); if (globalAttr.getIndices()) { llvm::SmallVector indices; @@ -2323,18 +2344,6 @@ class CIRGlobalOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; - // Get addrspace by converting a pointer type. - // TODO: The approach here is a little hacky. We should access the target info - // directly to convert the address space of global op, similar to what we do - // for type converter. - unsigned getGlobalOpTargetAddrSpace(cir::GlobalOp op) const { - auto tempPtrTy = cir::PointerType::get(getContext(), op.getSymType(), - op.getAddrSpaceAttr()); - return cast( - typeConverter->convertType(tempPtrTy)) - .getAddressSpace(); - } - /// Replace CIR global with a region initialized LLVM global and update /// insertion point to the end of the initializer block. inline void setupRegionInitializedLLVMGlobalOp( @@ -2345,7 +2354,7 @@ class CIRGlobalOpLowering : public mlir::OpConversionPattern { op, llvmType, op.getConstant(), convertLinkage(op.getLinkage()), op.getSymName(), nullptr, /*alignment*/ op.getAlignment().value_or(0), - /*addrSpace*/ getGlobalOpTargetAddrSpace(op), + /*addrSpace*/ getGlobalOpTargetAddrSpace(rewriter, typeConverter, op), /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); newGlobalOp.getRegion().push_back(new mlir::Block()); @@ -2380,7 +2389,8 @@ class CIRGlobalOpLowering : public mlir::OpConversionPattern { if (!init.has_value()) { rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, mlir::Attribute(), - /*alignment*/ 0, /*addrSpace*/ getGlobalOpTargetAddrSpace(op), + /*alignment*/ 0, + /*addrSpace*/ getGlobalOpTargetAddrSpace(rewriter, typeConverter, op), /*dsoLocal*/ isDsoLocal, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); return mlir::success(); @@ -2469,7 +2479,7 @@ class CIRGlobalOpLowering : public mlir::OpConversionPattern { auto llvmGlobalOp = rewriter.replaceOpWithNewOp( op, llvmType, isConst, linkage, symbol, init.value(), /*alignment*/ op.getAlignment().value_or(0), - /*addrSpace*/ getGlobalOpTargetAddrSpace(op), + /*addrSpace*/ getGlobalOpTargetAddrSpace(rewriter, typeConverter, op), /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); diff --git a/clang/test/CIR/CodeGen/OpenCL/printf.cl b/clang/test/CIR/CodeGen/OpenCL/printf.cl new file mode 100644 index 000000000000..b539fce01c2b --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/printf.cl @@ -0,0 +1,55 @@ +// RUN: %clang_cc1 -fclangir -no-enable-noundef-analysis -cl-std=CL1.2 -cl-ext=-+cl_khr_fp64 -triple spirv64-unknown-unknown -disable-llvm-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.12fp64.cir %s +// RUN: FileCheck -input-file=%t.12fp64.cir -check-prefixes=CIR-FP64,CIR-ALL %s +// RUN: %clang_cc1 -fclangir -no-enable-noundef-analysis -cl-std=CL1.2 -cl-ext=-cl_khr_fp64 -triple spirv64-unknown-unknown -disable-llvm-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.12nofp64.cir %s +// RUN: FileCheck -input-file=%t.12nofp64.cir -check-prefixes=CIR-NOFP64,CIR-ALL %s +// RUN: %clang_cc1 -fclangir -no-enable-noundef-analysis -cl-std=CL3.0 -cl-ext=+__opencl_c_fp64,+cl_khr_fp64 -triple spirv64-unknown-unknown -disable-llvm-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.30fp64.cir %s +// RUN: FileCheck -input-file=%t.30fp64.cir -check-prefixes=CIR-FP64,CIR-ALL %s +// RUN: %clang_cc1 -fclangir -no-enable-noundef-analysis -cl-std=CL3.0 -cl-ext=-__opencl_c_fp64,-cl_khr_fp64 -triple spirv64-unknown-unknown -disable-llvm-passes -emit-cir -fno-clangir-call-conv-lowering -o %t.30nofp64.cir %s +// RUN: FileCheck -input-file=%t.30nofp64.cir -check-prefixes=CIR-NOFP64,CIR-ALL %s +// RUN: %clang_cc1 -fclangir -no-enable-noundef-analysis -cl-std=CL1.2 -cl-ext=-+cl_khr_fp64 -triple spirv64-unknown-unknown -disable-llvm-passes -emit-llvm -fno-clangir-call-conv-lowering -o %t.12fp64.ll %s +// RUN: FileCheck -input-file=%t.12fp64.ll -check-prefixes=LLVM-FP64,LLVM-ALL %s +// RUN: %clang_cc1 -fclangir -no-enable-noundef-analysis -cl-std=CL1.2 -cl-ext=-cl_khr_fp64 -triple spirv64-unknown-unknown -disable-llvm-passes -emit-llvm -fno-clangir-call-conv-lowering -o %t.12nofp64.ll %s +// RUN: FileCheck -input-file=%t.12nofp64.ll -check-prefixes=LLVM-NOFP64,LLVM-ALL %s +// RUN: %clang_cc1 -fclangir -no-enable-noundef-analysis -cl-std=CL3.0 -cl-ext=+__opencl_c_fp64,+cl_khr_fp64 -triple spirv64-unknown-unknown -disable-llvm-passes -emit-llvm -fno-clangir-call-conv-lowering -o %t.30fp64.ll %s +// RUN: FileCheck -input-file=%t.30fp64.ll -check-prefixes=LLVM-FP64,LLVM-ALL %s +// RUN: %clang_cc1 -fclangir -no-enable-noundef-analysis -cl-std=CL3.0 -cl-ext=-__opencl_c_fp64,-cl_khr_fp64 -triple spirv64-unknown-unknown -disable-llvm-passes -emit-llvm -fno-clangir-call-conv-lowering -o %t.30nofp64.ll %s +// RUN: FileCheck -input-file=%t.30nofp64.ll -check-prefixes=LLVM-NOFP64,LLVM-ALL %s + +typedef __attribute__((ext_vector_type(2))) float float2; +typedef __attribute__((ext_vector_type(2))) half half2; + +#if defined(cl_khr_fp64) || defined(__opencl_c_fp64) +typedef __attribute__((ext_vector_type(2))) double double2; +#endif + +int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2))); + +kernel void test_printf_float2(float2 arg) { + printf("%v2hlf", arg); +} +// CIR-ALL-LABEL: @test_printf_float2( +// CIR-FP64: %{{.+}} = cir.call @printf(%{{.+}}, %{{.+}}) : (!cir.ptr, !cir.vector) -> !s32i cc(spir_function) +// CIR-NOFP64:%{{.+}} = cir.call @printf(%{{.+}}, %{{.+}}) : (!cir.ptr, !cir.vector) -> !s32i cc(spir_function) +// LLVM-ALL-LABEL: @test_printf_float2( +// LLVM-FP64: %{{.+}} = call spir_func i32 (ptr addrspace(2), ...) @{{.*}}printf{{.*}}(ptr addrspace(2) @.str, <2 x float> %{{.*}}) +// LLVM-NOFP64: call spir_func i32 (ptr addrspace(2), ...) @{{.*}}printf{{.*}}(ptr addrspace(2) @.str, <2 x float> %{{.*}}) + +kernel void test_printf_half2(half2 arg) { + printf("%v2hf", arg); +} +// CIR-ALL-LABEL: @test_printf_half2( +// CIR-FP64: %{{.+}} = cir.call @printf(%{{.+}}, %{{.+}}) : (!cir.ptr, !cir.vector) -> !s32i cc(spir_function) +// CIR-NOFP64:%{{.+}} = cir.call @printf(%{{.+}}, %{{.+}}) : (!cir.ptr, !cir.vector) -> !s32i cc(spir_function) +// LLVM-ALL-LABEL: @test_printf_half2( +// LLVM-FP64: %{{.+}} = call spir_func i32 (ptr addrspace(2), ...) @{{.*}}printf{{.*}}(ptr addrspace(2) @.str.1, <2 x half> %{{.*}}) +// LLVM-NOFP64: %{{.+}} = call spir_func i32 (ptr addrspace(2), ...) @{{.*}}printf{{.*}}(ptr addrspace(2) @.str.1, <2 x half> %{{.*}}) + +#if defined(cl_khr_fp64) || defined(__opencl_c_fp64) +kernel void test_printf_double2(double2 arg) { + printf("%v2lf", arg); +} +// CIR-FP64-LABEL: @test_printf_double2( +// CIR-FP64: %{{.+}} = cir.call @printf(%{{.+}}, %{{.+}}) : (!cir.ptr, !cir.vector) -> !s32i cc(spir_function) +// LLVM-FP64-LABEL: @test_printf_double2( +// LLVM-FP64: call spir_func i32 (ptr addrspace(2), ...) @{{.*}}printf{{.*}}(ptr addrspace(2) @.str.2, <2 x double> %{{.*}}) +#endif diff --git a/clang/test/CIR/CodeGen/OpenCL/str_literals.cl b/clang/test/CIR/CodeGen/OpenCL/str_literals.cl new file mode 100644 index 000000000000..c60b0a449711 --- /dev/null +++ b/clang/test/CIR/CodeGen/OpenCL/str_literals.cl @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 %s -fclangir -triple=spirv64-unknown-unknown -cl-opt-disable -emit-cir -o %t.cir -ffake-address-space-map +// RUN: FileCheck -input-file=%t.cir -check-prefix=CIR %s +// RUN: %clang_cc1 %s -fclangir -triple=spirv64-unknown-unknown -cl-opt-disable -emit-llvm -o %t.ll -ffake-address-space-map +// RUN: FileCheck -input-file=%t.ll -check-prefix=LLVM %s + +__constant char *__constant x = "hello world"; +__constant char *__constant y = "hello world"; + +// CIR: cir.global{{.*}} constant {{.*}}addrspace(offload_constant) @".str" = #cir.const_array<"hello world\00" : !cir.array> : !cir.array +// CIR: cir.global{{.*}} constant {{.*}}addrspace(offload_constant) @x = #cir.global_view<@".str"> : !cir.ptr +// CIR: cir.global{{.*}} constant {{.*}}addrspace(offload_constant) @y = #cir.global_view<@".str"> : !cir.ptr +// CIR: cir.global{{.*}} constant {{.*}}addrspace(offload_constant) @".str.1" = #cir.const_array<"f\00" : !cir.array> : !cir.array +// LLVM: addrspace(2) constant{{.*}}"hello world\00" +// LLVM-NOT: addrspace(2) constant +// LLVM: @x = {{(dso_local )?}}addrspace(2) constant ptr addrspace(2) +// LLVM: @y = {{(dso_local )?}}addrspace(2) constant ptr addrspace(2) +// LLVM: addrspace(2) constant{{.*}}"f\00" + +void f() { + // CIR: cir.store %{{.*}}, %{{.*}} : !cir.ptr, !cir.ptr, addrspace(offload_private)> + // LLVM: store ptr addrspace(2) {{.*}}, ptr + constant const char *f3 = __func__; +} diff --git a/clang/test/CodeGenOpenCL/printf.cl b/clang/test/CodeGenOpenCL/printf.cl index 2e11b8889d23..012b7c822344 100644 --- a/clang/test/CodeGenOpenCL/printf.cl +++ b/clang/test/CodeGenOpenCL/printf.cl @@ -4,6 +4,7 @@ // RUN: %clang_cc1 -no-enable-noundef-analysis -cl-std=CL3.0 -cl-ext=-__opencl_c_fp64,-cl_khr_fp64 -triple spir-unknown-unknown -disable-llvm-passes -emit-llvm -o - %s | FileCheck -check-prefixes=NOFP64,ALL %s // RUN: %clang_cc1 -no-enable-noundef-analysis -cl-std=clc++2021 -cl-ext=+__opencl_c_fp64,+cl_khr_fp64 -triple spir-unknown-unknown -disable-llvm-passes -emit-llvm -o - %s | FileCheck -check-prefixes=FP64,ALL %s // RUN: %clang_cc1 -no-enable-noundef-analysis -cl-std=clc++2021 -cl-ext=-__opencl_c_fp64,-cl_khr_fp64 -triple spir-unknown-unknown -disable-llvm-passes -emit-llvm -o - %s | FileCheck -check-prefixes=NOFP64,ALL %s +// XFAIL: * typedef __attribute__((ext_vector_type(2))) float float2; typedef __attribute__((ext_vector_type(2))) half half2; From 5ba213ac7492e4745f0793e54379c6a9e4ea9c72 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Sat, 9 Nov 2024 13:03:07 -0800 Subject: [PATCH 2062/2301] Revert "[CIR][NFC] Move LoweringPrepare into CIRGen (#1092)" This reverts commit 446335228c09ac7a56d60cc3f4fc27ada3986f66. --- clang/include/clang/CIR/CIRGenerator.h | 1 - clang/include/clang/CIR/CIRToCIRPasses.h | 17 ++++++-------- clang/include/clang/CIR/Dialect/Passes.h | 10 ++------ clang/include/clang/CIR/Dialect/Passes.td | 4 ++-- clang/lib/CIR/CodeGen/CIRPasses.cpp | 15 ++++++------ clang/lib/CIR/CodeGen/CMakeLists.txt | 1 - .../Dialect/Transforms/CIRCanonicalize.cpp | 2 +- .../CIR/Dialect/Transforms/CIRSimplify.cpp | 2 +- .../lib/CIR/Dialect/Transforms/CMakeLists.txt | 1 + clang/lib/CIR/Dialect/Transforms/DropAST.cpp | 2 +- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 2 +- .../lib/CIR/Dialect/Transforms/GotoSolver.cpp | 4 ++-- .../CIR/Dialect/Transforms/HoistAllocas.cpp | 4 ++-- .../Dialect/Transforms/IdiomRecognizer.cpp | 2 +- clang/lib/CIR/Dialect/Transforms/LibOpt.cpp | 2 +- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 3 ++- .../Transforms}/LoweringPrepare.cpp | 13 +++-------- .../Transforms/LoweringPrepareCXXABI.h | 0 .../Transforms/LoweringPrepareItaniumCXXABI.h | 0 .../CIR/Dialect/Transforms/PassDetail.h | 0 .../lib/CIR/Dialect/Transforms/SCFPrepare.cpp | 2 +- clang/lib/CIR/Dialect/Transforms/StdHelpers.h | 2 +- .../Targets/LoweringPrepareAArch64CXXABI.cpp | 2 +- .../Targets/LoweringPrepareItaniumCXXABI.cpp | 2 +- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 23 ++++++++----------- clang/lib/FrontendTool/CMakeLists.txt | 1 - clang/test/CIR/CodeGen/OpenCL/printf.cl | 1 + 27 files changed, 49 insertions(+), 69 deletions(-) rename clang/lib/CIR/{CodeGen => Dialect/Transforms}/LoweringPrepare.cpp (99%) rename clang/{include/clang => lib}/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h (100%) rename clang/{include/clang => lib}/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h (100%) rename clang/{include/clang => lib}/CIR/Dialect/Transforms/PassDetail.h (100%) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index 52738824bef4..f4c30a5e892b 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -98,7 +98,6 @@ class CIRGenerator : public clang::ASTConsumer { std::unique_ptr takeContext() { return std::move(mlirCtx); }; - clang::CIRGen::CIRGenModule &getCGM() { return *CGM; } bool verifyModule(); diff --git a/clang/include/clang/CIR/CIRToCIRPasses.h b/clang/include/clang/CIR/CIRToCIRPasses.h index 02a2795b94eb..4ad4aeebb22e 100644 --- a/clang/include/clang/CIR/CIRToCIRPasses.h +++ b/clang/include/clang/CIR/CIRToCIRPasses.h @@ -18,10 +18,7 @@ namespace clang { class ASTContext; -namespace CIRGen { -class CIRGenModule; -} // namespace CIRGen -} // namespace clang +} namespace mlir { class MLIRContext; @@ -33,12 +30,12 @@ namespace cir { // Run set of cleanup/prepare/etc passes CIR <-> CIR. mlir::LogicalResult runCIRToCIRPasses( mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::CIRGen::CIRGenModule &cgm, clang::ASTContext &astCtx, - bool enableVerifier, bool enableLifetime, llvm::StringRef lifetimeOpts, - bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, - bool enableLibOpt, llvm::StringRef libOptOpts, - std::string &passOptParsingFailure, bool enableCIRSimplify, bool flattenCIR, - bool emitMLIR, bool enableCallConvLowering, bool enableMem2reg); + clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, + llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, + llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, + llvm::StringRef libOptOpts, std::string &passOptParsingFailure, + bool enableCIRSimplify, bool flattenCIR, bool emitMLIR, + bool enableCallConvLowering, bool enableMem2reg); } // namespace cir diff --git a/clang/include/clang/CIR/Dialect/Passes.h b/clang/include/clang/CIR/Dialect/Passes.h index ca1cc40353e6..d6cd4831a6af 100644 --- a/clang/include/clang/CIR/Dialect/Passes.h +++ b/clang/include/clang/CIR/Dialect/Passes.h @@ -17,11 +17,7 @@ namespace clang { class ASTContext; -namespace CIRGen { -class CIRGenModule; -} // namespace CIRGen -} // namespace clang - +} namespace mlir { std::unique_ptr createLifetimeCheckPass(); @@ -35,9 +31,7 @@ std::unique_ptr createCIRSimplifyPass(); std::unique_ptr createDropASTPass(); std::unique_ptr createSCFPreparePass(); std::unique_ptr createLoweringPreparePass(); -std::unique_ptr -createLoweringPreparePass(clang::ASTContext *astCtx, - clang::CIRGen::CIRGenModule &cgm); +std::unique_ptr createLoweringPreparePass(clang::ASTContext *astCtx); std::unique_ptr createIdiomRecognizerPass(); std::unique_ptr createIdiomRecognizerPass(clang::ASTContext *astCtx); std::unique_ptr createLibOptPass(); diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 1ea41cdd34a2..4a8d2bfa9672 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -109,7 +109,7 @@ def SCFPrepare : Pass<"cir-mlir-scf-prepare"> { def HoistAllocas : Pass<"cir-hoist-allocas"> { let summary = "Hoist allocas to the entry of the function"; - let description = [{ + let description = [{ This pass hoist all non-dynamic allocas to the entry of the function. This is helpful for later code generation. }]; @@ -119,7 +119,7 @@ def HoistAllocas : Pass<"cir-hoist-allocas"> { def FlattenCFG : Pass<"cir-flatten-cfg"> { let summary = "Produces flatten cfg"; - let description = [{ + let description = [{ This pass transforms CIR and inline all the nested regions. Thus, the next post condtions are met after the pass applied: - there is not any nested region in a function body diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 5cbbf0cd7477..65b43cfc6ffd 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -13,7 +13,6 @@ #include "clang/AST/ASTContext.h" #include "clang/CIR/Dialect/Passes.h" -#include "CIRGenModule.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" @@ -25,12 +24,12 @@ namespace cir { mlir::LogicalResult runCIRToCIRPasses( mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::CIRGen::CIRGenModule &cgm, clang::ASTContext &astCtx, - bool enableVerifier, bool enableLifetime, llvm::StringRef lifetimeOpts, - bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, - bool enableLibOpt, llvm::StringRef libOptOpts, - std::string &passOptParsingFailure, bool enableCIRSimplify, bool flattenCIR, - bool emitMLIR, bool enableCallConvLowering, bool enableMem2Reg) { + clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, + llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, + llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, + llvm::StringRef libOptOpts, std::string &passOptParsingFailure, + bool enableCIRSimplify, bool flattenCIR, bool emitMLIR, + bool enableCallConvLowering, bool enableMem2Reg) { llvm::TimeTraceScope scope("CIR To CIR Passes"); @@ -74,7 +73,7 @@ mlir::LogicalResult runCIRToCIRPasses( if (enableCIRSimplify) pm.addPass(mlir::createCIRSimplifyPass()); - pm.addPass(mlir::createLoweringPreparePass(&astCtx, cgm)); + pm.addPass(mlir::createLoweringPreparePass(&astCtx)); if (flattenCIR || enableMem2Reg) mlir::populateCIRPreLoweringPasses(pm, enableCallConvLowering); diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index e7d406c07f19..02ac813ef732 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -44,7 +44,6 @@ add_clang_library(clangCIR CIRPasses.cpp CIRRecordLayoutBuilder.cpp ConstantInitBuilder.cpp - LoweringPrepare.cpp TargetInfo.cpp DEPENDS diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp index 4f65353705bc..316a39b762e6 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Block.h" #include "mlir/IR/Operation.h" @@ -15,7 +16,6 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" using namespace mlir; using namespace cir; diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp index 1dc2004c3192..4cc0021ee287 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Block.h" #include "mlir/IR/Operation.h" @@ -15,7 +16,6 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "llvm/ADT/SmallVector.h" using namespace mlir; diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt index a51e9c1a76da..76ac0cbf1c8d 100644 --- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt @@ -2,6 +2,7 @@ add_subdirectory(TargetLowering) add_clang_library(MLIRCIRTransforms LifetimeCheck.cpp + LoweringPrepare.cpp CIRCanonicalize.cpp CIRSimplify.cpp DropAST.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp index ebbf85fa0026..716412c0f6d8 100644 --- a/clang/lib/CIR/Dialect/Transforms/DropAST.cpp +++ b/clang/lib/CIR/Dialect/Transforms/DropAST.cpp @@ -8,10 +8,10 @@ #include "clang/CIR/Dialect/Passes.h" +#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "clang/AST/ASTContext.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SmallSet.h" diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index fcff0f276f2b..d0ea2ec985d2 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -10,6 +10,7 @@ // function region. // //===----------------------------------------------------------------------===// +#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" @@ -17,7 +18,6 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" using namespace mlir; using namespace cir; diff --git a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp index 1e0125cdc17a..c46f89e87d12 100644 --- a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp +++ b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp @@ -1,3 +1,4 @@ +#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" @@ -5,7 +6,6 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "llvm/Support/TimeProfiler.h" @@ -54,4 +54,4 @@ void GotoSolverPass::runOnOperation() { std::unique_ptr mlir::createGotoSolverPass() { return std::make_unique(); -} +} \ No newline at end of file diff --git a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp index 21168073ed95..003e5425ebaa 100644 --- a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp +++ b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" @@ -13,7 +14,6 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "llvm/Support/TimeProfiler.h" @@ -62,4 +62,4 @@ void HoistAllocasPass::runOnOperation() { std::unique_ptr mlir::createHoistAllocasPass() { return std::make_unique(); -} +} \ No newline at end of file diff --git a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp index 4a5d32049373..39a1ac4ef5ce 100644 --- a/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp +++ b/clang/lib/CIR/Dialect/Transforms/IdiomRecognizer.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" @@ -15,7 +16,6 @@ #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" diff --git a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp index 2c60381b46b1..30719a3d60f9 100644 --- a/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LibOpt.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "PassDetail.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" @@ -15,7 +16,6 @@ #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 00be93128d1b..368c36b48946 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -6,6 +6,8 @@ // //===----------------------------------------------------------------------===// +#include "PassDetail.h" + #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" @@ -13,7 +15,6 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "clang/CIR/Interfaces/CIRLoopOpInterface.h" #include "llvm/ADT/SetOperations.h" diff --git a/clang/lib/CIR/CodeGen/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp similarity index 99% rename from clang/lib/CIR/CodeGen/LoweringPrepare.cpp rename to clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 8cd63127a3a0..031c3b3b4b40 100644 --- a/clang/lib/CIR/CodeGen/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -6,8 +6,8 @@ // //===----------------------------------------------------------------------===// -#include "CIRGenModule.h" - +#include "LoweringPrepareCXXABI.h" +#include "PassDetail.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" #include "clang/AST/ASTContext.h" @@ -19,8 +19,6 @@ #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/SmallVector.h" @@ -123,7 +121,6 @@ struct LoweringPreparePass : public LoweringPrepareBase { clang::ASTContext *astCtx; std::shared_ptr cxxABI; - clang::CIRGen::CIRGenModule *cgm; void setASTContext(clang::ASTContext *c) { astCtx = c; @@ -150,8 +147,6 @@ struct LoweringPreparePass : public LoweringPrepareBase { } } - void setCGM(clang::CIRGen::CIRGenModule &cgm) { this->cgm = &cgm; } - /// Tracks current module. ModuleOp theModule; @@ -1210,10 +1205,8 @@ std::unique_ptr mlir::createLoweringPreparePass() { } std::unique_ptr -mlir::createLoweringPreparePass(clang::ASTContext *astCtx, - clang::CIRGen::CIRGenModule &cgm) { +mlir::createLoweringPreparePass(clang::ASTContext *astCtx) { auto pass = std::make_unique(); pass->setASTContext(astCtx); - pass->setCGM(cgm); return std::move(pass); } diff --git a/clang/include/clang/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h similarity index 100% rename from clang/include/clang/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h rename to clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h diff --git a/clang/include/clang/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h similarity index 100% rename from clang/include/clang/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h rename to clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h diff --git a/clang/include/clang/CIR/Dialect/Transforms/PassDetail.h b/clang/lib/CIR/Dialect/Transforms/PassDetail.h similarity index 100% rename from clang/include/clang/CIR/Dialect/Transforms/PassDetail.h rename to clang/lib/CIR/Dialect/Transforms/PassDetail.h diff --git a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp index 565550e80dc3..6a46c4bad600 100644 --- a/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/SCFPrepare.cpp @@ -6,12 +6,12 @@ // //===----------------------------------------------------------------------===// +#include "PassDetail.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" using namespace mlir; using namespace cir; diff --git a/clang/lib/CIR/Dialect/Transforms/StdHelpers.h b/clang/lib/CIR/Dialect/Transforms/StdHelpers.h index cccb093d67d9..245e329fb1bd 100644 --- a/clang/lib/CIR/Dialect/Transforms/StdHelpers.h +++ b/clang/lib/CIR/Dialect/Transforms/StdHelpers.h @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "PassDetail.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Region.h" #include "clang/AST/ASTContext.h" @@ -13,7 +14,6 @@ #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/Passes.h" -#include "clang/CIR/Dialect/Transforms/PassDetail.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp index 5db2746e6896..ec47a929cb34 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareAArch64CXXABI.cpp @@ -14,10 +14,10 @@ // TODO(cir): Refactor this to follow some level of codegen parity. +#include "../LoweringPrepareItaniumCXXABI.h" #include "clang/AST/CharUnits.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "clang/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h" #include "clang/CIR/MissingFeatures.h" #include diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index 07e3e2e4b4f7..bba759494e3b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -14,7 +14,7 @@ // TODO(cir): Refactor this to follow some level of codegen parity. -#include "clang/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h" +#include "../LoweringPrepareItaniumCXXABI.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index b34b479228e0..3bb9cf03c970 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -187,7 +187,6 @@ class CIRGenConsumer : public clang::ASTConsumer { auto mlirMod = gen->getModule(); auto mlirCtx = gen->takeContext(); - auto &cgm = gen->getCGM(); auto setupCIRPipelineAndExecute = [&] { // Sanitize passes options. MLIR uses spaces between pass options @@ -206,16 +205,15 @@ class CIRGenConsumer : public clang::ASTConsumer { // Setup and run CIR pipeline. std::string passOptParsingFailure; - if (runCIRToCIRPasses(mlirMod, mlirCtx.get(), cgm, C, - !feOptions.ClangIRDisableCIRVerifier, - feOptions.ClangIRLifetimeCheck, lifetimeOpts, - feOptions.ClangIRIdiomRecognizer, - idiomRecognizerOpts, feOptions.ClangIRLibOpt, - libOptOpts, passOptParsingFailure, - codeGenOptions.OptimizationLevel > 0, - action == CIRGenAction::OutputType::EmitCIRFlat, - action == CIRGenAction::OutputType::EmitMLIR, - enableCCLowering, feOptions.ClangIREnableMem2Reg) + if (runCIRToCIRPasses( + mlirMod, mlirCtx.get(), C, !feOptions.ClangIRDisableCIRVerifier, + feOptions.ClangIRLifetimeCheck, lifetimeOpts, + feOptions.ClangIRIdiomRecognizer, idiomRecognizerOpts, + feOptions.ClangIRLibOpt, libOptOpts, passOptParsingFailure, + codeGenOptions.OptimizationLevel > 0, + action == CIRGenAction::OutputType::EmitCIRFlat, + action == CIRGenAction::OutputType::EmitMLIR, enableCCLowering, + feOptions.ClangIREnableMem2Reg) .failed()) { if (!passOptParsingFailure.empty()) diagnosticsEngine.Report(diag::err_drv_cir_pass_opt_parsing) @@ -486,8 +484,7 @@ EmitObjAction::EmitObjAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitObj, _MLIRContext) {} } // namespace cir -// Used for -fclangir-analysis-only: use CIR analysis but still use original -// LLVM codegen path +// Used for -fclangir-analysis-only: use CIR analysis but still use original LLVM codegen path void AnalysisOnlyActionBase::anchor() {} AnalysisOnlyActionBase::AnalysisOnlyActionBase(unsigned _Act, llvm::LLVMContext *_VMContext) diff --git a/clang/lib/FrontendTool/CMakeLists.txt b/clang/lib/FrontendTool/CMakeLists.txt index be31fb9628d2..6dae1455010c 100644 --- a/clang/lib/FrontendTool/CMakeLists.txt +++ b/clang/lib/FrontendTool/CMakeLists.txt @@ -16,7 +16,6 @@ set(deps) if(CLANG_ENABLE_CIR) list(APPEND link_libs - clangCIR clangCIRFrontendAction MLIRCIRTransforms MLIRIR diff --git a/clang/test/CIR/CodeGen/OpenCL/printf.cl b/clang/test/CIR/CodeGen/OpenCL/printf.cl index b539fce01c2b..180e194d8153 100644 --- a/clang/test/CIR/CodeGen/OpenCL/printf.cl +++ b/clang/test/CIR/CodeGen/OpenCL/printf.cl @@ -14,6 +14,7 @@ // RUN: FileCheck -input-file=%t.30fp64.ll -check-prefixes=LLVM-FP64,LLVM-ALL %s // RUN: %clang_cc1 -fclangir -no-enable-noundef-analysis -cl-std=CL3.0 -cl-ext=-__opencl_c_fp64,-cl_khr_fp64 -triple spirv64-unknown-unknown -disable-llvm-passes -emit-llvm -fno-clangir-call-conv-lowering -o %t.30nofp64.ll %s // RUN: FileCheck -input-file=%t.30nofp64.ll -check-prefixes=LLVM-NOFP64,LLVM-ALL %s +// XFAIL: * typedef __attribute__((ext_vector_type(2))) float float2; typedef __attribute__((ext_vector_type(2))) half half2; From 3bef174804a089abc5478f51537a299acf56fe3f Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Tue, 12 Nov 2024 02:47:32 +0800 Subject: [PATCH 2063/2301] [CIR][CIRGen] support builtin signbit (#1033) This patch adds support for the `__builtin_signbit` intrinsic. The intrinsic requires special handling for PowerPC; however, since ClangIR does not currently support PowerPC, this handling is omitted in this implementation. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 5 +++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 13 ++++++++ clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 15 +++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 33 ++++++++++++++++++- clang/test/CIR/CodeGen/builtin-signbit.c | 25 ++++++++++++++ 6 files changed, 89 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-signbit.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index c8b9a0a03cbf..f1275a472f3c 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -397,6 +397,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return create(loc, dst, src, len); } + cir::SignBitOp createSignBit(mlir::Location loc, mlir::Value val) { + auto resTy = cir::IntType::get(getContext(), 32, true); + return create(loc, resTy, val); + } + mlir::Value createSub(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, bool hasNSW = false) { auto op = create(lhs.getLoc(), lhs.getType(), diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 02f30833f3db..7c7491e495c0 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -5127,4 +5127,17 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", let hasVerifier = 0; } +def SignBitOp : CIR_Op<"signbit", [Pure]> { + let summary = "Checks the sign of a floating-point number"; + let description = [{ + It returns a non-zero value (true) if the number is negative + and zero (false) if the number is positive or zero. + }]; + let arguments = (ins CIR_AnyFloat:$input); + let results = (outs SInt32:$res); + let assemblyFormat = [{ + $input attr-dict `:` type($input) `->` qualified(type($res)) + }]; +} + #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index afc1e6b4f148..b9f81fcd3e22 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -228,6 +228,7 @@ struct MissingFeatures { static bool xray() { return false; } static bool emitConstrainedFPCall() { return false; } static bool emitEmptyRecordCheck() { return false; } + static bool isPPC_FP128Ty() { return false; } // Inline assembly static bool asmGoto() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 49674ff2780c..638386521818 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -216,6 +216,13 @@ static mlir::Value emitFromInt(CIRGenFunction &CGF, mlir::Value v, QualType t, return v; } +static mlir::Value emitSignBit(mlir::Location loc, CIRGenFunction &CGF, + mlir::Value val) { + assert(!::cir::MissingFeatures::isPPC_FP128Ty()); + auto ret = CGF.getBuilder().createSignBit(loc, val); + return ret->getResult(0); +} + static Address checkAtomicAlignment(CIRGenFunction &CGF, const CallExpr *E) { ASTContext &ctx = CGF.getContext(); Address ptr = CGF.emitPointerWithAlignment(E->getArg(0)); @@ -1700,8 +1707,12 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_signbit: case Builtin::BI__builtin_signbitf: - case Builtin::BI__builtin_signbitl: - llvm_unreachable("BI__builtin_signbit like NYI"); + case Builtin::BI__builtin_signbitl: { + auto loc = getLoc(E->getBeginLoc()); + return RValue::get(builder.createZExtOrBitCast( + loc, emitSignBit(loc, *this, emitScalarExpr(E->getArg(0))), + ConvertType(E->getType()))); + } case Builtin::BI__warn_memset_zero_len: llvm_unreachable("BI__warn_memset_zero_len NYI"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 1d2fb1036b36..cba17c21f3e9 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4305,6 +4305,37 @@ class CIRAbsOpLowering : public mlir::OpConversionPattern { return mlir::success(); } }; +class CIRSignBitOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::SignBitOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + assert(!::cir::MissingFeatures::isPPC_FP128Ty()); + + mlir::DataLayout layout(op->getParentOfType()); + int width = layout.getTypeSizeInBits(op.getInput().getType()); + if (auto longDoubleType = + mlir::dyn_cast(op.getInput().getType())) { + if (mlir::isa(longDoubleType.getUnderlying())) { + // see https://github.com/llvm/clangir/issues/1057 + llvm_unreachable("NYI"); + } + } + auto intTy = mlir::IntegerType::get(rewriter.getContext(), width); + auto bitcast = rewriter.create(op->getLoc(), intTy, + adaptor.getInput()); + auto zero = rewriter.create(op->getLoc(), intTy, 0); + auto cmpResult = rewriter.create( + op.getLoc(), mlir::LLVM::ICmpPredicate::slt, bitcast.getResult(), zero); + auto converted = rewriter.create( + op.getLoc(), mlir::IntegerType::get(rewriter.getContext(), 32), + cmpResult); + rewriter.replaceOp(op, converted); + return mlir::success(); + } +}; void populateCIRToLLVMConversionPatterns( mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter, @@ -4353,7 +4384,7 @@ void populateCIRToLLVMConversionPatterns( CIRAssumeLowering, CIRAssumeAlignedLowering, CIRAssumeSepStorageLowering, CIRBaseClassAddrOpLowering, CIRDerivedClassAddrOpLowering, CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering, - CIRMemMoveOpLowering, CIRMemsetOpLowering + CIRMemMoveOpLowering, CIRMemsetOpLowering, CIRSignBitOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/test/CIR/CodeGen/builtin-signbit.c b/clang/test/CIR/CodeGen/builtin-signbit.c new file mode 100644 index 000000000000..78b25ae4bf9e --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-signbit.c @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +void test_signbit_float(float val) { + // CIR-LABEL: test_signbit_float + // CIR: %{{.+}} = cir.signbit %{{.+}} : !cir.float -> !s32i + // LLVM-LABEL: test_signbit_float + // LLVM: [[TMP1:%.*]] = bitcast float %{{.+}} to i32 + // LLVM: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0 + // LLVM: %{{.+}} = zext i1 [[TMP2]] to i32 + __builtin_signbit(val); +} + +void test_signbit_double(double val) { + // CIR-LABEL: test_signbit_double + // CIR: %{{.+}} = cir.signbit %{{.+}} : !cir.float -> !s32i + // LLVM-LABEL: test_signbit_double + // LLVM: [[CONV:%.*]] = fptrunc double %{{.+}} to float + // LLVM: [[TMP1:%.*]] = bitcast float [[CONV]] to i32 + // LLVM: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0 + // LLVM: %{{.+}} = zext i1 [[TMP2]] to i32 + __builtin_signbitf(val); +} From 65979e61b90777545fe1d8c5ca67172854244276 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 11 Nov 2024 13:48:59 -0500 Subject: [PATCH 2064/2301] [CIR][CIRGen][Builtin] Support __builtin_memcpy_inline (#1069) The test code is from [OG's clang/test/CodeGen/builtins-memcpy-inline.c](https://github.com/llvm/clangir/blob/5f1afad625f1292ffcf02c36402d292c46213c86/clang/test/CodeGen/builtins-memcpy-inline.c#L7) Also, a little design choice when introducing MemCpyInlineOp, I chose to let it inherit CIR_MemCpyOp, so in future when we optimize MemCpy like Ops, we'd have cleaner and more unified code. However, the cost is that during LLVM lowering I'd have to convert the length from ConstOp into IntegerAttr as that's [what LLVM dialect is expecting](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrmemcpyinline-llvmmemcpyinlineop) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 61 +++++++++++++++---- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 18 +++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 19 +++++- clang/test/CIR/CodeGen/builtins-memory.c | 38 ++++++++++++ 4 files changed, 121 insertions(+), 15 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7c7491e495c0..a829e1c99d79 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4003,19 +4003,14 @@ def CopyOp : CIR_Op<"copy", // MemCpyOp && MemMoveOp //===----------------------------------------------------------------------===// -class CIR_MemCpyOp: CIR_Op]> { - let arguments = (ins Arg:$dst, - Arg:$src, - PrimitiveUInt:$len); +class CIR_MemOp + : CIR_Op]> { + dag commonArgs = (ins Arg:$dst, + Arg:$src); let hasVerifier = 0; - - let extraClassDeclaration = [{ - /// Returns the byte length type. - cir::IntType getLenTy() { return getLen().getType(); } - }]; } -def MemCpyOp : CIR_MemCpyOp<"libc.memcpy"> { +def MemCpyOp : CIR_MemOp<"libc.memcpy"> { let summary = "Equivalent to libc's `memcpy`"; let description = [{ Given two CIR pointers, `src` and `dst`, `cir.libc.memcpy` will copy `len` @@ -4034,13 +4029,20 @@ def MemCpyOp : CIR_MemCpyOp<"libc.memcpy"> { ``` }]; + let arguments = !con(commonArgs, (ins PrimitiveUInt:$len)); + let assemblyFormat = [{ $len `bytes` `from` $src `to` $dst attr-dict `:` type($len) `` `,` qualified(type($src)) `->` qualified(type($dst)) }]; + + let extraClassDeclaration = [{ + /// Returns the byte length type. + cir::IntType getLenTy() { return getLen().getType(); } + }]; } -def MemMoveOp : CIR_MemCpyOp<"libc.memmove"> { +def MemMoveOp : CIR_MemOp<"libc.memmove"> { let summary = "Equivalent to libc's `memmove`"; let description = [{ Given two CIR pointers, `src` and `dst`, `cir.libc.memmove` will copy `len` @@ -4057,12 +4059,49 @@ def MemMoveOp : CIR_MemCpyOp<"libc.memmove"> { ``` }]; + let arguments = !con(commonArgs, (ins PrimitiveUInt:$len)); let assemblyFormat = [{ $len `bytes` `from` $src `to` $dst attr-dict `:` qualified(type($dst)) `,` type($len) }]; + + let extraClassDeclaration = [{ + /// Returns the byte length type. + cir::IntType getLenTy() { return getLen().getType(); } + }]; } + +//===----------------------------------------------------------------------===// +// MemCpyInlineOp +//===----------------------------------------------------------------------===// + +def MemCpyInlineOp : CIR_MemOp<"memcpy_inline"> { + let summary = "Memory copy with constant length without calling" + "any external function"; + let description = [{ + Given two CIR pointers, `src` and `dst`, `memcpy_inline` will copy `len` + bytes from the memory pointed by `src` to the memory pointed by `dst`. + + Unlike `cir.libc.memcpy`, this Op guarantees that no external functions + are called, and length of copied bytes is a constant. + + Examples: + + ```mlir + // Copying 2 bytes from one array to a struct: + cir.memcpy_inline 2 bytes from %arr to %struct : !cir.ptr -> !cir.ptr + ``` + }]; + + let arguments = !con(commonArgs, (ins I64Attr:$len)); + + let assemblyFormat = [{ + $len `bytes` `from` $src `to` $dst attr-dict + `:` qualified(type($src)) `->` qualified(type($dst)) + }]; +} + //===----------------------------------------------------------------------===// // MemSetOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 638386521818..21fd90819ced 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1415,8 +1415,22 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(Dest.getPointer()); } - case Builtin::BI__builtin_memcpy_inline: - llvm_unreachable("BI__builtin_memcpy_inline NYI"); + case Builtin::BI__builtin_memcpy_inline: { + Address dest = emitPointerWithAlignment(E->getArg(0)); + Address src = emitPointerWithAlignment(E->getArg(1)); + emitNonNullArgCheck(RValue::get(dest.getPointer()), E->getArg(0)->getType(), + E->getArg(0)->getExprLoc(), FD, 0); + emitNonNullArgCheck(RValue::get(src.getPointer()), E->getArg(1)->getType(), + E->getArg(1)->getExprLoc(), FD, 1); + uint64_t size = + E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue(); + builder.create( + getLoc(E->getSourceRange()), dest.getPointer(), src.getPointer(), + mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), + size)); + // __builtin_memcpy_inline has no return value + return RValue::get(nullptr); + } case Builtin::BI__builtin_char_memchr: case Builtin::BI__builtin_memchr: { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index cba17c21f3e9..8e94cd91c465 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -778,6 +778,21 @@ class CIRMemChrOpLowering : public mlir::OpConversionPattern { } }; +class CIRMemCpyInlineOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::MemCpyInlineOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLenAttr(), + /*isVolatile=*/false); + return mlir::success(); + } +}; + class CIRMemMoveOpLowering : public mlir::OpConversionPattern { public: using mlir::OpConversionPattern::OpConversionPattern; @@ -4363,8 +4378,8 @@ void populateCIRToLLVMConversionPatterns( CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, - CIRMemCpyOpLowering, CIRMemChrOpLowering, CIRFAbsOpLowering, - CIRExpectOpLowering, CIRVTableAddrPointOpLowering, + CIRMemCpyOpLowering, CIRMemChrOpLowering, CIRMemCpyInlineOpLowering, + CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, CIRVectorShuffleVecLowering, CIRStackSaveLowering, CIRUnreachableLowering, diff --git a/clang/test/CIR/CodeGen/builtins-memory.c b/clang/test/CIR/CodeGen/builtins-memory.c index 439b82e98d33..940e09a8ed6d 100644 --- a/clang/test/CIR/CodeGen/builtins-memory.c +++ b/clang/test/CIR/CodeGen/builtins-memory.c @@ -1,5 +1,8 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - \ +// RUN: | opt -S -passes=instcombine,mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s typedef __SIZE_TYPE__ size_t; void test_memcpy_chk(void *dest, const void *src, size_t n) { @@ -107,3 +110,38 @@ void test_memset_chk(void *dest, int ch, size_t n) { // CIR: cir.call @__memset_chk(%[[#DEST_LOAD]], %[[#CH_LOAD]], %[[#N_LOAD1]], %[[#N_LOAD2]]) __builtin___memset_chk(dest, ch, n, n); } + +// FIXME: The test should test intrinsic argument alignment, however, +// currently we lack support for argument attributes. +// Thus, added `COM: LLVM:` lines so we can easily flip the test +// when the support of argument attributes is in. +void test_memcpy_inline(void *dst, const void *src, size_t n) { + + // CIR-LABEL: test_memcpy_inline + // CIR: cir.memcpy_inline 0 bytes from {{%.*}} to {{%.*}} : !cir.ptr -> !cir.ptr + + // LLVM-LABEL: test_memcpy_inline + // LLVM: call void @llvm.memcpy.inline.p0.p0.i64(ptr {{%.*}}, ptr {{%.*}}, i64 0, i1 false) + // COM: LLVM: call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 {{%.*}}, ptr align 1 {{%.*}}, i64 0, i1 false) + __builtin_memcpy_inline(dst, src, 0); + + // CIR: cir.memcpy_inline 1 bytes from {{%.*}} to {{%.*}} : !cir.ptr -> !cir.ptr + + // LLVM: call void @llvm.memcpy.inline.p0.p0.i64(ptr {{%.*}}, ptr {{%.*}}, i64 1, i1 false) + // COM: LLVM: call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 {{%.*}}, ptr align 1 {{%.*}}, i64 1, i1 false) + __builtin_memcpy_inline(dst, src, 1); + + // CIR: cir.memcpy_inline 4 bytes from {{%.*}} to {{%.*}} : !cir.ptr -> !cir.ptr + + // LLVM: call void @llvm.memcpy.inline.p0.p0.i64(ptr {{%.*}}, ptr {{%.*}}, i64 4, i1 false) + // COM: LLVM: call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 {{%.*}}, ptr align 1 {{%.*}}, i64 4, i1 false) + __builtin_memcpy_inline(dst, src, 4); +} + +void test_memcpy_inline_aligned_buffers(unsigned long long *dst, const unsigned long long *src) { + + // LLVM-LABEL: test_memcpy_inline_aligned_buffers + // LLVM: call void @llvm.memcpy.inline.p0.p0.i64(ptr {{%.*}}, ptr {{%.*}}, i64 4, i1 false) + // COM: LLVM: call void @llvm.memcpy.inline.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 {{%.*}}, i64 4, i1 false) + __builtin_memcpy_inline(dst, src, 4); +} From 4a2c3b2bf8f08d4be9949e9a0c69f88726060a1c Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 11 Nov 2024 15:08:57 -0500 Subject: [PATCH 2065/2301] [CIR][CIRGen][Builtin] Support __sync_add_and_fetch (#1077) Notable change is to introduce helper function `buildBinaryAtomicPost` which models on [OG's `EmitBinaryAtomicPost`](https://github.com/llvm/clangir/blob/dbf320e5c3db0410566ae561067c595308870bad/clang/lib/CodeGen/CGBuiltin.cpp#L340C15-L340C35). Comparing to `EmitBinaryAtomicPost`, `buildBinaryAtomicPost` is more concise as OG's ``EmitBinaryAtomicPost`` duplicates quite a bit of code from [MakeBinaryAtomicValue](https://github.com/llvm/clangir/blob/dbf320e5c3db0410566ae561067c595308870bad/clang/lib/CodeGen/CGBuiltin.cpp#L340) Also, didn't implement invert as __sync_add_and_fetch does not need it, but will add it (which is a trivial work) when we implement a builtin that needs it. Test cases are from [OG](https://github.com/llvm/clangir/blob/dbf320e5c3db0410566ae561067c595308870bad/clang/test/CodeGen/Atomics.c#L134) --- clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 34 ++++++++- clang/test/CIR/CodeGen/atomic.cpp | 91 +++++++++++++++++++++++ 3 files changed, 124 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index b9f81fcd3e22..cb720ad800d8 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -229,6 +229,7 @@ struct MissingFeatures { static bool emitConstrainedFPCall() { return false; } static bool emitEmptyRecordCheck() { return false; } static bool isPPC_FP128Ty() { return false; } + static bool emitBinaryAtomicPostHasInvert() { return false; } // Inline assembly static bool asmGoto() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 21fd90819ced..04f2c1861401 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -244,6 +244,7 @@ static Address checkAtomicAlignment(CIRGenFunction &CGF, const CallExpr *E) { /// and the expression node. static mlir::Value makeBinaryAtomicValue( CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr, + mlir::Value *neededValP = nullptr, mlir::Type *neededValT = nullptr, cir::MemOrder ordering = cir::MemOrder::SequentiallyConsistent) { QualType typ = expr->getType(); @@ -263,7 +264,15 @@ static mlir::Value makeBinaryAtomicValue( mlir::Value val = cgf.emitScalarExpr(expr->getArg(1)); mlir::Type valueType = val.getType(); val = emitToInt(cgf, val, typ, intType); - + // These output arguments are needed for post atomic fetch operations + // that calculate the result of the operation as return value of + // _and_fetch builtins. The `AtomicFetch` operation only updates the + // memory location and returns the old value. + if (neededValP) { + assert(neededValT); + *neededValP = val; + *neededValT = valueType; + } auto rmwi = builder.create( cgf.getLoc(expr->getSourceRange()), destAddr.emitRawPointer(), val, kind, ordering, false, /* is volatile */ @@ -276,6 +285,26 @@ static RValue emitBinaryAtomic(CIRGenFunction &CGF, cir::AtomicFetchKind kind, return RValue::get(makeBinaryAtomicValue(CGF, kind, E)); } +static RValue emitBinaryAtomicPost(CIRGenFunction &cgf, + cir::AtomicFetchKind atomicOpkind, + const CallExpr *e, + cir::BinOpKind binopKind) { + mlir::Value val; + mlir::Type valueType; + clang::QualType typ = e->getType(); + mlir::Value result = + makeBinaryAtomicValue(cgf, atomicOpkind, e, &val, &valueType); + clang::CIRGen::CIRGenBuilderTy &builder = cgf.getBuilder(); + result = builder.create(result.getLoc(), binopKind, result, val); + result = emitFromInt(cgf, result, typ, valueType); + // FIXME: Some callers of this function expect the result to be inverted, + // which would need invert flag passed in and do the inversion here like + // traditional clang code gen does. When we implment those caller builtins + // we should implement the inversion here. + assert(!MissingFeatures::emitBinaryAtomicPostHasInvert()); + return RValue::get(result); +} + static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, const CallExpr *expr, bool returnBool) { @@ -1626,7 +1655,8 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_add_and_fetch_4: case Builtin::BI__sync_add_and_fetch_8: case Builtin::BI__sync_add_and_fetch_16: - llvm_unreachable("BI__sync_add_and_fetch like NYI"); + return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Add, E, + cir::BinOpKind::Add); case Builtin::BI__sync_sub_and_fetch_1: case Builtin::BI__sync_sub_and_fetch_2: diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index a284fd80ec25..c348bfebf486 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -12,6 +12,15 @@ typedef struct _a { void m() { at y; } +signed char sc; +unsigned char uc; +signed short ss; +unsigned short us; +signed int si; +unsigned int ui; +signed long long sll; +unsigned long long ull; + // CHECK: ![[A:.*]] = !cir.struct int basic_binop_fetch(int *i) { @@ -649,3 +658,85 @@ void cmp_val_ushort(unsigned short* p, short x, short u) { void cmp_val_ulong(unsigned long* p, long x, long u) { long r = __sync_val_compare_and_swap(p, x, u); } + +// CHECK-LABEL: @test_op_and_fetch +// LLVM-LABEL: @test_op_and_fetch +extern "C" void test_op_and_fetch (void) +{ + // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i + // CHECK: [[RES0:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i + // CHECK: [[RET0:%.*]] = cir.binop(add, [[RES0]], [[VAL0]]) : !s8i + // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES0:%.*]] = atomicrmw add ptr @sc, i8 [[VAL0]] seq_cst, align 1 + // LLVM: [[RET0:%.*]] = add i8 [[RES0]], [[VAL0]] + // LLVM: store i8 [[RET0]], ptr @sc, align 1 + sc = __sync_add_and_fetch (&sc, uc); + + // CHECK: [[RES1:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i + // CHECK: [[RET1:%.*]] = cir.binop(add, [[RES1]], [[VAL1]]) : !u8i + // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES1:%.*]] = atomicrmw add ptr @uc, i8 [[VAL1]] seq_cst, align 1 + // LLVM: [[RET1:%.*]] = add i8 [[RES1]], [[VAL1]] + // LLVM: store i8 [[RET1]], ptr @uc, align 1 + uc = __sync_add_and_fetch (&uc, uc); + + // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i + // CHECK: [[RES2:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i + // CHECK: [[RET2:%.*]] = cir.binop(add, [[RES2]], [[VAL2]]) : !s16i + // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16 + // LLVM: [[RES2:%.*]] = atomicrmw add ptr @ss, i16 [[CONV2]] seq_cst, align 2 + // LLVM: [[RET2:%.*]] = add i16 [[RES2]], [[CONV2]] + // LLVM: store i16 [[RET2]], ptr @ss, align 2 + ss = __sync_add_and_fetch (&ss, uc); + + // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i + // CHECK: [[RES3:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i + // CHECK: [[RET3:%.*]] = cir.binop(add, [[RES3]], [[VAL3]]) : !u16i + // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16 + // LLVM: [[RES3:%.*]] = atomicrmw add ptr @us, i16 [[CONV3]] seq_cst, align 2 + // LLVM: [[RET3:%.*]] = add i16 [[RES3]], [[CONV3]] + // LLVM: store i16 [[RET3]], ptr @us + us = __sync_add_and_fetch (&us, uc); + + // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i + // CHECK: [[RES4:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i + // CHECK: [[RET4:%.*]] = cir.binop(add, [[RES4]], [[VAL4]]) : !s32i + // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32 + // LLVM: [[RES4:%.*]] = atomicrmw add ptr @si, i32 [[CONV4]] seq_cst, align 4 + // LLVM: [[RET4:%.*]] = add i32 [[RES4]], [[CONV4]] + // LLVM: store i32 [[RET4]], ptr @si, align 4 + si = __sync_add_and_fetch (&si, uc); + + // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i + // CHECK: [[RES5:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i + // CHECK: [[RET5:%.*]] = cir.binop(add, [[RES5]], [[VAL5]]) : !u32i + // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32 + // LLVM: [[RES5:%.*]] = atomicrmw add ptr @ui, i32 [[CONV5]] seq_cst, align 4 + // LLVM: [[RET5:%.*]] = add i32 [[RES5]], [[CONV5]] + // LLVM: store i32 [[RET5]], ptr @ui, align 4 + ui = __sync_add_and_fetch (&ui, uc); + + // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i + // CHECK: [[RES6:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i + // CHECK: [[RET6:%.*]] = cir.binop(add, [[RES6]], [[VAL6]]) : !s64i + // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64 + // LLVM: [[RES6:%.*]] = atomicrmw add ptr @sll, i64 [[CONV6]] seq_cst, align 8 + // LLVM: [[RET6:%.*]] = add i64 [[RES6]], [[CONV6]] + // LLVM: store i64 [[RET6]], ptr @sll, align 8 + sll = __sync_add_and_fetch (&sll, uc); + + // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i + // CHECK: [[RES7:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i + // CHECK: [[RET7:%.*]] = cir.binop(add, [[RES7]], [[VAL7]]) : !u64i + // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64 + // LLVM: [[RES7:%.*]] = atomicrmw add ptr @ull, i64 [[CONV7]] seq_cst, align 8 + // LLVM: [[RET7:%.*]] = add i64 [[RES7]], [[CONV7]] + // LLVM: store i64 [[RET7]], ptr @ull, align 8 + ull = __sync_add_and_fetch (&ull, uc); +} From c55faa1ea6c774c20121977118c93635091d3d84 Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 12 Nov 2024 04:10:45 +0800 Subject: [PATCH 2066/2301] [CIR][CodeGen][NFC] Simplify replacing initializer of static decls (#1095) When emitting initializers for static declarations, it's essential to ensure that the `cir.global` operation aligns its type with that of the initializer. The original approach creates a new global op and copies every attribute from the old one. But just `setSymType` should work well. This also removes missing feature flags there. --- clang/include/clang/CIR/MissingFeatures.h | 1 - clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 17 ++--------------- clang/test/CIR/CodeGen/stmtexpr-init.c | 4 ++-- 3 files changed, 4 insertions(+), 18 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index cb720ad800d8..b25c5e07f9f0 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -68,7 +68,6 @@ struct MissingFeatures { // Address space related static bool addressSpace() { return false; } - static bool addressSpaceInGlobalVar() { return false; } // Clang codegen options static bool strictVTablePointers() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index e6bcb0d6bf04..6fa387483492 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -575,19 +575,7 @@ cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( // because some types, like unions, can't be completely represented // in the LLVM type system.) if (GV.getSymType() != Init.getType()) { - cir::GlobalOp OldGV = GV; - GV = builder.createGlobal(CGM.getModule(), getLoc(D.getSourceRange()), - OldGV.getName(), Init.getType(), - OldGV.getConstant(), GV.getLinkage()); - // FIXME(cir): OG codegen inserts new GV before old one, we probably don't - // need that? - GV.setVisibility(OldGV.getVisibility()); - GV.setGlobalVisibilityAttr(OldGV.getGlobalVisibilityAttr()); - GV.setInitialValueAttr(Init); - GV.setTlsModelAttr(OldGV.getTlsModelAttr()); - assert(!cir::MissingFeatures::setDSOLocal()); - assert(!cir::MissingFeatures::setComdat()); - assert(!cir::MissingFeatures::addressSpaceInGlobalVar()); + GV.setSymType(Init.getType()); // Normally this should be done with a call to CGM.replaceGlobal(OldGV, GV), // but since at this point the current block hasn't been really attached, @@ -595,8 +583,7 @@ cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( // Given those constraints, thread in the GetGlobalOp and update it // directly. GVAddr.getAddr().setType( - cir::PointerType::get(&getMLIRContext(), Init.getType())); - OldGV->erase(); + getBuilder().getPointerTo(Init.getType(), GV.getAddrSpaceAttr())); } bool NeedsDtor = diff --git a/clang/test/CIR/CodeGen/stmtexpr-init.c b/clang/test/CIR/CodeGen/stmtexpr-init.c index 27e909d2b39c..66b676400761 100644 --- a/clang/test/CIR/CodeGen/stmtexpr-init.c +++ b/clang/test/CIR/CodeGen/stmtexpr-init.c @@ -33,11 +33,11 @@ struct outer { }; void T2(void) { - // CIR-DAG: cir.global "private" constant internal @T2._a = #cir.const_struct<{#cir.int<2> : !s32i, #cir.const_array<[#cir.int<50> : !s32i, #cir.int<60> : !s32i]> : !cir.array}> + // CIR-DAG: cir.global "private" constant internal dsolocal @T2._a = #cir.const_struct<{#cir.int<2> : !s32i, #cir.const_array<[#cir.int<50> : !s32i, #cir.int<60> : !s32i]> : !cir.array}> // LLVM-DAG: internal constant { i32, [2 x i32] } { i32 2, [2 x i32] [i32 50, i32 60] } const struct sized_array *A = ARRAY_PTR(50, 60); - // CIR-DAG: cir.global "private" constant internal @T2._a.1 = #cir.const_struct<{#cir.int<3> : !s32i, #cir.const_array<[#cir.int<10> : !s32i, #cir.int<20> : !s32i, #cir.int<30> : !s32i]> : !cir.array}> + // CIR-DAG: cir.global "private" constant internal dsolocal @T2._a.1 = #cir.const_struct<{#cir.int<3> : !s32i, #cir.const_array<[#cir.int<10> : !s32i, #cir.int<20> : !s32i, #cir.int<30> : !s32i]> : !cir.array}> // LLVM-DAG: internal constant { i32, [3 x i32] } { i32 3, [3 x i32] [i32 10, i32 20, i32 30] } struct outer X = {ARRAY_PTR(10, 20, 30)}; From 331549e0ea8711a5165bd447cdc634077ea71810 Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 12 Nov 2024 04:17:47 +0800 Subject: [PATCH 2067/2301] [CIR][Dialect][NFC] Fix double white spaces in `cir.global` assembly (#1096) Following #1009 and #1028, this PR removes the double white spaces in the assembly format of `cir.global` op. It's basically some `mlir-tablegen`-builtin magic: With `constBuilderCall` specified, we can apply `DefaultValuedAttr` with any default value we can construct from constant values. Then we can easily omit the default in assembly. Hence, we don't need to compromise anything for the wrapper attribute `cir::VisibilityAttr`. Similarly to #1009, an empty literal ``` `` ``` is used to omit the leading space emitted by inner attribute. The test case `visibility-attribute.c` is modified to save the intermediate CIR to disk and reflect the effects. Double whitespaces in other test files are removed. --- clang/include/clang/CIR/Dialect/IR/CIRAttrs.td | 3 +++ clang/include/clang/CIR/Dialect/IR/CIROps.td | 7 +++++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 13 ------------- clang/test/CIR/CallConvLowering/AArch64/struct.c | 4 ++-- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 6 +++--- clang/test/CIR/CodeGen/annotations-var.c | 4 ++-- clang/test/CIR/CodeGen/array-init.c | 2 +- .../CIR/CodeGen/attribute-annotate-multiple.cpp | 6 +++--- clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp | 14 +++++++------- clang/test/CIR/CodeGen/global-new.cpp | 4 ++-- clang/test/CIR/CodeGen/globals-neg-index-array.c | 4 ++-- clang/test/CIR/CodeGen/kr-func-promote.c | 2 +- clang/test/CIR/CodeGen/member-init-struct.cpp | 2 +- clang/test/CIR/CodeGen/no-pie.c | 2 +- clang/test/CIR/CodeGen/paren-list-init.cpp | 4 ++-- clang/test/CIR/CodeGen/static-vars.cpp | 2 +- clang/test/CIR/CodeGen/temporaries.cpp | 10 +++++----- clang/test/CIR/CodeGen/tempref.cpp | 8 ++++---- clang/test/CIR/CodeGen/unary-deref.cpp | 2 +- clang/test/CIR/CodeGen/union-init.c | 2 +- clang/test/CIR/CodeGen/visibility-attribute.c | 6 ++++-- clang/test/CIR/IR/annotations.cir | 2 +- clang/test/CIR/IR/invalid-annotations.cir | 2 +- clang/test/CIR/Lowering/brcond.cir | 2 +- clang/test/CIR/Lowering/store-memcpy.cpp | 2 +- 25 files changed, 55 insertions(+), 60 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index c931b9376287..463bdd5cec7a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -1016,6 +1016,9 @@ def VisibilityAttr : CIR_Attr<"Visibility", "visibility"> { let skipDefaultBuilders = 1; + // Make DefaultValuedAttr accept VisibilityKind as default value ($0). + let constBuilderCall = "cir::VisibilityAttr::get($_builder.getContext(), $0)"; + let extraClassDeclaration = [{ bool isDefault() const { return getValue() == VisibilityKind::Default; }; bool isHidden() const { return getValue() == VisibilityKind::Hidden; }; diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a829e1c99d79..975a3ff6c8bd 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2387,7 +2387,10 @@ def GlobalOp : CIR_Op<"global", // TODO: sym_visibility can possibly be represented by implementing the // necessary Symbol's interface in terms of linkage instead. let arguments = (ins SymbolNameAttr:$sym_name, - VisibilityAttr:$global_visibility, + DefaultValuedAttr< + VisibilityAttr, + "VisibilityKind::Default" + >:$global_visibility, OptionalAttr:$sym_visibility, TypeAttr:$sym_type, Arg:$linkage, @@ -2405,7 +2408,7 @@ def GlobalOp : CIR_Op<"global", let regions = (region AnyRegion:$ctorRegion, AnyRegion:$dtorRegion); let assemblyFormat = [{ ($sym_visibility^)? - custom($global_visibility) + (`` $global_visibility^)? (`constant` $constant^)? $linkage (`comdat` $comdat^)? diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 5724b7213e5a..9abab472b779 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -285,19 +285,6 @@ static void printOmittedTerminatorRegion(mlir::OpAsmPrinter &printer, /*printBlockTerminators=*/!omitRegionTerm(region)); } -static mlir::ParseResult -parseOmitDefaultVisibility(mlir::OpAsmParser &parser, - cir::VisibilityAttr &visibility) { - parseVisibilityAttr(parser, visibility); - return success(); -} - -static void printOmitDefaultVisibility(mlir::OpAsmPrinter &printer, - cir::GlobalOp &op, - cir::VisibilityAttr visibility) { - printVisibilityAttr(printer, visibility); -} - //===----------------------------------------------------------------------===// // AllocaOp //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CallConvLowering/AArch64/struct.c b/clang/test/CIR/CallConvLowering/AArch64/struct.c index f5dfd43dfcf5..209e393f5455 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/struct.c +++ b/clang/test/CIR/CallConvLowering/AArch64/struct.c @@ -40,7 +40,7 @@ S init(S s) { return s; } -// CIR: cir.func no_proto @foo1 +// CIR: cir.func no_proto @foo1 // CIR: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, ["s"] // CIR: %[[#V1:]] = cir.alloca !ty_S, !cir.ptr, ["tmp"] {alignment = 4 : i64} // CIR: %[[#V2:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr @@ -143,7 +143,7 @@ S2 init2(S2 s) { return s; } -// CIR: cir.func no_proto @foo3() +// CIR: cir.func no_proto @foo3() // CIR: %[[#V0:]] = cir.alloca !ty_S2_, !cir.ptr, ["s"] // CIR: %[[#V1:]] = cir.alloca !ty_S2_, !cir.ptr, ["tmp"] {alignment = 1 : i64} // CIR: %[[#V2:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index 96125b44b3bb..fbc9ce71343d 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -20,7 +20,7 @@ float32_t test_vrndns_f32(float32_t a) { return vrndns_f32(a); } -// CIR: cir.func internal private @vrndns_f32(%arg0: !cir.float {{.*}}) -> !cir.float +// CIR: cir.func internal private @vrndns_f32(%arg0: !cir.float {{.*}}) -> !cir.float // CIR: cir.store %arg0, [[ARG_SAVE:%.*]] : !cir.float, !cir.ptr // CIR: [[INTRIN_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr, !cir.float // CIR: {{%.*}} = cir.llvm.intrinsic "roundeven.f32" [[INTRIN_ARG]] : (!cir.float) @@ -42,7 +42,7 @@ float32x2_t test_vrnda_f32(float32x2_t a) { return vrnda_f32(a); } -// CIR: cir.func internal private @vrnda_f32(%arg0: !cir.vector +// CIR: cir.func internal private @vrnda_f32(%arg0: !cir.vector // CIR: cir.store %arg0, [[ARG_SAVE:%.*]] : !cir.vector, !cir.ptr> // CIR: [[INTRIN_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr>, !cir.vector // CIR: [[INTRIN_ARG_CAST:%.*]] = cir.cast(bitcast, [[INTRIN_ARG]] : !cir.vector), !cir.vector @@ -66,7 +66,7 @@ float32x4_t test_vrndaq_f32(float32x4_t a) { return vrndaq_f32(a); } -// CIR: cir.func internal private @vrndaq_f32(%arg0: !cir.vector +// CIR: cir.func internal private @vrndaq_f32(%arg0: !cir.vector // CIR: cir.store %arg0, [[ARG_SAVE:%.*]] : !cir.vector, !cir.ptr> // CIR: [[INTRIN_ARG:%.*]] = cir.load [[ARG_SAVE]] : !cir.ptr>, !cir.vector // CIR: [[INTRIN_ARG_CAST:%.*]] = cir.cast(bitcast, [[INTRIN_ARG]] : !cir.vector), !cir.vector diff --git a/clang/test/CIR/CodeGen/annotations-var.c b/clang/test/CIR/CodeGen/annotations-var.c index 5bb3989bc9d0..1a3787acc105 100644 --- a/clang/test/CIR/CodeGen/annotations-var.c +++ b/clang/test/CIR/CodeGen/annotations-var.c @@ -3,8 +3,8 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// CIR-DAG: cir.global external @globalvar = #cir.int<3> : !s32i [#cir.annotation] {alignment = 4 : i64} -// CIR-DAG: cir.global external @globalvar2 = #cir.int<2> : !s32i [#cir.annotation] {alignment = 4 : i64} +// CIR-DAG: cir.global external @globalvar = #cir.int<3> : !s32i [#cir.annotation] {alignment = 4 : i64} +// CIR-DAG: cir.global external @globalvar2 = #cir.int<2> : !s32i [#cir.annotation] {alignment = 4 : i64} // LLVM-DAG: @.str.annotation = private unnamed_addr constant [15 x i8] c"localvar_ann_0\00", section "llvm.metadata" // LLVM-DAG: @.str.1.annotation = private unnamed_addr constant [{{[0-9]+}} x i8] c"{{.*}}annotations-var.c\00", section "llvm.metadata" diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c index aa4c7f7ea2d0..22c282a6ffec 100644 --- a/clang/test/CIR/CodeGen/array-init.c +++ b/clang/test/CIR/CodeGen/array-init.c @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s -// CHECK-DAG: cir.global "private" constant cir_private @__const.foo.bar = #cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array +// CHECK-DAG: cir.global "private" constant cir_private @__const.foo.bar = #cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array typedef struct { int a; long b; diff --git a/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp b/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp index 9c360d6bda02..e92db678b274 100644 --- a/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp +++ b/clang/test/CIR/CodeGen/attribute-annotate-multiple.cpp @@ -18,11 +18,11 @@ void bar() __attribute__((annotate("withargfunc", "os", 22))) { // BEFORE: module @{{.*}}attribute-annotate-multiple.cpp" attributes {cir.lang = -// BEFORE: cir.global external @a = #cir.ptr : !cir.ptr +// BEFORE: cir.global external @a = #cir.ptr : !cir.ptr // BEFORE-SAME: [#cir.annotation] -// BEFORE: cir.global external @b = #cir.ptr : !cir.ptr +// BEFORE: cir.global external @b = #cir.ptr : !cir.ptr // BEFORE-SAME: [#cir.annotation] -// BEFORE: cir.global external @c = #cir.ptr : !cir.ptr +// BEFORE: cir.global external @c = #cir.ptr : !cir.ptr // BEFORE-SAME: [#cir.annotation] // BEFORE: cir.global external @tile = #cir.int<7> : !s32i // BEFORE-SAME: #cir.annotation] diff --git a/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp b/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp index 8da371e6abe4..c53e4977ccdb 100644 --- a/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp +++ b/clang/test/CIR/CodeGen/cxx1z-inline-variables.cpp @@ -26,13 +26,13 @@ const int &compat_use_after_redecl1 = compat::c; const int &compat_use_after_redecl2 = compat::d; const int &compat_use_after_redecl3 = compat::g; -// CIR: cir.global constant weak_odr comdat @_ZN6compat1bE = #cir.int<2> : !s32i {alignment = 4 : i64} -// CIR: cir.global constant weak_odr comdat @_ZN6compat1aE = #cir.int<1> : !s32i {alignment = 4 : i64} -// CIR: cir.global constant weak_odr comdat @_ZN6compat1cE = #cir.int<3> : !s32i {alignment = 4 : i64} -// CIR: cir.global constant external @_ZN6compat1eE = #cir.int<5> : !s32i {alignment = 4 : i64} -// CIR: cir.global constant weak_odr comdat @_ZN6compat1fE = #cir.int<6> : !s32i {alignment = 4 : i64} -// CIR: cir.global constant linkonce_odr comdat @_ZN6compat1dE = #cir.int<4> : !s32i {alignment = 4 : i64} -// CIR: cir.global constant linkonce_odr comdat @_ZN6compat1gE = #cir.int<7> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant weak_odr comdat @_ZN6compat1bE = #cir.int<2> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant weak_odr comdat @_ZN6compat1aE = #cir.int<1> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant weak_odr comdat @_ZN6compat1cE = #cir.int<3> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant external @_ZN6compat1eE = #cir.int<5> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant weak_odr comdat @_ZN6compat1fE = #cir.int<6> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant linkonce_odr comdat @_ZN6compat1dE = #cir.int<4> : !s32i {alignment = 4 : i64} +// CIR: cir.global constant linkonce_odr comdat @_ZN6compat1gE = #cir.int<7> : !s32i {alignment = 4 : i64} // LLVM: $_ZN6compat1bE = comdat any // LLVM: $_ZN6compat1aE = comdat any diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index 63cf667d259e..8ab125c0c5de 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -14,7 +14,7 @@ e *g = new e(0); // CIR_BEFORE: ![[ty:.*]] = !cir.struct { +// CIR_BEFORE: cir.global external @g = ctor : !cir.ptr { // CIR_BEFORE: %[[GlobalAddr:.*]] = cir.get_global @g : !cir.ptr> // CIR_BEFORE: %[[Size:.*]] = cir.const #cir.int<1> : !u64i // CIR_BEFORE: %[[NewAlloc:.*]] = cir.call @_Znwm(%[[Size]]) : (!u64i) -> !cir.ptr @@ -37,7 +37,7 @@ e *g = new e(0); // CIR_EH: cir.resume // CIR_EH: }] -// CIR_FLAT_EH: cir.func internal private @__cxx_global_var_init() +// CIR_FLAT_EH: cir.func internal private @__cxx_global_var_init() // CIR_FLAT_EH: ^bb3: // CIR_FLAT_EH: %exception_ptr, %type_id = cir.eh.inflight_exception // CIR_FLAT_EH: cir.call @_ZdlPvm({{.*}}) : (!cir.ptr, !u64i) -> () diff --git a/clang/test/CIR/CodeGen/globals-neg-index-array.c b/clang/test/CIR/CodeGen/globals-neg-index-array.c index 62a59498ba64..7b21b6d30041 100644 --- a/clang/test/CIR/CodeGen/globals-neg-index-array.c +++ b/clang/test/CIR/CodeGen/globals-neg-index-array.c @@ -14,7 +14,7 @@ struct __attribute__((packed)) PackedStruct { }; struct PackedStruct packed[10]; char *packed_element = &(packed[-2].a3); -// CHECK: cir.global external @packed = #cir.zero : !cir.array {alignment = 16 : i64} loc(#loc5) -// CHECK: cir.global external @packed_element = #cir.global_view<@packed, [-2 : i32, 2 : i32]> +// CHECK: cir.global external @packed = #cir.zero : !cir.array {alignment = 16 : i64} loc(#loc5) +// CHECK: cir.global external @packed_element = #cir.global_view<@packed, [-2 : i32, 2 : i32]> // LLVM: @packed = global [10 x %struct.PackedStruct] zeroinitializer // LLVM: @packed_element = global ptr getelementptr inbounds ([10 x %struct.PackedStruct], ptr @packed, i32 0, i32 -2, i32 2) diff --git a/clang/test/CIR/CodeGen/kr-func-promote.c b/clang/test/CIR/CodeGen/kr-func-promote.c index 1edfcd805c6f..2e9839b7e6dc 100644 --- a/clang/test/CIR/CodeGen/kr-func-promote.c +++ b/clang/test/CIR/CodeGen/kr-func-promote.c @@ -6,7 +6,7 @@ // CHECK: cir.store %1, %0 : !s16i, !cir.ptr void foo(x) short x; {} -// CHECK: cir.func no_proto @bar(%arg0: !cir.double +// CHECK: cir.func no_proto @bar(%arg0: !cir.double // CHECK: %0 = cir.alloca !cir.float, !cir.ptr, ["f", init] // CHECK: %1 = cir.cast(floating, %arg0 : !cir.double), !cir.float // CHECK: cir.store %1, %0 : !cir.float, !cir.ptr diff --git a/clang/test/CIR/CodeGen/member-init-struct.cpp b/clang/test/CIR/CodeGen/member-init-struct.cpp index 169577a98a36..8440526c1a1c 100644 --- a/clang/test/CIR/CodeGen/member-init-struct.cpp +++ b/clang/test/CIR/CodeGen/member-init-struct.cpp @@ -15,7 +15,7 @@ struct C { C(void (C::*x)(), int y) : b(), c(y), e(x) {} }; -// CHECK-LABEL: cir.global external @x = #cir.zero : !ty_A +// CHECK-LABEL: cir.global external @x = #cir.zero : !ty_A A x; C a, b(x), c(0, 2); diff --git a/clang/test/CIR/CodeGen/no-pie.c b/clang/test/CIR/CodeGen/no-pie.c index c0ffd9790392..e8ab9c112466 100644 --- a/clang/test/CIR/CodeGen/no-pie.c +++ b/clang/test/CIR/CodeGen/no-pie.c @@ -7,5 +7,5 @@ extern int var; int get() { return var; } -// CIR: cir.global "private" external dsolocal @var : !s32i {alignment = 4 : i64} +// CIR: cir.global "private" external dsolocal @var : !s32i {alignment = 4 : i64} // LLVM: @var = external dso_local global i32 \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/paren-list-init.cpp b/clang/test/CIR/CodeGen/paren-list-init.cpp index 0fb659e06333..240e1c7d5e95 100644 --- a/clang/test/CIR/CodeGen/paren-list-init.cpp +++ b/clang/test/CIR/CodeGen/paren-list-init.cpp @@ -23,7 +23,7 @@ template void make1() { Vec v; S1((Vec&&) v); -// CIR: cir.func linkonce_odr @_Z5make1ILi0EEvv() +// CIR: cir.func linkonce_odr @_Z5make1ILi0EEvv() // CIR: %[[VEC:.*]] = cir.alloca ![[VecType]], !cir.ptr // CIR: cir.call @_ZN3VecC1Ev(%[[VEC]]) : (!cir.ptr) // CIR: cir.scope { @@ -35,7 +35,7 @@ void make1() { // CIR: cir.call @_ZN3VecD1Ev(%[[VEC]]) : (!cir.ptr) -> () // CIR: cir.return -// CIR_EH: cir.func linkonce_odr @_Z5make1ILi0EEvv() +// CIR_EH: cir.func linkonce_odr @_Z5make1ILi0EEvv() // CIR_EH: %[[VEC:.*]] = cir.alloca ![[VecType]], !cir.ptr, ["v", init] // Construct v diff --git a/clang/test/CIR/CodeGen/static-vars.cpp b/clang/test/CIR/CodeGen/static-vars.cpp index e0c405521e5d..c8d1ff5ed439 100644 --- a/clang/test/CIR/CodeGen/static-vars.cpp +++ b/clang/test/CIR/CodeGen/static-vars.cpp @@ -38,7 +38,7 @@ void func2(void) { // CHECK-DAG: cir.global "private" internal dsolocal @_ZZ5func2vE1j = #cir.fp<0.000000e+00> : !cir.float } -// CHECK-DAG: cir.global linkonce_odr comdat @_ZZ4testvE1c = #cir.int<0> : !s32i +// CHECK-DAG: cir.global linkonce_odr comdat @_ZZ4testvE1c = #cir.int<0> : !s32i // LLVM-DAG: $_ZZ4testvE1c = comdat any // LLVM-DAG: @_ZZ4testvE1c = linkonce_odr global i32 0, comdat, align 4 diff --git a/clang/test/CIR/CodeGen/temporaries.cpp b/clang/test/CIR/CodeGen/temporaries.cpp index 2fbbe03b2370..d51b6974109f 100644 --- a/clang/test/CIR/CodeGen/temporaries.cpp +++ b/clang/test/CIR/CodeGen/temporaries.cpp @@ -14,9 +14,9 @@ void f() { !E(); } -// CIR: cir.func private @_ZN1EC1Ev(!cir.ptr) extra(#fn_attr) -// CIR-NEXT: cir.func private @_ZN1EntEv(!cir.ptr) -> !ty_E -// CIR-NEXT: cir.func private @_ZN1ED1Ev(!cir.ptr) extra(#fn_attr) +// CIR: cir.func private @_ZN1EC1Ev(!cir.ptr) extra(#fn_attr) +// CIR-NEXT: cir.func private @_ZN1EntEv(!cir.ptr) -> !ty_E +// CIR-NEXT: cir.func private @_ZN1ED1Ev(!cir.ptr) extra(#fn_attr) // CIR-NEXT: cir.func @_Z1fv() extra(#fn_attr1) { // CIR-NEXT: cir.scope { // CIR-NEXT: %[[ONE:[0-9]+]] = cir.alloca !ty_E, !cir.ptr, ["agg.tmp.ensured"] {alignment = 1 : i64} @@ -44,8 +44,8 @@ void f() { const unsigned int n = 1234; const int &r = (const int&)n; -// CIR: cir.global "private" constant internal @_ZGR1r_ = #cir.int<1234> : !s32i -// CIR-NEXT: cir.global constant external @r = #cir.global_view<@_ZGR1r_> : !cir.ptr {alignment = 8 : i64} +// CIR: cir.global "private" constant internal @_ZGR1r_ = #cir.int<1234> : !s32i +// CIR-NEXT: cir.global constant external @r = #cir.global_view<@_ZGR1r_> : !cir.ptr {alignment = 8 : i64} // LLVM: @_ZGR1r_ = internal constant i32 1234, align 4 // LLVM-NEXT: @r = constant ptr @_ZGR1r_, align 8 diff --git a/clang/test/CIR/CodeGen/tempref.cpp b/clang/test/CIR/CodeGen/tempref.cpp index 9c7ac0eccb86..645b8f867cef 100644 --- a/clang/test/CIR/CodeGen/tempref.cpp +++ b/clang/test/CIR/CodeGen/tempref.cpp @@ -6,9 +6,9 @@ struct A { ~A(); }; A &&a = dynamic_cast(A{}); -// CHECK: cir.func private @_ZN1AD1Ev(!cir.ptr) extra(#fn_attr) -// CHECK-NEXT: cir.global external @a = #cir.ptr : !cir.ptr {alignment = 8 : i64, ast = #cir.var.decl.ast} -// CHECK-NEXT: cir.func internal private @__cxx_global_var_init() { +// CHECK: cir.func private @_ZN1AD1Ev(!cir.ptr) extra(#fn_attr) +// CHECK-NEXT: cir.global external @a = #cir.ptr : !cir.ptr {alignment = 8 : i64, ast = #cir.var.decl.ast} +// CHECK-NEXT: cir.func internal private @__cxx_global_var_init() { // CHECK-NEXT: cir.scope { // CHECK-NEXT: %[[SEVEN:[0-9]+]] = cir.get_global @a : !cir.ptr> // CHECK-NEXT: %[[EIGHT:[0-9]+]] = cir.get_global @_ZGR1a_ : !cir.ptr @@ -16,7 +16,7 @@ A &&a = dynamic_cast(A{}); // CHECK-NEXT: } // CHECK-NEXT: cir.return // CHECK-NEXT: } -// CHECK-NEXT: cir.func private @_GLOBAL__sub_I_tempref.cpp() { +// CHECK-NEXT: cir.func private @_GLOBAL__sub_I_tempref.cpp() { // CHECK-NEXT: cir.call @__cxx_global_var_init() : () -> () // CHECK-NEXT: cir.return // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/unary-deref.cpp b/clang/test/CIR/CodeGen/unary-deref.cpp index b5ceb4cceb7f..665fc4bca0fe 100644 --- a/clang/test/CIR/CodeGen/unary-deref.cpp +++ b/clang/test/CIR/CodeGen/unary-deref.cpp @@ -10,7 +10,7 @@ void foo() { (void)p.read(); } -// CHECK: cir.func linkonce_odr @_ZNK12MyIntPointer4readEv +// CHECK: cir.func linkonce_odr @_ZNK12MyIntPointer4readEv // CHECK: %2 = cir.load %0 // CHECK: %3 = cir.get_member %2[0] {name = "ptr"} // CHECK: %4 = cir.load deref %3 : !cir.ptr> diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index 1d0886644b2b..d6f2e949b16e 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -34,7 +34,7 @@ void foo(int x) { // CHECK: cir.return union { int i; float f; } u = { }; -// CHECK: cir.global external @u = #cir.zero : ![[TY_u]] +// CHECK: cir.global external @u = #cir.zero : ![[TY_u]] unsigned is_little(void) { const union { diff --git a/clang/test/CIR/CodeGen/visibility-attribute.c b/clang/test/CIR/CodeGen/visibility-attribute.c index 07834d78910e..7a1d0aaafbad 100644 --- a/clang/test/CIR/CodeGen/visibility-attribute.c +++ b/clang/test/CIR/CodeGen/visibility-attribute.c @@ -1,5 +1,7 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o - | FileCheck %s -check-prefix=LLVM +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck %s -input-file=%t.cir -check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll +// RUN: FileCheck %s -input-file=%t.ll -check-prefix=LLVM extern int glob_default; // CIR: cir.global "private" external @glob_default : !s32i diff --git a/clang/test/CIR/IR/annotations.cir b/clang/test/CIR/IR/annotations.cir index 1f92b8e8e126..454703e74563 100644 --- a/clang/test/CIR/IR/annotations.cir +++ b/clang/test/CIR/IR/annotations.cir @@ -28,7 +28,7 @@ cir.func @bar() attributes {annotations = [#cir.annotation], // CHECK-SAME: ["bar", #cir.annotation], // CHECK-SAME: ["_Z1fv", #cir.annotation]]>} -// CHECK: cir.global external @a = #cir.int<0> : !s32i +// CHECK: cir.global external @a = #cir.int<0> : !s32i // CHECK-SAME: [#cir.annotation] // CHECK: cir.func @foo() // CHECK-SAME: [#cir.annotation] diff --git a/clang/test/CIR/IR/invalid-annotations.cir b/clang/test/CIR/IR/invalid-annotations.cir index 2a4fa79bd284..d7c76b221c52 100644 --- a/clang/test/CIR/IR/invalid-annotations.cir +++ b/clang/test/CIR/IR/invalid-annotations.cir @@ -4,7 +4,7 @@ // expected-error @below {{invalid kind of attribute specified}} // expected-error @below {{failed to parse AnnotationAttr parameter 'name' which is to be a `mlir::StringAttr`}} -cir.global external @a = #cir.ptr : !cir.ptr [#cir.annotation] +cir.global external @a = #cir.ptr : !cir.ptr [#cir.annotation] // ----- diff --git a/clang/test/CIR/Lowering/brcond.cir b/clang/test/CIR/Lowering/brcond.cir index d2df89740358..262e0a8f868b 100644 --- a/clang/test/CIR/Lowering/brcond.cir +++ b/clang/test/CIR/Lowering/brcond.cir @@ -3,7 +3,7 @@ !s32i = !cir.int #fn_attr = #cir, nothrow = #cir.nothrow, optnone = #cir.optnone})> -module { cir.func no_proto @test() -> !cir.bool extra(#fn_attr) { +module { cir.func no_proto @test() -> !cir.bool extra(#fn_attr) { %0 = cir.const #cir.int<0> : !s32i %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool cir.br ^bb1 diff --git a/clang/test/CIR/Lowering/store-memcpy.cpp b/clang/test/CIR/Lowering/store-memcpy.cpp index f53bb3ea5d61..cf6997c2c8de 100644 --- a/clang/test/CIR/Lowering/store-memcpy.cpp +++ b/clang/test/CIR/Lowering/store-memcpy.cpp @@ -5,7 +5,7 @@ void foo() { char s1[] = "Hello"; } -// AFTER-DAG: cir.global "private" constant cir_private @__const._Z3foov.s1 = #cir.const_array<"Hello\00" : !cir.array> : !cir.array +// AFTER-DAG: cir.global "private" constant cir_private @__const._Z3foov.s1 = #cir.const_array<"Hello\00" : !cir.array> : !cir.array // AFTER: @_Z3foov // AFTER: %[[S1:.*]] = cir.alloca !cir.array, !cir.ptr>, ["s1"] // AFTER: %[[HELLO:.*]] = cir.get_global @__const._Z3foov.s1 : !cir.ptr> From 30aa4281da0efd4c39bc7da2cfc213cc0e33db71 Mon Sep 17 00:00:00 2001 From: orbiri Date: Mon, 11 Nov 2024 22:20:02 +0200 Subject: [PATCH 2068/2301] [CIR] Properly ensure terminating IfOp and ScopeOp regions (#1097) The code changes modify the `cir.if` and `cir.scope` operations to ensure that their code regions are properly terminated. Previously, the if/else and scope regions could be left completely empty which is non-trivially expected in most code inspecting these ops. This led, for example, to a crash when and if clause was left empty in the source code. Now, the child regions must be terminated, either explicitly or implicitly by the default builder and assembly parser. This change improves the clarity and correctness of the code. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 44 ++++++++++++------- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 37 +++++++++++----- .../Dialect/Transforms/CIRCanonicalize.cpp | 6 +-- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 6 ++- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 4 +- clang/test/CIR/CodeGen/OpenMP/parallel.cpp | 2 - clang/test/CIR/CodeGen/if-constexpr.cpp | 3 -- clang/test/CIR/CodeGen/stmt-expr.c | 5 +-- clang/test/CIR/Lowering/if.cir | 34 ++++++++++++++ 9 files changed, 101 insertions(+), 40 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 975a3ff6c8bd..61b02d50e90d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -750,8 +750,10 @@ def IfOp : CIR_Op<"if", } ``` - `cir.if` defines no values and the 'else' can be omitted. `cir.yield` must - explicitly terminate the region if it has more than one block. + `cir.if` defines no values and the 'else' can be omitted. The if/else + regions must be terminated. If the region has only one block, the terminator + can be left out, and `cir.yield` terminator will be inserted implictly. + Otherwise, the region must be explicitly terminated. }]; let arguments = (ins CIR_BoolType:$condition); let regions = (region AnyRegion:$thenRegion, AnyRegion:$elseRegion); @@ -1070,6 +1072,16 @@ def ScopeOp : CIR_Op<"scope", [ custom($scopeRegion) (`:` type($results)^)? attr-dict }]; + let extraClassDeclaration = [{ + /// Determine whether the scope is empty, meaning it contains a single block + /// terminated by a cir.yield. + bool isEmpty() { + auto &entry = getRegion().front(); + return getRegion().hasOneBlock() && + llvm::isa(entry.front()); + } + }]; + let builders = [ // Scopes for yielding values. OpBuilder<(ins @@ -1200,7 +1212,7 @@ def ShiftOp : CIR_Op<"shift", [Pure]> { be either integer type or vector of integer type. However, they must be either all vector of integer type, or all integer type. If they are vectors, each vector element of the shift target is shifted by the corresponding - shift amount in the shift amount vector. + shift amount in the shift amount vector. ```mlir %7 = cir.shift(left, %1 : !u64i, %4 : !s32i) -> !u64i @@ -1879,17 +1891,17 @@ def SwitchOp : CIR_Op<"switch", is an integral condition value. The set of `cir.case` operations and their enclosing `cir.switch` - represents the semantics of a C/C++ switch statement. Users can use + represents the semantics of a C/C++ switch statement. Users can use `collectCases(llvm::SmallVector &cases)` to collect the `cir.case` operation in the `cir.switch` operation easily. The `cir.case` operations doesn't have to be in the region of `cir.switch` directly. However, when all the `cir.case` operations lives in the region of `cir.switch` directly and there is no other operations except the ending - `cir.yield` operation in the region of `cir.switch` directly, we call the - `cir.switch` operation is in a simple form. Users can use + `cir.yield` operation in the region of `cir.switch` directly, we call the + `cir.switch` operation is in a simple form. Users can use `bool isSimpleForm(llvm::SmallVector &cases)` member function to - detect if the `cir.switch` operation is in a simple form. The simple form + detect if the `cir.switch` operation is in a simple form. The simple form makes analysis easier to handle the `cir.switch` operation and makes the boundary to give up pretty clear. @@ -1976,7 +1988,7 @@ def SwitchOp : CIR_Op<"switch", switch(int cond) { l: b++; - + case 4: a++; break; @@ -4178,13 +4190,13 @@ def ReturnAddrOp : CIR_Op<"return_address"> { let results = (outs Res:$result); let description = [{ - Represents call to builtin function ` __builtin_return_address` in CIR. - This builtin function returns the return address of the current function, - or of one of its callers. + Represents call to builtin function ` __builtin_return_address` in CIR. + This builtin function returns the return address of the current function, + or of one of its callers. The `level` argument is number of frames to scan up the call stack. - For instance, value of 0 yields the return address of the current function, - value of 1 yields the return address of the caller of the current function, - and so forth. + For instance, value of 0 yields the return address of the current function, + value of 1 yields the return address of the caller of the current function, + and so forth. Examples: @@ -4324,8 +4336,8 @@ def AbsOp : CIR_Op<"abs", [Pure, SameOperandsAndResultType]> { let summary = [{ libc builtin equivalent abs, labs, llabs - The `poison` argument indicate whether the result value - is a poison value if the first argument is statically or + The `poison` argument indicate whether the result value + is a poison value if the first argument is statically or dynamically an INT_MIN value. Example: diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 9abab472b779..c740cb23e626 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -208,20 +208,24 @@ LogicalResult ensureRegionTerm(OpAsmParser &parser, Region ®ion, Location eLoc = parser.getEncodedSourceLoc(parser.getCurrentLocation()); OpBuilder builder(parser.getBuilder().getContext()); - // Region is empty or properly terminated: nothing to do. - if (region.empty() || - (region.back().mightHaveTerminator() && region.back().getTerminator())) + // Insert empty block in case the region is empty to ensure the terminator + // will be inserted + if (region.empty()) + builder.createBlock(®ion); + + Block &block = region.back(); + // Region is properly terminated: nothing to do. + if (!block.empty() && block.back().hasTrait()) return success(); // Check for invalid terminator omissions. if (!region.hasOneBlock()) return parser.emitError(errLoc, "multi-block region must not omit terminator"); - if (region.back().empty()) - return parser.emitError(errLoc, "empty region must not omit terminator"); - // Terminator was omited correctly: recreate it. - region.back().push_back(builder.create(eLoc)); + // Terminator was omitted correctly: recreate it. + builder.setInsertionPointToEnd(&block); + builder.create(eLoc); return success(); } @@ -1113,8 +1117,11 @@ void cir::IfOp::print(OpAsmPrinter &p) { p.printOptionalAttrDict(getOperation()->getAttrs()); } -/// Default callback for IfOp builders. Inserts nothing for now. -void cir::buildTerminatedBody(OpBuilder &builder, Location loc) {} +/// Default callback for IfOp builders. +void cir::buildTerminatedBody(OpBuilder &builder, Location loc) { + // add cir.yield to the end of the block + builder.create(loc); +} /// Given the region at `index`, or the parent operation if `index` is None, /// return the successor regions. These are the regions that may be selected @@ -1223,7 +1230,17 @@ void cir::ScopeOp::build( scopeBuilder(builder, result.location); } -LogicalResult cir::ScopeOp::verify() { return success(); } +LogicalResult cir::ScopeOp::verify() { + if (getRegion().empty()) { + return emitOpError() << "cir.scope must not be empty since it should " + "include at least an implicit cir.yield "; + } + + if (getRegion().back().empty() || + !getRegion().back().getTerminator()->hasTrait()) + return emitOpError() << "last block of cir.scope must be terminated"; + return success(); +} //===----------------------------------------------------------------------===// // TryOp diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp index 316a39b762e6..555cb20408a5 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp @@ -60,9 +60,9 @@ struct RemoveEmptyScope : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; LogicalResult match(ScopeOp op) const final { - return success(op.getRegion().empty() || - (op.getRegion().getBlocks().size() == 1 && - op.getRegion().front().empty())); + // TODO: Remove this logic once CIR uses MLIR infrastructure to remove + // trivially dead operations + return success(op.isEmpty()); } void rewrite(ScopeOp op, PatternRewriter &rewriter) const final { diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index d0ea2ec985d2..9521c767452b 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -66,7 +66,7 @@ struct CIRIfFlattening : public OpRewritePattern { auto *remainingOpsBlock = rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); mlir::Block *continueBlock; - if (ifOp->getResults().size() == 0) + if (ifOp->getResults().empty()) continueBlock = remainingOpsBlock; else llvm_unreachable("NYI"); @@ -125,7 +125,9 @@ class CIRScopeOpFlattening : public mlir::OpRewritePattern { auto loc = scopeOp.getLoc(); // Empty scope: just remove it. - if (scopeOp.getRegion().empty()) { + // TODO: Remove this logic once CIR uses MLIR infrastructure to remove + // trivially dead operations + if (scopeOp.isEmpty()) { rewriter.eraseOp(scopeOp); return mlir::success(); } diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 4dca01b70043..8b07d0b31cd2 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -787,7 +787,9 @@ class CIRScopeOpLowering : public mlir::OpConversionPattern { matchAndRewrite(cir::ScopeOp scopeOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { // Empty scope: just remove it. - if (scopeOp.getRegion().empty()) { + // TODO: Remove this logic once CIR uses MLIR infrastructure to remove + // trivially dead operations + if (scopeOp.isEmpty()) { rewriter.eraseOp(scopeOp); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/OpenMP/parallel.cpp b/clang/test/CIR/CodeGen/OpenMP/parallel.cpp index d2523d7b5396..861f22f7c86b 100644 --- a/clang/test/CIR/CodeGen/OpenMP/parallel.cpp +++ b/clang/test/CIR/CodeGen/OpenMP/parallel.cpp @@ -4,8 +4,6 @@ // CHECK: cir.func void omp_parallel_1() { // CHECK: omp.parallel { -// CHECK-NEXT: cir.scope { -// CHECK-NEXT: } // CHECK-NEXT: omp.terminator // CHECK-NEXT: } #pragma omp parallel diff --git a/clang/test/CIR/CodeGen/if-constexpr.cpp b/clang/test/CIR/CodeGen/if-constexpr.cpp index f980f3100841..6ee1c21dfe3e 100644 --- a/clang/test/CIR/CodeGen/if-constexpr.cpp +++ b/clang/test/CIR/CodeGen/if-constexpr.cpp @@ -75,9 +75,6 @@ void if0() { // CHECK-NEXT: cir.store %3, %2 : !s32i, !cir.ptr loc({{.*}}) // CHECK-NEXT: } loc({{.*}}) // CHECK-NEXT: cir.scope { -// Note that Clang does not even emit a block in this case -// CHECK-NEXT: } loc({{.*}}) -// CHECK-NEXT: cir.scope { // CHECK-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {{.*}} // CHECK-NEXT: %3 = cir.alloca !s32i, !cir.ptr, ["y", init] {{.*}} // CHECK-NEXT: %4 = cir.const #cir.int<70> : !s32i loc({{.*}}) diff --git a/clang/test/CIR/CodeGen/stmt-expr.c b/clang/test/CIR/CodeGen/stmt-expr.c index 0e3daebb9d78..32b0049bd0e6 100644 --- a/clang/test/CIR/CodeGen/stmt-expr.c +++ b/clang/test/CIR/CodeGen/stmt-expr.c @@ -4,9 +4,8 @@ // Yields void. void test1() { ({ }); } // CHECK: @test1 -// CHECK: cir.scope { -// CHECK-NOT: cir.yield -// CHECK: } +// CHECK-NEXT: cir.return + // Yields an out-of-scope scalar. void test2() { ({int x = 3; x; }); } diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index cb2960b69a32..6bcf03b0fcfe 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -62,4 +62,38 @@ module { // MLIR-NEXT: ^bb2: // pred: ^bb0 // MLIR-NEXT: llvm.return %arg0 : i32 // MLIR-NEXT: } + + // Verify empty if clause is properly lowered to empty block + cir.func @emptyIfClause(%arg0: !s32i) -> !s32i { + // MLIR-LABEL: llvm.func @emptyIfClause + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + // MLIR: llvm.cond_br {{%.*}}, ^[[T:.*]], ^[[PHI:.*]] + cir.if %4 { + // MLIR-NEXT: ^[[T]]: + // MLIR-NEXT: llvm.br ^[[PHI]] + } + // MLIR-NEXT: ^[[PHI]]: + // MLIR-NEXT: llvm.return + cir.return %arg0 : !s32i + } + + // Verify empty if-else clauses are properly lowered to empty blocks + // TODO: Fix reversed order of blocks in the test once Issue clangir/#1094 is + // addressed + cir.func @emptyIfElseClause(%arg0: !s32i) -> !s32i { + // MLIR-LABEL: llvm.func @emptyIfElseClause + %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + // MLIR: llvm.cond_br {{%.*}}, ^[[T:.*]], ^[[F:.*]] + cir.if %4 { + } else { + } + // MLIR-NEXT: ^[[F]]: + // MLIR-NEXT: llvm.br ^[[PHI:.*]] + // MLIR-NEXT: ^[[T]]: + // MLIR-NEXT: llvm.br ^[[PHI]] + // MLIR-NEXT: ^[[PHI]]: + // MLIR-NEXT: llvm.return + cir.return %arg0 : !s32i + } + } From fb546aedef22eb110d9288cc785d24bc4c5ea646 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 11 Nov 2024 16:00:26 -0500 Subject: [PATCH 2069/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vhsub_v, neon_vhsubq_v (#1103) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 7 +- clang/test/CIR/CodeGen/AArch64/neon.c | 552 ++++++++++-------- 2 files changed, 312 insertions(+), 247 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 46874b871f87..91cc76bbf2cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2516,7 +2516,12 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( : "aarch64.neon.shadd"; break; } - + case NEON::BI__builtin_neon_vhsub_v: + case NEON::BI__builtin_neon_vhsubq_v: { + intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.uhsub" + : "aarch64.neon.shsub"; + break; + } case NEON::BI__builtin_neon_vqmovn_v: { intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.uqxtn" : "aarch64.neon.sqxtn"; diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index a32e98803ef9..dbb6687e107b 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -858,7 +858,7 @@ int16x4_t test_vabd_s16(int16x4_t v1, int16x4_t v2) { return vabd_s16(v1, v2); // CIR-LABEL: vabd_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_s16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -870,7 +870,7 @@ int32x2_t test_vabd_s32(int32x2_t v1, int32x2_t v2) { return vabd_s32(v1, v2); // CIR-LABEL: vabd_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_s32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -882,7 +882,7 @@ uint8x8_t test_vabd_u8(uint8x8_t v1, uint8x8_t v2) { return vabd_u8(v1, v2); // CIR-LABEL: vabd_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_u8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -894,7 +894,7 @@ uint16x4_t test_vabd_u16(uint16x4_t v1, uint16x4_t v2) { return vabd_u16(v1, v2); // CIR-LABEL: vabd_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_u16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -906,7 +906,7 @@ uint32x2_t test_vabd_u32(uint32x2_t v1, uint32x2_t v2) { return vabd_u32(v1, v2); // CIR-LABEL: vabd_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_u32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -918,7 +918,7 @@ float32x2_t test_vabd_f32(float32x2_t v1, float32x2_t v2) { return vabd_f32(v1, v2); // CIR-LABEL: vabd_f32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.fabd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.fabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) // LLVM: {{.*}}test_vabd_f32(<2 x float>{{.*}}[[V1:%.*]], <2 x float>{{.*}}[[V2:%.*]]) @@ -980,7 +980,7 @@ uint16x8_t test_vabdq_u16(uint16x8_t v1, uint16x8_t v2) { // CIR-LABEL: vabdq_u16 // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uabd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) - + // LLVM: {{.*}}test_vabdq_u16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) // LLVM: [[VABD_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]]) // LLVM: ret <8 x i16> [[VABD_I]] @@ -2572,7 +2572,7 @@ int8x8_t test_vhadd_s8(int8x8_t v1, int8x8_t v2) { return vhadd_s8(v1, v2); // CIR-LABEL: vhadd_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_s8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -2584,7 +2584,7 @@ int16x4_t test_vhadd_s16(int16x4_t v1, int16x4_t v2) { return vhadd_s16(v1, v2); // CIR-LABEL: vhadd_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_s16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -2599,7 +2599,7 @@ int32x2_t test_vhadd_s32(int32x2_t v1, int32x2_t v2) { return vhadd_s32(v1, v2); // CIR-LABEL: vhadd_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_s32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -2614,7 +2614,7 @@ uint8x8_t test_vhadd_u8(uint8x8_t v1, uint8x8_t v2) { return vhadd_u8(v1, v2); // CIR-LABEL: vhadd_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_u8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -2626,7 +2626,7 @@ uint16x4_t test_vhadd_u16(uint16x4_t v1, uint16x4_t v2) { return vhadd_u16(v1, v2); // CIR-LABEL: vhadd_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_u16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -2641,7 +2641,7 @@ uint32x2_t test_vhadd_u32(uint32x2_t v1, uint32x2_t v2) { return vhadd_u32(v1, v2); // CIR-LABEL: vhadd_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhadd_u32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -2656,7 +2656,7 @@ int8x16_t test_vhaddq_s8(int8x16_t v1, int8x16_t v2) { return vhaddq_s8(v1, v2); // CIR-LABEL: vhaddq_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_s8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) @@ -2668,7 +2668,7 @@ int16x8_t test_vhaddq_s16(int16x8_t v1, int16x8_t v2) { return vhaddq_s16(v1, v2); // CIR-LABEL: vhaddq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_s16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) @@ -2683,7 +2683,7 @@ int32x4_t test_vhaddq_s32(int32x4_t v1, int32x4_t v2) { return vhaddq_s32(v1, v2); // CIR-LABEL: vhaddq_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_s32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) @@ -2698,7 +2698,7 @@ uint8x16_t test_vhaddq_u8(uint8x16_t v1, uint8x16_t v2) { return vhaddq_u8(v1, v2); // CIR-LABEL: vhaddq_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_u8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) @@ -2710,7 +2710,7 @@ uint16x8_t test_vhaddq_u16(uint16x8_t v1, uint16x8_t v2) { return vhaddq_u16(v1, v2); // CIR-LABEL: vhaddq_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_u16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) @@ -2725,7 +2725,7 @@ uint32x4_t test_vhaddq_u32(uint32x4_t v1, uint32x4_t v2) { return vhaddq_u32(v1, v2); // CIR-LABEL: vhaddq_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vhaddq_u32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) @@ -2736,119 +2736,179 @@ uint32x4_t test_vhaddq_u32(uint32x4_t v1, uint32x4_t v2) { // LLVM: ret <4 x i32> [[VHADD_V2_I]] } -// NYI-LABEL: @test_vhsub_s8( -// NYI: [[VHSUB_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8> %v1, <8 x i8> %v2) -// NYI: ret <8 x i8> [[VHSUB_V_I]] -// int8x8_t test_vhsub_s8(int8x8_t v1, int8x8_t v2) { -// return vhsub_s8(v1, v2); -// } +int8x8_t test_vhsub_s8(int8x8_t v1, int8x8_t v2) { + return vhsub_s8(v1, v2); -// NYI-LABEL: @test_vhsub_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> -// NYI: [[VHSUB_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.shsub.v4i16(<4 x i16> %v1, <4 x i16> %v2) -// NYI: [[VHSUB_V3_I:%.*]] = bitcast <4 x i16> [[VHSUB_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VHSUB_V2_I]] -// int16x4_t test_vhsub_s16(int16x4_t v1, int16x4_t v2) { -// return vhsub_s16(v1, v2); -// } + // CIR-LABEL: vhsub_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vhsub_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> -// NYI: [[VHSUB_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.shsub.v2i32(<2 x i32> %v1, <2 x i32> %v2) -// NYI: [[VHSUB_V3_I:%.*]] = bitcast <2 x i32> [[VHSUB_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VHSUB_V2_I]] -// int32x2_t test_vhsub_s32(int32x2_t v1, int32x2_t v2) { -// return vhsub_s32(v1, v2); -// } + // LLVM: {{.*}}@test_vhsub_s8(<8 x i8>{{.*}}[[v1:%.*]], <8 x i8>{{.*}}[[v2:%.*]]) + // LLVM: [[VHSUB_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8> [[v1]], <8 x i8> [[v2]]) + // LLVM: ret <8 x i8> [[VHSUB_V_I]] +} -// NYI-LABEL: @test_vhsub_u8( -// NYI: [[VHSUB_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uhsub.v8i8(<8 x i8> %v1, <8 x i8> %v2) -// NYI: ret <8 x i8> [[VHSUB_V_I]] -// uint8x8_t test_vhsub_u8(uint8x8_t v1, uint8x8_t v2) { -// return vhsub_u8(v1, v2); -// } +int16x4_t test_vhsub_s16(int16x4_t v1, int16x4_t v2) { + return vhsub_s16(v1, v2); -// NYI-LABEL: @test_vhsub_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> -// NYI: [[VHSUB_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uhsub.v4i16(<4 x i16> %v1, <4 x i16> %v2) -// NYI: [[VHSUB_V3_I:%.*]] = bitcast <4 x i16> [[VHSUB_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VHSUB_V2_I]] -// uint16x4_t test_vhsub_u16(uint16x4_t v1, uint16x4_t v2) { -// return vhsub_u16(v1, v2); -// } + // CIR-LABEL: vhsub_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vhsub_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> -// NYI: [[VHSUB_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uhsub.v2i32(<2 x i32> %v1, <2 x i32> %v2) -// NYI: [[VHSUB_V3_I:%.*]] = bitcast <2 x i32> [[VHSUB_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VHSUB_V2_I]] -// uint32x2_t test_vhsub_u32(uint32x2_t v1, uint32x2_t v2) { -// return vhsub_u32(v1, v2); -// } + // LLVM: {{.*}}@test_vhsub_s16(<4 x i16>{{.*}}[[v1:%.*]], <4 x i16>{{.*}}[[v2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[v1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[v2]] to <8 x i8> + // LLVM: [[VHSUB_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.shsub.v4i16(<4 x i16> [[v1]], <4 x i16> [[v2]]) + // LLVM: [[VHSUB_V3_I:%.*]] = bitcast <4 x i16> [[VHSUB_V2_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VHSUB_V2_I]] +} -// NYI-LABEL: @test_vhsubq_s8( -// NYI: [[VHSUBQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.shsub.v16i8(<16 x i8> %v1, <16 x i8> %v2) -// NYI: ret <16 x i8> [[VHSUBQ_V_I]] -// int8x16_t test_vhsubq_s8(int8x16_t v1, int8x16_t v2) { -// return vhsubq_s8(v1, v2); -// } +int32x2_t test_vhsub_s32(int32x2_t v1, int32x2_t v2) { + return vhsub_s32(v1, v2); -// NYI-LABEL: @test_vhsubq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> -// NYI: [[VHSUBQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.shsub.v8i16(<8 x i16> %v1, <8 x i16> %v2) -// NYI: [[VHSUBQ_V3_I:%.*]] = bitcast <8 x i16> [[VHSUBQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VHSUBQ_V2_I]] -// int16x8_t test_vhsubq_s16(int16x8_t v1, int16x8_t v2) { -// return vhsubq_s16(v1, v2); -// } + // CIR-LABEL: vhsub_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vhsubq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> -// NYI: [[VHSUBQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.shsub.v4i32(<4 x i32> %v1, <4 x i32> %v2) -// NYI: [[VHSUBQ_V3_I:%.*]] = bitcast <4 x i32> [[VHSUBQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VHSUBQ_V2_I]] -// int32x4_t test_vhsubq_s32(int32x4_t v1, int32x4_t v2) { -// return vhsubq_s32(v1, v2); -// } + // LLVM: {{.*}}@test_vhsub_s32(<2 x i32>{{.*}}[[v1:%.*]], <2 x i32>{{.*}}[[v2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[v1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[v2]] to <8 x i8> + // LLVM: [[VHSUB_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.shsub.v2i32(<2 x i32> [[v1]], <2 x i32> [[v2]]) + // LLVM: [[VHSUB_V3_I:%.*]] = bitcast <2 x i32> [[VHSUB_V2_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VHSUB_V2_I]] +} -// NYI-LABEL: @test_vhsubq_u8( -// NYI: [[VHSUBQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uhsub.v16i8(<16 x i8> %v1, <16 x i8> %v2) -// NYI: ret <16 x i8> [[VHSUBQ_V_I]] -// uint8x16_t test_vhsubq_u8(uint8x16_t v1, uint8x16_t v2) { -// return vhsubq_u8(v1, v2); -// } +uint8x8_t test_vhsub_u8(uint8x8_t v1, uint8x8_t v2) { + return vhsub_u8(v1, v2); -// NYI-LABEL: @test_vhsubq_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> -// NYI: [[VHSUBQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uhsub.v8i16(<8 x i16> %v1, <8 x i16> %v2) -// NYI: [[VHSUBQ_V3_I:%.*]] = bitcast <8 x i16> [[VHSUBQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VHSUBQ_V2_I]] -// uint16x8_t test_vhsubq_u16(uint16x8_t v1, uint16x8_t v2) { -// return vhsubq_u16(v1, v2); -// } + // CIR-LABEL: vhsub_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vhsubq_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> -// NYI: [[VHSUBQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uhsub.v4i32(<4 x i32> %v1, <4 x i32> %v2) -// NYI: [[VHSUBQ_V3_I:%.*]] = bitcast <4 x i32> [[VHSUBQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VHSUBQ_V2_I]] -// uint32x4_t test_vhsubq_u32(uint32x4_t v1, uint32x4_t v2) { -// return vhsubq_u32(v1, v2); -// } + // LLVM: {{.*}}@test_vhsub_u8(<8 x i8>{{.*}}[[v1:%.*]], <8 x i8>{{.*}}[[v2:%.*]]) + // LLVM: [[VHSUB_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uhsub.v8i8(<8 x i8> [[v1]], <8 x i8> [[v2]]) + // LLVM: ret <8 x i8> [[VHSUB_V_I]] +} + +uint16x4_t test_vhsub_u16(uint16x4_t v1, uint16x4_t v2) { + return vhsub_u16(v1, v2); + + // CIR-LABEL: vhsub_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vhsub_u16(<4 x i16>{{.*}}[[v1:%.*]], <4 x i16>{{.*}}[[v2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[v1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[v2]] to <8 x i8> + // LLVM: [[VHSUB_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uhsub.v4i16(<4 x i16> [[v1]], <4 x i16> [[v2]]) + // LLVM: [[VHSUB_V3_I:%.*]] = bitcast <4 x i16> [[VHSUB_V2_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VHSUB_V2_I]] +} + +uint32x2_t test_vhsub_u32(uint32x2_t v1, uint32x2_t v2) { + return vhsub_u32(v1, v2); + + // CIR-LABEL: vhsub_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vhsub_u32(<2 x i32>{{.*}}[[v1:%.*]], <2 x i32>{{.*}}[[v2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[v1]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[v2]] to <8 x i8> + // LLVM: [[VHSUB_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uhsub.v2i32(<2 x i32> [[v1]], <2 x i32> [[v2]]) + // LLVM: [[VHSUB_V3_I:%.*]] = bitcast <2 x i32> [[VHSUB_V2_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VHSUB_V2_I]] +} + +int8x16_t test_vhsubq_s8(int8x16_t v1, int8x16_t v2) { + return vhsubq_s8(v1, v2); + + // CIR-LABEL: vhsubq_s8 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vhsubq_s8(<16 x i8>{{.*}}[[v1:%.*]], <16 x i8>{{.*}}[[v2:%.*]]) + // LLVM: [[VHSUBQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.shsub.v16i8(<16 x i8> [[v1]], <16 x i8> [[v2]]) + // LLVM: ret <16 x i8> [[VHSUBQ_V_I]] +} + +int16x8_t test_vhsubq_s16(int16x8_t v1, int16x8_t v2) { + return vhsubq_s16(v1, v2); + + // CIR-LABEL: vhsubq_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vhsubq_s16(<8 x i16>{{.*}}[[v1:%.*]], <8 x i16>{{.*}}[[v2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[v1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[v2]] to <16 x i8> + // LLVM: [[VHSUBQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.shsub.v8i16(<8 x i16> [[v1]], <8 x i16> [[v2]]) + // LLVM: [[VHSUBQ_V3_I:%.*]] = bitcast <8 x i16> [[VHSUBQ_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VHSUBQ_V2_I]] +} + +int32x4_t test_vhsubq_s32(int32x4_t v1, int32x4_t v2) { + return vhsubq_s32(v1, v2); + + // CIR-LABEL: vhsubq_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.shsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vhsubq_s32(<4 x i32>{{.*}}[[v1:%.*]], <4 x i32>{{.*}}[[v2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[v1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[v2]] to <16 x i8> + // LLVM: [[VHSUBQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.shsub.v4i32(<4 x i32> [[v1]], <4 x i32> [[v2]]) + // LLVM: [[VHSUBQ_V3_I:%.*]] = bitcast <4 x i32> [[VHSUBQ_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VHSUBQ_V2_I]] +} + +uint8x16_t test_vhsubq_u8(uint8x16_t v1, uint8x16_t v2) { + return vhsubq_u8(v1, v2); + + // CIR-LABEL: vhsubq_u8 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vhsubq_u8(<16 x i8>{{.*}}[[v1:%.*]], <16 x i8>{{.*}}[[v2:%.*]]) + // LLVM: [[VHSUBQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.uhsub.v16i8(<16 x i8> [[v1]], <16 x i8> [[v2]]) + // LLVM: ret <16 x i8> [[VHSUBQ_V_I]] +} + +uint16x8_t test_vhsubq_u16(uint16x8_t v1, uint16x8_t v2) { + return vhsubq_u16(v1, v2); + + // CIR-LABEL: vhsubq_u16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vhsubq_u16(<8 x i16>{{.*}}[[v1:%.*]], <8 x i16>{{.*}}[[v2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[v1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[v2]] to <16 x i8> + // LLVM: [[VHSUBQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uhsub.v8i16(<8 x i16> [[v1]], <8 x i16> [[v2]]) + // LLVM: [[VHSUBQ_V3_I:%.*]] = bitcast <8 x i16> [[VHSUBQ_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VHSUBQ_V2_I]] +} + +uint32x4_t test_vhsubq_u32(uint32x4_t v1, uint32x4_t v2) { + return vhsubq_u32(v1, v2); + + // CIR-LABEL: vhsubq_u32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uhsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}@test_vhsubq_u32(<4 x i32>{{.*}}[[v1:%.*]], <4 x i32>{{.*}}[[v2:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[v1]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[v2]] to <16 x i8> + // LLVM: [[VHSUBQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uhsub.v4i32(<4 x i32> [[v1]], <4 x i32> [[v2]]) + // LLVM: [[VHSUBQ_V3_I:%.*]] = bitcast <4 x i32> [[VHSUBQ_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VHSUBQ_V2_I]] +} int8x8_t test_vrhadd_s8(int8x8_t v1, int8x8_t v2) { return vrhadd_s8(v1, v2); // CIR-LABEL: vrhadd_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_s8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -2860,7 +2920,7 @@ int16x4_t test_vrhadd_s16(int16x4_t v1, int16x4_t v2) { return vrhadd_s16(v1, v2); // CIR-LABEL: vrhadd_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_s16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -2875,7 +2935,7 @@ int32x2_t test_vrhadd_s32(int32x2_t v1, int32x2_t v2) { return vrhadd_s32(v1, v2); // CIR-LABEL: vrhadd_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_s32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -2890,7 +2950,7 @@ uint8x8_t test_vrhadd_u8(uint8x8_t v1, uint8x8_t v2) { return vrhadd_u8(v1, v2); // CIR-LABEL: vrhadd_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_u8(<8 x i8>{{.*}}[[V1:%.*]], <8 x i8>{{.*}}[[V2:%.*]]) @@ -2902,7 +2962,7 @@ uint16x4_t test_vrhadd_u16(uint16x4_t v1, uint16x4_t v2) { return vrhadd_u16(v1, v2); // CIR-LABEL: vrhadd_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_u16(<4 x i16>{{.*}}[[V1:%.*]], <4 x i16>{{.*}}[[V2:%.*]]) @@ -2917,7 +2977,7 @@ uint32x2_t test_vrhadd_u32(uint32x2_t v1, uint32x2_t v2) { return vrhadd_u32(v1, v2); // CIR-LABEL: vrhadd_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhadd_u32(<2 x i32>{{.*}}[[V1:%.*]], <2 x i32>{{.*}}[[V2:%.*]]) @@ -2932,7 +2992,7 @@ int8x16_t test_vrhaddq_s8(int8x16_t v1, int8x16_t v2) { return vrhaddq_s8(v1, v2); // CIR-LABEL: vrhaddq_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_s8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) @@ -2944,7 +3004,7 @@ int16x8_t test_vrhaddq_s16(int16x8_t v1, int16x8_t v2) { return vrhaddq_s16(v1, v2); // CIR-LABEL: vrhaddq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_s16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) @@ -2959,7 +3019,7 @@ int32x4_t test_vrhaddq_s32(int32x4_t v1, int32x4_t v2) { return vrhaddq_s32(v1, v2); // CIR-LABEL: vrhaddq_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_s32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) @@ -2974,7 +3034,7 @@ uint8x16_t test_vrhaddq_u8(uint8x16_t v1, uint8x16_t v2) { return vrhaddq_u8(v1, v2); // CIR-LABEL: vrhaddq_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_u8(<16 x i8>{{.*}}[[V1:%.*]], <16 x i8>{{.*}}[[V2:%.*]]) @@ -2986,7 +3046,7 @@ uint16x8_t test_vrhaddq_u16(uint16x8_t v1, uint16x8_t v2) { return vrhaddq_u16(v1, v2); // CIR-LABEL: vrhaddq_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_u16(<8 x i16>{{.*}}[[V1:%.*]], <8 x i16>{{.*}}[[V2:%.*]]) @@ -3001,7 +3061,7 @@ uint32x4_t test_vrhaddq_u32(uint32x4_t v1, uint32x4_t v2) { return vrhaddq_u32(v1, v2); // CIR-LABEL: vrhaddq_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urhadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrhaddq_u32(<4 x i32>{{.*}}[[V1:%.*]], <4 x i32>{{.*}}[[V2:%.*]]) @@ -3015,7 +3075,7 @@ uint32x4_t test_vrhaddq_u32(uint32x4_t v1, uint32x4_t v2) { int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { return vqadd_s8(a, b); // CIR-LABEL: vqadd_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_s8( @@ -3026,7 +3086,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { int16x4_t test_vqadd_s16(int16x4_t a, int16x4_t b) { return vqadd_s16(a, b); // CIR-LABEL: vqadd_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_s16( @@ -3040,7 +3100,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { int32x2_t test_vqadd_s32(int32x2_t a, int32x2_t b) { return vqadd_s32(a, b); // CIR-LABEL: vqadd_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_s32( @@ -3054,7 +3114,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { int64x1_t test_vqadd_s64(int64x1_t a, int64x1_t b) { return vqadd_s64(a, b); // CIR-LABEL: vqadd_s64 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_s64( @@ -3063,12 +3123,12 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { // LLVM: [[VQADD_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqadd.v1i64(<1 x i64> %0, <1 x i64> %1) // LLVM: [[VQADD_V3_I:%.*]] = bitcast <1 x i64> [[VQADD_V2_I]] to <8 x i8> // LLVM: ret <1 x i64> [[VQADD_V2_I]] - } + } uint8x8_t test_vqadd_u8(uint8x8_t a, uint8x8_t b) { return vqadd_u8(a, b); // CIR-LABEL: vqadd_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_u8( @@ -3079,7 +3139,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { uint16x4_t test_vqadd_u16(uint16x4_t a, uint16x4_t b) { return vqadd_u16(a, b); // CIR-LABEL: vqadd_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_u16( @@ -3090,7 +3150,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { uint32x2_t test_vqadd_u32(uint32x2_t a, uint32x2_t b) { return vqadd_u32(a, b); // CIR-LABEL: vqadd_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_u32( @@ -3101,7 +3161,7 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { uint64x1_t test_vqadd_u64(uint64x1_t a, uint64x1_t b) { return vqadd_u64(a, b); // CIR-LABEL: vqadd_u64 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.uqadd" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM-LABEL: @test_vqadd_u64( @@ -3409,7 +3469,7 @@ int8x16_t test_vshlq_s8(int8x16_t a, int8x16_t b) { return vshlq_s8(a, b); // CIR-LABEL: vshlq_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) @@ -3421,7 +3481,7 @@ int16x8_t test_vshlq_s16(int16x8_t a, int16x8_t b) { return vshlq_s16(a, b); // CIR-LABEL: vshlq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) @@ -3436,7 +3496,7 @@ int32x4_t test_vshlq_s32(int32x4_t a, int32x4_t b) { return vshlq_s32(a, b); // CIR-LABEL: vshlq_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) @@ -3451,7 +3511,7 @@ int64x2_t test_vshlq_s64(int64x2_t a, int64x2_t b) { return vshlq_s64(a, b); // CIR-LABEL: vshlq_s64 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]]) @@ -3466,7 +3526,7 @@ uint8x16_t test_vshlq_u8(uint8x16_t a, int8x16_t b) { return vshlq_u8(a, b); // CIR-LABEL: vshlq_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) @@ -3478,7 +3538,7 @@ uint16x8_t test_vshlq_u16(uint16x8_t a, int16x8_t b) { return vshlq_u16(a, b); // CIR-LABEL: vshlq_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) @@ -3508,7 +3568,7 @@ uint64x2_t test_vshlq_u64(uint64x2_t a, int64x2_t b) { return vshlq_u64(a, b); // CIR-LABEL: vshlq_u64 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]]) @@ -4118,7 +4178,7 @@ int8x8_t test_vmin_s8(int8x8_t a, int8x8_t b) { // CIR-LABEL: vmin_s8 // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smin" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector - + // LLVM: {{.*}}@test_vmin_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) // LLVM: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[A]], <8 x i8> [[B]]) // LLVM: ret <8 x i8> [[VMIN_I]] @@ -4818,7 +4878,7 @@ int16x4_t test_vqdmulh_s16(int16x4_t a, int16x4_t b) { return vqdmulh_s16(a, b); // CIR-LABEL: vqdmulh_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqdmulh_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) @@ -4833,7 +4893,7 @@ int32x2_t test_vqdmulh_s32(int32x2_t a, int32x2_t b) { return vqdmulh_s32(a, b); // CIR-LABEL: vqdmulh_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqdmulh_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) @@ -4848,7 +4908,7 @@ int16x8_t test_vqdmulhq_s16(int16x8_t a, int16x8_t b) { return vqdmulhq_s16(a, b); // CIR-LABEL: vqdmulhq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmulh" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqdmulhq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) @@ -4908,7 +4968,7 @@ int16x8_t test_vqrdmulhq_s16(int16x8_t a, int16x8_t b) { return vqrdmulhq_s16(a, b); // CIR-LABEL: vqrdmulhq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrdmulh" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrdmulh" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vqrdmulhq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) @@ -4966,9 +5026,9 @@ int8x8_t test_vshl_n_s8(int8x8_t a) { return vshl_n_s8(a, 3); // CIR-LABEL: @test_vshl_n_s8 - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshl_n_s8(<8 x i8>{{.*}}[[A:%.*]]) // LLVM: [[VSHL_N:%.*]] = shl <8 x i8> [[A]], splat (i8 3) @@ -4980,15 +5040,15 @@ int16x4_t test_vshl_n_s16(int16x4_t a) { return vshl_n_s16(a, 3); // CIR-LABEL: @test_vshl_n_s16 - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, // CIR-SAME: #cir.int<3> : !s16i]> - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshl_n_s16(<4 x i16>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> // LLVM: [[VSHL_N:%.*]] = shl <4 x i16> [[TMP1]], splat (i16 3) - // LLVM: ret <4 x i16> [[VSHL_N]] + // LLVM: ret <4 x i16> [[VSHL_N]] } int32x2_t test_vshl_n_s32(int32x2_t a) { @@ -4996,7 +5056,7 @@ int32x2_t test_vshl_n_s32(int32x2_t a) { // CIR-LABEL: @test_vshl_n_s32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i]> - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshl_n_s32(<2 x i32>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> @@ -5010,9 +5070,9 @@ int32x2_t test_vshl_n_s32(int32x2_t a) { // NYI: ret <16 x i8> [[VSHL_N]] int8x16_t test_vshlq_n_s8(int8x16_t a) { return vshlq_n_s8(a, 3); - + // CIR-LABEL: @test_vshlq_n_s8 - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshlq_n_s8(<16 x i8>{{.*}}[[A:%.*]]) // LLVM: [[VSHL_N:%.*]] = shl <16 x i8> [[A]], splat (i8 3) @@ -5023,7 +5083,7 @@ int16x8_t test_vshlq_n_s16(int16x8_t a) { return vshlq_n_s16(a, 3); // CIR-LABEL: @test_vshlq_n_s16 - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshlq_n_s16(<8 x i16>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> @@ -5037,10 +5097,10 @@ int32x4_t test_vshlq_n_s32(int32x4_t a) { return vshlq_n_s32(a, 3); // CIR-LABEL: @test_vshlq_n_s32 - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : // CIR-SAME: !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i]> - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : - // CIR-SAME: !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : + // CIR-SAME: !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshlq_n_s32(<4 x i32>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> @@ -5054,7 +5114,7 @@ int64x2_t test_vshlq_n_s64(int64x2_t a) { // CIR-LABEL: @test_vshlq_n_s64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i, #cir.int<3> : !s64i]> - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, [[AMT]] : // CIR-SAME: !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshlq_n_s64(<2 x i64>{{.*}}[[A:%.*]]) @@ -5068,8 +5128,8 @@ uint8x8_t test_vshl_n_u8(uint8x8_t a) { return vshl_n_u8(a, 3); // CIR-LABEL: @test_vshl_n_u8 - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : - // CIR-SAME: !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshl_n_u8(<8 x i8>{{.*}}[[A:%.*]]) // LLVM: [[VSHL_N:%.*]] = shl <8 x i8> [[A]], splat (i8 3) @@ -5080,8 +5140,8 @@ uint16x4_t test_vshl_n_u16(uint16x4_t a) { return vshl_n_u16(a, 3); // CIR-LABEL: @test_vshl_n_u16 - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : - // CIR-SAME: !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshl_n_u16(<4 x i16>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> @@ -5094,9 +5154,9 @@ uint32x2_t test_vshl_n_u32(uint32x2_t a) { return vshl_n_u32(a, 3); // CIR-LABEL: @test_vshl_n_u32 - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : - // CIR-SAME: !cir.vector) -> !cir.vector - + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector + // LLVM: {{.*}}@test_vshl_n_u32(<2 x i32>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] @@ -5108,8 +5168,8 @@ uint8x16_t test_vshlq_n_u8(uint8x16_t a) { return vshlq_n_u8(a, 3); // CIR-LABEL: @test_vshlq_n_u8 - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : - // CIR-SAME: !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshlq_n_u8(<16 x i8>{{.*}}[[A:%.*]]) // LLVM: [[VSHL_N:%.*]] = shl <16 x i8> [[A]], splat (i8 3) @@ -5120,8 +5180,8 @@ uint16x8_t test_vshlq_n_u16(uint16x8_t a) { return vshlq_n_u16(a, 3); // CIR-LABEL: @test_vshlq_n_u16 - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : - // CIR-SAME: !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshlq_n_u16(<8 x i16>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> @@ -5134,8 +5194,8 @@ uint32x4_t test_vshlq_n_u32(uint32x4_t a) { return vshlq_n_u32(a, 3); // CIR-LABEL: @test_vshlq_n_u32 - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : - // CIR-SAME: !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshlq_n_u32(<4 x i32>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> @@ -5148,8 +5208,8 @@ uint64x2_t test_vshlq_n_u64(uint64x2_t a) { return vshlq_n_u64(a, 3); // CIR-LABEL: @test_vshlq_n_u64 - // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : - // CIR-SAME: !cir.vector) -> !cir.vector + // CIR: {{.*}} = cir.shift(left, {{.*}} : !cir.vector, {{.*}} : + // CIR-SAME: !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshlq_n_u64(<2 x i64>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> @@ -5261,7 +5321,7 @@ int32x4_t test_vshrq_n_s32(int32x4_t a) { } // Vector lashr/ashr are undefined when the shift amount is equal to the vector -// element size. Thus in code gen, for singed input, we make the shift amount +// element size. Thus in code gen, for singed input, we make the shift amount // one less than the vector element size. int32x4_t test_vshrq_n_s32_32(int32x4_t a) { return vshrq_n_s32(a, 32); @@ -5326,8 +5386,8 @@ uint16x4_t test_vshr_n_u16_16(uint16x4_t a) { return vshr_n_u16(a, 16); // CIR-LABEL: vshr_n_u16 - // CIR: {{%.*}} = cir.const #cir.int<16> : !s32i - // CIR: {{%.*}} = cir.const #cir.zero : !cir.vector + // CIR: {{%.*}} = cir.const #cir.int<16> : !s32i + // CIR: {{%.*}} = cir.const #cir.zero : !cir.vector // CIR-NOT: cir.shift // LLVM: {{.*}}test_vshr_n_u16_16(<4 x i16>{{.*}}[[A:%.*]]) @@ -5580,7 +5640,7 @@ int8x8_t test_vrshr_n_s8(int8x8_t a) { // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_s8(<8 x i8>{{.*}}[[A:%.*]]) @@ -5595,7 +5655,7 @@ uint8x8_t test_vrshr_n_u8(uint8x8_t a) { // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_u8(<8 x i8>{{.*}}[[A:%.*]]) @@ -5609,7 +5669,7 @@ int16x4_t test_vrshr_n_s16(int16x4_t a) { // CIR-LABEL: vrshr_n_s16 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_s16(<4 x i16>{{.*}}[[A:%.*]]) @@ -5625,7 +5685,7 @@ uint16x4_t test_vrshr_n_u16(uint16x4_t a) { // CIR-LABEL: vrshr_n_u16 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_u16(<4 x i16>{{.*}}[[A:%.*]]) @@ -5640,7 +5700,7 @@ int32x2_t test_vrshr_n_s32(int32x2_t a) { // CIR-LABEL: vrshr_n_s32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_s32(<2 x i32>{{.*}}[[A:%.*]]) @@ -5655,7 +5715,7 @@ uint32x2_t test_vrshr_n_u32(uint32x2_t a) { // CIR-LABEL: vrshr_n_u32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_u32(<2 x i32>{{.*}}[[A:%.*]]) @@ -5670,7 +5730,7 @@ int64x1_t test_vrshr_n_s64(int64x1_t a) { // CIR-LABEL: vrshr_n_s64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_s64(<1 x i64>{{.*}}[[A:%.*]]) @@ -5685,7 +5745,7 @@ uint64x1_t test_vrshr_n_u64(uint64x1_t a) { // CIR-LABEL: vrshr_n_u64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshr_n_u64(<1 x i64>{{.*}}[[A:%.*]]) @@ -5703,7 +5763,7 @@ int8x16_t test_vrshrq_n_s8(int8x16_t a) { // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_s8(<16 x i8>{{.*}}[[A:%.*]]) @@ -5719,7 +5779,7 @@ uint8x16_t test_vrshrq_n_u8(uint8x16_t a) { // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, // CIR-SAME: #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i, #cir.int<-3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_u8(<16 x i8>{{.*}}[[A:%.*]]) @@ -5733,7 +5793,7 @@ int16x8_t test_vrshrq_n_s16(int16x8_t a) { // CIR-LABEL: vrshrq_n_s16 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_s16(<8 x i16>{{.*}}[[A:%.*]]) @@ -5749,7 +5809,7 @@ uint16x8_t test_vrshrq_n_u16(uint16x8_t a) { // CIR-LABEL: vrshrq_n_u16 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, // CIR-SAME: #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i, #cir.int<-3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_u16(<8 x i16>{{.*}}[[A:%.*]]) @@ -5764,7 +5824,7 @@ int32x4_t test_vrshrq_n_s32(int32x4_t a) { // CIR-LABEL: vrshrq_n_s32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i, #cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_s32(<4 x i32>{{.*}}[[A:%.*]]) @@ -5778,9 +5838,9 @@ uint32x4_t test_vrshrq_n_u32(uint32x4_t a) { return vrshrq_n_u32(a, 3); // CIR-LABEL: vrshrq_n_u32 - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i, + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s32i, #cir.int<-3> : !s32i, // CIR-SAME: #cir.int<-3> : !s32i, #cir.int<-3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_u32(<4 x i32>{{.*}}[[A:%.*]]) @@ -5795,7 +5855,7 @@ int64x2_t test_vrshrq_n_s64(int64x2_t a) { // CIR-LABEL: vrshrq_n_s64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i, #cir.int<-3> : !s64i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_s64(<2 x i64>{{.*}}[[A:%.*]]) @@ -5810,7 +5870,7 @@ uint64x2_t test_vrshrq_n_u64(uint64x2_t a) { // CIR-LABEL: vrshrq_n_u64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<-3> : !s64i, #cir.int<-3> : !s64i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vrshrq_n_u64(<2 x i64>{{.*}}[[A:%.*]]) @@ -6324,9 +6384,9 @@ uint8x8_t test_vqshlu_n_s8(int8x8_t a) { return vqshlu_n_s8(a, 3); // CIR-LABEL: vqshlu_n_s8 - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, // CIR: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshlu_n_s8(<8 x i8>{{.*}}[[A:%.*]]) @@ -6338,9 +6398,9 @@ uint16x4_t test_vqshlu_n_s16(int16x4_t a) { return vqshlu_n_s16(a, 3); // CIR-LABEL: vqshlu_n_s16 - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, // CIR-SAME:#cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshlu_n_s16(<4 x i16>{{.*}}[[A:%.*]]) @@ -6355,7 +6415,7 @@ uint32x2_t test_vqshlu_n_s32(int32x2_t a) { // CIR-LABEL: vqshlu_n_s32 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshlu_n_s32(<2 x i32>{{.*}}[[A:%.*]]) @@ -6368,11 +6428,11 @@ uint8x16_t test_vqshluq_n_s8(int8x16_t a) { return vqshluq_n_s8(a, 3); // CIR-LABEL: vqshluq_n_s8 - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, - // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, + // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshluq_n_s8(<16 x i8>{{.*}}[[A:%.*]]) @@ -6384,10 +6444,10 @@ uint16x8_t test_vqshluq_n_s16(int16x8_t a) { return vqshluq_n_s16(a, 3); // CIR-LABEL: vqshluq_n_s16 - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, - // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshluq_n_s16(<8 x i16>{{.*}}[[A:%.*]]) @@ -6401,9 +6461,9 @@ uint32x4_t test_vqshluq_n_s32(int32x4_t a) { return vqshluq_n_s32(a, 3); // CIR-LABEL: vqshluq_n_s32 - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i, + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i, // CIR-SAME: #cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshluq_n_s32(<4 x i32>{{.*}}[[A:%.*]]) @@ -6418,7 +6478,7 @@ uint64x2_t test_vqshluq_n_s64(int64x2_t a) { // CIR-LABEL: vqshluq_n_s64 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i, #cir.int<3> : !s64i]> : !cir.vector - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{%.*}}, [[AMT]] : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vqshluq_n_s64(<2 x i64>{{.*}}[[A:%.*]]) @@ -6433,7 +6493,7 @@ int8x8_t test_vshrn_n_s16(int16x8_t a) { // CIR-LABEL: vshrn_n_s16 // CIR: [[TGT:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector // CIR: [[RES:%.*]] = cir.shift(right, [[TGT]] : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector // CIR: {{%.*}} = cir.cast(integral, [[RES]] : !cir.vector), !cir.vector @@ -6486,7 +6546,7 @@ uint8x8_t test_vshrn_n_u16(uint16x8_t a) { // CIR-LABEL: vshrn_n_u16 // CIR: [[TGT:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, + // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, // CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i]> : !cir.vector // CIR: [[RES:%.*]] = cir.shift(right, [[TGT]] : !cir.vector, [[AMT]] : !cir.vector) -> !cir.vector // CIR: {{%.*}} = cir.cast(integral, [[RES]] : !cir.vector), !cir.vector @@ -6678,7 +6738,7 @@ int32x2_t test_vrshrn_n_s64(int64x2_t a) { return vrshrn_n_s64(a, 19); // CIR-LABEL: vrshrn_n_s64 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vrshrn_n_s64(<2 x i64>{{.*}}[[A:%.*]]) @@ -6692,7 +6752,7 @@ uint8x8_t test_vrshrn_n_u16(uint16x8_t a) { return vrshrn_n_u16(a, 3); // CIR-LABEL: vrshrn_n_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vrshrn_n_u16(<8 x i16>{{.*}}[[A:%.*]]) @@ -6706,7 +6766,7 @@ uint16x4_t test_vrshrn_n_u32(uint32x4_t a) { return vrshrn_n_u32(a, 9); // CIR-LABEL: vrshrn_n_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}vrshrn_n_u32(<4 x i32>{{.*}}[[A:%.*]]) @@ -6720,7 +6780,7 @@ uint32x2_t test_vrshrn_n_u64(uint64x2_t a) { return vrshrn_n_u64(a, 19); // CIR-LABEL: vrshrn_n_u64 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.rshrn" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector // LLVM: {{.*}}test_vrshrn_n_u64(<2 x i64>{{.*}}[[A:%.*]]) @@ -6795,10 +6855,10 @@ uint8x8_t test_vqrshrun_n_s16(int16x8_t a) { return vqrshrun_n_s16(a, 3); // CIR-LABEL: test_vqrshrun_n_s16 // CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<3> : !s32i - // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector - + // LLVM-LABEL: @test_vqrshrun_n_s16( // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> {{%.*}} to <16 x i8> // LLVM: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> @@ -6810,10 +6870,10 @@ uint16x4_t test_vqrshrun_n_s32(int32x4_t a) { return vqrshrun_n_s32(a, 9); // CIR-LABEL: test_vqrshrun_n_s32 // CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<9> : !s32i - // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector - + // LLVM-LABEL: @test_vqrshrun_n_s32( // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> {{%.*}} to <16 x i8> // LLVM: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> @@ -6825,10 +6885,10 @@ uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { return vqrshrun_n_s64(a, 19); // CIR-LABEL: test_vqrshrun_n_s64 // CIR: [[INTRN_ARG1:%.*]] = cir.const #cir.int<19> : !s32i - // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[INTRN_ARG0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqrshrun" [[INTRN_ARG0]], [[INTRN_ARG1]] : // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector - + // LLVM-LABEL: @test_vqrshrun_n_s64( // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> {{%.*}} to <16 x i8> // LLVM: [[VQRSHRUN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> @@ -7101,8 +7161,8 @@ int16x8_t test_vshll_n_s8(int8x8_t a) { // CIR: [[SHIFT_TGT:%.*]] = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector - // CIR: {{%.*}} = cir.shift(left, [[SHIFT_TGT]] : !cir.vector, [[SHIFT_AMT]] : !cir.vector) -> !cir.vector - + // CIR: {{%.*}} = cir.shift(left, [[SHIFT_TGT]] : !cir.vector, [[SHIFT_AMT]] : !cir.vector) -> !cir.vector + // LLVM: {{.*}}@test_vshll_n_s8(<8 x i8>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = sext <8 x i8> [[A]] to <8 x i16> // LLVM: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 3) @@ -7114,9 +7174,9 @@ int32x4_t test_vshll_n_s16(int16x4_t a) { // CIR-LABEL: vshll_n_s16 // CIR: [[SHIFT_TGT:%.*]] = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector - // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<9> : !s32i, #cir.int<9> : !s32i, #cir.int<9> : + // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<9> : !s32i, #cir.int<9> : !s32i, #cir.int<9> : // CIR-SAME: !s32i, #cir.int<9> : !s32i]> : !cir.vector - // CIR: {{%.*}} = cir.shift(left, [[SHIFT_TGT]] : !cir.vector, [[SHIFT_AMT]] : !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.shift(left, [[SHIFT_TGT]] : !cir.vector, [[SHIFT_AMT]] : !cir.vector) -> !cir.vector // LLVM: {{.*}}@test_vshll_n_s16(<4 x i16>{{.*}}[[A:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> @@ -7161,7 +7221,7 @@ uint32x4_t test_vshll_n_u16(uint16x4_t a) { // CIR-LABEL: vshll_n_u16 // CIR: [[SHIFT_TGT:%.*]] = cir.cast(integral, {{%.*}} : !cir.vector), !cir.vector - // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<9> : !u32i, #cir.int<9> : !u32i, + // CIR: [[SHIFT_AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<9> : !u32i, #cir.int<9> : !u32i, // CIR-SAME: #cir.int<9> : !u32i, #cir.int<9> : !u32i]> : !cir.vector // LLVM: {{.*}}@test_vshll_n_u16(<4 x i16>{{.*}}[[A:%.*]]) @@ -8768,7 +8828,7 @@ int16x8_t test_vmull_s8(int8x8_t a, int8x8_t b) { return vmull_s8(a, b); // CIR-LABEL: vmull_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) @@ -8780,7 +8840,7 @@ int32x4_t test_vmull_s16(int16x4_t a, int16x4_t b) { return vmull_s16(a, b); // CIR-LABEL: vmull_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) @@ -8794,7 +8854,7 @@ int64x2_t test_vmull_s32(int32x2_t a, int32x2_t b) { return vmull_s32(a, b); // CIR-LABEL: vmull_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.smull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) @@ -8808,7 +8868,7 @@ uint16x8_t test_vmull_u8(uint8x8_t a, uint8x8_t b) { return vmull_u8(a, b); // CIR-LABEL: vmull_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) @@ -8820,7 +8880,7 @@ uint32x4_t test_vmull_u16(uint16x4_t a, uint16x4_t b) { return vmull_u16(a, b); // CIR-LABEL: vmull_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) @@ -8834,7 +8894,7 @@ uint64x2_t test_vmull_u32(uint32x2_t a, uint32x2_t b) { return vmull_u32(a, b); // CIR-LABEL: vmull_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.umull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_u32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) @@ -9298,7 +9358,7 @@ poly16x8_t test_vmull_p8(poly8x8_t a, poly8x8_t b) { return vmull_p8(a, b); // CIR-LABEL: vmull_p8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.pmull" {{%.*}}, {{%.*}} : + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.pmull" {{%.*}}, {{%.*}} : // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vmull_p8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) @@ -18371,8 +18431,8 @@ uint8x8_t test_vmovn_u16(uint16x8_t a) { return vmovn_u16(a); // CIR-LABEL: vmovn_u16 // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector - + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + // LLVM: {{.*}}@test_vmovn_u16(<8 x i16>{{.*}}[[A:%.*]]) // LLVM: [[VMOVN_1:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> // LLVM: [[VMOVN_I:%.*]] = trunc <8 x i16> [[A]] to <8 x i8> @@ -18383,8 +18443,8 @@ uint16x4_t test_vmovn_u32(uint32x4_t a) { return vmovn_u32(a); // CIR-LABEL: vmovn_u32 // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector - + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + // LLVM: {{.*}}@test_vmovn_u32(<4 x i32>{{.*}}[[A:%.*]]) // LLVM: [[VMOVN_1:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> // LLVM: [[VMOVN_I:%.*]] = trunc <4 x i32> [[A]] to <4 x i16> @@ -18395,8 +18455,8 @@ uint32x2_t test_vmovn_u64(uint64x2_t a) { return vmovn_u64(a); // CIR-LABEL: vmovn_u64 // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector - + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + // LLVM: {{.*}}@test_vmovn_u64(<2 x i64>{{.*}}[[A:%.*]]) // LLVM: [[VMOVN_1:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> // LLVM: [[VMOVN_I:%.*]] = trunc <2 x i64> [[A]] to <2 x i32> @@ -18407,8 +18467,8 @@ int8x8_t test_vmovn_s16(int16x8_t a) { return vmovn_s16(a); // CIR-LABEL: vmovn_s16 // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector - + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + // LLVM: {{.*}}@test_vmovn_s16(<8 x i16>{{.*}}[[A:%.*]]) // LLVM: [[VMOVN_1:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> // LLVM: [[VMOVN_I:%.*]] = trunc <8 x i16> [[A]] to <8 x i8> @@ -18419,8 +18479,8 @@ int16x4_t test_vmovn_s32(int32x4_t a) { return vmovn_s32(a); // CIR-LABEL: vmovn_s32 // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector - + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + // LLVM: {{.*}}@test_vmovn_s32(<4 x i32>{{.*}}[[A:%.*]]) // LLVM: [[VMOVN_1:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> // LLVM: [[VMOVN_I:%.*]] = trunc <4 x i32> [[A]] to <4 x i16> @@ -18431,8 +18491,8 @@ int32x2_t test_vmovn_s64(int64x2_t a) { return vmovn_s64(a); // CIR-LABEL: vmovn_s64 // CIR: [[ARG:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector - // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector - + // CIR: {{%.*}} = cir.cast(integral, [[ARG]] : !cir.vector), !cir.vector + // LLVM: {{.*}}@test_vmovn_s64(<2 x i64>{{.*}}[[A:%.*]]) // LLVM: [[VMOVN_1:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> // LLVM: [[VMOVN_I:%.*]] = trunc <2 x i64> [[A]] to <2 x i32> From 0d56f00d957141c2e7f76ab6902a0e68bf58f2a2 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Mon, 11 Nov 2024 16:21:50 -0800 Subject: [PATCH 2070/2301] [CIR][NFC] Fix formatting (#1105) This got misaligned after the namespace changes. --- clang/include/clang/CIR/Dialect/IR/CIRDialect.td | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index ddf5bdfe5ce7..5ec1865bc3e2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -42,7 +42,7 @@ def CIR_Dialect : Dialect { void printType(mlir::Type type, mlir::DialectAsmPrinter &printer) const override; mlir::Attribute parseAttribute(mlir::DialectAsmParser &parser, - mlir::Type type) const override; + mlir::Type type) const override; void printAttribute(mlir::Attribute attr, mlir::DialectAsmPrinter &os) const override; }]; From 366207fa9857af44d3aafb5ac666c336c75967e8 Mon Sep 17 00:00:00 2001 From: orbiri Date: Tue, 12 Nov 2024 22:15:40 +0200 Subject: [PATCH 2071/2301] [CIR][NFC] Conform if/else lowering order to match clang's output (#1107) Before the commit, when flattening if/else clauses - the else body came before the "then" body, as opposed to clang's output order. This commit reverses this and hopefully allows easier comparisson between clang's output and cir's. --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 2 +- clang/test/CIR/CodeGen/abstract-cond.c | 6 ++--- clang/test/CIR/Lowering/if.cir | 18 +++++++-------- clang/test/CIR/Transforms/if.cir | 8 +++---- clang/test/CIR/Transforms/mem2reg.c | 22 +++++++++---------- 5 files changed, 28 insertions(+), 28 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 9521c767452b..5e484d520a71 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -91,7 +91,7 @@ struct CIRIfFlattening : public OpRewritePattern { if (!emptyElse) { elseBeforeBody = &ifOp.getElseRegion().front(); elseAfterBody = &ifOp.getElseRegion().back(); - rewriter.inlineRegionBefore(ifOp.getElseRegion(), thenAfterBody); + rewriter.inlineRegionBefore(ifOp.getElseRegion(), continueBlock); } else { elseBeforeBody = elseAfterBody = continueBlock; } diff --git a/clang/test/CIR/CodeGen/abstract-cond.c b/clang/test/CIR/CodeGen/abstract-cond.c index dc3df811d8f4..9ff125235105 100644 --- a/clang/test/CIR/CodeGen/abstract-cond.c +++ b/clang/test/CIR/CodeGen/abstract-cond.c @@ -27,11 +27,11 @@ int f6(int a0, struct s6 a1, struct s6 a2) { // LLVM: %[[LOAD_A0:.*]] = load i32, ptr {{.*}} // LLVM: %[[COND:.*]] = icmp ne i32 %[[LOAD_A0]], 0 // LLVM: br i1 %[[COND]], label %[[A1_PATH:.*]], label %[[A2_PATH:.*]], -// LLVM: [[A2_PATH]]: +// LLVM: [[A1_PATH]]: // LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[TMP:.*]], ptr {{.*}}, i32 4, i1 false) // LLVM: br label %[[EXIT:[a-z0-9]+]] -// LLVM: [[A1_PATH]]: +// LLVM: [[A2_PATH]]: // LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[TMP]], ptr {{.*}}, i32 4, i1 false) // LLVM: br label %[[EXIT]] // LLVM: [[EXIT]]: -// LLVM: getelementptr {{.*}}, ptr %[[TMP]], i32 0, i32 0 \ No newline at end of file +// LLVM: getelementptr {{.*}}, ptr %[[TMP]], i32 0, i32 0 diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index 6bcf03b0fcfe..44aa412ffd13 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -18,12 +18,12 @@ module { // MLIR: llvm.func @foo(%arg0: i32) -> i32 // MLIR-NEXT: %0 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %1 = llvm.icmp "ne" %arg0, %0 : i32 -// MLIR-NEXT: llvm.cond_br %1, ^bb2, ^bb1 +// MLIR-NEXT: llvm.cond_br %1, ^bb1, ^bb2 // MLIR-NEXT: ^bb1: // pred: ^bb0 -// MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: %2 = llvm.mlir.constant(1 : i32) : i32 // MLIR-NEXT: llvm.return %2 : i32 // MLIR-NEXT: ^bb2: // pred: ^bb0 -// MLIR-NEXT: %3 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: %3 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: llvm.return %3 : i32 // MLIR-NEXT: ^bb3: // no predecessors // MLIR-NEXT: llvm.return %arg0 : i32 @@ -31,13 +31,13 @@ module { // LLVM: define i32 @foo(i32 %0) // LLVM-NEXT: %2 = icmp ne i32 %0, 0 -// LLVM-NEXT: br i1 %2, label %4, label %3 +// LLVM-NEXT: br i1 %2, label %3, label %4 // LLVM-EMPTY: // LLVM-NEXT: 3: -// LLVM-NEXT: ret i32 0 +// LLVM-NEXT: ret i32 1 // LLVM-EMPTY: // LLVM-NEXT: 4: -// LLVM-NEXT: ret i32 1 +// LLVM-NEXT: ret i32 0 // LLVM-EMPTY: // LLVM-NEXT: 5: // LLVM-NEXT: ret i32 %0 @@ -85,12 +85,12 @@ module { %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool // MLIR: llvm.cond_br {{%.*}}, ^[[T:.*]], ^[[F:.*]] cir.if %4 { + // MLIR-NEXT: ^[[T]]: + // MLIR-NEXT: llvm.br ^[[PHI:.*]] } else { - } // MLIR-NEXT: ^[[F]]: - // MLIR-NEXT: llvm.br ^[[PHI:.*]] - // MLIR-NEXT: ^[[T]]: // MLIR-NEXT: llvm.br ^[[PHI]] + } // MLIR-NEXT: ^[[PHI]]: // MLIR-NEXT: llvm.return cir.return %arg0 : !s32i diff --git a/clang/test/CIR/Transforms/if.cir b/clang/test/CIR/Transforms/if.cir index 7ca069fe9399..03848bf8d063 100644 --- a/clang/test/CIR/Transforms/if.cir +++ b/clang/test/CIR/Transforms/if.cir @@ -16,12 +16,12 @@ module { } // CHECK: cir.func @foo(%arg0: !s32i) -> !s32i { // CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool -// CHECK-NEXT: cir.brcond %0 ^bb2, ^bb1 +// CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // pred: ^bb0 -// CHECK-NEXT: %1 = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.return %1 : !s32i // CHECK-NEXT: ^bb2: // pred: ^bb0 -// CHECK-NEXT: %2 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %2 = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.return %2 : !s32i // CHECK-NEXT: ^bb3: // no predecessors // CHECK-NEXT: cir.return %arg0 : !s32i @@ -45,4 +45,4 @@ module { // CHECK-NEXT: cir.return %arg0 : !s32i // CHECK-NEXT: } -} \ No newline at end of file +} diff --git a/clang/test/CIR/Transforms/mem2reg.c b/clang/test/CIR/Transforms/mem2reg.c index 83c975fd6d13..5d8d2f59b35b 100644 --- a/clang/test/CIR/Transforms/mem2reg.c +++ b/clang/test/CIR/Transforms/mem2reg.c @@ -1,18 +1,18 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=BEFORE -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fclangir-mem2reg %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fclangir-mem2reg %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=MEM2REG int return_42() { int y = 42; - return y; + return y; } // BEFORE: cir.func {{.*@return_42}} // BEFORE: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // BEFORE: %1 = cir.alloca !s32i, !cir.ptr, ["y", init] {alignment = 4 : i64} // BEFORE: %2 = cir.const #cir.int<42> : !s32i -// BEFORE: cir.store %2, %1 : !s32i, !cir.ptr +// BEFORE: cir.store %2, %1 : !s32i, !cir.ptr // BEFORE: %3 = cir.load %1 : !cir.ptr, !s32i // BEFORE: cir.store %3, %0 : !s32i, !cir.ptr // BEFORE: %4 = cir.load %0 : !cir.ptr, !s32i @@ -63,7 +63,7 @@ void alloca_in_loop(int* ar, int n) { // BEFORE: cir.yield // BEFORE: } // BEFORE: } -// BEFORE: cir.return +// BEFORE: cir.return // MEM2REG: cir.func {{.*@alloca_in_loop}} // MEM2REG: cir.br ^bb1 @@ -152,13 +152,13 @@ int alloca_in_ifelse(int x) { // MEM2REG: %1 = cir.const #cir.int<42> : !s32i // MEM2REG: %2 = cir.cmp(gt, %arg0, %1) : !s32i, !s32i // MEM2REG: %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool -// MEM2REG: cir.brcond %3 ^bb3, ^bb2 +// MEM2REG: cir.brcond %3 ^bb2, ^bb3 // MEM2REG: ^bb2: // pred: ^bb1 -// MEM2REG: %4 = cir.const #cir.int<3> : !s32i +// MEM2REG: %4 = cir.const #cir.int<2> : !s32i // MEM2REG: %5 = cir.binop(mul, %arg0, %4) nsw : !s32i // MEM2REG: cir.br ^bb4(%5 : !s32i) // MEM2REG: ^bb3: // pred: ^bb1 -// MEM2REG: %6 = cir.const #cir.int<2> : !s32i +// MEM2REG: %6 = cir.const #cir.int<3> : !s32i // MEM2REG: %7 = cir.binop(mul, %arg0, %6) nsw : !s32i // MEM2REG: cir.br ^bb4(%7 : !s32i) // MEM2REG: ^bb4(%8: !s32i{{.*}}): // 2 preds: ^bb2, ^bb3 @@ -174,7 +174,7 @@ int alloca_in_ifelse(int x) { typedef __SIZE_TYPE__ size_t; void *alloca(size_t size); - + void test_bitcast(size_t n) { int *c1 = alloca(n); } @@ -189,7 +189,7 @@ void test_bitcast(size_t n) { // BEFORE: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr // BEFORE: cir.store %5, %1 : !cir.ptr, !cir.ptr> // BEFORE: cir.return - + // MEM2REG: cir.func {{.*@test_bitcast}} // MEM2REG: cir.return -// MEM2REG: } \ No newline at end of file +// MEM2REG: } From a38a523883379be8618d1a1b7612889f66cb0f9a Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Thu, 14 Nov 2024 03:12:39 +0800 Subject: [PATCH 2072/2301] [CIR] [Lowering] [X86_64] Support VAArg in shape (#1100) This patch implements transformations for VAArg in X86_64 ABI **in shape**. `In shape` means it can't work properly due to the dependent X86_64 ABI is not robust. e.g., when we want to use VAArg with `long double`, we need https://github.com/llvm/clangir/pull/1087. This patch literally implement https://github.com/llvm/llvm-project/blob/d233fedfb0de882353c348cd1ac57dab619efa6d/clang/lib/CodeGen/Targets/X86.cpp#L3015-L3240 in CIR. There some differences due to the traditional pipeline are converting AST to LLVM and we're transforming CIR to CIR. And also to get the ABI Info, I moved `X86_64ABIInfo` to the header. --- clang/include/clang/CIR/ABIArgInfo.h | 2 + .../CIR/Dialect/Builder/CIRBaseBuilder.h | 11 + clang/include/clang/CIR/Dialect/IR/CIROps.td | 20 + .../Dialect/Transforms/LoweringPrepare.cpp | 9 +- .../Transforms/LoweringPrepareCXXABI.h | 1 + .../Transforms/LoweringPrepareX86ABI.h | 0 .../Transforms/TargetLowering/ABIInfoImpl.cpp | 12 + .../Transforms/TargetLowering/ABIInfoImpl.h | 3 + .../Transforms/TargetLowering/CIRCXXABI.h | 19 - .../Transforms/TargetLowering/CMakeLists.txt | 1 + .../TargetLowering/ItaniumCXXABI.cpp | 1 + .../Targets/LoweringPrepareX86CXXABI.cpp | 357 ++++++++++++++++++ .../Transforms/TargetLowering/Targets/X86.cpp | 92 +---- .../TargetLowering/Targets/X86_64ABIInfo.h | 96 +++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 34 +- clang/test/CIR/Lowering/var-arg-x86_64.c | 78 ++++ 16 files changed, 624 insertions(+), 112 deletions(-) create mode 100644 clang/lib/CIR/Dialect/Transforms/LoweringPrepareX86ABI.h create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86_64ABIInfo.h create mode 100644 clang/test/CIR/Lowering/var-arg-x86_64.c diff --git a/clang/include/clang/CIR/ABIArgInfo.h b/clang/include/clang/CIR/ABIArgInfo.h index b3c3d68b9572..28215e7ba196 100644 --- a/clang/include/clang/CIR/ABIArgInfo.h +++ b/clang/include/clang/CIR/ABIArgInfo.h @@ -252,6 +252,8 @@ class ABIArgInfo { bool isExpand() const { return TheKind == Expand; } bool isCoerceAndExpand() const { return TheKind == CoerceAndExpand; } + bool isIgnore() const { return TheKind == Ignore; } + bool isSignExt() const { assert(isExtend() && "Invalid kind!"); return SignExt; diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index f1275a472f3c..225fa444e340 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -48,6 +48,17 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return create(loc, ty, getAttr(ty, val)); } + mlir::Value getSignedInt(mlir::Location loc, int64_t val, unsigned numBits) { + return getConstAPSInt( + loc, llvm::APSInt(llvm::APInt(numBits, val), /*isUnsigned=*/false)); + } + + mlir::Value getUnsignedInt(mlir::Location loc, uint64_t val, + unsigned numBits) { + return getConstAPSInt( + loc, llvm::APSInt(llvm::APInt(numBits, val), /*isUnsigned=*/true)); + } + mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, const llvm::APInt &val) { return create(loc, typ, getAttr(typ, val)); diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 61b02d50e90d..e4bb5aea7980 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4485,6 +4485,26 @@ def AssumeSepStorageOp : CIR_Op<"assume.separate_storage", [SameTypeOperands]> { }]; } +//===----------------------------------------------------------------------===// +// PtrMask Operations +//===----------------------------------------------------------------------===// + +def PtrMaskOp : CIR_Op<"ptr_mask", [AllTypesMatch<["ptr", "result"]>]> { + let summary = "Masks out bits of the pointer according to a mask"; + let description = [{ + The `cir.ptr_mask` operation takes a pointer and an interger `mask` as its + argument and return the masked pointer type according to the `mask`. + }]; + + let arguments = (ins CIR_PointerType:$ptr, + CIR_IntType:$mask); + let results = (outs CIR_PointerType:$result); + + let assemblyFormat = [{ + `(` $ptr `,` $mask `:` type($mask) `)` `:` qualified(type($result)) attr-dict + }]; +} + //===----------------------------------------------------------------------===// // Branch Probability Operations //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 031c3b3b4b40..6af33fd551f2 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -124,9 +124,16 @@ struct LoweringPreparePass : public LoweringPrepareBase { void setASTContext(clang::ASTContext *c) { astCtx = c; - auto abiStr = c->getTargetInfo().getABI(); + const clang::TargetInfo &target = c->getTargetInfo(); + auto abiStr = target.getABI(); switch (c->getCXXABIKind()) { case clang::TargetCXXABI::GenericItanium: + if (target.getTriple().getArch() == llvm::Triple::x86_64) { + cxxABI.reset( + cir::LoweringPrepareCXXABI::createX86ABI(/*is64bit=*/true)); + break; + } + cxxABI.reset(cir::LoweringPrepareCXXABI::createItaniumABI()); break; case clang::TargetCXXABI::GenericAArch64: diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h index 47c63fae7d7b..f3ae48c13574 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h @@ -28,6 +28,7 @@ class LoweringPrepareCXXABI { public: static LoweringPrepareCXXABI *createItaniumABI(); static LoweringPrepareCXXABI *createAArch64ABI(cir::AArch64ABIKind k); + static LoweringPrepareCXXABI *createX86ABI(bool is64Bit); virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) = 0; diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareX86ABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareX86ABI.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index 2c92be20bd41..e07315d54a38 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -36,6 +36,18 @@ bool isAggregateTypeForABI(mlir::Type T) { return !LowerFunction::hasScalarEvaluationKind(T); } +mlir::Value emitRoundPointerUpToAlignment(cir::CIRBaseBuilderTy &builder, + mlir::Value ptr, unsigned alignment) { + // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align; + mlir::Location loc = ptr.getLoc(); + mlir::Value roundUp = builder.createPtrStride( + loc, builder.createPtrBitcast(ptr, builder.getUIntNTy(8)), + builder.getUnsignedInt(loc, alignment - 1, /*width=*/32)); + return builder.create( + loc, roundUp.getType(), roundUp, + builder.getSignedInt(loc, -alignment, /*width=*/32)); +} + mlir::Type useFirstFieldIfTransparentUnion(mlir::Type Ty) { if (auto RT = mlir::dyn_cast(Ty)) { if (RT.isUnion()) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h index df1cd2d0fe0d..8005b153a544 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h @@ -25,6 +25,9 @@ bool classifyReturnType(const CIRCXXABI &CXXABI, LowerFunctionInfo &FI, bool isAggregateTypeForABI(mlir::Type T); +mlir::Value emitRoundPointerUpToAlignment(cir::CIRBaseBuilderTy &builder, + mlir::Value ptr, unsigned alignment); + /// Pass transparent unions as if they were the type of the first element. Sema /// should ensure that all elements of the union have the same "machine type". mlir::Type useFirstFieldIfTransparentUnion(mlir::Type Ty); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index a980f76f012d..0f05ec8040f8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -66,23 +66,4 @@ CIRCXXABI *CreateItaniumCXXABI(LowerModule &CGM); } // namespace cir -// FIXME(cir): Merge this into the CIRCXXABI class above. To do so, this code -// should be updated to follow some level of codegen parity. -namespace cir { - -class LoweringPrepareCXXABI { -public: - static LoweringPrepareCXXABI *createItaniumABI(); - static LoweringPrepareCXXABI *createAArch64ABI(cir::AArch64ABIKind k); - - virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, cir::VAArgOp op, - const cir::CIRDataLayout &datalayout) = 0; - virtual ~LoweringPrepareCXXABI() {} - - virtual mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, - clang::ASTContext &astCtx, - cir::DynamicCastOp op) = 0; -}; -} // namespace cir - #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt index 218656c3b144..d3cb9fc96f1a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt @@ -17,6 +17,7 @@ add_clang_library(TargetLowering Targets/X86.cpp Targets/LoweringPrepareAArch64CXXABI.cpp Targets/LoweringPrepareItaniumCXXABI.cpp + Targets/LoweringPrepareX86CXXABI.cpp DEPENDS clangBasic diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index deb4053dc682..081db25808d1 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -20,6 +20,7 @@ // //===----------------------------------------------------------------------===// +#include "../LoweringPrepareCXXABI.h" #include "CIRCXXABI.h" #include "LowerModule.h" #include "llvm/Support/ErrorHandling.h" diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp new file mode 100644 index 000000000000..ba376d26b0fc --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp @@ -0,0 +1,357 @@ +//====- LoweringPrepareX86CXXABI.cpp - Arm64 ABI specific code -------====// +// +// Part of the LLVM Project, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===------------------------------------------------------------------===// +// +// This file provides X86{_64, _32} C++ ABI specific code that is used during +// LLVMIR lowering prepare. +// +//===------------------------------------------------------------------===// + +#include "../LowerModule.h" +#include "../LoweringPrepareItaniumCXXABI.h" +#include "ABIInfoImpl.h" +#include "X86_64ABIInfo.h" + +using namespace clang; +using namespace cir; + +namespace { +class LoweringPrepareX86CXXABI : public LoweringPrepareItaniumCXXABI { + bool is64; + +public: + LoweringPrepareX86CXXABI(bool is64) : is64(is64) {} + mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) override { + if (is64) + return lowerVAArgX86_64(builder, op, datalayout); + + return lowerVAArgX86_32(builder, op, datalayout); + } + + mlir::Value lowerVAArgX86_64(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, + const cir::CIRDataLayout &datalayout); + mlir::Value lowerVAArgX86_32(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) { + llvm_unreachable("lowerVAArg for X86_32 not implemented yet"); + } +}; + +std::unique_ptr getLowerModule(cir::VAArgOp op) { + mlir::ModuleOp mo = op->getParentOfType(); + if (!mo) + return nullptr; + + mlir::PatternRewriter rewriter(mo.getContext()); + return cir::createLowerModule(mo, rewriter); +} + +mlir::Value buildX86_64VAArgFromMemory(cir::CIRBaseBuilderTy &builder, + const cir::CIRDataLayout &datalayout, + mlir::Value valist, mlir::Type Ty, + mlir::Location loc) { + mlir::Value overflow_arg_area_p = + builder.createGetMemberOp(loc, valist, "overflow_arg_area", 2); + mlir::Value overflow_arg_area = builder.createLoad(loc, overflow_arg_area_p); + + // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 + // byte boundary if alignment needed by type exceeds 8 byte boundary. + // It isn't stated explicitly in the standard, but in practice we use + // alignment greater than 16 where necessary. + unsigned alignment = datalayout.getABITypeAlign(Ty).value(); + if (alignment > 8) + overflow_arg_area = + emitRoundPointerUpToAlignment(builder, overflow_arg_area, alignment); + + // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. + mlir::Value res = overflow_arg_area; + + // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: + // l->overflow_arg_area + sizeof(type). + // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to + // an 8 byte boundary. + uint64_t sizeInBytes = datalayout.getTypeStoreSize(Ty).getFixedValue(); + mlir::Value stride = builder.getSignedInt(loc, ((sizeInBytes + 7) & ~7), 32); + mlir::Value castedPtr = + builder.createPtrBitcast(overflow_arg_area, builder.getSIntNTy(8)); + overflow_arg_area = builder.createPtrStride(loc, castedPtr, stride); + builder.createStore(loc, overflow_arg_area, overflow_arg_area_p); + + return res; +} + +mlir::Value LoweringPrepareX86CXXABI::lowerVAArgX86_64( + cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, + const cir::CIRDataLayout &datalayout) { + // FIXME: return early since X86_64ABIInfo::classify can't handle these types. + // Let's hope LLVM's va_arg instruction can take care of it. + // Remove this when X86_64ABIInfo::classify can take care of every type. + if (!mlir::isa(op.getType())) + return nullptr; + + // Assume that va_list type is correct; should be pointer to LLVM type: + // struct { + // i32 gp_offset; + // i32 fp_offset; + // i8* overflow_arg_area; + // i8* reg_save_area; + // }; + unsigned neededInt, neededSSE; + + std::unique_ptr lowerModule = getLowerModule(op); + if (!lowerModule) + return nullptr; + + mlir::Type ty = op.getType(); + + // FIXME: How should we access the X86AVXABILevel? + X86_64ABIInfo abiInfo(lowerModule->getTypes(), X86AVXABILevel::None); + ABIArgInfo ai = abiInfo.classifyArgumentType( + ty, 0, neededInt, neededSSE, /*isNamedArg=*/false, /*IsRegCall=*/false); + + // Empty records are ignored for parameter passing purposes. + if (ai.isIgnore()) + return nullptr; + + mlir::Location loc = op.getLoc(); + mlir::Value valist = op.getOperand(); + + // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed + // in the registers. If not go to step 7. + if (!neededInt && !neededSSE) + return builder.createLoad( + loc, builder.createPtrBitcast(buildX86_64VAArgFromMemory( + builder, datalayout, valist, ty, loc), + ty)); + + auto currentBlock = builder.getInsertionBlock(); + + // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of + // general purpose registers needed to pass type and num_fp to hold + // the number of floating point registers needed. + + // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into + // registers. In the case: l->gp_offset > 48 - num_gp * 8 or + // l->fp_offset > 304 - num_fp * 16 go to step 7. + // + // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of + // register save space). + + mlir::Value inRegs; + mlir::Value gp_offset_p, fp_offset_p; + mlir::Value gp_offset, fp_offset; + + if (neededInt) { + gp_offset_p = builder.createGetMemberOp(loc, valist, "gp_offset", 0); + gp_offset = builder.createLoad(loc, gp_offset_p); + inRegs = builder.getUnsignedInt(loc, 48 - neededInt * 8, 32); + inRegs = builder.createCompare(loc, cir::CmpOpKind::le, gp_offset, inRegs); + } + + if (neededSSE) { + fp_offset_p = builder.createGetMemberOp(loc, valist, "fp_offset", 1); + fp_offset = builder.createLoad(loc, fp_offset_p); + mlir::Value fitsInFP = + builder.getUnsignedInt(loc, 176 - neededSSE * 16, 32); + fitsInFP = + builder.createCompare(loc, cir::CmpOpKind::le, fp_offset, fitsInFP); + inRegs = inRegs ? builder.createAnd(inRegs, fitsInFP) : fitsInFP; + } + + mlir::Block *contBlock = currentBlock->splitBlock(op); + mlir::Block *inRegBlock = builder.createBlock(contBlock); + mlir::Block *inMemBlock = builder.createBlock(contBlock); + + builder.setInsertionPointToEnd(currentBlock); + builder.create(loc, inRegs, inRegBlock, inMemBlock); + + // Emit code to load the value if it was passed in registers. + builder.setInsertionPointToStart(inRegBlock); + + // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with + // an offset of l->gp_offset and/or l->fp_offset. This may require + // copying to a temporary location in case the parameter is passed + // in different register classes or requires an alignment greater + // than 8 for general purpose registers and 16 for XMM registers. + // + // FIXME: This really results in shameful code when we end up needing to + // collect arguments from different places; often what should result in a + // simple assembling of a structure from scattered addresses has many more + // loads than necessary. Can we clean this up? + mlir::Value regSaveArea = builder.createLoad( + loc, builder.createGetMemberOp(loc, valist, "reg_save_area", 3)); + mlir::Value regAddr; + + uint64_t tyAlign = datalayout.getABITypeAlign(ty).value(); + // The alignment of result address. + uint64_t alignment = 0; + if (neededInt && neededSSE) { + // FIXME: Cleanup. + assert(ai.isDirect() && "Unexpected ABI info for mixed regs"); + StructType structTy = mlir::cast(ai.getCoerceToType()); + cir::PointerType addrTy = builder.getPointerTo(ty); + + mlir::Value tmp = builder.createAlloca(loc, addrTy, ty, "tmp", + CharUnits::fromQuantity(tyAlign)); + tmp = builder.createPtrBitcast(tmp, structTy); + assert(structTy.getNumElements() == 2 && + "Unexpected ABI info for mixed regs"); + mlir::Type tyLo = structTy.getMembers()[0]; + mlir::Type tyHi = structTy.getMembers()[1]; + assert((isFPOrFPVectorTy(tyLo) ^ isFPOrFPVectorTy(tyHi)) && + "Unexpected ABI info for mixed regs"); + mlir::Value gpAddr = builder.createPtrStride(loc, regSaveArea, gp_offset); + mlir::Value fpAddr = builder.createPtrStride(loc, regSaveArea, fp_offset); + mlir::Value regLoAddr = isFPOrFPVectorTy(tyLo) ? fpAddr : gpAddr; + mlir::Value regHiAddr = isFPOrFPVectorTy(tyHi) ? gpAddr : fpAddr; + + // Copy the first element. + // FIXME: Our choice of alignment here and below is probably pessimistic. + mlir::Value v = builder.createAlignedLoad( + loc, regLoAddr, datalayout.getABITypeAlign(tyLo).value()); + builder.createStore(loc, v, + builder.createGetMemberOp(loc, tmp, "gp_offset", 0)); + + // Copy the second element. + v = builder.createAlignedLoad(loc, regHiAddr, + datalayout.getABITypeAlign(tyHi).value()); + builder.createStore(loc, v, + builder.createGetMemberOp(loc, tmp, "fp_offset", 1)); + + tmp = builder.createPtrBitcast(tmp, ty); + regAddr = tmp; + } else if (neededInt || neededSSE == 1) { + uint64_t tySize = datalayout.getTypeStoreSize(ty).getFixedValue(); + + mlir::Type coTy; + if (ai.isDirect()) + coTy = ai.getCoerceToType(); + + mlir::Value gpOrFpOffset = neededInt ? gp_offset : fp_offset; + alignment = neededInt ? 8 : 16; + uint64_t regSize = neededInt ? neededInt * 8 : 16; + // There are two cases require special handling: + // 1) + // ``` + // struct { + // struct {} a[8]; + // int b; + // }; + // ``` + // The lower 8 bytes of the structure are not stored, + // so an 8-byte offset is needed when accessing the structure. + // 2) + // ``` + // struct { + // long long a; + // struct {} b; + // }; + // ``` + // The stored size of this structure is smaller than its actual size, + // which may lead to reading past the end of the register save area. + if (coTy && (ai.getDirectOffset() == 8 || regSize < tySize)) { + cir::PointerType addrTy = builder.getPointerTo(ty); + mlir::Value tmp = builder.createAlloca(loc, addrTy, ty, "tmp", + CharUnits::fromQuantity(tyAlign)); + mlir::Value addr = + builder.createPtrStride(loc, regSaveArea, gpOrFpOffset); + mlir::Value src = builder.createAlignedLoad( + loc, builder.createPtrBitcast(addr, coTy), tyAlign); + mlir::Value ptrOffset = + builder.getUnsignedInt(loc, ai.getDirectOffset(), 32); + mlir::Value dst = builder.createPtrStride(loc, tmp, ptrOffset); + builder.createStore(loc, src, dst); + regAddr = tmp; + } else { + regAddr = builder.createPtrStride(loc, regSaveArea, gpOrFpOffset); + + // Copy into a temporary if the type is more aligned than the + // register save area. + if (neededInt && tyAlign > 8) { + cir::PointerType addrTy = builder.getPointerTo(ty); + mlir::Value tmp = builder.createAlloca( + loc, addrTy, ty, "tmp", CharUnits::fromQuantity(tyAlign)); + builder.createMemCpy(loc, tmp, regAddr, + builder.getUnsignedInt(loc, tySize, 32)); + regAddr = tmp; + } + } + + } else { + assert(neededSSE == 2 && "Invalid number of needed registers!"); + // SSE registers are spaced 16 bytes apart in the register save + // area, we need to collect the two eightbytes together. + // The ABI isn't explicit about this, but it seems reasonable + // to assume that the slots are 16-byte aligned, since the stack is + // naturally 16-byte aligned and the prologue is expected to store + // all the SSE registers to the RSA. + + mlir::Value regAddrLo = + builder.createPtrStride(loc, regSaveArea, fp_offset); + mlir::Value regAddrHi = builder.createPtrStride( + loc, regAddrLo, builder.getUnsignedInt(loc, 16, /*numBits=*/32)); + + mlir::MLIRContext *Context = abiInfo.getContext().getMLIRContext(); + StructType structTy = + ai.canHaveCoerceToType() + ? cast(ai.getCoerceToType()) + : StructType::get( + Context, {DoubleType::get(Context), DoubleType::get(Context)}, + /*packed=*/false, StructType::Struct); + cir::PointerType addrTy = builder.getPointerTo(ty); + mlir::Value tmp = builder.createAlloca(loc, addrTy, ty, "tmp", + CharUnits::fromQuantity(tyAlign)); + tmp = builder.createPtrBitcast(tmp, structTy); + mlir::Value v = builder.createLoad( + loc, builder.createPtrBitcast(regAddrLo, structTy.getMembers()[0])); + builder.createStore(loc, v, builder.createGetMemberOp(loc, tmp, "", 0)); + v = builder.createLoad( + loc, builder.createPtrBitcast(regAddrHi, structTy.getMembers()[1])); + builder.createStore(loc, v, builder.createGetMemberOp(loc, tmp, "", 1)); + + tmp = builder.createPtrBitcast(tmp, ty); + regAddr = tmp; + } + + // AMD64-ABI 3.5.7p5: Step 5. Set: + // l->gp_offset = l->gp_offset + num_gp * 8 + // l->fp_offset = l->fp_offset + num_fp * 16. + if (neededInt) { + mlir::Value offset = builder.getUnsignedInt(loc, neededInt * 8, 32); + builder.createStore(loc, builder.createAdd(gp_offset, offset), gp_offset_p); + } + + if (neededSSE) { + mlir::Value offset = builder.getUnsignedInt(loc, neededSSE * 8, 32); + builder.createStore(loc, builder.createAdd(fp_offset, offset), fp_offset_p); + } + + builder.create(loc, mlir::ValueRange{regAddr}, contBlock); + + // Emit code to load the value if it was passed in memory. + builder.setInsertionPointToStart(inMemBlock); + mlir::Value memAddr = + buildX86_64VAArgFromMemory(builder, datalayout, valist, ty, loc); + builder.create(loc, mlir::ValueRange{memAddr}, contBlock); + + // Return the appropriate result. + builder.setInsertionPointToStart(contBlock); + mlir::Value res_addr = contBlock->addArgument(regAddr.getType(), loc); + + return alignment + ? builder.createAlignedLoad( + loc, builder.createPtrBitcast(res_addr, ty), alignment) + : builder.createLoad(loc, builder.createPtrBitcast(res_addr, ty)); +} +} // namespace + +cir::LoweringPrepareCXXABI * +cir::LoweringPrepareCXXABI::createX86ABI(bool is64Bit) { + return new LoweringPrepareX86CXXABI(is64Bit); +} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 3d590b3d499b..39bd1716aa3b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -5,6 +5,7 @@ #include "LowerModule.h" #include "LowerTypes.h" #include "TargetInfo.h" +#include "X86_64ABIInfo.h" #include "clang/CIR/ABIArgInfo.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" @@ -101,97 +102,6 @@ mlir::Type getFPTypeAtOffset(mlir::Type IRType, unsigned IROffset, } // namespace -class X86_64ABIInfo : public ABIInfo { - using Class = cir::X86ArgClass; - - /// Implement the X86_64 ABI merging algorithm. - /// - /// Merge an accumulating classification \arg Accum with a field - /// classification \arg Field. - /// - /// \param Accum - The accumulating classification. This should - /// always be either NoClass or the result of a previous merge - /// call. In addition, this should never be Memory (the caller - /// should just return Memory for the aggregate). - static Class merge(Class Accum, Class Field); - - /// Implement the X86_64 ABI post merging algorithm. - /// - /// Post merger cleanup, reduces a malformed Hi and Lo pair to - /// final MEMORY or SSE classes when necessary. - /// - /// \param AggregateSize - The size of the current aggregate in - /// the classification process. - /// - /// \param Lo - The classification for the parts of the type - /// residing in the low word of the containing object. - /// - /// \param Hi - The classification for the parts of the type - /// residing in the higher words of the containing object. - /// - void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; - - /// Determine the x86_64 register classes in which the given type T should be - /// passed. - /// - /// \param Lo - The classification for the parts of the type - /// residing in the low word of the containing object. - /// - /// \param Hi - The classification for the parts of the type - /// residing in the high word of the containing object. - /// - /// \param OffsetBase - The bit offset of this type in the - /// containing object. Some parameters are classified different - /// depending on whether they straddle an eightbyte boundary. - /// - /// \param isNamedArg - Whether the argument in question is a "named" - /// argument, as used in AMD64-ABI 3.5.7. - /// - /// \param IsRegCall - Whether the calling conversion is regcall. - /// - /// If a word is unused its result will be NoClass; if a type should - /// be passed in Memory then at least the classification of \arg Lo - /// will be Memory. - /// - /// The \arg Lo class will be NoClass iff the argument is ignored. - /// - /// If the \arg Lo class is ComplexX87, then the \arg Hi class will - /// also be ComplexX87. - void classify(mlir::Type T, uint64_t OffsetBase, Class &Lo, Class &Hi, - bool isNamedArg, bool IsRegCall = false) const; - - mlir::Type GetSSETypeAtOffset(mlir::Type IRType, unsigned IROffset, - mlir::Type SourceTy, - unsigned SourceOffset) const; - - mlir::Type GetINTEGERTypeAtOffset(mlir::Type DestTy, unsigned IROffset, - mlir::Type SourceTy, - unsigned SourceOffset) const; - - /// The 0.98 ABI revision clarified a lot of ambiguities, - /// unfortunately in ways that were not always consistent with - /// certain previous compilers. In particular, platforms which - /// required strict binary compatibility with older versions of GCC - /// may need to exempt themselves. - bool honorsRevision0_98() const { - return !getTarget().getTriple().isOSDarwin(); - } - - X86AVXABILevel AVXLevel; - -public: - X86_64ABIInfo(LowerTypes &CGT, X86AVXABILevel AVXLevel) - : ABIInfo(CGT), AVXLevel(AVXLevel) {} - - cir::ABIArgInfo classifyReturnType(mlir::Type RetTy) const; - - ABIArgInfo classifyArgumentType(mlir::Type Ty, unsigned freeIntRegs, - unsigned &neededInt, unsigned &neededSSE, - bool isNamedArg, bool IsRegCall) const; - - void computeInfo(LowerFunctionInfo &FI) const override; -}; - class X86_64TargetLoweringInfo : public TargetLoweringInfo { public: X86_64TargetLoweringInfo(LowerTypes &LM, X86AVXABILevel AVXLevel) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86_64ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86_64ABIInfo.h new file mode 100644 index 000000000000..201730519207 --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86_64ABIInfo.h @@ -0,0 +1,96 @@ +#include "ABIInfo.h" +#include "clang/CIR/Target/x86.h" + +namespace cir { +class X86_64ABIInfo : public cir::ABIInfo { + using Class = cir::X86ArgClass; + + /// Implement the X86_64 ABI merging algorithm. + /// + /// Merge an accumulating classification \arg Accum with a field + /// classification \arg Field. + /// + /// \param Accum - The accumulating classification. This should + /// always be either NoClass or the result of a previous merge + /// call. In addition, this should never be Memory (the caller + /// should just return Memory for the aggregate). + static Class merge(Class Accum, Class Field); + + /// Implement the X86_64 ABI post merging algorithm. + /// + /// Post merger cleanup, reduces a malformed Hi and Lo pair to + /// final MEMORY or SSE classes when necessary. + /// + /// \param AggregateSize - The size of the current aggregate in + /// the classification process. + /// + /// \param Lo - The classification for the parts of the type + /// residing in the low word of the containing object. + /// + /// \param Hi - The classification for the parts of the type + /// residing in the higher words of the containing object. + /// + void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; + + /// Determine the x86_64 register classes in which the given type T should be + /// passed. + /// + /// \param Lo - The classification for the parts of the type + /// residing in the low word of the containing object. + /// + /// \param Hi - The classification for the parts of the type + /// residing in the high word of the containing object. + /// + /// \param OffsetBase - The bit offset of this type in the + /// containing object. Some parameters are classified different + /// depending on whether they straddle an eightbyte boundary. + /// + /// \param isNamedArg - Whether the argument in question is a "named" + /// argument, as used in AMD64-ABI 3.5.7. + /// + /// \param IsRegCall - Whether the calling conversion is regcall. + /// + /// If a word is unused its result will be NoClass; if a type should + /// be passed in Memory then at least the classification of \arg Lo + /// will be Memory. + /// + /// The \arg Lo class will be NoClass iff the argument is ignored. + /// + /// If the \arg Lo class is ComplexX87, then the \arg Hi class will + /// also be ComplexX87. + void classify(mlir::Type T, uint64_t OffsetBase, Class &Lo, Class &Hi, + bool isNamedArg, bool IsRegCall = false) const; + + mlir::Type GetSSETypeAtOffset(mlir::Type IRType, unsigned IROffset, + mlir::Type SourceTy, + unsigned SourceOffset) const; + + mlir::Type GetINTEGERTypeAtOffset(mlir::Type DestTy, unsigned IROffset, + mlir::Type SourceTy, + unsigned SourceOffset) const; + + /// The 0.98 ABI revision clarified a lot of ambiguities, + /// unfortunately in ways that were not always consistent with + /// certain previous compilers. In particular, platforms which + /// required strict binary compatibility with older versions of GCC + /// may need to exempt themselves. + bool honorsRevision0_98() const { + return !getTarget().getTriple().isOSDarwin(); + } + + ::cir::X86AVXABILevel AVXLevel; + +public: + X86_64ABIInfo(LowerTypes &CGT, cir::X86AVXABILevel AVXLevel) + : ABIInfo(CGT), AVXLevel(AVXLevel) {} + + cir::ABIArgInfo classifyReturnType(mlir::Type RetTy) const; + + cir::ABIArgInfo classifyArgumentType(mlir::Type Ty, unsigned freeIntRegs, + unsigned &neededInt, unsigned &neededSSE, + bool isNamedArg, bool IsRegCall) const; + + void computeInfo(LowerFunctionInfo &FI) const override; +}; + +} // namespace cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8e94cd91c465..745827e1bdb1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4306,6 +4306,37 @@ class CIRIsFPClassOpLowering } }; +class CIRPtrMaskOpLowering : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::PtrMaskOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + // FIXME: We'd better to lower to mlir::LLVM::PtrMaskOp if it exists. + // So we have to make it manually here by following: + // https://llvm.org/docs/LangRef.html#llvm-ptrmask-intrinsic + auto loc = op.getLoc(); + auto mask = op.getMask(); + + auto moduleOp = op->getParentOfType(); + mlir::DataLayout layout(moduleOp); + auto iPtrIdxValue = layout.getTypeSizeInBits(mask.getType()); + auto iPtrIdx = mlir::IntegerType::get(moduleOp->getContext(), iPtrIdxValue); + + auto intPtr = rewriter.create( + loc, iPtrIdx, adaptor.getPtr()); // this may truncate + mlir::Value masked = + rewriter.create(loc, intPtr, adaptor.getMask()); + mlir::Value diff = rewriter.create(loc, intPtr, masked); + rewriter.replaceOpWithNewOp( + op, getTypeConverter()->convertType(op.getType()), + mlir::IntegerType::get(moduleOp->getContext(), 8), adaptor.getPtr(), + diff); + return mlir::success(); + } +}; + class CIRAbsOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -4399,7 +4430,8 @@ void populateCIRToLLVMConversionPatterns( CIRAssumeLowering, CIRAssumeAlignedLowering, CIRAssumeSepStorageLowering, CIRBaseClassAddrOpLowering, CIRDerivedClassAddrOpLowering, CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering, - CIRMemMoveOpLowering, CIRMemsetOpLowering, CIRSignBitOpLowering + CIRMemMoveOpLowering, CIRMemsetOpLowering, CIRSignBitOpLowering, + CIRPtrMaskOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/test/CIR/Lowering/var-arg-x86_64.c b/clang/test/CIR/Lowering/var-arg-x86_64.c new file mode 100644 index 000000000000..992d5e82cd98 --- /dev/null +++ b/clang/test/CIR/Lowering/var-arg-x86_64.c @@ -0,0 +1,78 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s + +#include + +double f1(int n, ...) { + va_list valist; + va_start(valist, n); + double res = va_arg(valist, double); + va_end(valist); + return res; +} + +// CHECK: [[VA_LIST_TYPE:%.+]] = type { i32, i32, ptr, ptr } + +// CHECK: define {{.*}}@f1 +// CHECK: [[VA_LIST_ALLOCA:%.+]] = alloca {{.*}}[[VA_LIST_TYPE]] +// CHECK: [[VA_LIST:%.+]] = getelementptr {{.*}} [[VA_LIST_ALLOCA]], i32 0 +// CHECK: call {{.*}}@llvm.va_start.p0(ptr [[VA_LIST]]) +// CHECK: [[VA_LIST2:%.+]] = getelementptr {{.*}} [[VA_LIST_ALLOCA]], i32 0 +// CHECK: [[FP_OFFSET_P:%.+]] = getelementptr {{.*}} [[VA_LIST2]], i32 0, i32 1 +// CHECK: [[FP_OFFSET:%.+]] = load {{.*}}, ptr [[FP_OFFSET_P]] +// CHECK: [[COMPARED:%.+]] = icmp ule i32 {{.*}}, 160 +// CHECK: br i1 [[COMPARED]], label %[[THEN_BB:.+]], label %[[ELSE_BB:.+]], +// +// CHECK: [[THEN_BB]]: +// CHECK: [[UPDATED_FP_OFFSET:%.+]] = add i32 [[FP_OFFSET]], 8 +// CHECK: store i32 [[UPDATED_FP_OFFSET]], ptr [[FP_OFFSET_P]] +// CHECK: br label %[[CONT_BB:.+]], +// +// CHECK: [[ELSE_BB]]: +// CHECK: [[OVERFLOW_ARG_AREA_ADDR:%.+]] = getelementptr {{.*}} [[VA_LIST2]], i32 0, i32 2 +// CHECK: [[OVERFLOW_ARG_AREA:%.+]] = load ptr, ptr [[OVERFLOW_ARG_AREA_ADDR]] +// CHECK: [[OVERFLOW_ARG_AREA_OFFSET:%.+]] = getelementptr {{.*}} [[OVERFLOW_ARG_AREA]], i64 8 +// CHECK: store ptr [[OVERFLOW_ARG_AREA_OFFSET]], ptr [[OVERFLOW_ARG_AREA_ADDR]] +// CHECK: br label %[[CONT_BB]] +// +// CHECK: [[CONT_BB]]: +// CHECK: [[VA_LIST3:%.+]] = getelementptr {{.*}} [[VA_LIST_ALLOCA]], i32 0 +// CHECK: call {{.*}}@llvm.va_end.p0(ptr [[VA_LIST3]]) + +// CIR: cir.func @f1 +// CIR: [[VA_LIST_ALLOCA:%.+]] = cir.alloca !cir.array, +// CIR: [[RES:%.+]] = cir.alloca !cir.double, !cir.ptr, ["res", +// CIR: [[VASTED_VA_LIST:%.+]] = cir.cast(array_to_ptrdecay, [[VA_LIST_ALLOCA]] +// CIR: cir.va.start [[VASTED_VA_LIST]] +// CIR: [[VASTED_VA_LIST:%.+]] = cir.cast(array_to_ptrdecay, [[VA_LIST_ALLOCA]] +// CIR: [[FP_OFFSET_P:%.+]] = cir.get_member [[VASTED_VA_LIST]][1] {name = "fp_offset"} +// CIR: [[FP_OFFSET:%.+]] = cir.load [[FP_OFFSET_P]] +// CIR: [[OFFSET_CONSTANT:%.+]] = cir.const #cir.int<160> +// CIR: [[CMP:%.+]] = cir.cmp(le, [[FP_OFFSET]], [[OFFSET_CONSTANT]]) +// CIR: cir.brcond [[CMP]] ^[[InRegBlock:.+]], ^[[InMemBlock:.+]] loc +// +// CIR: ^[[InRegBlock]]: +// CIR: [[REG_SAVE_AREA_P:%.+]] = cir.get_member [[VASTED_VA_LIST]][3] {name = "reg_save_area"} +// CIR: [[REG_SAVE_AREA:%.+]] = cir.load [[REG_SAVE_AREA_P]] +// CIR: [[UPDATED:%.+]] = cir.ptr_stride([[REG_SAVE_AREA]] {{.*}}, [[FP_OFFSET]] +// CIR: [[CONSTANT:%.+]] = cir.const #cir.int<8> +// CIR: [[ADDED:%.+]] = cir.binop(add, [[FP_OFFSET]], [[CONSTANT]]) +// CIR: cir.store [[ADDED]], [[FP_OFFSET_P]] +// CIR: cir.br ^[[ContBlock:.+]]([[UPDATED]] +// +// CIR: ^[[InMemBlock]]: +// CIR: [[OVERFLOW_ARG_AREA_P:%.+]] = cir.get_member [[VASTED_VA_LIST]][2] {name = "overflow_arg_area"} +// CIR: [[OVERFLOW_ARG_AREA:%.+]] = cir.load [[OVERFLOW_ARG_AREA_P]] +// CIR: [[OFFSET:%.+]] = cir.const #cir.int<8> +// CIR: [[CASTED:%.+]] = cir.cast(bitcast, [[OVERFLOW_ARG_AREA]] : !cir.ptr) +// CIR: [[NEW_VALUE:%.+]] = cir.ptr_stride([[CASTED]] : !cir.ptr, [[OFFSET]] +// CIR: [[CASTED_P:%.+]] = cir.cast(bitcast, [[OVERFLOW_ARG_AREA_P]] : !cir.ptr>) +// CIR: store [[NEW_VALUE]], [[CASTED_P]] +// CIR: cir.br ^[[ContBlock]]([[OVERFLOW_ARG_AREA]] +// +// CIR: ^[[ContBlock]]([[ARG:.+]]: !cir.ptr +// CIR: [[CASTED_ARG_P:%.+]] = cir.cast(bitcast, [[ARG]] +// CIR: [[CASTED_ARG:%.+]] = cir.load align(16) [[CASTED_ARG_P]] +// CIR: store [[CASTED_ARG]], [[RES]] From 55adbd21800819df2bc57d86a4d82b8ee5214a3b Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 13 Nov 2024 14:15:34 -0500 Subject: [PATCH 2073/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqdmlal, neon_vqdmlsl (#1104) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 15 +++ clang/test/CIR/CodeGen/AArch64/neon.c | 109 +++++++++++------- 2 files changed, 84 insertions(+), 40 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 91cc76bbf2cd..68415df912cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2363,6 +2363,21 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( : "aarch64.neon.saddlp", vTy, getLoc(e->getExprLoc())); } + case NEON::BI__builtin_neon_vqdmlal_v: + case NEON::BI__builtin_neon_vqdmlsl_v: { + llvm::SmallVector mulOps(ops.begin() + 1, ops.end()); + cir::VectorType srcVty = builder.getExtendedOrTruncatedElementVectorType( + vTy, false, /* truncate */ + mlir::cast(vTy.getEltType()).isSigned()); + ops[1] = emitNeonCall(builder, {srcVty, srcVty}, mulOps, + "aarch64.neon.sqdmull", vTy, getLoc(e->getExprLoc())); + ops.resize(2); + return emitNeonCall(builder, {vTy, vTy}, ops, + builtinID == NEON::BI__builtin_neon_vqdmlal_v + ? "aarch64.neon.sqadd" + : "aarch64.neon.sqsub", + vTy, getLoc(e->getExprLoc())); + } case NEON::BI__builtin_neon_vext_v: case NEON::BI__builtin_neon_vextq_v: { int cv = getIntValueFromConstOp(ops[2]); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index dbb6687e107b..69ba9fa11798 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -9234,49 +9234,78 @@ uint64x2_t test_vmull_u32(uint32x2_t a, uint32x2_t b) { // return vqdmull_s32(a, b); // } -// NYI-LABEL: @test_vqdmlal_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <4 x i16> %c to <8 x i8> -// NYI: [[VQDMLAL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %c) -// NYI: [[VQDMLAL_V3_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> [[VQDMLAL2_I]]) -// NYI: ret <4 x i32> [[VQDMLAL_V3_I]] -// int32x4_t test_vqdmlal_s16(int32x4_t a, int16x4_t b, int16x4_t c) { -// return vqdmlal_s16(a, b, c); -// } +int32x4_t test_vqdmlal_s16(int32x4_t a, int16x4_t b, int16x4_t c) { + return vqdmlal_s16(a, b, c); -// NYI-LABEL: @test_vqdmlal_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <2 x i32> %c to <8 x i8> -// NYI: [[VQDMLAL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %c) -// NYI: [[VQDMLAL_V3_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> [[VQDMLAL2_I]]) -// NYI: ret <2 x i64> [[VQDMLAL_V3_I]] -// int64x2_t test_vqdmlal_s32(int64x2_t a, int32x2_t b, int32x2_t c) { -// return vqdmlal_s32(a, b, c); -// } + // CIR-LABEL: vqdmlal_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vqdmlsl_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <4 x i16> %c to <8 x i8> -// NYI: [[VQDMLAL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %c) -// NYI: [[VQDMLSL_V3_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> [[VQDMLAL2_I]]) -// NYI: ret <4 x i32> [[VQDMLSL_V3_I]] -// int32x4_t test_vqdmlsl_s16(int32x4_t a, int16x4_t b, int16x4_t c) { -// return vqdmlsl_s16(a, b, c); -// } + // LLVM: {{.*}}test_vqdmlal_s16(<4 x i32>{{.*}}[[a:%.*]], <4 x i16>{{.*}}[[b:%.*]], <4 x i16>{{.*}}[[c:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[b]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <4 x i16> [[c]] to <8 x i8> + // LLVM: [[VQDMLAL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[b]], <4 x i16> [[c]]) + // LLVM: [[VQDMLAL_V3_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> [[a]], <4 x i32> [[VQDMLAL2_I]]) + // LLVM: ret <4 x i32> [[VQDMLAL_V3_I]] +} -// NYI-LABEL: @test_vqdmlsl_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <2 x i32> %c to <8 x i8> -// NYI: [[VQDMLAL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %c) -// NYI: [[VQDMLSL_V3_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> [[VQDMLAL2_I]]) -// NYI: ret <2 x i64> [[VQDMLSL_V3_I]] -// int64x2_t test_vqdmlsl_s32(int64x2_t a, int32x2_t b, int32x2_t c) { -// return vqdmlsl_s32(a, b, c); -// } +int64x2_t test_vqdmlal_s32(int64x2_t a, int32x2_t b, int32x2_t c) { + return vqdmlal_s32(a, b, c); + + // CIR-LABEL: vqdmlal_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqadd" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqdmlal_s32(<2 x i64>{{.*}}[[a:%.*]], <2 x i32>{{.*}}[[b:%.*]], <2 x i32>{{.*}}[[c:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[b]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <2 x i32> [[c]] to <8 x i8> + // LLVM: [[VQDMLAL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> [[b]], <2 x i32> [[c]]) + // LLVM: [[VQDMLAL_V3_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> [[a]], <2 x i64> [[VQDMLAL2_I]]) + // LLVM: ret <2 x i64> [[VQDMLAL_V3_I]] +} + + +int32x4_t test_vqdmlsl_s16(int32x4_t a, int16x4_t b, int16x4_t c) { + return vqdmlsl_s16(a, b, c); + + // CIR-LABEL: vqdmlsl_s16 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqdmlsl_s16(<4 x i32>{{.*}}[[a:%.*]], <4 x i16>{{.*}}[[b:%.*]], <4 x i16>{{.*}}[[c:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[b]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <4 x i16> [[c]] to <8 x i8> + // LLVM: [[VQDMLSL2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[b]], <4 x i16> [[c]]) + // LLVM: [[VQDMLSL_V3_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> [[a]], <4 x i32> [[VQDMLSL2_I]]) + // LLVM: ret <4 x i32> [[VQDMLSL_V3_I]] +} + +int64x2_t test_vqdmlsl_s32(int64x2_t a, int32x2_t b, int32x2_t c) { + return vqdmlsl_s32(a, b, c); + + // CIR-LABEL: vqdmlsl_s32 + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqdmull" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sqsub" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vqdmlsl_s32(<2 x i64>{{.*}}[[a:%.*]], <2 x i32>{{.*}}[[b:%.*]], <2 x i32>{{.*}}[[c:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[b]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <2 x i32> [[c]] to <8 x i8> + // LLVM: [[VQDMLSL2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> [[b]], <2 x i32> [[c]]) + // LLVM: [[VQDMLSL_V3_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> [[a]], <2 x i64> [[VQDMLSL2_I]]) + // LLVM: ret <2 x i64> [[VQDMLSL_V3_I]] +} // NYI-LABEL: @test_vqdmull_high_s16( // NYI: [[SHUFFLE_I_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> From 18daffafa20866e208b2a4dce336da3fa0ac5af2 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 13 Nov 2024 22:55:16 +0300 Subject: [PATCH 2074/2301] [CIR][ABI][AArch64] Support struct passing with coercion through memory (#1111) This PR adds a support for one more case of passing structs by value, with `memcpy` emitted. First of all, don't worry - despite the PR seems big, it's basically consist of helpers + refactoring. Also, there is a minor change in the `CIRBaseBuilder` - I made static the `getBestAllocaInsertPoint` method in order to call it from lowering - we discussed once - and I here we just need it (or copy-paste the code, which doesn't seem good). I will add several comments in order to simplify review. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 2 +- .../TargetLowering/LowerFunction.cpp | 119 +++++++++++------- .../AArch64/aarch64-cc-structs.c | 16 +++ 3 files changed, 93 insertions(+), 44 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 225fa444e340..a8589baa5ae0 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -560,7 +560,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { // Block handling helpers // ---------------------- // - OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) { + static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block) { auto last = std::find_if(block->rbegin(), block->rend(), [](mlir::Operation &op) { return mlir::isa(&op); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index d2a7e83e7020..cf2fdda5b483 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -19,6 +19,7 @@ #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" #include "clang/CIR/ABIArgInfo.h" +#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" @@ -140,6 +141,76 @@ static mlir::Value coerceIntOrPtrToIntOrPtr(mlir::Value val, mlir::Type typ, return val; } +// FIXME(cir): Create a custom rewriter class to abstract this away. +mlir::Value createBitcast(mlir::Value Src, mlir::Type Ty, LowerFunction &LF) { + return LF.getRewriter().create(Src.getLoc(), Ty, CastKind::bitcast, + Src); +} + +AllocaOp createTmpAlloca(LowerFunction &LF, mlir::Location loc, mlir::Type ty) { + auto &rw = LF.getRewriter(); + auto *ctxt = rw.getContext(); + mlir::PatternRewriter::InsertionGuard guard(rw); + + // find function's entry block and use it to find a best place for alloca + auto *blk = rw.getBlock(); + auto *op = blk->getParentOp(); + FuncOp fun = mlir::dyn_cast(op); + if (!fun) + fun = op->getParentOfType(); + auto &entry = fun.getBody().front(); + + auto ip = CIRBaseBuilderTy::getBestAllocaInsertPoint(&entry); + rw.restoreInsertionPoint(ip); + + auto align = LF.LM.getDataLayout().getABITypeAlign(ty); + auto alignAttr = rw.getI64IntegerAttr(align.value()); + auto ptrTy = PointerType::get(ctxt, ty); + return rw.create(loc, ptrTy, ty, "tmp", alignAttr); +} + +bool isVoidPtr(mlir::Value v) { + if (auto p = mlir::dyn_cast(v.getType())) + return mlir::isa(p.getPointee()); + return false; +} + +MemCpyOp createMemCpy(LowerFunction &LF, mlir::Value dst, mlir::Value src, + uint64_t len) { + cir_cconv_assert(mlir::isa(src.getType())); + cir_cconv_assert(mlir::isa(dst.getType())); + + auto *ctxt = LF.getRewriter().getContext(); + auto &rw = LF.getRewriter(); + auto voidPtr = PointerType::get(ctxt, cir::VoidType::get(ctxt)); + + if (!isVoidPtr(src)) + src = createBitcast(src, voidPtr, LF); + if (!isVoidPtr(dst)) + dst = createBitcast(dst, voidPtr, LF); + + auto i64Ty = IntType::get(ctxt, 64, false); + auto length = rw.create(src.getLoc(), IntAttr::get(i64Ty, len)); + return rw.create(src.getLoc(), dst, src, length); +} + +cir::AllocaOp findAlloca(mlir::Operation *op) { + if (!op) + return {}; + + if (auto al = mlir::dyn_cast(op)) { + return al; + } else if (auto ret = mlir::dyn_cast(op)) { + auto vals = ret.getInput(); + if (vals.size() == 1) + return findAlloca(vals[0].getDefiningOp()); + } else if (auto load = mlir::dyn_cast(op)) { + return findAlloca(load.getAddr().getDefiningOp()); + } + + return {}; +} + /// Create a store to \param Dst from \param Src where the source and /// destination may have different types. /// @@ -187,16 +258,12 @@ void createCoercedStore(mlir::Value Src, mlir::Value Dst, bool DstIsVolatile, auto addr = bld.create(Dst.getLoc(), ptrTy, CastKind::bitcast, Dst); bld.create(Dst.getLoc(), Src, addr); } else { - cir_cconv_unreachable("NYI"); + auto tmp = createTmpAlloca(CGF, Src.getLoc(), SrcTy); + CGF.getRewriter().create(Src.getLoc(), Src, tmp); + createMemCpy(CGF, Dst, tmp, DstSize.getFixedValue()); } } -// FIXME(cir): Create a custom rewriter class to abstract this away. -mlir::Value createBitcast(mlir::Value Src, mlir::Type Ty, LowerFunction &LF) { - return LF.getRewriter().create(Src.getLoc(), Ty, CastKind::bitcast, - Src); -} - /// Coerces a \param Src value to a value of type \param Ty. /// /// This safely handles the case when the src type is smaller than the @@ -261,23 +328,6 @@ mlir::Value emitAddressAtOffset(LowerFunction &LF, mlir::Value addr, return addr; } -cir::AllocaOp findAlloca(mlir::Operation *op) { - if (!op) - return {}; - - if (auto al = mlir::dyn_cast(op)) { - return al; - } else if (auto ret = mlir::dyn_cast(op)) { - auto vals = ret.getInput(); - if (vals.size() == 1) - return findAlloca(vals[0].getDefiningOp()); - } else if (auto load = mlir::dyn_cast(op)) { - return findAlloca(load.getAddr().getDefiningOp()); - } - - return {}; -} - /// After the calling convention is lowered, an ABI-agnostic type might have to /// be loaded back to its ABI-aware couterpart so it may be returned. If they /// differ, we have to do a coerced load. A coerced load, which means to load a @@ -329,25 +379,8 @@ mlir::Value castReturnValue(mlir::Value Src, mlir::Type Ty, LowerFunction &LF) { // Otherwise do coercion through memory. if (auto addr = findAlloca(Src.getDefiningOp())) { auto &rewriter = LF.getRewriter(); - auto *ctxt = LF.LM.getMLIRContext(); - auto ptrTy = PointerType::get(ctxt, Ty); - auto voidPtr = PointerType::get(ctxt, cir::VoidType::get(ctxt)); - - // insert alloca near the previuos one - auto point = rewriter.saveInsertionPoint(); - rewriter.setInsertionPointAfter(addr); - auto align = LF.LM.getDataLayout().getABITypeAlign(Ty); - auto alignAttr = rewriter.getI64IntegerAttr(align.value()); - auto tmp = - rewriter.create(Src.getLoc(), ptrTy, Ty, "tmp", alignAttr); - rewriter.restoreInsertionPoint(point); - - auto srcVoidPtr = createBitcast(addr, voidPtr, LF); - auto dstVoidPtr = createBitcast(tmp, voidPtr, LF); - auto i64Ty = IntType::get(ctxt, 64, false); - auto len = rewriter.create( - Src.getLoc(), IntAttr::get(i64Ty, SrcSize.getFixedValue())); - rewriter.create(Src.getLoc(), dstVoidPtr, srcVoidPtr, len); + auto tmp = createTmpAlloca(LF, Src.getLoc(), Ty); + createMemCpy(LF, tmp, addr, SrcSize.getFixedValue()); return rewriter.create(Src.getLoc(), tmp.getResult()); } diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index eb1899840713..1acc75da262f 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -152,3 +152,19 @@ void pass_eq_128(EQ_128 s) {} // LLVM: store ptr %0, ptr %[[#V1]], align 8 // LLVM: %[[#V2:]] = load ptr, ptr %[[#V1]], align 8 void pass_gt_128(GT_128 s) {} + +// CHECK: cir.func @passS(%arg0: !cir.array +// CHECK: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, [""] {alignment = 4 : i64} +// CHECK: %[[#V1:]] = cir.alloca !cir.array, !cir.ptr>, ["tmp"] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %[[#V1]] : !cir.array, !cir.ptr> +// CHECK: %[[#V2:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr>), !cir.ptr +// CHECK: %[[#V3:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V4:]] = cir.const #cir.int<12> : !u64i +// CHECK: cir.libc.memcpy %[[#V4]] bytes from %[[#V2]] to %[[#V3]] : !u64i, !cir.ptr -> !cir.ptr + +// LLVM: void @passS([2 x i64] %[[#ARG:]]) +// LLVM: %[[#V1:]] = alloca %struct.S, i64 1, align 4 +// LLVM: %[[#V2:]] = alloca [2 x i64], i64 1, align 8 +// LLVM: store [2 x i64] %[[#ARG]], ptr %[[#V2]], align 8 +// LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#V1]], ptr %[[#V2]], i64 12, i1 false) +void passS(S s) {} \ No newline at end of file From e6e7625f5990e42d8b3527a019f06208d0da4c72 Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 13 Nov 2024 14:55:49 -0500 Subject: [PATCH 2075/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vaeseq_u8 (#1112) --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 4 ++++ clang/test/CIR/CodeGen/AArch64/neon-crypto.c | 13 ++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 68415df912cd..922ef1b29dc5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2485,6 +2485,10 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( argTypes.push_back(vTy); break; } + case NEON::BI__builtin_neon_vaeseq_u8: { + intrincsName = "aarch64.crypto.aese"; + break; + } case NEON::BI__builtin_neon_vpadd_v: case NEON::BI__builtin_neon_vpaddq_v: { intrincsName = mlir::isa(vTy.getEltType()) diff --git a/clang/test/CIR/CodeGen/AArch64/neon-crypto.c b/clang/test/CIR/CodeGen/AArch64/neon-crypto.c index 7b2f7be0efa4..8f83d0cc4739 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-crypto.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-crypto.c @@ -4,7 +4,7 @@ // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ -// RUN: -target-feature +sha2 -target-feature +aes \ +// RUN: -fclangir -target-feature +sha2 -target-feature +aes \ // RUN: -disable-O0-optnone -emit-llvm -o - %s \ // RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s @@ -23,3 +23,14 @@ uint8x16_t test_vaesmcq_u8(uint8x16_t data) { // LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> [[DATA]]) // LLVM: ret <16 x i8> [[RES]] } + +uint8x16_t test_vaeseq_u8(uint8x16_t data, uint8x16_t key) { + return vaeseq_u8(data, key); + + // CIR-LABEL: vaeseq_u8 + // {{%.*}} = cir.llvm.intrinsic "aarch64.crypto.aese" {{%.*}} : (!cir.vector) -> !cir.vector + + // LLVM: {{.*}}vaeseq_u8(<16 x i8>{{.*}}[[DATA:%.*]], <16 x i8>{{.*}}[[KEY:%.*]]) + // LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> [[DATA]], <16 x i8> [[KEY]]) + // LLVM: ret <16 x i8> [[RES]] +} From 3db6127a12ec43627c33ffb90458ba590dcc2aa4 Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 13 Nov 2024 15:06:28 -0500 Subject: [PATCH 2076/2301] [CIR][CIRGen][Builtin] Support __builtin_wmemchr (#1115) --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 9 +++++++-- clang/test/CIR/CodeGen/builtins.cpp | 19 +++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 04f2c1861401..538e166c5d2c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1541,8 +1541,13 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, builder.createMemSet(loc, dest.getPointer(), byteVal, sizeOp); return RValue::get(dest.getPointer()); } - case Builtin::BI__builtin_wmemchr: - llvm_unreachable("BI__builtin_wmemchr NYI"); + case Builtin::BI__builtin_wmemchr: { + // The MSVC runtime library does not provide a definition of wmemchr, so we + // need an inline implementation. + if (getTarget().getTriple().isOSMSVCRT()) + llvm_unreachable("BI__builtin_wmemchr NYI for OS with MSVC runtime"); + break; + } case Builtin::BI__builtin_wmemcmp: llvm_unreachable("BI__builtin_wmemcmp NYI"); case Builtin::BI__builtin_dwarf_cfa: diff --git a/clang/test/CIR/CodeGen/builtins.cpp b/clang/test/CIR/CodeGen/builtins.cpp index 971fc09e07d9..f103abb8db64 100644 --- a/clang/test/CIR/CodeGen/builtins.cpp +++ b/clang/test/CIR/CodeGen/builtins.cpp @@ -76,6 +76,25 @@ extern "C" char* test_memchr(const char arg[32]) { // LLVM: ret ptr [[RET]] } +extern "C" wchar_t* test_wmemchr(const wchar_t *wc) { + return __builtin_wmemchr(wc, 257u, 32); + + // CIR-LABEL: test_wmemchr + // CIR: [[PATTERN:%.*]] = cir.const #cir.int<257> : !u32i + // CIR: [[LEN:%.*]] = cir.const #cir.int<32> : !s32i + // CIR: [[LEN_U64:%.*]] = cir.cast(integral, [[LEN]] : !s32i), !u64i + // CIR: cir.call @wmemchr({{%.*}}, [[PATTERN]], [[LEN_U64]]) : (!cir.ptr, !u32i, !u64i) -> !cir.ptr + + // LLVM: {{.*}}@test_wmemchr(ptr{{.*}}[[ARG:%.*]]) + // LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8 + // LLVM: store ptr [[ARG]], ptr [[TMP0]], align 8 + // LLVM: [[SRC:%.*]] = load ptr, ptr [[TMP0]], align 8 + // LLVM: [[RES:%.*]] = call ptr @wmemchr(ptr [[SRC]], i32 257, i64 32) + // LLVM: store ptr [[RES]], ptr [[RET_P:%.*]], align 8 + // LLVM: [[RET:%.*]] = load ptr, ptr [[RET_P]], align 8 + // LLVM: ret ptr [[RET]] +} + extern "C" void *test_return_address(void) { return __builtin_return_address(1); From 34474844f866f8fbf37be3f7c24513f2cb95c5e8 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Thu, 14 Nov 2024 04:22:05 +0800 Subject: [PATCH 2077/2301] [CIR][CIRGen] Support __builtin_signbitl (#1117) follow https://github.com/llvm/clangir/pull/1033 handle `LongDoubleType` with `FP80Type`. --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 7 +++++-- clang/test/CIR/CodeGen/builtin-signbit.c | 10 ++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 745827e1bdb1..724a40c6e114 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4365,8 +4365,11 @@ class CIRSignBitOpLowering : public mlir::OpConversionPattern { if (auto longDoubleType = mlir::dyn_cast(op.getInput().getType())) { if (mlir::isa(longDoubleType.getUnderlying())) { - // see https://github.com/llvm/clangir/issues/1057 - llvm_unreachable("NYI"); + // If the underlying type of LongDouble is FP80Type, + // DataLayout::getTypeSizeInBits returns 128. + // See https://github.com/llvm/clangir/issues/1057. + // Set the width to 80 manually. + width = 80; } } auto intTy = mlir::IntegerType::get(rewriter.getContext(), width); diff --git a/clang/test/CIR/CodeGen/builtin-signbit.c b/clang/test/CIR/CodeGen/builtin-signbit.c index 78b25ae4bf9e..622d877242cd 100644 --- a/clang/test/CIR/CodeGen/builtin-signbit.c +++ b/clang/test/CIR/CodeGen/builtin-signbit.c @@ -23,3 +23,13 @@ void test_signbit_double(double val) { // LLVM: %{{.+}} = zext i1 [[TMP2]] to i32 __builtin_signbitf(val); } + +void test_signbit_long_double(long double val) { + // CIR: test_signbit_long_double + // LLVM: test_signbit_long_double + __builtin_signbitl(val); + // CIR: %{{.+}} = cir.signbit %{{.+}} : !cir.long_double -> !s32i + // LLVM: [[TMP1:%.*]] = bitcast x86_fp80 %{{.+}} to i80 + // LLVM: [[TMP2:%.*]] = icmp slt i80 [[TMP1]], 0 + // LLVM: %{{.+}} = zext i1 [[TMP2]] to i32 +} From d435cca744b32e68ed1089a32d2e468cc5bcc139 Mon Sep 17 00:00:00 2001 From: 7mile Date: Thu, 14 Nov 2024 04:34:14 +0800 Subject: [PATCH 2078/2301] [CIR][Dialect][NFC] Refactor hardcoded attribute name strings (#1122) As title, this patch refactors raw string literals for (module) attribute names into static methods of `CIRDialect`, following the convention of MLIR. --- .../include/clang/CIR/Dialect/IR/CIRDialect.td | 9 +++++++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 8 +++++--- clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp | 3 ++- .../CIR/Dialect/Transforms/LoweringPrepare.cpp | 6 +++--- .../Transforms/TargetLowering/LowerCall.cpp | 8 ++++---- .../Transforms/TargetLowering/LowerModule.cpp | 5 +++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 17 +++++++++-------- 7 files changed, 35 insertions(+), 21 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index 5ec1865bc3e2..c27bc1f28443 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -34,6 +34,15 @@ def CIR_Dialect : Dialect { // Names of CIR parameter attributes. static llvm::StringRef getSExtAttrName() { return "cir.signext"; } static llvm::StringRef getZExtAttrName() { return "cir.zeroext"; } + static llvm::StringRef getSOBAttrName() { return "cir.sob"; } + static llvm::StringRef getLangAttrName() { return "cir.lang"; } + static llvm::StringRef getTripleAttrName() { return "cir.triple"; } + + static llvm::StringRef getGlobalCtorsAttrName() { return "cir.global_ctors"; } + static llvm::StringRef getGlobalDtorsAttrName() { return "cir.global_dtors"; } + static llvm::StringRef getGlobalAnnotationsAttrName() { return "cir.global_annotations"; } + + static llvm::StringRef getOpenCLVersionAttrName() { return "cir.cl.version"; } void registerAttributes(); void registerTypes(); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 939d4dc40433..97678fa2ad8a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -185,11 +185,13 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // FIXME(cir): Implement a custom CIR Module Op and attributes to leverage // MLIR features. - theModule->setAttr("cir.sob", + theModule->setAttr(cir::CIRDialect::getSOBAttrName(), cir::SignedOverflowBehaviorAttr::get(&context, sob)); auto lang = SourceLanguageAttr::get(&context, getCIRSourceLanguage()); - theModule->setAttr("cir.lang", cir::LangAttr::get(&context, lang)); - theModule->setAttr("cir.triple", builder.getStringAttr(getTriple().str())); + theModule->setAttr(cir::CIRDialect::getLangAttrName(), + cir::LangAttr::get(&context, lang)); + theModule->setAttr(cir::CIRDialect::getTripleAttrName(), + builder.getStringAttr(getTriple().str())); // Set the module name to be the name of the main file. TranslationUnitDecl // often contains invalid source locations and isn't a reliable source for the // module location. diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp index d11126940935..3db01cab6659 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp @@ -260,5 +260,6 @@ void CIRGenModule::emitOpenCLMetadata() { auto clVersionAttr = cir::OpenCLVersionAttr::get(&getMLIRContext(), major, minor); - theModule->setAttr("cir.cl.version", clVersionAttr); + theModule->setAttr(cir::CIRDialect::getOpenCLVersionAttrName(), + clVersionAttr); } diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 6af33fd551f2..b0709e9638ff 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -888,11 +888,11 @@ void LoweringPreparePass::lowerGlobalOp(GlobalOp op) { void LoweringPreparePass::buildGlobalCtorDtorList() { if (!globalCtorList.empty()) { - theModule->setAttr("cir.global_ctors", + theModule->setAttr(cir::CIRDialect::getGlobalCtorsAttrName(), mlir::ArrayAttr::get(&getContext(), globalCtorList)); } if (!globalDtorList.empty()) { - theModule->setAttr("cir.global_dtors", + theModule->setAttr(cir::CIRDialect::getGlobalDtorsAttrName(), mlir::ArrayAttr::get(&getContext(), globalDtorList)); } } @@ -1136,7 +1136,7 @@ void LoweringPreparePass::buildGlobalAnnotationValues() { return; mlir::ArrayAttr annotationValueArray = mlir::ArrayAttr::get(theModule.getContext(), globalAnnotations); - theModule->setAttr("cir.global_annotations", + theModule->setAttr(cir::CIRDialect::getGlobalAnnotationsAttrName(), cir::GlobalAnnotationValuesAttr::get( theModule.getContext(), annotationValueArray)); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp index 1978f0497c85..8b335fa9c5e8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerCall.cpp @@ -211,12 +211,12 @@ void LowerModule::constructAttributeList(llvm::StringRef Name, switch (AI.getKind()) { case ABIArgInfo::Extend: if (AI.isSignExt()) - Attrs.push_back( - rewriter.getNamedAttr("cir.signext", rewriter.getUnitAttr())); + Attrs.push_back(rewriter.getNamedAttr( + cir::CIRDialect::getSExtAttrName(), rewriter.getUnitAttr())); else // FIXME(cir): Add a proper abstraction to create attributes. - Attrs.push_back( - rewriter.getNamedAttr("cir.zeroext", rewriter.getUnitAttr())); + Attrs.push_back(rewriter.getNamedAttr( + cir::CIRDialect::getZExtAttrName(), rewriter.getUnitAttr())); [[fallthrough]]; case ABIArgInfo::Direct: if (ArgNo == 0 && cir::MissingFeatures::chainCall()) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 8e4ac59a9363..f6c75b39e516 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -236,8 +236,9 @@ createLowerModule(mlir::ModuleOp module, mlir::PatternRewriter &rewriter) { module->getAttr(mlir::LLVM::LLVMDialect::getDataLayoutAttrName())); // Fetch target information. - llvm::Triple triple( - mlir::cast(module->getAttr("cir.triple")).getValue()); + llvm::Triple triple(mlir::cast( + module->getAttr(cir::CIRDialect::getTripleAttrName())) + .getValue()); clang::TargetOptions targetOptions; targetOptions.Triple = triple.str(); auto targetInfo = clang::targets::AllocateTarget(triple, targetOptions); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 724a40c6e114..03d5b20d38d3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4448,7 +4448,7 @@ std::unique_ptr prepareLowerModule(mlir::ModuleOp module) { // If the triple is not present, e.g. CIR modules parsed from text, we // cannot init LowerModule properly. assert(!cir::MissingFeatures::makeTripleAlwaysPresent()); - if (!module->hasAttr("cir.triple")) + if (!module->hasAttr(cir::CIRDialect::getTripleAttrName())) return {}; return cir::createLowerModule(module, rewriter); } @@ -4706,7 +4706,8 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar( llvm::StringMap &argStringGlobalsMap, llvm::MapVector &argsVarMap) { mlir::ModuleOp module = getOperation(); - mlir::Attribute attr = module->getAttr("cir.global_annotations"); + mlir::Attribute attr = + module->getAttr(cir::CIRDialect::getGlobalAnnotationsAttrName()); if (!attr) return; if (auto globalAnnotValues = @@ -4834,8 +4835,8 @@ void ConvertCIRToLLVMPass::runOnOperation() { // Allow operations that will be lowered directly to LLVM IR. target.addLegalOp(); - getOperation()->removeAttr("cir.sob"); - getOperation()->removeAttr("cir.lang"); + getOperation()->removeAttr(cir::CIRDialect::getSOBAttrName()); + getOperation()->removeAttr(cir::CIRDialect::getLangAttrName()); llvm::SmallVector ops; ops.push_back(module); @@ -4845,8 +4846,8 @@ void ConvertCIRToLLVMPass::runOnOperation() { signalPassFailure(); // Emit the llvm.global_ctors array. - buildCtorDtorList(module, "cir.global_ctors", "llvm.global_ctors", - [](mlir::Attribute attr) { + buildCtorDtorList(module, cir::CIRDialect::getGlobalCtorsAttrName(), + "llvm.global_ctors", [](mlir::Attribute attr) { assert(mlir::isa(attr) && "must be a GlobalCtorAttr"); auto ctorAttr = mlir::cast(attr); @@ -4854,8 +4855,8 @@ void ConvertCIRToLLVMPass::runOnOperation() { ctorAttr.getPriority()); }); // Emit the llvm.global_dtors array. - buildCtorDtorList(module, "cir.global_dtors", "llvm.global_dtors", - [](mlir::Attribute attr) { + buildCtorDtorList(module, cir::CIRDialect::getGlobalDtorsAttrName(), + "llvm.global_dtors", [](mlir::Attribute attr) { assert(mlir::isa(attr) && "must be a GlobalDtorAttr"); auto dtorAttr = mlir::cast(attr); From 1e37f52e89a709623666b4fd833ddeb0b80574a2 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 14 Nov 2024 22:01:52 +0300 Subject: [PATCH 2079/2301] [CIR][ABI][Lowering] Fixes calls with union type (#1119) This PR handles calls with unions passed by value in the calling convention pass. #### Implementation As one may know, data layout for unions in CIR and in LLVM differ one from another. In CIR we track all the union members, while in LLVM IR only the largest one. And here we need to take this difference into account: we need to find a type of the largest member and treat it as the first (and only) union member in order to preserve all the logic from the original codegen. There is a method `StructType::getLargestMember` - but looks like it produces different results (with the one I implemented or better to say copy-pasted). Maybe it's done intentionally, I don't know. The LLVM IR produced has also some difference from the original one. In the original IR `gep` is emitted - and we can not do the same. If we create `getMemberOp` we may fail on type checking for unions - since the first member type may differ from the largest type. This is why we create `bitcast` instead. Relates to the issue #1061 --- clang/include/clang/CIR/MissingFeatures.h | 1 - clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 13 +++--- .../TargetLowering/LowerFunction.cpp | 41 ++++++++++++++----- .../test/CIR/CallConvLowering/AArch64/union.c | 32 ++++++++++++++- clang/test/CIR/Lowering/unions.cir | 2 +- 5 files changed, 68 insertions(+), 21 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index b25c5e07f9f0..fbcc3cadb855 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -261,7 +261,6 @@ struct MissingFeatures { static bool X86TypeClassification() { return false; } static bool ABIClangTypeKind() { return false; } - static bool ABIEnterStructForCoercedAccess() { return false; } static bool ABIFuncPtr() { return false; } static bool ABIInRegAttribute() { return false; } static bool ABINestedRecordLayout() { return false; } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index e579fe4c2f0c..2e262478a733 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -519,13 +519,12 @@ void StructType::computeSizeAndAlignment( // Found a nested union: recurse into it to fetch its largest member. auto structMember = mlir::dyn_cast(ty); - if (structMember && structMember.isUnion()) { - auto candidate = structMember.getLargestMember(dataLayout); - if (dataLayout.getTypeSize(candidate) > largestMemberSize) { - largestMember = candidate; - largestMemberSize = dataLayout.getTypeSize(largestMember); - } - } else if (dataLayout.getTypeSize(ty) > largestMemberSize) { + if (!largestMember || + dataLayout.getTypeABIAlignment(ty) > + dataLayout.getTypeABIAlignment(largestMember) || + (dataLayout.getTypeABIAlignment(ty) == + dataLayout.getTypeABIAlignment(largestMember) && + dataLayout.getTypeSize(ty) > largestMemberSize)) { largestMember = ty; largestMemberSize = dataLayout.getTypeSize(largestMember); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index cf2fdda5b483..704242a73b8c 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -53,6 +53,12 @@ mlir::Value createCoercedBitcast(mlir::Value Src, mlir::Type DestTy, CastKind::bitcast, Src); } +// FIXME(cir): Create a custom rewriter class to abstract this away. +mlir::Value createBitcast(mlir::Value Src, mlir::Type Ty, LowerFunction &LF) { + return LF.getRewriter().create(Src.getLoc(), Ty, CastKind::bitcast, + Src); +} + /// Given a struct pointer that we are accessing some number of bytes out of it, /// try to gep into the struct to get at its inner goodness. Dive as deep as /// possible without entering an element with an in-memory size smaller than @@ -67,6 +73,9 @@ mlir::Value enterStructPointerForCoercedAccess(mlir::Value SrcPtr, mlir::Type FirstElt = SrcSTy.getMembers()[0]; + if (SrcSTy.isUnion()) + FirstElt = SrcSTy.getLargestMember(CGF.LM.getDataLayout().layout); + // If the first elt is at least as large as what we're looking for, or if the // first element is the same size as the whole struct, we can enter it. The // comparison must be made on the store size and not the alloca size. Using @@ -76,10 +85,26 @@ mlir::Value enterStructPointerForCoercedAccess(mlir::Value SrcPtr, FirstEltSize < CGF.LM.getDataLayout().getTypeStoreSize(SrcSTy)) return SrcPtr; - cir_cconv_assert_or_abort( - !cir::MissingFeatures::ABIEnterStructForCoercedAccess(), "NYI"); - return SrcPtr; // FIXME: This is a temporary workaround for the assertion - // above. + auto &rw = CGF.getRewriter(); + auto *ctxt = rw.getContext(); + auto ptrTy = PointerType::get(ctxt, FirstElt); + if (mlir::isa(SrcPtr.getType())) { + auto addr = SrcPtr; + if (auto load = mlir::dyn_cast(SrcPtr.getDefiningOp())) + addr = load.getAddr(); + cir_cconv_assert(mlir::isa(addr.getType())); + // we can not use getMemberOp here since we need a pointer to the first + // element. And in the case of unions we pick a type of the largest elt, + // that may or may not be the first one. Thus, getMemberOp verification + // may fail. + auto cast = createBitcast(addr, ptrTy, CGF); + SrcPtr = rw.create(SrcPtr.getLoc(), cast); + } + + if (auto sty = mlir::dyn_cast(SrcPtr.getType())) + return enterStructPointerForCoercedAccess(SrcPtr, sty, DstSize, CGF); + + return SrcPtr; } /// Convert a value Val to the specific Ty where both @@ -141,12 +166,6 @@ static mlir::Value coerceIntOrPtrToIntOrPtr(mlir::Value val, mlir::Type typ, return val; } -// FIXME(cir): Create a custom rewriter class to abstract this away. -mlir::Value createBitcast(mlir::Value Src, mlir::Type Ty, LowerFunction &LF) { - return LF.getRewriter().create(Src.getLoc(), Ty, CastKind::bitcast, - Src); -} - AllocaOp createTmpAlloca(LowerFunction &LF, mlir::Location loc, mlir::Type ty) { auto &rw = LF.getRewriter(); auto *ctxt = rw.getContext(); @@ -302,7 +321,7 @@ mlir::Value createCoercedValue(mlir::Value Src, mlir::Type Ty, // extension or truncation to the desired type. if ((mlir::isa(Ty) || mlir::isa(Ty)) && (mlir::isa(SrcTy) || mlir::isa(SrcTy))) { - cir_cconv_unreachable("NYI"); + return coerceIntOrPtrToIntOrPtr(Src, Ty, CGF); } // If load is legal, just bitcast the src pointer. diff --git a/clang/test/CIR/CallConvLowering/AArch64/union.c b/clang/test/CIR/CallConvLowering/AArch64/union.c index 4f622f0215c3..ed02e9aded7a 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/union.c +++ b/clang/test/CIR/CallConvLowering/AArch64/union.c @@ -38,4 +38,34 @@ void foo(U u) {} U init() { U u; return u; -} \ No newline at end of file +} + +typedef union { + + struct { + short a; + char b; + char c; + }; + + int x; +} A; + +void passA(A x) {} + +// CIR: cir.func {{.*@callA}}() +// CIR: %[[#V0:]] = cir.alloca !ty_A, !cir.ptr, ["x"] {alignment = 4 : i64} +// CIR: %[[#V1:]] = cir.cast(bitcast, %[[#V0:]] : !cir.ptr), !cir.ptr +// CIR: %[[#V2:]] = cir.load %[[#V1]] : !cir.ptr, !s32i +// CIR: %[[#V3:]] = cir.cast(integral, %[[#V2]] : !s32i), !u64i +// CIR: cir.call @passA(%[[#V3]]) : (!u64i) -> () + +// LLVM: void @callA() +// LLVM: %[[#V0:]] = alloca %union.A, i64 1, align 4 +// LLVM: %[[#V1:]] = load i32, ptr %[[#V0]], align 4 +// LLVM: %[[#V2:]] = sext i32 %[[#V1]] to i64 +// LLVM: call void @passA(i64 %[[#V2]]) +void callA() { + A x; + passA(x); +} diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir index 0cc9d1d15749..fe56e2af7527 100644 --- a/clang/test/CIR/Lowering/unions.cir +++ b/clang/test/CIR/Lowering/unions.cir @@ -16,7 +16,7 @@ module { cir.global external @u2 = #cir.zero : !ty_U2_ cir.global external @u3 = #cir.zero : !ty_U3_ // CHECK: llvm.mlir.global external @u2() {addr_space = 0 : i32} : !llvm.struct<"union.U2", (f64)> - // CHECK: llvm.mlir.global external @u3() {addr_space = 0 : i32} : !llvm.struct<"union.U3", (i32)> + // CHECK: llvm.mlir.global external @u3() {addr_space = 0 : i32} : !llvm.struct<"union.U3", (struct<"union.U1", (i32)>)> // CHECK: llvm.func @test cir.func @test(%arg0: !cir.ptr) { From f3823af8a271232fbbe88ace2d3d659dfb2e5200 Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 14 Nov 2024 14:03:00 -0500 Subject: [PATCH 2080/2301] [CIR][Builtin][NFC] More informative llvm_unreachable message in CIRGenBuiltinAArch64.cpp (#1124) We are still seeing crash message like `NYI UNREACHABLE executed at clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp:3304`, which is not convenient for triaging as our code base changes so fast, line number doesn't help much. So, here we replaced most of `llvm_unreachable("NYI")` with more informative message. --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 488 +++++++++--------- 1 file changed, 244 insertions(+), 244 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 922ef1b29dc5..7e75e3f1ba5c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -1919,14 +1919,14 @@ static cir::VectorType GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, V1Ty ? 1 : (4 << IsQuad)); case NeonTypeFlags::BFloat16: if (AllowBFloatArgsAndRet) - llvm_unreachable("NYI"); + llvm_unreachable("NeonTypeFlags::BFloat16 NYI"); else - llvm_unreachable("NYI"); + llvm_unreachable("NeonTypeFlags::BFloat16 NYI"); case NeonTypeFlags::Float16: if (HasLegalHalfType) - llvm_unreachable("NYI"); + llvm_unreachable("NeonTypeFlags::Float16 NYI"); else - llvm_unreachable("NYI"); + llvm_unreachable("NeonTypeFlags::Float16 NYI"); case NeonTypeFlags::Int32: return cir::VectorType::get(CGF->getBuilder().getContext(), TypeFlags.isUnsigned() ? CGF->UInt32Ty @@ -1942,7 +1942,7 @@ static cir::VectorType GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, // FIXME: i128 and f128 doesn't get fully support in Clang and llvm. // There is a lot of i128 and f128 API missing. // so we use v16i8 to represent poly128 and get pattern matched. - llvm_unreachable("NYI"); + llvm_unreachable("NeonTypeFlags::Poly128 NYI"); case NeonTypeFlags::Float32: return cir::VectorType::get(CGF->getBuilder().getContext(), CGF->getCIRGenModule().FloatTy, @@ -2013,25 +2013,25 @@ static mlir::Value emitAArch64TblBuiltinExpr(CIRGenFunction &CGF, // argument that specifies the vector type, need to handle each case. switch (BuiltinID) { case NEON::BI__builtin_neon_vtbl1_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vtbl1_v NYI"); } case NEON::BI__builtin_neon_vtbl2_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vtbl2_v NYI"); } case NEON::BI__builtin_neon_vtbl3_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vtbl3_v NYI"); } case NEON::BI__builtin_neon_vtbl4_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vtbl4_v NYI"); } case NEON::BI__builtin_neon_vtbx1_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vtbx1_v NYI"); } case NEON::BI__builtin_neon_vtbx2_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vtbx2_v NYI"); } case NEON::BI__builtin_neon_vtbx3_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vtbx3_v NYI"); } case NEON::BI__builtin_neon_vqtbl1_v: case NEON::BI__builtin_neon_vqtbl1q_v: @@ -2576,7 +2576,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, return emitAArch64SMEBuiltinExpr(BuiltinID, E); if (BuiltinID == Builtin::BI__builtin_cpu_supports) - llvm_unreachable("NYI"); + llvm_unreachable("Builtin::BI__builtin_cpu_supports NYI"); unsigned HintID = static_cast(-1); switch (BuiltinID) { @@ -2612,94 +2612,94 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } if (BuiltinID == clang::AArch64::BI__builtin_arm_trap) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_trap NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_get_sme_state) { // Create call to __arm_sme_state and store the results to the two pointers. - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_get_sme_state NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit) { assert((getContext().getTypeSize(E->getType()) == 32) && "rbit of unusual size!"); - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_rbit NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit64) { assert((getContext().getTypeSize(E->getType()) == 64) && "rbit of unusual size!"); - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_rbit64 NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_clz || BuiltinID == clang::AArch64::BI__builtin_arm_clz64) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_clz64 NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_cls NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_cls64) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_cls64 NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32zf || BuiltinID == clang::AArch64::BI__builtin_arm_rint32z) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_rint32z NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64zf || BuiltinID == clang::AArch64::BI__builtin_arm_rint64z) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_rint64z NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32xf || BuiltinID == clang::AArch64::BI__builtin_arm_rint32x) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_rint32x NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64xf || BuiltinID == clang::AArch64::BI__builtin_arm_rint64x) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_rint64x NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_jcvt) { assert((getContext().getTypeSize(E->getType()) == 32) && "__jcvt of unusual size!"); - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_jcvt NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b || BuiltinID == clang::AArch64::BI__builtin_arm_st64b || BuiltinID == clang::AArch64::BI__builtin_arm_st64bv || BuiltinID == clang::AArch64::BI__builtin_arm_st64bv0) { - llvm_unreachable("NYI"); + llvm_unreachable(" clang::AArch64::BI__builtin_arm_st64bv0 like NYI"); if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b) { // Load from the address via an LLVM intrinsic, receiving a // tuple of 8 i64 words, and store each one to ValPtr. - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_ld64b NYI"); } else { // Load 8 i64 words from ValPtr, and store them to the address // via an LLVM intrinsic. - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_st64b NYI"); } } if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr || BuiltinID == clang::AArch64::BI__builtin_arm_rndrrs) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_rndrrs NYI"); } if (BuiltinID == clang::AArch64::BI__clear_cache) { assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__clear_cache NYI"); } if ((BuiltinID == clang::AArch64::BI__builtin_arm_ldrex || BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) && getContext().getTypeSize(E->getType()) == 128) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_ldaex NYI"); } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex || BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) { return emitArmLdrexNon128Intrinsic(BuiltinID, E, *this); @@ -2708,28 +2708,28 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex || BuiltinID == clang::AArch64::BI__builtin_arm_stlex) && getContext().getTypeSize(E->getArg(0)->getType()) == 128) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_stlex NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_strex || BuiltinID == clang::AArch64::BI__builtin_arm_stlex) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_stlex NYI"); } if (BuiltinID == clang::AArch64::BI__getReg) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__getReg NYI"); } if (BuiltinID == clang::AArch64::BI__break) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__break NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_clrex) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_clrex NYI"); } if (BuiltinID == clang::AArch64::BI_ReadWriteBarrier) - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI_ReadWriteBarrier"); // CRC32 // FIXME(cir): get rid of LLVM when this gets implemented. @@ -2743,16 +2743,16 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case clang::AArch64::BI__builtin_arm_crc32cw: case clang::AArch64::BI__builtin_arm_crc32d: case clang::AArch64::BI__builtin_arm_crc32cd: - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_crc32cd NYI"); } if (CRCIntrinsicID != llvm::Intrinsic::not_intrinsic) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_crc32cd NYI"); } // Memory Operations (MOPS) if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_crc32cd NYI"); } // Memory Tagging Extensions (MTE) Intrinsics @@ -2765,11 +2765,11 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case clang::AArch64::BI__builtin_arm_ldg: case clang::AArch64::BI__builtin_arm_stg: case clang::AArch64::BI__builtin_arm_subp: - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_subp NYI"); } if (MTEIntrinsicID != llvm::Intrinsic::not_intrinsic) { - llvm_unreachable("NYI"); + llvm_unreachable("llvm::Intrinsic::not_intrinsic NYI"); } if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr || @@ -2781,12 +2781,12 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, BuiltinID == clang::AArch64::BI__builtin_arm_wsr128 || BuiltinID == clang::AArch64::BI__builtin_arm_wsrp) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_wsrp NYI"); if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr || BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 || BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 || BuiltinID == clang::AArch64::BI__builtin_arm_rsrp) - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_rsrp NYI"); bool IsPointerBuiltin = BuiltinID == clang::AArch64::BI__builtin_arm_rsrp || BuiltinID == clang::AArch64::BI__builtin_arm_wsrp; @@ -2798,11 +2798,11 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, BuiltinID == clang::AArch64::BI__builtin_arm_wsr128; if (Is32Bit) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_wsr NYI"); } else if (Is128Bit) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_wsr128 NYI"); } else if (IsPointerBuiltin) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_arm_wsrp NYI"); } else { llvm_unreachable("NYI"); }; @@ -2811,21 +2811,21 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } if (BuiltinID == clang::AArch64::BI__builtin_sponentry) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__builtin_sponentry NYI"); } if (BuiltinID == clang::AArch64::BI_ReadStatusReg || BuiltinID == clang::AArch64::BI_WriteStatusReg) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI_WriteStatusReg NYI"); } if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI_AddressOfReturnAddress NYI"); } if (BuiltinID == clang::AArch64::BI__mulh || BuiltinID == clang::AArch64::BI__umulh) { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI__umulh NYI"); } if (BuiltinID == AArch64::BI__writex18byte || @@ -2833,48 +2833,48 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, BuiltinID == AArch64::BI__writex18dword || BuiltinID == AArch64::BI__writex18qword) { // Read x18 as i8* - llvm_unreachable("NYI"); + llvm_unreachable("AArch64::BI__writex18qword NYI"); } if (BuiltinID == AArch64::BI__readx18byte || BuiltinID == AArch64::BI__readx18word || BuiltinID == AArch64::BI__readx18dword || BuiltinID == AArch64::BI__readx18qword) { - llvm_unreachable("NYI"); + llvm_unreachable("AArch64::BI__readx18qword NYI"); } if (BuiltinID == AArch64::BI_CopyDoubleFromInt64 || BuiltinID == AArch64::BI_CopyFloatFromInt32 || BuiltinID == AArch64::BI_CopyInt32FromFloat || BuiltinID == AArch64::BI_CopyInt64FromDouble) { - llvm_unreachable("NYI"); + llvm_unreachable("AArch64::BI_CopyInt64FromDouble NYI"); } if (BuiltinID == AArch64::BI_CountLeadingOnes || BuiltinID == AArch64::BI_CountLeadingOnes64 || BuiltinID == AArch64::BI_CountLeadingZeros || BuiltinID == AArch64::BI_CountLeadingZeros64) { - llvm_unreachable("NYI"); + llvm_unreachable("AArch64::BI_CountLeadingZeros64 NYI"); if (BuiltinID == AArch64::BI_CountLeadingOnes || BuiltinID == AArch64::BI_CountLeadingOnes64) - llvm_unreachable("NYI"); + llvm_unreachable("AArch64::BI_CountLeadingOnes64 NYI"); - llvm_unreachable("NYI"); + llvm_unreachable("BI_CountLeadingZeros64 NYI"); } if (BuiltinID == AArch64::BI_CountLeadingSigns || BuiltinID == AArch64::BI_CountLeadingSigns64) { - llvm_unreachable("NYI"); + llvm_unreachable("BI_CountLeadingSigns64 NYI"); } if (BuiltinID == AArch64::BI_CountOneBits || BuiltinID == AArch64::BI_CountOneBits64) { - llvm_unreachable("NYI"); + llvm_unreachable("AArch64::BI_CountOneBits64 NYI"); } if (BuiltinID == AArch64::BI__prefetch) { - llvm_unreachable("NYI"); + llvm_unreachable("AArch64::BI__prefetch NYI"); } if (BuiltinID == NEON::BI__builtin_neon_vcvth_bf16_f32) @@ -2884,7 +2884,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, // evaluation. if (std::optional MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID)) - llvm_unreachable("NYI"); + llvm_unreachable("translateAarch64ToMsvcIntrin NYI"); // Some intrinsics are equivalent - if they are use the base intrinsic ID. auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) { @@ -2934,7 +2934,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted); if (Builtin) { - llvm_unreachable("NYI"); + llvm_unreachable("Builtin from findARMVectorIntrinsicInMap NYI"); } const Expr *Arg = E->getArg(E->getNumArgs() - 1); @@ -2951,15 +2951,15 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, default: break; case NEON::BI__builtin_neon_vabsh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vabsh_f16 NYI"); case NEON::BI__builtin_neon_vaddq_p128: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddq_p128 NYI"); } case NEON::BI__builtin_neon_vldrq_p128: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vldrq_p128 NYI"); } case NEON::BI__builtin_neon_vstrq_p128: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vstrq_p128 NYI"); } case NEON::BI__builtin_neon_vcvts_f32_u32: case NEON::BI__builtin_neon_vcvtd_f64_u64: @@ -2968,8 +2968,8 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vcvts_f32_s32: case NEON::BI__builtin_neon_vcvtd_f64_s64: { if (usgn) - llvm_unreachable("NYI"); - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvtd_f64_s64 NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvtd_f64_s64 NYI"); } case NEON::BI__builtin_neon_vcvth_f16_u16: case NEON::BI__builtin_neon_vcvth_f16_u32: @@ -2980,8 +2980,8 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vcvth_f16_s32: case NEON::BI__builtin_neon_vcvth_f16_s64: { if (usgn) - llvm_unreachable("NYI"); - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvth_f16_s64 NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvth_f16_s64 NYI"); } case NEON::BI__builtin_neon_vcvtah_u16_f16: case NEON::BI__builtin_neon_vcvtmh_u16_f16: @@ -2993,80 +2993,80 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vcvtnh_s16_f16: case NEON::BI__builtin_neon_vcvtph_s16_f16: case NEON::BI__builtin_neon_vcvth_s16_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvth_s16_f16 NYI"); } case NEON::BI__builtin_neon_vcaleh_f16: case NEON::BI__builtin_neon_vcalth_f16: case NEON::BI__builtin_neon_vcageh_f16: case NEON::BI__builtin_neon_vcagth_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcagth_f16 NYI"); } case NEON::BI__builtin_neon_vcvth_n_s16_f16: case NEON::BI__builtin_neon_vcvth_n_u16_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvth_n_u16_f16 NYI"); } case NEON::BI__builtin_neon_vcvth_n_f16_s16: case NEON::BI__builtin_neon_vcvth_n_f16_u16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvth_n_f16_u16 NYI"); } case NEON::BI__builtin_neon_vpaddd_s64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vpaddd_s64 NYI"); } case NEON::BI__builtin_neon_vpaddd_f64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vpaddd_f64 NYI"); } case NEON::BI__builtin_neon_vpadds_f32: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vpadds_f32 NYI"); } case NEON::BI__builtin_neon_vceqzd_s64: case NEON::BI__builtin_neon_vceqzd_f64: case NEON::BI__builtin_neon_vceqzs_f32: case NEON::BI__builtin_neon_vceqzh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vceqzh_f16 NYI"); case NEON::BI__builtin_neon_vcgezd_s64: case NEON::BI__builtin_neon_vcgezd_f64: case NEON::BI__builtin_neon_vcgezs_f32: case NEON::BI__builtin_neon_vcgezh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcgezh_f16 NYI"); case NEON::BI__builtin_neon_vclezd_s64: case NEON::BI__builtin_neon_vclezd_f64: case NEON::BI__builtin_neon_vclezs_f32: case NEON::BI__builtin_neon_vclezh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vclezh_f16 NYI"); case NEON::BI__builtin_neon_vcgtzd_s64: case NEON::BI__builtin_neon_vcgtzd_f64: case NEON::BI__builtin_neon_vcgtzs_f32: case NEON::BI__builtin_neon_vcgtzh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcgtzh_f16 NYI"); case NEON::BI__builtin_neon_vcltzd_s64: case NEON::BI__builtin_neon_vcltzd_f64: case NEON::BI__builtin_neon_vcltzs_f32: case NEON::BI__builtin_neon_vcltzh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcltzh_f16 NYI"); case NEON::BI__builtin_neon_vceqzd_u64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vceqzd_u64 NYI"); } case NEON::BI__builtin_neon_vceqd_f64: case NEON::BI__builtin_neon_vcled_f64: case NEON::BI__builtin_neon_vcltd_f64: case NEON::BI__builtin_neon_vcged_f64: case NEON::BI__builtin_neon_vcgtd_f64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcgtd_f64 NYI"); } case NEON::BI__builtin_neon_vceqs_f32: case NEON::BI__builtin_neon_vcles_f32: case NEON::BI__builtin_neon_vclts_f32: case NEON::BI__builtin_neon_vcges_f32: case NEON::BI__builtin_neon_vcgts_f32: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcgts_f32 NYI"); } case NEON::BI__builtin_neon_vceqh_f16: case NEON::BI__builtin_neon_vcleh_f16: case NEON::BI__builtin_neon_vclth_f16: case NEON::BI__builtin_neon_vcgeh_f16: case NEON::BI__builtin_neon_vcgth_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcgth_f16 NYI"); } case NEON::BI__builtin_neon_vceqd_s64: case NEON::BI__builtin_neon_vceqd_u64: @@ -3078,11 +3078,11 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vcged_s64: case NEON::BI__builtin_neon_vcled_u64: case NEON::BI__builtin_neon_vcled_s64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcled_s64 NYI"); } case NEON::BI__builtin_neon_vtstd_s64: case NEON::BI__builtin_neon_vtstd_u64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vtstd_u64 NYI"); } case NEON::BI__builtin_neon_vset_lane_i8: case NEON::BI__builtin_neon_vset_lane_i16: @@ -3102,13 +3102,13 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, // No support for now as no real/test case for them // at the moment, the implementation should be the same as above // vset_lane or vsetq_lane intrinsics - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsetq_lane_bf16 NYI"); case NEON::BI__builtin_neon_vset_lane_f64: // The vector type needs a cast for the v1f64 variant. - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vset_lane_f64 NYI"); case NEON::BI__builtin_neon_vsetq_lane_f64: // The vector type needs a cast for the v2f64 variant. - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsetq_lane_f64 NYI"); case NEON::BI__builtin_neon_vget_lane_i8: case NEON::BI__builtin_neon_vdupb_lane_i8: @@ -3183,85 +3183,85 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, return builder.create(getLoc(E->getExprLoc()), Ops[0], emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vaddh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddh_f16 NYI"); case NEON::BI__builtin_neon_vsubh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsubh_f16 NYI"); case NEON::BI__builtin_neon_vmulh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmulh_f16 NYI"); case NEON::BI__builtin_neon_vdivh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vdivh_f16 NYI"); case NEON::BI__builtin_neon_vfmah_f16: // NEON intrinsic puts accumulator first, unlike the LLVM fma. - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vfmah_f16 NYI"); case NEON::BI__builtin_neon_vfmsh_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vfmsh_f16 NYI"); } case NEON::BI__builtin_neon_vaddd_s64: case NEON::BI__builtin_neon_vaddd_u64: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddd_u64 NYI"); case NEON::BI__builtin_neon_vsubd_s64: case NEON::BI__builtin_neon_vsubd_u64: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsubd_u64 NYI"); case NEON::BI__builtin_neon_vqdmlalh_s16: case NEON::BI__builtin_neon_vqdmlslh_s16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqdmlslh_s16 NYI"); } case NEON::BI__builtin_neon_vqshlud_n_s64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqshlud_n_s64 NYI"); } case NEON::BI__builtin_neon_vqshld_n_u64: case NEON::BI__builtin_neon_vqshld_n_s64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqshld_n_s64 NYI"); } case NEON::BI__builtin_neon_vrshrd_n_u64: case NEON::BI__builtin_neon_vrshrd_n_s64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrshrd_n_s64 NYI"); } case NEON::BI__builtin_neon_vrsrad_n_u64: case NEON::BI__builtin_neon_vrsrad_n_s64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrsrad_n_s64 NYI"); } case NEON::BI__builtin_neon_vshld_n_s64: case NEON::BI__builtin_neon_vshld_n_u64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vshld_n_u64 NYI"); } case NEON::BI__builtin_neon_vshrd_n_s64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vshrd_n_s64 NYI"); } case NEON::BI__builtin_neon_vshrd_n_u64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vshrd_n_u64 NYI"); } case NEON::BI__builtin_neon_vsrad_n_s64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsrad_n_s64 NYI"); } case NEON::BI__builtin_neon_vsrad_n_u64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsrad_n_u64 NYI"); } case NEON::BI__builtin_neon_vqdmlalh_lane_s16: case NEON::BI__builtin_neon_vqdmlalh_laneq_s16: case NEON::BI__builtin_neon_vqdmlslh_lane_s16: case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqdmlslh_laneq_s16 NYI"); } case NEON::BI__builtin_neon_vqdmlals_s32: case NEON::BI__builtin_neon_vqdmlsls_s32: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqdmlsls_s32 NYI"); } case NEON::BI__builtin_neon_vqdmlals_lane_s32: case NEON::BI__builtin_neon_vqdmlals_laneq_s32: case NEON::BI__builtin_neon_vqdmlsls_lane_s32: case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqdmlsls_laneq_s32 NYI"); } case NEON::BI__builtin_neon_vget_lane_bf16: case NEON::BI__builtin_neon_vduph_lane_bf16: case NEON::BI__builtin_neon_vduph_lane_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vduph_lane_f16 NYI"); } case NEON::BI__builtin_neon_vgetq_lane_bf16: case NEON::BI__builtin_neon_vduph_laneq_bf16: case NEON::BI__builtin_neon_vduph_laneq_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vduph_laneq_f16 NYI"); } case NEON::BI__builtin_neon_vcvt_bf16_f32: case NEON::BI__builtin_neon_vcvtq_low_bf16_f32: @@ -3270,7 +3270,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case clang::AArch64::BI_InterlockedAdd: case clang::AArch64::BI_InterlockedAdd64: { - llvm_unreachable("NYI"); + llvm_unreachable("clang::AArch64::BI_InterlockedAdd64 NYI"); } } @@ -3298,19 +3298,19 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, return nullptr; case NEON::BI__builtin_neon_vbsl_v: case NEON::BI__builtin_neon_vbslq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vbslq_v NYI"); } case NEON::BI__builtin_neon_vfma_lane_v: case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types // The ARM builtins (and instructions) have the addend as the first // operand, but the 'fma' intrinsics have it last. Swap it around here. - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vfmaq_lane_v NYI"); } case NEON::BI__builtin_neon_vfma_laneq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vfma_laneq_v NYI"); } case NEON::BI__builtin_neon_vfmaq_laneq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vfmaq_laneq_v NYI"); } case NEON::BI__builtin_neon_vfmah_lane_f16: case NEON::BI__builtin_neon_vfmas_lane_f32: @@ -3318,7 +3318,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vfmas_laneq_f32: case NEON::BI__builtin_neon_vfmad_lane_f64: case NEON::BI__builtin_neon_vfmad_laneq_f64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vfmad_laneq_f64 NYI"); } case NEON::BI__builtin_neon_vmull_v: { llvm::StringRef name = usgn ? "aarch64.neon.umull" : "aarch64.neon.smull"; @@ -3331,9 +3331,9 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vmax_v: case NEON::BI__builtin_neon_vmaxq_v: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxq_v NYI"); case NEON::BI__builtin_neon_vmaxh_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxh_f16 NYI"); } case NEON::BI__builtin_neon_vmin_v: case NEON::BI__builtin_neon_vminq_v: { @@ -3344,7 +3344,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vminh_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminh_f16 NYI"); } case NEON::BI__builtin_neon_vabd_v: case NEON::BI__builtin_neon_vabdq_v: { @@ -3356,33 +3356,33 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vpadal_v: case NEON::BI__builtin_neon_vpadalq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vpadalq_v NYI"); } case NEON::BI__builtin_neon_vpmin_v: case NEON::BI__builtin_neon_vpminq_v: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vpminq_v NYI"); case NEON::BI__builtin_neon_vpmax_v: case NEON::BI__builtin_neon_vpmaxq_v: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vpmaxq_v NYI"); case NEON::BI__builtin_neon_vminnm_v: case NEON::BI__builtin_neon_vminnmq_v: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminnmq_v NYI"); case NEON::BI__builtin_neon_vminnmh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminnmh_f16 NYI"); case NEON::BI__builtin_neon_vmaxnm_v: case NEON::BI__builtin_neon_vmaxnmq_v: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxnmq_v NYI"); case NEON::BI__builtin_neon_vmaxnmh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxnmh_f16 NYI"); case NEON::BI__builtin_neon_vrecpss_f32: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrecpss_f32 NYI"); } case NEON::BI__builtin_neon_vrecpsd_f64: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrecpsd_f64 NYI"); case NEON::BI__builtin_neon_vrecpsh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrecpsh_f16 NYI"); case NEON::BI__builtin_neon_vqshrun_n_v: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqshrun_n_v NYI"); case NEON::BI__builtin_neon_vqrshrun_n_v: // The prototype of builtin_neon_vqrshrun_n can be found at // https://developer.arm.com/architectures/instruction-sets/intrinsics/ @@ -3392,7 +3392,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, SInt32Ty}, Ops, "aarch64.neon.sqrshrun", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqshrn_n_v: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqshrn_n_v NYI"); case NEON::BI__builtin_neon_vrshrn_n_v: return emitNeonCall( builder, @@ -3402,9 +3402,9 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, SInt32Ty}, Ops, "aarch64.neon.rshrn", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqrshrn_n_v: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqrshrn_n_v NYI"); case NEON::BI__builtin_neon_vrndah_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndah_f16 NYI"); } case NEON::BI__builtin_neon_vrnda_v: case NEON::BI__builtin_neon_vrndaq_v: { @@ -3413,21 +3413,21 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndih_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndih_f16 NYI"); } case NEON::BI__builtin_neon_vrndmh_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndmh_f16 NYI"); } case NEON::BI__builtin_neon_vrndm_v: case NEON::BI__builtin_neon_vrndmq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndmq_v NYI"); } case NEON::BI__builtin_neon_vrndnh_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndnh_f16 NYI"); } case NEON::BI__builtin_neon_vrndn_v: case NEON::BI__builtin_neon_vrndnq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndnq_v NYI"); } case NEON::BI__builtin_neon_vrndns_f32: { mlir::Value arg0 = emitScalarExpr(E->getArg(0)); @@ -3436,58 +3436,58 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, getCIRGenModule().FloatTy, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndph_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndph_f16 NYI"); } case NEON::BI__builtin_neon_vrndp_v: case NEON::BI__builtin_neon_vrndpq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndpq_v NYI"); } case NEON::BI__builtin_neon_vrndxh_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndxh_f16 NYI"); } case NEON::BI__builtin_neon_vrndx_v: case NEON::BI__builtin_neon_vrndxq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndxq_v NYI"); } case NEON::BI__builtin_neon_vrndh_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndh_f16 NYI"); } case NEON::BI__builtin_neon_vrnd32x_f32: case NEON::BI__builtin_neon_vrnd32xq_f32: case NEON::BI__builtin_neon_vrnd32x_f64: case NEON::BI__builtin_neon_vrnd32xq_f64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrnd32xq_f64 NYI"); } case NEON::BI__builtin_neon_vrnd32z_f32: case NEON::BI__builtin_neon_vrnd32zq_f32: case NEON::BI__builtin_neon_vrnd32z_f64: case NEON::BI__builtin_neon_vrnd32zq_f64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrnd32zq_f64 NYI"); } case NEON::BI__builtin_neon_vrnd64x_f32: case NEON::BI__builtin_neon_vrnd64xq_f32: case NEON::BI__builtin_neon_vrnd64x_f64: case NEON::BI__builtin_neon_vrnd64xq_f64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrnd64xq_f64 NYI"); } case NEON::BI__builtin_neon_vrnd64z_f32: case NEON::BI__builtin_neon_vrnd64zq_f32: case NEON::BI__builtin_neon_vrnd64z_f64: case NEON::BI__builtin_neon_vrnd64zq_f64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrnd64zq_f64 NYI"); } case NEON::BI__builtin_neon_vrnd_v: case NEON::BI__builtin_neon_vrndq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrndq_v NYI"); } case NEON::BI__builtin_neon_vcvt_f64_v: case NEON::BI__builtin_neon_vcvtq_f64_v: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvtq_f64_v NYI"); case NEON::BI__builtin_neon_vcvt_f64_f32: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvt_f64_f32 NYI"); } case NEON::BI__builtin_neon_vcvt_f32_f64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvt_f32_f64 NYI"); } case NEON::BI__builtin_neon_vcvt_s32_v: case NEON::BI__builtin_neon_vcvt_u32_v: @@ -3501,7 +3501,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vcvtq_u64_v: case NEON::BI__builtin_neon_vcvtq_s16_f16: case NEON::BI__builtin_neon_vcvtq_u16_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvtq_u16_f16 NYI"); } case NEON::BI__builtin_neon_vcvta_s16_f16: case NEON::BI__builtin_neon_vcvta_u16_f16: @@ -3515,7 +3515,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vcvtaq_s64_v: case NEON::BI__builtin_neon_vcvta_u64_v: case NEON::BI__builtin_neon_vcvtaq_u64_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvtaq_u64_v NYI"); } case NEON::BI__builtin_neon_vcvtm_s16_f16: case NEON::BI__builtin_neon_vcvtm_s32_v: @@ -3529,7 +3529,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vcvtmq_s64_v: case NEON::BI__builtin_neon_vcvtm_u64_v: case NEON::BI__builtin_neon_vcvtmq_u64_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvtmq_u64_v NYI"); } case NEON::BI__builtin_neon_vcvtn_s16_f16: case NEON::BI__builtin_neon_vcvtn_s32_v: @@ -3543,7 +3543,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vcvtnq_s64_v: case NEON::BI__builtin_neon_vcvtn_u64_v: case NEON::BI__builtin_neon_vcvtnq_u64_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvtnq_u64_v NYI"); } case NEON::BI__builtin_neon_vcvtp_s16_f16: case NEON::BI__builtin_neon_vcvtp_s32_v: @@ -3557,181 +3557,181 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vcvtpq_s64_v: case NEON::BI__builtin_neon_vcvtp_u64_v: case NEON::BI__builtin_neon_vcvtpq_u64_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vcvtpq_u64_v NYI"); } case NEON::BI__builtin_neon_vmulx_v: case NEON::BI__builtin_neon_vmulxq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmulxq_v NYI"); } case NEON::BI__builtin_neon_vmulxh_lane_f16: case NEON::BI__builtin_neon_vmulxh_laneq_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmulxh_laneq_f16 NYI"); } case NEON::BI__builtin_neon_vmul_lane_v: case NEON::BI__builtin_neon_vmul_laneq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmul_laneq_v NYI"); } case NEON::BI__builtin_neon_vnegd_s64: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vnegd_s64 NYI"); case NEON::BI__builtin_neon_vnegh_f16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vnegh_f16 NYI"); case NEON::BI__builtin_neon_vpmaxnm_v: case NEON::BI__builtin_neon_vpmaxnmq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vpmaxnmq_v NYI"); } case NEON::BI__builtin_neon_vpminnm_v: case NEON::BI__builtin_neon_vpminnmq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vpminnmq_v NYI"); } case NEON::BI__builtin_neon_vsqrth_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsqrth_f16 NYI"); } case NEON::BI__builtin_neon_vsqrt_v: case NEON::BI__builtin_neon_vsqrtq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsqrtq_v NYI"); } case NEON::BI__builtin_neon_vrbit_v: case NEON::BI__builtin_neon_vrbitq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrbitq_v NYI"); } case NEON::BI__builtin_neon_vaddv_u8: // FIXME: These are handled by the AArch64 scalar code. - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddv_u8 NYI"); [[fallthrough]]; case NEON::BI__builtin_neon_vaddv_s8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddv_s8 NYI"); } case NEON::BI__builtin_neon_vaddv_u16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddv_u16 NYI"); [[fallthrough]]; case NEON::BI__builtin_neon_vaddv_s16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddv_s16 NYI"); } case NEON::BI__builtin_neon_vaddvq_u8: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddvq_u8 NYI"); [[fallthrough]]; case NEON::BI__builtin_neon_vaddvq_s8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddvq_s8 NYI"); } case NEON::BI__builtin_neon_vaddvq_u16: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddvq_u16 NYI"); [[fallthrough]]; case NEON::BI__builtin_neon_vaddvq_s16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddvq_s16 NYI"); } case NEON::BI__builtin_neon_vmaxv_u8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxv_u8 NYI"); } case NEON::BI__builtin_neon_vmaxv_u16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxv_u16 NYI"); } case NEON::BI__builtin_neon_vmaxvq_u8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxvq_u8 NYI"); } case NEON::BI__builtin_neon_vmaxvq_u16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxvq_u16 NYI"); } case NEON::BI__builtin_neon_vmaxv_s8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxv_s8 NYI"); } case NEON::BI__builtin_neon_vmaxv_s16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxv_s16 NYI"); } case NEON::BI__builtin_neon_vmaxvq_s8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxvq_s8 NYI"); } case NEON::BI__builtin_neon_vmaxvq_s16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxvq_s16 NYI"); } case NEON::BI__builtin_neon_vmaxv_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxv_f16 NYI"); } case NEON::BI__builtin_neon_vmaxvq_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxvq_f16 NYI"); } case NEON::BI__builtin_neon_vminv_u8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminv_u8 NYI"); } case NEON::BI__builtin_neon_vminv_u16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminv_u16 NYI"); } case NEON::BI__builtin_neon_vminvq_u8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminvq_u8 NYI"); } case NEON::BI__builtin_neon_vminvq_u16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminvq_u16 NYI"); } case NEON::BI__builtin_neon_vminv_s8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminv_s8 NYI"); } case NEON::BI__builtin_neon_vminv_s16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminv_s16 NYI"); } case NEON::BI__builtin_neon_vminvq_s8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminvq_s8 NYI"); } case NEON::BI__builtin_neon_vminvq_s16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminvq_s16 NYI"); } case NEON::BI__builtin_neon_vminv_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminv_f16 NYI"); } case NEON::BI__builtin_neon_vminvq_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminvq_f16 NYI"); } case NEON::BI__builtin_neon_vmaxnmv_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxnmv_f16 NYI"); } case NEON::BI__builtin_neon_vmaxnmvq_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmaxnmvq_f16 NYI"); } case NEON::BI__builtin_neon_vminnmv_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminnmv_f16 NYI"); } case NEON::BI__builtin_neon_vminnmvq_f16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vminnmvq_f16 NYI"); } case NEON::BI__builtin_neon_vmul_n_f64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vmul_n_f64 NYI"); } case NEON::BI__builtin_neon_vaddlv_u8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddlv_u8 NYI"); } case NEON::BI__builtin_neon_vaddlv_u16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddlv_u16 NYI"); } case NEON::BI__builtin_neon_vaddlvq_u8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_u8 NYI"); } case NEON::BI__builtin_neon_vaddlvq_u16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_u16 NYI"); } case NEON::BI__builtin_neon_vaddlv_s8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddlv_s8 NYI"); } case NEON::BI__builtin_neon_vaddlv_s16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddlv_s16 NYI"); } case NEON::BI__builtin_neon_vaddlvq_s8: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_s8 NYI"); } case NEON::BI__builtin_neon_vaddlvq_s16: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_s16 NYI"); } case NEON::BI__builtin_neon_vsri_n_v: case NEON::BI__builtin_neon_vsriq_n_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsriq_n_v NYI"); } case NEON::BI__builtin_neon_vsli_n_v: case NEON::BI__builtin_neon_vsliq_n_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsliq_n_v NYI"); } case NEON::BI__builtin_neon_vsra_n_v: case NEON::BI__builtin_neon_vsraq_n_v: - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsraq_n_v NYI"); case NEON::BI__builtin_neon_vrsra_n_v: case NEON::BI__builtin_neon_vrsraq_n_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vrsraq_n_v NYI"); } case NEON::BI__builtin_neon_vld1_v: case NEON::BI__builtin_neon_vld1q_v: { @@ -3755,7 +3755,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vldap1_lane_s64: case NEON::BI__builtin_neon_vldap1q_lane_s64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vldap1q_lane_s64 NYI"); } case NEON::BI__builtin_neon_vld1_dup_v: case NEON::BI__builtin_neon_vld1q_dup_v: { @@ -3775,67 +3775,67 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vstl1_lane_s64: case NEON::BI__builtin_neon_vstl1q_lane_s64: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vstl1q_lane_s64 NYI"); } case NEON::BI__builtin_neon_vld2_v: case NEON::BI__builtin_neon_vld2q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vld2q_v NYI"); } case NEON::BI__builtin_neon_vld3_v: case NEON::BI__builtin_neon_vld3q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vld3q_v NYI"); } case NEON::BI__builtin_neon_vld4_v: case NEON::BI__builtin_neon_vld4q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vld4q_v NYI"); } case NEON::BI__builtin_neon_vld2_dup_v: case NEON::BI__builtin_neon_vld2q_dup_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vld2q_dup_v NYI"); } case NEON::BI__builtin_neon_vld3_dup_v: case NEON::BI__builtin_neon_vld3q_dup_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vld3q_dup_v NYI"); } case NEON::BI__builtin_neon_vld4_dup_v: case NEON::BI__builtin_neon_vld4q_dup_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vld4q_dup_v NYI"); } case NEON::BI__builtin_neon_vld2_lane_v: case NEON::BI__builtin_neon_vld2q_lane_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vld2q_lane_v NYI"); } case NEON::BI__builtin_neon_vld3_lane_v: case NEON::BI__builtin_neon_vld3q_lane_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vld3q_lane_v NYI"); } case NEON::BI__builtin_neon_vld4_lane_v: case NEON::BI__builtin_neon_vld4q_lane_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vld4q_lane_v NYI"); } case NEON::BI__builtin_neon_vst2_v: case NEON::BI__builtin_neon_vst2q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vst2q_v NYI"); } case NEON::BI__builtin_neon_vst2_lane_v: case NEON::BI__builtin_neon_vst2q_lane_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vst2q_lane_v NYI"); } case NEON::BI__builtin_neon_vst3_v: case NEON::BI__builtin_neon_vst3q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vst3q_v NYI"); } case NEON::BI__builtin_neon_vst3_lane_v: case NEON::BI__builtin_neon_vst3q_lane_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vst3q_lane_v NYI"); } case NEON::BI__builtin_neon_vst4_v: case NEON::BI__builtin_neon_vst4q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vst4q_v NYI"); } case NEON::BI__builtin_neon_vst4_lane_v: case NEON::BI__builtin_neon_vst4q_lane_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vst4q_lane_v NYI"); } case NEON::BI__builtin_neon_vtrn_v: case NEON::BI__builtin_neon_vtrnq_v: { @@ -3877,43 +3877,43 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vuzp_v: case NEON::BI__builtin_neon_vuzpq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vuzpq_v NYI"); } case NEON::BI__builtin_neon_vzip_v: case NEON::BI__builtin_neon_vzipq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vzipq_v NYI"); } case NEON::BI__builtin_neon_vqtbl1q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqtbl1q_v NYI"); } case NEON::BI__builtin_neon_vqtbl2q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqtbl2q_v NYI"); } case NEON::BI__builtin_neon_vqtbl3q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqtbl3q_v NYI"); } case NEON::BI__builtin_neon_vqtbl4q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqtbl4q_v NYI"); } case NEON::BI__builtin_neon_vqtbx1q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqtbx1q_v NYI"); } case NEON::BI__builtin_neon_vqtbx2q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqtbx2q_v NYI"); } case NEON::BI__builtin_neon_vqtbx3q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqtbx3q_v NYI"); } case NEON::BI__builtin_neon_vqtbx4q_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vqtbx4q_v NYI"); } case NEON::BI__builtin_neon_vsqadd_v: case NEON::BI__builtin_neon_vsqaddq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vsqaddq_v NYI"); } case NEON::BI__builtin_neon_vuqadd_v: case NEON::BI__builtin_neon_vuqaddq_v: { - llvm_unreachable("NYI"); + llvm_unreachable("NEON::BI__builtin_neon_vuqaddq_v NYI"); } } } From 478122a0f06481ae124731d18a19520e0c7a838f Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 14 Nov 2024 14:10:23 -0500 Subject: [PATCH 2081/2301] [CIR][CIRGen][Builtin][Neon] Lower vcvt_f32_v, vcvtq_f32_v (#1120) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 8 ++++ clang/test/CIR/CodeGen/AArch64/neon-misc.c | 47 +++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 7e75e3f1ba5c..fa17eb22416b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2317,6 +2317,7 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( // Determine the type of this overloaded NEON intrinsic. NeonTypeFlags neonType(neonTypeConst->getZExtValue()); bool isUnsigned = neonType.isUnsigned(); + bool isQuad = neonType.isQuad(); const bool hasLegalHalfType = getTarget().hasLegalHalfType(); // The value of allowBFloatArgsAndRet is true for AArch64, but it should // come from ABI info. @@ -2378,6 +2379,13 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( : "aarch64.neon.sqsub", vTy, getLoc(e->getExprLoc())); } + case NEON::BI__builtin_neon_vcvt_f32_v: + case NEON::BI__builtin_neon_vcvtq_f32_v: { + ops[0] = builder.createBitcast(ops[0], ty); + ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, isQuad), + hasLegalHalfType); + return builder.createCast(cir::CastKind::int_to_float, ops[0], ty); + } case NEON::BI__builtin_neon_vext_v: case NEON::BI__builtin_neon_vextq_v: { int cv = getIntValueFromConstOp(ops[2]); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index 2869a95fe884..a798139de55f 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -860,3 +860,50 @@ uint32x2_t test_vqmovn_u64(uint64x2_t a) { // LLVM: [[VQMOVN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> [[A]]) // LLVM: ret <2 x i32> [[VQMOVN_V1_I]] } +float32x2_t test_vcvt_f32_s32(int32x2_t a) { + return vcvt_f32_s32(a); + + // CIR-LABEL: vcvt_f32_s32 + // {{%.*}} = cir.cast(int_to_float, {{%.*}} : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vcvt_f32_s32(<2 x i32>{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[a]] to <8 x i8> + // LLVM: [[VCVT_I:%.*]] = sitofp <2 x i32> [[a]] to <2 x float> + // LLVM: ret <2 x float> [[VCVT_I]] +} + +float32x2_t test_vcvt_f32_u32(uint32x2_t a) { + return vcvt_f32_u32(a); + + // CIR-LABEL: vcvt_f32_u32 + // {{%.*}} = cir.cast(int_to_float, {{%.*}} : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vcvt_f32_u32(<2 x i32>{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[a]] to <8 x i8> + // LLVM: [[VCVT_I:%.*]] = uitofp <2 x i32> [[a]] to <2 x float> + // LLVM: ret <2 x float> [[VCVT_I]] +} + +float32x4_t test_vcvtq_f32_s32(int32x4_t a) { + return vcvtq_f32_s32(a); + + // CIR-LABEL: vcvtq_f32_s32 + // {{%.*}} = cir.cast(int_to_float, {{%.*}} : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vcvtq_f32_s32(<4 x i32>{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[VCVT_I:%.*]] = sitofp <4 x i32> [[a]] to <4 x float> + // LLVM: ret <4 x float> [[VCVT_I]] +} + +float32x4_t test_vcvtq_f32_u32(uint32x4_t a) { + return vcvtq_f32_u32(a); + + // CIR-LABEL: vcvtq_f32_u32 + // {{%.*}} = cir.cast(int_to_float, {{%.*}} : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vcvtq_f32_u32(<4 x i32>{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[VCVT_I:%.*]] = uitofp <4 x i32> [[a]] to <4 x float> + // LLVM: ret <4 x float> [[VCVT_I]] +} From d9133c9d721a8e48996eff5e027d94e8fcf39c57 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 15 Nov 2024 03:10:40 +0800 Subject: [PATCH 2082/2301] [CIR][LowerToLLVM] Lowering triple from `cir.triple` attribute (#1125) Currently, the final `target triple` in LLVM IR is set in `CIRGenAction`, which is not executed by cir tools like `cir-translate`. This PR delay its assignment to LLVM lowering, enabling sharing the emitting of `target triple` between different invoking paths. --- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 -- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 17 +++++++++++++++-- clang/test/CIR/Tools/cir-translate-triple.cir | 13 +++++++++++++ 3 files changed, 28 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/Tools/cir-translate-triple.cir diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 3bb9cf03c970..7fd1e904e44d 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -292,8 +292,6 @@ class CIRGenConsumer : public clang::ASTConsumer { feOptions.ClangIRDisableCIRVerifier, !feOptions.ClangIRCallConvLowering); - llvmModule->setTargetTriple(targetOptions.Triple); - BackendAction backendAction = getBackendActionFromOutputType(action); emitBackendOutput(compilerInstance, codeGenOptions, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 03d5b20d38d3..9c5d8b575281 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1290,6 +1290,8 @@ struct ConvertCIRToLLVMPass llvm::StringMap &argStringGlobalsMap, llvm::MapVector &argsVarMap); + void processCIRAttrs(mlir::ModuleOp moduleOp); + virtual StringRef getArgument() const override { return "cir-flat-to-llvm"; } }; @@ -4785,6 +4787,18 @@ void ConvertCIRToLLVMPass::buildGlobalAnnotationsVar( } } +void ConvertCIRToLLVMPass::processCIRAttrs(mlir::ModuleOp module) { + // Lower the module attributes to LLVM equivalents. + if (auto tripleAttr = module->getAttr(cir::CIRDialect::getTripleAttrName())) + module->setAttr(mlir::LLVM::LLVMDialect::getTargetTripleAttrName(), + tripleAttr); + + // Strip the CIR attributes. + module->removeAttr(cir::CIRDialect::getSOBAttrName()); + module->removeAttr(cir::CIRDialect::getLangAttrName()); + module->removeAttr(cir::CIRDialect::getTripleAttrName()); +} + void ConvertCIRToLLVMPass::runOnOperation() { llvm::TimeTraceScope scope("Convert CIR to LLVM Pass"); @@ -4835,8 +4849,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { // Allow operations that will be lowered directly to LLVM IR. target.addLegalOp(); - getOperation()->removeAttr(cir::CIRDialect::getSOBAttrName()); - getOperation()->removeAttr(cir::CIRDialect::getLangAttrName()); + processCIRAttrs(module); llvm::SmallVector ops; ops.push_back(module); diff --git a/clang/test/CIR/Tools/cir-translate-triple.cir b/clang/test/CIR/Tools/cir-translate-triple.cir new file mode 100644 index 000000000000..a647df165aba --- /dev/null +++ b/clang/test/CIR/Tools/cir-translate-triple.cir @@ -0,0 +1,13 @@ +// RUN: cir-translate --cir-to-llvmir --disable-cc-lowering %s -o %t.ll +// RUN: FileCheck %s -input-file %t.ll -check-prefix=LLVM + +module attributes { + cir.triple = "x86_64-unknown-linux-gnu", + llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" +} { + cir.func @foo() { + cir.return + } +} + +// LLVM-DAG: target triple = "x86_64-unknown-linux-gnu" From 55d0f532740cb519daf9d8e95934d4073b0a70b5 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Thu, 14 Nov 2024 22:11:46 +0300 Subject: [PATCH 2083/2301] [CIR][ABI][AArch64][Lowering] Support calls for struct types > 128 bits (#1074) As the title says, this PR adds support for calls with struct types > 128 bits, building upon this [PR](https://github.com/llvm/clangir/pull/1068). The idea is gotten from the original Codegen, and I have added a couple of tests. --- clang/include/clang/CIR/MissingFeatures.h | 2 + .../TargetLowering/LowerFunction.cpp | 51 ++++++++++++++++++- .../AArch64/aarch64-cc-structs.c | 37 ++++++++++++++ 3 files changed, 89 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index fbcc3cadb855..211e0d879595 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -404,6 +404,8 @@ struct MissingFeatures { static bool itaniumRecordLayoutBuilderFinishLayout() { return false; } static bool mustProgress() { return false; } + + static bool skipTempCopy() { return false; } }; } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 704242a73b8c..483ce026ee0e 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -704,6 +704,14 @@ LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) { if (auto al = findAlloca(ret)) { rewriter.replaceAllUsesWith(al.getResult(), RVAddr); rewriter.eraseOp(al); + rewriter.setInsertionPoint(ret); + + auto retInputs = ret.getInput(); + assert(retInputs.size() == 1 && "return should only have one input"); + if (auto load = mlir::dyn_cast(retInputs[0].getDefiningOp())) + if (load.getResult().use_empty()) + rewriter.eraseOp(load); + rewriter.replaceOpWithNewOp(ret); } }); @@ -952,6 +960,15 @@ mlir::Value LowerFunction::rewriteCallOp(FuncType calleeTy, FuncOp origCallee, return CallResult; } +mlir::Value createAlloca(mlir::Location loc, mlir::Type type, + LowerFunction &CGF) { + auto align = CGF.LM.getDataLayout().getABITypeAlign(type); + auto alignAttr = CGF.getRewriter().getI64IntegerAttr(align.value()); + return CGF.getRewriter().create( + loc, CGF.getRewriter().getType(type), type, + /*name=*/llvm::StringRef(""), alignAttr); +} + // NOTE(cir): This method has partial parity to CodeGenFunction's EmitCall // method in CGCall.cpp. When incrementing it, use the original codegen as a // reference: add ABI-specific stuff and skip codegen stuff. @@ -984,10 +1001,12 @@ mlir::Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, CIRToCIRArgMapping IRFunctionArgs(LM.getContext(), CallInfo); llvm::SmallVector IRCallArgs(IRFunctionArgs.totalIRArgs()); + mlir::Value sRetPtr; // If the call returns a temporary with struct return, create a temporary // alloca to hold the result, unless one is given to us. if (RetAI.isIndirect() || RetAI.isCoerceAndExpand() || RetAI.isInAlloca()) { - cir_cconv_unreachable("NYI"); + sRetPtr = createAlloca(loc, RetTy, *this); + IRCallArgs[IRFunctionArgs.getSRetArgNo()] = sRetPtr; } cir_cconv_assert(!cir::MissingFeatures::swift()); @@ -1082,6 +1101,32 @@ mlir::Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, break; } + case ABIArgInfo::Indirect: + case ABIArgInfo::IndirectAliased: { + assert(NumIRArgs == 1); + // TODO(cir): For aggregate types + // We want to avoid creating an unnecessary temporary+copy here; + // however, we need one in three cases: + // 1. If the argument is not byval, and we are required to copy the + // 2. If the argument is byval, RV is not sufficiently aligned, and + // source. (This case doesn't occur on any common architecture.) + // we cannot force it to be sufficiently aligned. + // 3. If the argument is byval, but RV is not located in default + // or alloca address space. + cir_cconv_assert(!::cir::MissingFeatures::skipTempCopy()); + + mlir::Value alloca = findAlloca(I->getDefiningOp()); + + // since they are a ARM-specific feature. + if (::cir::MissingFeatures::undef()) + cir_cconv_unreachable("NYI"); + + IRCallArgs[FirstIRArg] = alloca; + + // NOTE(cir): Skipping Emissions, lifetime markers. + + break; + } default: llvm::outs() << "Missing ABIArgInfo::Kind: " << ArgInfo.getKind() << "\n"; cir_cconv_unreachable("NYI"); @@ -1217,6 +1262,10 @@ mlir::Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // done in CIRGen return RetVal; } + case ABIArgInfo::Indirect: { + auto load = rewriter.create(loc, sRetPtr); + return load.getResult(); + } default: llvm::errs() << "Unhandled ABIArgInfo kind: " << RetAI.getKind() << "\n"; cir_cconv_unreachable("NYI"); diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index 1acc75da262f..93f87db39cfb 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -153,6 +153,43 @@ void pass_eq_128(EQ_128 s) {} // LLVM: %[[#V2:]] = load ptr, ptr %[[#V1]], align 8 void pass_gt_128(GT_128 s) {} +// CHECK: cir.func @get_gt_128(%arg0: !cir.ptr {{.*}}, %arg1: !cir.ptr +// CHECK: %[[#V0:]] = cir.alloca !cir.ptr, !cir.ptr>, [""] {alignment = 8 : i64} +// CHECK: cir.store %arg1, %[[#V0]] : !cir.ptr, !cir.ptr> +// CHECK: %[[#V1:]] = cir.load %[[#V0]] : !cir.ptr>, !cir.ptr +// CHECK: cir.copy %[[#V1]] to %arg0 : !cir.ptr +// CHECK: cir.return + +// LLVM: void @get_gt_128(ptr %[[#V0:]], ptr %[[#V1:]]) +// LLVM: %[[#V3:]] = alloca ptr, i64 1, align 8 +// LLVM: store ptr %[[#V1]], ptr %[[#V3]], align 8 +// LLVM: %[[#V4:]] = load ptr, ptr %[[#V3]], align 8 +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[#V0]], ptr %[[#V4]], i32 24, i1 false) +// LLVM: ret void +GT_128 get_gt_128(GT_128 s) { + return s; +} + +// CHECK: cir.func no_proto @call_and_get_gt_128(%arg0: !cir.ptr +// CHECK: %[[#V0:]] = cir.alloca !ty_GT_128_, !cir.ptr, {{.*}} {alignment = 8 : i64} +// CHECK: %[[#V1:]] = cir.alloca !ty_GT_128_, !cir.ptr, {{.*}} {alignment = 8 : i64} +// CHECK: cir.call @get_gt_128(%[[#V1]], %arg0) : (!cir.ptr, !cir.ptr) -> () +// CHECK: %[[#V2:]] = cir.load %[[#V1]] : !cir.ptr, !ty_GT_128_ +// CHECK: cir.store %[[#V2]], %[[#V0]] : !ty_GT_128_, !cir.ptr +// CHECK: cir.return + +// LLVM: void @call_and_get_gt_128(ptr %[[#V0:]]) +// LLVM: %[[#V2:]] = alloca %struct.GT_128, i64 1, align 8 +// LLVM: %[[#V3:]] = alloca %struct.GT_128, i64 1, align 8 +// LLVM: call void @get_gt_128(ptr %[[#V3]], ptr %[[#V0]]) +// LLVM: %[[#V4:]] = load %struct.GT_128, ptr %[[#V3]], align 8 +// LLVM: store %struct.GT_128 %[[#V4]], ptr %[[#V2]], align 8 +// LLVM: ret void +GT_128 call_and_get_gt_128() { + GT_128 s; + s = get_gt_128(s); + return s; +} // CHECK: cir.func @passS(%arg0: !cir.array // CHECK: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, [""] {alignment = 4 : i64} // CHECK: %[[#V1:]] = cir.alloca !cir.array, !cir.ptr>, ["tmp"] {alignment = 8 : i64} From 98809ec84defd6d747d69b6cce4a7e3e4e7829e8 Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 14 Nov 2024 14:13:13 -0500 Subject: [PATCH 2084/2301] [CIR][CIRGen][Builtin] Support __builtin_elementwise_abs and extend AbsOp to take vector input (#1099) Extend AbsOp to take vector of int input. With it, we can support __builtin_elementwise_abs. We should in the next PR extend FpUnaryOps to support vector type input so we won't have blocker to implement all elementwise builtins completely. Now just temporarily have missingFeature `fpUnaryOPsSupportVectorType`. Currently, int type UnaryOp support vector type. FYI: [clang's documentation about elementwise builtins](https://clang.llvm.org/docs/LanguageExtensions.html#vector-builtins) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 ++-- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 1 + .../include/clang/CIR/Dialect/IR/CIRTypes.td | 17 +++++++++++ clang/include/clang/CIR/MissingFeatures.h | 3 ++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 19 +++++++++++-- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 14 +++++++++- clang/test/CIR/CodeGen/builtins-elementwise.c | 28 +++++++++++++++++++ 7 files changed, 81 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtins-elementwise.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e4bb5aea7980..8d4ede9958f0 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4331,8 +4331,8 @@ def SqrtOp : UnaryFPToFPBuiltinOp<"sqrt", "SqrtOp">; def TruncOp : UnaryFPToFPBuiltinOp<"trunc", "FTruncOp">; def AbsOp : CIR_Op<"abs", [Pure, SameOperandsAndResultType]> { - let arguments = (ins PrimitiveSInt:$src, UnitAttr:$poison); - let results = (outs PrimitiveSInt:$result); + let arguments = (ins CIR_AnySignedIntOrVecOfSignedInt:$src, UnitAttr:$poison); + let results = (outs CIR_AnySignedIntOrVecOfSignedInt:$result); let summary = [{ libc builtin equivalent abs, labs, llabs @@ -4345,6 +4345,7 @@ def AbsOp : CIR_Op<"abs", [Pure, SameOperandsAndResultType]> { ```mlir %0 = cir.const #cir.int<-42> : s32i %1 = cir.abs %0 poison : s32i + %2 = cir.abs %3 : !cir.vector ``` }]; let assemblyFormat = "$src ( `poison` $poison^ )? `:` type($src) attr-dict"; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 4e9902792eca..9f6eab7c7ba9 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -184,6 +184,7 @@ class StructType bool isAnyFloatingPointType(mlir::Type t); bool isFPOrFPVectorTy(mlir::Type); +bool isIntOrIntVectorTy(mlir::Type); } // namespace cir mlir::ParseResult parseAddrSpaceAttribute(mlir::AsmParser &p, diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 4317aaf3bb01..f73d80402047 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -60,6 +60,9 @@ def CIR_IntType : CIR_Type<"Int", "int", bool isPrimitive() const { return isValidPrimitiveIntBitwidth(getWidth()); } + bool isSignedPrimitive() const { + return isPrimitive() && isSigned(); + } /// Returns a minimum bitwidth of cir::IntType static unsigned minBitwidth() { return 1; } @@ -538,8 +541,22 @@ def IntegerVector : Type< ]>, "!cir.vector of !cir.int"> { } +// Vector of signed integral type +def SignedIntegerVector : Type< + And<[ + CPred<"::mlir::isa<::cir::VectorType>($_self)">, + CPred<"::mlir::isa<::cir::IntType>(" + "::mlir::cast<::cir::VectorType>($_self).getEltType())">, + CPred<"::mlir::cast<::cir::IntType>(" + "::mlir::cast<::cir::VectorType>($_self).getEltType())" + ".isSignedPrimitive()"> + ]>, "!cir.vector of !cir.int"> { +} + // Constraints def CIR_AnyIntOrVecOfInt: AnyTypeOf<[CIR_IntType, IntegerVector]>; +def CIR_AnySignedIntOrVecOfSignedInt: AnyTypeOf< + [PrimitiveSInt, SignedIntegerVector]>; // Pointer to Arrays def ArrayPtr : Type< diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 211e0d879595..7d59e10809eb 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -328,6 +328,9 @@ struct MissingFeatures { //-- Other missing features + // We need to extend fpUnaryOPs to support vector types. + static bool fpUnaryOPsSupportVectorType() { return false; } + // We need to track the parent record types that represent a field // declaration. This is necessary to determine the layout of a class. static bool fieldDeclAbstraction() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 538e166c5d2c..b5a51d678fd8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1255,9 +1255,22 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_nondeterministic_value: llvm_unreachable("BI__builtin_nondeterministic_value NYI"); - case Builtin::BI__builtin_elementwise_abs: - llvm_unreachable("BI__builtin_elementwise_abs NYI"); - + case Builtin::BI__builtin_elementwise_abs: { + mlir::Type cirTy = ConvertType(E->getArg(0)->getType()); + bool isIntTy = cir::isIntOrIntVectorTy(cirTy); + if (!isIntTy) { + if (cir::isAnyFloatingPointType(cirTy)) { + return emitUnaryFPBuiltin(*this, *E); + } + assert(!MissingFeatures::fpUnaryOPsSupportVectorType()); + llvm_unreachable("unsupported type for BI__builtin_elementwise_abs"); + } + mlir::Value arg = emitScalarExpr(E->getArg(0)); + auto call = getBuilder().create(getLoc(E->getExprLoc()), + arg.getType(), arg, false); + mlir::Value result = call->getResult(0); + return RValue::get(result); + } case Builtin::BI__builtin_elementwise_acos: llvm_unreachable("BI__builtin_elementwise_acos NYI"); case Builtin::BI__builtin_elementwise_asin: diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 2e262478a733..bfa8ef62f54e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -828,7 +828,7 @@ bool cir::isAnyFloatingPointType(mlir::Type t) { } //===----------------------------------------------------------------------===// -// Floating-point and Float-point Vecotr type helpers +// Floating-point and Float-point Vector type helpers //===----------------------------------------------------------------------===// bool cir::isFPOrFPVectorTy(mlir::Type t) { @@ -840,6 +840,18 @@ bool cir::isFPOrFPVectorTy(mlir::Type t) { return isAnyFloatingPointType(t); } +//===----------------------------------------------------------------------===// +// CIR Integer and Integer Vector type helpers +//===----------------------------------------------------------------------===// + +bool cir::isIntOrIntVectorTy(mlir::Type t) { + + if (isa(t)) { + return isa(mlir::dyn_cast(t).getEltType()); + } + return isa(t); +} + //===----------------------------------------------------------------------===// // ComplexType Definitions //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/builtins-elementwise.c b/clang/test/CIR/CodeGen/builtins-elementwise.c new file mode 100644 index 000000000000..af3b975970ac --- /dev/null +++ b/clang/test/CIR/CodeGen/builtins-elementwise.c @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ +// RUN: -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s +// XFAIL: * + +typedef int vint4 __attribute__((ext_vector_type(4))); + +void test_builtin_elementwise_abs(vint4 vi4, int i, float f, double d) { + // CIR-LABEL: test_builtin_elementwise_abs + // LLVM-LABEL: test_builtin_elementwise_abs + // CIR: {{%.*}} = cir.fabs {{%.*}} : !cir.float + // LLVM: {{%.*}} = call float @llvm.fabs.f32(float {{%.*}}) + f = __builtin_elementwise_abs(f); + + // CIR: {{%.*}} = cir.fabs {{%.*}} : !cir.double + // LLVM: {{%.*}} = call double @llvm.fabs.f64(double {{%.*}}) + d = __builtin_elementwise_abs(d); + + // CIR: {{%.*}} = cir.abs {{%.*}} : !cir.vector + // LLVM: {{%.*}} = call <4 x i32> @llvm.abs.v4i32(<4 x i32> {{%.*}}, i1 false) + vi4 = __builtin_elementwise_abs(vi4); + + // CIR: {{%.*}} = cir.abs {{%.*}} : !s32 + // LLVM: {{%.*}} = call i32 @llvm.abs.i32(i32 {{%.*}}, i1 false) + i = __builtin_elementwise_abs(i); +} From 1323fb1c37b9e392eeb079a9de76766be5cd5aeb Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Fri, 15 Nov 2024 13:31:38 +0800 Subject: [PATCH 2085/2301] [ClangIR][Lowering][NFC] Split LowerToLLVM.h from LowerToLLVM.cpp (#1102) This is a NFC patch that moves declaration from LowerToLLVM.cpp. The motivation of the patch is, we hope we can use the abilities from MLIR's standard dialects without lowering **ALL** clangir operation to MLIR's standard dialects. For example, currently we have 86 operations in LowerToLLVM.cpp but only 45 operations under though MLIR. It won't be easy to add proper lowering for all operation to **different** dialects. I think the solution may be to allow **mixed** IR. So that we can lowering CIR to MLIR's standard dialects partially and we can use some existing analysis and optimizations in MLIR and then we can lower all of them (the MLIR dialects and unlowered clangir) to LLVM IR. The hybrid IR is one of the goals of MLIR as far as I know. NOTE: I completely understand that the DirectlyLLVM pipeline is the tier-1 pipeline that we want to support. The idea above won't change this. I just want to offer some oppotunities for the downstream projects and finally some chances to improve the overall ecosystem. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 5880 ++++++++--------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 1024 +++ clang/utils/TableGen/CIRLoweringEmitter.cpp | 33 +- 3 files changed, 3712 insertions(+), 3225 deletions(-) create mode 100644 clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 9c5d8b575281..232666939916 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -9,19 +9,15 @@ // This file implements lowering of CIR operations to LLVMIR. // //===----------------------------------------------------------------------===// +#include "LowerToLLVM.h" #include "LoweringHelpers.h" #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" -#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h" -#include "mlir/Conversion/LLVMCommon/TypeConverter.h" #include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h" #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" -#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" -#include "mlir/Dialect/LLVMIR/LLVMDialect.h" -#include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/Dialect/LLVMIR/Transforms/Passes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" @@ -45,11 +41,6 @@ #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" -#include "mlir/Transforms/DialectConversion.h" -#include "clang/CIR/Dialect/IR/CIRAttrs.h" -#include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "clang/CIR/Dialect/IR/CIROpsEnums.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/MissingFeatures.h" @@ -70,8 +61,6 @@ #include #include -#include "LowerModule.h" - using namespace cir; using namespace llvm; @@ -367,13 +356,9 @@ unsigned getGlobalOpTargetAddrSpace(mlir::ConversionPatternRewriter &rewriter, //===----------------------------------------------------------------------===// /// Switches on the type of attribute and calls the appropriate conversion. -inline mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter); /// IntAttr visitor. -inline mlir::Value +static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::IntAttr intAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { @@ -383,7 +368,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::IntAttr intAttr, } /// BoolAttr visitor. -inline mlir::Value +static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::BoolAttr boolAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { @@ -393,7 +378,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::BoolAttr boolAttr, } /// ConstPtrAttr visitor. -inline mlir::Value +static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstPtrAttr ptrAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { @@ -411,7 +396,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstPtrAttr ptrAttr, } /// FPAttr visitor. -inline mlir::Value +static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::FPAttr fltAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { @@ -421,7 +406,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::FPAttr fltAttr, } /// ZeroAttr visitor. -inline mlir::Value +static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ZeroAttr zeroAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { @@ -431,7 +416,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ZeroAttr zeroAttr, } /// UndefAttr visitor. -inline mlir::Value +static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::UndefAttr undefAttr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter) { @@ -441,10 +426,10 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::UndefAttr undefAttr, } /// ConstStruct visitor. -mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - cir::ConstStructAttr constStruct, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +static mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstStructAttr constStruct, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(constStruct.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); @@ -467,10 +452,10 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } // VTableAttr visitor. -mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - cir::VTableAttr vtableArr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +static mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::VTableAttr vtableArr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(vtableArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); @@ -484,10 +469,10 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } // TypeInfoAttr visitor. -mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - cir::TypeInfoAttr typeinfoArr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +static mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::TypeInfoAttr typeinfoArr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(typeinfoArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); @@ -501,10 +486,10 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } // ConstArrayAttr visitor -mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - cir::ConstArrayAttr constArr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +static mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstArrayAttr constArr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(constArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result; @@ -547,10 +532,10 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } // ConstVectorAttr visitor. -mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - cir::ConstVectorAttr constVec, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +static mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstVectorAttr constVec, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { auto llvmTy = converter->convertType(constVec.getType()); auto loc = parentOp->getLoc(); SmallVector mlirValues; @@ -575,10 +560,10 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } // GlobalViewAttr visitor. -mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, - cir::GlobalViewAttr globalAttr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +static mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::GlobalViewAttr globalAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { auto module = parentOp->getParentOfType(); mlir::Type sourceType; unsigned sourceAddrSpace = 0; @@ -646,10 +631,9 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, } /// Switches on the type of attribute and calls the appropriate conversion. -inline mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { if (const auto intAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, intAttr, rewriter, converter); if (const auto fltAttr = mlir::dyn_cast(attr)) @@ -723,105 +707,75 @@ mlir::LLVM::CConv convertCallingConv(cir::CallingConv callinvConv) { llvm_unreachable("Unknown calling convention"); } -class CIRCopyOpLowering : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::CopyOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - const mlir::Value length = rewriter.create( - op.getLoc(), rewriter.getI32Type(), op.getLength()); - rewriter.replaceOpWithNewOp( - op, adaptor.getDst(), adaptor.getSrc(), length, op.getIsVolatile()); - return mlir::success(); - } -}; - -class CIRMemCpyOpLowering : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::MemCpyOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp( - op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLen(), - /*isVolatile=*/false); - return mlir::success(); - } -}; - -class CIRMemChrOpLowering : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMCopyOpLowering::matchAndRewrite( + cir::CopyOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + const mlir::Value length = rewriter.create( + op.getLoc(), rewriter.getI32Type(), op.getLength()); + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), adaptor.getSrc(), length, op.getIsVolatile()); + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::MemChrOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); - llvm::SmallVector arguments; - const mlir::TypeConverter *converter = getTypeConverter(); - mlir::Type srcTy = converter->convertType(op.getSrc().getType()); - mlir::Type patternTy = converter->convertType(op.getPattern().getType()); - mlir::Type lenTy = converter->convertType(op.getLen().getType()); - auto fnTy = - mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {srcTy, patternTy, lenTy}, - /*isVarArg=*/false); - llvm::StringRef fnName = "memchr"; - getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); - rewriter.replaceOpWithNewOp( - op, mlir::TypeRange{llvmPtrTy}, fnName, - mlir::ValueRange{adaptor.getSrc(), adaptor.getPattern(), - adaptor.getLen()}); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMMemCpyOpLowering::matchAndRewrite( + cir::MemCpyOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLen(), + /*isVolatile=*/false); + return mlir::success(); +} -class CIRMemCpyInlineOpLowering - : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::MemCpyInlineOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp( - op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLenAttr(), - /*isVolatile=*/false); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMMemChrOpLowering::matchAndRewrite( + cir::MemChrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + llvm::SmallVector arguments; + const mlir::TypeConverter *converter = getTypeConverter(); + mlir::Type srcTy = converter->convertType(op.getSrc().getType()); + mlir::Type patternTy = converter->convertType(op.getPattern().getType()); + mlir::Type lenTy = converter->convertType(op.getLen().getType()); + auto fnTy = + mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {srcTy, patternTy, lenTy}, + /*isVarArg=*/false); + llvm::StringRef fnName = "memchr"; + getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); + rewriter.replaceOpWithNewOp( + op, mlir::TypeRange{llvmPtrTy}, fnName, + mlir::ValueRange{adaptor.getSrc(), adaptor.getPattern(), + adaptor.getLen()}); + return mlir::success(); +} -class CIRMemMoveOpLowering : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMMemMoveOpLowering::matchAndRewrite( + cir::MemMoveOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLen(), + /*isVolatile=*/false); + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::MemMoveOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp( - op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLen(), - /*isVolatile=*/false); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMMemCpyInlineOpLowering::matchAndRewrite( + cir::MemCpyInlineOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), adaptor.getSrc(), adaptor.getLenAttr(), + /*isVolatile=*/false); + return mlir::success(); +} -class CIRMemsetOpLowering : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - mlir::LogicalResult - matchAndRewrite(cir::MemSetOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto converted = rewriter.create( - op.getLoc(), mlir::IntegerType::get(op.getContext(), 8), - adaptor.getVal()); - rewriter.replaceOpWithNewOp( - op, adaptor.getDst(), converted, adaptor.getLen(), - /*isVolatile=*/false); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMMemSetOpLowering::matchAndRewrite( + cir::MemSetOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto converted = rewriter.create( + op.getLoc(), mlir::IntegerType::get(op.getContext(), 8), + adaptor.getVal()); + rewriter.replaceOpWithNewOp(op, adaptor.getDst(), + converted, adaptor.getLen(), + /*isVolatile=*/false); + return mlir::success(); +} static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, mlir::Value llvmSrc, mlir::Type llvmDstIntTy, @@ -841,138 +795,120 @@ static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, return rewriter.create(loc, llvmDstIntTy, llvmSrc); } -class CIRPtrStrideOpLowering - : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::PtrStrideOp ptrStrideOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto *tc = getTypeConverter(); - const auto resultTy = tc->convertType(ptrStrideOp.getType()); - auto elementTy = tc->convertType(ptrStrideOp.getElementTy()); - auto *ctx = elementTy.getContext(); - - // void and function types doesn't really have a layout to use in GEPs, - // make it i8 instead. - if (mlir::isa(elementTy) || - mlir::isa(elementTy)) - elementTy = mlir::IntegerType::get(elementTy.getContext(), 8, - mlir::IntegerType::Signless); - - // Zero-extend, sign-extend or trunc the pointer value. - auto index = adaptor.getStride(); - auto width = mlir::cast(index.getType()).getWidth(); - mlir::DataLayout LLVMLayout(ptrStrideOp->getParentOfType()); - auto layoutWidth = - LLVMLayout.getTypeIndexBitwidth(adaptor.getBase().getType()); - auto indexOp = index.getDefiningOp(); - if (indexOp && layoutWidth && width != *layoutWidth) { - // If the index comes from a subtraction, make sure the extension happens - // before it. To achieve that, look at unary minus, which already got - // lowered to "sub 0, x". - auto sub = dyn_cast(indexOp); - auto unary = dyn_cast_if_present( - ptrStrideOp.getStride().getDefiningOp()); - bool rewriteSub = - unary && unary.getKind() == cir::UnaryOpKind::Minus && sub; - if (rewriteSub) - index = indexOp->getOperand(1); - - // Handle the cast - auto llvmDstType = mlir::IntegerType::get(ctx, *layoutWidth); - index = getLLVMIntCast(rewriter, index, llvmDstType, - ptrStrideOp.getStride().getType().isUnsigned(), - width, *layoutWidth); - - // Rewrite the sub in front of extensions/trunc - if (rewriteSub) { - index = rewriter.create( - index.getLoc(), index.getType(), - rewriter.create( - index.getLoc(), index.getType(), - mlir::IntegerAttr::get(index.getType(), 0)), - index); - rewriter.eraseOp(sub); - } - } +mlir::LogicalResult CIRToLLVMPtrStrideOpLowering::matchAndRewrite( + cir::PtrStrideOp ptrStrideOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto *tc = getTypeConverter(); + const auto resultTy = tc->convertType(ptrStrideOp.getType()); + auto elementTy = tc->convertType(ptrStrideOp.getElementTy()); + auto *ctx = elementTy.getContext(); + + // void and function types doesn't really have a layout to use in GEPs, + // make it i8 instead. + if (mlir::isa(elementTy) || + mlir::isa(elementTy)) + elementTy = mlir::IntegerType::get(elementTy.getContext(), 8, + mlir::IntegerType::Signless); - rewriter.replaceOpWithNewOp( - ptrStrideOp, resultTy, elementTy, adaptor.getBase(), index); - return mlir::success(); + // Zero-extend, sign-extend or trunc the pointer value. + auto index = adaptor.getStride(); + auto width = mlir::cast(index.getType()).getWidth(); + mlir::DataLayout LLVMLayout(ptrStrideOp->getParentOfType()); + auto layoutWidth = + LLVMLayout.getTypeIndexBitwidth(adaptor.getBase().getType()); + auto indexOp = index.getDefiningOp(); + if (indexOp && layoutWidth && width != *layoutWidth) { + // If the index comes from a subtraction, make sure the extension happens + // before it. To achieve that, look at unary minus, which already got + // lowered to "sub 0, x". + auto sub = dyn_cast(indexOp); + auto unary = dyn_cast_if_present( + ptrStrideOp.getStride().getDefiningOp()); + bool rewriteSub = + unary && unary.getKind() == cir::UnaryOpKind::Minus && sub; + if (rewriteSub) + index = indexOp->getOperand(1); + + // Handle the cast + auto llvmDstType = mlir::IntegerType::get(ctx, *layoutWidth); + index = getLLVMIntCast(rewriter, index, llvmDstType, + ptrStrideOp.getStride().getType().isUnsigned(), + width, *layoutWidth); + + // Rewrite the sub in front of extensions/trunc + if (rewriteSub) { + index = rewriter.create( + index.getLoc(), index.getType(), + rewriter.create( + index.getLoc(), index.getType(), + mlir::IntegerAttr::get(index.getType(), 0)), + index); + rewriter.eraseOp(sub); + } } -}; -class CIRBaseClassAddrOpLowering - : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::BaseClassAddrOp baseClassOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - const auto resultType = - getTypeConverter()->convertType(baseClassOp.getType()); - mlir::Value derivedAddr = adaptor.getDerivedAddr(); - llvm::SmallVector offset = { - adaptor.getOffset().getZExtValue()}; - mlir::Type byteType = mlir::IntegerType::get(resultType.getContext(), 8, - mlir::IntegerType::Signless); - if (adaptor.getOffset().getZExtValue() == 0) { - rewriter.replaceOpWithNewOp( - baseClassOp, resultType, adaptor.getDerivedAddr()); - return mlir::success(); - } + rewriter.replaceOpWithNewOp( + ptrStrideOp, resultTy, elementTy, adaptor.getBase(), index); + return mlir::success(); +} - if (baseClassOp.getAssumeNotNull()) { - rewriter.replaceOpWithNewOp( - baseClassOp, resultType, byteType, derivedAddr, offset); - } else { - auto loc = baseClassOp.getLoc(); - mlir::Value isNull = rewriter.create( - loc, mlir::LLVM::ICmpPredicate::eq, derivedAddr, - rewriter.create(loc, derivedAddr.getType())); - mlir::Value adjusted = rewriter.create( - loc, resultType, byteType, derivedAddr, offset); - rewriter.replaceOpWithNewOp(baseClassOp, isNull, - derivedAddr, adjusted); - } +mlir::LogicalResult CIRToLLVMBaseClassAddrOpLowering::matchAndRewrite( + cir::BaseClassAddrOp baseClassOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + const auto resultType = + getTypeConverter()->convertType(baseClassOp.getType()); + mlir::Value derivedAddr = adaptor.getDerivedAddr(); + llvm::SmallVector offset = { + adaptor.getOffset().getZExtValue()}; + mlir::Type byteType = mlir::IntegerType::get(resultType.getContext(), 8, + mlir::IntegerType::Signless); + if (adaptor.getOffset().getZExtValue() == 0) { + rewriter.replaceOpWithNewOp( + baseClassOp, resultType, adaptor.getDerivedAddr()); return mlir::success(); } -}; -class CIRDerivedClassAddrOpLowering - : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::DerivedClassAddrOp derivedClassOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - const auto resultType = - getTypeConverter()->convertType(derivedClassOp.getType()); - mlir::Value baseAddr = adaptor.getBaseAddr(); - int64_t offsetVal = adaptor.getOffset().getZExtValue() * -1; - llvm::SmallVector offset = {offsetVal}; - mlir::Type byteType = mlir::IntegerType::get(resultType.getContext(), 8, - mlir::IntegerType::Signless); - if (derivedClassOp.getAssumeNotNull()) { - rewriter.replaceOpWithNewOp( - derivedClassOp, resultType, byteType, baseAddr, offset); - } else { - auto loc = derivedClassOp.getLoc(); - mlir::Value isNull = rewriter.create( - loc, mlir::LLVM::ICmpPredicate::eq, baseAddr, - rewriter.create(loc, baseAddr.getType())); - mlir::Value adjusted = rewriter.create( - loc, resultType, byteType, baseAddr, offset); - rewriter.replaceOpWithNewOp(derivedClassOp, isNull, - baseAddr, adjusted); - } - return mlir::success(); + if (baseClassOp.getAssumeNotNull()) { + rewriter.replaceOpWithNewOp( + baseClassOp, resultType, byteType, derivedAddr, offset); + } else { + auto loc = baseClassOp.getLoc(); + mlir::Value isNull = rewriter.create( + loc, mlir::LLVM::ICmpPredicate::eq, derivedAddr, + rewriter.create(loc, derivedAddr.getType())); + mlir::Value adjusted = rewriter.create( + loc, resultType, byteType, derivedAddr, offset); + rewriter.replaceOpWithNewOp(baseClassOp, isNull, + derivedAddr, adjusted); } -}; + return mlir::success(); +} + +mlir::LogicalResult CIRToLLVMDerivedClassAddrOpLowering::matchAndRewrite( + cir::DerivedClassAddrOp derivedClassOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + const auto resultType = + getTypeConverter()->convertType(derivedClassOp.getType()); + mlir::Value baseAddr = adaptor.getBaseAddr(); + int64_t offsetVal = adaptor.getOffset().getZExtValue() * -1; + llvm::SmallVector offset = {offsetVal}; + mlir::Type byteType = mlir::IntegerType::get(resultType.getContext(), 8, + mlir::IntegerType::Signless); + if (derivedClassOp.getAssumeNotNull()) { + rewriter.replaceOpWithNewOp(derivedClassOp, resultType, + byteType, baseAddr, offset); + } else { + auto loc = derivedClassOp.getLoc(); + mlir::Value isNull = rewriter.create( + loc, mlir::LLVM::ICmpPredicate::eq, baseAddr, + rewriter.create(loc, baseAddr.getType())); + mlir::Value adjusted = rewriter.create( + loc, resultType, byteType, baseAddr, offset); + rewriter.replaceOpWithNewOp(derivedClassOp, isNull, + baseAddr, adjusted); + } + return mlir::success(); +} static mlir::Value getValueForVTableSymbol(mlir::Operation *op, @@ -991,290 +927,265 @@ getValueForVTableSymbol(mlir::Operation *op, nameAttr.getValue()); } -class CIRVTTAddrPointOpLowering - : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VTTAddrPointOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - const mlir::Type resultType = getTypeConverter()->convertType(op.getType()); - llvm::SmallVector offsets; - mlir::Type eltType; - mlir::Value llvmAddr = adaptor.getSymAddr(); - - if (op.getSymAddr()) { - if (op.getOffset() == 0) { - rewriter.replaceAllUsesWith(op, llvmAddr); - rewriter.eraseOp(op); - return mlir::success(); - } - - offsets.push_back(adaptor.getOffset()); - eltType = mlir::IntegerType::get(resultType.getContext(), 8, - mlir::IntegerType::Signless); - } else { - llvmAddr = getValueForVTableSymbol(op, rewriter, getTypeConverter(), - op.getNameAttr(), eltType); - assert(eltType && "Shouldn't ever be missing an eltType here"); - offsets.push_back(0); - offsets.push_back(adaptor.getOffset()); +mlir::LogicalResult CIRToLLVMVTTAddrPointOpLowering::matchAndRewrite( + cir::VTTAddrPointOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + const mlir::Type resultType = getTypeConverter()->convertType(op.getType()); + llvm::SmallVector offsets; + mlir::Type eltType; + mlir::Value llvmAddr = adaptor.getSymAddr(); + + if (op.getSymAddr()) { + if (op.getOffset() == 0) { + rewriter.replaceAllUsesWith(op, llvmAddr); + rewriter.eraseOp(op); + return mlir::success(); } - rewriter.replaceOpWithNewOp(op, resultType, eltType, - llvmAddr, offsets, true); - return mlir::success(); - } -}; -class CIRBrCondOpLowering : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; + offsets.push_back(adaptor.getOffset()); + eltType = mlir::IntegerType::get(resultType.getContext(), 8, + mlir::IntegerType::Signless); + } else { + llvmAddr = getValueForVTableSymbol(op, rewriter, getTypeConverter(), + op.getNameAttr(), eltType); + assert(eltType && "Shouldn't ever be missing an eltType here"); + offsets.push_back(0); + offsets.push_back(adaptor.getOffset()); + } + rewriter.replaceOpWithNewOp(op, resultType, eltType, + llvmAddr, offsets, true); + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::BrCondOp brOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Value i1Condition; +mlir::LogicalResult CIRToLLVMBrCondOpLowering::matchAndRewrite( + cir::BrCondOp brOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Value i1Condition; - auto hasOneUse = false; + auto hasOneUse = false; - if (auto defOp = brOp.getCond().getDefiningOp()) - hasOneUse = defOp->getResult(0).hasOneUse(); + if (auto defOp = brOp.getCond().getDefiningOp()) + hasOneUse = defOp->getResult(0).hasOneUse(); - if (auto defOp = adaptor.getCond().getDefiningOp()) { - if (auto zext = dyn_cast(defOp)) { - if (zext->use_empty() && - zext->getOperand(0).getType() == rewriter.getI1Type()) { - i1Condition = zext->getOperand(0); - if (hasOneUse) - rewriter.eraseOp(zext); - } + if (auto defOp = adaptor.getCond().getDefiningOp()) { + if (auto zext = dyn_cast(defOp)) { + if (zext->use_empty() && + zext->getOperand(0).getType() == rewriter.getI1Type()) { + i1Condition = zext->getOperand(0); + if (hasOneUse) + rewriter.eraseOp(zext); } } + } - if (!i1Condition) - i1Condition = rewriter.create( - brOp.getLoc(), rewriter.getI1Type(), adaptor.getCond()); + if (!i1Condition) + i1Condition = rewriter.create( + brOp.getLoc(), rewriter.getI1Type(), adaptor.getCond()); - rewriter.replaceOpWithNewOp( - brOp, i1Condition, brOp.getDestTrue(), adaptor.getDestOperandsTrue(), - brOp.getDestFalse(), adaptor.getDestOperandsFalse()); + rewriter.replaceOpWithNewOp( + brOp, i1Condition, brOp.getDestTrue(), adaptor.getDestOperandsTrue(), + brOp.getDestFalse(), adaptor.getDestOperandsFalse()); - return mlir::success(); - } -}; + return mlir::success(); +} -class CIRCastOpLowering : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; +mlir::Type CIRToLLVMCastOpLowering::convertTy(mlir::Type ty) const { + return getTypeConverter()->convertType(ty); +} - inline mlir::Type convertTy(mlir::Type ty) const { - return getTypeConverter()->convertType(ty); - } +mlir::LogicalResult CIRToLLVMCastOpLowering::matchAndRewrite( + cir::CastOp castOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // For arithmetic conversions, LLVM IR uses the same instruction to convert + // both individual scalars and entire vectors. This lowering pass handles + // both situations. + + auto src = adaptor.getSrc(); + + switch (castOp.getKind()) { + case cir::CastKind::array_to_ptrdecay: { + const auto ptrTy = mlir::cast(castOp.getType()); + auto sourceValue = adaptor.getOperands().front(); + auto targetType = convertTy(ptrTy); + auto elementTy = convertTy(ptrTy.getPointee()); + auto offset = llvm::SmallVector{0}; + rewriter.replaceOpWithNewOp( + castOp, targetType, elementTy, sourceValue, offset); + break; + } + case cir::CastKind::int_to_bool: { + auto zero = rewriter.create( + src.getLoc(), castOp.getSrc().getType(), + cir::IntAttr::get(castOp.getSrc().getType(), 0)); + rewriter.replaceOpWithNewOp( + castOp, cir::BoolType::get(getContext()), cir::CmpOpKind::ne, + castOp.getSrc(), zero); + break; + } + case cir::CastKind::integral: { + auto srcType = castOp.getSrc().getType(); + auto dstType = castOp.getResult().getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstType = getTypeConverter()->convertType(dstType); + cir::IntType srcIntType = + mlir::cast(elementTypeIfVector(srcType)); + cir::IntType dstIntType = + mlir::cast(elementTypeIfVector(dstType)); + rewriter.replaceOp(castOp, getLLVMIntCast(rewriter, llvmSrcVal, llvmDstType, + srcIntType.isUnsigned(), + srcIntType.getWidth(), + dstIntType.getWidth())); + break; + } + case cir::CastKind::floating: { + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = + getTypeConverter()->convertType(castOp.getResult().getType()); + + auto srcTy = elementTypeIfVector(castOp.getSrc().getType()); + auto dstTy = elementTypeIfVector(castOp.getResult().getType()); + + if (!mlir::isa(dstTy) || + !mlir::isa(srcTy)) + return castOp.emitError() << "NYI cast from " << srcTy << " to " << dstTy; + + auto getFloatWidth = [](mlir::Type ty) -> unsigned { + return mlir::cast(ty).getWidth(); + }; - mlir::LogicalResult - matchAndRewrite(cir::CastOp castOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // For arithmetic conversions, LLVM IR uses the same instruction to convert - // both individual scalars and entire vectors. This lowering pass handles - // both situations. - - auto src = adaptor.getSrc(); - - switch (castOp.getKind()) { - case cir::CastKind::array_to_ptrdecay: { - const auto ptrTy = mlir::cast(castOp.getType()); - auto sourceValue = adaptor.getOperands().front(); - auto targetType = convertTy(ptrTy); - auto elementTy = convertTy(ptrTy.getPointee()); - auto offset = llvm::SmallVector{0}; - rewriter.replaceOpWithNewOp( - castOp, targetType, elementTy, sourceValue, offset); - break; - } - case cir::CastKind::int_to_bool: { - auto zero = rewriter.create( - src.getLoc(), castOp.getSrc().getType(), - cir::IntAttr::get(castOp.getSrc().getType(), 0)); - rewriter.replaceOpWithNewOp( - castOp, cir::BoolType::get(getContext()), cir::CmpOpKind::ne, - castOp.getSrc(), zero); - break; - } - case cir::CastKind::integral: { - auto srcType = castOp.getSrc().getType(); - auto dstType = castOp.getResult().getType(); - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstType = getTypeConverter()->convertType(dstType); - cir::IntType srcIntType = - mlir::cast(elementTypeIfVector(srcType)); - cir::IntType dstIntType = - mlir::cast(elementTypeIfVector(dstType)); - rewriter.replaceOp( - castOp, getLLVMIntCast(rewriter, llvmSrcVal, llvmDstType, - srcIntType.isUnsigned(), srcIntType.getWidth(), - dstIntType.getWidth())); - break; - } - case cir::CastKind::floating: { - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = - getTypeConverter()->convertType(castOp.getResult().getType()); - - auto srcTy = elementTypeIfVector(castOp.getSrc().getType()); - auto dstTy = elementTypeIfVector(castOp.getResult().getType()); - - if (!mlir::isa(dstTy) || - !mlir::isa(srcTy)) - return castOp.emitError() - << "NYI cast from " << srcTy << " to " << dstTy; - - auto getFloatWidth = [](mlir::Type ty) -> unsigned { - return mlir::cast(ty).getWidth(); - }; - - if (getFloatWidth(srcTy) > getFloatWidth(dstTy)) - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - llvmSrcVal); - else - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + if (getFloatWidth(srcTy) > getFloatWidth(dstTy)) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); - return mlir::success(); - } - case cir::CastKind::int_to_ptr: { - auto dstTy = mlir::cast(castOp.getType()); - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - llvmSrcVal); - return mlir::success(); - } - case cir::CastKind::ptr_to_int: { - auto dstTy = mlir::cast(castOp.getType()); - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - llvmSrcVal); - return mlir::success(); - } - case cir::CastKind::float_to_bool: { - auto dstTy = mlir::cast(castOp.getType()); - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); - auto kind = mlir::LLVM::FCmpPredicate::une; - - // Check if float is not equal to zero. - auto zeroFloat = rewriter.create( - castOp.getLoc(), llvmSrcVal.getType(), - mlir::FloatAttr::get(llvmSrcVal.getType(), 0.0)); - - // Extend comparison result to either bool (C++) or int (C). - mlir::Value cmpResult = rewriter.create( - castOp.getLoc(), kind, llvmSrcVal, zeroFloat); - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - cmpResult); - return mlir::success(); - } - case cir::CastKind::bool_to_int: { - auto dstTy = mlir::cast(castOp.getType()); - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmSrcTy = mlir::cast(llvmSrcVal.getType()); - auto llvmDstTy = - mlir::cast(getTypeConverter()->convertType(dstTy)); - if (llvmSrcTy.getWidth() == llvmDstTy.getWidth()) - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - llvmSrcVal); - else - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case cir::CastKind::int_to_ptr: { + auto dstTy = mlir::cast(castOp.getType()); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); - return mlir::success(); - } - case cir::CastKind::bool_to_float: { - auto dstTy = castOp.getType(); - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + return mlir::success(); + } + case cir::CastKind::ptr_to_int: { + auto dstTy = mlir::cast(castOp.getType()); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); - return mlir::success(); - } - case cir::CastKind::int_to_float: { - auto dstTy = castOp.getType(); - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); - if (mlir::cast( - elementTypeIfVector(castOp.getSrc().getType())) - .isSigned()) - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - llvmSrcVal); - else - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - llvmSrcVal); - return mlir::success(); - } - case cir::CastKind::float_to_int: { - auto dstTy = castOp.getType(); - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); - if (mlir::cast( - elementTypeIfVector(castOp.getResult().getType())) - .isSigned()) - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - llvmSrcVal); - else - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - llvmSrcVal); - return mlir::success(); - } - case cir::CastKind::bitcast: { - auto dstTy = castOp.getType(); - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); + return mlir::success(); + } + case cir::CastKind::float_to_bool: { + auto dstTy = mlir::cast(castOp.getType()); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + auto kind = mlir::LLVM::FCmpPredicate::une; + + // Check if float is not equal to zero. + auto zeroFloat = rewriter.create( + castOp.getLoc(), llvmSrcVal.getType(), + mlir::FloatAttr::get(llvmSrcVal.getType(), 0.0)); + + // Extend comparison result to either bool (C++) or int (C). + mlir::Value cmpResult = rewriter.create( + castOp.getLoc(), kind, llvmSrcVal, zeroFloat); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + cmpResult); + return mlir::success(); + } + case cir::CastKind::bool_to_int: { + auto dstTy = mlir::cast(castOp.getType()); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmSrcTy = mlir::cast(llvmSrcVal.getType()); + auto llvmDstTy = + mlir::cast(getTypeConverter()->convertType(dstTy)); + if (llvmSrcTy.getWidth() == llvmDstTy.getWidth()) rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); - return mlir::success(); - } - case cir::CastKind::ptr_to_bool: { - auto zero = - mlir::IntegerAttr::get(mlir::IntegerType::get(getContext(), 64), 0); - auto null = rewriter.create( - src.getLoc(), castOp.getSrc().getType(), - cir::ConstPtrAttr::get(getContext(), castOp.getSrc().getType(), - zero)); - rewriter.replaceOpWithNewOp( - castOp, cir::BoolType::get(getContext()), cir::CmpOpKind::ne, - castOp.getSrc(), null); - break; - } - case cir::CastKind::address_space: { - auto dstTy = castOp.getType(); - auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); - rewriter.replaceOpWithNewOp( - castOp, llvmDstTy, llvmSrcVal); - break; - } - default: { - return castOp.emitError("Unhandled cast kind: ") - << castOp.getKindAttrName(); - } - } - + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); return mlir::success(); } -}; + case cir::CastKind::bool_to_float: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case cir::CastKind::int_to_float: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + if (mlir::cast(elementTypeIfVector(castOp.getSrc().getType())) + .isSigned()) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case cir::CastKind::float_to_int: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + if (mlir::cast( + elementTypeIfVector(castOp.getResult().getType())) + .isSigned()) + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + else + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case cir::CastKind::bitcast: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + return mlir::success(); + } + case cir::CastKind::ptr_to_bool: { + auto zero = + mlir::IntegerAttr::get(mlir::IntegerType::get(getContext(), 64), 0); + auto null = rewriter.create( + src.getLoc(), castOp.getSrc().getType(), + cir::ConstPtrAttr::get(getContext(), castOp.getSrc().getType(), zero)); + rewriter.replaceOpWithNewOp( + castOp, cir::BoolType::get(getContext()), cir::CmpOpKind::ne, + castOp.getSrc(), null); + break; + } + case cir::CastKind::address_space: { + auto dstTy = castOp.getType(); + auto llvmSrcVal = adaptor.getOperands().front(); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); + rewriter.replaceOpWithNewOp(castOp, llvmDstTy, + llvmSrcVal); + break; + } + default: { + return castOp.emitError("Unhandled cast kind: ") + << castOp.getKindAttrName(); + } + } -class CIRReturnLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::ReturnOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, - adaptor.getOperands()); - return mlir::LogicalResult::success(); - } -}; +mlir::LogicalResult CIRToLLVMReturnOpLowering::matchAndRewrite( + cir::ReturnOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + rewriter.replaceOpWithNewOp(op, adaptor.getOperands()); + return mlir::LogicalResult::success(); +} struct ConvertCIRToLLVMPass : public mlir::PassWrapper { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::CallOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - return rewriteToCallOrInvoke(op.getOperation(), adaptor.getOperands(), - rewriter, getTypeConverter(), - op.getCalleeAttr()); - } -}; - -class CIRTryCallLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMCallOpLowering::matchAndRewrite( + cir::CallOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + return rewriteToCallOrInvoke(op.getOperation(), adaptor.getOperands(), + rewriter, getTypeConverter(), + op.getCalleeAttr()); +} - mlir::LogicalResult - matchAndRewrite(cir::TryCallOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - if (op.getCallingConv() != cir::CallingConv::C) { - return op.emitError( - "non-C calling convention is not implemented for try_call"); - } - return rewriteToCallOrInvoke( - op.getOperation(), adaptor.getOperands(), rewriter, getTypeConverter(), - op.getCalleeAttr(), op.getCont(), op.getLandingPad()); +mlir::LogicalResult CIRToLLVMTryCallOpLowering::matchAndRewrite( + cir::TryCallOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + if (op.getCallingConv() != cir::CallingConv::C) { + return op.emitError( + "non-C calling convention is not implemented for try_call"); } -}; + return rewriteToCallOrInvoke(op.getOperation(), adaptor.getOperands(), + rewriter, getTypeConverter(), op.getCalleeAttr(), + op.getCont(), op.getLandingPad()); +} static mlir::LLVM::LLVMStructType getLLVMLandingPadStructTy(mlir::ConversionPatternRewriter &rewriter) { @@ -1385,184 +1286,154 @@ getLLVMLandingPadStructTy(mlir::ConversionPatternRewriter &rewriter) { return mlir::LLVM::LLVMStructType::getLiteral(ctx, structFields); } -class CIREhInflightOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::EhInflightOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Location loc = op.getLoc(); - auto llvmLandingPadStructTy = getLLVMLandingPadStructTy(rewriter); - mlir::ArrayAttr symListAttr = op.getSymTypeListAttr(); - mlir::SmallVector symAddrs; - - auto llvmFn = op->getParentOfType(); - assert(llvmFn && "expected LLVM function parent"); - mlir::Block *entryBlock = &llvmFn.getRegion().front(); - assert(entryBlock->isEntryBlock()); - - // %x = landingpad { ptr, i32 } - // Note that since llvm.landingpad has to be the first operation on the - // block, any needed value for its operands has to be added somewhere else. - if (symListAttr) { - // catch ptr @_ZTIi - // catch ptr @_ZTIPKc - for (mlir::Attribute attr : op.getSymTypeListAttr()) { - auto symAttr = cast(attr); - // Generate `llvm.mlir.addressof` for each symbol, and place those - // operations in the LLVM function entry basic block. - mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPointToStart(entryBlock); - mlir::Value addrOp = rewriter.create( - loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), - symAttr.getValue()); - symAddrs.push_back(addrOp); - } - } else { - if (!op.getCleanup()) { - // catch ptr null - mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPointToStart(entryBlock); - mlir::Value nullOp = rewriter.create( - loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext())); - symAddrs.push_back(nullOp); - } +mlir::LogicalResult CIRToLLVMEhInflightOpLowering::matchAndRewrite( + cir::EhInflightOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Location loc = op.getLoc(); + auto llvmLandingPadStructTy = getLLVMLandingPadStructTy(rewriter); + mlir::ArrayAttr symListAttr = op.getSymTypeListAttr(); + mlir::SmallVector symAddrs; + + auto llvmFn = op->getParentOfType(); + assert(llvmFn && "expected LLVM function parent"); + mlir::Block *entryBlock = &llvmFn.getRegion().front(); + assert(entryBlock->isEntryBlock()); + + // %x = landingpad { ptr, i32 } + // Note that since llvm.landingpad has to be the first operation on the + // block, any needed value for its operands has to be added somewhere else. + if (symListAttr) { + // catch ptr @_ZTIi + // catch ptr @_ZTIPKc + for (mlir::Attribute attr : op.getSymTypeListAttr()) { + auto symAttr = cast(attr); + // Generate `llvm.mlir.addressof` for each symbol, and place those + // operations in the LLVM function entry basic block. + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPointToStart(entryBlock); + mlir::Value addrOp = rewriter.create( + loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), + symAttr.getValue()); + symAddrs.push_back(addrOp); } - - // %slot = extractvalue { ptr, i32 } %x, 0 - // %selector = extractvalue { ptr, i32 } %x, 1 - auto padOp = rewriter.create( - loc, llvmLandingPadStructTy, symAddrs); - SmallVector slotIdx = {0}; - SmallVector selectorIdx = {1}; - - if (op.getCleanup()) - padOp.setCleanup(true); - - mlir::Value slot = - rewriter.create(loc, padOp, slotIdx); - mlir::Value selector = - rewriter.create(loc, padOp, selectorIdx); - - rewriter.replaceOp(op, mlir::ValueRange{slot, selector}); - - // Landing pads are required to be in LLVM functions with personality - // attribute. FIXME: for now hardcode personality creation in order to start - // adding exception tests, once we annotate CIR with such information, - // change it to be in FuncOp lowering instead. - { + } else { + if (!op.getCleanup()) { + // catch ptr null mlir::OpBuilder::InsertionGuard guard(rewriter); - // Insert personality decl before the current function. - rewriter.setInsertionPoint(llvmFn); - auto personalityFnTy = - mlir::LLVM::LLVMFunctionType::get(rewriter.getI32Type(), {}, - /*isVarArg=*/true); - // Get or create `__gxx_personality_v0` - StringRef fnName = "__gxx_personality_v0"; - getOrCreateLLVMFuncOp(rewriter, op, fnName, personalityFnTy); - llvmFn.setPersonality(fnName); + rewriter.setInsertionPointToStart(entryBlock); + mlir::Value nullOp = rewriter.create( + loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext())); + symAddrs.push_back(nullOp); } - return mlir::success(); } -}; -class CIRAllocaLowering : public mlir::OpConversionPattern { - mlir::DataLayout const &dataLayout; - // Track globals created for annotation related strings - llvm::StringMap &stringGlobalsMap; - // Track globals created for annotation arg related strings. - // They are different from annotation strings, as strings used in args - // are not in llvmMetadataSectionName, and also has aligment 1. - llvm::StringMap &argStringGlobalsMap; - // Track globals created for annotation args. - llvm::MapVector &argsVarMap; + // %slot = extractvalue { ptr, i32 } %x, 0 + // %selector = extractvalue { ptr, i32 } %x, 1 + auto padOp = rewriter.create( + loc, llvmLandingPadStructTy, symAddrs); + SmallVector slotIdx = {0}; + SmallVector selectorIdx = {1}; -public: - CIRAllocaLowering( - mlir::TypeConverter const &typeConverter, - mlir::DataLayout const &dataLayout, - llvm::StringMap &stringGlobalsMap, - llvm::StringMap &argStringGlobalsMap, - llvm::MapVector &argsVarMap, - mlir::MLIRContext *context) - : OpConversionPattern(typeConverter, context), - dataLayout(dataLayout), stringGlobalsMap(stringGlobalsMap), - argStringGlobalsMap(argStringGlobalsMap), argsVarMap(argsVarMap) {} - - void buildAllocaAnnotations(mlir::LLVM::AllocaOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter, - mlir::ArrayAttr annotationValuesArray) const { - mlir::ModuleOp module = op->getParentOfType(); - mlir::OpBuilder globalVarBuilder(module.getContext()); + if (op.getCleanup()) + padOp.setCleanup(true); - mlir::OpBuilder::InsertPoint afterAlloca = rewriter.saveInsertionPoint(); - globalVarBuilder.setInsertionPointToEnd(&module.getBodyRegion().front()); + mlir::Value slot = + rewriter.create(loc, padOp, slotIdx); + mlir::Value selector = + rewriter.create(loc, padOp, selectorIdx); - mlir::Location loc = op.getLoc(); - mlir::OpBuilder varInitBuilder(module.getContext()); - varInitBuilder.restoreInsertionPoint(afterAlloca); + rewriter.replaceOp(op, mlir::ValueRange{slot, selector}); - auto intrinRetTy = mlir::LLVM::LLVMVoidType::get(getContext()); - constexpr const char *intrinNameAttr = "llvm.var.annotation.p0.p0"; - for (mlir::Attribute entry : annotationValuesArray) { - SmallVector intrinsicArgs; - intrinsicArgs.push_back(op.getRes()); - auto annot = cast(entry); - lowerAnnotationValue(loc, loc, annot, module, varInitBuilder, - globalVarBuilder, stringGlobalsMap, - argStringGlobalsMap, argsVarMap, intrinsicArgs); - rewriter.create( - loc, intrinRetTy, mlir::StringAttr::get(getContext(), intrinNameAttr), - intrinsicArgs); - } + // Landing pads are required to be in LLVM functions with personality + // attribute. FIXME: for now hardcode personality creation in order to start + // adding exception tests, once we annotate CIR with such information, + // change it to be in FuncOp lowering instead. + { + mlir::OpBuilder::InsertionGuard guard(rewriter); + // Insert personality decl before the current function. + rewriter.setInsertionPoint(llvmFn); + auto personalityFnTy = + mlir::LLVM::LLVMFunctionType::get(rewriter.getI32Type(), {}, + /*isVarArg=*/true); + // Get or create `__gxx_personality_v0` + StringRef fnName = "__gxx_personality_v0"; + getOrCreateLLVMFuncOp(rewriter, op, fnName, personalityFnTy); + llvmFn.setPersonality(fnName); } + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::AllocaOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Value size = - op.isDynamic() - ? adaptor.getDynAllocSize() - : rewriter.create( - op.getLoc(), - typeConverter->convertType(rewriter.getIndexType()), - rewriter.getIntegerAttr(rewriter.getIndexType(), 1)); - auto elementTy = getTypeConverter()->convertType(op.getAllocaType()); - auto resultTy = getTypeConverter()->convertType(op.getResult().getType()); - // Verification between the CIR alloca AS and the one from data layout. - { - auto resPtrTy = mlir::cast(resultTy); - auto dlAllocaASAttr = mlir::cast_if_present( - dataLayout.getAllocaMemorySpace()); - // Absence means 0 - // TODO: The query for the alloca AS should be done through CIRDataLayout - // instead to reuse the logic of interpret null attr as 0. - auto dlAllocaAS = dlAllocaASAttr ? dlAllocaASAttr.getInt() : 0; - if (dlAllocaAS != resPtrTy.getAddressSpace()) { - return op.emitError() << "alloca address space doesn't match the one " - "from the target data layout: " - << dlAllocaAS; - } +void CIRToLLVMAllocaOpLowering::buildAllocaAnnotations( + mlir::LLVM::AllocaOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::ArrayAttr annotationValuesArray) const { + mlir::ModuleOp module = op->getParentOfType(); + mlir::OpBuilder globalVarBuilder(module.getContext()); + + mlir::OpBuilder::InsertPoint afterAlloca = rewriter.saveInsertionPoint(); + globalVarBuilder.setInsertionPointToEnd(&module.getBodyRegion().front()); + + mlir::Location loc = op.getLoc(); + mlir::OpBuilder varInitBuilder(module.getContext()); + varInitBuilder.restoreInsertionPoint(afterAlloca); + + auto intrinRetTy = mlir::LLVM::LLVMVoidType::get(getContext()); + constexpr const char *intrinNameAttr = "llvm.var.annotation.p0.p0"; + for (mlir::Attribute entry : annotationValuesArray) { + SmallVector intrinsicArgs; + intrinsicArgs.push_back(op.getRes()); + auto annot = cast(entry); + lowerAnnotationValue(loc, loc, annot, module, varInitBuilder, + globalVarBuilder, stringGlobalsMap, + argStringGlobalsMap, argsVarMap, intrinsicArgs); + rewriter.create( + loc, intrinRetTy, mlir::StringAttr::get(getContext(), intrinNameAttr), + intrinsicArgs); + } +} + +mlir::LogicalResult CIRToLLVMAllocaOpLowering::matchAndRewrite( + cir::AllocaOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Value size = + op.isDynamic() ? adaptor.getDynAllocSize() + : rewriter.create( + op.getLoc(), + typeConverter->convertType(rewriter.getIndexType()), + rewriter.getIntegerAttr(rewriter.getIndexType(), 1)); + auto elementTy = getTypeConverter()->convertType(op.getAllocaType()); + auto resultTy = getTypeConverter()->convertType(op.getResult().getType()); + // Verification between the CIR alloca AS and the one from data layout. + { + auto resPtrTy = mlir::cast(resultTy); + auto dlAllocaASAttr = mlir::cast_if_present( + dataLayout.getAllocaMemorySpace()); + // Absence means 0 + // TODO: The query for the alloca AS should be done through CIRDataLayout + // instead to reuse the logic of interpret null attr as 0. + auto dlAllocaAS = dlAllocaASAttr ? dlAllocaASAttr.getInt() : 0; + if (dlAllocaAS != resPtrTy.getAddressSpace()) { + return op.emitError() << "alloca address space doesn't match the one " + "from the target data layout: " + << dlAllocaAS; } + } - // If there are annotations available, copy them out before we destroy the - // original cir.alloca. - mlir::ArrayAttr annotations; - if (op.getAnnotations()) - annotations = op.getAnnotationsAttr(); + // If there are annotations available, copy them out before we destroy the + // original cir.alloca. + mlir::ArrayAttr annotations; + if (op.getAnnotations()) + annotations = op.getAnnotationsAttr(); - auto llvmAlloca = rewriter.replaceOpWithNewOp( - op, resultTy, elementTy, size, op.getAlignmentAttr().getInt()); + auto llvmAlloca = rewriter.replaceOpWithNewOp( + op, resultTy, elementTy, size, op.getAlignmentAttr().getInt()); - if (annotations && !annotations.empty()) - buildAllocaAnnotations(llvmAlloca, adaptor, rewriter, annotations); - return mlir::success(); - } -}; + if (annotations && !annotations.empty()) + buildAllocaAnnotations(llvmAlloca, adaptor, rewriter, annotations); + return mlir::success(); +} -static mlir::LLVM::AtomicOrdering +mlir::LLVM::AtomicOrdering getLLVMMemOrder(std::optional &memorder) { if (!memorder) return mlir::LLVM::AtomicOrdering::not_atomic; @@ -1582,64 +1453,52 @@ getLLVMMemOrder(std::optional &memorder) { llvm_unreachable("unknown memory order"); } -class CIRLoadLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::LoadOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - const auto llvmTy = - getTypeConverter()->convertType(op.getResult().getType()); - auto memorder = op.getMemOrder(); - auto ordering = getLLVMMemOrder(memorder); - auto alignOpt = op.getAlignment(); - unsigned alignment = 0; - if (!alignOpt) { - mlir::DataLayout layout(op->getParentOfType()); - alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); - } else { - alignment = *alignOpt; - } - - // TODO: nontemporal, invariant, syncscope. - rewriter.replaceOpWithNewOp( - op, llvmTy, adaptor.getAddr(), /* alignment */ alignment, - op.getIsVolatile(), /* nontemporal */ false, - /* invariant */ false, /* invariantGroup */ false, ordering); - return mlir::LogicalResult::success(); +mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite( + cir::LoadOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + const auto llvmTy = getTypeConverter()->convertType(op.getResult().getType()); + auto memorder = op.getMemOrder(); + auto ordering = getLLVMMemOrder(memorder); + auto alignOpt = op.getAlignment(); + unsigned alignment = 0; + if (!alignOpt) { + mlir::DataLayout layout(op->getParentOfType()); + alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); + } else { + alignment = *alignOpt; } -}; -class CIRStoreLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::StoreOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto memorder = op.getMemOrder(); - auto ordering = getLLVMMemOrder(memorder); - auto alignOpt = op.getAlignment(); - unsigned alignment = 0; - - if (!alignOpt) { - const auto llvmTy = - getTypeConverter()->convertType(op.getValue().getType()); - mlir::DataLayout layout(op->getParentOfType()); - alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); - } else { - alignment = *alignOpt; - } + // TODO: nontemporal, invariant, syncscope. + rewriter.replaceOpWithNewOp( + op, llvmTy, adaptor.getAddr(), /* alignment */ alignment, + op.getIsVolatile(), /* nontemporal */ false, + /* invariant */ false, /* invariantGroup */ false, ordering); + return mlir::LogicalResult::success(); +} - // TODO: nontemporal, syncscope. - rewriter.replaceOpWithNewOp( - op, adaptor.getValue(), adaptor.getAddr(), alignment, - op.getIsVolatile(), /* nontemporal */ false, /* invariantGroup */ false, - ordering); - return mlir::LogicalResult::success(); +mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite( + cir::StoreOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto memorder = op.getMemOrder(); + auto ordering = getLLVMMemOrder(memorder); + auto alignOpt = op.getAlignment(); + unsigned alignment = 0; + + if (!alignOpt) { + const auto llvmTy = + getTypeConverter()->convertType(op.getValue().getType()); + mlir::DataLayout layout(op->getParentOfType()); + alignment = (unsigned)layout.getTypeABIAlignment(llvmTy); + } else { + alignment = *alignOpt; } -}; + + // TODO: nontemporal, syncscope. + rewriter.replaceOpWithNewOp( + op, adaptor.getValue(), adaptor.getAddr(), alignment, op.getIsVolatile(), + /* nontemporal */ false, /* invariantGroup */ false, ordering); + return mlir::LogicalResult::success(); +} bool hasTrailingZeros(cir::ConstArrayAttr attr) { auto array = mlir::dyn_cast(attr.getElts()); @@ -1672,1313 +1531,1152 @@ lowerDataMemberAttr(mlir::ModuleOp moduleOp, cir::DataMemberAttr attr, return mlir::IntegerAttr::get(underlyingIntTy, memberOffset); } -class CIRConstantLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::ConstantOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Attribute attr = op.getValue(); - - if (mlir::isa(op.getType())) { - int value = (op.getValue() == - cir::BoolAttr::get(getContext(), - cir::BoolType::get(getContext()), true)); - attr = rewriter.getIntegerAttr(typeConverter->convertType(op.getType()), - value); - } else if (mlir::isa(op.getType())) { - attr = rewriter.getIntegerAttr( - typeConverter->convertType(op.getType()), - mlir::cast(op.getValue()).getValue()); - } else if (mlir::isa(op.getType())) { - attr = rewriter.getFloatAttr( - typeConverter->convertType(op.getType()), - mlir::cast(op.getValue()).getValue()); - } else if (auto complexTy = - mlir::dyn_cast(op.getType())) { - auto complexAttr = mlir::cast(op.getValue()); - auto complexElemTy = complexTy.getElementTy(); - auto complexElemLLVMTy = typeConverter->convertType(complexElemTy); - - mlir::Attribute components[2]; - if (mlir::isa(complexElemTy)) { - components[0] = rewriter.getIntegerAttr( - complexElemLLVMTy, - mlir::cast(complexAttr.getReal()).getValue()); - components[1] = rewriter.getIntegerAttr( - complexElemLLVMTy, - mlir::cast(complexAttr.getImag()).getValue()); - } else { - components[0] = rewriter.getFloatAttr( - complexElemLLVMTy, - mlir::cast(complexAttr.getReal()).getValue()); - components[1] = rewriter.getFloatAttr( - complexElemLLVMTy, - mlir::cast(complexAttr.getImag()).getValue()); - } +mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( + cir::ConstantOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Attribute attr = op.getValue(); + + if (mlir::isa(op.getType())) { + int value = (op.getValue() == + cir::BoolAttr::get(getContext(), + cir::BoolType::get(getContext()), true)); + attr = rewriter.getIntegerAttr(typeConverter->convertType(op.getType()), + value); + } else if (mlir::isa(op.getType())) { + attr = rewriter.getIntegerAttr( + typeConverter->convertType(op.getType()), + mlir::cast(op.getValue()).getValue()); + } else if (mlir::isa(op.getType())) { + attr = rewriter.getFloatAttr( + typeConverter->convertType(op.getType()), + mlir::cast(op.getValue()).getValue()); + } else if (auto complexTy = mlir::dyn_cast(op.getType())) { + auto complexAttr = mlir::cast(op.getValue()); + auto complexElemTy = complexTy.getElementTy(); + auto complexElemLLVMTy = typeConverter->convertType(complexElemTy); + + mlir::Attribute components[2]; + if (mlir::isa(complexElemTy)) { + components[0] = rewriter.getIntegerAttr( + complexElemLLVMTy, + mlir::cast(complexAttr.getReal()).getValue()); + components[1] = rewriter.getIntegerAttr( + complexElemLLVMTy, + mlir::cast(complexAttr.getImag()).getValue()); + } else { + components[0] = rewriter.getFloatAttr( + complexElemLLVMTy, + mlir::cast(complexAttr.getReal()).getValue()); + components[1] = rewriter.getFloatAttr( + complexElemLLVMTy, + mlir::cast(complexAttr.getImag()).getValue()); + } - attr = rewriter.getArrayAttr(components); - } else if (mlir::isa(op.getType())) { - // Optimize with dedicated LLVM op for null pointers. - if (mlir::isa(op.getValue())) { - if (mlir::cast(op.getValue()).isNullValue()) { - rewriter.replaceOpWithNewOp( - op, typeConverter->convertType(op.getType())); - return mlir::success(); - } - } - // Lower GlobalViewAttr to llvm.mlir.addressof - if (auto gv = mlir::dyn_cast(op.getValue())) { - auto newOp = lowerCirAttrAsValue(op, gv, rewriter, getTypeConverter()); - rewriter.replaceOp(op, newOp); + attr = rewriter.getArrayAttr(components); + } else if (mlir::isa(op.getType())) { + // Optimize with dedicated LLVM op for null pointers. + if (mlir::isa(op.getValue())) { + if (mlir::cast(op.getValue()).isNullValue()) { + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType())); return mlir::success(); } - attr = op.getValue(); - } else if (mlir::isa(op.getType())) { - auto dataMember = mlir::cast(op.getValue()); - attr = lowerDataMemberAttr(op->getParentOfType(), - dataMember, *typeConverter); } - // TODO(cir): constant arrays are currently just pushed into the stack using - // the store instruction, instead of being stored as global variables and - // then memcopyied into the stack (as done in Clang). - else if (auto arrTy = mlir::dyn_cast(op.getType())) { - // Fetch operation constant array initializer. - - auto constArr = mlir::dyn_cast(op.getValue()); - if (!constArr && !isa(op.getValue())) - return op.emitError() << "array does not have a constant initializer"; - - std::optional denseAttr; - if (constArr && hasTrailingZeros(constArr)) { - auto newOp = - lowerCirAttrAsValue(op, constArr, rewriter, getTypeConverter()); - rewriter.replaceOp(op, newOp); - return mlir::success(); - } else if (constArr && - (denseAttr = lowerConstArrayAttr(constArr, typeConverter))) { - attr = denseAttr.value(); - } else { - auto initVal = - lowerCirAttrAsValue(op, op.getValue(), rewriter, typeConverter); - rewriter.replaceAllUsesWith(op, initVal); - rewriter.eraseOp(op); - return mlir::success(); - } - } else if (const auto structAttr = - mlir::dyn_cast(op.getValue())) { - // TODO(cir): this diverges from traditional lowering. Normally the - // initializer would be a global constant that is memcopied. Here we just - // define a local constant with llvm.undef that will be stored into the - // stack. + // Lower GlobalViewAttr to llvm.mlir.addressof + if (auto gv = mlir::dyn_cast(op.getValue())) { + auto newOp = lowerCirAttrAsValue(op, gv, rewriter, getTypeConverter()); + rewriter.replaceOp(op, newOp); + return mlir::success(); + } + attr = op.getValue(); + } else if (mlir::isa(op.getType())) { + auto dataMember = mlir::cast(op.getValue()); + attr = lowerDataMemberAttr(op->getParentOfType(), + dataMember, *typeConverter); + } + // TODO(cir): constant arrays are currently just pushed into the stack using + // the store instruction, instead of being stored as global variables and + // then memcopyied into the stack (as done in Clang). + else if (auto arrTy = mlir::dyn_cast(op.getType())) { + // Fetch operation constant array initializer. + + auto constArr = mlir::dyn_cast(op.getValue()); + if (!constArr && !isa(op.getValue())) + return op.emitError() << "array does not have a constant initializer"; + + std::optional denseAttr; + if (constArr && hasTrailingZeros(constArr)) { + auto newOp = + lowerCirAttrAsValue(op, constArr, rewriter, getTypeConverter()); + rewriter.replaceOp(op, newOp); + return mlir::success(); + } else if (constArr && + (denseAttr = lowerConstArrayAttr(constArr, typeConverter))) { + attr = denseAttr.value(); + } else { auto initVal = - lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter); + lowerCirAttrAsValue(op, op.getValue(), rewriter, typeConverter); rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); - } else if (auto strTy = mlir::dyn_cast(op.getType())) { - auto attr = op.getValue(); - if (mlir::isa(attr)) { - auto initVal = lowerCirAttrAsValue(op, attr, rewriter, typeConverter); - rewriter.replaceAllUsesWith(op, initVal); - rewriter.eraseOp(op); - return mlir::success(); - } - - return op.emitError() << "unsupported lowering for struct constant type " - << op.getType(); - } else if (const auto vecTy = - mlir::dyn_cast(op.getType())) { - rewriter.replaceOp(op, lowerCirAttrAsValue(op, op.getValue(), rewriter, - getTypeConverter())); + } + } else if (const auto structAttr = + mlir::dyn_cast(op.getValue())) { + // TODO(cir): this diverges from traditional lowering. Normally the + // initializer would be a global constant that is memcopied. Here we just + // define a local constant with llvm.undef that will be stored into the + // stack. + auto initVal = lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter); + rewriter.replaceAllUsesWith(op, initVal); + rewriter.eraseOp(op); + return mlir::success(); + } else if (auto strTy = mlir::dyn_cast(op.getType())) { + auto attr = op.getValue(); + if (mlir::isa(attr)) { + auto initVal = lowerCirAttrAsValue(op, attr, rewriter, typeConverter); + rewriter.replaceAllUsesWith(op, initVal); + rewriter.eraseOp(op); return mlir::success(); - } else - return op.emitError() << "unsupported constant type " << op.getType(); - - rewriter.replaceOpWithNewOp( - op, getTypeConverter()->convertType(op.getType()), attr); + } + return op.emitError() << "unsupported lowering for struct constant type " + << op.getType(); + } else if (const auto vecTy = mlir::dyn_cast(op.getType())) { + rewriter.replaceOp(op, lowerCirAttrAsValue(op, op.getValue(), rewriter, + getTypeConverter())); return mlir::success(); - } -}; + } else + return op.emitError() << "unsupported constant type " << op.getType(); -class CIRVectorCreateLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VecCreateOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // Start with an 'undef' value for the vector. Then 'insertelement' for - // each of the vector elements. - auto vecTy = mlir::dyn_cast(op.getType()); - assert(vecTy && "result type of cir.vec.create op is not VectorType"); - auto llvmTy = typeConverter->convertType(vecTy); - auto loc = op.getLoc(); - mlir::Value result = rewriter.create(loc, llvmTy); - assert(vecTy.getSize() == op.getElements().size() && - "cir.vec.create op count doesn't match vector type elements count"); - for (uint64_t i = 0; i < vecTy.getSize(); ++i) { - mlir::Value indexValue = rewriter.create( - loc, rewriter.getI64Type(), i); - result = rewriter.create( - loc, result, adaptor.getElements()[i], indexValue); - } - rewriter.replaceOp(op, result); - return mlir::success(); - } -}; + rewriter.replaceOpWithNewOp( + op, getTypeConverter()->convertType(op.getType()), attr); -class CIRVectorCmpOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VecCmpOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - assert(mlir::isa(op.getType()) && - mlir::isa(op.getLhs().getType()) && - mlir::isa(op.getRhs().getType()) && - "Vector compare with non-vector type"); - // LLVM IR vector comparison returns a vector of i1. This one-bit vector - // must be sign-extended to the correct result type. - auto elementType = elementTypeIfVector(op.getLhs().getType()); - mlir::Value bitResult; - if (auto intType = mlir::dyn_cast(elementType)) { - bitResult = rewriter.create( - op.getLoc(), - convertCmpKindToICmpPredicate(op.getKind(), intType.isSigned()), - adaptor.getLhs(), adaptor.getRhs()); - } else if (mlir::isa(elementType)) { - bitResult = rewriter.create( - op.getLoc(), convertCmpKindToFCmpPredicate(op.getKind()), - adaptor.getLhs(), adaptor.getRhs()); - } else { - return op.emitError() << "unsupported type for VecCmpOp: " << elementType; - } - rewriter.replaceOpWithNewOp( - op, typeConverter->convertType(op.getType()), bitResult); - return mlir::success(); - } -}; + return mlir::success(); +} -class CIRVectorSplatLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VecSplatOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // Vector splat can be implemented with an `insertelement` and a - // `shufflevector`, which is better than an `insertelement` for each - // element in the vector. Start with an undef vector. Insert the value into - // the first element. Then use a `shufflevector` with a mask of all 0 to - // fill out the entire vector with that value. - auto vecTy = mlir::dyn_cast(op.getType()); - assert(vecTy && "result type of cir.vec.splat op is not VectorType"); - auto llvmTy = typeConverter->convertType(vecTy); - auto loc = op.getLoc(); - mlir::Value undef = rewriter.create(loc, llvmTy); +mlir::LogicalResult CIRToLLVMVecCreateOpLowering::matchAndRewrite( + cir::VecCreateOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // Start with an 'undef' value for the vector. Then 'insertelement' for + // each of the vector elements. + auto vecTy = mlir::dyn_cast(op.getType()); + assert(vecTy && "result type of cir.vec.create op is not VectorType"); + auto llvmTy = typeConverter->convertType(vecTy); + auto loc = op.getLoc(); + mlir::Value result = rewriter.create(loc, llvmTy); + assert(vecTy.getSize() == op.getElements().size() && + "cir.vec.create op count doesn't match vector type elements count"); + for (uint64_t i = 0; i < vecTy.getSize(); ++i) { mlir::Value indexValue = - rewriter.create(loc, rewriter.getI64Type(), 0); - mlir::Value elementValue = adaptor.getValue(); - mlir::Value oneElement = rewriter.create( - loc, undef, elementValue, indexValue); - SmallVector zeroValues(vecTy.getSize(), 0); - mlir::Value shuffled = rewriter.create( - loc, oneElement, undef, zeroValues); - rewriter.replaceOp(op, shuffled); - return mlir::success(); + rewriter.create(loc, rewriter.getI64Type(), i); + result = rewriter.create( + loc, result, adaptor.getElements()[i], indexValue); } -}; + rewriter.replaceOp(op, result); + return mlir::success(); +} -class CIRVectorTernaryLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VecTernaryOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - assert(mlir::isa(op.getType()) && - mlir::isa(op.getCond().getType()) && - mlir::isa(op.getVec1().getType()) && - mlir::isa(op.getVec2().getType()) && - "Vector ternary op with non-vector type"); - // Convert `cond` into a vector of i1, then use that in a `select` op. - mlir::Value bitVec = rewriter.create( - op.getLoc(), mlir::LLVM::ICmpPredicate::ne, adaptor.getCond(), - rewriter.create( - op.getCond().getLoc(), - typeConverter->convertType(op.getCond().getType()))); - rewriter.replaceOpWithNewOp( - op, bitVec, adaptor.getVec1(), adaptor.getVec2()); - return mlir::success(); +mlir::LogicalResult CIRToLLVMVecCmpOpLowering::matchAndRewrite( + cir::VecCmpOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + assert(mlir::isa(op.getType()) && + mlir::isa(op.getLhs().getType()) && + mlir::isa(op.getRhs().getType()) && + "Vector compare with non-vector type"); + // LLVM IR vector comparison returns a vector of i1. This one-bit vector + // must be sign-extended to the correct result type. + auto elementType = elementTypeIfVector(op.getLhs().getType()); + mlir::Value bitResult; + if (auto intType = mlir::dyn_cast(elementType)) { + bitResult = rewriter.create( + op.getLoc(), + convertCmpKindToICmpPredicate(op.getKind(), intType.isSigned()), + adaptor.getLhs(), adaptor.getRhs()); + } else if (mlir::isa(elementType)) { + bitResult = rewriter.create( + op.getLoc(), convertCmpKindToFCmpPredicate(op.getKind()), + adaptor.getLhs(), adaptor.getRhs()); + } else { + return op.emitError() << "unsupported type for VecCmpOp: " << elementType; } -}; + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType()), bitResult); + return mlir::success(); +} -class CIRVectorShuffleIntsLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VecShuffleOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // LLVM::ShuffleVectorOp takes an ArrayRef of int for the list of indices. - // Convert the ClangIR ArrayAttr of IntAttr constants into a - // SmallVector. - SmallVector indices; - std::transform( - op.getIndices().begin(), op.getIndices().end(), - std::back_inserter(indices), [](mlir::Attribute intAttr) { - return mlir::cast(intAttr).getValue().getSExtValue(); - }); - rewriter.replaceOpWithNewOp( - op, adaptor.getVec1(), adaptor.getVec2(), indices); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMVecSplatOpLowering::matchAndRewrite( + cir::VecSplatOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // Vector splat can be implemented with an `insertelement` and a + // `shufflevector`, which is better than an `insertelement` for each + // element in the vector. Start with an undef vector. Insert the value into + // the first element. Then use a `shufflevector` with a mask of all 0 to + // fill out the entire vector with that value. + auto vecTy = mlir::dyn_cast(op.getType()); + assert(vecTy && "result type of cir.vec.splat op is not VectorType"); + auto llvmTy = typeConverter->convertType(vecTy); + auto loc = op.getLoc(); + mlir::Value undef = rewriter.create(loc, llvmTy); + mlir::Value indexValue = + rewriter.create(loc, rewriter.getI64Type(), 0); + mlir::Value elementValue = adaptor.getValue(); + mlir::Value oneElement = rewriter.create( + loc, undef, elementValue, indexValue); + SmallVector zeroValues(vecTy.getSize(), 0); + mlir::Value shuffled = rewriter.create( + loc, oneElement, undef, zeroValues); + rewriter.replaceOp(op, shuffled); + return mlir::success(); +} -class CIRVectorShuffleVecLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VecShuffleDynamicOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // LLVM IR does not have an operation that corresponds to this form of - // the built-in. - // __builtin_shufflevector(V, I) - // is implemented as this pseudocode, where the for loop is unrolled - // and N is the number of elements: - // masked = I & (N-1) - // for (i in 0 <= i < N) - // result[i] = V[masked[i]] - auto loc = op.getLoc(); - mlir::Value input = adaptor.getVec(); - mlir::Type llvmIndexVecType = - getTypeConverter()->convertType(op.getIndices().getType()); - mlir::Type llvmIndexType = getTypeConverter()->convertType( - elementTypeIfVector(op.getIndices().getType())); - uint64_t numElements = - mlir::cast(op.getVec().getType()).getSize(); - mlir::Value maskValue = rewriter.create( - loc, llvmIndexType, - mlir::IntegerAttr::get(llvmIndexType, numElements - 1)); - mlir::Value maskVector = - rewriter.create(loc, llvmIndexVecType); - for (uint64_t i = 0; i < numElements; ++i) { - mlir::Value iValue = rewriter.create( - loc, rewriter.getI64Type(), i); - maskVector = rewriter.create( - loc, maskVector, maskValue, iValue); - } - mlir::Value maskedIndices = rewriter.create( - loc, llvmIndexVecType, adaptor.getIndices(), maskVector); - mlir::Value result = rewriter.create( - loc, getTypeConverter()->convertType(op.getVec().getType())); - for (uint64_t i = 0; i < numElements; ++i) { - mlir::Value iValue = rewriter.create( - loc, rewriter.getI64Type(), i); - mlir::Value indexValue = rewriter.create( - loc, maskedIndices, iValue); - mlir::Value valueAtIndex = - rewriter.create(loc, input, indexValue); - result = rewriter.create( - loc, result, valueAtIndex, iValue); - } - rewriter.replaceOp(op, result); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMVecTernaryOpLowering::matchAndRewrite( + cir::VecTernaryOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + assert(mlir::isa(op.getType()) && + mlir::isa(op.getCond().getType()) && + mlir::isa(op.getVec1().getType()) && + mlir::isa(op.getVec2().getType()) && + "Vector ternary op with non-vector type"); + // Convert `cond` into a vector of i1, then use that in a `select` op. + mlir::Value bitVec = rewriter.create( + op.getLoc(), mlir::LLVM::ICmpPredicate::ne, adaptor.getCond(), + rewriter.create( + op.getCond().getLoc(), + typeConverter->convertType(op.getCond().getType()))); + rewriter.replaceOpWithNewOp( + op, bitVec, adaptor.getVec1(), adaptor.getVec2()); + return mlir::success(); +} -class CIRVAStartLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VAStartOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); - auto vaList = rewriter.create( - op.getLoc(), opaquePtr, adaptor.getOperands().front()); - rewriter.replaceOpWithNewOp(op, vaList); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMVecShuffleOpLowering::matchAndRewrite( + cir::VecShuffleOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // LLVM::ShuffleVectorOp takes an ArrayRef of int for the list of indices. + // Convert the ClangIR ArrayAttr of IntAttr constants into a + // SmallVector. + SmallVector indices; + std::transform( + op.getIndices().begin(), op.getIndices().end(), + std::back_inserter(indices), [](mlir::Attribute intAttr) { + return mlir::cast(intAttr).getValue().getSExtValue(); + }); + rewriter.replaceOpWithNewOp( + op, adaptor.getVec1(), adaptor.getVec2(), indices); + return mlir::success(); +} -class CIRVAEndLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VAEndOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); - auto vaList = rewriter.create( - op.getLoc(), opaquePtr, adaptor.getOperands().front()); - rewriter.replaceOpWithNewOp(op, vaList); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMVecShuffleDynamicOpLowering::matchAndRewrite( + cir::VecShuffleDynamicOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // LLVM IR does not have an operation that corresponds to this form of + // the built-in. + // __builtin_shufflevector(V, I) + // is implemented as this pseudocode, where the for loop is unrolled + // and N is the number of elements: + // masked = I & (N-1) + // for (i in 0 <= i < N) + // result[i] = V[masked[i]] + auto loc = op.getLoc(); + mlir::Value input = adaptor.getVec(); + mlir::Type llvmIndexVecType = + getTypeConverter()->convertType(op.getIndices().getType()); + mlir::Type llvmIndexType = getTypeConverter()->convertType( + elementTypeIfVector(op.getIndices().getType())); + uint64_t numElements = + mlir::cast(op.getVec().getType()).getSize(); + mlir::Value maskValue = rewriter.create( + loc, llvmIndexType, + mlir::IntegerAttr::get(llvmIndexType, numElements - 1)); + mlir::Value maskVector = + rewriter.create(loc, llvmIndexVecType); + for (uint64_t i = 0; i < numElements; ++i) { + mlir::Value iValue = + rewriter.create(loc, rewriter.getI64Type(), i); + maskVector = rewriter.create( + loc, maskVector, maskValue, iValue); + } + mlir::Value maskedIndices = rewriter.create( + loc, llvmIndexVecType, adaptor.getIndices(), maskVector); + mlir::Value result = rewriter.create( + loc, getTypeConverter()->convertType(op.getVec().getType())); + for (uint64_t i = 0; i < numElements; ++i) { + mlir::Value iValue = + rewriter.create(loc, rewriter.getI64Type(), i); + mlir::Value indexValue = rewriter.create( + loc, maskedIndices, iValue); + mlir::Value valueAtIndex = + rewriter.create(loc, input, indexValue); + result = rewriter.create(loc, result, + valueAtIndex, iValue); + } + rewriter.replaceOp(op, result); + return mlir::success(); +} -class CIRVACopyLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VACopyOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); - auto dstList = rewriter.create( - op.getLoc(), opaquePtr, adaptor.getOperands().front()); - auto srcList = rewriter.create( - op.getLoc(), opaquePtr, adaptor.getOperands().back()); - rewriter.replaceOpWithNewOp(op, dstList, srcList); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMVAStartOpLowering::matchAndRewrite( + cir::VAStartOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); + auto vaList = rewriter.create( + op.getLoc(), opaquePtr, adaptor.getOperands().front()); + rewriter.replaceOpWithNewOp(op, vaList); + return mlir::success(); +} -class CIRVAArgLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMVAEndOpLowering::matchAndRewrite( + cir::VAEndOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); + auto vaList = rewriter.create( + op.getLoc(), opaquePtr, adaptor.getOperands().front()); + rewriter.replaceOpWithNewOp(op, vaList); + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::VAArgOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - return op.emitError("cir.vaarg lowering is NYI"); - } -}; +mlir::LogicalResult CIRToLLVMVACopyOpLowering::matchAndRewrite( + cir::VACopyOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext()); + auto dstList = rewriter.create( + op.getLoc(), opaquePtr, adaptor.getOperands().front()); + auto srcList = rewriter.create( + op.getLoc(), opaquePtr, adaptor.getOperands().back()); + rewriter.replaceOpWithNewOp(op, dstList, srcList); + return mlir::success(); +} -class CIRFuncLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMVAArgOpLowering::matchAndRewrite( + cir::VAArgOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + return op.emitError("cir.vaarg lowering is NYI"); +} /// Returns the name used for the linkage attribute. This *must* correspond /// to the name of the attribute in ODS. - static StringRef getLinkageAttrNameString() { return "linkage"; } - - /// Convert the `cir.func` attributes to `llvm.func` attributes. - /// Only retain those attributes that are not constructed by - /// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out - /// argument attributes. - void - lowerFuncAttributes(cir::FuncOp func, bool filterArgAndResAttrs, - SmallVectorImpl &result) const { - for (auto attr : func->getAttrs()) { - if (attr.getName() == mlir::SymbolTable::getSymbolAttrName() || - attr.getName() == func.getFunctionTypeAttrName() || - attr.getName() == getLinkageAttrNameString() || - attr.getName() == func.getCallingConvAttrName() || - (filterArgAndResAttrs && - (attr.getName() == func.getArgAttrsAttrName() || - attr.getName() == func.getResAttrsAttrName()))) - continue; - - // `CIRDialectLLVMIRTranslationInterface` requires "cir." prefix for - // dialect specific attributes, rename them. - if (attr.getName() == func.getExtraAttrsAttrName()) { - std::string cirName = "cir." + func.getExtraAttrsAttrName().str(); - attr.setName(mlir::StringAttr::get(getContext(), cirName)); +StringRef CIRToLLVMFuncOpLowering::getLinkageAttrNameString() { + return "linkage"; +} - lowerFuncOpenCLKernelMetadata(attr); - } - result.push_back(attr); +/// Convert the `cir.func` attributes to `llvm.func` attributes. +/// Only retain those attributes that are not constructed by +/// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out +/// argument attributes. +void CIRToLLVMFuncOpLowering::lowerFuncAttributes( + cir::FuncOp func, bool filterArgAndResAttrs, + SmallVectorImpl &result) const { + for (auto attr : func->getAttrs()) { + if (attr.getName() == mlir::SymbolTable::getSymbolAttrName() || + attr.getName() == func.getFunctionTypeAttrName() || + attr.getName() == getLinkageAttrNameString() || + attr.getName() == func.getCallingConvAttrName() || + (filterArgAndResAttrs && + (attr.getName() == func.getArgAttrsAttrName() || + attr.getName() == func.getResAttrsAttrName()))) + continue; + + // `CIRDialectLLVMIRTranslationInterface` requires "cir." prefix for + // dialect specific attributes, rename them. + if (attr.getName() == func.getExtraAttrsAttrName()) { + std::string cirName = "cir." + func.getExtraAttrsAttrName().str(); + attr.setName(mlir::StringAttr::get(getContext(), cirName)); + + lowerFuncOpenCLKernelMetadata(attr); } + result.push_back(attr); } +} /// When do module translation, we can only translate LLVM-compatible types. /// Here we lower possible OpenCLKernelMetadataAttr to use the converted type. - void - lowerFuncOpenCLKernelMetadata(mlir::NamedAttribute &extraAttrsEntry) const { - const auto attrKey = cir::OpenCLKernelMetadataAttr::getMnemonic(); - auto oldExtraAttrs = - cast(extraAttrsEntry.getValue()); - if (!oldExtraAttrs.getElements().contains(attrKey)) - return; +void CIRToLLVMFuncOpLowering::lowerFuncOpenCLKernelMetadata( + mlir::NamedAttribute &extraAttrsEntry) const { + const auto attrKey = cir::OpenCLKernelMetadataAttr::getMnemonic(); + auto oldExtraAttrs = + cast(extraAttrsEntry.getValue()); + if (!oldExtraAttrs.getElements().contains(attrKey)) + return; - mlir::NamedAttrList newExtraAttrs; - for (auto entry : oldExtraAttrs.getElements()) { - if (entry.getName() == attrKey) { - auto clKernelMetadata = - cast(entry.getValue()); - if (auto vecTypeHint = clKernelMetadata.getVecTypeHint()) { - auto newType = typeConverter->convertType(vecTypeHint.getValue()); - auto newTypeHint = mlir::TypeAttr::get(newType); - auto newCLKMAttr = cir::OpenCLKernelMetadataAttr::get( - getContext(), clKernelMetadata.getWorkGroupSizeHint(), - clKernelMetadata.getReqdWorkGroupSize(), newTypeHint, - clKernelMetadata.getVecTypeHintSignedness(), - clKernelMetadata.getIntelReqdSubGroupSize()); - entry.setValue(newCLKMAttr); - } + mlir::NamedAttrList newExtraAttrs; + for (auto entry : oldExtraAttrs.getElements()) { + if (entry.getName() == attrKey) { + auto clKernelMetadata = + cast(entry.getValue()); + if (auto vecTypeHint = clKernelMetadata.getVecTypeHint()) { + auto newType = typeConverter->convertType(vecTypeHint.getValue()); + auto newTypeHint = mlir::TypeAttr::get(newType); + auto newCLKMAttr = cir::OpenCLKernelMetadataAttr::get( + getContext(), clKernelMetadata.getWorkGroupSizeHint(), + clKernelMetadata.getReqdWorkGroupSize(), newTypeHint, + clKernelMetadata.getVecTypeHintSignedness(), + clKernelMetadata.getIntelReqdSubGroupSize()); + entry.setValue(newCLKMAttr); } - newExtraAttrs.push_back(entry); } - extraAttrsEntry.setValue(cir::ExtraFuncAttributesAttr::get( - getContext(), newExtraAttrs.getDictionary(getContext()))); + newExtraAttrs.push_back(entry); } + extraAttrsEntry.setValue(cir::ExtraFuncAttributesAttr::get( + getContext(), newExtraAttrs.getDictionary(getContext()))); +} - mlir::LogicalResult - matchAndRewrite(cir::FuncOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - - auto fnType = op.getFunctionType(); - auto isDsoLocal = op.getDsolocal(); - mlir::TypeConverter::SignatureConversion signatureConversion( - fnType.getNumInputs()); +mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewrite( + cir::FuncOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { - for (const auto &argType : enumerate(fnType.getInputs())) { - auto convertedType = typeConverter->convertType(argType.value()); - if (!convertedType) - return mlir::failure(); - signatureConversion.addInputs(argType.index(), convertedType); - } + auto fnType = op.getFunctionType(); + auto isDsoLocal = op.getDsolocal(); + mlir::TypeConverter::SignatureConversion signatureConversion( + fnType.getNumInputs()); - mlir::Type resultType = - getTypeConverter()->convertType(fnType.getReturnType()); - - // Create the LLVM function operation. - auto llvmFnTy = mlir::LLVM::LLVMFunctionType::get( - resultType ? resultType : mlir::LLVM::LLVMVoidType::get(getContext()), - signatureConversion.getConvertedTypes(), - /*isVarArg=*/fnType.isVarArg()); - // LLVMFuncOp expects a single FileLine Location instead of a fused - // location. - auto Loc = op.getLoc(); - if (mlir::isa(Loc)) { - auto FusedLoc = mlir::cast(Loc); - Loc = FusedLoc.getLocations()[0]; - } - assert((mlir::isa(Loc) || - mlir::isa(Loc)) && - "expected single location or unknown location here"); - - auto linkage = convertLinkage(op.getLinkage()); - auto cconv = convertCallingConv(op.getCallingConv()); - SmallVector attributes; - lowerFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes); - - auto fn = rewriter.create( - Loc, op.getName(), llvmFnTy, linkage, isDsoLocal, cconv, - mlir::SymbolRefAttr(), attributes); - - fn.setVisibility_Attr(mlir::LLVM::VisibilityAttr::get( - getContext(), lowerCIRVisibilityToLLVMVisibility( - op.getGlobalVisibilityAttr().getValue()))); - - rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); - if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, - &signatureConversion))) + for (const auto &argType : enumerate(fnType.getInputs())) { + auto convertedType = typeConverter->convertType(argType.value()); + if (!convertedType) return mlir::failure(); + signatureConversion.addInputs(argType.index(), convertedType); + } + + mlir::Type resultType = + getTypeConverter()->convertType(fnType.getReturnType()); + + // Create the LLVM function operation. + auto llvmFnTy = mlir::LLVM::LLVMFunctionType::get( + resultType ? resultType : mlir::LLVM::LLVMVoidType::get(getContext()), + signatureConversion.getConvertedTypes(), + /*isVarArg=*/fnType.isVarArg()); + // LLVMFuncOp expects a single FileLine Location instead of a fused + // location. + auto Loc = op.getLoc(); + if (mlir::isa(Loc)) { + auto FusedLoc = mlir::cast(Loc); + Loc = FusedLoc.getLocations()[0]; + } + assert((mlir::isa(Loc) || + mlir::isa(Loc)) && + "expected single location or unknown location here"); + + auto linkage = convertLinkage(op.getLinkage()); + auto cconv = convertCallingConv(op.getCallingConv()); + SmallVector attributes; + lowerFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes); + + auto fn = rewriter.create( + Loc, op.getName(), llvmFnTy, linkage, isDsoLocal, cconv, + mlir::SymbolRefAttr(), attributes); + + fn.setVisibility_Attr(mlir::LLVM::VisibilityAttr::get( + getContext(), lowerCIRVisibilityToLLVMVisibility( + op.getGlobalVisibilityAttr().getValue()))); + + rewriter.inlineRegionBefore(op.getBody(), fn.getBody(), fn.end()); + if (failed(rewriter.convertRegionTypes(&fn.getBody(), *typeConverter, + &signatureConversion))) + return mlir::failure(); - rewriter.eraseOp(op); - - return mlir::LogicalResult::success(); - } -}; - -class CIRGetGlobalOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::GetGlobalOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // FIXME(cir): Premature DCE to avoid lowering stuff we're not using. - // CIRGen should mitigate this and not emit the get_global. - if (op->getUses().empty()) { - rewriter.eraseOp(op); - return mlir::success(); - } - - auto type = getTypeConverter()->convertType(op.getType()); - auto symbol = op.getName(); - mlir::Operation *newop = - rewriter.create(op.getLoc(), type, symbol); + rewriter.eraseOp(op); - if (op.getTls()) { - // Handle access to TLS via intrinsic. - newop = rewriter.create( - op.getLoc(), type, newop->getResult(0)); - } + return mlir::LogicalResult::success(); +} - rewriter.replaceOp(op, newop); +mlir::LogicalResult CIRToLLVMGetGlobalOpLowering::matchAndRewrite( + cir::GetGlobalOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // FIXME(cir): Premature DCE to avoid lowering stuff we're not using. + // CIRGen should mitigate this and not emit the get_global. + if (op->getUses().empty()) { + rewriter.eraseOp(op); return mlir::success(); } -}; -class CIRComplexCreateOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + auto type = getTypeConverter()->convertType(op.getType()); + auto symbol = op.getName(); + mlir::Operation *newop = + rewriter.create(op.getLoc(), type, symbol); - mlir::LogicalResult - matchAndRewrite(cir::ComplexCreateOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto complexLLVMTy = - getTypeConverter()->convertType(op.getResult().getType()); - auto initialComplex = - rewriter.create(op->getLoc(), complexLLVMTy); + if (op.getTls()) { + // Handle access to TLS via intrinsic. + newop = rewriter.create( + op.getLoc(), type, newop->getResult(0)); + } - int64_t position[1]{0}; - auto realComplex = rewriter.create( - op->getLoc(), initialComplex, adaptor.getReal(), position); + rewriter.replaceOp(op, newop); + return mlir::success(); +} - position[0] = 1; - auto complex = rewriter.create( - op->getLoc(), realComplex, adaptor.getImag(), position); +mlir::LogicalResult CIRToLLVMComplexCreateOpLowering::matchAndRewrite( + cir::ComplexCreateOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto complexLLVMTy = + getTypeConverter()->convertType(op.getResult().getType()); + auto initialComplex = + rewriter.create(op->getLoc(), complexLLVMTy); - rewriter.replaceOp(op, complex); - return mlir::success(); - } -}; + int64_t position[1]{0}; + auto realComplex = rewriter.create( + op->getLoc(), initialComplex, adaptor.getReal(), position); -class CIRComplexRealOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::ComplexRealOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resultLLVMTy = - getTypeConverter()->convertType(op.getResult().getType()); - rewriter.replaceOpWithNewOp( - op, resultLLVMTy, adaptor.getOperand(), - llvm::ArrayRef{0}); - return mlir::success(); - } -}; + position[0] = 1; + auto complex = rewriter.create( + op->getLoc(), realComplex, adaptor.getImag(), position); -class CIRComplexImagOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::ComplexImagOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resultLLVMTy = - getTypeConverter()->convertType(op.getResult().getType()); - rewriter.replaceOpWithNewOp( - op, resultLLVMTy, adaptor.getOperand(), - llvm::ArrayRef{1}); - return mlir::success(); - } -}; + rewriter.replaceOp(op, complex); + return mlir::success(); +} -class CIRComplexRealPtrOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::ComplexRealPtrOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto operandTy = mlir::cast(op.getOperand().getType()); - auto resultLLVMTy = - getTypeConverter()->convertType(op.getResult().getType()); - auto elementLLVMTy = - getTypeConverter()->convertType(operandTy.getPointee()); - - mlir::LLVM::GEPArg gepIndices[2]{{0}, {0}}; - rewriter.replaceOpWithNewOp( - op, resultLLVMTy, elementLLVMTy, adaptor.getOperand(), gepIndices, - /*inbounds=*/true); +mlir::LogicalResult CIRToLLVMComplexRealOpLowering::matchAndRewrite( + cir::ComplexRealOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto resultLLVMTy = getTypeConverter()->convertType(op.getResult().getType()); + rewriter.replaceOpWithNewOp( + op, resultLLVMTy, adaptor.getOperand(), llvm::ArrayRef{0}); + return mlir::success(); +} - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMComplexImagOpLowering::matchAndRewrite( + cir::ComplexImagOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto resultLLVMTy = getTypeConverter()->convertType(op.getResult().getType()); + rewriter.replaceOpWithNewOp( + op, resultLLVMTy, adaptor.getOperand(), llvm::ArrayRef{1}); + return mlir::success(); +} -class CIRComplexImagPtrOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::ComplexImagPtrOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto operandTy = mlir::cast(op.getOperand().getType()); - auto resultLLVMTy = - getTypeConverter()->convertType(op.getResult().getType()); - auto elementLLVMTy = - getTypeConverter()->convertType(operandTy.getPointee()); - - mlir::LLVM::GEPArg gepIndices[2]{{0}, {1}}; - rewriter.replaceOpWithNewOp( - op, resultLLVMTy, elementLLVMTy, adaptor.getOperand(), gepIndices, - /*inbounds=*/true); +mlir::LogicalResult CIRToLLVMComplexRealPtrOpLowering::matchAndRewrite( + cir::ComplexRealPtrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto operandTy = mlir::cast(op.getOperand().getType()); + auto resultLLVMTy = getTypeConverter()->convertType(op.getResult().getType()); + auto elementLLVMTy = getTypeConverter()->convertType(operandTy.getPointee()); - return mlir::success(); - } -}; + mlir::LLVM::GEPArg gepIndices[2]{{0}, {0}}; + rewriter.replaceOpWithNewOp( + op, resultLLVMTy, elementLLVMTy, adaptor.getOperand(), gepIndices, + /*inbounds=*/true); -class CIRSwitchFlatOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::SwitchFlatOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { +mlir::LogicalResult CIRToLLVMComplexImagPtrOpLowering::matchAndRewrite( + cir::ComplexImagPtrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto operandTy = mlir::cast(op.getOperand().getType()); + auto resultLLVMTy = getTypeConverter()->convertType(op.getResult().getType()); + auto elementLLVMTy = getTypeConverter()->convertType(operandTy.getPointee()); - llvm::SmallVector caseValues; - if (op.getCaseValues()) { - for (auto val : op.getCaseValues()) { - auto intAttr = dyn_cast(val); - caseValues.push_back(intAttr.getValue()); - } - } + mlir::LLVM::GEPArg gepIndices[2]{{0}, {1}}; + rewriter.replaceOpWithNewOp( + op, resultLLVMTy, elementLLVMTy, adaptor.getOperand(), gepIndices, + /*inbounds=*/true); - llvm::SmallVector caseDestinations; - llvm::SmallVector caseOperands; + return mlir::success(); +} - for (auto x : op.getCaseDestinations()) { - caseDestinations.push_back(x); - } +mlir::LogicalResult CIRToLLVMSwitchFlatOpLowering::matchAndRewrite( + cir::SwitchFlatOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { - for (auto x : op.getCaseOperands()) { - caseOperands.push_back(x); + llvm::SmallVector caseValues; + if (op.getCaseValues()) { + for (auto val : op.getCaseValues()) { + auto intAttr = dyn_cast(val); + caseValues.push_back(intAttr.getValue()); } + } - // Set switch op to branch to the newly created blocks. - rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp( - op, adaptor.getCondition(), op.getDefaultDestination(), - op.getDefaultOperands(), caseValues, caseDestinations, caseOperands); - return mlir::success(); + llvm::SmallVector caseDestinations; + llvm::SmallVector caseOperands; + + for (auto x : op.getCaseDestinations()) { + caseDestinations.push_back(x); } -}; -class CIRGlobalOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + for (auto x : op.getCaseOperands()) { + caseOperands.push_back(x); + } + + // Set switch op to branch to the newly created blocks. + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp( + op, adaptor.getCondition(), op.getDefaultDestination(), + op.getDefaultOperands(), caseValues, caseDestinations, caseOperands); + return mlir::success(); +} /// Replace CIR global with a region initialized LLVM global and update /// insertion point to the end of the initializer block. - inline void setupRegionInitializedLLVMGlobalOp( - cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const { - const auto llvmType = getTypeConverter()->convertType(op.getSymType()); - SmallVector attributes; - auto newGlobalOp = rewriter.replaceOpWithNewOp( - op, llvmType, op.getConstant(), convertLinkage(op.getLinkage()), - op.getSymName(), nullptr, - /*alignment*/ op.getAlignment().value_or(0), +void CIRToLLVMGlobalOpLowering::setupRegionInitializedLLVMGlobalOp( + cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const { + const auto llvmType = getTypeConverter()->convertType(op.getSymType()); + SmallVector attributes; + auto newGlobalOp = rewriter.replaceOpWithNewOp( + op, llvmType, op.getConstant(), convertLinkage(op.getLinkage()), + op.getSymName(), nullptr, + /*alignment*/ op.getAlignment().value_or(0), + /*addrSpace*/ getGlobalOpTargetAddrSpace(rewriter, typeConverter, op), + /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), + /*comdat*/ mlir::SymbolRefAttr(), attributes); + newGlobalOp.getRegion().push_back(new mlir::Block()); + rewriter.setInsertionPointToEnd(newGlobalOp.getInitializerBlock()); +} + +mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( + cir::GlobalOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + + // Fetch required values to create LLVM op. + const auto llvmType = getTypeConverter()->convertType(op.getSymType()); + const auto isConst = op.getConstant(); + const auto isDsoLocal = op.getDsolocal(); + const auto linkage = convertLinkage(op.getLinkage()); + const auto symbol = op.getSymName(); + const auto loc = op.getLoc(); + std::optional section = op.getSection(); + std::optional init = op.getInitialValue(); + mlir::LLVM::VisibilityAttr visibility = mlir::LLVM::VisibilityAttr::get( + getContext(), lowerCIRVisibilityToLLVMVisibility( + op.getGlobalVisibilityAttr().getValue())); + + SmallVector attributes; + if (section.has_value()) + attributes.push_back(rewriter.getNamedAttr( + "section", rewriter.getStringAttr(section.value()))); + + attributes.push_back(rewriter.getNamedAttr("visibility_", visibility)); + + // Check for missing funcionalities. + if (!init.has_value()) { + rewriter.replaceOpWithNewOp( + op, llvmType, isConst, linkage, symbol, mlir::Attribute(), + /*alignment*/ 0, /*addrSpace*/ getGlobalOpTargetAddrSpace(rewriter, typeConverter, op), - /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), + /*dsoLocal*/ isDsoLocal, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); - newGlobalOp.getRegion().push_back(new mlir::Block()); - rewriter.setInsertionPointToEnd(newGlobalOp.getInitializerBlock()); + return mlir::success(); } - mlir::LogicalResult - matchAndRewrite(cir::GlobalOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - - // Fetch required values to create LLVM op. - const auto llvmType = getTypeConverter()->convertType(op.getSymType()); - const auto isConst = op.getConstant(); - const auto isDsoLocal = op.getDsolocal(); - const auto linkage = convertLinkage(op.getLinkage()); - const auto symbol = op.getSymName(); - const auto loc = op.getLoc(); - std::optional section = op.getSection(); - std::optional init = op.getInitialValue(); - mlir::LLVM::VisibilityAttr visibility = mlir::LLVM::VisibilityAttr::get( - getContext(), lowerCIRVisibilityToLLVMVisibility( - op.getGlobalVisibilityAttr().getValue())); - - SmallVector attributes; - if (section.has_value()) - attributes.push_back(rewriter.getNamedAttr( - "section", rewriter.getStringAttr(section.value()))); - - attributes.push_back(rewriter.getNamedAttr("visibility_", visibility)); - - // Check for missing funcionalities. - if (!init.has_value()) { - rewriter.replaceOpWithNewOp( - op, llvmType, isConst, linkage, symbol, mlir::Attribute(), - /*alignment*/ 0, - /*addrSpace*/ getGlobalOpTargetAddrSpace(rewriter, typeConverter, op), - /*dsoLocal*/ isDsoLocal, /*threadLocal*/ (bool)op.getTlsModelAttr(), - /*comdat*/ mlir::SymbolRefAttr(), attributes); - return mlir::success(); - } - - // Initializer is a constant array: convert it to a compatible llvm init. - if (auto constArr = mlir::dyn_cast(init.value())) { - if (auto attr = mlir::dyn_cast(constArr.getElts())) { - llvm::SmallString<256> literal(attr.getValue()); - if (constArr.getTrailingZerosNum()) - literal.append(constArr.getTrailingZerosNum(), '\0'); - init = rewriter.getStringAttr(literal); - } else if (auto attr = - mlir::dyn_cast(constArr.getElts())) { - // Failed to use a compact attribute as an initializer: - // initialize elements individually. - if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - rewriter.create( - op->getLoc(), - lowerCirAttrAsValue(op, constArr, rewriter, typeConverter)); - return mlir::success(); - } - } else { - op.emitError() - << "unsupported lowering for #cir.const_array with value " - << constArr.getElts(); - return mlir::failure(); + // Initializer is a constant array: convert it to a compatible llvm init. + if (auto constArr = mlir::dyn_cast(init.value())) { + if (auto attr = mlir::dyn_cast(constArr.getElts())) { + llvm::SmallString<256> literal(attr.getValue()); + if (constArr.getTrailingZerosNum()) + literal.append(constArr.getTrailingZerosNum(), '\0'); + init = rewriter.getStringAttr(literal); + } else if (auto attr = + mlir::dyn_cast(constArr.getElts())) { + // Failed to use a compact attribute as an initializer: + // initialize elements individually. + if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { + setupRegionInitializedLLVMGlobalOp(op, rewriter); + rewriter.create( + op->getLoc(), + lowerCirAttrAsValue(op, constArr, rewriter, typeConverter)); + return mlir::success(); } - } else if (auto fltAttr = mlir::dyn_cast(init.value())) { - // Initializer is a constant floating-point number: convert to MLIR - // builtin constant. - init = rewriter.getFloatAttr(llvmType, fltAttr.getValue()); - } - // Initializer is a constant integer: convert to MLIR builtin constant. - else if (auto intAttr = mlir::dyn_cast(init.value())) { - init = rewriter.getIntegerAttr(llvmType, intAttr.getValue()); - } else if (auto boolAttr = mlir::dyn_cast(init.value())) { - init = rewriter.getBoolAttr(boolAttr.getValue()); - } else if (isa( - init.value())) { - // TODO(cir): once LLVM's dialect has proper equivalent attributes this - // should be updated. For now, we use a custom op to initialize globals - // to the appropriate value. - setupRegionInitializedLLVMGlobalOp(op, rewriter); - auto value = - lowerCirAttrAsValue(op, init.value(), rewriter, typeConverter); - rewriter.create(loc, value); - return mlir::success(); - } else if (auto dataMemberAttr = - mlir::dyn_cast(init.value())) { - init = lowerDataMemberAttr(op->getParentOfType(), - dataMemberAttr, *typeConverter); - } else if (const auto structAttr = - mlir::dyn_cast(init.value())) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - rewriter.create( - op->getLoc(), - lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter)); - return mlir::success(); - } else if (auto attr = mlir::dyn_cast(init.value())) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - rewriter.create( - loc, lowerCirAttrAsValue(op, attr, rewriter, typeConverter)); - return mlir::success(); - } else if (const auto vtableAttr = - mlir::dyn_cast(init.value())) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - rewriter.create( - op->getLoc(), - lowerCirAttrAsValue(op, vtableAttr, rewriter, typeConverter)); - return mlir::success(); - } else if (const auto typeinfoAttr = - mlir::dyn_cast(init.value())) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - rewriter.create( - op->getLoc(), - lowerCirAttrAsValue(op, typeinfoAttr, rewriter, typeConverter)); - return mlir::success(); } else { - op.emitError() << "unsupported initializer '" << init.value() << "'"; + op.emitError() << "unsupported lowering for #cir.const_array with value " + << constArr.getElts(); return mlir::failure(); } + } else if (auto fltAttr = mlir::dyn_cast(init.value())) { + // Initializer is a constant floating-point number: convert to MLIR + // builtin constant. + init = rewriter.getFloatAttr(llvmType, fltAttr.getValue()); + } + // Initializer is a constant integer: convert to MLIR builtin constant. + else if (auto intAttr = mlir::dyn_cast(init.value())) { + init = rewriter.getIntegerAttr(llvmType, intAttr.getValue()); + } else if (auto boolAttr = mlir::dyn_cast(init.value())) { + init = rewriter.getBoolAttr(boolAttr.getValue()); + } else if (isa( + init.value())) { + // TODO(cir): once LLVM's dialect has proper equivalent attributes this + // should be updated. For now, we use a custom op to initialize globals + // to the appropriate value. + setupRegionInitializedLLVMGlobalOp(op, rewriter); + auto value = lowerCirAttrAsValue(op, init.value(), rewriter, typeConverter); + rewriter.create(loc, value); + return mlir::success(); + } else if (auto dataMemberAttr = + mlir::dyn_cast(init.value())) { + init = lowerDataMemberAttr(op->getParentOfType(), + dataMemberAttr, *typeConverter); + } else if (const auto structAttr = + mlir::dyn_cast(init.value())) { + setupRegionInitializedLLVMGlobalOp(op, rewriter); + rewriter.create( + op->getLoc(), + lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter)); + return mlir::success(); + } else if (auto attr = mlir::dyn_cast(init.value())) { + setupRegionInitializedLLVMGlobalOp(op, rewriter); + rewriter.create( + loc, lowerCirAttrAsValue(op, attr, rewriter, typeConverter)); + return mlir::success(); + } else if (const auto vtableAttr = + mlir::dyn_cast(init.value())) { + setupRegionInitializedLLVMGlobalOp(op, rewriter); + rewriter.create( + op->getLoc(), + lowerCirAttrAsValue(op, vtableAttr, rewriter, typeConverter)); + return mlir::success(); + } else if (const auto typeinfoAttr = + mlir::dyn_cast(init.value())) { + setupRegionInitializedLLVMGlobalOp(op, rewriter); + rewriter.create( + op->getLoc(), + lowerCirAttrAsValue(op, typeinfoAttr, rewriter, typeConverter)); + return mlir::success(); + } else { + op.emitError() << "unsupported initializer '" << init.value() << "'"; + return mlir::failure(); + } - // Rewrite op. - auto llvmGlobalOp = rewriter.replaceOpWithNewOp( - op, llvmType, isConst, linkage, symbol, init.value(), - /*alignment*/ op.getAlignment().value_or(0), - /*addrSpace*/ getGlobalOpTargetAddrSpace(rewriter, typeConverter, op), - /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), - /*comdat*/ mlir::SymbolRefAttr(), attributes); + // Rewrite op. + auto llvmGlobalOp = rewriter.replaceOpWithNewOp( + op, llvmType, isConst, linkage, symbol, init.value(), + /*alignment*/ op.getAlignment().value_or(0), + /*addrSpace*/ getGlobalOpTargetAddrSpace(rewriter, typeConverter, op), + /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), + /*comdat*/ mlir::SymbolRefAttr(), attributes); - auto mod = op->getParentOfType(); - if (op.getComdat()) - addComdat(llvmGlobalOp, comdatOp, rewriter, mod); + auto mod = op->getParentOfType(); + if (op.getComdat()) + addComdat(llvmGlobalOp, comdatOp, rewriter, mod); - return mlir::success(); - } + return mlir::success(); +} -private: - mutable mlir::LLVM::ComdatOp comdatOp = nullptr; - static void addComdat(mlir::LLVM::GlobalOp &op, - mlir::LLVM::ComdatOp &comdatOp, - mlir::OpBuilder &builder, mlir::ModuleOp &module) { - StringRef comdatName("__llvm_comdat_globals"); - if (!comdatOp) { - builder.setInsertionPointToStart(module.getBody()); - comdatOp = - builder.create(module.getLoc(), comdatName); - } - builder.setInsertionPointToStart(&comdatOp.getBody().back()); - auto selectorOp = builder.create( - comdatOp.getLoc(), op.getSymName(), mlir::LLVM::comdat::Comdat::Any); - op.setComdatAttr(mlir::SymbolRefAttr::get( - builder.getContext(), comdatName, - mlir::FlatSymbolRefAttr::get(selectorOp.getSymNameAttr()))); - } -}; +void CIRToLLVMGlobalOpLowering::addComdat(mlir::LLVM::GlobalOp &op, + mlir::LLVM::ComdatOp &comdatOp, + mlir::OpBuilder &builder, + mlir::ModuleOp &module) { + StringRef comdatName("__llvm_comdat_globals"); + if (!comdatOp) { + builder.setInsertionPointToStart(module.getBody()); + comdatOp = + builder.create(module.getLoc(), comdatName); + } + builder.setInsertionPointToStart(&comdatOp.getBody().back()); + auto selectorOp = builder.create( + comdatOp.getLoc(), op.getSymName(), mlir::LLVM::comdat::Comdat::Any); + op.setComdatAttr(mlir::SymbolRefAttr::get( + builder.getContext(), comdatName, + mlir::FlatSymbolRefAttr::get(selectorOp.getSymNameAttr()))); +} -class CIRUnaryOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::UnaryOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - assert(op.getType() == op.getInput().getType() && - "Unary operation's operand type and result type are different"); - mlir::Type type = op.getType(); - mlir::Type elementType = elementTypeIfVector(type); - bool IsVector = mlir::isa(type); - auto llvmType = getTypeConverter()->convertType(type); - auto loc = op.getLoc(); - - // Integer unary operations: + - ~ ++ -- - if (mlir::isa(elementType)) { - switch (op.getKind()) { - case cir::UnaryOpKind::Inc: { - assert(!IsVector && "++ not allowed on vector types"); - auto One = rewriter.create( - loc, llvmType, mlir::IntegerAttr::get(llvmType, 1)); - rewriter.replaceOpWithNewOp(op, llvmType, - adaptor.getInput(), One); - return mlir::success(); - } - case cir::UnaryOpKind::Dec: { - assert(!IsVector && "-- not allowed on vector types"); - auto One = rewriter.create( - loc, llvmType, mlir::IntegerAttr::get(llvmType, 1)); - rewriter.replaceOpWithNewOp(op, llvmType, - adaptor.getInput(), One); - return mlir::success(); - } - case cir::UnaryOpKind::Plus: { - rewriter.replaceOp(op, adaptor.getInput()); - return mlir::success(); - } - case cir::UnaryOpKind::Minus: { - mlir::Value Zero; - if (IsVector) - Zero = rewriter.create(loc, llvmType); - else - Zero = rewriter.create( - loc, llvmType, mlir::IntegerAttr::get(llvmType, 0)); - rewriter.replaceOpWithNewOp(op, llvmType, Zero, - adaptor.getInput()); - return mlir::success(); - } - case cir::UnaryOpKind::Not: { - // bit-wise compliment operator, implemented as an XOR with -1. - mlir::Value MinusOne; - if (IsVector) { - // Creating a vector object with all -1 values is easier said than - // done. It requires a series of insertelement ops. - mlir::Type llvmElementType = - getTypeConverter()->convertType(elementType); - auto MinusOneInt = rewriter.create( - loc, llvmElementType, - mlir::IntegerAttr::get(llvmElementType, -1)); - MinusOne = rewriter.create(loc, llvmType); - auto NumElements = mlir::dyn_cast(type).getSize(); - for (uint64_t i = 0; i < NumElements; ++i) { - mlir::Value indexValue = rewriter.create( - loc, rewriter.getI64Type(), i); - MinusOne = rewriter.create( - loc, MinusOne, MinusOneInt, indexValue); - } - } else { - MinusOne = rewriter.create( - loc, llvmType, mlir::IntegerAttr::get(llvmType, -1)); +mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite( + cir::UnaryOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + assert(op.getType() == op.getInput().getType() && + "Unary operation's operand type and result type are different"); + mlir::Type type = op.getType(); + mlir::Type elementType = elementTypeIfVector(type); + bool IsVector = mlir::isa(type); + auto llvmType = getTypeConverter()->convertType(type); + auto loc = op.getLoc(); + + // Integer unary operations: + - ~ ++ -- + if (mlir::isa(elementType)) { + switch (op.getKind()) { + case cir::UnaryOpKind::Inc: { + assert(!IsVector && "++ not allowed on vector types"); + auto One = rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, 1)); + rewriter.replaceOpWithNewOp(op, llvmType, + adaptor.getInput(), One); + return mlir::success(); + } + case cir::UnaryOpKind::Dec: { + assert(!IsVector && "-- not allowed on vector types"); + auto One = rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, 1)); + rewriter.replaceOpWithNewOp(op, llvmType, + adaptor.getInput(), One); + return mlir::success(); + } + case cir::UnaryOpKind::Plus: { + rewriter.replaceOp(op, adaptor.getInput()); + return mlir::success(); + } + case cir::UnaryOpKind::Minus: { + mlir::Value Zero; + if (IsVector) + Zero = rewriter.create(loc, llvmType); + else + Zero = rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, 0)); + rewriter.replaceOpWithNewOp(op, llvmType, Zero, + adaptor.getInput()); + return mlir::success(); + } + case cir::UnaryOpKind::Not: { + // bit-wise compliment operator, implemented as an XOR with -1. + mlir::Value MinusOne; + if (IsVector) { + // Creating a vector object with all -1 values is easier said than + // done. It requires a series of insertelement ops. + mlir::Type llvmElementType = + getTypeConverter()->convertType(elementType); + auto MinusOneInt = rewriter.create( + loc, llvmElementType, mlir::IntegerAttr::get(llvmElementType, -1)); + MinusOne = rewriter.create(loc, llvmType); + auto NumElements = mlir::dyn_cast(type).getSize(); + for (uint64_t i = 0; i < NumElements; ++i) { + mlir::Value indexValue = rewriter.create( + loc, rewriter.getI64Type(), i); + MinusOne = rewriter.create( + loc, MinusOne, MinusOneInt, indexValue); } - rewriter.replaceOpWithNewOp(op, llvmType, MinusOne, - adaptor.getInput()); - return mlir::success(); - } + } else { + MinusOne = rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, -1)); } + rewriter.replaceOpWithNewOp(op, llvmType, MinusOne, + adaptor.getInput()); + return mlir::success(); } - - // Floating point unary operations: + - ++ -- - if (mlir::isa(elementType)) { - switch (op.getKind()) { - case cir::UnaryOpKind::Inc: { - assert(!IsVector && "++ not allowed on vector types"); - auto oneAttr = rewriter.getFloatAttr(llvmType, 1.0); - auto oneConst = - rewriter.create(loc, llvmType, oneAttr); - rewriter.replaceOpWithNewOp(op, llvmType, oneConst, - adaptor.getInput()); - return mlir::success(); - } - case cir::UnaryOpKind::Dec: { - assert(!IsVector && "-- not allowed on vector types"); - auto negOneAttr = rewriter.getFloatAttr(llvmType, -1.0); - auto negOneConst = - rewriter.create(loc, llvmType, negOneAttr); - rewriter.replaceOpWithNewOp( - op, llvmType, negOneConst, adaptor.getInput()); - return mlir::success(); - } - case cir::UnaryOpKind::Plus: - rewriter.replaceOp(op, adaptor.getInput()); - return mlir::success(); - case cir::UnaryOpKind::Minus: { - rewriter.replaceOpWithNewOp(op, llvmType, - adaptor.getInput()); - return mlir::success(); - } - default: - return op.emitError() - << "Unknown floating-point unary operation during CIR lowering"; - } } + } - // Boolean unary operations: ! only. (For all others, the operand has - // already been promoted to int.) - if (mlir::isa(elementType)) { - switch (op.getKind()) { - case cir::UnaryOpKind::Not: - assert(!IsVector && "NYI: op! on vector mask"); - rewriter.replaceOpWithNewOp( - op, llvmType, adaptor.getInput(), - rewriter.create( - loc, llvmType, mlir::IntegerAttr::get(llvmType, 1))); - return mlir::success(); - default: - return op.emitError() - << "Unknown boolean unary operation during CIR lowering"; - } + // Floating point unary operations: + - ++ -- + if (mlir::isa(elementType)) { + switch (op.getKind()) { + case cir::UnaryOpKind::Inc: { + assert(!IsVector && "++ not allowed on vector types"); + auto oneAttr = rewriter.getFloatAttr(llvmType, 1.0); + auto oneConst = + rewriter.create(loc, llvmType, oneAttr); + rewriter.replaceOpWithNewOp(op, llvmType, oneConst, + adaptor.getInput()); + return mlir::success(); } - - // Pointer unary operations: + only. (++ and -- of pointers are implemented - // with cir.ptr_stride, not cir.unary.) - if (mlir::isa(elementType)) { - switch (op.getKind()) { - case cir::UnaryOpKind::Plus: - rewriter.replaceOp(op, adaptor.getInput()); - return mlir::success(); - default: - op.emitError() << "Unknown pointer unary operation during CIR lowering"; - return mlir::failure(); - } + case cir::UnaryOpKind::Dec: { + assert(!IsVector && "-- not allowed on vector types"); + auto negOneAttr = rewriter.getFloatAttr(llvmType, -1.0); + auto negOneConst = + rewriter.create(loc, llvmType, negOneAttr); + rewriter.replaceOpWithNewOp(op, llvmType, negOneConst, + adaptor.getInput()); + return mlir::success(); + } + case cir::UnaryOpKind::Plus: + rewriter.replaceOp(op, adaptor.getInput()); + return mlir::success(); + case cir::UnaryOpKind::Minus: { + rewriter.replaceOpWithNewOp(op, llvmType, + adaptor.getInput()); + return mlir::success(); + } + default: + return op.emitError() + << "Unknown floating-point unary operation during CIR lowering"; } - - return op.emitError() << "Unary operation has unsupported type: " - << elementType; } -}; - -class CIRBinOpLowering : public mlir::OpConversionPattern { - mlir::LLVM::IntegerOverflowFlags getIntOverflowFlag(cir::BinOp op) const { - if (op.getNoUnsignedWrap()) - return mlir::LLVM::IntegerOverflowFlags::nuw; - - if (op.getNoSignedWrap()) - return mlir::LLVM::IntegerOverflowFlags::nsw; + // Boolean unary operations: ! only. (For all others, the operand has + // already been promoted to int.) + if (mlir::isa(elementType)) { + switch (op.getKind()) { + case cir::UnaryOpKind::Not: + assert(!IsVector && "NYI: op! on vector mask"); + rewriter.replaceOpWithNewOp( + op, llvmType, adaptor.getInput(), + rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, 1))); + return mlir::success(); + default: + return op.emitError() + << "Unknown boolean unary operation during CIR lowering"; + } + } - return mlir::LLVM::IntegerOverflowFlags::none; + // Pointer unary operations: + only. (++ and -- of pointers are implemented + // with cir.ptr_stride, not cir.unary.) + if (mlir::isa(elementType)) { + switch (op.getKind()) { + case cir::UnaryOpKind::Plus: + rewriter.replaceOp(op, adaptor.getInput()); + return mlir::success(); + default: + op.emitError() << "Unknown pointer unary operation during CIR lowering"; + return mlir::failure(); + } } -public: - using OpConversionPattern::OpConversionPattern; + return op.emitError() << "Unary operation has unsupported type: " + << elementType; +} - mlir::LogicalResult - matchAndRewrite(cir::BinOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - assert((op.getLhs().getType() == op.getRhs().getType()) && - "inconsistent operands' types not supported yet"); - mlir::Type type = op.getRhs().getType(); - assert((mlir::isa( - type)) && - "operand type not supported yet"); +mlir::LLVM::IntegerOverflowFlags +CIRToLLVMBinOpLowering::getIntOverflowFlag(cir::BinOp op) const { + if (op.getNoUnsignedWrap()) + return mlir::LLVM::IntegerOverflowFlags::nuw; - auto llvmTy = getTypeConverter()->convertType(op.getType()); - auto rhs = adaptor.getRhs(); - auto lhs = adaptor.getLhs(); + if (op.getNoSignedWrap()) + return mlir::LLVM::IntegerOverflowFlags::nsw; - type = elementTypeIfVector(type); + return mlir::LLVM::IntegerOverflowFlags::none; +} - switch (op.getKind()) { - case cir::BinOpKind::Add: - if (mlir::isa(type)) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, - getIntOverflowFlag(op)); - else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - break; - case cir::BinOpKind::Sub: - if (mlir::isa(type)) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, - getIntOverflowFlag(op)); +mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite( + cir::BinOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + assert((op.getLhs().getType() == op.getRhs().getType()) && + "inconsistent operands' types not supported yet"); + mlir::Type type = op.getRhs().getType(); + assert((mlir::isa( + type)) && + "operand type not supported yet"); + + auto llvmTy = getTypeConverter()->convertType(op.getType()); + auto rhs = adaptor.getRhs(); + auto lhs = adaptor.getLhs(); + + type = elementTypeIfVector(type); + + switch (op.getKind()) { + case cir::BinOpKind::Add: + if (mlir::isa(type)) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); + else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case cir::BinOpKind::Sub: + if (mlir::isa(type)) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); + else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case cir::BinOpKind::Mul: + if (mlir::isa(type)) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, + getIntOverflowFlag(op)); + else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case cir::BinOpKind::Div: + if (auto ty = mlir::dyn_cast(type)) { + if (ty.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - break; - case cir::BinOpKind::Mul: - if (mlir::isa(type)) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, - getIntOverflowFlag(op)); + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + } else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case cir::BinOpKind::Rem: + if (auto ty = mlir::dyn_cast(type)) { + if (ty.isUnsigned()) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - break; - case cir::BinOpKind::Div: - if (auto ty = mlir::dyn_cast(type)) { - if (ty.isUnsigned()) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - } else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - break; - case cir::BinOpKind::Rem: - if (auto ty = mlir::dyn_cast(type)) { - if (ty.isUnsigned()) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - } else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - break; - case cir::BinOpKind::And: - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - break; - case cir::BinOpKind::Or: - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - break; - case cir::BinOpKind::Xor: - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); - break; - } - - return mlir::LogicalResult::success(); - } -}; + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + } else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case cir::BinOpKind::And: + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case cir::BinOpKind::Or: + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + case cir::BinOpKind::Xor: + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + break; + } + + return mlir::LogicalResult::success(); +} -class CIRBinOpOverflowOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::BinOpOverflowOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto loc = op.getLoc(); - auto arithKind = op.getKind(); - auto operandTy = op.getLhs().getType(); - auto resultTy = op.getResult().getType(); - - auto encompassedTyInfo = computeEncompassedTypeWidth(operandTy, resultTy); - auto encompassedLLVMTy = rewriter.getIntegerType(encompassedTyInfo.width); - - auto lhs = adaptor.getLhs(); - auto rhs = adaptor.getRhs(); - if (operandTy.getWidth() < encompassedTyInfo.width) { - if (operandTy.isSigned()) { - lhs = rewriter.create(loc, encompassedLLVMTy, lhs); - rhs = rewriter.create(loc, encompassedLLVMTy, rhs); - } else { - lhs = rewriter.create(loc, encompassedLLVMTy, lhs); - rhs = rewriter.create(loc, encompassedLLVMTy, rhs); - } +mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite( + cir::BinOpOverflowOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto loc = op.getLoc(); + auto arithKind = op.getKind(); + auto operandTy = op.getLhs().getType(); + auto resultTy = op.getResult().getType(); + + auto encompassedTyInfo = computeEncompassedTypeWidth(operandTy, resultTy); + auto encompassedLLVMTy = rewriter.getIntegerType(encompassedTyInfo.width); + + auto lhs = adaptor.getLhs(); + auto rhs = adaptor.getRhs(); + if (operandTy.getWidth() < encompassedTyInfo.width) { + if (operandTy.isSigned()) { + lhs = rewriter.create(loc, encompassedLLVMTy, lhs); + rhs = rewriter.create(loc, encompassedLLVMTy, rhs); + } else { + lhs = rewriter.create(loc, encompassedLLVMTy, lhs); + rhs = rewriter.create(loc, encompassedLLVMTy, rhs); } + } - auto intrinName = getLLVMIntrinName(arithKind, encompassedTyInfo.sign, - encompassedTyInfo.width); - auto intrinNameAttr = mlir::StringAttr::get(op.getContext(), intrinName); + auto intrinName = getLLVMIntrinName(arithKind, encompassedTyInfo.sign, + encompassedTyInfo.width); + auto intrinNameAttr = mlir::StringAttr::get(op.getContext(), intrinName); - auto overflowLLVMTy = rewriter.getI1Type(); - auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral( - rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy}); + auto overflowLLVMTy = rewriter.getI1Type(); + auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral( + rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy}); - auto callLLVMIntrinOp = rewriter.create( - loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs}); - auto intrinRet = callLLVMIntrinOp.getResult(0); + auto callLLVMIntrinOp = rewriter.create( + loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs}); + auto intrinRet = callLLVMIntrinOp.getResult(0); - auto result = rewriter + auto result = rewriter + .create(loc, intrinRet, + ArrayRef{0}) + .getResult(); + auto overflow = rewriter .create(loc, intrinRet, - ArrayRef{0}) + ArrayRef{1}) .getResult(); - auto overflow = rewriter - .create( - loc, intrinRet, ArrayRef{1}) - .getResult(); - - if (resultTy.getWidth() < encompassedTyInfo.width) { - auto resultLLVMTy = getTypeConverter()->convertType(resultTy); - auto truncResult = - rewriter.create(loc, resultLLVMTy, result); - - // Extend the truncated result back to the encompassing type to check for - // any overflows during the truncation. - mlir::Value truncResultExt; - if (resultTy.isSigned()) - truncResultExt = rewriter.create( - loc, encompassedLLVMTy, truncResult); - else - truncResultExt = rewriter.create( - loc, encompassedLLVMTy, truncResult); - auto truncOverflow = rewriter.create( - loc, mlir::LLVM::ICmpPredicate::ne, truncResultExt, result); - - result = truncResult; - overflow = - rewriter.create(loc, overflow, truncOverflow); - } - auto boolLLVMTy = - getTypeConverter()->convertType(op.getOverflow().getType()); - if (boolLLVMTy != rewriter.getI1Type()) - overflow = rewriter.create(loc, boolLLVMTy, overflow); - - rewriter.replaceOp(op, mlir::ValueRange{result, overflow}); + if (resultTy.getWidth() < encompassedTyInfo.width) { + auto resultLLVMTy = getTypeConverter()->convertType(resultTy); + auto truncResult = + rewriter.create(loc, resultLLVMTy, result); + + // Extend the truncated result back to the encompassing type to check for + // any overflows during the truncation. + mlir::Value truncResultExt; + if (resultTy.isSigned()) + truncResultExt = rewriter.create( + loc, encompassedLLVMTy, truncResult); + else + truncResultExt = rewriter.create( + loc, encompassedLLVMTy, truncResult); + auto truncOverflow = rewriter.create( + loc, mlir::LLVM::ICmpPredicate::ne, truncResultExt, result); - return mlir::success(); + result = truncResult; + overflow = rewriter.create(loc, overflow, truncOverflow); } -private: - static std::string getLLVMIntrinName(cir::BinOpOverflowKind opKind, - bool isSigned, unsigned width) { - // The intrinsic name is `@llvm.{s|u}{opKind}.with.overflow.i{width}` + auto boolLLVMTy = getTypeConverter()->convertType(op.getOverflow().getType()); + if (boolLLVMTy != rewriter.getI1Type()) + overflow = rewriter.create(loc, boolLLVMTy, overflow); - std::string name = "llvm."; + rewriter.replaceOp(op, mlir::ValueRange{result, overflow}); - if (isSigned) - name.push_back('s'); - else - name.push_back('u'); + return mlir::success(); +} - switch (opKind) { - case cir::BinOpOverflowKind::Add: - name.append("add."); - break; - case cir::BinOpOverflowKind::Sub: - name.append("sub."); - break; - case cir::BinOpOverflowKind::Mul: - name.append("mul."); - break; - } +std::string CIRToLLVMBinOpOverflowOpLowering::getLLVMIntrinName( + cir::BinOpOverflowKind opKind, bool isSigned, unsigned width) { + // The intrinsic name is `@llvm.{s|u}{opKind}.with.overflow.i{width}` - name.append("with.overflow.i"); - name.append(std::to_string(width)); + std::string name = "llvm."; - return name; + if (isSigned) + name.push_back('s'); + else + name.push_back('u'); + + switch (opKind) { + case cir::BinOpOverflowKind::Add: + name.append("add."); + break; + case cir::BinOpOverflowKind::Sub: + name.append("sub."); + break; + case cir::BinOpOverflowKind::Mul: + name.append("mul."); + break; } - struct EncompassedTypeInfo { - bool sign; - unsigned width; - }; + name.append("with.overflow.i"); + name.append(std::to_string(width)); - static EncompassedTypeInfo - computeEncompassedTypeWidth(cir::IntType operandTy, cir::IntType resultTy) { - auto sign = operandTy.getIsSigned() || resultTy.getIsSigned(); - auto width = - std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()), - resultTy.getWidth() + (sign && resultTy.isUnsigned())); - return {sign, width}; - } -}; + return name; +} -class CIRShiftOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::ShiftOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto cirAmtTy = mlir::dyn_cast(op.getAmount().getType()); - auto cirValTy = mlir::dyn_cast(op.getValue().getType()); - - // Operands could also be vector type - auto cirAmtVTy = mlir::dyn_cast(op.getAmount().getType()); - auto cirValVTy = mlir::dyn_cast(op.getValue().getType()); - auto llvmTy = getTypeConverter()->convertType(op.getType()); - mlir::Value amt = adaptor.getAmount(); - mlir::Value val = adaptor.getValue(); - - assert(((cirValTy && cirAmtTy) || (cirAmtVTy && cirValVTy)) && - "shift input type must be integer or vector type, otherwise NYI"); - - assert((cirValTy == op.getType() || cirValVTy == op.getType()) && - "inconsistent operands' types NYI"); - - // Ensure shift amount is the same type as the value. Some undefined - // behavior might occur in the casts below as per [C99 6.5.7.3]. - // Vector type shift amount needs no cast as type consistency is expected to - // be already be enforced at CIRGen. - if (cirAmtTy) - amt = getLLVMIntCast(rewriter, amt, mlir::cast(llvmTy), - !cirAmtTy.isSigned(), cirAmtTy.getWidth(), - cirValTy.getWidth()); - - // Lower to the proper LLVM shift operation. - if (op.getIsShiftleft()) - rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); - else { - bool isUnSigned = - cirValTy - ? !cirValTy.isSigned() - : !mlir::cast(cirValVTy.getEltType()).isSigned(); - if (isUnSigned) - rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); - else - rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); - } +CIRToLLVMBinOpOverflowOpLowering::EncompassedTypeInfo +CIRToLLVMBinOpOverflowOpLowering::computeEncompassedTypeWidth( + cir::IntType operandTy, cir::IntType resultTy) { + auto sign = operandTy.getIsSigned() || resultTy.getIsSigned(); + auto width = std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()), + resultTy.getWidth() + (sign && resultTy.isUnsigned())); + return {sign, width}; +} - return mlir::success(); +mlir::LogicalResult CIRToLLVMShiftOpLowering::matchAndRewrite( + cir::ShiftOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto cirAmtTy = mlir::dyn_cast(op.getAmount().getType()); + auto cirValTy = mlir::dyn_cast(op.getValue().getType()); + + // Operands could also be vector type + auto cirAmtVTy = mlir::dyn_cast(op.getAmount().getType()); + auto cirValVTy = mlir::dyn_cast(op.getValue().getType()); + auto llvmTy = getTypeConverter()->convertType(op.getType()); + mlir::Value amt = adaptor.getAmount(); + mlir::Value val = adaptor.getValue(); + + assert(((cirValTy && cirAmtTy) || (cirAmtVTy && cirValVTy)) && + "shift input type must be integer or vector type, otherwise NYI"); + + assert((cirValTy == op.getType() || cirValVTy == op.getType()) && + "inconsistent operands' types NYI"); + + // Ensure shift amount is the same type as the value. Some undefined + // behavior might occur in the casts below as per [C99 6.5.7.3]. + // Vector type shift amount needs no cast as type consistency is expected to + // be already be enforced at CIRGen. + if (cirAmtTy) + amt = getLLVMIntCast(rewriter, amt, mlir::cast(llvmTy), + !cirAmtTy.isSigned(), cirAmtTy.getWidth(), + cirValTy.getWidth()); + + // Lower to the proper LLVM shift operation. + if (op.getIsShiftleft()) + rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); + else { + bool isUnSigned = + cirValTy ? !cirValTy.isSigned() + : !mlir::cast(cirValVTy.getEltType()).isSigned(); + if (isUnSigned) + rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); + else + rewriter.replaceOpWithNewOp(op, llvmTy, val, amt); } -}; - -class CIRCmpOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::CmpOp cmpOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto type = cmpOp.getLhs().getType(); - mlir::Value llResult; - - // Lower to LLVM comparison op. - if (auto intTy = mlir::dyn_cast(type)) { - auto kind = - convertCmpKindToICmpPredicate(cmpOp.getKind(), intTy.isSigned()); - llResult = rewriter.create( - cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (auto ptrTy = mlir::dyn_cast(type)) { - auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), - /* isSigned=*/false); - llResult = rewriter.create( - cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else if (mlir::isa(type)) { - auto kind = convertCmpKindToFCmpPredicate(cmpOp.getKind()); - llResult = rewriter.create( - cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); - } else { - return cmpOp.emitError() << "unsupported type for CmpOp: " << type; - } - // LLVM comparison ops return i1, but cir::CmpOp returns the same type as - // the LHS value. Since this return value can be used later, we need to - // restore the type with the extension below. - auto llResultTy = getTypeConverter()->convertType(cmpOp.getType()); - rewriter.replaceOpWithNewOp(cmpOp, llResultTy, - llResult); + return mlir::success(); +} - return mlir::success(); +mlir::LogicalResult CIRToLLVMCmpOpLowering::matchAndRewrite( + cir::CmpOp cmpOp, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto type = cmpOp.getLhs().getType(); + mlir::Value llResult; + + // Lower to LLVM comparison op. + if (auto intTy = mlir::dyn_cast(type)) { + auto kind = + convertCmpKindToICmpPredicate(cmpOp.getKind(), intTy.isSigned()); + llResult = rewriter.create( + cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else if (auto ptrTy = mlir::dyn_cast(type)) { + auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), + /* isSigned=*/false); + llResult = rewriter.create( + cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else if (mlir::isa(type)) { + auto kind = convertCmpKindToFCmpPredicate(cmpOp.getKind()); + llResult = rewriter.create( + cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + } else { + return cmpOp.emitError() << "unsupported type for CmpOp: " << type; } -}; -static mlir::LLVM::CallIntrinsicOp + // LLVM comparison ops return i1, but cir::CmpOp returns the same type as + // the LHS value. Since this return value can be used later, we need to + // restore the type with the extension below. + auto llResultTy = getTypeConverter()->convertType(cmpOp.getType()); + rewriter.replaceOpWithNewOp(cmpOp, llResultTy, llResult); + + return mlir::success(); +} + +mlir::LLVM::CallIntrinsicOp createCallLLVMIntrinsicOp(mlir::ConversionPatternRewriter &rewriter, mlir::Location loc, const llvm::Twine &intrinsicName, mlir::Type resultTy, mlir::ValueRange operands) { @@ -2988,7 +2686,7 @@ createCallLLVMIntrinsicOp(mlir::ConversionPatternRewriter &rewriter, loc, resultTy, intrinsicNameAttr, operands); } -static mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp( +mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp( mlir::ConversionPatternRewriter &rewriter, mlir::Operation *op, const llvm::Twine &intrinsicName, mlir::Type resultTy, mlir::ValueRange operands) { @@ -2998,99 +2696,76 @@ static mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp( return callIntrinOp; } -class CIRIntrinsicCallLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::LLVMIntrinsicCallOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Type llvmResTy = - getTypeConverter()->convertType(op->getResultTypes()[0]); - if (!llvmResTy) - return op.emitError("expected LLVM result type"); - StringRef name = op.getIntrinsicName(); - // Some llvm intrinsics require ElementType attribute to be attached to - // the argument of pointer type. That prevents us from generating LLVM IR - // because from LLVM dialect, we have LLVM IR like the below which fails - // LLVM IR verification. - // %3 = call i64 @llvm.aarch64.ldxr.p0(ptr %2) - // The expected LLVM IR should be like - // %3 = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) %2) - // TODO(cir): MLIR LLVM dialect should handle this part as CIR has no way - // to set LLVM IR attribute. - assert(!cir::MissingFeatures::llvmIntrinsicElementTypeSupport()); - replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm." + name, llvmResTy, - adaptor.getOperands()); - return mlir::success(); - } -}; - -class CIRAssumeLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::AssumeOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto cond = rewriter.create( - op.getLoc(), rewriter.getI1Type(), adaptor.getPredicate()); - rewriter.replaceOpWithNewOp(op, cond); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMLLVMIntrinsicCallOpLowering::matchAndRewrite( + cir::LLVMIntrinsicCallOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Type llvmResTy = + getTypeConverter()->convertType(op->getResultTypes()[0]); + if (!llvmResTy) + return op.emitError("expected LLVM result type"); + StringRef name = op.getIntrinsicName(); + // Some llvm intrinsics require ElementType attribute to be attached to + // the argument of pointer type. That prevents us from generating LLVM IR + // because from LLVM dialect, we have LLVM IR like the below which fails + // LLVM IR verification. + // %3 = call i64 @llvm.aarch64.ldxr.p0(ptr %2) + // The expected LLVM IR should be like + // %3 = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) %2) + // TODO(cir): MLIR LLVM dialect should handle this part as CIR has no way + // to set LLVM IR attribute. + assert(!cir::MissingFeatures::llvmIntrinsicElementTypeSupport()); + replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm." + name, llvmResTy, + adaptor.getOperands()); + return mlir::success(); +} -class CIRAssumeAlignedLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMAssumeOpLowering::matchAndRewrite( + cir::AssumeOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto cond = rewriter.create( + op.getLoc(), rewriter.getI1Type(), adaptor.getPredicate()); + rewriter.replaceOpWithNewOp(op, cond); + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::AssumeAlignedOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - SmallVector opBundleArgs{adaptor.getPointer()}; +mlir::LogicalResult CIRToLLVMAssumeAlignedOpLowering::matchAndRewrite( + cir::AssumeAlignedOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + SmallVector opBundleArgs{adaptor.getPointer()}; - auto alignment = rewriter.create( - op.getLoc(), rewriter.getI64Type(), op.getAlignment()); - opBundleArgs.push_back(alignment); + auto alignment = rewriter.create( + op.getLoc(), rewriter.getI64Type(), op.getAlignment()); + opBundleArgs.push_back(alignment); - if (mlir::Value offset = adaptor.getOffset()) - opBundleArgs.push_back(offset); + if (mlir::Value offset = adaptor.getOffset()) + opBundleArgs.push_back(offset); - auto cond = rewriter.create( - op.getLoc(), rewriter.getI1Type(), 1); - rewriter.create(op.getLoc(), cond, "align", - opBundleArgs); - rewriter.replaceAllUsesWith(op, op.getPointer()); - rewriter.eraseOp(op); + auto cond = rewriter.create(op.getLoc(), + rewriter.getI1Type(), 1); + rewriter.create(op.getLoc(), cond, "align", + opBundleArgs); + rewriter.replaceAllUsesWith(op, op.getPointer()); + rewriter.eraseOp(op); - return mlir::success(); - } -}; + return mlir::success(); +} -class CIRAssumeSepStorageLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::AssumeSepStorageOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto cond = rewriter.create( - op.getLoc(), rewriter.getI1Type(), 1); - rewriter.replaceOpWithNewOp( - op, cond, "separate_storage", - mlir::ValueRange{adaptor.getPtr1(), adaptor.getPtr2()}); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMAssumeSepStorageOpLowering::matchAndRewrite( + cir::AssumeSepStorageOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto cond = rewriter.create(op.getLoc(), + rewriter.getI1Type(), 1); + rewriter.replaceOpWithNewOp( + op, cond, "separate_storage", + mlir::ValueRange{adaptor.getPtr1(), adaptor.getPtr2()}); + return mlir::success(); +} -static mlir::Value createLLVMBitOp(mlir::Location loc, - const llvm::Twine &llvmIntrinBaseName, - mlir::Type resultTy, mlir::Value operand, - std::optional poisonZeroInputFlag, - mlir::ConversionPatternRewriter &rewriter) { +mlir::Value createLLVMBitOp(mlir::Location loc, + const llvm::Twine &llvmIntrinBaseName, + mlir::Type resultTy, mlir::Value operand, + std::optional poisonZeroInputFlag, + mlir::ConversionPatternRewriter &rewriter) { auto operandIntTy = mlir::cast(operand.getType()); auto resultIntTy = mlir::cast(resultTy); @@ -3118,1275 +2793,1048 @@ static mlir::Value createLLVMBitOp(mlir::Location loc, /*isUnsigned=*/true, operandIntTy.getWidth(), resultIntTy.getWidth()); } -class CIRBitClrsbOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::BitClrsbOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto zero = rewriter.create( - op.getLoc(), adaptor.getInput().getType(), 0); - auto isNeg = rewriter.create( - op.getLoc(), - mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), - mlir::LLVM::ICmpPredicate::slt), - adaptor.getInput(), zero); - - auto negOne = rewriter.create( - op.getLoc(), adaptor.getInput().getType(), -1); - auto flipped = rewriter.create( - op.getLoc(), adaptor.getInput(), negOne); - - auto select = rewriter.create( - op.getLoc(), isNeg, flipped, adaptor.getInput()); - - auto resTy = getTypeConverter()->convertType(op.getType()); - auto clz = createLLVMBitOp(op.getLoc(), "llvm.ctlz", resTy, select, - /*poisonZeroInputFlag=*/false, rewriter); - - auto one = rewriter.create(op.getLoc(), resTy, 1); - auto res = rewriter.create(op.getLoc(), clz, one); - rewriter.replaceOp(op, res); - - return mlir::LogicalResult::success(); - } -}; - -class CIRObjSizeOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::ObjSizeOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto llvmResTy = getTypeConverter()->convertType(op.getType()); - auto loc = op->getLoc(); - - cir::SizeInfoType kindInfo = op.getKind(); - auto falseValue = rewriter.create( - loc, rewriter.getI1Type(), false); - auto trueValue = rewriter.create( - loc, rewriter.getI1Type(), true); - - replaceOpWithCallLLVMIntrinsicOp( - rewriter, op, "llvm.objectsize", llvmResTy, - mlir::ValueRange{adaptor.getPtr(), - kindInfo == cir::SizeInfoType::max ? falseValue - : trueValue, - trueValue, op.getDynamic() ? trueValue : falseValue}); - - return mlir::LogicalResult::success(); - } -}; - -class CIRBitClzOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::BitClzOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resTy = getTypeConverter()->convertType(op.getType()); - auto llvmOp = - createLLVMBitOp(op.getLoc(), "llvm.ctlz", resTy, adaptor.getInput(), - /*poisonZeroInputFlag=*/true, rewriter); - rewriter.replaceOp(op, llvmOp); - return mlir::LogicalResult::success(); - } -}; - -class CIRBitCtzOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::BitCtzOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resTy = getTypeConverter()->convertType(op.getType()); - auto llvmOp = - createLLVMBitOp(op.getLoc(), "llvm.cttz", resTy, adaptor.getInput(), - /*poisonZeroInputFlag=*/true, rewriter); - rewriter.replaceOp(op, llvmOp); - return mlir::LogicalResult::success(); - } -}; +mlir::LogicalResult CIRToLLVMBitClrsbOpLowering::matchAndRewrite( + cir::BitClrsbOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto zero = rewriter.create( + op.getLoc(), adaptor.getInput().getType(), 0); + auto isNeg = rewriter.create( + op.getLoc(), + mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), + mlir::LLVM::ICmpPredicate::slt), + adaptor.getInput(), zero); + + auto negOne = rewriter.create( + op.getLoc(), adaptor.getInput().getType(), -1); + auto flipped = rewriter.create(op.getLoc(), + adaptor.getInput(), negOne); + + auto select = rewriter.create( + op.getLoc(), isNeg, flipped, adaptor.getInput()); + + auto resTy = getTypeConverter()->convertType(op.getType()); + auto clz = createLLVMBitOp(op.getLoc(), "llvm.ctlz", resTy, select, + /*poisonZeroInputFlag=*/false, rewriter); + + auto one = rewriter.create(op.getLoc(), resTy, 1); + auto res = rewriter.create(op.getLoc(), clz, one); + rewriter.replaceOp(op, res); + + return mlir::LogicalResult::success(); +} -class CIRBitFfsOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMObjSizeOpLowering::matchAndRewrite( + cir::ObjSizeOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto llvmResTy = getTypeConverter()->convertType(op.getType()); + auto loc = op->getLoc(); + + cir::SizeInfoType kindInfo = op.getKind(); + auto falseValue = + rewriter.create(loc, rewriter.getI1Type(), false); + auto trueValue = + rewriter.create(loc, rewriter.getI1Type(), true); + + replaceOpWithCallLLVMIntrinsicOp( + rewriter, op, "llvm.objectsize", llvmResTy, + mlir::ValueRange{adaptor.getPtr(), + kindInfo == cir::SizeInfoType::max ? falseValue + : trueValue, + trueValue, op.getDynamic() ? trueValue : falseValue}); + + return mlir::LogicalResult::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::BitFfsOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resTy = getTypeConverter()->convertType(op.getType()); - auto ctz = - createLLVMBitOp(op.getLoc(), "llvm.cttz", resTy, adaptor.getInput(), - /*poisonZeroInputFlag=*/false, rewriter); +mlir::LogicalResult CIRToLLVMBitClzOpLowering::matchAndRewrite( + cir::BitClzOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto llvmOp = + createLLVMBitOp(op.getLoc(), "llvm.ctlz", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/true, rewriter); + rewriter.replaceOp(op, llvmOp); + return mlir::LogicalResult::success(); +} - auto one = rewriter.create(op.getLoc(), resTy, 1); - auto ctzAddOne = rewriter.create(op.getLoc(), ctz, one); +mlir::LogicalResult CIRToLLVMBitCtzOpLowering::matchAndRewrite( + cir::BitCtzOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto llvmOp = + createLLVMBitOp(op.getLoc(), "llvm.cttz", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/true, rewriter); + rewriter.replaceOp(op, llvmOp); + return mlir::LogicalResult::success(); +} - auto zeroInputTy = rewriter.create( - op.getLoc(), adaptor.getInput().getType(), 0); - auto isZero = rewriter.create( - op.getLoc(), - mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), - mlir::LLVM::ICmpPredicate::eq), - adaptor.getInput(), zeroInputTy); +mlir::LogicalResult CIRToLLVMBitFfsOpLowering::matchAndRewrite( + cir::BitFfsOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto ctz = + createLLVMBitOp(op.getLoc(), "llvm.cttz", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/false, rewriter); + + auto one = rewriter.create(op.getLoc(), resTy, 1); + auto ctzAddOne = rewriter.create(op.getLoc(), ctz, one); + + auto zeroInputTy = rewriter.create( + op.getLoc(), adaptor.getInput().getType(), 0); + auto isZero = rewriter.create( + op.getLoc(), + mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), + mlir::LLVM::ICmpPredicate::eq), + adaptor.getInput(), zeroInputTy); + + auto zero = rewriter.create(op.getLoc(), resTy, 0); + auto res = rewriter.create(op.getLoc(), isZero, zero, + ctzAddOne); + rewriter.replaceOp(op, res); + + return mlir::LogicalResult::success(); +} - auto zero = rewriter.create(op.getLoc(), resTy, 0); - auto res = rewriter.create(op.getLoc(), isZero, zero, - ctzAddOne); - rewriter.replaceOp(op, res); +mlir::LogicalResult CIRToLLVMBitParityOpLowering::matchAndRewrite( + cir::BitParityOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto popcnt = + createLLVMBitOp(op.getLoc(), "llvm.ctpop", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/std::nullopt, rewriter); - return mlir::LogicalResult::success(); - } -}; + auto one = rewriter.create(op.getLoc(), resTy, 1); + auto popcntMod2 = + rewriter.create(op.getLoc(), popcnt, one); + rewriter.replaceOp(op, popcntMod2); -class CIRBitParityOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::BitParityOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resTy = getTypeConverter()->convertType(op.getType()); - auto popcnt = - createLLVMBitOp(op.getLoc(), "llvm.ctpop", resTy, adaptor.getInput(), - /*poisonZeroInputFlag=*/std::nullopt, rewriter); - - auto one = rewriter.create(op.getLoc(), resTy, 1); - auto popcntMod2 = - rewriter.create(op.getLoc(), popcnt, one); - rewriter.replaceOp(op, popcntMod2); - - return mlir::LogicalResult::success(); - } -}; + return mlir::LogicalResult::success(); +} -class CIRBitPopcountOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::BitPopcountOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resTy = getTypeConverter()->convertType(op.getType()); - auto llvmOp = - createLLVMBitOp(op.getLoc(), "llvm.ctpop", resTy, adaptor.getInput(), - /*poisonZeroInputFlag=*/std::nullopt, rewriter); - rewriter.replaceOp(op, llvmOp); - return mlir::LogicalResult::success(); - } -}; +mlir::LogicalResult CIRToLLVMBitPopcountOpLowering::matchAndRewrite( + cir::BitPopcountOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto resTy = getTypeConverter()->convertType(op.getType()); + auto llvmOp = + createLLVMBitOp(op.getLoc(), "llvm.ctpop", resTy, adaptor.getInput(), + /*poisonZeroInputFlag=*/std::nullopt, rewriter); + rewriter.replaceOp(op, llvmOp); + return mlir::LogicalResult::success(); +} -static mlir::LLVM::AtomicOrdering getLLVMAtomicOrder(cir::MemOrder memo) { +mlir::LLVM::AtomicOrdering getLLVMAtomicOrder(cir::MemOrder memo) { switch (memo) { case cir::MemOrder::Relaxed: return mlir::LLVM::AtomicOrdering::monotonic; - case cir::MemOrder::Consume: - case cir::MemOrder::Acquire: - return mlir::LLVM::AtomicOrdering::acquire; - case cir::MemOrder::Release: - return mlir::LLVM::AtomicOrdering::release; - case cir::MemOrder::AcquireRelease: - return mlir::LLVM::AtomicOrdering::acq_rel; - case cir::MemOrder::SequentiallyConsistent: - return mlir::LLVM::AtomicOrdering::seq_cst; - } - llvm_unreachable("shouldn't get here"); -} - -class CIRAtomicCmpXchgLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::AtomicCmpXchg op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto expected = adaptor.getExpected(); - auto desired = adaptor.getDesired(); - - // FIXME: add syncscope. - auto cmpxchg = rewriter.create( - op.getLoc(), adaptor.getPtr(), expected, desired, - getLLVMAtomicOrder(adaptor.getSuccOrder()), - getLLVMAtomicOrder(adaptor.getFailOrder())); - cmpxchg.setWeak(adaptor.getWeak()); - cmpxchg.setVolatile_(adaptor.getIsVolatile()); - - // Check result and apply stores accordingly. - auto old = rewriter.create( - op.getLoc(), cmpxchg.getResult(), 0); - auto cmp = rewriter.create( - op.getLoc(), cmpxchg.getResult(), 1); - - auto extCmp = rewriter.create( - op.getLoc(), rewriter.getI8Type(), cmp); - rewriter.replaceOp(op, {old, extCmp}); - return mlir::success(); - } -}; - -class CIRAtomicXchgLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::AtomicXchg op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // FIXME: add syncscope. - auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); - rewriter.replaceOpWithNewOp( - op, mlir::LLVM::AtomicBinOp::xchg, adaptor.getPtr(), adaptor.getVal(), - llvmOrder); - return mlir::success(); - } -}; - -class CIRAtomicFetchLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::Value buildPostOp(cir::AtomicFetch op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter, - mlir::Value rmwVal, bool isInt) const { - SmallVector atomicOperands = {rmwVal, adaptor.getVal()}; - SmallVector atomicResTys = {rmwVal.getType()}; - return rewriter - .create(op.getLoc(), - rewriter.getStringAttr(getLLVMBinop(op.getBinop(), isInt)), - atomicOperands, atomicResTys, {}) - ->getResult(0); - } - - mlir::Value buildMinMaxPostOp(cir::AtomicFetch op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter, - mlir::Value rmwVal, bool isSigned) const { - auto loc = op.getLoc(); - mlir::LLVM::ICmpPredicate pred; - if (op.getBinop() == cir::AtomicFetchKind::Max) { - pred = isSigned ? mlir::LLVM::ICmpPredicate::sgt - : mlir::LLVM::ICmpPredicate::ugt; - } else { // Min - pred = isSigned ? mlir::LLVM::ICmpPredicate::slt - : mlir::LLVM::ICmpPredicate::ult; - } - - auto cmp = rewriter.create( - loc, mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), pred), - rmwVal, adaptor.getVal()); - return rewriter.create(loc, cmp, rmwVal, - adaptor.getVal()); - } - - llvm::StringLiteral getLLVMBinop(cir::AtomicFetchKind k, bool isInt) const { - switch (k) { - case cir::AtomicFetchKind::Add: - return isInt ? mlir::LLVM::AddOp::getOperationName() - : mlir::LLVM::FAddOp::getOperationName(); - case cir::AtomicFetchKind::Sub: - return isInt ? mlir::LLVM::SubOp::getOperationName() - : mlir::LLVM::FSubOp::getOperationName(); - case cir::AtomicFetchKind::And: - return mlir::LLVM::AndOp::getOperationName(); - case cir::AtomicFetchKind::Xor: - return mlir::LLVM::XOrOp::getOperationName(); - case cir::AtomicFetchKind::Or: - return mlir::LLVM::OrOp::getOperationName(); - case cir::AtomicFetchKind::Nand: - // There's no nand binop in LLVM, this is later fixed with a not. - return mlir::LLVM::AndOp::getOperationName(); - case cir::AtomicFetchKind::Max: - case cir::AtomicFetchKind::Min: - llvm_unreachable("handled in buildMinMaxPostOp"); - } - llvm_unreachable("Unknown atomic fetch opcode"); - } - - mlir::LLVM::AtomicBinOp getLLVMAtomicBinOp(cir::AtomicFetchKind k, bool isInt, - bool isSignedInt) const { - switch (k) { - case cir::AtomicFetchKind::Add: - return isInt ? mlir::LLVM::AtomicBinOp::add - : mlir::LLVM::AtomicBinOp::fadd; - case cir::AtomicFetchKind::Sub: - return isInt ? mlir::LLVM::AtomicBinOp::sub - : mlir::LLVM::AtomicBinOp::fsub; - case cir::AtomicFetchKind::And: - return mlir::LLVM::AtomicBinOp::_and; - case cir::AtomicFetchKind::Xor: - return mlir::LLVM::AtomicBinOp::_xor; - case cir::AtomicFetchKind::Or: - return mlir::LLVM::AtomicBinOp::_or; - case cir::AtomicFetchKind::Nand: - return mlir::LLVM::AtomicBinOp::nand; - case cir::AtomicFetchKind::Max: { - if (!isInt) - return mlir::LLVM::AtomicBinOp::fmax; - return isSignedInt ? mlir::LLVM::AtomicBinOp::max - : mlir::LLVM::AtomicBinOp::umax; - } - case cir::AtomicFetchKind::Min: { - if (!isInt) - return mlir::LLVM::AtomicBinOp::fmin; - return isSignedInt ? mlir::LLVM::AtomicBinOp::min - : mlir::LLVM::AtomicBinOp::umin; - } - } - llvm_unreachable("Unknown atomic fetch opcode"); - } - - mlir::LogicalResult - matchAndRewrite(cir::AtomicFetch op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - - bool isInt, isSignedInt = false; // otherwise it's float. - if (auto intTy = mlir::dyn_cast(op.getVal().getType())) { - isInt = true; - isSignedInt = intTy.isSigned(); - } else if (mlir::isa( - op.getVal().getType())) - isInt = false; - else { - return op.emitError() - << "Unsupported type: " << adaptor.getVal().getType(); - } - - // FIXME: add syncscope. - auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); - auto llvmBinOpc = getLLVMAtomicBinOp(op.getBinop(), isInt, isSignedInt); - auto rmwVal = rewriter.create( - op.getLoc(), llvmBinOpc, adaptor.getPtr(), adaptor.getVal(), llvmOrder); - - mlir::Value result = rmwVal.getRes(); - if (!op.getFetchFirst()) { - if (op.getBinop() == cir::AtomicFetchKind::Max || - op.getBinop() == cir::AtomicFetchKind::Min) - result = buildMinMaxPostOp(op, adaptor, rewriter, rmwVal.getRes(), - isSignedInt); - else - result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt); - - // Compensate lack of nand binop in LLVM IR. - if (op.getBinop() == cir::AtomicFetchKind::Nand) { - auto negOne = rewriter.create( - op.getLoc(), result.getType(), -1); - result = - rewriter.create(op.getLoc(), result, negOne); - } - } - - rewriter.replaceOp(op, result); - return mlir::success(); + case cir::MemOrder::Consume: + case cir::MemOrder::Acquire: + return mlir::LLVM::AtomicOrdering::acquire; + case cir::MemOrder::Release: + return mlir::LLVM::AtomicOrdering::release; + case cir::MemOrder::AcquireRelease: + return mlir::LLVM::AtomicOrdering::acq_rel; + case cir::MemOrder::SequentiallyConsistent: + return mlir::LLVM::AtomicOrdering::seq_cst; } -}; + llvm_unreachable("shouldn't get here"); +} -class CIRByteswapOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite( + cir::AtomicCmpXchg op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto expected = adaptor.getExpected(); + auto desired = adaptor.getDesired(); + + // FIXME: add syncscope. + auto cmpxchg = rewriter.create( + op.getLoc(), adaptor.getPtr(), expected, desired, + getLLVMAtomicOrder(adaptor.getSuccOrder()), + getLLVMAtomicOrder(adaptor.getFailOrder())); + cmpxchg.setWeak(adaptor.getWeak()); + cmpxchg.setVolatile_(adaptor.getIsVolatile()); + + // Check result and apply stores accordingly. + auto old = rewriter.create( + op.getLoc(), cmpxchg.getResult(), 0); + auto cmp = rewriter.create( + op.getLoc(), cmpxchg.getResult(), 1); + + auto extCmp = rewriter.create(op.getLoc(), + rewriter.getI8Type(), cmp); + rewriter.replaceOp(op, {old, extCmp}); + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::ByteswapOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // Note that LLVM intrinsic calls to @llvm.bswap.i* have the same type as - // the operand. +mlir::LogicalResult CIRToLLVMAtomicXchgLowering::matchAndRewrite( + cir::AtomicXchg op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // FIXME: add syncscope. + auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); + rewriter.replaceOpWithNewOp( + op, mlir::LLVM::AtomicBinOp::xchg, adaptor.getPtr(), adaptor.getVal(), + llvmOrder); + return mlir::success(); +} - auto resTy = mlir::cast( - getTypeConverter()->convertType(op.getType())); +mlir::Value CIRToLLVMAtomicFetchLowering::buildPostOp( + cir::AtomicFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal, + bool isInt) const { + SmallVector atomicOperands = {rmwVal, adaptor.getVal()}; + SmallVector atomicResTys = {rmwVal.getType()}; + return rewriter + .create(op.getLoc(), + rewriter.getStringAttr(getLLVMBinop(op.getBinop(), isInt)), + atomicOperands, atomicResTys, {}) + ->getResult(0); +} - std::string llvmIntrinName = "llvm.bswap.i"; - llvmIntrinName.append(std::to_string(resTy.getWidth())); +mlir::Value CIRToLLVMAtomicFetchLowering::buildMinMaxPostOp( + cir::AtomicFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal, + bool isSigned) const { + auto loc = op.getLoc(); + mlir::LLVM::ICmpPredicate pred; + if (op.getBinop() == cir::AtomicFetchKind::Max) { + pred = isSigned ? mlir::LLVM::ICmpPredicate::sgt + : mlir::LLVM::ICmpPredicate::ugt; + } else { // Min + pred = isSigned ? mlir::LLVM::ICmpPredicate::slt + : mlir::LLVM::ICmpPredicate::ult; + } + + auto cmp = rewriter.create( + loc, mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), pred), + rmwVal, adaptor.getVal()); + return rewriter.create(loc, cmp, rmwVal, + adaptor.getVal()); +} - rewriter.replaceOpWithNewOp(op, adaptor.getInput()); +llvm::StringLiteral +CIRToLLVMAtomicFetchLowering::getLLVMBinop(cir::AtomicFetchKind k, + bool isInt) const { + switch (k) { + case cir::AtomicFetchKind::Add: + return isInt ? mlir::LLVM::AddOp::getOperationName() + : mlir::LLVM::FAddOp::getOperationName(); + case cir::AtomicFetchKind::Sub: + return isInt ? mlir::LLVM::SubOp::getOperationName() + : mlir::LLVM::FSubOp::getOperationName(); + case cir::AtomicFetchKind::And: + return mlir::LLVM::AndOp::getOperationName(); + case cir::AtomicFetchKind::Xor: + return mlir::LLVM::XOrOp::getOperationName(); + case cir::AtomicFetchKind::Or: + return mlir::LLVM::OrOp::getOperationName(); + case cir::AtomicFetchKind::Nand: + // There's no nand binop in LLVM, this is later fixed with a not. + return mlir::LLVM::AndOp::getOperationName(); + case cir::AtomicFetchKind::Max: + case cir::AtomicFetchKind::Min: + llvm_unreachable("handled in buildMinMaxPostOp"); + } + llvm_unreachable("Unknown atomic fetch opcode"); +} - return mlir::LogicalResult::success(); - } -}; +mlir::LLVM::AtomicBinOp CIRToLLVMAtomicFetchLowering::getLLVMAtomicBinOp( + cir::AtomicFetchKind k, bool isInt, bool isSignedInt) const { + switch (k) { + case cir::AtomicFetchKind::Add: + return isInt ? mlir::LLVM::AtomicBinOp::add : mlir::LLVM::AtomicBinOp::fadd; + case cir::AtomicFetchKind::Sub: + return isInt ? mlir::LLVM::AtomicBinOp::sub : mlir::LLVM::AtomicBinOp::fsub; + case cir::AtomicFetchKind::And: + return mlir::LLVM::AtomicBinOp::_and; + case cir::AtomicFetchKind::Xor: + return mlir::LLVM::AtomicBinOp::_xor; + case cir::AtomicFetchKind::Or: + return mlir::LLVM::AtomicBinOp::_or; + case cir::AtomicFetchKind::Nand: + return mlir::LLVM::AtomicBinOp::nand; + case cir::AtomicFetchKind::Max: { + if (!isInt) + return mlir::LLVM::AtomicBinOp::fmax; + return isSignedInt ? mlir::LLVM::AtomicBinOp::max + : mlir::LLVM::AtomicBinOp::umax; + } + case cir::AtomicFetchKind::Min: { + if (!isInt) + return mlir::LLVM::AtomicBinOp::fmin; + return isSignedInt ? mlir::LLVM::AtomicBinOp::min + : mlir::LLVM::AtomicBinOp::umin; + } + } + llvm_unreachable("Unknown atomic fetch opcode"); +} -class CIRRotateOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::RotateOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // Note that LLVM intrinsic calls to @llvm.fsh{r,l}.i* have the same type as - // the operand. - auto src = adaptor.getSrc(); - if (op.getLeft()) - rewriter.replaceOpWithNewOp(op, src, src, - adaptor.getAmt()); +mlir::LogicalResult CIRToLLVMAtomicFetchLowering::matchAndRewrite( + cir::AtomicFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + + bool isInt, isSignedInt = false; // otherwise it's float. + if (auto intTy = mlir::dyn_cast(op.getVal().getType())) { + isInt = true; + isSignedInt = intTy.isSigned(); + } else if (mlir::isa(op.getVal().getType())) + isInt = false; + else { + return op.emitError() << "Unsupported type: " << adaptor.getVal().getType(); + } + + // FIXME: add syncscope. + auto llvmOrder = getLLVMAtomicOrder(adaptor.getMemOrder()); + auto llvmBinOpc = getLLVMAtomicBinOp(op.getBinop(), isInt, isSignedInt); + auto rmwVal = rewriter.create( + op.getLoc(), llvmBinOpc, adaptor.getPtr(), adaptor.getVal(), llvmOrder); + + mlir::Value result = rmwVal.getRes(); + if (!op.getFetchFirst()) { + if (op.getBinop() == cir::AtomicFetchKind::Max || + op.getBinop() == cir::AtomicFetchKind::Min) + result = buildMinMaxPostOp(op, adaptor, rewriter, rmwVal.getRes(), + isSignedInt); else - rewriter.replaceOpWithNewOp(op, src, src, - adaptor.getAmt()); - return mlir::LogicalResult::success(); - } -}; + result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt); -class CIRSelectOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + // Compensate lack of nand binop in LLVM IR. + if (op.getBinop() == cir::AtomicFetchKind::Nand) { + auto negOne = rewriter.create( + op.getLoc(), result.getType(), -1); + result = rewriter.create(op.getLoc(), result, negOne); + } + } - mlir::LogicalResult - matchAndRewrite(cir::SelectOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto getConstantBool = [](mlir::Value value) -> std::optional { - auto definingOp = - mlir::dyn_cast_if_present(value.getDefiningOp()); - if (!definingOp) - return std::nullopt; + rewriter.replaceOp(op, result); + return mlir::success(); +} - auto constValue = mlir::dyn_cast(definingOp.getValue()); - if (!constValue) - return std::nullopt; +mlir::LogicalResult CIRToLLVMByteswapOpLowering::matchAndRewrite( + cir::ByteswapOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // Note that LLVM intrinsic calls to @llvm.bswap.i* have the same type as + // the operand. - return constValue.getValue(); - }; + auto resTy = mlir::cast( + getTypeConverter()->convertType(op.getType())); - // Two special cases in the LLVMIR codegen of select op: - // - select %0, %1, false => and %0, %1 - // - select %0, true, %1 => or %0, %1 - auto trueValue = op.getTrueValue(); - auto falseValue = op.getFalseValue(); - if (mlir::isa(trueValue.getType())) { - if (std::optional falseValueBool = getConstantBool(falseValue); - falseValueBool.has_value() && !*falseValueBool) { - // select %0, %1, false => and %0, %1 - rewriter.replaceOpWithNewOp( - op, adaptor.getCondition(), adaptor.getTrueValue()); - return mlir::success(); - } - if (std::optional trueValueBool = getConstantBool(trueValue); - trueValueBool.has_value() && *trueValueBool) { - // select %0, true, %1 => or %0, %1 - rewriter.replaceOpWithNewOp( - op, adaptor.getCondition(), adaptor.getFalseValue()); - return mlir::success(); - } - } + std::string llvmIntrinName = "llvm.bswap.i"; + llvmIntrinName.append(std::to_string(resTy.getWidth())); - auto llvmCondition = rewriter.create( - op.getLoc(), mlir::IntegerType::get(op->getContext(), 1), - adaptor.getCondition()); - rewriter.replaceOpWithNewOp( - op, llvmCondition, adaptor.getTrueValue(), adaptor.getFalseValue()); + rewriter.replaceOpWithNewOp(op, adaptor.getInput()); - return mlir::success(); - } -}; + return mlir::LogicalResult::success(); +} -class CIRBrOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMRotateOpLowering::matchAndRewrite( + cir::RotateOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // Note that LLVM intrinsic calls to @llvm.fsh{r,l}.i* have the same type as + // the operand. + auto src = adaptor.getSrc(); + if (op.getLeft()) + rewriter.replaceOpWithNewOp(op, src, src, + adaptor.getAmt()); + else + rewriter.replaceOpWithNewOp(op, src, src, + adaptor.getAmt()); + return mlir::LogicalResult::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::BrOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, adaptor.getOperands(), - op.getDest()); - return mlir::LogicalResult::success(); - } -}; +mlir::LogicalResult CIRToLLVMSelectOpLowering::matchAndRewrite( + cir::SelectOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto getConstantBool = [](mlir::Value value) -> std::optional { + auto definingOp = + mlir::dyn_cast_if_present(value.getDefiningOp()); + if (!definingOp) + return std::nullopt; -class CIRGetMemberOpLowering - : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; + auto constValue = mlir::dyn_cast(definingOp.getValue()); + if (!constValue) + return std::nullopt; - mlir::LogicalResult - matchAndRewrite(cir::GetMemberOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto llResTy = getTypeConverter()->convertType(op.getType()); - const auto structTy = - mlir::cast(op.getAddrTy().getPointee()); - assert(structTy && "expected struct type"); + return constValue.getValue(); + }; - switch (structTy.getKind()) { - case cir::StructType::Struct: - case cir::StructType::Class: { - // Since the base address is a pointer to an aggregate, the first offset - // is always zero. The second offset tell us which member it will access. - llvm::SmallVector offset{0, op.getIndex()}; - const auto elementTy = getTypeConverter()->convertType(structTy); - rewriter.replaceOpWithNewOp(op, llResTy, elementTy, - adaptor.getAddr(), offset); + // Two special cases in the LLVMIR codegen of select op: + // - select %0, %1, false => and %0, %1 + // - select %0, true, %1 => or %0, %1 + auto trueValue = op.getTrueValue(); + auto falseValue = op.getFalseValue(); + if (mlir::isa(trueValue.getType())) { + if (std::optional falseValueBool = getConstantBool(falseValue); + falseValueBool.has_value() && !*falseValueBool) { + // select %0, %1, false => and %0, %1 + rewriter.replaceOpWithNewOp(op, adaptor.getCondition(), + adaptor.getTrueValue()); return mlir::success(); } - case cir::StructType::Union: - // Union members share the address space, so we just need a bitcast to - // conform to type-checking. - rewriter.replaceOpWithNewOp(op, llResTy, - adaptor.getAddr()); + if (std::optional trueValueBool = getConstantBool(trueValue); + trueValueBool.has_value() && *trueValueBool) { + // select %0, true, %1 => or %0, %1 + rewriter.replaceOpWithNewOp(op, adaptor.getCondition(), + adaptor.getFalseValue()); return mlir::success(); } } -}; - -class CIRGetRuntimeMemberOpLowering - : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::GetRuntimeMemberOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto llvmResTy = getTypeConverter()->convertType(op.getType()); - auto llvmElementTy = mlir::IntegerType::get(op.getContext(), 8); - - rewriter.replaceOpWithNewOp( - op, llvmResTy, llvmElementTy, adaptor.getAddr(), adaptor.getMember()); - return mlir::success(); - } -}; - -class CIRPtrDiffOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - uint64_t getTypeSize(mlir::Type type, mlir::Operation &op) const { - mlir::DataLayout layout(op.getParentOfType()); - // For LLVM purposes we treat void as u8. - if (isa(type)) - type = cir::IntType::get(type.getContext(), 8, /*isSigned=*/false); - return llvm::divideCeil(layout.getTypeSizeInBits(type), 8); - } - - mlir::LogicalResult - matchAndRewrite(cir::PtrDiffOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto dstTy = mlir::cast(op.getType()); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); - auto lhs = rewriter.create(op.getLoc(), llvmDstTy, - adaptor.getLhs()); - auto rhs = rewriter.create(op.getLoc(), llvmDstTy, - adaptor.getRhs()); + auto llvmCondition = rewriter.create( + op.getLoc(), mlir::IntegerType::get(op->getContext(), 1), + adaptor.getCondition()); + rewriter.replaceOpWithNewOp( + op, llvmCondition, adaptor.getTrueValue(), adaptor.getFalseValue()); - auto diff = - rewriter.create(op.getLoc(), llvmDstTy, lhs, rhs); - - auto ptrTy = mlir::cast(op.getLhs().getType()); - auto typeSize = getTypeSize(ptrTy.getPointee(), *op); + return mlir::success(); +} - // Avoid silly division by 1. - auto resultVal = diff.getResult(); - if (typeSize != 1) { - auto typeSizeVal = rewriter.create( - op.getLoc(), llvmDstTy, mlir::IntegerAttr::get(llvmDstTy, typeSize)); +mlir::LogicalResult CIRToLLVMBrOpLowering::matchAndRewrite( + cir::BrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + rewriter.replaceOpWithNewOp(op, adaptor.getOperands(), + op.getDest()); + return mlir::LogicalResult::success(); +} - if (dstTy.isUnsigned()) - resultVal = rewriter.create(op.getLoc(), llvmDstTy, - diff, typeSizeVal); - else - resultVal = rewriter.create(op.getLoc(), llvmDstTy, - diff, typeSizeVal); - } - rewriter.replaceOp(op, resultVal); +mlir::LogicalResult CIRToLLVMGetMemberOpLowering::matchAndRewrite( + cir::GetMemberOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto llResTy = getTypeConverter()->convertType(op.getType()); + const auto structTy = + mlir::cast(op.getAddrTy().getPointee()); + assert(structTy && "expected struct type"); + + switch (structTy.getKind()) { + case cir::StructType::Struct: + case cir::StructType::Class: { + // Since the base address is a pointer to an aggregate, the first offset + // is always zero. The second offset tell us which member it will access. + llvm::SmallVector offset{0, op.getIndex()}; + const auto elementTy = getTypeConverter()->convertType(structTy); + rewriter.replaceOpWithNewOp(op, llResTy, elementTy, + adaptor.getAddr(), offset); return mlir::success(); } -}; - -class CIRExpectOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::ExpectOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - std::optional prob = op.getProb(); - if (!prob) - rewriter.replaceOpWithNewOp(op, adaptor.getVal(), - adaptor.getExpected()); - else - rewriter.replaceOpWithNewOp( - op, adaptor.getVal(), adaptor.getExpected(), prob.value()); + case cir::StructType::Union: + // Union members share the address space, so we just need a bitcast to + // conform to type-checking. + rewriter.replaceOpWithNewOp(op, llResTy, + adaptor.getAddr()); return mlir::success(); } -}; - -class CIRVTableAddrPointOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::VTableAddrPointOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - const auto *converter = getTypeConverter(); - auto targetType = converter->convertType(op.getType()); - mlir::Value symAddr = op.getSymAddr(); - llvm::SmallVector offsets; - mlir::Type eltType; - if (!symAddr) { - symAddr = getValueForVTableSymbol(op, rewriter, getTypeConverter(), - op.getNameAttr(), eltType); - offsets = llvm::SmallVector{ - 0, op.getVtableIndex(), op.getAddressPointIndex()}; - } else { - // Get indirect vtable address point retrieval - symAddr = adaptor.getSymAddr(); - eltType = converter->convertType(symAddr.getType()); - offsets = - llvm::SmallVector{op.getAddressPointIndex()}; - } - - assert(eltType && "Shouldn't ever be missing an eltType here"); - rewriter.replaceOpWithNewOp(op, targetType, eltType, - symAddr, offsets, true); +} - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMGetRuntimeMemberOpLowering::matchAndRewrite( + cir::GetRuntimeMemberOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto llvmResTy = getTypeConverter()->convertType(op.getType()); + auto llvmElementTy = mlir::IntegerType::get(op.getContext(), 8); -class CIRStackSaveLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + rewriter.replaceOpWithNewOp( + op, llvmResTy, llvmElementTy, adaptor.getAddr(), adaptor.getMember()); + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::StackSaveOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto ptrTy = getTypeConverter()->convertType(op.getType()); - rewriter.replaceOpWithNewOp(op, ptrTy); - return mlir::success(); - } -}; +uint64_t CIRToLLVMPtrDiffOpLowering::getTypeSize(mlir::Type type, + mlir::Operation &op) const { + mlir::DataLayout layout(op.getParentOfType()); + // For LLVM purposes we treat void as u8. + if (isa(type)) + type = cir::IntType::get(type.getContext(), 8, /*isSigned=*/false); + return llvm::divideCeil(layout.getTypeSizeInBits(type), 8); +} -#define GET_BUILTIN_LOWERING_CLASSES -#include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" +mlir::LogicalResult CIRToLLVMPtrDiffOpLowering::matchAndRewrite( + cir::PtrDiffOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto dstTy = mlir::cast(op.getType()); + auto llvmDstTy = getTypeConverter()->convertType(dstTy); -class CIRUnreachableLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + auto lhs = rewriter.create(op.getLoc(), llvmDstTy, + adaptor.getLhs()); + auto rhs = rewriter.create(op.getLoc(), llvmDstTy, + adaptor.getRhs()); - mlir::LogicalResult - matchAndRewrite(cir::UnreachableOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op); - return mlir::success(); - } -}; + auto diff = + rewriter.create(op.getLoc(), llvmDstTy, lhs, rhs); -class CIRTrapLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + auto ptrTy = mlir::cast(op.getLhs().getType()); + auto typeSize = getTypeSize(ptrTy.getPointee(), *op); - mlir::LogicalResult - matchAndRewrite(cir::TrapOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto loc = op->getLoc(); - rewriter.eraseOp(op); + // Avoid silly division by 1. + auto resultVal = diff.getResult(); + if (typeSize != 1) { + auto typeSizeVal = rewriter.create( + op.getLoc(), llvmDstTy, mlir::IntegerAttr::get(llvmDstTy, typeSize)); - rewriter.create(loc); + if (dstTy.isUnsigned()) + resultVal = rewriter.create(op.getLoc(), llvmDstTy, + diff, typeSizeVal); + else + resultVal = rewriter.create(op.getLoc(), llvmDstTy, + diff, typeSizeVal); + } + rewriter.replaceOp(op, resultVal); + return mlir::success(); +} - // Note that the call to llvm.trap is not a terminator in LLVM dialect. - // So we must emit an additional llvm.unreachable to terminate the current - // block. - rewriter.create(loc); +mlir::LogicalResult CIRToLLVMExpectOpLowering::matchAndRewrite( + cir::ExpectOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + std::optional prob = op.getProb(); + if (!prob) + rewriter.replaceOpWithNewOp(op, adaptor.getVal(), + adaptor.getExpected()); + else + rewriter.replaceOpWithNewOp( + op, adaptor.getVal(), adaptor.getExpected(), prob.value()); + return mlir::success(); +} - return mlir::success(); +mlir::LogicalResult CIRToLLVMVTableAddrPointOpLowering::matchAndRewrite( + cir::VTableAddrPointOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + const auto *converter = getTypeConverter(); + auto targetType = converter->convertType(op.getType()); + mlir::Value symAddr = op.getSymAddr(); + llvm::SmallVector offsets; + mlir::Type eltType; + if (!symAddr) { + symAddr = getValueForVTableSymbol(op, rewriter, getTypeConverter(), + op.getNameAttr(), eltType); + offsets = llvm::SmallVector{0, op.getVtableIndex(), + op.getAddressPointIndex()}; + } else { + // Get indirect vtable address point retrieval + symAddr = adaptor.getSymAddr(); + eltType = converter->convertType(symAddr.getType()); + offsets = llvm::SmallVector{op.getAddressPointIndex()}; } -}; -class CIRInlineAsmOpLowering - : public mlir::OpConversionPattern { + assert(eltType && "Shouldn't ever be missing an eltType here"); + rewriter.replaceOpWithNewOp(op, targetType, eltType, + symAddr, offsets, true); - using mlir::OpConversionPattern::OpConversionPattern; + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::InlineAsmOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Type llResTy; - if (op.getNumResults()) - llResTy = getTypeConverter()->convertType(op.getType(0)); +mlir::LogicalResult CIRToLLVMStackSaveOpLowering::matchAndRewrite( + cir::StackSaveOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto ptrTy = getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, ptrTy); + return mlir::success(); +} - auto dialect = op.getAsmFlavor(); - auto llDialect = dialect == cir::AsmFlavor::x86_att - ? mlir::LLVM::AsmDialect::AD_ATT - : mlir::LLVM::AsmDialect::AD_Intel; +#define GET_BUILTIN_LOWERING_CLASSES_DEF +#include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" +#undef GET_BUILTIN_LOWERING_CLASSES_DEF - std::vector opAttrs; - auto llvmAttrName = mlir::LLVM::InlineAsmOp::getElementTypeAttrName(); +mlir::LogicalResult CIRToLLVMUnreachableOpLowering::matchAndRewrite( + cir::UnreachableOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + rewriter.replaceOpWithNewOp(op); + return mlir::success(); +} - // this is for the lowering to LLVM from LLVm dialect. Otherwise, if we - // don't have the result (i.e. void type as a result of operation), the - // element type attribute will be attached to the whole instruction, but not - // to the operand - if (!op.getNumResults()) - opAttrs.push_back(mlir::Attribute()); +mlir::LogicalResult CIRToLLVMTrapOpLowering::matchAndRewrite( + cir::TrapOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto loc = op->getLoc(); + rewriter.eraseOp(op); - llvm::SmallVector llvmOperands; - llvm::SmallVector cirOperands; - for (size_t i = 0; i < op.getAsmOperands().size(); ++i) { - auto llvmOps = adaptor.getAsmOperands()[i]; - auto cirOps = op.getAsmOperands()[i]; - llvmOperands.insert(llvmOperands.end(), llvmOps.begin(), llvmOps.end()); - cirOperands.insert(cirOperands.end(), cirOps.begin(), cirOps.end()); - } + rewriter.create(loc); - // so far we infer the llvm dialect element type attr from - // CIR operand type. - for (std::size_t i = 0; i < op.getOperandAttrs().size(); ++i) { - if (!op.getOperandAttrs()[i]) { - opAttrs.push_back(mlir::Attribute()); - continue; - } + // Note that the call to llvm.trap is not a terminator in LLVM dialect. + // So we must emit an additional llvm.unreachable to terminate the current + // block. + rewriter.create(loc); - std::vector attrs; - auto typ = cast(cirOperands[i].getType()); - auto typAttr = mlir::TypeAttr::get( - getTypeConverter()->convertType(typ.getPointee())); + return mlir::success(); +} - attrs.push_back(rewriter.getNamedAttr(llvmAttrName, typAttr)); - auto newDict = rewriter.getDictionaryAttr(attrs); - opAttrs.push_back(newDict); +mlir::LogicalResult CIRToLLVMInlineAsmOpLowering::matchAndRewrite( + cir::InlineAsmOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Type llResTy; + if (op.getNumResults()) + llResTy = getTypeConverter()->convertType(op.getType(0)); + + auto dialect = op.getAsmFlavor(); + auto llDialect = dialect == cir::AsmFlavor::x86_att + ? mlir::LLVM::AsmDialect::AD_ATT + : mlir::LLVM::AsmDialect::AD_Intel; + + std::vector opAttrs; + auto llvmAttrName = mlir::LLVM::InlineAsmOp::getElementTypeAttrName(); + + // this is for the lowering to LLVM from LLVm dialect. Otherwise, if we + // don't have the result (i.e. void type as a result of operation), the + // element type attribute will be attached to the whole instruction, but not + // to the operand + if (!op.getNumResults()) + opAttrs.push_back(mlir::Attribute()); + + llvm::SmallVector llvmOperands; + llvm::SmallVector cirOperands; + for (size_t i = 0; i < op.getAsmOperands().size(); ++i) { + auto llvmOps = adaptor.getAsmOperands()[i]; + auto cirOps = op.getAsmOperands()[i]; + llvmOperands.insert(llvmOperands.end(), llvmOps.begin(), llvmOps.end()); + cirOperands.insert(cirOperands.end(), cirOps.begin(), cirOps.end()); + } + + // so far we infer the llvm dialect element type attr from + // CIR operand type. + for (std::size_t i = 0; i < op.getOperandAttrs().size(); ++i) { + if (!op.getOperandAttrs()[i]) { + opAttrs.push_back(mlir::Attribute()); + continue; } - rewriter.replaceOpWithNewOp( - op, llResTy, llvmOperands, op.getAsmStringAttr(), - op.getConstraintsAttr(), op.getSideEffectsAttr(), - /*is_align_stack*/ mlir::UnitAttr(), - mlir::LLVM::AsmDialectAttr::get(getContext(), llDialect), - rewriter.getArrayAttr(opAttrs)); + std::vector attrs; + auto typ = cast(cirOperands[i].getType()); + auto typAttr = + mlir::TypeAttr::get(getTypeConverter()->convertType(typ.getPointee())); - return mlir::success(); + attrs.push_back(rewriter.getNamedAttr(llvmAttrName, typAttr)); + auto newDict = rewriter.getDictionaryAttr(attrs); + opAttrs.push_back(newDict); } -}; -class CIRPrefetchLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + rewriter.replaceOpWithNewOp( + op, llResTy, llvmOperands, op.getAsmStringAttr(), op.getConstraintsAttr(), + op.getSideEffectsAttr(), + /*is_align_stack*/ mlir::UnitAttr(), + mlir::LLVM::AsmDialectAttr::get(getContext(), llDialect), + rewriter.getArrayAttr(opAttrs)); - mlir::LogicalResult - matchAndRewrite(cir::PrefetchOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp( - op, adaptor.getAddr(), adaptor.getIsWrite(), adaptor.getLocality(), - /*DataCache*/ 1); - return mlir::success(); - } -}; + return mlir::success(); +} -class CIRSetBitfieldLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMPrefetchOpLowering::matchAndRewrite( + cir::PrefetchOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + rewriter.replaceOpWithNewOp( + op, adaptor.getAddr(), adaptor.getIsWrite(), adaptor.getLocality(), + /*DataCache*/ 1); + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::SetBitfieldOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint(op); +mlir::LogicalResult CIRToLLVMSetBitfieldOpLowering::matchAndRewrite( + cir::SetBitfieldOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); - auto info = op.getBitfieldInfo(); - auto size = info.getSize(); - auto offset = info.getOffset(); - auto storageType = info.getStorageType(); - auto context = storageType.getContext(); + auto info = op.getBitfieldInfo(); + auto size = info.getSize(); + auto offset = info.getOffset(); + auto storageType = info.getStorageType(); + auto context = storageType.getContext(); - unsigned storageSize = 0; + unsigned storageSize = 0; - if (auto arTy = mlir::dyn_cast(storageType)) - storageSize = arTy.getSize() * 8; - else if (auto intTy = mlir::dyn_cast(storageType)) - storageSize = intTy.getWidth(); - else - llvm_unreachable( - "Either ArrayType or IntType expected for bitfields storage"); + if (auto arTy = mlir::dyn_cast(storageType)) + storageSize = arTy.getSize() * 8; + else if (auto intTy = mlir::dyn_cast(storageType)) + storageSize = intTy.getWidth(); + else + llvm_unreachable( + "Either ArrayType or IntType expected for bitfields storage"); - auto intType = mlir::IntegerType::get(context, storageSize); - auto srcVal = createIntCast(rewriter, adaptor.getSrc(), intType); - auto srcWidth = storageSize; - auto resultVal = srcVal; + auto intType = mlir::IntegerType::get(context, storageSize); + auto srcVal = createIntCast(rewriter, adaptor.getSrc(), intType); + auto srcWidth = storageSize; + auto resultVal = srcVal; - if (storageSize != size) { - assert(storageSize > size && "Invalid bitfield size."); + if (storageSize != size) { + assert(storageSize > size && "Invalid bitfield size."); - mlir::Value val = rewriter.create( - op.getLoc(), intType, adaptor.getAddr(), /* alignment */ 0, - op.getIsVolatile()); + mlir::Value val = rewriter.create( + op.getLoc(), intType, adaptor.getAddr(), /* alignment */ 0, + op.getIsVolatile()); - srcVal = createAnd(rewriter, srcVal, - llvm::APInt::getLowBitsSet(srcWidth, size)); - resultVal = srcVal; - srcVal = createShL(rewriter, srcVal, offset); + srcVal = + createAnd(rewriter, srcVal, llvm::APInt::getLowBitsSet(srcWidth, size)); + resultVal = srcVal; + srcVal = createShL(rewriter, srcVal, offset); - // Mask out the original value. - val = - createAnd(rewriter, val, + // Mask out the original value. + val = createAnd(rewriter, val, ~llvm::APInt::getBitsSet(srcWidth, offset, offset + size)); - // Or together the unchanged values and the source value. - srcVal = rewriter.create(op.getLoc(), val, srcVal); - } - - rewriter.create(op.getLoc(), srcVal, adaptor.getAddr(), - /* alignment */ 0, op.getIsVolatile()); - - auto resultTy = getTypeConverter()->convertType(op.getType()); - - resultVal = createIntCast(rewriter, resultVal, - mlir::cast(resultTy)); - - if (info.getIsSigned()) { - assert(size <= storageSize); - unsigned highBits = storageSize - size; - - if (highBits) { - resultVal = createShL(rewriter, resultVal, highBits); - resultVal = createAShR(rewriter, resultVal, highBits); - } - } - - rewriter.replaceOp(op, resultVal); - return mlir::success(); + // Or together the unchanged values and the source value. + srcVal = rewriter.create(op.getLoc(), val, srcVal); } -}; - -class CIRGetBitfieldLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - mlir::LogicalResult - matchAndRewrite(cir::GetBitfieldOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.create(op.getLoc(), srcVal, adaptor.getAddr(), + /* alignment */ 0, op.getIsVolatile()); - mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint(op); - - auto info = op.getBitfieldInfo(); - auto size = info.getSize(); - auto offset = info.getOffset(); - auto storageType = info.getStorageType(); - auto context = storageType.getContext(); - unsigned storageSize = 0; - - if (auto arTy = mlir::dyn_cast(storageType)) - storageSize = arTy.getSize() * 8; - else if (auto intTy = mlir::dyn_cast(storageType)) - storageSize = intTy.getWidth(); - else - llvm_unreachable( - "Either ArrayType or IntType expected for bitfields storage"); + auto resultTy = getTypeConverter()->convertType(op.getType()); - auto intType = mlir::IntegerType::get(context, storageSize); + resultVal = createIntCast(rewriter, resultVal, + mlir::cast(resultTy)); - mlir::Value val = rewriter.create( - op.getLoc(), intType, adaptor.getAddr(), 0, op.getIsVolatile()); - val = rewriter.create(op.getLoc(), intType, val); - - if (info.getIsSigned()) { - assert(static_cast(offset + size) <= storageSize); - unsigned highBits = storageSize - offset - size; - val = createShL(rewriter, val, highBits); - val = createAShR(rewriter, val, offset + highBits); - } else { - val = createLShR(rewriter, val, offset); + if (info.getIsSigned()) { + assert(size <= storageSize); + unsigned highBits = storageSize - size; - if (static_cast(offset) + size < storageSize) - val = createAnd(rewriter, val, - llvm::APInt::getLowBitsSet(storageSize, size)); + if (highBits) { + resultVal = createShL(rewriter, resultVal, highBits); + resultVal = createAShR(rewriter, resultVal, highBits); } - - auto resTy = getTypeConverter()->convertType(op.getType()); - auto newOp = - createIntCast(rewriter, val, mlir::cast(resTy), - info.getIsSigned()); - rewriter.replaceOp(op, newOp); - return mlir::success(); - } -}; - -class CIRIsConstantOpLowering - : public mlir::OpConversionPattern { - - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::IsConstantOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // FIXME(cir): llvm.intr.is.constant returns i1 value but the LLVM Lowering - // expects that cir.bool type will be lowered as i8 type. - // So we have to insert zext here. - auto isConstantOP = rewriter.create( - op.getLoc(), adaptor.getVal()); - rewriter.replaceOpWithNewOp(op, rewriter.getI8Type(), - isConstantOP); - return mlir::success(); } -}; -class CIRCmpThreeWayOpLowering - : public mlir::OpConversionPattern { -public: - using mlir::OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::CmpThreeWayOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - if (!op.isIntegralComparison() || !op.isStrongOrdering()) { - op.emitError() << "unsupported three-way comparison type"; - return mlir::failure(); - } + rewriter.replaceOp(op, resultVal); + return mlir::success(); +} - auto cmpInfo = op.getInfo(); - assert(cmpInfo.getLt() == -1 && cmpInfo.getEq() == 0 && - cmpInfo.getGt() == 1); +mlir::LogicalResult CIRToLLVMGetBitfieldOpLowering::matchAndRewrite( + cir::GetBitfieldOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + + auto info = op.getBitfieldInfo(); + auto size = info.getSize(); + auto offset = info.getOffset(); + auto storageType = info.getStorageType(); + auto context = storageType.getContext(); + unsigned storageSize = 0; + + if (auto arTy = mlir::dyn_cast(storageType)) + storageSize = arTy.getSize() * 8; + else if (auto intTy = mlir::dyn_cast(storageType)) + storageSize = intTy.getWidth(); + else + llvm_unreachable( + "Either ArrayType or IntType expected for bitfields storage"); + + auto intType = mlir::IntegerType::get(context, storageSize); + + mlir::Value val = rewriter.create( + op.getLoc(), intType, adaptor.getAddr(), 0, op.getIsVolatile()); + val = rewriter.create(op.getLoc(), intType, val); + + if (info.getIsSigned()) { + assert(static_cast(offset + size) <= storageSize); + unsigned highBits = storageSize - offset - size; + val = createShL(rewriter, val, highBits); + val = createAShR(rewriter, val, offset + highBits); + } else { + val = createLShR(rewriter, val, offset); - auto operandTy = mlir::cast(op.getLhs().getType()); - auto resultTy = op.getType(); - auto llvmIntrinsicName = getLLVMIntrinsicName( - operandTy.isSigned(), operandTy.getWidth(), resultTy.getWidth()); + if (static_cast(offset) + size < storageSize) + val = createAnd(rewriter, val, + llvm::APInt::getLowBitsSet(storageSize, size)); + } - rewriter.setInsertionPoint(op); + auto resTy = getTypeConverter()->convertType(op.getType()); + auto newOp = createIntCast( + rewriter, val, mlir::cast(resTy), info.getIsSigned()); + rewriter.replaceOp(op, newOp); + return mlir::success(); +} - auto llvmLhs = adaptor.getLhs(); - auto llvmRhs = adaptor.getRhs(); - auto llvmResultTy = getTypeConverter()->convertType(resultTy); - auto callIntrinsicOp = - createCallLLVMIntrinsicOp(rewriter, op.getLoc(), llvmIntrinsicName, - llvmResultTy, {llvmLhs, llvmRhs}); +mlir::LogicalResult CIRToLLVMIsConstantOpLowering::matchAndRewrite( + cir::IsConstantOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // FIXME(cir): llvm.intr.is.constant returns i1 value but the LLVM Lowering + // expects that cir.bool type will be lowered as i8 type. + // So we have to insert zext here. + auto isConstantOP = + rewriter.create(op.getLoc(), adaptor.getVal()); + rewriter.replaceOpWithNewOp(op, rewriter.getI8Type(), + isConstantOP); + return mlir::success(); +} - rewriter.replaceOp(op, callIntrinsicOp); - return mlir::success(); +mlir::LogicalResult CIRToLLVMCmpThreeWayOpLowering::matchAndRewrite( + cir::CmpThreeWayOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + if (!op.isIntegralComparison() || !op.isStrongOrdering()) { + op.emitError() << "unsupported three-way comparison type"; + return mlir::failure(); } -private: - static std::string getLLVMIntrinsicName(bool signedCmp, unsigned operandWidth, - unsigned resultWidth) { - // The intrinsic's name takes the form: - // `llvm..i.i` + auto cmpInfo = op.getInfo(); + assert(cmpInfo.getLt() == -1 && cmpInfo.getEq() == 0 && cmpInfo.getGt() == 1); - std::string result = "llvm."; + auto operandTy = mlir::cast(op.getLhs().getType()); + auto resultTy = op.getType(); + auto llvmIntrinsicName = getLLVMIntrinsicName( + operandTy.isSigned(), operandTy.getWidth(), resultTy.getWidth()); - if (signedCmp) - result.append("scmp."); - else - result.append("ucmp."); + rewriter.setInsertionPoint(op); - // Result type part. - result.push_back('i'); - result.append(std::to_string(resultWidth)); - result.push_back('.'); + auto llvmLhs = adaptor.getLhs(); + auto llvmRhs = adaptor.getRhs(); + auto llvmResultTy = getTypeConverter()->convertType(resultTy); + auto callIntrinsicOp = + createCallLLVMIntrinsicOp(rewriter, op.getLoc(), llvmIntrinsicName, + llvmResultTy, {llvmLhs, llvmRhs}); - // Operand type part. - result.push_back('i'); - result.append(std::to_string(operandWidth)); + rewriter.replaceOp(op, callIntrinsicOp); + return mlir::success(); +} - return result; - } -}; +std::string CIRToLLVMCmpThreeWayOpLowering::getLLVMIntrinsicName( + bool signedCmp, unsigned operandWidth, unsigned resultWidth) { + // The intrinsic's name takes the form: + // `llvm..i.i` -class CIRReturnAddrOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + std::string result = "llvm."; - mlir::LogicalResult - matchAndRewrite(cir::ReturnAddrOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); - replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm.returnaddress", - llvmPtrTy, adaptor.getOperands()); - return mlir::success(); - } -}; + if (signedCmp) + result.append("scmp."); + else + result.append("ucmp."); -class CIRClearCacheOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::ClearCacheOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto begin = adaptor.getBegin(); - auto end = adaptor.getEnd(); - auto intrinNameAttr = - mlir::StringAttr::get(op.getContext(), "llvm.clear_cache"); - rewriter.replaceOpWithNewOp( - op, mlir::Type{}, intrinNameAttr, mlir::ValueRange{begin, end}); + // Result type part. + result.push_back('i'); + result.append(std::to_string(resultWidth)); + result.push_back('.'); - return mlir::success(); - } -}; + // Operand type part. + result.push_back('i'); + result.append(std::to_string(operandWidth)); -class CIREhTypeIdOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + return result; +} - mlir::LogicalResult - matchAndRewrite(cir::EhTypeIdOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Value addrOp = rewriter.create( - op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), - op.getTypeSymAttr()); - mlir::LLVM::CallIntrinsicOp newOp = createCallLLVMIntrinsicOp( - rewriter, op.getLoc(), "llvm.eh.typeid.for.p0", rewriter.getI32Type(), - mlir::ValueRange{addrOp}); - rewriter.replaceOp(op, newOp); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMReturnAddrOpLowering::matchAndRewrite( + cir::ReturnAddrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm.returnaddress", + llvmPtrTy, adaptor.getOperands()); + return mlir::success(); +} -class CIRCatchParamOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::CatchParamOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - if (op.isBegin()) { - // Get or create `declare ptr @__cxa_begin_catch(ptr)` - StringRef fnName = "__cxa_begin_catch"; - auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); - auto fnTy = mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {llvmPtrTy}, - /*isVarArg=*/false); - getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); - rewriter.replaceOpWithNewOp( - op, mlir::TypeRange{llvmPtrTy}, fnName, - mlir::ValueRange{adaptor.getExceptionPtr()}); - return mlir::success(); - } else if (op.isEnd()) { - StringRef fnName = "__cxa_end_catch"; - auto fnTy = mlir::LLVM::LLVMFunctionType::get( - mlir::LLVM::LLVMVoidType::get(rewriter.getContext()), {}, - /*isVarArg=*/false); - getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); - rewriter.create(op.getLoc(), mlir::TypeRange{}, - fnName, mlir::ValueRange{}); - rewriter.eraseOp(op); - return mlir::success(); - } - llvm_unreachable("only begin/end supposed to make to lowering stage"); - return mlir::failure(); - } -}; +mlir::LogicalResult CIRToLLVMClearCacheOpLowering::matchAndRewrite( + cir::ClearCacheOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto begin = adaptor.getBegin(); + auto end = adaptor.getEnd(); + auto intrinNameAttr = + mlir::StringAttr::get(op.getContext(), "llvm.clear_cache"); + rewriter.replaceOpWithNewOp( + op, mlir::Type{}, intrinNameAttr, mlir::ValueRange{begin, end}); -class CIRResumeOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::ResumeOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // %lpad.val = insertvalue { ptr, i32 } poison, ptr %exception_ptr, 0 - // %lpad.val2 = insertvalue { ptr, i32 } %lpad.val, i32 %selector, 1 - // resume { ptr, i32 } %lpad.val2 - SmallVector slotIdx = {0}; - SmallVector selectorIdx = {1}; - auto llvmLandingPadStructTy = getLLVMLandingPadStructTy(rewriter); - mlir::Value poison = rewriter.create( - op.getLoc(), llvmLandingPadStructTy); - - mlir::Value slot = rewriter.create( - op.getLoc(), poison, adaptor.getExceptionPtr(), slotIdx); - mlir::Value selector = rewriter.create( - op.getLoc(), slot, adaptor.getTypeId(), selectorIdx); - - rewriter.replaceOpWithNewOp(op, selector); - return mlir::success(); - } -}; + return mlir::success(); +} -class CIRAllocExceptionOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMEhTypeIdOpLowering::matchAndRewrite( + cir::EhTypeIdOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Value addrOp = rewriter.create( + op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), + op.getTypeSymAttr()); + mlir::LLVM::CallIntrinsicOp newOp = createCallLLVMIntrinsicOp( + rewriter, op.getLoc(), "llvm.eh.typeid.for.p0", rewriter.getI32Type(), + mlir::ValueRange{addrOp}); + rewriter.replaceOp(op, newOp); + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::AllocExceptionOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // Get or create `declare ptr @__cxa_allocate_exception(i64)` - StringRef fnName = "__cxa_allocate_exception"; +mlir::LogicalResult CIRToLLVMCatchParamOpLowering::matchAndRewrite( + cir::CatchParamOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + if (op.isBegin()) { + // Get or create `declare ptr @__cxa_begin_catch(ptr)` + StringRef fnName = "__cxa_begin_catch"; auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); - auto int64Ty = mlir::IntegerType::get(rewriter.getContext(), 64); - auto fnTy = mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {int64Ty}, + auto fnTy = mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {llvmPtrTy}, /*isVarArg=*/false); getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); - auto size = rewriter.create(op.getLoc(), - adaptor.getSizeAttr()); rewriter.replaceOpWithNewOp( - op, mlir::TypeRange{llvmPtrTy}, fnName, mlir::ValueRange{size}); + op, mlir::TypeRange{llvmPtrTy}, fnName, + mlir::ValueRange{adaptor.getExceptionPtr()}); return mlir::success(); - } -}; - -class CIRFreeExceptionOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::FreeExceptionOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // Get or create `declare void @__cxa_free_exception(ptr)` - StringRef fnName = "__cxa_free_exception"; - auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); - auto voidTy = mlir::LLVM::LLVMVoidType::get(rewriter.getContext()); - auto fnTy = mlir::LLVM::LLVMFunctionType::get(voidTy, {llvmPtrTy}, - /*isVarArg=*/false); + } else if (op.isEnd()) { + StringRef fnName = "__cxa_end_catch"; + auto fnTy = mlir::LLVM::LLVMFunctionType::get( + mlir::LLVM::LLVMVoidType::get(rewriter.getContext()), {}, + /*isVarArg=*/false); getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); - rewriter.replaceOpWithNewOp( - op, mlir::TypeRange{}, fnName, mlir::ValueRange{adaptor.getPtr()}); + rewriter.create(op.getLoc(), mlir::TypeRange{}, fnName, + mlir::ValueRange{}); + rewriter.eraseOp(op); return mlir::success(); } -}; + llvm_unreachable("only begin/end supposed to make to lowering stage"); + return mlir::failure(); +} -class CIRThrowOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; +mlir::LogicalResult CIRToLLVMResumeOpLowering::matchAndRewrite( + cir::ResumeOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // %lpad.val = insertvalue { ptr, i32 } poison, ptr %exception_ptr, 0 + // %lpad.val2 = insertvalue { ptr, i32 } %lpad.val, i32 %selector, 1 + // resume { ptr, i32 } %lpad.val2 + SmallVector slotIdx = {0}; + SmallVector selectorIdx = {1}; + auto llvmLandingPadStructTy = getLLVMLandingPadStructTy(rewriter); + mlir::Value poison = rewriter.create( + op.getLoc(), llvmLandingPadStructTy); + + mlir::Value slot = rewriter.create( + op.getLoc(), poison, adaptor.getExceptionPtr(), slotIdx); + mlir::Value selector = rewriter.create( + op.getLoc(), slot, adaptor.getTypeId(), selectorIdx); + + rewriter.replaceOpWithNewOp(op, selector); + return mlir::success(); +} - mlir::LogicalResult - matchAndRewrite(cir::ThrowOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // Get or create `declare void @__cxa_throw(ptr, ptr, ptr)` - StringRef fnName = "__cxa_throw"; - auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); - auto voidTy = mlir::LLVM::LLVMVoidType::get(rewriter.getContext()); - auto fnTy = mlir::LLVM::LLVMFunctionType::get( - voidTy, {llvmPtrTy, llvmPtrTy, llvmPtrTy}, - /*isVarArg=*/false); - getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); - mlir::Value typeInfo = rewriter.create( - op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), - adaptor.getTypeInfoAttr()); +mlir::LogicalResult CIRToLLVMAllocExceptionOpLowering::matchAndRewrite( + cir::AllocExceptionOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // Get or create `declare ptr @__cxa_allocate_exception(i64)` + StringRef fnName = "__cxa_allocate_exception"; + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + auto int64Ty = mlir::IntegerType::get(rewriter.getContext(), 64); + auto fnTy = mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {int64Ty}, + /*isVarArg=*/false); + getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); + auto size = rewriter.create(op.getLoc(), + adaptor.getSizeAttr()); + rewriter.replaceOpWithNewOp( + op, mlir::TypeRange{llvmPtrTy}, fnName, mlir::ValueRange{size}); + return mlir::success(); +} - mlir::Value dtor; - if (op.getDtor()) { - dtor = rewriter.create( - op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), - adaptor.getDtorAttr()); - } else { - dtor = rewriter.create( - op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext())); - } - rewriter.replaceOpWithNewOp( - op, mlir::TypeRange{}, fnName, - mlir::ValueRange{adaptor.getExceptionPtr(), typeInfo, dtor}); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMFreeExceptionOpLowering::matchAndRewrite( + cir::FreeExceptionOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // Get or create `declare void @__cxa_free_exception(ptr)` + StringRef fnName = "__cxa_free_exception"; + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + auto voidTy = mlir::LLVM::LLVMVoidType::get(rewriter.getContext()); + auto fnTy = mlir::LLVM::LLVMFunctionType::get(voidTy, {llvmPtrTy}, + /*isVarArg=*/false); + getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); + rewriter.replaceOpWithNewOp( + op, mlir::TypeRange{}, fnName, mlir::ValueRange{adaptor.getPtr()}); + return mlir::success(); +} -class CIRIsFPClassOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::IsFPClassOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto src = adaptor.getSrc(); - auto flags = adaptor.getFlags(); - auto retTy = rewriter.getI1Type(); - - auto loc = op->getLoc(); - - auto intrinsic = - rewriter.create(loc, retTy, src, flags); - // FIMXE: CIR now will convert cir::BoolType to i8 type unconditionally. - // Remove this conversion after we fix - // https://github.com/llvm/clangir/issues/480 - auto converted = rewriter.create( - loc, rewriter.getI8Type(), intrinsic->getResult(0)); - - rewriter.replaceOp(op, converted); - return mlir::success(); +mlir::LogicalResult CIRToLLVMThrowOpLowering::matchAndRewrite( + cir::ThrowOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // Get or create `declare void @__cxa_throw(ptr, ptr, ptr)` + StringRef fnName = "__cxa_throw"; + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + auto voidTy = mlir::LLVM::LLVMVoidType::get(rewriter.getContext()); + auto fnTy = mlir::LLVM::LLVMFunctionType::get( + voidTy, {llvmPtrTy, llvmPtrTy, llvmPtrTy}, + /*isVarArg=*/false); + getOrCreateLLVMFuncOp(rewriter, op, fnName, fnTy); + mlir::Value typeInfo = rewriter.create( + op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), + adaptor.getTypeInfoAttr()); + + mlir::Value dtor; + if (op.getDtor()) { + dtor = rewriter.create( + op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), + adaptor.getDtorAttr()); + } else { + dtor = rewriter.create( + op.getLoc(), mlir::LLVM::LLVMPointerType::get(rewriter.getContext())); } -}; + rewriter.replaceOpWithNewOp( + op, mlir::TypeRange{}, fnName, + mlir::ValueRange{adaptor.getExceptionPtr(), typeInfo, dtor}); + return mlir::success(); +} -class CIRPtrMaskOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::PtrMaskOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - // FIXME: We'd better to lower to mlir::LLVM::PtrMaskOp if it exists. - // So we have to make it manually here by following: - // https://llvm.org/docs/LangRef.html#llvm-ptrmask-intrinsic - auto loc = op.getLoc(); - auto mask = op.getMask(); - - auto moduleOp = op->getParentOfType(); - mlir::DataLayout layout(moduleOp); - auto iPtrIdxValue = layout.getTypeSizeInBits(mask.getType()); - auto iPtrIdx = mlir::IntegerType::get(moduleOp->getContext(), iPtrIdxValue); - - auto intPtr = rewriter.create( - loc, iPtrIdx, adaptor.getPtr()); // this may truncate - mlir::Value masked = - rewriter.create(loc, intPtr, adaptor.getMask()); - mlir::Value diff = rewriter.create(loc, intPtr, masked); - rewriter.replaceOpWithNewOp( - op, getTypeConverter()->convertType(op.getType()), - mlir::IntegerType::get(moduleOp->getContext(), 8), adaptor.getPtr(), - diff); - return mlir::success(); - } -}; +mlir::LogicalResult CIRToLLVMIsFPClassOpLowering::matchAndRewrite( + cir::IsFPClassOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto src = adaptor.getSrc(); + auto flags = adaptor.getFlags(); + auto retTy = rewriter.getI1Type(); -class CIRAbsOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(cir::AbsOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - auto resTy = this->getTypeConverter()->convertType(op.getType()); - auto absOp = rewriter.create( - op.getLoc(), resTy, adaptor.getOperands()[0], adaptor.getPoison()); - rewriter.replaceOp(op, absOp); - return mlir::success(); - } -}; -class CIRSignBitOpLowering : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; + auto loc = op->getLoc(); - mlir::LogicalResult - matchAndRewrite(cir::SignBitOp op, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - assert(!::cir::MissingFeatures::isPPC_FP128Ty()); + auto intrinsic = + rewriter.create(loc, retTy, src, flags); + // FIMXE: CIR now will convert cir::BoolType to i8 type unconditionally. + // Remove this conversion after we fix + // https://github.com/llvm/clangir/issues/480 + auto converted = rewriter.create( + loc, rewriter.getI8Type(), intrinsic->getResult(0)); - mlir::DataLayout layout(op->getParentOfType()); - int width = layout.getTypeSizeInBits(op.getInput().getType()); - if (auto longDoubleType = - mlir::dyn_cast(op.getInput().getType())) { - if (mlir::isa(longDoubleType.getUnderlying())) { - // If the underlying type of LongDouble is FP80Type, - // DataLayout::getTypeSizeInBits returns 128. - // See https://github.com/llvm/clangir/issues/1057. - // Set the width to 80 manually. - width = 80; - } + rewriter.replaceOp(op, converted); + return mlir::success(); +} + +mlir::LogicalResult CIRToLLVMAbsOpLowering::matchAndRewrite( + cir::AbsOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto resTy = this->getTypeConverter()->convertType(op.getType()); + auto absOp = rewriter.create( + op.getLoc(), resTy, adaptor.getOperands()[0], adaptor.getPoison()); + rewriter.replaceOp(op, absOp); + return mlir::success(); +} + +mlir::LogicalResult CIRToLLVMPtrMaskOpLowering::matchAndRewrite( + cir::PtrMaskOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + // FIXME: We'd better to lower to mlir::LLVM::PtrMaskOp if it exists. + // So we have to make it manually here by following: + // https://llvm.org/docs/LangRef.html#llvm-ptrmask-intrinsic + auto loc = op.getLoc(); + auto mask = op.getMask(); + + auto moduleOp = op->getParentOfType(); + mlir::DataLayout layout(moduleOp); + auto iPtrIdxValue = layout.getTypeSizeInBits(mask.getType()); + auto iPtrIdx = mlir::IntegerType::get(moduleOp->getContext(), iPtrIdxValue); + + auto intPtr = rewriter.create( + loc, iPtrIdx, adaptor.getPtr()); // this may truncate + mlir::Value masked = + rewriter.create(loc, intPtr, adaptor.getMask()); + mlir::Value diff = rewriter.create(loc, intPtr, masked); + rewriter.replaceOpWithNewOp( + op, getTypeConverter()->convertType(op.getType()), + mlir::IntegerType::get(moduleOp->getContext(), 8), adaptor.getPtr(), + diff); + return mlir::success(); +} + +mlir::LogicalResult CIRToLLVMSignBitOpLowering::matchAndRewrite( + cir::SignBitOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + assert(!::cir::MissingFeatures::isPPC_FP128Ty()); + + mlir::DataLayout layout(op->getParentOfType()); + int width = layout.getTypeSizeInBits(op.getInput().getType()); + if (auto longDoubleType = + mlir::dyn_cast(op.getInput().getType())) { + if (mlir::isa(longDoubleType.getUnderlying())) { + // If the underlying type of LongDouble is FP80Type, + // DataLayout::getTypeSizeInBits returns 128. + // See https://github.com/llvm/clangir/issues/1057. + // Set the width to 80 manually. + width = 80; } - auto intTy = mlir::IntegerType::get(rewriter.getContext(), width); - auto bitcast = rewriter.create(op->getLoc(), intTy, - adaptor.getInput()); - auto zero = rewriter.create(op->getLoc(), intTy, 0); - auto cmpResult = rewriter.create( - op.getLoc(), mlir::LLVM::ICmpPredicate::slt, bitcast.getResult(), zero); - auto converted = rewriter.create( - op.getLoc(), mlir::IntegerType::get(rewriter.getContext(), 32), - cmpResult); - rewriter.replaceOp(op, converted); - return mlir::success(); } -}; + auto intTy = mlir::IntegerType::get(rewriter.getContext(), width); + auto bitcast = rewriter.create(op->getLoc(), intTy, + adaptor.getInput()); + auto zero = rewriter.create(op->getLoc(), intTy, 0); + auto cmpResult = rewriter.create( + op.getLoc(), mlir::LLVM::ICmpPredicate::slt, bitcast.getResult(), zero); + auto converted = rewriter.create( + op.getLoc(), mlir::IntegerType::get(rewriter.getContext(), 32), + cmpResult); + rewriter.replaceOp(op, converted); + return mlir::success(); +} void populateCIRToLLVMConversionPatterns( mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter, @@ -4394,57 +3842,62 @@ void populateCIRToLLVMConversionPatterns( llvm::StringMap &stringGlobalsMap, llvm::StringMap &argStringGlobalsMap, llvm::MapVector &argsVarMap) { - patterns.add(patterns.getContext()); - patterns.add(converter, dataLayout, stringGlobalsMap, - argStringGlobalsMap, argsVarMap, - patterns.getContext()); + patterns.add(patterns.getContext()); + patterns.add(converter, dataLayout, + stringGlobalsMap, argStringGlobalsMap, + argsVarMap, patterns.getContext()); patterns.add< - CIRCmpOpLowering, CIRSelectOpLowering, CIRBitClrsbOpLowering, - CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, - CIRBitParityOpLowering, CIRBitPopcountOpLowering, - CIRAtomicCmpXchgLowering, CIRAtomicXchgLowering, CIRAtomicFetchLowering, - CIRByteswapOpLowering, CIRRotateOpLowering, CIRBrCondOpLowering, - CIRPtrStrideOpLowering, CIRCallLowering, CIRTryCallLowering, - CIREhInflightOpLowering, CIRUnaryOpLowering, CIRBinOpLowering, - CIRBinOpOverflowOpLowering, CIRShiftOpLowering, CIRLoadLowering, - CIRConstantLowering, CIRStoreLowering, CIRFuncLowering, CIRCastOpLowering, - CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRComplexCreateOpLowering, - CIRComplexRealOpLowering, CIRComplexImagOpLowering, - CIRComplexRealPtrOpLowering, CIRComplexImagPtrOpLowering, - CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, - CIRBrOpLowering, CIRGetMemberOpLowering, CIRGetRuntimeMemberOpLowering, - CIRSwitchFlatOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, - CIRMemCpyOpLowering, CIRMemChrOpLowering, CIRMemCpyInlineOpLowering, - CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, - CIRVectorCreateLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, - CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, - CIRVectorShuffleVecLowering, CIRStackSaveLowering, CIRUnreachableLowering, - CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, - CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, - CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, CIRMemCpyOpLowering, - CIRFAbsOpLowering, CIRExpectOpLowering, CIRVTableAddrPointOpLowering, - CIRVectorCreateLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, - CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, - CIRVectorShuffleVecLowering, CIRStackSaveLowering, CIRUnreachableLowering, - CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, - CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, - CIRIsConstantOpLowering, CIRCmpThreeWayOpLowering, - CIRReturnAddrOpLowering, CIRClearCacheOpLowering, CIREhTypeIdOpLowering, - CIRCatchParamOpLowering, CIRResumeOpLowering, CIRAllocExceptionOpLowering, - CIRFreeExceptionOpLowering, CIRThrowOpLowering, CIRIntrinsicCallLowering, - CIRAssumeLowering, CIRAssumeAlignedLowering, CIRAssumeSepStorageLowering, - CIRBaseClassAddrOpLowering, CIRDerivedClassAddrOpLowering, - CIRVTTAddrPointOpLowering, CIRIsFPClassOpLowering, CIRAbsOpLowering, - CIRMemMoveOpLowering, CIRMemsetOpLowering, CIRSignBitOpLowering, - CIRPtrMaskOpLowering + CIRToLLVMCmpOpLowering, CIRToLLVMSelectOpLowering, + CIRToLLVMBitClrsbOpLowering, CIRToLLVMBitClzOpLowering, + CIRToLLVMBitCtzOpLowering, CIRToLLVMBitFfsOpLowering, + CIRToLLVMBitParityOpLowering, CIRToLLVMBitPopcountOpLowering, + CIRToLLVMAtomicCmpXchgLowering, CIRToLLVMAtomicXchgLowering, + CIRToLLVMAtomicFetchLowering, CIRToLLVMByteswapOpLowering, + CIRToLLVMRotateOpLowering, CIRToLLVMBrCondOpLowering, + CIRToLLVMPtrStrideOpLowering, CIRToLLVMCallOpLowering, + CIRToLLVMTryCallOpLowering, CIRToLLVMEhInflightOpLowering, + CIRToLLVMUnaryOpLowering, CIRToLLVMBinOpLowering, + CIRToLLVMBinOpOverflowOpLowering, CIRToLLVMShiftOpLowering, + CIRToLLVMLoadOpLowering, CIRToLLVMConstantOpLowering, + CIRToLLVMStoreOpLowering, CIRToLLVMFuncOpLowering, + CIRToLLVMCastOpLowering, CIRToLLVMGlobalOpLowering, + CIRToLLVMGetGlobalOpLowering, CIRToLLVMComplexCreateOpLowering, + CIRToLLVMComplexRealOpLowering, CIRToLLVMComplexImagOpLowering, + CIRToLLVMComplexRealPtrOpLowering, CIRToLLVMComplexImagPtrOpLowering, + CIRToLLVMVAStartOpLowering, CIRToLLVMVAEndOpLowering, + CIRToLLVMVACopyOpLowering, CIRToLLVMVAArgOpLowering, + CIRToLLVMBrOpLowering, CIRToLLVMGetMemberOpLowering, + CIRToLLVMGetRuntimeMemberOpLowering, CIRToLLVMSwitchFlatOpLowering, + CIRToLLVMPtrDiffOpLowering, CIRToLLVMCopyOpLowering, + CIRToLLVMMemCpyOpLowering, CIRToLLVMMemChrOpLowering, + CIRToLLVMAbsOpLowering, CIRToLLVMExpectOpLowering, + CIRToLLVMVTableAddrPointOpLowering, CIRToLLVMVecCreateOpLowering, + CIRToLLVMVecCmpOpLowering, CIRToLLVMVecSplatOpLowering, + CIRToLLVMVecTernaryOpLowering, CIRToLLVMVecShuffleDynamicOpLowering, + CIRToLLVMVecShuffleOpLowering, CIRToLLVMStackSaveOpLowering, + CIRToLLVMUnreachableOpLowering, CIRToLLVMTrapOpLowering, + CIRToLLVMInlineAsmOpLowering, CIRToLLVMSetBitfieldOpLowering, + CIRToLLVMGetBitfieldOpLowering, CIRToLLVMPrefetchOpLowering, + CIRToLLVMObjSizeOpLowering, CIRToLLVMIsConstantOpLowering, + CIRToLLVMCmpThreeWayOpLowering, CIRToLLVMMemCpyOpLowering, + CIRToLLVMIsConstantOpLowering, CIRToLLVMCmpThreeWayOpLowering, + CIRToLLVMReturnAddrOpLowering, CIRToLLVMClearCacheOpLowering, + CIRToLLVMEhTypeIdOpLowering, CIRToLLVMCatchParamOpLowering, + CIRToLLVMResumeOpLowering, CIRToLLVMAllocExceptionOpLowering, + CIRToLLVMFreeExceptionOpLowering, CIRToLLVMThrowOpLowering, + CIRToLLVMLLVMIntrinsicCallOpLowering, CIRToLLVMAssumeOpLowering, + CIRToLLVMAssumeAlignedOpLowering, CIRToLLVMAssumeSepStorageOpLowering, + CIRToLLVMBaseClassAddrOpLowering, CIRToLLVMDerivedClassAddrOpLowering, + CIRToLLVMVTTAddrPointOpLowering, CIRToLLVMIsFPClassOpLowering, + CIRToLLVMAbsOpLowering, CIRToLLVMMemMoveOpLowering, + CIRToLLVMMemSetOpLowering, CIRToLLVMMemCpyInlineOpLowering, + CIRToLLVMSignBitOpLowering, CIRToLLVMPtrMaskOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST >(converter, patterns.getContext()); } -namespace { - std::unique_ptr prepareLowerModule(mlir::ModuleOp module) { mlir::PatternRewriter rewriter{module->getContext()}; // If the triple is not present, e.g. CIR modules parsed from text, we @@ -4577,9 +4030,8 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, return mlir::LLVM::LLVMVoidType::get(type.getContext()); }); } -} // namespace -static void buildCtorDtorList( +void buildCtorDtorList( mlir::ModuleOp module, StringRef globalXtorName, StringRef llvmXtorName, llvm::function_ref(mlir::Attribute)> createXtor) { llvm::SmallVector, 2> globalXtors; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h new file mode 100644 index 000000000000..d1488ec8f6f5 --- /dev/null +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -0,0 +1,1024 @@ +//====- LowerToLLVM.h - Lowering from CIR to LLVMIR -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "LowerModule.h" + +#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" +#include "mlir/Conversion/LLVMCommon/TypeConverter.h" +#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/LLVMIR/LLVMTypes.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/Transforms/DialectConversion.h" + +namespace cir { +namespace direct { +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter); + +mlir::LLVM::Linkage convertLinkage(cir::GlobalLinkageKind linkage); + +mlir::LLVM::CConv convertCallingConv(cir::CallingConv callinvConv); + +void buildCtorDtorList( + mlir::ModuleOp module, mlir::StringRef globalXtorName, + mlir::StringRef llvmXtorName, + llvm::function_ref(mlir::Attribute)> + createXtor); + +void populateCIRToLLVMConversionPatterns( + mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter, + mlir::DataLayout &dataLayout, + llvm::StringMap &stringGlobalsMap, + llvm::StringMap &argStringGlobalsMap, + llvm::MapVector &argsVarMap); + +std::unique_ptr prepareLowerModule(mlir::ModuleOp module); + +void prepareTypeConverter(mlir::LLVMTypeConverter &converter, + mlir::DataLayout &dataLayout, + cir::LowerModule *lowerModule); + +mlir::LLVM::AtomicOrdering +getLLVMMemOrder(std::optional &memorder); + +mlir::LLVM::AtomicOrdering getLLVMAtomicOrder(cir::MemOrder memo); + +mlir::LLVM::CallIntrinsicOp +createCallLLVMIntrinsicOp(mlir::ConversionPatternRewriter &rewriter, + mlir::Location loc, const llvm::Twine &intrinsicName, + mlir::Type resultTy, mlir::ValueRange operands); + +mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp( + mlir::ConversionPatternRewriter &rewriter, mlir::Operation *op, + const llvm::Twine &intrinsicName, mlir::Type resultTy, + mlir::ValueRange operands); + +mlir::Value createLLVMBitOp(mlir::Location loc, + const llvm::Twine &llvmIntrinBaseName, + mlir::Type resultTy, mlir::Value operand, + std::optional poisonZeroInputFlag, + mlir::ConversionPatternRewriter &rewriter); + +class CIRToLLVMCopyOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::CopyOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMMemCpyOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::MemCpyOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMMemChrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::MemChrOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMMemMoveOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::MemMoveOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMMemCpyInlineOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::MemCpyInlineOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override; +}; + +class CIRToLLVMMemSetOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::MemSetOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMPtrStrideOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::PtrStrideOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBaseClassAddrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BaseClassAddrOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMDerivedClassAddrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::DerivedClassAddrOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVTTAddrPointOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VTTAddrPointOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBrCondOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BrCondOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMCastOpLowering : public mlir::OpConversionPattern { + mlir::Type convertTy(mlir::Type ty) const; + +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::CastOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMReturnOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ReturnOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMCallOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::CallOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMTryCallOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::TryCallOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMEhInflightOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::EhInflightOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMAllocaOpLowering + : public mlir::OpConversionPattern { + mlir::DataLayout const &dataLayout; + // Track globals created for annotation related strings + llvm::StringMap &stringGlobalsMap; + // Track globals created for annotation arg related strings. + // They are different from annotation strings, as strings used in args + // are not in llvmMetadataSectionName, and also has aligment 1. + llvm::StringMap &argStringGlobalsMap; + // Track globals created for annotation args. + llvm::MapVector &argsVarMap; + +public: + CIRToLLVMAllocaOpLowering( + mlir::TypeConverter const &typeConverter, + mlir::DataLayout const &dataLayout, + llvm::StringMap &stringGlobalsMap, + llvm::StringMap &argStringGlobalsMap, + llvm::MapVector &argsVarMap, + mlir::MLIRContext *context) + : OpConversionPattern(typeConverter, context), + dataLayout(dataLayout), stringGlobalsMap(stringGlobalsMap), + argStringGlobalsMap(argStringGlobalsMap), argsVarMap(argsVarMap) {} + + using mlir::OpConversionPattern::OpConversionPattern; + + void buildAllocaAnnotations(mlir::LLVM::AllocaOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::ArrayAttr annotationValuesArray) const; + + mlir::LogicalResult + matchAndRewrite(cir::AllocaOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMLoadOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::LoadOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMStoreOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::StoreOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMConstantOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ConstantOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVecCreateOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VecCreateOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVecCmpOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VecCmpOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVecSplatOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VecSplatOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVecTernaryOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VecTernaryOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVecShuffleOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VecShuffleOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVecShuffleDynamicOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern< + cir::VecShuffleDynamicOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VecShuffleDynamicOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVAStartOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VAStartOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVAEndOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VAEndOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVACopyOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VACopyOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVAArgOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VAArgOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMFuncOpLowering : public mlir::OpConversionPattern { + static mlir::StringRef getLinkageAttrNameString(); + + void lowerFuncAttributes( + cir::FuncOp func, bool filterArgAndResAttrs, + mlir::SmallVectorImpl &result) const; + + void + lowerFuncOpenCLKernelMetadata(mlir::NamedAttribute &extraAttrsEntry) const; + +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::FuncOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMGetGlobalOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::GetGlobalOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMComplexCreateOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ComplexCreateOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMComplexRealOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ComplexRealOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMComplexImagOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ComplexImagOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMComplexRealPtrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ComplexRealPtrOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMComplexImagPtrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ComplexImagPtrOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMSwitchFlatOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::SwitchFlatOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMGlobalOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::GlobalOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; + +private: + void setupRegionInitializedLLVMGlobalOp( + cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const; + + mutable mlir::LLVM::ComdatOp comdatOp = nullptr; + static void addComdat(mlir::LLVM::GlobalOp &op, + mlir::LLVM::ComdatOp &comdatOp, + mlir::OpBuilder &builder, mlir::ModuleOp &module); +}; + +class CIRToLLVMUnaryOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::UnaryOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBinOpLowering : public mlir::OpConversionPattern { + mlir::LLVM::IntegerOverflowFlags getIntOverflowFlag(cir::BinOp op) const; + +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BinOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBinOpOverflowOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BinOpOverflowOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; + +private: + static std::string getLLVMIntrinName(cir::BinOpOverflowKind opKind, + bool isSigned, unsigned width); + + struct EncompassedTypeInfo { + bool sign; + unsigned width; + }; + + static EncompassedTypeInfo computeEncompassedTypeWidth(cir::IntType operandTy, + cir::IntType resultTy); +}; + +class CIRToLLVMShiftOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ShiftOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMCmpOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::CmpOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMLLVMIntrinsicCallOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern< + cir::LLVMIntrinsicCallOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::LLVMIntrinsicCallOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMAssumeOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::AssumeOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMAssumeAlignedOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::AssumeAlignedOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMAssumeSepStorageOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::AssumeSepStorageOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBitClrsbOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BitClrsbOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMObjSizeOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ObjSizeOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBitClzOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BitClzOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBitCtzOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BitCtzOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBitFfsOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BitFfsOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBitParityOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BitParityOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBitPopcountOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BitPopcountOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMAtomicCmpXchgLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::AtomicCmpXchg op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMAtomicXchgLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::AtomicXchg op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMAtomicFetchLowering + : public mlir::OpConversionPattern { + mlir::Value buildPostOp(cir::AtomicFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::Value rmwVal, bool isInt) const; + + mlir::Value buildMinMaxPostOp(cir::AtomicFetch op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter, + mlir::Value rmwVal, bool isSigned) const; + + llvm::StringLiteral getLLVMBinop(cir::AtomicFetchKind k, bool isInt) const; + + mlir::LLVM::AtomicBinOp getLLVMAtomicBinOp(cir::AtomicFetchKind k, bool isInt, + bool isSignedInt) const; + +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::AtomicFetch op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMByteswapOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ByteswapOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMRotateOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::RotateOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMSelectOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::SelectOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMBrOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::BrOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMGetMemberOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::GetMemberOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMGetRuntimeMemberOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::GetRuntimeMemberOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMPtrDiffOpLowering + : public mlir::OpConversionPattern { + uint64_t getTypeSize(mlir::Type type, mlir::Operation &op) const; + +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::PtrDiffOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMExpectOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ExpectOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMVTableAddrPointOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::VTableAddrPointOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMStackSaveOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::StackSaveOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMUnreachableOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::UnreachableOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMTrapOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::TrapOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMInlineAsmOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::InlineAsmOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMPrefetchOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::PrefetchOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMSetBitfieldOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::SetBitfieldOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMGetBitfieldOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::GetBitfieldOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMIsConstantOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::IsConstantOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMCmpThreeWayOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::CmpThreeWayOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; + +private: + static std::string getLLVMIntrinsicName(bool signedCmp, unsigned operandWidth, + unsigned resultWidth); +}; + +class CIRToLLVMReturnAddrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ReturnAddrOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMClearCacheOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ClearCacheOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMEhTypeIdOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::EhTypeIdOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMCatchParamOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::CatchParamOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMResumeOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ResumeOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMAllocExceptionOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::AllocExceptionOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMFreeExceptionOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::FreeExceptionOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMThrowOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ThrowOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMIsFPClassOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::IsFPClassOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMPtrMaskOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::PtrMaskOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMAbsOpLowering : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::AbsOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMSignBitOpLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::SignBitOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override; +}; + +#define GET_BUILTIN_LOWERING_CLASSES_DECLARE +#include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" +#undef GET_BUILTIN_LOWERING_CLASSES_DECLARE + +} // namespace direct +} // namespace cir diff --git a/clang/utils/TableGen/CIRLoweringEmitter.cpp b/clang/utils/TableGen/CIRLoweringEmitter.cpp index 84b5ceea998e..9b71e9ab597d 100644 --- a/clang/utils/TableGen/CIRLoweringEmitter.cpp +++ b/clang/utils/TableGen/CIRLoweringEmitter.cpp @@ -12,6 +12,7 @@ using namespace llvm; namespace { +std::string ClassDeclaration; std::string ClassDefinitions; std::string ClassList; @@ -19,7 +20,8 @@ void GenerateLowering(const Record *Operation) { using namespace std::string_literals; std::string Name = Operation->getName().str(); std::string LLVMOp = Operation->getValueAsString("llvmOp").str(); - ClassDefinitions += + + ClassDeclaration += "class CIR" + Name + "Lowering : public mlir::OpConversionPattern { @@ -32,15 +34,24 @@ void GenerateLowering(const Record *Operation) { Name + " op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) " "const " - "override {"; + "override;" + + R"C++( +}; +)C++"; + + ClassDefinitions += + R"C++(mlir::LogicalResult +CIR)C++" + + Name + "Lowering::matchAndRewrite(cir::" + Name + + R"C++( op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const {)C++"; auto ResultCount = Operation->getValueAsDag("results")->getNumArgs(); if (ResultCount > 0) ClassDefinitions += R"C++( - auto resTy = this->getTypeConverter()->convertType(op.getType());)C++"; + auto resTy = this->getTypeConverter()->convertType(op.getType());)C++"; ClassDefinitions += R"C++( - rewriter.replaceOpWithNewOp(op"; if (ResultCount > 0) @@ -51,9 +62,8 @@ void GenerateLowering(const Record *Operation) { ClassDefinitions += ", adaptor.getOperands()[" + std::to_string(i) + ']'; ClassDefinitions += R"C++(); - return mlir::success(); - } -}; + return mlir::success(); +} )C++"; ClassList += ", CIR" + Name + "Lowering\n"; @@ -69,8 +79,9 @@ void clang::EmitCIRBuiltinsLowering(const RecordKeeper &Records, GenerateLowering(Builtin); } - OS << "#ifdef GET_BUILTIN_LOWERING_CLASSES\n" - << ClassDefinitions << "\n#undef GET_BUILTIN_LOWERING_CLASSES\n#endif\n"; - OS << "#ifdef GET_BUILTIN_LOWERING_LIST\n" - << ClassList << "\n#undef GET_BUILTIN_LOWERING_LIST\n#endif\n"; + OS << "#ifdef GET_BUILTIN_LOWERING_CLASSES_DECLARE\n" + << ClassDeclaration << "\n#endif\n"; + OS << "#ifdef GET_BUILTIN_LOWERING_CLASSES_DEF\n" + << ClassDefinitions << "\n#endif\n"; + OS << "#ifdef GET_BUILTIN_LOWERING_LIST\n" << ClassList << "\n#endif\n"; } From b1569838c185c1dbbda016048c478206f68216e8 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 15 Nov 2024 00:33:59 -0500 Subject: [PATCH 2086/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_splat_lane, neon_splat_laneq, neon_splatq_lane and neon_splatq_laneq (#1126) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 21 + clang/test/CIR/CodeGen/AArch64/neon-misc.c | 609 ++++++++++++++++++ 2 files changed, 630 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index fa17eb22416b..fcb5119b7fff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2186,6 +2186,14 @@ static int64_t getIntValueFromConstOp(mlir::Value val) { .getSExtValue(); } +static mlir::Value emitNeonSplat(CIRGenBuilderTy &builder, mlir::Location loc, + mlir::Value splatVec, mlir::Value splatLane, + unsigned int splatCnt) { + int64_t splatValInt = getIntValueFromConstOp(splatLane); + llvm::SmallVector splatMask(splatCnt, splatValInt); + return builder.createVecShuffle(loc, splatVec, splatMask); +} + /// Build a constant shift amount vector of `vecTy` to shift a vector /// Here `shitfVal` is a constant integer that will be splated into a /// a const vector of `vecTy` which is the return of this function @@ -2339,6 +2347,19 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( switch (builtinID) { default: break; + case NEON::BI__builtin_neon_splat_lane_v: + case NEON::BI__builtin_neon_splat_laneq_v: + case NEON::BI__builtin_neon_splatq_lane_v: + case NEON::BI__builtin_neon_splatq_laneq_v: { + uint64_t numElements = vTy.getSize(); + if (builtinID == NEON::BI__builtin_neon_splatq_lane_v) + numElements = numElements << 1; + if (builtinID == NEON::BI__builtin_neon_splat_laneq_v) + numElements = numElements >> 1; + ops[0] = builder.createBitcast(ops[0], vTy); + return emitNeonSplat(builder, getLoc(e->getExprLoc()), ops[0], ops[1], + numElements); + } case NEON::BI__builtin_neon_vmovl_v: { cir::VectorType dTy = builder.getExtendedOrTruncatedElementVectorType( vTy, false /* truncate */, diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index a798139de55f..4c516ad06f18 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -860,6 +860,7 @@ uint32x2_t test_vqmovn_u64(uint64x2_t a) { // LLVM: [[VQMOVN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> [[A]]) // LLVM: ret <2 x i32> [[VQMOVN_V1_I]] } + float32x2_t test_vcvt_f32_s32(int32x2_t a) { return vcvt_f32_s32(a); @@ -907,3 +908,611 @@ float32x4_t test_vcvtq_f32_u32(uint32x4_t a) { // LLVM: [[VCVT_I:%.*]] = uitofp <4 x i32> [[a]] to <4 x float> // LLVM: ret <4 x float> [[VCVT_I]] } + +int8x8_t test_splat_lane_s8(int8x8_t v) { + return (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)v, 7, 0); + + // CIR-LABEL: test_splat_lane_s8 + // CIR: [[VEC:%.*]] = cir.load {{%.*}} : !cir.ptr>, !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_lane_s8(<8 x i8>{{.*}}[[V:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <8 x i8> [[V]], <8 x i8> [[V]], <8 x i32> + // LLVM: ret <8 x i8> [[RES]] +} + +int16x4_t test_splat_lane_s16(int16x4_t v) { + return (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)v, 3, 1); + + // CIR-LABEL: test_splat_lane_s16 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_lane_s16(<4 x i16>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[RES:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> [[TMP1]], <4 x i32> + // LLVM: ret <4 x i16> [[RES]] +} + +int32x2_t test_splat_lane_s32(int32x2_t v) { + return (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)v, 1, 2); + + // CIR-LABEL: test_splat_lane_s32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_lane_s32(<2 x i32>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[RES:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> + // LLVM: ret <2 x i32> [[RES]] +} + +int64x1_t test_splat_lane_s64(int64x1_t v) { + return (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)v, 0, 3); + + // CIR-LABEL: test_splat_lane_s64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) [#cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_lane_s64(<1 x i64>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[RES:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP1]], <1 x i32> zeroinitializer + // LLVM: ret <1 x i64> [[RES]] +} + +uint8x8_t test_splat_lane_u8(uint8x8_t v) { + return (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)v, 7, 16); + + // CIR-LABEL: test_splat_lane_u8 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_lane_u8(<8 x i8>{{.*}}[[V:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <8 x i8> [[V]], <8 x i8> [[V]], <8 x i32> + // LLVM: ret <8 x i8> [[RES]] +} +uint16x4_t test_splat_lane_u16(uint16x4_t v) { + return (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)v, 3, 17); + + // CIR-LABEL: test_splat_lane_u16 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_lane_u16(<4 x i16>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[RES:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> [[TMP1]], <4 x i32> + // LLVM: ret <4 x i16> [[RES]] +} + +uint32x2_t test_splat_lane_u32(uint32x2_t v) { + return (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)v, 1, 18); + + // CIR-LABEL: test_splat_lane_u32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_lane_u32(<2 x i32>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[RES:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> + // LLVM: ret <2 x i32> [[RES]] +} + +uint64x1_t test_splat_lane_u64(uint64x1_t v) { + return (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)v, 0, 19); + + // CIR-LABEL: test_splat_lane_u64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) [#cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_lane_u64(<1 x i64>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[RES:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP1]], <1 x i32> zeroinitializer + // LLVM: ret <1 x i64> [[RES]] +} + +float32x2_t test_splat_lane_f32(float32x2_t v) { + return (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)v, 1, 9); + + // CIR-LABEL: test_splat_lane_f32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_lane_f32(<2 x float>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x float> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> + // LLVM: [[RES:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP1]], <2 x i32> + // LLVM: ret <2 x float> [[RES]] +} + +float64x1_t test_splat_lane_f64(float64x1_t v) { + return (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)v, 0, 10); + + // CIR-LABEL: test_splat_lane_f64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) [#cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_lane_f64(<1 x double>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x double> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double> + // LLVM: [[RES:%.*]] = shufflevector <1 x double> [[TMP1]], <1 x double> [[TMP1]], <1 x i32> zeroinitializer + // LLVM: ret <1 x double> [[RES]] +} + +int8x16_t test_splatq_lane_s8(int8x8_t v) { + return (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)v, 7, 0); + + // CIR-LABEL: test_splatq_lane_s8 + // CIR: [[VEC:%.*]] = cir.load {{%.*}} : !cir.ptr>, !cir.vector + // CIR: [[TMP0:%.*]] = cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_lane_s8(<8 x i8>{{.*}}[[V:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <8 x i8> [[V]], <8 x i8> [[V]], + // LLVM-SAME: <16 x i32> + // LLVM: ret <16 x i8> [[RES]] +} + +int16x8_t test_splatq_lane_s16(int16x4_t v) { + return (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)v, 3, 1); + + // CIR-LABEL: test_splatq_lane_s16 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[TMP0:%.*]] = cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, + // CIR-SAME: #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_lane_s16(<4 x i16>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[RES:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> [[TMP1]], <8 x i32> + // LLVM: ret <8 x i16> [[RES]] +} + +int32x4_t test_splatq_lane_s32(int32x2_t v) { + return (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)v, 1, 2); + + // CIR-LABEL: test_splatq_lane_s32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[TMP0:%.*]] = cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_lane_s32(<2 x i32>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[RES:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <4 x i32> + // LLVM: ret <4 x i32> [[RES]] +} + +int64x2_t test_splatq_lane_s64(int64x1_t v) { + return (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)v, 0, 3); + + // CIR-LABEL: test_splatq_lane_s64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[TMP0:%.*]] = cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_lane_s64(<1 x i64>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[RES:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP1]], <2 x i32> zeroinitializer + // LLVM: ret <2 x i64> [[RES]] +} + +uint8x16_t test_splatq_lane_u8(uint8x8_t v) { + return (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)v, 7, 16); + + // CIR-LABEL: test_splatq_lane_u8 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_lane_u8(<8 x i8>{{.*}}[[V:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <8 x i8> [[V]], <8 x i8> [[V]], + // LLVM-SAME: <16 x i32> + // LLVM: ret <16 x i8> [[RES]] +} + +uint16x8_t test_splatq_lane_u16(uint16x4_t v) { + return (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)v, 3, 17); + + // CIR-LABEL: test_splatq_lane_u16 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[TMP0:%.*]] = cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, + // CIR-SAME: #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_lane_u16(<4 x i16>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[RES:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> [[TMP1]], <8 x i32> + // LLVM: ret <8 x i16> [[RES]] +} + +uint32x4_t test_splatq_lane_u32(uint32x2_t v) { + return (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)v, 1, 18); + + // CIR-LABEL: test_splatq_lane_u32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[TMP0:%.*]] = cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_lane_u32(<2 x i32>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[RES:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <4 x i32> + // LLVM: ret <4 x i32> [[RES]] +} + +uint64x2_t test_splatq_lane_u64(uint64x1_t v) { + return (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)v, 0, 19); + + // CIR-LABEL: test_splatq_lane_u64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_lane_u64(<1 x i64>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[RES:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP1]], <2 x i32> zeroinitializer + // LLVM: ret <2 x i64> [[RES]] +} + +float32x4_t test_splatq_lane_f32(float32x2_t v) { + return (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)v, 1, 9); + + // CIR-LABEL: test_splatq_lane_f32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_lane_f32(<2 x float>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x float> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> + // LLVM: [[RES:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP1]], <4 x i32> + // LLVM: ret <4 x float> [[RES]] +} + +float64x2_t test_splatq_lane_f64(float64x1_t v) { + return (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)v, 0, 10); + + // CIR-LABEL: test_splatq_lane_f64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_lane_f64(<1 x double>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x double> [[V]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double> + // LLVM: [[RES:%.*]] = shufflevector <1 x double> [[TMP1]], <1 x double> [[TMP1]], <2 x i32> zeroinitializer + // LLVM: ret <2 x double> [[RES]] +} + +int8x8_t test_splat_laneq_s8(int8x16_t v) { + return (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)v, 15, 32); + + // CIR-LABEL: test_splat_laneq_s8 + // CIR: [[VEC:%.*]] = cir.load {{.*}} : !cir.ptr>, !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_laneq_s8(<16 x i8>{{.*}}[[V:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <16 x i8> [[V]], <16 x i8> [[V]], + // LLVM-SAME: <8 x i32> + // LLVM: ret <8 x i8> [[RES]] +} + +int16x4_t test_splat_laneq_s16(int16x8_t v) { + return (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)v, 7, 33); + + // CIR-LABEL: test_splat_laneq_s16 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_laneq_s16(<8 x i16>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[RES:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP1]], <4 x i32> + // LLVM: ret <4 x i16> [[RES]] +} + +int32x2_t test_splat_laneq_s32(int32x4_t v) { + return (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)v, 3, 34); + + // CIR-LABEL: test_splat_laneq_s32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<3> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_laneq_s32(<4 x i32>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[RES:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP1]], <2 x i32> + // LLVM: ret <2 x i32> [[RES]] +} + +int64x1_t test_splat_laneq_s64(int64x2_t v) { + return (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)v, 0, 35); + + // CIR-LABEL: test_splat_laneq_s64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[TMP0:%.*]] = cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_laneq_s64(<2 x i64>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[RES:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP1]], <1 x i32> zeroinitializer + // LLVM: ret <1 x i64> [[RES]] +} + +float32x2_t test_splat_laneq_f32(float32x4_t v) { + return (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)v, 1, 41); + + // CIR-LABEL: test_splat_laneq_f32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[TMP0:%.*]] = cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<1> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_laneq_f32(<4 x float>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x float> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> + // LLVM: [[RES:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP1]], <2 x i32> + // LLVM: ret <2 x float> [[RES]] +} + +float64x1_t test_splat_laneq_f64(float64x2_t v) { + return (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)v, 0, 42); + + // CIR-LABEL: test_splat_laneq_f64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[TMP0:%.*]] = cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_laneq_f64(<2 x double>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double> + // LLVM: [[RES:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> [[TMP1]], <1 x i32> zeroinitializer + // LLVM: ret <1 x double> [[RES]] +} + +uint8x8_t test_splat_laneq_u8(uint8x16_t v) { + return (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)v, 15, 48); + + // CIR-LABEL: test_splat_laneq_u8 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_laneq_u8(<16 x i8>{{.*}}[[V:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <16 x i8> [[V]], <16 x i8> [[V]], + // LLVM-SAME: <8 x i32> + // LLVM: ret <8 x i8> [[RES]] +} + +uint16x4_t test_splat_laneq_u16(uint16x8_t v) { + return (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)v, 7, 49); + + // CIR-LABEL: test_splat_laneq_u16 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_laneq_u16(<8 x i16>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[RES:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP1]], <4 x i32> + // LLVM: ret <4 x i16> [[RES]] +} + +uint32x2_t test_splat_laneq_u32(uint32x4_t v) { + return (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)v, 3, 50); + + // CIR-LABEL: test_splat_laneq_u32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<3> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_laneq_u32(<4 x i32>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[RES:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP1]], <2 x i32> + // LLVM: ret <2 x i32> [[RES]] +} + +uint64x1_t test_splat_laneq_u64(uint64x2_t v) { + return (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)v, 0, 51); + + // CIR-LABEL: test_splat_laneq_u64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splat_laneq_u64(<2 x i64>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[RES:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP1]], <1 x i32> zeroinitializer + // LLVM: ret <1 x i64> [[RES]] +} + +int8x16_t test_splatq_laneq_s8(int8x16_t v) { + return (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)v, 15, 32); + + // CIR-LABEL: test_splatq_laneq_s8 + // CIR: [[VEC:%.*]] = cir.load {{.*}} : !cir.ptr>, !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_laneq_s8(<16 x i8>{{.*}}[[V:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <16 x i8> [[V]], <16 x i8> [[V]], + // LLVM-SAME: <16 x i32> + // LLVM: ret <16 x i8> [[RES]] +} + +int16x8_t test_splatq_laneq_s16(int16x8_t v) { + return (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)v, 7, 33); + + // CIR-LABEL: test_splatq_laneq_s16 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_laneq_s16(<8 x i16>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[RES:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP1]], <8 x i32> + // LLVM: ret <8 x i16> [[RES]] +} + +int32x4_t test_splatq_laneq_s32(int32x4_t v) { + return (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)v, 3, 34); + + // CIR-LABEL: test_splatq_laneq_s32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_laneq_s32(<4 x i32>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[RES:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP1]], <4 x i32> + // LLVM: ret <4 x i32> [[RES]] +} + +int64x2_t test_splatq_laneq_s64(int64x2_t v) { + return (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)v, 0, 35); + + // CIR-LABEL: test_splatq_laneq_s64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_laneq_s64(<2 x i64>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[RES:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP1]], <2 x i32> zeroinitializer + // LLVM: ret <2 x i64> [[RES]] +} + +float32x4_t test_splatq_laneq_f32(float32x4_t v) { + return (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)v, 3, 41); + + // CIR-LABEL: test_splatq_laneq_f32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_laneq_f32(<4 x float>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x float> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> + // LLVM: [[RES:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP1]], <4 x i32> + // LLVM: ret <4 x float> [[RES]] +} + +float64x2_t test_splatq_laneq_f64(float64x2_t v) { + return (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)v, 0, 42); + + // CIR-LABEL: test_splatq_laneq_f64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_laneq_f64(<2 x double>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double> + // LLVM: [[RES:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> [[TMP1]], <2 x i32> zeroinitializer + // LLVM: ret <2 x double> [[RES]] +} + +uint8x16_t test_splatq_laneq_u8(uint8x16_t v) { + return (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)v, 15, 48); + + // CIR-LABEL: test_splatq_laneq_u8 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i, #cir.int<15> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_laneq_u8(<16 x i8>{{.*}}[[V:%.*]]) + // LLVM: [[RES:%.*]] = shufflevector <16 x i8> [[V]], <16 x i8> [[V]], + // LLVM-SAME: <16 x i32> + // LLVM: ret <16 x i8> [[RES]] +} + +uint16x8_t test_splatq_laneq_u16(uint16x8_t v) { + return (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)v, 7, 49); + + // CIR-LABEL: test_splatq_laneq_u16 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i, #cir.int<7> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_laneq_u16(<8 x i16>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[RES:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP1]], <8 x i32> + // LLVM: ret <8 x i16> [[RES]] +} + +uint32x4_t test_splatq_laneq_u32(uint32x4_t v) { + return (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)v, 3, 50); + + // CIR-LABEL: test_splatq_laneq_u32 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i, #cir.int<3> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_laneq_u32(<4 x i32>{{.*}} + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[RES:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP1]], <4 x i32> + // LLVM: ret <4 x i32> [[RES]] +} + +uint64x2_t test_splatq_laneq_u64(uint64x2_t v) { + return (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)v, 0, 51); + + // CIR-LABEL: test_splatq_laneq_u64 + // CIR: [[VEC:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.vec.shuffle([[VEC]], [[VEC]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<0> : !s32i] : !cir.vector + + // LLVM: {{.*}}@test_splatq_laneq_u64(<2 x i64>{{.*}}[[V:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[V]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[RES:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP1]], <2 x i32> zeroinitializer + // LLVM: ret <2 x i64> [[RES]] +} From 4fe38ef5057da1076bc3c5b03888c1696da6a397 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 15 Nov 2024 00:34:25 -0500 Subject: [PATCH 2087/2301] [CIR][CIRGen][Builtin] Support __builtin___memmove_chk (#1106) --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 49 +++++++++++------ clang/test/CIR/CodeGen/builtins-memory.c | 67 +++++++++++++++++++++++- 2 files changed, 98 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index b5a51d678fd8..611561b6c0ae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -345,6 +345,19 @@ RValue CIRGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) { return RValue::get(r); } +static bool isMemBuiltinOutOfBoundPossible(const clang::Expr *sizeArg, + const clang::Expr *dstSizeArg, + clang::ASTContext &astContext, + llvm::APSInt &size) { + clang::Expr::EvalResult sizeResult, dstSizeResult; + if (!sizeArg->EvaluateAsInt(sizeResult, astContext) || + !dstSizeArg->EvaluateAsInt(dstSizeResult, astContext)) + return true; + size = sizeResult.Val.getInt(); + llvm::APSInt dstSize = dstSizeResult.Val.getInt(); + return size.ugt(dstSize); +} + RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue) { @@ -1488,13 +1501,9 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin___memcpy_chk: { // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. - Expr::EvalResult sizeResult, dstSizeResult; - if (!E->getArg(2)->EvaluateAsInt(sizeResult, CGM.getASTContext()) || - !E->getArg(3)->EvaluateAsInt(dstSizeResult, CGM.getASTContext())) - break; - llvm::APSInt size = sizeResult.Val.getInt(); - llvm::APSInt dstSize = dstSizeResult.Val.getInt(); - if (size.ugt(dstSize)) + llvm::APSInt size; + if (isMemBuiltinOutOfBoundPossible(E->getArg(2), E->getArg(3), + CGM.getASTContext(), size)) break; Address dest = emitPointerWithAlignment(E->getArg(0)); Address src = emitPointerWithAlignment(E->getArg(1)); @@ -1507,9 +1516,19 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_objc_memmove_collectable: llvm_unreachable("BI__builtin_objc_memmove_collectable NYI"); - case Builtin::BI__builtin___memmove_chk: - llvm_unreachable("BI__builtin___memmove_chk NYI"); - + case Builtin::BI__builtin___memmove_chk: { + // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. + llvm::APSInt size; + if (isMemBuiltinOutOfBoundPossible(E->getArg(2), E->getArg(3), + CGM.getASTContext(), size)) + break; + Address Dest = emitPointerWithAlignment(E->getArg(0)); + Address Src = emitPointerWithAlignment(E->getArg(1)); + auto loc = getLoc(E->getSourceRange()); + ConstantOp sizeOp = builder.getConstInt(loc, size); + builder.createMemMove(loc, Dest.getPointer(), Src.getPointer(), sizeOp); + return RValue::get(Dest.getPointer()); + } case Builtin::BImemmove: case Builtin::BI__builtin_memmove: { Address Dest = emitPointerWithAlignment(E->getArg(0)); @@ -1539,13 +1558,9 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_memset_inline NYI"); case Builtin::BI__builtin___memset_chk: { // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2. - Expr::EvalResult sizeResult, dstSizeResult; - if (!E->getArg(2)->EvaluateAsInt(sizeResult, CGM.getASTContext()) || - !E->getArg(3)->EvaluateAsInt(dstSizeResult, CGM.getASTContext())) - break; - llvm::APSInt size = sizeResult.Val.getInt(); - llvm::APSInt dstSize = dstSizeResult.Val.getInt(); - if (size.ugt(dstSize)) + llvm::APSInt size; + if (isMemBuiltinOutOfBoundPossible(E->getArg(2), E->getArg(3), + CGM.getASTContext(), size)) break; Address dest = emitPointerWithAlignment(E->getArg(0)); mlir::Value byteVal = emitScalarExpr(E->getArg(1)); diff --git a/clang/test/CIR/CodeGen/builtins-memory.c b/clang/test/CIR/CodeGen/builtins-memory.c index 940e09a8ed6d..472d2103a960 100644 --- a/clang/test/CIR/CodeGen/builtins-memory.c +++ b/clang/test/CIR/CodeGen/builtins-memory.c @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - \ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - \ // RUN: | opt -S -passes=instcombine,mem2reg,simplifycfg -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s @@ -58,6 +58,71 @@ void test_memcpy_chk(void *dest, const void *src, size_t n) { __builtin___memcpy_chk(dest, src, n, n); } +void test_memmove_chk(void *dest, const void *src, size_t n) { + // CIR-LABEL: cir.func @test_memmove_chk + // CIR: %[[#DEST:]] = cir.alloca {{.*}} ["dest", init] + // CIR: %[[#SRC:]] = cir.alloca {{.*}} ["src", init] + // CIR: %[[#N:]] = cir.alloca {{.*}} ["n", init] + + // LLVM-LABEL: test_memmove_chk + + // An unchecked memmove should be emitted when the count and buffer size are + // constants and the count is less than or equal to the buffer size. + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<8> + // CIR: cir.libc.memmove %[[#COUNT]] bytes from %[[#SRC_LOAD]] to %[[#DEST_LOAD]] + // LLVM: call void @llvm.memmove.p0.p0.i64(ptr {{%.*}}, ptr {{%.*}}, i64 8, i1 false) + // COM: LLVM: call void @llvm.memmove.p0.p0.i64(ptr align 1 {{%.*}}, ptr align 1 {{%.*}}, i64 8, i1 false) + __builtin___memmove_chk(dest, src, 8, 10); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<10> + // CIR: cir.libc.memmove %[[#COUNT]] bytes from %[[#SRC_LOAD]] to %[[#DEST_LOAD]] + // LLVM: call void @llvm.memmove.p0.p0.i64(ptr {{%.*}}, ptr {{%.*}}, i64 10, i1 false) + // COM: LLVM: call void @llvm.memmove.p0.p0.i64(ptr align 1 {{%.*}}, ptr align 1 {{%.*}}, i64 10, i1 false) + __builtin___memmove_chk(dest, src, 10, 10); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<10> + // CIR: %[[#SIZE:]] = cir.const #cir.int<8> + // CIR: cir.call @__memmove_chk(%[[#DEST_LOAD]], %[[#SRC_LOAD]], %[[#COUNT]], %[[#SIZE]]) + // LLVM: call ptr @__memmove_chk(ptr {{%.*}}, ptr {{%.*}}, i64 10, i64 8) + // COM: LLVM: call ptr @__memmove_chk(ptr noundef %4, ptr noundef %5, i64 noundef 10, i64 noundef 8) + __builtin___memmove_chk(dest, src, 10lu, 8lu); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#N_LOAD:]] = cir.load %[[#N]] + // CIR: %[[#SIZE:]] = cir.const #cir.int<10> + // CIR: cir.call @__memmove_chk(%[[#DEST_LOAD]], %[[#SRC_LOAD]], %[[#N_LOAD]], %[[#SIZE]]) + // LLVM: call ptr @__memmove_chk(ptr {{%.*}}, ptr {{%.*}}, i64 {{%.*}}, i64 10) + // COM: LLVM: call ptr @__memmove_chk(ptr noundef {{%.*}}, ptr noundef {{%.*}}, i64 noundef {{%.*}}, i64 noundef 10) + __builtin___memmove_chk(dest, src, n, 10lu); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#COUNT:]] = cir.const #cir.int<10> + // CIR: %[[#N_LOAD:]] = cir.load %[[#N]] + // CIR: cir.call @__memmove_chk(%[[#DEST_LOAD]], %[[#SRC_LOAD]], %[[#COUNT]], %[[#N_LOAD]]) + // LLVM: call ptr @__memmove_chk(ptr {{%.*}}, ptr {{%.*}}, i64 10, i64 {{%.*}}) + // COM: LLVM: call ptr @__memmove_chk(ptr noundef {{%.*}}, ptr noundef {{%.*}}, i64 noundef 10, i64 noundef {{%.*}}) + __builtin___memmove_chk(dest, src, 10lu, n); + + // CIR: %[[#DEST_LOAD:]] = cir.load %[[#DEST]] + // CIR: %[[#SRC_LOAD:]] = cir.load %[[#SRC]] + // CIR: %[[#N_LOAD1:]] = cir.load %[[#N]] + // CIR: %[[#N_LOAD2:]] = cir.load %[[#N]] + // CIR: cir.call @__memmove_chk(%[[#DEST_LOAD]], %[[#SRC_LOAD]], %[[#N_LOAD1]], %[[#N_LOAD2]]) + // LLVM: call ptr @__memmove_chk(ptr {{%.*}}, ptr {{%.*}}, i64 {{%.*}}, i64 {{%.*}}) + // COM: LLVM: call ptr @__memmove_chk(ptr noundef {{%.*}}, ptr noundef {{%.*}}, i64 noundef {{%.*}}, i64 noundef {{%.*}}) + __builtin___memmove_chk(dest, src, n, n); +} + + void test_memset_chk(void *dest, int ch, size_t n) { // CIR-LABEL: cir.func @test_memset_chk // CIR: %[[#DEST:]] = cir.alloca {{.*}} ["dest", init] From 63fd9ef9b9446f9481e4744f93cf4f91da31fbec Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 8 Nov 2024 13:32:31 -0800 Subject: [PATCH 2088/2301] [CIR][CIRGen] Support for visual bases in getAddressOfBaseClass This is going to be raised in follow up work, which is hard to do in one go because createBaseClassAddr goes of the OG skeleton and ideally we want ApplyNonVirtualAndVirtualOffset to work naturally. This also doesn't handle null checks, coming next. --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 48 ++++++++---------- clang/test/CIR/CodeGen/virtual-base-cast.cpp | 53 ++++++++++++++++++++ 2 files changed, 75 insertions(+), 26 deletions(-) create mode 100644 clang/test/CIR/CodeGen/virtual-base-cast.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index d3b96e5beedf..8d91c206dd0c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1545,7 +1545,9 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, // *start* with a step down to the correct virtual base subobject, // and hence will not require any further steps. if ((*Start)->isVirtual()) { - llvm_unreachable("NYI: Cast to virtual base class"); + VBase = cast( + (*Start)->getType()->castAs()->getDecl()); + ++Start; } // Compute the static offset of the ultimate destination within its @@ -1574,39 +1576,33 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, if (sanitizePerformTypeCheck()) { llvm_unreachable("NYI: sanitizePerformTypeCheck"); } - return builder.createBaseClassAddr(getLoc(Loc), Value, BaseValueTy, - NonVirtualOffset.getQuantity(), - /*assumeNotNull=*/not NullCheckValue); } if (sanitizePerformTypeCheck()) { assert(!cir::MissingFeatures::sanitizeOther()); } - // Conversion to a virtual base. cir.base_class_addr can't handle this. - // Generate the code to look up the address in the virtual table. - - llvm_unreachable("NYI: Cast to virtual base class"); - - // This is just an outline of what the code might look like, since I can't - // actually test it. -#if 0 - mlir::Value VirtualOffset = ...; // This is a dynamic expression. Creating - // it requires calling an ABI-specific - // function. - Value = ApplyNonVirtualAndVirtualOffset(getLoc(Loc), *this, Value, - NonVirtualOffset, VirtualOffset, - Derived, VBase); - Value = builder.createElementBitCast(Value.getPointer().getLoc(), Value, - BaseValueTy); - if (sanitizePerformTypeCheck()) { - // Do something here - } - if (NullCheckValue) { - // Convert to 'derivedPtr == nullptr ? nullptr : basePtr' + // Compute the virtual offset. + mlir::Value VirtualOffset = nullptr; + if (VBase) { + VirtualOffset = CGM.getCXXABI().getVirtualBaseClassOffset( + getLoc(Loc), *this, Value, Derived, VBase); + } else { + Value = builder.createBaseClassAddr(getLoc(Loc), Value, BaseValueTy, + NonVirtualOffset.getQuantity(), + /*assumeNotNull=*/not NullCheckValue); } -#endif + // Apply both offsets. + // FIXME: remove condition. + if (VBase) + Value = ApplyNonVirtualAndVirtualOffset(getLoc(Loc), *this, Value, + NonVirtualOffset, VirtualOffset, + Derived, VBase); + + // Cast to the destination type. + if (VBase) + Value = Value.withElementType(BaseValueTy); return Value; } diff --git a/clang/test/CIR/CodeGen/virtual-base-cast.cpp b/clang/test/CIR/CodeGen/virtual-base-cast.cpp new file mode 100644 index 000000000000..2c2a3e378714 --- /dev/null +++ b/clang/test/CIR/CodeGen/virtual-base-cast.cpp @@ -0,0 +1,53 @@ + +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -std=c++20 -mconstructor-aliases -O0 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -std=c++20 -mconstructor-aliases -O0 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +struct A { int a; virtual int aa(); }; +struct B { int b; virtual int bb(); }; +struct C : virtual A, virtual B { int c; virtual int aa(); virtual int bb(); }; +struct AA { int a; virtual int aa(); }; +struct BB { int b; virtual int bb(); }; +struct CC : AA, BB { virtual int aa(); virtual int bb(); virtual int cc(); }; +struct D : virtual C, virtual CC { int e; }; + +D* x; + +A* a() { return x; } +// CIR-LABEL: @_Z1av() +// CIR: %[[OFFSET_OFFSET:.*]] = cir.const #cir.int<-32> : !s64i +// CIR: %[[OFFSET_PTR:.*]] = cir.ptr_stride(%4 : !cir.ptr, %[[OFFSET_OFFSET]] : !s64i), !cir.ptr +// CIR: %[[OFFSET_PTR_CAST:.*]] = cir.cast(bitcast, %[[OFFSET_PTR]] : !cir.ptr), !cir.ptr +// CIR: %[[OFFSET:.*]] = cir.load %[[OFFSET_PTR_CAST]] : !cir.ptr, !s64i +// CIR: %[[VBASE_ADDR:.*]] = cir.ptr_stride({{.*}} : !cir.ptr, %[[OFFSET]] : !s64i), !cir.ptr +// CIR: cir.cast(bitcast, %[[VBASE_ADDR]] : !cir.ptr), !cir.ptr + +// FIXME: this version should include null check. +// LLVM-LABEL: @_Z1av() +// LLVM: %[[OFFSET_OFFSET:.*]] = getelementptr i8, ptr {{.*}}, i64 -32 +// LLVM: %[[OFFSET_PTR:.*]] = load i64, ptr %[[OFFSET_OFFSET]], align 8 +// LLVM: %[[VBASE_ADDR:.*]] = getelementptr i8, ptr {{.*}}, i64 %[[OFFSET_PTR]] +// LLVM: store ptr %[[VBASE_ADDR]], ptr {{.*}}, align 8 + +B* b() { return x; } +BB* c() { return x; } + +// Put the vbptr at a non-zero offset inside a non-virtual base. +struct E { int e; }; +struct F : E, D { int f; }; + +F* y; + +BB* d() { return y; } +// CIR-LABEL: @_Z1dv +// CIR: %[[OFFSET:.*]] = cir.load {{.*}} : !cir.ptr, !s64i +// CIR: %[[ADJUST:.*]] = cir.const #cir.int<16> : !s64i +// CIR: cir.binop(add, %[[OFFSET]], %[[ADJUST]]) : !s64i + +// LLVM-LABEL: @_Z1dv +// LLVM: %[[OFFSET_OFFSET:.*]] = getelementptr i8, ptr {{.*}}, i64 -48 +// LLVM: %[[OFFSET_PTR:.*]] = load i64, ptr %[[OFFSET_OFFSET]], align 8 +// LLVM: %[[ADJUST:.*]] = add i64 %[[OFFSET_PTR]], 16 +// LLVM: %[[VBASE_ADDR:.*]] = getelementptr i8, ptr {{.*}}, i64 %[[ADJUST]] +// LLVM: store ptr %[[VBASE_ADDR]], \ No newline at end of file From 344b515d6cebed559e613b04c54fe4a7dc91b6d5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Nov 2024 21:48:17 -0800 Subject: [PATCH 2089/2301] [CIR][NFC] Fix unused variable warning --- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index bfa8ef62f54e..3a600a0a0575 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -518,7 +518,6 @@ void StructType::computeSizeAndAlignment( auto ty = members[i]; // Found a nested union: recurse into it to fetch its largest member. - auto structMember = mlir::dyn_cast(ty); if (!largestMember || dataLayout.getTypeABIAlignment(ty) > dataLayout.getTypeABIAlignment(largestMember) || From 530afbc9a8a4b32773f7fcc4b2960323bea3bfd2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Nov 2024 21:54:00 -0800 Subject: [PATCH 2090/2301] [CIR][CIRGen] Bring getAddressOfBaseClass a bit closer to OG --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 4 +++- clang/test/CIR/CodeGen/derived-cast.cpp | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 8d91c206dd0c..e52a6516c8de 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1572,10 +1572,12 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, // If there is no virtual base, use cir.base_class_addr. It takes care of // the adjustment and the null pointer check. - if (!VBase) { + if (NonVirtualOffset.isZero() && !VBase) { if (sanitizePerformTypeCheck()) { llvm_unreachable("NYI: sanitizePerformTypeCheck"); } + return builder.createBaseClassAddr(getLoc(Loc), Value, BaseValueTy, 0, + /*assumeNotNull=*/true); } if (sanitizePerformTypeCheck()) { diff --git a/clang/test/CIR/CodeGen/derived-cast.cpp b/clang/test/CIR/CodeGen/derived-cast.cpp index 28109f553a5e..243f15e8f0c3 100644 --- a/clang/test/CIR/CodeGen/derived-cast.cpp +++ b/clang/test/CIR/CodeGen/derived-cast.cpp @@ -26,7 +26,7 @@ A *B::getAsA() { // CIR: %[[VAL_2:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__retval"] {alignment = 8 : i64} // CIR: %[[VAL_3:.*]] = cir.load %[[VAL_1]] : !cir.ptr>, !cir.ptr // CIR: %[[VAL_4:.*]] = cir.derived_class_addr(%[[VAL_3]] : !cir.ptr nonnull) [4] -> !cir.ptr -// CIR: %[[VAL_5:.*]] = cir.base_class_addr(%[[VAL_4]] : !cir.ptr) [0] -> !cir.ptr +// CIR: %[[VAL_5:.*]] = cir.base_class_addr(%[[VAL_4]] : !cir.ptr nonnull) [0] -> !cir.ptr // CIR: cir.store %[[VAL_5]], %[[VAL_2]] : !cir.ptr, !cir.ptr> // CIR: %[[VAL_6:.*]] = cir.load %[[VAL_2]] : !cir.ptr>, !cir.ptr // CIR: cir.return %[[VAL_6]] : !cir.ptr From 66fe0327f63d9932d35c6317625d1cc513e5b8a5 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Nov 2024 21:58:10 -0800 Subject: [PATCH 2091/2301] [CIR][CIRGen][NFC] More unification of virtual and non-virtual offset paths --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index e52a6516c8de..fd498a92275d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -654,7 +654,8 @@ void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *CD, static Address ApplyNonVirtualAndVirtualOffset( mlir::Location loc, CIRGenFunction &CGF, Address addr, CharUnits nonVirtualOffset, mlir::Value virtualOffset, - const CXXRecordDecl *derivedClass, const CXXRecordDecl *nearestVBase) { + const CXXRecordDecl *derivedClass, const CXXRecordDecl *nearestVBase, + mlir::Type baseValueTy = {}, bool assumeNotNull = true) { // Assert that we have something to do. assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr); @@ -671,6 +672,14 @@ static Address ApplyNonVirtualAndVirtualOffset( if (virtualOffset) { baseOffset = CGF.getBuilder().createBinop( virtualOffset, cir::BinOpKind::Add, baseOffset); + } else if (baseValueTy) { + // TODO(cir): this should be used as a firt class in this function for the + // nonVirtualOffset cases, but all users of this function need to be + // updated first. + baseOffset.getDefiningOp()->erase(); + return CGF.getBuilder().createBaseClassAddr( + loc, addr, baseValueTy, nonVirtualOffset.getQuantity(), + assumeNotNull); } } else { baseOffset = virtualOffset; @@ -1589,22 +1598,17 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, if (VBase) { VirtualOffset = CGM.getCXXABI().getVirtualBaseClassOffset( getLoc(Loc), *this, Value, Derived, VBase); - } else { - Value = builder.createBaseClassAddr(getLoc(Loc), Value, BaseValueTy, - NonVirtualOffset.getQuantity(), - /*assumeNotNull=*/not NullCheckValue); } // Apply both offsets. - // FIXME: remove condition. - if (VBase) - Value = ApplyNonVirtualAndVirtualOffset(getLoc(Loc), *this, Value, - NonVirtualOffset, VirtualOffset, - Derived, VBase); + Value = ApplyNonVirtualAndVirtualOffset( + getLoc(Loc), *this, Value, NonVirtualOffset, VirtualOffset, Derived, + VBase, BaseValueTy, not NullCheckValue); // Cast to the destination type. if (VBase) Value = Value.withElementType(BaseValueTy); + return Value; } From c370c1d885ed29d459e51ff5ef11748f5ee5a5b7 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Nov 2024 22:14:39 -0800 Subject: [PATCH 2092/2301] [CIR][CIRGen][NFC] More skeleton conformance Now that we fixed the dep on VBase, clean up the rest of the function. --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index fd498a92275d..3b7792f6a785 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1606,8 +1606,7 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, VBase, BaseValueTy, not NullCheckValue); // Cast to the destination type. - if (VBase) - Value = Value.withElementType(BaseValueTy); + Value = Value.withElementType(BaseValueTy); return Value; } From 38bb0648452e1366e0ab7fdb5865c09bdda26111 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 14 Nov 2024 23:12:04 -0800 Subject: [PATCH 2093/2301] [CIR][CIRGen] Teach all uses of ApplyNonVirtualAndVirtualOffset to use BaseClassAddrOp --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 26 ++++++++++++------------- clang/test/CIR/CodeGen/multi-vtable.cpp | 9 +++------ clang/test/CIR/CodeGen/vtt.cpp | 14 ++++--------- 3 files changed, 20 insertions(+), 29 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 3b7792f6a785..3a801872d084 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -662,21 +662,19 @@ static Address ApplyNonVirtualAndVirtualOffset( // Compute the offset from the static and dynamic components. mlir::Value baseOffset; if (!nonVirtualOffset.isZero()) { - mlir::Type OffsetType = - (CGF.CGM.getTarget().getCXXABI().isItaniumFamily() && - CGF.CGM.getItaniumVTableContext().isRelativeLayout()) - ? CGF.SInt32Ty - : CGF.PtrDiffTy; - baseOffset = CGF.getBuilder().getConstInt(loc, OffsetType, - nonVirtualOffset.getQuantity()); if (virtualOffset) { + mlir::Type OffsetType = + (CGF.CGM.getTarget().getCXXABI().isItaniumFamily() && + CGF.CGM.getItaniumVTableContext().isRelativeLayout()) + ? CGF.SInt32Ty + : CGF.PtrDiffTy; + baseOffset = CGF.getBuilder().getConstInt(loc, OffsetType, + nonVirtualOffset.getQuantity()); baseOffset = CGF.getBuilder().createBinop( virtualOffset, cir::BinOpKind::Add, baseOffset); - } else if (baseValueTy) { - // TODO(cir): this should be used as a firt class in this function for the - // nonVirtualOffset cases, but all users of this function need to be - // updated first. - baseOffset.getDefiningOp()->erase(); + } else { + assert(baseValueTy && "expected base type"); + // If no virtualOffset is present this is the final stop. return CGF.getBuilder().createBaseClassAddr( loc, addr, baseValueTy, nonVirtualOffset.getQuantity(), assumeNotNull); @@ -725,6 +723,7 @@ void CIRGenFunction::initializeVTablePointer(mlir::Location loc, mlir::Value VirtualOffset{}; CharUnits NonVirtualOffset = CharUnits::Zero(); + mlir::Type BaseValueTy; if (CGM.getCXXABI().isVirtualOffsetNeededForVTableField(*this, Vptr)) { // We need to use the virtual base offset offset because the virtual base // might have a different offset in the most derived class. @@ -734,6 +733,7 @@ void CIRGenFunction::initializeVTablePointer(mlir::Location loc, } else { // We can just use the base offset in the complete class. NonVirtualOffset = Vptr.Base.getBaseOffset(); + BaseValueTy = convertType(getContext().getTagDeclType(Vptr.Base.getBase())); } // Apply the offsets. @@ -741,7 +741,7 @@ void CIRGenFunction::initializeVTablePointer(mlir::Location loc, if (!NonVirtualOffset.isZero() || VirtualOffset) { VTableField = ApplyNonVirtualAndVirtualOffset( loc, *this, VTableField, NonVirtualOffset, VirtualOffset, - Vptr.VTableClass, Vptr.NearestVBase); + Vptr.VTableClass, Vptr.NearestVBase, BaseValueTy); } // Finally, store the address point. Use the same CIR types as the field. diff --git a/clang/test/CIR/CodeGen/multi-vtable.cpp b/clang/test/CIR/CodeGen/multi-vtable.cpp index a00e29f45109..b887e78c8239 100644 --- a/clang/test/CIR/CodeGen/multi-vtable.cpp +++ b/clang/test/CIR/CodeGen/multi-vtable.cpp @@ -55,11 +55,8 @@ int main() { // CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV5Child, vtable_index = 1, address_point_index = 2) : !cir.ptr>> -// CIR: %{{[0-9]+}} = cir.const #cir.int<8> : !s64i -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr -// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: %7 = cir.base_class_addr(%1 : !cir.ptr nonnull) [8] -> !cir.ptr +// CIR: %8 = cir.cast(bitcast, %7 : !cir.ptr), !cir.ptr>>> loc(#loc8) // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: cir.return // CIR: } @@ -70,7 +67,7 @@ int main() { // LLVM-DAG: define linkonce_odr void @_ZN5ChildC2Ev(ptr %0) // LLVM-DAG: store ptr getelementptr inbounds ({ [4 x ptr], [3 x ptr] }, ptr @_ZTV5Child, i32 0, i32 0, i32 2), ptr %{{[0-9]+}}, align 8 -// LLVM-DAG: %{{[0-9]+}} = getelementptr i8, ptr %3, i64 8 +// LLVM-DAG: %{{[0-9]+}} = getelementptr i8, ptr {{.*}}, i32 8 // LLVM-DAG: store ptr getelementptr inbounds ({ [4 x ptr], [3 x ptr] }, ptr @_ZTV5Child, i32 0, i32 1, i32 2), ptr %{{[0-9]+}}, align 8 // LLVM-DAG: ret void // } diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index 16203276c544..ab8cc999f856 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -120,19 +120,13 @@ int f() { // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 2, address_point_index = 3) : !cir.ptr>> -// CIR: %{{[0-9]+}} = cir.const #cir.int<40> : !s64i -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr -// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: %{{[0-9]+}} = cir.base_class_addr(%{{[0-9]+}} : !cir.ptr nonnull) [40] -> !cir.ptr +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 1, address_point_index = 3) : !cir.ptr>> -// CIR: %{{[0-9]+}} = cir.const #cir.int<16> : !s64i -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr -// CIR: %{{[0-9]+}} = cir.ptr_stride(%{{[0-9]+}} : !cir.ptr, %{{[0-9]+}} : !s64i), !cir.ptr -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> +// CIR: cir.base_class_addr(%{{[0-9]+}} : !cir.ptr nonnull) [16] -> !cir.ptr +// CIR: cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> // CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> // CIR: cir.return // CIR: } From 19f7a41adeba3cff80f29621ee97bca4501304b8 Mon Sep 17 00:00:00 2001 From: orbiri Date: Mon, 18 Nov 2024 18:57:14 +0200 Subject: [PATCH 2094/2301] [CIR] Force cir.cmp to always return bool (#1110) It was always the intention for `cir.cmp` operations to return bool result. Due to missing constraints, a bug in codegen has slipped in which created `cir.cmp` operations with result type that matches the original AST expression type. In C, as opposed to C++, boolean expression types are "int". This resulted with extra operations being codegened around boolean expressions and their usage. This commit both enforces `cir.cmp` in the op definition and fixes the mentioned bug. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 13 +- clang/test/CIR/CodeGen/bf16-ops.c | 289 +++++++++--------- clang/test/CIR/CodeGen/float16-ops.c | 144 ++++----- clang/test/CIR/CodeGen/fp16-ops.c | 72 ++--- clang/test/CIR/IR/invalid.cir | 10 + clang/test/CIR/Lowering/ThroughMLIR/doWhile.c | 79 +++-- clang/test/CIR/Lowering/ThroughMLIR/if.c | 156 +++++----- clang/test/CIR/Lowering/ThroughMLIR/while.c | 109 +++---- clang/test/CIR/Lowering/dot.cir | 12 +- clang/test/CIR/Lowering/goto.cir | 7 +- clang/test/CIR/Lowering/loops-with-break.cir | 47 ++- .../test/CIR/Lowering/loops-with-continue.cir | 47 ++- clang/test/CIR/Lowering/switch.cir | 5 +- clang/test/CIR/Transforms/mem2reg.c | 78 +++-- clang/test/CIR/Transforms/scf-prepare.cir | 33 +- clang/test/CIR/Transforms/simpl.c | 10 +- clang/test/CIR/Transforms/switch.cir | 5 +- 18 files changed, 531 insertions(+), 588 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8d4ede9958f0..ac3ca72c0c70 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1268,8 +1268,7 @@ def CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> { ``` }]; - // TODO: get more accurate than CIR_AnyType - let results = (outs CIR_AnyType:$result); + let results = (outs CIR_BoolType:$result); let arguments = (ins Arg:$kind, CIR_AnyType:$lhs, CIR_AnyType:$rhs); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 6c4441ba0a1c..ee2a0c32cbff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -947,12 +947,11 @@ class ScalarExprEmitter : public StmtVisitor { // Other kinds of vectors. Element-wise comparison returning // a vector. cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); - return Builder.create(CGF.getLoc(BOInfo.Loc), - CGF.getCIRType(BOInfo.FullType), - Kind, BOInfo.LHS, BOInfo.RHS); + Result = Builder.create( + CGF.getLoc(BOInfo.Loc), CGF.getCIRType(BOInfo.FullType), Kind, + BOInfo.LHS, BOInfo.RHS); } - } - if (BOInfo.isFixedPointOp()) { + } else if (BOInfo.isFixedPointOp()) { assert(0 && "not implemented"); } else { // FIXME(cir): handle another if above for CIR equivalent on @@ -966,9 +965,7 @@ class ScalarExprEmitter : public StmtVisitor { } cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); - return Builder.create(CGF.getLoc(BOInfo.Loc), - CGF.getCIRType(BOInfo.FullType), Kind, - BOInfo.LHS, BOInfo.RHS); + Result = Builder.createCompare(CGF.getLoc(BOInfo.Loc), Kind, LHS, RHS); } } else { // Complex Comparison: can only be an equality comparison. assert(0 && "not implemented"); diff --git a/clang/test/CIR/CodeGen/bf16-ops.c b/clang/test/CIR/CodeGen/bf16-ops.c index 479be9980546..406446b778eb 100644 --- a/clang/test/CIR/CodeGen/bf16-ops.c +++ b/clang/test/CIR/CodeGen/bf16-ops.c @@ -481,11 +481,11 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fsub bfloat %{{.+}}, %[[#A]] test = (h2 < h0); - // NONATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i - // NATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp olt bfloat %{{.+}}, %{{.+}} @@ -494,13 +494,13 @@ void foo(void) { test = (h2 < (__bf16)42.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 - // NONATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#C]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 - // NATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#C]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp olt bfloat %{{.+}}, 0xR4228 @@ -508,12 +508,12 @@ void foo(void) { test = (h2 < f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM: %{{.+}} = fcmp olt float %[[#LHS]], %{{.+}} @@ -523,12 +523,12 @@ void foo(void) { test = (f2 < h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp olt float %{{.+}}, %[[#RHS]] @@ -538,12 +538,12 @@ void foo(void) { test = (i0 < h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM: %{{.+}} = fcmp olt bfloat %[[#LHS]], %{{.+}} @@ -553,12 +553,12 @@ void foo(void) { test = (h0 < i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp olt bfloat %{{.+}}, %[[#RHS]] @@ -567,11 +567,11 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fcmp olt bfloat %{{.+}}, %[[#RHS]] test = (h0 > h2); - // NONATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i - // NATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp ogt bfloat %{{.+}}, %{{.+}} @@ -580,13 +580,13 @@ void foo(void) { test = ((__bf16)42.0 > h2); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 - // NONATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#C]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 - // NATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#C]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp ogt bfloat 0xR4228, %{{.+}} @@ -594,12 +594,12 @@ void foo(void) { test = (h0 > f2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM: %{{.+}} = fcmp ogt float %[[#LHS]], %{{.+}} @@ -609,12 +609,12 @@ void foo(void) { test = (f0 > h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM: %{{.+}} = fcmp ogt float %{{.+}}, %[[#RHS]] @@ -624,12 +624,12 @@ void foo(void) { test = (i0 > h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM: %{{.+}} = fcmp ogt bfloat %[[#LHS]], %{{.+}} @@ -639,12 +639,12 @@ void foo(void) { test = (h0 > i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp ogt bfloat %{{.+}}, %[[#RHS]] @@ -653,11 +653,11 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ogt bfloat %{{.+}}, %[[#RHS]] test = (h2 <= h0); - // NONATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i - // NATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp ole bfloat %{{.+}}, %{{.+}} @@ -666,13 +666,13 @@ void foo(void) { test = (h2 <= (__bf16)42.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 - // NONATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#C]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 - // NATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#C]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp ole bfloat %{{.+}}, 0xR4228 @@ -680,12 +680,12 @@ void foo(void) { test = (h2 <= f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM: %{{.+}} = fcmp ole float %[[#LHS]], %{{.+}} @@ -695,12 +695,12 @@ void foo(void) { test = (f2 <= h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp ole float %{{.+}}, %[[#RHS]] @@ -710,12 +710,12 @@ void foo(void) { test = (i0 <= h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM: %{{.+}} = fcmp ole bfloat %[[#LHS]], %{{.+}} @@ -725,12 +725,12 @@ void foo(void) { test = (h0 <= i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp ole bfloat %{{.+}}, %[[#RHS]] @@ -739,12 +739,13 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ole bfloat %{{.+}}, %[[#RHS]] test = (h0 >= h2); - // NONATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %[[#B:]] = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i + // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-NEXT: %{{.+}} = cir.get_global @test : !cir.ptr - // NATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp oge bfloat %{{.+}}, %{{.+}} @@ -754,14 +755,14 @@ void foo(void) { // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.bf16 - // NONATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#D]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.bf16 - // NATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i + // NATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#D]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp oge bfloat %{{.+}}, 0xRC000 @@ -769,12 +770,12 @@ void foo(void) { test = (h0 >= f2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM: %{{.+}} = fcmp oge float %[[#LHS]], %{{.+}} @@ -784,12 +785,12 @@ void foo(void) { test = (f0 >= h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM: %{{.+}} = fcmp oge float %{{.+}}, %[[#RHS]] @@ -799,12 +800,12 @@ void foo(void) { test = (i0 >= h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM: %{{.+}} = fcmp oge bfloat %[[#LHS]], %{{.+}} @@ -814,12 +815,12 @@ void foo(void) { test = (h0 >= i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp oge bfloat %{{.+}}, %[[#RHS]] @@ -828,11 +829,11 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fcmp oge bfloat %{{.+}}, %[[#RHS]] test = (h1 == h2); - // NONATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i - // NATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp oeq bfloat %{{.+}}, %{{.+}} @@ -841,13 +842,13 @@ void foo(void) { test = (h1 == (__bf16)1.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 - // NONATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#C]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 - // NATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#C]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp oeq bfloat %{{.+}}, 0xR3F80 @@ -855,12 +856,12 @@ void foo(void) { test = (h1 == f1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#A:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM: %{{.+}} = fcmp oeq float %[[#A]], %{{.+}} @@ -870,12 +871,12 @@ void foo(void) { test = (f1 == h1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp oeq float %{{.+}}, %[[#RHS]] @@ -885,12 +886,12 @@ void foo(void) { test = (i0 == h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM: %{{.+}} = fcmp oeq bfloat %[[#LHS]], %{{.+}} @@ -900,12 +901,12 @@ void foo(void) { test = (h0 == i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp oeq bfloat %{{.+}}, %[[#RHS]] @@ -914,11 +915,11 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fcmp oeq bfloat %{{.+}}, %[[#RHS]] test = (h1 != h2); - // NONATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NONATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i - // NATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i + // NATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#A]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp une bfloat %{{.+}}, %{{.+}} @@ -926,13 +927,13 @@ void foo(void) { test = (h1 != (__bf16)1.0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.bf16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.bf16 - // NATIVE-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i + // NATIVE-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#C]] : !cir.bool), !s32i // NONATIVE-LLVM: %{{.+}} = fcmp une bfloat %{{.+}}, 0xR3F80 @@ -940,12 +941,12 @@ void foo(void) { test = (h1 != f1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM: %{{.+}} = fcmp une float %[[#LHS]], %{{.+}} @@ -955,12 +956,12 @@ void foo(void) { test = (f1 != h1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = fpext bfloat %{{.+}} to float // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp une float %{{.+}}, %[[#RHS]] @@ -970,12 +971,12 @@ void foo(void) { test = (i0 != h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM: %{{.+}} = fcmp une bfloat %[[#LHS]], %{{.+}} @@ -985,12 +986,12 @@ void foo(void) { test = (h0 != i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NONATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.bf16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !s32i - // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.bf16, !cir.bool + // NATIVE-NEXT: %{{.+}} = cir.cast(bool_to_int, %[[#B]] : !cir.bool), !s32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to bfloat // NONATIVE-LLVM-NEXT: %{{.+}} = fcmp une bfloat %{{.+}}, %[[#RHS]] diff --git a/clang/test/CIR/CodeGen/float16-ops.c b/clang/test/CIR/CodeGen/float16-ops.c index 5b3b7127476b..43f686a8b360 100644 --- a/clang/test/CIR/CodeGen/float16-ops.c +++ b/clang/test/CIR/CodeGen/float16-ops.c @@ -477,10 +477,10 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fsub half %{{.+}}, %[[#A]] test = (h2 < h0); - // NONATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i - // NATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp olt half %{{.+}}, %{{.+}} @@ -490,12 +490,12 @@ void foo(void) { test = (h2 < (_Float16)42.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // NONATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // NATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp olt half %{{.+}}, 0xH5140 @@ -504,11 +504,11 @@ void foo(void) { test = (h2 < f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float @@ -519,11 +519,11 @@ void foo(void) { test = (f2 < h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.=}} to float @@ -534,11 +534,11 @@ void foo(void) { test = (i0 < h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half @@ -549,11 +549,11 @@ void foo(void) { test = (h0 < i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half @@ -563,10 +563,10 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fcmp olt half %{{.+}}, %[[#A]] test = (h0 > h2); - // NONATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i - // NATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp ogt half %{{.+}}, %{{.+}} @@ -576,12 +576,12 @@ void foo(void) { test = ((_Float16)42.0 > h2); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // NONATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // NATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp ogt half 0xH5140, %{{.+}} @@ -590,11 +590,11 @@ void foo(void) { test = (h0 > f2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.=}} to float @@ -605,11 +605,11 @@ void foo(void) { test = (f0 > h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float @@ -620,11 +620,11 @@ void foo(void) { test = (i0 > h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half @@ -635,11 +635,11 @@ void foo(void) { test = (h0 > i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half @@ -649,10 +649,10 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ogt half %{{.+}}, %[[#RHS]] test = (h2 <= h0); - // NONATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i - // NATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp ole half %{{.+}}, %{{.+}} @@ -662,12 +662,12 @@ void foo(void) { test = (h2 <= (_Float16)42.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // NONATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // NATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp ole half %{{.+}}, 0xH5140 @@ -676,11 +676,11 @@ void foo(void) { test = (h2 <= f0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float @@ -691,11 +691,11 @@ void foo(void) { test = (f2 <= h0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float @@ -706,11 +706,11 @@ void foo(void) { test = (i0 <= h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half @@ -721,11 +721,11 @@ void foo(void) { test = (h0 <= i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half @@ -735,11 +735,11 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fcmp ole half %{{.+}}, %[[#RHS]] test = (h0 >= h2); - // NONATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // NONATIVE-NEXT: %{{.+}} = cir.get_global @test : !cir.ptr - // NATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp oge half %{{.+}}, %{{.+}} @@ -750,13 +750,13 @@ void foo(void) { // NONATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double // NONATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 - // NONATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double // NATIVE-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 - // NATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp oge half %{{.+}}, 0xHC000 @@ -765,11 +765,11 @@ void foo(void) { test = (h0 >= f2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float @@ -780,11 +780,11 @@ void foo(void) { test = (f0 >= h2); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float @@ -795,11 +795,11 @@ void foo(void) { test = (i0 >= h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half @@ -810,11 +810,11 @@ void foo(void) { test = (h0 >= i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half @@ -824,10 +824,10 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.+}} = fcmp oge half %{{.+}}, %[[#RHS]] test = (h1 == h2); - // NONATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i - // NATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp oeq half %{{.+}}, %{{.+}} @@ -837,12 +837,12 @@ void foo(void) { test = (h1 == (_Float16)1.0); // NONATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // NONATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // NONATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // NATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp oeq half %{{.+}}, 0xH3C00 @@ -851,11 +851,11 @@ void foo(void) { test = (h1 == f1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float @@ -866,11 +866,11 @@ void foo(void) { test = (f1 == h1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float @@ -881,11 +881,11 @@ void foo(void) { test = (i0 == h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half @@ -896,11 +896,11 @@ void foo(void) { test = (h0 == i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half @@ -910,10 +910,10 @@ void foo(void) { // NATIVE-LLVM-NEXT: %{{.=}} = fcmp oeq half %{{.+}}, %[[#RHS]] test = (h1 != h2); - // NONATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i - // NATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp une half %{{.+}}, %{{.+}} @@ -922,12 +922,12 @@ void foo(void) { test = (h1 != (_Float16)1.0); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.double), !cir.f16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // NATIVE-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // NATIVE-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // NONATIVE-LLVM: %{{.+}} = fcmp une half %{{.+}}, 0xH3C00 @@ -936,11 +936,11 @@ void foo(void) { test = (h1 != f1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i + // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float @@ -951,11 +951,11 @@ void foo(void) { test = (f1 != h1); // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#A:]] = fpext half %{{.+}} to float @@ -966,11 +966,11 @@ void foo(void) { test = (i0 != h0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NONATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // NATIVE: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half @@ -981,11 +981,11 @@ void foo(void) { test = (h0 != i0); // NONATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NONATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NONATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NATIVE: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // NATIVE-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // NONATIVE-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half diff --git a/clang/test/CIR/CodeGen/fp16-ops.c b/clang/test/CIR/CodeGen/fp16-ops.c index 04cf64700d74..708d5db1dde0 100644 --- a/clang/test/CIR/CodeGen/fp16-ops.c +++ b/clang/test/CIR/CodeGen/fp16-ops.c @@ -228,7 +228,7 @@ void foo(void) { // CHECK-LLVM-NEXT: %{{.+}} = fsub half %{{.+}}, %[[#A]] test = (h2 < h0); - // CHECK: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#A:]] = cir.cmp(lt, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp olt half %{{.+}}, %{{.+}} @@ -236,14 +236,14 @@ void foo(void) { test = (h2 < (__fp16)42.0); // CHECK: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // CHECK-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#C:]] = cir.cmp(lt, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp olt half %{{.+}}, 0xH5140 test = (h2 < f0); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#A:]] = fpext half %{{.+}} to float @@ -251,7 +251,7 @@ void foo(void) { test = (f2 < h0); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#A:]] = fpext half %{{.=}} to float @@ -259,7 +259,7 @@ void foo(void) { test = (i0 < h0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#B:]] = cir.cmp(lt, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half @@ -267,14 +267,14 @@ void foo(void) { test = (h0 < i0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(lt, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#A:]] = sitofp i32 %{{.+}} to half // CHECK-LLVM-NEXT: %{{.+}} = fcmp olt half %{{.+}}, %[[#A]] test = (h0 > h2); - // CHECK: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#A:]] = cir.cmp(gt, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp ogt half %{{.+}}, %{{.+}} @@ -282,14 +282,14 @@ void foo(void) { test = ((__fp16)42.0 > h2); // CHECK: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // CHECK: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#C:]] = cir.cmp(gt, %[[#B]], %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp ogt half 0xH5140, %{{.+}} test = (h0 > f2); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.=}} to float @@ -297,7 +297,7 @@ void foo(void) { test = (f0 > h2); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float @@ -305,7 +305,7 @@ void foo(void) { test = (i0 > h0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#B:]] = cir.cmp(gt, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half @@ -313,14 +313,14 @@ void foo(void) { test = (h0 > i0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(gt, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half // CHECK-LLVM-NEXT: %{{.+}} = fcmp ogt half %{{.+}}, %[[#RHS]] test = (h2 <= h0); - // CHECK: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#A:]] = cir.cmp(le, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp ole half %{{.+}}, %{{.+}} @@ -328,14 +328,14 @@ void foo(void) { test = (h2 <= (__fp16)42.0); // CHECK: %[[#A:]] = cir.const #cir.fp<4.200000e+01> : !cir.double // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // CHECK-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#C:]] = cir.cmp(le, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp ole half %{{.+}}, 0xH5140 test = (h2 <= f0); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float @@ -343,7 +343,7 @@ void foo(void) { test = (f2 <= h0); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float @@ -351,7 +351,7 @@ void foo(void) { test = (i0 <= h0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#B:]] = cir.cmp(le, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half @@ -359,14 +359,14 @@ void foo(void) { test = (h0 <= i0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(le, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half // CHECK-LLVM-NEXT: %{{.+}} = fcmp ole half %{{.+}}, %[[#RHS]] test = (h0 >= h2); - // CHECK: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#A:]] = cir.cmp(ge, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp oge half %{{.+}}, %{{.+}} @@ -375,14 +375,14 @@ void foo(void) { // CHECK: %[[#A:]] = cir.const #cir.fp<2.000000e+00> : !cir.double // CHECK-NEXT: %[[#B:]] = cir.unary(minus, %[[#A]]) : !cir.double, !cir.double // CHECK-NEXT: %[[#C:]] = cir.cast(floating, %[[#B]] : !cir.double), !cir.f16 - // CHECK-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#D:]] = cir.cmp(ge, %{{.+}}, %[[#C]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#D]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp oge half %{{.+}}, 0xHC000 test = (h0 >= f2); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float @@ -390,7 +390,7 @@ void foo(void) { test = (f0 >= h2); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float @@ -398,7 +398,7 @@ void foo(void) { test = (i0 >= h0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#B:]] = cir.cmp(ge, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half @@ -406,14 +406,14 @@ void foo(void) { test = (h0 >= i0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(ge, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half // CHECK-LLVM-NEXT: %{{.+}} = fcmp oge half %{{.+}}, %[[#RHS]] test = (h1 == h2); - // CHECK: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#A:]] = cir.cmp(eq, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp oeq half %{{.+}}, %{{.+}} @@ -421,14 +421,14 @@ void foo(void) { test = (h1 == (__fp16)1.0); // CHECK: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // CHECK-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#C:]] = cir.cmp(eq, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp oeq half %{{.+}}, 0xH3C00 test = (h1 == f1); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.+}} to float @@ -436,7 +436,7 @@ void foo(void) { test = (f1 == h1); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#RHS:]] = fpext half %{{.+}} to float @@ -444,7 +444,7 @@ void foo(void) { test = (i0 == h0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#B:]] = cir.cmp(eq, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half @@ -452,14 +452,14 @@ void foo(void) { test = (h0 == i0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(eq, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half // CHECK-LLVM-NEXT: %{{.=}} = fcmp oeq half %{{.+}}, %[[#RHS]] test = (h1 != h2); - // CHECK: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#A:]] = cir.cmp(ne, %{{.+}}, %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#A]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp une half %{{.+}}, %{{.+}} @@ -467,14 +467,14 @@ void foo(void) { test = (h1 != (__fp16)1.0); // CHECK: %[[#A:]] = cir.const #cir.fp<1.000000e+00> : !cir.double // CHECK-NEXT: %[[#B:]] = cir.cast(floating, %[[#A]] : !cir.double), !cir.f16 - // CHECK-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#C:]] = cir.cmp(ne, %{{.+}}, %[[#B]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // CHECK-LLVM: %{{.+}} = fcmp une half %{{.+}}, 0xH3C00 test = (h1 != f1); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !s32i + // CHECK: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#LHS:]] = fpext half %{{.=}} to float @@ -482,7 +482,7 @@ void foo(void) { test = (f1 != h1); // CHECK: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.f16), !cir.float - // CHECK-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.float, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#A:]] = fpext half %{{.+}} to float @@ -490,7 +490,7 @@ void foo(void) { test = (i0 != h0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !s32i + // CHECK: %[[#B:]] = cir.cmp(ne, %[[#A]], %{{.+}}) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#LHS:]] = sitofp i32 %{{.+}} to half @@ -498,7 +498,7 @@ void foo(void) { test = (h0 != i0); // CHECK: %[[#A:]] = cir.cast(int_to_float, %{{.+}} : !s32i), !cir.f16 - // CHECK-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !s32i + // CHECK-NEXT: %[[#B:]] = cir.cmp(ne, %{{.+}}, %[[#A]]) : !cir.f16, !cir.bool // CHECK-NEXT: %{{.+}} = cir.cast(integral, %[[#B]] : !s32i), !u32i // CHECK-LLVM: %[[#RHS:]] = sitofp i32 %{{.+}} to half diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 6acb9592246a..af516b2aaed6 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1437,3 +1437,13 @@ cir.global external @f = #cir.fp<0x7FC00000 : !cir.float> : !cir.float // expected-error @below {{unexpected decimal integer literal for a floating point value}} // expected-note @below {{add a trailing dot to make the literal a float}} cir.global external @f = #cir.fp<42> : !cir.float + +// ----- + +// Verify +!s32i = !cir.int +cir.func @cast0(%arg0: !s32i, %arg1: !s32i) { + // expected-error @below {{custom op 'cir.cmp' invalid kind of Type specified}} + %1 = cir.cmp(eq, %arg0, %arg1): !s32i, !s32i + cir.return +} diff --git a/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c b/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c index b6069e8a787e..8cc32dc96c94 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c @@ -43,10 +43,7 @@ void nestedDoWhile() { // CHECK: %[[VAR4:.+]] = memref.load %[[ALLOC1]][] : memref // CHECK: %[[C10_I32:.+]] = arith.constant 10 : i32 // CHECK: %[[CMP:.+]] = arith.cmpi sle, %[[VAR4]], %[[C10_I32]] : i32 -// CHECK: %[[EXT:.+]] = arith.extui %[[CMP]] : i1 to i32 -// CHECK: %[[C0_I32_3:.+]] = arith.constant 0 : i32 -// CHECK: %[[NE:.+]] = arith.cmpi ne, %[[EXT]], %[[C0_I32_3]] : i32 -// CHECK: %[[EXT1:.+]] = arith.extui %[[NE]] : i1 to i8 +// CHECK: %[[EXT1:.+]] = arith.extui %[[CMP]] : i1 to i8 // CHECK: %[[TRUNC:.+]] = arith.trunci %[[EXT1]] : i8 to i1 // CHECK: scf.condition(%[[TRUNC]]) // CHECK: } do { @@ -59,49 +56,43 @@ void nestedDoWhile() { // CHECK: return %[[RET]] : i32 // CHECK: func.func @nestedDoWhile() { -// CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref -// CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 -// CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref +// CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref +// CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +// CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref // CHECK: memref.alloca_scope { -// CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +// CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref // CHECK: scf.while : () -> () { -// CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref -// CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 -// CHECK: %[[ONE:.+]] = arith.addi %[[ZERO]], %[[C1_I32]] : i32 -// CHECK: memref.store %[[ONE]], %[[alloca]][] : memref -// CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 -// CHECK: memref.store %[[C0_I32_1]], %[[alloca_0]][] : memref +// CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +// CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +// CHECK: %[[ONE:.+]] = arith.addi %[[ZERO]], %[[C1_I32]] : i32 +// CHECK: memref.store %[[ONE]], %[[alloca]][] : memref +// CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 +// CHECK: memref.store %[[C0_I32_1]], %[[alloca_0]][] : memref // CHECK: memref.alloca_scope { // CHECK: scf.while : () -> () { -// CHECK: %[[EIGHT:.+]] = memref.load %[[alloca_0]][] : memref -// CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 -// CHECK: %[[NINE:.+]] = arith.cmpi slt, %[[EIGHT]], %[[C2_I32_3]] : i32 -// CHECK: %[[TEN:.+]] = arith.extui %9 : i1 to i32 -// CHECK: %[[C0_I32_4:.+]] = arith.constant 0 : i32 -// CHECK: %[[ELEVEN:.+]] = arith.cmpi ne, %[[TEN]], %[[C0_I32_4]] : i32 -// CHECK: %[[TWELVE:.+]] = arith.extui %[[ELEVEN]] : i1 to i8 -// CHECK: %[[THIRTEEN:.+]] = arith.trunci %[[TWELVE]] : i8 to i1 -// CHECK: scf.condition(%[[THIRTEEN]]) +// CHECK: %[[EIGHT:.+]] = memref.load %[[alloca_0]][] : memref +// CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 +// CHECK: %[[NINE:.+]] = arith.cmpi slt, %[[EIGHT]], %[[C2_I32_3]] : i32 +// CHECK: %[[TWELVE:.+]] = arith.extui %[[NINE]] : i1 to i8 +// CHECK: %[[THIRTEEN:.+]] = arith.trunci %[[TWELVE]] : i8 to i1 +// CHECK: scf.condition(%[[THIRTEEN]]) // CHECK: } do { -// CHECK: %[[EIGHT]] = memref.load %[[alloca_0]][] : memref -// CHECK: %[[C1_I32_3:.+]] = arith.constant 1 : i32 -// CHECK: %[[NINE]] = arith.addi %[[EIGHT]], %[[C1_I32_3]] : i32 -// CHECK: memref.store %[[NINE]], %[[alloca_0]][] : memref -// CHECK: scf.yield -// CHECK: } -// CHECK: } -// CHECK: %[[TWO:.+]] = memref.load %[[alloca]][] : memref -// CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 -// CHECK: %[[THREE:.+]] = arith.cmpi slt, %[[TWO]], %[[C2_I32]] : i32 -// CHECK: %[[FOUR:.+]] = arith.extui %[[THREE]] : i1 to i32 -// CHECK: %[[C0_I32_2:.+]] = arith.constant 0 : i32 -// CHECK: %[[FIVE:.+]] = arith.cmpi ne, %[[FOUR]], %[[C0_I32_2]] : i32 -// CHECK: %[[SIX:.+]] = arith.extui %[[FIVE]] : i1 to i8 -// CHECK: %[[SEVEN:.+]] = arith.trunci %[[SIX]] : i8 to i1 -// CHECK: scf.condition(%[[SEVEN]]) +// CHECK: %[[EIGHT]] = memref.load %[[alloca_0]][] : memref +// CHECK: %[[C1_I32_3:.+]] = arith.constant 1 : i32 +// CHECK: %[[NINE]] = arith.addi %[[EIGHT]], %[[C1_I32_3]] : i32 +// CHECK: memref.store %[[NINE]], %[[alloca_0]][] : memref +// CHECK: scf.yield +// CHECK: } +// CHECK: } +// CHECK: %[[TWO:.+]] = memref.load %[[alloca]][] : memref +// CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +// CHECK: %[[THREE:.+]] = arith.cmpi slt, %[[TWO]], %[[C2_I32]] : i32 +// CHECK: %[[SIX:.+]] = arith.extui %[[THREE]] : i1 to i8 +// CHECK: %[[SEVEN:.+]] = arith.trunci %[[SIX]] : i8 to i1 +// CHECK: scf.condition(%[[SEVEN]]) // CHECK: } do { -// CHECK: scf.yield -// CHECK: } -// CHECK: } -// CHECK: return -// CHECK: } \ No newline at end of file +// CHECK: scf.yield +// CHECK: } +// CHECK: } +// CHECK: return +// CHECK: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/if.c b/clang/test/CIR/Lowering/ThroughMLIR/if.c index 4ff228514cd6..8e88346c727f 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/if.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/if.c @@ -13,34 +13,31 @@ void foo() { //CHECK: func.func @foo() { //CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref -//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref -//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 -//CHECK: memref.store %[[C2_I32]], %[[alloca]][] : memref -//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 -//CHECK: memref.store %[[C0_I32]], %[[alloca_0]][] : memref +//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +//CHECK: memref.store %[[C2_I32]], %[[alloca]][] : memref +//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32]], %[[alloca_0]][] : memref //CHECK: memref.alloca_scope { -//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref -//CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 -//CHECK: %[[ONE:.+]] = arith.cmpi sgt, %[[ZERO]], %[[C0_I32_1]] : i32 -//CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 -//CHECK: %[[C0_I32_2:.+]] = arith.constant 0 : i32 -//CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO]], %[[C0_I32_2]] : i32 -//CHECK: %[[FOUR:.+]] = arith.extui %[[THREE]] : i1 to i8 -//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR]] : i8 to i1 +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi sgt, %[[ZERO]], %[[C0_I32_1]] : i32 +//CHECK: %[[FOUR:.+]] = arith.extui %[[ONE]] : i1 to i8 +//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR]] : i8 to i1 //CHECK: scf.if %[[FIVE]] { -//CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref -//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 -//CHECK: %[[SEVEN:.+]] = arith.addi %[[SIX]], %[[C1_I32]] : i32 -//CHECK: memref.store %[[SEVEN]], %[[alloca_0]][] : memref +//CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[SEVEN:.+]] = arith.addi %[[SIX]], %[[C1_I32]] : i32 +//CHECK: memref.store %[[SEVEN]], %[[alloca_0]][] : memref //CHECK: } else { -//CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref -//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 -//CHECK: %[[SEVEN:.+]] = arith.subi %[[SIX]], %[[C1_I32]] : i32 -//CHECK: memref.store %[[SEVEN]], %[[alloca_0]][] : memref -//CHECK: } -//CHECK: } -//CHECK: return -//CHECK: } +//CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[SEVEN:.+]] = arith.subi %[[SIX]], %[[C1_I32]] : i32 +//CHECK: memref.store %[[SEVEN]], %[[alloca_0]][] : memref +//CHECK: } +//CHECK: } +//CHECK: return +//CHECK: } void foo2() { int a = 2; @@ -51,30 +48,27 @@ void foo2() { } //CHECK: func.func @foo2() { -//CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref -//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref -//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 -//CHECK: memref.store %[[C2_I32]], %[[alloca]][] : memref -//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 -//CHECK: memref.store %[[C0_I32]], %[[alloca_0]][] : memref +//CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +//CHECK: memref.store %[[C2_I32]], %[[alloca]][] : memref +//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32]], %[[alloca_0]][] : memref //CHECK: memref.alloca_scope { -//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref -//CHECK: %[[C3_I32:.+]] = arith.constant 3 : i32 -//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C3_I32]] : i32 -//CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 -//CHECK: %[[C0_I32_1]] = arith.constant 0 : i32 -//CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO]], %[[C0_I32_1]] : i32 -//CHECK: %[[FOUR:.+]] = arith.extui %[[THREE]] : i1 to i8 -//CHECK: %[[FIVE]] = arith.trunci %[[FOUR]] : i8 to i1 +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C3_I32:.+]] = arith.constant 3 : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C3_I32]] : i32 +//CHECK: %[[FOUR:.+]] = arith.extui %[[ONE]] : i1 to i8 +//CHECK: %[[FIVE]] = arith.trunci %[[FOUR]] : i8 to i1 //CHECK: scf.if %[[FIVE]] { -//CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref -//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 -//CHECK: %[[SEVEN:.+]] = arith.addi %[[SIX]], %[[C1_I32]] : i32 -//CHECK: memref.store %[[SEVEN]], %[[alloca_0]][] : memref -//CHECK: } -//CHECK: } -//CHECK: return -//CHECK: } +//CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[SEVEN:.+]] = arith.addi %[[SIX]], %[[C1_I32]] : i32 +//CHECK: memref.store %[[SEVEN]], %[[alloca_0]][] : memref +//CHECK: } +//CHECK: } +//CHECK: return +//CHECK: } void foo3() { int a = 2; @@ -93,45 +87,39 @@ void foo3() { //CHECK: func.func @foo3() { //CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref //CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref -//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 -//CHECK: memref.store %[[C2_I32]], %[[alloca]][] : memref -//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 -//CHECK: memref.store %[[C0_I32]], %[[alloca_0]][] : memref +//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +//CHECK: memref.store %[[C2_I32]], %[[alloca]][] : memref +//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32]], %[[alloca_0]][] : memref //CHECK: memref.alloca_scope { -//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref -//CHECK: %[[C3_I32:.+]] = arith.constant 3 : i32 -//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C3_I32]] : i32 -//CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 -//CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 -//CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO:.+]], %[[C0_I32_1]] : i32 -//CHECK: %[[FOUR:.+]] = arith.extui %[[THREE]] : i1 to i8 -//CHECK: %[[FIVE]] = arith.trunci %[[FOUR]] : i8 to i1 +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C3_I32:.+]] = arith.constant 3 : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C3_I32]] : i32 +//CHECK: %[[FOUR:.+]] = arith.extui %[[ONE]] : i1 to i8 +//CHECK: %[[FIVE]] = arith.trunci %[[FOUR]] : i8 to i1 //CHECK: scf.if %[[FIVE]] { -//CHECK: %[[alloca_2:.+]] = memref.alloca() {alignment = 4 : i64} : memref -//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 -//CHECK: memref.store %[[C1_I32]], %[[alloca_2]][] : memref +//CHECK: %[[alloca_2:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: memref.store %[[C1_I32]], %[[alloca_2]][] : memref //CHECK: memref.alloca_scope { -//CHECK: %[[SIX:.+]] = memref.load %[[alloca_2]][] : memref -//CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 -//CHECK: %[[SEVEN:.+]] = arith.cmpi sgt, %[[SIX]], %[[C2_I32_3]] : i32 -//CHECK: %[[EIGHT:.+]] = arith.extui %[[SEVEN]] : i1 to i32 -//CHECK: %[[C0_I32_4:.+]] = arith.constant 0 : i32 -//CHECK: %[[NINE:.+]] = arith.cmpi ne, %[[EIGHT]], %[[C0_I32_4]] : i32 -//CHECK: %[[TEN:.+]] = arith.extui %[[NINE]] : i1 to i8 -//CHECK: %[[ELEVEN:.+]] = arith.trunci %[[TEN]] : i8 to i1 +//CHECK: %[[SIX:.+]] = memref.load %[[alloca_2]][] : memref +//CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 +//CHECK: %[[SEVEN:.+]] = arith.cmpi sgt, %[[SIX]], %[[C2_I32_3]] : i32 +//CHECK: %[[TEN:.+]] = arith.extui %[[SEVEN]] : i1 to i8 +//CHECK: %[[ELEVEN:.+]] = arith.trunci %[[TEN]] : i8 to i1 //CHECK: scf.if %[[ELEVEN]] { -//CHECK: %[[TWELVE:.+]] = memref.load %[[alloca_0]][] : memref -//CHECK: %[[C1_I32_5:.+]] = arith.constant 1 : i32 -//CHECK: %[[THIRTEEN:.+]] = arith.addi %[[TWELVE]], %[[C1_I32_5]] : i32 -//CHECK: memref.store %[[THIRTEEN]], %[[alloca_0]][] : memref +//CHECK: %[[TWELVE:.+]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32_5:.+]] = arith.constant 1 : i32 +//CHECK: %[[THIRTEEN:.+]] = arith.addi %[[TWELVE]], %[[C1_I32_5]] : i32 +//CHECK: memref.store %[[THIRTEEN]], %[[alloca_0]][] : memref //CHECK: } else { -//CHECK: %[[TWELVE:.+]] = memref.load %[[alloca_0]][] : memref -//CHECK: %[[C1_I32_5:.+]] = arith.constant 1 : i32 -//CHECK: %[[THIRTEEN:.+]] = arith.subi %[[TWELVE]], %[[C1_I32_5]] : i32 -//CHECK: memref.store %[[THIRTEEN]], %[[alloca_0]][] : memref -//CHECK: } -//CHECK: } -//CHECK: } -//CHECK: } -//CHECK: return -//CHECK: } +//CHECK: %[[TWELVE:.+]] = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32_5:.+]] = arith.constant 1 : i32 +//CHECK: %[[THIRTEEN:.+]] = arith.subi %[[TWELVE]], %[[C1_I32_5]] : i32 +//CHECK: memref.store %[[THIRTEEN]], %[[alloca_0]][] : memref +//CHECK: } +//CHECK: } +//CHECK: } +//CHECK: } +//CHECK: return +//CHECK: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/while.c b/clang/test/CIR/Lowering/ThroughMLIR/while.c index 40ad92de95e4..8cc1f7bca30d 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/while.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/while.c @@ -21,75 +21,66 @@ void nestedWhile() { //CHECK: func.func @singleWhile() { //CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref -//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 -//CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref +//CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref //CHECK: memref.alloca_scope { //CHECK: scf.while : () -> () { -//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref -//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 -//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO:.+]], %[[C2_I32]] : i32 -//CHECK: %[[TWO:.+]] = arith.extui %[[ONE:.+]] : i1 to i32 -//CHECK: %[[C0_I32_0:.+]] = arith.constant 0 : i32 -//CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO:.+]], %[[C0_I32_0]] : i32 -//CHECK: %[[FOUR:.+]] = arith.extui %[[THREE:.+]] : i1 to i8 -//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR:.+]] : i8 to i1 -//CHECK: scf.condition(%[[FIVE]]) +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO:.+]], %[[C2_I32]] : i32 +//CHECK: %[[FOUR:.+]] = arith.extui %[[ONE:.+]] : i1 to i8 +//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR:.+]] : i8 to i1 +//CHECK: scf.condition(%[[FIVE]]) //CHECK: } do { -//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref -//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 -//CHECK: %[[ONE:.+]] = arith.addi %0, %[[C1_I32:.+]] : i32 -//CHECK: memref.store %[[ONE:.+]], %[[alloca]][] : memref -//CHECK: scf.yield -//CHECK: } -//CHECK: } -//CHECK: return -//CHECK: } +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[ONE:.+]] = arith.addi %0, %[[C1_I32:.+]] : i32 +//CHECK: memref.store %[[ONE:.+]], %[[alloca]][] : memref +//CHECK: scf.yield +//CHECK: } +//CHECK: } +//CHECK: return +//CHECK: } //CHECK: func.func @nestedWhile() { //CHECK: %[[alloca:.+]] = memref.alloca() {alignment = 4 : i64} : memref //CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 -//CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref +//CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref //CHECK: memref.alloca_scope { -//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref //CHECK: scf.while : () -> () { -//CHECK: %[[ZERO:.+]] = memref.load %alloca[] : memref -//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 -//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C2_I32]] : i32 -//CHECK: %[[TWO:.+]] = arith.extui %[[ONE]] : i1 to i32 -//CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 -//CHECK: %[[THREE:.+]] = arith.cmpi ne, %[[TWO]], %[[C0_I32_1]] : i32 -//CHECK: %[[FOUR:.+]] = arith.extui %[[THREE]] : i1 to i8 -//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR]] : i8 to i1 -//CHECK: scf.condition(%[[FIVE]]) +//CHECK: %[[ZERO:.+]] = memref.load %alloca[] : memref +//CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 +//CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C2_I32]] : i32 +//CHECK: %[[FOUR:.+]] = arith.extui %[[ONE]] : i1 to i8 +//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR]] : i8 to i1 +//CHECK: scf.condition(%[[FIVE]]) //CHECK: } do { -//CHECK: %[[C0_I32_1]] = arith.constant 0 : i32 -//CHECK: memref.store %[[C0_I32_1]], %[[alloca_0]][] : memref +//CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 +//CHECK: memref.store %[[C0_I32_1]], %[[alloca_0]][] : memref //CHECK: memref.alloca_scope { //CHECK: scf.while : () -> () { -//CHECK: %[[TWO]] = memref.load %[[alloca_0]][] : memref -//CHECK: %[[C2_I32]] = arith.constant 2 : i32 -//CHECK: %[[THREE]] = arith.cmpi slt, %[[TWO]], %[[C2_I32]] : i32 -//CHECK: %[[FOUR]] = arith.extui %[[THREE]] : i1 to i32 -//CHECK: %[[C0_I32_2:.+]] = arith.constant 0 : i32 -//CHECK: %[[FIVE]] = arith.cmpi ne, %[[FOUR]], %[[C0_I32_2]] : i32 -//CHECK: %[[SIX:.+]] = arith.extui %[[FIVE]] : i1 to i8 -//CHECK: %[[SEVEN:.+]] = arith.trunci %[[SIX]] : i8 to i1 -//CHECK: scf.condition(%[[SEVEN]]) +//CHECK: %{{.*}} = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C2_I32]] = arith.constant 2 : i32 +//CHECK: %{{.*}} = arith.cmpi slt, %{{.*}}, %[[C2_I32]] : i32 +//CHECK: %[[SIX:.+]] = arith.extui %{{.*}} : i1 to i8 +//CHECK: %[[SEVEN:.+]] = arith.trunci %[[SIX]] : i8 to i1 +//CHECK: scf.condition(%[[SEVEN]]) //CHECK: } do { -//CHECK: %[[TWO]] = memref.load %[[alloca_0]][] : memref -//CHECK: %[[C1_I32_2:.+]] = arith.constant 1 : i32 -//CHECK: %[[THREE]] = arith.addi %[[TWO]], %[[C1_I32_2]] : i32 -//CHECK: memref.store %[[THREE]], %[[alloca_0]][] : memref -//CHECK: scf.yield -//CHECK: } -//CHECK: } -//CHECK: %[[ZERO]] = memref.load %[[alloca]][] : memref -//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 -//CHECK: %[[ONE]] = arith.addi %[[ZERO]], %[[C1_I32]] : i32 -//CHECK: memref.store %[[ONE]], %[[alloca]][] : memref -//CHECK: scf.yield -//CHECK: } -//CHECK: } -//CHECK: return -//CHECK: } -//CHECK: } \ No newline at end of file +//CHECK: %{{.*}} = memref.load %[[alloca_0]][] : memref +//CHECK: %[[C1_I32_2:.+]] = arith.constant 1 : i32 +//CHECK: %{{.*}} = arith.addi %{{.*}}, %[[C1_I32_2]] : i32 +//CHECK: memref.store %{{.*}}, %[[alloca_0]][] : memref +//CHECK: scf.yield +//CHECK: } +//CHECK: } +//CHECK: %[[ZERO]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[ONE]] = arith.addi %[[ZERO]], %[[C1_I32]] : i32 +//CHECK: memref.store %[[ONE]], %[[alloca]][] : memref +//CHECK: scf.yield +//CHECK: } +//CHECK: } +//CHECK: return +//CHECK: } +//CHECK: } diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index ad1241e1cad3..4c1586d8eaa5 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -21,9 +21,8 @@ module { cir.for : cond { %10 = cir.load %8 : !cir.ptr, !s32i %11 = cir.load %2 : !cir.ptr, !s32i - %12 = cir.cmp(lt, %10, %11) : !s32i, !s32i - %13 = cir.cast(int_to_bool, %12 : !s32i), !cir.bool - cir.condition(%13) + %12 = cir.cmp(lt, %10, %11) : !s32i, !cir.bool + cir.condition(%12) } body { %10 = cir.load %0 : !cir.ptr>, !cir.ptr %11 = cir.load %8 : !cir.ptr, !s32i @@ -79,10 +78,7 @@ module { // MLIR: %[[VAL_17:.*]] = llvm.load %[[VAL_2]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_18:.*]] = llvm.load %[[VAL_8]] {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR: %[[VAL_19:.*]] = llvm.icmp "slt" %[[VAL_17]], %[[VAL_18]] : i32 -// MLIR: %[[VAL_20:.*]] = llvm.zext %[[VAL_19]] : i1 to i32 -// MLIR: %[[VAL_21:.*]] = llvm.mlir.constant(0 : i32) : i32 -// MLIR: %[[VAL_22:.*]] = llvm.icmp "ne" %[[VAL_20]], %[[VAL_21]] : i32 -// MLIR: llvm.cond_br %[[VAL_22]], ^bb3, ^bb5 +// MLIR: llvm.cond_br %[[VAL_19]], ^bb3, ^bb5 // MLIR: ^bb3: // MLIR: %[[VAL_23:.*]] = llvm.load %[[VAL_4]] {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr // MLIR: %[[VAL_24:.*]] = llvm.load %[[VAL_2]] {alignment = 4 : i64} : !llvm.ptr -> i32 @@ -112,4 +108,4 @@ module { // MLIR: llvm.store %[[VAL_39]], %[[VAL_10]] {{.*}}: f64, !llvm.ptr // MLIR: %[[VAL_40:.*]] = llvm.load %[[VAL_10]] {alignment = 8 : i64} : !llvm.ptr -> f64 // MLIR: llvm.return %[[VAL_40]] : f64 -// MLIR: } \ No newline at end of file +// MLIR: } diff --git a/clang/test/CIR/Lowering/goto.cir b/clang/test/CIR/Lowering/goto.cir index f09626ec122f..cd3a57d2e713 100644 --- a/clang/test/CIR/Lowering/goto.cir +++ b/clang/test/CIR/Lowering/goto.cir @@ -3,7 +3,7 @@ !s32i = !cir.int module { - + cir.func @gotoFromIf(%arg0: !s32i) -> !s32i { %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} @@ -11,9 +11,8 @@ module { cir.scope { %6 = cir.load %0 : !cir.ptr, !s32i %7 = cir.const #cir.int<5> : !s32i - %8 = cir.cmp(gt, %6, %7) : !s32i, !s32i - %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool - cir.if %9 { + %8 = cir.cmp(gt, %6, %7) : !s32i, !cir.bool + cir.if %8 { cir.goto "err" } } diff --git a/clang/test/CIR/Lowering/loops-with-break.cir b/clang/test/CIR/Lowering/loops-with-break.cir index 6a7ef3e8c023..813d9aed05d5 100644 --- a/clang/test/CIR/Lowering/loops-with-break.cir +++ b/clang/test/CIR/Lowering/loops-with-break.cir @@ -11,17 +11,15 @@ module { cir.for : cond { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<10> : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) + %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool + cir.condition(%4) } body { cir.scope { cir.scope { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<5> : !s32i - %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.if %5 { + %4 = cir.cmp(eq, %2, %3) : !s32i, !cir.bool + cir.if %4 { cir.break } } @@ -73,9 +71,8 @@ module { cir.for : cond { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<10> : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) + %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool + cir.condition(%4) } body { cir.scope { cir.scope { @@ -85,17 +82,15 @@ module { cir.for : cond { %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.const #cir.int<10> : !s32i - %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.condition(%7) + %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool + cir.condition(%6) } body { cir.scope { cir.scope { %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.const #cir.int<5> : !s32i - %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.if %7 { + %6 = cir.cmp(eq, %4, %5) : !s32i, !cir.bool + cir.if %6 { cir.break } } @@ -174,9 +169,8 @@ module { cir.while { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<10> : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) + %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool + cir.condition(%4) } do { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i @@ -184,9 +178,8 @@ module { cir.scope { %4 = cir.load %0 : !cir.ptr, !s32i %5 = cir.const #cir.int<5> : !s32i - %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.if %7 { + %6 = cir.cmp(eq, %4, %5) : !s32i, !cir.bool + cir.if %6 { cir.break } } @@ -233,9 +226,8 @@ cir.func @testDoWhile() { cir.scope { %4 = cir.load %0 : !cir.ptr, !s32i %5 = cir.const #cir.int<5> : !s32i - %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.if %7 { + %6 = cir.cmp(eq, %4, %5) : !s32i, !cir.bool + cir.if %6 { cir.break } } @@ -243,9 +235,8 @@ cir.func @testDoWhile() { } while { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<10> : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) + %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool + cir.condition(%4) } } cir.return @@ -275,4 +266,4 @@ cir.func @testDoWhile() { // [...] // CHECK: } -} \ No newline at end of file +} diff --git a/clang/test/CIR/Lowering/loops-with-continue.cir b/clang/test/CIR/Lowering/loops-with-continue.cir index 0371d416b61d..f6a91dcab560 100644 --- a/clang/test/CIR/Lowering/loops-with-continue.cir +++ b/clang/test/CIR/Lowering/loops-with-continue.cir @@ -11,17 +11,15 @@ module { cir.for : cond { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<10> : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) + %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool + cir.condition(%4) } body { cir.scope { cir.scope { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<5> : !s32i - %4 = cir.cmp(eq, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.if %5 { + %4 = cir.cmp(eq, %2, %3) : !s32i, !cir.bool + cir.if %4 { cir.continue } } @@ -74,9 +72,8 @@ module { cir.for : cond { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<10> : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) + %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool + cir.condition(%4) } body { cir.scope { cir.scope { @@ -86,17 +83,15 @@ module { cir.for : cond { %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.const #cir.int<10> : !s32i - %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.condition(%7) + %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool + cir.condition(%6) } body { cir.scope { cir.scope { %4 = cir.load %2 : !cir.ptr, !s32i %5 = cir.const #cir.int<5> : !s32i - %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.if %7 { + %6 = cir.cmp(eq, %4, %5) : !s32i, !cir.bool + cir.if %6 { cir.continue } } @@ -174,9 +169,8 @@ cir.func @testWhile() { cir.while { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<10> : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) + %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool + cir.condition(%4) } do { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.unary(inc, %2) : !s32i, !s32i @@ -184,9 +178,8 @@ cir.func @testWhile() { cir.scope { %4 = cir.load %0 : !cir.ptr, !s32i %5 = cir.const #cir.int<5> : !s32i - %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.if %7 { + %6 = cir.cmp(eq, %4, %5) : !s32i, !cir.bool + cir.if %6 { cir.continue } } @@ -230,9 +223,8 @@ cir.func @testWhile() { cir.scope { %4 = cir.load %0 : !cir.ptr, !s32i %5 = cir.const #cir.int<5> : !s32i - %6 = cir.cmp(eq, %4, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.if %7 { + %6 = cir.cmp(eq, %4, %5) : !s32i, !cir.bool + cir.if %6 { cir.continue } } @@ -240,9 +232,8 @@ cir.func @testWhile() { } while { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<10> : !s32i - %4 = cir.cmp(lt, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) + %4 = cir.cmp(lt, %2, %3) : !s32i, !cir.bool + cir.condition(%4) } } cir.return @@ -271,4 +262,4 @@ cir.func @testWhile() { // [...] // CHECK: } -} \ No newline at end of file +} diff --git a/clang/test/CIR/Lowering/switch.cir b/clang/test/CIR/Lowering/switch.cir index 81cc6efdc92d..9434b7337f7e 100644 --- a/clang/test/CIR/Lowering/switch.cir +++ b/clang/test/CIR/Lowering/switch.cir @@ -154,9 +154,8 @@ module { cir.scope { %6 = cir.load %1 : !cir.ptr, !s32i %7 = cir.const #cir.int<0> : !s32i - %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i - %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool - cir.if %9 { + %8 = cir.cmp(ge, %6, %7) : !s32i, !cir.bool + cir.if %8 { cir.break } } diff --git a/clang/test/CIR/Transforms/mem2reg.c b/clang/test/CIR/Transforms/mem2reg.c index 5d8d2f59b35b..b60d9eb0d1e9 100644 --- a/clang/test/CIR/Transforms/mem2reg.c +++ b/clang/test/CIR/Transforms/mem2reg.c @@ -41,9 +41,8 @@ void alloca_in_loop(int* ar, int n) { // BEFORE: cir.for : cond { // BEFORE: %4 = cir.load %2 : !cir.ptr, !s32i // BEFORE: %5 = cir.load %1 : !cir.ptr, !s32i -// BEFORE: %6 = cir.cmp(lt, %4, %5) : !s32i, !s32i -// BEFORE: %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool -// BEFORE: cir.condition(%7) +// BEFORE: %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool +// BEFORE: cir.condition(%6) // BEFORE: } body { // BEFORE: cir.scope { // BEFORE: %4 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} @@ -71,21 +70,20 @@ void alloca_in_loop(int* ar, int n) { // MEM2REG: %0 = cir.const #cir.int<0> : !s32i // MEM2REG: cir.br ^bb2(%0 : !s32i) // MEM2REG: ^bb2(%1: !s32i{{.*}}): // 2 preds: ^bb1, ^bb6 -// MEM2REG: %2 = cir.cmp(lt, %1, %arg1) : !s32i, !s32i -// MEM2REG: %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool -// MEM2REG: cir.brcond %3 ^bb3, ^bb7 +// MEM2REG: %2 = cir.cmp(lt, %1, %arg1) : !s32i, !cir.bool +// MEM2REG: cir.brcond %2 ^bb3, ^bb7 // MEM2REG: ^bb3: // pred: ^bb2 // MEM2REG: cir.br ^bb4 // MEM2REG: ^bb4: // pred: ^bb3 -// MEM2REG: %4 = cir.const #cir.int<4> : !s32i -// MEM2REG: %5 = cir.ptr_stride(%arg0 : !cir.ptr, %1 : !s32i), !cir.ptr -// MEM2REG: cir.store %4, %5 : !s32i, !cir.ptr +// MEM2REG: %3 = cir.const #cir.int<4> : !s32i +// MEM2REG: %4 = cir.ptr_stride(%arg0 : !cir.ptr, %1 : !s32i), !cir.ptr +// MEM2REG: cir.store %3, %4 : !s32i, !cir.ptr // MEM2REG: cir.br ^bb5 // MEM2REG: ^bb5: // pred: ^bb4 // MEM2REG: cir.br ^bb6 // MEM2REG: ^bb6: // pred: ^bb5 -// MEM2REG: %6 = cir.unary(inc, %1) : !s32i, !s32i -// MEM2REG: cir.br ^bb2(%6 : !s32i) +// MEM2REG: %5 = cir.unary(inc, %1) : !s32i, !s32i +// MEM2REG: cir.br ^bb2(%5 : !s32i) // MEM2REG: ^bb7: // pred: ^bb2 // MEM2REG: cir.br ^bb8 // MEM2REG: ^bb8: // pred: ^bb7 @@ -116,24 +114,23 @@ int alloca_in_ifelse(int x) { // BEFORE: cir.scope { // BEFORE: %9 = cir.load %0 : !cir.ptr, !s32i // BEFORE: %10 = cir.const #cir.int<42> : !s32i -// BEFORE: %11 = cir.cmp(gt, %9, %10) : !s32i, !s32i -// BEFORE: %12 = cir.cast(int_to_bool, %11 : !s32i), !cir.bool -// BEFORE: cir.if %12 { -// BEFORE: %13 = cir.alloca !s32i, !cir.ptr, ["z", init] {alignment = 4 : i64} -// BEFORE: %14 = cir.const #cir.int<2> : !s32i -// BEFORE: cir.store %14, %13 : !s32i, !cir.ptr -// BEFORE: %15 = cir.load %0 : !cir.ptr, !s32i -// BEFORE: %16 = cir.load %13 : !cir.ptr, !s32i -// BEFORE: %17 = cir.binop(mul, %15, %16) nsw : !s32i -// BEFORE: cir.store %17, %2 : !s32i, !cir.ptr +// BEFORE: %11 = cir.cmp(gt, %9, %10) : !s32i, !cir.bool +// BEFORE: cir.if %11 { +// BEFORE: %12 = cir.alloca !s32i, !cir.ptr, ["z", init] {alignment = 4 : i64} +// BEFORE: %13 = cir.const #cir.int<2> : !s32i +// BEFORE: cir.store %13, %12 : !s32i, !cir.ptr +// BEFORE: %14 = cir.load %0 : !cir.ptr, !s32i +// BEFORE: %15 = cir.load %12 : !cir.ptr, !s32i +// BEFORE: %16 = cir.binop(mul, %14, %15) nsw : !s32i +// BEFORE: cir.store %16, %2 : !s32i, !cir.ptr // BEFORE: } else { -// BEFORE: %13 = cir.alloca !s32i, !cir.ptr, ["z", init] {alignment = 4 : i64} -// BEFORE: %14 = cir.const #cir.int<3> : !s32i -// BEFORE: cir.store %14, %13 : !s32i, !cir.ptr -// BEFORE: %15 = cir.load %0 : !cir.ptr, !s32i -// BEFORE: %16 = cir.load %13 : !cir.ptr, !s32i -// BEFORE: %17 = cir.binop(mul, %15, %16) nsw : !s32i -// BEFORE: cir.store %17, %2 : !s32i, !cir.ptr +// BEFORE: %12 = cir.alloca !s32i, !cir.ptr, ["z", init] {alignment = 4 : i64} +// BEFORE: %13 = cir.const #cir.int<3> : !s32i +// BEFORE: cir.store %13, %12 : !s32i, !cir.ptr +// BEFORE: %14 = cir.load %0 : !cir.ptr, !s32i +// BEFORE: %15 = cir.load %12 : !cir.ptr, !s32i +// BEFORE: %16 = cir.binop(mul, %14, %15) nsw : !s32i +// BEFORE: cir.store %16, %2 : !s32i, !cir.ptr // BEFORE: } // BEFORE: } // BEFORE: %4 = cir.load %2 : !cir.ptr, !s32i @@ -150,23 +147,22 @@ int alloca_in_ifelse(int x) { // MEM2REG: cir.br ^bb1 // MEM2REG: ^bb1: // pred: ^bb0 // MEM2REG: %1 = cir.const #cir.int<42> : !s32i -// MEM2REG: %2 = cir.cmp(gt, %arg0, %1) : !s32i, !s32i -// MEM2REG: %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool -// MEM2REG: cir.brcond %3 ^bb2, ^bb3 +// MEM2REG: %2 = cir.cmp(gt, %arg0, %1) : !s32i, !cir.bool +// MEM2REG: cir.brcond %2 ^bb2, ^bb3 // MEM2REG: ^bb2: // pred: ^bb1 -// MEM2REG: %4 = cir.const #cir.int<2> : !s32i -// MEM2REG: %5 = cir.binop(mul, %arg0, %4) nsw : !s32i -// MEM2REG: cir.br ^bb4(%5 : !s32i) +// MEM2REG: %3 = cir.const #cir.int<2> : !s32i +// MEM2REG: %4 = cir.binop(mul, %arg0, %3) nsw : !s32i +// MEM2REG: cir.br ^bb4(%4 : !s32i) // MEM2REG: ^bb3: // pred: ^bb1 -// MEM2REG: %6 = cir.const #cir.int<3> : !s32i -// MEM2REG: %7 = cir.binop(mul, %arg0, %6) nsw : !s32i -// MEM2REG: cir.br ^bb4(%7 : !s32i) -// MEM2REG: ^bb4(%8: !s32i{{.*}}): // 2 preds: ^bb2, ^bb3 +// MEM2REG: %5 = cir.const #cir.int<3> : !s32i +// MEM2REG: %6 = cir.binop(mul, %arg0, %5) nsw : !s32i +// MEM2REG: cir.br ^bb4(%6 : !s32i) +// MEM2REG: ^bb4(%7: !s32i{{.*}}): // 2 preds: ^bb2, ^bb3 // MEM2REG: cir.br ^bb5 // MEM2REG: ^bb5: // pred: ^bb4 -// MEM2REG: %9 = cir.const #cir.int<1> : !s32i -// MEM2REG: %10 = cir.binop(add, %8, %9) nsw : !s32i -// MEM2REG: cir.return %10 : !s32i +// MEM2REG: %8 = cir.const #cir.int<1> : !s32i +// MEM2REG: %9 = cir.binop(add, %7, %8) nsw : !s32i +// MEM2REG: cir.return %9 : !s32i // MEM2REG: } diff --git a/clang/test/CIR/Transforms/scf-prepare.cir b/clang/test/CIR/Transforms/scf-prepare.cir index 063420b1c516..3e1551de25c4 100644 --- a/clang/test/CIR/Transforms/scf-prepare.cir +++ b/clang/test/CIR/Transforms/scf-prepare.cir @@ -14,7 +14,7 @@ module { // CHECK: %[[BOUND:.*]] = cir.load %[[BOUND_ADDR:.*]] : !cir.ptr, !s32i // CHECK: cir.for : cond { // CHECK: %[[IV:.*]] = cir.load %[[IV_ADDR:.*]] : !cir.ptr, !s32i - // CHECK: %[[COND:.*]] = cir.cmp(lt, %[[IV]], %4) : !s32i, !s32i + // CHECK: %[[COND:.*]] = cir.cmp(lt, %[[IV]], %4) : !s32i, !cir.bool %0 = cir.alloca !s32i, !cir.ptr, ["l", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["u", init] {alignment = 4 : i64} @@ -27,9 +27,8 @@ module { cir.for : cond { %4 = cir.load %1 : !cir.ptr, !s32i %5 = cir.load %2 : !cir.ptr, !s32i - %6 = cir.cmp(gt, %4, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.condition(%7) + %6 = cir.cmp(gt, %4, %5) : !s32i, !cir.bool + cir.condition(%6) } body { %4 = cir.const #cir.int<3> : !s32i %5 = cir.get_global @a : !cir.ptr> @@ -57,8 +56,8 @@ module { // CHECK: %[[BOUND:.*]] = cir.const #cir.int<50> : !s32i // CHECK: cir.for : cond { // CHECK: %[[IV:.*]] = cir.load %[[IV_ADDR:.*]] : !cir.ptr, !s32i - // CHECK: %[[COND:.*]] = cir.cmp(le, %[[IV]], %[[BOUND]]) : !s32i, !s32i - + // CHECK: %[[COND:.*]] = cir.cmp(le, %[[IV]], %[[BOUND]]) : !s32i, !cir.bool + cir.scope { %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} %1 = cir.const #cir.int<0> : !s32i @@ -66,9 +65,8 @@ module { cir.for : cond { %2 = cir.const #cir.int<50> : !s32i %3 = cir.load %0 : !cir.ptr, !s32i - %4 = cir.cmp(ge, %2, %3) : !s32i, !s32i - %5 = cir.cast(int_to_bool, %4 : !s32i), !cir.bool - cir.condition(%5) + %4 = cir.cmp(ge, %2, %3) : !s32i, !cir.bool + cir.condition(%4) } body { %2 = cir.const #cir.int<3> : !s32i %3 = cir.get_global @a : !cir.ptr> @@ -99,7 +97,7 @@ module { // CHECK: cir.for : cond { // CHECK: %[[BOUND:.*]] = cir.load %[[BOUND_ADDR:.*]] : !cir.ptr, !s32i // CHECK: %[[IV:.*]] = cir.load %[[IV_ADDR:.*]] : !cir.ptr, !s32i - // CHECK: %[[COND:.*]] = cir.cmp(lt, %[[IV]], %[[BOUND]]) : !s32i, !s32i + // CHECK: %[[COND:.*]] = cir.cmp(lt, %[[IV]], %[[BOUND]]) : !s32i, !cir.bool %0 = cir.alloca !s32i, !cir.ptr, ["l", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["u", init] {alignment = 4 : i64} @@ -112,9 +110,8 @@ module { cir.for : cond { %4 = cir.load %1 : !cir.ptr, !s32i %5 = cir.load %2 : !cir.ptr, !s32i - %6 = cir.cmp(gt, %4, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.condition(%7) + %6 = cir.cmp(gt, %4, %5) : !s32i, !cir.bool + cir.condition(%6) } body { cir.scope { %4 = cir.load %1 : !cir.ptr, !s32i @@ -157,9 +154,8 @@ module { %3 = cir.const #cir.int<100> : !s32i %4 = cir.const #cir.int<1> : !s32i %5 = cir.binop(sub, %3, %4) nsw : !s32i - %6 = cir.cmp(lt, %2, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.condition(%7) + %6 = cir.cmp(lt, %2, %5) : !s32i, !cir.bool + cir.condition(%6) } body { cir.scope { } @@ -192,9 +188,8 @@ module { %2 = cir.load %0 : !cir.ptr, !s32i %3 = cir.const #cir.int<100> : !s32i %5 = cir.binop(sub, %3, %arg0) nsw : !s32i - %6 = cir.cmp(lt, %2, %5) : !s32i, !s32i - %7 = cir.cast(int_to_bool, %6 : !s32i), !cir.bool - cir.condition(%7) + %6 = cir.cmp(lt, %2, %5) : !s32i, !cir.bool + cir.condition(%6) } body { cir.scope { } diff --git a/clang/test/CIR/Transforms/simpl.c b/clang/test/CIR/Transforms/simpl.c index dda9f495ca4c..90d65d651369 100644 --- a/clang/test/CIR/Transforms/simpl.c +++ b/clang/test/CIR/Transforms/simpl.c @@ -18,8 +18,9 @@ int foo(int* ptr) { // BEFORE: cir.func {{.*@foo}} // BEFORE: [[X0:%.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr // BEFORE: [[X1:%.*]] = cir.const #cir.ptr : !cir.ptr -// BEFORE: [[X2:%.*]] = cir.cmp(eq, [[X0]], [[X1]]) : !cir.ptr, !s32i -// BEFORE: [[X3:%.*]] = cir.cast(int_to_bool, [[X2]] : !s32i), !cir.bool +// BEFORE: [[X2:%.*]] = cir.cmp(eq, [[X0]], [[X1]]) : !cir.ptr, !cir.bool +// BEFORE: [[BOOL_TO_INT:%.*]] = cir.cast(bool_to_int, [[X2]] : !cir.bool), !s32i +// BEFORE: [[X3:%.*]] = cir.cast(int_to_bool, [[BOOL_TO_INT]] : !s32i), !cir.bool // BEFORE: [[X4:%.*]] = cir.unary(not, [[X3]]) : !cir.bool, !cir.bool // BEFORE: [[X5:%.*]] = cir.cast(bool_to_int, [[X4]] : !cir.bool), !s32i // BEFORE: [[X6:%.*]] = cir.cast(int_to_bool, [[X5]] : !s32i), !cir.bool @@ -33,6 +34,5 @@ int foo(int* ptr) { // AFTER: [[X0:%.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr // AFTER: [[X1:%.*]] = cir.const #cir.ptr : !cir.ptr -// AFTER: [[X2:%.*]] = cir.cmp(eq, [[X0]], [[X1]]) : !cir.ptr, !s32i -// AFTER: [[X3:%.*]] = cir.cast(int_to_bool, [[X2]] : !s32i), !cir.bool -// AFTER: cir.if [[X3]] \ No newline at end of file +// AFTER: [[X2:%.*]] = cir.cmp(eq, [[X0]], [[X1]]) : !cir.ptr, !cir.bool +// AFTER: cir.if [[X2]] diff --git a/clang/test/CIR/Transforms/switch.cir b/clang/test/CIR/Transforms/switch.cir index f7cc8fb31196..77ca59836f48 100644 --- a/clang/test/CIR/Transforms/switch.cir +++ b/clang/test/CIR/Transforms/switch.cir @@ -174,9 +174,8 @@ module { cir.scope { %6 = cir.load %1 : !cir.ptr, !s32i %7 = cir.const #cir.int<0> : !s32i - %8 = cir.cmp(ge, %6, %7) : !s32i, !s32i - %9 = cir.cast(int_to_bool, %8 : !s32i), !cir.bool - cir.if %9 { + %8 = cir.cmp(ge, %6, %7) : !s32i, !cir.bool + cir.if %8 { cir.break } } From bb076bfb0b718989dea1d6691d0df439dff67765 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Wed, 20 Nov 2024 01:40:34 +0800 Subject: [PATCH 2095/2301] [CIR][CIRGen] Support __builtin_memset_inline (#1114) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 31 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 7 +++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 16 ++++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 29 ++++++++++++----- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 10 ++++++ clang/test/CIR/CodeGen/builtins-memory.c | 20 ++++++++++++ 6 files changed, 103 insertions(+), 10 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index ac3ca72c0c70..0785bfd8760c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4147,6 +4147,37 @@ def MemSetOp : CIR_Op<"libc.memset"> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// MemSetInlineOp +//===----------------------------------------------------------------------===// + +def MemSetInlineOp : CIR_Op<"memset_inline"> { + let arguments = (ins Arg:$dst, + SInt32:$val, + I64Attr:$len); + let summary = "Fill a block of memory with constant length without calling" + "any external function"; + let description = [{ + Given the CIR pointer, `dst`, `cir.memset_inline` will set the first `len` + bytes of the memory pointed by `dst` to the specified `val`. + + The `len` argument must be a constant integer argument specifying the number + of bytes to fill. + + Examples: + + ```mlir + // Set 2 bytes from a struct to 0 + cir.memset_inline 2 bytes from %struct set to %zero : !cir.ptr, !s32i + ``` + }]; + + let assemblyFormat = [{ + $len `bytes` `from` $dst `set` `to` $val attr-dict + `:` qualified(type($dst)) `,` type($val) + }]; + let hasVerifier = 0; +} //===----------------------------------------------------------------------===// // MemChrOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index b9022309cec8..91dd7d6e5a7d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -623,6 +623,13 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return create(loc, dst, val, len); } + cir::MemSetInlineOp createMemSetInline(mlir::Location loc, mlir::Value dst, + mlir::Value val, + mlir::IntegerAttr len) { + val = createIntCast(val, cir::IntType::get(getContext(), 32, true)); + return create(loc, dst, val, len); + } + mlir::Value createNeg(mlir::Value value) { if (auto intTy = mlir::dyn_cast(value.getType())) { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 611561b6c0ae..1e139432785f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1554,8 +1554,20 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(Dest.getPointer()); } - case Builtin::BI__builtin_memset_inline: - llvm_unreachable("BI__builtin_memset_inline NYI"); + case Builtin::BI__builtin_memset_inline: { + Address Dest = emitPointerWithAlignment(E->getArg(0)); + mlir::Value ByteVal = emitScalarExpr(E->getArg(1)); + uint64_t size = + E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue(); + emitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), + E->getArg(0)->getExprLoc(), FD, 0); + builder.createMemSetInline( + getLoc(E->getSourceRange()), Dest.getPointer(), ByteVal, + mlir::IntegerAttr::get(mlir::IntegerType::get(builder.getContext(), 64), + size)); + // __builtin_memset_inline has no return value + return RValue::get(nullptr); + } case Builtin::BI__builtin___memset_chk: { // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2. llvm::APSInt size; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 232666939916..c2a714a43ec2 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -777,6 +777,18 @@ mlir::LogicalResult CIRToLLVMMemSetOpLowering::matchAndRewrite( return mlir::success(); } +mlir::LogicalResult CIRToLLVMMemSetInlineOpLowering::matchAndRewrite( + cir::MemSetInlineOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto converted = rewriter.create( + op.getLoc(), mlir::IntegerType::get(op.getContext(), 8), + adaptor.getVal()); + rewriter.replaceOpWithNewOp( + op, adaptor.getDst(), converted, adaptor.getLenAttr(), + /*isVolatile=*/false); + return mlir::success(); +} + static mlir::Value getLLVMIntCast(mlir::ConversionPatternRewriter &rewriter, mlir::Value llvmSrc, mlir::Type llvmDstIntTy, bool isUnsigned, uint64_t cirSrcWidth, @@ -1851,8 +1863,8 @@ mlir::LogicalResult CIRToLLVMVAArgOpLowering::matchAndRewrite( return op.emitError("cir.vaarg lowering is NYI"); } - /// Returns the name used for the linkage attribute. This *must* correspond - /// to the name of the attribute in ODS. +/// Returns the name used for the linkage attribute. This *must* correspond +/// to the name of the attribute in ODS. StringRef CIRToLLVMFuncOpLowering::getLinkageAttrNameString() { return "linkage"; } @@ -1886,8 +1898,8 @@ void CIRToLLVMFuncOpLowering::lowerFuncAttributes( } } - /// When do module translation, we can only translate LLVM-compatible types. - /// Here we lower possible OpenCLKernelMetadataAttr to use the converted type. +/// When do module translation, we can only translate LLVM-compatible types. +/// Here we lower possible OpenCLKernelMetadataAttr to use the converted type. void CIRToLLVMFuncOpLowering::lowerFuncOpenCLKernelMetadata( mlir::NamedAttribute &extraAttrsEntry) const { const auto attrKey = cir::OpenCLKernelMetadataAttr::getMnemonic(); @@ -2100,8 +2112,8 @@ mlir::LogicalResult CIRToLLVMSwitchFlatOpLowering::matchAndRewrite( return mlir::success(); } - /// Replace CIR global with a region initialized LLVM global and update - /// insertion point to the end of the initializer block. +/// Replace CIR global with a region initialized LLVM global and update +/// insertion point to the end of the initializer block. void CIRToLLVMGlobalOpLowering::setupRegionInitializedLLVMGlobalOp( cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const { const auto llvmType = getTypeConverter()->convertType(op.getSymType()); @@ -3890,8 +3902,9 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMBaseClassAddrOpLowering, CIRToLLVMDerivedClassAddrOpLowering, CIRToLLVMVTTAddrPointOpLowering, CIRToLLVMIsFPClassOpLowering, CIRToLLVMAbsOpLowering, CIRToLLVMMemMoveOpLowering, - CIRToLLVMMemSetOpLowering, CIRToLLVMMemCpyInlineOpLowering, - CIRToLLVMSignBitOpLowering, CIRToLLVMPtrMaskOpLowering + CIRToLLVMMemSetOpLowering, CIRToLLVMMemSetInlineOpLowering, + CIRToLLVMMemCpyInlineOpLowering, CIRToLLVMSignBitOpLowering, + CIRToLLVMPtrMaskOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index d1488ec8f6f5..a88c30d3dd15 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -125,6 +125,16 @@ class CIRToLLVMMemSetOpLowering mlir::ConversionPatternRewriter &) const override; }; +class CIRToLLVMMemSetInlineOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::MemSetInlineOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override; +}; + class CIRToLLVMPtrStrideOpLowering : public mlir::OpConversionPattern { public: diff --git a/clang/test/CIR/CodeGen/builtins-memory.c b/clang/test/CIR/CodeGen/builtins-memory.c index 472d2103a960..9c7a74301aaa 100644 --- a/clang/test/CIR/CodeGen/builtins-memory.c +++ b/clang/test/CIR/CodeGen/builtins-memory.c @@ -210,3 +210,23 @@ void test_memcpy_inline_aligned_buffers(unsigned long long *dst, const unsigned // COM: LLVM: call void @llvm.memcpy.inline.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 {{%.*}}, i64 4, i1 false) __builtin_memcpy_inline(dst, src, 4); } + +void test_memset_inline(void *dst, int val) { + + // CIR-LABEL: test_memset_inline + // CIR: cir.memset_inline 0 bytes from {{%.*}} set to {{%.*}} : !cir.ptr, !s32i + + // LLVM-LABEL: test_memset_inline + // LLVM: call void @llvm.memset.inline.p0.i64(ptr {{%.*}}, i8 {{%.*}}, i64 0, i1 false) + __builtin_memset_inline(dst, val, 0); + + // CIR: cir.memset_inline 1 bytes from {{%.*}} set to {{%.*}} : !cir.ptr, !s32i + + // LLVM: call void @llvm.memset.inline.p0.i64(ptr {{%.*}}, i8 {{%.*}}, i64 1, i1 false) + __builtin_memset_inline(dst, val, 1); + + // CIR: cir.memset_inline 4 bytes from {{%.*}} set to {{%.*}} : !cir.ptr, !s32i + + // LLVM: call void @llvm.memset.inline.p0.i64(ptr {{%.*}}, i8 {{%.*}}, i64 4, i1 false) + __builtin_memset_inline(dst, val, 4); +} From c01a3390ae1f49c0479677dbab57cf2a40e63ea9 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Wed, 20 Nov 2024 01:56:38 +0800 Subject: [PATCH 2096/2301] [CIR][CIRGen][TBAA] Initial TBAA support (#1116) This is the first patch to support TBAA, following the discussion at https://github.com/llvm/clangir/pull/1076#discussion_r1835031415 - add skeleton for CIRGen, utilizing `decorateOperationWithTBAA` - add empty implementation in `CIRGenTBAA` - introduce `CIR_TBAAAttr` with empty body - attach `CIR_TBAAAttr` to `LoadOp` and `StoreOp` - no handling of vtable pointer - no LLVM lowering --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 12 +- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 3 + clang/include/clang/CIR/Dialect/IR/CIROps.td | 15 +- clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 16 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 262 ++++++++++-------- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 11 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 14 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 71 ++++- clang/lib/CIR/CodeGen/CIRGenModule.h | 47 +++- clang/lib/CIR/CodeGen/CIRGenTBAA.cpp | 64 +++++ clang/lib/CIR/CodeGen/CIRGenTBAA.h | 167 ++++++++++- clang/lib/CIR/CodeGen/CIRGenValue.h | 19 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 + clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp | 7 +- clang/test/CIR/CodeGen/tbaa.c | 22 ++ 22 files changed, 579 insertions(+), 181 deletions(-) create mode 100644 clang/test/CIR/CodeGen/tbaa.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index a8589baa5ae0..b19fe5884e86 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -158,7 +158,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { llvm_unreachable("Zero initializer for given type is NYI"); } - mlir::Value createLoad(mlir::Location loc, mlir::Value ptr, + cir::LoadOp createLoad(mlir::Location loc, mlir::Value ptr, bool isVolatile = false, uint64_t alignment = 0) { mlir::IntegerAttr intAttr; if (alignment) @@ -167,7 +167,9 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return create(loc, ptr, /*isDeref=*/false, isVolatile, /*alignment=*/intAttr, - /*mem_order=*/cir::MemOrderAttr{}); + /*mem_order=*/ + cir::MemOrderAttr{}, + /*tbaa=*/mlir::ArrayAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr, @@ -353,7 +355,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { if (mlir::cast(dst.getType()).getPointee() != val.getType()) dst = createPtrBitcast(dst, val.getType()); - return create(loc, val, dst, _volatile, align, order); + return create(loc, val, dst, _volatile, align, order, + /*tbaa=*/mlir::ArrayAttr{}); } mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, @@ -400,7 +403,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { /// Create a copy with inferred length. cir::CopyOp createCopy(mlir::Value dst, mlir::Value src, bool isVolatile = false) { - return create(dst.getLoc(), dst, src, isVolatile); + return create(dst.getLoc(), dst, src, isVolatile, + /*tbaa=*/mlir::ArrayAttr{}); } cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 463bdd5cec7a..d0ac1d00c4b5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -1216,6 +1216,9 @@ def GlobalAnnotationValuesAttr : CIR_Attr<"GlobalAnnotationValues", let genVerifyDecl = 1; } +def CIR_TBAAAttr : CIR_Attr<"TBAA", "tbaa", []> { +} + include "clang/CIR/Dialect/IR/CIROpenCLAttrs.td" #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 0785bfd8760c..897098f4c25c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -587,7 +587,8 @@ def LoadOp : CIR_Op<"load", [ [MemRead]>:$addr, UnitAttr:$isDeref, UnitAttr:$is_volatile, OptionalAttr:$alignment, - OptionalAttr:$mem_order + OptionalAttr:$mem_order, + OptionalAttr:$tbaa ); let results = (outs CIR_AnyType:$result); @@ -597,6 +598,7 @@ def LoadOp : CIR_Op<"load", [ (`align` `(` $alignment^ `)`)? (`atomic` `(` $mem_order^ `)`)? $addr `:` qualified(type($addr)) `,` type($result) attr-dict + (`tbaa` `(` $tbaa^ `)`)? }]; let extraClassDeclaration = [{ @@ -654,13 +656,15 @@ def StoreOp : CIR_Op<"store", [ [MemWrite]>:$addr, UnitAttr:$is_volatile, OptionalAttr:$alignment, - OptionalAttr:$mem_order); + OptionalAttr:$mem_order, + OptionalAttr:$tbaa); let assemblyFormat = [{ (`volatile` $is_volatile^)? (`align` `(` $alignment^ `)`)? (`atomic` `(` $mem_order^ `)`)? $value `,` $addr attr-dict `:` type($value) `,` qualified(type($addr)) + (`tbaa` `(` $tbaa^ `)`)? }]; let extraClassDeclaration = [{ @@ -3980,7 +3984,8 @@ def CopyOp : CIR_Op<"copy", DeclareOpInterfaceMethods]> { let arguments = (ins Arg:$dst, Arg:$src, - UnitAttr:$is_volatile); + UnitAttr:$is_volatile, + OptionalAttr:$tbaa); let summary = "Copies contents from a CIR pointer to another"; let description = [{ Given two CIR pointers, `src` and `dst`, `cir.copy` will copy the memory @@ -3999,7 +4004,9 @@ def CopyOp : CIR_Op<"copy", }]; let assemblyFormat = [{$src `to` $dst (`volatile` $is_volatile^)? - attr-dict `:` qualified(type($dst)) }]; + attr-dict `:` qualified(type($dst)) + (`tbaa` `(` $tbaa^ `)`)? + }]; let hasVerifier = 1; let extraClassDeclaration = [{ diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 7d59e10809eb..f6e7b34ad197 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -58,6 +58,7 @@ struct MissingFeatures { // sanitizer related type check features static bool emitTypeCheck() { return false; } static bool tbaa() { return false; } + static bool tbaa_struct() { return false; } static bool cleanups() { return false; } static bool emitNullabilityCheck() { return false; } static bool ptrAuth() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 41fcd60179d0..607f62cea8ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -585,7 +585,7 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__atomic_load: case AtomicExpr::AO__scoped_atomic_load_n: case AtomicExpr::AO__scoped_atomic_load: { - auto *load = builder.createLoad(loc, Ptr).getDefiningOp(); + auto load = builder.createLoad(loc, Ptr); // FIXME(cir): add scope information. assert(!cir::MissingFeatures::syncScopeID()); load->setAttr("mem_order", orderAttr); @@ -1462,8 +1462,7 @@ void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, if (IsVolatile) store.setIsVolatile(true); - // DecorateInstructionWithTBAA - assert(!cir::MissingFeatures::tbaa()); + CGM.decorateOperationWithTBAA(store, dest.getTBAAInfo()); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 91dd7d6e5a7d..ea5fb980cc40 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -832,7 +832,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { addr.getAlignment()); } - mlir::Value createLoad(mlir::Location loc, Address addr, + cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile = false) { auto ptrTy = mlir::dyn_cast(addr.getPointer().getType()); if (addr.getElementType() != ptrTy.getPointee()) @@ -842,7 +842,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return create( loc, addr.getElementType(), addr.getPointer(), /*isDeref=*/false, /*is_volatile=*/isVolatile, /*alignment=*/mlir::IntegerAttr{}, - /*mem_order=*/cir::MemOrderAttr{}); + /*mem_order=*/cir::MemOrderAttr{}, /*tbaa=*/mlir::ArrayAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 3a801872d084..d4e53e268328 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -751,8 +751,14 @@ void CIRGenFunction::initializeVTablePointer(mlir::Location loc, assert(!cir::MissingFeatures::addressSpace()); VTableField = builder.createElementBitCast(loc, VTableField, VTableAddressPoint.getType()); - builder.createStore(loc, VTableAddressPoint, VTableField); - assert(!cir::MissingFeatures::tbaa()); + auto storeOp = builder.createStore(loc, VTableAddressPoint, VTableField); + TBAAAccessInfo TBAAInfo = + CGM.getTBAAVTablePtrAccessInfo(VTableAddressPoint.getType()); + CGM.decorateOperationWithTBAA(storeOp, TBAAInfo); + if (CGM.getCodeGenOpts().OptimizationLevel > 0 && + CGM.getCodeGenOpts().StrictVTablePointers) { + assert(!cir::MissingFeatures::createInvariantGroup()); + } } void CIRGenFunction::initializeVTablePointers(mlir::Location loc, @@ -1659,14 +1665,16 @@ mlir::Value CIRGenFunction::getVTablePtr(mlir::Location Loc, Address This, Address CIRGenFunction::emitCXXMemberDataPointerAddress( const Expr *E, Address base, mlir::Value memberPtr, - const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo) { + const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo, + TBAAAccessInfo *tbaaInfo) { assert(!cir::MissingFeatures::cxxABI()); auto op = builder.createGetIndirectMember(getLoc(E->getSourceRange()), base.getPointer(), memberPtr); QualType memberType = memberPtrType->getPointeeType(); - CharUnits memberAlign = CGM.getNaturalTypeAlignment(memberType, baseInfo); + CharUnits memberAlign = + CGM.getNaturalTypeAlignment(memberType, baseInfo, tbaaInfo); memberAlign = CGM.getDynamicOffsetAlignment( base.getAlignment(), memberPtrType->getClass()->getAsCXXRecordDecl(), memberAlign); diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 6fa387483492..c4d53a8477ec 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -327,7 +327,7 @@ void CIRGenFunction::emitAutoVarInit(const AutoVarEmission &emission) { // its removal/optimization to the CIR lowering. if (!constant || isa(Init)) { initializeWhatIsTechnicallyUninitialized(Loc); - LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl); + LValue lv = makeAddrLValue(Loc, type, AlignmentSource::Decl); emitExprAsInit(Init, &D, lv); // In case lv has uses it means we indeed initialized something // out of it while trying to build the expression, mark it as such. diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index fc0dbfa4d751..a6a5121272f5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -136,20 +136,26 @@ static Address emitPointerWithAlignment(const Expr *expr, CE->getSubExpr()->getType()->getAs()) { if (PtrTy->getPointeeType()->isVoidType()) break; - assert(!cir::MissingFeatures::tbaa()); LValueBaseInfo innerBaseInfo; + TBAAAccessInfo innerTBAAInfo; Address addr = cgf.emitPointerWithAlignment( - CE->getSubExpr(), &innerBaseInfo, tbaaInfo, isKnownNonNull); + CE->getSubExpr(), &innerBaseInfo, &innerTBAAInfo, isKnownNonNull); if (baseInfo) *baseInfo = innerBaseInfo; + if (tbaaInfo) { + *tbaaInfo = innerTBAAInfo; + } if (isa(CE)) { - assert(!cir::MissingFeatures::tbaa()); LValueBaseInfo TargetTypeBaseInfo; + TBAAAccessInfo TargetTypeTBAAInfo; CharUnits Align = cgf.CGM.getNaturalPointeeTypeAlignment( - expr->getType(), &TargetTypeBaseInfo); + expr->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo); + if (tbaaInfo) + *tbaaInfo = + cgf.CGM.mergeTBAAInfoForCast(*tbaaInfo, TargetTypeTBAAInfo); // If the source l-value is opaque, honor the alignment of the // casted-to type. @@ -188,9 +194,15 @@ static Address emitPointerWithAlignment(const Expr *expr, // TODO: Support accesses to members of base classes in TBAA. For now, we // conservatively pretend that the complete object is of the base class // type. - assert(!cir::MissingFeatures::tbaa()); - Address Addr = cgf.emitPointerWithAlignment(CE->getSubExpr(), baseInfo); - auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); + if (tbaaInfo) { + *tbaaInfo = cgf.CGM.getTBAAAccessInfo(expr->getType()); + } + Address Addr = cgf.emitPointerWithAlignment( + CE->getSubExpr(), baseInfo, nullptr, + (KnownNonNull_t)(isKnownNonNull || + CE->getCastKind() == CK_UncheckedDerivedToBase)); + const auto *Derived = + CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); return cgf.getAddressOfBaseClass( Addr, Derived, CE->path_begin(), CE->path_end(), cgf.shouldNullCheckClassCastValue(CE), CE->getExprLoc()); @@ -210,7 +222,8 @@ static Address emitPointerWithAlignment(const Expr *expr, LValue LV = cgf.emitLValue(UO->getSubExpr()); if (baseInfo) *baseInfo = LV.getBaseInfo(); - assert(!cir::MissingFeatures::tbaa()); + if (tbaaInfo) + *tbaaInfo = LV.getTBAAInfo(); return LV.getAddress(); } } @@ -288,7 +301,7 @@ LValue CIRGenFunction::emitLValueForBitField(LValue base, QualType fieldType = field->getType().withCVRQualifiers(base.getVRQualifiers()); - assert(!cir::MissingFeatures::tbaa() && "NYI TBAA for bit fields"); + // TODO(cir): Support TBAA for bit fields. LValueBaseInfo fieldBaseInfo(BaseInfo.getAlignmentSource()); return LValue::MakeBitfield(Addr, info, fieldType, fieldBaseInfo, TBAAAccessInfo()); @@ -306,15 +319,34 @@ LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) { const RecordDecl *rec = field->getParent(); AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource)); - if (cir::MissingFeatures::tbaa() || rec->hasAttr() || + TBAAAccessInfo FieldTBAAInfo; + if (base.getTBAAInfo().isMayAlias() || rec->hasAttr() || FieldType->isVectorType()) { - assert(!cir::MissingFeatures::tbaa() && "NYI"); + FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); } else if (rec->isUnion()) { - assert(!cir::MissingFeatures::tbaa() && "NYI"); + FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); } else { // If no base type been assigned for the base access, then try to generate // one for this base lvalue. - assert(!cir::MissingFeatures::tbaa() && "NYI"); + FieldTBAAInfo = base.getTBAAInfo(); + if (!FieldTBAAInfo.baseType) { + FieldTBAAInfo.baseType = CGM.getTBAABaseTypeInfo(base.getType()); + assert(!FieldTBAAInfo.offset && + "Nonzero offset for an access with no base type!"); + } + + // Adjust offset to be relative to the base type. + const ASTRecordLayout &Layout = + getContext().getASTRecordLayout(field->getParent()); + unsigned CharWidth = getContext().getCharWidth(); + if (FieldTBAAInfo.baseType) + FieldTBAAInfo.offset += + Layout.getFieldOffset(field->getFieldIndex()) / CharWidth; + + // Update the final access type and size. + FieldTBAAInfo.accessType = CGM.getTBAAAccessInfo(FieldType).accessType; + FieldTBAAInfo.size = + getContext().getTypeSizeInChars(FieldType).getQuantity(); } Address addr = base.getAddress(); @@ -365,12 +397,12 @@ LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) { // If this is a reference field, load the reference right now. if (FieldType->isReferenceType()) { - assert(!cir::MissingFeatures::tbaa()); - LValue RefLVal = makeAddrLValue(addr, FieldType, FieldBaseInfo); + LValue RefLVal = + makeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo); if (RecordCVR & Qualifiers::Volatile) RefLVal.getQuals().addVolatile(); addr = emitLoadOfReference(RefLVal, getLoc(field->getSourceRange()), - &FieldBaseInfo); + &FieldBaseInfo, &FieldTBAAInfo); // Qualifiers on the struct don't apply to the referencee. RecordCVR = 0; @@ -387,10 +419,7 @@ LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) { if (field->hasAttr()) llvm_unreachable("NYI"); - if (cir::MissingFeatures::tbaa()) - // Next line should take a TBAA object - llvm_unreachable("NYI"); - LValue LV = makeAddrLValue(addr, FieldType, FieldBaseInfo); + LValue LV = makeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo); LV.getQuals().addCVRQualifiers(RecordCVR); // __weak attribute on a field is ignored. @@ -423,8 +452,8 @@ LValue CIRGenFunction::emitLValueForFieldInitialization( LValueBaseInfo BaseInfo = Base.getBaseInfo(); AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource)); - assert(!cir::MissingFeatures::tbaa() && "NYI"); - return makeAddrLValue(V, FieldType, FieldBaseInfo); + return makeAddrLValue(V, FieldType, FieldBaseInfo, + CGM.getTBAAInfoForSubobject(Base, FieldType)); } LValue CIRGenFunction::emitCompoundLiteralLValue(const CompoundLiteralExpr *E) { @@ -630,14 +659,13 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr, } assert(currSrcLoc && "must pass in source location"); - builder.createStore(*currSrcLoc, value, addr, isVolatile); + auto storeOp = builder.createStore(*currSrcLoc, value, addr, isVolatile); if (isNontemporal) { llvm_unreachable("NYI"); } - if (cir::MissingFeatures::tbaa()) - llvm_unreachable("NYI"); + CGM.decorateOperationWithTBAA(storeOp, tbaaInfo); } void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue, @@ -1113,12 +1141,12 @@ CIRGenFunction::emitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { auto memberPtr = emitScalarExpr(E->getRHS()); LValueBaseInfo baseInfo; - // TODO(cir): add TBAA - assert(!cir::MissingFeatures::tbaa()); - auto memberAddr = emitCXXMemberDataPointerAddress(E, baseAddr, memberPtr, - memberPtrTy, &baseInfo); + TBAAAccessInfo tbaaInfo; + auto memberAddr = emitCXXMemberDataPointerAddress( + E, baseAddr, memberPtr, memberPtrTy, &baseInfo, &tbaaInfo); - return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo); + return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo, + tbaaInfo); } LValue CIRGenFunction::emitExtVectorElementExpr(const ExtVectorElementExpr *E) { @@ -1130,11 +1158,10 @@ LValue CIRGenFunction::emitExtVectorElementExpr(const ExtVectorElementExpr *E) { // If it is a pointer to a vector, emit the address and form an lvalue with // it. LValueBaseInfo BaseInfo; - // TODO(cir): Support TBAA - assert(!cir::MissingFeatures::tbaa()); - Address Ptr = emitPointerWithAlignment(E->getBase(), &BaseInfo); + TBAAAccessInfo TBAAInfo; + Address Ptr = emitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo); const auto *PT = E->getBase()->getType()->castAs(); - base = makeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo); + base = makeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo); base.getQuals().removeObjCGCAttr(); } else if (E->getBase()->isGLValue()) { // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), @@ -1273,8 +1300,9 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *E) { assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); LValueBaseInfo BaseInfo; - // TODO: add TBAAInfo - Address Addr = emitPointerWithAlignment(E->getSubExpr(), &BaseInfo); + TBAAAccessInfo TBAAInfo; + Address Addr = + emitPointerWithAlignment(E->getSubExpr(), &BaseInfo, &TBAAInfo); // Tag 'load' with deref attribute. if (auto loadOp = @@ -1282,7 +1310,7 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *E) { loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext())); } - LValue LV = LValue::makeAddr(Addr, T, BaseInfo); + LValue LV = LValue::makeAddr(Addr, T, BaseInfo, TBAAInfo); // TODO: set addr space // TODO: ObjC/GC/__weak write barrier stuff. return LV; @@ -1307,9 +1335,8 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *E) { (E->getOpcode() == UO_Real ? emitAddrOfRealComponent(Loc, LV.getAddress(), LV.getType()) : emitAddrOfImagComponent(Loc, LV.getAddress(), LV.getType())); - // TODO(cir): TBAA info. - assert(!cir::MissingFeatures::tbaa()); - LValue ElemLV = makeAddrLValue(Component, T, LV.getBaseInfo()); + LValue ElemLV = makeAddrLValue(Component, T, LV.getBaseInfo(), + CGM.getTBAAInfoForSubobject(LV, T)); ElemLV.getQuals().addQualifiers(LV.getQuals()); return ElemLV; } @@ -1529,7 +1556,8 @@ void CIRGenFunction::emitIgnoredExpr(const Expr *E) { } Address CIRGenFunction::emitArrayToPointerDecay(const Expr *E, - LValueBaseInfo *BaseInfo) { + LValueBaseInfo *BaseInfo, + TBAAAccessInfo *TBAAInfo) { assert(E->getType()->isArrayType() && "Array to pointer decay must have array source type!"); @@ -1561,7 +1589,8 @@ Address CIRGenFunction::emitArrayToPointerDecay(const Expr *E, QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); if (BaseInfo) *BaseInfo = LV.getBaseInfo(); - assert(!cir::MissingFeatures::tbaa() && "NYI"); + if (TBAAInfo) + *TBAAInfo = CGM.getTBAAAccessInfo(EltType); mlir::Value ptr = CGM.getBuilder().maybeBuildArrayDecay( CGM.getLoc(E->getSourceRange()), Addr.getPointer(), @@ -1757,15 +1786,16 @@ LValue CIRGenFunction::emitArraySubscriptExpr(const ArraySubscriptExpr *E, llvm_unreachable("extvector subscript is NYI"); } - assert(!cir::MissingFeatures::tbaa() && "TBAA is NYI"); LValueBaseInfo EltBaseInfo; + TBAAAccessInfo EltTBAAInfo; + Address Addr = Address::invalid(); if (const VariableArrayType *vla = getContext().getAsVariableArrayType(E->getType())) { // The base must be a pointer, which is not an aggregate. Emit // it. It needs to be emitted first in case it's what captures // the VLA bounds. - Addr = emitPointerWithAlignment(E->getBase(), &EltBaseInfo); + Addr = emitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); auto Idx = EmitIdxAfterBase(/*Promote*/ true); // The element count here is the total number of non-VLA elements. @@ -1808,13 +1838,10 @@ LValue CIRGenFunction::emitArraySubscriptExpr(const ArraySubscriptExpr *E, CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/true, &arrayType, E->getBase()); EltBaseInfo = ArrayLV.getBaseInfo(); - // TODO(cir): EltTBAAInfo - assert(!cir::MissingFeatures::tbaa() && "TBAA is NYI"); + EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType()); } else { // The base must be a pointer; emit it with an estimate of its alignment. - // TODO(cir): EltTBAAInfo - assert(!cir::MissingFeatures::tbaa() && "TBAA is NYI"); - Addr = emitPointerWithAlignment(E->getBase(), &EltBaseInfo); + Addr = emitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); auto Idx = EmitIdxAfterBase(/*Promote*/ true); QualType ptrType = E->getBase()->getType(); Addr = emitArraySubscriptPtr( @@ -1824,7 +1851,7 @@ LValue CIRGenFunction::emitArraySubscriptExpr(const ArraySubscriptExpr *E, &ptrType, E->getBase()); } - LValue LV = LValue::makeAddr(Addr, E->getType(), EltBaseInfo); + LValue LV = LValue::makeAddr(Addr, E->getType(), EltBaseInfo, EltTBAAInfo); if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) { llvm_unreachable("ObjC is NYI"); @@ -1969,8 +1996,8 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *E) { // TODO: Support accesses to members of base classes in TBAA. For now, we // conservatively pretend that the complete object is of the base class // type. - assert(!cir::MissingFeatures::tbaa()); - return makeAddrLValue(Base, E->getType(), LV.getBaseInfo()); + return makeAddrLValue(Base, E->getType(), LV.getBaseInfo(), + CGM.getTBAAInfoForSubobject(LV, E->getType())); } case CK_ToUnion: assert(0 && "NYI"); @@ -1988,10 +2015,9 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *E) { auto DestAS = builder.getAddrSpaceAttr(E->getType().getAddressSpace()); mlir::Value V = getTargetHooks().performAddrSpaceCast( *this, LV.getPointer(), SrcAS, DestAS, ConvertType(DestTy)); - assert(!cir::MissingFeatures::tbaa()); return makeAddrLValue(Address(V, getTypes().convertTypeForMem(E->getType()), LV.getAddress().getAlignment()), - E->getType(), LV.getBaseInfo()); + E->getType(), LV.getBaseInfo(), LV.getTBAAInfo()); } case CK_ObjCObjectLValueCast: { assert(0 && "NYI"); @@ -2072,7 +2098,8 @@ LValue CIRGenFunction::emitMemberExpr(const MemberExpr *E) { LValue BaseLV; if (E->isArrow()) { LValueBaseInfo BaseInfo; - Address Addr = emitPointerWithAlignment(BaseExpr, &BaseInfo); + TBAAAccessInfo TBAAInfo; + Address Addr = emitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo); QualType PtrTy = BaseExpr->getType()->getPointeeType(); SanitizerSet SkippedChecks; bool IsBaseCXXThis = isWrappedCXXThis(BaseExpr); @@ -2082,7 +2109,7 @@ LValue CIRGenFunction::emitMemberExpr(const MemberExpr *E) { SkippedChecks.set(SanitizerKind::Null, true); emitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, /*Alignment=*/CharUnits::Zero(), SkippedChecks); - BaseLV = makeAddrLValue(Addr, PtrTy, BaseInfo); + BaseLV = makeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo); } else BaseLV = emitCheckedLValue(BaseExpr, TCK_MemberAccess); @@ -2436,56 +2463,55 @@ CIRGenFunction::emitConditionalBlocks(const AbstractConditionalOperator *E, } }; - Info.Result = builder - .create( - loc, condV, /*trueBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScope lexScope{ - *this, loc, b.getInsertionBlock()}; - CGF.currLexScope->setAsTernary(); - - assert( - !cir::MissingFeatures::incrementProfileCounter()); - eval.begin(CGF); - Info.LHS = BranchGenFunc(CGF, trueExpr); - auto lhs = Info.LHS->getPointer(); - eval.end(CGF); - - if (lhs) { - yieldTy = lhs.getType(); - b.create(loc, lhs); - return; - } - // If LHS or RHS is a throw or void expression we need - // to patch arms as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - }, - /*falseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScope lexScope{ - *this, loc, b.getInsertionBlock()}; - CGF.currLexScope->setAsTernary(); - - assert( - !cir::MissingFeatures::incrementProfileCounter()); - eval.begin(CGF); - Info.RHS = BranchGenFunc(CGF, falseExpr); - auto rhs = Info.RHS->getPointer(); - eval.end(CGF); - - if (rhs) { - yieldTy = rhs.getType(); - b.create(loc, rhs); - } else { - // If LHS or RHS is a throw or void expression we - // need to patch arms as to properly match yield - // types. - insertPoints.push_back(b.saveInsertionPoint()); - } - - patchVoidOrThrowSites(); - }) - .getResult(); + Info.Result = + builder + .create( + loc, condV, /*trueBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScope lexScope{*this, loc, + b.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + + assert(!cir::MissingFeatures::incrementProfileCounter()); + eval.begin(CGF); + Info.LHS = BranchGenFunc(CGF, trueExpr); + auto lhs = Info.LHS->getPointer(); + eval.end(CGF); + + if (lhs) { + yieldTy = lhs.getType(); + b.create(loc, lhs); + return; + } + // If LHS or RHS is a throw or void expression we need + // to patch arms as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + }, + /*falseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScope lexScope{*this, loc, + b.getInsertionBlock()}; + CGF.currLexScope->setAsTernary(); + + assert(!cir::MissingFeatures::incrementProfileCounter()); + eval.begin(CGF); + Info.RHS = BranchGenFunc(CGF, falseExpr); + auto rhs = Info.RHS->getPointer(); + eval.end(CGF); + + if (rhs) { + yieldTy = rhs.getType(); + b.create(loc, rhs); + } else { + // If LHS or RHS is a throw or void expression we + // need to patch arms as to properly match yield + // types. + insertPoints.push_back(b.saveInsertionPoint()); + } + + patchVoidOrThrowSites(); + }) + .getResult(); return Info; } @@ -2520,8 +2546,10 @@ LValue CIRGenFunction::emitConditionalOperatorLValue( AlignmentSource alignSource = std::max(Info.LHS->getBaseInfo().getAlignmentSource(), Info.RHS->getBaseInfo().getAlignmentSource()); - assert(!cir::MissingFeatures::tbaa()); - return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource)); + TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator( + Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo()); + return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource), + TBAAInfo); } else { llvm_unreachable("NYI"); } @@ -2625,7 +2653,7 @@ LValue CIRGenFunction::emitLValue(const Expr *E) { return emitStmtExprLValue(cast(E)); } - return LValue::makeAddr(Address::invalid(), E->getType()); + llvm_unreachable("NYI"); } /// Given the address of a temporary variable, produce an r-value of its type. @@ -2899,17 +2927,16 @@ mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile, Ptr = builder.create(loc, ElemPtrTy, cir::CastKind::bitcast, Ptr); } - - mlir::Value Load = builder.CIRBaseBuilderTy::createLoad(loc, Ptr, isVolatile); + auto loadOp = builder.CIRBaseBuilderTy::createLoad(loc, Ptr, isVolatile); if (isNontemporal) { llvm_unreachable("NYI"); } + CGM.decorateOperationWithTBAA(loadOp, tbaaInfo); - assert(!cir::MissingFeatures::tbaa() && "NYI"); assert(!cir::MissingFeatures::emitScalarRangeCheck() && "NYI"); - return emitFromMemory(Load, ty); + return emitFromMemory(loadOp, ty); } // Note: this function also emit constructor calls to support a MSVC extensions @@ -2959,8 +2986,7 @@ Address CIRGenFunction::emitLoadOfReference(LValue refLVal, mlir::Location loc, builder.create(loc, refLVal.getAddress().getElementType(), refLVal.getAddress().getPointer()); - // TODO(cir): DecorateInstructionWithTBAA relevant for us? - assert(!cir::MissingFeatures::tbaa()); + CGM.decorateOperationWithTBAA(load, refLVal.getTBAAInfo()); QualType pointeeType = refLVal.getType()->getPointeeType(); CharUnits align = @@ -2972,9 +2998,11 @@ Address CIRGenFunction::emitLoadOfReference(LValue refLVal, mlir::Location loc, LValue CIRGenFunction::emitLoadOfReferenceLValue(LValue RefLVal, mlir::Location Loc) { LValueBaseInfo PointeeBaseInfo; - Address PointeeAddr = emitLoadOfReference(RefLVal, Loc, &PointeeBaseInfo); + TBAAAccessInfo PointeeTBAAInfo; + Address PointeeAddr = + emitLoadOfReference(RefLVal, Loc, &PointeeBaseInfo, &PointeeTBAAInfo); return makeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(), - PointeeBaseInfo); + PointeeBaseInfo, PointeeTBAAInfo); } void CIRGenFunction::emitUnreachable(SourceLocation Loc) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index f13cb8600f9a..32f343ffd605 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -1708,13 +1708,18 @@ void CIRGenFunction::emitAggregateCopy(LValue Dest, LValue Src, QualType Ty, } } - builder.createCopy(DestPtr.getPointer(), SrcPtr.getPointer(), isVolatile); + auto copyOp = + builder.createCopy(DestPtr.getPointer(), SrcPtr.getPointer(), isVolatile); // Determine the metadata to describe the position of any padding in this // memcpy, as well as the TBAA tags for the members of the struct, in case // the optimizer wishes to expand it in to scalar memory operations. - if (CGM.getCodeGenOpts().NewStructPathTBAA || cir::MissingFeatures::tbaa()) - llvm_unreachable("TBAA is NYI"); + assert(!cir::MissingFeatures::tbaa_struct() && "tbaa.struct NYI"); + if (CGM.getCodeGenOpts().NewStructPathTBAA) { + TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer( + Dest.getTBAAInfo(), Src.getTBAAInfo()); + CGM.decorateOperationWithTBAA(copyOp, TBAAInfo); + } } AggValueSlot::Overlap_t diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index be3ec6071def..e336594388ae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -218,9 +218,9 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr( LValue This; if (IsArrow) { LValueBaseInfo BaseInfo; - assert(!cir::MissingFeatures::tbaa()); - Address ThisValue = emitPointerWithAlignment(Base, &BaseInfo); - This = makeAddrLValue(ThisValue, Base->getType(), BaseInfo); + TBAAAccessInfo TBAAInfo; + Address ThisValue = emitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo); + This = makeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo); } else { This = emitLValue(Base); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index ee2a0c32cbff..7d51dc05e7c1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1604,10 +1604,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { Address DestAddr = SourceAddr.withPointer(DestPtr).withElementType(DestElemTy); LValue DestLVal = CGF.makeAddrLValue(DestAddr, DestTy); - - if (Kind == CK_LValueToRValueBitCast) - assert(!cir::MissingFeatures::tbaa()); - + DestLVal.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); return emitLoadOfLValue(DestLVal, CE->getExprLoc()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index b31a4ba325ae..1c84cb3ca71b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -824,14 +824,13 @@ LValue CIRGenFunction::MakeNaturalAlignPointeeAddrLValue(mlir::Value val, TBAAAccessInfo tbaaInfo; CharUnits align = CGM.getNaturalTypeAlignment(ty, &baseInfo, &tbaaInfo, /* for PointeeType= */ true); - return makeAddrLValue(Address(val, align), ty, baseInfo); + return makeAddrLValue(Address(val, align), ty, baseInfo, tbaaInfo); } LValue CIRGenFunction::MakeNaturalAlignAddrLValue(mlir::Value val, QualType ty) { LValueBaseInfo baseInfo; TBAAAccessInfo tbaaInfo; - assert(!cir::MissingFeatures::tbaa()); CharUnits alignment = CGM.getNaturalTypeAlignment(ty, &baseInfo, &tbaaInfo); Address addr(val, getTypes().convertTypeForMem(ty), alignment); return LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 8d4fabeff642..e5db0a01e429 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -886,7 +886,8 @@ class CIRGenFunction : public CIRGenTypeCache { LValue emitLoadOfReferenceLValue(Address RefAddr, mlir::Location Loc, QualType RefTy, AlignmentSource Source = AlignmentSource::Type) { - LValue RefLVal = makeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source)); + LValue RefLVal = makeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source), + CGM.getTBAAAccessInfo(RefTy)); return emitLoadOfReferenceLValue(RefLVal, Loc); } void emitImplicitAssignmentOperatorBody(FunctionArgList &Args); @@ -909,7 +910,8 @@ class CIRGenFunction : public CIRGenTypeCache { /// TODO: Add TBAAAccessInfo Address emitCXXMemberDataPointerAddress( const Expr *E, Address base, mlir::Value memberPtr, - const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo); + const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo, + TBAAAccessInfo *tbaaInfo); /// Generate a call of the given function, expecting the given /// result type, and using the given argument list which specifies both the @@ -1649,9 +1651,8 @@ class CIRGenFunction : public CIRGenTypeCache { QualType DstTy, SourceLocation Loc); LValue makeAddrLValue(Address addr, clang::QualType ty, - LValueBaseInfo baseInfo) { - return LValue::makeAddr(addr, ty, getContext(), baseInfo, - CGM.getTBAAAccessInfo(ty)); + LValueBaseInfo baseInfo, TBAAAccessInfo tbaaInfo) { + return LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); } LValue makeAddrLValue(Address addr, clang::QualType ty, @@ -1744,7 +1745,8 @@ class CIRGenFunction : public CIRGenTypeCache { /// TODO(cir): add TBAAAccessInfo Address emitArrayToPointerDecay(const Expr *Array, - LValueBaseInfo *BaseInfo = nullptr); + LValueBaseInfo *BaseInfo = nullptr, + TBAAAccessInfo *TBAAInfo = nullptr); /// Emits the code necessary to evaluate an arbitrary expression into the /// given memory location. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 97678fa2ad8a..b9e332d4b27a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -205,6 +205,11 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, /*line=*/0, /*col=*/0)); } + if (langOpts.Sanitize.has(SanitizerKind::Thread) || + (!codeGenOpts.RelaxedAliasing && codeGenOpts.OptimizationLevel > 0)) { + tbaa.reset(new CIRGenTBAA(&context, astctx, genTypes, theModule, + codeGenOpts, langOpts)); + } } CIRGenModule::~CIRGenModule() {} @@ -258,6 +263,9 @@ CharUnits CIRGenModule::getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *tbaaInfo, bool forPointeeType) { + if (tbaaInfo) { + *tbaaInfo = getTBAAAccessInfo(T); + } // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But // that doesn't return the information we need to compute BaseInfo. @@ -3513,8 +3521,69 @@ void CIRGenModule::emitGlobalAnnotations() { deferredAnnotations.clear(); } +cir::TBAAAttr CIRGenModule::getTBAATypeInfo(QualType QTy) { + if (!tbaa) { + return nullptr; + } + return tbaa->getTypeInfo(QTy); +} + TBAAAccessInfo CIRGenModule::getTBAAAccessInfo(QualType accessType) { + if (!tbaa) { + return TBAAAccessInfo(); + } + if (getLangOpts().CUDAIsDevice) { + llvm_unreachable("NYI"); + } + return tbaa->getAccessInfo(accessType); +} + +TBAAAccessInfo +CIRGenModule::getTBAAVTablePtrAccessInfo(mlir::Type VTablePtrType) { if (!tbaa) return TBAAAccessInfo(); - llvm_unreachable("NYI"); + return tbaa->getVTablePtrAccessInfo(VTablePtrType); +} + +mlir::ArrayAttr CIRGenModule::getTBAAStructInfo(QualType QTy) { + if (!tbaa) + return nullptr; + return tbaa->getTBAAStructInfo(QTy); +} + +cir::TBAAAttr CIRGenModule::getTBAABaseTypeInfo(QualType QTy) { + if (!tbaa) { + return nullptr; + } + return tbaa->getBaseTypeInfo(QTy); +} + +mlir::ArrayAttr CIRGenModule::getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo) { + if (!tbaa) { + return nullptr; + } + return tbaa->getAccessTagInfo(tbaaInfo); +} + +TBAAAccessInfo CIRGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, + TBAAAccessInfo TargetInfo) { + if (!tbaa) + return TBAAAccessInfo(); + return tbaa->mergeTBAAInfoForCast(SourceInfo, TargetInfo); +} + +TBAAAccessInfo +CIRGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, + TBAAAccessInfo InfoB) { + if (!tbaa) + return TBAAAccessInfo(); + return tbaa->mergeTBAAInfoForConditionalOperator(InfoA, InfoB); +} + +TBAAAccessInfo +CIRGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo, + TBAAAccessInfo SrcInfo) { + if (!tbaa) + return TBAAAccessInfo(); + return tbaa->mergeTBAAInfoForConditionalOperator(DestInfo, SrcInfo); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 961a999990b6..61d975491f33 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -39,6 +39,7 @@ #include "llvm/ADT/SmallPtrSet.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" @@ -506,10 +507,52 @@ class CIRGenModule : public CIRGenTypeCache { return VTables.getItaniumVTableContext(); } - /// getTBAAAccessInfo - Gte TBAA information that describes an access to an - /// object of the given type. + /// Get attribute used to describe accesses to objects of + /// the given type. + cir::TBAAAttr getTBAATypeInfo(QualType QTy); + + /// Get TBAA information that describes an access to an object of the given + /// type. TBAAAccessInfo getTBAAAccessInfo(QualType accessType); + /// Get the TBAA information that describes an access to a virtual table + /// pointer. + TBAAAccessInfo getTBAAVTablePtrAccessInfo(mlir::Type VTablePtrType); + + mlir::ArrayAttr getTBAAStructInfo(QualType QTy); + + /// Get metadata that describes the given base access type. Return null if the + /// type is not suitable for use in TBAA access tags. + cir::TBAAAttr getTBAABaseTypeInfo(QualType QTy); + + mlir::ArrayAttr getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo); + + /// Get merged TBAA information for the purposes of type casts. + TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, + TBAAAccessInfo TargetInfo); + + /// Get merged TBAA information for the purposes of conditional operator. + TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, + TBAAAccessInfo InfoB); + + /// Get merged TBAA information for the purposes of memory transfer calls. + TBAAAccessInfo mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo, + TBAAAccessInfo SrcInfo); + + /// Get TBAA information for an access with a given base lvalue. + TBAAAccessInfo getTBAAInfoForSubobject(LValue Base, QualType AccessType) { + if (Base.getTBAAInfo().isMayAlias()) + return TBAAAccessInfo::getMayAliasInfo(); + return getTBAAAccessInfo(AccessType); + } + + template + void decorateOperationWithTBAA(Op op, TBAAAccessInfo tbaaInfo) { + if (auto tag = getTBAAAccessTagInfo(tbaaInfo)) { + op.setTbaaAttr(tag); + } + } + /// This contains all the decls which have definitions but which are deferred /// for emission and therefore should only be output if they are actually /// used. If a decl is in this, then it is known to have not been referenced diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp index e69de29bb2d1..c3083e93eeb1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp @@ -0,0 +1,64 @@ +#include "CIRGenTBAA.h" +#include "CIRGenCXXABI.h" +#include "CIRGenTypes.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/RecordLayout.h" +#include "llvm/Support/ErrorHandling.h" +namespace clang::CIRGen { + +cir::TBAAAttr tbaa_NYI(mlir::MLIRContext *ctx) { + return cir::TBAAAttr::get(ctx); +} + +CIRGenTBAA::CIRGenTBAA(mlir::MLIRContext *ctx, clang::ASTContext &context, + CIRGenTypes &types, mlir::ModuleOp moduleOp, + const clang::CodeGenOptions &codeGenOpts, + const clang::LangOptions &features) + : ctx(ctx), context(context), types(types), moduleOp(moduleOp), + codeGenOpts(codeGenOpts), features(features) {} + +cir::TBAAAttr CIRGenTBAA::getTypeInfo(clang::QualType qty) { + return tbaa_NYI(ctx); +} + +TBAAAccessInfo CIRGenTBAA::getAccessInfo(clang::QualType accessType) { + return TBAAAccessInfo(); +} + +TBAAAccessInfo CIRGenTBAA::getVTablePtrAccessInfo(mlir::Type vtablePtrType) { + return TBAAAccessInfo(); +} + +mlir::ArrayAttr CIRGenTBAA::getTBAAStructInfo(clang::QualType qty) { + return mlir::ArrayAttr::get(ctx, {}); +} + +cir::TBAAAttr CIRGenTBAA::getBaseTypeInfo(clang::QualType qty) { + return tbaa_NYI(ctx); +} + +mlir::ArrayAttr CIRGenTBAA::getAccessTagInfo(TBAAAccessInfo tbaaInfo) { + return mlir::ArrayAttr::get(ctx, {tbaa_NYI(ctx)}); +} + +TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForCast(TBAAAccessInfo sourceInfo, + TBAAAccessInfo targetInfo) { + return TBAAAccessInfo(); +} + +TBAAAccessInfo +CIRGenTBAA::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo infoA, + TBAAAccessInfo infoB) { + return TBAAAccessInfo(); +} + +TBAAAccessInfo +CIRGenTBAA::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo destInfo, + TBAAAccessInfo srcInfo) { + return TBAAAccessInfo(); +} + +} // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h index 2b33f0da16d4..b6a392bd164c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.h +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.h @@ -13,16 +13,169 @@ #ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENTBAA_H #define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENTBAA_H - +#include "mlir/IR/Attributes.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "clang/AST/Type.h" +#include "clang/Basic/CodeGenOptions.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" namespace clang::CIRGen { +class CIRGenTypes; +enum class TBAAAccessKind : unsigned { + Ordinary, + MayAlias, + Incomplete, +}; +// Describes a memory access in terms of TBAA. +struct TBAAAccessInfo { + TBAAAccessInfo(TBAAAccessKind kind, cir::TBAAAttr baseType, + cir::TBAAAttr accessType, uint64_t offset, uint64_t size) + : kind(kind), baseType(baseType), accessType(accessType), offset(offset) { + } -// TBAAAccessInfo - Describes a memory access in terms of TBAA. -struct TBAAAccessInfo {}; + TBAAAccessInfo(cir::TBAAAttr baseType, cir::TBAAAttr accessType, + uint64_t offset, uint64_t size) + : kind(TBAAAccessKind::Ordinary), baseType(baseType), + accessType(accessType), offset(offset) {} -/// CIRGenTBAA - This class organizes the cross-module state that is used while -/// lowering AST types to LLVM types. -class CIRGenTBAA {}; + explicit TBAAAccessInfo(cir::TBAAAttr accessType, uint64_t size) + : TBAAAccessInfo(TBAAAccessKind::Ordinary, /* baseType= */ {}, accessType, + /* offset= */ 0, size) {} -} // namespace clang::CIRGen + TBAAAccessInfo() + : TBAAAccessInfo(/* accessType= */ nullptr, /* size= */ 0) {}; + + static TBAAAccessInfo getMayAliasInfo() { + return TBAAAccessInfo(TBAAAccessKind::MayAlias, /* baseType= */ {}, + /* accessType= */ nullptr, + /* offset= */ 0, /* size= */ 0); + } + + bool isMayAlias() const { return kind == TBAAAccessKind::MayAlias; } + + static TBAAAccessInfo getIncompleteInfo() { + return TBAAAccessInfo(TBAAAccessKind::Incomplete, /* baseType= */ {}, + /* accessType= */ {}, + /* offset= */ 0, /* size= */ 0); + } + + bool isIncomplete() const { return kind == TBAAAccessKind::Incomplete; } + + bool operator==(const TBAAAccessInfo &other) const { + return kind == other.kind && baseType == other.baseType && + accessType == other.accessType && offset == other.offset && + size == other.size; + } + + bool operator!=(const TBAAAccessInfo &other) const { + return !(*this == other); + } + + explicit operator bool() const { return *this != TBAAAccessInfo(); } + + /// The kind of the access descriptor. + TBAAAccessKind kind; + + /// The base/leading access type. May be null if this access + /// descriptor represents an access that is not considered to be an access + /// to an aggregate or union member. + cir::TBAAAttr baseType; + + /// The final access type. May be null if there is no TBAA + /// information available about this access. + cir::TBAAAttr accessType; + + /// The byte offset of the final access within the base one. Must be + /// zero if the base access type is not specified. + uint64_t offset; + /// The size of access, in bytes. + uint64_t size; +}; + +/// This class organizes the cross-module state that is used while lowering AST +/// types to LLVM types. +class CIRGenTBAA { + mlir::MLIRContext *ctx; + clang::ASTContext &context; + CIRGenTypes &types; + mlir::ModuleOp moduleOp; + const clang::CodeGenOptions &codeGenOpts; + const clang::LangOptions &features; + +public: + CIRGenTBAA(mlir::MLIRContext *ctx, clang::ASTContext &context, + CIRGenTypes &types, mlir::ModuleOp moduleOp, + const clang::CodeGenOptions &codeGenOpts, + const clang::LangOptions &features); + + /// Get attribute used to describe accesses to objects of the given type. + cir::TBAAAttr getTypeInfo(clang::QualType qty); + + /// Get TBAA information that describes an access to an object of the given + /// type. + TBAAAccessInfo getAccessInfo(clang::QualType accessType); + + /// Get the TBAA information that describes an access to a virtual table + /// pointer. + TBAAAccessInfo getVTablePtrAccessInfo(mlir::Type vtablePtrType); + + /// Get the TBAAStruct attributes to be used for a memcpy of the given type. + mlir::ArrayAttr getTBAAStructInfo(clang::QualType qty); + + /// Get attribute that describes the given base access type. Return null if + /// the type is not suitable for use in TBAA access tags. + cir::TBAAAttr getBaseTypeInfo(clang::QualType qty); + + /// Get TBAA tag for a given memory access. + mlir::ArrayAttr getAccessTagInfo(TBAAAccessInfo tbaaInfo); + + /// Get merged TBAA information for the purpose of type casts. + TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo sourceInfo, + TBAAAccessInfo targetInfo); + + /// Get merged TBAA information for the purpose of conditional operator. + TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo infoA, + TBAAAccessInfo infoB); + + /// Get merged TBAA information for the purpose of memory transfer calls. + TBAAAccessInfo mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo destInfo, + TBAAAccessInfo srcInfo); +}; +} // namespace clang::CIRGen +namespace llvm { +template <> struct DenseMapInfo { + static clang::CIRGen::TBAAAccessInfo getEmptyKey() { + unsigned unsignedKey = DenseMapInfo::getEmptyKey(); + return clang::CIRGen::TBAAAccessInfo( + static_cast(unsignedKey), + DenseMapInfo::getEmptyKey(), + DenseMapInfo::getEmptyKey(), + DenseMapInfo::getEmptyKey(), + DenseMapInfo::getEmptyKey()); + } + static clang::CIRGen::TBAAAccessInfo getTombstoneKey() { + unsigned unsignedKey = DenseMapInfo::getTombstoneKey(); + return clang::CIRGen::TBAAAccessInfo( + static_cast(unsignedKey), + DenseMapInfo::getTombstoneKey(), + DenseMapInfo::getTombstoneKey(), + DenseMapInfo::getTombstoneKey(), + DenseMapInfo::getTombstoneKey()); + } + static unsigned getHashValue(const clang::CIRGen::TBAAAccessInfo &val) { + auto kindValue = static_cast(val.kind); + return DenseMapInfo::getHashValue(kindValue) ^ + DenseMapInfo::getHashValue(val.baseType) ^ + DenseMapInfo::getHashValue(val.accessType) ^ + DenseMapInfo::getHashValue(val.offset) ^ + DenseMapInfo::getHashValue(val.size); + } + static bool isEqual(const clang::CIRGen::TBAAAccessInfo &lhs, + const clang::CIRGen::TBAAAccessInfo &rhs) { + return lhs == rhs; + } +}; +} // namespace llvm #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index fc2f650eaed6..8dd16f6ce9e1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -268,26 +268,14 @@ class LValue { LValueBaseInfo getBaseInfo() const { return BaseInfo; } void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; } - static LValue makeAddr(Address address, clang::QualType T, - AlignmentSource Source = AlignmentSource::Type) { - LValue R; - R.LVType = Simple; - R.V = address.getPointer(); - R.ElementType = address.getElementType(); - R.Initialize(T, T.getQualifiers(), address.getAlignment(), - LValueBaseInfo(Source), TBAAAccessInfo()); - return R; - } - // FIXME: only have one of these static methods. - static LValue makeAddr(Address address, clang::QualType T, - LValueBaseInfo LBI) { + static LValue makeAddr(Address address, clang::QualType T, LValueBaseInfo LBI, + TBAAAccessInfo tbaaInfo) { LValue R; R.LVType = Simple; R.V = address.getPointer(); R.ElementType = address.getElementType(); - R.Initialize(T, T.getQualifiers(), address.getAlignment(), LBI, - TBAAAccessInfo()); + R.Initialize(T, T.getQualifiers(), address.getAlignment(), LBI, tbaaInfo); return R; } @@ -307,6 +295,7 @@ class LValue { } TBAAAccessInfo getTBAAInfo() const { return tbaaInfo; } + void setTBAAInfo(TBAAAccessInfo info) { tbaaInfo = info; } const clang::Qualifiers &getQuals() const { return Quals; } clang::Qualifiers &getQuals() { return Quals; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index c740cb23e626..542bfaf105c9 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -106,6 +106,10 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << dynCastInfoAttr.getAlias(); return AliasResult::FinalAlias; } + if (auto tbaaAttr = mlir::dyn_cast(attr)) { + os << tbaaAttr.getMnemonic(); + return AliasResult::OverridableAlias; + } return AliasResult::NoAlias; } diff --git a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp index e75b30b1c1c3..80963353a304 100644 --- a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp @@ -11,6 +11,7 @@ // //===----------------------------------------------------------------------===// +#include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Matchers.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" @@ -52,8 +53,7 @@ void cir::AllocaOp::handleBlockArgument(const MemorySlot &slot, std::optional cir::AllocaOp::handlePromotionComplete(const MemorySlot &slot, - Value defaultValue, - OpBuilder &builder) { + Value defaultValue, OpBuilder &builder) { if (defaultValue && defaultValue.use_empty()) defaultValue.getDefiningOp()->erase(); this->erase(); @@ -150,7 +150,8 @@ DeletionKind cir::CopyOp::removeBlockingUses( const DataLayout &dataLayout) { if (loadsFrom(slot)) builder.create(getLoc(), reachingDefinition, getDst(), false, - mlir::IntegerAttr{}, cir::MemOrderAttr()); + mlir::IntegerAttr{}, cir::MemOrderAttr(), + mlir::ArrayAttr{}); return DeletionKind::Delete; } diff --git a/clang/test/CIR/CodeGen/tbaa.c b/clang/test/CIR/CodeGen/tbaa.c new file mode 100644 index 000000000000..43cdde47ecb7 --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa.c @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// CIR: #tbaa[[TBAA_NO:.*]] = #cir.tbaa +void f(int *a, float *b) { + // CIR: cir.scope + // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) + // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s32i tbaa([#tbaa[[TBAA_NO]]]) + // CIR: cir.if + // CIR: %[[C2:.*]] = cir.const #cir.fp<2 + // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) + // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.float, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) + // CIR: else + // CIR: %[[C3:.*]] = cir.const #cir.fp<3 + // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) + // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.float, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) + if (*a == 1) { + *b = 2.0f; + } else { + *b = 3.0f; + } +} From 4eb60b48b88536a9457237eb97bd32262cc8260f Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Tue, 19 Nov 2024 20:58:00 +0300 Subject: [PATCH 2097/2301] [CIR][ABI][AArch64][Lowering] Support structures with padding (#1118) The title describes the purpose of the PR. It adds initial support for structures with padding to the call convention lowering for AArch64. I have also _initial support_ for the missing feature [FinishLayout](https://github.com/llvm/clangir/blob/5c5d58402bebdb1e851fb055f746662d4e7eb586/clang/lib/AST/RecordLayoutBuilder.cpp#L786) for records, and the logic is gotten from the original codegen. Finally, I added a test for verification. --- clang/include/clang/CIR/MissingFeatures.h | 3 -- .../TargetLowering/LowerFunction.cpp | 28 +++++++++++- .../TargetLowering/RecordLayoutBuilder.cpp | 43 +++++++++++++++---- .../AArch64/aarch64-cc-structs.c | 31 ++++++++++++- 4 files changed, 92 insertions(+), 13 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index f6e7b34ad197..8f56f0726f8a 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -404,9 +404,6 @@ struct MissingFeatures { // specs. We should make it always present. static bool makeTripleAlwaysPresent() { return false; } - // This Itanium bit is currently being skipped in cir. - static bool itaniumRecordLayoutBuilderFinishLayout() { return false; } - static bool mustProgress() { return false; } static bool skipTempCopy() { return false; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 483ce026ee0e..06242e52383a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -347,6 +347,31 @@ mlir::Value emitAddressAtOffset(LowerFunction &LF, mlir::Value addr, return addr; } +/// Creates a coerced value from \param src having a type of \param ty which is +/// a non primitive type +mlir::Value createCoercedNonPrimitive(mlir::Value src, mlir::Type ty, + LowerFunction &LF) { + if (auto load = mlir::dyn_cast(src.getDefiningOp())) { + auto &bld = LF.getRewriter(); + auto addr = load.getAddr(); + + auto oldAlloca = mlir::dyn_cast(addr.getDefiningOp()); + auto alloca = bld.create( + src.getLoc(), bld.getType(ty), ty, + /*name=*/llvm::StringRef(""), oldAlloca.getAlignmentAttr()); + + auto tySize = LF.LM.getDataLayout().getTypeStoreSize(ty); + createMemCpy(LF, alloca, addr, tySize.getFixedValue()); + + auto newLoad = bld.create(src.getLoc(), alloca.getResult()); + bld.replaceAllOpUsesWith(load, newLoad); + + return newLoad; + } + + cir_cconv_unreachable("NYI"); +} + /// After the calling convention is lowered, an ABI-agnostic type might have to /// be loaded back to its ABI-aware couterpart so it may be returned. If they /// differ, we have to do a coerced load. A coerced load, which means to load a @@ -370,7 +395,8 @@ mlir::Value castReturnValue(mlir::Value Src, mlir::Type Ty, LowerFunction &LF) { auto intTy = mlir::dyn_cast(Ty); if (intTy && !intTy.isPrimitive()) - cir_cconv_unreachable("non-primitive types NYI"); + return createCoercedNonPrimitive(Src, Ty, LF); + llvm::TypeSize DstSize = LF.LM.getDataLayout().getTypeAllocSize(Ty); // FIXME(cir): Do we need the EnterStructPointerForCoercedAccess routine here? diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index 627f3b048817..db2af4ac9177 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -223,6 +223,9 @@ class ItaniumRecordLayoutBuilder { /// Initialize record layout for the given record decl. void initializeLayout(const Type Ty); + /// Finalize record layout. Adjust record size based on the alignment. + void finishLayout(const StructType D); + uint64_t getDataSizeInBits() const { return DataSize; } void setDataSize(clang::CharUnits NewSize) { @@ -243,8 +246,7 @@ void ItaniumRecordLayoutBuilder::layout(const StructType RT) { // FIXME(cir): Handle virtual-related layouts. cir_cconv_assert(!cir::MissingFeatures::getCXXRecordBases()); - cir_cconv_assert( - !cir::MissingFeatures::itaniumRecordLayoutBuilderFinishLayout()); + finishLayout(RT); } void ItaniumRecordLayoutBuilder::initializeLayout(const mlir::Type Ty) { @@ -478,6 +480,31 @@ void ItaniumRecordLayoutBuilder::layoutFields(const StructType D) { } } +void ItaniumRecordLayoutBuilder::finishLayout(const StructType D) { + // If we have any remaining field tail padding, include that in the overall + // size. + setSize(std::max(getSizeInBits(), (uint64_t)Context.toBits(PaddedFieldSize))); + + // Finally, round the size of the record up to the alignment of the + // record itself. + uint64_t unpaddedSize = getSizeInBits() - UnfilledBitsInLastUnit; + uint64_t unpackedSizeInBits = + llvm::alignTo(getSizeInBits(), Context.toBits(UnpackedAlignment)); + + uint64_t roundedSize = llvm::alignTo( + getSizeInBits(), + Context.toBits(!Context.getTargetInfo().defaultsToAIXPowerAlignment() + ? Alignment + : PreferredAlignment)); + + if (UseExternalLayout) { + cir_cconv_unreachable("NYI"); + } + + // Set the size to the final size. + setSize(roundedSize); +} + void ItaniumRecordLayoutBuilder::UpdateAlignment( clang::CharUnits NewAlignment, clang::CharUnits UnpackedNewAlignment, clang::CharUnits PreferredNewAlignment) { @@ -521,13 +548,13 @@ void ItaniumRecordLayoutBuilder::checkFieldPadding( // Warn if padding was introduced to the struct/class. if (!IsUnion && Offset > UnpaddedOffset) { - unsigned PadSize = Offset - UnpaddedOffset; - // bool InBits = true; - if (PadSize % CharBitNum == 0) { - PadSize = PadSize / CharBitNum; - // InBits = false; + unsigned padSize = Offset - UnpaddedOffset; + bool inBits = true; + if (padSize % CharBitNum == 0) { + padSize = padSize / CharBitNum; + inBits = false; } - cir_cconv_assert(cir::MissingFeatures::bitFieldPaddingDiagnostics()); + cir_cconv_assert(!cir::MissingFeatures::bitFieldPaddingDiagnostics()); } if (isPacked && Offset != UnpackedOffset) { HasPackedField = true; diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index 93f87db39cfb..969d40842b75 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -204,4 +204,33 @@ GT_128 call_and_get_gt_128() { // LLVM: %[[#V2:]] = alloca [2 x i64], i64 1, align 8 // LLVM: store [2 x i64] %[[#ARG]], ptr %[[#V2]], align 8 // LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#V1]], ptr %[[#V2]], i64 12, i1 false) -void passS(S s) {} \ No newline at end of file +void passS(S s) {} + +typedef struct { + uint8_t a; + uint16_t b; + uint8_t c; +} S_PAD; + +// CHECK: cir.func {{.*@ret_s_pad}}() -> !u48i +// CHECK: %[[#V0:]] = cir.alloca !ty_S_PAD, !cir.ptr, ["__retval"] {alignment = 2 : i64} +// CHECK: %[[#V1:]] = cir.load %[[#V0]] : !cir.ptr, !ty_S_PAD +// CHECK: %[[#V2:]] = cir.alloca !u48i, !cir.ptr, [""] {alignment = 2 : i64} +// CHECK: %[[#V3:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr) +// CHECK: %[[#V4:]] = cir.cast(bitcast, %[[#V2:]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V5:]] = cir.const #cir.int<6> : !u64i +// CHECK: cir.libc.memcpy %[[#V5]] bytes from %[[#V3]] to %[[#V4]] : !u64i, !cir.ptr +// CHECK: %[[#V6:]] = cir.load %[[#V2]] : !cir.ptr +// CHECK: cir.return %[[#V6]] + +// LLVM: i48 @ret_s_pad() +// LLVM: %[[#V1:]] = alloca %struct.S_PAD, i64 1, align 2 +// LLVM: %[[#V2:]] = load %struct.S_PAD, ptr %[[#V1]], align 2 +// LLVM: %[[#V3:]] = alloca i48, i64 1, align 2 +// LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#V3]], ptr %[[#V1]], i64 6, i1 false) +// LLVM: %[[#V4:]] = load i48, ptr %[[#V3]] +// LLVM: ret i48 %[[#V4]] +S_PAD ret_s_pad() { + S_PAD s; + return s; +} From 479a4abb80f703b0ff7cbf33d91d451c58ec007e Mon Sep 17 00:00:00 2001 From: Congcong Cai Date: Wed, 20 Nov 2024 12:49:38 +0800 Subject: [PATCH 2098/2301] [CIR] fix deref nullptr when verify symbol for `cir.get_global` (#1143) --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 ++-- clang/test/CIR/IR/invalid.cir | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 542bfaf105c9..287749fb85b3 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2023,8 +2023,8 @@ LogicalResult cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { // Verify that the result type underlying pointer type matches the type of // the referenced cir.global or cir.func op. - auto op = symbolTable.lookupNearestSymbolFrom(*this, getNameAttr()); - if (!(isa(op) || isa(op))) + auto *op = symbolTable.lookupNearestSymbolFrom(*this, getNameAttr()); + if (op == nullptr || !(isa(op) || isa(op))) return emitOpError("'") << getName() << "' does not reference a valid cir.global or cir.func"; diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index af516b2aaed6..01828fbe22b4 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1103,6 +1103,15 @@ module { // ----- +!s8i = !cir.int +cir.func @no_reference_global() { + // expected-error @below {{'cir.get_global' op 'str' does not reference a valid cir.global or cir.func}} + %0 = cir.get_global @str : !cir.ptr + cir.return +} + +// ----- + // expected-error@+1 {{invalid underlying type for long double}} cir.func @bad_long_double(%arg0 : !cir.long_double) -> () { cir.return From 564a58865a1de36cf585c51ef7314748cfe409bc Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 22 Nov 2024 14:05:22 +0800 Subject: [PATCH 2099/2301] [CIR][NFC] Refactor code for populating LLVM conversion patterns (#1152) The function `populateCIRToLLVMConversionPatterns` contains a spaghetti of LLVM dialect conversion patterns, which results in merge conflicts very easily. Besides, a few patterns are even registered for more than once, possibly due to careless resolution of merge conflicts. This PR attempts to mitigate this problem. Pattern names now are sorted in alphabetical order, and each source code line now only lists exactly one pattern name to reduce potential merge conflicts. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 135 ++++++++++++------ 1 file changed, 89 insertions(+), 46 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index c2a714a43ec2..bea705567854 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3859,55 +3859,98 @@ void populateCIRToLLVMConversionPatterns( stringGlobalsMap, argStringGlobalsMap, argsVarMap, patterns.getContext()); patterns.add< - CIRToLLVMCmpOpLowering, CIRToLLVMSelectOpLowering, - CIRToLLVMBitClrsbOpLowering, CIRToLLVMBitClzOpLowering, - CIRToLLVMBitCtzOpLowering, CIRToLLVMBitFfsOpLowering, - CIRToLLVMBitParityOpLowering, CIRToLLVMBitPopcountOpLowering, - CIRToLLVMAtomicCmpXchgLowering, CIRToLLVMAtomicXchgLowering, - CIRToLLVMAtomicFetchLowering, CIRToLLVMByteswapOpLowering, - CIRToLLVMRotateOpLowering, CIRToLLVMBrCondOpLowering, - CIRToLLVMPtrStrideOpLowering, CIRToLLVMCallOpLowering, - CIRToLLVMTryCallOpLowering, CIRToLLVMEhInflightOpLowering, - CIRToLLVMUnaryOpLowering, CIRToLLVMBinOpLowering, - CIRToLLVMBinOpOverflowOpLowering, CIRToLLVMShiftOpLowering, - CIRToLLVMLoadOpLowering, CIRToLLVMConstantOpLowering, - CIRToLLVMStoreOpLowering, CIRToLLVMFuncOpLowering, - CIRToLLVMCastOpLowering, CIRToLLVMGlobalOpLowering, - CIRToLLVMGetGlobalOpLowering, CIRToLLVMComplexCreateOpLowering, - CIRToLLVMComplexRealOpLowering, CIRToLLVMComplexImagOpLowering, - CIRToLLVMComplexRealPtrOpLowering, CIRToLLVMComplexImagPtrOpLowering, - CIRToLLVMVAStartOpLowering, CIRToLLVMVAEndOpLowering, - CIRToLLVMVACopyOpLowering, CIRToLLVMVAArgOpLowering, - CIRToLLVMBrOpLowering, CIRToLLVMGetMemberOpLowering, - CIRToLLVMGetRuntimeMemberOpLowering, CIRToLLVMSwitchFlatOpLowering, - CIRToLLVMPtrDiffOpLowering, CIRToLLVMCopyOpLowering, - CIRToLLVMMemCpyOpLowering, CIRToLLVMMemChrOpLowering, - CIRToLLVMAbsOpLowering, CIRToLLVMExpectOpLowering, - CIRToLLVMVTableAddrPointOpLowering, CIRToLLVMVecCreateOpLowering, - CIRToLLVMVecCmpOpLowering, CIRToLLVMVecSplatOpLowering, - CIRToLLVMVecTernaryOpLowering, CIRToLLVMVecShuffleDynamicOpLowering, - CIRToLLVMVecShuffleOpLowering, CIRToLLVMStackSaveOpLowering, - CIRToLLVMUnreachableOpLowering, CIRToLLVMTrapOpLowering, - CIRToLLVMInlineAsmOpLowering, CIRToLLVMSetBitfieldOpLowering, - CIRToLLVMGetBitfieldOpLowering, CIRToLLVMPrefetchOpLowering, - CIRToLLVMObjSizeOpLowering, CIRToLLVMIsConstantOpLowering, - CIRToLLVMCmpThreeWayOpLowering, CIRToLLVMMemCpyOpLowering, - CIRToLLVMIsConstantOpLowering, CIRToLLVMCmpThreeWayOpLowering, - CIRToLLVMReturnAddrOpLowering, CIRToLLVMClearCacheOpLowering, - CIRToLLVMEhTypeIdOpLowering, CIRToLLVMCatchParamOpLowering, - CIRToLLVMResumeOpLowering, CIRToLLVMAllocExceptionOpLowering, - CIRToLLVMFreeExceptionOpLowering, CIRToLLVMThrowOpLowering, - CIRToLLVMLLVMIntrinsicCallOpLowering, CIRToLLVMAssumeOpLowering, - CIRToLLVMAssumeAlignedOpLowering, CIRToLLVMAssumeSepStorageOpLowering, - CIRToLLVMBaseClassAddrOpLowering, CIRToLLVMDerivedClassAddrOpLowering, - CIRToLLVMVTTAddrPointOpLowering, CIRToLLVMIsFPClassOpLowering, - CIRToLLVMAbsOpLowering, CIRToLLVMMemMoveOpLowering, - CIRToLLVMMemSetOpLowering, CIRToLLVMMemSetInlineOpLowering, - CIRToLLVMMemCpyInlineOpLowering, CIRToLLVMSignBitOpLowering, - CIRToLLVMPtrMaskOpLowering + // clang-format off + CIRToLLVMAbsOpLowering, + CIRToLLVMAllocExceptionOpLowering, + CIRToLLVMAssumeAlignedOpLowering, + CIRToLLVMAssumeOpLowering, + CIRToLLVMAssumeSepStorageOpLowering, + CIRToLLVMAtomicCmpXchgLowering, + CIRToLLVMAtomicFetchLowering, + CIRToLLVMAtomicXchgLowering, + CIRToLLVMBaseClassAddrOpLowering, + CIRToLLVMBinOpLowering, + CIRToLLVMBinOpOverflowOpLowering, + CIRToLLVMBitClrsbOpLowering, + CIRToLLVMBitClzOpLowering, + CIRToLLVMBitCtzOpLowering, + CIRToLLVMBitFfsOpLowering, + CIRToLLVMBitParityOpLowering, + CIRToLLVMBitPopcountOpLowering, + CIRToLLVMBrCondOpLowering, + CIRToLLVMBrOpLowering, + CIRToLLVMByteswapOpLowering, + CIRToLLVMCallOpLowering, + CIRToLLVMCastOpLowering, + CIRToLLVMCatchParamOpLowering, + CIRToLLVMClearCacheOpLowering, + CIRToLLVMCmpOpLowering, + CIRToLLVMCmpThreeWayOpLowering, + CIRToLLVMComplexCreateOpLowering, + CIRToLLVMComplexImagOpLowering, + CIRToLLVMComplexImagPtrOpLowering, + CIRToLLVMComplexRealOpLowering, + CIRToLLVMComplexRealPtrOpLowering, + CIRToLLVMConstantOpLowering, + CIRToLLVMCopyOpLowering, + CIRToLLVMDerivedClassAddrOpLowering, + CIRToLLVMEhInflightOpLowering, + CIRToLLVMEhTypeIdOpLowering, + CIRToLLVMExpectOpLowering, + CIRToLLVMFreeExceptionOpLowering, + CIRToLLVMFuncOpLowering, + CIRToLLVMGetBitfieldOpLowering, + CIRToLLVMGetGlobalOpLowering, + CIRToLLVMGetMemberOpLowering, + CIRToLLVMGetRuntimeMemberOpLowering, + CIRToLLVMGlobalOpLowering, + CIRToLLVMInlineAsmOpLowering, + CIRToLLVMIsConstantOpLowering, + CIRToLLVMIsFPClassOpLowering, + CIRToLLVMLLVMIntrinsicCallOpLowering, + CIRToLLVMLoadOpLowering, + CIRToLLVMMemChrOpLowering, + CIRToLLVMMemCpyInlineOpLowering, + CIRToLLVMMemCpyOpLowering, + CIRToLLVMMemMoveOpLowering, + CIRToLLVMMemSetInlineOpLowering, + CIRToLLVMMemSetOpLowering, + CIRToLLVMObjSizeOpLowering, + CIRToLLVMPrefetchOpLowering, + CIRToLLVMPtrDiffOpLowering, + CIRToLLVMPtrMaskOpLowering, + CIRToLLVMPtrStrideOpLowering, + CIRToLLVMResumeOpLowering, + CIRToLLVMReturnAddrOpLowering, + CIRToLLVMRotateOpLowering, + CIRToLLVMSelectOpLowering, + CIRToLLVMSetBitfieldOpLowering, + CIRToLLVMShiftOpLowering, + CIRToLLVMSignBitOpLowering, + CIRToLLVMStackSaveOpLowering, + CIRToLLVMStoreOpLowering, + CIRToLLVMSwitchFlatOpLowering, + CIRToLLVMThrowOpLowering, + CIRToLLVMTrapOpLowering, + CIRToLLVMTryCallOpLowering, + CIRToLLVMUnaryOpLowering, + CIRToLLVMUnreachableOpLowering, + CIRToLLVMVAArgOpLowering, + CIRToLLVMVACopyOpLowering, + CIRToLLVMVAEndOpLowering, + CIRToLLVMVAStartOpLowering, + CIRToLLVMVecCmpOpLowering, + CIRToLLVMVecCreateOpLowering, + CIRToLLVMVecShuffleDynamicOpLowering, + CIRToLLVMVecShuffleOpLowering, + CIRToLLVMVecSplatOpLowering, + CIRToLLVMVecTernaryOpLowering, + CIRToLLVMVTableAddrPointOpLowering, + CIRToLLVMVTTAddrPointOpLowering #define GET_BUILTIN_LOWERING_LIST #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_LIST + // clang-format on >(converter, patterns.getContext()); } From 1e3a8e32215a56b3eeedb85be7ce6c888f61d41a Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 22 Nov 2024 19:31:16 -0500 Subject: [PATCH 2100/2301] [CIR][Dialect] Extend UnaryFPToFPBuiltinOp to vector of FP type (#1132) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 10 +++ .../CIR/Lowering/builtin-floating-point.cir | 86 ++++++++++++++++++- 3 files changed, 97 insertions(+), 3 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 897098f4c25c..702e62a98435 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4342,8 +4342,8 @@ def LLrintOp : UnaryFPToIntBuiltinOp<"llrint", "LlrintOp">; class UnaryFPToFPBuiltinOp : CIR_Op { - let arguments = (ins CIR_AnyFloat:$src); - let results = (outs CIR_AnyFloat:$result); + let arguments = (ins CIR_AnyFloatOrVecOfFloat:$src); + let results = (outs CIR_AnyFloatOrVecOfFloat:$result); let summary = "libc builtin equivalent ignoring " "floating point exceptions and errno"; let assemblyFormat = "$src `:` type($src) attr-dict"; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index f73d80402047..c805b6887cf3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -553,10 +553,20 @@ def SignedIntegerVector : Type< ]>, "!cir.vector of !cir.int"> { } +// Vector of Float type +def FPVector : Type< + And<[ + CPred<"::mlir::isa<::cir::VectorType>($_self)">, + CPred<"::mlir::isa<::cir::SingleType, ::cir::DoubleType>(" + "::mlir::cast<::cir::VectorType>($_self).getEltType())">, + ]>, "!cir.vector of !cir.fp"> { +} + // Constraints def CIR_AnyIntOrVecOfInt: AnyTypeOf<[CIR_IntType, IntegerVector]>; def CIR_AnySignedIntOrVecOfSignedInt: AnyTypeOf< [PrimitiveSInt, SignedIntegerVector]>; +def CIR_AnyFloatOrVecOfFloat: AnyTypeOf<[CIR_AnyFloat, FPVector]>; // Pointer to Arrays def ArrayPtr : Type< diff --git a/clang/test/CIR/Lowering/builtin-floating-point.cir b/clang/test/CIR/Lowering/builtin-floating-point.cir index 82b733233da3..157b3abe10f5 100644 --- a/clang/test/CIR/Lowering/builtin-floating-point.cir +++ b/clang/test/CIR/Lowering/builtin-floating-point.cir @@ -2,49 +2,133 @@ // RUN: FileCheck --input-file=%t.ll %s module { - cir.func @test(%arg0 : !cir.float) { + cir.func @test(%arg0 : !cir.float, %arg1 : !cir.vector, %arg2 : !cir.vector) { %1 = cir.cos %arg0 : !cir.float // CHECK: llvm.intr.cos(%arg0) : (f32) -> f32 + + %101 = cir.cos %arg1 : !cir.vector + // CHECK: llvm.intr.cos(%arg1) : (vector<2xf64>) -> vector<2xf64> + %201 = cir.cos %arg2 : !cir.vector + // CHECK: llvm.intr.cos(%arg2) : (vector<4xf32>) -> vector<4xf32> + %2 = cir.ceil %arg0 : !cir.float // CHECK: llvm.intr.ceil(%arg0) : (f32) -> f32 + %102 = cir.ceil %arg1 : !cir.vector + // CHECK: llvm.intr.ceil(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %202 = cir.ceil %arg2 : !cir.vector + // CHECK: llvm.intr.ceil(%arg2) : (vector<4xf32>) -> vector<4xf32> + %3 = cir.exp %arg0 : !cir.float // CHECK: llvm.intr.exp(%arg0) : (f32) -> f32 + %103 = cir.exp %arg1 : !cir.vector + // CHECK: llvm.intr.exp(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %203 = cir.exp %arg2 : !cir.vector + // CHECK: llvm.intr.exp(%arg2) : (vector<4xf32>) -> vector<4xf32> + %4 = cir.exp2 %arg0 : !cir.float // CHECK: llvm.intr.exp2(%arg0) : (f32) -> f32 + %104 = cir.exp2 %arg1 : !cir.vector + // CHECK: llvm.intr.exp2(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %204 = cir.exp2 %arg2 : !cir.vector + // CHECK: llvm.intr.exp2(%arg2) : (vector<4xf32>) -> vector<4xf32> + %5 = cir.fabs %arg0 : !cir.float // CHECK: llvm.intr.fabs(%arg0) : (f32) -> f32 + %105 = cir.fabs %arg1 : !cir.vector + // CHECK: llvm.intr.fabs(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %205 = cir.fabs %arg2 : !cir.vector + // CHECK: llvm.intr.fabs(%arg2) : (vector<4xf32>) -> vector<4xf32> + %6 = cir.floor %arg0 : !cir.float // CHECK: llvm.intr.floor(%arg0) : (f32) -> f32 + %106 = cir.floor %arg1 : !cir.vector + // CHECK: llvm.intr.floor(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %206 = cir.floor %arg2 : !cir.vector + // CHECK: llvm.intr.floor(%arg2) : (vector<4xf32>) -> vector<4xf32> + %7 = cir.log %arg0 : !cir.float // CHECK: llvm.intr.log(%arg0) : (f32) -> f32 + %107 = cir.log %arg1 : !cir.vector + // CHECK: llvm.intr.log(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %207 = cir.log %arg2 : !cir.vector + // CHECK: llvm.intr.log(%arg2) : (vector<4xf32>) -> vector<4xf32> + %8 = cir.log10 %arg0 : !cir.float // CHECK: llvm.intr.log10(%arg0) : (f32) -> f32 + %108 = cir.log10 %arg1 : !cir.vector + // CHECK: llvm.intr.log10(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %208 = cir.log10 %arg2 : !cir.vector + // CHECK: llvm.intr.log10(%arg2) : (vector<4xf32>) -> vector<4xf32> + %9 = cir.log2 %arg0 : !cir.float // CHECK: llvm.intr.log2(%arg0) : (f32) -> f32 + %109 = cir.log2 %arg1 : !cir.vector + // CHECK: llvm.intr.log2(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %209 = cir.log2 %arg2 : !cir.vector + // CHECK: llvm.intr.log2(%arg2) : (vector<4xf32>) -> vector<4xf32> + %10 = cir.nearbyint %arg0 : !cir.float // CHECK: llvm.intr.nearbyint(%arg0) : (f32) -> f32 + %110 = cir.nearbyint %arg1 : !cir.vector + // CHECK: llvm.intr.nearbyint(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %210 = cir.nearbyint %arg2 : !cir.vector + // CHECK: llvm.intr.nearbyint(%arg2) : (vector<4xf32>) -> vector<4xf32> + %11 = cir.rint %arg0 : !cir.float // CHECK: llvm.intr.rint(%arg0) : (f32) -> f32 + %111 = cir.rint %arg1 : !cir.vector + // CHECK: llvm.intr.rint(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %211 = cir.rint %arg2 : !cir.vector + // CHECK: llvm.intr.rint(%arg2) : (vector<4xf32>) -> vector<4xf32> + %12 = cir.round %arg0 : !cir.float // CHECK: llvm.intr.round(%arg0) : (f32) -> f32 + %112 = cir.round %arg1 : !cir.vector + // CHECK: llvm.intr.round(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %212 = cir.round %arg2 : !cir.vector + // CHECK: llvm.intr.round(%arg2) : (vector<4xf32>) -> vector<4xf32> + %13 = cir.sin %arg0 : !cir.float // CHECK: llvm.intr.sin(%arg0) : (f32) -> f32 + %113 = cir.sin %arg1 : !cir.vector + // CHECK: llvm.intr.sin(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %213 = cir.sin %arg2 : !cir.vector + // CHECK: llvm.intr.sin(%arg2) : (vector<4xf32>) -> vector<4xf32> + %14 = cir.sqrt %arg0 : !cir.float // CHECK: llvm.intr.sqrt(%arg0) : (f32) -> f32 + %114 = cir.sqrt %arg1 : !cir.vector + // CHECK: llvm.intr.sqrt(%arg1) : (vector<2xf64>) -> vector<2xf64> + + %214 = cir.sqrt %arg2 : !cir.vector + // CHECK: llvm.intr.sqrt(%arg2) : (vector<4xf32>) -> vector<4xf32> + cir.return } } From e3d9fc3efb75ab2d2f6e365fffbd25ad2970e8e2 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 22 Nov 2024 19:31:41 -0500 Subject: [PATCH 2101/2301] [CIR][Builtin][Neon][NFC] Introduce skeleton of emitCommonNeonSISDBuiltinExpr (#1133) This PR is a NFC as we just NYI every builtID of neon SISD. We will implement them in subsequent PRs. --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 508 +++++++++++++++++- 1 file changed, 498 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index fcb5119b7fff..b9042573d2a3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2592,6 +2592,489 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( return emitCommonNeonCallPattern0(*this, intrincsName, argTypes, ops, vTy, e); } +static mlir::Value emitCommonNeonSISDBuiltinExpr( + CIRGenFunction &cgf, const ARMVectorIntrinsicInfo &info, + llvm::SmallVectorImpl &args, const CallExpr *expr) { + unsigned builtinID = info.BuiltinID; + switch (builtinID) { + default: + llvm::errs() << getAArch64SIMDIntrinsicString(builtinID) << " "; + llvm_unreachable("in emitCommonNeonSISDBuiltinExpr NYI"); + case NEON::BI__builtin_neon_vabdd_f64: + llvm_unreachable(" neon_vabdd_f64 NYI "); + case NEON::BI__builtin_neon_vabds_f32: + llvm_unreachable(" neon_vabds_f32 NYI "); + case NEON::BI__builtin_neon_vabsd_s64: + llvm_unreachable(" neon_vabsd_s64 NYI "); + case NEON::BI__builtin_neon_vaddlv_s32: + llvm_unreachable(" neon_vaddlv_s32 NYI "); + case NEON::BI__builtin_neon_vaddlv_u32: + llvm_unreachable(" neon_vaddlv_u32 NYI "); + case NEON::BI__builtin_neon_vaddlvq_s32: + llvm_unreachable(" neon_vaddlvq_s32 NYI "); + case NEON::BI__builtin_neon_vaddlvq_u32: + llvm_unreachable(" neon_vaddlvq_u32 NYI "); + case NEON::BI__builtin_neon_vaddv_f32: + llvm_unreachable(" neon_vaddv_f32 NYI "); + case NEON::BI__builtin_neon_vaddv_s32: + llvm_unreachable(" neon_vaddv_s32 NYI "); + case NEON::BI__builtin_neon_vaddv_u32: + llvm_unreachable(" neon_vaddv_u32 NYI "); + case NEON::BI__builtin_neon_vaddvq_f32: + llvm_unreachable(" neon_vaddvq_f32 NYI "); + case NEON::BI__builtin_neon_vaddvq_f64: + llvm_unreachable(" neon_vaddvq_f64 NYI "); + case NEON::BI__builtin_neon_vaddvq_s32: + llvm_unreachable(" neon_vaddvq_s32 NYI "); + case NEON::BI__builtin_neon_vaddvq_s64: + llvm_unreachable(" neon_vaddvq_s64 NYI "); + case NEON::BI__builtin_neon_vaddvq_u32: + llvm_unreachable(" neon_vaddvq_u32 NYI "); + case NEON::BI__builtin_neon_vaddvq_u64: + llvm_unreachable(" neon_vaddvq_u64 NYI "); + case NEON::BI__builtin_neon_vcaged_f64: + llvm_unreachable(" neon_vcaged_f64 NYI "); + case NEON::BI__builtin_neon_vcages_f32: + llvm_unreachable(" neon_vcages_f32 NYI "); + case NEON::BI__builtin_neon_vcagtd_f64: + llvm_unreachable(" neon_vcagtd_f64 NYI "); + case NEON::BI__builtin_neon_vcagts_f32: + llvm_unreachable(" neon_vcagts_f32 NYI "); + case NEON::BI__builtin_neon_vcaled_f64: + llvm_unreachable(" neon_vcaled_f64 NYI "); + case NEON::BI__builtin_neon_vcales_f32: + llvm_unreachable(" neon_vcales_f32 NYI "); + case NEON::BI__builtin_neon_vcaltd_f64: + llvm_unreachable(" neon_vcaltd_f64 NYI "); + case NEON::BI__builtin_neon_vcalts_f32: + llvm_unreachable(" neon_vcalts_f32 NYI "); + case NEON::BI__builtin_neon_vcvtad_s64_f64: + llvm_unreachable(" neon_vcvtad_s64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtad_u64_f64: + llvm_unreachable(" neon_vcvtad_u64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtas_s32_f32: + llvm_unreachable(" neon_vcvtas_s32_f32 NYI "); + case NEON::BI__builtin_neon_vcvtas_u32_f32: + llvm_unreachable(" neon_vcvtas_u32_f32 NYI "); + case NEON::BI__builtin_neon_vcvtd_n_f64_s64: + llvm_unreachable(" neon_vcvtd_n_f64_s64 NYI "); + case NEON::BI__builtin_neon_vcvtd_n_f64_u64: + llvm_unreachable(" neon_vcvtd_n_f64_u64 NYI "); + case NEON::BI__builtin_neon_vcvtd_n_s64_f64: + llvm_unreachable(" neon_vcvtd_n_s64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtd_n_u64_f64: + llvm_unreachable(" neon_vcvtd_n_u64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtd_s64_f64: + llvm_unreachable(" neon_vcvtd_s64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtd_u64_f64: + llvm_unreachable(" neon_vcvtd_u64_f64 NYI "); + case NEON::BI__builtin_neon_vcvth_bf16_f32: + llvm_unreachable(" neon_vcvth_bf16_f32 NYI "); + case NEON::BI__builtin_neon_vcvtmd_s64_f64: + llvm_unreachable(" neon_vcvtmd_s64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtmd_u64_f64: + llvm_unreachable(" neon_vcvtmd_u64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtms_s32_f32: + llvm_unreachable(" neon_vcvtms_s32_f32 NYI "); + case NEON::BI__builtin_neon_vcvtms_u32_f32: + llvm_unreachable(" neon_vcvtms_u32_f32 NYI "); + case NEON::BI__builtin_neon_vcvtnd_s64_f64: + llvm_unreachable(" neon_vcvtnd_s64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtnd_u64_f64: + llvm_unreachable(" neon_vcvtnd_u64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtns_s32_f32: + llvm_unreachable(" neon_vcvtns_s32_f32 NYI "); + case NEON::BI__builtin_neon_vcvtns_u32_f32: + llvm_unreachable(" neon_vcvtns_u32_f32 NYI "); + case NEON::BI__builtin_neon_vcvtpd_s64_f64: + llvm_unreachable(" neon_vcvtpd_s64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtpd_u64_f64: + llvm_unreachable(" neon_vcvtpd_u64_f64 NYI "); + case NEON::BI__builtin_neon_vcvtps_s32_f32: + llvm_unreachable(" neon_vcvtps_s32_f32 NYI "); + case NEON::BI__builtin_neon_vcvtps_u32_f32: + llvm_unreachable(" neon_vcvtps_u32_f32 NYI "); + case NEON::BI__builtin_neon_vcvts_n_f32_s32: + llvm_unreachable(" neon_vcvts_n_f32_s32 NYI "); + case NEON::BI__builtin_neon_vcvts_n_f32_u32: + llvm_unreachable(" neon_vcvts_n_f32_u32 NYI "); + case NEON::BI__builtin_neon_vcvts_n_s32_f32: + llvm_unreachable(" neon_vcvts_n_s32_f32 NYI "); + case NEON::BI__builtin_neon_vcvts_n_u32_f32: + llvm_unreachable(" neon_vcvts_n_u32_f32 NYI "); + case NEON::BI__builtin_neon_vcvts_s32_f32: + llvm_unreachable(" neon_vcvts_s32_f32 NYI "); + case NEON::BI__builtin_neon_vcvts_u32_f32: + llvm_unreachable(" neon_vcvts_u32_f32 NYI "); + case NEON::BI__builtin_neon_vcvtxd_f32_f64: + llvm_unreachable(" neon_vcvtxd_f32_f64 NYI "); + case NEON::BI__builtin_neon_vmaxnmv_f32: + llvm_unreachable(" neon_vmaxnmv_f32 NYI "); + case NEON::BI__builtin_neon_vmaxnmvq_f32: + llvm_unreachable(" neon_vmaxnmvq_f32 NYI "); + case NEON::BI__builtin_neon_vmaxnmvq_f64: + llvm_unreachable(" neon_vmaxnmvq_f64 NYI "); + case NEON::BI__builtin_neon_vmaxv_f32: + llvm_unreachable(" neon_vmaxv_f32 NYI "); + case NEON::BI__builtin_neon_vmaxv_s32: + llvm_unreachable(" neon_vmaxv_s32 NYI "); + case NEON::BI__builtin_neon_vmaxv_u32: + llvm_unreachable(" neon_vmaxv_u32 NYI "); + case NEON::BI__builtin_neon_vmaxvq_f32: + llvm_unreachable(" neon_vmaxvq_f32 NYI "); + case NEON::BI__builtin_neon_vmaxvq_f64: + llvm_unreachable(" neon_vmaxvq_f64 NYI "); + case NEON::BI__builtin_neon_vmaxvq_s32: + llvm_unreachable(" neon_vmaxvq_s32 NYI "); + case NEON::BI__builtin_neon_vmaxvq_u32: + llvm_unreachable(" neon_vmaxvq_u32 NYI "); + case NEON::BI__builtin_neon_vminnmv_f32: + llvm_unreachable(" neon_vminnmv_f32 NYI "); + case NEON::BI__builtin_neon_vminnmvq_f32: + llvm_unreachable(" neon_vminnmvq_f32 NYI "); + case NEON::BI__builtin_neon_vminnmvq_f64: + llvm_unreachable(" neon_vminnmvq_f64 NYI "); + case NEON::BI__builtin_neon_vminv_f32: + llvm_unreachable(" neon_vminv_f32 NYI "); + case NEON::BI__builtin_neon_vminv_s32: + llvm_unreachable(" neon_vminv_s32 NYI "); + case NEON::BI__builtin_neon_vminv_u32: + llvm_unreachable(" neon_vminv_u32 NYI "); + case NEON::BI__builtin_neon_vminvq_f32: + llvm_unreachable(" neon_vminvq_f32 NYI "); + case NEON::BI__builtin_neon_vminvq_f64: + llvm_unreachable(" neon_vminvq_f64 NYI "); + case NEON::BI__builtin_neon_vminvq_s32: + llvm_unreachable(" neon_vminvq_s32 NYI "); + case NEON::BI__builtin_neon_vminvq_u32: + llvm_unreachable(" neon_vminvq_u32 NYI "); + case NEON::BI__builtin_neon_vmull_p64: + llvm_unreachable(" neon_vmull_p64 NYI "); + case NEON::BI__builtin_neon_vmulxd_f64: + llvm_unreachable(" neon_vmulxd_f64 NYI "); + case NEON::BI__builtin_neon_vmulxs_f32: + llvm_unreachable(" neon_vmulxs_f32 NYI "); + case NEON::BI__builtin_neon_vpaddd_s64: + llvm_unreachable(" neon_vpaddd_s64 NYI "); + case NEON::BI__builtin_neon_vpaddd_u64: + llvm_unreachable(" neon_vpaddd_u64 NYI "); + case NEON::BI__builtin_neon_vpmaxnmqd_f64: + llvm_unreachable(" neon_vpmaxnmqd_f64 NYI "); + case NEON::BI__builtin_neon_vpmaxnms_f32: + llvm_unreachable(" neon_vpmaxnms_f32 NYI "); + case NEON::BI__builtin_neon_vpmaxqd_f64: + llvm_unreachable(" neon_vpmaxqd_f64 NYI "); + case NEON::BI__builtin_neon_vpmaxs_f32: + llvm_unreachable(" neon_vpmaxs_f32 NYI "); + case NEON::BI__builtin_neon_vpminnmqd_f64: + llvm_unreachable(" neon_vpminnmqd_f64 NYI "); + case NEON::BI__builtin_neon_vpminnms_f32: + llvm_unreachable(" neon_vpminnms_f32 NYI "); + case NEON::BI__builtin_neon_vpminqd_f64: + llvm_unreachable(" neon_vpminqd_f64 NYI "); + case NEON::BI__builtin_neon_vpmins_f32: + llvm_unreachable(" neon_vpmins_f32 NYI "); + case NEON::BI__builtin_neon_vqabsb_s8: + llvm_unreachable(" neon_vqabsb_s8 NYI "); + case NEON::BI__builtin_neon_vqabsd_s64: + llvm_unreachable(" neon_vqabsd_s64 NYI "); + case NEON::BI__builtin_neon_vqabsh_s16: + llvm_unreachable(" neon_vqabsh_s16 NYI "); + case NEON::BI__builtin_neon_vqabss_s32: + llvm_unreachable(" neon_vqabss_s32 NYI "); + case NEON::BI__builtin_neon_vqaddb_s8: + llvm_unreachable(" neon_vqaddb_s8 NYI "); + case NEON::BI__builtin_neon_vqaddb_u8: + llvm_unreachable(" neon_vqaddb_u8 NYI "); + case NEON::BI__builtin_neon_vqaddd_s64: + llvm_unreachable(" neon_vqaddd_s64 NYI "); + case NEON::BI__builtin_neon_vqaddd_u64: + llvm_unreachable(" neon_vqaddd_u64 NYI "); + case NEON::BI__builtin_neon_vqaddh_s16: + llvm_unreachable(" neon_vqaddh_s16 NYI "); + case NEON::BI__builtin_neon_vqaddh_u16: + llvm_unreachable(" neon_vqaddh_u16 NYI "); + case NEON::BI__builtin_neon_vqadds_s32: + llvm_unreachable(" neon_vqadds_s32 NYI "); + case NEON::BI__builtin_neon_vqadds_u32: + llvm_unreachable(" neon_vqadds_u32 NYI "); + case NEON::BI__builtin_neon_vqdmulhh_s16: + llvm_unreachable(" neon_vqdmulhh_s16 NYI "); + case NEON::BI__builtin_neon_vqdmulhs_s32: + llvm_unreachable(" neon_vqdmulhs_s32 NYI "); + case NEON::BI__builtin_neon_vqdmullh_s16: + llvm_unreachable(" neon_vqdmullh_s16 NYI "); + case NEON::BI__builtin_neon_vqdmulls_s32: + llvm_unreachable(" neon_vqdmulls_s32 NYI "); + case NEON::BI__builtin_neon_vqmovnd_s64: + llvm_unreachable(" neon_vqmovnd_s64 NYI "); + case NEON::BI__builtin_neon_vqmovnd_u64: + llvm_unreachable(" neon_vqmovnd_u64 NYI "); + case NEON::BI__builtin_neon_vqmovnh_s16: + llvm_unreachable(" neon_vqmovnh_s16 NYI "); + case NEON::BI__builtin_neon_vqmovnh_u16: + llvm_unreachable(" neon_vqmovnh_u16 NYI "); + case NEON::BI__builtin_neon_vqmovns_s32: + llvm_unreachable(" neon_vqmovns_s32 NYI "); + case NEON::BI__builtin_neon_vqmovns_u32: + llvm_unreachable(" neon_vqmovns_u32 NYI "); + case NEON::BI__builtin_neon_vqmovund_s64: + llvm_unreachable(" neon_vqmovund_s64 NYI "); + case NEON::BI__builtin_neon_vqmovunh_s16: + llvm_unreachable(" neon_vqmovunh_s16 NYI "); + case NEON::BI__builtin_neon_vqmovuns_s32: + llvm_unreachable(" neon_vqmovuns_s32 NYI "); + case NEON::BI__builtin_neon_vqnegb_s8: + llvm_unreachable(" neon_vqnegb_s8 NYI "); + case NEON::BI__builtin_neon_vqnegd_s64: + llvm_unreachable(" neon_vqnegd_s64 NYI "); + case NEON::BI__builtin_neon_vqnegh_s16: + llvm_unreachable(" neon_vqnegh_s16 NYI "); + case NEON::BI__builtin_neon_vqnegs_s32: + llvm_unreachable(" neon_vqnegs_s32 NYI "); + case NEON::BI__builtin_neon_vqrdmlahh_s16: + llvm_unreachable(" neon_vqrdmlahh_s16 NYI "); + case NEON::BI__builtin_neon_vqrdmlahs_s32: + llvm_unreachable(" neon_vqrdmlahs_s32 NYI "); + case NEON::BI__builtin_neon_vqrdmlshh_s16: + llvm_unreachable(" neon_vqrdmlshh_s16 NYI "); + case NEON::BI__builtin_neon_vqrdmlshs_s32: + llvm_unreachable(" neon_vqrdmlshs_s32 NYI "); + case NEON::BI__builtin_neon_vqrdmulhh_s16: + llvm_unreachable(" neon_vqrdmulhh_s16 NYI "); + case NEON::BI__builtin_neon_vqrdmulhs_s32: + llvm_unreachable(" neon_vqrdmulhs_s32 NYI "); + case NEON::BI__builtin_neon_vqrshlb_s8: + llvm_unreachable(" neon_vqrshlb_s8 NYI "); + case NEON::BI__builtin_neon_vqrshlb_u8: + llvm_unreachable(" neon_vqrshlb_u8 NYI "); + case NEON::BI__builtin_neon_vqrshld_s64: + llvm_unreachable(" neon_vqrshld_s64 NYI "); + case NEON::BI__builtin_neon_vqrshld_u64: + llvm_unreachable(" neon_vqrshld_u64 NYI "); + case NEON::BI__builtin_neon_vqrshlh_s16: + llvm_unreachable(" neon_vqrshlh_s16 NYI "); + case NEON::BI__builtin_neon_vqrshlh_u16: + llvm_unreachable(" neon_vqrshlh_u16 NYI "); + case NEON::BI__builtin_neon_vqrshls_s32: + llvm_unreachable(" neon_vqrshls_s32 NYI "); + case NEON::BI__builtin_neon_vqrshls_u32: + llvm_unreachable(" neon_vqrshls_u32 NYI "); + case NEON::BI__builtin_neon_vqrshrnd_n_s64: + llvm_unreachable(" neon_vqrshrnd_n_s64 NYI "); + case NEON::BI__builtin_neon_vqrshrnd_n_u64: + llvm_unreachable(" neon_vqrshrnd_n_u64 NYI "); + case NEON::BI__builtin_neon_vqrshrnh_n_s16: + llvm_unreachable(" neon_vqrshrnh_n_s16 NYI "); + case NEON::BI__builtin_neon_vqrshrnh_n_u16: + llvm_unreachable(" neon_vqrshrnh_n_u16 NYI "); + case NEON::BI__builtin_neon_vqrshrns_n_s32: + llvm_unreachable(" neon_vqrshrns_n_s32 NYI "); + case NEON::BI__builtin_neon_vqrshrns_n_u32: + llvm_unreachable(" neon_vqrshrns_n_u32 NYI "); + case NEON::BI__builtin_neon_vqrshrund_n_s64: + llvm_unreachable(" neon_vqrshrund_n_s64 NYI "); + case NEON::BI__builtin_neon_vqrshrunh_n_s16: + llvm_unreachable(" neon_vqrshrunh_n_s16 NYI "); + case NEON::BI__builtin_neon_vqrshruns_n_s32: + llvm_unreachable(" neon_vqrshruns_n_s32 NYI "); + case NEON::BI__builtin_neon_vqshlb_n_s8: + llvm_unreachable(" neon_vqshlb_n_s8 NYI "); + case NEON::BI__builtin_neon_vqshlb_n_u8: + llvm_unreachable(" neon_vqshlb_n_u8 NYI "); + case NEON::BI__builtin_neon_vqshlb_s8: + llvm_unreachable(" neon_vqshlb_s8 NYI "); + case NEON::BI__builtin_neon_vqshlb_u8: + llvm_unreachable(" neon_vqshlb_u8 NYI "); + case NEON::BI__builtin_neon_vqshld_s64: + llvm_unreachable(" neon_vqshld_s64 NYI "); + case NEON::BI__builtin_neon_vqshld_u64: + llvm_unreachable(" neon_vqshld_u64 NYI "); + case NEON::BI__builtin_neon_vqshlh_n_s16: + llvm_unreachable(" neon_vqshlh_n_s16 NYI "); + case NEON::BI__builtin_neon_vqshlh_n_u16: + llvm_unreachable(" neon_vqshlh_n_u16 NYI "); + case NEON::BI__builtin_neon_vqshlh_s16: + llvm_unreachable(" neon_vqshlh_s16 NYI "); + case NEON::BI__builtin_neon_vqshlh_u16: + llvm_unreachable(" neon_vqshlh_u16 NYI "); + case NEON::BI__builtin_neon_vqshls_n_s32: + llvm_unreachable(" neon_vqshls_n_s32 NYI "); + case NEON::BI__builtin_neon_vqshls_n_u32: + llvm_unreachable(" neon_vqshls_n_u32 NYI "); + case NEON::BI__builtin_neon_vqshls_s32: + llvm_unreachable(" neon_vqshls_s32 NYI "); + case NEON::BI__builtin_neon_vqshls_u32: + llvm_unreachable(" neon_vqshls_u32 NYI "); + case NEON::BI__builtin_neon_vqshlub_n_s8: + llvm_unreachable(" neon_vqshlub_n_s8 NYI "); + case NEON::BI__builtin_neon_vqshluh_n_s16: + llvm_unreachable(" neon_vqshluh_n_s16 NYI "); + case NEON::BI__builtin_neon_vqshlus_n_s32: + llvm_unreachable(" neon_vqshlus_n_s32 NYI "); + case NEON::BI__builtin_neon_vqshrnd_n_s64: + llvm_unreachable(" neon_vqshrnd_n_s64 NYI "); + case NEON::BI__builtin_neon_vqshrnd_n_u64: + llvm_unreachable(" neon_vqshrnd_n_u64 NYI "); + case NEON::BI__builtin_neon_vqshrnh_n_s16: + llvm_unreachable(" neon_vqshrnh_n_s16 NYI "); + case NEON::BI__builtin_neon_vqshrnh_n_u16: + llvm_unreachable(" neon_vqshrnh_n_u16 NYI "); + case NEON::BI__builtin_neon_vqshrns_n_s32: + llvm_unreachable(" neon_vqshrns_n_s32 NYI "); + case NEON::BI__builtin_neon_vqshrns_n_u32: + llvm_unreachable(" neon_vqshrns_n_u32 NYI "); + case NEON::BI__builtin_neon_vqshrund_n_s64: + llvm_unreachable(" neon_vqshrund_n_s64 NYI "); + case NEON::BI__builtin_neon_vqshrunh_n_s16: + llvm_unreachable(" neon_vqshrunh_n_s16 NYI "); + case NEON::BI__builtin_neon_vqshruns_n_s32: + llvm_unreachable(" neon_vqshruns_n_s32 NYI "); + case NEON::BI__builtin_neon_vqsubb_s8: + llvm_unreachable(" neon_vqsubb_s8 NYI "); + case NEON::BI__builtin_neon_vqsubb_u8: + llvm_unreachable(" neon_vqsubb_u8 NYI "); + case NEON::BI__builtin_neon_vqsubd_s64: + llvm_unreachable(" neon_vqsubd_s64 NYI "); + case NEON::BI__builtin_neon_vqsubd_u64: + llvm_unreachable(" neon_vqsubd_u64 NYI "); + case NEON::BI__builtin_neon_vqsubh_s16: + llvm_unreachable(" neon_vqsubh_s16 NYI "); + case NEON::BI__builtin_neon_vqsubh_u16: + llvm_unreachable(" neon_vqsubh_u16 NYI "); + case NEON::BI__builtin_neon_vqsubs_s32: + llvm_unreachable(" neon_vqsubs_s32 NYI "); + case NEON::BI__builtin_neon_vqsubs_u32: + llvm_unreachable(" neon_vqsubs_u32 NYI "); + case NEON::BI__builtin_neon_vrecped_f64: + llvm_unreachable(" neon_vrecped_f64 NYI "); + case NEON::BI__builtin_neon_vrecpes_f32: + llvm_unreachable(" neon_vrecpes_f32 NYI "); + case NEON::BI__builtin_neon_vrecpxd_f64: + llvm_unreachable(" neon_vrecpxd_f64 NYI "); + case NEON::BI__builtin_neon_vrecpxs_f32: + llvm_unreachable(" neon_vrecpxs_f32 NYI "); + case NEON::BI__builtin_neon_vrshld_s64: + llvm_unreachable(" neon_vrshld_s64 NYI "); + case NEON::BI__builtin_neon_vrshld_u64: + llvm_unreachable(" neon_vrshld_u64 NYI "); + case NEON::BI__builtin_neon_vrsqrted_f64: + llvm_unreachable(" neon_vrsqrted_f64 NYI "); + case NEON::BI__builtin_neon_vrsqrtes_f32: + llvm_unreachable(" neon_vrsqrtes_f32 NYI "); + case NEON::BI__builtin_neon_vrsqrtsd_f64: + llvm_unreachable(" neon_vrsqrtsd_f64 NYI "); + case NEON::BI__builtin_neon_vrsqrtss_f32: + llvm_unreachable(" neon_vrsqrtss_f32 NYI "); + case NEON::BI__builtin_neon_vsha1cq_u32: + llvm_unreachable(" neon_vsha1cq_u32 NYI "); + case NEON::BI__builtin_neon_vsha1h_u32: + llvm_unreachable(" neon_vsha1h_u32 NYI "); + case NEON::BI__builtin_neon_vsha1mq_u32: + llvm_unreachable(" neon_vsha1mq_u32 NYI "); + case NEON::BI__builtin_neon_vsha1pq_u32: + llvm_unreachable(" neon_vsha1pq_u32 NYI "); + case NEON::BI__builtin_neon_vshld_s64: + llvm_unreachable(" neon_vshld_s64 NYI "); + case NEON::BI__builtin_neon_vshld_u64: + llvm_unreachable(" neon_vshld_u64 NYI "); + case NEON::BI__builtin_neon_vslid_n_s64: + llvm_unreachable(" neon_vslid_n_s64 NYI "); + case NEON::BI__builtin_neon_vslid_n_u64: + llvm_unreachable(" neon_vslid_n_u64 NYI "); + case NEON::BI__builtin_neon_vsqaddb_u8: + llvm_unreachable(" neon_vsqaddb_u8 NYI "); + case NEON::BI__builtin_neon_vsqaddd_u64: + llvm_unreachable(" neon_vsqaddd_u64 NYI "); + case NEON::BI__builtin_neon_vsqaddh_u16: + llvm_unreachable(" neon_vsqaddh_u16 NYI "); + case NEON::BI__builtin_neon_vsqadds_u32: + llvm_unreachable(" neon_vsqadds_u32 NYI "); + case NEON::BI__builtin_neon_vsrid_n_s64: + llvm_unreachable(" neon_vsrid_n_s64 NYI "); + case NEON::BI__builtin_neon_vsrid_n_u64: + llvm_unreachable(" neon_vsrid_n_u64 NYI "); + case NEON::BI__builtin_neon_vuqaddb_s8: + llvm_unreachable(" neon_vuqaddb_s8 NYI "); + case NEON::BI__builtin_neon_vuqaddd_s64: + llvm_unreachable(" neon_vuqaddd_s64 NYI "); + case NEON::BI__builtin_neon_vuqaddh_s16: + llvm_unreachable(" neon_vuqaddh_s16 NYI "); + case NEON::BI__builtin_neon_vuqadds_s32: + llvm_unreachable(" neon_vuqadds_s32 NYI "); + // FP16 scalar intrinisics go here. + case NEON::BI__builtin_neon_vabdh_f16: + llvm_unreachable(" neon_vabdh_f16 NYI "); + case NEON::BI__builtin_neon_vcvtah_s32_f16: + llvm_unreachable(" neon_vcvtah_s32_f16 NYI "); + case NEON::BI__builtin_neon_vcvtah_s64_f16: + llvm_unreachable(" neon_vcvtah_s64_f16 NYI "); + case NEON::BI__builtin_neon_vcvtah_u32_f16: + llvm_unreachable(" neon_vcvtah_u32_f16 NYI "); + case NEON::BI__builtin_neon_vcvtah_u64_f16: + llvm_unreachable(" neon_vcvtah_u64_f16 NYI "); + case NEON::BI__builtin_neon_vcvth_n_f16_s32: + llvm_unreachable(" neon_vcvth_n_f16_s32 NYI "); + case NEON::BI__builtin_neon_vcvth_n_f16_s64: + llvm_unreachable(" neon_vcvth_n_f16_s64 NYI "); + case NEON::BI__builtin_neon_vcvth_n_f16_u32: + llvm_unreachable(" neon_vcvth_n_f16_u32 NYI "); + case NEON::BI__builtin_neon_vcvth_n_f16_u64: + llvm_unreachable(" neon_vcvth_n_f16_u64 NYI "); + case NEON::BI__builtin_neon_vcvth_n_s32_f16: + llvm_unreachable(" neon_vcvth_n_s32_f16 NYI "); + case NEON::BI__builtin_neon_vcvth_n_s64_f16: + llvm_unreachable(" neon_vcvth_n_s64_f16 NYI "); + case NEON::BI__builtin_neon_vcvth_n_u32_f16: + llvm_unreachable(" neon_vcvth_n_u32_f16 NYI "); + case NEON::BI__builtin_neon_vcvth_n_u64_f16: + llvm_unreachable(" neon_vcvth_n_u64_f16 NYI "); + case NEON::BI__builtin_neon_vcvth_s32_f16: + llvm_unreachable(" neon_vcvth_s32_f16 NYI "); + case NEON::BI__builtin_neon_vcvth_s64_f16: + llvm_unreachable(" neon_vcvth_s64_f16 NYI "); + case NEON::BI__builtin_neon_vcvth_u32_f16: + llvm_unreachable(" neon_vcvth_u32_f16 NYI "); + case NEON::BI__builtin_neon_vcvth_u64_f16: + llvm_unreachable(" neon_vcvth_u64_f16 NYI "); + case NEON::BI__builtin_neon_vcvtmh_s32_f16: + llvm_unreachable(" neon_vcvtmh_s32_f16 NYI "); + case NEON::BI__builtin_neon_vcvtmh_s64_f16: + llvm_unreachable(" neon_vcvtmh_s64_f16 NYI "); + case NEON::BI__builtin_neon_vcvtmh_u32_f16: + llvm_unreachable(" neon_vcvtmh_u32_f16 NYI "); + case NEON::BI__builtin_neon_vcvtmh_u64_f16: + llvm_unreachable(" neon_vcvtmh_u64_f16 NYI "); + case NEON::BI__builtin_neon_vcvtnh_s32_f16: + llvm_unreachable(" neon_vcvtnh_s32_f16 NYI "); + case NEON::BI__builtin_neon_vcvtnh_s64_f16: + llvm_unreachable(" neon_vcvtnh_s64_f16 NYI "); + case NEON::BI__builtin_neon_vcvtnh_u32_f16: + llvm_unreachable(" neon_vcvtnh_u32_f16 NYI "); + case NEON::BI__builtin_neon_vcvtnh_u64_f16: + llvm_unreachable(" neon_vcvtnh_u64_f16 NYI "); + case NEON::BI__builtin_neon_vcvtph_s32_f16: + llvm_unreachable(" neon_vcvtph_s32_f16 NYI "); + case NEON::BI__builtin_neon_vcvtph_s64_f16: + llvm_unreachable(" neon_vcvtph_s64_f16 NYI "); + case NEON::BI__builtin_neon_vcvtph_u32_f16: + llvm_unreachable(" neon_vcvtph_u32_f16 NYI "); + case NEON::BI__builtin_neon_vcvtph_u64_f16: + llvm_unreachable(" neon_vcvtph_u64_f16 NYI "); + case NEON::BI__builtin_neon_vmulxh_f16: + llvm_unreachable(" neon_vmulxh_f16 NYI "); + case NEON::BI__builtin_neon_vrecpeh_f16: + llvm_unreachable(" neon_vrecpeh_f16 NYI "); + case NEON::BI__builtin_neon_vrecpxh_f16: + llvm_unreachable(" neon_vrecpxh_f16 NYI "); + case NEON::BI__builtin_neon_vrsqrteh_f16: + llvm_unreachable(" neon_vrsqrteh_f16 NYI "); + case NEON::BI__builtin_neon_vrsqrtsh_f16: + llvm_unreachable(" neon_vrsqrtsh_f16 NYI "); + } + return nullptr; +} + mlir::Value CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, @@ -2958,12 +3441,16 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, Ops.push_back(emitScalarOrConstFoldImmArg(ICEArguments, i, E)); } - auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap); - const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( - SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted); + auto theSISDMap = ArrayRef(AArch64SISDIntrinsicMap); + const ARMVectorIntrinsicInfo *builtinInfo = findARMVectorIntrinsicInMap( + theSISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted); - if (Builtin) { - llvm_unreachable("Builtin from findARMVectorIntrinsicInMap NYI"); + if (builtinInfo) { + Ops.push_back(emitScalarExpr(E->getArg(E->getNumArgs() - 1))); + mlir::Value result = + emitCommonNeonSISDBuiltinExpr(*this, *builtinInfo, Ops, E); + assert(result && "SISD intrinsic should have been handled"); + return result; } const Expr *Arg = E->getArg(E->getNumArgs() - 1); @@ -3309,12 +3796,13 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, // Not all intrinsics handled by the common case work for AArch64 yet, so only // defer to common code if it's been added to our special map. - Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, - AArch64SIMDIntrinsicsProvenSorted); - if (Builtin) + builtinInfo = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, + AArch64SIMDIntrinsicsProvenSorted); + if (builtinInfo) return emitCommonNeonBuiltinExpr( - Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, - Builtin->NameHint, Builtin->TypeModifier, E, Ops, + builtinInfo->BuiltinID, builtinInfo->LLVMIntrinsic, + builtinInfo->AltLLVMIntrinsic, builtinInfo->NameHint, + builtinInfo->TypeModifier, E, Ops, /*never use addresses*/ Address::invalid(), Address::invalid(), Arch); if (mlir::Value V = emitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) From ab98b3c636d9b58fa2a4ea42ad90c0ef78a82ebe Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 22 Nov 2024 19:32:05 -0500 Subject: [PATCH 2102/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqshrn_n_v (#1144) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 9 +- clang/test/CIR/CodeGen/AArch64/neon.c | 126 +++++++++++------- 2 files changed, 86 insertions(+), 49 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index b9042573d2a3..907959f9625c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3909,7 +3909,14 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, SInt32Ty}, Ops, "aarch64.neon.sqrshrun", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqshrn_n_v: - llvm_unreachable("NEON::BI__builtin_neon_vqshrn_n_v NYI"); + return emitNeonCall( + builder, + {builder.getExtendedOrTruncatedElementVectorType( + vTy, true /* extend */, + mlir::cast(vTy.getEltType()).isSigned()), + SInt32Ty}, + Ops, usgn ? "aarch64.neon.uqshrn" : "aarch64.neon.sqshrn", ty, + getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vrshrn_n_v: return emitNeonCall( builder, diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 69ba9fa11798..004298b873e8 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -6926,59 +6926,89 @@ uint32x2_t test_vqrshrun_n_s64(int64x2_t a) { // return vqrshrun_high_n_s64(a, b, 19); // } -// NYI-LABEL: @test_vqshrn_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VQSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> [[VQSHRN_N]], i32 3) -// NYI: ret <8 x i8> [[VQSHRN_N1]] -// int8x8_t test_vqshrn_n_s16(int16x8_t a) { -// return vqshrn_n_s16(a, 3); -// } +int8x8_t test_vqshrn_n_s16(int16x8_t a) { + return vqshrn_n_s16(a, 3); -// NYI-LABEL: @test_vqshrn_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VQSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> [[VQSHRN_N]], i32 19) -// NYI: ret <4 x i16> [[VQSHRN_N1]] -// int16x4_t test_vqshrn_n_s32(int32x4_t a) { -// return vqshrn_n_s32(a, 9); -// } + // CIR-LABEL: vqshrn_n_s16 + // CIR: cir.llvm.intrinsic "aarch64.neon.sqshrn" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector -// NYI-LABEL: @test_vqshrn_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VQSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> [[VQSHRN_N]], i32 19) -// NYI: ret <2 x i32> [[VQSHRN_N1]] -// int32x2_t test_vqshrn_n_s64(int64x2_t a) { -// return vqshrn_n_s64(a, 19); -// } + // LLVM:{{.*}}test_vqshrn_n_s16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VQSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> [[VQSHRN_N]], i32 3) + // LLVM: ret <8 x i8> [[VQSHRN_N1]] +} -// NYI-LABEL: @test_vqshrn_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VQSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> [[VQSHRN_N]], i32 3) -// NYI: ret <8 x i8> [[VQSHRN_N1]] -// uint8x8_t test_vqshrn_n_u16(uint16x8_t a) { -// return vqshrn_n_u16(a, 3); -// } +int16x4_t test_vqshrn_n_s32(int32x4_t a) { + return vqshrn_n_s32(a, 9); -// NYI-LABEL: @test_vqshrn_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VQSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> [[VQSHRN_N]], i32 9) -// NYI: ret <4 x i16> [[VQSHRN_N1]] -// uint16x4_t test_vqshrn_n_u32(uint32x4_t a) { -// return vqshrn_n_u32(a, 9); -// } + // CIR-LABEL: vqshrn_n_s32 + // CIR: cir.llvm.intrinsic "aarch64.neon.sqshrn" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector -// NYI-LABEL: @test_vqshrn_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VQSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> [[VQSHRN_N]], i32 19) -// NYI: ret <2 x i32> [[VQSHRN_N1]] -// uint32x2_t test_vqshrn_n_u64(uint64x2_t a) { -// return vqshrn_n_u64(a, 19); -// } + // LLVM:{{.*}}test_vqshrn_n_s32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VQSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> [[VQSHRN_N]], i32 9) + // LLVM: ret <4 x i16> [[VQSHRN_N1]] +} + +int32x2_t test_vqshrn_n_s64(int64x2_t a) { + return vqshrn_n_s64(a, 19); + + // CIR-LABEL: vqshrn_n_s64 + // CIR: cir.llvm.intrinsic "aarch64.neon.sqshrn" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM:{{.*}}test_vqshrn_n_s64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VQSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> [[VQSHRN_N]], i32 19) + // LLVM: ret <2 x i32> [[VQSHRN_N1]] +} + +uint8x8_t test_vqshrn_n_u16(uint16x8_t a) { + return vqshrn_n_u16(a, 3); + + // CIR-LABEL: vqshrn_n_u16 + // CIR: cir.llvm.intrinsic "aarch64.neon.uqshrn" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM:{{.*}}test_vqshrn_n_u16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> + // LLVM: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VQSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> [[VQSHRN_N]], i32 3) + // LLVM: ret <8 x i8> [[VQSHRN_N1]] +} + +uint16x4_t test_vqshrn_n_u32(uint32x4_t a) { + return vqshrn_n_u32(a, 9); + + // CIR-LABEL: vqshrn_n_u32 + // CIR: cir.llvm.intrinsic "aarch64.neon.uqshrn" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM:{{.*}}test_vqshrn_n_u32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> + // LLVM: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VQSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> [[VQSHRN_N]], i32 9) + // LLVM: ret <4 x i16> [[VQSHRN_N1]] +} + +uint32x2_t test_vqshrn_n_u64(uint64x2_t a) { + return vqshrn_n_u64(a, 19); + + // CIR-LABEL: vqshrn_n_u64 + // CIR: cir.llvm.intrinsic "aarch64.neon.uqshrn" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM:{{.*}}test_vqshrn_n_u64(<2 x i64>{{.*}}[[A:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> + // LLVM: [[VQSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VQSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> [[VQSHRN_N]], i32 19) + // LLVM: ret <2 x i32> [[VQSHRN_N1]] +} // NYI-LABEL: @test_vqshrn_high_n_s16( // NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> From bf83e5d3897c25ea76e2433e1c7982636558ac32 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 22 Nov 2024 19:32:32 -0500 Subject: [PATCH 2103/2301] [CIR][FlattenCFG] Let results of CIR ScopeOp forwarded during FlattenCFG (#1147) This PR implements NYI in CIRScopeOpFlattening. It seems to me the best way is to let results of ScopeOp forwarded as block arguments of the last block split from the cir.scope block. --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 9 ++--- clang/test/CIR/CodeGen/fullexpr.cpp | 33 +++++++++++++++++++ 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 5e484d520a71..bd62d84320fd 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -135,13 +135,10 @@ class CIRScopeOpFlattening : public mlir::OpRewritePattern { // Split the current block before the ScopeOp to create the inlining // point. auto *currentBlock = rewriter.getInsertionBlock(); - auto *remainingOpsBlock = + mlir::Block *continueBlock = rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); - mlir::Block *continueBlock; - if (scopeOp.getNumResults() == 0) - continueBlock = remainingOpsBlock; - else - llvm_unreachable("NYI"); + if (scopeOp.getNumResults() > 0) + continueBlock->addArguments(scopeOp.getResultTypes(), loc); // Inline body region. auto *beforeBody = &scopeOp.getRegion().front(); diff --git a/clang/test/CIR/CodeGen/fullexpr.cpp b/clang/test/CIR/CodeGen/fullexpr.cpp index a83ce7d530cc..346d5c6dd3e9 100644 --- a/clang/test/CIR/CodeGen/fullexpr.cpp +++ b/clang/test/CIR/CodeGen/fullexpr.cpp @@ -1,5 +1,10 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir-flat %s -o %t.cir.flat +// RUN: FileCheck --check-prefix=FLAT --input-file=%t.cir.flat %s +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o - %s \ +// RUN: | opt -S -passes=instcombine,mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s int go(int const& val); @@ -18,3 +23,31 @@ int go1() { // CHECK-NEXT: cir.yield %[[#RValTmp]] : !s32i // CHECK-NEXT: } // CHECK-NEXT: cir.store %[[#RVal]], %[[#XAddr]] : !s32i, !cir.ptr + +// FLAT: cir.func @_Z3go1v() -> !s32i +// FLAT: %[[#TmpAddr:]] = cir.alloca !s32i, !cir.ptr, ["ref.tmp0", init] {alignment = 4 : i64} +// FLAT: %[[#XAddr:]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// FLAT: cir.br ^[[before_body:.*]]{{ loc.*}} +// FLAT-NEXT: ^[[before_body]]: // pred: ^bb0 +// FLAT-NEXT: %[[#One:]] = cir.const #cir.int<1> : !s32i +// FLAT-NEXT: cir.store %[[#One]], %[[#TmpAddr]] : !s32i, !cir.ptr +// FLAT-NEXT: %[[#RValTmp:]] = cir.call @_Z2goRKi(%[[#TmpAddr]]) : (!cir.ptr) -> !s32i +// FLAT-NEXT: cir.br ^[[continue_block:.*]](%[[#RValTmp]] : !s32i) {{loc.*}} +// FLAT-NEXT: ^[[continue_block]](%[[#BlkArgRval:]]: !s32i {{loc.*}}): // pred: ^[[before_body]] +// FLAT-NEXT: cir.store %[[#BlkArgRval]], %[[#XAddr]] : !s32i, !cir.ptr + +// LLVM-LABEL: @_Z3go1v() +// LLVM-NEXT: %[[#TmpAddr:]] = alloca i32, i64 1, align 4 +// LLVM: br label %[[before_body:[0-9]+]] +// LLVM: [[before_body]]: +// LLVM-NEXT: store i32 1, ptr %[[#TmpAddr]], align 4 +// LLVM-NEXT: %[[#RValTmp:]] = call i32 @_Z2goRKi(ptr %[[#TmpAddr]]) +// LLVM-NEXT: br label %[[continue_block:[0-9]+]] + +// LLVM: [[continue_block]]: +// LLVM-NEXT: [[PHI:%.*]] = phi i32 [ %[[#RValTmp]], %[[before_body]] ] +// LLVM: store i32 [[PHI]], ptr [[TMP0:%.*]], align 4 +// LLVM: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// LLVM: store i32 [[TMP1]], ptr [[TMP2:%.*]], align 4 +// LLVM: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// LLVM: ret i32 [[TMP3]] From 8cb8d8d2779a128cc2312b957325dbe4997e20f2 Mon Sep 17 00:00:00 2001 From: 7mile Date: Sun, 24 Nov 2024 02:14:23 +0800 Subject: [PATCH 2104/2301] [CIR][Transforms][NFC] Fix undesirable include of clang's private header (#1157) With [llvm-project#116090](https://github.com/llvm/llvm-project/pull/116090) merged, we can get rid of `#include "../../../../Basic/Targets.h"` now. --- .../lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index f6c75b39e516..a9266aa4ada8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -11,10 +11,8 @@ // //===----------------------------------------------------------------------===// -// FIXME(cir): This header file is not exposed to the public API, but can be -// reused by CIR ABI lowering since it holds target-specific information. -#include "../../../../Basic/Targets.h" #include "clang/Basic/LangOptions.h" +#include "clang/Basic/TargetInfo.h" #include "clang/Basic/TargetOptions.h" #include "CIRLowerContext.h" From 486a298d7055fb5dbac7975e2c45625dbf113ec2 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 26 Nov 2024 04:12:18 +0800 Subject: [PATCH 2105/2301] [CIR][NFC] move data member pointer lowering to CXXABI (#1130) This PR moves the lowering code for data member pointers from the conversion patterns to the implementation of CXXABI because this part should be ABI-specific. --- .../Transforms/TargetLowering/CIRCXXABI.h | 26 +++++++ .../TargetLowering/ItaniumCXXABI.cpp | 62 ++++++++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 74 +++++++++---------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 25 ++++++- clang/test/CIR/Lowering/data-member.cir | 27 ++++--- 5 files changed, 159 insertions(+), 55 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index 0f05ec8040f8..4c2f442721e8 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -15,9 +15,15 @@ #define LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_CIRCXXABI_H #include "LowerFunctionInfo.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Types.h" #include "mlir/IR/Value.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "mlir/Transforms/DialectConversion.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Target/AArch64.h" namespace cir { @@ -59,6 +65,26 @@ class CIRCXXABI { /// Returns how an argument of the given record type should be passed. /// FIXME(cir): This expects a CXXRecordDecl! Not any record type. virtual RecordArgABI getRecordArgABI(const StructType RD) const = 0; + + /// Lower the given data member pointer type to its ABI type. The returned + /// type is also a CIR type. + virtual mlir::Type + lowerDataMemberType(cir::DataMemberType type, + const mlir::TypeConverter &typeConverter) const = 0; + + /// Lower the given data member pointer constant to a constant of the ABI + /// type. The returned constant is represented as an attribute as well. + virtual mlir::TypedAttr + lowerDataMemberConstant(cir::DataMemberAttr attr, + const mlir::DataLayout &layout, + const mlir::TypeConverter &typeConverter) const = 0; + + /// Lower the given cir.get_runtime_member op to a sequence of more + /// "primitive" CIR operations that act on the ABI types. + virtual mlir::Operation * + lowerGetRuntimeMember(cir::GetRuntimeMemberOp op, mlir::Type loweredResultTy, + mlir::Value loweredAddr, mlir::Value loweredMember, + mlir::OpBuilder &builder) const = 0; }; /// Creates an Itanium-family ABI. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index 081db25808d1..a87cdc01ea9d 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -23,6 +23,7 @@ #include "../LoweringPrepareCXXABI.h" #include "CIRCXXABI.h" #include "LowerModule.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "llvm/Support/ErrorHandling.h" namespace cir { @@ -51,6 +52,19 @@ class ItaniumCXXABI : public CIRCXXABI { cir_cconv_assert(!cir::MissingFeatures::recordDeclCanPassInRegisters()); return RAA_Default; } + + mlir::Type + lowerDataMemberType(cir::DataMemberType type, + const mlir::TypeConverter &typeConverter) const override; + + mlir::TypedAttr lowerDataMemberConstant( + cir::DataMemberAttr attr, const mlir::DataLayout &layout, + const mlir::TypeConverter &typeConverter) const override; + + mlir::Operation * + lowerGetRuntimeMember(cir::GetRuntimeMemberOp op, mlir::Type loweredResultTy, + mlir::Value loweredAddr, mlir::Value loweredMember, + mlir::OpBuilder &builder) const override; }; } // namespace @@ -67,6 +81,54 @@ bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { return false; } +mlir::Type ItaniumCXXABI::lowerDataMemberType( + cir::DataMemberType type, const mlir::TypeConverter &typeConverter) const { + // Itanium C++ ABI 2.3: + // A pointer to data member is an offset from the base address of + // the class object containing it, represented as a ptrdiff_t + const clang::TargetInfo &target = LM.getTarget(); + clang::TargetInfo::IntType ptrdiffTy = + target.getPtrDiffType(clang::LangAS::Default); + return cir::IntType::get(type.getContext(), target.getTypeWidth(ptrdiffTy), + target.isTypeSigned(ptrdiffTy)); +} + +mlir::TypedAttr ItaniumCXXABI::lowerDataMemberConstant( + cir::DataMemberAttr attr, const mlir::DataLayout &layout, + const mlir::TypeConverter &typeConverter) const { + uint64_t memberOffset; + if (attr.isNullPtr()) { + // Itanium C++ ABI 2.3: + // A NULL pointer is represented as -1. + memberOffset = -1ull; + } else { + // Itanium C++ ABI 2.3: + // A pointer to data member is an offset from the base address of + // the class object containing it, represented as a ptrdiff_t + auto memberIndex = attr.getMemberIndex().value(); + memberOffset = + attr.getType().getClsTy().getElementOffset(layout, memberIndex); + } + + mlir::Type abiTy = lowerDataMemberType(attr.getType(), typeConverter); + return cir::IntAttr::get(abiTy, memberOffset); +} + +mlir::Operation *ItaniumCXXABI::lowerGetRuntimeMember( + cir::GetRuntimeMemberOp op, mlir::Type loweredResultTy, + mlir::Value loweredAddr, mlir::Value loweredMember, + mlir::OpBuilder &builder) const { + auto byteTy = IntType::get(op.getContext(), 8, true); + auto bytePtrTy = PointerType::get( + byteTy, mlir::cast(op.getAddr().getType()).getAddrSpace()); + auto objectBytesPtr = builder.create(op.getLoc(), bytePtrTy, + CastKind::bitcast, op.getAddr()); + auto memberBytesPtr = builder.create( + op.getLoc(), bytePtrTy, objectBytesPtr, loweredMember); + return builder.create(op.getLoc(), op.getType(), CastKind::bitcast, + memberBytesPtr); +} + CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { switch (LM.getCXXABIKind()) { // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index bea705567854..b1d8b1870632 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1521,28 +1521,6 @@ bool hasTrailingZeros(cir::ConstArrayAttr attr) { })); } -static mlir::Attribute -lowerDataMemberAttr(mlir::ModuleOp moduleOp, cir::DataMemberAttr attr, - const mlir::TypeConverter &typeConverter) { - mlir::DataLayout layout{moduleOp}; - - uint64_t memberOffset; - if (attr.isNullPtr()) { - // TODO(cir): the numerical value of a null data member pointer is - // ABI-specific and should be queried through ABI. - assert(!MissingFeatures::targetCodeGenInfoGetNullPointer()); - memberOffset = -1ull; - } else { - auto memberIndex = attr.getMemberIndex().value(); - memberOffset = - attr.getType().getClsTy().getElementOffset(layout, memberIndex); - } - - auto underlyingIntTy = mlir::IntegerType::get( - moduleOp->getContext(), layout.getTypeSizeInBits(attr.getType())); - return mlir::IntegerAttr::get(underlyingIntTy, memberOffset); -} - mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( cir::ConstantOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -1602,9 +1580,13 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( } attr = op.getValue(); } else if (mlir::isa(op.getType())) { + assert(lowerMod && "lower module is not available"); auto dataMember = mlir::cast(op.getValue()); - attr = lowerDataMemberAttr(op->getParentOfType(), - dataMember, *typeConverter); + mlir::DataLayout layout(op->getParentOfType()); + mlir::TypedAttr abiValue = lowerMod->getCXXABI().lowerDataMemberConstant( + dataMember, layout, *typeConverter); + rewriter.replaceOpWithNewOp(op, abiValue); + return mlir::success(); } // TODO(cir): constant arrays are currently just pushed into the stack using // the store instruction, instead of being stored as global variables and @@ -2208,8 +2190,15 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( return mlir::success(); } else if (auto dataMemberAttr = mlir::dyn_cast(init.value())) { - init = lowerDataMemberAttr(op->getParentOfType(), - dataMemberAttr, *typeConverter); + assert(lowerMod && "lower module is not available"); + mlir::DataLayout layout(op->getParentOfType()); + mlir::TypedAttr abiValue = lowerMod->getCXXABI().lowerDataMemberConstant( + dataMemberAttr, layout, *typeConverter); + auto abiOp = mlir::cast(rewriter.clone(*op.getOperation())); + abiOp.setInitialValueAttr(abiValue); + abiOp.setSymType(abiValue.getType()); + rewriter.replaceOp(op, abiOp); + return mlir::success(); } else if (const auto structAttr = mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); @@ -3237,11 +3226,11 @@ mlir::LogicalResult CIRToLLVMGetMemberOpLowering::matchAndRewrite( mlir::LogicalResult CIRToLLVMGetRuntimeMemberOpLowering::matchAndRewrite( cir::GetRuntimeMemberOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { - auto llvmResTy = getTypeConverter()->convertType(op.getType()); - auto llvmElementTy = mlir::IntegerType::get(op.getContext(), 8); - - rewriter.replaceOpWithNewOp( - op, llvmResTy, llvmElementTy, adaptor.getAddr(), adaptor.getMember()); + assert(lowerMod && "lowering module is not available"); + mlir::Type llvmResTy = getTypeConverter()->convertType(op.getType()); + mlir::Operation *llvmOp = lowerMod->getCXXABI().lowerGetRuntimeMember( + op, llvmResTy, adaptor.getAddr(), adaptor.getMember(), rewriter); + rewriter.replaceOp(op, llvmOp); return mlir::success(); } @@ -3850,7 +3839,7 @@ mlir::LogicalResult CIRToLLVMSignBitOpLowering::matchAndRewrite( void populateCIRToLLVMConversionPatterns( mlir::RewritePatternSet &patterns, mlir::TypeConverter &converter, - mlir::DataLayout &dataLayout, + mlir::DataLayout &dataLayout, cir::LowerModule *lowerModule, llvm::StringMap &stringGlobalsMap, llvm::StringMap &argStringGlobalsMap, llvm::MapVector &argsVarMap) { @@ -3858,6 +3847,9 @@ void populateCIRToLLVMConversionPatterns( patterns.add(converter, dataLayout, stringGlobalsMap, argStringGlobalsMap, argsVarMap, patterns.getContext()); + patterns.add( + converter, patterns.getContext(), lowerModule); patterns.add< // clang-format off CIRToLLVMAbsOpLowering, @@ -3891,7 +3883,6 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMComplexImagPtrOpLowering, CIRToLLVMComplexRealOpLowering, CIRToLLVMComplexRealPtrOpLowering, - CIRToLLVMConstantOpLowering, CIRToLLVMCopyOpLowering, CIRToLLVMDerivedClassAddrOpLowering, CIRToLLVMEhInflightOpLowering, @@ -3902,8 +3893,6 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMGetBitfieldOpLowering, CIRToLLVMGetGlobalOpLowering, CIRToLLVMGetMemberOpLowering, - CIRToLLVMGetRuntimeMemberOpLowering, - CIRToLLVMGlobalOpLowering, CIRToLLVMInlineAsmOpLowering, CIRToLLVMIsConstantOpLowering, CIRToLLVMIsFPClassOpLowering, @@ -3990,10 +3979,13 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, return mlir::LLVM::LLVMPointerType::get(type.getContext(), targetAS); }); - converter.addConversion([&](cir::DataMemberType type) -> mlir::Type { - return mlir::IntegerType::get(type.getContext(), - dataLayout.getTypeSizeInBits(type)); - }); + converter.addConversion( + [&, lowerModule](cir::DataMemberType type) -> mlir::Type { + assert(lowerModule && "CXXABI is not available"); + mlir::Type abiType = + lowerModule->getCXXABI().lowerDataMemberType(type, converter); + return converter.convertType(abiType); + }); converter.addConversion([&](cir::ArrayType type) -> mlir::Type { auto ty = converter.convertType(type.getEltType()); return mlir::LLVM::LLVMArrayType::get(ty, type.getSize()); @@ -4328,8 +4320,8 @@ void ConvertCIRToLLVMPass::runOnOperation() { llvm::MapVector argsVarMap; populateCIRToLLVMConversionPatterns(patterns, converter, dataLayout, - stringGlobalsMap, argStringGlobalsMap, - argsVarMap); + lowerModule.get(), stringGlobalsMap, + argStringGlobalsMap, argsVarMap); mlir::populateFuncToLLVMConversionPatterns(converter, patterns); mlir::ConversionTarget target(getContext()); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index a88c30d3dd15..d86d9dc0e1b5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -291,8 +291,15 @@ class CIRToLLVMStoreOpLowering class CIRToLLVMConstantOpLowering : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; + public: - using mlir::OpConversionPattern::OpConversionPattern; + CIRToLLVMConstantOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + cir::LowerModule *lowerModule) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) { + setHasBoundedRewriteRecursion(); + } mlir::LogicalResult matchAndRewrite(cir::ConstantOp op, OpAdaptor, @@ -490,8 +497,15 @@ class CIRToLLVMSwitchFlatOpLowering class CIRToLLVMGlobalOpLowering : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; + public: - using mlir::OpConversionPattern::OpConversionPattern; + CIRToLLVMGlobalOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + cir::LowerModule *lowerModule) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) { + setHasBoundedRewriteRecursion(); + } mlir::LogicalResult matchAndRewrite(cir::GlobalOp op, OpAdaptor, @@ -774,8 +788,13 @@ class CIRToLLVMGetMemberOpLowering class CIRToLLVMGetRuntimeMemberOpLowering : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; + public: - using mlir::OpConversionPattern::OpConversionPattern; + CIRToLLVMGetRuntimeMemberOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + cir::LowerModule *lowerModule) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) {} mlir::LogicalResult matchAndRewrite(cir::GetRuntimeMemberOp op, OpAdaptor, diff --git a/clang/test/CIR/Lowering/data-member.cir b/clang/test/CIR/Lowering/data-member.cir index 1609ac43ff03..14f3138bde56 100644 --- a/clang/test/CIR/Lowering/data-member.cir +++ b/clang/test/CIR/Lowering/data-member.cir @@ -5,7 +5,10 @@ !s64i = !cir.int !structT = !cir.struct, !cir.int, !cir.int}> -module @test { +module @test attributes { + cir.triple = "x86_64-unknown-linux-gnu", + llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" +} { cir.global external @pt_member = #cir.data_member<1> : !cir.data_member // MLIR: llvm.mlir.global external @pt_member(4 : i64) {addr_space = 0 : i32} : i64 // LLVM: @pt_member = global i64 4 @@ -15,8 +18,8 @@ module @test { cir.return %0 : !cir.data_member } // MLIR: llvm.func @constant() -> i64 - // MLIR-NEXT: %0 = llvm.mlir.constant(4 : i64) : i64 - // MLIR-NEXT: llvm.return %0 : i64 + // MLIR-NEXT: %[[#VAL:]] = llvm.mlir.constant(4 : i64) : i64 + // MLIR-NEXT: llvm.return %[[#VAL]] : i64 // MLIR-NEXT: } // LLVM: define i64 @constant() @@ -28,8 +31,8 @@ module @test { cir.return %0 : !cir.data_member } // MLIR: llvm.func @null_constant() -> i64 - // MLIR-NEXT: %0 = llvm.mlir.constant(-1 : i64) : i64 - // MLIR-NEXT: llvm.return %0 : i64 + // MLIR-NEXT: %[[#VAL:]] = llvm.mlir.constant(-1 : i64) : i64 + // MLIR-NEXT: llvm.return %[[#VAL]] : i64 // MLIR-NEXT: } // LLVM: define i64 @null_constant() !dbg !7 { @@ -40,13 +43,15 @@ module @test { %0 = cir.get_runtime_member %arg0[%arg1 : !cir.data_member] : !cir.ptr -> !cir.ptr cir.return %0 : !cir.ptr } - // MLIR: llvm.func @get_runtime_member(%arg0: !llvm.ptr, %arg1: i64) -> !llvm.ptr - // MLIR-NEXT: %0 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, i8 - // MLIR-NEXT: llvm.return %0 : !llvm.ptr + // MLIR: llvm.func @get_runtime_member(%[[ARG0:.+]]: !llvm.ptr, %[[ARG1:.+]]: i64) -> !llvm.ptr + // MLIR-NEXT: %[[#PTR:]] = llvm.bitcast %[[ARG0]] : !llvm.ptr to !llvm.ptr + // MLIR-NEXT: %[[#VAL:]] = llvm.getelementptr %[[#PTR]][%[[ARG1]]] : (!llvm.ptr, i64) -> !llvm.ptr, i8 + // MLIR-NEXT: %[[#RET:]] = llvm.bitcast %[[#VAL]] : !llvm.ptr to !llvm.ptr + // MLIR-NEXT: llvm.return %[[#RET]] : !llvm.ptr // MLIR-NEXT: } - // LLVM: define ptr @get_runtime_member(ptr %0, i64 %1) - // LLVM-NEXT: %3 = getelementptr i8, ptr %0, i64 %1 - // LLVM-NEXT: ret ptr %3 + // LLVM: define ptr @get_runtime_member(ptr %[[ARG0:.+]], i64 %[[ARG1:.+]]) + // LLVM-NEXT: %[[#VAL:]] = getelementptr i8, ptr %[[ARG0]], i64 %[[ARG1]] + // LLVM-NEXT: ret ptr %[[#VAL]] // LLVM-NEXT: } } \ No newline at end of file From b5a5e9779faf7f82f8350633e403a5cb3d81fbe5 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Mon, 25 Nov 2024 23:12:50 +0300 Subject: [PATCH 2106/2301] [CIR][AArch64][Lowering] Support fields with structs containing constant arrays or pointers (#1136) This PR adds support for function arguments with structs that contain constant arrays or pointers for AArch64. For example, ``` typedef struct { int a[42]; } CAT; void pass_cat(CAT a) {} ``` As usual, the main ideas are gotten from the original [CodeGen](https://github.com/llvm/clangir/blob/3aed38cf52e72cb51a907fad9dd53802f6505b81/clang/lib/AST/ASTContext.cpp#L1823), and I have added a couple of tests. --- .../TargetLowering/CIRLowerContext.cpp | 26 +++++++++- .../AArch64/aarch64-cc-structs.c | 17 +++++++ .../CIR/CallConvLowering/AArch64/ptr-fields.c | 49 +++++++++++++++++++ 3 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CallConvLowering/AArch64/ptr-fields.c diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index c6960d411b93..1dd790c1357c 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -94,6 +94,11 @@ clang::TypeInfo CIRLowerContext::getTypeInfoImpl(const mlir::Type T) const { Align = Target->getDoubleAlign(); break; } + if (mlir::isa(T)) { + Width = Target->getPointerWidth(clang::LangAS::Default); + Align = Target->getPointerAlign(clang::LangAS::Default); + break; + } cir_cconv_unreachable("Unknown builtin type!"); break; } @@ -167,9 +172,28 @@ int64_t CIRLowerContext::toBits(clang::CharUnits CharSize) const { return CharSize.getQuantity() * getCharWidth(); } +/// Performing the computation in CharUnits +/// instead of in bits prevents overflowing the uint64_t for some large arrays. +clang::TypeInfoChars getConstantArrayInfoInChars(const CIRLowerContext &ctx, + cir::ArrayType arrTy) { + clang::TypeInfoChars eltInfo = ctx.getTypeInfoInChars(arrTy.getEltType()); + uint64_t tySize = arrTy.getSize(); + assert((tySize == 0 || static_cast(eltInfo.Width.getQuantity()) <= + (uint64_t)(-1) / tySize) && + "Overflow in array type char size evaluation"); + uint64_t width = eltInfo.Width.getQuantity() * tySize; + unsigned align = eltInfo.Align.getQuantity(); + if (!ctx.getTargetInfo().getCXXABI().isMicrosoft() || + ctx.getTargetInfo().getPointerWidth(clang::LangAS::Default) == 64) + width = llvm::alignTo(width, align); + return clang::TypeInfoChars(clang::CharUnits::fromQuantity(width), + clang::CharUnits::fromQuantity(align), + eltInfo.AlignRequirement); +} + clang::TypeInfoChars CIRLowerContext::getTypeInfoInChars(mlir::Type T) const { if (auto arrTy = mlir::dyn_cast(T)) - cir_cconv_unreachable("NYI"); + return getConstantArrayInfoInChars(*this, arrTy); clang::TypeInfo Info = getTypeInfo(T); return clang::TypeInfoChars(toCharUnitsFromBits(Info.Width), toCharUnitsFromBits(Info.Align), diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index 969d40842b75..84c7141a7508 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -234,3 +234,20 @@ S_PAD ret_s_pad() { S_PAD s; return s; } + +typedef struct { + int a[42]; +} CAT; + +// CHECK: cir.func @pass_cat(%arg0: !cir.ptr +// CHECK: %[[#V0:]] = cir.alloca !cir.ptr, !cir.ptr>, [""] {alignment = 8 : i64} +// CHECK: cir.store %arg0, %[[#V0]] : !cir.ptr, !cir.ptr> +// CHECK: %[[#V1:]] = cir.load %[[#V0]] : !cir.ptr>, !cir.ptr +// CHECK: cir.return + +// LLVM: void @pass_cat(ptr %[[#V0:]]) +// LLVM: %[[#V2:]] = alloca ptr, i64 1, align 8 +// LLVM: store ptr %[[#V0]], ptr %[[#V2]], align 8 +// LLVM: %[[#V3:]] = load ptr, ptr %[[#V2]], align 8 +// LLVM: ret void +void pass_cat(CAT a) {} diff --git a/clang/test/CIR/CallConvLowering/AArch64/ptr-fields.c b/clang/test/CIR/CallConvLowering/AArch64/ptr-fields.c new file mode 100644 index 000000000000..8fc121e9bf9b --- /dev/null +++ b/clang/test/CIR/CallConvLowering/AArch64/ptr-fields.c @@ -0,0 +1,49 @@ +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -fclangir-call-conv-lowering -emit-cir-flat -mmlir --mlir-print-ir-after=cir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -fclangir-call-conv-lowering +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +typedef int (*myfptr)(int); + +typedef struct { + myfptr f; +} A; + +int foo(int x) { return x; } + +// CIR: cir.func @passA(%arg0: !u64i +// CIR: %[[#V0:]] = cir.alloca !ty_A, !cir.ptr, [""] {alignment = 4 : i64} +// CIR: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CIR: cir.store %arg0, %[[#V1]] : !u64i, !cir.ptr +// CIR: %[[#V2:]] = cir.get_global @foo : !cir.ptr> +// CIR: %[[#V3:]] = cir.get_member %[[#V0]][0] {name = "f"} : !cir.ptr -> !cir.ptr>> +// CIR: cir.store %[[#V2]], %[[#V3]] : !cir.ptr>, !cir.ptr>> +// CIR: cir.return + +// LLVM: void @passA(i64 %[[#V0:]]) +// LLVM: %[[#V2:]] = alloca %struct.A, i64 1, align 4 +// LLVM: store i64 %[[#V0]], ptr %[[#V2]], align 8 +// LLVM: %[[#V3:]] = getelementptr %struct.A, ptr %[[#V2]], i32 0, i32 0 +// LLVM: store ptr @foo, ptr %[[#V3]], align 8 +// LLVM: ret void +void passA(A a) { a.f = foo; } + +typedef struct { + int a; +} S_1; + +typedef struct { + S_1* s; +} S_2; + +// CIR: cir.func @passB(%arg0: !u64i +// CIR: %[[#V0:]] = cir.alloca !ty_S_2_, !cir.ptr, [""] {alignment = 4 : i64} +// CIR: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CIR: cir.store %arg0, %[[#V1]] : !u64i, !cir.ptr +// CIR: cir.return + +// LLVM: void @passB(i64 %[[#V0:]]) +// LLVM: %[[#V2:]] = alloca %struct.S_2, i64 1, align 4 +// LLVM: store i64 %[[#V0]], ptr %[[#V2]], align 8 +// LLVM: ret void +void passB(S_2 s) {} From b29c903a2528e420762617f844383e60aa3bcfd8 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 25 Nov 2024 15:19:41 -0500 Subject: [PATCH 2107/2301] [CIR][CIRGen][Builtin] Support __builtin_frame_address (#1137) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 43 ++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 13 +++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 10 +++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 10 +++++ clang/test/CIR/CodeGen/builtins.cpp | 15 ++++++- clang/test/CIR/IR/builtins.cir | 2 + 6 files changed, 80 insertions(+), 13 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 702e62a98435..74757d506ba1 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4218,13 +4218,21 @@ def MemChrOp : CIR_Op<"libc.memchr"> { } //===----------------------------------------------------------------------===// -// ReturnAddrOp +// ReturnAddrOp and FrameAddrOp //===----------------------------------------------------------------------===// -def ReturnAddrOp : CIR_Op<"return_address"> { +class FuncAddrBuiltinOp : CIR_Op { let arguments = (ins UInt32:$level); - let summary = "The return address of the current function, or of one of its callers"; let results = (outs Res:$result); + let assemblyFormat = [{ + `(` $level `)` attr-dict + }]; + let hasVerifier = 0; +} + +def ReturnAddrOp : FuncAddrBuiltinOp<"return_address"> { + let summary = + "The return address of the current function, or of one of its callers"; let description = [{ Represents call to builtin function ` __builtin_return_address` in CIR. @@ -4241,11 +4249,34 @@ def ReturnAddrOp : CIR_Op<"return_address"> { %p = return_address(%level) -> !cir.ptr ``` }]; +} - let assemblyFormat = [{ - `(` $level `)` attr-dict +def FrameAddrOp : FuncAddrBuiltinOp<"frame_address"> { + let summary = + "The frame address of the current function, or of one of its callers"; + + let description = [{ + Represents call to builtin function ` __builtin_frame_address` in CIR. + This builtin function returns the frame address of the current function, + or of one of its callers. The frame is the area on the stack that holds + local variables and saved registers. The frame address is normally the + address of the first word pushed on to the stack by the function. + However, the exact definition depends upon the processor and the calling + convention. If the processor has a dedicated frame pointer register, and + the function has a frame, then __builtin_frame_address returns the value of + the frame pointer register. + + The `level` argument is number of frames to scan up the call stack. + For instance, value of 0 yields the frame address of the current function, + value of 1 yields the frame address of the caller of the current function, + and so forth. + + Examples: + + ```mlir + %p = frame_address(%level) -> !cir.ptr + ``` }]; - let hasVerifier = 0; } //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 1e139432785f..7b502b33533a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1592,18 +1592,21 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_wmemcmp NYI"); case Builtin::BI__builtin_dwarf_cfa: llvm_unreachable("BI__builtin_dwarf_cfa NYI"); - case Builtin::BI__builtin_return_address: { + case Builtin::BI__builtin_return_address: + case Builtin::BI__builtin_frame_address: { mlir::Location loc = getLoc(E->getExprLoc()); mlir::Attribute levelAttr = ConstantEmitter(*this).emitAbstract( E->getArg(0), E->getArg(0)->getType()); - int64_t level = mlir::cast(levelAttr).getSInt(); + uint64_t level = mlir::cast(levelAttr).getUInt(); + if (BuiltinID == Builtin::BI__builtin_return_address) { + return RValue::get(builder.create( + loc, builder.getUInt32(level, loc))); + } return RValue::get( - builder.create(loc, builder.getUInt32(level, loc))); + builder.create(loc, builder.getUInt32(level, loc))); } case Builtin::BI_ReturnAddress: llvm_unreachable("BI_ReturnAddress NYI"); - case Builtin::BI__builtin_frame_address: - llvm_unreachable("BI__builtin_frame_address NYI"); case Builtin::BI__builtin_extract_return_addr: llvm_unreachable("BI__builtin_extract_return_addr NYI"); case Builtin::BI__builtin_frob_return_addr: diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index b1d8b1870632..1d8be78053ed 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3612,6 +3612,15 @@ mlir::LogicalResult CIRToLLVMReturnAddrOpLowering::matchAndRewrite( return mlir::success(); } +mlir::LogicalResult CIRToLLVMFrameAddrOpLowering::matchAndRewrite( + cir::FrameAddrOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm.frameaddress", llvmPtrTy, + adaptor.getOperands()); + return mlir::success(); +} + mlir::LogicalResult CIRToLLVMClearCacheOpLowering::matchAndRewrite( cir::ClearCacheOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -3888,6 +3897,7 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMEhInflightOpLowering, CIRToLLVMEhTypeIdOpLowering, CIRToLLVMExpectOpLowering, + CIRToLLVMFrameAddrOpLowering, CIRToLLVMFreeExceptionOpLowering, CIRToLLVMFuncOpLowering, CIRToLLVMGetBitfieldOpLowering, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index d86d9dc0e1b5..892f0d07dd6b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -936,6 +936,16 @@ class CIRToLLVMReturnAddrOpLowering mlir::ConversionPatternRewriter &) const override; }; +class CIRToLLVMFrameAddrOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::FrameAddrOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + class CIRToLLVMClearCacheOpLowering : public mlir::OpConversionPattern { public: diff --git a/clang/test/CIR/CodeGen/builtins.cpp b/clang/test/CIR/CodeGen/builtins.cpp index f103abb8db64..c5f1ed5e0aff 100644 --- a/clang/test/CIR/CodeGen/builtins.cpp +++ b/clang/test/CIR/CodeGen/builtins.cpp @@ -99,9 +99,20 @@ extern "C" void *test_return_address(void) { return __builtin_return_address(1); // CIR-LABEL: test_return_address - // [[ARG:%.*]] = cir.const #cir.int<1> : !u32i - // {{%.*}} = cir.return_address([[ARG]]) + // CIR: [[ARG:%.*]] = cir.const #cir.int<1> : !u32i + // CIR: {{%.*}} = cir.return_address([[ARG]]) // LLVM-LABEL: @test_return_address // LLVM: {{%.*}} = call ptr @llvm.returnaddress(i32 1) } + +extern "C" void *test_frame_address(void) { + return __builtin_frame_address(1); + + // CIR-LABEL: test_frame_address + // CIR: [[ARG:%.*]] = cir.const #cir.int<1> : !u32i + // CIR: {{%.*}} = cir.frame_address([[ARG]]) + + // LLVM-LABEL: @test_frame_address + // LLVM: {{%.*}} = call ptr @llvm.frameaddress.p0(i32 1) +} diff --git a/clang/test/CIR/IR/builtins.cir b/clang/test/CIR/IR/builtins.cir index 86c0c57825de..413119420be6 100644 --- a/clang/test/CIR/IR/builtins.cir +++ b/clang/test/CIR/IR/builtins.cir @@ -5,10 +5,12 @@ module { cir.func @test1() { %0 = cir.const #cir.int<1> : !u32i %1 = cir.return_address(%0) + %2 = cir.frame_address(%0) cir.return } // CHECK: cir.func @test1() // CHECK: %0 = cir.const #cir.int<1> : !u32i // CHECK: %1 = cir.return_address(%0) + // CHECK: %2 = cir.frame_address(%0) // CHECK: cir.return } From c1c576afbaf79a05fb0dc1c642afce83f5fe8901 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Mon, 25 Nov 2024 23:20:45 +0300 Subject: [PATCH 2108/2301] [CIR][ABI][AArch64][Lowering] support for calling struct types in range (64, 128) (#1141) This PR adds support for the lowering of AArch64 calls with structs having sizes greater than 64 and less than 128. The idea is from the original [CodeGen](https://github.com/llvm/clangir/blob/da601b374deea6665f710f7e432dfa82f457059e/clang/lib/CodeGen/CGCall.cpp#L1329), where we perform a coercion through memory for these type of calls. I have added a test for this. --- .../TargetLowering/LowerFunction.cpp | 7 ++++++ .../AArch64/aarch64-cc-structs.c | 25 +++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 06242e52383a..c85d95ba2ddd 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -336,6 +336,13 @@ mlir::Value createCoercedValue(mlir::Value Src, mlir::Type Ty, return CGF.buildAggregateBitcast(Src, Ty); } + if (auto alloca = findAlloca(Src.getDefiningOp())) { + auto tmpAlloca = createTmpAlloca(CGF, alloca.getLoc(), Ty); + createMemCpy(CGF, tmpAlloca, alloca, SrcSize.getFixedValue()); + return CGF.getRewriter().create(alloca.getLoc(), + tmpAlloca.getResult()); + } + cir_cconv_unreachable("NYI"); } diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index 84c7141a7508..0e2f3cccbb9f 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -206,6 +206,31 @@ GT_128 call_and_get_gt_128() { // LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#V1]], ptr %[[#V2]], i64 12, i1 false) void passS(S s) {} +// CHECK: @callS() +// CHECK: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, ["s"] {alignment = 4 : i64} +// CHECK: %[[#V1:]] = cir.alloca !cir.array, !cir.ptr>, ["tmp"] {alignment = 8 : i64} +// CHECK: %[[#V2:]] = cir.load %[[#V0]] : !cir.ptr, !ty_S +// CHECK: %[[#V3:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V4:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr>), !cir.ptr +// CHECK: %[[#V5:]] = cir.const #cir.int<12> : !u64i +// CHECK: cir.libc.memcpy %[[#V5]] bytes from %[[#V3]] to %[[#V4]] : !u64i, !cir.ptr -> !cir.ptr +// CHECK: %[[#V6:]] = cir.load %[[#V1]] : !cir.ptr>, !cir.array +// CHECK: cir.call @passS(%[[#V6]]) : (!cir.array) -> () +// CHECK: cir.return + +// LLVM: @callS() +// LLVM: %[[#V1:]] = alloca %struct.S, i64 1, align 4 +// LLVM: %[[#V2:]] = alloca [2 x i64], i64 1, align 8 +// LLVM: %[[#V3:]] = load %struct.S, ptr %[[#V1]], align 4 +// LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#V2]], ptr %[[#V1]], i64 12, i1 false) +// LLVM: %[[#V4:]] = load [2 x i64], ptr %[[#V2]], align 8 +// LLVM: call void @passS([2 x i64] %[[#V4]]) +// LLVM: ret void +void callS() { + S s; + passS(s); +} + typedef struct { uint8_t a; uint16_t b; From 7377ae963d501136f0b0a1172e26e72fd115001a Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 25 Nov 2024 15:21:33 -0500 Subject: [PATCH 2109/2301] [CIR][CIRGen][Builtin] Support builtin __sync_sub_and_fetch (#1146) --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 3 +- clang/test/CIR/CodeGen/atomic.cpp | 77 +++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 7b502b33533a..c8aa38b75d35 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1711,7 +1711,8 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_sub_and_fetch_4: case Builtin::BI__sync_sub_and_fetch_8: case Builtin::BI__sync_sub_and_fetch_16: - llvm_unreachable("BI__sync_sub_and_fetch like NYI"); + return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Sub, E, + cir::BinOpKind::Sub); case Builtin::BI__sync_and_and_fetch_1: case Builtin::BI__sync_and_and_fetch_2: diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index c348bfebf486..4f24e947e6c5 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -739,4 +739,81 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RET7:%.*]] = add i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 ull = __sync_add_and_fetch (&ull, uc); + + // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i + // CHECK: [[RES0:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i + // CHECK: [[RET0:%.*]] = cir.binop(sub, [[RES0]], [[VAL0]]) : !s8i + // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES0:%.*]] = atomicrmw sub ptr @sc, i8 [[VAL0]] seq_cst, align 1 + // LLVM: [[RET0:%.*]] = sub i8 [[RES0]], [[VAL0]] + // LLVM: store i8 [[RET0]], ptr @sc, align 1 + sc = __sync_sub_and_fetch (&sc, uc); + + // CHECK: [[RES1:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i + // CHECK: [[RET1:%.*]] = cir.binop(sub, [[RES1]], [[VAL1]]) : !u8i + // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES1:%.*]] = atomicrmw sub ptr @uc, i8 [[VAL1]] seq_cst, align 1 + // LLVM: [[RET1:%.*]] = sub i8 [[RES1]], [[VAL1]] + // LLVM: store i8 [[RET1]], ptr @uc, align 1 + uc = __sync_sub_and_fetch (&uc, uc); + + // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i + // CHECK: [[RES2:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i + // CHECK: [[RET2:%.*]] = cir.binop(sub, [[RES2]], [[VAL2]]) : !s16i + // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16 + // LLVM: [[RES2:%.*]] = atomicrmw sub ptr @ss, i16 [[CONV2]] seq_cst, align 2 + // LLVM: [[RET2:%.*]] = sub i16 [[RES2]], [[CONV2]] + // LLVM: store i16 [[RET2]], ptr @ss, align 2 + ss = __sync_sub_and_fetch (&ss, uc); + + // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i + // CHECK: [[RES3:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i + // CHECK: [[RET3:%.*]] = cir.binop(sub, [[RES3]], [[VAL3]]) : !u16i + // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16 + // LLVM: [[RES3:%.*]] = atomicrmw sub ptr @us, i16 [[CONV3]] seq_cst, align 2 + // LLVM: [[RET3:%.*]] = sub i16 [[RES3]], [[CONV3]] + // LLVM: store i16 [[RET3]], ptr @us + us = __sync_sub_and_fetch (&us, uc); + + // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i + // CHECK: [[RES4:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i + // CHECK: [[RET4:%.*]] = cir.binop(sub, [[RES4]], [[VAL4]]) : !s32i + // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32 + // LLVM: [[RES4:%.*]] = atomicrmw sub ptr @si, i32 [[CONV4]] seq_cst, align 4 + // LLVM: [[RET4:%.*]] = sub i32 [[RES4]], [[CONV4]] + // LLVM: store i32 [[RET4]], ptr @si, align 4 + si = __sync_sub_and_fetch (&si, uc); + + // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i + // CHECK: [[RES5:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i + // CHECK: [[RET5:%.*]] = cir.binop(sub, [[RES5]], [[VAL5]]) : !u32i + // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32 + // LLVM: [[RES5:%.*]] = atomicrmw sub ptr @ui, i32 [[CONV5]] seq_cst, align 4 + // LLVM: [[RET5:%.*]] = sub i32 [[RES5]], [[CONV5]] + // LLVM: store i32 [[RET5]], ptr @ui, align 4 + ui = __sync_sub_and_fetch (&ui, uc); + + // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i + // CHECK: [[RES6:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i + // CHECK: [[RET6:%.*]] = cir.binop(sub, [[RES6]], [[VAL6]]) : !s64i + // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64 + // LLVM: [[RES6:%.*]] = atomicrmw sub ptr @sll, i64 [[CONV6]] seq_cst, align 8 + // LLVM: [[RET6:%.*]] = sub i64 [[RES6]], [[CONV6]] + // LLVM: store i64 [[RET6]], ptr @sll, align 8 + sll = __sync_sub_and_fetch (&sll, uc); + + // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i + // CHECK: [[RES7:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i + // CHECK: [[RET7:%.*]] = cir.binop(sub, [[RES7]], [[VAL7]]) : !u64i + // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64 + // LLVM: [[RES7:%.*]] = atomicrmw sub ptr @ull, i64 [[CONV7]] seq_cst, align 8 + // LLVM: [[RET7:%.*]] = sub i64 [[RES7]], [[CONV7]] + // LLVM: store i64 [[RET7]], ptr @ull, align 8 + ull = __sync_sub_and_fetch (&ull, uc); } From 4baa37e1b3970000ca2f74b44afc37fe4bf13528 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 25 Nov 2024 15:21:47 -0500 Subject: [PATCH 2110/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vpadal_v, neon_vpadalq_v (#1148) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 9 +- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 200 ++++++++++++++++++ 2 files changed, 208 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 907959f9625c..09ea7af3ab85 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3873,7 +3873,14 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vpadal_v: case NEON::BI__builtin_neon_vpadalq_v: { - llvm_unreachable("NEON::BI__builtin_neon_vpadalq_v NYI"); + cir::VectorType argTy = getHalfEltSizeTwiceNumElemsVecType(builder, vTy); + mlir::Location loc = getLoc(E->getExprLoc()); + llvm::SmallVector args = {Ops[1]}; + mlir::Value tmp = emitNeonCall( + builder, {argTy}, args, + usgn ? "aarch64.neon.uaddlp" : "aarch64.neon.saddlp", vTy, loc); + mlir::Value addEnd = builder.createBitcast(Ops[0], vTy); + return builder.createAdd(tmp, addEnd); } case NEON::BI__builtin_neon_vpmin_v: case NEON::BI__builtin_neon_vpminq_v: diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index 4c516ad06f18..a2d5fdcb8383 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -1516,3 +1516,203 @@ uint64x2_t test_splatq_laneq_u64(uint64x2_t v) { // LLVM: [[RES:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP1]], <2 x i32> zeroinitializer // LLVM: ret <2 x i64> [[RES]] } + +int16x4_t test_vpadal_s8(int16x4_t a, int8x8_t b) { + return vpadal_s8(a, b); + + // CIR-LABEL: vpadal_s8 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadal_s8(<4 x i16>{{.*}}[[a:%.*]], <8 x i8>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[a]] to <8 x i8> + // LLVM: [[VPADAL_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> [[b]]) + // LLVM: [[TMP1:%.*]] = add <4 x i16> [[VPADAL_I]], [[a]] + // LLVM: ret <4 x i16> [[TMP1]] +} + +int32x2_t test_vpadal_s16(int32x2_t a, int16x4_t b) { + return vpadal_s16(a, b); + + // CIR-LABEL: vpadal_s16 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadal_s16(<2 x i32>{{.*}}[[a:%.*]], <4 x i16>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[b]] to <8 x i8> + // LLVM: [[VPADAL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> [[b]]) + // LLVM: [[TMP2:%.*]] = add <2 x i32> [[VPADAL1_I]], [[a]] + // LLVM: ret <2 x i32> [[TMP2]] +} + +int64x1_t test_vpadal_s32(int64x1_t a, int32x2_t b) { + return vpadal_s32(a, b); + + // CIR-LABEL: vpadal_s32 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadal_s32(<1 x i64>{{.*}}[[a:%.*]], <2 x i32>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[b]] to <8 x i8> + // LLVM: [[VPADAL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.saddlp.v1i64.v2i32(<2 x i32> [[b]]) + // LLVM: [[TMP2:%.*]] = add <1 x i64> [[VPADAL1_I]], [[a]] + // LLVM: ret <1 x i64> [[TMP2]] +} + +uint16x4_t test_vpadal_u8(uint16x4_t a, uint8x8_t b) { + return vpadal_u8(a, b); + + // CIR-LABEL: vpadal_u8 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadal_u8(<4 x i16>{{.*}}[[a:%.*]], <8 x i8>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[a]] to <8 x i8> + // LLVM: [[VPADAL_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> [[b]]) + // LLVM: [[TMP1:%.*]] = add <4 x i16> [[VPADAL_I]], [[a]] + // LLVM: ret <4 x i16> [[TMP1]] +} + +uint32x2_t test_vpadal_u16(uint32x2_t a, uint16x4_t b) { + return vpadal_u16(a, b); + + // CIR-LABEL: vpadal_u16 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadal_u16(<2 x i32>{{.*}}[[a:%.*]], <4 x i16>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[b]] to <8 x i8> + // LLVM: [[VPADAL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> [[b]]) + // LLVM: [[TMP2:%.*]] = add <2 x i32> [[VPADAL1_I]], [[a]] + // LLVM: ret <2 x i32> [[TMP2]] +} + +uint64x1_t test_vpadal_u32(uint64x1_t a, uint32x2_t b) { + return vpadal_u32(a, b); + + // CIR-LABEL: vpadal_u32 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadal_u32(<1 x i64>{{.*}}[[a:%.*]], <2 x i32>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[b]] to <8 x i8> + // LLVM: [[VPADAL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> [[b]]) + // LLVM: [[TMP2:%.*]] = add <1 x i64> [[VPADAL1_I]], [[a]] + // LLVM: ret <1 x i64> [[TMP2]] +} + +int16x8_t test_vpadalq_s8(int16x8_t a, int8x16_t b) { + return vpadalq_s8(a, b); + + // CIR-LABEL: vpadalq_s8 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadalq_s8(<8 x i16>{{.*}}[[a:%.*]], <16 x i8>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[a]] to <16 x i8> + // LLVM: [[VPADAL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> [[b]]) + // LLVM: [[TMP1:%.*]] = add <8 x i16> [[VPADAL_I]], [[a]] + // LLVM: ret <8 x i16> [[TMP1]] +} + +int32x4_t test_vpadalq_s16(int32x4_t a, int16x8_t b) { + return vpadalq_s16(a, b); + + // CIR-LABEL: vpadalq_s16 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadalq_s16(<4 x i32>{{.*}}[[a:%.*]], <8 x i16>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[b]] to <16 x i8> + // LLVM: [[VPADAL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> [[b]]) + // LLVM: [[TMP2:%.*]] = add <4 x i32> [[VPADAL1_I]], [[a]] + // LLVM: ret <4 x i32> [[TMP2]] +} + +int64x2_t test_vpadalq_s32(int64x2_t a, int32x4_t b) { + return vpadalq_s32(a, b); + + // CIR-LABEL: vpadalq_s32 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.saddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadalq_s32(<2 x i64>{{.*}}[[a:%.*]], <4 x i32>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[b]] to <16 x i8> + // LLVM: [[VPADAL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> [[b]]) + // LLVM: [[TMP2:%.*]] = add <2 x i64> [[VPADAL1_I]], [[a]] + // LLVM: ret <2 x i64> [[TMP2]] +} + +uint16x8_t test_vpadalq_u8(uint16x8_t a, uint8x16_t b) { + return vpadalq_u8(a, b); + + // CIR-LABEL: vpadalq_u8 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadalq_u8(<8 x i16>{{.*}}[[a:%.*]], <16 x i8>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[a]] to <16 x i8> + // LLVM: [[VPADAL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> [[b]]) + // LLVM: [[TMP1:%.*]] = add <8 x i16> [[VPADAL_I]], [[a]] + // LLVM: ret <8 x i16> [[TMP1]] +} + +uint32x4_t test_vpadalq_u16(uint32x4_t a, uint16x8_t b) { + return vpadalq_u16(a, b); + + // CIR-LABEL: vpadalq_u16 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadalq_u16(<4 x i32>{{.*}}[[a:%.*]], <8 x i16>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[b]] to <16 x i8> + // LLVM: [[VPADAL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> [[b]]) + // LLVM: [[TMP2:%.*]] = add <4 x i32> [[VPADAL1_I]], [[a]] + // LLVM: ret <4 x i32> [[TMP2]] +} + +uint64x2_t test_vpadalq_u32(uint64x2_t a, uint32x4_t b) { + return vpadalq_u32(a, b); + + // CIR-LABEL: vpadalq_u32 + // CIR: [[VPADAL_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.uaddlp" {{%.*}} : + // CIR-SAME: (!cir.vector) -> !cir.vector + // CIR: [[a:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: {{%.*}} = cir.binop(add, [[VPADAL_I]], [[a]]) : !cir.vector + + // LLVM: {{.*}}test_vpadalq_u32(<2 x i64>{{.*}}[[a:%.*]], <4 x i32>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[b]] to <16 x i8> + // LLVM: [[VPADAL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> [[b]]) + // LLVM: [[TMP2:%.*]] = add <2 x i64> [[VPADAL1_I]], [[a]] + // LLVM: ret <2 x i64> [[TMP2]] +} From ee7e3b83496cb440349c3635deab8bfc0932e1b0 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Tue, 26 Nov 2024 04:22:37 +0800 Subject: [PATCH 2111/2301] [CIR] [Lowering] [X86_64] Support VAArg for LongDouble (#1150) Recommit https://github.com/llvm/clangir/pull/1101 I am not sure what happened. But that merged PR doesn't show in the git log. Maybe the stacked PR may not get successed? But after all, we need to land it again. Following off are original commit messages: --- This is the following of https://github.com/llvm/clangir/pull/1100. After https://github.com/llvm/clangir/pull/1100, when we want to use LongDouble for VAArg, we will be in trouble due to details in X86_64's ABI and this patch tries to address this. The practical impact the patch is, after this patch, with https://github.com/llvm/clangir/pull/1088 and a small following up fix, we can build and run all C's benchmark in SpecCPU 2017. I think it is a milestone. --- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 3 +- .../Transforms/TargetLowering/ABIInfoImpl.cpp | 7 ++ .../Transforms/TargetLowering/ABIInfoImpl.h | 1 + .../Targets/LoweringPrepareX86CXXABI.cpp | 5 +- .../Transforms/TargetLowering/Targets/X86.cpp | 118 +++++++++++++++++- .../TargetLowering/Targets/X86_64ABIInfo.h | 8 ++ clang/test/CIR/Lowering/var-arg-x86_64.c | 52 ++++++++ 7 files changed, 186 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 3a600a0a0575..df89584fd3a9 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -745,7 +745,7 @@ const llvm::fltSemantics &FP80Type::getFloatSemantics() const { llvm::TypeSize FP80Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, mlir::DataLayoutEntryListRef params) const { - return llvm::TypeSize::getFixed(16); + return llvm::TypeSize::getFixed(128); } uint64_t FP80Type::getABIAlignment(const mlir::DataLayout &dataLayout, @@ -766,6 +766,7 @@ const llvm::fltSemantics &FP128Type::getFloatSemantics() const { llvm::TypeSize FP128Type::getTypeSizeInBits(const mlir::DataLayout &dataLayout, mlir::DataLayoutEntryListRef params) const { + // FIXME: We probably want it to return 128. But we're lacking a test now. return llvm::TypeSize::getFixed(16); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index e07315d54a38..66e40e6ac5d0 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -65,4 +65,11 @@ CIRCXXABI::RecordArgABI getRecordArgABI(const StructType RT, return CXXABI.getRecordArgABI(RT); } +CIRCXXABI::RecordArgABI getRecordArgABI(mlir::Type ty, CIRCXXABI &CXXABI) { + auto st = mlir::dyn_cast(ty); + if (!st) + return CIRCXXABI::RAA_Default; + return getRecordArgABI(st, CXXABI); +} + } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h index 8005b153a544..8088a333c4a5 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.h @@ -33,6 +33,7 @@ mlir::Value emitRoundPointerUpToAlignment(cir::CIRBaseBuilderTy &builder, mlir::Type useFirstFieldIfTransparentUnion(mlir::Type Ty); CIRCXXABI::RecordArgABI getRecordArgABI(const StructType RT, CIRCXXABI &CXXABI); +CIRCXXABI::RecordArgABI getRecordArgABI(mlir::Type ty, CIRCXXABI &CXXABI); } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp index ba376d26b0fc..d9b574fac4e7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp @@ -46,7 +46,6 @@ std::unique_ptr getLowerModule(cir::VAArgOp op) { mlir::ModuleOp mo = op->getParentOfType(); if (!mo) return nullptr; - mlir::PatternRewriter rewriter(mo.getContext()); return cir::createLowerModule(mo, rewriter); } @@ -92,7 +91,7 @@ mlir::Value LoweringPrepareX86CXXABI::lowerVAArgX86_64( // Let's hope LLVM's va_arg instruction can take care of it. // Remove this when X86_64ABIInfo::classify can take care of every type. if (!mlir::isa(op.getType())) + StructType, LongDoubleType>(op.getType())) return nullptr; // Assume that va_list type is correct; should be pointer to LLVM type: @@ -107,7 +106,6 @@ mlir::Value LoweringPrepareX86CXXABI::lowerVAArgX86_64( std::unique_ptr lowerModule = getLowerModule(op); if (!lowerModule) return nullptr; - mlir::Type ty = op.getType(); // FIXME: How should we access the X86AVXABILevel? @@ -167,7 +165,6 @@ mlir::Value LoweringPrepareX86CXXABI::lowerVAArgX86_64( mlir::Block *contBlock = currentBlock->splitBlock(op); mlir::Block *inRegBlock = builder.createBlock(contBlock); mlir::Block *inMemBlock = builder.createBlock(contBlock); - builder.setInsertionPointToEnd(currentBlock); builder.create(loc, inRegs, inRegBlock, inMemBlock); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 39bd1716aa3b..11c1ed459147 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -165,6 +165,21 @@ void X86_64ABIInfo::classify(mlir::Type Ty, uint64_t OffsetBase, Class &Lo, Current = Class::SSE; return; + } else if (mlir::isa(Ty)) { + const llvm::fltSemantics *LDF = + &getContext().getTargetInfo().getLongDoubleFormat(); + if (LDF == &llvm::APFloat::IEEEquad()) { + Lo = Class::SSE; + Hi = Class::SSEUp; + } else if (LDF == &llvm::APFloat::x87DoubleExtended()) { + Lo = Class::X87; + Hi = Class::X87Up; + } else if (LDF == &llvm::APFloat::IEEEdouble()) { + Current = Class::SSE; + } else { + llvm_unreachable("unexpected long double representation!"); + } + return; } else if (mlir::isa(Ty)) { Current = Class::Integer; } else if (const auto RT = mlir::dyn_cast(Ty)) { @@ -267,6 +282,65 @@ void X86_64ABIInfo::classify(mlir::Type Ty, uint64_t OffsetBase, Class &Lo, cir_cconv_unreachable("NYI"); } +ABIArgInfo X86_64ABIInfo::getIndirectResult(mlir::Type ty, + unsigned freeIntRegs) const { + // If this is a scalar LLVM value then assume LLVM will pass it in the right + // place naturally. + // + // This assumption is optimistic, as there could be free registers available + // when we need to pass this argument in memory, and LLVM could try to pass + // the argument in the free register. This does not seem to happen currently, + // but this code would be much safer if we could mark the argument with + // 'onstack'. See PR12193. + if (!isAggregateTypeForABI(ty) /* && IsIllegalVectorType(Ty) &&*/ + /*!Ty->isBitIntType()*/) { + // FIXME: Handling enum type? + + return (isPromotableIntegerTypeForABI(ty) ? ABIArgInfo::getExtend(ty) + : ABIArgInfo::getDirect()); + } + + if (CIRCXXABI::RecordArgABI RAA = getRecordArgABI(ty, getCXXABI())) + return getNaturalAlignIndirect(ty, RAA == CIRCXXABI::RAA_DirectInMemory); + + // Compute the byval alignment. We specify the alignment of the byval in all + // cases so that the mid-level optimizer knows the alignment of the byval. + unsigned align = std::max(getContext().getTypeAlign(ty) / 8, 8U); + + // Attempt to avoid passing indirect results using byval when possible. This + // is important for good codegen. + // + // We do this by coercing the value into a scalar type which the backend can + // handle naturally (i.e., without using byval). + // + // For simplicity, we currently only do this when we have exhausted all of the + // free integer registers. Doing this when there are free integer registers + // would require more care, as we would have to ensure that the coerced value + // did not claim the unused register. That would require either reording the + // arguments to the function (so that any subsequent inreg values came first), + // or only doing this optimization when there were no following arguments that + // might be inreg. + // + // We currently expect it to be rare (particularly in well written code) for + // arguments to be passed on the stack when there are still free integer + // registers available (this would typically imply large structs being passed + // by value), so this seems like a fair tradeoff for now. + // + // We can revisit this if the backend grows support for 'onstack' parameter + // attributes. See PR12193. + if (freeIntRegs == 0) { + uint64_t size = getContext().getTypeSize(ty); + + // If this type fits in an eightbyte, coerce it into the matching integral + // type, which will end up on the stack (with alignment 8). + if (align == 8 && size <= 64) + return ABIArgInfo::getDirect( + cir::IntType::get(LT.getMLIRContext(), size, false)); + } + + return ABIArgInfo::getIndirect(align); +} + /// Return a type that will be passed by the backend in the low 8 bytes of an /// XMM register, corresponding to the SSE class. mlir::Type X86_64ABIInfo::GetSSETypeAtOffset(mlir::Type IRType, @@ -278,7 +352,7 @@ mlir::Type X86_64ABIInfo::GetSSETypeAtOffset(mlir::Type IRType, (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset; mlir::Type T0 = getFPTypeAtOffset(IRType, IROffset, TD); if (!T0 || mlir::isa(T0)) - return T0; // NOTE(cir): Not sure if this is correct. + return cir::DoubleType::get(LT.getMLIRContext()); mlir::Type T1 = {}; unsigned T0Size = TD.getTypeAllocSize(T0); @@ -296,6 +370,8 @@ mlir::Type X86_64ABIInfo::GetSSETypeAtOffset(mlir::Type IRType, return T0; } + return cir::DoubleType::get(LT.getMLIRContext()); + cir_cconv_unreachable("NYI"); } @@ -539,6 +615,22 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType( ++neededSSE; break; } + // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument + // on the stack. + case Class::Memory: + + // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or + // COMPLEX_X87, it is passed in memory. + case Class::X87: + case Class::ComplexX87: + if (getRecordArgABI(Ty, getCXXABI()) == CIRCXXABI::RAA_Indirect) + ++neededInt; + return getIndirectResult(Ty, freeIntRegs); + + case Class::SSEUp: + case Class::X87Up: + llvm_unreachable("Invalid classification for lo word."); + default: cir_cconv_assert_or_abort(!cir::MissingFeatures::X86ArgTypeClassification(), "NYI"); @@ -546,6 +638,11 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType( mlir::Type HighPart = {}; switch (Hi) { + case Class::Memory: + case Class::X87: + case Class::ComplexX87: + llvm_unreachable("Invalid classification for hi word."); + case Class::NoClass: break; @@ -558,8 +655,23 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType( return ABIArgInfo::getDirect(HighPart, 8); break; - default: - cir_cconv_unreachable("NYI"); + // X87Up generally doesn't occur here (long double is passed in + // memory), except in situations involving unions. + case Class::X87Up: + case Class::SSE: + ++neededSSE; + HighPart = GetSSETypeAtOffset(Ty, 8, Ty, 8); + + if (Lo == Class::NoClass) // Pass HighPart at offset 8 in memory. + return ABIArgInfo::getDirect(HighPart, 8); + break; + + // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the + // eightbyte is passed in the upper half of the last used SSE + // register. This only happens when 128-bit vectors are passed. + case Class::SSEUp: + llvm_unreachable("NYI && We need to implement GetByteVectorType"); + break; } // If a high part was specified, merge it together with the low part. It is diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86_64ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86_64ABIInfo.h index 201730519207..192ab750a548 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86_64ABIInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86_64ABIInfo.h @@ -69,6 +69,14 @@ class X86_64ABIInfo : public cir::ABIInfo { mlir::Type SourceTy, unsigned SourceOffset) const; + /// getIndirectResult - Give a source type \arg Ty, return a suitable result + /// such that the argument will be passed in memory. + /// + /// \param freeIntRegs - The number of free integer registers remaining + /// available. + ::cir::ABIArgInfo getIndirectResult(mlir::Type ty, + unsigned freeIntRegs) const; + /// The 0.98 ABI revision clarified a lot of ambiguities, /// unfortunately in ways that were not always consistent with /// certain previous compilers. In particular, platforms which diff --git a/clang/test/CIR/Lowering/var-arg-x86_64.c b/clang/test/CIR/Lowering/var-arg-x86_64.c index 992d5e82cd98..012e702d7f17 100644 --- a/clang/test/CIR/Lowering/var-arg-x86_64.c +++ b/clang/test/CIR/Lowering/var-arg-x86_64.c @@ -76,3 +76,55 @@ double f1(int n, ...) { // CIR: [[CASTED_ARG_P:%.+]] = cir.cast(bitcast, [[ARG]] // CIR: [[CASTED_ARG:%.+]] = cir.load align(16) [[CASTED_ARG_P]] // CIR: store [[CASTED_ARG]], [[RES]] +long double f2(int n, ...) { + va_list valist; + va_start(valist, n); + long double res = va_arg(valist, long double); + va_end(valist); + return res; +} + +// CHECK: define {{.*}}@f2 +// CHECK: [[RESULT:%.+]] = alloca x86_fp80 +// CHECK: [[VA_LIST_ALLOCA:%.+]] = alloca {{.*}}[[VA_LIST_TYPE]] +// CHECK: [[RES:%.+]] = alloca x86_fp80 +// CHECK: [[VA_LIST:%.+]] = getelementptr {{.*}} [[VA_LIST_ALLOCA]], i32 0 +// CHECK: call {{.*}}@llvm.va_start.p0(ptr [[VA_LIST]]) +// CHECK: [[VA_LIST2:%.+]] = getelementptr {{.*}} [[VA_LIST_ALLOCA]], i32 0 +// CHECK: [[OVERFLOW_AREA_P:%.+]] = getelementptr {{.*}} [[VA_LIST2]], i32 0, i32 2 +// CHECK: [[OVERFLOW_AREA:%.+]] = load ptr, ptr [[OVERFLOW_AREA_P]] +// Ptr Mask Operations +// CHECK: [[OVERFLOW_AREA_OFFSET_ALIGNED:%.+]] = getelementptr i8, ptr [[OVERFLOW_AREA]], i64 15 +// CHECK: [[OVERFLOW_AREA_OFFSET_ALIGNED_P:%.+]] = ptrtoint ptr [[OVERFLOW_AREA_OFFSET_ALIGNED]] to i32 +// CHECK: [[MASKED:%.+]] = and i32 [[OVERFLOW_AREA_OFFSET_ALIGNED_P]], -16 +// CHECK: [[DIFF:%.+]] = sub i32 [[OVERFLOW_AREA_OFFSET_ALIGNED_P]], [[MASKED]] +// CHECK: [[PTR_MASKED:%.+]] = getelementptr i8, ptr [[OVERFLOW_AREA_OFFSET_ALIGNED]], i32 [[DIFF]] +// CHECK: [[OVERFLOW_AREA_NEXT:%.+]] = getelementptr i8, ptr [[PTR_MASKED]], i64 16 +// CHECK: store ptr [[OVERFLOW_AREA_NEXT]], ptr [[OVERFLOW_AREA_P]] +// CHECK: [[VALUE:%.+]] = load x86_fp80, ptr [[PTR_MASKED]] +// CHECK: store x86_fp80 [[VALUE]], ptr [[RES]] +// CHECK: [[VA_LIST2:%.+]] = getelementptr {{.*}} [[VA_LIST_ALLOCA]], i32 0 +// CHECK: call {{.*}}@llvm.va_end.p0(ptr [[VA_LIST2]]) +// CHECK: [[VALUE2:%.+]] = load x86_fp80, ptr [[RES]] +// CHECK: store x86_fp80 [[VALUE2]], ptr [[RESULT]] +// CHECK: [[RETURN_VALUE:%.+]] = load x86_fp80, ptr [[RESULT]] +// CHECK: ret x86_fp80 [[RETURN_VALUE]] + +// CIR: cir.func @f2 +// CIR: [[VA_LIST_ALLOCA:%.+]] = cir.alloca !cir.array, !cir.ptr>, ["valist"] +// CIR: [[RES:%.+]] = cir.alloca !cir.long_double, !cir.ptr>, ["res" +// CIR: [[VASTED_VA_LIST:%.+]] = cir.cast(array_to_ptrdecay, [[VA_LIST_ALLOCA]] +// CIR: cir.va.start [[VASTED_VA_LIST]] +// CIR: [[VASTED_VA_LIST:%.+]] = cir.cast(array_to_ptrdecay, [[VA_LIST_ALLOCA]] +// CIR: [[OVERFLOW_AREA_P:%.+]] = cir.get_member [[VASTED_VA_LIST]][2] {name = "overflow_arg_area"} +// CIR-DAG: [[OVERFLOW_AREA:%.+]] = cir.load [[OVERFLOW_AREA_P]] +// CIR-DAG: [[CASTED:%.+]] = cir.cast(bitcast, [[OVERFLOW_AREA]] : !cir.ptr) +// CIR-DAG: [[CONSTANT:%.+]] = cir.const #cir.int<15> +// CIR-DAG: [[PTR_STRIDE:%.+]] = cir.ptr_stride([[CASTED]] {{.*}}[[CONSTANT]] +// CIR-DAG: [[MINUS_ALIGN:%.+]] = cir.const #cir.int<-16> +// CIR-DAG: [[ALIGNED:%.+]] = cir.ptr_mask([[PTR_STRIDE]], [[MINUS_ALIGN]] +// CIR: [[ALIGN:%.+]] = cir.const #cir.int<16> +// CIR: [[CAST_ALIGNED:%.+]] = cir.cast(bitcast, [[ALIGNED]] : !cir.ptr), !cir.ptr> +// CIR: [[CAST_ALIGNED_VALUE:%.+]] = cir.load [[CAST_ALIGNED]] +// CIR: cir.store [[CAST_ALIGNED_VALUE]], [[RES]] +// CIR. cir.via.end From d10a6856c6509672a7133c0604cf7f5a3e3cf2db Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 25 Nov 2024 15:23:51 -0500 Subject: [PATCH 2112/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vrshl_v and neon_vrshlq_v (#1151) They are rounding shift of vectors, and shift amount is from the least significant byte of the corresponding element of the second input vector. Thus, it is implemented in [its own ASM ](https://godbolt.org/z/v65sbeKaW). These make them not suitable to be lowered to CIR ShiftOp though it supports vector type now. --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 6 + clang/test/CIR/CodeGen/AArch64/neon.c | 344 +++++++++++------- 2 files changed, 218 insertions(+), 132 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 09ea7af3ab85..400be20c5c8f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2585,6 +2585,12 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( vTy, true /* extended */, true /* signed */)); break; } + case NEON::BI__builtin_neon_vrshl_v: + case NEON::BI__builtin_neon_vrshlq_v: { + intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.urshl" + : "aarch64.neon.srshl"; + break; + } } if (intrincsName.empty()) diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 004298b873e8..5b1043962429 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -3727,153 +3727,233 @@ uint64x2_t test_vshlq_u64(uint64x2_t a, int64x2_t b) { // return vqshlq_u64(a, b); // } -// NYI-LABEL: @test_vrshl_s8( -// NYI: [[VRSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i8> [[VRSHL_V_I]] -// int8x8_t test_vrshl_s8(int8x8_t a, int8x8_t b) { -// return vrshl_s8(a, b); -// } +int8x8_t test_vrshl_s8(int8x8_t a, int8x8_t b) { + return vrshl_s8(a, b); -// NYI-LABEL: @test_vrshl_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VRSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: [[VRSHL_V3_I:%.*]] = bitcast <4 x i16> [[VRSHL_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VRSHL_V2_I]] -// int16x4_t test_vrshl_s16(int16x4_t a, int16x4_t b) { -// return vrshl_s16(a, b); -// } + // CIR-LABEL: vrshl_s8 + // CIR: cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrshl_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VRSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: [[VRSHL_V3_I:%.*]] = bitcast <2 x i32> [[VRSHL_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VRSHL_V2_I]] -// int32x2_t test_vrshl_s32(int32x2_t a, int32x2_t b) { -// return vrshl_s32(a, b); -// } + // LLVM: {{.*}}test_vrshl_s8(<8 x i8>{{.*}}[[a:%.*]], <8 x i8>{{.*}}[[b:%.*]]) + // LLVM: [[VRSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> [[a]], <8 x i8> [[b]]) + // LLVM: ret <8 x i8> [[VRSHL_V_I]] +} -// NYI-LABEL: @test_vrshl_s64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> -// NYI: [[VRSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %a, <1 x i64> %b) -// NYI: [[VRSHL_V3_I:%.*]] = bitcast <1 x i64> [[VRSHL_V2_I]] to <8 x i8> -// NYI: ret <1 x i64> [[VRSHL_V2_I]] -// int64x1_t test_vrshl_s64(int64x1_t a, int64x1_t b) { -// return vrshl_s64(a, b); -// } +int16x4_t test_vrshl_s16(int16x4_t a, int16x4_t b) { + return vrshl_s16(a, b); -// NYI-LABEL: @test_vrshl_u8( -// NYI: [[VRSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i8> [[VRSHL_V_I]] -// uint8x8_t test_vrshl_u8(uint8x8_t a, int8x8_t b) { -// return vrshl_u8(a, b); -// } + // CIR-LABEL: vrshl_s16 + // CIR: cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrshl_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VRSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: [[VRSHL_V3_I:%.*]] = bitcast <4 x i16> [[VRSHL_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VRSHL_V2_I]] -// uint16x4_t test_vrshl_u16(uint16x4_t a, int16x4_t b) { -// return vrshl_u16(a, b); -// } + // LLVM: {{.*}}test_vrshl_s16(<4 x i16>{{.*}}[[a:%.*]], <4 x i16>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[b]] to <8 x i8> + // LLVM: [[VRSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> [[a]], <4 x i16> [[b]]) + // LLVM: [[VRSHL_V3_I:%.*]] = bitcast <4 x i16> [[VRSHL_V2_I]] to <8 x i8> + // LLVM: ret <4 x i16> [[VRSHL_V2_I]] +} -// NYI-LABEL: @test_vrshl_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VRSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: [[VRSHL_V3_I:%.*]] = bitcast <2 x i32> [[VRSHL_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VRSHL_V2_I]] -// uint32x2_t test_vrshl_u32(uint32x2_t a, int32x2_t b) { -// return vrshl_u32(a, b); -// } +int32x2_t test_vrshl_s32(int32x2_t a, int32x2_t b) { + return vrshl_s32(a, b); -// NYI-LABEL: @test_vrshl_u64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> -// NYI: [[VRSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %a, <1 x i64> %b) -// NYI: [[VRSHL_V3_I:%.*]] = bitcast <1 x i64> [[VRSHL_V2_I]] to <8 x i8> -// NYI: ret <1 x i64> [[VRSHL_V2_I]] -// uint64x1_t test_vrshl_u64(uint64x1_t a, int64x1_t b) { -// return vrshl_u64(a, b); -// } + // CIR-LABEL: vrshl_s32 + // CIR: cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrshlq_s8( -// NYI: [[VRSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %a, <16 x i8> %b) -// NYI: ret <16 x i8> [[VRSHLQ_V_I]] -// int8x16_t test_vrshlq_s8(int8x16_t a, int8x16_t b) { -// return vrshlq_s8(a, b); -// } + // LLVM: {{.*}}test_vrshl_s32(<2 x i32>{{.*}}[[a:%.*]], <2 x i32>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[b]] to <8 x i8> + // LLVM: [[VRSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> [[a]], <2 x i32> [[b]]) + // LLVM: [[VRSHL_V3_I:%.*]] = bitcast <2 x i32> [[VRSHL_V2_I]] to <8 x i8> + // LLVM: ret <2 x i32> [[VRSHL_V2_I]] +} -// NYI-LABEL: @test_vrshlq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VRSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %a, <8 x i16> %b) -// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VRSHLQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VRSHLQ_V2_I]] -// int16x8_t test_vrshlq_s16(int16x8_t a, int16x8_t b) { -// return vrshlq_s16(a, b); -// } +int64x1_t test_vrshl_s64(int64x1_t a, int64x1_t b) { + return vrshl_s64(a, b); -// NYI-LABEL: @test_vrshlq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VRSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %a, <4 x i32> %b) -// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VRSHLQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VRSHLQ_V2_I]] -// int32x4_t test_vrshlq_s32(int32x4_t a, int32x4_t b) { -// return vrshlq_s32(a, b); -// } + // CIR-LABEL: vrshl_s64 + // CIR: cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrshlq_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> -// NYI: [[VRSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %a, <2 x i64> %b) -// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VRSHLQ_V2_I]] to <16 x i8> -// NYI: ret <2 x i64> [[VRSHLQ_V2_I]] -// int64x2_t test_vrshlq_s64(int64x2_t a, int64x2_t b) { -// return vrshlq_s64(a, b); -// } + // LLVM: {{.*}}test_vrshl_s64(<1 x i64>{{.*}}[[a:%.*]], <1 x i64>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[b]] to <8 x i8> + // LLVM: [[VRSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> [[a]], <1 x i64> [[b]]) + // LLVM: [[VRSHL_V3_I:%.*]] = bitcast <1 x i64> [[VRSHL_V2_I]] to <8 x i8> + // LLVM: ret <1 x i64> [[VRSHL_V2_I]] +} -// NYI-LABEL: @test_vrshlq_u8( -// NYI: [[VRSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %a, <16 x i8> %b) -// NYI: ret <16 x i8> [[VRSHLQ_V_I]] -// uint8x16_t test_vrshlq_u8(uint8x16_t a, int8x16_t b) { -// return vrshlq_u8(a, b); -// } +uint8x8_t test_vrshl_u8(uint8x8_t a, int8x8_t b) { + return vrshl_u8(a, b); -// NYI-LABEL: @test_vrshlq_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VRSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %a, <8 x i16> %b) -// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VRSHLQ_V2_I]] to <16 x i8> -// NYI: ret <8 x i16> [[VRSHLQ_V2_I]] -// uint16x8_t test_vrshlq_u16(uint16x8_t a, int16x8_t b) { -// return vrshlq_u16(a, b); -// } + // CIR-LABEL: vrshl_u8 + // CIR: cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vrshlq_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VRSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %a, <4 x i32> %b) -// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VRSHLQ_V2_I]] to <16 x i8> -// NYI: ret <4 x i32> [[VRSHLQ_V2_I]] -// uint32x4_t test_vrshlq_u32(uint32x4_t a, int32x4_t b) { -// return vrshlq_u32(a, b); -// } + // LLVM: {{.*}}test_vrshl_u8(<8 x i8>{{.*}}[[a:%.*]], <8 x i8>{{.*}}[[b:%.*]]) + // LLVM: [[VRSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> [[a]], <8 x i8> [[b]]) + // LLVM: ret <8 x i8> [[VRSHL_V_I]] +} -// NYI-LABEL: @test_vrshlq_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> -// NYI: [[VRSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %a, <2 x i64> %b) -// NYI: [[VRSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VRSHLQ_V2_I]] to <16 x i8> -// NYI: ret <2 x i64> [[VRSHLQ_V2_I]] -// uint64x2_t test_vrshlq_u64(uint64x2_t a, int64x2_t b) { -// return vrshlq_u64(a, b); -// } +uint16x4_t test_vrshl_u16(uint16x4_t a, int16x4_t b) { + return vrshl_u16(a, b); + + // CIR-LABEL: vrshl_u16 + // CIR: cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshl_u16(<4 x i16>{{.*}}[[a:%.*]], <4 x i16>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[b]] to <8 x i8> + // LLVM: [[VRSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> [[a]], <4 x i16> [[b]]) + // LLVM: [[VRSHL_V3_I:%.*]] = bitcast <4 x i16> + // LLVM: ret <4 x i16> [[VRSHL_V2_I]] +} + +uint32x2_t test_vrshl_u32(uint32x2_t a, int32x2_t b) { + return vrshl_u32(a, b); + + // CIR-LABEL: vrshl_u32 + // CIR: cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshl_u32(<2 x i32>{{.*}}[[a:%.*]], <2 x i32>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[b]] to <8 x i8> + // LLVM: [[VRSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> [[a]], <2 x i32> [[b]]) + // LLVM: [[VRSHL_V3_I:%.*]] = bitcast <2 x i32> + // LLVM: ret <2 x i32> [[VRSHL_V2_I]] +} + +uint64x1_t test_vrshl_u64(uint64x1_t a, int64x1_t b) { + return vrshl_u64(a, b); + + // CIR-LABEL: vrshl_u64 + // CIR: cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshl_u64(<1 x i64>{{.*}}[[a:%.*]], <1 x i64>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[b]] to <8 x i8> + // LLVM: [[VRSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> [[a]], <1 x i64> [[b]]) + // LLVM: [[VRSHL_V3_I:%.*]] = bitcast <1 x i64> + // LLVM: ret <1 x i64> [[VRSHL_V2_I]] +} + +int8x16_t test_vrshlq_s8(int8x16_t a, int8x16_t b) { + return vrshlq_s8(a, b); + + // CIR-LABEL: vrshlq_s8 + // CIR: cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshlq_s8(<16 x i8>{{.*}}[[a:%.*]], <16 x i8>{{.*}}[[b:%.*]]) + // LLVM: [[VRSHL_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> [[a]], <16 x i8> [[b]]) + // LLVM: ret <16 x i8> [[VRSHL_V_I]] +} + +int16x8_t test_vrshlq_s16(int16x8_t a, int16x8_t b) { + return vrshlq_s16(a, b); + + // CIR-LABEL: vrshlq_s16 + // CIR: cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshlq_s16(<8 x i16>{{.*}}[[a:%.*]], <8 x i16>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[b]] to <16 x i8> + // LLVM: [[VRSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> [[a]], <8 x i16> [[b]]) + // LLVM: [[VRSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VRSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VRSHLQ_V2_I]] +} + +int32x4_t test_vrshlq_s32(int32x4_t a, int32x4_t b) { + return vrshlq_s32(a, b); + + // CIR-LABEL: vrshlq_s32 + // CIR: cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshlq_s32(<4 x i32>{{.*}}[[a:%.*]], <4 x i32>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[b]] to <16 x i8> + // LLVM: [[VRSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> [[a]], <4 x i32> [[b]]) + // LLVM: [[VRSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VRSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VRSHLQ_V2_I]] +} + +int64x2_t test_vrshlq_s64(int64x2_t a, int64x2_t b) { + return vrshlq_s64(a, b); + + // CIR-LABEL: vrshlq_s64 + // CIR: cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshlq_s64(<2 x i64>{{.*}}[[a:%.*]], <2 x i64>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[b]] to <16 x i8> + // LLVM: [[VRSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> [[a]], <2 x i64> [[b]]) + // LLVM: [[VRSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VRSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <2 x i64> [[VRSHLQ_V2_I]] +} + +uint8x16_t test_vrshlq_u8(uint8x16_t a, int8x16_t b) { + return vrshlq_u8(a, b); + + // CIR-LABEL: vrshlq_u8 + // CIR: cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshlq_u8(<16 x i8>{{.*}}[[a:%.*]], <16 x i8>{{.*}}[[b:%.*]]) + // LLVM: [[VRSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> [[a]], <16 x i8> [[b]]) + // LLVM: ret <16 x i8> [[VRSHLQ_V_I]] +} + +uint16x8_t test_vrshlq_u16(uint16x8_t a, int16x8_t b) { + return vrshlq_u16(a, b); + + // CIR-LABEL: vrshlq_u16 + // CIR: cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshlq_u16(<8 x i16>{{.*}}[[a:%.*]], <8 x i16>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[b]] to <16 x i8> + // LLVM: [[VRSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> [[a]], <8 x i16> [[b]]) + // LLVM: [[VRSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VRSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <8 x i16> [[VRSHLQ_V2_I]] +} + +uint32x4_t test_vrshlq_u32(uint32x4_t a, int32x4_t b) { + return vrshlq_u32(a, b); + + // CIR-LABEL: vrshlq_u32 + // CIR: cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshlq_u32(<4 x i32>{{.*}}[[a:%.*]], <4 x i32>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[b]] to <16 x i8> + // LLVM: [[VRSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> [[a]], <4 x i32> [[b]]) + // LLVM: [[VRSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VRSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <4 x i32> [[VRSHLQ_V2_I]] +} + +uint64x2_t test_vrshlq_u64(uint64x2_t a, int64x2_t b) { + return vrshlq_u64(a, b); + + // CIR-LABEL: vrshlq_u64 + // CIR: cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}} : + // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vrshlq_u64(<2 x i64>{{.*}}[[a:%.*]], <2 x i64>{{.*}}[[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[b]] to <16 x i8> + // LLVM: [[VRSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> [[a]], <2 x i64> [[b]]) + // LLVM: [[VRSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VRSHLQ_V2_I]] to <16 x i8> + // LLVM: ret <2 x i64> [[VRSHLQ_V2_I]] +} // NYI-LABEL: @test_vqrshl_s8( // NYI: [[VQRSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshl.v8i8(<8 x i8> %a, <8 x i8> %b) From f5084d2add896c09492edff5975d42acb4a36d82 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Mon, 25 Nov 2024 12:25:24 -0800 Subject: [PATCH 2113/2301] [CIR][CIRGen] Fix a stack-use-after-free (#1155) Caught by ASAN. We were creating a reference to an object going out of scope. Incidentally, this feels like the sort of issue the lifetime checker will be great for detecting :) --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index ae42f2ff411a..deea4159ff36 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -385,11 +385,10 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( // element. But in CIR, the union has the information for all members. So if // we only pass a single init element, we may be in trouble. We solve the // problem by appending placeholder attribute for the uninitialized fields. + llvm::SmallVector UnionElemsStorage; if (auto desired = dyn_cast(DesiredTy); desired && desired.isUnion() && Elems.size() != desired.getNumElements()) { - llvm::SmallVector UnionElemsStorage; - for (auto elemTy : desired.getMembers()) { if (auto Ty = mlir::dyn_cast(Elems.back()); Ty && Ty.getType() == elemTy) @@ -400,8 +399,9 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( } UnpackedElems = UnionElemsStorage; - } else + } else { UnpackedElems = Elems; + } llvm::SmallVector UnpackedElemStorage; if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) { From 2ae5f4f2a93d1abbe56e04f1f3a3400434e9136e Mon Sep 17 00:00:00 2001 From: 7mile Date: Tue, 26 Nov 2024 04:28:03 +0800 Subject: [PATCH 2114/2301] [CIR][Dialect][NFC] Remove redundant module attribute `llvm.data_layout` (#1156) Since LLVM specific data layout string is not proper in ClangIR, this PR replaces it with existing MLIR DLTI equivalent and eliminate the redundancy. Although the constructor of `LowerModule` of TargetLowering library requires a llvm data layout string, it is not used currently. (I believe it would also not matter in the future.) Therefore, this PR has no functional change. --- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 2 -- .../Transforms/TargetLowering/LowerModule.cpp | 15 +++------------ .../Transforms/TargetLowering/LowerModule.h | 2 +- .../Transforms/TargetLowering/LowerTypes.cpp | 2 +- .../Transforms/TargetLowering/LowerTypes.h | 2 +- clang/test/CIR/CodeGen/dlti.c | 2 -- clang/test/CIR/Lowering/address-space.cir | 2 +- clang/test/CIR/Lowering/exceptions.cir | 2 +- clang/test/CIR/Lowering/hello.cir | 2 +- clang/test/CIR/Tools/cir-translate-triple.cir | 3 ++- 10 files changed, 11 insertions(+), 23 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 0266e893909a..a4c6bd16df52 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -42,8 +42,6 @@ CIRGenerator::~CIRGenerator() { static void setMLIRDataLayout(mlir::ModuleOp &mod, const llvm::DataLayout &dl) { auto *context = mod.getContext(); - mod->setAttr(mlir::LLVM::LLVMDialect::getDataLayoutAttrName(), - mlir::StringAttr::get(context, dl.getStringRepresentation())); mlir::DataLayoutSpecInterface dlSpec = mlir::translateDataLayout(dl, context); mod->setAttr(mlir::DLTIDialect::kDataLayoutAttrName, dlSpec); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index a9266aa4ada8..f127195f416c 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -87,12 +87,10 @@ createTargetLoweringInfo(LowerModule &LM) { } LowerModule::LowerModule(clang::LangOptions opts, mlir::ModuleOp &module, - mlir::StringAttr DL, std::unique_ptr target, mlir::PatternRewriter &rewriter) : context(module, opts), module(module), Target(std::move(target)), - ABI(createCXXABI(*this)), types(*this, DL.getValue()), - rewriter(rewriter) { + ABI(createCXXABI(*this)), types(*this), rewriter(rewriter) { context.initBuiltinTypes(*Target); } @@ -226,13 +224,6 @@ llvm::LogicalResult LowerModule::rewriteFunctionCall(CallOp callOp, // TODO: not to create it every time std::unique_ptr createLowerModule(mlir::ModuleOp module, mlir::PatternRewriter &rewriter) { - assert(module->getAttr(mlir::LLVM::LLVMDialect::getDataLayoutAttrName()) && - "Missing data layout attribute"); - - // Fetch the LLVM data layout string. - auto dataLayoutStr = mlir::cast( - module->getAttr(mlir::LLVM::LLVMDialect::getDataLayoutAttrName())); - // Fetch target information. llvm::Triple triple(mlir::cast( module->getAttr(cir::CIRDialect::getTripleAttrName())) @@ -247,8 +238,8 @@ createLowerModule(mlir::ModuleOp module, mlir::PatternRewriter &rewriter) { cir_cconv_assert(!cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; - return std::make_unique(langOpts, module, dataLayoutStr, - std::move(targetInfo), rewriter); + return std::make_unique(langOpts, module, std::move(targetInfo), + rewriter); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index 16f5a099cf63..eb206f2750a7 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -43,7 +43,7 @@ class LowerModule { public: LowerModule(clang::LangOptions opts, mlir::ModuleOp &module, - mlir::StringAttr DL, std::unique_ptr target, + std::unique_ptr target, mlir::PatternRewriter &rewriter); ~LowerModule() = default; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index f4b9c73cf43f..0c2233ef84c9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -33,7 +33,7 @@ unsigned LowerTypes::clangCallConvToLLVMCallConv(clang::CallingConv CC) { } } -LowerTypes::LowerTypes(LowerModule &LM, llvm::StringRef DLString) +LowerTypes::LowerTypes(LowerModule &LM) : LM(LM), context(LM.getContext()), Target(LM.getTarget()), CXXABI(LM.getCXXABI()), TheABIInfo(LM.getTargetLoweringInfo().getABIInfo()), diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h index 751f95e67efd..a1c23718eadd 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.h @@ -52,7 +52,7 @@ class LowerTypes { const ABIInfo &getABIInfo() const { return TheABIInfo; } public: - LowerTypes(LowerModule &LM, llvm::StringRef DLString); + LowerTypes(LowerModule &LM); ~LowerTypes() = default; const cir::CIRDataLayout &getDataLayout() const { return DL; } diff --git a/clang/test/CIR/CodeGen/dlti.c b/clang/test/CIR/CodeGen/dlti.c index 2267b992f42e..a613b9e49cc7 100644 --- a/clang/test/CIR/CodeGen/dlti.c +++ b/clang/test/CIR/CodeGen/dlti.c @@ -24,5 +24,3 @@ void foo() {} // CHECK-DAG: "dlti.stack_alignment" = 128 : i64 // CHECK-DAG: "dlti.endianness" = "little" // >, -// CHECK-DAG: llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" - diff --git a/clang/test/CIR/Lowering/address-space.cir b/clang/test/CIR/Lowering/address-space.cir index 733c6ddda940..ac80912bdbd9 100644 --- a/clang/test/CIR/Lowering/address-space.cir +++ b/clang/test/CIR/Lowering/address-space.cir @@ -5,7 +5,7 @@ module attributes { cir.triple = "spirv64-unknown-unknown", - llvm.data_layout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1" + dlti.dl_spec = #dlti.dl_spec : vector<2xi64>, i32 = dense<32> : vector<2xi64>, i1 = dense<8> : vector<2xi64>, i8 = dense<8> : vector<2xi64>, f128 = dense<128> : vector<2xi64>, f64 = dense<64> : vector<2xi64>, f16 = dense<16> : vector<2xi64>, i64 = dense<64> : vector<2xi64>, !llvm.ptr = dense<64> : vector<4xi64>, "dlti.endianness" = "little", "dlti.global_memory_space" = 1 : ui64> } { cir.global external addrspace(offload_global) @addrspace1 = #cir.int<1> : !s32i // LLVM: @addrspace1 = addrspace(1) global i32 diff --git a/clang/test/CIR/Lowering/exceptions.cir b/clang/test/CIR/Lowering/exceptions.cir index 1d99e9e2e620..2c40ac96d351 100644 --- a/clang/test/CIR/Lowering/exceptions.cir +++ b/clang/test/CIR/Lowering/exceptions.cir @@ -8,7 +8,7 @@ !u8i = !cir.int !void = !cir.void -module @"try-catch.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior, cir.triple = "x86_64-unknown-linux-gnu", dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>, #dlti.dl_entry<"dlti.endianness", "little">>, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"} { +module @"try-catch.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior, cir.triple = "x86_64-unknown-linux-gnu", dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>, #dlti.dl_entry<"dlti.endianness", "little">>} { cir.global "private" constant external @_ZTIi : !cir.ptr cir.global "private" constant external @_ZTIPKc : !cir.ptr cir.func private @_Z8divisionii(!s32i, !s32i) -> !cir.double diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 195cbf28fbde..868261307b87 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -3,7 +3,7 @@ !s32i = !cir.int !s8i = !cir.int -module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior, dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i32>>, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"} { +module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior, dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i32>>} { cir.func private @printf(!cir.ptr, ...) -> !s32i cir.global "private" constant internal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.func @main() -> !s32i { diff --git a/clang/test/CIR/Tools/cir-translate-triple.cir b/clang/test/CIR/Tools/cir-translate-triple.cir index a647df165aba..175e3cc7cded 100644 --- a/clang/test/CIR/Tools/cir-translate-triple.cir +++ b/clang/test/CIR/Tools/cir-translate-triple.cir @@ -3,7 +3,7 @@ module attributes { cir.triple = "x86_64-unknown-linux-gnu", - llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" + dlti.dl_spec = #dlti.dl_spec : vector<2xi64>, f80 = dense<128> : vector<2xi64>, !llvm.ptr<271> = dense<32> : vector<4xi64>, !llvm.ptr<272> = dense<64> : vector<4xi64>, i64 = dense<64> : vector<2xi64>, f16 = dense<16> : vector<2xi64>, i32 = dense<32> : vector<2xi64>, f128 = dense<128> : vector<2xi64>, !llvm.ptr<270> = dense<32> : vector<4xi64>, f64 = dense<64> : vector<2xi64>, !llvm.ptr = dense<64> : vector<4xi64>, i1 = dense<8> : vector<2xi64>, i8 = dense<8> : vector<2xi64>, i16 = dense<16> : vector<2xi64>, "dlti.stack_alignment" = 128 : i64, "dlti.endianness" = "little"> } { cir.func @foo() { cir.return @@ -11,3 +11,4 @@ module attributes { } // LLVM-DAG: target triple = "x86_64-unknown-linux-gnu" +// LLVM-DAG: target datalayout = "{{.*}}" From 6d7d313e6614493b37d1cc68c14a47804fe18df0 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 25 Nov 2024 16:29:35 -0500 Subject: [PATCH 2115/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqrshrn_n_v (#1161) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 9 +- clang/test/CIR/CodeGen/AArch64/neon.c | 139 ++++++++++++------ 2 files changed, 99 insertions(+), 49 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 400be20c5c8f..befb34eebdef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3939,7 +3939,14 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, SInt32Ty}, Ops, "aarch64.neon.rshrn", ty, getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vqrshrn_n_v: - llvm_unreachable("NEON::BI__builtin_neon_vqrshrn_n_v NYI"); + return emitNeonCall( + builder, + {builder.getExtendedOrTruncatedElementVectorType( + vTy, true /* extend */, + mlir::cast(vTy.getEltType()).isSigned()), + SInt32Ty}, + Ops, usgn ? "aarch64.neon.uqrshrn" : "aarch64.neon.sqrshrn", ty, + getLoc(E->getExprLoc())); case NEON::BI__builtin_neon_vrndah_f16: { llvm_unreachable("NEON::BI__builtin_neon_vrndah_f16 NYI"); } diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 5b1043962429..8a6447ff2d39 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -7150,59 +7150,102 @@ uint32x2_t test_vqshrn_n_u64(uint64x2_t a) { // return vqshrn_high_n_u64(a, b, 19); // } -// NYI-LABEL: @test_vqrshrn_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VQRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> [[VQRSHRN_N]], i32 3) -// NYI: ret <8 x i8> [[VQRSHRN_N1]] -// int8x8_t test_vqrshrn_n_s16(int16x8_t a) { -// return vqrshrn_n_s16(a, 3); -// } +int8x8_t test_vqrshrn_n_s16(int16x8_t a) { + return vqrshrn_n_s16(a, 3); -// NYI-LABEL: @test_vqrshrn_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VQRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> [[VQRSHRN_N]], i32 9) -// NYI: ret <4 x i16> [[VQRSHRN_N1]] -// int16x4_t test_vqrshrn_n_s32(int32x4_t a) { -// return vqrshrn_n_s32(a, 9); -// } + // CIR-LABEL: vqrshrn_n_s16 + // CIR: [[AMT:%.*]] = cir.const #cir.int<3> : !s32i + // CIR: [[VQRSHRN_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[VQRSHRN_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.sqrshrn" [[VQRSHRN_N]], [[AMT]] : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector -// NYI-LABEL: @test_vqrshrn_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VQRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> [[VQRSHRN_N]], i32 19) -// NYI: ret <2 x i32> [[VQRSHRN_N1]] -// int32x2_t test_vqrshrn_n_s64(int64x2_t a) { -// return vqrshrn_n_s64(a, 19); -// } + // LLVM: {{.*}}test_vqrshrn_n_s16(<8 x i16>{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[a]] to <16 x i8> + // LLVM: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VQRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> [[VQRSHRN_N]], i32 3) + // LLVM: ret <8 x i8> [[VQRSHRN_N1]] +} -// NYI-LABEL: @test_vqrshrn_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[VQRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> [[VQRSHRN_N]], i32 3) -// NYI: ret <8 x i8> [[VQRSHRN_N1]] -// uint8x8_t test_vqrshrn_n_u16(uint16x8_t a) { -// return vqrshrn_n_u16(a, 3); -// } +int16x4_t test_vqrshrn_n_s32(int32x4_t a) { + return vqrshrn_n_s32(a, 9); -// NYI-LABEL: @test_vqrshrn_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[VQRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> [[VQRSHRN_N]], i32 9) -// NYI: ret <4 x i16> [[VQRSHRN_N1]] -// uint16x4_t test_vqrshrn_n_u32(uint32x4_t a) { -// return vqrshrn_n_u32(a, 9); -// } + // CIR-LABEL: vqrshrn_n_s32 + // CIR: [[AMT:%.*]] = cir.const #cir.int<9> : !s32i + // CIR: [[VQRSHRN_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[VQRSHRN_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.sqrshrn" [[VQRSHRN_N]], [[AMT]] : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector -// NYI-LABEL: @test_vqrshrn_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[VQRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> [[VQRSHRN_N]], i32 19) -// NYI: ret <2 x i32> [[VQRSHRN_N1]] -// uint32x2_t test_vqrshrn_n_u64(uint64x2_t a) { -// return vqrshrn_n_u64(a, 19); -// } + // LLVM: {{.*}}test_vqrshrn_n_s32(<4 x i32>{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VQRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> [[VQRSHRN_N]], i32 9) + // LLVM: ret <4 x i16> [[VQRSHRN_N1]] + +} + +int32x2_t test_vqrshrn_n_s64(int64x2_t a) { + return vqrshrn_n_s64(a, 19); + + // CIR-LABEL: vqrshrn_n_s64 + // CIR: [[AMT:%.*]] = cir.const #cir.int<19> : !s32 + // CIR: [[VQRSHRN_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[VQRSHRN_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.sqrshrn" [[VQRSHRN_N]], [[AMT]] : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM: {{.*}}test_vqrshrn_n_s64(<2 x i64>{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[a]] to <16 x i8> + // LLVM: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VQRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> [[VQRSHRN_N]], i32 19) + // LLVM: ret <2 x i32> [[VQRSHRN_N1]] +} + +uint8x8_t test_vqrshrn_n_u16(uint16x8_t a) { + return vqrshrn_n_u16(a, 3); + + // CIR-LABEL: vqrshrn_n_u16 + // CIR: [[AMT:%.*]] = cir.const #cir.int<3> : !s32 + // CIR: [[VQRSHRN_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[VQRSHRN_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.uqrshrn" [[VQRSHRN_N]], [[AMT]] : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM: {{.*}}test_vqrshrn_n_u16(<8 x i16>{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[a]] to <16 x i8> + // LLVM: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[VQRSHRN_N1:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> [[VQRSHRN_N]], i32 3) + // LLVM: ret <8 x i8> [[VQRSHRN_N1]] +} + +uint16x4_t test_vqrshrn_n_u32(uint32x4_t a) { + return vqrshrn_n_u32(a, 9); + + // CIR-LABEL: vqrshrn_n_u32 + // CIR: [[AMT:%.*]] = cir.const #cir.int<9> : !s32 + // CIR: [[VQRSHRN_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[VQRSHRN_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.uqrshrn" [[VQRSHRN_N]], [[AMT]] : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM: {{.*}}test_vqrshrn_n_u32(<4 x i32>{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[VQRSHRN_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> [[VQRSHRN_N]], i32 9) + // LLVM: ret <4 x i16> [[VQRSHRN_N1]] +} + +uint32x2_t test_vqrshrn_n_u64(uint64x2_t a) { + return vqrshrn_n_u64(a, 19); + + // CIR-LABEL: vqrshrn_n_u64 + // CIR: [[AMT:%.*]] = cir.const #cir.int<19> : !s32 + // CIR: [[VQRSHRN_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[VQRSHRN_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.uqrshrn" [[VQRSHRN_N]], [[AMT]] : + // CIR-SAME: (!cir.vector, !s32i) -> !cir.vector + + // LLVM: {{.*}}test_vqrshrn_n_u64(<2 x i64>{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[a]] to <16 x i8> + // LLVM: [[VQRSHRN_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[VQRSHRN_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> [[VQRSHRN_N]], i32 19) + // LLVM: ret <2 x i32> [[VQRSHRN_N1]] +} // NYI-LABEL: @test_vqrshrn_high_n_s16( // NYI: [[TMP0:%.*]] = bitcast <8 x i16> %b to <16 x i8> From dff40b1d78310c9cb9049917553f823ebbb3562a Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Mon, 25 Nov 2024 13:30:14 -0800 Subject: [PATCH 2116/2301] [CIR][TargetLowering] Fix use iteration (#1162) Based on https://mlir.llvm.org/docs/Tutorials/UnderstandingTheIRStructure/#traversing-the-def-use-chains, the users of an op are linked together in a doubly-linked list, so replacing a user while iterating the users causes a use-after-free. Store the users in a worklist and process them afterwards instead to avoid this. --- .../Transforms/TargetLowering/LowerFunction.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index c85d95ba2ddd..a3f9f94a86b0 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -1282,12 +1282,14 @@ mlir::Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, mlir::cast(RetTy).getNumElements() != 0) { RetVal = newCallOp.getResult(); - for (auto user : Caller.getOperation()->getUsers()) { - if (auto storeOp = mlir::dyn_cast(user)) { - auto DestPtr = createCoercedBitcast(storeOp.getAddr(), - RetVal.getType(), *this); - rewriter.replaceOpWithNewOp(storeOp, RetVal, DestPtr); - } + llvm::SmallVector workList; + for (auto *user : Caller->getUsers()) + if (auto storeOp = mlir::dyn_cast(user)) + workList.push_back(storeOp); + for (StoreOp storeOp : workList) { + auto destPtr = + createCoercedBitcast(storeOp.getAddr(), RetVal.getType(), *this); + rewriter.replaceOpWithNewOp(storeOp, RetVal, destPtr); } } From 02ea9aaee44113e97aaf3c9c8d983065b0b85a28 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Mon, 25 Nov 2024 13:31:10 -0800 Subject: [PATCH 2117/2301] [CIR][FlattenCFG] Fix rewrite API misuse (#1163) We need to perform all erasures via the rewrite API instead of directly for the framework to work correctly. This was detected by a combination of `-DMLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS=ON` [1] and ASAN. [1] https://mlir.llvm.org/getting_started/Debugging/#detecting-invalid-api-usage --- clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index bd62d84320fd..6a7b00901aad 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -313,7 +313,7 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { if (!callOp.getCleanup().empty()) { mlir::Block *cleanupBlock = &callOp.getCleanup().getBlocks().back(); auto cleanupYield = cast(cleanupBlock->getTerminator()); - cleanupYield->erase(); + rewriter.eraseOp(cleanupYield); rewriter.mergeBlocks(cleanupBlock, landingPadBlock); rewriter.setInsertionPointToEnd(landingPadBlock); } From 940390daa08e0eb7866b007a7f6f14a97255b85c Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 25 Nov 2024 16:31:48 -0500 Subject: [PATCH 2118/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vabs_v and neon_vabsq_v (#1081) Now implement the same as [OG](https://github.com/llvm/clangir/blob/7619b20d7461b2d46c17a3154ec4b2f12ca35ea5/clang/lib/CodeGen/CGBuiltin.cpp#L7886), which is to call llvm aarch64 intrinsic which would eventually become [an ARM64 instruction](https://developer.arm.com/documentation/ddi0596/2021-03/SIMD-FP-Instructions/ABS--Absolute-value--vector--?lang=en). However, clearly there is an alternative, which is to extend CIR::AbsOp and CIR::FAbsOp to support vector type and only lower it at LLVM Lowering stage to either [LLVM::FAbsOP ](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrfabs-llvmfabsop) or [[LLVM::AbsOP ]](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrabs-llvmabsop), provided LLVM dialect could do the right thing of TargetLowering by translating to llvm aarch64 intrinsic eventually. The question is whether it is worth doing it? Any way, put up this diff for suggestions and ideas. --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 9 ++ clang/test/CIR/CodeGen/AArch64/neon-arith.c | 143 ++++++++++++++++++ 2 files changed, 152 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index befb34eebdef..ce6bd0d8db55 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2360,6 +2360,15 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( return emitNeonSplat(builder, getLoc(e->getExprLoc()), ops[0], ops[1], numElements); } + case NEON::BI__builtin_neon_vabs_v: + case NEON::BI__builtin_neon_vabsq_v: { + mlir::Location loc = getLoc(e->getExprLoc()); + ops[0] = builder.createBitcast(ops[0], vTy); + if (mlir::isa(vTy.getEltType())) { + return builder.create(loc, ops[0]); + } + return builder.create(loc, ops[0]); + } case NEON::BI__builtin_neon_vmovl_v: { cir::VectorType dTy = builder.getExtendedOrTruncatedElementVectorType( vTy, false /* truncate */, diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index fbc9ce71343d..3f839cce90fc 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -739,3 +739,146 @@ uint64x2_t test_vpaddlq_u32(uint32x4_t a) { // LLVM: [[VPADDL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> [[A]]) // LLVM: ret <2 x i64> [[VPADDL1_I]] } + +int8x8_t test_vabs_s8(int8x8_t a) { + return vabs_s8(a); + + // CIR-LABEL: vabs_s8 + // CIR: cir.abs {{%.*}} : !cir.vector + + // LLVM: {{.*}}test_vabs_s8(<8 x i8>{{.*}}[[a:%.*]]) + // LLVM: [[VABS_I:%.*]] = call <8 x i8> @llvm.abs.v8i8(<8 x i8> [[a]], i1 false) + // LLVM: ret <8 x i8> [[VABS_I]] +} + +int8x16_t test_vabsq_s8(int8x16_t a) { + return vabsq_s8(a); + + // CIR-LABEL: vabsq_s8 + // CIR: cir.abs {{%.*}} : !cir.vector + + // LLVM: {{.*}}test_vabsq_s8(<16 x i8>{{.*}}[[a:%.*]]) + // LLVM: [[VABS_I:%.*]] = call <16 x i8> @llvm.abs.v16i8(<16 x i8> [[a]], i1 false) + // LLVM: ret <16 x i8> [[VABS_I]] +} + +int16x4_t test_vabs_s16(int16x4_t a) { + return vabs_s16(a); + + // CIR-LABEL: vabs_s16 + // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.abs [[TMP0]] : !cir.vector + + // LLVM: {{.*}}test_vabs_s16(<4 x i16>{{.*}}[[a:%.*]]) + // LLVM: [[VABS1_I:%.*]] = call <4 x i16> @llvm.abs.v4i16(<4 x i16> [[a]], i1 false) + // LLVM: ret <4 x i16> [[VABS1_I]] +} + +int16x8_t test_vabsq_s16(int16x8_t a) { + return vabsq_s16(a); + + // CIR-LABEL: vabsq_s16 + // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.abs [[TMP0]] : !cir.vector + + // LLVM: {{.*}}test_vabsq_s16(<8 x i16>{{.*}}[[a:%.*]]) + // LLVM: [[VABS1_I:%.*]] = call <8 x i16> @llvm.abs.v8i16(<8 x i16> [[a]], i1 false) + // LLVM: ret <8 x i16> [[VABS1_I]] +} + +int32x2_t test_vabs_s32(int32x2_t a) { + return vabs_s32(a); + + // CIR-LABEL: vabs_s32 + // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.abs [[TMP0]] : !cir.vector + + // LLVM: {{.*}}test_vabs_s32(<2 x i32>{{.*}}[[a:%.*]]) + // LLVM: [[VABS1_I:%.*]] = call <2 x i32> @llvm.abs.v2i32(<2 x i32> [[a]], i1 false) + // LLVM: ret <2 x i32> [[VABS1_I]] +} + +int32x4_t test_vabsq_s32(int32x4_t a) { + return vabsq_s32(a); + + // CIR-LABEL: vabsq_s32 + // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.abs [[TMP0]] : !cir.vector + + // LLVM: {{.*}}test_vabsq_s32(<4 x i32>{{.*}}[[a:%.*]]) + // LLVM: [[VABS1_I:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[a]], i1 false) + // LLVM: ret <4 x i32> [[VABS1_I]] +} + +int64x1_t test_vabs_s64(int64x1_t a) { + return vabs_s64(a); + + // CIR-LABEL: vabs_s64 + // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.abs [[TMP0]] : !cir.vector + + // LLVM: {{.*}}test_vabs_s64(<1 x i64>{{.*}}[[a:%.*]]) + // LLVM: [[VABS1_I:%.*]] = call <1 x i64> @llvm.abs.v1i64(<1 x i64> [[a]], i1 false) + // LLVM: ret <1 x i64> [[VABS1_I]] +} + +int64x2_t test_vabsq_s64(int64x2_t a) { + return vabsq_s64(a); + + // CIR-LABEL: vabsq_s64 + // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.abs [[TMP0]] : !cir.vector + + // LLVM: {{.*}}test_vabsq_s64(<2 x i64>{{.*}}[[a:%.*]]) + // LLVM: [[VABS1_I:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> [[a]], i1 false) + // LLVM: ret <2 x i64> [[VABS1_I]] +} + + +float32x2_t test_vabs_f32(float32x2_t a) { + return vabs_f32(a); + + // CIR-LABEL: vabs_f32 + // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.fabs [[TMP0]] : !cir.vector + + // LLVM: {{.*}}test_vabs_f32(<2 x float>{{.*}}[[a:%.*]]) + // LLVM: [[VABS_F:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[a]]) + // LLVM: ret <2 x float> [[VABS_F]] +} + +float32x4_t test_vabsq_f32(float32x4_t a) { + return vabsq_f32(a); + + // CIR-LABEL: vabsq_f32 + // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.fabs [[TMP0]] : !cir.vector + + // LLVM: {{.*}}test_vabsq_f32(<4 x float>{{.*}}[[a:%.*]]) + // LLVM: [[VABS_F:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> [[a]]) + // LLVM: ret <4 x float> [[VABS_F]] +} + +float64x1_t test_vabs_f64(float64x1_t a) { + return vabs_f64(a); + + // CIR-LABEL: vabs_f64 + // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.fabs [[TMP0]] : !cir.vector + + // LLVM: {{.*}}test_vabs_f64(<1 x double>{{.*}}[[a:%.*]]) + // LLVM: [[VABS_F:%.*]] = call <1 x double> @llvm.fabs.v1f64(<1 x double> [[a]]) + // LLVM: ret <1 x double> [[VABS_F]] +} + +float64x2_t test_vabsq_f64(float64x2_t a) { + return vabsq_f64(a); + + // CIR-LABEL: vabsq_f64 + // CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.fabs [[TMP0]] : !cir.vector + + // LLVM: {{.*}}test_vabsq_f64(<2 x double>{{.*}}[[a:%.*]]) + // LLVM: [[VABS_F:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[a]]) + // LLVM: ret <2 x double> [[VABS_F]] +} From 4762e73717a26db7cb7a2ab9caa7640de4bbaad3 Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 25 Nov 2024 23:19:20 -0500 Subject: [PATCH 2119/2301] [CIR][CIRGen][Builtin] Support __builtin_launder except in the case of -fstrict-vtable-pointers (#1138) Without using flag `-fstrict-vtable-pointers`, `__builtin_launder` is a noop. This PR implements that, and leave implementation for the case of `-fstrict-vtable-pointers` to future where there is a need. This PR also adapted most of test cases from [OG test case](https://github.com/llvm/clangir/blob/3aed38cf52e72cb51a907fad9dd53802f6505b81/clang/test/CodeGenCXX/builtin-launder.cpp#L1). I didn't use test cases in the namespace [pessimizing_cases](https://github.com/llvm/clangir/blob/3aed38cf52e72cb51a907fad9dd53802f6505b81/clang/test/CodeGenCXX/builtin-launder.cpp#L269), as they have no difference even when `-fstrict-vtable-pointers` is on. --- clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 49 +++- clang/test/CIR/CodeGen/builtins.cpp | 289 ++++++++++++++++++++++ 3 files changed, 337 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 8f56f0726f8a..547b3f05731c 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -230,6 +230,7 @@ struct MissingFeatures { static bool emitEmptyRecordCheck() { return false; } static bool isPPC_FP128Ty() { return false; } static bool emitBinaryAtomicPostHasInvert() { return false; } + static bool createLaunderInvariantGroup() { return false; } // Inline assembly static bool asmGoto() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index c8aa38b75d35..608a3e8f60c6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -330,6 +330,43 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, return returnBool ? op.getResult(1) : op.getResult(0); } +static bool +typeRequiresBuiltinLaunderImp(const ASTContext &ctx, QualType ty, + llvm::SmallPtrSetImpl &seen) { + if (const auto *arr = ctx.getAsArrayType(ty)) + ty = ctx.getBaseElementType(arr); + + const auto *record = ty->getAsCXXRecordDecl(); + if (!record) + return false; + + // We've already checked this type, or are in the process of checking it. + if (!seen.insert(record).second) + return false; + + assert(record->hasDefinition() && + "Incomplete types should already be diagnosed"); + + if (record->isDynamicClass()) + return true; + + for (FieldDecl *fld : record->fields()) { + if (typeRequiresBuiltinLaunderImp(ctx, fld->getType(), seen)) + return true; + } + return false; +} + +/// Determine if the specified type requires laundering by checking if it is a +/// dynamic class type or contains a subobject which is a dynamic class type. +static bool typeRequiresBuiltinLaunder(clang::CIRGen::CIRGenModule &cgm, + QualType ty) { + if (!cgm.getCodeGenOpts().StrictVTablePointers) + return false; + llvm::SmallPtrSet seen; + return typeRequiresBuiltinLaunderImp(cgm.getASTContext(), ty, seen); +} + RValue CIRGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) { auto src = emitScalarExpr(E->getArg(0)); auto shiftAmt = emitScalarExpr(E->getArg(1)); @@ -1625,8 +1662,16 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_setjmp NYI"); case Builtin::BI__builtin_longjmp: llvm_unreachable("BI__builtin_longjmp NYI"); - case Builtin::BI__builtin_launder: - llvm_unreachable("BI__builtin_launder NYI"); + case Builtin::BI__builtin_launder: { + const clang::Expr *arg = E->getArg(0); + clang::QualType argTy = arg->getType()->getPointeeType(); + mlir::Value ptr = emitScalarExpr(arg); + if (typeRequiresBuiltinLaunder(CGM, argTy)) { + assert(!MissingFeatures::createLaunderInvariantGroup()); + llvm_unreachable(" launder.invariant.group NYI "); + } + return RValue::get(ptr); + } case Builtin::BI__sync_fetch_and_add: case Builtin::BI__sync_fetch_and_sub: diff --git a/clang/test/CIR/CodeGen/builtins.cpp b/clang/test/CIR/CodeGen/builtins.cpp index c5f1ed5e0aff..504ec13da6ee 100644 --- a/clang/test/CIR/CodeGen/builtins.cpp +++ b/clang/test/CIR/CodeGen/builtins.cpp @@ -116,3 +116,292 @@ extern "C" void *test_frame_address(void) { // LLVM-LABEL: @test_frame_address // LLVM: {{%.*}} = call ptr @llvm.frameaddress.p0(i32 1) } + +// Following block of tests are for __builtin_launder +// FIXME: Once we fully __builtin_launder by allowing -fstrict-vtable-pointers, +// we should move following block of tests to a separate file. +namespace launder_test { +//===----------------------------------------------------------------------===// +// Positive Cases +//===----------------------------------------------------------------------===// + +struct TestVirtualFn { + virtual void foo() {} +}; + +// CIR-LABEL: test_builtin_launder_virtual_fn +// LLVM: define{{.*}} void @test_builtin_launder_virtual_fn(ptr [[P:%.*]]) +extern "C" void test_builtin_launder_virtual_fn(TestVirtualFn *p) { + // CIR: cir.return + + // LLVM: store ptr [[P]], ptr [[P_ADDR:%.*]], align 8 + // LLVM-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 8 + // LLVM-NEXT: store ptr [[TMP0]], ptr {{%.*}} + // LLVM-NEXT: ret void + TestVirtualFn *d = __builtin_launder(p); +} + +struct TestPolyBase : TestVirtualFn { +}; + +// CIR-LABEL: test_builtin_launder_poly_base +// LLVM: define{{.*}} void @test_builtin_launder_poly_base(ptr [[P:%.*]]) +extern "C" void test_builtin_launder_poly_base(TestPolyBase *p) { + // CIR: cir.return + + // LLVM: store ptr [[P]], ptr [[P_ADDR:%.*]], align 8 + // LLVM-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 8 + // LLVM-NEXT: store ptr [[TMP0]], ptr {{%.*}} + // LLVM-NEXT: ret void + TestPolyBase *d = __builtin_launder(p); +} + +struct TestBase {}; +struct TestVirtualBase : virtual TestBase {}; + +// CIR-LABEL: test_builtin_launder_virtual_base +// LLVM: define{{.*}} void @test_builtin_launder_virtual_base(ptr [[P:%.*]]) +extern "C" void test_builtin_launder_virtual_base(TestVirtualBase *p) { + TestVirtualBase *d = __builtin_launder(p); + + // CIR: cir.return + + // LLVM: store ptr [[P]], ptr [[P_ADDR:%.*]], align 8 + // LLVM-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 8 + // LLVM-NEXT: store ptr [[TMP0]], ptr {{%.*}} + // LLVM-NEXT: ret void +} + +//===----------------------------------------------------------------------===// +// Negative Cases +//===----------------------------------------------------------------------===// + +// CIR-LABEL: test_builtin_launder_ommitted_one +// LLVM: define{{.*}} void @test_builtin_launder_ommitted_one(ptr [[P:%.*]]) +extern "C" void test_builtin_launder_ommitted_one(int *p) { + int *d = __builtin_launder(p); + + // CIR: cir.return + + // LLVM-NEXT: [[P_ADDR:%.*]] = alloca ptr, i64 1, align 8 + // LLVM-NEXT: [[D:%.*]] = alloca ptr, i64 1, align 8 + // LLVM: store ptr [[P]], ptr [[P_ADDR:%.*]], align 8 + // LLVM-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 8 + // LLVM-NEXT: store ptr [[TMP0]], ptr [[D]] + // LLVM-NEXT: ret void +} + +struct TestNoInvariant { + int x; +}; + +// CIR-LABEL: test_builtin_launder_ommitted_two +// LLVM: define{{.*}} void @test_builtin_launder_ommitted_two(ptr [[P:%.*]]) +extern "C" void test_builtin_launder_ommitted_two(TestNoInvariant *p) { + TestNoInvariant *d = __builtin_launder(p); + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM-NEXT: [[P_ADDR:%.*]] = alloca ptr, i64 1, align 8 + // LLVM-NEXT: [[D:%.*]] = alloca ptr, i64 1, align 8 + // LLVM: store ptr [[P]], ptr [[P_ADDR:%.*]], align 8 + // LLVM-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 8 + // LLVM-NEXT: store ptr [[TMP0]], ptr [[D]] + // LLVM-NEXT: ret void +} + +struct TestVirtualMember { + TestVirtualFn member; +}; + +// CIR-LABEL: test_builtin_launder_virtual_member +// LLVM: define{{.*}} void @test_builtin_launder_virtual_member +extern "C" void test_builtin_launder_virtual_member(TestVirtualMember *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + TestVirtualMember *d = __builtin_launder(p); +} + +struct TestVirtualMemberDepth2 { + TestVirtualMember member; +}; + +// CIR-LABEL: test_builtin_launder_virtual_member_depth_2 +// LLVM: define{{.*}} void @test_builtin_launder_virtual_member_depth_2 +extern "C" void test_builtin_launder_virtual_member_depth_2(TestVirtualMemberDepth2 *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + TestVirtualMemberDepth2 *d = __builtin_launder(p); +} + +struct TestVirtualReferenceMember { + TestVirtualFn &member; +}; + +// CIR-LABEL: test_builtin_launder_virtual_reference_member +// LLVM: define{{.*}} void @test_builtin_launder_virtual_reference_member +extern "C" void test_builtin_launder_virtual_reference_member(TestVirtualReferenceMember *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + TestVirtualReferenceMember *d = __builtin_launder(p); +} + +struct TestRecursiveMember { + TestRecursiveMember() : member(*this) {} + TestRecursiveMember &member; +}; + +// CIR-LABEL: test_builtin_launder_recursive_member +// LLVM: define{{.*}} void @test_builtin_launder_recursive_member +extern "C" void test_builtin_launder_recursive_member(TestRecursiveMember *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + TestRecursiveMember *d = __builtin_launder(p); +} + +struct TestVirtualRecursiveMember { + TestVirtualRecursiveMember() : member(*this) {} + TestVirtualRecursiveMember &member; + virtual void foo(); +}; + +// CIR-LABEL: test_builtin_launder_virtual_recursive_member +// LLVM: define{{.*}} void @test_builtin_launder_virtual_recursive_member +extern "C" void test_builtin_launder_virtual_recursive_member(TestVirtualRecursiveMember *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + TestVirtualRecursiveMember *d = __builtin_launder(p); +} + +// CIR-LABEL: test_builtin_launder_array +// LLVM: define{{.*}} void @test_builtin_launder_array +extern "C" void test_builtin_launder_array(TestVirtualFn (&Arr)[5]) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + TestVirtualFn *d = __builtin_launder(Arr); +} + +// CIR-LABEL: test_builtin_launder_array_nested +// LLVM: define{{.*}} void @test_builtin_launder_array_nested +extern "C" void test_builtin_launder_array_nested(TestVirtualFn (&Arr)[5][2]) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + using RetTy = TestVirtualFn(*)[2]; + RetTy d = __builtin_launder(Arr); +} + +// CIR-LABEL: test_builtin_launder_array_no_invariant +// LLVM: define{{.*}} void @test_builtin_launder_array_no_invariant +extern "C" void test_builtin_launder_array_no_invariant(TestNoInvariant (&Arr)[5]) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + TestNoInvariant *d = __builtin_launder(Arr); +} + +// CIR-LABEL: test_builtin_launder_array_nested_no_invariant +// LLVM: define{{.*}} void @test_builtin_launder_array_nested_no_invariant +extern "C" void test_builtin_launder_array_nested_no_invariant(TestNoInvariant (&Arr)[5][2]) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + using RetTy = TestNoInvariant(*)[2]; + RetTy d = __builtin_launder(Arr); +} + +template +struct WithMember { + Member mem; +}; + +template struct WithMember; + +// CIR-LABEL: test_builtin_launder_member_array +// LLVM: define{{.*}} void @test_builtin_launder_member_array +extern "C" void test_builtin_launder_member_array(WithMember *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + auto *d = __builtin_launder(p); +} + +template struct WithMember; + +// CIR-LABEL: test_builtin_launder_member_array_nested +// LLVM: define{{.*}} void @test_builtin_launder_member_array_nested +extern "C" void test_builtin_launder_member_array_nested(WithMember *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + auto *d = __builtin_launder(p); +} + +template struct WithMember; + +// CIR-LABEL: test_builtin_launder_member_array_no_invariant +// LLVM: define{{.*}} void @test_builtin_launder_member_array_no_invariant +extern "C" void test_builtin_launder_member_array_no_invariant(WithMember *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + auto *d = __builtin_launder(p); +} + +template struct WithMember; + +// CIR-LABEL: test_builtin_launder_member_array_nested_no_invariant +// LLVM: define{{.*}} void @test_builtin_launder_member_array_nested_no_invariant +extern "C" void test_builtin_launder_member_array_nested_no_invariant(WithMember *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + auto *d = __builtin_launder(p); +} + +template +struct WithBase : T {}; + +template struct WithBase; + +// CIR-LABEL: test_builtin_launder_base_no_invariant +// LLVM: define{{.*}} void @test_builtin_launder_base_no_invariant +extern "C" void test_builtin_launder_base_no_invariant(WithBase *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + auto *d = __builtin_launder(p); +} + +template struct WithBase; + +// CIR-LABEL: test_builtin_launder_base +// LLVM: define{{.*}} void @test_builtin_launder_base +extern "C" void test_builtin_launder_base(WithBase *p) { + // CIR: cir.return + + // LLVM-NOT: llvm.launder.invariant.group + // LLVM: ret void + auto *d = __builtin_launder(p); +} +} From 5d787bda6cdf198f57e623ee5914b96d767cc494 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Mon, 25 Nov 2024 20:27:58 -0800 Subject: [PATCH 2120/2301] [CIR][CIRGen] Support copy constructors with non-record arrays (#1165) If a record type contains an array of non-record types, we can generate a copy for it inside the copy constructor, as CodeGen does. CodeGen does so for arrays of record types where applicable as well, but we'll want to represent the construction of those explicitly, as outlined in https://github.com/llvm/clangir/issues/1055. --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 25 ++++++++++++++- clang/test/CIR/CodeGen/copy-constructor.cpp | 35 +++++++++++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/copy-constructor.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index d4e53e268328..59c8adcbc5ed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -252,7 +252,30 @@ static void emitMemberInitializer(CIRGenFunction &CGF, CGF.getContext().getAsConstantArrayType(FieldType); if (Array && Constructor->isDefaulted() && Constructor->isCopyOrMoveConstructor()) { - llvm_unreachable("NYI"); + QualType baseElementTy = CGF.getContext().getBaseElementType(Array); + // NOTE(cir): CodeGen allows record types to be memcpy'd if applicable, + // whereas ClangIR wants to represent all object construction explicitly. + if (!baseElementTy->isRecordType()) { + unsigned srcArgIndex = + CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args); + cir::LoadOp srcPtr = CGF.getBuilder().createLoad( + CGF.getLoc(MemberInit->getSourceLocation()), + CGF.GetAddrOfLocalVar(Args[srcArgIndex])); + LValue thisRhslv = CGF.MakeNaturalAlignAddrLValue(srcPtr, RecordTy); + LValue src = CGF.emitLValueForFieldInitialization(thisRhslv, Field, + Field->getName()); + + // Copy the aggregate. + CGF.emitAggregateCopy(LHS, src, FieldType, + CGF.getOverlapForFieldInit(Field), + LHS.isVolatileQualified()); + // Ensure that we destroy the objects if an exception is thrown later in + // the constructor. + QualType::DestructionKind dtorKind = FieldType.isDestructedType(); + assert(!CGF.needsEHCleanup(dtorKind) && + "Arrays of non-record types shouldn't need EH cleanup"); + return; + } } CGF.emitInitializerForField(Field, LHS, MemberInit->getInit()); diff --git a/clang/test/CIR/CodeGen/copy-constructor.cpp b/clang/test/CIR/CodeGen/copy-constructor.cpp new file mode 100644 index 000000000000..92e0887b02ef --- /dev/null +++ b/clang/test/CIR/CodeGen/copy-constructor.cpp @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +struct HasScalarArrayMember { + int arr[2][2]; + HasScalarArrayMember(const HasScalarArrayMember &); +}; + +// CIR-LABEL: cir.func @_ZN20HasScalarArrayMemberC2ERKS_( +// CIR-NEXT: %[[#THIS:]] = cir.alloca !cir.ptr +// CIR-NEXT: %[[#OTHER:]] = cir.alloca !cir.ptr +// CIR-NEXT: cir.store %arg0, %[[#THIS]] +// CIR-NEXT: cir.store %arg1, %[[#OTHER]] +// CIR-NEXT: %[[#THIS_LOAD:]] = cir.load %[[#THIS]] +// CIR-NEXT: %[[#THIS_ARR:]] = cir.get_member %[[#THIS_LOAD]][0] {name = "arr"} +// CIR-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CIR-NEXT: %[[#OTHER_ARR:]] = cir.get_member %[[#OTHER_LOAD]][0] {name = "arr"} +// CIR-NEXT: cir.copy %[[#OTHER_ARR]] to %[[#THIS_ARR]] : !cir.ptr x 2>> +// CIR-NEXT: cir.return + +// LLVM-LABEL: define {{.*}} @_ZN20HasScalarArrayMemberC2ERKS_( +// LLVM-SAME: ptr %[[#ARG0:]], ptr %[[#ARG1:]]) +// LLVM-NEXT: %[[#THIS:]] = alloca ptr +// LLVM-NEXT: %[[#OTHER:]] = alloca ptr +// LLVM-NEXT: store ptr %[[#ARG0]], ptr %[[#THIS]] +// LLVM-NEXT: store ptr %[[#ARG1]], ptr %[[#OTHER]] +// LLVM-NEXT: %[[#THIS_LOAD:]] = load ptr, ptr %[[#THIS]] +// LLVM-NEXT: %[[#THIS_ARR:]] = getelementptr %struct.HasScalarArrayMember, ptr %[[#THIS_LOAD]], i32 0, i32 0 +// LLVM-NEXT: %[[#OTHER_LOAD:]] = load ptr, ptr %[[#OTHER]] +// LLVM-NEXT: %[[#OTHER_ARR:]] = getelementptr %struct.HasScalarArrayMember, ptr %[[#OTHER_LOAD]], i32 0, i32 0 +// LLVM-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %[[#THIS_ARR]], ptr %[[#OTHER_ARR]], i32 16, i1 false) +// LLVM-NEXT: ret void +HasScalarArrayMember::HasScalarArrayMember(const HasScalarArrayMember &) = default; From 292b4f009a018e2e54c0e68318c82cd2e887d792 Mon Sep 17 00:00:00 2001 From: Congcong Cai Date: Wed, 27 Nov 2024 05:15:38 +0800 Subject: [PATCH 2121/2301] [CIR][Dialect] Introduce StdInitializerListOp to represent high-level semantics of C++ initializer list (#1121) I don't finish all work about `cir.initlist`. But I want to get some feedback about the cir design to make sure I am in correct way. Fixed: #777 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 34 ++++++++ clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 75 +++++------------ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 23 +++++ .../Dialect/Transforms/LoweringPrepare.cpp | 83 ++++++++++++++++++- clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp | 33 +++++--- .../CIR/CodeGen/initlist-ptr-unsigned.cpp | 16 +++- clang/test/CIR/IR/invalid.cir | 78 +++++++++++++++++ 7 files changed, 272 insertions(+), 70 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 74757d506ba1..38ce76aabde3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -5283,4 +5283,38 @@ def SignBitOp : CIR_Op<"signbit", [Pure]> { }]; } +//===----------------------------------------------------------------------===// +// StdInitializerListOp +//===----------------------------------------------------------------------===// + +def StdInitializerListOp : CIR_Op<"std.initializer_list"> { + let summary = "Initialize std::initializer_list"; + let description = [{ + The `std.initializer_list` operation will initialize + `std::initializer_list` with given arguments list. + + ```cpp + initializer_list v{1,2,3}; // initialize v with 1, 2, 3 + ``` + + The code above will generate CIR similar as: + + ```mlir + %0 = cir.alloca INITLIST_TYPE, !cir.ptr + %1 = cir.const #cir.int<1> + ... + cir.std.initializer_list %0 (%1 %2 %3) + ``` + + The type of each argument should be the same as template parameter of + `std::initializer_list` (aka `T` in `std::initializer_list`). + }]; + let arguments = (ins StructPtr:$initList, Variadic:$args); + let assemblyFormat = [{ + $initList ` ` `(` ($args^ `:` type($args))? `)` `:` type($initList) attr-dict + }]; + + let hasVerifier = 1; +} + #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 32f343ffd605..d436822bf9ec 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -18,11 +18,15 @@ #include "clang/AST/Decl.h" #include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/MissingFeatures.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" @@ -297,62 +301,25 @@ class AggExprEmitter : public StmtVisitor { void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E); void VisitLambdaExpr(LambdaExpr *E); void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { - ASTContext &Ctx = CGF.getContext(); - CIRGenFunction::SourceLocRAIIObject locRAIIObject{ - CGF, CGF.getLoc(E->getSourceRange())}; - // Emit an array containing the elements. The array is externally - // destructed if the std::initializer_list object is. - LValue Array = CGF.emitLValue(E->getSubExpr()); - assert(Array.isSimple() && "initializer_list array not a simple lvalue"); - Address ArrayPtr = Array.getAddress(); - - const ConstantArrayType *ArrayType = - Ctx.getAsConstantArrayType(E->getSubExpr()->getType()); - assert(ArrayType && "std::initializer_list constructed from non-array"); - - RecordDecl *Record = E->getType()->castAs()->getDecl(); - RecordDecl::field_iterator Field = Record->field_begin(); - assert(Field != Record->field_end() && - Ctx.hasSameType(Field->getType()->getPointeeType(), - ArrayType->getElementType()) && - "Expected std::initializer_list first field to be const E *"); - // Start pointer. auto loc = CGF.getLoc(E->getSourceRange()); - AggValueSlot Dest = EnsureSlot(loc, E->getType()); - LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), E->getType()); - LValue Start = - CGF.emitLValueForFieldInitialization(DestLV, *Field, Field->getName()); - mlir::Value ArrayStart = ArrayPtr.emitRawPointer(); - CGF.emitStoreThroughLValue(RValue::get(ArrayStart), Start); - ++Field; - assert(Field != Record->field_end() && - "Expected std::initializer_list to have two fields"); - - auto Builder = CGF.getBuilder(); - - auto sizeOp = Builder.getConstInt(loc, ArrayType->getSize()); - - mlir::Value Size = sizeOp.getRes(); - Builder.getUIntNTy(ArrayType->getSizeBitWidth()); - LValue EndOrLength = - CGF.emitLValueForFieldInitialization(DestLV, *Field, Field->getName()); - if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) { - // Length. - CGF.emitStoreThroughLValue(RValue::get(Size), EndOrLength); - } else { - // End pointer. - assert(Field->getType()->isPointerType() && - Ctx.hasSameType(Field->getType()->getPointeeType(), - ArrayType->getElementType()) && - "Expected std::initializer_list second field to be const E *"); - - auto ArrayEnd = - Builder.getArrayElement(loc, loc, ArrayPtr.getPointer(), - ArrayPtr.getElementType(), Size, false); - CGF.emitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); + auto builder = CGF.getBuilder(); + auto *subExpr = + llvm::cast(E->getSubExpr())->getSubExpr(); + llvm::SmallVector inits{}; + for (auto *init : llvm::cast(subExpr)->inits()) { + RValue tmpInit = CGF.emitAnyExprToTemp(init); + if (tmpInit.isScalar()) { + inits.push_back(tmpInit.getScalarVal()); + } else if (tmpInit.isComplex()) { + inits.push_back(tmpInit.getComplexVal()); + } else if (tmpInit.isAggregate()) { + inits.push_back(tmpInit.getAggregatePointer()); + } else { + llvm_unreachable("invalid temp expr type"); + } } - assert(++Field == Record->field_end() && - "Expected std::initializer_list to only have two fields"); + mlir::Value dest = EnsureSlot(loc, E->getType()).getPointer(); + builder.create(loc, dest, inits); } void VisitExprWithCleanups(ExprWithCleanups *E); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 287749fb85b3..aa48cafcc645 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -3853,6 +3853,29 @@ LogicalResult cir::CatchParamOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// StdInitializerListOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult cir::StdInitializerListOp::verify() { + auto resultType = mlir::cast( + mlir::cast(getInitList().getType()).getPointee()); + if (resultType.getMembers().size() != 2) + return emitOpError( + "std::initializer_list must be '!cir.struct' with two fields"); + auto memberPtr = mlir::dyn_cast(resultType.getMembers()[0]); + if (memberPtr == nullptr) + return emitOpError("first member type of std::initializer_list must be " + "'!cir.ptr', but provided ") + << resultType.getMembers()[0]; + auto expectedType = memberPtr.getPointee(); + for (const mlir::Value &arg : getArgs()) + if (expectedType != arg.getType()) + return emitOpError("arg type must be ") + << expectedType << ", but provided " << arg.getType(); + return mlir::success(); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index b0709e9638ff..a949d78eaf80 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -12,15 +12,19 @@ #include "mlir/IR/Region.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" +#include "clang/AST/Decl.h" #include "clang/AST/Mangle.h" #include "clang/Basic/Module.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/APFloat.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" @@ -85,6 +89,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerToMemCpy(StoreOp op); void lowerArrayDtor(ArrayDtor op); void lowerArrayCtor(ArrayCtor op); + void lowerStdInitializerListOp(StdInitializerListOp op); /// Collect annotations of global values in the module void addGlobalAnnotations(mlir::Operation *op, mlir::ArrayAttr annotations); @@ -1120,6 +1125,79 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { op.erase(); } +/// lowering construction of std::initializer_list. +/// 1. alloca array for arg list. +/// 2. copy arg list to array. +/// 3. construct std::initializer_list from array. +void LoweringPreparePass::lowerStdInitializerListOp(StdInitializerListOp op) { + auto loc = op.getLoc(); + cir::CIRDataLayout dataLayout(theModule); + auto args = op.getArgs(); + + auto stdInitializerListType = mlir::cast( + mlir::cast(op.getInitList().getType()).getPointee()); + clang::RecordDecl::field_range stdInitializerListFields = + stdInitializerListType.getAst().getRawDecl()->fields(); + + mlir::Type elementType = + mlir::cast(stdInitializerListType.getMembers()[0]) + .getPointee(); + auto tempArrayType = + cir::ArrayType::get(&getContext(), elementType, args.size()); + + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPointAfter(op); + + IntegerAttr alignment = builder.getI64IntegerAttr( + dataLayout.getPrefTypeAlign(tempArrayType).value()); + assert(!cir::MissingFeatures::addressSpace()); + mlir::Value arrayPtr = builder.createAlloca( + loc, cir::PointerType::get(tempArrayType), tempArrayType, "", alignment); + mlir::Value arrayStartPtr = + builder.createCast(cir::CastKind::array_to_ptrdecay, arrayPtr, + cir::PointerType::get(elementType)); + for (unsigned i = 0; i < args.size(); i++) { + if (i == 0) { + builder.createStore(loc, args[i], arrayStartPtr); + } else { + mlir::Value offset = builder.getUnsignedInt(loc, i, 64); + mlir::Value dest = builder.create( + loc, arrayStartPtr.getType(), arrayStartPtr, offset); + builder.createStore(loc, args[i], dest); + } + } + + // FIXME(cir): better handling according to different field type. [ptr ptr], + // [ptr size], [size ptr]. + + clang::RecordDecl::field_iterator it = stdInitializerListFields.begin(); + const clang::RecordDecl::field_iterator startField = it; + const unsigned startIdx = 0U; + const clang::RecordDecl::field_iterator endOrSizeField = ++it; + const unsigned endOrSizeIdx = 1U; + assert(llvm::range_size(stdInitializerListFields) == 2U); + + mlir::Value startMemberPtr = builder.createGetMemberOp( + loc, op.getInitList(), startField->getName().data(), startIdx); + builder.createStore(loc, arrayStartPtr, startMemberPtr); + + mlir::Value size = builder.getUnsignedInt(loc, args.size(), 64); + if (endOrSizeField->getType()->isPointerType()) { + mlir::Value arrayEndPtr = builder.create( + loc, arrayStartPtr.getType(), arrayStartPtr, size); + mlir::Value endMemberPtr = builder.createGetMemberOp( + loc, op.getInitList(), endOrSizeField->getName().data(), endOrSizeIdx); + builder.createStore(loc, arrayEndPtr, endMemberPtr); + } else { + assert(endOrSizeField->getType()->isIntegerType()); + mlir::Value sizeMemberPtr = builder.createGetMemberOp( + loc, op.getInitList(), endOrSizeField->getName().data(), endOrSizeIdx); + builder.createStore(loc, size, sizeMemberPtr); + } + + op.erase(); +} + void LoweringPreparePass::addGlobalAnnotations(mlir::Operation *op, mlir::ArrayAttr annotations) { auto globalValue = cast(op); @@ -1180,6 +1258,8 @@ void LoweringPreparePass::runOnOp(Operation *op) { } if (std::optional annotations = fnOp.getAnnotations()) addGlobalAnnotations(fnOp, annotations.value()); + } else if (auto stdInitializerListOp = dyn_cast(op)) { + lowerStdInitializerListOp(stdInitializerListOp); } } @@ -1195,7 +1275,8 @@ void LoweringPreparePass::runOnOperation() { op->walk([&](Operation *op) { if (isa(op)) + ArrayCtor, ArrayDtor, cir::FuncOp, StoreOp, StdInitializerListOp>( + op)) opsToTransform.push_back(op); }); diff --git a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp index 6808c5a89e33..b807e1bae756 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp @@ -1,3 +1,5 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir -clangir-disable-passes +// RUN: FileCheck --check-prefix=BEFORE --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll @@ -15,6 +17,16 @@ void test() { } } // namespace std +// BEFORE: [[INITLIST_TYPE:!.*]] = !cir.struct" {!cir.ptr>, !cir.ptr>} #cir.record.decl.ast> +// BEFORE: %0 = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, +// BEFORE: %1 = cir.get_global @".str" : !cir.ptr> +// BEFORE: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// BEFORE: %3 = cir.get_global @".str.1" : !cir.ptr> +// BEFORE: %4 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr +// BEFORE: cir.std.initializer_list %0 (%2, %4 : !cir.ptr, !cir.ptr) : !cir.ptr<[[INITLIST_TYPE]]> +// BEFORE: %5 = cir.load %0 : !cir.ptr<[[INITLIST_TYPE]]>, [[INITLIST_TYPE]] +// BEFORE: cir.call @_ZSt1fIPKcEvSt16initializer_listIT_E(%5) : ([[INITLIST_TYPE]]) -> () + // CIR: [[INITLIST_TYPE:!.*]] = !cir.struct" {!cir.ptr>, !cir.ptr>}> // CIR: cir.func linkonce_odr @_ZSt1fIPKcEvSt16initializer_listIT_E(%arg0: [[INITLIST_TYPE]] // CIR: [[LOCAL:%.*]] = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, @@ -27,24 +39,22 @@ void test() { // CIR: cir.func @_ZSt4testv() // CIR: cir.scope { // CIR: [[INITLIST_LOCAL:%.*]] = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, -// CIR: [[LOCAL_ELEM_ARRAY:%.*]] = cir.alloca !cir.array x 2>, !cir.ptr x 2>>, -// CIR: [[FIRST_ELEM_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[LOCAL_ELEM_ARRAY]] : !cir.ptr x 2>>), !cir.ptr> // CIR: [[XY_CHAR_ARRAY:%.*]] = cir.get_global [[STR_XY]] : !cir.ptr> // CIR: [[STR_XY_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[XY_CHAR_ARRAY]] : !cir.ptr>), !cir.ptr -// CIR: cir.store [[STR_XY_PTR]], [[FIRST_ELEM_PTR]] : !cir.ptr, !cir.ptr> -// CIR: [[ONE:%.*]] = cir.const #cir.int<1> -// CIR: [[NEXT_ELEM_PTR:%.*]] = cir.ptr_stride([[FIRST_ELEM_PTR]] : !cir.ptr>, [[ONE]] : !s64i), !cir.ptr> // CIR: [[UV_CHAR_ARRAY:%.*]] = cir.get_global [[STR_UV]] : !cir.ptr> // CIR: [[STR_UV_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[UV_CHAR_ARRAY]] : !cir.ptr>), !cir.ptr +// CIR: [[LOCAL_ELEM_ARRAY:%.*]] = cir.alloca !cir.array x 2>, !cir.ptr x 2>>, +// CIR: [[ELEM_BEGIN:%.*]] = cir.cast(array_to_ptrdecay, [[LOCAL_ELEM_ARRAY]] : !cir.ptr x 2>>), !cir.ptr> +// CIR: cir.store [[STR_XY_PTR]], [[ELEM_BEGIN]] : !cir.ptr, !cir.ptr> +// CIR: [[ONE:%.*]] = cir.const #cir.int<1> +// CIR: [[NEXT_ELEM_PTR:%.*]] = cir.ptr_stride([[ELEM_BEGIN]] : !cir.ptr>, [[ONE]] : !u64i), !cir.ptr> // CIR: cir.store [[STR_UV_PTR]], [[NEXT_ELEM_PTR]] : !cir.ptr, !cir.ptr> // CIR: [[START_FLD_PTR:%.*]] = cir.get_member [[INITLIST_LOCAL]][0] {name = "array_start"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr>> -// CIR: [[START_FLD_PTR_AS_PTR_2_CHAR_ARRAY:%.*]] = cir.cast(bitcast, [[START_FLD_PTR]] : !cir.ptr>>), !cir.ptr x 2>>> -// CIR: cir.store [[LOCAL_ELEM_ARRAY]], [[START_FLD_PTR_AS_PTR_2_CHAR_ARRAY]] : !cir.ptr x 2>>, !cir.ptr x 2>>> +// CIR: cir.store [[ELEM_BEGIN]], [[START_FLD_PTR]] : !cir.ptr>, !cir.ptr>> // CIR: [[ELEM_ARRAY_LEN:%.*]] = cir.const #cir.int<2> +// CIR: [[ELEM_END:%.*]] = cir.ptr_stride([[ELEM_BEGIN]] : !cir.ptr>, [[ELEM_ARRAY_LEN]] : !u64i), !cir.ptr> // CIR: [[END_FLD_PTR:%.*]] = cir.get_member [[INITLIST_LOCAL]][1] {name = "array_end"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr>> -// CIR: [[LOCAL_ELEM_ARRAY_END:%.*]] = cir.ptr_stride([[LOCAL_ELEM_ARRAY]] : !cir.ptr x 2>>, [[ELEM_ARRAY_LEN]] : !u64i), !cir.ptr x 2>> -// CIR: [[END_FLD_PTR_AS_PTR_2_CHAR_ARRAY:%.*]] = cir.cast(bitcast, [[END_FLD_PTR]] : !cir.ptr>>), !cir.ptr x 2>>> -// CIR: cir.store [[LOCAL_ELEM_ARRAY_END]], [[END_FLD_PTR_AS_PTR_2_CHAR_ARRAY]] : !cir.ptr x 2>>, !cir.ptr x 2>>> +// CIR: cir.store [[ELEM_END]], [[END_FLD_PTR]] : !cir.ptr>, !cir.ptr>> // CIR: [[ARG:%.*]] = cir.load [[INITLIST_LOCAL]] : !cir.ptr<[[INITLIST_TYPE]]>, [[INITLIST_TYPE]] // CIR: cir.call @_ZSt1fIPKcEvSt16initializer_listIT_E([[ARG]]) : ([[INITLIST_TYPE]]) -> () // CIR: } @@ -72,8 +82,9 @@ void test() { // LLVM: [[PTR_SECOND_ELEM:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 1, // LLVM: store ptr @.str.1, ptr [[PTR_SECOND_ELEM]], align 8, // LLVM: [[INIT_START_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0, +// LLVM: store ptr [[PTR_FIRST_ELEM]], ptr [[INIT_START_FLD_PTR]], align 8, +// LLVM: [[ELEM_ARRAY_END:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 2, // LLVM: [[INIT_END_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1, -// LLVM: [[ELEM_ARRAY_END:%.*]] = getelementptr [2 x ptr], ptr [[ELEM_ARRAY_PTR]], i64 2, // LLVM: store ptr [[ELEM_ARRAY_END]], ptr [[INIT_END_FLD_PTR]], align 8, // LLVM: [[ARG2PASS:%.*]] = load %"class.std::initializer_list", ptr [[INIT_STRUCT]], align 8, // LLVM: call void @_ZSt1fIPKcEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG2PASS]]), diff --git a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp index f80ae2ec46cd..a2e9d9c43ee7 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp @@ -1,3 +1,5 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir -clangir-disable-passes +// RUN: FileCheck --check-prefix=BEFORE --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll @@ -15,6 +17,13 @@ void test() { } } // namespace std +// BEFORE: [[INITLIST_TYPE:!.*]] = !cir.struct" {!cir.ptr, !u64i} #cir.record.decl.ast> +// BEFORE: %0 = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, +// BEFORE: %1 = cir.const #cir.int<7> : !s32i +// BEFORE: cir.std.initializer_list %0 (%1 : !s32i) : !cir.ptr<[[INITLIST_TYPE]]> +// BEFORE: %2 = cir.load %0 : !cir.ptr<[[INITLIST_TYPE]]>, [[INITLIST_TYPE]] +// BEFORE: cir.call @_ZSt1fIiEvSt16initializer_listIT_E(%2) : ([[INITLIST_TYPE]]) -> () + // CIR: [[INITLIST_TYPE:!.*]] = !cir.struct" {!cir.ptr, !u64i}> // CIR: cir.func linkonce_odr @_ZSt1fIiEvSt16initializer_listIT_E(%arg0: [[INITLIST_TYPE]] @@ -25,13 +34,12 @@ void test() { // CIR: cir.func @_ZSt4testv() // CIR: cir.scope { // CIR: [[LIST_PTR:%.*]] = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, +// CIR: [[SEVEN:%.*]] = cir.const #cir.int<7> : !s32i // CIR: [[ARRAY:%.*]] = cir.alloca !cir.array, !cir.ptr>, // CIR: [[DECAY_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[ARRAY]] : !cir.ptr>), !cir.ptr -// CIR: [[SEVEN:%.*]] = cir.const #cir.int<7> : !s32i // CIR: cir.store [[SEVEN]], [[DECAY_PTR]] : !s32i, !cir.ptr // CIR: [[FLD_C:%.*]] = cir.get_member [[LIST_PTR]][0] {name = "c"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr> -// CIR: [[ARRAY_PTR:%.*]] = cir.cast(bitcast, [[FLD_C]] : !cir.ptr>), !cir.ptr>> -// CIR: cir.store [[ARRAY]], [[ARRAY_PTR]] : !cir.ptr>, !cir.ptr>> +// CIR: cir.store [[DECAY_PTR]], [[FLD_C]] : !cir.ptr, !cir.ptr> // CIR: [[LENGTH_ONE:%.*]] = cir.const #cir.int<1> // CIR: [[FLD_LEN:%.*]] = cir.get_member [[LIST_PTR]][1] {name = "len"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr // CIR: cir.store [[LENGTH_ONE]], [[FLD_LEN]] : !u64i, !cir.ptr @@ -54,7 +62,7 @@ void test() { // LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr i32, ptr [[ELEM_ARRAY]], i32 0, // LLVM: store i32 7, ptr [[PTR_FIRST_ELEM]], align 4, // LLVM: [[ELEM_ARRAY_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0, -// LLVM: store ptr [[ELEM_ARRAY]], ptr [[ELEM_ARRAY_PTR]], align 8, +// LLVM: store ptr [[PTR_FIRST_ELEM]], ptr [[ELEM_ARRAY_PTR]], align 8, // LLVM: [[INIT_LEN_FLD:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1, // LLVM: store i64 1, ptr [[INIT_LEN_FLD]], align 8, // LLVM: [[ARG2PASS:%.*]] = load %"class.std::initializer_list", ptr [[INIT_STRUCT]], align 8, diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 01828fbe22b4..2e8e3ae9d44e 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1449,6 +1449,84 @@ cir.global external @f = #cir.fp<42> : !cir.float // ----- +!s32i = !cir.int +!u32i = !cir.int +!initializer_list_s32i = !cir.struct, !cir.ptr}> +cir.func @std_initializer_list_wrong_arg_type() { + %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] + %1 = cir.const #cir.int<1> : !s32i + %2 = cir.const #cir.int<1> : !u32i + // expected-error @below {{'cir.std.initializer_list' op arg type must be '!cir.int', but provided '!cir.int'}} + cir.std.initializer_list %0 (%2, %1, %1 : !u32i, !s32i, !s32i) : !cir.ptr +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int +!initializer_list_s32i = !cir.struct, !cir.ptr}> +cir.func @std_initializer_list_wrong_arg_type() { + %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] + %1 = cir.const #cir.int<1> : !s32i + %2 = cir.const #cir.int<1> : !u32i + // expected-error @below {{'cir.std.initializer_list' op arg type must be '!cir.int', but provided '!cir.int'}} + cir.std.initializer_list %0 (%1, %2, %1 : !s32i, !u32i, !s32i) : !cir.ptr + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int +!initializer_list_s32i = !cir.struct, !cir.ptr}> +cir.func @std_initializer_list_wrong_arg_type() { + %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] + %1 = cir.const #cir.int<1> : !s32i + %2 = cir.const #cir.int<1> : !u32i + // expected-error @below {{'cir.std.initializer_list' op arg type must be '!cir.int', but provided '!cir.int'}} + cir.std.initializer_list %0 (%1, %1, %2 : !s32i, !s32i, !u32i) : !cir.ptr + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int +!initializer_list_s32i = !cir.struct, !cir.ptr}> +cir.func @std_initializer_list_wrong_arg_type() { + %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] + %1 = cir.const #cir.int<1> : !s32i + %2 = cir.const #cir.int<1> : !u32i + // expected-error @below {{'cir.std.initializer_list' op arg type must be '!cir.int', but provided '!cir.int'}} + cir.std.initializer_list %0 (%2, %2, %2 : !u32i, !u32i, !u32i) : !cir.ptr + cir.return +} + +// ----- + +!s32i = !cir.int +!initializer_list_s32i = !cir.struct, !cir.ptr, !cir.ptr}> +cir.func @std_initializer_list_wrong_struct() { + %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] + %1 = cir.const #cir.int<1> : !s32i + // expected-error @below {{'cir.std.initializer_list' op std::initializer_list must be '!cir.struct' with two fields}} + cir.std.initializer_list %0 (%1, %1, %1 : !s32i, !s32i, !s32i) : !cir.ptr + cir.return +} + +// ----- + +!s32i = !cir.int +!initializer_list_s32i = !cir.struct}> +cir.func @std_initializer_list_wrong_struct() { + %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] + %1 = cir.const #cir.int<1> : !s32i + // expected-error @below {{'cir.std.initializer_list' op first member type of std::initializer_list must be '!cir.ptr', but provided '!cir.int'}} + cir.std.initializer_list %0 (%1, %1, %1 : !s32i, !s32i, !s32i) : !cir.ptr + cir.return +} + +// ----- // Verify !s32i = !cir.int cir.func @cast0(%arg0: !s32i, %arg1: !s32i) { From 2af9ae932dce3a088e660bbc00091790c023f01b Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 26 Nov 2024 16:16:08 -0500 Subject: [PATCH 2122/2301] [CIR][CIRGen][Builtin][Neon] Lower __builtin_neon_vshl_v (#1134) This PR also changed implementation of BI__builtin_neon_vshlq_v into using CIR ShiftOp --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 6 +- clang/test/CIR/CodeGen/AArch64/neon.c | 204 +++++++++--------- 2 files changed, 112 insertions(+), 98 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index ce6bd0d8db55..bf0142f85220 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2562,9 +2562,11 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( : "aarch64.neon.srhadd"; break; } + case NEON::BI__builtin_neon_vshl_v: case NEON::BI__builtin_neon_vshlq_v: { - intrincsName = (intrinicId != altLLVMIntrinsic) ? "aarch64.neon.ushl" - : "aarch64.neon.sshl"; + return builder.create( + getLoc(e->getExprLoc()), vTy, builder.createBitcast(ops[0], vTy), + builder.createBitcast(ops[1], vTy), true /* left */); break; } case NEON::BI__builtin_neon_vhadd_v: diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 8a6447ff2d39..2c1c0ee9bb1d 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -3391,89 +3391,114 @@ int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) { // return vqsubq_u64(a, b); // } -// NYI-LABEL: @test_vshl_s8( -// NYI: [[VSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sshl.v8i8(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i8> [[VSHL_V_I]] -// int8x8_t test_vshl_s8(int8x8_t a, int8x8_t b) { -// return vshl_s8(a, b); -// } +int8x8_t test_vshl_s8(int8x8_t a, int8x8_t b) { + return vshl_s8(a, b); -// NYI-LABEL: @test_vshl_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sshl.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: [[VSHL_V3_I:%.*]] = bitcast <4 x i16> [[VSHL_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VSHL_V2_I]] -// int16x4_t test_vshl_s16(int16x4_t a, int16x4_t b) { -// return vshl_s16(a, b); -// } + // CIR-LABEL: vshl_s8 + // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshl_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sshl.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: [[VSHL_V3_I:%.*]] = bitcast <2 x i32> [[VSHL_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VSHL_V2_I]] -// int32x2_t test_vshl_s32(int32x2_t a, int32x2_t b) { -// return vshl_s32(a, b); -// } + // LLVM: {{.*}}test_vshl_s8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VSHL_V_I:%.*]] = shl <8 x i8> [[A]], [[B]] + // LLVM: ret <8 x i8> [[VSHL_V_I]] +} -// NYI-LABEL: @test_vshl_s64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> -// NYI: [[VSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.sshl.v1i64(<1 x i64> %a, <1 x i64> %b) -// NYI: [[VSHL_V3_I:%.*]] = bitcast <1 x i64> [[VSHL_V2_I]] to <8 x i8> -// NYI: ret <1 x i64> [[VSHL_V2_I]] -// int64x1_t test_vshl_s64(int64x1_t a, int64x1_t b) { -// return vshl_s64(a, b); -// } +int16x4_t test_vshl_s16(int16x4_t a, int16x4_t b) { + return vshl_s16(a, b); -// NYI-LABEL: @test_vshl_u8( -// NYI: [[VSHL_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.ushl.v8i8(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i8> [[VSHL_V_I]] -// uint8x8_t test_vshl_u8(uint8x8_t a, int8x8_t b) { -// return vshl_u8(a, b); -// } + // CIR-LABEL: vshl_s16 + // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector -// NYI-LABEL: @test_vshl_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VSHL_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.ushl.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: [[VSHL_V3_I:%.*]] = bitcast <4 x i16> [[VSHL_V2_I]] to <8 x i8> -// NYI: ret <4 x i16> [[VSHL_V2_I]] -// uint16x4_t test_vshl_u16(uint16x4_t a, int16x4_t b) { -// return vshl_u16(a, b); -// } + // LLVM: {{.*}}test_vshl_s16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8> + // LLVM: [[VSHL_V2_I:%.*]] = shl <4 x i16> [[A]], [[B]] + // LLVM: ret <4 x i16> [[VSHL_V2_I]] +} -// NYI-LABEL: @test_vshl_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VSHL_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.ushl.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: [[VSHL_V3_I:%.*]] = bitcast <2 x i32> [[VSHL_V2_I]] to <8 x i8> -// NYI: ret <2 x i32> [[VSHL_V2_I]] -// uint32x2_t test_vshl_u32(uint32x2_t a, int32x2_t b) { -// return vshl_u32(a, b); -// } +int32x2_t test_vshl_s32(int32x2_t a, int32x2_t b) { + return vshl_s32(a, b); -// NYI-LABEL: @test_vshl_u64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> -// NYI: [[VSHL_V2_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.ushl.v1i64(<1 x i64> %a, <1 x i64> %b) -// NYI: [[VSHL_V3_I:%.*]] = bitcast <1 x i64> [[VSHL_V2_I]] to <8 x i8> -// NYI: ret <1 x i64> [[VSHL_V2_I]] -// uint64x1_t test_vshl_u64(uint64x1_t a, int64x1_t b) { -// return vshl_u64(a, b); -// } + // CIR-LABEL: vshl_s32 + // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshl_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8> + // LLVM: [[VSHL_V2_I:%.*]] = shl <2 x i32> [[A]], [[B]] + // LLVM: ret <2 x i32> [[VSHL_V2_I]] +} + +int64x1_t test_vshl_s64(int64x1_t a, int64x1_t b) { + return vshl_s64(a, b); + + // CIR-LABEL: vshl_s64 + // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshl_s64(<1 x i64>{{.*}}[[A:%.*]], <1 x i64>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8> + // LLVM: [[VSHL_V2_I:%.*]] = shl <1 x i64> [[A]], [[B]] + // LLVM: ret <1 x i64> [[VSHL_V2_I]] +} + +uint8x8_t test_vshl_u8(uint8x8_t a, int8x8_t b) { + return vshl_u8(a, b); + + // CIR-LABEL: vshl_u8 + // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshl_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VSHL_V_I:%.*]] = shl <8 x i8> [[A]], [[B]] + // LLVM: ret <8 x i8> [[VSHL_V_I]] +} + +uint16x4_t test_vshl_u16(uint16x4_t a, int16x4_t b) { + return vshl_u16(a, b); + + // CIR-LABEL: vshl_u16 + // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshl_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8> + // LLVM: [[VSHL_V2_I:%.*]] = shl <4 x i16> [[A]], [[B]] + // LLVM: ret <4 x i16> [[VSHL_V2_I]] +} + +uint32x2_t test_vshl_u32(uint32x2_t a, int32x2_t b) { + return vshl_u32(a, b); + + // CIR-LABEL: vshl_u32 + // CIR: cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshl_u32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8> + // LLVM: [[VSHL_V2_I:%.*]] = shl <2 x i32> [[A]], [[B]] + // LLVM: ret <2 x i32> [[VSHL_V2_I]] +} + +uint64x1_t test_vshl_u64(uint64x1_t a, int64x1_t b) { + return vshl_u64(a, b); + + // CIR-LABEL: vshl_u64 + // CIR: cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector + + // LLVM: {{.*}}test_vshl_u64(<1 x i64>{{.*}}[[A:%.*]], <1 x i64>{{.*}}[[B:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8> + // LLVM: [[VSHL_V2_I:%.*]] = shl <1 x i64> [[A]], [[B]] + // LLVM: ret <1 x i64> [[VSHL_V2_I]] +} int8x16_t test_vshlq_s8(int8x16_t a, int8x16_t b) { return vshlq_s8(a, b); // CIR-LABEL: vshlq_s8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : - // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) - // LLVM: [[VSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) + // LLVM: [[VSHLQ_V_I:%.*]] = shl <16 x i8> [[A]], [[B]] // LLVM: ret <16 x i8> [[VSHLQ_V_I]] } @@ -3481,14 +3506,12 @@ int16x8_t test_vshlq_s16(int16x8_t a, int16x8_t b) { return vshlq_s16(a, b); // CIR-LABEL: vshlq_s16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : - // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8> - // LLVM: [[VSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sshl.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) - // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = shl <8 x i16> [[A]], [[B]] // LLVM: ret <8 x i16> [[VSHLQ_V2_I]] } @@ -3496,14 +3519,12 @@ int32x4_t test_vshlq_s32(int32x4_t a, int32x4_t b) { return vshlq_s32(a, b); // CIR-LABEL: vshlq_s32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : - // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8> - // LLVM: [[VSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) - // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = shl <4 x i32> [[A]], [[B]] // LLVM: ret <4 x i32> [[VSHLQ_V2_I]] } @@ -3511,14 +3532,12 @@ int64x2_t test_vshlq_s64(int64x2_t a, int64x2_t b) { return vshlq_s64(a, b); // CIR-LABEL: vshlq_s64 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.sshl" {{%.*}}, {{%.*}} : - // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_s64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8> - // LLVM: [[VSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> [[A]], <2 x i64> [[B]]) - // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = shl <2 x i64> [[A]], [[B]] // LLVM: ret <2 x i64> [[VSHLQ_V2_I]] } @@ -3526,11 +3545,10 @@ uint8x16_t test_vshlq_u8(uint8x16_t a, int8x16_t b) { return vshlq_u8(a, b); // CIR-LABEL: vshlq_u8 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : - // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) - // LLVM: [[VSHLQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.ushl.v16i8(<16 x i8> [[A]], <16 x i8> [[B]]) + // LLVM: [[VSHLQ_V_I:%.*]] = shl <16 x i8> [[A]], [[B]] // LLVM: ret <16 x i8> [[VSHLQ_V_I]] } @@ -3538,14 +3556,12 @@ uint16x8_t test_vshlq_u16(uint16x8_t a, int16x8_t b) { return vshlq_u16(a, b); // CIR-LABEL: vshlq_u16 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : - // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8> - // LLVM: [[VSHLQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> [[A]], <8 x i16> [[B]]) - // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <8 x i16> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = shl <8 x i16> [[A]], [[B]] // LLVM: ret <8 x i16> [[VSHLQ_V2_I]] } @@ -3553,14 +3569,12 @@ uint32x4_t test_vshlq_u32(uint32x4_t a, int32x4_t b) { return vshlq_u32(a, b); // CIR-LABEL: vshlq_u32 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : - // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u32(<4 x i32>{{.*}}[[A:%.*]], <4 x i32>{{.*}}[[B:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8> // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8> - // LLVM: [[VSHLQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) - // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <4 x i32> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = shl <4 x i32> [[A]], [[B]] // LLVM: ret <4 x i32> [[VSHLQ_V2_I]] } @@ -3568,14 +3582,12 @@ uint64x2_t test_vshlq_u64(uint64x2_t a, int64x2_t b) { return vshlq_u64(a, b); // CIR-LABEL: vshlq_u64 - // CIR: {{%.*}} = cir.llvm.intrinsic "aarch64.neon.ushl" {{%.*}}, {{%.*}} : - // CIR-SAME: (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.shift(left, {{%.*}} : !cir.vector, {{%.*}} : !cir.vector) -> !cir.vector // LLVM: {{.*}}test_vshlq_u64(<2 x i64>{{.*}}[[A:%.*]], <2 x i64>{{.*}}[[B:%.*]]) // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8> // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8> - // LLVM: [[VSHLQ_V2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.ushl.v2i64(<2 x i64> [[A]], <2 x i64> [[B]]) - // LLVM: [[VSHLQ_V3_I:%.*]] = bitcast <2 x i64> [[VSHLQ_V2_I]] to <16 x i8> + // LLVM: [[VSHLQ_V2_I:%.*]] = shl <2 x i64> [[A]], [[B]] // LLVM: ret <2 x i64> [[VSHLQ_V2_I]] } From f61557a3a08891ab7dcbd98e5c83616f4abd4f5d Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Wed, 27 Nov 2024 07:56:23 +0800 Subject: [PATCH 2123/2301] [CIR] Correct signedness for createSignedInt (#1167) After I rebased, I found these problems with Spec2017. I was surprised why it doesn't have problems. Maybe some updates in LLVM part. --- clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 3 ++- .../lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index b19fe5884e86..87ef2766a5c9 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -50,7 +50,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Value getSignedInt(mlir::Location loc, int64_t val, unsigned numBits) { return getConstAPSInt( - loc, llvm::APSInt(llvm::APInt(numBits, val), /*isUnsigned=*/false)); + loc, llvm::APSInt(llvm::APInt(numBits, val, /*isSigned=*/true), + /*isUnsigned=*/false)); } mlir::Value getUnsignedInt(mlir::Location loc, uint64_t val, diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp index 66e40e6ac5d0..feac26463304 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfoImpl.cpp @@ -45,7 +45,7 @@ mlir::Value emitRoundPointerUpToAlignment(cir::CIRBaseBuilderTy &builder, builder.getUnsignedInt(loc, alignment - 1, /*width=*/32)); return builder.create( loc, roundUp.getType(), roundUp, - builder.getSignedInt(loc, -alignment, /*width=*/32)); + builder.getSignedInt(loc, -(signed)alignment, /*width=*/32)); } mlir::Type useFirstFieldIfTransparentUnion(mlir::Type Ty) { From f8ecab29768afce66755dfd0983d7200e21cb250 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Wed, 27 Nov 2024 07:57:13 +0800 Subject: [PATCH 2124/2301] [CIR] Relax the requirement for ternary operation (#1168) The requirement for the size of then-else part of cir.ternary operation seems to be too conservative. Like the example shows, it is possible the regions got expanded during the transformation. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 ++-- clang/test/CIR/CodeGen/ternary.c | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/ternary.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 38ce76aabde3..74443ca1bdaf 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -805,8 +805,8 @@ def TernaryOp : CIR_Op<"ternary", ``` }]; let arguments = (ins CIR_BoolType:$cond); - let regions = (region SizedRegion<1>:$trueRegion, - SizedRegion<1>:$falseRegion); + let regions = (region AnyRegion:$trueRegion, + AnyRegion:$falseRegion); let results = (outs Optional:$result); let skipDefaultBuilders = 1; diff --git a/clang/test/CIR/CodeGen/ternary.c b/clang/test/CIR/CodeGen/ternary.c new file mode 100644 index 000000000000..16c20460f102 --- /dev/null +++ b/clang/test/CIR/CodeGen/ternary.c @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR + +#include + +double f1(int cond, int n, ...) { + va_list valist; + va_start(valist, n); + double res = cond ? va_arg(valist, double) : 0; + va_end(valist); + return res; +} + +// Fine enough to check it passes the verifying. +// CIR: cir.ternary From 98179f84577800ac6fe310e0a5d884df78e26568 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Wed, 27 Nov 2024 02:59:55 +0300 Subject: [PATCH 2125/2301] [CIR][ABI][AArch64][Lowering] Fix the callsite for nested unions (#1169) For example, the following reaches ["NYI"](https://github.com/llvm/clangir/blob/c8b626d49e7f306052b2e6d3ce60b1f689d37cb5/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp#L348) when lowering to AArch64: ``` typedef struct { union { struct { char a, b; }; char c; }; } A; void foo(A a) {} void bar() { A a; foo(a); } ``` Currently, the value of the struct becomes a bitcast operation, so we can simply extend `findAlloca` to be able to trace the source alloca properly, then use that for the [coercion](https://github.com/llvm/clangir/blob/c8b626d49e7f306052b2e6d3ce60b1f689d37cb5/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp#L341) through memory. I have also added a test for this case. --- .../TargetLowering/LowerFunction.cpp | 2 + .../AArch64/aarch64-cc-structs.c | 54 +++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index a3f9f94a86b0..0771bce3399e 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -225,6 +225,8 @@ cir::AllocaOp findAlloca(mlir::Operation *op) { return findAlloca(vals[0].getDefiningOp()); } else if (auto load = mlir::dyn_cast(op)) { return findAlloca(load.getAddr().getDefiningOp()); + } else if (auto cast = mlir::dyn_cast(op)) { + return findAlloca(cast.getSrc().getDefiningOp()); } return {}; diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index 0e2f3cccbb9f..34923dc2422e 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -276,3 +276,57 @@ typedef struct { // LLVM: %[[#V3:]] = load ptr, ptr %[[#V2]], align 8 // LLVM: ret void void pass_cat(CAT a) {} + +typedef struct { + union { + struct { + char a, b; + }; + char c; + }; +} NESTED_U; + +// CHECK: cir.func @pass_nested_u(%arg0: !u64i +// CHECK: %[[#V0:]] = cir.alloca !ty_NESTED_U, !cir.ptr, [""] {alignment = 4 : i64} +// CHECK: %[[#V1:]] = cir.cast(integral, %arg0 : !u64i), !u16i +// CHECK: %[[#V2:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr +// CHECK: cir.store %[[#V1]], %[[#V2]] : !u16i +// CHECK: cir.return + +// LLVM: @pass_nested_u(i64 %[[#V0:]] +// LLVM: %[[#V2:]] = alloca %struct.NESTED_U, i64 1, align 4 +// LLVM: %[[#V3:]] = trunc i64 %[[#V0]] to i16 +// LLVM: store i16 %[[#V3]], ptr %[[#V2]], align 2 +// LLVM: ret void +void pass_nested_u(NESTED_U a) {} + +// CHECK: cir.func no_proto @call_nested_u() +// CHECK: %[[#V0:]] = cir.alloca !ty_NESTED_U, !cir.ptr +// CHECK: %[[#V1:]] = cir.alloca !u64i, !cir.ptr, ["tmp"] {alignment = 8 : i64} +// CHECK: %[[#V2:]] = cir.load %[[#V0]] : !cir.ptr, !ty_NESTED_U +// CHECK: %[[#V3:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr) +// CHECK: %[[#V4:]] = cir.load %[[#V3]] +// CHECK: %[[#V5:]] = cir.cast(bitcast, %[[#V3]] +// CHECK: %[[#V6:]] = cir.load %[[#V5]] +// CHECK: %[[#V7:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V8:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V9:]] = cir.const #cir.int<2> : !u64i +// CHECK: cir.libc.memcpy %[[#V9]] bytes from %[[#V7]] to %[[#V8]] : !u64i, !cir.ptr -> !cir.ptr +// CHECK: %[[#V10:]] = cir.load %[[#V1]] : !cir.ptr, !u64i +// CHECK: cir.call @pass_nested_u(%[[#V10]]) : (!u64i) -> () +// CHECK: cir.return + +// LLVM: void @call_nested_u() +// LLVM: %[[#V1:]] = alloca %struct.NESTED_U, i64 1, align 1 +// LLVM: %[[#V2:]] = alloca i64, i64 1, align 8 +// LLVM: %[[#V3:]] = load %struct.NESTED_U, ptr %[[#V1]], align 1 +// LLVM: %[[#V4:]] = load %union.anon.0, ptr %[[#V1]], align 1 +// LLVM: %[[#V5:]] = load %struct.anon.1, ptr %[[#V1]], align 1 +// LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#V2]], ptr %[[#V1]], i64 2, i1 false) +// LLVM: %[[#V6:]] = load i64, ptr %[[#V2]], align 8 +// LLVM: call void @pass_nested_u(i64 %[[#V6]]) +// LLVM: ret void +void call_nested_u() { + NESTED_U a; + pass_nested_u(a); +} From 1933610f3d539261624de4064df0cb4adf70ca28 Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 26 Nov 2024 19:00:52 -0500 Subject: [PATCH 2126/2301] [CIR][NFC][Test] Add llvm ir test for lambdas (#1170) Added a few FIXMEs. There are 2 types of FIXMEs; 1. Most of them are missing func call and parameter attributes. I didn't add for all missing sites for this type as it would have been just copy pastes. 2. FIXME in lambda __invoke(): OG simply returns but CIR generates call to llvm.trap. This is just temporary and we will fix in in near future. But I feel I should still list those IRs so once we fix problem with codegen of invoke, we'd get test failure on this one and fix it. Actually, this way, this test file would be a natural test case for implementation of invoke. --- clang/test/CIR/CodeGen/lambda.cpp | 151 ++++++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index b1084ff81bd3..97193997e7a1 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -1,5 +1,8 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -Wno-return-stack-address -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o - %s \ +// RUN: | opt -S -passes=instcombine,mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s void fn() { auto a = [](){}; @@ -15,6 +18,21 @@ void fn() { // CHECK-NEXT: %0 = cir.alloca !ty_anon2E0_, !cir.ptr, ["a"] // CHECK: cir.call @_ZZ2fnvENK3$_0clEv +// LLVM: {{.*}}void @"_ZZ2fnvENK3$_0clEv"(ptr [[THIS:%.*]]) +// FIXME: argument attributes should be emmitted, and lambda's alignment +// COM: LLVM: {{.*}} @"_ZZ2fnvENK3$_0clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[THIS:%.*]]){{%.*}} align 2 { +// LLVM: [[THIS_ADDR:%.*]] = alloca ptr, i64 1, align 8 +// LLVM: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// LLVM: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// LLVM: ret void + +// LLVM-LABEL: _Z2fnv +// LLVM: [[a:%.*]] = alloca %class.anon.0, i64 1, align 1 +// FIXME: parameter attributes should be emitted +// LLVM: call void @"_ZZ2fnvENK3$_0clEv"(ptr [[a]]) +// COM: LLVM: call void @"_ZZ2fnvENK3$_0clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[a]]) +// LLVM: ret void + void l0() { int i; auto a = [&](){ i = i + 1; }; @@ -37,6 +55,34 @@ void l0() { // CHECK: cir.func @_Z2l0v() +// LLVM: {{.* }}void @"_ZZ2l0vENK3$_0clEv"(ptr [[THIS:%.*]]) +// LLVM: [[THIS_ADDR:%.*]] = alloca ptr, i64 1, align 8 +// LLVM: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// LLVM: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// LLVM: [[I:%.*]] = getelementptr %class.anon.2, ptr [[THIS1]], i32 0, i32 0 +// FIXME: getelementptr argument attributes should be emitted +// COM: LLVM: [[I:%.*]] = getelementptr inbounds nuw %class.anon.0, ptr [[THIS1]], i32 0, i32 0 +// LLVM: [[TMP0:%.*]] = load ptr, ptr [[I]], align 8 +// LLVM: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// LLVM: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 +// LLVM: [[I:%.*]] = getelementptr %class.anon.2, ptr [[THIS1]], i32 0, i32 +// COM: LLVM: [[I:%.*]] = getelementptr inbounds nuw %class.anon.0, ptr [[THIS1]], i32 0, i32 0 +// LLVM: [[TMP4:%.*]] = load ptr, ptr [[I]], align 8 +// LLVM: store i32 [[ADD]], ptr [[TMP4]], align 4 +// LLVM: ret void + +// LLVM-LABEL: _Z2l0v +// LLVM: [[i:%.*]] = alloca i32, i64 1, align 4 +// LLVM: [[a:%.*]] = alloca %class.anon.2, i64 1, align 8 +// FIXME: getelementptr argument attributes should be emitted +// COM: LLVM: [[TMP0:%.*]] = getelementptr inbounds %class.anon.2, ptr [[a]], i32 0, i32 0 +// LLVM: [[TMP0:%.*]] = getelementptr %class.anon.2, ptr [[a]], i32 0, i32 0 +// LLVM: store ptr [[i]], ptr [[TMP0]], align 8 +// FIXME: parameter attributes should be emitted +// COM: LLVM: call void @"_ZZ2l0vENK3$_0clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[a]]) +// LLVM: call void @"_ZZ2l0vENK3$_0clEv"(ptr [[a]]) +// LLVM: ret void + auto g() { int i = 12; return [&] { @@ -55,6 +101,15 @@ auto g() { // CHECK: %4 = cir.load %0 : !cir.ptr, !ty_anon2E3_ // CHECK: cir.return %4 : !ty_anon2E3_ +// LLVM-LABEL: @_Z1gv() +// LLVM: [[retval:%.*]] = alloca %class.anon.3, i64 1, align 8 +// LLVM: [[i:%.*]] = alloca i32, i64 1, align 4 +// LLVM: store i32 12, ptr [[i]], align 4 +// LLVM: [[i_addr:%.*]] = getelementptr %class.anon.3, ptr [[retval]], i32 0, i32 0 +// LLVM: store ptr [[i]], ptr [[i_addr]], align 8 +// LLVM: [[tmp:%.*]] = load %class.anon.3, ptr [[retval]], align 8 +// LLVM: ret %class.anon.3 [[tmp]] + auto g2() { int i = 12; auto lam = [&] { @@ -75,6 +130,15 @@ auto g2() { // CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !ty_anon2E4_ // CHECK-NEXT: cir.return %4 : !ty_anon2E4_ +// LLVM-LABEL: @_Z2g2v() +// LLVM: [[retval:%.*]] = alloca %class.anon.4, i64 1, align 8 +// LLVM: [[i:%.*]] = alloca i32, i64 1, align 4 +// LLVM: store i32 12, ptr [[i]], align 4 +// LLVM: [[i_addr:%.*]] = getelementptr %class.anon.4, ptr [[retval]], i32 0, i32 0 +// LLVM: store ptr [[i]], ptr [[i_addr]], align 8 +// LLVM: [[tmp:%.*]] = load %class.anon.4, ptr [[retval]], align 8 +// LLVM: ret %class.anon.4 [[tmp]] + int f() { return g2()(); } @@ -92,6 +156,36 @@ int f() { // CHECK-NEXT: cir.return %1 : !s32i // CHECK-NEXT: } +// LLVM: {{.*}}i32 @"_ZZ2g2vENK3$_0clEv"(ptr [[THIS:%.*]]) +// LLVM: [[THIS_ADDR:%.*]] = alloca ptr, i64 1, align 8 +// LLVM: [[I_SAVE:%.*]] = alloca i32, i64 1, align 4 +// LLVM: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// LLVM: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// LLVM: [[I:%.*]] = getelementptr %class.anon.4, ptr [[THIS1]], i32 0, i32 0 +// LLVM: [[TMP0:%.*]] = load ptr, ptr [[I]], align 8 +// LLVM: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// LLVM: [[ADD:%.*]] = add nsw i32 [[TMP1]], 100 +// LLVM: [[I:%.*]] = getelementptr %class.anon.4, ptr [[THIS1]], i32 0, i32 0 +// LLVM: [[TMP4:%.*]] = load ptr, ptr [[I]], align 8 +// LLVM: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// LLVM: store i32 [[TMP5]], ptr [[I_SAVE]], align 4 +// LLVM: [[TMP6:%.*]] = load i32, ptr [[I_SAVE]], align 4 +// LLVM: ret i32 [[TMP6]] + +// LLVM-LABEL: _Z1fv +// LLVM: [[ref_tmp0:%.*]] = alloca %class.anon.4, i64 1, align 8 +// LLVM: [[ret_val:%.*]] = alloca i32, i64 1, align 4 +// LLVM: br label %[[scope_bb:[0-9]+]], +// LLVM: [[scope_bb]]: +// LLVM: [[tmp0:%.*]] = call %class.anon.4 @_Z2g2v() +// LLVM: store %class.anon.4 [[tmp0]], ptr [[ref_tmp0]], align 8 +// LLVM: [[tmp1:%.*]] = call i32 @"_ZZ2g2vENK3$_0clEv"(ptr [[ref_tmp0]]) +// LLVM: store i32 [[tmp1]], ptr [[ret_val]], align 4 +// LLVM: br label %[[ret_bb:[0-9]+]], +// LLVM: [[ret_bb]]: +// LLVM: [[tmp2:%.*]] = load i32, ptr [[ret_val]], align 4 +// LLVM: ret i32 [[tmp2]] + int g3() { auto* fn = +[](int const& i) -> int { return i; }; auto task = fn(3); @@ -134,3 +228,60 @@ int g3() { // CHECK: } // CHECK: } + +// lambda operator() +// FIXME: argument attributes should be emitted +// COM: LLVM: define internal noundef i32 @"_ZZ2g3vENK3$_0clERKi"(ptr noundef nonnull align 1 dereferenceable(1) {{%.*}}, ptr noundef nonnull align 4 dereferenceable(4){{%.*}}) #0 align 2 +// LLVM: {{.*}}i32 @"_ZZ2g3vENK3$_0clERKi"(ptr {{%.*}}, ptr {{%.*}}) + +// lambda __invoke() +// LLVM: {{.*}}i32 @"_ZZ2g3vEN3$_08__invokeERKi"(ptr [[i:%.*]]) +// LLVM: [[i_addr:%.*]] = alloca ptr, i64 1, align 8 +// LLVM: [[ret_val:%.*]] = alloca i32, i64 1, align 4 +// LLVM: [[unused_capture:%.*]] = alloca %class.anon.5, i64 1, align 1 +// LLVM: store ptr [[i]], ptr [[i_addr]], align 8 +// LLVM: [[TMP0:%.*]] = load ptr, ptr [[i_addr]], align 8 +// FIXME: call and argument attributes should be emitted +// COM: LLVM: [[CALL:%.*]] = call noundef i32 @"_ZZ2g3vENK3$_0clERKi"(ptr noundef nonnull align 1 dereferenceable(1) [[unused_capture]], ptr noundef nonnull align 4 dereferenceable(4) [[TMP0]]) +// LLVM: [[CALL:%.*]] = call i32 @"_ZZ2g3vENK3$_0clERKi"(ptr [[unused_capture]], ptr [[TMP0]]) +// LLVM: store i32 [[CALL]], ptr [[ret_val]], align 4 +// FIXME: should just return result +// COM: LLVM: ret i32 [[ret_val]] +// LLVM: call void @llvm.trap() +// LLVM: unreachable + +// lambda operator int (*)(int const&)() +// LLVM-LABEL: @"_ZZ2g3vENK3$_0cvPFiRKiEEv" +// LLVM: store ptr @"_ZZ2g3vEN3$_08__invokeERKi", ptr [[ret_val:%.*]], align 8 +// LLVM: [[TMP0:%.*]] = load ptr, ptr [[ret_val]], align 8 +// LLVM: ret ptr [[TMP0]] + +// LLVM-LABEL: _Z2g3v +// LLVM-DAG: [[ref_tmp0:%.*]] = alloca %class.anon.5, i64 1, align 1 +// LLVM-DAG: [[ref_tmp1:%.*]] = alloca i32, i64 1, align 4 +// LLVM-DAG: [[ret_val:%.*]] = alloca i32, i64 1, align 4 +// LLVM-DAG: [[fn_ptr:%.*]] = alloca ptr, i64 1, align 8 +// LLVM-DAG: [[task:%.*]] = alloca i32, i64 1, align 4 +// LLVM: br label %[[scope0_bb:[0-9]+]], + +// LLVM: [[scope0_bb]]: {{.*}}; preds = %0 +// LLVM: [[call:%.*]] = call ptr @"_ZZ2g3vENK3$_0cvPFiRKiEEv"(ptr [[ref_tmp0]]) +// LLVM: br label %[[scope1_before:[0-9]+]], + +// LLVM: [[scope1_before]]: {{.*}}; preds = %[[scope0_bb]] +// LLVM: [[tmp0:%.*]] = phi ptr [ [[call]], %[[scope0_bb]] ] +// LLVM: br label %[[scope1_bb:[0-9]+]], + +// LLVM: [[scope1_bb]]: {{.*}}; preds = %[[scope1_before]] +// LLVM: [[fn:%.*]] = load ptr, ptr [[fn_ptr]], align 8 +// LLVM: store i32 3, ptr [[ref_tmp1]], align 4 +// LLVM: [[call1:%.*]] = call i32 [[fn]](ptr [[ref_tmp1]]) +// LLVM: br label %[[ret_bb:[0-9]+]], + +// LLVM: [[ret_bb]]: {{.*}}; preds = %[[scope1_bb]] +// LLVM: [[tmp1:%.*]] = phi i32 [ [[call1]], %[[scope1_bb]] ] +// LLVM: store i32 [[tmp1]], ptr [[task]], align 4 +// LLVM: [[tmp2:%.*]] = load i32, ptr [[task]], align 4 +// LLVM: store i32 [[tmp2]], ptr [[ret_val]], align 4 +// LLVM: [[tmp3:%.*]] = load i32, ptr [[ret_val]], align 4 +// LLVM: ret i32 [[tmp3]] From 160e55dfab858b48a6e80c32162743a0b5529f28 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Mon, 25 Nov 2024 17:27:28 -0800 Subject: [PATCH 2127/2301] [CIR][CIRGen] Add more tracking skeleton for missing cleanups There are scenarios where we are not emitting cleanups, this commit starts to pave the way to be more complete in that area. Small addition of skeleton here plus some fixes. Both `clang/test/CIR/CodeGen/vla.c` and `clang/test/CIR/CodeGen/nrvo.cpp` now pass in face of this code path. --- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 44 ++++++++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 10 +++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 21 ++++++++--- clang/lib/CIR/CodeGen/EHScopeStack.h | 32 ++++++++--------- 4 files changed, 79 insertions(+), 28 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 4e0a305a502c..40fc101d4c23 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -42,8 +42,36 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc, // Insert a branch: to the cleanup block (unsolved) or to the already // materialized label. Keep track of unsolved goto's. - return builder.create(Loc, Dest.isValid() ? Dest.getBlock() - : ReturnBlock().getBlock()); + auto brOp = builder.create( + Loc, Dest.isValid() ? Dest.getBlock() : ReturnBlock().getBlock()); + + // Calculate the innermost active normal cleanup. + EHScopeStack::stable_iterator TopCleanup = + EHStack.getInnermostActiveNormalCleanup(); + + // If we're not in an active normal cleanup scope, or if the + // destination scope is within the innermost active normal cleanup + // scope, we don't need to worry about fixups. + if (TopCleanup == EHStack.stable_end() || + TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid + // FIXME(cir): should we clear insertion point here? + return brOp; + } + + // If we can't resolve the destination cleanup scope, just add this + // to the current cleanup scope as a branch fixup. + if (!Dest.getScopeDepth().isValid()) { + BranchFixup &Fixup = EHStack.addBranchFixup(); + Fixup.destination = Dest.getBlock(); + Fixup.destinationIndex = Dest.getDestIndex(); + Fixup.initialBranch = brOp; + Fixup.optimisticBranchBlock = nullptr; + // FIXME(cir): should we clear insertion point here? + return brOp; + } + + // FIXME(cir): otherwise, thread through all the normal cleanups in scope. + return brOp; } /// Emits all the code to cause the given temporary to be cleaned up. @@ -574,6 +602,18 @@ void CIRGenFunction::PopCleanupBlocks( void EHScopeStack::Cleanup::anchor() {} +EHScopeStack::stable_iterator +EHScopeStack::getInnermostActiveNormalCleanup() const { + for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end(); + si != se;) { + EHCleanupScope &cleanup = cast(*find(si)); + if (cleanup.isActive()) + return si; + si = cleanup.getEnclosingNormalCleanup(); + } + return stable_end(); +} + /// Push an entry of the given size onto this protected-scope stack. char *EHScopeStack::allocate(size_t Size) { Size = llvm::alignTo(Size, ScopeStackAlignment); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 1c84cb3ca71b..fa46b0bd2cef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -537,8 +537,8 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // the ret after it's been at EndLoc. if (auto *DI = getDebugInfo()) assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); - // FIXME(cir): vla.c test currently crashes here. - // PopCleanupBlocks(PrologueCleanupDepth); + builder.clearInsertionPoint(); + PopCleanupBlocks(PrologueCleanupDepth); } // Emit function epilog (to return). @@ -561,8 +561,7 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { assert(!cir::MissingFeatures::emitFunctionEpilog() && "NYI"); assert(!cir::MissingFeatures::emitEndEHSpec() && "NYI"); - // FIXME(cir): vla.c test currently crashes here. - // assert(EHStack.empty() && "did not remove all scopes from cleanup stack!"); + assert(EHStack.empty() && "did not remove all scopes from cleanup stack!"); // If someone did an indirect goto, emit the indirect goto block at the end of // the function. @@ -1203,8 +1202,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, } assert(!cir::MissingFeatures::emitStartEHSpec() && "NYI"); - // FIXME(cir): vla.c test currently crashes here. - // PrologueCleanupDepth = EHStack.stable_begin(); + PrologueCleanupDepth = EHStack.stable_begin(); // Emit OpenMP specific initialization of the device functions. if (getLangOpts().OpenMP && CurCodeDecl) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index e5db0a01e429..4172ac1e208d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -76,11 +76,24 @@ class CIRGenFunction : public CIRGenTypeCache { /// require a jump out through normal cleanups. struct JumpDest { JumpDest() = default; - JumpDest(mlir::Block *Block) : Block(Block) {} + JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {}, + unsigned index = 0) + : block(block) {} + + bool isValid() const { return block != nullptr; } + mlir::Block *getBlock() const { return block; } + EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; } + unsigned getDestIndex() const { return index; } + + // This should be used cautiously. + void setScopeDepth(EHScopeStack::stable_iterator depth) { + scopeDepth = depth; + } - bool isValid() const { return Block != nullptr; } - mlir::Block *getBlock() const { return Block; } - mlir::Block *Block = nullptr; + private: + mlir::Block *block = nullptr; + EHScopeStack::stable_iterator scopeDepth; + unsigned index; }; /// Track mlir Blocks for each C/C++ label. diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h index e235c9ec3685..a35ca2140856 100644 --- a/clang/lib/CIR/CodeGen/EHScopeStack.h +++ b/clang/lib/CIR/CodeGen/EHScopeStack.h @@ -34,22 +34,22 @@ class CIRGenFunction; /// the innermost cleanup. When a (normal) cleanup is popped, any /// unresolved fixups in that scope are threaded through the cleanup. struct BranchFixup { - // /// The block containing the terminator which needs to be modified - // /// into a switch if this fixup is resolved into the current scope. - // /// If null, LatestBranch points directly to the destination. - // llvm::BasicBlock *OptimisticBranchBlock; - - // /// The ultimate destination of the branch. - // /// - // /// This can be set to null to indicate that this fixup was - // /// successfully resolved. - // llvm::BasicBlock *Destination; - - // /// The destination index value. - // unsigned DestinationIndex; - - // /// The initial branch of the fixup. - // llvm::BranchInst *InitialBranch; + /// The block containing the terminator which needs to be modified + /// into a switch if this fixup is resolved into the current scope. + /// If null, LatestBranch points directly to the destination. + mlir::Block *optimisticBranchBlock = nullptr; + + /// The ultimate destination of the branch. + /// + /// This can be set to null to indicate that this fixup was + /// successfully resolved. + mlir::Block *destination = nullptr; + + /// The destination index value. + unsigned destinationIndex = 0; + + /// The initial branch of the fixup. + cir::BrOp initialBranch = {}; }; template struct InvariantValue { From 641baf67aadf6e03addb93a491d5b7d160f71586 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 26 Nov 2024 16:42:51 -0800 Subject: [PATCH 2128/2301] [CIR][NFC] Cleanup warnings post rebase --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenTBAA.h | 8 ++++---- .../Transforms/TargetLowering/RecordLayoutBuilder.cpp | 6 ------ 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 8dff466cecd8..410c25891bbb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -101,6 +101,7 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *S, switch (S->getStmtClass()) { case Stmt::OMPScopeDirectiveClass: llvm_unreachable("NYI"); + case Stmt::OpenACCCombinedConstructClass: case Stmt::OpenACCComputeConstructClass: case Stmt::OpenACCLoopConstructClass: case Stmt::OMPErrorDirectiveClass: diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h index b6a392bd164c..49ff321e342a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.h +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.h @@ -98,11 +98,11 @@ struct TBAAAccessInfo { /// types to LLVM types. class CIRGenTBAA { mlir::MLIRContext *ctx; - clang::ASTContext &context; - CIRGenTypes &types; + [[maybe_unused]] clang::ASTContext &context; + [[maybe_unused]] CIRGenTypes &types; mlir::ModuleOp moduleOp; - const clang::CodeGenOptions &codeGenOpts; - const clang::LangOptions &features; + [[maybe_unused]] const clang::CodeGenOptions &codeGenOpts; + [[maybe_unused]] const clang::LangOptions &features; public: CIRGenTBAA(mlir::MLIRContext *ctx, clang::ASTContext &context, diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp index db2af4ac9177..e47fb6fa3afc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/RecordLayoutBuilder.cpp @@ -487,10 +487,6 @@ void ItaniumRecordLayoutBuilder::finishLayout(const StructType D) { // Finally, round the size of the record up to the alignment of the // record itself. - uint64_t unpaddedSize = getSizeInBits() - UnfilledBitsInLastUnit; - uint64_t unpackedSizeInBits = - llvm::alignTo(getSizeInBits(), Context.toBits(UnpackedAlignment)); - uint64_t roundedSize = llvm::alignTo( getSizeInBits(), Context.toBits(!Context.getTargetInfo().defaultsToAIXPowerAlignment() @@ -549,10 +545,8 @@ void ItaniumRecordLayoutBuilder::checkFieldPadding( // Warn if padding was introduced to the struct/class. if (!IsUnion && Offset > UnpaddedOffset) { unsigned padSize = Offset - UnpaddedOffset; - bool inBits = true; if (padSize % CharBitNum == 0) { padSize = padSize / CharBitNum; - inBits = false; } cir_cconv_assert(!cir::MissingFeatures::bitFieldPaddingDiagnostics()); } From 9f52eeec2c35d928e2743659fe5174494587ef7b Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Wed, 27 Nov 2024 14:11:14 +0800 Subject: [PATCH 2129/2301] [CIR][Lowering][debuginfo] Disable debug info if `-g` is not specified (#1145) Fix https://github.com/llvm/clangir/issues/793 --- clang/include/clang/CIR/LowerToLLVM.h | 3 +- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 22 ++++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 ++- clang/test/CIR/CodeGen/AArch64/neon.c | 32 +++++----- clang/test/CIR/CodeGen/OpenCL/convergent.cl | 6 +- clang/test/CIR/CodeGen/abstract-cond.c | 2 +- clang/test/CIR/CodeGen/annotations-var.c | 2 +- clang/test/CIR/CodeGen/atomic-xchg-field.c | 8 +-- .../CodeGen/call-via-class-member-funcptr.cpp | 10 ++-- clang/test/CIR/CodeGen/clear_cache.c | 2 +- clang/test/CIR/CodeGen/fun-ptr.c | 2 +- clang/test/CIR/CodeGen/func_dsolocal_pie.c | 10 ++-- clang/test/CIR/CodeGen/global-new.cpp | 8 +-- clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp | 36 ++++++------ .../CIR/CodeGen/initlist-ptr-unsigned.cpp | 26 ++++----- clang/test/CIR/CodeGen/lambda.cpp | 12 ++-- clang/test/CIR/CodeGen/libc.c | 2 +- clang/test/CIR/CodeGen/new-null.cpp | 4 +- clang/test/CIR/CodeGen/pass-object-size.c | 6 +- clang/test/CIR/CodeGen/sourcelocation.cpp | 2 +- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 58 +++++++++---------- clang/test/CIR/CodeGen/var-arg-float.c | 56 +++++++++--------- clang/test/CIR/CodeGen/var-arg-scope.c | 44 +++++++------- clang/test/CIR/CodeGen/var-arg.c | 56 +++++++++--------- .../CIR/CodeGen/virtual-destructor-calls.cpp | 4 +- clang/test/CIR/CodeGen/visibility-attribute.c | 6 +- clang/test/CIR/CodeGen/weak.c | 2 +- clang/test/CIR/Lowering/array-init.c | 2 +- clang/test/CIR/Lowering/debug-info.c | 19 ++++++ clang/test/CIR/Lowering/int-wrap.cir | 6 +- clang/test/CIR/Lowering/switch-while.c | 20 +++---- clang/test/CIR/Lowering/var-arg-x86_64.c | 4 +- clang/tools/cir-translate/cir-translate.cpp | 3 +- 33 files changed, 256 insertions(+), 227 deletions(-) create mode 100644 clang/test/CIR/Lowering/debug-info.c diff --git a/clang/include/clang/CIR/LowerToLLVM.h b/clang/include/clang/CIR/LowerToLLVM.h index 325cbf3afd5d..2992163196e7 100644 --- a/clang/include/clang/CIR/LowerToLLVM.h +++ b/clang/include/clang/CIR/LowerToLLVM.h @@ -31,7 +31,8 @@ namespace cir { namespace direct { std::unique_ptr lowerDirectlyFromCIRToLLVMIR( mlir::ModuleOp theModule, llvm::LLVMContext &llvmCtx, - bool disableVerifier = false, bool disableCCLowering = false); + bool disableVerifier = false, bool disableCCLowering = false, + bool disableDebugInfo = false); } // Lower directly from pristine CIR to LLVMIR. diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 7fd1e904e44d..ad11ae9be66f 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -94,10 +94,11 @@ getBackendActionFromOutputType(CIRGenAction::OutputType action) { static std::unique_ptr lowerFromCIRToLLVMIR( const clang::FrontendOptions &feOptions, mlir::ModuleOp mlirMod, std::unique_ptr mlirCtx, llvm::LLVMContext &llvmCtx, - bool disableVerifier = false, bool disableCCLowering = false) { + bool disableVerifier = false, bool disableCCLowering = false, + bool disableDebugInfo = false) { if (feOptions.ClangIRDirectLowering) return direct::lowerDirectlyFromCIRToLLVMIR( - mlirMod, llvmCtx, disableVerifier, disableCCLowering); + mlirMod, llvmCtx, disableVerifier, disableCCLowering, disableDebugInfo); else return lowerFromCIRToMLIRToLLVMIR(mlirMod, std::move(mlirCtx), llvmCtx); } @@ -287,10 +288,12 @@ class CIRGenConsumer : public clang::ASTConsumer { case CIRGenAction::OutputType::EmitObj: case CIRGenAction::OutputType::EmitAssembly: { llvm::LLVMContext llvmCtx; - auto llvmModule = - lowerFromCIRToLLVMIR(feOptions, mlirMod, std::move(mlirCtx), llvmCtx, - feOptions.ClangIRDisableCIRVerifier, - !feOptions.ClangIRCallConvLowering); + bool disableDebugInfo = + codeGenOptions.getDebugInfo() == llvm::codegenoptions::NoDebugInfo; + auto llvmModule = lowerFromCIRToLLVMIR( + feOptions, mlirMod, std::move(mlirCtx), llvmCtx, + feOptions.ClangIRDisableCIRVerifier, + !feOptions.ClangIRCallConvLowering, disableDebugInfo); BackendAction backendAction = getBackendActionFromOutputType(action); @@ -439,10 +442,12 @@ void CIRGenAction::ExecuteAction() { // FIXME(cir): This compilation path does not account for some flags. llvm::LLVMContext llvmCtx; + bool disableDebugInfo = + ci.getCodeGenOpts().getDebugInfo() == llvm::codegenoptions::NoDebugInfo; auto llvmModule = lowerFromCIRToLLVMIR( ci.getFrontendOpts(), mlirModule.release(), std::unique_ptr(mlirContext), llvmCtx, - /*disableVerifier=*/false, /*disableCCLowering=*/true); + /*disableVerifier=*/false, /*disableCCLowering=*/true, disableDebugInfo); if (outstream) llvmModule->print(*outstream, nullptr); @@ -482,7 +487,8 @@ EmitObjAction::EmitObjAction(mlir::MLIRContext *_MLIRContext) : CIRGenAction(OutputType::EmitObj, _MLIRContext) {} } // namespace cir -// Used for -fclangir-analysis-only: use CIR analysis but still use original LLVM codegen path +// Used for -fclangir-analysis-only: use CIR analysis but still use original +// LLVM codegen path void AnalysisOnlyActionBase::anchor() {} AnalysisOnlyActionBase::AnalysisOnlyActionBase(unsigned _Act, llvm::LLVMContext *_VMContext) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 1d8be78053ed..f191850fbac8 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4402,7 +4402,8 @@ extern void registerCIRDialectTranslation(mlir::MLIRContext &context); std::unique_ptr lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx, - bool disableVerifier, bool disableCCLowering) { + bool disableVerifier, bool disableCCLowering, + bool disableDebugInfo) { llvm::TimeTraceScope scope("lower from CIR to LLVM directly"); mlir::MLIRContext *mlirCtx = theModule.getContext(); @@ -4412,8 +4413,9 @@ lowerDirectlyFromCIRToLLVMIR(mlir::ModuleOp theModule, LLVMContext &llvmCtx, // This is necessary to have line tables emitted and basic // debugger working. In the future we will add proper debug information // emission directly from our frontend. - pm.addPass(mlir::LLVM::createDIScopeForLLVMFuncOpPass()); - + if (!disableDebugInfo) { + pm.addPass(mlir::LLVM::createDIScopeForLLVMFuncOpPass()); + } // FIXME(cir): this shouldn't be necessary. It's meant to be a temporary // workaround until we understand why some unrealized casts are being // emmited and how to properly avoid them. diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 2c1c0ee9bb1d..dad82af42d11 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -10156,7 +10156,7 @@ uint8x16_t test_vld1q_u8(uint8_t const *a) { // CIR: cir.load align(1) %[[CAST]] : !cir.ptr>, !cir.vector // LLVM-LABEL: @test_vld1q_u8 - // LLVM: [[TMP1:%.*]] = load <16 x i8>, ptr %0, align 1, + // LLVM: [[TMP1:%.*]] = load <16 x i8>, ptr %0, align 1 } uint16x8_t test_vld1q_u16(uint16_t const *a) { @@ -10166,7 +10166,7 @@ uint16x8_t test_vld1q_u16(uint16_t const *a) { // CIR: cir.load align(2) %[[CAST]] : !cir.ptr>, !cir.vector // LLVM-LABEL: @test_vld1q_u16 - // LLVM: [[TMP1:%.*]] = load <8 x i16>, ptr %0, align 2, + // LLVM: [[TMP1:%.*]] = load <8 x i16>, ptr %0, align 2 } uint32x4_t test_vld1q_u32(uint32_t const *a) { @@ -10176,7 +10176,7 @@ uint32x4_t test_vld1q_u32(uint32_t const *a) { // CIR: cir.load align(4) %[[CAST]] : !cir.ptr>, !cir.vector // LLVM-LABEL: @test_vld1q_u32 - // LLVM: [[TMP1:%.*]] = load <4 x i32>, ptr %0, align 4, + // LLVM: [[TMP1:%.*]] = load <4 x i32>, ptr %0, align 4 } uint64x2_t test_vld1q_u64(uint64_t const *a) { @@ -10186,7 +10186,7 @@ uint64x2_t test_vld1q_u64(uint64_t const *a) { // CIR: cir.load align(8) %[[CAST]] : !cir.ptr>, !cir.vector // LLVM-LABEL: @test_vld1q_u64 - // LLVM: [[TMP1:%.*]] = load <2 x i64>, ptr %0, align 8, + // LLVM: [[TMP1:%.*]] = load <2 x i64>, ptr %0, align 8 } int8x16_t test_vld1q_s8(int8_t const *a) { @@ -10196,7 +10196,7 @@ int8x16_t test_vld1q_s8(int8_t const *a) { // CIR: cir.load align(1) %[[CAST]] : !cir.ptr>, !cir.vector // LLVM-LABEL: @test_vld1q_s8 - // LLVM: [[TMP1:%.*]] = load <16 x i8>, ptr %0, align 1, + // LLVM: [[TMP1:%.*]] = load <16 x i8>, ptr %0, align 1 } int16x8_t test_vld1q_s16(int16_t const *a) { @@ -10206,7 +10206,7 @@ int16x8_t test_vld1q_s16(int16_t const *a) { // CIR: cir.load align(2) %[[CAST]] : !cir.ptr>, !cir.vector // LLVM-LABEL: @test_vld1q_s16 - // LLVM: [[TMP1:%.*]] = load <8 x i16>, ptr %0, align 2, + // LLVM: [[TMP1:%.*]] = load <8 x i16>, ptr %0, align 2 } int32x4_t test_vld1q_s32(int32_t const *a) { @@ -10216,7 +10216,7 @@ int32x4_t test_vld1q_s32(int32_t const *a) { // CIR: cir.load align(4) %[[CAST]] : !cir.ptr>, !cir.vector // LLVM-LABEL: @test_vld1q_s32 - // LLVM: [[TMP1:%.*]] = load <4 x i32>, ptr %0, align 4, + // LLVM: [[TMP1:%.*]] = load <4 x i32>, ptr %0, align 4 } int64x2_t test_vld1q_s64(int64_t const *a) { @@ -10226,7 +10226,7 @@ int64x2_t test_vld1q_s64(int64_t const *a) { // CIR: cir.load align(8) %[[CAST]] : !cir.ptr>, !cir.vector // LLVM-LABEL: @test_vld1q_s64 - // LLVM: [[TMP1:%.*]] = load <2 x i64>, ptr %0, align 8, + // LLVM: [[TMP1:%.*]] = load <2 x i64>, ptr %0, align 8 } // NYI-LABEL: @test_vld1q_f16( @@ -11389,7 +11389,7 @@ void test_vst1q_u8(uint8_t *a, uint8x16_t b) { // CIR: cir.store align(1) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> // LLVM-LABEL: @test_vst1q_u8 - // LLVM: store <16 x i8> %{{.*}}, ptr %0, align 1, + // LLVM: store <16 x i8> %{{.*}}, ptr %0, align 1 } void test_vst1q_u16(uint16_t *a, uint16x8_t b) { @@ -11399,7 +11399,7 @@ void test_vst1q_u16(uint16_t *a, uint16x8_t b) { // CIR: cir.store align(2) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> // LLVM-LABEL: @test_vst1q_u16 - // LLVM: store <8 x i16> %{{.*}}, ptr %0, align 2, + // LLVM: store <8 x i16> %{{.*}}, ptr %0, align 2 } void test_vst1q_u32(uint32_t *a, uint32x4_t b) { @@ -11409,7 +11409,7 @@ void test_vst1q_u32(uint32_t *a, uint32x4_t b) { // CIR: cir.store align(4) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> // LLVM-LABEL: @test_vst1q_u32 - // LLVM: store <4 x i32> %{{.*}}, ptr %0, align 4, + // LLVM: store <4 x i32> %{{.*}}, ptr %0, align 4 } void test_vst1q_u64(uint64_t *a, uint64x2_t b) { @@ -11419,7 +11419,7 @@ void test_vst1q_u64(uint64_t *a, uint64x2_t b) { // CIR: cir.store align(8) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> // LLVM-LABEL: @test_vst1q_u64 - // LLVM: store <2 x i64> %{{.*}}, ptr %0, align 8, + // LLVM: store <2 x i64> %{{.*}}, ptr %0, align 8 } void test_vst1q_s8(int8_t *a, int8x16_t b) { @@ -11429,7 +11429,7 @@ void test_vst1q_s8(int8_t *a, int8x16_t b) { // CIR: cir.store align(1) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> // LLVM-LABEL: @test_vst1q_s8 - // LLVM: store <16 x i8> %{{.*}}, ptr %0, align 1, + // LLVM: store <16 x i8> %{{.*}}, ptr %0, align 1 } void test_vst1q_s16(int16_t *a, int16x8_t b) { @@ -11439,7 +11439,7 @@ void test_vst1q_s16(int16_t *a, int16x8_t b) { // CIR: cir.store align(2) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> // LLVM-LABEL: @test_vst1q_s16 - // LLVM: store <8 x i16> %{{.*}}, ptr %0, align 2, + // LLVM: store <8 x i16> %{{.*}}, ptr %0, align 2 } void test_vst1q_s32(int32_t *a, int32x4_t b) { @@ -11449,7 +11449,7 @@ void test_vst1q_s32(int32_t *a, int32x4_t b) { // CIR: cir.store align(4) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> // LLVM-LABEL: @test_vst1q_s32 - // LLVM: store <4 x i32> %{{.*}}, ptr %0, align 4, + // LLVM: store <4 x i32> %{{.*}}, ptr %0, align 4 } void test_vst1q_s64(int64_t *a, int64x2_t b) { @@ -11459,7 +11459,7 @@ void test_vst1q_s64(int64_t *a, int64x2_t b) { // CIR: cir.store align(8) %{{.*}}, %[[CAST]] : !cir.vector, !cir.ptr> // LLVM-LABEL: @test_vst1q_s64 - // LLVM: store <2 x i64> %{{.*}}, ptr %0, align 8, + // LLVM: store <2 x i64> %{{.*}}, ptr %0, align 8 } // NYI-LABEL: @test_vst1q_f16( diff --git a/clang/test/CIR/CodeGen/OpenCL/convergent.cl b/clang/test/CIR/CodeGen/OpenCL/convergent.cl index ba8a57f98d04..5726483fde8a 100644 --- a/clang/test/CIR/CodeGen/OpenCL/convergent.cl +++ b/clang/test/CIR/CodeGen/OpenCL/convergent.cl @@ -21,10 +21,10 @@ void non_convfun(void) { // External functions should be assumed convergent. void f(void); // CIR: cir.func{{.+}} @f(){{.*}} extra(#fn_attr[[CONV_DECL_ATTR]]) -// LLVM: declare {{.+}} spir_func void @f() local_unnamed_addr #[[CONV_ATTR:[0-9]+]] +// LLVM: declare spir_func void @f() local_unnamed_addr #[[CONV_ATTR:[0-9]+]] void g(void); // CIR: cir.func{{.+}} @g(){{.*}} extra(#fn_attr[[CONV_DECL_ATTR]]) -// LLVM: declare {{.+}} spir_func void @g() local_unnamed_addr #[[CONV_ATTR]] +// LLVM: declare spir_func void @g() local_unnamed_addr #[[CONV_ATTR]] // Test two if's are merged and non_convfun duplicated. void test_merge_if(int a) { @@ -68,7 +68,7 @@ void test_merge_if(int a) { void convfun(void) __attribute__((convergent)); // CIR: cir.func{{.+}} @convfun(){{.*}} extra(#fn_attr[[CONV_DECL_ATTR]]) -// LLVM: declare {{.+}} spir_func void @convfun() local_unnamed_addr #[[CONV_ATTR]] +// LLVM: declare spir_func void @convfun() local_unnamed_addr #[[CONV_ATTR]] // Test two if's are not merged. void test_no_merge_if(int a) { diff --git a/clang/test/CIR/CodeGen/abstract-cond.c b/clang/test/CIR/CodeGen/abstract-cond.c index 9ff125235105..1863e134982e 100644 --- a/clang/test/CIR/CodeGen/abstract-cond.c +++ b/clang/test/CIR/CodeGen/abstract-cond.c @@ -26,7 +26,7 @@ int f6(int a0, struct s6 a1, struct s6 a2) { // LLVM-LABEL: @f6 // LLVM: %[[LOAD_A0:.*]] = load i32, ptr {{.*}} // LLVM: %[[COND:.*]] = icmp ne i32 %[[LOAD_A0]], 0 -// LLVM: br i1 %[[COND]], label %[[A1_PATH:.*]], label %[[A2_PATH:.*]], +// LLVM: br i1 %[[COND]], label %[[A1_PATH:.*]], label %[[A2_PATH:.*]] // LLVM: [[A1_PATH]]: // LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[TMP:.*]], ptr {{.*}}, i32 4, i1 false) // LLVM: br label %[[EXIT:[a-z0-9]+]] diff --git a/clang/test/CIR/CodeGen/annotations-var.c b/clang/test/CIR/CodeGen/annotations-var.c index 1a3787acc105..7d7fb31be260 100644 --- a/clang/test/CIR/CodeGen/annotations-var.c +++ b/clang/test/CIR/CodeGen/annotations-var.c @@ -37,5 +37,5 @@ void local(void) { // LLVM: call void @llvm.var.annotation.p0.p0(ptr %[[ALLOC2]], ptr @.str.annotation, ptr @.str.1.annotation, i32 24, ptr null) // LLVM: %[[ALLOC3:.*]] = alloca i32 // LLVM: call void @llvm.var.annotation.p0.p0(ptr %[[ALLOC3]], ptr @.str.3.annotation, -// LLVM-SAME: ptr @.str.1.annotation, i32 25, ptr @.args.annotation), +// LLVM-SAME: ptr @.str.1.annotation, i32 25, ptr @.args.annotation) } diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c index cecd745725f1..c01abf7bae6e 100644 --- a/clang/test/CIR/CodeGen/atomic-xchg-field.c +++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c @@ -58,11 +58,11 @@ void structAtomicExchange(unsigned referenceCount, wPtr item) { // LLVM: %[[RES:.*]] = cmpxchg weak ptr %9, i32 %[[EXP]], i32 %[[DES]] seq_cst seq_cst // LLVM: %[[OLD:.*]] = extractvalue { i32, i1 } %[[RES]], 0 // LLVM: %[[CMP:.*]] = extractvalue { i32, i1 } %[[RES]], 1 -// LLVM: %[[Z:.*]] = zext i1 %[[CMP]] to i8, !dbg !16 -// LLVM: %[[X:.*]] = xor i8 %[[Z]], 1, !dbg !16 -// LLVM: %[[FAIL:.*]] = trunc i8 %[[X]] to i1, !dbg !16 +// LLVM: %[[Z:.*]] = zext i1 %[[CMP]] to i8 +// LLVM: %[[X:.*]] = xor i8 %[[Z]], 1 +// LLVM: %[[FAIL:.*]] = trunc i8 %[[X]] to i1 -// LLVM: br i1 %[[FAIL:.*]], label %[[STORE_OLD:.*]], label %[[CONTINUE:.*]], +// LLVM: br i1 %[[FAIL:.*]], label %[[STORE_OLD:.*]], label %[[CONTINUE:.*]] // LLVM: [[STORE_OLD]]: // LLVM: store i32 %[[OLD]], ptr // LLVM: br label %[[CONTINUE]] diff --git a/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp index 09f74a108e1b..56ba8a4c80ec 100644 --- a/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp +++ b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp @@ -38,17 +38,17 @@ void fn1() { f f1; } // LLVM: %class.f = type { %class.a } // LLVM: %class.a = type { i8 } // LLVM: @h = global i32 0 -// LLVM: declare {{.*}} ptr @_ZN1a1bEi(i32) +// LLVM: declare ptr @_ZN1a1bEi(i32) // LLVM: define dso_local ptr @_ZN1f1bEv(ptr [[ARG0:%.*]]) // LLVM: [[ARG0_SAVE:%.*]] = alloca ptr, i64 1, align 8 // LLVM: [[RET_SAVE:%.*]] = alloca ptr, i64 1, align 8 -// LLVM: store ptr [[ARG0]], ptr [[ARG0_SAVE]], align 8, +// LLVM: store ptr [[ARG0]], ptr [[ARG0_SAVE]], align 8 // LLVM: [[ARG0_LOAD:%.*]] = load ptr, ptr [[ARG0_SAVE]], align 8 -// LLVM: [[FUNC_PTR:%.*]] = getelementptr %class.f, ptr [[ARG0_LOAD]], i32 0, i32 0, +// LLVM: [[FUNC_PTR:%.*]] = getelementptr %class.f, ptr [[ARG0_LOAD]], i32 0, i32 0 // LLVM: [[VAR_H:%.*]] = load i32, ptr @h, align 4 -// LLVM: [[RET_VAL:%.*]] = call ptr @_ZN1a1bEi(i32 [[VAR_H]]), -// LLVM: store ptr [[RET_VAL]], ptr [[RET_SAVE]], align 8, +// LLVM: [[RET_VAL:%.*]] = call ptr @_ZN1a1bEi(i32 [[VAR_H]]) +// LLVM: store ptr [[RET_VAL]], ptr [[RET_SAVE]], align 8 // LLVM: [[RET_VAL2:%.*]] = load ptr, ptr [[RET_SAVE]], align 8 // LLVM: ret ptr [[RET_VAL2]] diff --git a/clang/test/CIR/CodeGen/clear_cache.c b/clang/test/CIR/CodeGen/clear_cache.c index 7b649e068a19..789d4708a814 100644 --- a/clang/test/CIR/CodeGen/clear_cache.c +++ b/clang/test/CIR/CodeGen/clear_cache.c @@ -20,7 +20,7 @@ char buffer[32] = "This is a largely unused buffer"; // CIR: cir.clear_cache %[[VAL_3]] : !cir.ptr, %[[VAL_8]], // LLVM-LABEL: main -// LLVM: call void @llvm.clear_cache(ptr @buffer, ptr getelementptr (i8, ptr @buffer, i64 32)), +// LLVM: call void @llvm.clear_cache(ptr @buffer, ptr getelementptr (i8, ptr @buffer, i64 32)) int main(void) { __builtin___clear_cache(buffer, buffer+32); diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c index 18b78ed0cb7c..7092c58e64d0 100644 --- a/clang/test/CIR/CodeGen/fun-ptr.c +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -61,7 +61,7 @@ int foo(Data* d) { // CIR: cir.call [[RET]]() : (!cir.ptr>) -> () // CIR: cir.return -// LLVM: declare {{.*}} ptr {{@.*test.*}}() +// LLVM: declare ptr {{@.*test.*}}() // LLVM: define dso_local void {{@.*bar.*}}() // LLVM: [[RET:%.*]] = call ptr {{@.*test.*}}() // LLVM: call void [[RET]]() diff --git a/clang/test/CIR/CodeGen/func_dsolocal_pie.c b/clang/test/CIR/CodeGen/func_dsolocal_pie.c index 94f0dda5392f..fda5e5875fe5 100644 --- a/clang/test/CIR/CodeGen/func_dsolocal_pie.c +++ b/clang/test/CIR/CodeGen/func_dsolocal_pie.c @@ -22,13 +22,13 @@ int main() { // CIR: cir.call @foo([[TMP1]]) : (!s32i) -> () // LLVM: define dso_local void @foo(i32 [[TMP3:%.*]]) -// LLVM: [[ARG_STACK:%.*]] = alloca i32, i64 1, align 4, +// LLVM: [[ARG_STACK:%.*]] = alloca i32, i64 1, align 4 // LLVM: store i32 [[TMP3]], ptr [[ARG_STACK]], align 4 -// LLVM: ret void, +// LLVM: ret void // LLVM: define dso_local i32 @main() -// LLVM: [[TMP4:%.*]] = alloca i32, i64 1, align 4, -// LLVM: call void @foo(i32 2), +// LLVM: [[TMP4:%.*]] = alloca i32, i64 1, align 4 +// LLVM: call void @foo(i32 2) // LLVM: store i32 0, ptr [[TMP4]], align 4 // LLVM: [[RET_VAL:%.*]] = load i32, ptr [[TMP4]], align 4 -// LLVM: ret i32 [[RET_VAL]], +// LLVM: ret i32 [[RET_VAL]] diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index 8ab125c0c5de..eb9ab0aee1ac 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -45,20 +45,20 @@ e *g = new e(0); // LLVM_EH: define internal void @__cxx_global_var_init() personality ptr @__gxx_personality_v0 // LLVM_EH: call ptr @_Znwm(i64 1) -// LLVM_EH: br label %[[L2:.*]], +// LLVM_EH: br label %[[L2:.*]] // LLVM_EH: [[L2]]: // LLVM_EH: invoke void @_ZN1eC1Ei -// LLVM_EH: to label %[[CONT:.*]] unwind label %[[PAD:.*]], +// LLVM_EH: to label %[[CONT:.*]] unwind label %[[PAD:.*]] // LLVM_EH: [[CONT]]: -// LLVM_EH: br label %[[END:.*]], +// LLVM_EH: br label %[[END:.*]] // LLVM_EH: [[PAD]]: // LLVM_EH: landingpad { ptr, i32 } // LLVM_EH: cleanup // LLVM_EH: call void @_ZdlPvm -// LLVM_EH: br label %[[RESUME:.*]], +// LLVM_EH: br label %[[RESUME:.*]] // LLVM_EH: [[RESUME]]: // LLVM_EH: resume { ptr, i32 } diff --git a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp index b807e1bae756..8a0e2ac7aaab 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp @@ -67,27 +67,27 @@ void test() { // LLVM: @.str.1 = private constant [3 x i8] c"uv\00" // LLVM: define linkonce_odr void @_ZSt1fIPKcEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG0:%.*]]) -// LLVM: [[LOCAL_PTR:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, -// LLVM: store %"class.std::initializer_list" [[ARG0]], ptr [[LOCAL_PTR]], align 8, -// LLVM: ret void, +// LLVM: [[LOCAL_PTR:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8 +// LLVM: store %"class.std::initializer_list" [[ARG0]], ptr [[LOCAL_PTR]], align 8 +// LLVM: ret void // LLVM: } // LLVM: define dso_local void @_ZSt4testv() -// LLVM: [[INIT_STRUCT:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, -// LLVM: [[ELEM_ARRAY_PTR:%.*]] = alloca [2 x ptr], i64 1, align 8, -// LLVM: br label %[[SCOPE_START:.*]], +// LLVM: [[INIT_STRUCT:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8 +// LLVM: [[ELEM_ARRAY_PTR:%.*]] = alloca [2 x ptr], i64 1, align 8 +// LLVM: br label %[[SCOPE_START:.*]] // LLVM: [[SCOPE_START]]: ; preds = %0 -// LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr ptr, ptr [[ELEM_ARRAY_PTR]], i32 0, -// LLVM: store ptr @.str, ptr [[PTR_FIRST_ELEM]], align 8, -// LLVM: [[PTR_SECOND_ELEM:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 1, -// LLVM: store ptr @.str.1, ptr [[PTR_SECOND_ELEM]], align 8, -// LLVM: [[INIT_START_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0, -// LLVM: store ptr [[PTR_FIRST_ELEM]], ptr [[INIT_START_FLD_PTR]], align 8, -// LLVM: [[ELEM_ARRAY_END:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 2, -// LLVM: [[INIT_END_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1, -// LLVM: store ptr [[ELEM_ARRAY_END]], ptr [[INIT_END_FLD_PTR]], align 8, -// LLVM: [[ARG2PASS:%.*]] = load %"class.std::initializer_list", ptr [[INIT_STRUCT]], align 8, -// LLVM: call void @_ZSt1fIPKcEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG2PASS]]), -// LLVM: br label %[[SCOPE_END:.*]], +// LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr ptr, ptr [[ELEM_ARRAY_PTR]], i32 0 +// LLVM: store ptr @.str, ptr [[PTR_FIRST_ELEM]], align 8 +// LLVM: [[PTR_SECOND_ELEM:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 1 +// LLVM: store ptr @.str.1, ptr [[PTR_SECOND_ELEM]], align 8 +// LLVM: [[INIT_START_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0 +// LLVM: store ptr [[PTR_FIRST_ELEM]], ptr [[INIT_START_FLD_PTR]], align 8 +// LLVM: [[ELEM_ARRAY_END:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 2 +// LLVM: [[INIT_END_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1 +// LLVM: store ptr [[ELEM_ARRAY_END]], ptr [[INIT_END_FLD_PTR]], align 8 +// LLVM: [[ARG2PASS:%.*]] = load %"class.std::initializer_list", ptr [[INIT_STRUCT]], align 8 +// LLVM: call void @_ZSt1fIPKcEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG2PASS]]) +// LLVM: br label %[[SCOPE_END:.*]] // LLVM: [[SCOPE_END]]: ; preds = %[[SCOPE_START]] // LLVM: ret void diff --git a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp index a2e9d9c43ee7..08ed514ca707 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp @@ -51,22 +51,22 @@ void test() { // LLVM: %"class.std::initializer_list" = type { ptr, i64 } // LLVM: define linkonce_odr void @_ZSt1fIiEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG:%.*]]) -// LLVM: [[LOCAL:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, -// LLVM: store %"class.std::initializer_list" [[ARG]], ptr [[LOCAL]], align 8, +// LLVM: [[LOCAL:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8 +// LLVM: store %"class.std::initializer_list" [[ARG]], ptr [[LOCAL]], align 8 // LLVM: define dso_local void @_ZSt4testv() -// LLVM: [[INIT_STRUCT:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8, -// LLVM: [[ELEM_ARRAY:%.*]] = alloca [1 x i32], i64 1, align 4, -// LLVM: br label %[[SCOPE_START:.*]], +// LLVM: [[INIT_STRUCT:%.*]] = alloca %"class.std::initializer_list", i64 1, align 8 +// LLVM: [[ELEM_ARRAY:%.*]] = alloca [1 x i32], i64 1, align 4 +// LLVM: br label %[[SCOPE_START:.*]] // LLVM: [[SCOPE_START]]: ; preds = %0 -// LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr i32, ptr [[ELEM_ARRAY]], i32 0, -// LLVM: store i32 7, ptr [[PTR_FIRST_ELEM]], align 4, -// LLVM: [[ELEM_ARRAY_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0, -// LLVM: store ptr [[PTR_FIRST_ELEM]], ptr [[ELEM_ARRAY_PTR]], align 8, -// LLVM: [[INIT_LEN_FLD:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1, -// LLVM: store i64 1, ptr [[INIT_LEN_FLD]], align 8, -// LLVM: [[ARG2PASS:%.*]] = load %"class.std::initializer_list", ptr [[INIT_STRUCT]], align 8, +// LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr i32, ptr [[ELEM_ARRAY]], i32 0 +// LLVM: store i32 7, ptr [[PTR_FIRST_ELEM]], align 4 +// LLVM: [[ELEM_ARRAY_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0 +// LLVM: store ptr [[PTR_FIRST_ELEM]], ptr [[ELEM_ARRAY_PTR]], align 8 +// LLVM: [[INIT_LEN_FLD:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1 +// LLVM: store i64 1, ptr [[INIT_LEN_FLD]], align 8 +// LLVM: [[ARG2PASS:%.*]] = load %"class.std::initializer_list", ptr [[INIT_STRUCT]], align 8 // LLVM: call void @_ZSt1fIiEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG2PASS]]) -// LLVM: br label %[[SCOPE_END:.*]], +// LLVM: br label %[[SCOPE_END:.*]] // LLVM: [[SCOPE_END]]: ; preds = %[[SCOPE_START]] // LLVM: ret void diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 97193997e7a1..b45634c0def8 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -175,13 +175,13 @@ int f() { // LLVM-LABEL: _Z1fv // LLVM: [[ref_tmp0:%.*]] = alloca %class.anon.4, i64 1, align 8 // LLVM: [[ret_val:%.*]] = alloca i32, i64 1, align 4 -// LLVM: br label %[[scope_bb:[0-9]+]], +// LLVM: br label %[[scope_bb:[0-9]+]] // LLVM: [[scope_bb]]: // LLVM: [[tmp0:%.*]] = call %class.anon.4 @_Z2g2v() // LLVM: store %class.anon.4 [[tmp0]], ptr [[ref_tmp0]], align 8 // LLVM: [[tmp1:%.*]] = call i32 @"_ZZ2g2vENK3$_0clEv"(ptr [[ref_tmp0]]) // LLVM: store i32 [[tmp1]], ptr [[ret_val]], align 4 -// LLVM: br label %[[ret_bb:[0-9]+]], +// LLVM: br label %[[ret_bb:[0-9]+]] // LLVM: [[ret_bb]]: // LLVM: [[tmp2:%.*]] = load i32, ptr [[ret_val]], align 4 // LLVM: ret i32 [[tmp2]] @@ -262,21 +262,21 @@ int g3() { // LLVM-DAG: [[ret_val:%.*]] = alloca i32, i64 1, align 4 // LLVM-DAG: [[fn_ptr:%.*]] = alloca ptr, i64 1, align 8 // LLVM-DAG: [[task:%.*]] = alloca i32, i64 1, align 4 -// LLVM: br label %[[scope0_bb:[0-9]+]], +// LLVM: br label %[[scope0_bb:[0-9]+]] // LLVM: [[scope0_bb]]: {{.*}}; preds = %0 // LLVM: [[call:%.*]] = call ptr @"_ZZ2g3vENK3$_0cvPFiRKiEEv"(ptr [[ref_tmp0]]) -// LLVM: br label %[[scope1_before:[0-9]+]], +// LLVM: br label %[[scope1_before:[0-9]+]] // LLVM: [[scope1_before]]: {{.*}}; preds = %[[scope0_bb]] // LLVM: [[tmp0:%.*]] = phi ptr [ [[call]], %[[scope0_bb]] ] -// LLVM: br label %[[scope1_bb:[0-9]+]], +// LLVM: br label %[[scope1_bb:[0-9]+]] // LLVM: [[scope1_bb]]: {{.*}}; preds = %[[scope1_before]] // LLVM: [[fn:%.*]] = load ptr, ptr [[fn_ptr]], align 8 // LLVM: store i32 3, ptr [[ref_tmp1]], align 4 // LLVM: [[call1:%.*]] = call i32 [[fn]](ptr [[ref_tmp1]]) -// LLVM: br label %[[ret_bb:[0-9]+]], +// LLVM: br label %[[ret_bb:[0-9]+]] // LLVM: [[ret_bb]]: {{.*}}; preds = %[[scope1_bb]] // LLVM: [[tmp1:%.*]] = phi i32 [ [[call1]], %[[scope1_bb]] ] diff --git a/clang/test/CIR/CodeGen/libc.c b/clang/test/CIR/CodeGen/libc.c index c11d3bc764e5..fa0332261b72 100644 --- a/clang/test/CIR/CodeGen/libc.c +++ b/clang/test/CIR/CodeGen/libc.c @@ -20,7 +20,7 @@ void *memmove(void *, const void *, unsigned long); void testMemmove(void *src, const void *dst, unsigned long size) { memmove(dst, src, size); // CHECK: cir.libc.memmove %{{.+}} bytes from %{{.+}} to %{{.+}} : !cir.ptr, !u64i - // LLVM: call void @llvm.memmove.{{.+}}.i64(ptr %{{.+}}, ptr %{{.+}}, i64 %{{.+}}, i1 false), + // LLVM: call void @llvm.memmove.{{.+}}.i64(ptr %{{.+}}, ptr %{{.+}}, i64 %{{.+}}, i1 false) } // Should generate CIR's builtin memset op. diff --git a/clang/test/CIR/CodeGen/new-null.cpp b/clang/test/CIR/CodeGen/new-null.cpp index 773e2c63f85d..1957d54873a0 100644 --- a/clang/test/CIR/CodeGen/new-null.cpp +++ b/clang/test/CIR/CodeGen/new-null.cpp @@ -68,10 +68,10 @@ namespace test15 { // LLVM: %[[VAL_2:.*]] = load ptr, ptr %[[VAL_0]], align 8 // LLVM: %[[VAL_3:.*]] = call ptr @_ZnwmPvb(i64 1, ptr %[[VAL_2]], i8 1) // LLVM: %[[VAL_4:.*]] = icmp ne ptr %[[VAL_3]], null - // LLVM: br i1 %[[VAL_4]], label %[[VAL_5:.*]], label %[[VAL_6:.*]], + // LLVM: br i1 %[[VAL_4]], label %[[VAL_5:.*]], label %[[VAL_6:.*]] // LLVM: [[VAL_5]]: ; preds = %[[VAL_7:.*]] // LLVM: call void @_ZN6test151AC1Ev(ptr %[[VAL_3]]) - // LLVM: br label %[[VAL_6]], + // LLVM: br label %[[VAL_6]] // LLVM: [[VAL_6]]: ; preds = %[[VAL_5]], %[[VAL_7]] // LLVM: ret void diff --git a/clang/test/CIR/CodeGen/pass-object-size.c b/clang/test/CIR/CodeGen/pass-object-size.c index 5bd20f8934de..798912c32b00 100644 --- a/clang/test/CIR/CodeGen/pass-object-size.c +++ b/clang/test/CIR/CodeGen/pass-object-size.c @@ -22,8 +22,8 @@ void c() { // CIR-NEXT: cir.call @e([[TMP3]], [[TMP4]]) : (!cir.ptr, !u64i) -> () // LLVM: define dso_local void @c() -// LLVM: [[TMP0:%.*]] = alloca i32, i64 %{{[0-9]+}}, -// LLVM: [[TMP1:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 false, i1 true, i1 false), +// LLVM: [[TMP0:%.*]] = alloca i32, i64 %{{[0-9]+}} +// LLVM: [[TMP1:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 false, i1 true, i1 false) // LLVM-NEXT: call void @b(ptr [[TMP0]], i64 [[TMP1]]) -// LLVM: [[TMP2:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 true, i1 true, i1 false), +// LLVM: [[TMP2:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[TMP0]], i1 true, i1 true, i1 false) // LLVM-NEXT: call void @e(ptr [[TMP0]], i64 [[TMP2]]) diff --git a/clang/test/CIR/CodeGen/sourcelocation.cpp b/clang/test/CIR/CodeGen/sourcelocation.cpp index 97ee16aa3cde..9ea7ad614315 100644 --- a/clang/test/CIR/CodeGen/sourcelocation.cpp +++ b/clang/test/CIR/CodeGen/sourcelocation.cpp @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -debug-info-kind=constructor -dwarf-version=4 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM int s0(int a, int b) { diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 7b6c80535e30..40c35434e17c 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -41,11 +41,11 @@ void yo() { // LLVM-LABEL: @_Z2yov() // LLVM: %[[Vec:.*]] = alloca %struct.Vec -// LLVM: br label %[[INVOKE_BB:.*]], +// LLVM: br label %[[INVOKE_BB:.*]] // LLVM: [[INVOKE_BB]]: // LLVM: invoke void @_ZN3VecC1Ev(ptr %[[Vec]]) -// LLVM: to label %[[DTOR_BB:.*]] unwind label %[[LPAD_BB:.*]], +// LLVM: to label %[[DTOR_BB:.*]] unwind label %[[LPAD_BB:.*]] // LLVM: [[DTOR_BB]]: // LLVM: call void @_ZN3VecD1Ev(ptr %[[Vec]]) @@ -54,12 +54,12 @@ void yo() { // LLVM: [[LPAD_BB]]: // LLVM: landingpad { ptr, i32 } // LLVM: catch ptr null -// LLVM: br label %[[CATCH_BB:.*]], +// LLVM: br label %[[CATCH_BB:.*]] // LLVM: [[CATCH_BB]]: // LLVM: call ptr @__cxa_begin_catch // LLVM: call void @__cxa_end_catch() -// LLVM: br label %[[RET_BB:.*]], +// LLVM: br label %[[RET_BB:.*]] // LLVM: [[RET_BB]]: // LLVM: ret void @@ -216,50 +216,50 @@ void yo3(bool x) { // LLVM: %[[V2:.*]] = alloca %struct.Vec // LLVM: %[[V3:.*]] = alloca %struct.Vec // LLVM: %[[V4:.*]] = alloca %struct.Vec -// LLVM: br label %[[CALL0:.*]], +// LLVM: br label %[[CALL0:.*]] // LLVM: [[CALL0]]: // LLVM: invoke void @_ZN3VecC1Ev(ptr %[[V1]]) -// LLVM: to label %[[CALL1:.*]] unwind label %[[LPAD0:.*]], +// LLVM: to label %[[CALL1:.*]] unwind label %[[LPAD0:.*]] // LLVM: [[CALL1]]: // LLVM: invoke void @_ZN3VecC1Ev(ptr %[[V2]]) -// LLVM: to label %[[CALL2:.*]] unwind label %[[LPAD1:.*]], +// LLVM: to label %[[CALL2:.*]] unwind label %[[LPAD1:.*]] // LLVM: [[CALL2]]: // LLVM: invoke void @_ZN3VecC1Ev(ptr %[[V3]]) -// LLVM: to label %[[CALL3:.*]] unwind label %[[LPAD2:.*]], +// LLVM: to label %[[CALL3:.*]] unwind label %[[LPAD2:.*]] // LLVM: [[CALL3]]: // LLVM: invoke void @_ZN3VecC1Ev(ptr %[[V4]]) -// LLVM: to label %[[REGULAR_CLEANUP:.*]] unwind label %[[LPAD3:.*]], +// LLVM: to label %[[REGULAR_CLEANUP:.*]] unwind label %[[LPAD3:.*]] // LLVM: [[REGULAR_CLEANUP]]: -// LLVM: call void @_ZN3VecD1Ev(ptr %[[V4]]), -// LLVM: call void @_ZN3VecD1Ev(ptr %[[V3]]), -// LLVM: call void @_ZN3VecD1Ev(ptr %[[V2]]), -// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]), -// LLVM: br label %[[RET:.*]], +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V4]]) +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V3]]) +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V2]]) +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]) +// LLVM: br label %[[RET:.*]] // LLVM: [[LPAD0]]: // LLVM: landingpad { ptr, i32 } -// LLVM: catch ptr null, -// LLVM: br label %[[CATCH:.*]], +// LLVM: catch ptr null +// LLVM: br label %[[CATCH:.*]] // LLVM: [[LPAD1]]: // LLVM: landingpad { ptr, i32 } -// LLVM: catch ptr null, -// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]), -// LLVM: br label %[[CATCH]], +// LLVM: catch ptr null +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]) +// LLVM: br label %[[CATCH]] // LLVM: [[LPAD2]]: // LLVM: landingpad { ptr, i32 } -// LLVM: catch ptr null, -// LLVM: call void @_ZN3VecD1Ev(ptr %[[V2]]), -// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]), -// LLVM: br label %[[CATCH]], +// LLVM: catch ptr null +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V2]]) +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]) +// LLVM: br label %[[CATCH]] // LLVM: [[LPAD3]]: // LLVM: landingpad { ptr, i32 } -// LLVM: catch ptr null, -// LLVM: call void @_ZN3VecD1Ev(ptr %[[V3]]), -// LLVM: call void @_ZN3VecD1Ev(ptr %[[V2]]), -// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]), -// LLVM: br label %[[CATCH]], +// LLVM: catch ptr null +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V3]]) +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V2]]) +// LLVM: call void @_ZN3VecD1Ev(ptr %[[V1]]) +// LLVM: br label %[[CATCH]] // LLVM: [[CATCH]]: // LLVM: call ptr @__cxa_begin_catch -// LLVM: br label %[[RET]], +// LLVM: br label %[[RET]] // LLVM: [[RET]]: // LLVM: ret void diff --git a/clang/test/CIR/CodeGen/var-arg-float.c b/clang/test/CIR/CodeGen/var-arg-float.c index 2b3f5099dd1b..5b8539cfa380 100644 --- a/clang/test/CIR/CodeGen/var-arg-float.c +++ b/clang/test/CIR/CodeGen/var-arg-float.c @@ -78,41 +78,41 @@ double f1(int n, ...) { // beginning block llvm code // LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } // LLVM: define dso_local double @f1(i32 %0, ...) -// LLVM: [[ARGN:%.*]] = alloca i32, i64 1, align 4, -// LLVM: [[RETP:%.*]] = alloca double, i64 1, align 8, -// LLVM: [[RESP:%.*]] = alloca double, i64 1, align 8, -// LLVM: call void @llvm.va_start.p0(ptr [[VARLIST:%.*]]), +// LLVM: [[ARGN:%.*]] = alloca i32, i64 1, align 4 +// LLVM: [[RETP:%.*]] = alloca double, i64 1, align 8 +// LLVM: [[RESP:%.*]] = alloca double, i64 1, align 8 +// LLVM: call void @llvm.va_start.p0(ptr [[VARLIST:%.*]]) // LLVM: [[VR_OFFS_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 4 -// LLVM: [[VR_OFFS:%.*]] = load i32, ptr [[VR_OFFS_P]], align 4, -// LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[VR_OFFS]], 0, -// LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]], +// LLVM: [[VR_OFFS:%.*]] = load i32, ptr [[VR_OFFS_P]], align 4 +// LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[VR_OFFS]], 0 +// LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]] // LLVM: [[BB_MAY_REG]]: ; -// LLVM-NEXT: [[NEW_REG_OFFS:%.*]] = add i32 [[VR_OFFS]], 16, -// LLVM-NEXT: store i32 [[NEW_REG_OFFS]], ptr [[VR_OFFS_P]], align 4, -// LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0, -// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG:.*]], label %[[BB_ON_STACK]], +// LLVM-NEXT: [[NEW_REG_OFFS:%.*]] = add i32 [[VR_OFFS]], 16 +// LLVM-NEXT: store i32 [[NEW_REG_OFFS]], ptr [[VR_OFFS_P]], align 4 +// LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0 +// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG:.*]], label %[[BB_ON_STACK]] // LLVM: [[BB_IN_REG]]: ; -// LLVM-NEXT: [[VR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 2, -// LLVM-NEXT: [[VR_TOP:%.*]] = load ptr, ptr [[VR_TOP_P]], align 8, -// LLVM-NEXT: [[EXT64_VR_OFFS:%.*]] = sext i32 [[VR_OFFS]] to i64, -// LLVM-NEXT: [[IN_REG_OUTPUT:%.*]] = getelementptr i8, ptr [[VR_TOP]], i64 [[EXT64_VR_OFFS]], -// LLVM-NEXT: br label %[[BB_END:.*]], +// LLVM-NEXT: [[VR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 2 +// LLVM-NEXT: [[VR_TOP:%.*]] = load ptr, ptr [[VR_TOP_P]], align 8 +// LLVM-NEXT: [[EXT64_VR_OFFS:%.*]] = sext i32 [[VR_OFFS]] to i64 +// LLVM-NEXT: [[IN_REG_OUTPUT:%.*]] = getelementptr i8, ptr [[VR_TOP]], i64 [[EXT64_VR_OFFS]] +// LLVM-NEXT: br label %[[BB_END:.*]] // LLVM: [[BB_ON_STACK]]: ; -// LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, -// LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8, -// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i64 8, -// LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, -// LLVM-NEXT: br label %[[BB_END]], +// LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0 +// LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8 +// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i64 8 +// LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8 +// LLVM-NEXT: br label %[[BB_END]] // LLVM: [[BB_END]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG]] // LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT]], %[[BB_IN_REG]] ], [ [[STACK_V]], %[[BB_ON_STACK]] ] -// LLVM-NEXT: [[PHIV:%.*]] = load double, ptr [[PHIP]], align 8, -// LLVM-NEXT: store double [[PHIV]], ptr [[RESP]], align 8, -// LLVM: call void @llvm.va_end.p0(ptr [[VARLIST]]), -// LLVM: [[RES:%.*]] = load double, ptr [[RESP]], align 8, -// LLVM: store double [[RES]], ptr [[RETP]], align 8, -// LLVM: [[RETV:%.*]] = load double, ptr [[RETP]], align 8, -// LLVM-NEXT: ret double [[RETV]], +// LLVM-NEXT: [[PHIV:%.*]] = load double, ptr [[PHIP]], align 8 +// LLVM-NEXT: store double [[PHIV]], ptr [[RESP]], align 8 +// LLVM: call void @llvm.va_end.p0(ptr [[VARLIST]]) +// LLVM: [[RES:%.*]] = load double, ptr [[RESP]], align 8 +// LLVM: store double [[RES]], ptr [[RETP]], align 8 +// LLVM: [[RETV:%.*]] = load double, ptr [[RETP]], align 8 +// LLVM-NEXT: ret double [[RETV]] diff --git a/clang/test/CIR/CodeGen/var-arg-scope.c b/clang/test/CIR/CodeGen/var-arg-scope.c index 2a52f5621f37..a57fcb9f1944 100644 --- a/clang/test/CIR/CodeGen/var-arg-scope.c +++ b/clang/test/CIR/CodeGen/var-arg-scope.c @@ -67,39 +67,39 @@ void f1(__builtin_va_list c) { // LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } // LLVM: define dso_local void @f1(%struct.__va_list %0) -// LLVM: [[VARLIST:%.*]] = alloca %struct.__va_list, i64 1, align 8, -// LLVM: br label %[[SCOPE_FRONT:.*]], +// LLVM: [[VARLIST:%.*]] = alloca %struct.__va_list, i64 1, align 8 +// LLVM: br label %[[SCOPE_FRONT:.*]] // LLVM: [[SCOPE_FRONT]]: ; preds = %1 // LLVM: [[GR_OFFS_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 3 -// LLVM: [[GR_OFFS:%.*]] = load i32, ptr [[GR_OFFS_P]], align 4, -// LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[GR_OFFS]], 0, -// LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]], +// LLVM: [[GR_OFFS:%.*]] = load i32, ptr [[GR_OFFS_P]], align 4 +// LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[GR_OFFS]], 0 +// LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]] // LLVM: [[BB_MAY_REG]]: ; -// LLVM: [[NEW_REG_OFFS:%.*]] = add i32 [[GR_OFFS]], 8, -// LLVM: store i32 [[NEW_REG_OFFS]], ptr [[GR_OFFS_P]], align 4, -// LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0, -// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG:.*]], label %[[BB_ON_STACK]], +// LLVM: [[NEW_REG_OFFS:%.*]] = add i32 [[GR_OFFS]], 8 +// LLVM: store i32 [[NEW_REG_OFFS]], ptr [[GR_OFFS_P]], align 4 +// LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0 +// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG:.*]], label %[[BB_ON_STACK]] // LLVM: [[BB_IN_REG]]: ; -// LLVM-NEXT: [[GR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 1, -// LLVM-NEXT: [[GR_TOP:%.*]] = load ptr, ptr [[GR_TOP_P]], align 8, -// LLVM-NEXT: [[EXT64_GR_OFFS:%.*]] = sext i32 [[GR_OFFS]] to i64, -// LLVM-NEXT: [[IN_REG_OUTPUT:%.*]] = getelementptr i8, ptr [[GR_TOP]], i64 [[EXT64_GR_OFFS]], -// LLVM-NEXT: br label %[[BB_END:.*]], +// LLVM-NEXT: [[GR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 1 +// LLVM-NEXT: [[GR_TOP:%.*]] = load ptr, ptr [[GR_TOP_P]], align 8 +// LLVM-NEXT: [[EXT64_GR_OFFS:%.*]] = sext i32 [[GR_OFFS]] to i64 +// LLVM-NEXT: [[IN_REG_OUTPUT:%.*]] = getelementptr i8, ptr [[GR_TOP]], i64 [[EXT64_GR_OFFS]] +// LLVM-NEXT: br label %[[BB_END:.*]] // LLVM: [[BB_ON_STACK]]: ; -// LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, -// LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8, -// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i64 8, -// LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, -// LLVM-NEXT: br label %[[BB_END]], +// LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0 +// LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8 +// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i64 8 +// LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8 +// LLVM-NEXT: br label %[[BB_END]] // LLVM: [[BB_END]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG]] // LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT]], %[[BB_IN_REG]] ], [ [[STACK_V]], %[[BB_ON_STACK]] ] -// LLVM-NEXT: [[PHIV:%.*]] = load ptr, ptr [[PHIP]], align 8, -// LLVM-NEXT: br label %[[OUT_SCOPE:.*]], +// LLVM-NEXT: [[PHIV:%.*]] = load ptr, ptr [[PHIP]], align 8 +// LLVM-NEXT: br label %[[OUT_SCOPE:.*]] // LLVM: [[OUT_SCOPE]]: ; preds = %[[BB_END]] -// LLVM-NEXT: ret void, +// LLVM-NEXT: ret void diff --git a/clang/test/CIR/CodeGen/var-arg.c b/clang/test/CIR/CodeGen/var-arg.c index a1a9e1cdb4ef..b2f1161fa443 100644 --- a/clang/test/CIR/CodeGen/var-arg.c +++ b/clang/test/CIR/CodeGen/var-arg.c @@ -81,41 +81,41 @@ int f1(int n, ...) { // LLVM: %struct.__va_list = type { ptr, ptr, ptr, i32, i32 } // LLVM: define dso_local i32 @f1(i32 %0, ...) -// LLVM: [[ARGN:%.*]] = alloca i32, i64 1, align 4, -// LLVM: [[RETP:%.*]] = alloca i32, i64 1, align 4, -// LLVM: [[RESP:%.*]] = alloca i32, i64 1, align 4, -// LLVM: call void @llvm.va_start.p0(ptr [[VARLIST:%.*]]), +// LLVM: [[ARGN:%.*]] = alloca i32, i64 1, align 4 +// LLVM: [[RETP:%.*]] = alloca i32, i64 1, align 4 +// LLVM: [[RESP:%.*]] = alloca i32, i64 1, align 4 +// LLVM: call void @llvm.va_start.p0(ptr [[VARLIST:%.*]]) // LLVM: [[GR_OFFS_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 3 -// LLVM: [[GR_OFFS:%.*]] = load i32, ptr [[GR_OFFS_P]], align 4, -// LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[GR_OFFS]], 0, -// LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]], +// LLVM: [[GR_OFFS:%.*]] = load i32, ptr [[GR_OFFS_P]], align 4 +// LLVM-NEXT: [[CMP0:%.*]] = icmp sge i32 [[GR_OFFS]], 0 +// LLVM-NEXT: br i1 [[CMP0]], label %[[BB_ON_STACK:.*]], label %[[BB_MAY_REG:.*]] // LLVM: [[BB_MAY_REG]]: ; -// LLVM: [[NEW_REG_OFFS:%.*]] = add i32 [[GR_OFFS]], 8, -// LLVM: store i32 [[NEW_REG_OFFS]], ptr [[GR_OFFS_P]], align 4, -// LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0, -// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG:.*]], label %[[BB_ON_STACK]], +// LLVM: [[NEW_REG_OFFS:%.*]] = add i32 [[GR_OFFS]], 8 +// LLVM: store i32 [[NEW_REG_OFFS]], ptr [[GR_OFFS_P]], align 4 +// LLVM-NEXT: [[CMP1:%.*]] = icmp sle i32 [[NEW_REG_OFFS]], 0 +// LLVM-NEXT: br i1 [[CMP1]], label %[[BB_IN_REG:.*]], label %[[BB_ON_STACK]] // LLVM: [[BB_IN_REG]]: ; -// LLVM-NEXT: [[GR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 1, -// LLVM-NEXT: [[GR_TOP:%.*]] = load ptr, ptr [[GR_TOP_P]], align 8, -// LLVM-NEXT: [[EXT64_GR_OFFS:%.*]] = sext i32 [[GR_OFFS]] to i64, -// LLVM-NEXT: [[IN_REG_OUTPUT:%.*]] = getelementptr i8, ptr [[GR_TOP]], i64 [[EXT64_GR_OFFS]], -// LLVM-NEXT: br label %[[BB_END:.*]], +// LLVM-NEXT: [[GR_TOP_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 1 +// LLVM-NEXT: [[GR_TOP:%.*]] = load ptr, ptr [[GR_TOP_P]], align 8 +// LLVM-NEXT: [[EXT64_GR_OFFS:%.*]] = sext i32 [[GR_OFFS]] to i64 +// LLVM-NEXT: [[IN_REG_OUTPUT:%.*]] = getelementptr i8, ptr [[GR_TOP]], i64 [[EXT64_GR_OFFS]] +// LLVM-NEXT: br label %[[BB_END:.*]] // LLVM: [[BB_ON_STACK]]: ; -// LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0, -// LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8, -// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i64 8, -// LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8, -// LLVM-NEXT: br label %[[BB_END]], +// LLVM-NEXT: [[STACK_P:%.*]] = getelementptr %struct.__va_list, ptr [[VARLIST]], i32 0, i32 0 +// LLVM-NEXT: [[STACK_V:%.*]] = load ptr, ptr [[STACK_P]], align 8 +// LLVM-NEXT: [[NEW_STACK_V:%.*]] = getelementptr i8, ptr [[STACK_V]], i64 8 +// LLVM-NEXT: store ptr [[NEW_STACK_V]], ptr [[STACK_P]], align 8 +// LLVM-NEXT: br label %[[BB_END]] // LLVM: [[BB_END]]: ; preds = %[[BB_ON_STACK]], %[[BB_IN_REG]] // LLVM-NEXT: [[PHIP:%.*]] = phi ptr [ [[IN_REG_OUTPUT]], %[[BB_IN_REG]] ], [ [[STACK_V]], %[[BB_ON_STACK]] ] -// LLVM-NEXT: [[PHIV:%.*]] = load i32, ptr [[PHIP]], align 4, -// LLVM-NEXT: store i32 [[PHIV]], ptr [[RESP]], align 4, -// LLVM: call void @llvm.va_end.p0(ptr [[VARLIST]]), -// LLVM: [[RES:%.*]] = load i32, ptr [[RESP]], align 4, -// LLVM: store i32 [[RES]], ptr [[RETP]], align 4, -// LLVM: [[RETV:%.*]] = load i32, ptr [[RETP]], align 4, -// LLVM-NEXT: ret i32 [[RETV]], +// LLVM-NEXT: [[PHIV:%.*]] = load i32, ptr [[PHIP]], align 4 +// LLVM-NEXT: store i32 [[PHIV]], ptr [[RESP]], align 4 +// LLVM: call void @llvm.va_end.p0(ptr [[VARLIST]]) +// LLVM: [[RES:%.*]] = load i32, ptr [[RESP]], align 4 +// LLVM: store i32 [[RES]], ptr [[RETP]], align 4 +// LLVM: [[RETV:%.*]] = load i32, ptr [[RETP]], align 4 +// LLVM-NEXT: ret i32 [[RETV]] diff --git a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp index 78f5866d48a7..877475b15ebd 100644 --- a/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp +++ b/clang/test/CIR/CodeGen/virtual-destructor-calls.cpp @@ -34,7 +34,7 @@ struct B : A { // Complete dtor: just an alias because there are no virtual bases. // CIR: cir.func private @_ZN1BD1Ev(!cir.ptr) alias(@_ZN1BD2Ev) // FIXME: LLVM output should be: @_ZN1BD1Ev ={{.*}} unnamed_addr alias {{.*}} @_ZN1BD2Ev -// LLVM: declare {{.*}} dso_local void @_ZN1BD1Ev(ptr) +// LLVM: declare dso_local void @_ZN1BD1Ev(ptr) // Deleting dtor: defers to the complete dtor. // LLVM: define{{.*}} void @_ZN1BD0Ev(ptr @@ -52,7 +52,7 @@ struct B : A { // FIXME: LLVM output should be: @_ZN1CD2Ev ={{.*}} unnamed_addr alias {{.*}} @_ZN1BD2Ev // LLVM: define dso_local void @_ZN1CD2Ev(ptr // FIXME: LLVM output should be: @_ZN1CD1Ev ={{.*}} unnamed_addr alias {{.*}} @_ZN1CD2Ev -// LLVM: declare {{.*}} dso_local void @_ZN1CD1Ev(ptr) +// LLVM: declare dso_local void @_ZN1CD1Ev(ptr) // FIXME: note that LLVM_O1 cannot be tested because the canocalizers running // on top of LLVM IR dialect delete _ZN1CD2Ev in its current form (a function // declaration) since its not used in the TU. diff --git a/clang/test/CIR/CodeGen/visibility-attribute.c b/clang/test/CIR/CodeGen/visibility-attribute.c index 7a1d0aaafbad..9edca315fd2a 100644 --- a/clang/test/CIR/CodeGen/visibility-attribute.c +++ b/clang/test/CIR/CodeGen/visibility-attribute.c @@ -22,15 +22,15 @@ int call_glob() void foo_default(); // CIR: cir.func no_proto private @foo_default(...) -// LLVM: declare {{.*}} void @foo_default(...) +// LLVM: declare void @foo_default(...) void __attribute__((__visibility__("hidden"))) foo_hidden(); // CIR: cir.func no_proto private hidden @foo_hidden(...) -// LLVM: declare {{.*}} hidden void @foo_hidden(...) +// LLVM: declare hidden void @foo_hidden(...) void __attribute__((__visibility__("protected"))) foo_protected(); // CIR: cir.func no_proto private protected @foo_protected(...) -// LLVM: declare {{.*}} protected void @foo_protected(...) +// LLVM: declare protected void @foo_protected(...) static void static_foo_default() {} // CIR: cir.func no_proto internal private @static_foo_default() diff --git a/clang/test/CIR/CodeGen/weak.c b/clang/test/CIR/CodeGen/weak.c index 398ac47c73f9..28d4aa5a595a 100644 --- a/clang/test/CIR/CodeGen/weak.c +++ b/clang/test/CIR/CodeGen/weak.c @@ -18,7 +18,7 @@ void active (void) // CIR: cir.func @active() // CIR-NEXT: cir.call @B() : () -> () -// LLVM: declare !dbg !{{.}} extern_weak void @B() +// LLVM: declare extern_weak void @B() // LLVM: define dso_local void @active() // LLVM-NEXT: call void @B() diff --git a/clang/test/CIR/Lowering/array-init.c b/clang/test/CIR/Lowering/array-init.c index dd61ba3234c5..9e16c8042095 100644 --- a/clang/test/CIR/Lowering/array-init.c +++ b/clang/test/CIR/Lowering/array-init.c @@ -25,7 +25,7 @@ void charInit2() { // LLVM: %[[PTR:.*]] = alloca [4 x [2 x i8]], i64 1, align 1 // FIXME: OG uses @llvm.memcpy.p0.p0.i64 -// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[PTR]], ptr @__const.charInit3.arr, i32 8, i1 false), !dbg !16 +// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[PTR]], ptr @__const.charInit3.arr, i32 8, i1 false) void charInit3() { char arr[4][2] = {"ab", "cd", "ef", "gh"}; } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/debug-info.c b/clang/test/CIR/Lowering/debug-info.c new file mode 100644 index 000000000000..42a8217c5cc7 --- /dev/null +++ b/clang/test/CIR/Lowering/debug-info.c @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM_NO_DEBUG +// RUN: %clang_cc1 -debug-info-kind=constructor -dwarf-version=4 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM_WITH_DEBUG +int foo(int a, int b) { + // LLVM_NO_DEBUG-NOT: !dbg + + // LLVM_WITH_DEBUG-LABEL: foo + // LLVM_WITH_DEBUG: %[[VAR_A:.*]] = load i32, ptr %{{.*}}, align 4, !dbg ![[DI_LOC1:.*]] + // LLVM_WITH_DEBUG: %[[VAR_B:.*]] = load i32, ptr %{{.*}}, align 4, !dbg ![[DI_LOC2:.*]] + // LLVM_WITH_DEBUG: %[[VAR_C:.*]] = add nsw i32 %[[VAR_A]], %[[VAR_B]], !dbg ![[DI_LOC1]] + // LLVM_WITH_DEBUG: store i32 %[[VAR_C]], ptr %{{.*}}, align 4, !dbg ![[DI_LOC3:.*]] + + // LLVM_WITH_DEBUG: ![[DI_LOC3]] = !DILocation(line: [[LINE:.*]], scope: ![[SCOPE:.*]]) + // LLVM_WITH_DEBUG: ![[DI_LOC1]] = !DILocation(line: [[LINE]], column: {{.*}}, scope: ![[SCOPE]]) + // LLVM_WITH_DEBUG: ![[DI_LOC2]] = !DILocation(line: [[LINE]], column: {{.*}}, scope: ![[SCOPE]]) + int c = a + b; + return c; +} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/int-wrap.cir b/clang/test/CIR/Lowering/int-wrap.cir index f74f64feb2e8..f885e745004b 100644 --- a/clang/test/CIR/Lowering/int-wrap.cir +++ b/clang/test/CIR/Lowering/int-wrap.cir @@ -19,6 +19,6 @@ module { // MLIR-NEXT: llvm.sub {{.*}}, {{.*}} overflow : i32 // MLIR-NEXT: llvm.sub {{.*}}, {{.*}} : i32 -// LLVM: sub nsw i32 {{.*}}, {{.*}}, !dbg !9 -// LLVM-NEXT: sub nuw i32 {{.*}}, {{.*}}, !dbg !10 -// LLVM-NEXT: sub i32 {{.*}}, {{.*}}, !dbg !11 \ No newline at end of file +// LLVM: sub nsw i32 {{.*}}, {{.*}} +// LLVM-NEXT: sub nuw i32 {{.*}}, {{.*}} +// LLVM-NEXT: sub i32 {{.*}}, {{.*}} \ No newline at end of file diff --git a/clang/test/CIR/Lowering/switch-while.c b/clang/test/CIR/Lowering/switch-while.c index ed8d177323f1..078f0045716c 100644 --- a/clang/test/CIR/Lowering/switch-while.c +++ b/clang/test/CIR/Lowering/switch-while.c @@ -34,23 +34,23 @@ int f(int a, int cond) { // CHECK: i32 3, label %[[THREE_BB:.+]] // CHECK: i32 5, label %[[FIVE_BB:.+]] // CHECK: i32 100, label %[[HUNDRED_BB:.+]] -// CHECK: ], +// CHECK: ] // // CHECK: [[UNREACHABLE_BB:.+]]: {{.*}}; No predecessors! // // CHECK: [[LOOP_ENTRY:.+]]: -// CHECK: br label %[[LOOP_HEADER:.+]], +// CHECK: br label %[[LOOP_HEADER:.+]] // // CHECK: [[LOOP_HEADER]]: // CHECK: add i32 %{{.*}}, 1 -// CHECK: br label %[[DEFAULT_BB:.+]], +// CHECK: br label %[[DEFAULT_BB:.+]] // // CHECK: [[DEFAULT_BB]]: -// CHECK: br label %[[IF_BB:.+]], +// CHECK: br label %[[IF_BB:.+]] // // CHECK: [[IF_BB]]: // CHECK: %[[CMP:.+]] = icmp ne i32 %[[COND:.+]], 0 -// CHECK: br i1 %[[CMP]], label %[[IF_TRUE_BB:.+]], label %[[IF_FALSE_BB:.+]], +// CHECK: br i1 %[[CMP]], label %[[IF_TRUE_BB:.+]], label %[[IF_FALSE_BB:.+]] // // CHECK: [[IF_TRUE_BB]]: // CHECK: ret @@ -62,23 +62,23 @@ int f(int a, int cond) { // // CHECK: [[TWO_BB]]: // CHECK: add i32 %{{.*}}, 1 -// CHECK: br label %[[FALLTHOUGH_BB:.+]], +// CHECK: br label %[[FALLTHOUGH_BB:.+]] // // CHECK: [[FALLTHOUGH_BB]]: -// CHECK: br label %[[LOOP_HEADER]], +// CHECK: br label %[[LOOP_HEADER]] // // CHECK: [[FIVE_BB]]: -// CHECK: br label %[[LOOP_EXIT_BB:.+]], +// CHECK: br label %[[LOOP_EXIT_BB:.+]] // // CHECK: [[HUNDRED_BB]]: // CHECK: call {{.*}}@func100() -// CHECK: br label %[[CONTINUE_BB:.+]], +// CHECK: br label %[[CONTINUE_BB:.+]] // // CHECK: [[CONTINUE_BB]]: // CHECK: br label %[[LOOP_HEADER]] // // CHECK: [[LOOP_EXIT_BB]]: -// CHECK: br label %[[RET_BB:.+]], +// CHECK: br label %[[RET_BB:.+]] // // CHECK: [[RET_BB]]: // CHECK: ret diff --git a/clang/test/CIR/Lowering/var-arg-x86_64.c b/clang/test/CIR/Lowering/var-arg-x86_64.c index 012e702d7f17..977d938dbb1b 100644 --- a/clang/test/CIR/Lowering/var-arg-x86_64.c +++ b/clang/test/CIR/Lowering/var-arg-x86_64.c @@ -23,12 +23,12 @@ double f1(int n, ...) { // CHECK: [[FP_OFFSET_P:%.+]] = getelementptr {{.*}} [[VA_LIST2]], i32 0, i32 1 // CHECK: [[FP_OFFSET:%.+]] = load {{.*}}, ptr [[FP_OFFSET_P]] // CHECK: [[COMPARED:%.+]] = icmp ule i32 {{.*}}, 160 -// CHECK: br i1 [[COMPARED]], label %[[THEN_BB:.+]], label %[[ELSE_BB:.+]], +// CHECK: br i1 [[COMPARED]], label %[[THEN_BB:.+]], label %[[ELSE_BB:.+]] // // CHECK: [[THEN_BB]]: // CHECK: [[UPDATED_FP_OFFSET:%.+]] = add i32 [[FP_OFFSET]], 8 // CHECK: store i32 [[UPDATED_FP_OFFSET]], ptr [[FP_OFFSET_P]] -// CHECK: br label %[[CONT_BB:.+]], +// CHECK: br label %[[CONT_BB:.+]] // // CHECK: [[ELSE_BB]]: // CHECK: [[OVERFLOW_ARG_AREA_ADDR:%.+]] = getelementptr {{.*}} [[VA_LIST2]], i32 0, i32 2 diff --git a/clang/tools/cir-translate/cir-translate.cpp b/clang/tools/cir-translate/cir-translate.cpp index b465a7dfb1fc..347215d76115 100644 --- a/clang/tools/cir-translate/cir-translate.cpp +++ b/clang/tools/cir-translate/cir-translate.cpp @@ -27,7 +27,8 @@ namespace direct { extern void registerCIRDialectTranslation(mlir::DialectRegistry ®istry); extern std::unique_ptr lowerDirectlyFromCIRToLLVMIR( mlir::ModuleOp theModule, llvm::LLVMContext &llvmCtx, - bool disableVerifier = false, bool disableCCLowering = false); + bool disableVerifier = false, bool disableCCLowering = false, + bool disableDebugInfo = false); } // namespace direct } // namespace cir From 37294b32bc47e21ed9df75ee7c9b53fcd7a8b20d Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Thu, 28 Nov 2024 06:41:12 +0800 Subject: [PATCH 2130/2301] [CIR] Remove InavalidUnionFieldAttr and remove a check in array attr (#1166) Close https://github.com/llvm/clangir/issues/1131 This is another solution to https://github.com/llvm/clangir/pull/1160 This patch revert https://github.com/llvm/clangir/pull/1007 and remain its test. The problem described in https://github.com/llvm/clangir/pull/1007 is workaround by skipping the check of equivalent of element types in arrays. We can't mock such checks simply by adding another attribute to `ConstStructAttr` since the types are aggregated. e.g., we have to handle the cases like `struct { union { ... } }` and `struct { struct { union { ... } } }` and so on. To make it, we have to introduce what I called "two type systems" in https://github.com/llvm/clangir/pull/1160. This is not very good giving it removes a reasonable check. But it might not be so problematic since the Sema part has already checked it. (Of course, we still need face the risks to introduce new bugs any way) --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 15 ------ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 3 -- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 28 +--------- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 +----- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 8 --- clang/test/CIR/CodeGen/nested-union-array.c | 33 ------------ clang/test/CIR/CodeGen/union-init.c | 13 +++-- clang/test/CIR/IR/invalid.cir | 3 +- clang/test/CIR/Lowering/nested-union-array.c | 28 ++++++++++ .../test/CIR/Lowering/union-in-struct-init.c | 51 +++++++++++++++++++ 10 files changed, 89 insertions(+), 109 deletions(-) delete mode 100644 clang/test/CIR/CodeGen/nested-union-array.c create mode 100644 clang/test/CIR/Lowering/nested-union-array.c create mode 100644 clang/test/CIR/Lowering/union-in-struct-init.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index d0ac1d00c4b5..71d8439e175a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -92,21 +92,6 @@ def CIR_BoolAttr : CIR_Attr<"Bool", "bool", [TypedAttrInterface]> { }]; } -//===----------------------------------------------------------------------===// -// InactiveUnionFieldAttr -//===----------------------------------------------------------------------===// - -def InactiveUnionFieldAttr : CIR_Attr<"InactiveUnionField", "inactive_field", [TypedAttrInterface]> { - let summary = "Attribute to represent an uninitialized field for a union."; - let description = [{ - The InactiveUnionFieldAttr is used to represent an uninitialized field - for a union. - }]; - - let parameters = (ins AttributeSelfTypeParameter<"">:$type); - let assemblyFormat = [{}]; -} - //===----------------------------------------------------------------------===// // ZeroAttr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index ea5fb980cc40..28be733f62d7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -297,9 +297,6 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return true; } - if (mlir::isa(attr)) - return true; - llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index deea4159ff36..67eee98667bc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -377,31 +377,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( CharUnits AlignedSize = Size.alignTo(Align); bool Packed = false; - ArrayRef UnpackedElems; - - // Fill the init elements for union. This comes from a fundamental - // difference between CIR and LLVM IR. In LLVM IR, the union is simply a - // struct with the largest member. So it is fine to have only one init - // element. But in CIR, the union has the information for all members. So if - // we only pass a single init element, we may be in trouble. We solve the - // problem by appending placeholder attribute for the uninitialized fields. - llvm::SmallVector UnionElemsStorage; - if (auto desired = dyn_cast(DesiredTy); - desired && desired.isUnion() && - Elems.size() != desired.getNumElements()) { - for (auto elemTy : desired.getMembers()) { - if (auto Ty = mlir::dyn_cast(Elems.back()); - Ty && Ty.getType() == elemTy) - UnionElemsStorage.push_back(Elems.back()); - else - UnionElemsStorage.push_back(cir::InactiveUnionFieldAttr::get( - CGM.getBuilder().getContext(), elemTy)); - } - - UnpackedElems = UnionElemsStorage; - } else { - UnpackedElems = Elems; - } + ArrayRef UnpackedElems = Elems; llvm::SmallVector UnpackedElemStorage; if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) { @@ -1048,8 +1024,6 @@ class ConstExprEmitter return {}; if (i == 0) CommonElementType = C.getType(); - else if (C.getType() != CommonElementType) - CommonElementType = nullptr; Elts.push_back(std::move(C)); } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index aa48cafcc645..0996338bb654 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -3197,21 +3197,7 @@ LogicalResult cir::ConstArrayAttr::verify( // Make sure both number of elements and subelement types match type. if (at.getSize() != arrayAttr.size() + trailingZerosNum) return emitError() << "constant array size should match type size"; - LogicalResult eltTypeCheck = success(); - arrayAttr.walkImmediateSubElements( - [&](Attribute attr) { - // Once we find a mismatch, stop there. - if (eltTypeCheck.failed()) - return; - auto typedAttr = mlir::dyn_cast(attr); - if (!typedAttr || typedAttr.getType() != at.getEltType()) { - eltTypeCheck = failure(); - emitError() - << "constant array element should match array element type"; - } - }, - [&](Type type) {}); - return eltTypeCheck; + return success(); } ::mlir::Attribute cir::ConstArrayAttr::parse(::mlir::AsmParser &parser, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f191850fbac8..2daaaeb9aa3c 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -436,14 +436,6 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstStructAttr constStruct, // Iteratively lower each constant element of the struct. for (auto [idx, elt] : llvm::enumerate(constStruct.getMembers())) { - if (auto constStructType = dyn_cast(constStruct.getType()); - constStructType && constStructType.isUnion()) { - if (isa(elt)) - continue; - - idx = 0; - } - mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); result = rewriter.create(loc, result, init, idx); } diff --git a/clang/test/CIR/CodeGen/nested-union-array.c b/clang/test/CIR/CodeGen/nested-union-array.c deleted file mode 100644 index 7684e3c951c7..000000000000 --- a/clang/test/CIR/CodeGen/nested-union-array.c +++ /dev/null @@ -1,33 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM - -struct nested -{ - union { - const char *single; - const char *const *multi; - } output; -}; -static const char * const test[] = { - "test", -}; -const struct nested data[] = -{ - { - { - .multi = test, - }, - }, - { - { - .single = "hello", - }, - }, -}; - -// CIR: ![[ANON_TY:.+]] = !cir.struct, !cir.ptr> -// CIR: ![[NESTED_TY:.+]] = !cir.struct, #cir.global_view<@test> : !cir.ptr>}> : ![[ANON_TY]]}> : ![[NESTED_TY:.+]], #cir.const_struct<{#cir.const_struct<{#cir.global_view<@".str"> : !cir.ptr, #cir.inactive_field : !cir.ptr>}> : ![[ANON_TY]]}> : ![[NESTED_TY:.+]]]> : !cir.array -// LLVM: @data = constant [2 x {{.*}}] diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index d6f2e949b16e..122999de23c2 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -12,12 +12,10 @@ void foo(int x) { A a = {.x = x}; } -// CHECK-DAG: ![[TY_U:.*]] = !cir.struct -// CHECK-DAG: ![[anon0:.*]] = !cir.struct +// CHECK-DAG: ![[anon:.*]] = !cir.struct // CHECK-DAG: #[[bfi_x:.*]] = #cir.bitfield_info // CHECK-DAG: #[[bfi_y:.*]] = #cir.bitfield_info -// CHECK-DAG: ![[TY_A:.*]] = !cir.struct // CHECK-DAG: ![[anon1:.*]] = !cir.struct} // CHECK-LABEL: cir.func @foo( @@ -34,7 +32,7 @@ void foo(int x) { // CHECK: cir.return union { int i; float f; } u = { }; -// CHECK: cir.global external @u = #cir.zero : ![[TY_u]] +// CHECK: cir.global external @u = #cir.zero : ![[anon]] unsigned is_little(void) { const union { @@ -45,8 +43,9 @@ unsigned is_little(void) { } // CHECK: cir.func @is_little -// CHECK: %[[VAL_1:.*]] = cir.get_global @is_little.one : !cir.ptr -// CHECK: %[[VAL_2:.*]] = cir.get_member %[[VAL_1]][1] {name = "c"} : !cir.ptr -> !cir.ptr> +// CHECK: %[[VAL_1:.*]] = cir.get_global @is_little.one : !cir.ptr +// CHECK: %[[VAL_2:.*]] = cir.cast(bitcast, %[[VAL_1]] : !cir.ptr), !cir.ptr +// CHECK: %[[VAL_3:.*]] = cir.get_member %[[VAL_2]][1] {name = "c"} : !cir.ptr -> !cir.ptr> typedef union { int x; diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 2e8e3ae9d44e..bbe61a0502b0 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -328,7 +328,8 @@ cir.func @cast27(%p : !u64i) { !u32i = !cir.int !u8i = !cir.int module { - // expected-error@+1 {{constant array element should match array element type}} + // FIXME: The check for equality of the array element type is currently disabled due to https://github.com/llvm/clangir/pull/1007 + // expected error {{constant array element should match array element type}} cir.global external @a = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<23> : !u8i, #cir.int<33> : !u8i] : !cir.array> } diff --git a/clang/test/CIR/Lowering/nested-union-array.c b/clang/test/CIR/Lowering/nested-union-array.c new file mode 100644 index 000000000000..f48f687dc2ab --- /dev/null +++ b/clang/test/CIR/Lowering/nested-union-array.c @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +struct nested +{ + union { + const char *single; + const char *const *multi; + } output; +}; +static const char * const test[] = { + "test", +}; +const struct nested data[] = +{ + { + { + .multi = test, + }, + }, + { + { + .single = "hello", + }, + }, +}; + +// LLVM: @data = constant [2 x {{.*}}] diff --git a/clang/test/CIR/Lowering/union-in-struct-init.c b/clang/test/CIR/Lowering/union-in-struct-init.c new file mode 100644 index 000000000000..80b5ed32ad75 --- /dev/null +++ b/clang/test/CIR/Lowering/union-in-struct-init.c @@ -0,0 +1,51 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM +typedef struct { + union { + int a; + long b; + }; +} S; + +S s = { .a = 1 }; + +// LLVM: @s = global { { i32, [4 x i8] } } { { i32, [4 x i8] } { i32 1, [4 x i8] zeroinitializer } } + +typedef struct { + union { + int a; + long b; + }; +} S2; + +S2 s2 = { .b = 1 }; + +// LLVM: @s2 = global { { i64 } } { { i64 } { i64 1 } } + +typedef struct { + union { + int a; + long b; + long double c; + }; +} S3; + +S3 s3 = { .a = 1 }; + +// LLVM: @s3 = global { { i32, [12 x i8] } } { { i32, [12 x i8] } { i32 1, [12 x i8] zeroinitializer } } + +typedef struct { + int a, b, c, d; +} T; + +typedef union { + struct { + int a; + long b; + }; + T c; +} S4; + +S4 s4 = {.c = {1, 2, 3, 4}}; + +// LLVM: @s4 = global { %struct.T } { %struct.T { i32 1, i32 2, i32 3, i32 4 } } From 04257225ad5e708ea729b60cae19b8145c36e7fc Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 27 Nov 2024 18:01:23 -0500 Subject: [PATCH 2131/2301] [CIR][Dialect] Extend BinaryFPToFPBuiltinOp to vector of FP type (#1173) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 5 ++- .../CIR/Lowering/builtin-floating-point.cir | 45 +++++++++++++++++++ 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 74443ca1bdaf..f8732d4c2c15 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4425,8 +4425,9 @@ class BinaryFPToFPBuiltinOp libc builtin equivalent ignoring floating-point exceptions and errno. }]; - let arguments = (ins CIR_AnyFloat:$lhs, CIR_AnyFloat:$rhs); - let results = (outs CIR_AnyFloat:$result); + let arguments = (ins CIR_AnyFloatOrVecOfFloat:$lhs, + CIR_AnyFloatOrVecOfFloat:$rhs); + let results = (outs CIR_AnyFloatOrVecOfFloat:$result); let assemblyFormat = [{ $lhs `,` $rhs `:` qualified(type($lhs)) attr-dict diff --git a/clang/test/CIR/Lowering/builtin-floating-point.cir b/clang/test/CIR/Lowering/builtin-floating-point.cir index 157b3abe10f5..4ccf7b1ab49d 100644 --- a/clang/test/CIR/Lowering/builtin-floating-point.cir +++ b/clang/test/CIR/Lowering/builtin-floating-point.cir @@ -129,6 +129,51 @@ module { %214 = cir.sqrt %arg2 : !cir.vector // CHECK: llvm.intr.sqrt(%arg2) : (vector<4xf32>) -> vector<4xf32> + %15 = cir.copysign %arg0, %arg0 : !cir.float + // CHECK: llvm.intr.copysign(%arg0, %arg0) : (f32, f32) -> f32 + + %115 = cir.copysign %arg1, %arg1 : !cir.vector + // CHECK: llvm.intr.copysign(%arg1, %arg1) : (vector<2xf64>, vector<2xf64>) -> vector<2xf64> + + %215 = cir.copysign %arg2, %arg2 : !cir.vector + // CHECK: llvm.intr.copysign(%arg2, %arg2) : (vector<4xf32>, vector<4xf32>) -> vector<4xf32> + + %16 = cir.fmax %arg0, %arg0 : !cir.float + // CHECK: llvm.intr.maxnum(%arg0, %arg0) : (f32, f32) -> f32 + + %116 = cir.fmax %arg1, %arg1 : !cir.vector + // CHECK: llvm.intr.maxnum(%arg1, %arg1) : (vector<2xf64>, vector<2xf64>) -> vector<2xf64> + + %216 = cir.fmax %arg2, %arg2 : !cir.vector + // CHECK: llvm.intr.maxnum(%arg2, %arg2) : (vector<4xf32>, vector<4xf32>) -> vector<4xf32> + + %17 = cir.fmin %arg0, %arg0 : !cir.float + // CHECK: llvm.intr.minnum(%arg0, %arg0) : (f32, f32) -> f32 + + %117 = cir.fmin %arg1, %arg1 : !cir.vector + // CHECK: llvm.intr.minnum(%arg1, %arg1) : (vector<2xf64>, vector<2xf64>) -> vector<2xf64> + + %217 = cir.fmin %arg2, %arg2 : !cir.vector + // CHECK: llvm.intr.minnum(%arg2, %arg2) : (vector<4xf32>, vector<4xf32>) -> vector<4xf32> + + %18 = cir.fmod %arg0, %arg0 : !cir.float + // CHECK: llvm.frem %arg0, %arg0 : f32 + + %118 = cir.fmod %arg1, %arg1 : !cir.vector + // CHECK: llvm.frem %arg1, %arg1 : vector<2xf64> + + %218 = cir.fmod %arg2, %arg2 : !cir.vector + // CHECK: llvm.frem %arg2, %arg2 : vector<4xf32> + + %19 = cir.pow %arg0, %arg0 : !cir.float + // CHECK: llvm.intr.pow(%arg0, %arg0) : (f32, f32) -> f32 + + %119 = cir.pow %arg1, %arg1 : !cir.vector + // CHECK: llvm.intr.pow(%arg1, %arg1) : (vector<2xf64>, vector<2xf64>) -> vector<2xf64> + + %219 = cir.pow %arg2, %arg2 : !cir.vector + // CHECK: llvm.intr.pow(%arg2, %arg2) : (vector<4xf32>, vector<4xf32>) -> vector<4xf32> + cir.return } } From 537ad9f67cf49afb4fc0be562a66594688222507 Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 27 Nov 2024 18:02:00 -0500 Subject: [PATCH 2132/2301] [CIR][CIRGen][Builtin] Support __builtin_elementwise_abs with vector of floating type (#1174) [PR1132](https://github.com/llvm/clangir/pull/1132) implements missing feature `fpUnaryOPsSupportVectorType`, so revisit this code. One another thing changed is that I stopped using `cir::isAnyFloatingPointType` as it contains types like long double and FP80 which are not supported by the [builtin's signature](https://clang.llvm.org/docs/LanguageExtensions.html#vector-builtins) --- clang/include/clang/CIR/MissingFeatures.h | 3 --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 6 ++++-- clang/test/CIR/CodeGen/builtins-elementwise.c | 13 ++++++++++++- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 547b3f05731c..05193495e19e 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -330,9 +330,6 @@ struct MissingFeatures { //-- Other missing features - // We need to extend fpUnaryOPs to support vector types. - static bool fpUnaryOPsSupportVectorType() { return false; } - // We need to track the parent record types that represent a field // declaration. This is necessary to determine the layout of a class. static bool fieldDeclAbstraction() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 608a3e8f60c6..d32306bf3d5e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1309,10 +1309,12 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, mlir::Type cirTy = ConvertType(E->getArg(0)->getType()); bool isIntTy = cir::isIntOrIntVectorTy(cirTy); if (!isIntTy) { - if (cir::isAnyFloatingPointType(cirTy)) { + mlir::Type eltTy = cirTy; + if (mlir::isa(cirTy)) + eltTy = mlir::cast(cirTy).getEltType(); + if (mlir::isa(eltTy)) { return emitUnaryFPBuiltin(*this, *E); } - assert(!MissingFeatures::fpUnaryOPsSupportVectorType()); llvm_unreachable("unsupported type for BI__builtin_elementwise_abs"); } mlir::Value arg = emitScalarExpr(E->getArg(0)); diff --git a/clang/test/CIR/CodeGen/builtins-elementwise.c b/clang/test/CIR/CodeGen/builtins-elementwise.c index af3b975970ac..191a4f8d8c3b 100644 --- a/clang/test/CIR/CodeGen/builtins-elementwise.c +++ b/clang/test/CIR/CodeGen/builtins-elementwise.c @@ -6,8 +6,11 @@ // XFAIL: * typedef int vint4 __attribute__((ext_vector_type(4))); +typedef float vfloat4 __attribute__((ext_vector_type(4))); +typedef double vdouble4 __attribute__((ext_vector_type(4))); -void test_builtin_elementwise_abs(vint4 vi4, int i, float f, double d) { +void test_builtin_elementwise_abs(vint4 vi4, int i, float f, double d, + vfloat4 vf4, vdouble4 vd4) { // CIR-LABEL: test_builtin_elementwise_abs // LLVM-LABEL: test_builtin_elementwise_abs // CIR: {{%.*}} = cir.fabs {{%.*}} : !cir.float @@ -25,4 +28,12 @@ void test_builtin_elementwise_abs(vint4 vi4, int i, float f, double d) { // CIR: {{%.*}} = cir.abs {{%.*}} : !s32 // LLVM: {{%.*}} = call i32 @llvm.abs.i32(i32 {{%.*}}, i1 false) i = __builtin_elementwise_abs(i); + + // CIR: {{%.*}} = cir.fabs {{%.*}} : !cir.vector + // LLVM: {{%.*}} = call <4 x float> @llvm.fabs.v4f32(<4 x float> {{%.*}}) + vf4 = __builtin_elementwise_abs(vf4); + + // CIR: {{%.*}} = cir.fabs {{%.*}} : !cir.vector + // LLVM: {{%.*}} = call <4 x double> @llvm.fabs.v4f64(<4 x double> {{%.*}}) + vd4 = __builtin_elementwise_abs(vd4); } From 0e51250eb252b5edc4f5c0e455eca566f2ccd81a Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 27 Nov 2024 18:02:31 -0500 Subject: [PATCH 2133/2301] [CIR][CIRGen][Builtin][Neon] Lower SISD neon_vqrdmulhs_s32 (#1175) [OG's implementation ](https://github.com/llvm/clangir/blob/aaf38b30d31251f3411790820c5e1bf914393ddc/clang/lib/CodeGen/CGBuiltin.cpp#L7527) provides one common code to handle all neon SISD intrinsics. But IMHO, it entangles different things together which hurts readability. Here, We start with simple easy-to-understand approach with specific case. And in the future, as we handle more intrinsics, we may come up with a few simple common patterns. --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 9 +++++++-- clang/test/CIR/CodeGen/AArch64/neon.c | 16 ++++++++++------ 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index bf0142f85220..50755eabf8c0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2611,8 +2611,12 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( static mlir::Value emitCommonNeonSISDBuiltinExpr( CIRGenFunction &cgf, const ARMVectorIntrinsicInfo &info, - llvm::SmallVectorImpl &args, const CallExpr *expr) { + llvm::SmallVectorImpl &ops, const CallExpr *expr) { unsigned builtinID = info.BuiltinID; + clang::CIRGen::CIRGenBuilderTy &builder = cgf.getBuilder(); + mlir::Type resultTy = cgf.convertType(expr->getType()); + mlir::Location loc = cgf.getLoc(expr->getExprLoc()); + switch (builtinID) { default: llvm::errs() << getAArch64SIMDIntrinsicString(builtinID) << " "; @@ -2860,7 +2864,8 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr( case NEON::BI__builtin_neon_vqrdmulhh_s16: llvm_unreachable(" neon_vqrdmulhh_s16 NYI "); case NEON::BI__builtin_neon_vqrdmulhs_s32: - llvm_unreachable(" neon_vqrdmulhs_s32 NYI "); + return emitNeonCall(builder, {resultTy, resultTy}, ops, + "aarch64.neon.sqrdmulh", resultTy, loc); case NEON::BI__builtin_neon_vqrshlb_s8: llvm_unreachable(" neon_vqrshlb_s8 NYI "); case NEON::BI__builtin_neon_vqrshlb_u8: diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index dad82af42d11..9e2ebaa11d4a 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -10005,12 +10005,16 @@ poly16x8_t test_vmull_p8(poly8x8_t a, poly8x8_t b) { // return vqrdmulhh_s16(a, b); // } -// NYI-LABEL: @test_vqrdmulhs_s32( -// NYI: [[VQRDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %a, i32 %b) -// NYI: ret i32 [[VQRDMULHS_S32_I]] -// int32_t test_vqrdmulhs_s32(int32_t a, int32_t b) { -// return vqrdmulhs_s32(a, b); -// } +int32_t test_vqrdmulhs_s32(int32_t a, int32_t b) { + return vqrdmulhs_s32(a, b); + + // CIR-LABEL: vqrdmulhs_s32 + // CIR: cir.llvm.intrinsic "aarch64.neon.sqrdmulh" {{%.*}}, {{%.*}} : (!s32i, !s32i) -> !s32i + + // LLVM: {{.*}}test_vqrdmulhs_s32(i32{{.*}}[[a:%.*]], i32{{.*}}[[b:%.*]]) + // LLVM: [[VQRDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 [[a]], i32 [[b]]) + // LLVM: ret i32 [[VQRDMULHS_S32_I]] +} // NYI-LABEL: @test_vmulxs_f32( // NYI: [[VMULXS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmulx.f32(float %a, float %b) From cf9ed011254fedd9a1d425e107e05b630e3e02ae Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 27 Nov 2024 18:02:51 -0500 Subject: [PATCH 2134/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vaddlvq_u16 (#1176) [OG's implementation here](https://github.com/llvm/clangir/blob/1b052dac90f8d070aafc2034e13ae3e88552d58a/clang/lib/CodeGen/CGBuiltin.cpp#L13432) [OG's test here](https://github.com/llvm/clangir/blob/1b052dac90f8d070aafc2034e13ae3e88552d58a/clang/test/CodeGen/AArch64/neon-across.c#L41) --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 5 ++++- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 11 +++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 50755eabf8c0..cdea3ed725b4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4264,7 +4264,10 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_u8 NYI"); } case NEON::BI__builtin_neon_vaddlvq_u16: { - llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_u16 NYI"); + mlir::Type argTy = cir::VectorType::get(builder.getContext(), UInt16Ty, 8); + llvm::SmallVector argOps = {emitScalarExpr(E->getArg(0))}; + return emitNeonCall(builder, {argTy}, argOps, "aarch64.neon.uaddlv", + UInt32Ty, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vaddlv_s8: { llvm_unreachable("NEON::BI__builtin_neon_vaddlv_s8 NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index 3f839cce90fc..7dabbbfc9925 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -882,3 +882,14 @@ float64x2_t test_vabsq_f64(float64x2_t a) { // LLVM: [[VABS_F:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[a]]) // LLVM: ret <2 x double> [[VABS_F]] } + +uint32_t test_vaddlvq_u16(uint16x8_t a) { + return vaddlvq_u16(a); + + // CIR-LABEL: vaddlvq_u16 + // CIR: cir.llvm.intrinsic "aarch64.neon.uaddlv" {{%.*}}: (!cir.vector) -> !u32i + + // LLVM: {{.*}}test_vaddlvq_u16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> [[A]]) + // LLVM: ret i32 [[VADDLV_I]] +} From 9c04e02508282afcc7597bd131918f61d5081225 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Tue, 3 Dec 2024 06:29:05 +0800 Subject: [PATCH 2135/2301] [CIR][CIRGen] Change SignBitOp result type to !cir.bool (#1187) Co-authored-by: Sirui Mu --- .../clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 2 +- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +++--- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 +-- clang/test/CIR/CodeGen/builtin-signbit.c | 15 ++++++--------- 4 files changed, 11 insertions(+), 15 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 87ef2766a5c9..52f9a3180fb7 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -414,7 +414,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } cir::SignBitOp createSignBit(mlir::Location loc, mlir::Value val) { - auto resTy = cir::IntType::get(getContext(), 32, true); + auto resTy = cir::BoolType::get(getContext()); return create(loc, resTy, val); } diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index f8732d4c2c15..2c754843b404 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -5274,11 +5274,11 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", def SignBitOp : CIR_Op<"signbit", [Pure]> { let summary = "Checks the sign of a floating-point number"; let description = [{ - It returns a non-zero value (true) if the number is negative - and zero (false) if the number is positive or zero. + It returns whether the sign bit (i.e. the highest bit) of the input operand + is set. }]; let arguments = (ins CIR_AnyFloat:$input); - let results = (outs SInt32:$res); + let results = (outs CIR_BoolType:$res); let assemblyFormat = [{ $input attr-dict `:` type($input) `->` qualified(type($res)) }]; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 2daaaeb9aa3c..0affaa12a52e 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3832,8 +3832,7 @@ mlir::LogicalResult CIRToLLVMSignBitOpLowering::matchAndRewrite( auto cmpResult = rewriter.create( op.getLoc(), mlir::LLVM::ICmpPredicate::slt, bitcast.getResult(), zero); auto converted = rewriter.create( - op.getLoc(), mlir::IntegerType::get(rewriter.getContext(), 32), - cmpResult); + op.getLoc(), getTypeConverter()->convertType(op.getType()), cmpResult); rewriter.replaceOp(op, converted); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/builtin-signbit.c b/clang/test/CIR/CodeGen/builtin-signbit.c index 622d877242cd..99c2b09d26e5 100644 --- a/clang/test/CIR/CodeGen/builtin-signbit.c +++ b/clang/test/CIR/CodeGen/builtin-signbit.c @@ -5,31 +5,28 @@ void test_signbit_float(float val) { // CIR-LABEL: test_signbit_float - // CIR: %{{.+}} = cir.signbit %{{.+}} : !cir.float -> !s32i + // CIR: %{{.+}} = cir.signbit %{{.+}} : !cir.float -> !cir.bool // LLVM-LABEL: test_signbit_float // LLVM: [[TMP1:%.*]] = bitcast float %{{.+}} to i32 // LLVM: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0 - // LLVM: %{{.+}} = zext i1 [[TMP2]] to i32 - __builtin_signbit(val); + if (__builtin_signbit(val)) {}; } void test_signbit_double(double val) { // CIR-LABEL: test_signbit_double - // CIR: %{{.+}} = cir.signbit %{{.+}} : !cir.float -> !s32i + // CIR: %{{.+}} = cir.signbit %{{.+}} : !cir.float -> !cir.bool // LLVM-LABEL: test_signbit_double // LLVM: [[CONV:%.*]] = fptrunc double %{{.+}} to float // LLVM: [[TMP1:%.*]] = bitcast float [[CONV]] to i32 // LLVM: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0 - // LLVM: %{{.+}} = zext i1 [[TMP2]] to i32 - __builtin_signbitf(val); + if (__builtin_signbitf(val)) {} } void test_signbit_long_double(long double val) { // CIR: test_signbit_long_double // LLVM: test_signbit_long_double - __builtin_signbitl(val); - // CIR: %{{.+}} = cir.signbit %{{.+}} : !cir.long_double -> !s32i + if (__builtin_signbitl(val)) {} + // CIR: %{{.+}} = cir.signbit %{{.+}} : !cir.long_double -> !cir.bool // LLVM: [[TMP1:%.*]] = bitcast x86_fp80 %{{.+}} to i80 // LLVM: [[TMP2:%.*]] = icmp slt i80 [[TMP1]], 0 - // LLVM: %{{.+}} = zext i1 [[TMP2]] to i32 } From 63471c049eaa24f62ee7a59089abc9927bc2f8dd Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 3 Dec 2024 06:31:27 +0800 Subject: [PATCH 2136/2301] [CIR] Add code generation options to lowering context (#1171) This PR adds `clang::CodeGenOptions` to the lowering context. Similar to `clang::LangOptions`, the code generation options are currently set to the default values when initializing the lowering context. Besides, this PR also adds a new attribute `#cir.opt_level`. The attribute is a module-level attribute and it holds the optimization level (e.g. -O1, -Oz, etc.). The attribute is consumed when initializing the lowering context to populate the `OptimizationLevel` and the `OptimizeSize` field in the code generation options. CIRGen is updated to attach this attribute to the module op. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 50 ++++++++++++++++++- .../clang/CIR/Dialect/IR/CIRDialect.td | 1 + clang/include/clang/CIR/MissingFeatures.h | 5 ++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 ++ clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 17 +++++++ .../TargetLowering/CIRLowerContext.cpp | 6 ++- .../TargetLowering/CIRLowerContext.h | 8 ++- .../Transforms/TargetLowering/LowerModule.cpp | 25 ++++++++-- .../Transforms/TargetLowering/LowerModule.h | 4 +- clang/test/CIR/CodeGen/optimization-attr.cpp | 32 ++++++++++++ 10 files changed, 141 insertions(+), 11 deletions(-) create mode 100644 clang/test/CIR/CodeGen/optimization-attr.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 71d8439e175a..0cfbf84fa58a 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -73,6 +73,54 @@ def LangAttr : CIR_Attr<"Lang", "lang"> { let genVerifyDecl = 0; } +//===----------------------------------------------------------------------===// +// OptInfoAttr +//===----------------------------------------------------------------------===// + +def CIR_OptInfoAttr : CIR_Attr<"OptInfo", "opt_info"> { + let summary = + "A module-level attribute that holds the optimization information"; + let description = [{ + The `#cir.opt_info` attribute holds the optimization related information. + Currently this attribute is a module-level attribute that gets attached to + the module operation during CIRGen. + + The `level` parameter gives the optimization level. It must be an integer + between 0 and 3, inclusive. It corresponds to the `OptimizationLevel` field + within the `clang::CodeGenOptions` structure. + + The `size` parameter gives the code size optimization level. It must be an + integer between 0 and 2, inclusive. It corresponds to the `OptimizeSize` + field within the `clang::CodeGenOptions` structure. + + The `level` and `size` parameters correspond to the optimization level + command line options passed to clang driver. The table below lists the + current correspondance relationship: + + | Flag | `level` | `size` | + |------------------|---------|--------| + | `-O0` or nothing | 0 | 0 | + | `-O1` | 1 | 0 | + | `-O2` | 2 | 0 | + | `-O3` | 3 | 0 | + | `-Os` | 2 | 1 | + | `-Oz` | 2 | 2 | + + Examples: + + ```mlir + #cir.opt_info // -O2 + ``` + }]; + + let parameters = (ins "unsigned":$level, "unsigned":$size); + + let assemblyFormat = [{ + `<` `level` `=` $level `,` `size` `=` $size `>` + }]; + let genVerifyDecl = 1; +} + //===----------------------------------------------------------------------===// // BoolAttr //===----------------------------------------------------------------------===// @@ -311,7 +359,7 @@ def ComplexAttr : CIR_Attr<"Complex", "complex", [TypedAttrInterface]> { contains values of the same CIR type. }]; - let parameters = (ins + let parameters = (ins AttributeSelfTypeParameter<"", "cir::ComplexType">:$type, "mlir::TypedAttr":$real, "mlir::TypedAttr":$imag); diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index c27bc1f28443..5b3b4eedc682 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -37,6 +37,7 @@ def CIR_Dialect : Dialect { static llvm::StringRef getSOBAttrName() { return "cir.sob"; } static llvm::StringRef getLangAttrName() { return "cir.lang"; } static llvm::StringRef getTripleAttrName() { return "cir.triple"; } + static llvm::StringRef getOptInfoAttrName() { return "cir.opt_info"; } static llvm::StringRef getGlobalCtorsAttrName() { return "cir.global_ctors"; } static llvm::StringRef getGlobalDtorsAttrName() { return "cir.global_dtors"; } diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 05193495e19e..886fc2426048 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -377,6 +377,11 @@ struct MissingFeatures { // just yet. Right now, it only instantiates the default lang options. static bool langOpts() { return false; } + // CodeGenOpts may affect lowering, but we do not carry this information into + // CIR just yet. Right now, it only instantiates the default code generation + // options. + static bool codeGenOpts() { return false; } + // Several type qualifiers are not yet supported in CIR, but important when // evaluating ABI-specific lowering. static bool qualifiedTypes() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index b9e332d4b27a..30188ac98dbb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -192,6 +192,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, cir::LangAttr::get(&context, lang)); theModule->setAttr(cir::CIRDialect::getTripleAttrName(), builder.getStringAttr(getTriple().str())); + if (CGO.OptimizationLevel > 0 || CGO.OptimizeSize > 0) + theModule->setAttr(cir::CIRDialect::getOptInfoAttrName(), + cir::OptInfoAttr::get(&context, CGO.OptimizationLevel, + CGO.OptimizeSize)); // Set the module name to be the name of the main file. TranslationUnitDecl // often contains invalid source locations and isn't a reliable source for the // module location. diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 08d8d601b1ad..9dbf12ad138c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -214,6 +214,23 @@ void LangAttr::print(AsmPrinter &printer) const { printer << "<" << getLang().getValue() << '>'; } +//===----------------------------------------------------------------------===// +// OptInfoAttr definitions +//===----------------------------------------------------------------------===// + +LogicalResult OptInfoAttr::verify(function_ref emitError, + unsigned level, unsigned size) { + if (level > 3) { + emitError() << "optimization level must be between 0 and 3 inclusive"; + return failure(); + } + if (size > 2) { + emitError() << "size optimization level must be between 0 and 2 inclusive"; + return failure(); + } + return success(); +} + //===----------------------------------------------------------------------===// // ConstPtrAttr definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp index 1dd790c1357c..b6c82adfd038 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.cpp @@ -23,8 +23,10 @@ namespace cir { CIRLowerContext::CIRLowerContext(mlir::ModuleOp module, - clang::LangOptions LOpts) - : MLIRCtx(module.getContext()), LangOpts(LOpts) {} + clang::LangOptions LOpts, + clang::CodeGenOptions CGOpts) + : MLIRCtx(module.getContext()), LangOpts(std::move(LOpts)), + CodeGenOpts(std::move(CGOpts)) {} CIRLowerContext::~CIRLowerContext() {} diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h index e178eeccc642..f4a988045927 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRLowerContext.h @@ -44,6 +44,9 @@ class CIRLowerContext : public llvm::RefCountedBase { /// this ASTContext object. clang::LangOptions LangOpts; + /// Options for code generation. + clang::CodeGenOptions CodeGenOpts; + //===--------------------------------------------------------------------===// // Built-in Types //===--------------------------------------------------------------------===// @@ -51,7 +54,8 @@ class CIRLowerContext : public llvm::RefCountedBase { mlir::Type CharTy; public: - CIRLowerContext(mlir::ModuleOp module, clang::LangOptions LOpts); + CIRLowerContext(mlir::ModuleOp module, clang::LangOptions LOpts, + clang::CodeGenOptions CGOpts); CIRLowerContext(const CIRLowerContext &) = delete; CIRLowerContext &operator=(const CIRLowerContext &) = delete; ~CIRLowerContext(); @@ -73,6 +77,8 @@ class CIRLowerContext : public llvm::RefCountedBase { const clang::LangOptions &getLangOpts() const { return LangOpts; } + const clang::CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; } + mlir::MLIRContext *getMLIRContext() const { return MLIRCtx; } //===--------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index f127195f416c..278091070763 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -86,11 +86,14 @@ createTargetLoweringInfo(LowerModule &LM) { } } -LowerModule::LowerModule(clang::LangOptions opts, mlir::ModuleOp &module, +LowerModule::LowerModule(clang::LangOptions langOpts, + clang::CodeGenOptions codeGenOpts, + mlir::ModuleOp &module, std::unique_ptr target, mlir::PatternRewriter &rewriter) - : context(module, opts), module(module), Target(std::move(target)), - ABI(createCXXABI(*this)), types(*this), rewriter(rewriter) { + : context(module, std::move(langOpts), std::move(codeGenOpts)), + module(module), Target(std::move(target)), ABI(createCXXABI(*this)), + types(*this), rewriter(rewriter) { context.initBuiltinTypes(*Target); } @@ -238,8 +241,20 @@ createLowerModule(mlir::ModuleOp module, mlir::PatternRewriter &rewriter) { cir_cconv_assert(!cir::MissingFeatures::langOpts()); clang::LangOptions langOpts; - return std::make_unique(langOpts, module, std::move(targetInfo), - rewriter); + // FIXME(cir): This just uses the default code generation options. We need to + // account for custom options. + cir_cconv_assert(!cir::MissingFeatures::codeGenOpts()); + clang::CodeGenOptions codeGenOpts; + + if (auto optInfo = mlir::cast_if_present( + module->getAttr(cir::CIRDialect::getOptInfoAttrName()))) { + codeGenOpts.OptimizationLevel = optInfo.getLevel(); + codeGenOpts.OptimizeSize = optInfo.getSize(); + } + + return std::make_unique(std::move(langOpts), + std::move(codeGenOpts), module, + std::move(targetInfo), rewriter); } } // namespace cir diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h index eb206f2750a7..a69d9cace84a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.h @@ -42,8 +42,8 @@ class LowerModule { mlir::PatternRewriter &rewriter; public: - LowerModule(clang::LangOptions opts, mlir::ModuleOp &module, - std::unique_ptr target, + LowerModule(clang::LangOptions langOpts, clang::CodeGenOptions codeGenOpts, + mlir::ModuleOp &module, std::unique_ptr target, mlir::PatternRewriter &rewriter); ~LowerModule() = default; diff --git a/clang/test/CIR/CodeGen/optimization-attr.cpp b/clang/test/CIR/CodeGen/optimization-attr.cpp new file mode 100644 index 000000000000..6af62bff6b35 --- /dev/null +++ b/clang/test/CIR/CodeGen/optimization-attr.cpp @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O0 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CHECK-O0 %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CHECK-O1 %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O2 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CHECK-O2 %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O3 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CHECK-O3 %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Os -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CHECK-Os %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Oz -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CHECK-Oz %s + +void foo() {} + +// CHECK-O0: module +// CHECK-O0-NOT: cir.opt_info + +// CHECK-O1: module +// CHECK-O1: cir.opt_info = #cir.opt_info + +// CHECK-O2: module +// CHECK-O2: cir.opt_info = #cir.opt_info + +// CHECK-O3: module +// CHECK-O3: cir.opt_info = #cir.opt_info + +// CHECK-Os: module +// CHECK-Os: cir.opt_info = #cir.opt_info + +// CHECK-Oz: module +// CHECK-Oz: cir.opt_info = #cir.opt_info From f22e0a73e905cdaa0441d171a42c75c9412d8d8c Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Tue, 3 Dec 2024 06:34:53 +0800 Subject: [PATCH 2137/2301] [CIR] [CodeGen] Handle arrangeCXXStructorDeclaration (#1179) Removes some NYIs. But left assert(false) due to missing tests. It looks better since it is not so scaring as NYI. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 29 ++++++++++++++++-------- clang/test/CIR/CodeGen/variadic-ctor.cpp | 11 +++++++++ 2 files changed, 31 insertions(+), 9 deletions(-) create mode 100644 clang/test/CIR/CodeGen/variadic-ctor.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index a8e06467e08d..29db13706437 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1193,19 +1193,30 @@ CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { CIRGenCXXABI::AddedStructorArgCounts AddedArgs = TheCXXABI.buildStructorSignature(GD, argTypes); - (void)AddedArgs; - assert(paramInfos.empty() && "NYI"); + if (!paramInfos.empty()) { + // Note: prefix implies after the first param. + if (AddedArgs.Prefix) + paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix, + FunctionProtoType::ExtParameterInfo{}); + if (AddedArgs.Suffix) + paramInfos.append(AddedArgs.Suffix, + FunctionProtoType::ExtParameterInfo{}); + + assert(false && "Please sent PR with a test and remove this"); + } - assert(!MD->isVariadic() && "Variadic fns NYI"); - RequiredArgs required = RequiredArgs::All; - (void)required; + RequiredArgs required = + (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size()) + : RequiredArgs::All); FunctionType::ExtInfo extInfo = FTP->getExtInfo(); + CanQualType resultType = TheCXXABI.HasThisReturn(GD) ? argTypes.front() + : TheCXXABI.hasMostDerivedReturn(GD) + ? Context.VoidPtrTy + : Context.VoidTy; - assert(!TheCXXABI.HasThisReturn(GD) && "NYI"); - - CanQualType resultType = Context.VoidTy; - (void)resultType; + assert(!TheCXXABI.HasThisReturn(GD) && + "Please sent PR with a test and remove this"); return arrangeCIRFunctionInfo(resultType, cir::FnInfoOpts::IsInstanceMethod, argTypes, extInfo, paramInfos, required); diff --git a/clang/test/CIR/CodeGen/variadic-ctor.cpp b/clang/test/CIR/CodeGen/variadic-ctor.cpp new file mode 100644 index 000000000000..afc2293a8895 --- /dev/null +++ b/clang/test/CIR/CodeGen/variadic-ctor.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -std=c++20 -fclangir -emit-cir -triple x86_64-unknown-linux-gnu %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR + +class A { +public: + A(void *, ...); +}; + +A a(nullptr, 1, "str"); + +// CIR: cir.func private @_ZN1AC1EPvz(!cir.ptr, !cir.ptr, ...) From a6bbd662dbd81637c00da7c23dfaa9b7a3967d1f Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 3 Dec 2024 07:36:37 +0800 Subject: [PATCH 2138/2301] [CIR] Add support for casting pointer-to-data-member values (#1188) This PR adds support for base-to-derived and derived-to-base casts on pointer-to-data-member values. Related to #973. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 52 +++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 27 ++++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 26 +++++++ .../Transforms/TargetLowering/CIRCXXABI.h | 12 +++ .../TargetLowering/ItaniumCXXABI.cpp | 46 +++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 74 +++++++++++++----- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 30 ++++++++ .../CodeGen/pointer-to-data-member-cast.cpp | 76 +++++++++++++++++++ 8 files changed, 323 insertions(+), 20 deletions(-) create mode 100644 clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 2c754843b404..1cfa199ef76b 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3279,6 +3279,58 @@ def DerivedClassAddrOp : CIR_Op<"derived_class_addr"> { let hasVerifier = 0; } +//===----------------------------------------------------------------------===// +// BaseDataMemberOp & DerivedDataMemberOp +//===----------------------------------------------------------------------===// + +def BaseDataMemberOp : CIR_Op<"base_data_member", [Pure]> { + let summary = + "Cast a derived class data member pointer to a base class data member " + "pointer"; + let description = [{ + The `cir.base_data_member` operation casts a data member pointer of type + `T Derived::*` to a data member pointer of type `T Base::*`, where `Base` + is an accessible non-ambiguous non-virtual base class of `Derived`. + + The `offset` parameter gives the offset in bytes of the `Base` base class + subobject within a `Derived` object. + }]; + + let arguments = (ins CIR_DataMemberType:$src, IndexAttr:$offset); + let results = (outs CIR_DataMemberType:$result); + + let assemblyFormat = [{ + `(` $src `:` qualified(type($src)) `)` + `[` $offset `]` `->` qualified(type($result)) attr-dict + }]; + + let hasVerifier = 1; +} + +def DerivedDataMemberOp : CIR_Op<"derived_data_member", [Pure]> { + let summary = + "Cast a base class data member pointer to a derived class data member " + "pointer"; + let description = [{ + The `cir.derived_data_member` operation casts a data member pointer of type + `T Base::*` to a data member pointer of type `T Derived::*`, where `Base` + is an accessible non-ambiguous non-virtual base class of `Derived`. + + The `offset` parameter gives the offset in bytes of the `Base` base class + subobject within a `Derived` object. + }]; + + let arguments = (ins CIR_DataMemberType:$src, IndexAttr:$offset); + let results = (outs CIR_DataMemberType:$result); + + let assemblyFormat = [{ + `(` $src `:` qualified(type($src)) `)` + `[` $offset `]` `->` qualified(type($result)) attr-dict + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // FuncOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 7d51dc05e7c1..2a5cfed9ff69 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1744,9 +1744,30 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_ReinterpretMemberPointer: llvm_unreachable("NYI"); case CK_BaseToDerivedMemberPointer: - llvm_unreachable("NYI"); - case CK_DerivedToBaseMemberPointer: - llvm_unreachable("NYI"); + case CK_DerivedToBaseMemberPointer: { + mlir::Value src = Visit(E); + + QualType derivedTy = + Kind == CK_DerivedToBaseMemberPointer ? E->getType() : CE->getType(); + const CXXRecordDecl *derivedClass = derivedTy->castAs() + ->getClass() + ->getAsCXXRecordDecl(); + CharUnits offset = CGF.CGM.computeNonVirtualBaseClassOffset( + derivedClass, CE->path_begin(), CE->path_end()); + + if (E->getType()->isMemberFunctionPointerType()) + llvm_unreachable("NYI"); + + mlir::Location loc = CGF.getLoc(E->getExprLoc()); + mlir::Type resultTy = CGF.getCIRType(DestTy); + mlir::IntegerAttr offsetAttr = Builder.getIndexAttr(offset.getQuantity()); + + if (Kind == CK_BaseToDerivedMemberPointer) + return Builder.create(loc, resultTy, src, + offsetAttr); + return Builder.create(loc, resultTy, src, + offsetAttr); + } case CK_ARCProduceObject: llvm_unreachable("NYI"); case CK_ARCConsumeObject: diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 0996338bb654..341533123497 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -799,6 +799,32 @@ LogicalResult cir::DynamicCastOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// BaseDataMemberOp & DerivedDataMemberOp +//===----------------------------------------------------------------------===// + +static LogicalResult verifyDataMemberCast(Operation *op, mlir::Value src, + mlir::Type resultTy) { + // Let the operand type be T1 C1::*, let the result type be T2 C2::*. + // Verify that T1 and T2 are the same type. + auto inputMemberTy = + mlir::cast(src.getType()).getMemberTy(); + auto resultMemberTy = mlir::cast(resultTy).getMemberTy(); + if (inputMemberTy != resultMemberTy) + return op->emitOpError() + << "member types of the operand and the result do not match"; + + return mlir::success(); +} + +LogicalResult cir::BaseDataMemberOp::verify() { + return verifyDataMemberCast(getOperation(), getSrc(), getType()); +} + +LogicalResult cir::DerivedDataMemberOp::verify() { + return verifyDataMemberCast(getOperation(), getSrc(), getType()); +} + //===----------------------------------------------------------------------===// // ComplexCreateOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index 4c2f442721e8..830d5589fbe9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -85,6 +85,18 @@ class CIRCXXABI { lowerGetRuntimeMember(cir::GetRuntimeMemberOp op, mlir::Type loweredResultTy, mlir::Value loweredAddr, mlir::Value loweredMember, mlir::OpBuilder &builder) const = 0; + + /// Lower the given cir.base_data_member op to a sequence of more "primitive" + /// CIR operations that act on the ABI types. + virtual mlir::Value lowerBaseDataMember(cir::BaseDataMemberOp op, + mlir::Value loweredSrc, + mlir::OpBuilder &builder) const = 0; + + /// Lower the given cir.derived_data_member op to a sequence of more + /// "primitive" CIR operations that act on the ABI types. + virtual mlir::Value + lowerDerivedDataMember(cir::DerivedDataMemberOp op, mlir::Value loweredSrc, + mlir::OpBuilder &builder) const = 0; }; /// Creates an Itanium-family ABI. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index a87cdc01ea9d..0ba1e51232c2 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -65,6 +65,14 @@ class ItaniumCXXABI : public CIRCXXABI { lowerGetRuntimeMember(cir::GetRuntimeMemberOp op, mlir::Type loweredResultTy, mlir::Value loweredAddr, mlir::Value loweredMember, mlir::OpBuilder &builder) const override; + + mlir::Value lowerBaseDataMember(cir::BaseDataMemberOp op, + mlir::Value loweredSrc, + mlir::OpBuilder &builder) const override; + + mlir::Value lowerDerivedDataMember(cir::DerivedDataMemberOp op, + mlir::Value loweredSrc, + mlir::OpBuilder &builder) const override; }; } // namespace @@ -129,6 +137,44 @@ mlir::Operation *ItaniumCXXABI::lowerGetRuntimeMember( memberBytesPtr); } +static mlir::Value lowerDataMemberCast(mlir::Operation *op, + mlir::Value loweredSrc, + std::int64_t offset, + bool isDerivedToBase, + mlir::OpBuilder &builder) { + if (offset == 0) + return loweredSrc; + + auto nullValue = builder.create( + op->getLoc(), mlir::IntegerAttr::get(loweredSrc.getType(), -1)); + auto isNull = builder.create(op->getLoc(), cir::CmpOpKind::eq, + loweredSrc, nullValue); + + auto offsetValue = builder.create( + op->getLoc(), mlir::IntegerAttr::get(loweredSrc.getType(), offset)); + auto binOpKind = isDerivedToBase ? cir::BinOpKind::Sub : cir::BinOpKind::Add; + auto adjustedPtr = builder.create( + op->getLoc(), loweredSrc.getType(), binOpKind, loweredSrc, offsetValue); + + return builder.create(op->getLoc(), loweredSrc.getType(), + isNull, nullValue, adjustedPtr); +} + +mlir::Value ItaniumCXXABI::lowerBaseDataMember(cir::BaseDataMemberOp op, + mlir::Value loweredSrc, + mlir::OpBuilder &builder) const { + return lowerDataMemberCast(op, loweredSrc, op.getOffset().getSExtValue(), + /*isDerivedToBase=*/true, builder); +} + +mlir::Value +ItaniumCXXABI::lowerDerivedDataMember(cir::DerivedDataMemberOp op, + mlir::Value loweredSrc, + mlir::OpBuilder &builder) const { + return lowerDataMemberCast(op, loweredSrc, op.getOffset().getSExtValue(), + /*isDerivedToBase=*/false, builder); +} + CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { switch (LM.getCXXABIKind()) { // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0affaa12a52e..abf8858ff681 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -914,6 +914,24 @@ mlir::LogicalResult CIRToLLVMDerivedClassAddrOpLowering::matchAndRewrite( return mlir::success(); } +mlir::LogicalResult CIRToLLVMBaseDataMemberOpLowering::matchAndRewrite( + cir::BaseDataMemberOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Value loweredResult = + lowerMod->getCXXABI().lowerBaseDataMember(op, adaptor.getSrc(), rewriter); + rewriter.replaceOp(op, loweredResult); + return mlir::success(); +} + +mlir::LogicalResult CIRToLLVMDerivedDataMemberOpLowering::matchAndRewrite( + cir::DerivedDataMemberOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + mlir::Value loweredResult = lowerMod->getCXXABI().lowerDerivedDataMember( + op, adaptor.getSrc(), rewriter); + rewriter.replaceOp(op, loweredResult); + return mlir::success(); +} + static mlir::Value getValueForVTableSymbol(mlir::Operation *op, mlir::ConversionPatternRewriter &rewriter, @@ -1518,7 +1536,13 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( mlir::ConversionPatternRewriter &rewriter) const { mlir::Attribute attr = op.getValue(); - if (mlir::isa(op.getType())) { + if (mlir::isa(op.getType())) { + // Verified cir.const operations cannot actually be of these types, but the + // lowering pass may generate temporary cir.const operations with these + // types. This is OK since MLIR allows unverified operations to be alive + // during a pass as long as they don't live past the end of the pass. + attr = op.getValue(); + } else if (mlir::isa(op.getType())) { int value = (op.getValue() == cir::BoolAttr::get(getContext(), cir::BoolType::get(getContext()), true)); @@ -2412,11 +2436,12 @@ CIRToLLVMBinOpLowering::getIntOverflowFlag(cir::BinOp op) const { mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite( cir::BinOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { - assert((op.getLhs().getType() == op.getRhs().getType()) && + assert((adaptor.getLhs().getType() == adaptor.getRhs().getType()) && "inconsistent operands' types not supported yet"); + mlir::Type type = op.getRhs().getType(); - assert((mlir::isa( - type)) && + assert((mlir::isa(type)) && "operand type not supported yet"); auto llvmTy = getTypeConverter()->convertType(op.getType()); @@ -2427,29 +2452,32 @@ mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite( switch (op.getKind()) { case cir::BinOpKind::Add: - if (mlir::isa(type)) + if (mlir::isa(type)) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case cir::BinOpKind::Sub: - if (mlir::isa(type)) + if (mlir::isa(type)) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case cir::BinOpKind::Mul: - if (mlir::isa(type)) + if (mlir::isa(type)) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case cir::BinOpKind::Div: - if (auto ty = mlir::dyn_cast(type)) { - if (ty.isUnsigned()) + if (mlir::isa(type)) { + auto isUnsigned = mlir::isa(type) + ? mlir::cast(type).isUnsigned() + : mlir::cast(type).isUnsigned(); + if (isUnsigned) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); @@ -2457,8 +2485,11 @@ mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite( rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); break; case cir::BinOpKind::Rem: - if (auto ty = mlir::dyn_cast(type)) { - if (ty.isUnsigned()) + if (mlir::isa(type)) { + auto isUnsigned = mlir::isa(type) + ? mlir::cast(type).isUnsigned() + : mlir::cast(type).isUnsigned(); + if (isUnsigned) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); else rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); @@ -2642,9 +2673,12 @@ mlir::LogicalResult CIRToLLVMCmpOpLowering::matchAndRewrite( mlir::Value llResult; // Lower to LLVM comparison op. - if (auto intTy = mlir::dyn_cast(type)) { - auto kind = - convertCmpKindToICmpPredicate(cmpOp.getKind(), intTy.isSigned()); + // if (auto intTy = mlir::dyn_cast(type)) { + if (mlir::isa(type)) { + auto isSigned = mlir::isa(type) + ? mlir::cast(type).isSigned() + : mlir::cast(type).isSigned(); + auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), isSigned); llResult = rewriter.create( cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); } else if (auto ptrTy = mlir::dyn_cast(type)) { @@ -3847,9 +3881,15 @@ void populateCIRToLLVMConversionPatterns( patterns.add(converter, dataLayout, stringGlobalsMap, argStringGlobalsMap, argsVarMap, patterns.getContext()); - patterns.add( - converter, patterns.getContext(), lowerModule); + patterns.add< + // clang-format off + CIRToLLVMBaseDataMemberOpLowering, + CIRToLLVMConstantOpLowering, + CIRToLLVMDerivedDataMemberOpLowering, + CIRToLLVMGetRuntimeMemberOpLowering, + CIRToLLVMGlobalOpLowering + // clang-format on + >(converter, patterns.getContext(), lowerModule); patterns.add< // clang-format off CIRToLLVMAbsOpLowering, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 892f0d07dd6b..14d33404b466 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -165,6 +165,36 @@ class CIRToLLVMDerivedClassAddrOpLowering mlir::ConversionPatternRewriter &) const override; }; +class CIRToLLVMBaseDataMemberOpLowering + : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; + +public: + CIRToLLVMBaseDataMemberOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + cir::LowerModule *lowerModule) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) {} + + mlir::LogicalResult + matchAndRewrite(cir::BaseDataMemberOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMDerivedDataMemberOpLowering + : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; + +public: + CIRToLLVMDerivedDataMemberOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + cir::LowerModule *lowerModule) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) {} + + mlir::LogicalResult + matchAndRewrite(cir::DerivedDataMemberOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + class CIRToLLVMVTTAddrPointOpLowering : public mlir::OpConversionPattern { public: diff --git a/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp b/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp new file mode 100644 index 000000000000..0127559bba65 --- /dev/null +++ b/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp @@ -0,0 +1,76 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CIR %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s + +struct Base1 { + int base1_data; +}; + +struct Base2 { + int base2_data; +}; + +struct Derived : Base1, Base2 { + int derived_data; +}; + +// CIR-LABEL: @_Z15base_to_derivedM5Base2i +// LLVM-LABEL: @_Z15base_to_derivedM5Base2i +auto base_to_derived(int Base2::*ptr) -> int Derived::* { + return ptr; + // CIR: %{{.+}} = cir.derived_data_member(%{{.+}} : !cir.data_member) [4] -> !cir.data_member + + // LLVM: %[[#src:]] = load i64, ptr %{{.+}} + // LLVM-NEXT: %[[#is_null:]] = icmp eq i64 %[[#src]], -1 + // LLVM-NEXT: %[[#is_null_bool:]] = zext i1 %[[#is_null]] to i8 + // LLVM-NEXT: %[[#adjusted:]] = add i64 %[[#src]], 4 + // LLVM-NEXT: %[[#cond:]] = trunc i8 %[[#is_null_bool]] to i1 + // LLVM-NEXT: %{{.+}} = select i1 %[[#cond]], i64 -1, i64 %[[#adjusted]] +} + +// CIR-LABEL: @_Z15derived_to_baseM7Derivedi +// LLVM-LABEL: @_Z15derived_to_baseM7Derivedi +auto derived_to_base(int Derived::*ptr) -> int Base2::* { + return static_cast(ptr); + // CIR: %{{.+}} = cir.base_data_member(%{{.+}} : !cir.data_member) [4] -> !cir.data_member + + // LLVM: %[[#src:]] = load i64, ptr %{{.+}} + // LLVM-NEXT: %[[#is_null:]] = icmp eq i64 %[[#src]], -1 + // LLVM-NEXT: %[[#is_null_bool:]] = zext i1 %[[#is_null]] to i8 + // LLVM-NEXT: %[[#adjusted:]] = sub i64 %[[#src]], 4 + // LLVM-NEXT: %[[#cond:]] = trunc i8 %[[#is_null_bool]] to i1 + // LLVM-NEXT: %9 = select i1 %[[#cond]], i64 -1, i64 %[[#adjusted]] +} + +// CIR-LABEL: @_Z27base_to_derived_zero_offsetM5Base1i +// LLVM-LABEL: @_Z27base_to_derived_zero_offsetM5Base1i +auto base_to_derived_zero_offset(int Base1::*ptr) -> int Derived::* { + return ptr; + // CIR: %{{.+}} = cir.derived_data_member(%{{.+}} : !cir.data_member) [0] -> !cir.data_member + + // No LLVM instructions emitted for performing a zero-offset cast. + // LLVM-NEXT: %[[#src_slot:]] = alloca i64, i64 1 + // LLVM-NEXT: %[[#ret_slot:]] = alloca i64, i64 1 + // LLVM-NEXT: store i64 %{{.+}}, ptr %[[#src_slot]] + // LLVM-NEXT: %[[#temp:]] = load i64, ptr %[[#src_slot]] + // LLVM-NEXT: store i64 %[[#temp]], ptr %[[#ret_slot]] + // LLVM-NEXT: %[[#ret:]] = load i64, ptr %[[#ret_slot]] + // LLVM-NEXT: ret i64 %[[#ret]] +} + +// CIR-LABEL: @_Z27derived_to_base_zero_offsetM7Derivedi +// LLVM-LABEL: @_Z27derived_to_base_zero_offsetM7Derivedi +auto derived_to_base_zero_offset(int Derived::*ptr) -> int Base1::* { + return static_cast(ptr); + // CIR: %{{.+}} = cir.base_data_member(%{{.+}} : !cir.data_member) [0] -> !cir.data_member + + // No LLVM instructions emitted for performing a zero-offset cast. + // LLVM-NEXT: %[[#src_slot:]] = alloca i64, i64 1 + // LLVM-NEXT: %[[#ret_slot:]] = alloca i64, i64 1 + // LLVM-NEXT: store i64 %{{.+}}, ptr %[[#src_slot]] + // LLVM-NEXT: %[[#temp:]] = load i64, ptr %[[#src_slot]] + // LLVM-NEXT: store i64 %[[#temp]], ptr %[[#ret_slot]] + // LLVM-NEXT: %[[#ret:]] = load i64, ptr %[[#ret_slot]] + // LLVM-NEXT: ret i64 %[[#ret]] +} From 3892a4c5f6c53ba6da62674aa99c642bda7a0c25 Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 3 Dec 2024 13:38:25 -0500 Subject: [PATCH 2139/2301] [CIR][CodeGen][LowerToLLVM] Fix llvm lowering of CIR `UnaryOpKind_Not` (#1194) Basically, for int type, the order of Ops is not the same as OG in the emitted LLVM IR. OG has constant as the second op position. See [OG's order ](https://godbolt.org/z/584jrWeYn). --- .../lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 14 +++++++------- clang/test/CIR/CodeGen/vectype-ext.cpp | 2 +- clang/test/CIR/Lowering/unary-not.cir | 2 +- clang/test/CIR/Lowering/vectype.cpp | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index abf8858ff681..2fafc9f08f8a 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2326,7 +2326,7 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite( } case cir::UnaryOpKind::Not: { // bit-wise compliment operator, implemented as an XOR with -1. - mlir::Value MinusOne; + mlir::Value minusOne; if (IsVector) { // Creating a vector object with all -1 values is easier said than // done. It requires a series of insertelement ops. @@ -2334,20 +2334,20 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite( getTypeConverter()->convertType(elementType); auto MinusOneInt = rewriter.create( loc, llvmElementType, mlir::IntegerAttr::get(llvmElementType, -1)); - MinusOne = rewriter.create(loc, llvmType); + minusOne = rewriter.create(loc, llvmType); auto NumElements = mlir::dyn_cast(type).getSize(); for (uint64_t i = 0; i < NumElements; ++i) { mlir::Value indexValue = rewriter.create( loc, rewriter.getI64Type(), i); - MinusOne = rewriter.create( - loc, MinusOne, MinusOneInt, indexValue); + minusOne = rewriter.create( + loc, minusOne, MinusOneInt, indexValue); } } else { - MinusOne = rewriter.create( + minusOne = rewriter.create( loc, llvmType, mlir::IntegerAttr::get(llvmType, -1)); } - rewriter.replaceOpWithNewOp(op, llvmType, MinusOne, - adaptor.getInput()); + rewriter.replaceOpWithNewOp( + op, llvmType, adaptor.getInput(), minusOne); return mlir::success(); } } diff --git a/clang/test/CIR/CodeGen/vectype-ext.cpp b/clang/test/CIR/CodeGen/vectype-ext.cpp index b5e64499d7b4..e4cc9927894f 100644 --- a/clang/test/CIR/CodeGen/vectype-ext.cpp +++ b/clang/test/CIR/CodeGen/vectype-ext.cpp @@ -135,7 +135,7 @@ void vector_int_test(int x) { vi4 n = ~a; // CIR: %{{[0-9]+}} = cir.unary(not, %{{[0-9]+}}) : !cir.vector, !cir.vector // LLVM: %[[#VAL:]] = load <4 x i32>, ptr %{{[0-9]+}}, align 16 - // LLVM-NEXT: %[[#RES:]] = xor <4 x i32> splat (i32 -1), %[[#VAL]] + // LLVM-NEXT: %[[#RES:]] = xor <4 x i32> %[[#VAL]], splat (i32 -1) // LLVM-NEXT: store <4 x i32> %[[#RES]], ptr %{{[0-9]+}}, align 16 // TODO: Ternary conditional operator diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index 4d686f3875af..86a7405bd0ee 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -18,7 +18,7 @@ module { // MLIR: = llvm.mlir.constant(-1 : i32) // MLIR: = llvm.xor -// LLVM: = xor i32 -1, %[[#]] +// LLVM: = xor i32 %[[#]], -1 cir.func @floatingPoint(%arg0: !cir.float, %arg1: !cir.double) { diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index ad8472abea01..c457500694ce 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -138,7 +138,7 @@ void vector_int_test(int x) { // CHECK: %[[#T101:]] = llvm.insertelement %[[#T94]], %[[#T99]][%[[#T100]] : i64] : vector<4xi32> // CHECK: %[[#T102:]] = llvm.mlir.constant(3 : i64) : i64 // CHECK: %[[#T103:]] = llvm.insertelement %[[#T94]], %[[#T101]][%[[#T102]] : i64] : vector<4xi32> - // CHECK: %[[#T104:]] = llvm.xor %[[#T103]], %[[#T93]] : vector<4xi32> + // CHECK: %[[#T104:]] = llvm.xor %[[#T93]], %[[#T103]] : vector<4xi32> // CHECK: llvm.store %[[#T104]], %[[#T29:]] {alignment = 16 : i64} : vector<4xi32>, !llvm.ptr // Ternary conditional operator From db6c436731766f7a2450cfc5ac5247b5ab013d07 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Tue, 3 Dec 2024 11:10:56 -0800 Subject: [PATCH 2140/2301] [CIR][CIRGen] Support emitting memcpys for fields (#1195) Default assignment operator generation was failing because of memcpy generation for fields being unsupported. Implement it following CodeGen's example, as usual. Follow-ups will avoid emitting memcpys for fields of trivial class types, and extend this to copy constructors as well. Fixes https://github.com/llvm/clangir/issues/1128 --- clang/include/clang/CIR/MissingFeatures.h | 1 + clang/lib/CIR/CodeGen/CIRGenClass.cpp | 59 +++++++++++--- clang/test/CIR/CodeGen/assign-operator.cpp | 92 ++++++++++++++++++++++ 3 files changed, 139 insertions(+), 13 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 886fc2426048..c248f17e0263 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -174,6 +174,7 @@ struct MissingFeatures { // ABIInfo queries. static bool useTargetLoweringABIInfo() { return false; } + static bool isEmptyFieldForLayout() { return false; } // Misc static bool cacheRecordLayouts() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 59c8adcbc5ed..4ae628cc621c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -98,15 +98,13 @@ class CopyingValueRepresentation { class FieldMemcpyizer { public: - FieldMemcpyizer(CIRGenFunction &CGF, const CXXRecordDecl *ClassDecl, + FieldMemcpyizer(CIRGenFunction &CGF, const CXXMethodDecl *MethodDecl, const VarDecl *SrcRec) - : CGF(CGF), ClassDecl(ClassDecl), - // SrcRec(SrcRec), + : CGF(CGF), MethodDecl(MethodDecl), ClassDecl(MethodDecl->getParent()), + SrcRec(SrcRec), RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)), FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0), - LastFieldOffset(0), LastAddedFieldIndex(0) { - (void)SrcRec; - } + LastFieldOffset(0), LastAddedFieldIndex(0) {} bool isMemcpyableField(FieldDecl *F) const { // Never memcpy fields when we are adding poised paddings. @@ -115,11 +113,11 @@ class FieldMemcpyizer { Qualifiers Qual = F->getType().getQualifiers(); if (Qual.hasVolatile() || Qual.hasObjCLifetime()) return false; - return true; } void addMemcpyableField(FieldDecl *F) { + assert(!cir::MissingFeatures::isEmptyFieldForLayout()); if (F->isZeroSize(CGF.getContext())) return; if (!FirstField) @@ -148,18 +146,54 @@ class FieldMemcpyizer { return; } - llvm_unreachable("NYI"); + uint64_t firstByteOffset; + if (FirstField->isBitField()) { + const CIRGenRecordLayout &rl = + CGF.getTypes().getCIRGenRecordLayout(FirstField->getParent()); + const CIRGenBitFieldInfo &bfInfo = rl.getBitFieldInfo(FirstField); + // FirstFieldOffset is not appropriate for bitfields, + // we need to use the storage offset instead. + firstByteOffset = CGF.getContext().toBits(bfInfo.StorageOffset); + } else { + firstByteOffset = FirstFieldOffset; + } + + CharUnits memcpySize = getMemcpySize(firstByteOffset); + QualType recordTy = CGF.getContext().getTypeDeclType(ClassDecl); + Address thisPtr = CGF.LoadCXXThisAddress(); + LValue destLv = CGF.makeAddrLValue(thisPtr, recordTy); + LValue dest = CGF.emitLValueForFieldInitialization(destLv, FirstField, + FirstField->getName()); + cir::LoadOp srcPtr = CGF.getBuilder().createLoad( + CGF.getLoc(MethodDecl->getLocation()), CGF.GetAddrOfLocalVar(SrcRec)); + LValue srcLv = CGF.MakeNaturalAlignAddrLValue(srcPtr, recordTy); + LValue src = CGF.emitLValueForFieldInitialization(srcLv, FirstField, + FirstField->getName()); + + emitMemcpyIR(dest.isBitField() ? dest.getBitFieldAddress() + : dest.getAddress(), + src.isBitField() ? src.getBitFieldAddress() : src.getAddress(), + memcpySize); + reset(); } void reset() { FirstField = nullptr; } protected: CIRGenFunction &CGF; + const CXXMethodDecl *MethodDecl; const CXXRecordDecl *ClassDecl; private: void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) { - llvm_unreachable("NYI"); + mlir::Location loc = CGF.getLoc(MethodDecl->getLocation()); + cir::ConstantOp sizeOp = + CGF.getBuilder().getConstInt(loc, CGF.SizeTy, Size.getQuantity()); + mlir::Value dest = + CGF.getBuilder().createBitcast(DestPtr.getPointer(), CGF.VoidPtrTy); + mlir::Value src = + CGF.getBuilder().createBitcast(SrcPtr.getPointer(), CGF.VoidPtrTy); + CGF.getBuilder().createMemCpy(loc, dest, src, sizeOp); } void addInitialField(FieldDecl *F) { @@ -192,7 +226,7 @@ class FieldMemcpyizer { } } - // const VarDecl *SrcRec; + const VarDecl *SrcRec; const ASTRecordLayout &RecLayout; FieldDecl *FirstField; FieldDecl *LastField; @@ -307,8 +341,7 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { public: ConstructorMemcpyizer(CIRGenFunction &CGF, const CXXConstructorDecl *CD, FunctionArgList &Args) - : FieldMemcpyizer(CGF, CD->getParent(), - getTrivialCopySource(CGF, CD, Args)), + : FieldMemcpyizer(CGF, CD, getTrivialCopySource(CGF, CD, Args)), ConstructorDecl(CD), MemcpyableCtor(CD->isDefaulted() && CD->isCopyOrMoveConstructor() && CGF.getLangOpts().getGC() == LangOptions::NonGC), @@ -446,7 +479,7 @@ class AssignmentMemcpyizer : public FieldMemcpyizer { public: AssignmentMemcpyizer(CIRGenFunction &CGF, const CXXMethodDecl *AD, FunctionArgList &Args) - : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]), + : FieldMemcpyizer(CGF, AD, Args[Args.size() - 1]), AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) { assert(Args.size() == 2); } diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 63fc25c5817f..55118f222c7f 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -99,3 +99,95 @@ int main() { // CHECK: %2 = cir.load %0 : !cir.ptr, !s32i // CHECK: cir.return %2 : !s32i // CHECK: } + +struct HasNonTrivialAssignOp { + HasNonTrivialAssignOp &operator=(const HasNonTrivialAssignOp &); +}; + +struct ContainsNonTrivial { + HasNonTrivialAssignOp start; + int i; + int *j; + HasNonTrivialAssignOp middle; + int k : 4; + int l : 4; + int m : 4; + HasNonTrivialAssignOp end; + ContainsNonTrivial &operator=(const ContainsNonTrivial &); +}; + +// CHECK-LABEL: cir.func @_ZN18ContainsNonTrivialaSERKS_( +// CHECK-NEXT: %[[#THIS:]] = cir.alloca !cir.ptr +// CHECK-NEXT: %[[#OTHER:]] = cir.alloca !cir.ptr +// CHECK-NEXT: %[[#RETVAL:]] = cir.alloca !cir.ptr +// CHECK-NEXT: cir.store %arg0, %[[#THIS]] +// CHECK-NEXT: cir.store %arg1, %[[#OTHER]] +// CHECK-NEXT: %[[#THIS_LOAD:]] = cir.load deref %[[#THIS]] +// CHECK-NEXT: %[[#THIS_START:]] = cir.get_member %[[#THIS_LOAD]][0] {name = "start"} +// CHECK-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CHECK-NEXT: %[[#OTHER_START:]] = cir.get_member %[[#OTHER_LOAD]][0] {name = "start"} +// CHECK-NEXT: cir.call @_ZN21HasNonTrivialAssignOpaSERKS_(%[[#THIS_START]], %[[#OTHER_START]]) +// CHECK-NEXT: %[[#THIS_I:]] = cir.get_member %[[#THIS_LOAD]][2] {name = "i"} +// CHECK-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CHECK-NEXT: %[[#OTHER_I:]] = cir.get_member %[[#OTHER_LOAD]][2] {name = "i"} +// CHECK-NEXT: %[[#MEMCPY_SIZE:]] = cir.const #cir.int<12> : !u64i +// CHECK-NEXT: %[[#THIS_I_CAST:]] = cir.cast(bitcast, %[[#THIS_I]] : !cir.ptr), !cir.ptr +// CHECK-NEXT: %[[#OTHER_I_CAST:]] = cir.cast(bitcast, %[[#OTHER_I]] : !cir.ptr), !cir.ptr +// CHECK-NEXT: cir.libc.memcpy %[[#MEMCPY_SIZE]] bytes from %[[#OTHER_I_CAST]] to %[[#THIS_I_CAST]] +// CHECK-NEXT: %[[#THIS_MIDDLE:]] = cir.get_member %[[#THIS_LOAD]][4] {name = "middle"} +// CHECK-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CHECK-NEXT: %[[#OTHER_MIDDLE:]] = cir.get_member %[[#OTHER_LOAD]][4] {name = "middle"} +// CHECK-NEXT: cir.call @_ZN21HasNonTrivialAssignOpaSERKS_(%[[#THIS_MIDDLE]], %[[#OTHER_MIDDLE]]) +// CHECK-NEXT: %[[#THIS_K:]] = cir.get_member %[[#THIS_LOAD]][5] {name = "k"} +// CHECK-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CHECK-NEXT: %[[#OTHER_K:]] = cir.get_member %[[#OTHER_LOAD]][5] {name = "k"} +// CHECK-NEXT: %[[#MEMCPY_SIZE:]] = cir.const #cir.int<2> : !u64i +// CHECK-NEXT: %[[#THIS_K_CAST:]] = cir.cast(bitcast, %[[#THIS_K]] : !cir.ptr), !cir.ptr +// CHECK-NEXT: %[[#OTHER_K_CAST:]] = cir.cast(bitcast, %[[#OTHER_K]] : !cir.ptr), !cir.ptr +// CHECK-NEXT: cir.libc.memcpy %[[#MEMCPY_SIZE]] bytes from %[[#OTHER_K_CAST]] to %[[#THIS_K_CAST]] +// CHECK-NEXT: %[[#THIS_END:]] = cir.get_member %[[#THIS_LOAD]][6] {name = "end"} +// CHECK-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CHECK-NEXT: %[[#OTHER_END:]] = cir.get_member %[[#OTHER_LOAD]][6] {name = "end"} +// CHECK-NEXT: cir.call @_ZN21HasNonTrivialAssignOpaSERKS_(%[[#THIS_END]], %[[#OTHER_END]]) +// CHECK-NEXT: cir.store %[[#THIS_LOAD]], %[[#RETVAL]] +// CHECK-NEXT: %[[#RETVAL_LOAD:]] = cir.load %[[#RETVAL]] +// CHECK-NEXT: cir.return %[[#RETVAL_LOAD]] +// CHECK-NEXT: } +ContainsNonTrivial & +ContainsNonTrivial::operator=(const ContainsNonTrivial &) = default; + +struct Trivial { + int i; + int *j; + double k; + int l[3]; +}; + +// CHECK-LABEL: cir.func linkonce_odr @_ZN7TrivialaSERKS_( +// CHECK-NEXT: %[[#THIS:]] = cir.alloca !cir.ptr +// CHECK-NEXT: %[[#OTHER:]] = cir.alloca !cir.ptr +// CHECK-NEXT: %[[#RETVAL:]] = cir.alloca !cir.ptr +// CHECK-NEXT: cir.store %arg0, %[[#THIS]] +// CHECK-NEXT: cir.store %arg1, %[[#OTHER]] +// CHECK-NEXT: %[[#THIS_LOAD:]] = cir.load deref %[[#THIS]] +// CHECK-NEXT: %[[#THIS_I:]] = cir.get_member %[[#THIS_LOAD]][0] {name = "i"} +// CHECK-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CHECK-NEXT: %[[#OTHER_I:]] = cir.get_member %[[#OTHER_LOAD]][0] {name = "i"} +// Note that tail padding bytes are not included. +// CHECK-NEXT: %[[#MEMCPY_SIZE:]] = cir.const #cir.int<36> : !u64i +// CHECK-NEXT: %[[#THIS_I_CAST:]] = cir.cast(bitcast, %[[#THIS_I]] : !cir.ptr), !cir.ptr +// CHECK-NEXT: %[[#OTHER_I_CAST:]] = cir.cast(bitcast, %[[#OTHER_I]] : !cir.ptr), !cir.ptr +// CHECK-NEXT: cir.libc.memcpy %[[#MEMCPY_SIZE]] bytes from %[[#OTHER_I_CAST]] to %[[#THIS_I_CAST]] +// CHECK-NEXT: cir.store %[[#THIS_LOAD]], %[[#RETVAL]] +// CHECK-NEXT: cir.br ^bb1 +// CHECK-NEXT: ^bb1: +// CHECK-NEXT: %[[#RETVAL_LOAD:]] = cir.load %[[#RETVAL]] +// CHECK-NEXT: cir.return %[[#RETVAL_LOAD]] +// CHECK-NEXT: } + +// We should explicitly call operator= even for trivial types. +// CHECK-LABEL: cir.func @_Z11copyTrivialR7TrivialS0_( +// CHECK: cir.call @_ZN7TrivialaSERKS_( +void copyTrivial(Trivial &a, Trivial &b) { + a = b; +} From 4c4ffecd5ee0356ac15edb37ab6a7e082172d3c8 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Tue, 3 Dec 2024 11:14:46 -0800 Subject: [PATCH 2141/2301] [CIR][CIRGen] Call trivial assignment operators in more cases (#1196) Our previous logic here was matching CodeGen, which folds trivial assignment operator calls into memcpys, but we want to avoid that. Note that we still end up emitting memcpys for arrays of classes with trivial assignment operators; https://github.com/llvm/clangir/issues/1177 tracks fixing that. --- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 17 +++--------- clang/test/CIR/CodeGen/assign-operator.cpp | 30 ++++++++++++++++++++++ 2 files changed, 34 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 4ae628cc621c..9fceab1c970e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -429,20 +429,11 @@ class AssignmentMemcpyizer : public FieldMemcpyizer { } return nullptr; } else if (CXXMemberCallExpr *MCE = dyn_cast(S)) { - CXXMethodDecl *MD = dyn_cast(MCE->getCalleeDecl()); - if (!(MD && isMemcpyEquivalentSpecialMember(MD))) - return nullptr; - MemberExpr *IOA = dyn_cast(MCE->getImplicitObjectArgument()); - if (!IOA) - return nullptr; - FieldDecl *Field = dyn_cast(IOA->getMemberDecl()); - if (!Field || !isMemcpyableField(Field)) - return nullptr; - MemberExpr *Arg0 = dyn_cast(MCE->getArg(0)); - if (!Arg0 || Field != dyn_cast(Arg0->getMemberDecl())) - return nullptr; - return Field; + // We want to represent all calls explicitly for analysis purposes. + return nullptr; } else if (CallExpr *CE = dyn_cast(S)) { + // TODO(cir): https://github.com/llvm/clangir/issues/1177: This can result + // in memcpys instead of calls to trivial member functions. FunctionDecl *FD = dyn_cast(CE->getCalleeDecl()); if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy) return nullptr; diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 55118f222c7f..e814a649c728 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -191,3 +191,33 @@ struct Trivial { void copyTrivial(Trivial &a, Trivial &b) { a = b; } + +struct ContainsTrivial { + Trivial t1; + Trivial t2; + ContainsTrivial &operator=(const ContainsTrivial &); +}; + +// We should explicitly call operator= even for trivial types. +// CHECK-LABEL: cir.func @_ZN15ContainsTrivialaSERKS_( +// CHECK: cir.call @_ZN7TrivialaSERKS_( +// CHECK: cir.call @_ZN7TrivialaSERKS_( +ContainsTrivial &ContainsTrivial::operator=(const ContainsTrivial &) = default; + +struct ContainsTrivialArray { + Trivial arr[2]; + ContainsTrivialArray &operator=(const ContainsTrivialArray &); +}; + +// We should be calling operator= here but don't currently. +// CHECK-LABEL: cir.func @_ZN20ContainsTrivialArrayaSERKS_( +// CHECK: %[[#THIS_LOAD:]] = cir.load deref %[[#]] +// CHECK-NEXT: %[[#THIS_ARR:]] = cir.get_member %[[#THIS_LOAD]][0] {name = "arr"} +// CHECK-NEXT: %[[#THIS_ARR_CAST:]] = cir.cast(bitcast, %[[#THIS_ARR]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#]] +// CHECK-NEXT: %[[#OTHER_ARR:]] = cir.get_member %[[#OTHER_LOAD]][0] {name = "arr"} +// CHECK-NEXT: %[[#OTHER_ARR_CAST:]] = cir.cast(bitcast, %[[#OTHER_ARR]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[#MEMCPY_SIZE:]] = cir.const #cir.int<80> : !u64i +// CHECK-NEXT: cir.libc.memcpy %[[#MEMCPY_SIZE]] bytes from %[[#OTHER_ARR_CAST]] to %[[#THIS_ARR_CAST]] +ContainsTrivialArray & +ContainsTrivialArray::operator=(const ContainsTrivialArray &) = default; From 8a79b2672be713fbce79f963a5a62f15dce9a2ea Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Tue, 3 Dec 2024 11:16:02 -0800 Subject: [PATCH 2142/2301] [CIR][CIRGen] Emit memcpys for copy constructors (#1197) CodeGen does so for trivial record types as well as non-record types; we only do it for non-record types. --- clang/include/clang/CIR/MissingFeatures.h | 1 - clang/lib/CIR/CodeGen/CIRGenClass.cpp | 44 ++++++++++------- clang/test/CIR/CodeGen/copy-constructor.cpp | 52 +++++++++++++++++++++ 3 files changed, 79 insertions(+), 18 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index c248f17e0263..57471c4dc9bb 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -141,7 +141,6 @@ struct MissingFeatures { static bool shouldSplitConstantStore() { return false; } static bool shouldCreateMemCpyFromGlobal() { return false; } static bool shouldReverseUnaryCondOnBoolExpr() { return false; } - static bool fieldMemcpyizerBuildMemcpy() { return false; } static bool isTrivialCtorOrDtor() { return false; } static bool isMemcpyEquivalentSpecialMember() { return false; } static bool constructABIArgDirectExtend() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 9fceab1c970e..e965d2f777ed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -333,9 +333,23 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const { if (!MemcpyableCtor) return false; + FieldDecl *field = MemberInit->getMember(); + assert(field && "No field for member init."); + QualType fieldType = field->getType(); + CXXConstructExpr *ce = dyn_cast(MemberInit->getInit()); + + // Bail out on any members of record type (unlike CodeGen, which emits a + // memcpy for trivially-copyable record types). + if (ce || (fieldType->isArrayType() && + CGF.getContext().getBaseElementType(fieldType)->isRecordType())) + return false; - assert(!cir::MissingFeatures::fieldMemcpyizerBuildMemcpy()); - return false; + // Bail out on volatile fields. + if (!isMemcpyableField(field)) + return false; + + // Otherwise we're good. + return true; } public: @@ -363,7 +377,10 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { // This memcpy is too small to be worthwhile. Fall back on default // codegen. if (!AggregatedInits.empty()) { - llvm_unreachable("NYI"); + CopyingValueRepresentation cvr(CGF); + emitMemberInitializer(CGF, ConstructorDecl->getParent(), + AggregatedInits[0], ConstructorDecl, Args); + AggregatedInits.clear(); } reset(); return; @@ -375,21 +392,14 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { } void pushEHDestructors() { - Address ThisPtr = CGF.LoadCXXThisAddress(); - QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); - LValue LHS = CGF.makeAddrLValue(ThisPtr, RecordTy); - (void)LHS; - - for (unsigned i = 0; i < AggregatedInits.size(); ++i) { - CXXCtorInitializer *MemberInit = AggregatedInits[i]; - QualType FieldType = MemberInit->getAnyMember()->getType(); - QualType::DestructionKind dtorKind = FieldType.isDestructedType(); - if (!CGF.needsEHCleanup(dtorKind)) - continue; - LValue FieldLHS = LHS; - emitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS); - CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType); +#ifndef NDEBUG + for (CXXCtorInitializer *memberInit : AggregatedInits) { + QualType fieldType = memberInit->getAnyMember()->getType(); + QualType::DestructionKind dtorKind = fieldType.isDestructedType(); + assert(!CGF.needsEHCleanup(dtorKind) && + "Non-record types shouldn't need EH cleanup"); } +#endif } void finish() { emitAggregatedInits(); } diff --git a/clang/test/CIR/CodeGen/copy-constructor.cpp b/clang/test/CIR/CodeGen/copy-constructor.cpp index 92e0887b02ef..8c6475d642e1 100644 --- a/clang/test/CIR/CodeGen/copy-constructor.cpp +++ b/clang/test/CIR/CodeGen/copy-constructor.cpp @@ -33,3 +33,55 @@ struct HasScalarArrayMember { // LLVM-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %[[#THIS_ARR]], ptr %[[#OTHER_ARR]], i32 16, i1 false) // LLVM-NEXT: ret void HasScalarArrayMember::HasScalarArrayMember(const HasScalarArrayMember &) = default; + +struct Trivial { int *i; }; +struct ManyMembers { + int i; + int j; + Trivial k; + int l[1]; + int m[2]; + Trivial n; + int &o; + int *p; +}; + +// CIR-LABEL: cir.func linkonce_odr @_ZN11ManyMembersC2ERKS_( +// CIR: %[[#THIS_LOAD:]] = cir.load %[[#]] +// CIR-NEXT: %[[#THIS_I:]] = cir.get_member %[[#THIS_LOAD]][0] {name = "i"} +// CIR-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER:]] +// CIR-NEXT: %[[#OTHER_I:]] = cir.get_member %[[#OTHER_LOAD]][0] {name = "i"} +// CIR-NEXT: %[[#MEMCPY_SIZE:]] = cir.const #cir.int<8> +// CIR-NEXT: %[[#THIS_I_CAST:]] = cir.cast(bitcast, %[[#THIS_I]] : !cir.ptr), !cir.ptr +// CIR-NEXT: %[[#OTHER_I_CAST:]] = cir.cast(bitcast, %[[#OTHER_I]] : !cir.ptr), !cir.ptr +// CIR-NEXT: cir.libc.memcpy %[[#MEMCPY_SIZE]] bytes from %[[#OTHER_I_CAST]] to %[[#THIS_I_CAST]] +// CIR-NEXT: %[[#THIS_K:]] = cir.get_member %[[#THIS_LOAD]][2] {name = "k"} +// CIR-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CIR-NEXT: %[[#OTHER_K:]] = cir.get_member %[[#OTHER_LOAD]][2] {name = "k"} +// CIR-NEXT: cir.call @_ZN7TrivialC1ERKS_(%[[#THIS_K]], %[[#OTHER_K]]) +// CIR-NEXT: %[[#THIS_L:]] = cir.get_member %[[#THIS_LOAD]][3] {name = "l"} +// CIR-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CIR-NEXT: %[[#OTHER_L:]] = cir.get_member %[[#OTHER_LOAD]][3] {name = "l"} +// CIR-NEXT: %[[#MEMCPY_SIZE:]] = cir.const #cir.int<12> +// CIR-NEXT: %[[#THIS_L_CAST:]] = cir.cast(bitcast, %[[#THIS_L]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: %[[#OTHER_L_CAST:]] = cir.cast(bitcast, %[[#OTHER_L]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: cir.libc.memcpy %[[#MEMCPY_SIZE]] bytes from %[[#OTHER_L_CAST]] to %[[#THIS_L_CAST]] +// CIR-NEXT: %[[#THIS_N:]] = cir.get_member %[[#THIS_LOAD]][5] {name = "n"} +// CIR-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CIR-NEXT: %[[#OTHER_N:]] = cir.get_member %[[#OTHER_LOAD]][5] {name = "n"} +// CIR-NEXT: cir.call @_ZN7TrivialC1ERKS_(%[[#THIS_N]], %[[#OTHER_N]]) +// CIR-NEXT: %[[#THIS_O:]] = cir.get_member %[[#THIS_LOAD]][6] {name = "o"} +// CIR-NEXT: %[[#OTHER_LOAD:]] = cir.load %[[#OTHER]] +// CIR-NEXT: %[[#OTHER_O:]] = cir.get_member %[[#OTHER_LOAD]][6] {name = "o"} +// CIR-NEXT: %[[#MEMCPY_SIZE:]] = cir.const #cir.int<16> +// CIR-NEXT: %[[#THIS_O_CAST:]] = cir.cast(bitcast, %[[#THIS_O]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: %[[#OTHER_O_CAST:]] = cir.cast(bitcast, %[[#OTHER_O]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: cir.libc.memcpy %[[#MEMCPY_SIZE]] bytes from %[[#OTHER_O_CAST]] to %[[#THIS_O_CAST]] +// CIR-NEXT: cir.return +// CIR-NEXT: } + +// CIR-LABEL: cir.func @_Z6doCopyR11ManyMembers( +// CIR: cir.call @_ZN11ManyMembersC1ERKS_( +ManyMembers doCopy(ManyMembers &src) { + return src; +} From 495ffd454edce5790f3996cf9bcfe4eb911f9ae9 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Tue, 3 Dec 2024 11:17:07 -0800 Subject: [PATCH 2143/2301] [CIR][CIRGen] Remove -clangir-disable-emit-cxx-default (#1198) This is a leftover from when ClangIR was initially focused on analysis and could ignore default method generation. We now handle default methods and should generate them in all cases. This fixes several bugs: - Default methods weren't emitted when emitting LLVM, only CIR. - Default methods only referenced by other default methods weren't emitted. --- clang/include/clang/CIR/CIRGenerator.h | 1 - clang/include/clang/Driver/Options.td | 4 ---- .../include/clang/Frontend/FrontendOptions.h | 10 +++----- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 12 +--------- clang/lib/CIR/CodeGen/CIRGenModule.h | 11 --------- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 2 -- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 4 ---- clang/lib/Frontend/CompilerInvocation.cpp | 3 --- clang/test/CIR/CodeGen/assign-operator.cpp | 11 --------- clang/test/CIR/CodeGen/coro-task.cpp | 2 +- clang/test/CIR/CodeGen/default-methods.cpp | 24 +++++++++++++++++++ clang/test/CIR/CodeGen/delete.cpp | 4 ++-- clang/test/CIR/CodeGen/derived-to-base.cpp | 2 +- clang/test/CIR/CodeGen/dtors-scopes.cpp | 4 ++-- clang/test/CIR/CodeGen/dtors.cpp | 2 +- clang/test/CIR/CodeGen/libcall.cpp | 2 +- clang/test/CIR/CodeGen/move.cpp | 2 +- clang/test/CIR/CodeGen/new.cpp | 4 ++-- clang/test/CIR/CodeGen/nrvo.cpp | 2 +- clang/test/CIR/CodeGen/rangefor.cpp | 2 +- clang/test/CIR/CodeGen/std-array.cpp | 4 ++-- clang/test/CIR/CodeGen/std-find.cpp | 4 ++-- clang/test/CIR/CodeGen/vector.cpp | 4 ++-- clang/test/CIR/CodeGen/vtable-rtti.cpp | 4 ++-- clang/test/CIR/Transforms/lib-opt-find.cpp | 2 +- .../CIR/Transforms/lifetime-check-agg.cpp | 2 +- .../CIR/Transforms/lifetime-check-owner.cpp | 6 ++--- .../lifetime-check-range-for-vector.cpp | 4 ++-- .../CIR/Transforms/lifetime-check-string.cpp | 8 +++---- .../test/CIR/Transforms/lifetime-fn-args.cpp | 4 ++-- .../CIR/Transforms/lifetime-null-passing.cpp | 4 ++-- clang/test/CIR/Transforms/lifetime-this.cpp | 4 ++-- 32 files changed, 66 insertions(+), 92 deletions(-) create mode 100644 clang/test/CIR/CodeGen/default-methods.cpp diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index f4c30a5e892b..f121d2c453d7 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -102,7 +102,6 @@ class CIRGenerator : public clang::ASTConsumer { bool verifyModule(); void emitDeferredDecls(); - void emitDefaultMethods(); }; } // namespace cir diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 6722cb60bafa..3bd0fce9ab29 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3092,10 +3092,6 @@ def clangir_disable_verifier : Flag<["-"], "clangir-disable-verifier">, Visibility<[ClangOption, CC1Option]>, HelpText<"ClangIR: Disable MLIR module verifier">, MarshallingInfoFlag>; -def clangir_disable_emit_cxx_default : Flag<["-"], "clangir-disable-emit-cxx-default">, - Visibility<[ClangOption, CC1Option]>, - HelpText<"ClangIR: Disable emission of c++ default (compiler implemented) methods.">, - MarshallingInfoFlag>; def clangir_verify_diagnostics : Flag<["-"], "clangir-verify-diagnostics">, Visibility<[ClangOption, CC1Option]>, HelpText<"ClangIR: Enable diagnostic verification in MLIR, similar to clang's -verify">, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 64664f41c879..c20744b49136 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -433,9 +433,6 @@ class FrontendOptions { /// Disable Clang IR (CIR) verifier unsigned ClangIRDisableCIRVerifier : 1; - /// Disable ClangIR emission for CXX default (compiler generated methods). - unsigned ClangIRDisableEmitCXXDefault : 1; - /// Enable diagnostic verification for CIR unsigned ClangIRVerifyDiags : 1; @@ -655,10 +652,9 @@ class FrontendOptions { EmitPrettySymbolGraphs(false), GenReducedBMI(false), UseClangIRPipeline(false), ClangIRDirectLowering(false), ClangIRDisablePasses(false), ClangIRDisableCIRVerifier(false), - ClangIRDisableEmitCXXDefault(false), ClangIRLifetimeCheck(false), - ClangIRIdiomRecognizer(false), ClangIRLibOpt(false), - ClangIRAnalysisOnly(false), TimeTraceGranularity(500), - TimeTraceVerbose(false) {} + ClangIRLifetimeCheck(false), ClangIRIdiomRecognizer(false), + ClangIRLibOpt(false), ClangIRAnalysisOnly(false), + TimeTraceGranularity(500), TimeTraceVerbose(false) {} /// getInputKindForExtension - Return the appropriate input kind for a file /// extension. For example, "c" would return Language::C. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 30188ac98dbb..0582a95dc456 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2786,10 +2786,7 @@ cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( FD = FD->getPreviousDecl()) { if (isa(FD->getLexicalDeclContext())) { if (FD->doesThisDeclarationHaveABody()) { - if (isDefaultedMethod(FD)) - addDefaultMethodsToEmit(GD.getWithDecl(FD)); - else - addDeferredDeclToEmit(GD.getWithDecl(FD)); + addDeferredDeclToEmit(GD.getWithDecl(FD)); break; } } @@ -2939,13 +2936,6 @@ void CIRGenModule::emitDeferred(unsigned recursionLimit) { } } -void CIRGenModule::emitDefaultMethods() { - // Differently from DeferredDeclsToEmit, there's no recurrent use of - // DefaultMethodsToEmit, so use it directly for emission. - for (auto &D : DefaultMethodsToEmit) - emitGlobalDecl(D); -} - mlir::IntegerAttr CIRGenModule::getSize(CharUnits size) { return builder.getSizeFromCharUnits(&getMLIRContext(), size); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 61d975491f33..771c90a2bccd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -566,14 +566,6 @@ class CIRGenModule : public CIRGenTypeCache { DeferredDeclsToEmit.emplace_back(GD); } - // After HandleTranslation finishes, differently from DeferredDeclsToEmit, - // DefaultMethodsToEmit is only called after a set of CIR passes run. See - // addDefaultMethodsToEmit usage for examples. - std::vector DefaultMethodsToEmit; - void addDefaultMethodsToEmit(clang::GlobalDecl GD) { - DefaultMethodsToEmit.emplace_back(GD); - } - std::pair getAddrAndTypeOfCXXStructor( clang::GlobalDecl GD, const CIRGenFunctionInfo *FnInfo = nullptr, cir::FuncType FnType = nullptr, bool Dontdefer = false, @@ -718,9 +710,6 @@ class CIRGenModule : public CIRGenTypeCache { /// Helper for `emitDeferred` to apply actual codegen. void emitGlobalDecl(clang::GlobalDecl &D); - /// Build default methods not emitted before this point. - void emitDefaultMethods(); - const llvm::Triple &getTriple() const { return target.getTriple(); } // Finalize CIR code generation. diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index a4c6bd16df52..aefd0842bd9d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -123,8 +123,6 @@ void CIRGenerator::HandleInlineFunctionDefinition(FunctionDecl *D) { CGM->AddDeferredUnusedCoverageMapping(D); } -void CIRGenerator::emitDefaultMethods() { CGM->emitDefaultMethods(); } - void CIRGenerator::emitDeferredDecls() { if (DeferredInlineMemberFuncDefs.empty()) return; diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index ad11ae9be66f..ec3e4207e43a 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -264,10 +264,6 @@ class CIRGenConsumer : public clang::ASTConsumer { case CIRGenAction::OutputType::EmitCIR: case CIRGenAction::OutputType::EmitCIRFlat: if (outputStream && mlirMod) { - // Emit remaining defaulted C++ methods - if (!feOptions.ClangIRDisableEmitCXXDefault) - gen->emitDefaultMethods(); - // FIXME: we cannot roundtrip prettyForm=true right now. mlir::OpPrintingFlags flags; flags.enableDebugInfo(/*enable=*/true, /*prettyForm=*/false); diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index dcd979c35e9d..a505b5412448 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3122,9 +3122,6 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, if (Args.hasArg(OPT_clangir_disable_verifier)) Opts.ClangIRDisableCIRVerifier = true; - if (Args.hasArg(OPT_clangir_disable_emit_cxx_default)) - Opts.ClangIRDisableEmitCXXDefault = true; - if (Args.hasArg(OPT_clangir_verify_diagnostics)) Opts.ClangIRVerifyDiags = true; diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index e814a649c728..47a815407874 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -1,9 +1,6 @@ // RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// RUN: %clang_cc1 -std=c++17 -mconstructor-aliases -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -clangir-disable-emit-cxx-default %s -o %t-disable.cir -// RUN: FileCheck --input-file=%t-disable.cir %s --check-prefix=DISABLE - int strlen(char const *); struct String { @@ -40,9 +37,6 @@ struct String { // CHECK: cir.return // CHECK: } - // DISABLE: cir.func linkonce_odr @_ZN10StringViewC2ERK6String - // DISABLE-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} - // StringView::operator=(StringView&&) // // CHECK: cir.func linkonce_odr @_ZN10StringViewaSEOS_ @@ -61,9 +55,6 @@ struct String { // CHECK: %8 = cir.load %2 : !cir.ptr> // CHECK: cir.return %8 : !cir.ptr // CHECK: } - - // DISABLE: cir.func private @_ZN10StringViewaSEOS_ - // DISABLE-NEXT: cir.func @main() }; struct StringView { @@ -179,8 +170,6 @@ struct Trivial { // CHECK-NEXT: %[[#OTHER_I_CAST:]] = cir.cast(bitcast, %[[#OTHER_I]] : !cir.ptr), !cir.ptr // CHECK-NEXT: cir.libc.memcpy %[[#MEMCPY_SIZE]] bytes from %[[#OTHER_I_CAST]] to %[[#THIS_I_CAST]] // CHECK-NEXT: cir.store %[[#THIS_LOAD]], %[[#RETVAL]] -// CHECK-NEXT: cir.br ^bb1 -// CHECK-NEXT: ^bb1: // CHECK-NEXT: %[[#RETVAL_LOAD:]] = cir.load %[[#RETVAL]] // CHECK-NEXT: cir.return %[[#RETVAL_LOAD]] // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index acd2818e7a4b..364f0bfc85ce 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s namespace std { diff --git a/clang/test/CIR/CodeGen/default-methods.cpp b/clang/test/CIR/CodeGen/default-methods.cpp new file mode 100644 index 000000000000..73f7afb1b814 --- /dev/null +++ b/clang/test/CIR/CodeGen/default-methods.cpp @@ -0,0 +1,24 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CIR %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s + +// We should emit and call both implicit operator= functions. +struct S { + struct T { + int x; + } t; +}; + +// CIR-LABEL: cir.func linkonce_odr @_ZN1S1TaSERKS0_({{.*}} { +// CIR-LABEL: cir.func linkonce_odr @_ZN1SaSERKS_( +// CIR: cir.call @_ZN1S1TaSERKS0_( +// CIR-LABEL: cir.func @_Z1fR1SS0_( +// CIR: cir.call @_ZN1SaSERKS_( + +// LLVM-LABEL: define linkonce_odr ptr @_ZN1S1TaSERKS0_( +// LLVM-LABEL: define linkonce_odr ptr @_ZN1SaSERKS_( +// LLVM: call ptr @_ZN1S1TaSERKS0_( +// LLVM-LABEL: define dso_local void @_Z1fR1SS0_( +// LLVM: call ptr @_ZN1SaSERKS_( +void f(S &s1, S &s2) { s1 = s2; } diff --git a/clang/test/CIR/CodeGen/delete.cpp b/clang/test/CIR/CodeGen/delete.cpp index b02641ff87b0..d45baa241adc 100644 --- a/clang/test/CIR/CodeGen/delete.cpp +++ b/clang/test/CIR/CodeGen/delete.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s typedef __typeof(sizeof(int)) size_t; @@ -12,4 +12,4 @@ namespace test1 { // CHECK: %[[CONST:.*]] = cir.const #cir.int<4> : !u64i // CHECK: cir.call @_ZN5test11AdlEPvm({{.*}}, %[[CONST]]) -} \ No newline at end of file +} diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index e3a860d99ac3..879d09f58c34 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s typedef enum { diff --git a/clang/test/CIR/CodeGen/dtors-scopes.cpp b/clang/test/CIR/CodeGen/dtors-scopes.cpp index c9bdb1dd2da8..baa1d7666df9 100644 --- a/clang/test/CIR/CodeGen/dtors-scopes.cpp +++ b/clang/test/CIR/CodeGen/dtors-scopes.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s // RUN: %clang_cc1 -triple arm64-apple-macosx14.0.0 -std=c++20 -fclangir -emit-cir %s -o %t2.cir // RUN: FileCheck --input-file=%t2.cir %s --check-prefix=DTOR_BODY @@ -33,4 +33,4 @@ void dtor1() { // DTOR_BODY: cir.call @_ZN1CD2Ev // DTOR_BODY: cir.return -// DTOR_BODY: } \ No newline at end of file +// DTOR_BODY: } diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index 2202d339b76d..60c330d53b78 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s enum class EFMode { Always, Verbose }; diff --git a/clang/test/CIR/CodeGen/libcall.cpp b/clang/test/CIR/CodeGen/libcall.cpp index 17d2e7912833..192b0ff13294 100644 --- a/clang/test/CIR/CodeGen/libcall.cpp +++ b/clang/test/CIR/CodeGen/libcall.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s typedef __builtin_va_list va_list; diff --git a/clang/test/CIR/CodeGen/move.cpp b/clang/test/CIR/CodeGen/move.cpp index 6e3f317d0615..2a6cbc158b0e 100644 --- a/clang/test/CIR/CodeGen/move.cpp +++ b/clang/test/CIR/CodeGen/move.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s namespace std { diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index bcce7d566793..6e829dd5de6b 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s #include "std-cxx.h" @@ -55,4 +55,4 @@ class B { void t() { B b; b.construct(&b); -} \ No newline at end of file +} diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp index 8edc47aa2c8a..1138e7733667 100644 --- a/clang/test/CIR/CodeGen/nrvo.cpp +++ b/clang/test/CIR/CodeGen/nrvo.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s #include "std-cxx.h" diff --git a/clang/test/CIR/CodeGen/rangefor.cpp b/clang/test/CIR/CodeGen/rangefor.cpp index 8c63b688cdd0..b9ab633064c3 100644 --- a/clang/test/CIR/CodeGen/rangefor.cpp +++ b/clang/test/CIR/CodeGen/rangefor.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s #include "std-cxx.h" diff --git a/clang/test/CIR/CodeGen/std-array.cpp b/clang/test/CIR/CodeGen/std-array.cpp index a360a0a37d44..891eb99d8888 100644 --- a/clang/test/CIR/CodeGen/std-array.cpp +++ b/clang/test/CIR/CodeGen/std-array.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s #include "std-cxx.h" @@ -14,4 +14,4 @@ void t() { // CHECK: {{.*}} = cir.cast(array_to_ptrdecay // CHECK: {{.*}} = cir.const #cir.int<9> : !u32i -// CHECK: cir.call @_ZNSt5arrayIhLj9EE3endEv \ No newline at end of file +// CHECK: cir.call @_ZNSt5arrayIhLj9EE3endEv diff --git a/clang/test/CIR/CodeGen/std-find.cpp b/clang/test/CIR/CodeGen/std-find.cpp index 73494ba8b308..6f4e41a35ca5 100644 --- a/clang/test/CIR/CodeGen/std-find.cpp +++ b/clang/test/CIR/CodeGen/std-find.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s #include "std-cxx.h" @@ -24,4 +24,4 @@ int test_find(unsigned char n = 3) // CHECK: cir.if %[[neq_cmp]] return num_found; -} \ No newline at end of file +} diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp index ad99c6e4fe6a..177b4813b226 100644 --- a/clang/test/CIR/CodeGen/vector.cpp +++ b/clang/test/CIR/CodeGen/vector.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s #include "std-cxx.h" @@ -32,4 +32,4 @@ void m() { std::vector a; int i = 43; a.resize(i); -} \ No newline at end of file +} diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index 9e86b41f1d30..e11e80bd5b4f 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fno-rtti -mconstructor-aliases -clangir-disable-emit-cxx-default -emit-cir %s -o %t2.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -fno-rtti -mconstructor-aliases -emit-cir %s -o %t2.cir // RUN: FileCheck --input-file=%t2.cir --check-prefix=RTTI_DISABLED %s class A diff --git a/clang/test/CIR/Transforms/lib-opt-find.cpp b/clang/test/CIR/Transforms/lib-opt-find.cpp index c11daba10f28..0f26ce4175a4 100644 --- a/clang/test/CIR/Transforms/lib-opt-find.cpp +++ b/clang/test/CIR/Transforms/lib-opt-find.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -clangir-disable-emit-cxx-default -fclangir -fclangir-idiom-recognizer -fclangir-lib-opt -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -fclangir -fclangir-idiom-recognizer -fclangir-lib-opt -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s #include "std-cxx.h" diff --git a/clang/test/CIR/Transforms/lifetime-check-agg.cpp b/clang/test/CIR/Transforms/lifetime-check-agg.cpp index ebfe00c2ad56..9dc9f98592f8 100644 --- a/clang/test/CIR/Transforms/lifetime-check-agg.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-agg.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir-analysis-only -fclangir-lifetime-check="history=all;remarks=all" %s -clangir-verify-diagnostics -emit-obj -o /dev/null typedef enum SType { diff --git a/clang/test/CIR/Transforms/lifetime-check-owner.cpp b/clang/test/CIR/Transforms/lifetime-check-owner.cpp index 23643c821884..089bc4886036 100644 --- a/clang/test/CIR/Transforms/lifetime-check-owner.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-owner.cpp @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fclangir-lifetime-check="history=all;remarks=all;history_limit=1" -clangir-verify-diagnostics -emit-cir %s -o %t.cir -struct [[gsl::Owner(int)]] MyIntOwner { +struct [[gsl::Owner(int)]] MyIntOwner { // expected-remark {{pset => { fn_arg:0 }}} int val; MyIntOwner(int v) : val(v) {} void changeInt(int i); @@ -8,7 +8,7 @@ struct [[gsl::Owner(int)]] MyIntOwner { int read() const; }; -struct [[gsl::Pointer(int)]] MyIntPointer { +struct [[gsl::Pointer(int)]] MyIntPointer { // expected-remark {{pset => { fn_arg:0 }}} int *ptr; MyIntPointer(int *p = nullptr) : ptr(p) {} MyIntPointer(const MyIntOwner &); @@ -68,4 +68,4 @@ void yolo4() { p.read(); // expected-warning {{use of invalid pointer 'p'}} // expected-remark@-1 {{pset => { invalid }}} q.read(); // expected-remark {{pset => { o1__1' }}} -} \ No newline at end of file +} diff --git a/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp b/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp index e9c6d62b6f64..989bbcf91bee 100644 --- a/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-range-for-vector.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all" -fclangir-skip-system-headers -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -mconstructor-aliases -fclangir -fclangir-lifetime-check="history=all" -fclangir-skip-system-headers -clangir-verify-diagnostics -emit-cir %s -o %t.cir #include "std-cxx.h" @@ -25,4 +25,4 @@ void swappy(unsigned c) { for (unsigned i = 0; i < c; i++) { images2[i] = {INFO_ENUM_1}; } -} \ No newline at end of file +} diff --git a/clang/test/CIR/Transforms/lifetime-check-string.cpp b/clang/test/CIR/Transforms/lifetime-check-string.cpp index 383f3b5da626..4e5f780aaeee 100644 --- a/clang/test/CIR/Transforms/lifetime-check-string.cpp +++ b/clang/test/CIR/Transforms/lifetime-check-string.cpp @@ -1,8 +1,8 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir int strlen(char const *); -struct [[gsl::Owner(char *)]] String { +struct [[gsl::Owner(char *)]] String { // expected-remark {{pset => { fn_arg:0 }}} long size; long capacity; const char *storage; @@ -11,7 +11,7 @@ struct [[gsl::Owner(char *)]] String { String(char const *s) : size{strlen(s)}, capacity{size}, storage{s} {} }; -struct [[gsl::Pointer(int)]] StringView { +struct [[gsl::Pointer(int)]] StringView { // expected-remark {{pset => { fn_arg:0 }}} long size; const char *storage; char operator[](int); @@ -84,4 +84,4 @@ void sv3() { cout << name; // expected-note {{invalidated by non-const use of owner type}} cout << sv; // expected-warning {{passing invalid pointer 'sv'}} // expected-remark@-1 {{pset => { invalid }}} -} \ No newline at end of file +} diff --git a/clang/test/CIR/Transforms/lifetime-fn-args.cpp b/clang/test/CIR/Transforms/lifetime-fn-args.cpp index 6c1b297f1b32..eea6ae863d3c 100644 --- a/clang/test/CIR/Transforms/lifetime-fn-args.cpp +++ b/clang/test/CIR/Transforms/lifetime-fn-args.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -fclangir-lifetime-check="history=all;remarks=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir struct A { void* ctx; @@ -9,4 +9,4 @@ void A::setInfo(void** ctxPtr) { if (ctxPtr != nullptr) { *ctxPtr = ctx; // expected-remark {{pset => { fn_arg:1 }}} } -} \ No newline at end of file +} diff --git a/clang/test/CIR/Transforms/lifetime-null-passing.cpp b/clang/test/CIR/Transforms/lifetime-null-passing.cpp index e26210b56234..05c97f8df3b0 100644 --- a/clang/test/CIR/Transforms/lifetime-null-passing.cpp +++ b/clang/test/CIR/Transforms/lifetime-null-passing.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -mconstructor-aliases -fclangir -fclangir-lifetime-check="history=all" -clangir-verify-diagnostics -emit-cir %s -o %t.cir class _j {}; typedef _j* jobj; @@ -20,4 +20,4 @@ struct X { void nullpassing() { jobj o = nullptr; X::e.wildfn(0, &o); -} \ No newline at end of file +} diff --git a/clang/test/CIR/Transforms/lifetime-this.cpp b/clang/test/CIR/Transforms/lifetime-this.cpp index 8e18af8a9e16..78eb7ef3e4eb 100644 --- a/clang/test/CIR/Transforms/lifetime-this.cpp +++ b/clang/test/CIR/Transforms/lifetime-this.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -mconstructor-aliases -fclangir -clangir-disable-emit-cxx-default -fclangir-lifetime-check="history=all;remarks=all" -fclangir-skip-system-headers -clangir-verify-diagnostics -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -mconstructor-aliases -fclangir -fclangir-lifetime-check="history=all;remarks=all" -fclangir-skip-system-headers -clangir-verify-diagnostics -emit-cir %s -o %t.cir #include "std-cxx.h" @@ -9,4 +9,4 @@ struct S { void S::f(int a, int b) { std::shared_ptr l = std::make_shared(a, b, this); // expected-remark {{pset => { this }}} -} \ No newline at end of file +} From f1b4ba28635226f91ab67a61c54aa8ad2ca2c6d2 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Fri, 6 Dec 2024 23:54:44 +0800 Subject: [PATCH 2144/2301] [CIR] Make use of !invariant.group metadata for const allocas (#1159) This PR updates the LLVM lowering part of load and stores to const allocas and makes use of the !invariant.group metadata in the result LLVM IR. The HoistAlloca pass is also updated. The const flag on a hoisted alloca is removed for now since their uses are not always invariants. Will update in later PRs to teach their invariants. --- .../CIR/Dialect/Transforms/HoistAllocas.cpp | 12 +++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 35 +++++++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 14 +++- clang/test/CIR/CodeGen/const-alloca.cpp | 69 ++++++++++++++----- 4 files changed, 102 insertions(+), 28 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp index 003e5425ebaa..4b29c7235a02 100644 --- a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp +++ b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp @@ -48,8 +48,18 @@ static void process(cir::FuncOp func) { mlir::Operation *insertPoint = &*entryBlock.begin(); - for (auto alloca : allocas) + for (auto alloca : allocas) { alloca->moveBefore(insertPoint); + if (alloca.getConstant()) { + // Hoisted alloca may come from the body of a loop, in which case the + // stack slot is re-used by multiple objects alive in different iterations + // of the loop. In theory, each of these objects are still constant within + // their lifetimes, but currently we're not emitting metadata to further + // describe this. So for now let's behave conservatively and remove the + // const flag on nested allocas when hoisting them. + alloca.setConstant(false); + } + } } void HoistAllocasPass::runOnOperation() { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 2fafc9f08f8a..d2c1e765cabd 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -959,8 +959,7 @@ mlir::LogicalResult CIRToLLVMVTTAddrPointOpLowering::matchAndRewrite( if (op.getSymAddr()) { if (op.getOffset() == 0) { - rewriter.replaceAllUsesWith(op, llvmAddr); - rewriter.eraseOp(op); + rewriter.replaceOp(op, {llvmAddr}); return mlir::success(); } @@ -1490,11 +1489,21 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite( alignment = *alignOpt; } - // TODO: nontemporal, invariant, syncscope. + auto invariant = false; + // Under -O1 or higher optimization levels, add the invariant metadata if the + // load operation loads from a constant object. + if (lowerMod && + lowerMod->getContext().getCodeGenOpts().OptimizationLevel > 0) { + auto addrAllocaOp = + mlir::dyn_cast_if_present(op.getAddr().getDefiningOp()); + invariant = addrAllocaOp && addrAllocaOp.getConstant(); + } + + // TODO: nontemporal, syncscope. rewriter.replaceOpWithNewOp( op, llvmTy, adaptor.getAddr(), /* alignment */ alignment, op.getIsVolatile(), /* nontemporal */ false, - /* invariant */ false, /* invariantGroup */ false, ordering); + /* invariant */ false, /* invariantGroup */ invariant, ordering); return mlir::LogicalResult::success(); } @@ -1515,10 +1524,20 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite( alignment = *alignOpt; } + auto invariant = false; + // Under -O1 or higher optimization levels, add the invariant metadata if the + // store operation stores to a constant object. + if (lowerMod && + lowerMod->getContext().getCodeGenOpts().OptimizationLevel > 0) { + auto addrAllocaOp = + mlir::dyn_cast_if_present(op.getAddr().getDefiningOp()); + invariant = addrAllocaOp && addrAllocaOp.getConstant(); + } + // TODO: nontemporal, syncscope. rewriter.replaceOpWithNewOp( op, adaptor.getValue(), adaptor.getAddr(), alignment, op.getIsVolatile(), - /* nontemporal */ false, /* invariantGroup */ false, ordering); + /* nontemporal */ false, /* invariantGroup */ invariant, ordering); return mlir::LogicalResult::success(); } @@ -3887,7 +3906,9 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMConstantOpLowering, CIRToLLVMDerivedDataMemberOpLowering, CIRToLLVMGetRuntimeMemberOpLowering, - CIRToLLVMGlobalOpLowering + CIRToLLVMGlobalOpLowering, + CIRToLLVMLoadOpLowering, + CIRToLLVMStoreOpLowering // clang-format on >(converter, patterns.getContext(), lowerModule); patterns.add< @@ -3938,7 +3959,6 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMIsConstantOpLowering, CIRToLLVMIsFPClassOpLowering, CIRToLLVMLLVMIntrinsicCallOpLowering, - CIRToLLVMLoadOpLowering, CIRToLLVMMemChrOpLowering, CIRToLLVMMemCpyInlineOpLowering, CIRToLLVMMemCpyOpLowering, @@ -3958,7 +3978,6 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMShiftOpLowering, CIRToLLVMSignBitOpLowering, CIRToLLVMStackSaveOpLowering, - CIRToLLVMStoreOpLowering, CIRToLLVMSwitchFlatOpLowering, CIRToLLVMThrowOpLowering, CIRToLLVMTrapOpLowering, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 14d33404b466..48baae2ae799 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -301,8 +301,13 @@ class CIRToLLVMAllocaOpLowering }; class CIRToLLVMLoadOpLowering : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; + public: - using mlir::OpConversionPattern::OpConversionPattern; + CIRToLLVMLoadOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + cir::LowerModule *lowerModule) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) {} mlir::LogicalResult matchAndRewrite(cir::LoadOp op, OpAdaptor, @@ -311,8 +316,13 @@ class CIRToLLVMLoadOpLowering : public mlir::OpConversionPattern { class CIRToLLVMStoreOpLowering : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; + public: - using mlir::OpConversionPattern::OpConversionPattern; + CIRToLLVMStoreOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + cir::LowerModule *lowerModule) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) {} mlir::LogicalResult matchAndRewrite(cir::StoreOp op, OpAdaptor, diff --git a/clang/test/CIR/CodeGen/const-alloca.cpp b/clang/test/CIR/CodeGen/const-alloca.cpp index c15e77d306ed..9247b2692474 100644 --- a/clang/test/CIR/CodeGen/const-alloca.cpp +++ b/clang/test/CIR/CodeGen/const-alloca.cpp @@ -1,5 +1,7 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CIR %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -O1 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s int produce_int(); void blackbox(const int &); @@ -8,33 +10,33 @@ void local_const_int() { const int x = produce_int(); } -// CHECK-LABEL: @_Z15local_const_intv -// CHECK: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init, const] -// CHECK: } +// CIR-LABEL: @_Z15local_const_intv +// CIR: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init, const] +// CIR: } void param_const_int(const int x) {} -// CHECK-LABEL: @_Z15param_const_inti -// CHECK: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init, const] -// CHECK: } +// CIR-LABEL: @_Z15param_const_inti +// CIR: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init, const] +// CIR: } void local_constexpr_int() { constexpr int x = 42; blackbox(x); } -// CHECK-LABEL: @_Z19local_constexpr_intv -// CHECK: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init, const] -// CHECK: } +// CIR-LABEL: @_Z19local_constexpr_intv +// CIR: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init, const] +// CIR: } void local_reference() { int x = 0; int &r = x; } -// CHECK-LABEL: @_Z15local_referencev -// CHECK: %{{.+}} = cir.alloca !cir.ptr, !cir.ptr>, ["r", init, const] -// CHECK: } +// CIR-LABEL: @_Z15local_referencev +// CIR: %{{.+}} = cir.alloca !cir.ptr, !cir.ptr>, ["r", init, const] +// CIR: } struct Foo { int a; @@ -47,6 +49,39 @@ void local_const_struct() { const Foo x = produce_foo(); } -// CHECK-LABEL: @_Z18local_const_structv -// CHECK: %{{.+}} = cir.alloca !ty_Foo, !cir.ptr, ["x", init, const] -// CHECK: } +// CIR-LABEL: @_Z18local_const_structv +// CIR: %{{.+}} = cir.alloca !ty_Foo, !cir.ptr, ["x", init, const] +// CIR: } + +[[clang::optnone]] +int local_const_load_store() { + const int x = produce_int(); + int y = x; + return y; +} + +// CIR-LABEL: @_Z22local_const_load_storev +// CIR: %{{.+}} = cir.alloca !s32i, !cir.ptr, ["x", init, const] {alignment = 4 : i64} +// CIR: } + +// LLVM-LABEL: @_Z22local_const_load_storev +// LLVM: %[[#INIT:]] = call i32 @_Z11produce_intv() +// LLVM-NEXT: store i32 %[[#INIT]], ptr %[[#SLOT:]], align 4, !invariant.group !{{.+}} +// LLVM-NEXT: %{{.+}} = load i32, ptr %[[#SLOT]], align 4, !invariant.group !{{.+}} +// LLVM: } + +int local_const_optimize() { + const int x = produce_int(); + blackbox(x); + blackbox(x); + return x; +} + +// LLVM-LABEL: @_Z20local_const_optimizev() +// LLVM-NEXT: %[[#slot:]] = alloca i32, align 4 +// LLVM-NEXT: %[[#init:]] = tail call i32 @_Z11produce_intv() +// LLVM-NEXT: store i32 %[[#init]], ptr %[[#slot]], align 4, !invariant.group !{{.+}} +// LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#slot]]) +// LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#slot]]) +// LLVM-NEXT: ret i32 %[[#init]] +// LLVM-NEXT: } From d219ab905c64cc43a190f18ba32de0ecdfd9ec18 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Sat, 7 Dec 2024 01:21:25 +0800 Subject: [PATCH 2145/2301] [CIR][CIRGen] Add CIRGen support for assume statement (#1205) This PR adds CIRGen support for C++23 `[[assume(expr)]]` statement. --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 11 ++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 4 ++++ clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 10 ++++++++++ clang/test/CIR/CodeGen/builtin-assume.cpp | 20 ++++++++++++++++++-- 4 files changed, 42 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index d32306bf3d5e..d7bd1f745f3a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1110,7 +1110,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, if (E->getArg(0)->HasSideEffects(getContext())) return RValue::get(nullptr); - mlir::Value argValue = emitScalarExpr(E->getArg(0)); + mlir::Value argValue = emitCheckedArgForAssume(E->getArg(0)); builder.create(getLoc(E->getExprLoc()), argValue); return RValue::get(nullptr); } @@ -2478,6 +2478,15 @@ mlir::Value CIRGenFunction::emitCheckedArgForBuiltin(const Expr *E, llvm_unreachable("NYI"); } +mlir::Value CIRGenFunction::emitCheckedArgForAssume(const Expr *E) { + mlir::Value argValue = evaluateExprAsBool(E); + if (!SanOpts.has(SanitizerKind::Builtin)) + return argValue; + + assert(!MissingFeatures::sanitizerBuiltin()); + llvm_unreachable("NYI"); +} + static mlir::Value emitTargetArchBuiltinExpr(CIRGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 4172ac1e208d..b6b949e47bf2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1782,6 +1782,10 @@ class CIRGenFunction : public CIRGenTypeCache { /// enabled, a runtime check specified by \p Kind is also emitted. mlir::Value emitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind); + /// Emits an argument for a call to a `__builtin_assume`. If the builtin + /// sanitizer is enabled, a runtime check is also emitted. + mlir::Value emitCheckedArgForAssume(const Expr *E); + /// returns true if aggregate type has a volatile member. /// TODO(cir): this could be a common AST helper between LLVM / CIR. bool hasVolatileMember(QualType T) { diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 410c25891bbb..8708eeecb7e5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -346,6 +346,16 @@ CIRGenFunction::emitAttributedStmt(const AttributedStmt &S) { case attr::AlwaysInline: case attr::MustTail: llvm_unreachable("NIY attributes"); + case attr::CXXAssume: { + const Expr *assumption = cast(A)->getAssumption(); + if (getLangOpts().CXXAssumptions && builder.getInsertionBlock() && + !assumption->HasSideEffects(getContext())) { + mlir::Value assumptionValue = emitCheckedArgForAssume(assumption); + builder.create(getLoc(S.getSourceRange()), + assumptionValue); + } + break; + } default: break; } diff --git a/clang/test/CIR/CodeGen/builtin-assume.cpp b/clang/test/CIR/CodeGen/builtin-assume.cpp index 88e8ad11565e..8d7448a2724d 100644 --- a/clang/test/CIR/CodeGen/builtin-assume.cpp +++ b/clang/test/CIR/CodeGen/builtin-assume.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++23 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++23 -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck %s --check-prefix=LLVM --input-file=%t.ll int test_assume(int x) { @@ -19,6 +19,22 @@ int test_assume(int x) { // LLVM: %[[#cond:]] = trunc i8 %{{.+}} to i1 // LLVM-NEXT: call void @llvm.assume(i1 %[[#cond]]) +int test_assume_attr(int x) { + [[assume(x > 0)]]; + return x; +} + +// CIR: cir.func @_Z16test_assume_attri +// CIR: %[[#x:]] = cir.load %{{.+}} : !cir.ptr, !s32i +// CIR-NEXT: %[[#zero:]] = cir.const #cir.int<0> : !s32i +// CIR-NEXT: %[[#cond:]] = cir.cmp(gt, %[[#x]], %[[#zero]]) : !s32i, !cir.bool +// CIR-NEXT: cir.assume %[[#cond]] : !cir.bool +// CIR: } + +// LLVM: @_Z16test_assume_attri +// LLVM: %[[#cond:]] = trunc i8 %{{.+}} to i1 +// LLVM-NEXT: call void @llvm.assume(i1 %[[#cond]]) + int test_assume_aligned(int *ptr) { int *aligned = (int *)__builtin_assume_aligned(ptr, 8); return *aligned; From 23b6ff578aedd3f5cb06011a8b186ec2e7183852 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 6 Dec 2024 12:23:31 -0500 Subject: [PATCH 2146/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vbsl_v and neon_vbslq_v (#1206) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 25 +- clang/test/CIR/CodeGen/AArch64/neon.c | 627 +++++++++++------- 2 files changed, 404 insertions(+), 248 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index cdea3ed725b4..ce5ff2a82244 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2178,6 +2178,19 @@ getHalfEltSizeTwiceNumElemsVecType(CIRGenBuilderTy &builder, vecTy.getSize() * 2); } +static cir::VectorType +castVecOfFPTypeToVecOfIntWithSameWidth(CIRGenBuilderTy &builder, + cir::VectorType vecTy) { + if (mlir::isa(vecTy.getEltType())) + return cir::VectorType::get(builder.getContext(), builder.getSInt32Ty(), + vecTy.getSize()); + if (mlir::isa(vecTy.getEltType())) + return cir::VectorType::get(builder.getContext(), builder.getSInt64Ty(), + vecTy.getSize()); + llvm_unreachable( + "Unsupported element type in getVecOfIntTypeWithSameEltWidth"); +} + /// Get integer from a mlir::Value that is an int constant or a constant op. static int64_t getIntValueFromConstOp(mlir::Value val) { auto constOp = mlir::cast(val.getDefiningOp()); @@ -3837,7 +3850,17 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, return nullptr; case NEON::BI__builtin_neon_vbsl_v: case NEON::BI__builtin_neon_vbslq_v: { - llvm_unreachable("NEON::BI__builtin_neon_vbslq_v NYI"); + cir::VectorType bitTy = vTy; + if (cir::isAnyFloatingPointType(bitTy.getEltType())) + bitTy = castVecOfFPTypeToVecOfIntWithSameWidth(builder, vTy); + Ops[0] = builder.createBitcast(Ops[0], bitTy); + Ops[1] = builder.createBitcast(Ops[1], bitTy); + Ops[2] = builder.createBitcast(Ops[2], bitTy); + + Ops[1] = builder.createAnd(Ops[0], Ops[1]); + Ops[2] = builder.createAnd(builder.createNot(Ops[0]), Ops[2]); + Ops[0] = builder.createOr(Ops[1], Ops[2]); + return builder.createBitcast(Ops[0], ty); } case NEON::BI__builtin_neon_vfma_lane_v: case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 9e2ebaa11d4a..10acbf34f7d8 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -1022,136 +1022,203 @@ float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) { // LLVM: ret <2 x double> [[VABD_F]] } -// NYI-LABEL: @test_vbsl_s8( -// NYI: [[VBSL_I:%.*]] = and <8 x i8> %v1, %v2 -// NYI: [[TMP0:%.*]] = xor <8 x i8> %v1, -// NYI: [[VBSL1_I:%.*]] = and <8 x i8> [[TMP0]], %v3 -// NYI: [[VBSL2_I:%.*]] = or <8 x i8> [[VBSL_I]], [[VBSL1_I]] -// NYI: ret <8 x i8> [[VBSL2_I]] -// int8x8_t test_vbsl_s8(uint8x8_t v1, int8x8_t v2, int8x8_t v3) { -// return vbsl_s8(v1, v2, v3); -// } +int8x8_t test_vbsl_s8(uint8x8_t v1, int8x8_t v2, int8x8_t v3) { + return vbsl_s8(v1, v2, v3); + + // CIR-LABEL: vbsl_s8 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbsl_s8(<8 x i8>{{.*}}[[v1:%.*]], <8 x i8>{{.*}}[[v2:%.*]], <8 x i8>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL_I:%.*]] = and <8 x i8> [[v1]], [[v2]] + // LLVM: [[TMP0:%.*]] = xor <8 x i8> [[v1]], splat (i8 -1) + // LLVM: [[VBSL1_I:%.*]] = and <8 x i8> [[TMP0]], [[v3]] + // LLVM: [[VBSL2_I:%.*]] = or <8 x i8> [[VBSL_I]], [[VBSL1_I]] + // LLVM: ret <8 x i8> [[VBSL2_I]] +} -// NYI-LABEL: @test_vbsl_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <4 x i16> %v3 to <8 x i8> -// NYI: [[VBSL3_I:%.*]] = and <4 x i16> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <4 x i16> %v1, -// NYI: [[VBSL4_I:%.*]] = and <4 x i16> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <4 x i16> [[VBSL3_I]], [[VBSL4_I]] -// NYI: [[TMP4:%.*]] = bitcast <4 x i16> [[VBSL5_I]] to <8 x i8> -// NYI: ret <8 x i8> [[TMP4]] -// int8x8_t test_vbsl_s16(uint16x4_t v1, int16x4_t v2, int16x4_t v3) { -// return (int8x8_t)vbsl_s16(v1, v2, v3); -// } +int8x8_t test_vbsl_s16(uint16x4_t v1, int16x4_t v2, int16x4_t v3) { + return (int8x8_t)vbsl_s16(v1, v2, v3); + + // CIR-LABEL: vbsl_s16 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbsl_s16(<4 x i16>{{.*}}[[v1:%.*]], <4 x i16>{{.*}}[[v2:%.*]], <4 x i16>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL3_I:%.*]] = and <4 x i16> [[v1]], [[v2]] + // LLVM: [[TMP3:%.*]] = xor <4 x i16> [[v1]], splat (i16 -1) + // LLVM: [[VBSL4_I:%.*]] = and <4 x i16> [[TMP3]], [[v3]] + // LLVM: [[VBSL5_I:%.*]] = or <4 x i16> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: [[TMP4:%.*]] = bitcast <4 x i16> [[VBSL5_I]] to <8 x i8> + // LLVM: ret <8 x i8> [[TMP4]] +} -// NYI-LABEL: @test_vbsl_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <2 x i32> %v3 to <8 x i8> -// NYI: [[VBSL3_I:%.*]] = and <2 x i32> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <2 x i32> %v1, -// NYI: [[VBSL4_I:%.*]] = and <2 x i32> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <2 x i32> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <2 x i32> [[VBSL5_I]] -// int32x2_t test_vbsl_s32(uint32x2_t v1, int32x2_t v2, int32x2_t v3) { -// return vbsl_s32(v1, v2, v3); -// } - -// NYI-LABEL: @test_vbsl_s64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %v2 to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <1 x i64> %v3 to <8 x i8> -// NYI: [[VBSL3_I:%.*]] = and <1 x i64> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <1 x i64> %v1, -// NYI: [[VBSL4_I:%.*]] = and <1 x i64> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <1 x i64> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <1 x i64> [[VBSL5_I]] -// int64x1_t test_vbsl_s64(uint64x1_t v1, int64x1_t v2, int64x1_t v3) { -// return vbsl_s64(v1, v2, v3); -// } - -// NYI-LABEL: @test_vbsl_u8( -// NYI: [[VBSL_I:%.*]] = and <8 x i8> %v1, %v2 -// NYI: [[TMP0:%.*]] = xor <8 x i8> %v1, -// NYI: [[VBSL1_I:%.*]] = and <8 x i8> [[TMP0]], %v3 -// NYI: [[VBSL2_I:%.*]] = or <8 x i8> [[VBSL_I]], [[VBSL1_I]] -// NYI: ret <8 x i8> [[VBSL2_I]] -// uint8x8_t test_vbsl_u8(uint8x8_t v1, uint8x8_t v2, uint8x8_t v3) { -// return vbsl_u8(v1, v2, v3); -// } +int32x2_t test_vbsl_s32(uint32x2_t v1, int32x2_t v2, int32x2_t v3) { + return vbsl_s32(v1, v2, v3); + + // CIR-LABEL: vbsl_s32 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbsl_s32(<2 x i32>{{.*}}[[v1:%.*]], <2 x i32>{{.*}}[[v2:%.*]], <2 x i32>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL3_I:%.*]] = and <2 x i32> [[v1]], [[v2]] + // LLVM: [[TMP3:%.*]] = xor <2 x i32> [[v1]], splat (i32 -1) + // LLVM: [[VBSL4_I:%.*]] = and <2 x i32> [[TMP3]], [[v3]] + // LLVM: [[VBSL5_I:%.*]] = or <2 x i32> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: ret <2 x i32> [[VBSL5_I]] +} -// NYI-LABEL: @test_vbsl_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %v2 to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <4 x i16> %v3 to <8 x i8> -// NYI: [[VBSL3_I:%.*]] = and <4 x i16> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <4 x i16> %v1, -// NYI: [[VBSL4_I:%.*]] = and <4 x i16> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <4 x i16> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <4 x i16> [[VBSL5_I]] -// uint16x4_t test_vbsl_u16(uint16x4_t v1, uint16x4_t v2, uint16x4_t v3) { -// return vbsl_u16(v1, v2, v3); -// } +int64x1_t test_vbsl_s64(uint64x1_t v1, int64x1_t v2, int64x1_t v3) { + return vbsl_s64(v1, v2, v3); + + // CIR-LABEL: vbsl_s64 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbsl_s64(<1 x i64>{{.*}}[[v1:%.*]], <1 x i64>{{.*}}[[v2:%.*]], <1 x i64>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL3_I:%.*]] = and <1 x i64> [[v1]], [[v2]] + // LLVM: [[TMP3:%.*]] = xor <1 x i64> [[v1]], splat (i64 -1) + // LLVM: [[VBSL4_I:%.*]] = and <1 x i64> [[TMP3]], [[v3]] + // LLVM: [[VBSL5_I:%.*]] = or <1 x i64> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: ret <1 x i64> [[VBSL5_I]] +} -// NYI-LABEL: @test_vbsl_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v2 to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <2 x i32> %v3 to <8 x i8> -// NYI: [[VBSL3_I:%.*]] = and <2 x i32> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <2 x i32> %v1, -// NYI: [[VBSL4_I:%.*]] = and <2 x i32> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <2 x i32> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <2 x i32> [[VBSL5_I]] -// uint32x2_t test_vbsl_u32(uint32x2_t v1, uint32x2_t v2, uint32x2_t v3) { -// return vbsl_u32(v1, v2, v3); -// } - -// NYI-LABEL: @test_vbsl_u64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %v2 to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <1 x i64> %v3 to <8 x i8> -// NYI: [[VBSL3_I:%.*]] = and <1 x i64> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <1 x i64> %v1, -// NYI: [[VBSL4_I:%.*]] = and <1 x i64> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <1 x i64> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <1 x i64> [[VBSL5_I]] -// uint64x1_t test_vbsl_u64(uint64x1_t v1, uint64x1_t v2, uint64x1_t v3) { -// return vbsl_u64(v1, v2, v3); -// } - -// NYI-LABEL: @test_vbsl_f32( -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %v1 to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <2 x float> %v2 to <8 x i8> -// NYI: [[TMP3:%.*]] = bitcast <2 x float> %v3 to <8 x i8> -// NYI: [[VBSL1_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x i32> -// NYI: [[VBSL2_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x i32> -// NYI: [[VBSL3_I:%.*]] = and <2 x i32> %v1, [[VBSL1_I]] -// NYI: [[TMP4:%.*]] = xor <2 x i32> %v1, -// NYI: [[VBSL4_I:%.*]] = and <2 x i32> [[TMP4]], [[VBSL2_I]] -// NYI: [[VBSL5_I:%.*]] = or <2 x i32> [[VBSL3_I]], [[VBSL4_I]] -// NYI: [[TMP5:%.*]] = bitcast <2 x i32> [[VBSL5_I]] to <2 x float> -// NYI: ret <2 x float> [[TMP5]] -// float32x2_t test_vbsl_f32(uint32x2_t v1, float32x2_t v2, float32x2_t v3) { -// return vbsl_f32(v1, v2, v3); -// } - -// NYI-LABEL: @test_vbsl_f64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %v1 to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x double> %v2 to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <1 x double> %v3 to <8 x i8> -// NYI: [[VBSL1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> -// NYI: [[VBSL2_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x i64> -// NYI: [[VBSL3_I:%.*]] = and <1 x i64> %v1, [[VBSL1_I]] -// NYI: [[TMP3:%.*]] = xor <1 x i64> %v1, -// NYI: [[VBSL4_I:%.*]] = and <1 x i64> [[TMP3]], [[VBSL2_I]] -// NYI: [[VBSL5_I:%.*]] = or <1 x i64> [[VBSL3_I]], [[VBSL4_I]] -// NYI: [[TMP4:%.*]] = bitcast <1 x i64> [[VBSL5_I]] to <1 x double> -// NYI: ret <1 x double> [[TMP4]] -// float64x1_t test_vbsl_f64(uint64x1_t v1, float64x1_t v2, float64x1_t v3) { -// return vbsl_f64(v1, v2, v3); -// } +uint8x8_t test_vbsl_u8(uint8x8_t v1, uint8x8_t v2, uint8x8_t v3) { + return vbsl_u8(v1, v2, v3); + + // CIR-LABEL: vbsl_u8 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbsl_u8(<8 x i8>{{.*}}[[v1:%.*]], <8 x i8>{{.*}}[[v2:%.*]], <8 x i8>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL3_I:%.*]] = and <8 x i8> [[v1]], [[v2]] + // LLVM: [[TMP3:%.*]] = xor <8 x i8> [[v1]], splat (i8 -1) + // LLVM: [[VBSL4_I:%.*]] = and <8 x i8> [[TMP3]], [[v3]] + // LLVM: [[VBSL5_I:%.*]] = or <8 x i8> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: ret <8 x i8> [[VBSL5_I]] +} + +uint16x4_t test_vbsl_u16(uint16x4_t v1, uint16x4_t v2, uint16x4_t v3) { + return vbsl_u16(v1, v2, v3); + + // CIR-LABEL: vbsl_u16 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbsl_u16(<4 x i16>{{.*}}[[v1:%.*]], <4 x i16>{{.*}}[[v2:%.*]], <4 x i16>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL3_I:%.*]] = and <4 x i16> [[v1]], [[v2]] + // LLVM: [[TMP3:%.*]] = xor <4 x i16> [[v1]], splat (i16 -1) + // LLVM: [[VBSL4_I:%.*]] = and <4 x i16> [[TMP3]], [[v3]] + // LLVM: [[VBSL5_I:%.*]] = or <4 x i16> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: ret <4 x i16> [[VBSL5_I]] +} + + +uint32x2_t test_vbsl_u32(uint32x2_t v1, uint32x2_t v2, uint32x2_t v3) { + return vbsl_u32(v1, v2, v3); + + // CIR-LABEL: vbsl_u32 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbsl_u32(<2 x i32>{{.*}}[[v1:%.*]], <2 x i32>{{.*}}[[v2:%.*]], <2 x i32>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL3_I:%.*]] = and <2 x i32> [[v1]], [[v2]] + // LLVM: [[TMP3:%.*]] = xor <2 x i32> [[v1]], splat (i32 -1) + // LLVM: [[VBSL4_I:%.*]] = and <2 x i32> [[TMP3]], [[v3]] + // LLVM: [[VBSL5_I:%.*]] = or <2 x i32> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: ret <2 x i32> [[VBSL5_I]] +} + +uint64x1_t test_vbsl_u64(uint64x1_t v1, uint64x1_t v2, uint64x1_t v3) { + return vbsl_u64(v1, v2, v3); + + // CIR-LABEL: vbsl_u64 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbsl_u64(<1 x i64>{{.*}}[[v1:%.*]], <1 x i64>{{.*}}[[v2:%.*]], <1 x i64>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL3_I:%.*]] = and <1 x i64> [[v1]], [[v2]] + // LLVM: [[TMP3:%.*]] = xor <1 x i64> [[v1]], splat (i64 -1) + // LLVM: [[VBSL4_I:%.*]] = and <1 x i64> [[TMP3]], [[v3]] + // LLVM: [[VBSL5_I:%.*]] = or <1 x i64> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: ret <1 x i64> [[VBSL5_I]] +} + +float32x2_t test_vbsl_f32(uint32x2_t v1, float32x2_t v2, float32x2_t v3) { + return vbsl_f32(v1, v2, v3); + + // CIR-LABEL: vbsl_f32 + // CIR: [[v1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v3:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v1_tmp:%.*]] = cir.cast(bitcast, [[v1]] : !cir.vector), !cir.vector + // CIR: [[v2_tmp:%.*]] = cir.cast(bitcast, [[v2]] : !cir.vector), !cir.vector + // CIR: [[v3_tmp:%.*]] = cir.cast(bitcast, [[v3]] : !cir.vector), !cir.vector + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1_tmp]], [[v2_tmp]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1_tmp]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3_tmp]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + // CIR: cir.cast(bitcast, [[VBSL2_I]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vbsl_f32(<2 x i32>{{.*}}[[v1:%.*]], <2 x float>{{.*}}[[v2:%.*]], <2 x float>{{.*}}[[v3:%.*]]) + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[v1]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <2 x float> [[v2]] to <8 x i8> + // LLVM: [[TMP3:%.*]] = bitcast <2 x float> [[v3]] to <8 x i8> + // LLVM: [[VBSL1_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x i32> + // LLVM: [[VBSL2_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x i32> + // LLVM: [[VBSL3_I:%.*]] = and <2 x i32> [[v1]], [[VBSL1_I]] + // LLVM: [[TMP4:%.*]] = xor <2 x i32> [[v1]], splat (i32 -1) + // LLVM: [[VBSL4_I:%.*]] = and <2 x i32> [[TMP4]], [[VBSL2_I]] + // LLVM: [[VBSL5_I:%.*]] = or <2 x i32> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: [[TMP5:%.*]] = bitcast <2 x i32> [[VBSL5_I]] to <2 x float> + // LLVM: ret <2 x float> [[TMP5]] +} + +float64x1_t test_vbsl_f64(uint64x1_t v1, float64x1_t v2, float64x1_t v3) { + return vbsl_f64(v1, v2, v3); + + // CIR-LABEL: vbsl_f64 + // CIR: [[v1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v3:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v1_tmp:%.*]] = cir.cast(bitcast, [[v1]] : !cir.vector), !cir.vector + // CIR: [[v2_tmp:%.*]] = cir.cast(bitcast, [[v2]] : !cir.vector), !cir.vector + // CIR: [[v3_tmp:%.*]] = cir.cast(bitcast, [[v3]] : !cir.vector), !cir.vector + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1_tmp]], [[v2_tmp]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1_tmp]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3_tmp]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + // CIR: cir.cast(bitcast, [[VBSL2_I]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vbsl_f64(<1 x i64>{{.*}}[[v1:%.*]], <1 x double>{{.*}}[[v2:%.*]], <1 x double>{{.*}}[[v3:%.*]]) + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> [[v1]] to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <1 x double> [[v2]] to <8 x i8> + // LLVM: [[TMP3:%.*]] = bitcast <1 x double> [[v3]] to <8 x i8> + // LLVM: [[VBSL1_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x i64> + // LLVM: [[VBSL2_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x i64> + // LLVM: [[VBSL3_I:%.*]] = and <1 x i64> [[v1]], [[VBSL1_I]] + // LLVM: [[TMP4:%.*]] = xor <1 x i64> [[v1]], splat (i64 -1) + // LLVM: [[VBSL4_I:%.*]] = and <1 x i64> [[TMP4]], [[VBSL2_I]] + // LLVM: [[VBSL5_I:%.*]] = or <1 x i64> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: [[TMP5:%.*]] = bitcast <1 x i64> [[VBSL5_I]] to <1 x double> + // LLVM: ret <1 x double> [[TMP5]] +} // NYI-LABEL: @test_vbsl_p8( // NYI: [[VBSL_I:%.*]] = and <8 x i8> %v1, %v2 @@ -1176,119 +1243,201 @@ float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) { // return vbsl_p16(v1, v2, v3); // } -// NYI-LABEL: @test_vbslq_s8( -// NYI: [[VBSL_I:%.*]] = and <16 x i8> %v1, %v2 -// NYI: [[TMP0:%.*]] = xor <16 x i8> %v1, -// NYI: [[VBSL1_I:%.*]] = and <16 x i8> [[TMP0]], %v3 -// NYI: [[VBSL2_I:%.*]] = or <16 x i8> [[VBSL_I]], [[VBSL1_I]] -// NYI: ret <16 x i8> [[VBSL2_I]] -// int8x16_t test_vbslq_s8(uint8x16_t v1, int8x16_t v2, int8x16_t v3) { -// return vbslq_s8(v1, v2, v3); -// } +int8x16_t test_vbslq_s8(uint8x16_t v1, int8x16_t v2, int8x16_t v3) { + return vbslq_s8(v1, v2, v3); -// NYI-LABEL: @test_vbslq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <8 x i16> %v3 to <16 x i8> -// NYI: [[VBSL3_I:%.*]] = and <8 x i16> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <8 x i16> %v1, -// NYI: [[VBSL4_I:%.*]] = and <8 x i16> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <8 x i16> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <8 x i16> [[VBSL5_I]] -// int16x8_t test_vbslq_s16(uint16x8_t v1, int16x8_t v2, int16x8_t v3) { -// return vbslq_s16(v1, v2, v3); -// } + // CIR-LABEL: vbslq_s8 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector -// NYI-LABEL: @test_vbslq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <4 x i32> %v3 to <16 x i8> -// NYI: [[VBSL3_I:%.*]] = and <4 x i32> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <4 x i32> %v1, -// NYI: [[VBSL4_I:%.*]] = and <4 x i32> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <4 x i32> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <4 x i32> [[VBSL5_I]] -// int32x4_t test_vbslq_s32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) { -// return vbslq_s32(v1, v2, v3); -// } + // LLVM: {{.*}}test_vbslq_s8(<16 x i8>{{.*}}[[v1:%.*]], <16 x i8>{{.*}}[[v2:%.*]], <16 x i8>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL_I:%.*]] = and <16 x i8> [[v1]], [[v2]] + // LLVM: [[TMP0:%.*]] = xor <16 x i8> [[v1]], splat (i8 -1) + // LLVM: [[VBSL1_I:%.*]] = and <16 x i8> [[TMP0]], [[v3]] + // LLVM: [[VBSL2_I:%.*]] = or <16 x i8> [[VBSL_I]], [[VBSL1_I]] + // LLVM: ret <16 x i8> [[VBSL2_I]] +} -// NYI-LABEL: @test_vbslq_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %v2 to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <2 x i64> %v3 to <16 x i8> -// NYI: [[VBSL3_I:%.*]] = and <2 x i64> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <2 x i64> %v1, -// NYI: [[VBSL4_I:%.*]] = and <2 x i64> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <2 x i64> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <2 x i64> [[VBSL5_I]] -// int64x2_t test_vbslq_s64(uint64x2_t v1, int64x2_t v2, int64x2_t v3) { -// return vbslq_s64(v1, v2, v3); -// } +int16x8_t test_vbslq_s16(uint16x8_t v1, int16x8_t v2, int16x8_t v3) { + return vbslq_s16(v1, v2, v3); + + // CIR-LABEL: vbslq_s16 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbslq_s16(<8 x i16>{{.*}}[[v1:%.*]], <8 x i16>{{.*}}[[v2:%.*]], <8 x i16>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL_I:%.*]] = and <8 x i16> [[v1]], [[v2]] + // LLVM: [[TMP0:%.*]] = xor <8 x i16> [[v1]], splat (i16 -1) + // LLVM: [[VBSL1_I:%.*]] = and <8 x i16> [[TMP0]], [[v3]] + // LLVM: [[VBSL2_I:%.*]] = or <8 x i16> [[VBSL_I]], [[VBSL1_I]] + // LLVM: ret <8 x i16> [[VBSL2_I]] +} -// NYI-LABEL: @test_vbslq_u8( -// NYI: [[VBSL_I:%.*]] = and <16 x i8> %v1, %v2 -// NYI: [[TMP0:%.*]] = xor <16 x i8> %v1, -// NYI: [[VBSL1_I:%.*]] = and <16 x i8> [[TMP0]], %v3 -// NYI: [[VBSL2_I:%.*]] = or <16 x i8> [[VBSL_I]], [[VBSL1_I]] -// NYI: ret <16 x i8> [[VBSL2_I]] -// uint8x16_t test_vbslq_u8(uint8x16_t v1, uint8x16_t v2, uint8x16_t v3) { -// return vbslq_u8(v1, v2, v3); -// } +int32x4_t test_vbslq_s32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) { + return vbslq_s32(v1, v2, v3); + + // CIR-LABEL: vbslq_s32 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbslq_s32(<4 x i32>{{.*}}[[v1:%.*]], <4 x i32>{{.*}}[[v2:%.*]], <4 x i32>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL_I:%.*]] = and <4 x i32> [[v1]], [[v2]] + // LLVM: [[TMP0:%.*]] = xor <4 x i32> [[v1]], splat (i32 -1) + // LLVM: [[VBSL1_I:%.*]] = and <4 x i32> [[TMP0]], [[v3]] + // LLVM: [[VBSL2_I:%.*]] = or <4 x i32> [[VBSL_I]], [[VBSL1_I]] + // LLVM: ret <4 x i32> [[VBSL2_I]] +} -// NYI-LABEL: @test_vbslq_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %v2 to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <8 x i16> %v3 to <16 x i8> -// NYI: [[VBSL3_I:%.*]] = and <8 x i16> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <8 x i16> %v1, -// NYI: [[VBSL4_I:%.*]] = and <8 x i16> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <8 x i16> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <8 x i16> [[VBSL5_I]] -// uint16x8_t test_vbslq_u16(uint16x8_t v1, uint16x8_t v2, uint16x8_t v3) { -// return vbslq_u16(v1, v2, v3); -// } +int64x2_t test_vbslq_s64(uint64x2_t v1, int64x2_t v2, int64x2_t v3) { + return vbslq_s64(v1, v2, v3); + + // CIR-LABEL: vbslq_s64 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbslq_s64(<2 x i64>{{.*}}[[v1:%.*]], <2 x i64>{{.*}}[[v2:%.*]], <2 x i64>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL_I:%.*]] = and <2 x i64> [[v1]], [[v2]] + // LLVM: [[TMP0:%.*]] = xor <2 x i64> [[v1]], splat (i64 -1) + // LLVM: [[VBSL1_I:%.*]] = and <2 x i64> [[TMP0]], [[v3]] + // LLVM: [[VBSL2_I:%.*]] = or <2 x i64> [[VBSL_I]], [[VBSL1_I]] + // LLVM: ret <2 x i64> [[VBSL2_I]] +} -// NYI-LABEL: @test_vbslq_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %v2 to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <4 x i32> %v3 to <16 x i8> -// NYI: [[VBSL3_I:%.*]] = and <4 x i32> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <4 x i32> %v1, -// NYI: [[VBSL4_I:%.*]] = and <4 x i32> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <4 x i32> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <4 x i32> [[VBSL5_I]] -// int32x4_t test_vbslq_u32(uint32x4_t v1, int32x4_t v2, int32x4_t v3) { -// return vbslq_s32(v1, v2, v3); -// } +uint8x16_t test_vbslq_u8(uint8x16_t v1, uint8x16_t v2, uint8x16_t v3) { + return vbslq_u8(v1, v2, v3); + + // CIR-LABEL: vbslq_u8 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbslq_u8(<16 x i8>{{.*}}[[v1:%.*]], <16 x i8>{{.*}}[[v2:%.*]], <16 x i8>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL_I:%.*]] = and <16 x i8> [[v1]], [[v2]] + // LLVM: [[TMP0:%.*]] = xor <16 x i8> [[v1]], splat (i8 -1) + // LLVM: [[VBSL1_I:%.*]] = and <16 x i8> [[TMP0]], [[v3]] + // LLVM: [[VBSL2_I:%.*]] = or <16 x i8> [[VBSL_I]], [[VBSL1_I]] + // LLVM: ret <16 x i8> [[VBSL2_I]] +} -// NYI-LABEL: @test_vbslq_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %v2 to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <2 x i64> %v3 to <16 x i8> -// NYI: [[VBSL3_I:%.*]] = and <2 x i64> %v1, %v2 -// NYI: [[TMP3:%.*]] = xor <2 x i64> %v1, -// NYI: [[VBSL4_I:%.*]] = and <2 x i64> [[TMP3]], %v3 -// NYI: [[VBSL5_I:%.*]] = or <2 x i64> [[VBSL3_I]], [[VBSL4_I]] -// NYI: ret <2 x i64> [[VBSL5_I]] -// uint64x2_t test_vbslq_u64(uint64x2_t v1, uint64x2_t v2, uint64x2_t v3) { -// return vbslq_u64(v1, v2, v3); -// } +uint16x8_t test_vbslq_u16(uint16x8_t v1, uint16x8_t v2, uint16x8_t v3) { + return vbslq_u16(v1, v2, v3); + + // CIR-LABEL: vbslq_u16 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbslq_u16(<8 x i16>{{.*}}[[v1:%.*]], <8 x i16>{{.*}}[[v2:%.*]], <8 x i16>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL_I:%.*]] = and <8 x i16> [[v1]], [[v2]] + // LLVM: [[TMP0:%.*]] = xor <8 x i16> [[v1]], splat (i16 -1) + // LLVM: [[VBSL1_I:%.*]] = and <8 x i16> [[TMP0]], [[v3]] + // LLVM: [[VBSL2_I:%.*]] = or <8 x i16> [[VBSL_I]], [[VBSL1_I]] + // LLVM: ret <8 x i16> [[VBSL2_I]] +} -// NYI-LABEL: @test_vbslq_f32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <4 x float> %v3 to <16 x i8> -// NYI: [[VBSL1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> -// NYI: [[VBSL2_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32> -// NYI: [[VBSL3_I:%.*]] = and <4 x i32> %v1, [[VBSL1_I]] -// NYI: [[TMP3:%.*]] = xor <4 x i32> %v1, -// NYI: [[VBSL4_I:%.*]] = and <4 x i32> [[TMP3]], [[VBSL2_I]] -// NYI: [[VBSL5_I:%.*]] = or <4 x i32> [[VBSL3_I]], [[VBSL4_I]] -// NYI: [[TMP4:%.*]] = bitcast <4 x i32> [[VBSL5_I]] to <4 x float> -// NYI: ret <4 x float> [[TMP4]] -// float32x4_t test_vbslq_f32(uint32x4_t v1, float32x4_t v2, float32x4_t v3) { -// return vbslq_f32(v1, v2, v3); -// } +uint32x4_t test_vbslq_u32(uint32x4_t v1, uint32x4_t v2, uint32x4_t v3) { + return vbslq_u32(v1, v2, v3); + + // CIR-LABEL: vbslq_u32 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbslq_u32(<4 x i32>{{.*}}[[v1:%.*]], <4 x i32>{{.*}}[[v2:%.*]], <4 x i32>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL_I:%.*]] = and <4 x i32> [[v1]], [[v2]] + // LLVM: [[TMP0:%.*]] = xor <4 x i32> [[v1]], splat (i32 -1) + // LLVM: [[VBSL1_I:%.*]] = and <4 x i32> [[TMP0]], [[v3]] + // LLVM: [[VBSL2_I:%.*]] = or <4 x i32> [[VBSL_I]], [[VBSL1_I]] + // LLVM: ret <4 x i32> [[VBSL2_I]] +} + +uint64x2_t test_vbslq_u64(uint64x2_t v1, uint64x2_t v2, uint64x2_t v3) { + return vbslq_u64(v1, v2, v3); + + // CIR-LABEL: vbslq_u64 + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1:%.*]], [[v2:%.*]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3:%.*]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + + // LLVM: {{.*}}test_vbslq_u64(<2 x i64>{{.*}}[[v1:%.*]], <2 x i64>{{.*}}[[v2:%.*]], <2 x i64>{{.*}}[[v3:%.*]]) + // LLVM: [[VBSL_I:%.*]] = and <2 x i64> [[v1]], [[v2]] + // LLVM: [[TMP0:%.*]] = xor <2 x i64> [[v1]], splat (i64 -1) + // LLVM: [[VBSL1_I:%.*]] = and <2 x i64> [[TMP0]], [[v3]] + // LLVM: [[VBSL2_I:%.*]] = or <2 x i64> [[VBSL_I]], [[VBSL1_I]] + // LLVM: ret <2 x i64> [[VBSL2_I]] +} + +float32x4_t test_vbslq_f32(uint32x4_t v1, float32x4_t v2, float32x4_t v3) { + return vbslq_f32(v1, v2, v3); + + // CIR-LABEL: vbslq_f32 + // CIR: [[v1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v3:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v1_tmp:%.*]] = cir.cast(bitcast, [[v1]] : !cir.vector), !cir.vector + // CIR: [[v2_tmp:%.*]] = cir.cast(bitcast, [[v2]] : !cir.vector), !cir.vector + // CIR: [[v3_tmp:%.*]] = cir.cast(bitcast, [[v3]] : !cir.vector), !cir.vector + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1_tmp]], [[v2_tmp]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1_tmp]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3_tmp]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + // CIR: cir.cast(bitcast, [[VBSL2_I]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vbslq_f32(<4 x i32>{{.*}}[[v1:%.*]], <4 x float>{{.*}}[[v2:%.*]], <4 x float>{{.*}}[[v3:%.*]]) + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[v1]] to <16 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <4 x float> [[v2]] to <16 x i8> + // LLVM: [[TMP3:%.*]] = bitcast <4 x float> [[v3]] to <16 x i8> + // LLVM: [[VBSL1_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32> + // LLVM: [[VBSL2_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x i32> + // LLVM: [[VBSL3_I:%.*]] = and <4 x i32> [[v1]], [[VBSL1_I]] + // LLVM: [[TMP4:%.*]] = xor <4 x i32> [[v1]], splat (i32 -1) + // LLVM: [[VBSL4_I:%.*]] = and <4 x i32> [[TMP4]], [[VBSL2_I]] + // LLVM: [[VBSL5_I:%.*]] = or <4 x i32> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: [[TMP5:%.*]] = bitcast <4 x i32> [[VBSL5_I]] to <4 x float> + // LLVM: ret <4 x float> [[TMP5]] +} + +float64x2_t test_vbslq_f64(uint64x2_t v1, float64x2_t v2, float64x2_t v3) { + return vbslq_f64(v1, v2, v3); + + // CIR-LABEL: vbslq_f64 + // CIR: [[v1:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v3:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[v1_tmp:%.*]] = cir.cast(bitcast, [[v1]] : !cir.vector), !cir.vector + // CIR: [[v2_tmp:%.*]] = cir.cast(bitcast, [[v2]] : !cir.vector), !cir.vector + // CIR: [[v3_tmp:%.*]] = cir.cast(bitcast, [[v3]] : !cir.vector), !cir.vector + // CIR: [[VBSL_I:%.*]] = cir.binop(and, [[v1_tmp]], [[v2_tmp]]) : !cir.vector + // CIR: [[TMP0:%.*]] = cir.unary(not, [[v1_tmp]]) : !cir.vector, !cir.vector + // CIR: [[VBSL1_I:%.*]] = cir.binop(and, [[TMP0]], [[v3_tmp]]) : !cir.vector + // CIR: [[VBSL2_I:%.*]] = cir.binop(or, [[VBSL_I]], [[VBSL1_I]]) : !cir.vector + // CIR: cir.cast(bitcast, [[VBSL2_I]] : !cir.vector), !cir.vector + + // LLVM: {{.*}}test_vbslq_f64(<2 x i64>{{.*}}[[v1:%.*]], <2 x double>{{.*}}[[v2:%.*]], <2 x double>{{.*}}[[v3:%.*]]) + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> [[v1]] to <16 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <2 x double> [[v2]] to <16 x i8> + // LLVM: [[TMP3:%.*]] = bitcast <2 x double> [[v3]] to <16 x i8> + // LLVM: [[VBSL1_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64> + // LLVM: [[VBSL2_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64> + // LLVM: [[VBSL3_I:%.*]] = and <2 x i64> [[v1]], [[VBSL1_I]] + // LLVM: [[TMP4:%.*]] = xor <2 x i64> [[v1]], splat (i64 -1) + // LLVM: [[VBSL4_I:%.*]] = and <2 x i64> [[TMP4]], [[VBSL2_I]] + // LLVM: [[VBSL5_I:%.*]] = or <2 x i64> [[VBSL3_I]], [[VBSL4_I]] + // LLVM: [[TMP5:%.*]] = bitcast <2 x i64> [[VBSL5_I]] to <2 x double> + // LLVM: ret <2 x double> [[TMP5]] +} // NYI-LABEL: @test_vbslq_p8( // NYI: [[VBSL_I:%.*]] = and <16 x i8> %v1, %v2 @@ -1313,22 +1462,6 @@ float64x2_t test_vabdq_f64(float64x2_t v1, float64x2_t v2) { // return vbslq_p16(v1, v2, v3); // } -// NYI-LABEL: @test_vbslq_f64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %v1 to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <2 x double> %v3 to <16 x i8> -// NYI: [[VBSL1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> -// NYI: [[VBSL2_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64> -// NYI: [[VBSL3_I:%.*]] = and <2 x i64> %v1, [[VBSL1_I]] -// NYI: [[TMP3:%.*]] = xor <2 x i64> %v1, -// NYI: [[VBSL4_I:%.*]] = and <2 x i64> [[TMP3]], [[VBSL2_I]] -// NYI: [[VBSL5_I:%.*]] = or <2 x i64> [[VBSL3_I]], [[VBSL4_I]] -// NYI: [[TMP4:%.*]] = bitcast <2 x i64> [[VBSL5_I]] to <2 x double> -// NYI: ret <2 x double> [[TMP4]] -// float64x2_t test_vbslq_f64(uint64x2_t v1, float64x2_t v2, float64x2_t v3) { -// return vbslq_f64(v1, v2, v3); -// } - // NYI-LABEL: @test_vrecps_f32( // NYI: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8> // NYI: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8> From 90aa3e5211d0a636d757cdd1153c8b4360cc6d95 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 6 Dec 2024 12:23:58 -0500 Subject: [PATCH 2147/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vaddlvq_u32 (#1208) --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 7 +++++-- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 11 +++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index ce5ff2a82244..26da036dc773 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2646,8 +2646,11 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr( llvm_unreachable(" neon_vaddlv_u32 NYI "); case NEON::BI__builtin_neon_vaddlvq_s32: llvm_unreachable(" neon_vaddlvq_s32 NYI "); - case NEON::BI__builtin_neon_vaddlvq_u32: - llvm_unreachable(" neon_vaddlvq_u32 NYI "); + case NEON::BI__builtin_neon_vaddlvq_u32: { + mlir::Type argTy = cgf.convertType(expr->getArg(0)->getType()); + return emitNeonCall(builder, {argTy}, ops, "aarch64.neon.uaddlv", resultTy, + loc); + } case NEON::BI__builtin_neon_vaddv_f32: llvm_unreachable(" neon_vaddv_f32 NYI "); case NEON::BI__builtin_neon_vaddv_s32: diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index a2d5fdcb8383..c0935a81b835 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -1716,3 +1716,14 @@ uint64x2_t test_vpadalq_u32(uint64x2_t a, uint32x4_t b) { // LLVM: [[TMP2:%.*]] = add <2 x i64> [[VPADAL1_I]], [[a]] // LLVM: ret <2 x i64> [[TMP2]] } + +uint64_t test_vaddlvq_u32(uint32x4_t a) { + return vaddlvq_u32(a); + + // CIR-LABEL: vaddlvq_u32 + // CIR: = cir.llvm.intrinsic "aarch64.neon.uaddlv" {{%.*}} : (!cir.vector) -> !u64i + + // LLVM: {{.*}}@test_vaddlvq_u32(<4 x i32>{{.*}}[[A:%.*]]) + // LLVM-NEXT: [[VADDLVQ_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> [[A]]) + // LLVM-NEXT: ret i64 [[VADDLVQ_U32_I]] +} From 2e3d9739dd3df29e1dd3828ec986c6bb248a55dc Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Fri, 6 Dec 2024 09:25:16 -0800 Subject: [PATCH 2148/2301] [CIR][CIRGen] Flesh out CIRGenModule::Release skeleton (#1209) There's a lot that was missing here. Add NYIs and MissingFeatures as appropriate, so that we can start work on matching CodeGen. --- clang/include/clang/CIR/MissingFeatures.h | 44 +++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 353 ++++++++++++++++++++-- 2 files changed, 371 insertions(+), 26 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 57471c4dc9bb..5ab4473bb7ec 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -108,6 +108,7 @@ struct MissingFeatures { static bool setObjCGCLValueClass() { return false; } static bool objCLifetime() { return false; } static bool objCIvarDecls() { return false; } + static bool objCRuntime() { return false; } // Debug info static bool generateDebugInfo() { return false; } @@ -328,6 +329,49 @@ struct MissingFeatures { static bool noFPClass() { return false; } static bool llvmIntrinsicElementTypeSupport() { return false; } + //-- Missing parts of the CIRGenModule::Release skeleton. + static bool emitModuleInitializers() { return false; } + static bool emittedDeferredDecls() { return false; } + static bool emitVTablesOpportunistically() { return false; } + static bool applyGlobalValReplacements() { return false; } + static bool emitMultiVersionFunctions() { return false; } + static bool incrementalExtensions() { return false; } + static bool emitCXXModuleInitFunc() { return false; } + static bool emitCXXGlobalCleanUpFunc() { return false; } + static bool registerGlobalDtorsWithAtExit() { return false; } + static bool emitCXXThreadLocalInitFunc() { return false; } + static bool pgoReader() { return false; } + static bool emitCtorList() { return false; } + static bool emitStaticExternCAliases() { return false; } + static bool checkAliases() { return false; } + static bool emitDeferredUnusedCoverageMappings() { return false; } + static bool cirGenPGO() { return false; } + static bool coverageMapping() { return false; } + static bool emitAtAvailableLinkGuard() { return false; } + static bool emitLLVMUsed() { return false; } + static bool sanStats() { return false; } + static bool linkerOptionsMetadata() { return false; } + static bool emitModuleLinkOptions() { return false; } + static bool elfDependentLibraries() { return false; } + static bool dwarfVersion() { return false; } + static bool wcharWidth() { return false; } + static bool enumWidth() { return false; } + static bool setPICLevel() { return false; } + static bool setPIELevel() { return false; } + static bool codeModel() { return false; } + static bool largeDataThreshold() { return false; } + static bool directAccessExternalData() { return false; } + static bool setUwtable() { return false; } + static bool setFramePointer() { return false; } + static bool simplifyPersonality() { return false; } + static bool emitVersionIdentMetadata() { return false; } + static bool emitTargetGlobals() { return false; } + static bool emitTargetMetadata() { return false; } + static bool emitBackendOptionsMetadata() { return false; } + static bool embedObject() { return false; } + static bool setVisibilityFromDLLStorageClass() { return false; } + static bool mustTailCallUndefinedGlobals() { return false; } + //-- Other missing features // We need to track the parent record types that represent a field diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 0582a95dc456..17b1f6e0ce3f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2968,50 +2968,244 @@ CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { } void CIRGenModule::Release() { + assert(!MissingFeatures::emitModuleInitializers()); emitDeferred(getCodeGenOpts().ClangIRBuildDeferredThreshold); - // TODO: emitVTablesOpportunistically(); - // TODO: applyGlobalValReplacements(); + assert(!MissingFeatures::emittedDeferredDecls()); + assert(!MissingFeatures::emitVTablesOpportunistically()); + assert(!MissingFeatures::applyGlobalValReplacements()); applyReplacements(); - // TODO: checkAliases(); - // TODO: emitMultiVersionFunctions(); + assert(!MissingFeatures::emitMultiVersionFunctions()); + + assert(!MissingFeatures::incrementalExtensions()); + + assert(!MissingFeatures::emitCXXModuleInitFunc()); emitCXXGlobalInitFunc(); - // TODO: emitCXXGlobalCleanUpFunc(); - // TODO: registerGlobalDtorsWithAtExit(); - // TODO: emitCXXThreadLocalInitFunc(); - // TODO: ObjCRuntime + assert(!MissingFeatures::emitCXXGlobalCleanUpFunc()); + assert(!MissingFeatures::registerGlobalDtorsWithAtExit()); + assert(!MissingFeatures::emitCXXThreadLocalInitFunc()); + assert(!MissingFeatures::objCRuntime()); if (astCtx.getLangOpts().CUDA) { llvm_unreachable("NYI"); } - // TODO: OpenMPRuntime - // TODO: PGOReader - // TODO: emitCtorList(GlobalCtors); - // TODO: builtCtorList(GlobalDtors); + assert(!MissingFeatures::openMPRuntime()); + assert(!MissingFeatures::pgoReader()); + assert(!MissingFeatures::emitCtorList()); // GlobalCtors, GlobalDtors emitGlobalAnnotations(); - // TODO: emitDeferredUnusedCoverageMappings(); - // TODO: CIRGenPGO - // TODO: CoverageMapping + assert(!MissingFeatures::emitStaticExternCAliases()); + assert(!MissingFeatures::checkAliases()); + assert(!MissingFeatures::emitDeferredUnusedCoverageMappings()); + assert(!MissingFeatures::cirGenPGO()); // setValueProfilingFlag, + // setProfileVersion + assert(!MissingFeatures::coverageMapping()); if (getCodeGenOpts().SanitizeCfiCrossDso) { llvm_unreachable("NYI"); } - // TODO: emitAtAvailableLinkGuard(); - if (astCtx.getTargetInfo().getTriple().isWasm() && - !astCtx.getTargetInfo().getTriple().isOSEmscripten()) { + if (langOpts.Sanitize.has(SanitizerKind::KCFI)) + llvm_unreachable("NYI"); + assert(!MissingFeatures::emitAtAvailableLinkGuard()); + if (astCtx.getTargetInfo().getTriple().isWasm()) + llvm_unreachable("NYI"); + + if (getTriple().isAMDGPU() || + (getTriple().isSPIRV() && getTriple().getVendor() == llvm::Triple::AMD)) { + llvm_unreachable("NYI"); + } + + // Emit a global array containing all external kernels or device variables + // used by host functions and mark it as used for CUDA/HIP. This is necessary + // to get kernels or device variables in archives linked in even if these + // kernels or device variables are only used in host functions. + if (!astCtx.CUDAExternalDeviceDeclODRUsedByHost.empty()) { + llvm_unreachable("NYI"); + } + if (langOpts.HIP && !getLangOpts().OffloadingNewDriver) { + llvm_unreachable("NYI"); + } + assert(!MissingFeatures::emitLLVMUsed()); + assert(!MissingFeatures::sanStats()); + + if (codeGenOpts.Autolink && (astCtx.getLangOpts().Modules || + !MissingFeatures::linkerOptionsMetadata())) { + assert(!MissingFeatures::emitModuleLinkOptions()); + } + + // On ELF we pass the dependent library specifiers directly to the linker + // without manipulating them. This is in contrast to other platforms where + // they are mapped to a specific linker option by the compiler. This + // difference is a result of the greater variety of ELF linkers and the fact + // that ELF linkers tend to handle libraries in a more complicated fashion + // than on other platforms. This forces us to defer handling the dependent + // libs to the linker. + // + // CUDA/HIP device and host libraries are different. Currently there is no + // way to differentiate dependent libraries for host or device. Existing + // usage of #pragma comment(lib, *) is intended for host libraries on + // Windows. Therefore emit llvm.dependent-libraries only for host. + assert(!MissingFeatures::elfDependentLibraries()); + + assert(!MissingFeatures::dwarfVersion()); + + if (codeGenOpts.Dwarf64) + llvm_unreachable("NYI"); + + if (astCtx.getLangOpts().SemanticInterposition) + // Require various optimization to respect semantic interposition. + llvm_unreachable("NYI"); + + if (codeGenOpts.EmitCodeView) { + // Indicate that we want CodeView in the metadata. + llvm_unreachable("NYI"); + } + if (codeGenOpts.CodeViewGHash) { + llvm_unreachable("NYI"); + } + if (codeGenOpts.ControlFlowGuard) { + // Function ID tables and checks for Control Flow Guard (cfguard=2). + llvm_unreachable("NYI"); + } else if (codeGenOpts.ControlFlowGuardNoChecks) { + // Function ID tables for Control Flow Guard (cfguard=1). + llvm_unreachable("NYI"); + } + if (codeGenOpts.EHContGuard) { + // Function ID tables for EH Continuation Guard. + llvm_unreachable("NYI"); + } + if (astCtx.getLangOpts().Kernel) { + // Note if we are compiling with /kernel. + llvm_unreachable("NYI"); + } + if (codeGenOpts.OptimizationLevel > 0 && codeGenOpts.StrictVTablePointers) { + // We don't support LTO with 2 with different StrictVTablePointers + // FIXME: we could support it by stripping all the information introduced + // by StrictVTablePointers. llvm_unreachable("NYI"); } + if (getModuleDebugInfo()) + // We support a single version in the linked module. The LLVM + // parser will drop debug info with a different version number + // (and warn about it, too). + llvm_unreachable("NYI"); + + // We need to record the widths of enums and wchar_t, so that we can generate + // the correct build attributes in the ARM backend. wchar_size is also used by + // TargetLibraryInfo. + assert(!MissingFeatures::wcharWidth()); - // Emit reference of __amdgpu_device_library_preserve_asan_functions to - // preserve ASAN functions in bitcode libraries. - if (getLangOpts().Sanitize.has(SanitizerKind::Address)) { + if (getTriple().isOSzOS()) { llvm_unreachable("NYI"); } - // TODO: emitLLVMUsed(); - // TODO: SanStats + llvm::Triple t = astCtx.getTargetInfo().getTriple(); + if (t.isARM() || t.isThumb()) { + // The minimum width of an enum in bytes + assert(!MissingFeatures::enumWidth()); + } - if (getCodeGenOpts().Autolink) { - // TODO: emitModuleLinkOptions + if (t.isRISCV()) { + llvm_unreachable("NYI"); } + if (codeGenOpts.SanitizeCfiCrossDso) { + // Indicate that we want cross-DSO control flow integrity checks. + llvm_unreachable("NYI"); + } + + if (codeGenOpts.WholeProgramVTables) { + // Indicate whether VFE was enabled for this module, so that the + // vcall_visibility metadata added under whole program vtables is handled + // appropriately in the optimizer. + llvm_unreachable("NYI"); + } + + if (langOpts.Sanitize.has(SanitizerKind::CFIICall)) { + llvm_unreachable("NYI"); + } + + if (codeGenOpts.SanitizeCfiICallNormalizeIntegers) { + llvm_unreachable("NYI"); + } + + if (langOpts.Sanitize.has(SanitizerKind::KCFI)) { + llvm_unreachable("NYI"); + } + + if (codeGenOpts.CFProtectionReturn && + target.checkCFProtectionReturnSupported(getDiags())) { + // Indicate that we want to instrument return control flow protection. + llvm_unreachable("NYI"); + } + + if (codeGenOpts.CFProtectionBranch && + target.checkCFProtectionBranchSupported(getDiags())) { + // Indicate that we want to instrument branch control flow protection. + llvm_unreachable("NYI"); + } + + if (codeGenOpts.FunctionReturnThunks) + llvm_unreachable("NYI"); + + if (codeGenOpts.IndirectBranchCSPrefix) + llvm_unreachable("NYI"); + + // Add module metadata for return address signing (ignoring + // non-leaf/all) and stack tagging. These are actually turned on by function + // attributes, but we use module metadata to emit build attributes. This is + // needed for LTO, where the function attributes are inside bitcode + // serialised into a global variable by the time build attributes are + // emitted, so we can't access them. LTO objects could be compiled with + // different flags therefore module flags are set to "Min" behavior to achieve + // the same end result of the normal build where e.g BTI is off if any object + // doesn't support it. + if (astCtx.getTargetInfo().hasFeature("ptrauth") && + langOpts.getSignReturnAddressScope() != + LangOptions::SignReturnAddressScopeKind::None) + llvm_unreachable("NYI"); + if (langOpts.Sanitize.has(SanitizerKind::MemtagStack)) + llvm_unreachable("NYI"); + + if (t.isARM() || t.isThumb() || t.isAArch64()) { + if (langOpts.BranchTargetEnforcement) + llvm_unreachable("NYI"); + if (langOpts.BranchProtectionPAuthLR) + llvm_unreachable("NYI"); + if (langOpts.GuardedControlStack) + llvm_unreachable("NYI"); + if (langOpts.hasSignReturnAddress()) + llvm_unreachable("NYI"); + if (langOpts.isSignReturnAddressScopeAll()) + llvm_unreachable("NYI"); + if (!langOpts.isSignReturnAddressWithAKey()) + llvm_unreachable("NYI"); + + if (langOpts.PointerAuthELFGOT) + llvm_unreachable("NYI"); + + if (getTriple().isOSLinux()) { + assert(getTriple().isOSBinFormatELF()); + assert(!MissingFeatures::ptrAuth()); + } + } + + if (codeGenOpts.StackClashProtector) + llvm_unreachable("NYI"); + + if (codeGenOpts.StackProbeSize && codeGenOpts.StackProbeSize != 4096) + llvm_unreachable("NYI"); + + if (!codeGenOpts.MemoryProfileOutput.empty()) { + llvm_unreachable("NYI"); + } + + if (langOpts.CUDAIsDevice && getTriple().isNVPTX()) { + llvm_unreachable("NYI"); + } + + if (langOpts.EHAsynch) + llvm_unreachable("NYI"); + + // Indicate whether this Module was compiled with -fopenmp + assert(!MissingFeatures::openMP()); + // Emit OpenCL specific module metadata: OpenCL/SPIR version. if (langOpts.CUDAIsDevice && getTriple().isSPIRV()) llvm_unreachable("CUDA SPIR-V NYI"); @@ -3022,7 +3216,114 @@ void CIRGenModule::Release() { llvm_unreachable("SPIR target NYI"); } - // TODO: FINISH THE REST OF THIS + // HLSL related end of code gen work items. + if (langOpts.HLSL) + llvm_unreachable("NYI"); + + if (uint32_t picLevel = astCtx.getLangOpts().PICLevel) { + assert(picLevel < 3 && "Invalid PIC Level"); + assert(!MissingFeatures::setPICLevel()); + if (astCtx.getLangOpts().PIE) + assert(!MissingFeatures::setPIELevel()); + } + + if (getCodeGenOpts().CodeModel.size() > 0) { + unsigned cm = llvm::StringSwitch(getCodeGenOpts().CodeModel) + .Case("tiny", llvm::CodeModel::Tiny) + .Case("small", llvm::CodeModel::Small) + .Case("kernel", llvm::CodeModel::Kernel) + .Case("medium", llvm::CodeModel::Medium) + .Case("large", llvm::CodeModel::Large) + .Default(~0u); + if (cm != ~0u) { + llvm::CodeModel::Model codeModel = + static_cast(cm); + (void)codeModel; + assert(!MissingFeatures::codeModel()); + + if ((cm == llvm::CodeModel::Medium || cm == llvm::CodeModel::Large) && + astCtx.getTargetInfo().getTriple().getArch() == + llvm::Triple::x86_64) { + assert(!MissingFeatures::largeDataThreshold()); + } + } + } + + if (codeGenOpts.NoPLT) + llvm_unreachable("NYI"); + assert(!MissingFeatures::directAccessExternalData()); + if (codeGenOpts.UnwindTables) + assert(!MissingFeatures::setUwtable()); + + switch (codeGenOpts.getFramePointer()) { + case CodeGenOptions::FramePointerKind::None: + // 0 ("none") is the default. + break; + case CodeGenOptions::FramePointerKind::Reserved: + assert(!MissingFeatures::setFramePointer()); + break; + case CodeGenOptions::FramePointerKind::NonLeaf: + assert(!MissingFeatures::setFramePointer()); + break; + case CodeGenOptions::FramePointerKind::All: + assert(!MissingFeatures::setFramePointer()); + break; + } + + assert(!MissingFeatures::simplifyPersonality()); + + if (getCodeGenOpts().EmitDeclMetadata) + llvm_unreachable("NYI"); + + if (getCodeGenOpts().CoverageNotesFile.size() || + getCodeGenOpts().CoverageDataFile.size()) + llvm_unreachable("NYI"); + + if (getModuleDebugInfo()) + llvm_unreachable("NYI"); + + assert(!MissingFeatures::emitVersionIdentMetadata()); + + if (!getCodeGenOpts().RecordCommandLine.empty()) + llvm_unreachable("NYI"); + + if (!getCodeGenOpts().StackProtectorGuard.empty()) + llvm_unreachable("NYI"); + if (!getCodeGenOpts().StackProtectorGuardReg.empty()) + llvm_unreachable("NYI"); + if (!getCodeGenOpts().StackProtectorGuardSymbol.empty()) + llvm_unreachable("NYI"); + if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX) + llvm_unreachable("NYI"); + if (getCodeGenOpts().StackAlignment) + llvm_unreachable("NYI"); + if (getCodeGenOpts().SkipRaxSetup) + llvm_unreachable("NYI"); + if (getLangOpts().RegCall4) + llvm_unreachable("NYI"); + + if (getASTContext().getTargetInfo().getMaxTLSAlign()) + llvm_unreachable("NYI"); + + assert(!MissingFeatures::emitTargetGlobals()); + + assert(!MissingFeatures::emitTargetMetadata()); + + assert(!MissingFeatures::emitBackendOptionsMetadata()); + + // If there is device offloading code embed it in the host now. + assert(!MissingFeatures::embedObject()); + + // Set visibility from DLL storage class + // We do this at the end of LLVM IR generation; after any operation + // that might affect the DLL storage class or the visibility, and + // before anything that might act on these. + assert(!MissingFeatures::setVisibilityFromDLLStorageClass()); + + // Check the tail call symbols are truly undefined. + if (getTriple().isPPC() && !MissingFeatures::mustTailCallUndefinedGlobals()) { + llvm_unreachable("NYI"); + } } namespace { From afab91d8db5fc8aa52bb52aed6ba0850bc6fc857 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 6 Dec 2024 12:26:19 -0500 Subject: [PATCH 2149/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vaddv_s16 and neon_vaddv_u16 (#1210) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 11 ++++++-- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 26 +++++++++++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 26da036dc773..5e0ce259a0e2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4188,10 +4188,17 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NEON::BI__builtin_neon_vaddv_s8 NYI"); } case NEON::BI__builtin_neon_vaddv_u16: - llvm_unreachable("NEON::BI__builtin_neon_vaddv_u16 NYI"); + usgn = true; [[fallthrough]]; case NEON::BI__builtin_neon_vaddv_s16: { - llvm_unreachable("NEON::BI__builtin_neon_vaddv_s16 NYI"); + cir::IntType eltTy = usgn ? UInt16Ty : SInt16Ty; + cir::VectorType vTy = cir::VectorType::get(builder.getContext(), eltTy, 4); + Ops.push_back(emitScalarExpr(E->getArg(0))); + // This is to add across the vector elements, so wider result type needed. + Ops[0] = emitNeonCall(builder, {vTy}, Ops, + usgn ? "aarch64.neon.uaddv" : "aarch64.neon.saddv", + SInt32Ty, getLoc(E->getExprLoc())); + return builder.createIntCast(Ops[0], eltTy); } case NEON::BI__builtin_neon_vaddvq_u8: llvm_unreachable("NEON::BI__builtin_neon_vaddvq_u8 NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index 7dabbbfc9925..f44a4bb9e465 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -893,3 +893,29 @@ uint32_t test_vaddlvq_u16(uint16x8_t a) { // LLVM: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> [[A]]) // LLVM: ret i32 [[VADDLV_I]] } + +uint16_t test_vaddv_u16(uint16x4_t a) { + return vaddv_u16(a); + + // CIR-LABEL: vaddv_u16 + // CIR: [[VADDV_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.uaddv" {{%.*}} : (!cir.vector) -> !s32i + // CIR: cir.cast(integral, [[VADDV_I]] : !s32i), !u16i + + // LLVM: {{.*}}test_vaddv_u16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> [[A]]) + // LLVM-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16 + // LLVM-NEXT: ret i16 [[TMP0]] +} + +int16_t test_vaddv_s16(int16x4_t a) { + return vaddv_s16(a); + + // CIR-LABEL: vaddv_s16 + // CIR: [[VADDV_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.saddv" {{%.*}} : (!cir.vector) -> !s32i + // CIR: cir.cast(integral, [[VADDV_I]] : !s32i), !s16i + + // LLVM: {{.*}}test_vaddv_s16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> [[A]]) + // LLVM-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16 + // LLVM-NEXT: ret i16 [[TMP0]] +} From eec51a8297d1db70cd8778a7ba276bbd61df5910 Mon Sep 17 00:00:00 2001 From: Congcong Cai Date: Sat, 7 Dec 2024 11:56:25 +0800 Subject: [PATCH 2150/2301] [CIR] revert `StdInitializerListOp` (#1216) Revert "[CIR][Dialect] Introduce StdInitializerListOp to represent high-level semantics of C++ initializer list (#1121)". This reverts commit 7532e0501ab3e8d5aa4c07b73ae579a23fa42e7d. First step for #1215. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 34 -------- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 75 ++++++++++++----- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 23 ----- .../Dialect/Transforms/LoweringPrepare.cpp | 83 +------------------ clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp | 34 +++----- .../CIR/CodeGen/initlist-ptr-unsigned.cpp | 16 +--- clang/test/CIR/IR/invalid.cir | 78 ----------------- 7 files changed, 71 insertions(+), 272 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1cfa199ef76b..a3ce4f5c204d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -5336,38 +5336,4 @@ def SignBitOp : CIR_Op<"signbit", [Pure]> { }]; } -//===----------------------------------------------------------------------===// -// StdInitializerListOp -//===----------------------------------------------------------------------===// - -def StdInitializerListOp : CIR_Op<"std.initializer_list"> { - let summary = "Initialize std::initializer_list"; - let description = [{ - The `std.initializer_list` operation will initialize - `std::initializer_list` with given arguments list. - - ```cpp - initializer_list v{1,2,3}; // initialize v with 1, 2, 3 - ``` - - The code above will generate CIR similar as: - - ```mlir - %0 = cir.alloca INITLIST_TYPE, !cir.ptr - %1 = cir.const #cir.int<1> - ... - cir.std.initializer_list %0 (%1 %2 %3) - ``` - - The type of each argument should be the same as template parameter of - `std::initializer_list` (aka `T` in `std::initializer_list`). - }]; - let arguments = (ins StructPtr:$initList, Variadic:$args); - let assemblyFormat = [{ - $initList ` ` `(` ($args^ `:` type($args))? `)` `:` type($initList) attr-dict - }]; - - let hasVerifier = 1; -} - #endif // LLVM_CLANG_CIR_DIALECT_IR_CIROPS diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index d436822bf9ec..32f343ffd605 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -18,15 +18,11 @@ #include "clang/AST/Decl.h" #include "clang/AST/Expr.h" -#include "clang/AST/ExprCXX.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" -#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/MissingFeatures.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" @@ -301,25 +297,62 @@ class AggExprEmitter : public StmtVisitor { void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E); void VisitLambdaExpr(LambdaExpr *E); void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { + ASTContext &Ctx = CGF.getContext(); + CIRGenFunction::SourceLocRAIIObject locRAIIObject{ + CGF, CGF.getLoc(E->getSourceRange())}; + // Emit an array containing the elements. The array is externally + // destructed if the std::initializer_list object is. + LValue Array = CGF.emitLValue(E->getSubExpr()); + assert(Array.isSimple() && "initializer_list array not a simple lvalue"); + Address ArrayPtr = Array.getAddress(); + + const ConstantArrayType *ArrayType = + Ctx.getAsConstantArrayType(E->getSubExpr()->getType()); + assert(ArrayType && "std::initializer_list constructed from non-array"); + + RecordDecl *Record = E->getType()->castAs()->getDecl(); + RecordDecl::field_iterator Field = Record->field_begin(); + assert(Field != Record->field_end() && + Ctx.hasSameType(Field->getType()->getPointeeType(), + ArrayType->getElementType()) && + "Expected std::initializer_list first field to be const E *"); + // Start pointer. auto loc = CGF.getLoc(E->getSourceRange()); - auto builder = CGF.getBuilder(); - auto *subExpr = - llvm::cast(E->getSubExpr())->getSubExpr(); - llvm::SmallVector inits{}; - for (auto *init : llvm::cast(subExpr)->inits()) { - RValue tmpInit = CGF.emitAnyExprToTemp(init); - if (tmpInit.isScalar()) { - inits.push_back(tmpInit.getScalarVal()); - } else if (tmpInit.isComplex()) { - inits.push_back(tmpInit.getComplexVal()); - } else if (tmpInit.isAggregate()) { - inits.push_back(tmpInit.getAggregatePointer()); - } else { - llvm_unreachable("invalid temp expr type"); - } + AggValueSlot Dest = EnsureSlot(loc, E->getType()); + LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), E->getType()); + LValue Start = + CGF.emitLValueForFieldInitialization(DestLV, *Field, Field->getName()); + mlir::Value ArrayStart = ArrayPtr.emitRawPointer(); + CGF.emitStoreThroughLValue(RValue::get(ArrayStart), Start); + ++Field; + assert(Field != Record->field_end() && + "Expected std::initializer_list to have two fields"); + + auto Builder = CGF.getBuilder(); + + auto sizeOp = Builder.getConstInt(loc, ArrayType->getSize()); + + mlir::Value Size = sizeOp.getRes(); + Builder.getUIntNTy(ArrayType->getSizeBitWidth()); + LValue EndOrLength = + CGF.emitLValueForFieldInitialization(DestLV, *Field, Field->getName()); + if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) { + // Length. + CGF.emitStoreThroughLValue(RValue::get(Size), EndOrLength); + } else { + // End pointer. + assert(Field->getType()->isPointerType() && + Ctx.hasSameType(Field->getType()->getPointeeType(), + ArrayType->getElementType()) && + "Expected std::initializer_list second field to be const E *"); + + auto ArrayEnd = + Builder.getArrayElement(loc, loc, ArrayPtr.getPointer(), + ArrayPtr.getElementType(), Size, false); + CGF.emitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); } - mlir::Value dest = EnsureSlot(loc, E->getType()).getPointer(); - builder.create(loc, dest, inits); + assert(++Field == Record->field_end() && + "Expected std::initializer_list to only have two fields"); } void VisitExprWithCleanups(ExprWithCleanups *E); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 341533123497..dfc57e9e603d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -3865,29 +3865,6 @@ LogicalResult cir::CatchParamOp::verify() { return success(); } -//===----------------------------------------------------------------------===// -// StdInitializerListOp Definitions -//===----------------------------------------------------------------------===// - -LogicalResult cir::StdInitializerListOp::verify() { - auto resultType = mlir::cast( - mlir::cast(getInitList().getType()).getPointee()); - if (resultType.getMembers().size() != 2) - return emitOpError( - "std::initializer_list must be '!cir.struct' with two fields"); - auto memberPtr = mlir::dyn_cast(resultType.getMembers()[0]); - if (memberPtr == nullptr) - return emitOpError("first member type of std::initializer_list must be " - "'!cir.ptr', but provided ") - << resultType.getMembers()[0]; - auto expectedType = memberPtr.getPointee(); - for (const mlir::Value &arg : getArgs()) - if (expectedType != arg.getType()) - return emitOpError("arg type must be ") - << expectedType << ", but provided " << arg.getType(); - return mlir::success(); -} - //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index a949d78eaf80..b0709e9638ff 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -12,19 +12,15 @@ #include "mlir/IR/Region.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" -#include "clang/AST/Decl.h" #include "clang/AST/Mangle.h" #include "clang/Basic/Module.h" #include "clang/Basic/TargetInfo.h" #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" #include "clang/CIR/Dialect/IR/CIRDataLayout.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "clang/CIR/Dialect/IR/CIROpsEnums.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/Interfaces/ASTAttrInterfaces.h" #include "llvm/ADT/APFloat.h" -#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringRef.h" @@ -89,7 +85,6 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerToMemCpy(StoreOp op); void lowerArrayDtor(ArrayDtor op); void lowerArrayCtor(ArrayCtor op); - void lowerStdInitializerListOp(StdInitializerListOp op); /// Collect annotations of global values in the module void addGlobalAnnotations(mlir::Operation *op, mlir::ArrayAttr annotations); @@ -1125,79 +1120,6 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { op.erase(); } -/// lowering construction of std::initializer_list. -/// 1. alloca array for arg list. -/// 2. copy arg list to array. -/// 3. construct std::initializer_list from array. -void LoweringPreparePass::lowerStdInitializerListOp(StdInitializerListOp op) { - auto loc = op.getLoc(); - cir::CIRDataLayout dataLayout(theModule); - auto args = op.getArgs(); - - auto stdInitializerListType = mlir::cast( - mlir::cast(op.getInitList().getType()).getPointee()); - clang::RecordDecl::field_range stdInitializerListFields = - stdInitializerListType.getAst().getRawDecl()->fields(); - - mlir::Type elementType = - mlir::cast(stdInitializerListType.getMembers()[0]) - .getPointee(); - auto tempArrayType = - cir::ArrayType::get(&getContext(), elementType, args.size()); - - CIRBaseBuilderTy builder(getContext()); - builder.setInsertionPointAfter(op); - - IntegerAttr alignment = builder.getI64IntegerAttr( - dataLayout.getPrefTypeAlign(tempArrayType).value()); - assert(!cir::MissingFeatures::addressSpace()); - mlir::Value arrayPtr = builder.createAlloca( - loc, cir::PointerType::get(tempArrayType), tempArrayType, "", alignment); - mlir::Value arrayStartPtr = - builder.createCast(cir::CastKind::array_to_ptrdecay, arrayPtr, - cir::PointerType::get(elementType)); - for (unsigned i = 0; i < args.size(); i++) { - if (i == 0) { - builder.createStore(loc, args[i], arrayStartPtr); - } else { - mlir::Value offset = builder.getUnsignedInt(loc, i, 64); - mlir::Value dest = builder.create( - loc, arrayStartPtr.getType(), arrayStartPtr, offset); - builder.createStore(loc, args[i], dest); - } - } - - // FIXME(cir): better handling according to different field type. [ptr ptr], - // [ptr size], [size ptr]. - - clang::RecordDecl::field_iterator it = stdInitializerListFields.begin(); - const clang::RecordDecl::field_iterator startField = it; - const unsigned startIdx = 0U; - const clang::RecordDecl::field_iterator endOrSizeField = ++it; - const unsigned endOrSizeIdx = 1U; - assert(llvm::range_size(stdInitializerListFields) == 2U); - - mlir::Value startMemberPtr = builder.createGetMemberOp( - loc, op.getInitList(), startField->getName().data(), startIdx); - builder.createStore(loc, arrayStartPtr, startMemberPtr); - - mlir::Value size = builder.getUnsignedInt(loc, args.size(), 64); - if (endOrSizeField->getType()->isPointerType()) { - mlir::Value arrayEndPtr = builder.create( - loc, arrayStartPtr.getType(), arrayStartPtr, size); - mlir::Value endMemberPtr = builder.createGetMemberOp( - loc, op.getInitList(), endOrSizeField->getName().data(), endOrSizeIdx); - builder.createStore(loc, arrayEndPtr, endMemberPtr); - } else { - assert(endOrSizeField->getType()->isIntegerType()); - mlir::Value sizeMemberPtr = builder.createGetMemberOp( - loc, op.getInitList(), endOrSizeField->getName().data(), endOrSizeIdx); - builder.createStore(loc, size, sizeMemberPtr); - } - - op.erase(); -} - void LoweringPreparePass::addGlobalAnnotations(mlir::Operation *op, mlir::ArrayAttr annotations) { auto globalValue = cast(op); @@ -1258,8 +1180,6 @@ void LoweringPreparePass::runOnOp(Operation *op) { } if (std::optional annotations = fnOp.getAnnotations()) addGlobalAnnotations(fnOp, annotations.value()); - } else if (auto stdInitializerListOp = dyn_cast(op)) { - lowerStdInitializerListOp(stdInitializerListOp); } } @@ -1275,8 +1195,7 @@ void LoweringPreparePass::runOnOperation() { op->walk([&](Operation *op) { if (isa( - op)) + ArrayCtor, ArrayDtor, cir::FuncOp, StoreOp>(op)) opsToTransform.push_back(op); }); diff --git a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp index 8a0e2ac7aaab..25873623d6e8 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-ptr.cpp @@ -1,5 +1,3 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir -clangir-disable-passes -// RUN: FileCheck --check-prefix=BEFORE --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll @@ -17,16 +15,6 @@ void test() { } } // namespace std -// BEFORE: [[INITLIST_TYPE:!.*]] = !cir.struct" {!cir.ptr>, !cir.ptr>} #cir.record.decl.ast> -// BEFORE: %0 = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, -// BEFORE: %1 = cir.get_global @".str" : !cir.ptr> -// BEFORE: %2 = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// BEFORE: %3 = cir.get_global @".str.1" : !cir.ptr> -// BEFORE: %4 = cir.cast(array_to_ptrdecay, %3 : !cir.ptr>), !cir.ptr -// BEFORE: cir.std.initializer_list %0 (%2, %4 : !cir.ptr, !cir.ptr) : !cir.ptr<[[INITLIST_TYPE]]> -// BEFORE: %5 = cir.load %0 : !cir.ptr<[[INITLIST_TYPE]]>, [[INITLIST_TYPE]] -// BEFORE: cir.call @_ZSt1fIPKcEvSt16initializer_listIT_E(%5) : ([[INITLIST_TYPE]]) -> () - // CIR: [[INITLIST_TYPE:!.*]] = !cir.struct" {!cir.ptr>, !cir.ptr>}> // CIR: cir.func linkonce_odr @_ZSt1fIPKcEvSt16initializer_listIT_E(%arg0: [[INITLIST_TYPE]] // CIR: [[LOCAL:%.*]] = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, @@ -39,22 +27,24 @@ void test() { // CIR: cir.func @_ZSt4testv() // CIR: cir.scope { // CIR: [[INITLIST_LOCAL:%.*]] = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, +// CIR: [[LOCAL_ELEM_ARRAY:%.*]] = cir.alloca !cir.array x 2>, !cir.ptr x 2>>, +// CIR: [[FIRST_ELEM_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[LOCAL_ELEM_ARRAY]] : !cir.ptr x 2>>), !cir.ptr> // CIR: [[XY_CHAR_ARRAY:%.*]] = cir.get_global [[STR_XY]] : !cir.ptr> // CIR: [[STR_XY_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[XY_CHAR_ARRAY]] : !cir.ptr>), !cir.ptr +// CIR: cir.store [[STR_XY_PTR]], [[FIRST_ELEM_PTR]] : !cir.ptr, !cir.ptr> +// CIR: [[ONE:%.*]] = cir.const #cir.int<1> +// CIR: [[NEXT_ELEM_PTR:%.*]] = cir.ptr_stride([[FIRST_ELEM_PTR]] : !cir.ptr>, [[ONE]] : !s64i), !cir.ptr> // CIR: [[UV_CHAR_ARRAY:%.*]] = cir.get_global [[STR_UV]] : !cir.ptr> // CIR: [[STR_UV_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[UV_CHAR_ARRAY]] : !cir.ptr>), !cir.ptr -// CIR: [[LOCAL_ELEM_ARRAY:%.*]] = cir.alloca !cir.array x 2>, !cir.ptr x 2>>, -// CIR: [[ELEM_BEGIN:%.*]] = cir.cast(array_to_ptrdecay, [[LOCAL_ELEM_ARRAY]] : !cir.ptr x 2>>), !cir.ptr> -// CIR: cir.store [[STR_XY_PTR]], [[ELEM_BEGIN]] : !cir.ptr, !cir.ptr> -// CIR: [[ONE:%.*]] = cir.const #cir.int<1> -// CIR: [[NEXT_ELEM_PTR:%.*]] = cir.ptr_stride([[ELEM_BEGIN]] : !cir.ptr>, [[ONE]] : !u64i), !cir.ptr> // CIR: cir.store [[STR_UV_PTR]], [[NEXT_ELEM_PTR]] : !cir.ptr, !cir.ptr> // CIR: [[START_FLD_PTR:%.*]] = cir.get_member [[INITLIST_LOCAL]][0] {name = "array_start"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr>> -// CIR: cir.store [[ELEM_BEGIN]], [[START_FLD_PTR]] : !cir.ptr>, !cir.ptr>> +// CIR: [[START_FLD_PTR_AS_PTR_2_CHAR_ARRAY:%.*]] = cir.cast(bitcast, [[START_FLD_PTR]] : !cir.ptr>>), !cir.ptr x 2>>> +// CIR: cir.store [[LOCAL_ELEM_ARRAY]], [[START_FLD_PTR_AS_PTR_2_CHAR_ARRAY]] : !cir.ptr x 2>>, !cir.ptr x 2>>> // CIR: [[ELEM_ARRAY_LEN:%.*]] = cir.const #cir.int<2> -// CIR: [[ELEM_END:%.*]] = cir.ptr_stride([[ELEM_BEGIN]] : !cir.ptr>, [[ELEM_ARRAY_LEN]] : !u64i), !cir.ptr> // CIR: [[END_FLD_PTR:%.*]] = cir.get_member [[INITLIST_LOCAL]][1] {name = "array_end"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr>> -// CIR: cir.store [[ELEM_END]], [[END_FLD_PTR]] : !cir.ptr>, !cir.ptr>> +// CIR: [[LOCAL_ELEM_ARRAY_END:%.*]] = cir.ptr_stride([[LOCAL_ELEM_ARRAY]] : !cir.ptr x 2>>, [[ELEM_ARRAY_LEN]] : !u64i), !cir.ptr x 2>> +// CIR: [[END_FLD_PTR_AS_PTR_2_CHAR_ARRAY:%.*]] = cir.cast(bitcast, [[END_FLD_PTR]] : !cir.ptr>>), !cir.ptr x 2>>> +// CIR: cir.store [[LOCAL_ELEM_ARRAY_END]], [[END_FLD_PTR_AS_PTR_2_CHAR_ARRAY]] : !cir.ptr x 2>>, !cir.ptr x 2>>> // CIR: [[ARG:%.*]] = cir.load [[INITLIST_LOCAL]] : !cir.ptr<[[INITLIST_TYPE]]>, [[INITLIST_TYPE]] // CIR: cir.call @_ZSt1fIPKcEvSt16initializer_listIT_E([[ARG]]) : ([[INITLIST_TYPE]]) -> () // CIR: } @@ -82,9 +72,9 @@ void test() { // LLVM: [[PTR_SECOND_ELEM:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 1 // LLVM: store ptr @.str.1, ptr [[PTR_SECOND_ELEM]], align 8 // LLVM: [[INIT_START_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0 -// LLVM: store ptr [[PTR_FIRST_ELEM]], ptr [[INIT_START_FLD_PTR]], align 8 -// LLVM: [[ELEM_ARRAY_END:%.*]] = getelementptr ptr, ptr [[PTR_FIRST_ELEM]], i64 2 +// LLVM: store ptr [[ELEM_ARRAY_PTR]], ptr [[INIT_START_FLD_PTR]], align 8 // LLVM: [[INIT_END_FLD_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1 +// LLVM: [[ELEM_ARRAY_END:%.*]] = getelementptr [2 x ptr], ptr [[ELEM_ARRAY_PTR]], i64 2 // LLVM: store ptr [[ELEM_ARRAY_END]], ptr [[INIT_END_FLD_PTR]], align 8 // LLVM: [[ARG2PASS:%.*]] = load %"class.std::initializer_list", ptr [[INIT_STRUCT]], align 8 // LLVM: call void @_ZSt1fIPKcEvSt16initializer_listIT_E(%"class.std::initializer_list" [[ARG2PASS]]) diff --git a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp index 08ed514ca707..cc90d0ac1d84 100644 --- a/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp +++ b/clang/test/CIR/CodeGen/initlist-ptr-unsigned.cpp @@ -1,5 +1,3 @@ -// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir -clangir-disable-passes -// RUN: FileCheck --check-prefix=BEFORE --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s // RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll @@ -17,13 +15,6 @@ void test() { } } // namespace std -// BEFORE: [[INITLIST_TYPE:!.*]] = !cir.struct" {!cir.ptr, !u64i} #cir.record.decl.ast> -// BEFORE: %0 = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, -// BEFORE: %1 = cir.const #cir.int<7> : !s32i -// BEFORE: cir.std.initializer_list %0 (%1 : !s32i) : !cir.ptr<[[INITLIST_TYPE]]> -// BEFORE: %2 = cir.load %0 : !cir.ptr<[[INITLIST_TYPE]]>, [[INITLIST_TYPE]] -// BEFORE: cir.call @_ZSt1fIiEvSt16initializer_listIT_E(%2) : ([[INITLIST_TYPE]]) -> () - // CIR: [[INITLIST_TYPE:!.*]] = !cir.struct" {!cir.ptr, !u64i}> // CIR: cir.func linkonce_odr @_ZSt1fIiEvSt16initializer_listIT_E(%arg0: [[INITLIST_TYPE]] @@ -34,12 +25,13 @@ void test() { // CIR: cir.func @_ZSt4testv() // CIR: cir.scope { // CIR: [[LIST_PTR:%.*]] = cir.alloca [[INITLIST_TYPE]], !cir.ptr<[[INITLIST_TYPE]]>, -// CIR: [[SEVEN:%.*]] = cir.const #cir.int<7> : !s32i // CIR: [[ARRAY:%.*]] = cir.alloca !cir.array, !cir.ptr>, // CIR: [[DECAY_PTR:%.*]] = cir.cast(array_to_ptrdecay, [[ARRAY]] : !cir.ptr>), !cir.ptr +// CIR: [[SEVEN:%.*]] = cir.const #cir.int<7> : !s32i // CIR: cir.store [[SEVEN]], [[DECAY_PTR]] : !s32i, !cir.ptr // CIR: [[FLD_C:%.*]] = cir.get_member [[LIST_PTR]][0] {name = "c"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr> -// CIR: cir.store [[DECAY_PTR]], [[FLD_C]] : !cir.ptr, !cir.ptr> +// CIR: [[ARRAY_PTR:%.*]] = cir.cast(bitcast, [[FLD_C]] : !cir.ptr>), !cir.ptr>> +// CIR: cir.store [[ARRAY]], [[ARRAY_PTR]] : !cir.ptr>, !cir.ptr>> // CIR: [[LENGTH_ONE:%.*]] = cir.const #cir.int<1> // CIR: [[FLD_LEN:%.*]] = cir.get_member [[LIST_PTR]][1] {name = "len"} : !cir.ptr<[[INITLIST_TYPE]]> -> !cir.ptr // CIR: cir.store [[LENGTH_ONE]], [[FLD_LEN]] : !u64i, !cir.ptr @@ -62,7 +54,7 @@ void test() { // LLVM: [[PTR_FIRST_ELEM:%.*]] = getelementptr i32, ptr [[ELEM_ARRAY]], i32 0 // LLVM: store i32 7, ptr [[PTR_FIRST_ELEM]], align 4 // LLVM: [[ELEM_ARRAY_PTR:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 0 -// LLVM: store ptr [[PTR_FIRST_ELEM]], ptr [[ELEM_ARRAY_PTR]], align 8 +// LLVM: store ptr [[ELEM_ARRAY]], ptr [[ELEM_ARRAY_PTR]], align 8 // LLVM: [[INIT_LEN_FLD:%.*]] = getelementptr %"class.std::initializer_list", ptr [[INIT_STRUCT]], i32 0, i32 1 // LLVM: store i64 1, ptr [[INIT_LEN_FLD]], align 8 // LLVM: [[ARG2PASS:%.*]] = load %"class.std::initializer_list", ptr [[INIT_STRUCT]], align 8 diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index bbe61a0502b0..80fe6e114127 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1450,84 +1450,6 @@ cir.global external @f = #cir.fp<42> : !cir.float // ----- -!s32i = !cir.int -!u32i = !cir.int -!initializer_list_s32i = !cir.struct, !cir.ptr}> -cir.func @std_initializer_list_wrong_arg_type() { - %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] - %1 = cir.const #cir.int<1> : !s32i - %2 = cir.const #cir.int<1> : !u32i - // expected-error @below {{'cir.std.initializer_list' op arg type must be '!cir.int', but provided '!cir.int'}} - cir.std.initializer_list %0 (%2, %1, %1 : !u32i, !s32i, !s32i) : !cir.ptr -} - -// ----- - -!s32i = !cir.int -!u32i = !cir.int -!initializer_list_s32i = !cir.struct, !cir.ptr}> -cir.func @std_initializer_list_wrong_arg_type() { - %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] - %1 = cir.const #cir.int<1> : !s32i - %2 = cir.const #cir.int<1> : !u32i - // expected-error @below {{'cir.std.initializer_list' op arg type must be '!cir.int', but provided '!cir.int'}} - cir.std.initializer_list %0 (%1, %2, %1 : !s32i, !u32i, !s32i) : !cir.ptr - cir.return -} - -// ----- - -!s32i = !cir.int -!u32i = !cir.int -!initializer_list_s32i = !cir.struct, !cir.ptr}> -cir.func @std_initializer_list_wrong_arg_type() { - %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] - %1 = cir.const #cir.int<1> : !s32i - %2 = cir.const #cir.int<1> : !u32i - // expected-error @below {{'cir.std.initializer_list' op arg type must be '!cir.int', but provided '!cir.int'}} - cir.std.initializer_list %0 (%1, %1, %2 : !s32i, !s32i, !u32i) : !cir.ptr - cir.return -} - -// ----- - -!s32i = !cir.int -!u32i = !cir.int -!initializer_list_s32i = !cir.struct, !cir.ptr}> -cir.func @std_initializer_list_wrong_arg_type() { - %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] - %1 = cir.const #cir.int<1> : !s32i - %2 = cir.const #cir.int<1> : !u32i - // expected-error @below {{'cir.std.initializer_list' op arg type must be '!cir.int', but provided '!cir.int'}} - cir.std.initializer_list %0 (%2, %2, %2 : !u32i, !u32i, !u32i) : !cir.ptr - cir.return -} - -// ----- - -!s32i = !cir.int -!initializer_list_s32i = !cir.struct, !cir.ptr, !cir.ptr}> -cir.func @std_initializer_list_wrong_struct() { - %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] - %1 = cir.const #cir.int<1> : !s32i - // expected-error @below {{'cir.std.initializer_list' op std::initializer_list must be '!cir.struct' with two fields}} - cir.std.initializer_list %0 (%1, %1, %1 : !s32i, !s32i, !s32i) : !cir.ptr - cir.return -} - -// ----- - -!s32i = !cir.int -!initializer_list_s32i = !cir.struct}> -cir.func @std_initializer_list_wrong_struct() { - %0 = cir.alloca !initializer_list_s32i, !cir.ptr, ["v"] - %1 = cir.const #cir.int<1> : !s32i - // expected-error @below {{'cir.std.initializer_list' op first member type of std::initializer_list must be '!cir.ptr', but provided '!cir.int'}} - cir.std.initializer_list %0 (%1, %1, %1 : !s32i, !s32i, !s32i) : !cir.ptr - cir.return -} - -// ----- // Verify !s32i = !cir.int cir.func @cast0(%arg0: !s32i, %arg1: !s32i) { From 84add0e5fa94b0ff79c5e24bf7ba5f0d4b2ef0f6 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Mon, 9 Dec 2024 06:52:25 -0800 Subject: [PATCH 2151/2301] [CIR][NFC] Cleanup: MLIRContext vs ASTContext (#1217) ClangIR CodeGen makes use of both `mlir::MLIRContext` and `clang::ASTContext`. Referring to these as just "context" can be ambiguous. This change attempts to make the CodeGen code more clear by choosing better variable names. The change is almost entirely renaming: mostly change variables, parameters, and fields named `context` or `ctx` to `mlirContext` or `astContext`. There are some other renames having to do with contexts, and a small number of other drive-by fixes. (I considered also renaming functions named `getContext()`, but did not include that in this change.) This is entirely code cleanup. There should be no behavior changes. --- clang/include/clang/CIR/CIRGenerator.h | 8 +- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 22 +-- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 6 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 39 ++--- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 29 ++-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 10 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 18 +-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 24 +-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 31 ++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 151 +++++++++--------- clang/lib/CIR/CodeGen/CIRGenModule.h | 8 +- clang/lib/CIR/CodeGen/CIRGenTBAA.cpp | 21 +-- clang/lib/CIR/CodeGen/CIRGenTBAA.h | 6 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 39 ++--- clang/lib/CIR/CodeGen/CIRGenTypes.h | 4 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 16 +- clang/lib/CIR/CodeGen/CIRGenValue.h | 10 +- clang/lib/CIR/CodeGen/CIRGenerator.cpp | 36 +++-- clang/lib/CIR/CodeGen/CIRPasses.cpp | 14 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 4 +- clang/lib/CIR/CodeGen/ConstantInitBuilder.h | 22 +-- 25 files changed, 281 insertions(+), 264 deletions(-) diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h index f121d2c453d7..cb702aa978fa 100644 --- a/clang/include/clang/CIR/CIRGenerator.h +++ b/clang/include/clang/CIR/CIRGenerator.h @@ -45,7 +45,7 @@ namespace cir { class CIRGenerator : public clang::ASTConsumer { virtual void anchor(); clang::DiagnosticsEngine &Diags; - clang::ASTContext *astCtx; + clang::ASTContext *astContext; llvm::IntrusiveRefCntPtr fs; // Only used for debug info. @@ -71,7 +71,7 @@ class CIRGenerator : public clang::ASTConsumer { }; protected: - std::unique_ptr mlirCtx; + std::unique_ptr mlirContext; std::unique_ptr CGM; private: @@ -86,7 +86,7 @@ class CIRGenerator : public clang::ASTConsumer { bool EmitFunction(const clang::FunctionDecl *FD); bool HandleTopLevelDecl(clang::DeclGroupRef D) override; - void HandleTranslationUnit(clang::ASTContext &Ctx) override; + void HandleTranslationUnit(clang::ASTContext &astContext) override; void HandleInlineFunctionDefinition(clang::FunctionDecl *D) override; void HandleTagDeclDefinition(clang::TagDecl *D) override; void HandleTagDeclRequiredDefinition(const clang::TagDecl *D) override; @@ -96,7 +96,7 @@ class CIRGenerator : public clang::ASTConsumer { mlir::ModuleOp getModule(); std::unique_ptr takeContext() { - return std::move(mlirCtx); + return std::move(mlirContext); }; bool verifyModule(); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index d7bd1f745f3a..47c4d3dc0f18 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -154,12 +154,12 @@ struct WidthAndSignedness { } // namespace static WidthAndSignedness -getIntegerWidthAndSignedness(const clang::ASTContext &context, +getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType Type) { assert(Type->isIntegerType() && "Given type is not an integer."); unsigned Width = Type->isBooleanType() ? 1 - : Type->isBitIntType() ? context.getIntWidth(Type) - : context.getTypeInfo(Type).Width; + : Type->isBitIntType() ? astContext.getIntWidth(Type) + : astContext.getTypeInfo(Type).Width; bool Signed = Type->isSignedIntegerType(); return {Width, Signed}; } @@ -224,11 +224,11 @@ static mlir::Value emitSignBit(mlir::Location loc, CIRGenFunction &CGF, } static Address checkAtomicAlignment(CIRGenFunction &CGF, const CallExpr *E) { - ASTContext &ctx = CGF.getContext(); + ASTContext &astContext = CGF.getContext(); Address ptr = CGF.emitPointerWithAlignment(E->getArg(0)); unsigned bytes = isa(ptr.getElementType()) - ? ctx.getTypeSizeInChars(ctx.VoidPtrTy).getQuantity() + ? astContext.getTypeSizeInChars(astContext.VoidPtrTy).getQuantity() : CGF.CGM.getDataLayout().getTypeSizeInBits(ptr.getElementType()) / 8; unsigned align = ptr.getAlignment().getQuantity(); if (align % bytes != 0) { @@ -331,10 +331,10 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, } static bool -typeRequiresBuiltinLaunderImp(const ASTContext &ctx, QualType ty, +typeRequiresBuiltinLaunderImp(const ASTContext &astContext, QualType ty, llvm::SmallPtrSetImpl &seen) { - if (const auto *arr = ctx.getAsArrayType(ty)) - ty = ctx.getBaseElementType(arr); + if (const auto *arr = astContext.getAsArrayType(ty)) + ty = astContext.getBaseElementType(arr); const auto *record = ty->getAsCXXRecordDecl(); if (!record) @@ -351,7 +351,7 @@ typeRequiresBuiltinLaunderImp(const ASTContext &ctx, QualType ty, return true; for (FieldDecl *fld : record->fields()) { - if (typeRequiresBuiltinLaunderImp(ctx, fld->getType(), seen)) + if (typeRequiresBuiltinLaunderImp(astContext, fld->getType(), seen)) return true; } return false; @@ -2641,7 +2641,7 @@ mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize( /// for "fabsf". cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID) { - assert(astCtx.BuiltinInfo.isLibFunction(BuiltinID)); + assert(astContext.BuiltinInfo.isLibFunction(BuiltinID)); // Get the name, skip over the __builtin_ prefix (if necessary). StringRef Name; @@ -2703,7 +2703,7 @@ cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *FD, AIXLongDouble64Builtins.end()) Name = AIXLongDouble64Builtins[BuiltinID]; else - Name = astCtx.BuiltinInfo.getName(BuiltinID).substr(10); + Name = astContext.BuiltinInfo.getName(BuiltinID).substr(10); } auto Ty = getTypes().ConvertType(FD->getType()); diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 7668ef3dd1b7..87a4ffa76d8e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -96,7 +96,7 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { return true; // If the base is at a non-zero offset, give up. - const ASTRecordLayout &ClassLayout = astCtx.getASTRecordLayout(Class); + const ASTRecordLayout &ClassLayout = astContext.getASTRecordLayout(Class); if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero()) return true; diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 0a0c1bef4242..127d59c54892 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -31,10 +31,10 @@ class CIRGenFunctionInfo; class CIRGenCXXABI { protected: CIRGenModule &CGM; - std::unique_ptr MangleCtx; + std::unique_ptr MangleContext; CIRGenCXXABI(CIRGenModule &CGM) - : CGM{CGM}, MangleCtx(CGM.getASTContext().createMangleContext()) {} + : CGM{CGM}, MangleContext(CGM.getASTContext().createMangleContext()) {} clang::ASTContext &getContext() const { return CGM.getASTContext(); } @@ -115,7 +115,7 @@ class CIRGenCXXABI { virtual bool classifyReturnType(CIRGenFunctionInfo &FI) const = 0; /// Gets the mangle context. - clang::MangleContext &getMangleContext() { return *MangleCtx; } + clang::MangleContext &getMangleContext() { return *MangleContext; } clang::ImplicitParamDecl *&getStructorImplicitParamDecl(CIRGenFunction &CGF) { return CGF.CXXStructorImplicitParamDecl; diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 29db13706437..6e31e2f41311 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -103,11 +103,12 @@ class ClangToCIRArgMapping { SmallVector ArgInfo; public: - ClangToCIRArgMapping(const ASTContext &Context, const CIRGenFunctionInfo &FI, + ClangToCIRArgMapping(const ASTContext &astContext, + const CIRGenFunctionInfo &FI, bool OnlyRequiredArgs = false) : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalCIRArgs(0), ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { - construct(Context, FI, OnlyRequiredArgs); + construct(astContext, FI, OnlyRequiredArgs); } bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } @@ -130,11 +131,11 @@ class ClangToCIRArgMapping { } private: - void construct(const ASTContext &Context, const CIRGenFunctionInfo &FI, + void construct(const ASTContext &astContext, const CIRGenFunctionInfo &FI, bool OnlyRequiredArgs); }; -void ClangToCIRArgMapping::construct(const ASTContext &Context, +void ClangToCIRArgMapping::construct(const ASTContext &astContext, const CIRGenFunctionInfo &FI, bool OnlyRequiredArgs) { unsigned CIRArgNo = 0; @@ -314,7 +315,7 @@ static Address emitAddressAtOffset(CIRGenFunction &CGF, Address addr, } static void AddAttributesFromFunctionProtoType(CIRGenBuilderTy &builder, - ASTContext &Ctx, + ASTContext &astContext, mlir::NamedAttrList &FuncAttrs, const FunctionProtoType *FPT) { if (!FPT) @@ -366,7 +367,7 @@ void CIRGenModule::constructAttributeList(StringRef Name, // TODO: NoReturn, cmse_nonsecure_call // Collect function CIR attributes from the callee prototype if we have one. - AddAttributesFromFunctionProtoType(getBuilder(), astCtx, funcAttrs, + AddAttributesFromFunctionProtoType(getBuilder(), astContext, funcAttrs, CalleeInfo.getCalleeFunctionProtoType()); const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl(); @@ -388,7 +389,7 @@ void CIRGenModule::constructAttributeList(StringRef Name, if (const FunctionDecl *Fn = dyn_cast(TargetDecl)) { AddAttributesFromFunctionProtoType( - getBuilder(), astCtx, funcAttrs, + getBuilder(), astContext, funcAttrs, Fn->getType()->getAs()); if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) { // A sane operator new returns a non-aliasing pointer. @@ -1054,9 +1055,9 @@ void CIRGenFunction::emitCallArgs( if (PS == nullptr) return; - const auto &Context = getContext(); - auto SizeTy = Context.getSizeType(); - auto T = builder.getUIntNTy(Context.getTypeSize(SizeTy)); + const clang::ASTContext &astContext = getContext(); + auto SizeTy = astContext.getSizeType(); + auto T = builder.getUIntNTy(astContext.getTypeSize(SizeTy)); assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?"); auto V = evaluateOrEmitBuiltinObjectSize( Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic()); @@ -1212,8 +1213,8 @@ CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { FunctionType::ExtInfo extInfo = FTP->getExtInfo(); CanQualType resultType = TheCXXABI.HasThisReturn(GD) ? argTypes.front() : TheCXXABI.hasMostDerivedReturn(GD) - ? Context.VoidPtrTy - : Context.VoidTy; + ? astContext.VoidPtrTy + : astContext.VoidTy; assert(!TheCXXABI.HasThisReturn(GD) && "Please sent PR with a test and remove this"); @@ -1300,7 +1301,7 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXConstructorCall( // FIXME: Kill copy. llvm::SmallVector ArgTypes; for (const auto &Arg : Args) - ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); + ArgTypes.push_back(astContext.getCanonicalParamType(Arg.Ty)); // +1 for implicit this, which should always be args[0] unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs; @@ -1314,7 +1315,7 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXConstructorCall( GlobalDecl GD(D, CtorKind); assert(!TheCXXABI.HasThisReturn(GD) && "ThisReturn NYI"); assert(!TheCXXABI.hasMostDerivedReturn(GD) && "Most derived return NYI"); - CanQualType ResultType = Context.VoidTy; + CanQualType ResultType = astContext.VoidTy; FunctionType::ExtInfo Info = FPT->getExtInfo(); llvm::SmallVector ParamInfos; @@ -1336,7 +1337,7 @@ bool CIRGenTypes::inheritingCtorHasParams(const InheritedConstructor &Inherited, !Target.getCXXABI().hasConstructorVariants(); } -bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context, +bool CIRGenModule::MayDropFunctionReturn(const ASTContext &astContext, QualType ReturnType) { // We can't just disard the return value for a record type with a complex // destructor or a non-trivially copyable type. @@ -1345,7 +1346,7 @@ bool CIRGenModule::MayDropFunctionReturn(const ASTContext &Context, llvm_unreachable("NYI"); } - return ReturnType.isTriviallyCopyableType(Context); + return ReturnType.isTriviallyCopyableType(astContext); } static bool isInAllocaArgument(CIRGenCXXABI &ABI, QualType type) { @@ -1437,10 +1438,10 @@ arrangeFreeFunctionLikeCall(CIRGenTypes &CGT, CIRGenModule &CGM, } static llvm::SmallVector -getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { +getArgTypesForCall(ASTContext &astContext, const CallArgList &args) { llvm::SmallVector argTypes; for (auto &arg : args) - argTypes.push_back(ctx.getCanonicalParamType(arg.Ty)); + argTypes.push_back(astContext.getCanonicalParamType(arg.Ty)); return argTypes; } @@ -1469,7 +1470,7 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXMethodCall( getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size()); // FIXME: Kill copy. - auto argTypes = getArgTypesForCall(Context, args); + auto argTypes = getArgTypesForCall(astContext, args); auto info = proto->getExtInfo(); return arrangeCIRFunctionInfo(GetReturnType(proto->getReturnType()), diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index e965d2f777ed..01ff43a783f2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -127,15 +127,16 @@ class FieldMemcpyizer { } CharUnits getMemcpySize(uint64_t FirstByteOffset) const { - ASTContext &Ctx = CGF.getContext(); + ASTContext &astContext = CGF.getContext(); unsigned LastFieldSize = LastField->isBitField() ? LastField->getBitWidthValue() - : Ctx.toBits( - Ctx.getTypeInfoDataSizeInChars(LastField->getType()).Width); + : astContext.toBits( + astContext.getTypeInfoDataSizeInChars(LastField->getType()) + .Width); uint64_t MemcpySizeBits = LastFieldOffset + LastFieldSize - - FirstByteOffset + Ctx.getCharWidth() - 1; - CharUnits MemcpySize = Ctx.toCharUnitsFromBits(MemcpySizeBits); + FirstByteOffset + astContext.getCharWidth() - 1; + CharUnits MemcpySize = astContext.toCharUnitsFromBits(MemcpySizeBits); return MemcpySize; } @@ -1096,12 +1097,12 @@ void CIRGenFunction::destroyCXXObject(CIRGenFunction &CGF, Address addr, /*Delegating=*/false, addr, type); } -static bool FieldHasTrivialDestructorBody(ASTContext &Context, +static bool FieldHasTrivialDestructorBody(ASTContext &astContext, const FieldDecl *Field); // FIXME(cir): this should be shared with traditional codegen. static bool -HasTrivialDestructorBody(ASTContext &Context, +HasTrivialDestructorBody(ASTContext &astContext, const CXXRecordDecl *BaseClassDecl, const CXXRecordDecl *MostDerivedClassDecl) { // If the destructor is trivial we don't have to check anything else. @@ -1113,7 +1114,7 @@ HasTrivialDestructorBody(ASTContext &Context, // Check fields. for (const auto *Field : BaseClassDecl->fields()) - if (!FieldHasTrivialDestructorBody(Context, Field)) + if (!FieldHasTrivialDestructorBody(astContext, Field)) return false; // Check non-virtual bases. @@ -1123,7 +1124,7 @@ HasTrivialDestructorBody(ASTContext &Context, const CXXRecordDecl *NonVirtualBase = cast(I.getType()->castAs()->getDecl()); - if (!HasTrivialDestructorBody(Context, NonVirtualBase, + if (!HasTrivialDestructorBody(astContext, NonVirtualBase, MostDerivedClassDecl)) return false; } @@ -1133,7 +1134,8 @@ HasTrivialDestructorBody(ASTContext &Context, for (const auto &I : BaseClassDecl->vbases()) { const CXXRecordDecl *VirtualBase = cast(I.getType()->castAs()->getDecl()); - if (!HasTrivialDestructorBody(Context, VirtualBase, MostDerivedClassDecl)) + if (!HasTrivialDestructorBody(astContext, VirtualBase, + MostDerivedClassDecl)) return false; } } @@ -1142,9 +1144,10 @@ HasTrivialDestructorBody(ASTContext &Context, } // FIXME(cir): this should be shared with traditional codegen. -static bool FieldHasTrivialDestructorBody(ASTContext &Context, +static bool FieldHasTrivialDestructorBody(ASTContext &astContext, const FieldDecl *Field) { - QualType FieldBaseElementType = Context.getBaseElementType(Field->getType()); + QualType FieldBaseElementType = + astContext.getBaseElementType(Field->getType()); const RecordType *RT = FieldBaseElementType->getAs(); if (!RT) @@ -1156,7 +1159,7 @@ static bool FieldHasTrivialDestructorBody(ASTContext &Context, if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion()) return false; - return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl); + return HasTrivialDestructorBody(astContext, FieldClassDecl, FieldClassDecl); } /// Check whether we need to initialize any vtable pointers before calling this diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index a6a5121272f5..37b147a3aa57 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -93,7 +93,7 @@ static Address emitAddrOfFieldStorage(CIRGenFunction &CGF, Address Base, return addr; } -static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { +static bool hasAnyVptr(const QualType Type, const ASTContext &astContext) { const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl(); if (!RD) return false; @@ -102,11 +102,11 @@ static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { return true; for (const auto &Base : RD->bases()) - if (hasAnyVptr(Base.getType(), Context)) + if (hasAnyVptr(Base.getType(), astContext)) return true; for (const FieldDecl *Field : RD->fields()) - if (hasAnyVptr(Field->getType(), Context)) + if (hasAnyVptr(Field->getType(), astContext)) return true; return false; @@ -1691,12 +1691,12 @@ emitArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, shouldDecay); } -static QualType getFixedSizeElementType(const ASTContext &ctx, +static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla) { QualType eltType; do { eltType = vla->getElementType(); - } while ((vla = ctx.getAsVariableArrayType(eltType))); + } while ((vla = astContext.getAsVariableArrayType(eltType))); return eltType; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index e336594388ae..43fb7c2d7724 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -1207,13 +1207,13 @@ RValue CIRGenFunction::emitBuiltinNewDeleteCall(const FunctionProtoType *type, CallArgList args; emitCallArgs(args, type, theCall->arguments()); // Find the allocation or deallocation function that we're calling. - ASTContext &ctx = getContext(); - DeclarationName name = - ctx.DeclarationNames.getCXXOperatorName(isDelete ? OO_Delete : OO_New); + ASTContext &astContext = getContext(); + DeclarationName name = astContext.DeclarationNames.getCXXOperatorName( + isDelete ? OO_Delete : OO_New); - for (auto *decl : ctx.getTranslationUnitDecl()->lookup(name)) + for (auto *decl : astContext.getTranslationUnitDecl()->lookup(name)) if (auto *fd = dyn_cast(decl)) - if (ctx.hasSameType(fd->getType(), QualType(type, 0))) + if (astContext.hasSameType(fd->getType(), QualType(type, 0))) return emitNewDeleteCall(*this, fd, type, args); llvm_unreachable("predeclared global operator new/delete is missing"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index df4aab399cfc..429d2f81af4e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -208,13 +208,13 @@ class ComplexExprEmitter : public StmtVisitor { QualType HigherPrecisionTypeForComplexArithmetic(QualType ElementType, bool IsDivOpCode) { - ASTContext &Ctx = CGF.getContext(); + ASTContext &astContext = CGF.getContext(); const QualType HigherElementType = - Ctx.GetHigherPrecisionFPType(ElementType); + astContext.GetHigherPrecisionFPType(ElementType); const llvm::fltSemantics &ElementTypeSemantics = - Ctx.getFloatTypeSemantics(ElementType); + astContext.getFloatTypeSemantics(ElementType); const llvm::fltSemantics &HigherElementTypeSemantics = - Ctx.getFloatTypeSemantics(HigherElementType); + astContext.getFloatTypeSemantics(HigherElementType); // Check that the promoted type can handle the intermediate values without // overflowing. This can be interpreted as: // (SmallerType.LargestFiniteVal * SmallerType.LargestFiniteVal) * 2 <= @@ -225,7 +225,7 @@ class ComplexExprEmitter : public StmtVisitor { if (llvm::APFloat::semanticsMaxExponent(ElementTypeSemantics) * 2 + 1 <= llvm::APFloat::semanticsMaxExponent(HigherElementTypeSemantics)) { FPHasBeenPromoted = true; - return Ctx.getComplexType(HigherElementType); + return astContext.getComplexType(HigherElementType); } else { // The intermediate values can't be represented in the promoted type // without overflowing. diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 67eee98667bc..dbd78284349d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -193,7 +193,7 @@ bool ConstantAggregateBuilder::add(mlir::Attribute A, CharUnits Offset, bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits, bool AllowOverwrite) { - const ASTContext &Context = CGM.getASTContext(); + const ASTContext &astContext = CGM.getASTContext(); const uint64_t CharWidth = CGM.getASTContext().getCharWidth(); auto charTy = CGM.getBuilder().getUIntNTy(CharWidth); // Offset of where we want the first bit to go within the bits of the @@ -203,7 +203,7 @@ bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits, // We split bit-fields up into individual bytes. Walk over the bytes and // update them. for (CharUnits OffsetInChars = - Context.toCharUnitsFromBits(OffsetInBits - OffsetWithinChar); + astContext.toCharUnitsFromBits(OffsetInBits - OffsetWithinChar); /**/; ++OffsetInChars) { // Number of bits we want to fill in this char. unsigned WantedBits = @@ -515,9 +515,9 @@ bool ConstStructBuilder::AppendField(const FieldDecl *Field, uint64_t FieldOffset, mlir::Attribute InitCst, bool AllowOverwrite) { - const ASTContext &Context = CGM.getASTContext(); + const ASTContext &astContext = CGM.getASTContext(); - CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset); + CharUnits FieldOffsetInChars = astContext.toCharUnitsFromBits(FieldOffset); return AppendBytes(FieldOffsetInChars, InitCst, AllowOverwrite); } @@ -1374,10 +1374,10 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) { if (auto *FD = dyn_cast(D)) { auto fop = CGM.GetAddrOfFunction(FD); auto builder = CGM.getBuilder(); - auto ctxt = builder.getContext(); + mlir::MLIRContext *mlirContext = builder.getContext(); return cir::GlobalViewAttr::get( builder.getPointerTo(fop.getFunctionType()), - mlir::FlatSymbolRefAttr::get(ctxt, fop.getSymNameAttr())); + mlir::FlatSymbolRefAttr::get(mlirContext, fop.getSymNameAttr())); } if (auto *VD = dyn_cast(D)) { @@ -2013,9 +2013,9 @@ static mlir::TypedAttr emitNullConstant(CIRGenModule &CGM, } } - mlir::MLIRContext *mlirCtx = structure.getContext(); - return cir::ConstStructAttr::get(mlirCtx, structure, - mlir::ArrayAttr::get(mlirCtx, elements)); + mlir::MLIRContext *mlirContext = structure.getContext(); + return cir::ConstStructAttr::get(mlirContext, structure, + mlir::ArrayAttr::get(mlirContext, elements)); } mlir::TypedAttr diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 2a5cfed9ff69..2f0d83869731 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1183,28 +1183,28 @@ mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *E, } /// If \p E is a widened promoted integer, get its base (unpromoted) type. -static std::optional getUnwidenedIntegerType(const ASTContext &Ctx, - const Expr *E) { +static std::optional +getUnwidenedIntegerType(const ASTContext &astContext, const Expr *E) { const Expr *Base = E->IgnoreImpCasts(); if (E == Base) return std::nullopt; QualType BaseTy = Base->getType(); - if (!Ctx.isPromotableIntegerType(BaseTy) || - Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType())) + if (!astContext.isPromotableIntegerType(BaseTy) || + astContext.getTypeSize(BaseTy) >= astContext.getTypeSize(E->getType())) return std::nullopt; return BaseTy; } /// Check if \p E is a widened promoted integer. -[[maybe_unused]] static bool IsWidenedIntegerOp(const ASTContext &Ctx, +[[maybe_unused]] static bool IsWidenedIntegerOp(const ASTContext &astContext, const Expr *E) { - return getUnwidenedIntegerType(Ctx, E).has_value(); + return getUnwidenedIntegerType(astContext, E).has_value(); } /// Check if we can skip the overflow check for \p Op. -[[maybe_unused]] static bool CanElideOverflowCheck(const ASTContext &Ctx, +[[maybe_unused]] static bool CanElideOverflowCheck(const ASTContext &astContext, const BinOpInfo &Op) { assert((isa(Op.E) || isa(Op.E)) && "Expected a unary or binary operator"); @@ -1221,11 +1221,11 @@ static std::optional getUnwidenedIntegerType(const ASTContext &Ctx, // We usually don't need overflow checks for binops with widened operands. // Multiplication with promoted unsigned operands is a special case. const auto *BO = cast(Op.E); - auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS()); + auto OptionalLHSTy = getUnwidenedIntegerType(astContext, BO->getLHS()); if (!OptionalLHSTy) return false; - auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS()); + auto OptionalRHSTy = getUnwidenedIntegerType(astContext, BO->getRHS()); if (!OptionalRHSTy) return false; @@ -1240,9 +1240,9 @@ static std::optional getUnwidenedIntegerType(const ASTContext &Ctx, // For unsigned multiplication the overflow check can be elided if either one // of the unpromoted types are less than half the size of the promoted type. - unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType()); - return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize || - (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize; + unsigned PromotedSize = astContext.getTypeSize(Op.E->getType()); + return (2 * astContext.getTypeSize(LHSTy)) < PromotedSize || + (2 * astContext.getTypeSize(RHSTy)) < PromotedSize; } /// Emit pointer + index arithmetic. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index fa46b0bd2cef..cd7763f2ef79 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -863,14 +863,15 @@ bool CIRGenFunction::ShouldXRayInstrumentFunction() const { return CGM.getCodeGenOpts().XRayInstrumentFunctions; } -static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) { +static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &astContext) { auto *MD = dyn_cast_or_null(D); if (!MD || !MD->getDeclName().getAsIdentifierInfo() || !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") || (MD->getNumParams() != 1 && MD->getNumParams() != 2)) return false; - if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType()) + if (MD->parameters()[0]->getType().getCanonicalType() != + astContext.getSizeType()) return false; if (MD->getNumParams() == 2) { diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 5f00189ef90c..ea5cd755ec26 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -417,7 +417,7 @@ bool CIRGenItaniumCXXABI::classifyReturnType(CIRGenFunctionInfo &FI) const { CIRGenCXXABI::AddedStructorArgCounts CIRGenItaniumCXXABI::buildStructorSignature( GlobalDecl GD, llvm::SmallVectorImpl &ArgTys) { - auto &Context = getContext(); + clang::ASTContext &astContext = getContext(); // All parameters are already in place except VTT, which goes after 'this'. // These are clang types, so we don't need to worry about sret yet. @@ -427,9 +427,9 @@ CIRGenItaniumCXXABI::buildStructorSignature( : GD.getDtorType() == Dtor_Base) && cast(GD.getDecl())->getParent()->getNumVBases() != 0) { LangAS AS = CGM.getGlobalVarAddressSpace(nullptr); - QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS); + QualType Q = astContext.getAddrSpaceQualType(astContext.VoidPtrTy, AS); ArgTys.insert(ArgTys.begin() + 1, - Context.getPointerType(CanQualType::CreateUnsafe(Q))); + astContext.getPointerType(CanQualType::CreateUnsafe(Q))); return AddedStructorArgCounts::prefix(1); } @@ -569,15 +569,15 @@ void CIRGenItaniumCXXABI::addImplicitStructorParams(CIRGenFunction &CGF, // Check if we need a VTT parameter as well. if (NeedsVTTParameter(CGF.CurGD)) { - ASTContext &Context = getContext(); + ASTContext &astContext = getContext(); // FIXME: avoid the fake decl LangAS AS = CGM.getGlobalVarAddressSpace(nullptr); - QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS); - QualType T = Context.getPointerType(Q); + QualType Q = astContext.getAddrSpaceQualType(astContext.VoidPtrTy, AS); + QualType T = astContext.getPointerType(Q); auto *VTTDecl = ImplicitParamDecl::Create( - Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"), - T, ImplicitParamKind::CXXVTT); + astContext, /*DC=*/nullptr, MD->getLocation(), + &astContext.Idents.get("vtt"), T, ImplicitParamKind::CXXVTT); Params.insert(Params.begin() + 1, VTTDecl); getStructorImplicitParamDecl(CGF) = VTTDecl; } @@ -1309,11 +1309,11 @@ static bool IsStandardLibraryRTTIDescriptor(QualType Ty) { /// standard-library type. /// TODO(cir): this can unified with LLVM codegen static bool ShouldUseExternalRTTIDescriptor(CIRGenModule &CGM, QualType Ty) { - ASTContext &Context = CGM.getASTContext(); + ASTContext &astContext = CGM.getASTContext(); // If RTTI is disabled, assume it might be disabled in the // translation unit that defines any potential key function, too. - if (!Context.getLangOpts().RTTI) + if (!astContext.getLangOpts().RTTI) return false; if (const RecordType *RecordTy = dyn_cast(Ty)) { @@ -2347,7 +2347,7 @@ void CIRGenItaniumCXXABI::emitBadCastCall(CIRGenFunction &CGF, emitCallToBadCast(CGF, loc); } -static CharUnits computeOffsetHint(ASTContext &Context, +static CharUnits computeOffsetHint(ASTContext &astContext, const CXXRecordDecl *Src, const CXXRecordDecl *Dst) { CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, @@ -2378,7 +2378,8 @@ static CharUnits computeOffsetHint(ASTContext &Context, continue; // Accumulate the base class offsets. - const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class); + const ASTRecordLayout &L = + astContext.getASTRecordLayout(PathElement.Class); Offset += L.getBaseClassOffset( PathElement.Base->getType()->getAsCXXRecordDecl()); } @@ -2620,9 +2621,9 @@ CIRGenItaniumCXXABI::buildVirtualMethodAttr(cir::MethodType MethodTy, // Multiply by 4-byte relative offsets. VTableOffset = Index * 4; } else { - const ASTContext &Context = getContext(); - CharUnits PointerWidth = Context.toCharUnitsFromBits( - Context.getTargetInfo().getPointerWidth(LangAS::Default)); + const ASTContext &astContext = getContext(); + CharUnits PointerWidth = astContext.toCharUnitsFromBits( + astContext.getTargetInfo().getPointerWidth(LangAS::Default)); VTableOffset = Index * PointerWidth.getQuantity(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 17b1f6e0ce3f..69e611972a6c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -102,15 +102,16 @@ static CIRGenCXXABI *createCXXABI(CIRGenModule &CGM) { } } -CIRGenModule::CIRGenModule(mlir::MLIRContext &context, - clang::ASTContext &astctx, +CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, + clang::ASTContext &astContext, const clang::CodeGenOptions &CGO, DiagnosticsEngine &Diags) - : builder(context, *this), astCtx(astctx), langOpts(astctx.getLangOpts()), - codeGenOpts(CGO), + : builder(mlirContext, *this), astContext(astContext), + langOpts(astContext.getLangOpts()), codeGenOpts(CGO), theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), - target(astCtx.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, - VTables{*this}, openMPRuntime(new CIRGenOpenMPRuntime(*this)) { + target(astContext.getTargetInfo()), ABI(createCXXABI(*this)), + genTypes{*this}, VTables{*this}, + openMPRuntime(new CIRGenOpenMPRuntime(*this)) { // Initialize CIR signed integer types cache. SInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true); @@ -140,20 +141,20 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // TODO: PointerWidthInBits PointerAlignInBytes = - astctx + astContext .toCharUnitsFromBits( - astctx.getTargetInfo().getPointerAlign(LangAS::Default)) + astContext.getTargetInfo().getPointerAlign(LangAS::Default)) .getQuantity(); // TODO: SizeSizeInBytes // TODO: IntAlignInBytes UCharTy = cir::IntType::get(&getMLIRContext(), - astCtx.getTargetInfo().getCharWidth(), + astContext.getTargetInfo().getCharWidth(), /*isSigned=*/false); - UIntTy = - cir::IntType::get(&getMLIRContext(), astCtx.getTargetInfo().getIntWidth(), - /*isSigned=*/false); + UIntTy = cir::IntType::get(&getMLIRContext(), + astContext.getTargetInfo().getIntWidth(), + /*isSigned=*/false); UIntPtrTy = cir::IntType::get(&getMLIRContext(), - astCtx.getTargetInfo().getMaxPointerWidth(), + astContext.getTargetInfo().getMaxPointerWidth(), /*isSigned=*/false); UInt8PtrTy = builder.getPointerTo(UInt8Ty); UInt8PtrPtrTy = builder.getPointerTo(UInt8PtrTy); @@ -163,7 +164,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, CIRAllocaAddressSpace = getTargetCIRGenInfo().getCIRAllocaAddressSpace(); PtrDiffTy = cir::IntType::get(&getMLIRContext(), - astCtx.getTargetInfo().getMaxPointerWidth(), + astContext.getTargetInfo().getMaxPointerWidth(), /*isSigned=*/true); if (langOpts.OpenCL) { @@ -186,32 +187,33 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &context, // FIXME(cir): Implement a custom CIR Module Op and attributes to leverage // MLIR features. theModule->setAttr(cir::CIRDialect::getSOBAttrName(), - cir::SignedOverflowBehaviorAttr::get(&context, sob)); - auto lang = SourceLanguageAttr::get(&context, getCIRSourceLanguage()); + cir::SignedOverflowBehaviorAttr::get(&mlirContext, sob)); + auto lang = SourceLanguageAttr::get(&mlirContext, getCIRSourceLanguage()); theModule->setAttr(cir::CIRDialect::getLangAttrName(), - cir::LangAttr::get(&context, lang)); + cir::LangAttr::get(&mlirContext, lang)); theModule->setAttr(cir::CIRDialect::getTripleAttrName(), builder.getStringAttr(getTriple().str())); if (CGO.OptimizationLevel > 0 || CGO.OptimizeSize > 0) theModule->setAttr(cir::CIRDialect::getOptInfoAttrName(), - cir::OptInfoAttr::get(&context, CGO.OptimizationLevel, + cir::OptInfoAttr::get(&mlirContext, + CGO.OptimizationLevel, CGO.OptimizeSize)); // Set the module name to be the name of the main file. TranslationUnitDecl // often contains invalid source locations and isn't a reliable source for the // module location. - auto MainFileID = astctx.getSourceManager().getMainFileID(); + auto MainFileID = astContext.getSourceManager().getMainFileID(); const FileEntry &MainFile = - *astctx.getSourceManager().getFileEntryForID(MainFileID); + *astContext.getSourceManager().getFileEntryForID(MainFileID); auto Path = MainFile.tryGetRealPathName(); if (!Path.empty()) { theModule.setSymName(Path); - theModule->setLoc(mlir::FileLineColLoc::get(&context, Path, + theModule->setLoc(mlir::FileLineColLoc::get(&mlirContext, Path, /*line=*/0, /*col=*/0)); } if (langOpts.Sanitize.has(SanitizerKind::Thread) || (!codeGenOpts.RelaxedAliasing && codeGenOpts.OptimizationLevel > 0)) { - tbaa.reset(new CIRGenTBAA(&context, astctx, genTypes, theModule, + tbaa.reset(new CIRGenTBAA(&mlirContext, astContext, genTypes, theModule, codeGenOpts, langOpts)); } } @@ -220,12 +222,12 @@ CIRGenModule::~CIRGenModule() {} bool CIRGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor, bool ExcludeDtor) { - if (!Ty.isConstant(astCtx) && !Ty->isReferenceType()) + if (!Ty.isConstant(astContext) && !Ty->isReferenceType()) return false; - if (astCtx.getLangOpts().CPlusPlus) { + if (astContext.getLangOpts().CPlusPlus) { if (const CXXRecordDecl *Record = - astCtx.getBaseElementType(Ty)->getAsCXXRecordDecl()) + astContext.getBaseElementType(Ty)->getAsCXXRecordDecl()) return ExcludeCtor && !Record->hasMutableFields() && (Record->hasTrivialDestructor() || ExcludeDtor); } @@ -241,7 +243,7 @@ CharUnits CIRGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) { if (!RD->hasDefinition()) return CharUnits::One(); // Hopefully won't be used anywhere. - auto &layout = astCtx.getASTRecordLayout(RD); + auto &layout = astContext.getASTRecordLayout(RD); // If the class is final, then we know that the pointer points to an // object of that type and can use the full alignment. @@ -280,7 +282,7 @@ CharUnits CIRGenModule::getNaturalTypeAlignment(QualType T, if (auto Align = TT->getDecl()->getMaxAlignment()) { if (BaseInfo) *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType); - return astCtx.toCharUnitsFromBits(Align); + return astContext.toCharUnitsFromBits(Align); } } @@ -288,7 +290,7 @@ CharUnits CIRGenModule::getNaturalTypeAlignment(QualType T, // Analyze the base element type, so we don't get confused by incomplete // array types. - T = astCtx.getBaseElementType(T); + T = astContext.getBaseElementType(T); if (T->isIncompleteType()) { // We could try to replicate the logic from @@ -316,13 +318,14 @@ CharUnits CIRGenModule::getNaturalTypeAlignment(QualType T, // non-virtual alignment. Alignment = getClassPointerAlignment(RD); } else { - Alignment = astCtx.getTypeAlignInChars(T); + Alignment = astContext.getTypeAlignInChars(T); } // Cap to the global maximum type alignment unless the alignment // was somehow explicit on the type. - if (unsigned MaxAlign = astCtx.getLangOpts().MaxTypeAlign) { - if (Alignment.getQuantity() > MaxAlign && !astCtx.isAlignmentRequired(T)) + if (unsigned MaxAlign = astContext.getLangOpts().MaxTypeAlign) { + if (Alignment.getQuantity() > MaxAlign && + !astContext.isAlignmentRequired(T)) Alignment = CharUnits::fromQuantity(MaxAlign); } return Alignment; @@ -364,7 +367,7 @@ bool CIRGenModule::MayBeEmittedEagerly(const ValueDecl *Global) { // A definition of an inline constexpr static data member may change // linkage later if it's redeclared outside the class. // TODO(cir): do we care? - assert(astCtx.getInlineVariableDefinitionKind(VD) != + assert(astContext.getInlineVariableDefinitionKind(VD) != ASTContext::InlineVariableDefinitionKind::WeakUnknown && "not implemented"); @@ -552,7 +555,7 @@ void CIRGenModule::emitGlobal(GlobalDecl GD) { const auto *VD = cast(Global); assert(VD->isFileVarDecl() && "Cannot emit local var decl as global."); if (VD->isThisDeclarationADefinition() != VarDecl::Definition && - !astCtx.isMSStaticDataMemberInlineDefinition(VD)) { + !astContext.isMSStaticDataMemberInlineDefinition(VD)) { if (langOpts.OpenMP) { // Emit declaration of the must-be-emitted declare target variable. if (std::optional Res = @@ -562,7 +565,7 @@ void CIRGenModule::emitGlobal(GlobalDecl GD) { } // If this declaration may have caused an inline variable definition to // change linkage, make sure that it's emitted. - if (astCtx.getInlineVariableDefinitionKind(VD) == + if (astContext.getInlineVariableDefinitionKind(VD) == ASTContext::InlineVariableDefinitionKind::Strong) getAddrOfGlobalVar(VD); return; @@ -968,7 +971,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // FIXME: This code is overly simple and should be merged with other global // handling. - GV.setAlignmentAttr(getSize(astCtx.getDeclAlign(D))); + GV.setAlignmentAttr(getSize(astContext.getDeclAlign(D))); GV.setConstant(isTypeConstant(D->getType(), false, false)); // TODO(cir): setLinkageForGV(GV, D); @@ -982,7 +985,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef MangledName, mlir::Type Ty, // If required by the ABI, treat declarations of static data members with // inline initializers as definitions. - if (astCtx.isMSStaticDataMemberInlineDefinition(D)) { + if (astContext.isMSStaticDataMemberInlineDefinition(D)) { assert(0 && "not implemented"); } @@ -1147,10 +1150,10 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *D, // If yes, we shouldn't emit the GloablCtor and GlobalDtor for the variable // since this is the job for its original source. bool IsDefinitionAvailableExternally = - astCtx.GetGVALinkageForVariable(D) == GVA_AvailableExternally; + astContext.GetGVALinkageForVariable(D) == GVA_AvailableExternally; bool NeedsGlobalDtor = !IsDefinitionAvailableExternally && - D->needsDestruction(astCtx) == QualType::DK_cxx_destructor; + D->needsDestruction(astContext) == QualType::DK_cxx_destructor; // It is helpless to emit the definition for an available_externally variable // which can't be marked as const. @@ -1212,7 +1215,7 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *D, T = D->getType(); if (getLangOpts().CPlusPlus) { - if (InitDecl->hasFlexibleArrayInit(astCtx)) + if (InitDecl->hasFlexibleArrayInit(astContext)) ErrorUnsupported(D, "flexible array initializer"); Init = builder.getZeroInitAttr(getCIRType(T)); if (!IsDefinitionAvailableExternally) @@ -1325,7 +1328,7 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *D, // weak or linkonce, the de-duplication semantics are important to preserve, // so we don't change the linkage. if (D->getTLSKind() == VarDecl::TLS_Dynamic && GV.isPublic() && - astCtx.getTargetInfo().getTriple().isOSDarwin() && + astContext.getTargetInfo().getTriple().isOSDarwin() && !D->hasAttr()) { // TODO(cir): set to mlir::SymbolTable::Visibility::Private once we have // testcases. @@ -1427,7 +1430,8 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { SmallString<64> Str(E->getString()); // Resize the string to the right size, which is indicated by its type. - const ConstantArrayType *CAT = astCtx.getAsConstantArrayType(E->getType()); + const ConstantArrayType *CAT = + astContext.getAsConstantArrayType(E->getType()); auto finalSize = CAT->getSize().getZExtValue(); Str.resize(finalSize); @@ -1524,7 +1528,7 @@ cir::GlobalViewAttr CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name) { CharUnits Alignment = - astCtx.getAlignOfGlobalVarInChars(S->getType(), /*VD=*/nullptr); + astContext.getAlignOfGlobalVarInChars(S->getType(), /*VD=*/nullptr); mlir::Attribute C = getConstantArrayFromStringLiteral(S); @@ -1847,7 +1851,7 @@ static bool shouldBeInCOMDAT(CIRGenModule &CGM, const Decl &D) { } // TODO(cir): this could be a common method between LLVM codegen. -static bool isVarDeclStrongDefinition(const ASTContext &Context, +static bool isVarDeclStrongDefinition(const ASTContext &astContext, CIRGenModule &CGM, const VarDecl *D, bool NoCommon) { // Don't give variables common linkage if -fno-common was specified unless it @@ -1889,11 +1893,11 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context, // Declarations with a required alignment do not have common linkage in MSVC // mode. - if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { + if (astContext.getTargetInfo().getCXXABI().isMicrosoft()) { if (D->hasAttr()) return true; QualType VarType = D->getType(); - if (Context.isAlignmentRequired(VarType)) + if (astContext.isAlignmentRequired(VarType)) return true; if (const auto *RT = VarType->getAs()) { @@ -1903,7 +1907,7 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context, continue; if (FD->hasAttr()) return true; - if (Context.isAlignmentRequired(FD->getType())) + if (astContext.isAlignmentRequired(FD->getType())) return true; } } @@ -1915,9 +1919,9 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context, // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two // alignments for common symbols via the aligncomm directive, so this // restriction only applies to MSVC environments. - if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() && - Context.getTypeAlignIfKnown(D->getType()) > - Context.toBits(CharUnits::fromQuantity(32))) + if (astContext.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() && + astContext.getTypeAlignIfKnown(D->getType()) > + astContext.toBits(CharUnits::fromQuantity(32))) return true; return false; @@ -1973,6 +1977,7 @@ cir::VisibilityKind CIRGenModule::getGlobalVisibilityKindFromClangVisibility( case clang::VisibilityAttr::VisibilityType::Protected: return VisibilityKind::Protected; } + llvm_unreachable("unexpected visibility value"); } cir::VisibilityAttr @@ -2021,7 +2026,7 @@ cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( // merged with other definitions. c) C++ has the ODR, so we know the // definition is dependable. if (Linkage == GVA_DiscardableODR) - return !astCtx.getLangOpts().AppleKext + return !astContext.getLangOpts().AppleKext ? cir::GlobalLinkageKind::LinkOnceODRLinkage : cir::GlobalLinkageKind::InternalLinkage; @@ -2049,7 +2054,7 @@ cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator( // C++ doesn't have tentative definitions and thus cannot have common // linkage. if (!getLangOpts().CPlusPlus && isa(D) && - !isVarDeclStrongDefinition(astCtx, *this, cast(D), + !isVarDeclStrongDefinition(astContext, *this, cast(D), getCodeGenOpts().NoCommon)) return cir::GlobalLinkageKind::CommonLinkage; @@ -2118,14 +2123,14 @@ void CIRGenModule::ReplaceUsesOfNonProtoTypeWithRealFunction( cir::GlobalLinkageKind CIRGenModule::getCIRLinkageVarDefinition(const VarDecl *VD, bool IsConstant) { assert(!IsConstant && "constant variables NYI"); - GVALinkage Linkage = astCtx.GetGVALinkageForVariable(VD); + GVALinkage Linkage = astContext.GetGVALinkageForVariable(VD); return getCIRLinkageForDeclarator(VD, Linkage, IsConstant); } cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) { const auto *D = cast(GD.getDecl()); - GVALinkage Linkage = astCtx.GetGVALinkageForFunction(D); + GVALinkage Linkage = astContext.GetGVALinkageForFunction(D); if (const auto *Dtor = dyn_cast(D)) return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType()); @@ -2806,7 +2811,7 @@ cir::FuncOp CIRGenModule::GetOrCreateCIRFunction( mlir::Location CIRGenModule::getLoc(SourceLocation SLoc) { assert(SLoc.isValid() && "expected valid source location"); - const SourceManager &SM = astCtx.getSourceManager(); + const SourceManager &SM = astContext.getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(SLoc); StringRef Filename = PLoc.getFilename(); return mlir::FileLineColLoc::get(builder.getStringAttr(Filename), @@ -2920,7 +2925,7 @@ void CIRGenModule::emitDeferred(unsigned recursionLimit) { if (getCodeGenOpts().ClangIRSkipFunctionsFromSystemHeaders) { auto *decl = D.getDecl(); assert(decl && "expected decl"); - if (astCtx.getSourceManager().isInSystemHeader(decl->getLocation())) + if (astContext.getSourceManager().isInSystemHeader(decl->getLocation())) continue; } @@ -2984,7 +2989,7 @@ void CIRGenModule::Release() { assert(!MissingFeatures::registerGlobalDtorsWithAtExit()); assert(!MissingFeatures::emitCXXThreadLocalInitFunc()); assert(!MissingFeatures::objCRuntime()); - if (astCtx.getLangOpts().CUDA) { + if (astContext.getLangOpts().CUDA) { llvm_unreachable("NYI"); } assert(!MissingFeatures::openMPRuntime()); @@ -3003,7 +3008,7 @@ void CIRGenModule::Release() { if (langOpts.Sanitize.has(SanitizerKind::KCFI)) llvm_unreachable("NYI"); assert(!MissingFeatures::emitAtAvailableLinkGuard()); - if (astCtx.getTargetInfo().getTriple().isWasm()) + if (astContext.getTargetInfo().getTriple().isWasm()) llvm_unreachable("NYI"); if (getTriple().isAMDGPU() || @@ -3015,7 +3020,7 @@ void CIRGenModule::Release() { // used by host functions and mark it as used for CUDA/HIP. This is necessary // to get kernels or device variables in archives linked in even if these // kernels or device variables are only used in host functions. - if (!astCtx.CUDAExternalDeviceDeclODRUsedByHost.empty()) { + if (!astContext.CUDAExternalDeviceDeclODRUsedByHost.empty()) { llvm_unreachable("NYI"); } if (langOpts.HIP && !getLangOpts().OffloadingNewDriver) { @@ -3024,7 +3029,7 @@ void CIRGenModule::Release() { assert(!MissingFeatures::emitLLVMUsed()); assert(!MissingFeatures::sanStats()); - if (codeGenOpts.Autolink && (astCtx.getLangOpts().Modules || + if (codeGenOpts.Autolink && (astContext.getLangOpts().Modules || !MissingFeatures::linkerOptionsMetadata())) { assert(!MissingFeatures::emitModuleLinkOptions()); } @@ -3048,7 +3053,7 @@ void CIRGenModule::Release() { if (codeGenOpts.Dwarf64) llvm_unreachable("NYI"); - if (astCtx.getLangOpts().SemanticInterposition) + if (astContext.getLangOpts().SemanticInterposition) // Require various optimization to respect semantic interposition. llvm_unreachable("NYI"); @@ -3070,7 +3075,7 @@ void CIRGenModule::Release() { // Function ID tables for EH Continuation Guard. llvm_unreachable("NYI"); } - if (astCtx.getLangOpts().Kernel) { + if (astContext.getLangOpts().Kernel) { // Note if we are compiling with /kernel. llvm_unreachable("NYI"); } @@ -3095,7 +3100,7 @@ void CIRGenModule::Release() { llvm_unreachable("NYI"); } - llvm::Triple t = astCtx.getTargetInfo().getTriple(); + llvm::Triple t = astContext.getTargetInfo().getTriple(); if (t.isARM() || t.isThumb()) { // The minimum width of an enum in bytes assert(!MissingFeatures::enumWidth()); @@ -3156,7 +3161,7 @@ void CIRGenModule::Release() { // different flags therefore module flags are set to "Min" behavior to achieve // the same end result of the normal build where e.g BTI is off if any object // doesn't support it. - if (astCtx.getTargetInfo().hasFeature("ptrauth") && + if (astContext.getTargetInfo().hasFeature("ptrauth") && langOpts.getSignReturnAddressScope() != LangOptions::SignReturnAddressScopeKind::None) llvm_unreachable("NYI"); @@ -3220,10 +3225,10 @@ void CIRGenModule::Release() { if (langOpts.HLSL) llvm_unreachable("NYI"); - if (uint32_t picLevel = astCtx.getLangOpts().PICLevel) { + if (uint32_t picLevel = astContext.getLangOpts().PICLevel) { assert(picLevel < 3 && "Invalid PIC Level"); assert(!MissingFeatures::setPICLevel()); - if (astCtx.getLangOpts().PIE) + if (astContext.getLangOpts().PIE) assert(!MissingFeatures::setPIELevel()); } @@ -3242,7 +3247,7 @@ void CIRGenModule::Release() { assert(!MissingFeatures::codeModel()); if ((cm == llvm::CodeModel::Medium || cm == llvm::CodeModel::Large) && - astCtx.getTargetInfo().getTriple().getArch() == + astContext.getTargetInfo().getTriple().getArch() == llvm::Triple::x86_64) { assert(!MissingFeatures::largeDataThreshold()); } @@ -3379,7 +3384,7 @@ bool CIRGenModule::isTriviallyRecursive(const FunctionDecl *func) { name = func->getName(); } - FunctionIsDirectlyRecursive walker(name, astCtx.BuiltinInfo); + FunctionIsDirectlyRecursive walker(name, astContext.BuiltinInfo); const Stmt *body = func->getBody(); return body ? walker.Visit(body) : false; } @@ -3403,7 +3408,7 @@ bool CIRGenModule::shouldEmitFunction(GlobalDecl globalDecl) { // behavior may break ABI compatibility of the current unit. if (const Module *mod = func->getOwningModule(); mod && mod->getTopLevelModule()->isNamedModule() && - astCtx.getCurrentNamedModule() != mod->getTopLevelModule()) { + astContext.getCurrentNamedModule() != mod->getTopLevelModule()) { // There are practices to mark template member function as always-inline // and mark the template as extern explicit instantiation but not give // the definition for member function. So we have to emit the function @@ -3658,7 +3663,7 @@ CharUnits CIRGenModule::computeNonVirtualBaseClassOffset( CastExpr::path_const_iterator End) { CharUnits Offset = CharUnits::Zero(); - const ASTContext &Context = getASTContext(); + const ASTContext &astContext = getASTContext(); const CXXRecordDecl *RD = DerivedClass; for (CastExpr::path_const_iterator I = Start; I != End; ++I) { @@ -3666,7 +3671,7 @@ CharUnits CIRGenModule::computeNonVirtualBaseClassOffset( assert(!Base->isVirtual() && "Should not see virtual bases here!"); // Get the layout. - const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + const ASTRecordLayout &Layout = astContext.getASTRecordLayout(RD); const auto *BaseDecl = cast(Base->getType()->castAs()->getDecl()); @@ -3682,7 +3687,7 @@ CharUnits CIRGenModule::computeNonVirtualBaseClassOffset( void CIRGenModule::Error(SourceLocation loc, StringRef message) { unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0"); - getDiags().Report(astCtx.getFullLoc(loc), diagID) << message; + getDiags().Report(astContext.getFullLoc(loc), diagID) << message; } /// Print out an error that codegen doesn't support the specified stmt yet. @@ -3690,7 +3695,7 @@ void CIRGenModule::ErrorUnsupported(const Stmt *S, const char *Type) { unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "cannot compile this %0 yet"); std::string Msg = Type; - getDiags().Report(astCtx.getFullLoc(S->getBeginLoc()), DiagID) + getDiags().Report(astContext.getFullLoc(S->getBeginLoc()), DiagID) << Msg << S->getSourceRange(); } @@ -3699,7 +3704,7 @@ void CIRGenModule::ErrorUnsupported(const Decl *D, const char *Type) { unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "cannot compile this %0 yet"); std::string Msg = Type; - getDiags().Report(astCtx.getFullLoc(D->getLocation()), DiagID) << Msg; + getDiags().Report(astContext.getFullLoc(D->getLocation()), DiagID) << Msg; } cir::SourceLanguage CIRGenModule::getCIRSourceLanguage() { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 771c90a2bccd..9d7c1eb572a6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -64,7 +64,7 @@ class CIRGenModule : public CIRGenTypeCache { CIRGenModule &operator=(CIRGenModule &) = delete; public: - CIRGenModule(mlir::MLIRContext &context, clang::ASTContext &astctx, + CIRGenModule(mlir::MLIRContext &mlirContext, clang::ASTContext &astContext, const clang::CodeGenOptions &CGO, clang::DiagnosticsEngine &Diags); @@ -81,7 +81,7 @@ class CIRGenModule : public CIRGenTypeCache { CIRGenBuilderTy builder; /// Hold Clang AST information. - clang::ASTContext &astCtx; + clang::ASTContext &astContext; const clang::LangOptions &langOpts; @@ -153,7 +153,7 @@ class CIRGenModule : public CIRGenTypeCache { public: mlir::ModuleOp getModule() const { return theModule; } CIRGenBuilderTy &getBuilder() { return builder; } - clang::ASTContext &getASTContext() const { return astCtx; } + clang::ASTContext &getASTContext() const { return astContext; } const clang::TargetInfo &getTarget() const { return target; } const clang::CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; } clang::DiagnosticsEngine &getDiags() const { return Diags; } @@ -615,7 +615,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Whether this function's return type has no side effects, and thus may be /// trivially discared if it is unused. - bool MayDropFunctionReturn(const clang::ASTContext &Context, + bool MayDropFunctionReturn(const clang::ASTContext &astContext, clang::QualType ReturnType); bool isInNoSanitizeList(clang::SanitizerMask Kind, cir::FuncOp Fn, diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp index c3083e93eeb1..a6efc05e4110 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp @@ -9,19 +9,20 @@ #include "llvm/Support/ErrorHandling.h" namespace clang::CIRGen { -cir::TBAAAttr tbaa_NYI(mlir::MLIRContext *ctx) { - return cir::TBAAAttr::get(ctx); +cir::TBAAAttr tbaa_NYI(mlir::MLIRContext *mlirContext) { + return cir::TBAAAttr::get(mlirContext); } -CIRGenTBAA::CIRGenTBAA(mlir::MLIRContext *ctx, clang::ASTContext &context, - CIRGenTypes &types, mlir::ModuleOp moduleOp, +CIRGenTBAA::CIRGenTBAA(mlir::MLIRContext *mlirContext, + clang::ASTContext &astContext, CIRGenTypes &types, + mlir::ModuleOp moduleOp, const clang::CodeGenOptions &codeGenOpts, const clang::LangOptions &features) - : ctx(ctx), context(context), types(types), moduleOp(moduleOp), - codeGenOpts(codeGenOpts), features(features) {} + : mlirContext(mlirContext), astContext(astContext), types(types), + moduleOp(moduleOp), codeGenOpts(codeGenOpts), features(features) {} cir::TBAAAttr CIRGenTBAA::getTypeInfo(clang::QualType qty) { - return tbaa_NYI(ctx); + return tbaa_NYI(mlirContext); } TBAAAccessInfo CIRGenTBAA::getAccessInfo(clang::QualType accessType) { @@ -33,15 +34,15 @@ TBAAAccessInfo CIRGenTBAA::getVTablePtrAccessInfo(mlir::Type vtablePtrType) { } mlir::ArrayAttr CIRGenTBAA::getTBAAStructInfo(clang::QualType qty) { - return mlir::ArrayAttr::get(ctx, {}); + return mlir::ArrayAttr::get(mlirContext, {}); } cir::TBAAAttr CIRGenTBAA::getBaseTypeInfo(clang::QualType qty) { - return tbaa_NYI(ctx); + return tbaa_NYI(mlirContext); } mlir::ArrayAttr CIRGenTBAA::getAccessTagInfo(TBAAAccessInfo tbaaInfo) { - return mlir::ArrayAttr::get(ctx, {tbaa_NYI(ctx)}); + return mlir::ArrayAttr::get(mlirContext, {tbaa_NYI(mlirContext)}); } TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForCast(TBAAAccessInfo sourceInfo, diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h index 49ff321e342a..3f59a0e6538b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.h +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.h @@ -97,15 +97,15 @@ struct TBAAAccessInfo { /// This class organizes the cross-module state that is used while lowering AST /// types to LLVM types. class CIRGenTBAA { - mlir::MLIRContext *ctx; - [[maybe_unused]] clang::ASTContext &context; + mlir::MLIRContext *mlirContext; + [[maybe_unused]] clang::ASTContext &astContext; [[maybe_unused]] CIRGenTypes &types; mlir::ModuleOp moduleOp; [[maybe_unused]] const clang::CodeGenOptions &codeGenOpts; [[maybe_unused]] const clang::LangOptions &features; public: - CIRGenTBAA(mlir::MLIRContext *ctx, clang::ASTContext &context, + CIRGenTBAA(mlir::MLIRContext *mlirContext, clang::ASTContext &astContext, CIRGenTypes &types, mlir::ModuleOp moduleOp, const clang::CodeGenOptions &codeGenOpts, const clang::LangOptions &features); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 5483a0f805a5..799d875739d5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -40,7 +40,7 @@ CIRGenTypes::ClangCallConvToCIRCallConv(clang::CallingConv CC) { } CIRGenTypes::CIRGenTypes(CIRGenModule &cgm) - : Context(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm}, + : astContext(cgm.getASTContext()), Builder(cgm.getBuilder()), CGM{cgm}, Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()), TheABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) { SkippedLayout = false; @@ -188,7 +188,7 @@ static bool isSafeToConvert(const RecordDecl *RD, CIRGenTypes &CGT) { mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *RD) { // TagDecl's are not necessarily unique, instead use the (clang) type // connected to the decl. - const auto *key = Context.getTagDeclType(RD).getTypePtr(); + const auto *key = astContext.getTagDeclType(RD).getTypePtr(); cir::StructType entry = recordDeclTypes[key]; // Handle forward decl / incomplete types. @@ -358,12 +358,12 @@ bool CIRGenTypes::isFuncTypeConvertible(const FunctionType *FT) { /// ConvertType - Convert the specified type to its MLIR form. mlir::Type CIRGenTypes::ConvertType(QualType T) { - T = Context.getCanonicalType(T); + T = astContext.getCanonicalType(T); const Type *Ty = T.getTypePtr(); // For the device-side compilation, CUDA device builtin surface/texture types // may be represented in different types. - assert(!Context.getLangOpts().CUDAIsDevice && "not implemented"); + assert(!astContext.getLangOpts().CUDAIsDevice && "not implemented"); if (const auto *recordType = dyn_cast(T)) return convertRecordDeclType(recordType->getDecl()); @@ -442,8 +442,9 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::SatLongFract: case BuiltinType::SatShortAccum: case BuiltinType::SatShortFract: - ResultType = cir::IntType::get(&getMLIRContext(), Context.getTypeSize(T), - /*isSigned=*/true); + ResultType = + cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(T), + /*isSigned=*/true); break; // Unsigned types. case BuiltinType::Char16: @@ -470,7 +471,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case BuiltinType::SatUShortAccum: case BuiltinType::SatUShortFract: ResultType = - cir::IntType::get(Builder.getContext(), Context.getTypeSize(T), + cir::IntType::get(Builder.getContext(), astContext.getTypeSize(T), /*isSigned=*/false); break; @@ -478,8 +479,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { ResultType = CGM.FP16Ty; break; case BuiltinType::Half: - if (Context.getLangOpts().NativeHalfType || - !Context.getTargetInfo().useFP16ConversionIntrinsics()) + if (astContext.getLangOpts().NativeHalfType || + !astContext.getTargetInfo().useFP16ConversionIntrinsics()) ResultType = CGM.FP16Ty; else llvm_unreachable("NYI"); @@ -494,11 +495,11 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { ResultType = CGM.DoubleTy; break; case BuiltinType::LongDouble: - ResultType = Builder.getLongDoubleTy(Context.getFloatTypeSemantics(T)); + ResultType = Builder.getLongDoubleTy(astContext.getFloatTypeSemantics(T)); break; case BuiltinType::Float128: case BuiltinType::Ibm128: - // FIXME: look at Context.getFloatTypeSemantics(T) and getTypeForFormat + // FIXME: look at astContext.getFloatTypeSemantics(T) and getTypeForFormat // on LLVM codegen. assert(0 && "not implemented"); break; @@ -746,8 +747,8 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { ResultType = convertTypeForMem(valueType); // Pad out to the inflated size if necessary. - uint64_t valueSize = Context.getTypeSize(valueType); - uint64_t atomicSize = Context.getTypeSize(Ty); + uint64_t valueSize = astContext.getTypeSize(valueType); + uint64_t atomicSize = astContext.getTypeSize(Ty); if (valueSize != atomicSize) { llvm_unreachable("NYI"); } @@ -872,7 +873,7 @@ void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { // Only complete if we converted it already. If we haven't converted it yet, // we'll just do it lazily. - if (recordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr())) + if (recordDeclTypes.count(astContext.getTagDeclType(RD).getTypePtr())) convertRecordDeclType(RD); // If necessary, provide the full definition of a type only used with a @@ -884,7 +885,7 @@ void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { /// Return record layout info for the given record decl. const CIRGenRecordLayout & CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *RD) { - const auto *Key = Context.getTagDeclType(RD).getTypePtr(); + const auto *Key = astContext.getTagDeclType(RD).getTypePtr(); auto I = CIRGenRecordLayouts.find(Key); if (I != CIRGenRecordLayouts.end()) @@ -908,15 +909,15 @@ bool CIRGenTypes::isPointerZeroInitializable(clang::QualType T) { bool CIRGenTypes::isZeroInitializable(QualType T) { if (T->getAs()) - return Context.getTargetNullPointerValue(T) == 0; + return astContext.getTargetNullPointerValue(T) == 0; - if (const auto *AT = Context.getAsArrayType(T)) { + if (const auto *AT = astContext.getAsArrayType(T)) { if (isa(AT)) return true; if (const auto *CAT = dyn_cast(AT)) - if (Context.getConstantArrayElementCount(CAT) == 0) + if (astContext.getConstantArrayElementCount(CAT) == 0) return true; - T = Context.getBaseElementType(T); + T = astContext.getBaseElementType(T); } // Records are non-zero-initializable if they contain any diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index fc59befb9501..c862d3232086 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -78,7 +78,7 @@ class CIRGenBuilderTy; /// This class organizes the cross-module state that is used while lowering /// AST types to CIR types. class CIRGenTypes { - clang::ASTContext &Context; + clang::ASTContext &astContext; CIRGenBuilderTy &Builder; CIRGenModule &CGM; const clang::TargetInfo &Target; @@ -140,7 +140,7 @@ class CIRGenTypes { using TypeCacheTy = llvm::DenseMap; TypeCacheTy TypeCache; - clang::ASTContext &getContext() const { return Context; } + clang::ASTContext &getContext() const { return astContext; } mlir::MLIRContext &getMLIRContext() const; bool isRecordLayoutComplete(const clang::Type *Ty) const; diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 932dd4bebeed..2fa51b534da4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -55,11 +55,11 @@ mlir::Type CIRGenVTables::getVTableComponentType() { mlir::Type CIRGenVTables::getVTableType(const VTableLayout &layout) { SmallVector tys; - auto ctx = CGM.getBuilder().getContext(); + mlir::MLIRContext *mlirContext = CGM.getBuilder().getContext(); auto componentType = getVTableComponentType(); for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) - tys.push_back( - cir::ArrayType::get(ctx, componentType, layout.getVTableSize(i))); + tys.push_back(cir::ArrayType::get(mlirContext, componentType, + layout.getVTableSize(i))); // FIXME(cir): should VTableLayout be encoded like we do for some // AST nodes? @@ -405,7 +405,7 @@ cir::GlobalLinkageKind CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { // We're at the end of the translation unit, so the current key // function is fully correct. - const CXXMethodDecl *keyFunction = astCtx.getCurrentKeyFunction(RD); + const CXXMethodDecl *keyFunction = astContext.getCurrentKeyFunction(RD); if (keyFunction && !RD->hasAttr()) { // If this class has a key function, use that to determine the // linkage of the vtable. @@ -425,19 +425,19 @@ cir::GlobalLinkageKind CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { return cir::GlobalLinkageKind::AvailableExternallyLinkage; if (keyFunction->isInlined()) - return !astCtx.getLangOpts().AppleKext + return !astContext.getLangOpts().AppleKext ? cir::GlobalLinkageKind::LinkOnceODRLinkage : cir::GlobalLinkageKind::InternalLinkage; return cir::GlobalLinkageKind::ExternalLinkage; case TSK_ImplicitInstantiation: - return !astCtx.getLangOpts().AppleKext + return !astContext.getLangOpts().AppleKext ? cir::GlobalLinkageKind::LinkOnceODRLinkage : cir::GlobalLinkageKind::InternalLinkage; case TSK_ExplicitInstantiationDefinition: - return !astCtx.getLangOpts().AppleKext + return !astContext.getLangOpts().AppleKext ? cir::GlobalLinkageKind::WeakODRLinkage : cir::GlobalLinkageKind::InternalLinkage; @@ -448,7 +448,7 @@ cir::GlobalLinkageKind CIRGenModule::getVTableLinkage(const CXXRecordDecl *RD) { // -fapple-kext mode does not support weak linkage, so we must use // internal linkage. - if (astCtx.getLangOpts().AppleKext) + if (astContext.getLangOpts().AppleKext) return cir::GlobalLinkageKind::InternalLinkage; auto DiscardableODRLinkage = cir::GlobalLinkageKind::LinkOnceODRLinkage; diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 8dd16f6ce9e1..6e26c8059a15 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -280,10 +280,10 @@ class LValue { } static LValue makeAddr(Address address, clang::QualType type, - clang::ASTContext &context, LValueBaseInfo baseInfo, + clang::ASTContext &astContext, LValueBaseInfo baseInfo, TBAAAccessInfo tbaaInfo) { clang::Qualifiers qs = type.getQualifiers(); - qs.setObjCGCAttr(context.getObjCGCAttrKind(type)); + qs.setObjCGCAttr(astContext.getObjCGCAttrKind(type)); LValue R; R.LVType = Simple; @@ -533,10 +533,10 @@ class AggValueSlot { /// Get the preferred size to use when storing a value to this slot. This /// is the type size unless that might overlap another object, in which /// case it's the dsize. - clang::CharUnits getPreferredSize(clang::ASTContext &Ctx, + clang::CharUnits getPreferredSize(clang::ASTContext &astContext, clang::QualType Type) { - return mayOverlap() ? Ctx.getTypeInfoDataSizeInChars(Type).Width - : Ctx.getTypeSizeInChars(Type); + return mayOverlap() ? astContext.getTypeInfoDataSizeInChars(Type).Width + : astContext.getTypeSizeInChars(Type); } }; diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index aefd0842bd9d..1269302ddcb0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -41,27 +41,29 @@ CIRGenerator::~CIRGenerator() { } static void setMLIRDataLayout(mlir::ModuleOp &mod, const llvm::DataLayout &dl) { - auto *context = mod.getContext(); - mlir::DataLayoutSpecInterface dlSpec = mlir::translateDataLayout(dl, context); + mlir::MLIRContext *mlirContext = mod.getContext(); + mlir::DataLayoutSpecInterface dlSpec = + mlir::translateDataLayout(dl, mlirContext); mod->setAttr(mlir::DLTIDialect::kDataLayoutAttrName, dlSpec); } -void CIRGenerator::Initialize(ASTContext &astCtx) { +void CIRGenerator::Initialize(ASTContext &astContext) { using namespace llvm; - this->astCtx = &astCtx; - - mlirCtx = std::make_unique(); - mlirCtx->getOrLoadDialect(); - mlirCtx->getOrLoadDialect(); - mlirCtx->getOrLoadDialect(); - mlirCtx->getOrLoadDialect(); - mlirCtx->getOrLoadDialect(); - mlirCtx->getOrLoadDialect(); - CGM = std::make_unique(*mlirCtx.get(), astCtx, - codeGenOpts, Diags); + this->astContext = &astContext; + + mlirContext = std::make_unique(); + mlirContext->getOrLoadDialect(); + mlirContext->getOrLoadDialect(); + mlirContext->getOrLoadDialect(); + mlirContext->getOrLoadDialect(); + mlirContext->getOrLoadDialect(); + mlirContext->getOrLoadDialect(); + CGM = std::make_unique( + *mlirContext.get(), astContext, codeGenOpts, Diags); auto mod = CGM->getModule(); - auto layout = llvm::DataLayout(astCtx.getTargetInfo().getDataLayoutString()); + auto layout = + llvm::DataLayout(astContext.getTargetInfo().getDataLayoutString()); setMLIRDataLayout(mod, layout); } @@ -152,11 +154,11 @@ void CIRGenerator::HandleTagDeclDefinition(TagDecl *D) { // For MSVC compatibility, treat declarations of static data members with // inline initializers as definitions. - if (astCtx->getTargetInfo().getCXXABI().isMicrosoft()) { + if (astContext->getTargetInfo().getCXXABI().isMicrosoft()) { llvm_unreachable("NYI"); } // For OpenMP emit declare reduction functions, if required. - if (astCtx->getLangOpts().OpenMP) { + if (astContext->getLangOpts().OpenMP) { llvm_unreachable("NYI"); } } diff --git a/clang/lib/CIR/CodeGen/CIRPasses.cpp b/clang/lib/CIR/CodeGen/CIRPasses.cpp index 65b43cfc6ffd..04582af2f517 100644 --- a/clang/lib/CIR/CodeGen/CIRPasses.cpp +++ b/clang/lib/CIR/CodeGen/CIRPasses.cpp @@ -23,8 +23,8 @@ namespace cir { mlir::LogicalResult runCIRToCIRPasses( - mlir::ModuleOp theModule, mlir::MLIRContext *mlirCtx, - clang::ASTContext &astCtx, bool enableVerifier, bool enableLifetime, + mlir::ModuleOp theModule, mlir::MLIRContext *mlirContext, + clang::ASTContext &astContext, bool enableVerifier, bool enableLifetime, llvm::StringRef lifetimeOpts, bool enableIdiomRecognizer, llvm::StringRef idiomRecognizerOpts, bool enableLibOpt, llvm::StringRef libOptOpts, std::string &passOptParsingFailure, @@ -33,7 +33,7 @@ mlir::LogicalResult runCIRToCIRPasses( llvm::TimeTraceScope scope("CIR To CIR Passes"); - mlir::PassManager pm(mlirCtx); + mlir::PassManager pm(mlirContext); pm.addPass(mlir::createCIRCanonicalizePass()); // TODO(CIR): Make this actually propagate errors correctly. This is stubbed @@ -43,7 +43,7 @@ mlir::LogicalResult runCIRToCIRPasses( }; if (enableLifetime) { - auto lifetimePass = mlir::createLifetimeCheckPass(&astCtx); + auto lifetimePass = mlir::createLifetimeCheckPass(&astContext); if (lifetimePass->initializeOptions(lifetimeOpts, errorHandler).failed()) { passOptParsingFailure = lifetimeOpts; return mlir::failure(); @@ -52,7 +52,7 @@ mlir::LogicalResult runCIRToCIRPasses( } if (enableIdiomRecognizer) { - auto idiomPass = mlir::createIdiomRecognizerPass(&astCtx); + auto idiomPass = mlir::createIdiomRecognizerPass(&astContext); if (idiomPass->initializeOptions(idiomRecognizerOpts, errorHandler) .failed()) { passOptParsingFailure = idiomRecognizerOpts; @@ -62,7 +62,7 @@ mlir::LogicalResult runCIRToCIRPasses( } if (enableLibOpt) { - auto libOpPass = mlir::createLibOptPass(&astCtx); + auto libOpPass = mlir::createLibOptPass(&astContext); if (libOpPass->initializeOptions(libOptOpts, errorHandler).failed()) { passOptParsingFailure = libOptOpts; return mlir::failure(); @@ -73,7 +73,7 @@ mlir::LogicalResult runCIRToCIRPasses( if (enableCIRSimplify) pm.addPass(mlir::createCIRSimplifyPass()); - pm.addPass(mlir::createLoweringPreparePass(&astCtx)); + pm.addPass(mlir::createLoweringPreparePass(&astContext)); if (flattenCIR || enableMem2Reg) mlir::populateCIRPreLoweringPasses(pm, enableCallConvLowering); diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp index 4652670425ee..848189f852c4 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -300,7 +300,7 @@ mlir::Attribute ConstantAggregateBuilderBase::finishArray(mlir::Type eltTy) { } mlir::Attribute -ConstantAggregateBuilderBase::finishStruct(mlir::MLIRContext *ctx, +ConstantAggregateBuilderBase::finishStruct(mlir::MLIRContext *mlirContext, cir::StructType ty) { markFinished(); @@ -317,7 +317,7 @@ ConstantAggregateBuilderBase::finishStruct(mlir::MLIRContext *ctx, // assert(ty->isPacked() == Packed); // constant = llvm::ConstantStruct::get(ty, elts); } else { - const auto members = mlir::ArrayAttr::get(ctx, elts); + const auto members = mlir::ArrayAttr::get(mlirContext, elts); constant = Builder.CGM.getBuilder().getAnonConstStruct(members, Packed); } diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h index 10335e89c1a0..d3a8c0abd308 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.h +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.h @@ -324,7 +324,7 @@ class ConstantAggregateBuilderBase { protected: mlir::Attribute finishArray(mlir::Type eltTy); - mlir::Attribute finishStruct(mlir::MLIRContext *ctx, + mlir::Attribute finishStruct(mlir::MLIRContext *mlirContext, cir::StructType structTy); private: @@ -377,18 +377,20 @@ class ConstantAggregateBuilderTemplateBase /// builder. This aids in readability by making it easier to find the /// places that add components to a builder, as well as "bookending" /// the sub-builder more explicitly. - void finishAndAddTo(mlir::MLIRContext *ctx, AggregateBuilderBase &parent) { + void finishAndAddTo(mlir::MLIRContext *mlirContext, + AggregateBuilderBase &parent) { assert(this->Parent == &parent && "adding to non-parent builder"); - parent.add(asImpl().finishImpl(ctx)); + parent.add(asImpl().finishImpl(mlirContext)); } /// Given that this builder was created by beginning an array or struct /// directly on a ConstantInitBuilder, finish the array/struct and /// create a global variable with it as the initializer. template - cir::GlobalOp finishAndCreateGlobal(mlir::MLIRContext *ctx, As &&...args) { + cir::GlobalOp finishAndCreateGlobal(mlir::MLIRContext *mlirContext, + As &&...args) { assert(!this->Parent && "finishing non-root builder"); - return this->Builder.createGlobal(asImpl().finishImpl(ctx), + return this->Builder.createGlobal(asImpl().finishImpl(mlirContext), std::forward(args)...); } @@ -415,9 +417,9 @@ class ConstantAggregateBuilderTemplateBase /// This is useful for allowing a finished initializer to passed to /// an API which will build the global. However, the "future" preserves /// a dependency on the original builder; it is an error to pass it aside. - ConstantInitFuture finishAndCreateFuture(mlir::MLIRContext *ctx) { + ConstantInitFuture finishAndCreateFuture(mlir::MLIRContext *mlirContext) { assert(!this->Parent && "finishing non-root builder"); - return this->Builder.createFuture(asImpl().finishImpl(ctx)); + return this->Builder.createFuture(asImpl().finishImpl(mlirContext)); } }; @@ -447,7 +449,7 @@ class ConstantArrayBuilderTemplateBase private: /// Form an array constant from the values that have been added to this /// builder. - mlir::Attribute finishImpl([[maybe_unused]] mlir::MLIRContext *ctx) { + mlir::Attribute finishImpl([[maybe_unused]] mlir::MLIRContext *mlirContext) { return AggregateBuilderBase::finishArray(EltTy); } }; @@ -499,8 +501,8 @@ class ConstantStructBuilderTemplateBase private: /// Form an array constant from the values that have been added to this /// builder. - mlir::Attribute finishImpl(mlir::MLIRContext *ctx) { - return AggregateBuilderBase::finishStruct(ctx, StructTy); + mlir::Attribute finishImpl(mlir::MLIRContext *mlirContext) { + return AggregateBuilderBase::finishStruct(mlirContext, StructTy); } }; From 14532cd0a6602ef7c875853fb1b8dc02d2fd424b Mon Sep 17 00:00:00 2001 From: Guojin Date: Mon, 9 Dec 2024 11:58:20 -0500 Subject: [PATCH 2152/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqadds_s32 (#1200) This can't be simply implemented by our CIR Add via LLVM::AddOp, as i[t's saturated add.](https://godbolt.org/z/MxqGrj6fP) --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 8 +- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 4 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 10 +++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 73 ++++++++++++------- clang/test/CIR/CodeGen/AArch64/neon.c | 29 +++++--- clang/test/CIR/IR/invalid.cir | 27 +++++++ clang/test/CIR/Lowering/binop-signed-int.cir | 4 + .../test/CIR/Lowering/binop-unsigned-int.cir | 7 +- 9 files changed, 126 insertions(+), 40 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 52f9a3180fb7..771b7dd33cd4 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -419,13 +419,15 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } mlir::Value createSub(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, - bool hasNSW = false) { + bool hasNSW = false, bool saturated = false) { auto op = create(lhs.getLoc(), lhs.getType(), cir::BinOpKind::Sub, lhs, rhs); if (hasNUW) op.setNoUnsignedWrap(true); if (hasNSW) op.setNoSignedWrap(true); + if (saturated) + op.setSaturated(true); return op; } @@ -438,13 +440,15 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } mlir::Value createAdd(mlir::Value lhs, mlir::Value rhs, bool hasNUW = false, - bool hasNSW = false) { + bool hasNSW = false, bool saturated = false) { auto op = create(lhs.getLoc(), lhs.getType(), cir::BinOpKind::Add, lhs, rhs); if (hasNUW) op.setNoUnsignedWrap(true); if (hasNSW) op.setNoSignedWrap(true); + if (saturated) + op.setSaturated(true); return op; } diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a3ce4f5c204d..dc7af3154f2d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1192,12 +1192,14 @@ def BinOp : CIR_Op<"binop", [Pure, let arguments = (ins Arg:$kind, CIR_AnyType:$lhs, CIR_AnyType:$rhs, UnitAttr:$no_unsigned_wrap, - UnitAttr:$no_signed_wrap); + UnitAttr:$no_signed_wrap, + UnitAttr:$saturated); let assemblyFormat = [{ `(` $kind `,` $lhs `,` $rhs `)` (`nsw` $no_signed_wrap^)? (`nuw` $no_unsigned_wrap^)? + (`sat` $saturated^)? `:` type($lhs) attr-dict }]; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 5e0ce259a0e2..f8337af303cc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2832,7 +2832,7 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr( case NEON::BI__builtin_neon_vqaddh_u16: llvm_unreachable(" neon_vqaddh_u16 NYI "); case NEON::BI__builtin_neon_vqadds_s32: - llvm_unreachable(" neon_vqadds_s32 NYI "); + return builder.createAdd(ops[0], ops[1], false, false, true); case NEON::BI__builtin_neon_vqadds_u32: llvm_unreachable(" neon_vqadds_u32 NYI "); case NEON::BI__builtin_neon_vqdmulhh_s16: @@ -2981,7 +2981,7 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr( case NEON::BI__builtin_neon_vqsubh_u16: llvm_unreachable(" neon_vqsubh_u16 NYI "); case NEON::BI__builtin_neon_vqsubs_s32: - llvm_unreachable(" neon_vqsubs_s32 NYI "); + return builder.createSub(ops[0], ops[1], false, false, true); case NEON::BI__builtin_neon_vqsubs_u32: llvm_unreachable(" neon_vqsubs_u32 NYI "); case NEON::BI__builtin_neon_vrecped_f64: diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index dfc57e9e603d..f9c0554c4fef 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -3785,6 +3785,7 @@ LogicalResult cir::AtomicFetch::verify() { LogicalResult cir::BinOp::verify() { bool noWrap = getNoUnsignedWrap() || getNoSignedWrap(); + bool saturated = getSaturated(); if (!isa(getType()) && noWrap) return emitError() @@ -3794,9 +3795,18 @@ LogicalResult cir::BinOp::verify() { getKind() == cir::BinOpKind::Sub || getKind() == cir::BinOpKind::Mul; + bool saturatedOps = + getKind() == cir::BinOpKind::Add || getKind() == cir::BinOpKind::Sub; + if (noWrap && !noWrapOps) return emitError() << "The nsw/nuw flags are applicable to opcodes: 'add', " "'sub' and 'mul'"; + if (saturated && !saturatedOps) + return emitError() << "The saturated flag is applicable to opcodes: 'add' " + "and 'sub'"; + if (noWrap && saturated) + return emitError() << "The nsw/nuw flags and the saturated flag are " + "mutually exclusive"; bool complexOps = getKind() == cir::BinOpKind::Add || getKind() == cir::BinOpKind::Sub; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index d2c1e765cabd..5db505fe69d9 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2452,6 +2452,13 @@ CIRToLLVMBinOpLowering::getIntOverflowFlag(cir::BinOp op) const { return mlir::LLVM::IntegerOverflowFlags::none; } +static bool isIntTypeUnsigned(mlir::Type type) { + // TODO: Ideally, we should only need to check cir::IntType here. + return mlir::isa(type) + ? mlir::cast(type).isUnsigned() + : mlir::cast(type).isUnsigned(); +} + mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite( cir::BinOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -2464,6 +2471,10 @@ mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite( "operand type not supported yet"); auto llvmTy = getTypeConverter()->convertType(op.getType()); + mlir::Type llvmEltTy = + mlir::isa(llvmTy) + ? mlir::cast(llvmTy).getElementType() + : llvmTy; auto rhs = adaptor.getRhs(); auto lhs = adaptor.getLhs(); @@ -2471,58 +2482,70 @@ mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite( switch (op.getKind()) { case cir::BinOpKind::Add: - if (mlir::isa(type)) + if (mlir::isa(llvmEltTy)) { + if (op.getSaturated()) { + if (isIntTypeUnsigned(type)) { + rewriter.replaceOpWithNewOp(op, lhs, rhs); + break; + } + rewriter.replaceOpWithNewOp(op, lhs, rhs); + break; + } rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); - else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + } else + rewriter.replaceOpWithNewOp(op, lhs, rhs); break; case cir::BinOpKind::Sub: - if (mlir::isa(type)) + if (mlir::isa(llvmEltTy)) { + if (op.getSaturated()) { + if (isIntTypeUnsigned(type)) { + rewriter.replaceOpWithNewOp(op, lhs, rhs); + break; + } + rewriter.replaceOpWithNewOp(op, lhs, rhs); + break; + } rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); - else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + } else + rewriter.replaceOpWithNewOp(op, lhs, rhs); break; case cir::BinOpKind::Mul: - if (mlir::isa(type)) + if (mlir::isa(llvmEltTy)) rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs, getIntOverflowFlag(op)); else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, lhs, rhs); break; case cir::BinOpKind::Div: - if (mlir::isa(type)) { - auto isUnsigned = mlir::isa(type) - ? mlir::cast(type).isUnsigned() - : mlir::cast(type).isUnsigned(); + if (mlir::isa(llvmEltTy)) { + auto isUnsigned = isIntTypeUnsigned(type); if (isUnsigned) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, lhs, rhs); else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, lhs, rhs); } else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, lhs, rhs); break; case cir::BinOpKind::Rem: - if (mlir::isa(type)) { - auto isUnsigned = mlir::isa(type) - ? mlir::cast(type).isUnsigned() - : mlir::cast(type).isUnsigned(); + if (mlir::isa(llvmEltTy)) { + auto isUnsigned = isIntTypeUnsigned(type); if (isUnsigned) - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, lhs, rhs); else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, lhs, rhs); } else - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, lhs, rhs); break; case cir::BinOpKind::And: - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, lhs, rhs); break; case cir::BinOpKind::Or: - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, lhs, rhs); break; case cir::BinOpKind::Xor: - rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + rewriter.replaceOpWithNewOp(op, lhs, rhs); break; } diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 10acbf34f7d8..3ddaea58f163 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -9750,12 +9750,16 @@ poly16x8_t test_vmull_p8(poly8x8_t a, poly8x8_t b) { // return vqaddh_s16(a, b); // } -// NYI-LABEL: @test_vqadds_s32( -// NYI: [[VQADDS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqadd.i32(i32 %a, i32 %b) -// NYI: ret i32 [[VQADDS_S32_I]] -// int32_t test_vqadds_s32(int32_t a, int32_t b) { -// return vqadds_s32(a, b); -// } +int32_t test_vqadds_s32(int32_t a, int32_t b) { + return vqadds_s32(a, b); + + // CIR: vqadds_s32 + // CIR: cir.binop(add, {{%.*}}, {{%.*}}) sat : !s32i + + // LLVM:{{.*}}test_vqadds_s32(i32{{.*}}[[a:%.*]], i32{{.*}}[[b:%.*]]) + // LLVM: [[VQADDS_S32_I:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[a]], i32 [[b]]) + // LLVM: ret i32 [[VQADDS_S32_I]] +} // NYI-LABEL: @test_vqaddd_s64( // NYI: [[VQADDD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %a, i64 %b) @@ -9821,9 +9825,16 @@ poly16x8_t test_vmull_p8(poly8x8_t a, poly8x8_t b) { // NYI-LABEL: @test_vqsubs_s32( // NYI: [[VQSUBS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqsub.i32(i32 %a, i32 %b) // NYI: ret i32 [[VQSUBS_S32_I]] -// int32_t test_vqsubs_s32(int32_t a, int32_t b) { -// return vqsubs_s32(a, b); -// } +int32_t test_vqsubs_s32(int32_t a, int32_t b) { + return vqsubs_s32(a, b); + + // CIR: vqsubs_s32 + // CIR: cir.binop(sub, {{%.*}}, {{%.*}}) sat : !s32i + + // LLVM:{{.*}}test_vqsubs_s32(i32{{.*}}[[a:%.*]], i32{{.*}}[[b:%.*]]) + // LLVM: [[VQSUBS_S32_I:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[a]], i32 [[b]]) + // LLVM: ret i32 [[VQSUBS_S32_I]] +} // NYI-LABEL: @test_vqsubd_s64( // NYI: [[VQSUBD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %a, i64 %b) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 80fe6e114127..1a6c2f503dfd 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1091,6 +1091,33 @@ cir.func @bad_binop_for_nowrap(%x: !u32i, %y: !u32i) { // ----- +!u32i = !cir.int + +cir.func @bad_binop_for_saturated(%x: !u32i, %y: !u32i) { + // expected-error@+1 {{The saturated flag is applicable to opcodes: 'add' and 'sub'}} + %0 = cir.binop(div, %x, %y) sat : !u32i +} + +// ----- + +!s32i = !cir.int + +cir.func @no_nsw_for_saturated(%x: !s32i, %y: !s32i) { + // expected-error@+1 {{The nsw/nuw flags and the saturated flag are mutually exclusive}} + %0 = cir.binop(add, %x, %y) nsw sat : !s32i +} + +// ----- + +!s32i = !cir.int + +cir.func @no_nuw_for_saturated(%x: !s32i, %y: !s32i) { + // expected-error@+1 {{The nsw/nuw flags and the saturated flag are mutually exclusive}} + %0 = cir.binop(add, %x, %y) nuw sat : !s32i +} + +// ----- + !s32i = !cir.int module { diff --git a/clang/test/CIR/Lowering/binop-signed-int.cir b/clang/test/CIR/Lowering/binop-signed-int.cir index 5f028a6c901b..44b479f12fb8 100644 --- a/clang/test/CIR/Lowering/binop-signed-int.cir +++ b/clang/test/CIR/Lowering/binop-signed-int.cir @@ -58,6 +58,10 @@ module { %33 = cir.load %1 : !cir.ptr, !s32i %34 = cir.binop(or, %32, %33) : !s32i // CHECK: = llvm.or + %35 = cir.binop(add, %32, %33) sat: !s32i + // CHECK: = llvm.intr.sadd.sat{{.*}}(i32, i32) -> i32 + %36 = cir.binop(sub, %32, %33) sat: !s32i + // CHECK: = llvm.intr.ssub.sat{{.*}}(i32, i32) -> i32 cir.store %34, %2 : !s32i, !cir.ptr cir.return } diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir index 04de2e049ae0..0ce374488725 100644 --- a/clang/test/CIR/Lowering/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -49,6 +49,8 @@ module { %33 = cir.load %1 : !cir.ptr, !u32i %34 = cir.binop(or, %32, %33) : !u32i cir.store %34, %2 : !u32i, !cir.ptr + %35 = cir.binop(add, %32, %33) sat: !u32i + %36 = cir.binop(sub, %32, %33) sat: !u32i cir.return } } @@ -62,7 +64,8 @@ module { // MLIR: = llvm.shl // MLIR: = llvm.and // MLIR: = llvm.xor -// MLIR: = llvm.or +// MLIR: = llvm.intr.uadd.sat{{.*}}(i32, i32) -> i32 +// MLIR: = llvm.intr.usub.sat{{.*}}(i32, i32) -> i32 // LLVM: = mul i32 // LLVM: = udiv i32 @@ -74,3 +77,5 @@ module { // LLVM: = and i32 // LLVM: = xor i32 // LLVM: = or i32 +// LLVM: = call i32 @llvm.uadd.sat.i32 +// LLVM: = call i32 @llvm.usub.sat.i32 From c7fa6188c7ba7251176351b5b8367959d63dfde0 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Tue, 10 Dec 2024 23:48:39 -0500 Subject: [PATCH 2153/2301] [CIR][CodeGen][NFC] Sync emitDeclInit to CodeGen --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 38 +++++++++++++++++------------ 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 87a4ffa76d8e..eb582c8f01a3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -176,33 +176,39 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { return false; } -static void emitDeclInit(CIRGenFunction &CGF, const VarDecl *D, - Address DeclPtr) { - assert((D->hasGlobalStorage() || - (D->hasLocalStorage() && - CGF.getContext().getLangOpts().OpenCLCPlusPlus)) && +static void emitDeclInit(CIRGenFunction &cgf, const VarDecl *varDecl, + Address declPtr) { + assert((varDecl->hasGlobalStorage() || + (varDecl->hasLocalStorage() && + cgf.getContext().getLangOpts().OpenCLCPlusPlus)) && "VarDecl must have global or local (in the case of OpenCL) storage!"); - assert(!D->getType()->isReferenceType() && + assert(!varDecl->getType()->isReferenceType() && "Should not call emitDeclInit on a reference!"); - QualType type = D->getType(); - LValue lv = CGF.makeAddrLValue(DeclPtr, type); + QualType type = varDecl->getType(); + LValue lv = cgf.makeAddrLValue(declPtr, type); - const Expr *Init = D->getInit(); + const Expr *init = varDecl->getInit(); switch (CIRGenFunction::getEvaluationKind(type)) { + case cir::TEK_Scalar: + if (lv.isObjCStrong()) + llvm_unreachable("NYI"); + else if (lv.isObjCWeak()) + llvm_unreachable("NYI"); + else + cgf.emitScalarInit(init, cgf.getLoc(varDecl->getLocation()), lv, false); + return; + case cir::TEK_Complex: + llvm_unreachable("complext evaluation NYI"); case cir::TEK_Aggregate: - CGF.emitAggExpr(Init, + cgf.emitAggExpr(init, AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); return; - case cir::TEK_Scalar: - CGF.emitScalarInit(Init, CGF.getLoc(D->getLocation()), lv, false); - return; - case cir::TEK_Complex: - llvm_unreachable("complext evaluation NYI"); } + llvm_unreachable("bad evaluation kind"); } static void emitDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) { @@ -336,7 +342,7 @@ void CIRGenModule::emitCXXGlobalVarDeclInit(const VarDecl *varDecl, assert(varDecl && " Expected a global declaration!"); CIRGenFunction cgf{*this, builder, true}; - llvm::SaveAndRestore savedCGF(CurCGF, &cgf); + llvm::SaveAndRestore savedCGF(CurCGF, &cgf); CurCGF->CurFn = addr; CIRGenFunction::SourceLocRAIIObject fnLoc{cgf, From fb8b8ce0f83e950a8f6258e1bf3ba336f1ed9811 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 11 Dec 2024 11:47:28 -0300 Subject: [PATCH 2154/2301] [CIR] Honor -clangir-disable-verifier while printing --- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index ec3e4207e43a..5cb81e89388f 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -267,6 +267,8 @@ class CIRGenConsumer : public clang::ASTConsumer { // FIXME: we cannot roundtrip prettyForm=true right now. mlir::OpPrintingFlags flags; flags.enableDebugInfo(/*enable=*/true, /*prettyForm=*/false); + if (feOptions.ClangIRDisableCIRVerifier) + flags.assumeVerified(); mlirMod->print(*outputStream, flags); } break; From bd298b3708c5a4d6e5a143ebed893c419124fb4b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 27 Nov 2024 15:35:18 -0800 Subject: [PATCH 2155/2301] [CIR][CIRGen][NFCI] Take a step into getting scope information to match OG We are missing cleanups all around, more incremental progress towards fixing that. This is supposed to be NFC intended, but we have to start changing some bits in order to properly match cleanup bits in OG. Start tagging places with more MissingFeatures to allow us to incrementally improve the situation. --- clang/include/clang/CIR/MissingFeatures.h | 14 +- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 174 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 72 ++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 71 ++++--- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 41 +++-- .../CodeGen/builtin-constant-evaluated.cpp | 10 +- clang/test/CIR/CodeGen/lambda.cpp | 6 +- 8 files changed, 321 insertions(+), 69 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 5ab4473bb7ec..4a1130de6ca5 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -59,7 +59,6 @@ struct MissingFeatures { static bool emitTypeCheck() { return false; } static bool tbaa() { return false; } static bool tbaa_struct() { return false; } - static bool cleanups() { return false; } static bool emitNullabilityCheck() { return false; } static bool ptrAuth() { return false; } @@ -160,12 +159,22 @@ struct MissingFeatures { static bool fastMathFlags() { return false; } static bool fastMathFuncAttributes() { return false; } + // Cleanup + static bool cleanups() { return false; } + static bool simplifyCleanupEntry() { return false; } + static bool requiresCleanups() { return false; } + static bool cleanupBranchAfterSwitch() { return false; } + static bool cleanupAlwaysBranchThrough() { return false; } + static bool cleanupDestinationIndex() { return false; } + static bool cleanupDestroyNRVOVariable() { return false; } + static bool cleanupAppendInsts() { return false; } + static bool cleanupIndexAndBIAdjustment() { return false; } + // Exception handling static bool isSEHTryScope() { return false; } static bool ehStack() { return false; } static bool emitStartEHSpec() { return false; } static bool emitEndEHSpec() { return false; } - static bool simplifyCleanupEntry() { return false; } // Type qualifiers. static bool atomicTypes() { return false; } @@ -208,7 +217,6 @@ struct MissingFeatures { static bool addAutoInitAnnotation() { return false; } static bool addHeapAllocSiteMetadata() { return false; } static bool loopInfoStack() { return false; } - static bool requiresCleanups() { return false; } static bool constantFoldsToSimpleInteger() { return false; } static bool checkFunctionCallABI() { return false; } static bool zeroInitializer() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 40fc101d4c23..534fc2a59968 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -37,13 +37,12 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc, JumpDest Dest) { // Remove this once we go for making sure unreachable code is // well modeled (or not). - assert(builder.getInsertionBlock() && "not yet implemented"); assert(!cir::MissingFeatures::ehStack()); // Insert a branch: to the cleanup block (unsolved) or to the already // materialized label. Keep track of unsolved goto's. - auto brOp = builder.create( - Loc, Dest.isValid() ? Dest.getBlock() : ReturnBlock().getBlock()); + assert(Dest.getBlock() && "assumes incoming valid dest"); + auto brOp = builder.create(Loc, Dest.getBlock()); // Calculate the innermost active normal cleanup. EHScopeStack::stable_iterator TopCleanup = @@ -70,7 +69,33 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc, return brOp; } - // FIXME(cir): otherwise, thread through all the normal cleanups in scope. + // Otherwise, thread through all the normal cleanups in scope. + auto index = builder.getUInt32(Dest.getDestIndex(), Loc); + assert(!cir::MissingFeatures::cleanupIndexAndBIAdjustment()); + + // Add this destination to all the scopes involved. + EHScopeStack::stable_iterator I = TopCleanup; + EHScopeStack::stable_iterator E = Dest.getScopeDepth(); + if (E.strictlyEncloses(I)) { + while (true) { + EHCleanupScope &Scope = cast(*EHStack.find(I)); + assert(Scope.isNormalCleanup()); + I = Scope.getEnclosingNormalCleanup(); + + // If this is the last cleanup we're propagating through, tell it + // that there's a resolved jump moving through it. + if (!E.strictlyEncloses(I)) { + Scope.addBranchAfter(index, Dest.getBlock()); + break; + } + + // Otherwise, tell the scope that there's a jump propagating + // through it. If this isn't new information, all the rest of + // the work has been done before. + if (!Scope.addBranchThrough(Dest.getBlock())) + break; + } + } return brOp; } @@ -305,6 +330,18 @@ static void emitCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, // No need to emit continuation block because CIR uses a cir.if. } +static mlir::Block *createNormalEntry(CIRGenFunction &cgf, + EHCleanupScope &scope) { + assert(scope.isNormalCleanup()); + mlir::Block *entry = scope.getNormalBlock(); + if (!entry) { + mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder()); + entry = cgf.currLexScope->getOrCreateCleanupBlock(cgf.getBuilder()); + scope.setNormalBlock(entry); + } + return entry; +} + /// Pops a cleanup block. If the block includes a normal cleanup, the /// current insertion point is threaded through the cleanup, as are /// any branch fixups on the cleanup. @@ -341,7 +378,8 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // - whether there's a fallthrough auto *FallthroughSource = builder.getInsertionBlock(); - bool HasFallthrough = (FallthroughSource != nullptr && IsActive); + bool HasFallthrough = + (FallthroughSource != nullptr && (IsActive || HasExistingBranches)); // Branch-through fall-throughs leave the insertion point set to the // end of the last cleanup, which points to the current scope. The @@ -442,7 +480,131 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // Otherwise, the best approach is to thread everything through // the cleanup block and then try to clean up after ourselves. } else { - llvm_unreachable("NYI"); + // Force the entry block to exist. + mlir::Block *normalEntry = createNormalEntry(*this, Scope); + + // I. Set up the fallthrough edge in. + mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP; + + // If there's a fallthrough, we need to store the cleanup + // destination index. For fall-throughs this is always zero. + if (HasFallthrough) { + if (!HasPrebranchedFallthrough) { + assert(!cir::MissingFeatures::cleanupDestinationIndex()); + } + + // Otherwise, save and clear the IP if we don't have fallthrough + // because the cleanup is inactive. + } else if (FallthroughSource) { + assert(!IsActive && "source without fallthrough for active cleanup"); + savedInactiveFallthroughIP = getBuilder().saveInsertionPoint(); + } + + // II. Emit the entry block. This implicitly branches to it if + // we have fallthrough. All the fixups and existing branches + // should already be branched to it. + builder.setInsertionPointToEnd(normalEntry); + + // intercept normal cleanup to mark SEH scope end + if (IsEHa) { + llvm_unreachable("NYI"); + } + + // III. Figure out where we're going and build the cleanup + // epilogue. + bool HasEnclosingCleanups = + (Scope.getEnclosingNormalCleanup() != EHStack.stable_end()); + + // Compute the branch-through dest if we need it: + // - if there are branch-throughs threaded through the scope + // - if fall-through is a branch-through + // - if there are fixups that will be optimistically forwarded + // to the enclosing cleanup + mlir::Block *branchThroughDest = nullptr; + if (Scope.hasBranchThroughs() || + (FallthroughSource && FallthroughIsBranchThrough) || + (HasFixups && HasEnclosingCleanups)) { + llvm_unreachable("NYI"); + } + + mlir::Block *fallthroughDest = nullptr; + + // If there's exactly one branch-after and no other threads, + // we can route it without a switch. + // Skip for SEH, since ExitSwitch is used to generate code to indicate + // abnormal termination. (SEH: Except _leave and fall-through at + // the end, all other exits in a _try (return/goto/continue/break) + // are considered as abnormal terminations, using NormalCleanupDestSlot + // to indicate abnormal termination) + if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough && + !currentFunctionUsesSEHTry() && Scope.getNumBranchAfters() == 1) { + llvm_unreachable("NYI"); + // Build a switch-out if we need it: + // - if there are branch-afters threaded through the scope + // - if fall-through is a branch-after + // - if there are fixups that have nowhere left to go and + // so must be immediately resolved + } else if (Scope.getNumBranchAfters() || + (HasFallthrough && !FallthroughIsBranchThrough) || + (HasFixups && !HasEnclosingCleanups)) { + assert(!cir::MissingFeatures::cleanupBranchAfterSwitch()); + } else { + // We should always have a branch-through destination in this case. + assert(branchThroughDest); + assert(!cir::MissingFeatures::cleanupAlwaysBranchThrough()); + } + + // IV. Pop the cleanup and emit it. + Scope.markEmitted(); + EHStack.popCleanup(); + assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups); + + emitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); + + // Append the prepared cleanup prologue from above. + assert(!cir::MissingFeatures::cleanupAppendInsts()); + + // Optimistically hope that any fixups will continue falling through. + for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); I < E; + ++I) { + llvm_unreachable("NYI"); + } + + // V. Set up the fallthrough edge out. + + // Case 1: a fallthrough source exists but doesn't branch to the + // cleanup because the cleanup is inactive. + if (!HasFallthrough && FallthroughSource) { + // Prebranched fallthrough was forwarded earlier. + // Non-prebranched fallthrough doesn't need to be forwarded. + // Either way, all we need to do is restore the IP we cleared before. + assert(!IsActive); + llvm_unreachable("NYI"); + + // Case 2: a fallthrough source exists and should branch to the + // cleanup, but we're not supposed to branch through to the next + // cleanup. + } else if (HasFallthrough && fallthroughDest) { + llvm_unreachable("NYI"); + + // Case 3: a fallthrough source exists and should branch to the + // cleanup and then through to the next. + } else if (HasFallthrough) { + // Everything is already set up for this. + + // Case 4: no fallthrough source exists. + } else { + // FIXME(cir): should we clear insertion point here? + } + + // VI. Assorted cleaning. + + // Check whether we can merge NormalEntry into a single predecessor. + // This might invalidate (non-IR) pointers to NormalEntry. + // + // If it did invalidate those pointers, and NormalEntry was the same + // as NormalExit, go back and patch up the fixups. + assert(!cir::MissingFeatures::simplifyCleanupEntry()); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index c4d53a8477ec..80eefc3cc919 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -916,7 +916,7 @@ template struct DestroyNRVOVariable : EHScopeStack::Cleanup { QualType Ty; void Emit(CIRGenFunction &CGF, Flags flags) override { - llvm_unreachable("NYI"); + assert(!cir::MissingFeatures::cleanupDestroyNRVOVariable()); } virtual ~DestroyNRVOVariable() = default; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index cd7763f2ef79..bbea1f2e4a8f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -357,15 +357,23 @@ void CIRGenFunction::LexicalScope::cleanup() { // Cleanup are done right before codegen resume a scope. This is where // objects are destroyed. - unsigned curLoc = 0; + SmallVector retBlocks; for (auto *retBlock : localScope->getRetBlocks()) { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPointToEnd(retBlock); - mlir::Location retLoc = *localScope->getRetLocs()[curLoc]; - curLoc++; + retBlocks.push_back(retBlock); + mlir::Location retLoc = localScope->getRetLoc(retBlock); (void)emitReturn(retLoc); } + auto removeUnusedRetBlocks = [&]() { + for (mlir::Block *retBlock : retBlocks) { + if (!retBlock->getUses().empty()) + continue; + retBlock->erase(); + } + }; + auto insertCleanupAndLeave = [&](mlir::Block *InsPt) { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPointToEnd(InsPt); @@ -381,9 +389,34 @@ void CIRGenFunction::LexicalScope::cleanup() { if (!cleanupBlock && localScope->getCleanupBlock(builder)) { cleanupBlock = localScope->getCleanupBlock(builder); builder.create(InsPt->back().getLoc(), cleanupBlock); + if (!cleanupBlock->mightHaveTerminator()) { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPointToEnd(cleanupBlock); + builder.create(localScope->EndLoc); + } } if (localScope->Depth == 0) { + // TODO(cir): get rid of all this special cases once cleanups are properly + // implemented. + // TODO(cir): most of this code should move into emitBranchThroughCleanup + if (localScope->getRetBlocks().size() == 1) { + mlir::Block *retBlock = localScope->getRetBlocks()[0]; + mlir::Location loc = localScope->getRetLoc(retBlock); + if (retBlock->getUses().empty()) + retBlock->erase(); + else { + // Thread return block via cleanup block. + if (cleanupBlock) { + for (auto &blockUse : retBlock->getUses()) { + auto brOp = dyn_cast(blockUse.getOwner()); + brOp.setSuccessor(cleanupBlock); + } + } + builder.create(loc, retBlock); + return; + } + } emitImplicitReturn(); return; } @@ -428,6 +461,7 @@ void CIRGenFunction::LexicalScope::cleanup() { // get into this condition and emit the proper cleanup. This is // needed to get nrvo to interop with dtor logic. PerformCleanup = false; + removeUnusedRetBlocks(); return; } @@ -537,7 +571,7 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // the ret after it's been at EndLoc. if (auto *DI = getDebugInfo()) assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); - builder.clearInsertionPoint(); + // FIXME(cir): should we clearInsertionPoint? breaks many testcases PopCleanupBlocks(PrologueCleanupDepth); } @@ -686,7 +720,7 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, assert(Fn.isDeclaration() && "Function already has body?"); mlir::Block *EntryBB = Fn.addEntryBlock(); builder.setInsertionPointToStart(EntryBB); - + mlir::Block *maybeEmptyLastBlock = nullptr; { // Initialize lexical scope information. LexicalScope lexScope{*this, fusedLoc, EntryBB}; @@ -736,18 +770,22 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, llvm_unreachable("no definition for emitted function"); assert(builder.getInsertionBlock() && "Should be valid"); - } + maybeEmptyLastBlock = builder.getInsertionBlock(); - if (mlir::failed(Fn.verifyBody())) - return nullptr; + if (mlir::failed(Fn.verifyBody())) + return nullptr; - // Emit the standard function epilogue. - finishFunction(BodyRange.getEnd()); + // Emit the standard function epilogue. + finishFunction(BodyRange.getEnd()); - // If we haven't marked the function nothrow through other means, do a quick - // pass now to see if we can. - assert(!cir::MissingFeatures::tryMarkNoThrow()); + // If we haven't marked the function nothrow through other means, do a quick + // pass now to see if we can. + assert(!cir::MissingFeatures::tryMarkNoThrow()); + } + if (maybeEmptyLastBlock && maybeEmptyLastBlock->getUses().empty() && + maybeEmptyLastBlock->empty()) + maybeEmptyLastBlock->erase(); return Fn; } @@ -1171,9 +1209,13 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, if (FD && FD->isMain() && cir::MissingFeatures::zerocallusedregs()) llvm_unreachable("NYI"); - mlir::Block *EntryBB = &Fn.getBlocks().front(); + // CIRGen has its own logic for entry blocks, usually per operation region. + mlir::Block *retBlock = currLexScope->getOrCreateRetBlock(*this, getLoc(Loc)); + // returnBlock handles per region getJumpDestInCurrentScope LLVM traditional + // codegen logic. + (void)returnBlock(retBlock); - // TODO: allocapt insertion? probably don't need for CIR + mlir::Block *EntryBB = &Fn.getBlocks().front(); if (cir::MissingFeatures::requiresReturnValueCheck()) llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index b6b949e47bf2..60aef3c39d6b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -379,11 +379,14 @@ class CIRGenFunction : public CIRGenTypeCache { clang::GlobalDecl CurGD; /// Unified return block. - /// Not that for LLVM codegen this is a memeber variable instead. - JumpDest ReturnBlock() { - return JumpDest(currLexScope->getOrCreateCleanupBlock(builder)); + /// In CIR this is a function because each scope might have + /// it's associated return block. + JumpDest returnBlock(mlir::Block *retBlock) { + return getJumpDestInCurrentScope(retBlock); } + unsigned nextCleanupDestIndex = 1; + /// The temporary alloca to hold the return value. This is /// invalid iff the function has no return value. Address ReturnValue = Address::invalid(); @@ -1351,6 +1354,16 @@ class CIRGenFunction : public CIRGenTypeCache { void emitStoreThroughBitfieldLValue(RValue Src, LValue Dst, mlir::Value &Result); + /// The given basic block lies in the current EH scope, but may be a + /// target of a potentially scope-crossing jump; get a stable handle + /// to which we can perform this jump later. + /// CIRGen: this mostly tracks state for figuring out the proper scope + /// information, no actual branches are emitted. + JumpDest getJumpDestInCurrentScope(mlir::Block *target) { + return JumpDest(target, EHStack.getInnermostNormalCleanup(), + nextCleanupDestIndex++); + } + cir::BrOp emitBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is @@ -2070,11 +2083,14 @@ class CIRGenFunction : public CIRGenTypeCache { void ForceCleanup(std::initializer_list ValuesToReload = {}) { assert(PerformCleanup && "Already forced cleanup"); - CGF.DidCallStackSave = OldDidCallStackSave; - CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize, - ValuesToReload); - PerformCleanup = false; - CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth; + { + mlir::OpBuilder::InsertionGuard guard(CGF.getBuilder()); + CGF.DidCallStackSave = OldDidCallStackSave; + CGF.PopCleanupBlocks(CleanupStackDepth, + LifetimeExtendedCleanupStackSize, ValuesToReload); + PerformCleanup = false; + CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth; + } } }; @@ -2203,7 +2219,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) { if (CleanupBlock) return getCleanupBlock(builder); - return createCleanupBlock(builder); + CleanupBlock = createCleanupBlock(builder); + return CleanupBlock; } mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) { @@ -2213,9 +2230,10 @@ class CIRGenFunction : public CIRGenTypeCache { { // Create the cleanup block but dont hook it up around just yet. mlir::OpBuilder::InsertionGuard guard(builder); - CleanupBlock = builder.createBlock(builder.getBlock()->getParent()); + mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent() + : &CGF.CurFn->getRegion(0); + CleanupBlock = builder.createBlock(r); } - assert(builder.getInsertionBlock() && "Should be valid"); return CleanupBlock; } @@ -2227,7 +2245,7 @@ class CIRGenFunction : public CIRGenTypeCache { // On switches we need one return block per region, since cases don't // have their own scopes but are distinct regions nonetheless. llvm::SmallVector RetBlocks; - llvm::SmallVector> RetLocs; + llvm::DenseMap RetLocs; llvm::DenseMap RetBlockInCaseIndex; std::optional NormalRetBlockIndex; llvm::SmallVector> SwitchRegions; @@ -2245,7 +2263,7 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::OpBuilder::InsertionGuard guard(CGF.builder); auto *b = CGF.builder.createBlock(CGF.builder.getBlock()->getParent()); RetBlocks.push_back(b); - RetLocs.push_back(loc); + updateRetLoc(b, loc); return b; } @@ -2254,8 +2272,9 @@ class CIRGenFunction : public CIRGenTypeCache { public: llvm::ArrayRef getRetBlocks() { return RetBlocks; } - llvm::ArrayRef> getRetLocs() { - return RetLocs; + mlir::Location getRetLoc(mlir::Block *b) { return RetLocs.at(b); } + void updateRetLoc(mlir::Block *b, mlir::Location loc) { + RetLocs.insert_or_assign(b, loc); } llvm::MutableArrayRef> getSwitchRegions() { assert(isSwitch() && "expected switch scope"); @@ -2269,22 +2288,26 @@ class CIRGenFunction : public CIRGenTypeCache { } mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) { + mlir::Block *ret = nullptr; if (auto caseOp = mlir::dyn_cast_if_present( CGF.builder.getBlock()->getParentOp())) { auto iter = RetBlockInCaseIndex.find(caseOp); if (iter != RetBlockInCaseIndex.end()) - return RetBlocks[iter->second]; - - mlir::Block *ret = createRetBlock(CGF, loc); - RetBlockInCaseIndex[caseOp] = RetBlocks.size() - 1; - return ret; - } - if (!NormalRetBlockIndex) { - mlir::Block *ret = createRetBlock(CGF, loc); + ret = RetBlocks[iter->second]; + else { + ret = createRetBlock(CGF, loc); + RetBlockInCaseIndex[caseOp] = RetBlocks.size() - 1; + return ret; + } + } else if (!NormalRetBlockIndex) { + ret = createRetBlock(CGF, loc); NormalRetBlockIndex = RetBlocks.size() - 1; return ret; + } else { + ret = &*RetBlocks[*NormalRetBlockIndex]; } - return &*RetBlocks[*NormalRetBlockIndex]; + updateRetLoc(ret, loc); + return ret; } // Scope entry block tracking diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 8708eeecb7e5..f6c3ccf12847 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -72,12 +72,18 @@ Address CIRGenFunction::emitCompoundStmt(const CompoundStmt &S, bool getLast, // Add local scope to track new declared variables. SymTableScopeTy varScope(symbolTable); auto scopeLoc = getLoc(S.getSourceRange()); + mlir::OpBuilder::InsertPoint scopeInsPt; builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) { - LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - retAlloca = emitCompoundStmtWithoutScope(S, getLast, slot); + scopeInsPt = b.saveInsertionPoint(); }); + { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.restoreInsertionPoint(scopeInsPt); + LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()}; + retAlloca = emitCompoundStmtWithoutScope(S, getLast, slot); + } return retAlloca; } @@ -473,14 +479,25 @@ mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &S) { mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &S) { assert(!cir::MissingFeatures::requiresReturnValueCheck()); + assert(!cir::MissingFeatures::isSEHTryScope()); + auto loc = getLoc(S.getSourceRange()); // Emit the result value, even if unused, to evaluate the side effects. const Expr *RV = S.getRetValue(); - // TODO(cir): LLVM codegen uses a RunCleanupsScope cleanupScope here, we - // should model this in face of dtors. + // Record the result expression of the return statement. The recorded + // expression is used to determine whether a block capture's lifetime should + // end at the end of the full expression as opposed to the end of the scope + // enclosing the block expression. + // + // This permits a small, easily-implemented exception to our over-conservative + // rules about not jumping to statements following block literals with + // non-trivial cleanups. + // TODO(cir): SaveRetExpr + // SaveRetExprRAII SaveRetExpr(RV, *this); + RunCleanupsScope cleanupScope(*this); bool createNewScope = false; if (const auto *EWC = dyn_cast_or_null(RV)) { RV = EWC->getSubExpr(); @@ -557,16 +574,17 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &S) { } } - // Create a new return block (if not existent) and add a branch to - // it. The actual return instruction is only inserted during current - // scope cleanup handling. + cleanupScope.ForceCleanup(); + + // In CIR we might have returns in different scopes. + // FIXME(cir): cleanup code is handling actual return emission, the logic + // should try to match traditional codegen more closely (to the extend which + // is possible). auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); - builder.create(loc, retBlock); + emitBranchThroughCleanup(loc, returnBlock(retBlock)); // Insert the new block to continue codegen after branch to ret block. builder.createBlock(builder.getBlock()->getParent()); - - // TODO(cir): LLVM codegen for a cleanup on cleanupScope here. return mlir::success(); } @@ -1155,5 +1173,6 @@ void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue RV, } else { llvm_unreachable("NYI"); } - emitBranchThroughCleanup(loc, ReturnBlock()); + auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); + emitBranchThroughCleanup(loc, returnBlock(retBlock)); } diff --git a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp index a53d85fbf55b..216e63029ddd 100644 --- a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp +++ b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp @@ -4,9 +4,9 @@ auto func() { return __builtin_strcmp("", ""); // CIR: cir.func @_Z4funcv() - // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} loc(#loc2) - // CIR-NEXT: %1 = cir.const #cir.int<0> : !s32i loc(#loc7) - // CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr loc(#loc8) - // CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i loc(#loc8) - // CIR-NEXT: cir.return %2 : !s32i loc(#loc8) + // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + // CIR-NEXT: %1 = cir.const #cir.int<0> : !s32i + // CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr + // CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i + // CIR-NEXT: cir.return %2 : !s32i } diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index b45634c0def8..6c3538696361 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -245,10 +245,8 @@ int g3() { // COM: LLVM: [[CALL:%.*]] = call noundef i32 @"_ZZ2g3vENK3$_0clERKi"(ptr noundef nonnull align 1 dereferenceable(1) [[unused_capture]], ptr noundef nonnull align 4 dereferenceable(4) [[TMP0]]) // LLVM: [[CALL:%.*]] = call i32 @"_ZZ2g3vENK3$_0clERKi"(ptr [[unused_capture]], ptr [[TMP0]]) // LLVM: store i32 [[CALL]], ptr [[ret_val]], align 4 -// FIXME: should just return result -// COM: LLVM: ret i32 [[ret_val]] -// LLVM: call void @llvm.trap() -// LLVM: unreachable +// LLVM: %[[ret:.*]] = load i32, ptr [[ret_val]], align 4 +// LLVM: ret i32 %[[ret]] // lambda operator int (*)(int const&)() // LLVM-LABEL: @"_ZZ2g3vENK3$_0cvPFiRKiEEv" From f2f5bfb38506d064af3a1e6260f52cf5dd6f01ba Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 11 Dec 2024 22:40:36 -0300 Subject: [PATCH 2156/2301] Revert "[CIR][CIRGen][NFCI] Take a step into getting scope information to match OG" Seems like windows bots are now broken! This reverts commit 9a63c50ee75bd609a7760103b24a480351069440. --- clang/include/clang/CIR/MissingFeatures.h | 14 +- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 174 +----------------- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 72 ++------ clang/lib/CIR/CodeGen/CIRGenFunction.h | 71 +++---- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 41 ++--- .../CodeGen/builtin-constant-evaluated.cpp | 10 +- clang/test/CIR/CodeGen/lambda.cpp | 6 +- 8 files changed, 69 insertions(+), 321 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 4a1130de6ca5..5ab4473bb7ec 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -59,6 +59,7 @@ struct MissingFeatures { static bool emitTypeCheck() { return false; } static bool tbaa() { return false; } static bool tbaa_struct() { return false; } + static bool cleanups() { return false; } static bool emitNullabilityCheck() { return false; } static bool ptrAuth() { return false; } @@ -159,22 +160,12 @@ struct MissingFeatures { static bool fastMathFlags() { return false; } static bool fastMathFuncAttributes() { return false; } - // Cleanup - static bool cleanups() { return false; } - static bool simplifyCleanupEntry() { return false; } - static bool requiresCleanups() { return false; } - static bool cleanupBranchAfterSwitch() { return false; } - static bool cleanupAlwaysBranchThrough() { return false; } - static bool cleanupDestinationIndex() { return false; } - static bool cleanupDestroyNRVOVariable() { return false; } - static bool cleanupAppendInsts() { return false; } - static bool cleanupIndexAndBIAdjustment() { return false; } - // Exception handling static bool isSEHTryScope() { return false; } static bool ehStack() { return false; } static bool emitStartEHSpec() { return false; } static bool emitEndEHSpec() { return false; } + static bool simplifyCleanupEntry() { return false; } // Type qualifiers. static bool atomicTypes() { return false; } @@ -217,6 +208,7 @@ struct MissingFeatures { static bool addAutoInitAnnotation() { return false; } static bool addHeapAllocSiteMetadata() { return false; } static bool loopInfoStack() { return false; } + static bool requiresCleanups() { return false; } static bool constantFoldsToSimpleInteger() { return false; } static bool checkFunctionCallABI() { return false; } static bool zeroInitializer() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 534fc2a59968..40fc101d4c23 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -37,12 +37,13 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc, JumpDest Dest) { // Remove this once we go for making sure unreachable code is // well modeled (or not). + assert(builder.getInsertionBlock() && "not yet implemented"); assert(!cir::MissingFeatures::ehStack()); // Insert a branch: to the cleanup block (unsolved) or to the already // materialized label. Keep track of unsolved goto's. - assert(Dest.getBlock() && "assumes incoming valid dest"); - auto brOp = builder.create(Loc, Dest.getBlock()); + auto brOp = builder.create( + Loc, Dest.isValid() ? Dest.getBlock() : ReturnBlock().getBlock()); // Calculate the innermost active normal cleanup. EHScopeStack::stable_iterator TopCleanup = @@ -69,33 +70,7 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc, return brOp; } - // Otherwise, thread through all the normal cleanups in scope. - auto index = builder.getUInt32(Dest.getDestIndex(), Loc); - assert(!cir::MissingFeatures::cleanupIndexAndBIAdjustment()); - - // Add this destination to all the scopes involved. - EHScopeStack::stable_iterator I = TopCleanup; - EHScopeStack::stable_iterator E = Dest.getScopeDepth(); - if (E.strictlyEncloses(I)) { - while (true) { - EHCleanupScope &Scope = cast(*EHStack.find(I)); - assert(Scope.isNormalCleanup()); - I = Scope.getEnclosingNormalCleanup(); - - // If this is the last cleanup we're propagating through, tell it - // that there's a resolved jump moving through it. - if (!E.strictlyEncloses(I)) { - Scope.addBranchAfter(index, Dest.getBlock()); - break; - } - - // Otherwise, tell the scope that there's a jump propagating - // through it. If this isn't new information, all the rest of - // the work has been done before. - if (!Scope.addBranchThrough(Dest.getBlock())) - break; - } - } + // FIXME(cir): otherwise, thread through all the normal cleanups in scope. return brOp; } @@ -330,18 +305,6 @@ static void emitCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, // No need to emit continuation block because CIR uses a cir.if. } -static mlir::Block *createNormalEntry(CIRGenFunction &cgf, - EHCleanupScope &scope) { - assert(scope.isNormalCleanup()); - mlir::Block *entry = scope.getNormalBlock(); - if (!entry) { - mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder()); - entry = cgf.currLexScope->getOrCreateCleanupBlock(cgf.getBuilder()); - scope.setNormalBlock(entry); - } - return entry; -} - /// Pops a cleanup block. If the block includes a normal cleanup, the /// current insertion point is threaded through the cleanup, as are /// any branch fixups on the cleanup. @@ -378,8 +341,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // - whether there's a fallthrough auto *FallthroughSource = builder.getInsertionBlock(); - bool HasFallthrough = - (FallthroughSource != nullptr && (IsActive || HasExistingBranches)); + bool HasFallthrough = (FallthroughSource != nullptr && IsActive); // Branch-through fall-throughs leave the insertion point set to the // end of the last cleanup, which points to the current scope. The @@ -480,131 +442,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // Otherwise, the best approach is to thread everything through // the cleanup block and then try to clean up after ourselves. } else { - // Force the entry block to exist. - mlir::Block *normalEntry = createNormalEntry(*this, Scope); - - // I. Set up the fallthrough edge in. - mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP; - - // If there's a fallthrough, we need to store the cleanup - // destination index. For fall-throughs this is always zero. - if (HasFallthrough) { - if (!HasPrebranchedFallthrough) { - assert(!cir::MissingFeatures::cleanupDestinationIndex()); - } - - // Otherwise, save and clear the IP if we don't have fallthrough - // because the cleanup is inactive. - } else if (FallthroughSource) { - assert(!IsActive && "source without fallthrough for active cleanup"); - savedInactiveFallthroughIP = getBuilder().saveInsertionPoint(); - } - - // II. Emit the entry block. This implicitly branches to it if - // we have fallthrough. All the fixups and existing branches - // should already be branched to it. - builder.setInsertionPointToEnd(normalEntry); - - // intercept normal cleanup to mark SEH scope end - if (IsEHa) { - llvm_unreachable("NYI"); - } - - // III. Figure out where we're going and build the cleanup - // epilogue. - bool HasEnclosingCleanups = - (Scope.getEnclosingNormalCleanup() != EHStack.stable_end()); - - // Compute the branch-through dest if we need it: - // - if there are branch-throughs threaded through the scope - // - if fall-through is a branch-through - // - if there are fixups that will be optimistically forwarded - // to the enclosing cleanup - mlir::Block *branchThroughDest = nullptr; - if (Scope.hasBranchThroughs() || - (FallthroughSource && FallthroughIsBranchThrough) || - (HasFixups && HasEnclosingCleanups)) { - llvm_unreachable("NYI"); - } - - mlir::Block *fallthroughDest = nullptr; - - // If there's exactly one branch-after and no other threads, - // we can route it without a switch. - // Skip for SEH, since ExitSwitch is used to generate code to indicate - // abnormal termination. (SEH: Except _leave and fall-through at - // the end, all other exits in a _try (return/goto/continue/break) - // are considered as abnormal terminations, using NormalCleanupDestSlot - // to indicate abnormal termination) - if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough && - !currentFunctionUsesSEHTry() && Scope.getNumBranchAfters() == 1) { - llvm_unreachable("NYI"); - // Build a switch-out if we need it: - // - if there are branch-afters threaded through the scope - // - if fall-through is a branch-after - // - if there are fixups that have nowhere left to go and - // so must be immediately resolved - } else if (Scope.getNumBranchAfters() || - (HasFallthrough && !FallthroughIsBranchThrough) || - (HasFixups && !HasEnclosingCleanups)) { - assert(!cir::MissingFeatures::cleanupBranchAfterSwitch()); - } else { - // We should always have a branch-through destination in this case. - assert(branchThroughDest); - assert(!cir::MissingFeatures::cleanupAlwaysBranchThrough()); - } - - // IV. Pop the cleanup and emit it. - Scope.markEmitted(); - EHStack.popCleanup(); - assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups); - - emitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); - - // Append the prepared cleanup prologue from above. - assert(!cir::MissingFeatures::cleanupAppendInsts()); - - // Optimistically hope that any fixups will continue falling through. - for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); I < E; - ++I) { - llvm_unreachable("NYI"); - } - - // V. Set up the fallthrough edge out. - - // Case 1: a fallthrough source exists but doesn't branch to the - // cleanup because the cleanup is inactive. - if (!HasFallthrough && FallthroughSource) { - // Prebranched fallthrough was forwarded earlier. - // Non-prebranched fallthrough doesn't need to be forwarded. - // Either way, all we need to do is restore the IP we cleared before. - assert(!IsActive); - llvm_unreachable("NYI"); - - // Case 2: a fallthrough source exists and should branch to the - // cleanup, but we're not supposed to branch through to the next - // cleanup. - } else if (HasFallthrough && fallthroughDest) { - llvm_unreachable("NYI"); - - // Case 3: a fallthrough source exists and should branch to the - // cleanup and then through to the next. - } else if (HasFallthrough) { - // Everything is already set up for this. - - // Case 4: no fallthrough source exists. - } else { - // FIXME(cir): should we clear insertion point here? - } - - // VI. Assorted cleaning. - - // Check whether we can merge NormalEntry into a single predecessor. - // This might invalidate (non-IR) pointers to NormalEntry. - // - // If it did invalidate those pointers, and NormalEntry was the same - // as NormalExit, go back and patch up the fixups. - assert(!cir::MissingFeatures::simplifyCleanupEntry()); + llvm_unreachable("NYI"); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 80eefc3cc919..c4d53a8477ec 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -916,7 +916,7 @@ template struct DestroyNRVOVariable : EHScopeStack::Cleanup { QualType Ty; void Emit(CIRGenFunction &CGF, Flags flags) override { - assert(!cir::MissingFeatures::cleanupDestroyNRVOVariable()); + llvm_unreachable("NYI"); } virtual ~DestroyNRVOVariable() = default; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index bbea1f2e4a8f..cd7763f2ef79 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -357,23 +357,15 @@ void CIRGenFunction::LexicalScope::cleanup() { // Cleanup are done right before codegen resume a scope. This is where // objects are destroyed. - SmallVector retBlocks; + unsigned curLoc = 0; for (auto *retBlock : localScope->getRetBlocks()) { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPointToEnd(retBlock); - retBlocks.push_back(retBlock); - mlir::Location retLoc = localScope->getRetLoc(retBlock); + mlir::Location retLoc = *localScope->getRetLocs()[curLoc]; + curLoc++; (void)emitReturn(retLoc); } - auto removeUnusedRetBlocks = [&]() { - for (mlir::Block *retBlock : retBlocks) { - if (!retBlock->getUses().empty()) - continue; - retBlock->erase(); - } - }; - auto insertCleanupAndLeave = [&](mlir::Block *InsPt) { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPointToEnd(InsPt); @@ -389,34 +381,9 @@ void CIRGenFunction::LexicalScope::cleanup() { if (!cleanupBlock && localScope->getCleanupBlock(builder)) { cleanupBlock = localScope->getCleanupBlock(builder); builder.create(InsPt->back().getLoc(), cleanupBlock); - if (!cleanupBlock->mightHaveTerminator()) { - mlir::OpBuilder::InsertionGuard guard(builder); - builder.setInsertionPointToEnd(cleanupBlock); - builder.create(localScope->EndLoc); - } } if (localScope->Depth == 0) { - // TODO(cir): get rid of all this special cases once cleanups are properly - // implemented. - // TODO(cir): most of this code should move into emitBranchThroughCleanup - if (localScope->getRetBlocks().size() == 1) { - mlir::Block *retBlock = localScope->getRetBlocks()[0]; - mlir::Location loc = localScope->getRetLoc(retBlock); - if (retBlock->getUses().empty()) - retBlock->erase(); - else { - // Thread return block via cleanup block. - if (cleanupBlock) { - for (auto &blockUse : retBlock->getUses()) { - auto brOp = dyn_cast(blockUse.getOwner()); - brOp.setSuccessor(cleanupBlock); - } - } - builder.create(loc, retBlock); - return; - } - } emitImplicitReturn(); return; } @@ -461,7 +428,6 @@ void CIRGenFunction::LexicalScope::cleanup() { // get into this condition and emit the proper cleanup. This is // needed to get nrvo to interop with dtor logic. PerformCleanup = false; - removeUnusedRetBlocks(); return; } @@ -571,7 +537,7 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // the ret after it's been at EndLoc. if (auto *DI = getDebugInfo()) assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); - // FIXME(cir): should we clearInsertionPoint? breaks many testcases + builder.clearInsertionPoint(); PopCleanupBlocks(PrologueCleanupDepth); } @@ -720,7 +686,7 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, assert(Fn.isDeclaration() && "Function already has body?"); mlir::Block *EntryBB = Fn.addEntryBlock(); builder.setInsertionPointToStart(EntryBB); - mlir::Block *maybeEmptyLastBlock = nullptr; + { // Initialize lexical scope information. LexicalScope lexScope{*this, fusedLoc, EntryBB}; @@ -770,22 +736,18 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, llvm_unreachable("no definition for emitted function"); assert(builder.getInsertionBlock() && "Should be valid"); - maybeEmptyLastBlock = builder.getInsertionBlock(); + } - if (mlir::failed(Fn.verifyBody())) - return nullptr; + if (mlir::failed(Fn.verifyBody())) + return nullptr; - // Emit the standard function epilogue. - finishFunction(BodyRange.getEnd()); + // Emit the standard function epilogue. + finishFunction(BodyRange.getEnd()); - // If we haven't marked the function nothrow through other means, do a quick - // pass now to see if we can. - assert(!cir::MissingFeatures::tryMarkNoThrow()); - } + // If we haven't marked the function nothrow through other means, do a quick + // pass now to see if we can. + assert(!cir::MissingFeatures::tryMarkNoThrow()); - if (maybeEmptyLastBlock && maybeEmptyLastBlock->getUses().empty() && - maybeEmptyLastBlock->empty()) - maybeEmptyLastBlock->erase(); return Fn; } @@ -1209,14 +1171,10 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, if (FD && FD->isMain() && cir::MissingFeatures::zerocallusedregs()) llvm_unreachable("NYI"); - // CIRGen has its own logic for entry blocks, usually per operation region. - mlir::Block *retBlock = currLexScope->getOrCreateRetBlock(*this, getLoc(Loc)); - // returnBlock handles per region getJumpDestInCurrentScope LLVM traditional - // codegen logic. - (void)returnBlock(retBlock); - mlir::Block *EntryBB = &Fn.getBlocks().front(); + // TODO: allocapt insertion? probably don't need for CIR + if (cir::MissingFeatures::requiresReturnValueCheck()) llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 60aef3c39d6b..b6b949e47bf2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -379,14 +379,11 @@ class CIRGenFunction : public CIRGenTypeCache { clang::GlobalDecl CurGD; /// Unified return block. - /// In CIR this is a function because each scope might have - /// it's associated return block. - JumpDest returnBlock(mlir::Block *retBlock) { - return getJumpDestInCurrentScope(retBlock); + /// Not that for LLVM codegen this is a memeber variable instead. + JumpDest ReturnBlock() { + return JumpDest(currLexScope->getOrCreateCleanupBlock(builder)); } - unsigned nextCleanupDestIndex = 1; - /// The temporary alloca to hold the return value. This is /// invalid iff the function has no return value. Address ReturnValue = Address::invalid(); @@ -1354,16 +1351,6 @@ class CIRGenFunction : public CIRGenTypeCache { void emitStoreThroughBitfieldLValue(RValue Src, LValue Dst, mlir::Value &Result); - /// The given basic block lies in the current EH scope, but may be a - /// target of a potentially scope-crossing jump; get a stable handle - /// to which we can perform this jump later. - /// CIRGen: this mostly tracks state for figuring out the proper scope - /// information, no actual branches are emitted. - JumpDest getJumpDestInCurrentScope(mlir::Block *target) { - return JumpDest(target, EHStack.getInnermostNormalCleanup(), - nextCleanupDestIndex++); - } - cir::BrOp emitBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is @@ -2083,14 +2070,11 @@ class CIRGenFunction : public CIRGenTypeCache { void ForceCleanup(std::initializer_list ValuesToReload = {}) { assert(PerformCleanup && "Already forced cleanup"); - { - mlir::OpBuilder::InsertionGuard guard(CGF.getBuilder()); - CGF.DidCallStackSave = OldDidCallStackSave; - CGF.PopCleanupBlocks(CleanupStackDepth, - LifetimeExtendedCleanupStackSize, ValuesToReload); - PerformCleanup = false; - CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth; - } + CGF.DidCallStackSave = OldDidCallStackSave; + CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize, + ValuesToReload); + PerformCleanup = false; + CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth; } }; @@ -2219,8 +2203,7 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) { if (CleanupBlock) return getCleanupBlock(builder); - CleanupBlock = createCleanupBlock(builder); - return CleanupBlock; + return createCleanupBlock(builder); } mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) { @@ -2230,10 +2213,9 @@ class CIRGenFunction : public CIRGenTypeCache { { // Create the cleanup block but dont hook it up around just yet. mlir::OpBuilder::InsertionGuard guard(builder); - mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent() - : &CGF.CurFn->getRegion(0); - CleanupBlock = builder.createBlock(r); + CleanupBlock = builder.createBlock(builder.getBlock()->getParent()); } + assert(builder.getInsertionBlock() && "Should be valid"); return CleanupBlock; } @@ -2245,7 +2227,7 @@ class CIRGenFunction : public CIRGenTypeCache { // On switches we need one return block per region, since cases don't // have their own scopes but are distinct regions nonetheless. llvm::SmallVector RetBlocks; - llvm::DenseMap RetLocs; + llvm::SmallVector> RetLocs; llvm::DenseMap RetBlockInCaseIndex; std::optional NormalRetBlockIndex; llvm::SmallVector> SwitchRegions; @@ -2263,7 +2245,7 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::OpBuilder::InsertionGuard guard(CGF.builder); auto *b = CGF.builder.createBlock(CGF.builder.getBlock()->getParent()); RetBlocks.push_back(b); - updateRetLoc(b, loc); + RetLocs.push_back(loc); return b; } @@ -2272,9 +2254,8 @@ class CIRGenFunction : public CIRGenTypeCache { public: llvm::ArrayRef getRetBlocks() { return RetBlocks; } - mlir::Location getRetLoc(mlir::Block *b) { return RetLocs.at(b); } - void updateRetLoc(mlir::Block *b, mlir::Location loc) { - RetLocs.insert_or_assign(b, loc); + llvm::ArrayRef> getRetLocs() { + return RetLocs; } llvm::MutableArrayRef> getSwitchRegions() { assert(isSwitch() && "expected switch scope"); @@ -2288,26 +2269,22 @@ class CIRGenFunction : public CIRGenTypeCache { } mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) { - mlir::Block *ret = nullptr; if (auto caseOp = mlir::dyn_cast_if_present( CGF.builder.getBlock()->getParentOp())) { auto iter = RetBlockInCaseIndex.find(caseOp); if (iter != RetBlockInCaseIndex.end()) - ret = RetBlocks[iter->second]; - else { - ret = createRetBlock(CGF, loc); - RetBlockInCaseIndex[caseOp] = RetBlocks.size() - 1; - return ret; - } - } else if (!NormalRetBlockIndex) { - ret = createRetBlock(CGF, loc); + return RetBlocks[iter->second]; + + mlir::Block *ret = createRetBlock(CGF, loc); + RetBlockInCaseIndex[caseOp] = RetBlocks.size() - 1; + return ret; + } + if (!NormalRetBlockIndex) { + mlir::Block *ret = createRetBlock(CGF, loc); NormalRetBlockIndex = RetBlocks.size() - 1; return ret; - } else { - ret = &*RetBlocks[*NormalRetBlockIndex]; } - updateRetLoc(ret, loc); - return ret; + return &*RetBlocks[*NormalRetBlockIndex]; } // Scope entry block tracking diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index f6c3ccf12847..8708eeecb7e5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -72,18 +72,12 @@ Address CIRGenFunction::emitCompoundStmt(const CompoundStmt &S, bool getLast, // Add local scope to track new declared variables. SymTableScopeTy varScope(symbolTable); auto scopeLoc = getLoc(S.getSourceRange()); - mlir::OpBuilder::InsertPoint scopeInsPt; builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) { - scopeInsPt = b.saveInsertionPoint(); + LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; + retAlloca = emitCompoundStmtWithoutScope(S, getLast, slot); }); - { - mlir::OpBuilder::InsertionGuard guard(builder); - builder.restoreInsertionPoint(scopeInsPt); - LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()}; - retAlloca = emitCompoundStmtWithoutScope(S, getLast, slot); - } return retAlloca; } @@ -479,25 +473,14 @@ mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &S) { mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &S) { assert(!cir::MissingFeatures::requiresReturnValueCheck()); - assert(!cir::MissingFeatures::isSEHTryScope()); - auto loc = getLoc(S.getSourceRange()); // Emit the result value, even if unused, to evaluate the side effects. const Expr *RV = S.getRetValue(); - // Record the result expression of the return statement. The recorded - // expression is used to determine whether a block capture's lifetime should - // end at the end of the full expression as opposed to the end of the scope - // enclosing the block expression. - // - // This permits a small, easily-implemented exception to our over-conservative - // rules about not jumping to statements following block literals with - // non-trivial cleanups. - // TODO(cir): SaveRetExpr - // SaveRetExprRAII SaveRetExpr(RV, *this); + // TODO(cir): LLVM codegen uses a RunCleanupsScope cleanupScope here, we + // should model this in face of dtors. - RunCleanupsScope cleanupScope(*this); bool createNewScope = false; if (const auto *EWC = dyn_cast_or_null(RV)) { RV = EWC->getSubExpr(); @@ -574,17 +557,16 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &S) { } } - cleanupScope.ForceCleanup(); - - // In CIR we might have returns in different scopes. - // FIXME(cir): cleanup code is handling actual return emission, the logic - // should try to match traditional codegen more closely (to the extend which - // is possible). + // Create a new return block (if not existent) and add a branch to + // it. The actual return instruction is only inserted during current + // scope cleanup handling. auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); - emitBranchThroughCleanup(loc, returnBlock(retBlock)); + builder.create(loc, retBlock); // Insert the new block to continue codegen after branch to ret block. builder.createBlock(builder.getBlock()->getParent()); + + // TODO(cir): LLVM codegen for a cleanup on cleanupScope here. return mlir::success(); } @@ -1173,6 +1155,5 @@ void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue RV, } else { llvm_unreachable("NYI"); } - auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); - emitBranchThroughCleanup(loc, returnBlock(retBlock)); + emitBranchThroughCleanup(loc, ReturnBlock()); } diff --git a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp index 216e63029ddd..a53d85fbf55b 100644 --- a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp +++ b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp @@ -4,9 +4,9 @@ auto func() { return __builtin_strcmp("", ""); // CIR: cir.func @_Z4funcv() - // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} - // CIR-NEXT: %1 = cir.const #cir.int<0> : !s32i - // CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr - // CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i - // CIR-NEXT: cir.return %2 : !s32i + // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} loc(#loc2) + // CIR-NEXT: %1 = cir.const #cir.int<0> : !s32i loc(#loc7) + // CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr loc(#loc8) + // CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i loc(#loc8) + // CIR-NEXT: cir.return %2 : !s32i loc(#loc8) } diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 6c3538696361..b45634c0def8 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -245,8 +245,10 @@ int g3() { // COM: LLVM: [[CALL:%.*]] = call noundef i32 @"_ZZ2g3vENK3$_0clERKi"(ptr noundef nonnull align 1 dereferenceable(1) [[unused_capture]], ptr noundef nonnull align 4 dereferenceable(4) [[TMP0]]) // LLVM: [[CALL:%.*]] = call i32 @"_ZZ2g3vENK3$_0clERKi"(ptr [[unused_capture]], ptr [[TMP0]]) // LLVM: store i32 [[CALL]], ptr [[ret_val]], align 4 -// LLVM: %[[ret:.*]] = load i32, ptr [[ret_val]], align 4 -// LLVM: ret i32 %[[ret]] +// FIXME: should just return result +// COM: LLVM: ret i32 [[ret_val]] +// LLVM: call void @llvm.trap() +// LLVM: unreachable // lambda operator int (*)(int const&)() // LLVM-LABEL: @"_ZZ2g3vENK3$_0cvPFiRKiEEv" From 9107c99f93beede9a594161003180568047f88a9 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Wed, 11 Dec 2024 22:33:56 -0500 Subject: [PATCH 2157/2301] [CIR][CodeGen][NFC] Refactor emitCXXGlobalVarDeclInit to match codegen --- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 81 +++++++++++++++-------------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index eb582c8f01a3..280c735440c3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -17,6 +17,7 @@ #include "CIRGenModule.h" #include "clang/AST/GlobalDecl.h" +#include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/SaveAndRestore.h" #include @@ -335,11 +336,6 @@ void CIRGenModule::emitCXXGlobalVarDeclInit(const VarDecl *varDecl, // expects "this" in the "generic" address space. assert(!cir::MissingFeatures::addressSpace()); - if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && - varDecl->hasAttr()) { - llvm_unreachable("NYI"); - } - assert(varDecl && " Expected a global declaration!"); CIRGenFunction cgf{*this, builder, true}; llvm::SaveAndRestore savedCGF(CurCGF, &cgf); @@ -350,42 +346,15 @@ void CIRGenModule::emitCXXGlobalVarDeclInit(const VarDecl *varDecl, addr.setAstAttr(cir::ASTVarDeclAttr::get(&getMLIRContext(), varDecl)); - if (ty->isReferenceType()) { - mlir::OpBuilder::InsertionGuard guard(builder); - auto *block = builder.createBlock(&addr.getCtorRegion()); - CIRGenFunction::LexicalScope lexScope{*CurCGF, addr.getLoc(), - builder.getInsertionBlock()}; - lexScope.setAsGlobalInit(); - builder.setInsertionPointToStart(block); - auto getGlobal = builder.createGetGlobal(addr); - - Address declAddr(getGlobal, getGlobal.getType(), - getASTContext().getDeclAlign(varDecl)); - assert(performInit && "cannot have constant initializer which needs " - "destruction for reference"); - RValue rv = cgf.emitReferenceBindingToExpr(init); - { - mlir::OpBuilder::InsertionGuard guard(builder); - mlir::Operation *rvalueDefOp = rv.getScalarVal().getDefiningOp(); - if (rvalueDefOp && rvalueDefOp->getBlock()) { - mlir::Block *rvalSrcBlock = rvalueDefOp->getBlock(); - if (!rvalSrcBlock->empty() && isa(rvalSrcBlock->back())) { - auto &front = rvalSrcBlock->front(); - getGlobal.getDefiningOp()->moveBefore(&front); - auto yield = cast(rvalSrcBlock->back()); - builder.setInsertionPoint(yield); - } - } - cgf.emitStoreOfScalar(rv.getScalarVal(), declAddr, false, ty); + if (!ty->isReferenceType()) { + if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && + varDecl->hasAttr()) { + llvm_unreachable("NYI"); } - builder.setInsertionPointToEnd(block); - builder.create(addr->getLoc()); - } else { + bool needsDtor = varDecl->needsDestruction(getASTContext()) == QualType::DK_cxx_destructor; // PerformInit, constant store invariant / destroy handled below. - bool isConstantStorage = - varDecl->getType().isConstantStorage(getASTContext(), true, !needsDtor); if (performInit) { mlir::OpBuilder::InsertionGuard guard(builder); auto *block = builder.createBlock(&addr.getCtorRegion()); @@ -401,9 +370,10 @@ void CIRGenModule::emitCXXGlobalVarDeclInit(const VarDecl *varDecl, builder.create(addr->getLoc()); } - if (isConstantStorage) { - // TODO: this leads to a missing feature in the moment, probably also need - // a LexicalScope to be inserted here. + if (varDecl->getType().isConstantStorage(getASTContext(), true, + !needsDtor)) { + // TODO(CIR): this leads to a missing feature in the moment, probably also + // need a LexicalScope to be inserted here. emitDeclInvariant(cgf, varDecl); } else { // If not constant storage we'll emit this regardless of NeedsDtor value. @@ -423,5 +393,36 @@ void CIRGenModule::emitCXXGlobalVarDeclInit(const VarDecl *varDecl, } else builder.create(addr->getLoc()); } + return; + } + + mlir::OpBuilder::InsertionGuard guard(builder); + auto *block = builder.createBlock(&addr.getCtorRegion()); + CIRGenFunction::LexicalScope lexScope{*CurCGF, addr.getLoc(), + builder.getInsertionBlock()}; + lexScope.setAsGlobalInit(); + builder.setInsertionPointToStart(block); + auto getGlobal = builder.createGetGlobal(addr); + + Address declAddr(getGlobal, getGlobal.getType(), + getASTContext().getDeclAlign(varDecl)); + assert(performInit && "cannot have constant initializer which needs " + "destruction for reference"); + RValue rv = cgf.emitReferenceBindingToExpr(init); + { + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::Operation *rvalueDefOp = rv.getScalarVal().getDefiningOp(); + if (rvalueDefOp && rvalueDefOp->getBlock()) { + mlir::Block *rvalSrcBlock = rvalueDefOp->getBlock(); + if (!rvalSrcBlock->empty() && isa(rvalSrcBlock->back())) { + auto &front = rvalSrcBlock->front(); + getGlobal.getDefiningOp()->moveBefore(&front); + auto yield = cast(rvalSrcBlock->back()); + builder.setInsertionPoint(yield); + } + } + cgf.emitStoreOfScalar(rv.getScalarVal(), declAddr, false, ty); } + builder.setInsertionPointToEnd(block); + builder.create(addr->getLoc()); } From cf875ec46d7d77c23c2eb145df97a83083c08b82 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 12 Dec 2024 01:40:18 -0500 Subject: [PATCH 2158/2301] [CIR][CodeGen] Flesh out some more missing features from Address --- clang/lib/CIR/CodeGen/Address.h | 35 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenPointerAuthInfo.h | 20 +++++++++++ 2 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenPointerAuthInfo.h diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index 7643f9b87992..b88bd4378647 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -19,6 +19,8 @@ #include "llvm/IR/Constants.h" +#include "CIRGenPointerAuthInfo.h" + #include "mlir/IR/Value.h" namespace clang::CIRGen { @@ -29,10 +31,24 @@ enum KnownNonNull_t { NotKnownNonNull, KnownNonNull }; /// Like RawAddress, an abstract representation of an aligned address, but the /// pointer contained in this class is possibly signed. class Address { + + // The boolean flag indicates whether the pointer is known to be non-null. llvm::PointerIntPair PointerAndKnownNonNull; + + /// The expected CIR type of the pointer. Carrying accurate element type + /// information in Address makes it more convenient to work with Address + /// values and allows frontend assertions to catch simple mistakes. mlir::Type ElementType; + clang::CharUnits Alignment; + /// The ptrauth information needed to authenticate the base pointer. + cir::CIRGenPointerAuthInfo ptrAuthInfo; + + /// Offset from the base pointer. This is non-null only when the base pointer + /// is signed. + mlir::Value offset = nullptr; + protected: Address(std::nullptr_t) : ElementType(nullptr) {} @@ -49,6 +65,14 @@ class Address { assert(elementType && "Element type cannot be null"); assert(!alignment.isZero() && "Alignment cannot be zero"); } + + Address(mlir::Value basePtr, mlir::Type elementType, + clang::CharUnits alignment, cir::CIRGenPointerAuthInfo ptrAuthInfo, + mlir::Value offset, KnownNonNull_t isKnownNonNull = NotKnownNonNull) + : PointerAndKnownNonNull(basePtr, isKnownNonNull), + ElementType(elementType), Alignment(alignment), + ptrAuthInfo(ptrAuthInfo), offset(offset) {} + Address(mlir::Value pointer, clang::CharUnits alignment) : Address(pointer, mlir::cast(pointer.getType()).getPointee(), @@ -78,10 +102,15 @@ class Address { isKnownNonNull()); } + bool hasOffset() const { return bool(offset); } + /// Return address with different element type, but same pointer and /// alignment. Address withElementType(mlir::Type ElemTy) const { - // TODO(cir): hasOffset() check + if (!hasOffset()) + return Address(getBasePointer(), ElemTy, getAlignment(), + getPointerAuthInfo(), /*Offset=*/nullptr, + isKnownNonNull()); return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull()); } @@ -121,6 +150,10 @@ class Address { return ElementType; } + const cir::CIRGenPointerAuthInfo &getPointerAuthInfo() const { + return ptrAuthInfo; + } + /// Whether the pointer is known not to be null. KnownNonNull_t isKnownNonNull() const { assert(isValid()); diff --git a/clang/lib/CIR/CodeGen/CIRGenPointerAuthInfo.h b/clang/lib/CIR/CodeGen/CIRGenPointerAuthInfo.h new file mode 100644 index 000000000000..bffc90974b66 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenPointerAuthInfo.h @@ -0,0 +1,20 @@ +//===----- CIRGenPointerAuthInfo.h - ---------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Pointer auth info class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENPOINTERAUTHINFO_H +#define LLVM_CLANG_LIB_CIR_CODEGEN_CIRGENPOINTERAUTHINFO_H + +namespace cir { +class CIRGenPointerAuthInfo {}; +} // namespace cir + +#endif From 9ddefd8afcbc4bab18f0de2a464bd80cd6105a26 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Fri, 13 Dec 2024 01:37:34 +0800 Subject: [PATCH 2159/2301] [CIR][CIRGen] Handle NYI in CIRGenModule::tryEmitBaseDestructorAsAlias (#1180) This removes some NYI in CIRGenModule::tryEmitBaseDestructorAsAlias and similar to https://github.com/llvm/clangir/pull/1179, use `assert(false)` to tell devs to add test. It is slightly verbose due to the difference between LLVM and CIR's type system. LLVM's pointer are opaque types while CIR's pointer are typed. So we need to handle these pointers when transforming the generated cir. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 10 ++++++ clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 12 ++++--- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 37 ++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.h | 5 +++ clang/test/CIR/CodeGen/dtor-alias.cpp | 18 ++++++++++ 5 files changed, 78 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/dtor-alias.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index dc7af3154f2d..188aabc7db70 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3585,6 +3585,16 @@ class CIR_CallOp extra_traits = []> : bool isIndirect() { return !getCallee(); } mlir::Value getIndirectCall(); + + void setArg(unsigned index, mlir::Value value) { + if (!isIndirect()) { + setOperand(index, value); + return; + } + + // For indirect call, the operand list is shifted by one. + setOperand(index + 1, value); + } }]; let hasCustomAssemblyFormat = 1; diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 280c735440c3..5d401d008dd7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -146,7 +146,8 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // members with attribute "AlwaysInline" and expect no reference to // be generated. It is desirable to reenable this optimisation after // corresponding LLVM changes. - llvm_unreachable("NYI"); + addReplacement(MangledName, Aliasee); + return false; } // If we have a weak, non-discardable alias (weak, weak_odr), like an @@ -155,7 +156,8 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // symbol reference from another TU. The other TU must also mark the // referenced symbol as weak, which we cannot rely on. if (cir::isWeakForLinker(Linkage) && getTriple().isOSBinFormatCOFF()) { - llvm_unreachable("NYI"); + llvm_unreachable("please sent a PR with a test and remove this.\n"); + return true; } // If we don't have a definition for the destructor yet or the definition @@ -169,8 +171,10 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { // different COMDATs in different TUs. Another option would be to // output the alias both for weak_odr and linkonce_odr, but that // requires explicit comdat support in the IL. - if (cir::isWeakForLinker(TargetLinkage)) - llvm_unreachable("NYI"); + if (cir::isWeakForLinker(TargetLinkage)) { + llvm_unreachable("please sent a PR with a test and remove this.\n"); + return true; + } // Create the alias with no name. emitAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 69e611972a6c..7a66b0730078 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -3493,6 +3493,39 @@ void CIRGenModule::addReplacement(StringRef Name, mlir::Operation *Op) { Replacements[Name] = Op; } +void CIRGenModule::replacePointerTypeArgs(cir::FuncOp OldF, cir::FuncOp NewF) { + auto optionalUseRange = OldF.getSymbolUses(theModule); + if (!optionalUseRange) + return; + + for (auto U : *optionalUseRange) { + // CallTryOp only shows up after FlattenCFG. + auto Call = mlir::dyn_cast(U.getUser()); + if (!Call) + continue; + + auto ArgOps = Call.getArgOps(); + auto FuncArgTypes = NewF.getFunctionType().getInputs(); + for (unsigned I = 0; I < FuncArgTypes.size(); I++) { + if (ArgOps[I].getType() == FuncArgTypes[I]) + continue; + + auto argPointerTy = mlir::dyn_cast(ArgOps[I].getType()); + auto funcArgPointerTy = mlir::dyn_cast(FuncArgTypes[I]); + + // If we can't solve it, leave it for the verifier to bail out. + if (!argPointerTy || !funcArgPointerTy) + continue; + + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPoint(Call); + auto castedArg = + builder.createBitcast(Call.getLoc(), ArgOps[I], funcArgPointerTy); + Call.setArg(I, castedArg); + } + } +} + void CIRGenModule::applyReplacements() { for (auto &I : Replacements) { StringRef MangledName = I.first(); @@ -3505,6 +3538,10 @@ void CIRGenModule::applyReplacements() { auto NewF = dyn_cast(Replacement); assert(NewF && "not implemented"); + // LLVM has opaque pointer but CIR not. So we may have to handle these + // different pointer types when performing replacement. + replacePointerTypeArgs(OldF, NewF); + // Replace old with new, but keep the old order. if (OldF.replaceAllSymbolUses(NewF.getSymNameAttr(), theModule).failed()) llvm_unreachable("internal error, cannot RAUW symbol"); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 9d7c1eb572a6..905754a4ad3a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -860,6 +860,11 @@ class CIRGenModule : public CIRGenTypeCache { /// Call replaceAllUsesWith on all pairs in Replacements. void applyReplacements(); + /// A helper function to replace all uses of OldF to NewF that replace + /// the type of pointer arguments. This is not needed to tradtional + /// pipeline since LLVM has opaque pointers but CIR not. + void replacePointerTypeArgs(cir::FuncOp OldF, cir::FuncOp NewF); + void setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GV); /// Map source language used to a CIR attribute. cir::SourceLanguage getCIRSourceLanguage(); diff --git a/clang/test/CIR/CodeGen/dtor-alias.cpp b/clang/test/CIR/CodeGen/dtor-alias.cpp new file mode 100644 index 000000000000..b094f4e20ca0 --- /dev/null +++ b/clang/test/CIR/CodeGen/dtor-alias.cpp @@ -0,0 +1,18 @@ +// FIXME: Remove of -clangir-disable-passes may trigger a memory safe bug in CIR internally during +// lowering +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu \ +// RUN: -mconstructor-aliases -fclangir -emit-cir %s -o %t.cir \ +// RUN: -clangir-disable-passes -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir + +namespace { +struct A { + ~A() {} +}; + +struct B : public A {}; +} + +B x; + +// CHECK: cir.call @_ZN12_GLOBAL__N_11AD2Ev({{.*}}) : (!cir.ptr) -> () From 1eeed6e52ca635d69d9edad33951942f9e96e694 Mon Sep 17 00:00:00 2001 From: 7mile Date: Fri, 13 Dec 2024 01:39:34 +0800 Subject: [PATCH 2160/2301] [CIR][cir-translate] Support specifying target for `cir-translate` (#1186) This PR adds a new command line option `--target` to our tool `cir-translate`. The concrete behaviour of it also depends on the triple and data layout in the CIR module. See the table in code comments for details. The default triple is `x86_64-unknown-linux-gnu` currently. Some tests are updated with triple and DLTI attribute eliminated (replaced by an option in RUN line). But still some tests remain unchanged, primarily because they use `cir-opt` instead. --- clang/test/CIR/Lowering/address-space.cir | 7 +- clang/test/CIR/Lowering/data-member.cir | 4 +- clang/test/CIR/Lowering/exceptions.cir | 4 +- clang/test/CIR/Tools/cir-translate-triple.cir | 14 --- .../cir-translate/cir-translate-triple.cir | 11 ++ .../has-triple-and-data-layout.cir | 24 ++++ .../has-triple-no-data-layout.cir | 23 ++++ .../invalid-translate-triple.cir | 8 ++ .../no-triple-has-data-layout.cir | 23 ++++ .../no-triple-no-data-layout.cir | 21 ++++ .../cir-translate/warn-default-triple.cir | 8 ++ clang/tools/cir-translate/cir-translate.cpp | 111 +++++++++++++++++- 12 files changed, 234 insertions(+), 24 deletions(-) delete mode 100644 clang/test/CIR/Tools/cir-translate-triple.cir create mode 100644 clang/test/CIR/Tools/cir-translate/cir-translate-triple.cir create mode 100644 clang/test/CIR/Tools/cir-translate/has-triple-and-data-layout.cir create mode 100644 clang/test/CIR/Tools/cir-translate/has-triple-no-data-layout.cir create mode 100644 clang/test/CIR/Tools/cir-translate/invalid-translate-triple.cir create mode 100644 clang/test/CIR/Tools/cir-translate/no-triple-has-data-layout.cir create mode 100644 clang/test/CIR/Tools/cir-translate/no-triple-no-data-layout.cir create mode 100644 clang/test/CIR/Tools/cir-translate/warn-default-triple.cir diff --git a/clang/test/CIR/Lowering/address-space.cir b/clang/test/CIR/Lowering/address-space.cir index ac80912bdbd9..abe693a1cf51 100644 --- a/clang/test/CIR/Lowering/address-space.cir +++ b/clang/test/CIR/Lowering/address-space.cir @@ -1,12 +1,9 @@ -// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o %t.ll +// RUN: cir-translate %s -cir-to-llvmir --target spirv64-unknown-unknown --disable-cc-lowering -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM !s32i = !cir.int -module attributes { - cir.triple = "spirv64-unknown-unknown", - dlti.dl_spec = #dlti.dl_spec : vector<2xi64>, i32 = dense<32> : vector<2xi64>, i1 = dense<8> : vector<2xi64>, i8 = dense<8> : vector<2xi64>, f128 = dense<128> : vector<2xi64>, f64 = dense<64> : vector<2xi64>, f16 = dense<16> : vector<2xi64>, i64 = dense<64> : vector<2xi64>, !llvm.ptr = dense<64> : vector<4xi64>, "dlti.endianness" = "little", "dlti.global_memory_space" = 1 : ui64> -} { +module { cir.global external addrspace(offload_global) @addrspace1 = #cir.int<1> : !s32i // LLVM: @addrspace1 = addrspace(1) global i32 diff --git a/clang/test/CIR/Lowering/data-member.cir b/clang/test/CIR/Lowering/data-member.cir index 14f3138bde56..d96f57ea4560 100644 --- a/clang/test/CIR/Lowering/data-member.cir +++ b/clang/test/CIR/Lowering/data-member.cir @@ -1,5 +1,5 @@ // RUN: cir-opt -cir-to-llvm -o - %s | FileCheck -check-prefix=MLIR %s -// RUN: cir-translate -cir-to-llvmir --disable-cc-lowering -o - %s | FileCheck -check-prefix=LLVM %s +// RUN: cir-translate -cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering -o - %s | FileCheck -check-prefix=LLVM %s !s32i = !cir.int !s64i = !cir.int @@ -7,7 +7,7 @@ module @test attributes { cir.triple = "x86_64-unknown-linux-gnu", - llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" + dlti.dl_spec = #dlti.dl_spec : vector<2xi64>, f80 = dense<128> : vector<2xi64>, !llvm.ptr<271> = dense<32> : vector<4xi64>, !llvm.ptr<272> = dense<64> : vector<4xi64>, i64 = dense<64> : vector<2xi64>, f16 = dense<16> : vector<2xi64>, i32 = dense<32> : vector<2xi64>, f128 = dense<128> : vector<2xi64>, !llvm.ptr<270> = dense<32> : vector<4xi64>, f64 = dense<64> : vector<2xi64>, !llvm.ptr = dense<64> : vector<4xi64>, i1 = dense<8> : vector<2xi64>, i8 = dense<8> : vector<2xi64>, i16 = dense<16> : vector<2xi64>, "dlti.stack_alignment" = 128 : i64, "dlti.endianness" = "little"> } { cir.global external @pt_member = #cir.data_member<1> : !cir.data_member // MLIR: llvm.mlir.global external @pt_member(4 : i64) {addr_space = 0 : i32} : i64 diff --git a/clang/test/CIR/Lowering/exceptions.cir b/clang/test/CIR/Lowering/exceptions.cir index 2c40ac96d351..4623ed88b34f 100644 --- a/clang/test/CIR/Lowering/exceptions.cir +++ b/clang/test/CIR/Lowering/exceptions.cir @@ -1,4 +1,4 @@ -// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o %t.ll +// RUN: cir-translate %s -cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM !s32i = !cir.int @@ -8,7 +8,7 @@ !u8i = !cir.int !void = !cir.void -module @"try-catch.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior, cir.triple = "x86_64-unknown-linux-gnu", dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>, #dlti.dl_entry<"dlti.endianness", "little">>} { +module @"try-catch.cpp" attributes {cir.lang = #cir.lang, cir.sob = #cir.signed_overflow_behavior} { cir.global "private" constant external @_ZTIi : !cir.ptr cir.global "private" constant external @_ZTIPKc : !cir.ptr cir.func private @_Z8divisionii(!s32i, !s32i) -> !cir.double diff --git a/clang/test/CIR/Tools/cir-translate-triple.cir b/clang/test/CIR/Tools/cir-translate-triple.cir deleted file mode 100644 index 175e3cc7cded..000000000000 --- a/clang/test/CIR/Tools/cir-translate-triple.cir +++ /dev/null @@ -1,14 +0,0 @@ -// RUN: cir-translate --cir-to-llvmir --disable-cc-lowering %s -o %t.ll -// RUN: FileCheck %s -input-file %t.ll -check-prefix=LLVM - -module attributes { - cir.triple = "x86_64-unknown-linux-gnu", - dlti.dl_spec = #dlti.dl_spec : vector<2xi64>, f80 = dense<128> : vector<2xi64>, !llvm.ptr<271> = dense<32> : vector<4xi64>, !llvm.ptr<272> = dense<64> : vector<4xi64>, i64 = dense<64> : vector<2xi64>, f16 = dense<16> : vector<2xi64>, i32 = dense<32> : vector<2xi64>, f128 = dense<128> : vector<2xi64>, !llvm.ptr<270> = dense<32> : vector<4xi64>, f64 = dense<64> : vector<2xi64>, !llvm.ptr = dense<64> : vector<4xi64>, i1 = dense<8> : vector<2xi64>, i8 = dense<8> : vector<2xi64>, i16 = dense<16> : vector<2xi64>, "dlti.stack_alignment" = 128 : i64, "dlti.endianness" = "little"> -} { - cir.func @foo() { - cir.return - } -} - -// LLVM-DAG: target triple = "x86_64-unknown-linux-gnu" -// LLVM-DAG: target datalayout = "{{.*}}" diff --git a/clang/test/CIR/Tools/cir-translate/cir-translate-triple.cir b/clang/test/CIR/Tools/cir-translate/cir-translate-triple.cir new file mode 100644 index 000000000000..fa653ef3de25 --- /dev/null +++ b/clang/test/CIR/Tools/cir-translate/cir-translate-triple.cir @@ -0,0 +1,11 @@ +// RUN: cir-translate --cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering %s -o %t.ll +// RUN: FileCheck %s -input-file %t.ll -check-prefix=LLVM + +module { + cir.func @foo() { + cir.return + } +} + +// LLVM-DAG: target triple = "x86_64-unknown-linux-gnu" +// LLVM-DAG: target datalayout = "{{.*}}" diff --git a/clang/test/CIR/Tools/cir-translate/has-triple-and-data-layout.cir b/clang/test/CIR/Tools/cir-translate/has-triple-and-data-layout.cir new file mode 100644 index 000000000000..81da113f1d64 --- /dev/null +++ b/clang/test/CIR/Tools/cir-translate/has-triple-and-data-layout.cir @@ -0,0 +1,24 @@ +// RUN: cir-translate --cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering %s -o %t.x86.ll +// RUN: FileCheck %s -input-file %t.x86.ll -check-prefix=X86 +// RUN: cir-translate --cir-to-llvmir --target spirv64-unknown-unknown --disable-cc-lowering %s -o %t.spirv64.ll +// RUN: FileCheck %s -input-file %t.spirv64.ll -check-prefix=SPIRV64 +// RUN: cir-translate --cir-to-llvmir --disable-cc-lowering %s -o %t.default.ll +// RUN: FileCheck %s -input-file %t.default.ll -check-prefix=DEFAULT + +module attributes { + cir.triple = "spirv64-unknown-unknown", + dlti.dl_spec = #dlti.dl_spec<"dlti.global_memory_space" = 7 : ui64> +} { + cir.func @foo() { + cir.return + } +} + +// X86-NOT: target datalayout = "G7" +// X86-DAG: target triple = "x86_64-unknown-linux-gnu" + +// SPIRV64-NOT: target datalayout = "G7" +// SPIRV64-DAG: target triple = "spirv64-unknown-unknown" + +// DEFAULT-DAG: target datalayout = "G7" +// DEFAULT-DAG: target triple = "spirv64-unknown-unknown" diff --git a/clang/test/CIR/Tools/cir-translate/has-triple-no-data-layout.cir b/clang/test/CIR/Tools/cir-translate/has-triple-no-data-layout.cir new file mode 100644 index 000000000000..34c543362bed --- /dev/null +++ b/clang/test/CIR/Tools/cir-translate/has-triple-no-data-layout.cir @@ -0,0 +1,23 @@ +// RUN: cir-translate --cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering %s -o %t.x86.ll +// RUN: FileCheck %s -input-file %t.x86.ll -check-prefix=X86 +// RUN: cir-translate --cir-to-llvmir --target spirv64-unknown-unknown --disable-cc-lowering %s -o %t.spirv64.ll +// RUN: FileCheck %s -input-file %t.spirv64.ll -check-prefix=SPIRV64 +// RUN: cir-translate --cir-to-llvmir --disable-cc-lowering %s -o %t.default.ll +// RUN: FileCheck %s -input-file %t.default.ll -check-prefix=DEFAULT + +module attributes { + cir.triple = "spirv64-unknown-unknown" +} { + cir.func @foo() { + cir.return + } +} + +// X86-DAG: target triple = "x86_64-unknown-linux-gnu" +// X86-DAG: target datalayout = "{{.*}}" + +// SPIRV64-DAG: target triple = "spirv64-unknown-unknown" +// SPIRV64-DAG: target datalayout = "{{.*}}" + +// DEFAULT-DAG: target triple = "spirv64-unknown-unknown" +// DEFAULT-DAG: target datalayout = "{{.*}}" diff --git a/clang/test/CIR/Tools/cir-translate/invalid-translate-triple.cir b/clang/test/CIR/Tools/cir-translate/invalid-translate-triple.cir new file mode 100644 index 000000000000..07bd766a3787 --- /dev/null +++ b/clang/test/CIR/Tools/cir-translate/invalid-translate-triple.cir @@ -0,0 +1,8 @@ +// RUN: cir-translate -verify-diagnostics --cir-to-llvmir --target foobar --disable-cc-lowering %s 2>&1 + +// expected-error@below {{invalid target triple 'foobar'}} +module { + cir.func @foo() { + cir.return + } +} diff --git a/clang/test/CIR/Tools/cir-translate/no-triple-has-data-layout.cir b/clang/test/CIR/Tools/cir-translate/no-triple-has-data-layout.cir new file mode 100644 index 000000000000..f2853941271f --- /dev/null +++ b/clang/test/CIR/Tools/cir-translate/no-triple-has-data-layout.cir @@ -0,0 +1,23 @@ +// RUN: cir-translate --cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering %s -o %t.x86.ll +// RUN: FileCheck %s -input-file %t.x86.ll -check-prefix=X86 +// RUN: cir-translate --cir-to-llvmir --target spirv64-unknown-unknown --disable-cc-lowering %s -o %t.spirv64.ll +// RUN: FileCheck %s -input-file %t.spirv64.ll -check-prefix=SPIRV64 +// RUN: cir-translate --cir-to-llvmir --disable-cc-lowering %s -o %t.default.ll +// RUN: FileCheck %s -input-file %t.default.ll -check-prefix=DEFAULT + +module attributes { + dlti.dl_spec = #dlti.dl_spec<"dlti.global_memory_space" = 7 : ui64> +} { + cir.func @foo() { + cir.return + } +} + +// X86-NOT: target datalayout = "G7" +// X86-DAG: target triple = "x86_64-unknown-linux-gnu" + +// SPIRV64-NOT: target datalayout = "G7" +// SPIRV64-DAG: target triple = "spirv64-unknown-unknown" + +// DEFAULT-NOT: target datalayout = "G7" +// DEFAULT-DAG: target triple = "x86_64-unknown-linux-gnu" diff --git a/clang/test/CIR/Tools/cir-translate/no-triple-no-data-layout.cir b/clang/test/CIR/Tools/cir-translate/no-triple-no-data-layout.cir new file mode 100644 index 000000000000..f18f69dd876d --- /dev/null +++ b/clang/test/CIR/Tools/cir-translate/no-triple-no-data-layout.cir @@ -0,0 +1,21 @@ +// RUN: cir-translate --cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering %s -o %t.x86.ll +// RUN: FileCheck %s -input-file %t.x86.ll -check-prefix=X86 +// RUN: cir-translate --cir-to-llvmir --target spirv64-unknown-unknown --disable-cc-lowering %s -o %t.spirv64.ll +// RUN: FileCheck %s -input-file %t.spirv64.ll -check-prefix=SPIRV64 +// RUN: cir-translate --cir-to-llvmir --disable-cc-lowering %s -o %t.default.ll +// RUN: FileCheck %s -input-file %t.default.ll -check-prefix=DEFAULT + +module { + cir.func @foo() { + cir.return + } +} + +// X86-DAG: target triple = "x86_64-unknown-linux-gnu" +// X86-DAG: target datalayout = "{{.*}}" + +// SPIRV64-DAG: target triple = "spirv64-unknown-unknown" +// SPIRV64-DAG: target datalayout = "{{.*}}" + +// DEFAULT-DAG: target triple = "x86_64-unknown-linux-gnu" +// DEFAULT-DAG: target datalayout = "{{.*}}" diff --git a/clang/test/CIR/Tools/cir-translate/warn-default-triple.cir b/clang/test/CIR/Tools/cir-translate/warn-default-triple.cir new file mode 100644 index 000000000000..519e96598d43 --- /dev/null +++ b/clang/test/CIR/Tools/cir-translate/warn-default-triple.cir @@ -0,0 +1,8 @@ +// RUN: cir-translate -verify-diagnostics --cir-to-llvmir --disable-cc-lowering %s + +// expected-warning@below {{no target triple provided, assuming x86_64-unknown-linux-gnu}} +module { + cir.func @foo() { + cir.return + } +} diff --git a/clang/tools/cir-translate/cir-translate.cpp b/clang/tools/cir-translate/cir-translate.cpp index 347215d76115..bc8fea1f635e 100644 --- a/clang/tools/cir-translate/cir-translate.cpp +++ b/clang/tools/cir-translate/cir-translate.cpp @@ -13,14 +13,22 @@ #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" #include "mlir/InitAllTranslations.h" #include "mlir/Support/LogicalResult.h" #include "mlir/Target/LLVMIR/Dialect/All.h" +#include "mlir/Target/LLVMIR/Import.h" #include "mlir/Tools/mlir-translate/MlirTranslateMain.h" #include "mlir/Tools/mlir-translate/Translation.h" + #include "llvm/IR/Module.h" +#include "llvm/TargetParser/Host.h" + +#include "clang/Basic/TargetInfo.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/MissingFeatures.h" namespace cir { namespace direct { @@ -30,6 +38,102 @@ extern std::unique_ptr lowerDirectlyFromCIRToLLVMIR( bool disableVerifier = false, bool disableCCLowering = false, bool disableDebugInfo = false); } // namespace direct + +namespace { + +/// The goal of this option is to ensure that the triple and data layout specs +/// are always available in the ClangIR module. With this requirement met, the +/// behavior of this option is designed to be as intuitive as possible, as shown +/// in the table below: +/// +/// +--------+--------+-------------+-----------------+-----------------------+ +/// | Option | Triple | Data Layout | Behavior Triple | Behavior Data Layout | +/// +========+========+=============+=================+=======================+ +/// | T | T | T | Overwrite | Derive from triple | +/// | T | T | F | Overwrite | Derive from triple | +/// | T | F | T | Overwrite | Derive from triple | +/// | T | F | F | Overwrite | Derive from triple | +/// | F | T | T | | | +/// | F | T | F | | Derive from triple | +/// | F | F | T | Set default | Derive from triple | +/// | F | F | F | Set default | Derive from triple | +/// +--------+--------+-------------+-----------------+-----------------------+ +llvm::cl::opt + targetTripleOption("target", + llvm::cl::desc("Specify a default target triple when " + "it's not available in the module"), + llvm::cl::init("")); + +std::string prepareCIRModuleTriple(mlir::ModuleOp mod) { + std::string triple = targetTripleOption; + + // Treat "" as the default target machine. + if (triple.empty()) { + // Currently ClangIR only supports a couple of targets. Not specifying a + // target triple will default to x86_64-unknown-linux-gnu. + triple = "x86_64-unknown-linux-gnu"; + + mod.emitWarning() << "no target triple provided, assuming " << triple; + } + + mod->setAttr(cir::CIRDialect::getTripleAttrName(), + mlir::StringAttr::get(mod.getContext(), triple)); + return triple; +} + +llvm::LogicalResult prepareCIRModuleDataLayout(mlir::ModuleOp mod, + llvm::StringRef rawTriple) { + auto *context = mod.getContext(); + + // Data layout is fully determined by the target triple. Here we only pass the + // triple to get the data layout. + llvm::Triple triple(rawTriple); + clang::TargetOptions targetOptions; + targetOptions.Triple = rawTriple; + // FIXME: AllocateTarget is a big deal. Better make it a global state. + auto targetInfo = + clang::targets::AllocateTarget(llvm::Triple(rawTriple), targetOptions); + if (!targetInfo) { + mod.emitError() << "error: invalid target triple '" << rawTriple << "'\n"; + return llvm::failure(); + } + std::string layoutString = targetInfo->getDataLayoutString(); + + // Registered dialects may not be loaded yet, ensure they are. + context->loadDialect(); + + mlir::DataLayoutSpecInterface dlSpec = + mlir::translateDataLayout(llvm::DataLayout(layoutString), context); + mod->setAttr(mlir::DLTIDialect::kDataLayoutAttrName, dlSpec); + + return llvm::success(); +} + +/// Prepare requirements like cir.triple and data layout. +llvm::LogicalResult prepareCIRModuleForTranslation(mlir::ModuleOp mod) { + auto modTriple = mod->getAttrOfType( + cir::CIRDialect::getTripleAttrName()); + auto modDataLayout = mod->getAttr(mlir::DLTIDialect::kDataLayoutAttrName); + bool hasTargetOption = targetTripleOption.getNumOccurrences() > 0; + + // Skip the situation where nothing should be done. + if (!hasTargetOption && modTriple && modDataLayout) + return llvm::success(); + + std::string triple; + + if (!hasTargetOption && modTriple) { + // Do nothing if it's already set. + triple = modTriple.getValue(); + } else { + // Otherwise, overwrite or set default. + triple = prepareCIRModuleTriple(mod); + } + + // If the data layout is not set, derive it from the triple. + return prepareCIRModuleDataLayout(mod, triple); +} +} // namespace } // namespace cir void registerToLLVMTranslation() { @@ -41,9 +145,14 @@ void registerToLLVMTranslation() { mlir::TranslateFromMLIRRegistration registration( "cir-to-llvmir", "Translate CIR to LLVMIR", [](mlir::Operation *op, mlir::raw_ostream &output) { + auto cirModule = llvm::dyn_cast(op); + + if (mlir::failed(cir::prepareCIRModuleForTranslation(cirModule))) + return mlir::failure(); + llvm::LLVMContext llvmContext; auto llvmModule = cir::direct::lowerDirectlyFromCIRToLLVMIR( - llvm::dyn_cast(op), llvmContext, + cirModule, llvmContext, /*disableVerifier=*/false, disableCCLowering); if (!llvmModule) return mlir::failure(); From 3d7fcd4502c72323a868299f4d235d34f1c34cba Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 12 Dec 2024 12:41:03 -0500 Subject: [PATCH 2161/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vqmovns_s32 and add CIR PoisonAttr (#1199) CIR PoisonOp is needed in this context as alternative would be to use VecCreateOp to prepare an arg for VecInsertElement, but VecCreate is for different purpose and [it would insert all elements](https://github.com/llvm/clangir/blob/eacaabba76ebdbf87217fefaa77f92c45cf4509c/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp#L1679) which is not totally unnecessary in this context. Here is the [intrinsic def ](https://developer.arm.com/architectures/instruction-sets/intrinsics/#f:@navigationhierarchiessimdisa=[Neon]&q=vqmovns_) --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 15 +++++++ .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 39 ++++++++++++++++++- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 +++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 33 ++++++++++++++-- clang/test/CIR/CodeGen/AArch64/neon.c | 27 +++++++++---- clang/test/CIR/IR/invalid.cir | 6 +++ clang/test/CIR/Lowering/const.cir | 2 + 7 files changed, 115 insertions(+), 13 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 0cfbf84fa58a..b375b624ccc5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -169,6 +169,21 @@ def UndefAttr : CIR_Attr<"Undef", "undef", [TypedAttrInterface]> { let assemblyFormat = [{}]; } +//===----------------------------------------------------------------------===// +// PoisonAttr +//===----------------------------------------------------------------------===// + +def PoisonAttr : CIR_Attr<"Poison", "poison", [TypedAttrInterface]> { + let summary = "Represent an poison constant"; + let description = [{ + The PoisonAttr represents an poison constant, corresponding to LLVM's notion + of poison. + }]; + + let parameters = (ins AttributeSelfTypeParameter<"">:$type); + let assemblyFormat = [{}]; +} + //===----------------------------------------------------------------------===// // ConstArrayAttr //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index f8337af303cc..8400d1749bb9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2263,6 +2263,32 @@ static mlir::Value emitNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec, false /* right shift */); } +/// Vectorize value, usually for argument of a neon SISD intrinsic call. +static void vecExtendIntValue(CIRGenFunction &cgf, cir::VectorType argVTy, + mlir::Value &arg, mlir::Location loc) { + CIRGenBuilderTy &builder = cgf.getBuilder(); + cir::IntType eltTy = mlir::dyn_cast(argVTy.getEltType()); + assert(mlir::isa(arg.getType()) && eltTy); + // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate + // it before inserting. + arg = builder.createIntCast(arg, eltTy); + mlir::Value zero = builder.getConstInt(loc, cgf.SizeTy, 0); + mlir::Value poison = builder.create( + loc, eltTy, builder.getAttr(eltTy)); + arg = builder.create( + loc, builder.create(loc, argVTy, poison), arg, zero); +} + +/// Reduce vector type value to scalar, usually for result of a +/// neon SISD intrinsic call +static mlir::Value vecReduceIntValue(CIRGenFunction &cgf, mlir::Value val, + mlir::Location loc) { + CIRGenBuilderTy &builder = cgf.getBuilder(); + assert(mlir::isa(val.getType())); + return builder.create( + loc, val, builder.getConstInt(loc, cgf.SizeTy, 0)); +} + mlir::Value emitNeonCall(CIRGenBuilderTy &builder, llvm::SmallVector argTypes, llvm::SmallVectorImpl &args, @@ -2851,8 +2877,17 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr( llvm_unreachable(" neon_vqmovnh_s16 NYI "); case NEON::BI__builtin_neon_vqmovnh_u16: llvm_unreachable(" neon_vqmovnh_u16 NYI "); - case NEON::BI__builtin_neon_vqmovns_s32: - llvm_unreachable(" neon_vqmovns_s32 NYI "); + case NEON::BI__builtin_neon_vqmovns_s32: { + mlir::Location loc = cgf.getLoc(expr->getExprLoc()); + cir::VectorType argVecTy = + cir::VectorType::get(&(cgf.getMLIRContext()), cgf.SInt32Ty, 4); + cir::VectorType resVecTy = + cir::VectorType::get(&(cgf.getMLIRContext()), cgf.SInt16Ty, 4); + vecExtendIntValue(cgf, argVecTy, ops[0], loc); + mlir::Value result = emitNeonCall(builder, {argVecTy}, ops, + "aarch64.neon.sqxtn", resVecTy, loc); + return vecReduceIntValue(cgf, result, loc); + } case NEON::BI__builtin_neon_vqmovns_u32: llvm_unreachable(" neon_vqmovns_u32 NYI "); case NEON::BI__builtin_neon_vqmovund_s64: diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f9c0554c4fef..f353f57f8ca4 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -389,6 +389,12 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType, return op->emitOpError("undef expects non-void type"); } + if (isa(attrType)) { + if (!::mlir::isa(opType)) + return success(); + return op->emitOpError("poison expects non-void type"); + } + if (mlir::isa(attrType)) { if (!mlir::isa(opType)) return op->emitOpError("result type (") diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5db505fe69d9..739d17803ca3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -425,6 +425,16 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::UndefAttr undefAttr, loc, converter->convertType(undefAttr.getType())); } +/// PoisonAttr visitor. +static mlir::Value +lowerCirAttrAsValue(mlir::Operation *parentOp, cir::PoisonAttr poisonAttr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter) { + auto loc = parentOp->getLoc(); + return rewriter.create( + loc, converter->convertType(poisonAttr.getType())); +} + /// ConstStruct visitor. static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstStructAttr constStruct, @@ -644,6 +654,8 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, return lowerCirAttrAsValue(parentOp, zeroAttr, rewriter, converter); if (const auto undefAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, undefAttr, rewriter, converter); + if (const auto poisonAttr = mlir::dyn_cast(attr)) + return lowerCirAttrAsValue(parentOp, poisonAttr, rewriter, converter); if (const auto globalAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, globalAttr, rewriter, converter); if (const auto vtableAttr = mlir::dyn_cast(attr)) @@ -1555,6 +1567,14 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( mlir::ConversionPatternRewriter &rewriter) const { mlir::Attribute attr = op.getValue(); + // Regardless of the type, we should lower the constant of poison value + // into PoisonOp. + if (mlir::isa(attr)) { + rewriter.replaceOp( + op, lowerCirAttrAsValue(op, attr, rewriter, getTypeConverter())); + return mlir::success(); + } + if (mlir::isa(op.getType())) { // Verified cir.const operations cannot actually be of these types, but the // lowering pass may generate temporary cir.const operations with these @@ -1695,6 +1715,7 @@ mlir::LogicalResult CIRToLLVMVecCreateOpLowering::matchAndRewrite( mlir::Value result = rewriter.create(loc, llvmTy); assert(vecTy.getSize() == op.getElements().size() && "cir.vec.create op count doesn't match vector type elements count"); + for (uint64_t i = 0; i < vecTy.getSize(); ++i) { mlir::Value indexValue = rewriter.create(loc, rewriter.getI64Type(), i); @@ -1745,15 +1766,21 @@ mlir::LogicalResult CIRToLLVMVecSplatOpLowering::matchAndRewrite( assert(vecTy && "result type of cir.vec.splat op is not VectorType"); auto llvmTy = typeConverter->convertType(vecTy); auto loc = op.getLoc(); - mlir::Value undef = rewriter.create(loc, llvmTy); + mlir::Value poison = rewriter.create(loc, llvmTy); mlir::Value indexValue = rewriter.create(loc, rewriter.getI64Type(), 0); mlir::Value elementValue = adaptor.getValue(); + if (mlir::isa(elementValue.getDefiningOp())) { + // If the splat value is poison, then we can just use poison value + // for the entire vector. + rewriter.replaceOp(op, poison); + return mlir::success(); + } mlir::Value oneElement = rewriter.create( - loc, undef, elementValue, indexValue); + loc, poison, elementValue, indexValue); SmallVector zeroValues(vecTy.getSize(), 0); mlir::Value shuffled = rewriter.create( - loc, oneElement, undef, zeroValues); + loc, oneElement, poison, zeroValues); rewriter.replaceOp(op, shuffled); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 3ddaea58f163..487778d3778f 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -14611,14 +14611,25 @@ void test_vst1q_s64(int64_t *a, int64x2_t b) { // return (int8_t)vqmovnh_s16(a); // } -// NYI-LABEL: @test_vqmovns_s32( -// NYI: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 %a, i64 0 -// NYI: [[VQMOVNS_S32_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> [[TMP0]]) -// NYI: [[TMP1:%.*]] = extractelement <4 x i16> [[VQMOVNS_S32_I]], i64 0 -// NYI: ret i16 [[TMP1]] -// int16_t test_vqmovns_s32(int32_t a) { -// return (int16_t)vqmovns_s32(a); -// } +int16_t test_vqmovns_s32(int32_t a) { + return (int16_t)vqmovns_s32(a); + + // CIR-LABEL: vqmovns_s32 + // CIR: [[A:%.*]] = cir.load {{.*}} : !cir.ptr, !s32i + // CIR: [[VQMOVNS_S32_ZERO1:%.*]] = cir.const #cir.int<0> : !u64i + // CIR: [[POISON:%.*]] = cir.const #cir.poison : !s32i + // CIR: [[POISON_VEC:%.*]] = cir.vec.splat [[POISON]] : !s32i, !cir.vector + // CIR: [[TMP0:%.*]] = cir.vec.insert [[A]], [[POISON_VEC]][[[VQMOVNS_S32_ZERO1]] : !u64i] : !cir.vector + // CIR: [[VQMOVNS_S32_I:%.*]] = cir.llvm.intrinsic "aarch64.neon.sqxtn" [[TMP0]] : (!cir.vector) -> !cir.vector + // CIR: [[VQMOVNS_S32_ZERO2:%.*]] = cir.const #cir.int<0> : !u64i + // CIR: [[TMP1:%.*]] = cir.vec.extract [[VQMOVNS_S32_I]][[[VQMOVNS_S32_ZERO2]] : !u64i] : !cir.vector + + // LLVM: {{.*}}@test_vqmovns_s32(i32{{.*}}[[a:%.*]]) + // LLVM: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 [[a]], i64 0 + // LLVM: [[VQMOVNS_S32_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> [[TMP0]]) + // LLVM: [[TMP1:%.*]] = extractelement <4 x i16> [[VQMOVNS_S32_I]], i64 0 + // LLVM: ret i16 [[TMP1]] +} // NYI-LABEL: @test_vqmovnd_s64( // NYI: [[VQMOVND_S64_I:%.*]] = call i32 @llvm.aarch64.neon.scalar.sqxtn.i32.i64(i64 %a) diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 1a6c2f503dfd..aaeb46e770b5 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -393,6 +393,12 @@ module { // ----- +module { + cir.global external @v = #cir.poison : !cir.void // expected-error {{poison expects non-void type}} +} + +// ----- + !s32i = !cir.int cir.func @vec_op_size() { %0 = cir.const #cir.int<1> : !s32i diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index 43e635226000..ae78b8387fc5 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -17,6 +17,8 @@ module { // CHECK: llvm.mlir.zero : !llvm.array<3 x i32> %5 = cir.const #cir.undef : !cir.array // CHECK: llvm.mlir.undef : !llvm.array<3 x i32> + %6 = cir.const #cir.poison : !s32i + // CHECK: llvm.mlir.poison : i32 cir.return } From 3d735ec36aa746b8e7f735b389f3a9f3cf4c7444 Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 12 Dec 2024 12:41:38 -0500 Subject: [PATCH 2162/2301] [CIR][Dialect] Add BinOpKind_Max (#1201) This would facilitate implementation of neon intrinsic `neon_vmax_v` and `__builtin_elementwise_max`, and potentially future optimizations. CIR BinOp supports vector type. Floating point has already been supported by FMaxOp. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 ++- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 9 +++++++++ clang/test/CIR/Lowering/binop-signed-int.cir | 8 ++++++++ clang/test/CIR/Lowering/binop-unsigned-int.cir | 11 +++++++++++ 4 files changed, 30 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 188aabc7db70..45e629520f18 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1157,6 +1157,7 @@ def BinOpKind_Sub : I32EnumAttrCase<"Sub", 5, "sub">; def BinOpKind_And : I32EnumAttrCase<"And", 8, "and">; def BinOpKind_Xor : I32EnumAttrCase<"Xor", 9, "xor">; def BinOpKind_Or : I32EnumAttrCase<"Or", 10, "or">; +def BinOpKind_Max : I32EnumAttrCase<"Max", 11, "max">; def BinOpKind : I32EnumAttr< "BinOpKind", @@ -1164,7 +1165,7 @@ def BinOpKind : I32EnumAttr< [BinOpKind_Mul, BinOpKind_Div, BinOpKind_Rem, BinOpKind_Add, BinOpKind_Sub, BinOpKind_And, BinOpKind_Xor, - BinOpKind_Or]> { + BinOpKind_Or, BinOpKind_Max]> { let cppNamespace = "::cir"; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 739d17803ca3..cedf50a204ef 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2574,6 +2574,15 @@ mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite( case cir::BinOpKind::Xor: rewriter.replaceOpWithNewOp(op, lhs, rhs); break; + case cir::BinOpKind::Max: + if (mlir::isa(llvmEltTy)) { + auto isUnsigned = isIntTypeUnsigned(type); + if (isUnsigned) + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + else + rewriter.replaceOpWithNewOp(op, llvmTy, lhs, rhs); + } + break; } return mlir::LogicalResult::success(); diff --git a/clang/test/CIR/Lowering/binop-signed-int.cir b/clang/test/CIR/Lowering/binop-signed-int.cir index 44b479f12fb8..0aa698098e2e 100644 --- a/clang/test/CIR/Lowering/binop-signed-int.cir +++ b/clang/test/CIR/Lowering/binop-signed-int.cir @@ -7,6 +7,8 @@ module { %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %2 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %100 = cir.alloca !cir.vector, !cir.ptr>, ["vec1", init] {alignment = 8 : i64} + %101 = cir.alloca !cir.vector, !cir.ptr>, ["vec2", init] {alignment = 8 : i64} %3 = cir.const #cir.int<2> : !s32i cir.store %3, %0 : !s32i, !cir.ptr %4 = cir.const #cir.int<1> : !s32i cir.store %4, %1 : !s32i, !cir.ptr %5 = cir.load %0 : !cir.ptr, !s32i @@ -63,6 +65,12 @@ module { %36 = cir.binop(sub, %32, %33) sat: !s32i // CHECK: = llvm.intr.ssub.sat{{.*}}(i32, i32) -> i32 cir.store %34, %2 : !s32i, !cir.ptr + %37 = cir.binop(max, %32, %33) : !s32i + // CHECK: = llvm.intr.smax + %38 = cir.load %100 : !cir.ptr>, !cir.vector + %39 = cir.load %101 : !cir.ptr>, !cir.vector + %40 = cir.binop(max, %38, %39) : !cir.vector + // CHECK: = llvm.intr.smax({{%.*}}, {{%.*}}) : (vector<2xi32>, vector<2xi32>) -> vector<2xi32> cir.return } } diff --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir index 0ce374488725..b783509d06ed 100644 --- a/clang/test/CIR/Lowering/binop-unsigned-int.cir +++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir @@ -7,6 +7,8 @@ module { %0 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} %1 = cir.alloca !u32i, !cir.ptr, ["b", init] {alignment = 4 : i64} %2 = cir.alloca !u32i, !cir.ptr, ["x", init] {alignment = 4 : i64} + %100 = cir.alloca !cir.vector, !cir.ptr>, ["vec1", init] {alignment = 8 : i64} + %101 = cir.alloca !cir.vector, !cir.ptr>, ["vec2", init] {alignment = 8 : i64} %3 = cir.const #cir.int<2> : !u32i cir.store %3, %0 : !u32i, !cir.ptr %4 = cir.const #cir.int<1> : !u32i cir.store %4, %1 : !u32i, !cir.ptr %5 = cir.load %0 : !cir.ptr, !u32i @@ -51,6 +53,10 @@ module { cir.store %34, %2 : !u32i, !cir.ptr %35 = cir.binop(add, %32, %33) sat: !u32i %36 = cir.binop(sub, %32, %33) sat: !u32i + %37 = cir.binop(max, %32, %33) : !u32i + %38 = cir.load %100 : !cir.ptr>, !cir.vector + %39 = cir.load %101 : !cir.ptr>, !cir.vector + %40 = cir.binop(max, %38, %39) : !cir.vector cir.return } } @@ -64,8 +70,11 @@ module { // MLIR: = llvm.shl // MLIR: = llvm.and // MLIR: = llvm.xor +// MLIR: = llvm.or // MLIR: = llvm.intr.uadd.sat{{.*}}(i32, i32) -> i32 // MLIR: = llvm.intr.usub.sat{{.*}}(i32, i32) -> i32 +// MLIR: = llvm.intr.umax +// MLIR: = llvm.intr.umax // LLVM: = mul i32 // LLVM: = udiv i32 @@ -79,3 +88,5 @@ module { // LLVM: = or i32 // LLVM: = call i32 @llvm.uadd.sat.i32 // LLVM: = call i32 @llvm.usub.sat.i32 +// LLVM: = call i32 @llvm.umax.i32 +// LLVM: = call <2 x i32> @llvm.umax.v2i32 From 5d1c778a0edb100d2df3fcac6236120c9b9812a4 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Thu, 12 Dec 2024 09:46:01 -0800 Subject: [PATCH 2163/2301] [CIR] Fix tests with LLVM_LINK_LLVM_DYLIB (#1222) This option creates and links all tools against a single libLLVM shared library (and the corresponding CLANG_LINK_CLANG_DYLIB option also gets turned on by default). In order for this to work, we need to use LINK_COMPONENTS instead of LINK_LIBS for all LLVM dependencies, and clang_target_link_libraries for all Clang dependencies, so that they get rewritten to use the dylib. Remove llvm_update_compile_flags while I'm here, since the build macros handle that internally. Before this change, we'd link against certain LLVM libraries both statically and dynamically, leading to test failures from duplicate singletons. The way this works for MLIR is fragile right now: MLIR can create its own dylib as well but doesn't have build system support for linking against that dylib. We end up folding the MLIR libraries into libclang-cpp.so (because all Clang dependencies get pulled into it), but MLIR tools still link the MLIR libraries statically. It'll still work, but BUILD_SHARED_LIBS is possibly a better alternative for development. Distributions like Fedora build their LLVM packages with LLVM_LINK_LLVM_DYLIB, so we'll want to eventually have good MLIR support for that setup too. --- .../Transforms/TargetLowering/CMakeLists.txt | 4 ++- clang/tools/cir-opt/CMakeLists.txt | 27 +++++++++-------- clang/tools/cir-translate/CMakeLists.txt | 29 +++++++++---------- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt index d3cb9fc96f1a..21bfa30a111a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt @@ -22,10 +22,12 @@ add_clang_library(TargetLowering DEPENDS clangBasic + LINK_COMPONENTS + TargetParser + LINK_LIBS PUBLIC clangBasic - LLVMTargetParser MLIRIR MLIRPass MLIRDLTIDialect diff --git a/clang/tools/cir-opt/CMakeLists.txt b/clang/tools/cir-opt/CMakeLists.txt index 741cdfa5950d..900697ffe8e4 100644 --- a/clang/tools/cir-opt/CMakeLists.txt +++ b/clang/tools/cir-opt/CMakeLists.txt @@ -4,15 +4,24 @@ get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) -set(LIBS - ${dialect_libs} - ${conversion_libs} +add_clang_tool(cir-opt + cir-opt.cpp +) + +clang_target_link_libraries(cir-opt + PRIVATE clangCIR clangCIRLoweringThroughMLIR clangCIRLoweringDirectToLLVM - MLIRAnalysis MLIRCIR MLIRCIRTransforms +) + +target_link_libraries(cir-opt + PRIVATE + ${dialect_libs} + ${conversion_libs} + MLIRAnalysis MLIRDialect MLIRIR MLIRMemRefDialect @@ -23,13 +32,3 @@ set(LIBS MLIRTransforms MLIRTransformUtils ) - -add_clang_tool(cir-opt - cir-opt.cpp - - DEPENDS - ${LIBS} -) - -target_link_libraries(cir-opt PRIVATE ${LIBS}) -llvm_update_compile_flags(cir-opt) diff --git a/clang/tools/cir-translate/CMakeLists.txt b/clang/tools/cir-translate/CMakeLists.txt index a5e22b02e505..21834799ea82 100644 --- a/clang/tools/cir-translate/CMakeLists.txt +++ b/clang/tools/cir-translate/CMakeLists.txt @@ -5,15 +5,24 @@ get_property(translation_libs GLOBAL PROPERTY MLIR_TRANSLATION_LIBS) include_directories(${LLVM_MAIN_SRC_DIR}/../mlir/include) include_directories(${CMAKE_BINARY_DIR}/tools/mlir/include) -set(LIBS - ${dialect_libs} - ${conversion_libs} - ${translation_libs} +add_clang_tool(cir-translate + cir-translate.cpp +) + +clang_target_link_libraries(cir-translate + PRIVATE clangCIR clangCIRLoweringDirectToLLVM - MLIRAnalysis MLIRCIR MLIRCIRTransforms +) + +target_link_libraries(cir-translate + PRIVATE + ${dialect_libs} + ${conversion_libs} + ${translation_libs} + MLIRAnalysis MLIRDialect MLIRIR MLIROptLib @@ -24,13 +33,3 @@ set(LIBS MLIRTranslateLib MLIRSupport ) - -add_clang_tool(cir-translate - cir-translate.cpp - - DEPENDS - ${LIBS} -) - -target_link_libraries(cir-translate PRIVATE ${LIBS}) -llvm_update_compile_flags(cir-translate) From 4af8e298f9fe48c03fa49557b2f7d4289c00b9a4 Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Thu, 12 Dec 2024 10:02:04 -0800 Subject: [PATCH 2164/2301] [CIR] Remove the !cir.void return type for functions returning void (#1203) C/C++ functions returning void had an explicit !cir.void return type while not having any returned value, which was breaking a lot of MLIR invariants when the CIR dialect is used in a greater context, for example with the inliner. Now, a C/C++ function returning void has not return type and no return values, which does not break the MLIR invariant about the same number of return types and returned values. This change keeps the same parsing/pretty-printed syntax as before for compatibility. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 13 ++- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 19 ++-- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 39 ++++++-- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 93 ++++++++++++++++--- .../Transforms/TargetLowering/LowerTypes.cpp | 2 +- clang/test/CIR/IR/being_and_nothingness.cir | 35 +++++++ mlir/include/mlir/IR/OpImplementation.h | 3 + .../mlir/Interfaces/FunctionImplementation.h | 22 +++++ mlir/lib/AsmParser/AsmParserImpl.h | 13 +++ .../lib/Interfaces/FunctionImplementation.cpp | 13 ++- 11 files changed, 211 insertions(+), 43 deletions(-) create mode 100644 clang/test/CIR/IR/being_and_nothingness.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 45e629520f18..b86861999330 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3474,8 +3474,6 @@ def FuncOp : CIR_Op<"func", [ /// Returns the results types that the callable region produces when /// executed. llvm::ArrayRef getCallableResults() { - if (::llvm::isa(getFunctionType().getReturnType())) - return {}; return getFunctionType().getReturnTypes(); } @@ -3492,10 +3490,15 @@ def FuncOp : CIR_Op<"func", [ } /// Returns the argument types of this function. - llvm::ArrayRef getArgumentTypes() { return getFunctionType().getInputs(); } + llvm::ArrayRef getArgumentTypes() { + return getFunctionType().getInputs(); + } - /// Returns the result types of this function. - llvm::ArrayRef getResultTypes() { return getFunctionType().getReturnTypes(); } + /// Returns 0 or 1 result type of this function (0 in the case of a function + /// returing void) + llvm::ArrayRef getResultTypes() { + return getFunctionType().getReturnTypes(); + } /// Hook for OpTrait::FunctionOpInterfaceTrait, called after verifying that /// the 'type' attribute is present and checks if it holds a function type. diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index c805b6887cf3..d3f49716301d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -379,22 +379,27 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { ```mlir !cir.func + !cir.func !cir.func !cir.func ``` }]; - let parameters = (ins ArrayRefParameter<"mlir::Type">:$inputs, "mlir::Type":$returnType, + let parameters = (ins ArrayRefParameter<"mlir::Type">:$inputs, ArrayRefParameter<"mlir::Type">:$returnTypes, "bool":$varArg); let assemblyFormat = [{ - `<` $returnType ` ` `(` custom($inputs, $varArg) `>` + `<` custom($returnTypes, $inputs, $varArg) `>` }]; let builders = [ + // Construct with an actual return type or explicit !cir.void TypeBuilderWithInferredContext<(ins "llvm::ArrayRef":$inputs, "mlir::Type":$returnType, CArg<"bool", "false">:$isVarArg), [{ - return $_get(returnType.getContext(), inputs, returnType, isVarArg); + return $_get(returnType.getContext(), inputs, + ::mlir::isa<::cir::VoidType>(returnType) ? llvm::ArrayRef{} + : llvm::ArrayRef{returnType}, + isVarArg); }]> ]; @@ -408,11 +413,11 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { /// Returns the number of arguments to the function. unsigned getNumInputs() const { return getInputs().size(); } - /// Returns the result type of the function as an ArrayRef, enabling better - /// integration with generic MLIR utilities. - llvm::ArrayRef getReturnTypes() const; + /// Returns the result type of the function as an actual return type or + /// explicit !cir.void + mlir::Type getReturnType() const; - /// Returns whether the function is returns void. + /// Returns whether the function returns void. bool isVoid() const; /// Returns a clone of this function type with the given argument diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 799d875739d5..fa81ce682769 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -271,7 +271,7 @@ mlir::Type CIRGenTypes::ConvertFunctionTypeInternal(QualType QFT) { assert(QFT.isCanonical()); const Type *Ty = QFT.getTypePtr(); const FunctionType *FT = cast(QFT.getTypePtr()); - // First, check whether we can build the full fucntion type. If the function + // First, check whether we can build the full function type. If the function // type depends on an incomplete type (e.g. a struct or enum), we cannot lower // the function type. assert(isFuncTypeConvertible(FT) && "NYI"); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f353f57f8ca4..7da279aa7513 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2224,6 +2224,26 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, getResAttrsAttrName(result.name)); } +// A specific version of function_interface_impl::parseFunctionSignature able to +// handle the "-> !void" special fake return type. +static ParseResult +parseFunctionSignature(OpAsmParser &parser, bool allowVariadic, + SmallVectorImpl &arguments, + bool &isVariadic, SmallVectorImpl &resultTypes, + SmallVectorImpl &resultAttrs) { + if (function_interface_impl::parseFunctionArgumentList(parser, allowVariadic, + arguments, isVariadic)) + return failure(); + if (succeeded(parser.parseOptionalArrow())) { + if (parser.parseOptionalExclamationKeyword("!void").succeeded()) + // This is just an empty return type and attribute. + return success(); + return function_interface_impl::parseFunctionResultList(parser, resultTypes, + resultAttrs); + } + return success(); +} + ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { llvm::SMLoc loc = parser.getCurrentLocation(); @@ -2284,9 +2304,8 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { // Parse the function signature. bool isVariadic = false; - if (function_interface_impl::parseFunctionSignature( - parser, /*allowVariadic=*/true, arguments, isVariadic, resultTypes, - resultAttrs)) + if (parseFunctionSignature(parser, /*allowVariadic=*/true, arguments, + isVariadic, resultTypes, resultAttrs)) return failure(); for (auto &arg : arguments) @@ -2489,13 +2508,8 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p.printSymbolName(getSymName()); auto fnType = getFunctionType(); llvm::SmallVector resultTypes; - if (!fnType.isVoid()) - function_interface_impl::printFunctionSignature( - p, *this, fnType.getInputs(), fnType.isVarArg(), - fnType.getReturnTypes()); - else - function_interface_impl::printFunctionSignature( - p, *this, fnType.getInputs(), fnType.isVarArg(), {}); + function_interface_impl::printFunctionSignature( + p, *this, fnType.getInputs(), fnType.isVarArg(), fnType.getReturnTypes()); if (mlir::ArrayAttr annotations = getAnnotationsAttr()) { p << ' '; @@ -2564,6 +2578,11 @@ LogicalResult cir::FuncOp::verifyType() { if (!getNoProto() && type.isVarArg() && type.getNumInputs() == 0) return emitError() << "prototyped function must have at least one non-variadic input"; + if (auto rt = type.getReturnTypes(); + !rt.empty() && mlir::isa(rt.front())) + return emitOpError("The return type for a function returning void should " + "be empty instead of an explicit !cir.void"); + return success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index df89584fd3a9..2b17b048f6c6 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -33,6 +33,7 @@ #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" +#include #include using cir::MissingFeatures; @@ -42,13 +43,16 @@ using cir::MissingFeatures; //===----------------------------------------------------------------------===// static mlir::ParseResult -parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, - bool &isVarArg); -static void printFuncTypeArgs(mlir::AsmPrinter &p, - mlir::ArrayRef params, bool isVarArg); +parseFuncType(mlir::AsmParser &p, llvm::SmallVector &returnTypes, + llvm::SmallVector ¶ms, bool &isVarArg); + +static void printFuncType(mlir::AsmPrinter &p, + mlir::ArrayRef returnTypes, + mlir::ArrayRef params, bool isVarArg); static mlir::ParseResult parsePointerAddrSpace(mlir::AsmParser &p, mlir::Attribute &addrSpaceAttr); + static void printPointerAddrSpace(mlir::AsmPrinter &p, mlir::Attribute addrSpaceAttr); @@ -913,9 +917,46 @@ FuncType FuncType::clone(TypeRange inputs, TypeRange results) const { return get(llvm::to_vector(inputs), results[0], isVarArg()); } -mlir::ParseResult parseFuncTypeArgs(mlir::AsmParser &p, - llvm::SmallVector ¶ms, - bool &isVarArg) { +// A special parser is needed for function returning void to consume the "!void" +// returned type in the case there is no alias defined. +static mlir::ParseResult +parseFuncTypeReturn(mlir::AsmParser &p, + llvm::SmallVector &returnTypes) { + if (p.parseOptionalExclamationKeyword("!void").succeeded()) + // !void means no return type. + return p.parseLParen(); + if (succeeded(p.parseOptionalLParen())) + // If we have already a '(', the function has no return type + return mlir::success(); + + mlir::Type type; + auto result = p.parseOptionalType(type); + if (!result.has_value()) + return mlir::failure(); + if (failed(*result) || isa(type)) + // No return type specified. + return p.parseLParen(); + // Otherwise use the actual type. + returnTypes.push_back(type); + return p.parseLParen(); +} + +// A special pretty-printer for function returning void to emit a "!void" +// returned type. Note that there is no real type used here since it does not +// appear in the IR and thus the alias might not be defined and cannot be +// referred to. This is why this is a pure syntactic-sugar string which is used. +static void printFuncTypeReturn(mlir::AsmPrinter &p, + mlir::ArrayRef returnTypes) { + if (returnTypes.empty()) + // Pretty-print no return type as "!void" + p << "!void "; + else + p << returnTypes << ' '; +} + +static mlir::ParseResult +parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, + bool &isVarArg) { isVarArg = false; // `(` `)` if (succeeded(p.parseOptionalRParen())) @@ -945,8 +986,10 @@ mlir::ParseResult parseFuncTypeArgs(mlir::AsmParser &p, return p.parseRParen(); } -void printFuncTypeArgs(mlir::AsmPrinter &p, mlir::ArrayRef params, - bool isVarArg) { +static void printFuncTypeArgs(mlir::AsmPrinter &p, + mlir::ArrayRef params, + bool isVarArg) { + p << '('; llvm::interleaveComma(params, p, [&p](mlir::Type type) { p.printType(type); }); if (isVarArg) { @@ -957,11 +1000,37 @@ void printFuncTypeArgs(mlir::AsmPrinter &p, mlir::ArrayRef params, p << ')'; } -llvm::ArrayRef FuncType::getReturnTypes() const { - return static_cast(getImpl())->returnType; +static mlir::ParseResult +parseFuncType(mlir::AsmParser &p, llvm::SmallVector &returnTypes, + llvm::SmallVector ¶ms, bool &isVarArg) { + if (failed(parseFuncTypeReturn(p, returnTypes))) + return failure(); + return parseFuncTypeArgs(p, params, isVarArg); +} + +static void printFuncType(mlir::AsmPrinter &p, + mlir::ArrayRef returnTypes, + mlir::ArrayRef params, bool isVarArg) { + printFuncTypeReturn(p, returnTypes); + printFuncTypeArgs(p, params, isVarArg); } -bool FuncType::isVoid() const { return mlir::isa(getReturnType()); } +// Return the actual return type or an explicit !cir.void if the function does +// not return anything +mlir::Type FuncType::getReturnType() const { + if (isVoid()) + return cir::VoidType::get(getContext()); + return static_cast(getImpl())->returnTypes.front(); +} + +bool FuncType::isVoid() const { + auto rt = static_cast(getImpl())->returnTypes; + assert(rt.empty() || + !mlir::isa(rt.front()) && + "The return type for a function returning void should be empty " + "instead of a real !cir.void"); + return rt.empty(); +} //===----------------------------------------------------------------------===// // MethodType Definitions diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index 0c2233ef84c9..d655ae9023dd 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -109,7 +109,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { } } - return FuncType::get(getMLIRContext(), ArgTypes, resultType, FI.isVariadic()); + return FuncType::get(ArgTypes, resultType, FI.isVariadic()); } /// Convert a CIR type to its ABI-specific default form. diff --git a/clang/test/CIR/IR/being_and_nothingness.cir b/clang/test/CIR/IR/being_and_nothingness.cir new file mode 100644 index 000000000000..311acb4893dc --- /dev/null +++ b/clang/test/CIR/IR/being_and_nothingness.cir @@ -0,0 +1,35 @@ +// RUN: cir-opt %s | FileCheck %s +// Exercise different ways to encode a function returning void +!s32i = !cir.int +!fnptr1 = !cir.ptr> +// Note there is no !void alias defined +!fnptr2 = !cir.ptr> +!fnptr3 = !cir.ptr> +module { + cir.func @ind1(%fnptr: !fnptr1, %a : !s32i) { + // CHECK: cir.func @ind1(%arg0: !cir.ptr>, %arg1: !s32i) { + cir.return + } + + cir.func @ind2(%fnptr: !fnptr2, %a : !s32i) { + // CHECK: cir.func @ind2(%arg0: !cir.ptr>, %arg1: !s32i) { + cir.return + } + cir.func @ind3(%fnptr: !fnptr3, %a : !s32i) { + // CHECK: cir.func @ind3(%arg0: !cir.ptr>, %arg1: !s32i) { + cir.return + } + cir.func @f1() -> !cir.void { + // CHECK: cir.func @f1() { + cir.return + } + // Note there is no !void alias defined + cir.func @f2() -> !void { + // CHECK: cir.func @f2() { + cir.return + } + cir.func @f3() { + // CHECK: cir.func @f3() { + cir.return + } +} diff --git a/mlir/include/mlir/IR/OpImplementation.h b/mlir/include/mlir/IR/OpImplementation.h index 6c1ff4d0e5e6..d61ca01730b1 100644 --- a/mlir/include/mlir/IR/OpImplementation.h +++ b/mlir/include/mlir/IR/OpImplementation.h @@ -922,6 +922,9 @@ class AsmParser { /// Parse an optional keyword or string. virtual ParseResult parseOptionalKeywordOrString(std::string *result) = 0; + /// Parse the given exclamation-prefixed keyword if present. + virtual ParseResult parseOptionalExclamationKeyword(StringRef keyword) = 0; + //===--------------------------------------------------------------------===// // Attribute/Type Parsing //===--------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Interfaces/FunctionImplementation.h b/mlir/include/mlir/Interfaces/FunctionImplementation.h index a5e6963e4e66..110025bc89f5 100644 --- a/mlir/include/mlir/Interfaces/FunctionImplementation.h +++ b/mlir/include/mlir/Interfaces/FunctionImplementation.h @@ -64,6 +64,28 @@ parseFunctionSignature(OpAsmParser &parser, bool allowVariadic, bool &isVariadic, SmallVectorImpl &resultTypes, SmallVectorImpl &resultAttrs); +/// Parse a function argument list using `parser`. The `allowVariadic` argument +/// indicates whether functions with variadic arguments are supported. The +/// trailing arguments are populated by this function with names, types, +/// attributes and locations of the arguments. +ParseResult +parseFunctionArgumentList(OpAsmParser &parser, bool allowVariadic, + SmallVectorImpl &arguments, + bool &isVariadic); + +/// Parse a function result list using `parser`. +/// +/// function-result-list ::= function-result-list-parens +/// | non-function-type +/// function-result-list-parens ::= `(` `)` +/// | `(` function-result-list-no-parens `)` +/// function-result-list-no-parens ::= function-result (`,` function-result)* +/// function-result ::= type attribute-dict? +/// +ParseResult +parseFunctionResultList(OpAsmParser &parser, SmallVectorImpl &resultTypes, + SmallVectorImpl &resultAttrs); + /// Parser implementation for function-like operations. Uses /// `funcTypeBuilder` to construct the custom function type given lists of /// input and output types. The parser sets the `typeAttrName` attribute to the diff --git a/mlir/lib/AsmParser/AsmParserImpl.h b/mlir/lib/AsmParser/AsmParserImpl.h index d5b72d63813a..8c7ce16fe54d 100644 --- a/mlir/lib/AsmParser/AsmParserImpl.h +++ b/mlir/lib/AsmParser/AsmParserImpl.h @@ -396,6 +396,19 @@ class AsmParserImpl : public BaseT { return parseOptionalString(result); } + /// Parse the given exclamation-prefixed keyword if present. + ParseResult parseOptionalExclamationKeyword(StringRef keyword) override { + if (parser.getToken().isCodeCompletion()) + return parser.codeCompleteOptionalTokens(keyword); + + // Check that the current token has the same spelling. + if (!parser.getToken().is(Token::Kind::exclamation_identifier) || + parser.getTokenSpelling() != keyword) + return failure(); + parser.consumeToken(); + return success(); + } + //===--------------------------------------------------------------------===// // Attribute Parsing //===--------------------------------------------------------------------===// diff --git a/mlir/lib/Interfaces/FunctionImplementation.cpp b/mlir/lib/Interfaces/FunctionImplementation.cpp index 988feee665fe..9922e3c28eab 100644 --- a/mlir/lib/Interfaces/FunctionImplementation.cpp +++ b/mlir/lib/Interfaces/FunctionImplementation.cpp @@ -13,10 +13,9 @@ using namespace mlir; -static ParseResult -parseFunctionArgumentList(OpAsmParser &parser, bool allowVariadic, - SmallVectorImpl &arguments, - bool &isVariadic) { +ParseResult function_interface_impl::parseFunctionArgumentList( + OpAsmParser &parser, bool allowVariadic, + SmallVectorImpl &arguments, bool &isVariadic) { // Parse the function arguments. The argument list either has to consistently // have ssa-id's followed by types, or just be a type list. It isn't ok to @@ -79,9 +78,9 @@ parseFunctionArgumentList(OpAsmParser &parser, bool allowVariadic, /// function-result-list-no-parens ::= function-result (`,` function-result)* /// function-result ::= type attribute-dict? /// -static ParseResult -parseFunctionResultList(OpAsmParser &parser, SmallVectorImpl &resultTypes, - SmallVectorImpl &resultAttrs) { +ParseResult function_interface_impl::parseFunctionResultList( + OpAsmParser &parser, SmallVectorImpl &resultTypes, + SmallVectorImpl &resultAttrs) { if (failed(parser.parseOptionalLParen())) { // We already know that there is no `(`, so parse a type. // Because there is no `(`, it cannot be a function type. From ff9ae83f19a1a504296cfb76656f0eb53d0a093f Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 12 Dec 2024 14:01:17 -0500 Subject: [PATCH 2165/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vaddvq_u32 and neon_vaddvq_u64 (#1225) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 9 ++++---- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 22 +++++++++++++++++++ 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 8400d1749bb9..a77d23bc768f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2654,6 +2654,7 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr( unsigned builtinID = info.BuiltinID; clang::CIRGen::CIRGenBuilderTy &builder = cgf.getBuilder(); mlir::Type resultTy = cgf.convertType(expr->getType()); + mlir::Type argTy = cgf.convertType(expr->getArg(0)->getType()); mlir::Location loc = cgf.getLoc(expr->getExprLoc()); switch (builtinID) { @@ -2672,11 +2673,9 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr( llvm_unreachable(" neon_vaddlv_u32 NYI "); case NEON::BI__builtin_neon_vaddlvq_s32: llvm_unreachable(" neon_vaddlvq_s32 NYI "); - case NEON::BI__builtin_neon_vaddlvq_u32: { - mlir::Type argTy = cgf.convertType(expr->getArg(0)->getType()); + case NEON::BI__builtin_neon_vaddlvq_u32: return emitNeonCall(builder, {argTy}, ops, "aarch64.neon.uaddlv", resultTy, loc); - } case NEON::BI__builtin_neon_vaddv_f32: llvm_unreachable(" neon_vaddv_f32 NYI "); case NEON::BI__builtin_neon_vaddv_s32: @@ -2692,9 +2691,9 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr( case NEON::BI__builtin_neon_vaddvq_s64: llvm_unreachable(" neon_vaddvq_s64 NYI "); case NEON::BI__builtin_neon_vaddvq_u32: - llvm_unreachable(" neon_vaddvq_u32 NYI "); case NEON::BI__builtin_neon_vaddvq_u64: - llvm_unreachable(" neon_vaddvq_u64 NYI "); + return emitNeonCall(builder, {argTy}, ops, "aarch64.neon.uaddv", resultTy, + loc); case NEON::BI__builtin_neon_vcaged_f64: llvm_unreachable(" neon_vcaged_f64 NYI "); case NEON::BI__builtin_neon_vcages_f32: diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index f44a4bb9e465..1ad9bb369c5d 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -919,3 +919,25 @@ int16_t test_vaddv_s16(int16x4_t a) { // LLVM-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16 // LLVM-NEXT: ret i16 [[TMP0]] } + +uint32_t test_vaddvq_u32(uint32x4_t a) { + return vaddvq_u32(a); + + // CIR-LABEL: vaddvq_u32 + // CIR: cir.llvm.intrinsic "aarch64.neon.uaddv" {{%.*}} : (!cir.vector) -> !u32i + + // LLVM-LABEL: test_vaddvq_u32 + // LLVM: [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> {{%.*}}) + // LLVM: ret i32 [[VADDVQ_U32_I]] +} + +uint64_t test_vaddvq_u64(uint64x2_t a) { + return vaddvq_u64(a); + + // CIR-LABEL: vaddvq_u64 + // CIR: cir.llvm.intrinsic "aarch64.neon.uaddv" {{%.*}} : (!cir.vector) -> !u64i + + // LLVM-LABEL: test_vaddvq_u64 + // LLVM: [[VADDVQ_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> {{%.*}}) + // LLVM: ret i64 [[VADDVQ_U64_I]] +} From f9dee671d97b6184261358ab8815603327a2c229 Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 12 Dec 2024 14:02:14 -0500 Subject: [PATCH 2166/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vrsraq_n_v and neon_vrsra_n_v (#1230) References: [OG's implementation](https://github.com/llvm/clangir/blob/2b1a638ea07ca10c5727ea835bfbe17b881175cc/clang/lib/CodeGen/CGBuiltin.cpp#L13509) [Builtin's definition ](https://developer.arm.com/architectures/instruction-sets/intrinsics/#f:@navigationhierarchiessimdisa=[Neon]&q=vrsra_n_) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 13 +- clang/test/CIR/CodeGen/AArch64/neon.c | 466 +++++++++++------- 2 files changed, 305 insertions(+), 174 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index a77d23bc768f..8ae9bd5c7c99 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4361,7 +4361,18 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NEON::BI__builtin_neon_vsraq_n_v NYI"); case NEON::BI__builtin_neon_vrsra_n_v: case NEON::BI__builtin_neon_vrsraq_n_v: { - llvm_unreachable("NEON::BI__builtin_neon_vrsraq_n_v NYI"); + llvm::SmallVector tmpOps = {Ops[1], Ops[2]}; + // The llvm intrinsic is expecting negative shift amount for right shift. + // Thus we have to make shift amount vec type to be signed. + cir::VectorType shitAmtVecTy = + usgn ? getSignChangedVectorType(builder, vTy) : vTy; + mlir::Value tmp = + emitNeonCall(builder, {vTy, shitAmtVecTy}, tmpOps, + usgn ? "aarch64.neon.urshl" : "aarch64.neon.srshl", vTy, + getLoc(E->getExprLoc()), false, + 1 /* shift amount is args[1]*/, true /* right shift */); + Ops[0] = builder.createBitcast(Ops[0], vTy); + return builder.createBinop(Ops[0], cir::BinOpKind::Add, tmp); } case NEON::BI__builtin_neon_vld1_v: case NEON::BI__builtin_neon_vld1q_v: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 487778d3778f..06cc61a7c91e 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -6105,157 +6105,261 @@ uint64x2_t test_vrshrq_n_u64(uint64x2_t a) { // LLVM: ret <2 x i64> [[VRSHR_N1]] } -// NYI-LABEL: @test_vrsra_n_s8( -// NYI: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %b, <8 x i8> ) -// NYI: [[TMP0:%.*]] = add <8 x i8> %a, [[VRSHR_N]] -// NYI: ret <8 x i8> [[TMP0]] -// int8x8_t test_vrsra_n_s8(int8x8_t a, int8x8_t b) { -// return vrsra_n_s8(a, b, 3); -// } - -// NYI-LABEL: @test_vrsra_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> -// NYI: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> ) -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[TMP3:%.*]] = add <4 x i16> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <4 x i16> [[TMP3]] -// int16x4_t test_vrsra_n_s16(int16x4_t a, int16x4_t b) { -// return vrsra_n_s16(a, b, 3); -// } - -// NYI-LABEL: @test_vrsra_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> -// NYI: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> ) -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <2 x i32> [[TMP3]] -// int32x2_t test_vrsra_n_s32(int32x2_t a, int32x2_t b) { -// return vrsra_n_s32(a, b, 3); -// } - -// NYI-LABEL: @test_vrsraq_n_s8( -// NYI: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %b, <16 x i8> ) -// NYI: [[TMP0:%.*]] = add <16 x i8> %a, [[VRSHR_N]] -// NYI: ret <16 x i8> [[TMP0]] -// int8x16_t test_vrsraq_n_s8(int8x16_t a, int8x16_t b) { -// return vrsraq_n_s8(a, b, 3); -// } - -// NYI-LABEL: @test_vrsraq_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> -// NYI: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> ) -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[TMP3:%.*]] = add <8 x i16> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <8 x i16> [[TMP3]] -// int16x8_t test_vrsraq_n_s16(int16x8_t a, int16x8_t b) { -// return vrsraq_n_s16(a, b, 3); -// } - -// NYI-LABEL: @test_vrsraq_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> -// NYI: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> ) -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[TMP3:%.*]] = add <4 x i32> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <4 x i32> [[TMP3]] -// int32x4_t test_vrsraq_n_s32(int32x4_t a, int32x4_t b) { -// return vrsraq_n_s32(a, b, 3); -// } - -// NYI-LABEL: @test_vrsraq_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> -// NYI: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> ) -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[TMP3:%.*]] = add <2 x i64> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <2 x i64> [[TMP3]] -// int64x2_t test_vrsraq_n_s64(int64x2_t a, int64x2_t b) { -// return vrsraq_n_s64(a, b, 3); -// } - -// NYI-LABEL: @test_vrsra_n_u8( -// NYI: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %b, <8 x i8> ) -// NYI: [[TMP0:%.*]] = add <8 x i8> %a, [[VRSHR_N]] -// NYI: ret <8 x i8> [[TMP0]] -// uint8x8_t test_vrsra_n_u8(uint8x8_t a, uint8x8_t b) { -// return vrsra_n_u8(a, b, 3); -// } - -// NYI-LABEL: @test_vrsra_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> -// NYI: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> ) -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[TMP3:%.*]] = add <4 x i16> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <4 x i16> [[TMP3]] -// uint16x4_t test_vrsra_n_u16(uint16x4_t a, uint16x4_t b) { -// return vrsra_n_u16(a, b, 3); -// } - -// NYI-LABEL: @test_vrsra_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> -// NYI: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> ) -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <2 x i32> [[TMP3]] -// uint32x2_t test_vrsra_n_u32(uint32x2_t a, uint32x2_t b) { -// return vrsra_n_u32(a, b, 3); -// } - -// NYI-LABEL: @test_vrsraq_n_u8( -// NYI: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %b, <16 x i8> ) -// NYI: [[TMP0:%.*]] = add <16 x i8> %a, [[VRSHR_N]] -// NYI: ret <16 x i8> [[TMP0]] -// uint8x16_t test_vrsraq_n_u8(uint8x16_t a, uint8x16_t b) { -// return vrsraq_n_u8(a, b, 3); -// } - -// NYI-LABEL: @test_vrsraq_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> -// NYI: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> ) -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[TMP3:%.*]] = add <8 x i16> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <8 x i16> [[TMP3]] -// uint16x8_t test_vrsraq_n_u16(uint16x8_t a, uint16x8_t b) { -// return vrsraq_n_u16(a, b, 3); -// } - -// NYI-LABEL: @test_vrsraq_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> -// NYI: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> ) -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[TMP3:%.*]] = add <4 x i32> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <4 x i32> [[TMP3]] -// uint32x4_t test_vrsraq_n_u32(uint32x4_t a, uint32x4_t b) { -// return vrsraq_n_u32(a, b, 3); -// } - -// NYI-LABEL: @test_vrsraq_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> -// NYI: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> ) -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[TMP3:%.*]] = add <2 x i64> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <2 x i64> [[TMP3]] -// uint64x2_t test_vrsraq_n_u64(uint64x2_t a, uint64x2_t b) { -// return vrsraq_n_u64(a, b, 3); -// } +int8x8_t test_vrsra_n_s8(int8x8_t a, int8x8_t b) { + return vrsra_n_s8(a, b, 3); + + // CIR-LABEL: vrsra_n_s8 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N:%.*]] = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VRSHR_N]]) : !cir.vector + + // LLVM-LABEL: test_vrsra_n_s8 + // LLVM: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %1, <8 x i8> splat (i8 -3)) + // LLVM: [[TMP0:%.*]] = add <8 x i8> %0, [[VRSHR_N]] + // LLVM: ret <8 x i8> [[TMP0]] +} + +int16x4_t test_vrsra_n_s16(int16x4_t a, int16x4_t b) { + return vrsra_n_s16(a, b, 3); + + // CIR-LABEL: vrsra_n_s16 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.srshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsra_n_s16 + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> %1 to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> + // LLVM: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> splat (i16 -3)) + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[TMP3:%.*]] = add <4 x i16> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <4 x i16> [[TMP3]] +} + +int32x2_t test_vrsra_n_s32(int32x2_t a, int32x2_t b) { + return vrsra_n_s32(a, b, 3); + + // CIR-LABEL: vrsra_n_s32 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.srshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsra_n_s32 + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> %1 to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> + // LLVM: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> splat (i32 -3)) + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <2 x i32> [[TMP3]] +} + +int8x16_t test_vrsraq_n_s8(int8x16_t a, int8x16_t b) { + return vrsraq_n_s8(a, b, 3); + + // CIR-LABEL: vrsraq_n_s8 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N:%.*]] = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VRSHR_N]]) : !cir.vector + + // LLVM-LABEL: test_vrsraq_n_s8 + // LLVM: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %1, <16 x i8> splat (i8 -3)) + // LLVM: [[TMP0:%.*]] = add <16 x i8> %0, [[VRSHR_N]] + // LLVM: ret <16 x i8> [[TMP0]] +} + +int16x8_t test_vrsraq_n_s16(int16x8_t a, int16x8_t b) { + return vrsraq_n_s16(a, b, 3); + + // CIR-LABEL: vrsraq_n_s16 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.srshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsraq_n_s16 + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> %1 to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> + // LLVM: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> splat (i16 -3)) + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[TMP3:%.*]] = add <8 x i16> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <8 x i16> [[TMP3]] +} + +int32x4_t test_vrsraq_n_s32(int32x4_t a, int32x4_t b) { + return vrsraq_n_s32(a, b, 3); + + // CIR-LABEL: vrsraq_n_s32 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.srshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsraq_n_s32 + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> %1 to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> + // LLVM: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> splat (i32 -3)) + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[TMP3:%.*]] = add <4 x i32> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <4 x i32> [[TMP3]] +} + +int64x2_t test_vrsraq_n_s64(int64x2_t a, int64x2_t b) { + return vrsraq_n_s64(a, b, 3); + + // CIR-LABEL: vrsraq_n_s64 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.srshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsraq_n_s64 + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> %1 to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> + // LLVM: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> splat (i64 -3)) + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[TMP3:%.*]] = add <2 x i64> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <2 x i64> [[TMP3]] +} + +uint8x8_t test_vrsra_n_u8(uint8x8_t a, uint8x8_t b) { + return vrsra_n_u8(a, b, 3); + + // CIR-LABEL: vrsra_n_u8 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N:%.*]] = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VRSHR_N]]) : !cir.vector + + // LLVM-LABEL: test_vrsra_n_u8 + // LLVM: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %1, <8 x i8> splat (i8 -3)) + // LLVM: [[TMP0:%.*]] = add <8 x i8> %0, [[VRSHR_N]] + // LLVM: ret <8 x i8> [[TMP0]] +} + +uint16x4_t test_vrsra_n_u16(uint16x4_t a, uint16x4_t b) { + return vrsra_n_u16(a, b, 3); + + // CIR-LABEL: vrsra_n_u16 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.urshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsra_n_u16 + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> %1 to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> + // LLVM: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> splat (i16 -3)) + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[TMP3:%.*]] = add <4 x i16> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <4 x i16> [[TMP3]] +} + +uint32x2_t test_vrsra_n_u32(uint32x2_t a, uint32x2_t b) { + return vrsra_n_u32(a, b, 3); + + // CIR-LABEL: vrsra_n_u32 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.urshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsra_n_u32 + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> %1 to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> + // LLVM: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> splat (i32 -3)) + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <2 x i32> [[TMP3]] +} + +uint8x16_t test_vrsraq_n_u8(uint8x16_t a, uint8x16_t b) { + return vrsraq_n_u8(a, b, 3); + + // CIR-LABEL: vrsraq_n_u8 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N:%.*]] = cir.llvm.intrinsic "aarch64.neon.urshl" {{%.*}}, [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VRSHR_N]]) : !cir.vector + + // LLVM-LABEL: test_vrsraq_n_u8 + // LLVM: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %1, <16 x i8> splat (i8 -3)) + // LLVM: [[TMP0:%.*]] = add <16 x i8> %0, [[VRSHR_N]] + // LLVM: ret <16 x i8> [[TMP0]] +} + +uint16x8_t test_vrsraq_n_u16(uint16x8_t a, uint16x8_t b) { + return vrsraq_n_u16(a, b, 3); + + // CIR-LABEL: vrsraq_n_u16 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.urshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsraq_n_u16 + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> %1 to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> + // LLVM: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> [[VRSHR_N]], <8 x i16> splat (i16 -3)) + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[TMP3:%.*]] = add <8 x i16> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <8 x i16> [[TMP3]] +} + +uint32x4_t test_vrsraq_n_u32(uint32x4_t a, uint32x4_t b) { + return vrsraq_n_u32(a, b, 3); + + // CIR-LABEL: vrsraq_n_u32 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.urshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsraq_n_u32 + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> %1 to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> + // LLVM: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> [[VRSHR_N]], <4 x i32> splat (i32 -3)) + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[TMP3:%.*]] = add <4 x i32> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <4 x i32> [[TMP3]] +} + +uint64x2_t test_vrsraq_n_u64(uint64x2_t a, uint64x2_t b) { + return vrsraq_n_u64(a, b, 3); + + // CIR-LABEL: vrsraq_n_u64 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.urshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsraq_n_u64 + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> %1 to <16 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> + // LLVM: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> [[VRSHR_N]], <2 x i64> splat (i64 -3)) + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[TMP3:%.*]] = add <2 x i64> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <2 x i64> [[TMP3]] +} // NYI-LABEL: @test_vsri_n_s8( // NYI: [[VSRI_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3) @@ -14985,17 +15089,25 @@ int16_t test_vqmovns_s32(int32_t a) { // return (int64_t)vrsrad_n_s64(a, b, 63); // } -// NYI-LABEL: @test_vrsra_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> -// NYI: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> ) -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> -// NYI: [[TMP3:%.*]] = add <1 x i64> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <1 x i64> [[TMP3]] -// int64x1_t test_vrsra_n_s64(int64x1_t a, int64x1_t b) { -// return vrsra_n_s64(a, b, 1); -// } +int64x1_t test_vrsra_n_s64(int64x1_t a, int64x1_t b) { + return vrsra_n_s64(a, b, 1); + + // CIR-LABEL: vrsra_n_s64 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.srshl" {{%.*}}, [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsra_n_s64 + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> %1 to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> + // LLVM: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> splat (i64 -1)) + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[TMP3:%.*]] = add <1 x i64> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <1 x i64> [[TMP3]] +} // NYI-LABEL: @test_vrsrad_n_u64( // NYI: [[TMP0:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 %b, i64 -63) @@ -15005,17 +15117,25 @@ int16_t test_vqmovns_s32(int32_t a) { // return (uint64_t)vrsrad_n_u64(a, b, 63); // } -// NYI-LABEL: @test_vrsra_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> -// NYI: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> -// NYI: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> ) -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> -// NYI: [[TMP3:%.*]] = add <1 x i64> [[TMP2]], [[VRSHR_N1]] -// NYI: ret <1 x i64> [[TMP3]] -// uint64x1_t test_vrsra_n_u64(uint64x1_t a, uint64x1_t b) { -// return vrsra_n_u64(a, b, 1); -// } +uint64x1_t test_vrsra_n_u64(uint64x1_t a, uint64x1_t b) { + return vrsra_n_u64(a, b, 1); + + // CIR-LABEL: vrsra_n_u64 + // CIR: [[VRSHR_N:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VRSHR_N1:%.*]] = cir.llvm.intrinsic "aarch64.neon.urshl" [[VRSHR_N]], [[splat]] : (!cir.vector, !cir.vector) -> !cir.vector + // CIR: [[TMP2:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.vector), !cir.vector + // CIR: cir.binop(add, [[TMP2]], [[VRSHR_N1]]) : !cir.vector + + // LLVM-LABEL: test_vrsra_n_u64 + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> %1 to <8 x i8> + // LLVM: [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> + // LLVM: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> splat (i64 -1)) + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[TMP3:%.*]] = add <1 x i64> [[TMP2]], [[VRSHR_N1]] + // LLVM: ret <1 x i64> [[TMP3]] +} // NYI-LABEL: @test_vshld_n_s64( // NYI: [[SHLD_N:%.*]] = shl i64 %a, 1 From 28bdb64371082c11c1a52c4c45fa00bfd26b71bb Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 12 Dec 2024 14:02:54 -0500 Subject: [PATCH 2167/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vaddlvq_s16 (#1231) Combined implementaiton with `neon_vaddlvq_u16` OG somehow implemented them separately but they are no different except signess and intrinsic name [OG's neon_vaddlvq_s16](https://github.com/llvm/clangir/blob/2b1a638ea07ca10c5727ea835bfbe17b881175cc/clang/lib/CodeGen/CGBuiltin.cpp#L13483) [OG's neon_vaddlvq_u16](https://github.com/llvm/clangir/blob/2b1a638ea07ca10c5727ea835bfbe17b881175cc/clang/lib/CodeGen/CGBuiltin.cpp#L13449) --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 16 +++++++++------- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 11 +++++++++++ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 8ae9bd5c7c99..7982a2350607 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4330,11 +4330,16 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vaddlvq_u8: { llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_u8 NYI"); } - case NEON::BI__builtin_neon_vaddlvq_u16: { - mlir::Type argTy = cir::VectorType::get(builder.getContext(), UInt16Ty, 8); + case NEON::BI__builtin_neon_vaddlvq_u16: + usgn = true; + [[fallthrough]]; + case NEON::BI__builtin_neon_vaddlvq_s16: { + mlir::Type argTy = cir::VectorType::get(builder.getContext(), + usgn ? UInt16Ty : SInt16Ty, 8); llvm::SmallVector argOps = {emitScalarExpr(E->getArg(0))}; - return emitNeonCall(builder, {argTy}, argOps, "aarch64.neon.uaddlv", - UInt32Ty, getLoc(E->getExprLoc())); + return emitNeonCall(builder, {argTy}, argOps, + usgn ? "aarch64.neon.uaddlv" : "aarch64.neon.saddlv", + usgn ? UInt32Ty : SInt32Ty, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vaddlv_s8: { llvm_unreachable("NEON::BI__builtin_neon_vaddlv_s8 NYI"); @@ -4345,9 +4350,6 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vaddlvq_s8: { llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_s8 NYI"); } - case NEON::BI__builtin_neon_vaddlvq_s16: { - llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_s16 NYI"); - } case NEON::BI__builtin_neon_vsri_n_v: case NEON::BI__builtin_neon_vsriq_n_v: { llvm_unreachable("NEON::BI__builtin_neon_vsriq_n_v NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index 1ad9bb369c5d..b04b9ecb06d1 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -894,6 +894,17 @@ uint32_t test_vaddlvq_u16(uint16x8_t a) { // LLVM: ret i32 [[VADDLV_I]] } +int32_t test_vaddlvq_s16(int16x8_t a) { + return vaddlvq_s16(a); + + // CIR-LABEL: vaddlvq_s16 + // CIR: cir.llvm.intrinsic "aarch64.neon.saddlv" {{%.*}}: (!cir.vector) -> !s32i + + // LLVM: {{.*}}test_vaddlvq_s16(<8 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> [[A]]) + // LLVM: ret i32 [[VADDLV_I]] +} + uint16_t test_vaddv_u16(uint16x4_t a) { return vaddv_u16(a); From 8990725416bbf6867610063f44927b89d93e8e3f Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Thu, 12 Dec 2024 12:20:08 -0800 Subject: [PATCH 2168/2301] [CIR][CIRGen] Add uwtable attribute (#1226) The module-level uwtable attribute controls the unwind tables for any synthesized functions, and the function-level attribute controls them for those functions. I'll add support for this attribute to the LLVM dialect as well, but translate it from CIR directly for now to avoid waiting on the MLIR addition and a subsequent rebase. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 30 ++++++++++ .../clang/CIR/Dialect/IR/CIRDialect.td | 1 + clang/include/clang/CIR/MissingFeatures.h | 1 - clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 +++- .../Lowering/DirectToLLVM/LowerToLLVMIR.cpp | 22 ++++++++ clang/test/CIR/CodeGen/uwtable.cpp | 56 +++++++++++++++++++ 6 files changed, 119 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/uwtable.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index b375b624ccc5..e54b52b96c91 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -1134,6 +1134,36 @@ def ConvergentAttr : CIRUnitAttr<"Convergent", "convergent"> { let storageType = [{ ConvergentAttr }]; } +def UWTableKindNone + : I32EnumAttrCase<"None", 0, "none">; +def UWTableKindSync + : I32EnumAttrCase<"Sync", 1, "sync">; +def UWTableKindAsync + : I32EnumAttrCase<"Async", 2, "async">; + +def UWTableKind : I32EnumAttr<"UWTableKind", "Unwind table kind", [ + UWTableKindNone, UWTableKindSync, UWTableKindAsync +]> { + let cppNamespace = "::cir"; + let genSpecializedAttr = 0; +} + +def UWTableAttr : EnumAttr { + let summary = "Unwind table kind attribute"; + let description = [{ + The kind of unwind tables to generate for a function. `none` means no unwind + tables are generated; `sync` means synchronous unwind tables (that are only + valid at call boundaries), and `async` means asynchronous unwind tables + (that are valid at all instructions). When applied to a module, this + controls the unwind table generation for any synthesized functions. + }]; + + let cppClassName = "UWTableAttr"; + let assemblyFormat = [{ + `<` $value `>` + }]; +} + class CIR_GlobalCtorDtor : CIR_Attr<"Global" # name, "global_" # attrMnemonic> { diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index 5b3b4eedc682..46d2f1a13273 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -38,6 +38,7 @@ def CIR_Dialect : Dialect { static llvm::StringRef getLangAttrName() { return "cir.lang"; } static llvm::StringRef getTripleAttrName() { return "cir.triple"; } static llvm::StringRef getOptInfoAttrName() { return "cir.opt_info"; } + static llvm::StringRef getUWTableAttrName() { return "cir.uwtable"; } static llvm::StringRef getGlobalCtorsAttrName() { return "cir.global_ctors"; } static llvm::StringRef getGlobalDtorsAttrName() { return "cir.global_dtors"; } diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 5ab4473bb7ec..19aa040adc19 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -361,7 +361,6 @@ struct MissingFeatures { static bool codeModel() { return false; } static bool largeDataThreshold() { return false; } static bool directAccessExternalData() { return false; } - static bool setUwtable() { return false; } static bool setFramePointer() { return false; } static bool simplifyPersonality() { return false; } static bool emitVersionIdentMetadata() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 7a66b0730078..fb5a44e12abb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2500,6 +2500,12 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, FuncOp f) { mlir::NamedAttrList attrs{f.getExtraAttrs().getElements().getValue()}; + if ((!decl || !decl->hasAttr()) && codeGenOpts.UnwindTables) { + auto attr = cir::UWTableAttr::get( + &getMLIRContext(), cir::UWTableKind(codeGenOpts.UnwindTables)); + attrs.set(attr.getMnemonic(), attr); + } + if (!hasUnwindExceptions(getLangOpts())) { auto attr = cir::NoThrowAttr::get(&getMLIRContext()); attrs.set(attr.getMnemonic(), attr); @@ -3258,7 +3264,10 @@ void CIRGenModule::Release() { llvm_unreachable("NYI"); assert(!MissingFeatures::directAccessExternalData()); if (codeGenOpts.UnwindTables) - assert(!MissingFeatures::setUwtable()); + theModule->setAttr( + cir::CIRDialect::getUWTableAttrName(), + cir::UWTableAttr::get(&getMLIRContext(), + cir::UWTableKind(codeGenOpts.UnwindTables))); switch (codeGenOpts.getFramePointer()) { case CodeGenOptions::FramePointerKind::None: diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp index ca0b498f9f2f..2418b3bbfa11 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVMIR.cpp @@ -81,6 +81,10 @@ class CIRDialectLLVMIRTranslationInterface oclVerMD->addOperand(llvm::MDNode::get(llvmContext, oclVerElts)); } + if (auto uwTableAttr = + mlir::dyn_cast(attribute.getValue())) + llvmModule->setUwtable(convertUWTableKind(uwTableAttr.getValue())); + // Drop ammended CIR attribute from LLVM op. module->removeAttr(attribute.getName()); } @@ -129,6 +133,11 @@ class CIRDialectLLVMIRTranslationInterface attr.getValue())) { emitOpenCLKernelArgMetadata(clArgMetadata, func.getNumArguments(), llvmFunc, moduleTranslation); + } else if (auto uwTableAttr = + mlir::dyn_cast(attr.getValue())) { + llvm::AttrBuilder builder(llvmFunc->getContext()); + builder.addUWTableAttr(convertUWTableKind(uwTableAttr.getValue())); + llvmFunc->addFnAttrs(builder); } } } @@ -261,6 +270,19 @@ class CIRDialectLLVMIRTranslationInterface llvmFunc->setMetadata("kernel_arg_name", llvm::MDNode::get(vmCtx, argNames)); } + +private: + static llvm::UWTableKind convertUWTableKind(cir::UWTableKind kind) { + // TODO(cir): Use UWTableKindAttr from the LLVM dialect when available. + switch (kind) { + case cir::UWTableKind::None: + return llvm::UWTableKind::None; + case cir::UWTableKind::Sync: + return llvm::UWTableKind::Sync; + case cir::UWTableKind::Async: + return llvm::UWTableKind::Async; + } + } }; void registerCIRDialectTranslation(mlir::DialectRegistry ®istry) { diff --git a/clang/test/CIR/CodeGen/uwtable.cpp b/clang/test/CIR/CodeGen/uwtable.cpp new file mode 100644 index 000000000000..ff9d9873f9b6 --- /dev/null +++ b/clang/test/CIR/CodeGen/uwtable.cpp @@ -0,0 +1,56 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t-none.cir +// RUN: FileCheck %s --input-file=%t-none.cir --check-prefix=CIR-NONE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -funwind-tables=0 %s -o %t-none-explicit.cir +// RUN: FileCheck %s --input-file=%t-none-explicit.cir --check-prefix=CIR-NONE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -funwind-tables=1 %s -o %t-sync.cir +// RUN: FileCheck %s --input-file=%t-sync.cir --check-prefix=CIR-SYNC +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -funwind-tables=2 %s -o %t-async.cir +// RUN: FileCheck %s --input-file=%t-async.cir --check-prefix=CIR-ASYNC + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-none.ll +// RUN: FileCheck %s --input-file=%t-none.ll --check-prefix=LLVM-NONE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -funwind-tables=0 %s -o %t-none-explicit.ll +// RUN: FileCheck %s --input-file=%t-none-explicit.ll --check-prefix=LLVM-NONE +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -funwind-tables=1 %s -o %t-sync.ll +// RUN: FileCheck %s --input-file=%t-sync.ll --check-prefix=LLVM-SYNC +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -funwind-tables=2 %s -o %t-async.ll +// RUN: FileCheck %s --input-file=%t-async.ll --check-prefix=LLVM-ASYNC + +// CIR-NONE-NOT: #cir.uwtable + +// CIR-SYNC-DAG: module {{.*}} attributes {{{.*}}cir.uwtable = #cir.uwtable +// CIR-SYNC-DAG: cir.func @_Z1fv() extra(#[[f_attr:.*]]) +// CIR-SYNC-DAG: cir.func @_Z1gv() extra(#[[g_attr:.*]]) +// CIR-SYNC-DAG: #[[f_attr]] = #cir +// CIR-SYNC-DAG: #[[g_attr]] = +// CIR-SYNC-NOT: #cir.uwtable + +// CIR-ASYNC-DAG: module {{.*}} attributes {{{.*}}cir.uwtable = #cir.uwtable +// CIR-ASYNC-DAG: cir.func @_Z1fv() extra(#[[f_attr:.*]]) +// CIR-ASYNC-DAG: cir.func @_Z1gv() extra(#[[g_attr:.*]]) +// CIR-ASYNC-DAG: #[[f_attr]] = #cir +// CIR-ASYNC-DAG: #[[g_attr]] = +// CIR-ASYNC-NOT: #cir.uwtable + +// Avoid matching "uwtable" in the ModuleID and source_filename comments. +// LLVM-NONE: define {{.*}} @_Z1fv() +// LLVM-NONE-NOT: uwtable + +// LLVM-SYNC: define {{.*}} @_Z1fv() #[[#F_ATTRS:]] +// LLVM-SYNC: define {{.*}} @_Z1gv() #[[#G_ATTRS:]] +// LLVM-SYNC: attributes #[[#F_ATTRS]] = {{{.*}}uwtable(sync) +// LLVM-SYNC: attributes #[[#G_ATTRS]] = +// LLVM-SYNC-NOT: uwtable +// LLVM-SYNC-DAG: ![[#METADATA:]] = !{i32 7, !"uwtable", i32 1} +// LLVM-SYNC-DAG: !llvm.module.flags = !{{{.*}}[[#METADATA]] + +// LLVM-ASYNC: define {{.*}} @_Z1fv() #[[#ATTRS:]] +// LLVM-ASYNC: define {{.*}} @_Z1gv() #[[#G_ATTRS:]] +// LLVM-ASYNC: attributes #[[#ATTRS]] = {{{.*}}uwtable{{ }} +// LLVM-ASYNC: attributes #[[#G_ATTRS]] = +// LLVM-ASYNC-NOT: uwtable +// LLVM-ASYNC-DAG: ![[#METADATA:]] = !{i32 7, !"uwtable", i32 2} +// LLVM-ASYNC-DAG: !llvm.module.flags = !{{{.*}}[[#METADATA]] +void f() {} + +[[clang::nouwtable]] void g() {} From e06d300d0f07dae9b55af4cb0e5405d75176aac7 Mon Sep 17 00:00:00 2001 From: Shoaib Meenai Date: Thu, 12 Dec 2024 16:21:55 -0800 Subject: [PATCH 2169/2301] [CIR][CIRGen] Match skeleton for setCIRFunctionAttributesForDefinition (#1232) Match `CodeGenModule::SetLLVMFunctionAttributesForDefinition` so that we can see what's missing and have a good base to build upon. --- clang/include/clang/CIR/MissingFeatures.h | 7 + clang/lib/CIR/CodeGen/CIRGenModule.cpp | 172 ++++++++++++++++------ 2 files changed, 135 insertions(+), 44 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 19aa040adc19..c0707d687fca 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -371,6 +371,13 @@ struct MissingFeatures { static bool setVisibilityFromDLLStorageClass() { return false; } static bool mustTailCallUndefinedGlobals() { return false; } + //-- Missing parts of the setCIRFunctionAttributesForDefinition skeleton. + static bool stackProtector() { return false; } + static bool optimizeForSize() { return false; } + static bool minSize() { return false; } + static bool setFunctionAlignment() { return false; } + static bool memberFunctionPointerTypeMetadata() { return false; } + //-- Other missing features // We need to track the parent record types that represent a field diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index fb5a44e12abb..b7197afeb896 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2506,26 +2506,101 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, attrs.set(attr.getMnemonic(), attr); } + if (codeGenOpts.StackClashProtector) + llvm_unreachable("NYI"); + + if (codeGenOpts.StackProbeSize && codeGenOpts.StackProbeSize != 4096) + llvm_unreachable("NYI"); + if (!hasUnwindExceptions(getLangOpts())) { auto attr = cir::NoThrowAttr::get(&getMLIRContext()); attrs.set(attr.getMnemonic(), attr); } + assert(!MissingFeatures::stackProtector()); + + auto existingInlineAttr = dyn_cast_if_present( + attrs.get(cir::InlineAttr::getMnemonic())); + bool isNoInline = existingInlineAttr && existingInlineAttr.isNoInline(); + bool isAlwaysInline = + existingInlineAttr && existingInlineAttr.isAlwaysInline(); + if (!decl) { - // If we don't have a declaration to control inlining, the function isn't - // explicitly marked as alwaysinline for semantic reasons, and inlining is - // disabled, mark the function as noinline. - if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { + // Non-entry HLSL functions must always be inlined. + if (getLangOpts().HLSL && !isNoInline) { auto attr = cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::AlwaysInline); attrs.set(attr.getMnemonic(), attr); + } else if (!isAlwaysInline && codeGenOpts.getInlining() == + CodeGenOptions::OnlyAlwaysInlining) { + // If we don't have a declaration to control inlining, the function isn't + // explicitly marked as alwaysinline for semantic reasons, and inlining is + // disabled, mark the function as noinline. + auto attr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); + attrs.set(attr.getMnemonic(), attr); } - } else if (decl->hasAttr()) { + + f.setExtraAttrsAttr(cir::ExtraFuncAttributesAttr::get( + &getMLIRContext(), attrs.getDictionary(&getMLIRContext()))); + return; + } + + // Handle SME attributes that apply to function definitions, + // rather than to function prototypes. + if (decl->hasAttr()) + llvm_unreachable("NYI"); + + if (auto *attr = decl->getAttr()) { + if (attr->isNewZA()) + llvm_unreachable("NYI"); + if (attr->isNewZT0()) + llvm_unreachable("NYI"); + } + + // Track whether we need to add the optnone attribute, + // starting with the default for this optimization level. + bool shouldAddOptNone = + !codeGenOpts.DisableO0ImplyOptNone && codeGenOpts.OptimizationLevel == 0; + // We can't add optnone in the following cases, it won't pass the verifier. + shouldAddOptNone &= !decl->hasAttr(); + shouldAddOptNone &= !decl->hasAttr(); + + // Non-entry HLSL functions must always be inlined. + if (getLangOpts().HLSL && !isNoInline && !decl->hasAttr()) { + auto attr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::AlwaysInline); + attrs.set(attr.getMnemonic(), attr); + } else if ((shouldAddOptNone || decl->hasAttr()) && + !isAlwaysInline) { + // Add optnone, but do so only if the function isn't always_inline. + auto optNoneAttr = cir::OptNoneAttr::get(&getMLIRContext()); + attrs.set(optNoneAttr.getMnemonic(), optNoneAttr); + + // OptimizeNone implies noinline; we should not be inlining such functions. + auto noInlineAttr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); + attrs.set(noInlineAttr.getMnemonic(), noInlineAttr); + + // We still need to handle naked functions even though optnone subsumes + // much of their semantics. + if (decl->hasAttr()) + llvm_unreachable("NYI"); + + // OptimizeNone wins over OptimizeForSize and MinSize. + assert(!MissingFeatures::optimizeForSize()); + assert(!MissingFeatures::minSize()); + } else if (decl->hasAttr()) { + // Naked implies noinline: we should not be inlining such functions. + llvm_unreachable("NYI"); + } else if (decl->hasAttr()) { + llvm_unreachable("NYI"); + } else if (decl->hasAttr() && !isAlwaysInline) { // Add noinline if the function isn't always_inline. auto attr = cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); attrs.set(attr.getMnemonic(), attr); - } else if (decl->hasAttr()) { + } else if (decl->hasAttr() && !isNoInline) { // (noinline wins over always_inline, and we can't specify both in IR) auto attr = cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::AlwaysInline); @@ -2533,57 +2608,66 @@ void CIRGenModule::setCIRFunctionAttributesForDefinition(const Decl *decl, } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) { // If we're not inlining, then force everything that isn't always_inline // to carry an explicit noinline attribute. - auto attr = - cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); - attrs.set(attr.getMnemonic(), attr); + if (!isAlwaysInline) { + auto attr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); + attrs.set(attr.getMnemonic(), attr); + } } else { // Otherwise, propagate the inline hint attribute and potentially use its // absence to mark things as noinline. // Search function and template pattern redeclarations for inline. - auto CheckForInline = [](const FunctionDecl *decl) { - auto CheckRedeclForInline = [](const FunctionDecl *Redecl) { - return Redecl->isInlineSpecified(); + if (auto *fd = dyn_cast(decl)) { + auto checkForInline = [](const FunctionDecl *decl) { + auto checkRedeclForInline = [](const FunctionDecl *redecl) { + return redecl->isInlineSpecified(); + }; + if (any_of(decl->redecls(), checkRedeclForInline)) + return true; + const FunctionDecl *pattern = decl->getTemplateInstantiationPattern(); + if (!pattern) + return false; + return any_of(pattern->redecls(), checkRedeclForInline); }; - if (any_of(decl->redecls(), CheckRedeclForInline)) - return true; - const FunctionDecl *Pattern = decl->getTemplateInstantiationPattern(); - if (!Pattern) - return false; - return any_of(Pattern->redecls(), CheckRedeclForInline); - }; - if (CheckForInline(cast(decl))) { - auto attr = - cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::InlineHint); - attrs.set(attr.getMnemonic(), attr); - } else if (codeGenOpts.getInlining() == CodeGenOptions::OnlyHintInlining) { - auto attr = - cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); - attrs.set(attr.getMnemonic(), attr); + if (checkForInline(fd)) { + auto attr = cir::InlineAttr::get(&getMLIRContext(), + cir::InlineKind::InlineHint); + attrs.set(attr.getMnemonic(), attr); + } else if (codeGenOpts.getInlining() == + CodeGenOptions::OnlyHintInlining && + !fd->isInlined() && !isAlwaysInline) { + auto attr = + cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); + attrs.set(attr.getMnemonic(), attr); + } } } - // Track whether we need to add the optnone attribute, - // starting with the default for this optimization level. - bool ShouldAddOptNone = - !codeGenOpts.DisableO0ImplyOptNone && codeGenOpts.OptimizationLevel == 0; - if (decl) { - ShouldAddOptNone &= !decl->hasAttr(); - ShouldAddOptNone &= !decl->hasAttr(); - ShouldAddOptNone |= decl->hasAttr(); + // Add other optimization related attributes if we are optimizing this + // function. + if (!decl->hasAttr()) { + if (decl->hasAttr()) { + llvm_unreachable("NYI"); + } + if (decl->hasAttr()) + llvm_unreachable("NYI"); + if (decl->hasAttr()) + assert(!MissingFeatures::minSize()); } - if (ShouldAddOptNone) { - auto optNoneAttr = cir::OptNoneAttr::get(&getMLIRContext()); - attrs.set(optNoneAttr.getMnemonic(), optNoneAttr); + f.setExtraAttrsAttr(cir::ExtraFuncAttributesAttr::get( + &getMLIRContext(), attrs.getDictionary(&getMLIRContext()))); - // OptimizeNone implies noinline; we should not be inlining such functions. - auto noInlineAttr = - cir::InlineAttr::get(&getMLIRContext(), cir::InlineKind::NoInline); - attrs.set(noInlineAttr.getMnemonic(), noInlineAttr); + assert(!MissingFeatures::setFunctionAlignment()); + + // In the cross-dso CFI mode with canonical jump tables, we want !type + // attributes on definitions only. + if (codeGenOpts.SanitizeCfiCrossDso && + codeGenOpts.SanitizeCfiCanonicalJumpTables) { + llvm_unreachable("NYI"); } - f.setExtraAttrsAttr(cir::ExtraFuncAttributesAttr::get( - &getMLIRContext(), attrs.getDictionary(&getMLIRContext()))); + assert(!MissingFeatures::memberFunctionPointerTypeMetadata()); } void CIRGenModule::setCIRFunctionAttributes(GlobalDecl GD, From 1f552dad1127e6c0e40804233debdd2be7d43ad9 Mon Sep 17 00:00:00 2001 From: Guojin Date: Fri, 13 Dec 2024 16:02:03 -0500 Subject: [PATCH 2170/2301] [CIR][ThroughMLIR][NFC] BinOpKind_Max NYI ThroughMLIR lowering (#1233) --- clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 8b07d0b31cd2..54c2c798a5a1 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -726,6 +726,9 @@ class CIRBinOpLowering : public mlir::OpConversionPattern { rewriter.replaceOpWithNewOp( op, mlirType, adaptor.getLhs(), adaptor.getRhs()); break; + case cir::BinOpKind::Max: + llvm_unreachable("BinOpKind::Max lowering through MLIR NYI"); + break; } return mlir::LogicalResult::success(); From 61f8f895f50229aa8f75a0ae6a36995ee1f54088 Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Wed, 18 Dec 2024 01:31:43 +0800 Subject: [PATCH 2171/2301] [CIR][CIRGen] Introduce cir.delete.array op (#1172) I am working on a clangir based solution to improve C++'s safety (https://discourse.llvm.org/t/rfc-a-clangir-based-safe-c/83245). This is similar with the previous analysis only approach I proposed, where we may not care about the lowered code. And this is what I described as layering problems in https://discourse.llvm.org/t/rfc-a-clangir-based-safe-c/83245 This is similar with the other issue proposed https://github.com/llvm/clangir/issues/1128. We'd better to emit the higher level operations and lowering/optimizing it later. This is also inspired our handling method for VAArg, where we use ABI information to lower things during the passes. It gives me more confidence that I am doing things right. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 15 ++++++++++++++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 10 ++++++++-- .../Dialect/Transforms/LoweringPrepare.cpp | 20 +++++++++++++++++-- .../Transforms/LoweringPrepareCXXABI.h | 4 ++++ .../Transforms/LoweringPrepareItaniumCXXABI.h | 12 +++++++++++ .../TargetLowering/ItaniumCXXABI.cpp | 3 +++ clang/test/CIR/CodeGen/delete-array.cpp | 8 ++++++++ 7 files changed, 68 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/delete-array.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b86861999330..798a1718abe9 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3555,6 +3555,21 @@ def LLVMIntrinsicCallOp : CIR_Op<"llvm.intrinsic"> { } +//===----------------------------------------------------------------------===// +// DeleteArrayOp +//===----------------------------------------------------------------------===// + +def DeleteArrayOp : CIR_Op<"delete.array">, + Arguments<(ins CIR_PointerType:$address)> { + let summary = "Delete address representing an array"; + let description = [{ + `cir.delete.array` operation deletes an array. For example, `delete[] ptr;` + will be translated to `cir.delete.array %ptr`. + }]; + let assemblyFormat = "$address `:` type($address) attr-dict"; + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // CallOp and TryCallOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 43fb7c2d7724..50169aa849a4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -901,17 +901,23 @@ void CIRGenFunction::emitCXXDeleteExpr(const CXXDeleteExpr *E) { return; } + // In CodeGen: // We might be deleting a pointer to array. If so, GEP down to the // first non-array element. // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*) + // In CIRGen: we handle this differently because the deallocation of + // array highly relates to the array cookies, which is ABI sensitive, + // we plan to handle it in LoweringPreparePass and the corresponding + // ABI part. if (DeleteTy->isConstantArrayType()) { - llvm_unreachable("NYI"); + Ptr = Ptr; } assert(convertTypeForMem(DeleteTy) == Ptr.getElementType()); if (E->isArrayForm()) { - llvm_unreachable("NYI"); + builder.create(Ptr.getPointer().getLoc(), + Ptr.getPointer()); } else { (void)EmitObjectDelete(*this, E, Ptr, DeleteTy); } diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index b0709e9638ff..20239d843d2d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -77,6 +77,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerComplexBinOp(ComplexBinOp op); void lowerThreeWayCmpOp(CmpThreeWayOp op); void lowerVAArgOp(VAArgOp op); + void lowerDeleteArrayOp(DeleteArrayOp op); void lowerGlobalOp(GlobalOp op); void lowerDynamicCastOp(DynamicCastOp op); void lowerStdFindOp(StdFindOp op); @@ -157,6 +158,8 @@ struct LoweringPreparePass : public LoweringPrepareBase { /// Tracks current module. ModuleOp theModule; + std::optional datalayout; + /// Tracks existing dynamic initializers. llvm::StringMap dynamicInitializerNames; llvm::SmallVector dynamicInitializers; @@ -344,9 +347,8 @@ static void canonicalizeIntrinsicThreeWayCmp(CIRBaseBuilderTy &builder, void LoweringPreparePass::lowerVAArgOp(VAArgOp op) { CIRBaseBuilderTy builder(getContext()); builder.setInsertionPoint(op); - cir::CIRDataLayout datalayout(theModule); - auto res = cxxABI->lowerVAArg(builder, op, datalayout); + auto res = cxxABI->lowerVAArg(builder, op, *datalayout); if (res) { op.replaceAllUsesWith(res); op.erase(); @@ -354,6 +356,17 @@ void LoweringPreparePass::lowerVAArgOp(VAArgOp op) { return; } +void LoweringPreparePass::lowerDeleteArrayOp(DeleteArrayOp op) { + CIRBaseBuilderTy builder(getContext()); + builder.setInsertionPoint(op); + + cxxABI->lowerDeleteArray(builder, op, *datalayout); + // DeleteArrayOp won't have a result, so we don't need to replace + // the uses. + op.erase(); + return; +} + void LoweringPreparePass::lowerUnaryOp(UnaryOp op) { auto ty = op.getType(); if (!mlir::isa(ty)) @@ -1154,6 +1167,8 @@ void LoweringPreparePass::runOnOp(Operation *op) { lowerThreeWayCmpOp(threeWayCmp); } else if (auto vaArgOp = dyn_cast(op)) { lowerVAArgOp(vaArgOp); + } else if (auto deleteArrayOp = dyn_cast(op)) { + lowerDeleteArrayOp(deleteArrayOp); } else if (auto getGlobal = dyn_cast(op)) { lowerGlobalOp(getGlobal); } else if (auto dynamicCast = dyn_cast(op)) { @@ -1188,6 +1203,7 @@ void LoweringPreparePass::runOnOperation() { auto *op = getOperation(); if (isa<::mlir::ModuleOp>(op)) { theModule = cast<::mlir::ModuleOp>(op); + datalayout.emplace(theModule); } llvm::SmallVector opsToTransform; diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h index f3ae48c13574..9824797a31ef 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareCXXABI.h @@ -32,6 +32,10 @@ class LoweringPrepareCXXABI { virtual mlir::Value lowerVAArg(CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) = 0; + + virtual mlir::Value + lowerDeleteArray(cir::CIRBaseBuilderTy &builder, cir::DeleteArrayOp op, + const cir::CIRDataLayout &datalayout) = 0; virtual ~LoweringPrepareCXXABI() {} virtual mlir::Value lowerDynamicCast(CIRBaseBuilderTy &builder, diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h index 58b2a5e3915d..4436eb2b851d 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepareItaniumCXXABI.h @@ -21,4 +21,16 @@ class LoweringPrepareItaniumCXXABI : public cir::LoweringPrepareCXXABI { cir::DynamicCastOp op) override; mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) override; + + mlir::Value lowerDeleteArray(cir::CIRBaseBuilderTy &builder, + cir::DeleteArrayOp op, + const cir::CIRDataLayout &datalayout) override { + // Note: look at `CIRGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *)` + // in CIRGenExprCXX.cpp. + // In traditional code gen, we need handle ABI related array cookie to + // generate codes to handle the expression to delete array. We need similar + // mechanism here for ItaniumCXXABI. + llvm_unreachable("NYI && Delete Array is not supported to be lowered in " + "Itanium CXX ABI"); + } }; diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index 0ba1e51232c2..f22eca2f15c6 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -210,4 +210,7 @@ class LoweringPrepareItaniumCXXABI : public cir::LoweringPrepareCXXABI { cir::DynamicCastOp op) override; mlir::Value lowerVAArg(cir::CIRBaseBuilderTy &builder, cir::VAArgOp op, const cir::CIRDataLayout &datalayout) override; + mlir::Value lowerDeleteArray(cir::CIRBaseBuilderTy &builder, + cir::DeleteArrayOp op, + const cir::CIRDataLayout &datalayout) override; }; diff --git a/clang/test/CIR/CodeGen/delete-array.cpp b/clang/test/CIR/CodeGen/delete-array.cpp new file mode 100644 index 000000000000..b76d0551f378 --- /dev/null +++ b/clang/test/CIR/CodeGen/delete-array.cpp @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +void test_delete_array(int *ptr) { + delete[] ptr; +} + +// CHECK: cir.delete.array From 50b5898c64e78b863190cf905e9b46016b43dcad Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 17 Dec 2024 12:33:26 -0500 Subject: [PATCH 2172/2301] [CIR][CIRGen] Support Lambda capturing `this` object (#1213) The PR should help us to get rid of NYI `NYI UNREACHABLE executed at clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp:899` [Relevant OG code here](https://github.com/llvm/clangir/blob/7fb608d4d1b72c25a1739a1bd66c9024208819cb/clang/lib/CodeGen/CGExpr.cpp#L4767): I put `HasExplicitObjectParameter` support as a missing feature, which is a new C++23 feature. --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 34 +++++- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 2 + clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 19 +++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +- clang/test/CIR/CodeGen/lambda.cpp | 133 ++++++++++++++++++++--- 5 files changed, 173 insertions(+), 20 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 37b147a3aa57..3921f04688d7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -945,11 +945,35 @@ static LValue emitGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, return LV; } -static LValue emitCapturedFieldLValue(CIRGenFunction &CGF, const FieldDecl *FD, - mlir::Value ThisValue) { - QualType TagType = CGF.getContext().getTagDeclType(FD->getParent()); - LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType); - return CGF.emitLValueForField(LV, FD); +static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd, + mlir::Value thisValue) { + return cgf.emitLValueForLambdaField(fd, thisValue); +} + +/// Given that we are currently emitting a lambda, emit an l-value for +/// one of its members. +/// +LValue CIRGenFunction::emitLValueForLambdaField(const FieldDecl *field, + mlir::Value thisValue) { + bool hasExplicitObjectParameter = false; + const auto *methD = dyn_cast_if_present(CurCodeDecl); + LValue lambdaLV; + if (methD) { + hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction(); + assert(methD->getParent()->isLambda()); + assert(methD->getParent() == field->getParent()); + } + if (hasExplicitObjectParameter) { + llvm_unreachable("ExplicitObjectMemberFunction NYI"); + } else { + QualType lambdaTagType = getContext().getTagDeclType(field->getParent()); + lambdaLV = MakeNaturalAlignAddrLValue(thisValue, lambdaTagType); + } + return emitLValueForField(lambdaLV, field); +} + +LValue CIRGenFunction::emitLValueForLambdaField(const FieldDecl *field) { + return emitLValueForLambdaField(field, CXXABIThisValue); } static LValue emitFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 32f343ffd605..46f89bf60d18 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -928,6 +928,8 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { ValueDecl *v = capture.getCapturedVar(); fieldName = v->getName(); CGF.getCIRGenModule().LambdaFieldToName[*CurField] = fieldName; + } else if (capture.capturesThis()) { + CGF.getCIRGenModule().LambdaFieldToName[*CurField] = "this"; } else { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index cd7763f2ef79..cba4d0f1a75d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -1279,7 +1279,24 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, MD->getParent()->getCaptureFields(LambdaCaptureFields, LambdaThisCaptureField); if (LambdaThisCaptureField) { - llvm_unreachable("NYI"); + // If the lambda captures the object referred to by '*this' - either by + // value or by reference, make sure CXXThisValue points to the correct + // object. + + // Get the lvalue for the field (which is a copy of the enclosing object + // or contains the address of the enclosing object). + LValue thisFieldLValue = + emitLValueForLambdaField(LambdaThisCaptureField); + if (!LambdaThisCaptureField->getType()->isPointerType()) { + // If the enclosing object was captured by value, just use its + // address. Sign this pointer. + CXXThisValue = thisFieldLValue.getPointer(); + } else { + // Load the lvalue pointed to by the field, since '*this' was captured + // by reference. + CXXThisValue = emitLoadOfLValue(thisFieldLValue, SourceLocation()) + .getScalarVal(); + } } for (auto *FD : MD->getParent()->fields()) { if (FD->hasCapturedVLAType()) { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index b6b949e47bf2..764c20aaa152 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1679,8 +1679,11 @@ class CIRGenFunction : public CIRGenTypeCache { void initializeVTablePointer(mlir::Location loc, const VPtr &Vptr); AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD); - LValue emitLValueForField(LValue Base, const clang::FieldDecl *Field); + LValue emitLValueForField(LValue base, const clang::FieldDecl *field); LValue emitLValueForBitField(LValue base, const FieldDecl *field); + LValue emitLValueForLambdaField(const FieldDecl *field); + LValue emitLValueForLambdaField(const FieldDecl *field, + mlir::Value thisValue); /// Like emitLValueForField, excpet that if the Field is a reference, this /// will return the address of the reference and not the address of the value diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index b45634c0def8..138533e2308d 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -9,7 +9,10 @@ void fn() { a(); } +// CHECK-DAG: !ty_A = !cir.struct // CHECK: !ty_anon2E0_ = !cir.struct +// CHECK-DAG: !ty_anon2E7_ = !cir.struct +// CHECK-DAG: !ty_anon2E8_ = !cir.struct}> // CHECK-DAG: module // CHECK: cir.func lambda internal private @_ZZ2fnvENK3$_0clEv{{.*}}) extra @@ -18,9 +21,8 @@ void fn() { // CHECK-NEXT: %0 = cir.alloca !ty_anon2E0_, !cir.ptr, ["a"] // CHECK: cir.call @_ZZ2fnvENK3$_0clEv -// LLVM: {{.*}}void @"_ZZ2fnvENK3$_0clEv"(ptr [[THIS:%.*]]) -// FIXME: argument attributes should be emmitted, and lambda's alignment -// COM: LLVM: {{.*}} @"_ZZ2fnvENK3$_0clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[THIS:%.*]]){{%.*}} align 2 { +// LLVM-LABEL: _ZZ2fnvENK3$_0clEv +// LLVM-SAME: (ptr [[THIS:%.*]]) // LLVM: [[THIS_ADDR:%.*]] = alloca ptr, i64 1, align 8 // LLVM: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // LLVM: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 @@ -53,9 +55,10 @@ void l0() { // CHECK: %8 = cir.load %7 : !cir.ptr>, !cir.ptr // CHECK: cir.store %6, %8 : !s32i, !cir.ptr -// CHECK: cir.func @_Z2l0v() +// CHECK-LABEL: _Z2l0v -// LLVM: {{.* }}void @"_ZZ2l0vENK3$_0clEv"(ptr [[THIS:%.*]]) +// LLVM-LABEL: _ZZ2l0vENK3$_0clEv +// LLVM-SAME: (ptr [[THIS:%.*]]) // LLVM: [[THIS_ADDR:%.*]] = alloca ptr, i64 1, align 8 // LLVM: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // LLVM: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 @@ -91,7 +94,7 @@ auto g() { }; } -// CHECK: cir.func @_Z1gv() -> !ty_anon2E3_ +// CHECK-LABEL: @_Z1gv() // CHECK: %0 = cir.alloca !ty_anon2E3_, !cir.ptr, ["__retval"] {alignment = 8 : i64} // CHECK: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CHECK: %2 = cir.const #cir.int<12> : !s32i @@ -120,7 +123,7 @@ auto g2() { } // Should be same as above because of NRVO -// CHECK: cir.func @_Z2g2v() -> !ty_anon2E4_ +// CHECK-LABEL: @_Z2g2v() // CHECK-NEXT: %0 = cir.alloca !ty_anon2E4_, !cir.ptr, ["__retval", init] {alignment = 8 : i64} // CHECK-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} // CHECK-NEXT: %2 = cir.const #cir.int<12> : !s32i @@ -143,7 +146,7 @@ int f() { return g2()(); } -// CHECK: cir.func @_Z1fv() -> !s32i +// CHECK-LABEL: @_Z1fv() // CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK-NEXT: cir.scope { // CHECK-NEXT: %2 = cir.alloca !ty_anon2E4_, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} @@ -156,7 +159,8 @@ int f() { // CHECK-NEXT: cir.return %1 : !s32i // CHECK-NEXT: } -// LLVM: {{.*}}i32 @"_ZZ2g2vENK3$_0clEv"(ptr [[THIS:%.*]]) +// LLVM-LABEL: _ZZ2g2vENK3$_0clEv +// LLVM-SAME: (ptr [[THIS:%.*]]) // LLVM: [[THIS_ADDR:%.*]] = alloca ptr, i64 1, align 8 // LLVM: [[I_SAVE:%.*]] = alloca i32, i64 1, align 4 // LLVM: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 @@ -201,7 +205,7 @@ int g3() { // lambda operator int (*)(int const&)() // CHECK: cir.func internal private @_ZZ2g3vENK3$_0cvPFiRKiEEv -// CHECK: cir.func @_Z2g3v() -> !s32i +// CHECK-LABEL: @_Z2g3v() // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} // CHECK: %1 = cir.alloca !cir.ptr)>>, !cir.ptr)>>>, ["fn", init] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !s32i, !cir.ptr, ["task", init] {alignment = 4 : i64} @@ -230,12 +234,14 @@ int g3() { // CHECK: } // lambda operator() +// LLVM-LABEL: _ZZ2g3vENK3$_0clERKi // FIXME: argument attributes should be emitted -// COM: LLVM: define internal noundef i32 @"_ZZ2g3vENK3$_0clERKi"(ptr noundef nonnull align 1 dereferenceable(1) {{%.*}}, ptr noundef nonnull align 4 dereferenceable(4){{%.*}}) #0 align 2 -// LLVM: {{.*}}i32 @"_ZZ2g3vENK3$_0clERKi"(ptr {{%.*}}, ptr {{%.*}}) +// COM: LLVM-SAME: (ptr noundef nonnull align 1 dereferenceable(1) {{%.*}}, +// COM: LLVM-SAME: ptr noundef nonnull align 4 dereferenceable(4){{%.*}}) #0 align 2 // lambda __invoke() -// LLVM: {{.*}}i32 @"_ZZ2g3vEN3$_08__invokeERKi"(ptr [[i:%.*]]) +// LLVM-LABEL: _ZZ2g3vEN3$_08__invokeERKi +// LLVM-SAME: (ptr [[i:%.*]]) // LLVM: [[i_addr:%.*]] = alloca ptr, i64 1, align 8 // LLVM: [[ret_val:%.*]] = alloca i32, i64 1, align 4 // LLVM: [[unused_capture:%.*]] = alloca %class.anon.5, i64 1, align 1 @@ -285,3 +291,104 @@ int g3() { // LLVM: store i32 [[tmp2]], ptr [[ret_val]], align 4 // LLVM: [[tmp3:%.*]] = load i32, ptr [[ret_val]], align 4 // LLVM: ret i32 [[tmp3]] + +struct A { + int a = 111; + int foo() { return [*this] { return a; }(); } + int bar() { return [this] { return a; }(); } +}; +// A's default ctor +// CHECK-LABEL: _ZN1AC1Ev + +// lambda operator() in foo() +// CHECK-LABEL: _ZZN1A3fooEvENKUlvE_clEv +// CHECK-SAME: ([[ARG:%.*]]: !cir.ptr +// CHECK: [[ARG_ADDR:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: [[RETVAL_ADDR:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store [[ARG]], [[ARG_ADDR]] : !cir.ptr, !cir.ptr> +// CHECK: [[CLS_ANNO7:%.*]] = cir.load [[ARG_ADDR]] : !cir.ptr>, !cir.ptr +// CHECK: [[STRUCT_A:%.*]] = cir.get_member [[CLS_ANNO7]][0] {name = "this"} : !cir.ptr -> !cir.ptr +// CHECK: [[a:%.*]] = cir.get_member [[STRUCT_A]][0] {name = "a"} : !cir.ptr -> !cir.ptr loc(#loc70) +// CHECK: [[TMP0:%.*]] = cir.load [[a]] : !cir.ptr, !s32i +// CHECK: cir.store [[TMP0]], [[RETVAL_ADDR]] : !s32i, !cir.ptr +// CHECK: [[RET_VAL:%.*]] = cir.load [[RETVAL_ADDR]] : !cir.ptr, +// CHECK: cir.return [[RET_VAL]] : !s32i + +// LLVM-LABEL: @_ZZN1A3fooEvENKUlvE_clEv +// LLVM-SAME: (ptr [[ARG:%.*]]) +// LLVM: [[ARG_ADDR:%.*]] = alloca ptr, i64 1, align 8 +// LLVM: [[RET:%.*]] = alloca i32, i64 1, align 4 +// LLVM: store ptr [[ARG]], ptr [[ARG_ADDR]], align 8 +// LLVM: [[CLS_ANNO7:%.*]] = load ptr, ptr [[ARG_ADDR]], align 8 +// LLVM: [[STRUCT_A:%.*]] = getelementptr %class.anon.7, ptr [[CLS_ANNO7]], i32 0, i32 0 +// LLVM: [[a:%.*]] = getelementptr %struct.A, ptr [[STRUCT_A]], i32 0, i32 0 +// LLVM: [[TMP0:%.*]] = load i32, ptr [[a]], align 4 +// LLVM: store i32 [[TMP0]], ptr [[RET]], align 4 +// LLVM: [[TMP1:%.*]] = load i32, ptr [[RET]], align 4 +// LLVM: ret i32 [[TMP1]] + +// A::foo() +// CHECK-LABEL: @_ZN1A3fooEv +// CHECK: [[THIS_ARG:%.*]] = cir.alloca !ty_anon2E7_, !cir.ptr, ["ref.tmp0"] {alignment = 4 : i64} +// CHECK: cir.call @_ZZN1A3fooEvENKUlvE_clEv([[THIS_ARG]]) : (!cir.ptr) -> !s32i + +// LLVM-LABEL: _ZN1A3fooEv +// LLVM: [[this_in_foo:%.*]] = alloca %class.anon.7, i64 1, align 4 +// LLVM: call i32 @_ZZN1A3fooEvENKUlvE_clEv(ptr [[this_in_foo]]) + +// lambda operator() in bar() +// CHECK-LABEL: _ZZN1A3barEvENKUlvE_clEv +// CHECK-SAME: ([[ARG2:%.*]]: !cir.ptr +// CHECK: [[ARG2_ADDR:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} +// CHECK: [[RETVAL_ADDR:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} +// CHECK: cir.store [[ARG2]], [[ARG2_ADDR]] : !cir.ptr, !cir.ptr> +// CHECK: [[CLS_ANNO8:%.*]] = cir.load [[ARG2_ADDR]] : !cir.ptr>, !cir.ptr +// CHECK: [[STRUCT_A_PTR:%.*]] = cir.get_member [[CLS_ANNO8]][0] {name = "this"} : !cir.ptr -> !cir.ptr> +// CHECK: [[STRUCT_A:%.*]] = cir.load [[STRUCT_A_PTR]] : !cir.ptr>, !cir.ptr +// CHECK: [[a:%.*]] = cir.get_member [[STRUCT_A]][0] {name = "a"} : !cir.ptr -> !cir.ptr loc(#loc70) +// CHECK: [[TMP0:%.*]] = cir.load [[a]] : !cir.ptr, !s32i +// CHECK: cir.store [[TMP0]], [[RETVAL_ADDR]] : !s32i, !cir.ptr +// CHECK: [[RET_VAL:%.*]] = cir.load [[RETVAL_ADDR]] : !cir.ptr +// CHECK: cir.return [[RET_VAL]] : !s32i + +// LLVM-LABEL: _ZZN1A3barEvENKUlvE_clEv +// LLVM-SAME: (ptr [[ARG2:%.*]]) +// LLVM: [[ARG2_ADDR:%.*]] = alloca ptr, i64 1, align 8 +// LLVM: [[RET:%.*]] = alloca i32, i64 1, align 4 +// LLVM: store ptr [[ARG2]], ptr [[ARG2_ADDR]], align 8 +// LLVM: [[CLS_ANNO8:%.*]] = load ptr, ptr [[ARG2_ADDR]], align 8 +// LLVM: [[STRUCT_A_PTR:%.*]] = getelementptr %class.anon.8, ptr [[CLS_ANNO8]], i32 0, i32 0 +// LLVM: [[STRUCT_A:%.*]] = load ptr, ptr [[STRUCT_A_PTR]], align 8 +// LLVM: [[a:%.*]] = getelementptr %struct.A, ptr [[STRUCT_A]], i32 +// LLVM: [[TMP0:%.*]] = load i32, ptr [[a]], align 4 +// LLVM: store i32 [[TMP0]], ptr [[RET]], align 4 +// LLVM: [[TMP1:%.*]] = load i32, ptr [[RET]], align 4 +// LLVM: ret i32 [[TMP1]] + +// A::bar() +// CHECK-LABEL: _ZN1A3barEv +// CHECK: [[THIS_ARG:%.*]] = cir.alloca !ty_anon2E8_, !cir.ptr, ["ref.tmp0"] {alignment = 8 : i64} +// CHECK: cir.call @_ZZN1A3barEvENKUlvE_clEv([[THIS_ARG]]) + +// LLVM-LABEL: _ZN1A3barEv +// LLVM: [[this_in_bar:%.*]] = alloca %class.anon.8, i64 1, align 8 +// LLVM: call i32 @_ZZN1A3barEvENKUlvE_clEv(ptr [[this_in_bar]]) + +int test_lambda_this1(){ + struct A clsA; + int x = clsA.foo(); + int y = clsA.bar(); + return x+y; +} + +// CHECK-LABEL: test_lambda_this1 +// Construct A +// CHECK: cir.call @_ZN1AC1Ev([[A_THIS:%.*]]) : (!cir.ptr) -> () +// CHECK: cir.call @_ZN1A3fooEv([[A_THIS]]) : (!cir.ptr) -> !s32i +// CHECK: cir.call @_ZN1A3barEv([[A_THIS]]) : (!cir.ptr) -> !s32i + +// LLVM-LABEL: test_lambda_this1 +// LLVM: [[A_THIS:%.*]] = alloca %struct.A, i64 1, align 4 +// LLVM: call void @_ZN1AC1Ev(ptr [[A_THIS]]) +// LLVM: call i32 @_ZN1A3fooEv(ptr [[A_THIS]]) +// LLVM: call i32 @_ZN1A3barEv(ptr [[A_THIS]]) From 0091bdb93eb5b7b3eb2fe103c1baeaa0a2a70998 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 17 Dec 2024 20:34:42 +0300 Subject: [PATCH 2173/2301] [CIR][CIRGen] Fixes function calls with return values and cleanup stage (#1214) #### The Problem Let's take a look at the following code: ``` struct A { ~A() {} }; int foo() { return 42; } void bar() { A a; int b = foo(); } ``` The call to `foo` guarded by the synthetic `tryOp` looks approximately like the following: ``` cir.try synthetic cleanup { %2 = cir.call exception @_Z3foov() : () -> !s32i cleanup { cir.call @_ZN1AD1Ev(%0) : (!cir.ptr) -> () extra(#fn_attr1) // call to destructor of 'A' cir.yield } cir.yield } catch [#cir.unwind { cir.resume }] cir.store %2, %1: !s32i, !cir.ptr // CIR verification error ``` The result of the `foo` call is in the `try` region - and is not accessible from the outside, so the code generation fails with `operand #0 does not dominate its use` . #### Solution So we have several options how to handle this properly. 1. We may intpoduce a new operation here, like `TryCall` but probably more high level one, e.g. introduce the `InvokeOp`. 2. Also, we may add the result to `TryOp`. 3. The fast fix that is implemented in this PR is a temporary `alloca` where we store the call result right in the try region. And the result of the whole `emitCall` is a `load` from the temp `alloca`. So this PR is both the request for changes and an open discussion as well - how to handle this properly. So far I choose the third approach. If it's ok - I will need to create one more PR with a similar fix for the aggregated results or update this one. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 23 ++++++++++++++++ clang/test/CIR/CodeGen/try-catch-dtors.cpp | 31 ++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 6e31e2f41311..45bcdbf40cee 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -552,6 +552,17 @@ static cir::CIRCallOpInterface emitCallLikeOp( extraFnAttrs); } +static RValue getRValueThroughMemory(mlir::Location loc, + CIRGenBuilderTy &builder, mlir::Value val, + Address addr) { + auto ip = builder.saveInsertionPoint(); + builder.setInsertionPointAfterValue(val); + builder.createStore(loc, val, addr); + builder.restoreInsertionPoint(ip); + auto load = builder.createLoad(loc, addr); + return RValue::get(load); +} + RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &CallInfo, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, @@ -890,6 +901,18 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &CallInfo, auto Results = theCall->getOpResults(); assert(Results.size() <= 1 && "multiple returns NYI"); assert(Results[0].getType() == RetCIRTy && "Bitcast support NYI"); + + mlir::Region *region = builder.getBlock()->getParent(); + if (region != theCall->getParentRegion()) { + Address DestPtr = ReturnValue.getValue(); + + if (!DestPtr.isValid()) + DestPtr = CreateMemTemp(RetTy, callLoc, "tmp.try.call.res"); + + return getRValueThroughMemory(callLoc, builder, Results[0], + DestPtr); + } + return RValue::get(Results[0]); } default: diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 40c35434e17c..5cf0bf0a1a0b 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -308,3 +308,34 @@ void yo2(bool x) { // CIR: } catch [type #cir.all { // CIR: cir.catch_param -> !cir.ptr // CIR: }] + + +int foo() { return 42; } + +struct A { + ~A() {} +}; + +void bar() { + A a; + int b = foo(); +} + +// CIR-LABEL: @_Z3barv +// CIR: %[[V0:.*]] = cir.alloca !ty_A, !cir.ptr, ["a"] {alignment = 1 : i64} +// CIR: %[[V1:.*]] = cir.alloca !s32i, !cir.ptr, ["b", init] {alignment = 4 : i64} +// CIR: %[[V2:.*]] = cir.alloca !s32i, !cir.ptr, ["tmp.try.call.res"] {alignment = 4 : i64} +// CIR: cir.try synthetic cleanup { +// CIR: %[[V4:.*]] = cir.call exception @_Z3foov() : () -> !s32i cleanup { +// CIR: cir.call @_ZN1AD2Ev(%[[V0]]) : (!cir.ptr) -> () extra(#fn_attr) +// CIR: cir.yield +// CIR: } +// CIR: cir.store %[[V4]], %[[V2]] : !s32i, !cir.ptr +// CIR: cir.yield +// CIR: } catch [#cir.unwind { +// CIR: cir.resume +// CIR: }] +// CIR: %[[V3:.*]] = cir.load %[[V2]] : !cir.ptr, !s32i +// CIR: cir.store %[[V3]], %[[V1]] : !s32i, !cir.ptr +// CIR: cir.call @_ZN1AD2Ev(%[[V0]]) : (!cir.ptr) -> () extra(#fn_attr) +// CIR: cir.return From d5d99020bf536a9e705efac111558544e83caae3 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 18 Dec 2024 04:26:21 +0800 Subject: [PATCH 2174/2301] [CIR] Put loop body in nested scopes (#1221) This PR puts for-loop body, while-loop body, and do-while-loop body in nested scopes. Allocas in the loop body are now push down to the nested scope. Resolve #1218 . --- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 16 +-- .../CIR/Dialect/Transforms/LifetimeCheck.cpp | 7 - clang/test/CIR/CodeGen/goto.cpp | 6 +- clang/test/CIR/CodeGen/loop-scope.cpp | 9 +- clang/test/CIR/CodeGen/loop.cpp | 130 +++++++++++------- clang/test/CIR/Lowering/ThroughMLIR/doWhile.c | 62 +++++---- clang/test/CIR/Lowering/ThroughMLIR/while.c | 17 ++- 7 files changed, 138 insertions(+), 109 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 8708eeecb7e5..76d78dc09c76 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -910,13 +910,9 @@ mlir::LogicalResult CIRGenFunction::emitForStmt(const ForStmt &S) { }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - // https://en.cppreference.com/w/cpp/language/for - // While in C++, the scope of the init-statement and the scope of - // statement are one and the same, in C the scope of statement is - // nested within the scope of init-statement. - bool useCurrentScope = - CGM.getASTContext().getLangOpts().CPlusPlus ? true : false; - if (emitStmt(S.getBody(), useCurrentScope).failed()) + // The scope of the for loop body is nested within the scope of the + // for loop's init-statement and condition. + if (emitStmt(S.getBody(), /*useCurrentScope=*/false).failed()) loopRes = mlir::failure(); emitStopPoint(&S); }, @@ -973,7 +969,8 @@ mlir::LogicalResult CIRGenFunction::emitDoStmt(const DoStmt &S) { }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - if (emitStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + // The scope of the do-while loop body is a nested scope. + if (emitStmt(S.getBody(), /*useCurrentScope=*/false).failed()) loopRes = mlir::failure(); emitStopPoint(&S); }); @@ -1028,7 +1025,8 @@ mlir::LogicalResult CIRGenFunction::emitWhileStmt(const WhileStmt &S) { }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - if (emitStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + // The scope of the while loop body is a nested scope. + if (emitStmt(S.getBody(), /*useCurrentScope=*/false).failed()) loopRes = mlir::failure(); emitStopPoint(&S); }); diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 368c36b48946..08afedf804e3 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -967,13 +967,6 @@ void LifetimeCheckPass::classifyAndInitTypeCategories(mlir::Value addr, mlir::Type t, mlir::Location loc, unsigned nestLevel) { - // The same alloca can be hit more than once when checking for dangling - // pointers out of subsequent loop iterations (e.g. second iteraton using - // pointer invalidated in the first run). Since we copy the pmap out to - // start those subsequent checks, make sure sure we skip existing alloca - // tracking. - if (getPmap().count(addr)) - return; getPmap()[addr] = {}; enum TypeCategory { diff --git a/clang/test/CIR/CodeGen/goto.cpp b/clang/test/CIR/CodeGen/goto.cpp index 840b6227696c..8579720104c8 100644 --- a/clang/test/CIR/CodeGen/goto.cpp +++ b/clang/test/CIR/CodeGen/goto.cpp @@ -171,13 +171,16 @@ int jumpIntoLoop(int* ar) { // CHECK: ^bb[[#BLK6]]: // CHECK: cir.br ^bb[[#COND:]] // CHECK: ^bb[[#COND]]: -// CHECK: cir.brcond {{.*}} ^bb[[#BODY]], ^bb[[#EXIT:]] +// CHECK: cir.brcond {{.*}} ^bb[[#BLK8:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BLK8]]: +// CHECK: cir.br ^bb[[#BODY]] // CHECK: ^bb[[#BODY]]: // CHECK: cir.br ^bb[[#COND]] // CHECK: ^bb[[#EXIT]]: // CHECK: cir.br ^bb[[#BLK7:]] // CHECK: ^bb[[#BLK7]]: // CHECK: cir.br ^bb[[#RETURN]] +// CHECK: } @@ -222,6 +225,7 @@ int jumpFromLoop(int* ar) { // CHECK: cir.br ^bb[[#RETURN2:]] // CHECK: ^bb[[#RETURN2]]: // CHECK: cir.return +// CHECK: } void flatLoopWithNoTerminatorInFront(int* ptr) { diff --git a/clang/test/CIR/CodeGen/loop-scope.cpp b/clang/test/CIR/CodeGen/loop-scope.cpp index fcc45a892e3d..9e5a7b9c262c 100644 --- a/clang/test/CIR/CodeGen/loop-scope.cpp +++ b/clang/test/CIR/CodeGen/loop-scope.cpp @@ -12,11 +12,14 @@ void l0(void) { // CPPSCOPE: cir.func @_Z2l0v() // CPPSCOPE-NEXT: cir.scope { // CPPSCOPE-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} -// CPPSCOPE-NEXT: %1 = cir.alloca !s32i, !cir.ptr, ["j", init] {alignment = 4 : i64} -// CPPSCOPE-NEXT: %2 = cir.const #cir.int<0> : !s32i -// CPPSCOPE-NEXT: cir.store %2, %0 : !s32i, !cir.ptr +// CPPSCOPE-NEXT: %1 = cir.const #cir.int<0> : !s32i +// CPPSCOPE-NEXT: cir.store %1, %0 : !s32i, !cir.ptr // CPPSCOPE-NEXT: cir.for : cond { +// CPPSCOPE: } body { +// CPPSCOPE-NEXT: cir.scope { +// CPPSCOPE-NEXT: %2 = cir.alloca !s32i, !cir.ptr, ["j", init] {alignment = 4 : i64} + // CSCOPE: cir.func @l0() // CSCOPE-NEXT: cir.scope { // CSCOPE-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 64909759fd25..5984ded2592c 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -25,10 +25,12 @@ void l1() { // CHECK-NEXT: %6 = cir.cmp(lt, %4, %5) : !s32i, !cir.bool // CHECK-NEXT: cir.condition(%6) // CHECK-NEXT: } body { -// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i -// CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) nsw : !s32i -// CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) nsw : !s32i +// CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } step { // CHECK-NEXT: %4 = cir.load %2 : !cir.ptr, !s32i @@ -57,10 +59,12 @@ void l2(bool cond) { // CHECK-NEXT: %3 = cir.load %0 : !cir.ptr, !cir.bool // CHECK-NEXT: cir.condition(%3) // CHECK-NEXT: } do { -// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } @@ -69,10 +73,12 @@ void l2(bool cond) { // CHECK-NEXT: %[[#TRUE:]] = cir.const #true // CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } do { -// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } @@ -82,10 +88,12 @@ void l2(bool cond) { // CHECK-NEXT: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool // CHECK-NEXT: cir.condition(%4) // CHECK-NEXT: } do { -// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } @@ -106,10 +114,12 @@ void l3(bool cond) { // CHECK: cir.func @_Z2l3b // CHECK: cir.scope { // CHECK-NEXT: cir.do { -// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { // CHECK-NEXT: %[[#TRUE:]] = cir.load %0 : !cir.ptr, !cir.bool @@ -118,10 +128,12 @@ void l3(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.do { -// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { // CHECK-NEXT: %[[#TRUE:]] = cir.const #true @@ -130,10 +142,12 @@ void l3(bool cond) { // CHECK-NEXT: } // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.do { -// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i -// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i -// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %3 = cir.load %1 : !cir.ptr, !s32i +// CHECK-NEXT: %4 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %5 = cir.binop(add, %3, %4) nsw : !s32i +// CHECK-NEXT: cir.store %5, %1 : !s32i, !cir.ptr +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } while { // CHECK-NEXT: %3 = cir.const #cir.int<1> : !s32i @@ -157,18 +171,19 @@ void l4() { // CHECK-NEXT: %[[#TRUE:]] = cir.const #true // CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } do { -// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i -// CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: %6 = cir.binop(add, %4, %5) nsw : !s32i -// CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %10 = cir.load %0 : !cir.ptr, !s32i -// CHECK-NEXT: %11 = cir.const #cir.int<10> : !s32i -// CHECK-NEXT: %12 = cir.cmp(lt, %10, %11) : !s32i, !cir.bool -// CHECK-NEXT: cir.if %12 { -// CHECK-NEXT: cir.continue +// CHECK-NEXT: %4 = cir.load %0 : !cir.ptr, !s32i +// CHECK-NEXT: %5 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: %6 = cir.binop(add, %4, %5) nsw : !s32i +// CHECK-NEXT: cir.store %6, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %10 = cir.load %0 : !cir.ptr, !s32i +// CHECK-NEXT: %11 = cir.const #cir.int<10> : !s32i +// CHECK-NEXT: %12 = cir.cmp(lt, %10, %11) : !s32i, !cir.bool +// CHECK-NEXT: cir.if %12 { +// CHECK-NEXT: cir.continue +// CHECK-NEXT: } // CHECK-NEXT: } -// CHECK-NEXT: } void l5() { do { @@ -200,7 +215,10 @@ void l6() { // CHECK-NEXT: %[[#TRUE:]] = cir.const #true // CHECK-NEXT: cir.condition(%[[#TRUE]]) // CHECK-NEXT: } do { -// CHECK-NEXT: cir.return +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: cir.return +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield // CHECK-NEXT: } // CHECK-NEXT: } // CHECK-NEXT: cir.return @@ -215,15 +233,18 @@ void unreachable_after_break() { // CHECK-NEXT: cir.func @_Z23unreachable_after_breakv() // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK-NEXT: cir.for : cond { -// CHECK-NEXT: %1 = cir.const #true -// CHECK-NEXT: cir.condition(%1) +// CHECK-NEXT: %0 = cir.const #true +// CHECK-NEXT: cir.condition(%0) // CHECK-NEXT: } body { -// CHECK-NEXT: cir.break -// CHECK-NEXT: ^bb1: // no predecessors -// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.break +// CHECK-NEXT: ^bb1: // no predecessors +// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } step { // CHECK-NEXT: cir.yield @@ -241,15 +262,18 @@ void unreachable_after_continue() { // CHECK-NEXT: cir.func @_Z26unreachable_after_continuev() // CHECK-NEXT: cir.scope { -// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK-NEXT: cir.for : cond { -// CHECK-NEXT: %1 = cir.const #true -// CHECK-NEXT: cir.condition(%1) +// CHECK-NEXT: %0 = cir.const #true +// CHECK-NEXT: cir.condition(%0) // CHECK-NEXT: } body { -// CHECK-NEXT: cir.continue -// CHECK-NEXT: ^bb1: // no predecessors -// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CHECK-NEXT: cir.continue +// CHECK-NEXT: ^bb1: // no predecessors +// CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store %1, %0 : !s32i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: cir.yield // CHECK-NEXT: } step { // CHECK-NEXT: cir.yield diff --git a/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c b/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c index 8cc32dc96c94..cf1e275caece 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c @@ -32,14 +32,16 @@ void nestedDoWhile() { // CHECK: memref.store %[[C0_I32_2]], %[[ALLOC1]][] : memref // CHECK: memref.alloca_scope { // CHECK: scf.while : () -> () { -// CHECK: %[[VAR1:.+]] = memref.load %[[ALLOC1]][] : memref -// CHECK: %[[VAR2:.+]] = memref.load %[[ALLOC0]][] : memref -// CHECK: %[[ADD:.+]] = arith.addi %[[VAR2]], %[[VAR1]] : i32 -// CHECK: memref.store %[[ADD]], %[[ALLOC0]][] : memref -// CHECK: %[[VAR3:.+]] = memref.load %[[ALLOC1]][] : memref -// CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 -// CHECK: %[[ADD1:.+]] = arith.addi %[[VAR3]], %[[C1_I32]] : i32 -// CHECK: memref.store %[[ADD1]], %[[ALLOC1]][] : memref +// CHECK: memref.alloca_scope { +// CHECK: %[[VAR1:.+]] = memref.load %[[ALLOC1]][] : memref +// CHECK: %[[VAR2:.+]] = memref.load %[[ALLOC0]][] : memref +// CHECK: %[[ADD:.+]] = arith.addi %[[VAR2]], %[[VAR1]] : i32 +// CHECK: memref.store %[[ADD]], %[[ALLOC0]][] : memref +// CHECK: %[[VAR3:.+]] = memref.load %[[ALLOC1]][] : memref +// CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +// CHECK: %[[ADD1:.+]] = arith.addi %[[VAR3]], %[[C1_I32]] : i32 +// CHECK: memref.store %[[ADD1]], %[[ALLOC1]][] : memref +// CHECK: } // CHECK: %[[VAR4:.+]] = memref.load %[[ALLOC1]][] : memref // CHECK: %[[C10_I32:.+]] = arith.constant 10 : i32 // CHECK: %[[CMP:.+]] = arith.cmpi sle, %[[VAR4]], %[[C10_I32]] : i32 @@ -60,28 +62,30 @@ void nestedDoWhile() { // CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 // CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref // CHECK: memref.alloca_scope { -// CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref // CHECK: scf.while : () -> () { -// CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref -// CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 -// CHECK: %[[ONE:.+]] = arith.addi %[[ZERO]], %[[C1_I32]] : i32 -// CHECK: memref.store %[[ONE]], %[[alloca]][] : memref -// CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 -// CHECK: memref.store %[[C0_I32_1]], %[[alloca_0]][] : memref -// CHECK: memref.alloca_scope { -// CHECK: scf.while : () -> () { -// CHECK: %[[EIGHT:.+]] = memref.load %[[alloca_0]][] : memref -// CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 -// CHECK: %[[NINE:.+]] = arith.cmpi slt, %[[EIGHT]], %[[C2_I32_3]] : i32 -// CHECK: %[[TWELVE:.+]] = arith.extui %[[NINE]] : i1 to i8 -// CHECK: %[[THIRTEEN:.+]] = arith.trunci %[[TWELVE]] : i8 to i1 -// CHECK: scf.condition(%[[THIRTEEN]]) -// CHECK: } do { -// CHECK: %[[EIGHT]] = memref.load %[[alloca_0]][] : memref -// CHECK: %[[C1_I32_3:.+]] = arith.constant 1 : i32 -// CHECK: %[[NINE]] = arith.addi %[[EIGHT]], %[[C1_I32_3]] : i32 -// CHECK: memref.store %[[NINE]], %[[alloca_0]][] : memref -// CHECK: scf.yield +// CHECK: memref.alloca_scope { +// CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref +// CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +// CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +// CHECK: %[[ONE:.+]] = arith.addi %[[ZERO]], %[[C1_I32]] : i32 +// CHECK: memref.store %[[ONE]], %[[alloca]][] : memref +// CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 +// CHECK: memref.store %[[C0_I32_1]], %[[alloca_0]][] : memref +// CHECK: memref.alloca_scope { +// CHECK: scf.while : () -> () { +// CHECK: %[[EIGHT:.+]] = memref.load %[[alloca_0]][] : memref +// CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 +// CHECK: %[[NINE:.+]] = arith.cmpi slt, %[[EIGHT]], %[[C2_I32_3]] : i32 +// CHECK: %[[TWELVE:.+]] = arith.extui %[[NINE]] : i1 to i8 +// CHECK: %[[THIRTEEN:.+]] = arith.trunci %[[TWELVE]] : i8 to i1 +// CHECK: scf.condition(%[[THIRTEEN]]) +// CHECK: } do { +// CHECK: %[[EIGHT]] = memref.load %[[alloca_0]][] : memref +// CHECK: %[[C1_I32_3:.+]] = arith.constant 1 : i32 +// CHECK: %[[NINE]] = arith.addi %[[EIGHT]], %[[C1_I32_3]] : i32 +// CHECK: memref.store %[[NINE]], %[[alloca_0]][] : memref +// CHECK: scf.yield +// CHECK: } // CHECK: } // CHECK: } // CHECK: %[[TWO:.+]] = memref.load %[[alloca]][] : memref diff --git a/clang/test/CIR/Lowering/ThroughMLIR/while.c b/clang/test/CIR/Lowering/ThroughMLIR/while.c index 8cc1f7bca30d..5621e1fc7c4a 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/while.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/while.c @@ -32,10 +32,12 @@ void nestedWhile() { //CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR:.+]] : i8 to i1 //CHECK: scf.condition(%[[FIVE]]) //CHECK: } do { -//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref -//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 -//CHECK: %[[ONE:.+]] = arith.addi %0, %[[C1_I32:.+]] : i32 -//CHECK: memref.store %[[ONE:.+]], %[[alloca]][] : memref +//CHECK: memref.alloca_scope { +//CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref +//CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 +//CHECK: %[[ONE:.+]] = arith.addi %0, %[[C1_I32:.+]] : i32 +//CHECK: memref.store %[[ONE:.+]], %[[alloca]][] : memref +//CHECK: } //CHECK: scf.yield //CHECK: } //CHECK: } @@ -47,7 +49,6 @@ void nestedWhile() { //CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32 //CHECK: memref.store %[[C0_I32]], %[[alloca]][] : memref //CHECK: memref.alloca_scope { -//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref //CHECK: scf.while : () -> () { //CHECK: %[[ZERO:.+]] = memref.load %alloca[] : memref //CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 @@ -56,6 +57,8 @@ void nestedWhile() { //CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR]] : i8 to i1 //CHECK: scf.condition(%[[FIVE]]) //CHECK: } do { +//CHECK: memref.alloca_scope { +//CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref //CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 //CHECK: memref.store %[[C0_I32_1]], %[[alloca_0]][] : memref //CHECK: memref.alloca_scope { @@ -78,9 +81,9 @@ void nestedWhile() { //CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 //CHECK: %[[ONE]] = arith.addi %[[ZERO]], %[[C1_I32]] : i32 //CHECK: memref.store %[[ONE]], %[[alloca]][] : memref -//CHECK: scf.yield //CHECK: } +//CHECK: scf.yield //CHECK: } -//CHECK: return //CHECK: } +//CHECK: return //CHECK: } From 61957db3db8d4b5a50fdbfaa38fad99f7e6e1567 Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 17 Dec 2024 17:25:19 -0500 Subject: [PATCH 2175/2301] [CIR][Dialect] Add FMaximumOp and FMinimumOp (#1237) There are two sets of intrinsics regarding Min and Max operations for floating points [Maximum](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrmaximum-llvmmaximumop) vs [Maxnum](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrmaxnum-llvmmaxnumop) [Minimum](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrminimum-llvmminimumop) vs [Minnum](https://mlir.llvm.org/docs/Dialects/LLVM/#llvmintrminnum-llvmminnumop) [The difference is whether NaN should be propagated when one of the inputs is NaN](https://llvm.org/docs/LangRef.html#llvm-maximumnum-intrinsic) Maxnum and Minnum would return number if one of inputs is NaN, and the other is a number, But Maximum and Minimum would return NaN (propagation of NaN) And they are resolved to different ASM such as [FMAX](https://developer.arm.com/documentation/ddi0596/2021-03/SIMD-FP-Instructions/FMAX--vector---Floating-point-Maximum--vector--?lang=en) vs [FMAXNM](https://developer.arm.com/documentation/ddi0596/2021-03/SIMD-FP-Instructions/FMAXNM--vector---Floating-point-Maximum-Number--vector--?lang=en) Both have user cases, we already implemented Maxnum and Minnum But Maximum and Minimum has user cases in [neon intrinsic ](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32 ) and [__builtin_elementwise_maximum ](https://github.com/llvm/clangir/blob/a989ecb2c55da1fe28e4072c31af025cba6c4f0f/clang/test/CodeGen/strictfp-elementwise-bulitins.cpp#L53) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 ++-- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 4 +-- .../test/CIR/CodeGen/builtin-floating-point.c | 32 +++++++++---------- .../CIR/Lowering/builtin-floating-point.cir | 30 +++++++++++++---- 4 files changed, 46 insertions(+), 26 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 798a1718abe9..52e1013b88a6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -4520,8 +4520,10 @@ class BinaryFPToFPBuiltinOp } def CopysignOp : BinaryFPToFPBuiltinOp<"copysign", "CopySignOp">; -def FMaxOp : BinaryFPToFPBuiltinOp<"fmax", "MaxNumOp">; -def FMinOp : BinaryFPToFPBuiltinOp<"fmin", "MinNumOp">; +def FMaxNumOp : BinaryFPToFPBuiltinOp<"fmaxnum", "MaxNumOp">; +def FMinNumOp : BinaryFPToFPBuiltinOp<"fminnum", "MinNumOp">; +def FMaximumOp : BinaryFPToFPBuiltinOp<"fmaximum", "MaximumOp">; +def FMinimumOp : BinaryFPToFPBuiltinOp<"fminimum", "MinimumOp">; def FModOp : BinaryFPToFPBuiltinOp<"fmod", "FRemOp">; def PowOp : BinaryFPToFPBuiltinOp<"pow", "PowOp">; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 47c4d3dc0f18..93c919134c21 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -670,7 +670,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fmaxf: case Builtin::BI__builtin_fmaxl: return RValue::get( - emitBinaryMaybeConstrainedFPBuiltin(*this, *E)); + emitBinaryMaybeConstrainedFPBuiltin(*this, *E)); case Builtin::BI__builtin_fmaxf16: case Builtin::BI__builtin_fmaxf128: @@ -683,7 +683,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fminf: case Builtin::BI__builtin_fminl: return RValue::get( - emitBinaryMaybeConstrainedFPBuiltin(*this, *E)); + emitBinaryMaybeConstrainedFPBuiltin(*this, *E)); case Builtin::BI__builtin_fminf16: case Builtin::BI__builtin_fminf128: diff --git a/clang/test/CIR/CodeGen/builtin-floating-point.c b/clang/test/CIR/CodeGen/builtin-floating-point.c index e882d8606458..2e9b18c51a33 100644 --- a/clang/test/CIR/CodeGen/builtin-floating-point.c +++ b/clang/test/CIR/CodeGen/builtin-floating-point.c @@ -1300,7 +1300,7 @@ long double call_copysignl(long double x, long double y) { float my_fmaxf(float x, float y) { return __builtin_fmaxf(x, y); // CHECK: cir.func @my_fmaxf - // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.float + // CHECK: %{{.+}} = cir.fmaxnum %{{.+}}, %{{.+}} : !cir.float // LLVM: define dso_local float @my_fmaxf // LLVM: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) @@ -1310,7 +1310,7 @@ float my_fmaxf(float x, float y) { double my_fmax(double x, double y) { return __builtin_fmax(x, y); // CHECK: cir.func @my_fmax - // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.double + // CHECK: %{{.+}} = cir.fmaxnum %{{.+}}, %{{.+}} : !cir.double // LLVM: define dso_local double @my_fmax // LLVM: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) @@ -1320,8 +1320,8 @@ double my_fmax(double x, double y) { long double my_fmaxl(long double x, long double y) { return __builtin_fmaxl(x, y); // CHECK: cir.func @my_fmaxl - // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double - // AARCH64: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double + // CHECK: %{{.+}} = cir.fmaxnum %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.fmaxnum %{{.+}}, %{{.+}} : !cir.long_double // LLVM: define dso_local x86_fp80 @my_fmaxl // LLVM: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) @@ -1335,7 +1335,7 @@ long double fmaxl(long double, long double); float call_fmaxf(float x, float y) { return fmaxf(x, y); // CHECK: cir.func @call_fmaxf - // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.float + // CHECK: %{{.+}} = cir.fmaxnum %{{.+}}, %{{.+}} : !cir.float // LLVM: define dso_local float @call_fmaxf // LLVM: %{{.+}} = call float @llvm.maxnum.f32(float %{{.+}}, float %{{.+}}) @@ -1345,7 +1345,7 @@ float call_fmaxf(float x, float y) { double call_fmax(double x, double y) { return fmax(x, y); // CHECK: cir.func @call_fmax - // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.double + // CHECK: %{{.+}} = cir.fmaxnum %{{.+}}, %{{.+}} : !cir.double // LLVM: define dso_local double @call_fmax // LLVM: %{{.+}} = call double @llvm.maxnum.f64(double %{{.+}}, double %{{.+}}) @@ -1355,8 +1355,8 @@ double call_fmax(double x, double y) { long double call_fmaxl(long double x, long double y) { return fmaxl(x, y); // CHECK: cir.func @call_fmaxl - // CHECK: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double - // AARCH64: %{{.+}} = cir.fmax %{{.+}}, %{{.+}} : !cir.long_double + // CHECK: %{{.+}} = cir.fmaxnum %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.fmaxnum %{{.+}}, %{{.+}} : !cir.long_double // LLVM: define dso_local x86_fp80 @call_fmaxl // LLVM: %{{.+}} = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) @@ -1368,7 +1368,7 @@ long double call_fmaxl(long double x, long double y) { float my_fminf(float x, float y) { return __builtin_fminf(x, y); // CHECK: cir.func @my_fminf - // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.float + // CHECK: %{{.+}} = cir.fminnum %{{.+}}, %{{.+}} : !cir.float // LLVM: define dso_local float @my_fminf // LLVM: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) @@ -1378,7 +1378,7 @@ float my_fminf(float x, float y) { double my_fmin(double x, double y) { return __builtin_fmin(x, y); // CHECK: cir.func @my_fmin - // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.double + // CHECK: %{{.+}} = cir.fminnum %{{.+}}, %{{.+}} : !cir.double // LLVM: define dso_local double @my_fmin // LLVM: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) @@ -1388,8 +1388,8 @@ double my_fmin(double x, double y) { long double my_fminl(long double x, long double y) { return __builtin_fminl(x, y); // CHECK: cir.func @my_fminl - // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double - // AARCH64: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double + // CHECK: %{{.+}} = cir.fminnum %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.fminnum %{{.+}}, %{{.+}} : !cir.long_double // LLVM: define dso_local x86_fp80 @my_fminl // LLVM: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) @@ -1403,7 +1403,7 @@ long double fminl(long double, long double); float call_fminf(float x, float y) { return fminf(x, y); // CHECK: cir.func @call_fminf - // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.float + // CHECK: %{{.+}} = cir.fminnum %{{.+}}, %{{.+}} : !cir.float // LLVM: define dso_local float @call_fminf // LLVM: %{{.+}} = call float @llvm.minnum.f32(float %{{.+}}, float %{{.+}}) @@ -1413,7 +1413,7 @@ float call_fminf(float x, float y) { double call_fmin(double x, double y) { return fmin(x, y); // CHECK: cir.func @call_fmin - // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.double + // CHECK: %{{.+}} = cir.fminnum %{{.+}}, %{{.+}} : !cir.double // LLVM: define dso_local double @call_fmin // LLVM: %{{.+}} = call double @llvm.minnum.f64(double %{{.+}}, double %{{.+}}) @@ -1423,8 +1423,8 @@ double call_fmin(double x, double y) { long double call_fminl(long double x, long double y) { return fminl(x, y); // CHECK: cir.func @call_fminl - // CHECK: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double - // AARCH64: %{{.+}} = cir.fmin %{{.+}}, %{{.+}} : !cir.long_double + // CHECK: %{{.+}} = cir.fminnum %{{.+}}, %{{.+}} : !cir.long_double + // AARCH64: %{{.+}} = cir.fminnum %{{.+}}, %{{.+}} : !cir.long_double // LLVM: define dso_local x86_fp80 @call_fminl // LLVM: %{{.+}} = call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}}) diff --git a/clang/test/CIR/Lowering/builtin-floating-point.cir b/clang/test/CIR/Lowering/builtin-floating-point.cir index 4ccf7b1ab49d..211a6f2a1790 100644 --- a/clang/test/CIR/Lowering/builtin-floating-point.cir +++ b/clang/test/CIR/Lowering/builtin-floating-point.cir @@ -138,22 +138,22 @@ module { %215 = cir.copysign %arg2, %arg2 : !cir.vector // CHECK: llvm.intr.copysign(%arg2, %arg2) : (vector<4xf32>, vector<4xf32>) -> vector<4xf32> - %16 = cir.fmax %arg0, %arg0 : !cir.float + %16 = cir.fmaxnum %arg0, %arg0 : !cir.float // CHECK: llvm.intr.maxnum(%arg0, %arg0) : (f32, f32) -> f32 - %116 = cir.fmax %arg1, %arg1 : !cir.vector + %116 = cir.fmaxnum %arg1, %arg1 : !cir.vector // CHECK: llvm.intr.maxnum(%arg1, %arg1) : (vector<2xf64>, vector<2xf64>) -> vector<2xf64> - %216 = cir.fmax %arg2, %arg2 : !cir.vector + %216 = cir.fmaxnum %arg2, %arg2 : !cir.vector // CHECK: llvm.intr.maxnum(%arg2, %arg2) : (vector<4xf32>, vector<4xf32>) -> vector<4xf32> - %17 = cir.fmin %arg0, %arg0 : !cir.float + %17 = cir.fminnum %arg0, %arg0 : !cir.float // CHECK: llvm.intr.minnum(%arg0, %arg0) : (f32, f32) -> f32 - %117 = cir.fmin %arg1, %arg1 : !cir.vector + %117 = cir.fminnum %arg1, %arg1 : !cir.vector // CHECK: llvm.intr.minnum(%arg1, %arg1) : (vector<2xf64>, vector<2xf64>) -> vector<2xf64> - %217 = cir.fmin %arg2, %arg2 : !cir.vector + %217 = cir.fminnum %arg2, %arg2 : !cir.vector // CHECK: llvm.intr.minnum(%arg2, %arg2) : (vector<4xf32>, vector<4xf32>) -> vector<4xf32> %18 = cir.fmod %arg0, %arg0 : !cir.float @@ -174,6 +174,24 @@ module { %219 = cir.pow %arg2, %arg2 : !cir.vector // CHECK: llvm.intr.pow(%arg2, %arg2) : (vector<4xf32>, vector<4xf32>) -> vector<4xf32> + %20 = cir.fmaximum %arg0, %arg0 : !cir.float + // CHECK: llvm.intr.maximum(%arg0, %arg0) : (f32, f32) -> f32 + + %120 = cir.fmaximum %arg1, %arg1 : !cir.vector + // CHECK: llvm.intr.maximum(%arg1, %arg1) : (vector<2xf64>, vector<2xf64>) -> vector<2xf64> + + %220 = cir.fmaximum %arg2, %arg2 : !cir.vector + // CHECK: llvm.intr.maximum(%arg2, %arg2) : (vector<4xf32>, vector<4xf32>) -> vector<4xf32> + + %21 = cir.fminimum %arg0, %arg0 : !cir.float + // CHECK: llvm.intr.minimum(%arg0, %arg0) : (f32, f32) -> f32 + + %121 = cir.fminimum %arg1, %arg1 : !cir.vector + // CHECK: llvm.intr.minimum(%arg1, %arg1) : (vector<2xf64>, vector<2xf64>) -> vector<2xf64> + + %221 = cir.fminimum %arg2, %arg2 : !cir.vector + // CHECK: llvm.intr.minimum(%arg2, %arg2) : (vector<4xf32>, vector<4xf32>) -> vector<4xf32> + cir.return } } From 5d791e78a3c32e47f6fc92b27f2aa3f79a723f34 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Thu, 19 Dec 2024 00:48:12 +0800 Subject: [PATCH 2176/2301] [CIR][CIRGen][TBAA] Add support for scalar types (#1220) --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 6 +- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 8 +- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +- .../clang/CIR/Dialect/IR/CIRTBAAAttrs.td | 38 +++++ .../include/clang/CIR/Dialect/IR/CIRTypes.td | 29 ++++ clang/include/clang/CIR/MissingFeatures.h | 7 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- clang/lib/CIR/CodeGen/CIRGenTBAA.cpp | 128 ++++++++++++++- clang/lib/CIR/CodeGen/CIRGenTBAA.h | 6 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 13 +- clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 83 +++++++++- clang/test/CIR/CodeGen/const-alloca.cpp | 6 +- clang/test/CIR/CodeGen/tbaa-scalar.c | 148 ++++++++++++++++++ clang/test/CIR/CodeGen/tbaa-struct.cpp | 35 +++++ clang/test/CIR/CodeGen/tbaa-vptr.cpp | 18 +++ clang/test/CIR/CodeGen/tbaa.c | 22 --- 20 files changed, 505 insertions(+), 58 deletions(-) create mode 100644 clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td create mode 100644 clang/test/CIR/CodeGen/tbaa-scalar.c create mode 100644 clang/test/CIR/CodeGen/tbaa-struct.cpp create mode 100644 clang/test/CIR/CodeGen/tbaa-vptr.cpp delete mode 100644 clang/test/CIR/CodeGen/tbaa.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 771b7dd33cd4..48d1f1faf53f 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -170,7 +170,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { /*alignment=*/intAttr, /*mem_order=*/ cir::MemOrderAttr{}, - /*tbaa=*/mlir::ArrayAttr{}); + /*tbaa=*/cir::TBAAAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr, @@ -357,7 +357,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { val.getType()) dst = createPtrBitcast(dst, val.getType()); return create(loc, val, dst, _volatile, align, order, - /*tbaa=*/mlir::ArrayAttr{}); + /*tbaa=*/cir::TBAAAttr{}); } mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, @@ -405,7 +405,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { cir::CopyOp createCopy(mlir::Value dst, mlir::Value src, bool isVolatile = false) { return create(dst.getLoc(), dst, src, isVolatile, - /*tbaa=*/mlir::ArrayAttr{}); + /*tbaa=*/cir::TBAAAttr{}); } cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index e54b52b96c91..e968d4c27fd5 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -24,8 +24,9 @@ include "clang/CIR/Interfaces/ASTAttrInterfaces.td" // CIR Attrs //===----------------------------------------------------------------------===// -class CIR_Attr traits = []> - : AttrDef { +class CIR_Attr traits = [], + string baseCppClass = "::mlir::Attribute"> + : AttrDef { let mnemonic = attrMnemonic; } @@ -1294,8 +1295,7 @@ def GlobalAnnotationValuesAttr : CIR_Attr<"GlobalAnnotationValues", let genVerifyDecl = 1; } -def CIR_TBAAAttr : CIR_Attr<"TBAA", "tbaa", []> { -} +include "clang/CIR/Dialect/IR/CIRTBAAAttrs.td" include "clang/CIR/Dialect/IR/CIROpenCLAttrs.td" diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 52e1013b88a6..8d85482703e6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -588,7 +588,7 @@ def LoadOp : CIR_Op<"load", [ UnitAttr:$is_volatile, OptionalAttr:$alignment, OptionalAttr:$mem_order, - OptionalAttr:$tbaa + OptionalAttr:$tbaa ); let results = (outs CIR_AnyType:$result); @@ -657,7 +657,7 @@ def StoreOp : CIR_Op<"store", [ UnitAttr:$is_volatile, OptionalAttr:$alignment, OptionalAttr:$mem_order, - OptionalAttr:$tbaa); + OptionalAttr:$tbaa); let assemblyFormat = [{ (`volatile` $is_volatile^)? @@ -4068,7 +4068,7 @@ def CopyOp : CIR_Op<"copy", let arguments = (ins Arg:$dst, Arg:$src, UnitAttr:$is_volatile, - OptionalAttr:$tbaa); + OptionalAttr:$tbaa); let summary = "Copies contents from a CIR pointer to another"; let description = [{ Given two CIR pointers, `src` and `dst`, `cir.copy` will copy the memory diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td new file mode 100644 index 000000000000..d46880e8541e --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td @@ -0,0 +1,38 @@ +//===----------------------------------------------------------------------===// +// TBAAAttr +//===----------------------------------------------------------------------===// + +def CIR_TBAAAttr : CIR_Attr<"TBAA", "tbaa", []> { + let summary = "CIR dialect TBAA base attribute"; +} + +//===----------------------------------------------------------------------===// +// TBAAScalarAttr +//===----------------------------------------------------------------------===// + +def CIR_TBAAScalarAttr : CIR_Attr<"TBAAScalar", "tbaa_scalar", [], "TBAAAttr"> { + let summary = "Describes a scalar type in TBAA with an identifier."; + + let parameters = (ins CIR_AnyScalarType : $type); + + let description = [{ + Define a TBAA attribute. + + Example: + ```mlir + // CIR_TBAAScalarAttr + #tbaa_scalar = #cir.tbaa_scalar + #tbaa_scalar1 = #cir.tbaa_scalar + ``` + + See the following link for more details: + https://llvm.org/docs/LangRef.html#tbaa-metadata + }]; + + let assemblyFormat = "`<` struct(params) `>`"; +} + +def CIR_AnyTBAAAttr : AnyAttrOf<[ + CIR_TBAAAttr, + CIR_TBAAScalarAttr +]>; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index d3f49716301d..68b27a053176 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -74,6 +74,28 @@ def CIR_IntType : CIR_Type<"Int", "int", static bool isValidPrimitiveIntBitwidth(unsigned width) { return width == 8 || width == 16 || width == 32 || width == 64; } + + llvm::StringRef getTBAATypeName() const { + switch (getWidth()) { + case 1: + case 8: { + return "omnipotent char"; + } + case 16: { + return "short"; + } + case 32: { + return "int"; + } + case 64: { + return "long"; + } + default: { + llvm::errs() << "unknown type: " << *this << "\n"; + return "unknown"; + } + } + } }]; let genVerifyDecl = 1; } @@ -609,4 +631,11 @@ def CIR_AnyType : AnyTypeOf<[ CIR_ComplexType ]>; +def CIR_AnyScalarType : AnyTypeOf<[ + CIR_IntType, CIR_PointerType, CIR_DataMemberType, CIR_MethodType, + CIR_BoolType, CIR_ArrayType, CIR_VectorType, CIR_FuncType, CIR_VoidType, + CIR_ExceptionType, CIR_AnyFloat, CIR_FP16, CIR_BFloat16, + CIR_ComplexType +]>; + #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index c0707d687fca..346719691a5d 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -58,7 +58,12 @@ struct MissingFeatures { // sanitizer related type check features static bool emitTypeCheck() { return false; } static bool tbaa() { return false; } - static bool tbaa_struct() { return false; } + static bool tbaaStruct() { return false; } + static bool tbaaTagForStruct() { return false; } + static bool tbaaVTablePtr() { return false; } + static bool tbaaIncompleteType() { return false; } + static bool tbaaMergeTBAAInfo() { return false; } + static bool tbaaMayAlias() { return false; } static bool cleanups() { return false; } static bool emitNullabilityCheck() { return false; } static bool ptrAuth() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 28be733f62d7..3019ca8ef62b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -839,7 +839,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return create( loc, addr.getElementType(), addr.getPointer(), /*isDeref=*/false, /*is_volatile=*/isVolatile, /*alignment=*/mlir::IntegerAttr{}, - /*mem_order=*/cir::MemOrderAttr{}, /*tbaa=*/mlir::ArrayAttr{}); + /*mem_order=*/cir::MemOrderAttr{}, /*tbaa=*/cir::TBAAAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 46f89bf60d18..df4e97f7a179 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -1716,7 +1716,7 @@ void CIRGenFunction::emitAggregateCopy(LValue Dest, LValue Src, QualType Ty, // Determine the metadata to describe the position of any padding in this // memcpy, as well as the TBAA tags for the members of the struct, in case // the optimizer wishes to expand it in to scalar memory operations. - assert(!cir::MissingFeatures::tbaa_struct() && "tbaa.struct NYI"); + assert(!cir::MissingFeatures::tbaaStruct() && "tbaa.struct NYI"); if (CGM.getCodeGenOpts().NewStructPathTBAA) { TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer( Dest.getTBAAInfo(), Src.getTBAAInfo()); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index b7197afeb896..81d60477cae0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -3988,7 +3988,7 @@ cir::TBAAAttr CIRGenModule::getTBAABaseTypeInfo(QualType QTy) { return tbaa->getBaseTypeInfo(QTy); } -mlir::ArrayAttr CIRGenModule::getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo) { +cir::TBAAAttr CIRGenModule::getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo) { if (!tbaa) { return nullptr; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 905754a4ad3a..dd8a0c98b081 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -525,7 +525,7 @@ class CIRGenModule : public CIRGenTypeCache { /// type is not suitable for use in TBAA access tags. cir::TBAAAttr getTBAABaseTypeInfo(QualType QTy); - mlir::ArrayAttr getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo); + cir::TBAAAttr getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo); /// Get merged TBAA information for the purposes of type casts. TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp index a6efc05e4110..ce2969d130ff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp @@ -1,11 +1,12 @@ #include "CIRGenTBAA.h" -#include "CIRGenCXXABI.h" #include "CIRGenTypes.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "clang/AST/ASTContext.h" #include "clang/AST/RecordLayout.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" namespace clang::CIRGen { @@ -21,44 +22,159 @@ CIRGenTBAA::CIRGenTBAA(mlir::MLIRContext *mlirContext, : mlirContext(mlirContext), astContext(astContext), types(types), moduleOp(moduleOp), codeGenOpts(codeGenOpts), features(features) {} +cir::TBAAAttr CIRGenTBAA::getChar() { + return cir::TBAAScalarAttr::get(mlirContext, + cir::IntType::get(mlirContext, 1, true)); +} + +static bool typeHasMayAlias(clang::QualType qty) { + // Tagged types have declarations, and therefore may have attributes. + if (auto *td = qty->getAsTagDecl()) + if (td->hasAttr()) + return true; + + // Also look for may_alias as a declaration attribute on a typedef. + // FIXME: We should follow GCC and model may_alias as a type attribute + // rather than as a declaration attribute. + while (auto *tt = qty->getAs()) { + if (tt->getDecl()->hasAttr()) + return true; + qty = tt->desugar(); + } + return false; +} + +/// Check if the given type is a valid base type to be used in access tags. +static bool isValidBaseType(clang::QualType qty) { + if (const clang::RecordType *tty = qty->getAs()) { + const clang::RecordDecl *rd = tty->getDecl()->getDefinition(); + // Incomplete types are not valid base access types. + if (!rd) + return false; + if (rd->hasFlexibleArrayMember()) + return false; + // rd can be struct, union, class, interface or enum. + // For now, we only handle struct and class. + if (rd->isStruct() || rd->isClass()) + return true; + } + return false; +} + cir::TBAAAttr CIRGenTBAA::getTypeInfo(clang::QualType qty) { - return tbaa_NYI(mlirContext); + // At -O0 or relaxed aliasing, TBAA is not emitted for regular types. + if (codeGenOpts.OptimizationLevel == 0 || codeGenOpts.RelaxedAliasing) { + return nullptr; + } + + // If the type has the may_alias attribute (even on a typedef), it is + // effectively in the general char alias class. + if (typeHasMayAlias(qty)) { + assert(!cir::MissingFeatures::tbaaMayAlias()); + return getChar(); + } + // We need this function to not fall back to returning the "omnipotent char" + // type node for aggregate and union types. Otherwise, any dereference of an + // aggregate will result into the may-alias access descriptor, meaning all + // subsequent accesses to direct and indirect members of that aggregate will + // be considered may-alias too. + // function. + if (isValidBaseType(qty)) { + // TODO(cir): support TBAA with struct + return tbaa_NYI(mlirContext); + } + + const clang::Type *ty = astContext.getCanonicalType(qty).getTypePtr(); + if (metadataCache.contains(ty)) { + return metadataCache[ty]; + } + + // Note that the following helper call is allowed to add new nodes to the + // cache, which invalidates all its previously obtained iterators. So we + // first generate the node for the type and then add that node to the + // cache. + auto typeNode = cir::TBAAScalarAttr::get(mlirContext, types.ConvertType(qty)); + return metadataCache[ty] = typeNode; } TBAAAccessInfo CIRGenTBAA::getAccessInfo(clang::QualType accessType) { - return TBAAAccessInfo(); + // Pointee values may have incomplete types, but they shall never be + // dereferenced. + if (accessType->isIncompleteType()) { + assert(!cir::MissingFeatures::tbaaIncompleteType()); + return TBAAAccessInfo::getIncompleteInfo(); + } + + if (typeHasMayAlias(accessType)) { + assert(!cir::MissingFeatures::tbaaMayAlias()); + return TBAAAccessInfo::getMayAliasInfo(); + } + + uint64_t size = astContext.getTypeSizeInChars(accessType).getQuantity(); + return TBAAAccessInfo(getTypeInfo(accessType), size); } TBAAAccessInfo CIRGenTBAA::getVTablePtrAccessInfo(mlir::Type vtablePtrType) { + // TODO(cir): support vtable ptr + assert(!cir::MissingFeatures::tbaaVTablePtr()); return TBAAAccessInfo(); } mlir::ArrayAttr CIRGenTBAA::getTBAAStructInfo(clang::QualType qty) { - return mlir::ArrayAttr::get(mlirContext, {}); + assert(!cir::MissingFeatures::tbaaStruct() && "tbaa.struct NYI"); + return mlir::ArrayAttr(); } cir::TBAAAttr CIRGenTBAA::getBaseTypeInfo(clang::QualType qty) { return tbaa_NYI(mlirContext); } -mlir::ArrayAttr CIRGenTBAA::getAccessTagInfo(TBAAAccessInfo tbaaInfo) { - return mlir::ArrayAttr::get(mlirContext, {tbaa_NYI(mlirContext)}); +cir::TBAAAttr CIRGenTBAA::getAccessTagInfo(TBAAAccessInfo tbaaInfo) { + assert(!tbaaInfo.isIncomplete() && + "Access to an object of an incomplete type!"); + + if (tbaaInfo.isMayAlias()) { + assert(!cir::MissingFeatures::tbaaMayAlias()); + tbaaInfo = TBAAAccessInfo(getChar(), tbaaInfo.size); + } + if (!tbaaInfo.accessType) { + return nullptr; + } + + if (!codeGenOpts.StructPathTBAA) + tbaaInfo = TBAAAccessInfo(tbaaInfo.accessType, tbaaInfo.size); + + if (!tbaaInfo.baseType) { + tbaaInfo.baseType = tbaaInfo.accessType; + assert(!tbaaInfo.offset && + "Nonzero offset for an access with no base type!"); + } + if (codeGenOpts.NewStructPathTBAA) { + llvm_unreachable("NYI"); + } + if (tbaaInfo.baseType == tbaaInfo.accessType) { + return tbaaInfo.accessType; + } + return tbaa_NYI(mlirContext); } TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForCast(TBAAAccessInfo sourceInfo, TBAAAccessInfo targetInfo) { + assert(!cir::MissingFeatures::tbaaMergeTBAAInfo()); return TBAAAccessInfo(); } TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo infoA, TBAAAccessInfo infoB) { + assert(!cir::MissingFeatures::tbaaMergeTBAAInfo()); return TBAAAccessInfo(); } TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo destInfo, TBAAAccessInfo srcInfo) { + assert(!cir::MissingFeatures::tbaaMergeTBAAInfo()); return TBAAAccessInfo(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h index 3f59a0e6538b..03b9b75113c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.h +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.h @@ -104,6 +104,10 @@ class CIRGenTBAA { [[maybe_unused]] const clang::CodeGenOptions &codeGenOpts; [[maybe_unused]] const clang::LangOptions &features; + llvm::DenseMap metadataCache; + + cir::TBAAAttr getChar(); + public: CIRGenTBAA(mlir::MLIRContext *mlirContext, clang::ASTContext &astContext, CIRGenTypes &types, mlir::ModuleOp moduleOp, @@ -129,7 +133,7 @@ class CIRGenTBAA { cir::TBAAAttr getBaseTypeInfo(clang::QualType qty); /// Get TBAA tag for a given memory access. - mlir::ArrayAttr getAccessTagInfo(TBAAAccessInfo tbaaInfo); + cir::TBAAAttr getAccessTagInfo(TBAAAccessInfo tbaaInfo); /// Get merged TBAA information for the purpose of type casts. TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo sourceInfo, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7da279aa7513..f8907712deac 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -17,6 +17,7 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Interfaces/CIRLoopOpInterface.h" #include "clang/CIR/MissingFeatures.h" +#include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" #include #include @@ -106,12 +107,12 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << dynCastInfoAttr.getAlias(); return AliasResult::FinalAlias; } - if (auto tbaaAttr = mlir::dyn_cast(attr)) { - os << tbaaAttr.getMnemonic(); - return AliasResult::OverridableAlias; - } - - return AliasResult::NoAlias; + return TypeSwitch(attr) + .Case([&](auto attr) { + os << decltype(attr)::getMnemonic(); + return AliasResult::OverridableAlias; + }) + .Default([](Attribute) { return AliasResult::NoAlias; }); } }; } // namespace diff --git a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp index 80963353a304..bb99d53e0ad8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp @@ -151,7 +151,7 @@ DeletionKind cir::CopyOp::removeBlockingUses( if (loadsFrom(slot)) builder.create(getLoc(), reachingDefinition, getDst(), false, mlir::IntegerAttr{}, cir::MemOrderAttr(), - mlir::ArrayAttr{}); + cir::TBAAAttr{}); return DeletionKind::Delete; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index cedf50a204ef..fff70e137762 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -18,6 +18,7 @@ #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" #include "mlir/Dialect/LLVMIR/Transforms/Passes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" @@ -41,6 +42,9 @@ #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/MissingFeatures.h" @@ -51,6 +55,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" +#include "llvm/ADT/TypeSwitch.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/Support/Casting.h" @@ -666,6 +671,67 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, llvm_unreachable("unhandled attribute type"); } +mlir::LLVM::TBAATypeDescriptorAttr +createScalarTypeNode(mlir::MLIRContext *ctx, llvm::StringRef typeName, + mlir::LLVM::TBAANodeAttr parent, int64_t offset) { + llvm::SmallVector members; + members.push_back(mlir::LLVM::TBAAMemberAttr::get(ctx, parent, offset)); + return mlir::LLVM::TBAATypeDescriptorAttr::get( + ctx, typeName, llvm::ArrayRef(members)); +} + +mlir::LLVM::TBAARootAttr getRoot(mlir::MLIRContext *ctx) { + return mlir::LLVM::TBAARootAttr::get( + ctx, mlir::StringAttr::get(ctx, "Simple C/C++ TBAA")); +} + +mlir::LLVM::TBAATypeDescriptorAttr getChar(mlir::MLIRContext *ctx) { + return createScalarTypeNode(ctx, "omnipotent char", getRoot(ctx), 0); +} + +// FIXME(cir): This should be moved and use tablegen approach +// see https://github.com/llvm/clangir/pull/1220#discussion_r1889187867 +StringRef getTypeName(mlir::Type type) { + return TypeSwitch(type) + .Case([](cir::IntType ty) { return ty.getTBAATypeName(); }) + .Case([](cir::SingleType) { return "float"; }) + .Case([](cir::DoubleType) { return "double"; }) + .Case([](cir::FP80Type) { return "f80"; }) + .Case([](cir::FP128Type) { return "f128"; }) + .Case( + [](cir::LongDoubleType) { return "long double"; }) + .Case([](cir::BoolType) { return "bool"; }) + .Case([](cir::PointerType) { return "any pointer"; }) + .Default([](auto ty) { + llvm::errs() << "unknown type: " << ty << "\n"; + return "unknown"; + }); +} + +mlir::LLVM::TBAATypeDescriptorAttr +lowerScalarType(mlir::MLIRContext *ctx, cir::TBAAScalarAttr scalarAttr) { + // special handle for omnipotent char + if (auto intTy = mlir::dyn_cast_or_null(scalarAttr.getType())) { + if (intTy.getWidth() == 1 || intTy.getWidth() == 8) { + return getChar(ctx); + } + } + auto name = getTypeName(scalarAttr.getType()); + return createScalarTypeNode(ctx, name, getChar(ctx), 0); +} + +mlir::ArrayAttr lowerCIRTBAAAttr(mlir::Attribute tbaa, + mlir::ConversionPatternRewriter &rewriter) { + auto *ctx = rewriter.getContext(); + if (auto scalarAttr = mlir::dyn_cast(tbaa)) { + auto accessType = lowerScalarType(ctx, scalarAttr); + auto tag = mlir::LLVM::TBAATagAttr::get(accessType, accessType, 0); + return mlir::ArrayAttr::get(ctx, {tag}); + } + assert(!cir::MissingFeatures::tbaaTagForStruct()); + return mlir::ArrayAttr(); +} + //===----------------------------------------------------------------------===// mlir::LLVM::Linkage convertLinkage(cir::GlobalLinkageKind linkage) { @@ -1512,10 +1578,14 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite( } // TODO: nontemporal, syncscope. - rewriter.replaceOpWithNewOp( - op, llvmTy, adaptor.getAddr(), /* alignment */ alignment, + auto loadOp = rewriter.create( + op->getLoc(), llvmTy, adaptor.getAddr(), /* alignment */ alignment, op.getIsVolatile(), /* nontemporal */ false, /* invariant */ false, /* invariantGroup */ invariant, ordering); + rewriter.replaceOp(op, loadOp); + if (auto tbaa = op.getTbaaAttr()) { + loadOp.setTBAATags(lowerCIRTBAAAttr(tbaa, rewriter)); + } return mlir::LogicalResult::success(); } @@ -1547,9 +1617,14 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite( } // TODO: nontemporal, syncscope. - rewriter.replaceOpWithNewOp( - op, adaptor.getValue(), adaptor.getAddr(), alignment, op.getIsVolatile(), + auto storeOp = rewriter.create( + op->getLoc(), adaptor.getValue(), adaptor.getAddr(), alignment, + op.getIsVolatile(), /* nontemporal */ false, /* invariantGroup */ invariant, ordering); + rewriter.replaceOp(op, storeOp); + if (auto tbaa = op.getTbaaAttr()) { + storeOp.setTBAATags(lowerCIRTBAAAttr(tbaa, rewriter)); + } return mlir::LogicalResult::success(); } diff --git a/clang/test/CIR/CodeGen/const-alloca.cpp b/clang/test/CIR/CodeGen/const-alloca.cpp index 9247b2692474..7cc9a5b57517 100644 --- a/clang/test/CIR/CodeGen/const-alloca.cpp +++ b/clang/test/CIR/CodeGen/const-alloca.cpp @@ -66,8 +66,8 @@ int local_const_load_store() { // LLVM-LABEL: @_Z22local_const_load_storev // LLVM: %[[#INIT:]] = call i32 @_Z11produce_intv() -// LLVM-NEXT: store i32 %[[#INIT]], ptr %[[#SLOT:]], align 4, !invariant.group !{{.+}} -// LLVM-NEXT: %{{.+}} = load i32, ptr %[[#SLOT]], align 4, !invariant.group !{{.+}} +// LLVM-NEXT: store i32 %[[#INIT]], ptr %[[#SLOT:]], align 4, !tbaa !{{.*}}, !invariant.group !{{.+}} +// LLVM-NEXT: %{{.+}} = load i32, ptr %[[#SLOT]], align 4, !tbaa !{{.*}}, !invariant.group !{{.+}} // LLVM: } int local_const_optimize() { @@ -80,7 +80,7 @@ int local_const_optimize() { // LLVM-LABEL: @_Z20local_const_optimizev() // LLVM-NEXT: %[[#slot:]] = alloca i32, align 4 // LLVM-NEXT: %[[#init:]] = tail call i32 @_Z11produce_intv() -// LLVM-NEXT: store i32 %[[#init]], ptr %[[#slot]], align 4, !invariant.group !{{.+}} +// LLVM-NEXT: store i32 %[[#init]], ptr %[[#slot]], align 4, !tbaa !{{.*}}, !invariant.group !{{.+}} // LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#slot]]) // LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#slot]]) // LLVM-NEXT: ret i32 %[[#init]] diff --git a/clang/test/CIR/CodeGen/tbaa-scalar.c b/clang/test/CIR/CodeGen/tbaa-scalar.c new file mode 100644 index 000000000000..b2f893b4f4ac --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa-scalar.c @@ -0,0 +1,148 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -relaxed-aliasing +// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O0 +// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s + +// NO-TBAA-NOT: !tbaa + +// CIR: #tbaa[[FLOAT_PTR:.*]] = #cir.tbaa_scalar> +// CIR: #tbaa[[FLOAT:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[DOUBLE_PTR:.*]] = #cir.tbaa_scalar> +// CIR: #tbaa[[DOUBLE:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[LONG_DOUBLE_PTR:.*]] = #cir.tbaa_scalar>> +// CIR: #tbaa[[LONG_DOUBLE:.*]] = #cir.tbaa_scalar> +// CIR: #tbaa[[INT:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[LONG:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[CHAR:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[INT_PTR:.*]] = #cir.tbaa_scalar> +// CIR: #tbaa[[LONG_PTR:.*]] = #cir.tbaa_scalar> +// CIR: #tbaa[[CHAR_PTR:.*]] = #cir.tbaa_scalar> + +void test_int_and_float(int *a, float *b) { + // CIR-LABEL: cir.func @test_int_and_float + // CIR: cir.scope + // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa(#tbaa[[INT_PTR]]) + // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s32i tbaa(#tbaa[[INT]]) + // CIR: cir.if + // CIR: %[[C2:.*]] = cir.const #cir.fp<2 + // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[FLOAT_PTR]]) + // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.float, !cir.ptr tbaa(#tbaa[[FLOAT]]) + // CIR: else + // CIR: %[[C3:.*]] = cir.const #cir.fp<3 + // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[FLOAT_PTR]]) + // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.float, !cir.ptr tbaa(#tbaa[[FLOAT]]) + + // LLVM-LABEL: void @test_int_and_float + // LLVM: %[[ARG_a:.*]] = load i32, ptr %{{.*}}, align 4, !tbaa ![[TBAA_INT:.*]] + // LLVM: %[[COND:.*]] = icmp eq i32 %[[ARG_a]], 1 + // LLVM: %[[RET:.*]] = select i1 %[[COND]], float 2.000000e+00, float 3.000000e+00 + // LLVM: store float %[[RET]], ptr %{{.*}}, align 4, !tbaa ![[TBAA_FLOAT:.*]] + // LLVM: ret void + if (*a == 1) { + *b = 2.0f; + } else { + *b = 3.0f; + } +} + +void test_long_and_double(long *a, double *b) { + // CIR-LABEL: cir.func @test_long_and_double + // CIR: cir.scope + // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa(#tbaa[[LONG_PTR]]) + // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s64i tbaa(#tbaa[[LONG]]) + // CIR: cir.if + // CIR: %[[C2:.*]] = cir.const #cir.fp<2 + // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[DOUBLE_PTR]]) + // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.double, !cir.ptr tbaa(#tbaa[[DOUBLE]]) + // CIR: else + // CIR: %[[C3:.*]] = cir.const #cir.fp<3 + // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[DOUBLE_PTR]]) + // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.double, !cir.ptr tbaa(#tbaa[[DOUBLE]]) + + // LLVM-LABEL: void @test_long_and_double + // LLVM: %[[ARG_a:.*]] = load i64, ptr %{{.*}}, align 8, !tbaa ![[TBAA_LONG:.*]] + // LLVM: %[[COND:.*]] = icmp eq i64 %[[ARG_a]], 1 + // LLVM: %[[RET:.*]] = select i1 %[[COND]], double 2.000000e+00, double 3.000000e+00 + // LLVM: store double %[[RET]], ptr %{{.*}}, align 8, !tbaa ![[TBAA_DOUBLE:.*]] + // LLVM: ret void + if (*a == 1L) { + *b = 2.0; + } else { + *b = 3.0; + } +} +void test_long_long_and_long_double(long long *a, long double *b) { + // CIR-LABEL: cir.func @test_long_long_and_long_double + // CIR: cir.scope + // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa(#tbaa[[LONG_PTR]]) + // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s64i tbaa(#tbaa[[LONG]]) + // CIR: cir.if + // CIR: %[[C2:.*]] = cir.const #cir.fp<2 + // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>>, !cir.ptr> tbaa(#tbaa[[LONG_DOUBLE_PTR]]) + // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.long_double, !cir.ptr> tbaa(#tbaa[[LONG_DOUBLE]]) + // CIR: else + // CIR: %[[C3:.*]] = cir.const #cir.fp<3 + // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>>, !cir.ptr> tbaa(#tbaa[[LONG_DOUBLE_PTR]]) + // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.long_double, !cir.ptr> tbaa(#tbaa[[LONG_DOUBLE]]) + + // LLVM-LABEL: void @test_long_long_and_long_double + // LLVM: %[[ARG_a:.*]] = load i64, ptr %{{.*}}, align 8, !tbaa ![[TBAA_LONG_LONG:.*]] + // LLVM: %[[COND:.*]] = icmp eq i64 %[[ARG_a]], 1 + // LLVM: %[[RET:.*]] = select i1 %[[COND]], x86_fp80 0xK40008000000000000000, x86_fp80 0xK4000C000000000000000 + // LLVM: store x86_fp80 %[[RET]], ptr %{{.*}}, align 16, !tbaa ![[TBAA_LONG_DOUBLE:.*]] + // LLVM: ret void + if (*a == 1L) { + *b = 2.0L; + } else { + *b = 3.0L; + } +} + +void test_char(char *a, char* b) { + // CIR-LABEL: cir.func @test_char + // CIR: cir.scope + // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa(#tbaa[[CHAR_PTR]]) + // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s8i tbaa(#tbaa[[CHAR]]) + // CIR: cir.if + // CIR: %[[C2:.*]] = cir.const #cir.int<98> : !s32i + // CIR: %[[C2_CHAR:.*]] = cir.cast(integral, %[[C2]] : !s32i), !s8i + // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[CHAR_PTR]]) + // CIR: cir.store %[[C2_CHAR]], %[[TMP3]] : !s8i, !cir.ptr tbaa(#tbaa[[CHAR]]) + // CIR: else + // CIR: %[[C3:.*]] = cir.const #cir.int<0> : !s32i + // CIR: %[[C3_CHAR:.*]] = cir.cast(integral, %[[C3]] : !s32i), !s8i + // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[CHAR_PTR]]) + // CIR: cir.store %[[C3_CHAR]], %[[TMP4]] : !s8i, !cir.ptr tbaa(#tbaa[[CHAR]]) + + + // LLVM-LABEL: void @test_char + // LLVM: %[[ARG_a:.*]] = load i8, ptr %{{.*}}, align 1, !tbaa ![[TBAA_CHAR:.*]] + // LLVM: %[[COND:.*]] = icmp eq i8 %[[ARG_a]], 97 + // LLVM: %[[RET:.*]] = select i1 %[[COND]], i8 98, i8 0 + // LLVM: store i8 %[[RET]], ptr %{{.*}}, align 1, !tbaa ![[TBAA_CHAR]] + // LLVM: ret void + if (*a == 'a') { + *b = 'b'; + } + else { + *b = '\0'; + } +} + +// LLVM: ![[TBAA_INT]] = !{![[TBAA_INT_PARENT:.*]], ![[TBAA_INT_PARENT]], i64 0} +// LLVM: ![[TBAA_INT_PARENT]] = !{!"int", ![[CHAR:.*]], i64 0} +// LLVM: ![[CHAR]] = !{!"omnipotent char", ![[ROOT:.*]], i64 0} +// LLVM: ![[ROOT]] = !{!"Simple C/C++ TBAA"} +// LLVM: ![[TBAA_FLOAT]] = !{![[TBAA_FLOAT_PARENT:.*]], ![[TBAA_FLOAT_PARENT]], i64 0} +// LLVM: ![[TBAA_FLOAT_PARENT]] = !{!"float", ![[CHAR]], i64 0} +// LLVM: ![[TBAA_LONG]] = !{![[TBAA_LONG_PARENT:.*]], ![[TBAA_LONG_PARENT]], i64 0} +// LLVM: ![[TBAA_LONG_PARENT]] = !{!"long", ![[CHAR]], i64 0} +// LLVM: ![[TBAA_DOUBLE]] = !{![[TBAA_DOUBLE_PARENT:.*]], ![[TBAA_DOUBLE_PARENT]], i64 0} +// LLVM: ![[TBAA_DOUBLE_PARENT]] = !{!"double", ![[CHAR]], i64 0} +// LLVM: ![[TBAA_LONG_DOUBLE]] = !{![[TBAA_LONG_DOUBLE_PARENT:.*]], ![[TBAA_LONG_DOUBLE_PARENT]], i64 0} +// LLVM: ![[TBAA_LONG_DOUBLE_PARENT]] = !{!"long double", ![[CHAR]], i64 0} +// LLVM: ![[TBAA_CHAR]] = !{![[CHAR]], ![[CHAR]], i64 0} diff --git a/clang/test/CIR/CodeGen/tbaa-struct.cpp b/clang/test/CIR/CodeGen/tbaa-struct.cpp new file mode 100644 index 000000000000..84c49df6b455 --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa-struct.cpp @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// CIR: #tbaa[[tbaa_NYI:.*]] = #cir.tbaa +// CIR: #tbaa[[INT:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[INT_PTR:.*]] = #cir.tbaa_scalar> +// CIR: #tbaa[[StructA_PTR:.*]] = #cir.tbaa_scalar> + +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; +typedef struct +{ + uint16_t f16; + uint32_t f32; + uint16_t f16_2; + uint32_t f32_2; +} StructA; + +uint32_t g(uint32_t *s, StructA *A) { + // CIR-LABEL: cir.func @_Z1g + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[INT]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i + // CIR: %[[pointer_to_StructA:.*]] = cir.load %{{.*}} : !cir.ptr>, !cir.ptr tbaa(#tbaa[[StructA_PTR]]) + // CIR: %[[A_f32:.*]] = cir.get_member %[[pointer_to_StructA]][1] {name = "f32"} : !cir.ptr -> !cir.ptr + // CIR: cir.store %[[UINT_4]], %[[A_f32]] : !u32i, !cir.ptr tbaa(#tbaa[[tbaa_NYI]]) + + *s = 1; + A->f32 = 4; + return *s; +} diff --git a/clang/test/CIR/CodeGen/tbaa-vptr.cpp b/clang/test/CIR/CodeGen/tbaa-vptr.cpp new file mode 100644 index 000000000000..dbe28be626a2 --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa-vptr.cpp @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// CIR-NOT: #tbaa + +struct Member { + ~Member(); +}; + +struct A { + virtual ~A(); +}; + +struct B : A { + Member m; + virtual ~B(); +}; +B::~B() { } diff --git a/clang/test/CIR/CodeGen/tbaa.c b/clang/test/CIR/CodeGen/tbaa.c deleted file mode 100644 index 43cdde47ecb7..000000000000 --- a/clang/test/CIR/CodeGen/tbaa.c +++ /dev/null @@ -1,22 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s - -// CIR: #tbaa[[TBAA_NO:.*]] = #cir.tbaa -void f(int *a, float *b) { - // CIR: cir.scope - // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) - // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s32i tbaa([#tbaa[[TBAA_NO]]]) - // CIR: cir.if - // CIR: %[[C2:.*]] = cir.const #cir.fp<2 - // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) - // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.float, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) - // CIR: else - // CIR: %[[C3:.*]] = cir.const #cir.fp<3 - // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) - // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.float, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) - if (*a == 1) { - *b = 2.0f; - } else { - *b = 3.0f; - } -} From 6398a0168ebe5d14e686e31e51b62022b93c082e Mon Sep 17 00:00:00 2001 From: Congcong Cai Date: Thu, 19 Dec 2024 00:49:06 +0800 Subject: [PATCH 2177/2301] [CIR][CodeGen][NFC] centralized split `std::initializer_list` field (#1235) Use iterator to visit std::initializer_list field reduce the readability --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 122 +++++++++++++----------- 1 file changed, 64 insertions(+), 58 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index df4e97f7a179..831e2e8c3ae6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -296,64 +296,7 @@ class AggExprEmitter : public StmtVisitor { void VisitCXXConstructExpr(const CXXConstructExpr *E); void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E); void VisitLambdaExpr(LambdaExpr *E); - void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { - ASTContext &Ctx = CGF.getContext(); - CIRGenFunction::SourceLocRAIIObject locRAIIObject{ - CGF, CGF.getLoc(E->getSourceRange())}; - // Emit an array containing the elements. The array is externally - // destructed if the std::initializer_list object is. - LValue Array = CGF.emitLValue(E->getSubExpr()); - assert(Array.isSimple() && "initializer_list array not a simple lvalue"); - Address ArrayPtr = Array.getAddress(); - - const ConstantArrayType *ArrayType = - Ctx.getAsConstantArrayType(E->getSubExpr()->getType()); - assert(ArrayType && "std::initializer_list constructed from non-array"); - - RecordDecl *Record = E->getType()->castAs()->getDecl(); - RecordDecl::field_iterator Field = Record->field_begin(); - assert(Field != Record->field_end() && - Ctx.hasSameType(Field->getType()->getPointeeType(), - ArrayType->getElementType()) && - "Expected std::initializer_list first field to be const E *"); - // Start pointer. - auto loc = CGF.getLoc(E->getSourceRange()); - AggValueSlot Dest = EnsureSlot(loc, E->getType()); - LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), E->getType()); - LValue Start = - CGF.emitLValueForFieldInitialization(DestLV, *Field, Field->getName()); - mlir::Value ArrayStart = ArrayPtr.emitRawPointer(); - CGF.emitStoreThroughLValue(RValue::get(ArrayStart), Start); - ++Field; - assert(Field != Record->field_end() && - "Expected std::initializer_list to have two fields"); - - auto Builder = CGF.getBuilder(); - - auto sizeOp = Builder.getConstInt(loc, ArrayType->getSize()); - - mlir::Value Size = sizeOp.getRes(); - Builder.getUIntNTy(ArrayType->getSizeBitWidth()); - LValue EndOrLength = - CGF.emitLValueForFieldInitialization(DestLV, *Field, Field->getName()); - if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) { - // Length. - CGF.emitStoreThroughLValue(RValue::get(Size), EndOrLength); - } else { - // End pointer. - assert(Field->getType()->isPointerType() && - Ctx.hasSameType(Field->getType()->getPointeeType(), - ArrayType->getElementType()) && - "Expected std::initializer_list second field to be const E *"); - - auto ArrayEnd = - Builder.getArrayElement(loc, loc, ArrayPtr.getPointer(), - ArrayPtr.getElementType(), Size, false); - CGF.emitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); - } - assert(++Field == Record->field_end() && - "Expected std::initializer_list to only have two fields"); - } + void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E); void VisitExprWithCleanups(ExprWithCleanups *E); void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { @@ -954,6 +897,69 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { } } +void AggExprEmitter::VisitCXXStdInitializerListExpr( + CXXStdInitializerListExpr *E) { + ASTContext &Ctx = CGF.getContext(); + CIRGenFunction::SourceLocRAIIObject locRAIIObject{ + CGF, CGF.getLoc(E->getSourceRange())}; + // Emit an array containing the elements. The array is externally + // destructed if the std::initializer_list object is. + LValue Array = CGF.emitLValue(E->getSubExpr()); + assert(Array.isSimple() && "initializer_list array not a simple lvalue"); + Address ArrayPtr = Array.getAddress(); + + const ConstantArrayType *ArrayType = + Ctx.getAsConstantArrayType(E->getSubExpr()->getType()); + assert(ArrayType && "std::initializer_list constructed from non-array"); + + RecordDecl *Record = E->getType()->castAs()->getDecl(); + RecordDecl::field_iterator Field = Record->field_begin(); + assert(Field != Record->field_end() && + Ctx.hasSameType(Field->getType()->getPointeeType(), + ArrayType->getElementType()) && + "Expected std::initializer_list first field to be const E *"); + const FieldDecl *StartField = *Field; + ++Field; + assert(Field != Record->field_end() && + "Expected std::initializer_list to have two fields"); + const FieldDecl *EndOrLengthField = *Field; + ++Field; + assert(Field == Record->field_end() && + "Expected std::initializer_list to only have two fields"); + + // Start pointer. + auto loc = CGF.getLoc(E->getSourceRange()); + AggValueSlot Dest = EnsureSlot(loc, E->getType()); + LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), E->getType()); + LValue Start = CGF.emitLValueForFieldInitialization(DestLV, StartField, + StartField->getName()); + mlir::Value ArrayStart = ArrayPtr.emitRawPointer(); + CGF.emitStoreThroughLValue(RValue::get(ArrayStart), Start); + + auto Builder = CGF.getBuilder(); + + auto sizeOp = Builder.getConstInt(loc, ArrayType->getSize()); + + mlir::Value Size = sizeOp.getRes(); + LValue EndOrLength = CGF.emitLValueForFieldInitialization( + DestLV, EndOrLengthField, EndOrLengthField->getName()); + if (Ctx.hasSameType(EndOrLengthField->getType(), Ctx.getSizeType())) { + // Length. + CGF.emitStoreThroughLValue(RValue::get(Size), EndOrLength); + } else { + // End pointer. + assert(EndOrLengthField->getType()->isPointerType() && + Ctx.hasSameType(EndOrLengthField->getType()->getPointeeType(), + ArrayType->getElementType()) && + "Expected std::initializer_list second field to be const E *"); + + auto ArrayEnd = + Builder.getArrayElement(loc, loc, ArrayPtr.getPointer(), + ArrayPtr.getElementType(), Size, false); + CGF.emitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); + } +} + void AggExprEmitter::VisitCastExpr(CastExpr *E) { if (const auto *ECE = dyn_cast(E)) CGF.CGM.emitExplicitCastExprType(ECE, &CGF); From cdd0b60662c80aa1a19b2d0cea4f20453cb0fc63 Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 18 Dec 2024 11:50:42 -0500 Subject: [PATCH 2178/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vaddv_f32, neon_vaddvq_f32 and neon_vaddvq_f64 (#1238) [Neon intrinsic definition](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32). They are vector across operation which LLVM doesn't currently have a generic intrinsic about it. As a side note for brainstorm, it might be worth in the future for CIR to introduce Vector Across type operations even though LLVM dialect doesn't have it yet. This would help to expose opt opportunities. E.g. a very trivial constant fold can happen if we are adding across a constant vector. --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 7 ++- clang/test/CIR/CodeGen/AArch64/neon.c | 51 ++++++++++++------- 2 files changed, 36 insertions(+), 22 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 7982a2350607..b291dbdd7689 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2676,16 +2676,15 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr( case NEON::BI__builtin_neon_vaddlvq_u32: return emitNeonCall(builder, {argTy}, ops, "aarch64.neon.uaddlv", resultTy, loc); - case NEON::BI__builtin_neon_vaddv_f32: - llvm_unreachable(" neon_vaddv_f32 NYI "); case NEON::BI__builtin_neon_vaddv_s32: llvm_unreachable(" neon_vaddv_s32 NYI "); case NEON::BI__builtin_neon_vaddv_u32: llvm_unreachable(" neon_vaddv_u32 NYI "); + case NEON::BI__builtin_neon_vaddv_f32: case NEON::BI__builtin_neon_vaddvq_f32: - llvm_unreachable(" neon_vaddvq_f32 NYI "); case NEON::BI__builtin_neon_vaddvq_f64: - llvm_unreachable(" neon_vaddvq_f64 NYI "); + return emitNeonCall(builder, {argTy}, ops, "aarch64.neon.faddv", resultTy, + loc); case NEON::BI__builtin_neon_vaddvq_s32: llvm_unreachable(" neon_vaddvq_s32 NYI "); case NEON::BI__builtin_neon_vaddvq_s64: diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 06cc61a7c91e..e3e9c450ff63 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -18383,26 +18383,41 @@ uint64x1_t test_vrsra_n_u64(uint64x1_t a, uint64x1_t b) { // return vneg_s64(a); // } -// NYI-LABEL: @test_vaddv_f32( -// NYI: [[VADDV_F32_I:%.*]] = call float @llvm.aarch64.neon.faddv.f32.v2f32(<2 x float> %a) -// NYI: ret float [[VADDV_F32_I]] -// float32_t test_vaddv_f32(float32x2_t a) { -// return vaddv_f32(a); -// } +float32_t test_vaddv_f32(float32x2_t a) { + return vaddv_f32(a); -// NYI-LABEL: @test_vaddvq_f32( -// NYI: [[VADDVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.faddv.f32.v4f32(<4 x float> %a) -// NYI: ret float [[VADDVQ_F32_I]] -// float32_t test_vaddvq_f32(float32x4_t a) { -// return vaddvq_f32(a); -// } + // CIR-LABEL: vaddv_f32 + // CIR: cir.llvm.intrinsic "aarch64.neon.faddv" {{%.*}} : (!cir.vector) -> !cir.float -// NYI-LABEL: @test_vaddvq_f64( -// NYI: [[VADDVQ_F64_I:%.*]] = call double @llvm.aarch64.neon.faddv.f64.v2f64(<2 x double> %a) -// NYI: ret double [[VADDVQ_F64_I]] -// float64_t test_vaddvq_f64(float64x2_t a) { -// return vaddvq_f64(a); -// } + // LLVM-LABEL: test_vaddv_f32 + // LLVM-SAME: (<2 x float> [[a:%.*]]) + // LLVM: [[VADDV_F32_I:%.*]] = call float @llvm.aarch64.neon.faddv.f32.v2f32(<2 x float> [[a]]) + // LLVM: ret float [[VADDV_F32_I]] +} + +float32_t test_vaddvq_f32(float32x4_t a) { + return vaddvq_f32(a); + + // CIR-LABEL: vaddvq_f32 + // CIR: cir.llvm.intrinsic "aarch64.neon.faddv" {{%.*}} : (!cir.vector) -> !cir.float + + // LLVM-LABEL: test_vaddvq_f32 + // LLVM-SAME: (<4 x float> [[a:%.*]]) + // LLVM: [[VADDVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.faddv.f32.v4f32(<4 x float> [[a]]) + // LLVM: ret float [[VADDVQ_F32_I]] +} + +float64_t test_vaddvq_f64(float64x2_t a) { + return vaddvq_f64(a); + + // CIR-LABEL: vaddvq_f64 + // CIR: cir.llvm.intrinsic "aarch64.neon.faddv" {{%.*}} : (!cir.vector) -> !cir.double + + // LLVM-LABEL: test_vaddvq_f64 + // LLVM-SAME: (<2 x double> [[a:%.*]]) + // LLVM: [[VADDVQ_F64_I:%.*]] = call double @llvm.aarch64.neon.faddv.f64.v2f64(<2 x double> [[a]]) + // LLVM: ret double [[VADDVQ_F64_I]] +} // NYI-LABEL: @test_vmaxv_f32( // NYI: [[VMAXV_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v2f32(<2 x float> %a) From 6eda0ec88438779ee0b270cfbd3387bc077b587f Mon Sep 17 00:00:00 2001 From: Guojin Date: Wed, 18 Dec 2024 11:51:43 -0500 Subject: [PATCH 2179/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vmax_v and neon_vmaxq_v (#1239) This implementation is different from OG in the sense we chose to use CIR op which eventually lowers to generic LLVM intrinsics instead of llvm.aarch64.neon intrinsics But down to the ASM level, [they are identical ](https://godbolt.org/z/Gbbos9z6Y). --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 11 +- clang/test/CIR/CodeGen/AArch64/neon.c | 320 +++++++++++------- 2 files changed, 209 insertions(+), 122 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index b291dbdd7689..d7ca71093f34 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3928,8 +3928,15 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vmax_v: - case NEON::BI__builtin_neon_vmaxq_v: - llvm_unreachable("NEON::BI__builtin_neon_vmaxq_v NYI"); + case NEON::BI__builtin_neon_vmaxq_v: { + mlir::Location loc = getLoc(E->getExprLoc()); + Ops[0] = builder.createBitcast(Ops[0], ty); + Ops[1] = builder.createBitcast(Ops[1], ty); + if (cir::isFPOrFPVectorTy(ty)) { + return builder.create(loc, Ops[0], Ops[1]); + } + return builder.create(loc, cir::BinOpKind::Max, Ops[0], Ops[1]); + } case NEON::BI__builtin_neon_vmaxh_f16: { llvm_unreachable("NEON::BI__builtin_neon_vmaxh_f16 NYI"); } diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index e3e9c450ff63..adb4dc5a80e7 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -4270,132 +4270,207 @@ uint64x2_t test_vrshlq_u64(uint64x2_t a, int64x2_t b) { // return vsliq_n_p64(a, b, 0); // } -// NYI-LABEL: @test_vmax_s8( -// NYI: [[VMAX_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i8> [[VMAX_I]] -// int8x8_t test_vmax_s8(int8x8_t a, int8x8_t b) { -// return vmax_s8(a, b); -// } +int8x8_t test_vmax_s8(int8x8_t a, int8x8_t b) { + return vmax_s8(a, b); -// NYI-LABEL: @test_vmax_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VMAX2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: ret <4 x i16> [[VMAX2_I]] -// int16x4_t test_vmax_s16(int16x4_t a, int16x4_t b) { -// return vmax_s16(a, b); -// } + // CIR-LABEL: vmax_s8 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector -// NYI-LABEL: @test_vmax_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VMAX2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: ret <2 x i32> [[VMAX2_I]] -// int32x2_t test_vmax_s32(int32x2_t a, int32x2_t b) { -// return vmax_s32(a, b); -// } + // LLVM-LABEL: test_vmax_s8 + // LLVM-SAME: (<8 x i8> [[a:%.*]], <8 x i8> [[b:%.*]]) + // LLVM: [[VMAX_I:%.*]] = call <8 x i8> @llvm.smax.v8i8(<8 x i8> [[a]], <8 x i8> [[b]]) + // LLVM: ret <8 x i8> [[VMAX_I]] +} -// NYI-LABEL: @test_vmax_u8( -// NYI: [[VMAX_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umax.v8i8(<8 x i8> %a, <8 x i8> %b) -// NYI: ret <8 x i8> [[VMAX_I]] -// uint8x8_t test_vmax_u8(uint8x8_t a, uint8x8_t b) { -// return vmax_u8(a, b); -// } +int16x4_t test_vmax_s16(int16x4_t a, int16x4_t b) { + return vmax_s16(a, b); -// NYI-LABEL: @test_vmax_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[VMAX2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umax.v4i16(<4 x i16> %a, <4 x i16> %b) -// NYI: ret <4 x i16> [[VMAX2_I]] -// uint16x4_t test_vmax_u16(uint16x4_t a, uint16x4_t b) { -// return vmax_u16(a, b); -// } + // CIR-LABEL: vmax_s16 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector -// NYI-LABEL: @test_vmax_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[VMAX2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umax.v2i32(<2 x i32> %a, <2 x i32> %b) -// NYI: ret <2 x i32> [[VMAX2_I]] -// uint32x2_t test_vmax_u32(uint32x2_t a, uint32x2_t b) { -// return vmax_u32(a, b); -// } + // LLVM-LABEL: test_vmax_s16 + // LLVM-SAME: (<4 x i16> [[a:%.*]], <4 x i16> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[b]] to <8 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <4 x i16> @llvm.smax.v4i16(<4 x i16> [[a]], <4 x i16> [[b]]) + // LLVM: ret <4 x i16> [[VMAX2_I]] +} -// NYI-LABEL: @test_vmax_f32( -// NYI: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> -// NYI: [[VMAX2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmax.v2f32(<2 x float> %a, <2 x float> %b) -// NYI: ret <2 x float> [[VMAX2_I]] -// float32x2_t test_vmax_f32(float32x2_t a, float32x2_t b) { -// return vmax_f32(a, b); -// } +int32x2_t test_vmax_s32(int32x2_t a, int32x2_t b) { + return vmax_s32(a, b); -// NYI-LABEL: @test_vmaxq_s8( -// NYI: [[VMAX_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8> %a, <16 x i8> %b) -// NYI: ret <16 x i8> [[VMAX_I]] -// int8x16_t test_vmaxq_s8(int8x16_t a, int8x16_t b) { -// return vmaxq_s8(a, b); -// } + // CIR-LABEL: vmax_s32 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector -// NYI-LABEL: @test_vmaxq_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VMAX2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16> %a, <8 x i16> %b) -// NYI: ret <8 x i16> [[VMAX2_I]] -// int16x8_t test_vmaxq_s16(int16x8_t a, int16x8_t b) { -// return vmaxq_s16(a, b); -// } + // LLVM-LABEL: test_vmax_s32 + // LLVM-SAME: (<2 x i32> [[a:%.*]], <2 x i32> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[b]] to <8 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <2 x i32> @llvm.smax.v2i32(<2 x i32> [[a]], <2 x i32> [[b]]) + // LLVM: ret <2 x i32> [[VMAX2_I]] +} -// NYI-LABEL: @test_vmaxq_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VMAX2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32> %a, <4 x i32> %b) -// NYI: ret <4 x i32> [[VMAX2_I]] -// int32x4_t test_vmaxq_s32(int32x4_t a, int32x4_t b) { -// return vmaxq_s32(a, b); -// } +uint8x8_t test_vmax_u8(uint8x8_t a, uint8x8_t b) { + return vmax_u8(a, b); -// NYI-LABEL: @test_vmaxq_u8( -// NYI: [[VMAX_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umax.v16i8(<16 x i8> %a, <16 x i8> %b) -// NYI: ret <16 x i8> [[VMAX_I]] -// uint8x16_t test_vmaxq_u8(uint8x16_t a, uint8x16_t b) { -// return vmaxq_u8(a, b); -// } + // CIR-LABEL: vmax_u8 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector -// NYI-LABEL: @test_vmaxq_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[VMAX2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umax.v8i16(<8 x i16> %a, <8 x i16> %b) -// NYI: ret <8 x i16> [[VMAX2_I]] -// uint16x8_t test_vmaxq_u16(uint16x8_t a, uint16x8_t b) { -// return vmaxq_u16(a, b); -// } + // LLVM-LABEL: test_vmax_u8 + // LLVM-SAME: (<8 x i8> [[a:%.*]], <8 x i8> [[b:%.*]]) + // LLVM: [[VMAX_I:%.*]] = call <8 x i8> @llvm.umax.v8i8(<8 x i8> [[a]], <8 x i8> [[b]]) + // LLVM: ret <8 x i8> [[VMAX_I]] +} -// NYI-LABEL: @test_vmaxq_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[VMAX2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umax.v4i32(<4 x i32> %a, <4 x i32> %b) -// NYI: ret <4 x i32> [[VMAX2_I]] -// uint32x4_t test_vmaxq_u32(uint32x4_t a, uint32x4_t b) { -// return vmaxq_u32(a, b); -// } +uint16x4_t test_vmax_u16(uint16x4_t a, uint16x4_t b) { + return vmax_u16(a, b); -// NYI-LABEL: @test_vmaxq_f32( -// NYI: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> -// NYI: [[VMAX2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmax.v4f32(<4 x float> %a, <4 x float> %b) -// NYI: ret <4 x float> [[VMAX2_I]] -// float32x4_t test_vmaxq_f32(float32x4_t a, float32x4_t b) { -// return vmaxq_f32(a, b); -// } + // CIR-LABEL: vmax_u16 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector -// NYI-LABEL: @test_vmaxq_f64( -// NYI: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> -// NYI: [[VMAX2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmax.v2f64(<2 x double> %a, <2 x double> %b) -// NYI: ret <2 x double> [[VMAX2_I]] -// float64x2_t test_vmaxq_f64(float64x2_t a, float64x2_t b) { -// return vmaxq_f64(a, b); -// } + // LLVM-LABEL: test_vmax_u16 + // LLVM-SAME: (<4 x i16> [[a:%.*]], <4 x i16> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> [[b]] to <8 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <4 x i16> @llvm.umax.v4i16(<4 x i16> [[a]], <4 x i16> [[b]]) + // LLVM: ret <4 x i16> [[VMAX2_I]] +} + +uint32x2_t test_vmax_u32(uint32x2_t a, uint32x2_t b) { + return vmax_u32(a, b); + + // CIR-LABEL: vmax_u32 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector + + // LLVM-LABEL: test_vmax_u32 + // LLVM-SAME: (<2 x i32> [[a:%.*]], <2 x i32> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> [[b]] to <8 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <2 x i32> @llvm.umax.v2i32(<2 x i32> [[a]], <2 x i32> [[b]]) + // LLVM: ret <2 x i32> [[VMAX2_I]] +} + +float32x2_t test_vmax_f32(float32x2_t a, float32x2_t b) { + return vmax_f32(a, b); + + // CIR-LABEL: vmax_f32 + // CIR: cir.fmaximum {{%.*}}, {{%.*}} : !cir.vector + + // LLVM-LABEL: test_vmax_f32 + // LLVM-SAME: (<2 x float> [[a:%.*]], <2 x float> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x float> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x float> [[b]] to <8 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <2 x float> @llvm.maximum.v2f32(<2 x float> [[a]], <2 x float> [[b]]) + // LLVM: ret <2 x float> [[VMAX2_I]] +} + +int8x16_t test_vmaxq_s8(int8x16_t a, int8x16_t b) { + return vmaxq_s8(a, b); + + // CIR-LABEL: vmaxq_s8 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector + + // LLVM-LABEL: test_vmaxq_s8 + // LLVM-SAME: (<16 x i8> [[a:%.*]], <16 x i8> [[b:%.*]]) + // LLVM: [[VMAX_I:%.*]] = call <16 x i8> @llvm.smax.v16i8(<16 x i8> [[a]], <16 x i8> [[b]]) + // LLVM: ret <16 x i8> [[VMAX_I]] +} + +int16x8_t test_vmaxq_s16(int16x8_t a, int16x8_t b) { + return vmaxq_s16(a, b); + + // CIR-LABEL: vmaxq_s16 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector + + // LLVM-LABEL: test_vmaxq_s16 + // LLVM-SAME: (<8 x i16> [[a:%.*]], <8 x i16> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[b]] to <16 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <8 x i16> @llvm.smax.v8i16(<8 x i16> [[a]], <8 x i16> [[b]]) + // LLVM: ret <8 x i16> [[VMAX2_I]] +} + +int32x4_t test_vmaxq_s32(int32x4_t a, int32x4_t b) { + return vmaxq_s32(a, b); + + // CIR-LABEL: vmaxq_s32 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector + + // LLVM-LABEL: test_vmaxq_s32 + // LLVM-SAME: (<4 x i32> [[a:%.*]], <4 x i32> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[b]] to <16 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> [[a]], <4 x i32> [[b]]) + // LLVM: ret <4 x i32> [[VMAX2_I]] +} + +uint8x16_t test_vmaxq_u8(uint8x16_t a, uint8x16_t b) { + return vmaxq_u8(a, b); + + // CIR-LABEL: vmaxq_u8 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector + + // LLVM-LABEL: test_vmaxq_u8 + // LLVM-SAME: (<16 x i8> [[a:%.*]], <16 x i8> [[b:%.*]]) + // LLVM: [[VMAX_I:%.*]] = call <16 x i8> @llvm.umax.v16i8(<16 x i8> [[a]], <16 x i8> [[b]]) + // LLVM: ret <16 x i8> [[VMAX_I]] +} + +uint16x8_t test_vmaxq_u16(uint16x8_t a, uint16x8_t b) { + return vmaxq_u16(a, b); + + // CIR-LABEL: vmaxq_u16 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector + + // LLVM-LABEL: test_vmaxq_u16 + // LLVM-SAME: (<8 x i16> [[a:%.*]], <8 x i16> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> [[b]] to <16 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <8 x i16> @llvm.umax.v8i16(<8 x i16> [[a]], <8 x i16> [[b]]) + // LLVM: ret <8 x i16> [[VMAX2_I]] +} + +uint32x4_t test_vmaxq_u32(uint32x4_t a, uint32x4_t b) { + return vmaxq_u32(a, b); + + // CIR-LABEL: vmaxq_u32 + // CIR: cir.binop(max, {{%.*}}, {{%.*}}) : !cir.vector + + // LLVM-LABEL: test_vmaxq_u32 + // LLVM-SAME: (<4 x i32> [[a:%.*]], <4 x i32> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> [[b]] to <16 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <4 x i32> @llvm.umax.v4i32(<4 x i32> [[a]], <4 x i32> [[b]]) + // LLVM: ret <4 x i32> [[VMAX2_I]] +} + +float32x4_t test_vmaxq_f32(float32x4_t a, float32x4_t b) { + return vmaxq_f32(a, b); + + // CIR-LABEL: vmaxq_f32 + // CIR: cir.fmaximum {{%.*}}, {{%.*}} : !cir.vector + + // LLVM-LABEL: test_vmaxq_f32 + // LLVM-SAME: (<4 x float> [[a:%.*]], <4 x float> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <4 x float> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x float> [[b]] to <16 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <4 x float> @llvm.maximum.v4f32(<4 x float> [[a]], <4 x float> [[b]]) + // LLVM: ret <4 x float> [[VMAX2_I]] +} + +float64x2_t test_vmaxq_f64(float64x2_t a, float64x2_t b) { + return vmaxq_f64(a, b); + + // CIR-LABEL: vmaxq_f64 + // CIR: cir.fmaximum {{%.*}}, {{%.*}} : !cir.vector + + // LLVM-LABEL: test_vmaxq_f64 + // LLVM-SAME: (<2 x double> [[a:%.*]], <2 x double> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[a]] to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x double> [[b]] to <16 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[a]], <2 x double> [[b]]) + // LLVM: ret <2 x double> [[VMAX2_I]] +} int8x8_t test_vmin_s8(int8x8_t a, int8x8_t b) { return vmin_s8(a, b); @@ -18586,14 +18661,19 @@ float64_t test_vaddvq_f64(float64x2_t a) { // return vabd_f64(a, b); // } -// NYI-LABEL: @test_vmax_f64( -// NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> -// NYI: [[VMAX2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmax.v1f64(<1 x double> %a, <1 x double> %b) -// NYI: ret <1 x double> [[VMAX2_I]] -// float64x1_t test_vmax_f64(float64x1_t a, float64x1_t b) { -// return vmax_f64(a, b); -// } +float64x1_t test_vmax_f64(float64x1_t a, float64x1_t b) { + return vmax_f64(a, b); + + // CIR-LABEL: vmax_f64 + // CIR: cir.fmaximum {{%.*}}, {{%.*}} : !cir.vector + + // LLVM-LABEL: test_vmax_f64 + // LLVM-SAME: (<1 x double> [[a:%.*]], <1 x double> [[b:%.*]]) + // LLVM: [[TMP0:%.*]] = bitcast <1 x double> [[a]] to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x double> [[b]] to <8 x i8> + // LLVM: [[VMAX2_I:%.*]] = call <1 x double> @llvm.maximum.v1f64(<1 x double> [[a]], <1 x double> [[b]]) + // LLVM: ret <1 x double> [[VMAX2_I]] +} // NYI-LABEL: @test_vmaxnm_f64( // NYI: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> From 4ebb958cae7ca2e9fdad91e2d28f138f1633725b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 18 Dec 2024 14:11:34 -0300 Subject: [PATCH 2180/2301] [CIR][NFC] Silence warning from recent PR --- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 50169aa849a4..068f060ca7c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -910,7 +910,7 @@ void CIRGenFunction::emitCXXDeleteExpr(const CXXDeleteExpr *E) { // we plan to handle it in LoweringPreparePass and the corresponding // ABI part. if (DeleteTy->isConstantArrayType()) { - Ptr = Ptr; + // Nothing to do here, keep it for skeleton comparison sake. } assert(convertTypeForMem(DeleteTy) == Ptr.getElementType()); From f1fee16eb807402e3c89af85415276b11521e296 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Fri, 20 Dec 2024 01:20:44 +0800 Subject: [PATCH 2181/2301] [CIR][CIRGen][TBAA] Replace hardcoded TBAA names with getTBAAName (#1242) This patch follows https://github.com/llvm/clangir/pull/1220#discussion_r1882808223 by augmenting `CIR_Type` with a new field, `tbaaName`. Specifically, it enables TableGen support for the `-gen-cir-tbaa-name-lowering` option, allowing for the generation of `getTBAAName` functions based on the `tbaaName`. This enhancement enables us to replace the hardcoded TBAA names in the `getTypeName` function with the newly generated `getTBAAName`. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 14 ++++-- .../clang/CIR/Dialect/IR/CMakeLists.txt | 4 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 19 ++++---- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 4 ++ clang/utils/TableGen/CIRLoweringEmitter.cpp | 44 +++++++++++++++++++ clang/utils/TableGen/TableGen.cpp | 6 +++ clang/utils/TableGen/TableGenBackends.h | 2 + 7 files changed, 80 insertions(+), 13 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 68b27a053176..37810b6886df 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -20,13 +20,18 @@ include "mlir/Interfaces/DataLayoutInterfaces.td" include "mlir/IR/AttrTypeBase.td" include "mlir/IR/EnumAttr.td" +// Specify the TBAA name of CIR_type +class TBAALoweringInfo { + string tbaaName = ""; +} + //===----------------------------------------------------------------------===// // CIR Types //===----------------------------------------------------------------------===// class CIR_Type traits = [], string baseCppClass = "::mlir::Type"> - : TypeDef { + : TypeDef, TBAALoweringInfo { let mnemonic = typeMnemonic; } @@ -162,6 +167,7 @@ class CIR_FloatType ]> {} def CIR_Single : CIR_FloatType<"Single", "float"> { + let tbaaName = "float"; let summary = "CIR single-precision float type"; let description = [{ Floating-point type that represents the `float` type in C/C++. Its @@ -170,6 +176,7 @@ def CIR_Single : CIR_FloatType<"Single", "float"> { } def CIR_Double : CIR_FloatType<"Double", "double"> { + let tbaaName = "double"; let summary = "CIR double-precision float type"; let description = [{ Floating-point type that represents the `double` type in C/C++. Its @@ -206,6 +213,7 @@ def CIR_FP128 : CIR_FloatType<"FP128", "f128"> { } def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { + let tbaaName = "long double"; let summary = "CIR extended-precision float type"; let description = [{ Floating-point type that represents the `long double` type in C/C++. @@ -263,7 +271,7 @@ def CIR_ComplexType : CIR_Type<"Complex", "complex", def CIR_PointerType : CIR_Type<"Pointer", "ptr", [DeclareTypeInterfaceMethods]> { - + let tbaaName = "any pointer"; let summary = "CIR pointer type"; let description = [{ `CIR.ptr` is a type returned by any op generating a pointer in C++. @@ -339,7 +347,7 @@ def CIR_DataMemberType : CIR_Type<"DataMember", "data_member", def CIR_BoolType : CIR_Type<"Bool", "bool", [DeclareTypeInterfaceMethods]> { - + let tbaaName = "bool"; let summary = "CIR bool type"; let description = [{ `cir.bool` represent's C++ bool type. diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt index 3d43b06c6217..dc0afdd2bc31 100644 --- a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt @@ -31,3 +31,7 @@ add_public_tablegen_target(MLIRCIREnumsGen) clang_tablegen(CIRBuiltinsLowering.inc -gen-cir-builtins-lowering SOURCE CIROps.td TARGET CIRBuiltinsLowering) + +clang_tablegen(CIRTBAANameLowering.inc -gen-cir-tbaa-name-lowering + SOURCE CIRTypes.td + TARGET CIRTBAANameLowering) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index fff70e137762..bb8a0696d815 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -689,19 +689,18 @@ mlir::LLVM::TBAATypeDescriptorAttr getChar(mlir::MLIRContext *ctx) { return createScalarTypeNode(ctx, "omnipotent char", getRoot(ctx), 0); } -// FIXME(cir): This should be moved and use tablegen approach -// see https://github.com/llvm/clangir/pull/1220#discussion_r1889187867 +#define GET_TBAANAME_LOWERING_FUNCTIONS_DEF +#include "clang/CIR/Dialect/IR/CIRTBAANameLowering.inc" +#undef GET_TBAANAME_LOWERING_FUNCTIONS_DEF + StringRef getTypeName(mlir::Type type) { return TypeSwitch(type) .Case([](cir::IntType ty) { return ty.getTBAATypeName(); }) - .Case([](cir::SingleType) { return "float"; }) - .Case([](cir::DoubleType) { return "double"; }) - .Case([](cir::FP80Type) { return "f80"; }) - .Case([](cir::FP128Type) { return "f128"; }) - .Case( - [](cir::LongDoubleType) { return "long double"; }) - .Case([](cir::BoolType) { return "bool"; }) - .Case([](cir::PointerType) { return "any pointer"; }) + .Case< +#define GET_TBAANAME_LOWERING_LIST +#include "clang/CIR/Dialect/IR/CIRTBAANameLowering.inc" +#undef GET_TBAANAME_LOWERING_LIST + >([](auto ty) { return getTBAAName(ty); }) .Default([](auto ty) { llvm::errs() << "unknown type: " << ty << "\n"; return "unknown"; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 48baae2ae799..2b05f51309c5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -1099,5 +1099,9 @@ class CIRToLLVMSignBitOpLowering #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_CLASSES_DECLARE +#define GET_TBAANAME_LOWERING_FUNCTIONS_DECLARE +#include "clang/CIR/Dialect/IR/CIRTBAANameLowering.inc" +#undef GET_TBAANAME_LOWERING_FUNCTIONS_DECLARE + } // namespace direct } // namespace cir diff --git a/clang/utils/TableGen/CIRLoweringEmitter.cpp b/clang/utils/TableGen/CIRLoweringEmitter.cpp index 9b71e9ab597d..11abf5a7ec71 100644 --- a/clang/utils/TableGen/CIRLoweringEmitter.cpp +++ b/clang/utils/TableGen/CIRLoweringEmitter.cpp @@ -16,6 +16,10 @@ std::string ClassDeclaration; std::string ClassDefinitions; std::string ClassList; +std::string TBAANameFunctionDeclaration; +std::string TBAANameFunctionDefinitions; +std::string TBAANameClassList; + void GenerateLowering(const Record *Operation) { using namespace std::string_literals; std::string Name = Operation->getName().str(); @@ -68,6 +72,24 @@ CIR)C++" + ClassList += ", CIR" + Name + "Lowering\n"; } + +void GenerateTBAANameLowering(const Record *def) { + using namespace std::string_literals; + std::string Name = def->getValueAsString("cppClassName").str(); + std::string TBAAName = def->getValueAsString("tbaaName").str(); + TBAANameFunctionDeclaration += "llvm::StringRef getTBAAName(cir::"; + TBAANameFunctionDeclaration += Name + " ty);"; + TBAANameFunctionDeclaration += "\n"; + TBAANameFunctionDefinitions += "llvm::StringRef getTBAAName(cir::"; + TBAANameFunctionDefinitions += Name + " ty) {"; + TBAANameFunctionDefinitions += " return \"" + TBAAName + "\";"; + TBAANameFunctionDefinitions += "}"; + TBAANameFunctionDefinitions += "\n"; + TBAANameClassList += "\n"; + TBAANameClassList += "cir::"; + TBAANameClassList += Name; + TBAANameClassList += ", "; +} } // namespace void clang::EmitCIRBuiltinsLowering(const RecordKeeper &Records, @@ -85,3 +107,25 @@ void clang::EmitCIRBuiltinsLowering(const RecordKeeper &Records, << ClassDefinitions << "\n#endif\n"; OS << "#ifdef GET_BUILTIN_LOWERING_LIST\n" << ClassList << "\n#endif\n"; } + +void clang::EmitCIRTBAANameLowering(const RecordKeeper &Records, + raw_ostream &OS) { + emitSourceFileHeader("Lowering of ClangIR TBAA Name", OS); + + for (const auto *Builtin : + Records.getAllDerivedDefinitions("TBAALoweringInfo")) { + if (!Builtin->getValueAsString("tbaaName").empty()) + GenerateTBAANameLowering(Builtin); + } + + OS << "#ifdef GET_TBAANAME_LOWERING_FUNCTIONS_DECLARE\n" + << TBAANameFunctionDeclaration << "\n#endif\n"; + OS << "#ifdef GET_TBAANAME_LOWERING_FUNCTIONS_DEF\n" + << TBAANameFunctionDefinitions << "\n#endif\n"; + // remove last `, ` + if (!TBAANameClassList.empty()) { + TBAANameClassList.resize(TBAANameClassList.size() - 2); + } + OS << "#ifdef GET_TBAANAME_LOWERING_LIST\n" + << TBAANameClassList << "\n#endif\n"; +} diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp index a2efc3779fc4..3e6f3cf096d0 100644 --- a/clang/utils/TableGen/TableGen.cpp +++ b/clang/utils/TableGen/TableGen.cpp @@ -26,6 +26,7 @@ enum ActionType { PrintRecords, DumpJSON, GenCIRBuiltinsLowering, + GenCIRTBAANameLowering, GenClangAttrClasses, GenClangAttrParserStringSwitches, GenClangAttrSubjectMatchRulesParserStringSwitches, @@ -125,6 +126,8 @@ cl::opt Action( clEnumValN(GenCIRBuiltinsLowering, "gen-cir-builtins-lowering", "Generate lowering of ClangIR builtins to equivalent LLVM " "IR builtins"), + clEnumValN(GenCIRTBAANameLowering, "gen-cir-tbaa-name-lowering", + "Generate lowering of ClangIR TBAA Name"), clEnumValN(GenClangAttrClasses, "gen-clang-attr-classes", "Generate clang attribute clases"), clEnumValN(GenClangAttrParserStringSwitches, @@ -337,6 +340,9 @@ bool ClangTableGenMain(raw_ostream &OS, const RecordKeeper &Records) { case GenCIRBuiltinsLowering: EmitCIRBuiltinsLowering(Records, OS); break; + case GenCIRTBAANameLowering: + EmitCIRTBAANameLowering(Records, OS); + break; case GenClangAttrClasses: EmitClangAttrClass(Records, OS); break; diff --git a/clang/utils/TableGen/TableGenBackends.h b/clang/utils/TableGen/TableGenBackends.h index cefdb5611ad8..7c1f92e5cde7 100644 --- a/clang/utils/TableGen/TableGenBackends.h +++ b/clang/utils/TableGen/TableGenBackends.h @@ -26,6 +26,8 @@ namespace clang { void EmitCIRBuiltinsLowering(const llvm::RecordKeeper &RK, llvm::raw_ostream &OS); +void EmitCIRTBAANameLowering(const llvm::RecordKeeper &RK, + llvm::raw_ostream &OS); void EmitClangDeclContext(const llvm::RecordKeeper &RK, llvm::raw_ostream &OS); /** @param PriorizeIfSubclassOf These classes should be prioritized in the output. From e8814553fc0670328eeb925929b129e3775b3261 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 19 Dec 2024 20:37:43 +0300 Subject: [PATCH 2182/2301] [CIR][CodeGen] fixes access to globals with bitfields (#1244) This PR adds a bitcast when we rewrite globals type. Previously we just set a new type and it worked. But recently I started to test ClangIR with CSmith in order to find some run time bugs and faced with the next problem. ``` typedef struct { int x : 15; uint8_t y; } S; S g = { -12, 254}; int main() { printf("%d\n", g.y); return 0; } ``` The output for this program is ... 127 but not 254! The reason is that first global var is created with the type of struct `S`, then `get_member` operation is generated with index `1` and then after, the type of the global is rewritten - I assume because of the anon struct created on the right side in the initialization. But the `get_member` operation still wants to access to the field at index `1` and get a wrong byte. If we change the `y` type to `int` we will fail on the verification stage. But in the example above it's a run time error! This is why I suggest to add a bitcast once we have to rewrite the global type. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 7 +++++++ clang/test/CIR/CodeGen/bitfields.c | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 81d60477cae0..2a2557f1fdda 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -814,6 +814,13 @@ void CIRGenModule::replaceGlobal(cir::GlobalOp Old, cir::GlobalOp New) { auto UseOpResultValue = GGO.getAddr(); UseOpResultValue.setType( cir::PointerType::get(&getMLIRContext(), NewTy)); + + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPointAfter(UserOp); + mlir::Type ptrTy = builder.getPointerTo(OldTy); + mlir::Value cast = + builder.createBitcast(GGO->getLoc(), UseOpResultValue, ptrTy); + UseOpResultValue.replaceAllUsesExcept(cast, {cast.getDefiningOp()}); } } } diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index 2671523cc4ca..ded089655f59 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -54,6 +54,7 @@ typedef struct { } U; // CHECK: !ty_D = !cir.struct +// CHECK: !ty_G = !cir.struct // CHECK: !ty_T = !cir.struct // CHECK: !ty_anon2E0_ = !cir.struct // CHECK: !ty_anon_struct = !cir.struct @@ -129,3 +130,20 @@ void createU() { void createD() { D d = {1,2,3}; } + +typedef struct { + int x : 15; + int y ; +} G; + +// CHECK: cir.global external @g = #cir.const_struct<{#cir.int<133> : !u8i, #cir.int<127> : !u8i, #cir.int<254> : !s32i}> : !ty_anon_struct +G g = { -123, 254UL}; + +// CHECK: cir.func {{.*@get_y}} +// CHECK: %[[V1:.*]] = cir.get_global @g : !cir.ptr +// CHECK: %[[V2:.*]] = cir.cast(bitcast, %[[V1]] : !cir.ptr), !cir.ptr +// CHECK: %[[V3:.*]] = cir.get_member %[[V2]][1] {name = "y"} : !cir.ptr -> !cir.ptr +// CHECK: cir.load %[[V3]] : !cir.ptr, !s32i +int get_y() { + return g.y; +} From 7de25fa7df0ab7bc376b6e26ef4d18f2e9f33492 Mon Sep 17 00:00:00 2001 From: Guojin Date: Thu, 19 Dec 2024 12:40:58 -0500 Subject: [PATCH 2183/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vaddvq_s64/s32, neon_vaddv_s32/u32, (#1240) Co-authored-by: Bruno Cardoso Lopes --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 10 +++--- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 24 +++++++++++++ clang/test/CIR/CodeGen/AArch64/neon.c | 34 ++++++++++++------- 3 files changed, 50 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index d7ca71093f34..9a3a8bac3dd0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2676,19 +2676,17 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr( case NEON::BI__builtin_neon_vaddlvq_u32: return emitNeonCall(builder, {argTy}, ops, "aarch64.neon.uaddlv", resultTy, loc); - case NEON::BI__builtin_neon_vaddv_s32: - llvm_unreachable(" neon_vaddv_s32 NYI "); - case NEON::BI__builtin_neon_vaddv_u32: - llvm_unreachable(" neon_vaddv_u32 NYI "); case NEON::BI__builtin_neon_vaddv_f32: case NEON::BI__builtin_neon_vaddvq_f32: case NEON::BI__builtin_neon_vaddvq_f64: return emitNeonCall(builder, {argTy}, ops, "aarch64.neon.faddv", resultTy, loc); + case NEON::BI__builtin_neon_vaddv_s32: case NEON::BI__builtin_neon_vaddvq_s32: - llvm_unreachable(" neon_vaddvq_s32 NYI "); case NEON::BI__builtin_neon_vaddvq_s64: - llvm_unreachable(" neon_vaddvq_s64 NYI "); + return emitNeonCall(builder, {argTy}, ops, "aarch64.neon.saddv", resultTy, + loc); + case NEON::BI__builtin_neon_vaddv_u32: case NEON::BI__builtin_neon_vaddvq_u32: case NEON::BI__builtin_neon_vaddvq_u64: return emitNeonCall(builder, {argTy}, ops, "aarch64.neon.uaddv", resultTy, diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index b04b9ecb06d1..92a97831d52a 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -952,3 +952,27 @@ uint64_t test_vaddvq_u64(uint64x2_t a) { // LLVM: [[VADDVQ_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> {{%.*}}) // LLVM: ret i64 [[VADDVQ_U64_I]] } + +int32_t test_vaddvq_s32(int32x4_t a) { + return vaddvq_s32(a); + + // CIR-LABEL: vaddvq_s32 + // CIR: cir.llvm.intrinsic "aarch64.neon.saddv" {{%.*}} : (!cir.vector) -> !s32i + + // LLVM-LABEL: test_vaddvq_s32 + // LLVM-SAME: (<4 x i32> [[a:%.*]]) + // LLVM: [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> [[a]]) + // LLVM: ret i32 [[VADDVQ_S32_I]] +} + +int64_t test_vaddvq_s64(int64x2_t a) { + return vaddvq_s64(a); + + // CIR-LABEL: vaddvq_s64 + // CIR: cir.llvm.intrinsic "aarch64.neon.saddv" {{%.*}} : (!cir.vector) -> !s64i + + // LLVM-LABEL: test_vaddvq_s64 + // LLVM-SAME: (<2 x i64> [[a:%.*]]) + // LLVM: [[VADDVQ_S64_I:%.*]] = call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> [[a]]) + // LLVM: ret i64 [[VADDVQ_S64_I]] +} diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index adb4dc5a80e7..1abc935f95b0 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -18967,19 +18967,29 @@ float64x1_t test_vmax_f64(float64x1_t a, float64x1_t b) { // return vmaxv_u32(a); // } -// NYI-LABEL: @test_vaddv_s32( -// NYI: [[VADDV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> %a) -// NYI: ret i32 [[VADDV_S32_I]] -// int32_t test_vaddv_s32(int32x2_t a) { -// return vaddv_s32(a); -// } +int32_t test_vaddv_s32(int32x2_t a) { + return vaddv_s32(a); -// NYI-LABEL: @test_vaddv_u32( -// NYI: [[VADDV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32> %a) -// NYI: ret i32 [[VADDV_U32_I]] -// uint32_t test_vaddv_u32(uint32x2_t a) { -// return vaddv_u32(a); -// } + // CIR-LABEL: vaddv_s32 + // CIR: cir.llvm.intrinsic "aarch64.neon.saddv" {{%.*}} : (!cir.vector) -> !s32i + + // LLVM-LABEL: test_vaddv_s32 + // LLVM-SAME: (<2 x i32> [[a:%.*]]) + // LLVM: [[VADDV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> [[a]]) + // LLVM: ret i32 [[VADDV_S32_I]] +} + +uint32_t test_vaddv_u32(uint32x2_t a) { + return vaddv_u32(a); + + // CIR-LABEL: vaddv_u32 + // CIR: cir.llvm.intrinsic "aarch64.neon.uaddv" {{%.*}} : (!cir.vector) -> !u32i + + // LLVM-LABEL: test_vaddv_u32 + // LLVM-SAME: (<2 x i32> [[a:%.*]]) + // LLVM: [[VADDV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32> [[a]]) + // LLVM: ret i32 [[VADDV_U32_I]] +} // NYI-LABEL: @test_vaddlv_s32( // NYI: [[VADDLV_S32_I:%.*]] = call i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32> %a) From 565f7766e00ea035ed52c58cb364f7bf6f16f975 Mon Sep 17 00:00:00 2001 From: Nathan Lanza Date: Thu, 19 Dec 2024 23:50:12 -0500 Subject: [PATCH 2184/2301] [CIR] Introduce a new dir in tests for known crashes to fix We figure it would be nice to have a common place with all our known crashes that is tracked by git and is actively verified whether or not we can now support the crashes by lit. It can act as our source of truth for known failures and also being potential good first tasks for new developers. Add a simple test case of a known crash that involves copying a struct in a catch. Reviewers: smeenai, bcardosolopes Reviewed By: bcardosolopes Pull Request: https://github.com/llvm/clangir/pull/1243 --- clang/test/CIR/crashes/copy-on-catch.cpp | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 clang/test/CIR/crashes/copy-on-catch.cpp diff --git a/clang/test/CIR/crashes/copy-on-catch.cpp b/clang/test/CIR/crashes/copy-on-catch.cpp new file mode 100644 index 000000000000..9bf11f7a2408 --- /dev/null +++ b/clang/test/CIR/crashes/copy-on-catch.cpp @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir -fcxx-exceptions -fexceptions | FileCheck %s +// XFAIL: * + +// CHECK: cir.func + +struct E {}; +E e; + +void throws() { throw e; } + +void bar() { + try { + throws(); + } catch (E e) { + } +} From 87ab7ea57650c56c817f5472f67dc933f69ebac1 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Fri, 20 Dec 2024 20:33:23 +0300 Subject: [PATCH 2185/2301] [CIR][Lowering] fixes return value for = operator for bitfields (#1247) Basically that is - the return value for `=` operator for bitfield assignment is wrong now. For example, the next function returns `7` for 3 bit bit field, though it should be `-1`: ``` int get_a(T *t) { return (t->a = 7); } ``` This PR fix it. Actually, the bug was in the lowering - the integer cast is applied in the wrong place (in comparison with the original codegen). --- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 7 ++++--- clang/test/CIR/CodeGen/bitfields.c | 12 ++++++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index bb8a0696d815..c64df056a5f2 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3650,9 +3650,6 @@ mlir::LogicalResult CIRToLLVMSetBitfieldOpLowering::matchAndRewrite( auto resultTy = getTypeConverter()->convertType(op.getType()); - resultVal = createIntCast(rewriter, resultVal, - mlir::cast(resultTy)); - if (info.getIsSigned()) { assert(size <= storageSize); unsigned highBits = storageSize - size; @@ -3663,6 +3660,10 @@ mlir::LogicalResult CIRToLLVMSetBitfieldOpLowering::matchAndRewrite( } } + resultVal = createIntCast(rewriter, resultVal, + mlir::cast(resultTy), + info.getIsSigned()); + rewriter.replaceOp(op, resultVal); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index ded089655f59..5f13b424daa4 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s struct __long { struct __attribute__((__packed__)) { @@ -131,6 +133,16 @@ void createD() { D d = {1,2,3}; } +// check the -1 is stored to the ret value +// LLVM: define dso_local i32 {{@.*get_a.*}} +// LLVM: %[[V1:.*]] = alloca i32 +// LLVM: store i32 -1, ptr %[[V1]], align 4 +// LLVM: %[[V2:.*]] = load i32, ptr %[[V1]], align 4 +// LLVM: ret i32 %[[V2:.*]] +int get_a(T *t) { + return (t->a = 7); +} + typedef struct { int x : 15; int y ; From ad0f63a5159dca0582d8aee5d1ce3f5f8325071b Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 Dec 2024 14:54:19 -0300 Subject: [PATCH 2186/2301] Revert "[CIR][CIRGen][TBAA] Replace hardcoded TBAA names with getTBAAName (#1242)" Need to revert https://github.com/llvm/clangir/pull/1220 This reverts commit 445f2a522b1243dd60ad1f8e3a979be046104b74. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 14 ++---- .../clang/CIR/Dialect/IR/CMakeLists.txt | 4 -- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 19 ++++---- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 4 -- clang/utils/TableGen/CIRLoweringEmitter.cpp | 44 ------------------- clang/utils/TableGen/TableGen.cpp | 6 --- clang/utils/TableGen/TableGenBackends.h | 2 - 7 files changed, 13 insertions(+), 80 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 37810b6886df..68b27a053176 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -20,18 +20,13 @@ include "mlir/Interfaces/DataLayoutInterfaces.td" include "mlir/IR/AttrTypeBase.td" include "mlir/IR/EnumAttr.td" -// Specify the TBAA name of CIR_type -class TBAALoweringInfo { - string tbaaName = ""; -} - //===----------------------------------------------------------------------===// // CIR Types //===----------------------------------------------------------------------===// class CIR_Type traits = [], string baseCppClass = "::mlir::Type"> - : TypeDef, TBAALoweringInfo { + : TypeDef { let mnemonic = typeMnemonic; } @@ -167,7 +162,6 @@ class CIR_FloatType ]> {} def CIR_Single : CIR_FloatType<"Single", "float"> { - let tbaaName = "float"; let summary = "CIR single-precision float type"; let description = [{ Floating-point type that represents the `float` type in C/C++. Its @@ -176,7 +170,6 @@ def CIR_Single : CIR_FloatType<"Single", "float"> { } def CIR_Double : CIR_FloatType<"Double", "double"> { - let tbaaName = "double"; let summary = "CIR double-precision float type"; let description = [{ Floating-point type that represents the `double` type in C/C++. Its @@ -213,7 +206,6 @@ def CIR_FP128 : CIR_FloatType<"FP128", "f128"> { } def CIR_LongDouble : CIR_FloatType<"LongDouble", "long_double"> { - let tbaaName = "long double"; let summary = "CIR extended-precision float type"; let description = [{ Floating-point type that represents the `long double` type in C/C++. @@ -271,7 +263,7 @@ def CIR_ComplexType : CIR_Type<"Complex", "complex", def CIR_PointerType : CIR_Type<"Pointer", "ptr", [DeclareTypeInterfaceMethods]> { - let tbaaName = "any pointer"; + let summary = "CIR pointer type"; let description = [{ `CIR.ptr` is a type returned by any op generating a pointer in C++. @@ -347,7 +339,7 @@ def CIR_DataMemberType : CIR_Type<"DataMember", "data_member", def CIR_BoolType : CIR_Type<"Bool", "bool", [DeclareTypeInterfaceMethods]> { - let tbaaName = "bool"; + let summary = "CIR bool type"; let description = [{ `cir.bool` represent's C++ bool type. diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt index dc0afdd2bc31..3d43b06c6217 100644 --- a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt @@ -31,7 +31,3 @@ add_public_tablegen_target(MLIRCIREnumsGen) clang_tablegen(CIRBuiltinsLowering.inc -gen-cir-builtins-lowering SOURCE CIROps.td TARGET CIRBuiltinsLowering) - -clang_tablegen(CIRTBAANameLowering.inc -gen-cir-tbaa-name-lowering - SOURCE CIRTypes.td - TARGET CIRTBAANameLowering) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index c64df056a5f2..87336d2b7606 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -689,18 +689,19 @@ mlir::LLVM::TBAATypeDescriptorAttr getChar(mlir::MLIRContext *ctx) { return createScalarTypeNode(ctx, "omnipotent char", getRoot(ctx), 0); } -#define GET_TBAANAME_LOWERING_FUNCTIONS_DEF -#include "clang/CIR/Dialect/IR/CIRTBAANameLowering.inc" -#undef GET_TBAANAME_LOWERING_FUNCTIONS_DEF - +// FIXME(cir): This should be moved and use tablegen approach +// see https://github.com/llvm/clangir/pull/1220#discussion_r1889187867 StringRef getTypeName(mlir::Type type) { return TypeSwitch(type) .Case([](cir::IntType ty) { return ty.getTBAATypeName(); }) - .Case< -#define GET_TBAANAME_LOWERING_LIST -#include "clang/CIR/Dialect/IR/CIRTBAANameLowering.inc" -#undef GET_TBAANAME_LOWERING_LIST - >([](auto ty) { return getTBAAName(ty); }) + .Case([](cir::SingleType) { return "float"; }) + .Case([](cir::DoubleType) { return "double"; }) + .Case([](cir::FP80Type) { return "f80"; }) + .Case([](cir::FP128Type) { return "f128"; }) + .Case( + [](cir::LongDoubleType) { return "long double"; }) + .Case([](cir::BoolType) { return "bool"; }) + .Case([](cir::PointerType) { return "any pointer"; }) .Default([](auto ty) { llvm::errs() << "unknown type: " << ty << "\n"; return "unknown"; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 2b05f51309c5..48baae2ae799 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -1099,9 +1099,5 @@ class CIRToLLVMSignBitOpLowering #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" #undef GET_BUILTIN_LOWERING_CLASSES_DECLARE -#define GET_TBAANAME_LOWERING_FUNCTIONS_DECLARE -#include "clang/CIR/Dialect/IR/CIRTBAANameLowering.inc" -#undef GET_TBAANAME_LOWERING_FUNCTIONS_DECLARE - } // namespace direct } // namespace cir diff --git a/clang/utils/TableGen/CIRLoweringEmitter.cpp b/clang/utils/TableGen/CIRLoweringEmitter.cpp index 11abf5a7ec71..9b71e9ab597d 100644 --- a/clang/utils/TableGen/CIRLoweringEmitter.cpp +++ b/clang/utils/TableGen/CIRLoweringEmitter.cpp @@ -16,10 +16,6 @@ std::string ClassDeclaration; std::string ClassDefinitions; std::string ClassList; -std::string TBAANameFunctionDeclaration; -std::string TBAANameFunctionDefinitions; -std::string TBAANameClassList; - void GenerateLowering(const Record *Operation) { using namespace std::string_literals; std::string Name = Operation->getName().str(); @@ -72,24 +68,6 @@ CIR)C++" + ClassList += ", CIR" + Name + "Lowering\n"; } - -void GenerateTBAANameLowering(const Record *def) { - using namespace std::string_literals; - std::string Name = def->getValueAsString("cppClassName").str(); - std::string TBAAName = def->getValueAsString("tbaaName").str(); - TBAANameFunctionDeclaration += "llvm::StringRef getTBAAName(cir::"; - TBAANameFunctionDeclaration += Name + " ty);"; - TBAANameFunctionDeclaration += "\n"; - TBAANameFunctionDefinitions += "llvm::StringRef getTBAAName(cir::"; - TBAANameFunctionDefinitions += Name + " ty) {"; - TBAANameFunctionDefinitions += " return \"" + TBAAName + "\";"; - TBAANameFunctionDefinitions += "}"; - TBAANameFunctionDefinitions += "\n"; - TBAANameClassList += "\n"; - TBAANameClassList += "cir::"; - TBAANameClassList += Name; - TBAANameClassList += ", "; -} } // namespace void clang::EmitCIRBuiltinsLowering(const RecordKeeper &Records, @@ -107,25 +85,3 @@ void clang::EmitCIRBuiltinsLowering(const RecordKeeper &Records, << ClassDefinitions << "\n#endif\n"; OS << "#ifdef GET_BUILTIN_LOWERING_LIST\n" << ClassList << "\n#endif\n"; } - -void clang::EmitCIRTBAANameLowering(const RecordKeeper &Records, - raw_ostream &OS) { - emitSourceFileHeader("Lowering of ClangIR TBAA Name", OS); - - for (const auto *Builtin : - Records.getAllDerivedDefinitions("TBAALoweringInfo")) { - if (!Builtin->getValueAsString("tbaaName").empty()) - GenerateTBAANameLowering(Builtin); - } - - OS << "#ifdef GET_TBAANAME_LOWERING_FUNCTIONS_DECLARE\n" - << TBAANameFunctionDeclaration << "\n#endif\n"; - OS << "#ifdef GET_TBAANAME_LOWERING_FUNCTIONS_DEF\n" - << TBAANameFunctionDefinitions << "\n#endif\n"; - // remove last `, ` - if (!TBAANameClassList.empty()) { - TBAANameClassList.resize(TBAANameClassList.size() - 2); - } - OS << "#ifdef GET_TBAANAME_LOWERING_LIST\n" - << TBAANameClassList << "\n#endif\n"; -} diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp index 3e6f3cf096d0..a2efc3779fc4 100644 --- a/clang/utils/TableGen/TableGen.cpp +++ b/clang/utils/TableGen/TableGen.cpp @@ -26,7 +26,6 @@ enum ActionType { PrintRecords, DumpJSON, GenCIRBuiltinsLowering, - GenCIRTBAANameLowering, GenClangAttrClasses, GenClangAttrParserStringSwitches, GenClangAttrSubjectMatchRulesParserStringSwitches, @@ -126,8 +125,6 @@ cl::opt Action( clEnumValN(GenCIRBuiltinsLowering, "gen-cir-builtins-lowering", "Generate lowering of ClangIR builtins to equivalent LLVM " "IR builtins"), - clEnumValN(GenCIRTBAANameLowering, "gen-cir-tbaa-name-lowering", - "Generate lowering of ClangIR TBAA Name"), clEnumValN(GenClangAttrClasses, "gen-clang-attr-classes", "Generate clang attribute clases"), clEnumValN(GenClangAttrParserStringSwitches, @@ -340,9 +337,6 @@ bool ClangTableGenMain(raw_ostream &OS, const RecordKeeper &Records) { case GenCIRBuiltinsLowering: EmitCIRBuiltinsLowering(Records, OS); break; - case GenCIRTBAANameLowering: - EmitCIRTBAANameLowering(Records, OS); - break; case GenClangAttrClasses: EmitClangAttrClass(Records, OS); break; diff --git a/clang/utils/TableGen/TableGenBackends.h b/clang/utils/TableGen/TableGenBackends.h index 7c1f92e5cde7..cefdb5611ad8 100644 --- a/clang/utils/TableGen/TableGenBackends.h +++ b/clang/utils/TableGen/TableGenBackends.h @@ -26,8 +26,6 @@ namespace clang { void EmitCIRBuiltinsLowering(const llvm::RecordKeeper &RK, llvm::raw_ostream &OS); -void EmitCIRTBAANameLowering(const llvm::RecordKeeper &RK, - llvm::raw_ostream &OS); void EmitClangDeclContext(const llvm::RecordKeeper &RK, llvm::raw_ostream &OS); /** @param PriorizeIfSubclassOf These classes should be prioritized in the output. From ea5cb23e8f9b8d8a0fd353c374047f7133e4760a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 Dec 2024 14:54:46 -0300 Subject: [PATCH 2187/2301] Revert "[CIR][CIRGen][TBAA] Add support for scalar types (#1220)" Fix https://github.com/llvm/clangir/issues/1246 This reverts commit c94c04e5a104283e984c83b3b3f996de59f86673. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 6 +- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 8 +- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +- .../clang/CIR/Dialect/IR/CIRTBAAAttrs.td | 38 ----- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 29 ---- clang/include/clang/CIR/MissingFeatures.h | 7 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- clang/lib/CIR/CodeGen/CIRGenTBAA.cpp | 128 +-------------- clang/lib/CIR/CodeGen/CIRGenTBAA.h | 6 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 13 +- clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 83 +--------- clang/test/CIR/CodeGen/const-alloca.cpp | 6 +- clang/test/CIR/CodeGen/tbaa-scalar.c | 148 ------------------ clang/test/CIR/CodeGen/tbaa-struct.cpp | 35 ----- clang/test/CIR/CodeGen/tbaa-vptr.cpp | 18 --- clang/test/CIR/CodeGen/tbaa.c | 22 +++ 20 files changed, 58 insertions(+), 505 deletions(-) delete mode 100644 clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td delete mode 100644 clang/test/CIR/CodeGen/tbaa-scalar.c delete mode 100644 clang/test/CIR/CodeGen/tbaa-struct.cpp delete mode 100644 clang/test/CIR/CodeGen/tbaa-vptr.cpp create mode 100644 clang/test/CIR/CodeGen/tbaa.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 48d1f1faf53f..771b7dd33cd4 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -170,7 +170,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { /*alignment=*/intAttr, /*mem_order=*/ cir::MemOrderAttr{}, - /*tbaa=*/cir::TBAAAttr{}); + /*tbaa=*/mlir::ArrayAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr, @@ -357,7 +357,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { val.getType()) dst = createPtrBitcast(dst, val.getType()); return create(loc, val, dst, _volatile, align, order, - /*tbaa=*/cir::TBAAAttr{}); + /*tbaa=*/mlir::ArrayAttr{}); } mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, @@ -405,7 +405,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { cir::CopyOp createCopy(mlir::Value dst, mlir::Value src, bool isVolatile = false) { return create(dst.getLoc(), dst, src, isVolatile, - /*tbaa=*/cir::TBAAAttr{}); + /*tbaa=*/mlir::ArrayAttr{}); } cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index e968d4c27fd5..e54b52b96c91 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -24,9 +24,8 @@ include "clang/CIR/Interfaces/ASTAttrInterfaces.td" // CIR Attrs //===----------------------------------------------------------------------===// -class CIR_Attr traits = [], - string baseCppClass = "::mlir::Attribute"> - : AttrDef { +class CIR_Attr traits = []> + : AttrDef { let mnemonic = attrMnemonic; } @@ -1295,7 +1294,8 @@ def GlobalAnnotationValuesAttr : CIR_Attr<"GlobalAnnotationValues", let genVerifyDecl = 1; } -include "clang/CIR/Dialect/IR/CIRTBAAAttrs.td" +def CIR_TBAAAttr : CIR_Attr<"TBAA", "tbaa", []> { +} include "clang/CIR/Dialect/IR/CIROpenCLAttrs.td" diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 8d85482703e6..52e1013b88a6 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -588,7 +588,7 @@ def LoadOp : CIR_Op<"load", [ UnitAttr:$is_volatile, OptionalAttr:$alignment, OptionalAttr:$mem_order, - OptionalAttr:$tbaa + OptionalAttr:$tbaa ); let results = (outs CIR_AnyType:$result); @@ -657,7 +657,7 @@ def StoreOp : CIR_Op<"store", [ UnitAttr:$is_volatile, OptionalAttr:$alignment, OptionalAttr:$mem_order, - OptionalAttr:$tbaa); + OptionalAttr:$tbaa); let assemblyFormat = [{ (`volatile` $is_volatile^)? @@ -4068,7 +4068,7 @@ def CopyOp : CIR_Op<"copy", let arguments = (ins Arg:$dst, Arg:$src, UnitAttr:$is_volatile, - OptionalAttr:$tbaa); + OptionalAttr:$tbaa); let summary = "Copies contents from a CIR pointer to another"; let description = [{ Given two CIR pointers, `src` and `dst`, `cir.copy` will copy the memory diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td deleted file mode 100644 index d46880e8541e..000000000000 --- a/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td +++ /dev/null @@ -1,38 +0,0 @@ -//===----------------------------------------------------------------------===// -// TBAAAttr -//===----------------------------------------------------------------------===// - -def CIR_TBAAAttr : CIR_Attr<"TBAA", "tbaa", []> { - let summary = "CIR dialect TBAA base attribute"; -} - -//===----------------------------------------------------------------------===// -// TBAAScalarAttr -//===----------------------------------------------------------------------===// - -def CIR_TBAAScalarAttr : CIR_Attr<"TBAAScalar", "tbaa_scalar", [], "TBAAAttr"> { - let summary = "Describes a scalar type in TBAA with an identifier."; - - let parameters = (ins CIR_AnyScalarType : $type); - - let description = [{ - Define a TBAA attribute. - - Example: - ```mlir - // CIR_TBAAScalarAttr - #tbaa_scalar = #cir.tbaa_scalar - #tbaa_scalar1 = #cir.tbaa_scalar - ``` - - See the following link for more details: - https://llvm.org/docs/LangRef.html#tbaa-metadata - }]; - - let assemblyFormat = "`<` struct(params) `>`"; -} - -def CIR_AnyTBAAAttr : AnyAttrOf<[ - CIR_TBAAAttr, - CIR_TBAAScalarAttr -]>; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 68b27a053176..d3f49716301d 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -74,28 +74,6 @@ def CIR_IntType : CIR_Type<"Int", "int", static bool isValidPrimitiveIntBitwidth(unsigned width) { return width == 8 || width == 16 || width == 32 || width == 64; } - - llvm::StringRef getTBAATypeName() const { - switch (getWidth()) { - case 1: - case 8: { - return "omnipotent char"; - } - case 16: { - return "short"; - } - case 32: { - return "int"; - } - case 64: { - return "long"; - } - default: { - llvm::errs() << "unknown type: " << *this << "\n"; - return "unknown"; - } - } - } }]; let genVerifyDecl = 1; } @@ -631,11 +609,4 @@ def CIR_AnyType : AnyTypeOf<[ CIR_ComplexType ]>; -def CIR_AnyScalarType : AnyTypeOf<[ - CIR_IntType, CIR_PointerType, CIR_DataMemberType, CIR_MethodType, - CIR_BoolType, CIR_ArrayType, CIR_VectorType, CIR_FuncType, CIR_VoidType, - CIR_ExceptionType, CIR_AnyFloat, CIR_FP16, CIR_BFloat16, - CIR_ComplexType -]>; - #endif // MLIR_CIR_DIALECT_CIR_TYPES diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 346719691a5d..c0707d687fca 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -58,12 +58,7 @@ struct MissingFeatures { // sanitizer related type check features static bool emitTypeCheck() { return false; } static bool tbaa() { return false; } - static bool tbaaStruct() { return false; } - static bool tbaaTagForStruct() { return false; } - static bool tbaaVTablePtr() { return false; } - static bool tbaaIncompleteType() { return false; } - static bool tbaaMergeTBAAInfo() { return false; } - static bool tbaaMayAlias() { return false; } + static bool tbaa_struct() { return false; } static bool cleanups() { return false; } static bool emitNullabilityCheck() { return false; } static bool ptrAuth() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 3019ca8ef62b..28be733f62d7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -839,7 +839,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return create( loc, addr.getElementType(), addr.getPointer(), /*isDeref=*/false, /*is_volatile=*/isVolatile, /*alignment=*/mlir::IntegerAttr{}, - /*mem_order=*/cir::MemOrderAttr{}, /*tbaa=*/cir::TBAAAttr{}); + /*mem_order=*/cir::MemOrderAttr{}, /*tbaa=*/mlir::ArrayAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 831e2e8c3ae6..44e214fb1f55 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -1722,7 +1722,7 @@ void CIRGenFunction::emitAggregateCopy(LValue Dest, LValue Src, QualType Ty, // Determine the metadata to describe the position of any padding in this // memcpy, as well as the TBAA tags for the members of the struct, in case // the optimizer wishes to expand it in to scalar memory operations. - assert(!cir::MissingFeatures::tbaaStruct() && "tbaa.struct NYI"); + assert(!cir::MissingFeatures::tbaa_struct() && "tbaa.struct NYI"); if (CGM.getCodeGenOpts().NewStructPathTBAA) { TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer( Dest.getTBAAInfo(), Src.getTBAAInfo()); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 2a2557f1fdda..b35ef11c7782 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -3995,7 +3995,7 @@ cir::TBAAAttr CIRGenModule::getTBAABaseTypeInfo(QualType QTy) { return tbaa->getBaseTypeInfo(QTy); } -cir::TBAAAttr CIRGenModule::getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo) { +mlir::ArrayAttr CIRGenModule::getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo) { if (!tbaa) { return nullptr; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index dd8a0c98b081..905754a4ad3a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -525,7 +525,7 @@ class CIRGenModule : public CIRGenTypeCache { /// type is not suitable for use in TBAA access tags. cir::TBAAAttr getTBAABaseTypeInfo(QualType QTy); - cir::TBAAAttr getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo); + mlir::ArrayAttr getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo); /// Get merged TBAA information for the purposes of type casts. TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp index ce2969d130ff..a6efc05e4110 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp @@ -1,12 +1,11 @@ #include "CIRGenTBAA.h" +#include "CIRGenCXXABI.h" #include "CIRGenTypes.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "clang/AST/ASTContext.h" #include "clang/AST/RecordLayout.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" -#include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" namespace clang::CIRGen { @@ -22,159 +21,44 @@ CIRGenTBAA::CIRGenTBAA(mlir::MLIRContext *mlirContext, : mlirContext(mlirContext), astContext(astContext), types(types), moduleOp(moduleOp), codeGenOpts(codeGenOpts), features(features) {} -cir::TBAAAttr CIRGenTBAA::getChar() { - return cir::TBAAScalarAttr::get(mlirContext, - cir::IntType::get(mlirContext, 1, true)); -} - -static bool typeHasMayAlias(clang::QualType qty) { - // Tagged types have declarations, and therefore may have attributes. - if (auto *td = qty->getAsTagDecl()) - if (td->hasAttr()) - return true; - - // Also look for may_alias as a declaration attribute on a typedef. - // FIXME: We should follow GCC and model may_alias as a type attribute - // rather than as a declaration attribute. - while (auto *tt = qty->getAs()) { - if (tt->getDecl()->hasAttr()) - return true; - qty = tt->desugar(); - } - return false; -} - -/// Check if the given type is a valid base type to be used in access tags. -static bool isValidBaseType(clang::QualType qty) { - if (const clang::RecordType *tty = qty->getAs()) { - const clang::RecordDecl *rd = tty->getDecl()->getDefinition(); - // Incomplete types are not valid base access types. - if (!rd) - return false; - if (rd->hasFlexibleArrayMember()) - return false; - // rd can be struct, union, class, interface or enum. - // For now, we only handle struct and class. - if (rd->isStruct() || rd->isClass()) - return true; - } - return false; -} - cir::TBAAAttr CIRGenTBAA::getTypeInfo(clang::QualType qty) { - // At -O0 or relaxed aliasing, TBAA is not emitted for regular types. - if (codeGenOpts.OptimizationLevel == 0 || codeGenOpts.RelaxedAliasing) { - return nullptr; - } - - // If the type has the may_alias attribute (even on a typedef), it is - // effectively in the general char alias class. - if (typeHasMayAlias(qty)) { - assert(!cir::MissingFeatures::tbaaMayAlias()); - return getChar(); - } - // We need this function to not fall back to returning the "omnipotent char" - // type node for aggregate and union types. Otherwise, any dereference of an - // aggregate will result into the may-alias access descriptor, meaning all - // subsequent accesses to direct and indirect members of that aggregate will - // be considered may-alias too. - // function. - if (isValidBaseType(qty)) { - // TODO(cir): support TBAA with struct - return tbaa_NYI(mlirContext); - } - - const clang::Type *ty = astContext.getCanonicalType(qty).getTypePtr(); - if (metadataCache.contains(ty)) { - return metadataCache[ty]; - } - - // Note that the following helper call is allowed to add new nodes to the - // cache, which invalidates all its previously obtained iterators. So we - // first generate the node for the type and then add that node to the - // cache. - auto typeNode = cir::TBAAScalarAttr::get(mlirContext, types.ConvertType(qty)); - return metadataCache[ty] = typeNode; + return tbaa_NYI(mlirContext); } TBAAAccessInfo CIRGenTBAA::getAccessInfo(clang::QualType accessType) { - // Pointee values may have incomplete types, but they shall never be - // dereferenced. - if (accessType->isIncompleteType()) { - assert(!cir::MissingFeatures::tbaaIncompleteType()); - return TBAAAccessInfo::getIncompleteInfo(); - } - - if (typeHasMayAlias(accessType)) { - assert(!cir::MissingFeatures::tbaaMayAlias()); - return TBAAAccessInfo::getMayAliasInfo(); - } - - uint64_t size = astContext.getTypeSizeInChars(accessType).getQuantity(); - return TBAAAccessInfo(getTypeInfo(accessType), size); + return TBAAAccessInfo(); } TBAAAccessInfo CIRGenTBAA::getVTablePtrAccessInfo(mlir::Type vtablePtrType) { - // TODO(cir): support vtable ptr - assert(!cir::MissingFeatures::tbaaVTablePtr()); return TBAAAccessInfo(); } mlir::ArrayAttr CIRGenTBAA::getTBAAStructInfo(clang::QualType qty) { - assert(!cir::MissingFeatures::tbaaStruct() && "tbaa.struct NYI"); - return mlir::ArrayAttr(); + return mlir::ArrayAttr::get(mlirContext, {}); } cir::TBAAAttr CIRGenTBAA::getBaseTypeInfo(clang::QualType qty) { return tbaa_NYI(mlirContext); } -cir::TBAAAttr CIRGenTBAA::getAccessTagInfo(TBAAAccessInfo tbaaInfo) { - assert(!tbaaInfo.isIncomplete() && - "Access to an object of an incomplete type!"); - - if (tbaaInfo.isMayAlias()) { - assert(!cir::MissingFeatures::tbaaMayAlias()); - tbaaInfo = TBAAAccessInfo(getChar(), tbaaInfo.size); - } - if (!tbaaInfo.accessType) { - return nullptr; - } - - if (!codeGenOpts.StructPathTBAA) - tbaaInfo = TBAAAccessInfo(tbaaInfo.accessType, tbaaInfo.size); - - if (!tbaaInfo.baseType) { - tbaaInfo.baseType = tbaaInfo.accessType; - assert(!tbaaInfo.offset && - "Nonzero offset for an access with no base type!"); - } - if (codeGenOpts.NewStructPathTBAA) { - llvm_unreachable("NYI"); - } - if (tbaaInfo.baseType == tbaaInfo.accessType) { - return tbaaInfo.accessType; - } - return tbaa_NYI(mlirContext); +mlir::ArrayAttr CIRGenTBAA::getAccessTagInfo(TBAAAccessInfo tbaaInfo) { + return mlir::ArrayAttr::get(mlirContext, {tbaa_NYI(mlirContext)}); } TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForCast(TBAAAccessInfo sourceInfo, TBAAAccessInfo targetInfo) { - assert(!cir::MissingFeatures::tbaaMergeTBAAInfo()); return TBAAAccessInfo(); } TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo infoA, TBAAAccessInfo infoB) { - assert(!cir::MissingFeatures::tbaaMergeTBAAInfo()); return TBAAAccessInfo(); } TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo destInfo, TBAAAccessInfo srcInfo) { - assert(!cir::MissingFeatures::tbaaMergeTBAAInfo()); return TBAAAccessInfo(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h index 03b9b75113c9..3f59a0e6538b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.h +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.h @@ -104,10 +104,6 @@ class CIRGenTBAA { [[maybe_unused]] const clang::CodeGenOptions &codeGenOpts; [[maybe_unused]] const clang::LangOptions &features; - llvm::DenseMap metadataCache; - - cir::TBAAAttr getChar(); - public: CIRGenTBAA(mlir::MLIRContext *mlirContext, clang::ASTContext &astContext, CIRGenTypes &types, mlir::ModuleOp moduleOp, @@ -133,7 +129,7 @@ class CIRGenTBAA { cir::TBAAAttr getBaseTypeInfo(clang::QualType qty); /// Get TBAA tag for a given memory access. - cir::TBAAAttr getAccessTagInfo(TBAAAccessInfo tbaaInfo); + mlir::ArrayAttr getAccessTagInfo(TBAAAccessInfo tbaaInfo); /// Get merged TBAA information for the purpose of type casts. TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo sourceInfo, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index f8907712deac..7da279aa7513 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -17,7 +17,6 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Interfaces/CIRLoopOpInterface.h" #include "clang/CIR/MissingFeatures.h" -#include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" #include #include @@ -107,12 +106,12 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << dynCastInfoAttr.getAlias(); return AliasResult::FinalAlias; } - return TypeSwitch(attr) - .Case([&](auto attr) { - os << decltype(attr)::getMnemonic(); - return AliasResult::OverridableAlias; - }) - .Default([](Attribute) { return AliasResult::NoAlias; }); + if (auto tbaaAttr = mlir::dyn_cast(attr)) { + os << tbaaAttr.getMnemonic(); + return AliasResult::OverridableAlias; + } + + return AliasResult::NoAlias; } }; } // namespace diff --git a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp index bb99d53e0ad8..80963353a304 100644 --- a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp @@ -151,7 +151,7 @@ DeletionKind cir::CopyOp::removeBlockingUses( if (loadsFrom(slot)) builder.create(getLoc(), reachingDefinition, getDst(), false, mlir::IntegerAttr{}, cir::MemOrderAttr(), - cir::TBAAAttr{}); + mlir::ArrayAttr{}); return DeletionKind::Delete; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 87336d2b7606..79d4cf7301ac 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -18,7 +18,6 @@ #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" -#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" #include "mlir/Dialect/LLVMIR/Transforms/Passes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" @@ -42,9 +41,6 @@ #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" -#include "clang/CIR/Dialect/IR/CIRAttrs.h" -#include "clang/CIR/Dialect/IR/CIRDialect.h" -#include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/MissingFeatures.h" @@ -55,7 +51,6 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" -#include "llvm/ADT/TypeSwitch.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/Support/Casting.h" @@ -671,67 +666,6 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, llvm_unreachable("unhandled attribute type"); } -mlir::LLVM::TBAATypeDescriptorAttr -createScalarTypeNode(mlir::MLIRContext *ctx, llvm::StringRef typeName, - mlir::LLVM::TBAANodeAttr parent, int64_t offset) { - llvm::SmallVector members; - members.push_back(mlir::LLVM::TBAAMemberAttr::get(ctx, parent, offset)); - return mlir::LLVM::TBAATypeDescriptorAttr::get( - ctx, typeName, llvm::ArrayRef(members)); -} - -mlir::LLVM::TBAARootAttr getRoot(mlir::MLIRContext *ctx) { - return mlir::LLVM::TBAARootAttr::get( - ctx, mlir::StringAttr::get(ctx, "Simple C/C++ TBAA")); -} - -mlir::LLVM::TBAATypeDescriptorAttr getChar(mlir::MLIRContext *ctx) { - return createScalarTypeNode(ctx, "omnipotent char", getRoot(ctx), 0); -} - -// FIXME(cir): This should be moved and use tablegen approach -// see https://github.com/llvm/clangir/pull/1220#discussion_r1889187867 -StringRef getTypeName(mlir::Type type) { - return TypeSwitch(type) - .Case([](cir::IntType ty) { return ty.getTBAATypeName(); }) - .Case([](cir::SingleType) { return "float"; }) - .Case([](cir::DoubleType) { return "double"; }) - .Case([](cir::FP80Type) { return "f80"; }) - .Case([](cir::FP128Type) { return "f128"; }) - .Case( - [](cir::LongDoubleType) { return "long double"; }) - .Case([](cir::BoolType) { return "bool"; }) - .Case([](cir::PointerType) { return "any pointer"; }) - .Default([](auto ty) { - llvm::errs() << "unknown type: " << ty << "\n"; - return "unknown"; - }); -} - -mlir::LLVM::TBAATypeDescriptorAttr -lowerScalarType(mlir::MLIRContext *ctx, cir::TBAAScalarAttr scalarAttr) { - // special handle for omnipotent char - if (auto intTy = mlir::dyn_cast_or_null(scalarAttr.getType())) { - if (intTy.getWidth() == 1 || intTy.getWidth() == 8) { - return getChar(ctx); - } - } - auto name = getTypeName(scalarAttr.getType()); - return createScalarTypeNode(ctx, name, getChar(ctx), 0); -} - -mlir::ArrayAttr lowerCIRTBAAAttr(mlir::Attribute tbaa, - mlir::ConversionPatternRewriter &rewriter) { - auto *ctx = rewriter.getContext(); - if (auto scalarAttr = mlir::dyn_cast(tbaa)) { - auto accessType = lowerScalarType(ctx, scalarAttr); - auto tag = mlir::LLVM::TBAATagAttr::get(accessType, accessType, 0); - return mlir::ArrayAttr::get(ctx, {tag}); - } - assert(!cir::MissingFeatures::tbaaTagForStruct()); - return mlir::ArrayAttr(); -} - //===----------------------------------------------------------------------===// mlir::LLVM::Linkage convertLinkage(cir::GlobalLinkageKind linkage) { @@ -1578,14 +1512,10 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite( } // TODO: nontemporal, syncscope. - auto loadOp = rewriter.create( - op->getLoc(), llvmTy, adaptor.getAddr(), /* alignment */ alignment, + rewriter.replaceOpWithNewOp( + op, llvmTy, adaptor.getAddr(), /* alignment */ alignment, op.getIsVolatile(), /* nontemporal */ false, /* invariant */ false, /* invariantGroup */ invariant, ordering); - rewriter.replaceOp(op, loadOp); - if (auto tbaa = op.getTbaaAttr()) { - loadOp.setTBAATags(lowerCIRTBAAAttr(tbaa, rewriter)); - } return mlir::LogicalResult::success(); } @@ -1617,14 +1547,9 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite( } // TODO: nontemporal, syncscope. - auto storeOp = rewriter.create( - op->getLoc(), adaptor.getValue(), adaptor.getAddr(), alignment, - op.getIsVolatile(), + rewriter.replaceOpWithNewOp( + op, adaptor.getValue(), adaptor.getAddr(), alignment, op.getIsVolatile(), /* nontemporal */ false, /* invariantGroup */ invariant, ordering); - rewriter.replaceOp(op, storeOp); - if (auto tbaa = op.getTbaaAttr()) { - storeOp.setTBAATags(lowerCIRTBAAAttr(tbaa, rewriter)); - } return mlir::LogicalResult::success(); } diff --git a/clang/test/CIR/CodeGen/const-alloca.cpp b/clang/test/CIR/CodeGen/const-alloca.cpp index 7cc9a5b57517..9247b2692474 100644 --- a/clang/test/CIR/CodeGen/const-alloca.cpp +++ b/clang/test/CIR/CodeGen/const-alloca.cpp @@ -66,8 +66,8 @@ int local_const_load_store() { // LLVM-LABEL: @_Z22local_const_load_storev // LLVM: %[[#INIT:]] = call i32 @_Z11produce_intv() -// LLVM-NEXT: store i32 %[[#INIT]], ptr %[[#SLOT:]], align 4, !tbaa !{{.*}}, !invariant.group !{{.+}} -// LLVM-NEXT: %{{.+}} = load i32, ptr %[[#SLOT]], align 4, !tbaa !{{.*}}, !invariant.group !{{.+}} +// LLVM-NEXT: store i32 %[[#INIT]], ptr %[[#SLOT:]], align 4, !invariant.group !{{.+}} +// LLVM-NEXT: %{{.+}} = load i32, ptr %[[#SLOT]], align 4, !invariant.group !{{.+}} // LLVM: } int local_const_optimize() { @@ -80,7 +80,7 @@ int local_const_optimize() { // LLVM-LABEL: @_Z20local_const_optimizev() // LLVM-NEXT: %[[#slot:]] = alloca i32, align 4 // LLVM-NEXT: %[[#init:]] = tail call i32 @_Z11produce_intv() -// LLVM-NEXT: store i32 %[[#init]], ptr %[[#slot]], align 4, !tbaa !{{.*}}, !invariant.group !{{.+}} +// LLVM-NEXT: store i32 %[[#init]], ptr %[[#slot]], align 4, !invariant.group !{{.+}} // LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#slot]]) // LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#slot]]) // LLVM-NEXT: ret i32 %[[#init]] diff --git a/clang/test/CIR/CodeGen/tbaa-scalar.c b/clang/test/CIR/CodeGen/tbaa-scalar.c deleted file mode 100644 index b2f893b4f4ac..000000000000 --- a/clang/test/CIR/CodeGen/tbaa-scalar.c +++ /dev/null @@ -1,148 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -relaxed-aliasing -// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O0 -// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s - -// NO-TBAA-NOT: !tbaa - -// CIR: #tbaa[[FLOAT_PTR:.*]] = #cir.tbaa_scalar> -// CIR: #tbaa[[FLOAT:.*]] = #cir.tbaa_scalar -// CIR: #tbaa[[DOUBLE_PTR:.*]] = #cir.tbaa_scalar> -// CIR: #tbaa[[DOUBLE:.*]] = #cir.tbaa_scalar -// CIR: #tbaa[[LONG_DOUBLE_PTR:.*]] = #cir.tbaa_scalar>> -// CIR: #tbaa[[LONG_DOUBLE:.*]] = #cir.tbaa_scalar> -// CIR: #tbaa[[INT:.*]] = #cir.tbaa_scalar -// CIR: #tbaa[[LONG:.*]] = #cir.tbaa_scalar -// CIR: #tbaa[[CHAR:.*]] = #cir.tbaa_scalar -// CIR: #tbaa[[INT_PTR:.*]] = #cir.tbaa_scalar> -// CIR: #tbaa[[LONG_PTR:.*]] = #cir.tbaa_scalar> -// CIR: #tbaa[[CHAR_PTR:.*]] = #cir.tbaa_scalar> - -void test_int_and_float(int *a, float *b) { - // CIR-LABEL: cir.func @test_int_and_float - // CIR: cir.scope - // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa(#tbaa[[INT_PTR]]) - // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s32i tbaa(#tbaa[[INT]]) - // CIR: cir.if - // CIR: %[[C2:.*]] = cir.const #cir.fp<2 - // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[FLOAT_PTR]]) - // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.float, !cir.ptr tbaa(#tbaa[[FLOAT]]) - // CIR: else - // CIR: %[[C3:.*]] = cir.const #cir.fp<3 - // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[FLOAT_PTR]]) - // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.float, !cir.ptr tbaa(#tbaa[[FLOAT]]) - - // LLVM-LABEL: void @test_int_and_float - // LLVM: %[[ARG_a:.*]] = load i32, ptr %{{.*}}, align 4, !tbaa ![[TBAA_INT:.*]] - // LLVM: %[[COND:.*]] = icmp eq i32 %[[ARG_a]], 1 - // LLVM: %[[RET:.*]] = select i1 %[[COND]], float 2.000000e+00, float 3.000000e+00 - // LLVM: store float %[[RET]], ptr %{{.*}}, align 4, !tbaa ![[TBAA_FLOAT:.*]] - // LLVM: ret void - if (*a == 1) { - *b = 2.0f; - } else { - *b = 3.0f; - } -} - -void test_long_and_double(long *a, double *b) { - // CIR-LABEL: cir.func @test_long_and_double - // CIR: cir.scope - // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa(#tbaa[[LONG_PTR]]) - // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s64i tbaa(#tbaa[[LONG]]) - // CIR: cir.if - // CIR: %[[C2:.*]] = cir.const #cir.fp<2 - // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[DOUBLE_PTR]]) - // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.double, !cir.ptr tbaa(#tbaa[[DOUBLE]]) - // CIR: else - // CIR: %[[C3:.*]] = cir.const #cir.fp<3 - // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[DOUBLE_PTR]]) - // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.double, !cir.ptr tbaa(#tbaa[[DOUBLE]]) - - // LLVM-LABEL: void @test_long_and_double - // LLVM: %[[ARG_a:.*]] = load i64, ptr %{{.*}}, align 8, !tbaa ![[TBAA_LONG:.*]] - // LLVM: %[[COND:.*]] = icmp eq i64 %[[ARG_a]], 1 - // LLVM: %[[RET:.*]] = select i1 %[[COND]], double 2.000000e+00, double 3.000000e+00 - // LLVM: store double %[[RET]], ptr %{{.*}}, align 8, !tbaa ![[TBAA_DOUBLE:.*]] - // LLVM: ret void - if (*a == 1L) { - *b = 2.0; - } else { - *b = 3.0; - } -} -void test_long_long_and_long_double(long long *a, long double *b) { - // CIR-LABEL: cir.func @test_long_long_and_long_double - // CIR: cir.scope - // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa(#tbaa[[LONG_PTR]]) - // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s64i tbaa(#tbaa[[LONG]]) - // CIR: cir.if - // CIR: %[[C2:.*]] = cir.const #cir.fp<2 - // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>>, !cir.ptr> tbaa(#tbaa[[LONG_DOUBLE_PTR]]) - // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.long_double, !cir.ptr> tbaa(#tbaa[[LONG_DOUBLE]]) - // CIR: else - // CIR: %[[C3:.*]] = cir.const #cir.fp<3 - // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>>, !cir.ptr> tbaa(#tbaa[[LONG_DOUBLE_PTR]]) - // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.long_double, !cir.ptr> tbaa(#tbaa[[LONG_DOUBLE]]) - - // LLVM-LABEL: void @test_long_long_and_long_double - // LLVM: %[[ARG_a:.*]] = load i64, ptr %{{.*}}, align 8, !tbaa ![[TBAA_LONG_LONG:.*]] - // LLVM: %[[COND:.*]] = icmp eq i64 %[[ARG_a]], 1 - // LLVM: %[[RET:.*]] = select i1 %[[COND]], x86_fp80 0xK40008000000000000000, x86_fp80 0xK4000C000000000000000 - // LLVM: store x86_fp80 %[[RET]], ptr %{{.*}}, align 16, !tbaa ![[TBAA_LONG_DOUBLE:.*]] - // LLVM: ret void - if (*a == 1L) { - *b = 2.0L; - } else { - *b = 3.0L; - } -} - -void test_char(char *a, char* b) { - // CIR-LABEL: cir.func @test_char - // CIR: cir.scope - // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa(#tbaa[[CHAR_PTR]]) - // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s8i tbaa(#tbaa[[CHAR]]) - // CIR: cir.if - // CIR: %[[C2:.*]] = cir.const #cir.int<98> : !s32i - // CIR: %[[C2_CHAR:.*]] = cir.cast(integral, %[[C2]] : !s32i), !s8i - // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[CHAR_PTR]]) - // CIR: cir.store %[[C2_CHAR]], %[[TMP3]] : !s8i, !cir.ptr tbaa(#tbaa[[CHAR]]) - // CIR: else - // CIR: %[[C3:.*]] = cir.const #cir.int<0> : !s32i - // CIR: %[[C3_CHAR:.*]] = cir.cast(integral, %[[C3]] : !s32i), !s8i - // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr tbaa(#tbaa[[CHAR_PTR]]) - // CIR: cir.store %[[C3_CHAR]], %[[TMP4]] : !s8i, !cir.ptr tbaa(#tbaa[[CHAR]]) - - - // LLVM-LABEL: void @test_char - // LLVM: %[[ARG_a:.*]] = load i8, ptr %{{.*}}, align 1, !tbaa ![[TBAA_CHAR:.*]] - // LLVM: %[[COND:.*]] = icmp eq i8 %[[ARG_a]], 97 - // LLVM: %[[RET:.*]] = select i1 %[[COND]], i8 98, i8 0 - // LLVM: store i8 %[[RET]], ptr %{{.*}}, align 1, !tbaa ![[TBAA_CHAR]] - // LLVM: ret void - if (*a == 'a') { - *b = 'b'; - } - else { - *b = '\0'; - } -} - -// LLVM: ![[TBAA_INT]] = !{![[TBAA_INT_PARENT:.*]], ![[TBAA_INT_PARENT]], i64 0} -// LLVM: ![[TBAA_INT_PARENT]] = !{!"int", ![[CHAR:.*]], i64 0} -// LLVM: ![[CHAR]] = !{!"omnipotent char", ![[ROOT:.*]], i64 0} -// LLVM: ![[ROOT]] = !{!"Simple C/C++ TBAA"} -// LLVM: ![[TBAA_FLOAT]] = !{![[TBAA_FLOAT_PARENT:.*]], ![[TBAA_FLOAT_PARENT]], i64 0} -// LLVM: ![[TBAA_FLOAT_PARENT]] = !{!"float", ![[CHAR]], i64 0} -// LLVM: ![[TBAA_LONG]] = !{![[TBAA_LONG_PARENT:.*]], ![[TBAA_LONG_PARENT]], i64 0} -// LLVM: ![[TBAA_LONG_PARENT]] = !{!"long", ![[CHAR]], i64 0} -// LLVM: ![[TBAA_DOUBLE]] = !{![[TBAA_DOUBLE_PARENT:.*]], ![[TBAA_DOUBLE_PARENT]], i64 0} -// LLVM: ![[TBAA_DOUBLE_PARENT]] = !{!"double", ![[CHAR]], i64 0} -// LLVM: ![[TBAA_LONG_DOUBLE]] = !{![[TBAA_LONG_DOUBLE_PARENT:.*]], ![[TBAA_LONG_DOUBLE_PARENT]], i64 0} -// LLVM: ![[TBAA_LONG_DOUBLE_PARENT]] = !{!"long double", ![[CHAR]], i64 0} -// LLVM: ![[TBAA_CHAR]] = !{![[CHAR]], ![[CHAR]], i64 0} diff --git a/clang/test/CIR/CodeGen/tbaa-struct.cpp b/clang/test/CIR/CodeGen/tbaa-struct.cpp deleted file mode 100644 index 84c49df6b455..000000000000 --- a/clang/test/CIR/CodeGen/tbaa-struct.cpp +++ /dev/null @@ -1,35 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s - -// CIR: #tbaa[[tbaa_NYI:.*]] = #cir.tbaa -// CIR: #tbaa[[INT:.*]] = #cir.tbaa_scalar -// CIR: #tbaa[[INT_PTR:.*]] = #cir.tbaa_scalar> -// CIR: #tbaa[[StructA_PTR:.*]] = #cir.tbaa_scalar> - -typedef unsigned char uint8_t; -typedef unsigned short uint16_t; -typedef unsigned int uint32_t; -typedef unsigned long long uint64_t; -typedef struct -{ - uint16_t f16; - uint32_t f32; - uint16_t f16_2; - uint32_t f32_2; -} StructA; - -uint32_t g(uint32_t *s, StructA *A) { - // CIR-LABEL: cir.func @_Z1g - // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i - // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i - // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[INT]]) - // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i - // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i - // CIR: %[[pointer_to_StructA:.*]] = cir.load %{{.*}} : !cir.ptr>, !cir.ptr tbaa(#tbaa[[StructA_PTR]]) - // CIR: %[[A_f32:.*]] = cir.get_member %[[pointer_to_StructA]][1] {name = "f32"} : !cir.ptr -> !cir.ptr - // CIR: cir.store %[[UINT_4]], %[[A_f32]] : !u32i, !cir.ptr tbaa(#tbaa[[tbaa_NYI]]) - - *s = 1; - A->f32 = 4; - return *s; -} diff --git a/clang/test/CIR/CodeGen/tbaa-vptr.cpp b/clang/test/CIR/CodeGen/tbaa-vptr.cpp deleted file mode 100644 index dbe28be626a2..000000000000 --- a/clang/test/CIR/CodeGen/tbaa-vptr.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s - -// CIR-NOT: #tbaa - -struct Member { - ~Member(); -}; - -struct A { - virtual ~A(); -}; - -struct B : A { - Member m; - virtual ~B(); -}; -B::~B() { } diff --git a/clang/test/CIR/CodeGen/tbaa.c b/clang/test/CIR/CodeGen/tbaa.c new file mode 100644 index 000000000000..43cdde47ecb7 --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa.c @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// CIR: #tbaa[[TBAA_NO:.*]] = #cir.tbaa +void f(int *a, float *b) { + // CIR: cir.scope + // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) + // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s32i tbaa([#tbaa[[TBAA_NO]]]) + // CIR: cir.if + // CIR: %[[C2:.*]] = cir.const #cir.fp<2 + // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) + // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.float, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) + // CIR: else + // CIR: %[[C3:.*]] = cir.const #cir.fp<3 + // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) + // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.float, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) + if (*a == 1) { + *b = 2.0f; + } else { + *b = 3.0f; + } +} From f0a56c0a6bf6c35c22a58ed63b698d7112a5710a Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Fri, 20 Dec 2024 15:21:07 -0300 Subject: [PATCH 2188/2301] [CIR][Types] Mimic OG codegen on isFuncParamTypeConvertible --- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index fa81ce682769..35db7731f42e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -321,22 +321,7 @@ bool CIRGenTypes::isFuncParamTypeConvertible(clang::QualType Ty) { return true; // Incomplete types cannot be converted. - if (TT->isIncompleteType()) - return false; - - // If this is an enum, then it is always safe to convert. - const RecordType *RT = dyn_cast(TT); - if (!RT) - return true; - - // Otherwise, we have to be careful. If it is a struct that we're in the - // process of expanding, then we can't convert the function type. That's ok - // though because we must be in a pointer context under the struct, so we can - // just convert it to a dummy type. - // - // We decide this by checking whether ConvertRecordDeclType returns us an - // opaque type for a struct that we know is defined. - return isSafeToConvert(RT->getDecl(), *this); + return !TT->isIncompleteType(); } /// Code to verify a given function type is complete, i.e. the return type and From 7e06e8f6f020075d70a4afe4c9010ecbc038c744 Mon Sep 17 00:00:00 2001 From: orbiri Date: Mon, 6 Jan 2025 16:52:50 +0200 Subject: [PATCH 2189/2301] [CIR] Lower `cir.bool` to i1 (#1158) This PR changes changes the lowering of `cir.bool` to `i1` in both DorectToLLVM and ThroughMLIR. This dramatically simplifies the lowering logic of most operations and the lowered code itself as it naturally uses `i1` for anything boolean. The change involves separating between type lowering when scalars are involved and when memory is involved. This is a pattern that was inspired by clang's codegen which directly emits `i1` from the AST without intermediate higher level representation like CIR has. This also paves the way to more complex lowerings that are implemented in clang codegen through the three primitives added here: `Convert Type For Memory`, `Emit For Memory` and `Emit To Memory`. They are used in clang for non-trivial types like bitints but also extensible vectors. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 297 +++++++++++------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 54 +++- .../ThroughMLIR/LowerCIRLoopToSCF.cpp | 5 +- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 123 +++++--- clang/test/CIR/CodeGen/atomic-xchg-field.c | 8 +- clang/test/CIR/CodeGen/bf16-ops.c | 10 +- clang/test/CIR/CodeGen/builtin-assume.cpp | 4 +- clang/test/CIR/CodeGen/builtin-constant-p.c | 3 +- clang/test/CIR/CodeGen/complex-arithmetic.c | 9 +- clang/test/CIR/CodeGen/complex-cast.c | 8 +- clang/test/CIR/CodeGen/globals.cpp | 4 + clang/test/CIR/CodeGen/new-null.cpp | 2 +- .../CodeGen/pointer-to-data-member-cast.cpp | 8 +- clang/test/CIR/CodeGen/static.cpp | 4 +- clang/test/CIR/IR/invalid.cir | 6 +- clang/test/CIR/Lowering/ThroughMLIR/bool.cir | 5 +- .../test/CIR/Lowering/ThroughMLIR/branch.cir | 14 +- clang/test/CIR/Lowering/ThroughMLIR/cast.cir | 32 +- clang/test/CIR/Lowering/ThroughMLIR/cmp.cpp | 15 +- clang/test/CIR/Lowering/ThroughMLIR/doWhile.c | 12 +- clang/test/CIR/Lowering/ThroughMLIR/if.c | 16 +- .../test/CIR/Lowering/ThroughMLIR/tenary.cir | 6 +- clang/test/CIR/Lowering/ThroughMLIR/while.c | 12 +- clang/test/CIR/Lowering/binop-overflow.cir | 20 +- clang/test/CIR/Lowering/bool.cir | 9 +- clang/test/CIR/Lowering/branch.cir | 22 +- clang/test/CIR/Lowering/brcond.cir | 25 +- clang/test/CIR/Lowering/cast.cir | 16 +- clang/test/CIR/Lowering/const-array.cir | 5 + clang/test/CIR/Lowering/const.cir | 7 +- clang/test/CIR/Lowering/loadstorealloca.cir | 20 +- clang/test/CIR/Lowering/ptrstride.cir | 10 + clang/test/CIR/Lowering/select.cir | 24 +- clang/test/CIR/Lowering/struct.cir | 19 ++ clang/test/CIR/Lowering/unary-not.cir | 24 +- clang/test/CIR/Lowering/unions.cir | 3 +- 37 files changed, 516 insertions(+), 347 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index b35ef11c7782..085e2c237ee7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -820,7 +820,7 @@ void CIRGenModule::replaceGlobal(cir::GlobalOp Old, cir::GlobalOp New) { mlir::Type ptrTy = builder.getPointerTo(OldTy); mlir::Value cast = builder.createBitcast(GGO->getLoc(), UseOpResultValue, ptrTy); - UseOpResultValue.replaceAllUsesExcept(cast, {cast.getDefiningOp()}); + UseOpResultValue.replaceAllUsesExcept(cast, cast.getDefiningOp()); } } } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 79d4cf7301ac..6a321fca58f1 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -349,12 +349,81 @@ unsigned getGlobalOpTargetAddrSpace(mlir::ConversionPatternRewriter &rewriter, .getAddressSpace(); } +/// Given a type convertor and a data layout, convert the given type to a type +/// that is suitable for memory operations. For example, this can be used to +/// lower cir.bool accesses to i8. +static mlir::Type convertTypeForMemory(const mlir::TypeConverter &converter, + mlir::DataLayout const &dataLayout, + mlir::Type type) { + // TODO(cir): Handle other types similarly to clang's codegen + // convertTypeForMemory + if (isa(type)) { + return mlir::IntegerType::get(type.getContext(), + dataLayout.getTypeSizeInBits(type)); + } + + return converter.convertType(type); +} + +/// Emits the value from memory as expected by its users. Should be called when +/// the memory represetnation of a CIR type is not equal to its scalar +/// representation. +static mlir::Value emitFromMemory(mlir::ConversionPatternRewriter &rewriter, + mlir::DataLayout const &dataLayout, + cir::LoadOp op, mlir::Value value) { + + // TODO(cir): Handle other types similarly to clang's codegen EmitFromMemory + if (auto boolTy = mlir::dyn_cast(op.getResult().getType())) { + // Create a cast value from specified size in datalayout to i1 + assert(value.getType().isInteger(dataLayout.getTypeSizeInBits(boolTy))); + return createIntCast(rewriter, value, rewriter.getI1Type()); + } + + return value; +} + +/// Emits a value to memory with the expected scalar type. Should be called when +/// the memory represetnation of a CIR type is not equal to its scalar +/// representation. +static mlir::Value emitToMemory(mlir::ConversionPatternRewriter &rewriter, + mlir::DataLayout const &dataLayout, + mlir::Type origType, mlir::Value value) { + + // TODO(cir): Handle other types similarly to clang's codegen EmitToMemory + if (auto boolTy = mlir::dyn_cast(origType)) { + // Create zext of value from i1 to i8 + auto memType = + rewriter.getIntegerType(dataLayout.getTypeSizeInBits(boolTy)); + return createIntCast(rewriter, value, memType); + } + + return value; +} + } // namespace //===----------------------------------------------------------------------===// // Visitors for Lowering CIR Const Attributes //===----------------------------------------------------------------------===// +/// Emits a value to memory with the expected scalar type. Should be called when +/// the memory represetnation of a CIR attribute's type is not equal to its +/// scalar representation. +static mlir::Value +emitCirAttrToMemory(mlir::Operation *parentOp, mlir::Attribute attr, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter, + mlir::DataLayout const &dataLayout) { + + mlir::Value loweredValue = + lowerCirAttrAsValue(parentOp, attr, rewriter, converter, dataLayout); + if (auto boolAttr = mlir::dyn_cast(attr)) { + return emitToMemory(rewriter, dataLayout, boolAttr.getType(), loweredValue); + } + + return loweredValue; +} + /// Switches on the type of attribute and calls the appropriate conversion. /// IntAttr visitor. @@ -439,14 +508,16 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::PoisonAttr poisonAttr, static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstStructAttr constStruct, mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { + const mlir::TypeConverter *converter, + mlir::DataLayout const &dataLayout) { auto llvmTy = converter->convertType(constStruct.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); // Iteratively lower each constant element of the struct. for (auto [idx, elt] : llvm::enumerate(constStruct.getMembers())) { - mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); + mlir::Value init = + emitCirAttrToMemory(parentOp, elt, rewriter, converter, dataLayout); result = rewriter.create(loc, result, init, idx); } @@ -457,13 +528,15 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstStructAttr constStruct, static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::VTableAttr vtableArr, mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { + const mlir::TypeConverter *converter, + mlir::DataLayout const &dataLayout) { auto llvmTy = converter->convertType(vtableArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); for (auto [idx, elt] : llvm::enumerate(vtableArr.getVtableData())) { - mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); + mlir::Value init = + lowerCirAttrAsValue(parentOp, elt, rewriter, converter, dataLayout); result = rewriter.create(loc, result, init, idx); } @@ -474,13 +547,15 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::VTableAttr vtableArr, static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::TypeInfoAttr typeinfoArr, mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { + const mlir::TypeConverter *converter, + mlir::DataLayout const &dataLayout) { auto llvmTy = converter->convertType(typeinfoArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); for (auto [idx, elt] : llvm::enumerate(typeinfoArr.getData())) { - mlir::Value init = lowerCirAttrAsValue(parentOp, elt, rewriter, converter); + mlir::Value init = + lowerCirAttrAsValue(parentOp, elt, rewriter, converter, dataLayout); result = rewriter.create(loc, result, init, idx); } @@ -491,7 +566,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::TypeInfoAttr typeinfoArr, static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstArrayAttr constArr, mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { + const mlir::TypeConverter *converter, + mlir::DataLayout const &dataLayout) { auto llvmTy = converter->convertType(constArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result; @@ -508,7 +584,7 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstArrayAttr constArr, if (auto arrayAttr = mlir::dyn_cast(constArr.getElts())) { for (auto [idx, elt] : llvm::enumerate(arrayAttr)) { mlir::Value init = - lowerCirAttrAsValue(parentOp, elt, rewriter, converter); + emitCirAttrToMemory(parentOp, elt, rewriter, converter, dataLayout); result = rewriter.create(loc, result, init, idx); } @@ -565,7 +641,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstVectorAttr constVec, static mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, cir::GlobalViewAttr globalAttr, mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { + const mlir::TypeConverter *converter, + mlir::DataLayout const &dataLayout) { auto module = parentOp->getParentOfType(); mlir::Type sourceType; unsigned sourceAddrSpace = 0; @@ -577,7 +654,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::GlobalViewAttr globalAttr, symName = llvmSymbol.getSymName(); sourceAddrSpace = llvmSymbol.getAddrSpace(); } else if (auto cirSymbol = dyn_cast(sourceSymbol)) { - sourceType = converter->convertType(cirSymbol.getSymType()); + sourceType = + convertTypeForMemory(*converter, dataLayout, cirSymbol.getSymType()); symName = cirSymbol.getSymName(); sourceAddrSpace = getGlobalOpTargetAddrSpace(rewriter, converter, cirSymbol); @@ -622,7 +700,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::GlobalViewAttr globalAttr, auto ptrTy = mlir::dyn_cast(globalAttr.getType()); assert(ptrTy && "Expecting pointer type in GlobalViewAttr"); - auto llvmEltTy = converter->convertType(ptrTy.getPointee()); + auto llvmEltTy = + convertTypeForMemory(*converter, dataLayout, ptrTy.getPointee()); if (llvmEltTy == sourceType) return addrOp; @@ -635,7 +714,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::GlobalViewAttr globalAttr, /// Switches on the type of attribute and calls the appropriate conversion. mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { + const mlir::TypeConverter *converter, + mlir::DataLayout const &dataLayout) { if (const auto intAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, intAttr, rewriter, converter); if (const auto fltAttr = mlir::dyn_cast(attr)) @@ -643,9 +723,11 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, if (const auto ptrAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, ptrAttr, rewriter, converter); if (const auto constStruct = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, constStruct, rewriter, converter); + return lowerCirAttrAsValue(parentOp, constStruct, rewriter, converter, + dataLayout); if (const auto constArr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, constArr, rewriter, converter); + return lowerCirAttrAsValue(parentOp, constArr, rewriter, converter, + dataLayout); if (const auto constVec = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, constVec, rewriter, converter); if (const auto boolAttr = mlir::dyn_cast(attr)) @@ -657,11 +739,14 @@ mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, if (const auto poisonAttr = mlir::dyn_cast(attr)) return lowerCirAttrAsValue(parentOp, poisonAttr, rewriter, converter); if (const auto globalAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, globalAttr, rewriter, converter); + return lowerCirAttrAsValue(parentOp, globalAttr, rewriter, converter, + dataLayout); if (const auto vtableAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, vtableAttr, rewriter, converter); + return lowerCirAttrAsValue(parentOp, vtableAttr, rewriter, converter, + dataLayout); if (const auto typeinfoAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, typeinfoAttr, rewriter, converter); + return lowerCirAttrAsValue(parentOp, typeinfoAttr, rewriter, converter, + dataLayout); llvm_unreachable("unhandled attribute type"); } @@ -816,7 +901,8 @@ mlir::LogicalResult CIRToLLVMPtrStrideOpLowering::matchAndRewrite( mlir::ConversionPatternRewriter &rewriter) const { auto *tc = getTypeConverter(); const auto resultTy = tc->convertType(ptrStrideOp.getType()); - auto elementTy = tc->convertType(ptrStrideOp.getElementTy()); + auto elementTy = + convertTypeForMemory(*tc, dataLayout, ptrStrideOp.getElementTy()); auto *ctx = elementTy.getContext(); // void and function types doesn't really have a layout to use in GEPs, @@ -1012,8 +1098,7 @@ mlir::LogicalResult CIRToLLVMBrCondOpLowering::matchAndRewrite( } if (!i1Condition) - i1Condition = rewriter.create( - brOp.getLoc(), rewriter.getI1Type(), adaptor.getCond()); + i1Condition = adaptor.getCond(); rewriter.replaceOpWithNewOp( brOp, i1Condition, brOp.getDestTrue(), adaptor.getDestOperandsTrue(), @@ -1040,7 +1125,8 @@ mlir::LogicalResult CIRToLLVMCastOpLowering::matchAndRewrite( const auto ptrTy = mlir::cast(castOp.getType()); auto sourceValue = adaptor.getOperands().front(); auto targetType = convertTy(ptrTy); - auto elementTy = convertTy(ptrTy.getPointee()); + auto elementTy = convertTypeForMemory(*getTypeConverter(), dataLayout, + ptrTy.getPointee()); auto offset = llvm::SmallVector{0}; rewriter.replaceOpWithNewOp( castOp, targetType, elementTy, sourceValue, offset); @@ -1111,9 +1197,7 @@ mlir::LogicalResult CIRToLLVMCastOpLowering::matchAndRewrite( return mlir::success(); } case cir::CastKind::float_to_bool: { - auto dstTy = mlir::cast(castOp.getType()); auto llvmSrcVal = adaptor.getOperands().front(); - auto llvmDstTy = getTypeConverter()->convertType(dstTy); auto kind = mlir::LLVM::FCmpPredicate::une; // Check if float is not equal to zero. @@ -1122,10 +1206,9 @@ mlir::LogicalResult CIRToLLVMCastOpLowering::matchAndRewrite( mlir::FloatAttr::get(llvmSrcVal.getType(), 0.0)); // Extend comparison result to either bool (C++) or int (C). - mlir::Value cmpResult = rewriter.create( - castOp.getLoc(), kind, llvmSrcVal, zeroFloat); - rewriter.replaceOpWithNewOp(castOp, llvmDstTy, - cmpResult); + rewriter.replaceOpWithNewOp(castOp, kind, llvmSrcVal, + zeroFloat); + return mlir::success(); } case cir::CastKind::bool_to_int: { @@ -1434,7 +1517,8 @@ mlir::LogicalResult CIRToLLVMAllocaOpLowering::matchAndRewrite( op.getLoc(), typeConverter->convertType(rewriter.getIndexType()), rewriter.getIntegerAttr(rewriter.getIndexType(), 1)); - auto elementTy = getTypeConverter()->convertType(op.getAllocaType()); + auto elementTy = + convertTypeForMemory(*getTypeConverter(), dataLayout, op.getAllocaType()); auto resultTy = getTypeConverter()->convertType(op.getResult().getType()); // Verification between the CIR alloca AS and the one from data layout. { @@ -1489,7 +1573,8 @@ getLLVMMemOrder(std::optional &memorder) { mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite( cir::LoadOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { - const auto llvmTy = getTypeConverter()->convertType(op.getResult().getType()); + const auto llvmTy = convertTypeForMemory(*getTypeConverter(), dataLayout, + op.getResult().getType()); auto memorder = op.getMemOrder(); auto ordering = getLLVMMemOrder(memorder); auto alignOpt = op.getAlignment(); @@ -1512,10 +1597,15 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite( } // TODO: nontemporal, syncscope. - rewriter.replaceOpWithNewOp( - op, llvmTy, adaptor.getAddr(), /* alignment */ alignment, + auto newLoad = rewriter.create( + op->getLoc(), llvmTy, adaptor.getAddr(), /* alignment */ alignment, op.getIsVolatile(), /* nontemporal */ false, /* invariant */ false, /* invariantGroup */ invariant, ordering); + + // Convert adapted result to its original type if needed. + mlir::Value result = + emitFromMemory(rewriter, dataLayout, op, newLoad.getResult()); + rewriter.replaceOp(op, result); return mlir::LogicalResult::success(); } @@ -1546,9 +1636,12 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite( invariant = addrAllocaOp && addrAllocaOp.getConstant(); } + // Convert adapted value to its memory type if needed. + mlir::Value value = emitToMemory(rewriter, dataLayout, + op.getValue().getType(), adaptor.getValue()); // TODO: nontemporal, syncscope. rewriter.replaceOpWithNewOp( - op, adaptor.getValue(), adaptor.getAddr(), alignment, op.getIsVolatile(), + op, value, adaptor.getAddr(), alignment, op.getIsVolatile(), /* nontemporal */ false, /* invariantGroup */ invariant, ordering); return mlir::LogicalResult::success(); } @@ -1569,9 +1662,9 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( // Regardless of the type, we should lower the constant of poison value // into PoisonOp. - if (mlir::isa(attr)) { + if (auto poisonAttr = mlir::dyn_cast(attr)) { rewriter.replaceOp( - op, lowerCirAttrAsValue(op, attr, rewriter, getTypeConverter())); + op, lowerCirAttrAsValue(op, poisonAttr, rewriter, getTypeConverter())); return mlir::success(); } @@ -1629,7 +1722,8 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( } // Lower GlobalViewAttr to llvm.mlir.addressof if (auto gv = mlir::dyn_cast(op.getValue())) { - auto newOp = lowerCirAttrAsValue(op, gv, rewriter, getTypeConverter()); + auto newOp = + lowerCirAttrAsValue(op, gv, rewriter, getTypeConverter(), dataLayout); rewriter.replaceOp(op, newOp); return mlir::success(); } @@ -1655,16 +1749,16 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( std::optional denseAttr; if (constArr && hasTrailingZeros(constArr)) { - auto newOp = - lowerCirAttrAsValue(op, constArr, rewriter, getTypeConverter()); + auto newOp = lowerCirAttrAsValue(op, constArr, rewriter, + getTypeConverter(), dataLayout); rewriter.replaceOp(op, newOp); return mlir::success(); } else if (constArr && (denseAttr = lowerConstArrayAttr(constArr, typeConverter))) { attr = denseAttr.value(); } else { - auto initVal = - lowerCirAttrAsValue(op, op.getValue(), rewriter, typeConverter); + auto initVal = lowerCirAttrAsValue(op, op.getValue(), rewriter, + typeConverter, dataLayout); rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); @@ -1675,14 +1769,16 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( // initializer would be a global constant that is memcopied. Here we just // define a local constant with llvm.undef that will be stored into the // stack. - auto initVal = lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter); + auto initVal = lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter, + dataLayout); rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); } else if (auto strTy = mlir::dyn_cast(op.getType())) { auto attr = op.getValue(); if (mlir::isa(attr)) { - auto initVal = lowerCirAttrAsValue(op, attr, rewriter, typeConverter); + auto initVal = + lowerCirAttrAsValue(op, attr, rewriter, typeConverter, dataLayout); rewriter.replaceAllUsesWith(op, initVal); rewriter.eraseOp(op); return mlir::success(); @@ -1692,7 +1788,7 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( << op.getType(); } else if (const auto vecTy = mlir::dyn_cast(op.getType())) { rewriter.replaceOp(op, lowerCirAttrAsValue(op, op.getValue(), rewriter, - getTypeConverter())); + getTypeConverter(), dataLayout)); return mlir::success(); } else return op.emitError() << "unsupported constant type " << op.getType(); @@ -2160,7 +2256,8 @@ mlir::LogicalResult CIRToLLVMSwitchFlatOpLowering::matchAndRewrite( /// insertion point to the end of the initializer block. void CIRToLLVMGlobalOpLowering::setupRegionInitializedLLVMGlobalOp( cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const { - const auto llvmType = getTypeConverter()->convertType(op.getSymType()); + const auto llvmType = + convertTypeForMemory(*getTypeConverter(), dataLayout, op.getSymType()); SmallVector attributes; auto newGlobalOp = rewriter.replaceOpWithNewOp( op, llvmType, op.getConstant(), convertLinkage(op.getLinkage()), @@ -2178,7 +2275,10 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( mlir::ConversionPatternRewriter &rewriter) const { // Fetch required values to create LLVM op. - const auto llvmType = getTypeConverter()->convertType(op.getSymType()); + const auto CIRSymType = op.getSymType(); + + const auto llvmType = + convertTypeForMemory(*getTypeConverter(), dataLayout, CIRSymType); const auto isConst = op.getConstant(); const auto isDsoLocal = op.getDsolocal(); const auto linkage = convertLinkage(op.getLinkage()); @@ -2222,8 +2322,8 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( - op->getLoc(), - lowerCirAttrAsValue(op, constArr, rewriter, typeConverter)); + op->getLoc(), lowerCirAttrAsValue(op, constArr, rewriter, + typeConverter, dataLayout)); return mlir::success(); } } else { @@ -2247,7 +2347,8 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( // should be updated. For now, we use a custom op to initialize globals // to the appropriate value. setupRegionInitializedLLVMGlobalOp(op, rewriter); - auto value = lowerCirAttrAsValue(op, init.value(), rewriter, typeConverter); + auto value = lowerCirAttrAsValue(op, init.value(), rewriter, typeConverter, + dataLayout); rewriter.create(loc, value); return mlir::success(); } else if (auto dataMemberAttr = @@ -2265,27 +2366,28 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( - op->getLoc(), - lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter)); + op->getLoc(), lowerCirAttrAsValue(op, structAttr, rewriter, + typeConverter, dataLayout)); return mlir::success(); } else if (auto attr = mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( - loc, lowerCirAttrAsValue(op, attr, rewriter, typeConverter)); + loc, + lowerCirAttrAsValue(op, attr, rewriter, typeConverter, dataLayout)); return mlir::success(); } else if (const auto vtableAttr = mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( - op->getLoc(), - lowerCirAttrAsValue(op, vtableAttr, rewriter, typeConverter)); + op->getLoc(), lowerCirAttrAsValue(op, vtableAttr, rewriter, + typeConverter, dataLayout)); return mlir::success(); } else if (const auto typeinfoAttr = mlir::dyn_cast(init.value())) { setupRegionInitializedLLVMGlobalOp(op, rewriter); rewriter.create( - op->getLoc(), - lowerCirAttrAsValue(op, typeinfoAttr, rewriter, typeConverter)); + op->getLoc(), lowerCirAttrAsValue(op, typeinfoAttr, rewriter, + typeConverter, dataLayout)); return mlir::success(); } else { op.emitError() << "unsupported initializer '" << init.value() << "'"; @@ -2748,7 +2850,6 @@ mlir::LogicalResult CIRToLLVMCmpOpLowering::matchAndRewrite( cir::CmpOp cmpOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { auto type = cmpOp.getLhs().getType(); - mlir::Value llResult; // Lower to LLVM comparison op. // if (auto intTy = mlir::dyn_cast(type)) { @@ -2757,27 +2858,21 @@ mlir::LogicalResult CIRToLLVMCmpOpLowering::matchAndRewrite( ? mlir::cast(type).isSigned() : mlir::cast(type).isSigned(); auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), isSigned); - llResult = rewriter.create( - cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + rewriter.replaceOpWithNewOp( + cmpOp, kind, adaptor.getLhs(), adaptor.getRhs()); } else if (auto ptrTy = mlir::dyn_cast(type)) { auto kind = convertCmpKindToICmpPredicate(cmpOp.getKind(), /* isSigned=*/false); - llResult = rewriter.create( - cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + rewriter.replaceOpWithNewOp( + cmpOp, kind, adaptor.getLhs(), adaptor.getRhs()); } else if (mlir::isa(type)) { auto kind = convertCmpKindToFCmpPredicate(cmpOp.getKind()); - llResult = rewriter.create( - cmpOp.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + rewriter.replaceOpWithNewOp( + cmpOp, kind, adaptor.getLhs(), adaptor.getRhs()); } else { return cmpOp.emitError() << "unsupported type for CmpOp: " << type; } - // LLVM comparison ops return i1, but cir::CmpOp returns the same type as - // the LHS value. Since this return value can be used later, we need to - // restore the type with the extension below. - auto llResultTy = getTypeConverter()->convertType(cmpOp.getType()); - rewriter.replaceOpWithNewOp(cmpOp, llResultTy, llResult); - return mlir::success(); } @@ -2827,8 +2922,7 @@ mlir::LogicalResult CIRToLLVMLLVMIntrinsicCallOpLowering::matchAndRewrite( mlir::LogicalResult CIRToLLVMAssumeOpLowering::matchAndRewrite( cir::AssumeOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { - auto cond = rewriter.create( - op.getLoc(), rewriter.getI1Type(), adaptor.getPredicate()); + auto cond = adaptor.getPredicate(); rewriter.replaceOpWithNewOp(op, cond); return mlir::success(); } @@ -3063,9 +3157,7 @@ mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite( auto cmp = rewriter.create( op.getLoc(), cmpxchg.getResult(), 1); - auto extCmp = rewriter.create(op.getLoc(), - rewriter.getI8Type(), cmp); - rewriter.replaceOp(op, {old, extCmp}); + rewriter.replaceOp(op, {old, cmp}); return mlir::success(); } @@ -3282,9 +3374,7 @@ mlir::LogicalResult CIRToLLVMSelectOpLowering::matchAndRewrite( } } - auto llvmCondition = rewriter.create( - op.getLoc(), mlir::IntegerType::get(op->getContext(), 1), - adaptor.getCondition()); + auto llvmCondition = adaptor.getCondition(); rewriter.replaceOpWithNewOp( op, llvmCondition, adaptor.getTrueValue(), adaptor.getFalseValue()); @@ -3497,8 +3587,8 @@ mlir::LogicalResult CIRToLLVMInlineAsmOpLowering::matchAndRewrite( std::vector attrs; auto typ = cast(cirOperands[i].getType()); - auto typAttr = - mlir::TypeAttr::get(getTypeConverter()->convertType(typ.getPointee())); + auto typAttr = mlir::TypeAttr::get(convertTypeForMemory( + *getTypeConverter(), dataLayout, typ.getPointee())); attrs.push_back(rewriter.getNamedAttr(llvmAttrName, typAttr)); auto newDict = rewriter.getDictionaryAttr(attrs); @@ -3645,13 +3735,7 @@ mlir::LogicalResult CIRToLLVMGetBitfieldOpLowering::matchAndRewrite( mlir::LogicalResult CIRToLLVMIsConstantOpLowering::matchAndRewrite( cir::IsConstantOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { - // FIXME(cir): llvm.intr.is.constant returns i1 value but the LLVM Lowering - // expects that cir.bool type will be lowered as i8 type. - // So we have to insert zext here. - auto isConstantOP = - rewriter.create(op.getLoc(), adaptor.getVal()); - rewriter.replaceOpWithNewOp(op, rewriter.getI8Type(), - isConstantOP); + rewriter.replaceOpWithNewOp(op, adaptor.getVal()); return mlir::success(); } @@ -3871,17 +3955,7 @@ mlir::LogicalResult CIRToLLVMIsFPClassOpLowering::matchAndRewrite( auto flags = adaptor.getFlags(); auto retTy = rewriter.getI1Type(); - auto loc = op->getLoc(); - - auto intrinsic = - rewriter.create(loc, retTy, src, flags); - // FIMXE: CIR now will convert cir::BoolType to i8 type unconditionally. - // Remove this conversion after we fix - // https://github.com/llvm/clangir/issues/480 - auto converted = rewriter.create( - loc, rewriter.getI8Type(), intrinsic->getResult(0)); - - rewriter.replaceOp(op, converted); + rewriter.replaceOpWithNewOp(op, retTy, src, flags); return mlir::success(); } @@ -3960,17 +4034,28 @@ void populateCIRToLLVMConversionPatterns( patterns.add(converter, dataLayout, stringGlobalsMap, argStringGlobalsMap, argsVarMap, patterns.getContext()); + patterns.add< + // clang-format off + CIRToLLVMLoadOpLowering, + CIRToLLVMStoreOpLowering, + CIRToLLVMGlobalOpLowering, + CIRToLLVMConstantOpLowering + // clang-format on + >(converter, patterns.getContext(), lowerModule, dataLayout); patterns.add< // clang-format off CIRToLLVMBaseDataMemberOpLowering, - CIRToLLVMConstantOpLowering, CIRToLLVMDerivedDataMemberOpLowering, - CIRToLLVMGetRuntimeMemberOpLowering, - CIRToLLVMGlobalOpLowering, - CIRToLLVMLoadOpLowering, - CIRToLLVMStoreOpLowering + CIRToLLVMGetRuntimeMemberOpLowering // clang-format on >(converter, patterns.getContext(), lowerModule); + patterns.add< + // clang-format off + CIRToLLVMPtrStrideOpLowering, + CIRToLLVMCastOpLowering, + CIRToLLVMInlineAsmOpLowering + // clang-format on + >(converter, patterns.getContext(), dataLayout); patterns.add< // clang-format off CIRToLLVMAbsOpLowering, @@ -3994,7 +4079,6 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMBrOpLowering, CIRToLLVMByteswapOpLowering, CIRToLLVMCallOpLowering, - CIRToLLVMCastOpLowering, CIRToLLVMCatchParamOpLowering, CIRToLLVMClearCacheOpLowering, CIRToLLVMCmpOpLowering, @@ -4015,7 +4099,6 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMGetBitfieldOpLowering, CIRToLLVMGetGlobalOpLowering, CIRToLLVMGetMemberOpLowering, - CIRToLLVMInlineAsmOpLowering, CIRToLLVMIsConstantOpLowering, CIRToLLVMIsFPClassOpLowering, CIRToLLVMLLVMIntrinsicCallOpLowering, @@ -4029,7 +4112,6 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMPrefetchOpLowering, CIRToLLVMPtrDiffOpLowering, CIRToLLVMPtrMaskOpLowering, - CIRToLLVMPtrStrideOpLowering, CIRToLLVMResumeOpLowering, CIRToLLVMReturnAddrOpLowering, CIRToLLVMRotateOpLowering, @@ -4107,7 +4189,7 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, return converter.convertType(abiType); }); converter.addConversion([&](cir::ArrayType type) -> mlir::Type { - auto ty = converter.convertType(type.getEltType()); + auto ty = convertTypeForMemory(converter, dataLayout, type.getEltType()); return mlir::LLVM::LLVMArrayType::get(ty, type.getSize()); }); converter.addConversion([&](cir::VectorType type) -> mlir::Type { @@ -4115,7 +4197,7 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, return mlir::LLVM::getFixedVectorType(ty, type.getSize()); }); converter.addConversion([&](cir::BoolType type) -> mlir::Type { - return mlir::IntegerType::get(type.getContext(), 8, + return mlir::IntegerType::get(type.getContext(), 1, mlir::IntegerType::Signless); }); converter.addConversion([&](cir::IntType type) -> mlir::Type { @@ -4168,13 +4250,14 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, // TODO(cir): This should be properly validated. case cir::StructType::Struct: for (auto ty : type.getMembers()) - llvmMembers.push_back(converter.convertType(ty)); + llvmMembers.push_back(convertTypeForMemory(converter, dataLayout, ty)); break; // Unions are lowered as only the largest member. case cir::StructType::Union: { auto largestMember = type.getLargestMember(dataLayout); if (largestMember) - llvmMembers.push_back(converter.convertType(largestMember)); + llvmMembers.push_back( + convertTypeForMemory(converter, dataLayout, largestMember)); break; } } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 48baae2ae799..12ded1f39c80 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -14,13 +14,18 @@ #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/IR/MLIRContext.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Transforms/DialectConversion.h" namespace cir { namespace direct { + +/// Convert a CIR attribute to an LLVM attribute. May use the datalayout for +/// lowering attributes to-be-stored in memory. mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter); + const mlir::TypeConverter *converter, + mlir::DataLayout const &dataLayout); mlir::LLVM::Linkage convertLinkage(cir::GlobalLinkageKind linkage); @@ -137,7 +142,13 @@ class CIRToLLVMMemSetInlineOpLowering class CIRToLLVMPtrStrideOpLowering : public mlir::OpConversionPattern { + mlir::DataLayout const &dataLayout; + public: + CIRToLLVMPtrStrideOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + mlir::DataLayout const &dataLayout) + : OpConversionPattern(typeConverter, context), dataLayout(dataLayout) {} using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult @@ -216,9 +227,15 @@ class CIRToLLVMBrCondOpLowering }; class CIRToLLVMCastOpLowering : public mlir::OpConversionPattern { + mlir::DataLayout const &dataLayout; + mlir::Type convertTy(mlir::Type ty) const; public: + CIRToLLVMCastOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + mlir::DataLayout const &dataLayout) + : OpConversionPattern(typeConverter, context), dataLayout(dataLayout) {} using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult @@ -302,12 +319,15 @@ class CIRToLLVMAllocaOpLowering class CIRToLLVMLoadOpLowering : public mlir::OpConversionPattern { cir::LowerModule *lowerMod; + mlir::DataLayout const &dataLayout; public: CIRToLLVMLoadOpLowering(const mlir::TypeConverter &typeConverter, mlir::MLIRContext *context, - cir::LowerModule *lowerModule) - : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) {} + cir::LowerModule *lowerModule, + mlir::DataLayout const &dataLayout) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule), + dataLayout(dataLayout) {} mlir::LogicalResult matchAndRewrite(cir::LoadOp op, OpAdaptor, @@ -317,12 +337,15 @@ class CIRToLLVMLoadOpLowering : public mlir::OpConversionPattern { class CIRToLLVMStoreOpLowering : public mlir::OpConversionPattern { cir::LowerModule *lowerMod; + mlir::DataLayout const &dataLayout; public: CIRToLLVMStoreOpLowering(const mlir::TypeConverter &typeConverter, mlir::MLIRContext *context, - cir::LowerModule *lowerModule) - : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) {} + cir::LowerModule *lowerModule, + mlir::DataLayout const &dataLayout) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule), + dataLayout(dataLayout) {} mlir::LogicalResult matchAndRewrite(cir::StoreOp op, OpAdaptor, @@ -332,12 +355,15 @@ class CIRToLLVMStoreOpLowering class CIRToLLVMConstantOpLowering : public mlir::OpConversionPattern { cir::LowerModule *lowerMod; + mlir::DataLayout const &dataLayout; public: CIRToLLVMConstantOpLowering(const mlir::TypeConverter &typeConverter, mlir::MLIRContext *context, - cir::LowerModule *lowerModule) - : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) { + cir::LowerModule *lowerModule, + mlir::DataLayout const &dataLayout) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule), + dataLayout(dataLayout) { setHasBoundedRewriteRecursion(); } @@ -538,12 +564,15 @@ class CIRToLLVMSwitchFlatOpLowering class CIRToLLVMGlobalOpLowering : public mlir::OpConversionPattern { cir::LowerModule *lowerMod; + mlir::DataLayout const &dataLayout; public: CIRToLLVMGlobalOpLowering(const mlir::TypeConverter &typeConverter, mlir::MLIRContext *context, - cir::LowerModule *lowerModule) - : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) { + cir::LowerModule *lowerModule, + mlir::DataLayout const &dataLayout) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule), + dataLayout(dataLayout) { setHasBoundedRewriteRecursion(); } @@ -904,7 +933,14 @@ class CIRToLLVMTrapOpLowering : public mlir::OpConversionPattern { class CIRToLLVMInlineAsmOpLowering : public mlir::OpConversionPattern { + mlir::DataLayout const &dataLayout; + public: + CIRToLLVMInlineAsmOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + mlir::DataLayout const &dataLayout) + : OpConversionPattern(typeConverter, context), dataLayout(dataLayout) {} + using mlir::OpConversionPattern::OpConversionPattern; mlir::LogicalResult diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp index 16252e1058cd..d3cccda6cdd7 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp @@ -337,11 +337,8 @@ class CIRConditionOpLowering auto *parentOp = op->getParentOp(); return llvm::TypeSwitch(parentOp) .Case([&](auto) { - auto condition = adaptor.getCondition(); - auto i1Condition = rewriter.create( - op.getLoc(), rewriter.getI1Type(), condition); rewriter.replaceOpWithNewOp( - op, i1Condition, parentOp->getOperands()); + op, adaptor.getCondition(), parentOp->getOperands()); return mlir::success(); }) .Default([](auto) { return mlir::failure(); }); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 54c2c798a5a1..63253ecc454c 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -35,6 +35,7 @@ #include "mlir/IR/Operation.h" #include "mlir/IR/Region.h" #include "mlir/IR/TypeRange.h" +#include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" @@ -105,6 +106,54 @@ class CIRCallOpLowering : public mlir::OpConversionPattern { } }; +/// Given a type convertor and a data layout, convert the given type to a type +/// that is suitable for memory operations. For example, this can be used to +/// lower cir.bool accesses to i8. +static mlir::Type convertTypeForMemory(const mlir::TypeConverter &converter, + mlir::Type type) { + // TODO(cir): Handle other types similarly to clang's codegen + // convertTypeForMemory + if (isa(type)) { + // TODO: Use datalayout to get the size of bool + return mlir::IntegerType::get(type.getContext(), 8); + } + + return converter.convertType(type); +} + +/// Emits the value from memory as expected by its users. Should be called when +/// the memory represetnation of a CIR type is not equal to its scalar +/// representation. +static mlir::Value emitFromMemory(mlir::ConversionPatternRewriter &rewriter, + cir::LoadOp op, mlir::Value value) { + + // TODO(cir): Handle other types similarly to clang's codegen EmitFromMemory + if (isa(op.getResult().getType())) { + // Create trunc of value from i8 to i1 + // TODO: Use datalayout to get the size of bool + assert(value.getType().isInteger(8)); + return createIntCast(rewriter, value, rewriter.getI1Type()); + } + + return value; +} + +/// Emits a value to memory with the expected scalar type. Should be called when +/// the memory represetnation of a CIR type is not equal to its scalar +/// representation. +static mlir::Value emitToMemory(mlir::ConversionPatternRewriter &rewriter, + cir::StoreOp op, mlir::Value value) { + + // TODO(cir): Handle other types similarly to clang's codegen EmitToMemory + if (isa(op.getValue().getType())) { + // Create zext of value from i1 to i8 + // TODO: Use datalayout to get the size of bool + return createIntCast(rewriter, value, rewriter.getI8Type()); + } + + return value; +} + class CIRAllocaOpLowering : public mlir::OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -112,8 +161,9 @@ class CIRAllocaOpLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(cir::AllocaOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto type = adaptor.getAllocaType(); - auto mlirType = getTypeConverter()->convertType(type); + + mlir::Type mlirType = + convertTypeForMemory(*getTypeConverter(), adaptor.getAllocaType()); // FIXME: Some types can not be converted yet (e.g. struct) if (!mlirType) @@ -174,12 +224,20 @@ class CIRLoadOpLowering : public mlir::OpConversionPattern { mlir::Value base; SmallVector indices; SmallVector eraseList; + mlir::memref::LoadOp newLoad; if (findBaseAndIndices(adaptor.getAddr(), base, indices, eraseList, rewriter)) { - rewriter.replaceOpWithNewOp(op, base, indices); + newLoad = + rewriter.create(op.getLoc(), base, indices); + // rewriter.replaceOpWithNewOp(op, base, indices); eraseIfSafe(op.getAddr(), base, eraseList, rewriter); } else - rewriter.replaceOpWithNewOp(op, adaptor.getAddr()); + newLoad = + rewriter.create(op.getLoc(), adaptor.getAddr()); + + // Convert adapted result to its original type if needed. + mlir::Value result = emitFromMemory(rewriter, op, newLoad.getResult()); + rewriter.replaceOp(op, result); return mlir::LogicalResult::success(); } }; @@ -194,13 +252,16 @@ class CIRStoreOpLowering : public mlir::OpConversionPattern { mlir::Value base; SmallVector indices; SmallVector eraseList; + + // Convert adapted value to its memory type if needed. + mlir::Value value = emitToMemory(rewriter, op, adaptor.getValue()); if (findBaseAndIndices(adaptor.getAddr(), base, indices, eraseList, rewriter)) { - rewriter.replaceOpWithNewOp(op, adaptor.getValue(), - base, indices); + rewriter.replaceOpWithNewOp(op, value, base, + indices); eraseIfSafe(op.getAddr(), base, eraseList, rewriter); } else - rewriter.replaceOpWithNewOp(op, adaptor.getValue(), + rewriter.replaceOpWithNewOp(op, value, adaptor.getAddr()); return mlir::LogicalResult::success(); } @@ -744,29 +805,20 @@ class CIRCmpOpLowering : public mlir::OpConversionPattern { mlir::ConversionPatternRewriter &rewriter) const override { auto type = op.getLhs().getType(); - mlir::Value mlirResult; - if (auto ty = mlir::dyn_cast(type)) { auto kind = convertCmpKindToCmpIPredicate(op.getKind(), ty.isSigned()); - mlirResult = rewriter.create( - op.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + rewriter.replaceOpWithNewOp( + op, kind, adaptor.getLhs(), adaptor.getRhs()); } else if (auto ty = mlir::dyn_cast(type)) { auto kind = convertCmpKindToCmpFPredicate(op.getKind()); - mlirResult = rewriter.create( - op.getLoc(), kind, adaptor.getLhs(), adaptor.getRhs()); + rewriter.replaceOpWithNewOp( + op, kind, adaptor.getLhs(), adaptor.getRhs()); } else if (auto ty = mlir::dyn_cast(type)) { llvm_unreachable("pointer comparison not supported yet"); } else { return op.emitError() << "unsupported type for CmpOp: " << type; } - // MLIR comparison ops return i1, but cir::CmpOp returns the same type as - // the LHS value. Since this return value can be used later, we need to - // restore the type with the extension below. - auto mlirResultTy = getTypeConverter()->convertType(op.getType()); - rewriter.replaceOpWithNewOp(op, mlirResultTy, - mlirResult); - return mlir::LogicalResult::success(); } }; @@ -826,12 +878,8 @@ struct CIRBrCondOpLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(cir::BrCondOp brOp, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - - auto condition = adaptor.getCond(); - auto i1Condition = rewriter.create( - brOp.getLoc(), rewriter.getI1Type(), condition); rewriter.replaceOpWithNewOp( - brOp, i1Condition.getResult(), brOp.getDestTrue(), + brOp, adaptor.getCond(), brOp.getDestTrue(), adaptor.getDestOperandsTrue(), brOp.getDestFalse(), adaptor.getDestOperandsFalse()); @@ -847,16 +895,13 @@ class CIRTernaryOpLowering : public mlir::OpConversionPattern { matchAndRewrite(cir::TernaryOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { rewriter.setInsertionPoint(op); - auto condition = adaptor.getCond(); - auto i1Condition = rewriter.create( - op.getLoc(), rewriter.getI1Type(), condition); SmallVector resultTypes; if (mlir::failed(getTypeConverter()->convertTypes(op->getResultTypes(), resultTypes))) return mlir::failure(); auto ifOp = rewriter.create(op.getLoc(), resultTypes, - i1Condition.getResult(), true); + adaptor.getCond(), true); auto *thenBlock = &ifOp.getThenRegion().front(); auto *elseBlock = &ifOp.getElseRegion().front(); rewriter.inlineBlockBefore(&op.getTrueRegion().front(), thenBlock, @@ -893,11 +938,8 @@ class CIRIfOpLowering : public mlir::OpConversionPattern { mlir::LogicalResult matchAndRewrite(cir::IfOp ifop, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto condition = adaptor.getCondition(); - auto i1Condition = rewriter.create( - ifop->getLoc(), rewriter.getI1Type(), condition); auto newIfOp = rewriter.create( - ifop->getLoc(), ifop->getResultTypes(), i1Condition); + ifop->getLoc(), ifop->getResultTypes(), adaptor.getCondition()); auto *thenBlock = rewriter.createBlock(&newIfOp.getThenRegion()); rewriter.inlineBlockBefore(&ifop.getThenRegion().front(), thenBlock, thenBlock->end()); @@ -924,7 +966,7 @@ class CIRGlobalOpLowering : public mlir::OpConversionPattern { mlir::OpBuilder b(moduleOp.getContext()); const auto CIRSymType = op.getSymType(); - auto convertedType = getTypeConverter()->convertType(CIRSymType); + auto convertedType = convertTypeForMemory(*getTypeConverter(), CIRSymType); if (!convertedType) return mlir::failure(); auto memrefType = dyn_cast(convertedType); @@ -1170,19 +1212,14 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { return mlir::success(); } case CIR::float_to_bool: { - auto dstTy = mlir::cast(op.getType()); - auto newDstType = convertTy(dstTy); auto kind = mlir::arith::CmpFPredicate::UNE; // Check if float is not equal to zero. auto zeroFloat = rewriter.create( op.getLoc(), src.getType(), mlir::FloatAttr::get(src.getType(), 0.0)); - // Extend comparison result to either bool (C++) or int (C). - mlir::Value cmpResult = rewriter.create( - op.getLoc(), kind, src, zeroFloat); - rewriter.replaceOpWithNewOp(op, newDstType, - cmpResult); + rewriter.replaceOpWithNewOp(op, kind, src, + zeroFloat); return mlir::success(); } case CIR::bool_to_int: { @@ -1330,7 +1367,7 @@ void populateCIRToMLIRConversionPatterns(mlir::RewritePatternSet &patterns, static mlir::TypeConverter prepareTypeConverter() { mlir::TypeConverter converter; converter.addConversion([&](cir::PointerType type) -> mlir::Type { - auto ty = converter.convertType(type.getPointee()); + auto ty = convertTypeForMemory(converter, type.getPointee()); // FIXME: The pointee type might not be converted (e.g. struct) if (!ty) return nullptr; @@ -1350,7 +1387,7 @@ static mlir::TypeConverter prepareTypeConverter() { mlir::IntegerType::SignednessSemantics::Signless); }); converter.addConversion([&](cir::BoolType type) -> mlir::Type { - return mlir::IntegerType::get(type.getContext(), 8); + return mlir::IntegerType::get(type.getContext(), 1); }); converter.addConversion([&](cir::SingleType type) -> mlir::Type { return mlir::Float32Type::get(type.getContext()); diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c index c01abf7bae6e..fd9267632344 100644 --- a/clang/test/CIR/CodeGen/atomic-xchg-field.c +++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c @@ -58,16 +58,14 @@ void structAtomicExchange(unsigned referenceCount, wPtr item) { // LLVM: %[[RES:.*]] = cmpxchg weak ptr %9, i32 %[[EXP]], i32 %[[DES]] seq_cst seq_cst // LLVM: %[[OLD:.*]] = extractvalue { i32, i1 } %[[RES]], 0 // LLVM: %[[CMP:.*]] = extractvalue { i32, i1 } %[[RES]], 1 -// LLVM: %[[Z:.*]] = zext i1 %[[CMP]] to i8 -// LLVM: %[[X:.*]] = xor i8 %[[Z]], 1 -// LLVM: %[[FAIL:.*]] = trunc i8 %[[X]] to i1 - -// LLVM: br i1 %[[FAIL:.*]], label %[[STORE_OLD:.*]], label %[[CONTINUE:.*]] +// LLVM: %[[FAIL:.*]] = xor i1 %[[CMP]], true +// LLVM: br i1 %[[FAIL]], label %[[STORE_OLD:.*]], label %[[CONTINUE:.*]] // LLVM: [[STORE_OLD]]: // LLVM: store i32 %[[OLD]], ptr // LLVM: br label %[[CONTINUE]] // LLVM: [[CONTINUE]]: +// LLVM: %[[Z:.*]] = zext i1 %[[CMP]] to i8 // LLVM: store i8 %[[Z]], ptr {{.*}}, align 1 // LLVM: ret void diff --git a/clang/test/CIR/CodeGen/bf16-ops.c b/clang/test/CIR/CodeGen/bf16-ops.c index 406446b778eb..d0c051a8d5e5 100644 --- a/clang/test/CIR/CodeGen/bf16-ops.c +++ b/clang/test/CIR/CodeGen/bf16-ops.c @@ -41,14 +41,12 @@ void foo(void) { // NATIVE-NEXT: %{{.+}} = cir.cast(integral, %[[#C]] : !s32i), !u32i // NONATIVE-LLVM: %[[#A:]] = fcmp une bfloat %{{.+}}, 0xR0000 - // NONATIVE-LLVM-NEXT: %[[#B:]] = zext i1 %[[#A]] to i8 - // NONATIVE-LLVM-NEXT: %[[#C:]] = xor i8 %[[#B]], 1 - // NONATIVE-LLVM-NEXT: %{{.+}} = zext i8 %[[#C]] to i32 + // NONATIVE-LLVM-NEXT: %[[#C:]] = xor i1 %[[#A]], true + // NONATIVE-LLVM-NEXT: %{{.+}} = zext i1 %[[#C]] to i32 // NATIVE-LLVM: %[[#A:]] = fcmp une bfloat %{{.+}}, 0xR0000 - // NATIVE-LLVM-NEXT: %[[#B:]] = zext i1 %[[#A]] to i8 - // NATIVE-LLVM-NEXT: %[[#C:]] = xor i8 %[[#B]], 1 - // NATIVE-LLVM-NEXT: %{{.+}} = zext i8 %[[#C]] to i32 + // NATIVE-LLVM-NEXT: %[[#C:]] = xor i1 %[[#A]], true + // NATIVE-LLVM-NEXT: %{{.+}} = zext i1 %[[#C]] to i32 h1 = -h1; // NONATIVE: %[[#A:]] = cir.cast(floating, %{{.+}} : !cir.bf16), !cir.float diff --git a/clang/test/CIR/CodeGen/builtin-assume.cpp b/clang/test/CIR/CodeGen/builtin-assume.cpp index 8d7448a2724d..9a099c0c94f9 100644 --- a/clang/test/CIR/CodeGen/builtin-assume.cpp +++ b/clang/test/CIR/CodeGen/builtin-assume.cpp @@ -16,7 +16,7 @@ int test_assume(int x) { // CIR: } // LLVM: @_Z11test_assumei -// LLVM: %[[#cond:]] = trunc i8 %{{.+}} to i1 +// LLVM: %[[#cond:]] = icmp sgt i32 %{{.+}}, 0 // LLVM-NEXT: call void @llvm.assume(i1 %[[#cond]]) int test_assume_attr(int x) { @@ -32,7 +32,7 @@ int test_assume_attr(int x) { // CIR: } // LLVM: @_Z16test_assume_attri -// LLVM: %[[#cond:]] = trunc i8 %{{.+}} to i1 +// LLVM: %[[#cond:]] = icmp sgt i32 %{{.+}}, 0 // LLVM-NEXT: call void @llvm.assume(i1 %[[#cond]]) int test_assume_aligned(int *ptr) { diff --git a/clang/test/CIR/CodeGen/builtin-constant-p.c b/clang/test/CIR/CodeGen/builtin-constant-p.c index a8eb13adacfd..810806ec2443 100644 --- a/clang/test/CIR/CodeGen/builtin-constant-p.c +++ b/clang/test/CIR/CodeGen/builtin-constant-p.c @@ -20,8 +20,7 @@ int foo() { // LLVM: [[TMP1:%.*]] = alloca i32, i64 1 // LLVM: [[TMP2:%.*]] = load i32, ptr @a // LLVM: [[TMP3:%.*]] = call i1 @llvm.is.constant.i32(i32 [[TMP2]]) -// LLVM: [[TMP4:%.*]] = zext i1 [[TMP3]] to i8 -// LLVM: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32 +// LLVM: [[TMP5:%.*]] = zext i1 [[TMP3]] to i32 // LLVM: store i32 [[TMP5]], ptr [[TMP1]] // LLVM: [[TMP6:%.*]] = load i32, ptr [[TMP1]] // LLVM: ret i32 [[TMP6]] diff --git a/clang/test/CIR/CodeGen/complex-arithmetic.c b/clang/test/CIR/CodeGen/complex-arithmetic.c index eddedc2d3a27..3630edfc6033 100644 --- a/clang/test/CIR/CodeGen/complex-arithmetic.c +++ b/clang/test/CIR/CodeGen/complex-arithmetic.c @@ -303,12 +303,9 @@ void mul() { // LLVM-FULL-NEXT: %[[#F:]] = fadd double %[[#C]], %[[#D]] // LLVM-FULL-NEXT: %[[#G:]] = insertvalue { double, double } undef, double %[[#E]], 0 // LLVM-FULL-NEXT: %[[#RES:]] = insertvalue { double, double } %[[#G]], double %[[#F]], 1 -// LLVM-FULL-NEXT: %[[#H:]] = fcmp une double %[[#E]], %[[#E]] -// LLVM-FULL-NEXT: %[[#COND:]] = zext i1 %[[#H]] to i8 -// LLVM-FULL-NEXT: %[[#I:]] = fcmp une double %[[#F]], %[[#F]] -// LLVM-FULL-NEXT: %[[#COND2:]] = zext i1 %[[#I]] to i8 -// LLVM-FULL-NEXT: %[[#J:]] = and i8 %[[#COND]], %[[#COND2]] -// LLVM-FULL-NEXT: %[[#COND3:]] = trunc i8 %[[#J]] to i1 +// LLVM-FULL-NEXT: %[[#COND:]] = fcmp une double %[[#E]], %[[#E]] +// LLVM-FULL-NEXT: %[[#COND2:]] = fcmp une double %[[#F]], %[[#F]] +// LLVM-FULL-NEXT: %[[#COND3:]] = and i1 %[[#COND]], %[[#COND2]] // LLVM-FULL: {{.+}}: // LLVM-FULL-NEXT: %{{.+}} = call { double, double } @__muldc3(double %[[#LHSR]], double %[[#LHSI]], double %[[#RHSR]], double %[[#RHSI]]) // LLVM-FULL-NEXT: br label %{{.+}} diff --git a/clang/test/CIR/CodeGen/complex-cast.c b/clang/test/CIR/CodeGen/complex-cast.c index 98afabd65340..5cadcf711a60 100644 --- a/clang/test/CIR/CodeGen/complex-cast.c +++ b/clang/test/CIR/CodeGen/complex-cast.c @@ -179,10 +179,8 @@ void complex_to_bool() { // LLVM: %[[#REAL:]] = extractvalue { double, double } %{{.+}}, 0 // LLVM-NEXT: %[[#IMAG:]] = extractvalue { double, double } %{{.+}}, 1 // LLVM-NEXT: %[[#RB:]] = fcmp une double %[[#REAL]], 0.000000e+00 -// LLVM-NEXT: %[[#RB2:]] = zext i1 %[[#RB]] to i8 // LLVM-NEXT: %[[#IB:]] = fcmp une double %[[#IMAG]], 0.000000e+00 -// LLVM-NEXT: %[[#IB2:]] = zext i1 %[[#IB]] to i8 -// LLVM-NEXT: %{{.+}} = or i8 %[[#RB2]], %[[#IB2]] +// LLVM-NEXT: %{{.+}} = or i1 %[[#RB]], %[[#IB]] // CIR-BEFORE: %{{.+}} = cir.cast(int_complex_to_bool, %{{.+}} : !cir.complex), !cir.bool @@ -196,10 +194,8 @@ void complex_to_bool() { // LLVM: %[[#REAL:]] = extractvalue { i32, i32 } %{{.+}}, 0 // LLVM-NEXT: %[[#IMAG:]] = extractvalue { i32, i32 } %{{.+}}, 1 // LLVM-NEXT: %[[#RB:]] = icmp ne i32 %[[#REAL]], 0 -// LLVM-NEXT: %[[#RB2:]] = zext i1 %[[#RB]] to i8 // LLVM-NEXT: %[[#IB:]] = icmp ne i32 %[[#IMAG]], 0 -// LLVM-NEXT: %[[#IB2:]] = zext i1 %[[#IB]] to i8 -// LLVM-NEXT: %{{.+}} = or i8 %[[#RB2]], %[[#IB2]] +// LLVM-NEXT: %{{.+}} = or i1 %[[#RB]], %[[#IB]] // CHECK: } diff --git a/clang/test/CIR/CodeGen/globals.cpp b/clang/test/CIR/CodeGen/globals.cpp index ca8161b1cb8f..3b91bacfed22 100644 --- a/clang/test/CIR/CodeGen/globals.cpp +++ b/clang/test/CIR/CodeGen/globals.cpp @@ -20,6 +20,10 @@ void use_global() { int li = a; } +bool bool_global() { + return e; +} + void use_global_string() { unsigned char c = s2[0]; } diff --git a/clang/test/CIR/CodeGen/new-null.cpp b/clang/test/CIR/CodeGen/new-null.cpp index 1957d54873a0..4f46cbd51147 100644 --- a/clang/test/CIR/CodeGen/new-null.cpp +++ b/clang/test/CIR/CodeGen/new-null.cpp @@ -66,7 +66,7 @@ namespace test15 { // LLVM: %[[VAL_0:.*]] = alloca ptr, i64 1, align 8 // LLVM: store ptr %[[VAL_1:.*]], ptr %[[VAL_0]], align 8 // LLVM: %[[VAL_2:.*]] = load ptr, ptr %[[VAL_0]], align 8 - // LLVM: %[[VAL_3:.*]] = call ptr @_ZnwmPvb(i64 1, ptr %[[VAL_2]], i8 1) + // LLVM: %[[VAL_3:.*]] = call ptr @_ZnwmPvb(i64 1, ptr %[[VAL_2]], i1 true) // LLVM: %[[VAL_4:.*]] = icmp ne ptr %[[VAL_3]], null // LLVM: br i1 %[[VAL_4]], label %[[VAL_5:.*]], label %[[VAL_6:.*]] // LLVM: [[VAL_5]]: ; preds = %[[VAL_7:.*]] diff --git a/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp b/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp index 0127559bba65..63625236e42a 100644 --- a/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp +++ b/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp @@ -23,10 +23,8 @@ auto base_to_derived(int Base2::*ptr) -> int Derived::* { // LLVM: %[[#src:]] = load i64, ptr %{{.+}} // LLVM-NEXT: %[[#is_null:]] = icmp eq i64 %[[#src]], -1 - // LLVM-NEXT: %[[#is_null_bool:]] = zext i1 %[[#is_null]] to i8 // LLVM-NEXT: %[[#adjusted:]] = add i64 %[[#src]], 4 - // LLVM-NEXT: %[[#cond:]] = trunc i8 %[[#is_null_bool]] to i1 - // LLVM-NEXT: %{{.+}} = select i1 %[[#cond]], i64 -1, i64 %[[#adjusted]] + // LLVM-NEXT: %{{.+}} = select i1 %[[#is_null]], i64 -1, i64 %[[#adjusted]] } // CIR-LABEL: @_Z15derived_to_baseM7Derivedi @@ -37,10 +35,8 @@ auto derived_to_base(int Derived::*ptr) -> int Base2::* { // LLVM: %[[#src:]] = load i64, ptr %{{.+}} // LLVM-NEXT: %[[#is_null:]] = icmp eq i64 %[[#src]], -1 - // LLVM-NEXT: %[[#is_null_bool:]] = zext i1 %[[#is_null]] to i8 // LLVM-NEXT: %[[#adjusted:]] = sub i64 %[[#src]], 4 - // LLVM-NEXT: %[[#cond:]] = trunc i8 %[[#is_null_bool]] to i1 - // LLVM-NEXT: %9 = select i1 %[[#cond]], i64 -1, i64 %[[#adjusted]] + // LLVM-NEXT: %{{.+}} = select i1 %[[#is_null]], i64 -1, i64 %[[#adjusted]] } // CIR-LABEL: @_Z27base_to_derived_zero_offsetM5Base1i diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 2ba42118dddb..88ff490c14ff 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -77,11 +77,11 @@ static Init __ioinit2(false); // LLVM: @_ZL9__ioinit2 = internal global %class.Init zeroinitializer // LLVM: @llvm.global_ctors = appending constant [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init, ptr null }, { i32, ptr, ptr } { i32 65536, ptr @__cxx_global_var_init.1, ptr null }] // LLVM: define internal void @__cxx_global_var_init() -// LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL8__ioinit, i8 1) +// LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL8__ioinit, i1 true) // LLVM-NEXT: call void @__cxa_atexit(ptr @_ZN4InitD1Ev, ptr @_ZL8__ioinit, ptr @__dso_handle) // LLVM-NEXT: ret void // LLVM: define internal void @__cxx_global_var_init.1() -// LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL9__ioinit2, i8 0) +// LLVM-NEXT: call void @_ZN4InitC1Eb(ptr @_ZL9__ioinit2, i1 false) // LLVM-NEXT: call void @__cxa_atexit(ptr @_ZN4InitD1Ev, ptr @_ZL9__ioinit2, ptr @__dso_handle) // LLVM-NEXT: ret void // LLVM: define void @_GLOBAL__sub_I_static.cpp() diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index aaeb46e770b5..c628e3c2b46b 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1140,8 +1140,8 @@ module { !s8i = !cir.int cir.func @no_reference_global() { // expected-error @below {{'cir.get_global' op 'str' does not reference a valid cir.global or cir.func}} - %0 = cir.get_global @str : !cir.ptr - cir.return + %0 = cir.get_global @str : !cir.ptr + cir.return } // ----- @@ -1458,7 +1458,7 @@ cir.global external @f = #cir.fp<0x7FC0000007FC0000007FC000000> : !cir.long_doub // ----- -// Long double with `double` semnatics should have a value that fits in a double. +// Long double with `double` semantics should have a value that fits in a double. // CHECK: cir.global external @f = #cir.fp<0x7FC000007FC000000000> : !cir.long_double cir.global external @f = #cir.fp<0x7FC000007FC000000000> : !cir.long_double diff --git a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir index 408cac97ee41..5383477255aa 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/bool.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/bool.cir @@ -14,8 +14,9 @@ module { // MLIR: func @foo() { // MLIR: [[Value:%[a-z0-9]+]] = memref.alloca() {alignment = 1 : i64} : memref -// MLIR: = arith.constant 1 : i8 -// MLIR: memref.store {{.*}}, [[Value]][] : memref +// MLIR: %[[CONST:.*]] = arith.constant true +// MLIR: %[[BOOL_TO_MEM:.*]] = arith.extui %[[CONST]] : i1 to i8 +// MLIR-NEXT: memref.store %[[BOOL_TO_MEM]], [[Value]][] : memref // return // LLVM: = alloca i8, i64 diff --git a/clang/test/CIR/Lowering/ThroughMLIR/branch.cir b/clang/test/CIR/Lowering/ThroughMLIR/branch.cir index 2b78484627d5..89cd8849a3ca 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/branch.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/branch.cir @@ -13,9 +13,8 @@ cir.func @foo(%arg0: !cir.bool) -> !s32i { } // MLIR: module { -// MLIR-NEXT: func.func @foo(%arg0: i8) -> i32 -// MLIR-NEXT: %0 = arith.trunci %arg0 : i8 to i1 -// MLIR-NEXT: cf.cond_br %0, ^bb1, ^bb2 +// MLIR-NEXT: func.func @foo(%arg0: i1) -> i32 +// MLIR-NEXT: cf.cond_br %arg0, ^bb1, ^bb2 // MLIR-NEXT: ^bb1: // pred: ^bb0 // MLIR-NEXT: %c1_i32 = arith.constant 1 : i32 // MLIR-NEXT: return %c1_i32 : i32 @@ -25,13 +24,12 @@ cir.func @foo(%arg0: !cir.bool) -> !s32i { // MLIR-NEXT: } // MLIR-NEXT: } -// LLVM: define i32 @foo(i8 %0) -// LLVM-NEXT: %2 = trunc i8 %0 to i1 -// LLVM-NEXT: br i1 %2, label %3, label %4 +// LLVM: define i32 @foo(i1 %0) +// LLVM-NEXT: br i1 %0, label %[[TRUE:.*]], label %[[FALSE:.*]] // LLVM-EMPTY: -// LLVM-NEXT: 3: ; preds = %1 +// LLVM-NEXT: [[TRUE]]: // LLVM-NEXT: ret i32 1 // LLVM-EMPTY: -// LLVM-NEXT: 4: ; preds = %1 +// LLVM-NEXT: [[FALSE]]: // LLVM-NEXT: ret i32 0 // LLVM-NEXT: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cast.cir b/clang/test/CIR/Lowering/ThroughMLIR/cast.cir index 18452a456880..8812e77dd583 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/cast.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/cast.cir @@ -7,8 +7,8 @@ !u16i = !cir.int !u8i = !cir.int module { - // MLIR-LABEL: func.func @cast_int_to_bool(%arg0: i32) -> i8 - // LLVM-LABEL: define i8 @cast_int_to_bool(i32 %0) + // MLIR-LABEL: func.func @cast_int_to_bool(%arg0: i32) -> i1 + // LLVM-LABEL: define i1 @cast_int_to_bool(i32 %0) cir.func @cast_int_to_bool(%i : !u32i) -> !cir.bool { // MLIR-NEXT: %[[ZERO:.*]] = arith.constant 0 : i32 // MLIR-NEXT: arith.cmpi ne, %arg0, %[[ZERO]] @@ -71,8 +71,8 @@ module { %1 = cir.cast(floating, %f : !cir.float), !cir.double cir.return %1 : !cir.double } - // MLIR-LABEL: func.func @cast_float_to_bool(%arg0: f32) -> i8 - // LLVM-LABEL: define i8 @cast_float_to_bool(float %0) + // MLIR-LABEL: func.func @cast_float_to_bool(%arg0: f32) -> i1 + // LLVM-LABEL: define i1 @cast_float_to_bool(float %0) cir.func @cast_float_to_bool(%f : !cir.float) -> !cir.bool { // MLIR-NEXT: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 // MLIR-NEXT: arith.cmpf une, %arg0, %[[ZERO]] : f32 @@ -81,29 +81,29 @@ module { %1 = cir.cast(float_to_bool, %f : !cir.float), !cir.bool cir.return %1 : !cir.bool } - // MLIR-LABEL: func.func @cast_bool_to_int8(%arg0: i8) -> i8 - // LLVM-LABEL: define i8 @cast_bool_to_int8(i8 %0) + // MLIR-LABEL: func.func @cast_bool_to_int8(%arg0: i1) -> i8 + // LLVM-LABEL: define i8 @cast_bool_to_int8(i1 %0) cir.func @cast_bool_to_int8(%b : !cir.bool) -> !u8i { - // MLIR-NEXT: arith.bitcast %arg0 : i8 to i8 - // LLVM-NEXT: ret i8 %0 + // MLIR-NEXT: arith.extui %arg0 : i1 to i8 + // LLVM-NEXT: zext i1 %0 to i8 %1 = cir.cast(bool_to_int, %b : !cir.bool), !u8i cir.return %1 : !u8i } - // MLIR-LABEL: func.func @cast_bool_to_int(%arg0: i8) -> i32 - // LLVM-LABEL: define i32 @cast_bool_to_int(i8 %0) + // MLIR-LABEL: func.func @cast_bool_to_int(%arg0: i1) -> i32 + // LLVM-LABEL: define i32 @cast_bool_to_int(i1 %0) cir.func @cast_bool_to_int(%b : !cir.bool) -> !u32i { - // MLIR-NEXT: arith.extui %arg0 : i8 to i32 - // LLVM-NEXT: zext i8 %0 to i32 + // MLIR-NEXT: arith.extui %arg0 : i1 to i32 + // LLVM-NEXT: zext i1 %0 to i32 %1 = cir.cast(bool_to_int, %b : !cir.bool), !u32i cir.return %1 : !u32i } - // MLIR-LABEL: func.func @cast_bool_to_float(%arg0: i8) -> f32 - // LLVM-LABEL: define float @cast_bool_to_float(i8 %0) + // MLIR-LABEL: func.func @cast_bool_to_float(%arg0: i1) -> f32 + // LLVM-LABEL: define float @cast_bool_to_float(i1 %0) cir.func @cast_bool_to_float(%b : !cir.bool) -> !cir.float { - // MLIR-NEXT: arith.uitofp %arg0 : i8 to f32 - // LLVM-NEXT: uitofp i8 %0 to float + // MLIR-NEXT: arith.uitofp %arg0 : i1 to f32 + // LLVM-NEXT: uitofp i1 %0 to float %1 = cir.cast(bool_to_float, %b : !cir.bool), !cir.float cir.return %1 : !cir.float diff --git a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cpp b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cpp index fcb9247bfb8f..607f8ad5005f 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/cmp.cpp +++ b/clang/test/CIR/Lowering/ThroughMLIR/cmp.cpp @@ -7,10 +7,10 @@ bool testSignedIntCmpOps(int a, int b) { // CHECK: %[[ALLOC3:.+]] = memref.alloca() {alignment = 1 : i64} : memref // CHECK: %[[ALLOC4:.+]] = memref.alloca() {alignment = 1 : i64} : memref // CHECK: memref.store %arg0, %[[ALLOC1]][] : memref - // CHECK: memref.store %arg1, %[[ALLOC2]][] : memref - + // CHECK: memref.store %arg1, %[[ALLOC2]][] : memref + bool x = a == b; - + // CHECK: %[[LOAD0:.+]] = memref.load %[[ALLOC1]][] : memref // CHECK: %[[LOAD1:.+]] = memref.load %[[ALLOC2]][] : memref // CHECK: %[[CMP0:.+]] = arith.cmpi eq, %[[LOAD0]], %[[LOAD1]] : i32 @@ -57,11 +57,8 @@ bool testSignedIntCmpOps(int a, int b) { // CHECK: %[[EXT5:.+]] = arith.extui %[[CMP5]] : i1 to i8 // CHECK: memref.store %[[EXT5]], %[[ALLOC4]][] : memref - // CHECK: %[[LOAD12:.+]] = memref.load %[[ALLOC4]][] : memref - // CHECK: memref.store %[[LOAD12]], %[[ALLOC3]][] : memref - // CHECK: %[[LOAD13:.+]] = memref.load %[[ALLOC3]][] : memref - // CHECK: return %[[LOAD13]] : i8 return x; + // CHECK: return } bool testUnSignedIntBinOps(unsigned a, unsigned b) { @@ -71,7 +68,7 @@ bool testUnSignedIntBinOps(unsigned a, unsigned b) { // CHECK: %[[ALLOC4:.+]] = memref.alloca() {alignment = 1 : i64} : memref // CHECK: memref.store %arg0, %[[ALLOC1]][] : memref // CHECK: memref.store %arg1, %[[ALLOC2]][] : memref - + bool x = a == b; // CHECK: %[[LOAD0:.+]] = memref.load %[[ALLOC1]][] : memref @@ -182,4 +179,4 @@ bool testFloatingPointCmpOps(float a, float b) { return x; // CHECK: return -} \ No newline at end of file +} diff --git a/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c b/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c index cf1e275caece..5974734740a2 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/doWhile.c @@ -45,9 +45,7 @@ void nestedDoWhile() { // CHECK: %[[VAR4:.+]] = memref.load %[[ALLOC1]][] : memref // CHECK: %[[C10_I32:.+]] = arith.constant 10 : i32 // CHECK: %[[CMP:.+]] = arith.cmpi sle, %[[VAR4]], %[[C10_I32]] : i32 -// CHECK: %[[EXT1:.+]] = arith.extui %[[CMP]] : i1 to i8 -// CHECK: %[[TRUNC:.+]] = arith.trunci %[[EXT1]] : i8 to i1 -// CHECK: scf.condition(%[[TRUNC]]) +// CHECK: scf.condition(%[[CMP]]) // CHECK: } do { // CHECK: scf.yield // CHECK: } @@ -76,9 +74,7 @@ void nestedDoWhile() { // CHECK: %[[EIGHT:.+]] = memref.load %[[alloca_0]][] : memref // CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 // CHECK: %[[NINE:.+]] = arith.cmpi slt, %[[EIGHT]], %[[C2_I32_3]] : i32 -// CHECK: %[[TWELVE:.+]] = arith.extui %[[NINE]] : i1 to i8 -// CHECK: %[[THIRTEEN:.+]] = arith.trunci %[[TWELVE]] : i8 to i1 -// CHECK: scf.condition(%[[THIRTEEN]]) +// CHECK: scf.condition(%[[NINE]]) // CHECK: } do { // CHECK: %[[EIGHT]] = memref.load %[[alloca_0]][] : memref // CHECK: %[[C1_I32_3:.+]] = arith.constant 1 : i32 @@ -91,9 +87,7 @@ void nestedDoWhile() { // CHECK: %[[TWO:.+]] = memref.load %[[alloca]][] : memref // CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 // CHECK: %[[THREE:.+]] = arith.cmpi slt, %[[TWO]], %[[C2_I32]] : i32 -// CHECK: %[[SIX:.+]] = arith.extui %[[THREE]] : i1 to i8 -// CHECK: %[[SEVEN:.+]] = arith.trunci %[[SIX]] : i8 to i1 -// CHECK: scf.condition(%[[SEVEN]]) +// CHECK: scf.condition(%[[THREE]]) // CHECK: } do { // CHECK: scf.yield // CHECK: } diff --git a/clang/test/CIR/Lowering/ThroughMLIR/if.c b/clang/test/CIR/Lowering/ThroughMLIR/if.c index 8e88346c727f..dec3f9968d6a 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/if.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/if.c @@ -22,9 +22,7 @@ void foo() { //CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref //CHECK: %[[C0_I32_1:.+]] = arith.constant 0 : i32 //CHECK: %[[ONE:.+]] = arith.cmpi sgt, %[[ZERO]], %[[C0_I32_1]] : i32 -//CHECK: %[[FOUR:.+]] = arith.extui %[[ONE]] : i1 to i8 -//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR]] : i8 to i1 -//CHECK: scf.if %[[FIVE]] { +//CHECK: scf.if %[[ONE]] { //CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref //CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 //CHECK: %[[SEVEN:.+]] = arith.addi %[[SIX]], %[[C1_I32]] : i32 @@ -58,9 +56,7 @@ void foo2() { //CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref //CHECK: %[[C3_I32:.+]] = arith.constant 3 : i32 //CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C3_I32]] : i32 -//CHECK: %[[FOUR:.+]] = arith.extui %[[ONE]] : i1 to i8 -//CHECK: %[[FIVE]] = arith.trunci %[[FOUR]] : i8 to i1 -//CHECK: scf.if %[[FIVE]] { +//CHECK: scf.if %[[ONE]] { //CHECK: %[[SIX:.+]] = memref.load %[[alloca_0]][] : memref //CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 //CHECK: %[[SEVEN:.+]] = arith.addi %[[SIX]], %[[C1_I32]] : i32 @@ -95,9 +91,7 @@ void foo3() { //CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref //CHECK: %[[C3_I32:.+]] = arith.constant 3 : i32 //CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C3_I32]] : i32 -//CHECK: %[[FOUR:.+]] = arith.extui %[[ONE]] : i1 to i8 -//CHECK: %[[FIVE]] = arith.trunci %[[FOUR]] : i8 to i1 -//CHECK: scf.if %[[FIVE]] { +//CHECK: scf.if %[[ONE]] { //CHECK: %[[alloca_2:.+]] = memref.alloca() {alignment = 4 : i64} : memref //CHECK: %[[C1_I32:.+]] = arith.constant 1 : i32 //CHECK: memref.store %[[C1_I32]], %[[alloca_2]][] : memref @@ -105,9 +99,7 @@ void foo3() { //CHECK: %[[SIX:.+]] = memref.load %[[alloca_2]][] : memref //CHECK: %[[C2_I32_3:.+]] = arith.constant 2 : i32 //CHECK: %[[SEVEN:.+]] = arith.cmpi sgt, %[[SIX]], %[[C2_I32_3]] : i32 -//CHECK: %[[TEN:.+]] = arith.extui %[[SEVEN]] : i1 to i8 -//CHECK: %[[ELEVEN:.+]] = arith.trunci %[[TEN]] : i8 to i1 -//CHECK: scf.if %[[ELEVEN]] { +//CHECK: scf.if %[[SEVEN]] { //CHECK: %[[TWELVE:.+]] = memref.load %[[alloca_0]][] : memref //CHECK: %[[C1_I32_5:.+]] = arith.constant 1 : i32 //CHECK: %[[THIRTEEN:.+]] = arith.addi %[[TWELVE]], %[[C1_I32_5]] : i32 diff --git a/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir index ce6f466aebc9..819b4c3b941e 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/tenary.cir @@ -26,16 +26,14 @@ cir.func @_Z1xi(%arg0: !s32i) -> !s32i { } // MLIR: %1 = arith.cmpi sgt, %0, %c0_i32 : i32 -// MLIR-NEXT: %2 = arith.extui %1 : i1 to i8 -// MLIR-NEXT: %3 = arith.trunci %2 : i8 to i1 -// MLIR-NEXT: %4 = scf.if %3 -> (i32) { +// MLIR-NEXT: %2 = scf.if %1 -> (i32) { // MLIR-NEXT: %c3_i32 = arith.constant 3 : i32 // MLIR-NEXT: scf.yield %c3_i32 : i32 // MLIR-NEXT: } else { // MLIR-NEXT: %c5_i32 = arith.constant 5 : i32 // MLIR-NEXT: scf.yield %c5_i32 : i32 // MLIR-NEXT: } -// MLIR-NEXT: memref.store %4, %alloca_0[] : memref +// MLIR-NEXT: memref.store %2, %alloca_0[] : memref // MLIR-CANONICALIZE: %[[CMP:.*]] = arith.cmpi sgt // MLIR-CANONICALIZE: arith.select %[[CMP]] diff --git a/clang/test/CIR/Lowering/ThroughMLIR/while.c b/clang/test/CIR/Lowering/ThroughMLIR/while.c index 5621e1fc7c4a..68454f3bea99 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/while.c +++ b/clang/test/CIR/Lowering/ThroughMLIR/while.c @@ -28,9 +28,7 @@ void nestedWhile() { //CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref //CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 //CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO:.+]], %[[C2_I32]] : i32 -//CHECK: %[[FOUR:.+]] = arith.extui %[[ONE:.+]] : i1 to i8 -//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR:.+]] : i8 to i1 -//CHECK: scf.condition(%[[FIVE]]) +//CHECK: scf.condition(%[[ONE]]) //CHECK: } do { //CHECK: memref.alloca_scope { //CHECK: %[[ZERO:.+]] = memref.load %[[alloca]][] : memref @@ -53,9 +51,7 @@ void nestedWhile() { //CHECK: %[[ZERO:.+]] = memref.load %alloca[] : memref //CHECK: %[[C2_I32:.+]] = arith.constant 2 : i32 //CHECK: %[[ONE:.+]] = arith.cmpi slt, %[[ZERO]], %[[C2_I32]] : i32 -//CHECK: %[[FOUR:.+]] = arith.extui %[[ONE]] : i1 to i8 -//CHECK: %[[FIVE:.+]] = arith.trunci %[[FOUR]] : i8 to i1 -//CHECK: scf.condition(%[[FIVE]]) +//CHECK: scf.condition(%[[ONE]]) //CHECK: } do { //CHECK: memref.alloca_scope { //CHECK: %[[alloca_0:.+]] = memref.alloca() {alignment = 4 : i64} : memref @@ -65,9 +61,7 @@ void nestedWhile() { //CHECK: scf.while : () -> () { //CHECK: %{{.*}} = memref.load %[[alloca_0]][] : memref //CHECK: %[[C2_I32]] = arith.constant 2 : i32 -//CHECK: %{{.*}} = arith.cmpi slt, %{{.*}}, %[[C2_I32]] : i32 -//CHECK: %[[SIX:.+]] = arith.extui %{{.*}} : i1 to i8 -//CHECK: %[[SEVEN:.+]] = arith.trunci %[[SIX]] : i8 to i1 +//CHECK: %[[SEVEN:.*]] = arith.cmpi slt, %{{.*}}, %[[C2_I32]] : i32 //CHECK: scf.condition(%[[SEVEN]]) //CHECK: } do { //CHECK: %{{.*}} = memref.load %[[alloca_0]][] : memref diff --git a/clang/test/CIR/Lowering/binop-overflow.cir b/clang/test/CIR/Lowering/binop-overflow.cir index 196771150dbe..6a2ef54c1501 100644 --- a/clang/test/CIR/Lowering/binop-overflow.cir +++ b/clang/test/CIR/Lowering/binop-overflow.cir @@ -11,22 +11,20 @@ module { cir.return %overflow : !cir.bool } - // MLIR: llvm.func @test_add_u32_u32_u32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i8 + // MLIR: llvm.func @test_add_u32_u32_u32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i1 // MLIR-NEXT: %[[#INTRIN_RET:]] = llvm.call_intrinsic "llvm.uadd.with.overflow.i32"(%[[LHS]], %[[RHS]]) : (i32, i32) -> !llvm.struct<(i32, i1)> // MLIR-NEXT: %[[#RES:]] = llvm.extractvalue %[[#INTRIN_RET]][0] : !llvm.struct<(i32, i1)> // MLIR-NEXT: %[[#OVFL:]] = llvm.extractvalue %[[#INTRIN_RET]][1] : !llvm.struct<(i32, i1)> - // MLIR-NEXT: %[[#OVFL_EXT:]] = llvm.zext %[[#OVFL]] : i1 to i8 // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] {{.*}} : i32, !llvm.ptr - // MLIR-NEXT: llvm.return %[[#OVFL_EXT]] : i8 + // MLIR-NEXT: llvm.return %[[#OVFL]] : i1 // MLIR-NEXT: } - // LLVM: define i8 @test_add_u32_u32_u32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]]) + // LLVM: define i1 @test_add_u32_u32_u32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]]) // LLVM-NEXT: %[[#INTRIN_RET:]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %[[#LHS]], i32 %[[#RHS]]) // LLVM-NEXT: %[[#RES:]] = extractvalue { i32, i1 } %[[#INTRIN_RET]], 0 // LLVM-NEXT: %[[#OVFL:]] = extractvalue { i32, i1 } %[[#INTRIN_RET]], 1 - // LLVM-NEXT: %[[#OVFL_EXT:]] = zext i1 %[[#OVFL]] to i8 // LLVM-NEXT: store i32 %[[#RES]], ptr %[[#RES_PTR]], align 4 - // LLVM-NEXT: ret i8 %[[#OVFL_EXT]] + // LLVM-NEXT: ret i1 %[[#OVFL]] // LLVM-NEXT: } cir.func @test_add_u32_u32_i32(%lhs: !u32i, %rhs: !u32i, %res: !cir.ptr) -> !cir.bool { @@ -35,7 +33,7 @@ module { cir.return %overflow : !cir.bool } - // MLIR: llvm.func @test_add_u32_u32_i32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i8 + // MLIR: llvm.func @test_add_u32_u32_i32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i1 // MLIR-NEXT: %[[#LHS_EXT:]] = llvm.zext %[[LHS]] : i32 to i33 // MLIR-NEXT: %[[#RHS_EXT:]] = llvm.zext %[[RHS]] : i32 to i33 // MLIR-NEXT: %[[#INTRIN_RET:]] = llvm.call_intrinsic "llvm.sadd.with.overflow.i33"(%[[#LHS_EXT]], %[[#RHS_EXT]]) : (i33, i33) -> !llvm.struct<(i33, i1)> @@ -45,12 +43,11 @@ module { // MLIR-NEXT: %[[#RES_EXT_2:]] = llvm.sext %[[#RES]] : i32 to i33 // MLIR-NEXT: %[[#TRUNC_OVFL:]] = llvm.icmp "ne" %[[#RES_EXT_2]], %[[#RES_EXT]] : i33 // MLIR-NEXT: %[[#OVFL:]] = llvm.or %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]] : i1 - // MLIR-NEXT: %[[#OVFL_EXT:]] = llvm.zext %[[#OVFL]] : i1 to i8 // MLIR-NEXT: llvm.store %[[#RES]], %[[RES_PTR]] {{.*}} : i32, !llvm.ptr - // MLIR-NEXT: llvm.return %[[#OVFL_EXT]] : i8 + // MLIR-NEXT: llvm.return %[[#OVFL]] : i1 // MLIR-NEXT: } - // LLVM: define i8 @test_add_u32_u32_i32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]]) + // LLVM: define i1 @test_add_u32_u32_i32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]]) // LLVM-NEXT: %[[#LHS_EXT:]] = zext i32 %[[#LHS]] to i33 // LLVM-NEXT: %[[#RHS_EXT:]] = zext i32 %[[#RHS]] to i33 // LLVM-NEXT: %[[#INTRIN_RET:]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 %[[#LHS_EXT]], i33 %[[#RHS_EXT]]) @@ -60,8 +57,7 @@ module { // LLVM-NEXT: %[[#RES_EXT_2:]] = sext i32 %[[#RES]] to i33 // LLVM-NEXT: %[[#TRUNC_OVFL:]] = icmp ne i33 %[[#RES_EXT_2]], %[[#RES_EXT]] // LLVM-NEXT: %[[#OVFL:]] = or i1 %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]] - // LLVM-NEXT: %[[#OVFL_EXT:]] = zext i1 %[[#OVFL]] to i8 // LLVM-NEXT: store i32 %[[#RES]], ptr %[[#RES_PTR]], align 4 - // LLVM-NEXT: ret i8 %[[#OVFL_EXT]] + // LLVM-NEXT: ret i1 %[[#OVFL]] // LLVM-NEXT: } } diff --git a/clang/test/CIR/Lowering/bool.cir b/clang/test/CIR/Lowering/bool.cir index 2d3fc2d8590b..848b552f897a 100644 --- a/clang/test/CIR/Lowering/bool.cir +++ b/clang/test/CIR/Lowering/bool.cir @@ -16,10 +16,11 @@ module { cir.return } // MLIR: llvm.func @foo() -// MLIR-DAG: = llvm.mlir.constant(1 : i8) : i8 -// MLIR-DAG: [[Value:%[a-z0-9]+]] = llvm.mlir.constant(1 : index) : i64 -// MLIR-DAG: = llvm.alloca [[Value]] x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr -// MLIR-DAG: llvm.store %0, %2 {{.*}} : i8, !llvm.ptr +// MLIR-DAG: %[[TRUE:.*]] = llvm.mlir.constant(true) : i1 +// MLIR-DAG: %[[VALUE:.*]] = llvm.mlir.constant(1 : index) : i64 +// MLIR-DAG: %[[ADDR:.*]] = llvm.alloca %[[VALUE]] x i8 {alignment = 1 : i64} : (i64) -> !llvm.ptr +// MLIR-DAG: %[[TRUE_EXT:.*]] = llvm.zext %[[TRUE]] : i1 to i8 +// MLIR-DAG: llvm.store %[[TRUE_EXT]], %[[ADDR]] {{.*}} : i8, !llvm.ptr // MLIR-NEXT: llvm.return // LLVM: define void @foo() diff --git a/clang/test/CIR/Lowering/branch.cir b/clang/test/CIR/Lowering/branch.cir index a99a217f18da..0daea329f4b8 100644 --- a/clang/test/CIR/Lowering/branch.cir +++ b/clang/test/CIR/Lowering/branch.cir @@ -13,25 +13,23 @@ cir.func @foo(%arg0: !cir.bool) -> !s32i { } // MLIR: module { -// MLIR-NEXT: llvm.func @foo(%arg0: i8) -> i32 -// MLIR-NEXT: %0 = llvm.trunc %arg0 : i8 to i1 -// MLIR-NEXT: llvm.cond_br %0, ^bb1, ^bb2 +// MLIR-NEXT: llvm.func @foo(%arg0: i1) -> i32 +// MLIR-NEXT: llvm.cond_br %arg0, ^bb1, ^bb2 // MLIR-NEXT: ^bb1: // pred: ^bb0 -// MLIR-NEXT: %1 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: llvm.return %1 : i32 +// MLIR-NEXT: %0 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: llvm.return %0 : i32 // MLIR-NEXT: ^bb2: // pred: ^bb0 -// MLIR-NEXT: %2 = llvm.mlir.constant(0 : i32) : i32 -// MLIR-NEXT: llvm.return %2 : i32 +// MLIR-NEXT: %1 = llvm.mlir.constant(0 : i32) : i32 +// MLIR-NEXT: llvm.return %1 : i32 // MLIR-NEXT: } // MLIR-NEXT: } -// LLVM: define i32 @foo(i8 %0) -// LLVM-NEXT: %2 = trunc i8 %0 to i1 -// LLVM-NEXT: br i1 %2, label %3, label %4 +// LLVM: define i32 @foo(i1 %0) +// LLVM-NEXT: br i1 %0, label %2, label %3 // LLVM-EMPTY: -// LLVM-NEXT: 3: ; preds = %1 +// LLVM-NEXT: 2: ; preds = %1 // LLVM-NEXT: ret i32 1 // LLVM-EMPTY: -// LLVM-NEXT: 4: ; preds = %1 +// LLVM-NEXT: 3: ; preds = %1 // LLVM-NEXT: ret i32 0 // LLVM-NEXT: } diff --git a/clang/test/CIR/Lowering/brcond.cir b/clang/test/CIR/Lowering/brcond.cir index 262e0a8f868b..19e778cef823 100644 --- a/clang/test/CIR/Lowering/brcond.cir +++ b/clang/test/CIR/Lowering/brcond.cir @@ -4,40 +4,39 @@ !s32i = !cir.int #fn_attr = #cir, nothrow = #cir.nothrow, optnone = #cir.optnone})> module { cir.func no_proto @test() -> !cir.bool extra(#fn_attr) { - %0 = cir.const #cir.int<0> : !s32i - %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool - cir.br ^bb1 + %0 = cir.const #cir.int<0> : !s32i + %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool + cir.br ^bb1 ^bb1: - cir.brcond %1 ^bb2, ^bb3 + cir.brcond %1 ^bb2, ^bb3 ^bb2: - cir.return %1 : !cir.bool + cir.return %1 : !cir.bool ^bb3: - cir.br ^bb4 + cir.br ^bb4 ^bb4: - cir.return %1 : !cir.bool - } + cir.return %1 : !cir.bool + } } // MLIR: {{.*}} = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: {{.*}} = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: {{.*}} = llvm.icmp "ne" {{.*}}, {{.*}} : i32 -// MLIR-NEXT: {{.*}} = llvm.zext {{.*}} : i1 to i8 // MLIR-NEXT: llvm.br ^bb1 // MLIR-NEXT: ^bb1: // MLIR-NEXT: llvm.cond_br {{.*}}, ^bb2, ^bb3 // MLIR-NEXT: ^bb2: -// MLIR-NEXT: llvm.return {{.*}} : i8 +// MLIR-NEXT: llvm.return {{.*}} : i1 // MLIR-NEXT: ^bb3: // MLIR-NEXT: llvm.br ^bb4 // MLIR-NEXT: ^bb4: -// MLIR-NEXT: llvm.return {{.*}} : i8 +// MLIR-NEXT: llvm.return {{.*}} : i1 // LLVM: br label {{.*}} // LLVM: 1: // LLVM: br i1 false, label {{.*}}, label {{.*}} // LLVM: 2: -// LLVM: ret i8 0 +// LLVM: ret i1 false // LLVM: 3: // LLVM: br label {{.*}} // LLVM: 4: -// LLVM: ret i8 0 +// LLVM: ret i1 false diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index e100e0c2f07e..7b731794f1fa 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -51,7 +51,6 @@ module { %33 = cir.cast(int_to_bool, %arg1 : !s32i), !cir.bool // CHECK: %[[#ZERO:]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %[[#CMP:]] = llvm.icmp "ne" %arg1, %[[#ZERO]] : i32 - // CHECK: %{{.+}} = llvm.zext %[[#CMP]] : i1 to i8 // Pointer casts. cir.store %16, %6 : !s64i, !cir.ptr @@ -91,9 +90,22 @@ module { %2 = cir.load %0 : !cir.ptr, !cir.bool %3 = cir.cast(bool_to_int, %2 : !cir.bool), !u8i // CHECK: %[[LOAD_BOOL:.*]] = llvm.load %{{.*}} : !llvm.ptr -> i8 - // CHECK: %{{.*}} = llvm.bitcast %[[LOAD_BOOL]] : i8 to i8 + // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[LOAD_BOOL]] : i8 to i1 + // CHECK: %[[EXT:.*]] = llvm.zext %[[TRUNC]] : i1 to i8 cir.store %3, %1 : !u8i, !cir.ptr cir.return } + + // Test cases where the memory type is not the same as the source type. + cir.func @testArrayToPtrDecay() { + // CHECK-LABEL: llvm.func @testArrayToPtrDecay() + %null_bool_array = cir.const #cir.ptr : !cir.ptr> + %bool_array_decay = cir.cast(array_to_ptrdecay, %null_bool_array : !cir.ptr>), !cir.ptr + // CHECK: = llvm.getelementptr %{{.*}}[0] : (!llvm.ptr) -> !llvm.ptr, i8 + %res = cir.load %bool_array_decay : !cir.ptr, !cir.bool + // CHECK-NEXT: %[[BOOL_LOAD:.+]] = llvm.load %{{.*}} {{.*}} : !llvm.ptr -> i8 + // CHECK-NEXT: = llvm.trunc %[[BOOL_LOAD]] : i8 to i1 + cir.return + } } diff --git a/clang/test/CIR/Lowering/const-array.cir b/clang/test/CIR/Lowering/const-array.cir index 41cfbad3daba..84a21665bffd 100644 --- a/clang/test/CIR/Lowering/const-array.cir +++ b/clang/test/CIR/Lowering/const-array.cir @@ -1,11 +1,16 @@ // RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o - | FileCheck %s -check-prefix=LLVM !u8i = !cir.int +#false = #cir.bool : !cir.bool +#true = #cir.bool : !cir.bool module { cir.global "private" internal @normal_url_char = #cir.const_array<[#cir.int<0> : !u8i, #cir.int<1> : !u8i], trailing_zeros> : !cir.array // LLVM: @normal_url_char = internal global [4 x i8] c"\00\01\00\00" + cir.global "private" internal @g_const_bool_arr = #cir.const_array<[#true, #false, #true, #false]> : !cir.array + // LLVM: @g_const_bool_arr = internal global [4 x i8] c"\01\00\01\00" + cir.func @c0() -> !cir.ptr> { %0 = cir.get_global @normal_url_char : !cir.ptr> cir.return %0 : !cir.ptr> diff --git a/clang/test/CIR/Lowering/const.cir b/clang/test/CIR/Lowering/const.cir index ae78b8387fc5..7d9b495f784e 100644 --- a/clang/test/CIR/Lowering/const.cir +++ b/clang/test/CIR/Lowering/const.cir @@ -78,8 +78,9 @@ module { // CHECK: llvm.func @testInitArrWithBool() // CHECK: [[ARR:%.*]] = llvm.mlir.undef : !llvm.array<1 x i8> - // CHECK: [[TRUE:%.*]] = llvm.mlir.constant(1 : i8) : i8 - // CHECK: {{.*}} = llvm.insertvalue [[TRUE]], [[ARR]][0] : !llvm.array<1 x i8> - // CHECL: llvm.return + // CHECK: [[TRUE:%.*]] = llvm.mlir.constant(true) : i1 + // CHECK: [[TRUE_EXT:%.*]] = llvm.zext [[TRUE]] : i1 to i8 + // CHECK: {{.*}} = llvm.insertvalue [[TRUE_EXT]], [[ARR]][0] : !llvm.array<1 x i8> + // CHECK: llvm.return } diff --git a/clang/test/CIR/Lowering/loadstorealloca.cir b/clang/test/CIR/Lowering/loadstorealloca.cir index 5764d5afc8f5..85f714dc6b51 100644 --- a/clang/test/CIR/Lowering/loadstorealloca.cir +++ b/clang/test/CIR/Lowering/loadstorealloca.cir @@ -18,7 +18,7 @@ module { %2 = cir.load volatile %0 : !cir.ptr, !u32i cir.return %2 : !u32i } -} + // MLIR: module { // MLIR-NEXT: func @foo() -> i32 @@ -37,3 +37,21 @@ module { // MLIR-NEXT: llvm.store volatile %2, %1 {{.*}}: i32, !llvm.ptr // MLIR-NEXT: %3 = llvm.load volatile %1 {alignment = 4 : i64} : !llvm.ptr -> i32 // MLIR-NEXT: return %3 : i32 + + cir.func @test_bool_memory_lowering() { + // MLIR-LABEL: @test_bool_memory_lowering + %0 = cir.alloca !cir.bool, !cir.ptr, ["x", init] {alignment = 1 : i64} + // MLIR: %[[VAR:.*]] = llvm.alloca %{{.*}} x i8 + %1 = cir.const #cir.bool : !cir.bool + // MLIR: %[[TRUE:.*]] = llvm.mlir.constant(true) : i1 + cir.store %1, %0 : !cir.bool, !cir.ptr + // MLIR: %[[TRUE_EXT:.*]] = llvm.zext %[[TRUE]] : i1 to i8 + // MLIR: llvm.store %[[TRUE_EXT]], %[[VAR]] {alignment = 1 : i64} : i8, !llvm.ptr + %2 = cir.load %0 : !cir.ptr, !cir.bool + // MLIR: %[[LOAD_VAL:.*]] = llvm.load %[[VAR]] {alignment = 1 : i64} : !llvm.ptr -> i8 + // MLIR: %[[LOAD_SCALAR:.*]] = llvm.trunc %[[LOAD_VAL]] : i8 to i1 + %3 = cir.cast(bool_to_int, %2 : !cir.bool), !u32i + // MLIR: %[[CAST_VAL:.*]] = llvm.zext %[[LOAD_SCALAR]] : i1 to i32 + cir.return + } +} diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index 84e5b0aff6a7..9ab1227fdd0d 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -3,6 +3,8 @@ // XFAIL: * !s32i = !cir.int +!u64i = !cir.int + module { cir.func @f(%arg0: !cir.ptr) { %0 = cir.alloca !cir.ptr, !cir.ptr>, ["a", init] {alignment = 8 : i64} @@ -17,6 +19,11 @@ module { %3 = cir.ptr_stride(%arg0 : !cir.ptr, %2 : !s32i), !cir.ptr cir.return } + + cir.func @bool_stride(%arg0: !cir.ptr, %2 : !u64i) { + %3 = cir.ptr_stride(%arg0 : !cir.ptr, %2 : !u64i), !cir.ptr + cir.return + } } // MLIR-LABEL: @f @@ -33,3 +40,6 @@ module { // MLIR-LABEL: @g // MLIR: %0 = llvm.sext %arg1 : i32 to i64 // MLIR-NEXT: llvm.getelementptr %arg0[%0] : (!llvm.ptr, i64) -> !llvm.ptr, i32 + +// MLIR-LABEL: @bool_stride +// MLIR: llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, i8 diff --git a/clang/test/CIR/Lowering/select.cir b/clang/test/CIR/Lowering/select.cir index 1ac56496e138..71ca79a390e8 100644 --- a/clang/test/CIR/Lowering/select.cir +++ b/clang/test/CIR/Lowering/select.cir @@ -9,9 +9,8 @@ module { cir.return %0 : !s32i } - // LLVM: define i32 @select_int(i8 %[[#COND:]], i32 %[[#TV:]], i32 %[[#FV:]]) - // LLVM-NEXT: %[[#CONDF:]] = trunc i8 %[[#COND]] to i1 - // LLVM-NEXT: %[[#RES:]] = select i1 %[[#CONDF]], i32 %[[#TV]], i32 %[[#FV]] + // LLVM: define i32 @select_int(i1 %[[#COND:]], i32 %[[#TV:]], i32 %[[#FV:]]) + // LLVM-NEXT: %[[#RES:]] = select i1 %[[#COND]], i32 %[[#TV]], i32 %[[#FV]] // LLVM-NEXT: ret i32 %[[#RES]] // LLVM-NEXT: } @@ -20,10 +19,9 @@ module { cir.return %0 : !cir.bool } - // LLVM: define i8 @select_bool(i8 %[[#COND:]], i8 %[[#TV:]], i8 %[[#FV:]]) - // LLVM-NEXT: %[[#CONDF:]] = trunc i8 %[[#COND]] to i1 - // LLVM-NEXT: %[[#RES:]] = select i1 %[[#CONDF]], i8 %[[#TV]], i8 %[[#FV]] - // LLVM-NEXT: ret i8 %[[#RES]] + // LLVM: define i1 @select_bool(i1 %[[#COND:]], i1 %[[#TV:]], i1 %[[#FV:]]) + // LLVM-NEXT: %[[#RES:]] = select i1 %[[#COND]], i1 %[[#TV]], i1 %[[#FV]] + // LLVM-NEXT: ret i1 %[[#RES]] // LLVM-NEXT: } cir.func @logical_and(%arg0 : !cir.bool, %arg1 : !cir.bool) -> !cir.bool { @@ -32,9 +30,9 @@ module { cir.return %1 : !cir.bool } - // LLVM: define i8 @logical_and(i8 %[[#ARG0:]], i8 %[[#ARG1:]]) - // LLVM-NEXT: %[[#RES:]] = and i8 %[[#ARG0]], %[[#ARG1]] - // LLVM-NEXT: ret i8 %[[#RES]] + // LLVM: define i1 @logical_and(i1 %[[#ARG0:]], i1 %[[#ARG1:]]) + // LLVM-NEXT: %[[#RES:]] = and i1 %[[#ARG0]], %[[#ARG1]] + // LLVM-NEXT: ret i1 %[[#RES]] // LLVM-NEXT: } cir.func @logical_or(%arg0 : !cir.bool, %arg1 : !cir.bool) -> !cir.bool { @@ -43,8 +41,8 @@ module { cir.return %1 : !cir.bool } - // LLVM: define i8 @logical_or(i8 %[[#ARG0:]], i8 %[[#ARG1:]]) - // LLVM-NEXT: %[[#RES:]] = or i8 %[[#ARG0]], %[[#ARG1]] - // LLVM-NEXT: ret i8 %[[#RES]] + // LLVM: define i1 @logical_or(i1 %[[#ARG0:]], i1 %[[#ARG1:]]) + // LLVM-NEXT: %[[#RES:]] = or i1 %[[#ARG0]], %[[#ARG1]] + // LLVM-NEXT: ret i1 %[[#RES]] // LLVM-NEXT: } } diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index c89a58a9772e..e612dcd66efd 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -10,6 +10,8 @@ !ty_S2_ = !cir.struct !ty_S3_ = !cir.struct +!struct_with_bool = !cir.struct + module { cir.func @test() { %1 = cir.alloca !ty_S, !cir.ptr, ["x"] {alignment = 4 : i64} @@ -93,4 +95,21 @@ module { // CHECK: "llvm.intr.memcpy"(%[[#SB]], %[[#SA]], %[[#SIZE]]) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> () cir.return } + + // Verify that boolean fields are lowered to i8 and that the correct type is inserted during initialization. + cir.global external @struct_with_bool = #cir.const_struct<{#cir.int<1> : !u32i, #cir.bool : !cir.bool}> : !struct_with_bool + // CHECK: llvm.mlir.global external @struct_with_bool() {addr_space = 0 : i32} : !llvm.struct<"struct.struct_with_bool", (i32, i8)> { + // CHECK: %[[FALSE:.+]] = llvm.mlir.constant(false) : i1 + // CHECK-NEXT: %[[FALSE_MEM:.+]] = llvm.zext %[[FALSE]] : i1 to i8 + // CHECK-NEXT: = llvm.insertvalue %[[FALSE_MEM]], %{{.+}}[1] : !llvm.struct<"struct.struct_with_bool", (i32, i8)> + + cir.func @test_struct_with_bool() { + // CHECK-LABEL: llvm.func @test_struct_with_bool() + %0 = cir.alloca !struct_with_bool, !cir.ptr, ["a"] {alignment = 4 : i64} + %1 = cir.get_member %0[1] {name = "b"} : !cir.ptr -> !cir.ptr + // CHECK: %[[BOOL_MEMBER_PTR:.+]] = llvm.getelementptr %{{.*}}[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"struct.struct_with_bool", (i32, i8)> + %2 = cir.load %1 : !cir.ptr, !cir.bool + // CHECK: = llvm.load %[[BOOL_MEMBER_PTR]] {{.*}} : !llvm.ptr -> i8 + cir.return + } } diff --git a/clang/test/CIR/Lowering/unary-not.cir b/clang/test/CIR/Lowering/unary-not.cir index 86a7405bd0ee..35cd54f3df78 100644 --- a/clang/test/CIR/Lowering/unary-not.cir +++ b/clang/test/CIR/Lowering/unary-not.cir @@ -31,18 +31,16 @@ module { %3 = cir.cast(float_to_bool, %2 : !cir.float), !cir.bool // MLIR: %[[#F_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 // MLIR: %[[#F_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#F_ZERO]] : f32 - // MLIR: %[[#F_ZEXT:]] = llvm.zext %[[#F_BOOL]] : i1 to i8 %4 = cir.unary(not, %3) : !cir.bool, !cir.bool - // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(1 : i8) : i8 - // MLIR: = llvm.xor %[[#F_ZEXT]], %[[#F_ONE]] : i8 + // MLIR: %[[#F_ONE:]] = llvm.mlir.constant(true) : i1 + // MLIR: = llvm.xor %[[#F_BOOL]], %[[#F_ONE]] : i1 %5 = cir.load %1 : !cir.ptr, !cir.double %6 = cir.cast(float_to_bool, %5 : !cir.double), !cir.bool // MLIR: %[[#D_ZERO:]] = llvm.mlir.constant(0.000000e+00 : f64) : f64 // MLIR: %[[#D_BOOL:]] = llvm.fcmp "une" %{{.+}}, %[[#D_ZERO]] : f64 - // MLIR: %[[#D_ZEXT:]] = llvm.zext %[[#D_BOOL]] : i1 to i8 %7 = cir.unary(not, %6) : !cir.bool, !cir.bool - // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(1 : i8) : i8 - // MLIR: = llvm.xor %[[#D_ZEXT]], %[[#D_ONE]] : i8 + // MLIR: %[[#D_ONE:]] = llvm.mlir.constant(true) : i1 + // MLIR: = llvm.xor %[[#D_BOOL]], %[[#D_ONE]] : i1 cir.return } @@ -60,10 +58,9 @@ module { // MLIR: %[[#INT:]] = llvm.load %{{.+}} : !llvm.ptr // MLIR: %[[#IZERO:]] = llvm.mlir.constant(0 : i32) : i32 // MLIR: %[[#ICMP:]] = llvm.icmp "ne" %[[#INT]], %[[#IZERO]] : i32 - // MLIR: %[[#IEXT:]] = llvm.zext %[[#ICMP]] : i1 to i8 - // MLIR: %[[#IONE:]] = llvm.mlir.constant(1 : i8) : i8 - // MLIR: %[[#IXOR:]] = llvm.xor %[[#IEXT]], %[[#IONE]] : i8 - // MLIR: = llvm.zext %[[#IXOR]] : i8 to i32 + // MLIR: %[[#IONE:]] = llvm.mlir.constant(true) : i1 + // MLIR: %[[#IXOR:]] = llvm.xor %[[#ICMP]], %[[#IONE]] : i1 + // MLIR: = llvm.zext %[[#IXOR]] : i1 to i32 %17 = cir.load %3 : !cir.ptr, !cir.float %18 = cir.cast(float_to_bool, %17 : !cir.float), !cir.bool @@ -72,10 +69,9 @@ module { // MLIR: %[[#FLOAT:]] = llvm.load %{{.+}} : !llvm.ptr // MLIR: %[[#FZERO:]] = llvm.mlir.constant(0.000000e+00 : f32) : f32 // MLIR: %[[#FCMP:]] = llvm.fcmp "une" %[[#FLOAT]], %[[#FZERO]] : f32 - // MLIR: %[[#FEXT:]] = llvm.zext %[[#FCMP]] : i1 to i8 - // MLIR: %[[#FONE:]] = llvm.mlir.constant(1 : i8) : i8 - // MLIR: %[[#FXOR:]] = llvm.xor %[[#FEXT]], %[[#FONE]] : i8 - // MLIR: = llvm.zext %[[#FXOR]] : i8 to i32 + // MLIR: %[[#FONE:]] = llvm.mlir.constant(true) : i1 + // MLIR: %[[#FXOR:]] = llvm.xor %[[#FCMP]], %[[#FONE]] : i1 + // MLIR: = llvm.zext %[[#FXOR]] : i1 to i32 cir.return } diff --git a/clang/test/CIR/Lowering/unions.cir b/clang/test/CIR/Lowering/unions.cir index fe56e2af7527..445ef463ef2d 100644 --- a/clang/test/CIR/Lowering/unions.cir +++ b/clang/test/CIR/Lowering/unions.cir @@ -25,9 +25,10 @@ module { %5 = cir.const #true %6 = cir.get_member %arg0[0] {name = "b"} : !cir.ptr -> !cir.ptr cir.store %5, %6 : !cir.bool, !cir.ptr - // CHECK: %[[#VAL:]] = llvm.mlir.constant(1 : i8) : i8 + // CHECK: %[[#TRUE:]] = llvm.mlir.constant(true) : i1 // The bitcast it just to bypass the type checker. It will be replaced by an opaque pointer. // CHECK: %[[#ADDR:]] = llvm.bitcast %{{.+}} : !llvm.ptr + // CHECK: %[[#VAL:]] = llvm.zext %[[#TRUE]] : i1 to i8 // CHECK: llvm.store %[[#VAL]], %[[#ADDR]] {{.*}}: i8, !llvm.ptr // Should load direclty from the union's base address. From 2f71d042fc75a7449859cd25c376bbd9cad27b8c Mon Sep 17 00:00:00 2001 From: Chuanqi Xu Date: Mon, 6 Jan 2025 22:54:12 +0800 Subject: [PATCH 2190/2301] [CIR][CIRGen] Improve emission for array of unions (#1236) Close https://github.com/llvm/clangir/issues/1185 The patch itself seems slightly ad-hoc. As the issue tracked by https://github.com/llvm/clangir/issues/1178, the fundamental solution may be to introduce two type systems to solve the inconsistent semantics for Union between LLVM IR and CIR. This will be great to handle other inconsistent semantics between LLVM IR and CIR if any. Back to the patch itself, although the code looks not good initially to me too. But I feel it may be a good workaround since clang/test/CIR/Lowering/union-array.c is an example for array of unions directly and clang/test/CIR/Lowering/nested-union-array.c gives an example for array of unions indirectly (array of structs which contain unions). So I feel we've already covered all the cases. And generally it should be good to use some simple and solid workaround before we introduce the formal full solution. --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 48 ++++++++++++++++++++++- clang/test/CIR/CodeGen/union-array.c | 22 +++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/union-array.c diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index dbd78284349d..3eb8627084f4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1015,6 +1015,50 @@ class ConstExprEmitter return {}; } + auto desiredType = CGM.getTypes().ConvertType(T); + // FIXME(cir): A hack to handle the emission of arrays of unions directly. + // See clang/test/CIR/CodeGen/union-array.c and + // clang/test/CIR/Lowering/nested-union-array.c for example. The root + // cause of these problems is CIR handles union differently than LLVM IR. + // So we can't fix the problem fundamentally by mocking LLVM's handling for + // unions. In LLVM, the union is basically a struct with the largest member + // of the union and consumers cast the union arbitrarily according to their + // needs. But in CIR, we tried to express union semantics properly. This is + // a fundamental difference. + // + // Concretely, for the problem here, if we're constructing the initializer + // for the array of unions, we can't even assume the type of the elements in + // the initializer are the same! It is odd that we can have an array with + // different element types. Here we just pretend it is fine by checking if + // we're constructing an array for an array of unions. If we didn't do so, + // we may meet problems during lowering to LLVM. To solve the problem, we + // may need to introduce 2 type systems for CIR: one for the CIR itself and + // one for lowering. e.g., we can compare the type of CIR during CIRGen, + // analysis and transformations without worrying the concerns here. And + // lower to LLVM IR (or anyother dialects) with the proper type. + // + // (Although the idea to make CIR's type system self contained and generate + // LLVM's + // types in later passes look fine, it has engineering level concern that + // it will make the skeleton of CIRGen to be diverged from the traditional + // CodeGen.) + // + // Besides union, there are other differences between CIR and LLVM's type + // system. e.g., LLVM's pointer types are opaque while CIR has concrete + // pointer types. + bool isDesiredArrayOfUnionDirectly = [&]() { + auto desiredArrayType = dyn_cast(desiredType); + if (!desiredArrayType) + return false; + + auto elementStructType = + dyn_cast(desiredArrayType.getEltType()); + if (!elementStructType) + return false; + + return elementStructType.isUnion(); + }(); + // Emit initializer elements as MLIR attributes and check for common type. mlir::Type CommonElementType; for (unsigned i = 0; i != NumInitableElts; ++i) { @@ -1024,10 +1068,12 @@ class ConstExprEmitter return {}; if (i == 0) CommonElementType = C.getType(); + else if (isDesiredArrayOfUnionDirectly && + C.getType() != CommonElementType) + CommonElementType = nullptr; Elts.push_back(std::move(C)); } - auto desiredType = CGM.getTypes().ConvertType(T); auto typedFiller = llvm::dyn_cast_or_null(Filler); if (Filler && !typedFiller) llvm_unreachable("We shouldn't be receiving untyped attrs here"); diff --git a/clang/test/CIR/CodeGen/union-array.c b/clang/test/CIR/CodeGen/union-array.c new file mode 100644 index 000000000000..8ac8264b159d --- /dev/null +++ b/clang/test/CIR/CodeGen/union-array.c @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -fno-clangir-call-conv-lowering %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM + +typedef struct { + char a; +} S_1; + +typedef struct { + long a, b; +} S_2; + +typedef union { + S_1 a; + S_2 b; +} U; + +void foo() { U arr[2] = {{.b = {1, 2}}, {.a = {1}}}; } + +// CIR: cir.const #cir.const_struct<{#cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s64i, #cir.int<2> : !s64i}> : {{.*}}}> : {{.*}}, #cir.const_struct<{#cir.const_struct<{#cir.int<1> : !s8i}> : {{.*}}, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array}> +// LLVM: store { { %struct.S_2 }, { %struct.S_1, [15 x i8] } } { { %struct.S_2 } { %struct.S_2 { i64 1, i64 2 } }, { %struct.S_1, [15 x i8] } { %struct.S_1 { i8 1 }, [15 x i8] zeroinitializer } } From 380ba1c98fb7f5f8b4795024a158cd637ec02e88 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 6 Jan 2025 16:08:13 +0100 Subject: [PATCH 2191/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vsetq_lane_f64 (#1251) Lowering Neon `vsetq_lane_f64` References: [Clang CGBuiltin Implementation](https://github.com/llvm/clangir/blob/2b1a638ea07ca10c5727ea835bfbe17b881175cc/clang/lib/CodeGen/CGBuiltin.cpp#L12348) [Builtin definition](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64) --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 10 +++++++--- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 12 ++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 9a3a8bac3dd0..a715cb973abf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3691,9 +3691,13 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vset_lane_f64: // The vector type needs a cast for the v1f64 variant. llvm_unreachable("NEON::BI__builtin_neon_vset_lane_f64 NYI"); - case NEON::BI__builtin_neon_vsetq_lane_f64: - // The vector type needs a cast for the v2f64 variant. - llvm_unreachable("NEON::BI__builtin_neon_vsetq_lane_f64 NYI"); + case NEON::BI__builtin_neon_vsetq_lane_f64: { + Ops.push_back(emitScalarExpr(E->getArg(2))); + Ops[1] = builder.createBitcast( + Ops[1], cir::VectorType::get(&getMLIRContext(), DoubleTy, 2)); + return builder.create(getLoc(E->getExprLoc()), Ops[1], + Ops[0], Ops[2]); + } case NEON::BI__builtin_neon_vget_lane_i8: case NEON::BI__builtin_neon_vdupb_lane_i8: diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index c0935a81b835..da589a44f00f 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -137,6 +137,18 @@ float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) { // LLVM: [[INTRN_RES:%.*]] = insertelement <4 x float> [[B]], float [[A]], i32 3 // LLVM: ret <4 x float> [[INTRN_RES]] +float64x2_t test_vsetq_land_f64(float64_t a, float64x2_t b) { + return vsetq_lane_f64(a, b, 0); +} + +// CIR-LABEL: test_vsetq_land_f64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vsetq_land_f64(double{{.*}}[[A:%.*]], <2 x double>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <2 x double> [[B]], double [[A]], i32 0 +// LLVM: ret <2 x double> [[INTRN_RES]] + uint8_t test_vget_lane_u8(uint8x8_t a) { return vget_lane_u8(a, 7); } From 06d1ac4eb88b57261c29a83f4b4d525e95b891d0 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 6 Jan 2025 16:10:46 +0100 Subject: [PATCH 2192/2301] [CIR][CIRGen][Builtin][Neon] Lower `neon_vaddd_s64` and `neon_vaddd_u64` (#1255) Lowering Neon `vaddd_s64` and `vaddd_u64` - [Clang CGBuiltin Implementation](https://github.com/llvm/clangir/blob/2b1a638ea07ca10c5727ea835bfbe17b881175cc/clang/lib/CodeGen/CGBuiltin.cpp#L12460-L12462) - [vaddd_s64 Builtin definition](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64) - [vaddd_u64 Builtin definition](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 2 +- clang/test/CIR/CodeGen/AArch64/neon.c | 34 ++++++++++++------- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index a715cb973abf..5806fa4c7c4d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3787,7 +3787,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vaddd_s64: case NEON::BI__builtin_neon_vaddd_u64: - llvm_unreachable("NEON::BI__builtin_neon_vaddd_u64 NYI"); + return builder.createAdd(Ops[0], emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vsubd_s64: case NEON::BI__builtin_neon_vsubd_u64: llvm_unreachable("NEON::BI__builtin_neon_vsubd_u64 NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 1abc935f95b0..f71a44400179 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -9881,19 +9881,29 @@ poly16x8_t test_vmull_p8(poly8x8_t a, poly8x8_t b) { // return vmull_high_p8(a, b); // } -// NYI-LABEL: @test_vaddd_s64( -// NYI: [[VADDD_I:%.*]] = add i64 %a, %b -// NYI: ret i64 [[VADDD_I]] -// int64_t test_vaddd_s64(int64_t a, int64_t b) { -// return vaddd_s64(a, b); -// } +int64_t test_vaddd_s64(int64_t a, int64_t b) { + return vaddd_s64(a, b); -// NYI-LABEL: @test_vaddd_u64( -// NYI: [[VADDD_I:%.*]] = add i64 %a, %b -// NYI: ret i64 [[VADDD_I]] -// uint64_t test_vaddd_u64(uint64_t a, uint64_t b) { -// return vaddd_u64(a, b); -// } + // CIR-LABEL: vaddd_s64 + // CIR: {{%.*}} = cir.binop(add, {{%.*}}, {{%.*}}) : !s64i + + // LLVM-LABEL: @test_vaddd_s64 + // LLVM-SAME: (i64 [[a:%.]], i64 [[b:%.]]) + // LLVM: [[VADDD_I:%.*]] = add i64 [[a]], [[b]] + // LLVM: ret i64 [[VADDD_I]] +} + +uint64_t test_vaddd_u64(uint64_t a, uint64_t b) { + return vaddd_u64(a, b); + + // CIR-LABEL: vaddd_u64 + // CIR: {{%.*}} = cir.binop(add, {{%.*}}, {{%.*}}) : !u64i + + // LLVM-LABEL: @test_vaddd_u64 + // LLVM-SAME: (i64 [[a:%.]], i64 [[b:%.]]) + // LLVM: [[VADDD_I:%.*]] = add i64 [[a]], [[b]] + // LLVM: ret i64 [[VADDD_I]] +} // NYI-LABEL: @test_vsubd_s64( // NYI: [[VSUBD_I:%.*]] = sub i64 %a, %b From bac4b5c738532d6aa87bc9594d4141c7b6585739 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 6 Jan 2025 16:11:06 +0100 Subject: [PATCH 2193/2301] [CIR][CIRGen][Builtin][Neon] Lower `neon_vsubd_s64` and `neon_vsubd_u64` (#1256) Lowering `neon_vsubd_s64` and `neon_vsubd_u64` - [Clang CGBuiltin Implementation](https://github.com/llvm/clangir/blob/2b1a638ea07ca10c5727ea835bfbe17b881175cc/clang/lib/CodeGen/CGBuiltin.cpp#L12463-L12465) - [vaddd_s64 Builtin definition](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64) - [vaddd_u64 Builtin definition](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 2 +- clang/test/CIR/CodeGen/AArch64/neon.c | 34 ++++++++++++------- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 5806fa4c7c4d..1e18b42b3b07 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3790,7 +3790,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, return builder.createAdd(Ops[0], emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vsubd_s64: case NEON::BI__builtin_neon_vsubd_u64: - llvm_unreachable("NEON::BI__builtin_neon_vsubd_u64 NYI"); + return builder.createSub(Ops[0], emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vqdmlalh_s16: case NEON::BI__builtin_neon_vqdmlslh_s16: { llvm_unreachable("NEON::BI__builtin_neon_vqdmlslh_s16 NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index f71a44400179..e28de102a04a 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -9905,19 +9905,29 @@ uint64_t test_vaddd_u64(uint64_t a, uint64_t b) { // LLVM: ret i64 [[VADDD_I]] } -// NYI-LABEL: @test_vsubd_s64( -// NYI: [[VSUBD_I:%.*]] = sub i64 %a, %b -// NYI: ret i64 [[VSUBD_I]] -// int64_t test_vsubd_s64(int64_t a, int64_t b) { -// return vsubd_s64(a, b); -// } +int64_t test_vsubd_s64(int64_t a, int64_t b) { + return vsubd_s64(a, b); -// NYI-LABEL: @test_vsubd_u64( -// NYI: [[VSUBD_I:%.*]] = sub i64 %a, %b -// NYI: ret i64 [[VSUBD_I]] -// uint64_t test_vsubd_u64(uint64_t a, uint64_t b) { -// return vsubd_u64(a, b); -// } + // CIR-LABEL: vsubd_s64 + // CIR: {{%.*}} = cir.binop(sub, {{%.*}}, {{%.*}}) : !s64i + + // LLVM-LABEL: @test_vsubd_s64 + // LLVM-SAME: (i64 [[a:%.]], i64 [[b:%.]]) + // LLVM: [[VSUBD_I:%.*]] = sub i64 [[a]], [[b]] + // LLVM: ret i64 [[VSUBD_I]] +} + +uint64_t test_vsubd_u64(uint64_t a, uint64_t b) { + return vsubd_u64(a, b); + + // CIR-LABEL: vsubd_u64 + // CIR: {{%.*}} = cir.binop(sub, {{%.*}}, {{%.*}}) : !u64i + + // LLVM-LABEL: @test_vsubd_u64 + // LLVM-SAME: (i64 [[a:%.]], i64 [[b:%.]]) + // LLVM: [[VSUBD_I:%.*]] = sub i64 [[a]], [[b]] + // LLVM: ret i64 [[VSUBD_I]] +} // NYI-LABEL: @test_vqaddb_s8( // NYI: [[TMP0:%.*]] = insertelement <8 x i8> poison, i8 %a, i64 0 From aee90fa0bc1bfa9518e0805f7e7004b989a19dfc Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 6 Jan 2025 16:11:36 +0100 Subject: [PATCH 2194/2301] [CIR][CIRGen][Builtin][Neon] Lower `vaddh_f16`, `vsubh_f16`, `vmulh_f16` and `vdivh_f16` (#1258) Lowering: - `vaddh_f16` - `vsubh_f16` - `vmulh_f16` - `vdivh_f16` --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 8 + .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 24 +- clang/test/CIR/CodeGen/AArch64/neon-fp16.c | 689 ++++++++++++++++++ 3 files changed, 713 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/AArch64/neon-fp16.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 28be733f62d7..46b001531795 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -675,6 +675,14 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { assert(!cir::MissingFeatures::foldBinOpFMF()); return create(lhs.getLoc(), cir::BinOpKind::Mul, lhs, rhs); } + mlir::Value createFDiv(mlir::Value lhs, mlir::Value rhs) { + assert(!cir::MissingFeatures::metaDataNode()); + if (IsFPConstrained) + llvm_unreachable("Constrained FP NYI"); + + assert(!cir::MissingFeatures::foldBinOpFMF()); + return create(lhs.getLoc(), cir::BinOpKind::Div, lhs, rhs); + } mlir::Value createDynCast(mlir::Location loc, mlir::Value src, cir::PointerType destType, bool isRefCast, diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 1e18b42b3b07..6620645d878d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3771,14 +3771,22 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, Ops[0], cir::VectorType::get(&getMLIRContext(), DoubleTy, 2)); return builder.create(getLoc(E->getExprLoc()), Ops[0], emitScalarExpr(E->getArg(1))); - case NEON::BI__builtin_neon_vaddh_f16: - llvm_unreachable("NEON::BI__builtin_neon_vaddh_f16 NYI"); - case NEON::BI__builtin_neon_vsubh_f16: - llvm_unreachable("NEON::BI__builtin_neon_vsubh_f16 NYI"); - case NEON::BI__builtin_neon_vmulh_f16: - llvm_unreachable("NEON::BI__builtin_neon_vmulh_f16 NYI"); - case NEON::BI__builtin_neon_vdivh_f16: - llvm_unreachable("NEON::BI__builtin_neon_vdivh_f16 NYI"); + case NEON::BI__builtin_neon_vaddh_f16: { + Ops.push_back(emitScalarExpr(E->getArg(1))); + return builder.createFAdd(Ops[0], Ops[1]); + } + case NEON::BI__builtin_neon_vsubh_f16: { + Ops.push_back(emitScalarExpr(E->getArg(1))); + return builder.createFSub(Ops[0], Ops[1]); + } + case NEON::BI__builtin_neon_vmulh_f16: { + Ops.push_back(emitScalarExpr(E->getArg(1))); + return builder.createFMul(Ops[0], Ops[1]); + } + case NEON::BI__builtin_neon_vdivh_f16: { + Ops.push_back(emitScalarExpr(E->getArg(1))); + return builder.createFDiv(Ops[0], Ops[1]); + } case NEON::BI__builtin_neon_vfmah_f16: // NEON intrinsic puts accumulator first, unlike the LLVM fma. llvm_unreachable("NEON::BI__builtin_neon_vfmah_f16 NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-fp16.c b/clang/test/CIR/CodeGen/AArch64/neon-fp16.c new file mode 100644 index 000000000000..3d3d4c439f43 --- /dev/null +++ b/clang/test/CIR/CodeGen/AArch64/neon-fp16.c @@ -0,0 +1,689 @@ +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-llvm -fno-clangir-call-conv-lowering -o - %s \ +// RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// REQUIRES: aarch64-registered-target || arm-registered-target + +// This test mimics clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics.c, which eventually +// CIR shall be able to support fully. Since this is going to take some time to converge, +// the unsupported/NYI code is commented out, so that we can incrementally improve this. +// The NYI filecheck used contains the LLVM output from OG codegen that should guide the +// correct result when implementing this into the CIR pipeline. + +#include + +// NYI-LABEL: test_vabsh_f16 +// NYI: [[ABS:%.*]] = call half @llvm.fabs.f16(half %a) +// NYI: ret half [[ABS]] +// float16_t test_vabsh_f16(float16_t a) { +// return vabsh_f16(a); +// } + +// NYI-LABEL: test_vceqzh_f16 +// NYI: [[TMP1:%.*]] = fcmp oeq half %a, 0xH0000 +// NYI: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vceqzh_f16(float16_t a) { +// return vceqzh_f16(a); +// } + +// NYI-LABEL: test_vcgezh_f16 +// NYI: [[TMP1:%.*]] = fcmp oge half %a, 0xH0000 +// NYI: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vcgezh_f16(float16_t a) { +// return vcgezh_f16(a); +// } + +// NYI-LABEL: test_vcgtzh_f16 +// NYI: [[TMP1:%.*]] = fcmp ogt half %a, 0xH0000 +// NYI: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vcgtzh_f16(float16_t a) { +// return vcgtzh_f16(a); +// } + +// NYI-LABEL: test_vclezh_f16 +// NYI: [[TMP1:%.*]] = fcmp ole half %a, 0xH0000 +// NYI: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vclezh_f16(float16_t a) { +// return vclezh_f16(a); +// } + +// NYI-LABEL: test_vcltzh_f16 +// NYI: [[TMP1:%.*]] = fcmp olt half %a, 0xH0000 +// NYI: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vcltzh_f16(float16_t a) { +// return vcltzh_f16(a); +// } + +// NYI-LABEL: test_vcvth_f16_s16 +// NYI: [[VCVT:%.*]] = sitofp i16 %a to half +// NYI: ret half [[VCVT]] +// float16_t test_vcvth_f16_s16 (int16_t a) { +// return vcvth_f16_s16(a); +// } + +// NYI-LABEL: test_vcvth_f16_s32 +// NYI: [[VCVT:%.*]] = sitofp i32 %a to half +// NYI: ret half [[VCVT]] +// float16_t test_vcvth_f16_s32 (int32_t a) { +// return vcvth_f16_s32(a); +// } + +// NYI-LABEL: test_vcvth_f16_s64 +// NYI: [[VCVT:%.*]] = sitofp i64 %a to half +// NYI: ret half [[VCVT]] +// float16_t test_vcvth_f16_s64 (int64_t a) { +// return vcvth_f16_s64(a); +// } + +// NYI-LABEL: test_vcvth_f16_u16 +// NYI: [[VCVT:%.*]] = uitofp i16 %a to half +// NYI: ret half [[VCVT]] +// float16_t test_vcvth_f16_u16 (uint16_t a) { +// return vcvth_f16_u16(a); +// } + +// NYI-LABEL: test_vcvth_f16_u32 +// NYI: [[VCVT:%.*]] = uitofp i32 %a to half +// NYI: ret half [[VCVT]] +// float16_t test_vcvth_f16_u32 (uint32_t a) { +// return vcvth_f16_u32(a); +// } + +// NYI-LABEL: test_vcvth_f16_u64 +// NYI: [[VCVT:%.*]] = uitofp i64 %a to half +// NYI: ret half [[VCVT]] +// float16_t test_vcvth_f16_u64 (uint64_t a) { +// return vcvth_f16_u64(a); +// } + +// NYI-LABEL: test_vcvth_s16_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzs.i32.f16(half %a) +// NYI: [[TRUNC:%.*]] = trunc i32 [[VCVT]] to i16 +// NYI: ret i16 [[TRUNC]] +// int16_t test_vcvth_s16_f16 (float16_t a) { +// return vcvth_s16_f16(a); +// } + +// NYI-LABEL: test_vcvth_s32_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzs.i32.f16(half %a) +// NYI: ret i32 [[VCVT]] +// int32_t test_vcvth_s32_f16 (float16_t a) { +// return vcvth_s32_f16(a); +// } + +// NYI-LABEL: test_vcvth_s64_f16 +// NYI: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtzs.i64.f16(half %a) +// NYI: ret i64 [[VCVT]] +// int64_t test_vcvth_s64_f16 (float16_t a) { +// return vcvth_s64_f16(a); +// } + +// NYI-LABEL: test_vcvth_u16_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzu.i32.f16(half %a) +// NYI: [[TRUNC:%.*]] = trunc i32 [[VCVT]] to i16 +// NYI: ret i16 [[TRUNC]] +// uint16_t test_vcvth_u16_f16 (float16_t a) { +// return vcvth_u16_f16(a); +// } + +// NYI-LABEL: test_vcvth_u32_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzu.i32.f16(half %a) +// NYI: ret i32 [[VCVT]] +// uint32_t test_vcvth_u32_f16 (float16_t a) { +// return vcvth_u32_f16(a); +// } + +// NYI-LABEL: test_vcvth_u64_f16 +// NYI: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtzu.i64.f16(half %a) +// NYI: ret i64 [[VCVT]] +// uint64_t test_vcvth_u64_f16 (float16_t a) { +// return vcvth_u64_f16(a); +// } + +// NYI-LABEL: test_vcvtah_s16_f16 +// NYI: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtas.i32.f16(half %a) +// NYI: [[RET:%.*]] = trunc i32 [[FCVT]] to i16 +// NYI: ret i16 [[RET]] +// int16_t test_vcvtah_s16_f16 (float16_t a) { +// return vcvtah_s16_f16(a); +// } + +// NYI-LABEL: test_vcvtah_s32_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtas.i32.f16(half %a) +// NYI: ret i32 [[VCVT]] +// int32_t test_vcvtah_s32_f16 (float16_t a) { +// return vcvtah_s32_f16(a); +// } + +// NYI-LABEL: test_vcvtah_s64_f16 +// NYI: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtas.i64.f16(half %a) +// NYI: ret i64 [[VCVT]] +// int64_t test_vcvtah_s64_f16 (float16_t a) { +// return vcvtah_s64_f16(a); +// } + +// NYI-LABEL: test_vcvtah_u16_f16 +// NYI: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %a) +// NYI: [[RET:%.*]] = trunc i32 [[FCVT]] to i16 +// NYI: ret i16 [[RET]] +// uint16_t test_vcvtah_u16_f16 (float16_t a) { +// return vcvtah_u16_f16(a); +// } + +// NYI-LABEL: test_vcvtah_u32_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %a) +// NYI: ret i32 [[VCVT]] +// uint32_t test_vcvtah_u32_f16 (float16_t a) { +// return vcvtah_u32_f16(a); +// } + +// NYI-LABEL: test_vcvtah_u64_f16 +// NYI: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtau.i64.f16(half %a) +// NYI: ret i64 [[VCVT]] +// uint64_t test_vcvtah_u64_f16 (float16_t a) { +// return vcvtah_u64_f16(a); +// } + +// NYI-LABEL: test_vcvtmh_s16_f16 +// NYI: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtms.i32.f16(half %a) +// NYI: [[RET:%.*]] = trunc i32 [[FCVT]] to i16 +// NYI: ret i16 [[RET]] +// int16_t test_vcvtmh_s16_f16 (float16_t a) { +// return vcvtmh_s16_f16(a); +// } + +// NYI-LABEL: test_vcvtmh_s32_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtms.i32.f16(half %a) +// NYI: ret i32 [[VCVT]] +// int32_t test_vcvtmh_s32_f16 (float16_t a) { +// return vcvtmh_s32_f16(a); +// } + +// NYI-LABEL: test_vcvtmh_s64_f16 +// NYI: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtms.i64.f16(half %a) +// NYI: ret i64 [[VCVT]] +// int64_t test_vcvtmh_s64_f16 (float16_t a) { +// return vcvtmh_s64_f16(a); +// } + +// NYI-LABEL: test_vcvtmh_u16_f16 +// NYI: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtmu.i32.f16(half %a) +// NYI: [[RET:%.*]] = trunc i32 [[FCVT]] to i16 +// NYI: ret i16 [[RET]] +// uint16_t test_vcvtmh_u16_f16 (float16_t a) { +// return vcvtmh_u16_f16(a); +// } + +// NYI-LABEL: test_vcvtmh_u32_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtmu.i32.f16(half %a) +// NYI: ret i32 [[VCVT]] +// uint32_t test_vcvtmh_u32_f16 (float16_t a) { +// return vcvtmh_u32_f16(a); +// } + +// NYI-LABEL: test_vcvtmh_u64_f16 +// NYI: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtmu.i64.f16(half %a) +// NYI: ret i64 [[VCVT]] +// uint64_t test_vcvtmh_u64_f16 (float16_t a) { +// return vcvtmh_u64_f16(a); +// } + +// NYI-LABEL: test_vcvtnh_s16_f16 +// NYI: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtns.i32.f16(half %a) +// NYI: [[RET:%.*]] = trunc i32 [[FCVT]] to i16 +// NYI: ret i16 [[RET]] +// int16_t test_vcvtnh_s16_f16 (float16_t a) { +// return vcvtnh_s16_f16(a); +// } + +// NYI-LABEL: test_vcvtnh_s32_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtns.i32.f16(half %a) +// NYI: ret i32 [[VCVT]] +// int32_t test_vcvtnh_s32_f16 (float16_t a) { +// return vcvtnh_s32_f16(a); +// } + +// NYI-LABEL: test_vcvtnh_s64_f16 +// NYI: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtns.i64.f16(half %a) +// NYI: ret i64 [[VCVT]] +// int64_t test_vcvtnh_s64_f16 (float16_t a) { +// return vcvtnh_s64_f16(a); +// } + +// NYI-LABEL: test_vcvtnh_u16_f16 +// NYI: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtnu.i32.f16(half %a) +// NYI: [[RET:%.*]] = trunc i32 [[FCVT]] to i16 +// NYI: ret i16 [[RET]] +// uint16_t test_vcvtnh_u16_f16 (float16_t a) { +// return vcvtnh_u16_f16(a); +// } + +// NYI-LABEL: test_vcvtnh_u32_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtnu.i32.f16(half %a) +// NYI: ret i32 [[VCVT]] +// uint32_t test_vcvtnh_u32_f16 (float16_t a) { +// return vcvtnh_u32_f16(a); +// } + +// NYI-LABEL: test_vcvtnh_u64_f16 +// NYI: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtnu.i64.f16(half %a) +// NYI: ret i64 [[VCVT]] +// uint64_t test_vcvtnh_u64_f16 (float16_t a) { +// return vcvtnh_u64_f16(a); +// } + +// NYI-LABEL: test_vcvtph_s16_f16 +// NYI: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtps.i32.f16(half %a) +// NYI: [[RET:%.*]] = trunc i32 [[FCVT]] to i16 +// NYI: ret i16 [[RET]] +// int16_t test_vcvtph_s16_f16 (float16_t a) { +// return vcvtph_s16_f16(a); +// } + +// NYI-LABEL: test_vcvtph_s32_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtps.i32.f16(half %a) +// NYI: ret i32 [[VCVT]] +// int32_t test_vcvtph_s32_f16 (float16_t a) { +// return vcvtph_s32_f16(a); +// } + +// NYI-LABEL: test_vcvtph_s64_f16 +// NYI: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtps.i64.f16(half %a) +// NYI: ret i64 [[VCVT]] +// int64_t test_vcvtph_s64_f16 (float16_t a) { +// return vcvtph_s64_f16(a); +// } + +// NYI-LABEL: test_vcvtph_u16_f16 +// NYI: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half %a) +// NYI: [[RET:%.*]] = trunc i32 [[FCVT]] to i16 +// NYI: ret i16 [[RET]] +// uint16_t test_vcvtph_u16_f16 (float16_t a) { +// return vcvtph_u16_f16(a); +// } + +// NYI-LABEL: test_vcvtph_u32_f16 +// NYI: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half %a) +// NYI: ret i32 [[VCVT]] +// uint32_t test_vcvtph_u32_f16 (float16_t a) { +// return vcvtph_u32_f16(a); +// } + +// NYI-LABEL: test_vcvtph_u64_f16 +// NYI: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtpu.i64.f16(half %a) +// NYI: ret i64 [[VCVT]] +// uint64_t test_vcvtph_u64_f16 (float16_t a) { +// return vcvtph_u64_f16(a); +// } + +// NYI-LABEL: test_vnegh_f16 +// NYI: [[NEG:%.*]] = fneg half %a +// NYI: ret half [[NEG]] +// float16_t test_vnegh_f16(float16_t a) { +// return vnegh_f16(a); +// } + +// NYI-LABEL: test_vrecpeh_f16 +// NYI: [[VREC:%.*]] = call half @llvm.aarch64.neon.frecpe.f16(half %a) +// NYI: ret half [[VREC]] +// float16_t test_vrecpeh_f16(float16_t a) { +// return vrecpeh_f16(a); +// } + +// NYI-LABEL: test_vrecpxh_f16 +// NYI: [[VREC:%.*]] = call half @llvm.aarch64.neon.frecpx.f16(half %a) +// NYI: ret half [[VREC]] +// float16_t test_vrecpxh_f16(float16_t a) { +// return vrecpxh_f16(a); +// } + +// NYI-LABEL: test_vrndh_f16 +// NYI: [[RND:%.*]] = call half @llvm.trunc.f16(half %a) +// NYI: ret half [[RND]] +// float16_t test_vrndh_f16(float16_t a) { +// return vrndh_f16(a); +// } + +// NYI-LABEL: test_vrndah_f16 +// NYI: [[RND:%.*]] = call half @llvm.round.f16(half %a) +// NYI: ret half [[RND]] +// float16_t test_vrndah_f16(float16_t a) { +// return vrndah_f16(a); +// } + +// NYI-LABEL: test_vrndih_f16 +// NYI: [[RND:%.*]] = call half @llvm.nearbyint.f16(half %a) +// NYI: ret half [[RND]] +// float16_t test_vrndih_f16(float16_t a) { +// return vrndih_f16(a); +// } + +// NYI-LABEL: test_vrndmh_f16 +// NYI: [[RND:%.*]] = call half @llvm.floor.f16(half %a) +// NYI: ret half [[RND]] +// float16_t test_vrndmh_f16(float16_t a) { +// return vrndmh_f16(a); +// } + +// NYI-LABEL: test_vrndnh_f16 +// NYI: [[RND:%.*]] = call half @llvm.roundeven.f16(half %a) +// NYI: ret half [[RND]] +// float16_t test_vrndnh_f16(float16_t a) { +// return vrndnh_f16(a); +// } + +// NYI-LABEL: test_vrndph_f16 +// NYI: [[RND:%.*]] = call half @llvm.ceil.f16(half %a) +// NYI: ret half [[RND]] +// float16_t test_vrndph_f16(float16_t a) { +// return vrndph_f16(a); +// } + +// NYI-LABEL: test_vrndxh_f16 +// NYI: [[RND:%.*]] = call half @llvm.rint.f16(half %a) +// NYI: ret half [[RND]] +// float16_t test_vrndxh_f16(float16_t a) { +// return vrndxh_f16(a); +// } + +// NYI-LABEL: test_vrsqrteh_f16 +// NYI: [[RND:%.*]] = call half @llvm.aarch64.neon.frsqrte.f16(half %a) +// NYI: ret half [[RND]] +// float16_t test_vrsqrteh_f16(float16_t a) { +// return vrsqrteh_f16(a); +// } + +// NYI-LABEL: test_vsqrth_f16 +// NYI: [[SQR:%.*]] = call half @llvm.sqrt.f16(half %a) +// NYI: ret half [[SQR]] +// float16_t test_vsqrth_f16(float16_t a) { +// return vsqrth_f16(a); +// } + +// CIR-LABEL: vaddh_f16 +// CIR: {{%.*}} = cir.binop(add, {{%.*}}, {{%.*}}) : !cir.f16 +// +// LLVM-LABEL: test_vaddh_f16 +// LLVM-SAME: (half [[a:%.]], half [[b:%.]]) +// LLVM: [[ADD:%.*]] = fadd half [[a]], [[b]] +// LLVM: ret half [[ADD]] +float16_t test_vaddh_f16(float16_t a, float16_t b) { + return vaddh_f16(a, b); +} + +// NYI-LABEL: test_vabdh_f16 +// NYI: [[ABD:%.*]] = call half @llvm.aarch64.sisd.fabd.f16(half %a, half %b) +// NYI: ret half [[ABD]] +// float16_t test_vabdh_f16(float16_t a, float16_t b) { +// return vabdh_f16(a, b); +// } + +// NYI-LABEL: test_vcageh_f16 +// NYI: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facge.i32.f16(half %a, half %b) +// NYI: [[RET:%.*]] = trunc i32 [[FACG]] to i16 +// NYI: ret i16 [[RET]] +// uint16_t test_vcageh_f16(float16_t a, float16_t b) { +// return vcageh_f16(a, b); +// } + +// NYI-LABEL: test_vcagth_f16 +// NYI: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facgt.i32.f16(half %a, half %b) +// NYI: [[RET:%.*]] = trunc i32 [[FACG]] to i16 +// NYI: ret i16 [[RET]] +// uint16_t test_vcagth_f16(float16_t a, float16_t b) { +// return vcagth_f16(a, b); +// } + +// NYI-LABEL: test_vcaleh_f16 +// NYI: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facge.i32.f16(half %b, half %a) +// NYI: [[RET:%.*]] = trunc i32 [[FACG]] to i16 +// NYI: ret i16 [[RET]] +// uint16_t test_vcaleh_f16(float16_t a, float16_t b) { +// return vcaleh_f16(a, b); +// } + +// NYI-LABEL: test_vcalth_f16 +// NYI: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facgt.i32.f16(half %b, half %a) +// NYI: [[RET:%.*]] = trunc i32 [[FACG]] to i16 +// NYI: ret i16 [[RET]] +// uint16_t test_vcalth_f16(float16_t a, float16_t b) { +// return vcalth_f16(a, b); +// } + +// NYI-LABEL: test_vceqh_f16 +// NYI: [[TMP1:%.*]] = fcmp oeq half %a, %b +// NYI: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vceqh_f16(float16_t a, float16_t b) { +// return vceqh_f16(a, b); +// } + +// NYI-LABEL: test_vcgeh_f16 +// NYI: [[TMP1:%.*]] = fcmp oge half %a, %b +// NYI: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vcgeh_f16(float16_t a, float16_t b) { +// return vcgeh_f16(a, b); +// } + +// NYI-LABEL: test_vcgth_f16 +//NYI: [[TMP1:%.*]] = fcmp ogt half %a, %b +// NYI: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vcgth_f16(float16_t a, float16_t b) { +// return vcgth_f16(a, b); +// } + +// NYI-LABEL: test_vcleh_f16 +// NYI: [[TMP1:%.*]] = fcmp ole half %a, %b +// NYI: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vcleh_f16(float16_t a, float16_t b) { +// return vcleh_f16(a, b); +// } + +// NYI-LABEL: test_vclth_f16 +// NYI: [[TMP1:%.*]] = fcmp olt half %a, %b +// NYI: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16 +// NYI: ret i16 [[TMP2]] +// uint16_t test_vclth_f16(float16_t a, float16_t b) { +// return vclth_f16(a, b); +// } + +// NYI-LABEL: test_vcvth_n_f16_s16 +// NYI: [[SEXT:%.*]] = sext i16 %a to i32 +// NYI: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 [[SEXT]], i32 1) +// NYI: ret half [[CVT]] +// float16_t test_vcvth_n_f16_s16(int16_t a) { +// return vcvth_n_f16_s16(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_f16_s32 +// NYI: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 %a, i32 1) +// NYI: ret half [[CVT]] +// float16_t test_vcvth_n_f16_s32(int32_t a) { +// return vcvth_n_f16_s32(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_f16_s64 +// NYI: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i64(i64 %a, i32 1) +// NYI: ret half [[CVT]] +// float16_t test_vcvth_n_f16_s64(int64_t a) { +// return vcvth_n_f16_s64(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_s16_f16 +// NYI: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half %a, i32 1) +// NYI: [[RET:%.*]] = trunc i32 [[CVT]] to i16 +// NYI: ret i16 [[RET]] +// int16_t test_vcvth_n_s16_f16(float16_t a) { +// return vcvth_n_s16_f16(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_s32_f16 +// NYI: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half %a, i32 1) +// NYI: ret i32 [[CVT]] +// int32_t test_vcvth_n_s32_f16(float16_t a) { +// return vcvth_n_s32_f16(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_s64_f16 +// NYI: [[CVT:%.*]] = call i64 @llvm.aarch64.neon.vcvtfp2fxs.i64.f16(half %a, i32 1) +// NYI: ret i64 [[CVT]] +// int64_t test_vcvth_n_s64_f16(float16_t a) { +// return vcvth_n_s64_f16(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_f16_u16 +// NYI: [[SEXT:%.*]] = zext i16 %a to i32 +// NYI: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 [[SEXT]], i32 1) +// NYI: ret half [[CVT]] +// float16_t test_vcvth_n_f16_u16(int16_t a) { +// return vcvth_n_f16_u16(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_f16_u32 +// NYI: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 %a, i32 1) +// NYI: ret half [[CVT]] +// float16_t test_vcvth_n_f16_u32(int32_t a) { +// return vcvth_n_f16_u32(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_f16_u64 +// NYI: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i64(i64 %a, i32 1) +// NYI: ret half [[CVT]] +// float16_t test_vcvth_n_f16_u64(int64_t a) { +// return vcvth_n_f16_u64(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_u16_f16 +// NYI: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half %a, i32 1) +// NYI: [[RET:%.*]] = trunc i32 [[CVT]] to i16 +// NYI: ret i16 [[RET]] +// int16_t test_vcvth_n_u16_f16(float16_t a) { +// return vcvth_n_u16_f16(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_u32_f16 +// NYI: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half %a, i32 1) +// NYI: ret i32 [[CVT]] +// int32_t test_vcvth_n_u32_f16(float16_t a) { +// return vcvth_n_u32_f16(a, 1); +// } + +// NYI-LABEL: test_vcvth_n_u64_f16 +// NYI: [[CVT:%.*]] = call i64 @llvm.aarch64.neon.vcvtfp2fxu.i64.f16(half %a, i32 1) +// NYI: ret i64 [[CVT]] +// int64_t test_vcvth_n_u64_f16(float16_t a) { +// return vcvth_n_u64_f16(a, 1); +// } + +// CIR-LABEL: vdivh_f16 +// CIR: {{%.*}} = cir.binop(div, {{%.*}}, {{%.*}}) : !cir.f16 +// +// LLVM-LABEL: test_vdivh_f16 +// LLVM-SAME: (half [[a:%.]], half [[b:%.]]) +// LLVM: [[DIV:%.*]] = fdiv half [[a]], [[b]] +// LLVM: ret half [[DIV]] +float16_t test_vdivh_f16(float16_t a, float16_t b) { + return vdivh_f16(a, b); +} + +// NYI-LABEL: test_vmaxh_f16 +// NYI: [[MAX:%.*]] = call half @llvm.aarch64.neon.fmax.f16(half %a, half %b) +// NYI: ret half [[MAX]] +// float16_t test_vmaxh_f16(float16_t a, float16_t b) { +// return vmaxh_f16(a, b); +// } + +// NYI-LABEL: test_vmaxnmh_f16 +// NYI: [[MAX:%.*]] = call half @llvm.aarch64.neon.fmaxnm.f16(half %a, half %b) +// NYI: ret half [[MAX]] +// float16_t test_vmaxnmh_f16(float16_t a, float16_t b) { +// return vmaxnmh_f16(a, b); +// } + +// NYI-LABEL: test_vminh_f16 +// NYI: [[MIN:%.*]] = call half @llvm.aarch64.neon.fmin.f16(half %a, half %b) +// NYI: ret half [[MIN]] +// float16_t test_vminh_f16(float16_t a, float16_t b) { +// return vminh_f16(a, b); +// } + +// NYI-LABEL: test_vminnmh_f16 +// NYI: [[MIN:%.*]] = call half @llvm.aarch64.neon.fminnm.f16(half %a, half %b) +// NYI: ret half [[MIN]] +// float16_t test_vminnmh_f16(float16_t a, float16_t b) { +// return vminnmh_f16(a, b); +// } + +// CIR-LABEL: vmulh_f16 +// CIR: {{%.*}} = cir.binop(mul, {{%.*}}, {{%.*}}) : !cir.f16 +// +// LLVM-LABEL: test_vmulh_f16 +// LLVM-SAME: (half [[a:%.]], half [[b:%.]]) +// LLVM: [[MUL:%.*]] = fmul half [[a]], [[b]] +// LLVM: ret half [[MUL]] +float16_t test_vmulh_f16(float16_t a, float16_t b) { + return vmulh_f16(a, b); +} + +// NYI-LABEL: test_vmulxh_f16 +// NYI: [[MUL:%.*]] = call half @llvm.aarch64.neon.fmulx.f16(half %a, half %b) +// NYI: ret half [[MUL]] +// float16_t test_vmulxh_f16(float16_t a, float16_t b) { +// return vmulxh_f16(a, b); +// } + +// NYI-LABEL: test_vrecpsh_f16 +// NYI: [[RECPS:%.*]] = call half @llvm.aarch64.neon.frecps.f16(half %a, half %b) +// NYI: ret half [[RECPS]] +// float16_t test_vrecpsh_f16(float16_t a, float16_t b) { +// return vrecpsh_f16(a, b); +// } + +// NYI-LABEL: test_vrsqrtsh_f16 +// NYI: [[RSQRTS:%.*]] = call half @llvm.aarch64.neon.frsqrts.f16(half %a, half %b) +// NYI: ret half [[RSQRTS]] +// float16_t test_vrsqrtsh_f16(float16_t a, float16_t b) { +// return vrsqrtsh_f16(a, b); +// } + +// CIR-LABEL: vsubh_f16 +// CIR: {{%.*}} = cir.binop(sub, {{%.*}}, {{%.*}}) : !cir.f16 +// +// LLVM-LABEL: test_vsubh_f16 +// LLVM-SAME: (half [[a:%.]], half [[b:%.]]) +// LLVM: [[SUB:%.*]] = fsub half [[a]], [[b]] +// LLVM: ret half [[SUB]] +float16_t test_vsubh_f16(float16_t a, float16_t b) { + return vsubh_f16(a, b); +} + +// NYI-LABEL: test_vfmah_f16 +// NYI: [[FMA:%.*]] = call half @llvm.fma.f16(half %b, half %c, half %a) +// NYI: ret half [[FMA]] +// float16_t test_vfmah_f16(float16_t a, float16_t b, float16_t c) { +// return vfmah_f16(a, b, c); +// } + +// NYI-LABEL: test_vfmsh_f16 +// NYI: [[SUB:%.*]] = fneg half %b +// NYI: [[ADD:%.*]] = call half @llvm.fma.f16(half [[SUB]], half %c, half %a) +// NYI: ret half [[ADD]] +// float16_t test_vfmsh_f16(float16_t a, float16_t b, float16_t c) { +// return vfmsh_f16(a, b, c); +// } + From a40bbc528539de721a933279e45ef5206c3a1cb2 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 6 Jan 2025 16:11:57 +0100 Subject: [PATCH 2195/2301] [CIR][CIRGen][Builtin][Neon] Lower vshld_n_s64 and vshld_n_u64 (#1259) Lowering `vshld_n_s64` and `vshld_n_u64` --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 5 ++- clang/test/CIR/CodeGen/AArch64/neon.c | 32 ++++++++++++------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 6620645d878d..276dc80e454c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3820,7 +3820,10 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vshld_n_s64: case NEON::BI__builtin_neon_vshld_n_u64: { - llvm_unreachable("NEON::BI__builtin_neon_vshld_n_u64 NYI"); + std::optional amt = + E->getArg(1)->getIntegerConstantExpr(getContext()); + assert(amt && "Expected argument to be a constant"); + return builder.createShiftLeft(Ops[0], amt->getZExtValue()); } case NEON::BI__builtin_neon_vshrd_n_s64: { llvm_unreachable("NEON::BI__builtin_neon_vshrd_n_s64 NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index e28de102a04a..0e2b270c33d9 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -15232,12 +15232,16 @@ uint64x1_t test_vrsra_n_u64(uint64x1_t a, uint64x1_t b) { // LLVM: ret <1 x i64> [[TMP3]] } -// NYI-LABEL: @test_vshld_n_s64( -// NYI: [[SHLD_N:%.*]] = shl i64 %a, 1 -// NYI: ret i64 [[SHLD_N]] -// int64_t test_vshld_n_s64(int64_t a) { -// return (int64_t)vshld_n_s64(a, 1); -// } +int64_t test_vshld_n_s64(int64_t a) { + return (int64_t)vshld_n_s64(a, 1); + + // CIR-LABEL: vshld_n_s64 + // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !s64i, {{%.*}} : !s64i) -> !s64i + + // LLVM-LABEL: @test_vshld_n_s64( + // LLVM: [[SHLD_N:%.*]] = shl i64 %0, 1 + // LLVM: ret i64 [[SHLD_N]] +} // NYI-LABEL: @test_vshl_n_s64( // NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> @@ -15248,12 +15252,16 @@ uint64x1_t test_vrsra_n_u64(uint64x1_t a, uint64x1_t b) { // return vshl_n_s64(a, 1); // } -// NYI-LABEL: @test_vshld_n_u64( -// NYI: [[SHLD_N:%.*]] = shl i64 %a, 63 -// NYI: ret i64 [[SHLD_N]] -// uint64_t test_vshld_n_u64(uint64_t a) { -// return (uint64_t)vshld_n_u64(a, 63); -// } +uint64_t test_vshld_n_u64(uint64_t a) { + return (uint64_t)vshld_n_u64(a, 63); + + // CIR-LABEL: vshld_n_u64 + // CIR: {{%.*}} = cir.shift(left, {{%.*}} : !u64i, {{%.*}} : !u64i) -> !u64i + + // LLVM-LABEL: @test_vshld_n_u64( + // LLVM: [[SHLD_N:%.*]] = shl i64 %0, 63 + // LLVM: ret i64 [[SHLD_N]] +} // NYI-LABEL: @test_vshl_n_u64( // NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> From 541f20aabe47a1fcc910b5b5b7bb2ec3d2d5812a Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 6 Jan 2025 16:12:17 +0100 Subject: [PATCH 2196/2301] [CIR][CIRGen][Builtin][Neon] Lower vshrd_n_s64 (#1260) Lowering `vshrd_n_s64` --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 6 +++++- clang/test/CIR/CodeGen/AArch64/neon.c | 16 ++++++++++------ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 276dc80e454c..9cd4a601ee37 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3826,7 +3826,11 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, return builder.createShiftLeft(Ops[0], amt->getZExtValue()); } case NEON::BI__builtin_neon_vshrd_n_s64: { - llvm_unreachable("NEON::BI__builtin_neon_vshrd_n_s64 NYI"); + std::optional amt = + E->getArg(1)->getIntegerConstantExpr(getContext()); + assert(amt && "Expected argument to be a constant"); + uint64_t bits = std::min(static_cast(63), amt->getZExtValue()); + return builder.createShiftRight(Ops[0], bits); } case NEON::BI__builtin_neon_vshrd_n_u64: { llvm_unreachable("NEON::BI__builtin_neon_vshrd_n_u64 NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 0e2b270c33d9..2a7d120a0bbe 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -15078,12 +15078,16 @@ int16_t test_vqmovns_s32(int32_t a) { // return (uint64_t)vcaltd_f64(a, b); // } -// NYI-LABEL: @test_vshrd_n_s64( -// NYI: [[SHRD_N:%.*]] = ashr i64 %a, 1 -// NYI: ret i64 [[SHRD_N]] -// int64_t test_vshrd_n_s64(int64_t a) { -// return (int64_t)vshrd_n_s64(a, 1); -// } +int64_t test_vshrd_n_s64(int64_t a) { + return (int64_t)vshrd_n_s64(a, 1); + + // CIR-LABEL: vshrd_n_s64 + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !s64i, {{%.*}} : !s64i) -> !s64i + + // LLVM-LABEL: @test_vshrd_n_s64( + // LLVM: [[SHRD_N:%.*]] = ashr i64 %0, 1 + // LLVM: ret i64 [[SHRD_N]] +} // NYI-LABEL: @test_vshrd_n_u64( // NYI: ret i64 0 From 596b9e9af2fcd051a0541ee9f1c7a7282969d294 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 6 Jan 2025 19:32:28 +0100 Subject: [PATCH 2197/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vset_lane_f64 (#1253) Lowering Neon `vset_lane_f64` References: [Clang CGBuiltin Implementation](https://github.com/llvm/clangir/blob/2b1a638ea07ca10c5727ea835bfbe17b881175cc/clang/lib/CodeGen/CGBuiltin.cpp#L12342) [Builtin definition](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64) --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 12 ++++++++---- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 18 +++++++++++++++--- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 9cd4a601ee37..82f8b82495df 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3688,9 +3688,14 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, // at the moment, the implementation should be the same as above // vset_lane or vsetq_lane intrinsics llvm_unreachable("NEON::BI__builtin_neon_vsetq_lane_bf16 NYI"); - case NEON::BI__builtin_neon_vset_lane_f64: - // The vector type needs a cast for the v1f64 variant. - llvm_unreachable("NEON::BI__builtin_neon_vset_lane_f64 NYI"); + + case NEON::BI__builtin_neon_vset_lane_f64: { + Ops.push_back(emitScalarExpr(E->getArg(2))); + Ops[1] = builder.createBitcast( + Ops[1], cir::VectorType::get(&getMLIRContext(), DoubleTy, 1)); + return builder.create(getLoc(E->getExprLoc()), Ops[1], + Ops[0], Ops[2]); + } case NEON::BI__builtin_neon_vsetq_lane_f64: { Ops.push_back(emitScalarExpr(E->getArg(2))); Ops[1] = builder.createBitcast( @@ -3698,7 +3703,6 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, return builder.create(getLoc(E->getExprLoc()), Ops[1], Ops[0], Ops[2]); } - case NEON::BI__builtin_neon_vget_lane_i8: case NEON::BI__builtin_neon_vdupb_lane_i8: Ops[0] = builder.createBitcast( diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index da589a44f00f..f1335b6053ee 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -137,15 +137,27 @@ float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) { // LLVM: [[INTRN_RES:%.*]] = insertelement <4 x float> [[B]], float [[A]], i32 3 // LLVM: ret <4 x float> [[INTRN_RES]] -float64x2_t test_vsetq_land_f64(float64_t a, float64x2_t b) { +float64x1_t test_vset_lane_f64(float64_t a, float64x1_t b) { + return vset_lane_f64(a, b, 0); +} + +// CIR-LABEL: test_vset_lane_f64 +// CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vset_lane_f64(double{{.*}}[[A:%.*]], <1 x double>{{.*}}[[B:%.*]]) +// LLVM: [[INTRN_RES:%.*]] = insertelement <1 x double> [[B]], double [[A]], i32 0 +// LLVM: ret <1 x double> [[INTRN_RES]] + +float64x2_t test_vsetq_lane_f64(float64_t a, float64x2_t b) { return vsetq_lane_f64(a, b, 0); } -// CIR-LABEL: test_vsetq_land_f64 +// CIR-LABEL: test_vsetq_lane_f64 // CIR: [[IDX:%.*]] = cir.const #cir.int<0> : !s32i // CIR: {{%.*}} = cir.vec.insert {{%.*}}, {{%.*}}[[[IDX]] : !s32i] : !cir.vector -// LLVM: {{.*}}test_vsetq_land_f64(double{{.*}}[[A:%.*]], <2 x double>{{.*}}[[B:%.*]]) +// LLVM: {{.*}}test_vsetq_lane_f64(double{{.*}}[[A:%.*]], <2 x double>{{.*}}[[B:%.*]]) // LLVM: [[INTRN_RES:%.*]] = insertelement <2 x double> [[B]], double [[A]], i32 0 // LLVM: ret <2 x double> [[INTRN_RES]] From d8df9fd4d254a553e3c46c6815b508aeb18021e4 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 7 Jan 2025 22:53:05 +0800 Subject: [PATCH 2198/2301] [CIR] Add MLIR lowering for f16, bf16, and f128 (#1264) --- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 9 ++++++++ clang/test/CIR/Lowering/ThroughMLIR/float.cir | 23 +++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 clang/test/CIR/Lowering/ThroughMLIR/float.cir diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 63253ecc454c..e4fccd082351 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -1401,6 +1401,15 @@ static mlir::TypeConverter prepareTypeConverter() { converter.addConversion([&](cir::LongDoubleType type) -> mlir::Type { return converter.convertType(type.getUnderlying()); }); + converter.addConversion([&](cir::FP128Type type) -> mlir::Type { + return mlir::Float128Type::get(type.getContext()); + }); + converter.addConversion([&](cir::FP16Type type) -> mlir::Type { + return mlir::Float16Type::get(type.getContext()); + }); + converter.addConversion([&](cir::BF16Type type) -> mlir::Type { + return mlir::BFloat16Type::get(type.getContext()); + }); converter.addConversion([&](cir::ArrayType type) -> mlir::Type { SmallVector shape; mlir::Type curType = type; diff --git a/clang/test/CIR/Lowering/ThroughMLIR/float.cir b/clang/test/CIR/Lowering/ThroughMLIR/float.cir new file mode 100644 index 000000000000..002f1c5053ce --- /dev/null +++ b/clang/test/CIR/Lowering/ThroughMLIR/float.cir @@ -0,0 +1,23 @@ +// RUN: cir-opt %s -cir-to-mlir -o %t.mlir +// RUN: FileCheck %s --input-file %t.mlir + +module { + cir.func @foo() { + %0 = cir.const #cir.fp<1.0> : !cir.float + %1 = cir.const #cir.fp<1.0> : !cir.double + %2 = cir.const #cir.fp<1.0> : !cir.long_double + %3 = cir.const #cir.fp<1.0> : !cir.f128 + %4 = cir.const #cir.fp<1.0> : !cir.f16 + %5 = cir.const #cir.fp<1.0> : !cir.bf16 + cir.return + } + + // CHECK-LABEL: @foo + // CHECK: %{{.+}} = arith.constant 1.000000e+00 : f32 + // CHECK-NEXT: %{{.+}} = arith.constant 1.000000e+00 : f64 + // CHECK-NEXT: %{{.+}} = arith.constant 1.000000e+00 : f80 + // CHECK-NEXT: %{{.+}} = arith.constant 1.000000e+00 : f128 + // CHECK-NEXT: %{{.+}} = arith.constant 1.000000e+00 : f16 + // CHECK-NEXT: %{{.+}} = arith.constant 1.000000e+00 : bf16 + // CHECK: } +} \ No newline at end of file From e62e033d9cd8a7ead268bd95b00ccb5ae551f04b Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 7 Jan 2025 09:54:31 -0500 Subject: [PATCH 2199/2301] Lower neon_vmaxvq_u8,neon_vmaxvq_s8, neon_vmaxv_u8 and neon_vmaxvq_s8 (#1265) [Neon definiton](https://developer.arm.com/architectures/instruction-sets/intrinsics/#f:@navigationhierarchiessimdisa=[Neon]&q=vmaxv_s8) [OG implementation](https://github.com/llvm/clangir/blob/04d7dcfb2582753f3eccbf01ec900d60297cbf4b/clang/lib/CodeGen/CGBuiltin.cpp#L13202) Implementation in this PR is different from OG as 1. avoided code duplication by extracting out the common pattern 2. avoided using i32 as return type of the intrinsic call, so eliminated the need for casting result of the intrinsic call. This way of OG's implementation is quite unnecessary IMHO, this is MAX, not ADD or MUL. After all, using the expected type as return type of intrinsic call produces [the same ASM code](https://godbolt.org/z/3nKG7fxPb). --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 32 +++++++++++-- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 48 +++++++++++++++++++ 2 files changed, 76 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 82f8b82495df..0153ca8b4643 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2349,6 +2349,26 @@ emitCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, return builder.createBitcast(res, resultType); } +/// The function `emitCommonNeonVecAcrossCall` implements a common way +/// to implement neon intrinsic which has the following pattern: +/// 1. There is only one argument which is of vector type +/// 2. The result of the neon intrinsic is the element type of the input. +/// This type of intrinsic usually is for across operations of the input vector. + +static mlir::Value emitCommonNeonVecAcrossCall(CIRGenFunction &cgf, + llvm::StringRef intrincsName, + mlir::Type eltTy, + unsigned vecLen, + const clang::CallExpr *e) { + CIRGenBuilderTy &builder = cgf.getBuilder(); + mlir::Value op = cgf.emitScalarExpr(e->getArg(0)); + cir::VectorType vTy = + cir::VectorType::get(&cgf.getMLIRContext(), eltTy, vecLen); + llvm::SmallVector args{op}; + return emitNeonCall(builder, {vTy}, args, intrincsName, eltTy, + cgf.getLoc(e->getExprLoc())); +} + mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, @@ -4274,25 +4294,29 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NEON::BI__builtin_neon_vaddvq_s16 NYI"); } case NEON::BI__builtin_neon_vmaxv_u8: { - llvm_unreachable("NEON::BI__builtin_neon_vmaxv_u8 NYI"); + return emitCommonNeonVecAcrossCall(*this, "aarch64.neon.umaxv", UInt8Ty, 8, + E); } case NEON::BI__builtin_neon_vmaxv_u16: { llvm_unreachable("NEON::BI__builtin_neon_vmaxv_u16 NYI"); } case NEON::BI__builtin_neon_vmaxvq_u8: { - llvm_unreachable("NEON::BI__builtin_neon_vmaxvq_u8 NYI"); + return emitCommonNeonVecAcrossCall(*this, "aarch64.neon.umaxv", UInt8Ty, 16, + E); } case NEON::BI__builtin_neon_vmaxvq_u16: { llvm_unreachable("NEON::BI__builtin_neon_vmaxvq_u16 NYI"); } case NEON::BI__builtin_neon_vmaxv_s8: { - llvm_unreachable("NEON::BI__builtin_neon_vmaxv_s8 NYI"); + return emitCommonNeonVecAcrossCall(*this, "aarch64.neon.smaxv", SInt8Ty, 8, + E); } case NEON::BI__builtin_neon_vmaxv_s16: { llvm_unreachable("NEON::BI__builtin_neon_vmaxv_s16 NYI"); } case NEON::BI__builtin_neon_vmaxvq_s8: { - llvm_unreachable("NEON::BI__builtin_neon_vmaxvq_s8 NYI"); + return emitCommonNeonVecAcrossCall(*this, "aarch64.neon.smaxv", SInt8Ty, 16, + E); } case NEON::BI__builtin_neon_vmaxvq_s16: { llvm_unreachable("NEON::BI__builtin_neon_vmaxvq_s16 NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index f1335b6053ee..fc01ad8b8d44 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -1751,3 +1751,51 @@ uint64_t test_vaddlvq_u32(uint32x4_t a) { // LLVM-NEXT: [[VADDLVQ_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> [[A]]) // LLVM-NEXT: ret i64 [[VADDLVQ_U32_I]] } + +int8_t test_vmaxv_s8(int8x8_t a) { + return vmaxv_s8(a); + + // CIR-LABEL: vmaxv_s8 + // CIR: cir.llvm.intrinsic "aarch64.neon.smaxv" {{%.*}} : (!cir.vector) -> !s8i + + // LLVM-LABEL: @test_vmaxv_s8 + // LLVM-SAME: (<8 x i8> [[a:%.*]]) + // LLVM: [[res:%.*]] = call i8 @llvm.aarch64.neon.smaxv.i8.v8i8(<8 x i8> [[a]]) + // LLVM: ret i8 [[res]] +} + +int8_t test_vmaxv_u8(uint8x8_t a) { + return vmaxv_u8(a); + + // CIR-LABEL: vmaxv_u8 + // CIR: cir.llvm.intrinsic "aarch64.neon.umaxv" {{%.*}} : (!cir.vector) -> !u8i + + // LLVM-LABEL: @test_vmaxv_u8 + // LLVM-SAME: (<8 x i8> [[a:%.*]]) + // LLVM: [[res:%.*]] = call i8 @llvm.aarch64.neon.umaxv.i8.v8i8(<8 x i8> [[a]]) + // LLVM: ret i8 [[res]] +} + +int8_t test_vmaxvq_s8(int8x16_t a) { + return vmaxvq_s8(a); + + // CIR-LABEL: vmaxvq_s8 + // CIR: cir.llvm.intrinsic "aarch64.neon.smaxv" {{%.*}} : (!cir.vector) -> !s8i + + // LLVM-LABEL: @test_vmaxvq_s8 + // LLVM-SAME: (<16 x i8> [[a:%.*]]) + // LLVM: [[res:%.*]] = call i8 @llvm.aarch64.neon.smaxv.i8.v16i8(<16 x i8> [[a]]) + // LLVM: ret i8 [[res]] +} + +int8_t test_vmaxvq_u8(uint8x16_t a) { + return vmaxvq_u8(a); + + // CIR-LABEL: vmaxvq_u8 + // CIR: cir.llvm.intrinsic "aarch64.neon.umaxv" {{%.*}} : (!cir.vector) -> !u8i + + // LLVM-LABEL: @test_vmaxvq_u8 + // LLVM-SAME: (<16 x i8> [[a:%.*]]) + // LLVM: [[res:%.*]] = call i8 @llvm.aarch64.neon.umaxv.i8.v16i8(<16 x i8> [[a]]) + // LLVM: ret i8 [[res]] +} From df3c39a0a891f6be4cb687aace286e7f1dfcb3e0 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 9 Jan 2025 17:10:07 +0300 Subject: [PATCH 2200/2301] [CIR][CodeGen] handle zero init padding case (#1257) I continue to use `csmith` and catch run time bags. Now it's time to fix the layout for the const structs. There is a divergence between const structs generated by CIR and the original codegen. And this PR makes one more step to eliminate it. There are cases where the extra padding is required - and here is a fix for some of them. I did not write extra tests, since the fixes in the existing already covers the code I added. The point is that now the layout for all of these structs in the LLVM IR with and without CIR is the same. --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 66 ++++++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenModule.h | 50 +++++++++++++++++ clang/test/CIR/CodeGen/bitfields.c | 6 +-- clang/test/CIR/CodeGen/const-bitfields.c | 4 +- clang/test/CIR/CodeGen/struct.c | 5 +- 5 files changed, 118 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index 3eb8627084f4..ecdcd2066fb0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -43,6 +43,19 @@ using namespace clang::CIRGen; namespace { class ConstExprEmitter; +static mlir::TypedAttr computePadding(CIRGenModule &CGM, CharUnits size) { + auto eltTy = CGM.UCharTy; + auto arSize = size.getQuantity(); + auto &bld = CGM.getBuilder(); + if (size > CharUnits::One()) { + SmallVector elts(arSize, bld.getZeroAttr(eltTy)); + return bld.getConstArray(mlir::ArrayAttr::get(bld.getContext(), elts), + bld.getArrayType(eltTy, arSize)); + } else { + return cir::ZeroAttr::get(bld.getContext(), eltTy); + } +} + static mlir::Attribute emitArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, mlir::Type CommonElementType, unsigned ArrayBound, @@ -70,12 +83,7 @@ struct ConstantAggregateBuilderUtils { } mlir::TypedAttr getPadding(CharUnits size) const { - auto eltTy = CGM.UCharTy; - auto arSize = size.getQuantity(); - auto &bld = CGM.getBuilder(); - SmallVector elts(arSize, bld.getZeroAttr(eltTy)); - return bld.getConstArray(mlir::ArrayAttr::get(bld.getContext(), elts), - bld.getArrayType(eltTy, arSize)); + return computePadding(CGM, size); } mlir::Attribute getZeroes(CharUnits ZeroSize) const { @@ -508,6 +516,11 @@ class ConstStructBuilder { bool Build(InitListExpr *ILE, bool AllowOverwrite); bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase, const CXXRecordDecl *VTableClass, CharUnits BaseOffset); + + bool ApplyZeroInitPadding(const ASTRecordLayout &Layout, unsigned FieldNo, + const FieldDecl &Field, bool AllowOverwrite, + CharUnits &SizeSoFar, bool &ZeroFieldSize); + mlir::Attribute Finalize(QualType Ty); }; @@ -614,6 +627,10 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) { if (CXXRD->getNumBases()) return false; + const bool ZeroInitPadding = CGM.shouldZeroInitPadding(); + bool ZeroFieldSize = false; + CharUnits SizeSoFar = CharUnits::Zero(); + for (FieldDecl *Field : RD->fields()) { ++FieldNo; @@ -642,6 +659,11 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) { continue; } + if (ZeroInitPadding && + !ApplyZeroInitPadding(Layout, FieldNo, *Field, AllowOverwrite, + SizeSoFar, ZeroFieldSize)) + return false; + // When emitting a DesignatedInitUpdateExpr, a nested InitListExpr // represents additional overwriting of our current constant value, and not // a new constant to emit independently. @@ -784,6 +806,38 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD, return true; } +bool ConstStructBuilder::ApplyZeroInitPadding( + const ASTRecordLayout &Layout, unsigned FieldNo, const FieldDecl &Field, + bool AllowOverwrite, CharUnits &SizeSoFar, bool &ZeroFieldSize) { + + uint64_t StartBitOffset = Layout.getFieldOffset(FieldNo); + CharUnits StartOffset = + CGM.getASTContext().toCharUnitsFromBits(StartBitOffset); + if (SizeSoFar < StartOffset) { + if (!AppendBytes(SizeSoFar, computePadding(CGM, StartOffset - SizeSoFar), + AllowOverwrite)) + return false; + } + + if (!Field.isBitField()) { + CharUnits FieldSize = + CGM.getASTContext().getTypeSizeInChars(Field.getType()); + SizeSoFar = StartOffset + FieldSize; + ZeroFieldSize = FieldSize.isZero(); + } else { + const CIRGenRecordLayout &RL = + CGM.getTypes().getCIRGenRecordLayout(Field.getParent()); + const CIRGenBitFieldInfo &Info = RL.getBitFieldInfo(&Field); + uint64_t EndBitOffset = StartBitOffset + Info.Size; + SizeSoFar = CGM.getASTContext().toCharUnitsFromBits(EndBitOffset); + if (EndBitOffset % CGM.getASTContext().getCharWidth() != 0) { + SizeSoFar++; + } + ZeroFieldSize = Info.Size == 0; + } + return true; +} + mlir::Attribute ConstStructBuilder::Finalize(QualType Type) { Type = Type.getNonReferenceType(); RecordDecl *RD = Type->castAs()->getDecl(); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 905754a4ad3a..a2c736455013 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -273,6 +273,56 @@ class CIRGenModule : public CIRGenTypeCache { return getTriple().isSPIRVLogical(); } + bool shouldZeroInitPadding() const { + // In C23 (N3096) $6.7.10: + // """ + // If any object is initialized with an empty initializer, then it is + // subject to default initialization: + // - if it is an aggregate, every member is initialized (recursively) + // according to these rules, and any padding is initialized to zero bits; + // - if it is a union, the first named member is initialized (recursively) + // according to these rules, and any padding is initialized to zero bits. + // + // If the aggregate or union contains elements or members that are + // aggregates or unions, these rules apply recursively to the subaggregates + // or contained unions. + // + // If there are fewer initializers in a brace-enclosed list than there are + // elements or members of an aggregate, or fewer characters in a string + // literal used to initialize an array of known size than there are elements + // in the array, the remainder of the aggregate is subject to default + // initialization. + // """ + // + // The standard seems ambiguous in the following two areas: + // 1. For a union type with empty initializer, if the first named member is + // not the largest member, then the bytes comes after the first named member + // but before padding are left unspecified. An example is: + // union U { int a; long long b;}; + // union U u = {}; // The first 4 bytes are 0, but 4-8 bytes are left + // unspecified. + // + // 2. It only mentions padding for empty initializer, but doesn't mention + // padding for a non empty initialization list. And if the aggregation or + // union contains elements or members that are aggregates or unions, and + // some are non empty initializers, while others are empty initializers, + // the padding initialization is unclear. An example is: + // struct S1 { int a; long long b; }; + // struct S2 { char c; struct S1 s1; }; + // // The values for paddings between s2.c and s2.s1.a, between s2.s1.a + // and s2.s1.b are unclear. + // struct S2 s2 = { 'c' }; + // + // Here we choose to zero initiailize left bytes of a union type because + // projects like the Linux kernel are relying on this behavior. If we don't + // explicitly zero initialize them, the undef values can be optimized to + // return garbage data. We also choose to zero initialize paddings for + // aggregates and unions, no matter they are initialized by empty + // initializers or non empty initializers. This can provide a consistent + // behavior. So projects like the Linux kernel can rely on it. + return !getLangOpts().CPlusPlus; + } + /// Return the mlir::Value for the address of the given global variable. /// If Ty is non-null and if the global doesn't exist, then it will be created /// with the specified type instead of whatever the normal requested type diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index 5f13b424daa4..cd8e88f5c4c9 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -59,12 +59,12 @@ typedef struct { // CHECK: !ty_G = !cir.struct // CHECK: !ty_T = !cir.struct // CHECK: !ty_anon2E0_ = !cir.struct -// CHECK: !ty_anon_struct = !cir.struct // CHECK: #bfi_a = #cir.bitfield_info // CHECK: #bfi_e = #cir.bitfield_info // CHECK: !ty_S = !cir.struct, !u16i, !u32i}> // CHECK: !ty_U = !cir.struct}> // CHECK: !ty___long = !cir.struct}> +// CHECK: !ty_anon_struct = !cir.struct, !s32i}> // CHECK: #bfi_d = #cir.bitfield_info, size = 2, offset = 17, is_signed = true> // CHECK: cir.func {{.*@store_field}} @@ -127,7 +127,7 @@ void createU() { // CHECK: cir.func {{.*@createD}} // CHECK: %0 = cir.alloca !ty_D, !cir.ptr, ["d"] {alignment = 4 : i64} // CHECK: %1 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr -// CHECK: %2 = cir.const #cir.const_struct<{#cir.int<33> : !u8i, #cir.int<0> : !u8i, #cir.int<3> : !s32i}> : !ty_anon_struct +// CHECK: %2 = cir.const #cir.const_struct<{#cir.int<33> : !u8i, #cir.int<0> : !u8i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array, #cir.int<3> : !s32i}> : !ty_anon_struct // CHECK: cir.store %2, %1 : !ty_anon_struct, !cir.ptr void createD() { D d = {1,2,3}; @@ -148,7 +148,7 @@ typedef struct { int y ; } G; -// CHECK: cir.global external @g = #cir.const_struct<{#cir.int<133> : !u8i, #cir.int<127> : !u8i, #cir.int<254> : !s32i}> : !ty_anon_struct +// CHECK: cir.global external @g = #cir.const_struct<{#cir.int<133> : !u8i, #cir.int<127> : !u8i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array, #cir.int<254> : !s32i}> : !ty_anon_struct G g = { -123, 254UL}; // CHECK: cir.func {{.*@get_y}} diff --git a/clang/test/CIR/CodeGen/const-bitfields.c b/clang/test/CIR/CodeGen/const-bitfields.c index 0015f4fe5c83..71e02507b9d0 100644 --- a/clang/test/CIR/CodeGen/const-bitfields.c +++ b/clang/test/CIR/CodeGen/const-bitfields.c @@ -14,13 +14,13 @@ struct Inner { unsigned d : 30; }; -// CHECK: !ty_anon_struct = !cir.struct +// CHECK: !ty_anon_struct = !cir.struct // CHECK: !ty_T = !cir.struct, !s32i} #cir.record.decl.ast> // CHECK: !ty_anon_struct1 = !cir.struct, !u8i, !u8i, !u8i, !u8i}> // CHECK: #bfi_Z = #cir.bitfield_info, size = 9, offset = 11, is_signed = true> struct T GV = { 1, 5, 26, 42 }; -// CHECK: cir.global external @GV = #cir.const_struct<{#cir.int<161> : !u8i, #cir.int<208> : !u8i, #cir.int<0> : !u8i, #cir.int<42> : !s32i}> : !ty_anon_struct +// CHECK: cir.global external @GV = #cir.const_struct<{#cir.int<161> : !u8i, #cir.int<208> : !u8i, #cir.int<0> : !u8i, #cir.zero : !u8i, #cir.int<42> : !s32i}> : !ty_anon_struct // check padding is used (const array of zeros) struct Inner var = { 1, 0, 1, 21}; diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 43f1576bb09a..267c755e0a7e 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -38,8 +38,9 @@ void shouldConstInitStructs(void) { // CHECK: cir.func @shouldConstInitStructs struct Foo f = {1, 2, {3, 4}}; // CHECK: %[[#V0:]] = cir.alloca !ty_Foo, !cir.ptr, ["f"] {alignment = 4 : i64} - // CHECK: %[[#V1:]] = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_Bar}> : !ty_Foo - // CHECK: cir.store %[[#V1]], %[[#V0]] : !ty_Foo, !cir.ptr + // CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr l + // CHECK: %[[#V2:]] = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_Bar}> : !ty_anon_struct + // CHECK: cir.store %[[#V2]], %[[#V1]] : !ty_anon_struct, !cir.ptr } // Should zero-initialize uninitialized global structs. From a995452b7805bb387d296e328679a7cef75c2558 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Thu, 9 Jan 2025 06:11:25 -0800 Subject: [PATCH 2201/2301] [CIR] Cleanup: convert from Clang type to CIR type (#1271) Class `CIRGenFunction` contained three identical functions that converted from a Clang AST type (`clang::QualType`) to a ClangIR type (`mlir::Type`): `convertType`, `ConvertType`, and `getCIRType`. This embarrassment of duplication needed to be fixed, along with cleaning up other functions that convert from Clang types to ClangIR types. The three functions `CIRGenFunction::ConvertType`, `CIRGenFunction::convertType`, and `CIRGenFunction::getCIRType` were combined into a single function `CIRGenFunction::convertType`. Other functions were renamed as follows: - `CIRGenTypes::ConvertType` to `CIRGenTypes::convertType` - `CIRGenTypes::ConvertFunctionTypeInternal` to `CIRGenTypes::convertFunctionTypeInternal` - `CIRGenModule::getCIRType` to `CIRGenModule::convertType` - `ConstExprEmitter::ConvertType` to `ConstExprEmitter::convertType` - `ScalarExprEmitter::ConvertType` to `ScalarExprEmitter::convertType` Many cases of `getTypes().convertType(t)` and `getTypes().convertTypeForMem(t)` were changed to just `convertType(t)` and `convertTypeForMem(t)`, respectively, because the forwarding functions in `CIRGenModule` and `CIRGenFunction` make the explicit call to `getTypes()` unnecessary. --- clang/lib/CIR/CodeGen/CIRAsm.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 44 +++++------ .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 33 ++++---- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 10 +-- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 12 +-- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 31 ++++---- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 78 +++++++++---------- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 19 ++--- clang/lib/CIR/CodeGen/CIRGenFunction.h | 10 +-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 15 ++-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 15 ++-- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 26 +++---- clang/lib/CIR/CodeGen/CIRGenTypes.h | 6 +- clang/lib/CIR/CodeGen/TargetInfo.cpp | 16 ++-- 22 files changed, 162 insertions(+), 185 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index a20e75e07423..2513b92e8010 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -455,7 +455,7 @@ mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &S) { uint64_t InputSize = getContext().getTypeSize(InputTy); if (getContext().getTypeSize(OutputType) < InputSize) { // Form the asm to return the value as a larger integer or fp type. - ResultRegTypes.back() = ConvertType(InputTy); + ResultRegTypes.back() = convertType(InputTy); } } if (mlir::Type AdjTy = getTargetHooks().adjustInlineAsmType( @@ -478,7 +478,7 @@ mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &S) { // Otherwise there will be a mis-match if the matrix is also an // input-argument which is represented as vector. if (isa(OutExpr->getType().getCanonicalType())) - DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType())); + DestAddr = DestAddr.withElementType(convertType(OutExpr->getType())); ArgTypes.push_back(DestAddr.getType()); ArgElemTypes.push_back(DestAddr.getElementType()); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 93c919134c21..bb285e4811e3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -72,7 +72,7 @@ static RValue emitUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { template static RValue emitUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &CGF, const CallExpr &E) { - auto ResultType = CGF.ConvertType(E.getType()); + auto ResultType = CGF.convertType(E.getType()); auto Src = CGF.emitScalarExpr(E.getArg(0)); if (CGF.getBuilder().getIsFPConstrained()) @@ -88,7 +88,7 @@ static RValue emitBinaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { auto Arg1 = CGF.emitScalarExpr(E.getArg(1)); auto Loc = CGF.getLoc(E.getExprLoc()); - auto Ty = CGF.ConvertType(E.getType()); + auto Ty = CGF.convertType(E.getType()); auto Call = CGF.getBuilder().create(Loc, Ty, Arg0, Arg1); return RValue::get(Call->getResult(0)); @@ -101,7 +101,7 @@ static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &CGF, auto Arg1 = CGF.emitScalarExpr(E.getArg(1)); auto Loc = CGF.getLoc(E.getExprLoc()); - auto Ty = CGF.ConvertType(E.getType()); + auto Ty = CGF.convertType(E.getType()); if (CGF.getBuilder().getIsFPConstrained()) { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, &E); @@ -122,7 +122,7 @@ emitBuiltinBitOp(CIRGenFunction &CGF, const CallExpr *E, else arg = CGF.emitScalarExpr(E->getArg(0)); - auto resultTy = CGF.ConvertType(E->getType()); + auto resultTy = CGF.convertType(E->getType()); auto op = CGF.getBuilder().create(CGF.getLoc(E->getExprLoc()), resultTy, arg); return RValue::get(op); @@ -415,7 +415,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // of the type. We feel it should be Ok to use expression type because // it is hard to imagine a builtin function evaluates to // a value that over/underflows its own defined type. - mlir::Type resTy = getCIRType(E->getType()); + mlir::Type resTy = convertType(E->getType()); return RValue::get(builder.getConstFP(getLoc(E->getExprLoc()), resTy, Result.Val.getFloat())); } @@ -1173,7 +1173,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return emitRotate(E, true); case Builtin::BI__builtin_constant_p: { - mlir::Type ResultType = ConvertType(E->getType()); + mlir::Type ResultType = convertType(E->getType()); const Expr *Arg = E->getArg(0); QualType ArgType = Arg->getType(); @@ -1199,7 +1199,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // Convert Objective-C objects to id because we cannot distinguish between // LLVM types for Obj-C classes as they are opaque. ArgType = CGM.getASTContext().getObjCIdType(); - ArgValue = builder.createBitcast(ArgValue, ConvertType(ArgType)); + ArgValue = builder.createBitcast(ArgValue, convertType(ArgType)); mlir::Value Result = builder.create( getLoc(E->getSourceRange()), ArgValue); @@ -1215,7 +1215,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_object_size: { unsigned Type = E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); - auto ResType = mlir::dyn_cast(ConvertType(E->getType())); + auto ResType = mlir::dyn_cast(convertType(E->getType())); assert(ResType && "not sure what to do?"); // We pass this builtin onto the optimizer so that it can figure out the @@ -1306,7 +1306,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_nondeterministic_value NYI"); case Builtin::BI__builtin_elementwise_abs: { - mlir::Type cirTy = ConvertType(E->getArg(0)->getType()); + mlir::Type cirTy = convertType(E->getArg(0)->getType()); bool isIntTy = cir::isIntOrIntVectorTy(cirTy); if (!isIntTy) { mlir::Type eltTy = cirTy; @@ -1851,7 +1851,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, auto loc = getLoc(E->getBeginLoc()); return RValue::get(builder.createZExtOrBitCast( loc, emitSignBit(loc, *this, emitScalarExpr(E->getArg(0))), - ConvertType(E->getType()))); + convertType(E->getType()))); } case Builtin::BI__warn_memset_zero_len: @@ -1897,8 +1897,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, auto EncompassingCIRTy = cir::IntType::get( &getMLIRContext(), EncompassingInfo.Width, EncompassingInfo.Signed); - auto ResultCIRTy = - mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); + auto ResultCIRTy = mlir::cast(CGM.convertType(ResultQTy)); mlir::Value Left = emitScalarExpr(LeftArg); mlir::Value Right = emitScalarExpr(RightArg); @@ -2008,8 +2007,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, clang::QualType ResultQTy = ResultArg->getType()->castAs()->getPointeeType(); - auto ResultCIRTy = - mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); + auto ResultCIRTy = mlir::cast(CGM.convertType(ResultQTy)); auto Loc = getLoc(E->getSourceRange()); auto ArithResult = @@ -2304,7 +2302,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcNan), - ConvertType(E->getType()))); + convertType(E->getType()))); } case Builtin::BI__builtin_issignaling: { @@ -2314,7 +2312,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcSNan), - ConvertType(E->getType()))); + convertType(E->getType()))); } case Builtin::BI__builtin_isinf: { @@ -2326,7 +2324,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcInf), - ConvertType(E->getType()))); + convertType(E->getType()))); } case Builtin::BIfinite: @@ -2344,7 +2342,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcFinite), - ConvertType(E->getType()))); + convertType(E->getType()))); } case Builtin::BI__builtin_isnormal: { @@ -2354,7 +2352,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcNormal), - ConvertType(E->getType()))); + convertType(E->getType()))); } case Builtin::BI__builtin_issubnormal: { @@ -2364,7 +2362,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcSubnormal), - ConvertType(E->getType()))); + convertType(E->getType()))); } case Builtin::BI__builtin_iszero: { @@ -2374,7 +2372,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( Loc, builder.createIsFPClass(Loc, V, FPClassTest::fcZero), - ConvertType(E->getType()))); + convertType(E->getType()))); } case Builtin::BI__builtin_isfpclass: { @@ -2389,7 +2387,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( - Loc, builder.createIsFPClass(Loc, V, Test), ConvertType(E->getType()))); + Loc, builder.createIsFPClass(Loc, V, Test), convertType(E->getType()))); } } @@ -2706,6 +2704,6 @@ cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *FD, Name = astContext.BuiltinInfo.getName(BuiltinID).substr(10); } - auto Ty = getTypes().ConvertType(FD->getType()); + auto Ty = convertType(FD->getType()); return GetOrCreateCIRFunction(Name, Ty, D, /*ForVTable=*/false); } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 0153ca8b4643..f030ebc7b1ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2133,7 +2133,7 @@ static mlir::Value emitArmLdrexNon128Intrinsic(unsigned int builtinID, // Get Instrinc call CIRGenBuilderTy &builder = cgf.getBuilder(); QualType clangResTy = clangCallExpr->getType(); - mlir::Type realResTy = cgf.ConvertType(clangResTy); + mlir::Type realResTy = cgf.convertType(clangResTy); // Return type of LLVM intrinsic is defined in Intrinsic.td, // which can be found under LLVM IR directory. mlir::Type funcResTy = builder.getSInt64Ty(); @@ -2345,7 +2345,7 @@ emitCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, mlir::Value res = emitNeonCall(builder, std::move(argTypes), ops, intrincsName, funcResTy, cgf.getLoc(e->getExprLoc())); - mlir::Type resultType = cgf.ConvertType(e->getType()); + mlir::Type resultType = cgf.convertType(e->getType()); return builder.createBitcast(res, resultType); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 45bcdbf40cee..5a5bf3007c14 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1610,7 +1610,7 @@ void CIRGenFunction::emitNonNullArgCheck(RValue RV, QualType ArgType, mlir::Value CIRGenFunction::emitVAArg(VAArgExpr *VE, Address &VAListAddr) { assert(!VE->isMicrosoftABI() && "NYI"); auto loc = CGM.getLoc(VE->getExprLoc()); - auto type = ConvertType(VE->getType()); + auto type = convertType(VE->getType()); auto vaList = emitVAListRef(VE->getSubExpr()).getPointer(); return builder.create(loc, type, vaList); } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 01ff43a783f2..894708e62645 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -582,7 +582,7 @@ Address CIRGenFunction::getAddressOfDirectBaseInCompleteClass( mlir::Location loc, Address This, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual) { // 'this' must be a pointer (in some address space) to Derived. - assert(This.getElementType() == ConvertType(Derived)); + assert(This.getElementType() == convertType(Derived)); // Compute the offset of the virtual base. CharUnits Offset; @@ -592,7 +592,7 @@ Address CIRGenFunction::getAddressOfDirectBaseInCompleteClass( else Offset = Layout.getBaseClassOffset(Base); - return builder.createBaseClassAddr(loc, This, ConvertType(Base), + return builder.createBaseClassAddr(loc, This, convertType(Base), Offset.getQuantity(), /*assume_not_null=*/true); } @@ -1591,7 +1591,7 @@ Address CIRGenFunction::getAddressOfDerivedClass( QualType derivedTy = getContext().getCanonicalType(getContext().getTagDeclType(derived)); - mlir::Type derivedValueTy = ConvertType(derivedTy); + mlir::Type derivedValueTy = convertType(derivedTy); CharUnits nonVirtualOffset = CGM.getNonVirtualBaseClassOffset(derived, pathBegin, pathEnd); diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp index 6b6ed53faafa..0408ddc26449 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp @@ -262,7 +262,7 @@ CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &S) { // Initialize address of coroutine frame to null auto astVoidPtrTy = CGM.getASTContext().VoidPtrTy; - auto allocaTy = getTypes().convertTypeForMem(astVoidPtrTy); + auto allocaTy = convertTypeForMem(astVoidPtrTy); Address coroFrame = CreateTempAlloca(allocaTy, getContext().getTypeAlignInChars(astVoidPtrTy), openCurlyLoc, "__coro_frame_addr", diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index c4d53a8477ec..b44896241eff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -139,7 +139,7 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &D, if (isEscapingByRef) llvm_unreachable("NYI"); - mlir::Type allocaTy = getTypes().convertTypeForMem(Ty); + mlir::Type allocaTy = convertTypeForMem(Ty); CharUnits allocaAlignment = alignment; // Create the temp alloca and declare variable using it. mlir::Value addrVal; @@ -482,7 +482,7 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, if (D.hasAttr() || D.hasAttr()) llvm_unreachable("CUDA is NYI"); else if (Ty.getAddressSpace() != LangAS::opencl_local) - Init = builder.getZeroInitAttr(getTypes().ConvertType(Ty)); + Init = builder.getZeroInitAttr(convertType(Ty)); cir::GlobalOp GV = builder.createVersionedGlobal( getModule(), getLoc(D.getLocation()), Name, LTy, false, Linkage, AS); @@ -620,7 +620,7 @@ void CIRGenFunction::emitStaticVarDecl(const VarDecl &D, // Store into LocalDeclMap before generating initializer to handle // circular references. - mlir::Type elemTy = getTypes().convertTypeForMem(D.getType()); + mlir::Type elemTy = convertTypeForMem(D.getType()); setAddrOfLocalVar(&D, Address(addr, elemTy, alignment)); // We can't have a VLA here, but we can have a pointer to a VLA, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 3921f04688d7..25fb9a8b501d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -173,8 +173,7 @@ static Address emitPointerWithAlignment(const Expr *expr, llvm_unreachable("NYI"); } - auto ElemTy = - cgf.getTypes().convertTypeForMem(expr->getType()->getPointeeType()); + auto ElemTy = cgf.convertTypeForMem(expr->getType()->getPointeeType()); addr = cgf.getBuilder().createElementBitCast( cgf.getLoc(expr->getSourceRange()), addr, ElemTy); if (CE->getCastKind() == CK_AddressSpaceConversion) { @@ -443,7 +442,7 @@ LValue CIRGenFunction::emitLValueForFieldInitialization( FieldIndex); // Make sure that the address is pointing to the right type. - auto memTy = getTypes().convertTypeForMem(FieldType); + auto memTy = convertTypeForMem(FieldType); V = builder.createElementBitCast(getLoc(Field->getSourceRange()), V, memTy); // TODO: Generate TBAA information that describes this access as a structure @@ -889,7 +888,7 @@ void CIRGenFunction::emitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm_unreachable("volatile bit-field is not implemented for the AACPS"); const CIRGenBitFieldInfo &info = Dst.getBitFieldInfo(); - mlir::Type resLTy = getTypes().convertTypeForMem(Dst.getType()); + mlir::Type resLTy = convertTypeForMem(Dst.getType()); Address ptr = Dst.getBitFieldAddress(); const bool useVolatile = @@ -921,7 +920,7 @@ static LValue emitGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, // as part of getAddrOfGlobalVar. auto V = CGF.CGM.getAddrOfGlobalVar(VD); - auto RealVarTy = CGF.getTypes().convertTypeForMem(VD->getType()); + auto RealVarTy = CGF.convertTypeForMem(VD->getType()); cir::PointerType realPtrTy = CGF.getBuilder().getPointerTo( RealVarTy, cast_if_present( cast(V.getType()).getAddrSpace())); @@ -988,9 +987,8 @@ static LValue emitFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E, mlir::Value addr = CGF.getBuilder().create( loc, ptrTy, funcOp.getSymName()); - if (funcOp.getFunctionType() != - CGF.CGM.getTypes().ConvertType(FD->getType())) { - fnTy = CGF.CGM.getTypes().ConvertType(FD->getType()); + if (funcOp.getFunctionType() != CGF.convertType(FD->getType())) { + fnTy = CGF.convertType(FD->getType()); ptrTy = cir::PointerType::get(CGF.getBuilder().getContext(), fnTy); addr = CGF.getBuilder().create(addr.getLoc(), ptrTy, @@ -1618,7 +1616,7 @@ Address CIRGenFunction::emitArrayToPointerDecay(const Expr *E, mlir::Value ptr = CGM.getBuilder().maybeBuildArrayDecay( CGM.getLoc(E->getSourceRange()), Addr.getPointer(), - getTypes().convertTypeForMem(EltType)); + convertTypeForMem(EltType)); return Address(ptr, Addr.getAlignment()); } @@ -1759,7 +1757,7 @@ static Address emitArraySubscriptPtr( assert(0 && "NYI"); } - return Address(eltPtr, CGF.getTypes().convertTypeForMem(eltType), eltAlign); + return Address(eltPtr, CGF.convertTypeForMem(eltType), eltAlign); } LValue CIRGenFunction::emitArraySubscriptExpr(const ArraySubscriptExpr *E, @@ -1994,7 +1992,7 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *E) { if (LV.isSimple()) { Address V = LV.getAddress(); if (V.isValid()) { - auto T = getTypes().convertTypeForMem(E->getType()); + auto T = convertTypeForMem(E->getType()); if (V.getElementType() != T) LV.setAddress( builder.createElementBitCast(getLoc(E->getSourceRange()), V, T)); @@ -2038,8 +2036,8 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *E) { builder.getAddrSpaceAttr(E->getSubExpr()->getType().getAddressSpace()); auto DestAS = builder.getAddrSpaceAttr(E->getType().getAddressSpace()); mlir::Value V = getTargetHooks().performAddrSpaceCast( - *this, LV.getPointer(), SrcAS, DestAS, ConvertType(DestTy)); - return makeAddrLValue(Address(V, getTypes().convertTypeForMem(E->getType()), + *this, LV.getPointer(), SrcAS, DestAS, convertType(DestTy)); + return makeAddrLValue(Address(V, convertTypeForMem(E->getType()), LV.getAddress().getAlignment()), E->getType(), LV.getBaseInfo(), LV.getTBAAInfo()); } @@ -2874,7 +2872,7 @@ mlir::Value CIRGenFunction::emitAlloca(StringRef name, QualType ty, mlir::Location loc, CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize) { - return emitAlloca(name, getCIRType(ty), loc, alignment, + return emitAlloca(name, convertType(ty), loc, alignment, insertIntoFnEntryBlock, arraySize); } @@ -3016,7 +3014,7 @@ Address CIRGenFunction::emitLoadOfReference(LValue refLVal, mlir::Location loc, CharUnits align = CGM.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo, pointeeTBAAInfo, /* forPointeeType= */ true); - return Address(load, getTypes().convertTypeForMem(pointeeType), align); + return Address(load, convertTypeForMem(pointeeType), align); } LValue CIRGenFunction::emitLoadOfReferenceLValue(LValue RefLVal, @@ -3051,9 +3049,8 @@ Address CIRGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, mlir::Location Loc, const Twine &Name, Address *Alloca, mlir::OpBuilder::InsertPoint ip) { - Address Result = - CreateTempAlloca(getTypes().convertTypeForMem(Ty), Align, Loc, Name, - /*ArraySize=*/nullptr, Alloca, ip); + Address Result = CreateTempAlloca(convertTypeForMem(Ty), Align, Loc, Name, + /*ArraySize=*/nullptr, Alloca, ip); if (Ty->isConstantMatrixType()) { assert(0 && "NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 44e214fb1f55..526559a0060c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -528,7 +528,7 @@ void AggExprEmitter::emitArrayInit(Address DestPtr, cir::ArrayType AType, /*condBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { auto currentElement = builder.createLoad(loc, tmpAddr); - mlir::Type boolTy = CGF.getCIRType(CGF.getContext().BoolTy); + mlir::Type boolTy = CGF.convertType(CGF.getContext().BoolTy); auto cmp = builder.create(loc, boolTy, cir::CmpOpKind::ne, currentElement, end); builder.createCondition(cmp); @@ -999,7 +999,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { // GCC union extension QualType Ty = E->getSubExpr()->getType(); - Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty)); + Address CastPtr = Dest.getAddress().withElementType(CGF.convertType(Ty)); emitInitializationToLValue(E->getSubExpr(), CGF.makeAddrLValue(CastPtr, Ty)); break; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 068f060ca7c5..83af7ee98f58 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -1089,7 +1089,7 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *E) { mlir::Type elementTy; Address result = Address::invalid(); auto createCast = [&]() { - elementTy = getTypes().convertTypeForMem(allocType); + elementTy = convertTypeForMem(allocType); result = builder.createElementBitCast(getLoc(E->getSourceRange()), allocation, elementTy); }; @@ -1240,7 +1240,7 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *DeleteFD, // Pass the pointer itself. QualType ArgTy = *ParamTypeIt++; mlir::Value DeletePtr = - builder.createBitcast(Ptr.getLoc(), Ptr, ConvertType(ArgTy)); + builder.createBitcast(Ptr.getLoc(), Ptr, convertType(ArgTy)); DeleteArgs.add(RValue::get(DeletePtr), ArgTy); // Pass the std::destroying_delete tag if present. @@ -1254,7 +1254,7 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *DeleteFD, QualType SizeType = *ParamTypeIt++; CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy); assert(SizeTy && "expected cir::IntType"); - auto Size = builder.getConstInt(*currSrcLoc, ConvertType(SizeType), + auto Size = builder.getConstInt(*currSrcLoc, convertType(SizeType), DeleteTypeSize.getQuantity()); // For array new, multiply by the number of elements. @@ -1297,7 +1297,7 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *DeleteFD, static mlir::Value emitDynamicCastToNull(CIRGenFunction &CGF, mlir::Location Loc, QualType DestTy) { - mlir::Type DestCIRTy = CGF.ConvertType(DestTy); + mlir::Type DestCIRTy = CGF.convertType(DestTy); assert(mlir::isa(DestCIRTy) && "result of dynamic_cast should be a ptr"); @@ -1351,7 +1351,7 @@ mlir::Value CIRGenFunction::emitDynamicCast(Address ThisAddr, if (DCE->isAlwaysNull()) return emitDynamicCastToNull(*this, loc, destTy); - auto destCirTy = mlir::cast(ConvertType(destTy)); + auto destCirTy = mlir::cast(convertType(destTy)); return CGM.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy, destCirTy, isRefCast, ThisAddr); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 429d2f81af4e..754fa895afce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -389,7 +389,7 @@ mlir::Value ComplexExprEmitter::emitComplexToComplexCast(mlir::Value Val, llvm_unreachable("unexpected src type or dest type"); return Builder.createCast(CGF.getLoc(Loc), CastOpKind, Val, - CGF.ConvertType(DestType)); + CGF.convertType(DestType)); } mlir::Value ComplexExprEmitter::emitScalarToComplexCast(mlir::Value Val, @@ -405,7 +405,7 @@ mlir::Value ComplexExprEmitter::emitScalarToComplexCast(mlir::Value Val, llvm_unreachable("unexpected src type"); return Builder.createCast(CGF.getLoc(Loc), CastOpKind, Val, - CGF.ConvertType(DestType)); + CGF.convertType(DestType)); } mlir::Value ComplexExprEmitter::emitCast(CastKind CK, Expr *Op, @@ -830,7 +830,7 @@ LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *E, mlir::Value ComplexExprEmitter::VisitImaginaryLiteral(const ImaginaryLiteral *IL) { auto Loc = CGF.getLoc(IL->getExprLoc()); - auto Ty = mlir::cast(CGF.getCIRType(IL->getType())); + auto Ty = mlir::cast(CGF.convertType(IL->getType())); auto ElementTy = Ty.getElementTy(); mlir::TypedAttr RealValueAttr; @@ -865,7 +865,7 @@ mlir::Value ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) { // Empty init list initializes to null assert(E->getNumInits() == 0 && "Unexpected number of inits"); QualType Ty = E->getType()->castAs()->getElementType(); - return Builder.getZero(CGF.getLoc(E->getExprLoc()), CGF.ConvertType(Ty)); + return Builder.getZero(CGF.getLoc(E->getExprLoc()), CGF.convertType(Ty)); } mlir::Value CIRGenFunction::emitPromotedComplexExpr(const Expr *E, @@ -879,7 +879,7 @@ mlir::Value CIRGenFunction::emitPromotedValue(mlir::Value result, mlir::cast(result.getType()).getElementTy()) && "integral complex will never be promoted"); return builder.createCast(cir::CastKind::float_complex, result, - ConvertType(PromotionType)); + convertType(PromotionType)); } mlir::Value CIRGenFunction::emitUnPromotedValue(mlir::Value result, @@ -888,7 +888,7 @@ mlir::Value CIRGenFunction::emitUnPromotedValue(mlir::Value result, mlir::cast(result.getType()).getElementTy()) && "integral complex will never be promoted"); return builder.createCast(cir::CastKind::float_complex, result, - ConvertType(UnPromotionType)); + convertType(UnPromotionType)); } mlir::Value CIRGenFunction::emitComplexExpr(const Expr *E) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index ecdcd2066fb0..e5fda5a6bb15 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -841,7 +841,7 @@ bool ConstStructBuilder::ApplyZeroInitPadding( mlir::Attribute ConstStructBuilder::Finalize(QualType Type) { Type = Type.getNonReferenceType(); RecordDecl *RD = Type->castAs()->getDecl(); - mlir::Type ValTy = CGM.getTypes().ConvertType(Type); + mlir::Type ValTy = CGM.convertType(Type); return Builder.build(ValTy, RD->hasFlexibleArrayMember()); } @@ -1069,7 +1069,7 @@ class ConstExprEmitter return {}; } - auto desiredType = CGM.getTypes().ConvertType(T); + auto desiredType = CGM.convertType(T); // FIXME(cir): A hack to handle the emission of arrays of unions directly. // See clang/test/CIR/CodeGen/union-array.c and // clang/test/CIR/Lowering/nested-union-array.c for example. The root @@ -1140,8 +1140,7 @@ class ConstExprEmitter } mlir::Attribute EmitVectorInitialization(InitListExpr *ILE, QualType T) { - cir::VectorType VecTy = - mlir::cast(CGM.getTypes().ConvertType(T)); + cir::VectorType VecTy = mlir::cast(CGM.convertType(T)); unsigned NumElements = VecTy.getSize(); unsigned NumInits = ILE->getNumInits(); assert(NumElements >= NumInits && "Too many initializers for a vector"); @@ -1164,7 +1163,7 @@ class ConstExprEmitter mlir::Attribute VisitImplicitValueInitExpr(ImplicitValueInitExpr *E, QualType T) { - return CGM.getBuilder().getZeroInitAttr(CGM.getCIRType(T)); + return CGM.getBuilder().getZeroInitAttr(CGM.convertType(T)); } mlir::Attribute VisitInitListExpr(InitListExpr *ILE, QualType T) { @@ -1215,7 +1214,7 @@ class ConstExprEmitter return nullptr; } - return CGM.getBuilder().getZeroInitAttr(CGM.getCIRType(Ty)); + return CGM.getBuilder().getZeroInitAttr(CGM.convertType(Ty)); } mlir::Attribute VisitStringLiteral(StringLiteral *E, QualType T) { @@ -1233,7 +1232,7 @@ class ConstExprEmitter } // Utility methods - mlir::Type ConvertType(QualType T) { return CGM.getTypes().ConvertType(T); } + mlir::Type convertType(QualType T) { return CGM.convertType(T); } }; static mlir::Attribute @@ -1695,7 +1694,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) { // be a problem for the near future. if (CD->isTrivial() && CD->isDefaultConstructor()) return cir::ZeroAttr::get(CGM.getBuilder().getContext(), - CGM.getTypes().ConvertType(D.getType())); + CGM.convertType(D.getType())); } } InConstantContext = D.hasConstantInitialization(); @@ -1854,7 +1853,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, // 'undef'. Find out what's better for CIR. assert(0 && "not implemented"); case APValue::Int: { - mlir::Type ty = CGM.getCIRType(DestType); + mlir::Type ty = CGM.convertType(DestType); if (mlir::isa(ty)) return builder.getCIRBoolAttr(Value.getInt().getZExtValue()); assert(mlir::isa(ty) && "expected integral type"); @@ -1867,7 +1866,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, CGM.getASTContext().getTargetInfo().useFP16ConversionIntrinsics()) assert(0 && "not implemented"); else { - mlir::Type ty = CGM.getCIRType(DestType); + mlir::Type ty = CGM.convertType(DestType); assert(mlir::isa(ty) && "expected floating-point type"); return CGM.getBuilder().getAttr(ty, Init); @@ -1915,7 +1914,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, Elts.push_back(typedC); } - auto Desired = CGM.getTypes().ConvertType(DestType); + auto Desired = CGM.convertType(DestType); auto typedFiller = llvm::dyn_cast_or_null(Filler); if (Filler && !typedFiller) @@ -1936,8 +1935,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, return {}; Elts.push_back(C); } - auto Desired = - mlir::cast(CGM.getTypes().ConvertType(DestType)); + auto Desired = mlir::cast(CGM.convertType(DestType)); return cir::ConstVectorAttr::get( Desired, mlir::ArrayAttr::get(CGM.getBuilder().getContext(), Elts)); } @@ -1950,8 +1948,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value, if (const auto *memberFuncDecl = dyn_cast(memberDecl)) assert(0 && "not implemented"); - auto cirTy = - mlir::cast(CGM.getTypes().ConvertType(DestType)); + auto cirTy = mlir::cast(CGM.convertType(DestType)); const auto *fieldDecl = cast(memberDecl); return builder.getDataMemberAttr(cirTy, fieldDecl->getFieldIndex()); @@ -2002,7 +1999,7 @@ mlir::Value CIRGenModule::emitMemberPointerConstant(const UnaryOperator *E) { // A member function pointer. if (const auto *methodDecl = dyn_cast(decl)) { - auto ty = mlir::cast(getCIRType(E->getType())); + auto ty = mlir::cast(convertType(E->getType())); if (methodDecl->isVirtual()) return builder.create( loc, ty, getCXXABI().buildVirtualMethodAttr(ty, methodDecl)); @@ -2012,7 +2009,7 @@ mlir::Value CIRGenModule::emitMemberPointerConstant(const UnaryOperator *E) { loc, ty, builder.getMethodAttr(ty, methodFuncOp)); } - auto ty = mlir::cast(getCIRType(E->getType())); + auto ty = mlir::cast(convertType(E->getType())); // Otherwise, a member data pointer. const auto *fieldDecl = cast(decl); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 2f0d83869731..c0b6ac3c78e7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -107,7 +107,7 @@ class ScalarExprEmitter : public StmtVisitor { return I; } - mlir::Type ConvertType(QualType T) { return CGF.ConvertType(T); } + mlir::Type convertType(QualType T) { return CGF.convertType(T); } LValue emitLValue(const Expr *E) { return CGF.emitLValue(E); } LValue emitCheckedLValue(const Expr *E, CIRGenFunction::TypeCheckKind TCK) { return CGF.emitCheckedLValue(E, TCK); @@ -120,11 +120,11 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value emitNullValue(QualType Ty, mlir::Location loc); mlir::Value emitPromotedValue(mlir::Value result, QualType PromotionType) { - return Builder.createFloatingCast(result, ConvertType(PromotionType)); + return Builder.createFloatingCast(result, convertType(PromotionType)); } mlir::Value emitUnPromotedValue(mlir::Value result, QualType ExprType) { - return Builder.createFloatingCast(result, ConvertType(ExprType)); + return Builder.createFloatingCast(result, convertType(ExprType)); } mlir::Value emitPromoted(const Expr *E, QualType PromotionType); @@ -172,7 +172,7 @@ class ScalarExprEmitter : public StmtVisitor { // Leaves. mlir::Value VisitIntegerLiteral(const IntegerLiteral *E) { - mlir::Type Ty = CGF.getCIRType(E->getType()); + mlir::Type Ty = CGF.convertType(E->getType()); return Builder.create( CGF.getLoc(E->getExprLoc()), Ty, Builder.getAttr(Ty, E->getValue())); @@ -182,7 +182,7 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitFloatingLiteral(const FloatingLiteral *E) { - mlir::Type Ty = CGF.getCIRType(E->getType()); + mlir::Type Ty = CGF.convertType(E->getType()); assert(mlir::isa(Ty) && "expect floating-point type"); return Builder.create( @@ -190,7 +190,7 @@ class ScalarExprEmitter : public StmtVisitor { Builder.getAttr(Ty, E->getValue())); } mlir::Value VisitCharacterLiteral(const CharacterLiteral *E) { - mlir::Type Ty = CGF.getCIRType(E->getType()); + mlir::Type Ty = CGF.convertType(E->getType()); auto loc = CGF.getLoc(E->getExprLoc()); auto init = cir::IntAttr::get(Ty, E->getValue()); return Builder.create(loc, Ty, init); @@ -199,7 +199,7 @@ class ScalarExprEmitter : public StmtVisitor { llvm_unreachable("NYI"); } mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { - mlir::Type Ty = CGF.getCIRType(E->getType()); + mlir::Type Ty = CGF.convertType(E->getType()); return Builder.create( CGF.getLoc(E->getExprLoc()), Ty, Builder.getCIRBoolAttr(E->getValue())); } @@ -323,7 +323,7 @@ class ScalarExprEmitter : public StmtVisitor { .getSExtValue())); } return CGF.builder.create( - CGF.getLoc(E->getSourceRange()), CGF.getCIRType(E->getType()), Vec1, + CGF.getLoc(E->getSourceRange()), CGF.convertType(E->getType()), Vec1, Vec2, CGF.builder.getArrayAttr(Indices)); } } @@ -420,7 +420,7 @@ class ScalarExprEmitter : public StmtVisitor { // Decrement does not have this property. if (isInc && type->isBooleanType()) { value = Builder.create(CGF.getLoc(E->getExprLoc()), - CGF.getCIRType(type), + CGF.convertType(type), Builder.getCIRBoolAttr(true)); } else if (type->isIntegerType()) { QualType promotedType; @@ -438,8 +438,8 @@ class ScalarExprEmitter : public StmtVisitor { // TODO(cir): Currently, we store bitwidths in CIR types only for // integers. This might also be required for other types. - auto srcCirTy = mlir::dyn_cast(ConvertType(type)); - auto promotedCirTy = mlir::dyn_cast(ConvertType(type)); + auto srcCirTy = mlir::dyn_cast(convertType(type)); + auto promotedCirTy = mlir::dyn_cast(convertType(type)); assert(srcCirTy && promotedCirTy && "Expected integer type"); assert( @@ -948,7 +948,7 @@ class ScalarExprEmitter : public StmtVisitor { // a vector. cir::CmpOpKind Kind = ClangCmpToCIRCmp(E->getOpcode()); Result = Builder.create( - CGF.getLoc(BOInfo.Loc), CGF.getCIRType(BOInfo.FullType), Kind, + CGF.getLoc(BOInfo.Loc), CGF.convertType(BOInfo.FullType), Kind, BOInfo.LHS, BOInfo.RHS); } } else if (BOInfo.isFixedPointOp()) { @@ -987,7 +987,7 @@ class ScalarExprEmitter : public StmtVisitor { // as a logical value again. // TODO: optimize this common case here or leave it for later // CIR passes? - mlir::Type boolTy = CGF.getCIRType(CGF.getContext().BoolTy); + mlir::Type boolTy = CGF.convertType(CGF.getContext().BoolTy); return Builder.create(loc, boolTy, cir::CastKind::int_to_bool, srcVal); } @@ -1047,7 +1047,7 @@ class ScalarExprEmitter : public StmtVisitor { if (DstType->isBooleanType()) return emitConversionToBool(Src, SrcType, CGF.getLoc(Loc)); - mlir::Type DstTy = ConvertType(DstType); + mlir::Type DstTy = convertType(DstType); // Cast from half through float if half isn't a native type. if (SrcType->isHalfType() && @@ -1378,17 +1378,17 @@ mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &Ops) { llvm_unreachable("NYI"); return Builder.create(CGF.getLoc(Ops.Loc), - CGF.getCIRType(Ops.FullType), + CGF.convertType(Ops.FullType), cir::BinOpKind::Mul, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), - CGF.getCIRType(Ops.FullType), + CGF.convertType(Ops.FullType), cir::BinOpKind::Div, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), - CGF.getCIRType(Ops.FullType), + CGF.convertType(Ops.FullType), cir::BinOpKind::Rem, Ops.LHS, Ops.RHS); } @@ -1431,7 +1431,7 @@ mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &Ops) { llvm_unreachable("NYI"); return Builder.create(CGF.getLoc(Ops.Loc), - CGF.getCIRType(Ops.FullType), + CGF.convertType(Ops.FullType), cir::BinOpKind::Add, Ops.LHS, Ops.RHS); } @@ -1474,7 +1474,7 @@ mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &Ops) { llvm_unreachable("NYI"); return Builder.create(CGF.getLoc(Ops.Loc), - CGF.getCIRType(Ops.FullType), + CGF.convertType(Ops.FullType), cir::BinOpKind::Sub, Ops.LHS, Ops.RHS); } @@ -1524,7 +1524,7 @@ mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &Ops) { } return Builder.create(CGF.getLoc(Ops.Loc), - CGF.getCIRType(Ops.FullType), Ops.LHS, + CGF.convertType(Ops.FullType), Ops.LHS, Ops.RHS, CGF.getBuilder().getUnitAttr()); } @@ -1548,22 +1548,22 @@ mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &Ops) { // Note that we don't need to distinguish unsigned treatment at this // point since it will be handled later by LLVM lowering. return Builder.create( - CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), Ops.LHS, Ops.RHS); + CGF.getLoc(Ops.Loc), CGF.convertType(Ops.FullType), Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), - CGF.getCIRType(Ops.FullType), + CGF.convertType(Ops.FullType), cir::BinOpKind::And, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), - CGF.getCIRType(Ops.FullType), + CGF.convertType(Ops.FullType), cir::BinOpKind::Xor, Ops.LHS, Ops.RHS); } mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &Ops) { return Builder.create(CGF.getLoc(Ops.Loc), - CGF.getCIRType(Ops.FullType), + CGF.convertType(Ops.FullType), cir::BinOpKind::Or, Ops.LHS, Ops.RHS); } @@ -1666,7 +1666,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { auto DestAS = CGF.builder.getAddrSpaceAttr( DestTy->getPointeeType().getAddressSpace()); return CGF.CGM.getTargetCIRGenInfo().performAddrSpaceCast( - CGF, Visit(E), SrcAS, DestAS, ConvertType(DestTy)); + CGF, Visit(E), SrcAS, DestAS, convertType(DestTy)); } case CK_AtomicToNonAtomic: llvm_unreachable("NYI"); @@ -1722,7 +1722,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // FIXME: use MustVisitNullValue(E) and evaluate expr. // Note that DestTy is used as the MLIR type instead of a custom // nullptr type. - mlir::Type Ty = CGF.getCIRType(DestTy); + mlir::Type Ty = CGF.convertType(DestTy); return Builder.getNullPtr(Ty, CGF.getLoc(E->getExprLoc())); } @@ -1734,11 +1734,11 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { const MemberPointerType *MPT = CE->getType()->getAs(); if (MPT->isMemberFunctionPointerType()) { - auto Ty = mlir::cast(CGF.getCIRType(DestTy)); + auto Ty = mlir::cast(CGF.convertType(DestTy)); return Builder.getNullMethodPtr(Ty, CGF.getLoc(E->getExprLoc())); } - auto Ty = mlir::cast(CGF.getCIRType(DestTy)); + auto Ty = mlir::cast(CGF.convertType(DestTy)); return Builder.getNullDataMemberPtr(Ty, CGF.getLoc(E->getExprLoc())); } case CK_ReinterpretMemberPointer: @@ -1759,7 +1759,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { llvm_unreachable("NYI"); mlir::Location loc = CGF.getLoc(E->getExprLoc()); - mlir::Type resultTy = CGF.getCIRType(DestTy); + mlir::Type resultTy = CGF.convertType(DestTy); mlir::IntegerAttr offsetAttr = Builder.getIndexAttr(offset.getQuantity()); if (Kind == CK_BaseToDerivedMemberPointer) @@ -1798,7 +1798,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { return Visit(const_cast(E)); case CK_IntegralToPointer: { - auto DestCIRTy = ConvertType(DestTy); + auto DestCIRTy = convertType(DestTy); mlir::Value Src = Visit(const_cast(E)); // Properly resize by casting to an int of the same size as the pointer. @@ -1820,7 +1820,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { assert(!DestTy->isBooleanType() && "bool should use PointerToBool"); if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) llvm_unreachable("NYI"); - return Builder.createPtrToInt(Visit(E), ConvertType(DestTy)); + return Builder.createPtrToInt(Visit(E), convertType(DestTy)); } case CK_ToVoid: { CGF.emitIgnoredExpr(E); @@ -1832,7 +1832,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // Create a vector object and fill all elements with the same scalar value. assert(DestTy->isVectorType() && "CK_VectorSplat to non-vector type"); return CGF.getBuilder().create( - CGF.getLoc(E->getSourceRange()), CGF.getCIRType(DestTy), Visit(E)); + CGF.getLoc(E->getSourceRange()), CGF.convertType(DestTy), Visit(E)); } case CK_FixedPointCast: llvm_unreachable("NYI"); @@ -1945,14 +1945,14 @@ mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value Src, auto Kind = ComplexElemTy->isFloatingType() ? cir::CastKind::float_complex_to_bool : cir::CastKind::int_complex_to_bool; - return builder.createCast(getLoc(Loc), Kind, Src, ConvertType(DstTy)); + return builder.createCast(getLoc(Loc), Kind, Src, convertType(DstTy)); } auto Kind = ComplexElemTy->isFloatingType() ? cir::CastKind::float_complex_to_real : cir::CastKind::int_complex_to_real; auto Real = - builder.createCast(getLoc(Loc), Kind, Src, ConvertType(ComplexElemTy)); + builder.createCast(getLoc(Loc), Kind, Src, convertType(ComplexElemTy)); return emitScalarConversion(Real, ComplexElemTy, DstTy, Loc); } @@ -1984,7 +1984,7 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { "NYI: scalable vector init"); assert(!cir::MissingFeatures::vectorConstants() && "NYI: vector constants"); auto VectorType = - mlir::dyn_cast(CGF.getCIRType(E->getType())); + mlir::dyn_cast(CGF.convertType(E->getType())); SmallVector Elements; for (Expr *init : E->inits()) { Elements.push_back(Visit(init)); @@ -2025,7 +2025,7 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { boolVal = Builder.createNot(boolVal); // ZExt result to the expr type. - auto dstTy = ConvertType(E->getType()); + auto dstTy = convertType(E->getType()); if (mlir::isa(dstTy)) return Builder.createBoolToInt(boolVal, dstTy); if (mlir::isa(dstTy)) @@ -2287,7 +2287,7 @@ mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location Loc, llvm_unreachable("invalid complex-to-scalar cast kind"); } - return Builder.createCast(Loc, CastOpKind, V, CGF.ConvertType(DestTy)); + return Builder.createCast(Loc, CastOpKind, V, CGF.convertType(DestTy)); } mlir::Value ScalarExprEmitter::emitNullValue(QualType Ty, mlir::Location loc) { @@ -2620,7 +2620,7 @@ mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { } bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr(); - mlir::Type ResTy = ConvertType(E->getType()); + mlir::Type ResTy = convertType(E->getType()); mlir::Location Loc = CGF.getLoc(E->getExprLoc()); // If we have 0 && RHS, see if we can elide RHS, if so, just return 0. @@ -2692,7 +2692,7 @@ mlir::Value ScalarExprEmitter::VisitBinLOr(const clang::BinaryOperator *E) { } bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr(); - mlir::Type ResTy = ConvertType(E->getType()); + mlir::Type ResTy = convertType(E->getType()); mlir::Location Loc = CGF.getLoc(E->getExprLoc()); // If we have 1 || RHS, see if we can elide RHS, if so, just return 1. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index cba4d0f1a75d..4d33c40e38e6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -68,10 +68,6 @@ clang::ASTContext &CIRGenFunction::getContext() const { return CGM.getASTContext(); } -mlir::Type CIRGenFunction::ConvertType(QualType T) { - return CGM.getTypes().ConvertType(T); -} - cir::TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) { type = type.getCanonicalType(); while (true) { @@ -137,7 +133,7 @@ mlir::Type CIRGenFunction::convertTypeForMem(QualType T) { } mlir::Type CIRGenFunction::convertType(QualType T) { - return CGM.getTypes().ConvertType(T); + return CGM.getTypes().convertType(T); } mlir::Location CIRGenFunction::getLoc(SourceLocation SLoc) { @@ -250,10 +246,6 @@ bool CIRGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, return true; } -mlir::Type CIRGenFunction::getCIRType(const QualType &type) { - return CGM.getCIRType(type); -} - /// Determine whether the function F ends with a return stmt. static bool endsWithReturn(const Decl *F) { const Stmt *Body = nullptr; @@ -606,7 +598,7 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, FnRetQualTy = FD->getReturnType(); if (!FnRetQualTy->isVoidType()) - FnRetCIRTy = getCIRType(FnRetQualTy); + FnRetCIRTy = convertType(FnRetQualTy); FunctionArgList Args; QualType ResTy = buildFunctionArgList(GD, Args); @@ -831,7 +823,7 @@ LValue CIRGenFunction::MakeNaturalAlignAddrLValue(mlir::Value val, LValueBaseInfo baseInfo; TBAAAccessInfo tbaaInfo; CharUnits alignment = CGM.getNaturalTypeAlignment(ty, &baseInfo, &tbaaInfo); - Address addr(val, getTypes().convertTypeForMem(ty), alignment); + Address addr(val, convertTypeForMem(ty), alignment); return LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); } @@ -889,7 +881,7 @@ static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &astContext) { /// declared type. static mlir::Value emitArgumentDemotion(CIRGenFunction &CGF, const VarDecl *var, mlir::Value value) { - mlir::Type ty = CGF.ConvertType(var->getType()); + mlir::Type ty = CGF.convertType(var->getType()); // This can happen with promotions that actually don't change the // underlying type, like the enum promotions. @@ -1494,8 +1486,7 @@ void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address DestPtr, // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the // respective address. // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false); - builder.createStore(loc, builder.getZero(loc, getTypes().ConvertType(Ty)), - DestPtr); + builder.createStore(loc, builder.getZero(loc, convertType(Ty)), DestPtr); } CIRGenFunction::CIRGenFPOptionsRAII::CIRGenFPOptionsRAII(CIRGenFunction &CGF, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 764c20aaa152..38ea6a407884 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -569,9 +569,9 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Type convertTypeForMem(QualType T); - mlir::Type ConvertType(clang::QualType T); - mlir::Type ConvertType(const TypeDecl *T) { - return ConvertType(getContext().getTypeDeclType(T)); + mlir::Type convertType(clang::QualType T); + mlir::Type convertType(const TypeDecl *T) { + return convertType(getContext().getTypeDeclType(T)); } /// Return the cir::TypeEvaluationKind of QualType \c T. @@ -1113,8 +1113,6 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value emitFromMemory(mlir::Value Value, clang::QualType Ty); - mlir::Type convertType(clang::QualType T); - mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &S); std::pair @@ -1241,8 +1239,6 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value emitPromotedValue(mlir::Value result, QualType PromotionType); mlir::Value emitUnPromotedValue(mlir::Value result, QualType PromotionType); - mlir::Type getCIRType(const clang::QualType &type); - const CaseStmt *foldCaseStmt(const clang::CaseStmt &S, mlir::Type condType, mlir::ArrayAttr &value, cir::CaseOpKind &kind); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index ea5cd755ec26..43341ef0f1ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -1678,8 +1678,8 @@ CIRGenItaniumRTTIBuilder::GetAddrOfTypeName(mlir::Location loc, QualType Ty, // We know that the mangled name of the type starts at index 4 of the // mangled name of the typename, so we can just index into it in order to // get the mangled name of the type. - auto Init = builder.getString( - Name.substr(4), CGM.getTypes().ConvertType(CGM.getASTContext().CharTy)); + auto Init = builder.getString(Name.substr(4), + CGM.convertType(CGM.getASTContext().CharTy)); auto Align = CGM.getASTContext().getTypeAlignInChars(CGM.getASTContext().CharTy); @@ -1770,8 +1770,7 @@ static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) { /// constraints, according to the Itanium C++ ABI, 2.9.5p5c. void CIRGenItaniumRTTIBuilder::BuildVMIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *RD) { - auto UnsignedIntLTy = - CGM.getTypes().ConvertType(CGM.getASTContext().UnsignedIntTy); + auto UnsignedIntLTy = CGM.convertType(CGM.getASTContext().UnsignedIntTy); // Itanium C++ ABI 2.9.5p6c: // __flags is a word with flags describing details about the class // structure, which may be referenced by using the __flags_masks @@ -1815,7 +1814,7 @@ void CIRGenItaniumRTTIBuilder::BuildVMIClassTypeInfo(mlir::Location loc, if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(LangAS::Default) > TI.getLongWidth()) OffsetFlagsTy = CGM.getASTContext().LongLongTy; - auto OffsetFlagsLTy = CGM.getTypes().ConvertType(OffsetFlagsTy); + auto OffsetFlagsLTy = CGM.convertType(OffsetFlagsTy); for (const auto &Base : RD->bases()) { // The __base_type member points to the RTTI for the base type. @@ -2241,7 +2240,7 @@ void CIRGenItaniumCXXABI::emitThrow(CIRGenFunction &CGF, // Now allocate the exception object. auto &builder = CGF.getBuilder(); QualType clangThrowType = E->getSubExpr()->getType(); - auto throwTy = builder.getPointerTo(CGF.ConvertType(clangThrowType)); + auto throwTy = builder.getPointerTo(CGF.convertType(clangThrowType)); uint64_t typeSize = CGF.getContext().getTypeSizeInChars(clangThrowType).getQuantity(); auto subExprLoc = CGF.getLoc(E->getSubExpr()->getSourceRange()); @@ -2407,7 +2406,7 @@ static cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) { mlir::Type VoidPtrTy = CGF.VoidPtrTy; mlir::Type RTTIPtrTy = CGF.getBuilder().getUInt8PtrTy(); - mlir::Type PtrDiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); + mlir::Type PtrDiffTy = CGF.convertType(CGF.getContext().getPointerDiffType()); // TODO(cir): mark the function as nowind readonly. @@ -2579,7 +2578,7 @@ static cir::DynamicCastInfoAttr emitDynamicCastInfo(CIRGenFunction &CGF, const CXXRecordDecl *destDecl = DestRecordTy->getAsCXXRecordDecl(); auto offsetHint = computeOffsetHint(CGF.getContext(), srcDecl, destDecl); - mlir::Type ptrdiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); + mlir::Type ptrdiffTy = CGF.convertType(CGF.getContext().getPointerDiffType()); auto offsetHintAttr = cir::IntAttr::get(ptrdiffTy, offsetHint.getQuantity()); return cir::DynamicCastInfoAttr::get(srcRtti, destRtti, runtimeFuncRef, diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 085e2c237ee7..e4a7951361a0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1211,7 +1211,7 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *D, // exists. A use may still exists, however, so we still may need // to do a RAUW. assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type"); - Init = builder.getZeroInitAttr(getCIRType(D->getType())); + Init = builder.getZeroInitAttr(convertType(D->getType())); } else { initializedGlobalDecl = GlobalDecl(D); emitter.emplace(*this); @@ -1224,7 +1224,7 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *D, if (getLangOpts().CPlusPlus) { if (InitDecl->hasFlexibleArrayInit(astContext)) ErrorUnsupported(D, "flexible array initializer"); - Init = builder.getZeroInitAttr(getCIRType(T)); + Init = builder.getZeroInitAttr(convertType(T)); if (!IsDefinitionAvailableExternally) NeedsGlobalCtor = true; } else { @@ -1442,12 +1442,11 @@ CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *E) { auto finalSize = CAT->getSize().getZExtValue(); Str.resize(finalSize); - auto eltTy = getTypes().ConvertType(CAT->getElementType()); + auto eltTy = convertType(CAT->getElementType()); return builder.getString(Str, eltTy, finalSize); } - auto arrayTy = - mlir::dyn_cast(getTypes().ConvertType(E->getType())); + auto arrayTy = mlir::dyn_cast(convertType(E->getType())); assert(arrayTy && "string literals must be emitted as an array type"); auto arrayEltTy = mlir::dyn_cast(arrayTy.getEltType()); @@ -2182,8 +2181,8 @@ void CIRGenModule::emitAliasForGlobal(StringRef mangledName, setCommonAttributes(aliasGD, alias); } -mlir::Type CIRGenModule::getCIRType(const QualType &type) { - return genTypes.ConvertType(type); +mlir::Type CIRGenModule::convertType(QualType type) { + return genTypes.convertType(type); } bool CIRGenModule::verifyModule() { @@ -2228,7 +2227,7 @@ cir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, if (!Ty) { const auto *FD = cast(GD.getDecl()); - Ty = getTypes().ConvertType(FD->getType()); + Ty = convertType(FD->getType()); } // Devirtualized destructor calls may come through here instead of via diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index a2c736455013..afaeea3eb094 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -634,7 +634,7 @@ class CIRGenModule : public CIRGenTypeCache { GlobalDecl aliasGD, cir::FuncOp aliasee, cir::GlobalLinkageKind linkage); - mlir::Type getCIRType(const clang::QualType &type); + mlir::Type convertType(clang::QualType type); /// Set the visibility for the given global. void setGlobalVisibility(mlir::Operation *Op, const NamedDecl *D) const; diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp index 3db01cab6659..e59f96c1b403 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp @@ -204,7 +204,7 @@ void CIRGenFunction::emitKernelMetadata(const FunctionDecl *FD, mlir::IntegerAttr intelReqdSubGroupSizeAttr; if (const VecTypeHintAttr *A = FD->getAttr()) { - mlir::Type typeHintValue = getTypes().ConvertType(A->getTypeHint()); + mlir::Type typeHintValue = convertType(A->getTypeHint()); vecTypeHintAttr = mlir::TypeAttr::get(typeHintValue); vecTypeHintSignedness = OpenCLKernelMetadataAttr::isSignedHint(typeHintValue); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 35db7731f42e..916010a4f19c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -251,7 +251,7 @@ mlir::Type CIRGenTypes::convertTypeForMem(clang::QualType qualType, bool forBitField) { assert(!qualType->isConstantMatrixType() && "Matrix types NYI"); - mlir::Type convertedType = ConvertType(qualType); + mlir::Type convertedType = convertType(qualType); assert(!forBitField && "Bit fields NYI"); @@ -267,7 +267,7 @@ mlir::MLIRContext &CIRGenTypes::getMLIRContext() const { return *Builder.getContext(); } -mlir::Type CIRGenTypes::ConvertFunctionTypeInternal(QualType QFT) { +mlir::Type CIRGenTypes::convertFunctionTypeInternal(QualType QFT) { assert(QFT.isCanonical()); const Type *Ty = QFT.getTypePtr(); const FunctionType *FT = cast(QFT.getTypePtr()); @@ -341,8 +341,8 @@ bool CIRGenTypes::isFuncTypeConvertible(const FunctionType *FT) { return true; } -/// ConvertType - Convert the specified type to its MLIR form. -mlir::Type CIRGenTypes::ConvertType(QualType T) { +/// convertType - Convert the specified type to its MLIR form. +mlir::Type CIRGenTypes::convertType(QualType T) { T = astContext.getCanonicalType(T); const Type *Ty = T.getTypePtr(); @@ -601,7 +601,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { llvm_unreachable("Unexpected undeduced type!"); case Type::Complex: { const ComplexType *CT = cast(Ty); - auto ElementTy = ConvertType(CT->getElementType()); + auto ElementTy = convertType(CT->getElementType()); ResultType = cir::ComplexType::get(Builder.getContext(), ElementTy); break; } @@ -619,7 +619,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { QualType ETy = PTy->getPointeeType(); assert(!ETy->isConstantMatrixType() && "not implemented"); - mlir::Type PointeeType = ConvertType(ETy); + mlir::Type PointeeType = convertType(ETy); // Treat effectively as a *i8. // if (PointeeType->isVoidTy()) @@ -678,7 +678,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { } case Type::FunctionNoProto: case Type::FunctionProto: - ResultType = ConvertFunctionTypeInternal(T); + ResultType = convertFunctionTypeInternal(T); break; case Type::ObjCObject: assert(0 && "not implemented"); @@ -697,7 +697,7 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case Type::Enum: { const EnumDecl *ED = cast(Ty)->getDecl(); if (ED->isCompleteDefinition() || ED->isFixed()) - return ConvertType(ED->getIntegerType()); + return convertType(ED->getIntegerType()); // Return a placeholder 'i32' type. This can be changed later when the // type is defined (see UpdateCompletedType), but is likely to be the // "right" answer. @@ -713,9 +713,9 @@ mlir::Type CIRGenTypes::ConvertType(QualType T) { case Type::MemberPointer: { const auto *MPT = cast(Ty); - auto memberTy = ConvertType(MPT->getPointeeType()); + auto memberTy = convertType(MPT->getPointeeType()); auto clsTy = - mlir::cast(ConvertType(QualType(MPT->getClass(), 0))); + mlir::cast(convertType(QualType(MPT->getClass(), 0))); if (MPT->isMemberDataPointer()) ResultType = cir::DataMemberType::get(Builder.getContext(), memberTy, clsTy); @@ -804,11 +804,11 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeCIRFunctionInfo( // default now. cir::ABIArgInfo &retInfo = FI->getReturnInfo(); if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) - retInfo.setCoerceToType(ConvertType(FI->getReturnType())); + retInfo.setCoerceToType(convertType(FI->getReturnType())); for (auto &I : FI->arguments()) if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) - I.info.setCoerceToType(ConvertType(I.type)); + I.info.setCoerceToType(convertType(I.type)); bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; @@ -841,7 +841,7 @@ void CIRGenTypes::UpdateCompletedType(const TagDecl *TD) { // Okay, we formed some types based on this. We speculated that the enum // would be lowered to i32, so we only need to flush the cache if this // didn't happen. - if (!ConvertType(ED->getIntegerType()).isInteger(32)) + if (!convertType(ED->getIntegerType()).isInteger(32)) TypeCache.clear(); } // If necessary, provide the full definition of a type only used with a diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index c862d3232086..1d01a56cfa41 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -112,8 +112,8 @@ class CIRGenTypes { llvm::SmallVector DeferredRecords; - /// Heper for ConvertType. - mlir::Type ConvertFunctionTypeInternal(clang::QualType FT); + /// Heper for convertType. + mlir::Type convertFunctionTypeInternal(clang::QualType FT); public: CIRGenTypes(CIRGenModule &cgm); @@ -165,7 +165,7 @@ class CIRGenTypes { CIRGenCXXABI &getCXXABI() const { return TheCXXABI; } /// Convert type T into a mlir::Type. - mlir::Type ConvertType(clang::QualType T); + mlir::Type convertType(clang::QualType T); mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl); diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 855a0208d8d4..cadfe76a717c 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -129,13 +129,13 @@ class AArch64ABIInfo : public ABIInfo { if (testIfIsVoidTy(it->type)) it->info = cir::ABIArgInfo::getIgnore(); else - it->info = cir::ABIArgInfo::getDirect(CGT.ConvertType(it->type)); + it->info = cir::ABIArgInfo::getDirect(CGT.convertType(it->type)); } auto RetTy = FI.getReturnType(); if (testIfIsVoidTy(RetTy)) FI.getReturnInfo() = cir::ABIArgInfo::getIgnore(); else - FI.getReturnInfo() = cir::ABIArgInfo::getDirect(CGT.ConvertType(RetTy)); + FI.getReturnInfo() = cir::ABIArgInfo::getDirect(CGT.convertType(RetTy)); return; } @@ -334,13 +334,13 @@ void X86_64ABIInfo::computeInfo(CIRGenFunctionInfo &FI) const { if (testIfIsVoidTy(it->type)) it->info = cir::ABIArgInfo::getIgnore(); else - it->info = cir::ABIArgInfo::getDirect(CGT.ConvertType(it->type)); + it->info = cir::ABIArgInfo::getDirect(CGT.convertType(it->type)); } auto RetTy = FI.getReturnType(); if (testIfIsVoidTy(RetTy)) FI.getReturnInfo() = cir::ABIArgInfo::getIgnore(); else - FI.getReturnInfo() = cir::ABIArgInfo::getDirect(CGT.ConvertType(RetTy)); + FI.getReturnInfo() = cir::ABIArgInfo::getDirect(CGT.convertType(RetTy)); } /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in @@ -397,7 +397,7 @@ cir::ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ++neededInt; // Pick an 8-byte type based on the preferred type. - ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); + ResType = GetINTEGERTypeAtOffset(CGT.convertType(Ty), 0, Ty, 0); // If we have a sign or zero extended integer, make sure to return Extend so // that the parameter gets the right LLVM IR attributes. @@ -414,7 +414,7 @@ cir::ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, // register is used, the registers are taken in the order from %xmm0 to // %xmm7. case Class::SSE: { - mlir::Type CIRType = CGT.ConvertType(Ty); + mlir::Type CIRType = CGT.convertType(Ty); ResType = GetSSETypeAtOffset(CIRType, 0, Ty, 0); ++neededSSE; break; @@ -527,7 +527,7 @@ cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next available // register of the sequence %rax, %rdx is used. case Class::Integer: - ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); + ResType = GetINTEGERTypeAtOffset(CGT.convertType(RetTy), 0, RetTy, 0); // If we have a sign or zero extended integer, make sure to return Extend so // that the parameter gets the right LLVM IR attributes. @@ -547,7 +547,7 @@ cir::ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const { // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next available SSE // register of the sequence %xmm0, %xmm1 is used. case Class::SSE: - ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); + ResType = GetSSETypeAtOffset(CGT.convertType(RetTy), 0, RetTy, 0); break; default: From 4f7b5ec46207ec25c13450e4b4a936967c7f9129 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Thu, 9 Jan 2025 15:19:43 +0100 Subject: [PATCH 2202/2301] [CIR][CIRGen][Builtin][Neon] Lower vabsh_f16 (#1269) Lower `vabsh_f16` CG: https://github.com/llvm/clangir/blob/25b269e5fed544e1da37b72bc32315c0ae0c5aa6/clang/lib/CodeGen/CGBuiltin.cpp#L11975-L11977 --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 6 ++++-- clang/test/CIR/CodeGen/AArch64/neon-fp16.c | 16 ++++++++++------ 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index f030ebc7b1ca..3eb0dee23f7e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3555,8 +3555,10 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, switch (BuiltinID) { default: break; - case NEON::BI__builtin_neon_vabsh_f16: - llvm_unreachable("NEON::BI__builtin_neon_vabsh_f16 NYI"); + case NEON::BI__builtin_neon_vabsh_f16: { + Ops.push_back(emitScalarExpr(E->getArg(0))); + return builder.create(getLoc(E->getExprLoc()), Ops); + } case NEON::BI__builtin_neon_vaddq_p128: { llvm_unreachable("NEON::BI__builtin_neon_vaddq_p128 NYI"); } diff --git a/clang/test/CIR/CodeGen/AArch64/neon-fp16.c b/clang/test/CIR/CodeGen/AArch64/neon-fp16.c index 3d3d4c439f43..44738109eccd 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-fp16.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-fp16.c @@ -19,12 +19,16 @@ #include -// NYI-LABEL: test_vabsh_f16 -// NYI: [[ABS:%.*]] = call half @llvm.fabs.f16(half %a) -// NYI: ret half [[ABS]] -// float16_t test_vabsh_f16(float16_t a) { -// return vabsh_f16(a); -// } +// CIR-LABEL: vabsh_f16 +// CIR: {{%.*}} = cir.fabs {{%.*}} : !cir.f16 +// +// LLVM-LABEL: test_vabsh_f16 +// LLVM-SAME: (half [[a:%.]]) +// LLVM: [[ABS:%.*]] = call half @llvm.fabs.f16(half [[a]]) +// LLVM: ret half [[ABS]] +float16_t test_vabsh_f16(float16_t a) { + return vabsh_f16(a); +} // NYI-LABEL: test_vceqzh_f16 // NYI: [[TMP1:%.*]] = fcmp oeq half %a, 0xH0000 From c5bbd2e6e3047e413e1d54faf9f1fe09d0571cae Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 9 Jan 2025 12:33:30 -0300 Subject: [PATCH 2203/2301] [CIR][CIRGen][NFCI] Get more scope information to match OG Reland previously reverted attempt now that this passes ASANified `ninja check-clang-cir`. Original message: We are missing cleanups all around, more incremental progress towards fixing that. This is supposed to be NFC intended, but we have to start changing some bits in order to properly match cleanup bits in OG. Start tagging places with more MissingFeatures to allow us to incrementally improve the situation. --- clang/include/clang/CIR/MissingFeatures.h | 14 +- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 174 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 82 +++++++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 71 ++++--- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 41 +++-- .../CodeGen/builtin-constant-evaluated.cpp | 10 +- clang/test/CIR/CodeGen/lambda.cpp | 6 +- 8 files changed, 331 insertions(+), 69 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index c0707d687fca..8f9c5d3eff49 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -59,7 +59,6 @@ struct MissingFeatures { static bool emitTypeCheck() { return false; } static bool tbaa() { return false; } static bool tbaa_struct() { return false; } - static bool cleanups() { return false; } static bool emitNullabilityCheck() { return false; } static bool ptrAuth() { return false; } @@ -160,12 +159,22 @@ struct MissingFeatures { static bool fastMathFlags() { return false; } static bool fastMathFuncAttributes() { return false; } + // Cleanup + static bool cleanups() { return false; } + static bool simplifyCleanupEntry() { return false; } + static bool requiresCleanups() { return false; } + static bool cleanupBranchAfterSwitch() { return false; } + static bool cleanupAlwaysBranchThrough() { return false; } + static bool cleanupDestinationIndex() { return false; } + static bool cleanupDestroyNRVOVariable() { return false; } + static bool cleanupAppendInsts() { return false; } + static bool cleanupIndexAndBIAdjustment() { return false; } + // Exception handling static bool isSEHTryScope() { return false; } static bool ehStack() { return false; } static bool emitStartEHSpec() { return false; } static bool emitEndEHSpec() { return false; } - static bool simplifyCleanupEntry() { return false; } // Type qualifiers. static bool atomicTypes() { return false; } @@ -208,7 +217,6 @@ struct MissingFeatures { static bool addAutoInitAnnotation() { return false; } static bool addHeapAllocSiteMetadata() { return false; } static bool loopInfoStack() { return false; } - static bool requiresCleanups() { return false; } static bool constantFoldsToSimpleInteger() { return false; } static bool checkFunctionCallABI() { return false; } static bool zeroInitializer() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp index 40fc101d4c23..534fc2a59968 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp @@ -37,13 +37,12 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc, JumpDest Dest) { // Remove this once we go for making sure unreachable code is // well modeled (or not). - assert(builder.getInsertionBlock() && "not yet implemented"); assert(!cir::MissingFeatures::ehStack()); // Insert a branch: to the cleanup block (unsolved) or to the already // materialized label. Keep track of unsolved goto's. - auto brOp = builder.create( - Loc, Dest.isValid() ? Dest.getBlock() : ReturnBlock().getBlock()); + assert(Dest.getBlock() && "assumes incoming valid dest"); + auto brOp = builder.create(Loc, Dest.getBlock()); // Calculate the innermost active normal cleanup. EHScopeStack::stable_iterator TopCleanup = @@ -70,7 +69,33 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc, return brOp; } - // FIXME(cir): otherwise, thread through all the normal cleanups in scope. + // Otherwise, thread through all the normal cleanups in scope. + auto index = builder.getUInt32(Dest.getDestIndex(), Loc); + assert(!cir::MissingFeatures::cleanupIndexAndBIAdjustment()); + + // Add this destination to all the scopes involved. + EHScopeStack::stable_iterator I = TopCleanup; + EHScopeStack::stable_iterator E = Dest.getScopeDepth(); + if (E.strictlyEncloses(I)) { + while (true) { + EHCleanupScope &Scope = cast(*EHStack.find(I)); + assert(Scope.isNormalCleanup()); + I = Scope.getEnclosingNormalCleanup(); + + // If this is the last cleanup we're propagating through, tell it + // that there's a resolved jump moving through it. + if (!E.strictlyEncloses(I)) { + Scope.addBranchAfter(index, Dest.getBlock()); + break; + } + + // Otherwise, tell the scope that there's a jump propagating + // through it. If this isn't new information, all the rest of + // the work has been done before. + if (!Scope.addBranchThrough(Dest.getBlock())) + break; + } + } return brOp; } @@ -305,6 +330,18 @@ static void emitCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn, // No need to emit continuation block because CIR uses a cir.if. } +static mlir::Block *createNormalEntry(CIRGenFunction &cgf, + EHCleanupScope &scope) { + assert(scope.isNormalCleanup()); + mlir::Block *entry = scope.getNormalBlock(); + if (!entry) { + mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder()); + entry = cgf.currLexScope->getOrCreateCleanupBlock(cgf.getBuilder()); + scope.setNormalBlock(entry); + } + return entry; +} + /// Pops a cleanup block. If the block includes a normal cleanup, the /// current insertion point is threaded through the cleanup, as are /// any branch fixups on the cleanup. @@ -341,7 +378,8 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // - whether there's a fallthrough auto *FallthroughSource = builder.getInsertionBlock(); - bool HasFallthrough = (FallthroughSource != nullptr && IsActive); + bool HasFallthrough = + (FallthroughSource != nullptr && (IsActive || HasExistingBranches)); // Branch-through fall-throughs leave the insertion point set to the // end of the last cleanup, which points to the current scope. The @@ -442,7 +480,131 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // Otherwise, the best approach is to thread everything through // the cleanup block and then try to clean up after ourselves. } else { - llvm_unreachable("NYI"); + // Force the entry block to exist. + mlir::Block *normalEntry = createNormalEntry(*this, Scope); + + // I. Set up the fallthrough edge in. + mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP; + + // If there's a fallthrough, we need to store the cleanup + // destination index. For fall-throughs this is always zero. + if (HasFallthrough) { + if (!HasPrebranchedFallthrough) { + assert(!cir::MissingFeatures::cleanupDestinationIndex()); + } + + // Otherwise, save and clear the IP if we don't have fallthrough + // because the cleanup is inactive. + } else if (FallthroughSource) { + assert(!IsActive && "source without fallthrough for active cleanup"); + savedInactiveFallthroughIP = getBuilder().saveInsertionPoint(); + } + + // II. Emit the entry block. This implicitly branches to it if + // we have fallthrough. All the fixups and existing branches + // should already be branched to it. + builder.setInsertionPointToEnd(normalEntry); + + // intercept normal cleanup to mark SEH scope end + if (IsEHa) { + llvm_unreachable("NYI"); + } + + // III. Figure out where we're going and build the cleanup + // epilogue. + bool HasEnclosingCleanups = + (Scope.getEnclosingNormalCleanup() != EHStack.stable_end()); + + // Compute the branch-through dest if we need it: + // - if there are branch-throughs threaded through the scope + // - if fall-through is a branch-through + // - if there are fixups that will be optimistically forwarded + // to the enclosing cleanup + mlir::Block *branchThroughDest = nullptr; + if (Scope.hasBranchThroughs() || + (FallthroughSource && FallthroughIsBranchThrough) || + (HasFixups && HasEnclosingCleanups)) { + llvm_unreachable("NYI"); + } + + mlir::Block *fallthroughDest = nullptr; + + // If there's exactly one branch-after and no other threads, + // we can route it without a switch. + // Skip for SEH, since ExitSwitch is used to generate code to indicate + // abnormal termination. (SEH: Except _leave and fall-through at + // the end, all other exits in a _try (return/goto/continue/break) + // are considered as abnormal terminations, using NormalCleanupDestSlot + // to indicate abnormal termination) + if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough && + !currentFunctionUsesSEHTry() && Scope.getNumBranchAfters() == 1) { + llvm_unreachable("NYI"); + // Build a switch-out if we need it: + // - if there are branch-afters threaded through the scope + // - if fall-through is a branch-after + // - if there are fixups that have nowhere left to go and + // so must be immediately resolved + } else if (Scope.getNumBranchAfters() || + (HasFallthrough && !FallthroughIsBranchThrough) || + (HasFixups && !HasEnclosingCleanups)) { + assert(!cir::MissingFeatures::cleanupBranchAfterSwitch()); + } else { + // We should always have a branch-through destination in this case. + assert(branchThroughDest); + assert(!cir::MissingFeatures::cleanupAlwaysBranchThrough()); + } + + // IV. Pop the cleanup and emit it. + Scope.markEmitted(); + EHStack.popCleanup(); + assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups); + + emitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); + + // Append the prepared cleanup prologue from above. + assert(!cir::MissingFeatures::cleanupAppendInsts()); + + // Optimistically hope that any fixups will continue falling through. + for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); I < E; + ++I) { + llvm_unreachable("NYI"); + } + + // V. Set up the fallthrough edge out. + + // Case 1: a fallthrough source exists but doesn't branch to the + // cleanup because the cleanup is inactive. + if (!HasFallthrough && FallthroughSource) { + // Prebranched fallthrough was forwarded earlier. + // Non-prebranched fallthrough doesn't need to be forwarded. + // Either way, all we need to do is restore the IP we cleared before. + assert(!IsActive); + llvm_unreachable("NYI"); + + // Case 2: a fallthrough source exists and should branch to the + // cleanup, but we're not supposed to branch through to the next + // cleanup. + } else if (HasFallthrough && fallthroughDest) { + llvm_unreachable("NYI"); + + // Case 3: a fallthrough source exists and should branch to the + // cleanup and then through to the next. + } else if (HasFallthrough) { + // Everything is already set up for this. + + // Case 4: no fallthrough source exists. + } else { + // FIXME(cir): should we clear insertion point here? + } + + // VI. Assorted cleaning. + + // Check whether we can merge NormalEntry into a single predecessor. + // This might invalidate (non-IR) pointers to NormalEntry. + // + // If it did invalidate those pointers, and NormalEntry was the same + // as NormalExit, go back and patch up the fixups. + assert(!cir::MissingFeatures::simplifyCleanupEntry()); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index b44896241eff..c1a4ac61a5d6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -916,7 +916,7 @@ template struct DestroyNRVOVariable : EHScopeStack::Cleanup { QualType Ty; void Emit(CIRGenFunction &CGF, Flags flags) override { - llvm_unreachable("NYI"); + assert(!cir::MissingFeatures::cleanupDestroyNRVOVariable()); } virtual ~DestroyNRVOVariable() = default; diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 4d33c40e38e6..7de4866cd004 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -349,15 +349,23 @@ void CIRGenFunction::LexicalScope::cleanup() { // Cleanup are done right before codegen resume a scope. This is where // objects are destroyed. - unsigned curLoc = 0; + SmallVector retBlocks; for (auto *retBlock : localScope->getRetBlocks()) { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPointToEnd(retBlock); - mlir::Location retLoc = *localScope->getRetLocs()[curLoc]; - curLoc++; + retBlocks.push_back(retBlock); + mlir::Location retLoc = localScope->getRetLoc(retBlock); (void)emitReturn(retLoc); } + auto removeUnusedRetBlocks = [&]() { + for (mlir::Block *retBlock : retBlocks) { + if (!retBlock->getUses().empty()) + continue; + retBlock->erase(); + } + }; + auto insertCleanupAndLeave = [&](mlir::Block *InsPt) { mlir::OpBuilder::InsertionGuard guard(builder); builder.setInsertionPointToEnd(InsPt); @@ -373,9 +381,34 @@ void CIRGenFunction::LexicalScope::cleanup() { if (!cleanupBlock && localScope->getCleanupBlock(builder)) { cleanupBlock = localScope->getCleanupBlock(builder); builder.create(InsPt->back().getLoc(), cleanupBlock); + if (!cleanupBlock->mightHaveTerminator()) { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.setInsertionPointToEnd(cleanupBlock); + builder.create(localScope->EndLoc); + } } if (localScope->Depth == 0) { + // TODO(cir): get rid of all this special cases once cleanups are properly + // implemented. + // TODO(cir): most of this code should move into emitBranchThroughCleanup + if (localScope->getRetBlocks().size() == 1) { + mlir::Block *retBlock = localScope->getRetBlocks()[0]; + mlir::Location loc = localScope->getRetLoc(retBlock); + if (retBlock->getUses().empty()) + retBlock->erase(); + else { + // Thread return block via cleanup block. + if (cleanupBlock) { + for (auto &blockUse : retBlock->getUses()) { + auto brOp = dyn_cast(blockUse.getOwner()); + brOp.setSuccessor(cleanupBlock); + } + } + builder.create(loc, retBlock); + return; + } + } emitImplicitReturn(); return; } @@ -420,6 +453,7 @@ void CIRGenFunction::LexicalScope::cleanup() { // get into this condition and emit the proper cleanup. This is // needed to get nrvo to interop with dtor logic. PerformCleanup = false; + removeUnusedRetBlocks(); return; } @@ -529,7 +563,7 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // the ret after it's been at EndLoc. if (auto *DI = getDebugInfo()) assert(!cir::MissingFeatures::generateDebugInfo() && "NYI"); - builder.clearInsertionPoint(); + // FIXME(cir): should we clearInsertionPoint? breaks many testcases PopCleanupBlocks(PrologueCleanupDepth); } @@ -590,6 +624,20 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) { // block, it'd be deleted now. Same for unused ret allocas from ReturnValue } +static void eraseEmptyAndUnusedBlocks(cir::FuncOp fnOp) { + // Remove any left over blocks that are unrecheable and empty, since they do + // not represent unrecheable code useful for warnings nor anything deemed + // useful in general. + SmallVector blocksToDelete; + for (auto &blk : fnOp.getBlocks()) { + if (!blk.empty() || !blk.getUses().empty()) + continue; + blocksToDelete.push_back(&blk); + } + for (auto *b : blocksToDelete) + b->erase(); +} + cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, const CIRGenFunctionInfo &FnInfo) { assert(Fn && "generating code for a null function"); @@ -678,7 +726,6 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, assert(Fn.isDeclaration() && "Function already has body?"); mlir::Block *EntryBB = Fn.addEntryBlock(); builder.setInsertionPointToStart(EntryBB); - { // Initialize lexical scope information. LexicalScope lexScope{*this, fusedLoc, EntryBB}; @@ -728,18 +775,19 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, llvm_unreachable("no definition for emitted function"); assert(builder.getInsertionBlock() && "Should be valid"); - } - if (mlir::failed(Fn.verifyBody())) - return nullptr; + if (mlir::failed(Fn.verifyBody())) + return nullptr; - // Emit the standard function epilogue. - finishFunction(BodyRange.getEnd()); + // Emit the standard function epilogue. + finishFunction(BodyRange.getEnd()); - // If we haven't marked the function nothrow through other means, do a quick - // pass now to see if we can. - assert(!cir::MissingFeatures::tryMarkNoThrow()); + // If we haven't marked the function nothrow through other means, do a quick + // pass now to see if we can. + assert(!cir::MissingFeatures::tryMarkNoThrow()); + } + eraseEmptyAndUnusedBlocks(Fn); return Fn; } @@ -1163,9 +1211,13 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, if (FD && FD->isMain() && cir::MissingFeatures::zerocallusedregs()) llvm_unreachable("NYI"); - mlir::Block *EntryBB = &Fn.getBlocks().front(); + // CIRGen has its own logic for entry blocks, usually per operation region. + mlir::Block *retBlock = currLexScope->getOrCreateRetBlock(*this, getLoc(Loc)); + // returnBlock handles per region getJumpDestInCurrentScope LLVM traditional + // codegen logic. + (void)returnBlock(retBlock); - // TODO: allocapt insertion? probably don't need for CIR + mlir::Block *EntryBB = &Fn.getBlocks().front(); if (cir::MissingFeatures::requiresReturnValueCheck()) llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 38ea6a407884..ce26520114e8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -379,11 +379,14 @@ class CIRGenFunction : public CIRGenTypeCache { clang::GlobalDecl CurGD; /// Unified return block. - /// Not that for LLVM codegen this is a memeber variable instead. - JumpDest ReturnBlock() { - return JumpDest(currLexScope->getOrCreateCleanupBlock(builder)); + /// In CIR this is a function because each scope might have + /// it's associated return block. + JumpDest returnBlock(mlir::Block *retBlock) { + return getJumpDestInCurrentScope(retBlock); } + unsigned nextCleanupDestIndex = 1; + /// The temporary alloca to hold the return value. This is /// invalid iff the function has no return value. Address ReturnValue = Address::invalid(); @@ -1347,6 +1350,16 @@ class CIRGenFunction : public CIRGenTypeCache { void emitStoreThroughBitfieldLValue(RValue Src, LValue Dst, mlir::Value &Result); + /// The given basic block lies in the current EH scope, but may be a + /// target of a potentially scope-crossing jump; get a stable handle + /// to which we can perform this jump later. + /// CIRGen: this mostly tracks state for figuring out the proper scope + /// information, no actual branches are emitted. + JumpDest getJumpDestInCurrentScope(mlir::Block *target) { + return JumpDest(target, EHStack.getInnermostNormalCleanup(), + nextCleanupDestIndex++); + } + cir::BrOp emitBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is @@ -2069,11 +2082,14 @@ class CIRGenFunction : public CIRGenTypeCache { void ForceCleanup(std::initializer_list ValuesToReload = {}) { assert(PerformCleanup && "Already forced cleanup"); - CGF.DidCallStackSave = OldDidCallStackSave; - CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize, - ValuesToReload); - PerformCleanup = false; - CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth; + { + mlir::OpBuilder::InsertionGuard guard(CGF.getBuilder()); + CGF.DidCallStackSave = OldDidCallStackSave; + CGF.PopCleanupBlocks(CleanupStackDepth, + LifetimeExtendedCleanupStackSize, ValuesToReload); + PerformCleanup = false; + CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth; + } } }; @@ -2202,7 +2218,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) { if (CleanupBlock) return getCleanupBlock(builder); - return createCleanupBlock(builder); + CleanupBlock = createCleanupBlock(builder); + return CleanupBlock; } mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) { @@ -2212,9 +2229,10 @@ class CIRGenFunction : public CIRGenTypeCache { { // Create the cleanup block but dont hook it up around just yet. mlir::OpBuilder::InsertionGuard guard(builder); - CleanupBlock = builder.createBlock(builder.getBlock()->getParent()); + mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent() + : &CGF.CurFn->getRegion(0); + CleanupBlock = builder.createBlock(r); } - assert(builder.getInsertionBlock() && "Should be valid"); return CleanupBlock; } @@ -2226,7 +2244,7 @@ class CIRGenFunction : public CIRGenTypeCache { // On switches we need one return block per region, since cases don't // have their own scopes but are distinct regions nonetheless. llvm::SmallVector RetBlocks; - llvm::SmallVector> RetLocs; + llvm::DenseMap RetLocs; llvm::DenseMap RetBlockInCaseIndex; std::optional NormalRetBlockIndex; llvm::SmallVector> SwitchRegions; @@ -2244,7 +2262,7 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::OpBuilder::InsertionGuard guard(CGF.builder); auto *b = CGF.builder.createBlock(CGF.builder.getBlock()->getParent()); RetBlocks.push_back(b); - RetLocs.push_back(loc); + updateRetLoc(b, loc); return b; } @@ -2253,8 +2271,9 @@ class CIRGenFunction : public CIRGenTypeCache { public: llvm::ArrayRef getRetBlocks() { return RetBlocks; } - llvm::ArrayRef> getRetLocs() { - return RetLocs; + mlir::Location getRetLoc(mlir::Block *b) { return RetLocs.at(b); } + void updateRetLoc(mlir::Block *b, mlir::Location loc) { + RetLocs.insert_or_assign(b, loc); } llvm::MutableArrayRef> getSwitchRegions() { assert(isSwitch() && "expected switch scope"); @@ -2268,22 +2287,26 @@ class CIRGenFunction : public CIRGenTypeCache { } mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) { + mlir::Block *ret = nullptr; if (auto caseOp = mlir::dyn_cast_if_present( CGF.builder.getBlock()->getParentOp())) { auto iter = RetBlockInCaseIndex.find(caseOp); if (iter != RetBlockInCaseIndex.end()) - return RetBlocks[iter->second]; - - mlir::Block *ret = createRetBlock(CGF, loc); - RetBlockInCaseIndex[caseOp] = RetBlocks.size() - 1; - return ret; - } - if (!NormalRetBlockIndex) { - mlir::Block *ret = createRetBlock(CGF, loc); + ret = RetBlocks[iter->second]; + else { + ret = createRetBlock(CGF, loc); + RetBlockInCaseIndex[caseOp] = RetBlocks.size() - 1; + return ret; + } + } else if (!NormalRetBlockIndex) { + ret = createRetBlock(CGF, loc); NormalRetBlockIndex = RetBlocks.size() - 1; return ret; + } else { + ret = &*RetBlocks[*NormalRetBlockIndex]; } - return &*RetBlocks[*NormalRetBlockIndex]; + updateRetLoc(ret, loc); + return ret; } // Scope entry block tracking diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 76d78dc09c76..1fa1653bc1cf 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -72,12 +72,18 @@ Address CIRGenFunction::emitCompoundStmt(const CompoundStmt &S, bool getLast, // Add local scope to track new declared variables. SymTableScopeTy varScope(symbolTable); auto scopeLoc = getLoc(S.getSourceRange()); + mlir::OpBuilder::InsertPoint scopeInsPt; builder.create( scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) { - LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - retAlloca = emitCompoundStmtWithoutScope(S, getLast, slot); + scopeInsPt = b.saveInsertionPoint(); }); + { + mlir::OpBuilder::InsertionGuard guard(builder); + builder.restoreInsertionPoint(scopeInsPt); + LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()}; + retAlloca = emitCompoundStmtWithoutScope(S, getLast, slot); + } return retAlloca; } @@ -473,14 +479,25 @@ mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &S) { mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &S) { assert(!cir::MissingFeatures::requiresReturnValueCheck()); + assert(!cir::MissingFeatures::isSEHTryScope()); + auto loc = getLoc(S.getSourceRange()); // Emit the result value, even if unused, to evaluate the side effects. const Expr *RV = S.getRetValue(); - // TODO(cir): LLVM codegen uses a RunCleanupsScope cleanupScope here, we - // should model this in face of dtors. + // Record the result expression of the return statement. The recorded + // expression is used to determine whether a block capture's lifetime should + // end at the end of the full expression as opposed to the end of the scope + // enclosing the block expression. + // + // This permits a small, easily-implemented exception to our over-conservative + // rules about not jumping to statements following block literals with + // non-trivial cleanups. + // TODO(cir): SaveRetExpr + // SaveRetExprRAII SaveRetExpr(RV, *this); + RunCleanupsScope cleanupScope(*this); bool createNewScope = false; if (const auto *EWC = dyn_cast_or_null(RV)) { RV = EWC->getSubExpr(); @@ -557,16 +574,17 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &S) { } } - // Create a new return block (if not existent) and add a branch to - // it. The actual return instruction is only inserted during current - // scope cleanup handling. + cleanupScope.ForceCleanup(); + + // In CIR we might have returns in different scopes. + // FIXME(cir): cleanup code is handling actual return emission, the logic + // should try to match traditional codegen more closely (to the extend which + // is possible). auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); - builder.create(loc, retBlock); + emitBranchThroughCleanup(loc, returnBlock(retBlock)); // Insert the new block to continue codegen after branch to ret block. builder.createBlock(builder.getBlock()->getParent()); - - // TODO(cir): LLVM codegen for a cleanup on cleanupScope here. return mlir::success(); } @@ -1153,5 +1171,6 @@ void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue RV, } else { llvm_unreachable("NYI"); } - emitBranchThroughCleanup(loc, ReturnBlock()); + auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc); + emitBranchThroughCleanup(loc, returnBlock(retBlock)); } diff --git a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp index a53d85fbf55b..216e63029ddd 100644 --- a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp +++ b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp @@ -4,9 +4,9 @@ auto func() { return __builtin_strcmp("", ""); // CIR: cir.func @_Z4funcv() - // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} loc(#loc2) - // CIR-NEXT: %1 = cir.const #cir.int<0> : !s32i loc(#loc7) - // CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr loc(#loc8) - // CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i loc(#loc8) - // CIR-NEXT: cir.return %2 : !s32i loc(#loc8) + // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} + // CIR-NEXT: %1 = cir.const #cir.int<0> : !s32i + // CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr + // CIR-NEXT: %2 = cir.load %0 : !cir.ptr, !s32i + // CIR-NEXT: cir.return %2 : !s32i } diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 138533e2308d..57c4a85eec0e 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -251,10 +251,8 @@ int g3() { // COM: LLVM: [[CALL:%.*]] = call noundef i32 @"_ZZ2g3vENK3$_0clERKi"(ptr noundef nonnull align 1 dereferenceable(1) [[unused_capture]], ptr noundef nonnull align 4 dereferenceable(4) [[TMP0]]) // LLVM: [[CALL:%.*]] = call i32 @"_ZZ2g3vENK3$_0clERKi"(ptr [[unused_capture]], ptr [[TMP0]]) // LLVM: store i32 [[CALL]], ptr [[ret_val]], align 4 -// FIXME: should just return result -// COM: LLVM: ret i32 [[ret_val]] -// LLVM: call void @llvm.trap() -// LLVM: unreachable +// LLVM: %[[ret:.*]] = load i32, ptr [[ret_val]], align 4 +// LLVM: ret i32 %[[ret]] // lambda operator int (*)(int const&)() // LLVM-LABEL: @"_ZZ2g3vENK3$_0cvPFiRKiEEv" From 9b51d7a1055c5d52f57a6e4d096af873463a6b2e Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 9 Jan 2025 23:43:12 +0800 Subject: [PATCH 2204/2301] [CIR] Add support for GCC function attribute "const" and "pure" (#1262) This patch adds support for the following GCC function attributes: - `__attribute__((const))` - `__attribute__((pure))` The side effect information is attached to the call operations during CIRGen. During LLVM lowering, these information is consumed to further emit appropriate LLVM metadata on LLVM call instructions. --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 25 +++++---- clang/include/clang/CIR/Dialect/IR/CIROps.td | 52 +++++++++++++++++-- .../clang/CIR/Interfaces/CIROpInterfaces.td | 19 ++++--- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 45 +++++++++------- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 3 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 3 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 40 +++++++++++--- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 47 +++++++++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 5 ++ clang/test/CIR/CodeGen/call-side-effect.cpp | 25 +++++++++ clang/test/CIR/IR/side-effect.cir | 21 ++++++++ clang/test/CIR/Lowering/call.cir | 19 +++++++ 12 files changed, 255 insertions(+), 49 deletions(-) create mode 100644 clang/test/CIR/CodeGen/call-side-effect.cpp create mode 100644 clang/test/CIR/IR/side-effect.cir diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 771b7dd33cd4..502fd0d52524 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -652,10 +652,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Type returnType = cir::VoidType(), mlir::ValueRange operands = mlir::ValueRange(), cir::CallingConv callingConv = cir::CallingConv::C, + cir::SideEffect sideEffect = cir::SideEffect::All, cir::ExtraFuncAttributesAttr extraFnAttr = {}) { - cir::CallOp callOp = - create(loc, callee, returnType, operands, callingConv); + cir::CallOp callOp = create(loc, callee, returnType, operands, + callingConv, sideEffect); if (extraFnAttr) { callOp->setAttr("extra_attrs", extraFnAttr); @@ -671,10 +672,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { cir::CallOp createCallOp(mlir::Location loc, cir::FuncOp callee, mlir::ValueRange operands = mlir::ValueRange(), cir::CallingConv callingConv = cir::CallingConv::C, + cir::SideEffect sideEffect = cir::SideEffect::All, cir::ExtraFuncAttributesAttr extraFnAttr = {}) { return createCallOp(loc, mlir::SymbolRefAttr::get(callee), callee.getFunctionType().getReturnType(), operands, - callingConv, extraFnAttr); + callingConv, sideEffect, extraFnAttr); } cir::CallOp @@ -682,21 +684,23 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { cir::FuncType fn_type, mlir::ValueRange operands = mlir::ValueRange(), cir::CallingConv callingConv = cir::CallingConv::C, + cir::SideEffect sideEffect = cir::SideEffect::All, cir::ExtraFuncAttributesAttr extraFnAttr = {}) { llvm::SmallVector resOperands({ind_target}); resOperands.append(operands.begin(), operands.end()); return createCallOp(loc, mlir::SymbolRefAttr(), fn_type.getReturnType(), - resOperands, callingConv, extraFnAttr); + resOperands, callingConv, sideEffect, extraFnAttr); } cir::CallOp createCallOp(mlir::Location loc, mlir::SymbolRefAttr callee, mlir::ValueRange operands = mlir::ValueRange(), cir::CallingConv callingConv = cir::CallingConv::C, + cir::SideEffect sideEffect = cir::SideEffect::All, cir::ExtraFuncAttributesAttr extraFnAttr = {}) { return createCallOp(loc, callee, cir::VoidType(), operands, callingConv, - extraFnAttr); + sideEffect, extraFnAttr); } cir::CallOp @@ -705,10 +709,11 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { mlir::Type returnType = cir::VoidType(), mlir::ValueRange operands = mlir::ValueRange(), cir::CallingConv callingConv = cir::CallingConv::C, + cir::SideEffect sideEffect = cir::SideEffect::All, cir::ExtraFuncAttributesAttr extraFnAttr = {}) { cir::CallOp tryCallOp = create(loc, callee, returnType, operands, callingConv, - /*exception=*/getUnitAttr()); + sideEffect, /*exception=*/getUnitAttr()); if (extraFnAttr) { tryCallOp->setAttr("extra_attrs", extraFnAttr); } else { @@ -724,20 +729,22 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { createTryCallOp(mlir::Location loc, cir::FuncOp callee, mlir::ValueRange operands, cir::CallingConv callingConv = cir::CallingConv::C, + cir::SideEffect sideEffect = cir::SideEffect::All, cir::ExtraFuncAttributesAttr extraFnAttr = {}) { return createTryCallOp(loc, mlir::SymbolRefAttr::get(callee), callee.getFunctionType().getReturnType(), operands, - callingConv, extraFnAttr); + callingConv, sideEffect, extraFnAttr); } cir::CallOp createIndirectTryCallOp(mlir::Location loc, mlir::Value ind_target, cir::FuncType fn_type, mlir::ValueRange operands, - cir::CallingConv callingConv = cir::CallingConv::C) { + cir::CallingConv callingConv = cir::CallingConv::C, + cir::SideEffect sideEffect = cir::SideEffect::All) { llvm::SmallVector resOperands({ind_target}); resOperands.append(operands.begin(), operands.end()); return createTryCallOp(loc, mlir::SymbolRefAttr(), fn_type.getReturnType(), - resOperands, callingConv); + resOperands, callingConv, sideEffect); } struct GetMethodResults { diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 52e1013b88a6..dfcc836756d1 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3574,6 +3574,39 @@ def DeleteArrayOp : CIR_Op<"delete.array">, // CallOp and TryCallOp //===----------------------------------------------------------------------===// +def SE_All : I32EnumAttrCase<"All", 1, "all">; +def SE_Pure : I32EnumAttrCase<"Pure", 2, "pure">; +def SE_Const : I32EnumAttrCase<"Const", 3, "const">; + +def SideEffect : I32EnumAttr< + "SideEffect", "allowed side effects of a function", + [SE_All, SE_Pure, SE_Const]> { + let description = [{ + The side effect attribute specifies the possible side effects of the callee + of a call operation. This is an enumeration attribute and all possible + enumerators are: + + - all: The callee can have any side effects. This is the default if no side + effects are explicitly listed. + - pure: The callee may read data from memory, but it cannot write data to + memory. This has the same effect as the GNU C/C++ attribute + `__attribute__((pure))`. + - const: The callee may not read or write data from memory. This has the + same effect as the GNU C/C++ attribute `__attribute__((const))`. + + Examples: + + ```mlir + %0 = cir.const #cir.int<0> : !s32i + %1 = cir.const #cir.int<1> : !s32i + %2 = cir.call @add(%0, %1) : (!s32i, !s32i) -> !s32i side_effect(all) + %2 = cir.call @add(%0, %1) : (!s32i, !s32i) -> !s32i side_effect(pure) + %2 = cir.call @add(%0, %1) : (!s32i, !s32i) -> !s32i side_effect(const) + ``` + }]; + let cppNamespace = "::cir"; +} + class CIR_CallOp extra_traits = []> : Op extra_traits = []> : OptionalAttr:$callee, Variadic:$arg_ops, DefaultValuedAttr:$calling_conv, + DefaultValuedAttr:$side_effect, ExtraFuncAttr:$extra_attrs, OptionalAttr:$ast ); @@ -3676,12 +3710,15 @@ def CallOp : CIR_CallOp<"call", [NoRegionArguments]> { OpBuilder<(ins "mlir::SymbolRefAttr":$callee, "mlir::Type":$resType, CArg<"mlir::ValueRange", "{}">:$operands, CArg<"CallingConv", "CallingConv::C">:$callingConv, + CArg<"SideEffect", "SideEffect::All">:$sideEffect, CArg<"mlir::UnitAttr", "{}">:$exception), [{ $_state.addOperands(operands); if (callee) $_state.addAttribute("callee", callee); $_state.addAttribute("calling_conv", CallingConvAttr::get($_builder.getContext(), callingConv)); + $_state.addAttribute("side_effect", + SideEffectAttr::get($_builder.getContext(), sideEffect)); if (exception) $_state.addAttribute("exception", exception); if (resType && !isa(resType)) @@ -3693,6 +3730,7 @@ def CallOp : CIR_CallOp<"call", [NoRegionArguments]> { "FuncType":$fn_type, CArg<"mlir::ValueRange", "{}">:$operands, CArg<"CallingConv", "CallingConv::C">:$callingConv, + CArg<"SideEffect", "SideEffect::All">:$sideEffect, CArg<"mlir::UnitAttr", "{}">:$exception), [{ $_state.addOperands(ValueRange{ind_target}); $_state.addOperands(operands); @@ -3700,6 +3738,8 @@ def CallOp : CIR_CallOp<"call", [NoRegionArguments]> { $_state.addTypes(fn_type.getReturnType()); $_state.addAttribute("calling_conv", CallingConvAttr::get($_builder.getContext(), callingConv)); + $_state.addAttribute("side_effect", + SideEffectAttr::get($_builder.getContext(), sideEffect)); if (exception) $_state.addAttribute("exception", exception); // Create region placeholder for potential cleanups. @@ -3742,7 +3782,8 @@ def TryCallOp : CIR_CallOp<"try_call", CArg<"mlir::ValueRange", "{}">:$operands, CArg<"mlir::ValueRange", "{}">:$contOperands, CArg<"mlir::ValueRange", "{}">:$landingPadOperands, - CArg<"CallingConv", "CallingConv::C">:$callingConv), [{ + CArg<"CallingConv", "CallingConv::C">:$callingConv, + CArg<"SideEffect", "SideEffect::All">:$sideEffect), [{ $_state.addOperands(operands); if (callee) $_state.addAttribute("callee", callee); @@ -3751,6 +3792,8 @@ def TryCallOp : CIR_CallOp<"try_call", $_state.addAttribute("calling_conv", CallingConvAttr::get($_builder.getContext(), callingConv)); + $_state.addAttribute("side_effect", + SideEffectAttr::get($_builder.getContext(), sideEffect)); // Handle branches $_state.addOperands(contOperands); @@ -3771,7 +3814,8 @@ def TryCallOp : CIR_CallOp<"try_call", CArg<"mlir::ValueRange", "{}">:$operands, CArg<"mlir::ValueRange", "{}">:$contOperands, CArg<"mlir::ValueRange", "{}">:$landingPadOperands, - CArg<"CallingConv", "CallingConv::C">:$callingConv), [{ + CArg<"CallingConv", "CallingConv::C">:$callingConv, + CArg<"SideEffect", "SideEffect::All">:$sideEffect), [{ ::llvm::SmallVector finalCallOperands({ind_target}); finalCallOperands.append(operands.begin(), operands.end()); $_state.addOperands(finalCallOperands); @@ -3781,6 +3825,8 @@ def TryCallOp : CIR_CallOp<"try_call", $_state.addAttribute("calling_conv", CallingConvAttr::get($_builder.getContext(), callingConv)); + $_state.addAttribute("side_effect", + SideEffectAttr::get($_builder.getContext(), sideEffect)); // Handle branches $_state.addOperands(contOperands); @@ -4187,7 +4233,7 @@ def MemCpyInlineOp : CIR_MemOp<"memcpy_inline"> { Given two CIR pointers, `src` and `dst`, `memcpy_inline` will copy `len` bytes from the memory pointed by `src` to the memory pointed by `dst`. - Unlike `cir.libc.memcpy`, this Op guarantees that no external functions + Unlike `cir.libc.memcpy`, this Op guarantees that no external functions are called, and length of copied bytes is a constant. Examples: diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td index 445a558debda..6437112cd451 100644 --- a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td +++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td @@ -37,6 +37,9 @@ let cppNamespace = "::cir" in { InterfaceMethod< "Return the calling convention of the call operation", "cir::CallingConv", "getCallingConv", (ins)>, + InterfaceMethod< + "Return the side effects of the call operation", + "cir::SideEffect", "getSideEffect", (ins)>, ]; } @@ -50,20 +53,20 @@ let cppNamespace = "::cir" in { >, InterfaceMethod<"", "bool", "hasLocalLinkage", (ins), [{}], - /*defaultImplementation=*/[{ - return cir::isLocalLinkage($_op.getLinkage()); + /*defaultImplementation=*/[{ + return cir::isLocalLinkage($_op.getLinkage()); }] >, InterfaceMethod<"", "bool", "hasExternalWeakLinkage", (ins), [{}], - /*defaultImplementation=*/[{ - return cir::isExternalWeakLinkage($_op.getLinkage()); + /*defaultImplementation=*/[{ + return cir::isExternalWeakLinkage($_op.getLinkage()); }] >, InterfaceMethod<"", "bool", "isExternalLinkage", (ins), [{}], - /*defaultImplementation=*/[{ - return cir::isExternalLinkage($_op.getLinkage()); + /*defaultImplementation=*/[{ + return cir::isExternalLinkage($_op.getLinkage()); }] >, InterfaceMethod<"", @@ -87,8 +90,8 @@ let cppNamespace = "::cir" in { }] >, ]; - let extraClassDeclaration = [{ - bool hasDefaultVisibility(); + let extraClassDeclaration = [{ + bool hasDefaultVisibility(); bool canBenefitFromLocalAlias(); }]; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 5a5bf3007c14..c712a7ea27c4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -345,12 +345,10 @@ static void AddAttributesFromFunctionProtoType(CIRGenBuilderTy &builder, /// attributes that restrict how the frontend generates code must be /// added here rather than getDefaultFunctionAttributes. /// -void CIRGenModule::constructAttributeList(StringRef Name, - const CIRGenFunctionInfo &FI, - CIRGenCalleeInfo CalleeInfo, - mlir::NamedAttrList &funcAttrs, - cir::CallingConv &callingConv, - bool AttrOnCallSite, bool IsThunk) { +void CIRGenModule::constructAttributeList( + StringRef Name, const CIRGenFunctionInfo &FI, CIRGenCalleeInfo CalleeInfo, + mlir::NamedAttrList &funcAttrs, cir::CallingConv &callingConv, + cir::SideEffect &sideEffect, bool AttrOnCallSite, bool IsThunk) { // Implementation Disclaimer // // UnimplementedFeature and asserts are used throughout the code to track @@ -364,6 +362,7 @@ void CIRGenModule::constructAttributeList(StringRef Name, // Collect function CIR attributes from the CC lowering. callingConv = FI.getEffectiveCallingConvention(); + sideEffect = cir::SideEffect::All; // TODO: NoReturn, cmse_nonsecure_call // Collect function CIR attributes from the callee prototype if we have one. @@ -421,8 +420,10 @@ void CIRGenModule::constructAttributeList(StringRef Name, if (TargetDecl->hasAttr()) { // gcc specifies that 'const' functions have greater restrictions than // 'pure' functions, so they also cannot have infinite loops. + sideEffect = cir::SideEffect::Const; } else if (TargetDecl->hasAttr()) { // gcc specifies that 'pure' functions cannot have infinite loops. + sideEffect = cir::SideEffect::Pure; } else if (TargetDecl->hasAttr()) { } @@ -466,11 +467,13 @@ void CIRGenModule::constructAttributeList(StringRef Name, getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, funcAttrs); } -static cir::CIRCallOpInterface emitCallLikeOp( - CIRGenFunction &CGF, mlir::Location callLoc, cir::FuncType indirectFuncTy, - mlir::Value indirectFuncVal, cir::FuncOp directFuncOp, - SmallVectorImpl &CIRCallArgs, bool isInvoke, - cir::CallingConv callingConv, cir::ExtraFuncAttributesAttr extraFnAttrs) { +static cir::CIRCallOpInterface +emitCallLikeOp(CIRGenFunction &CGF, mlir::Location callLoc, + cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal, + cir::FuncOp directFuncOp, + SmallVectorImpl &CIRCallArgs, bool isInvoke, + cir::CallingConv callingConv, cir::SideEffect sideEffect, + cir::ExtraFuncAttributesAttr extraFnAttrs) { auto &builder = CGF.getBuilder(); auto getOrCreateSurroundingTryOp = [&]() { // In OG, we build the landing pad for this scope. In CIR, we emit a @@ -521,10 +524,11 @@ static cir::CIRCallOpInterface emitCallLikeOp( assert(callingConv == cir::CallingConv::C && "NYI"); if (indirectFuncTy) { callOpWithExceptions = builder.createIndirectTryCallOp( - callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs); + callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs, callingConv, + sideEffect); } else { - callOpWithExceptions = - builder.createTryCallOp(callLoc, directFuncOp, CIRCallArgs); + callOpWithExceptions = builder.createTryCallOp( + callLoc, directFuncOp, CIRCallArgs, callingConv, sideEffect); } callOpWithExceptions->setAttr("extra_attrs", extraFnAttrs); @@ -544,12 +548,12 @@ static cir::CIRCallOpInterface emitCallLikeOp( if (indirectFuncTy) { // TODO(cir): Set calling convention for indirect calls. assert(callingConv == cir::CallingConv::C && "NYI"); - return builder.createIndirectCallOp(callLoc, indirectFuncVal, - indirectFuncTy, CIRCallArgs, - cir::CallingConv::C, extraFnAttrs); + return builder.createIndirectCallOp( + callLoc, indirectFuncVal, indirectFuncTy, CIRCallArgs, + cir::CallingConv::C, sideEffect, extraFnAttrs); } return builder.createCallOp(callLoc, directFuncOp, CIRCallArgs, callingConv, - extraFnAttrs); + sideEffect, extraFnAttrs); } static RValue getRValueThroughMemory(mlir::Location loc, @@ -755,8 +759,9 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &CallInfo, FnName = calleeFnOp.getName(); cir::CallingConv callingConv; + cir::SideEffect sideEffect; CGM.constructAttributeList(FnName, CallInfo, Callee.getAbstractInfo(), Attrs, - callingConv, + callingConv, sideEffect, /*AttrOnCallSite=*/true, /*IsThunk=*/false); @@ -837,7 +842,7 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &CallInfo, cir::CIRCallOpInterface callLikeOp = emitCallLikeOp( *this, callLoc, indirectFuncTy, indirectFuncVal, directFuncOp, - CIRCallArgs, isInvoke, callingConv, extraFnAttrs); + CIRCallArgs, isInvoke, callingConv, sideEffect, extraFnAttrs); if (E) callLikeOp->setAttr("ast", diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index e4a7951361a0..6c3fad1f021b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2681,10 +2681,11 @@ void CIRGenModule::setCIRFunctionAttributes(GlobalDecl GD, cir::FuncOp func, bool isThunk) { // TODO(cir): More logic of constructAttributeList is needed. cir::CallingConv callingConv; + cir::SideEffect sideEffect; // Initialize PAL with existing attributes to merge attributes. mlir::NamedAttrList PAL{func.getExtraAttrs().getElements().getValue()}; - constructAttributeList(func.getName(), info, GD, PAL, callingConv, + constructAttributeList(func.getName(), info, GD, PAL, callingConv, sideEffect, /*AttrOnCallSite=*/false, isThunk); func.setExtraAttrsAttr(cir::ExtraFuncAttributesAttr::get( &getMLIRContext(), PAL.getDictionary(&getMLIRContext()))); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index afaeea3eb094..4fa8d9dfbbcb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -367,7 +367,8 @@ class CIRGenModule : public CIRGenTypeCache { CIRGenCalleeInfo CalleeInfo, mlir::NamedAttrList &Attrs, cir::CallingConv &callingConv, - bool AttrOnCallSite, bool IsThunk); + cir::SideEffect &sideEffect, bool AttrOnCallSite, + bool IsThunk); /// Helper function for getDefaultFunctionAttributes. Builds a set of function /// attributes which can be simply added to a function. diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7da279aa7513..d92e45d8d940 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -172,6 +172,7 @@ template struct EnumTraits {}; REGISTER_ENUM_TYPE(GlobalLinkageKind); REGISTER_ENUM_TYPE(CallingConv); +REGISTER_ENUM_TYPE(SideEffect); REGISTER_ENUM_TYPE_WITH_NS(cir::sob, SignedOverflowBehavior); } // namespace @@ -2893,6 +2894,18 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, builder.getContext(), callingConv)); } + if (parser.parseOptionalKeyword("side_effect").succeeded()) { + if (parser.parseLParen().failed()) + return failure(); + cir::SideEffect sideEffect; + if (parseCIRKeyword(parser, sideEffect).failed()) + return failure(); + if (parser.parseRParen().failed()) + return failure(); + result.addAttribute("side_effect", cir::SideEffectAttr::get( + builder.getContext(), sideEffect)); + } + Attribute extraAttrs; if (::mlir::succeeded(parser.parseOptionalKeyword("extra"))) { if (parser.parseLParen().failed()) @@ -2923,11 +2936,14 @@ static ::mlir::ParseResult parseCallCommon(::mlir::OpAsmParser &parser, return ::mlir::success(); } -void printCallCommon( - Operation *op, mlir::Value indirectCallee, mlir::FlatSymbolRefAttr flatSym, - ::mlir::OpAsmPrinter &state, cir::ExtraFuncAttributesAttr extraAttrs, - cir::CallingConv callingConv, ::mlir::UnitAttr exception = {}, - mlir::Block *cont = nullptr, mlir::Block *landingPad = nullptr) { +void printCallCommon(Operation *op, mlir::Value indirectCallee, + mlir::FlatSymbolRefAttr flatSym, + ::mlir::OpAsmPrinter &state, + cir::ExtraFuncAttributesAttr extraAttrs, + cir::CallingConv callingConv, cir::SideEffect sideEffect, + ::mlir::UnitAttr exception = {}, + mlir::Block *cont = nullptr, + mlir::Block *landingPad = nullptr) { state << ' '; auto callLikeOp = mlir::cast(op); @@ -2977,6 +2993,7 @@ void printCallCommon( elidedAttrs.push_back("ast"); elidedAttrs.push_back("extra_attrs"); elidedAttrs.push_back("calling_conv"); + elidedAttrs.push_back("side_effect"); elidedAttrs.push_back("exception"); elidedAttrs.push_back("operandSegmentSizes"); @@ -2991,6 +3008,12 @@ void printCallCommon( state << ")"; } + if (sideEffect != cir::SideEffect::All) { + state << " side_effect("; + state << stringifySideEffect(sideEffect); + state << ")"; + } + if (!extraAttrs.getElements().empty()) { state << " extra("; state.printAttributeWithoutType(extraAttrs); @@ -3023,9 +3046,10 @@ ::mlir::ParseResult cir::CallOp::parse(::mlir::OpAsmParser &parser, void cir::CallOp::print(::mlir::OpAsmPrinter &state) { mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; cir::CallingConv callingConv = getCallingConv(); + cir::SideEffect sideEffect = getSideEffect(); mlir::UnitAttr exception = getExceptionAttr(); printCallCommon(*this, indirectCallee, getCalleeAttr(), state, - getExtraAttrs(), callingConv, exception); + getExtraAttrs(), callingConv, sideEffect, exception); } //===----------------------------------------------------------------------===// @@ -3075,8 +3099,10 @@ ::mlir::ParseResult cir::TryCallOp::parse(::mlir::OpAsmParser &parser, void cir::TryCallOp::print(::mlir::OpAsmPrinter &state) { mlir::Value indirectCallee = isIndirect() ? getIndirectCall() : nullptr; cir::CallingConv callingConv = getCallingConv(); + cir::SideEffect sideEffect = getSideEffect(); printCallCommon(*this, indirectCallee, getCalleeAttr(), state, - getExtraAttrs(), callingConv, {}, getCont(), getLandingPad()); + getExtraAttrs(), callingConv, sideEffect, {}, getCont(), + getLandingPad()); } mlir::SuccessorOperands cir::TryCallOp::getSuccessorOperands(unsigned index) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 6a321fca58f1..f23a73e3e553 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -796,6 +796,43 @@ mlir::LLVM::CConv convertCallingConv(cir::CallingConv callinvConv) { llvm_unreachable("Unknown calling convention"); } +void convertSideEffectForCall(mlir::Operation *callOp, + cir::SideEffect sideEffect, + mlir::LLVM::MemoryEffectsAttr &memoryEffect, + bool &noUnwind, bool &willReturn) { + using mlir::LLVM::ModRefInfo; + + switch (sideEffect) { + case cir::SideEffect::All: + memoryEffect = {}; + noUnwind = false; + willReturn = false; + break; + + case cir::SideEffect::Pure: + memoryEffect = mlir::LLVM::MemoryEffectsAttr::get( + callOp->getContext(), /*other=*/ModRefInfo::Ref, + /*argMem=*/ModRefInfo::Ref, + /*inaccessibleMem=*/ModRefInfo::Ref); + noUnwind = true; + willReturn = true; + break; + + case cir::SideEffect::Const: + memoryEffect = mlir::LLVM::MemoryEffectsAttr::get( + callOp->getContext(), /*other=*/ModRefInfo::NoModRef, + /*argMem=*/ModRefInfo::NoModRef, + /*inaccessibleMem=*/ModRefInfo::NoModRef); + noUnwind = true; + willReturn = true; + break; + + default: + callOp->emitError("unknown side effect"); + break; + } +} + mlir::LogicalResult CIRToLLVMCopyOpLowering::matchAndRewrite( cir::CopyOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -1338,6 +1375,12 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, auto cconv = convertCallingConv(callIf.getCallingConv()); + mlir::LLVM::MemoryEffectsAttr memoryEffects; + bool noUnwind = false; + bool willReturn = false; + convertSideEffectForCall(op, callIf.getSideEffect(), memoryEffects, noUnwind, + willReturn); + mlir::LLVM::LLVMFunctionType llvmFnTy; if (calleeAttr) { // direct call auto fn = @@ -1366,6 +1409,10 @@ rewriteToCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands, auto newOp = rewriter.replaceOpWithNewOp( op, llvmFnTy, calleeAttr, callOperands); newOp.setCConv(cconv); + if (memoryEffects) + newOp.setMemoryEffectsAttr(memoryEffects); + newOp.setNoUnwind(noUnwind); + newOp.setWillReturn(willReturn); } return mlir::success(); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 12ded1f39c80..264ae29a0e85 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -31,6 +31,11 @@ mlir::LLVM::Linkage convertLinkage(cir::GlobalLinkageKind linkage); mlir::LLVM::CConv convertCallingConv(cir::CallingConv callinvConv); +void convertSideEffectForCall(mlir::Operation *callOp, + cir::SideEffect sideEffect, + mlir::LLVM::MemoryEffectsAttr &memoryEffect, + bool &noUnwind, bool &willReturn); + void buildCtorDtorList( mlir::ModuleOp module, mlir::StringRef globalXtorName, mlir::StringRef llvmXtorName, diff --git a/clang/test/CIR/CodeGen/call-side-effect.cpp b/clang/test/CIR/CodeGen/call-side-effect.cpp new file mode 100644 index 000000000000..564e2c115b79 --- /dev/null +++ b/clang/test/CIR/CodeGen/call-side-effect.cpp @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CIR %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s + +[[gnu::pure]] int pure_func(int x); +[[gnu::const]] int const_func(int x); + +int test(int x) { + int y1 = pure_func(x); + int y2 = const_func(x); + return y1 + y2; +} + +// CIR-LABEL: @_Z4testi +// CIR: %{{.+}} = cir.call @_Z9pure_funci(%{{.+}}) : (!s32i) -> !s32i side_effect(pure) +// CIR: %{{.+}} = cir.call @_Z10const_funci(%{{.+}}) : (!s32i) -> !s32i side_effect(const) +// CIR: } + +// LLVM-LABEL: @_Z4testi(i32 %0) +// LLVM: %{{.+}} = call i32 @_Z9pure_funci(i32 %{{.+}}) #[[#meta_pure:]] +// LLVM: %{{.+}} = call i32 @_Z10const_funci(i32 %{{.+}}) #[[#meta_const:]] +// LLVM: } +// LLVM: attributes #[[#meta_pure]] = { nounwind willreturn memory(read) } +// LLVM: attributes #[[#meta_const]] = { nounwind willreturn memory(none) } diff --git a/clang/test/CIR/IR/side-effect.cir b/clang/test/CIR/IR/side-effect.cir new file mode 100644 index 000000000000..76659f5c6ef3 --- /dev/null +++ b/clang/test/CIR/IR/side-effect.cir @@ -0,0 +1,21 @@ +// RUN: cir-opt %s -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir + +!s32i = !cir.int + +module { + cir.func private @add(%arg0: !s32i, %arg1: !s32i) -> !s32i + cir.func @call_with_side_effect() { + %0 = cir.const #cir.int<0> : !s32i + %1 = cir.const #cir.int<1> : !s32i + %2 = cir.call @add(%0, %1) : (!s32i, !s32i) -> !s32i side_effect(all) + %3 = cir.call @add(%0, %1) : (!s32i, !s32i) -> !s32i side_effect(pure) + %4 = cir.call @add(%0, %1) : (!s32i, !s32i) -> !s32i side_effect(const) + cir.return + } + // CHECK-LABEL: @call_with_side_effect() + // CHECK: %{{.+}} = cir.call @add(%{{.+}}, %{{.+}}) : (!s32i, !s32i) -> !s32i + // CHECK-NEXT: %{{.+}} = cir.call @add(%{{.+}}, %{{.+}}) : (!s32i, !s32i) -> !s32i side_effect(pure) + // CHECK-NEXT: %{{.+}} = cir.call @add(%{{.+}}, %{{.+}}) : (!s32i, !s32i) -> !s32i side_effect(const) + // CHECK: } +} diff --git a/clang/test/CIR/Lowering/call.cir b/clang/test/CIR/Lowering/call.cir index ade54037b76b..ed4916d55e14 100644 --- a/clang/test/CIR/Lowering/call.cir +++ b/clang/test/CIR/Lowering/call.cir @@ -99,4 +99,23 @@ module { // LLVM-NEXT: %2 = call i32 (i32, ...) %1(i32 0, i32 0) // LLVM-NEXT: ret i32 %2 + cir.func private @add(%arg0: !s32i, %arg1: !s32i) -> !s32i + + cir.func @call_with_side_effect() { + %0 = cir.const #cir.int<0> : !s32i + %1 = cir.const #cir.int<1> : !s32i + %2 = cir.call @add(%0, %1) : (!s32i, !s32i) -> !s32i side_effect(all) + %3 = cir.call @add(%0, %1) : (!s32i, !s32i) -> !s32i side_effect(pure) + %4 = cir.call @add(%0, %1) : (!s32i, !s32i) -> !s32i side_effect(const) + cir.return + } + + // LLVM: @call_with_side_effect + // LLVM: %{{.+}} = call i32 @add(i32 0, i32 1) + // LLVM: %{{.+}} = call i32 @add(i32 0, i32 1) #[[#pure:]] + // LLVM: %{{.+}} = call i32 @add(i32 0, i32 1) #[[#const:]] + // LLVM: } + // LLVM: attributes #[[#pure]] = { nounwind willreturn memory(read) } + // LLVM: attributes #[[#const]] = { nounwind willreturn memory(none) } + } // end module From 369fc37bc481c9582e59e674fcf91259ea4f2307 Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Fri, 10 Jan 2025 16:09:46 +0100 Subject: [PATCH 2205/2301] Revert "[CIR] Remove the !cir.void return type for functions returning void (#1203)" (#1276) This reverts commit 568b51537e573c7a8fb616cda5cdd0aa54bc0832. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 13 +-- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 19 ++-- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 39 ++------ clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 93 +++---------------- .../Transforms/TargetLowering/LowerTypes.cpp | 2 +- clang/test/CIR/IR/being_and_nothingness.cir | 35 ------- mlir/include/mlir/IR/OpImplementation.h | 3 - .../mlir/Interfaces/FunctionImplementation.h | 22 ----- mlir/lib/AsmParser/AsmParserImpl.h | 13 --- .../lib/Interfaces/FunctionImplementation.cpp | 13 +-- 11 files changed, 43 insertions(+), 211 deletions(-) delete mode 100644 clang/test/CIR/IR/being_and_nothingness.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index dfcc836756d1..2e4c707c895c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3474,6 +3474,8 @@ def FuncOp : CIR_Op<"func", [ /// Returns the results types that the callable region produces when /// executed. llvm::ArrayRef getCallableResults() { + if (::llvm::isa(getFunctionType().getReturnType())) + return {}; return getFunctionType().getReturnTypes(); } @@ -3490,15 +3492,10 @@ def FuncOp : CIR_Op<"func", [ } /// Returns the argument types of this function. - llvm::ArrayRef getArgumentTypes() { - return getFunctionType().getInputs(); - } + llvm::ArrayRef getArgumentTypes() { return getFunctionType().getInputs(); } - /// Returns 0 or 1 result type of this function (0 in the case of a function - /// returing void) - llvm::ArrayRef getResultTypes() { - return getFunctionType().getReturnTypes(); - } + /// Returns the result types of this function. + llvm::ArrayRef getResultTypes() { return getFunctionType().getReturnTypes(); } /// Hook for OpTrait::FunctionOpInterfaceTrait, called after verifying that /// the 'type' attribute is present and checks if it holds a function type. diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index d3f49716301d..c805b6887cf3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -379,27 +379,22 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { ```mlir !cir.func - !cir.func !cir.func !cir.func ``` }]; - let parameters = (ins ArrayRefParameter<"mlir::Type">:$inputs, ArrayRefParameter<"mlir::Type">:$returnTypes, + let parameters = (ins ArrayRefParameter<"mlir::Type">:$inputs, "mlir::Type":$returnType, "bool":$varArg); let assemblyFormat = [{ - `<` custom($returnTypes, $inputs, $varArg) `>` + `<` $returnType ` ` `(` custom($inputs, $varArg) `>` }]; let builders = [ - // Construct with an actual return type or explicit !cir.void TypeBuilderWithInferredContext<(ins "llvm::ArrayRef":$inputs, "mlir::Type":$returnType, CArg<"bool", "false">:$isVarArg), [{ - return $_get(returnType.getContext(), inputs, - ::mlir::isa<::cir::VoidType>(returnType) ? llvm::ArrayRef{} - : llvm::ArrayRef{returnType}, - isVarArg); + return $_get(returnType.getContext(), inputs, returnType, isVarArg); }]> ]; @@ -413,11 +408,11 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { /// Returns the number of arguments to the function. unsigned getNumInputs() const { return getInputs().size(); } - /// Returns the result type of the function as an actual return type or - /// explicit !cir.void - mlir::Type getReturnType() const; + /// Returns the result type of the function as an ArrayRef, enabling better + /// integration with generic MLIR utilities. + llvm::ArrayRef getReturnTypes() const; - /// Returns whether the function returns void. + /// Returns whether the function is returns void. bool isVoid() const; /// Returns a clone of this function type with the given argument diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 916010a4f19c..214b864e2bb7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -271,7 +271,7 @@ mlir::Type CIRGenTypes::convertFunctionTypeInternal(QualType QFT) { assert(QFT.isCanonical()); const Type *Ty = QFT.getTypePtr(); const FunctionType *FT = cast(QFT.getTypePtr()); - // First, check whether we can build the full function type. If the function + // First, check whether we can build the full fucntion type. If the function // type depends on an incomplete type (e.g. a struct or enum), we cannot lower // the function type. assert(isFuncTypeConvertible(FT) && "NYI"); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d92e45d8d940..7cc8e306fea6 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2225,26 +2225,6 @@ void cir::FuncOp::build(OpBuilder &builder, OperationState &result, getResAttrsAttrName(result.name)); } -// A specific version of function_interface_impl::parseFunctionSignature able to -// handle the "-> !void" special fake return type. -static ParseResult -parseFunctionSignature(OpAsmParser &parser, bool allowVariadic, - SmallVectorImpl &arguments, - bool &isVariadic, SmallVectorImpl &resultTypes, - SmallVectorImpl &resultAttrs) { - if (function_interface_impl::parseFunctionArgumentList(parser, allowVariadic, - arguments, isVariadic)) - return failure(); - if (succeeded(parser.parseOptionalArrow())) { - if (parser.parseOptionalExclamationKeyword("!void").succeeded()) - // This is just an empty return type and attribute. - return success(); - return function_interface_impl::parseFunctionResultList(parser, resultTypes, - resultAttrs); - } - return success(); -} - ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { llvm::SMLoc loc = parser.getCurrentLocation(); @@ -2305,8 +2285,9 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) { // Parse the function signature. bool isVariadic = false; - if (parseFunctionSignature(parser, /*allowVariadic=*/true, arguments, - isVariadic, resultTypes, resultAttrs)) + if (function_interface_impl::parseFunctionSignature( + parser, /*allowVariadic=*/true, arguments, isVariadic, resultTypes, + resultAttrs)) return failure(); for (auto &arg : arguments) @@ -2509,8 +2490,13 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p.printSymbolName(getSymName()); auto fnType = getFunctionType(); llvm::SmallVector resultTypes; - function_interface_impl::printFunctionSignature( - p, *this, fnType.getInputs(), fnType.isVarArg(), fnType.getReturnTypes()); + if (!fnType.isVoid()) + function_interface_impl::printFunctionSignature( + p, *this, fnType.getInputs(), fnType.isVarArg(), + fnType.getReturnTypes()); + else + function_interface_impl::printFunctionSignature( + p, *this, fnType.getInputs(), fnType.isVarArg(), {}); if (mlir::ArrayAttr annotations = getAnnotationsAttr()) { p << ' '; @@ -2579,11 +2565,6 @@ LogicalResult cir::FuncOp::verifyType() { if (!getNoProto() && type.isVarArg() && type.getNumInputs() == 0) return emitError() << "prototyped function must have at least one non-variadic input"; - if (auto rt = type.getReturnTypes(); - !rt.empty() && mlir::isa(rt.front())) - return emitOpError("The return type for a function returning void should " - "be empty instead of an explicit !cir.void"); - return success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 2b17b048f6c6..df89584fd3a9 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -33,7 +33,6 @@ #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" -#include #include using cir::MissingFeatures; @@ -43,16 +42,13 @@ using cir::MissingFeatures; //===----------------------------------------------------------------------===// static mlir::ParseResult -parseFuncType(mlir::AsmParser &p, llvm::SmallVector &returnTypes, - llvm::SmallVector ¶ms, bool &isVarArg); - -static void printFuncType(mlir::AsmPrinter &p, - mlir::ArrayRef returnTypes, - mlir::ArrayRef params, bool isVarArg); +parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, + bool &isVarArg); +static void printFuncTypeArgs(mlir::AsmPrinter &p, + mlir::ArrayRef params, bool isVarArg); static mlir::ParseResult parsePointerAddrSpace(mlir::AsmParser &p, mlir::Attribute &addrSpaceAttr); - static void printPointerAddrSpace(mlir::AsmPrinter &p, mlir::Attribute addrSpaceAttr); @@ -917,46 +913,9 @@ FuncType FuncType::clone(TypeRange inputs, TypeRange results) const { return get(llvm::to_vector(inputs), results[0], isVarArg()); } -// A special parser is needed for function returning void to consume the "!void" -// returned type in the case there is no alias defined. -static mlir::ParseResult -parseFuncTypeReturn(mlir::AsmParser &p, - llvm::SmallVector &returnTypes) { - if (p.parseOptionalExclamationKeyword("!void").succeeded()) - // !void means no return type. - return p.parseLParen(); - if (succeeded(p.parseOptionalLParen())) - // If we have already a '(', the function has no return type - return mlir::success(); - - mlir::Type type; - auto result = p.parseOptionalType(type); - if (!result.has_value()) - return mlir::failure(); - if (failed(*result) || isa(type)) - // No return type specified. - return p.parseLParen(); - // Otherwise use the actual type. - returnTypes.push_back(type); - return p.parseLParen(); -} - -// A special pretty-printer for function returning void to emit a "!void" -// returned type. Note that there is no real type used here since it does not -// appear in the IR and thus the alias might not be defined and cannot be -// referred to. This is why this is a pure syntactic-sugar string which is used. -static void printFuncTypeReturn(mlir::AsmPrinter &p, - mlir::ArrayRef returnTypes) { - if (returnTypes.empty()) - // Pretty-print no return type as "!void" - p << "!void "; - else - p << returnTypes << ' '; -} - -static mlir::ParseResult -parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, - bool &isVarArg) { +mlir::ParseResult parseFuncTypeArgs(mlir::AsmParser &p, + llvm::SmallVector ¶ms, + bool &isVarArg) { isVarArg = false; // `(` `)` if (succeeded(p.parseOptionalRParen())) @@ -986,10 +945,8 @@ parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, return p.parseRParen(); } -static void printFuncTypeArgs(mlir::AsmPrinter &p, - mlir::ArrayRef params, - bool isVarArg) { - p << '('; +void printFuncTypeArgs(mlir::AsmPrinter &p, mlir::ArrayRef params, + bool isVarArg) { llvm::interleaveComma(params, p, [&p](mlir::Type type) { p.printType(type); }); if (isVarArg) { @@ -1000,37 +957,11 @@ static void printFuncTypeArgs(mlir::AsmPrinter &p, p << ')'; } -static mlir::ParseResult -parseFuncType(mlir::AsmParser &p, llvm::SmallVector &returnTypes, - llvm::SmallVector ¶ms, bool &isVarArg) { - if (failed(parseFuncTypeReturn(p, returnTypes))) - return failure(); - return parseFuncTypeArgs(p, params, isVarArg); -} - -static void printFuncType(mlir::AsmPrinter &p, - mlir::ArrayRef returnTypes, - mlir::ArrayRef params, bool isVarArg) { - printFuncTypeReturn(p, returnTypes); - printFuncTypeArgs(p, params, isVarArg); +llvm::ArrayRef FuncType::getReturnTypes() const { + return static_cast(getImpl())->returnType; } -// Return the actual return type or an explicit !cir.void if the function does -// not return anything -mlir::Type FuncType::getReturnType() const { - if (isVoid()) - return cir::VoidType::get(getContext()); - return static_cast(getImpl())->returnTypes.front(); -} - -bool FuncType::isVoid() const { - auto rt = static_cast(getImpl())->returnTypes; - assert(rt.empty() || - !mlir::isa(rt.front()) && - "The return type for a function returning void should be empty " - "instead of a real !cir.void"); - return rt.empty(); -} +bool FuncType::isVoid() const { return mlir::isa(getReturnType()); } //===----------------------------------------------------------------------===// // MethodType Definitions diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index d655ae9023dd..0c2233ef84c9 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -109,7 +109,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { } } - return FuncType::get(ArgTypes, resultType, FI.isVariadic()); + return FuncType::get(getMLIRContext(), ArgTypes, resultType, FI.isVariadic()); } /// Convert a CIR type to its ABI-specific default form. diff --git a/clang/test/CIR/IR/being_and_nothingness.cir b/clang/test/CIR/IR/being_and_nothingness.cir deleted file mode 100644 index 311acb4893dc..000000000000 --- a/clang/test/CIR/IR/being_and_nothingness.cir +++ /dev/null @@ -1,35 +0,0 @@ -// RUN: cir-opt %s | FileCheck %s -// Exercise different ways to encode a function returning void -!s32i = !cir.int -!fnptr1 = !cir.ptr> -// Note there is no !void alias defined -!fnptr2 = !cir.ptr> -!fnptr3 = !cir.ptr> -module { - cir.func @ind1(%fnptr: !fnptr1, %a : !s32i) { - // CHECK: cir.func @ind1(%arg0: !cir.ptr>, %arg1: !s32i) { - cir.return - } - - cir.func @ind2(%fnptr: !fnptr2, %a : !s32i) { - // CHECK: cir.func @ind2(%arg0: !cir.ptr>, %arg1: !s32i) { - cir.return - } - cir.func @ind3(%fnptr: !fnptr3, %a : !s32i) { - // CHECK: cir.func @ind3(%arg0: !cir.ptr>, %arg1: !s32i) { - cir.return - } - cir.func @f1() -> !cir.void { - // CHECK: cir.func @f1() { - cir.return - } - // Note there is no !void alias defined - cir.func @f2() -> !void { - // CHECK: cir.func @f2() { - cir.return - } - cir.func @f3() { - // CHECK: cir.func @f3() { - cir.return - } -} diff --git a/mlir/include/mlir/IR/OpImplementation.h b/mlir/include/mlir/IR/OpImplementation.h index d61ca01730b1..6c1ff4d0e5e6 100644 --- a/mlir/include/mlir/IR/OpImplementation.h +++ b/mlir/include/mlir/IR/OpImplementation.h @@ -922,9 +922,6 @@ class AsmParser { /// Parse an optional keyword or string. virtual ParseResult parseOptionalKeywordOrString(std::string *result) = 0; - /// Parse the given exclamation-prefixed keyword if present. - virtual ParseResult parseOptionalExclamationKeyword(StringRef keyword) = 0; - //===--------------------------------------------------------------------===// // Attribute/Type Parsing //===--------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Interfaces/FunctionImplementation.h b/mlir/include/mlir/Interfaces/FunctionImplementation.h index 110025bc89f5..a5e6963e4e66 100644 --- a/mlir/include/mlir/Interfaces/FunctionImplementation.h +++ b/mlir/include/mlir/Interfaces/FunctionImplementation.h @@ -64,28 +64,6 @@ parseFunctionSignature(OpAsmParser &parser, bool allowVariadic, bool &isVariadic, SmallVectorImpl &resultTypes, SmallVectorImpl &resultAttrs); -/// Parse a function argument list using `parser`. The `allowVariadic` argument -/// indicates whether functions with variadic arguments are supported. The -/// trailing arguments are populated by this function with names, types, -/// attributes and locations of the arguments. -ParseResult -parseFunctionArgumentList(OpAsmParser &parser, bool allowVariadic, - SmallVectorImpl &arguments, - bool &isVariadic); - -/// Parse a function result list using `parser`. -/// -/// function-result-list ::= function-result-list-parens -/// | non-function-type -/// function-result-list-parens ::= `(` `)` -/// | `(` function-result-list-no-parens `)` -/// function-result-list-no-parens ::= function-result (`,` function-result)* -/// function-result ::= type attribute-dict? -/// -ParseResult -parseFunctionResultList(OpAsmParser &parser, SmallVectorImpl &resultTypes, - SmallVectorImpl &resultAttrs); - /// Parser implementation for function-like operations. Uses /// `funcTypeBuilder` to construct the custom function type given lists of /// input and output types. The parser sets the `typeAttrName` attribute to the diff --git a/mlir/lib/AsmParser/AsmParserImpl.h b/mlir/lib/AsmParser/AsmParserImpl.h index 8c7ce16fe54d..d5b72d63813a 100644 --- a/mlir/lib/AsmParser/AsmParserImpl.h +++ b/mlir/lib/AsmParser/AsmParserImpl.h @@ -396,19 +396,6 @@ class AsmParserImpl : public BaseT { return parseOptionalString(result); } - /// Parse the given exclamation-prefixed keyword if present. - ParseResult parseOptionalExclamationKeyword(StringRef keyword) override { - if (parser.getToken().isCodeCompletion()) - return parser.codeCompleteOptionalTokens(keyword); - - // Check that the current token has the same spelling. - if (!parser.getToken().is(Token::Kind::exclamation_identifier) || - parser.getTokenSpelling() != keyword) - return failure(); - parser.consumeToken(); - return success(); - } - //===--------------------------------------------------------------------===// // Attribute Parsing //===--------------------------------------------------------------------===// diff --git a/mlir/lib/Interfaces/FunctionImplementation.cpp b/mlir/lib/Interfaces/FunctionImplementation.cpp index 9922e3c28eab..988feee665fe 100644 --- a/mlir/lib/Interfaces/FunctionImplementation.cpp +++ b/mlir/lib/Interfaces/FunctionImplementation.cpp @@ -13,9 +13,10 @@ using namespace mlir; -ParseResult function_interface_impl::parseFunctionArgumentList( - OpAsmParser &parser, bool allowVariadic, - SmallVectorImpl &arguments, bool &isVariadic) { +static ParseResult +parseFunctionArgumentList(OpAsmParser &parser, bool allowVariadic, + SmallVectorImpl &arguments, + bool &isVariadic) { // Parse the function arguments. The argument list either has to consistently // have ssa-id's followed by types, or just be a type list. It isn't ok to @@ -78,9 +79,9 @@ ParseResult function_interface_impl::parseFunctionArgumentList( /// function-result-list-no-parens ::= function-result (`,` function-result)* /// function-result ::= type attribute-dict? /// -ParseResult function_interface_impl::parseFunctionResultList( - OpAsmParser &parser, SmallVectorImpl &resultTypes, - SmallVectorImpl &resultAttrs) { +static ParseResult +parseFunctionResultList(OpAsmParser &parser, SmallVectorImpl &resultTypes, + SmallVectorImpl &resultAttrs) { if (failed(parser.parseOptionalLParen())) { // We already know that there is no `(`, so parse a type. // Because there is no `(`, it cannot be a function type. From 9a3f1df45795346d05bff4343671ad253b18f87c Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Fri, 10 Jan 2025 18:37:36 +0100 Subject: [PATCH 2206/2301] [CIR] Remove return !cir.void from IR and textual representation (#1249) C/C++ functions returning void had an explicit !cir.void return type while not having any returned value, which was breaking a lot of MLIR invariants when the CIR dialect is used in a greater context, for example with the inliner. Now, a C/C++ function returning void has no return type and no return values, which does not break the MLIR invariant about the same number of return types and returned values. This change does not keeps the same parsing/pretty-printed syntax as before for compatibility like in https://github.com/llvm/clangir/pull/1203 because it requires some new features from the MLIR parser infrastructure itself, which is not great. This uses an optional type for function return type. The default MLIR parser for optional parameters requires an optional anchor we do not have in the syntax, so use a custom FuncType parser to handle the optional return type. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 13 ++- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 25 +++-- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 2 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 +-- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 94 ++++++++++++++++--- .../Transforms/TargetLowering/LowerTypes.cpp | 2 +- clang/test/CIR/CodeGen/fun-ptr.c | 6 +- clang/test/CIR/CodeGen/gnu-extension.c | 2 +- clang/test/CIR/CodeGen/member-init-struct.cpp | 8 +- clang/test/CIR/CodeGen/multi-vtable.cpp | 4 +- clang/test/CIR/CodeGen/no-proto-fun-ptr.c | 14 +-- clang/test/CIR/CodeGen/pointer-arith-ext.c | 8 +- .../CIR/CodeGen/pointer-to-member-func.cpp | 16 ++-- clang/test/CIR/CodeGen/static.cpp | 14 +-- clang/test/CIR/IR/being_and_nothingness.cir | 28 ++++++ 15 files changed, 182 insertions(+), 68 deletions(-) create mode 100644 clang/test/CIR/IR/being_and_nothingness.cir diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 2e4c707c895c..dfcc836756d1 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3474,8 +3474,6 @@ def FuncOp : CIR_Op<"func", [ /// Returns the results types that the callable region produces when /// executed. llvm::ArrayRef getCallableResults() { - if (::llvm::isa(getFunctionType().getReturnType())) - return {}; return getFunctionType().getReturnTypes(); } @@ -3492,10 +3490,15 @@ def FuncOp : CIR_Op<"func", [ } /// Returns the argument types of this function. - llvm::ArrayRef getArgumentTypes() { return getFunctionType().getInputs(); } + llvm::ArrayRef getArgumentTypes() { + return getFunctionType().getInputs(); + } - /// Returns the result types of this function. - llvm::ArrayRef getResultTypes() { return getFunctionType().getReturnTypes(); } + /// Returns 0 or 1 result type of this function (0 in the case of a function + /// returing void) + llvm::ArrayRef getResultTypes() { + return getFunctionType().getReturnTypes(); + } /// Hook for OpTrait::FunctionOpInterfaceTrait, called after verifying that /// the 'type' attribute is present and checks if it holds a function type. diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index c805b6887cf3..53ea393abe3f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -372,29 +372,38 @@ def CIR_VectorType : CIR_Type<"Vector", "vector", def CIR_FuncType : CIR_Type<"Func", "func"> { let summary = "CIR function type"; let description = [{ - The `!cir.func` is a function type. It consists of a single return type, a - list of parameter types and can optionally be variadic. + The `!cir.func` is a function type. It consists of an optional return type, + a list of parameter types and can optionally be variadic. Example: ```mlir + !cir.func<()> !cir.func + !cir.func<(!s8i, !s8i)> !cir.func !cir.func ``` }]; - let parameters = (ins ArrayRefParameter<"mlir::Type">:$inputs, "mlir::Type":$returnType, + let parameters = (ins ArrayRefParameter<"mlir::Type">:$inputs, + "mlir::Type":$optionalReturnType, "bool":$varArg); + // Use a custom parser to handle the optional return and argument types + // without an optional anchor. let assemblyFormat = [{ - `<` $returnType ` ` `(` custom($inputs, $varArg) `>` + `<` custom($optionalReturnType, $inputs, $varArg) `>` }]; let builders = [ + // Construct with an actual return type or explicit !cir.void TypeBuilderWithInferredContext<(ins "llvm::ArrayRef":$inputs, "mlir::Type":$returnType, CArg<"bool", "false">:$isVarArg), [{ - return $_get(returnType.getContext(), inputs, returnType, isVarArg); + return $_get(returnType.getContext(), inputs, + mlir::isa(returnType) ? nullptr + : returnType, + isVarArg); }]> ]; @@ -408,11 +417,15 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { /// Returns the number of arguments to the function. unsigned getNumInputs() const { return getInputs().size(); } + /// Returns the result type of the function as an actual return type or + /// explicit !cir.void + mlir::Type getReturnType() const; + /// Returns the result type of the function as an ArrayRef, enabling better /// integration with generic MLIR utilities. llvm::ArrayRef getReturnTypes() const; - /// Returns whether the function is returns void. + /// Returns whether the function returns void. bool isVoid() const; /// Returns a clone of this function type with the given argument diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 214b864e2bb7..916010a4f19c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -271,7 +271,7 @@ mlir::Type CIRGenTypes::convertFunctionTypeInternal(QualType QFT) { assert(QFT.isCanonical()); const Type *Ty = QFT.getTypePtr(); const FunctionType *FT = cast(QFT.getTypePtr()); - // First, check whether we can build the full fucntion type. If the function + // First, check whether we can build the full function type. If the function // type depends on an incomplete type (e.g. a struct or enum), we cannot lower // the function type. assert(isFuncTypeConvertible(FT) && "NYI"); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 7cc8e306fea6..d491fdcc211c 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2490,13 +2490,8 @@ void cir::FuncOp::print(OpAsmPrinter &p) { p.printSymbolName(getSymName()); auto fnType = getFunctionType(); llvm::SmallVector resultTypes; - if (!fnType.isVoid()) - function_interface_impl::printFunctionSignature( - p, *this, fnType.getInputs(), fnType.isVarArg(), - fnType.getReturnTypes()); - else - function_interface_impl::printFunctionSignature( - p, *this, fnType.getInputs(), fnType.isVarArg(), {}); + function_interface_impl::printFunctionSignature( + p, *this, fnType.getInputs(), fnType.isVarArg(), fnType.getReturnTypes()); if (mlir::ArrayAttr annotations = getAnnotationsAttr()) { p << ' '; @@ -2565,6 +2560,11 @@ LogicalResult cir::FuncOp::verifyType() { if (!getNoProto() && type.isVarArg() && type.getNumInputs() == 0) return emitError() << "prototyped function must have at least one non-variadic input"; + if (auto rt = type.getReturnTypes(); + !rt.empty() && mlir::isa(rt.front())) + return emitOpError("The return type for a function returning void should " + "be empty instead of an explicit !cir.void"); + return success(); } diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index df89584fd3a9..04ecf52a428d 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -33,6 +33,7 @@ #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" +#include #include using cir::MissingFeatures; @@ -41,12 +42,13 @@ using cir::MissingFeatures; // CIR Custom Parser/Printer Signatures //===----------------------------------------------------------------------===// -static mlir::ParseResult -parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, - bool &isVarArg); -static void printFuncTypeArgs(mlir::AsmPrinter &p, - mlir::ArrayRef params, bool isVarArg); +static mlir::ParseResult parseFuncType(mlir::AsmParser &p, + mlir::Type &optionalReturnTypes, + llvm::SmallVector ¶ms, + bool &isVarArg); +static void printFuncType(mlir::AsmPrinter &p, mlir::Type optionalReturnTypes, + mlir::ArrayRef params, bool isVarArg); static mlir::ParseResult parsePointerAddrSpace(mlir::AsmParser &p, mlir::Attribute &addrSpaceAttr); static void printPointerAddrSpace(mlir::AsmPrinter &p, @@ -913,9 +915,38 @@ FuncType FuncType::clone(TypeRange inputs, TypeRange results) const { return get(llvm::to_vector(inputs), results[0], isVarArg()); } -mlir::ParseResult parseFuncTypeArgs(mlir::AsmParser &p, - llvm::SmallVector ¶ms, - bool &isVarArg) { +// A special parser is needed for function returning void to handle the missing +// type. +static mlir::ParseResult parseFuncTypeReturn(mlir::AsmParser &p, + mlir::Type &optionalReturnType) { + if (succeeded(p.parseOptionalLParen())) { + // If we have already a '(', the function has no return type + optionalReturnType = {}; + return mlir::success(); + } + mlir::Type type; + if (p.parseType(type)) + return mlir::failure(); + if (isa(type)) + // An explicit !cir.void means also no return type. + optionalReturnType = {}; + else + // Otherwise use the actual type. + optionalReturnType = type; + return p.parseLParen(); +} + +// A special pretty-printer for function returning or not a result. +static void printFuncTypeReturn(mlir::AsmPrinter &p, + mlir::Type optionalReturnType) { + if (optionalReturnType) + p << optionalReturnType << ' '; + p << '('; +} + +static mlir::ParseResult +parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, + bool &isVarArg) { isVarArg = false; // `(` `)` if (succeeded(p.parseOptionalRParen())) @@ -945,8 +976,9 @@ mlir::ParseResult parseFuncTypeArgs(mlir::AsmParser &p, return p.parseRParen(); } -void printFuncTypeArgs(mlir::AsmPrinter &p, mlir::ArrayRef params, - bool isVarArg) { +static void printFuncTypeArgs(mlir::AsmPrinter &p, + mlir::ArrayRef params, + bool isVarArg) { llvm::interleaveComma(params, p, [&p](mlir::Type type) { p.printType(type); }); if (isVarArg) { @@ -957,11 +989,49 @@ void printFuncTypeArgs(mlir::AsmPrinter &p, mlir::ArrayRef params, p << ')'; } +// Use a custom parser to handle the optional return and argument types without +// an optional anchor. +static mlir::ParseResult parseFuncType(mlir::AsmParser &p, + mlir::Type &optionalReturnTypes, + llvm::SmallVector ¶ms, + bool &isVarArg) { + if (failed(parseFuncTypeReturn(p, optionalReturnTypes))) + return failure(); + return parseFuncTypeArgs(p, params, isVarArg); +} + +static void printFuncType(mlir::AsmPrinter &p, mlir::Type optionalReturnTypes, + mlir::ArrayRef params, bool isVarArg) { + printFuncTypeReturn(p, optionalReturnTypes); + printFuncTypeArgs(p, params, isVarArg); +} + +// Return the actual return type or an explicit !cir.void if the function does +// not return anything +mlir::Type FuncType::getReturnType() const { + if (isVoid()) + return cir::VoidType::get(getContext()); + return static_cast(getImpl())->optionalReturnType; +} + +/// Returns the result type of the function as an ArrayRef, enabling better +/// integration with generic MLIR utilities. llvm::ArrayRef FuncType::getReturnTypes() const { - return static_cast(getImpl())->returnType; + if (isVoid()) + return {}; + return static_cast(getImpl())->optionalReturnType; } -bool FuncType::isVoid() const { return mlir::isa(getReturnType()); } +// Whether the function returns void +bool FuncType::isVoid() const { + auto rt = + static_cast(getImpl())->optionalReturnType; + assert(!rt || + !mlir::isa(rt) && + "The return type for a function returning void should be empty " + "instead of a real !cir.void"); + return !rt; +} //===----------------------------------------------------------------------===// // MethodType Definitions diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp index 0c2233ef84c9..d655ae9023dd 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerTypes.cpp @@ -109,7 +109,7 @@ FuncType LowerTypes::getFunctionType(const LowerFunctionInfo &FI) { } } - return FuncType::get(getMLIRContext(), ArgTypes, resultType, FI.isVariadic()); + return FuncType::get(ArgTypes, resultType, FI.isVariadic()); } /// Convert a CIR type to its ABI-specific default form. diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c index 7092c58e64d0..0f9a98300e32 100644 --- a/clang/test/CIR/CodeGen/fun-ptr.c +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -55,10 +55,10 @@ int foo(Data* d) { return f(d); } -// CIR: cir.func private {{@.*test.*}}() -> !cir.ptr> +// CIR: cir.func private {{@.*test.*}}() -> !cir.ptr> // CIR: cir.func {{@.*bar.*}}() -// CIR: [[RET:%.*]] = cir.call {{@.*test.*}}() : () -> !cir.ptr> -// CIR: cir.call [[RET]]() : (!cir.ptr>) -> () +// CIR: [[RET:%.*]] = cir.call {{@.*test.*}}() : () -> !cir.ptr> +// CIR: cir.call [[RET]]() : (!cir.ptr>) -> () // CIR: cir.return // LLVM: declare ptr {{@.*test.*}}() diff --git a/clang/test/CIR/CodeGen/gnu-extension.c b/clang/test/CIR/CodeGen/gnu-extension.c index 7386de78176f..5a9f4b0b22b4 100644 --- a/clang/test/CIR/CodeGen/gnu-extension.c +++ b/clang/test/CIR/CodeGen/gnu-extension.c @@ -15,5 +15,5 @@ void bar(void) { } //CHECK: cir.func @bar() -//CHECK: {{.*}} = cir.get_global @bar : !cir.ptr> +//CHECK: {{.*}} = cir.get_global @bar : !cir.ptr> //CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/member-init-struct.cpp b/clang/test/CIR/CodeGen/member-init-struct.cpp index 8440526c1a1c..4c5161616475 100644 --- a/clang/test/CIR/CodeGen/member-init-struct.cpp +++ b/clang/test/CIR/CodeGen/member-init-struct.cpp @@ -34,7 +34,7 @@ C a, b(x), c(0, 2); // CHECK: %[[VAL_8:.*]] = cir.get_member %[[VAL_2]][2] {name = "d"} : !cir.ptr -> !cir.ptr> // CHECK: %[[VAL_9:.*]] = cir.const {{.*}} : !cir.array // CHECK: cir.store %[[VAL_9]], %[[VAL_8]] : !cir.array, !cir.ptr> -// CHECK: %[[VAL_10:.*]] = cir.get_member %[[VAL_2]][4] {name = "e"} : !cir.ptr -> !cir.ptr in !ty_C>> -// CHECK: %[[VAL_11:.*]] = cir.const #cir.method : !cir.method in !ty_C> -// CHECK: cir.store %[[VAL_11]], %[[VAL_10]] : !cir.method in !ty_C>, !cir.ptr in !ty_C>> -// CHECK: cir.return \ No newline at end of file +// CHECK: %[[VAL_10:.*]] = cir.get_member %[[VAL_2]][4] {name = "e"} : !cir.ptr -> !cir.ptr in !ty_C>> +// CHECK: %[[VAL_11:.*]] = cir.const #cir.method : !cir.method in !ty_C> +// CHECK: cir.store %[[VAL_11]], %[[VAL_10]] : !cir.method in !ty_C>, !cir.ptr in !ty_C>> +// CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/multi-vtable.cpp b/clang/test/CIR/CodeGen/multi-vtable.cpp index b887e78c8239..8b709a3ddeaf 100644 --- a/clang/test/CIR/CodeGen/multi-vtable.cpp +++ b/clang/test/CIR/CodeGen/multi-vtable.cpp @@ -74,9 +74,9 @@ int main() { // CIR: cir.func @main() -> !s32i extra(#fn_attr) { -// CIR: %{{[0-9]+}} = cir.vtable.address_point( %{{[0-9]+}} : !cir.ptr)>>>, vtable_index = 0, address_point_index = 0) : !cir.ptr)>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point( %{{[0-9]+}} : !cir.ptr)>>>, vtable_index = 0, address_point_index = 0) : !cir.ptr)>>> -// CIR: %{{[0-9]+}} = cir.vtable.address_point( %{{[0-9]+}} : !cir.ptr)>>>, vtable_index = 0, address_point_index = 0) : !cir.ptr)>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point( %{{[0-9]+}} : !cir.ptr)>>>, vtable_index = 0, address_point_index = 0) : !cir.ptr)>>> // CIR: } diff --git a/clang/test/CIR/CodeGen/no-proto-fun-ptr.c b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c index b4d92db11963..92c3eb6516e1 100644 --- a/clang/test/CIR/CodeGen/no-proto-fun-ptr.c +++ b/clang/test/CIR/CodeGen/no-proto-fun-ptr.c @@ -7,9 +7,9 @@ void check_noproto_ptr() { } // CHECK: cir.func no_proto @check_noproto_ptr() -// CHECK: [[ALLOC:%.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["fun", init] {alignment = 8 : i64} -// CHECK: [[GGO:%.*]] = cir.get_global @empty : !cir.ptr> -// CHECK: cir.store [[GGO]], [[ALLOC]] : !cir.ptr>, !cir.ptr>> +// CHECK: [[ALLOC:%.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["fun", init] {alignment = 8 : i64} +// CHECK: [[GGO:%.*]] = cir.get_global @empty : !cir.ptr> +// CHECK: cir.store [[GGO]], [[ALLOC]] : !cir.ptr>, !cir.ptr>> // CHECK: cir.return void empty(void) {} @@ -20,8 +20,8 @@ void buz() { } // CHECK: cir.func no_proto @buz() -// CHECK: [[FNPTR_ALLOC:%.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["func"] {alignment = 8 : i64} -// CHECK: [[FNPTR:%.*]] = cir.load deref [[FNPTR_ALLOC]] : !cir.ptr>>, !cir.ptr> -// CHECK: [[CAST:%.*]] = cir.cast(bitcast, %1 : !cir.ptr>), !cir.ptr> -// CHECK: cir.call [[CAST]]() : (!cir.ptr>) -> () +// CHECK: [[FNPTR_ALLOC:%.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["func"] {alignment = 8 : i64} +// CHECK: [[FNPTR:%.*]] = cir.load deref [[FNPTR_ALLOC]] : !cir.ptr>>, !cir.ptr> +// CHECK: [[CAST:%.*]] = cir.cast(bitcast, %1 : !cir.ptr>), !cir.ptr> +// CHECK: cir.call [[CAST]]() : (!cir.ptr>) -> () // CHECK: cir.return diff --git a/clang/test/CIR/CodeGen/pointer-arith-ext.c b/clang/test/CIR/CodeGen/pointer-arith-ext.c index 558ad823cae4..c1a3d374b44b 100644 --- a/clang/test/CIR/CodeGen/pointer-arith-ext.c +++ b/clang/test/CIR/CodeGen/pointer-arith-ext.c @@ -50,9 +50,9 @@ void *f4_1(void *a, int b) { return (a -= b); } FP f5(FP a, int b) { return a + b; } // CIR-LABEL: f5 -// CIR: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>>, !cir.ptr> +// CIR: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>>, !cir.ptr> // CIR: %[[STRIDE:.*]] = cir.load {{.*}} : !cir.ptr, !s32i -// CIR: cir.ptr_stride(%[[PTR]] : !cir.ptr>, %[[STRIDE]] : !s32i) +// CIR: cir.ptr_stride(%[[PTR]] : !cir.ptr>, %[[STRIDE]] : !s32i) // LLVM-LABEL: f5 // LLVM: %[[PTR:.*]] = load ptr, ptr {{.*}}, align 8 @@ -67,10 +67,10 @@ FP f6_1(int a, FP b) { return (a += b); } FP f7(FP a, int b) { return a - b; } // CIR-LABEL: f7 -// CIR: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>>, !cir.ptr> +// CIR: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>>, !cir.ptr> // CIR: %[[STRIDE:.*]] = cir.load {{.*}} : !cir.ptr, !s32i // CIR: %[[SUB:.*]] = cir.unary(minus, %[[STRIDE]]) : !s32i, !s32i -// CIR: cir.ptr_stride(%[[PTR]] : !cir.ptr>, %[[SUB]] : !s32i) +// CIR: cir.ptr_stride(%[[PTR]] : !cir.ptr>, %[[SUB]] : !s32i) // LLVM-LABEL: f7 // LLVM: %[[PTR:.*]] = load ptr, ptr {{.*}}, align 8 diff --git a/clang/test/CIR/CodeGen/pointer-to-member-func.cpp b/clang/test/CIR/CodeGen/pointer-to-member-func.cpp index 6f8b3363bfa3..f3c426c4b1ee 100644 --- a/clang/test/CIR/CodeGen/pointer-to-member-func.cpp +++ b/clang/test/CIR/CodeGen/pointer-to-member-func.cpp @@ -11,24 +11,24 @@ auto make_non_virtual() -> void (Foo::*)(int) { return &Foo::m1; } -// CHECK-LABEL: cir.func @_Z16make_non_virtualv() -> !cir.method in !ty_Foo> -// CHECK: %{{.+}} = cir.const #cir.method<@_ZN3Foo2m1Ei> : !cir.method in !ty_Foo> +// CHECK-LABEL: cir.func @_Z16make_non_virtualv() -> !cir.method in !ty_Foo> +// CHECK: %{{.+}} = cir.const #cir.method<@_ZN3Foo2m1Ei> : !cir.method in !ty_Foo> // CHECK: } auto make_virtual() -> void (Foo::*)(int) { return &Foo::m3; } -// CHECK-LABEL: cir.func @_Z12make_virtualv() -> !cir.method in !ty_Foo> -// CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_Foo> +// CHECK-LABEL: cir.func @_Z12make_virtualv() -> !cir.method in !ty_Foo> +// CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_Foo> // CHECK: } auto make_null() -> void (Foo::*)(int) { return nullptr; } -// CHECK-LABEL: cir.func @_Z9make_nullv() -> !cir.method in !ty_Foo> -// CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_Foo> +// CHECK-LABEL: cir.func @_Z9make_nullv() -> !cir.method in !ty_Foo> +// CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_Foo> // CHECK: } void call(Foo *obj, void (Foo::*func)(int), int arg) { @@ -36,7 +36,7 @@ void call(Foo *obj, void (Foo::*func)(int), int arg) { } // CHECK-LABEL: cir.func @_Z4callP3FooMS_FviEi -// CHECK: %[[CALLEE:.+]], %[[THIS:.+]] = cir.get_method %{{.+}}, %{{.+}} : (!cir.method in !ty_Foo>, !cir.ptr) -> (!cir.ptr, !s32i)>>, !cir.ptr) +// CHECK: %[[CALLEE:.+]], %[[THIS:.+]] = cir.get_method %{{.+}}, %{{.+}} : (!cir.method in !ty_Foo>, !cir.ptr) -> (!cir.ptr, !s32i)>>, !cir.ptr) // CHECK-NEXT: %[[#ARG:]] = cir.load %{{.+}} : !cir.ptr, !s32i -// CHECK-NEXT: cir.call %[[CALLEE]](%[[THIS]], %[[#ARG]]) : (!cir.ptr, !s32i)>>, !cir.ptr, !s32i) -> () +// CHECK-NEXT: cir.call %[[CALLEE]](%[[THIS]], %[[#ARG]]) : (!cir.ptr, !s32i)>>, !cir.ptr, !s32i) -> () // CHECK: } diff --git a/clang/test/CIR/CodeGen/static.cpp b/clang/test/CIR/CodeGen/static.cpp index 88ff490c14ff..657396845e13 100644 --- a/clang/test/CIR/CodeGen/static.cpp +++ b/clang/test/CIR/CodeGen/static.cpp @@ -40,7 +40,7 @@ static Init __ioinit2(false); // AFTER: module {{.*}} attributes {{.*}}cir.global_ctors = [#cir.global_ctor<"__cxx_global_var_init", 65536>, #cir.global_ctor<"__cxx_global_var_init.1", 65536>] // AFTER-NEXT: cir.global "private" external @__dso_handle : i8 -// AFTER-NEXT: cir.func private @__cxa_atexit(!cir.ptr)>>, !cir.ptr, !cir.ptr) +// AFTER-NEXT: cir.func private @__cxa_atexit(!cir.ptr)>>, !cir.ptr, !cir.ptr) // AFTER-NEXT: cir.func private @_ZN4InitC1Eb(!cir.ptr, !cir.bool) // AFTER-NEXT: cir.func private @_ZN4InitD1Ev(!cir.ptr) // AFTER-NEXT: cir.global "private" internal dsolocal @_ZL8__ioinit = #cir.zero : !ty_Init {alignment = 1 : i64, ast = #cir.var.decl.ast} @@ -49,11 +49,11 @@ static Init __ioinit2(false); // AFTER-NEXT: %1 = cir.const #true // AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // AFTER-NEXT: %2 = cir.get_global @_ZL8__ioinit : !cir.ptr -// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> -// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> +// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> +// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> // AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr // AFTER-NEXT: %6 = cir.get_global @__dso_handle : !cir.ptr -// AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () +// AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return // AFTER: cir.global "private" internal dsolocal @_ZL9__ioinit2 = #cir.zero : !ty_Init {alignment = 1 : i64, ast = #cir.var.decl.ast} // AFTER-NEXT: cir.func internal private @__cxx_global_var_init.1() @@ -61,11 +61,11 @@ static Init __ioinit2(false); // AFTER-NEXT: %1 = cir.const #false // AFTER-NEXT: cir.call @_ZN4InitC1Eb(%0, %1) : (!cir.ptr, !cir.bool) -> () // AFTER-NEXT: %2 = cir.get_global @_ZL9__ioinit2 : !cir.ptr -// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> -// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> +// AFTER-NEXT: %3 = cir.get_global @_ZN4InitD1Ev : !cir.ptr)>> +// AFTER-NEXT: %4 = cir.cast(bitcast, %3 : !cir.ptr)>>), !cir.ptr)>> // AFTER-NEXT: %5 = cir.cast(bitcast, %2 : !cir.ptr), !cir.ptr // AFTER-NEXT: %6 = cir.get_global @__dso_handle : !cir.ptr -// AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () +// AFTER-NEXT: cir.call @__cxa_atexit(%4, %5, %6) : (!cir.ptr)>>, !cir.ptr, !cir.ptr) -> () // AFTER-NEXT: cir.return // AFTER: cir.func private @_GLOBAL__sub_I_static.cpp() // AFTER-NEXT: cir.call @__cxx_global_var_init() : () -> () diff --git a/clang/test/CIR/IR/being_and_nothingness.cir b/clang/test/CIR/IR/being_and_nothingness.cir new file mode 100644 index 000000000000..076c75a5b192 --- /dev/null +++ b/clang/test/CIR/IR/being_and_nothingness.cir @@ -0,0 +1,28 @@ +// RUN: cir-opt %s | FileCheck %s +// Exercise different ways to encode a function returning void +!s32i = !cir.int +!f = !cir.func<()> +!f2 = !cir.func +!void = !cir.void +!fnptr2 = !cir.ptr> +// Try some useless !void +!fnptr3 = !cir.ptr> +module { + cir.func @ind2(%fnptr: !fnptr2, %a : !s32i) { + // CHECK: cir.func @ind2(%arg0: !cir.ptr>, %arg1: !s32i) { + cir.return + } + cir.func @f2() { + // CHECK: cir.func @f2() { + cir.return + } + // Try with a lot of useless !void + cir.func @ind3(%fnptr: !fnptr3, %a : !s32i) -> !void { + // CHECK: cir.func @ind3(%arg0: !cir.ptr>, %arg1: !s32i) { + cir.return + } + cir.func @f3() -> !cir.void { + // CHECK: cir.func @f3() { + cir.return + } +} From 82b2b82a70ffacf0c80558f80edabd6df00e9c83 Mon Sep 17 00:00:00 2001 From: orbiri Date: Tue, 14 Jan 2025 00:54:55 +0200 Subject: [PATCH 2207/2301] [CIR][CodeGen][NFC] Add documentation regarding memory emission deferral (#1279) --- clang/lib/CIR/CodeGen/CIRGenFunction.h | 9 +++++++++ clang/lib/CIR/CodeGen/CIRGenTypes.h | 3 ++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index ce26520114e8..047f7488f5dc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1114,6 +1114,10 @@ class CIRGenFunction : public CIRGenTypeCache { /// addressed later. RValue GetUndefRValue(clang::QualType Ty); + /// Given a value and its clang type, returns the value casted from its memory + /// representation. + /// Note: CIR defers most of the special casting to the final lowering passes + /// to conserve the high level information. mlir::Value emitFromMemory(mlir::Value Value, clang::QualType Ty); mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &S); @@ -1339,7 +1343,12 @@ class CIRGenFunction : public CIRGenTypeCache { } void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit); + /// Given a value and its clang type, returns the value casted to its memory + /// representation. + /// Note: CIR defers most of the special casting to the final lowering passes + /// to conserve the high level information. mlir::Value emitToMemory(mlir::Value Value, clang::QualType Ty); + void emitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init); /// Store the specified rvalue into the specified diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 1d01a56cfa41..b2ca274f63ec 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -186,7 +186,8 @@ class CIRGenTypes { /// convertType in that it is used to convert to the memory representation /// for a type. For example, the scalar representation for _Bool is i1, but /// the memory representation is usually i8 or i32, depending on the target. - // TODO: convert this comment to account for MLIR's equivalence + /// Note: CIR defers most of the special conversions to the final lowering + /// passes to conserve the high level information. mlir::Type convertTypeForMem(clang::QualType, bool forBitField = false); /// Get the CIR function type for \arg Info. From f5973cb3e76181b0b0747329a4ea029ca22702df Mon Sep 17 00:00:00 2001 From: Ronan Keryell Date: Tue, 14 Jan 2025 00:01:21 +0100 Subject: [PATCH 2208/2301] [CIR][Doc][NFC] Add a description for all the passes (#1282) Some passes not declared with TableGen did not have descriptions. --- clang/include/clang/CIR/Dialect/Passes.td | 6 +++--- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 +++++- clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 6 +++++- clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp | 7 ++++++- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/Passes.td b/clang/include/clang/CIR/Dialect/Passes.td index 4a8d2bfa9672..c2fac40b0f0f 100644 --- a/clang/include/clang/CIR/Dialect/Passes.td +++ b/clang/include/clang/CIR/Dialect/Passes.td @@ -118,8 +118,8 @@ def HoistAllocas : Pass<"cir-hoist-allocas"> { } def FlattenCFG : Pass<"cir-flatten-cfg"> { - let summary = "Produces flatten cfg"; - let description = [{ + let summary = "Produces flatten CFG"; + let description = [{ This pass transforms CIR and inline all the nested regions. Thus, the next post condtions are met after the pass applied: - there is not any nested region in a function body @@ -132,7 +132,7 @@ def FlattenCFG : Pass<"cir-flatten-cfg"> { } def GotoSolver : Pass<"cir-goto-solver"> { - let summary = "Replaces goto operatations with branches"; + let summary = "Replaces goto operations with branches"; let description = [{ This pass transforms CIR and replaces goto-s with branch operations to the proper blocks. diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f23a73e3e553..0fd29e6f273b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1356,7 +1356,11 @@ struct ConvertCIRToLLVMPass void processCIRAttrs(mlir::ModuleOp moduleOp); - virtual StringRef getArgument() const override { return "cir-flat-to-llvm"; } + StringRef getDescription() const override { + return "Convert the prepared CIR dialect module to LLVM dialect"; + } + + StringRef getArgument() const override { return "cir-flat-to-llvm"; } }; mlir::LogicalResult diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index e4fccd082351..7f0b3c803e83 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -86,7 +86,11 @@ struct ConvertCIRToMLIRPass } void runOnOperation() final; - virtual StringRef getArgument() const override { return "cir-to-mlir"; } + StringRef getDescription() const override { + return "Convert the CIR dialect module to MLIR standard dialects"; + } + + StringRef getArgument() const override { return "cir-to-mlir"; } }; class CIRCallOpLowering : public mlir::OpConversionPattern { diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp index 930ce1c12f68..728c061dcac0 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerMLIRToLLVM.cpp @@ -49,7 +49,12 @@ struct ConvertMLIRToLLVMPass } void runOnOperation() final; - virtual StringRef getArgument() const override { return "cir-mlir-to-llvm"; } + StringRef getDescription() const override { + return "Convert the MLIR standard dialects produced from CIR to MLIR LLVM " + "dialect"; + } + + StringRef getArgument() const override { return "cir-mlir-to-llvm"; } }; void ConvertMLIRToLLVMPass::runOnOperation() { From ffbd297631fff67a8222db0db7665540137abc73 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Tue, 14 Jan 2025 22:11:30 +0300 Subject: [PATCH 2209/2301] [CIR][CIRGen] Fix CXX codegen for default args (#1278) This PR adds a support for for default arguments --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 5 +++++ clang/test/CIR/CodeGen/defaultarg.cpp | 12 ++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 clang/test/CIR/CodeGen/defaultarg.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 25fb9a8b501d..ef2a8d60b9f5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2636,6 +2636,11 @@ LValue CIRGenFunction::emitLValue(const Expr *E) { // bitfield lvalue or some other non-simple lvalue? return LV; } + case Expr::CXXDefaultArgExprClass: { + auto *DAE = cast(E); + CXXDefaultArgExprScope Scope(*this, DAE); + return emitLValue(DAE->getExpr()); + } case Expr::ParenExprClass: return emitLValue(cast(E)->getSubExpr()); case Expr::DeclRefExprClass: diff --git a/clang/test/CIR/CodeGen/defaultarg.cpp b/clang/test/CIR/CodeGen/defaultarg.cpp new file mode 100644 index 000000000000..af052db501d2 --- /dev/null +++ b/clang/test/CIR/CodeGen/defaultarg.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -std=c++17 %s -o - | FileCheck %s + +void bar(const int &i = 42); + +void foo() { + bar(); +} + +// CHECK: [[TMP0:%.*]] = cir.alloca !s32i +// CHECK: [[TMP1:%.*]] = cir.const #cir.int<42> +// CHECK: cir.store [[TMP1]], [[TMP0]] +// CHECK: cir.call @_Z3barRKi([[TMP0]]) From 6b97d9e272e3f2e50e392b81b02335df8e5f213c Mon Sep 17 00:00:00 2001 From: Guojin Date: Tue, 14 Jan 2025 18:26:14 -0500 Subject: [PATCH 2210/2301] [CIR][CIRGen] virtual table pointer initialization without ctor (#1283) Corresponding [OG code](https://github.com/llvm/clangir/blob/ef20d053b3d78c9d4c135e2811b303b7e5016d30/clang/lib/CodeGen/CGExprConstant.cpp#L846). [OG generated code here](https://godbolt.org/z/x6q333dMn), one notable diff is we're missing `inrange` which is reported in [issue 886 ](https://github.com/llvm/clangir/issues/886). For now, I'm still using GlobalViewAttr to implement it so we can move things fast. But it might be worth considering approach [Comments in issue 258](https://github.com/llvm/clangir/issues/258), especially we could incoporate [inrange info](https://github.com/llvm/clangir/issues/886) to the attribute suggested there. --- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 23 ++++++++++++++++++---- clang/test/CIR/CodeGen/vtable-emission.cpp | 16 ++++++++++++++- 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index e5fda5a6bb15..b15b7f3aaf2e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -736,12 +736,27 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD, const CXXRecordDecl *VTableClass, CharUnits Offset) { const ASTRecordLayout &Layout = CGM.getASTContext().getASTRecordLayout(RD); - if (const CXXRecordDecl *CD = dyn_cast(RD)) { // Add a vtable pointer, if we need one and it hasn't already been added. - if (Layout.hasOwnVFPtr()) - llvm_unreachable("NYI"); - + if (Layout.hasOwnVFPtr()) { + CIRGenBuilderTy &builder = CGM.getBuilder(); + cir::GlobalOp vtable = + CGM.getCXXABI().getAddrOfVTable(VTableClass, CharUnits()); + clang::VTableLayout::AddressPointLocation addressPoint = + CGM.getItaniumVTableContext() + .getVTableLayout(VTableClass) + .getAddressPoint(BaseSubobject(CD, Offset)); + assert(!cir::MissingFeatures::ptrAuth()); + mlir::ArrayAttr indices = builder.getArrayAttr({ + builder.getI32IntegerAttr(0), + builder.getI32IntegerAttr(addressPoint.VTableIndex), + builder.getI32IntegerAttr(addressPoint.AddressPointIndex), + }); + cir::GlobalViewAttr vtableInit = + CGM.getBuilder().getGlobalViewAttr(vtable, indices); + if (!AppendBytes(Offset, vtableInit)) + return false; + } // Accumulate and sort bases, in order to visit them in address order, which // may not be the same as declaration order. SmallVector Bases; diff --git a/clang/test/CIR/CodeGen/vtable-emission.cpp b/clang/test/CIR/CodeGen/vtable-emission.cpp index f63a9fe3cd97..6691167488c5 100644 --- a/clang/test/CIR/CodeGen/vtable-emission.cpp +++ b/clang/test/CIR/CodeGen/vtable-emission.cpp @@ -1,15 +1,29 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -o - %s \ +// RUN: | opt -S -passes=instcombine,mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s struct S { virtual void key(); virtual void nonKey() {} -}; +} sobj; void S::key() {} +// CHECK-DAG: !ty_anon_struct1 = !cir.struct x 4>}> +// CHECK-DAG: !ty_anon_struct2 = !cir.struct}> + // The definition of the key function should result in the vtable being emitted. // CHECK: cir.global external @_ZTV1S = #cir.vtable +// LLVM: @_ZTV1S = global { [4 x ptr] } { [4 x ptr] +// LLVM-SAME: [ptr null, ptr @_ZTI1S, ptr @_ZN1S3keyEv, ptr @_ZN1S6nonKeyEv] }, align 8 + +// CHECK: cir.global external @sobj = #cir.const_struct +// CHECK-SAME: <{#cir.global_view<@_ZTV1S, [0 : i32, 0 : i32, 2 : i32]> : +// CHECK-SAME: !cir.ptr}> : !ty_anon_struct2 {alignment = 8 : i64} +// LLVM: @sobj = global { ptr } { ptr getelementptr inbounds +// LLVM-SAME: ({ [4 x ptr] }, ptr @_ZTV1S, i32 0, i32 0, i32 2) }, align 8 // The reference from the vtable should result in nonKey being emitted. // CHECK: cir.func linkonce_odr @_ZN1S6nonKeyEv({{.*}} { From 50290961cfee6dd5c4647a2df5a76ee38028e0b5 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Thu, 16 Jan 2025 03:44:30 +0800 Subject: [PATCH 2211/2301] [CIR] Data member pointer comparison and casts (#1268) This PR adds CIRGen and LLVM lowering support for the following language features related to pointers to data members: - Comparisons between pointers to data members. - Casting from pointers to data members to boolean. - Reinterpret casts between pointers to data members. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 +- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 22 ++++++-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 12 +++++ .../Transforms/TargetLowering/CIRCXXABI.h | 13 +++++ .../TargetLowering/ItaniumCXXABI.cpp | 52 +++++++++++++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 34 ++++++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 15 ++++-- .../CodeGen/pointer-to-data-member-cast.cpp | 26 ++++++++++ .../CodeGen/pointer-to-data-member-cmp.cpp | 44 ++++++++++++++++ 9 files changed, 205 insertions(+), 16 deletions(-) create mode 100644 clang/test/CIR/CodeGen/pointer-to-data-member-cmp.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index dfcc836756d1..e2f0c980aa5c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -123,6 +123,7 @@ def CK_FloatComplexToIntegralComplex def CK_IntegralComplexCast : I32EnumAttrCase<"int_complex", 23>; def CK_IntegralComplexToFloatComplex : I32EnumAttrCase<"int_complex_to_float_complex", 24>; +def CK_MemberPtrToBoolean : I32EnumAttrCase<"member_ptr_to_bool", 25>; def CastKind : I32EnumAttr< "CastKind", @@ -135,7 +136,7 @@ def CastKind : I32EnumAttr< CK_FloatComplexToReal, CK_IntegralComplexToReal, CK_FloatComplexToBoolean, CK_IntegralComplexToBoolean, CK_FloatComplexCast, CK_FloatComplexToIntegralComplex, CK_IntegralComplexCast, - CK_IntegralComplexToFloatComplex]> { + CK_IntegralComplexToFloatComplex, CK_MemberPtrToBoolean]> { let cppNamespace = "::cir"; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index c0b6ac3c78e7..836e78c32176 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -932,7 +932,12 @@ class ScalarExprEmitter : public StmtVisitor { }; if (const MemberPointerType *MPT = LHSTy->getAs()) { - assert(0 && "not implemented"); + assert(E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE); + mlir::Value lhs = CGF.emitScalarExpr(E->getLHS()); + mlir::Value rhs = CGF.emitScalarExpr(E->getRHS()); + cir::CmpOpKind kind = ClangCmpToCIRCmp(E->getOpcode()); + Result = + Builder.createCompare(CGF.getLoc(E->getExprLoc()), kind, lhs, rhs); } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { BinOpInfo BOInfo = emitBinOps(E); mlir::Value LHS = BOInfo.LHS; @@ -1741,8 +1746,11 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { auto Ty = mlir::cast(CGF.convertType(DestTy)); return Builder.getNullDataMemberPtr(Ty, CGF.getLoc(E->getExprLoc())); } - case CK_ReinterpretMemberPointer: - llvm_unreachable("NYI"); + case CK_ReinterpretMemberPointer: { + mlir::Value src = Visit(E); + return Builder.createBitcast(CGF.getLoc(E->getExprLoc()), src, + CGF.convertType(DestTy)); + } case CK_BaseToDerivedMemberPointer: case CK_DerivedToBaseMemberPointer: { mlir::Value src = Visit(E); @@ -1875,8 +1883,12 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { return emitPointerToBoolConversion(Visit(E), E->getType()); case CK_FloatingToBoolean: return emitFloatToBoolConversion(Visit(E), CGF.getLoc(E->getExprLoc())); - case CK_MemberPointerToBoolean: - llvm_unreachable("NYI"); + case CK_MemberPointerToBoolean: { + mlir::Value memPtr = Visit(E); + return Builder.createCast(CGF.getLoc(CE->getSourceRange()), + cir::CastKind::member_ptr_to_bool, memPtr, + CGF.convertType(DestTy)); + } case CK_FloatingComplexToReal: case CK_IntegralComplexToReal: case CK_FloatingComplexToBoolean: diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index d491fdcc211c..2eb5c90b8af3 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -530,6 +530,11 @@ LogicalResult cir::CastOp::verify() { return success(); } + // Handle the data member pointer types. + if (mlir::isa(srcType) && + mlir::isa(resType)) + return success(); + // This is the only cast kind where we don't want vector types to decay // into the element type. if ((!mlir::isa(getSrc().getType()) || @@ -705,6 +710,13 @@ LogicalResult cir::CastOp::verify() { << "requires !cir.complex type for result"; return success(); } + case cir::CastKind::member_ptr_to_bool: { + if (!mlir::isa(srcType)) + return emitOpError() << "requires !cir.data_member type for source"; + if (!mlir::isa(resType)) + return emitOpError() << "requires !cir.bool type for result"; + return success(); + } } llvm_unreachable("Unknown CastOp kind?"); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index 830d5589fbe9..a1948059d783 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -97,6 +97,19 @@ class CIRCXXABI { virtual mlir::Value lowerDerivedDataMember(cir::DerivedDataMemberOp op, mlir::Value loweredSrc, mlir::OpBuilder &builder) const = 0; + + virtual mlir::Value lowerDataMemberCmp(cir::CmpOp op, mlir::Value loweredLhs, + mlir::Value loweredRhs, + mlir::OpBuilder &builder) const = 0; + + virtual mlir::Value + lowerDataMemberBitcast(cir::CastOp op, mlir::Type loweredDstTy, + mlir::Value loweredSrc, + mlir::OpBuilder &builder) const = 0; + + virtual mlir::Value + lowerDataMemberToBoolCast(cir::CastOp op, mlir::Value loweredSrc, + mlir::OpBuilder &builder) const = 0; }; /// Creates an Itanium-family ABI. diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index f22eca2f15c6..f3569eca9e0a 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -73,6 +73,18 @@ class ItaniumCXXABI : public CIRCXXABI { mlir::Value lowerDerivedDataMember(cir::DerivedDataMemberOp op, mlir::Value loweredSrc, mlir::OpBuilder &builder) const override; + + mlir::Value lowerDataMemberCmp(cir::CmpOp op, mlir::Value loweredLhs, + mlir::Value loweredRhs, + mlir::OpBuilder &builder) const override; + + mlir::Value lowerDataMemberBitcast(cir::CastOp op, mlir::Type loweredDstTy, + mlir::Value loweredSrc, + mlir::OpBuilder &builder) const override; + + mlir::Value + lowerDataMemberToBoolCast(cir::CastOp op, mlir::Value loweredSrc, + mlir::OpBuilder &builder) const override; }; } // namespace @@ -89,18 +101,23 @@ bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { return false; } -mlir::Type ItaniumCXXABI::lowerDataMemberType( - cir::DataMemberType type, const mlir::TypeConverter &typeConverter) const { +static mlir::Type getABITypeForDataMember(LowerModule &lowerMod) { // Itanium C++ ABI 2.3: // A pointer to data member is an offset from the base address of // the class object containing it, represented as a ptrdiff_t - const clang::TargetInfo &target = LM.getTarget(); + const clang::TargetInfo &target = lowerMod.getTarget(); clang::TargetInfo::IntType ptrdiffTy = target.getPtrDiffType(clang::LangAS::Default); - return cir::IntType::get(type.getContext(), target.getTypeWidth(ptrdiffTy), + return cir::IntType::get(lowerMod.getMLIRContext(), + target.getTypeWidth(ptrdiffTy), target.isTypeSigned(ptrdiffTy)); } +mlir::Type ItaniumCXXABI::lowerDataMemberType( + cir::DataMemberType type, const mlir::TypeConverter &typeConverter) const { + return getABITypeForDataMember(LM); +} + mlir::TypedAttr ItaniumCXXABI::lowerDataMemberConstant( cir::DataMemberAttr attr, const mlir::DataLayout &layout, const mlir::TypeConverter &typeConverter) const { @@ -175,6 +192,33 @@ ItaniumCXXABI::lowerDerivedDataMember(cir::DerivedDataMemberOp op, /*isDerivedToBase=*/false, builder); } +mlir::Value ItaniumCXXABI::lowerDataMemberCmp(cir::CmpOp op, + mlir::Value loweredLhs, + mlir::Value loweredRhs, + mlir::OpBuilder &builder) const { + return builder.create(op.getLoc(), op.getKind(), loweredLhs, + loweredRhs); +} + +mlir::Value +ItaniumCXXABI::lowerDataMemberBitcast(cir::CastOp op, mlir::Type loweredDstTy, + mlir::Value loweredSrc, + mlir::OpBuilder &builder) const { + return builder.create(op.getLoc(), loweredDstTy, + cir::CastKind::bitcast, loweredSrc); +} + +mlir::Value +ItaniumCXXABI::lowerDataMemberToBoolCast(cir::CastOp op, mlir::Value loweredSrc, + mlir::OpBuilder &builder) const { + // Itanium C++ ABI 2.3: + // A NULL pointer is represented as -1. + auto nullAttr = cir::IntAttr::get(getABITypeForDataMember(LM), -1); + auto nullValue = builder.create(op.getLoc(), nullAttr); + return builder.create(op.getLoc(), cir::CmpOpKind::ne, loweredSrc, + nullValue); +} + CIRCXXABI *CreateItaniumCXXABI(LowerModule &LM) { switch (LM.getCXXABIKind()) { // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0fd29e6f273b..f06ab9653d10 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1299,8 +1299,18 @@ mlir::LogicalResult CIRToLLVMCastOpLowering::matchAndRewrite( } case cir::CastKind::bitcast: { auto dstTy = castOp.getType(); - auto llvmSrcVal = adaptor.getOperands().front(); auto llvmDstTy = getTypeConverter()->convertType(dstTy); + + if (mlir::isa(castOp.getSrc().getType())) { + mlir::Value loweredResult = lowerMod->getCXXABI().lowerDataMemberBitcast( + castOp, llvmDstTy, src, rewriter); + rewriter.replaceOp(castOp, loweredResult); + return mlir::success(); + } + if (mlir::isa(castOp.getSrc().getType())) + llvm_unreachable("NYI"); + + auto llvmSrcVal = adaptor.getOperands().front(); rewriter.replaceOpWithNewOp(castOp, llvmDstTy, llvmSrcVal); return mlir::success(); @@ -1324,6 +1334,16 @@ mlir::LogicalResult CIRToLLVMCastOpLowering::matchAndRewrite( llvmSrcVal); break; } + case cir::CastKind::member_ptr_to_bool: { + mlir::Value loweredResult; + if (mlir::isa(castOp.getSrc().getType())) + llvm_unreachable("NYI"); + else + loweredResult = lowerMod->getCXXABI().lowerDataMemberToBoolCast( + castOp, src, rewriter); + rewriter.replaceOp(castOp, loweredResult); + break; + } default: { return castOp.emitError("Unhandled cast kind: ") << castOp.getKindAttrName(); @@ -2902,6 +2922,14 @@ mlir::LogicalResult CIRToLLVMCmpOpLowering::matchAndRewrite( mlir::ConversionPatternRewriter &rewriter) const { auto type = cmpOp.getLhs().getType(); + if (mlir::isa(type)) { + assert(lowerMod && "lowering module is not available"); + mlir::Value loweredResult = lowerMod->getCXXABI().lowerDataMemberCmp( + cmpOp, adaptor.getLhs(), adaptor.getRhs(), rewriter); + rewriter.replaceOp(cmpOp, loweredResult); + return mlir::success(); + } + // Lower to LLVM comparison op. // if (auto intTy = mlir::dyn_cast(type)) { if (mlir::isa(type)) { @@ -4087,6 +4115,7 @@ void populateCIRToLLVMConversionPatterns( argsVarMap, patterns.getContext()); patterns.add< // clang-format off + CIRToLLVMCastOpLowering, CIRToLLVMLoadOpLowering, CIRToLLVMStoreOpLowering, CIRToLLVMGlobalOpLowering, @@ -4096,6 +4125,7 @@ void populateCIRToLLVMConversionPatterns( patterns.add< // clang-format off CIRToLLVMBaseDataMemberOpLowering, + CIRToLLVMCmpOpLowering, CIRToLLVMDerivedDataMemberOpLowering, CIRToLLVMGetRuntimeMemberOpLowering // clang-format on @@ -4103,7 +4133,6 @@ void populateCIRToLLVMConversionPatterns( patterns.add< // clang-format off CIRToLLVMPtrStrideOpLowering, - CIRToLLVMCastOpLowering, CIRToLLVMInlineAsmOpLowering // clang-format on >(converter, patterns.getContext(), dataLayout); @@ -4132,7 +4161,6 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMCallOpLowering, CIRToLLVMCatchParamOpLowering, CIRToLLVMClearCacheOpLowering, - CIRToLLVMCmpOpLowering, CIRToLLVMCmpThreeWayOpLowering, CIRToLLVMComplexCreateOpLowering, CIRToLLVMComplexImagOpLowering, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 264ae29a0e85..104ce3a0b105 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -232,6 +232,7 @@ class CIRToLLVMBrCondOpLowering }; class CIRToLLVMCastOpLowering : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; mlir::DataLayout const &dataLayout; mlir::Type convertTy(mlir::Type ty) const; @@ -239,9 +240,10 @@ class CIRToLLVMCastOpLowering : public mlir::OpConversionPattern { public: CIRToLLVMCastOpLowering(const mlir::TypeConverter &typeConverter, mlir::MLIRContext *context, + cir::LowerModule *lowerModule, mlir::DataLayout const &dataLayout) - : OpConversionPattern(typeConverter, context), dataLayout(dataLayout) {} - using mlir::OpConversionPattern::OpConversionPattern; + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule), + dataLayout(dataLayout) {} mlir::LogicalResult matchAndRewrite(cir::CastOp op, OpAdaptor, @@ -649,8 +651,15 @@ class CIRToLLVMShiftOpLowering }; class CIRToLLVMCmpOpLowering : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; + public: - using mlir::OpConversionPattern::OpConversionPattern; + CIRToLLVMCmpOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + cir::LowerModule *lowerModule) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) { + setHasBoundedRewriteRecursion(); + } mlir::LogicalResult matchAndRewrite(cir::CmpOp op, OpAdaptor, diff --git a/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp b/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp index 63625236e42a..51913c09af23 100644 --- a/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp +++ b/clang/test/CIR/CodeGen/pointer-to-data-member-cast.cpp @@ -70,3 +70,29 @@ auto derived_to_base_zero_offset(int Derived::*ptr) -> int Base1::* { // LLVM-NEXT: %[[#ret:]] = load i64, ptr %[[#ret_slot]] // LLVM-NEXT: ret i64 %[[#ret]] } + +struct Foo { + int a; +}; + +struct Bar { + int a; +}; + +bool to_bool(int Foo::*x) { + return x; +} + +// CIR-LABEL: @_Z7to_boolM3Fooi +// CIR: %[[#x:]] = cir.load %{{.+}} : !cir.ptr>, !cir.data_member +// CIR-NEXT: %{{.+}} = cir.cast(member_ptr_to_bool, %[[#x]] : !cir.data_member), !cir.bool +// CIR: } + +auto bitcast(int Foo::*x) { + return reinterpret_cast(x); +} + +// CIR-LABEL: @_Z7bitcastM3Fooi +// CIR: %[[#x:]] = cir.load %{{.+}} : !cir.ptr>, !cir.data_member +// CIR-NEXT: %{{.+}} = cir.cast(bitcast, %[[#x]] : !cir.data_member), !cir.data_member +// CIR: } diff --git a/clang/test/CIR/CodeGen/pointer-to-data-member-cmp.cpp b/clang/test/CIR/CodeGen/pointer-to-data-member-cmp.cpp new file mode 100644 index 000000000000..ebcf141de32b --- /dev/null +++ b/clang/test/CIR/CodeGen/pointer-to-data-member-cmp.cpp @@ -0,0 +1,44 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir --check-prefix=CIR %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s + +struct Foo { + int a; +}; + +struct Bar { + int a; +}; + +bool eq(int Foo::*x, int Foo::*y) { + return x == y; +} + +// CIR-LABEL: @_Z2eqM3FooiS0_ +// CIR: %[[#x:]] = cir.load %{{.+}} : !cir.ptr>, !cir.data_member +// CIR-NEXT: %[[#y:]] = cir.load %{{.+}} : !cir.ptr>, !cir.data_member +// CIR-NEXT: %{{.+}} = cir.cmp(eq, %[[#x]], %[[#y]]) : !cir.data_member, !cir.bool +// CIR: } + +// LLVM-LABEL: @_Z2eqM3FooiS0_ +// LLVM: %[[#x:]] = load i64, ptr %{{.+}}, align 8 +// LLVM-NEXT: %[[#y:]] = load i64, ptr %{{.+}}, align 8 +// LLVM-NEXT: %{{.+}} = icmp eq i64 %[[#x]], %[[#y]] +// LLVM: } + +bool ne(int Foo::*x, int Foo::*y) { + return x != y; +} + +// CIR-LABEL: @_Z2neM3FooiS0_ +// CIR: %[[#x:]] = cir.load %{{.+}} : !cir.ptr>, !cir.data_member +// CIR-NEXT: %[[#y:]] = cir.load %{{.+}} : !cir.ptr>, !cir.data_member +// CIR-NEXT: %{{.+}} = cir.cmp(ne, %[[#x]], %[[#y]]) : !cir.data_member, !cir.bool +// CIR: } + +// LLVM-LABEL: @_Z2neM3FooiS0_ +// LLVM: %[[#x:]] = load i64, ptr %{{.+}}, align 8 +// LLVM-NEXT: %[[#y:]] = load i64, ptr %{{.+}}, align 8 +// LLVM-NEXT: %{{.+}} = icmp ne i64 %[[#x]], %[[#y]] +// LLVM: } From 37244486691dfb5dd269cfd8a9a68af657cb6585 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Wed, 22 Jan 2025 03:31:27 +0800 Subject: [PATCH 2212/2301] [CIR] Add integer result type for cir.global_view (#1248) This PR updates the `#cir.global_view` attribute and make it accept integer types as its result type. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 5 +++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 34 ++++++++++++++----- clang/test/CIR/Lowering/globals.cir | 20 +++++++++++ 3 files changed, 50 insertions(+), 9 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index e54b52b96c91..3e12a9307807 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -607,11 +607,16 @@ def GlobalViewAttr : CIR_Attr<"GlobalView", "global_view", [TypedAttrInterface]> for `!cir.ptr`, an offset is applied. The first index is relative to the original symbol type, not the produced one. + The result type of this attribute may be an integer type. In such a case, + the pointer to the referenced global is casted to an integer and this + attribute represents the casted result. + Example: ``` cir.global external @s = @".str2": !cir.ptr cir.global external @x = #cir.global_view<@s> : !cir.ptr + cir.global external @s_addr = #cir.global_view<@s> : !s64i cir.global external @rgb = #cir.const_array<[0 : i8, -23 : i8, 33 : i8] : !cir.array> cir.global external @elt_ptr = #cir.global_view<@rgb, [1]> : !cir.ptr diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index f06ab9653d10..3462e90ff5f4 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -698,17 +698,25 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::GlobalViewAttr globalAttr, indices, true); } - auto ptrTy = mlir::dyn_cast(globalAttr.getType()); - assert(ptrTy && "Expecting pointer type in GlobalViewAttr"); - auto llvmEltTy = - convertTypeForMemory(*converter, dataLayout, ptrTy.getPointee()); + if (auto intTy = mlir::dyn_cast(globalAttr.getType())) { + auto llvmDstTy = converter->convertType(globalAttr.getType()); + return rewriter.create(parentOp->getLoc(), + llvmDstTy, addrOp); + } + + if (auto ptrTy = mlir::dyn_cast(globalAttr.getType())) { + auto llvmEltTy = + convertTypeForMemory(*converter, dataLayout, ptrTy.getPointee()); - if (llvmEltTy == sourceType) - return addrOp; + if (llvmEltTy == sourceType) + return addrOp; - auto llvmDstTy = converter->convertType(globalAttr.getType()); - return rewriter.create(parentOp->getLoc(), llvmDstTy, - addrOp); + auto llvmDstTy = converter->convertType(globalAttr.getType()); + return rewriter.create(parentOp->getLoc(), llvmDstTy, + addrOp); + } + + llvm_unreachable("Expecting pointer or integer type for GlobalViewAttr"); } /// Switches on the type of attribute and calls the appropriate conversion. @@ -1752,6 +1760,14 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( attr = rewriter.getIntegerAttr(typeConverter->convertType(op.getType()), value); } else if (mlir::isa(op.getType())) { + // Lower GlobalAddrAttr to llvm.mlir.addressof + llvm.mlir.ptrtoint + if (auto ga = mlir::dyn_cast(op.getValue())) { + auto newOp = + lowerCirAttrAsValue(op, ga, rewriter, getTypeConverter(), dataLayout); + rewriter.replaceOp(op, newOp); + return mlir::success(); + } + attr = rewriter.getIntegerAttr( typeConverter->convertType(op.getType()), mlir::cast(op.getValue()).getValue()); diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 765544e8c125..0108b56b8a7b 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -27,6 +27,7 @@ module { cir.global external @alpha = #cir.const_array<[#cir.int<97> : !s8i, #cir.int<98> : !s8i, #cir.int<99> : !s8i, #cir.int<0> : !s8i]> : !cir.array cir.global "private" constant internal @".str" = #cir.const_array<"example\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.global external @s = #cir.global_view<@".str"> : !cir.ptr + cir.global external @s_addr = #cir.global_view<@".str"> : !u64i // MLIR: llvm.mlir.global internal constant @".str"("example\00") // MLIR-SAME: {addr_space = 0 : i32, alignment = 1 : i64} // MLIR: llvm.mlir.global external @s() {addr_space = 0 : i32} : !llvm.ptr { @@ -34,8 +35,14 @@ module { // MLIR: %1 = llvm.bitcast %0 : !llvm.ptr to !llvm.ptr // MLIR: llvm.return %1 : !llvm.ptr // MLIR: } + // MLIR: llvm.mlir.global external @s_addr() {addr_space = 0 : i32} : i64 { + // MLIR: %0 = llvm.mlir.addressof @".str" : !llvm.ptr + // MLIR: %1 = llvm.ptrtoint %0 : !llvm.ptr to i64 + // MLIR: llvm.return %1 : i64 + // MLIR: } // LLVM: @.str = internal constant [8 x i8] c"example\00" // LLVM: @s = global ptr @.str + // LLVM: @s_addr = global i64 ptrtoint (ptr @.str to i64) cir.global external @aPtr = #cir.global_view<@a> : !cir.ptr // MLIR: llvm.mlir.global external @aPtr() {addr_space = 0 : i32} : !llvm.ptr { // MLIR: %0 = llvm.mlir.addressof @a : !llvm.ptr @@ -198,4 +205,17 @@ module { } // MLIR: %0 = llvm.mlir.addressof @zero_array + cir.func @global_view_as_integer() -> !u64i { + %0 = cir.const #cir.global_view<@".str"> : !u64i + cir.return %0 : !u64i + } + // MLIR-LABEL: @global_view_as_integer + // MLIR-NEXT: %0 = llvm.mlir.addressof @".str" : !llvm.ptr + // MLIR-NEXT: %1 = llvm.ptrtoint %0 : !llvm.ptr to i64 + // MLIR-NEXT: llvm.return %1 : i64 + // MLIR-NEXT: } + // LLVM-LABEL: @global_view_as_integer + // LLVM-NEXT: ret i64 ptrtoint (ptr @.str to i64) + // LLVM-NEXT: } + } From 8efa1d0c73b80e45cf89a5c12c9c91a891959db2 Mon Sep 17 00:00:00 2001 From: Rajveer Singh Bharadwaj Date: Wed, 22 Jan 2025 03:46:01 +0530 Subject: [PATCH 2213/2301] [CIR][CIRGen] Simplify LLVM IR array initialization for clang CIR (#1280) Resolves #1266 After change: ```llvm %1 = alloca ptr, i64 1, align 8 store i32 1, ptr @g_arr, align 4 store i32 2, ptr getelementptr (i32, ptr @g_arr, i64 1), align 4 store i32 3, ptr getelementptr (i32, ptr @g_arr, i64 2), align 4 %2 = load i32, ptr @g, align 4 store i32 %2, ptr getelementptr (i32, ptr @g_arr, i64 3), align 4 store ptr getelementptr (i32, ptr getelementptr (i32, ptr @g_arr, i64 3), i64 1), ptr %1, align 8 ``` --- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 6 +- clang/test/CIR/CodeGen/array-init.c | 213 ++++++++++++++++-------- 2 files changed, 151 insertions(+), 68 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 526559a0060c..d7aa4326bf30 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -465,14 +465,14 @@ void AggExprEmitter::emitArrayInit(Address DestPtr, cir::ArrayType AType, // Emit the explicit initializers. for (uint64_t i = 0; i != NumInitElements; ++i) { - if (i == 1) + if (i > 0) one = CGF.getBuilder().getConstInt( - loc, mlir::cast(CGF.PtrDiffTy), 1); + loc, mlir::cast(CGF.PtrDiffTy), i); // Advance to the next element. if (i > 0) { element = CGF.getBuilder().create( - loc, cirElementPtrType, element, one); + loc, cirElementPtrType, begin, one); // Tell the cleanup that it needs to destroy up to this // element. TODO: some of these stores can be trivially diff --git a/clang/test/CIR/CodeGen/array-init.c b/clang/test/CIR/CodeGen/array-init.c index 22c282a6ffec..19ada3a8ceb8 100644 --- a/clang/test/CIR/CodeGen/array-init.c +++ b/clang/test/CIR/CodeGen/array-init.c @@ -1,6 +1,9 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -fno-clangir-call-conv-lowering %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s -// CHECK-DAG: cir.global "private" constant cir_private @__const.foo.bar = #cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array +// CIR-DAG: cir.global "private" constant cir_private @__const.foo.bar = #cir.const_array<[#cir.fp<9.000000e+00> : !cir.double, #cir.fp<8.000000e+00> : !cir.double, #cir.fp<7.000000e+00> : !cir.double]> : !cir.array typedef struct { int a; long b; @@ -9,78 +12,158 @@ typedef struct { void buz(int x) { T arr[] = { {0, x}, {0, 0} }; } -// CHECK: cir.func @buz -// CHECK-NEXT: [[X_ALLOCA:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} -// CHECK-NEXT: [[ARR:%.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 16 : i64} -// CHECK-NEXT: cir.store %arg0, [[X_ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: [[ARR_INIT:%.*]] = cir.const #cir.zero : !cir.array -// CHECK-NEXT: cir.store [[ARR_INIT]], [[ARR]] : !cir.array, !cir.ptr> -// CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: [[A_STORAGE0:%.*]] = cir.get_member [[FI_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: [[B_STORAGE0:%.*]] = cir.get_member [[FI_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: [[X_VAL:%.*]] = cir.load [[X_ALLOCA]] : !cir.ptr, !s32i -// CHECK-NEXT: [[X_CASTED:%.*]] = cir.cast(integral, [[X_VAL]] : !s32i), !s64i -// CHECK-NEXT: cir.store [[X_CASTED]], [[B_STORAGE0]] : !s64i, !cir.ptr -// CHECK-NEXT: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride([[FI_EL]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: [[A_STORAGE1:%.*]] = cir.get_member [[SE_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: [[B_STORAGE1:%.*]] = cir.get_member [[SE_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: cir.return +// CIR: cir.func @buz +// CIR-NEXT: [[X_ALLOCA:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CIR-NEXT: [[ARR:%.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 16 : i64} +// CIR-NEXT: cir.store %arg0, [[X_ALLOCA]] : !s32i, !cir.ptr +// CIR-NEXT: [[ARR_INIT:%.*]] = cir.const #cir.zero : !cir.array +// CIR-NEXT: cir.store [[ARR_INIT]], [[ARR]] : !cir.array, !cir.ptr> +// CIR-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: [[A_STORAGE0:%.*]] = cir.get_member [[FI_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CIR-NEXT: [[B_STORAGE0:%.*]] = cir.get_member [[FI_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr +// CIR-NEXT: [[X_VAL:%.*]] = cir.load [[X_ALLOCA]] : !cir.ptr, !s32i +// CIR-NEXT: [[X_CASTED:%.*]] = cir.cast(integral, [[X_VAL]] : !s32i), !s64i +// CIR-NEXT: cir.store [[X_CASTED]], [[B_STORAGE0]] : !s64i, !cir.ptr +// CIR-NEXT: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i +// CIR-NEXT: [[SE_EL:%.*]] = cir.ptr_stride([[FI_EL]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CIR-NEXT: [[A_STORAGE1:%.*]] = cir.get_member [[SE_EL]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CIR-NEXT: [[B_STORAGE1:%.*]] = cir.get_member [[SE_EL]][1] {name = "b"} : !cir.ptr -> !cir.ptr +// CIR-NEXT: cir.return void foo() { double bar[] = {9,8,7}; } -// CHECK-LABEL: @foo -// CHECK: %[[DST:.*]] = cir.alloca !cir.array, !cir.ptr>, ["bar"] -// CHECK: %[[SRC:.*]] = cir.get_global @__const.foo.bar : !cir.ptr> -// CHECK: cir.copy %[[SRC]] to %[[DST]] : !cir.ptr> +// CIR-LABEL: @foo +// CIR: %[[DST:.*]] = cir.alloca !cir.array, !cir.ptr>, ["bar"] +// CIR: %[[SRC:.*]] = cir.get_global @__const.foo.bar : !cir.ptr> +// CIR: cir.copy %[[SRC]] to %[[DST]] : !cir.ptr> void bar(int a, int b, int c) { int arr[] = {a,b,c}; } -// CHECK: cir.func @bar -// CHECK: [[ARR:%.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 4 : i64} -// CHECK-NEXT: cir.store %arg0, [[A:%.*]] : !s32i, !cir.ptr -// CHECK-NEXT: cir.store %arg1, [[B:%.*]] : !s32i, !cir.ptr -// CHECK-NEXT: cir.store %arg2, [[C:%.*]] : !s32i, !cir.ptr -// CHECK-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: [[LOAD_A:%.*]] = cir.load [[A]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store [[LOAD_A]], [[FI_EL]] : !s32i, !cir.ptr -// CHECK-NEXT: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: [[SE_EL:%.*]] = cir.ptr_stride(%4 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: [[LOAD_B:%.*]] = cir.load [[B]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store [[LOAD_B]], [[SE_EL]] : !s32i, !cir.ptr -// CHECK-NEXT: [[TH_EL:%.*]] = cir.ptr_stride(%7 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: [[LOAD_C:%.*]] = cir.load [[C]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store [[LOAD_C]], [[TH_EL]] : !s32i, !cir.ptr +// CIR: cir.func @bar +// CIR: [[ARR:%.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 4 : i64} +// CIR-NEXT: cir.store %arg0, [[A:%.*]] : !s32i, !cir.ptr +// CIR-NEXT: cir.store %arg1, [[B:%.*]] : !s32i, !cir.ptr +// CIR-NEXT: cir.store %arg2, [[C:%.*]] : !s32i, !cir.ptr +// CIR-NEXT: [[FI_EL:%.*]] = cir.cast(array_to_ptrdecay, [[ARR]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: [[LOAD_A:%.*]] = cir.load [[A]] : !cir.ptr, !s32i +// CIR-NEXT: cir.store [[LOAD_A]], [[FI_EL]] : !s32i, !cir.ptr +// CIR-NEXT: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i +// CIR-NEXT: [[SE_EL:%.*]] = cir.ptr_stride(%4 : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CIR-NEXT: [[LOAD_B:%.*]] = cir.load [[B]] : !cir.ptr, !s32i +// CIR-NEXT: cir.store [[LOAD_B]], [[SE_EL]] : !s32i, !cir.ptr +// CIR-NEXT: [[TWO:%.*]] = cir.const #cir.int<2> : !s64i +// CIR-NEXT: [[TH_EL:%.*]] = cir.ptr_stride(%4 : !cir.ptr, [[TWO]] : !s64i), !cir.ptr +// CIR-NEXT: [[LOAD_C:%.*]] = cir.load [[C]] : !cir.ptr, !s32i +// CIR-NEXT: cir.store [[LOAD_C]], [[TH_EL]] : !s32i, !cir.ptr void zero_init(int x) { int arr[3] = {x}; } -// CHECK: cir.func @zero_init -// CHECK: [[VAR_ALLOC:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 4 : i64} -// CHECK: [[TEMP:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp", init] {alignment = 8 : i64} -// CHECK: cir.store %arg0, [[VAR_ALLOC]] : !s32i, !cir.ptr -// CHECK: [[BEGIN:%.*]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr -// CHECK: [[VAR:%.*]] = cir.load [[VAR_ALLOC]] : !cir.ptr, !s32i -// CHECK: cir.store [[VAR]], [[BEGIN]] : !s32i, !cir.ptr -// CHECK: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i -// CHECK: [[ZERO_INIT_START:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr -// CHECK: cir.store [[ZERO_INIT_START]], [[TEMP]] : !cir.ptr, !cir.ptr> -// CHECK: [[SIZE:%.*]] = cir.const #cir.int<3> : !s64i -// CHECK: [[END:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[SIZE]] : !s64i), !cir.ptr -// CHECK: cir.do { -// CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : !cir.ptr>, !cir.ptr -// CHECK: [[FILLER:%.*]] = cir.const #cir.int<0> : !s32i -// CHECK: cir.store [[FILLER]], [[CUR]] : !s32i, !cir.ptr -// CHECK: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i -// CHECK: [[NEXT:%.*]] = cir.ptr_stride([[CUR]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr -// CHECK: cir.store [[NEXT]], [[TEMP]] : !cir.ptr, !cir.ptr> -// CHECK: cir.yield -// CHECK: } while { -// CHECK: [[CUR:%.*]] = cir.load [[TEMP]] : !cir.ptr>, !cir.ptr -// CHECK: [[CMP:%.*]] = cir.cmp(ne, [[CUR]], [[END]]) : !cir.ptr, !cir.bool -// CHECK: cir.condition([[CMP]]) -// CHECK: } -// CHECK: cir.return +// CIR: cir.func @zero_init +// CIR: [[VAR_ALLOC:%.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} +// CIR: %1 = cir.alloca !cir.array, !cir.ptr>, ["arr", init] {alignment = 4 : i64} +// CIR: [[TEMP:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp", init] {alignment = 8 : i64} +// CIR: cir.store %arg0, [[VAR_ALLOC]] : !s32i, !cir.ptr +// CIR: [[BEGIN:%.*]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CIR: [[VAR:%.*]] = cir.load [[VAR_ALLOC]] : !cir.ptr, !s32i +// CIR: cir.store [[VAR]], [[BEGIN]] : !s32i, !cir.ptr +// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i +// CIR: [[ZERO_INIT_START:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CIR: cir.store [[ZERO_INIT_START]], [[TEMP]] : !cir.ptr, !cir.ptr> +// CIR: [[SIZE:%.*]] = cir.const #cir.int<3> : !s64i +// CIR: [[END:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[SIZE]] : !s64i), !cir.ptr +// CIR: cir.do { +// CIR: [[CUR:%.*]] = cir.load [[TEMP]] : !cir.ptr>, !cir.ptr +// CIR: [[FILLER:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: cir.store [[FILLER]], [[CUR]] : !s32i, !cir.ptr +// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i +// CIR: [[NEXT:%.*]] = cir.ptr_stride([[CUR]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CIR: cir.store [[NEXT]], [[TEMP]] : !cir.ptr, !cir.ptr> +// CIR: cir.yield +// CIR: } while { +// CIR: [[CUR:%.*]] = cir.load [[TEMP]] : !cir.ptr>, !cir.ptr +// CIR: [[CMP:%.*]] = cir.cmp(ne, [[CUR]], [[END]]) : !cir.ptr, !cir.bool +// CIR: cir.condition([[CMP]]) +// CIR: } +// CIR: cir.return + +void aggr_init() { + int g = 5; + int g_arr[5] = {1, 2, 3, g}; +} +// CIR-LABEL: cir.func no_proto @aggr_init +// CIR: [[VAR_ALLOC:%.*]] = cir.alloca !s32i, !cir.ptr, ["g", init] {alignment = 4 : i64} +// CIR: %1 = cir.alloca !cir.array, !cir.ptr>, ["g_arr", init] {alignment = 16 : i64} +// CIR: [[TEMP:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp", init] {alignment = 8 : i64} +// CIR: %3 = cir.const #cir.int<5> : !s32i +// CIR: cir.store %3, [[VAR_ALLOC]] : !s32i, !cir.ptr +// CIR: [[BEGIN:%.*]] = cir.cast(array_to_ptrdecay, %1 : !cir.ptr>), !cir.ptr +// CIR: %5 = cir.const #cir.int<1> : !s32i +// CIR: cir.store %5, [[BEGIN]] : !s32i, !cir.ptr +// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i +// CIR: %7 = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CIR: %8 = cir.const #cir.int<2> : !s32i +// CIR: cir.store %8, %7 : !s32i, !cir.ptr +// CIR: [[TWO:%.*]] = cir.const #cir.int<2> : !s64i +// CIR: %10 = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[TWO]] : !s64i), !cir.ptr +// CIR: %11 = cir.const #cir.int<3> : !s32i +// CIR: cir.store %11, %10 : !s32i, !cir.ptr +// CIR: [[THREE:%.*]] = cir.const #cir.int<3> : !s64i +// CIR: %13 = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[THREE]] : !s64i), !cir.ptr +// CIR: [[VAR:%.*]] = cir.load [[VAR_ALLOC]] : !cir.ptr, !s32i +// CIR: cir.store [[VAR]], %13 : !s32i, !cir.ptr +// CIR: [[ONE_VAR:%.*]] = cir.const #cir.int<1> : !s64i +// CIR: %16 = cir.ptr_stride(%13 : !cir.ptr, [[ONE_VAR]] : !s64i), !cir.ptr +// CIR: cir.store %16, [[TEMP]] : !cir.ptr, !cir.ptr> +// CIR: [[SIZE:%.*]] = cir.const #cir.int<5> : !s64i +// CIR: [[END:%.*]] = cir.ptr_stride([[BEGIN]] : !cir.ptr, [[SIZE]] : !s64i), !cir.ptr +// CIR: cir.do { +// CIR: [[CUR:%.*]] = cir.load [[TEMP]] : !cir.ptr>, !cir.ptr +// CIR: [[FILLER:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: cir.store [[FILLER]], [[CUR]] : !s32i, !cir.ptr +// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s64i +// CIR: [[NEXT:%.*]] = cir.ptr_stride([[CUR]] : !cir.ptr, [[ONE]] : !s64i), !cir.ptr +// CIR: cir.store [[NEXT]], [[TEMP]] : !cir.ptr, !cir.ptr> +// CIR: cir.yield +// CIR: } while { +// CIR: [[CUR:%.*]] = cir.load [[TEMP]] : !cir.ptr>, !cir.ptr +// CIR: [[CMP:%.*]] = cir.cmp(ne, [[CUR]], [[END]]) : !cir.ptr, !cir.bool +// CIR: cir.condition([[CMP]]) +// CIR: } +// CIR: cir.return +// +// LLVM-LABEL: @aggr_init +// LLVM: [[VAR_ALLOC:%.*]] = alloca i32, i64 1, align 4 +// LLVM: %2 = alloca [5 x i32], i64 1, align 16 +// LLVM: [[TEMP:%.*]] = alloca ptr, i64 1, align 8 +// LLVM: store i32 5, ptr [[VAR_ALLOC]], align 4 +// LLVM: [[BEGIN:%.*]] = getelementptr i32, ptr %2, i32 0 +// LLVM: store i32 1, ptr [[BEGIN]], align 4 +// LLVM: [[ONE:%.*]] = getelementptr i32, ptr [[BEGIN]], i64 1 +// LLVM: store i32 2, ptr [[ONE]], align 4 +// LLVM: [[TWO:%.*]] = getelementptr i32, ptr [[BEGIN]], i64 2 +// LLVM: store i32 3, ptr [[TWO]], align 4 +// LLVM: [[THREE:%.*]] = getelementptr i32, ptr [[BEGIN]], i64 3 +// LLVM: [[VAR:%.*]] = load i32, ptr [[VAR_ALLOC]], align 4 +// LLVM: store i32 [[VAR]], ptr [[THREE]], align 4 +// LLVM: %9 = getelementptr i32, ptr [[THREE]], i64 1 +// LLVM: store ptr %9, ptr [[TEMP]], align 8 +// LLVM: [[END:%.*]] = getelementptr i32, ptr [[BEGIN]], i64 5 +// LLVM: br label %14 +// +// LLVM: 11: ; preds = %14 +// LLVM: [[CUR:%.*]] = load ptr, ptr [[TEMP]], align 8 +// LLVM: [[CMP:%.*]] = icmp ne ptr [[CUR]], [[END]] +// LLVM: br i1 [[CMP]], label %14, label %17 +// +// LLVM: 14: ; preds = %11, %0 +// LLVM: [[CUR:%.*]] = load ptr, ptr [[TEMP]], align 8 +// LLVM: store i32 0, ptr [[CUR]], align 4 +// LLVM: [[NEXT:%.*]] = getelementptr i32, ptr [[CUR]], i64 1 +// LLVM: store ptr [[NEXT]], ptr [[TEMP]], align 8 +// LLVM: br label %11 +// +// LLVM: 17: ; preds = %11 +// LLVM: ret void From 6ea1d1498461c2e4084bb3adf11b5f5a6a8bd3d1 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 22 Jan 2025 17:28:34 -0800 Subject: [PATCH 2214/2301] [CIR][NFC] Add cleanup region to ScopeOp This does not change anything in practice, work in that direction should come next. We also want this to not affect existing tests to isolate upcoming changes. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 14 ++++-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 15 +++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 47 +++++++++++++++---- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 6 +-- .../Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 2 +- clang/test/CIR/IR/scope.cir | 32 ++++++++++++- 7 files changed, 92 insertions(+), 26 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e2f0c980aa5c..e1b2f0b766e1 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1066,23 +1066,29 @@ def ScopeOp : CIR_Op<"scope", [ The blocks can be terminated by `cir.yield`, `cir.return` or `cir.throw`. If `cir.scope` yields no value, the `cir.yield` can be left out, and will be inserted implicitly. + + The scope might also have an associated `cleanup` region, providing code + that run destruction of automatic variables. Note that in order to lower the + cleanup region while keeping C++ semantics, all immediate control-flow + breaking operations not under a children scope should jump to this cleanup + code. }]; let results = (outs Optional:$results); - let regions = (region AnyRegion:$scopeRegion); + let regions = (region AnyRegion:$scopeRegion, AnyRegion:$cleanupRegion); let hasVerifier = 1; let skipDefaultBuilders = 1; let assemblyFormat = [{ - custom($scopeRegion) (`:` type($results)^)? attr-dict + custom($scopeRegion, $cleanupRegion) (`:` type($results)^)? attr-dict }]; let extraClassDeclaration = [{ /// Determine whether the scope is empty, meaning it contains a single block /// terminated by a cir.yield. bool isEmpty() { - auto &entry = getRegion().front(); - return getRegion().hasOneBlock() && + auto &entry = getScopeRegion().front(); + return getScopeRegion().hasOneBlock() && llvm::isa(entry.front()); } }]; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index ef2a8d60b9f5..8ed7407f7e2b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2842,7 +2842,7 @@ mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty, if (auto tryOp = llvm::dyn_cast_if_present(entryBlock->getParentOp())) { if (auto scopeOp = llvm::dyn_cast(tryOp->getParentOp())) - entryBlock = &scopeOp.getRegion().front(); + entryBlock = &scopeOp.getScopeRegion().front(); } return emitAlloca(name, ty, loc, alignment, diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 43341ef0f1ca..495491799b3f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -826,13 +826,16 @@ void CIRGenItaniumCXXABI::emitBeginCatch(CIRGenFunction &CGF, auto getCatchParamAllocaIP = [&]() { auto currIns = CGF.getBuilder().saveInsertionPoint(); auto currParent = currIns.getBlock()->getParentOp(); - mlir::Operation *scopeLikeOp = currParent->getParentOfType(); - if (!scopeLikeOp) - scopeLikeOp = currParent->getParentOfType(); - assert(scopeLikeOp && "unknown outermost scope-like parent"); - assert(scopeLikeOp->getNumRegions() == 1 && "expected single region"); - auto *insertBlock = &scopeLikeOp->getRegion(0).getBlocks().back(); + mlir::Block *insertBlock = nullptr; + if (auto scopeOp = currParent->getParentOfType()) { + insertBlock = &scopeOp.getScopeRegion().getBlocks().back(); + } else if (auto fnOp = currParent->getParentOfType()) { + insertBlock = &fnOp.getRegion().getBlocks().back(); + } else { + llvm_unreachable("unknown outermost scope-like parent"); + } + return CGF.getBuilder().getBestAllocaInsertPoint(insertBlock); }; diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 2eb5c90b8af3..b4228fa2dfc8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -276,22 +276,41 @@ void parseVisibilityAttr(OpAsmParser &parser, cir::VisibilityAttr &visibility) { // CIR Custom Parsers/Printers //===----------------------------------------------------------------------===// -static mlir::ParseResult parseOmittedTerminatorRegion(mlir::OpAsmParser &parser, - mlir::Region ®ion) { +static mlir::ParseResult +parseOmittedTerminatorRegion(mlir::OpAsmParser &parser, + mlir::Region &scopeRegion, + mlir::Region &cleanupRegion) { auto regionLoc = parser.getCurrentLocation(); - if (parser.parseRegion(region)) + if (parser.parseRegion(scopeRegion)) return failure(); - if (ensureRegionTerm(parser, region, regionLoc).failed()) + if (ensureRegionTerm(parser, scopeRegion, regionLoc).failed()) return failure(); + + // Parse optional cleanup region. + if (parser.parseOptionalKeyword("cleanup").succeeded()) { + if (parser.parseRegion(cleanupRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + if (ensureRegionTerm(parser, cleanupRegion, regionLoc).failed()) + return failure(); + } + return success(); } static void printOmittedTerminatorRegion(mlir::OpAsmPrinter &printer, cir::ScopeOp &op, - mlir::Region ®ion) { - printer.printRegion(region, + mlir::Region &scopeRegion, + mlir::Region &cleanupRegion) { + printer.printRegion(scopeRegion, /*printEntryBlockArgs=*/false, - /*printBlockTerminators=*/!omitRegionTerm(region)); + /*printBlockTerminators=*/!omitRegionTerm(scopeRegion)); + if (!op.getCleanupRegion().empty()) { + printer << " cleanup "; + printer.printRegion( + cleanupRegion, + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/!omitRegionTerm(cleanupRegion)); + } } //===----------------------------------------------------------------------===// @@ -1251,6 +1270,7 @@ void cir::ScopeOp::getSuccessorRegions( // If the condition isn't constant, both regions may be executed. regions.push_back(RegionSuccessor(&getScopeRegion())); + regions.push_back(RegionSuccessor(&getCleanupRegion())); } void cir::ScopeOp::build( @@ -1261,6 +1281,8 @@ void cir::ScopeOp::build( OpBuilder::InsertionGuard guard(builder); Region *scopeRegion = result.addRegion(); builder.createBlock(scopeRegion); + // cleanup region, do not eagarly create blocks, do it upon demand. + (void)result.addRegion(); mlir::Type yieldTy; scopeBuilder(builder, yieldTy, result.location); @@ -1276,17 +1298,22 @@ void cir::ScopeOp::build( OpBuilder::InsertionGuard guard(builder); Region *scopeRegion = result.addRegion(); builder.createBlock(scopeRegion); + // cleanup region, do not eagarly create blocks, do it upon demand. + (void)result.addRegion(); scopeBuilder(builder, result.location); } LogicalResult cir::ScopeOp::verify() { - if (getRegion().empty()) { + if (getScopeRegion().empty()) { return emitOpError() << "cir.scope must not be empty since it should " "include at least an implicit cir.yield "; } - if (getRegion().back().empty() || - !getRegion().back().getTerminator()->hasTrait()) + if (getScopeRegion().back().empty() || + !getScopeRegion() + .back() + .getTerminator() + ->hasTrait()) return emitOpError() << "last block of cir.scope must be terminated"; return success(); } diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 6a7b00901aad..2eb4b6134686 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -141,9 +141,9 @@ class CIRScopeOpFlattening : public mlir::OpRewritePattern { continueBlock->addArguments(scopeOp.getResultTypes(), loc); // Inline body region. - auto *beforeBody = &scopeOp.getRegion().front(); - auto *afterBody = &scopeOp.getRegion().back(); - rewriter.inlineRegionBefore(scopeOp.getRegion(), continueBlock); + auto *beforeBody = &scopeOp.getScopeRegion().front(); + auto *afterBody = &scopeOp.getScopeRegion().back(); + rewriter.inlineRegionBefore(scopeOp.getScopeRegion(), continueBlock); // Save stack and then branch into the body of the region. rewriter.setInsertionPointToEnd(currentBlock); diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 7f0b3c803e83..8a3e6227b072 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -853,7 +853,7 @@ class CIRScopeOpLowering : public mlir::OpConversionPattern { return mlir::success(); } - for (auto &block : scopeOp.getRegion()) { + for (auto &block : scopeOp.getScopeRegion()) { rewriter.setInsertionPointToEnd(&block); auto *terminator = block.getTerminator(); rewriter.replaceOpWithNewOp( diff --git a/clang/test/CIR/IR/scope.cir b/clang/test/CIR/IR/scope.cir index f756355be0a0..1828921a6fe0 100644 --- a/clang/test/CIR/IR/scope.cir +++ b/clang/test/CIR/IR/scope.cir @@ -4,15 +4,18 @@ module { // Should properly print/parse scope with implicit empty yield. + // CHECK-LABEL: implicit_yield cir.func @implicit_yield() { cir.scope { } // CHECK: cir.scope { - // CHECK: } + // CHECK-NEXT: } + // CHECK-NEXT: cir.return cir.return } // Should properly print/parse scope with explicit yield. + // CHECK-LABEL: explicit_yield cir.func @explicit_yield() { %0 = cir.scope { %1 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} @@ -24,4 +27,31 @@ module { // CHECK: } : !cir.ptr cir.return } + + // Handle optional cleanup presence + // CHECK-LABEL: empty_cleanup + cir.func @empty_cleanup() { + cir.scope { + } cleanup { + } + // CHECK: cir.scope { + // CHECK-NEXT: } cleanup { + // CHECK-NEXT: } + // CHECK-NEXT: cir.return + cir.return + } + + // Handle optional cleanup presence + // CHECK-LABEL: some_cleanup + cir.func @some_cleanup() { + cir.scope { + } cleanup { + %1 = cir.alloca !u32i, !cir.ptr, ["a", init] {alignment = 4 : i64} + } + // CHECK: cir.scope { + // CHECK: } cleanup { + // CHECK: cir.alloca + // CHECK: } + cir.return + } } From 3c3b0966b6f66ee4394bdfa08e2fcb346cc01eb5 Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Mon, 27 Jan 2025 09:43:20 -0800 Subject: [PATCH 2215/2301] [CIR] Add limited support for array new (#1286) This change adds initial support for array new expressions where the array size is constant and the element does not require a cookie. --- clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 15 +++ clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 15 +++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 138 ++++++++++++++++++++---- clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 + clang/test/CIR/CodeGen/new.cpp | 34 ++++++ clang/test/CIR/Lowering/new.cpp | 20 ++++ 6 files changed, 208 insertions(+), 19 deletions(-) create mode 100644 clang/test/CIR/Lowering/new.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp index 585dd78bab34..82b253ba4100 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -85,3 +85,18 @@ bool CIRGenCXXABI::isZeroInitializable(const MemberPointerType *MPT) { // Fake answer. return true; } + +CharUnits CIRGenCXXABI::getArrayCookieSize(const CXXNewExpr *E) { + if (!requiresArrayCookie(E)) + return CharUnits::Zero(); + llvm_unreachable("NYI"); +} + +bool CIRGenCXXABI::requiresArrayCookie(const CXXNewExpr *E) { + // If the class's usual deallocation function takes two arguments, + // it needs a cookie. + if (E->doesUsualArrayDeleteWantSize()) + return true; + + return E->getAllocatedType().isDestructedType(); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 127d59c54892..ce217a6a6cd0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -38,6 +38,8 @@ class CIRGenCXXABI { clang::ASTContext &getContext() const { return CGM.getASTContext(); } + virtual bool requiresArrayCookie(const CXXNewExpr *E); + public: /// Similar to AddedStructorArgs, but only notes the number of additional /// arguments. @@ -347,6 +349,19 @@ class CIRGenCXXABI { virtual cir::MethodAttr buildVirtualMethodAttr(cir::MethodType MethodTy, const CXXMethodDecl *MD) = 0; + + /**************************** Array cookies ******************************/ + + /// Returns the extra size required in order to store the array + /// cookie for the given new-expression. May return 0 to indicate that no + /// array cookie is required. + /// + /// Several cases are filtered out before this method is called: + /// - non-array allocations never need a cookie + /// - calls to \::operator new(size_t, void*) never need a cookie + /// + /// \param E - the new-expression being allocated. + virtual CharUnits getArrayCookieSize(const CXXNewExpr *E); }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 83af7ee98f58..90a55ff1a87f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -13,6 +13,7 @@ #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/MissingFeatures.h" #include +#include #include #include #include @@ -549,11 +550,25 @@ static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) { return Params; } +static CharUnits CalculateCookiePadding(CIRGenFunction &CGF, + const CXXNewExpr *E) { + if (!E->isArray()) + return CharUnits::Zero(); + + // No cookie is required if the operator new[] being used is the + // reserved placement operator new[]. + if (E->getOperatorNew()->isReservedGlobalPlacementOperator()) + return CharUnits::Zero(); + + return CGF.CGM.getCXXABI().getArrayCookieSize(E); +} + static mlir::Value emitCXXNewAllocSize(CIRGenFunction &CGF, const CXXNewExpr *e, unsigned minElements, mlir::Value &numElements, mlir::Value &sizeWithoutCookie) { QualType type = e->getAllocatedType(); + mlir::Location Loc = CGF.getLoc(e->getSourceRange()); if (!e->isArray()) { CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type); @@ -563,7 +578,96 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &CGF, const CXXNewExpr *e, return sizeWithoutCookie; } - llvm_unreachable("NYI"); + // The width of size_t. + unsigned sizeWidth = CGF.CGM.getDataLayout().getTypeSizeInBits(CGF.SizeTy); + + // The number of elements can be have an arbitrary integer type; + // essentially, we need to multiply it by a constant factor, add a + // cookie size, and verify that the result is representable as a + // size_t. That's just a gloss, though, and it's wrong in one + // important way: if the count is negative, it's an error even if + // the cookie size would bring the total size >= 0. + // + // If the array size is constant, Sema will have prevented negative + // values and size overflow. + + // Compute the constant factor. + llvm::APInt arraySizeMultiplier(sizeWidth, 1); + while (const ConstantArrayType *CAT = + CGF.getContext().getAsConstantArrayType(type)) { + type = CAT->getElementType(); + arraySizeMultiplier *= CAT->getSize(); + } + + CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type); + llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity()); + typeSizeMultiplier *= arraySizeMultiplier; + + // Figure out the cookie size. + llvm::APInt cookieSize(sizeWidth, + CalculateCookiePadding(CGF, e).getQuantity()); + + // This will be a size_t. + mlir::Value size; + + // Emit the array size expression. + // We multiply the size of all dimensions for NumElements. + // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6. + const Expr *arraySize = *e->getArraySize(); + mlir::Attribute constNumElements = + ConstantEmitter(CGF.CGM, &CGF) + .tryEmitAbstract(arraySize, arraySize->getType()); + if (constNumElements) { + // Get an APInt from the constant + const llvm::APInt &count = + mlir::cast(constNumElements).getValue(); + + unsigned numElementsWidth = count.getBitWidth(); + + // The equivalent code in CodeGen/CGExprCXX.cpp handles these cases as + // overflow, but they should never happen. The size argument is implicitly + // cast to a size_t, so it can never be negative and numElementsWidth will + // always equal sizeWidth. + assert(!count.isNegative() && "Expected non-negative array size"); + assert(numElementsWidth == sizeWidth && + "Expected a size_t array size constant"); + + // Okay, compute a count at the right width. + llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth); + + // Scale numElements by that. This might overflow, but we don't + // care because it only overflows if allocationSize does, too, and + // if that overflows then we shouldn't use this. + // This emits a constant that may not be used, but we can't tell here + // whether it will be needed or not. + numElements = + CGF.getBuilder().getConstInt(Loc, adjustedCount * arraySizeMultiplier); + + // Compute the size before cookie, and track whether it overflowed. + bool overflow; + llvm::APInt allocationSize = + adjustedCount.umul_ov(typeSizeMultiplier, overflow); + + // Sema prevents us from hitting this case + assert(!overflow && "Overflow in array allocation size"); + + // Add in the cookie, and check whether it's overflowed. + if (cookieSize != 0) { + llvm_unreachable("NYI"); + } + + size = CGF.getBuilder().getConstInt(Loc, allocationSize); + } else { + // TODO: Handle the variable size case + llvm_unreachable("NYI"); + } + + if (cookieSize == 0) + sizeWithoutCookie = size; + else + assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?"); + + return size; } namespace { @@ -745,33 +849,32 @@ static void StoreAnyExprIntoOneUnit(CIRGenFunction &CGF, const Expr *Init, llvm_unreachable("bad evaluation kind"); } +void CIRGenFunction::emitNewArrayInitializer( + const CXXNewExpr *E, QualType ElementType, mlir::Type ElementTy, + Address BeginPtr, mlir::Value NumElements, + mlir::Value AllocSizeWithoutCookie) { + // If we have a type with trivial initialization and no initializer, + // there's nothing to do. + if (!E->hasInitializer()) + return; + + llvm_unreachable("NYI"); +} + static void emitNewInitializer(CIRGenFunction &CGF, const CXXNewExpr *E, QualType ElementType, mlir::Type ElementTy, Address NewPtr, mlir::Value NumElements, mlir::Value AllocSizeWithoutCookie) { assert(!cir::MissingFeatures::generateDebugInfo()); if (E->isArray()) { - llvm_unreachable("NYI"); + CGF.emitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements, + AllocSizeWithoutCookie); } else if (const Expr *Init = E->getInitializer()) { StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr, AggValueSlot::DoesNotOverlap); } } -static CharUnits CalculateCookiePadding(CIRGenFunction &CGF, - const CXXNewExpr *E) { - if (!E->isArray()) - return CharUnits::Zero(); - - // No cookie is required if the operator new[] being used is the - // reserved placement operator new[]. - if (E->getOperatorNew()->isReservedGlobalPlacementOperator()) - return CharUnits::Zero(); - - llvm_unreachable("NYI"); - // return CGF.CGM.getCXXABI().GetArrayCookieSize(E); -} - namespace { /// Calls the given 'operator delete' on a single object. struct CallObjectDelete final : EHScopeStack::Cleanup { @@ -1129,9 +1232,6 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *E) { emitNewInitializer(*this, E, allocType, elementTy, result, numElements, allocSizeWithoutCookie); auto resultPtr = result.getPointer(); - if (E->isArray()) { - llvm_unreachable("NYI"); - } // Deactivate the 'operator delete' cleanup if we finished // initialization. diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 047f7488f5dc..bbf1024951db 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -726,6 +726,11 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value emitCXXNewExpr(const CXXNewExpr *E); void emitCXXDeleteExpr(const CXXDeleteExpr *E); + void emitNewArrayInitializer(const CXXNewExpr *E, QualType ElementType, + mlir::Type ElementTy, Address BeginPtr, + mlir::Value NumElements, + mlir::Value AllocSizeWithoutCookie); + void emitCXXAggrConstructorCall(const CXXConstructorDecl *D, const clang::ArrayType *ArrayTy, Address ArrayPtr, const CXXConstructExpr *E, diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index 6e829dd5de6b..9447aff3b10a 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -56,3 +56,37 @@ void t() { B b; b.construct(&b); } + + +void t_new_constant_size() { + auto p = new double[16]; +} + +// In this test, NUM_ELEMENTS isn't used because no cookie is needed and there +// are no constructor calls needed. + +// CHECK: cir.func @_Z19t_new_constant_sizev() +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} +// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<16> : !u64i +// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<128> : !u64i +// CHECK: %3 = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr +// CHECK: cir.store %4, %0 : !cir.ptr, !cir.ptr> +// CHECK: cir.return +// CHECK: } + +void t_new_multidim_constant_size() { + auto p = new double[2][3][4]; +} + +// As above, NUM_ELEMENTS isn't used. + +// CHECK: cir.func @_Z28t_new_multidim_constant_sizev() +// CHECK: %0 = cir.alloca !cir.ptr x 3>>, !cir.ptr x 3>>>, ["p", init] {alignment = 8 : i64} +// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<24> : !u64i +// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<192> : !u64i +// CHECK: %3 = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr +// CHECK: %5 = cir.cast(bitcast, %0 : !cir.ptr x 3>>>), !cir.ptr> +// CHECK: cir.store %4, %5 : !cir.ptr, !cir.ptr> +// CHECK: } diff --git a/clang/test/CIR/Lowering/new.cpp b/clang/test/CIR/Lowering/new.cpp new file mode 100644 index 000000000000..9760854fce20 --- /dev/null +++ b/clang/test/CIR/Lowering/new.cpp @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +void t_new_constant_size() { + auto p = new double[16]; +} + +// LLVM: @_Z19t_new_constant_sizev() +// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 128) +// LLVM: store ptr %[[ADDR]], ptr %[[ALLOCA]], align 8 + +void t_new_multidim_constant_size() { + auto p = new double[2][3][4]; +} + +// LLVM: @_Z28t_new_multidim_constant_sizev() +// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 192) +// LLVM: store ptr %[[ADDR]], ptr %[[ALLOCA]], align 8 \ No newline at end of file From cf09dbfe02ca494600a4caa08cbe64025ecd4d6c Mon Sep 17 00:00:00 2001 From: darkbuck Date: Tue, 28 Jan 2025 13:16:21 -0500 Subject: [PATCH 2216/2301] [CIR] Fix build due to deprecated interfaces (#1299) - After abba01adad5dfc54f781357d924c8021c9306615, 'is' and 'get' interfaces are deprecated even though not removed yet. However, it causes warnings and triggers build failures if that warnings are treated as errors. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 ++-- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenValue.h | 5 +++-- clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp | 12 ++++++------ clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 4 ++-- clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp | 8 ++++---- clang/test/CodeGenOpenCL/printf.cl | 1 - 8 files changed, 20 insertions(+), 20 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index e1b2f0b766e1..2659471e7e38 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3638,8 +3638,8 @@ class CIR_CallOp extra_traits = []> : void setCalleeFromCallable(::mlir::CallInterfaceCallable callee) { if (auto calling = (*this)->getAttrOfType(getCalleeAttrName())) - (*this)->setAttr(getCalleeAttrName(), callee.get()); - setOperand(0, callee.get()); + (*this)->setAttr(getCalleeAttrName(), mlir::cast(callee)); + setOperand(0, mlir::cast(callee)); } bool isIndirect() { return !getCallee(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index c712a7ea27c4..851a2230bf30 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1053,7 +1053,7 @@ void CIRGenFunction::emitCallArgs( const auto *MD = mlir::dyn_cast(Prototype.P); assert(!MD && "ObjCMethodDecl NYI"); - const auto *FPT = Prototype.P.get(); + const auto *FPT = mlir::cast(Prototype.P); IsVariadic = FPT->isVariadic(); ExplicitCC = FPT->getExtInfo().getCC(); ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index b15b7f3aaf2e..b886674b366f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -1456,8 +1456,8 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() { // Convert to the appropriate type; this could be an lvalue for // an integer. FIXME: performAddrSpaceCast if (mlir::isa(destTy)) { - if (value.is()) - return value.get(); + if (auto attr = mlir::dyn_cast(value)) + return attr; llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index 6e26c8059a15..2d24018b9601 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -70,8 +70,9 @@ class RValue { /// Return the mlir::Value of the address of the aggregate. Address getAggregateAddress() const { assert(isAggregate() && "Not an aggregate!"); - auto align = reinterpret_cast(V2.getPointer().get()) >> - AggAlignShift; + auto align = + reinterpret_cast(mlir::cast(V2.getPointer())) >> + AggAlignShift; return Address(V1.getPointer(), ElementType, clang::CharUnits::fromQuantity(align)); } diff --git a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp index 848189f852c4..b0b146e12e2c 100644 --- a/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp +++ b/clang/lib/CIR/CodeGen/ConstantInitBuilder.cpp @@ -23,10 +23,10 @@ ConstantInitBuilderBase::ConstantInitBuilderBase(CIRGenModule &CGM) mlir::Type ConstantInitFuture::getType() const { assert(Data && "dereferencing null future"); - if (Data.is()) { - auto attr = mlir::dyn_cast(Data.get()); - assert(attr && "expected typed attribute"); - return attr.getType(); + if (auto attr = mlir::dyn_cast(Data)) { + auto typedAttr = mlir::dyn_cast(attr); + assert(typedAttr && "expected typed attribute"); + return typedAttr.getType(); } else { llvm_unreachable("Only sypport typed attributes here"); } @@ -42,8 +42,8 @@ void ConstantInitFuture::abandon() { void ConstantInitFuture::installInGlobal(cir::GlobalOp GV) { assert(Data && "installing null future"); - if (Data.is()) { - CIRGenModule::setInitializer(GV, Data.get()); + if (auto attr = mlir::dyn_cast(Data)) { + CIRGenModule::setInitializer(GV, attr); } else { llvm_unreachable("NYI"); // auto &builder = *Data.get(); diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index b4228fa2dfc8..822af97ad6e4 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -784,8 +784,8 @@ OpFoldResult cir::CastOp::fold(FoldAdaptor adaptor) { // create a new attribute that's capable of representing the source. llvm::SmallVector foldResults; auto foldOrder = getSrc().getDefiningOp()->fold(foldResults); - if (foldOrder.succeeded() && foldResults[0].is()) - return foldResults[0].get(); + if (foldOrder.succeeded() && mlir::isa(foldResults[0])) + return mlir::cast(foldResults[0]); return {}; } case cir::CastKind::bitcast: diff --git a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp index 08afedf804e3..323b8e4cd125 100644 --- a/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LifetimeCheck.cpp @@ -526,11 +526,11 @@ static Location getEndLocForHist(Region *R) { static Location getEndLocForHist(LifetimeCheckPass::LexicalScopeContext &lsc) { assert(!lsc.parent.isNull() && "shouldn't be null"); - if (lsc.parent.is()) - return getEndLocForHist(lsc.parent.get()); - assert(lsc.parent.is() && + if (auto r = mlir::dyn_cast(lsc.parent)) + return getEndLocForHist(r); + assert(mlir::isa(lsc.parent) && "Only support operation beyond this point"); - return getEndLocForHist(lsc.parent.get()); + return getEndLocForHist(mlir::cast(lsc.parent)); } void LifetimeCheckPass::killInPset(mlir::Value ptrKey, const State &s, diff --git a/clang/test/CodeGenOpenCL/printf.cl b/clang/test/CodeGenOpenCL/printf.cl index 012b7c822344..2e11b8889d23 100644 --- a/clang/test/CodeGenOpenCL/printf.cl +++ b/clang/test/CodeGenOpenCL/printf.cl @@ -4,7 +4,6 @@ // RUN: %clang_cc1 -no-enable-noundef-analysis -cl-std=CL3.0 -cl-ext=-__opencl_c_fp64,-cl_khr_fp64 -triple spir-unknown-unknown -disable-llvm-passes -emit-llvm -o - %s | FileCheck -check-prefixes=NOFP64,ALL %s // RUN: %clang_cc1 -no-enable-noundef-analysis -cl-std=clc++2021 -cl-ext=+__opencl_c_fp64,+cl_khr_fp64 -triple spir-unknown-unknown -disable-llvm-passes -emit-llvm -o - %s | FileCheck -check-prefixes=FP64,ALL %s // RUN: %clang_cc1 -no-enable-noundef-analysis -cl-std=clc++2021 -cl-ext=-__opencl_c_fp64,-cl_khr_fp64 -triple spir-unknown-unknown -disable-llvm-passes -emit-llvm -o - %s | FileCheck -check-prefixes=NOFP64,ALL %s -// XFAIL: * typedef __attribute__((ext_vector_type(2))) float float2; typedef __attribute__((ext_vector_type(2))) half half2; From 34bd74e2d6fe6271e278cac6937e57106e930852 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Wed, 29 Jan 2025 01:44:56 +0100 Subject: [PATCH 2217/2301] [CIR] Cleanup cir.scopes with a single cir.yield operation (#1291) Cleanup cir scope if it contains only yield operation Fixes: #455 --- clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp | 11 ++++++++++- clang/test/CIR/Transforms/merge-cleanups.cir | 9 +++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp index 555cb20408a5..f923ac69dbeb 100644 --- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp +++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp @@ -62,7 +62,16 @@ struct RemoveEmptyScope : public OpRewritePattern { LogicalResult match(ScopeOp op) const final { // TODO: Remove this logic once CIR uses MLIR infrastructure to remove // trivially dead operations - return success(op.isEmpty()); + if (op.isEmpty()) { + return success(); + } + + Region *region = op.getRegions().front(); + if (region && region->getBlocks().front().getOperations().size() == 1) { + return success(isa(region->getBlocks().front().front())); + } + + return failure(); } void rewrite(ScopeOp op, PatternRewriter &rewriter) const final { diff --git a/clang/test/CIR/Transforms/merge-cleanups.cir b/clang/test/CIR/Transforms/merge-cleanups.cir index 715c7525b94d..a111fec0b0b4 100644 --- a/clang/test/CIR/Transforms/merge-cleanups.cir +++ b/clang/test/CIR/Transforms/merge-cleanups.cir @@ -138,4 +138,13 @@ module { cir.return %0 : !cir.ptr)> } + // Should remove scope with only yield + cir.func @removeBlockWithScopeYeild(%arg0: !s32i) { + cir.scope { + cir.yield + } + cir.return + } + // CHECK: cir.func @removeBlockWithScopeYeild + // CHECK-NEXT: cir.return } From 1b45e8c2717c8cd293b086795350b36263b65eb4 Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Tue, 28 Jan 2025 17:01:31 -0800 Subject: [PATCH 2218/2301] [CIR] Add initial support for array cookies (#1297) This patch adds the minimal support for array cookies needed to enable ClangIR generation for an array new expression that requires cookies but does not require an explicit initializer. This only provides the cookie support for the base Itanium CXXABI. Different cookie calculations are required for AppleARM64, which will be added in a subsequent patch. --- clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 20 ++++++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 64 +++++++++++++++++-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 64 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 +- clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 20 +++--- clang/test/CIR/CodeGen/new.cpp | 54 ++++++++++++++++ clang/test/CIR/Lowering/new.cpp | 41 +++++++++++- 8 files changed, 253 insertions(+), 17 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp index 82b253ba4100..7f31b80f93cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -89,7 +89,7 @@ bool CIRGenCXXABI::isZeroInitializable(const MemberPointerType *MPT) { CharUnits CIRGenCXXABI::getArrayCookieSize(const CXXNewExpr *E) { if (!requiresArrayCookie(E)) return CharUnits::Zero(); - llvm_unreachable("NYI"); + return getArrayCookieSizeImpl(E->getAllocatedType()); } bool CIRGenCXXABI::requiresArrayCookie(const CXXNewExpr *E) { diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index ce217a6a6cd0..b82b744852f7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -362,6 +362,26 @@ class CIRGenCXXABI { /// /// \param E - the new-expression being allocated. virtual CharUnits getArrayCookieSize(const CXXNewExpr *E); + + /// Initialize the array cookie for the given allocation. + /// + /// \param NewPtr - a char* which is the presumed-non-null + /// return value of the allocation function + /// \param NumElements - the computed number of elements, + /// potentially collapsed from the multidimensional array case; + /// always a size_t + /// \param ElementType - the base element allocated type, + /// i.e. the allocated type after stripping all array types + virtual Address initializeArrayCookie(CIRGenFunction &CGF, Address NewPtr, + mlir::Value NumElements, + const CXXNewExpr *E, + QualType ElementType) = 0; + +protected: + /// Returns the extra size required in order to store the array + /// cookie for the given type. Assumes that an array cookie is + /// required. + virtual CharUnits getArrayCookieSizeImpl(QualType ElementType) = 0; }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 90a55ff1a87f..4bc4866e03c7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -623,6 +623,7 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &CGF, const CXXNewExpr *e, mlir::cast(constNumElements).getValue(); unsigned numElementsWidth = count.getBitWidth(); + bool hasAnyOverflow = false; // The equivalent code in CodeGen/CGExprCXX.cpp handles these cases as // overflow, but they should never happen. The size argument is implicitly @@ -653,10 +654,22 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &CGF, const CXXNewExpr *e, // Add in the cookie, and check whether it's overflowed. if (cookieSize != 0) { - llvm_unreachable("NYI"); + // Save the current size without a cookie. This shouldn't be + // used if there was overflow. + sizeWithoutCookie = CGF.getBuilder().getConstInt( + Loc, allocationSize.zextOrTrunc(sizeWidth)); + + allocationSize = allocationSize.uadd_ov(cookieSize, overflow); + hasAnyOverflow |= overflow; } - size = CGF.getBuilder().getConstInt(Loc, allocationSize); + // On overflow, produce a -1 so operator new will fail. + if (hasAnyOverflow) { + size = + CGF.getBuilder().getConstInt(Loc, llvm::APInt::getAllOnes(sizeWidth)); + } else { + size = CGF.getBuilder().getConstInt(Loc, allocationSize); + } } else { // TODO: Handle the variable size case llvm_unreachable("NYI"); @@ -858,6 +871,46 @@ void CIRGenFunction::emitNewArrayInitializer( if (!E->hasInitializer()) return; + Address CurPtr = BeginPtr; + + unsigned InitListElements = 0; + + const Expr *Init = E->getInitializer(); + CleanupDeactivationScope deactivation(*this); + + const InitListExpr *ILE = dyn_cast(Init); + if (ILE) { + llvm_unreachable("NYI"); + } + + // If all elements have already been initialized, skip any further + // initialization. + auto ConstOp = dyn_cast(NumElements.getDefiningOp()); + if (ConstOp) { + auto ConstIntAttr = mlir::dyn_cast(ConstOp.getValue()); + // Just skip out if the constant count is zero. + if (ConstIntAttr && ConstIntAttr.getUInt() <= InitListElements) + return; + } + + assert(Init && "have trailing elements to initialize but no initializer"); + + // If this is a constructor call, try to optimize it out, and failing that + // emit a single loop to initialize all remaining elements. + if (const CXXConstructExpr *CCE = dyn_cast(Init)) { + CXXConstructorDecl *Ctor = CCE->getConstructor(); + if (Ctor->isTrivial()) { + // If new expression did not specify value-initialization, then there + // is no initialization. + if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty()) + return; + + llvm_unreachable("NYI"); + } + + llvm_unreachable("NYI"); + } + llvm_unreachable("NYI"); } @@ -1088,7 +1141,8 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *E) { ++ParamsToSkip; if (allocSize != allocSizeWithoutCookie) { - llvm_unreachable("NYI"); + CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI. + allocAlign = std::max(allocAlign, cookieAlign); } // The allocation alignment may be passed as the second argument. @@ -1186,7 +1240,9 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *E) { assert((allocSize == allocSizeWithoutCookie) == CalculateCookiePadding(*this, E).isZero()); if (allocSize != allocSizeWithoutCookie) { - llvm_unreachable("NYI"); + assert(E->isArray()); + allocation = CGM.getCXXABI().initializeArrayCookie( + *this, allocation, numElements, E, allocType); } mlir::Type elementTy; diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 495491799b3f..88861b7c544c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -324,6 +324,13 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { cir::MethodAttr buildVirtualMethodAttr(cir::MethodType MethodTy, const CXXMethodDecl *MD) override; + Address initializeArrayCookie(CIRGenFunction &CGF, Address NewPtr, + mlir::Value NumElements, const CXXNewExpr *E, + QualType ElementType) override; + +protected: + CharUnits getArrayCookieSizeImpl(QualType ElementType) override; + /**************************** RTTI Uniqueness ******************************/ protected: /// Returns true if the ABI requires RTTI type_info objects to be unique @@ -2637,3 +2644,60 @@ CIRGenItaniumCXXABI::buildVirtualMethodAttr(cir::MethodType MethodTy, bool CIRGenItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) { return MPT->isMemberFunctionPointer(); } + +/************************** Array allocation cookies **************************/ + +CharUnits CIRGenItaniumCXXABI::getArrayCookieSizeImpl(QualType ElementType) { + // The array cookie is a size_t; pad that up to the element alignment. + // The cookie is actually right-justified in that space. + return std::max( + CharUnits::fromQuantity(CGM.SizeSizeInBytes), + CGM.getASTContext().getPreferredTypeAlignInChars(ElementType)); +} + +Address CIRGenItaniumCXXABI::initializeArrayCookie(CIRGenFunction &CGF, + Address NewPtr, + mlir::Value NumElements, + const CXXNewExpr *E, + QualType ElementType) { + assert(requiresArrayCookie(E)); + + // TODO: Get the address space when sanitizer support is implemented + + ASTContext &Ctx = getContext(); + CharUnits SizeSize = CGF.getSizeSize(); + mlir::Location Loc = CGF.getLoc(E->getSourceRange()); + + // The size of the cookie. + CharUnits CookieSize = + std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType)); + assert(CookieSize == getArrayCookieSizeImpl(ElementType)); + + // Compute an offset to the cookie. + Address CookiePtr = NewPtr; + CharUnits CookieOffset = CookieSize - SizeSize; + if (!CookieOffset.isZero()) { + auto CastOp = CGF.getBuilder().createPtrBitcast( + CookiePtr.getPointer(), CGF.getBuilder().getUIntNTy(8)); + auto OffsetOp = CGF.getBuilder().getSignedInt( + Loc, CookieOffset.getQuantity(), /*width=*/32); + auto DataPtr = CGF.getBuilder().createPtrStride(Loc, CastOp, OffsetOp); + CookiePtr = Address(DataPtr, NewPtr.getType(), NewPtr.getAlignment()); + } + + // Write the number of elements into the appropriate slot. + Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy); + CGF.getBuilder().createStore(Loc, NumElements, NumElementsPtr); + + if (CGF.SanOpts.has(SanitizerKind::Address)) + llvm_unreachable("NYI"); + + // Finally, compute a pointer to the actual data buffer by skipping + // over the cookie completely. + int64_t Offset = (CookieSize.getQuantity()); + auto CastOp = CGF.getBuilder().createPtrBitcast( + NewPtr.getPointer(), CGF.getBuilder().getUIntNTy(8)); + auto OffsetOp = CGF.getBuilder().getSignedInt(Loc, Offset, /*width=*/32); + auto DataPtr = CGF.getBuilder().createPtrStride(Loc, CastOp, OffsetOp); + return Address(DataPtr, NewPtr.getType(), NewPtr.getAlignment()); +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 6c3fad1f021b..a427a297fd0d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -145,7 +145,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, .toCharUnitsFromBits( astContext.getTargetInfo().getPointerAlign(LangAS::Default)) .getQuantity(); - // TODO: SizeSizeInBytes + SizeSizeInBytes = + astContext + .toCharUnitsFromBits(astContext.getTargetInfo().getMaxPointerWidth()) + .getQuantity(); // TODO: IntAlignInBytes UCharTy = cir::IntType::get(&getMLIRContext(), astContext.getTargetInfo().getCharWidth(), diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index e625efb40dc0..4e29ad5dca46 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -105,19 +105,19 @@ struct CIRGenTypeCache { }; /// The size and alignment of size_t. - // union { - // unsigned char SizeSizeInBytes; // sizeof(size_t) - // unsigned char SizeAlignInBytes; - // }; + union { + unsigned char SizeSizeInBytes; // sizeof(size_t) + unsigned char SizeAlignInBytes; + }; cir::AddressSpaceAttr CIRAllocaAddressSpace; - // clang::CharUnits getSizeSize() const { - // return clang::CharUnits::fromQuantity(SizeSizeInBytes); - // } - // clang::CharUnits getSizeAlign() const { - // return clang::CharUnits::fromQuantity(SizeAlignInBytes); - // } + clang::CharUnits getSizeSize() const { + return clang::CharUnits::fromQuantity(SizeSizeInBytes); + } + clang::CharUnits getSizeAlign() const { + return clang::CharUnits::fromQuantity(SizeAlignInBytes); + } clang::CharUnits getPointerSize() const { return clang::CharUnits::fromQuantity(PointerSizeInBytes); } diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index 9447aff3b10a..fae2c429dd0a 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -90,3 +90,57 @@ void t_new_multidim_constant_size() { // CHECK: %5 = cir.cast(bitcast, %0 : !cir.ptr x 3>>>), !cir.ptr> // CHECK: cir.store %4, %5 : !cir.ptr, !cir.ptr> // CHECK: } + +class C { + public: + ~C(); +}; + +void t_constant_size_nontrivial() { + auto p = new C[3]; +} + +// CHECK: cir.func @_Z26t_constant_size_nontrivialv() +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} +// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<3> : !u64i +// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<3> : !u64i +// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<11> : !u64i +// CHECK: %4 = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr +// CHECK: cir.store %[[#NUM_ELEMENTS]], %5 : !u64i, !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr +// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<8> : !s32i +// CHECK: %8 = cir.ptr_stride(%6 : !cir.ptr, %[[#COOKIE_SIZE]] : !s32i), !cir.ptr +// CHECK: %9 = cir.cast(bitcast, %8 : !cir.ptr), !cir.ptr +// CHECK: cir.store %9, %0 : !cir.ptr, !cir.ptr> +// CHECK: cir.return +// CHECK: } + +class D { + public: + int x; + ~D(); +}; + +void t_constant_size_nontrivial2() { + auto p = new D[3]; +} + +// In this test SIZE_WITHOUT_COOKIE isn't used, but it would be if there were +// an initializer. + +// CHECK: cir.func @_Z27t_constant_size_nontrivial2v() +// CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} +// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<3> : !u64i +// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<12> : !u64i +// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<20> : !u64i +// CHECK: %4 = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr +// CHECK: cir.store %[[#NUM_ELEMENTS]], %5 : !u64i, !cir.ptr +// CHECK: %6 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr +// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<8> : !s32i +// CHECK: %8 = cir.ptr_stride(%6 : !cir.ptr, %[[#COOKIE_SIZE]] : !s32i), !cir.ptr +// CHECK: %9 = cir.cast(bitcast, %8 : !cir.ptr), !cir.ptr +// CHECK: cir.store %9, %0 : !cir.ptr, !cir.ptr> +// CHECK: cir.return +// CHECK: } \ No newline at end of file diff --git a/clang/test/CIR/Lowering/new.cpp b/clang/test/CIR/Lowering/new.cpp index 9760854fce20..9c276f53dad3 100644 --- a/clang/test/CIR/Lowering/new.cpp +++ b/clang/test/CIR/Lowering/new.cpp @@ -17,4 +17,43 @@ void t_new_multidim_constant_size() { // LLVM: @_Z28t_new_multidim_constant_sizev() // LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8 // LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 192) -// LLVM: store ptr %[[ADDR]], ptr %[[ALLOCA]], align 8 \ No newline at end of file +// LLVM: store ptr %[[ADDR]], ptr %[[ALLOCA]], align 8 + +class C { + public: + ~C(); +}; + +void t_constant_size_nontrivial() { + auto p = new C[3]; +} + +// Note: The below differs from the IR emitted by clang without -fclangir in +// several respects. (1) The alloca here has an extra "i64 1" +// (2) The operator new call is missing "noalias noundef nonnull" on +// the call and "noundef" on the argument, (3) The getelementptr is +// missing "inbounds" + +// LLVM: @_Z26t_constant_size_nontrivialv() +// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[COOKIE_PTR:.*]] = call ptr @_Znam(i64 11) +// LLVM: store i64 3, ptr %[[COOKIE_PTR]], align 8 +// LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr i8, ptr %[[COOKIE_PTR]], i64 8 +// LLVM: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8 + +class D { + public: + int x; + ~D(); +}; + +void t_constant_size_nontrivial2() { + auto p = new D[3]; +} + +// LLVM: @_Z27t_constant_size_nontrivial2v() +// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[COOKIE_PTR:.*]] = call ptr @_Znam(i64 20) +// LLVM: store i64 3, ptr %[[COOKIE_PTR]], align 8 +// LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr i8, ptr %[[COOKIE_PTR]], i64 8 +// LLVM: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8 From a310ae0141bf9bcf100b036375cdfde41e952a2e Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Wed, 29 Jan 2025 02:02:47 +0100 Subject: [PATCH 2219/2301] [CIR][NFC] Teach op parsers to use enum keyword parsing helpers (#1298) Change `parseVisibilityAttr` to use enum parser helper `parseOptionalCIRKeyword` Fixes: #770 --- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 822af97ad6e4..6fa6f73d5965 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -171,6 +171,7 @@ template struct EnumTraits {}; } REGISTER_ENUM_TYPE(GlobalLinkageKind); +REGISTER_ENUM_TYPE(VisibilityKind); REGISTER_ENUM_TYPE(CallingConv); REGISTER_ENUM_TYPE(SideEffect); REGISTER_ENUM_TYPE_WITH_NS(cir::sob, SignedOverflowBehavior); @@ -179,7 +180,6 @@ REGISTER_ENUM_TYPE_WITH_NS(cir::sob, SignedOverflowBehavior); /// Parse an enum from the keyword, or default to the provided default value. /// The return type is the enum type by default, unless overriden with the /// second template argument. -/// TODO: teach other places in this file to use this function. template static RetTy parseOptionalCIRKeyword(AsmParser &parser, EnumTy defaultValue) { llvm::SmallVector names; @@ -259,16 +259,8 @@ void printVisibilityAttr(OpAsmPrinter &printer, } void parseVisibilityAttr(OpAsmParser &parser, cir::VisibilityAttr &visibility) { - cir::VisibilityKind visibilityKind; - - if (parser.parseOptionalKeyword("hidden").succeeded()) { - visibilityKind = cir::VisibilityKind::Hidden; - } else if (parser.parseOptionalKeyword("protected").succeeded()) { - visibilityKind = cir::VisibilityKind::Protected; - } else { - visibilityKind = cir::VisibilityKind::Default; - } - + cir::VisibilityKind visibilityKind = + parseOptionalCIRKeyword(parser, cir::VisibilityKind::Default); visibility = cir::VisibilityAttr::get(parser.getContext(), visibilityKind); } From d329c96a56b41ad99ddffe7bd037ac4ab7476ce6 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Tue, 28 Jan 2025 19:55:20 -0800 Subject: [PATCH 2220/2301] [CIR] Fix vector issues from latest rebase --- clang/lib/CIR/CodeGen/ABIInfo.h | 9 ++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 92 +++++++++---------- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 + clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- clang/lib/CIR/CodeGen/TargetInfo.cpp | 9 ++ .../Transforms/TargetLowering/ABIInfo.h | 1 + clang/test/CIR/CodeGen/OpenCL/printf.cl | 1 - clang/test/CIR/CodeGen/builtins-elementwise.c | 1 - clang/test/CIR/CodeGen/vectype.cpp | 1 - .../test/CIR/Lowering/ThroughMLIR/vectype.cpp | 1 - clang/test/CIR/Lowering/vectype.cpp | 1 - 11 files changed, 67 insertions(+), 55 deletions(-) diff --git a/clang/lib/CIR/CodeGen/ABIInfo.h b/clang/lib/CIR/CodeGen/ABIInfo.h index 6ac37bb01350..cb8dabb31df6 100644 --- a/clang/lib/CIR/CodeGen/ABIInfo.h +++ b/clang/lib/CIR/CodeGen/ABIInfo.h @@ -10,6 +10,8 @@ #define LLVM_CLANG_LIB_CIR_ABIINFO_H #include "clang/AST/Type.h" +#include "clang/Basic/LangOptions.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" namespace clang::CIRGen { @@ -39,6 +41,13 @@ class ABIInfo { // Implement the Type::IsPromotableIntegerType for ABI specific needs. The // only difference is that this consideres bit-precise integer types as well. bool isPromotableIntegerTypeForABI(clang::QualType Ty) const; + + /// Returns the optimal vector memory type based on the given vector type. For + /// example, on certain targets, a vector with 3 elements might be promoted to + /// one with 4 elements to improve performance. + virtual cir::VectorType + getOptimalVectorMemoryType(cir::VectorType T, + const clang::LangOptions &Opt) const; }; } // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 8ed7407f7e2b..38a880548202 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -173,9 +173,9 @@ static Address emitPointerWithAlignment(const Expr *expr, llvm_unreachable("NYI"); } - auto ElemTy = cgf.convertTypeForMem(expr->getType()->getPointeeType()); + auto eltTy = cgf.convertTypeForMem(expr->getType()->getPointeeType()); addr = cgf.getBuilder().createElementBitCast( - cgf.getLoc(expr->getSourceRange()), addr, ElemTy); + cgf.getLoc(expr->getSourceRange()), addr, eltTy); if (CE->getCastKind() == CK_AddressSpaceConversion) { assert(!cir::MissingFeatures::addressSpace()); llvm_unreachable("NYI"); @@ -616,6 +616,25 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr, LValueBaseInfo baseInfo, TBAAAccessInfo tbaaInfo, bool isInit, bool isNontemporal) { + assert(!cir::MissingFeatures::threadLocal() && "NYI"); + + auto eltTy = addr.getElementType(); + if (const auto *clangVecTy = ty->getAs()) { + // Boolean vectors use `iN` as storage type. + if (clangVecTy->isExtVectorBoolType()) { + llvm_unreachable("isExtVectorBoolType NYI"); + } + + // Handle vectors of size 3 like size 4 for better performance. + const auto vTy = cast(eltTy); + auto newVecTy = + CGM.getABIInfo().getOptimalVectorMemoryType(vTy, getLangOpts()); + + if (vTy != newVecTy) { + llvm_unreachable("NYI"); + } + } + value = emitToMemory(value, ty); LValue atomicLValue = @@ -626,26 +645,6 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr, return; } - mlir::Type SrcTy = value.getType(); - if (const auto *ClangVecTy = ty->getAs()) { - // TODO(CIR): this has fallen out of date with codegen - llvm_unreachable("NYI: Special treatment of 3-element vector store"); - // auto VecTy = dyn_cast(SrcTy); - // if (!CGM.getCodeGenOpts().PreserveVec3Type && - // ClangVecTy->getNumElements() == 3) { - // // Handle vec3 special. - // if (VecTy && VecTy.getSize() == 3) { - // // Our source is a vec3, do a shuffle vector to make it a vec4. - // value = builder.createVecShuffle(value.getLoc(), value, - // ArrayRef{0, 1, 2, -1}); - // SrcTy = cir::VectorType::get(VecTy.getContext(), VecTy.getEltType(), 4); - // } - // if (addr.getElementType() != SrcTy) { - // addr = addr.withElementType(SrcTy); - // } - // } - } - // Update the alloca with more info on initialization. assert(addr.getPointer() && "expected pointer to exist"); auto SrcAlloca = @@ -2917,40 +2916,36 @@ mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile, LValueBaseInfo baseInfo, TBAAAccessInfo tbaaInfo, bool isNontemporal) { - // TODO(CIR): this has fallen out of sync with codegen - // Atomic operations have to be done on integral types - LValue atomicLValue = - LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); - if (ty->isAtomicType() || LValueIsSuitableForInlineAtomic(atomicLValue)) { - llvm_unreachable("NYI"); - } + assert(!cir::MissingFeatures::threadLocal() && "NYI"); + auto eltTy = addr.getElementType(); - auto ElemTy = addr.getElementType(); + if (const auto *clangVecTy = ty->getAs()) { + // Boolean vectors use `iN` as storage type. + if (clangVecTy->isExtVectorBoolType()) { + llvm_unreachable("NYI"); + } - if (const auto *ClangVecTy = ty->getAs()) { // Handle vectors of size 3 like size 4 for better performance. - const auto VTy = cast(ElemTy); + const auto vTy = cast(eltTy); + auto newVecTy = + CGM.getABIInfo().getOptimalVectorMemoryType(vTy, getLangOpts()); - // TODO(CIR): this has fallen out of sync with codegen - llvm_unreachable("NYI: Special treatment of 3-element vector store"); - // if (!CGM.getCodeGenOpts().PreserveVec3Type && - // ClangVecTy->getNumElements() == 3) { - // auto loc = addr.getPointer().getLoc(); - // auto vec4Ty = cir::VectorType::get(VTy.getContext(), VTy.getEltType(), 4); - // Address Cast = addr.withElementType(vec4Ty); - // // Now load value. - // mlir::Value V = builder.createLoad(loc, Cast); + if (vTy != newVecTy) { + llvm_unreachable("NYI"); + } + } - // // Shuffle vector to get vec3. - // V = builder.createVecShuffle(loc, V, ArrayRef{0, 1, 2}); - // return emitFromMemory(V, ty); - // } + LValue atomicLValue = + LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); + if (ty->isAtomicType() || LValueIsSuitableForInlineAtomic(atomicLValue)) { + llvm_unreachable("NYI"); } + // TODO(cir): modernize this with addr.withElementType(convertTypeForLoadStore auto Ptr = addr.getPointer(); - if (mlir::isa(ElemTy)) { - ElemTy = cir::IntType::get(&getMLIRContext(), 8, true); - auto ElemPtrTy = cir::PointerType::get(&getMLIRContext(), ElemTy); + if (mlir::isa(eltTy)) { + eltTy = cir::IntType::get(&getMLIRContext(), 8, true); + auto ElemPtrTy = cir::PointerType::get(&getMLIRContext(), eltTy); Ptr = builder.create(loc, ElemPtrTy, cir::CastKind::bitcast, Ptr); } @@ -2962,7 +2957,6 @@ mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile, CGM.decorateOperationWithTBAA(loadOp, tbaaInfo); assert(!cir::MissingFeatures::emitScalarRangeCheck() && "NYI"); - return emitFromMemory(loadOp, ty); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index a427a297fd0d..c93a145f35ce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -494,6 +494,10 @@ void CIRGenModule::setDSOLocal(CIRGlobalValueInterface GV) const { GV.setDSOLocal(shouldAssumeDSOLocal(*this, GV)); } +const ABIInfo &CIRGenModule::getABIInfo() { + return getTargetCIRGenInfo().getABIInfo(); +} + void CIRGenModule::emitGlobal(GlobalDecl GD) { llvm::TimeTraceScope scope("build CIR Global", [&]() -> std::string { auto *ND = dyn_cast(GD.getDecl()); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 4fa8d9dfbbcb..867dee754862 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -471,8 +471,8 @@ class CIRGenModule : public CIRGenTypeCache { /// NOTE: This should only be called for definitions. void setCommonAttributes(GlobalDecl GD, mlir::Operation *GV); - // TODO: this obviously overlaps with const TargetCIRGenInfo &getTargetCIRGenInfo(); + const ABIInfo &getABIInfo(); /// Helpers to convert Clang's SourceLocation to a MLIR Location. mlir::Location getLoc(clang::SourceLocation SLoc); diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index cadfe76a717c..d613167677a9 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -318,6 +318,15 @@ static bool classifyReturnType(const CIRGenCXXABI &CXXABI, CIRGenCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); } +cir::VectorType +ABIInfo::getOptimalVectorMemoryType(cir::VectorType T, + const clang::LangOptions &Opt) const { + if (T.getSize() == 3 && !Opt.PreserveVec3Type) { + llvm_unreachable("NYI"); + } + return T; +} + clang::ASTContext &ABIInfo::getContext() const { return CGT.getContext(); } cir::ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h index 434070fd8157..c68392ed7c0b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ABIInfo.h @@ -27,6 +27,7 @@ class LowerTypes; /// Target specific hooks for defining how a type should be passed or returned /// from functions. +/// FIXME(cir): this needs to be merged with clang/lib/CIR/CodeGen/ABIInfo.h class ABIInfo { protected: LowerTypes < diff --git a/clang/test/CIR/CodeGen/OpenCL/printf.cl b/clang/test/CIR/CodeGen/OpenCL/printf.cl index 180e194d8153..b539fce01c2b 100644 --- a/clang/test/CIR/CodeGen/OpenCL/printf.cl +++ b/clang/test/CIR/CodeGen/OpenCL/printf.cl @@ -14,7 +14,6 @@ // RUN: FileCheck -input-file=%t.30fp64.ll -check-prefixes=LLVM-FP64,LLVM-ALL %s // RUN: %clang_cc1 -fclangir -no-enable-noundef-analysis -cl-std=CL3.0 -cl-ext=-__opencl_c_fp64,-cl_khr_fp64 -triple spirv64-unknown-unknown -disable-llvm-passes -emit-llvm -fno-clangir-call-conv-lowering -o %t.30nofp64.ll %s // RUN: FileCheck -input-file=%t.30nofp64.ll -check-prefixes=LLVM-NOFP64,LLVM-ALL %s -// XFAIL: * typedef __attribute__((ext_vector_type(2))) float float2; typedef __attribute__((ext_vector_type(2))) half half2; diff --git a/clang/test/CIR/CodeGen/builtins-elementwise.c b/clang/test/CIR/CodeGen/builtins-elementwise.c index 191a4f8d8c3b..b790588605f4 100644 --- a/clang/test/CIR/CodeGen/builtins-elementwise.c +++ b/clang/test/CIR/CodeGen/builtins-elementwise.c @@ -3,7 +3,6 @@ // RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \ // RUN: -emit-llvm %s -o %t.ll // RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s -// XFAIL: * typedef int vint4 __attribute__((ext_vector_type(4))); typedef float vfloat4 __attribute__((ext_vector_type(4))); diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index df4fe6ff9459..c47de5a7279f 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -1,5 +1,4 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -// XFAIL: * typedef int vi4 __attribute__((vector_size(16))); typedef double vd2 __attribute__((vector_size(16))); diff --git a/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp b/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp index 57c18c67d44a..81c9fe063260 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp +++ b/clang/test/CIR/Lowering/ThroughMLIR/vectype.cpp @@ -1,6 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir %s -o %t.mlir // RUN: FileCheck --input-file=%t.mlir %s -// XFAIL: * typedef int vi4 __attribute__((vector_size(16))); diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp index c457500694ce..eabac1c2fe92 100644 --- a/clang/test/CIR/Lowering/vectype.cpp +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -2,7 +2,6 @@ // RUN: cir-opt %t.cir -cir-to-llvm -o %t.mlir // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ii // RUN: FileCheck --input-file=%t.mlir %s -// XFAIL: * typedef int vi4 __attribute__((vector_size(16))); typedef double vd2 __attribute__((vector_size(16))); From 2267e5d9f73d452317165371e6a42da882a49d49 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Sat, 1 Feb 2025 02:35:39 +0300 Subject: [PATCH 2221/2301] [CIR][CIRGen] Add padding to unions (#1289) This PR adds padding for union type, which is necessary in some cases (e.g. proper offset computation for an element of an array). The previous discussion is here #1281 The idea is to add a notion about padding in the `StructType` in the same fashion as it's done for packed structures - as a bool argument in the constructor. Now we can compute the proper union type size as a size of the largest element + size of padding type. There are some downsides though - I had to add this `padded` word in many places. So take a look please! There are many tests fixed and one new - `union-padding` --- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 20 ++--- .../clang/CIR/Dialect/IR/CIRTypesDetails.h | 34 +++++---- clang/lib/CIR/CodeGen/CIRAsm.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 27 ++++--- clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 25 ++++++- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 17 +++-- clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 12 +-- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 74 ++++++++++++------- .../Targets/LoweringPrepareX86CXXABI.cpp | 2 +- .../Transforms/TargetLowering/Targets/X86.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 5 ++ clang/test/CIR/CodeGen/agg-init2.cpp | 2 +- .../CodeGen/call-via-class-member-funcptr.cpp | 2 +- .../test/CIR/CodeGen/conditional-cleanup.cpp | 8 +- clang/test/CIR/CodeGen/coro-task.cpp | 14 ++-- clang/test/CIR/CodeGen/global-new.cpp | 2 +- clang/test/CIR/CodeGen/lambda.cpp | 2 +- clang/test/CIR/CodeGen/move.cpp | 2 +- clang/test/CIR/CodeGen/new-null.cpp | 2 +- clang/test/CIR/CodeGen/packed-structs.c | 4 +- clang/test/CIR/CodeGen/paren-list-init.cpp | 4 +- clang/test/CIR/CodeGen/struct.c | 10 ++- clang/test/CIR/CodeGen/try-catch-dtors.cpp | 4 +- clang/test/CIR/CodeGen/union-padding.c | 32 ++++++++ 24 files changed, 197 insertions(+), 113 deletions(-) create mode 100644 clang/test/CIR/CodeGen/union-padding.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 9f6eab7c7ba9..3a7e2b9617c2 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -86,12 +86,12 @@ class StructType /// Create a identified and complete struct type. static StructType get(mlir::MLIRContext *context, llvm::ArrayRef members, - mlir::StringAttr name, bool packed, RecordKind kind, - ASTRecordDeclInterface ast = {}); + mlir::StringAttr name, bool packed, bool padded, + RecordKind kind, ASTRecordDeclInterface ast = {}); static StructType getChecked(llvm::function_ref emitError, mlir::MLIRContext *context, llvm::ArrayRef members, - mlir::StringAttr name, bool packed, RecordKind kind, + mlir::StringAttr name, bool packed, bool padded, RecordKind kind, ASTRecordDeclInterface ast = {}); /// Create a identified and incomplete struct type. @@ -105,18 +105,20 @@ class StructType /// Create a anonymous struct type (always complete). static StructType get(mlir::MLIRContext *context, llvm::ArrayRef members, bool packed, - RecordKind kind, ASTRecordDeclInterface ast = {}); + bool padded, RecordKind kind, + ASTRecordDeclInterface ast = {}); static StructType getChecked(llvm::function_ref emitError, mlir::MLIRContext *context, llvm::ArrayRef members, - bool packed, RecordKind kind, ASTRecordDeclInterface ast = {}); + bool packed, bool padded, RecordKind kind, + ASTRecordDeclInterface ast = {}); /// Validate the struct about to be constructed. static llvm::LogicalResult verifyInvariants(llvm::function_ref emitError, llvm::ArrayRef members, mlir::StringAttr name, - bool incomplete, bool packed, StructType::RecordKind kind, - ASTRecordDeclInterface ast); + bool incomplete, bool packed, bool padded, + StructType::RecordKind kind, ASTRecordDeclInterface ast); // Parse/print methods. static constexpr llvm::StringLiteral getMnemonic() { return {"struct"}; } @@ -130,6 +132,7 @@ class StructType StructType::RecordKind getKind() const; bool getIncomplete() const; bool getPacked() const; + bool getPadded() const; void dropAst(); // Predicates @@ -157,7 +160,7 @@ class StructType } /// Complete the struct type by mutating its members and attributes. - void complete(llvm::ArrayRef members, bool packed, + void complete(llvm::ArrayRef members, bool packed, bool isPadded, ASTRecordDeclInterface ast = {}); /// DataLayoutTypeInterface methods. @@ -178,7 +181,6 @@ class StructType // from CIRAttrs.h. The implementation operates in terms of StructLayoutAttr // instead. mutable mlir::Attribute layoutInfo; - bool isPadded(const mlir::DataLayout &dataLayout) const; void computeSizeAndAlignment(const mlir::DataLayout &dataLayout) const; }; diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h index f97a4afe5a4c..7b4778d588bb 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypesDetails.h @@ -32,31 +32,33 @@ struct StructTypeStorage : public mlir::TypeStorage { mlir::StringAttr name; bool incomplete; bool packed; + bool padded; StructType::RecordKind kind; ASTRecordDeclInterface ast; KeyTy(llvm::ArrayRef members, mlir::StringAttr name, - bool incomplete, bool packed, StructType::RecordKind kind, - ASTRecordDeclInterface ast) + bool incomplete, bool packed, bool padded, + StructType::RecordKind kind, ASTRecordDeclInterface ast) : members(members), name(name), incomplete(incomplete), packed(packed), - kind(kind), ast(ast) {} + padded(padded), kind(kind), ast(ast) {} }; llvm::ArrayRef members; mlir::StringAttr name; bool incomplete; bool packed; + bool padded; StructType::RecordKind kind; ASTRecordDeclInterface ast; StructTypeStorage(llvm::ArrayRef members, mlir::StringAttr name, - bool incomplete, bool packed, StructType::RecordKind kind, - ASTRecordDeclInterface ast) + bool incomplete, bool packed, bool padded, + StructType::RecordKind kind, ASTRecordDeclInterface ast) : members(members), name(name), incomplete(incomplete), packed(packed), - kind(kind), ast(ast) {} + padded(padded), kind(kind), ast(ast) {} KeyTy getAsKey() const { - return KeyTy(members, name, incomplete, packed, kind, ast); + return KeyTy(members, name, incomplete, packed, padded, kind, ast); } bool operator==(const KeyTy &key) const { @@ -64,21 +66,21 @@ struct StructTypeStorage : public mlir::TypeStorage { return (name == key.name) && (kind == key.kind); return (members == key.members) && (name == key.name) && (incomplete == key.incomplete) && (packed == key.packed) && - (kind == key.kind) && (ast == key.ast); + (padded == key.padded) && (kind == key.kind) && (ast == key.ast); } static llvm::hash_code hashKey(const KeyTy &key) { if (key.name) return llvm::hash_combine(key.name, key.kind); - return llvm::hash_combine(key.members, key.incomplete, key.packed, key.kind, - key.ast); + return llvm::hash_combine(key.members, key.incomplete, key.packed, + key.padded, key.kind, key.ast); } static StructTypeStorage *construct(mlir::TypeStorageAllocator &allocator, const KeyTy &key) { - return new (allocator.allocate()) - StructTypeStorage(allocator.copyInto(key.members), key.name, - key.incomplete, key.packed, key.kind, key.ast); + return new (allocator.allocate()) StructTypeStorage( + allocator.copyInto(key.members), key.name, key.incomplete, key.packed, + key.padded, key.kind, key.ast); } /// Mutates the members and attributes an identified struct. @@ -89,7 +91,7 @@ struct StructTypeStorage : public mlir::TypeStorage { /// change the struct. llvm::LogicalResult mutate(mlir::TypeStorageAllocator &allocator, llvm::ArrayRef members, bool packed, - ASTRecordDeclInterface ast) { + bool padded, ASTRecordDeclInterface ast) { // Anonymous structs cannot mutate. if (!name) return llvm::failure(); @@ -97,12 +99,14 @@ struct StructTypeStorage : public mlir::TypeStorage { // Mutation of complete structs are allowed if they change nothing. if (!incomplete) return mlir::success((this->members == members) && - (this->packed == packed) && (this->ast == ast)); + (this->packed == packed) && + (this->padded == padded) && (this->ast == ast)); // Mutate incomplete struct. this->members = allocator.copyInto(members); this->packed = packed; this->ast = ast; + this->padded = padded; incomplete = false; return llvm::success(); diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 2513b92e8010..498311280e20 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -619,8 +619,8 @@ mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &S) { ResultType = ResultRegTypes[0]; else if (ResultRegTypes.size() > 1) { auto sname = builder.getUniqueAnonRecordName(); - ResultType = - builder.getCompleteStructTy(ResultRegTypes, sname, false, nullptr); + ResultType = builder.getCompleteStructTy(ResultRegTypes, sname, false, + false, nullptr); } bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 46b001531795..dfef1c909c05 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -178,6 +178,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { mlir::Attribute getConstStructOrZeroAttr(mlir::ArrayAttr arrayAttr, bool packed = false, + bool padded = false, mlir::Type type = {}) { llvm::SmallVector members; auto structTy = mlir::dyn_cast(type); @@ -193,9 +194,9 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { // Struct type not specified: create anon struct type from members. if (!structTy) - structTy = - getType(members, packed, cir::StructType::Struct, - /*ast=*/nullptr); + structTy = getType(members, packed, padded, + cir::StructType::Struct, + /*ast=*/nullptr); // Return zero or anonymous constant struct. if (isZero) @@ -205,6 +206,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { cir::ConstStructAttr getAnonConstStruct(mlir::ArrayAttr arrayAttr, bool packed = false, + bool padded = false, mlir::Type ty = {}) { llvm::SmallVector members; for (auto &f : arrayAttr) { @@ -214,7 +216,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { } if (!ty) - ty = getAnonStructTy(members, packed); + ty = getAnonStructTy(members, packed, padded); auto sTy = mlir::dyn_cast(ty); assert(sTy && "expected struct type"); @@ -434,7 +436,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { /// Get a CIR anonymous struct type. cir::StructType getAnonStructTy(llvm::ArrayRef members, - bool packed = false, + bool packed = false, bool padded = false, const clang::RecordDecl *ast = nullptr) { cir::ASTRecordDeclAttr astAttr = nullptr; auto kind = cir::StructType::RecordKind::Struct; @@ -442,7 +444,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { astAttr = getAttr(ast); kind = getRecordKind(ast->getTagKind()); } - return getType(members, packed, kind, astAttr); + return getType(members, packed, padded, kind, astAttr); } /// Get a CIR record kind from a AST declaration tag. @@ -477,6 +479,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { /// it with a different set of attributes, this method will crash. cir::StructType getCompleteStructTy(llvm::ArrayRef members, llvm::StringRef name, bool packed, + bool padded, const clang::RecordDecl *ast) { const auto nameAttr = getStringAttr(name); cir::ASTRecordDeclAttr astAttr = nullptr; @@ -487,19 +490,19 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { } // Create or get the struct. - auto type = - getType(members, nameAttr, packed, kind, astAttr); + auto type = getType(members, nameAttr, packed, padded, + kind, astAttr); // Complete an incomplete struct or ensure the existing complete struct // matches the requested attributes. - type.complete(members, packed, astAttr); + type.complete(members, packed, padded, astAttr); return type; } cir::StructType getCompleteStructType(mlir::ArrayAttr fields, bool packed = false, - llvm::StringRef name = "", + bool padded = false, llvm::StringRef name = "", const clang::RecordDecl *ast = nullptr) { llvm::SmallVector members; for (auto &attr : fields) { @@ -508,9 +511,9 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { } if (name.empty()) - return getAnonStructTy(members, packed, ast); + return getAnonStructTy(members, packed, padded, ast); else - return getCompleteStructTy(members, name, packed, ast); + return getCompleteStructTy(members, name, packed, padded, ast); } cir::ArrayType getArrayType(mlir::Type eltType, unsigned size) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index b886674b366f..c2955d7ad3fa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -385,6 +385,7 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( CharUnits AlignedSize = Size.alignTo(Align); bool Packed = false; + bool Padded = false; ArrayRef UnpackedElems = Elems; llvm::SmallVector UnpackedElemStorage; @@ -435,13 +436,13 @@ mlir::Attribute ConstantAggregateBuilder::buildFrom( auto &builder = CGM.getBuilder(); auto arrAttr = mlir::ArrayAttr::get(builder.getContext(), Packed ? PackedElems : UnpackedElems); - auto strType = builder.getCompleteStructType(arrAttr, Packed); + auto strType = builder.getCompleteStructType(arrAttr, Packed); if (auto desired = dyn_cast(DesiredTy)) if (desired.isLayoutIdentical(strType)) strType = desired; - return builder.getConstStructOrZeroAttr(arrAttr, Packed, strType); + return builder.getConstStructOrZeroAttr(arrAttr, Packed, Padded, strType); } void ConstantAggregateBuilder::condense(CharUnits Offset, @@ -521,6 +522,9 @@ class ConstStructBuilder { const FieldDecl &Field, bool AllowOverwrite, CharUnits &SizeSoFar, bool &ZeroFieldSize); + bool ApplyZeroInitPadding(const ASTRecordLayout &Layout, bool AllowOverwrite, + CharUnits SizeSoFar); + mlir::Attribute Finalize(QualType Ty); }; @@ -715,6 +719,10 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) { } } + if (ZeroInitPadding && + !ApplyZeroInitPadding(Layout, AllowOverwrite, SizeSoFar)) + return false; + return true; } @@ -853,6 +861,19 @@ bool ConstStructBuilder::ApplyZeroInitPadding( return true; } +bool ConstStructBuilder::ApplyZeroInitPadding(const ASTRecordLayout &Layout, + bool AllowOverwrite, + CharUnits SizeSoFar) { + CharUnits TotalSize = Layout.getSize(); + if (SizeSoFar < TotalSize) { + if (!AppendBytes(SizeSoFar, computePadding(CGM, TotalSize - SizeSoFar), + AllowOverwrite)) + return false; + } + SizeSoFar = TotalSize; + return true; +} + mlir::Attribute ConstStructBuilder::Finalize(QualType Type) { Type = Type.getNonReferenceType(); RecordDecl *RD = Type->castAs()->getDecl(); diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 373e0a735622..e179cf5f89ca 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -197,8 +197,10 @@ struct CIRRecordLowering final { void fillOutputFields(); void appendPaddingBytes(CharUnits Size) { - if (!Size.isZero()) + if (!Size.isZero()) { fieldTypes.push_back(getByteArrayType(Size)); + isPadded = 1; + } } CIRGenTypes &cirGenTypes; @@ -219,6 +221,7 @@ struct CIRRecordLowering final { bool IsZeroInitializable : 1; bool IsZeroInitializableAsBase : 1; bool isPacked : 1; + bool isPadded : 1; private: CIRRecordLowering(const CIRRecordLowering &) = delete; @@ -235,7 +238,7 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, dataLayout{cirGenTypes.getModule().getModule()}, IsZeroInitializable(true), IsZeroInitializableAsBase(true), - isPacked{isPacked} {} + isPacked{isPacked}, isPadded{false} {} void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, @@ -366,7 +369,7 @@ void CIRRecordLowering::lowerUnion() { if (LayoutSize < getSize(StorageType)) StorageType = getByteArrayType(LayoutSize); // NOTE(cir): Defer padding calculations to the lowering process. - // appendPaddingBytes(LayoutSize - getSize(StorageType)); + appendPaddingBytes(LayoutSize - getSize(StorageType)); // Set packed if we need it. if (LayoutSize % getAlignment(StorageType)) isPacked = true; @@ -680,6 +683,7 @@ void CIRRecordLowering::insertPadding() { } if (Padding.empty()) return; + isPadded = 1; // Add the padding to the Members list and sort it. for (std::vector>::const_iterator Pad = Padding.begin(), @@ -705,8 +709,9 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, cir::StructType *Ty) { CIRRecordLowering baseBuilder(*this, D, /*Packed=*/builder.isPacked); baseBuilder.lower(/*NonVirtualBaseType=*/true); auto baseIdentifier = getRecordTypeName(D, ".base"); - BaseTy = Builder.getCompleteStructTy( - baseBuilder.fieldTypes, baseIdentifier, baseBuilder.isPacked, D); + BaseTy = Builder.getCompleteStructTy(baseBuilder.fieldTypes, + baseIdentifier, baseBuilder.isPacked, + baseBuilder.isPadded, D); // TODO(cir): add something like addRecordTypeName // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work @@ -720,7 +725,7 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *D, cir::StructType *Ty) { // signifies that the type is no longer opaque and record layout is complete, // but we may need to recursively layout D while laying D out as a base type. auto astAttr = cir::ASTRecordDeclAttr::get(Ty->getContext(), D); - Ty->complete(builder.fieldTypes, builder.isPacked, astAttr); + Ty->complete(builder.fieldTypes, builder.isPacked, builder.isPadded, astAttr); auto RL = std::make_unique( Ty ? *Ty : cir::StructType{}, BaseTy ? BaseTy : cir::StructType{}, diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp index d1b17ad6cf39..9add51a35c53 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp @@ -209,19 +209,9 @@ llvm::TypeSize CIRDataLayout::getTypeSizeInBits(mlir::Type Ty) const { "Cannot getTypeInfo() on a type that is unsized!"); if (auto structTy = llvm::dyn_cast(Ty)) { - // FIXME(cir): CIR struct's data layout implementation doesn't do a good job // of handling unions particularities. We should have a separate union type. - if (structTy.isUnion()) { - auto largestMember = structTy.getLargestMember(layout); - return llvm::TypeSize::getFixed(layout.getTypeSizeInBits(largestMember)); - } - - // FIXME(cir): We should be able to query the size of a struct directly to - // its data layout implementation instead of requiring a separate - // StructLayout object. - // Get the layout annotation... which is lazily created on demand. - return getStructLayout(structTy)->getSizeInBits(); + return structTy.getTypeSizeInBits(layout, {}); } // FIXME(cir): This does not account for different address spaces, and relies diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index 04ecf52a428d..b22b5707a793 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -128,6 +128,7 @@ Type StructType::parse(mlir::AsmParser &parser) { const auto loc = parser.getCurrentLocation(); const auto eLoc = parser.getEncodedSourceLoc(loc); bool packed = false; + bool padded = false; RecordKind kind; auto *context = parser.getContext(); @@ -173,6 +174,9 @@ Type StructType::parse(mlir::AsmParser &parser) { if (parser.parseOptionalKeyword("packed").succeeded()) packed = true; + if (parser.parseOptionalKeyword("padded").succeeded()) + padded = true; + // Parse record members or lack thereof. bool incomplete = true; llvm::SmallVector members; @@ -200,13 +204,13 @@ Type StructType::parse(mlir::AsmParser &parser) { if (name && incomplete) { // Identified & incomplete type = getChecked(eLoc, context, name, kind); } else if (name && !incomplete) { // Identified & complete - type = getChecked(eLoc, context, membersRef, name, packed, kind); + type = getChecked(eLoc, context, membersRef, name, packed, padded, kind); // If the record has a self-reference, its type already exists in a // incomplete state. In this case, we must complete it. if (mlir::cast(type).isIncomplete()) - mlir::cast(type).complete(membersRef, packed, ast); + mlir::cast(type).complete(membersRef, packed, padded, ast); } else if (!name && !incomplete) { // anonymous & complete - type = getChecked(eLoc, context, membersRef, packed, kind); + type = getChecked(eLoc, context, membersRef, packed, padded, kind); } else { // anonymous & incomplete parser.emitError(loc, "anonymous structs must be complete"); return {}; @@ -247,6 +251,9 @@ void StructType::print(mlir::AsmPrinter &printer) const { if (getPacked()) printer << "packed "; + if (getPadded()) + printer << "padded "; + if (isIncomplete()) { printer << "incomplete"; } else { @@ -266,7 +273,8 @@ void StructType::print(mlir::AsmPrinter &printer) const { mlir::LogicalResult StructType::verifyInvariants( llvm::function_ref emitError, llvm::ArrayRef members, mlir::StringAttr name, bool incomplete, - bool packed, cir::StructType::RecordKind kind, ASTRecordDeclInterface ast) { + bool packed, bool padded, cir::StructType::RecordKind kind, + ASTRecordDeclInterface ast) { if (name && name.getValue().empty()) { emitError() << "identified structs cannot have an empty name"; return mlir::failure(); @@ -276,24 +284,25 @@ mlir::LogicalResult StructType::verifyInvariants( void StructType::dropAst() { getImpl()->ast = nullptr; } StructType StructType::get(::mlir::MLIRContext *context, ArrayRef members, - StringAttr name, bool packed, RecordKind kind, - ASTRecordDeclInterface ast) { - return Base::get(context, members, name, /*incomplete=*/false, packed, kind, - ast); + StringAttr name, bool packed, bool padded, + RecordKind kind, ASTRecordDeclInterface ast) { + return Base::get(context, members, name, /*incomplete=*/false, packed, padded, + kind, ast); } StructType StructType::getChecked( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::MLIRContext *context, ArrayRef members, StringAttr name, - bool packed, RecordKind kind, ASTRecordDeclInterface ast) { + bool packed, bool padded, RecordKind kind, ASTRecordDeclInterface ast) { return Base::getChecked(emitError, context, members, name, - /*incomplete=*/false, packed, kind, ast); + /*incomplete=*/false, packed, padded, kind, ast); } StructType StructType::get(::mlir::MLIRContext *context, StringAttr name, RecordKind kind) { return Base::get(context, /*members=*/ArrayRef{}, name, - /*incomplete=*/true, /*packed=*/false, kind, + /*incomplete=*/true, /*packed=*/false, /*padded=*/false, + kind, /*ast=*/ASTRecordDeclInterface{}); } @@ -301,23 +310,23 @@ StructType StructType::getChecked( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::MLIRContext *context, StringAttr name, RecordKind kind) { return Base::getChecked(emitError, context, ArrayRef{}, name, - /*incomplete=*/true, /*packed=*/false, kind, - ASTRecordDeclInterface{}); + /*incomplete=*/true, /*packed=*/false, + /*padded=*/false, kind, ASTRecordDeclInterface{}); } StructType StructType::get(::mlir::MLIRContext *context, ArrayRef members, - bool packed, RecordKind kind, + bool packed, bool padded, RecordKind kind, ASTRecordDeclInterface ast) { return Base::get(context, members, StringAttr{}, /*incomplete=*/false, packed, - kind, ast); + padded, kind, ast); } StructType StructType::getChecked( ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError, ::mlir::MLIRContext *context, ArrayRef members, bool packed, - RecordKind kind, ASTRecordDeclInterface ast) { + bool padded, RecordKind kind, ASTRecordDeclInterface ast) { return Base::getChecked(emitError, context, members, StringAttr{}, - /*incomplete=*/false, packed, kind, ast); + /*incomplete=*/false, packed, padded, kind, ast); } ::llvm::ArrayRef StructType::getMembers() const { @@ -332,15 +341,17 @@ bool StructType::getIncomplete() const { return getImpl()->incomplete; } bool StructType::getPacked() const { return getImpl()->packed; } +bool StructType::getPadded() const { return getImpl()->padded; } + cir::StructType::RecordKind StructType::getKind() const { return getImpl()->kind; } ASTRecordDeclInterface StructType::getAst() const { return getImpl()->ast; } -void StructType::complete(ArrayRef members, bool packed, +void StructType::complete(ArrayRef members, bool packed, bool padded, ASTRecordDeclInterface ast) { - if (mutate(members, packed, ast).failed()) + if (mutate(members, packed, padded, ast).failed()) llvm_unreachable("failed to complete struct"); } @@ -481,12 +492,6 @@ StructType::getPreferredAlignment(const ::mlir::DataLayout &dataLayout, llvm_unreachable("NYI"); } -bool StructType::isPadded(const ::mlir::DataLayout &dataLayout) const { - if (!layoutInfo) - computeSizeAndAlignment(dataLayout); - return mlir::cast(layoutInfo).getPadded(); -} - uint64_t StructType::getElementOffset(const ::mlir::DataLayout &dataLayout, unsigned idx) const { assert(idx < getMembers().size() && "access not valid"); @@ -514,8 +519,13 @@ void StructType::computeSizeAndAlignment( unsigned largestMemberSize = 0; llvm::SmallVector memberOffsets; + bool dontCountLastElt = isUnion() && getPadded(); + if (dontCountLastElt) + numElements--; + // Loop over each of the elements, placing them in memory. memberOffsets.reserve(numElements); + for (unsigned i = 0, e = numElements; i != e; ++i) { auto ty = members[i]; @@ -545,7 +555,7 @@ void StructType::computeSizeAndAlignment( // Struct size up to each element is the element offset. memberOffsets.push_back(mlir::IntegerAttr::get( - mlir::IntegerType::get(getContext(), 32), structSize)); + mlir::IntegerType::get(getContext(), 32), isUnion() ? 0 : structSize)); // Consume space for this data item structSize += dataLayout.getTypeSize(ty); @@ -554,7 +564,15 @@ void StructType::computeSizeAndAlignment( // For unions, the size and aligment is that of the largest element. if (isUnion()) { structSize = largestMemberSize; - isPadded = false; + if (getPadded()) { + memberOffsets.push_back(mlir::IntegerAttr::get( + mlir::IntegerType::get(getContext(), 32), structSize)); + auto ty = getMembers()[numElements]; + structSize += dataLayout.getTypeSize(ty); + isPadded = true; + } else { + isPadded = false; + } } else { // Add padding to the end of the struct so that it could be put in an array // and all array elements would be aligned correctly. @@ -1045,7 +1063,7 @@ static mlir::Type getMethodLayoutType(mlir::MLIRContext *ctx) { auto voidPtrTy = cir::PointerType::get(cir::VoidType::get(ctx)); mlir::Type fields[2]{voidPtrTy, voidPtrTy}; return cir::StructType::get(ctx, fields, /*packed=*/false, - cir::StructType::Struct); + /*padded=*/false, cir::StructType::Struct); } llvm::TypeSize diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp index d9b574fac4e7..643d8280a20b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareX86CXXABI.cpp @@ -300,7 +300,7 @@ mlir::Value LoweringPrepareX86CXXABI::lowerVAArgX86_64( ? cast(ai.getCoerceToType()) : StructType::get( Context, {DoubleType::get(Context), DoubleType::get(Context)}, - /*packed=*/false, StructType::Struct); + /*packed=*/false, /*padded=*/false, StructType::Struct); cir::PointerType addrTy = builder.getPointerTo(ty); mlir::Value tmp = builder.createAlloca(loc, addrTy, ty, "tmp", CharUnits::fromQuantity(tyAlign)); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 11c1ed459147..6e4856c42482 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -487,7 +487,7 @@ static mlir::Type GetX86_64ByValArgumentPair(mlir::Type lo, mlir::Type hi, } auto result = StructType::get(lo.getContext(), {lo, hi}, /*packed=*/false, - StructType::Struct); + /*padded=*/false, StructType::Struct); // Verify that the second element is at an 8-byte offset. assert(td.getStructLayout(result)->getElementOffset(1) == 8 && diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3462e90ff5f4..05679d9d86dd 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4353,6 +4353,11 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, if (largestMember) llvmMembers.push_back( convertTypeForMemory(converter, dataLayout, largestMember)); + if (type.getPadded()) { + auto last = *type.getMembers().rbegin(); + llvmMembers.push_back( + convertTypeForMemory(converter, dataLayout, last)); + } break; } } diff --git a/clang/test/CIR/CodeGen/agg-init2.cpp b/clang/test/CIR/CodeGen/agg-init2.cpp index cec2d67eb648..66b3e380a010 100644 --- a/clang/test/CIR/CodeGen/agg-init2.cpp +++ b/clang/test/CIR/CodeGen/agg-init2.cpp @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -Wno-unused-value -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s -// CHECK: !ty_Zero = !cir.struct +// CHECK: !ty_Zero = !cir.struct struct Zero { void yolo(); diff --git a/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp index 56ba8a4c80ec..ba8e811151e6 100644 --- a/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp +++ b/clang/test/CIR/CodeGen/call-via-class-member-funcptr.cpp @@ -16,7 +16,7 @@ class f { const char *f::b() { return g.b(h); } void fn1() { f f1; } -// CIR: ty_a = !cir.struct +// CIR: ty_a = !cir.struct // CIR: ty_f = !cir.struct // CIR: cir.global external @h = #cir.int<0> diff --git a/clang/test/CIR/CodeGen/conditional-cleanup.cpp b/clang/test/CIR/CodeGen/conditional-cleanup.cpp index 00b08cdddce5..d1ecca2d1da4 100644 --- a/clang/test/CIR/CodeGen/conditional-cleanup.cpp +++ b/clang/test/CIR/CodeGen/conditional-cleanup.cpp @@ -23,11 +23,11 @@ namespace test7 { } } -// CIR-DAG: ![[A:.*]] = !cir.struct, !cir.ptr>, ["__retval"] {alignment = 8 : i64} diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 364f0bfc85ce..01f30de810e8 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -126,13 +126,13 @@ co_invoke_fn co_invoke; }} // namespace folly::coro -// CHECK-DAG: ![[IntTask:.*]] = !cir.struct" {!u8i}> -// CHECK-DAG: ![[VoidTask:.*]] = !cir.struct" {!u8i}> -// CHECK-DAG: ![[VoidPromisse:.*]] = !cir.struct::promise_type" {!u8i}> -// CHECK-DAG: ![[CoroHandleVoid:.*]] = !cir.struct" {!u8i}> -// CHECK-DAG: ![[CoroHandlePromise:ty_.*]] = !cir.struct::promise_type>" {!u8i}> -// CHECK-DAG: ![[StdString:.*]] = !cir.struct -// CHECK-DAG: ![[SuspendAlways:.*]] = !cir.struct +// CHECK-DAG: ![[IntTask:.*]] = !cir.struct" padded {!u8i}> +// CHECK-DAG: ![[VoidTask:.*]] = !cir.struct" padded {!u8i}> +// CHECK-DAG: ![[VoidPromisse:.*]] = !cir.struct::promise_type" padded {!u8i}> +// CHECK-DAG: ![[CoroHandleVoid:.*]] = !cir.struct" padded {!u8i}> +// CHECK-DAG: ![[CoroHandlePromise:ty_.*]] = !cir.struct::promise_type>" padded {!u8i}> +// CHECK-DAG: ![[StdString:.*]] = !cir.struct +// CHECK-DAG: ![[SuspendAlways:.*]] = !cir.struct // CHECK: module {{.*}} { // CHECK-NEXT: cir.global external @_ZN5folly4coro9co_invokeE = #cir.zero : !ty_folly3A3Acoro3A3Aco_invoke_fn diff --git a/clang/test/CIR/CodeGen/global-new.cpp b/clang/test/CIR/CodeGen/global-new.cpp index eb9ab0aee1ac..65534d59ab40 100644 --- a/clang/test/CIR/CodeGen/global-new.cpp +++ b/clang/test/CIR/CodeGen/global-new.cpp @@ -12,7 +12,7 @@ struct e { e(int); }; e *g = new e(0); -// CIR_BEFORE: ![[ty:.*]] = !cir.struct { // CIR_BEFORE: %[[GlobalAddr:.*]] = cir.get_global @g : !cir.ptr> diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 57c4a85eec0e..680cd2e122f1 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -10,7 +10,7 @@ void fn() { } // CHECK-DAG: !ty_A = !cir.struct -// CHECK: !ty_anon2E0_ = !cir.struct +// CHECK: !ty_anon2E0_ = !cir.struct // CHECK-DAG: !ty_anon2E7_ = !cir.struct // CHECK-DAG: !ty_anon2E8_ = !cir.struct}> // CHECK-DAG: module diff --git a/clang/test/CIR/CodeGen/move.cpp b/clang/test/CIR/CodeGen/move.cpp index 2a6cbc158b0e..b61488459137 100644 --- a/clang/test/CIR/CodeGen/move.cpp +++ b/clang/test/CIR/CodeGen/move.cpp @@ -16,7 +16,7 @@ struct string { } // std namespace -// CHECK: ![[StdString:ty_.*]] = !cir.struct +// CHECK: ![[StdString:ty_.*]] = !cir.struct std::string getstr(); void emplace(std::string &&s); diff --git a/clang/test/CIR/CodeGen/new-null.cpp b/clang/test/CIR/CodeGen/new-null.cpp index 4f46cbd51147..b3c99c049933 100644 --- a/clang/test/CIR/CodeGen/new-null.cpp +++ b/clang/test/CIR/CodeGen/new-null.cpp @@ -39,7 +39,7 @@ void *operator new[](size_t, void*, bool) throw(); namespace test15 { struct A { A(); ~A(); }; - // CIR-DAG: ![[TEST15A:.*]] = !cir.struct -// CIR: !ty_C = !cir.struct -// CIR: !ty_D = !cir.struct +// CIR: !ty_D = !cir.struct +// CIR-DAG: ![[VecType:.*]] = !cir.struct // CIR-DAG: ![[S1:.*]] = !cir.struct -// CIR_EH-DAG: ![[VecType:.*]] = !cir.struct +// CIR_EH-DAG: ![[VecType:.*]] = !cir.struct // CIR_EH-DAG: ![[S1:.*]] = !cir.struct template diff --git a/clang/test/CIR/CodeGen/struct.c b/clang/test/CIR/CodeGen/struct.c index 267c755e0a7e..68c54f56e98f 100644 --- a/clang/test/CIR/CodeGen/struct.c +++ b/clang/test/CIR/CodeGen/struct.c @@ -38,9 +38,13 @@ void shouldConstInitStructs(void) { // CHECK: cir.func @shouldConstInitStructs struct Foo f = {1, 2, {3, 4}}; // CHECK: %[[#V0:]] = cir.alloca !ty_Foo, !cir.ptr, ["f"] {alignment = 4 : i64} - // CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr l - // CHECK: %[[#V2:]] = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array, #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i}> : !ty_Bar}> : !ty_anon_struct - // CHECK: cir.store %[[#V2]], %[[#V1]] : !ty_anon_struct, !cir.ptr + // CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr + // CHECK: %[[#V2:]] = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i, + // CHECK-SAME: #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array, + // CHECK-SAME: #cir.const_struct<{#cir.int<3> : !s32i, #cir.int<4> : !s8i, + // CHECK-SAME: #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array}> + // CHECK-SAME: : !ty_anon_struct}> : !ty_anon_struct1 + // CHECK: cir.store %[[#V2]], %[[#V1]] : !ty_anon_struct1, !cir.ptr } // Should zero-initialize uninitialized global structs. diff --git a/clang/test/CIR/CodeGen/try-catch-dtors.cpp b/clang/test/CIR/CodeGen/try-catch-dtors.cpp index 5cf0bf0a1a0b..e212133e3e6f 100644 --- a/clang/test/CIR/CodeGen/try-catch-dtors.cpp +++ b/clang/test/CIR/CodeGen/try-catch-dtors.cpp @@ -20,10 +20,10 @@ void yo() { } } -// CIR-DAG: ![[VecTy:.*]] = !cir.struct +// CIR-DAG: ![[VecTy:.*]] = !cir.struct // CIR-DAG: ![[S1:.*]] = !cir.struct -// CIR_FLAT-DAG: ![[VecTy:.*]] = !cir.struct +// CIR_FLAT-DAG: ![[VecTy:.*]] = !cir.struct // CIR_FLAT-DAG: ![[S1:.*]] = !cir.struct // CIR: cir.scope { diff --git a/clang/test/CIR/CodeGen/union-padding.c b/clang/test/CIR/CodeGen/union-padding.c new file mode 100644 index 000000000000..8deb2890083c --- /dev/null +++ b/clang/test/CIR/CodeGen/union-padding.c @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o - | FileCheck %s + +typedef union { + short f0; + signed f1 : 11; + unsigned f2 : 2; + signed f3 : 5; +} U; + +static U g1[2] = {{65534UL}, {65534UL}}; +static short *g2[1] = {&g1[1].f0}; +static short **g3 = &g2[0]; + +short use() { + U u; + return **g3; +} +// CHECK: !ty_U = !cir.struct}> +// CHECK: !ty_anon_struct = !cir.struct}> + +// CHECK: @g3 = #cir.global_view<@g2> : !cir.ptr> +// CHECK: @g2 = #cir.const_array<[#cir.global_view<@g1, [1 : i32]> : !cir.ptr]> : !cir.array x 1> + +// CHECK: @g1 = +// CHECK-SAME: #cir.const_array<[ +// CHECK-SAME: #cir.const_struct<{#cir.int<-2> : !s16i, +// CHECK-SAME: #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array}> : !ty_anon_struct, +// CHECK-SAME: #cir.const_struct<{#cir.int<-2> : !s16i, +// CHECK-SAME: #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array}> : !ty_anon_struct +// CHECK-SAME: ]> : !cir.array + + From 713ba6ac69693677b15eb88d0f4b3adbd4c056e7 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Sat, 1 Feb 2025 00:36:37 +0100 Subject: [PATCH 2222/2301] [CIR][CIRGen][Builtin][Clang] Lowering clang::AArch64::BI__builtin_arm_ldaex (#1293) Lowering clang::AArch64::BI__builtin_arm_ldaex --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 10 ++-- ...in-arm-ldrex.c => builtin-arm-exclusive.c} | 49 +++++++++++++++++-- 2 files changed, 50 insertions(+), 9 deletions(-) rename clang/test/CIR/CodeGen/{builtin-arm-ldrex.c => builtin-arm-exclusive.c} (51%) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 3eb0dee23f7e..6dfda0068530 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2122,12 +2122,10 @@ mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned ICEArguments, static mlir::Value emitArmLdrexNon128Intrinsic(unsigned int builtinID, const CallExpr *clangCallExpr, CIRGenFunction &cgf) { - StringRef intrinsicName; - if (builtinID == clang::AArch64::BI__builtin_arm_ldrex) { - intrinsicName = "aarch64.ldxr"; - } else { - llvm_unreachable("Unknown builtinID"); - } + StringRef intrinsicName = builtinID == clang::AArch64::BI__builtin_arm_ldrex + ? "aarch64.ldxr" + : "aarch64.ldaxr"; + // Argument mlir::Value loadAddr = cgf.emitScalarExpr(clangCallExpr->getArg(0)); // Get Instrinc call diff --git a/clang/test/CIR/CodeGen/builtin-arm-ldrex.c b/clang/test/CIR/CodeGen/builtin-arm-exclusive.c similarity index 51% rename from clang/test/CIR/CodeGen/builtin-arm-ldrex.c rename to clang/test/CIR/CodeGen/builtin-arm-exclusive.c index ba9bbb171bb7..fc35cb1dd87b 100644 --- a/clang/test/CIR/CodeGen/builtin-arm-ldrex.c +++ b/clang/test/CIR/CodeGen/builtin-arm-exclusive.c @@ -5,17 +5,18 @@ struct twoFldT { char a, b; }; // CIR: !ty_twoFldT = !cir.struct) -> !s64i -// CIR: [[CAST0:%.*]] = cir.cast(integral, [[INTRES0]] : !s64i), !s8i +// CIR: [[INTRES0:%.*]] = cir.llvm.intrinsic "aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[CAST0:%.*]] = cir.cast(integral, [[INTRES0]] : !s64i), !s8i // CIR: [[CAST_I32:%.*]] = cir.cast(integral, [[CAST0]] : !s8i), !s32i sum += __builtin_arm_ldrex((short *)addr); // CIR: [[INTRES1:%.*]] = cir.llvm.intrinsic "aarch64.ldxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i -// CIR: [[CAST1:%.*]] = cir.cast(integral, [[INTRES1]] : !s64i), !s16i +// CIR: [[CAST1:%.*]] = cir.cast(integral, [[INTRES1]] : !s64i), !s16i // CIR: [[CAST_I16:%.*]] = cir.cast(integral, [[CAST1]] : !s16i), !s32i sum += __builtin_arm_ldrex((int *)addr); @@ -44,5 +45,47 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) { // sum += __builtin_arm_ldrex((double *)addr); + return sum; +} + +int test_ldaex(char *addr, long long *addr64, float *addrfloat) { +// CIR-LABEL: @test_ldaex + int sum = 0; + sum += __builtin_arm_ldaex(addr); +// CIR: [[INTRES0:%.*]] = cir.llvm.intrinsic "aarch64.ldaxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[CAST0:%.*]] = cir.cast(integral, [[INTRES0]] : !s64i), !s8i +// CIR: [[CAST_I32:%.*]] = cir.cast(integral, [[CAST0]] : !s8i), !s32i + + sum += __builtin_arm_ldaex((short *)addr); +// CIR: [[INTRES1:%.*]] = cir.llvm.intrinsic "aarch64.ldaxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[CAST1:%.*]] = cir.cast(integral, [[INTRES1]] : !s64i), !s16i +// CIR: [[CAST_I16:%.*]] = cir.cast(integral, [[CAST1]] : !s16i), !s32i + + sum += __builtin_arm_ldaex((int *)addr); +// CIR: [[INTRES2:%.*]] = cir.llvm.intrinsic "aarch64.ldaxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i +// CIR: [[CAST2:%.*]] = cir.cast(integral, [[INTRES2]] : !s64i), !s32i + + sum += __builtin_arm_ldaex((long long *)addr); +// CIR: [[INTRES3:%.*]] = cir.llvm.intrinsic "aarch64.ldaxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i + + sum += __builtin_arm_ldaex(addr64); +// CIR: [[INTRES4:%.*]] = cir.llvm.intrinsic "aarch64.ldaxr" {{%[0-9]+}} : (!cir.ptr) -> !s64i + + + sum += *__builtin_arm_ldaex((int **)addr); +// CIR: [[INTRES5:%.*]] = cir.llvm.intrinsic "aarch64.ldaxr" {{%[0-9]+}} : (!cir.ptr>) -> !s64i + + sum += __builtin_arm_ldaex((struct twoFldT **)addr)->a; +// CIR: [[INTRES6:%.*]] = cir.llvm.intrinsic "aarch64.ldaxr" {{%[0-9]+}} : (!cir.ptr>) -> !s64i +// CIR: [[CAST3:%.*]] = cir.cast(int_to_ptr, [[INTRES6]] : !s64i), !cir.ptr +// CIR: [[MEMBER_A:%.*]] = cir.get_member [[CAST3]][0] {name = "a"} : !cir.ptr -> !cir.ptr + + + // TODO: Uncomment next 2 lines, add tests when floating result type supported + // sum += __builtin_arm_ldaex(addrfloat); + + // sum += __builtin_arm_ldaex((double *)addr); + + return sum; } From c4e58420474090601b9419159ad3343ffb530c34 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Sat, 1 Feb 2025 00:46:47 +0100 Subject: [PATCH 2223/2301] [CIR] Fix Lowering/ptrstride.cir xfailed after rebasing (#1302) Fixing Lit test after rebasing `CIRToLLVMPtrStrideOpLowering` will not emit casting in this case because the width is equal to *layoutWidth https://github.com/llvm/clangir/blob/d329c96a56b41ad99ddffe7bd037ac4ab7476ce6/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp#L967-L999 Fixes: #1295 --- clang/test/CIR/Lowering/ptrstride.cir | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/clang/test/CIR/Lowering/ptrstride.cir b/clang/test/CIR/Lowering/ptrstride.cir index 9ab1227fdd0d..8df4d55d1322 100644 --- a/clang/test/CIR/Lowering/ptrstride.cir +++ b/clang/test/CIR/Lowering/ptrstride.cir @@ -1,6 +1,5 @@ // RUN: cir-opt %s -cir-to-llvm -o %t.mlir // RUN: FileCheck %s --input-file=%t.mlir -check-prefix=MLIR -// XFAIL: * !s32i = !cir.int !u64i = !cir.int @@ -15,6 +14,7 @@ module { %4 = cir.load %3 : !cir.ptr, !s32i cir.return } + cir.func @g(%arg0: !cir.ptr, %2 : !s32i) { %3 = cir.ptr_stride(%arg0 : !cir.ptr, %2 : !s32i), !cir.ptr cir.return @@ -38,8 +38,7 @@ module { // MLIR: llvm.return // MLIR-LABEL: @g -// MLIR: %0 = llvm.sext %arg1 : i32 to i64 -// MLIR-NEXT: llvm.getelementptr %arg0[%0] : (!llvm.ptr, i64) -> !llvm.ptr, i32 +// MLIR: llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i32) -> !llvm.ptr, i32 // MLIR-LABEL: @bool_stride // MLIR: llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, i8 From de1cd92f24ecc2124b27b7b7c51f218d32f9dfa7 Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Mon, 3 Feb 2025 08:53:23 -0800 Subject: [PATCH 2224/2301] [CIR] Implement support for zero-initialized builtin type allocation (#1304) This handles initialization of array new allocations in the simple case where the entire allocated memory block can be initialized with a memset to zero. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 ++ clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 95 ++++++++++++++++++++++++- clang/test/CIR/CodeGen/new.cpp | 19 ++++- clang/test/CIR/Lowering/new.cpp | 10 +++ 4 files changed, 125 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index dfef1c909c05..f41dcf871c6b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -535,6 +535,10 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { // Constant creation helpers // ------------------------- // + cir::ConstantOp getUInt8(uint8_t c, mlir::Location loc) { + auto uInt8Ty = getUInt8Ty(); + return create(loc, uInt8Ty, cir::IntAttr::get(uInt8Ty, c)); + } cir::ConstantOp getSInt32(int32_t c, mlir::Location loc) { auto sInt32Ty = getSInt32Ty(); return create(loc, sInt32Ty, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 4bc4866e03c7..af0bb71366b9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -876,11 +876,96 @@ void CIRGenFunction::emitNewArrayInitializer( unsigned InitListElements = 0; const Expr *Init = E->getInitializer(); + QualType::DestructionKind DtorKind = ElementType.isDestructedType(); CleanupDeactivationScope deactivation(*this); + CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType); + CharUnits ElementAlign = + BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize); + + // Attempt to perform zero-initialization using memset. + auto TryMemsetInitialization = [&]() -> bool { + auto Loc = NumElements.getLoc(); + + // FIXME: If the type is a pointer-to-data-member under the Itanium ABI, + // we can initialize with a memset to -1. + if (!CGM.getTypes().isZeroInitializable(ElementType)) + return false; + + // Optimization: since zero initialization will just set the memory + // to all zeroes, generate a single memset to do it in one shot. + + // Subtract out the size of any elements we've already initialized. + auto RemainingSize = AllocSizeWithoutCookie; + if (InitListElements) { + llvm_unreachable("NYI"); + } + + // Create the memset. + auto CastOp = + builder.createPtrBitcast(CurPtr.getPointer(), builder.getVoidTy()); + builder.createMemSet(Loc, CastOp, builder.getUInt8(0, Loc), RemainingSize); + return true; + }; + const InitListExpr *ILE = dyn_cast(Init); - if (ILE) { - llvm_unreachable("NYI"); + const CXXParenListInitExpr *CPLIE = nullptr; + const StringLiteral *SL = nullptr; + const ObjCEncodeExpr *OCEE = nullptr; + const Expr *IgnoreParen = nullptr; + if (!ILE) { + IgnoreParen = Init->IgnoreParenImpCasts(); + CPLIE = dyn_cast(IgnoreParen); + SL = dyn_cast(IgnoreParen); + OCEE = dyn_cast(IgnoreParen); + } + + // If the initializer is an initializer list, first do the explicit elements. + if (ILE || CPLIE || SL || OCEE) { + // Initializing from a (braced) string literal is a special case; the init + // list element does not initialize a (single) array element. + if ((ILE && ILE->isStringLiteralInit()) || SL || OCEE) { + llvm_unreachable("NYI"); + } + + ArrayRef InitExprs = + ILE ? ILE->inits() : CPLIE->getInitExprs(); + InitListElements = InitExprs.size(); + + // If this is a multi-dimensional array new, we will initialize multiple + // elements with each init list element. + QualType AllocType = E->getAllocatedType(); + if (const ConstantArrayType *CAT = dyn_cast_or_null( + AllocType->getAsArrayTypeUnsafe())) { + llvm_unreachable("NYI"); + } + + // Enter a partial-destruction Cleanup if necessary. + if (DtorKind) { + llvm_unreachable("NYI"); + } + + CharUnits StartAlign = CurPtr.getAlignment(); + for (const Expr *IE : InitExprs) { + llvm_unreachable("NYI"); + } + + // The remaining elements are filled with the array filler expression. + Init = ILE ? ILE->getArrayFiller() : CPLIE->getArrayFiller(); + + // Extract the initializer for the individual array elements by pulling + // out the array filler from all the nested initializer lists. This avoids + // generating a nested loop for the initialization. + while (Init && Init->getType()->isConstantArrayType()) { + auto *SubILE = dyn_cast(Init); + if (!SubILE) + break; + assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?"); + Init = SubILE->getArrayFiller(); + } + + // Switch back to initializing one base element at a time. + CurPtr = CurPtr.withElementType(BeginPtr.getElementType()); } // If all elements have already been initialized, skip any further @@ -911,6 +996,12 @@ void CIRGenFunction::emitNewArrayInitializer( llvm_unreachable("NYI"); } + // If this is value-initialization, we can usually use memset. + if (isa(Init)) { + if (TryMemsetInitialization()) + return; + llvm_unreachable("NYI"); + } llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index fae2c429dd0a..9134018d8673 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -143,4 +143,21 @@ void t_constant_size_nontrivial2() { // CHECK: %9 = cir.cast(bitcast, %8 : !cir.ptr), !cir.ptr // CHECK: cir.store %9, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.return -// CHECK: } \ No newline at end of file +// CHECK: } + +void t_constant_size_memset_init() { + auto p = new int[16] {}; +} + +// In this test, NUM_ELEMENTS isn't used because no cookie is needed and there +// are no constructor calls needed. + +// CHECK: cir.func @_Z27t_constant_size_memset_initv() +// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<16> : !u64i +// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<64> : !u64i +// CHECK: %[[#ALLOC_PTR:]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %[[#ELEM_PTR:]] = cir.cast(bitcast, %[[#ALLOC_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[#VOID_PTR:]] = cir.cast(bitcast, %[[#ELEM_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[#ZERO:]] = cir.const #cir.int<0> : !u8i +// CHECK: %[[#ZERO_I32:]] = cir.cast(integral, %[[#ZERO]] : !u8i), !s32i +// CHECK: cir.libc.memset %[[#ALLOCATION_SIZE]] bytes from %[[#VOID_PTR]] set to %[[#ZERO_I32]] : !cir.ptr, !s32i, !u64i diff --git a/clang/test/CIR/Lowering/new.cpp b/clang/test/CIR/Lowering/new.cpp index 9c276f53dad3..c4c12531a6b7 100644 --- a/clang/test/CIR/Lowering/new.cpp +++ b/clang/test/CIR/Lowering/new.cpp @@ -57,3 +57,13 @@ void t_constant_size_nontrivial2() { // LLVM: store i64 3, ptr %[[COOKIE_PTR]], align 8 // LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr i8, ptr %[[COOKIE_PTR]], i64 8 // LLVM: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8 + +void t_constant_size_memset_init() { + auto p = new int[16] {}; +} + +// LLVM: @_Z27t_constant_size_memset_initv() +// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 64) +// LLVM: call void @llvm.memset.p0.i64(ptr %[[ADDR]], i8 0, i64 64, i1 false) +// LLVM: store ptr %[[ADDR]], ptr %[[ALLOCA]], align 8 From 7c222511a8808d62b230c50895cf57c4fd47200a Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Mon, 3 Feb 2025 12:10:16 -0800 Subject: [PATCH 2225/2301] [CIR][CIRGen] Implement array cookie ABI for AppleARM64 targets (#1301) This change introduces CIRGenCXXABI subclasses for ARM and AppleARM64 and implements ARM CXXABI handling for array cookies. --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 85 +++++++++++++++++-- .../CIR/CodeGen/applearm64-array-cookies.cpp | 54 ++++++++++++ clang/test/CIR/Lowering/applearm64-new.cpp | 41 +++++++++ 3 files changed, 175 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/applearm64-array-cookies.cpp create mode 100644 clang/test/CIR/Lowering/applearm64-new.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 88861b7c544c..fbf901c47a7c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -360,6 +360,30 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { classifyRTTIUniqueness(QualType CanTy, cir::GlobalLinkageKind Linkage) const; friend class CIRGenItaniumRTTIBuilder; }; + +class CIRGenARMCXXABI : public CIRGenItaniumCXXABI { +public: + CIRGenARMCXXABI(CIRGenModule &CGM) : CIRGenItaniumCXXABI(CGM) { + // TODO(cir): When implemented, /*UseARMMethodPtrABI=*/true, + // /*UseARMGuardVarABI=*/true) {} + assert(!cir::MissingFeatures::appleArm64CXXABI()); + } + CharUnits getArrayCookieSizeImpl(QualType elementType) override; + Address initializeArrayCookie(CIRGenFunction &CGF, Address NewPtr, + mlir::Value NumElements, const CXXNewExpr *E, + QualType ElementType) override; +}; + +class CIRGenAppleARM64CXXABI : public CIRGenARMCXXABI { +public: + CIRGenAppleARM64CXXABI(CIRGenModule &CGM) : CIRGenARMCXXABI(CGM) { + Use32BitVTableOffsetABI = true; + } + + // ARM64 libraries are prepared for non-unique RTTI. + bool shouldRTTIBeUnique() const override { return false; } +}; + } // namespace CIRGenCXXABI::AddedStructorArgs CIRGenItaniumCXXABI::getImplicitConstructorArgs( @@ -404,12 +428,11 @@ CIRGenCXXABI *clang::CIRGen::CreateCIRGenItaniumCXXABI(CIRGenModule &CGM) { switch (CGM.getASTContext().getCXXABIKind()) { case TargetCXXABI::GenericItanium: case TargetCXXABI::GenericAArch64: - case TargetCXXABI::AppleARM64: - // TODO: this isn't quite right, clang uses AppleARM64CXXABI which inherits - // from ARMCXXABI. We'll have to follow suit. - assert(!cir::MissingFeatures::appleArm64CXXABI()); return new CIRGenItaniumCXXABI(CGM); + case TargetCXXABI::AppleARM64: + return new CIRGenAppleARM64CXXABI(CGM); + default: llvm_unreachable("bad or NYI ABI kind"); } @@ -2700,4 +2723,56 @@ Address CIRGenItaniumCXXABI::initializeArrayCookie(CIRGenFunction &CGF, auto OffsetOp = CGF.getBuilder().getSignedInt(Loc, Offset, /*width=*/32); auto DataPtr = CGF.getBuilder().createPtrStride(Loc, CastOp, OffsetOp); return Address(DataPtr, NewPtr.getType(), NewPtr.getAlignment()); -} \ No newline at end of file +} + +CharUnits CIRGenARMCXXABI::getArrayCookieSizeImpl(QualType elementType) { + // ARM says that the cookie is always: + // struct array_cookie { + // std::size_t element_size; // element_size != 0 + // std::size_t element_count; + // }; + // But the base ABI doesn't give anything an alignment greater than + // 8, so we can dismiss this as typical ABI-author blindness to + // actual language complexity and round up to the element alignment. + return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes), + getContext().getTypeAlignInChars(elementType)); +} + +Address CIRGenARMCXXABI::initializeArrayCookie(CIRGenFunction &cgf, + Address newPtr, + mlir::Value numElements, + const CXXNewExpr *expr, + QualType elementType) { + assert(requiresArrayCookie(expr)); + + // The cookie is always at the start of the buffer. + auto cookiePtr = + cgf.getBuilder().createPtrBitcast(newPtr.getPointer(), cgf.SizeTy); + Address cookie = Address(cookiePtr, cgf.SizeTy, newPtr.getAlignment()); + + ASTContext &ctx = getContext(); + CharUnits sizeSize = cgf.getSizeSize(); + mlir::Location loc = cgf.getLoc(expr->getSourceRange()); + + // The first element is the element size. + mlir::Value elementSize = cgf.getBuilder().getConstInt( + loc, cgf.SizeTy, ctx.getTypeSizeInChars(elementType).getQuantity()); + cgf.getBuilder().createStore(loc, elementSize, cookie); + + // The second element is the element count. + auto offsetOp = cgf.getBuilder().getSignedInt(loc, 1, /*width=*/32); + auto dataPtr = + cgf.getBuilder().createPtrStride(loc, cookie.getPointer(), offsetOp); + cookie = Address(dataPtr, cgf.SizeTy, newPtr.getAlignment()); + cgf.getBuilder().createStore(loc, numElements, cookie); + + // Finally, compute a pointer to the actual data buffer by skipping + // over the cookie completely. + CharUnits cookieSize = CIRGenARMCXXABI::getArrayCookieSizeImpl(elementType); + offsetOp = cgf.getBuilder().getSignedInt(loc, cookieSize.getQuantity(), + /*width=*/32); + auto castOp = cgf.getBuilder().createPtrBitcast( + newPtr.getPointer(), cgf.getBuilder().getUIntNTy(8)); + dataPtr = cgf.getBuilder().createPtrStride(loc, castOp, offsetOp); + return Address(dataPtr, newPtr.getType(), newPtr.getAlignment()); +} diff --git a/clang/test/CIR/CodeGen/applearm64-array-cookies.cpp b/clang/test/CIR/CodeGen/applearm64-array-cookies.cpp new file mode 100644 index 000000000000..d19b93f173cf --- /dev/null +++ b/clang/test/CIR/CodeGen/applearm64-array-cookies.cpp @@ -0,0 +1,54 @@ +// RUN: %clang_cc1 -std=c++20 -triple=arm64e-apple-darwin -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +class C { + public: + ~C(); +}; + +void t_constant_size_nontrivial() { + auto p = new C[3]; +} + +// CHECK: cir.func @_Z26t_constant_size_nontrivialv() +// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<3> : !u64i +// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<3> : !u64i +// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<19> : !u64i +// CHECK: %[[#ALLOC_PTR:]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %[[#COOKIE_PTR:]] = cir.cast(bitcast, %[[#ALLOC_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[#ELEMENT_SIZE:]] = cir.const #cir.int<1> : !u64i +// CHECK: cir.store %[[#ELEMENT_SIZE]], %[[#COOKIE_PTR]] : !u64i, !cir.ptr +// CHECK: %[[#SECOND_COOKIE_OFFSET:]] = cir.const #cir.int<1> : !s32i +// CHECK: %[[#COOKIE_PTR2:]] = cir.ptr_stride(%[[#COOKIE_PTR]] : !cir.ptr, %[[#SECOND_COOKIE_OFFSET]] : !s32i), !cir.ptr +// CHECK: cir.store %[[#NUM_ELEMENTS]], %[[#COOKIE_PTR2]] : !u64i, !cir.ptr +// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<16> : !s32i +// CHECK: %[[#ALLOC_AS_I8:]] = cir.cast(bitcast, %[[#ALLOC_PTR]] : !cir.ptr), !cir.ptr +// CHECK: cir.ptr_stride(%[[#ALLOC_AS_I8]] : !cir.ptr, %[[#COOKIE_SIZE]] : !s32i), !cir.ptr + +class D { + public: + int x; + ~D(); +}; + +void t_constant_size_nontrivial2() { + auto p = new D[3]; +} + +// In this test SIZE_WITHOUT_COOKIE isn't used, but it would be if there were +// an initializer. + +// CHECK: cir.func @_Z27t_constant_size_nontrivial2v() +// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<3> : !u64i +// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<12> : !u64i +// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<28> : !u64i +// CHECK: %[[#ALLOC_PTR:]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %[[#COOKIE_PTR:]] = cir.cast(bitcast, %[[#ALLOC_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[#ELEMENT_SIZE:]] = cir.const #cir.int<4> : !u64i +// CHECK: cir.store %[[#ELEMENT_SIZE]], %[[#COOKIE_PTR]] : !u64i, !cir.ptr +// CHECK: %[[#SECOND_COOKIE_OFFSET:]] = cir.const #cir.int<1> : !s32i +// CHECK: %[[#COOKIE_PTR2:]] = cir.ptr_stride(%[[#COOKIE_PTR]] : !cir.ptr, %[[#SECOND_COOKIE_OFFSET]] : !s32i), !cir.ptr +// CHECK: cir.store %[[#NUM_ELEMENTS]], %[[#COOKIE_PTR2]] : !u64i, !cir.ptr +// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<16> : !s32i +// CHECK: %[[#ALLOC_AS_I8:]] = cir.cast(bitcast, %[[#ALLOC_PTR]] : !cir.ptr), !cir.ptr +// CHECK: cir.ptr_stride(%[[#ALLOC_AS_I8]] : !cir.ptr, %[[#COOKIE_SIZE]] : !s32i), !cir.ptr diff --git a/clang/test/CIR/Lowering/applearm64-new.cpp b/clang/test/CIR/Lowering/applearm64-new.cpp new file mode 100644 index 000000000000..b72995d02477 --- /dev/null +++ b/clang/test/CIR/Lowering/applearm64-new.cpp @@ -0,0 +1,41 @@ +// RUN: %clang_cc1 -triple=arm64e-apple-darwin -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +class C { + public: + ~C(); +}; + +void t_constant_size_nontrivial() { + auto p = new C[3]; +} + +// Note: The below differs from the IR emitted by clang without -fclangir in +// several respects. (1) The alloca here has an extra "i64 1" +// (2) The operator new call is missing "noalias noundef nonnull" on +// the call and "noundef" on the argument, (3) The getelementptr is +// missing "inbounds" + +// LLVM: @_Z26t_constant_size_nontrivialv() +// LLVM: %[[COOKIE_PTR:.*]] = call ptr @_Znam(i64 19) +// LLVM: store i64 1, ptr %[[COOKIE_PTR]], align 8 +// LLVM: %[[NUM_ELEMENTS_PTR:.*]] = getelementptr i64, ptr %[[COOKIE_PTR]], i64 1 +// LLVM: store i64 3, ptr %[[NUM_ELEMENTS_PTR]], align 8 +// LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr i8, ptr %[[COOKIE_PTR]], i64 16 + +class D { + public: + int x; + ~D(); +}; + +void t_constant_size_nontrivial2() { + auto p = new D[3]; +} + +// LLVM: @_Z27t_constant_size_nontrivial2v() +// LLVM: %[[COOKIE_PTR:.*]] = call ptr @_Znam(i64 28) +// LLVM: store i64 4, ptr %[[COOKIE_PTR]], align 8 +// LLVM: %[[NUM_ELEMENTS_PTR:.*]] = getelementptr i64, ptr %[[COOKIE_PTR]], i64 1 +// LLVM: store i64 3, ptr %[[NUM_ELEMENTS_PTR]], align 8 +// LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr i8, ptr %[[COOKIE_PTR]], i64 16 From f8821e8521686749ab43015cd7a5b2f863b9943d Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Mon, 3 Feb 2025 16:48:16 -0800 Subject: [PATCH 2226/2301] [CIR] Implement partial initialization for array new (#1307) This implements CIR generation for the case where an array is allocated with array new and a partial initialization list is provided. --- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 28 +++++++++++++++++++++++-- clang/test/CIR/CodeGen/new.cpp | 28 +++++++++++++++++++++++++ clang/test/CIR/Lowering/new.cpp | 16 ++++++++++++++ 3 files changed, 70 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index af0bb71366b9..15dc33f2df64 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "clang/AST/CharUnits.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" #include "clang/CIR/MissingFeatures.h" #include @@ -876,6 +877,7 @@ void CIRGenFunction::emitNewArrayInitializer( unsigned InitListElements = 0; const Expr *Init = E->getInitializer(); + Address EndOfInit = Address::invalid(); QualType::DestructionKind DtorKind = ElementType.isDestructedType(); CleanupDeactivationScope deactivation(*this); @@ -898,7 +900,13 @@ void CIRGenFunction::emitNewArrayInitializer( // Subtract out the size of any elements we've already initialized. auto RemainingSize = AllocSizeWithoutCookie; if (InitListElements) { - llvm_unreachable("NYI"); + // We know this can't overflow; we check this when doing the allocation. + unsigned InitializedSize = + getContext().getTypeSizeInChars(ElementType).getQuantity() * + InitListElements; + auto InitSizeOp = + builder.getConstInt(Loc, RemainingSize.getType(), InitializedSize); + RemainingSize = builder.createSub(RemainingSize, InitSizeOp); } // Create the memset. @@ -946,8 +954,24 @@ void CIRGenFunction::emitNewArrayInitializer( } CharUnits StartAlign = CurPtr.getAlignment(); + unsigned i = 0; for (const Expr *IE : InitExprs) { - llvm_unreachable("NYI"); + if (EndOfInit.isValid()) { + // This will involve DTor handling. + llvm_unreachable("NYI"); + } + // FIXME: If the last initializer is an incomplete initializer list for + // an array, and we have an array filler, we can fold together the two + // initialization loops. + StoreAnyExprIntoOneUnit(*this, IE, IE->getType(), CurPtr, + AggValueSlot::DoesNotOverlap); + auto Loc = getLoc(IE->getExprLoc()); + auto CastOp = builder.createPtrBitcast(CurPtr.getPointer(), + convertTypeForMem(AllocType)); + auto OffsetOp = builder.getSignedInt(Loc, 1, /*width=*/32); + auto DataPtr = builder.createPtrStride(Loc, CastOp, OffsetOp); + CurPtr = Address(DataPtr, CurPtr.getType(), + StartAlign.alignmentAtOffset((++i) * ElementSize)); } // The remaining elements are filled with the array filler expression. diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index 9134018d8673..2750929832e0 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -161,3 +161,31 @@ void t_constant_size_memset_init() { // CHECK: %[[#ZERO:]] = cir.const #cir.int<0> : !u8i // CHECK: %[[#ZERO_I32:]] = cir.cast(integral, %[[#ZERO]] : !u8i), !s32i // CHECK: cir.libc.memset %[[#ALLOCATION_SIZE]] bytes from %[[#VOID_PTR]] set to %[[#ZERO_I32]] : !cir.ptr, !s32i, !u64i + +void t_constant_size_partial_init() { + auto p = new int[16] { 1, 2, 3 }; +} + +// CHECK: cir.func @_Z28t_constant_size_partial_initv() +// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<16> : !u64i +// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<64> : !u64i +// CHECK: %[[#ALLOC_PTR:]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %[[#ELEM_0_PTR:]] = cir.cast(bitcast, %[[#ALLOC_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[#CONST_ONE:]] = cir.const #cir.int<1> : !s32i +// CHECK: cir.store %[[#CONST_ONE]], %[[#ELEM_0_PTR]] : !s32i, !cir.ptr +// CHECK: %[[#OFFSET:]] = cir.const #cir.int<1> : !s32i +// CHECK: %[[#ELEM_1_PTR:]] = cir.ptr_stride(%[[#ELEM_0_PTR]] : !cir.ptr, %[[#OFFSET]] : !s32i), !cir.ptr +// CHECK: %[[#CONST_TWO:]] = cir.const #cir.int<2> : !s32i +// CHECK: cir.store %[[#CONST_TWO]], %[[#ELEM_1_PTR]] : !s32i, !cir.ptr +// CHECK: %[[#OFFSET1:]] = cir.const #cir.int<1> : !s32i +// CHECK: %[[#ELEM_2_PTR:]] = cir.ptr_stride(%[[#ELEM_1_PTR]] : !cir.ptr, %[[#OFFSET1]] : !s32i), !cir.ptr +// CHECK: %[[#CONST_THREE:]] = cir.const #cir.int<3> : !s32i +// CHECK: cir.store %[[#CONST_THREE]], %[[#ELEM_2_PTR]] : !s32i, !cir.ptr +// CHECK: %[[#OFFSET2:]] = cir.const #cir.int<1> : !s32i +// CHECK: %[[#ELEM_3_PTR:]] = cir.ptr_stride(%[[#ELEM_2_PTR]] : !cir.ptr, %[[#OFFSET2]] : !s32i), !cir.ptr +// CHECK: %[[#INIT_SIZE:]] = cir.const #cir.int<12> : !u64i +// CHECK: %[[#REMAINING_SIZE:]] = cir.binop(sub, %[[#ALLOCATION_SIZE]], %[[#INIT_SIZE]]) : !u64i +// CHECK: %[[#VOID_PTR:]] = cir.cast(bitcast, %[[#ELEM_3_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[#ZERO:]] = cir.const #cir.int<0> : !u8i +// CHECK: %[[#ZERO_I32:]] = cir.cast(integral, %[[#ZERO]] : !u8i), !s32i +// CHECK: cir.libc.memset %[[#REMAINING_SIZE]] bytes from %[[#VOID_PTR]] set to %[[#ZERO_I32]] : !cir.ptr, !s32i, !u64i diff --git a/clang/test/CIR/Lowering/new.cpp b/clang/test/CIR/Lowering/new.cpp index c4c12531a6b7..bff825323e9e 100644 --- a/clang/test/CIR/Lowering/new.cpp +++ b/clang/test/CIR/Lowering/new.cpp @@ -67,3 +67,19 @@ void t_constant_size_memset_init() { // LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 64) // LLVM: call void @llvm.memset.p0.i64(ptr %[[ADDR]], i8 0, i64 64, i1 false) // LLVM: store ptr %[[ADDR]], ptr %[[ALLOCA]], align 8 + +void t_constant_size_partial_init() { + auto p = new int[16] { 1, 2, 3 }; +} + +// LLVM: @_Z28t_constant_size_partial_initv() +// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 64) +// LLVM: store i32 1, ptr %[[ADDR]], align 4 +// LLVM: %[[ELEM_1_PTR:.*]] = getelementptr i32, ptr %[[ADDR]], i64 1 +// LLVM: store i32 2, ptr %[[ELEM_1_PTR]], align 4 +// LLVM: %[[ELEM_2_PTR:.*]] = getelementptr i32, ptr %[[ELEM_1_PTR]], i64 1 +// LLVM: store i32 3, ptr %[[ELEM_2_PTR]], align 4 +// LLVM: %[[ELEM_3_PTR:.*]] = getelementptr i32, ptr %[[ELEM_2_PTR]], i64 1 +// LLVM: call void @llvm.memset.p0.i64(ptr %[[ELEM_3_PTR]], i8 0, i64 52, i1 false) +// LLVM: store ptr %[[ADDR]], ptr %[[ALLOCA]], align 8 From fee4bb6fb4c7b5a64d37ae56c55578276718a3bb Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 5 Feb 2025 04:41:27 +0300 Subject: [PATCH 2227/2301] [CIR][Codegen] Fixes global variables that point to another globals (#1277) Testing CIR with `csmith` is in progress! And this PR is not a small one, so I need to apologize. But looks like there is a problem that may affect the run time. ### Problem Consider the next code: ``` #include typedef struct { int f0 : 24; int f1; int f2; } S; static S g1 = {2799, 9, 123}; static int *g2 = &g1.f2; int main() { printf("check: %d\n",*g2); return 0; } ``` This program dumps anything but not `123`. So basically we don't support global variables that refer to another globals in the case of aggregate types. This PR fixes global views for two cases: structs and array of structs (see the tests). There is an issue with unions too, but I think we will discuss it later, once we will agree on the approach ### Some details For the example above, `g1` variable is created in two steps: 1) The variable is created with the `S` type. In our case it's `!cir.struct x 3>, !cir.int, !cir.int}`(which differs from the OG btw, and may be I need to take a look at the bit fields again - but it's not a problem right now) 2) For the left side we create anon structure of type ` !cir.struct, !cir.int, !cir.int, !cir.int, !cir.int, !cir.int}>` which is the same as in OG and then `g1` is replaced with the new type. Basically the same happens in the OG. But then the `replaceAllUsesWith` solves all the possible problems. In our case we can not do it this easy. The `g2` is: ``` cir.global @g2 = #cir.global_view<@g1, [2 : i32]> : !cir.ptr ``` The problem is in the indexes! After `g1` is replaced, the index in `g2`'s `GlobalViewAttr`still points to the old type!!! So we have to create a new `GlobalViewAttr` with new indexes! ### Solution My solution is based on the `computeGlobalViewIndicesFromFlatOffset` function from `CIRGenBuilder` that is basically a mapping from indexes to offset. I compute an offset and then map it back to the indexes with for the new type. May be there is a better solution though, have some ideas? #### Implementation details Most of the changes are in the `CIRGenModule` where we do the replacement. Also, there are some changes in `CIRGenBuilder` - I moved `computeGlobalViewIndicesFromFlatOffset` to `cpp` with no changes and added the opposite function `computeOffsetFromGlobalViewIndices`. One more fix is more important - is about `GlobalViewAttr` indexes generation for vtable. I suggest we don't set the first index to zero in CIR and add it in the lowering - in this case we can do it uniformly as ``` if (isa(sourceType)) indices.push_back(0); ``` The previous approach was not completely correct - we have to add the leading zero index for the anon structures as well: ``` if (stTy.isIdentified()) indices.push_back(0); // Wrong ``` Thus, there are some changes in tests as well. It's not an unrelated issue - we have to fix it - either now or as a separated PR that should come before this one. Also, I added tests with the covered cases with the original LLVM IR, just for comparison - so there are some divergences as well. Finally - this code was already tested with `csmith` and I don't have any problems so far. Once we'll fix unions I will have more information. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 24 ++++ clang/lib/CIR/CodeGen/CIRGenBuilder.cpp | 71 ++++++++++ clang/lib/CIR/CodeGen/CIRGenBuilder.h | 63 +++------ clang/lib/CIR/CodeGen/CIRGenExprConst.cpp | 1 - clang/lib/CIR/CodeGen/CIRGenModule.cpp | 81 ++++++++++- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 3 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 6 +- clang/test/CIR/CodeGen/globals-ref-globals.c | 126 ++++++++++++++++++ clang/test/CIR/CodeGen/union-padding.c | 2 +- clang/test/CIR/CodeGen/vbase.cpp | 2 +- clang/test/CIR/CodeGen/vtable-emission.cpp | 2 +- clang/test/CIR/CodeGen/vtt.cpp | 2 +- 12 files changed, 322 insertions(+), 61 deletions(-) create mode 100644 clang/test/CIR/CodeGen/globals-ref-globals.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 3e12a9307807..298d06805c54 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -622,6 +622,30 @@ def GlobalViewAttr : CIR_Attr<"GlobalView", "global_view", [TypedAttrInterface]> cir.global external @elt_ptr = #cir.global_view<@rgb, [1]> : !cir.ptr cir.global external @table_of_ptrs = #cir.const_array<[#cir.global_view<@rgb, [1]> : !cir.ptr] : !cir.array x 1>> ``` + + Note, that unlike LLVM IR's gep instruction, CIR doesn't add the leading zero index + when it's known to be constant zero, e.g. for pointers, i.e. we use indexes exactly + to access sub elements or for the offset. The leading zero index is added later in + the lowering. + + Example: + ``` + struct A { + int a; + }; + + struct B: virtual A { + int b; + }; + ``` + VTT for B: + ``` + cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [0 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 1> + ``` + The same for LLVM IR after CIR: + ``` + @_ZTT1B = linkonce_odr global [1 x ptr] [ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV1B, i32 0, i32 0, i32 3)], align 8 + ``` }]; let parameters = (ins AttributeSelfTypeParameter<"">:$type, diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp index a50cefe34c79..905ca67aafd5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp @@ -63,3 +63,74 @@ cir::ConstantOp CIRGenBuilderTy::getConstInt(mlir::Location loc, mlir::Type t, assert(intTy && "expected cir::IntType"); return create(loc, intTy, cir::IntAttr::get(t, C)); } + +void CIRGenBuilderTy::computeGlobalViewIndicesFromFlatOffset( + int64_t Offset, mlir::Type Ty, cir::CIRDataLayout Layout, + llvm::SmallVectorImpl &Indices) { + if (!Offset) + return; + + mlir::Type SubType; + + auto getIndexAndNewOffset = + [](int64_t Offset, int64_t EltSize) -> std::pair { + int64_t DivRet = Offset / EltSize; + if (DivRet < 0) + DivRet -= 1; // make sure offset is positive + int64_t ModRet = Offset - (DivRet * EltSize); + return {DivRet, ModRet}; + }; + + if (auto ArrayTy = mlir::dyn_cast(Ty)) { + int64_t EltSize = Layout.getTypeAllocSize(ArrayTy.getEltType()); + SubType = ArrayTy.getEltType(); + const auto [Index, NewOffset] = getIndexAndNewOffset(Offset, EltSize); + Indices.push_back(Index); + Offset = NewOffset; + } else if (auto StructTy = mlir::dyn_cast(Ty)) { + auto Elts = StructTy.getMembers(); + int64_t Pos = 0; + for (size_t I = 0; I < Elts.size(); ++I) { + int64_t EltSize = + (int64_t)Layout.getTypeAllocSize(Elts[I]).getFixedValue(); + unsigned AlignMask = Layout.getABITypeAlign(Elts[I]).value() - 1; + if (StructTy.getPacked()) + AlignMask = 0; + Pos = (Pos + AlignMask) & ~AlignMask; + assert(Offset >= 0); + if (Offset < Pos + EltSize) { + Indices.push_back(I); + SubType = Elts[I]; + Offset -= Pos; + break; + } + Pos += EltSize; + } + } else { + llvm_unreachable("unexpected type"); + } + + assert(SubType); + computeGlobalViewIndicesFromFlatOffset(Offset, SubType, Layout, Indices); +} + +uint64_t CIRGenBuilderTy::computeOffsetFromGlobalViewIndices( + const cir::CIRDataLayout &layout, mlir::Type typ, + llvm::ArrayRef indexes) { + + uint64_t offset = 0; + for (auto idx : indexes) { + if (auto sTy = dyn_cast(typ)) { + offset += sTy.getElementOffset(layout.layout, idx); + assert(idx < sTy.getMembers().size()); + typ = sTy.getMembers()[idx]; + } else if (auto arTy = dyn_cast(typ)) { + typ = arTy.getEltType(); + offset += layout.getTypeAllocSize(typ) * idx; + } else { + llvm_unreachable("NYI"); + } + } + + return offset; +} \ No newline at end of file diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index f41dcf871c6b..3a8a3955d7f2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -146,6 +146,20 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return cir::GlobalViewAttr::get(type, symbol, indices); } + cir::GlobalViewAttr getGlobalViewAttr(cir::PointerType type, + cir::GlobalOp globalOp, + llvm::ArrayRef indices) { + llvm::SmallVector attrs; + for (auto ind : indices) { + auto a = + mlir::IntegerAttr::get(mlir::IntegerType::get(getContext(), 64), ind); + attrs.push_back(a); + } + + mlir::ArrayAttr arAttr = mlir::ArrayAttr::get(getContext(), attrs); + return getGlobalViewAttr(type, globalOp, arAttr); + } + mlir::Attribute getString(llvm::StringRef str, mlir::Type eltTy, unsigned size = 0) { unsigned finalSize = size ? size : str.size(); @@ -941,51 +955,12 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { // yet, return them. void computeGlobalViewIndicesFromFlatOffset( int64_t Offset, mlir::Type Ty, cir::CIRDataLayout Layout, - llvm::SmallVectorImpl &Indices) { - if (!Offset) - return; - - mlir::Type SubType; - - auto getIndexAndNewOffset = - [](int64_t Offset, int64_t EltSize) -> std::pair { - int64_t DivRet = Offset / EltSize; - if (DivRet < 0) - DivRet -= 1; // make sure offset is positive - int64_t ModRet = Offset - (DivRet * EltSize); - return {DivRet, ModRet}; - }; - - if (auto ArrayTy = mlir::dyn_cast(Ty)) { - int64_t EltSize = Layout.getTypeAllocSize(ArrayTy.getEltType()); - SubType = ArrayTy.getEltType(); - auto const [Index, NewOffset] = getIndexAndNewOffset(Offset, EltSize); - Indices.push_back(Index); - Offset = NewOffset; - } else if (auto StructTy = mlir::dyn_cast(Ty)) { - auto Elts = StructTy.getMembers(); - int64_t Pos = 0; - for (size_t I = 0; I < Elts.size(); ++I) { - int64_t EltSize = - (int64_t)Layout.getTypeAllocSize(Elts[I]).getFixedValue(); - unsigned AlignMask = Layout.getABITypeAlign(Elts[I]).value() - 1; - Pos = (Pos + AlignMask) & ~AlignMask; - assert(Offset >= 0); - if (Offset < Pos + EltSize) { - Indices.push_back(I); - SubType = Elts[I]; - Offset -= Pos; - break; - } - Pos += EltSize; - } - } else { - llvm_unreachable("unexpected type"); - } + llvm::SmallVectorImpl &Indices); - assert(SubType); - computeGlobalViewIndicesFromFlatOffset(Offset, SubType, Layout, Indices); - } + // Convert high-level indices (e.g. from GlobalViewAttr) to byte offset + uint64_t computeOffsetFromGlobalViewIndices(const cir::CIRDataLayout &layout, + mlir::Type t, + llvm::ArrayRef indexes); cir::StackSaveOp createStackSave(mlir::Location loc, mlir::Type ty) { return create(loc, ty); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index c2955d7ad3fa..f0ee6333c53a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -756,7 +756,6 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD, .getAddressPoint(BaseSubobject(CD, Offset)); assert(!cir::MissingFeatures::ptrAuth()); mlir::ArrayAttr indices = builder.getArrayAttr({ - builder.getI32IntegerAttr(0), builder.getI32IntegerAttr(addressPoint.VTableIndex), builder.getI32IntegerAttr(addressPoint.AddressPointIndex), }); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c93a145f35ce..521c41aa4504 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -9,9 +9,7 @@ // This is the internal per-translation-unit state used for CIR translation. // //===----------------------------------------------------------------------===// - #include "CIRGenModule.h" - #include "CIRGenCXXABI.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" @@ -797,9 +795,75 @@ void CIRGenModule::setNonAliasAttributes(GlobalDecl GD, mlir::Operation *GO) { assert(!cir::MissingFeatures::setTargetAttributes()); } +static llvm::SmallVector indexesOfArrayAttr(mlir::ArrayAttr indexes) { + llvm::SmallVector inds; + + for (mlir::Attribute i : indexes) { + auto ind = dyn_cast(i); + assert(ind && "expect MLIR integer attribute"); + inds.push_back(ind.getValue().getSExtValue()); + } + + return inds; +} + +static bool isViewOnGlobal(GlobalOp glob, GlobalViewAttr view) { + return view.getSymbol().getValue() == glob.getSymName(); +} + +static GlobalViewAttr createNewGlobalView(CIRGenModule &CGM, GlobalOp newGlob, + GlobalViewAttr attr, + mlir::Type oldTy) { + if (!attr.getIndices() || !isViewOnGlobal(newGlob, attr)) + return attr; + + llvm::SmallVector oldInds = indexesOfArrayAttr(attr.getIndices()); + llvm::SmallVector newInds; + CIRGenBuilderTy &bld = CGM.getBuilder(); + const CIRDataLayout &layout = CGM.getDataLayout(); + mlir::MLIRContext *ctxt = bld.getContext(); + auto newTy = newGlob.getSymType(); + + auto offset = bld.computeOffsetFromGlobalViewIndices(layout, oldTy, oldInds); + bld.computeGlobalViewIndicesFromFlatOffset(offset, newTy, layout, newInds); + cir::PointerType newPtrTy; + + if (isa(oldTy)) + newPtrTy = cir::PointerType::get(ctxt, newTy); + else if (cir::ArrayType oldArTy = dyn_cast(oldTy)) + newPtrTy = dyn_cast(attr.getType()); + + if (newPtrTy) + return bld.getGlobalViewAttr(newPtrTy, newGlob, newInds); + + llvm_unreachable("NYI"); +} + +static mlir::Attribute getNewInitValue(CIRGenModule &CGM, GlobalOp newGlob, + mlir::Type oldTy, GlobalOp user, + mlir::Attribute oldInit) { + if (auto oldView = mlir::dyn_cast(oldInit)) { + return createNewGlobalView(CGM, newGlob, oldView, oldTy); + } else if (auto oldArray = mlir::dyn_cast(oldInit)) { + llvm::SmallVector newArray; + auto eltsAttr = dyn_cast(oldArray.getElts()); + for (auto elt : eltsAttr) { + if (auto view = dyn_cast(elt)) + newArray.push_back(createNewGlobalView(CGM, newGlob, view, oldTy)); + else if (auto view = dyn_cast(elt)) + newArray.push_back(getNewInitValue(CGM, newGlob, oldTy, user, elt)); + } + + auto &builder = CGM.getBuilder(); + mlir::Attribute ar = mlir::ArrayAttr::get(builder.getContext(), newArray); + return builder.getConstArray(ar, cast(oldArray.getType())); + } else { + llvm_unreachable("NYI"); + } +} + void CIRGenModule::replaceGlobal(cir::GlobalOp Old, cir::GlobalOp New) { assert(Old.getSymName() == New.getSymName() && "symbol names must match"); - // If the types does not match, update all references to Old to the new type. auto OldTy = Old.getSymType(); auto NewTy = New.getSymType(); @@ -809,6 +873,7 @@ void CIRGenModule::replaceGlobal(cir::GlobalOp Old, cir::GlobalOp New) { if (oldAS != newAS) { llvm_unreachable("NYI"); } + if (OldTy != NewTy) { auto OldSymUses = Old.getSymbolUses(theModule.getOperation()); if (OldSymUses.has_value()) { @@ -823,11 +888,16 @@ void CIRGenModule::replaceGlobal(cir::GlobalOp Old, cir::GlobalOp New) { cir::PointerType::get(&getMLIRContext(), NewTy)); mlir::OpBuilder::InsertionGuard guard(builder); - builder.setInsertionPointAfter(UserOp); + builder.setInsertionPointAfter(GGO); mlir::Type ptrTy = builder.getPointerTo(OldTy); mlir::Value cast = builder.createBitcast(GGO->getLoc(), UseOpResultValue, ptrTy); UseOpResultValue.replaceAllUsesExcept(cast, cast.getDefiningOp()); + } else if (auto glob = dyn_cast(UserOp)) { + if (auto init = glob.getInitialValue()) { + auto nw = getNewInitValue(*this, New, OldTy, glob, init.value()); + glob.setInitialValueAttr(nw); + } } } } @@ -1083,7 +1153,8 @@ CIRGenModule::getAddrOfGlobalVarAttr(const VarDecl *D, mlir::Type Ty, Ty = getTypes().convertTypeForMem(ASTTy); auto globalOp = getOrCreateCIRGlobal(D, Ty, IsForDefinition); - return builder.getGlobalViewAttr(builder.getPointerTo(Ty), globalOp); + auto ptrTy = builder.getPointerTo(globalOp.getSymType()); + return builder.getGlobalViewAttr(ptrTy, globalOp); } mlir::Operation *CIRGenModule::getWeakRefReference(const ValueDecl *VD) { diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 2fa51b534da4..81f7971ac787 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -622,8 +622,7 @@ void CIRGenVTables::emitVTTDefinition(cir::GlobalOp VTT, "Did not find ctor vtable address point!"); } - mlir::Attribute Idxs[3] = { - CGM.getBuilder().getI32IntegerAttr(0), + mlir::Attribute Idxs[2] = { CGM.getBuilder().getI32IntegerAttr(AddressPoint.VTableIndex), CGM.getBuilder().getI32IntegerAttr(AddressPoint.AddressPointIndex), }; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 05679d9d86dd..0331122f35d2 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -680,12 +680,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::GlobalViewAttr globalAttr, if (globalAttr.getIndices()) { llvm::SmallVector indices; - if (auto stTy = dyn_cast(sourceType)) { - if (stTy.isIdentified()) - indices.push_back(0); - } else if (isa(sourceType)) { + if (isa(sourceType)) indices.push_back(0); - } for (auto idx : globalAttr.getIndices()) { auto intAttr = dyn_cast(idx); diff --git a/clang/test/CIR/CodeGen/globals-ref-globals.c b/clang/test/CIR/CodeGen/globals-ref-globals.c new file mode 100644 index 000000000000..8343153e3e8e --- /dev/null +++ b/clang/test/CIR/CodeGen/globals-ref-globals.c @@ -0,0 +1,126 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM + +typedef struct { + int f0 : 24; + int f1; + int f2; +} S; + +static S g1 = {2799, 9, 123}; +static int *g2[4] = {&g1.f1, &g1.f1, &g1.f1, &g1.f1}; +static int **g3 = &g2[1]; +static int ***g4 = &g3; +static int ****g5 = &g4; + +static S g6[2] = {{2799, 9, 123}, {2799, 9, 123}}; +static int *g7[2] = {&g6[0].f2, &g6[1].f2}; +static int **g8 = &g7[1]; + +// CHECK-DAG: !ty_anon_struct = !cir.struct +// CHECK-DAG: !ty_anon_struct1 = !cir.struct, !s32i}> +// CHECK-DAG: !ty_anon_struct2 = !cir.struct +// CHECK-DAG: !ty_anon_struct3 = !cir.struct, !s32i, !s8i, !cir.array}> + +// CHECK-DAG: g1 = #cir.const_struct<{#cir.int<239> : !u8i, #cir.int<10> : !u8i, #cir.int<0> : !u8i, #cir.zero : !u8i, #cir.int<9> : !s32i, #cir.int<123> : !s32i}> : !ty_anon_struct +// CHECK-DAG: g2 = #cir.const_array<[#cir.global_view<@g1, [4]> : !cir.ptr, #cir.global_view<@g1, [4]> : !cir.ptr, #cir.global_view<@g1, [4]> : !cir.ptr, #cir.global_view<@g1, [4]> : !cir.ptr]> : !cir.array x 4> +// CHECK-DAG: g3 = #cir.global_view<@g2, [1 : i32]> : !cir.ptr> +// CHECK-DAG: g4 = #cir.global_view<@g3> : !cir.ptr>> +// CHECK-DAG: g5 = #cir.global_view<@g4> : !cir.ptr>>> +// CHECK-DAG: g6 = #cir.const_array<[#cir.const_struct<{#cir.int<239> : !u8i, #cir.int<10> : !u8i, #cir.int<0> : !u8i, #cir.zero : !u8i, #cir.int<9> : !s32i, #cir.int<123> : !s32i}> : !ty_anon_struct, #cir.const_struct<{#cir.int<239> : !u8i, #cir.int<10> : !u8i, #cir.int<0> : !u8i, #cir.zero : !u8i, #cir.int<9> : !s32i, #cir.int<123> : !s32i}> : !ty_anon_struct]> : !cir.array +// CHECK-DAG: g7 = #cir.const_array<[#cir.global_view<@g6, [0, 5]> : !cir.ptr, #cir.global_view<@g6, [1, 5]> : !cir.ptr]> : !cir.array x 2> +// CHECK-DAG: g8 = #cir.global_view<@g7, [1 : i32]> : !cir.ptr> + +// LLVM-DAG: @g1 = internal global { i8, i8, i8, i8, i32, i32 } { i8 -17, i8 10, i8 0, i8 0, i32 9, i32 123 }, align 4 +// LLVM-DAG: @g2 = internal global [4 x ptr] [ptr getelementptr inbounds ({ i8, i8, i8, i8, i32, i32 }, ptr @g1, i32 0, i32 4), ptr getelementptr inbounds ({ i8, i8, i8, i8, i32, i32 }, ptr @g1, i32 0, i32 4), ptr getelementptr inbounds ({ i8, i8, i8, i8, i32, i32 }, ptr @g1, i32 0, i32 4), ptr getelementptr inbounds ({ i8, i8, i8, i8, i32, i32 }, ptr @g1, i32 0, i32 4)], align 16 +// LLVM-DAG: @g3 = internal global ptr getelementptr inbounds ([4 x ptr], ptr @g2, i32 0, i32 1), align 8 +// LLVM-DAG: @g4 = internal global ptr @g3, align 8 +// LLVM-DAG: @g5 = internal global ptr @g4, align 8 +// LLVM-DAG: @g6 = internal global [2 x { i8, i8, i8, i8, i32, i32 }] [{ i8, i8, i8, i8, i32, i32 } { i8 -17, i8 10, i8 0, i8 0, i32 9, i32 123 }, { i8, i8, i8, i8, i32, i32 } { i8 -17, i8 10, i8 0, i8 0, i32 9, i32 123 }], align 16 +// LLVM-DAG: @g7 = internal global [2 x ptr] [ptr getelementptr inbounds ([2 x { i8, i8, i8, i8, i32, i32 }], ptr @g6, i32 0, i32 0, i32 5), ptr getelementptr inbounds ([2 x { i8, i8, i8, i8, i32, i32 }], ptr @g6, i32 0, i32 1, i32 5)], align 16 +// LLVM-DAG: @g8 = internal global ptr getelementptr inbounds ([2 x ptr], ptr @g7, i32 0, i32 1), align 8 + +// FIXME: LLVM output should be: @g2 = internal global [4 x ptr] [ptr getelementptr (i8, ptr @g1, i64 4), ptr getelementptr (i8, ptr @g1, i64 4), ptr getelementptr (i8, ptr @g1, i64 4), ptr getelementptr (i8, ptr @g1, i64 4)], align 16 +// FIXME: LLVM output should be: @g3 = internal global ptr getelementptr (i8, ptr @g2, i64 8), align 8 +// FIXME: LLVM output should be: @g7 = internal global [2 x ptr] [ptr getelementptr (i8, ptr @g6, i64 8), ptr getelementptr (i8, ptr @g6, i64 20)], align 16 +// FIXME: LLVM output should be: @g8 = internal global ptr getelementptr (i8, ptr @g7, i64 8), align 8 + +typedef struct { + char f1; + int f6; +} S1; + +S1 g9 = {1, 42}; +int* g10 = &g9.f6; + +#pragma pack(push) +#pragma pack(1) +typedef struct { + char f1; + int f6; +} S2; +#pragma pack(pop) + +S2 g11 = {1, 42}; +int* g12 = &g11.f6; + +// CHECK-DAG: g9 = #cir.const_struct<{#cir.int<1> : !s8i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array, #cir.int<42> : !s32i}> : !ty_anon_struct1 {alignment = 4 : i64} +// CHECK-DAG: g10 = #cir.global_view<@g9, [2 : i32]> : !cir.ptr {alignment = 8 : i64} +// CHECK-DAG: g11 = #cir.const_struct<{#cir.int<1> : !s8i, #cir.int<42> : !s32i}> : !ty_S2_ {alignment = 1 : i64} +// CHECK-DAG: g12 = #cir.global_view<@g11, [1 : i32]> : !cir.ptr {alignment = 8 : i64} + +// LLVM-DAG: @g9 = global { i8, [3 x i8], i32 } { i8 1, [3 x i8] zeroinitializer, i32 42 }, align 4 +// LLVM-DAG: @g10 = global ptr getelementptr inbounds ({ i8, [3 x i8], i32 }, ptr @g9, i32 0, i32 2), align 8 +// LLVM-DAG: @g11 = global %struct.S2 <{ i8 1, i32 42 }>, align 1 +// LLVM-DAG: @g12 = global ptr getelementptr inbounds (%struct.S2, ptr @g11, i32 0, i32 1), align 8 + +// FIXME: LLVM output should be: @g10 = dso_local global ptr getelementptr (i8, ptr @g9, i64 4), align 8 +// FIXME: LLVM output should be: @g12 = dso_local global ptr getelementptr (i8, ptr @g11, i64 1), align 8 + + +typedef struct { + short f0; + int f1; + char f2; +} S3; + +static S3 g13 = {-1L,0L,1L}; +static S3* g14[2][2] = {{&g13, &g13}, {&g13, &g13}}; + +// CHECK-DAG: g13 = #cir.const_struct<{#cir.int<-1> : !s16i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array, #cir.int<0> : !s32i, #cir.int<1> : !s8i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array}> : !ty_anon_struct3 +// CHECK-DAG: g14 = #cir.const_array<[#cir.const_array<[#cir.global_view<@g13> : !cir.ptr, #cir.global_view<@g13> : !cir.ptr]> : !cir.array x 2>, #cir.const_array<[#cir.global_view<@g13> : !cir.ptr, #cir.global_view<@g13> : !cir.ptr]> : !cir.array x 2>]> : !cir.array x 2> x 2> + +typedef struct { + int f0; + int f1; +} S4; + +typedef struct { + int f0 : 17; + int f1 : 5; + int f2 : 19; + S4 f3; +} S5; + +static S5 g15 = {187,1,442,{123,321}}; + +int* g16 = &g15.f3.f1; + +// CHECK-DAG: g15 = #cir.const_struct<{#cir.int<187> : !u8i, #cir.int<0> : !u8i, #cir.int<2> : !u8i, #cir.zero : !u8i, #cir.int<186> : !u8i, #cir.int<1> : !u8i, #cir.int<0> : !u8i, #cir.zero : !u8i, #cir.const_struct<{#cir.int<123> : !s32i, #cir.int<321> : !s32i}> : !ty_S4_}> : !ty_anon_struct2 {alignment = 4 : i64} +// CHECK-DAG: g16 = #cir.global_view<@g15, [8, 1]> : !cir.ptr {alignment = 8 : i64} + +// LLVM-DAG: @g15 = internal global { i8, i8, i8, i8, i8, i8, i8, i8, %struct.S4 } { i8 -69, i8 0, i8 2, i8 0, i8 -70, i8 1, i8 0, i8 0, %struct.S4 { i32 123, i32 321 } }, align 4 +// LLVM-DAG: @g16 = global ptr getelementptr inbounds ({ i8, i8, i8, i8, i8, i8, i8, i8, %struct.S4 }, ptr @g15, i32 0, i32 8, i32 1), align 8 + +// FIXME: LLVM output should be: @g16 = dso_local global ptr getelementptr (i8, ptr @g15, i64 12), align 8 + +void use() { + int a = **g3; + int b = ***g4; + int c = ****g5; + int d = **g8; + S3 s = *g14[1][1]; + int f = *g16; +} diff --git a/clang/test/CIR/CodeGen/union-padding.c b/clang/test/CIR/CodeGen/union-padding.c index 8deb2890083c..85ed30d52ce5 100644 --- a/clang/test/CIR/CodeGen/union-padding.c +++ b/clang/test/CIR/CodeGen/union-padding.c @@ -19,7 +19,7 @@ short use() { // CHECK: !ty_anon_struct = !cir.struct}> // CHECK: @g3 = #cir.global_view<@g2> : !cir.ptr> -// CHECK: @g2 = #cir.const_array<[#cir.global_view<@g1, [1 : i32]> : !cir.ptr]> : !cir.array x 1> +// CHECK: @g2 = #cir.const_array<[#cir.global_view<@g1, [1]> : !cir.ptr]> : !cir.array x 1> // CHECK: @g1 = // CHECK-SAME: #cir.const_array<[ diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp index 1ba565b7cb79..574d2d943258 100644 --- a/clang/test/CIR/CodeGen/vbase.cpp +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -18,7 +18,7 @@ void ppp() { B b; } // CIR: cir.global linkonce_odr @_ZTV1B = #cir.vtable<{#cir.const_array<[#cir.ptr<12 : i64> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1B> : !cir.ptr]> : !cir.array x 3>}> // VTT for B. -// CIR: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 1> +// CIR: cir.global linkonce_odr @_ZTT1B = #cir.const_array<[#cir.global_view<@_ZTV1B, [0 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 1> // CIR: cir.global "private" external @_ZTVN10__cxxabiv121__vmi_class_type_infoE diff --git a/clang/test/CIR/CodeGen/vtable-emission.cpp b/clang/test/CIR/CodeGen/vtable-emission.cpp index 6691167488c5..7f09f74de0d5 100644 --- a/clang/test/CIR/CodeGen/vtable-emission.cpp +++ b/clang/test/CIR/CodeGen/vtable-emission.cpp @@ -20,7 +20,7 @@ void S::key() {} // LLVM-SAME: [ptr null, ptr @_ZTI1S, ptr @_ZN1S3keyEv, ptr @_ZN1S6nonKeyEv] }, align 8 // CHECK: cir.global external @sobj = #cir.const_struct -// CHECK-SAME: <{#cir.global_view<@_ZTV1S, [0 : i32, 0 : i32, 2 : i32]> : +// CHECK-SAME: <{#cir.global_view<@_ZTV1S, [0 : i32, 2 : i32]> : // CHECK-SAME: !cir.ptr}> : !ty_anon_struct2 {alignment = 8 : i64} // LLVM: @sobj = global { ptr } { ptr getelementptr inbounds // LLVM-SAME: ({ [4 x ptr] }, ptr @_ZTV1S, i32 0, i32 0, i32 2) }, align 8 diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index ab8cc999f856..c32e242737cf 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -46,7 +46,7 @@ int f() { // Vtable of Class D // CIR: cir.global linkonce_odr @_ZTV1D = #cir.vtable<{#cir.const_array<[#cir.ptr<40 : i64> : !cir.ptr, #cir.ptr : !cir.ptr, #cir.global_view<@_ZTI1D> : !cir.ptr, #cir.global_view<@_ZN1B1wEv> : !cir.ptr, #cir.global_view<@_ZN1D1yEv> : !cir.ptr]> : !cir.array x 5>, #cir.const_array<[#cir.ptr<24 : i64> : !cir.ptr, #cir.ptr<-16 : i64> : !cir.ptr, #cir.global_view<@_ZTI1D> : !cir.ptr, #cir.global_view<@_ZN1C1xEv> : !cir.ptr]> : !cir.array x 4>, #cir.const_array<[#cir.ptr : !cir.ptr, #cir.ptr<-40 : i64> : !cir.ptr, #cir.global_view<@_ZTI1D> : !cir.ptr, #cir.global_view<@_ZN1A1vEv> : !cir.ptr]> : !cir.array x 4>}> : !ty_anon_struct4 {alignment = 8 : i64} // VTT of class D -// CIR: cir.global linkonce_odr @_ZTT1D = #cir.const_array<[#cir.global_view<@_ZTV1D, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D0_1B, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D0_1B, [0 : i32, 1 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D16_1C, [0 : i32, 0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D16_1C, [0 : i32, 1 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTV1D, [0 : i32, 2 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTV1D, [0 : i32, 1 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 7> {alignment = 8 : i64} +// CIR: cir.global linkonce_odr @_ZTT1D = #cir.const_array<[#cir.global_view<@_ZTV1D, [0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D0_1B, [0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D0_1B, [1 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D16_1C, [0 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTC1D16_1C, [1 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTV1D, [2 : i32, 3 : i32]> : !cir.ptr, #cir.global_view<@_ZTV1D, [1 : i32, 3 : i32]> : !cir.ptr]> : !cir.array x 7> {alignment = 8 : i64} // Class B constructor // CIR: cir.func linkonce_odr @_ZN1BC2Ev(%arg0: !cir.ptr From 5373f42aa68596a529032294577fda2268571571 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 5 Feb 2025 05:40:51 +0300 Subject: [PATCH 2228/2301] [CIR][ABI][Lowering] Fixes calling convention (#1308) This PR fixes two run time bugs in the calling convention pass. These bugs were found with `csmith`. Case #1. Return value from a function. Before this PR the returned value were stored in a bit casted memory location. But for the next example it's not safe: the size of a memory slot is less than the size of return value. And the store operation cause a segfault! ``` #pragma pack(push) #pragma pack(1) typedef struct { int f0 : 18; int f1 : 31; int f2 : 5; int f3 : 29; int f4 : 24; } PackedS; #pragma pack(pop) ``` CIR type for this struct is `!ty_PackedS1_ = !cir.struct}>`, i.e. it occupies 14 bytes. Before this PR the next code ``` PackedS foo(void) { PackedS s; return s; } void check(void) { PackedS y = foo(); } ``` produced the next CIR: ``` %0 = cir.alloca !ty_PackedS1_, !cir.ptr, ["y", init] {alignment = 1 : i64} %1 = cir.call @foo() : () -> !cir.array %2 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr> cir.store %1, %2 : !cir.array, !cir.ptr> ``` As one cat see, `%1` is an array of two 64-bit integers and the memory was allocated for 14 bytes only (size of struct). Hence the segfault! This PR fixes such cases and now we have a coercion through memory, which is even with the OG. Case #2. Passing an argument from a pointer deref. Previously for the struct types passed by value we tried to find alloca instruction in order to use it as a source for memcpy operation. But if we have pointer dereference, (in other words if we have a ` >` as alloca result) we don't need to search for the address of the location where this pointer stored - instead we're interested in the pointer itself. And it's a general approach - instead of trying to find an alloca instruction we need to find a first pointer on the way - that will be an address we meed to use for the memcpy source. I combined these two cases into a single PR since there are only few changes actually. But I can split in two if you'd prefer --- .../TargetLowering/LowerFunction.cpp | 37 +++++--- .../AArch64/aarch64-cc-structs.c | 94 +++++++++++++++++-- 2 files changed, 112 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 0771bce3399e..fcf95b7db7d0 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -232,6 +232,17 @@ cir::AllocaOp findAlloca(mlir::Operation *op) { return {}; } +mlir::Value findAddr(mlir::Value v) { + if (mlir::isa(v.getType())) + return v; + + auto op = v.getDefiningOp(); + if (!op || !mlir::isa(op)) + return {}; + + return findAddr(op->getOperand(0)); +} + /// Create a store to \param Dst from \param Src where the source and /// destination may have different types. /// @@ -338,10 +349,10 @@ mlir::Value createCoercedValue(mlir::Value Src, mlir::Type Ty, return CGF.buildAggregateBitcast(Src, Ty); } - if (auto alloca = findAlloca(Src.getDefiningOp())) { - auto tmpAlloca = createTmpAlloca(CGF, alloca.getLoc(), Ty); - createMemCpy(CGF, tmpAlloca, alloca, SrcSize.getFixedValue()); - return CGF.getRewriter().create(alloca.getLoc(), + if (mlir::Value addr = findAddr(Src)) { + auto tmpAlloca = createTmpAlloca(CGF, addr.getLoc(), Ty); + createMemCpy(CGF, tmpAlloca, addr, SrcSize.getFixedValue()); + return CGF.getRewriter().create(addr.getLoc(), tmpAlloca.getResult()); } @@ -371,7 +382,6 @@ mlir::Value createCoercedNonPrimitive(mlir::Value src, mlir::Type ty, auto tySize = LF.LM.getDataLayout().getTypeStoreSize(ty); createMemCpy(LF, alloca, addr, tySize.getFixedValue()); - auto newLoad = bld.create(src.getLoc(), alloca.getResult()); bld.replaceAllOpUsesWith(load, newLoad); @@ -1265,6 +1275,14 @@ mlir::Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // FIXME(cir): Use return value slot here. mlir::Value RetVal = callOp.getResult(); + mlir::Value dstPtr; + for (auto *user : Caller->getUsers()) { + if (auto storeOp = mlir::dyn_cast(user)) { + assert(!dstPtr && "multiple destinations for the return value"); + dstPtr = storeOp.getAddr(); + } + } + // TODO(cir): Check for volatile return values. cir_cconv_assert(!cir::MissingFeatures::volatileTypes()); @@ -1283,16 +1301,11 @@ mlir::Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (mlir::dyn_cast(RetTy) && mlir::cast(RetTy).getNumElements() != 0) { RetVal = newCallOp.getResult(); + createCoercedStore(RetVal, dstPtr, false, *this); - llvm::SmallVector workList; for (auto *user : Caller->getUsers()) if (auto storeOp = mlir::dyn_cast(user)) - workList.push_back(storeOp); - for (StoreOp storeOp : workList) { - auto destPtr = - createCoercedBitcast(storeOp.getAddr(), RetVal.getType(), *this); - rewriter.replaceOpWithNewOp(storeOp, RetVal, destPtr); - } + rewriter.eraseOp(storeOp); } // NOTE(cir): No need to convert from a temp to an RValue. This is diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index 34923dc2422e..b434edc09dfe 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -302,19 +302,18 @@ void pass_nested_u(NESTED_U a) {} // CHECK: cir.func no_proto @call_nested_u() // CHECK: %[[#V0:]] = cir.alloca !ty_NESTED_U, !cir.ptr -// CHECK: %[[#V1:]] = cir.alloca !u64i, !cir.ptr, ["tmp"] {alignment = 8 : i64} +// CHECK: %[[#V1:]] = cir.alloca !u64i, !cir.ptr, ["tmp"] // CHECK: %[[#V2:]] = cir.load %[[#V0]] : !cir.ptr, !ty_NESTED_U -// CHECK: %[[#V3:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr) -// CHECK: %[[#V4:]] = cir.load %[[#V3]] -// CHECK: %[[#V5:]] = cir.cast(bitcast, %[[#V3]] -// CHECK: %[[#V6:]] = cir.load %[[#V5]] -// CHECK: %[[#V7:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V3:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V4:]] = cir.load %[[#V3]] : !cir.ptr, !ty_anon2E0_ +// CHECK: %[[#V5:]] = cir.cast(bitcast, %[[#V3]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V6:]] = cir.load %[[#V5]] : !cir.ptr, !ty_anon2E1_ +// CHECK: %[[#V7:]] = cir.cast(bitcast, %[[#V5]] : !cir.ptr), !cir.ptr // CHECK: %[[#V8:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr), !cir.ptr // CHECK: %[[#V9:]] = cir.const #cir.int<2> : !u64i // CHECK: cir.libc.memcpy %[[#V9]] bytes from %[[#V7]] to %[[#V8]] : !u64i, !cir.ptr -> !cir.ptr // CHECK: %[[#V10:]] = cir.load %[[#V1]] : !cir.ptr, !u64i // CHECK: cir.call @pass_nested_u(%[[#V10]]) : (!u64i) -> () -// CHECK: cir.return // LLVM: void @call_nested_u() // LLVM: %[[#V1:]] = alloca %struct.NESTED_U, i64 1, align 1 @@ -330,3 +329,84 @@ void call_nested_u() { NESTED_U a; pass_nested_u(a); } + + +#pragma pack(push) +#pragma pack(1) +typedef struct { + int f0 : 18; + int f1 : 31; + int f2 : 5; + int f3 : 29; + int f4 : 24; +} PackedS1; +#pragma pack(pop) + +PackedS1 foo(void) { + PackedS1 s; + return s; +} + +void bar(void) { + PackedS1 y = foo(); +} + +// CHECK: cir.func @bar +// CHECK: %[[#V0:]] = cir.alloca !ty_PackedS1_, !cir.ptr, ["y", init] +// CHECK: %[[#V1:]] = cir.alloca !cir.array, !cir.ptr>, ["tmp"] +// CHECK: %[[#V2:]] = cir.call @foo() : () -> !cir.array +// CHECK: cir.store %[[#V2]], %[[#V1]] : !cir.array, !cir.ptr> +// CHECK: %[[#V3:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr>), !cir.ptr +// CHECK: %[[#V4:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V5:]] = cir.const #cir.int<14> : !u64i +// CHECK: cir.libc.memcpy %[[#V5]] bytes from %[[#V3]] to %[[#V4]] : !u64i, !cir.ptr -> !cir.ptr + +// LLVML: void @bar +// LLVM: %[[#V1:]] = alloca %struct.PackedS1, i64 1, align 1 +// LLVM: %[[#V2:]] = alloca [2 x i64], i64 1, align 8 +// LLVM: %[[#V3:]] = call [2 x i64] @foo() +// LLVM: store [2 x i64] %[[#V3]], ptr %[[#V2]], align 8 +// LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#V1]], ptr %[[#V2]], i64 14, i1 false) + + +#pragma pack(push) +#pragma pack(1) +typedef struct { + short f0; + int f1; +} PackedS2; +#pragma pack(pop) + +PackedS2 g[3] = {{1,2},{3,4},{5,6}}; + +void baz(PackedS2 a) { + short *x = &g[2].f0; + (*x) = a.f0; +} + +void qux(void) { + const PackedS2 *s1 = &g[1]; + baz(*s1); +} + +// check source of memcpy +// CHECK: cir.func @qux +// CHECK: %[[#V0:]] = cir.alloca !cir.ptr, !cir.ptr>, ["s1", init] +// CHECK: %[[#V1:]] = cir.alloca !u64i, !cir.ptr, ["tmp"] +// CHECK: %[[#V2:]] = cir.get_global @g : !cir.ptr> +// CHECK: %[[#V3:]] = cir.const #cir.int<1> : !s32i +// CHECK: %[[#V4:]] = cir.cast(array_to_ptrdecay, %[[#V2]] : !cir.ptr>), !cir.ptr +// CHECK: %[[#V5:]] = cir.ptr_stride(%[[#V4]] : !cir.ptr, %[[#V3]] : !s32i), !cir.ptr +// CHECK: cir.store %[[#V5]], %[[#V0]] : !cir.ptr, !cir.ptr> +// CHECK: %[[#V6:]] = cir.load deref %[[#V0]] : !cir.ptr>, !cir.ptr +// CHECK: %[[#V7:]] = cir.cast(bitcast, %[[#V6]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V8:]] = cir.const #cir.int<6> : !u64i +// CHECK: cir.libc.memcpy %[[#V8]] bytes from %[[#V7]] + +// LLVM: void @qux +// LLVM: %[[#V1:]] = alloca ptr, i64 1, align 8 +// LLVM: %[[#V2:]] = alloca i64, i64 1, align 8 +// LLVM: store ptr getelementptr (%struct.PackedS2, ptr @g, i64 1), ptr %[[#V1]], align 8 +// LLVM: %[[#V3:]] = load ptr, ptr %[[#V1]], align 8 +// LLVM: %[[#V4:]] = load %struct.PackedS2, ptr %[[#V3]], align 1 +// LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#V2]], ptr %[[#V3]], i64 6, i1 false) From 90a5b619b9a97b2ad64baee1daa165abb8be8a48 Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Wed, 5 Feb 2025 20:06:23 +0000 Subject: [PATCH 2229/2301] [CIR][CUDA] Initial support for host compilation (#1309) Adds support for `__host__` and `__device__` functions when compiling for CUDA host. The conditions I checked against is taken from OG. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 26 +++++++-- clang/lib/CIR/CodeGen/TargetInfo.cpp | 56 +++++++++++++++++++ clang/test/CIR/CodeGen/CUDA/simple.cu | 16 ++++++ clang/test/CIR/CodeGen/Inputs/cuda.h | 74 ++++++++++++++++++++++++++ 4 files changed, 167 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/CUDA/simple.cu create mode 100644 clang/test/CIR/CodeGen/Inputs/cuda.h diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 521c41aa4504..444883f6d774 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -514,7 +514,19 @@ void CIRGenModule::emitGlobal(GlobalDecl GD) { assert(!Global->hasAttr() && "NYI"); assert(!Global->hasAttr() && "NYI"); - assert(!langOpts.CUDA && "NYI"); + + if (langOpts.CUDA) { + if (langOpts.CUDAIsDevice) + llvm_unreachable("NYI"); + + if (dyn_cast(Global)) + llvm_unreachable("NYI"); + + // We must skip __device__ functions when compiling for host. + if (!Global->hasAttr() && Global->hasAttr()) { + return; + } + } if (langOpts.OpenMP) { // If this is OpenMP, check if it is legal to emit this global normally. @@ -557,6 +569,7 @@ void CIRGenModule::emitGlobal(GlobalDecl GD) { return; } } else { + assert(!langOpts.CUDA && "NYI"); const auto *VD = cast(Global); assert(VD->isFileVarDecl() && "Cannot emit local var decl as global."); if (VD->isThisDeclarationADefinition() != VarDecl::Definition && @@ -2322,7 +2335,13 @@ cir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, auto F = GetOrCreateCIRFunction(MangledName, Ty, GD, ForVTable, DontDefer, /*IsThunk=*/false, IsForDefinition); - assert(!langOpts.CUDA && "NYI"); + // As __global__ functions always reside on device, + // we need special care when accessing them from host; + // otherwise, CUDA functions behave as normal functions + if (langOpts.CUDA && !langOpts.CUDAIsDevice && + cast(GD.getDecl())->hasAttr()) { + llvm_unreachable("NYI"); + } return F; } @@ -3164,9 +3183,6 @@ void CIRGenModule::Release() { assert(!MissingFeatures::registerGlobalDtorsWithAtExit()); assert(!MissingFeatures::emitCXXThreadLocalInitFunc()); assert(!MissingFeatures::objCRuntime()); - if (astContext.getLangOpts().CUDA) { - llvm_unreachable("NYI"); - } assert(!MissingFeatures::openMPRuntime()); assert(!MissingFeatures::pgoReader()); assert(!MissingFeatures::emitCtorList()); // GlobalCtors, GlobalDtors diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index d613167677a9..7669dad59eb8 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -305,6 +305,30 @@ class SPIRVTargetCIRGenInfo : public CommonSPIRTargetCIRGenInfo { } // namespace +//===----------------------------------------------------------------------===// +// NVPTX ABI Implementation +//===----------------------------------------------------------------------===// + +namespace { + +class NVPTXABIInfo : public ABIInfo { +public: + NVPTXABIInfo(CIRGenTypes &cgt) : ABIInfo(cgt) {} + + cir::ABIArgInfo classifyReturnType(QualType retTy) const; + cir::ABIArgInfo classifyArgumentType(QualType ty) const; + + void computeInfo(CIRGenFunctionInfo &fnInfo) const override; +}; + +class NVPTXTargetCIRGenInfo : public TargetCIRGenInfo { +public: + NVPTXTargetCIRGenInfo(CIRGenTypes &cgt) + : TargetCIRGenInfo(std::make_unique(cgt)) {} +}; + +} // namespace + // TODO(cir): remove the attribute once this gets used. LLVM_ATTRIBUTE_UNUSED static bool classifyReturnType(const CIRGenCXXABI &CXXABI, @@ -443,6 +467,34 @@ cir::ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, return cir::ABIArgInfo::getDirect(ResType); } +// Skeleton only. Implement when used in TargetLower stage. +cir::ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType retTy) const { + llvm_unreachable("not yet implemented"); +} + +cir::ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType ty) const { + llvm_unreachable("not yet implemented"); +} + +void NVPTXABIInfo::computeInfo(CIRGenFunctionInfo &fnInfo) const { + // Top level CIR has unlimited arguments and return types. Lowering for ABI + // specific concerns should happen during a lowering phase. Assume everything + // is direct for now. + for (CIRGenFunctionInfo::arg_iterator it = fnInfo.arg_begin(), + ie = fnInfo.arg_end(); + it != ie; ++it) { + if (testIfIsVoidTy(it->type)) + it->info = cir::ABIArgInfo::getIgnore(); + else + it->info = cir::ABIArgInfo::getDirect(CGT.convertType(it->type)); + } + auto retTy = fnInfo.getReturnType(); + if (testIfIsVoidTy(retTy)) + fnInfo.getReturnInfo() = cir::ABIArgInfo::getIgnore(); + else + fnInfo.getReturnInfo() = cir::ABIArgInfo::getDirect(CGT.convertType(retTy)); +} + ABIInfo::~ABIInfo() {} bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { @@ -634,5 +686,9 @@ const TargetCIRGenInfo &CIRGenModule::getTargetCIRGenInfo() { case llvm::Triple::spirv64: { return SetCIRGenInfo(new SPIRVTargetCIRGenInfo(genTypes)); } + + case llvm::Triple::nvptx64: { + return SetCIRGenInfo(new NVPTXTargetCIRGenInfo(genTypes)); + } } } diff --git a/clang/test/CIR/CodeGen/CUDA/simple.cu b/clang/test/CIR/CodeGen/CUDA/simple.cu new file mode 100644 index 000000000000..5a0141d3d4b5 --- /dev/null +++ b/clang/test/CIR/CodeGen/CUDA/simple.cu @@ -0,0 +1,16 @@ +#include "../Inputs/cuda.h" + +// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fclangir \ +// RUN: -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + + +// This should emit as a normal C++ function. +__host__ void host_fn(int *a, int *b, int *c) {} + +// CIR: cir.func @_Z7host_fnPiS_S_ + +// This shouldn't emit. +__device__ void device_fn(int* a, double b, float c) {} + +// CHECK-NOT: cir.func @_Z9device_fnPidf diff --git a/clang/test/CIR/CodeGen/Inputs/cuda.h b/clang/test/CIR/CodeGen/Inputs/cuda.h new file mode 100644 index 000000000000..204bf2972088 --- /dev/null +++ b/clang/test/CIR/CodeGen/Inputs/cuda.h @@ -0,0 +1,74 @@ +/* Minimal declarations for CUDA support. Testing purposes only. */ +/* From test/CodeGenCUDA/Inputs/cuda.h. */ +#include + +#if __HIP__ || __CUDA__ +#define __constant__ __attribute__((constant)) +#define __device__ __attribute__((device)) +#define __global__ __attribute__((global)) +#define __host__ __attribute__((host)) +#define __shared__ __attribute__((shared)) +#if __HIP__ +#define __managed__ __attribute__((managed)) +#endif +#define __launch_bounds__(...) __attribute__((launch_bounds(__VA_ARGS__))) +#define __grid_constant__ __attribute__((grid_constant)) +#else +#define __constant__ +#define __device__ +#define __global__ +#define __host__ +#define __shared__ +#define __managed__ +#define __launch_bounds__(...) +#define __grid_constant__ +#endif + +struct dim3 { + unsigned x, y, z; + __host__ __device__ dim3(unsigned x, unsigned y = 1, unsigned z = 1) : x(x), y(y), z(z) {} +}; + +#if __HIP__ || HIP_PLATFORM +typedef struct hipStream *hipStream_t; +typedef enum hipError {} hipError_t; +int hipConfigureCall(dim3 gridSize, dim3 blockSize, size_t sharedSize = 0, + hipStream_t stream = 0); +extern "C" hipError_t __hipPushCallConfiguration(dim3 gridSize, dim3 blockSize, + size_t sharedSize = 0, + hipStream_t stream = 0); +#ifndef __HIP_API_PER_THREAD_DEFAULT_STREAM__ +extern "C" hipError_t hipLaunchKernel(const void *func, dim3 gridDim, + dim3 blockDim, void **args, + size_t sharedMem, + hipStream_t stream); +#else +extern "C" hipError_t hipLaunchKernel_spt(const void *func, dim3 gridDim, + dim3 blockDim, void **args, + size_t sharedMem, + hipStream_t stream); +#endif // __HIP_API_PER_THREAD_DEFAULT_STREAM__ +#elif __OFFLOAD_VIA_LLVM__ +extern "C" unsigned __llvmPushCallConfiguration(dim3 gridDim, dim3 blockDim, + size_t sharedMem = 0, void *stream = 0); +extern "C" unsigned llvmLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, + void **args, size_t sharedMem = 0, void *stream = 0); +#else +typedef struct cudaStream *cudaStream_t; +typedef enum cudaError {} cudaError_t; +extern "C" int cudaConfigureCall(dim3 gridSize, dim3 blockSize, + size_t sharedSize = 0, + cudaStream_t stream = 0); +extern "C" int __cudaPushCallConfiguration(dim3 gridSize, dim3 blockSize, + size_t sharedSize = 0, + cudaStream_t stream = 0); +extern "C" cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, + dim3 blockDim, void **args, + size_t sharedMem, cudaStream_t stream); +extern "C" cudaError_t cudaLaunchKernel_ptsz(const void *func, dim3 gridDim, + dim3 blockDim, void **args, + size_t sharedMem, cudaStream_t stream); + +#endif + +extern "C" __device__ int printf(const char*, ...); From 82d18f5a14141a2d698c6446def63bcf45915037 Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Thu, 6 Feb 2025 00:03:40 +0000 Subject: [PATCH 2230/2301] [CIR][CUDA] Initial support for device compilation (#1311) This allows a simple CUDA file to compile with `-fcuda-is-device`. --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 39 +++++++++++++------- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 6 ++- clang/test/CIR/CodeGen/CUDA/simple-device.cu | 14 +++++++ 4 files changed, 46 insertions(+), 17 deletions(-) create mode 100644 clang/test/CIR/CodeGen/CUDA/simple-device.cu diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 851a2230bf30..9a15e3337f32 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1639,9 +1639,9 @@ static void getTrivialDefaultFunctionAttributes( // TODO: NoThrow attribute should be added for other GPU modes CUDA, SYCL, // HIP, OpenMP offload. // AFAIK, neither of them support exceptions in device code. - if ((langOpts.CUDA && langOpts.CUDAIsDevice) || langOpts.SYCLIsDevice) + if (langOpts.SYCLIsDevice) llvm_unreachable("NYI"); - if (langOpts.OpenCL) { + if (langOpts.OpenCL || (langOpts.CUDA && langOpts.CUDAIsDevice)) { auto noThrow = cir::NoThrowAttr::get(CGM.getBuilder().getContext()); funcAttrs.set(noThrow.getMnemonic(), noThrow); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 444883f6d774..c58d260e166a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -516,16 +516,32 @@ void CIRGenModule::emitGlobal(GlobalDecl GD) { assert(!Global->hasAttr() && "NYI"); if (langOpts.CUDA) { - if (langOpts.CUDAIsDevice) - llvm_unreachable("NYI"); + if (langOpts.CUDAIsDevice) { + // This will implicitly mark templates and their + // specializations as __host__ __device__. + if (langOpts.OffloadImplicitHostDeviceTemplates) + llvm_unreachable("NYI"); - if (dyn_cast(Global)) - llvm_unreachable("NYI"); + // This maps some parallel standard libraries implicitly + // to GPU, even when they are not marked __device__. + if (langOpts.HIPStdPar) + llvm_unreachable("NYI"); - // We must skip __device__ functions when compiling for host. - if (!Global->hasAttr() && Global->hasAttr()) { - return; + if (Global->hasAttr()) + llvm_unreachable("NYI"); + + if (!Global->hasAttr()) + return; + } else { + // We must skip __device__ functions when compiling for host. + if (!Global->hasAttr() && + Global->hasAttr()) { + return; + } } + + if (dyn_cast(Global)) + llvm_unreachable("NYI"); } if (langOpts.OpenMP) { @@ -2415,8 +2431,6 @@ StringRef CIRGenModule::getMangledName(GlobalDecl GD) { } } - assert(!langOpts.CUDAIsDevice && "NYI"); - // Keep the first result in the case of a mangling collision. const auto *ND = cast(GD.getDecl()); std::string MangledName = getMangledNameImpl(*this, GD, ND); @@ -3099,7 +3113,8 @@ void CIRGenModule::emitDeferred(unsigned recursionLimit) { // Emit CUDA/HIP static device variables referenced by host code only. Note we // should not clear CUDADeviceVarODRUsedByHost since it is still needed for // further handling. - if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { + if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice && + !getASTContext().CUDADeviceVarODRUsedByHost.empty()) { llvm_unreachable("NYI"); } @@ -3392,10 +3407,6 @@ void CIRGenModule::Release() { llvm_unreachable("NYI"); } - if (langOpts.CUDAIsDevice && getTriple().isNVPTX()) { - llvm_unreachable("NYI"); - } - if (langOpts.EHAsynch) llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 916010a4f19c..66d6a57e242a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -348,7 +348,11 @@ mlir::Type CIRGenTypes::convertType(QualType T) { // For the device-side compilation, CUDA device builtin surface/texture types // may be represented in different types. - assert(!astContext.getLangOpts().CUDAIsDevice && "not implemented"); + if (astContext.getLangOpts().CUDAIsDevice) { + if (Ty->isCUDADeviceBuiltinSurfaceType() || + Ty->isCUDADeviceBuiltinTextureType()) + llvm_unreachable("NYI"); + } if (const auto *recordType = dyn_cast(T)) return convertRecordDeclType(recordType->getDecl()); diff --git a/clang/test/CIR/CodeGen/CUDA/simple-device.cu b/clang/test/CIR/CodeGen/CUDA/simple-device.cu new file mode 100644 index 000000000000..c19a09a7e40b --- /dev/null +++ b/clang/test/CIR/CodeGen/CUDA/simple-device.cu @@ -0,0 +1,14 @@ +#include "../Inputs/cuda.h" + +// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device \ +// RUN: -fclangir -emit-cir -o - %s | FileCheck %s + +// This shouldn't emit. +__host__ void host_fn(int *a, int *b, int *c) {} + +// CHECK-NOT: cir.func @_Z7host_fnPiS_S_ + +// This should emit as a normal C++ function. +__device__ void device_fn(int* a, double b, float c) {} + +// CIR: cir.func @_Z9device_fnPidf From 95a497b452dfd8db3189f544f4a47cf18b889e30 Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Wed, 5 Feb 2025 16:30:31 -0800 Subject: [PATCH 2231/2301] [CIR] Enable cir.bool binops (#1312) Add !cir.bool to the list of types accepted by the binop rewriter. Although we don't generate boolean binops from logical operations on C++ boolean values, there will be cases where such operations are useful while generating conditions for other operations, such as the overflow checks needed for array new handling. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 ++-- clang/test/CIR/Lowering/binop-bool.cir | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/Lowering/binop-bool.cir diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 0331122f35d2..48841f716bf4 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2678,8 +2678,8 @@ mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite( "inconsistent operands' types not supported yet"); mlir::Type type = op.getRhs().getType(); - assert((mlir::isa(type)) && + assert((mlir::isa(type)) && "operand type not supported yet"); auto llvmTy = getTypeConverter()->convertType(op.getType()); diff --git a/clang/test/CIR/Lowering/binop-bool.cir b/clang/test/CIR/Lowering/binop-bool.cir new file mode 100644 index 000000000000..7267c407cc0a --- /dev/null +++ b/clang/test/CIR/Lowering/binop-bool.cir @@ -0,0 +1,18 @@ +// RUN: cir-opt %s -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +module { + cir.func @foo() { + %0 = cir.alloca !cir.bool, !cir.ptr, ["a", init] {alignment = 4 : i64} + %1 = cir.alloca !cir.bool, !cir.ptr, ["b", init] {alignment = 4 : i64} + %2 = cir.load %0 : !cir.ptr, !cir.bool + %3 = cir.load %1 : !cir.ptr, !cir.bool + %4 = cir.binop(or, %2, %3) : !cir.bool + // CHECK: = llvm.or {{.*}}, {{.*}} : i1 + %5 = cir.binop(xor, %2, %3) : !cir.bool + // CHECK: = llvm.xor {{.*}}, {{.*}} : i1 + %6 = cir.binop(and, %2, %3) : !cir.bool + // CHECK: = llvm.and {{.*}}, {{.*}} : i1 + cir.return + } +} From 6d1b387502cf0590453b266eaa7fa1f5eb8b2edf Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Wed, 5 Feb 2025 16:51:24 -0800 Subject: [PATCH 2232/2301] [CIR] Silence warnings introduced in the past few weeks --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 42 ++++++++++++------- clang/lib/CIR/CodeGen/CIRGenBuilder.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 1 + clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 2 - clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 1 - clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 35 ++++++++++++++-- clang/lib/CIR/FrontendAction/CIRGenAction.cpp | 6 +-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 -- 8 files changed, 62 insertions(+), 31 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 607f62cea8ca..62fe88a9d552 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -737,6 +737,13 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(), cir::AtomicFetchKind::Nand); break; + case AtomicExpr::AO__atomic_test_and_set: { + llvm_unreachable("NYI"); + } + + case AtomicExpr::AO__atomic_clear: { + llvm_unreachable("NYI"); + } } assert(Op.size() && "expected operation name to build"); @@ -854,6 +861,8 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__c11_atomic_load: case AtomicExpr::AO__opencl_atomic_load: case AtomicExpr::AO__hip_atomic_load: + case AtomicExpr::AO__atomic_test_and_set: + case AtomicExpr::AO__atomic_clear: break; case AtomicExpr::AO__atomic_load: @@ -1144,6 +1153,8 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__opencl_atomic_fetch_max: case AtomicExpr::AO__scoped_atomic_fetch_max: case AtomicExpr::AO__scoped_atomic_max_fetch: + case AtomicExpr::AO__atomic_test_and_set: + case AtomicExpr::AO__atomic_clear: llvm_unreachable("Integral atomic operations always become atomicrmw!"); } @@ -1175,22 +1186,21 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); } - [[maybe_unused]] bool IsStore = - E->getOp() == AtomicExpr::AO__c11_atomic_store || - E->getOp() == AtomicExpr::AO__opencl_atomic_store || - E->getOp() == AtomicExpr::AO__hip_atomic_store || - E->getOp() == AtomicExpr::AO__atomic_store || - E->getOp() == AtomicExpr::AO__atomic_store_n || - E->getOp() == AtomicExpr::AO__scoped_atomic_store || - E->getOp() == AtomicExpr::AO__scoped_atomic_store_n; - [[maybe_unused]] bool IsLoad = - E->getOp() == AtomicExpr::AO__c11_atomic_load || - E->getOp() == AtomicExpr::AO__opencl_atomic_load || - E->getOp() == AtomicExpr::AO__hip_atomic_load || - E->getOp() == AtomicExpr::AO__atomic_load || - E->getOp() == AtomicExpr::AO__atomic_load_n || - E->getOp() == AtomicExpr::AO__scoped_atomic_load || - E->getOp() == AtomicExpr::AO__scoped_atomic_load_n; + bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store || + E->getOp() == AtomicExpr::AO__opencl_atomic_store || + E->getOp() == AtomicExpr::AO__hip_atomic_store || + E->getOp() == AtomicExpr::AO__atomic_store || + E->getOp() == AtomicExpr::AO__atomic_store_n || + E->getOp() == AtomicExpr::AO__scoped_atomic_store || + E->getOp() == AtomicExpr::AO__scoped_atomic_store_n || + E->getOp() == AtomicExpr::AO__atomic_clear; + bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load || + E->getOp() == AtomicExpr::AO__opencl_atomic_load || + E->getOp() == AtomicExpr::AO__hip_atomic_load || + E->getOp() == AtomicExpr::AO__atomic_load || + E->getOp() == AtomicExpr::AO__atomic_load_n || + E->getOp() == AtomicExpr::AO__scoped_atomic_load || + E->getOp() == AtomicExpr::AO__scoped_atomic_load_n; if (auto ordAttr = getConstOpIntAttr(Order)) { // We should not ever get to a case where the ordering isn't a valid CABI diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp index 905ca67aafd5..f4c9506d02d9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp @@ -118,7 +118,7 @@ uint64_t CIRGenBuilderTy::computeOffsetFromGlobalViewIndices( const cir::CIRDataLayout &layout, mlir::Type typ, llvm::ArrayRef indexes) { - uint64_t offset = 0; + int64_t offset = 0; for (auto idx : indexes) { if (auto sTy = dyn_cast(typ)) { offset += sTy.getElementOffset(layout.layout, idx); diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index c1a4ac61a5d6..da646e24333d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -813,6 +813,7 @@ void CIRGenFunction::emitDecl(const Decl &D) { case Decl::Friend: case Decl::FriendTemplate: case Decl::Block: + case Decl::OutlinedFunction: case Decl::Captured: case Decl::UsingShadow: case Decl::ConstructorUsingShadow: diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 15dc33f2df64..762de00d0cf9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -882,8 +882,6 @@ void CIRGenFunction::emitNewArrayInitializer( CleanupDeactivationScope deactivation(*this); CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType); - CharUnits ElementAlign = - BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize); // Attempt to perform zero-initialization using memset. auto TryMemsetInitialization = [&]() -> bool { diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index fbf901c47a7c..394cca867a62 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2751,7 +2751,6 @@ Address CIRGenARMCXXABI::initializeArrayCookie(CIRGenFunction &cgf, Address cookie = Address(cookiePtr, cgf.SizeTy, newPtr.getAlignment()); ASTContext &ctx = getContext(); - CharUnits sizeSize = cgf.getSizeSize(); mlir::Location loc = cgf.getLoc(expr->getSourceRange()); // The first element is the element size. diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 1fa1653bc1cf..450fd8647468 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -107,9 +107,6 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *S, switch (S->getStmtClass()) { case Stmt::OMPScopeDirectiveClass: llvm_unreachable("NYI"); - case Stmt::OpenACCCombinedConstructClass: - case Stmt::OpenACCComputeConstructClass: - case Stmt::OpenACCLoopConstructClass: case Stmt::OMPErrorDirectiveClass: case Stmt::NoStmtClass: case Stmt::CXXCatchStmtClass: @@ -128,6 +125,7 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *S, case Stmt::DefaultStmtClass: case Stmt::CaseStmtClass: case Stmt::SEHLeaveStmtClass: + case Stmt::SYCLKernelCallStmtClass: llvm_unreachable("should have emitted these statements as simple"); #define STMT(Type, Base) @@ -271,7 +269,19 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *S, case Stmt::OMPReverseDirectiveClass: case Stmt::OMPInterchangeDirectiveClass: case Stmt::OMPAssumeDirectiveClass: - case Stmt::OMPMaskedDirectiveClass: { + case Stmt::OMPMaskedDirectiveClass: + case Stmt::OpenACCComputeConstructClass: + case Stmt::OpenACCLoopConstructClass: + case Stmt::OpenACCCombinedConstructClass: + case Stmt::OpenACCDataConstructClass: + case Stmt::OpenACCEnterDataConstructClass: + case Stmt::OpenACCExitDataConstructClass: + case Stmt::OpenACCHostDataConstructClass: + case Stmt::OpenACCWaitConstructClass: + case Stmt::OpenACCInitConstructClass: + case Stmt::OpenACCShutdownConstructClass: + case Stmt::OpenACCSetConstructClass: + case Stmt::OpenACCUpdateConstructClass: { llvm::errs() << "CIR codegen for '" << S->getStmtClassName() << "' not implemented\n"; assert(0 && "not implemented"); @@ -328,6 +338,23 @@ mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *S, llvm::errs() << "CIR codegen for '" << S->getStmtClassName() << "' not implemented\n"; assert(0 && "not implemented"); + case Stmt::SYCLKernelCallStmtClass: + // SYCL kernel call statements are generated as wrappers around the body + // of functions declared with the sycl_kernel_entry_point attribute. Such + // functions are used to specify how a SYCL kernel (a function object) is + // to be invoked; the SYCL kernel call statement contains a transformed + // variation of the function body and is used to generate a SYCL kernel + // caller function; a function that serves as the device side entry point + // used to execute the SYCL kernel. The sycl_kernel_entry_point attributed + // function is invoked by host code in order to trigger emission of the + // device side SYCL kernel caller function and to generate metadata needed + // by SYCL run-time library implementations; the function is otherwise + // intended to have no effect. As such, the function body is not evaluated + // as part of the invocation during host compilation (and the function + // should not be called or emitted during device compilation); the SYCL + // kernel call statement is thus handled as a null statement for the + // purpose of code generation. + break; } return mlir::success(); diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index 5cb81e89388f..9fbb74244907 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -111,10 +111,10 @@ class CIRGenConsumer : public clang::ASTConsumer { CompilerInstance &compilerInstance; DiagnosticsEngine &diagnosticsEngine; - const HeaderSearchOptions &headerSearchOptions; + [[maybe_unused]] const HeaderSearchOptions &headerSearchOptions; CodeGenOptions &codeGenOptions; - const TargetOptions &targetOptions; - const LangOptions &langOptions; + [[maybe_unused]] const TargetOptions &targetOptions; + [[maybe_unused]] const LangOptions &langOptions; const FrontendOptions &feOptions; std::unique_ptr outputStream; diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 48841f716bf4..3817eb3960a5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -830,10 +830,6 @@ void convertSideEffectForCall(mlir::Operation *callOp, noUnwind = true; willReturn = true; break; - - default: - callOp->emitError("unknown side effect"); - break; } } From a07dbdf4dd15ee10cdf94f923624bd9d366f3203 Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Thu, 6 Feb 2025 10:11:08 -0800 Subject: [PATCH 2233/2301] [CIR] Implement array new handling for variable array size (#1313) Implement CIR code generation for array new in the case where the array size is represented by a variable. --- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 109 ++++++++++++- clang/test/CIR/CodeGen/new.cpp | 203 ++++++++++++++++++------ clang/test/CIR/Lowering/new.cpp | 106 ++++++++++++- 3 files changed, 365 insertions(+), 53 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 762de00d0cf9..e7948cf98292 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -672,8 +672,113 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &CGF, const CXXNewExpr *e, size = CGF.getBuilder().getConstInt(Loc, allocationSize); } } else { - // TODO: Handle the variable size case - llvm_unreachable("NYI"); + // Create a value for the variable number of elements + numElements = CGF.emitScalarExpr(*e->getArraySize()); + auto numElementsType = mlir::cast(numElements.getType()); + unsigned numElementsWidth = numElementsType.getWidth(); + + // We might need check for overflow. + + mlir::Value hasOverflow = nullptr; + // The clang LLVM IR codegen checks for the size variable being signed, + // having a smaller width than size_t, and having a larger width than + // size_t. However, the AST implicitly casts the size variable to size_t + // so none of these conditions will ever be met. + bool isSigned = + (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType(); + assert(!isSigned && (numElementsWidth == sizeWidth) && + (numElements.getType() == CGF.SizeTy) && + "Expected array size to be implicitly cast to size_t!"); + + // There are up to three conditions we need to test for: + // 1) if minElements > 0, we need to check whether numElements is smaller + // than that. + // 2) we need to compute + // sizeWithoutCookie := numElements * typeSizeMultiplier + // and check whether it overflows; and + // 3) if we need a cookie, we need to compute + // size := sizeWithoutCookie + cookieSize + // and check whether it overflows. + + if (minElements) { + // Don't allow allocation of fewer elements than we have initializers. + if (!hasOverflow) { + // FIXME: Avoid creating this twice. It may happen above. + mlir::Value minElementsV = CGF.getBuilder().getConstInt( + Loc, llvm::APInt(sizeWidth, minElements)); + hasOverflow = CGF.getBuilder().createCompare(Loc, cir::CmpOpKind::lt, + numElements, minElementsV); + } + } + + size = numElements; + + // Multiply by the type size if necessary. This multiplier + // includes all the factors for nested arrays. + // + // This step also causes numElements to be scaled up by the + // nested-array factor if necessary. Overflow on this computation + // can be ignored because the result shouldn't be used if + // allocation fails. + if (typeSizeMultiplier != 1) { + mlir::Value tsmV = CGF.getBuilder().getConstInt(Loc, typeSizeMultiplier); + auto mulResult = CGF.getBuilder().createBinOpOverflowOp( + Loc, mlir::cast(CGF.SizeTy), + cir::BinOpOverflowKind::Mul, size, tsmV); + + if (hasOverflow) + hasOverflow = + CGF.getBuilder().createOr(hasOverflow, mulResult.overflow); + else + hasOverflow = mulResult.overflow; + + size = mulResult.result; + + // Also scale up numElements by the array size multiplier. + if (arraySizeMultiplier != 1) { + // If the base element type size is 1, then we can re-use the + // multiply we just did. + if (typeSize.isOne()) { + assert(arraySizeMultiplier == typeSizeMultiplier); + numElements = size; + + // Otherwise we need a separate multiply. + } else { + mlir::Value asmV = + CGF.getBuilder().getConstInt(Loc, arraySizeMultiplier); + numElements = CGF.getBuilder().createMul(numElements, asmV); + } + } + } else { + // numElements doesn't need to be scaled. + assert(arraySizeMultiplier == 1); + } + + // Add in the cookie size if necessary. + if (cookieSize != 0) { + sizeWithoutCookie = size; + mlir::Value cookieSizeV = CGF.getBuilder().getConstInt(Loc, cookieSize); + auto addResult = CGF.getBuilder().createBinOpOverflowOp( + Loc, mlir::cast(CGF.SizeTy), + cir::BinOpOverflowKind::Add, size, cookieSizeV); + + if (hasOverflow) + hasOverflow = + CGF.getBuilder().createOr(hasOverflow, addResult.overflow); + else + hasOverflow = addResult.overflow; + + size = addResult.result; + } + + // If we had any possibility of dynamic overflow, make a select to + // overwrite 'size' with an all-ones value, which should cause + // operator new to throw. + if (hasOverflow) { + mlir::Value allOnes = + CGF.getBuilder().getConstInt(Loc, llvm::APInt::getAllOnes(sizeWidth)); + size = CGF.getBuilder().createSelect(Loc, hasOverflow, allOnes, size); + } } if (cookieSize == 0) diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index 2750929832e0..f158ab0bb0f3 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -67,9 +67,9 @@ void t_new_constant_size() { // CHECK: cir.func @_Z19t_new_constant_sizev() // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} -// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<16> : !u64i -// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<128> : !u64i -// CHECK: %3 = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %[[NUM_ELEMENTS:.*]] = cir.const #cir.int<16> : !u64i +// CHECK: %[[ALLOCATION_SIZE:.*]] = cir.const #cir.int<128> : !u64i +// CHECK: %3 = cir.call @_Znam(%[[ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr // CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr // CHECK: cir.store %4, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.return @@ -83,9 +83,9 @@ void t_new_multidim_constant_size() { // CHECK: cir.func @_Z28t_new_multidim_constant_sizev() // CHECK: %0 = cir.alloca !cir.ptr x 3>>, !cir.ptr x 3>>>, ["p", init] {alignment = 8 : i64} -// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<24> : !u64i -// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<192> : !u64i -// CHECK: %3 = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %[[NUM_ELEMENTS:.*]] = cir.const #cir.int<24> : !u64i +// CHECK: %[[ALLOCATION_SIZE:.*]] = cir.const #cir.int<192> : !u64i +// CHECK: %3 = cir.call @_Znam(%[[ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr // CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr // CHECK: %5 = cir.cast(bitcast, %0 : !cir.ptr x 3>>>), !cir.ptr> // CHECK: cir.store %4, %5 : !cir.ptr, !cir.ptr> @@ -102,15 +102,15 @@ void t_constant_size_nontrivial() { // CHECK: cir.func @_Z26t_constant_size_nontrivialv() // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} -// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<3> : !u64i -// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<3> : !u64i -// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<11> : !u64i -// CHECK: %4 = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %[[NUM_ELEMENTS:.*]] = cir.const #cir.int<3> : !u64i +// CHECK: %[[SIZE_WITHOUT_COOKIE:.*]] = cir.const #cir.int<3> : !u64i +// CHECK: %[[ALLOCATION_SIZE:.*]] = cir.const #cir.int<11> : !u64i +// CHECK: %4 = cir.call @_Znam(%[[ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr // CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr -// CHECK: cir.store %[[#NUM_ELEMENTS]], %5 : !u64i, !cir.ptr +// CHECK: cir.store %[[NUM_ELEMENTS]], %5 : !u64i, !cir.ptr // CHECK: %6 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr -// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<8> : !s32i -// CHECK: %8 = cir.ptr_stride(%6 : !cir.ptr, %[[#COOKIE_SIZE]] : !s32i), !cir.ptr +// CHECK: %[[COOKIE_SIZE:.*]] = cir.const #cir.int<8> : !s32i +// CHECK: %8 = cir.ptr_stride(%6 : !cir.ptr, %[[COOKIE_SIZE]] : !s32i), !cir.ptr // CHECK: %9 = cir.cast(bitcast, %8 : !cir.ptr), !cir.ptr // CHECK: cir.store %9, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.return @@ -131,15 +131,15 @@ void t_constant_size_nontrivial2() { // CHECK: cir.func @_Z27t_constant_size_nontrivial2v() // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["p", init] {alignment = 8 : i64} -// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<3> : !u64i -// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<12> : !u64i -// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<20> : !u64i -// CHECK: %4 = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %[[NUM_ELEMENTS:.*]] = cir.const #cir.int<3> : !u64i +// CHECK: %[[SIZE_WITHOUT_COOKIE:.*]] = cir.const #cir.int<12> : !u64i +// CHECK: %[[ALLOCATION_SIZE:.*]] = cir.const #cir.int<20> : !u64i +// CHECK: %4 = cir.call @_Znam(%[[ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr // CHECK: %5 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr -// CHECK: cir.store %[[#NUM_ELEMENTS]], %5 : !u64i, !cir.ptr +// CHECK: cir.store %[[NUM_ELEMENTS]], %5 : !u64i, !cir.ptr // CHECK: %6 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr -// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<8> : !s32i -// CHECK: %8 = cir.ptr_stride(%6 : !cir.ptr, %[[#COOKIE_SIZE]] : !s32i), !cir.ptr +// CHECK: %[[COOKIE_SIZE:.*]] = cir.const #cir.int<8> : !s32i +// CHECK: %8 = cir.ptr_stride(%6 : !cir.ptr, %[[COOKIE_SIZE]] : !s32i), !cir.ptr // CHECK: %9 = cir.cast(bitcast, %8 : !cir.ptr), !cir.ptr // CHECK: cir.store %9, %0 : !cir.ptr, !cir.ptr> // CHECK: cir.return @@ -153,39 +153,142 @@ void t_constant_size_memset_init() { // are no constructor calls needed. // CHECK: cir.func @_Z27t_constant_size_memset_initv() -// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<16> : !u64i -// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<64> : !u64i -// CHECK: %[[#ALLOC_PTR:]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr -// CHECK: %[[#ELEM_PTR:]] = cir.cast(bitcast, %[[#ALLOC_PTR]] : !cir.ptr), !cir.ptr -// CHECK: %[[#VOID_PTR:]] = cir.cast(bitcast, %[[#ELEM_PTR]] : !cir.ptr), !cir.ptr -// CHECK: %[[#ZERO:]] = cir.const #cir.int<0> : !u8i -// CHECK: %[[#ZERO_I32:]] = cir.cast(integral, %[[#ZERO]] : !u8i), !s32i -// CHECK: cir.libc.memset %[[#ALLOCATION_SIZE]] bytes from %[[#VOID_PTR]] set to %[[#ZERO_I32]] : !cir.ptr, !s32i, !u64i +// CHECK: %[[NUM_ELEMENTS:.*]] = cir.const #cir.int<16> : !u64i +// CHECK: %[[ALLOCATION_SIZE:.*]] = cir.const #cir.int<64> : !u64i +// CHECK: %[[ALLOC_PTR:.*]] = cir.call @_Znam(%[[ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %[[ELEM_PTR:.*]] = cir.cast(bitcast, %[[ALLOC_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[VOID_PTR:.*]] = cir.cast(bitcast, %[[ELEM_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[ZERO:.*]] = cir.const #cir.int<0> : !u8i +// CHECK: %[[ZERO_I32:.*]] = cir.cast(integral, %[[ZERO]] : !u8i), !s32i +// CHECK: cir.libc.memset %[[ALLOCATION_SIZE]] bytes from %[[VOID_PTR]] set to %[[ZERO_I32]] : !cir.ptr, !s32i, !u64i void t_constant_size_partial_init() { auto p = new int[16] { 1, 2, 3 }; } // CHECK: cir.func @_Z28t_constant_size_partial_initv() -// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<16> : !u64i -// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<64> : !u64i -// CHECK: %[[#ALLOC_PTR:]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr -// CHECK: %[[#ELEM_0_PTR:]] = cir.cast(bitcast, %[[#ALLOC_PTR]] : !cir.ptr), !cir.ptr -// CHECK: %[[#CONST_ONE:]] = cir.const #cir.int<1> : !s32i -// CHECK: cir.store %[[#CONST_ONE]], %[[#ELEM_0_PTR]] : !s32i, !cir.ptr -// CHECK: %[[#OFFSET:]] = cir.const #cir.int<1> : !s32i -// CHECK: %[[#ELEM_1_PTR:]] = cir.ptr_stride(%[[#ELEM_0_PTR]] : !cir.ptr, %[[#OFFSET]] : !s32i), !cir.ptr -// CHECK: %[[#CONST_TWO:]] = cir.const #cir.int<2> : !s32i -// CHECK: cir.store %[[#CONST_TWO]], %[[#ELEM_1_PTR]] : !s32i, !cir.ptr -// CHECK: %[[#OFFSET1:]] = cir.const #cir.int<1> : !s32i -// CHECK: %[[#ELEM_2_PTR:]] = cir.ptr_stride(%[[#ELEM_1_PTR]] : !cir.ptr, %[[#OFFSET1]] : !s32i), !cir.ptr -// CHECK: %[[#CONST_THREE:]] = cir.const #cir.int<3> : !s32i -// CHECK: cir.store %[[#CONST_THREE]], %[[#ELEM_2_PTR]] : !s32i, !cir.ptr -// CHECK: %[[#OFFSET2:]] = cir.const #cir.int<1> : !s32i -// CHECK: %[[#ELEM_3_PTR:]] = cir.ptr_stride(%[[#ELEM_2_PTR]] : !cir.ptr, %[[#OFFSET2]] : !s32i), !cir.ptr -// CHECK: %[[#INIT_SIZE:]] = cir.const #cir.int<12> : !u64i -// CHECK: %[[#REMAINING_SIZE:]] = cir.binop(sub, %[[#ALLOCATION_SIZE]], %[[#INIT_SIZE]]) : !u64i -// CHECK: %[[#VOID_PTR:]] = cir.cast(bitcast, %[[#ELEM_3_PTR]] : !cir.ptr), !cir.ptr -// CHECK: %[[#ZERO:]] = cir.const #cir.int<0> : !u8i -// CHECK: %[[#ZERO_I32:]] = cir.cast(integral, %[[#ZERO]] : !u8i), !s32i -// CHECK: cir.libc.memset %[[#REMAINING_SIZE]] bytes from %[[#VOID_PTR]] set to %[[#ZERO_I32]] : !cir.ptr, !s32i, !u64i +// CHECK: %[[NUM_ELEMENTS:.*]] = cir.const #cir.int<16> : !u64i +// CHECK: %[[ALLOCATION_SIZE:.*]] = cir.const #cir.int<64> : !u64i +// CHECK: %[[ALLOC_PTR:.*]] = cir.call @_Znam(%[[ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr +// CHECK: %[[ELEM_0_PTR:.*]] = cir.cast(bitcast, %[[ALLOC_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: cir.store %[[CONST_ONE]], %[[ELEM_0_PTR]] : !s32i, !cir.ptr +// CHECK: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: %[[ELEM_1_PTR:.*]] = cir.ptr_stride(%[[ELEM_0_PTR]] : !cir.ptr, %[[OFFSET]] : !s32i), !cir.ptr +// CHECK: %[[CONST_TWO:.*]] = cir.const #cir.int<2> : !s32i +// CHECK: cir.store %[[CONST_TWO]], %[[ELEM_1_PTR]] : !s32i, !cir.ptr +// CHECK: %[[OFFSET1:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: %[[ELEM_2_PTR:.*]] = cir.ptr_stride(%[[ELEM_1_PTR]] : !cir.ptr, %[[OFFSET1]] : !s32i), !cir.ptr +// CHECK: %[[CONST_THREE:.*]] = cir.const #cir.int<3> : !s32i +// CHECK: cir.store %[[CONST_THREE]], %[[ELEM_2_PTR]] : !s32i, !cir.ptr +// CHECK: %[[OFFSET2:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: %[[ELEM_3_PTR:.*]] = cir.ptr_stride(%[[ELEM_2_PTR]] : !cir.ptr, %[[OFFSET2]] : !s32i), !cir.ptr +// CHECK: %[[INIT_SIZE:.*]] = cir.const #cir.int<12> : !u64i +// CHECK: %[[REMAINING_SIZE:.*]] = cir.binop(sub, %[[ALLOCATION_SIZE]], %[[INIT_SIZE]]) : !u64i +// CHECK: %[[VOID_PTR:.*]] = cir.cast(bitcast, %[[ELEM_3_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[ZERO:.*]] = cir.const #cir.int<0> : !u8i +// CHECK: %[[ZERO_I32:.*]] = cir.cast(integral, %[[ZERO]] : !u8i), !s32i +// CHECK: cir.libc.memset %[[REMAINING_SIZE]] bytes from %[[VOID_PTR]] set to %[[ZERO_I32]] : !cir.ptr, !s32i, !u64i + +void t_new_var_size(size_t n) { + auto p = new char[n]; +} + +// CHECK: cir.func @_Z14t_new_var_sizem +// CHECK: %[[N:.*]] = cir.load %[[ARG_ALLOCA:.*]] +// CHECK: %[[PTR:.*]] = cir.call @_Znam(%[[N]]) : (!u64i) + +void t_new_var_size2(int n) { + auto p = new char[n]; +} + +// CHECK: cir.func @_Z15t_new_var_size2i +// CHECK: %[[N:.*]] = cir.load %[[ARG_ALLOCA:.*]] +// CHECK: %[[N_SIZE_T:.*]] = cir.cast(integral, %[[N]] : !s32i), !u64i +// CHECK: %[[PTR:.*]] = cir.call @_Znam(%[[N_SIZE_T]]) : (!u64i) + +void t_new_var_size3(size_t n) { + auto p = new double[n]; +} + +// CHECK: cir.func @_Z15t_new_var_size3m +// CHECK: %[[N:.*]] = cir.load %[[ARG_ALLOCA:.*]] +// CHECK: %[[ELEMENT_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK: %[[RESULT:.*]], %[[OVERFLOW:.*]] = cir.binop.overflow(mul, %[[N]], %[[ELEMENT_SIZE]]) : !u64i, (!u64i, !cir.bool) +// CHECK: %[[ALL_ONES:.*]] = cir.const #cir.int<18446744073709551615> : !u64i +// CHECK: %[[ALLOC_SIZE:.*]] = cir.select if %[[OVERFLOW]] then %[[ALL_ONES]] else %[[RESULT]] : (!cir.bool, !u64i, !u64i) +// CHECK: %[[PTR:.*]] = cir.call @_Znam(%[[ALLOC_SIZE]]) : (!u64i) + +void t_new_var_size4(int n) { + auto p = new double[n]; +} + +// CHECK: cir.func @_Z15t_new_var_size4i +// CHECK: %[[N:.*]] = cir.load %[[ARG_ALLOCA:.*]] +// CHECK: %[[N_SIZE_T:.*]] = cir.cast(integral, %[[N]] : !s32i), !u64i +// CHECK: %[[ELEMENT_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK: %[[RESULT:.*]], %[[OVERFLOW:.*]] = cir.binop.overflow(mul, %[[N_SIZE_T]], %[[ELEMENT_SIZE]]) : !u64i, (!u64i, !cir.bool) +// CHECK: %[[ALL_ONES:.*]] = cir.const #cir.int<18446744073709551615> : !u64i +// CHECK: %[[ALLOC_SIZE:.*]] = cir.select if %[[OVERFLOW]] then %[[ALL_ONES]] else %[[RESULT]] : (!cir.bool, !u64i, !u64i) +// CHECK: %[[PTR:.*]] = cir.call @_Znam(%[[ALLOC_SIZE]]) : (!u64i) + +void t_new_var_size5(int n) { + auto p = new double[n][2][3]; +} + +// NUM_ELEMENTS isn't used in this case because there is no cookie. + +// CHECK: cir.func @_Z15t_new_var_size5i +// CHECK: %[[N:.*]] = cir.load %[[ARG_ALLOCA:.*]] +// CHECK: %[[N_SIZE_T:.*]] = cir.cast(integral, %[[N]] : !s32i), !u64i +// CHECK: %[[ELEMENT_SIZE:.*]] = cir.const #cir.int<48> : !u64i +// CHECK: %[[RESULT:.*]], %[[OVERFLOW:.*]] = cir.binop.overflow(mul, %[[N_SIZE_T]], %[[ELEMENT_SIZE]]) : !u64i, (!u64i, !cir.bool) +// CHECK: %[[NUM_ELEMENTS_MULTIPLIER:.*]] = cir.const #cir.int<6> +// CHECK: %[[NUM_ELEMENTS:.*]] = cir.binop(mul, %[[N_SIZE_T]], %[[NUM_ELEMENTS_MULTIPLIER]]) : !u64i +// CHECK: %[[ALL_ONES:.*]] = cir.const #cir.int<18446744073709551615> : !u64i +// CHECK: %[[ALLOC_SIZE:.*]] = cir.select if %[[OVERFLOW]] then %[[ALL_ONES]] else %[[RESULT]] : (!cir.bool, !u64i, !u64i) +// CHECK: %[[PTR:.*]] = cir.call @_Znam(%[[ALLOC_SIZE]]) : (!u64i) + +void t_new_var_size6(int n) { + auto p = new double[n] { 1, 2, 3 }; +} + +// CHECK: cir.func @_Z15t_new_var_size6i +// CHECK: %[[N:.*]] = cir.load %[[ARG_ALLOCA:.*]] +// CHECK: %[[N_SIZE_T:.*]] = cir.cast(integral, %[[N]] : !s32i), !u64i +// CHECK: %[[MIN_SIZE:.*]] = cir.const #cir.int<3> : !u64i +// CHECK: %[[LT_MIN_SIZE:.*]] = cir.cmp(lt, %[[N_SIZE_T]], %[[MIN_SIZE]]) : !u64i, !cir.bool +// CHECK: %[[ELEMENT_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK: %[[RESULT:.*]], %[[OVERFLOW:.*]] = cir.binop.overflow(mul, %[[N_SIZE_T]], %[[ELEMENT_SIZE]]) : !u64i, (!u64i, !cir.bool) +// CHECK: %[[ANY_OVERFLOW:.*]] = cir.binop(or, %[[LT_MIN_SIZE]], %[[OVERFLOW]]) : !cir.bool +// CHECK: %[[ALL_ONES:.*]] = cir.const #cir.int<18446744073709551615> : !u64i +// CHECK: %[[ALLOC_SIZE:.*]] = cir.select if %[[ANY_OVERFLOW]] then %[[ALL_ONES]] else %[[RESULT]] : (!cir.bool, !u64i, !u64i) +// CHECK: %[[PTR:.*]] = cir.call @_Znam(%[[ALLOC_SIZE]]) : (!u64i) + +void t_new_var_size7(__int128 n) { + auto p = new double[n]; +} + +// CHECK: cir.func @_Z15t_new_var_size7n +// CHECK: %[[N:.*]] = cir.load %[[ARG_ALLOCA:.*]] +// CHECK: %[[N_SIZE_T:.*]] = cir.cast(integral, %[[N]] : !s128i), !u64i +// CHECK: %[[ELEMENT_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK: %[[RESULT:.*]], %[[OVERFLOW:.*]] = cir.binop.overflow(mul, %[[N_SIZE_T]], %[[ELEMENT_SIZE]]) : !u64i, (!u64i, !cir.bool) +// CHECK: %[[ALL_ONES:.*]] = cir.const #cir.int<18446744073709551615> : !u64i +// CHECK: %[[ALLOC_SIZE:.*]] = cir.select if %[[OVERFLOW]] then %[[ALL_ONES]] else %[[RESULT]] : (!cir.bool, !u64i, !u64i) +// CHECK: %[[PTR:.*]] = cir.call @_Znam(%[[ALLOC_SIZE]]) : (!u64i) + +void t_new_var_size_nontrivial(size_t n) { + auto p = new D[n]; +} + +// CHECK: cir.func @_Z25t_new_var_size_nontrivialm +// CHECK: %[[N:.*]] = cir.load %[[ARG_ALLOCA:.*]] +// CHECK: %[[ELEMENT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK: %[[SIZE_WITHOUT_COOKIE:.*]], %[[OVERFLOW:.*]] = cir.binop.overflow(mul, %[[N]], %[[ELEMENT_SIZE]]) : !u64i, (!u64i, !cir.bool) +// CHECK: %[[COOKIE_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK: %[[SIZE:.*]], %[[OVERFLOW2:.*]] = cir.binop.overflow(add, %[[SIZE_WITHOUT_COOKIE]], %[[COOKIE_SIZE]]) : !u64i, (!u64i, !cir.bool) +// CHECK: %[[ANY_OVERFLOW:.*]] = cir.binop(or, %[[OVERFLOW]], %[[OVERFLOW2]]) : !cir.bool +// CHECK: %[[ALL_ONES:.*]] = cir.const #cir.int<18446744073709551615> : !u64i +// CHECK: %[[ALLOC_SIZE:.*]] = cir.select if %[[ANY_OVERFLOW]] then %[[ALL_ONES]] else %[[SIZE]] : (!cir.bool, !u64i, !u64i) +// CHECK: %[[PTR:.*]] = cir.call @_Znam(%[[ALLOC_SIZE]]) : (!u64i) diff --git a/clang/test/CIR/Lowering/new.cpp b/clang/test/CIR/Lowering/new.cpp index bff825323e9e..2cac378f3848 100644 --- a/clang/test/CIR/Lowering/new.cpp +++ b/clang/test/CIR/Lowering/new.cpp @@ -1,6 +1,8 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -fclangir -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM +#include "std-cxx.h" + void t_new_constant_size() { auto p = new double[16]; } @@ -83,3 +85,105 @@ void t_constant_size_partial_init() { // LLVM: %[[ELEM_3_PTR:.*]] = getelementptr i32, ptr %[[ELEM_2_PTR]], i64 1 // LLVM: call void @llvm.memset.p0.i64(ptr %[[ELEM_3_PTR]], i8 0, i64 52, i1 false) // LLVM: store ptr %[[ADDR]], ptr %[[ALLOCA]], align 8 + +void t_new_var_size(size_t n) { + auto p = new char[n]; +} + +// LLVM: @_Z14t_new_var_sizem +// LLVM: %[[N:.*]] = load i64, ptr %[[ARG_ALLOCA:.*]] +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 %[[N]]) + +void t_new_var_size2(int n) { + auto p = new char[n]; +} + +// LLVM: @_Z15t_new_var_size2i +// LLVM: %[[N:.*]] = load i32, ptr %[[ARG_ALLOCA:.*]] +// LLVM: %[[N_SIZE_T:.*]] = sext i32 %[[N]] to i64 +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 %[[N_SIZE_T]]) + +void t_new_var_size3(size_t n) { + auto p = new double[n]; +} + +// LLVM: @_Z15t_new_var_size3m +// LLVM: %[[N:.*]] = load i64, ptr %[[ARG_ALLOCA:.*]] +// LLVM: %[[RESULT_PAIR:.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %[[N]], i64 8) +// LLVM: %[[RESULT:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 0 +// LLVM: %[[OVERFLOW:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 1 +// LLVM: %[[ALLOC_SIZE:.*]] = select i1 %[[OVERFLOW]], i64 -1, i64 %[[RESULT]] +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 %[[ALLOC_SIZE]]) + +void t_new_var_size4(int n) { + auto p = new double[n]; +} + +// LLVM: @_Z15t_new_var_size4i +// LLVM: %[[N:.*]] = load i32, ptr %[[ARG_ALLOCA:.*]] +// LLVM: %[[N_SIZE_T:.*]] = sext i32 %[[N]] to i64 +// LLVM: %[[RESULT_PAIR:.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %[[N_SIZE_T]], i64 8) +// LLVM: %[[RESULT:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 0 +// LLVM: %[[OVERFLOW:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 1 +// LLVM: %[[ALLOC_SIZE:.*]] = select i1 %[[OVERFLOW]], i64 -1, i64 %[[RESULT]] +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 %[[ALLOC_SIZE]]) + +void t_new_var_size5(int n) { + auto p = new double[n][2][3]; +} + +// NUM_ELEMENTS is not used in this case because cookies aren't required + +// LLVM: @_Z15t_new_var_size5i +// LLVM: %[[N:.*]] = load i32, ptr %[[ARG_ALLOCA:.*]] +// LLVM: %[[N_SIZE_T:.*]] = sext i32 %[[N]] to i64 +// LLVM: %[[RESULT_PAIR:.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %[[N_SIZE_T]], i64 48) +// LLVM: %[[RESULT:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 0 +// LLVM: %[[OVERFLOW:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 1 +// LLVM: %[[NUM_ELEMENTS:.*]] = mul i64 %[[N_SIZE_T]], 6 +// LLVM: %[[ALLOC_SIZE:.*]] = select i1 %[[OVERFLOW]], i64 -1, i64 %[[RESULT]] +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 %[[ALLOC_SIZE]]) + +void t_new_var_size6(int n) { + auto p = new double[n] { 1, 2, 3 }; +} + +// LLVM: @_Z15t_new_var_size6i +// LLVM: %[[N:.*]] = load i32, ptr %[[ARG_ALLOCA:.*]] +// LLVM: %[[N_SIZE_T:.*]] = sext i32 %[[N]] to i64 +// LLVM: %[[LT_MIN_SIZE:.*]] = icmp ult i64 %[[N_SIZE_T]], 3 +// LLVM: %[[RESULT_PAIR:.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %[[N_SIZE_T]], i64 8) +// LLVM: %[[RESULT:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 0 +// LLVM: %[[OVERFLOW:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 1 +// LLVM: %[[ANY_OVERFLOW:.*]] = or i1 %[[LT_MIN_SIZE]], %[[OVERFLOW]] +// LLVM: %[[ALLOC_SIZE:.*]] = select i1 %[[ANY_OVERFLOW]], i64 -1, i64 %[[RESULT]] +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 %[[ALLOC_SIZE]]) + +void t_new_var_size7(__int128 n) { + auto p = new double[n]; +} + +// LLVM: @_Z15t_new_var_size7n +// LLVM: %[[N:.*]] = load i128, ptr %[[ARG_ALLOCA:.*]] +// LLVM: %[[N_SIZE_T:.*]] = trunc i128 %[[N]] to i64 +// LLVM: %[[RESULT_PAIR:.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %[[N_SIZE_T]], i64 8) +// LLVM: %[[RESULT:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 0 +// LLVM: %[[OVERFLOW:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 1 +// LLVM: %[[ALLOC_SIZE:.*]] = select i1 %[[OVERFLOW]], i64 -1, i64 %[[RESULT]] +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 %[[ALLOC_SIZE]]) + +void t_new_var_size_nontrivial(size_t n) { + auto p = new D[n]; +} + +// LLVM: @_Z25t_new_var_size_nontrivialm +// LLVM: %[[N:.*]] = load i64, ptr %[[ARG_ALLOCA:.*]] +// LLVM: %[[RESULT_PAIR:.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %[[N]], i64 4) +// LLVM: %[[SIZE_WITHOUT_COOKIE:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 0 +// LLVM: %[[OVERFLOW:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR]], 1 +// LLVM: %[[RESULT_PAIR2:.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %[[SIZE_WITHOUT_COOKIE]], i64 8) +// LLVM: %[[SIZE:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR2]], 0 +// LLVM: %[[OVERFLOW2:.*]] = extractvalue { i64, i1 } %[[RESULT_PAIR2]], 1 +// LLVM: %[[ANY_OVERFLOW:.*]] = or i1 %[[OVERFLOW]], %[[OVERFLOW2]] +// LLVM: %[[ALLOC_SIZE:.*]] = select i1 %[[ANY_OVERFLOW]], i64 -1, i64 %[[SIZE]] +// LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 %[[ALLOC_SIZE]]) From f7b915147d50fc70a4d1e689a7c648a1a6354003 Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Fri, 7 Feb 2025 13:15:21 -0800 Subject: [PATCH 2234/2301] [CIR] Add attribute visitor for lowering globals (#1318) This adds a new mlir-tablegen option to generate a .inc file with the complete set of attrdefs defined in a .td file and uses the file generated for CIR attrdefs to create an attr visitor. This visitor is used in the lowering of global variables directly to LLVM IR. The purpose of this change is to align the incubator lowering implementation with the recent upstream changes to make future upstreaming easier, while also fulfilling the upstream request to have the visitor be based on a tablegen created file. The new mlir-tablegen feature will be upstreamed after it is established here. No observable change is intended in the CIR code. --- .../clang/CIR/Dialect/IR/CIRAttrVisitor.h | 47 ++++++ .../clang/CIR/Dialect/IR/CMakeLists.txt | 1 + .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 156 +++++++----------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 3 +- mlir/test/mlir-tblgen/attrdefs.td | 8 + mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp | 24 +++ 6 files changed, 141 insertions(+), 98 deletions(-) create mode 100644 clang/include/clang/CIR/Dialect/IR/CIRAttrVisitor.h diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrVisitor.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrVisitor.h new file mode 100644 index 000000000000..106fb3d0ed17 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrVisitor.h @@ -0,0 +1,47 @@ +//===- CIRAttrVisitor.h - Visitor for CIR attributes ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the CirAttrVisitor interface. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRATTRVISITOR_H +#define LLVM_CLANG_CIR_DIALECT_IR_CIRATTRVISITOR_H + +#include "clang/CIR/Dialect/IR/CIRAttrs.h" + +namespace cir { + +#define DISPATCH(NAME) return getImpl()->visitCir##NAME(cirAttr); + +template class CirAttrVisitor { +public: + RetTy visit(mlir::Attribute attr) { +#define ATTRDEF(NAME) \ + if (const auto cirAttr = mlir::dyn_cast(attr)) \ + DISPATCH(NAME); +#include "clang/CIR/Dialect/IR/CIRAttrDefsList.inc" + llvm_unreachable("unhandled attribute type"); + } + + // If the implementation chooses not to implement a certain visit + // method, fall back to the parent. +#define ATTRDEF(NAME) \ + RetTy visitCir##NAME(NAME cirAttr) { DISPATCH(Attr); } +#include "clang/CIR/Dialect/IR/CIRAttrDefsList.inc" + + RetTy visitCirAttr(mlir::Attribute attr) { return RetTy(); } + + ImplClass *getImpl() { return static_cast(this); } +}; + +#undef DISPATCH + +} // namespace cir + +#endif // LLVM_CLANG_CIR_DIALECT_IR_CIRATTRVISITOR_H diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt index 3d43b06c6217..014bb3d9b03c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt @@ -26,6 +26,7 @@ mlir_tablegen(CIROpsStructs.h.inc -gen-attrdef-decls) mlir_tablegen(CIROpsStructs.cpp.inc -gen-attrdef-defs) mlir_tablegen(CIROpsAttributes.h.inc -gen-attrdef-decls) mlir_tablegen(CIROpsAttributes.cpp.inc -gen-attrdef-defs) +mlir_tablegen(CIRAttrDefsList.inc -gen-attrdef-list) add_public_tablegen_target(MLIRCIREnumsGen) clang_tablegen(CIRBuiltinsLowering.inc -gen-cir-builtins-lowering diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3817eb3960a5..833d256d0404 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -41,6 +41,7 @@ #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" +#include "clang/CIR/Dialect/IR/CIRAttrVisitor.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/MissingFeatures.h" @@ -425,32 +426,52 @@ emitCirAttrToMemory(mlir::Operation *parentOp, mlir::Attribute attr, } /// Switches on the type of attribute and calls the appropriate conversion. +class CirAttrToValue : public CirAttrVisitor { +public: + CirAttrToValue(mlir::Operation *parentOp, + mlir::ConversionPatternRewriter &rewriter, + const mlir::TypeConverter *converter, + mlir::DataLayout const &dataLayout) + : parentOp(parentOp), rewriter(rewriter), converter(converter), + dataLayout(dataLayout) {} + + mlir::Value visitCirIntAttr(cir::IntAttr attr); + mlir::Value visitCirFPAttr(cir::FPAttr attr); + mlir::Value visitCirConstPtrAttr(cir::ConstPtrAttr attr); + mlir::Value visitCirConstStructAttr(cir::ConstStructAttr attr); + mlir::Value visitCirConstArrayAttr(cir::ConstArrayAttr attr); + mlir::Value visitCirConstVectorAttr(cir::ConstVectorAttr attr); + mlir::Value visitCirBoolAttr(cir::BoolAttr attr); + mlir::Value visitCirZeroAttr(cir::ZeroAttr attr); + mlir::Value visitCirUndefAttr(cir::UndefAttr attr); + mlir::Value visitCirPoisonAttr(cir::PoisonAttr attr); + mlir::Value visitCirGlobalViewAttr(cir::GlobalViewAttr attr); + mlir::Value visitCirVTableAttr(cir::VTableAttr attr); + mlir::Value visitCirTypeInfoAttr(cir::TypeInfoAttr attr); + +private: + mlir::Operation *parentOp; + mlir::ConversionPatternRewriter &rewriter; + const mlir::TypeConverter *converter; + mlir::DataLayout const &dataLayout; +}; /// IntAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::IntAttr intAttr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +mlir::Value CirAttrToValue::visitCirIntAttr(cir::IntAttr intAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(intAttr.getType()), intAttr.getValue()); } /// BoolAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::BoolAttr boolAttr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +mlir::Value CirAttrToValue::visitCirBoolAttr(cir::BoolAttr boolAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(boolAttr.getType()), boolAttr.getValue()); } /// ConstPtrAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstPtrAttr ptrAttr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +mlir::Value CirAttrToValue::visitCirConstPtrAttr(cir::ConstPtrAttr ptrAttr) { auto loc = parentOp->getLoc(); if (ptrAttr.isNullValue()) { return rewriter.create( @@ -465,51 +486,36 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstPtrAttr ptrAttr, } /// FPAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::FPAttr fltAttr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +mlir::Value CirAttrToValue::visitCirFPAttr(cir::FPAttr fltAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(fltAttr.getType()), fltAttr.getValue()); } /// ZeroAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ZeroAttr zeroAttr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +mlir::Value CirAttrToValue::visitCirZeroAttr(cir::ZeroAttr zeroAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(zeroAttr.getType())); } /// UndefAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::UndefAttr undefAttr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +mlir::Value CirAttrToValue::visitCirUndefAttr(cir::UndefAttr undefAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(undefAttr.getType())); } /// PoisonAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::PoisonAttr poisonAttr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +mlir::Value CirAttrToValue::visitCirPoisonAttr(cir::PoisonAttr poisonAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(poisonAttr.getType())); } /// ConstStruct visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstStructAttr constStruct, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter, - mlir::DataLayout const &dataLayout) { +mlir::Value +CirAttrToValue::visitCirConstStructAttr(cir::ConstStructAttr constStruct) { auto llvmTy = converter->convertType(constStruct.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); @@ -525,18 +531,13 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstStructAttr constStruct, } // VTableAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::VTableAttr vtableArr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter, - mlir::DataLayout const &dataLayout) { +mlir::Value CirAttrToValue::visitCirVTableAttr(cir::VTableAttr vtableArr) { auto llvmTy = converter->convertType(vtableArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); for (auto [idx, elt] : llvm::enumerate(vtableArr.getVtableData())) { - mlir::Value init = - lowerCirAttrAsValue(parentOp, elt, rewriter, converter, dataLayout); + mlir::Value init = visit(elt); result = rewriter.create(loc, result, init, idx); } @@ -544,18 +545,14 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::VTableAttr vtableArr, } // TypeInfoAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::TypeInfoAttr typeinfoArr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter, - mlir::DataLayout const &dataLayout) { +mlir::Value +CirAttrToValue::visitCirTypeInfoAttr(cir::TypeInfoAttr typeinfoArr) { auto llvmTy = converter->convertType(typeinfoArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); for (auto [idx, elt] : llvm::enumerate(typeinfoArr.getData())) { - mlir::Value init = - lowerCirAttrAsValue(parentOp, elt, rewriter, converter, dataLayout); + mlir::Value init = visit(elt); result = rewriter.create(loc, result, init, idx); } @@ -563,11 +560,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::TypeInfoAttr typeinfoArr, } // ConstArrayAttr visitor -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstArrayAttr constArr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter, - mlir::DataLayout const &dataLayout) { +mlir::Value +CirAttrToValue::visitCirConstArrayAttr(cir::ConstArrayAttr constArr) { auto llvmTy = converter->convertType(constArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result; @@ -610,10 +604,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstArrayAttr constArr, } // ConstVectorAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstVectorAttr constVec, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter) { +mlir::Value +CirAttrToValue::visitCirConstVectorAttr(cir::ConstVectorAttr constVec) { auto llvmTy = converter->convertType(constVec.getType()); auto loc = parentOp->getLoc(); SmallVector mlirValues; @@ -638,11 +630,8 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::ConstVectorAttr constVec, } // GlobalViewAttr visitor. -static mlir::Value -lowerCirAttrAsValue(mlir::Operation *parentOp, cir::GlobalViewAttr globalAttr, - mlir::ConversionPatternRewriter &rewriter, - const mlir::TypeConverter *converter, - mlir::DataLayout const &dataLayout) { +mlir::Value +CirAttrToValue::visitCirGlobalViewAttr(cir::GlobalViewAttr globalAttr) { auto module = parentOp->getParentOfType(); mlir::Type sourceType; unsigned sourceAddrSpace = 0; @@ -716,43 +705,16 @@ lowerCirAttrAsValue(mlir::Operation *parentOp, cir::GlobalViewAttr globalAttr, } /// Switches on the type of attribute and calls the appropriate conversion. -mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + const mlir::Attribute attr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter, mlir::DataLayout const &dataLayout) { - if (const auto intAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, intAttr, rewriter, converter); - if (const auto fltAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, fltAttr, rewriter, converter); - if (const auto ptrAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, ptrAttr, rewriter, converter); - if (const auto constStruct = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, constStruct, rewriter, converter, - dataLayout); - if (const auto constArr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, constArr, rewriter, converter, - dataLayout); - if (const auto constVec = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, constVec, rewriter, converter); - if (const auto boolAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, boolAttr, rewriter, converter); - if (const auto zeroAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, zeroAttr, rewriter, converter); - if (const auto undefAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, undefAttr, rewriter, converter); - if (const auto poisonAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, poisonAttr, rewriter, converter); - if (const auto globalAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, globalAttr, rewriter, converter, - dataLayout); - if (const auto vtableAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, vtableAttr, rewriter, converter, - dataLayout); - if (const auto typeinfoAttr = mlir::dyn_cast(attr)) - return lowerCirAttrAsValue(parentOp, typeinfoAttr, rewriter, converter, - dataLayout); - - llvm_unreachable("unhandled attribute type"); + CirAttrToValue valueConverter(parentOp, rewriter, converter, dataLayout); + auto value = valueConverter.visit(attr); + if (!value) + llvm_unreachable("unhandled attribute type"); + return value; } //===----------------------------------------------------------------------===// @@ -1734,8 +1696,8 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( // Regardless of the type, we should lower the constant of poison value // into PoisonOp. if (auto poisonAttr = mlir::dyn_cast(attr)) { - rewriter.replaceOp( - op, lowerCirAttrAsValue(op, poisonAttr, rewriter, getTypeConverter())); + rewriter.replaceOp(op, lowerCirAttrAsValue(op, poisonAttr, rewriter, + getTypeConverter(), dataLayout)); return mlir::success(); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 104ce3a0b105..bb0dcaf87efe 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -22,7 +22,8 @@ namespace direct { /// Convert a CIR attribute to an LLVM attribute. May use the datalayout for /// lowering attributes to-be-stored in memory. -mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, mlir::Attribute attr, +mlir::Value lowerCirAttrAsValue(mlir::Operation *parentOp, + const mlir::Attribute attr, mlir::ConversionPatternRewriter &rewriter, const mlir::TypeConverter *converter, mlir::DataLayout const &dataLayout); diff --git a/mlir/test/mlir-tblgen/attrdefs.td b/mlir/test/mlir-tblgen/attrdefs.td index 35d2c49619ee..e911f70e4358 100644 --- a/mlir/test/mlir-tblgen/attrdefs.td +++ b/mlir/test/mlir-tblgen/attrdefs.td @@ -1,5 +1,6 @@ // RUN: mlir-tblgen -gen-attrdef-decls -I %S/../../include %s | FileCheck %s --check-prefix=DECL // RUN: mlir-tblgen -gen-attrdef-defs -I %S/../../include %s | FileCheck %s --check-prefix=DEF +// RUN: mlir-tblgen -gen-attrdef-list -I %S/../../include %s | FileCheck %s --check-prefix=LIST include "mlir/IR/AttrTypeBase.td" include "mlir/IR/OpBase.td" @@ -19,6 +20,13 @@ include "mlir/IR/OpBase.td" // DEF: ::test::CompoundAAttr, // DEF: ::test::SingleParameterAttr +// LIST: ATTRDEF(IndexAttr) +// LIST: ATTRDEF(SimpleAAttr) +// LIST: ATTRDEF(CompoundAAttr) +// LIST: ATTRDEF(SingleParameterAttr) + +// LIST: #undef ATTRDEF + // DEF-LABEL: ::mlir::OptionalParseResult generatedAttributeParser( // DEF-SAME: ::mlir::AsmParser &parser, // DEF-SAME: ::llvm::StringRef *mnemonic, ::mlir::Type type, diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp index 6a39424bd463..4f0100fa67cd 100644 --- a/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp +++ b/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp @@ -690,6 +690,7 @@ class DefGenerator { public: bool emitDecls(StringRef selectedDialect); bool emitDefs(StringRef selectedDialect); + bool emitList(StringRef selectedDialect); protected: DefGenerator(ArrayRef defs, raw_ostream &os, @@ -1025,6 +1026,23 @@ bool DefGenerator::emitDefs(StringRef selectedDialect) { return false; } +bool DefGenerator::emitList(StringRef selectedDialect) { + emitSourceFileHeader(("List of " + defType + "Def Definitions").str(), os); + + SmallVector defs; + collectAllDefs(selectedDialect, defRecords, defs); + if (defs.empty()) + return false; + + auto interleaveFn = [&](const AttrOrTypeDef &def) { + os << defType.upper() << "DEF(" << def.getCppClassName() << ")"; + }; + llvm::interleave(defs, os, interleaveFn, "\n"); + os << "\n\n"; + os << "#undef " << defType.upper() << "DEF" << "\n"; + return false; +} + //===----------------------------------------------------------------------===// // Type Constraints //===----------------------------------------------------------------------===// @@ -1099,6 +1117,12 @@ static mlir::GenRegistration AttrDefGenerator generator(records, os); return generator.emitDecls(attrDialect); }); +static mlir::GenRegistration + genAttrList("gen-attrdef-list", "Generate an AttrDef list", + [](const RecordKeeper &records, raw_ostream &os) { + AttrDefGenerator generator(records, os); + return generator.emitList(attrDialect); + }); //===----------------------------------------------------------------------===// // TypeDef From a7383c9d05165d16edba857ddc86e5d29d94d2cc Mon Sep 17 00:00:00 2001 From: Konstantinos Parasyris Date: Fri, 7 Feb 2025 13:33:49 -0800 Subject: [PATCH 2235/2301] [CIR][HIP] Compile host code (#1319) Adds support for `__host__` and `__device__` functions when compiling for CUDA host. The PR follows the structure of #1309 --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 10 ++--- clang/lib/CIR/CodeGen/TargetInfo.cpp | 56 ++++++++++++++++++++++++++ clang/test/CIR/CodeGen/HIP/simple.cpp | 16 ++++++++ 3 files changed, 76 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/CodeGen/HIP/simple.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index c58d260e166a..412369ed07ef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -515,7 +515,8 @@ void CIRGenModule::emitGlobal(GlobalDecl GD) { assert(!Global->hasAttr() && "NYI"); assert(!Global->hasAttr() && "NYI"); - if (langOpts.CUDA) { + if (langOpts.CUDA || langOpts.HIP) { + // clang uses the same flag when building HIP code if (langOpts.CUDAIsDevice) { // This will implicitly mark templates and their // specializations as __host__ __device__. @@ -3217,8 +3218,7 @@ void CIRGenModule::Release() { if (astContext.getTargetInfo().getTriple().isWasm()) llvm_unreachable("NYI"); - if (getTriple().isAMDGPU() || - (getTriple().isSPIRV() && getTriple().getVendor() == llvm::Triple::AMD)) { + if (getTriple().isSPIRV() && getTriple().getVendor() == llvm::Triple::AMD) { llvm_unreachable("NYI"); } @@ -3229,9 +3229,7 @@ void CIRGenModule::Release() { if (!astContext.CUDAExternalDeviceDeclODRUsedByHost.empty()) { llvm_unreachable("NYI"); } - if (langOpts.HIP && !getLangOpts().OffloadingNewDriver) { - llvm_unreachable("NYI"); - } + assert(!MissingFeatures::emitLLVMUsed()); assert(!MissingFeatures::sanStats()); diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 7669dad59eb8..07dca811985e 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -329,6 +329,30 @@ class NVPTXTargetCIRGenInfo : public TargetCIRGenInfo { } // namespace +//===----------------------------------------------------------------------===// +// AMDGPU ABI Implementation +//===----------------------------------------------------------------------===// + +namespace { + +class AMDGPUABIInfo : public ABIInfo { +public: + AMDGPUABIInfo(CIRGenTypes &cgt) : ABIInfo(cgt) {} + + cir::ABIArgInfo classifyReturnType(QualType retTy) const; + cir::ABIArgInfo classifyArgumentType(QualType ty) const; + + void computeInfo(CIRGenFunctionInfo &fnInfo) const override; +}; + +class AMDGPUTargetCIRGenInfo : public TargetCIRGenInfo { +public: + AMDGPUTargetCIRGenInfo(CIRGenTypes &cgt) + : TargetCIRGenInfo(std::make_unique(cgt)) {} +}; + +} // namespace + // TODO(cir): remove the attribute once this gets used. LLVM_ATTRIBUTE_UNUSED static bool classifyReturnType(const CIRGenCXXABI &CXXABI, @@ -495,6 +519,34 @@ void NVPTXABIInfo::computeInfo(CIRGenFunctionInfo &fnInfo) const { fnInfo.getReturnInfo() = cir::ABIArgInfo::getDirect(CGT.convertType(retTy)); } +// Skeleton only. Implement when used in TargetLower stage. +cir::ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType retTy) const { + llvm_unreachable("not yet implemented"); +} + +cir::ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType ty) const { + llvm_unreachable("not yet implemented"); +} + +void AMDGPUABIInfo::computeInfo(CIRGenFunctionInfo &fnInfo) const { + // Top level CIR has unlimited arguments and return types. Lowering for ABI + // specific concerns should happen during a lowering phase. Assume everything + // is direct for now. + for (CIRGenFunctionInfo::arg_iterator it = fnInfo.arg_begin(), + ie = fnInfo.arg_end(); + it != ie; ++it) { + if (testIfIsVoidTy(it->type)) + it->info = cir::ABIArgInfo::getIgnore(); + else + it->info = cir::ABIArgInfo::getDirect(CGT.convertType(it->type)); + } + auto retTy = fnInfo.getReturnType(); + if (testIfIsVoidTy(retTy)) + fnInfo.getReturnInfo() = cir::ABIArgInfo::getIgnore(); + else + fnInfo.getReturnInfo() = cir::ABIArgInfo::getDirect(CGT.convertType(retTy)); +} + ABIInfo::~ABIInfo() {} bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { @@ -690,5 +742,9 @@ const TargetCIRGenInfo &CIRGenModule::getTargetCIRGenInfo() { case llvm::Triple::nvptx64: { return SetCIRGenInfo(new NVPTXTargetCIRGenInfo(genTypes)); } + + case llvm::Triple::amdgcn: { + return SetCIRGenInfo(new AMDGPUTargetCIRGenInfo(genTypes)); + } } } diff --git a/clang/test/CIR/CodeGen/HIP/simple.cpp b/clang/test/CIR/CodeGen/HIP/simple.cpp new file mode 100644 index 000000000000..ec4110da10d7 --- /dev/null +++ b/clang/test/CIR/CodeGen/HIP/simple.cpp @@ -0,0 +1,16 @@ +#include "../Inputs/cuda.h" + +// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip -fclangir \ +// RUN: -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + + +// This should emit as a normal C++ function. +__host__ void host_fn(int *a, int *b, int *c) {} + +// CIR: cir.func @_Z7host_fnPiS_S_ + +// This shouldn't emit. +__device__ void device_fn(int* a, double b, float c) {} + +// CHECK-NOT: cir.func @_Z9device_fnPidf From 53335ae73af15b60f2f5f9ff654a185a87a973c4 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 10 Feb 2025 11:46:24 +0100 Subject: [PATCH 2236/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vuzp_v and neon_vuzpq_v (#1314) Lowering `builtin_neon_vuzp_v` and `builtin_neon_vuzpq_v` [Clang CGBuiltin Implementation](https://github.com/llvm/clangir/blob/2b1a638ea07ca10c5727ea835bfbe17b881175cc/clang/lib/CodeGen/CGBuiltin.cpp#L8606-L8622) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 21 +- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 227 ++++++++++++++++++ 2 files changed, 247 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 6dfda0068530..c76ca37a5425 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4571,7 +4571,26 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vuzp_v: case NEON::BI__builtin_neon_vuzpq_v: { - llvm_unreachable("NEON::BI__builtin_neon_vuzpq_v NYI"); + Ops[1] = builder.createBitcast(Ops[1], ty); + Ops[2] = builder.createBitcast(Ops[2], ty); + // Adding a bitcast here as Ops[0] might be a void pointer. + mlir::Value baseAddr = + builder.createBitcast(Ops[0], builder.getPointerTo(ty)); + mlir::Value sv; + mlir::Location loc = getLoc(E->getExprLoc()); + + for (unsigned vi = 0; vi != 2; ++vi) { + llvm::SmallVector indices; + for (unsigned i = 0, e = vTy.getSize(); i != e; ++i) { + indices.push_back(2 * i + vi); + } + cir::ConstantOp idx = builder.getConstInt(loc, SInt32Ty, vi); + mlir::Value addr = builder.create( + loc, baseAddr.getType(), baseAddr, idx); + sv = builder.createVecShuffle(loc, Ops[1], Ops[2], indices); + (void)builder.CIRBaseBuilderTy::createStore(loc, sv, addr); + } + return sv; } case NEON::BI__builtin_neon_vzip_v: case NEON::BI__builtin_neon_vzipq_v: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index fc01ad8b8d44..18cd9298be12 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -476,6 +476,233 @@ uint32x4x2_t test_vtrnq_u32(uint32x4_t a, uint32x4_t b) { // LLVM: ret %struct.uint32x4x2_t {{.*}} } +uint8x8x2_t test_vuzp_u8(uint8x8_t a, uint8x8_t b) { + return vuzp_u8(a, b); + + // CIR-LABEL:vuzp_u8 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i, #cir.int<4> : !s32i, #cir.int<6> : !s32i, + // CIR-SAME: #cir.int<8> : !s32i, #cir.int<10> : !s32i, #cir.int<12> : !s32i, + // CIR-SAME: #cir.int<14> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i, #cir.int<5> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<9> : !s32i, #cir.int<11> : !s32i, #cir.int<13> : !s32i, #cir.int<15> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vuzp_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VTRN:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[B]], + // LLVM-SAME: <8 x i32> + // LLVM: store <8 x i8> [[VTRN]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<8 x i8>, ptr [[RES]], i64 1 + // LLVM: [[VTRN1:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[B]], <8 x i32> + // LLVM: store <8 x i8> [[VTRN1]], ptr [[RES1]], align 8 + // LLVM-NEXT: [[RET:%.*]] = load %struct.uint8x8x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.uint8x8x2_t [[RET]] +} + +uint16x4x2_t test_vuzp_u16(uint16x4_t a, uint16x4_t b) { + return vuzp_u16(a, b); + + // CIR-LABEL: vuzp_u16 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i, #cir.int<4> : !s32i, #cir.int<6> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i, #cir.int<5> : !s32i, #cir.int<7> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vuzp_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[vuzp:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[B]], + // LLVM-SAME: <4 x i32> + // LLVM: store <4 x i16> [[vuzp]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<4 x i16>, ptr [[RES]], i64 1 + // LLVM: [[vuzp1:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[B]], + // LLVM-SAME: <4 x i32> + // LLVM: store <4 x i16> [[vuzp1]], ptr [[RES1]], align 8 + // LLVM-NEXT: [[RET:%.*]] = load %struct.uint16x4x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.uint16x4x2_t [[RET]] +} + +int32x2x2_t test_vuzp_s32(int32x2_t a, int32x2_t b) { + return vuzp_s32(a, b); + + // CIR-LABEL: vuzp_s32 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vuzp_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[vuzp:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: store <2 x i32> [[vuzp]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<2 x i32>, ptr [[RES]], i64 1 + // LLVM: [[vuzp1:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: store <2 x i32> [[vuzp1]], ptr [[RES1]], align 8 + // LLVM-NEXT: [[RET:%.*]] = load %struct.int32x2x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.int32x2x2_t [[RET]] +} + +float32x2x2_t test_vuzp_f32(float32x2_t a, float32x2_t b) { + return vuzp_f32(a, b); + + // CIR-LABEL: vuzp_f32 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vuzp_f32(<2 x float>{{.*}}[[A:%.*]], <2 x float>{{.*}}[[B:%.*]]) + // LLVM: [[vuzp:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: store <2 x float> [[vuzp]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<2 x float>, ptr [[RES]], i64 1 + // LLVM: [[vuzp1:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: store <2 x float> [[vuzp1]], ptr [[RES1]], align 8 + // LLVM-NEXT: [[RET:%.*]] = load %struct.float32x2x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.float32x2x2_t [[RET]] +} + +uint8x16x2_t test_vuzpq_u8(uint8x16_t a, uint8x16_t b) { + return vuzpq_u8(a, b); + + // CIR-LABEL: vuzpq_u8 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i, #cir.int<4> : !s32i, #cir.int<6> : !s32i, + // CIR-SAME: #cir.int<8> : !s32i, #cir.int<10> : !s32i, #cir.int<12> : !s32i, #cir.int<14> : !s32i, + // CIR-SAME: #cir.int<16> : !s32i, #cir.int<18> : !s32i, #cir.int<20> : !s32i, #cir.int<22> : !s32i, + // CIR-SAME: #cir.int<24> : !s32i, #cir.int<26> : !s32i, #cir.int<28> : !s32i, #cir.int<30> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i, #cir.int<5> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<9> : !s32i, #cir.int<11> : !s32i, #cir.int<13> : !s32i, #cir.int<15> : !s32i, + // CIR-SAME: #cir.int<17> : !s32i, #cir.int<19> : !s32i, #cir.int<21> : !s32i, #cir.int<23> : !s32i, + // CIR-SAME: #cir.int<25> : !s32i, #cir.int<27> : !s32i, #cir.int<29> : !s32i, #cir.int<31> : !s32i] : !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vuzpq_u8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[vuzp:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[B]], + // LLVM-SAME: <16 x i32> + // LLVM: store <16 x i8> [[vuzp]], ptr [[RES:%.*]], align 16 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<16 x i8>, ptr [[RES]], i64 1 + // LLVM: [[vuzp1:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[B]], + // LLVM-SAME: <16 x i32> + // LLVM: store <16 x i8> [[vuzp1]], ptr [[RES1]], align 16 + // LLVM-NEXT: [[RET:%.*]] = load %struct.uint8x16x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.uint8x16x2_t [[RET]] +} + +int16x8x2_t test_vuzpq_s16(int16x8_t a, int16x8_t b) { + return vuzpq_s16(a, b); + + // CIR-LABEL: vuzpq_s16 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i, #cir.int<4> : !s32i, #cir.int<6> : !s32i, + // CIR-SAME: #cir.int<8> : !s32i, #cir.int<10> : !s32i, #cir.int<12> : !s32i, + // CIR-SAME: #cir.int<14> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i, #cir.int<5> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<9> : !s32i, #cir.int<11> : !s32i, #cir.int<13> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i] : !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vuzpq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[vuzp:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[B]], + // LLVM-SAME: <8 x i32> + // LLVM: store <8 x i16> [[vuzp]], ptr [[RES:%.*]], align 16 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<8 x i16>, ptr [[RES]], i64 1 + // LLVM: [[vuzp1:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[B]], <8 x i32> + // LLVM: store <8 x i16> [[vuzp1]], ptr [[RES1]], align 16 + // LLVM: [[RET:%.*]] = load %struct.int16x8x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.int16x8x2_t [[RET]] +} + +uint32x4x2_t test_vuzpq_u32(uint32x4_t a, uint32x4_t b) { + return vuzpq_u32(a, b); + + // CIR-LABEL: vuzpq_u32 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i, #cir.int<4> : !s32i, #cir.int<6> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i, #cir.int<5> : !s32i, #cir.int<7> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + // LLVM: [[RET:%.*]] = load %struct.uint32x4x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.uint32x4x2_t [[RET]] +} + +float32x4x2_t test_vuzpq_f32(float32x4_t a, float32x4_t b) { + return vuzpq_f32(a, b); + + // CIR-LABEL: vuzpq_f32 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i, #cir.int<4> : !s32i, #cir.int<6> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i, #cir.int<5> : !s32i, #cir.int<7> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + // LLVM: [[RET:%.*]] = load %struct.float32x4x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.float32x4x2_t [[RET]] +} + uint8x8_t test_vqmovun_s16(int16x8_t a) { return vqmovun_s16(a); From 2294d5f454f04a94868d419087ab19f78c586bd6 Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Mon, 10 Feb 2025 10:53:30 +0000 Subject: [PATCH 2237/2301] [CIR][CUDA] Generate attribute for kernel name of device stubs (#1317) Now a `__global__` function on host will be generated to a device stub, with an attribute recording the corresponding kernel name (mangled name on device of the same function). The dynamic registration phase will be implemented in LLVM lowering. For example, CIR generated for `__global__ void global_fn();` looks like ``` #fn_attr1 = #cir})> cir.func private @_Z24__device_stub__global_fnv() extra(#fn_attr1) ``` --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 1 + .../clang/CIR/Dialect/IR/CIRCUDAAttrs.td | 38 +++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 10 +++++ clang/lib/CIR/CodeGen/CIRGenModule.cpp | 18 ++++----- clang/test/CIR/CodeGen/CUDA/simple-device.cu | 14 ------- clang/test/CIR/CodeGen/CUDA/simple.cu | 29 ++++++++++---- 6 files changed, 79 insertions(+), 31 deletions(-) create mode 100644 clang/include/clang/CIR/Dialect/IR/CIRCUDAAttrs.td delete mode 100644 clang/test/CIR/CodeGen/CUDA/simple-device.cu diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index 298d06805c54..a74eda452a49 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -1327,5 +1327,6 @@ def CIR_TBAAAttr : CIR_Attr<"TBAA", "tbaa", []> { } include "clang/CIR/Dialect/IR/CIROpenCLAttrs.td" +include "clang/CIR/Dialect/IR/CIRCUDAAttrs.td" #endif // MLIR_CIR_DIALECT_CIR_ATTRS diff --git a/clang/include/clang/CIR/Dialect/IR/CIRCUDAAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRCUDAAttrs.td new file mode 100644 index 000000000000..fd74fe2d349e --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIRCUDAAttrs.td @@ -0,0 +1,38 @@ +//===---- CIRCUDAAttrs.td - CIR dialect attrs for CUDA -----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the CIR dialect attributes for OpenCL. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_CIR_DIALECT_CIR_CUDA_ATTRS +#define MLIR_CIR_DIALECT_CIR_CUDA_ATTRS + +//===----------------------------------------------------------------------===// +// CUDAKernelNameAttr +//===----------------------------------------------------------------------===// + +def CUDAKernelNameAttr : CIR_Attr<"CUDAKernelName", + "cuda_kernel_name"> { + let summary = "Device-side function name for this stub."; + let description = + [{ + This attribute is attached to function definitions and records the + mangled name of the kernel function used on the device. + + In CUDA, global functions (kernels) are processed differently for host + and device. On host, Clang generates device stubs; on device, they are + treated as normal functions. As they probably have different mangled + names, we must record the corresponding device-side name for a stub. + }]; + + let parameters = (ins "std::string":$kernel_name); + let assemblyFormat = "`<` $kernel_name `>`"; +} + +#endif // MLIR_CIR_DIALECT_CIR_CUDA_ATTRS diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 9a15e3337f32..4d018ce1a9c9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -460,6 +460,16 @@ void CIRGenModule::constructAttributeList( getLangOpts().OffloadUniformBlock) assert(!cir::MissingFeatures::CUDA()); + if (langOpts.CUDA && !langOpts.CUDAIsDevice && + TargetDecl->hasAttr()) { + GlobalDecl kernel(CalleeInfo.getCalleeDecl()); + llvm::StringRef kernelName = getMangledName( + kernel.getWithKernelReferenceKind(KernelReferenceKind::Kernel)); + auto attr = + cir::CUDAKernelNameAttr::get(&getMLIRContext(), kernelName.str()); + funcAttrs.set(attr.getMnemonic(), attr); + } + if (TargetDecl->hasAttr()) ; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 412369ed07ef..82a4c9838e1f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -9,7 +9,6 @@ // This is the internal per-translation-unit state used for CIR translation. // //===----------------------------------------------------------------------===// -#include "CIRGenModule.h" #include "CIRGenCXXABI.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" @@ -528,10 +527,9 @@ void CIRGenModule::emitGlobal(GlobalDecl GD) { if (langOpts.HIPStdPar) llvm_unreachable("NYI"); - if (Global->hasAttr()) - llvm_unreachable("NYI"); - - if (!Global->hasAttr()) + // Global functions reside on device, so it shouldn't be skipped. + if (!Global->hasAttr() && + !Global->hasAttr()) return; } else { // We must skip __device__ functions when compiling for host. @@ -2352,10 +2350,10 @@ cir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, auto F = GetOrCreateCIRFunction(MangledName, Ty, GD, ForVTable, DontDefer, /*IsThunk=*/false, IsForDefinition); - // As __global__ functions always reside on device, - // we need special care when accessing them from host; - // otherwise, CUDA functions behave as normal functions - if (langOpts.CUDA && !langOpts.CUDAIsDevice && + // As __global__ functions (kernels) always reside on device, + // when we access them from host, we must refer to the kernel handle. + // For CUDA, it's just the device stub. For HIP, it's something different. + if (langOpts.CUDA && !langOpts.CUDAIsDevice && langOpts.HIP && cast(GD.getDecl())->hasAttr()) { llvm_unreachable("NYI"); } @@ -2398,7 +2396,7 @@ static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, assert(0 && "NYI"); } else if (FD && FD->hasAttr() && GD.getKernelReferenceKind() == KernelReferenceKind::Stub) { - assert(0 && "NYI"); + Out << "__device_stub__"; } else { Out << II->getName(); } diff --git a/clang/test/CIR/CodeGen/CUDA/simple-device.cu b/clang/test/CIR/CodeGen/CUDA/simple-device.cu deleted file mode 100644 index c19a09a7e40b..000000000000 --- a/clang/test/CIR/CodeGen/CUDA/simple-device.cu +++ /dev/null @@ -1,14 +0,0 @@ -#include "../Inputs/cuda.h" - -// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device \ -// RUN: -fclangir -emit-cir -o - %s | FileCheck %s - -// This shouldn't emit. -__host__ void host_fn(int *a, int *b, int *c) {} - -// CHECK-NOT: cir.func @_Z7host_fnPiS_S_ - -// This should emit as a normal C++ function. -__device__ void device_fn(int* a, double b, float c) {} - -// CIR: cir.func @_Z9device_fnPidf diff --git a/clang/test/CIR/CodeGen/CUDA/simple.cu b/clang/test/CIR/CodeGen/CUDA/simple.cu index 5a0141d3d4b5..1a822d9bcc88 100644 --- a/clang/test/CIR/CodeGen/CUDA/simple.cu +++ b/clang/test/CIR/CodeGen/CUDA/simple.cu @@ -1,16 +1,31 @@ #include "../Inputs/cuda.h" +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir \ +// RUN: -x cuda -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR-HOST --input-file=%t.cir %s + // RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fclangir \ -// RUN: -emit-cir %s -o %t.cir -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: -fcuda-is-device -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR-DEVICE --input-file=%t.cir %s +// Attribute for global_fn +// CIR-HOST: [[Kernel:#[a-zA-Z_0-9]+]] = {{.*}}#cir.cuda_kernel_name<_Z9global_fnv>{{.*}} -// This should emit as a normal C++ function. __host__ void host_fn(int *a, int *b, int *c) {} +// CIR-HOST: cir.func @_Z7host_fnPiS_S_ +// CIR-DEVICE-NOT: cir.func @_Z7host_fnPiS_S_ -// CIR: cir.func @_Z7host_fnPiS_S_ - -// This shouldn't emit. __device__ void device_fn(int* a, double b, float c) {} +// CIR-HOST-NOT: cir.func @_Z9device_fnPidf +// CIR-DEVICE: cir.func @_Z9device_fnPidf + +#ifdef __CUDA_ARCH__ +__global__ void global_fn() {} +#else +__global__ void global_fn(); +#endif +// CIR-HOST: @_Z24__device_stub__global_fnv(){{.*}}extra([[Kernel]]) +// CIR-DEVICE: @_Z9global_fnv -// CHECK-NOT: cir.func @_Z9device_fnPidf +// Make sure `global_fn` indeed gets emitted +__host__ void x() { auto v = global_fn; } From 3e17e7b9404e1a28bf33bdd5943f4a208134d479 Mon Sep 17 00:00:00 2001 From: Konstantinos Parasyris Date: Mon, 10 Feb 2025 02:55:45 -0800 Subject: [PATCH 2238/2301] [CIR][HIP] Compile HIP device code (#1322) Depends on #1319 --- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 3 ++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 6 +++++- clang/test/CIR/CodeGen/HIP/simple-device.cpp | 14 ++++++++++++++ 4 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/HIP/simple-device.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 4d018ce1a9c9..d9d7c1f13de2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1651,7 +1651,8 @@ static void getTrivialDefaultFunctionAttributes( // AFAIK, neither of them support exceptions in device code. if (langOpts.SYCLIsDevice) llvm_unreachable("NYI"); - if (langOpts.OpenCL || (langOpts.CUDA && langOpts.CUDAIsDevice)) { + if (langOpts.OpenCL || + ((langOpts.CUDA || langOpts.HIP) && langOpts.CUDAIsDevice)) { auto noThrow = cir::NoThrowAttr::get(CGM.getBuilder().getContext()); funcAttrs.set(noThrow.getMnemonic(), noThrow); } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 82a4c9838e1f..76221be12319 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -3112,7 +3112,7 @@ void CIRGenModule::emitDeferred(unsigned recursionLimit) { // Emit CUDA/HIP static device variables referenced by host code only. Note we // should not clear CUDADeviceVarODRUsedByHost since it is still needed for // further handling. - if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice && + if ((getLangOpts().CUDA || getLangOpts().HIP) && getLangOpts().CUDAIsDevice && !getASTContext().CUDADeviceVarODRUsedByHost.empty()) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp index 66d6a57e242a..70f7b681bc86 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp @@ -348,7 +348,11 @@ mlir::Type CIRGenTypes::convertType(QualType T) { // For the device-side compilation, CUDA device builtin surface/texture types // may be represented in different types. - if (astContext.getLangOpts().CUDAIsDevice) { + // NOTE: CUDAIsDevice is true when building also HIP code. + // 1. There is no SurfaceType on HIP, + // 2. There is Texture memory on HIP but accessing the memory goes through + // calls to the runtime. e.g. for a 2D: `tex2D(tex, x, y);` + if (astContext.getLangOpts().CUDA && astContext.getLangOpts().CUDAIsDevice) { if (Ty->isCUDADeviceBuiltinSurfaceType() || Ty->isCUDADeviceBuiltinTextureType()) llvm_unreachable("NYI"); diff --git a/clang/test/CIR/CodeGen/HIP/simple-device.cpp b/clang/test/CIR/CodeGen/HIP/simple-device.cpp new file mode 100644 index 000000000000..e627a90dc410 --- /dev/null +++ b/clang/test/CIR/CodeGen/HIP/simple-device.cpp @@ -0,0 +1,14 @@ +#include "../Inputs/cuda.h" + +// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip -fcuda-is-device \ +// RUN: -fclangir -emit-cir -o - %s | FileCheck %s + +// This shouldn't emit. +__host__ void host_fn(int *a, int *b, int *c) {} + +// CHECK-NOT: cir.func @_Z7host_fnPiS_S_ + +// This should emit as a normal C++ function. +__device__ void device_fn(int* a, double b, float c) {} + +// CIR: cir.func @_Z9device_fnPidf From 637f2f3fc3560695ea4cfdc2cd6df7767501ceff Mon Sep 17 00:00:00 2001 From: Omar Hossam Date: Tue, 11 Feb 2025 02:34:04 +0100 Subject: [PATCH 2239/2301] [CIR][CIRGen] Handle __sync_{and,or,xor}_and_fetch (#1328) This addresses https://github.com/llvm/clangir/issues/1273. `Nand` is missing here, as i didn't intuitively know how to implement it initially. I think I have figured it out and will push in an upcoming commit. Co-authored-by: Omar Ibrahim --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 13 +- clang/test/CIR/CodeGen/atomic.cpp | 232 ++++++++++++++++++++++++ 2 files changed, 240 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index bb285e4811e3..bd34f4430c3a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1078,8 +1078,8 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, bool LoseInfo = false; Probability.convert(llvm::APFloat::IEEEdouble(), llvm::RoundingMode::Dynamic, &LoseInfo); - ProbAttr = mlir::FloatAttr::get( - mlir::Float64Type::get(&getMLIRContext()), Probability); + ProbAttr = mlir::FloatAttr::get(mlir::Float64Type::get(&getMLIRContext()), + Probability); } auto result = builder.create(getLoc(E->getSourceRange()), @@ -1766,21 +1766,24 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_and_and_fetch_4: case Builtin::BI__sync_and_and_fetch_8: case Builtin::BI__sync_and_and_fetch_16: - llvm_unreachable("BI__sync_and_and_fetch like NYI"); + return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::And, E, + cir::BinOpKind::And); case Builtin::BI__sync_or_and_fetch_1: case Builtin::BI__sync_or_and_fetch_2: case Builtin::BI__sync_or_and_fetch_4: case Builtin::BI__sync_or_and_fetch_8: case Builtin::BI__sync_or_and_fetch_16: - llvm_unreachable("BI__sync_or_and_fetch like NYI"); + return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Or, E, + cir::BinOpKind::Or); case Builtin::BI__sync_xor_and_fetch_1: case Builtin::BI__sync_xor_and_fetch_2: case Builtin::BI__sync_xor_and_fetch_4: case Builtin::BI__sync_xor_and_fetch_8: case Builtin::BI__sync_xor_and_fetch_16: - llvm_unreachable("BI__sync_xor_and_fetch like NYI"); + return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Xor, E, + cir::BinOpKind::Xor); case Builtin::BI__sync_nand_and_fetch_1: case Builtin::BI__sync_nand_and_fetch_2: diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 4f24e947e6c5..99a295846284 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -816,4 +816,236 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RET7:%.*]] = sub i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 ull = __sync_sub_and_fetch (&ull, uc); + + // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i + // CHECK: [[RES0:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i + // CHECK: [[RET0:%.*]] = cir.binop(and, [[RES0]], [[VAL0]]) : !s8i + // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES0:%.*]] = atomicrmw and ptr @sc, i8 [[VAL0]] seq_cst, align 1 + // LLVM: [[RET0:%.*]] = and i8 [[RES0]], [[VAL0]] + // LLVM: store i8 [[RET0]], ptr @sc, align 1 + sc = __sync_and_and_fetch (&sc, uc); + + // CHECK: [[RES1:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i + // CHECK: [[RET1:%.*]] = cir.binop(and, [[RES1]], [[VAL1]]) : !u8i + // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES1:%.*]] = atomicrmw and ptr @uc, i8 [[VAL1]] seq_cst, align 1 + // LLVM: [[RET1:%.*]] = and i8 [[RES1]], [[VAL1]] + // LLVM: store i8 [[RET1]], ptr @uc, align 1 + uc = __sync_and_and_fetch (&uc, uc); + + // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i + // CHECK: [[RES2:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i + // CHECK: [[RET2:%.*]] = cir.binop(and, [[RES2]], [[VAL2]]) : !s16i + // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16 + // LLVM: [[RES2:%.*]] = atomicrmw and ptr @ss, i16 [[CONV2]] seq_cst, align 2 + // LLVM: [[RET2:%.*]] = and i16 [[RES2]], [[CONV2]] + // LLVM: store i16 [[RET2]], ptr @ss, align 2 + ss = __sync_and_and_fetch (&ss, uc); + + // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i + // CHECK: [[RES3:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i + // CHECK: [[RET3:%.*]] = cir.binop(and, [[RES3]], [[VAL3]]) : !u16i + // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16 + // LLVM: [[RES3:%.*]] = atomicrmw and ptr @us, i16 [[CONV3]] seq_cst, align 2 + // LLVM: [[RET3:%.*]] = and i16 [[RES3]], [[CONV3]] + // LLVM: store i16 [[RET3]], ptr @us + us = __sync_and_and_fetch (&us, uc); + + // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i + // CHECK: [[RES4:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i + // CHECK: [[RET4:%.*]] = cir.binop(and, [[RES4]], [[VAL4]]) : !s32i + // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32 + // LLVM: [[RES4:%.*]] = atomicrmw and ptr @si, i32 [[CONV4]] seq_cst, align 4 + // LLVM: [[RET4:%.*]] = and i32 [[RES4]], [[CONV4]] + // LLVM: store i32 [[RET4]], ptr @si, align 4 + si = __sync_and_and_fetch (&si, uc); + + // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i + // CHECK: [[RES5:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i + // CHECK: [[RET5:%.*]] = cir.binop(and, [[RES5]], [[VAL5]]) : !u32i + // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32 + // LLVM: [[RES5:%.*]] = atomicrmw and ptr @ui, i32 [[CONV5]] seq_cst, align 4 + // LLVM: [[RET5:%.*]] = and i32 [[RES5]], [[CONV5]] + // LLVM: store i32 [[RET5]], ptr @ui, align 4 + ui = __sync_and_and_fetch (&ui, uc); + + // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i + // CHECK: [[RES6:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i + // CHECK: [[RET6:%.*]] = cir.binop(and, [[RES6]], [[VAL6]]) : !s64i + // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64 + // LLVM: [[RES6:%.*]] = atomicrmw and ptr @sll, i64 [[CONV6]] seq_cst, align 8 + // LLVM: [[RET6:%.*]] = and i64 [[RES6]], [[CONV6]] + // LLVM: store i64 [[RET6]], ptr @sll, align 8 + sll = __sync_and_and_fetch (&sll, uc); + + // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i + // CHECK: [[RES7:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i + // CHECK: [[RET7:%.*]] = cir.binop(and, [[RES7]], [[VAL7]]) : !u64i + // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64 + // LLVM: [[RES7:%.*]] = atomicrmw and ptr @ull, i64 [[CONV7]] seq_cst, align 8 + // LLVM: [[RET7:%.*]] = and i64 [[RES7]], [[CONV7]] + // LLVM: store i64 [[RET7]], ptr @ull, align 8 + ull = __sync_and_and_fetch (&ull, uc); + + // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i + // CHECK: [[RES0:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i + // CHECK: [[RET0:%.*]] = cir.binop(or, [[RES0]], [[VAL0]]) : !s8i + // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES0:%.*]] = atomicrmw or ptr @sc, i8 [[VAL0]] seq_cst, align 1 + // LLVM: [[RET0:%.*]] = or i8 [[RES0]], [[VAL0]] + // LLVM: store i8 [[RET0]], ptr @sc, align 1 + sc = __sync_or_and_fetch (&sc, uc); + + // CHECK: [[RES1:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i + // CHECK: [[RET1:%.*]] = cir.binop(or, [[RES1]], [[VAL1]]) : !u8i + // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES1:%.*]] = atomicrmw or ptr @uc, i8 [[VAL1]] seq_cst, align 1 + // LLVM: [[RET1:%.*]] = or i8 [[RES1]], [[VAL1]] + // LLVM: store i8 [[RET1]], ptr @uc, align 1 + uc = __sync_or_and_fetch (&uc, uc); + + // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i + // CHECK: [[RES2:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i + // CHECK: [[RET2:%.*]] = cir.binop(or, [[RES2]], [[VAL2]]) : !s16i + // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16 + // LLVM: [[RES2:%.*]] = atomicrmw or ptr @ss, i16 [[CONV2]] seq_cst, align 2 + // LLVM: [[RET2:%.*]] = or i16 [[RES2]], [[CONV2]] + // LLVM: store i16 [[RET2]], ptr @ss, align 2 + ss = __sync_or_and_fetch (&ss, uc); + + // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i + // CHECK: [[RES3:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i + // CHECK: [[RET3:%.*]] = cir.binop(or, [[RES3]], [[VAL3]]) : !u16i + // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16 + // LLVM: [[RES3:%.*]] = atomicrmw or ptr @us, i16 [[CONV3]] seq_cst, align 2 + // LLVM: [[RET3:%.*]] = or i16 [[RES3]], [[CONV3]] + // LLVM: store i16 [[RET3]], ptr @us + us = __sync_or_and_fetch (&us, uc); + + // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i + // CHECK: [[RES4:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i + // CHECK: [[RET4:%.*]] = cir.binop(or, [[RES4]], [[VAL4]]) : !s32i + // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32 + // LLVM: [[RES4:%.*]] = atomicrmw or ptr @si, i32 [[CONV4]] seq_cst, align 4 + // LLVM: [[RET4:%.*]] = or i32 [[RES4]], [[CONV4]] + // LLVM: store i32 [[RET4]], ptr @si, align 4 + si = __sync_or_and_fetch (&si, uc); + + // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i + // CHECK: [[RES5:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i + // CHECK: [[RET5:%.*]] = cir.binop(or, [[RES5]], [[VAL5]]) : !u32i + // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32 + // LLVM: [[RES5:%.*]] = atomicrmw or ptr @ui, i32 [[CONV5]] seq_cst, align 4 + // LLVM: [[RET5:%.*]] = or i32 [[RES5]], [[CONV5]] + // LLVM: store i32 [[RET5]], ptr @ui, align 4 + ui = __sync_or_and_fetch (&ui, uc); + + // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i + // CHECK: [[RES6:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i + // CHECK: [[RET6:%.*]] = cir.binop(or, [[RES6]], [[VAL6]]) : !s64i + // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64 + // LLVM: [[RES6:%.*]] = atomicrmw or ptr @sll, i64 [[CONV6]] seq_cst, align 8 + // LLVM: [[RET6:%.*]] = or i64 [[RES6]], [[CONV6]] + // LLVM: store i64 [[RET6]], ptr @sll, align 8 + sll = __sync_or_and_fetch (&sll, uc); + + // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i + // CHECK: [[RES7:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i + // CHECK: [[RET7:%.*]] = cir.binop(or, [[RES7]], [[VAL7]]) : !u64i + // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64 + // LLVM: [[RES7:%.*]] = atomicrmw or ptr @ull, i64 [[CONV7]] seq_cst, align 8 + // LLVM: [[RET7:%.*]] = or i64 [[RES7]], [[CONV7]] + // LLVM: store i64 [[RET7]], ptr @ull, align 8 + ull = __sync_or_and_fetch (&ull, uc); + + // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i + // CHECK: [[RES0:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i + // CHECK: [[RET0:%.*]] = cir.binop(xor, [[RES0]], [[VAL0]]) : !s8i + // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES0:%.*]] = atomicrmw xor ptr @sc, i8 [[VAL0]] seq_cst, align 1 + // LLVM: [[RET0:%.*]] = xor i8 [[RES0]], [[VAL0]] + // LLVM: store i8 [[RET0]], ptr @sc, align 1 + sc = __sync_xor_and_fetch (&sc, uc); + + // CHECK: [[RES1:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i + // CHECK: [[RET1:%.*]] = cir.binop(xor, [[RES1]], [[VAL1]]) : !u8i + // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES1:%.*]] = atomicrmw xor ptr @uc, i8 [[VAL1]] seq_cst, align 1 + // LLVM: [[RET1:%.*]] = xor i8 [[RES1]], [[VAL1]] + // LLVM: store i8 [[RET1]], ptr @uc, align 1 + uc = __sync_xor_and_fetch (&uc, uc); + + // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i + // CHECK: [[RES2:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i + // CHECK: [[RET2:%.*]] = cir.binop(xor, [[RES2]], [[VAL2]]) : !s16i + // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16 + // LLVM: [[RES2:%.*]] = atomicrmw xor ptr @ss, i16 [[CONV2]] seq_cst, align 2 + // LLVM: [[RET2:%.*]] = xor i16 [[RES2]], [[CONV2]] + // LLVM: store i16 [[RET2]], ptr @ss, align 2 + ss = __sync_xor_and_fetch (&ss, uc); + + // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i + // CHECK: [[RES3:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i + // CHECK: [[RET3:%.*]] = cir.binop(xor, [[RES3]], [[VAL3]]) : !u16i + // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16 + // LLVM: [[RES3:%.*]] = atomicrmw xor ptr @us, i16 [[CONV3]] seq_cst, align 2 + // LLVM: [[RET3:%.*]] = xor i16 [[RES3]], [[CONV3]] + // LLVM: store i16 [[RET3]], ptr @us + us = __sync_xor_and_fetch (&us, uc); + + // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i + // CHECK: [[RES4:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i + // CHECK: [[RET4:%.*]] = cir.binop(xor, [[RES4]], [[VAL4]]) : !s32i + // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32 + // LLVM: [[RES4:%.*]] = atomicrmw xor ptr @si, i32 [[CONV4]] seq_cst, align 4 + // LLVM: [[RET4:%.*]] = xor i32 [[RES4]], [[CONV4]] + // LLVM: store i32 [[RET4]], ptr @si, align 4 + si = __sync_xor_and_fetch (&si, uc); + + // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i + // CHECK: [[RES5:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i + // CHECK: [[RET5:%.*]] = cir.binop(xor, [[RES5]], [[VAL5]]) : !u32i + // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32 + // LLVM: [[RES5:%.*]] = atomicrmw xor ptr @ui, i32 [[CONV5]] seq_cst, align 4 + // LLVM: [[RET5:%.*]] = xor i32 [[RES5]], [[CONV5]] + // LLVM: store i32 [[RET5]], ptr @ui, align 4 + ui = __sync_xor_and_fetch (&ui, uc); + + // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i + // CHECK: [[RES6:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i + // CHECK: [[RET6:%.*]] = cir.binop(xor, [[RES6]], [[VAL6]]) : !s64i + // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64 + // LLVM: [[RES6:%.*]] = atomicrmw xor ptr @sll, i64 [[CONV6]] seq_cst, align 8 + // LLVM: [[RET6:%.*]] = xor i64 [[RES6]], [[CONV6]] + // LLVM: store i64 [[RET6]], ptr @sll, align 8 + sll = __sync_xor_and_fetch (&sll, uc); + + // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i + // CHECK: [[RES7:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i + // CHECK: [[RET7:%.*]] = cir.binop(xor, [[RES7]], [[VAL7]]) : !u64i + // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64 + // LLVM: [[RES7:%.*]] = atomicrmw xor ptr @ull, i64 [[CONV7]] seq_cst, align 8 + // LLVM: [[RET7:%.*]] = xor i64 [[RES7]], [[CONV7]] + // LLVM: store i64 [[RET7]], ptr @ull, align 8 + ull = __sync_xor_and_fetch (&ull, uc); + } From e1a9263d7f9bd7644822f29dafcc20169aa57c90 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Tue, 11 Feb 2025 21:08:20 +0800 Subject: [PATCH 2240/2301] [CIR][CIRGen] Support __builtin_isinf_sign (#1142) --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 17 +++++++++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 4 +-- clang/test/CIR/CodeGen/builtin-isinf-sign.c | 29 +++++++++++++++++++ 3 files changed, 47 insertions(+), 3 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-isinf-sign.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index bd34f4430c3a..587eaf654e87 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1424,6 +1424,23 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_matrix_column_major_store: llvm_unreachable("BI__builtin_matrix_column_major_store NYI"); + case Builtin::BI__builtin_isinf_sign: { + CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); + mlir::Location Loc = getLoc(E->getBeginLoc()); + mlir::Value Arg = emitScalarExpr(E->getArg(0)); + mlir::Value AbsArg = builder.create(Loc, Arg.getType(), Arg); + mlir::Value IsInf = + builder.createIsFPClass(Loc, AbsArg, FPClassTest::fcInf); + mlir::Value IsNeg = emitSignBit(Loc, *this, Arg); + auto IntTy = convertType(E->getType()); + auto Zero = builder.getNullValue(IntTy, Loc); + auto One = builder.getConstant(Loc, cir::IntAttr::get(IntTy, 1)); + auto NegativeOne = builder.getConstant(Loc, cir::IntAttr::get(IntTy, -1)); + auto SignResult = builder.createSelect(Loc, IsNeg, NegativeOne, One); + auto Result = builder.createSelect(Loc, IsInf, SignResult, Zero); + return RValue::get(Result); + } + case Builtin::BI__builtin_flt_rounds: llvm_unreachable("BI__builtin_flt_rounds NYI"); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 833d256d0404..7832d77335e0 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -4067,9 +4067,7 @@ mlir::LogicalResult CIRToLLVMSignBitOpLowering::matchAndRewrite( auto zero = rewriter.create(op->getLoc(), intTy, 0); auto cmpResult = rewriter.create( op.getLoc(), mlir::LLVM::ICmpPredicate::slt, bitcast.getResult(), zero); - auto converted = rewriter.create( - op.getLoc(), getTypeConverter()->convertType(op.getType()), cmpResult); - rewriter.replaceOp(op, converted); + rewriter.replaceOp(op, cmpResult); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/builtin-isinf-sign.c b/clang/test/CIR/CodeGen/builtin-isinf-sign.c new file mode 100644 index 000000000000..887cf12d875c --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-isinf-sign.c @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +int test_float_isinf_sign(float x) { + // CIR-LABEL: test_float_isinf_sign + // CIR: %[[TMP0:.*]] = cir.load %{{.*}} : !cir.ptr, !cir.float + // CIR: %[[TMP1:.*]] = cir.fabs %[[TMP0]] : !cir.float + // CIR: %[[IS_INF:.*]] = cir.is_fp_class %[[TMP1]], 516 : (!cir.float) -> !cir.bool + // CIR: %[[IS_NEG:.*]] = cir.signbit %[[TMP0]] : !cir.float -> !cir.bool + // CIR: %[[C_0:.*]] = cir.const #cir.int<0> : !s32i + // CIR: %[[C_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[C_m1:.*]] = cir.const #cir.int<-1> : !s32i + // CIR: %[[TMP4:.*]] = cir.select if %[[IS_NEG]] then %[[C_m1]] else %[[C_1]] : (!cir.bool, !s32i, !s32i) -> !s32i + // CIR: %[[RET:.*]] = cir.select if %[[IS_INF]] then %[[TMP4]] else %[[C_0]] : (!cir.bool, !s32i, !s32i) -> !s32i + // CIR: cir.store %[[RET]], %{{.*}} : !s32i, !cir.ptr + + // LLVM-LABEL: test_float_isinf_sign + // LLVM: %[[TMP0:.*]] = load float, ptr %{{.*}} + // LLVM: %[[TMP1:.*]] = call float @llvm.fabs.f32(float %[[TMP0]]) + // LLVM: %[[IS_INF:.*]] = call i1 @llvm.is.fpclass.f32(float %[[TMP1]], i32 516) + // LLVM: %[[TMP1:.*]] = bitcast float %[[TMP0]] to i32 + // LLVM: %[[IS_NEG:.*]] = icmp slt i32 %[[TMP1]], 0 + // LLVM: %[[TMP2:.*]] = select i1 %[[IS_NEG]], i32 -1, i32 1 + // LLVM: %[[RET:.*]] = select i1 %[[IS_INF]], i32 %[[TMP2]], i32 0 + // LLVM: store i32 %[[RET]], ptr %{{.*}}, align 4 + return __builtin_isinf_sign(x); +} From 2c7e65ea621a2649399c5cddbf8d9278d6a485ee Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Tue, 11 Feb 2025 14:28:09 +0100 Subject: [PATCH 2241/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vzip_v and neon_vzipq_v (#1320) Lowering `builtin_neon_vzip_v` and `builtin_neon_vzipq_v` [Clang CGBuiltin Implementation](https://github.com/llvm/clangir/blob/2b1a638ea07ca10c5727ea835bfbe17b881175cc/clang/lib/CodeGen/CGBuiltin.cpp#L8628-L8644) --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 22 +- clang/test/CIR/CodeGen/AArch64/neon-misc.c | 227 ++++++++++++++++++ 2 files changed, 248 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index c76ca37a5425..e50daeee1709 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4594,7 +4594,27 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vzip_v: case NEON::BI__builtin_neon_vzipq_v: { - llvm_unreachable("NEON::BI__builtin_neon_vzipq_v NYI"); + Ops[1] = builder.createBitcast(Ops[1], ty); + Ops[2] = builder.createBitcast(Ops[2], ty); + // Adding a bitcast here as Ops[0] might be a void pointer. + mlir::Value baseAddr = + builder.createBitcast(Ops[0], builder.getPointerTo(ty)); + mlir::Value sv; + mlir::Location loc = getLoc(E->getExprLoc()); + + for (unsigned vi = 0; vi != 2; ++vi) { + llvm::SmallVector indices; + for (unsigned i = 0, e = vTy.getSize(); i != e; i += 2) { + indices.push_back((i + vi * e) >> 1); + indices.push_back(((i + vi * e) >> 1) + e); + } + cir::ConstantOp idx = builder.getConstInt(loc, SInt32Ty, vi); + mlir::Value addr = builder.create( + loc, baseAddr.getType(), baseAddr, idx); + sv = builder.createVecShuffle(loc, Ops[1], Ops[2], indices); + (void)builder.CIRBaseBuilderTy::createStore(loc, sv, addr); + } + return sv; } case NEON::BI__builtin_neon_vqtbl1q_v: { llvm_unreachable("NEON::BI__builtin_neon_vqtbl1q_v NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-misc.c b/clang/test/CIR/CodeGen/AArch64/neon-misc.c index 18cd9298be12..eefca6dd4a7e 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-misc.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-misc.c @@ -703,6 +703,233 @@ float32x4x2_t test_vuzpq_f32(float32x4_t a, float32x4_t b) { // LLVM-NEXT: ret %struct.float32x4x2_t [[RET]] } +uint8x8x2_t test_vzip_u8(uint8x8_t a, uint8x8_t b) { + return vzip_u8(a, b); + + // CIR-LABEL:vzip_u8 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<8> : !s32i, #cir.int<1> : !s32i, #cir.int<9> : !s32i, + // CIR-SAME: #cir.int<2> : !s32i, #cir.int<10> : !s32i, #cir.int<3> : !s32i, + // CIR-SAME: #cir.int<11> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<4> : !s32i, #cir.int<12> : !s32i, #cir.int<5> : !s32i, #cir.int<13> : !s32i, + // CIR-SAME: #cir.int<6> : !s32i, #cir.int<14> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i] : !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vzip_u8(<8 x i8>{{.*}}[[A:%.*]], <8 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[VTRN:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[B]], + // LLVM-SAME: + // LLVM: store <8 x i8> [[VTRN]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<8 x i8>, ptr [[RES]], i64 1 + // LLVM: [[VTRN1:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[B]], <8 x i32> + // LLVM: store <8 x i8> [[VTRN1]], ptr [[RES1]], align 8 + // LLVM-NEXT: [[RET:%.*]] = load %struct.uint8x8x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.uint8x8x2_t [[RET]] +} + +uint16x4x2_t test_vzip_u16(uint16x4_t a, uint16x4_t b) { + return vzip_u16(a, b); + + // CIR-LABEL: vzip_u16 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<4> : !s32i, #cir.int<1> : !s32i, #cir.int<5> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<2> : !s32i, #cir.int<6> : !s32i, #cir.int<3> : !s32i, #cir.int<7> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vzip_u16(<4 x i16>{{.*}}[[A:%.*]], <4 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[vzip:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[B]], + // LLVM-SAME: <4 x i32> + // LLVM: store <4 x i16> [[vzip]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<4 x i16>, ptr [[RES]], i64 1 + // LLVM: [[vzip1:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[B]], + // LLVM-SAME: <4 x i32> + // LLVM: store <4 x i16> [[vzip1]], ptr [[RES1]], align 8 + // LLVM-NEXT: [[RET:%.*]] = load %struct.uint16x4x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.uint16x4x2_t [[RET]] +} + +int32x2x2_t test_vzip_s32(int32x2_t a, int32x2_t b) { + return vzip_s32(a, b); + + // CIR-LABEL: vzip_s32 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vzip_s32(<2 x i32>{{.*}}[[A:%.*]], <2 x i32>{{.*}}[[B:%.*]]) + // LLVM: [[vzip:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: store <2 x i32> [[vzip]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<2 x i32>, ptr [[RES]], i64 1 + // LLVM: [[vzip1:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: store <2 x i32> [[vzip1]], ptr [[RES1]], align 8 + // LLVM-NEXT: [[RET:%.*]] = load %struct.int32x2x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.int32x2x2_t [[RET]] +} + +float32x2x2_t test_vzip_f32(float32x2_t a, float32x2_t b) { + return vzip_f32(a, b); + + // CIR-LABEL: vzip_f32 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<2> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<1> : !s32i, #cir.int<3> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vzip_f32(<2 x float>{{.*}}[[A:%.*]], <2 x float>{{.*}}[[B:%.*]]) + // LLVM: [[vzip:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: store <2 x float> [[vzip]], ptr [[RES:%.*]], align 8 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<2 x float>, ptr [[RES]], i64 1 + // LLVM: [[vzip1:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[B]], + // LLVM-SAME: <2 x i32> + // LLVM: store <2 x float> [[vzip1]], ptr [[RES1]], align 8 + // LLVM-NEXT: [[RET:%.*]] = load %struct.float32x2x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.float32x2x2_t [[RET]] +} + +uint8x16x2_t test_vzipq_u8(uint8x16_t a, uint8x16_t b) { + return vzipq_u8(a, b); + + // CIR-LABEL: vzipq_u8 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<16> : !s32i, #cir.int<1> : !s32i, #cir.int<17> : !s32i, + // CIR-SAME: #cir.int<2> : !s32i, #cir.int<18> : !s32i, #cir.int<3> : !s32i, #cir.int<19> : !s32i, + // CIR-SAME: #cir.int<4> : !s32i, #cir.int<20> : !s32i, #cir.int<5> : !s32i, #cir.int<21> : !s32i, + // CIR-SAME: #cir.int<6> : !s32i, #cir.int<22> : !s32i, #cir.int<7> : !s32i, #cir.int<23> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<8> : !s32i, #cir.int<24> : !s32i, #cir.int<9> : !s32i, #cir.int<25> : !s32i, + // CIR-SAME: #cir.int<10> : !s32i, #cir.int<26> : !s32i, #cir.int<11> : !s32i, #cir.int<27> : !s32i, + // CIR-SAME: #cir.int<12> : !s32i, #cir.int<28> : !s32i, #cir.int<13> : !s32i, #cir.int<29> : !s32i, + // CIR-SAME: #cir.int<14> : !s32i, #cir.int<30> : !s32i, #cir.int<15> : !s32i, #cir.int<31> : !s32i] : !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vzipq_u8(<16 x i8>{{.*}}[[A:%.*]], <16 x i8>{{.*}}[[B:%.*]]) + // LLVM: [[vzip:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[B]], + // LLVM-SAME: <16 x i32> + // LLVM: store <16 x i8> [[vzip]], ptr [[RES:%.*]], align 16 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<16 x i8>, ptr [[RES]], i64 1 + // LLVM: [[vzip1:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[B]], + // LLVM-SAME: <16 x i32> + // LLVM: store <16 x i8> [[vzip1]], ptr [[RES1]], align 16 + // LLVM-NEXT: [[RET:%.*]] = load %struct.uint8x16x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.uint8x16x2_t [[RET]] +} + +int16x8x2_t test_vzipq_s16(int16x8_t a, int16x8_t b) { + return vzipq_s16(a, b); + + // CIR-LABEL: vzipq_s16 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<8> : !s32i, #cir.int<1> : !s32i, #cir.int<9> : !s32i, + // CIR-SAME: #cir.int<2> : !s32i, #cir.int<10> : !s32i, #cir.int<3> : !s32i, + // CIR-SAME: #cir.int<11> : !s32i] : !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<4> : !s32i, #cir.int<12> : !s32i, #cir.int<5> : !s32i, #cir.int<13> : !s32i, + // CIR-SAME: #cir.int<6> : !s32i, #cir.int<14> : !s32i, #cir.int<7> : !s32i, + // CIR-SAME: #cir.int<15> : !s32i] : !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + + // LLVM: {{.*}}test_vzipq_s16(<8 x i16>{{.*}}[[A:%.*]], <8 x i16>{{.*}}[[B:%.*]]) + // LLVM: [[vzip:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[B]], + // LLVM-SAME: <8 x i32> + // LLVM: store <8 x i16> [[vzip]], ptr [[RES:%.*]], align 16 + // LLVM: [[RES1:%.*]] = getelementptr {{.*}}<8 x i16>, ptr [[RES]], i64 1 + // LLVM: [[vzip1:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[B]], <8 x i32> + // LLVM: store <8 x i16> [[vzip1]], ptr [[RES1]], align 16 + // LLVM-NEXT: [[RET:%.*]] = load %struct.int16x8x2_t, ptr {{.*}} + // LLVM-NEXT: ret %struct.int16x8x2_t [[RET]] +} + +uint32x4x2_t test_vzipq_u32(uint32x4_t a, uint32x4_t b) { + return vzipq_u32(a, b); + + // CIR-LABEL: vzipq_u32 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<4> : !s32i, #cir.int<1> : !s32i, #cir.int<5> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<2> : !s32i, #cir.int<6> : !s32i, #cir.int<3> : !s32i, #cir.int<7> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + // LLVM: [[RET:%.*]] = load %struct.uint32x4x2_t, ptr {{.*}} + // LLVM: ret %struct.uint32x4x2_t [[RET]] +} + +float32x4x2_t test_vzipq_f32(float32x4_t a, float32x4_t b) { + return vzipq_f32(a, b); + + // CIR-LABEL: vzipq_f32 + // CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr> + // CIR: [[ZERO:%.*]] = cir.const #cir.int<0> : !s32i + // CIR: [[ADDR:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ZERO]] : !s32i), !cir.ptr> + // CIR: [[RES:%.*]] = cir.vec.shuffle([[INP1:%.*]], [[INP2:%.*]] : !cir.vector) + // CIR-SAME: [#cir.int<0> : !s32i, #cir.int<4> : !s32i, #cir.int<1> : !s32i, #cir.int<5> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES]], [[ADDR]] : !cir.vector, !cir.ptr> + // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[ADDR1:%.*]] = cir.ptr_stride([[PTR]] : !cir.ptr>, [[ONE]] : !s32i), !cir.ptr> + // CIR: [[RES1:%.*]] = cir.vec.shuffle([[INP1]], [[INP2]] : !cir.vector) + // CIR-SAME: [#cir.int<2> : !s32i, #cir.int<6> : !s32i, #cir.int<3> : !s32i, #cir.int<7> : !s32i] : + // CIR-SAME: !cir.vector + // CIR: cir.store [[RES1]], [[ADDR1]] : !cir.vector, !cir.ptr> + // LLVM: [[RET:%.*]] = load %struct.float32x4x2_t, ptr {{.*}} + // LLVM: ret %struct.float32x4x2_t [[RET]] +} + uint8x8_t test_vqmovun_s16(int16x8_t a) { return vqmovun_s16(a); From dde05491e4e9d5eb08aafd74ab72f09fdcf1ec2d Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Tue, 11 Feb 2025 05:29:22 -0800 Subject: [PATCH 2242/2301] [CIR][NFC] Replace CIR attr visitor base class with a type switch (#1330) This change refactors the CirAttrVisitor use to eliminate the base class and instead use an llvm::TypeSwitch in the derived class to visit CIR attributes. No observable change is intended. --- .../clang/CIR/Dialect/IR/CIRAttrVisitor.h | 47 ------------ .../clang/CIR/Dialect/IR/CMakeLists.txt | 1 - .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 71 ++++++++++--------- mlir/test/mlir-tblgen/attrdefs.td | 8 --- mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp | 23 ------ 5 files changed, 38 insertions(+), 112 deletions(-) delete mode 100644 clang/include/clang/CIR/Dialect/IR/CIRAttrVisitor.h diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrVisitor.h b/clang/include/clang/CIR/Dialect/IR/CIRAttrVisitor.h deleted file mode 100644 index 106fb3d0ed17..000000000000 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrVisitor.h +++ /dev/null @@ -1,47 +0,0 @@ -//===- CIRAttrVisitor.h - Visitor for CIR attributes ------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file defines the CirAttrVisitor interface. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_CIR_DIALECT_IR_CIRATTRVISITOR_H -#define LLVM_CLANG_CIR_DIALECT_IR_CIRATTRVISITOR_H - -#include "clang/CIR/Dialect/IR/CIRAttrs.h" - -namespace cir { - -#define DISPATCH(NAME) return getImpl()->visitCir##NAME(cirAttr); - -template class CirAttrVisitor { -public: - RetTy visit(mlir::Attribute attr) { -#define ATTRDEF(NAME) \ - if (const auto cirAttr = mlir::dyn_cast(attr)) \ - DISPATCH(NAME); -#include "clang/CIR/Dialect/IR/CIRAttrDefsList.inc" - llvm_unreachable("unhandled attribute type"); - } - - // If the implementation chooses not to implement a certain visit - // method, fall back to the parent. -#define ATTRDEF(NAME) \ - RetTy visitCir##NAME(NAME cirAttr) { DISPATCH(Attr); } -#include "clang/CIR/Dialect/IR/CIRAttrDefsList.inc" - - RetTy visitCirAttr(mlir::Attribute attr) { return RetTy(); } - - ImplClass *getImpl() { return static_cast(this); } -}; - -#undef DISPATCH - -} // namespace cir - -#endif // LLVM_CLANG_CIR_DIALECT_IR_CIRATTRVISITOR_H diff --git a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt index 014bb3d9b03c..3d43b06c6217 100644 --- a/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt +++ b/clang/include/clang/CIR/Dialect/IR/CMakeLists.txt @@ -26,7 +26,6 @@ mlir_tablegen(CIROpsStructs.h.inc -gen-attrdef-decls) mlir_tablegen(CIROpsStructs.cpp.inc -gen-attrdef-defs) mlir_tablegen(CIROpsAttributes.h.inc -gen-attrdef-decls) mlir_tablegen(CIROpsAttributes.cpp.inc -gen-attrdef-defs) -mlir_tablegen(CIRAttrDefsList.inc -gen-attrdef-list) add_public_tablegen_target(MLIRCIREnumsGen) clang_tablegen(CIRBuiltinsLowering.inc -gen-cir-builtins-lowering diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 7832d77335e0..c79cc75e7f31 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -41,7 +41,6 @@ #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" -#include "clang/CIR/Dialect/IR/CIRAttrVisitor.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/MissingFeatures.h" @@ -52,6 +51,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" +#include "llvm/ADT/TypeSwitch.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/Support/Casting.h" @@ -426,7 +426,7 @@ emitCirAttrToMemory(mlir::Operation *parentOp, mlir::Attribute attr, } /// Switches on the type of attribute and calls the appropriate conversion. -class CirAttrToValue : public CirAttrVisitor { +class CirAttrToValue { public: CirAttrToValue(mlir::Operation *parentOp, mlir::ConversionPatternRewriter &rewriter, @@ -435,19 +435,29 @@ class CirAttrToValue : public CirAttrVisitor { : parentOp(parentOp), rewriter(rewriter), converter(converter), dataLayout(dataLayout) {} - mlir::Value visitCirIntAttr(cir::IntAttr attr); - mlir::Value visitCirFPAttr(cir::FPAttr attr); - mlir::Value visitCirConstPtrAttr(cir::ConstPtrAttr attr); - mlir::Value visitCirConstStructAttr(cir::ConstStructAttr attr); - mlir::Value visitCirConstArrayAttr(cir::ConstArrayAttr attr); - mlir::Value visitCirConstVectorAttr(cir::ConstVectorAttr attr); - mlir::Value visitCirBoolAttr(cir::BoolAttr attr); - mlir::Value visitCirZeroAttr(cir::ZeroAttr attr); - mlir::Value visitCirUndefAttr(cir::UndefAttr attr); - mlir::Value visitCirPoisonAttr(cir::PoisonAttr attr); - mlir::Value visitCirGlobalViewAttr(cir::GlobalViewAttr attr); - mlir::Value visitCirVTableAttr(cir::VTableAttr attr); - mlir::Value visitCirTypeInfoAttr(cir::TypeInfoAttr attr); + mlir::Value visit(mlir::Attribute attr) { + return llvm::TypeSwitch(attr) + .Case( + [&](auto attrT) { return visitCirAttr(attrT); }) + .Default([&](auto attrT) { return mlir::Value(); }); + } + + mlir::Value visitCirAttr(cir::IntAttr attr); + mlir::Value visitCirAttr(cir::FPAttr attr); + mlir::Value visitCirAttr(cir::ConstPtrAttr attr); + mlir::Value visitCirAttr(cir::ConstStructAttr attr); + mlir::Value visitCirAttr(cir::ConstArrayAttr attr); + mlir::Value visitCirAttr(cir::ConstVectorAttr attr); + mlir::Value visitCirAttr(cir::BoolAttr attr); + mlir::Value visitCirAttr(cir::ZeroAttr attr); + mlir::Value visitCirAttr(cir::UndefAttr attr); + mlir::Value visitCirAttr(cir::PoisonAttr attr); + mlir::Value visitCirAttr(cir::GlobalViewAttr attr); + mlir::Value visitCirAttr(cir::VTableAttr attr); + mlir::Value visitCirAttr(cir::TypeInfoAttr attr); private: mlir::Operation *parentOp; @@ -457,21 +467,21 @@ class CirAttrToValue : public CirAttrVisitor { }; /// IntAttr visitor. -mlir::Value CirAttrToValue::visitCirIntAttr(cir::IntAttr intAttr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::IntAttr intAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(intAttr.getType()), intAttr.getValue()); } /// BoolAttr visitor. -mlir::Value CirAttrToValue::visitCirBoolAttr(cir::BoolAttr boolAttr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::BoolAttr boolAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(boolAttr.getType()), boolAttr.getValue()); } /// ConstPtrAttr visitor. -mlir::Value CirAttrToValue::visitCirConstPtrAttr(cir::ConstPtrAttr ptrAttr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::ConstPtrAttr ptrAttr) { auto loc = parentOp->getLoc(); if (ptrAttr.isNullValue()) { return rewriter.create( @@ -486,36 +496,35 @@ mlir::Value CirAttrToValue::visitCirConstPtrAttr(cir::ConstPtrAttr ptrAttr) { } /// FPAttr visitor. -mlir::Value CirAttrToValue::visitCirFPAttr(cir::FPAttr fltAttr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::FPAttr fltAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(fltAttr.getType()), fltAttr.getValue()); } /// ZeroAttr visitor. -mlir::Value CirAttrToValue::visitCirZeroAttr(cir::ZeroAttr zeroAttr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::ZeroAttr zeroAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(zeroAttr.getType())); } /// UndefAttr visitor. -mlir::Value CirAttrToValue::visitCirUndefAttr(cir::UndefAttr undefAttr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::UndefAttr undefAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(undefAttr.getType())); } /// PoisonAttr visitor. -mlir::Value CirAttrToValue::visitCirPoisonAttr(cir::PoisonAttr poisonAttr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::PoisonAttr poisonAttr) { auto loc = parentOp->getLoc(); return rewriter.create( loc, converter->convertType(poisonAttr.getType())); } /// ConstStruct visitor. -mlir::Value -CirAttrToValue::visitCirConstStructAttr(cir::ConstStructAttr constStruct) { +mlir::Value CirAttrToValue::visitCirAttr(cir::ConstStructAttr constStruct) { auto llvmTy = converter->convertType(constStruct.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); @@ -531,7 +540,7 @@ CirAttrToValue::visitCirConstStructAttr(cir::ConstStructAttr constStruct) { } // VTableAttr visitor. -mlir::Value CirAttrToValue::visitCirVTableAttr(cir::VTableAttr vtableArr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::VTableAttr vtableArr) { auto llvmTy = converter->convertType(vtableArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); @@ -545,8 +554,7 @@ mlir::Value CirAttrToValue::visitCirVTableAttr(cir::VTableAttr vtableArr) { } // TypeInfoAttr visitor. -mlir::Value -CirAttrToValue::visitCirTypeInfoAttr(cir::TypeInfoAttr typeinfoArr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::TypeInfoAttr typeinfoArr) { auto llvmTy = converter->convertType(typeinfoArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); @@ -560,8 +568,7 @@ CirAttrToValue::visitCirTypeInfoAttr(cir::TypeInfoAttr typeinfoArr) { } // ConstArrayAttr visitor -mlir::Value -CirAttrToValue::visitCirConstArrayAttr(cir::ConstArrayAttr constArr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::ConstArrayAttr constArr) { auto llvmTy = converter->convertType(constArr.getType()); auto loc = parentOp->getLoc(); mlir::Value result; @@ -604,8 +611,7 @@ CirAttrToValue::visitCirConstArrayAttr(cir::ConstArrayAttr constArr) { } // ConstVectorAttr visitor. -mlir::Value -CirAttrToValue::visitCirConstVectorAttr(cir::ConstVectorAttr constVec) { +mlir::Value CirAttrToValue::visitCirAttr(cir::ConstVectorAttr constVec) { auto llvmTy = converter->convertType(constVec.getType()); auto loc = parentOp->getLoc(); SmallVector mlirValues; @@ -630,8 +636,7 @@ CirAttrToValue::visitCirConstVectorAttr(cir::ConstVectorAttr constVec) { } // GlobalViewAttr visitor. -mlir::Value -CirAttrToValue::visitCirGlobalViewAttr(cir::GlobalViewAttr globalAttr) { +mlir::Value CirAttrToValue::visitCirAttr(cir::GlobalViewAttr globalAttr) { auto module = parentOp->getParentOfType(); mlir::Type sourceType; unsigned sourceAddrSpace = 0; diff --git a/mlir/test/mlir-tblgen/attrdefs.td b/mlir/test/mlir-tblgen/attrdefs.td index e911f70e4358..35d2c49619ee 100644 --- a/mlir/test/mlir-tblgen/attrdefs.td +++ b/mlir/test/mlir-tblgen/attrdefs.td @@ -1,6 +1,5 @@ // RUN: mlir-tblgen -gen-attrdef-decls -I %S/../../include %s | FileCheck %s --check-prefix=DECL // RUN: mlir-tblgen -gen-attrdef-defs -I %S/../../include %s | FileCheck %s --check-prefix=DEF -// RUN: mlir-tblgen -gen-attrdef-list -I %S/../../include %s | FileCheck %s --check-prefix=LIST include "mlir/IR/AttrTypeBase.td" include "mlir/IR/OpBase.td" @@ -20,13 +19,6 @@ include "mlir/IR/OpBase.td" // DEF: ::test::CompoundAAttr, // DEF: ::test::SingleParameterAttr -// LIST: ATTRDEF(IndexAttr) -// LIST: ATTRDEF(SimpleAAttr) -// LIST: ATTRDEF(CompoundAAttr) -// LIST: ATTRDEF(SingleParameterAttr) - -// LIST: #undef ATTRDEF - // DEF-LABEL: ::mlir::OptionalParseResult generatedAttributeParser( // DEF-SAME: ::mlir::AsmParser &parser, // DEF-SAME: ::llvm::StringRef *mnemonic, ::mlir::Type type, diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp index 4f0100fa67cd..3f47c981d6f8 100644 --- a/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp +++ b/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp @@ -1026,23 +1026,6 @@ bool DefGenerator::emitDefs(StringRef selectedDialect) { return false; } -bool DefGenerator::emitList(StringRef selectedDialect) { - emitSourceFileHeader(("List of " + defType + "Def Definitions").str(), os); - - SmallVector defs; - collectAllDefs(selectedDialect, defRecords, defs); - if (defs.empty()) - return false; - - auto interleaveFn = [&](const AttrOrTypeDef &def) { - os << defType.upper() << "DEF(" << def.getCppClassName() << ")"; - }; - llvm::interleave(defs, os, interleaveFn, "\n"); - os << "\n\n"; - os << "#undef " << defType.upper() << "DEF" << "\n"; - return false; -} - //===----------------------------------------------------------------------===// // Type Constraints //===----------------------------------------------------------------------===// @@ -1117,12 +1100,6 @@ static mlir::GenRegistration AttrDefGenerator generator(records, os); return generator.emitDecls(attrDialect); }); -static mlir::GenRegistration - genAttrList("gen-attrdef-list", "Generate an AttrDef list", - [](const RecordKeeper &records, raw_ostream &os) { - AttrDefGenerator generator(records, os); - return generator.emitList(attrDialect); - }); //===----------------------------------------------------------------------===// // TypeDef From 37fab7cb0fc716649fcc049c58a68629410dac87 Mon Sep 17 00:00:00 2001 From: FantasqueX Date: Tue, 11 Feb 2025 22:47:09 +0800 Subject: [PATCH 2243/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vsra_n_v and neon_vsraq_n_v (#1326) Clang CGBuiltin Implementation: https://github.com/llvm/clangir/blob/a7383c9d05165d16edba857ddc86e5d29d94d2cc/clang/lib/CodeGen/CGBuiltin.cpp#L9562-L9566 Similar commit: https://github.com/llvm/clangir/commit/f9dee671d97b6184261358ab8815603327a2c229 Closes: https://github.com/llvm/clangir/issues/1325 --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 8 +- clang/test/CIR/CodeGen/AArch64/neon.c | 419 +++++++++++------- 2 files changed, 264 insertions(+), 163 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index e50daeee1709..36e68212f696 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4410,8 +4410,12 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NEON::BI__builtin_neon_vsliq_n_v NYI"); } case NEON::BI__builtin_neon_vsra_n_v: - case NEON::BI__builtin_neon_vsraq_n_v: - llvm_unreachable("NEON::BI__builtin_neon_vsraq_n_v NYI"); + case NEON::BI__builtin_neon_vsraq_n_v: { + Ops[0] = builder.createBitcast(Ops[0], vTy); + Ops[1] = emitNeonRShiftImm(*this, Ops[1], Ops[2], vTy, usgn, + getLoc(E->getExprLoc())); + return builder.createAdd(Ops[0], Ops[1]); + } case NEON::BI__builtin_neon_vrsra_n_v: case NEON::BI__builtin_neon_vrsraq_n_v: { llvm::SmallVector tmpOps = {Ops[1], Ops[2]}; diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 2a7d120a0bbe..35c411cbeab4 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -5781,157 +5781,242 @@ uint64x2_t test_vshrq_n_u64(uint64x2_t a) { // LLVM: ret <2 x i64> [[VSHR_N]] } -// NYI-LABEL: @test_vsra_n_s8( -// NYI: [[VSRA_N:%.*]] = ashr <8 x i8> %b, -// NYI: [[TMP0:%.*]] = add <8 x i8> %a, [[VSRA_N]] -// NYI: ret <8 x i8> [[TMP0]] -// int8x8_t test_vsra_n_s8(int8x8_t a, int8x8_t b) { -// return vsra_n_s8(a, b, 3); -// } +int8x8_t test_vsra_n_s8(int8x8_t a, int8x8_t b) { + return vsra_n_s8(a, b, 3); -// NYI-LABEL: @test_vsra_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> -// NYI: [[VSRA_N:%.*]] = ashr <4 x i16> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <4 x i16> [[TMP2]], [[VSRA_N]] -// NYI: ret <4 x i16> [[TMP4]] -// int16x4_t test_vsra_n_s16(int16x4_t a, int16x4_t b) { -// return vsra_n_s16(a, b, 3); -// } + // CIR-LABEL: vsra_n_s8 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector -// NYI-LABEL: @test_vsra_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> -// NYI: [[VSRA_N:%.*]] = ashr <2 x i32> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <2 x i32> [[TMP2]], [[VSRA_N]] -// NYI: ret <2 x i32> [[TMP4]] -// int32x2_t test_vsra_n_s32(int32x2_t a, int32x2_t b) { -// return vsra_n_s32(a, b, 3); -// } + // LLVM-LABEL: @test_vsra_n_s8( + // LLVM: [[VSRA_N:%.*]] = ashr <8 x i8> %1, splat (i8 3) + // LLVM: [[TMP0:%.*]] = add <8 x i8> %0, [[VSRA_N]] + // LLVM: ret <8 x i8> [[TMP0]] +} -// NYI-LABEL: @test_vsraq_n_s8( -// NYI: [[VSRA_N:%.*]] = ashr <16 x i8> %b, -// NYI: [[TMP0:%.*]] = add <16 x i8> %a, [[VSRA_N]] -// NYI: ret <16 x i8> [[TMP0]] -// int8x16_t test_vsraq_n_s8(int8x16_t a, int8x16_t b) { -// return vsraq_n_s8(a, b, 3); -// } +int16x4_t test_vsra_n_s16(int16x4_t a, int16x4_t b) { + return vsra_n_s16(a, b, 3); -// NYI-LABEL: @test_vsraq_n_s16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> -// NYI: [[VSRA_N:%.*]] = ashr <8 x i16> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <8 x i16> [[TMP2]], [[VSRA_N]] -// NYI: ret <8 x i16> [[TMP4]] -// int16x8_t test_vsraq_n_s16(int16x8_t a, int16x8_t b) { -// return vsraq_n_s16(a, b, 3); -// } + // CIR-LABEL: vsra_n_s16 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector -// NYI-LABEL: @test_vsraq_n_s32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> -// NYI: [[VSRA_N:%.*]] = ashr <4 x i32> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <4 x i32> [[TMP2]], [[VSRA_N]] -// NYI: ret <4 x i32> [[TMP4]] -// int32x4_t test_vsraq_n_s32(int32x4_t a, int32x4_t b) { -// return vsraq_n_s32(a, b, 3); -// } + // LLVM-LABEL: test_vsra_n_s16 + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> %1 to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> + // LLVM: [[VSRA_N:%.*]] = ashr <4 x i16> [[TMP3]], splat (i16 3) + // LLVM: [[TMP4:%.*]] = add <4 x i16> [[TMP2]], [[VSRA_N]] + // LLVM: ret <4 x i16> [[TMP4]] +} -// NYI-LABEL: @test_vsraq_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> -// NYI: [[VSRA_N:%.*]] = ashr <2 x i64> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <2 x i64> [[TMP2]], [[VSRA_N]] -// NYI: ret <2 x i64> [[TMP4]] -// int64x2_t test_vsraq_n_s64(int64x2_t a, int64x2_t b) { -// return vsraq_n_s64(a, b, 3); -// } - -// NYI-LABEL: @test_vsra_n_u8( -// NYI: [[VSRA_N:%.*]] = lshr <8 x i8> %b, -// NYI: [[TMP0:%.*]] = add <8 x i8> %a, [[VSRA_N]] -// NYI: ret <8 x i8> [[TMP0]] -// uint8x8_t test_vsra_n_u8(uint8x8_t a, uint8x8_t b) { -// return vsra_n_u8(a, b, 3); -// } -// NYI-LABEL: @test_vsra_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> -// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> -// NYI: [[VSRA_N:%.*]] = lshr <4 x i16> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <4 x i16> [[TMP2]], [[VSRA_N]] -// NYI: ret <4 x i16> [[TMP4]] -// uint16x4_t test_vsra_n_u16(uint16x4_t a, uint16x4_t b) { -// return vsra_n_u16(a, b, 3); -// } +int32x2_t test_vsra_n_s32(int32x2_t a, int32x2_t b) { + return vsra_n_s32(a, b, 3); -// NYI-LABEL: @test_vsra_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> -// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> -// NYI: [[VSRA_N:%.*]] = lshr <2 x i32> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <2 x i32> [[TMP2]], [[VSRA_N]] -// NYI: ret <2 x i32> [[TMP4]] -// uint32x2_t test_vsra_n_u32(uint32x2_t a, uint32x2_t b) { -// return vsra_n_u32(a, b, 3); -// } + // CIR-LABEL: vsra_n_s32 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector -// NYI-LABEL: @test_vsraq_n_u8( -// NYI: [[VSRA_N:%.*]] = lshr <16 x i8> %b, -// NYI: [[TMP0:%.*]] = add <16 x i8> %a, [[VSRA_N]] -// NYI: ret <16 x i8> [[TMP0]] -// uint8x16_t test_vsraq_n_u8(uint8x16_t a, uint8x16_t b) { -// return vsraq_n_u8(a, b, 3); -// } + // LLVM-LABEL: test_vsra_n_s32 + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> %1 to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> + // LLVM: [[VSRA_N:%.*]] = ashr <2 x i32> [[TMP3]], splat (i32 3) + // LLVM: [[TMP4:%.*]] = add <2 x i32> [[TMP2]], [[VSRA_N]] + // LLVM: ret <2 x i32> [[TMP4]] +} -// NYI-LABEL: @test_vsraq_n_u16( -// NYI: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> -// NYI: [[VSRA_N:%.*]] = lshr <8 x i16> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <8 x i16> [[TMP2]], [[VSRA_N]] -// NYI: ret <8 x i16> [[TMP4]] -// uint16x8_t test_vsraq_n_u16(uint16x8_t a, uint16x8_t b) { -// return vsraq_n_u16(a, b, 3); -// } +int8x16_t test_vsraq_n_s8(int8x16_t a, int8x16_t b) { + return vsraq_n_s8(a, b, 3); -// NYI-LABEL: @test_vsraq_n_u32( -// NYI: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> -// NYI: [[VSRA_N:%.*]] = lshr <4 x i32> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <4 x i32> [[TMP2]], [[VSRA_N]] -// NYI: ret <4 x i32> [[TMP4]] -// uint32x4_t test_vsraq_n_u32(uint32x4_t a, uint32x4_t b) { -// return vsraq_n_u32(a, b, 3); -// } + // CIR-LABEL: vsraq_n_s8 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector -// NYI-LABEL: @test_vsraq_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> -// NYI: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> -// NYI: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> -// NYI: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> -// NYI: [[VSRA_N:%.*]] = lshr <2 x i64> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <2 x i64> [[TMP2]], [[VSRA_N]] -// NYI: ret <2 x i64> [[TMP4]] -// uint64x2_t test_vsraq_n_u64(uint64x2_t a, uint64x2_t b) { -// return vsraq_n_u64(a, b, 3); -// } + // LLVM-LABEL: test_vsraq_n_s8 + // LLVM: [[VSRA_N:%.*]] = ashr <16 x i8> %1, splat (i8 3) + // LLVM: [[TMP0:%.*]] = add <16 x i8> %0, [[VSRA_N]] + // LLVM: ret <16 x i8> [[TMP0]] +} + +int16x8_t test_vsraq_n_s16(int16x8_t a, int16x8_t b) { + return vsraq_n_s16(a, b, 3); + + // CIR-LABEL: vsraq_n_s16 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsraq_n_s16 + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> %1 to <16 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> + // LLVM: [[VSRA_N:%.*]] = ashr <8 x i16> [[TMP3]], splat (i16 3) + // LLVM: [[TMP4:%.*]] = add <8 x i16> [[TMP2]], [[VSRA_N]] + // LLVM: ret <8 x i16> [[TMP4]] +} + +int32x4_t test_vsraq_n_s32(int32x4_t a, int32x4_t b) { + return vsraq_n_s32(a, b, 3); + + // CIR-LABEL: vsraq_n_s32 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsraq_n_s32 + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> %1 to <16 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> + // LLVM: [[VSRA_N:%.*]] = ashr <4 x i32> [[TMP3]], splat (i32 3) + // LLVM: [[TMP4:%.*]] = add <4 x i32> [[TMP2]], [[VSRA_N]] + // LLVM: ret <4 x i32> [[TMP4]] +} + +int64x2_t test_vsraq_n_s64(int64x2_t a, int64x2_t b) { + return vsraq_n_s64(a, b, 3); + + // CIR-LABEL: vsraq_n_s64 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsraq_n_s64 + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> %1 to <16 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> + // LLVM: [[VSRA_N:%.*]] = ashr <2 x i64> [[TMP3]], splat (i64 3) + // LLVM: [[TMP4:%.*]] = add <2 x i64> [[TMP2]], [[VSRA_N]] + // LLVM: ret <2 x i64> [[TMP4]] +} + +uint8x8_t test_vsra_n_u8(uint8x8_t a, uint8x8_t b) { + return vsra_n_u8(a, b, 3); + + // CIR-LABEL: vsra_n_u8 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: @test_vsra_n_u8( + // LLVM: [[VSRA_N:%.*]] = lshr <8 x i8> %1, splat (i8 3) + // LLVM: [[TMP0:%.*]] = add <8 x i8> %0, [[VSRA_N]] + // LLVM: ret <8 x i8> [[TMP0]] +} + +uint16x4_t test_vsra_n_u16(uint16x4_t a, uint16x4_t b) { + return vsra_n_u16(a, b, 3); + + // CIR-LABEL: vsra_n_u16 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsra_n_u16 + // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i16> %1 to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> + // LLVM: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> + // LLVM: [[VSRA_N:%.*]] = lshr <4 x i16> [[TMP3]], splat (i16 3) + // LLVM: [[TMP4:%.*]] = add <4 x i16> [[TMP2]], [[VSRA_N]] + // LLVM: ret <4 x i16> [[TMP4]] +} + +uint32x2_t test_vsra_n_u32(uint32x2_t a, uint32x2_t b) { + return vsra_n_u32(a, b, 3); + + // CIR-LABEL: vsra_n_u32 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsra_n_u32 + // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i32> %1 to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> + // LLVM: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> + // LLVM: [[VSRA_N:%.*]] = lshr <2 x i32> [[TMP3]], splat (i32 3) + // LLVM: [[TMP4:%.*]] = add <2 x i32> [[TMP2]], [[VSRA_N]] + // LLVM: ret <2 x i32> [[TMP4]] +} + +uint8x16_t test_vsraq_n_u8(uint8x16_t a, uint8x16_t b) { + return vsraq_n_u8(a, b, 3); + + // CIR-LABEL: vsraq_n_u8 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsraq_n_u8 + // LLVM: [[VSRA_N:%.*]] = lshr <16 x i8> %1, splat (i8 3) + // LLVM: [[TMP0:%.*]] = add <16 x i8> %0, [[VSRA_N]] + // LLVM: ret <16 x i8> [[TMP0]] +} + +uint16x8_t test_vsraq_n_u16(uint16x8_t a, uint16x8_t b) { + return vsraq_n_u16(a, b, 3); + + // CIR-LABEL: vsraq_n_u16 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsraq_n_u16 + // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <8 x i16> %1 to <16 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> + // LLVM: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> + // LLVM: [[VSRA_N:%.*]] = lshr <8 x i16> [[TMP3]], splat (i16 3) + // LLVM: [[TMP4:%.*]] = add <8 x i16> [[TMP2]], [[VSRA_N]] + // LLVM: ret <8 x i16> [[TMP4]] +} + +uint32x4_t test_vsraq_n_u32(uint32x4_t a, uint32x4_t b) { + return vsraq_n_u32(a, b, 3); + + // CIR-LABEL: vsraq_n_u32 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsraq_n_u32 + // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <4 x i32> %1 to <16 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> + // LLVM: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> + // LLVM: [[VSRA_N:%.*]] = lshr <4 x i32> [[TMP3]], splat (i32 3) + // LLVM: [[TMP4:%.*]] = add <4 x i32> [[TMP2]], [[VSRA_N]] + // LLVM: ret <4 x i32> [[TMP4]] +} + +uint64x2_t test_vsraq_n_u64(uint64x2_t a, uint64x2_t b) { + return vsraq_n_u64(a, b, 3); + + // CIR-LABEL: vsraq_n_u64 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsraq_n_u64 + // LLVM: [[TMP0:%.*]] = bitcast <2 x i64> %0 to <16 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <2 x i64> %1 to <16 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> + // LLVM: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> + // LLVM: [[VSRA_N:%.*]] = lshr <2 x i64> [[TMP3]], splat (i64 3) + // LLVM: [[TMP4:%.*]] = add <2 x i64> [[TMP2]], [[VSRA_N]] + // LLVM: ret <2 x i64> [[TMP4]] +} int8x8_t test_vrshr_n_s8(int8x8_t a) { return vrshr_n_s8(a, 3); @@ -15142,17 +15227,23 @@ int64_t test_vshrd_n_s64(int64_t a) { // return (int64_t)vsrad_n_s64(a, b, 63); // } -// NYI-LABEL: @test_vsra_n_s64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> -// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> -// NYI: [[VSRA_N:%.*]] = ashr <1 x i64> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <1 x i64> [[TMP2]], [[VSRA_N]] -// NYI: ret <1 x i64> [[TMP4]] -// int64x1_t test_vsra_n_s64(int64x1_t a, int64x1_t b) { -// return vsra_n_s64(a, b, 1); -// } +int64x1_t test_vsra_n_s64(int64x1_t a, int64x1_t b) { + return vsra_n_s64(a, b, 1); + + // CIR-LABEL: vsra_n_s64 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsra_n_s64 + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> %1 to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> + // LLVM: [[VSRA_N:%.*]] = ashr <1 x i64> [[TMP3]], splat (i64 1) + // LLVM: [[TMP4:%.*]] = add <1 x i64> [[TMP2]], [[VSRA_N]] + // LLVM: ret <1 x i64> [[TMP4]] +} // NYI-LABEL: @test_vsrad_n_u64( // NYI: [[SHRD_N:%.*]] = lshr i64 %b, 63 @@ -15168,17 +15259,23 @@ int64_t test_vshrd_n_s64(int64_t a) { // return (uint64_t)vsrad_n_u64(a, b, 64); // } -// NYI-LABEL: @test_vsra_n_u64( -// NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> -// NYI: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> -// NYI: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> -// NYI: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> -// NYI: [[VSRA_N:%.*]] = lshr <1 x i64> [[TMP3]], -// NYI: [[TMP4:%.*]] = add <1 x i64> [[TMP2]], [[VSRA_N]] -// NYI: ret <1 x i64> [[TMP4]] -// uint64x1_t test_vsra_n_u64(uint64x1_t a, uint64x1_t b) { -// return vsra_n_u64(a, b, 1); -// } +uint64x1_t test_vsra_n_u64(uint64x1_t a, uint64x1_t b) { + return vsra_n_u64(a, b, 1); + + // CIR-LABEL: vsra_n_u64 + // CIR: [[splat:%.*]] = cir.const #cir.const_vector + // CIR: [[VSRA_N:%.*]] = cir.shift(right, {{%.*}}, [[splat]] : !cir.vector) -> !cir.vector + // CIR: cir.binop(add, {{%.*}}, [[VSRA_N]]) : !cir.vector + + // LLVM-LABEL: test_vsra_n_u64 + // LLVM: [[TMP0:%.*]] = bitcast <1 x i64> %0 to <8 x i8> + // LLVM: [[TMP1:%.*]] = bitcast <1 x i64> %1 to <8 x i8> + // LLVM: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> + // LLVM: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> + // LLVM: [[VSRA_N:%.*]] = lshr <1 x i64> [[TMP3]], splat (i64 1) + // LLVM: [[TMP4:%.*]] = add <1 x i64> [[TMP2]], [[VSRA_N]] + // LLVM: ret <1 x i64> [[TMP4]] +} // NYI-LABEL: @test_vrsrad_n_s64( // NYI: [[TMP0:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 %b, i64 -63) From c4409e18346ba8525ce7b8f7e127e1ca5df4ebb5 Mon Sep 17 00:00:00 2001 From: FantasqueX Date: Tue, 11 Feb 2025 22:50:02 +0800 Subject: [PATCH 2244/2301] [CIR][CIRGen] Add alignment attribute to AtomicCmpXchg (#1327) There is an `alignment` attribute in MLIR's LLVMIR Dialect https://github.com/llvm/clangir/blob/a7383c9d05165d16edba857ddc86e5d29d94d2cc/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td#L1880 When Clang builds IR, it adds an alignment automatically https://github.com/llvm/clangir/blob/a7383c9d05165d16edba857ddc86e5d29d94d2cc/clang/lib/CodeGen/CGBuilder.h#L168-L177 This PR does the same thing for ClangIR. Closes: https://github.com/llvm/clangir/issues/1275 --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 + clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 7 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 1 + clang/test/CIR/CodeGen/atomic-runtime.cpp | 30 ++++----- clang/test/CIR/CodeGen/atomic-xchg-field.c | 4 +- clang/test/CIR/CodeGen/atomic.cpp | 64 +++++++++---------- 7 files changed, 60 insertions(+), 52 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 2659471e7e38..a0a13454fab3 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -5390,6 +5390,7 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", CIR_AnyType:$desired, Arg:$succ_order, Arg:$fail_order, + OptionalAttr:$alignment, UnitAttr:$weak, UnitAttr:$is_volatile); @@ -5401,6 +5402,7 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", `success` `=` $succ_order `,` `failure` `=` $fail_order `)` + (`align` `(` $alignment^ `)`)? (`weak` $weak^)? (`volatile` $is_volatile^)? `:` `(` type($old) `,` type($cmp) `)` attr-dict diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 62fe88a9d552..49fe86eb17b4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -423,7 +423,9 @@ static void emitAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, auto boolTy = builder.getBoolTy(); auto cmpxchg = builder.create( loc, Expected.getType(), boolTy, Ptr.getPointer(), Expected, Desired, - SuccessOrder, FailureOrder); + cir::MemOrderAttr::get(&CGF.getMLIRContext(), SuccessOrder), + cir::MemOrderAttr::get(&CGF.getMLIRContext(), FailureOrder), + builder.getI64IntegerAttr(Ptr.getAlignment().getAsAlign().value())); cmpxchg.setIsVolatile(E->isVolatile()); cmpxchg.setWeak(IsWeak); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 587eaf654e87..b3807cba5828 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -324,8 +324,11 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, auto op = builder.create( cgf.getLoc(expr->getSourceRange()), cmpVal.getType(), builder.getBoolTy(), destAddr.getPointer(), cmpVal, newVal, - cir::MemOrder::SequentiallyConsistent, - cir::MemOrder::SequentiallyConsistent); + MemOrderAttr::get(&cgf.getMLIRContext(), + cir::MemOrder::SequentiallyConsistent), + MemOrderAttr::get(&cgf.getMLIRContext(), + cir::MemOrder::SequentiallyConsistent), + builder.getI64IntegerAttr(destAddr.getAlignment().getAsAlign().value())); return returnBool ? op.getResult(1) : op.getResult(0); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index c79cc75e7f31..5dbc7fdccbd7 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -3202,6 +3202,7 @@ mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite( op.getLoc(), adaptor.getPtr(), expected, desired, getLLVMAtomicOrder(adaptor.getSuccOrder()), getLLVMAtomicOrder(adaptor.getFailOrder())); + cmpxchg.setAlignment(adaptor.getAlignment()); cmpxchg.setWeak(adaptor.getWeak()); cmpxchg.setVolatile_(adaptor.getIsVolatile()); diff --git a/clang/test/CIR/CodeGen/atomic-runtime.cpp b/clang/test/CIR/CodeGen/atomic-runtime.cpp index 28220ee0f5e6..cea46849c83e 100644 --- a/clang/test/CIR/CodeGen/atomic-runtime.cpp +++ b/clang/test/CIR/CodeGen/atomic-runtime.cpp @@ -120,7 +120,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(default, []) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = relaxed) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = relaxed) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -131,7 +131,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = acquire) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -142,7 +142,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = seq_cst) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -158,7 +158,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(default, []) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = relaxed) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = relaxed) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -169,7 +169,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = acquire) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -180,7 +180,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = seq_cst) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -196,7 +196,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(default, []) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = relaxed) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = relaxed) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -207,7 +207,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = acquire) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -218,7 +218,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = seq_cst) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -234,7 +234,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(default, []) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = relaxed) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = relaxed) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -245,7 +245,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = acquire) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -256,7 +256,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = seq_cst) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -272,7 +272,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(default, []) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = relaxed) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = relaxed) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -283,7 +283,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = acquire) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr @@ -294,7 +294,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected, // CHECK: cir.case(equal, [#cir.int<5> : !s32i]) { // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i -// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = seq_cst) align(4) : (!s32i, !cir.bool) // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[succeeded]] { // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c index fd9267632344..59b36ba183bb 100644 --- a/clang/test/CIR/CodeGen/atomic-xchg-field.c +++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c @@ -47,7 +47,7 @@ void structAtomicExchange(unsigned referenceCount, wPtr item) { } // CHECK-LABEL: @structAtomicExchange -// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u32i, {{.*}} : !u32i, success = seq_cst, failure = seq_cst) weak : (!u32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u32i, {{.*}} : !u32i, success = seq_cst, failure = seq_cst) align(8) weak : (!u32i, !cir.bool) // LLVM-LABEL: @structAtomicExchange // LLVM: load i32 @@ -55,7 +55,7 @@ void structAtomicExchange(unsigned referenceCount, wPtr item) { // LLVM: store i32 // LLVM: %[[EXP:.*]] = load i32 // LLVM: %[[DES:.*]] = load i32 -// LLVM: %[[RES:.*]] = cmpxchg weak ptr %9, i32 %[[EXP]], i32 %[[DES]] seq_cst seq_cst +// LLVM: %[[RES:.*]] = cmpxchg weak ptr %9, i32 %[[EXP]], i32 %[[DES]] seq_cst seq_cst, align 8 // LLVM: %[[OLD:.*]] = extractvalue { i32, i1 } %[[RES]], 0 // LLVM: %[[CMP:.*]] = extractvalue { i32, i1 } %[[RES]], 1 // LLVM: %[[FAIL:.*]] = xor i1 %[[CMP]], true diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 99a295846284..b737376833f3 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -261,7 +261,7 @@ bool fd4(struct S *a, struct S *b, struct S *c) { } // CHECK-LABEL: @_Z3fd4P1SS0_S0_ -// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) weak : (!u64i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) align(8) weak : (!u64i, !cir.bool) // LLVM-LABEL: @_Z3fd4P1SS0_S0_ // LLVM: cmpxchg weak ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8 @@ -273,7 +273,7 @@ bool fi4a(int *i) { } // CHECK-LABEL: @_Z4fi4aPi -// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) align(4) : (!s32i, !cir.bool) // LLVM-LABEL: @_Z4fi4aPi // LLVM: %[[RES:.*]] = cmpxchg ptr %7, i32 %8, i32 %9 acquire acquire, align 4 @@ -286,7 +286,7 @@ bool fi4b(int *i) { } // CHECK-LABEL: @_Z4fi4bPi -// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) weak : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) align(4) weak : (!s32i, !cir.bool) // LLVM-LABEL: @_Z4fi4bPi // LLVM: %[[R:.*]] = cmpxchg weak ptr {{.*}}, i32 {{.*}}, i32 {{.*}} acquire acquire, align 4 @@ -299,7 +299,7 @@ bool fi4c(atomic_int *i) { } // CHECK-LABEL: @_Z4fi4cPU7_Atomici -// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = seq_cst, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = seq_cst, failure = seq_cst) align(4) : (!s32i, !cir.bool) // CHECK: %[[CMP:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool // CHECK: cir.if %[[CMP:.*]] { // CHECK: cir.store %old, {{.*}} : !s32i, !cir.ptr @@ -436,14 +436,14 @@ void sub_byte(char* a, char b) { // CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr // CHECK: %[[CMP:.*]] = cir.load {{.*}} : !cir.ptr, !s32i // CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i -// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP]] : !s32i, %[[UPD]] : !s32i, success = seq_cst, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP]] : !s32i, %[[UPD]] : !s32i, success = seq_cst, failure = seq_cst) align(4) : (!s32i, !cir.bool) // CHECK: cir.store %[[RES]], {{.*}} : !cir.bool, !cir.ptr // LLVM-LABEL: @_Z12cmp_bool_int // LLVM: %[[PTR:.*]] = load ptr // LLVM: %[[CMP:.*]] = load i32 // LLVM: %[[UPD:.*]] = load i32 -// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst +// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst, align 4 // LLVM: %[[TMP:.*]] = extractvalue { i32, i1 } %[[RES]], 1 // LLVM: %[[EXT:.*]] = zext i1 %[[TMP]] to i8 // LLVM: store i8 %[[EXT]], ptr {{.*}} @@ -453,28 +453,28 @@ void cmp_bool_int(int* p, int x, int u) { // CHECK-LABEL: @_Z13cmp_bool_long -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) : (!s64i, !cir.bool) +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) align(8) : (!s64i, !cir.bool) // LLVM-LABEL: @_Z13cmp_bool_long -// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst +// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8 void cmp_bool_long(long* p, long x, long u) { bool r = __sync_bool_compare_and_swap(p, x, u); } // CHECK-LABEL: @_Z14cmp_bool_short -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s16i, {{.*}} : !s16i, success = seq_cst, failure = seq_cst) : (!s16i, !cir.bool) +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s16i, {{.*}} : !s16i, success = seq_cst, failure = seq_cst) align(2) : (!s16i, !cir.bool) // LLVM-LABEL: @_Z14cmp_bool_short -// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst +// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst, align 2 void cmp_bool_short(short* p, short x, short u) { bool r = __sync_bool_compare_and_swap(p, x, u); } // CHECK-LABEL: @_Z13cmp_bool_byte -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) : (!s8i, !cir.bool) +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) align(1) : (!s8i, !cir.bool) // LLVM-LABEL: @_Z13cmp_bool_byte -// LLVM: cmpxchg ptr {{.*}}, i8 {{.*}}, i8 {{.*}} seq_cst seq_cst +// LLVM: cmpxchg ptr {{.*}}, i8 {{.*}}, i8 {{.*}} seq_cst seq_cst, align 1 void cmp_bool_byte(char* p, char x, char u) { bool r = __sync_bool_compare_and_swap(p, x, u); } @@ -483,14 +483,14 @@ void cmp_bool_byte(char* p, char x, char u) { // CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr // CHECK: %[[CMP:.*]] = cir.load {{.*}} : !cir.ptr, !s32i // CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i -// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP]] : !s32i, %[[UPD]] : !s32i, success = seq_cst, failure = seq_cst) : (!s32i, !cir.bool) +// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP]] : !s32i, %[[UPD]] : !s32i, success = seq_cst, failure = seq_cst) align(4) : (!s32i, !cir.bool) // CHECK: cir.store %[[OLD]], {{.*}} : !s32i, !cir.ptr // LLVM-LABEL: @_Z11cmp_val_int // LLVM: %[[PTR:.*]] = load ptr // LLVM: %[[CMP:.*]] = load i32 // LLVM: %[[UPD:.*]] = load i32 -// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst +// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst, align 4 // LLVM: %[[TMP:.*]] = extractvalue { i32, i1 } %[[RES]], 0 // LLVM: store i32 %[[TMP]], ptr {{.*}} void cmp_val_int(int* p, int x, int u) { @@ -498,28 +498,28 @@ void cmp_val_int(int* p, int x, int u) { } // CHECK-LABEL: @_Z12cmp_val_long -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) : (!s64i, !cir.bool) +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) align(8) : (!s64i, !cir.bool) // LLVM-LABEL: @_Z12cmp_val_long -// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst +// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8 void cmp_val_long(long* p, long x, long u) { long r = __sync_val_compare_and_swap(p, x, u); } // CHECK-LABEL: @_Z13cmp_val_short -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s16i, {{.*}} : !s16i, success = seq_cst, failure = seq_cst) : (!s16i, !cir.bool) +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s16i, {{.*}} : !s16i, success = seq_cst, failure = seq_cst) align(2) : (!s16i, !cir.bool) // LLVM-LABEL: @_Z13cmp_val_short -// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst +// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst, align 2 void cmp_val_short(short* p, short x, short u) { short r = __sync_val_compare_and_swap(p, x, u); } // CHECK-LABEL: @_Z12cmp_val_byte -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) : (!s8i, !cir.bool) +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) align(1) : (!s8i, !cir.bool) // LLVM-LABEL: @_Z12cmp_val_byte -// LLVM: cmpxchg ptr {{.*}}, i8 {{.*}}, i8 {{.*}} seq_cst seq_cst +// LLVM: cmpxchg ptr {{.*}}, i8 {{.*}}, i8 {{.*}} seq_cst seq_cst, align 1 void cmp_val_byte(char* p, char x, char u) { char r = __sync_val_compare_and_swap(p, x, u); } @@ -586,14 +586,14 @@ void sub_uchar(unsigned char* a, char b) { // CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i // CHECK: %[[UPD_U:.*]] = cir.cast(integral, %[[UPD]] : !s32i), !u32i // CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP_U]] : -// CHECK-SAME: !u32i, %[[UPD_U]] : !u32i, success = seq_cst, failure = seq_cst) : (!u32i, !cir.bool) +// CHECK-SAME: !u32i, %[[UPD_U]] : !u32i, success = seq_cst, failure = seq_cst) align(4) : (!u32i, !cir.bool) // CHECK: cir.store %[[RES]], {{.*}} : !cir.bool, !cir.ptr // LLVM-LABEL: @_Z13cmp_bool_uint // LLVM: %[[PTR:.*]] = load ptr // LLVM: %[[CMP:.*]] = load i32 // LLVM: %[[UPD:.*]] = load i32 -// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst +// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst, align 4 // LLVM: %[[TMP:.*]] = extractvalue { i32, i1 } %[[RES]], 1 // LLVM: %[[EXT:.*]] = zext i1 %[[TMP]] to i8 // LLVM: store i8 %[[EXT]], ptr {{.*}} @@ -602,19 +602,19 @@ void cmp_bool_uint(unsigned int* p, int x, int u) { } // CHECK-LABEL: @_Z15cmp_bool_ushort -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u16i, {{.*}} : !u16i, success = seq_cst, failure = seq_cst) : (!u16i, !cir.bool) +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u16i, {{.*}} : !u16i, success = seq_cst, failure = seq_cst) align(2) : (!u16i, !cir.bool) // LLVM-LABEL: @_Z15cmp_bool_ushort -// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst +// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst, align 2 void cmp_bool_ushort(unsigned short* p, short x, short u) { bool r = __sync_bool_compare_and_swap(p, x, u); } // CHECK-LABEL: @_Z14cmp_bool_ulong -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) : (!u64i, !cir.bool) +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) align(8) : (!u64i, !cir.bool) // LLVM-LABEL: @_Z14cmp_bool_ulong -// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst +// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8 void cmp_bool_ulong(unsigned long* p, long x, long u) { bool r = __sync_bool_compare_and_swap(p, x, u); } @@ -626,7 +626,7 @@ void cmp_bool_ulong(unsigned long* p, long x, long u) { // CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i // CHECK: %[[UPD_U:.*]] = cir.cast(integral, %[[UPD]] : !s32i), !u32i // CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP_U]] : -// CHECK-SAME: !u32i, %[[UPD_U]] : !u32i, success = seq_cst, failure = seq_cst) : (!u32i, !cir.bool) +// CHECK-SAME: !u32i, %[[UPD_U]] : !u32i, success = seq_cst, failure = seq_cst) align(4) : (!u32i, !cir.bool) // CHECK: %[[R:.*]] = cir.cast(integral, %[[OLD]] : !u32i), !s32i // CHECK: cir.store %[[R]], {{.*}} : !s32i, !cir.ptr @@ -634,7 +634,7 @@ void cmp_bool_ulong(unsigned long* p, long x, long u) { // LLVM: %[[PTR:.*]] = load ptr // LLVM: %[[CMP:.*]] = load i32 // LLVM: %[[UPD:.*]] = load i32 -// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst +// LLVM: %[[RES:.*]] = cmpxchg ptr %[[PTR]], i32 %[[CMP]], i32 %[[UPD]] seq_cst seq_cst, align 4 // LLVM: %[[TMP:.*]] = extractvalue { i32, i1 } %[[RES]], 0 // LLVM: store i32 %[[TMP]], ptr {{.*}} void cmp_val_uint(unsigned int* p, int x, int u) { @@ -642,19 +642,19 @@ void cmp_val_uint(unsigned int* p, int x, int u) { } // CHECK-LABEL: @_Z14cmp_val_ushort -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u16i, {{.*}} : !u16i, success = seq_cst, failure = seq_cst) : (!u16i, !cir.bool) +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u16i, {{.*}} : !u16i, success = seq_cst, failure = seq_cst) align(2) : (!u16i, !cir.bool) // LLVM-LABEL: @_Z14cmp_val_ushort -// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst +// LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst, align 2 void cmp_val_ushort(unsigned short* p, short x, short u) { short r = __sync_val_compare_and_swap(p, x, u); } // CHECK-LABEL: @_Z13cmp_val_ulong -// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) : (!u64i, !cir.bool) +// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) align(8) : (!u64i, !cir.bool) // LLVM-LABEL: @_Z13cmp_val_ulong -// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst +// LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8 void cmp_val_ulong(unsigned long* p, long x, long u) { long r = __sync_val_compare_and_swap(p, x, u); } From 1468ac4fda9501fafd07ee31d6236ec21978ec9a Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Tue, 11 Feb 2025 22:50:25 +0800 Subject: [PATCH 2245/2301] [CIR][CIRGen][TBAA] Add support for scalar types (#1329) This patch introduces support for TBAA with scalar types. By encoding the type name in the CIR, we address the limitation of distinguishing between different C++ types that map to the same CIR type. For example, long and long long types. see https://github.com/llvm/clangir/issues/1241 --- .../CIR/Dialect/Builder/CIRBaseBuilder.h | 6 +- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 8 +- clang/include/clang/CIR/Dialect/IR/CIROps.td | 6 +- .../clang/CIR/Dialect/IR/CIRTBAAAttrs.td | 58 ++++ clang/include/clang/CIR/MissingFeatures.h | 11 +- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 2 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 2 +- clang/lib/CIR/CodeGen/CIRGenTBAA.cpp | 252 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenTBAA.h | 11 +- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 14 +- clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp | 2 +- .../CIR/Lowering/DirectToLLVM/CMakeLists.txt | 1 + .../Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp | 73 +++++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 11 +- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 3 + clang/test/CIR/CodeGen/const-alloca.cpp | 6 +- clang/test/CIR/CodeGen/tbaa-bitinit.c | 13 + clang/test/CIR/CodeGen/tbaa-enum.c | 146 ++++++++++ clang/test/CIR/CodeGen/tbaa-scalar.c | 143 ++++++++++ clang/test/CIR/CodeGen/tbaa-union.c | 32 +++ clang/test/CIR/CodeGen/tbaa-vptr.cpp | 18 ++ clang/test/CIR/CodeGen/tbaa.c | 22 -- 24 files changed, 788 insertions(+), 56 deletions(-) create mode 100644 clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td create mode 100644 clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp create mode 100644 clang/test/CIR/CodeGen/tbaa-bitinit.c create mode 100644 clang/test/CIR/CodeGen/tbaa-enum.c create mode 100644 clang/test/CIR/CodeGen/tbaa-scalar.c create mode 100644 clang/test/CIR/CodeGen/tbaa-union.c create mode 100644 clang/test/CIR/CodeGen/tbaa-vptr.cpp delete mode 100644 clang/test/CIR/CodeGen/tbaa.c diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index 502fd0d52524..383fcd950ab0 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -170,7 +170,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { /*alignment=*/intAttr, /*mem_order=*/ cir::MemOrderAttr{}, - /*tbaa=*/mlir::ArrayAttr{}); + /*tbaa=*/cir::TBAAAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Value ptr, @@ -357,7 +357,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { val.getType()) dst = createPtrBitcast(dst, val.getType()); return create(loc, val, dst, _volatile, align, order, - /*tbaa=*/mlir::ArrayAttr{}); + /*tbaa=*/cir::TBAAAttr{}); } mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, @@ -405,7 +405,7 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { cir::CopyOp createCopy(mlir::Value dst, mlir::Value src, bool isVolatile = false) { return create(dst.getLoc(), dst, src, isVolatile, - /*tbaa=*/mlir::ArrayAttr{}); + /*tbaa=*/cir::TBAAAttr{}); } cir::MemCpyOp createMemCpy(mlir::Location loc, mlir::Value dst, diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index a74eda452a49..d3f5c88df19c 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -24,8 +24,9 @@ include "clang/CIR/Interfaces/ASTAttrInterfaces.td" // CIR Attrs //===----------------------------------------------------------------------===// -class CIR_Attr traits = []> - : AttrDef { +class CIR_Attr traits = [], + string baseCppClass = "::mlir::Attribute"> + : AttrDef { let mnemonic = attrMnemonic; } @@ -1323,8 +1324,7 @@ def GlobalAnnotationValuesAttr : CIR_Attr<"GlobalAnnotationValues", let genVerifyDecl = 1; } -def CIR_TBAAAttr : CIR_Attr<"TBAA", "tbaa", []> { -} +include "clang/CIR/Dialect/IR/CIRTBAAAttrs.td" include "clang/CIR/Dialect/IR/CIROpenCLAttrs.td" include "clang/CIR/Dialect/IR/CIRCUDAAttrs.td" diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a0a13454fab3..7994cd282d83 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -589,7 +589,7 @@ def LoadOp : CIR_Op<"load", [ UnitAttr:$is_volatile, OptionalAttr:$alignment, OptionalAttr:$mem_order, - OptionalAttr:$tbaa + OptionalAttr:$tbaa ); let results = (outs CIR_AnyType:$result); @@ -658,7 +658,7 @@ def StoreOp : CIR_Op<"store", [ UnitAttr:$is_volatile, OptionalAttr:$alignment, OptionalAttr:$mem_order, - OptionalAttr:$tbaa); + OptionalAttr:$tbaa); let assemblyFormat = [{ (`volatile` $is_volatile^)? @@ -4121,7 +4121,7 @@ def CopyOp : CIR_Op<"copy", let arguments = (ins Arg:$dst, Arg:$src, UnitAttr:$is_volatile, - OptionalAttr:$tbaa); + OptionalAttr:$tbaa); let summary = "Copies contents from a CIR pointer to another"; let description = [{ Given two CIR pointers, `src` and `dst`, `cir.copy` will copy the memory diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td new file mode 100644 index 000000000000..40e2d2d7f1d9 --- /dev/null +++ b/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td @@ -0,0 +1,58 @@ +//===----------------------------------------------------------------------===// +// TBAAAttr +//===----------------------------------------------------------------------===// + +def CIR_TBAAAttr : CIR_Attr<"TBAA", "tbaa", []> { + let summary = "CIR dialect TBAA base attribute"; +} + +//===----------------------------------------------------------------------===// +// TBAAOmnipotentCharAttr +//===----------------------------------------------------------------------===// + +def CIR_TBAAOmnipotentChar + : CIR_Attr<"TBAAOmnipotentChar", "tbaa_omnipotent_char", [], "TBAAAttr"> { + let summary = "Describes a special scalar type, the omnipotent char type."; +} + +//===----------------------------------------------------------------------===// +// TBAAScalarAttr +//===----------------------------------------------------------------------===// + +def CIR_TBAAScalarAttr : CIR_Attr<"TBAAScalar", "tbaa_scalar", [], "TBAAAttr"> { + let summary = "Describes a scalar type in TBAA with an identifier."; + + let parameters = (ins StringRefParameter<> : $id, CIR_AnyType : $type); + + let description = [{ + Define a TBAA scalar attribute. + + Example: + ```mlir + // CIR_TBAAScalarAttr + #tbaa_scalar = #cir.tbaa_scalar + #tbaa_scalar1 = #cir.tbaa_scalar + ``` + + See the following link for more details: + https://llvm.org/docs/LangRef.html#tbaa-metadata + }]; + + let assemblyFormat = "`<` struct(params) `>`"; +} + +def CIR_TBAATagAttr : CIR_Attr<"TBAATag", "tbaa_tag", [], "TBAAAttr"> { + let parameters = (ins CIR_TBAAAttr + : $base, CIR_TBAAAttr + : $access, "int64_t" + : $offset); + + let assemblyFormat = "`<` struct(params) `>`"; +} + +def CIR_AnyTBAAAttr : AnyAttrOf<[ + CIR_TBAAAttr, + CIR_TBAAOmnipotentChar, + CIR_TBAAScalarAttr, + CIR_TBAATagAttr +]>; diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 8f9c5d3eff49..1487747899d6 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -58,7 +58,16 @@ struct MissingFeatures { // sanitizer related type check features static bool emitTypeCheck() { return false; } static bool tbaa() { return false; } - static bool tbaa_struct() { return false; } + static bool tbaaStruct() { return false; } + static bool tbaaTagForStruct() { return false; } + static bool tbaaTagForEnum() { return false; } + static bool tbaaTagForBitInt() { return false; } + static bool tbaaVTablePtr() { return false; } + static bool tbaaIncompleteType() { return false; } + static bool tbaaMergeTBAAInfo() { return false; } + static bool tbaaMayAlias() { return false; } + static bool tbaaNewStructPath() { return false; } + static bool tbaaPointer() { return false; } static bool emitNullabilityCheck() { return false; } static bool ptrAuth() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 3a8a3955d7f2..e678a13ede7e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -868,7 +868,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return create( loc, addr.getElementType(), addr.getPointer(), /*isDeref=*/false, /*is_volatile=*/isVolatile, /*alignment=*/mlir::IntegerAttr{}, - /*mem_order=*/cir::MemOrderAttr{}, /*tbaa=*/mlir::ArrayAttr{}); + /*mem_order=*/cir::MemOrderAttr{}, /*tbaa=*/cir::TBAAAttr{}); } mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index d7aa4326bf30..8c37cdea13f8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -1722,7 +1722,7 @@ void CIRGenFunction::emitAggregateCopy(LValue Dest, LValue Src, QualType Ty, // Determine the metadata to describe the position of any padding in this // memcpy, as well as the TBAA tags for the members of the struct, in case // the optimizer wishes to expand it in to scalar memory operations. - assert(!cir::MissingFeatures::tbaa_struct() && "tbaa.struct NYI"); + assert(!cir::MissingFeatures::tbaaStruct() && "tbaa.struct NYI"); if (CGM.getCodeGenOpts().NewStructPathTBAA) { TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer( Dest.getTBAAInfo(), Src.getTBAAInfo()); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 76221be12319..d0b098cd188f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -4096,7 +4096,7 @@ cir::TBAAAttr CIRGenModule::getTBAABaseTypeInfo(QualType QTy) { return tbaa->getBaseTypeInfo(QTy); } -mlir::ArrayAttr CIRGenModule::getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo) { +cir::TBAAAttr CIRGenModule::getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo) { if (!tbaa) { return nullptr; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 867dee754862..522d38e0b018 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -576,7 +576,7 @@ class CIRGenModule : public CIRGenTypeCache { /// type is not suitable for use in TBAA access tags. cir::TBAAAttr getTBAABaseTypeInfo(QualType QTy); - mlir::ArrayAttr getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo); + cir::TBAAAttr getTBAAAccessTagInfo(TBAAAccessInfo tbaaInfo); /// Get merged TBAA information for the purposes of type casts. TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp index a6efc05e4110..fa8325c654cb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp @@ -1,12 +1,17 @@ #include "CIRGenTBAA.h" -#include "CIRGenCXXABI.h" #include "CIRGenTypes.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "mlir/Support/LLVM.h" #include "clang/AST/ASTContext.h" #include "clang/AST/RecordLayout.h" +#include "clang/AST/Type.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "clang/CIR/MissingFeatures.h" #include "llvm/Support/ErrorHandling.h" +#include namespace clang::CIRGen { cir::TBAAAttr tbaa_NYI(mlir::MLIRContext *mlirContext) { @@ -21,44 +26,279 @@ CIRGenTBAA::CIRGenTBAA(mlir::MLIRContext *mlirContext, : mlirContext(mlirContext), astContext(astContext), types(types), moduleOp(moduleOp), codeGenOpts(codeGenOpts), features(features) {} +cir::TBAAAttr CIRGenTBAA::getChar() { + return cir::TBAAOmnipotentCharAttr::get(mlirContext); +} + +static bool typeHasMayAlias(clang::QualType qty) { + // Tagged types have declarations, and therefore may have attributes. + if (auto *td = qty->getAsTagDecl()) + if (td->hasAttr()) + return true; + + // Also look for may_alias as a declaration attribute on a typedef. + // FIXME: We should follow GCC and model may_alias as a type attribute + // rather than as a declaration attribute. + while (auto *tt = qty->getAs()) { + if (tt->getDecl()->hasAttr()) + return true; + qty = tt->desugar(); + } + return false; +} + +/// Check if the given type is a valid base type to be used in access tags. +static bool isValidBaseType(clang::QualType qty) { + if (const clang::RecordType *tty = qty->getAs()) { + const clang::RecordDecl *rd = tty->getDecl()->getDefinition(); + // Incomplete types are not valid base access types. + if (!rd) + return false; + if (rd->hasFlexibleArrayMember()) + return false; + // rd can be struct, union, class, interface or enum. + // For now, we only handle struct and class. + if (rd->isStruct() || rd->isClass()) + return true; + } + return false; +} + +cir::TBAAAttr CIRGenTBAA::getScalarTypeInfo(clang::QualType qty) { + const clang::Type *ty = astContext.getCanonicalType(qty).getTypePtr(); + assert(mlir::isa(ty)); + const clang::BuiltinType *bty = mlir::dyn_cast(ty); + return cir::TBAAScalarAttr::get(mlirContext, bty->getName(features), + types.convertType(qty)); +} + +cir::TBAAAttr CIRGenTBAA::getTypeInfoHelper(clang::QualType qty) { + const clang::Type *ty = astContext.getCanonicalType(qty).getTypePtr(); + // Handle builtin types. + if (const clang::BuiltinType *bty = mlir::dyn_cast(ty)) { + switch (bty->getKind()) { + // Character types are special and can alias anything. + // In C++, this technically only includes "char" and "unsigned char", + // and not "signed char". In C, it includes all three. For now, + // the risk of exploiting this detail in C++ seems likely to outweigh + // the benefit. + case BuiltinType::Char_U: + case BuiltinType::Char_S: + case BuiltinType::UChar: + case BuiltinType::SChar: + return getChar(); + + // Unsigned types can alias their corresponding signed types. + case BuiltinType::UShort: + return getScalarTypeInfo(astContext.ShortTy); + case BuiltinType::UInt: + return getScalarTypeInfo(astContext.IntTy); + case BuiltinType::ULong: + return getScalarTypeInfo(astContext.LongTy); + case BuiltinType::ULongLong: + return getScalarTypeInfo(astContext.LongLongTy); + case BuiltinType::UInt128: + return getScalarTypeInfo(astContext.Int128Ty); + + case BuiltinType::UShortFract: + return getScalarTypeInfo(astContext.ShortFractTy); + case BuiltinType::UFract: + return getScalarTypeInfo(astContext.FractTy); + case BuiltinType::ULongFract: + return getScalarTypeInfo(astContext.LongFractTy); + + case BuiltinType::SatUShortFract: + return getScalarTypeInfo(astContext.SatShortFractTy); + case BuiltinType::SatUFract: + return getScalarTypeInfo(astContext.SatFractTy); + case BuiltinType::SatULongFract: + return getScalarTypeInfo(astContext.SatLongFractTy); + + case BuiltinType::UShortAccum: + return getScalarTypeInfo(astContext.ShortAccumTy); + case BuiltinType::UAccum: + return getScalarTypeInfo(astContext.AccumTy); + case BuiltinType::ULongAccum: + return getScalarTypeInfo(astContext.LongAccumTy); + + case BuiltinType::SatUShortAccum: + return getScalarTypeInfo(astContext.SatShortAccumTy); + case BuiltinType::SatUAccum: + return getScalarTypeInfo(astContext.SatAccumTy); + case BuiltinType::SatULongAccum: + return getScalarTypeInfo(astContext.SatLongAccumTy); + + // Treat all other builtin types as distinct types. This includes + // treating wchar_t, char16_t, and char32_t as distinct from their + // "underlying types". + default: + return getScalarTypeInfo(qty); + } + } + // C++1z [basic.lval]p10: "If a program attempts to access the stored value of + // an object through a glvalue of other than one of the following types the + // behavior is undefined: [...] a char, unsigned char, or std::byte type." + if (ty->isStdByteType()) + return getChar(); + + // Handle pointers and references. + // + // C has a very strict rule for pointer aliasing. C23 6.7.6.1p2: + // For two pointer types to be compatible, both shall be identically + // qualified and both shall be pointers to compatible types. + // + // This rule is impractically strict; we want to at least ignore CVR + // qualifiers. Distinguishing by CVR qualifiers would make it UB to + // e.g. cast a `char **` to `const char * const *` and dereference it, + // which is too common and useful to invalidate. C++'s similar types + // rule permits qualifier differences in these nested positions; in fact, + // C++ even allows that cast as an implicit conversion. + // + // Other qualifiers could theoretically be distinguished, especially if + // they involve a significant representation difference. We don't + // currently do so, however. + if (ty->isPointerType() || ty->isReferenceType()) { + if (!codeGenOpts.PointerTBAA) { + return cir::TBAAScalarAttr::get(mlirContext, "any pointer", + types.convertType(qty)); + } + assert(!cir::MissingFeatures::tbaaPointer()); + return tbaa_NYI(mlirContext); + } + // Accesses to arrays are accesses to objects of their element types. + if (codeGenOpts.NewStructPathTBAA && ty->isArrayType()) { + assert(!cir::MissingFeatures::tbaaNewStructPath()); + return tbaa_NYI(mlirContext); + } + // Enum types are distinct types. In C++ they have "underlying types", + // however they aren't related for TBAA. + if (const EnumType *ety = dyn_cast(ty)) { + assert(!cir::MissingFeatures::tbaaTagForEnum()); + return tbaa_NYI(mlirContext); + } + if (const auto *eit = dyn_cast(ty)) { + assert(!cir::MissingFeatures::tbaaTagForBitInt()); + return tbaa_NYI(mlirContext); + } + // For now, handle any other kind of type conservatively. + return getChar(); +} + cir::TBAAAttr CIRGenTBAA::getTypeInfo(clang::QualType qty) { - return tbaa_NYI(mlirContext); + // At -O0 or relaxed aliasing, TBAA is not emitted for regular types. + if (codeGenOpts.OptimizationLevel == 0 || codeGenOpts.RelaxedAliasing) { + return nullptr; + } + + // If the type has the may_alias attribute (even on a typedef), it is + // effectively in the general char alias class. + if (typeHasMayAlias(qty)) { + assert(!cir::MissingFeatures::tbaaMayAlias()); + return getChar(); + } + // We need this function to not fall back to returning the "omnipotent char" + // type node for aggregate and union types. Otherwise, any dereference of an + // aggregate will result into the may-alias access descriptor, meaning all + // subsequent accesses to direct and indirect members of that aggregate will + // be considered may-alias too. + // function. + if (isValidBaseType(qty)) { + assert(!cir::MissingFeatures::tbaaTagForStruct()); + return tbaa_NYI(mlirContext); + } + + const clang::Type *ty = astContext.getCanonicalType(qty).getTypePtr(); + if (metadataCache.contains(ty)) { + return metadataCache[ty]; + } + + // Note that the following helper call is allowed to add new nodes to the + // cache, which invalidates all its previously obtained iterators. So we + // first generate the node for the type and then add that node to the + // cache. + auto typeNode = getTypeInfoHelper(qty); + return metadataCache[ty] = typeNode; } TBAAAccessInfo CIRGenTBAA::getAccessInfo(clang::QualType accessType) { - return TBAAAccessInfo(); + // Pointee values may have incomplete types, but they shall never be + // dereferenced. + if (accessType->isIncompleteType()) { + assert(!cir::MissingFeatures::tbaaIncompleteType()); + return TBAAAccessInfo::getIncompleteInfo(); + } + + if (typeHasMayAlias(accessType)) { + assert(!cir::MissingFeatures::tbaaMayAlias()); + return TBAAAccessInfo::getMayAliasInfo(); + } + + uint64_t size = astContext.getTypeSizeInChars(accessType).getQuantity(); + return TBAAAccessInfo(getTypeInfo(accessType), size); } TBAAAccessInfo CIRGenTBAA::getVTablePtrAccessInfo(mlir::Type vtablePtrType) { + assert(!cir::MissingFeatures::tbaaVTablePtr()); return TBAAAccessInfo(); } mlir::ArrayAttr CIRGenTBAA::getTBAAStructInfo(clang::QualType qty) { - return mlir::ArrayAttr::get(mlirContext, {}); + assert(!cir::MissingFeatures::tbaaStruct() && "tbaa.struct NYI"); + return mlir::ArrayAttr(); } cir::TBAAAttr CIRGenTBAA::getBaseTypeInfo(clang::QualType qty) { return tbaa_NYI(mlirContext); } -mlir::ArrayAttr CIRGenTBAA::getAccessTagInfo(TBAAAccessInfo tbaaInfo) { - return mlir::ArrayAttr::get(mlirContext, {tbaa_NYI(mlirContext)}); +cir::TBAAAttr CIRGenTBAA::getAccessTagInfo(TBAAAccessInfo tbaaInfo) { + assert(!tbaaInfo.isIncomplete() && + "Access to an object of an incomplete type!"); + + if (tbaaInfo.isMayAlias()) { + assert(!cir::MissingFeatures::tbaaMayAlias()); + tbaaInfo = TBAAAccessInfo(getChar(), tbaaInfo.size); + } + if (!tbaaInfo.accessType) { + return nullptr; + } + + if (!codeGenOpts.StructPathTBAA) + tbaaInfo = TBAAAccessInfo(tbaaInfo.accessType, tbaaInfo.size); + + if (!tbaaInfo.baseType) { + tbaaInfo.baseType = tbaaInfo.accessType; + assert(!tbaaInfo.offset && + "Nonzero offset for an access with no base type!"); + } + if (codeGenOpts.NewStructPathTBAA) { + assert(!cir::MissingFeatures::tbaaNewStructPath()); + return tbaa_NYI(mlirContext); + } + if (tbaaInfo.baseType == tbaaInfo.accessType) { + return tbaaInfo.accessType; + } + return cir::TBAATagAttr::get(mlirContext, tbaaInfo.baseType, + tbaaInfo.accessType, tbaaInfo.offset); } TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForCast(TBAAAccessInfo sourceInfo, TBAAAccessInfo targetInfo) { + assert(!cir::MissingFeatures::tbaaMergeTBAAInfo()); return TBAAAccessInfo(); } TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo infoA, TBAAAccessInfo infoB) { + assert(!cir::MissingFeatures::tbaaMergeTBAAInfo()); return TBAAAccessInfo(); } TBAAAccessInfo CIRGenTBAA::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo destInfo, TBAAAccessInfo srcInfo) { + assert(!cir::MissingFeatures::tbaaMergeTBAAInfo()); return TBAAAccessInfo(); } diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h index 3f59a0e6538b..57a2c0fae226 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.h +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.h @@ -104,6 +104,15 @@ class CIRGenTBAA { [[maybe_unused]] const clang::CodeGenOptions &codeGenOpts; [[maybe_unused]] const clang::LangOptions &features; + llvm::DenseMap metadataCache; + + cir::TBAAAttr getChar(); + + // An internal helper function to generate metadata used + // to describe accesses to objects of the given type. + cir::TBAAAttr getTypeInfoHelper(clang::QualType qty); + cir::TBAAAttr getScalarTypeInfo(clang::QualType qty); + public: CIRGenTBAA(mlir::MLIRContext *mlirContext, clang::ASTContext &astContext, CIRGenTypes &types, mlir::ModuleOp moduleOp, @@ -129,7 +138,7 @@ class CIRGenTBAA { cir::TBAAAttr getBaseTypeInfo(clang::QualType qty); /// Get TBAA tag for a given memory access. - mlir::ArrayAttr getAccessTagInfo(TBAAAccessInfo tbaaInfo); + cir::TBAAAttr getAccessTagInfo(TBAAAccessInfo tbaaInfo); /// Get merged TBAA information for the purpose of type casts. TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo sourceInfo, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 6fa6f73d5965..07ec1721c124 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -17,6 +17,7 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "clang/CIR/Interfaces/CIRLoopOpInterface.h" #include "clang/CIR/MissingFeatures.h" +#include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/ErrorHandling.h" #include #include @@ -106,12 +107,13 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { os << dynCastInfoAttr.getAlias(); return AliasResult::FinalAlias; } - if (auto tbaaAttr = mlir::dyn_cast(attr)) { - os << tbaaAttr.getMnemonic(); - return AliasResult::OverridableAlias; - } - - return AliasResult::NoAlias; + return TypeSwitch(attr) + .Case([&](auto attr) { + os << decltype(attr)::getMnemonic(); + return AliasResult::OverridableAlias; + }) + .Default([](Attribute) { return AliasResult::NoAlias; }); } }; } // namespace diff --git a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp index 80963353a304..bb99d53e0ad8 100644 --- a/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRMemorySlot.cpp @@ -151,7 +151,7 @@ DeletionKind cir::CopyOp::removeBlockingUses( if (loadsFrom(slot)) builder.create(getLoc(), reachingDefinition, getDst(), false, mlir::IntegerAttr{}, cir::MemOrderAttr(), - mlir::ArrayAttr{}); + cir::TBAAAttr{}); return DeletionKind::Delete; } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt index df89f6b2a7b7..3920fdfc68c3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt +++ b/clang/lib/CIR/Lowering/DirectToLLVM/CMakeLists.txt @@ -8,6 +8,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_clang_library(clangCIRLoweringDirectToLLVM LowerToLLVMIR.cpp LowerToLLVM.cpp + LowerTBAAToLLVM.cpp DEPENDS MLIRCIREnumsGen diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp new file mode 100644 index 000000000000..7ea58c4f66e4 --- /dev/null +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp @@ -0,0 +1,73 @@ +#include "LowerToLLVM.h" +#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/Interfaces/DataLayoutInterfaces.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/ErrorHandling.h" +#include + +namespace cir { +namespace direct { + +class CIRToLLVMTBAAAttrLowering { +public: + CIRToLLVMTBAAAttrLowering(mlir::MLIRContext *mlirContext) + : mlirContext(mlirContext) {} + mlir::LLVM::TBAARootAttr getRoot() { + return mlir::LLVM::TBAARootAttr::get( + mlirContext, mlir::StringAttr::get(mlirContext, "Simple C/C++ TBAA")); + } + + mlir::LLVM::TBAATypeDescriptorAttr getChar() { + return createScalarTypeNode("omnipotent char", getRoot(), 0); + } + + mlir::LLVM::TBAATypeDescriptorAttr + createScalarTypeNode(llvm::StringRef typeName, + mlir::LLVM::TBAANodeAttr parent, int64_t size) { + llvm::SmallVector members; + members.push_back(mlir::LLVM::TBAAMemberAttr::get(mlirContext, parent, 0)); + return mlir::LLVM::TBAATypeDescriptorAttr::get( + mlirContext, typeName, + llvm::ArrayRef(members)); + } + +protected: + mlir::MLIRContext *mlirContext; +}; + +class CIRToLLVMTBAAScalarAttrLowering : public CIRToLLVMTBAAAttrLowering { +public: + CIRToLLVMTBAAScalarAttrLowering(mlir::MLIRContext *mlirContext) + : CIRToLLVMTBAAAttrLowering(mlirContext) {} + mlir::LLVM::TBAATypeDescriptorAttr + lowerScalarType(cir::TBAAScalarAttr scalarAttr) { + mlir::DataLayout layout; + auto size = layout.getTypeSize(scalarAttr.getType()); + return createScalarTypeNode(scalarAttr.getId(), getChar(), size); + } +}; + +mlir::ArrayAttr lowerCIRTBAAAttr(mlir::Attribute tbaa, + mlir::ConversionPatternRewriter &rewriter, + cir::LowerModule *lowerMod) { + auto *ctx = rewriter.getContext(); + CIRToLLVMTBAAScalarAttrLowering scalarLower(ctx); + if (auto charAttr = mlir::dyn_cast(tbaa)) { + auto accessType = scalarLower.getChar(); + auto tag = mlir::LLVM::TBAATagAttr::get(accessType, accessType, 0); + return mlir::ArrayAttr::get(ctx, {tag}); + } + if (auto scalarAttr = mlir::dyn_cast(tbaa)) { + auto accessType = scalarLower.lowerScalarType(scalarAttr); + auto tag = mlir::LLVM::TBAATagAttr::get(accessType, accessType, 0); + return mlir::ArrayAttr::get(ctx, {tag}); + } + return mlir::ArrayAttr(); +} + +} // namespace direct +} // namespace cir \ No newline at end of file diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5dbc7fdccbd7..5d8cf071927b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1644,6 +1644,9 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite( mlir::Value result = emitFromMemory(rewriter, dataLayout, op, newLoad.getResult()); rewriter.replaceOp(op, result); + if (auto tbaa = op.getTbaaAttr()) { + newLoad.setTBAATags(lowerCIRTBAAAttr(tbaa, rewriter, lowerMod)); + } return mlir::LogicalResult::success(); } @@ -1678,9 +1681,13 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite( mlir::Value value = emitToMemory(rewriter, dataLayout, op.getValue().getType(), adaptor.getValue()); // TODO: nontemporal, syncscope. - rewriter.replaceOpWithNewOp( - op, value, adaptor.getAddr(), alignment, op.getIsVolatile(), + auto storeOp = rewriter.create( + op->getLoc(), value, adaptor.getAddr(), alignment, op.getIsVolatile(), /* nontemporal */ false, /* invariantGroup */ invariant, ordering); + rewriter.replaceOp(op, storeOp); + if (auto tbaa = op.getTbaaAttr()) { + storeOp.setTBAATags(lowerCIRTBAAAttr(tbaa, rewriter, lowerMod)); + } return mlir::LogicalResult::success(); } diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index bb0dcaf87efe..629d148427fd 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -1145,6 +1145,9 @@ class CIRToLLVMSignBitOpLowering matchAndRewrite(cir::SignBitOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override; }; +mlir::ArrayAttr lowerCIRTBAAAttr(mlir::Attribute tbaa, + mlir::ConversionPatternRewriter &rewriter, + cir::LowerModule *lowerMod); #define GET_BUILTIN_LOWERING_CLASSES_DECLARE #include "clang/CIR/Dialect/IR/CIRBuiltinsLowering.inc" diff --git a/clang/test/CIR/CodeGen/const-alloca.cpp b/clang/test/CIR/CodeGen/const-alloca.cpp index 9247b2692474..7cc9a5b57517 100644 --- a/clang/test/CIR/CodeGen/const-alloca.cpp +++ b/clang/test/CIR/CodeGen/const-alloca.cpp @@ -66,8 +66,8 @@ int local_const_load_store() { // LLVM-LABEL: @_Z22local_const_load_storev // LLVM: %[[#INIT:]] = call i32 @_Z11produce_intv() -// LLVM-NEXT: store i32 %[[#INIT]], ptr %[[#SLOT:]], align 4, !invariant.group !{{.+}} -// LLVM-NEXT: %{{.+}} = load i32, ptr %[[#SLOT]], align 4, !invariant.group !{{.+}} +// LLVM-NEXT: store i32 %[[#INIT]], ptr %[[#SLOT:]], align 4, !tbaa !{{.*}}, !invariant.group !{{.+}} +// LLVM-NEXT: %{{.+}} = load i32, ptr %[[#SLOT]], align 4, !tbaa !{{.*}}, !invariant.group !{{.+}} // LLVM: } int local_const_optimize() { @@ -80,7 +80,7 @@ int local_const_optimize() { // LLVM-LABEL: @_Z20local_const_optimizev() // LLVM-NEXT: %[[#slot:]] = alloca i32, align 4 // LLVM-NEXT: %[[#init:]] = tail call i32 @_Z11produce_intv() -// LLVM-NEXT: store i32 %[[#init]], ptr %[[#slot]], align 4, !invariant.group !{{.+}} +// LLVM-NEXT: store i32 %[[#init]], ptr %[[#slot]], align 4, !tbaa !{{.*}}, !invariant.group !{{.+}} // LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#slot]]) // LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#slot]]) // LLVM-NEXT: ret i32 %[[#init]] diff --git a/clang/test/CIR/CodeGen/tbaa-bitinit.c b/clang/test/CIR/CodeGen/tbaa-bitinit.c new file mode 100644 index 000000000000..72c62162e91e --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa-bitinit.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + + +// CIR: #tbaa[[tbaa_NYI:.*]] = #cir.tbaa + +_BitInt(33) a; +_BitInt(31) b; +void c() { + // CIR: %{{.*}} = cir.load %{{.*}} : !cir.ptr>, !cir.int tbaa(#tbaa[[tbaa_NYI]]) + // CIR: cir.store %{{.*}}, %{{.*}} : !cir.int, !cir.ptr> tbaa(#tbaa[[tbaa_NYI]]) + b = a; +} diff --git a/clang/test/CIR/CodeGen/tbaa-enum.c b/clang/test/CIR/CodeGen/tbaa-enum.c new file mode 100644 index 000000000000..a08b416906fc --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa-enum.c @@ -0,0 +1,146 @@ +// This is inspired from clang/test/CodeGen/tbaa.c, with both CIR and LLVM checks. +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -disable-llvm-passes +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -disable-llvm-passes -relaxed-aliasing +// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O0 -disable-llvm-passes +// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s + +// NO-TBAA-NOT: !tbaa + +// CIR: #tbaa[[NYI:.*]] = #cir.tbaa +// CIR: #tbaa[[CHAR:.*]] = #cir.tbaa_omnipotent_char +// CIR: #tbaa[[INT:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[LONG_LONG:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[SHORT:.*]] = #cir.tbaa_scalar + +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; + +typedef enum { + RED_AUTO_32, + GREEN_AUTO_32, + BLUE_AUTO_32 +} EnumAuto32; + +typedef enum { + RED_AUTO_64, + GREEN_AUTO_64, + BLUE_AUTO_64 = 0x100000000ull +} EnumAuto64; + +typedef enum : uint16_t { + RED_16, + GREEN_16, + BLUE_16 +} Enum16; + +typedef enum : uint8_t { + RED_8, + GREEN_8, + BLUE_8 +} Enum8; + +uint32_t g0(EnumAuto32 *E, uint32_t *val) { + // CIR-LABEL: cir.func @g0 + // CIR: %[[C5:.*]] = cir.const #cir.int<5> : !s32i + // CIR: %[[U_C5:.*]] = cir.cast(integral, %[[C5]] : !s32i), !u32i + // CIR: %[[VAL_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[U_C5]], %[[VAL_PTR]] : !u32i, !cir.ptr tbaa(#tbaa[[INT]]) + // CIR: %[[C0:.*]] = cir.const #cir.int<0> : !s32i + // CIR: %[[U_C0:.*]] = cir.cast(integral, %[[C0]] : !s32i), !u32i + // CIR: %[[E_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[U_C0]], %[[E_PTR]] : !u32i, !cir.ptr tbaa(#tbaa[[NYI]]) + // CIR: %[[RET_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: %[[RET:.*]] = cir.load %[[RET_PTR]] : !cir.ptr, !u32i tbaa(#tbaa[[INT]]) + // CIR: cir.store %[[RET]], %{{.*}} : !u32i, !cir.ptr + + // LLVM-LABEL: define{{.*}} i32 @g0( + // LLVM: store i32 5, ptr %{{.*}}, align 4, !tbaa [[TAG_i32:!.*]] + // LLVM: store i32 0, ptr %{{.*}}, align 4 + // LLVM: load i32, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + *val = 5; + *E = RED_AUTO_32; + return *val; +} + +uint64_t g1(EnumAuto64 *E, uint64_t *val) { + // CIR-LABEL: cir.func @g1 + // CIR: %[[C5:.*]] = cir.const #cir.int<5> : !s32i + // CIR: %[[U_C5:.*]] = cir.cast(integral, %[[C5]] : !s32i), !u64i + // CIR: %[[VAL_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[U_C5]], %[[VAL_PTR]] : !u64i, !cir.ptr tbaa(#tbaa[[LONG_LONG]]) + // CIR: %[[C0:.*]] = cir.const #cir.int<0> : !u64i + // CIR: %[[E_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[C0]], %[[E_PTR]] : !u64i, !cir.ptr tbaa(#tbaa[[NYI]]) + // CIR: %[[RET_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: %[[RET:.*]] = cir.load %[[RET_PTR]] : !cir.ptr, !u64i tbaa(#tbaa[[LONG_LONG]]) + // CIR: cir.store %[[RET]], %{{.*}} : !u64i, !cir.ptr + + // LLVM-LABEL: define{{.*}} i64 @g1( + // LLVM: store i64 5, ptr %{{.*}}, align 8, !tbaa [[TAG_i64:!.*]] + // LLVM: store i64 0, ptr %{{.*}}, align 8 + // LLVM: load i64, ptr %{{.*}}, align 8, !tbaa [[TAG_i64]] + *val = 5; + *E = RED_AUTO_64; + return *val; +} + +uint16_t g2(Enum16 *E, uint16_t *val) { + // CIR-LABEL: cir.func @g2 + // CIR: %[[C5:.*]] = cir.const #cir.int<5> : !s32i + // CIR: %[[U_C5:.*]] = cir.cast(integral, %[[C5]] : !s32i), !u16i + // CIR: %[[VAL_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[U_C5]], %[[VAL_PTR]] : !u16i, !cir.ptr tbaa(#tbaa[[SHORT]]) + // CIR: %[[C0:.*]] = cir.const #cir.int<0> : !u16i + // CIR: %[[E_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[C0]], %[[E_PTR]] : !u16i, !cir.ptr tbaa(#tbaa[[NYI]]) + // CIR: %[[RET_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: %[[RET:.*]] = cir.load %[[RET_PTR]] : !cir.ptr, !u16i tbaa(#tbaa[[SHORT]]) + // CIR: cir.store %[[RET]], %{{.*}} : !u16i, !cir.ptr + + // LLVM-LABEL: define{{.*}} i16 @g2( + // LLVM: store i16 5, ptr %{{.*}}, align 2, !tbaa [[TAG_i16:!.*]] + // LLVM: store i16 0, ptr %{{.*}}, align 2 + // LLVM: load i16, ptr %{{.*}}, align 2, !tbaa [[TAG_i16]] + *val = 5; + *E = RED_16; + return *val; +} + +uint8_t g3(Enum8 *E, uint8_t *val) { + // CIR-LABEL: cir.func @g3 + // CIR: %[[C5:.*]] = cir.const #cir.int<5> : !s32i + // CIR: %[[U_C5:.*]] = cir.cast(integral, %[[C5]] : !s32i), !u8i + // CIR: %[[VAL_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[U_C5]], %[[VAL_PTR]] : !u8i, !cir.ptr tbaa(#tbaa[[CHAR]]) + // CIR: %[[C0:.*]] = cir.const #cir.int<0> : !u8i + // CIR: %[[E_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[C0]], %[[E_PTR]] : !u8i, !cir.ptr tbaa(#tbaa[[NYI]]) + // CIR: %[[RET_PTR:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: %[[RET:.*]] = cir.load %[[RET_PTR]] : !cir.ptr, !u8i tbaa(#tbaa[[CHAR]]) + // CIR: cir.store %[[RET]], %{{.*}} : !u8i, !cir.ptr + + + // LLVM-LABEL: define{{.*}} i8 @g3( + // LLVM: store i8 5, ptr %{{.*}}, align 1, !tbaa [[TAG_i8:!.*]] + // LLVM: store i8 0, ptr %{{.*}}, align 1 + // LLVM: load i8, ptr %{{.*}}, align 1, !tbaa [[TAG_i8]] + *val = 5; + *E = RED_8; + return *val; +} + +// LLVM: [[TAG_i32]] = !{[[TYPE_i32:!.*]], [[TYPE_i32]], i64 0} +// LLVM: [[TYPE_i32]] = !{!"int", [[TYPE_char:!.*]], +// LLVM: [[TYPE_char]] = !{!"omnipotent char", [[TAG_c_tbaa:!.*]], +// LLVM: [[TAG_c_tbaa]] = !{!"Simple C/C++ TBAA"} +// LLVM: [[TAG_i64]] = !{[[TYPE_i64:!.*]], [[TYPE_i64]], i64 0} +// LLVM: [[TYPE_i64]] = !{!"long long", [[TYPE_char]], +// LLVM: [[TAG_i16]] = !{[[TYPE_i16:!.*]], [[TYPE_i16]], i64 0} +// LLVM: [[TYPE_i16]] = !{!"short", [[TYPE_char]], +// LLVM: [[TAG_i8]] = !{[[TYPE_i8:!.*]], [[TYPE_char]], i64 0} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/tbaa-scalar.c b/clang/test/CIR/CodeGen/tbaa-scalar.c new file mode 100644 index 000000000000..086e5fbd375c --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa-scalar.c @@ -0,0 +1,143 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -relaxed-aliasing +// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O0 +// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s + +// NO-TBAA-NOT: !tbaa + +// CIR: #tbaa[[CHAR:.*]] = #cir.tbaa_omnipotent_char +// CIR: #tbaa[[FLOAT:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[DOUBLE:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[LONG_DOUBLE:.*]] = #cir.tbaa_scalar> +// CIR: #tbaa[[INT:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[LONG:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[LONG_LONG:.*]] = #cir.tbaa_scalar + +void test_int_and_float(int *a, float *b) { + // CIR-LABEL: cir.func @test_int_and_float + // CIR: cir.scope + // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s32i tbaa(#tbaa[[INT]]) + // CIR: cir.if + // CIR: %[[C2:.*]] = cir.const #cir.fp<2 + // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.float, !cir.ptr tbaa(#tbaa[[FLOAT]]) + // CIR: else + // CIR: %[[C3:.*]] = cir.const #cir.fp<3 + // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.float, !cir.ptr tbaa(#tbaa[[FLOAT]]) + + // LLVM-LABEL: void @test_int_and_float + // LLVM: %[[ARG_a:.*]] = load i32, ptr %{{.*}}, align 4, !tbaa ![[TBAA_INT:.*]] + // LLVM: %[[COND:.*]] = icmp eq i32 %[[ARG_a]], 1 + // LLVM: %[[RET:.*]] = select i1 %[[COND]], float 2.000000e+00, float 3.000000e+00 + // LLVM: store float %[[RET]], ptr %{{.*}}, align 4, !tbaa ![[TBAA_FLOAT:.*]] + // LLVM: ret void + if (*a == 1) { + *b = 2.0f; + } else { + *b = 3.0f; + } +} + +void test_long_and_double(long *a, double *b) { + // CIR-LABEL: cir.func @test_long_and_double + // CIR: cir.scope + // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s64i tbaa(#tbaa[[LONG]]) + // CIR: cir.if + // CIR: %[[C2:.*]] = cir.const #cir.fp<2 + // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.double, !cir.ptr tbaa(#tbaa[[DOUBLE]]) + // CIR: else + // CIR: %[[C3:.*]] = cir.const #cir.fp<3 + // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.double, !cir.ptr tbaa(#tbaa[[DOUBLE]]) + + // LLVM-LABEL: void @test_long_and_double + // LLVM: %[[ARG_a:.*]] = load i64, ptr %{{.*}}, align 8, !tbaa ![[TBAA_LONG:.*]] + // LLVM: %[[COND:.*]] = icmp eq i64 %[[ARG_a]], 1 + // LLVM: %[[RET:.*]] = select i1 %[[COND]], double 2.000000e+00, double 3.000000e+00 + // LLVM: store double %[[RET]], ptr %{{.*}}, align 8, !tbaa ![[TBAA_DOUBLE:.*]] + // LLVM: ret void + if (*a == 1L) { + *b = 2.0; + } else { + *b = 3.0; + } +} +void test_long_long_and_long_double(long long *a, long double *b) { + // CIR-LABEL: cir.func @test_long_long_and_long_double + // CIR: cir.scope + // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s64i tbaa(#tbaa[[LONG_LONG]]) + // CIR: cir.if + // CIR: %[[C2:.*]] = cir.const #cir.fp<2 + // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>>, !cir.ptr> + // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.long_double, !cir.ptr> tbaa(#tbaa[[LONG_DOUBLE]]) + // CIR: else + // CIR: %[[C3:.*]] = cir.const #cir.fp<3 + // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>>, !cir.ptr> + // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.long_double, !cir.ptr> tbaa(#tbaa[[LONG_DOUBLE]]) + + // LLVM-LABEL: void @test_long_long_and_long_double + // LLVM: %[[ARG_a:.*]] = load i64, ptr %{{.*}}, align 8, !tbaa ![[TBAA_LONG_LONG:.*]] + // LLVM: %[[COND:.*]] = icmp eq i64 %[[ARG_a]], 1 + // LLVM: %[[RET:.*]] = select i1 %[[COND]], x86_fp80 0xK40008000000000000000, x86_fp80 0xK4000C000000000000000 + // LLVM: store x86_fp80 %[[RET]], ptr %{{.*}}, align 16, !tbaa ![[TBAA_LONG_DOUBLE:.*]] + // LLVM: ret void + if (*a == 1L) { + *b = 2.0L; + } else { + *b = 3.0L; + } +} + +void test_char(char *a, char* b) { + // CIR-LABEL: cir.func @test_char + // CIR: cir.scope + // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s8i tbaa(#tbaa[[CHAR]]) + // CIR: cir.if + // CIR: %[[C2:.*]] = cir.const #cir.int<98> : !s32i + // CIR: %[[C2_CHAR:.*]] = cir.cast(integral, %[[C2]] : !s32i), !s8i + // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[C2_CHAR]], %[[TMP3]] : !s8i, !cir.ptr tbaa(#tbaa[[CHAR]]) + // CIR: else + // CIR: %[[C3:.*]] = cir.const #cir.int<0> : !s32i + // CIR: %[[C3_CHAR:.*]] = cir.cast(integral, %[[C3]] : !s32i), !s8i + // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[C3_CHAR]], %[[TMP4]] : !s8i, !cir.ptr tbaa(#tbaa[[CHAR]]) + + + // LLVM-LABEL: void @test_char + // LLVM: %[[ARG_a:.*]] = load i8, ptr %{{.*}}, align 1, !tbaa ![[TBAA_CHAR:.*]] + // LLVM: %[[COND:.*]] = icmp eq i8 %[[ARG_a]], 97 + // LLVM: %[[RET:.*]] = select i1 %[[COND]], i8 98, i8 0 + // LLVM: store i8 %[[RET]], ptr %{{.*}}, align 1, !tbaa ![[TBAA_CHAR]] + // LLVM: ret void + if (*a == 'a') { + *b = 'b'; + } + else { + *b = '\0'; + } +} + +// LLVM: ![[TBAA_INT]] = !{![[TBAA_INT_PARENT:.*]], ![[TBAA_INT_PARENT]], i64 0} +// LLVM: ![[TBAA_INT_PARENT]] = !{!"int", ![[CHAR:.*]], i64 0} +// LLVM: ![[CHAR]] = !{!"omnipotent char", ![[ROOT:.*]], i64 0} +// LLVM: ![[ROOT]] = !{!"Simple C/C++ TBAA"} +// LLVM: ![[TBAA_FLOAT]] = !{![[TBAA_FLOAT_PARENT:.*]], ![[TBAA_FLOAT_PARENT]], i64 0} +// LLVM: ![[TBAA_FLOAT_PARENT]] = !{!"float", ![[CHAR]], i64 0} +// LLVM: ![[TBAA_LONG]] = !{![[TBAA_LONG_PARENT:.*]], ![[TBAA_LONG_PARENT]], i64 0} +// LLVM: ![[TBAA_LONG_PARENT]] = !{!"long", ![[CHAR]], i64 0} +// LLVM: ![[TBAA_DOUBLE]] = !{![[TBAA_DOUBLE_PARENT:.*]], ![[TBAA_DOUBLE_PARENT]], i64 0} +// LLVM: ![[TBAA_DOUBLE_PARENT]] = !{!"double", ![[CHAR]], i64 0} +// LLVM: ![[TBAA_LONG_DOUBLE]] = !{![[TBAA_LONG_DOUBLE_PARENT:.*]], ![[TBAA_LONG_DOUBLE_PARENT]], i64 0} +// LLVM: ![[TBAA_LONG_DOUBLE_PARENT]] = !{!"long double", ![[CHAR]], i64 0} +// LLVM: ![[TBAA_CHAR]] = !{![[CHAR]], ![[CHAR]], i64 0} diff --git a/clang/test/CIR/CodeGen/tbaa-union.c b/clang/test/CIR/CodeGen/tbaa-union.c new file mode 100644 index 000000000000..b06e285bb453 --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa-union.c @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -relaxed-aliasing +// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O0 +// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s + +// NO-TBAA-NOT: !tbaa +// CIR: #tbaa[[CHAR:.*]] = #cir.tbaa_omnipotent_char +typedef struct { + union { + int a, b; + }; + int c; +} S; + +void foo(S *s) { + // CIR-LABEL: cir.func @foo + // CIR: %[[C1:.*]] = cir.const #cir.int<1> : !s32i loc(#loc6) + // CIR: %{{.*}} = cir.load %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: cir.store %[[C1]], %{{.*}} : !s32i, !cir.ptr tbaa(#tbaa[[CHAR]]) + + // LLVM-LABEL: void @foo + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa ![[TBAA_TAG:.*]] + s->a = 1; +} + +// LLVM: ![[TBAA_TAG]] = !{![[CHAR:.*]], ![[CHAR]], i64 0} +// LLVM: ![[CHAR]] = !{!"omnipotent char", ![[ROOT:.*]], i64 0} +// LLVM: ![[ROOT]] = !{!"Simple C/C++ TBAA"} diff --git a/clang/test/CIR/CodeGen/tbaa-vptr.cpp b/clang/test/CIR/CodeGen/tbaa-vptr.cpp new file mode 100644 index 000000000000..dbe28be626a2 --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa-vptr.cpp @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// CIR-NOT: #tbaa + +struct Member { + ~Member(); +}; + +struct A { + virtual ~A(); +}; + +struct B : A { + Member m; + virtual ~B(); +}; +B::~B() { } diff --git a/clang/test/CIR/CodeGen/tbaa.c b/clang/test/CIR/CodeGen/tbaa.c deleted file mode 100644 index 43cdde47ecb7..000000000000 --- a/clang/test/CIR/CodeGen/tbaa.c +++ /dev/null @@ -1,22 +0,0 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s - -// CIR: #tbaa[[TBAA_NO:.*]] = #cir.tbaa -void f(int *a, float *b) { - // CIR: cir.scope - // CIR: %[[TMP1:.*]] = cir.load deref %{{.*}} : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) - // CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr, !s32i tbaa([#tbaa[[TBAA_NO]]]) - // CIR: cir.if - // CIR: %[[C2:.*]] = cir.const #cir.fp<2 - // CIR: %[[TMP3:.*]] = cir.load deref %[[ARG_b:.*]] : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) - // CIR: cir.store %[[C2]], %[[TMP3]] : !cir.float, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) - // CIR: else - // CIR: %[[C3:.*]] = cir.const #cir.fp<3 - // CIR: %[[TMP4:.*]] = cir.load deref %[[ARG_b]] : !cir.ptr>, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) - // CIR: cir.store %[[C3]], %[[TMP4]] : !cir.float, !cir.ptr tbaa([#tbaa[[TBAA_NO]]]) - if (*a == 1) { - *b = 2.0f; - } else { - *b = 3.0f; - } -} From 3017a004bbd2c310ec1818264a81068bab520913 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Tue, 11 Feb 2025 17:55:36 +0300 Subject: [PATCH 2246/2301] [CIR][CodeGen] Add initial support for __cxa_rethrow (#1290) This PR adds an initial support for `__cxa_rethrow`, and one test that produces a rethrow. I am very open to suggestions regarding this PR, because I'm still a bit unsure if this replicates the original codegen properly. For example, using the test added, the OG CodeGen produces: ``` entry: invoke void @_ZN1SC2Ev(ptr noundef nonnull align 1 dereferenceable(1) %s) to label %invoke.cont unwind label %lpad invoke.cont: ; preds = %entry invoke void @__cxa_rethrow() #2 to label %unreachable unwind label %lpad lpad: ; preds = %invoke.cont, %entry %0 = landingpad { ptr, i32 } catch ptr null %1 = extractvalue { ptr, i32 } %0, 0 store ptr %1, ptr %exn.slot, align 8 %2 = extractvalue { ptr, i32 } %0, 1 store i32 %2, ptr %ehselector.slot, align 4 br label %catch catch: ; preds = %lpad %exn = load ptr, ptr %exn.slot, align 8 %3 = call ptr @__cxa_begin_catch(ptr %exn) #3 %4 = load i32, ptr %r, align 4 %inc = add nsw i32 %4, 1 store i32 %inc, ptr %r, align 4 call void @__cxa_end_catch() br label %try.cont ``` and the proposed CIR equivalent produces: ``` invoke void @_ZN1SC2Ev(ptr %1) to label %5 unwind label %9 5: ; preds = %4 invoke void @__cxa_rethrow() to label %6 unwind label %13 6: ; preds = %5 br label %7 7: ; preds = %6 unreachable 8: ; No predecessors! br label %22 9: ; preds = %4 %10 = landingpad { ptr, i32 } catch ptr null %11 = extractvalue { ptr, i32 } %10, 0 %12 = extractvalue { ptr, i32 } %10, 1 br label %17 13: ; preds = %5 %14 = landingpad { ptr, i32 } catch ptr null %15 = extractvalue { ptr, i32 } %14, 0 %16 = extractvalue { ptr, i32 } %14, 1 br label %17 17: ; preds = %13, %9 %18 = phi ptr [ %11, %9 ], [ %15, %13 ] %19 = call ptr @__cxa_begin_catch(ptr %18) %20 = load i32, ptr %2, align 4 %21 = add i32 %20, 1 store i32 %21, ptr %2, align 4 call void @__cxa_end_catch() br label %22 ``` There are quite a number of differences: `phi` in the CIR version VS loading from `%exn.slot` in the OG, having multiple landing pads, etc. The CIR version still seems reasonable to me, mostly because currently we are unable to replicate the exact behavior of the OG codegen. Again, I am very open to more discussions and suggestions here) --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 12 +- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 7 +- .../Dialect/Transforms/LoweringPrepare.cpp | 24 ++- clang/test/CIR/CodeGen/throw.cpp | 178 +++++++++++++++++- 4 files changed, 216 insertions(+), 5 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 394cca867a62..4de62d46e5a5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2261,7 +2261,17 @@ mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam( void CIRGenItaniumCXXABI::emitRethrow(CIRGenFunction &CGF, bool isNoReturn) { // void __cxa_rethrow(); - llvm_unreachable("NYI"); + + if (isNoReturn) { + auto &builder = CGF.getBuilder(); + assert(CGF.currSrcLoc && "expected source location"); + auto loc = *CGF.currSrcLoc; + builder.create(loc, mlir::Value{}, mlir::FlatSymbolRefAttr{}, + mlir::FlatSymbolRefAttr{}); + builder.create(loc); + } else { + llvm_unreachable("NYI"); + } } void CIRGenItaniumCXXABI::emitThrow(CIRGenFunction &CGF, diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index 2eb4b6134686..e912a9aa6c8f 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -426,11 +426,14 @@ class CIRTryOpFlattening : public mlir::OpRewritePattern { SmallVectorImpl &landingPads) const { // Replace the tryOp return with a branch that jumps out of the body. rewriter.setInsertionPointToEnd(afterBody); - auto tryBodyYield = cast(afterBody->getTerminator()); mlir::Block *beforeCatch = rewriter.getInsertionBlock(); rewriter.setInsertionPointToEnd(beforeCatch); - rewriter.replaceOpWithNewOp(tryBodyYield, afterTry); + + // Check if the terminator is a YieldOp because there could be another + // terminator, e.g. unreachable + if (auto tryBodyYield = dyn_cast(afterBody->getTerminator())) + rewriter.replaceOpWithNewOp(tryBodyYield, afterTry); // Start the landing pad by getting the inflight exception information. mlir::Block *nextDispatcher = diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp index 20239d843d2d..f6f3e58e0322 100644 --- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp +++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp @@ -86,6 +86,7 @@ struct LoweringPreparePass : public LoweringPrepareBase { void lowerToMemCpy(StoreOp op); void lowerArrayDtor(ArrayDtor op); void lowerArrayCtor(ArrayCtor op); + void lowerThrowOp(ThrowOp op); /// Collect annotations of global values in the module void addGlobalAnnotations(mlir::Operation *op, mlir::ArrayAttr annotations); @@ -1133,6 +1134,25 @@ void LoweringPreparePass::lowerIterEndOp(IterEndOp op) { op.erase(); } +void LoweringPreparePass::lowerThrowOp(ThrowOp op) { + CIRBaseBuilderTy builder(getContext()); + + if (op.rethrows()) { + auto voidTy = cir::VoidType::get(builder.getContext()); + auto fnType = cir::FuncType::get({}, voidTy); + auto fnName = "__cxa_rethrow"; + + builder.setInsertionPointToStart(&theModule.getBodyRegion().front()); + FuncOp f = buildRuntimeFunction(builder, fnName, op.getLoc(), fnType); + + builder.setInsertionPointAfter(op.getOperation()); + auto call = builder.createTryCallOp(op.getLoc(), f, {}); + + op->replaceAllUsesWith(call); + op->erase(); + } +} + void LoweringPreparePass::addGlobalAnnotations(mlir::Operation *op, mlir::ArrayAttr annotations) { auto globalValue = cast(op); @@ -1195,6 +1215,8 @@ void LoweringPreparePass::runOnOp(Operation *op) { } if (std::optional annotations = fnOp.getAnnotations()) addGlobalAnnotations(fnOp, annotations.value()); + } else if (auto throwOp = dyn_cast(op)) { + lowerThrowOp(throwOp); } } @@ -1211,7 +1233,7 @@ void LoweringPreparePass::runOnOperation() { op->walk([&](Operation *op) { if (isa(op)) + ArrayCtor, ArrayDtor, cir::FuncOp, StoreOp, ThrowOp>(op)) opsToTransform.push_back(op); }); diff --git a/clang/test/CIR/CodeGen/throw.cpp b/clang/test/CIR/CodeGen/throw.cpp index c2395c3725c3..87b34d231767 100644 --- a/clang/test/CIR/CodeGen/throw.cpp +++ b/clang/test/CIR/CodeGen/throw.cpp @@ -21,4 +21,180 @@ double d(int a, int b) { // LLVM: %[[ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 8) // LLVM: store ptr @.str, ptr %[[ADDR]], align 8 // LLVM: call void @__cxa_throw(ptr %[[ADDR]], ptr @_ZTIPKc, ptr null) -// LLVM: unreachable \ No newline at end of file +// LLVM: unreachable + +struct S { + S() {} +}; + +void refoo1() { + int r = 1; + try { + S s; + throw; + } catch (...) { + ++r; + } +} + +// CIR-LABEL: @_Z6refoo1v() +// CIR: %[[V0:.*]] = cir.alloca !s32i, !cir.ptr, ["r", init] {alignment = 4 : i64} +// CIR: %[[V1:.*]] = cir.const #cir.int<1> : !s32i +// CIR: cir.store %[[V1]], %[[V0]] : !s32i, !cir.ptr +// CIR: cir.scope { +// CIR: %[[V2:.*]] = cir.alloca !ty_S, !cir.ptr, ["s", init] {alignment = 1 : i64} +// CIR: cir.try { +// CIR: cir.call exception @_ZN1SC2Ev(%[[V2]]) : (!cir.ptr) -> () +// CIR: cir.call exception @__cxa_rethrow() : () -> () +// CIR: cir.unreachable +// CIR: } catch [type #cir.all { +// CIR: %[[V3:.*]] = cir.catch_param -> !cir.ptr +// CIR: %[[V4:.*]] = cir.load %[[V0]] : !cir.ptr, !s32i +// CIR: %[[V5:.*]] = cir.unary(inc, %[[V4]]) : !s32i, !s32i +// CIR: cir.store %[[V5]], %[[V0]] : !s32i, !cir.ptr +// CIR: cir.yield +// CIR: }] +// CIR: } +// CIR: cir.return +// CIR: } + +// LLVM: define dso_local void @_Z6refoo1v() +// LLVM: %[[V1:.*]] = alloca %struct.S, i64 1, align 1 +// LLVM: %[[V2:.*]] = alloca i32, i64 1, align 4 +// LLVM: store i32 1, ptr %[[V2]], align 4 +// LLVM: br label %[[B3:.*]] +// LLVM: [[B3]]: +// LLVM: br label %[[B4:.*]] +// LLVM: [[B4]]: +// LLVM: invoke void @_ZN1SC2Ev(ptr %[[V1]]) +// LLVM: to label %[[B5:.*]] unwind label %[[B7:.*]] +// LLVM: [[B5]]: +// LLVM: invoke void @__cxa_rethrow() +// LLVM: to label %[[B6:.*]] unwind label %[[B11:.*]] +// LLVM: [[B6]]: +// LLVM: unreachable +// LLVM: [[B7]]: +// LLVM: %[[V8:.*]] = landingpad { ptr, i32 } +// LLVM: catch ptr null +// LLVM: %[[V9:.*]] = extractvalue { ptr, i32 } %[[V8]], 0 +// LLVM: %[[V10:.*]] = extractvalue { ptr, i32 } %[[V8]], 1 +// LLVM: br label %[[B15:.*]] +// LLVM: [[B11]]: +// LLVM: %[[V12:.*]] = landingpad { ptr, i32 } +// LLVM: catch ptr null +// LLVM: %[[V13:.*]] = extractvalue { ptr, i32 } %[[V12]], 0 +// LLVM: %[[V14:.*]] = extractvalue { ptr, i32 } %[[V12]], 1 +// LLVM: br label %[[B15:.*]] +// LLVM: [[B15]]: +// LLVM: %[[V16:.*]] = phi ptr [ %[[V9]], %[[B7]] ], [ %[[V13]], %[[B11]] ] +// LLVM: %[[V17:.*]] = call ptr @__cxa_begin_catch(ptr %[[V16]]) +// LLVM: %[[V18:.*]] = load i32, ptr %[[V2]], align 4 +// LLVM: %[[V19:.*]] = add i32 %[[V18]], 1 +// LLVM: store i32 %[[V19]], ptr %[[V2]], align 4 +// LLVM: call void @__cxa_end_catch() + +void refoo2() { + int r = 1; + try { + for (int i = 0; i < 5; i++) { + S s; + throw; + } + S s; + } catch (...) { + ++r; + } +} + +// CIR-LABEL: @_Z6refoo2v() +// CIR: %[[V0:.*]] = cir.alloca !s32i, !cir.ptr, ["r", init] {alignment = 4 : i64} +// CIR: %[[V1:.*]] = cir.const #cir.int<1> : !s32i +// CIR: cir.store %[[V1]], %[[V0]] : !s32i, !cir.ptr +// CIR: cir.scope { +// CIR: %[[V2:.*]] = cir.alloca !ty_S, !cir.ptr, ["s", init] {alignment = 1 : i64} +// CIR: cir.try { +// CIR: cir.scope { +// CIR: %[[V3:.*]] = cir.alloca !s32i, !cir.ptr, ["i", init] {alignment = 4 : i64} +// CIR: %[[V4:.*]] = cir.const #cir.int<0> : !s32i +// CIR: cir.store %[[V4]], %[[V3]] : !s32i, !cir.ptr +// CIR: cir.for : cond { +// CIR: %[[V5:.*]] = cir.load %[[V3]] : !cir.ptr, !s32i +// CIR: %[[V6:.*]] = cir.const #cir.int<5> : !s32i +// CIR: %[[V7:.*]] = cir.cmp(lt, %[[V5]], %[[V6]]) : !s32i, !cir.bool +// CIR: cir.condition(%[[V7]]) +// CIR: } body { +// CIR: cir.scope { +// CIR: %[[V5:.*]] = cir.alloca !ty_S, !cir.ptr, ["s", init] {alignment = 1 : i64} +// CIR: cir.call exception @_ZN1SC2Ev(%[[V5]]) : (!cir.ptr) -> () +// CIR: cir.call exception @__cxa_rethrow() : () -> () +// CIR: cir.unreachable +// CIR: } +// CIR: cir.yield +// CIR: } step { +// CIR: %[[V5:.*]] = cir.load %[[V3]] : !cir.ptr, !s32i +// CIR: %[[V6:.*]] = cir.unary(inc, %[[V5]]) : !s32i, !s32i +// CIR: cir.store %[[V6]], %[[V3]] : !s32i, !cir.ptr +// CIR: cir.yield +// CIR: } +// CIR: } +// CIR: cir.call exception @_ZN1SC2Ev(%[[V2]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } catch [type #cir.all { +// CIR: %[[V3:.*]] = cir.catch_param -> !cir.ptr +// CIR: %[[V4:.*]] = cir.load %[[V0]] : !cir.ptr, !s32i +// CIR: %[[V5:.*]] = cir.unary(inc, %[[V4]]) : !s32i, !s32i +// CIR: cir.store %[[V5]], %[[V0]] : !s32i, !cir.ptr +// CIR: cir.yield +// CIR: }] +// CIR: } +// CIR: cir.return +// CIR: } + +// LLVM: {{.*}}: +// LLVM: invoke void @_ZN1SC2Ev(ptr %[[V3:.*]]) +// LLVM: to label %[[B13:.*]] unwind label %[[B22:.*]] +// LLVM: [[B13]]: +// LLVM: invoke void @__cxa_rethrow() +// LLVM: to label %[[B14:.*]] unwind label %[[B26:.*]] +// LLVM: [[B14]]: +// LLVM: unreachable +// LLVM: [[B15]]: +// LLVM: br label %[[B16:.*]] +// LLVM: [[B16]]: +// LLVM: %[[V17]] = load i32, ptr {{.*}}, align 4 +// LLVM: %[[V18]] = add i32 %[[V17]], 1 +// LLVM: store i32 %[[V18]], ptr {{.*}}, align 4 +// LLVM: br label {{.*}} +// LLVM: %[[B19:.*]] +// LLVM: br label %[[B20:.*]] +// LLVM: [[B20]]: +// LLVM: invoke void @_ZN1SC2Ev(ptr {{.*}}) +// LLVM: to label %[[B21:.*]] unwind label %[[B30:.*]] +// LLVM: [[B21]]: +// LLVM: br label {{.*}} +// LLVM: [[B22]]: +// LLVM: %[[V23:.*]] = landingpad { ptr, i32 } +// LLVM: catch ptr null +// LLVM: %[[V24:.*]] = extractvalue { ptr, i32 } %[[V23]], 0 +// LLVM: %[[V25:.*]] = extractvalue { ptr, i32 } %[[V23]], 1 +// LLVM: br label %[[B34:.*]] +// LLVM: [[B26]]: +// LLVM: %[[V27:.*]] = landingpad { ptr, i32 } +// LLVM: catch ptr null +// LLVM: %[[V28:.*]] = extractvalue { ptr, i32 } %[[V27]], 0 +// LLVM: %[[V29:.*]] = extractvalue { ptr, i32 } %[[V27]], 1 +// LLVM: br label %[[B34:.*]] +// LLVM: [[B30]]: +// LLVM: %[[V31:.*]] = landingpad { ptr, i32 } +// LLVM: catch ptr null +// LLVM: %[[V32:.*]] = extractvalue { ptr, i32 } %[[V31]], 0 +// LLVM: %[[V33:.*]] = extractvalue { ptr, i32 } %[[V31]], 1 +// LLVM: br label %[[B34:.*]] +// LLVM: [[B34]]: +// LLVM: %[[V35:.*]] = phi ptr [ %[[V32]], %[[B30]] ], [ %[[V24]], %[[B22]] ], [ %[[V28]], %[[B26]] ] +// LLVM: %[[V36:.*]] = call ptr @__cxa_begin_catch(ptr %[[V35]]) +// LLVM: %[[V37:.*]] = load i32, ptr {{.*}}, align 4 +// LLVM: %[[V38:.*]] = add i32 %[[V37]], 1 +// LLVM: store i32 %[[V38]], ptr {{.*}}, align 4 +// LLVM: call void @__cxa_end_catch() +// LLVM: br label {{.*}} From a322191cba0ad70f384f4671c7287926cc1ee79a Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Tue, 11 Feb 2025 14:56:38 +0000 Subject: [PATCH 2247/2301] [CIR] Correct initialization of VoidPtrPtrTy (#1336) See issue #1331 for more details. I'm not sure how to test this change; would it be enough that this does not cause any regressions? --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 ++ clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 12 ++++-------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index d0b098cd188f..68a4ea22a16b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -128,6 +128,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, // Initialize CIR pointer types cache. VoidPtrTy = cir::PointerType::get(&getMLIRContext(), VoidTy); + VoidPtrPtrTy = cir::PointerType::get(&getMLIRContext(), VoidPtrTy); FP16Ty = cir::FP16Type::get(&getMLIRContext()); BFloat16Ty = cir::BF16Type::get(&getMLIRContext()); @@ -159,6 +160,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, UInt8PtrTy = builder.getPointerTo(UInt8Ty); UInt8PtrPtrTy = builder.getPointerTo(UInt8PtrTy); AllocaInt8PtrTy = UInt8PtrTy; + AllocaVoidPtrTy = VoidPtrTy; // TODO: GlobalsInt8PtrTy // TODO: ConstGlobalsPtrTy CIRAllocaAddressSpace = getTargetCIRGenInfo().getCIRAllocaAddressSpace(); diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h index 4e29ad5dca46..551bc74861b6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h @@ -62,16 +62,12 @@ struct CIRGenTypeCache { cir::PointerType UInt8PtrTy; /// void** in address space 0 - union { - cir::PointerType VoidPtrPtrTy; - cir::PointerType UInt8PtrPtrTy; - }; + cir::PointerType VoidPtrPtrTy; + cir::PointerType UInt8PtrPtrTy; /// void* in alloca address space - union { - cir::PointerType AllocaVoidPtrTy; - cir::PointerType AllocaInt8PtrTy; - }; + cir::PointerType AllocaVoidPtrTy; + cir::PointerType AllocaInt8PtrTy; /// void* in default globals address space // union { From 1c92e3f6b4ef4daf48a75a0aa7c6c3db5e15e55f Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Wed, 12 Feb 2025 02:42:14 -0800 Subject: [PATCH 2248/2301] [CIR][NFC] Refactor GlobalOpLowering to align with upstream (#1337) This change refactors the CIRToLLVMGlobalOpLowering handling to align with changes that were requested upstream. The upstream changes didn't entirely fit with the full incubator implementation but these changes fit the goals of the upstream refactoring into the current implementation. No observable changes are intended for this change. --- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 168 ++++++++---------- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 5 +- 2 files changed, 76 insertions(+), 97 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 5d8cf071927b..8cfee09104c4 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2307,8 +2307,9 @@ mlir::LogicalResult CIRToLLVMSwitchFlatOpLowering::matchAndRewrite( /// Replace CIR global with a region initialized LLVM global and update /// insertion point to the end of the initializer block. -void CIRToLLVMGlobalOpLowering::setupRegionInitializedLLVMGlobalOp( - cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const { +void CIRToLLVMGlobalOpLowering::createRegionInitializedLLVMGlobalOp( + cir::GlobalOp op, mlir::Attribute attr, + mlir::ConversionPatternRewriter &rewriter) const { const auto llvmType = convertTypeForMemory(*getTypeConverter(), dataLayout, op.getSymType()); SmallVector attributes; @@ -2321,6 +2322,10 @@ void CIRToLLVMGlobalOpLowering::setupRegionInitializedLLVMGlobalOp( /*comdat*/ mlir::SymbolRefAttr(), attributes); newGlobalOp.getRegion().push_back(new mlir::Block()); rewriter.setInsertionPointToEnd(newGlobalOp.getInitializerBlock()); + + rewriter.create( + op->getLoc(), + lowerCirAttrAsValue(op, attr, rewriter, typeConverter, dataLayout)); } mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( @@ -2350,109 +2355,82 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( attributes.push_back(rewriter.getNamedAttr("visibility_", visibility)); - // Check for missing funcionalities. - if (!init.has_value()) { - rewriter.replaceOpWithNewOp( - op, llvmType, isConst, linkage, symbol, mlir::Attribute(), - /*alignment*/ 0, - /*addrSpace*/ getGlobalOpTargetAddrSpace(rewriter, typeConverter, op), - /*dsoLocal*/ isDsoLocal, /*threadLocal*/ (bool)op.getTlsModelAttr(), - /*comdat*/ mlir::SymbolRefAttr(), attributes); - return mlir::success(); - } - - // Initializer is a constant array: convert it to a compatible llvm init. - if (auto constArr = mlir::dyn_cast(init.value())) { - if (auto attr = mlir::dyn_cast(constArr.getElts())) { - llvm::SmallString<256> literal(attr.getValue()); - if (constArr.getTrailingZerosNum()) - literal.append(constArr.getTrailingZerosNum(), '\0'); - init = rewriter.getStringAttr(literal); - } else if (auto attr = - mlir::dyn_cast(constArr.getElts())) { - // Failed to use a compact attribute as an initializer: - // initialize elements individually. - if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - rewriter.create( - op->getLoc(), lowerCirAttrAsValue(op, constArr, rewriter, - typeConverter, dataLayout)); - return mlir::success(); + if (init.has_value()) { + if (mlir::isa(init.value())) { + // If a directly equivalent attribute is available, use it. + init = + llvm::TypeSwitch(init.value()) + .Case([&](cir::FPAttr attr) { + return rewriter.getFloatAttr(llvmType, attr.getValue()); + }) + .Case([&](cir::IntAttr attr) { + return rewriter.getIntegerAttr(llvmType, attr.getValue()); + }) + .Case([&](cir::BoolAttr attr) { + return rewriter.getBoolAttr(attr.getValue()); + }) + .Default([&](mlir::Attribute attr) { return mlir::Attribute(); }); + // If initRewriter returned a null attribute, init will have a value but + // the value will be null. If that happens, initRewriter didn't handle the + // attribute type. It probably needs to be added to + // GlobalInitAttrRewriter. + if (!init.value()) { + op.emitError() << "unsupported initializer '" << init.value() << "'"; + return mlir::failure(); } + } else if (mlir::isa(init.value())) { + // TODO(cir): once LLVM's dialect has proper equivalent attributes this + // should be updated. For now, we use a custom op to initialize globals + // to the appropriate value. + createRegionInitializedLLVMGlobalOp(op, init.value(), rewriter); + return mlir::success(); + } else if (auto constArr = + mlir::dyn_cast(init.value())) { + // Initializer is a constant array: convert it to a compatible llvm init. + if (auto attr = mlir::dyn_cast(constArr.getElts())) { + llvm::SmallString<256> literal(attr.getValue()); + if (constArr.getTrailingZerosNum()) + literal.append(constArr.getTrailingZerosNum(), '\0'); + init = rewriter.getStringAttr(literal); + } else if (auto attr = + mlir::dyn_cast(constArr.getElts())) { + // Failed to use a compact attribute as an initializer: + // initialize elements individually. + if (!(init = lowerConstArrayAttr(constArr, getTypeConverter()))) { + createRegionInitializedLLVMGlobalOp(op, constArr, rewriter); + return mlir::success(); + } + } else { + op.emitError() + << "unsupported lowering for #cir.const_array with value " + << constArr.getElts(); + return mlir::failure(); + } + } else if (auto dataMemberAttr = + mlir::dyn_cast(init.value())) { + assert(lowerMod && "lower module is not available"); + mlir::DataLayout layout(op->getParentOfType()); + mlir::TypedAttr abiValue = lowerMod->getCXXABI().lowerDataMemberConstant( + dataMemberAttr, layout, *typeConverter); + auto abiOp = mlir::cast(rewriter.clone(*op.getOperation())); + abiOp.setInitialValueAttr(abiValue); + abiOp.setSymType(abiValue.getType()); + rewriter.replaceOp(op, abiOp); + return mlir::success(); } else { - op.emitError() << "unsupported lowering for #cir.const_array with value " - << constArr.getElts(); + op.emitError() << "unsupported initializer '" << init.value() << "'"; return mlir::failure(); } - } else if (auto fltAttr = mlir::dyn_cast(init.value())) { - // Initializer is a constant floating-point number: convert to MLIR - // builtin constant. - init = rewriter.getFloatAttr(llvmType, fltAttr.getValue()); - } - // Initializer is a constant integer: convert to MLIR builtin constant. - else if (auto intAttr = mlir::dyn_cast(init.value())) { - init = rewriter.getIntegerAttr(llvmType, intAttr.getValue()); - } else if (auto boolAttr = mlir::dyn_cast(init.value())) { - init = rewriter.getBoolAttr(boolAttr.getValue()); - } else if (isa( - init.value())) { - // TODO(cir): once LLVM's dialect has proper equivalent attributes this - // should be updated. For now, we use a custom op to initialize globals - // to the appropriate value. - setupRegionInitializedLLVMGlobalOp(op, rewriter); - auto value = lowerCirAttrAsValue(op, init.value(), rewriter, typeConverter, - dataLayout); - rewriter.create(loc, value); - return mlir::success(); - } else if (auto dataMemberAttr = - mlir::dyn_cast(init.value())) { - assert(lowerMod && "lower module is not available"); - mlir::DataLayout layout(op->getParentOfType()); - mlir::TypedAttr abiValue = lowerMod->getCXXABI().lowerDataMemberConstant( - dataMemberAttr, layout, *typeConverter); - auto abiOp = mlir::cast(rewriter.clone(*op.getOperation())); - abiOp.setInitialValueAttr(abiValue); - abiOp.setSymType(abiValue.getType()); - rewriter.replaceOp(op, abiOp); - return mlir::success(); - } else if (const auto structAttr = - mlir::dyn_cast(init.value())) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - rewriter.create( - op->getLoc(), lowerCirAttrAsValue(op, structAttr, rewriter, - typeConverter, dataLayout)); - return mlir::success(); - } else if (auto attr = mlir::dyn_cast(init.value())) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - rewriter.create( - loc, - lowerCirAttrAsValue(op, attr, rewriter, typeConverter, dataLayout)); - return mlir::success(); - } else if (const auto vtableAttr = - mlir::dyn_cast(init.value())) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - rewriter.create( - op->getLoc(), lowerCirAttrAsValue(op, vtableAttr, rewriter, - typeConverter, dataLayout)); - return mlir::success(); - } else if (const auto typeinfoAttr = - mlir::dyn_cast(init.value())) { - setupRegionInitializedLLVMGlobalOp(op, rewriter); - rewriter.create( - op->getLoc(), lowerCirAttrAsValue(op, typeinfoAttr, rewriter, - typeConverter, dataLayout)); - return mlir::success(); - } else { - op.emitError() << "unsupported initializer '" << init.value() << "'"; - return mlir::failure(); } // Rewrite op. auto llvmGlobalOp = rewriter.replaceOpWithNewOp( - op, llvmType, isConst, linkage, symbol, init.value(), + op, llvmType, isConst, linkage, symbol, init.value_or(mlir::Attribute()), /*alignment*/ op.getAlignment().value_or(0), /*addrSpace*/ getGlobalOpTargetAddrSpace(rewriter, typeConverter, op), - /*dsoLocal*/ false, /*threadLocal*/ (bool)op.getTlsModelAttr(), + /*dsoLocal*/ isDsoLocal, /*threadLocal*/ (bool)op.getTlsModelAttr(), /*comdat*/ mlir::SymbolRefAttr(), attributes); auto mod = op->getParentOfType(); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 629d148427fd..7b668dea43f5 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -589,8 +589,9 @@ class CIRToLLVMGlobalOpLowering mlir::ConversionPatternRewriter &) const override; private: - void setupRegionInitializedLLVMGlobalOp( - cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const; + void createRegionInitializedLLVMGlobalOp( + cir::GlobalOp op, mlir::Attribute attr, + mlir::ConversionPatternRewriter &rewriter) const; mutable mlir::LLVM::ComdatOp comdatOp = nullptr; static void addComdat(mlir::LLVM::GlobalOp &op, From e3423089a1402ee531b7d230e7b5abd7287f5983 Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Wed, 12 Feb 2025 12:53:13 +0000 Subject: [PATCH 2249/2301] [CIR][CUDA] Generate device stubs (#1332) Now we're able to generate device stubs. A simple explanation: We first store function arguments inside a `void* args[]`, which shall be passed into `cudaLaunchKernel`. Then we retrieve configuration using `__cudaPopCallConfiguration`, popping the config pushed by callsite. (We can't generate calls to kernels currently.) Now we have enough arguments. Invoke `cudaLaunchKernel` and we're OK. --- clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp | 171 ++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h | 47 ++++++ clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 12 +- clang/lib/CIR/CodeGen/CMakeLists.txt | 1 + clang/test/CIR/CodeGen/CUDA/simple.cu | 26 +-- 7 files changed, 248 insertions(+), 15 deletions(-) create mode 100644 clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp create mode 100644 clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h diff --git a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp new file mode 100644 index 000000000000..400c41cbb0d4 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp @@ -0,0 +1,171 @@ +//===--- CIRGenCUDARuntime.cpp - Interface to CUDA Runtimes ----*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides an abstract class for CUDA CIR generation. Concrete +// subclasses of this implement code generation for specific OpenCL +// runtime libraries. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenCUDARuntime.h" +#include "CIRGenFunction.h" +#include "clang/Basic/Cuda.h" +#include "clang/CIR/Dialect/IR/CIRTypes.h" + +using namespace clang; +using namespace clang::CIRGen; + +CIRGenCUDARuntime::~CIRGenCUDARuntime() {} + +void CIRGenCUDARuntime::emitDeviceStubBodyLegacy(CIRGenFunction &cgf, + cir::FuncOp fn, + FunctionArgList &args) { + llvm_unreachable("NYI"); +} + +void CIRGenCUDARuntime::emitDeviceStubBodyNew(CIRGenFunction &cgf, + cir::FuncOp fn, + FunctionArgList &args) { + if (cgm.getLangOpts().HIP) + llvm_unreachable("NYI"); + + // This requires arguments to be sent to kernels in a different way. + if (cgm.getLangOpts().OffloadViaLLVM) + llvm_unreachable("NYI"); + + auto &builder = cgm.getBuilder(); + + // For cudaLaunchKernel, we must add another layer of indirection + // to arguments. For example, for function `add(int a, float b)`, + // we need to pass it as `void *args[2] = { &a, &b }`. + + auto loc = fn.getLoc(); + auto voidPtrArrayTy = + cir::ArrayType::get(&cgm.getMLIRContext(), cgm.VoidPtrTy, args.size()); + mlir::Value kernelArgs = builder.createAlloca( + loc, cir::PointerType::get(voidPtrArrayTy), voidPtrArrayTy, "kernel_args", + CharUnits::fromQuantity(16)); + + // Store arguments into kernelArgs + for (auto [i, arg] : llvm::enumerate(args)) { + mlir::Value index = + builder.getConstInt(loc, llvm::APInt(/*numBits=*/32, i)); + mlir::Value storePos = builder.createPtrStride(loc, kernelArgs, index); + builder.CIRBaseBuilderTy::createStore( + loc, cgf.GetAddrOfLocalVar(arg).getPointer(), storePos); + } + + // We retrieve dim3 type by looking into the second argument of + // cudaLaunchKernel, as is done in OG. + TranslationUnitDecl *tuDecl = cgm.getASTContext().getTranslationUnitDecl(); + DeclContext *dc = TranslationUnitDecl::castToDeclContext(tuDecl); + + // The default stream is usually stream 0 (the legacy default stream). + // For per-thread default stream, we need a different LaunchKernel function. + if (cgm.getLangOpts().GPUDefaultStream == + LangOptions::GPUDefaultStreamKind::PerThread) + llvm_unreachable("NYI"); + + std::string launchAPI = "cudaLaunchKernel"; + const IdentifierInfo &launchII = cgm.getASTContext().Idents.get(launchAPI); + FunctionDecl *launchFD = nullptr; + for (auto *result : dc->lookup(&launchII)) { + if (FunctionDecl *fd = dyn_cast(result)) + launchFD = fd; + } + + if (launchFD == nullptr) { + cgm.Error(cgf.CurFuncDecl->getLocation(), + "Can't find declaration for " + launchAPI); + return; + } + + // Use this function to retrieve arguments for cudaLaunchKernel: + // int __cudaPopCallConfiguration(dim3 *gridDim, dim3 *blockDim, size_t + // *sharedMem, cudaStream_t *stream) + // + // Here cudaStream_t, while also being the 6th argument of cudaLaunchKernel, + // is a pointer to some opaque struct. + + mlir::Type dim3Ty = + cgf.getTypes().convertType(launchFD->getParamDecl(1)->getType()); + mlir::Type streamTy = + cgf.getTypes().convertType(launchFD->getParamDecl(5)->getType()); + + mlir::Value gridDim = + builder.createAlloca(loc, cir::PointerType::get(dim3Ty), dim3Ty, + "grid_dim", CharUnits::fromQuantity(8)); + mlir::Value blockDim = + builder.createAlloca(loc, cir::PointerType::get(dim3Ty), dim3Ty, + "block_dim", CharUnits::fromQuantity(8)); + mlir::Value sharedMem = + builder.createAlloca(loc, cir::PointerType::get(cgm.SizeTy), cgm.SizeTy, + "shared_mem", cgm.getSizeAlign()); + mlir::Value stream = + builder.createAlloca(loc, cir::PointerType::get(streamTy), streamTy, + "stream", cgm.getPointerAlign()); + + cir::FuncOp popConfig = cgm.createRuntimeFunction( + cir::FuncType::get({gridDim.getType(), blockDim.getType(), + sharedMem.getType(), stream.getType()}, + cgm.SInt32Ty), + "__cudaPopCallConfiguration"); + cgf.emitRuntimeCall(loc, popConfig, {gridDim, blockDim, sharedMem, stream}); + + // Now emit the call to cudaLaunchKernel + // cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, + // void **args, size_t sharedMem, + // cudaStream_t stream); + auto kernelTy = + cir::PointerType::get(&cgm.getMLIRContext(), fn.getFunctionType()); + + mlir::Value kernel = + builder.create(loc, kernelTy, fn.getSymName()); + mlir::Value func = builder.createBitcast(kernel, cgm.VoidPtrTy); + CallArgList launchArgs; + + mlir::Value kernelArgsDecayed = + builder.createCast(cir::CastKind::array_to_ptrdecay, kernelArgs, + cir::PointerType::get(cgm.VoidPtrTy)); + + launchArgs.add(RValue::get(func), launchFD->getParamDecl(0)->getType()); + launchArgs.add( + RValue::getAggregate(Address(gridDim, CharUnits::fromQuantity(8))), + launchFD->getParamDecl(1)->getType()); + launchArgs.add( + RValue::getAggregate(Address(blockDim, CharUnits::fromQuantity(8))), + launchFD->getParamDecl(2)->getType()); + launchArgs.add(RValue::get(kernelArgsDecayed), + launchFD->getParamDecl(3)->getType()); + launchArgs.add( + RValue::get(builder.CIRBaseBuilderTy::createLoad(loc, sharedMem)), + launchFD->getParamDecl(4)->getType()); + launchArgs.add(RValue::get(stream), launchFD->getParamDecl(5)->getType()); + + mlir::Type launchTy = cgm.getTypes().convertType(launchFD->getType()); + mlir::Operation *launchFn = + cgm.createRuntimeFunction(cast(launchTy), launchAPI); + const auto &callInfo = cgm.getTypes().arrangeFunctionDeclaration(launchFD); + cgf.emitCall(callInfo, CIRGenCallee::forDirect(launchFn), ReturnValueSlot(), + launchArgs); +} + +void CIRGenCUDARuntime::emitDeviceStub(CIRGenFunction &cgf, cir::FuncOp fn, + FunctionArgList &args) { + // Device stub and its handle might be different. + if (cgm.getLangOpts().HIP) + llvm_unreachable("NYI"); + + // CUDA 9.0 changed the way to launch kernels. + if (CudaFeatureEnabled(cgm.getTarget().getSDKVersion(), + CudaFeature::CUDA_USES_NEW_LAUNCH) || + cgm.getLangOpts().OffloadViaLLVM) + emitDeviceStubBodyNew(cgf, fn, args); + else + emitDeviceStubBodyLegacy(cgf, fn, args); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h new file mode 100644 index 000000000000..a3145a0baeb3 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h @@ -0,0 +1,47 @@ +//===------ CIRGenCUDARuntime.h - Interface to CUDA Runtimes -----*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides an abstract class for CUDA CIR generation. Concrete +// subclasses of this implement code generation for specific OpenCL +// runtime libraries. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_CIR_CIRGENCUDARUNTIME_H +#define LLVM_CLANG_LIB_CIR_CIRGENCUDARUNTIME_H + +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" + +namespace clang::CIRGen { + +class CIRGenFunction; +class CIRGenModule; +class FunctionArgList; + +class CIRGenCUDARuntime { +protected: + CIRGenModule &cgm; + +private: + void emitDeviceStubBodyLegacy(CIRGenFunction &cgf, cir::FuncOp fn, + FunctionArgList &args); + void emitDeviceStubBodyNew(CIRGenFunction &cgf, cir::FuncOp fn, + FunctionArgList &args); + +public: + CIRGenCUDARuntime(CIRGenModule &cgm) : cgm(cgm) {} + virtual ~CIRGenCUDARuntime(); + + virtual void emitDeviceStub(CIRGenFunction &cgf, cir::FuncOp fn, + FunctionArgList &args); +}; + +} // namespace clang::CIRGen + +#endif // LLVM_CLANG_LIB_CIR_CIRGENCUDARUNTIME_H diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 7de4866cd004..ee9ebaa61b32 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -753,7 +753,7 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, emitConstructorBody(Args); else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice && FD->hasAttr()) - llvm_unreachable("NYI"); + CGM.getCUDARuntime().emitDeviceStub(*this, Fn, Args); else if (isa(FD) && cast(FD)->isLambdaStaticInvoker()) { // The lambda static invoker function is special, because it forwards or diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 68a4ea22a16b..10dbd85edc4b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -9,6 +9,7 @@ // This is the internal per-translation-unit state used for CIR translation. // //===----------------------------------------------------------------------===// +#include "CIRGenCUDARuntime.h" #include "CIRGenCXXABI.h" #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" @@ -108,7 +109,8 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, theModule{mlir::ModuleOp::create(builder.getUnknownLoc())}, Diags(Diags), target(astContext.getTargetInfo()), ABI(createCXXABI(*this)), genTypes{*this}, VTables{*this}, - openMPRuntime(new CIRGenOpenMPRuntime(*this)) { + openMPRuntime(new CIRGenOpenMPRuntime(*this)), + cudaRuntime(new CIRGenCUDARuntime(*this)) { // Initialize CIR signed integer types cache. SInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true); diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 522d38e0b018..9020c4826282 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -15,6 +15,7 @@ #include "Address.h" #include "CIRGenBuilder.h" +#include "CIRGenCUDARuntime.h" #include "CIRGenCall.h" #include "CIRGenOpenCLRuntime.h" #include "CIRGenTBAA.h" @@ -113,6 +114,9 @@ class CIRGenModule : public CIRGenTypeCache { /// Holds the OpenMP runtime std::unique_ptr openMPRuntime; + /// Holds the CUDA runtime + std::unique_ptr cudaRuntime; + /// Per-function codegen information. Updated everytime emitCIR is called /// for FunctionDecls's. CIRGenFunction *CurCGF = nullptr; @@ -862,12 +866,18 @@ class CIRGenModule : public CIRGenTypeCache { /// Print out an error that codegen doesn't support the specified decl yet. void ErrorUnsupported(const Decl *D, const char *Type); - /// Return a reference to the configured OpenMP runtime. + /// Return a reference to the configured OpenCL runtime. CIRGenOpenCLRuntime &getOpenCLRuntime() { assert(openCLRuntime != nullptr); return *openCLRuntime; } + /// Return a reference to the configured CUDA runtime. + CIRGenCUDARuntime &getCUDARuntime() { + assert(cudaRuntime != nullptr); + return *cudaRuntime; + } + void createOpenCLRuntime() { openCLRuntime.reset(new CIRGenOpenCLRuntime(*this)); } diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index 02ac813ef732..8a065191f4d1 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -19,6 +19,7 @@ add_clang_library(clangCIR CIRGenClass.cpp CIRGenCleanup.cpp CIRGenCoroutine.cpp + CIRGenCUDARuntime.cpp CIRGenDecl.cpp CIRGenDeclCXX.cpp CIRGenException.cpp diff --git a/clang/test/CIR/CodeGen/CUDA/simple.cu b/clang/test/CIR/CodeGen/CUDA/simple.cu index 1a822d9bcc88..9675de3fe61a 100644 --- a/clang/test/CIR/CodeGen/CUDA/simple.cu +++ b/clang/test/CIR/CodeGen/CUDA/simple.cu @@ -1,15 +1,17 @@ #include "../Inputs/cuda.h" // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir \ -// RUN: -x cuda -emit-cir %s -o %t.cir +// RUN: -x cuda -emit-cir -target-sdk-version=12.3 \ +// RUN: %s -o %t.cir // RUN: FileCheck --check-prefix=CIR-HOST --input-file=%t.cir %s // RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fclangir \ -// RUN: -fcuda-is-device -emit-cir %s -o %t.cir +// RUN: -fcuda-is-device -emit-cir -target-sdk-version=12.3 \ +// RUN: %s -o %t.cir // RUN: FileCheck --check-prefix=CIR-DEVICE --input-file=%t.cir %s // Attribute for global_fn -// CIR-HOST: [[Kernel:#[a-zA-Z_0-9]+]] = {{.*}}#cir.cuda_kernel_name<_Z9global_fnv>{{.*}} +// CIR-HOST: [[Kernel:#[a-zA-Z_0-9]+]] = {{.*}}#cir.cuda_kernel_name<_Z9global_fni>{{.*}} __host__ void host_fn(int *a, int *b, int *c) {} // CIR-HOST: cir.func @_Z7host_fnPiS_S_ @@ -19,13 +21,13 @@ __device__ void device_fn(int* a, double b, float c) {} // CIR-HOST-NOT: cir.func @_Z9device_fnPidf // CIR-DEVICE: cir.func @_Z9device_fnPidf -#ifdef __CUDA_ARCH__ -__global__ void global_fn() {} -#else -__global__ void global_fn(); -#endif -// CIR-HOST: @_Z24__device_stub__global_fnv(){{.*}}extra([[Kernel]]) -// CIR-DEVICE: @_Z9global_fnv +__global__ void global_fn(int a) {} +// CIR-DEVICE: @_Z9global_fni -// Make sure `global_fn` indeed gets emitted -__host__ void x() { auto v = global_fn; } +// Check for device stub emission. + +// CIR-HOST: @_Z24__device_stub__global_fni{{.*}}extra([[Kernel]]) +// CIR-HOST: cir.alloca {{.*}}"kernel_args" +// CIR-HOST: cir.call @__cudaPopCallConfiguration +// CIR-HOST: cir.get_global @_Z24__device_stub__global_fni +// CIR-HOST: cir.call @cudaLaunchKernel From 6dd6d82034d84d1b0f997c19255910606178cf16 Mon Sep 17 00:00:00 2001 From: Rajveer Singh Bharadwaj Date: Wed, 12 Feb 2025 18:23:58 +0530 Subject: [PATCH 2250/2301] [CIR][CIRGen] Support for builtin `__atomic_thread_fence` (#1287) Fix https://github.com/llvm/clangir/issues/1274 Implements atomic thread fence synchronization primitive corresponding to `atomic.thread_fence` CIR. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 43 ++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 36 ++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 22 +++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 11 ++ clang/test/CIR/CodeGen/atomic-thread-fence.c | 133 ++++++++++++++++++ 5 files changed, 244 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/atomic-thread-fence.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 7994cd282d83..523222085db4 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -5411,6 +5411,49 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", let hasVerifier = 0; } +def MemScope_SingleThread : I32EnumAttrCase<"MemScope_SingleThread", + 0, "single_thread">; +def MemScope_System : I32EnumAttrCase<"MemScope_System", + 1, "system">; + +def MemScopeKind : I32EnumAttr< + "MemScopeKind", + "Memory Scope Enumeration", + [MemScope_SingleThread, MemScope_System]> { + let cppNamespace = "::cir"; +} + +def AtomicFence : CIR_Op<"atomic.fence"> { + let summary = "Atomic thread fence"; + let description = [{ + C/C++ Atomic thread fence synchronization primitive. Implements the builtin + `__atomic_thread_fence` which enforces memory ordering constraints across + threads within the specified synchronization scope. + + This handles all variations including: + - `__atomic_thread_fence` + - `__atomic_signal_fence` + - `__c11_atomic_thread_fence` + - `__c11_atomic_signal_fence` + + Example: + ```mlir + cir.atomic.fence system seq_cst + cir.atomic.fence single_thread seq_cst + ``` + + }]; + let results = (outs); + let arguments = (ins Arg:$sync_scope, + Arg:$ordering); + + let assemblyFormat = [{ + $sync_scope $ordering attr-dict + }]; + + let hasVerifier = 0; +} + def SignBitOp : CIR_Op<"signbit", [Pure]> { let summary = "Checks the sign of a floating-point number"; let description = [{ diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index b3807cba5828..a5f899320a86 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -16,7 +16,11 @@ #include "CIRGenCstEmitter.h" #include "CIRGenFunction.h" #include "CIRGenModule.h" +#include "CIRGenValue.h" #include "TargetInfo.h" +#include "clang/AST/Expr.h" +#include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/MissingFeatures.h" // TODO(cir): we shouldn't need this but we currently reuse intrinsic IDs for @@ -30,7 +34,9 @@ #include "clang/Frontend/FrontendDiagnostic.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/Value.h" +#include "mlir/Support/LLVM.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" #include "llvm/Support/ErrorHandling.h" @@ -333,6 +339,30 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf, return returnBool ? op.getResult(1) : op.getResult(0); } +static mlir::Value makeAtomicFenceValue(CIRGenFunction &cgf, + const CallExpr *expr, + cir::MemScopeKind syncScope) { + auto &builder = cgf.getBuilder(); + mlir::Value orderingVal = cgf.emitScalarExpr(expr->getArg(0)); + + auto constOrdering = + mlir::dyn_cast(orderingVal.getDefiningOp()); + if (!constOrdering) + llvm_unreachable("NYI: variable ordering not supported"); + + auto constOrderingAttr = + mlir::dyn_cast(constOrdering.getValue()); + if (constOrderingAttr) { + cir::MemOrder ordering = + static_cast(constOrderingAttr.getUInt()); + + builder.create(cgf.getLoc(expr->getSourceRange()), + syncScope, ordering); + } + + return mlir::Value(); +} + static bool typeRequiresBuiltinLaunderImp(const ASTContext &astContext, QualType ty, llvm::SmallPtrSetImpl &seen) { @@ -1863,10 +1893,14 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__atomic_clear NYI"); case Builtin::BI__atomic_thread_fence: + return RValue::get( + makeAtomicFenceValue(*this, E, cir::MemScopeKind::MemScope_System)); case Builtin::BI__atomic_signal_fence: + return RValue::get(makeAtomicFenceValue( + *this, E, cir::MemScopeKind::MemScope_SingleThread)); case Builtin::BI__c11_atomic_thread_fence: case Builtin::BI__c11_atomic_signal_fence: - llvm_unreachable("BI__atomic_thread_fence like NYI"); + llvm_unreachable("BI__c11_atomic_thread_fence like NYI"); case Builtin::BI__builtin_signbit: case Builtin::BI__builtin_signbitf: diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8cfee09104c4..3ac4de81422b 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -18,6 +18,8 @@ #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMAttrs.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/LLVMIR/Transforms/Passes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" @@ -41,6 +43,8 @@ #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/Passes.h" #include "clang/CIR/LoweringHelpers.h" #include "clang/CIR/MissingFeatures.h" @@ -3176,6 +3180,11 @@ mlir::LLVM::AtomicOrdering getLLVMAtomicOrder(cir::MemOrder memo) { llvm_unreachable("shouldn't get here"); } +llvm::StringRef getLLVMSyncScope(cir::MemScopeKind syncScope) { + return syncScope == cir::MemScopeKind::MemScope_SingleThread ? "singlethread" + : ""; +} + mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite( cir::AtomicCmpXchg op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -3344,6 +3353,18 @@ mlir::LogicalResult CIRToLLVMAtomicFetchLowering::matchAndRewrite( return mlir::success(); } +mlir::LogicalResult CIRToLLVMAtomicFenceLowering::matchAndRewrite( + cir::AtomicFence op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto llvmOrder = getLLVMAtomicOrder(adaptor.getOrdering()); + auto llvmSyncScope = getLLVMSyncScope(adaptor.getSyncScope()); + + rewriter.replaceOpWithNewOp(op, llvmOrder, + llvmSyncScope); + + return mlir::success(); +} + mlir::LogicalResult CIRToLLVMByteswapOpLowering::matchAndRewrite( cir::ByteswapOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -4105,6 +4126,7 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMAtomicCmpXchgLowering, CIRToLLVMAtomicFetchLowering, CIRToLLVMAtomicXchgLowering, + CIRToLLVMAtomicFenceLowering, CIRToLLVMBaseClassAddrOpLowering, CIRToLLVMBinOpLowering, CIRToLLVMBinOpOverflowOpLowering, diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 7b668dea43f5..f5441c7d11ac 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -16,6 +16,7 @@ #include "mlir/IR/MLIRContext.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Transforms/DialectConversion.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" namespace cir { namespace direct { @@ -822,6 +823,16 @@ class CIRToLLVMAtomicFetchLowering mlir::ConversionPatternRewriter &) const override; }; +class CIRToLLVMAtomicFenceLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::AtomicFence op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + class CIRToLLVMByteswapOpLowering : public mlir::OpConversionPattern { public: diff --git a/clang/test/CIR/CodeGen/atomic-thread-fence.c b/clang/test/CIR/CodeGen/atomic-thread-fence.c new file mode 100644 index 000000000000..4c71c3c83966 --- /dev/null +++ b/clang/test/CIR/CodeGen/atomic-thread-fence.c @@ -0,0 +1,133 @@ +// RUN: %clang_cc1 -triple aarch64-none-linux-android21 -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + + +struct Data { + int value; + void *ptr; +}; + +typedef struct Data *DataPtr; + +void applyThreadFence() { + __atomic_thread_fence(__ATOMIC_SEQ_CST); +} + +// CIR-LABEL: @applyThreadFence +// CIR: cir.atomic.fence system seq_cst +// CIR: cir.return + +// LLVM-LABEL: @applyThreadFence +// LLVM: fence seq_cst +// LLVM: ret void + +void applySignalFence() { + __atomic_signal_fence(__ATOMIC_SEQ_CST); +} +// CIR-LABEL: @applySignalFence +// CIR: cir.atomic.fence single_thread seq_cst +// CIR: cir.return + +// LLVM-LABEL: @applySignalFence +// LLVM: fence syncscope("singlethread") seq_cst +// LLVM: ret void + +void modifyWithThreadFence(DataPtr d) { + __atomic_thread_fence(__ATOMIC_SEQ_CST); + d->value = 42; +} +// CIR-LABEL: @modifyWithThreadFence +// CIR: %[[DATA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] {alignment = 8 : i64} +// CIR: cir.atomic.fence system seq_cst +// CIR: %[[VAL_42:.*]] = cir.const #cir.int<42> : !s32i +// CIR: %[[LOAD_DATA:.*]] = cir.load %[[DATA]] : !cir.ptr>, !cir.ptr +// CIR: %[[DATA_VALUE:.*]] = cir.get_member %[[LOAD_DATA]][0] {name = "value"} : !cir.ptr -> !cir.ptr +// CIR: cir.store %[[VAL_42]], %[[DATA_VALUE]] : !s32i, !cir.ptr +// CIR: cir.return + +// LLVM-LABEL: @modifyWithThreadFence +// LLVM: %[[DATA:.*]] = alloca ptr, i64 1, align 8 +// LLVM: fence seq_cst +// LLVM: %[[DATA_PTR:.*]] = load ptr, ptr %[[DATA]], align 8 +// LLVM: %[[DATA_VALUE:.*]] = getelementptr %struct.Data, ptr %[[DATA_PTR]], i32 0, i32 0 +// LLVM: store i32 42, ptr %[[DATA_VALUE]], align 4 +// LLVM: ret void + +void modifyWithSignalFence(DataPtr d) { + __atomic_signal_fence(__ATOMIC_SEQ_CST); + d->value = 24; +} +// CIR-LABEL: @modifyWithSignalFence +// CIR: %[[DATA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] {alignment = 8 : i64} +// CIR: cir.atomic.fence single_thread seq_cst +// CIR: %[[VAL_42:.*]] = cir.const #cir.int<24> : !s32i +// CIR: %[[LOAD_DATA:.*]] = cir.load %[[DATA]] : !cir.ptr>, !cir.ptr +// CIR: %[[DATA_VALUE:.*]] = cir.get_member %[[LOAD_DATA]][0] {name = "value"} : !cir.ptr -> !cir.ptr +// CIR: cir.store %[[VAL_42]], %[[DATA_VALUE]] : !s32i, !cir.ptr +// CIR: cir.return + +// LLVM-LABEL: @modifyWithSignalFence +// LLVM: %[[DATA:.*]] = alloca ptr, i64 1, align 8 +// LLVM: fence syncscope("singlethread") seq_cst +// LLVM: %[[DATA_PTR:.*]] = load ptr, ptr %[[DATA]], align 8 +// LLVM: %[[DATA_VALUE:.*]] = getelementptr %struct.Data, ptr %[[DATA_PTR]], i32 0, i32 0 +// LLVM: store i32 24, ptr %[[DATA_VALUE]], align 4 +// LLVM: ret void + +void loadWithThreadFence(DataPtr d) { + __atomic_thread_fence(__ATOMIC_SEQ_CST); + __atomic_load_n(&d->ptr, __ATOMIC_SEQ_CST); +} +// CIR-LABEL: @loadWithThreadFence +// CIR: %[[DATA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] {alignment = 8 : i64} +// CIR: %[[ATOMIC_TEMP:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["atomic-temp"] {alignment = 8 : i64} +// CIR: cir.atomic.fence system seq_cst +// CIR: %[[LOAD_DATA:.*]] = cir.load %[[DATA]] : !cir.ptr>, !cir.ptr +// CIR: %[[DATA_VALUE:.*]] = cir.get_member %[[LOAD_DATA]][1] {name = "ptr"} : !cir.ptr -> !cir.ptr> +// CIR: %[[CASTED_DATA_VALUE:.*]] = cir.cast(bitcast, %[[DATA_VALUE]] : !cir.ptr>), !cir.ptr +// CIR: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %[[CASTED_DATA_VALUE]] : !cir.ptr, !u64i +// CIR: %[[CASTED_ATOMIC_TEMP:.*]] = cir.cast(bitcast, %[[ATOMIC_TEMP]] : !cir.ptr>), !cir.ptr +// CIR: cir.store %[[ATOMIC_LOAD]], %[[CASTED_ATOMIC_TEMP]] : !u64i, !cir.ptr +// CIR: %[[ATOMIC_LOAD_PTR:.*]] = cir.load %[[ATOMIC_TEMP]] : !cir.ptr>, !cir.ptr +// CIR: cir.return + +// LLVM-LABEL: @loadWithThreadFence +// LLVM: %[[DATA:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[DATA_TEMP:.*]] = alloca ptr, i64 1, align 8 +// LLVM: fence seq_cst +// LLVM: %[[DATA_PTR:.*]] = load ptr, ptr %[[DATA]], align 8 +// LLVM: %[[DATA_VALUE:.*]] = getelementptr %struct.Data, ptr %[[DATA_PTR]], i32 0, i32 1 +// LLVM: %[[ATOMIC_LOAD:.*]] = load atomic i64, ptr %[[DATA_VALUE]] seq_cst, align 8 +// LLVM: store i64 %[[ATOMIC_LOAD]], ptr %[[DATA_TEMP]], align 8 +// LLVM: %[[DATA_TEMP_LOAD:.*]] = load ptr, ptr %[[DATA_TEMP]], align 8 +// LLVM: ret void + +void loadWithSignalFence(DataPtr d) { + __atomic_signal_fence(__ATOMIC_SEQ_CST); + __atomic_load_n(&d->ptr, __ATOMIC_SEQ_CST); +} +// CIR-LABEL: @loadWithSignalFence +// CIR: %[[DATA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] {alignment = 8 : i64} +// CIR: %[[ATOMIC_TEMP:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["atomic-temp"] {alignment = 8 : i64} +// CIR: cir.atomic.fence single_thread seq_cst +// CIR: %[[LOAD_DATA:.*]] = cir.load %[[DATA]] : !cir.ptr>, !cir.ptr +// CIR: %[[DATA_PTR:.*]] = cir.get_member %[[LOAD_DATA]][1] {name = "ptr"} : !cir.ptr -> !cir.ptr> +// CIR: %[[CASTED_DATA_PTR:.*]] = cir.cast(bitcast, %[[DATA_PTR]] : !cir.ptr>), !cir.ptr +// CIR: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %[[CASTED_DATA_PTR]] : !cir.ptr, !u64i +// CIR: %[[CASTED_ATOMIC_TEMP:.*]] = cir.cast(bitcast, %[[ATOMIC_TEMP]] : !cir.ptr>), !cir.ptr +// CIR: cir.store %[[ATOMIC_LOAD]], %[[CASTED_ATOMIC_TEMP]] : !u64i, !cir.ptr +// CIR: %[[LOAD_ATOMIC_TEMP:.*]] = cir.load %[[ATOMIC_TEMP]] : !cir.ptr>, !cir.ptr +// CIR: cir.return + +// LLVM-LABEL: @loadWithSignalFence +// LLVM: %[[DATA:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[DATA_TEMP:.*]] = alloca ptr, i64 1, align 8 +// LLVM: fence syncscope("singlethread") seq_cst +// LLVM: %[[DATA_PTR:.*]] = load ptr, ptr %[[DATA]], align 8 +// LLVM: %[[DATA_VALUE:.*]] = getelementptr %struct.Data, ptr %[[DATA_PTR]], i32 0, i32 1 +// LLVM: %[[ATOMIC_LOAD:.*]] = load atomic i64, ptr %[[DATA_VALUE]] seq_cst, align 8 +// LLVM: store i64 %[[ATOMIC_LOAD]], ptr %[[DATA_TEMP]], align 8 +// LLVM: %[[DATA_TEMP_LOAD]] = load ptr, ptr %[[DATA_TEMP]], align 8 +// LLVM: ret void From 03e65354e20cc9f4265e90981a5abae80a4911b8 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Wed, 12 Feb 2025 16:00:37 +0300 Subject: [PATCH 2251/2301] [CIR][ABI][AArch64][Lowering] Fix calls for struct types > 128 bits (#1335) In [PR#1074](https://github.com/llvm/clangir/pull/1074) we introduced calls for struct types > 128 bits, but there's is an issue here. [This](https://github.com/llvm/clangir/blob/3e17e7b9404e1a28bf33bdd5943f4a208134d479/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp#L1169) is meant to be a `memcpy` of the alloca instead of directly passing the alloca, just like in the [OG](https://github.com/llvm/clangir/blob/3e17e7b9404e1a28bf33bdd5943f4a208134d479/clang/lib/CodeGen/CGCall.cpp#L5323). The PR was meant to use a `memcpy` and later handle cases where we don't need the `memcpy`. For example, running the following code snippet `tmp.c` using `bin/clang tmp.c -o tmp -Xclang -fclangir -Xclang -fclangir-call-conv-lowering --target=aarch64-none-linux-gnu`: ``` #include typedef struct { int a, b, c, d, e; } S; void change(S s) { s.a = 10; } void foo(void) { S s; s.a = 9; change(s); printf("%d\n", s.a); } int main(void) { foo(); return 0; } ``` gives 10 instead of 9, because we pass the pointer instead of a copy. Relevant part of the OG LLVM output: ``` @foo() %s = alloca %struct.S, align 4 %byval-temp = alloca %struct.S, align 4 %a = getelementptr inbounds nuw %struct.S, ptr %s, i32 0, i32 0 store i32 9, ptr %a, align 4 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %byval-temp, ptr align 4 %s, i64 20, i1 false) call void @change(ptr noundef %byval-temp) ``` Current LLVM output through CIR: ``` @foo() %1 = alloca %struct.S, i64 1, align 4 %2 = getelementptr %struct.S, ptr %1, i32 0, i32 0 store i32 9, ptr %2, align 4 %3 = load %struct.S, ptr %1, align 4 call void @change(ptr %1) ``` So, there should be a memcpy. This PR fixes this, and adds a comment/note for the future cases where we need to check if the copy is not needed. I have also updated the old test with structs having size > 128. --- .../TargetLowering/LowerFunction.cpp | 8 ++++++- .../AArch64/aarch64-cc-structs.c | 24 +++++++++++-------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index fcf95b7db7d0..023e3baf2105 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -1166,7 +1166,13 @@ mlir::Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, if (::cir::MissingFeatures::undef()) cir_cconv_unreachable("NYI"); - IRCallArgs[FirstIRArg] = alloca; + // TODO(cir): add check for cases where we don't need the memcpy + auto tmpAlloca = createTmpAlloca( + *this, alloca.getLoc(), + mlir::cast(alloca.getType()).getPointee()); + auto tySize = LM.getDataLayout().getTypeAllocSize(I->getType()); + createMemCpy(*this, tmpAlloca, alloca, tySize.getFixedValue()); + IRCallArgs[FirstIRArg] = tmpAlloca; // NOTE(cir): Skipping Emissions, lifetime markers. diff --git a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c index b434edc09dfe..95217199ed52 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c +++ b/clang/test/CIR/CallConvLowering/AArch64/aarch64-cc-structs.c @@ -171,20 +171,24 @@ GT_128 get_gt_128(GT_128 s) { } // CHECK: cir.func no_proto @call_and_get_gt_128(%arg0: !cir.ptr -// CHECK: %[[#V0:]] = cir.alloca !ty_GT_128_, !cir.ptr, {{.*}} {alignment = 8 : i64} -// CHECK: %[[#V1:]] = cir.alloca !ty_GT_128_, !cir.ptr, {{.*}} {alignment = 8 : i64} -// CHECK: cir.call @get_gt_128(%[[#V1]], %arg0) : (!cir.ptr, !cir.ptr) -> () -// CHECK: %[[#V2:]] = cir.load %[[#V1]] : !cir.ptr, !ty_GT_128_ -// CHECK: cir.store %[[#V2]], %[[#V0]] : !ty_GT_128_, !cir.ptr +// CHECK: %[[#V0:]] = cir.alloca !ty_GT_128_, !cir.ptr, ["tmp"] {alignment = 8 : i64} +// CHECK: %[[#V1:]] = cir.load %arg0 : !cir.ptr, !ty_GT_128_ +// CHECK: %[[#V2:]] = cir.alloca !ty_GT_128_, !cir.ptr, [""] {alignment = 8 : i64} +// CHECK: %[[#V3:]] = cir.alloca !ty_GT_128_, !cir.ptr, ["tmp"] {alignment = 8 : i64} +// CHECK: %[[#V4:]] = cir.cast(bitcast, %arg0 : !cir.ptr), !cir.ptr +// CHECK: %[[#V5:]] = cir.cast(bitcast, %[[#V3]] : !cir.ptr), !cir.ptr +// CHECK: %[[#V6:]] = cir.const #cir.int<24> : !u64i +// CHECK: cir.libc.memcpy %[[#V6]] bytes from %[[#V4]] to %[[#V5]] : !u64i, !cir.ptr -> !cir.ptr +// CHECK: cir.call @get_gt_128(%[[#V2]], %[[#V3]]) : (!cir.ptr, !cir.ptr) -> () // CHECK: cir.return // LLVM: void @call_and_get_gt_128(ptr %[[#V0:]]) // LLVM: %[[#V2:]] = alloca %struct.GT_128, i64 1, align 8 -// LLVM: %[[#V3:]] = alloca %struct.GT_128, i64 1, align 8 -// LLVM: call void @get_gt_128(ptr %[[#V3]], ptr %[[#V0]]) -// LLVM: %[[#V4:]] = load %struct.GT_128, ptr %[[#V3]], align 8 -// LLVM: store %struct.GT_128 %[[#V4]], ptr %[[#V2]], align 8 -// LLVM: ret void +// LLVM: %[[#V3:]] = load %struct.GT_128, ptr %[[#V0]], align 8 +// LLVM: %[[#V4:]] = alloca %struct.GT_128, i64 1, align 8 +// LLVM: %[[#V5:]] = alloca %struct.GT_128, i64 1, align 8 +// LLVM: call void @llvm.memcpy.p0.p0.i64(ptr %[[#V5]], ptr %[[#V0]], i64 24, i1 false) +// LLVM: call void @get_gt_128(ptr %[[#V4]], ptr %[[#V5]]) GT_128 call_and_get_gt_128() { GT_128 s; s = get_gt_128(s); From 090718c785c7e4a22f53b655aeb2755a6e1da471 Mon Sep 17 00:00:00 2001 From: Robert Konicar Date: Wed, 12 Feb 2025 15:00:28 +0100 Subject: [PATCH 2252/2301] [CIR] Fix error messages when the conversion to MLIR standard dialects fails. (#1310) The old error message is wrong, as the `ConvertCIRToMLIRPass` does not convert to LLVM Dialect but to MLIR standard dialects. --- clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp index 8a3e6227b072..3d88820f5033 100644 --- a/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp +++ b/clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRToMLIR.cpp @@ -1505,11 +1505,12 @@ mlir::ModuleOp lowerFromCIRToMLIR(mlir::ModuleOp theModule, auto result = !mlir::failed(pm.run(theModule)); if (!result) report_fatal_error( - "The pass manager failed to lower CIR to LLVMIR dialect!"); + "The pass manager failed to lower CIR to MLIR standard dialects!"); // Now that we ran all the lowering passes, verify the final output. if (theModule.verify().failed()) - report_fatal_error("Verification of the final LLVMIR dialect failed!"); + report_fatal_error( + "Verification of the final MLIR in standard dialects failed!"); return theModule; } From db307ce95f657c0e4bc63bc43da8fd8765cbaccb Mon Sep 17 00:00:00 2001 From: Konstantinos Parasyris Date: Wed, 12 Feb 2025 06:11:21 -0800 Subject: [PATCH 2253/2301] [CIR][HIP] Use CUDA attributes for HIP global functions (#1333) --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 ++-- clang/test/CIR/CodeGen/HIP/simple-device.cpp | 14 --------- clang/test/CIR/CodeGen/HIP/simple.cpp | 31 +++++++++++++++----- 3 files changed, 26 insertions(+), 24 deletions(-) delete mode 100644 clang/test/CIR/CodeGen/HIP/simple-device.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 10dbd85edc4b..a8376dd8f6c5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2357,8 +2357,9 @@ cir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, // As __global__ functions (kernels) always reside on device, // when we access them from host, we must refer to the kernel handle. // For CUDA, it's just the device stub. For HIP, it's something different. - if (langOpts.CUDA && !langOpts.CUDAIsDevice && langOpts.HIP && - cast(GD.getDecl())->hasAttr()) { + if ((langOpts.CUDA || langOpts.HIP) && !langOpts.CUDAIsDevice && + cast(GD.getDecl())->hasAttr() && + cast(GD.getDecl())->isThisDeclarationADefinition()) { llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/HIP/simple-device.cpp b/clang/test/CIR/CodeGen/HIP/simple-device.cpp deleted file mode 100644 index e627a90dc410..000000000000 --- a/clang/test/CIR/CodeGen/HIP/simple-device.cpp +++ /dev/null @@ -1,14 +0,0 @@ -#include "../Inputs/cuda.h" - -// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip -fcuda-is-device \ -// RUN: -fclangir -emit-cir -o - %s | FileCheck %s - -// This shouldn't emit. -__host__ void host_fn(int *a, int *b, int *c) {} - -// CHECK-NOT: cir.func @_Z7host_fnPiS_S_ - -// This should emit as a normal C++ function. -__device__ void device_fn(int* a, double b, float c) {} - -// CIR: cir.func @_Z9device_fnPidf diff --git a/clang/test/CIR/CodeGen/HIP/simple.cpp b/clang/test/CIR/CodeGen/HIP/simple.cpp index ec4110da10d7..4fa711373d89 100644 --- a/clang/test/CIR/CodeGen/HIP/simple.cpp +++ b/clang/test/CIR/CodeGen/HIP/simple.cpp @@ -1,16 +1,31 @@ #include "../Inputs/cuda.h" -// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip -fclangir \ -// RUN: -emit-cir %s -o %t.cir -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir \ +// RUN: -x hip -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR-HOST --input-file=%t.cir %s +// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip \ +// RUN: -fcuda-is-device -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR-DEVICE --input-file=%t.cir %s + +// Attribute for global_fn +// CIR-HOST: [[Kernel:#[a-zA-Z_0-9]+]] = {{.*}}#cir.cuda_kernel_name<_Z9global_fnv>{{.*}} -// This should emit as a normal C++ function. __host__ void host_fn(int *a, int *b, int *c) {} +// CIR-HOST: cir.func @_Z7host_fnPiS_S_ +// CIR-DEVICE-NOT: cir.func @_Z7host_fnPiS_S_ -// CIR: cir.func @_Z7host_fnPiS_S_ +__device__ void device_fn(int *a, double b, float c) {} +// CIR-HOST-NOT: cir.func @_Z9device_fnPidf +// CIR-DEVICE: cir.func @_Z9device_fnPidf -// This shouldn't emit. -__device__ void device_fn(int* a, double b, float c) {} +#ifdef __AMDGPU__ +__global__ void global_fn() {} +#else +__global__ void global_fn(); +#endif +// CIR-HOST: @_Z24__device_stub__global_fnv(){{.*}}extra([[Kernel]]) +// CIR-DEVICE: @_Z9global_fnv -// CHECK-NOT: cir.func @_Z9device_fnPidf +// Make sure `global_fn` indeed gets emitted +__host__ void x() { auto v = global_fn; } From 0bdd896e5675a1919ad394d0092bab6e43ead9d2 Mon Sep 17 00:00:00 2001 From: Bruno Cardoso Lopes Date: Thu, 13 Feb 2025 15:01:46 +0100 Subject: [PATCH 2254/2301] Revert "[CIR][HIP] Use CUDA attributes for HIP global functions (#1333)" Broke CI jobs This reverts commit db307ce95f657c0e4bc63bc43da8fd8765cbaccb. --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 5 ++-- clang/test/CIR/CodeGen/HIP/simple-device.cpp | 14 +++++++++ clang/test/CIR/CodeGen/HIP/simple.cpp | 31 +++++--------------- 3 files changed, 24 insertions(+), 26 deletions(-) create mode 100644 clang/test/CIR/CodeGen/HIP/simple-device.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index a8376dd8f6c5..10dbd85edc4b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2357,9 +2357,8 @@ cir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, // As __global__ functions (kernels) always reside on device, // when we access them from host, we must refer to the kernel handle. // For CUDA, it's just the device stub. For HIP, it's something different. - if ((langOpts.CUDA || langOpts.HIP) && !langOpts.CUDAIsDevice && - cast(GD.getDecl())->hasAttr() && - cast(GD.getDecl())->isThisDeclarationADefinition()) { + if (langOpts.CUDA && !langOpts.CUDAIsDevice && langOpts.HIP && + cast(GD.getDecl())->hasAttr()) { llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/HIP/simple-device.cpp b/clang/test/CIR/CodeGen/HIP/simple-device.cpp new file mode 100644 index 000000000000..e627a90dc410 --- /dev/null +++ b/clang/test/CIR/CodeGen/HIP/simple-device.cpp @@ -0,0 +1,14 @@ +#include "../Inputs/cuda.h" + +// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip -fcuda-is-device \ +// RUN: -fclangir -emit-cir -o - %s | FileCheck %s + +// This shouldn't emit. +__host__ void host_fn(int *a, int *b, int *c) {} + +// CHECK-NOT: cir.func @_Z7host_fnPiS_S_ + +// This should emit as a normal C++ function. +__device__ void device_fn(int* a, double b, float c) {} + +// CIR: cir.func @_Z9device_fnPidf diff --git a/clang/test/CIR/CodeGen/HIP/simple.cpp b/clang/test/CIR/CodeGen/HIP/simple.cpp index 4fa711373d89..ec4110da10d7 100644 --- a/clang/test/CIR/CodeGen/HIP/simple.cpp +++ b/clang/test/CIR/CodeGen/HIP/simple.cpp @@ -1,31 +1,16 @@ #include "../Inputs/cuda.h" -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir \ -// RUN: -x hip -emit-cir %s -o %t.cir -// RUN: FileCheck --check-prefix=CIR-HOST --input-file=%t.cir %s +// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip -fclangir \ +// RUN: -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s -// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip \ -// RUN: -fcuda-is-device -emit-cir %s -o %t.cir -// RUN: FileCheck --check-prefix=CIR-DEVICE --input-file=%t.cir %s - -// Attribute for global_fn -// CIR-HOST: [[Kernel:#[a-zA-Z_0-9]+]] = {{.*}}#cir.cuda_kernel_name<_Z9global_fnv>{{.*}} +// This should emit as a normal C++ function. __host__ void host_fn(int *a, int *b, int *c) {} -// CIR-HOST: cir.func @_Z7host_fnPiS_S_ -// CIR-DEVICE-NOT: cir.func @_Z7host_fnPiS_S_ -__device__ void device_fn(int *a, double b, float c) {} -// CIR-HOST-NOT: cir.func @_Z9device_fnPidf -// CIR-DEVICE: cir.func @_Z9device_fnPidf +// CIR: cir.func @_Z7host_fnPiS_S_ -#ifdef __AMDGPU__ -__global__ void global_fn() {} -#else -__global__ void global_fn(); -#endif -// CIR-HOST: @_Z24__device_stub__global_fnv(){{.*}}extra([[Kernel]]) -// CIR-DEVICE: @_Z9global_fnv +// This shouldn't emit. +__device__ void device_fn(int* a, double b, float c) {} -// Make sure `global_fn` indeed gets emitted -__host__ void x() { auto v = global_fn; } +// CHECK-NOT: cir.func @_Z9device_fnPidf From be82182cb75f20335511698fc0b786c4e66824ac Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Thu, 13 Feb 2025 14:07:20 +0000 Subject: [PATCH 2255/2301] [CIR][CUDA][NFC] Skeleton of `setCUDAKernelCallingConvention` (#1344) This is only a skeleton following OG, and shouldn't have changed any visible behaviour. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 ++- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 4 +++- clang/lib/CIR/CodeGen/TargetInfo.cpp | 8 ++++++++ clang/lib/CIR/CodeGen/TargetInfo.h | 6 ++++++ 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 523222085db4..a08e16df9222 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3350,11 +3350,12 @@ def DerivedDataMemberOp : CIR_Op<"derived_data_member", [Pure]> { def CC_C : I32EnumAttrCase<"C", 1, "c">; def CC_SpirKernel : I32EnumAttrCase<"SpirKernel", 2, "spir_kernel">; def CC_SpirFunction : I32EnumAttrCase<"SpirFunction", 3, "spir_function">; +def CC_OpenCLKernel : I32EnumAttrCase<"OpenCLKernel", 4, "opencl_kernel">; def CallingConv : I32EnumAttr< "CallingConv", "calling convention", - [CC_C, CC_SpirKernel, CC_SpirFunction]> { + [CC_C, CC_SpirKernel, CC_SpirFunction, CC_OpenCLKernel]> { let cppNamespace = "::cir"; } diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index d9d7c1f13de2..54c0e185648b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -1531,7 +1531,9 @@ const CIRGenFunctionInfo &CIRGenTypes::arrangeFreeFunctionCall( static void setCUDAKernelCallingConvention(CanQualType &FTy, CIRGenModule &CGM, const FunctionDecl *FD) { if (FD->hasAttr()) { - llvm_unreachable("NYI"); + const FunctionType *FT = FTy->getAs(); + CGM.getTargetCIRGenInfo().setCUDAKernelCallingConvention(FT); + FTy = FT->getCanonicalTypeUnqualified(); } } diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index 07dca811985e..a10980a6e66b 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -301,6 +301,10 @@ class SPIRVTargetCIRGenInfo : public CommonSPIRTargetCIRGenInfo { public: SPIRVTargetCIRGenInfo(CIRGenTypes &CGT) : CommonSPIRTargetCIRGenInfo(std::make_unique(CGT)) {} + + void setCUDAKernelCallingConvention(const FunctionType *&ft) const override { + llvm_unreachable("NYI"); + } }; } // namespace @@ -349,6 +353,10 @@ class AMDGPUTargetCIRGenInfo : public TargetCIRGenInfo { public: AMDGPUTargetCIRGenInfo(CIRGenTypes &cgt) : TargetCIRGenInfo(std::make_unique(cgt)) {} + + void setCUDAKernelCallingConvention(const FunctionType *&ft) const override { + llvm_unreachable("NYI"); + } }; } // namespace diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h index 98e660eec748..8aafdda042fb 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.h +++ b/clang/lib/CIR/CodeGen/TargetInfo.h @@ -113,6 +113,12 @@ class TargetCIRGenInfo { return cir::CallingConv::SpirKernel; } + // Set calling convention for CUDA Kernels. + // Some targets, such as AMD GPU or SPIRV, treat CUDA kernels as OpenCL + // kernels. They should reset the calling convention to OpenCLKernel, + // which will be further resolved by getOpenCLKernelCallingConv(). + virtual void setCUDAKernelCallingConvention(const FunctionType *&ft) const {} + virtual ~TargetCIRGenInfo() {} }; From 8debbb91849722209fdc3ef35622795714c96b26 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Fri, 14 Feb 2025 16:02:32 +0100 Subject: [PATCH 2256/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vstl1_lane_s64 and vstl1q_lane_s64 (#1340) Lower `neon_vstl1_lane_s64` and `vstl1q_lane_s64` --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 7 +- clang/test/CIR/CodeGen/AArch64/neon-ldst.c | 128 ++++++++++++++++++ 2 files changed, 134 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 36e68212f696..b7835b419f62 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4473,7 +4473,12 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vstl1_lane_s64: case NEON::BI__builtin_neon_vstl1q_lane_s64: { - llvm_unreachable("NEON::BI__builtin_neon_vstl1q_lane_s64 NYI"); + Ops[1] = builder.createBitcast(Ops[1], ty); + Ops[1] = builder.create(Ops[1].getLoc(), Ops[1], Ops[2]); + cir::StoreOp Store = builder.createAlignedStore( + getLoc(E->getExprLoc()), Ops[1], Ops[0], PtrOp0.getAlignment()); + Store.setAtomic(cir::MemOrder::Release); + return Ops[1]; } case NEON::BI__builtin_neon_vld2_v: case NEON::BI__builtin_neon_vld2q_v: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c index 7ee0423398b1..e35260757351 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c @@ -501,3 +501,131 @@ void test_vst1q_lane_f64(float64_t * ptr, float64x2_t src) { // LLVM: [[VEC_CAST1:%.*]] = bitcast <16 x i8> [[VEC_CAST0]] to <2 x double> // LLVM: [[RES:%.*]] = extractelement <2 x double> [[VEC_CAST1]], i32 1 // LLVM: store double [[RES]], ptr [[PTR]], align 8 + +void test_vstl1q_lane_u64(uint64_t *a, uint64x2_t b) { + vstl1q_lane_u64(a, b, 1); +} + +// CIR-LABEL: test_vstl1q_lane_u64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !u64i, !cir.ptr + +// LLVM: {{.*}}test_vstl1q_lane_u64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8> +// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// LLVM: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1 +// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8 + +void test_vstl1q_lane_s64(int64_t *a, int64x2_t b) { + vstl1q_lane_s64(a, b, 1); +} + +// CIR-LABEL: test_vstl1q_lane_s64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !s64i, !cir.ptr + +// LLVM: {{.*}}test_vstl1q_lane_s64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8> +// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// LLVM: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1 +// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8 + +void test_vstl1q_lane_f64(float64_t *a, float64x2_t b) { + vstl1q_lane_f64(a, b, 1); +} + +// CIR-LABEL: test_vstl1q_lane_f64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !cir.double, !cir.ptr + +// LLVM: {{.*}}test_vstl1q_lane_f64(ptr{{.*}}[[PTR:%.*]], <2 x double>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[SRC]] to <16 x i8> +// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double> +// LLVM: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 1 +// LLVM: store atomic double [[TMP2]], ptr [[PTR]] release, align 8 + +void test_vstl1q_lane_p64(poly64_t *a, poly64x2_t b) { + vstl1q_lane_p64(a, b, 1); +} + +// CIR-LABEL: test_vstl1q_lane_p64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !s64i, !cir.ptr + +// LLVM: {{.*}}test_vstl1q_lane_p64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8> +// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// LLVM: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1 +// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8 + +void test_vstl1_lane_u64(uint64_t *a, uint64x1_t b) { + vstl1_lane_u64(a, b, 0); +} + +// CIR-LABEL: test_vstl1_lane_u64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !u64i, !cir.ptr + +// LLVM: {{.*}}test_vstl1_lane_u64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8> +// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// LLVM: [[TMP2:%.*]] = extractelement <1 x i64> [[TMP1]], i32 0 +// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8 + +void test_vstl1_lane_s64(int64_t *a, int64x1_t b) { + vstl1_lane_s64(a, b, 0); +} + +// CIR-LABEL:test_vstl1_lane_s64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !s64i, !cir.ptr + +// LLVM: {{.*}}test_vstl1_lane_s64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8> +// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// LLVM: [[TMP2:%.*]] = extractelement <1 x i64> [[TMP1]], i32 0 +// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8 + +void test_vstl1_lane_f64(float64_t *a, float64x1_t b) { + vstl1_lane_f64(a, b, 0); +} + +// CIR-LABEL:test_vstl1_lane_f64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !cir.double, !cir.ptr + +// LLVM: {{.*}}test_vstl1_lane_f64(ptr{{.*}}[[PTR:%.*]], <1 x double>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <1 x double> [[SRC]] to <8 x i8> +// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double> +// LLVM: [[TMP2:%.*]] = extractelement <1 x double> [[TMP1]], i32 0 +// LLVM: store atomic double [[TMP2]], ptr [[PTR]] release, align 8 + +void test_vstl1_lane_p64(poly64_t *a, poly64x1_t b) { + vstl1_lane_p64(a, b, 0); +} + +// CIR-LABEL: test_vstl1_lane_p64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[VAL:%.*]] = cir.vec.extract {{%.*}}[[[LANE]] : !s32i] : !cir.vector +// CIR: [[PTR:%.*]] = cir.cast(bitcast, {{%.*}} : !cir.ptr), !cir.ptr +// CIR: cir.store align(8) atomic(release) [[VAL]], [[PTR]] : !s64i, !cir.ptr + +// LLVM: {{.*}}test_vstl1_lane_p64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8> +// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// LLVM: [[TMP2:%.*]] = extractelement <1 x i64> [[TMP1]], i32 0 +// LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8 From 0d2a01ff094b2dd9b9ef75bf7578f9ddbb997922 Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Fri, 14 Feb 2025 07:22:05 -0800 Subject: [PATCH 2257/2301] [CIR] Add support for array new with constructors (#1347) This adds support for array new expressions on objects with non-trivial constructors. --- .../include/clang/CIR/Dialect/IR/CIRTypes.td | 2 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 6 +- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 17 +++++- clang/test/CIR/CodeGen/array-new-init.cpp | 60 +++++++++++++++++++ clang/test/CIR/Lowering/new.cpp | 28 +++++++++ 5 files changed, 108 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/array-new-init.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 53ea393abe3f..6f30f0188802 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -587,7 +587,7 @@ def ArrayPtr : Type< CPred<"::mlir::isa<::cir::PointerType>($_self)">, CPred<"::mlir::isa<::cir::ArrayType>(" "::mlir::cast<::cir::PointerType>($_self).getPointee())">, - ]>, "!cir.ptr"> { + ]>, "!cir.ptr"> { } // Pointer to functions diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 894708e62645..a1437ec19174 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1889,10 +1889,10 @@ void CIRGenFunction::emitCXXAggrConstructorCall( llvm_unreachable("NYI"); } - // Wmit the constructor call that will execute for every array element. + // Emit the constructor call that will execute for every array element. + auto arrayOp = builder.createPtrBitcast(arrayBase.getPointer(), arrayTy); builder.create( - *currSrcLoc, arrayBase.getPointer(), - [&](mlir::OpBuilder &b, mlir::Location loc) { + *currSrcLoc, arrayOp, [&](mlir::OpBuilder &b, mlir::Location loc) { auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc); Address curAddr = Address(arg, ptrToElmType, eltAlignment); auto currAVS = AggValueSlot::forAddr( diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index e7948cf98292..bca09c535c91 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -1120,7 +1120,22 @@ void CIRGenFunction::emitNewArrayInitializer( llvm_unreachable("NYI"); } - llvm_unreachable("NYI"); + // Store the new Cleanup position for irregular Cleanups. + // + // FIXME: Share this cleanup with the constructor call emission rather than + // having it create a cleanup of its own. + if (EndOfInit.isValid()) + llvm_unreachable("NYI"); + + // Emit a constructor call loop to initialize the remaining elements. + if (InitListElements) + llvm_unreachable("NYI"); + auto arrayType = convertType(CCE->getType()); + CurPtr = CurPtr.withElementType(arrayType); + emitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE, + /*NewPointerIsChecked*/ true, + CCE->requiresZeroInitialization()); + return; } // If this is value-initialization, we can usually use memset. diff --git a/clang/test/CIR/CodeGen/array-new-init.cpp b/clang/test/CIR/CodeGen/array-new-init.cpp new file mode 100644 index 000000000000..263c2a3edfe4 --- /dev/null +++ b/clang/test/CIR/CodeGen/array-new-init.cpp @@ -0,0 +1,60 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck -check-prefix=BEFORE %s +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -I%S/../Inputs -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck -check-prefix=AFTER %s + +class E { + public: + E(); + ~E(); +}; + +void t_new_constant_size_constructor() { + auto p = new E[3]; +} + +// BEFORE: cir.func @_Z31t_new_constant_size_constructorv +// BEFORE: %[[NUM_ELEMENTS:.*]] = cir.const #cir.int<3> : !u64i +// BEFORE: %[[SIZE_WITHOUT_COOKIE:.*]] = cir.const #cir.int<3> : !u64i +// BEFORE: %[[ALLOC_SIZE:.*]] = cir.const #cir.int<11> : !u64i +// BEFORE: %[[ALLOC_PTR:.*]] = cir.call @_Znam(%[[ALLOC_SIZE]]) +// BEFORE: %[[COOKIE_PTR:.*]] = cir.cast(bitcast, %[[ALLOC_PTR]] : !cir.ptr), !cir.ptr +// BEFORE: cir.store %[[NUM_ELEMENTS]], %[[COOKIE_PTR]] : !u64i, !cir.ptr +// BEFORE: %[[PTR_AS_U8:.*]] = cir.cast(bitcast, %[[ALLOC_PTR]] : !cir.ptr), !cir.ptr +// BEFORE: %[[OFFSET:.*]] = cir.const #cir.int<8> : !s32i +// BEFORE: %[[OBJ_PTR:.*]] = cir.ptr_stride(%[[PTR_AS_U8]] : !cir.ptr, %[[OFFSET]] : !s32i), !cir.ptr +// BEFORE: %[[OBJ_ELEM_PTR:.*]] = cir.cast(bitcast, %[[OBJ_PTR]] : !cir.ptr), !cir.ptr +// BEFORE: %[[OBJ_ARRAY_PTR:.*]] = cir.cast(bitcast, %[[OBJ_ELEM_PTR]] : !cir.ptr), !cir.ptr> +// BEFORE: cir.array.ctor(%[[OBJ_ARRAY_PTR]] : !cir.ptr>) { +// BEFORE: ^bb0(%arg0: !cir.ptr +// BEFORE: cir.call @_ZN1EC1Ev(%arg0) : (!cir.ptr) -> () +// BEFORE: cir.yield +// BEFORE: } + +// AFTER: cir.func @_Z31t_new_constant_size_constructorv +// AFTER: %[[NUM_ELEMENTS:.*]] = cir.const #cir.int<3> : !u64i +// AFTER: %[[SIZE_WITHOUT_COOKIE:.*]] = cir.const #cir.int<3> : !u64i +// AFTER: %[[ALLOC_SIZE:.*]] = cir.const #cir.int<11> : !u64i +// AFTER: %[[ALLOC_PTR:.*]] = cir.call @_Znam(%[[ALLOC_SIZE]]) +// AFTER: %[[COOKIE_PTR:.*]] = cir.cast(bitcast, %[[ALLOC_PTR]] : !cir.ptr), !cir.ptr +// AFTER: cir.store %[[NUM_ELEMENTS]], %[[COOKIE_PTR]] : !u64i, !cir.ptr +// AFTER: %[[PTR_AS_U8:.*]] = cir.cast(bitcast, %[[ALLOC_PTR]] : !cir.ptr), !cir.ptr +// AFTER: %[[OFFSET:.*]] = cir.const #cir.int<8> : !s32i +// AFTER: %[[OBJ_PTR:.*]] = cir.ptr_stride(%[[PTR_AS_U8]] : !cir.ptr, %[[OFFSET]] : !s32i), !cir.ptr +// AFTER: %[[OBJ_ELEM_PTR:.*]] = cir.cast(bitcast, %[[OBJ_PTR]] : !cir.ptr), !cir.ptr +// AFTER: %[[OBJ_ARRAY_PTR:.*]] = cir.cast(bitcast, %[[OBJ_ELEM_PTR]] : !cir.ptr), !cir.ptr> +// AFTER: %[[NUM_ELEMENTS2:.*]] = cir.const #cir.int<3> : !u64i +// AFTER: %[[ELEM_PTR:.*]] = cir.cast(array_to_ptrdecay, %10 : !cir.ptr>), !cir.ptr +// AFTER: %[[END_PTR:.*]] = cir.ptr_stride(%[[ELEM_PTR]] : !cir.ptr, %[[NUM_ELEMENTS2]] : !u64i), !cir.ptr +// AFTER: %[[CUR_ELEM_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// AFTER: cir.store %[[ELEM_PTR]], %[[CUR_ELEM_ALLOCA]] : !cir.ptr, !cir.ptr> +// AFTER: cir.do { +// AFTER: %[[CUR_ELEM_PTR:.*]] = cir.load %[[CUR_ELEM_ALLOCA]] : !cir.ptr>, !cir.ptr +// AFTER: %[[OFFSET:.*]] = cir.const #cir.int<1> : !u64i +// AFTER: cir.call @_ZN1EC1Ev(%[[CUR_ELEM_PTR]]) : (!cir.ptr) -> () +// AFTER: %[[NEXT_PTR:.*]] = cir.ptr_stride(%[[CUR_ELEM_PTR]] : !cir.ptr, %[[OFFSET]] : !u64i), !cir.ptr +// AFTER: cir.store %[[NEXT_PTR]], %[[CUR_ELEM_ALLOCA]] : !cir.ptr, !cir.ptr> +// AFTER: cir.yield +// AFTER: } while { +// AFTER: %[[CUR_ELEM_PTR2:.*]] = cir.load %[[CUR_ELEM_ALLOCA]] : !cir.ptr>, !cir.ptr +// AFTER: %[[END_TEST:.*]] = cir.cmp(eq, %[[CUR_ELEM_PTR2]], %[[END_PTR]]) : !cir.ptr, !cir.bool +// AFTER: cir.condition(%[[END_TEST]]) +// AFTER: } diff --git a/clang/test/CIR/Lowering/new.cpp b/clang/test/CIR/Lowering/new.cpp index 2cac378f3848..b4ea023ab4dc 100644 --- a/clang/test/CIR/Lowering/new.cpp +++ b/clang/test/CIR/Lowering/new.cpp @@ -187,3 +187,31 @@ void t_new_var_size_nontrivial(size_t n) { // LLVM: %[[ANY_OVERFLOW:.*]] = or i1 %[[OVERFLOW]], %[[OVERFLOW2]] // LLVM: %[[ALLOC_SIZE:.*]] = select i1 %[[ANY_OVERFLOW]], i64 -1, i64 %[[SIZE]] // LLVM: %[[ADDR:.*]] = call ptr @_Znam(i64 %[[ALLOC_SIZE]]) + +class E { + public: + E(); + ~E(); +}; + +void t_new_constant_size_constructor() { + auto p = new E[3]; +} + +// LLVM: @_Z31t_new_constant_size_constructorv +// LLVM: %[[ALLOC_PTR:.*]] = call ptr @_Znam(i64 11) +// LLVM: store i64 3, ptr %[[ALLOC_PTR]], align 8 +// LLVM: %[[OBJ_PTR:.*]] = getelementptr i8, ptr %[[ALLOC_PTR]], i64 8 +// LLVM: %[[ELEM_PTR:.*]] = getelementptr %class.E, ptr %[[OBJ_PTR]], i32 0 +// LLVM: %[[END_PTR:.*]] = getelementptr %class.E, ptr %[[ELEM_PTR]], i64 3 +// LLVM: br label %[[INIT_ELEM_BB:.*]] +// LLVM: [[LOOP_INC_BB:.*]]: +// LLVM: %[[NEXT_ELEM_PTR:.*]] = load ptr +// LLVM: %[[END_TEST:.*]] = icmp eq ptr %[[NEXT_ELEM_PTR]], %[[END_PTR]] +// LLVM: br i1 %[[END_TEST]], label %[[INIT_ELEM_BB]], label %[[EXIT_BB:.*]] +// LLVM: [[INIT_ELEM_BB]]: +// LLVM: %[[CUR_ELEM_PTR:.*]] = load ptr +// LLVM: call void @_ZN1EC1Ev(ptr %[[CUR_ELEM_PTR]]) +// LLVM: %[[NEXT_PTR:.*]] = getelementptr %class.E, ptr %[[CUR_ELEM_PTR]], i64 1 +// LLVM: store ptr %[[NEXT_PTR]] +// LLVM: br label %[[LOOP_INC_BB]] From cf491db2065e312a8852e68a86c977ffcfc4a201 Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Fri, 14 Feb 2025 15:40:58 +0000 Subject: [PATCH 2258/2301] [CIR][CUDA] Generate kernel calls (#1348) Now we could generate calls to `__global__` functions. Most work is already done in AST. It rewrites `fn<<<2, 2>>>()` to something like `__cudaPushCallConfiguration(dim3(2, 1, 1), dim3(2, 1, 1), 0, nullptr)`, which returns a bool. We calls the device stub as a normal function when the call returns true. --- clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp | 20 ++++++++++++++++++++ clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h | 6 ++++++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 9 +++++++-- clang/test/CIR/CodeGen/CUDA/simple.cu | 15 +++++++++++++++ 4 files changed, 48 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp index 400c41cbb0d4..acbbcd2c5c8b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp @@ -169,3 +169,23 @@ void CIRGenCUDARuntime::emitDeviceStub(CIRGenFunction &cgf, cir::FuncOp fn, else emitDeviceStubBodyLegacy(cgf, fn, args); } + +RValue CIRGenCUDARuntime::emitCUDAKernelCallExpr(CIRGenFunction &cgf, + const CUDAKernelCallExpr *expr, + ReturnValueSlot retValue) { + auto builder = cgm.getBuilder(); + mlir::Location loc = + cgf.currSrcLoc ? cgf.currSrcLoc.value() : builder.getUnknownLoc(); + + cgf.emitIfOnBoolExpr( + expr->getConfig(), + [&](mlir::OpBuilder &b, mlir::Location l) { + CIRGenCallee callee = cgf.emitCallee(expr->getCallee()); + cgf.emitCall(expr->getCallee()->getType(), callee, expr, retValue); + b.create(loc); + }, + loc, [](mlir::OpBuilder &b, mlir::Location l) {}, + std::optional()); + + return RValue::get(nullptr); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h index a3145a0baeb3..634f4891b85d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h +++ b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h @@ -23,6 +23,8 @@ namespace clang::CIRGen { class CIRGenFunction; class CIRGenModule; class FunctionArgList; +class RValue; +class ReturnValueSlot; class CIRGenCUDARuntime { protected: @@ -40,6 +42,10 @@ class CIRGenCUDARuntime { virtual void emitDeviceStub(CIRGenFunction &cgf, cir::FuncOp fn, FunctionArgList &args); + + virtual RValue emitCUDAKernelCallExpr(CIRGenFunction &cgf, + const CUDAKernelCallExpr *expr, + ReturnValueSlot retValue); }; } // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 38a880548202..4d4dd663e0dc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -530,7 +530,10 @@ static CIRGenCallee emitDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { auto CalleePtr = emitFunctionDeclPointer(CGM, GD); - assert(!CGM.getLangOpts().CUDA && "NYI"); + // For HIP, the device stub should be converted to handle. + if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice && + FD->hasAttr()) + llvm_unreachable("NYI"); return CIRGenCallee::forDirect(CalleePtr, GD); } @@ -1405,7 +1408,9 @@ RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *E, if (const auto *CE = dyn_cast(E)) return emitCXXMemberCallExpr(CE, ReturnValue); - assert(!dyn_cast(E) && "CUDA NYI"); + if (const auto *CE = dyn_cast(E)) + return CGM.getCUDARuntime().emitCUDAKernelCallExpr(*this, CE, ReturnValue); + if (const auto *CE = dyn_cast(E)) if (const CXXMethodDecl *MD = dyn_cast_or_null(CE->getCalleeDecl())) diff --git a/clang/test/CIR/CodeGen/CUDA/simple.cu b/clang/test/CIR/CodeGen/CUDA/simple.cu index 9675de3fe61a..51a1d3bb2f4b 100644 --- a/clang/test/CIR/CodeGen/CUDA/simple.cu +++ b/clang/test/CIR/CodeGen/CUDA/simple.cu @@ -31,3 +31,18 @@ __global__ void global_fn(int a) {} // CIR-HOST: cir.call @__cudaPopCallConfiguration // CIR-HOST: cir.get_global @_Z24__device_stub__global_fni // CIR-HOST: cir.call @cudaLaunchKernel + +int main() { + global_fn<<<1, 1>>>(1); +} +// CIR-DEVICE-NOT: cir.func @main() + +// CIR-HOST: cir.func @main() +// CIR-HOST: cir.call @_ZN4dim3C1Ejjj +// CIR-HOST: cir.call @_ZN4dim3C1Ejjj +// CIR-HOST: [[Push:%[0-9]+]] = cir.call @__cudaPushCallConfiguration +// CIR-HOST: [[ConfigOK:%[0-9]+]] = cir.cast(int_to_bool, [[Push]] +// CIR-HOST: cir.if [[ConfigOK]] { +// CIR-HOST: [[Arg:%[0-9]+]] = cir.const #cir.int<1> +// CIR-HOST: cir.call @_Z24__device_stub__global_fni([[Arg]]) +// CIR-HOST: } From a0091e38f1027e35d17819e02ee1ae257a12d296 Mon Sep 17 00:00:00 2001 From: Henrich Lauko Date: Sat, 15 Feb 2025 00:21:21 +0100 Subject: [PATCH 2259/2301] [CIR] Add missing Language::CIR cases (#1349) --- clang/lib/Basic/LangStandards.cpp | 3 +++ .../lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp | 1 + clang/lib/Frontend/CompilerInvocation.cpp | 6 ++++++ clang/lib/Frontend/FrontendActions.cpp | 1 + 4 files changed, 11 insertions(+) diff --git a/clang/lib/Basic/LangStandards.cpp b/clang/lib/Basic/LangStandards.cpp index aa01e25baeca..e17ec30bc62c 100644 --- a/clang/lib/Basic/LangStandards.cpp +++ b/clang/lib/Basic/LangStandards.cpp @@ -23,6 +23,8 @@ StringRef clang::languageToString(Language L) { return "LLVM IR"; case Language::C: return "C"; + case Language::CIR: + return "CIR"; case Language::CXX: return "C++"; case Language::ObjC: @@ -91,6 +93,7 @@ LangStandard::Kind clang::getDefaultLanguageStandard(clang::Language Lang, switch (Lang) { case Language::Unknown: case Language::LLVM_IR: + case Language::CIR: llvm_unreachable("Invalid input kind!"); case Language::OpenCL: return LangStandard::lang_opencl12; diff --git a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp index 8d874f226cb1..e881d56258e5 100644 --- a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp +++ b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp @@ -220,6 +220,7 @@ StringRef getLanguageName(Language Lang) { case Language::Unknown: case Language::Asm: case Language::LLVM_IR: + case Language::CIR: llvm_unreachable("Unsupported language kind"); } diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index a505b5412448..6de4d7756a60 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -2959,6 +2959,9 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts, case Language::HLSL: Lang = "hlsl"; break; + case Language::CIR: + Lang = "cir"; + break; } GenerateArg(Consumer, OPT_x, @@ -3614,6 +3617,7 @@ static bool IsInputCompatibleWithStandard(InputKind IK, switch (IK.getLanguage()) { case Language::Unknown: case Language::LLVM_IR: + case Language::CIR: llvm_unreachable("should not parse language flags for this input"); case Language::C: @@ -3676,6 +3680,8 @@ static StringRef GetInputKindName(InputKind IK) { return "Asm"; case Language::LLVM_IR: return "LLVM IR"; + case Language::CIR: + return "CIR"; case Language::HLSL: return "HLSL"; diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp index 0e10268e024c..fe10ef0748b2 100644 --- a/clang/lib/Frontend/FrontendActions.cpp +++ b/clang/lib/Frontend/FrontendActions.cpp @@ -1111,6 +1111,7 @@ void PrintPreambleAction::ExecuteAction() { case Language::Unknown: case Language::Asm: case Language::LLVM_IR: + case Language::CIR: // We can't do anything with these. return; } From c69469e4d8219e3202d7055106c6b692a9076564 Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Mon, 17 Feb 2025 11:13:06 +0800 Subject: [PATCH 2260/2301] [CIR] Lower nested local constant alloca (#1261) This PR adds support for lowering local constants in nested scopes, including those in nested loops. For those constant allocas in non-loop inner scopes, this PR keeps their constant flags during alloca hoisting. LLVM lowering would correctly emit necessary invariant metadata for those allocas. For those constant allocas in a loop, this PR introduces a new operation `cir.invariant_group` that marks the beginning of the lifetime of the constant objects. This operation is put at the location of the alloca operation before hoisting them. This PR updates LLVM lowering to emit the necessary invariant metadata when loading and storing through such pointers. This PR takes care of the special case where the constant alloca represents a variable declared in the condition part of a while loop. In such a case, this PR removes the constant flag on the alloca operation when hositing them. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 57 ++++++ .../CIR/Dialect/Transforms/HoistAllocas.cpp | 165 ++++++++++++++++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 42 +++-- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 15 ++ clang/test/CIR/CodeGen/const-alloca.cpp | 85 +++++++++ 5 files changed, 341 insertions(+), 23 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index a08e16df9222..b06c61577150 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3563,6 +3563,63 @@ def LLVMIntrinsicCallOp : CIR_Op<"llvm.intrinsic"> { } +//===----------------------------------------------------------------------===// +// InvariantGroupOp +//===----------------------------------------------------------------------===// + +def InvariantGroupOp + : CIR_Op<"invariant_group", [Pure, SameOperandsAndResultType]> { + let summary = "Start an invariant group"; + let description = [{ + The `cir.invariant_group` operation takes a single pointer value as argument + and returns the same pointer value with fresh [invariant group] information. + All loads and stores that access the returned pointer value are presumed by + the optimizer to load or store the same value. + + [invariant group]: https://llvm.org/docs/LangRef.html#invariant-group-metadata + + This operation is not emitted during CIRGen. Instead, it is created when + hoisting constant alloca operations to the entry block of a function. This + operation effectively marks the syntactic scope of the constant local + variable represented by the hosited alloca operation, and it allows for + better LLVMIR generation with potentially more optimizations. + + For example, if we have the following CIR before alloca hoisting: + + ```mlir + cir.func @foo() { + cir.scope { + %0 = cir.alloca !s32i : !cir.ptr + use(%0) + } + } + ``` + + After alloca hoisting: + + ```mlir + cir.func @foo() { + %0 = cir.alloca !s32i : !cir.ptr + cir.scope { + %1 = cir.invariant_group %0 : !cir.ptr + use(%1) + } + } + ``` + + During LLVMIR lowering, load and store operations whose pointer operand + comes from `cir.invariant_group` are lowered to corresponding LLVM + instructions with invariant group metadata attached. + }]; + + let arguments = (ins CIR_PointerType:$ptr); + let results = (outs CIR_PointerType:$result); + + let assemblyFormat = [{ + $ptr `:` type($result) attr-dict + }]; +} + //===----------------------------------------------------------------------===// // DeleteArrayOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp index 4b29c7235a02..a4de5f2af3ed 100644 --- a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp +++ b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp @@ -28,7 +28,142 @@ struct HoistAllocasPass : public HoistAllocasBase { void runOnOperation() override; }; -static void process(cir::FuncOp func) { +static bool isOpInLoop(mlir::Operation *op) { + return op->getParentOfType(); +} + +static bool hasStoreToAllocaInWhileCond(cir::AllocaOp alloca) { + // This function determines whether the given alloca operation represents + // a variable defined as a while loop's condition. + // + // Specifically, C/C++ allows the condition of a while loop be a variable + // declaration: + // + // while (const int x = foo()) { /* body... */ } + // + // CIRGen would emit the following CIR for the above code: + // + // cir.scope { + // %x.slot = cir.alloca !s32i [init, const] + // cir.while { + // %0 = cir.call @foo() + // cir.store %0, %x + // %1 = cir.load %x + // %2 = cir.cast int_to_bool %1 + // cir.condition(%2) + // } do { + // // loop body goes here. + // } + // } + // + // Note that %x.slot is emitted outside the cir.while operation. Ideally, the + // cir.while operation should cover this cir.alloca operation, but currently + // CIR does not work this way. When hoisting such an alloca operation, one + // must remove the "const" flag from it, otherwise LLVM lowering code will + // mistakenly attach invariant group metadata to the load and store operations + // in the while body, indicating that all loads and stores across all + // iterations of the loop are constant. + + for (mlir::Operation *user : alloca->getUsers()) { + if (!mlir::isa(user)) + continue; + + auto store = mlir::cast(user); + mlir::Operation *storeParentOp = store->getParentOp(); + if (!mlir::isa(storeParentOp)) + continue; + + auto whileOp = mlir::cast(storeParentOp); + return &whileOp.getCond() == store->getParentRegion(); + } + + return false; +} + +static void processConstAlloca(cir::AllocaOp alloca) { + // When optimization is enabled, LLVM lowering would start emitting invariant + // group metadata for loads and stores to alloca-ed objects with "const" + // attribute. For example, the following CIR: + // + // %slot = cir.alloca !s32i [init, const] + // cir.store %0, %slot + // %1 = cir.load %slot + // + // would be lowered to the following LLVM IR: + // + // %slot = alloca i32, i64 1 + // store i32 %0, ptr %slot, !invariant.group !0 + // %1 = load i32, ptr %slot, !invariant.group !0 + // + // The invariant group metadata would tell LLVM optimizer that the store and + // load instruction would store and load the same value from %slot. + // + // So far so good. Things started to get tricky when such an alloca operation + // appears in the body of a loop construct: + // + // cir.some_loop_construct { + // %slot = cir.alloca !s32i [init, const] + // cir.store %0, %slot + // %1 = cir.load %slot + // } + // + // After alloca hoisting, the CIR code above would be transformed into: + // + // %slot = cir.alloca !s32i [init, const] + // cir.some_loop_construct { + // cir.store %0, %slot + // %1 = cir.load %slot + // } + // + // Notice how alloca hoisting change the semantics of the program in such a + // case. The transformed code now indicates the optimizer that the load and + // store operations load and store the same value **across all iterations of + // the loop**! + // + // To overcome this problem, we instead transform the program into this: + // + // %slot = cir.alloca !s32i [init, const] + // cir.some_loop_construct { + // %slot.inv = cir.invariant_group %slot + // cir.store %0, %slot.inv + // %1 = cir.load %slot.inv + // } + // + // The cir.invariant_group operation attaches fresh invariant information to + // the operand pointer and yields a pointer with the fresh invariant + // information. Upon each loop iteration, the old invariant information is + // disgarded, and a new invariant information is attached, thus the correct + // program semantic retains. During LLVM lowering, the cir.invariant_group + // operation would eventually become an intrinsic call to + // @llvm.launder.invariant.group. + + if (isOpInLoop(alloca)) { + // Mark the alloca-ed pointer as invariant via the cir.invariant_group + // operation. + mlir::OpBuilder builder(alloca); + auto invariantGroupOp = + builder.create(alloca.getLoc(), alloca); + + // And replace all uses of the original alloca-ed pointer with the marked + // pointer (which carries invariant group information). + alloca->replaceUsesWithIf( + invariantGroupOp, + [op = invariantGroupOp.getOperation()](mlir::OpOperand &use) { + return use.getOwner() != op; + }); + } else if (hasStoreToAllocaInWhileCond(alloca)) { + // The alloca represents a variable declared as the condition of a while + // loop. In CIR, the alloca would be emitted at a scope outside of the + // while loop. We have to remove the constant flag during hoisting, + // otherwise we would be telling the optimizer that the alloca-ed value + // is constant across all iterations of the while loop. + // + // See the body of the isWhileCondition function for more details. + alloca.setConstant(false); + } +} + +static void process(mlir::ModuleOp mod, cir::FuncOp func) { if (func.getRegion().empty()) return; @@ -47,25 +182,35 @@ static void process(cir::FuncOp func) { return; mlir::Operation *insertPoint = &*entryBlock.begin(); + auto optInfoAttr = mlir::cast_if_present( + mod->getAttr(cir::CIRDialect::getOptInfoAttrName())); + unsigned optLevel = optInfoAttr ? optInfoAttr.getLevel() : 0; for (auto alloca : allocas) { - alloca->moveBefore(insertPoint); if (alloca.getConstant()) { - // Hoisted alloca may come from the body of a loop, in which case the - // stack slot is re-used by multiple objects alive in different iterations - // of the loop. In theory, each of these objects are still constant within - // their lifetimes, but currently we're not emitting metadata to further - // describe this. So for now let's behave conservatively and remove the - // const flag on nested allocas when hoisting them. - alloca.setConstant(false); + if (optLevel == 0) { + // Under non-optimized builds, just remove the constant flag. + alloca.setConstant(false); + continue; + } + + processConstAlloca(alloca); } + + alloca->moveBefore(insertPoint); } } void HoistAllocasPass::runOnOperation() { llvm::TimeTraceScope scope("Hoist Allocas"); llvm::SmallVector ops; - getOperation()->walk([&](cir::FuncOp op) { process(op); }); + + Operation *op = getOperation(); + auto mod = mlir::dyn_cast(op); + if (!mod) + mod = op->getParentOfType(); + + getOperation()->walk([&](cir::FuncOp op) { process(mod, op); }); } } // namespace diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3ac4de81422b..3b9f1def6db8 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1612,6 +1612,15 @@ getLLVMMemOrder(std::optional &memorder) { llvm_unreachable("unknown memory order"); } +static bool isLoadOrStoreInvariant(mlir::Value addr) { + if (auto addrAllocaOp = + mlir::dyn_cast_if_present(addr.getDefiningOp())) + return addrAllocaOp.getConstant(); + if (mlir::isa_and_present(addr.getDefiningOp())) + return true; + return false; +} + mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite( cir::LoadOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -1631,12 +1640,8 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite( auto invariant = false; // Under -O1 or higher optimization levels, add the invariant metadata if the // load operation loads from a constant object. - if (lowerMod && - lowerMod->getContext().getCodeGenOpts().OptimizationLevel > 0) { - auto addrAllocaOp = - mlir::dyn_cast_if_present(op.getAddr().getDefiningOp()); - invariant = addrAllocaOp && addrAllocaOp.getConstant(); - } + if (lowerMod && lowerMod->getContext().getCodeGenOpts().OptimizationLevel > 0) + invariant = isLoadOrStoreInvariant(op.getAddr()); // TODO: nontemporal, syncscope. auto newLoad = rewriter.create( @@ -1674,12 +1679,8 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite( auto invariant = false; // Under -O1 or higher optimization levels, add the invariant metadata if the // store operation stores to a constant object. - if (lowerMod && - lowerMod->getContext().getCodeGenOpts().OptimizationLevel > 0) { - auto addrAllocaOp = - mlir::dyn_cast_if_present(op.getAddr().getDefiningOp()); - invariant = addrAllocaOp && addrAllocaOp.getConstant(); - } + if (lowerMod && lowerMod->getContext().getCodeGenOpts().OptimizationLevel > 0) + invariant = isLoadOrStoreInvariant(op.getAddr()); // Convert adapted value to its memory type if needed. mlir::Value value = emitToMemory(rewriter, dataLayout, @@ -3666,6 +3667,20 @@ mlir::LogicalResult CIRToLLVMInlineAsmOpLowering::matchAndRewrite( return mlir::success(); } +mlir::LogicalResult CIRToLLVMInvariantGroupOpLowering::matchAndRewrite( + cir::InvariantGroupOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + if (!lowerMod || + lowerMod->getContext().getCodeGenOpts().OptimizationLevel == 0) { + rewriter.replaceOp(op, adaptor.getPtr()); + return mlir::success(); + } + + rewriter.replaceOpWithNewOp( + op, adaptor.getPtr()); + return mlir::success(); +} + mlir::LogicalResult CIRToLLVMPrefetchOpLowering::matchAndRewrite( cir::PrefetchOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -4107,7 +4122,8 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMBaseDataMemberOpLowering, CIRToLLVMCmpOpLowering, CIRToLLVMDerivedDataMemberOpLowering, - CIRToLLVMGetRuntimeMemberOpLowering + CIRToLLVMGetRuntimeMemberOpLowering, + CIRToLLVMInvariantGroupOpLowering // clang-format on >(converter, patterns.getContext(), lowerModule); patterns.add< diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index f5441c7d11ac..5aafd1a2ecab 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -975,6 +975,21 @@ class CIRToLLVMInlineAsmOpLowering mlir::ConversionPatternRewriter &) const override; }; +class CIRToLLVMInvariantGroupOpLowering + : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; + +public: + CIRToLLVMInvariantGroupOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + cir::LowerModule *lowerModule) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) {} + + mlir::LogicalResult + matchAndRewrite(cir::InvariantGroupOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + class CIRToLLVMPrefetchOpLowering : public mlir::OpConversionPattern { public: diff --git a/clang/test/CIR/CodeGen/const-alloca.cpp b/clang/test/CIR/CodeGen/const-alloca.cpp index 7cc9a5b57517..cd64a91ecf5d 100644 --- a/clang/test/CIR/CodeGen/const-alloca.cpp +++ b/clang/test/CIR/CodeGen/const-alloca.cpp @@ -5,6 +5,7 @@ int produce_int(); void blackbox(const int &); +void consume(int); void local_const_int() { const int x = produce_int(); @@ -85,3 +86,87 @@ int local_const_optimize() { // LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#slot]]) // LLVM-NEXT: ret i32 %[[#init]] // LLVM-NEXT: } + +int local_scoped_const() { + { + const int x = produce_int(); + blackbox(x); + return x; + } +} + +// CIR-LABEL: @_Z18local_scoped_constv() +// CIR: cir.scope { +// CIR-NEXT: %[[#x_slot:]] = cir.alloca !s32i, !cir.ptr, ["x", init, const] +// CIR-NEXT: %[[#init:]] = cir.call @_Z11produce_intv() : () -> !s32i +// CIR-NEXT: cir.store %[[#init]], %[[#x_slot]] : !s32i, !cir.ptr +// CIR-NEXT: cir.call @_Z8blackboxRKi(%[[#x_slot]]) : (!cir.ptr) -> () +// CIR-NEXT: %[[#x_reload:]] = cir.load %[[#x_slot]] : !cir.ptr, !s32i +// CIR-NEXT: cir.store %[[#x_reload]], %[[#ret_slot:]] : !s32i, !cir.ptr +// CIR-NEXT: %[[#ret:]] = cir.load %[[#ret_slot]] : !cir.ptr, !s32i +// CIR-NEXT: cir.return %[[#ret]] : !s32i +// CIR-NEXT: } +// CIR: } + +// LLVM-LABEL: @_Z18local_scoped_constv() +// LLVM-NEXT: %[[#x_slot:]] = alloca i32, align 4 +// LLVM-NEXT: %[[#init:]] = tail call i32 @_Z11produce_intv() +// LLVM-NEXT: store i32 %[[#init]], ptr %[[#x_slot]], align 4, !tbaa !{{.+}}, !invariant.group !{{.+}} +// LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#x_slot]]) +// LLVM-NEXT: ret i32 %[[#init]] +// LLVM-NEXT: } + +void local_const_in_loop() { + for (int i = 0; i < 10; ++i) { + const int x = produce_int(); + blackbox(x); + consume(x); + } +} + +// CIR-LABEL: @_Z19local_const_in_loopv +// CIR: cir.scope { +// CIR: cir.for : cond { +// CIR: } body { +// CIR-NEXT: cir.scope { +// CIR-NEXT: %[[#x_slot:]] = cir.alloca !s32i, !cir.ptr, ["x", init, const] +// CIR-NEXT: %[[#init:]] = cir.call @_Z11produce_intv() : () -> !s32i +// CIR-NEXT: cir.store %[[#init]], %[[#x_slot]] : !s32i, !cir.ptr +// CIR-NEXT: cir.call @_Z8blackboxRKi(%[[#x_slot]]) : (!cir.ptr) -> () +// CIR-NEXT: %[[#x_reload:]] = cir.load %[[#x_slot]] : !cir.ptr, !s32i +// CIR-NEXT: cir.call @_Z7consumei(%[[#x_reload]]) : (!s32i) -> () +// CIR-NEXT: } +// CIR-NEXT: cir.yield +// CIR-NEXT: } step { +// CIR: } +// CIR-NEXT: } +// CIR-NEXT: cir.return +// CIR-NEXT: } + +// LLVM-LABEL: @_Z19local_const_in_loopv() +// LLVM: %[[#x_ptr:]] = call ptr @llvm.launder.invariant.group.p0(ptr nonnull %1) +// LLVM-NEXT: %[[#init:]] = call i32 @_Z11produce_intv() +// LLVM-NEXT: store i32 %[[#init]], ptr %[[#x_ptr]], align 4, !tbaa !{{.+}}, !invariant.group !{{.+}} +// LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#x_ptr]]) +// LLVM-NEXT: call void @_Z7consumei(i32 %[[#init]]) +// LLVM: } + +void local_const_in_while_condition() { + while (const int x = produce_int()) { + blackbox(x); + } +} + +// LLVM-LABEL: @_Z30local_const_in_while_conditionv() +// LLVM: %[[#x_slot:]] = alloca i32, align 4 +// LLVM-NEXT: %[[#init:]] = tail call i32 @_Z11produce_intv() +// LLVM-NEXT: store i32 %[[#init]], ptr %[[#x_slot]], align 4 +// LLVM-NEXT: %[[loop_cond:.+]] = icmp eq i32 %[[#init]], 0 +// LLVM-NEXT: br i1 %[[loop_cond]], label %{{.+}}, label %[[loop_body:.+]] +// LLVM: [[loop_body]]: +// LLVM-NEXT: call void @_Z8blackboxRKi(ptr nonnull %[[#x_slot]]) +// LLVM-NEXT: %[[#next:]] = call i32 @_Z11produce_intv() +// LLVM-NEXT: store i32 %[[#next]], ptr %[[#x_slot]], align 4 +// LLVM-NEXT: %[[cond:.+]] = icmp eq i32 %[[#next]], 0 +// LLVM-NEXT: br i1 %[[cond]], label %{{.+}}, label %[[loop_body]] +// LLVM: } From bf3135bd175f571d79dc485cffd72bb7a72168a1 Mon Sep 17 00:00:00 2001 From: Letu Ren Date: Tue, 18 Feb 2025 12:00:55 +0800 Subject: [PATCH 2261/2301] [CIR][NFC] Fix typo (#1366) --- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 3a7e2b9617c2..171ffda0b691 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -40,13 +40,13 @@ struct StructTypeStorage; /// There are three possible formats for this type: /// /// - Identified and complete structs: unique name and a known body. -/// - Identified and incomplete structs: unique name and unkonwn body. +/// - Identified and incomplete structs: unique name and unknown body. /// - Anonymous structs: no name and a known body. /// /// Identified structs are uniqued by their name, and anonymous structs are /// uniqued by their body. This means that two anonymous structs with the same /// body will be the same type, and two identified structs with the same name -/// will be the same type. Attempting to build a struct with a existing name, +/// will be the same type. Attempting to build a struct with an existing name, /// but a different body will result in an error. /// /// A few examples: @@ -57,7 +57,7 @@ struct StructTypeStorage; /// !anonymous = !cir.struct}> /// ``` /// -/// Incomplete structs are mutable, meaning the can be later completed with a +/// Incomplete structs are mutable, meaning they can be later completed with a /// body automatically updating in place every type in the code that uses the /// incomplete struct. Mutability allows for recursive types to be represented, /// meaning the struct can have members that refer to itself. This is useful for @@ -83,7 +83,7 @@ class StructType enum RecordKind : uint32_t { Class, Union, Struct }; - /// Create a identified and complete struct type. + /// Create an identified and complete struct type. static StructType get(mlir::MLIRContext *context, llvm::ArrayRef members, mlir::StringAttr name, bool packed, bool padded, @@ -94,7 +94,7 @@ class StructType mlir::StringAttr name, bool packed, bool padded, RecordKind kind, ASTRecordDeclInterface ast = {}); - /// Create a identified and incomplete struct type. + /// Create an identified and incomplete struct type. static StructType get(mlir::MLIRContext *context, mlir::StringAttr name, RecordKind kind); static StructType @@ -102,7 +102,7 @@ class StructType mlir::MLIRContext *context, mlir::StringAttr name, RecordKind kind); - /// Create a anonymous struct type (always complete). + /// Create an anonymous struct type (always complete). static StructType get(mlir::MLIRContext *context, llvm::ArrayRef members, bool packed, bool padded, RecordKind kind, From 13a8b5d361794faa1d4aecbe8924b417afc21caf Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Wed, 19 Feb 2025 20:18:57 +0100 Subject: [PATCH 2262/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vldap1_lane_s64 and vldap1q_lane_s64 (#1346) Lower `neon_vldap1_lane_s64` and `vldap1q_lane_s64` To add atomic `MemOrder` I changed the return type of builder to return LoadOp similar to our builders for StoreOp. --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 6 +- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 7 +- clang/test/CIR/CodeGen/AArch64/neon-ldst.c | 137 ++++++++++++++++++ 3 files changed, 146 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index e678a13ede7e..5a06ddbd0b27 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -871,7 +871,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { /*mem_order=*/cir::MemOrderAttr{}, /*tbaa=*/cir::TBAAAttr{}); } - mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, + cir::LoadOp createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value ptr, llvm::MaybeAlign align, bool isVolatile) { if (ty != mlir::cast(ptr.getType()).getPointee()) @@ -880,14 +880,14 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return CIRBaseBuilderTy::createLoad(loc, ptr, isVolatile, alignment); } - mlir::Value createAlignedLoad(mlir::Location loc, mlir::Type ty, + cir::LoadOp createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value ptr, llvm::MaybeAlign align) { // TODO: make sure callsites shouldn't be really passing volatile. assert(!cir::MissingFeatures::volatileLoadOrStore()); return createAlignedLoad(loc, ty, ptr, align, /*isVolatile=*/false); } - mlir::Value + cir::LoadOp createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value addr, clang::CharUnits align = clang::CharUnits::One()) { return createAlignedLoad(loc, ty, addr, align.getAsAlign()); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index b7835b419f62..631c09355115 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4453,7 +4453,12 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vldap1_lane_s64: case NEON::BI__builtin_neon_vldap1q_lane_s64: { - llvm_unreachable("NEON::BI__builtin_neon_vldap1q_lane_s64 NYI"); + cir::LoadOp Load = builder.createAlignedLoad( + Ops[0].getLoc(), vTy.getEltType(), Ops[0], PtrOp0.getAlignment()); + Load.setAtomic(cir::MemOrder::Acquire); + return builder.create(getLoc(E->getExprLoc()), + builder.createBitcast(Ops[1], vTy), + Load, Ops[2]); } case NEON::BI__builtin_neon_vld1_dup_v: case NEON::BI__builtin_neon_vld1q_dup_v: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c index e35260757351..ca0de045bcb6 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-ldst.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-ldst.c @@ -629,3 +629,140 @@ void test_vstl1_lane_p64(poly64_t *a, poly64x1_t b) { // LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> // LLVM: [[TMP2:%.*]] = extractelement <1 x i64> [[TMP1]], i32 0 // LLVM: store atomic i64 [[TMP2]], ptr [[PTR]] release, align 8 + +uint64x2_t test_vldap1q_lane_u64(uint64_t *a, uint64x2_t b) { + return vldap1q_lane_u64(a, b, 1); +} + +// CIR-LABEL:test_vldap1q_lane_u64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr, !u64 +// CIR: [[VEC:%.*]] = cir.cast(bitcast, {{.*}} : !cir.vector), !cir.vector +// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vldap1q_lane_u64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8> +// LLVM: [[TMP2:%.*]] = load atomic i64, ptr [[PTR]] acquire, align 8 +// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// LLVM: [[VLDAP1_LANE:%.*]] = insertelement <2 x i64> [[TMP1]], i64 [[TMP2]], i32 1 + +int64x2_t test_vldap1q_lane_s64(int64_t *a, int64x2_t b) { + return vldap1q_lane_s64(a, b, 1); +} + +// CIR-LABEL:test_vldap1q_lane_s64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr, !s64 +// CIR: [[VEC:%.*]] = cir.cast(bitcast, {{.*}} : !cir.vector), !cir.vector +// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vldap1q_lane_s64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8> +// LLVM: [[TMP2:%.*]] = load atomic i64, ptr [[PTR]] acquire, align 8 +// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// LLVM: [[VLDAP1_LANE:%.*]] = insertelement <2 x i64> [[TMP1]], i64 [[TMP2]], i32 1 + +float64x2_t test_vldap1q_lane_f64(float64_t *a, float64x2_t b) { + return vldap1q_lane_f64(a, b, 1); +} + +// CIR-LABEL:test_vldap1q_lane_f64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr, !cir.double +// CIR: [[VEC:%.*]] = cir.cast(bitcast, {{.*}} : !cir.vector), !cir.vector +// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vldap1q_lane_f64(ptr{{.*}}[[PTR:%.*]], <2 x double>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[SRC]] to <16 x i8> +// LLVM: [[TMP2:%.*]] = load atomic double, ptr [[PTR]] acquire, align 8 +// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double> +// LLVM: [[VLDAP1_LANE:%.*]] = insertelement <2 x double> [[TMP1]], double [[TMP2]], i32 1 + +poly64x2_t test_vldap1q_lane_p64(poly64_t *a, poly64x2_t b) { + return vldap1q_lane_p64(a, b, 1); +} + +// CIR-LABEL:test_vldap1q_lane_p64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr, !s64 +// CIR: [[VEC:%.*]] = cir.cast(bitcast, {{.*}} : !cir.vector), !cir.vector +// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vldap1q_lane_p64(ptr{{.*}}[[PTR:%.*]], <2 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[SRC]] to <16 x i8> +// LLVM: [[TMP2:%.*]] = load atomic i64, ptr [[PTR]] acquire, align 8 +// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> +// LLVM: [[VLDAP1_LANE:%.*]] = insertelement <2 x i64> [[TMP1]], i64 [[TMP2]], i32 1 + +uint64x1_t test_vldap1_lane_u64(uint64_t *a, uint64x1_t b) { + return vldap1_lane_u64(a, b, 0); +} + +// CIR-LABEL:test_vldap1_lane_u64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr, !u64 +// CIR: [[VEC:%.*]] = cir.cast(bitcast, {{.*}} : !cir.vector), !cir.vector +// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vldap1_lane_u64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8> +// LLVM: [[TMP2:%.*]] = load atomic i64, ptr [[PTR]] acquire, align 8 +// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// LLVM: [[VLDAP1_LANE:%.*]] = insertelement <1 x i64> [[TMP1]], i64 [[TMP2]], i32 0 + +int64x1_t test_vldap1_lane_s64(int64_t *a, int64x1_t b) { + return vldap1_lane_s64(a, b, 0); +} + +// CIR-LABEL:test_vldap1_lane_s64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr, !s64 +// CIR: [[VEC:%.*]] = cir.cast(bitcast, {{.*}} : !cir.vector), !cir.vector +// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vldap1_lane_s64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8> +// LLVM: [[TMP2:%.*]] = load atomic i64, ptr [[PTR]] acquire, align 8 +// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// LLVM: [[VLDAP1_LANE:%.*]] = insertelement <1 x i64> [[TMP1]], i64 [[TMP2]], i32 0 + + +float64x1_t test_vldap1_lane_f64(float64_t *a, float64x1_t b) { + return vldap1_lane_f64(a, b, 0); +} + +// CIR-LABEL: test_vldap1_lane_f64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr, !cir.double +// CIR: [[VEC:%.*]] = cir.cast(bitcast, {{.*}} : !cir.vector), !cir.vector +// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vldap1_lane_f64(ptr{{.*}}[[PTR:%.*]], <1 x double>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <1 x double> [[SRC]] to <8 x i8> +// LLVM: [[TMP2:%.*]] = load atomic double, ptr [[PTR]] acquire, align 8 +// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double> +// LLVM: [[VLDAP1_LANE:%.*]] = insertelement <1 x double> [[TMP1]], double [[TMP2]], i32 0 + +poly64x1_t test_vldap1_lane_p64(poly64_t *a, poly64x1_t b) { + return vldap1_lane_p64(a, b, 0); +} + +// CIR-LABEL: test_vldap1_lane_p64 +// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i +// CIR: [[TMP0:%.*]] = cir.cast(bitcast, {{.*}} : !cir.ptr), !cir.ptr +// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr, !s64 +// CIR: [[VEC:%.*]] = cir.cast(bitcast, {{.*}} : !cir.vector), !cir.vector +// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector + +// LLVM: {{.*}}test_vldap1_lane_p64(ptr{{.*}}[[PTR:%.*]], <1 x i64>{{.*}}[[SRC:%.*]]) +// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[SRC]] to <8 x i8> +// LLVM: [[TMP2:%.*]] = load atomic i64, ptr [[PTR]] acquire, align 8 +// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// LLVM: [[VLDAP1_LANE:%.*]] = insertelement <1 x i64> [[TMP1]], i64 [[TMP2]], i32 0 From a0fe0fb28bc28a7946e65f5fb01450f11ac2c54e Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Wed, 19 Feb 2025 20:21:11 +0100 Subject: [PATCH 2263/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vaddlv s16 and u16 (#1352) Lower `neon_vaddlv_s16` and `neon_vaddlv_u16` --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 13 +++++++---- clang/test/CIR/CodeGen/AArch64/neon-arith.c | 23 +++++++++++++++++++ 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 631c09355115..9acc4fcbd35e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4375,9 +4375,6 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vaddlv_u8: { llvm_unreachable("NEON::BI__builtin_neon_vaddlv_u8 NYI"); } - case NEON::BI__builtin_neon_vaddlv_u16: { - llvm_unreachable("NEON::BI__builtin_neon_vaddlv_u16 NYI"); - } case NEON::BI__builtin_neon_vaddlvq_u8: { llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_u8 NYI"); } @@ -4395,8 +4392,16 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vaddlv_s8: { llvm_unreachable("NEON::BI__builtin_neon_vaddlv_s8 NYI"); } + case NEON::BI__builtin_neon_vaddlv_u16: + usgn = true; + [[fallthrough]]; case NEON::BI__builtin_neon_vaddlv_s16: { - llvm_unreachable("NEON::BI__builtin_neon_vaddlv_s16 NYI"); + mlir::Type argTy = cir::VectorType::get(builder.getContext(), + usgn ? UInt16Ty : SInt16Ty, 4); + llvm::SmallVector argOps = {emitScalarExpr(E->getArg(0))}; + return emitNeonCall(builder, {argTy}, argOps, + usgn ? "aarch64.neon.uaddlv" : "aarch64.neon.saddlv", + usgn ? UInt32Ty : SInt32Ty, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vaddlvq_s8: { llvm_unreachable("NEON::BI__builtin_neon_vaddlvq_s8 NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c index 92a97831d52a..9426bd004f4d 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c +++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c @@ -905,6 +905,29 @@ int32_t test_vaddlvq_s16(int16x8_t a) { // LLVM: ret i32 [[VADDLV_I]] } + +int32_t test_vaddlv_s16(int16x4_t a) { + return vaddlv_s16(a); + + // CIR-LABEL: vaddlv_s16 + // CIR: cir.llvm.intrinsic "aarch64.neon.saddlv" {{%.*}}: (!cir.vector) -> !s32i + + // LLVM: {{.*}}test_vaddlv_s16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> [[A]]) + // LLVM: ret i32 [[VADDLV_I]] +} + +int32_t test_vaddlv_u16(uint16x4_t a) { + return vaddlv_u16(a); + + // CIR-LABEL: vaddlv_u16 + // CIR: cir.llvm.intrinsic "aarch64.neon.uaddlv" {{%.*}}: (!cir.vector) -> !u32i + + // LLVM: {{.*}}test_vaddlv_u16(<4 x i16>{{.*}}[[A:%.*]]) + // LLVM: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> [[A]]) + // LLVM: ret i32 [[VADDLV_I]] +} + uint16_t test_vaddv_u16(uint16x4_t a) { return vaddv_u16(a); From 23b5d051b41b70a5889d2e3f7f87ab8a919baf75 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Wed, 19 Feb 2025 20:22:35 +0100 Subject: [PATCH 2264/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vsrad_n_s64 (#1355) Lower `neon_vsrad_n_s64` --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 7 +++- clang/test/CIR/CodeGen/AArch64/neon.c | 33 +++++++++++++++---- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 9acc4fcbd35e..d8b2008ed531 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3860,7 +3860,12 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NEON::BI__builtin_neon_vshrd_n_u64 NYI"); } case NEON::BI__builtin_neon_vsrad_n_s64: { - llvm_unreachable("NEON::BI__builtin_neon_vsrad_n_s64 NYI"); + std::optional amt = + E->getArg(2)->getIntegerConstantExpr(getContext()); + uint64_t shiftAmt = + std::min(static_cast(63), amt->getZExtValue()); + return builder.createAdd(Ops[0], + builder.createShift(Ops[1], shiftAmt, false)); } case NEON::BI__builtin_neon_vsrad_n_u64: { llvm_unreachable("NEON::BI__builtin_neon_vsrad_n_u64 NYI"); diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 35c411cbeab4..ac8aead31bec 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -15219,13 +15219,32 @@ int64_t test_vshrd_n_s64(int64_t a) { // return vrshr_n_u64(a, 1); // } -// NYI-LABEL: @test_vsrad_n_s64( -// NYI: [[SHRD_N:%.*]] = ashr i64 %b, 63 -// NYI: [[TMP0:%.*]] = add i64 %a, [[SHRD_N]] -// NYI: ret i64 [[TMP0]] -// int64_t test_vsrad_n_s64(int64_t a, int64_t b) { -// return (int64_t)vsrad_n_s64(a, b, 63); -// } + +int64_t test_vsrad_n_s64(int64_t a, int64_t b) { + return (int64_t)vsrad_n_s64(a, b, 63); + + // CIR-LABEL: vsrad_n_s64 + // CIR: [[ASHR:%.*]] = cir.shift(right, {{%.*}} : !s64i, {{%.*}} : !s64i) -> !s64i + // CIR: {{.*}} = cir.binop(add, {{.*}}, [[ASHR]]) : !s64i + + // LLVM-LABEL: test_vsrad_n_s64( + // LLVM: [[SHRD_N:%.*]] = ashr i64 %1, 63 + // LLVM: [[TMP0:%.*]] = add i64 %0, [[SHRD_N]] + // LLVM: ret i64 [[TMP0]] +} + +int64_t test_vsrad_n_s64_2(int64_t a, int64_t b) { + return (int64_t)vsrad_n_s64(a, b, 64); + + // CIR-LABEL: vsrad_n_s64 + // CIR: [[ASHR:%.*]] = cir.shift(right, {{%.*}} : !s64i, {{%.*}} : !s64i) -> !s64i + // CIR: {{.*}} = cir.binop(add, {{.*}}, [[ASHR]]) : !s64i + + // LLVM-LABEL: test_vsrad_n_s64_2( + // LLVM: [[SHRD_N:%.*]] = ashr i64 %1, 63 + // LLVM: [[TMP0:%.*]] = add i64 %0, [[SHRD_N]] + // LLVM: ret i64 [[TMP0]] +} int64x1_t test_vsra_n_s64(int64x1_t a, int64x1_t b) { return vsra_n_s64(a, b, 1); From 19e50765d100ebfece997ca83d527257968d54ac Mon Sep 17 00:00:00 2001 From: Letu Ren Date: Thu, 20 Feb 2025 03:24:23 +0800 Subject: [PATCH 2265/2301] [CIR][CIRGen] Atomics: handle atomic_compare_exchange_weak (#1359) Traditional Clang implementation: https://github.com/llvm/clangir/blob/a0091e38f1027e35d17819e02ee1ae257a12d296/clang/lib/CodeGen/CGAtomic.cpp#L545-L550 --- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 3 ++- clang/test/CIR/CodeGen/atomic.cpp | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index 49fe86eb17b4..ddd6c78d20b3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -565,7 +565,8 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest, case AtomicExpr::AO__c11_atomic_compare_exchange_weak: case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: case AtomicExpr::AO__hip_atomic_compare_exchange_weak: - llvm_unreachable("NYI"); + emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2, + FailureOrder, Size, Order, Scope); return; case AtomicExpr::AO__atomic_compare_exchange: case AtomicExpr::AO__atomic_compare_exchange_n: diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index b737376833f3..fb1f792a4027 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -308,6 +308,21 @@ bool fi4c(atomic_int *i) { // LLVM-LABEL: @_Z4fi4cPU7_Atomici // LLVM: cmpxchg ptr {{.*}}, i32 {{.*}}, i32 {{.*}} seq_cst seq_cst, align 4 +bool fi4d(atomic_int *i) { + int cmp = 0; + return atomic_compare_exchange_weak(i, &cmp, 1); +} + +// CHECK-LABEL: @_Z4fi4dPU7_Atomici +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = seq_cst, failure = seq_cst) align(4) weak : (!s32i, !cir.bool) +// CHECK: %[[CMP:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool +// CHECK: cir.if %[[CMP:.*]] { +// CHECK: cir.store %old, {{.*}} : !s32i, !cir.ptr +// CHECK: } + +// LLVM-LABEL: @_Z4fi4dPU7_Atomici +// LLVM: cmpxchg weak ptr {{.*}}, i32 {{.*}}, i32 {{.*}} seq_cst seq_cst, align 4 + bool fsb(bool *c) { return __atomic_exchange_n(c, 1, memory_order_seq_cst); } From a99b65fd340fb42a9b8c033359198a326d20eee3 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Wed, 19 Feb 2025 20:25:23 +0100 Subject: [PATCH 2266/2301] [CIR][CIRGen][Builtin][Neon] Lower vget_lane_bf16, vduph_lane f16 and bf16 (#1360) Lower vget_lane_bf16, vduph_lane f16 and bf16 --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 3 +- .../CodeGen/AArch64/bf16-getset-intrinsics.c | 181 +++++++ .../AArch64/v8.2a-neon-intrinsics-generic.c | 495 ++++++++++++++++++ 3 files changed, 678 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/AArch64/bf16-getset-intrinsics.c create mode 100644 clang/test/CIR/CodeGen/AArch64/v8.2a-neon-intrinsics-generic.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index d8b2008ed531..2bd91898ca56 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3889,7 +3889,8 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vget_lane_bf16: case NEON::BI__builtin_neon_vduph_lane_bf16: case NEON::BI__builtin_neon_vduph_lane_f16: { - llvm_unreachable("NEON::BI__builtin_neon_vduph_lane_f16 NYI"); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + emitScalarExpr(E->getArg(1))); } case NEON::BI__builtin_neon_vgetq_lane_bf16: case NEON::BI__builtin_neon_vduph_laneq_bf16: diff --git a/clang/test/CIR/CodeGen/AArch64/bf16-getset-intrinsics.c b/clang/test/CIR/CodeGen/AArch64/bf16-getset-intrinsics.c new file mode 100644 index 000000000000..a8f643e82c5f --- /dev/null +++ b/clang/test/CIR/CodeGen/AArch64/bf16-getset-intrinsics.c @@ -0,0 +1,181 @@ +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +bf16 \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +bf16 \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-llvm -fno-clangir-call-conv-lowering -o - %s \ +// RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// REQUIRES: aarch64-registered-target || arm-registered-target + +// This test mimics clang/test/CodeGen/AArch64/bf16-getset-intrinsics.c, which eventually +// CIR shall be able to support fully. Since this is going to take some time to converge, +// the unsupported/NYI code is commented out, so that we can incrementally improve this. +// The NYI filecheck used contains the LLVM output from OG codegen that should guide the +// correct result when implementing this into the CIR pipeline. + +#include + +// CHECK-LABEL: @test_vcreate_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast i64 [[A:%.*]] to <4 x bfloat> +// CHECK-NEXT: ret <4 x bfloat> [[TMP0]] +// +// bfloat16x4_t test_vcreate_bf16(uint64_t a) { +// return vcreate_bf16(a); +// } + +// CHECK-LABEL: @test_vdup_n_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <4 x bfloat> poison, bfloat [[V:%.*]], i32 0 +// CHECK-NEXT: [[VECINIT1_I:%.*]] = insertelement <4 x bfloat> [[VECINIT_I]], bfloat [[V]], i32 1 +// CHECK-NEXT: [[VECINIT2_I:%.*]] = insertelement <4 x bfloat> [[VECINIT1_I]], bfloat [[V]], i32 2 +// CHECK-NEXT: [[VECINIT3_I:%.*]] = insertelement <4 x bfloat> [[VECINIT2_I]], bfloat [[V]], i32 3 +// CHECK-NEXT: ret <4 x bfloat> [[VECINIT3_I]] +// +// bfloat16x4_t test_vdup_n_bf16(bfloat16_t v) { +// return vdup_n_bf16(v); +// } + +// CHECK-LABEL: @test_vdupq_n_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <8 x bfloat> poison, bfloat [[V:%.*]], i32 0 +// CHECK-NEXT: [[VECINIT1_I:%.*]] = insertelement <8 x bfloat> [[VECINIT_I]], bfloat [[V]], i32 1 +// CHECK-NEXT: [[VECINIT2_I:%.*]] = insertelement <8 x bfloat> [[VECINIT1_I]], bfloat [[V]], i32 2 +// CHECK-NEXT: [[VECINIT3_I:%.*]] = insertelement <8 x bfloat> [[VECINIT2_I]], bfloat [[V]], i32 3 +// CHECK-NEXT: [[VECINIT4_I:%.*]] = insertelement <8 x bfloat> [[VECINIT3_I]], bfloat [[V]], i32 4 +// CHECK-NEXT: [[VECINIT5_I:%.*]] = insertelement <8 x bfloat> [[VECINIT4_I]], bfloat [[V]], i32 5 +// CHECK-NEXT: [[VECINIT6_I:%.*]] = insertelement <8 x bfloat> [[VECINIT5_I]], bfloat [[V]], i32 6 +// CHECK-NEXT: [[VECINIT7_I:%.*]] = insertelement <8 x bfloat> [[VECINIT6_I]], bfloat [[V]], i32 7 +// CHECK-NEXT: ret <8 x bfloat> [[VECINIT7_I]] +// +// bfloat16x8_t test_vdupq_n_bf16(bfloat16_t v) { +// return vdupq_n_bf16(v); +// } + +// CHECK-LABEL: @test_vdup_lane_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[V:%.*]] to <8 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x bfloat> +// CHECK-NEXT: [[LANE:%.*]] = shufflevector <4 x bfloat> [[TMP1]], <4 x bfloat> [[TMP1]], <4 x i32> +// CHECK-NEXT: ret <4 x bfloat> [[LANE]] +// +// bfloat16x4_t test_vdup_lane_bf16(bfloat16x4_t v) { +// return vdup_lane_bf16(v, 1); +// } + +// CHECK-LABEL: @test_vdupq_lane_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[V:%.*]] to <8 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x bfloat> +// CHECK-NEXT: [[LANE:%.*]] = shufflevector <4 x bfloat> [[TMP1]], <4 x bfloat> [[TMP1]], <8 x i32> +// CHECK-NEXT: ret <8 x bfloat> [[LANE]] +// +// bfloat16x8_t test_vdupq_lane_bf16(bfloat16x4_t v) { +// return vdupq_lane_bf16(v, 1); +// } + +// CHECK-LABEL: @test_vdup_laneq_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[V:%.*]] to <16 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x bfloat> +// CHECK-NEXT: [[LANE:%.*]] = shufflevector <8 x bfloat> [[TMP1]], <8 x bfloat> [[TMP1]], <4 x i32> +// CHECK-NEXT: ret <4 x bfloat> [[LANE]] +// +// bfloat16x4_t test_vdup_laneq_bf16(bfloat16x8_t v) { +// return vdup_laneq_bf16(v, 7); +// } + +// CHECK-LABEL: @test_vdupq_laneq_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[V:%.*]] to <16 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x bfloat> +// CHECK-NEXT: [[LANE:%.*]] = shufflevector <8 x bfloat> [[TMP1]], <8 x bfloat> [[TMP1]], <8 x i32> +// CHECK-NEXT: ret <8 x bfloat> [[LANE]] +// +// bfloat16x8_t test_vdupq_laneq_bf16(bfloat16x8_t v) { +// return vdupq_laneq_bf16(v, 7); +// } + +// CHECK-LABEL: @test_vcombine_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x bfloat> [[LOW:%.*]], <4 x bfloat> [[HIGH:%.*]], <8 x i32> +// CHECK-NEXT: ret <8 x bfloat> [[SHUFFLE_I]] +// +// bfloat16x8_t test_vcombine_bf16(bfloat16x4_t low, bfloat16x4_t high) { +// return vcombine_bf16(low, high); +// } + +// CHECK-LABEL: @test_vget_high_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[A:%.*]], <8 x bfloat> [[A]], <4 x i32> +// CHECK-NEXT: ret <4 x bfloat> [[SHUFFLE_I]] +// +// bfloat16x4_t test_vget_high_bf16(bfloat16x8_t a) { +// return vget_high_bf16(a); +// } + +// CHECK-LABEL: @test_vget_low_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[A:%.*]], <8 x bfloat> [[A]], <4 x i32> +// CHECK-NEXT: ret <4 x bfloat> [[SHUFFLE_I]] +// +// bfloat16x4_t test_vget_low_bf16(bfloat16x8_t a) { +// return vget_low_bf16(a); +// } + +bfloat16_t test_vget_lane_bf16(bfloat16x4_t v) { + return vget_lane_bf16(v, 1); + + // CIR-LABEL: vget_lane_bf16 + // CIR: [[TMP0:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[TMP1:%.*]] = cir.vec.extract {{.*}}[{{.*}} : !s32i] : !cir.vector + + // LLVM-LABEL: test_vget_lane_bf16 + // LLVM-SAME: (<4 x bfloat> [[VEC:%.*]]) + // LLVM: [[VGET_LANE:%.*]] = extractelement <4 x bfloat> [[VEC]], i32 1 + // LLVM: ret bfloat [[VGET_LANE]] +} + +// CHECK-LABEL: @test_vgetq_lane_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x bfloat> [[V:%.*]], i32 7 +// CHECK-NEXT: ret bfloat [[VGETQ_LANE]] +// +// bfloat16_t test_vgetq_lane_bf16(bfloat16x8_t v) { +// return vgetq_lane_bf16(v, 7); +// } + +// CHECK-LABEL: @test_vset_lane_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VSET_LANE:%.*]] = insertelement <4 x bfloat> [[V:%.*]], bfloat [[A:%.*]], i32 1 +// CHECK-NEXT: ret <4 x bfloat> [[VSET_LANE]] +// +// bfloat16x4_t test_vset_lane_bf16(bfloat16_t a, bfloat16x4_t v) { +// return vset_lane_bf16(a, v, 1); +// } + +// CHECK-LABEL: @test_vsetq_lane_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VSET_LANE:%.*]] = insertelement <8 x bfloat> [[V:%.*]], bfloat [[A:%.*]], i32 7 +// CHECK-NEXT: ret <8 x bfloat> [[VSET_LANE]] +// +// bfloat16x8_t test_vsetq_lane_bf16(bfloat16_t a, bfloat16x8_t v) { +// return vsetq_lane_bf16(a, v, 7); +// } + +bfloat16_t test_vduph_lane_bf16(bfloat16x4_t v) { + return vduph_lane_bf16(v, 1); + + // CIR-LABEL: vduph_lane_bf16 + // CIR: [[TMP0:%.*]] = cir.const #cir.int<1> : !s32i + // CIR: [[TMP1:%.*]] = cir.vec.extract {{.*}}[{{.*}} : !s32i] : !cir.vector + + // LLVM-LABEL: test_vduph_lane_bf16 + // LLVM-SAME: (<4 x bfloat> [[VEC:%.*]]) + // LLVM: [[VGET_LANE:%.*]] = extractelement <4 x bfloat> [[VEC]], i32 1 + // LLVM: ret bfloat [[VGET_LANE]] +} diff --git a/clang/test/CIR/CodeGen/AArch64/v8.2a-neon-intrinsics-generic.c b/clang/test/CIR/CodeGen/AArch64/v8.2a-neon-intrinsics-generic.c new file mode 100644 index 000000000000..2de4862258a1 --- /dev/null +++ b/clang/test/CIR/CodeGen/AArch64/v8.2a-neon-intrinsics-generic.c @@ -0,0 +1,495 @@ +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 -target-feature +v8.2a \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 -target-feature +v8.2a \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-llvm -fno-clangir-call-conv-lowering -o - %s \ +// RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// REQUIRES: aarch64-registered-target || arm-registered-target + +// This test mimics clang/test/CodeGen/AArch64/v8.2a-neon-intrinsics.c, which eventually +// CIR shall be able to support fully. Since this is going to take some time to converge, +// the unsupported/NYI code is commented out, so that we can incrementally improve this. +// The NYI filecheck used contains the LLVM output from OG codegen that should guide the +// correct result when implementing this into the CIR pipeline. + +#include + +// CHECK-LABEL: define {{[^@]+}}@test_vbsl_f16 +// CHECK-SAME: (<4 x i16> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]], <4 x half> noundef [[C:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B]] to <8 x i8> +// CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x half> [[C]] to <8 x i8> +// CHECK-NEXT: [[VBSL1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> +// CHECK-NEXT: [[VBSL2_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> +// CHECK-NEXT: [[VBSL3_I:%.*]] = and <4 x i16> [[A]], [[VBSL1_I]] +// CHECK-NEXT: [[TMP3:%.*]] = xor <4 x i16> [[A]], splat (i16 -1) +// CHECK-NEXT: [[VBSL4_I:%.*]] = and <4 x i16> [[TMP3]], [[VBSL2_I]] +// CHECK-NEXT: [[VBSL5_I:%.*]] = or <4 x i16> [[VBSL3_I]], [[VBSL4_I]] +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[VBSL5_I]] to <4 x half> +// CHECK-NEXT: ret <4 x half> [[TMP4]] +// +// float16x4_t test_vbsl_f16(uint16x4_t a, float16x4_t b, float16x4_t c) { +// return vbsl_f16(a, b, c); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vbslq_f16 +// CHECK-SAME: (<8 x i16> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]], <8 x half> noundef [[C:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B]] to <16 x i8> +// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x half> [[C]] to <16 x i8> +// CHECK-NEXT: [[VBSL1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> +// CHECK-NEXT: [[VBSL2_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> +// CHECK-NEXT: [[VBSL3_I:%.*]] = and <8 x i16> [[A]], [[VBSL1_I]] +// CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i16> [[A]], splat (i16 -1) +// CHECK-NEXT: [[VBSL4_I:%.*]] = and <8 x i16> [[TMP3]], [[VBSL2_I]] +// CHECK-NEXT: [[VBSL5_I:%.*]] = or <8 x i16> [[VBSL3_I]], [[VBSL4_I]] +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[VBSL5_I]] to <8 x half> +// CHECK-NEXT: ret <8 x half> [[TMP4]] +// +// float16x8_t test_vbslq_f16(uint16x8_t a, float16x8_t b, float16x8_t c) { +// return vbslq_f16(a, b, c); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vzip_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[RETVAL_I:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T:%.*]], align 8 +// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[A]] to <8 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B]] to <8 x i8> +// CHECK-NEXT: [[VZIP_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: store <4 x half> [[VZIP_I]], ptr [[RETVAL_I]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds <4 x half>, ptr [[RETVAL_I]], i32 1 +// CHECK-NEXT: [[VZIP1_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: store <4 x half> [[VZIP1_I]], ptr [[TMP2]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL_I]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL]], i32 0, i32 0 +// CHECK-NEXT: [[TMP5:%.*]] = extractvalue [[STRUCT_FLOAT16X4X2_T]] [[TMP3]], 0 +// CHECK-NEXT: store [2 x <4 x half>] [[TMP5]], ptr [[TMP4]], align 8 +// CHECK-NEXT: [[TMP6:%.*]] = load [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL]], align 8 +// CHECK-NEXT: ret [[STRUCT_FLOAT16X4X2_T]] [[TMP6]] +// +// float16x4x2_t test_vzip_f16(float16x4_t a, float16x4_t b) { +// return vzip_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vzipq_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[RETVAL_I:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T:%.*]], align 16 +// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half> [[A]] to <16 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B]] to <16 x i8> +// CHECK-NEXT: [[VZIP_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: store <8 x half> [[VZIP_I]], ptr [[RETVAL_I]], align 16 +// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds <8 x half>, ptr [[RETVAL_I]], i32 1 +// CHECK-NEXT: [[VZIP1_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: store <8 x half> [[VZIP1_I]], ptr [[TMP2]], align 16 +// CHECK-NEXT: [[TMP3:%.*]] = load [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL_I]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL]], i32 0, i32 0 +// CHECK-NEXT: [[TMP5:%.*]] = extractvalue [[STRUCT_FLOAT16X8X2_T]] [[TMP3]], 0 +// CHECK-NEXT: store [2 x <8 x half>] [[TMP5]], ptr [[TMP4]], align 16 +// CHECK-NEXT: [[TMP6:%.*]] = load [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL]], align 16 +// CHECK-NEXT: ret [[STRUCT_FLOAT16X8X2_T]] [[TMP6]] +// +// float16x8x2_t test_vzipq_f16(float16x8_t a, float16x8_t b) { +// return vzipq_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vuzp_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[RETVAL_I:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T:%.*]], align 8 +// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[A]] to <8 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B]] to <8 x i8> +// CHECK-NEXT: [[VUZP_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: store <4 x half> [[VUZP_I]], ptr [[RETVAL_I]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds <4 x half>, ptr [[RETVAL_I]], i32 1 +// CHECK-NEXT: [[VUZP1_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: store <4 x half> [[VUZP1_I]], ptr [[TMP2]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL_I]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL]], i32 0, i32 0 +// CHECK-NEXT: [[TMP5:%.*]] = extractvalue [[STRUCT_FLOAT16X4X2_T]] [[TMP3]], 0 +// CHECK-NEXT: store [2 x <4 x half>] [[TMP5]], ptr [[TMP4]], align 8 +// CHECK-NEXT: [[TMP6:%.*]] = load [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL]], align 8 +// CHECK-NEXT: ret [[STRUCT_FLOAT16X4X2_T]] [[TMP6]] +// +// float16x4x2_t test_vuzp_f16(float16x4_t a, float16x4_t b) { +// return vuzp_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vuzpq_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[RETVAL_I:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T:%.*]], align 16 +// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half> [[A]] to <16 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B]] to <16 x i8> +// CHECK-NEXT: [[VUZP_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: store <8 x half> [[VUZP_I]], ptr [[RETVAL_I]], align 16 +// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds <8 x half>, ptr [[RETVAL_I]], i32 1 +// CHECK-NEXT: [[VUZP1_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: store <8 x half> [[VUZP1_I]], ptr [[TMP2]], align 16 +// CHECK-NEXT: [[TMP3:%.*]] = load [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL_I]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL]], i32 0, i32 0 +// CHECK-NEXT: [[TMP5:%.*]] = extractvalue [[STRUCT_FLOAT16X8X2_T]] [[TMP3]], 0 +// CHECK-NEXT: store [2 x <8 x half>] [[TMP5]], ptr [[TMP4]], align 16 +// CHECK-NEXT: [[TMP6:%.*]] = load [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL]], align 16 +// CHECK-NEXT: ret [[STRUCT_FLOAT16X8X2_T]] [[TMP6]] +// +// float16x8x2_t test_vuzpq_f16(float16x8_t a, float16x8_t b) { +// return vuzpq_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vtrn_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[RETVAL_I:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T:%.*]], align 8 +// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[A]] to <8 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B]] to <8 x i8> +// CHECK-NEXT: [[VTRN_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: store <4 x half> [[VTRN_I]], ptr [[RETVAL_I]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds <4 x half>, ptr [[RETVAL_I]], i32 1 +// CHECK-NEXT: [[VTRN1_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: store <4 x half> [[VTRN1_I]], ptr [[TMP2]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL_I]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL]], i32 0, i32 0 +// CHECK-NEXT: [[TMP5:%.*]] = extractvalue [[STRUCT_FLOAT16X4X2_T]] [[TMP3]], 0 +// CHECK-NEXT: store [2 x <4 x half>] [[TMP5]], ptr [[TMP4]], align 8 +// CHECK-NEXT: [[TMP6:%.*]] = load [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL]], align 8 +// CHECK-NEXT: ret [[STRUCT_FLOAT16X4X2_T]] [[TMP6]] +// +// float16x4x2_t test_vtrn_f16(float16x4_t a, float16x4_t b) { +// return vtrn_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vtrnq_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[RETVAL_I:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T:%.*]], align 16 +// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half> [[A]] to <16 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B]] to <16 x i8> +// CHECK-NEXT: [[VTRN_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: store <8 x half> [[VTRN_I]], ptr [[RETVAL_I]], align 16 +// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds <8 x half>, ptr [[RETVAL_I]], i32 1 +// CHECK-NEXT: [[VTRN1_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: store <8 x half> [[VTRN1_I]], ptr [[TMP2]], align 16 +// CHECK-NEXT: [[TMP3:%.*]] = load [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL_I]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL]], i32 0, i32 0 +// CHECK-NEXT: [[TMP5:%.*]] = extractvalue [[STRUCT_FLOAT16X8X2_T]] [[TMP3]], 0 +// CHECK-NEXT: store [2 x <8 x half>] [[TMP5]], ptr [[TMP4]], align 16 +// CHECK-NEXT: [[TMP6:%.*]] = load [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL]], align 16 +// CHECK-NEXT: ret [[STRUCT_FLOAT16X8X2_T]] [[TMP6]] +// +// float16x8x2_t test_vtrnq_f16(float16x8_t a, float16x8_t b) { +// return vtrnq_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vmov_n_f16 +// CHECK-SAME: (half noundef [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> poison, half [[A]], i32 0 +// CHECK-NEXT: [[VECINIT1:%.*]] = insertelement <4 x half> [[VECINIT]], half [[A]], i32 1 +// CHECK-NEXT: [[VECINIT2:%.*]] = insertelement <4 x half> [[VECINIT1]], half [[A]], i32 2 +// CHECK-NEXT: [[VECINIT3:%.*]] = insertelement <4 x half> [[VECINIT2]], half [[A]], i32 3 +// CHECK-NEXT: ret <4 x half> [[VECINIT3]] +// +// float16x4_t test_vmov_n_f16(float16_t a) { +// return vmov_n_f16(a); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vmovq_n_f16 +// CHECK-SAME: (half noundef [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> poison, half [[A]], i32 0 +// CHECK-NEXT: [[VECINIT1:%.*]] = insertelement <8 x half> [[VECINIT]], half [[A]], i32 1 +// CHECK-NEXT: [[VECINIT2:%.*]] = insertelement <8 x half> [[VECINIT1]], half [[A]], i32 2 +// CHECK-NEXT: [[VECINIT3:%.*]] = insertelement <8 x half> [[VECINIT2]], half [[A]], i32 3 +// CHECK-NEXT: [[VECINIT4:%.*]] = insertelement <8 x half> [[VECINIT3]], half [[A]], i32 4 +// CHECK-NEXT: [[VECINIT5:%.*]] = insertelement <8 x half> [[VECINIT4]], half [[A]], i32 5 +// CHECK-NEXT: [[VECINIT6:%.*]] = insertelement <8 x half> [[VECINIT5]], half [[A]], i32 6 +// CHECK-NEXT: [[VECINIT7:%.*]] = insertelement <8 x half> [[VECINIT6]], half [[A]], i32 7 +// CHECK-NEXT: ret <8 x half> [[VECINIT7]] +// +// float16x8_t test_vmovq_n_f16(float16_t a) { +// return vmovq_n_f16(a); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vdup_n_f16 +// CHECK-SAME: (half noundef [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> poison, half [[A]], i32 0 +// CHECK-NEXT: [[VECINIT1:%.*]] = insertelement <4 x half> [[VECINIT]], half [[A]], i32 1 +// CHECK-NEXT: [[VECINIT2:%.*]] = insertelement <4 x half> [[VECINIT1]], half [[A]], i32 2 +// CHECK-NEXT: [[VECINIT3:%.*]] = insertelement <4 x half> [[VECINIT2]], half [[A]], i32 3 +// CHECK-NEXT: ret <4 x half> [[VECINIT3]] +// +// float16x4_t test_vdup_n_f16(float16_t a) { +// return vdup_n_f16(a); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vdupq_n_f16 +// CHECK-SAME: (half noundef [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> poison, half [[A]], i32 0 +// CHECK-NEXT: [[VECINIT1:%.*]] = insertelement <8 x half> [[VECINIT]], half [[A]], i32 1 +// CHECK-NEXT: [[VECINIT2:%.*]] = insertelement <8 x half> [[VECINIT1]], half [[A]], i32 2 +// CHECK-NEXT: [[VECINIT3:%.*]] = insertelement <8 x half> [[VECINIT2]], half [[A]], i32 3 +// CHECK-NEXT: [[VECINIT4:%.*]] = insertelement <8 x half> [[VECINIT3]], half [[A]], i32 4 +// CHECK-NEXT: [[VECINIT5:%.*]] = insertelement <8 x half> [[VECINIT4]], half [[A]], i32 5 +// CHECK-NEXT: [[VECINIT6:%.*]] = insertelement <8 x half> [[VECINIT5]], half [[A]], i32 6 +// CHECK-NEXT: [[VECINIT7:%.*]] = insertelement <8 x half> [[VECINIT6]], half [[A]], i32 7 +// CHECK-NEXT: ret <8 x half> [[VECINIT7]] +// +// float16x8_t test_vdupq_n_f16(float16_t a) { +// return vdupq_n_f16(a); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vdup_lane_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[A]] to <8 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half> +// CHECK-NEXT: [[LANE:%.*]] = shufflevector <4 x half> [[TMP1]], <4 x half> [[TMP1]], <4 x i32> +// CHECK-NEXT: ret <4 x half> [[LANE]] +// +// float16x4_t test_vdup_lane_f16(float16x4_t a) { +// return vdup_lane_f16(a, 3); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vdupq_lane_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[A]] to <8 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half> +// CHECK-NEXT: [[LANE:%.*]] = shufflevector <4 x half> [[TMP1]], <4 x half> [[TMP1]], <8 x i32> +// CHECK-NEXT: ret <8 x half> [[LANE]] +// +// float16x8_t test_vdupq_lane_f16(float16x4_t a) { +// return vdupq_lane_f16(a, 3); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vdup_laneq_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half> [[A]] to <16 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half> +// CHECK-NEXT: [[LANE:%.*]] = shufflevector <8 x half> [[TMP1]], <8 x half> [[TMP1]], <4 x i32> +// CHECK-NEXT: ret <4 x half> [[LANE]] +// +// float16x4_t test_vdup_laneq_f16(float16x8_t a) { +// return vdup_laneq_f16(a, 1); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vdupq_laneq_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half> [[A]] to <16 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half> +// CHECK-NEXT: [[LANE:%.*]] = shufflevector <8 x half> [[TMP1]], <8 x half> [[TMP1]], <8 x i32> +// CHECK-NEXT: ret <8 x half> [[LANE]] +// +// float16x8_t test_vdupq_laneq_f16(float16x8_t a) { +// return vdupq_laneq_f16(a, 7); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vext_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[A]] to <8 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B]] to <8 x i8> +// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half> +// CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half> +// CHECK-NEXT: [[VEXT:%.*]] = shufflevector <4 x half> [[TMP2]], <4 x half> [[TMP3]], <4 x i32> +// CHECK-NEXT: ret <4 x half> [[VEXT]] +// +// float16x4_t test_vext_f16(float16x4_t a, float16x4_t b) { +// return vext_f16(a, b, 2); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vextq_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half> [[A]] to <16 x i8> +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B]] to <16 x i8> +// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half> +// CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half> +// CHECK-NEXT: [[VEXT:%.*]] = shufflevector <8 x half> [[TMP2]], <8 x half> [[TMP3]], <8 x i32> +// CHECK-NEXT: ret <8 x half> [[VEXT]] +// +// float16x8_t test_vextq_f16(float16x8_t a, float16x8_t b) { +// return vextq_f16(a, b, 5); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vrev64_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[A]], <4 x i32> +// CHECK-NEXT: ret <4 x half> [[SHUFFLE_I]] +// +// float16x4_t test_vrev64_f16(float16x4_t a) { +// return vrev64_f16(a); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vrev64q_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[A]], <8 x i32> +// CHECK-NEXT: ret <8 x half> [[SHUFFLE_I]] +// +// float16x8_t test_vrev64q_f16(float16x8_t a) { +// return vrev64q_f16(a); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vzip1_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: ret <4 x half> [[SHUFFLE_I]] +// +// float16x4_t test_vzip1_f16(float16x4_t a, float16x4_t b) { +// return vzip1_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vzip1q_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: ret <8 x half> [[SHUFFLE_I]] +// +// float16x8_t test_vzip1q_f16(float16x8_t a, float16x8_t b) { +// return vzip1q_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vzip2_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: ret <4 x half> [[SHUFFLE_I]] +// +// float16x4_t test_vzip2_f16(float16x4_t a, float16x4_t b) { +// return vzip2_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vzip2q_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: ret <8 x half> [[SHUFFLE_I]] +// +// float16x8_t test_vzip2q_f16(float16x8_t a, float16x8_t b) { +// return vzip2q_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vuzp1_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: ret <4 x half> [[SHUFFLE_I]] +// +// float16x4_t test_vuzp1_f16(float16x4_t a, float16x4_t b) { +// return vuzp1_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vuzp1q_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: ret <8 x half> [[SHUFFLE_I]] +// +// float16x8_t test_vuzp1q_f16(float16x8_t a, float16x8_t b) { +// return vuzp1q_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vuzp2_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: ret <4 x half> [[SHUFFLE_I]] +// +// float16x4_t test_vuzp2_f16(float16x4_t a, float16x4_t b) { +// return vuzp2_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vuzp2q_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: ret <8 x half> [[SHUFFLE_I]] +// +// float16x8_t test_vuzp2q_f16(float16x8_t a, float16x8_t b) { +// return vuzp2q_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vtrn1_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: ret <4 x half> [[SHUFFLE_I]] +// +// float16x4_t test_vtrn1_f16(float16x4_t a, float16x4_t b) { +// return vtrn1_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vtrn1q_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: ret <8 x half> [[SHUFFLE_I]] +// +// float16x8_t test_vtrn1q_f16(float16x8_t a, float16x8_t b) { +// return vtrn1q_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vtrn2_f16 +// CHECK-SAME: (<4 x half> noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[B]], <4 x i32> +// CHECK-NEXT: ret <4 x half> [[SHUFFLE_I]] +// +// float16x4_t test_vtrn2_f16(float16x4_t a, float16x4_t b) { +// return vtrn2_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vtrn2q_f16 +// CHECK-SAME: (<8 x half> noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[B]], <8 x i32> +// CHECK-NEXT: ret <8 x half> [[SHUFFLE_I]] +// +// float16x8_t test_vtrn2q_f16(float16x8_t a, float16x8_t b) { +// return vtrn2q_f16(a, b); +// } + +// CHECK-LABEL: define {{[^@]+}}@test_vduph_laneq_f16 +// CHECK-SAME: (<8 x half> noundef [[VEC:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x half> [[VEC]], i32 7 +// CHECK-NEXT: ret half [[VGETQ_LANE]] +// +// float16_t test_vduph_laneq_f16(float16x8_t vec) { +// return vduph_laneq_f16(vec, 7); +// } + +float16_t test_vduph_lane_f16(float16x4_t vec) { + return vduph_lane_f16(vec, 3); + + // CIR-LABEL: vduph_lane_f16 + // CIR: [[TMP0:%.*]] = cir.const #cir.int<3> : !s32i + // CIR: [[TMP1:%.*]] = cir.vec.extract {{.*}}[{{.*}} : !s32i] : !cir.vector + + // LLVM-LABEL: test_vduph_lane_f16 + // LLVM-SAME: (<4 x half> [[VEC:%.*]]) + // LLVM: [[VGET_LANE:%.*]] = extractelement <4 x half> [[VEC]], i32 3 + // LLVM: ret half [[VGET_LANE]] +} From dbe544db0d12befa4bab958c7a371396ecedf6dc Mon Sep 17 00:00:00 2001 From: Letu Ren Date: Thu, 20 Feb 2025 03:28:55 +0800 Subject: [PATCH 2267/2301] [CIR][CIRGen] handle `__builtin_elementwise_acos` (#1362) Traditional Clang implementation: https://github.com/llvm/clangir/blob/a0091e38f1027e35d17819e02ee1ae257a12d296/clang/lib/CodeGen/CGBuiltin.cpp#L4116-L4118 I use the first argument type as the return type. It is OK for `__builtin_elementwise_acos`, however, I'm not sure it is OK for other builtin functions. Resolves: https://github.com/llvm/clangir/issues/1361 --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 5 +++-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 13 ++++++++++++ clang/test/CIR/CodeGen/builtins-elementwise.c | 21 +++++++++++++++++++ 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index a5f899320a86..572ef5d6022c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1356,8 +1356,9 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, mlir::Value result = call->getResult(0); return RValue::get(result); } - case Builtin::BI__builtin_elementwise_acos: - llvm_unreachable("BI__builtin_elementwise_acos NYI"); + case Builtin::BI__builtin_elementwise_acos: { + return emitBuiltinWithOneOverloadedType<1>(E, "acos"); + } case Builtin::BI__builtin_elementwise_asin: llvm_unreachable("BI__builtin_elementwise_asin NYI"); case Builtin::BI__builtin_elementwise_atan: diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index bbf1024951db..44484bcd2fe3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1403,6 +1403,19 @@ class CIRGenFunction : public CIRGenTypeCache { RValue emitBuiltinExpr(const clang::GlobalDecl GD, unsigned BuiltinID, const clang::CallExpr *E, ReturnValueSlot ReturnValue); RValue emitRotate(const CallExpr *E, bool IsRotateRight); + template + RValue emitBuiltinWithOneOverloadedType(const CallExpr *E, + llvm::StringRef Name) { + static_assert(N, "expect non-empty argument"); + mlir::Type cirTy = convertType(E->getArg(0)->getType()); + SmallVector args; + for (uint32_t i = 0; i < N; ++i) { + args.push_back(emitScalarExpr(E->getArg(i))); + } + const auto call = builder.create( + getLoc(E->getExprLoc()), builder.getStringAttr(Name), cirTy, args); + return RValue::get(call->getResult(0)); + } mlir::Value emitTargetBuiltinExpr(unsigned BuiltinID, const clang::CallExpr *E, ReturnValueSlot ReturnValue); diff --git a/clang/test/CIR/CodeGen/builtins-elementwise.c b/clang/test/CIR/CodeGen/builtins-elementwise.c index b790588605f4..80e238e0c445 100644 --- a/clang/test/CIR/CodeGen/builtins-elementwise.c +++ b/clang/test/CIR/CodeGen/builtins-elementwise.c @@ -36,3 +36,24 @@ void test_builtin_elementwise_abs(vint4 vi4, int i, float f, double d, // LLVM: {{%.*}} = call <4 x double> @llvm.fabs.v4f64(<4 x double> {{%.*}}) vd4 = __builtin_elementwise_abs(vd4); } + +void test_builtin_elementwise_acos(float f, double d, vfloat4 vf4, + vdouble4 vd4) { + // CIR-LABEL: test_builtin_elementwise_acos + // LLVM-LABEL: test_builtin_elementwise_acos + // CIR: {{%.*}} = cir.llvm.intrinsic "acos" {{%.*}} : (!cir.float) -> !cir.float + // LLVM: {{%.*}} = call float @llvm.acos.f32(float {{%.*}}) + f = __builtin_elementwise_acos(f); + + // CIR: {{%.*}} = cir.llvm.intrinsic "acos" {{%.*}} : (!cir.double) -> !cir.double + // LLVM: {{%.*}} = call double @llvm.acos.f64(double {{%.*}}) + d = __builtin_elementwise_acos(d); + + // CIR: {{%.*}} = cir.llvm.intrinsic "acos" {{%.*}} : (!cir.vector) -> !cir.vector + // LLVM: {{%.*}} = call <4 x float> @llvm.acos.v4f32(<4 x float> {{%.*}}) + vf4 = __builtin_elementwise_acos(vf4); + + // CIR: {{%.*}} = cir.llvm.intrinsic "acos" {{%.*}} : (!cir.vector) -> !cir.vector + // LLVM: {{%.*}} = call <4 x double> @llvm.acos.v4f64(<4 x double> {{%.*}}) + vd4 = __builtin_elementwise_acos(vd4); +} From 2b9428565bb849218e6f53f940f4dbf3bac49515 Mon Sep 17 00:00:00 2001 From: Letu Ren Date: Thu, 20 Feb 2025 03:30:08 +0800 Subject: [PATCH 2268/2301] [CIR][CIRGen] handle vectors of size 3 like size 4 for better performance (#1363) This PR removes XFAIL test_vec3 --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 10 +++++++++- clang/lib/CIR/CodeGen/TargetInfo.cpp | 2 +- clang/test/CIR/CodeGen/vectype-ext.cpp | 1 - 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 4d4dd663e0dc..a6121122fe34 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -39,6 +39,8 @@ #include "mlir/IR/Operation.h" #include "mlir/IR/Value.h" +#include + using namespace clang; using namespace clang::CIRGen; using namespace cir; @@ -2936,7 +2938,13 @@ mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile, CGM.getABIInfo().getOptimalVectorMemoryType(vTy, getLangOpts()); if (vTy != newVecTy) { - llvm_unreachable("NYI"); + const Address cast = addr.withElementType(newVecTy); + mlir::Value v = builder.createLoad(loc, cast, isVolatile); + const uint64_t oldNumElements = vTy.getSize(); + SmallVector mask(oldNumElements); + std::iota(mask.begin(), mask.end(), 0); + v = builder.createVecShuffle(loc, v, mask); + return emitFromMemory(v, ty); } } diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp index a10980a6e66b..ffc3f4666a78 100644 --- a/clang/lib/CIR/CodeGen/TargetInfo.cpp +++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp @@ -378,7 +378,7 @@ cir::VectorType ABIInfo::getOptimalVectorMemoryType(cir::VectorType T, const clang::LangOptions &Opt) const { if (T.getSize() == 3 && !Opt.PreserveVec3Type) { - llvm_unreachable("NYI"); + return cir::VectorType::get(&CGT.getMLIRContext(), T.getEltType(), 4); } return T; } diff --git a/clang/test/CIR/CodeGen/vectype-ext.cpp b/clang/test/CIR/CodeGen/vectype-ext.cpp index e4cc9927894f..d7702f1f9658 100644 --- a/clang/test/CIR/CodeGen/vectype-ext.cpp +++ b/clang/test/CIR/CodeGen/vectype-ext.cpp @@ -2,7 +2,6 @@ // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR // RUN: %clang_cc1 -std=c++17 -fclangir -emit-llvm -triple x86_64-unknown-linux-gnu %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=LLVM -// XFAIL: * typedef int vi4 __attribute__((ext_vector_type(4))); typedef int vi3 __attribute__((ext_vector_type(3))); From b510e50a9a838def30622126b07d03882a1144aa Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Wed, 19 Feb 2025 19:32:14 +0000 Subject: [PATCH 2269/2301] [CIR][CUDA] Handle shared and local variables (#1368) CUDA shared variables are device-only, accessible from all threads in a block of some kernel. It's similar to `local` variables in OpenCL which all threads in a work-group can access. Hence they are realized as `static` variables in addrspace(local). On the other hand, the local variables inside a kernel (without special attributes) are just regular variables, typically emitted by `CreateTempAlloca`. They are in the default address space. OG checks if the expected address space, denoted by the type, is the same as the actual address space indicated by attributes. If they aren't the same, a `addrspacecast` is emitted when a global variable is accessed. In CIR however, `cir.get_global` alreadys carries that information in `!cir.ptr` type, so we don't need a cast. --- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 16 +++++++++++----- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 14 ++++++++++++-- clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 4 +++- clang/test/CIR/CodeGen/CUDA/address-spaces.cu | 19 +++++++++++++++++++ 4 files changed, 45 insertions(+), 8 deletions(-) create mode 100644 clang/test/CIR/CodeGen/CUDA/address-spaces.cu diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index da646e24333d..333bbf0e4c95 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -479,9 +479,10 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, // OpenCL variables in local address space and CUDA shared // variables cannot have an initializer. mlir::Attribute Init = nullptr; - if (D.hasAttr() || D.hasAttr()) + if (D.hasAttr()) llvm_unreachable("CUDA is NYI"); - else if (Ty.getAddressSpace() != LangAS::opencl_local) + else if (Ty.getAddressSpace() != LangAS::opencl_local && + !D.hasAttr()) Init = builder.getZeroInitAttr(convertType(Ty)); cir::GlobalOp GV = builder.createVersionedGlobal( @@ -499,9 +500,14 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &D, setGVProperties(GV, &D); - // Make sure the result is of the correct type. - if (AS != builder.getAddrSpaceAttr(Ty.getAddressSpace())) - llvm_unreachable("address space cast NYI"); + // OG checks if the expected address space, denoted by the type, is the + // same as the actual address space indicated by attributes. If they aren't + // the same, an addrspacecast is emitted when this variable is accessed. + // In CIR however, cir.get_global alreadys carries that information in + // !cir.ptr type - if this global is in OpenCL local address space, then its + // type would be !cir.ptr<..., addrspace(offload_local)>. Therefore we don't + // need an explicit address space cast in CIR: they will get emitted when + // lowering to LLVM IR. // Ensure that the static local gets initialized by making sure the parent // function gets emitted eventually. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 10dbd85edc4b..ac2125cf717c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1604,7 +1604,14 @@ LangAS CIRGenModule::getGlobalConstantAddressSpace() const { LangAS CIRGenModule::getLangTempAllocaAddressSpace() const { if (getLangOpts().OpenCL) return LangAS::opencl_private; - if (getLangOpts().SYCLIsDevice || getLangOpts().CUDAIsDevice || + + // For temporaries inside functions, CUDA treats them as normal variables. + // LangAS::cuda_device, on the other hand, is reserved for those variables + // explicitly marked with __device__. + if (getLangOpts().CUDAIsDevice) + return LangAS::Default; + + if (getLangOpts().SYCLIsDevice || (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)) llvm_unreachable("NYI"); return LangAS::Default; @@ -3983,8 +3990,11 @@ LangAS CIRGenModule::getGlobalVarAddressSpace(const VarDecl *D) { (!D || D->getType().getAddressSpace() == LangAS::Default)) llvm_unreachable("NYI"); - if (langOpts.CUDA && langOpts.CUDAIsDevice) + if (langOpts.CUDA && langOpts.CUDAIsDevice) { + if (D && D->hasAttr()) + return LangAS::cuda_shared; llvm_unreachable("NYI"); + } if (langOpts.OpenMP) llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp index 9dbf12ad138c..1edc09f2183e 100644 --- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp @@ -645,6 +645,9 @@ AddressSpaceAttr::getValueFromLangAS(clang::LangAS langAS) { case LangAS::opencl_global: return Kind::offload_global; case LangAS::opencl_local: + case LangAS::cuda_shared: + // Local means local among the work-group (OpenCL) or block (CUDA). + // All threads inside the kernel can access local memory. return Kind::offload_local; case LangAS::opencl_constant: return Kind::offload_constant; @@ -657,7 +660,6 @@ AddressSpaceAttr::getValueFromLangAS(clang::LangAS langAS) { case LangAS::opencl_global_host: case LangAS::cuda_device: case LangAS::cuda_constant: - case LangAS::cuda_shared: case LangAS::sycl_global: case LangAS::sycl_global_device: case LangAS::sycl_global_host: diff --git a/clang/test/CIR/CodeGen/CUDA/address-spaces.cu b/clang/test/CIR/CodeGen/CUDA/address-spaces.cu new file mode 100644 index 000000000000..364ab58742c3 --- /dev/null +++ b/clang/test/CIR/CodeGen/CUDA/address-spaces.cu @@ -0,0 +1,19 @@ +#include "../Inputs/cuda.h" + +// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fclangir \ +// RUN: -fcuda-is-device -emit-cir -target-sdk-version=12.3 \ +// RUN: %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +__global__ void fn() { + int i = 0; + __shared__ int j; + j = i; +} + +// CIR: cir.global "private" internal dsolocal addrspace(offload_local) @_ZZ2fnvE1j : !s32i +// CIR: cir.func @_Z2fnv +// CIR: [[Local:%[0-9]+]] = cir.alloca !s32i, !cir.ptr, ["i", init] +// CIR: [[Shared:%[0-9]+]] = cir.get_global @_ZZ2fnvE1j : !cir.ptr +// CIR: [[Tmp:%[0-9]+]] = cir.load [[Local]] : !cir.ptr, !s32i +// CIR: cir.store [[Tmp]], [[Shared]] : !s32i, !cir.ptr From 74e4af3c22f61021adedd75b3aa09d92453bf18f Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Wed, 19 Feb 2025 11:33:19 -0800 Subject: [PATCH 2270/2301] [CIR] Add handling for initializing multi-dimension arrays (#1369) Add an implementation for array new initialization of multidimension arrays. This is able to leverage existing array element initialization with just a few changes to update the pointer types. --- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 9 ++++--- clang/test/CIR/CodeGen/new.cpp | 35 +++++++++++++++++++++++++ clang/test/CIR/Lowering/new.cpp | 20 ++++++++++++++ 3 files changed, 61 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index bca09c535c91..4e3774e0b3b9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -1048,7 +1048,10 @@ void CIRGenFunction::emitNewArrayInitializer( QualType AllocType = E->getAllocatedType(); if (const ConstantArrayType *CAT = dyn_cast_or_null( AllocType->getAsArrayTypeUnsafe())) { - llvm_unreachable("NYI"); + ElementTy = convertTypeForMem(AllocType); + auto CastOp = builder.createPtrBitcast(CurPtr.getPointer(), ElementTy); + CurPtr = Address(CastOp, ElementTy, CurPtr.getAlignment()); + InitListElements *= getContext().getConstantArrayElementCount(CAT); } // Enter a partial-destruction Cleanup if necessary. @@ -1070,10 +1073,10 @@ void CIRGenFunction::emitNewArrayInitializer( AggValueSlot::DoesNotOverlap); auto Loc = getLoc(IE->getExprLoc()); auto CastOp = builder.createPtrBitcast(CurPtr.getPointer(), - convertTypeForMem(AllocType)); + CurPtr.getElementType()); auto OffsetOp = builder.getSignedInt(Loc, 1, /*width=*/32); auto DataPtr = builder.createPtrStride(Loc, CastOp, OffsetOp); - CurPtr = Address(DataPtr, CurPtr.getType(), + CurPtr = Address(DataPtr, CurPtr.getElementType(), StartAlign.alignmentAtOffset((++i) * ElementSize)); } diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index f158ab0bb0f3..9a6bc33d4051 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -292,3 +292,38 @@ void t_new_var_size_nontrivial(size_t n) { // CHECK: %[[ALL_ONES:.*]] = cir.const #cir.int<18446744073709551615> : !u64i // CHECK: %[[ALLOC_SIZE:.*]] = cir.select if %[[ANY_OVERFLOW]] then %[[ALL_ONES]] else %[[SIZE]] : (!cir.bool, !u64i, !u64i) // CHECK: %[[PTR:.*]] = cir.call @_Znam(%[[ALLOC_SIZE]]) : (!u64i) + +void t_multidim_init() { + auto *p = new int[2][3] { {1, 2, 3}, {4, 5, 6}}; +} + +// CHECK: cir.func @_Z15t_multidim_initv() +// CHECK: %[[NUM_ELEMENTS:.*]] = cir.const #cir.int<6> : !u64i +// CHECK: %[[ALLOCATION_SIZE:.*]] = cir.const #cir.int<24> : !u64i +// CHECK: %[[NEW_PTR:.*]] = cir.call @_Znam(%2) : (!u64i) -> !cir.ptr +// CHECK: %[[ELEMENT_PTR:.*]] = cir.cast(bitcast, %[[NEW_PTR]] : !cir.ptr), !cir.ptr +// CHECK: %[[ARRAY_ELEM0_PTR:.*]] = cir.cast(bitcast, %[[ELEMENT_PTR]] : !cir.ptr), !cir.ptr> +// CHECK: %[[ELEM_00_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARRAY_ELEM0_PTR]] : !cir.ptr>), !cir.ptr +// CHECK: %[[ELEM_00_VAL:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: cir.store %[[ELEM_00_VAL]], %[[ELEM_00_PTR]] : !s32i, !cir.ptr +// CHECK: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i +// CHECK: %[[ELEM_01_PTR:.*]] = cir.ptr_stride(%[[ELEM_00_PTR]] : !cir.ptr, %[[OFFSET]] : !s64i), !cir.ptr +// CHECK: %[[ELEM_01_VAL:.*]] = cir.const #cir.int<2> : !s32i +// CHECK: cir.store %[[ELEM_01_VAL]], %[[ELEM_01_PTR]] : !s32i, !cir.ptr +// CHECK: %[[OFFSET1:.*]] = cir.const #cir.int<2> : !s64i +// CHECK: %[[ELEM_02_PTR:.*]] = cir.ptr_stride(%[[ELEM_00_PTR]] : !cir.ptr, %[[OFFSET1]] : !s64i), !cir.ptr +// CHECK: %[[ELEM_02_VAL:.*]] = cir.const #cir.int<3> : !s32i +// CHECK: cir.store %[[ELEM_02_VAL]], %[[ELEM_02_PTR]] : !s32i, !cir.ptr +// CHECK: %[[OFFSET3:.*]] = cir.const #cir.int<1> : !s32i +// CHECK: %[[ARRAY_ELEM1_PTR:.*]] = cir.ptr_stride(%[[ARRAY_ELEM0_PTR]] : !cir.ptr>, %[[OFFSET3]] : !s32i), !cir.ptr> +// CHECK: %[[ELEM_10_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARRAY_ELEM1_PTR]] : !cir.ptr>), !cir.ptr +// CHECK: %[[ELEM_10_VAL:.*]] = cir.const #cir.int<4> : !s32i +// CHECK: cir.store %[[ELEM_10_VAL]], %[[ELEM_10_PTR]] : !s32i, !cir.ptr +// CHECK: %[[OFFSET4:.*]] = cir.const #cir.int<1> : !s64i +// CHECK: %[[ELEM_11_PTR:.*]] = cir.ptr_stride(%[[ELEM_10_PTR]] : !cir.ptr, %[[OFFSET4]] : !s64i), !cir.ptr +// CHECK: %[[ELEM_11_VAL:.*]] = cir.const #cir.int<5> : !s32i +// CHECK: cir.store %[[ELEM_11_VAL]], %[[ELEM_11_PTR]] : !s32i, !cir.ptr +// CHECK: %[[OFFSET5:.*]] = cir.const #cir.int<2> : !s64i +// CHECK: %[[ELEM_12_PTR:.*]] = cir.ptr_stride(%[[ELEM_10_PTR]] : !cir.ptr, %21 : !s64i), !cir.ptr +// CHECK: %[[ELEM_12_VAL:.*]] = cir.const #cir.int<6> : !s32i +// CHECK: cir.store %[[ELEM_12_VAL]], %[[ELEM_12_PTR]] : !s32i, !cir.ptr diff --git a/clang/test/CIR/Lowering/new.cpp b/clang/test/CIR/Lowering/new.cpp index b4ea023ab4dc..bf80e70d4d37 100644 --- a/clang/test/CIR/Lowering/new.cpp +++ b/clang/test/CIR/Lowering/new.cpp @@ -215,3 +215,23 @@ void t_new_constant_size_constructor() { // LLVM: %[[NEXT_PTR:.*]] = getelementptr %class.E, ptr %[[CUR_ELEM_PTR]], i64 1 // LLVM: store ptr %[[NEXT_PTR]] // LLVM: br label %[[LOOP_INC_BB]] + +void t_multidim_init() { + auto *p = new int[2][3] { {1, 2, 3}, {4, 5, 6}}; +} + +// LLVM: @_Z15t_multidim_initv() +// LLVM: %[[ALLOC_PTR:.*]] = call ptr @_Znam(i64 24) +// LLVM: %[[ELEM_00_PTR:.*]] = getelementptr i32, ptr %[[ALLOC_PTR]], i32 0 +// LLVM: store i32 1, ptr %[[ELEM_00_PTR]], align 4 +// LLVM: %[[ELEM_01_PTR:.*]] = getelementptr i32, ptr %[[ELEM_00_PTR]], i64 1 +// LLVM: store i32 2, ptr %[[ELEM_01_PTR]], align 4 +// LLVM: %[[ELEM_02_PTR:.*]] = getelementptr i32, ptr %[[ELEM_00_PTR]], i64 2 +// LLVM: store i32 3, ptr %[[ELEM_02_PTR]], align 4 +// LLVM: %[[ELEM_1_PTR:.*]] = getelementptr [3 x i32], ptr %[[ALLOC_PTR]], i64 1 +// LLVM: %[[ELEM_10_PTR:.*]] = getelementptr i32, ptr %[[ELEM_1_PTR]], i32 0 +// LLVM: store i32 4, ptr %[[ELEM_10_PTR]], align 4 +// LLVM: %[[ELEM_11_PTR:.*]] = getelementptr i32, ptr %[[ELEM_10_PTR]], i64 1 +// LLVM: store i32 5, ptr %[[ELEM_11_PTR]], align 4 +// LLVM: %[[ELEM_12_PTR:.*]] = getelementptr i32, ptr %[[ELEM_10_PTR]], i64 2 +// LLVM: store i32 6, ptr %[[ELEM_12_PTR]], align 4 From ab985cc63a2c6f9b921a5d07b33b956599c2549b Mon Sep 17 00:00:00 2001 From: Konstantinos Parasyris Date: Wed, 19 Feb 2025 11:39:58 -0800 Subject: [PATCH 2271/2301] [CIR][HIP|CUDA] Generate global storing CUDA|HIP stub function (#1341) On HIP when launching a kernel we pass as a first argument a global variable that points to the device stub function. We follow OG design by having a map that pairs globals to symbols. In CUDA this is effectively a nop, as CUDA passes the device stub as a first argument. --- clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp | 136 ++++++++++++++++--- clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h | 14 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 28 ++-- clang/test/CIR/CodeGen/HIP/simple-device.cpp | 14 -- clang/test/CIR/CodeGen/HIP/simple.cpp | 32 ++++- 5 files changed, 172 insertions(+), 52 deletions(-) delete mode 100644 clang/test/CIR/CodeGen/HIP/simple-device.cpp diff --git a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp index acbbcd2c5c8b..30697d50bf2b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp @@ -14,14 +14,35 @@ #include "CIRGenCUDARuntime.h" #include "CIRGenFunction.h" +#include "mlir/IR/Operation.h" #include "clang/Basic/Cuda.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/raw_ostream.h" +#include using namespace clang; using namespace clang::CIRGen; CIRGenCUDARuntime::~CIRGenCUDARuntime() {} +CIRGenCUDARuntime::CIRGenCUDARuntime(CIRGenModule &cgm) : cgm(cgm) { + if (cgm.getLangOpts().OffloadViaLLVM) + llvm_unreachable("NYI"); + else if (cgm.getLangOpts().HIP) + Prefix = "hip"; + else + Prefix = "cuda"; +} + +std::string CIRGenCUDARuntime::addPrefixToName(StringRef FuncName) const { + return (Prefix + FuncName).str(); +} +std::string +CIRGenCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const { + return ("__" + Prefix + FuncName).str(); +} + void CIRGenCUDARuntime::emitDeviceStubBodyLegacy(CIRGenFunction &cgf, cir::FuncOp fn, FunctionArgList &args) { @@ -31,8 +52,6 @@ void CIRGenCUDARuntime::emitDeviceStubBodyLegacy(CIRGenFunction &cgf, void CIRGenCUDARuntime::emitDeviceStubBodyNew(CIRGenFunction &cgf, cir::FuncOp fn, FunctionArgList &args) { - if (cgm.getLangOpts().HIP) - llvm_unreachable("NYI"); // This requires arguments to be sent to kernels in a different way. if (cgm.getLangOpts().OffloadViaLLVM) @@ -40,7 +59,7 @@ void CIRGenCUDARuntime::emitDeviceStubBodyNew(CIRGenFunction &cgf, auto &builder = cgm.getBuilder(); - // For cudaLaunchKernel, we must add another layer of indirection + // For [cuda|hip]LaunchKernel, we must add another layer of indirection // to arguments. For example, for function `add(int a, float b)`, // we need to pass it as `void *args[2] = { &a, &b }`. @@ -71,7 +90,8 @@ void CIRGenCUDARuntime::emitDeviceStubBodyNew(CIRGenFunction &cgf, LangOptions::GPUDefaultStreamKind::PerThread) llvm_unreachable("NYI"); - std::string launchAPI = "cudaLaunchKernel"; + std::string launchAPI = addPrefixToName("LaunchKernel"); + std::cout << "LaunchAPI is " << launchAPI << "\n"; const IdentifierInfo &launchII = cgm.getASTContext().Idents.get(launchAPI); FunctionDecl *launchFD = nullptr; for (auto *result : dc->lookup(&launchII)) { @@ -86,11 +106,11 @@ void CIRGenCUDARuntime::emitDeviceStubBodyNew(CIRGenFunction &cgf, } // Use this function to retrieve arguments for cudaLaunchKernel: - // int __cudaPopCallConfiguration(dim3 *gridDim, dim3 *blockDim, size_t + // int __[cuda|hip]PopCallConfiguration(dim3 *gridDim, dim3 *blockDim, size_t // *sharedMem, cudaStream_t *stream) // - // Here cudaStream_t, while also being the 6th argument of cudaLaunchKernel, - // is a pointer to some opaque struct. + // Here [cuda|hip]Stream_t, while also being the 6th argument of + // [cuda|hip]LaunchKernel, is a pointer to some opaque struct. mlir::Type dim3Ty = cgf.getTypes().convertType(launchFD->getParamDecl(1)->getType()); @@ -114,26 +134,45 @@ void CIRGenCUDARuntime::emitDeviceStubBodyNew(CIRGenFunction &cgf, cir::FuncType::get({gridDim.getType(), blockDim.getType(), sharedMem.getType(), stream.getType()}, cgm.SInt32Ty), - "__cudaPopCallConfiguration"); + addUnderscoredPrefixToName("PopCallConfiguration")); cgf.emitRuntimeCall(loc, popConfig, {gridDim, blockDim, sharedMem, stream}); // Now emit the call to cudaLaunchKernel - // cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, + // [cuda|hip]Error_t [cuda|hip]LaunchKernel(const void *func, dim3 gridDim, + // dim3 blockDim, // void **args, size_t sharedMem, - // cudaStream_t stream); - auto kernelTy = - cir::PointerType::get(&cgm.getMLIRContext(), fn.getFunctionType()); + // [cuda|hip]Stream_t stream); - mlir::Value kernel = - builder.create(loc, kernelTy, fn.getSymName()); - mlir::Value func = builder.createBitcast(kernel, cgm.VoidPtrTy); + // We now either pick the function or the stub global for cuda, hip + // resepectively. + auto kernel = [&]() { + if (auto globalOp = llvm::dyn_cast_or_null( + KernelHandles[fn.getSymName()])) { + auto kernelTy = + cir::PointerType::get(&cgm.getMLIRContext(), globalOp.getSymType()); + mlir::Value kernel = builder.create( + loc, kernelTy, globalOp.getSymName()); + return kernel; + } + if (auto funcOp = llvm::dyn_cast_or_null( + KernelHandles[fn.getSymName()])) { + auto kernelTy = cir::PointerType::get(&cgm.getMLIRContext(), + funcOp.getFunctionType()); + mlir::Value kernel = + builder.create(loc, kernelTy, funcOp.getSymName()); + mlir::Value func = builder.createBitcast(kernel, cgm.VoidPtrTy); + return func; + } + assert(false && "Expected stub handle to be cir::GlobalOp or funcOp"); + }(); + // mlir::Value func = builder.createBitcast(kernel, cgm.VoidPtrTy); CallArgList launchArgs; mlir::Value kernelArgsDecayed = builder.createCast(cir::CastKind::array_to_ptrdecay, kernelArgs, cir::PointerType::get(cgm.VoidPtrTy)); - launchArgs.add(RValue::get(func), launchFD->getParamDecl(0)->getType()); + launchArgs.add(RValue::get(kernel), launchFD->getParamDecl(0)->getType()); launchArgs.add( RValue::getAggregate(Address(gridDim, CharUnits::fromQuantity(8))), launchFD->getParamDecl(1)->getType()); @@ -157,13 +196,16 @@ void CIRGenCUDARuntime::emitDeviceStubBodyNew(CIRGenFunction &cgf, void CIRGenCUDARuntime::emitDeviceStub(CIRGenFunction &cgf, cir::FuncOp fn, FunctionArgList &args) { - // Device stub and its handle might be different. - if (cgm.getLangOpts().HIP) - llvm_unreachable("NYI"); - + if (auto globalOp = + llvm::dyn_cast(KernelHandles[fn.getSymName()])) { + auto symbol = mlir::FlatSymbolRefAttr::get(fn.getSymNameAttr()); + // Set the initializer for the global + cgm.setInitializer(globalOp, symbol); + } // CUDA 9.0 changed the way to launch kernels. if (CudaFeatureEnabled(cgm.getTarget().getSDKVersion(), CudaFeature::CUDA_USES_NEW_LAUNCH) || + (cgm.getLangOpts().HIP && cgm.getLangOpts().HIPUseNewLaunchAPI) || cgm.getLangOpts().OffloadViaLLVM) emitDeviceStubBodyNew(cgf, fn, args); else @@ -189,3 +231,57 @@ RValue CIRGenCUDARuntime::emitCUDAKernelCallExpr(CIRGenFunction &cgf, return RValue::get(nullptr); } + +mlir::Operation *CIRGenCUDARuntime::getKernelHandle(cir::FuncOp fn, + GlobalDecl GD) { + + // Check if we already have a kernel handle for this function + auto Loc = KernelHandles.find(fn.getSymName()); + if (Loc != KernelHandles.end()) { + auto OldHandle = Loc->second; + // Here we know that the fn did not change. Return it + if (KernelStubs[OldHandle] == fn) + return OldHandle; + + // We've found the function name, but F itself has changed, so we need to + // update the references. + if (cgm.getLangOpts().HIP) { + // For HIP compilation the handle itself does not change, so we only need + // to update the Stub value. + KernelStubs[OldHandle] = fn; + return OldHandle; + } + // For non-HIP compilation, erase the old Stub and fall-through to creating + // new entries. + KernelStubs.erase(OldHandle); + } + + // If not targeting HIP, store the function itself + if (!cgm.getLangOpts().HIP) { + KernelHandles[fn.getSymName()] = fn; + KernelStubs[fn] = fn; + return fn; + } + + // Create a new CIR global variable to represent the kernel handle + auto &builder = cgm.getBuilder(); + auto globalName = cgm.getMangledName( + GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel)); + auto globalOp = cgm.getOrInsertGlobal( + fn->getLoc(), globalName, fn.getFunctionType(), [&] { + return CIRGenModule::createGlobalOp( + cgm, fn->getLoc(), globalName, + builder.getPointerTo(fn.getFunctionType()), true, /* addrSpace=*/{}, + /*insertPoint=*/nullptr, fn.getLinkage()); + }); + + globalOp->setAttr("alignment", builder.getI64IntegerAttr( + cgm.getPointerAlign().getQuantity())); + globalOp->setAttr("visibility", fn->getAttr("sym_visibility")); + + // Store references + KernelHandles[fn.getSymName()] = globalOp; + KernelStubs[globalOp] = fn; + + return globalOp; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h index 634f4891b85d..700f939e3082 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h +++ b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.h @@ -29,15 +29,26 @@ class ReturnValueSlot; class CIRGenCUDARuntime { protected: CIRGenModule &cgm; + StringRef Prefix; + + // Map a device stub function to a symbol for identifying kernel in host code. + // For CUDA, the symbol for identifying the kernel is the same as the device + // stub function. For HIP, they are different. + llvm::DenseMap KernelHandles; + + // Map a kernel handle to the kernel stub. + llvm::DenseMap KernelStubs; private: void emitDeviceStubBodyLegacy(CIRGenFunction &cgf, cir::FuncOp fn, FunctionArgList &args); void emitDeviceStubBodyNew(CIRGenFunction &cgf, cir::FuncOp fn, FunctionArgList &args); + std::string addPrefixToName(StringRef FuncName) const; + std::string addUnderscoredPrefixToName(StringRef FuncName) const; public: - CIRGenCUDARuntime(CIRGenModule &cgm) : cgm(cgm) {} + CIRGenCUDARuntime(CIRGenModule &cgm); virtual ~CIRGenCUDARuntime(); virtual void emitDeviceStub(CIRGenFunction &cgf, cir::FuncOp fn, @@ -46,6 +57,7 @@ class CIRGenCUDARuntime { virtual RValue emitCUDAKernelCallExpr(CIRGenFunction &cgf, const CUDAKernelCallExpr *expr, ReturnValueSlot retValue); + virtual mlir::Operation *getKernelHandle(cir::FuncOp fn, GlobalDecl GD); }; } // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index ac2125cf717c..34ba5d0874e6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -651,9 +651,10 @@ void CIRGenModule::emitGlobalFunctionDefinition(GlobalDecl GD, // Get or create the prototype for the function. auto Fn = dyn_cast_if_present(Op); - if (!Fn || Fn.getFunctionType() != Ty) - Fn = GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/true, - ForDefinition); + if (!Fn || Fn.getFunctionType() != Ty) { + Fn = GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, + /*DontDefer=*/true, ForDefinition); + } // Already emitted. if (!Fn.isDeclaration()) @@ -2363,10 +2364,17 @@ cir::FuncOp CIRGenModule::GetAddrOfFunction(clang::GlobalDecl GD, mlir::Type Ty, // As __global__ functions (kernels) always reside on device, // when we access them from host, we must refer to the kernel handle. - // For CUDA, it's just the device stub. For HIP, it's something different. - if (langOpts.CUDA && !langOpts.CUDAIsDevice && langOpts.HIP && + // For HIP, we should never directly access the host device addr, but + // instead the Global Variable of that stub. For CUDA, it's just the device + // stub. For HIP, it's something different. + if ((langOpts.HIP || langOpts.CUDA) && !langOpts.CUDAIsDevice && cast(GD.getDecl())->hasAttr()) { - llvm_unreachable("NYI"); + auto *stubHandle = getCUDARuntime().getKernelHandle(F, GD); + if (IsForDefinition) + return F; + + if (langOpts.HIP) + llvm_unreachable("NYI"); } return F; @@ -3176,15 +3184,15 @@ CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { auto FInfo = &getTypes().arrangeCXXMethodDeclaration(cast(D)); auto Ty = getTypes().GetFunctionType(*FInfo); - return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false, - IsForDefinition); + return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, + /*DontDefer=*/false, IsForDefinition); } if (isa(D)) { const CIRGenFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD); auto Ty = getTypes().GetFunctionType(FI); - return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false, - IsForDefinition); + return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, + /*DontDefer=*/false, IsForDefinition); } return getAddrOfGlobalVar(cast(D), /*Ty=*/nullptr, IsForDefinition) diff --git a/clang/test/CIR/CodeGen/HIP/simple-device.cpp b/clang/test/CIR/CodeGen/HIP/simple-device.cpp deleted file mode 100644 index e627a90dc410..000000000000 --- a/clang/test/CIR/CodeGen/HIP/simple-device.cpp +++ /dev/null @@ -1,14 +0,0 @@ -#include "../Inputs/cuda.h" - -// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip -fcuda-is-device \ -// RUN: -fclangir -emit-cir -o - %s | FileCheck %s - -// This shouldn't emit. -__host__ void host_fn(int *a, int *b, int *c) {} - -// CHECK-NOT: cir.func @_Z7host_fnPiS_S_ - -// This should emit as a normal C++ function. -__device__ void device_fn(int* a, double b, float c) {} - -// CIR: cir.func @_Z9device_fnPidf diff --git a/clang/test/CIR/CodeGen/HIP/simple.cpp b/clang/test/CIR/CodeGen/HIP/simple.cpp index ec4110da10d7..f04dd27e0411 100644 --- a/clang/test/CIR/CodeGen/HIP/simple.cpp +++ b/clang/test/CIR/CodeGen/HIP/simple.cpp @@ -1,16 +1,34 @@ #include "../Inputs/cuda.h" -// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip -fclangir \ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir \ +// RUN: -x hip -fhip-new-launch-api \ // RUN: -emit-cir %s -o %t.cir -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: FileCheck --check-prefix=CIR-HOST --input-file=%t.cir %s +// RUN: %clang_cc1 -triple=amdgcn-amd-amdhsa -x hip \ +// RUN: -fcuda-is-device -fhip-new-launch-api \ +// RUN: -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR-DEVICE --input-file=%t.cir %s + +// Attribute for global_fn +// CIR-HOST: [[Kernel:#[a-zA-Z_0-9]+]] = {{.*}}#cir.cuda_kernel_name<_Z9global_fni>{{.*}} -// This should emit as a normal C++ function. -__host__ void host_fn(int *a, int *b, int *c) {} -// CIR: cir.func @_Z7host_fnPiS_S_ +__host__ void host_fn(int *a, int *b, int *c) {} +// CIR-HOST: cir.func @_Z7host_fnPiS_S_ +// CIR-DEVICE-NOT: cir.func @_Z7host_fnPiS_S_ -// This shouldn't emit. __device__ void device_fn(int* a, double b, float c) {} +// CIR-HOST-NOT: cir.func @_Z9device_fnPidf +// CIR-DEVICE: cir.func @_Z9device_fnPidf + +__global__ void global_fn(int a) {} +// CIR-DEVICE: @_Z9global_fni + +// CIR-HOST: cir.alloca {{.*}}"kernel_args" +// CIR-HOST: cir.call @__hipPopCallConfiguration -// CHECK-NOT: cir.func @_Z9device_fnPidf +// Host access the global stub instead of the functiond evice stub. +// The stub has the mangled name of the function +// CIR-HOST: cir.get_global @_Z9global_fni +// CIR-HOST: cir.call @hipLaunchKernel From a4d50ebcdf22a2d3d5a757f88efe71660110964f Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Wed, 19 Feb 2025 21:45:17 +0100 Subject: [PATCH 2272/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vshrd_n_u64 (#1353) Lower `neon_vshrd_n_u64` --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 10 ++++- clang/test/CIR/CodeGen/AArch64/neon.c | 44 ++++++++++++++----- 2 files changed, 42 insertions(+), 12 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 2bd91898ca56..32cf58a1c280 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3857,7 +3857,15 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, return builder.createShiftRight(Ops[0], bits); } case NEON::BI__builtin_neon_vshrd_n_u64: { - llvm_unreachable("NEON::BI__builtin_neon_vshrd_n_u64 NYI"); + std::optional amt = + E->getArg(1)->getIntegerConstantExpr(getContext()); + assert(amt && "Expected argument to be a constant"); + uint64_t shiftAmt = amt->getZExtValue(); + if (shiftAmt == 64) + return builder.getConstInt(getLoc(E->getExprLoc()), builder.getUInt64Ty(), + 0); + + return builder.createShiftRight(Ops[0], shiftAmt); } case NEON::BI__builtin_neon_vsrad_n_s64: { std::optional amt = diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index ac8aead31bec..09e50faca4b2 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -15174,18 +15174,40 @@ int64_t test_vshrd_n_s64(int64_t a) { // LLVM: ret i64 [[SHRD_N]] } -// NYI-LABEL: @test_vshrd_n_u64( -// NYI: ret i64 0 -// uint64_t test_vshrd_n_u64(uint64_t a) { -// return (uint64_t)vshrd_n_u64(a, 64); -// } +uint64_t test_vshrd_n_u64(uint64_t a) { + return (uint64_t)vshrd_n_u64(a, 64); -// NYI-LABEL: @test_vshrd_n_u64_2( -// NYI: ret i64 0 -// uint64_t test_vshrd_n_u64_2() { -// uint64_t a = UINT64_C(0xf000000000000000); -// return vshrd_n_u64(a, 64); -// } + // CIR-LABEL: vshrd_n_u64 + // CIR: {{.*}} = cir.const #cir.int<0> : !u64i + // CIR: cir.return {{.*}} : !u64i + + // LLVM-LABEL: @test_vshrd_n_u64( + // LLVM: ret i64 0 +} + +uint64_t test_vshrd_n_u64_2() { + uint64_t a = UINT64_C(0xf000000000000000); + return vshrd_n_u64(a, 64); + + // CIR-LABEL: vshrd_n_u64 + // CIR: {{.*}} = cir.const #cir.int<0> : !u64i + // CIR: cir.return {{.*}} : !u64i + + // LLVM-LABEL: @test_vshrd_n_u64_2( + // LLVM: ret i64 0 + +} + +uint64_t test_vshrd_n_u64_3(uint64_t a) { + return vshrd_n_u64(a, 1); + + // CIR-LABEL: vshrd_n_u64 + // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !u64i, {{%.*}} : !u64i) -> !u64i + + // LLVM-LABEL: @test_vshrd_n_u64_3( + // LLVM: [[SHRD_N:%.*]] = lshr i64 %0, 1 + // LLVM: ret i64 [[SHRD_N]] +} // NYI-LABEL: @test_vrshrd_n_s64( // NYI: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 %a, i64 -63) From 8dfd5eef2010dc56c0e7c92597ea5ed997f6b5cb Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Wed, 19 Feb 2025 21:45:37 +0100 Subject: [PATCH 2273/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vsrad_n_u64 (#1356) Lower `neon_vsrad_n_u64` --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 8 ++++- clang/test/CIR/CodeGen/AArch64/neon.c | 33 ++++++++++++------- 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 32cf58a1c280..9f6f94f295be 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3876,7 +3876,13 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, builder.createShift(Ops[1], shiftAmt, false)); } case NEON::BI__builtin_neon_vsrad_n_u64: { - llvm_unreachable("NEON::BI__builtin_neon_vsrad_n_u64 NYI"); + std::optional amt = + E->getArg(2)->getIntegerConstantExpr(getContext()); + uint64_t shiftAmt = amt->getZExtValue(); + if (shiftAmt == 64) + return Ops[0]; + + return builder.createAdd(Ops[0], builder.createShiftLeft(Ops[1], shiftAmt)); } case NEON::BI__builtin_neon_vqdmlalh_lane_s16: case NEON::BI__builtin_neon_vqdmlalh_laneq_s16: diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 09e50faca4b2..998891bfb998 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -15286,19 +15286,28 @@ int64x1_t test_vsra_n_s64(int64x1_t a, int64x1_t b) { // LLVM: ret <1 x i64> [[TMP4]] } -// NYI-LABEL: @test_vsrad_n_u64( -// NYI: [[SHRD_N:%.*]] = lshr i64 %b, 63 -// NYI: [[TMP0:%.*]] = add i64 %a, [[SHRD_N]] -// NYI: ret i64 [[TMP0]] -// uint64_t test_vsrad_n_u64(uint64_t a, uint64_t b) { -// return (uint64_t)vsrad_n_u64(a, b, 63); -// } +uint64_t test_vsrad_n_u64(uint64_t a, uint64_t b) { + return (uint64_t)vsrad_n_u64(a, b, 63); -// NYI-LABEL: @test_vsrad_n_u64_2( -// NYI: ret i64 %a -// uint64_t test_vsrad_n_u64_2(uint64_t a, uint64_t b) { -// return (uint64_t)vsrad_n_u64(a, b, 64); -// } + // CIR-LABEL:test_vsrad_n_u64 + // CIR: [[SHL:%.*]] = cir.shift(left, {{%.*}} : !u64i, {{%.*}} : !u64i) -> !u64i + // CIR: {{.*}} = cir.binop(add, {{.*}}, [[SHL]]) : !u64i + + // LLVM-LABEL: test_vsrad_n_u64( + // LLVM: [[SHRD_N:%.*]] = shl i64 %1, 63 + // LLVM: [[TMP0:%.*]] = add i64 %0, [[SHRD_N]] + // LLVM: ret i64 [[TMP0]] +} + +uint64_t test_vsrad_n_u64_2(uint64_t a, uint64_t b) { + return (uint64_t)vsrad_n_u64(a, b, 64); + + // CIR-LABEL:test_vsrad_n_u64 + // CIR: cir.return {{.*}} : !u64i + + // LLVM-LABEL: test_vsrad_n_u64_2( + // LLVM: ret i64 %0 +} uint64x1_t test_vsra_n_u64(uint64x1_t a, uint64x1_t b) { return vsra_n_u64(a, b, 1); From 4e4e4b4818fbcc7378999effc3caae266bcaaed5 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 19 Feb 2025 23:45:53 +0300 Subject: [PATCH 2274/2301] [CIR][Bugfix] fixes global array of pointers (#1350) This PR fixes a case with global vars replacement when an array element is neither a`GlobalView` nor `ConstArray` but just a null pointer . Previously we just skip such element. Basically, the case in question is `static S* global[] = {0, &another_global}` --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 ++ clang/test/CIR/CodeGen/globals-ref-globals.c | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 34ba5d0874e6..79a9d7875423 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -885,6 +885,8 @@ static mlir::Attribute getNewInitValue(CIRGenModule &CGM, GlobalOp newGlob, newArray.push_back(createNewGlobalView(CGM, newGlob, view, oldTy)); else if (auto view = dyn_cast(elt)) newArray.push_back(getNewInitValue(CGM, newGlob, oldTy, user, elt)); + else + newArray.push_back(elt); } auto &builder = CGM.getBuilder(); diff --git a/clang/test/CIR/CodeGen/globals-ref-globals.c b/clang/test/CIR/CodeGen/globals-ref-globals.c index 8343153e3e8e..c0efc37ac063 100644 --- a/clang/test/CIR/CodeGen/globals-ref-globals.c +++ b/clang/test/CIR/CodeGen/globals-ref-globals.c @@ -87,10 +87,10 @@ typedef struct { } S3; static S3 g13 = {-1L,0L,1L}; -static S3* g14[2][2] = {{&g13, &g13}, {&g13, &g13}}; +static S3* g14[2][2] = {{0, &g13}, {&g13, &g13}}; // CHECK-DAG: g13 = #cir.const_struct<{#cir.int<-1> : !s16i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array, #cir.int<0> : !s32i, #cir.int<1> : !s8i, #cir.const_array<[#cir.zero : !u8i, #cir.zero : !u8i, #cir.zero : !u8i]> : !cir.array}> : !ty_anon_struct3 -// CHECK-DAG: g14 = #cir.const_array<[#cir.const_array<[#cir.global_view<@g13> : !cir.ptr, #cir.global_view<@g13> : !cir.ptr]> : !cir.array x 2>, #cir.const_array<[#cir.global_view<@g13> : !cir.ptr, #cir.global_view<@g13> : !cir.ptr]> : !cir.array x 2>]> : !cir.array x 2> x 2> +// CHECK-DAG: g14 = #cir.const_array<[#cir.const_array<[#cir.ptr : !cir.ptr, #cir.global_view<@g13> : !cir.ptr]> : !cir.array x 2>, #cir.const_array<[#cir.global_view<@g13> : !cir.ptr, #cir.global_view<@g13> : !cir.ptr]> : !cir.array x 2>]> : !cir.array x 2> x 2> typedef struct { int f0; From 27154ac37bf1da0cfceefc7e98f3c14ca54b0a97 Mon Sep 17 00:00:00 2001 From: Omar Hossam Date: Wed, 19 Feb 2025 22:05:51 +0100 Subject: [PATCH 2275/2301] [CIR][CIRGen] Handle __sync_nand_and_fetch (#1351) The final part of #1273. --- clang/include/clang/CIR/MissingFeatures.h | 1 - clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 15 +- clang/test/CIR/CodeGen/atomic.cpp | 174 +++++++++++++++++----- 3 files changed, 140 insertions(+), 50 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 1487747899d6..f9a500bb7c64 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -247,7 +247,6 @@ struct MissingFeatures { static bool emitConstrainedFPCall() { return false; } static bool emitEmptyRecordCheck() { return false; } static bool isPPC_FP128Ty() { return false; } - static bool emitBinaryAtomicPostHasInvert() { return false; } static bool createLaunderInvariantGroup() { return false; } // Inline assembly diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 572ef5d6022c..24e929ea4b83 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -293,8 +293,8 @@ static RValue emitBinaryAtomic(CIRGenFunction &CGF, cir::AtomicFetchKind kind, static RValue emitBinaryAtomicPost(CIRGenFunction &cgf, cir::AtomicFetchKind atomicOpkind, - const CallExpr *e, - cir::BinOpKind binopKind) { + const CallExpr *e, cir::BinOpKind binopKind, + bool invert = false) { mlir::Value val; mlir::Type valueType; clang::QualType typ = e->getType(); @@ -302,12 +302,10 @@ static RValue emitBinaryAtomicPost(CIRGenFunction &cgf, makeBinaryAtomicValue(cgf, atomicOpkind, e, &val, &valueType); clang::CIRGen::CIRGenBuilderTy &builder = cgf.getBuilder(); result = builder.create(result.getLoc(), binopKind, result, val); + if (invert) + result = builder.create(result.getLoc(), + cir::UnaryOpKind::Not, result); result = emitFromInt(cgf, result, typ, valueType); - // FIXME: Some callers of this function expect the result to be inverted, - // which would need invert flag passed in and do the inversion here like - // traditional clang code gen does. When we implment those caller builtins - // we should implement the inversion here. - assert(!MissingFeatures::emitBinaryAtomicPostHasInvert()); return RValue::get(result); } @@ -1841,7 +1839,8 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_nand_and_fetch_4: case Builtin::BI__sync_nand_and_fetch_8: case Builtin::BI__sync_nand_and_fetch_16: - llvm_unreachable("BI__sync_nand_and_fetch like NYI"); + return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Nand, E, + cir::BinOpKind::And, true); case Builtin::BI__sync_val_compare_and_swap_1: case Builtin::BI__sync_val_compare_and_swap_2: diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index fb1f792a4027..54244c6d4f74 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -676,7 +676,7 @@ void cmp_val_ulong(unsigned long* p, long x, long u) { // CHECK-LABEL: @test_op_and_fetch // LLVM-LABEL: @test_op_and_fetch -extern "C" void test_op_and_fetch (void) +extern "C" void test_op_and_fetch(void) { // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i // CHECK: [[RES0:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i @@ -685,7 +685,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES0:%.*]] = atomicrmw add ptr @sc, i8 [[VAL0]] seq_cst, align 1 // LLVM: [[RET0:%.*]] = add i8 [[RES0]], [[VAL0]] // LLVM: store i8 [[RET0]], ptr @sc, align 1 - sc = __sync_add_and_fetch (&sc, uc); + sc = __sync_add_and_fetch(&sc, uc); // CHECK: [[RES1:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i // CHECK: [[RET1:%.*]] = cir.binop(add, [[RES1]], [[VAL1]]) : !u8i @@ -693,7 +693,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES1:%.*]] = atomicrmw add ptr @uc, i8 [[VAL1]] seq_cst, align 1 // LLVM: [[RET1:%.*]] = add i8 [[RES1]], [[VAL1]] // LLVM: store i8 [[RET1]], ptr @uc, align 1 - uc = __sync_add_and_fetch (&uc, uc); + uc = __sync_add_and_fetch(&uc, uc); // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i // CHECK: [[RES2:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i @@ -703,7 +703,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES2:%.*]] = atomicrmw add ptr @ss, i16 [[CONV2]] seq_cst, align 2 // LLVM: [[RET2:%.*]] = add i16 [[RES2]], [[CONV2]] // LLVM: store i16 [[RET2]], ptr @ss, align 2 - ss = __sync_add_and_fetch (&ss, uc); + ss = __sync_add_and_fetch(&ss, uc); // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i // CHECK: [[RES3:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i @@ -713,7 +713,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES3:%.*]] = atomicrmw add ptr @us, i16 [[CONV3]] seq_cst, align 2 // LLVM: [[RET3:%.*]] = add i16 [[RES3]], [[CONV3]] // LLVM: store i16 [[RET3]], ptr @us - us = __sync_add_and_fetch (&us, uc); + us = __sync_add_and_fetch(&us, uc); // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i // CHECK: [[RES4:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i @@ -723,7 +723,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES4:%.*]] = atomicrmw add ptr @si, i32 [[CONV4]] seq_cst, align 4 // LLVM: [[RET4:%.*]] = add i32 [[RES4]], [[CONV4]] // LLVM: store i32 [[RET4]], ptr @si, align 4 - si = __sync_add_and_fetch (&si, uc); + si = __sync_add_and_fetch(&si, uc); // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i // CHECK: [[RES5:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i @@ -733,7 +733,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES5:%.*]] = atomicrmw add ptr @ui, i32 [[CONV5]] seq_cst, align 4 // LLVM: [[RET5:%.*]] = add i32 [[RES5]], [[CONV5]] // LLVM: store i32 [[RET5]], ptr @ui, align 4 - ui = __sync_add_and_fetch (&ui, uc); + ui = __sync_add_and_fetch(&ui, uc); // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i // CHECK: [[RES6:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i @@ -743,7 +743,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES6:%.*]] = atomicrmw add ptr @sll, i64 [[CONV6]] seq_cst, align 8 // LLVM: [[RET6:%.*]] = add i64 [[RES6]], [[CONV6]] // LLVM: store i64 [[RET6]], ptr @sll, align 8 - sll = __sync_add_and_fetch (&sll, uc); + sll = __sync_add_and_fetch(&sll, uc); // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i // CHECK: [[RES7:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i @@ -753,7 +753,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES7:%.*]] = atomicrmw add ptr @ull, i64 [[CONV7]] seq_cst, align 8 // LLVM: [[RET7:%.*]] = add i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 - ull = __sync_add_and_fetch (&ull, uc); + ull = __sync_add_and_fetch(&ull, uc); // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i // CHECK: [[RES0:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i @@ -762,7 +762,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES0:%.*]] = atomicrmw sub ptr @sc, i8 [[VAL0]] seq_cst, align 1 // LLVM: [[RET0:%.*]] = sub i8 [[RES0]], [[VAL0]] // LLVM: store i8 [[RET0]], ptr @sc, align 1 - sc = __sync_sub_and_fetch (&sc, uc); + sc = __sync_sub_and_fetch(&sc, uc); // CHECK: [[RES1:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i // CHECK: [[RET1:%.*]] = cir.binop(sub, [[RES1]], [[VAL1]]) : !u8i @@ -770,7 +770,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES1:%.*]] = atomicrmw sub ptr @uc, i8 [[VAL1]] seq_cst, align 1 // LLVM: [[RET1:%.*]] = sub i8 [[RES1]], [[VAL1]] // LLVM: store i8 [[RET1]], ptr @uc, align 1 - uc = __sync_sub_and_fetch (&uc, uc); + uc = __sync_sub_and_fetch(&uc, uc); // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i // CHECK: [[RES2:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i @@ -780,7 +780,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES2:%.*]] = atomicrmw sub ptr @ss, i16 [[CONV2]] seq_cst, align 2 // LLVM: [[RET2:%.*]] = sub i16 [[RES2]], [[CONV2]] // LLVM: store i16 [[RET2]], ptr @ss, align 2 - ss = __sync_sub_and_fetch (&ss, uc); + ss = __sync_sub_and_fetch(&ss, uc); // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i // CHECK: [[RES3:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i @@ -790,7 +790,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES3:%.*]] = atomicrmw sub ptr @us, i16 [[CONV3]] seq_cst, align 2 // LLVM: [[RET3:%.*]] = sub i16 [[RES3]], [[CONV3]] // LLVM: store i16 [[RET3]], ptr @us - us = __sync_sub_and_fetch (&us, uc); + us = __sync_sub_and_fetch(&us, uc); // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i // CHECK: [[RES4:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i @@ -800,7 +800,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES4:%.*]] = atomicrmw sub ptr @si, i32 [[CONV4]] seq_cst, align 4 // LLVM: [[RET4:%.*]] = sub i32 [[RES4]], [[CONV4]] // LLVM: store i32 [[RET4]], ptr @si, align 4 - si = __sync_sub_and_fetch (&si, uc); + si = __sync_sub_and_fetch(&si, uc); // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i // CHECK: [[RES5:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i @@ -810,7 +810,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES5:%.*]] = atomicrmw sub ptr @ui, i32 [[CONV5]] seq_cst, align 4 // LLVM: [[RET5:%.*]] = sub i32 [[RES5]], [[CONV5]] // LLVM: store i32 [[RET5]], ptr @ui, align 4 - ui = __sync_sub_and_fetch (&ui, uc); + ui = __sync_sub_and_fetch(&ui, uc); // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i // CHECK: [[RES6:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i @@ -820,7 +820,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES6:%.*]] = atomicrmw sub ptr @sll, i64 [[CONV6]] seq_cst, align 8 // LLVM: [[RET6:%.*]] = sub i64 [[RES6]], [[CONV6]] // LLVM: store i64 [[RET6]], ptr @sll, align 8 - sll = __sync_sub_and_fetch (&sll, uc); + sll = __sync_sub_and_fetch(&sll, uc); // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i // CHECK: [[RES7:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i @@ -830,7 +830,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES7:%.*]] = atomicrmw sub ptr @ull, i64 [[CONV7]] seq_cst, align 8 // LLVM: [[RET7:%.*]] = sub i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 - ull = __sync_sub_and_fetch (&ull, uc); + ull = __sync_sub_and_fetch(&ull, uc); // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i // CHECK: [[RES0:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i @@ -839,7 +839,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES0:%.*]] = atomicrmw and ptr @sc, i8 [[VAL0]] seq_cst, align 1 // LLVM: [[RET0:%.*]] = and i8 [[RES0]], [[VAL0]] // LLVM: store i8 [[RET0]], ptr @sc, align 1 - sc = __sync_and_and_fetch (&sc, uc); + sc = __sync_and_and_fetch(&sc, uc); // CHECK: [[RES1:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i // CHECK: [[RET1:%.*]] = cir.binop(and, [[RES1]], [[VAL1]]) : !u8i @@ -847,7 +847,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES1:%.*]] = atomicrmw and ptr @uc, i8 [[VAL1]] seq_cst, align 1 // LLVM: [[RET1:%.*]] = and i8 [[RES1]], [[VAL1]] // LLVM: store i8 [[RET1]], ptr @uc, align 1 - uc = __sync_and_and_fetch (&uc, uc); + uc = __sync_and_and_fetch(&uc, uc); // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i // CHECK: [[RES2:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i @@ -857,7 +857,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES2:%.*]] = atomicrmw and ptr @ss, i16 [[CONV2]] seq_cst, align 2 // LLVM: [[RET2:%.*]] = and i16 [[RES2]], [[CONV2]] // LLVM: store i16 [[RET2]], ptr @ss, align 2 - ss = __sync_and_and_fetch (&ss, uc); + ss = __sync_and_and_fetch(&ss, uc); // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i // CHECK: [[RES3:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i @@ -867,7 +867,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES3:%.*]] = atomicrmw and ptr @us, i16 [[CONV3]] seq_cst, align 2 // LLVM: [[RET3:%.*]] = and i16 [[RES3]], [[CONV3]] // LLVM: store i16 [[RET3]], ptr @us - us = __sync_and_and_fetch (&us, uc); + us = __sync_and_and_fetch(&us, uc); // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i // CHECK: [[RES4:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i @@ -877,7 +877,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES4:%.*]] = atomicrmw and ptr @si, i32 [[CONV4]] seq_cst, align 4 // LLVM: [[RET4:%.*]] = and i32 [[RES4]], [[CONV4]] // LLVM: store i32 [[RET4]], ptr @si, align 4 - si = __sync_and_and_fetch (&si, uc); + si = __sync_and_and_fetch(&si, uc); // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i // CHECK: [[RES5:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i @@ -887,7 +887,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES5:%.*]] = atomicrmw and ptr @ui, i32 [[CONV5]] seq_cst, align 4 // LLVM: [[RET5:%.*]] = and i32 [[RES5]], [[CONV5]] // LLVM: store i32 [[RET5]], ptr @ui, align 4 - ui = __sync_and_and_fetch (&ui, uc); + ui = __sync_and_and_fetch(&ui, uc); // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i // CHECK: [[RES6:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i @@ -897,7 +897,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES6:%.*]] = atomicrmw and ptr @sll, i64 [[CONV6]] seq_cst, align 8 // LLVM: [[RET6:%.*]] = and i64 [[RES6]], [[CONV6]] // LLVM: store i64 [[RET6]], ptr @sll, align 8 - sll = __sync_and_and_fetch (&sll, uc); + sll = __sync_and_and_fetch(&sll, uc); // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i // CHECK: [[RES7:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i @@ -907,7 +907,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES7:%.*]] = atomicrmw and ptr @ull, i64 [[CONV7]] seq_cst, align 8 // LLVM: [[RET7:%.*]] = and i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 - ull = __sync_and_and_fetch (&ull, uc); + ull = __sync_and_and_fetch(&ull, uc); // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i // CHECK: [[RES0:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i @@ -916,7 +916,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES0:%.*]] = atomicrmw or ptr @sc, i8 [[VAL0]] seq_cst, align 1 // LLVM: [[RET0:%.*]] = or i8 [[RES0]], [[VAL0]] // LLVM: store i8 [[RET0]], ptr @sc, align 1 - sc = __sync_or_and_fetch (&sc, uc); + sc = __sync_or_and_fetch(&sc, uc); // CHECK: [[RES1:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i // CHECK: [[RET1:%.*]] = cir.binop(or, [[RES1]], [[VAL1]]) : !u8i @@ -924,7 +924,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES1:%.*]] = atomicrmw or ptr @uc, i8 [[VAL1]] seq_cst, align 1 // LLVM: [[RET1:%.*]] = or i8 [[RES1]], [[VAL1]] // LLVM: store i8 [[RET1]], ptr @uc, align 1 - uc = __sync_or_and_fetch (&uc, uc); + uc = __sync_or_and_fetch(&uc, uc); // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i // CHECK: [[RES2:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i @@ -934,7 +934,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES2:%.*]] = atomicrmw or ptr @ss, i16 [[CONV2]] seq_cst, align 2 // LLVM: [[RET2:%.*]] = or i16 [[RES2]], [[CONV2]] // LLVM: store i16 [[RET2]], ptr @ss, align 2 - ss = __sync_or_and_fetch (&ss, uc); + ss = __sync_or_and_fetch(&ss, uc); // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i // CHECK: [[RES3:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i @@ -944,7 +944,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES3:%.*]] = atomicrmw or ptr @us, i16 [[CONV3]] seq_cst, align 2 // LLVM: [[RET3:%.*]] = or i16 [[RES3]], [[CONV3]] // LLVM: store i16 [[RET3]], ptr @us - us = __sync_or_and_fetch (&us, uc); + us = __sync_or_and_fetch(&us, uc); // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i // CHECK: [[RES4:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i @@ -954,7 +954,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES4:%.*]] = atomicrmw or ptr @si, i32 [[CONV4]] seq_cst, align 4 // LLVM: [[RET4:%.*]] = or i32 [[RES4]], [[CONV4]] // LLVM: store i32 [[RET4]], ptr @si, align 4 - si = __sync_or_and_fetch (&si, uc); + si = __sync_or_and_fetch(&si, uc); // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i // CHECK: [[RES5:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i @@ -964,7 +964,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES5:%.*]] = atomicrmw or ptr @ui, i32 [[CONV5]] seq_cst, align 4 // LLVM: [[RET5:%.*]] = or i32 [[RES5]], [[CONV5]] // LLVM: store i32 [[RET5]], ptr @ui, align 4 - ui = __sync_or_and_fetch (&ui, uc); + ui = __sync_or_and_fetch(&ui, uc); // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i // CHECK: [[RES6:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i @@ -974,7 +974,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES6:%.*]] = atomicrmw or ptr @sll, i64 [[CONV6]] seq_cst, align 8 // LLVM: [[RET6:%.*]] = or i64 [[RES6]], [[CONV6]] // LLVM: store i64 [[RET6]], ptr @sll, align 8 - sll = __sync_or_and_fetch (&sll, uc); + sll = __sync_or_and_fetch(&sll, uc); // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i // CHECK: [[RES7:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i @@ -984,7 +984,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES7:%.*]] = atomicrmw or ptr @ull, i64 [[CONV7]] seq_cst, align 8 // LLVM: [[RET7:%.*]] = or i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 - ull = __sync_or_and_fetch (&ull, uc); + ull = __sync_or_and_fetch(&ull, uc); // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i // CHECK: [[RES0:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i @@ -993,7 +993,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES0:%.*]] = atomicrmw xor ptr @sc, i8 [[VAL0]] seq_cst, align 1 // LLVM: [[RET0:%.*]] = xor i8 [[RES0]], [[VAL0]] // LLVM: store i8 [[RET0]], ptr @sc, align 1 - sc = __sync_xor_and_fetch (&sc, uc); + sc = __sync_xor_and_fetch(&sc, uc); // CHECK: [[RES1:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i // CHECK: [[RET1:%.*]] = cir.binop(xor, [[RES1]], [[VAL1]]) : !u8i @@ -1001,7 +1001,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES1:%.*]] = atomicrmw xor ptr @uc, i8 [[VAL1]] seq_cst, align 1 // LLVM: [[RET1:%.*]] = xor i8 [[RES1]], [[VAL1]] // LLVM: store i8 [[RET1]], ptr @uc, align 1 - uc = __sync_xor_and_fetch (&uc, uc); + uc = __sync_xor_and_fetch(&uc, uc); // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i // CHECK: [[RES2:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i @@ -1011,7 +1011,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES2:%.*]] = atomicrmw xor ptr @ss, i16 [[CONV2]] seq_cst, align 2 // LLVM: [[RET2:%.*]] = xor i16 [[RES2]], [[CONV2]] // LLVM: store i16 [[RET2]], ptr @ss, align 2 - ss = __sync_xor_and_fetch (&ss, uc); + ss = __sync_xor_and_fetch(&ss, uc); // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i // CHECK: [[RES3:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i @@ -1021,7 +1021,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES3:%.*]] = atomicrmw xor ptr @us, i16 [[CONV3]] seq_cst, align 2 // LLVM: [[RET3:%.*]] = xor i16 [[RES3]], [[CONV3]] // LLVM: store i16 [[RET3]], ptr @us - us = __sync_xor_and_fetch (&us, uc); + us = __sync_xor_and_fetch(&us, uc); // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i // CHECK: [[RES4:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i @@ -1031,7 +1031,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES4:%.*]] = atomicrmw xor ptr @si, i32 [[CONV4]] seq_cst, align 4 // LLVM: [[RET4:%.*]] = xor i32 [[RES4]], [[CONV4]] // LLVM: store i32 [[RET4]], ptr @si, align 4 - si = __sync_xor_and_fetch (&si, uc); + si = __sync_xor_and_fetch(&si, uc); // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i // CHECK: [[RES5:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i @@ -1041,7 +1041,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES5:%.*]] = atomicrmw xor ptr @ui, i32 [[CONV5]] seq_cst, align 4 // LLVM: [[RET5:%.*]] = xor i32 [[RES5]], [[CONV5]] // LLVM: store i32 [[RET5]], ptr @ui, align 4 - ui = __sync_xor_and_fetch (&ui, uc); + ui = __sync_xor_and_fetch(&ui, uc); // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i // CHECK: [[RES6:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i @@ -1051,7 +1051,7 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES6:%.*]] = atomicrmw xor ptr @sll, i64 [[CONV6]] seq_cst, align 8 // LLVM: [[RET6:%.*]] = xor i64 [[RES6]], [[CONV6]] // LLVM: store i64 [[RET6]], ptr @sll, align 8 - sll = __sync_xor_and_fetch (&sll, uc); + sll = __sync_xor_and_fetch(&sll, uc); // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i // CHECK: [[RES7:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i @@ -1061,6 +1061,98 @@ extern "C" void test_op_and_fetch (void) // LLVM: [[RES7:%.*]] = atomicrmw xor ptr @ull, i64 [[CONV7]] seq_cst, align 8 // LLVM: [[RET7:%.*]] = xor i64 [[RES7]], [[CONV7]] // LLVM: store i64 [[RET7]], ptr @ull, align 8 - ull = __sync_xor_and_fetch (&ull, uc); + ull = __sync_xor_and_fetch(&ull, uc); + // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i + // CHECK: [[RES0:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i + // CHECK: [[INTERM0:%.*]] = cir.binop(and, [[RES0]], [[VAL0]]) : !s8i + // CHECK: [[RET0:%.*]] = cir.unary(not, [[INTERM0]]) : !s8i, !s8i + // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES0:%.*]] = atomicrmw nand ptr @sc, i8 [[VAL0]] seq_cst, align 1 + // LLVM: [[INTERM0:%.*]] = and i8 [[RES0]], [[VAL0]] + // LLVM: [[RET0:%.*]] = xor i8 [[INTERM0]], -1 + // LLVM: store i8 [[RET0]], ptr @sc, align 1 + sc = __sync_nand_and_fetch(&sc, uc); + + // CHECK: [[RES1:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i + // CHECK: [[INTERM1:%.*]] = cir.binop(and, [[RES1]], [[VAL1]]) : !u8i + // CHECK: [[RET1:%.*]] = cir.unary(not, [[INTERM1]]) : !u8i, !u8i + // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[RES1:%.*]] = atomicrmw nand ptr @uc, i8 [[VAL1]] seq_cst, align 1 + // LLVM: [[INTERM1:%.*]] = and i8 [[RES1]], [[VAL1]] + // LLVM: [[RET1:%.*]] = xor i8 [[INTERM1]], -1 + // LLVM: store i8 [[RET1]], ptr @uc, align 1 + uc = __sync_nand_and_fetch(&uc, uc); + + // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i + // CHECK: [[RES2:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i + // CHECK: [[INTERM2:%.*]] = cir.binop(and, [[RES2]], [[VAL2]]) : !s16i + // CHECK: [[RET2:%.*]] = cir.unary(not, [[INTERM2]]) : !s16i, !s16i + // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16 + // LLVM: [[RES2:%.*]] = atomicrmw nand ptr @ss, i16 [[CONV2]] seq_cst, align 2 + // LLVM: [[INTERM2:%.*]] = and i16 [[RES2]], [[CONV2]] + // LLVM: [[RET2:%.*]] = xor i16 [[INTERM2]], -1 + // LLVM: store i16 [[RET2]], ptr @ss, align 2 + ss = __sync_nand_and_fetch(&ss, uc); + + // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i + // CHECK: [[RES3:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i + // CHECK: [[INTERM3:%.*]] = cir.binop(and, [[RES3]], [[VAL3]]) : !u16i + // CHECK: [[RET3:%.*]] = cir.unary(not, [[INTERM3]]) : !u16i, !u16i + // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16 + // LLVM: [[RES3:%.*]] = atomicrmw nand ptr @us, i16 [[CONV3]] seq_cst, align 2 + // LLVM: [[INTERM3:%.*]] = and i16 [[RES3]], [[CONV3]] + // LLVM: [[RET3:%.*]] = xor i16 [[INTERM3]], -1 + // LLVM: store i16 [[RET3]], ptr @us, align 2 + us = __sync_nand_and_fetch(&us, uc); + + // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i + // CHECK: [[RES4:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i + // CHECK: [[INTERM4:%.*]] = cir.binop(and, [[RES4]], [[VAL4]]) : !s32i + // CHECK: [[RET4:%.*]] = cir.unary(not, [[INTERM4]]) : !s32i, !s32i + // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32 + // LLVM: [[RES4:%.*]] = atomicrmw nand ptr @si, i32 [[CONV4]] seq_cst, align 4 + // LLVM: [[INTERM4:%.*]] = and i32 [[RES4]], [[CONV4]] + // LLVM: [[RET4:%.*]] = xor i32 [[INTERM4]], -1 + // LLVM: store i32 [[RET4]], ptr @si, align 4 + si = __sync_nand_and_fetch(&si, uc); + + // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i + // CHECK: [[RES5:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i + // CHECK: [[INTERM5:%.*]] = cir.binop(and, [[RES5]], [[VAL5]]) : !u32i + // CHECK: [[RET5:%.*]] = cir.unary(not, [[INTERM5]]) : !u32i, !u32i + // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32 + // LLVM: [[RES5:%.*]] = atomicrmw nand ptr @ui, i32 [[CONV5]] seq_cst, align 4 + // LLVM: [[INTERM5:%.*]] = and i32 [[RES5]], [[CONV5]] + // LLVM: [[RET5:%.*]] = xor i32 [[INTERM5]], -1 + // LLVM: store i32 [[RET5]], ptr @ui, align 4 + ui = __sync_nand_and_fetch(&ui, uc); + + // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i + // CHECK: [[RES6:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i + // CHECK: [[INTERM6:%.*]] = cir.binop(and, [[RES6]], [[VAL6]]) : !s64i + // CHECK: [[RET6:%.*]] = cir.unary(not, [[INTERM6]]) : !s64i, !s64i + // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64 + // LLVM: [[RES6:%.*]] = atomicrmw nand ptr @sll, i64 [[CONV6]] seq_cst, align 8 + // LLVM: [[INTERM6:%.*]] = and i64 [[RES6]], [[CONV6]] + // LLVM: [[RET6:%.*]] = xor i64 [[INTERM6]], -1 + // LLVM: store i64 [[RET6]], ptr @sll, align 8 + sll = __sync_nand_and_fetch(&sll, uc); + + // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i + // CHECK: [[RES7:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i + // CHECK: [[INTERM7:%.*]] = cir.binop(and, [[RES7]], [[VAL7]]) : !u64i + // CHECK: [[RET7:%.*]] = cir.unary(not, [[INTERM7]]) : !u64i, !u64i + // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1 + // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64 + // LLVM: [[RES7:%.*]] = atomicrmw nand ptr @ull, i64 [[CONV7]] seq_cst, align 8 + // LLVM: [[INTERM7:%.*]] = and i64 [[RES7]], [[CONV7]] + // LLVM: [[RET7:%.*]] = xor i64 [[INTERM7]], -1 + // LLVM: store i64 [[RET7]], ptr @ull, align 8 + ull = __sync_nand_and_fetch(&ull, uc); } From 2df2022c90e00c0eac542eba4078e79306155c7b Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Wed, 19 Feb 2025 22:06:12 +0100 Subject: [PATCH 2276/2301] [CIR][CIRGen][Builtin][Neon] Lower vgetq_lane_bf16, vduph f16 and fb16 (#1372) Lower vgetq_lane_bf16, vduph f16 and fb16 --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 3 ++- .../CodeGen/AArch64/bf16-getset-intrinsics.c | 20 +++++++++++------- .../AArch64/v8.2a-neon-intrinsics-generic.c | 21 +++++++++++-------- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 9f6f94f295be..4db27337ced6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3909,7 +3909,8 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vgetq_lane_bf16: case NEON::BI__builtin_neon_vduph_laneq_bf16: case NEON::BI__builtin_neon_vduph_laneq_f16: { - llvm_unreachable("NEON::BI__builtin_neon_vduph_laneq_f16 NYI"); + return builder.create(getLoc(E->getExprLoc()), Ops[0], + emitScalarExpr(E->getArg(1))); } case NEON::BI__builtin_neon_vcvt_bf16_f32: case NEON::BI__builtin_neon_vcvtq_low_bf16_f32: diff --git a/clang/test/CIR/CodeGen/AArch64/bf16-getset-intrinsics.c b/clang/test/CIR/CodeGen/AArch64/bf16-getset-intrinsics.c index a8f643e82c5f..103a88dbcfc8 100644 --- a/clang/test/CIR/CodeGen/AArch64/bf16-getset-intrinsics.c +++ b/clang/test/CIR/CodeGen/AArch64/bf16-getset-intrinsics.c @@ -140,14 +140,18 @@ bfloat16_t test_vget_lane_bf16(bfloat16x4_t v) { // LLVM: ret bfloat [[VGET_LANE]] } -// CHECK-LABEL: @test_vgetq_lane_bf16( -// CHECK-NEXT: entry: -// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x bfloat> [[V:%.*]], i32 7 -// CHECK-NEXT: ret bfloat [[VGETQ_LANE]] -// -// bfloat16_t test_vgetq_lane_bf16(bfloat16x8_t v) { -// return vgetq_lane_bf16(v, 7); -// } +bfloat16_t test_vgetq_lane_bf16(bfloat16x8_t v) { + return vgetq_lane_bf16(v, 7); + + // CIR-LABEL: vgetq_lane_bf16 + // CIR: [[TMP0:%.*]] = cir.const #cir.int<7> : !s32i + // CIR: [[TMP1:%.*]] = cir.vec.extract {{.*}}[{{.*}} : !s32i] : !cir.vector + + // LLVM-LABEL: test_vgetq_lane_bf16 + // LLVM-SAME: (<8 x bfloat> [[VEC:%.*]]) + // LLVM: [[VGET_LANE:%.*]] = extractelement <8 x bfloat> [[VEC]], i32 7 + // LLVM: ret bfloat [[VGET_LANE]] +} // CHECK-LABEL: @test_vset_lane_bf16( // CHECK-NEXT: entry: diff --git a/clang/test/CIR/CodeGen/AArch64/v8.2a-neon-intrinsics-generic.c b/clang/test/CIR/CodeGen/AArch64/v8.2a-neon-intrinsics-generic.c index 2de4862258a1..e06ac095d39b 100644 --- a/clang/test/CIR/CodeGen/AArch64/v8.2a-neon-intrinsics-generic.c +++ b/clang/test/CIR/CodeGen/AArch64/v8.2a-neon-intrinsics-generic.c @@ -471,15 +471,18 @@ // return vtrn2q_f16(a, b); // } -// CHECK-LABEL: define {{[^@]+}}@test_vduph_laneq_f16 -// CHECK-SAME: (<8 x half> noundef [[VEC:%.*]]) #[[ATTR0]] { -// CHECK-NEXT: entry: -// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x half> [[VEC]], i32 7 -// CHECK-NEXT: ret half [[VGETQ_LANE]] -// -// float16_t test_vduph_laneq_f16(float16x8_t vec) { -// return vduph_laneq_f16(vec, 7); -// } +float16_t test_vduph_laneq_f16(float16x8_t vec) { + return vduph_laneq_f16(vec, 7); + + // CIR-LABEL: vduph_laneq_f16 + // CIR: [[TMP0:%.*]] = cir.const #cir.int<7> : !s32i + // CIR: [[TMP1:%.*]] = cir.vec.extract {{.*}}[{{.*}} : !s32i] : !cir.vector + + // LLVM-LABEL: test_vduph_laneq_f16 + // LLVM-SAME: (<8 x half> [[VEC:%.*]]) + // LLVM: [[VGET_LANE:%.*]] = extractelement <8 x half> [[VEC]], i32 7 + // LLVM: ret half [[VGET_LANE]] +} float16_t test_vduph_lane_f16(float16x4_t vec) { return vduph_lane_f16(vec, 3); From e6165c836a23217e701f4cc9d67ee72c00550137 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Thu, 20 Feb 2025 22:04:47 +0300 Subject: [PATCH 2277/2301] [CIR][CIRGen] Add more support for __cxa_rethrow (#1343) This is the second part of [PR#1290](https://github.com/llvm/clangir/pull/1290), adding initial support for `__cxa_rethrow`. So, the last PR did not support statements that come after the `UnreachableOp` from the `rethrow`, we just ignored them, e.g: ``` struct S { S() {} }; void foo() { int r = 1; try { throw; S s; } catch (...) { ++r; } } ``` This PR fixes this. A few changes: - `rethrow` statements split their block into multiple blocks. - Tests with statements that come after the `rethrow` were added and old ones were modified. Concern: - The `ScopeOp` workaround still exists which I guess was one of the main concerns when we tried in the last PR. As usual, I am open to discussions on this and how to approach it better -:) --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 34 ++++++- clang/test/CIR/CodeGen/throw.cpp | 95 +++++++++++++++++++ 2 files changed, 126 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 4de62d46e5a5..56d447f97525 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2266,9 +2266,37 @@ void CIRGenItaniumCXXABI::emitRethrow(CIRGenFunction &CGF, bool isNoReturn) { auto &builder = CGF.getBuilder(); assert(CGF.currSrcLoc && "expected source location"); auto loc = *CGF.currSrcLoc; - builder.create(loc, mlir::Value{}, mlir::FlatSymbolRefAttr{}, - mlir::FlatSymbolRefAttr{}); - builder.create(loc); + + // The idea here is creating a separate block for the rethrow with an + // `UnreachableOp` as the terminator. So, we branch from the current block + // to the rethrow block and create a block for the remaining operations. + + mlir::Block *currentBlock = builder.getInsertionBlock(); + mlir::Region *region = currentBlock->getParent(); + + if (currentBlock->empty()) { + builder.create(loc, mlir::Value{}, + mlir::FlatSymbolRefAttr{}, + mlir::FlatSymbolRefAttr{}); + builder.create(loc); + } else { + mlir::Block *rethrowBlock = builder.createBlock(region); + builder.create(loc, mlir::Value{}, + mlir::FlatSymbolRefAttr{}, + mlir::FlatSymbolRefAttr{}); + builder.create(loc); + + builder.setInsertionPointToEnd(currentBlock); + builder.create(loc, rethrowBlock); + } + + mlir::Block *remBlock = builder.createBlock(region); + // This will be erased during codegen, it acts as a placeholder for the + // operations to be inserted (if any) + builder.create(loc, /*scopeBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + b.create(loc); + }); } else { llvm_unreachable("NYI"); } diff --git a/clang/test/CIR/CodeGen/throw.cpp b/clang/test/CIR/CodeGen/throw.cpp index 87b34d231767..dcaf2ce11f58 100644 --- a/clang/test/CIR/CodeGen/throw.cpp +++ b/clang/test/CIR/CodeGen/throw.cpp @@ -47,6 +47,8 @@ void refoo1() { // CIR: cir.call exception @_ZN1SC2Ev(%[[V2]]) : (!cir.ptr) -> () // CIR: cir.call exception @__cxa_rethrow() : () -> () // CIR: cir.unreachable +// CIR: ^bb1: // no predecessors +// CIR: cir.yield // CIR: } catch [type #cir.all { // CIR: %[[V3:.*]] = cir.catch_param -> !cir.ptr // CIR: %[[V4:.*]] = cir.load %[[V0]] : !cir.ptr, !s32i @@ -128,6 +130,8 @@ void refoo2() { // CIR: cir.call exception @_ZN1SC2Ev(%[[V5]]) : (!cir.ptr) -> () // CIR: cir.call exception @__cxa_rethrow() : () -> () // CIR: cir.unreachable +// CIR: ^bb1: // no predecessors +// CIR: cir.yield // CIR: } // CIR: cir.yield // CIR: } step { @@ -198,3 +202,94 @@ void refoo2() { // LLVM: store i32 %[[V38]], ptr {{.*}}, align 4 // LLVM: call void @__cxa_end_catch() // LLVM: br label {{.*}} + +void refoo3() { + int r = 1; + try { + throw; + S s; + } catch (...) { + ++r; + } +} + +// CIR-LABEL: @_Z6refoo3v() +// CIR: %[[V0:.*]] = cir.alloca !s32i, !cir.ptr, ["r", init] {alignment = 4 : i64} +// CIR: %[[V1:.*]] = cir.const #cir.int<1> : !s32i +// CIR: cir.store %[[V1]], %[[V0]] : !s32i, !cir.ptr +// CIR: cir.scope { +// CIR: %[[V2:.*]] = cir.alloca !ty_S, !cir.ptr, ["s", init] {alignment = 1 : i64} +// CIR: cir.try { +// CIR: cir.call exception @__cxa_rethrow() : () -> () +// CIR: cir.unreachable +// CIR: ^bb1: // no predecessors +// CIR: cir.call exception @_ZN1SC2Ev(%[[V2]]) : (!cir.ptr) -> () +// CIR: cir.yield +// CIR: } catch [type #cir.all { +// CIR: %[[V3:.*]] = cir.catch_param -> !cir.ptr +// CIR: %[[V4:.*]] = cir.load %[[V0]] : !cir.ptr, !s32i +// CIR: %[[V5:.*]] = cir.unary(inc, %[[V4]]) : !s32i, !s32i +// CIR: cir.store %[[V5]], %[[V0]] : !s32i, !cir.ptr +// CIR: cir.yield +// CIR: }] +// CIR: } +// CIR: cir.return +// CIR: } + +// LLVM: invoke void @__cxa_rethrow() +// LLVM: to label %[[B5:.*]] unwind label %[[B8:.*]] +// LLVM: [[B5]]: +// LLVM: unreachable +// LLVM: [[B6]]: +// LLVM: invoke void @_ZN1SC2Ev(ptr {{.*}}) +// LLVM: to label %[[B7:.*]] unwind label %[[B12:.*]] +// LLVM: [[B7]]: +// LLVM: br label %[[B21:.*]] +// LLVM: [[B8]]: +// LLVM: %[[V9:.*]] = landingpad { ptr, i32 } +// LLVM: catch ptr null +// LLVM: %[[V10:.*]] = extractvalue { ptr, i32 } %[[V9]], 0 +// LLVM: %[[V11:.*]] = extractvalue { ptr, i32 } %[[V9]], 1 +// LLVM: br label %[[B16:.*]] +// LLVM: [[B12]]: +// LLVM: %[[V13:.*]] = landingpad { ptr, i32 } +// LLVM: catch ptr null +// LLVM: %[[V14:.*]] = extractvalue { ptr, i32 } %[[V13]], 0 +// LLVM: %[[V15:.*]] = extractvalue { ptr, i32 } %[[V13]], 1 +// LLVM: br label %[[B16]] +// LLVM: [[B16]]: +// LLVM: %[[V17:.*]] = phi ptr [ %[[V14]], %[[B12]] ], [ %[[V10]], %[[B8]] ] +// LLVM: %[[V18:.*]] = call ptr @__cxa_begin_catch(ptr %[[V17]]) +// LLVM: %[[V19:.*]] = load i32, ptr {{.*}}, align 4 +// LLVM: %[[V20:.*]] = add i32 %[[V19]], 1 +// LLVM: store i32 %[[V20]], ptr {{.*}}, align 4 +// LLVM: call void @__cxa_end_catch() +// LLVM: br label %[[B21]] +// LLVM: [[B21]]: +// LLVM: br label {{.*}} + +void refoo4() { + try { + for (int i = 0; i < 5; i++) { + throw; + throw; + S s; + i++; + } + } catch (...) { + int r = 1; + } +} + +// CIR-LABEL: @_Z6refoo4v +// CIR: cir.call exception @__cxa_rethrow() : () -> () +// CIR-NEXT: unreachable +// CIR: cir.call exception @__cxa_rethrow() : () -> () +// CIR-NEXT: unreachable +// CIR: cir.call exception @_ZN1SC2Ev + +// LLVM: invoke void @__cxa_rethrow +// LLVM: unreachable +// LLVM: invoke void @__cxa_rethrow +// LLVM: unreachable +// LLVM: invoke void @_ZN1SC2Ev From de13703db16e1b0df05da9c7089f57ec99f1f76f Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Fri, 21 Feb 2025 03:18:48 +0800 Subject: [PATCH 2278/2301] [CIR][CIRGen][TBAA] Add CIR_TBAAStructAttr (#1365) This patch introduces `CIR_TBAAStructAttr`, which encodes the type and offset of each field in a struct, although it may lead to some duplication in `CIR`. If we manage `cir::TBAAStructAttr` by adding a new method to `ASTRecordDeclInterface`, it will also introduce duplication between `CIRGen` and LLVM lowering. --- .../clang/CIR/Dialect/IR/CIRTBAAAttrs.td | 75 ++++- clang/lib/CIR/CodeGen/CIRGenTBAA.cpp | 97 ++++++- clang/lib/CIR/CodeGen/CIRGenTBAA.h | 4 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 2 +- clang/test/CIR/CodeGen/tbaa-struct.cpp | 262 ++++++++++++++++++ 5 files changed, 436 insertions(+), 4 deletions(-) create mode 100644 clang/test/CIR/CodeGen/tbaa-struct.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td index 40e2d2d7f1d9..83d3d24c05c8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td @@ -50,9 +50,82 @@ def CIR_TBAATagAttr : CIR_Attr<"TBAATag", "tbaa_tag", [], "TBAAAttr"> { let assemblyFormat = "`<` struct(params) `>`"; } +def CIR_TBAAMemberAttr : CIR_Attr<"TBAAMember", "tbaa_member", []> { + let summary = "Attribute representing a member of a TBAA structured type."; + let parameters = (ins "TBAAAttr":$type_desc, + "int64_t":$offset); + let description = [{ + Define a TBAA struct attribute. + + Example: + ```mlir + !ty_StructS = !cir.struct + #tbaa_scalar = #cir.tbaa_scalar + #tbaa_scalar1 = #cir.tbaa_scalar + #tbaa_struct = #cir.tbaa_struct, <#tbaa_scalar, 4>}> + ``` + + See the following link for more details: + https://llvm.org/docs/LangRef.html#tbaa-metadata + }]; + + let assemblyFormat = "`<` params `>`"; +} + +def CIR_TBAAMemberAttrArray : ArrayRefParameter<"TBAAMemberAttr"> { + let summary = "Array of TBAAMemberAttr attributes."; + let printer = [{ + $_printer << '{'; + llvm::interleaveComma($_self, $_printer, [&](TBAAMemberAttr attr) { + $_printer.printStrippedAttrOrType(attr); + }); + $_printer << '}'; + }]; + let parser = [{ + [&]() -> llvm::FailureOr> { + using Result = llvm::SmallVector; + if ($_parser.parseLBrace()) + return mlir::failure(); + llvm::FailureOr result = mlir::FieldParser::parse($_parser); + if (failed(result)) + return mlir::failure(); + if ($_parser.parseRBrace()) + return mlir::failure(); + return result; + }() + }]; +} + +def CIR_TBAAStructAttr : CIR_Attr<"TBAAStruct", + "tbaa_struct", [], "TBAAAttr"> { + let summary = "Describes a struct type in TBAA"; + + let parameters = (ins StringRefParameter<> : $id, + CIR_TBAAMemberAttrArray:$members); + + let description = [{ + Define a TBAA struct attribute. + + Example: + ```mlir + !ty_StructS = !cir.struct + #tbaa_scalar = #cir.tbaa_scalar + #tbaa_scalar1 = #cir.tbaa_scalar + // CIR_TBAAStructAttr + #tbaa_struct = #cir.tbaa_struct, <#tbaa_scalar, 4>}> + ``` + + See the following link for more details: + https://llvm.org/docs/LangRef.html#tbaa-metadata + }]; + + let assemblyFormat = "`<` struct(params) `>`"; +} + def CIR_AnyTBAAAttr : AnyAttrOf<[ CIR_TBAAAttr, CIR_TBAAOmnipotentChar, - CIR_TBAAScalarAttr, + CIR_TBAAScalarAttr, + CIR_TBAAStructAttr, CIR_TBAATagAttr ]>; diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp index fa8325c654cb..f2d46a30cc53 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp @@ -1,4 +1,5 @@ #include "CIRGenTBAA.h" +#include "CIRGenCXXABI.h" #include "CIRGenTypes.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/MLIRContext.h" @@ -204,7 +205,7 @@ cir::TBAAAttr CIRGenTBAA::getTypeInfo(clang::QualType qty) { // function. if (isValidBaseType(qty)) { assert(!cir::MissingFeatures::tbaaTagForStruct()); - return tbaa_NYI(mlirContext); + return getValidBaseTypeInfo(qty); } const clang::Type *ty = astContext.getCanonicalType(qty).getTypePtr(); @@ -248,9 +249,101 @@ mlir::ArrayAttr CIRGenTBAA::getTBAAStructInfo(clang::QualType qty) { } cir::TBAAAttr CIRGenTBAA::getBaseTypeInfo(clang::QualType qty) { - return tbaa_NYI(mlirContext); + return isValidBaseType(qty) ? getValidBaseTypeInfo(qty) : nullptr; +} + +cir::TBAAAttr CIRGenTBAA::getValidBaseTypeInfo(clang::QualType qty) { + assert(isValidBaseType(qty) && "Must be a valid base type"); + + const clang::Type *ty = astContext.getCanonicalType(qty).getTypePtr(); + + // nullptr is a valid value in the cache, so use find rather than [] + auto iter = baseTypeMetadataCache.find(ty); + if (iter != baseTypeMetadataCache.end()) + return iter->second; + + // First calculate the metadata, before recomputinyg the insertion point, as + // the helper can recursively call us. + auto typeNode = getBaseTypeInfoHelper(ty); + LLVM_ATTRIBUTE_UNUSED auto inserted = + baseTypeMetadataCache.insert({ty, typeNode}); + assert(inserted.second && "BaseType metadata was already inserted"); + + return typeNode; } +cir::TBAAAttr CIRGenTBAA::getBaseTypeInfoHelper(const clang::Type *ty) { + using namespace clang; + if (auto *tty = mlir::dyn_cast(ty)) { + const clang::RecordDecl *rd = tty->getDecl()->getDefinition(); + const ASTRecordLayout &layout = astContext.getASTRecordLayout(rd); + SmallVector fields; + if (const CXXRecordDecl *cxxrd = dyn_cast(rd)) { + // Handle C++ base classes. Non-virtual bases can treated a kind of + // field. Virtual bases are more complex and omitted, but avoid an + // incomplete view for NewStructPathTBAA. + if (codeGenOpts.NewStructPathTBAA && cxxrd->getNumVBases() != 0) + return nullptr; + for (const CXXBaseSpecifier &cxxBaseSpecifier : cxxrd->bases()) { + if (cxxBaseSpecifier.isVirtual()) + continue; + QualType baseQTy = cxxBaseSpecifier.getType(); + const CXXRecordDecl *baseRD = baseQTy->getAsCXXRecordDecl(); + if (baseRD->isEmpty()) + continue; + auto typeNode = isValidBaseType(baseQTy) ? getValidBaseTypeInfo(baseQTy) + : getTypeInfo(baseQTy); + if (!typeNode) + return nullptr; + uint64_t offset = layout.getBaseClassOffset(baseRD).getQuantity(); + [[maybe_unused]] uint64_t size = + astContext.getASTRecordLayout(baseRD).getDataSize().getQuantity(); + fields.push_back( + cir::TBAAMemberAttr::get(mlirContext, typeNode, offset)); + } + // The order in which base class subobjects are allocated is + // unspecified, so may differ from declaration order. In particular, + // Itanium ABI will allocate a primary base first. Since we exclude + // empty subobjects, the objects are not overlapping and their offsets + // are unique. + llvm::sort(fields, [](const cir::TBAAMemberAttr &lhs, + const cir::TBAAMemberAttr &rhs) { + return lhs.getOffset() < rhs.getOffset(); + }); + } + for (FieldDecl *field : rd->fields()) { + if (field->isZeroSize(astContext) || field->isUnnamedBitField()) + continue; + QualType fieldQTy = field->getType(); + auto typeNode = isValidBaseType(fieldQTy) ? getValidBaseTypeInfo(fieldQTy) + : getTypeInfo(fieldQTy); + if (!typeNode) + return nullptr; + + uint64_t bitOffset = layout.getFieldOffset(field->getFieldIndex()); + uint64_t offset = astContext.toCharUnitsFromBits(bitOffset).getQuantity(); + [[maybe_unused]] uint64_t size = + astContext.getTypeSizeInChars(fieldQTy).getQuantity(); + fields.push_back(cir::TBAAMemberAttr::get(mlirContext, typeNode, offset)); + } + + SmallString<256> outName; + if (features.CPlusPlus) { + // Don't use the mangler for C code. + llvm::raw_svector_ostream out(outName); + types.getCXXABI().getMangleContext().mangleCanonicalTypeName( + QualType(ty, 0), out); + } else { + outName = rd->getName(); + } + if (codeGenOpts.NewStructPathTBAA) { + assert(!cir::MissingFeatures::tbaaNewStructPath()); + return nullptr; + } + return cir::TBAAStructAttr::get(mlirContext, outName, fields); + } + return nullptr; +} cir::TBAAAttr CIRGenTBAA::getAccessTagInfo(TBAAAccessInfo tbaaInfo) { assert(!tbaaInfo.isIncomplete() && "Access to an object of an incomplete type!"); diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.h b/clang/lib/CIR/CodeGen/CIRGenTBAA.h index 57a2c0fae226..301adf414139 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.h +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.h @@ -105,6 +105,7 @@ class CIRGenTBAA { [[maybe_unused]] const clang::LangOptions &features; llvm::DenseMap metadataCache; + llvm::DenseMap baseTypeMetadataCache; cir::TBAAAttr getChar(); @@ -113,6 +114,9 @@ class CIRGenTBAA { cir::TBAAAttr getTypeInfoHelper(clang::QualType qty); cir::TBAAAttr getScalarTypeInfo(clang::QualType qty); + cir::TBAAAttr getValidBaseTypeInfo(clang::QualType qty); + cir::TBAAAttr getBaseTypeInfoHelper(const clang::Type *ty); + public: CIRGenTBAA(mlir::MLIRContext *mlirContext, clang::ASTContext &astContext, CIRGenTypes &types, mlir::ModuleOp moduleOp, diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 07ec1721c124..a0a7f88a0f90 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -109,7 +109,7 @@ struct CIROpAsmDialectInterface : public OpAsmDialectInterface { } return TypeSwitch(attr) .Case([&](auto attr) { + cir::TBAAStructAttr, cir::TBAATagAttr>([&](auto attr) { os << decltype(attr)::getMnemonic(); return AliasResult::OverridableAlias; }) diff --git a/clang/test/CIR/CodeGen/tbaa-struct.cpp b/clang/test/CIR/CodeGen/tbaa-struct.cpp new file mode 100644 index 000000000000..4b5916a9d3f8 --- /dev/null +++ b/clang/test/CIR/CodeGen/tbaa-struct.cpp @@ -0,0 +1,262 @@ +// This is inspired from clang/test/CodeGen/tbaa.cpp, with both CIR and LLVM checks. +// g13 is not supported due to DiscreteBitFieldABI is NYI. +// see clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp CIRRecordLowering::accumulateBitFields + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// CIR: #tbaa[[NYI:.*]] = #cir.tbaa +// CIR: #tbaa[[CHAR:.*]] = #cir.tbaa_omnipotent_char +// CIR: #tbaa[[INT:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[SHORT:.*]] = #cir.tbaa_scalar +// CIR: #tbaa[[STRUCT_six:.*]] = #cir.tbaa_struct, <#tbaa[[CHAR]], 4>, <#tbaa[[CHAR]], 5>}> +// CIR: #tbaa[[STRUCT_StructA:.*]] = #cir.tbaa_struct, <#tbaa[[INT]], 4>, <#tbaa[[SHORT]], 8>, <#tbaa[[INT]], 12>}> +// CIR: #tbaa[[STRUCT_StructS:.*]] = #cir.tbaa_struct, <#tbaa[[INT]], 4>}> +// CIR: #tbaa[[STRUCT_StructS2:.*]] = #cir.tbaa_struct, <#tbaa[[INT]], 4>}> +// CIR: #tbaa[[TAG_six_b:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[TAG_StructA_f32:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[TAG_StructA_f16:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[TAG_StructS_f32:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[TAG_StructS_f16:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[TAG_StructS2_f32:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[TAG_StructS2_f16:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[STRUCT_StructB:.*]] = #cir.tbaa_struct, <#tbaa[[STRUCT_StructA]], 4>, <#tbaa[[INT]], 20>}> +// CIR: #tbaa[[TAG_StructB_a_f32:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[TAG_StructB_a_f16:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[TAG_StructB_f32:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[TAG_StructB_a_f32_2:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[STRUCT_StructC:.*]] = #cir.tbaa_struct, <#tbaa[[STRUCT_StructB]], 4>, <#tbaa[[INT]], 28>}> +// CIR: #tbaa[[STRUCT_StructD:.*]] = #cir.tbaa_struct, <#tbaa[[STRUCT_StructB]], 4>, <#tbaa[[INT]], 28>, <#tbaa[[CHAR]], 32>}> +// CIR: #tbaa[[TAG_StructC_b_a_f32:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[TAG_StructD_b_a_f32:.*]] = #cir.tbaa_tag + + +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; +typedef struct +{ + uint16_t f16; + uint32_t f32; + uint16_t f16_2; + uint32_t f32_2; +} StructA; +typedef struct +{ + uint16_t f16; + StructA a; + uint32_t f32; +} StructB; +typedef struct +{ + uint16_t f16; + StructB b; + uint32_t f32; +} StructC; +typedef struct +{ + uint16_t f16; + StructB b; + uint32_t f32; + uint8_t f8; +} StructD; + +typedef struct +{ + uint16_t f16; + uint32_t f32; +} StructS; +typedef struct +{ + uint16_t f16; + uint32_t f32; +} StructS2; + +uint32_t g(uint32_t *s, StructA *A, uint64_t count) { + // CIR-LABEL: cir.func @_Z1g + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[INT]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructA_f32]]) + *s = 1; + A->f32 = 4; + return *s; +} + +uint32_t g2(uint32_t *s, StructA *A, uint64_t count) { + // CIR-LABEL: cir.func @_Z2g2 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[INT]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u16i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u16i, !cir.ptr tbaa(#tbaa[[TAG_StructA_f16]]) + *s = 1; + A->f16 = 4; + return *s; +} + +uint32_t g3(StructA *A, StructB *B, uint64_t count) { + // CIR-LABEL: cir.func @_Z2g3 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructA_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructB_a_f32]]) + A->f32 = 1; + B->a.f32 = 4; + return A->f32; +} + +uint32_t g4(StructA *A, StructB *B, uint64_t count) { + // CIR-LABEL: cir.func @_Z2g4 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructA_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u16i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u16i, !cir.ptr tbaa(#tbaa[[TAG_StructB_a_f16]]) + A->f32 = 1; + B->a.f16 = 4; + return A->f32; +} + +uint32_t g5(StructA *A, StructB *B, uint64_t count) { + // CIR-LABEL: cir.func @_Z2g5 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructA_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructB_f32]]) + A->f32 = 1; + B->f32 = 4; + return A->f32; +} + +uint32_t g6(StructA *A, StructB *B, uint64_t count) { + // CIR-LABEL: cir.func @_Z2g6 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructA_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructB_a_f32_2]]) + A->f32 = 1; + B->a.f32_2 = 4; + return A->f32; +} + +uint32_t g7(StructA *A, StructS *S, uint64_t count) { + // CIR-LABEL: cir.func @_Z2g7 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructA_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructS_f32]]) + A->f32 = 1; + S->f32 = 4; + return A->f32; +} + +uint32_t g8(StructA *A, StructS *S, uint64_t count) { + // CIR-LABEL: cir.func @_Z2g8 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructA_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u16i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u16i, !cir.ptr tbaa(#tbaa[[TAG_StructS_f16]]) + A->f32 = 1; + S->f16 = 4; + return A->f32; +} + +uint32_t g9(StructS *S, StructS2 *S2, uint64_t count) { + // CIR-LABEL: cir.func @_Z2g9 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructS_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructS2_f32]]) + S->f32 = 1; + S2->f32 = 4; + return S->f32; +} + +uint32_t g10(StructS *S, StructS2 *S2, uint64_t count) { + // CIR-LABEL: cir.func @_Z3g10 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructS_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u16i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u16i, !cir.ptr tbaa(#tbaa[[TAG_StructS2_f16]]) + S->f32 = 1; + S2->f16 = 4; + return S->f32; +} + +uint32_t g11(StructC *C, StructD *D, uint64_t count) { + // CIR-LABEL: cir.func @_Z3g11 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructC_b_a_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructD_b_a_f32]]) + C->b.a.f32 = 1; + D->b.a.f32 = 4; + return C->b.a.f32; +} + +uint32_t g12(StructC *C, StructD *D, uint64_t count) { + // CIR-LABEL: cir.func @_Z3g12 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructB_a_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructB_a_f32]]) + StructB *b1 = &(C->b); + StructB *b2 = &(D->b); + // b1, b2 have different context. + b1->a.f32 = 1; + b2->a.f32 = 4; + return b1->a.f32; +} + +struct six { + char a; + int :0; + char b; + char c; +}; +char g14(struct six *a, struct six *b) { + // CIR-LABEL: cir.func @_Z3g14 + // CIR: %[[TMP1:.*]] = cir.load %{{.*}} : !cir.ptr>, !cir.ptr + // CIR: %[[TMP2:.*]] = cir.get_member %[[TMP1]][2] {name = "b"} : !cir.ptr -> !cir.ptr + // CIR: %[[TMP3:.*]] = cir.load %[[TMP2]] : !cir.ptr, !s8i tbaa(#tbaa[[TAG_six_b]]) + return a->b; +} + +// Types that differ only by name may alias. +typedef StructS StructS3; +uint32_t g15(StructS *S, StructS3 *S3, uint64_t count) { + // CIR-LABEL: cir.func @_Z3g15 + // CIR: %[[INT_1:.*]] = cir.const #cir.int<1> : !s32i + // CIR: %[[UINT_1:.*]] = cir.cast(integral, %[[INT_1]] : !s32i), !u32i + // CIR: cir.store %[[UINT_1]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructS_f32]]) + // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i + // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i + // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructS_f32]]) + S->f32 = 1; + S3->f32 = 4; + return S->f32; +} From d48d459983bfa79b0c6d833eedd8b9ff7dc87de8 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Thu, 20 Feb 2025 22:23:29 +0300 Subject: [PATCH 2279/2301] [CIR][CodeGen] Fix extra Yieldop case during try-catch generation (#1370) Currently, trying to generate CIR for the following code snippet `yield.cpp` fails. Using `bin/clang++ yield.cpp -Xclang -fclangir -Xclang -emit-cir -S -o -`: ``` struct S { S() {}; int a; }; void foo() { try { S s; } catch (...) { foo(); } } ``` The error: ``` loc("yield.cpp":6:6): error: 'cir.yield' op must be the last operation in the parent block ``` There is an extra YieldOp! The CIR dump before verification looks something like: ``` "cir.scope"() ({ %0 = "cir.alloca"() <{alignment = 4 : i64, allocaType = !cir.struct} #cir.record.decl.ast>, ast = #cir.var.decl.ast, init, name = "s"}> : () -> !cir.ptr} #cir.record.decl.ast>> "cir.try"() <{catch_types = [#cir.all]}> ({ "cir.call"(%0) <{callee = @_ZN1SC1Ev, calling_conv = 1 : i32, exception, extra_attrs = #cir, side_effect = 1 : i32}> ({ "cir.yield"() : () -> () }) : (!cir.ptr} #cir.record.decl.ast>>) -> () "cir.yield"() : () -> () }, { %1 = "cir.catch_param"() : () -> !cir.ptr "cir.call"() <{ast = #cir.call.expr.ast, callee = @_Z3foov, calling_conv = 1 : i32, exception, extra_attrs = #cir, side_effect = 1 : i32}> ({ "cir.yield"() : () -> () "cir.yield"() : () -> () <--- **DUPLICATE** }) : () -> () "cir.yield"() : () -> () }) : () -> () "cir.yield"() : () -> () }, { }) : () -> () ``` This PR adds a check for an already existing terminator before creating a YieldOp during the cleanup. --- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 3 +- clang/test/CIR/CodeGen/try-catch.cpp | 41 +++++++++++++++---- 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 56d447f97525..f7f576bb4751 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -718,7 +718,8 @@ struct CallEndCatch final : EHScopeStack::Cleanup { // here. For CIR, just let it pass since the cleanup is going // to be emitted on a later pass when lowering the catch region. // CGF.EmitRuntimeCallOrTryCall(getEndCatchFn(CGF.CGM)); - CGF.getBuilder().create(*CGF.currSrcLoc); + if (!CGF.getBuilder().getBlock()->mightHaveTerminator()) + CGF.getBuilder().create(*CGF.currSrcLoc); } }; } // namespace diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 8945bc33b739..0bcca60549b9 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -82,26 +82,49 @@ unsigned long long tc3() { return z; } -// CIR: cir.func @_Z3tc4v() +// CHECK: cir.func @_Z3tc4v() unsigned long long tc4() { int x = 50, y = 3; unsigned long long z; - // CIR-NOT: cir.try + // CHECK-NOT: cir.try try { int a = 4; a++; - // CIR: cir.scope { - // CIR: cir.alloca !s32i, !cir.ptr, ["a", init] - // CIR-NOT: cir.alloca !cir.ptr - // CIR: cir.const #cir.int<4> : !s32i - // CIR: cir.unary(inc, - // CIR: cir.store %11, %8 : !s32i, !cir.ptr + // CHECK: cir.scope { + // CHECK: cir.alloca !s32i, !cir.ptr, ["a", init] + // CHECK-NOT: cir.alloca !cir.ptr + // CHECK: cir.const #cir.int<4> : !s32i + // CHECK: cir.unary(inc, + // CHECK: cir.store %11, %8 : !s32i, !cir.ptr } catch (int idx) { z = 98; idx++; } return z; -} \ No newline at end of file +} + +struct S { + S() {}; + int a; +}; + +// CHECK: cir.func @_Z3tc5v() +void tc5() { + try { + S s; + } catch (...) { + tc5(); + } +} + +// CHECK: cir.try { +// CHECK: cir.call exception @_ZN1SC2Ev({{.*}}) : (!cir.ptr) -> () +// CHECK: cir.yield +// CHECK: } catch [type #cir.all { +// CHECK: {{.*}} = cir.catch_param -> !cir.ptr +// CHECK: cir.call exception @_Z3tc5v() : () -> () +// CHECK: cir.yield +// CHECK: }] From 6492b9bf277913a6c3a9f60c0976a6961176a90c Mon Sep 17 00:00:00 2001 From: Letu Ren Date: Fri, 21 Feb 2025 03:24:47 +0800 Subject: [PATCH 2280/2301] [CIR][CIRGen] handle `__builtin_elementwise_exp` (#1376) Original Clang's implementation: https://github.com/llvm/clangir/blob/2df2022c90e00c0eac542eba4078e79306155c7b/clang/lib/CodeGen/CGBuiltin.cpp#L4131-L4133 https://github.com/llvm/clangir/blob/2df2022c90e00c0eac542eba4078e79306155c7b/clang/lib/CodeGen/CGBuiltin.cpp#L762-L776 Compared with non-elementwise exp https://github.com/llvm/clangir/blob/2df2022c90e00c0eac542eba4078e79306155c7b/clang/lib/CodeGen/CGBuiltin.cpp#L3006-L3016 https://github.com/llvm/clangir/blob/2df2022c90e00c0eac542eba4078e79306155c7b/clang/lib/CodeGen/CGBuiltin.cpp#L669-L685 elementwise version doesn't handle constrained situation. I'm not sure whether it is intended. For renaming, it is to match original clang's implementation closely. Resolves: https://github.com/llvm/clangir/issues/1375 --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 48 +++++++++++-------- clang/test/CIR/CodeGen/builtins-elementwise.c | 21 ++++++++ 2 files changed, 50 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 24e929ea4b83..987f20629bea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -63,7 +63,8 @@ static mlir::Value tryUseTestFPKind(CIRGenFunction &CGF, unsigned BuiltinID, } template -static RValue emitUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { +static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &CGF, + const CallExpr &E) { auto Arg = CGF.emitScalarExpr(E.getArg(0)); CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, &E); @@ -75,6 +76,14 @@ static RValue emitUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { return RValue::get(Call->getResult(0)); } +template +static RValue emitUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) { + auto Arg = CGF.emitScalarExpr(E.getArg(0)); + auto Call = + CGF.getBuilder().create(Arg.getLoc(), Arg.getType(), Arg); + return RValue::get(Call->getResult(0)); +} + template static RValue emitUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &CGF, const CallExpr &E) { @@ -600,7 +609,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_ceilf16: case Builtin::BI__builtin_ceill: case Builtin::BI__builtin_ceilf128: - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIcopysign: case Builtin::BIcopysignf: @@ -623,7 +632,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_cosl: case Builtin::BI__builtin_cosf128: assert(!cir::MissingFeatures::fastMathFlags()); - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIcosh: case Builtin::BIcoshf: @@ -644,7 +653,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_expl: case Builtin::BI__builtin_expf128: assert(!cir::MissingFeatures::fastMathFlags()); - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIexp2: case Builtin::BIexp2f: @@ -655,7 +664,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_exp2l: case Builtin::BI__builtin_exp2f128: assert(!cir::MissingFeatures::fastMathFlags()); - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BI__builtin_exp10: case Builtin::BI__builtin_exp10f: @@ -672,7 +681,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_fabsf16: case Builtin::BI__builtin_fabsl: case Builtin::BI__builtin_fabsf128: - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIfloor: case Builtin::BIfloorf: @@ -682,7 +691,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_floorf16: case Builtin::BI__builtin_floorl: case Builtin::BI__builtin_floorf128: - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIfma: case Builtin::BIfmaf: @@ -745,7 +754,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_logl: case Builtin::BI__builtin_logf128: assert(!cir::MissingFeatures::fastMathFlags()); - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIlog10: case Builtin::BIlog10f: @@ -756,7 +765,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log10l: case Builtin::BI__builtin_log10f128: assert(!cir::MissingFeatures::fastMathFlags()); - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIlog2: case Builtin::BIlog2f: @@ -767,7 +776,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_log2l: case Builtin::BI__builtin_log2f128: assert(!cir::MissingFeatures::fastMathFlags()); - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BInearbyint: case Builtin::BInearbyintf: @@ -776,7 +785,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_nearbyintf: case Builtin::BI__builtin_nearbyintl: case Builtin::BI__builtin_nearbyintf128: - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIpow: case Builtin::BIpowf: @@ -800,7 +809,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_rintf16: case Builtin::BI__builtin_rintl: case Builtin::BI__builtin_rintf128: - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIround: case Builtin::BIroundf: @@ -810,7 +819,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_roundf16: case Builtin::BI__builtin_roundl: case Builtin::BI__builtin_roundf128: - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIroundeven: case Builtin::BIroundevenf: @@ -831,7 +840,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sinl: case Builtin::BI__builtin_sinf128: assert(!cir::MissingFeatures::fastMathFlags()); - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIsqrt: case Builtin::BIsqrtf: @@ -842,7 +851,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_sqrtl: case Builtin::BI__builtin_sqrtf128: assert(!cir::MissingFeatures::fastMathFlags()); - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BI__builtin_elementwise_sqrt: llvm_unreachable("BI__builtin_elementwise_sqrt NYI"); @@ -875,7 +884,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_truncf16: case Builtin::BI__builtin_truncl: case Builtin::BI__builtin_truncf128: - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); case Builtin::BIlround: case Builtin::BIlroundf: @@ -1344,7 +1353,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, if (mlir::isa(cirTy)) eltTy = mlir::cast(cirTy).getEltType(); if (mlir::isa(eltTy)) { - return emitUnaryFPBuiltin(*this, *E); + return emitUnaryMaybeConstrainedFPBuiltin(*this, *E); } llvm_unreachable("unsupported type for BI__builtin_elementwise_abs"); } @@ -1365,8 +1374,9 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_elementwise_atan2 NYI"); case Builtin::BI__builtin_elementwise_ceil: llvm_unreachable("BI__builtin_elementwise_ceil NYI"); - case Builtin::BI__builtin_elementwise_exp: - llvm_unreachable("BI__builtin_elementwise_exp NYI"); + case Builtin::BI__builtin_elementwise_exp: { + return emitUnaryFPBuiltin(*this, *E); + } case Builtin::BI__builtin_elementwise_exp2: llvm_unreachable("BI__builtin_elementwise_exp2 NYI"); case Builtin::BI__builtin_elementwise_log: diff --git a/clang/test/CIR/CodeGen/builtins-elementwise.c b/clang/test/CIR/CodeGen/builtins-elementwise.c index 80e238e0c445..c57c975115e8 100644 --- a/clang/test/CIR/CodeGen/builtins-elementwise.c +++ b/clang/test/CIR/CodeGen/builtins-elementwise.c @@ -57,3 +57,24 @@ void test_builtin_elementwise_acos(float f, double d, vfloat4 vf4, // LLVM: {{%.*}} = call <4 x double> @llvm.acos.v4f64(<4 x double> {{%.*}}) vd4 = __builtin_elementwise_acos(vd4); } + +void test_builtin_elementwise_exp(float f, double d, vfloat4 vf4, + vdouble4 vd4) { + // CIR-LABEL: test_builtin_elementwise_exp + // LLVM-LABEL: test_builtin_elementwise_exp + // CIR: {{%.*}} = cir.exp {{%.*}} : !cir.float + // LLVM: {{%.*}} = call float @llvm.exp.f32(float {{%.*}}) + f = __builtin_elementwise_exp(f); + + // CIR: {{%.*}} = cir.exp {{%.*}} : !cir.double + // LLVM: {{%.*}} = call double @llvm.exp.f64(double {{%.*}}) + d = __builtin_elementwise_exp(d); + + // CIR: {{%.*}} = cir.exp {{%.*}} : !cir.vector + // LLVM: {{%.*}} = call <4 x float> @llvm.exp.v4f32(<4 x float> {{%.*}}) + vf4 = __builtin_elementwise_exp(vf4); + + // CIR: {{%.*}} = cir.exp {{%.*}} : !cir.vector + // LLVM: {{%.*}} = call <4 x double> @llvm.exp.v4f64(<4 x double> {{%.*}}) + vd4 = __builtin_elementwise_exp(vd4); +} From 5f68f6c3f84b95358c338a8636a7e2880db27c71 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Thu, 20 Feb 2025 21:44:40 +0100 Subject: [PATCH 2281/2301] [CIR][CIRGen][Builtin][Neon] Lower vrsrad_n u64 and s64 (#1379) Lower vrsrad_n u64 and s64 --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 16 ++++++- clang/test/CIR/CodeGen/AArch64/neon.c | 44 +++++++++++++------ 2 files changed, 45 insertions(+), 15 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 4db27337ced6..dbd627ffe4c1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3840,7 +3840,21 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vrsrad_n_u64: case NEON::BI__builtin_neon_vrsrad_n_s64: { - llvm_unreachable("NEON::BI__builtin_neon_vrsrad_n_s64 NYI"); + cir::IntType IntType = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64 + ? builder.getUInt64Ty() + : builder.getSInt64Ty(); + Ops[1] = builder.createBitcast(Ops[1], IntType); + Ops.push_back(builder.createNeg(emitScalarExpr(E->getArg(2)))); + + const StringRef Intrinsic = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64 + ? "aarch64.neon.urshl" + : "aarch64.neon.srshl"; + + llvm::SmallVector args = { + Ops[1], builder.createIntCast(Ops[2], IntType)}; + Ops[1] = emitNeonCall(builder, {IntType, IntType}, args, Intrinsic, IntType, + getLoc(E->getExprLoc())); + return builder.createAdd(Ops[0], builder.createBitcast(Ops[1], IntType)); } case NEON::BI__builtin_neon_vshld_n_s64: case NEON::BI__builtin_neon_vshld_n_u64: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 998891bfb998..c14f2ba0c2d1 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -15327,13 +15327,21 @@ uint64x1_t test_vsra_n_u64(uint64x1_t a, uint64x1_t b) { // LLVM: ret <1 x i64> [[TMP4]] } -// NYI-LABEL: @test_vrsrad_n_s64( -// NYI: [[TMP0:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 %b, i64 -63) -// NYI: [[TMP1:%.*]] = add i64 %a, [[TMP0]] -// NYI: ret i64 [[TMP1]] -// int64_t test_vrsrad_n_s64(int64_t a, int64_t b) { -// return (int64_t)vrsrad_n_s64(a, b, 63); -// } +int64_t test_vrsrad_n_s64(int64_t a, int64_t b) { + return (int64_t)vrsrad_n_s64(a, b, 63); + + // CIR-LABEL: vrsrad_n_s64 + // CIR: [[TMP0:%.*]] = cir.const #cir.int<63> : !s32i + // CIR: [[TMP1:%.*]] = cir.unary(minus, [[TMP0]]) : !s32i, !s32i + // CIR: [[TMP2:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !s64i + // CIR: [[TMP3:%.*]] = cir.llvm.intrinsic "aarch64.neon.srshl" {{.*}}, [[TMP2]] : (!s64i, !s64i) -> !s64i + // CIR: [[TMP4:%.*]] = cir.binop(add, {{.*}}, [[TMP3]]) : !s64i + + // LLVM-LABEL: @test_vrsrad_n_s64( + // LLVM: [[TMP0:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 %1, i64 -63) + // LLVM: [[TMP1:%.*]] = add i64 %0, [[TMP0]] + // LLVM: ret i64 [[TMP1]] +} int64x1_t test_vrsra_n_s64(int64x1_t a, int64x1_t b) { return vrsra_n_s64(a, b, 1); @@ -15355,13 +15363,21 @@ int64x1_t test_vrsra_n_s64(int64x1_t a, int64x1_t b) { // LLVM: ret <1 x i64> [[TMP3]] } -// NYI-LABEL: @test_vrsrad_n_u64( -// NYI: [[TMP0:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 %b, i64 -63) -// NYI: [[TMP1:%.*]] = add i64 %a, [[TMP0]] -// NYI: ret i64 [[TMP1]] -// uint64_t test_vrsrad_n_u64(uint64_t a, uint64_t b) { -// return (uint64_t)vrsrad_n_u64(a, b, 63); -// } +uint64_t test_vrsrad_n_u64(uint64_t a, uint64_t b) { + return (uint64_t)vrsrad_n_u64(a, b, 63); + + // CIR-LABEL:vrsrad_n_u64 + // CIR: [[TMP0:%.*]] = cir.const #cir.int<63> : !s32i + // CIR: [[TMP1:%.*]] = cir.unary(minus, [[TMP0]]) : !s32i, !s32i + // CIR: [[TMP2:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u64i + // CIR: [[TMP3:%.*]] = cir.llvm.intrinsic "aarch64.neon.urshl" {{.*}}, [[TMP2]] : (!u64i, !u64i) -> !u64i + // CIR: [[TMP4:%.*]] = cir.binop(add, {{.*}}, [[TMP3]]) : !u64i + + // LLVM-LABEL: @test_vrsrad_n_u64( + // LLVM: [[TMP0:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 %1, i64 -63) + // LLVM: [[TMP1:%.*]] = add i64 %0, [[TMP0]] + // LLVM: ret i64 [[TMP1]] +} uint64x1_t test_vrsra_n_u64(uint64x1_t a, uint64x1_t b) { return vrsra_n_u64(a, b, 1); From a1ab6bf6cd3b83d0982c16f29e8c98958f69c024 Mon Sep 17 00:00:00 2001 From: Andy Kaylor Date: Thu, 20 Feb 2025 16:18:22 -0800 Subject: [PATCH 2282/2301] [CIR] Fix Address element type problems (#1373) There were problems with the pointer type and element type of the Address class getting out of sync. In the traditional codegen the pointer has no type, so it was sufficient for the Address class to simply track the type it was supposed to be pointing to. Since ClangIR pointer values are typed, the Address::withType function wasn't really doing what it was supposed to. It returned an object with the same pointer that the original object had, but with a mismatched element type. This change updates the Address::withType function to perform a bitcast to get the expected pointer type before creating a new Address object. It also adds assertions in the Address class to verify that pointer type and element type are consistent and updates many places that were causing those assertions to fire. These code changes cause extra bitcasts to be emitted in a few places. Regression tests have been updated as needed to reflect the CIR that is now generated. --- clang/lib/CIR/CodeGen/Address.h | 24 ++++++++++++------- clang/lib/CIR/CodeGen/CIRAsm.cpp | 10 ++++---- clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 13 ++++++---- clang/lib/CIR/CodeGen/CIRGenBuilder.cpp | 14 ++++++++++- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 ++-- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 3 +-- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 9 +++---- clang/lib/CIR/CodeGen/CIRGenException.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp | 3 ++- clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 14 ++++++----- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 ++-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 16 +++++++++---- clang/test/CIR/CodeGen/atomic-thread-fence.c | 10 ++++---- clang/test/CIR/CodeGen/atomic-xchg-field.c | 5 ++-- clang/test/CIR/CodeGen/atomic.cpp | 6 ++--- clang/test/CIR/CodeGen/union-init.c | 6 ++--- 19 files changed, 91 insertions(+), 60 deletions(-) diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h index b88bd4378647..5f82dd6b7eff 100644 --- a/clang/lib/CIR/CodeGen/Address.h +++ b/clang/lib/CIR/CodeGen/Address.h @@ -25,6 +25,9 @@ namespace clang::CIRGen { +// Forward declaration to avoid a circular dependency +class CIRGenBuilderTy; + // Indicates whether a pointer is known not to be null. enum KnownNonNull_t { NotKnownNonNull, KnownNonNull }; @@ -64,6 +67,9 @@ class Address { assert(pointer && "Pointer cannot be null"); assert(elementType && "Element type cannot be null"); assert(!alignment.isZero() && "Alignment cannot be zero"); + + assert(mlir::cast(pointer.getType()).getPointee() == + ElementType); } Address(mlir::Value basePtr, mlir::Type elementType, @@ -104,15 +110,9 @@ class Address { bool hasOffset() const { return bool(offset); } - /// Return address with different element type, but same pointer and - /// alignment. - Address withElementType(mlir::Type ElemTy) const { - if (!hasOffset()) - return Address(getBasePointer(), ElemTy, getAlignment(), - getPointerAuthInfo(), /*Offset=*/nullptr, - isKnownNonNull()); - return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull()); - } + /// Return address with different element type, a bitcast pointer, and + /// the same alignment. + Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const; mlir::Value getPointer() const { assert(isValid()); @@ -142,11 +142,17 @@ class Address { /// Return the type of the pointer value. cir::PointerType getType() const { + assert(mlir::cast( + PointerAndKnownNonNull.getPointer().getType()) + .getPointee() == ElementType); return mlir::cast(getPointer().getType()); } mlir::Type getElementType() const { assert(isValid()); + assert(mlir::cast( + PointerAndKnownNonNull.getPointer().getType()) + .getPointee() == ElementType); return ElementType; } diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp index 498311280e20..f7cbe073f4da 100644 --- a/clang/lib/CIR/CodeGen/CIRAsm.cpp +++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp @@ -214,8 +214,9 @@ std::pair CIRGenFunction::emitAsmInputLValue( getTargetHooks().isScalarizableAsmOperand(*this, Ty)) { Ty = cir::IntType::get(&getMLIRContext(), Size, false); - return {builder.createLoad(getLoc(Loc), - InputValue.getAddress().withElementType(Ty)), + return {builder.createLoad( + getLoc(Loc), + InputValue.getAddress().withElementType(builder, Ty)), mlir::Type()}; } } @@ -320,7 +321,7 @@ static void emitAsmStores(CIRGenFunction &CGF, const AsmStmt &S, // ResultTypeRequiresCast.size() elements of RegResults. if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) { unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]); - Address A = Dest.getAddress().withElementType(ResultRegTypes[i]); + Address A = Dest.getAddress().withElementType(Builder, ResultRegTypes[i]); if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) { Builder.createStore(CGF.getLoc(S.getAsmLoc()), Tmp, A); continue; @@ -478,7 +479,8 @@ mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &S) { // Otherwise there will be a mis-match if the matrix is also an // input-argument which is represented as vector. if (isa(OutExpr->getType().getCanonicalType())) - DestAddr = DestAddr.withElementType(convertType(OutExpr->getType())); + DestAddr = + DestAddr.withElementType(builder, convertType(OutExpr->getType())); ArgTypes.push_back(DestAddr.getType()); ArgElemTypes.push_back(DestAddr.getElementType()); diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp index ddd6c78d20b3..fc9c7c19afb4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp @@ -305,7 +305,7 @@ Address AtomicInfo::castToAtomicIntPointer(Address addr) const { if (intTy && intTy.getWidth() == AtomicSizeInBits) return addr; auto ty = CGF.getBuilder().getUIntNTy(AtomicSizeInBits); - return addr.withElementType(ty); + return addr.withElementType(CGF.getBuilder(), ty); } Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const { @@ -1243,8 +1243,9 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *E) { if (RValTy->isVoidType()) return RValue::get(nullptr); - return convertTempToRValue(Dest.withElementType(convertTypeForMem(RValTy)), - RValTy, E->getExprLoc()); + return convertTempToRValue( + Dest.withElementType(builder, convertTypeForMem(RValTy)), RValTy, + E->getExprLoc()); } // The memory order is not known at compile-time. The atomic operations @@ -1321,8 +1322,10 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *E) { if (RValTy->isVoidType()) return RValue::get(nullptr); - return convertTempToRValue(Dest.withElementType(convertTypeForMem(RValTy)), - RValTy, E->getExprLoc()); + + return convertTempToRValue( + Dest.withElementType(builder, convertTypeForMem(RValTy)), RValTy, + E->getExprLoc()); } void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue lvalue, diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp index f4c9506d02d9..f5555ef3bf74 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp @@ -133,4 +133,16 @@ uint64_t CIRGenBuilderTy::computeOffsetFromGlobalViewIndices( } return offset; -} \ No newline at end of file +} + +// This can't be defined in Address.h because that file is included by +// CIRGenBuilder.h +Address Address::withElementType(CIRGenBuilderTy &builder, + mlir::Type ElemTy) const { + if (!hasOffset()) + return Address(builder.createPtrBitcast(getBasePointer(), ElemTy), ElemTy, + getAlignment(), getPointerAuthInfo(), /*Offset=*/nullptr, + isKnownNonNull()); + return Address(builder.createPtrBitcast(getPointer(), ElemTy), ElemTy, + getAlignment(), isKnownNonNull()); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 5a06ddbd0b27..e315630a9e3e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -733,7 +733,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { auto ptrTy = getPointerTo(destType); auto baseAddr = create( loc, ptrTy, addr.getPointer(), mlir::APInt(64, offset), assumeNotNull); - return Address(baseAddr, ptrTy, addr.getAlignment()); + return Address(baseAddr, destType, addr.getAlignment()); } Address createDerivedClassAddr(mlir::Location loc, Address addr, @@ -745,7 +745,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { auto ptrTy = getPointerTo(destType); auto derivedAddr = create( loc, ptrTy, addr.getPointer(), mlir::APInt(64, offset), assumeNotNull); - return Address(derivedAddr, ptrTy, addr.getAlignment()); + return Address(derivedAddr, destType, addr.getAlignment()); } mlir::Value createVTTAddrPoint(mlir::Location loc, mlir::Type retTy, diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index dbd627ffe4c1..f92b9d1bf373 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -4502,7 +4502,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vld1_dup_v: case NEON::BI__builtin_neon_vld1q_dup_v: { - Address ptrAddr = PtrOp0.withElementType(vTy.getEltType()); + Address ptrAddr = PtrOp0.withElementType(builder, vTy.getEltType()); mlir::Value val = builder.createLoad(getLoc(E->getExprLoc()), ptrAddr); cir::VecSplatOp vecSplat = builder.create(getLoc(E->getExprLoc()), vTy, val); diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index 5d401d008dd7..79157921bfe2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -408,8 +408,7 @@ void CIRGenModule::emitCXXGlobalVarDeclInit(const VarDecl *varDecl, builder.setInsertionPointToStart(block); auto getGlobal = builder.createGetGlobal(addr); - Address declAddr(getGlobal, getGlobal.getType(), - getASTContext().getDeclAlign(varDecl)); + Address declAddr(getGlobal, getASTContext().getDeclAlign(varDecl)); assert(performInit && "cannot have constant initializer which needs " "destruction for reference"); RValue rv = cgf.emitReferenceBindingToExpr(init); diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index a1437ec19174..1db346682531 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -1672,7 +1672,7 @@ CIRGenFunction::getAddressOfBaseClass(Address Value, VBase, BaseValueTy, not NullCheckValue); // Cast to the destination type. - Value = Value.withElementType(BaseValueTy); + Value = Value.withElementType(builder, BaseValueTy); return Value; } @@ -1894,7 +1894,7 @@ void CIRGenFunction::emitCXXAggrConstructorCall( builder.create( *currSrcLoc, arrayOp, [&](mlir::OpBuilder &b, mlir::Location loc) { auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc); - Address curAddr = Address(arg, ptrToElmType, eltAlignment); + Address curAddr = Address(arg, elementType, eltAlignment); auto currAVS = AggValueSlot::forAddr( curAddr, type.getQualifiers(), AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 333bbf0e4c95..64e48a42cc25 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -244,11 +244,8 @@ static void emitStoresForConstant(CIRGenModule &CGM, const VarDecl &D, // FIXME(cir): This is closer to memcpy behavior but less optimal, instead of // copy from a global, we just create a cir.const out of it. - if (addr.getElementType() != Ty) { - auto ptr = addr.getPointer(); - ptr = builder.createBitcast(ptr.getLoc(), ptr, builder.getPointerTo(Ty)); - addr = addr.withPointer(ptr, addr.isKnownNonNull()); - } + if (addr.getElementType() != Ty) + addr = addr.withElementType(builder, Ty); auto loc = CGM.getLoc(D.getSourceRange()); builder.createStore(loc, builder.getConstant(loc, constant), addr); @@ -1108,7 +1105,7 @@ void CIRGenFunction::emitArrayDestroy(mlir::Value begin, mlir::Value end, builder.create( *currSrcLoc, begin, [&](mlir::OpBuilder &b, mlir::Location loc) { auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc); - Address curAddr = Address(arg, ptrToElmType, elementAlign); + Address curAddr = Address(arg, cirElementType, elementAlign); if (useEHCleanup) { pushRegularPartialArrayCleanup(arg, arg, elementType, elementAlign, destroyer); diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index b7a10fb4ef96..4ee25d0d38eb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -237,7 +237,7 @@ void CIRGenFunction::emitAnyExprToExn(const Expr *e, Address addr) { // __cxa_allocate_exception returns a void*; we need to cast this // to the appropriate type for the object. auto ty = convertTypeForMem(e->getType()); - Address typedAddr = addr.withElementType(ty); + Address typedAddr = addr.withElementType(builder, ty); // From LLVM's codegen: // FIXME: this isn't quite right! If there's a final unelided call diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index a6121122fe34..3a289c080ad9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -2938,7 +2938,7 @@ mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile, CGM.getABIInfo().getOptimalVectorMemoryType(vTy, getLangOpts()); if (vTy != newVecTy) { - const Address cast = addr.withElementType(newVecTy); + const Address cast = addr.withElementType(builder, newVecTy); mlir::Value v = builder.createLoad(loc, cast, isVolatile); const uint64_t oldNumElements = vTy.getSize(); SmallVector mask(oldNumElements); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp index 8c37cdea13f8..6485c996349e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp @@ -999,7 +999,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { // GCC union extension QualType Ty = E->getSubExpr()->getType(); - Address CastPtr = Dest.getAddress().withElementType(CGF.convertType(Ty)); + Address CastPtr = Dest.getAddress().withElementType(CGF.getBuilder(), + CGF.convertType(Ty)); emitInitializationToLValue(E->getSubExpr(), CGF.makeAddrLValue(CastPtr, Ty)); break; diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 4e3774e0b3b9..1367478df2f1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -379,7 +379,7 @@ static void emitNullBaseClassInitialization(CIRGenFunction &CGF, if (Base->isEmpty()) return; - DestPtr = DestPtr.withElementType(CGF.UInt8Ty); + DestPtr = DestPtr.withElementType(CGF.getBuilder(), CGF.UInt8Ty); const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base); CharUnits NVSize = Layout.getNonVirtualSize(); @@ -1049,8 +1049,7 @@ void CIRGenFunction::emitNewArrayInitializer( if (const ConstantArrayType *CAT = dyn_cast_or_null( AllocType->getAsArrayTypeUnsafe())) { ElementTy = convertTypeForMem(AllocType); - auto CastOp = builder.createPtrBitcast(CurPtr.getPointer(), ElementTy); - CurPtr = Address(CastOp, ElementTy, CurPtr.getAlignment()); + CurPtr = CurPtr.withElementType(builder, ElementTy); InitListElements *= getContext().getConstantArrayElementCount(CAT); } @@ -1095,7 +1094,7 @@ void CIRGenFunction::emitNewArrayInitializer( } // Switch back to initializing one base element at a time. - CurPtr = CurPtr.withElementType(BeginPtr.getElementType()); + CurPtr = CurPtr.withElementType(builder, BeginPtr.getElementType()); } // If all elements have already been initialized, skip any further @@ -1134,7 +1133,7 @@ void CIRGenFunction::emitNewArrayInitializer( if (InitListElements) llvm_unreachable("NYI"); auto arrayType = convertType(CCE->getType()); - CurPtr = CurPtr.withElementType(arrayType); + CurPtr = CurPtr.withElementType(builder, arrayType); emitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE, /*NewPointerIsChecked*/ true, CCE->requiresZeroInitialization()); @@ -1412,7 +1411,10 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *E) { allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign)); } - allocation = Address(RV.getScalarVal(), UInt8Ty, allocationAlign); + auto allocPtr = RV.getScalarVal(); + allocation = Address( + allocPtr, mlir::cast(allocPtr.getType()).getPointee(), + allocationAlign); } // Emit a null check on the allocation result if the allocation diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 836e78c32176..3f6c81408ae5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1606,8 +1606,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { mlir::Value DestPtr = CGF.getBuilder().createBitcast( CGF.getLoc(E->getExprLoc()), SourceAddr.getPointer(), DestPtrTy); - Address DestAddr = - SourceAddr.withPointer(DestPtr).withElementType(DestElemTy); + Address DestAddr = Address(DestPtr, DestElemTy, SourceAddr.getAlignment(), + SourceAddr.isKnownNonNull()); LValue DestLVal = CGF.makeAddrLValue(DestAddr, DestTy); DestLVal.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); return emitLoadOfLValue(DestLVal, CE->getExprLoc()); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index f7f576bb4751..b3c0adbe2049 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -2386,8 +2386,10 @@ mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset( loc, Address(VBaseOffsetPtr, CGM.SInt32Ty, CharUnits::fromQuantity(4))); // vbase.offset } else { + auto OffsetPtr = CGF.getBuilder().createBitcast( + VBaseOffsetPtr, CGF.getBuilder().getPointerTo(CGM.PtrDiffTy)); VBaseOffset = CGF.getBuilder().createLoad( - loc, Address(VBaseOffsetPtr, CGM.PtrDiffTy, + loc, Address(OffsetPtr, CGM.PtrDiffTy, CGF.getPointerAlign())); // vbase.offset } return VBaseOffset; @@ -2744,11 +2746,13 @@ Address CIRGenItaniumCXXABI::initializeArrayCookie(CIRGenFunction &CGF, auto OffsetOp = CGF.getBuilder().getSignedInt( Loc, CookieOffset.getQuantity(), /*width=*/32); auto DataPtr = CGF.getBuilder().createPtrStride(Loc, CastOp, OffsetOp); - CookiePtr = Address(DataPtr, NewPtr.getType(), NewPtr.getAlignment()); + CookiePtr = + Address(DataPtr, CGF.getBuilder().getUIntNTy(8), NewPtr.getAlignment()); } // Write the number of elements into the appropriate slot. - Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy); + Address NumElementsPtr = + CookiePtr.withElementType(CGF.getBuilder(), CGF.SizeTy); CGF.getBuilder().createStore(Loc, NumElements, NumElementsPtr); if (CGF.SanOpts.has(SanitizerKind::Address)) @@ -2761,7 +2765,8 @@ Address CIRGenItaniumCXXABI::initializeArrayCookie(CIRGenFunction &CGF, NewPtr.getPointer(), CGF.getBuilder().getUIntNTy(8)); auto OffsetOp = CGF.getBuilder().getSignedInt(Loc, Offset, /*width=*/32); auto DataPtr = CGF.getBuilder().createPtrStride(Loc, CastOp, OffsetOp); - return Address(DataPtr, NewPtr.getType(), NewPtr.getAlignment()); + return Address(DataPtr, CGF.getBuilder().getUIntNTy(8), + NewPtr.getAlignment()); } CharUnits CIRGenARMCXXABI::getArrayCookieSizeImpl(QualType elementType) { @@ -2812,5 +2817,6 @@ Address CIRGenARMCXXABI::initializeArrayCookie(CIRGenFunction &cgf, auto castOp = cgf.getBuilder().createPtrBitcast( newPtr.getPointer(), cgf.getBuilder().getUIntNTy(8)); dataPtr = cgf.getBuilder().createPtrStride(loc, castOp, offsetOp); - return Address(dataPtr, newPtr.getType(), newPtr.getAlignment()); + return Address(dataPtr, cgf.getBuilder().getUIntNTy(8), + newPtr.getAlignment()); } diff --git a/clang/test/CIR/CodeGen/atomic-thread-fence.c b/clang/test/CIR/CodeGen/atomic-thread-fence.c index 4c71c3c83966..1b3199f56165 100644 --- a/clang/test/CIR/CodeGen/atomic-thread-fence.c +++ b/clang/test/CIR/CodeGen/atomic-thread-fence.c @@ -87,10 +87,11 @@ void loadWithThreadFence(DataPtr d) { // CIR: %[[LOAD_DATA:.*]] = cir.load %[[DATA]] : !cir.ptr>, !cir.ptr // CIR: %[[DATA_VALUE:.*]] = cir.get_member %[[LOAD_DATA]][1] {name = "ptr"} : !cir.ptr -> !cir.ptr> // CIR: %[[CASTED_DATA_VALUE:.*]] = cir.cast(bitcast, %[[DATA_VALUE]] : !cir.ptr>), !cir.ptr -// CIR: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %[[CASTED_DATA_VALUE]] : !cir.ptr, !u64i // CIR: %[[CASTED_ATOMIC_TEMP:.*]] = cir.cast(bitcast, %[[ATOMIC_TEMP]] : !cir.ptr>), !cir.ptr +// CIR: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %[[CASTED_DATA_VALUE]] : !cir.ptr, !u64i // CIR: cir.store %[[ATOMIC_LOAD]], %[[CASTED_ATOMIC_TEMP]] : !u64i, !cir.ptr -// CIR: %[[ATOMIC_LOAD_PTR:.*]] = cir.load %[[ATOMIC_TEMP]] : !cir.ptr>, !cir.ptr +// CIR: %[[DOUBLE_CASTED_ATOMIC_TEMP:.*]] = cir.cast(bitcast, %[[CASTED_ATOMIC_TEMP]] : !cir.ptr), !cir.ptr> +// CIR: %[[ATOMIC_LOAD_PTR:.*]] = cir.load %[[DOUBLE_CASTED_ATOMIC_TEMP]] : !cir.ptr>, !cir.ptr // CIR: cir.return // LLVM-LABEL: @loadWithThreadFence @@ -115,10 +116,11 @@ void loadWithSignalFence(DataPtr d) { // CIR: %[[LOAD_DATA:.*]] = cir.load %[[DATA]] : !cir.ptr>, !cir.ptr // CIR: %[[DATA_PTR:.*]] = cir.get_member %[[LOAD_DATA]][1] {name = "ptr"} : !cir.ptr -> !cir.ptr> // CIR: %[[CASTED_DATA_PTR:.*]] = cir.cast(bitcast, %[[DATA_PTR]] : !cir.ptr>), !cir.ptr -// CIR: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %[[CASTED_DATA_PTR]] : !cir.ptr, !u64i // CIR: %[[CASTED_ATOMIC_TEMP:.*]] = cir.cast(bitcast, %[[ATOMIC_TEMP]] : !cir.ptr>), !cir.ptr +// CIR: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %[[CASTED_DATA_PTR]] : !cir.ptr, !u64i // CIR: cir.store %[[ATOMIC_LOAD]], %[[CASTED_ATOMIC_TEMP]] : !u64i, !cir.ptr -// CIR: %[[LOAD_ATOMIC_TEMP:.*]] = cir.load %[[ATOMIC_TEMP]] : !cir.ptr>, !cir.ptr +// CIR: %[[DOUBLE_CASTED_ATOMIC_TEMP:.*]] = cir.cast(bitcast, %[[CASTED_ATOMIC_TEMP]] : !cir.ptr), !cir.ptr> +// CIR: %[[LOAD_ATOMIC_TEMP:.*]] = cir.load %[[DOUBLE_CASTED_ATOMIC_TEMP]] : !cir.ptr>, !cir.ptr // CIR: cir.return // LLVM-LABEL: @loadWithSignalFence diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c index 59b36ba183bb..d2d0a6be7410 100644 --- a/clang/test/CIR/CodeGen/atomic-xchg-field.c +++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c @@ -27,7 +27,8 @@ void field_access(wPtr item) { // CHECK-NEXT: %[[WADDR:.*]] = cir.alloca !cir.ptr, {{.*}} {alignment = 8 : i64} // CHECK: %[[FIELD:.*]] = cir.load %[[WADDR]] // CHECK: %[[MEMBER:.*]] = cir.get_member %[[FIELD]][1] {name = "ref"} -// CHECK: cir.atomic.xchg(%[[MEMBER]] : !cir.ptr>, {{.*}} : !u64i, seq_cst) +// CHECK: %[[CASTED_MEMBER:.*]] = cir.cast(bitcast, %[[MEMBER]] : !cir.ptr>), !cir.ptr +// CHECK: cir.atomic.xchg(%[[CASTED_MEMBER]] : !cir.ptr, {{.*}} : !u64i, seq_cst) // LLVM-LABEL: @field_access // LLVM: = alloca ptr, i64 1, align 8 @@ -77,8 +78,8 @@ void structLoad(unsigned referenceCount, wPtr item) { // CHECK-LABEL: @structLoad // CHECK: %[[ATOMIC_TEMP:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["atomic-temp"] -// CHECK: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %6 : !cir.ptr, !u64i // CHECK: %[[RES:.*]] = cir.cast(bitcast, %[[ATOMIC_TEMP]] : !cir.ptr>), !cir.ptr +// CHECK: %[[ATOMIC_LOAD:.*]] = cir.load atomic(seq_cst) %6 : !cir.ptr, !u64i // CHECK: cir.store %[[ATOMIC_LOAD]], %[[RES]] : !u64i, !cir.ptr // No LLVM tests needed for this one, already covered elsewhere. diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp index 54244c6d4f74..d04c767591a4 100644 --- a/clang/test/CIR/CodeGen/atomic.cpp +++ b/clang/test/CIR/CodeGen/atomic.cpp @@ -239,7 +239,7 @@ void fd3(struct S *a, struct S *b, struct S *c) { } // CHECK-LABEL: @_Z3fd3P1SS0_S0_ -// CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, seq_cst) : !u64i +// CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, seq_cst) : !u64i // FIXME: CIR is producing an over alignment of 8, only 4 needed. // LLVM-LABEL: @_Z3fd3P1SS0_S0_ @@ -261,7 +261,7 @@ bool fd4(struct S *a, struct S *b, struct S *c) { } // CHECK-LABEL: @_Z3fd4P1SS0_S0_ -// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) align(8) weak : (!u64i, !cir.bool) +// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) align(8) weak : (!u64i, !cir.bool) // LLVM-LABEL: @_Z3fd4P1SS0_S0_ // LLVM: cmpxchg weak ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8 @@ -328,7 +328,7 @@ bool fsb(bool *c) { } // CHECK-LABEL: @_Z3fsbPb -// CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u8i, seq_cst) : !u8i +// CHECK: cir.atomic.xchg({{.*}} : !cir.ptr, {{.*}} : !u8i, seq_cst) : !u8i // LLVM-LABEL: @_Z3fsbPb // LLVM: atomicrmw xchg ptr {{.*}}, i8 {{.*}} seq_cst, align 1 diff --git a/clang/test/CIR/CodeGen/union-init.c b/clang/test/CIR/CodeGen/union-init.c index 122999de23c2..a8d3321142f3 100644 --- a/clang/test/CIR/CodeGen/union-init.c +++ b/clang/test/CIR/CodeGen/union-init.c @@ -54,9 +54,9 @@ typedef union { // CHECK: %[[VAL_0:.*]] = cir.alloca !s32i, !cir.ptr, ["x", init] {alignment = 4 : i64} // CHECK: %[[VAL_1:.*]] = cir.alloca !ty_U, !cir.ptr, ["u", init] {alignment = 4 : i64} // CHECK: cir.store %arg0, %[[VAL_0]] : !s32i, !cir.ptr -// CHECK: %[[VAL_2:.*]] = cir.load %[[VAL_0]] : !cir.ptr, !s32i -// CHECK: %[[VAL_3:.*]] = cir.cast(bitcast, %[[VAL_1]] : !cir.ptr), !cir.ptr -// CHECK: cir.store %[[VAL_2]], %[[VAL_3]] : !s32i, !cir.ptr +// CHECK: %[[VAL_2:.*]] = cir.cast(bitcast, %[[VAL_1]] : !cir.ptr), !cir.ptr +// CHECK: %[[VAL_3:.*]] = cir.load %[[VAL_0]] : !cir.ptr, !s32i +// CHECK: cir.store %[[VAL_3]], %[[VAL_2]] : !s32i, !cir.ptr void union_cast(int x) { U u = (U) x; From cc67bf76587408d578a0e23ef275433f82ab5bd5 Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Sat, 22 Feb 2025 01:02:20 +0000 Subject: [PATCH 2283/2301] [CIR][CUDA] Skeleton of NVPTX target lowering info (#1358) Added a skeleton of NVPTX target lowering info. This enables lowering of `simple.cu` (as it hardly tests device side functionalities), so a test of LLVM IR is also added onto it. --- .../Transforms/TargetLowering/CMakeLists.txt | 1 + .../Transforms/TargetLowering/LowerModule.cpp | 2 + .../Transforms/TargetLowering/TargetInfo.h | 3 + .../TargetLowering/Targets/NVPTX.cpp | 71 +++++++++++++++++++ clang/test/CIR/CodeGen/CUDA/simple.cu | 17 +++++ 5 files changed, 94 insertions(+) create mode 100644 clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/NVPTX.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt index 21bfa30a111a..dab8dbbe5611 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CMakeLists.txt @@ -13,6 +13,7 @@ add_clang_library(TargetLowering TargetInfo.cpp TargetLoweringInfo.cpp Targets/AArch64.cpp + Targets/NVPTX.cpp Targets/SPIR.cpp Targets/X86.cpp Targets/LoweringPrepareAArch64CXXABI.cpp diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index 278091070763..e979e813336f 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -81,6 +81,8 @@ createTargetLoweringInfo(LowerModule &LM) { } case llvm::Triple::spirv64: return createSPIRVTargetLoweringInfo(LM); + case llvm::Triple::nvptx64: + return createNVPTXTargetLoweringInfo(LM); default: cir_cconv_unreachable("ABI NYI"); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h index 8184c4f0afc2..a03cf711babc 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/TargetInfo.h @@ -30,6 +30,9 @@ createAArch64TargetLoweringInfo(LowerModule &CGM, cir::AArch64ABIKind AVXLevel); std::unique_ptr createSPIRVTargetLoweringInfo(LowerModule &CGM); +std::unique_ptr +createNVPTXTargetLoweringInfo(LowerModule &CGM); + } // namespace cir #endif // LLVM_CLANG_LIB_CIR_DIALECT_TRANSFORMS_TARGETLOWERING_TARGETINFO_H diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/NVPTX.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/NVPTX.cpp new file mode 100644 index 000000000000..64c13331d9ba --- /dev/null +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/NVPTX.cpp @@ -0,0 +1,71 @@ +//===- NVPTX.cpp - TargetInfo for NVPTX -----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "ABIInfoImpl.h" +#include "LowerFunctionInfo.h" +#include "LowerTypes.h" +#include "TargetInfo.h" +#include "TargetLoweringInfo.h" +#include "clang/CIR/ABIArgInfo.h" +#include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/ErrorHandling.h" + +using ABIArgInfo = cir::ABIArgInfo; +using MissingFeature = cir::MissingFeatures; + +namespace cir { + +//===----------------------------------------------------------------------===// +// NVPTX ABI Implementation +//===----------------------------------------------------------------------===// + +namespace { + +class NVPTXABIInfo : public ABIInfo { +public: + NVPTXABIInfo(LowerTypes <) : ABIInfo(lt) {} + +private: + void computeInfo(LowerFunctionInfo &fi) const override { + llvm_unreachable("NYI"); + } +}; + +class NVPTXTargetLoweringInfo : public TargetLoweringInfo { +public: + NVPTXTargetLoweringInfo(LowerTypes <) + : TargetLoweringInfo(std::make_unique(lt)) {} + + unsigned getTargetAddrSpaceFromCIRAddrSpace( + cir::AddressSpaceAttr addressSpaceAttr) const override { + using Kind = cir::AddressSpaceAttr::Kind; + switch (addressSpaceAttr.getValue()) { + case Kind::offload_private: + return 0; + case Kind::offload_local: + return 3; + case Kind::offload_global: + return 1; + case Kind::offload_constant: + return 2; + case Kind::offload_generic: + return 4; + default: + cir_cconv_unreachable("Unknown CIR address space for this target"); + } + } +}; + +} // namespace + +std::unique_ptr +createNVPTXTargetLoweringInfo(LowerModule &lowerModule) { + return std::make_unique(lowerModule.getTypes()); +} + +} // namespace cir diff --git a/clang/test/CIR/CodeGen/CUDA/simple.cu b/clang/test/CIR/CodeGen/CUDA/simple.cu index 51a1d3bb2f4b..905ad8048b05 100644 --- a/clang/test/CIR/CodeGen/CUDA/simple.cu +++ b/clang/test/CIR/CodeGen/CUDA/simple.cu @@ -32,6 +32,10 @@ __global__ void global_fn(int a) {} // CIR-HOST: cir.get_global @_Z24__device_stub__global_fni // CIR-HOST: cir.call @cudaLaunchKernel +// COM: LLVM-HOST: void @_Z24__device_stub__global_fni +// COM: LLVM-HOST: call i32 @__cudaPopCallConfiguration +// COM: LLVM-HOST: call i32 @cudaLaunchKernel(ptr @_Z24__device_stub__global_fni + int main() { global_fn<<<1, 1>>>(1); } @@ -46,3 +50,16 @@ int main() { // CIR-HOST: [[Arg:%[0-9]+]] = cir.const #cir.int<1> // CIR-HOST: cir.call @_Z24__device_stub__global_fni([[Arg]]) // CIR-HOST: } + +// COM: LLVM-HOST: define dso_local i32 @main +// COM: LLVM-HOST: alloca %struct.dim3 +// COM: LLVM-HOST: alloca %struct.dim3 +// COM: LLVM-HOST: call void @_ZN4dim3C1Ejjj +// COM: LLVM-HOST: call void @_ZN4dim3C1Ejjj +// COM: LLVM-HOST: [[LLVMConfigOK:%[0-9]+]] = call i32 @__cudaPushCallConfiguration +// COM: LLVM-HOST: br [[LLVMConfigOK]], label %[[Good:[0-9]+]], label [[Bad:[0-9]+]] +// COM: LLVM-HOST: [[Good]]: +// COM: LLVM-HOST: call void @_Z24__device_stub__global_fni +// COM: LLVM-HOST: br label [[Bad]] +// COM: LLVM-HOST: [[Bad]]: +// COM: LLVM-HOST: ret i32 From dda69c825fc44bcde74a89095ca32e4540b6a152 Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Sat, 22 Feb 2025 01:03:20 +0000 Subject: [PATCH 2284/2301] [CIR][CUDA] Add attribute for CUDA fat binary name (#1377) This is a preparation of generating registration functions in LoweringPrepare. CUDA compilation works as follows (irrelevant arguments omitted): ```sh # First compile for device, generating PTX assembly clang++ test.cu -fcuda-is-device -o device.s # Convert that into a binary file ptxas device.s --output-file device.o fatbin --create device.fatbin --image=profile=sm_52,file=device.o # Pass that file as an argument to host clang++ test.cu -fcuda-include-gpubinary device.fatbin -cuid="some unique id" ``` And from the name of GPU binary, we can obtain a handle for registration. So we add an attribute to ModuleOp, recording that name. If that `-fcuda-include-gpubinary` is not specified (like in the test `simple.cu`), OG will not generate any registration function. We do the same here by not generating the attribute. --- .../clang/CIR/Dialect/IR/CIRCUDAAttrs.td | 19 ++++++++++++++++++- .../clang/CIR/Dialect/IR/CIRDialect.td | 1 + clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp | 2 -- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 11 +++++++++++ clang/test/CIR/CodeGen/CUDA/registration.cu | 9 +++++++++ clang/test/CIR/CodeGen/CUDA/simple.cu | 2 +- clang/test/CIR/CodeGen/HIP/simple.cpp | 2 +- 7 files changed, 41 insertions(+), 5 deletions(-) create mode 100644 clang/test/CIR/CodeGen/CUDA/registration.cu diff --git a/clang/include/clang/CIR/Dialect/IR/CIRCUDAAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRCUDAAttrs.td index fd74fe2d349e..e658bb49e815 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRCUDAAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRCUDAAttrs.td @@ -18,7 +18,7 @@ //===----------------------------------------------------------------------===// def CUDAKernelNameAttr : CIR_Attr<"CUDAKernelName", - "cuda_kernel_name"> { + "cu.kernel_name"> { let summary = "Device-side function name for this stub."; let description = [{ @@ -35,4 +35,21 @@ def CUDAKernelNameAttr : CIR_Attr<"CUDAKernelName", let assemblyFormat = "`<` $kernel_name `>`"; } +def CUDABinaryHandleAttr : CIR_Attr<"CUDABinaryHandle", + "cu.binary_handle"> { + let summary = "Fat binary handle for device code."; + let description = + [{ + This attribute is attached to the ModuleOp and records the binary file + name passed to host. + + CUDA first compiles device-side code into a fat binary file. The file + name is then passed into host-side code, which is used to create a handle + and then generate various registration functions. + }]; + + let parameters = (ins "std::string":$name); + let assemblyFormat = "`<` $name `>`"; +} + #endif // MLIR_CIR_DIALECT_CIR_CUDA_ATTRS diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index 46d2f1a13273..c1ea26919c8e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -45,6 +45,7 @@ def CIR_Dialect : Dialect { static llvm::StringRef getGlobalAnnotationsAttrName() { return "cir.global_annotations"; } static llvm::StringRef getOpenCLVersionAttrName() { return "cir.cl.version"; } + static llvm::StringRef getCUDABinaryHandleAttrName() { return "cir.cu.binary_handle"; } void registerAttributes(); void registerTypes(); diff --git a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp index 30697d50bf2b..c47663772aa1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCUDARuntime.cpp @@ -19,7 +19,6 @@ #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/Casting.h" #include "llvm/Support/raw_ostream.h" -#include using namespace clang; using namespace clang::CIRGen; @@ -91,7 +90,6 @@ void CIRGenCUDARuntime::emitDeviceStubBodyNew(CIRGenFunction &cgf, llvm_unreachable("NYI"); std::string launchAPI = addPrefixToName("LaunchKernel"); - std::cout << "LaunchAPI is " << launchAPI << "\n"; const IdentifierInfo &launchII = cgm.getASTContext().Idents.get(launchAPI); FunctionDecl *launchFD = nullptr; for (auto *result : dc->lookup(&launchII)) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 79a9d7875423..3c14885dc1c3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -215,6 +215,17 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, /*line=*/0, /*col=*/0)); } + + // Set CUDA GPU binary handle. + if (langOpts.CUDA) { + std::string cudaBinaryName = codeGenOpts.CudaGpuBinaryFileName; + if (!cudaBinaryName.empty()) { + theModule->setAttr( + cir::CIRDialect::getCUDABinaryHandleAttrName(), + cir::CUDABinaryHandleAttr::get(&mlirContext, cudaBinaryName)); + } + } + if (langOpts.Sanitize.has(SanitizerKind::Thread) || (!codeGenOpts.RelaxedAliasing && codeGenOpts.OptimizationLevel > 0)) { tbaa.reset(new CIRGenTBAA(&mlirContext, astContext, genTypes, theModule, diff --git a/clang/test/CIR/CodeGen/CUDA/registration.cu b/clang/test/CIR/CodeGen/CUDA/registration.cu new file mode 100644 index 000000000000..2c04731bea62 --- /dev/null +++ b/clang/test/CIR/CodeGen/CUDA/registration.cu @@ -0,0 +1,9 @@ +#include "../Inputs/cuda.h" + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir \ +// RUN: -x cuda -emit-cir -target-sdk-version=12.3 \ +// RUN: -fcuda-include-gpubinary fatbin.o\ +// RUN: %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR-HOST --input-file=%t.cir %s + +// CIR-HOST: module @"{{.*}}" attributes{{.*}}cir.cu.binary_handle = #cir.cu.binary_handle{{.*}} diff --git a/clang/test/CIR/CodeGen/CUDA/simple.cu b/clang/test/CIR/CodeGen/CUDA/simple.cu index 905ad8048b05..023089c1eb2d 100644 --- a/clang/test/CIR/CodeGen/CUDA/simple.cu +++ b/clang/test/CIR/CodeGen/CUDA/simple.cu @@ -11,7 +11,7 @@ // RUN: FileCheck --check-prefix=CIR-DEVICE --input-file=%t.cir %s // Attribute for global_fn -// CIR-HOST: [[Kernel:#[a-zA-Z_0-9]+]] = {{.*}}#cir.cuda_kernel_name<_Z9global_fni>{{.*}} +// CIR-HOST: [[Kernel:#[a-zA-Z_0-9]+]] = {{.*}}#cir.cu.kernel_name<_Z9global_fni>{{.*}} __host__ void host_fn(int *a, int *b, int *c) {} // CIR-HOST: cir.func @_Z7host_fnPiS_S_ diff --git a/clang/test/CIR/CodeGen/HIP/simple.cpp b/clang/test/CIR/CodeGen/HIP/simple.cpp index f04dd27e0411..d4db01aa23b8 100644 --- a/clang/test/CIR/CodeGen/HIP/simple.cpp +++ b/clang/test/CIR/CodeGen/HIP/simple.cpp @@ -11,7 +11,7 @@ // RUN: FileCheck --check-prefix=CIR-DEVICE --input-file=%t.cir %s // Attribute for global_fn -// CIR-HOST: [[Kernel:#[a-zA-Z_0-9]+]] = {{.*}}#cir.cuda_kernel_name<_Z9global_fni>{{.*}} +// CIR-HOST: [[Kernel:#[a-zA-Z_0-9]+]] = {{.*}}#cir.cu.kernel_name<_Z9global_fni>{{.*}} __host__ void host_fn(int *a, int *b, int *c) {} From 37fa92a89c2a51cd78c0cb85604cd5bee5fce031 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Sat, 22 Feb 2025 02:04:20 +0100 Subject: [PATCH 2285/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vrshrd_n for s64 and u64 (#1383) Lower neon_vrshrd_n for s64 and u64 --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 16 +++++++++- clang/test/CIR/CodeGen/AArch64/neon.c | 32 ++++++++++++------- 2 files changed, 35 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index f92b9d1bf373..392dcf7688aa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3836,7 +3836,21 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vrshrd_n_u64: case NEON::BI__builtin_neon_vrshrd_n_s64: { - llvm_unreachable("NEON::BI__builtin_neon_vrshrd_n_s64 NYI"); + cir::IntType IntType = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64 + ? builder.getUInt64Ty() + : builder.getSInt64Ty(); + + const StringRef Intrinsic = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64 + ? "aarch64.neon.urshl" + : "aarch64.neon.srshl"; + Ops.push_back(emitScalarExpr(E->getArg(1))); + std::optional APSInt = + E->getArg(1)->getIntegerConstantExpr(getContext()); + assert(APSInt && "Expected argument to be a constant"); + int64_t SV = -APSInt->getSExtValue(); + Ops[1] = builder.getSInt64(SV, getLoc(E->getExprLoc())); + return emitNeonCall(builder, {IntType, builder.getSInt64Ty()}, Ops, + Intrinsic, IntType, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrsrad_n_u64: case NEON::BI__builtin_neon_vrsrad_n_s64: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index c14f2ba0c2d1..405392ff0294 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -15209,12 +15209,16 @@ uint64_t test_vshrd_n_u64_3(uint64_t a) { // LLVM: ret i64 [[SHRD_N]] } -// NYI-LABEL: @test_vrshrd_n_s64( -// NYI: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 %a, i64 -63) -// NYI: ret i64 [[VRSHR_N]] -// int64_t test_vrshrd_n_s64(int64_t a) { -// return (int64_t)vrshrd_n_s64(a, 63); -// } +int64_t test_vrshrd_n_s64(int64_t a) { + return (int64_t)vrshrd_n_s64(a, 63); + + // CIR-LABEL: vrshrd_n_s64 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.srshl" {{.*}}, {{.*}} : (!s64i, !s64i) -> !s64i + + // LLVM-LABEL: @test_vrshrd_n_s64( + // LLVM: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 %0, i64 -63) + // LLVM: ret i64 [[VRSHR_N]] +} // NYI-LABEL: @test_vrshr_n_s64( // NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> @@ -15225,12 +15229,16 @@ uint64_t test_vshrd_n_u64_3(uint64_t a) { // return vrshr_n_s64(a, 1); // } -// NYI-LABEL: @test_vrshrd_n_u64( -// NYI: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 %a, i64 -63) -// NYI: ret i64 [[VRSHR_N]] -// uint64_t test_vrshrd_n_u64(uint64_t a) { -// return (uint64_t)vrshrd_n_u64(a, 63); -// } +uint64_t test_vrshrd_n_u64(uint64_t a) { + return (uint64_t)vrshrd_n_u64(a, 63); + + // CIR-LABEL: vrshrd_n_u64 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.urshl" {{.*}}, {{.*}} : (!u64i, !s64i) -> !u64i + + // LLVM-LABEL: @test_vrshrd_n_u64( + // LLVM: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 %0, i64 -63) + // LLVM: ret i64 [[VRSHR_N]] +} // NYI-LABEL: @test_vrshr_n_u64( // NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> From b6796c4a45e22ac29be648bd44fd44a16ee08afd Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Sat, 22 Feb 2025 02:04:44 +0100 Subject: [PATCH 2286/2301] [CIR][CIRGen][Builtin][Neon] Lower builtin_neon_vqshld_n s64 and u64 (#1385) Lower builtin_neon_vqshld_n s64 and u64 --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 12 ++++++- clang/test/CIR/CodeGen/AArch64/neon.c | 32 ++++++++++++------- 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 392dcf7688aa..7f27edca2224 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3832,7 +3832,17 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vqshld_n_u64: case NEON::BI__builtin_neon_vqshld_n_s64: { - llvm_unreachable("NEON::BI__builtin_neon_vqshld_n_s64 NYI"); + cir::IntType IntType = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64 + ? builder.getUInt64Ty() + : builder.getSInt64Ty(); + + const StringRef Intrinsic = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64 + ? "aarch64.neon.uqshl" + : "aarch64.neon.sqshl"; + Ops.push_back(emitScalarExpr(E->getArg(1))); + Ops[1] = builder.createIntCast(Ops[1], IntType); + return emitNeonCall(builder, {IntType, IntType}, Ops, Intrinsic, IntType, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrshrd_n_u64: case NEON::BI__builtin_neon_vrshrd_n_s64: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index 405392ff0294..aa6dca77e7df 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -15472,12 +15472,16 @@ uint64_t test_vshld_n_u64(uint64_t a) { // return (int32_t)vqshls_n_s32(a, 31); // } -// NYI-LABEL: @test_vqshld_n_s64( -// NYI: [[VQSHL_N:%.*]] = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %a, i64 63) -// NYI: ret i64 [[VQSHL_N]] -// int64_t test_vqshld_n_s64(int64_t a) { -// return (int64_t)vqshld_n_s64(a, 63); -// } +int64_t test_vqshld_n_s64(int64_t a) { + return (int64_t)vqshld_n_s64(a, 63); + + // CIR-LABEL: vqshld_n_s64 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.sqshl" {{.*}}, {{.*}} : (!s64i, !s64i) -> !s64i + + // LLVM-LABEL: @test_vqshld_n_s64( + // LLVM: [[VQSHL_N:%.*]] = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %0, i64 63) + // LLVM: ret i64 [[VQSHL_N]] +} // NYI-LABEL: @test_vqshl_n_s8( // NYI: [[VQSHL_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %a, <8 x i8> zeroinitializer) @@ -15631,12 +15635,16 @@ uint64_t test_vshld_n_u64(uint64_t a) { // return (uint32_t)vqshls_n_u32(a, 31); // } -// NYI-LABEL: @test_vqshld_n_u64( -// NYI: [[VQSHL_N:%.*]] = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %a, i64 63) -// NYI: ret i64 [[VQSHL_N]] -// uint64_t test_vqshld_n_u64(uint64_t a) { -// return (uint64_t)vqshld_n_u64(a, 63); -// } +uint64_t test_vqshld_n_u64(uint64_t a) { + return (uint64_t)vqshld_n_u64(a, 63); + + // CIR-LABEL: vqshld_n_u64 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.uqshl" {{.*}}, {{.*}} : (!u64i, !u64i) -> !u64i + + // LLVM-LABEL: @test_vqshld_n_u64( + // LLVM: [[VQSHL_N:%.*]] = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %0, i64 63) + // LLVM: ret i64 [[VQSHL_N]] +} // NYI-LABEL: @test_vqshl_n_u64( // NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> From c301b4a0d3d2d79b26c9c809c11b8a1137c0a9ec Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Sat, 22 Feb 2025 22:07:00 +0800 Subject: [PATCH 2287/2301] [CIR] LLVM lowering support for pointers to member functions (#1292) This PR adds support for LLVM lowering of pointers to member functions. The lowering is ABI-specific and this patch only considers Itanium ABI. Itanium ABI lowers pointers to member functions to a struct with two fields of type `ptrdiff_t`. To extract fields from such aggregate values, this PR includes a new operation `cir.extract_member` to accomplish this. --- .../include/clang/CIR/Dialect/IR/CIRAttrs.td | 10 + clang/include/clang/CIR/Dialect/IR/CIROps.td | 58 ++++ clang/include/clang/CIR/MissingFeatures.h | 3 + clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 16 + .../Transforms/TargetLowering/CIRCXXABI.h | 20 ++ .../TargetLowering/ItaniumCXXABI.cpp | 296 +++++++++++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 62 +++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 25 ++ .../CIR/CodeGen/pointer-to-member-func.cpp | 38 +++ clang/test/CIR/Lowering/struct.cir | 15 + 10 files changed, 530 insertions(+), 13 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td index d3f5c88df19c..fa7b5c0d7a08 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRAttrs.td @@ -573,6 +573,16 @@ def MethodAttr : CIR_Attr<"Method", "method", [TypedAttrInterface]> { let hasCustomAssemblyFormat = 1; let genVerifyDecl = 1; + + let extraClassDeclaration = [{ + bool isNull() const { + return !getSymbol().has_value() && !getVtableOffset().has_value(); + } + + bool isVirtual() const { + return getVtableOffset().has_value(); + } + }]; } //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index b06c61577150..1feb631bfcdf 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -2850,6 +2850,64 @@ def GetMemberOp : CIR_Op<"get_member"> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// ExtractMemberOp +//===----------------------------------------------------------------------===// + +def ExtractMemberOp : CIR_Op<"extract_member", [Pure]> { + let summary = "Extract the value of a member of a struct value"; + let description = [{ + The `cir.extract_member` operation extracts the value of a particular member + from the input record. Unlike `cir.get_member` which derives pointers, this + operation operates on values. It takes a value of record type, and extract + the value of the specified record member from the input record value. + + Currently `cir.extract_member` does not work on unions. + + Example: + + ```mlir + // Suppose we have a struct with multiple members. + !s32i = !cir.int + !s8i = !cir.int + !struct_ty = !cir.struct<"struct.Bar" {!s32i, !s8i}> + + // And suppose we have a value of the struct type. + %0 = cir.const #cir.const_struct<{#cir.int<1> : !s32i, #cir.int<2> : !s8i}> : !struct_ty + + // Extract the value of the second member of the struct. + %1 = cir.extract_member %0[1] : !struct_ty -> !s8i + ``` + }]; + + let arguments = (ins CIR_StructType:$record, IndexAttr:$index_attr); + let results = (outs CIR_AnyType:$result); + + let assemblyFormat = [{ + $record `[` $index_attr `]` attr-dict + `:` qualified(type($record)) `->` qualified(type($result)) + }]; + + let builders = [ + OpBuilder<(ins "mlir::Type":$type, "mlir::Value":$record, "uint64_t":$index), [{ + mlir::APInt fieldIdx(64, index); + build($_builder, $_state, type, record, fieldIdx); + }]>, + OpBuilder<(ins "mlir::Value":$record, "uint64_t":$index), [{ + auto recordTy = mlir::cast(record.getType()); + mlir::Type memberTy = recordTy.getMembers()[index]; + build($_builder, $_state, memberTy, record, index); + }]> + ]; + + let extraClassDeclaration = [{ + /// Get the index of the struct member being accessed. + uint64_t getIndex() { return getIndexAttr().getZExtValue(); } + }]; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // GetRuntimeMemberOp //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index f9a500bb7c64..67547c376790 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -70,6 +70,9 @@ struct MissingFeatures { static bool tbaaPointer() { return false; } static bool emitNullabilityCheck() { return false; } static bool ptrAuth() { return false; } + static bool emitCFICheck() { return false; } + static bool emitVFEInfo() { return false; } + static bool emitWPDInfo() { return false; } // GNU vectors are done, but other kinds of vectors haven't been implemented. static bool scalableVectors() { return false; } diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index a0a7f88a0f90..18e4adf19d53 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -3577,6 +3577,22 @@ LogicalResult cir::GetMemberOp::verify() { return mlir::success(); } +//===----------------------------------------------------------------------===// +// ExtractMemberOp Definitions +//===----------------------------------------------------------------------===// + +LogicalResult cir::ExtractMemberOp::verify() { + auto recordTy = mlir::cast(getRecord().getType()); + if (recordTy.getKind() == cir::StructType::Union) + return emitError() + << "cir.extract_member currently does not work on unions"; + if (recordTy.getMembers().size() <= getIndex()) + return emitError() << "member index out of bounds"; + if (recordTy.getMembers()[getIndex()] != getType()) + return emitError() << "member type mismatch"; + return mlir::success(); +} + //===----------------------------------------------------------------------===// // GetRuntimeMemberOp Definitions //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h index a1948059d783..995fcd027919 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/CIRCXXABI.h @@ -72,6 +72,12 @@ class CIRCXXABI { lowerDataMemberType(cir::DataMemberType type, const mlir::TypeConverter &typeConverter) const = 0; + /// Lower the given member function pointer type to its ABI type. The returned + /// type is also a CIR type. + virtual mlir::Type + lowerMethodType(cir::MethodType type, + const mlir::TypeConverter &typeConverter) const = 0; + /// Lower the given data member pointer constant to a constant of the ABI /// type. The returned constant is represented as an attribute as well. virtual mlir::TypedAttr @@ -79,6 +85,12 @@ class CIRCXXABI { const mlir::DataLayout &layout, const mlir::TypeConverter &typeConverter) const = 0; + /// Lower the given member function pointer constant to a constant of the ABI + /// type. The returned constant is represented as an attribute as well. + virtual mlir::TypedAttr + lowerMethodConstant(cir::MethodAttr attr, const mlir::DataLayout &layout, + const mlir::TypeConverter &typeConverter) const = 0; + /// Lower the given cir.get_runtime_member op to a sequence of more /// "primitive" CIR operations that act on the ABI types. virtual mlir::Operation * @@ -86,6 +98,14 @@ class CIRCXXABI { mlir::Value loweredAddr, mlir::Value loweredMember, mlir::OpBuilder &builder) const = 0; + /// Lower the given cir.get_method op to a sequence of more "primitive" CIR + /// operations that act on the ABI types. The lowered result values will be + /// stored in the given loweredResults array. + virtual void + lowerGetMethod(cir::GetMethodOp op, mlir::Value (&loweredResults)[2], + mlir::Value loweredMethod, mlir::Value loweredObjectPtr, + mlir::ConversionPatternRewriter &rewriter) const = 0; + /// Lower the given cir.base_data_member op to a sequence of more "primitive" /// CIR operations that act on the ABI types. virtual mlir::Value lowerBaseDataMember(cir::BaseDataMemberOp op, diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp index f3569eca9e0a..992cf88efaea 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/ItaniumCXXABI.cpp @@ -33,15 +33,28 @@ namespace { class ItaniumCXXABI : public CIRCXXABI { protected: + enum class VTableComponentLayout { + /// Components in the vtable are pointers to other structs/functions. + Pointer, + + /// Components in the vtable are relative offsets between the vtable and the + /// other structs/functions. + Relative, + }; + bool UseARMMethodPtrABI; bool UseARMGuardVarABI; bool Use32BitVTableOffsetABI; + VTableComponentLayout VTComponentLayout; public: - ItaniumCXXABI(LowerModule &LM, bool UseARMMethodPtrABI = false, - bool UseARMGuardVarABI = false) + ItaniumCXXABI( + LowerModule &LM, bool UseARMMethodPtrABI = false, + bool UseARMGuardVarABI = false, + VTableComponentLayout VTComponentLayout = VTableComponentLayout::Pointer) : CIRCXXABI(LM), UseARMMethodPtrABI(UseARMMethodPtrABI), - UseARMGuardVarABI(UseARMGuardVarABI), Use32BitVTableOffsetABI(false) {} + UseARMGuardVarABI(UseARMGuardVarABI), Use32BitVTableOffsetABI(false), + VTComponentLayout(VTComponentLayout) {} bool classifyReturnType(LowerFunctionInfo &FI) const override; @@ -57,15 +70,27 @@ class ItaniumCXXABI : public CIRCXXABI { lowerDataMemberType(cir::DataMemberType type, const mlir::TypeConverter &typeConverter) const override; + mlir::Type + lowerMethodType(cir::MethodType type, + const mlir::TypeConverter &typeConverter) const override; + mlir::TypedAttr lowerDataMemberConstant( cir::DataMemberAttr attr, const mlir::DataLayout &layout, const mlir::TypeConverter &typeConverter) const override; + mlir::TypedAttr + lowerMethodConstant(cir::MethodAttr attr, const mlir::DataLayout &layout, + const mlir::TypeConverter &typeConverter) const override; + mlir::Operation * lowerGetRuntimeMember(cir::GetRuntimeMemberOp op, mlir::Type loweredResultTy, mlir::Value loweredAddr, mlir::Value loweredMember, mlir::OpBuilder &builder) const override; + void lowerGetMethod(cir::GetMethodOp op, mlir::Value (&loweredResults)[2], + mlir::Value loweredMethod, mlir::Value loweredObjectPtr, + mlir::ConversionPatternRewriter &rewriter) const override; + mlir::Value lowerBaseDataMember(cir::BaseDataMemberOp op, mlir::Value loweredSrc, mlir::OpBuilder &builder) const override; @@ -101,10 +126,7 @@ bool ItaniumCXXABI::classifyReturnType(LowerFunctionInfo &FI) const { return false; } -static mlir::Type getABITypeForDataMember(LowerModule &lowerMod) { - // Itanium C++ ABI 2.3: - // A pointer to data member is an offset from the base address of - // the class object containing it, represented as a ptrdiff_t +static cir::IntType getPtrDiffCIRTy(LowerModule &lowerMod) { const clang::TargetInfo &target = lowerMod.getTarget(); clang::TargetInfo::IntType ptrdiffTy = target.getPtrDiffType(clang::LangAS::Default); @@ -115,7 +137,32 @@ static mlir::Type getABITypeForDataMember(LowerModule &lowerMod) { mlir::Type ItaniumCXXABI::lowerDataMemberType( cir::DataMemberType type, const mlir::TypeConverter &typeConverter) const { - return getABITypeForDataMember(LM); + // Itanium C++ ABI 2.3.1: + // A data member pointer is represented as the data member's offset in bytes + // from the address point of an object of the base type, as a ptrdiff_t. + return getPtrDiffCIRTy(LM); +} + +mlir::Type +ItaniumCXXABI::lowerMethodType(cir::MethodType type, + const mlir::TypeConverter &typeConverter) const { + // Itanium C++ ABI 2.3.2: + // In all representations, the basic ABI properties of member function + // pointer types are those of the following class, where fnptr_t is the + // appropriate function-pointer type for a member function of this type: + // + // struct { + // fnptr_t ptr; + // ptrdiff_t adj; + // }; + + cir::IntType ptrdiffCIRTy = getPtrDiffCIRTy(LM); + + // Note that clang CodeGen emits struct{ptrdiff_t, ptrdiff_t} for member + // function pointers. Let's follow this approach. + return cir::StructType::get(type.getContext(), {ptrdiffCIRTy, ptrdiffCIRTy}, + /*packed=*/false, /*padded=*/false, + cir::StructType::Struct); } mlir::TypedAttr ItaniumCXXABI::lowerDataMemberConstant( @@ -139,6 +186,72 @@ mlir::TypedAttr ItaniumCXXABI::lowerDataMemberConstant( return cir::IntAttr::get(abiTy, memberOffset); } +mlir::TypedAttr ItaniumCXXABI::lowerMethodConstant( + cir::MethodAttr attr, const mlir::DataLayout &layout, + const mlir::TypeConverter &typeConverter) const { + cir::IntType ptrdiffCIRTy = getPtrDiffCIRTy(LM); + auto loweredMethodTy = mlir::cast( + lowerMethodType(attr.getType(), typeConverter)); + + auto zero = cir::IntAttr::get(ptrdiffCIRTy, 0); + + // Itanium C++ ABI 2.3.2: + // In all representations, the basic ABI properties of member function + // pointer types are those of the following class, where fnptr_t is the + // appropriate function-pointer type for a member function of this type: + // + // struct { + // fnptr_t ptr; + // ptrdiff_t adj; + // }; + + if (attr.isNull()) { + // Itanium C++ ABI 2.3.2: + // + // In the standard representation, a null member function pointer is + // represented with ptr set to a null pointer. The value of adj is + // unspecified for null member function pointers. + // + // clang CodeGen emits struct{null, null} for null member function pointers. + // Let's do the same here. + return cir::ConstStructAttr::get( + loweredMethodTy, mlir::ArrayAttr::get(attr.getContext(), {zero, zero})); + } + + if (attr.isVirtual()) { + if (UseARMMethodPtrABI) { + // ARM C++ ABI 3.2.1: + // This ABI specifies that adj contains twice the this + // adjustment, plus 1 if the member function is virtual. The + // least significant bit of adj then makes exactly the same + // discrimination as the least significant bit of ptr does for + // Itanium. + llvm_unreachable("ARM method ptr abi NYI"); + } + + // Itanium C++ ABI 2.3.2: + // + // In the standard representation, a member function pointer for a + // virtual function is represented with ptr set to 1 plus the function's + // v-table entry offset (in bytes), converted to a function pointer as if + // by reinterpret_cast(uintfnptr_t(1 + offset)), where + // uintfnptr_t is an unsigned integer of the same size as fnptr_t. + auto ptr = + cir::IntAttr::get(ptrdiffCIRTy, 1 + attr.getVtableOffset().value()); + return cir::ConstStructAttr::get( + loweredMethodTy, mlir::ArrayAttr::get(attr.getContext(), {ptr, zero})); + } + + // Itanium C++ ABI 2.3.2: + // + // A member function pointer for a non-virtual member function is + // represented with ptr set to a pointer to the function, using the base + // ABI's representation of function pointers. + auto ptr = cir::GlobalViewAttr::get(ptrdiffCIRTy, attr.getSymbol().value()); + return cir::ConstStructAttr::get( + loweredMethodTy, mlir::ArrayAttr::get(attr.getContext(), {ptr, zero})); +} + mlir::Operation *ItaniumCXXABI::lowerGetRuntimeMember( cir::GetRuntimeMemberOp op, mlir::Type loweredResultTy, mlir::Value loweredAddr, mlir::Value loweredMember, @@ -154,6 +267,171 @@ mlir::Operation *ItaniumCXXABI::lowerGetRuntimeMember( memberBytesPtr); } +void ItaniumCXXABI::lowerGetMethod( + cir::GetMethodOp op, mlir::Value (&loweredResults)[2], + mlir::Value loweredMethod, mlir::Value loweredObjectPtr, + mlir::ConversionPatternRewriter &rewriter) const { + // In the Itanium and ARM ABIs, method pointers have the form: + // struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr; + // + // In the Itanium ABI: + // - method pointers are virtual if (memptr.ptr & 1) is nonzero + // - the this-adjustment is (memptr.adj) + // - the virtual offset is (memptr.ptr - 1) + // + // In the ARM ABI: + // - method pointers are virtual if (memptr.adj & 1) is nonzero + // - the this-adjustment is (memptr.adj >> 1) + // - the virtual offset is (memptr.ptr) + // ARM uses 'adj' for the virtual flag because Thumb functions + // may be only single-byte aligned. + // + // If the member is virtual, the adjusted 'this' pointer points + // to a vtable pointer from which the virtual offset is applied. + // + // If the member is non-virtual, memptr.ptr is the address of + // the function to call. + + mlir::Value &callee = loweredResults[0]; + mlir::Value &adjustedThis = loweredResults[1]; + mlir::Type calleePtrTy = op.getCallee().getType(); + + cir::IntType ptrdiffCIRTy = getPtrDiffCIRTy(LM); + mlir::Value ptrdiffOne = rewriter.create( + op.getLoc(), cir::IntAttr::get(ptrdiffCIRTy, 1)); + + mlir::Value adj = rewriter.create( + op.getLoc(), ptrdiffCIRTy, loweredMethod, 1); + if (UseARMMethodPtrABI) + llvm_unreachable("ARM method ptr abi NYI"); + + // Apply the adjustment to the 'this' pointer. + mlir::Type thisVoidPtrTy = cir::PointerType::get( + cir::VoidType::get(rewriter.getContext()), + mlir::cast(op.getObject().getType()).getAddrSpace()); + mlir::Value thisVoidPtr = rewriter.create( + op.getLoc(), thisVoidPtrTy, cir::CastKind::bitcast, loweredObjectPtr); + adjustedThis = rewriter.create(op.getLoc(), thisVoidPtrTy, + thisVoidPtr, adj); + + // Load the "ptr" field of the member function pointer and determine if it + // points to a virtual function. + mlir::Value methodPtrField = rewriter.create( + op.getLoc(), ptrdiffCIRTy, loweredMethod, 0); + mlir::Value virtualBit = rewriter.create( + op.getLoc(), cir::BinOpKind::And, methodPtrField, ptrdiffOne); + mlir::Value isVirtual; + if (UseARMMethodPtrABI) + llvm_unreachable("ARM method ptr abi NYI"); + else + isVirtual = rewriter.create(op.getLoc(), cir::CmpOpKind::eq, + virtualBit, ptrdiffOne); + + assert(!MissingFeatures::emitCFICheck()); + assert(!MissingFeatures::emitVFEInfo()); + assert(!MissingFeatures::emitWPDInfo()); + + // See their original definitions in + // ItaniumCXXABI::EmitLoadOfMemberFunctionPointer in file + // clang/lib/CodeGen/ItaniumCXXABI.cpp. + bool shouldEmitCFICheck = false; + bool shouldEmitVFEInfo = + LM.getContext().getCodeGenOpts().VirtualFunctionElimination; + bool shouldEmitWPDInfo = LM.getContext().getCodeGenOpts().WholeProgramVTables; + + mlir::Block *currBlock = rewriter.getInsertionBlock(); + mlir::Block *continueBlock = + rewriter.splitBlock(currBlock, rewriter.getInsertionPoint()); + continueBlock->addArgument(calleePtrTy, op.getLoc()); + + mlir::Block *virtualBlock = rewriter.createBlock(continueBlock); + mlir::Block *nonVirtualBlock = rewriter.createBlock(continueBlock); + rewriter.setInsertionPointToEnd(currBlock); + rewriter.create(op.getLoc(), isVirtual, virtualBlock, + nonVirtualBlock); + + auto buildVirtualBranch = [&] { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPointToStart(virtualBlock); + + // Load vtable pointer. + // Note that vtable pointer always point to the global address space. + auto vtablePtrTy = cir::PointerType::get( + rewriter.getContext(), + cir::IntType::get(rewriter.getContext(), 8, true)); + auto vtablePtrPtrTy = cir::PointerType::get( + rewriter.getContext(), vtablePtrTy, + mlir::cast(op.getObject().getType()).getAddrSpace()); + auto vtablePtrPtr = rewriter.create( + op.getLoc(), vtablePtrPtrTy, cir::CastKind::bitcast, loweredObjectPtr); + mlir::Value vtablePtr = rewriter.create( + op.getLoc(), vtablePtrPtr, /*isDeref=*/false, /*isVolatile=*/false, + /*alignment=*/mlir::IntegerAttr(), /*mem_order=*/cir::MemOrderAttr(), + /*tbaa=*/mlir::ArrayAttr()); + + // Get the vtable offset. + mlir::Value vtableOffset = methodPtrField; + if (!UseARMMethodPtrABI) + vtableOffset = rewriter.create( + op.getLoc(), cir::BinOpKind::Sub, vtableOffset, ptrdiffOne); + if (Use32BitVTableOffsetABI) + llvm_unreachable("NYI"); + + if (shouldEmitCFICheck || shouldEmitVFEInfo || shouldEmitWPDInfo) + llvm_unreachable("NYI"); + + // Apply the offset to the vtable pointer and get the pointer to the target + // virtual function. Then load that pointer to get the callee. + mlir::Value funcPtr; + if (shouldEmitVFEInfo) + llvm_unreachable("NYI"); + else { + if (shouldEmitCFICheck || shouldEmitWPDInfo) + llvm_unreachable("NYI"); + + if (VTComponentLayout == VTableComponentLayout::Relative) + llvm_unreachable("NYI"); + else { + mlir::Value vfpAddr = rewriter.create( + op.getLoc(), vtablePtrTy, vtablePtr, vtableOffset); + auto vfpPtrTy = + cir::PointerType::get(rewriter.getContext(), calleePtrTy); + mlir::Value vfpPtr = rewriter.create( + op.getLoc(), vfpPtrTy, cir::CastKind::bitcast, vfpAddr); + funcPtr = rewriter.create( + op.getLoc(), vfpPtr, /*isDeref=*/false, /*isVolatile=*/false, + /*alignment=*/mlir::IntegerAttr(), + /*mem_order=*/cir::MemOrderAttr(), + /*tbaa=*/mlir::ArrayAttr()); + } + } + + if (shouldEmitCFICheck) + llvm_unreachable("NYI"); + + rewriter.create(op.getLoc(), continueBlock, funcPtr); + }; + + auto buildNonVirtualBranch = [&] { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPointToStart(nonVirtualBlock); + + mlir::Value funcPtr = rewriter.create( + op.getLoc(), calleePtrTy, cir::CastKind::int_to_ptr, methodPtrField); + + if (shouldEmitCFICheck) + llvm_unreachable("NYI"); + + rewriter.create(op.getLoc(), continueBlock, funcPtr); + }; + + buildVirtualBranch(); + buildNonVirtualBranch(); + + rewriter.setInsertionPointToStart(continueBlock); + callee = continueBlock->getArgument(0); +} + static mlir::Value lowerDataMemberCast(mlir::Operation *op, mlir::Value loweredSrc, std::int64_t offset, @@ -213,7 +491,7 @@ ItaniumCXXABI::lowerDataMemberToBoolCast(cir::CastOp op, mlir::Value loweredSrc, mlir::OpBuilder &builder) const { // Itanium C++ ABI 2.3: // A NULL pointer is represented as -1. - auto nullAttr = cir::IntAttr::get(getABITypeForDataMember(LM), -1); + auto nullAttr = cir::IntAttr::get(getPtrDiffCIRTy(LM), -1); auto nullValue = builder.create(op.getLoc(), nullAttr); return builder.create(op.getLoc(), cir::CmpOpKind::ne, loweredSrc, nullValue); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 3b9f1def6db8..e1a2d49fc5d2 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1794,6 +1794,14 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( dataMember, layout, *typeConverter); rewriter.replaceOpWithNewOp(op, abiValue); return mlir::success(); + } else if (mlir::isa(op.getType())) { + assert(lowerMod && "lower module is not available"); + auto method = mlir::cast(op.getValue()); + mlir::DataLayout layout(op->getParentOfType()); + mlir::TypedAttr abiValue = lowerMod->getCXXABI().lowerMethodConstant( + method, layout, *typeConverter); + rewriter.replaceOpWithNewOp(op, abiValue); + return mlir::success(); } // TODO(cir): constant arrays are currently just pushed into the stack using // the store instruction, instead of being stored as global variables and @@ -1829,16 +1837,14 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite( // stack. auto initVal = lowerCirAttrAsValue(op, structAttr, rewriter, typeConverter, dataLayout); - rewriter.replaceAllUsesWith(op, initVal); - rewriter.eraseOp(op); + rewriter.replaceOp(op, initVal); return mlir::success(); } else if (auto strTy = mlir::dyn_cast(op.getType())) { auto attr = op.getValue(); if (mlir::isa(attr)) { auto initVal = lowerCirAttrAsValue(op, attr, rewriter, typeConverter, dataLayout); - rewriter.replaceAllUsesWith(op, initVal); - rewriter.eraseOp(op); + rewriter.replaceOp(op, initVal); return mlir::success(); } @@ -3479,6 +3485,46 @@ mlir::LogicalResult CIRToLLVMGetMemberOpLowering::matchAndRewrite( } } +mlir::LogicalResult CIRToLLVMExtractMemberOpLowering::matchAndRewrite( + cir::ExtractMemberOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + std::int64_t indecies[1] = {static_cast(op.getIndex())}; + + mlir::Type recordTy = op.getRecord().getType(); + if (auto llvmStructTy = + mlir::dyn_cast(recordTy)) { + rewriter.replaceOpWithNewOp( + op, adaptor.getRecord(), indecies); + return mlir::success(); + } + + auto cirStructTy = mlir::cast(recordTy); + switch (cirStructTy.getKind()) { + case cir::StructType::Struct: + case cir::StructType::Class: { + rewriter.replaceOpWithNewOp( + op, adaptor.getRecord(), indecies); + return mlir::success(); + } + + case cir::StructType::Union: { + op.emitError("cir.extract_member cannot extract member from a union"); + return mlir::failure(); + } + } +} + +mlir::LogicalResult CIRToLLVMGetMethodOpLowering::matchAndRewrite( + cir::GetMethodOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + assert(lowerMod && "lowering module is not available"); + mlir::Value loweredResults[2]; + lowerMod->getCXXABI().lowerGetMethod(op, loweredResults, adaptor.getMethod(), + adaptor.getObject(), rewriter); + rewriter.replaceOp(op, loweredResults); + return mlir::success(); +} + mlir::LogicalResult CIRToLLVMGetRuntimeMemberOpLowering::matchAndRewrite( cir::GetRuntimeMemberOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -4122,6 +4168,7 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMBaseDataMemberOpLowering, CIRToLLVMCmpOpLowering, CIRToLLVMDerivedDataMemberOpLowering, + CIRToLLVMGetMethodOpLowering, CIRToLLVMGetRuntimeMemberOpLowering, CIRToLLVMInvariantGroupOpLowering // clang-format on @@ -4169,6 +4216,7 @@ void populateCIRToLLVMConversionPatterns( CIRToLLVMEhInflightOpLowering, CIRToLLVMEhTypeIdOpLowering, CIRToLLVMExpectOpLowering, + CIRToLLVMExtractMemberOpLowering, CIRToLLVMFrameAddrOpLowering, CIRToLLVMFreeExceptionOpLowering, CIRToLLVMFuncOpLowering, @@ -4264,6 +4312,12 @@ void prepareTypeConverter(mlir::LLVMTypeConverter &converter, lowerModule->getCXXABI().lowerDataMemberType(type, converter); return converter.convertType(abiType); }); + converter.addConversion([&, lowerModule](cir::MethodType type) -> mlir::Type { + assert(lowerModule && "CXXABI is not available"); + mlir::Type abiType = + lowerModule->getCXXABI().lowerMethodType(type, converter); + return converter.convertType(abiType); + }); converter.addConversion([&](cir::ArrayType type) -> mlir::Type { auto ty = convertTypeForMemory(converter, dataLayout, type.getEltType()); return mlir::LLVM::LLVMArrayType::get(ty, type.getSize()); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 5aafd1a2ecab..a3cd1cd1f0ba 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -882,6 +882,31 @@ class CIRToLLVMGetMemberOpLowering mlir::ConversionPatternRewriter &) const override; }; +class CIRToLLVMExtractMemberOpLowering + : public mlir::OpConversionPattern { +public: + using mlir::OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::ExtractMemberOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + +class CIRToLLVMGetMethodOpLowering + : public mlir::OpConversionPattern { + cir::LowerModule *lowerMod; + +public: + CIRToLLVMGetMethodOpLowering(const mlir::TypeConverter &typeConverter, + mlir::MLIRContext *context, + cir::LowerModule *lowerModule) + : OpConversionPattern(typeConverter, context), lowerMod(lowerModule) {} + + mlir::LogicalResult + matchAndRewrite(cir::GetMethodOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + class CIRToLLVMGetRuntimeMemberOpLowering : public mlir::OpConversionPattern { cir::LowerModule *lowerMod; diff --git a/clang/test/CIR/CodeGen/pointer-to-member-func.cpp b/clang/test/CIR/CodeGen/pointer-to-member-func.cpp index f3c426c4b1ee..a1a42f4d494c 100644 --- a/clang/test/CIR/CodeGen/pointer-to-member-func.cpp +++ b/clang/test/CIR/CodeGen/pointer-to-member-func.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++17 -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --input-file=%t.ll --check-prefix=LLVM %s struct Foo { void m1(int); @@ -15,6 +17,10 @@ auto make_non_virtual() -> void (Foo::*)(int) { // CHECK: %{{.+}} = cir.const #cir.method<@_ZN3Foo2m1Ei> : !cir.method in !ty_Foo> // CHECK: } +// LLVM-LABEL: @_Z16make_non_virtualv +// LLVM: store { i64, i64 } { i64 ptrtoint (ptr @_ZN3Foo2m1Ei to i64), i64 0 }, ptr %{{.+}} +// LLVM: } + auto make_virtual() -> void (Foo::*)(int) { return &Foo::m3; } @@ -23,6 +29,10 @@ auto make_virtual() -> void (Foo::*)(int) { // CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_Foo> // CHECK: } +// LLVM-LABEL: @_Z12make_virtualv +// LLVM: store { i64, i64 } { i64 9, i64 0 }, ptr %{{.+}} +// LLVM: } + auto make_null() -> void (Foo::*)(int) { return nullptr; } @@ -31,6 +41,10 @@ auto make_null() -> void (Foo::*)(int) { // CHECK: %{{.+}} = cir.const #cir.method : !cir.method in !ty_Foo> // CHECK: } +// LLVM-LABEL: @_Z9make_nullv +// LLVM: store { i64, i64 } zeroinitializer, ptr %{{.+}} +// LLVM: } + void call(Foo *obj, void (Foo::*func)(int), int arg) { (obj->*func)(arg); } @@ -40,3 +54,27 @@ void call(Foo *obj, void (Foo::*func)(int), int arg) { // CHECK-NEXT: %[[#ARG:]] = cir.load %{{.+}} : !cir.ptr, !s32i // CHECK-NEXT: cir.call %[[CALLEE]](%[[THIS]], %[[#ARG]]) : (!cir.ptr, !s32i)>>, !cir.ptr, !s32i) -> () // CHECK: } + +// LLVM-LABEL: @_Z4callP3FooMS_FviEi +// LLVM: %[[#obj:]] = load ptr, ptr %{{.+}} +// LLVM-NEXT: %[[#memfn_ptr:]] = load { i64, i64 }, ptr %{{.+}} +// LLVM-NEXT: %[[#this_adj:]] = extractvalue { i64, i64 } %[[#memfn_ptr]], 1 +// LLVM-NEXT: %[[#adjusted_this:]] = getelementptr i8, ptr %[[#obj]], i64 %[[#this_adj]] +// LLVM-NEXT: %[[#ptr_field:]] = extractvalue { i64, i64 } %[[#memfn_ptr]], 0 +// LLVM-NEXT: %[[#virt_bit:]] = and i64 %[[#ptr_field]], 1 +// LLVM-NEXT: %[[#is_virt:]] = icmp eq i64 %[[#virt_bit]], 1 +// LLVM-NEXT: br i1 %[[#is_virt]], label %[[#block_virt:]], label %[[#block_non_virt:]] +// LLVM: [[#block_virt]]: +// LLVM-NEXT: %[[#vtable_ptr:]] = load ptr, ptr %[[#obj]] +// LLVM-NEXT: %[[#vtable_offset:]] = sub i64 %[[#ptr_field]], 1 +// LLVM-NEXT: %[[#vfp_ptr:]] = getelementptr i8, ptr %[[#vtable_ptr]], i64 %[[#vtable_offset]] +// LLVM-NEXT: %[[#vfp:]] = load ptr, ptr %[[#vfp_ptr]] +// LLVM-NEXT: br label %[[#block_continue:]] +// LLVM: [[#block_non_virt]]: +// LLVM-NEXT: %[[#func_ptr:]] = inttoptr i64 %[[#ptr_field]] to ptr +// LLVM-NEXT: br label %[[#block_continue]] +// LLVM: [[#block_continue]]: +// LLVM-NEXT: %[[#callee_ptr:]] = phi ptr [ %[[#func_ptr]], %[[#block_non_virt]] ], [ %[[#vfp]], %[[#block_virt]] ] +// LLVM-NEXT: %[[#arg:]] = load i32, ptr %{{.+}} +// LLVM-NEXT: call void %[[#callee_ptr]](ptr %[[#adjusted_this]], i32 %[[#arg]]) +// LLVM: } diff --git a/clang/test/CIR/Lowering/struct.cir b/clang/test/CIR/Lowering/struct.cir index e612dcd66efd..e082d5a7f113 100644 --- a/clang/test/CIR/Lowering/struct.cir +++ b/clang/test/CIR/Lowering/struct.cir @@ -24,6 +24,21 @@ module { cir.return } + // CHECK-LABEL: @test_value + cir.func @test_value() { + %0 = cir.const #cir.const_struct<{#cir.int<1> : !u8i, #cir.int<2> : !s32i}> : !ty_S + // CHECK: %[[#v0:]] = llvm.mlir.undef : !llvm.struct<"struct.S", (i8, i32)> + // CHECK-NEXT: %[[#v1:]] = llvm.mlir.constant(1 : i8) : i8 + // CHECK-NEXT: %[[#v2:]] = llvm.insertvalue %[[#v1]], %[[#v0]][0] : !llvm.struct<"struct.S", (i8, i32)> + // CHECK-NEXT: %[[#v3:]] = llvm.mlir.constant(2 : i32) : i32 + // CHECK-NEXT: %[[#v4:]] = llvm.insertvalue %[[#v3]], %[[#v2]][1] : !llvm.struct<"struct.S", (i8, i32)> + %1 = cir.extract_member %0[0] : !ty_S -> !u8i + // CHECK-NEXT: %{{.+}} = llvm.extractvalue %[[#v4]][0] : !llvm.struct<"struct.S", (i8, i32)> + %2 = cir.extract_member %0[1] : !ty_S -> !s32i + // CHECK-NEXT: %{{.+}} = llvm.extractvalue %[[#v4]][1] : !llvm.struct<"struct.S", (i8, i32)> + cir.return + } + cir.func @shouldConstInitLocalStructsWithConstStructAttr() { %0 = cir.alloca !ty_S2A, !cir.ptr, ["s"] {alignment = 4 : i64} %1 = cir.const #cir.const_struct<{#cir.int<1> : !s32i}> : !ty_S2A From c99703037ad2cecce98560f546b36406961cb476 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 24 Feb 2025 20:02:09 +0100 Subject: [PATCH 2288/2301] [CIR][CIRGen][Builtin][Neon] Lower builtin_neon_vqshlud_n_s64 (#1384) Lower builtin_neon_vqshlud_n_s64 --- clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 10 +++++++++- clang/test/CIR/CodeGen/AArch64/neon.c | 16 ++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 7f27edca2224..cd5196376c21 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -3828,7 +3828,15 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NEON::BI__builtin_neon_vqdmlslh_s16 NYI"); } case NEON::BI__builtin_neon_vqshlud_n_s64: { - llvm_unreachable("NEON::BI__builtin_neon_vqshlud_n_s64 NYI"); + const cir::IntType IntType = builder.getSInt64Ty(); + Ops.push_back(emitScalarExpr(E->getArg(1))); + std::optional APSInt = + E->getArg(1)->getIntegerConstantExpr(getContext()); + assert(APSInt && "Expected argument to be a constant"); + Ops[1] = builder.getSInt64(APSInt->getZExtValue(), getLoc(E->getExprLoc())); + const StringRef Intrinsic = "aarch64.neon.sqshlu"; + return emitNeonCall(builder, {IntType, IntType}, Ops, Intrinsic, IntType, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vqshld_n_u64: case NEON::BI__builtin_neon_vqshld_n_s64: { diff --git a/clang/test/CIR/CodeGen/AArch64/neon.c b/clang/test/CIR/CodeGen/AArch64/neon.c index aa6dca77e7df..d169be7712cd 100644 --- a/clang/test/CIR/CodeGen/AArch64/neon.c +++ b/clang/test/CIR/CodeGen/AArch64/neon.c @@ -15680,12 +15680,16 @@ uint64_t test_vqshld_n_u64(uint64_t a) { // return (int32_t)vqshlus_n_s32(a, 31); // } -// NYI-LABEL: @test_vqshlud_n_s64( -// NYI: [[VQSHLU_N:%.*]] = call i64 @llvm.aarch64.neon.sqshlu.i64(i64 %a, i64 63) -// NYI: ret i64 [[VQSHLU_N]] -// int64_t test_vqshlud_n_s64(int64_t a) { -// return (int64_t)vqshlud_n_s64(a, 63); -// } +int64_t test_vqshlud_n_s64(int64_t a) { + return (int64_t)vqshlud_n_s64(a, 63); + + // CIR-LABEL: vqshlud_n_s64 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.sqshlu" {{.*}}, {{.*}} : (!s64i, !s64i) -> !s64i + + // LLVM-LABEL: @test_vqshlud_n_s64( + // LLVM: [[VQSHLU_N:%.*]] = call i64 @llvm.aarch64.neon.sqshlu.i64(i64 %0, i64 63) + // LLVM: ret i64 [[VQSHLU_N]] +} // NYI-LABEL: @test_vqshlu_n_s64( // NYI: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> From e607e3fce587ed20e555becd587f55ea1cc1339e Mon Sep 17 00:00:00 2001 From: Sirui Mu Date: Tue, 25 Feb 2025 03:07:34 +0800 Subject: [PATCH 2289/2301] [CIR] Add support for __builtin_bitreverse (#1386) This PR adds CIRGen and LLVM lowering support for the `__builtin_bitreverse` family of builtin functions. --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 30 +++++++++++++ clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 7 +++- clang/test/CIR/CodeGen/builtin-bitreverse.c | 44 ++++++++++++++++++++ 3 files changed, 79 insertions(+), 2 deletions(-) create mode 100644 clang/test/CIR/CodeGen/builtin-bitreverse.c diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 1feb631bfcdf..dc85225d0d5f 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1782,6 +1782,36 @@ def RotateOp : CIR_Op<"rotate", [Pure, SameOperandsAndResultType]> { }]; } +//===----------------------------------------------------------------------===// +// BitReverseOp +//===----------------------------------------------------------------------===// + +def BitReverseOp : CIR_Op<"bit_reverse", [Pure, SameOperandsAndResultType]> { + let summary = "Reverse the bit pattern of the operand integer"; + let description = [{ + The `cir.bit_reverse` operation reverses the bit pattern of the operand + integer. Its only argument must be of unsigned integer types of width 8, 16, + 32, or 64. + + This operation covers the C/C++ builtin function `__builtin_bitreverse`. + + Example: + + ```mlir + %1 = cir.bit_reverse %0 : !u32i + ``` + }]; + + let arguments = (ins AnyTypeOf<[UInt8, UInt16, UInt32, UInt64]>:$src); + let results = (outs AnyTypeOf<[UInt8, UInt16, UInt32, UInt64]>:$result); + + let assemblyFormat = [{ + $src `:` type($result) attr-dict + }]; + + let llvmOp = "BitReverseOp"; +} + //===----------------------------------------------------------------------===// // CmpThreeWayOp //===----------------------------------------------------------------------===// diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 987f20629bea..eb4d76098de5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -1187,8 +1187,11 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_bitreverse8: case Builtin::BI__builtin_bitreverse16: case Builtin::BI__builtin_bitreverse32: - case Builtin::BI__builtin_bitreverse64: - llvm_unreachable("BI__builtin_bitreverse8 like NYI"); + case Builtin::BI__builtin_bitreverse64: { + mlir::Value arg = emitScalarExpr(E->getArg(0)); + return RValue::get( + builder.create(getLoc(E->getSourceRange()), arg)); + } case Builtin::BI__builtin_rotateleft8: case Builtin::BI__builtin_rotateleft16: diff --git a/clang/test/CIR/CodeGen/builtin-bitreverse.c b/clang/test/CIR/CodeGen/builtin-bitreverse.c new file mode 100644 index 000000000000..9d569f8f64b7 --- /dev/null +++ b/clang/test/CIR/CodeGen/builtin-bitreverse.c @@ -0,0 +1,44 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +unsigned char bitreverse8(unsigned char value) { + return __builtin_bitreverse8(value); +} + +// CIR-LABEL: @bitreverse8 +// CIR: %{{.+}} = cir.bit_reverse %{{.+}} : !u8i + +// LLVM-LABEL: @bitreverse8 +// LLVM: %{{.+}} = call i8 @llvm.bitreverse.i8(i8 %{{.+}}) + +unsigned short bitreverse16(unsigned short value) { + return __builtin_bitreverse16(value); +} + +// CIR-LABEL: @bitreverse16 +// CIR: %{{.+}} = cir.bit_reverse %{{.+}} : !u16i + +// LLVM-LABEL: @bitreverse16 +// LLVM: %{{.+}} = call i16 @llvm.bitreverse.i16(i16 %{{.+}}) + +unsigned bitreverse32(unsigned value) { + return __builtin_bitreverse32(value); +} + +// CIR-LABEL: @bitreverse32 +// CIR: %{{.+}} = cir.bit_reverse %{{.+}} : !u32i + +// LLVM-LABEL: @bitreverse32 +// LLVM: %{{.+}} = call i32 @llvm.bitreverse.i32(i32 %{{.+}}) + +unsigned long long bitreverse64(unsigned long long value) { + return __builtin_bitreverse64(value); +} + +// CIR-LABEL: @bitreverse64 +// CIR: %{{.+}} = cir.bit_reverse %{{.+}} : !u64i + +// LLVM-LABEL: @bitreverse64 +// LLVM: %{{.+}} = call i64 @llvm.bitreverse.i64(i64 %{{.+}}) From 60126e97a084b9c8028a380ba2b1402a98e66b74 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 24 Feb 2025 20:13:03 +0100 Subject: [PATCH 2290/2301] [CIR][CIRGen][Builtin][Neon] Lower neon_vrnd32x (#1388) Lower neon_vrnd32x --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 14 +- .../AArch64/v8.5a-neon-frint3264-intrinsic.c | 150 ++++++++++++++++++ 2 files changed, 158 insertions(+), 6 deletions(-) create mode 100644 clang/test/CIR/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index cd5196376c21..dfc1c7c79de1 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2619,6 +2619,14 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( : "aarch64.neon.srhadd"; break; } + case NEON::BI__builtin_neon_vrnd32x_f32: + case NEON::BI__builtin_neon_vrnd32xq_f32: + case NEON::BI__builtin_neon_vrnd32x_f64: + case NEON::BI__builtin_neon_vrnd32xq_f64: { + intrincsName = "aarch64.neon.frint32x"; + argTypes.push_back(vTy); + break; + } case NEON::BI__builtin_neon_vshl_v: case NEON::BI__builtin_neon_vshlq_v: { return builder.create( @@ -4186,12 +4194,6 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vrndh_f16: { llvm_unreachable("NEON::BI__builtin_neon_vrndh_f16 NYI"); } - case NEON::BI__builtin_neon_vrnd32x_f32: - case NEON::BI__builtin_neon_vrnd32xq_f32: - case NEON::BI__builtin_neon_vrnd32x_f64: - case NEON::BI__builtin_neon_vrnd32xq_f64: { - llvm_unreachable("NEON::BI__builtin_neon_vrnd32xq_f64 NYI"); - } case NEON::BI__builtin_neon_vrnd32z_f32: case NEON::BI__builtin_neon_vrnd32zq_f32: case NEON::BI__builtin_neon_vrnd32z_f64: diff --git a/clang/test/CIR/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.c b/clang/test/CIR/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.c new file mode 100644 index 000000000000..0baf8c7ca52e --- /dev/null +++ b/clang/test/CIR/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.c @@ -0,0 +1,150 @@ +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 -target-feature +v8.5a \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-cir -o %t.cir %s +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 -target-feature +v8.5a \ +// RUN: -fclangir -disable-O0-optnone \ +// RUN: -flax-vector-conversions=none -emit-llvm -fno-clangir-call-conv-lowering -o - %s \ +// RUN: | opt -S -passes=mem2reg,simplifycfg -o %t.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +// REQUIRES: aarch64-registered-target || arm-registered-target + +// This test mimics clang/test/CodeGen/AArch64/v8.2a-neon-frint3264-intrinsics.c, which eventually +// CIR shall be able to support fully. Since this is going to take some time to converge, +// the unsupported/NYI code is commented out, so that we can incrementally improve this. +// The NYI filecheck used contains the LLVM output from OG codegen that should guide the +// correct result when implementing this into the CIR pipeline. + +#include + +float32x2_t test_vrnd32x_f32(float32x2_t a) { + return vrnd32x_f32(a); + + // CIR-LABEL: vrnd32x_f32 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.frint32x" {{.*}} : (!cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vrnd32x_f32 + // LLVM: [[RND:%.*]] = call <2 x float> @llvm.aarch64.neon.frint32x.v2f32(<2 x float> %0) + // LLVM: ret <2 x float> [[RND]] +} + + +float32x4_t test_vrnd32xq_f32(float32x4_t a) { + return vrnd32xq_f32(a); + + // CIR-LABEL: vrnd32xq_f32 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.frint32x" {{.*}} : (!cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vrnd32xq_f32 + // LLVM: [[RND:%.*]] = call <4 x float> @llvm.aarch64.neon.frint32x.v4f32(<4 x float> %0) + // LLVM: ret <4 x float> [[RND]] +} + +// CHECK-LABEL: test_vrnd32z_f32 +// CHECK: [[RND:%.*]] = call <2 x float> @llvm.aarch64.neon.frint32z.v2f32(<2 x float> %a) +// CHECK: ret <2 x float> [[RND]] +// float32x2_t test_vrnd32z_f32(float32x2_t a) { +// return vrnd32z_f32(a); +// } + +// CHECK-LABEL: test_vrnd32zq_f32 +// CHECK: [[RND:%.*]] = call <4 x float> @llvm.aarch64.neon.frint32z.v4f32(<4 x float> %a) +// CHECK: ret <4 x float> [[RND]] +// float32x4_t test_vrnd32zq_f32(float32x4_t a) { +// return vrnd32zq_f32(a); +// } + +// CHECK-LABEL: test_vrnd64x_f32 +// CHECK: [[RND:%.*]] = call <2 x float> @llvm.aarch64.neon.frint64x.v2f32(<2 x float> %a) +// CHECK: ret <2 x float> [[RND]] +// float32x2_t test_vrnd64x_f32(float32x2_t a) { +// return vrnd64x_f32(a); +// } + +// CHECK-LABEL: test_vrnd64xq_f32 +// CHECK: [[RND:%.*]] = call <4 x float> @llvm.aarch64.neon.frint64x.v4f32(<4 x float> %a) +// CHECK: ret <4 x float> [[RND]] +// float32x4_t test_vrnd64xq_f32(float32x4_t a) { +// return vrnd64xq_f32(a); +// } + +// CHECK-LABEL: test_vrnd64z_f32 +// CHECK: [[RND:%.*]] = call <2 x float> @llvm.aarch64.neon.frint64z.v2f32(<2 x float> %a) +// CHECK: ret <2 x float> [[RND]] +// float32x2_t test_vrnd64z_f32(float32x2_t a) { +// return vrnd64z_f32(a); +// } + +// CHECK-LABEL: test_vrnd64zq_f32 +// CHECK: [[RND:%.*]] = call <4 x float> @llvm.aarch64.neon.frint64z.v4f32(<4 x float> %a) +// CHECK: ret <4 x float> [[RND]] +// float32x4_t test_vrnd64zq_f32(float32x4_t a) { +// return vrnd64zq_f32(a); +// } + +float64x1_t test_vrnd32x_f64(float64x1_t a) { + return vrnd32x_f64(a); + + // CIR-LABEL: vrnd32x_f64 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.frint32x" {{.*}} : (!cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vrnd32x_f64 + // LLVM: [[RND:%.*]] = call <1 x double> @llvm.aarch64.neon.frint32x.v1f64(<1 x double> %0) + // LLVM: ret <1 x double> [[RND]] +} + + +float64x2_t test_vrnd32xq_f64(float64x2_t a) { + return vrnd32xq_f64(a); + + // CIR-LABEL: vrnd32xq_f64 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.frint32x" {{.*}} : (!cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vrnd32xq_f64 + // LLVM: [[RND:%.*]] = call <2 x double> @llvm.aarch64.neon.frint32x.v2f64(<2 x double> %0) + // LLVM: ret <2 x double> [[RND]] +} + +// CHECK-LABEL: test_vrnd32z_f64 +// CHECK: [[RND:%.*]] = call <1 x double> @llvm.aarch64.neon.frint32z.v1f64(<1 x double> %a) +// CHECK: ret <1 x double> [[RND]] +// float64x1_t test_vrnd32z_f64(float64x1_t a) { +// return vrnd32z_f64(a); +// } + +// CHECK-LABEL: test_vrnd32zq_f64 +// CHECK: [[RND:%.*]] = call <2 x double> @llvm.aarch64.neon.frint32z.v2f64(<2 x double> %a) +// CHECK: ret <2 x double> [[RND]] +// float64x2_t test_vrnd32zq_f64(float64x2_t a) { +// return vrnd32zq_f64(a); +// } + +// CHECK-LABEL: test_vrnd64x_f64 +// CHECK: [[RND:%.*]] = call <1 x double> @llvm.aarch64.neon.frint64x.v1f64(<1 x double> %a) +// CHECK: ret <1 x double> [[RND]] +// float64x1_t test_vrnd64x_f64(float64x1_t a) { +// return vrnd64x_f64(a); +// } + +// CHECK-LABEL: test_vrnd64xq_f64 +// CHECK: [[RND:%.*]] = call <2 x double> @llvm.aarch64.neon.frint64x.v2f64(<2 x double> %a) +// CHECK: ret <2 x double> [[RND]] +// float64x2_t test_vrnd64xq_f64(float64x2_t a) { +// return vrnd64xq_f64(a); +// } + +// CHECK-LABEL: test_vrnd64z_f64 +// CHECK: [[RND:%.*]] = call <1 x double> @llvm.aarch64.neon.frint64z.v1f64(<1 x double> %a) +// CHECK: ret <1 x double> [[RND]] +// float64x1_t test_vrnd64z_f64(float64x1_t a) { +// return vrnd64z_f64(a); +// } + +// CHECK-LABEL: test_vrnd64zq_f64 +// CHECK: [[RND:%.*]] = call <2 x double> @llvm.aarch64.neon.frint64z.v2f64(<2 x double> %a) +// CHECK: ret <2 x double> [[RND]] +// float64x2_t test_vrnd64zq_f64(float64x2_t a) { +// return vrnd64zq_f64(a); +// } From 0018c12e027272e898cd527a088d8c3d581753cf Mon Sep 17 00:00:00 2001 From: AdUhTkJm <30948580+AdUhTkJm@users.noreply.github.com> Date: Mon, 24 Feb 2025 22:15:12 +0000 Subject: [PATCH 2291/2301] [CIR] CallConvLowering for X86 aggregate (#1387) This deals with some x86 aggregate types for CallConvLowering pass. Suppose we have a simple struct like this. ```cpp struct dim3 { int x, y, z; }; ``` It can be coerced into ```cpp struct dim3_ { uint64_t xy; int z; }; ``` And for a function that receives it as an argument, OG does the following transformation for x86: ```cpp void f(dim3 arg) { /* Before */ } void f(uint64_t xy, int z) { /* After */ } ``` Now this transformation is implemented in the CallConvLowering pass of CIR. --- clang/include/clang/CIR/MissingFeatures.h | 1 + .../TargetLowering/LowerFunction.cpp | 44 +++++++++- .../Transforms/TargetLowering/Targets/X86.cpp | 12 ++- .../CIR/CallConvLowering/x86_64/basic.cpp | 82 ++++++++++++++++++- 4 files changed, 134 insertions(+), 5 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 67547c376790..21bfe021dd1a 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -347,6 +347,7 @@ struct MissingFeatures { static bool undef() { return false; } static bool noFPClass() { return false; } static bool llvmIntrinsicElementTypeSupport() { return false; } + static bool argHasMaybeUndefAttr() { return false; } //-- Missing parts of the CIRGenModule::Release skeleton. static bool emitModuleInitializers() { return false; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp index 023e3baf2105..59420c1d2b54 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerFunction.cpp @@ -612,7 +612,7 @@ llvm::LogicalResult LowerFunction::buildFunctionProlog( Ptr.getLoc(), PointerType::get(STy, ptrType.getAddrSpace()), CastKind::bitcast, Ptr); } else { - cir_cconv_unreachable("NYI"); + addrToStoreInto = createTmpAlloca(*this, Ptr.getLoc(), STy); } assert(STy.getNumElements() == NumIRArgs); @@ -628,7 +628,7 @@ llvm::LogicalResult LowerFunction::buildFunctionProlog( } if (srcSize > dstSize) { - cir_cconv_unreachable("NYI"); + createMemCpy(*this, Ptr, addrToStoreInto, dstSize); } } } else { @@ -1126,9 +1126,47 @@ mlir::Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo, // Fast-isel and the optimizer generally like scalar values better than // FCAs, so we flatten them if this is safe to do for this argument. + // As an example, if we have SrcTy = struct { i32, i32, i32 }, then the + // coerced type can be STy = struct { u64, i32 }. Hence a function with + // a single argument SrcTy will be rewritten to take two arguments, + // namely u64 and i32. StructType STy = mlir::dyn_cast(ArgInfo.getCoerceToType()); if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { - cir_cconv_unreachable("NYI"); + mlir::Type SrcTy = Src.getType(); + llvm::TypeSize SrcTypeSize = LM.getDataLayout().getTypeAllocSize(SrcTy); + llvm::TypeSize DstTypeSize = LM.getDataLayout().getTypeAllocSize(STy); + + if (SrcTypeSize.isScalable()) { + cir_cconv_unreachable("NYI"); + } else { + size_t SrcSize = SrcTypeSize.getFixedValue(); + size_t DstSize = DstTypeSize.getFixedValue(); + + // Create a new temporary space and copy src in the front bits of it. + // Other bits will be left untouched. + // Note in OG, Src is of type Address, while here it is mlir::Value. + // Here we need to first create another alloca to convert it into a + // PointerType, so that we can call memcpy. + if (SrcSize < DstSize) { + auto Alloca = createTmpAlloca(*this, loc, STy); + auto SrcAlloca = createTmpAlloca(*this, loc, SrcTy); + rewriter.create(loc, Src, SrcAlloca); + createMemCpy(*this, Alloca, SrcAlloca, SrcSize); + Src = Alloca; + } else { + cir_cconv_unreachable("NYI"); + } + + assert(NumIRArgs == STy.getNumElements()); + for (unsigned I = 0; I != STy.getNumElements(); ++I) { + mlir::Value Member = rewriter.create( + loc, PointerType::get(STy.getMembers()[I]), Src, /*name=*/"", + /*index=*/I); + mlir::Value Load = rewriter.create(loc, Member); + cir_cconv_assert(!cir::MissingFeatures::argHasMaybeUndefAttr()); + IRCallArgs[FirstIRArg + I] = Load; + } + } } else { // In the simple case, just pass the coerced loaded value. cir_cconv_assert(NumIRArgs == 1); diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp index 6e4856c42482..8d769808de70 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/X86.cpp @@ -182,6 +182,8 @@ void X86_64ABIInfo::classify(mlir::Type Ty, uint64_t OffsetBase, Class &Lo, return; } else if (mlir::isa(Ty)) { Current = Class::Integer; + } else if (mlir::isa(Ty)) { + Current = Class::Integer; } else if (const auto RT = mlir::dyn_cast(Ty)) { uint64_t Size = getContext().getTypeSize(Ty); @@ -397,7 +399,11 @@ mlir::Type X86_64ABIInfo::GetINTEGERTypeAtOffset(mlir::Type DestTy, // returning an 8-byte unit starting with it. See if we can safely use it. if (IROffset == 0) { // Pointers and int64's always fill the 8-byte unit. - cir_cconv_assert(!mlir::isa(DestTy) && "Ptrs are NYI"); + if (auto ptrTy = mlir::dyn_cast(DestTy)) { + if (ptrTy.getTypeSizeInBits(getDataLayout().layout, {}) == 64) + return DestTy; + cir_cconv_unreachable("NYI"); + } // If we have a 1/2/4-byte integer, we can use it only if the rest of the // goodness in the source type is just tail padding. This is allowed to @@ -406,6 +412,10 @@ mlir::Type X86_64ABIInfo::GetINTEGERTypeAtOffset(mlir::Type DestTy, // have to do this analysis on the source type because we can't depend on // unions being lowered a specific way etc. if (auto intTy = mlir::dyn_cast(DestTy)) { + // Pointers and int64's always fill the 8-byte unit. + if (intTy.getWidth() == 64) + return DestTy; + if (intTy.getWidth() == 8 || intTy.getWidth() == 16 || intTy.getWidth() == 32) { unsigned BitWidth = intTy.getWidth(); diff --git a/clang/test/CIR/CallConvLowering/x86_64/basic.cpp b/clang/test/CIR/CallConvLowering/x86_64/basic.cpp index 5bef1d34f974..52014d301b9b 100644 --- a/clang/test/CIR/CallConvLowering/x86_64/basic.cpp +++ b/clang/test/CIR/CallConvLowering/x86_64/basic.cpp @@ -125,4 +125,84 @@ S1 s1(S1 arg) { // CHECK: %[[#V18:]] = cir.load %[[#V17]] : !cir.ptr, !u64i // CHECK: cir.return %[[#V18]] : !u64i return {1, 2}; -} \ No newline at end of file +} + +/// Test call conv lowering for flattened structs. /// + +struct S2 { + int x, y, z; +}; + +// COM: Function prologue + +// CHECK: cir.func @_Z2s22S2(%[[ARG0:[a-z0-9]+]]: !u64i {{.*}}, %[[ARG1:[a-z0-9]+]]: !s32i {{.*}}) -> !ty_anon_struct +// CHECK: %[[#F0:]] = cir.alloca !ty_S2_, !cir.ptr +// CHECK: %[[#F1:]] = cir.alloca !ty_anon_struct, !cir.ptr +// CHECK: %[[#F2:]] = cir.get_member %[[#F1]][0]{{.*}} : !cir.ptr -> !cir.ptr +// CHECK: cir.store %[[ARG0]], %[[#F2]] : !u64i, !cir.ptr +// CHECK: %[[#F3:]] = cir.get_member %[[#F1]][1]{{.*}} : !cir.ptr -> !cir.ptr +// CHECK: cir.store %[[ARG1]], %[[#F3]] : !s32i, !cir.ptr +// CHECK: %[[#F4:]] = cir.cast(bitcast, %[[#F1]] : !cir.ptr), !cir.ptr +// CHECK: %[[#F5:]] = cir.cast(bitcast, %[[#F0]] : !cir.ptr), !cir.ptr +// CHECK: %[[#F6:]] = cir.const #cir.int<12> : !u64i +// CHECK: cir.libc.memcpy %[[#F6]] bytes from %[[#F4]] to %[[#F5]] +S2 s2(S2 arg) { + // CHECK: %[[#F7:]] = cir.alloca !ty_S2_, !cir.ptr, ["__retval"] {alignment = 4 : i64} + // CHECK: %[[#F8:]] = cir.alloca !ty_S2_, !cir.ptr, ["agg.tmp0"] {alignment = 4 : i64} + // CHECK: %[[#F9:]] = cir.alloca !ty_S2_, !cir.ptr, ["agg.tmp1"] {alignment = 4 : i64} + // CHECK: %[[#F10:]] = cir.alloca !ty_anon_struct, !cir.ptr, ["tmp"] {alignment = 8 : i64} + // CHECK: %[[#F11:]] = cir.alloca !ty_S2_, !cir.ptr, ["tmp"] {alignment = 4 : i64} + // CHECK: %[[#F12:]] = cir.alloca !ty_anon_struct, !cir.ptr, ["tmp"] {alignment = 8 : i64} + // CHECK: %[[#F13:]] = cir.alloca !ty_anon_struct, !cir.ptr, ["tmp"] {alignment = 8 : i64} + + // COM: Construction of S2 { 1, 2, 3 }. + + // CHECK: %[[#F14:]] = cir.get_member %[[#F8]][0] {{.*}} : !cir.ptr -> !cir.ptr + // CHECK: %[[#F15:]] = cir.const #cir.int<1> : !s32i + // CHECK: cir.store %[[#F15]], %[[#F14]] : !s32i, !cir.ptr + // CHECK: %[[#F16:]] = cir.get_member %[[#F8]][1] {{.*}} : !cir.ptr -> !cir.ptr + // CHECK: %[[#F17:]] = cir.const #cir.int<2> : !s32i + // CHECK: cir.store %[[#F17]], %[[#F16]] : !s32i, !cir.ptr + // CHECK: %[[#F18:]] = cir.get_member %[[#F8]][2] {{.*}} : !cir.ptr -> !cir.ptr + // CHECK: %[[#F19:]] = cir.const #cir.int<3> : !s32i + // CHECK: cir.store %[[#F19]], %[[#F18]] : !s32i, !cir.ptr + + // COM: Flattening of the struct. + // COM: { i32, i32, i32 } -> { i64, i32 }. + + // CHECK: %[[#F20:]] = cir.load %[[#F8]] : !cir.ptr, !ty_S2_ + // CHECK: cir.store %[[#F20]], %[[#F11]] : !ty_S2_, !cir.ptr + // CHECK: %[[#F21:]] = cir.cast(bitcast, %[[#F11]] : !cir.ptr), !cir.ptr + // CHECK: %[[#F22:]] = cir.cast(bitcast, %[[#F10]] : !cir.ptr), !cir.ptr + // CHECK: %[[#F23:]] = cir.const #cir.int<12> : !u64i + // CHECK: cir.libc.memcpy %[[#F23]] bytes from %[[#F21]] to %[[#F22]] + + // COM: Function call. + // COM: Retrieve the two values in { i64, i32 }. + + // CHECK: %[[#F24:]] = cir.get_member %[[#F10]][0] {name = ""} : !cir.ptr -> !cir.ptr + // CHECK: %[[#F25:]] = cir.load %[[#F24]] : !cir.ptr, !u64i + // CHECK: %[[#F26:]] = cir.get_member %[[#F10]][1] {name = ""} : !cir.ptr -> !cir.ptr + // CHECK: %[[#F27:]] = cir.load %[[#F26]] : !cir.ptr, !s32i + // CHECK: %[[#F28:]] = cir.call @_Z2s22S2(%[[#F25]], %[[#F27]]) : (!u64i, !s32i) -> !ty_anon_struct + // CHECK: cir.store %[[#F28]], %[[#F12]] : !ty_anon_struct, !cir.ptr + + // CHECK: %[[#F29:]] = cir.cast(bitcast, %[[#F12]] : !cir.ptr), !cir.ptr + // CHECK: %[[#F30:]] = cir.cast(bitcast, %[[#F9]] : !cir.ptr), !cir.ptr + // CHECK: %[[#F31:]] = cir.const #cir.int<12> : !u64i + // CHECK: cir.libc.memcpy %[[#F31]] bytes from %[[#F29]] to %[[#F30]] + + // COM: Construct S2 { 1, 2, 3 } again. + // COM: It has been tested above, so no duplication here. + + // COM: For return, the first two fields of S2 is also coerced. + + // CHECK: %[[#F39:]] = cir.cast(bitcast, %[[#F7]] : !cir.ptr), !cir.ptr + // CHECK: %[[#F40:]] = cir.cast(bitcast, %[[#F13]] : !cir.ptr), !cir.ptr + // CHECK: %[[#F41:]] = cir.const #cir.int<12> : !u64i + // cir.libc.memcpy %[[#F41]] bytes from %[[#F39]] to %[[#F40]] + // CHECK: %[[#F42:]] = cir.load %[[#F13]] : !cir.ptr, !ty_anon_struct + // cir.return %[[#F42]] : !ty_anon_struct + s2({ 1, 2, 3 }); + return { 1, 2, 3 }; +} From 52bb68cc64a2df4b1f8f870c3fb792cd19613b30 Mon Sep 17 00:00:00 2001 From: Letu Ren Date: Tue, 25 Feb 2025 06:21:24 +0800 Subject: [PATCH 2292/2301] [CIR][NFC] Un XFAIL global-bar-simple test (#1395) I checked https://github.com/llvm/clangir/blob/main/clang/test/CIR/CodeGen/globals.cpp and thought code works as expected. Although, test results need to be adjusted a bit. Resolves: https://github.com/llvm/clangir/issues/1252 --- clang/test/CIR/global-var-simple.cpp | 42 ++++++++++++++-------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/clang/test/CIR/global-var-simple.cpp b/clang/test/CIR/global-var-simple.cpp index ae0056f9f7c1..9c10deab2a02 100644 --- a/clang/test/CIR/global-var-simple.cpp +++ b/clang/test/CIR/global-var-simple.cpp @@ -1,60 +1,60 @@ // Global variables of intergal types -// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -// XFAIL: * +// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck %s --input-file=%t.cir char c; -// CHECK: cir.global @c : !cir.int +// CHECK: cir.global external @c = #cir.int<0> : !s8i signed char sc; -// CHECK: cir.global @sc : !cir.int +// CHECK: cir.global external @sc = #cir.int<0> : !s8i unsigned char uc; -// CHECK: cir.global @uc : !cir.int +// CHECK: cir.global external @uc = #cir.int<0> : !u8i short ss; -// CHECK: cir.global @ss : !cir.int +// CHECK: cir.global external @ss = #cir.int<0> : !s16i unsigned short us; -// CHECK: cir.global @us : !cir.int +// CHECK: cir.global external @us = #cir.int<0> : !u16i int si; -// CHECK: cir.global @si : !cir.int +// CHECK: cir.global external @si = #cir.int<0> : !s32i unsigned ui; -// CHECK: cir.global @ui : !cir.int +// CHECK: cir.global external @ui = #cir.int<0> : !u32i long sl; -// CHECK: cir.global @sl : !cir.int +// CHECK: cir.global external @sl = #cir.int<0> : !s64i unsigned long ul; -// CHECK: cir.global @ul : !cir.int +// CHECK: cir.global external @ul = #cir.int<0> : !u64i long long sll; -// CHECK: cir.global @sll : !cir.int +// CHECK: cir.global external @sll = #cir.int<0> : !s64i unsigned long long ull; -// CHECK: cir.global @ull : !cir.int +// CHECK: cir.global external @ull = #cir.int<0> : !u64i __int128 s128; -// CHECK: cir.global @s128 : !cir.int +// CHECK: cir.global external @s128 = #cir.int<0> : !s128i unsigned __int128 u128; -// CHECK: cir.global @u128 : !cir.int +// CHECK: cir.global external @u128 = #cir.int<0> : !u128i wchar_t wc; -// CHECK: cir.global @wc : !cir.int +// CHECK: cir.global external @wc = #cir.int<0> : !s32i char8_t c8; -// CHECK: cir.global @c8 : !cir.int +// CHECK: cir.global external @c8 = #cir.int<0> : !u8i char16_t c16; -// CHECK: cir.global @c16 : !cir.int +// CHECK: cir.global external @c16 = #cir.int<0> : !u16i char32_t c32; -// CHECK: cir.global @c32 : !cir.int +// CHECK: cir.global external @c32 = #cir.int<0> : !u32i _BitInt(20) sb20; -// CHECK: cir.global @sb20 : !cir.int +// CHECK: cir.global external @sb20 = #cir.int<0> : !cir.int unsigned _BitInt(48) ub48; -// CHECK: cir.global @ub48 : !cir.int +// CHECK: external @ub48 = #cir.int<0> : !u48i From db3e2797fecf0e395d87f4c225f4dbe8f0ed7f57 Mon Sep 17 00:00:00 2001 From: Letu Ren Date: Tue, 25 Feb 2025 06:22:48 +0800 Subject: [PATCH 2293/2301] [CIR][CIRGen][builtin] handle `_mm_clflush` (#1397) --- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 17 +++++ .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 61 --------------- clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 74 ++++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenFunction.h | 44 ++++++++++- clang/test/CIR/CodeGen/X86/builtins-x86.c | 13 ++++ 5 files changed, 146 insertions(+), 63 deletions(-) create mode 100644 clang/test/CIR/CodeGen/X86/builtins-x86.c diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index eb4d76098de5..fa2d720d0e15 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -2626,6 +2626,23 @@ mlir::Value CIRGenFunction::emitTargetBuiltinExpr(unsigned BuiltinID, getTarget().getTriple().getArch()); } +mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned ICEArguments, + unsigned Idx, + const CallExpr *E) { + mlir::Value Arg = {}; + if ((ICEArguments & (1 << Idx)) == 0) { + Arg = emitScalarExpr(E->getArg(Idx)); + } else { + // If this is required to be a constant, constant fold it so that we + // know that the generated intrinsic gets a ConstantInt. + std::optional Result = + E->getArg(Idx)->getIntegerConstantExpr(getContext()); + assert(Result && "Expected argument to be a constant"); + Arg = builder.getConstInt(getLoc(E->getSourceRange()), *Result); + } + return Arg; +} + void CIRGenFunction::emitVAStartEnd(mlir::Value ArgValue, bool IsStart) { // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this // early, defer to LLVM lowering. diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index dfc1c7c79de1..3d29cb1dc06b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -1676,50 +1676,6 @@ static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = { #undef SMEMAP1 #undef SMEMAP2 -// Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code, -// we handle them here. -enum class CIRGenFunction::MSVCIntrin { - _BitScanForward, - _BitScanReverse, - _InterlockedAnd, - _InterlockedDecrement, - _InterlockedExchange, - _InterlockedExchangeAdd, - _InterlockedExchangeSub, - _InterlockedIncrement, - _InterlockedOr, - _InterlockedXor, - _InterlockedExchangeAdd_acq, - _InterlockedExchangeAdd_rel, - _InterlockedExchangeAdd_nf, - _InterlockedExchange_acq, - _InterlockedExchange_rel, - _InterlockedExchange_nf, - _InterlockedCompareExchange_acq, - _InterlockedCompareExchange_rel, - _InterlockedCompareExchange_nf, - _InterlockedCompareExchange128, - _InterlockedCompareExchange128_acq, - _InterlockedCompareExchange128_rel, - _InterlockedCompareExchange128_nf, - _InterlockedOr_acq, - _InterlockedOr_rel, - _InterlockedOr_nf, - _InterlockedXor_acq, - _InterlockedXor_rel, - _InterlockedXor_nf, - _InterlockedAnd_acq, - _InterlockedAnd_rel, - _InterlockedAnd_nf, - _InterlockedIncrement_acq, - _InterlockedIncrement_rel, - _InterlockedIncrement_nf, - _InterlockedDecrement_acq, - _InterlockedDecrement_rel, - _InterlockedDecrement_nf, - __fastfail, -}; - static std::optional translateAarch64ToMsvcIntrin(unsigned BuiltinID) { using MSVCIntrin = CIRGenFunction::MSVCIntrin; @@ -2102,23 +2058,6 @@ mlir::Value CIRGenFunction::emitAArch64SVEBuiltinExpr(unsigned BuiltinID, llvm_unreachable("NYI"); } -mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned ICEArguments, - unsigned Idx, - const CallExpr *E) { - mlir::Value Arg = {}; - if ((ICEArguments & (1 << Idx)) == 0) { - Arg = emitScalarExpr(E->getArg(Idx)); - } else { - // If this is required to be a constant, constant fold it so that we - // know that the generated intrinsic gets a ConstantInt. - std::optional Result = - E->getArg(Idx)->getIntegerConstantExpr(getContext()); - assert(Result && "Expected argument to be a constant"); - Arg = builder.getConstInt(getLoc(E->getSourceRange()), *Result); - } - return Arg; -} - static mlir::Value emitArmLdrexNon128Intrinsic(unsigned int builtinID, const CallExpr *clangCallExpr, CIRGenFunction &cgf) { diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp index 0cd8f09f6da3..50d40cd278a2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp @@ -31,7 +31,79 @@ using namespace clang; using namespace clang::CIRGen; using namespace cir; +static std::optional +translateX86ToMsvcIntrin(unsigned BuiltinID) { + using MSVCIntrin = CIRGenFunction::MSVCIntrin; + switch (BuiltinID) { + default: + return std::nullopt; + case clang::X86::BI_BitScanForward: + case clang::X86::BI_BitScanForward64: + return MSVCIntrin::_BitScanForward; + case clang::X86::BI_BitScanReverse: + case clang::X86::BI_BitScanReverse64: + return MSVCIntrin::_BitScanReverse; + case clang::X86::BI_InterlockedAnd64: + return MSVCIntrin::_InterlockedAnd; + case clang::X86::BI_InterlockedCompareExchange128: + return MSVCIntrin::_InterlockedCompareExchange128; + case clang::X86::BI_InterlockedExchange64: + return MSVCIntrin::_InterlockedExchange; + case clang::X86::BI_InterlockedExchangeAdd64: + return MSVCIntrin::_InterlockedExchangeAdd; + case clang::X86::BI_InterlockedExchangeSub64: + return MSVCIntrin::_InterlockedExchangeSub; + case clang::X86::BI_InterlockedOr64: + return MSVCIntrin::_InterlockedOr; + case clang::X86::BI_InterlockedXor64: + return MSVCIntrin::_InterlockedXor; + case clang::X86::BI_InterlockedDecrement64: + return MSVCIntrin::_InterlockedDecrement; + case clang::X86::BI_InterlockedIncrement64: + return MSVCIntrin::_InterlockedIncrement; + } + llvm_unreachable("must return from switch"); +} + mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E) { - llvm_unreachable("NYI"); + if (BuiltinID == Builtin::BI__builtin_cpu_is) + llvm_unreachable("__builtin_cpu_is NYI"); + if (BuiltinID == Builtin::BI__builtin_cpu_supports) + llvm_unreachable("__builtin_cpu_supports NYI"); + if (BuiltinID == Builtin::BI__builtin_cpu_init) + llvm_unreachable("__builtin_cpu_init NYI"); + + // Handle MSVC intrinsics before argument evaluation to prevent double + // evaluation. + if (std::optional MsvcIntId = translateX86ToMsvcIntrin(BuiltinID)) + llvm_unreachable("translateX86ToMsvcIntrin NYI"); + + llvm::SmallVector Ops; + + // Find out if any arguments are required to be integer constant expressions. + unsigned ICEArguments = 0; + ASTContext::GetBuiltinTypeError Error; + getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); + assert(Error == ASTContext::GE_None && "Should not codegen an error"); + + for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { + Ops.push_back(emitScalarOrConstFoldImmArg(ICEArguments, i, E)); + } + + switch (BuiltinID) { + default: + return nullptr; + case X86::BI_mm_prefetch: { + llvm_unreachable("_mm_prefetch NYI"); + } + case X86::BI_mm_clflush: { + mlir::Type voidTy = cir::VoidType::get(&getMLIRContext()); + return builder + .create( + getLoc(E->getExprLoc()), builder.getStringAttr("x86.sse2.clflush"), + voidTy, Ops[0]) + .getResult(); + } + } } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 44484bcd2fe3..e4f7216f77fc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1015,7 +1015,49 @@ class CIRGenFunction : public CIRGenTypeCache { RValue emitCoroutineIntrinsic(const CallExpr *E, unsigned int IID); RValue emitCoroutineFrame(); - enum class MSVCIntrin; + // Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code, + // we handle them here. + enum class MSVCIntrin { + _BitScanForward, + _BitScanReverse, + _InterlockedAnd, + _InterlockedDecrement, + _InterlockedExchange, + _InterlockedExchangeAdd, + _InterlockedExchangeSub, + _InterlockedIncrement, + _InterlockedOr, + _InterlockedXor, + _InterlockedExchangeAdd_acq, + _InterlockedExchangeAdd_rel, + _InterlockedExchangeAdd_nf, + _InterlockedExchange_acq, + _InterlockedExchange_rel, + _InterlockedExchange_nf, + _InterlockedCompareExchange_acq, + _InterlockedCompareExchange_rel, + _InterlockedCompareExchange_nf, + _InterlockedCompareExchange128, + _InterlockedCompareExchange128_acq, + _InterlockedCompareExchange128_rel, + _InterlockedCompareExchange128_nf, + _InterlockedOr_acq, + _InterlockedOr_rel, + _InterlockedOr_nf, + _InterlockedXor_acq, + _InterlockedXor_rel, + _InterlockedXor_nf, + _InterlockedAnd_acq, + _InterlockedAnd_rel, + _InterlockedAnd_nf, + _InterlockedIncrement_acq, + _InterlockedIncrement_rel, + _InterlockedIncrement_nf, + _InterlockedDecrement_acq, + _InterlockedDecrement_rel, + _InterlockedDecrement_nf, + __fastfail, + }; mlir::Value emitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, diff --git a/clang/test/CIR/CodeGen/X86/builtins-x86.c b/clang/test/CIR/CodeGen/X86/builtins-x86.c new file mode 100644 index 000000000000..f91ae6696cd5 --- /dev/null +++ b/clang/test/CIR/CodeGen/X86/builtins-x86.c @@ -0,0 +1,13 @@ +// Global variables of intergal types +// RUN: %clang_cc1 -triple x86_64-unknown-linux -Wno-implicit-function-declaration -fclangir -emit-cir -o %t.cir %s +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux -Wno-implicit-function-declaration -fclangir -emit-llvm -o %t.ll %s +// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s + +void test_mm_clflush(const void* tmp_vCp) { + // CIR-LABEL: test_mm_clflush + // LLVM-LABEL: test_mm_clflush + _mm_clflush(tmp_vCp); + // CIR: {{%.*}} = cir.llvm.intrinsic "x86.sse2.clflush" {{%.*}} : (!cir.ptr) -> !void + // LLVM: call void @llvm.x86.sse2.clflush(ptr {{%.*}}) +} From d44a8e7fb7dc59fc1ef4175147e765791710d711 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Mon, 24 Feb 2025 23:23:26 +0100 Subject: [PATCH 2294/2301] [CIR][CIRGen][Builtin][Neon] Lower vrnd32z and vrnd32zq (#1399) Lower vrnd32z and vrnd32zq --- .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 14 ++-- .../AArch64/v8.5a-neon-frint3264-intrinsic.c | 64 ++++++++++++------- 2 files changed, 48 insertions(+), 30 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index 3d29cb1dc06b..1af545d2e6ca 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -2566,6 +2566,14 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( argTypes.push_back(vTy); break; } + case NEON::BI__builtin_neon_vrnd32z_f32: + case NEON::BI__builtin_neon_vrnd32zq_f32: + case NEON::BI__builtin_neon_vrnd32z_f64: + case NEON::BI__builtin_neon_vrnd32zq_f64: { + intrincsName = "aarch64.neon.frint32z"; + argTypes.push_back(vTy); + break; + } case NEON::BI__builtin_neon_vshl_v: case NEON::BI__builtin_neon_vshlq_v: { return builder.create( @@ -4133,12 +4141,6 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vrndh_f16: { llvm_unreachable("NEON::BI__builtin_neon_vrndh_f16 NYI"); } - case NEON::BI__builtin_neon_vrnd32z_f32: - case NEON::BI__builtin_neon_vrnd32zq_f32: - case NEON::BI__builtin_neon_vrnd32z_f64: - case NEON::BI__builtin_neon_vrnd32zq_f64: { - llvm_unreachable("NEON::BI__builtin_neon_vrnd32zq_f64 NYI"); - } case NEON::BI__builtin_neon_vrnd64x_f32: case NEON::BI__builtin_neon_vrnd64xq_f32: case NEON::BI__builtin_neon_vrnd64x_f64: diff --git a/clang/test/CIR/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.c b/clang/test/CIR/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.c index 0baf8c7ca52e..1ef035815351 100644 --- a/clang/test/CIR/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.c +++ b/clang/test/CIR/CodeGen/AArch64/v8.5a-neon-frint3264-intrinsic.c @@ -42,19 +42,27 @@ float32x4_t test_vrnd32xq_f32(float32x4_t a) { // LLVM: ret <4 x float> [[RND]] } -// CHECK-LABEL: test_vrnd32z_f32 -// CHECK: [[RND:%.*]] = call <2 x float> @llvm.aarch64.neon.frint32z.v2f32(<2 x float> %a) -// CHECK: ret <2 x float> [[RND]] -// float32x2_t test_vrnd32z_f32(float32x2_t a) { -// return vrnd32z_f32(a); -// } +float32x2_t test_vrnd32z_f32(float32x2_t a) { + return vrnd32z_f32(a); -// CHECK-LABEL: test_vrnd32zq_f32 -// CHECK: [[RND:%.*]] = call <4 x float> @llvm.aarch64.neon.frint32z.v4f32(<4 x float> %a) -// CHECK: ret <4 x float> [[RND]] -// float32x4_t test_vrnd32zq_f32(float32x4_t a) { -// return vrnd32zq_f32(a); -// } + // CIR-LABEL: vrnd32z_f32 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.frint32z" {{.*}} : (!cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vrnd32z_f32 + // LLVM: [[RND:%.*]] = call <2 x float> @llvm.aarch64.neon.frint32z.v2f32(<2 x float> %0) + // LLVM: ret <2 x float> [[RND]] +} + +float32x4_t test_vrnd32zq_f32(float32x4_t a) { + return vrnd32zq_f32(a); + + // CIR-LABEL: vrnd32zq_f32 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.frint32z" {{.*}} : (!cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vrnd32zq_f32 + // LLVM: [[RND:%.*]] = call <4 x float> @llvm.aarch64.neon.frint32z.v4f32(<4 x float> %0) + // LLVM: ret <4 x float> [[RND]] +} // CHECK-LABEL: test_vrnd64x_f32 // CHECK: [[RND:%.*]] = call <2 x float> @llvm.aarch64.neon.frint64x.v2f32(<2 x float> %a) @@ -107,19 +115,27 @@ float64x2_t test_vrnd32xq_f64(float64x2_t a) { // LLVM: ret <2 x double> [[RND]] } -// CHECK-LABEL: test_vrnd32z_f64 -// CHECK: [[RND:%.*]] = call <1 x double> @llvm.aarch64.neon.frint32z.v1f64(<1 x double> %a) -// CHECK: ret <1 x double> [[RND]] -// float64x1_t test_vrnd32z_f64(float64x1_t a) { -// return vrnd32z_f64(a); -// } +float64x1_t test_vrnd32z_f64(float64x1_t a) { + return vrnd32z_f64(a); -// CHECK-LABEL: test_vrnd32zq_f64 -// CHECK: [[RND:%.*]] = call <2 x double> @llvm.aarch64.neon.frint32z.v2f64(<2 x double> %a) -// CHECK: ret <2 x double> [[RND]] -// float64x2_t test_vrnd32zq_f64(float64x2_t a) { -// return vrnd32zq_f64(a); -// } + // CIR-LABEL: vrnd32z_f64 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.frint32z" {{.*}} : (!cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vrnd32z_f64 + // LLVM: [[RND:%.*]] = call <1 x double> @llvm.aarch64.neon.frint32z.v1f64(<1 x double> %0) + // LLVM: ret <1 x double> [[RND]] +} + +float64x2_t test_vrnd32zq_f64(float64x2_t a) { + return vrnd32zq_f64(a); + + // CIR-LABEL: vrnd32zq_f64 + // CIR: [[TMP0:%.*]] = cir.llvm.intrinsic "aarch64.neon.frint32z" {{.*}} : (!cir.vector) -> !cir.vector + + // LLVM-LABEL: @test_vrnd32zq_f64 + // LLVM: [[RND:%.*]] = call <2 x double> @llvm.aarch64.neon.frint32z.v2f64(<2 x double> %0) + // LLVM: ret <2 x double> [[RND]] +} // CHECK-LABEL: test_vrnd64x_f64 // CHECK: [[RND:%.*]] = call <1 x double> @llvm.aarch64.neon.frint64x.v1f64(<1 x double> %a) From 2947f3887d1ea6907f96b68ca6528023b1a25b31 Mon Sep 17 00:00:00 2001 From: "Chibuoyim (Wilson) Ogbonna" Date: Tue, 25 Feb 2025 01:25:23 +0300 Subject: [PATCH 2295/2301] [CIR][CodeGen] Support return in TryOp (#1398) This PR adds support for returns inside of a TryOp, for example: ``` void foo() { int r = 1; try { return; ++r; } catch (...) { } } ``` Currently, it fails during the CodeGen with: ``` error: 'cir.return' op expects parent op to be one of 'cir.func, cir.scope, cir.if, cir.switch, cir.do, cir.while, cir.for, cir.case' ``` were TryOp's omitted on purpose? --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 3 ++- clang/test/CIR/CodeGen/try-catch.cpp | 21 ++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index dc85225d0d5f..31d69d582c08 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -684,7 +684,8 @@ def StoreOp : CIR_Op<"store", [ def ReturnOp : CIR_Op<"return", [ParentOneOf<["FuncOp", "ScopeOp", "IfOp", "SwitchOp", "DoWhileOp", - "WhileOp", "ForOp", "CaseOp"]>, + "WhileOp", "ForOp", "CaseOp", + "TryOp"]>, Terminator]> { let summary = "Return from function"; let description = [{ diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp index 0bcca60549b9..570017f83553 100644 --- a/clang/test/CIR/CodeGen/try-catch.cpp +++ b/clang/test/CIR/CodeGen/try-catch.cpp @@ -128,3 +128,24 @@ void tc5() { // CHECK: cir.call exception @_Z3tc5v() : () -> () // CHECK: cir.yield // CHECK: }] + +// CHECK: cir.func @_Z3tc6v() +void tc6() { + int r = 1; + try { + return; + ++r; + } catch (...) { + } +} + +// CHECK: cir.scope { +// CHECK: cir.try { +// CHECK: cir.return +// CHECK: ^bb1: // no predecessors +// CHECK: %[[V2:.*]] = cir.load {{.*}} : !cir.ptr, !s32i +// CHECK: %[[V3:.*]] = cir.unary(inc, %[[V2]]) : !s32i, !s32i +// CHECK: cir.store %[[V3]], {{.*}} : !s32i, !cir.ptr +// CHECK: cir.yield +// CHECK: } +// CHECK: } From 253fb9a882751ae6b6fbb6093754510a21a35494 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Tue, 25 Feb 2025 06:28:49 +0800 Subject: [PATCH 2296/2301] [CIR][Lowering][TBAA] Lower CIR_TBAAStructAttr (#1381) --- .../Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp | 75 ++++++--- clang/test/CIR/CodeGen/tbaa-struct.cpp | 142 ++++++++++++++++++ 2 files changed, 192 insertions(+), 25 deletions(-) diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp index 7ea58c4f66e4..83a63ee38d11 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp @@ -16,9 +16,35 @@ class CIRToLLVMTBAAAttrLowering { public: CIRToLLVMTBAAAttrLowering(mlir::MLIRContext *mlirContext) : mlirContext(mlirContext) {} + + mlir::LLVM::TBAATypeDescriptorAttr + lowerCIRTBAAAttrToLLVMTBAAAttr(mlir::Attribute tbaa) { + if (auto charAttr = mlir::dyn_cast(tbaa)) { + return getChar(); + } + if (auto scalarAttr = mlir::dyn_cast(tbaa)) { + mlir::DataLayout layout; + auto size = layout.getTypeSize(scalarAttr.getType()); + return createScalarTypeNode(scalarAttr.getId(), getChar(), size); + } + if (auto structAttr = mlir::dyn_cast(tbaa)) { + llvm::SmallVector members; + for (const auto &member : structAttr.getMembers()) { + auto memberTypeDesc = + lowerCIRTBAAAttrToLLVMTBAAAttr(member.getTypeDesc()); + auto memberAttr = mlir::LLVM::TBAAMemberAttr::get( + mlirContext, memberTypeDesc, member.getOffset()); + members.push_back(memberAttr); + } + return mlir::LLVM::TBAATypeDescriptorAttr::get( + mlirContext, structAttr.getId(), members); + } + return nullptr; + } + +private: mlir::LLVM::TBAARootAttr getRoot() { - return mlir::LLVM::TBAARootAttr::get( - mlirContext, mlir::StringAttr::get(mlirContext, "Simple C/C++ TBAA")); + return createTBAARoot("Simple C/C++ TBAA"); } mlir::LLVM::TBAATypeDescriptorAttr getChar() { @@ -35,36 +61,35 @@ class CIRToLLVMTBAAAttrLowering { llvm::ArrayRef(members)); } -protected: - mlir::MLIRContext *mlirContext; -}; - -class CIRToLLVMTBAAScalarAttrLowering : public CIRToLLVMTBAAAttrLowering { -public: - CIRToLLVMTBAAScalarAttrLowering(mlir::MLIRContext *mlirContext) - : CIRToLLVMTBAAAttrLowering(mlirContext) {} - mlir::LLVM::TBAATypeDescriptorAttr - lowerScalarType(cir::TBAAScalarAttr scalarAttr) { - mlir::DataLayout layout; - auto size = layout.getTypeSize(scalarAttr.getType()); - return createScalarTypeNode(scalarAttr.getId(), getChar(), size); + mlir::LLVM::TBAARootAttr createTBAARoot(llvm::StringRef name) { + return mlir::LLVM::TBAARootAttr::get( + mlirContext, mlir::StringAttr::get(mlirContext, name)); } + + mlir::MLIRContext *mlirContext; }; mlir::ArrayAttr lowerCIRTBAAAttr(mlir::Attribute tbaa, mlir::ConversionPatternRewriter &rewriter, cir::LowerModule *lowerMod) { auto *ctx = rewriter.getContext(); - CIRToLLVMTBAAScalarAttrLowering scalarLower(ctx); - if (auto charAttr = mlir::dyn_cast(tbaa)) { - auto accessType = scalarLower.getChar(); - auto tag = mlir::LLVM::TBAATagAttr::get(accessType, accessType, 0); - return mlir::ArrayAttr::get(ctx, {tag}); - } - if (auto scalarAttr = mlir::dyn_cast(tbaa)) { - auto accessType = scalarLower.lowerScalarType(scalarAttr); - auto tag = mlir::LLVM::TBAATagAttr::get(accessType, accessType, 0); - return mlir::ArrayAttr::get(ctx, {tag}); + CIRToLLVMTBAAAttrLowering lower(ctx); + if (auto tbaaTag = mlir::dyn_cast(tbaa)) { + mlir::LLVM::TBAATypeDescriptorAttr accessType = + lower.lowerCIRTBAAAttrToLLVMTBAAAttr(tbaaTag.getAccess()); + if (auto structAttr = + mlir::dyn_cast(tbaaTag.getBase())) { + auto baseType = lower.lowerCIRTBAAAttrToLLVMTBAAAttr(structAttr); + auto tag = mlir::LLVM::TBAATagAttr::get(baseType, accessType, + tbaaTag.getOffset()); + return mlir::ArrayAttr::get(ctx, {tag}); + } + } else { + auto accessType = lower.lowerCIRTBAAAttrToLLVMTBAAAttr(tbaa); + if (accessType) { + auto tag = mlir::LLVM::TBAATagAttr::get(accessType, accessType, 0); + return mlir::ArrayAttr::get(ctx, {tag}); + } } return mlir::ArrayAttr(); } diff --git a/clang/test/CIR/CodeGen/tbaa-struct.cpp b/clang/test/CIR/CodeGen/tbaa-struct.cpp index 4b5916a9d3f8..7349ff4cc5e1 100644 --- a/clang/test/CIR/CodeGen/tbaa-struct.cpp +++ b/clang/test/CIR/CodeGen/tbaa-struct.cpp @@ -4,7 +4,16 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir -O1 // RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -disable-llvm-passes -no-struct-path-tbaa +// RUN: FileCheck --check-prefix=CHECK --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -disable-llvm-passes +// RUN: FileCheck --check-prefixes=PATH,OLD-PATH --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -disable-llvm-passes -relaxed-aliasing +// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O0 -disable-llvm-passes +// RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s +// NO-TBAA-NOT: !tbaa // CIR: #tbaa[[NYI:.*]] = #cir.tbaa // CIR: #tbaa[[CHAR:.*]] = #cir.tbaa_omnipotent_char // CIR: #tbaa[[INT:.*]] = #cir.tbaa_scalar @@ -81,6 +90,14 @@ uint32_t g(uint32_t *s, StructA *A, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructA_f32]]) + + + // CHECK-LABEL: define{{.*}} i32 @_Z1g + // CHECK: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32:!.*]] + // CHECK: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // PATH-LABEL: define{{.*}} i32 @_Z1g + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32:!.*]] + // PATH: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_A_f32:!.*]] *s = 1; A->f32 = 4; return *s; @@ -94,6 +111,13 @@ uint32_t g2(uint32_t *s, StructA *A, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u16i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u16i, !cir.ptr tbaa(#tbaa[[TAG_StructA_f16]]) + + // CHECK-LABEL: define{{.*}} i32 @_Z2g2 + // CHECK: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // CHECK: store i16 4, ptr %{{.*}}, align {{4|2}}, !tbaa [[TAG_i16:!.*]] + // PATH-LABEL: define{{.*}} i32 @_Z2g2 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // PATH: store i16 4, ptr %{{.*}}, align {{4|2}}, !tbaa [[TAG_A_f16:!.*]] *s = 1; A->f16 = 4; return *s; @@ -107,6 +131,13 @@ uint32_t g3(StructA *A, StructB *B, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructB_a_f32]]) + + // CHECK-LABEL: define{{.*}} i32 @_Z2g3 + // CHECK: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // CHECK: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // PATH-LABEL: define{{.*}} i32 @_Z2g3 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_A_f32]] + // PATH: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_B_a_f32:!.*]] A->f32 = 1; B->a.f32 = 4; return A->f32; @@ -120,6 +151,13 @@ uint32_t g4(StructA *A, StructB *B, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u16i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u16i, !cir.ptr tbaa(#tbaa[[TAG_StructB_a_f16]]) + + // LLVM-LABEL: define{{.*}} i32 @_Z2g4 + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // LLVM: store i16 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i16]] + // PATH-LABEL: define{{.*}} i32 @_Z2g4 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_A_f32]] + // PATH: store i16 4, ptr %{{.*}}, align {{4|2}}, !tbaa [[TAG_B_a_f16:!.*]] A->f32 = 1; B->a.f16 = 4; return A->f32; @@ -133,6 +171,13 @@ uint32_t g5(StructA *A, StructB *B, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructB_f32]]) + + // LLVM-LABEL: define{{.*}} i32 @_Z2g5 + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // LLVM: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // PATH-LABEL: define{{.*}} i32 @_Z2g5 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_A_f32]] + // PATH: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_B_f32:!.*]] A->f32 = 1; B->f32 = 4; return A->f32; @@ -146,6 +191,13 @@ uint32_t g6(StructA *A, StructB *B, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructB_a_f32_2]]) + + // LLVM-LABEL: define{{.*}} i32 @_Z2g6 + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // LLVM: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // PATH-LABEL: define{{.*}} i32 @_Z2g6 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_A_f32]] + // PATH: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_B_a_f32_2:!.*]] A->f32 = 1; B->a.f32_2 = 4; return A->f32; @@ -159,6 +211,13 @@ uint32_t g7(StructA *A, StructS *S, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructS_f32]]) + + // LLVM-LABEL: define{{.*}} i32 @_Z2g7 + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // LLVM: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // PATH-LABEL: define{{.*}} i32 @_Z2g7 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_A_f32]] + // PATH: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_S_f32:!.*]] A->f32 = 1; S->f32 = 4; return A->f32; @@ -172,6 +231,13 @@ uint32_t g8(StructA *A, StructS *S, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u16i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u16i, !cir.ptr tbaa(#tbaa[[TAG_StructS_f16]]) + + // LLVM-LABEL: define{{.*}} i32 @_Z2g8 + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // LLVM: store i16 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i16]] + // PATH-LABEL: define{{.*}} i32 @_Z2g8 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_A_f32]] + // PATH: store i16 4, ptr %{{.*}}, align {{4|2}}, !tbaa [[TAG_S_f16:!.*]] A->f32 = 1; S->f16 = 4; return A->f32; @@ -185,6 +251,13 @@ uint32_t g9(StructS *S, StructS2 *S2, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructS2_f32]]) + + // LLVM-LABEL: define{{.*}} i32 @_Z2g9 + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // LLVM: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // PATH-LABEL: define{{.*}} i32 @_Z2g9 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_S_f32]] + // PATH: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_S2_f32:!.*]] S->f32 = 1; S2->f32 = 4; return S->f32; @@ -198,6 +271,13 @@ uint32_t g10(StructS *S, StructS2 *S2, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u16i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u16i, !cir.ptr tbaa(#tbaa[[TAG_StructS2_f16]]) + + // LLVM-LABEL: define{{.*}} i32 @_Z3g10 + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // LLVM: store i16 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i16]] + // PATH-LABEL: define{{.*}} i32 @_Z3g10 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_S_f32]] + // PATH: store i16 4, ptr %{{.*}}, align {{4|2}}, !tbaa [[TAG_S2_f16:!.*]] S->f32 = 1; S2->f16 = 4; return S->f32; @@ -211,6 +291,13 @@ uint32_t g11(StructC *C, StructD *D, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructD_b_a_f32]]) + + // LLVM-LABEL: define{{.*}} i32 @_Z3g11 + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // LLVM: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // PATH-LABEL: define{{.*}} i32 @_Z3g11 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_C_b_a_f32:!.*]] + // PATH: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_D_b_a_f32:!.*]] C->b.a.f32 = 1; D->b.a.f32 = 4; return C->b.a.f32; @@ -224,6 +311,14 @@ uint32_t g12(StructC *C, StructD *D, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructB_a_f32]]) + + // LLVM-LABEL: define{{.*}} i32 @_Z3g12 + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // LLVM: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // TODO(cir): differentiate the two accesses. + // PATH-LABEL: define{{.*}} i32 @_Z3g12 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_B_a_f32]] + // PATH: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_B_a_f32]] StructB *b1 = &(C->b); StructB *b2 = &(D->b); // b1, b2 have different context. @@ -243,6 +338,11 @@ char g14(struct six *a, struct six *b) { // CIR: %[[TMP1:.*]] = cir.load %{{.*}} : !cir.ptr>, !cir.ptr // CIR: %[[TMP2:.*]] = cir.get_member %[[TMP1]][2] {name = "b"} : !cir.ptr -> !cir.ptr // CIR: %[[TMP3:.*]] = cir.load %[[TMP2]] : !cir.ptr, !s8i tbaa(#tbaa[[TAG_six_b]]) + + // LLVM-LABEL: define{{.*}} i8 @_Z3g14 + // LLVM: load i8, ptr %{{.*}}, align 1, !tbaa [[TAG_char]] + // PATH-LABEL: define{{.*}} i8 @_Z3g14 + // PATH: load i8, ptr %{{.*}}, align 1, !tbaa [[TAG_six_b:!.*]] return a->b; } @@ -256,7 +356,49 @@ uint32_t g15(StructS *S, StructS3 *S3, uint64_t count) { // CIR: %[[INT_4:.*]] = cir.const #cir.int<4> : !s32i // CIR: %[[UINT_4:.*]] = cir.cast(integral, %[[INT_4]] : !s32i), !u32i // CIR: cir.store %[[UINT_4]], %{{.*}} : !u32i, !cir.ptr tbaa(#tbaa[[TAG_StructS_f32]]) + + + // LLVM-LABEL: define{{.*}} i32 @_Z3g15 + // LLVM: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // LLVM: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_i32]] + // PATH-LABEL: define{{.*}} i32 @_Z3g15 + // PATH: store i32 1, ptr %{{.*}}, align 4, !tbaa [[TAG_S_f32]] + // PATH: store i32 4, ptr %{{.*}}, align 4, !tbaa [[TAG_S_f32]] S->f32 = 1; S3->f32 = 4; return S->f32; } + +// LLVM: [[TYPE_char:!.*]] = !{!"omnipotent char", [[TAG_cxx_tbaa:!.*]], +// LLVM: [[TAG_cxx_tbaa]] = !{!"Simple C++ TBAA"} +// LLVM: [[TAG_i32]] = !{[[TYPE_i32:!.*]], [[TYPE_i32]], i64 0} +// LLVM: [[TYPE_i32]] = !{!"int", [[TYPE_char]], +// LLVM: [[TAG_i16]] = !{[[TYPE_i16:!.*]], [[TYPE_i16]], i64 0} +// LLVM: [[TYPE_i16]] = !{!"short", [[TYPE_char]], +// LLVM: [[TAG_char]] = !{[[TYPE_char]], [[TYPE_char]], i64 0} + +// OLD-PATH: [[TAG_i32]] = !{[[TYPE_INT:!.*]], [[TYPE_INT]], i64 0} +// OLD-PATH: [[TYPE_INT]] = !{!"int", [[TYPE_CHAR:!.*]], i64 0} +// OLD-PATH: [[TYPE_CHAR]] = !{!"omnipotent char", [[TAG_cxx_tbaa:!.*]], +// OLD-PATH: [[TAG_cxx_tbaa]] = !{!"Simple C/C++ TBAA"} +// OLD-PATH: [[TAG_A_f32]] = !{[[TYPE_A:!.*]], [[TYPE_INT]], i64 4} +// OLD-PATH: [[TYPE_A]] = !{!"_ZTS7StructA", [[TYPE_SHORT:!.*]], i64 0, [[TYPE_INT]], i64 4, [[TYPE_SHORT]], i64 8, [[TYPE_INT]], i64 12} +// OLD-PATH: [[TYPE_SHORT:!.*]] = !{!"short", [[TYPE_CHAR]] +// OLD-PATH: [[TAG_A_f16]] = !{[[TYPE_A]], [[TYPE_SHORT]], i64 0} +// OLD-PATH: [[TAG_B_a_f32]] = !{[[TYPE_B:!.*]], [[TYPE_INT]], i64 8} +// OLD-PATH: [[TYPE_B]] = !{!"_ZTS7StructB", [[TYPE_SHORT]], i64 0, [[TYPE_A]], i64 4, [[TYPE_INT]], i64 20} +// OLD-PATH: [[TAG_B_a_f16]] = !{[[TYPE_B]], [[TYPE_SHORT]], i64 4} +// OLD-PATH: [[TAG_B_f32]] = !{[[TYPE_B]], [[TYPE_INT]], i64 20} +// OLD-PATH: [[TAG_B_a_f32_2]] = !{[[TYPE_B]], [[TYPE_INT]], i64 16} +// OLD-PATH: [[TAG_S_f32]] = !{[[TYPE_S:!.*]], [[TYPE_INT]], i64 4} +// OLD-PATH: [[TYPE_S]] = !{!"_ZTS7StructS", [[TYPE_SHORT]], i64 0, [[TYPE_INT]], i64 4} +// OLD-PATH: [[TAG_S_f16]] = !{[[TYPE_S]], [[TYPE_SHORT]], i64 0} +// OLD-PATH: [[TAG_S2_f32]] = !{[[TYPE_S2:!.*]], [[TYPE_INT]], i64 4} +// OLD-PATH: [[TYPE_S2]] = !{!"_ZTS8StructS2", [[TYPE_SHORT]], i64 0, [[TYPE_INT]], i64 4} +// OLD-PATH: [[TAG_S2_f16]] = !{[[TYPE_S2]], [[TYPE_SHORT]], i64 0} +// OLD-PATH: [[TAG_C_b_a_f32]] = !{[[TYPE_C:!.*]], [[TYPE_INT]], i64 12} +// OLD-PATH: [[TYPE_C]] = !{!"_ZTS7StructC", [[TYPE_SHORT]], i64 0, [[TYPE_B]], i64 4, [[TYPE_INT]], i64 28} +// OLD-PATH: [[TAG_D_b_a_f32]] = !{[[TYPE_D:!.*]], [[TYPE_INT]], i64 12} +// OLD-PATH: [[TYPE_D]] = !{!"_ZTS7StructD", [[TYPE_SHORT]], i64 0, [[TYPE_B]], i64 4, [[TYPE_INT]], i64 28, [[TYPE_CHAR]], i64 32} +// OLD-PATH: [[TAG_six_b]] = !{[[TYPE_six:!.*]], [[TYPE_CHAR]], i64 4} +// OLD-PATH: [[TYPE_six]] = !{!"_ZTS3six", [[TYPE_CHAR]], i64 0, [[TYPE_CHAR]], i64 4, [[TYPE_CHAR]], i64 5} From af9a39d842f0c0da1a1fee03ec2144e89e911df2 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Mon, 24 Feb 2025 14:56:40 -0800 Subject: [PATCH 2297/2301] [CIR] New assembly format for function type return (#1391) Change the assembly format for `cir::FuncType` from ``` !cir.func ``` to ``` !cir.func<(!argType) -> !returnType> ``` This change (1) is easier to parse because it doesn't require lookahead, (2) is consistent with the format of ClangIR `FuncOp` assembly, and (3) is consistent with function types in other MLIR dialects. Change all the tests to use or to expect the new format for function types. The contents and the semantics of `cir::FuncType` are unchanged. Only the assembly format is being changed. Functions that return `void` in C or C++ are still represented in MLIR as having no return type. Most of the changes are in `parseFuncType` and `printFuncType` and the functions they call in `CIRTypes.cpp`. A `FuncType::verify` function was added to check that an explicit return type is not `cir::VoidType`. `FuncType::isVoid()` was renamed to `FuncType::hasVoidReturn()` Some comments for `FuncType` were improved. An `llvm_unreachable` was added to `StructType::getKindAsStr` to suppress a compiler warning and to catch a memory error that corrupts the `RecordKind` field. (This was a drive-by fix and has nothing to do with the rest of this change.) --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 +- clang/include/clang/CIR/Dialect/IR/CIRTypes.h | 1 + .../include/clang/CIR/Dialect/IR/CIRTypes.td | 35 +++--- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 6 +- clang/lib/CIR/Dialect/IR/CIRTypes.cpp | 112 ++++++++---------- .../CIR/CallConvLowering/AArch64/ptr-fields.c | 6 +- .../test/CIR/CallConvLowering/x86_64/fptrs.c | 22 ++-- clang/test/CIR/CodeGen/coro-task.cpp | 12 +- clang/test/CIR/CodeGen/derived-to-base.cpp | 10 +- clang/test/CIR/CodeGen/dtors.cpp | 2 +- clang/test/CIR/CodeGen/dynamic-cast-exact.cpp | 16 +-- clang/test/CIR/CodeGen/fun-ptr.c | 16 +-- clang/test/CIR/CodeGen/hello.c | 2 +- clang/test/CIR/CodeGen/lambda.cpp | 14 +-- clang/test/CIR/CodeGen/multi-vtable.cpp | 22 ++-- clang/test/CIR/CodeGen/no-prototype.c | 22 ++-- clang/test/CIR/CodeGen/store.c | 6 +- clang/test/CIR/CodeGen/struct.cpp | 4 +- clang/test/CIR/CodeGen/vtable-rtti.cpp | 16 +-- clang/test/CIR/CodeGen/vtt.cpp | 26 ++-- clang/test/CIR/IR/being_and_nothingness.cir | 15 +-- clang/test/CIR/IR/call-op-call-conv.cir | 6 +- clang/test/CIR/IR/call.cir | 4 +- clang/test/CIR/IR/func.cir | 2 +- clang/test/CIR/IR/invalid.cir | 18 ++- .../test/CIR/Lowering/ThroughMLIR/vtable.cir | 8 +- clang/test/CIR/Lowering/call-op-call-conv.cir | 2 +- clang/test/CIR/Lowering/call.cir | 16 +-- clang/test/CIR/Lowering/func.cir | 6 +- clang/test/CIR/Lowering/globals.cir | 10 +- clang/test/CIR/Lowering/hello.cir | 2 +- 31 files changed, 221 insertions(+), 222 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 31d69d582c08..652426db6202 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -3888,7 +3888,7 @@ def CallOp : CIR_CallOp<"call", [NoRegionArguments]> { CArg<"mlir::UnitAttr", "{}">:$exception), [{ $_state.addOperands(ValueRange{ind_target}); $_state.addOperands(operands); - if (!fn_type.isVoid()) + if (!fn_type.hasVoidReturn()) $_state.addTypes(fn_type.getReturnType()); $_state.addAttribute("calling_conv", CallingConvAttr::get($_builder.getContext(), callingConv)); @@ -3974,7 +3974,7 @@ def TryCallOp : CIR_CallOp<"try_call", finalCallOperands.append(operands.begin(), operands.end()); $_state.addOperands(finalCallOperands); - if (!fn_type.isVoid()) + if (!fn_type.hasVoidReturn()) $_state.addTypes(fn_type.getReturnType()); $_state.addAttribute("calling_conv", diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h index 171ffda0b691..221090f25d9e 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h @@ -154,6 +154,7 @@ class StructType case RecordKind::Struct: return "struct"; } + llvm_unreachable("Invalid value for StructType::getKind()"); } std::string getPrefixedName() { return getKindAsStr() + "." + getName().getValue().str(); diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td index 6f30f0188802..3a0451175f30 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.td @@ -379,10 +379,10 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { ```mlir !cir.func<()> - !cir.func + !cir.func<() -> !bool> !cir.func<(!s8i, !s8i)> - !cir.func - !cir.func + !cir.func<(!s8i, !s8i) -> !s32i> + !cir.func<(!s32i, ...) -> !s32i> ``` }]; @@ -390,23 +390,27 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { "mlir::Type":$optionalReturnType, "bool":$varArg); // Use a custom parser to handle the optional return and argument types - // without an optional anchor. let assemblyFormat = [{ `<` custom($optionalReturnType, $inputs, $varArg) `>` }]; let builders = [ - // Construct with an actual return type or explicit !cir.void + // Create a FuncType, converting the return type from C-style to + // MLIR-style. If the given return type is `cir::VoidType`, ignore it + // and create the FuncType with no return type, which is how MLIR + // represents function types. TypeBuilderWithInferredContext<(ins "llvm::ArrayRef":$inputs, "mlir::Type":$returnType, CArg<"bool", "false">:$isVarArg), [{ - return $_get(returnType.getContext(), inputs, - mlir::isa(returnType) ? nullptr - : returnType, - isVarArg); + return $_get(returnType.getContext(), inputs, + mlir::isa(returnType) ? nullptr + : returnType, + isVarArg); }]> ]; + let genVerifyDecl = 1; + let extraClassDeclaration = [{ /// Returns whether the function is variadic. bool isVarArg() const { return getVarArg(); } @@ -417,16 +421,17 @@ def CIR_FuncType : CIR_Type<"Func", "func"> { /// Returns the number of arguments to the function. unsigned getNumInputs() const { return getInputs().size(); } - /// Returns the result type of the function as an actual return type or - /// explicit !cir.void + /// Get the C-style return type of the function, which is !cir.void if the + /// function returns nothing and the actual return type otherwise. mlir::Type getReturnType() const; - /// Returns the result type of the function as an ArrayRef, enabling better - /// integration with generic MLIR utilities. + /// Get the MLIR-style return type of the function, which is an empty + /// ArrayRef if the function returns nothing and a single-element ArrayRef + /// with the actual return type otherwise. llvm::ArrayRef getReturnTypes() const; - /// Returns whether the function returns void. - bool isVoid() const; + /// Does the fuction type return nothing? + bool hasVoidReturn() const; /// Returns a clone of this function type with the given argument /// and result types. diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 18e4adf19d53..22e144738783 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -2739,15 +2739,15 @@ verifyCallCommInSymbolUses(Operation *op, SymbolTableCollection &symbolTable) { << stringifyCallingConv(callIf.getCallingConv()); // Void function must not return any results. - if (fnType.isVoid() && op->getNumResults() != 0) + if (fnType.hasVoidReturn() && op->getNumResults() != 0) return op->emitOpError("callee returns void but call has results"); // Non-void function calls must return exactly one result. - if (!fnType.isVoid() && op->getNumResults() != 1) + if (!fnType.hasVoidReturn() && op->getNumResults() != 1) return op->emitOpError("incorrect number of results for callee"); // Parent function and return value types must match. - if (!fnType.isVoid() && + if (!fnType.hasVoidReturn() && op->getResultTypes().front() != fnType.getReturnType()) { return op->emitOpError("result type mismatch: expected ") << fnType.getReturnType() << ", but provided " diff --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp index b22b5707a793..e35d60293f41 100644 --- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp @@ -937,66 +937,51 @@ FuncType FuncType::clone(TypeRange inputs, TypeRange results) const { // type. static mlir::ParseResult parseFuncTypeReturn(mlir::AsmParser &p, mlir::Type &optionalReturnType) { - if (succeeded(p.parseOptionalLParen())) { - // If we have already a '(', the function has no return type - optionalReturnType = {}; - return mlir::success(); + if (succeeded(p.parseOptionalArrow())) { + // `->` found. It must be followed by the return type. + return p.parseType(optionalReturnType); } - mlir::Type type; - if (p.parseType(type)) - return mlir::failure(); - if (isa(type)) - // An explicit !cir.void means also no return type. - optionalReturnType = {}; - else - // Otherwise use the actual type. - optionalReturnType = type; - return p.parseLParen(); + // Function has `void` return in C++, no return in MLIR. + optionalReturnType = {}; + return success(); } // A special pretty-printer for function returning or not a result. static void printFuncTypeReturn(mlir::AsmPrinter &p, mlir::Type optionalReturnType) { if (optionalReturnType) - p << optionalReturnType << ' '; - p << '('; + p << " -> " << optionalReturnType; } static mlir::ParseResult parseFuncTypeArgs(mlir::AsmParser &p, llvm::SmallVector ¶ms, bool &isVarArg) { isVarArg = false; - // `(` `)` - if (succeeded(p.parseOptionalRParen())) + if (failed(p.parseLParen())) + return failure(); + if (succeeded(p.parseOptionalRParen())) { + // `()` empty argument list return mlir::success(); - - // `(` `...` `)` - if (succeeded(p.parseOptionalEllipsis())) { - isVarArg = true; - return p.parseRParen(); } - - // type (`,` type)* (`,` `...`)? - mlir::Type type; - if (p.parseType(type)) - return mlir::failure(); - params.push_back(type); - while (succeeded(p.parseOptionalComma())) { + do { if (succeeded(p.parseOptionalEllipsis())) { + // `...`, which must be the last thing in the list. isVarArg = true; - return p.parseRParen(); + break; + } else { + mlir::Type argType; + if (failed(p.parseType(argType))) + return failure(); + params.push_back(argType); } - if (p.parseType(type)) - return mlir::failure(); - params.push_back(type); - } - + } while (succeeded(p.parseOptionalComma())); return p.parseRParen(); } static void printFuncTypeArgs(mlir::AsmPrinter &p, mlir::ArrayRef params, bool isVarArg) { + p << '('; llvm::interleaveComma(params, p, [&p](mlir::Type type) { p.printType(type); }); if (isVarArg) { @@ -1010,45 +995,52 @@ static void printFuncTypeArgs(mlir::AsmPrinter &p, // Use a custom parser to handle the optional return and argument types without // an optional anchor. static mlir::ParseResult parseFuncType(mlir::AsmParser &p, - mlir::Type &optionalReturnTypes, + mlir::Type &optionalReturnType, llvm::SmallVector ¶ms, bool &isVarArg) { - if (failed(parseFuncTypeReturn(p, optionalReturnTypes))) + if (failed(parseFuncTypeArgs(p, params, isVarArg))) return failure(); - return parseFuncTypeArgs(p, params, isVarArg); + return parseFuncTypeReturn(p, optionalReturnType); } -static void printFuncType(mlir::AsmPrinter &p, mlir::Type optionalReturnTypes, +static void printFuncType(mlir::AsmPrinter &p, mlir::Type optionalReturnType, mlir::ArrayRef params, bool isVarArg) { - printFuncTypeReturn(p, optionalReturnTypes); printFuncTypeArgs(p, params, isVarArg); + printFuncTypeReturn(p, optionalReturnType); } -// Return the actual return type or an explicit !cir.void if the function does -// not return anything +/// Get the C-style return type of the function, which is !cir.void if the +/// function returns nothing and the actual return type otherwise. mlir::Type FuncType::getReturnType() const { - if (isVoid()) + if (hasVoidReturn()) return cir::VoidType::get(getContext()); - return static_cast(getImpl())->optionalReturnType; + return getOptionalReturnType(); } -/// Returns the result type of the function as an ArrayRef, enabling better -/// integration with generic MLIR utilities. +/// Get the MLIR-style return type of the function, which is an empty +/// ArrayRef if the function returns nothing and a single-element ArrayRef +/// with the actual return type otherwise. llvm::ArrayRef FuncType::getReturnTypes() const { - if (isVoid()) + if (hasVoidReturn()) return {}; - return static_cast(getImpl())->optionalReturnType; -} - -// Whether the function returns void -bool FuncType::isVoid() const { - auto rt = - static_cast(getImpl())->optionalReturnType; - assert(!rt || - !mlir::isa(rt) && - "The return type for a function returning void should be empty " - "instead of a real !cir.void"); - return !rt; + // Can't use getOptionalReturnType() here because llvm::ArrayRef hold a + // pointer to its elements and doesn't do lifetime extension. That would + // result in returning a pointer to a temporary that has gone out of scope. + return getImpl()->optionalReturnType; +} + +// Does the fuction type return nothing? +bool FuncType::hasVoidReturn() const { return !getOptionalReturnType(); } + +mlir::LogicalResult +FuncType::verify(llvm::function_ref emitError, + llvm::ArrayRef argTypes, mlir::Type returnType, + bool isVarArg) { + if (returnType && mlir::isa(returnType)) { + emitError() << "!cir.func cannot have an explicit 'void' return type"; + return mlir::failure(); + } + return mlir::success(); } //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CallConvLowering/AArch64/ptr-fields.c b/clang/test/CIR/CallConvLowering/AArch64/ptr-fields.c index 8fc121e9bf9b..e5f06757c2ef 100644 --- a/clang/test/CIR/CallConvLowering/AArch64/ptr-fields.c +++ b/clang/test/CIR/CallConvLowering/AArch64/ptr-fields.c @@ -15,9 +15,9 @@ int foo(int x) { return x; } // CIR: %[[#V0:]] = cir.alloca !ty_A, !cir.ptr, [""] {alignment = 4 : i64} // CIR: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr // CIR: cir.store %arg0, %[[#V1]] : !u64i, !cir.ptr -// CIR: %[[#V2:]] = cir.get_global @foo : !cir.ptr> -// CIR: %[[#V3:]] = cir.get_member %[[#V0]][0] {name = "f"} : !cir.ptr -> !cir.ptr>> -// CIR: cir.store %[[#V2]], %[[#V3]] : !cir.ptr>, !cir.ptr>> +// CIR: %[[#V2:]] = cir.get_global @foo : !cir.ptr !s32i>> +// CIR: %[[#V3:]] = cir.get_member %[[#V0]][0] {name = "f"} : !cir.ptr -> !cir.ptr !s32i>>> +// CIR: cir.store %[[#V2]], %[[#V3]] : !cir.ptr !s32i>>, !cir.ptr !s32i>>> // CIR: cir.return // LLVM: void @passA(i64 %[[#V0:]]) diff --git a/clang/test/CIR/CallConvLowering/x86_64/fptrs.c b/clang/test/CIR/CallConvLowering/x86_64/fptrs.c index f2a7538919c2..e7d15528da19 100644 --- a/clang/test/CIR/CallConvLowering/x86_64/fptrs.c +++ b/clang/test/CIR/CallConvLowering/x86_64/fptrs.c @@ -10,10 +10,10 @@ typedef int (*myfptr)(S); int foo(S s) { return 42 + s.a; } // CHECK: cir.func {{.*@bar}} -// CHECK: %[[#V0:]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["a", init] -// CHECK: %[[#V1:]] = cir.get_global @foo : !cir.ptr> -// CHECK: %[[#V2:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr>), !cir.ptr> -// CHECK: cir.store %[[#V2]], %[[#V0]] : !cir.ptr>, !cir.ptr>> +// CHECK: %[[#V0:]] = cir.alloca !cir.ptr !s32i>>, !cir.ptr !s32i>>>, ["a", init] +// CHECK: %[[#V1:]] = cir.get_global @foo : !cir.ptr !s32i>> +// CHECK: %[[#V2:]] = cir.cast(bitcast, %[[#V1]] : !cir.ptr !s32i>>), !cir.ptr !s32i>> +// CHECK: cir.store %[[#V2]], %[[#V0]] : !cir.ptr !s32i>>, !cir.ptr !s32i>>> void bar() { myfptr a = foo; } @@ -22,15 +22,15 @@ void bar() { // CHECK: %[[#V0:]] = cir.alloca !ty_S, !cir.ptr, [""] {alignment = 4 : i64} // CHECK: %[[#V1:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr // CHECK: cir.store %arg0, %[[#V1]] : !s32i, !cir.ptr -// CHECK: %[[#V2:]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["a", init] -// CHECK: %[[#V3:]] = cir.get_global @foo : !cir.ptr> -// CHECK: %[[#V4:]] = cir.cast(bitcast, %[[#V3]] : !cir.ptr>), !cir.ptr> -// CHECK: cir.store %[[#V4]], %[[#V2]] : !cir.ptr>, !cir.ptr>> -// CHECK: %[[#V5:]] = cir.load %[[#V2]] : !cir.ptr>>, !cir.ptr> +// CHECK: %[[#V2:]] = cir.alloca !cir.ptr !s32i>>, !cir.ptr !s32i>>>, ["a", init] +// CHECK: %[[#V3:]] = cir.get_global @foo : !cir.ptr !s32i>> +// CHECK: %[[#V4:]] = cir.cast(bitcast, %[[#V3]] : !cir.ptr !s32i>>), !cir.ptr !s32i>> +// CHECK: cir.store %[[#V4]], %[[#V2]] : !cir.ptr !s32i>>, !cir.ptr !s32i>>> +// CHECK: %[[#V5:]] = cir.load %[[#V2]] : !cir.ptr !s32i>>>, !cir.ptr !s32i>> // CHECK: %[[#V6:]] = cir.cast(bitcast, %[[#V0]] : !cir.ptr), !cir.ptr // CHECK: %[[#V7:]] = cir.load %[[#V6]] : !cir.ptr, !s32i -// CHECK: %[[#V8:]] = cir.cast(bitcast, %[[#V5]] : !cir.ptr>), !cir.ptr> -// CHECK: %[[#V9:]] = cir.call %[[#V8]](%[[#V7]]) : (!cir.ptr>, !s32i) -> !s32i +// CHECK: %[[#V8:]] = cir.cast(bitcast, %[[#V5]] : !cir.ptr !s32i>>), !cir.ptr !s32i>> +// CHECK: %[[#V9:]] = cir.call %[[#V8]](%[[#V7]]) : (!cir.ptr !s32i>>, !s32i) -> !s32i // LLVM: define dso_local void @baz(i32 %0) // LLVM: %[[#V1:]] = alloca %struct.S, i64 1 diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp index 01f30de810e8..c3ed24c9ea33 100644 --- a/clang/test/CIR/CodeGen/coro-task.cpp +++ b/clang/test/CIR/CodeGen/coro-task.cpp @@ -359,19 +359,19 @@ folly::coro::Task go4() { // CHECK: %17 = cir.alloca !ty_anon2E2_, !cir.ptr, ["ref.tmp1"] {alignment = 1 : i64} // Get the lambda invoker ptr via `lambda operator folly::coro::Task (*)(int const&)()` -// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr)>> -// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr)>>, !cir.ptr)>> -// CHECK: cir.yield %19 : !cir.ptr)>> +// CHECK: %18 = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%17) : (!cir.ptr) -> !cir.ptr) -> ![[IntTask]]>> +// CHECK: %19 = cir.unary(plus, %18) : !cir.ptr) -> ![[IntTask]]>>, !cir.ptr) -> ![[IntTask]]>> +// CHECK: cir.yield %19 : !cir.ptr) -> ![[IntTask]]>> // CHECK: } -// CHECK: cir.store %12, %3 : !cir.ptr)>>, !cir.ptr)>>> +// CHECK: cir.store %12, %3 : !cir.ptr) -> ![[IntTask]]>>, !cir.ptr) -> ![[IntTask]]>>> // CHECK: cir.scope { // CHECK: %17 = cir.alloca !s32i, !cir.ptr, ["ref.tmp2", init] {alignment = 4 : i64} -// CHECK: %18 = cir.load %3 : !cir.ptr)>>>, !cir.ptr)>> +// CHECK: %18 = cir.load %3 : !cir.ptr) -> ![[IntTask]]>>>, !cir.ptr) -> ![[IntTask]]>> // CHECK: %19 = cir.const #cir.int<3> : !s32i // CHECK: cir.store %19, %17 : !s32i, !cir.ptr // Call invoker, which calls operator() indirectly. -// CHECK: %20 = cir.call %18(%17) : (!cir.ptr)>>, !cir.ptr) -> ![[IntTask]] +// CHECK: %20 = cir.call %18(%17) : (!cir.ptr) -> ![[IntTask]]>>, !cir.ptr) -> ![[IntTask]] // CHECK: cir.store %20, %4 : ![[IntTask]], !cir.ptr // CHECK: } diff --git a/clang/test/CIR/CodeGen/derived-to-base.cpp b/clang/test/CIR/CodeGen/derived-to-base.cpp index 879d09f58c34..c6dc92d3ac7b 100644 --- a/clang/test/CIR/CodeGen/derived-to-base.cpp +++ b/clang/test/CIR/CodeGen/derived-to-base.cpp @@ -118,11 +118,11 @@ void vcall(C1 &c1) { // CHECK: %5 = cir.load %2 : !cir.ptr, !s32i // CHECK: cir.call @_ZN5buffyC2ERKS_(%3, %1) : (!cir.ptr, !cir.ptr) -> () // CHECK: %6 = cir.load %3 : !cir.ptr, !ty_buffy -// CHECK: %7 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr, !s32i, !ty_buffy)>>>> -// CHECK: %8 = cir.load %7 : !cir.ptr, !s32i, !ty_buffy)>>>>, !cir.ptr, !s32i, !ty_buffy)>>> -// CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_buffy)>>>, vtable_index = 0, address_point_index = 2) : !cir.ptr, !s32i, !ty_buffy)>>> -// CHECK: %10 = cir.load align(8) %9 : !cir.ptr, !s32i, !ty_buffy)>>>, !cir.ptr, !s32i, !ty_buffy)>> -// CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_buffy)>>, !cir.ptr, !s32i, !ty_buffy) -> !s32i +// CHECK: %7 = cir.cast(bitcast, %4 : !cir.ptr), !cir.ptr, !s32i, !ty_buffy) -> !s32i>>>> +// CHECK: %8 = cir.load %7 : !cir.ptr, !s32i, !ty_buffy) -> !s32i>>>>, !cir.ptr, !s32i, !ty_buffy) -> !s32i>>> +// CHECK: %9 = cir.vtable.address_point( %8 : !cir.ptr, !s32i, !ty_buffy) -> !s32i>>>, vtable_index = 0, address_point_index = 2) : !cir.ptr, !s32i, !ty_buffy) -> !s32i>>> +// CHECK: %10 = cir.load align(8) %9 : !cir.ptr, !s32i, !ty_buffy) -> !s32i>>>, !cir.ptr, !s32i, !ty_buffy) -> !s32i>> +// CHECK: %11 = cir.call %10(%4, %5, %6) : (!cir.ptr, !s32i, !ty_buffy) -> !s32i>>, !cir.ptr, !s32i, !ty_buffy) -> !s32i // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp index 60c330d53b78..f432d61d86c4 100644 --- a/clang/test/CIR/CodeGen/dtors.cpp +++ b/clang/test/CIR/CodeGen/dtors.cpp @@ -36,7 +36,7 @@ class B : public A }; // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.record.decl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct !u32i>>>} #cir.record.decl.ast> // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct diff --git a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp index cf5a9f9f6f64..829a3829ccd1 100644 --- a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp +++ b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp @@ -16,10 +16,10 @@ struct Derived final : Base1 {}; Derived *ptr_cast(Base1 *ptr) { return dynamic_cast(ptr); // CHECK: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr - // CHECK-NEXT: %[[#EXPECTED_VPTR:]] = cir.vtable.address_point(@_ZTV7Derived, vtable_index = 0, address_point_index = 2) : !cir.ptr>> - // CHECK-NEXT: %[[#SRC_VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr>>> - // CHECK-NEXT: %[[#SRC_VPTR:]] = cir.load %[[#SRC_VPTR_PTR]] : !cir.ptr>>>, !cir.ptr>> - // CHECK-NEXT: %[[#SUCCESS:]] = cir.cmp(eq, %[[#SRC_VPTR]], %[[#EXPECTED_VPTR]]) : !cir.ptr>>, !cir.bool + // CHECK-NEXT: %[[#EXPECTED_VPTR:]] = cir.vtable.address_point(@_ZTV7Derived, vtable_index = 0, address_point_index = 2) : !cir.ptr !u32i>>> + // CHECK-NEXT: %[[#SRC_VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr !u32i>>>> + // CHECK-NEXT: %[[#SRC_VPTR:]] = cir.load %[[#SRC_VPTR_PTR]] : !cir.ptr !u32i>>>>, !cir.ptr !u32i>>> + // CHECK-NEXT: %[[#SUCCESS:]] = cir.cmp(eq, %[[#SRC_VPTR]], %[[#EXPECTED_VPTR]]) : !cir.ptr !u32i>>>, !cir.bool // CHECK-NEXT: %{{.+}} = cir.ternary(%[[#SUCCESS]], true { // CHECK-NEXT: %[[#RES:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr // CHECK-NEXT: cir.yield %[[#RES]] : !cir.ptr @@ -39,10 +39,10 @@ Derived *ptr_cast(Base1 *ptr) { Derived &ref_cast(Base1 &ref) { return dynamic_cast(ref); // CHECK: %[[#SRC:]] = cir.load %{{.+}} : !cir.ptr>, !cir.ptr - // CHECK-NEXT: %[[#EXPECTED_VPTR:]] = cir.vtable.address_point(@_ZTV7Derived, vtable_index = 0, address_point_index = 2) : !cir.ptr>> - // CHECK-NEXT: %[[#SRC_VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr>>> - // CHECK-NEXT: %[[#SRC_VPTR:]] = cir.load %[[#SRC_VPTR_PTR]] : !cir.ptr>>>, !cir.ptr>> - // CHECK-NEXT: %[[#SUCCESS:]] = cir.cmp(eq, %[[#SRC_VPTR]], %[[#EXPECTED_VPTR]]) : !cir.ptr>>, !cir.bool + // CHECK-NEXT: %[[#EXPECTED_VPTR:]] = cir.vtable.address_point(@_ZTV7Derived, vtable_index = 0, address_point_index = 2) : !cir.ptr !u32i>>> + // CHECK-NEXT: %[[#SRC_VPTR_PTR:]] = cir.cast(bitcast, %[[#SRC]] : !cir.ptr), !cir.ptr !u32i>>>> + // CHECK-NEXT: %[[#SRC_VPTR:]] = cir.load %[[#SRC_VPTR_PTR]] : !cir.ptr !u32i>>>>, !cir.ptr !u32i>>> + // CHECK-NEXT: %[[#SUCCESS:]] = cir.cmp(eq, %[[#SRC_VPTR]], %[[#EXPECTED_VPTR]]) : !cir.ptr !u32i>>>, !cir.bool // CHECK-NEXT: %[[#FAILED:]] = cir.unary(not, %[[#SUCCESS]]) : !cir.bool, !cir.bool // CHECK-NEXT: cir.if %[[#FAILED]] { // CHECK-NEXT: cir.call @__cxa_bad_cast() : () -> () diff --git a/clang/test/CIR/CodeGen/fun-ptr.c b/clang/test/CIR/CodeGen/fun-ptr.c index 0f9a98300e32..087164c7b474 100644 --- a/clang/test/CIR/CodeGen/fun-ptr.c +++ b/clang/test/CIR/CodeGen/fun-ptr.c @@ -17,7 +17,7 @@ typedef struct A { fun_typ fun; } A; -// CIR: !ty_A = !cir.struct>)>>} #cir.record.decl.ast> +// CIR: !ty_A = !cir.struct>) -> !s32i>>} #cir.record.decl.ast> A a = {(fun_typ)0}; int extract_a(Data* d) { @@ -27,15 +27,15 @@ int extract_a(Data* d) { // CIR: cir.func {{@.*foo.*}}(%arg0: !cir.ptr // CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] // CIR: [[TMP1:%.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] -// CIR: [[TMP2:%.*]] = cir.alloca !cir.ptr)>>, !cir.ptr)>>>, ["f", init] +// CIR: [[TMP2:%.*]] = cir.alloca !cir.ptr) -> !s32i>>, !cir.ptr) -> !s32i>>>, ["f", init] // CIR: cir.store %arg0, [[TMP0]] : !cir.ptr, !cir.ptr> -// CIR: [[TMP3:%.*]] = cir.const #cir.ptr : !cir.ptr)>> -// CIR: cir.store [[TMP3]], [[TMP2]] : !cir.ptr)>>, !cir.ptr)>>> -// CIR: [[TMP4:%.*]] = cir.get_global {{@.*extract_a.*}} : !cir.ptr)>> -// CIR: cir.store [[TMP4]], [[TMP2]] : !cir.ptr)>>, !cir.ptr)>>> -// CIR: [[TMP5:%.*]] = cir.load [[TMP2]] : !cir.ptr)>>>, !cir.ptr)>> +// CIR: [[TMP3:%.*]] = cir.const #cir.ptr : !cir.ptr) -> !s32i>> +// CIR: cir.store [[TMP3]], [[TMP2]] : !cir.ptr) -> !s32i>>, !cir.ptr) -> !s32i>>> +// CIR: [[TMP4:%.*]] = cir.get_global {{@.*extract_a.*}} : !cir.ptr) -> !s32i>> +// CIR: cir.store [[TMP4]], [[TMP2]] : !cir.ptr) -> !s32i>>, !cir.ptr) -> !s32i>>> +// CIR: [[TMP5:%.*]] = cir.load [[TMP2]] : !cir.ptr) -> !s32i>>>, !cir.ptr) -> !s32i>> // CIR: [[TMP6:%.*]] = cir.load [[TMP0]] : !cir.ptr>, !cir.ptr -// CIR: [[TMP7:%.*]] = cir.call [[TMP5]]([[TMP6]]) : (!cir.ptr)>>, !cir.ptr) -> !s32i +// CIR: [[TMP7:%.*]] = cir.call [[TMP5]]([[TMP6]]) : (!cir.ptr) -> !s32i>>, !cir.ptr) -> !s32i // CIR: cir.store [[TMP7]], [[TMP1]] : !s32i, !cir.ptr // LLVM: define dso_local i32 {{@.*foo.*}}(ptr %0) diff --git a/clang/test/CIR/CodeGen/hello.c b/clang/test/CIR/CodeGen/hello.c index 8fb49131784c..3454b40afeed 100644 --- a/clang/test/CIR/CodeGen/hello.c +++ b/clang/test/CIR/CodeGen/hello.c @@ -11,7 +11,7 @@ int main (void) { // CHECK: cir.global "private" constant cir_private dsolocal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} // CHECK: cir.func @main() -> !s32i // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.get_global @printf : !cir.ptr, ...)>> +// CHECK: %1 = cir.get_global @printf : !cir.ptr, ...) -> !s32i>> // CHECK: %2 = cir.get_global @".str" : !cir.ptr> // CHECK: %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr // CHECK: %4 = cir.call @printf(%3) : (!cir.ptr) -> !s32i diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp index 680cd2e122f1..59f5c5e8c3ce 100644 --- a/clang/test/CIR/CodeGen/lambda.cpp +++ b/clang/test/CIR/CodeGen/lambda.cpp @@ -207,27 +207,27 @@ int g3() { // CHECK-LABEL: @_Z2g3v() // CHECK: %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} -// CHECK: %1 = cir.alloca !cir.ptr)>>, !cir.ptr)>>>, ["fn", init] {alignment = 8 : i64} +// CHECK: %1 = cir.alloca !cir.ptr) -> !s32i>>, !cir.ptr) -> !s32i>>>, ["fn", init] {alignment = 8 : i64} // CHECK: %2 = cir.alloca !s32i, !cir.ptr, ["task", init] {alignment = 4 : i64} // 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. // CHECK: %3 = cir.scope { // CHECK: %7 = cir.alloca !ty_anon2E5_, !cir.ptr, ["ref.tmp0"] {alignment = 1 : i64} -// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr)>> -// CHECK: %9 = cir.unary(plus, %8) : !cir.ptr)>>, !cir.ptr)>> -// CHECK: cir.yield %9 : !cir.ptr)>> +// CHECK: %8 = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%7) : (!cir.ptr) -> !cir.ptr) -> !s32i>> +// CHECK: %9 = cir.unary(plus, %8) : !cir.ptr) -> !s32i>>, !cir.ptr) -> !s32i>> +// CHECK: cir.yield %9 : !cir.ptr) -> !s32i>> // CHECK: } // 2. Load ptr to `__invoke()`. -// CHECK: cir.store %3, %1 : !cir.ptr)>>, !cir.ptr)>>> +// CHECK: cir.store %3, %1 : !cir.ptr) -> !s32i>>, !cir.ptr) -> !s32i>>> // CHECK: %4 = cir.scope { // CHECK: %7 = cir.alloca !s32i, !cir.ptr, ["ref.tmp1", init] {alignment = 4 : i64} -// CHECK: %8 = cir.load %1 : !cir.ptr)>>>, !cir.ptr)>> +// CHECK: %8 = cir.load %1 : !cir.ptr) -> !s32i>>>, !cir.ptr) -> !s32i>> // CHECK: %9 = cir.const #cir.int<3> : !s32i // CHECK: cir.store %9, %7 : !s32i, !cir.ptr // 3. Call `__invoke()`, which effectively executes `operator()`. -// CHECK: %10 = cir.call %8(%7) : (!cir.ptr)>>, !cir.ptr) -> !s32i +// CHECK: %10 = cir.call %8(%7) : (!cir.ptr) -> !s32i>>, !cir.ptr) -> !s32i // CHECK: cir.yield %10 : !s32i // CHECK: } diff --git a/clang/test/CIR/CodeGen/multi-vtable.cpp b/clang/test/CIR/CodeGen/multi-vtable.cpp index 8b709a3ddeaf..6fe6680d2ca3 100644 --- a/clang/test/CIR/CodeGen/multi-vtable.cpp +++ b/clang/test/CIR/CodeGen/multi-vtable.cpp @@ -34,14 +34,14 @@ int main() { // CIR: ![[VTableTypeMother:ty_.*]] = !cir.struct x 4>}> // CIR: ![[VTableTypeFather:ty_.*]] = !cir.struct x 3>}> // CIR: ![[VTableTypeChild:ty_.*]] = !cir.struct x 4>, !cir.array x 3>}> -// CIR: !ty_Father = !cir.struct>>} #cir.record.decl.ast> -// CIR: !ty_Mother = !cir.struct>>} #cir.record.decl.ast> +// CIR: !ty_Father = !cir.struct !u32i>>>} #cir.record.decl.ast> +// CIR: !ty_Mother = !cir.struct !u32i>>>} #cir.record.decl.ast> // CIR: !ty_Child = !cir.struct // CIR: cir.func linkonce_odr @_ZN6MotherC2Ev(%arg0: !cir.ptr -// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV6Mother, vtable_index = 0, address_point_index = 2) : !cir.ptr>> -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> -// CIR: cir.store %2, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV6Mother, vtable_index = 0, address_point_index = 2) : !cir.ptr !u32i>>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr !u32i>>>> +// CIR: cir.store %2, %{{[0-9]+}} : !cir.ptr !u32i>>>, !cir.ptr !u32i>>>> // CIR: cir.return // CIR: } @@ -51,13 +51,13 @@ int main() { // LLVM-DAG: } // CIR: cir.func linkonce_odr @_ZN5ChildC2Ev(%arg0: !cir.ptr -// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV5Child, vtable_index = 0, address_point_index = 2) : !cir.ptr>> -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> -// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> -// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV5Child, vtable_index = 1, address_point_index = 2) : !cir.ptr>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV5Child, vtable_index = 0, address_point_index = 2) : !cir.ptr !u32i>>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr !u32i>>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr !u32i>>>, !cir.ptr !u32i>>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV5Child, vtable_index = 1, address_point_index = 2) : !cir.ptr !u32i>>> // CIR: %7 = cir.base_class_addr(%1 : !cir.ptr nonnull) [8] -> !cir.ptr -// CIR: %8 = cir.cast(bitcast, %7 : !cir.ptr), !cir.ptr>>> loc(#loc8) -// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: %8 = cir.cast(bitcast, %7 : !cir.ptr), !cir.ptr !u32i>>>> loc(#loc8) +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr !u32i>>>, !cir.ptr !u32i>>>> // CIR: cir.return // CIR: } diff --git a/clang/test/CIR/CodeGen/no-prototype.c b/clang/test/CIR/CodeGen/no-prototype.c index c119304ce54d..ad647de0f590 100644 --- a/clang/test/CIR/CodeGen/no-prototype.c +++ b/clang/test/CIR/CodeGen/no-prototype.c @@ -35,8 +35,8 @@ int test1(int x) { int noProto2(); int test2(int x) { return noProto2(x); - // CHECK: [[GGO:%.*]] = cir.get_global @noProto2 : !cir.ptr> - // CHECK: {{.*}} = cir.call [[GGO]](%{{[0-9]+}}) : (!cir.ptr>, !s32i) -> !s32i + // CHECK: [[GGO:%.*]] = cir.get_global @noProto2 : !cir.ptr !s32i>> + // CHECK: {{.*}} = cir.call [[GGO]](%{{[0-9]+}}) : (!cir.ptr !s32i>>, !s32i) -> !s32i } int noProto2(int x) { return x; } // CHECK: cir.func no_proto @noProto2(%arg0: !s32i {{.+}}) -> !s32i @@ -50,9 +50,9 @@ int noProto3(); int test3(int x) { // CHECK: cir.func @test3 return noProto3(x); - // CHECK: [[GGO:%.*]] = cir.get_global @noProto3 : !cir.ptr> - // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> - // CHECK: {{%.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr>, !s32i) -> !s32i + // CHECK: [[GGO:%.*]] = cir.get_global @noProto3 : !cir.ptr !s32i>> + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr !s32i>>), !cir.ptr !s32i>> + // CHECK: {{%.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr !s32i>>, !s32i) -> !s32i } @@ -67,18 +67,18 @@ int noProto4() { return 0; } // cir.func private no_proto @noProto4() -> !s32i int test4(int x) { return noProto4(x); // Even if we know the definition, this should compile. - // CHECK: [[GGO:%.*]] = cir.get_global @noProto4 : !cir.ptr> - // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> - // CHECK: {{%.*}} = cir.call [[CAST]]({{%.*}}) : (!cir.ptr>, !s32i) -> !s32i + // CHECK: [[GGO:%.*]] = cir.get_global @noProto4 : !cir.ptr !s32i>> + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr !s32i>>), !cir.ptr !s32i>> + // CHECK: {{%.*}} = cir.call [[CAST]]({{%.*}}) : (!cir.ptr !s32i>>, !s32i) -> !s32i } // No-proto definition followed by an incorrect call due to lack of args. int noProto5(); int test5(int x) { return noProto5(); - // CHECK: [[GGO:%.*]] = cir.get_global @noProto5 : !cir.ptr> - // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr>), !cir.ptr> - // CHECK: {{%.*}} = cir.call [[CAST]]() : (!cir.ptr>) -> !s32i + // CHECK: [[GGO:%.*]] = cir.get_global @noProto5 : !cir.ptr !s32i>> + // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr !s32i>>), !cir.ptr !s32i>> + // CHECK: {{%.*}} = cir.call [[CAST]]() : (!cir.ptr !s32i>>) -> !s32i } int noProto5(int x) { return x; } // CHECK: cir.func no_proto @noProto5(%arg0: !s32i {{.+}}) -> !s32i diff --git a/clang/test/CIR/CodeGen/store.c b/clang/test/CIR/CodeGen/store.c index 9a94e6578129..2d2fc6029ce9 100644 --- a/clang/test/CIR/CodeGen/store.c +++ b/clang/test/CIR/CodeGen/store.c @@ -24,7 +24,7 @@ void storeNoArgsFn() { // CHECK: cir.func {{.*@storeNoArgsFn}} // CHECK: %0 = cir.alloca -// CHECK: %1 = cir.get_global @get42 : !cir.ptr> -// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr>), !cir.ptr> -// CHECK: cir.store %2, %0 : !cir.ptr>, !cir.ptr>> +// CHECK: %1 = cir.get_global @get42 : !cir.ptr !s32i>> +// CHECK: %2 = cir.cast(bitcast, %1 : !cir.ptr !s32i>>), !cir.ptr !s32i>> +// CHECK: cir.store %2, %0 : !cir.ptr !s32i>>, !cir.ptr !s32i>>> diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index 91acb833a706..b3c502e66534 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -32,7 +32,7 @@ void yoyo(incomplete *i) {} // CHECK-DAG: !ty_Foo = !cir.struct // CHECK-DAG: !ty_Mandalore = !cir.struct, !s32i} #cir.record.decl.ast> // CHECK-DAG: !ty_Adv = !cir.struct -// CHECK-DAG: !ty_Entry = !cir.struct, !cir.ptr)>>}> +// CHECK-DAG: !ty_Entry = !cir.struct, !cir.ptr) -> !u32i>>}> // CHECK: cir.func linkonce_odr @_ZN3Bar6methodEv(%arg0: !cir.ptr // CHECK-NEXT: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} @@ -172,4 +172,4 @@ void ppp() { Entry x; } // CHECK: cir.func linkonce_odr @_ZN5EntryC2Ev(%arg0: !cir.ptr -// CHECK: cir.get_member %1[0] {name = "procAddr"} : !cir.ptr -> !cir.ptr, !cir.ptr)>>> +// CHECK: cir.get_member %1[0] {name = "procAddr"} : !cir.ptr -> !cir.ptr, !cir.ptr) -> !u32i>>> diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp index e11e80bd5b4f..3c8a5253c526 100644 --- a/clang/test/CIR/CodeGen/vtable-rtti.cpp +++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp @@ -27,8 +27,8 @@ class B : public A // RTTI_DISABLED: ![[VTableTypeA:ty_.*]] = !cir.struct x 5>}> // Class A -// CHECK: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.record.decl.ast> -// RTTI_DISABLED: ![[ClassA:ty_.*]] = !cir.struct>>} #cir.record.decl.ast> +// CHECK: ![[ClassA:ty_.*]] = !cir.struct !u32i>>>} #cir.record.decl.ast> +// RTTI_DISABLED: ![[ClassA:ty_.*]] = !cir.struct !u32i>>>} #cir.record.decl.ast> // Class B // CHECK: ![[ClassB:ty_.*]] = !cir.struct @@ -45,9 +45,9 @@ class B : public A // CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr // CHECK: %2 = cir.base_class_addr(%1 : !cir.ptr nonnull) [0] -> !cir.ptr // CHECK: cir.call @_ZN1AC2Ev(%2) : (!cir.ptr) -> () -// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : !cir.ptr>> -// CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> -// CHECK: cir.store %3, %4 : !cir.ptr>>, !cir.ptr>>> +// CHECK: %3 = cir.vtable.address_point(@_ZTV1B, vtable_index = 0, address_point_index = 2) : !cir.ptr !u32i>>> +// CHECK: %4 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr !u32i>>>> +// CHECK: cir.store %3, %4 : !cir.ptr !u32i>>>, !cir.ptr !u32i>>>> // CHECK: cir.return // CHECK: } @@ -73,9 +73,9 @@ class B : public A // CHECK: %0 = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] {alignment = 8 : i64} // CHECK: cir.store %arg0, %0 : !cir.ptr, !cir.ptr> // CHECK: %1 = cir.load %0 : !cir.ptr>, !cir.ptr -// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : !cir.ptr>> -// CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr>>> -// CHECK: cir.store %2, %3 : !cir.ptr>>, !cir.ptr>>> +// CHECK: %2 = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : !cir.ptr !u32i>>> +// CHECK: %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr !u32i>>>> +// CHECK: cir.store %2, %3 : !cir.ptr !u32i>>>, !cir.ptr !u32i>>>> // CHECK: cir.return // CHECK: } diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index c32e242737cf..6404f306327e 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -38,9 +38,9 @@ int f() { // Class A constructor // CIR: cir.func linkonce_odr @_ZN1AC2Ev(%arg0: !cir.ptr -// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : !cir.ptr>> -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> -// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1A, vtable_index = 0, address_point_index = 2) : !cir.ptr !u32i>>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr !u32i>>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr !u32i>>>, !cir.ptr !u32i>>>> // CIR: } // Vtable of Class D @@ -115,19 +115,19 @@ int f() { // CIR: %[[VTT_D_TO_C:.*]] = cir.vtt.address_point @_ZTT1D, offset = 3 -> !cir.ptr> // CIR: cir.call @_ZN1CC2Ev(%[[C_PTR]], %[[VTT_D_TO_C]]) : (!cir.ptr, !cir.ptr>) -> () -// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 0, address_point_index = 3) : !cir.ptr>> -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> -// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> -// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 2, address_point_index = 3) : !cir.ptr>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 0, address_point_index = 3) : !cir.ptr !u32i>>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr !u32i>>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr !u32i>>>, !cir.ptr !u32i>>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 2, address_point_index = 3) : !cir.ptr !u32i>>> // CIR: %{{[0-9]+}} = cir.base_class_addr(%{{[0-9]+}} : !cir.ptr nonnull) [40] -> !cir.ptr -// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> -// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> -// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 1, address_point_index = 3) : !cir.ptr>> +// CIR: %{{[0-9]+}} = cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr !u32i>>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr !u32i>>>, !cir.ptr !u32i>>>> +// CIR: %{{[0-9]+}} = cir.vtable.address_point(@_ZTV1D, vtable_index = 1, address_point_index = 3) : !cir.ptr !u32i>>> // CIR: cir.base_class_addr(%{{[0-9]+}} : !cir.ptr nonnull) [16] -> !cir.ptr -// CIR: cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr>>> -// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr>>, !cir.ptr>>> +// CIR: cir.cast(bitcast, %{{[0-9]+}} : !cir.ptr), !cir.ptr !u32i>>>> +// CIR: cir.store %{{[0-9]+}}, %{{[0-9]+}} : !cir.ptr !u32i>>>, !cir.ptr !u32i>>>> // CIR: cir.return // CIR: } @@ -174,4 +174,4 @@ namespace other { // CIR: %[[VAL_4:.*]] = cir.base_class_addr(%[[VAL_2]] : !cir.ptr nonnull) [0] -> !cir.ptr // CIR: cir.call @_ZN5other1AD2Ev(%[[VAL_4]]) : (!cir.ptr) -> () // CIR: cir.return -// CIR: } \ No newline at end of file +// CIR: } diff --git a/clang/test/CIR/IR/being_and_nothingness.cir b/clang/test/CIR/IR/being_and_nothingness.cir index 076c75a5b192..8dff3ba723b7 100644 --- a/clang/test/CIR/IR/being_and_nothingness.cir +++ b/clang/test/CIR/IR/being_and_nothingness.cir @@ -1,12 +1,12 @@ // RUN: cir-opt %s | FileCheck %s // Exercise different ways to encode a function returning void +// This test is less useful that it used to be, because a redundant `!cir.void` +// as a function return type is no longer supported. !s32i = !cir.int !f = !cir.func<()> -!f2 = !cir.func +!f2 = !cir.func<() -> !s32i> !void = !cir.void !fnptr2 = !cir.ptr> -// Try some useless !void -!fnptr3 = !cir.ptr> module { cir.func @ind2(%fnptr: !fnptr2, %a : !s32i) { // CHECK: cir.func @ind2(%arg0: !cir.ptr>, %arg1: !s32i) { @@ -16,13 +16,4 @@ module { // CHECK: cir.func @f2() { cir.return } - // Try with a lot of useless !void - cir.func @ind3(%fnptr: !fnptr3, %a : !s32i) -> !void { - // CHECK: cir.func @ind3(%arg0: !cir.ptr>, %arg1: !s32i) { - cir.return - } - cir.func @f3() -> !cir.void { - // CHECK: cir.func @f3() { - cir.return - } } diff --git a/clang/test/CIR/IR/call-op-call-conv.cir b/clang/test/CIR/IR/call-op-call-conv.cir index b47e1226e10b..bf13cb76d947 100644 --- a/clang/test/CIR/IR/call-op-call-conv.cir +++ b/clang/test/CIR/IR/call-op-call-conv.cir @@ -2,7 +2,7 @@ // RUN: FileCheck --input-file=%t.cir %s !s32i = !cir.int -!fnptr = !cir.ptr> +!fnptr = !cir.ptr !s32i>> module { cir.func @my_add(%a: !s32i, %b: !s32i) -> !s32i cc(spir_function) { @@ -22,6 +22,6 @@ module { } } -// CHECK: %{{[0-9]+}} = cir.call %arg0(%arg1) : (!cir.ptr>, !s32i) -> !s32i cc(spir_kernel) -// CHECK: %{{[0-9]+}} = cir.call %arg0(%arg1) : (!cir.ptr>, !s32i) -> !s32i cc(spir_function) +// CHECK: %{{[0-9]+}} = cir.call %arg0(%arg1) : (!cir.ptr !s32i>>, !s32i) -> !s32i cc(spir_kernel) +// CHECK: %{{[0-9]+}} = cir.call %arg0(%arg1) : (!cir.ptr !s32i>>, !s32i) -> !s32i cc(spir_function) // CHECK: %{{[0-9]+}} = cir.try_call @my_add(%{{[0-9]+}}, %{{[0-9]+}}) ^{{.+}}, ^{{.+}} : (!s32i, !s32i) -> !s32i cc(spir_function) diff --git a/clang/test/CIR/IR/call.cir b/clang/test/CIR/IR/call.cir index 0b1fc68622f8..abd2fe9ba878 100644 --- a/clang/test/CIR/IR/call.cir +++ b/clang/test/CIR/IR/call.cir @@ -1,7 +1,7 @@ // RUN: cir-opt %s | FileCheck %s !s32i = !cir.int -!fnptr = !cir.ptr> +!fnptr = !cir.ptr !s32i>> #fn_attr = #cir, optnone = #cir.optnone})> #fn_attr1 = #cir @@ -16,7 +16,7 @@ module { cir.func @ind(%fnptr: !fnptr, %a : !s32i) { %r = cir.call %fnptr(%a) : (!fnptr, !s32i) -> !s32i -// CHECK: %0 = cir.call %arg0(%arg1) : (!cir.ptr>, !s32i) -> !s32i +// CHECK: %0 = cir.call %arg0(%arg1) : (!cir.ptr !s32i>>, !s32i) -> !s32i // Check parse->pretty-print round-trip on extra() attribute %7 = cir.call @_ZNSt5arrayIiLm8192EEixEm(%a) : (!s32i) -> !s32i extra(#fn_attr1) // CHECK: %1 = cir.call @_ZNSt5arrayIiLm8192EEixEm(%arg1) : (!s32i) -> !s32i extra(#fn_attr1) diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir index a1468e6462f4..87e21efb9e66 100644 --- a/clang/test/CIR/IR/func.cir +++ b/clang/test/CIR/IR/func.cir @@ -28,7 +28,7 @@ module { // Should parse custom assembly format. cir.func @parse_func_type() -> () { - %1 = cir.alloca !cir.ptr>, !cir.ptr>>, ["fn", init] {alignment = 8 : i64} + %1 = cir.alloca !cir.ptr !s32i>>, !cir.ptr !s32i>>>, ["fn", init] {alignment = 8 : i64} cir.return } diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index c628e3c2b46b..58178e7c0b9a 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -1159,8 +1159,8 @@ cir.func @bad_long_double(%arg0 : !cir.long_double) -> () { !u8i = !cir.int !void = !cir.void -!Base = !cir.struct ()>>>}> -!Derived = !cir.struct ()>>>}>}> +!Base = !cir.struct !cir.int>>>}> +!Derived = !cir.struct !cir.int>>>}>}> module { cir.global "private" constant external @_ZTI4Base : !cir.ptr @@ -1181,8 +1181,8 @@ module { !u8i = !cir.int !void = !cir.void -!Base = !cir.struct ()>>>}> -!Derived = !cir.struct ()>>>}>}> +!Base = !cir.struct !cir.int>>>}> +!Derived = !cir.struct !cir.int>>>}>}> module { cir.global "private" constant external @_ZTI4Base : !cir.ptr @@ -1490,3 +1490,13 @@ cir.func @cast0(%arg0: !s32i, %arg1: !s32i) { %1 = cir.cmp(eq, %arg0, %arg1): !s32i, !s32i cir.return } + +// ----- + +// Verify that void-returning functions have no return type listed in +// MLIR assembly. + +!s32i = !cir.int +// expected-error @below {{!cir.func cannot have an explicit 'void' return type}} +// expected-error @below {{failed to parse CIR_PointerType parameter}} +cir.global external dsolocal @vfp = #cir.ptr : !cir.ptr !cir.void>> diff --git a/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir b/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir index 66eb06629793..1cb4c0b672ae 100644 --- a/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir +++ b/clang/test/CIR/Lowering/ThroughMLIR/vtable.cir @@ -13,9 +13,9 @@ !ty_anon_struct2 = !cir.struct> x 4>}> !ty_anon_struct3 = !cir.struct> x 3>}> !ty_anon_struct4 = !cir.struct> x 4>, !cir.array> x 3>}> -!ty_Father = !cir.struct ()>>>} #cir.record.decl.ast> -!ty_Mother = !cir.struct ()>>>} #cir.record.decl.ast> -!ty_Child = !cir.struct ()>>>} #cir.record.decl.ast>, !cir.struct ()>>>} #cir.record.decl.ast>} #cir.record.decl.ast> +!ty_Father = !cir.struct !cir.int>>>} #cir.record.decl.ast> +!ty_Mother = !cir.struct !cir.int>>>} #cir.record.decl.ast> +!ty_Child = !cir.struct !cir.int>>>} #cir.record.decl.ast>, !cir.struct !cir.int>>>} #cir.record.decl.ast>} #cir.record.decl.ast> module { cir.func linkonce_odr @_ZN6Mother6simpleEv(%arg0: !cir.ptr) { @@ -70,4 +70,4 @@ module { // MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[2] : !llvm.array<3 x ptr> // MLIR: %{{[0-9]+}} = llvm.insertvalue %{{[0-9]+}}, %{{[0-9]+}}[1] : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> // MLIR: llvm.return %{{[0-9]+}} : !llvm.struct<(array<4 x ptr>, array<3 x ptr>)> -// MLIR: } \ No newline at end of file +// MLIR: } diff --git a/clang/test/CIR/Lowering/call-op-call-conv.cir b/clang/test/CIR/Lowering/call-op-call-conv.cir index 21e9e01c14ae..92f0028e7bae 100644 --- a/clang/test/CIR/Lowering/call-op-call-conv.cir +++ b/clang/test/CIR/Lowering/call-op-call-conv.cir @@ -2,7 +2,7 @@ // RUN: FileCheck --input-file=%t.ll %s --check-prefix=LLVM !s32i = !cir.int -!fnptr = !cir.ptr> +!fnptr = !cir.ptr !s32i>> module { cir.func private @my_add(%a: !s32i, %b: !s32i) -> !s32i cc(spir_function) diff --git a/clang/test/CIR/Lowering/call.cir b/clang/test/CIR/Lowering/call.cir index ed4916d55e14..51dea3ef1008 100644 --- a/clang/test/CIR/Lowering/call.cir +++ b/clang/test/CIR/Lowering/call.cir @@ -38,11 +38,11 @@ module { } // check indirect call lowering - cir.global "private" external @fp : !cir.ptr> + cir.global "private" external @fp : !cir.ptr !s32i>> cir.func @callIndirect(%arg: !s32i) -> !s32i { - %fpp = cir.get_global @fp : !cir.ptr>> - %fp = cir.load %fpp : !cir.ptr>>, !cir.ptr> - %retval = cir.call %fp(%arg) : (!cir.ptr>, !s32i) -> !s32i + %fpp = cir.get_global @fp : !cir.ptr !s32i>>> + %fp = cir.load %fpp : !cir.ptr !s32i>>>, !cir.ptr !s32i>> + %retval = cir.call %fp(%arg) : (!cir.ptr !s32i>>, !s32i) -> !s32i cir.return %retval : !s32i } @@ -77,12 +77,12 @@ module { // LLVM-NEXT: ret i32 %1 // check indirect vararg call lowering - cir.global "private" external @varargfp : !cir.ptr> + cir.global "private" external @varargfp : !cir.ptr !s32i>> cir.func @varargCallIndirect() -> !s32i { - %fpp = cir.get_global @varargfp : !cir.ptr>> - %fp = cir.load %fpp : !cir.ptr>>, !cir.ptr> + %fpp = cir.get_global @varargfp : !cir.ptr !s32i>>> + %fp = cir.load %fpp : !cir.ptr !s32i>>>, !cir.ptr !s32i>> %zero = cir.const #cir.int<0> : !s32i - %retval = cir.call %fp(%zero, %zero) : (!cir.ptr>, !s32i, !s32i) -> !s32i + %retval = cir.call %fp(%zero, %zero) : (!cir.ptr !s32i>>, !s32i, !s32i) -> !s32i cir.return %retval : !s32i } diff --git a/clang/test/CIR/Lowering/func.cir b/clang/test/CIR/Lowering/func.cir index 76e6d4f0d181..241fdc364c02 100644 --- a/clang/test/CIR/Lowering/func.cir +++ b/clang/test/CIR/Lowering/func.cir @@ -6,11 +6,11 @@ module { cir.func no_proto private @noProto3(...) -> !s32i // MLIR: llvm.func @noProto3(...) -> i32 cir.func @test3(%arg0: !s32i) { - %3 = cir.get_global @noProto3 : !cir.ptr> + %3 = cir.get_global @noProto3 : !cir.ptr !s32i>> // MLIR: %[[#FN_PTR:]] = llvm.mlir.addressof @noProto3 : !llvm.ptr - %4 = cir.cast(bitcast, %3 : !cir.ptr>), !cir.ptr> + %4 = cir.cast(bitcast, %3 : !cir.ptr !s32i>>), !cir.ptr !s32i>> // MLIR: %[[#FUNC:]] = llvm.bitcast %[[#FN_PTR]] : !llvm.ptr to !llvm.ptr - %5 = cir.call %4(%arg0) : (!cir.ptr>, !s32i) -> !s32i + %5 = cir.call %4(%arg0) : (!cir.ptr !s32i>>, !s32i) -> !s32i // MLIR: %{{.+}} = llvm.call %[[#FUNC]](%{{.+}}) : !llvm.ptr, (i32) -> i32 cir.return } diff --git a/clang/test/CIR/Lowering/globals.cir b/clang/test/CIR/Lowering/globals.cir index 0108b56b8a7b..3f99fd102efd 100644 --- a/clang/test/CIR/Lowering/globals.cir +++ b/clang/test/CIR/Lowering/globals.cir @@ -15,7 +15,7 @@ !ty_Bar = !cir.struct !ty_StringStruct = !cir.struct, !cir.array, !cir.array} #cir.record.decl.ast> !ty_StringStructPtr = !cir.struct} #cir.record.decl.ast> -!ty_anon2E1_ = !cir.struct)>>} #cir.record.decl.ast> +!ty_anon2E1_ = !cir.struct)>>} #cir.record.decl.ast> module { cir.global external @a = #cir.int<3> : !s32i @@ -164,7 +164,7 @@ module { // MLIR: } // LLVM: @undefStruct = global %struct.Bar undef - cir.global "private" internal @Handlers = #cir.const_array<[#cir.const_struct<{#cir.global_view<@myfun> : !cir.ptr>}> : !ty_anon2E1_]> : !cir.array + cir.global "private" internal @Handlers = #cir.const_array<[#cir.const_struct<{#cir.global_view<@myfun> : !cir.ptr>}> : !ty_anon2E1_]> : !cir.array cir.func internal private @myfun(%arg0: !s32i) { %0 = cir.alloca !s32i, !cir.ptr, ["a", init] {alignment = 4 : i64} cir.store %arg0, %0 : !s32i, !cir.ptr @@ -179,10 +179,10 @@ module { %3 = cir.load %0 : !cir.ptr, !s32i %4 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr %5 = cir.ptr_stride(%4 : !cir.ptr, %3 : !s32i), !cir.ptr - %6 = cir.get_member %5[0] {name = "func"} : !cir.ptr -> !cir.ptr>> - %7 = cir.load %6 : !cir.ptr>>, !cir.ptr> + %6 = cir.get_member %5[0] {name = "func"} : !cir.ptr -> !cir.ptr>> + %7 = cir.load %6 : !cir.ptr>>, !cir.ptr> %8 = cir.load %1 : !cir.ptr, !s32i - cir.call %7(%8) : (!cir.ptr>, !s32i) -> () + cir.call %7(%8) : (!cir.ptr>, !s32i) -> () cir.return } //MLIR-LABEL: @foo diff --git a/clang/test/CIR/Lowering/hello.cir b/clang/test/CIR/Lowering/hello.cir index 868261307b87..361fa7ed36af 100644 --- a/clang/test/CIR/Lowering/hello.cir +++ b/clang/test/CIR/Lowering/hello.cir @@ -8,7 +8,7 @@ module @"/tmp/test.raw" attributes {cir.lang = #cir.lang, cir.sob = #cir.sign cir.global "private" constant internal @".str" = #cir.const_array<"Hello, world!\0A\00" : !cir.array> : !cir.array {alignment = 1 : i64} cir.func @main() -> !s32i { %0 = cir.alloca !s32i, !cir.ptr, ["__retval"] {alignment = 4 : i64} - %1 = cir.get_global @printf : !cir.ptr, ...)>> + %1 = cir.get_global @printf : !cir.ptr, ...) -> !s32i>> %2 = cir.get_global @".str" : !cir.ptr> %3 = cir.cast(array_to_ptrdecay, %2 : !cir.ptr>), !cir.ptr %4 = cir.call @printf(%3) : (!cir.ptr) -> !s32i From b5fb7656f2c0c7c5cefff09737c08f1509052af0 Mon Sep 17 00:00:00 2001 From: advay168 <23453652+advay168@users.noreply.github.com> Date: Mon, 24 Feb 2025 23:26:16 +0000 Subject: [PATCH 2298/2301] [CIR][CUDA] Fix CUDA CIR mangling bug (#1396) Includes function name while mangling C functions to avoid link errors. [This is the same way OG handles it](https://github.com/llvm/clangir/blob/c301b4a0d3d2d79b26c9c809c11b8a1137c0a9ec/clang/lib/CodeGen/CodeGenModule.cpp#L1896). --- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 2 +- clang/test/CIR/CodeGen/CUDA/mangling.cu | 92 +++++++++++++++++++++++++ 2 files changed, 93 insertions(+), 1 deletion(-) create mode 100644 clang/test/CIR/CodeGen/CUDA/mangling.cu diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 3c14885dc1c3..a1296846f9e7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -2428,7 +2428,7 @@ static std::string getMangledNameImpl(CIRGenModule &CGM, GlobalDecl GD, assert(0 && "NYI"); } else if (FD && FD->hasAttr() && GD.getKernelReferenceKind() == KernelReferenceKind::Stub) { - Out << "__device_stub__"; + Out << "__device_stub__" << II->getName(); } else { Out << II->getName(); } diff --git a/clang/test/CIR/CodeGen/CUDA/mangling.cu b/clang/test/CIR/CodeGen/CUDA/mangling.cu new file mode 100644 index 000000000000..27b9bc96bd7c --- /dev/null +++ b/clang/test/CIR/CodeGen/CUDA/mangling.cu @@ -0,0 +1,92 @@ +#include "../Inputs/cuda.h" + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir \ +// RUN: -x cuda -emit-cir -target-sdk-version=12.3 \ +// RUN: %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR-HOST --input-file=%t.cir %s + +// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fclangir \ +// RUN: -fcuda-is-device -emit-cir -target-sdk-version=12.3 \ +// RUN: %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR-DEVICE --input-file=%t.cir %s + +namespace ns { + __global__ void cpp_global_function_1(int a, int* b, float c) {} + // CIR-HOST: cir.func @_ZN2ns36__device_stub__cpp_global_function_1EiPif + // CIR-DEVICE: cir.func @_ZN2ns21cpp_global_function_1EiPif + + __global__ void cpp_global_function_2(int a, int* b, float c) {} + + // CIR-HOST: cir.func @_ZN2ns36__device_stub__cpp_global_function_2EiPif + // CIR-DEVICE: cir.func @_ZN2ns21cpp_global_function_2EiPif + + __host__ void cpp_host_function_1(int a, int* b, float c) {} + + // CIR-HOST: cir.func @_ZN2ns19cpp_host_function_1EiPif + + __host__ void cpp_host_function_2(int a, int* b, float c) {} + + // CIR-HOST: cir.func @_ZN2ns19cpp_host_function_2EiPif + + __device__ void cpp_device_function_1(int a, int* b, float c) {} + + // CIR-DEVICE: cir.func @_ZN2ns21cpp_device_function_1EiPif + + __device__ void cpp_device_function_2(int a, int* b, float c) {} + + // CIR-DEVICE: cir.func @_ZN2ns21cpp_device_function_2EiPif +} + +__global__ void cpp_global_function_1(int a, int* b, float c) {} + +// CIR-HOST: cir.func @_Z36__device_stub__cpp_global_function_1iPif +// CIR-DEVICE: cir.func @_Z21cpp_global_function_1iPif + +__global__ void cpp_global_function_2(int a, int* b, float c) {} + +// CIR-HOST: cir.func @_Z36__device_stub__cpp_global_function_2iPif +// CIR-DEVICE: cir.func @_Z21cpp_global_function_2iPif + +__host__ void cpp_host_function_1(int a, int* b, float c) {} + +// CIR-HOST: cir.func @_Z19cpp_host_function_1iPif + +__host__ void cpp_host_function_2(int a, int* b, float c) {} + +// CIR-HOST: cir.func @_Z19cpp_host_function_2iPif + +__device__ void cpp_device_function_1(int a, int* b, float c) {} + +// CIR-DEVICE: cir.func @_Z21cpp_device_function_1iPif + +__device__ void cpp_device_function_2(int a, int* b, float c) {} + +// CIR-DEVICE: cir.func @_Z21cpp_device_function_2iPif + +extern "C" { + __global__ void c_global_function_1(int a, int* b, float c) {} + + // CIR-HOST: cir.func @__device_stub__c_global_function_1 + // CIR-DEVICE: cir.func @c_global_function_1 + + __global__ void c_global_function_2(int a, int* b, float c) {} + + // CIR-HOST: cir.func @__device_stub__c_global_function_2 + // CIR-DEVICE: cir.func @c_global_function_2 + + __host__ void c_host_function_1(int a, int* b, float c) {} + + // CIR-HOST: cir.func @c_host_function_1 + + __host__ void c_host_function_2(int a, int* b, float c) {} + + // CIR-HOST: cir.func @c_host_function_2 + + __device__ void c_device_function_1(int a, int* b, float c) {} + + // CIR-DEVICE: cir.func @c_device_function_1 + + __device__ void c_device_function_2(int a, int* b, float c) {} + + // CIR-DEVICE: cir.func @c_device_function_2 +} \ No newline at end of file From dc932dee0b1ea19e49a7fb9cf763b5362af37b78 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Mon, 24 Feb 2025 18:33:48 -0800 Subject: [PATCH 2299/2301] [CIR] ClangIR specific .clang-tidy files (#1402) Commit the .clang-tidy files for ClangIR. The biggest different between these and the Clang files is the capitalization of variable names. Most ClangIR code follows the MLIR conventions instead of the Clang conventions. --- clang/lib/CIR/.clang-tidy | 62 ++++++++++++++++++++++++ clang/lib/CIR/FrontendAction/.clang-tidy | 16 ++++++ 2 files changed, 78 insertions(+) create mode 100644 clang/lib/CIR/.clang-tidy create mode 100644 clang/lib/CIR/FrontendAction/.clang-tidy diff --git a/clang/lib/CIR/.clang-tidy b/clang/lib/CIR/.clang-tidy new file mode 100644 index 000000000000..aaba4585494d --- /dev/null +++ b/clang/lib/CIR/.clang-tidy @@ -0,0 +1,62 @@ +InheritParentConfig: true +Checks: > + -misc-const-correctness, + -llvm-header-guard, + bugprone-argument-comment, + bugprone-assert-side-effect, + bugprone-branch-clone, + bugprone-copy-constructor-init, + bugprone-dangling-handle, + bugprone-dynamic-static-initializers, + bugprone-macro-parentheses, + bugprone-macro-repeated-side-effects, + bugprone-misplaced-widening-cast, + bugprone-move-forwarding-reference, + bugprone-multiple-statement-macro, + bugprone-suspicious-semicolon, + bugprone-swapped-arguments, + bugprone-terminating-continue, + bugprone-unused-raii, + bugprone-unused-return-value, + misc-redundant-expression, + misc-static-assert, + misc-unused-using-decls, + modernize-use-bool-literals, + modernize-loop-convert, + modernize-make-unique, + modernize-raw-string-literal, + modernize-use-equals-default, + modernize-use-default-member-init, + modernize-use-emplace, + modernize-use-nullptr, + modernize-use-override, + modernize-use-using, + performance-for-range-copy, + performance-implicit-conversion-in-loop, + performance-inefficient-algorithm, + performance-inefficient-vector-operation, + performance-move-const-arg, + performance-no-automatic-move, + performance-trivially-destructible, + performance-unnecessary-copy-initialization, + performance-unnecessary-value-param, + readability-avoid-const-params-in-decls, + readability-const-return-type, + readability-container-size-empty, + readability-identifier-naming, + readability-inconsistent-declaration-parameter-name, + readability-misleading-indentation, + readability-redundant-control-flow, + readability-redundant-smartptr-get, + readability-simplify-boolean-expr, + readability-simplify-subscript-expr, + readability-use-anyofallof + + +CheckOptions: + - key: readability-identifier-naming.MemberCase + value: camelBack + - key: readability-identifier-naming.ParameterCase + value: camelBack + - key: readability-identifier-naming.VariableCase + value: camelBack diff --git a/clang/lib/CIR/FrontendAction/.clang-tidy b/clang/lib/CIR/FrontendAction/.clang-tidy new file mode 100644 index 000000000000..cfb5bdb4bd1f --- /dev/null +++ b/clang/lib/CIR/FrontendAction/.clang-tidy @@ -0,0 +1,16 @@ +InheritParentConfig: true +CheckOptions: + - key: readability-identifier-naming.ClassCase + value: CamelCase + - key: readability-identifier-naming.EnumCase + value: CamelCase + - key: readability-identifier-naming.FunctionCase + value: camelBack + - key: readability-identifier-naming.MemberCase + value: CamelCase + - key: readability-identifier-naming.ParameterCase + value: CamelCase + - key: readability-identifier-naming.UnionCase + value: CamelCase + - key: readability-identifier-naming.VariableCase + value: CamelCase From 3f64640cb834dd51968bda5014abec9161d588b2 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Thu, 12 Dec 2024 20:57:20 +0800 Subject: [PATCH 2300/2301] [mlir][llvm] support new-struct-path-tbaa --- .../mlir/Dialect/LLVMIR/LLVMAttrDefs.td | 78 ++++++++++++++++++- .../mlir/Target/LLVMIR/ModuleTranslation.h | 2 +- mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp | 3 +- mlir/lib/Dialect/LLVMIR/IR/LLVMInterfaces.cpp | 10 ++- mlir/lib/Target/LLVMIR/ModuleTranslation.cpp | 38 ++++++++- 5 files changed, 125 insertions(+), 6 deletions(-) diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td index 267389774bd5..fb1615aa58c7 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td @@ -1084,8 +1084,84 @@ def LLVM_TBAATagAttr : LLVM_Attr<"TBAATag", "tbaa_tag"> { let assemblyFormat = "`<` struct(params) `>`"; } +def LLVM_TBAAStructFieldAttr : LLVM_Attr<"TBAAStructField", "tbaa_struct_field"> { + let parameters = (ins + "TBAANodeAttr":$typeDesc, + "int64_t":$offset, + "int64_t":$size + ); + let assemblyFormat = "`<` struct(params) `>`"; +} + + +def LLVM_TBAAStructFieldAttrArray : ArrayRefParameter<"TBAAStructFieldAttr"> { + let printer = [{ + $_printer << '{'; + llvm::interleaveComma($_self, $_printer, [&](TBAAStructFieldAttr attr) { + $_printer.printStrippedAttrOrType(attr); + }); + $_printer << '}'; + }]; + + let parser = [{ + [&]() -> FailureOr> { + using Result = SmallVector; + if ($_parser.parseLBrace()) + return failure(); + FailureOr result = FieldParser::parse($_parser); + if (failed(result)) + return failure(); + if ($_parser.parseRBrace()) + return failure(); + return result; + }() + }]; +} + +def LLVM_TBAATypeNodeAttr : LLVM_Attr<"TBAATypeNode", "tbaa_type_node", [], "TBAANodeAttr"> { + let parameters = (ins + "TBAANodeAttr":$parent, + "int64_t":$size, + StringRefParameter<>:$id, + LLVM_TBAAStructFieldAttrArray:$fields + ); + let assemblyFormat = "`<` struct(params) `>`"; +} + +def LLVM_TBAAAccessTagAttr : LLVM_Attr<"TBAAAccessTag", "tbaa_access_tag"> { + let parameters = (ins + "TBAATypeNodeAttr":$base_type, + "TBAATypeNodeAttr":$access_type, + "int64_t":$offset, + "int64_t":$size + ); + let builders = [ + AttrBuilderWithInferredContext<(ins "TBAATypeNodeAttr":$baseType, + "TBAATypeNodeAttr":$accessType, + "int64_t":$offset, + "int64_t":$size), [{ + return $_get(baseType.getContext(), baseType, accessType, offset, size); + }]> + ]; + let assemblyFormat = "`<` struct(params) `>`"; +} + +def LLVM_TBAAAccessTagArrayAttr + : TypedArrayAttrBase { + let constBuilderCall = ?; +} + +// def LLVM_TBAATagAttr2 : AnyAttrOf<[ +// LLVM_TBAATagAttr, +// LLVM_TBAAAccessTagAttr +// ]>; + def LLVM_TBAATagArrayAttr - : TypedArrayAttrBase, LLVM_TBAATagAttr.summary # " array"> { let constBuilderCall = ?; } diff --git a/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h b/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h index 1b62437761ed..cd1370a35aaa 100644 --- a/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h +++ b/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h @@ -329,7 +329,7 @@ class ModuleTranslation { /// Returns the LLVM metadata corresponding to the given mlir LLVM dialect /// TBAATagAttr. - llvm::MDNode *getTBAANode(TBAATagAttr tbaaAttr) const; + llvm::MDNode *getTBAANode(Attribute tbaaAttr) const; /// Process tbaa LLVM Metadata operations and create LLVM /// metadata nodes for them. diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp index ef5f1b069b40..4a9829e14f6e 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -3383,7 +3383,8 @@ struct LLVMOpAsmDialectInterface : public OpAsmDialectInterface { LoopVectorizeAttr, LoopInterleaveAttr, LoopUnrollAttr, LoopUnrollAndJamAttr, LoopLICMAttr, LoopDistributeAttr, LoopPipelineAttr, LoopPeeledAttr, LoopUnswitchAttr, TBAARootAttr, - TBAATagAttr, TBAATypeDescriptorAttr>([&](auto attr) { + TBAATagAttr, TBAATypeDescriptorAttr, TBAAAccessTagAttr, + TBAATypeNodeAttr>([&](auto attr) { os << decltype(attr)::getMnemonic(); return AliasResult::OverridableAlias; }) diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMInterfaces.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMInterfaces.cpp index ca1277c09323..3ece05f10419 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMInterfaces.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMInterfaces.cpp @@ -58,7 +58,15 @@ mlir::LLVM::detail::verifyAliasAnalysisOpInterface(Operation *op) { ArrayAttr tags = iface.getTBAATagsOrNull(); if (!tags) return success(); - + if (tags.size() > 0) { + if (mlir::isa(tags[0])) { + return isArrayOf(op, tags); + } + + if (mlir::isa(tags[0])) { + return isArrayOf(op, tags); + } + } return isArrayOf(op, tags); } diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp index 4367100e3aca..62b7d3cd07e8 100644 --- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp @@ -1782,7 +1782,8 @@ void ModuleTranslation::setAliasScopeMetadata(AliasAnalysisOpInterface op, llvm::LLVMContext::MD_noalias); } -llvm::MDNode *ModuleTranslation::getTBAANode(TBAATagAttr tbaaAttr) const { +// llvm::MDNode *ModuleTranslation::getTBAANode(TBAATagAttr tbaaAttr) const { +llvm::MDNode *ModuleTranslation::getTBAANode(Attribute tbaaAttr) const { return tbaaMetadataMapping.lookup(tbaaAttr); } @@ -1802,7 +1803,8 @@ void ModuleTranslation::setTBAAMetadata(AliasAnalysisOpInterface op, return; } - llvm::MDNode *node = getTBAANode(cast(tagRefs[0])); + // llvm::MDNode *node = getTBAANode(cast(tagRefs[0])); + llvm::MDNode *node = getTBAANode(tagRefs[0]); inst->setMetadata(llvm::LLVMContext::MD_tbaa, node); } @@ -1822,6 +1824,7 @@ void ModuleTranslation::setBranchWeightsMetadata(BranchWeightOpInterface op) { LogicalResult ModuleTranslation::createTBAAMetadata() { llvm::LLVMContext &ctx = llvmModule->getContext(); llvm::IntegerType *offsetTy = llvm::IntegerType::get(ctx, 64); + llvm::IntegerType *sizeTy = llvm::IntegerType::get(ctx, 64); // Walk the entire module and create all metadata nodes for the TBAA // attributes. The code below relies on two invariants of the @@ -1849,6 +1852,23 @@ LogicalResult ModuleTranslation::createTBAAMetadata() { tbaaMetadataMapping.insert({descriptor, llvm::MDNode::get(ctx, operands)}); }); + walker.addWalk([&](TBAATypeNodeAttr descriptor) { + SmallVector operands; + operands.push_back(tbaaMetadataMapping.lookup(descriptor.getParent())); + operands.push_back(llvm::ConstantAsMetadata::get( + llvm::ConstantInt::get(sizeTy, descriptor.getSize()))); + operands.push_back(llvm::MDString::get(ctx, descriptor.getId())); + for (auto field : descriptor.getFields()) { + operands.push_back(tbaaMetadataMapping.lookup(field.getTypeDesc())); + operands.push_back(llvm::ConstantAsMetadata::get( + llvm::ConstantInt::get(offsetTy, field.getOffset()))); + operands.push_back(llvm::ConstantAsMetadata::get( + llvm::ConstantInt::get(sizeTy, field.getSize()))); + } + + tbaaMetadataMapping.insert({descriptor, llvm::MDNode::get(ctx, operands)}); + }); + walker.addWalk([&](TBAATagAttr tag) { SmallVector operands; @@ -1864,6 +1884,20 @@ LogicalResult ModuleTranslation::createTBAAMetadata() { tbaaMetadataMapping.insert({tag, llvm::MDNode::get(ctx, operands)}); }); + walker.addWalk([&](TBAAAccessTagAttr tag) { + SmallVector operands; + + operands.push_back(tbaaMetadataMapping.lookup(tag.getBaseType())); + operands.push_back(tbaaMetadataMapping.lookup(tag.getAccessType())); + + operands.push_back(llvm::ConstantAsMetadata::get( + llvm::ConstantInt::get(offsetTy, tag.getOffset()))); + operands.push_back(llvm::ConstantAsMetadata::get( + llvm::ConstantInt::get(sizeTy, tag.getSize()))); + + tbaaMetadataMapping.insert({tag, llvm::MDNode::get(ctx, operands)}); + }); + mlirModule->walk([&](AliasAnalysisOpInterface analysisOpInterface) { if (auto attr = analysisOpInterface.getTBAATagsOrNull()) walker.walk(attr); From 7d3576c28b6c882ce3ec8b95f48a92cba8e6c6f1 Mon Sep 17 00:00:00 2001 From: PikachuHy Date: Tue, 25 Feb 2025 13:46:32 +0800 Subject: [PATCH 2301/2301] [CIR][CIRGen][TBAA] Add support for -new-struct-path-tbaa --- .../clang/CIR/Dialect/IR/CIRDialect.td | 2 + .../clang/CIR/Dialect/IR/CIRTBAAAttrs.td | 5 +- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 4 + clang/lib/CIR/CodeGen/CIRGenTBAA.cpp | 13 +-- .../Transforms/TargetLowering/LowerModule.cpp | 6 + .../Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp | 104 ++++++++++++++++++ clang/test/CIR/CodeGen/tbaa-struct.cpp | 44 ++++++-- 7 files changed, 158 insertions(+), 20 deletions(-) diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td index c1ea26919c8e..08805ebcc990 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRDialect.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRDialect.td @@ -46,6 +46,8 @@ def CIR_Dialect : Dialect { static llvm::StringRef getOpenCLVersionAttrName() { return "cir.cl.version"; } static llvm::StringRef getCUDABinaryHandleAttrName() { return "cir.cu.binary_handle"; } + + static llvm::StringRef getNewStructPathTBAAAttrName() { return "cir.new_struct_path_tbaa"; } void registerAttributes(); void registerTypes(); diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td b/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td index 83d3d24c05c8..fb1761c553bb 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTBAAAttrs.td @@ -62,7 +62,7 @@ def CIR_TBAAMemberAttr : CIR_Attr<"TBAAMember", "tbaa_member", []> { !ty_StructS = !cir.struct #tbaa_scalar = #cir.tbaa_scalar #tbaa_scalar1 = #cir.tbaa_scalar - #tbaa_struct = #cir.tbaa_struct, <#tbaa_scalar, 4>}> + #tbaa_struct = #cir.tbaa_struct, <#tbaa_scalar, 4>}> ``` See the following link for more details: @@ -101,6 +101,7 @@ def CIR_TBAAStructAttr : CIR_Attr<"TBAAStruct", let summary = "Describes a struct type in TBAA"; let parameters = (ins StringRefParameter<> : $id, + CIR_StructType:$type, CIR_TBAAMemberAttrArray:$members); let description = [{ @@ -112,7 +113,7 @@ def CIR_TBAAStructAttr : CIR_Attr<"TBAAStruct", #tbaa_scalar = #cir.tbaa_scalar #tbaa_scalar1 = #cir.tbaa_scalar // CIR_TBAAStructAttr - #tbaa_struct = #cir.tbaa_struct, <#tbaa_scalar, 4>}> + #tbaa_struct = #cir.tbaa_struct, <#tbaa_scalar, 4>}> ``` See the following link for more details: diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index a1296846f9e7..5c5c15feec2f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -231,6 +231,10 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, tbaa.reset(new CIRGenTBAA(&mlirContext, astContext, genTypes, theModule, codeGenOpts, langOpts)); } + if (codeGenOpts.NewStructPathTBAA) { + theModule->setAttr(cir::CIRDialect::getNewStructPathTBAAAttrName(), + cir::BoolAttr::get(&mlirContext, cir::BoolType{}, true)); + } } CIRGenModule::~CIRGenModule() {} diff --git a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp index f2d46a30cc53..c8cf6c9c4da2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenTBAA.cpp @@ -169,7 +169,7 @@ cir::TBAAAttr CIRGenTBAA::getTypeInfoHelper(clang::QualType qty) { // Accesses to arrays are accesses to objects of their element types. if (codeGenOpts.NewStructPathTBAA && ty->isArrayType()) { assert(!cir::MissingFeatures::tbaaNewStructPath()); - return tbaa_NYI(mlirContext); + return getTypeInfo(cast(ty)->getElementType()); } // Enum types are distinct types. In C++ they have "underlying types", // however they aren't related for TBAA. @@ -336,11 +336,8 @@ cir::TBAAAttr CIRGenTBAA::getBaseTypeInfoHelper(const clang::Type *ty) { outName = rd->getName(); } - if (codeGenOpts.NewStructPathTBAA) { - assert(!cir::MissingFeatures::tbaaNewStructPath()); - return nullptr; - } - return cir::TBAAStructAttr::get(mlirContext, outName, fields); + return cir::TBAAStructAttr::get(mlirContext, outName, + types.convertRecordDeclType(rd), fields); } return nullptr; } @@ -364,10 +361,6 @@ cir::TBAAAttr CIRGenTBAA::getAccessTagInfo(TBAAAccessInfo tbaaInfo) { assert(!tbaaInfo.offset && "Nonzero offset for an access with no base type!"); } - if (codeGenOpts.NewStructPathTBAA) { - assert(!cir::MissingFeatures::tbaaNewStructPath()); - return tbaa_NYI(mlirContext); - } if (tbaaInfo.baseType == tbaaInfo.accessType) { return tbaaInfo.accessType; } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp index e979e813336f..03d993ce0d98 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/LowerModule.cpp @@ -24,6 +24,7 @@ #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Support/LogicalResult.h" +#include "clang/CIR/Dialect/IR/CIRDialect.h" #include "clang/CIR/Target/AArch64.h" #include "llvm/Support/ErrorHandling.h" @@ -254,6 +255,11 @@ createLowerModule(mlir::ModuleOp module, mlir::PatternRewriter &rewriter) { codeGenOpts.OptimizeSize = optInfo.getSize(); } + if (auto newStructPathTbaaAttr = mlir::cast_if_present( + module->getAttr(cir::CIRDialect::getNewStructPathTBAAAttrName()))) { + codeGenOpts.NewStructPathTBAA = newStructPathTbaaAttr.getValue(); + } + return std::make_unique(std::move(langOpts), std::move(codeGenOpts), module, std::move(targetInfo), rewriter); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp index 83a63ee38d11..cbd716797409 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerTBAAToLLVM.cpp @@ -5,6 +5,7 @@ #include "mlir/IR/MLIRContext.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "clang/CIR/Dialect/IR/CIRAttrs.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/ErrorHandling.h" #include @@ -69,10 +70,113 @@ class CIRToLLVMTBAAAttrLowering { mlir::MLIRContext *mlirContext; }; +class CIRToLLVMTBAAAttrLoweringNewPath { +public: + CIRToLLVMTBAAAttrLoweringNewPath(mlir::MLIRContext *mlirContext) + : mlirContext(mlirContext) {} + mlir::LLVM::TBAATypeNodeAttr + lowerCIRTBAAAttrToLLVMTBAAAttr(mlir::Attribute tbaa) { + + if (auto charAttr = mlir::dyn_cast(tbaa)) { + return getChar(); + } + if (auto scalarAttr = mlir::dyn_cast(tbaa)) { + mlir::DataLayout layout; + auto size = layout.getTypeSize(scalarAttr.getType()); + return createScalarTypeNode(scalarAttr.getId(), getChar(), size); + } + if (auto structAttr = mlir::dyn_cast(tbaa)) { + llvm::SmallVector members; + for (const auto &member : structAttr.getMembers()) { + auto memberTypeDesc = + lowerCIRTBAAAttrToLLVMTBAAAttr(member.getTypeDesc()); + auto memberAttr = mlir::LLVM::TBAAStructFieldAttr::get( + mlirContext, memberTypeDesc, member.getOffset(), + getSize(member.getTypeDesc())); + members.push_back(memberAttr); + } + + return mlir::LLVM::TBAATypeNodeAttr::get(mlirContext, getChar(), + getSize(structAttr), + structAttr.getId(), members); + } + return nullptr; + } + static int64_t getSize(mlir::Attribute tbaa) { + if (auto charAttr = mlir::dyn_cast(tbaa)) { + return 1; + } + if (auto scalarAttr = mlir::dyn_cast(tbaa)) { + mlir::DataLayout layout; + auto size = layout.getTypeSize(scalarAttr.getType()); + return size; + } + if (auto structAttr = mlir::dyn_cast(tbaa)) { + mlir::DataLayout layout; + auto size = layout.getTypeSize(structAttr.getType()); + return size; + } + return 0; + } + + mlir::LLVM::TBAARootAttr createTBAARoot(llvm::StringRef name) { + return mlir::LLVM::TBAARootAttr::get( + mlirContext, mlir::StringAttr::get(mlirContext, name)); + } + mlir::LLVM::TBAARootAttr getRoot() { + return createTBAARoot("Simple C/C++ TBAA"); + } + + mlir::LLVM::TBAATypeNodeAttr getChar() { + return createScalarTypeNode("omnipotent char", getRoot(), 1); + } + + mlir::LLVM::TBAATypeNodeAttr + createScalarTypeNode(llvm::StringRef typeName, + mlir::LLVM::TBAANodeAttr parent, int64_t size) { + + return mlir::LLVM::TBAATypeNodeAttr::get( + mlirContext, parent, size, typeName, + llvm::ArrayRef()); + } + mlir::MLIRContext *mlirContext; +}; + +mlir::ArrayAttr lowerCIRTBAAAttr(mlir::Attribute tbaa, + mlir::ConversionPatternRewriter &rewriter) { + auto *ctx = rewriter.getContext(); + CIRToLLVMTBAAAttrLoweringNewPath lower(ctx); + if (auto tbaaTag = mlir::dyn_cast(tbaa)) { + auto accessType = lower.lowerCIRTBAAAttrToLLVMTBAAAttr(tbaaTag.getAccess()); + if (auto structAttr = + mlir::dyn_cast(tbaaTag.getBase())) { + auto baseType = lower.lowerCIRTBAAAttrToLLVMTBAAAttr(structAttr); + auto tag = mlir::LLVM::TBAAAccessTagAttr::get( + baseType, accessType, tbaaTag.getOffset(), + lower.getSize(tbaaTag.getAccess())); + return mlir::ArrayAttr::get(ctx, {tag}); + } + } else { + auto accessType = lower.lowerCIRTBAAAttrToLLVMTBAAAttr(tbaa); + if (accessType) { + auto tag = mlir::LLVM::TBAAAccessTagAttr::get(accessType, accessType, 0, + lower.getSize(tbaa)); + return mlir::ArrayAttr::get(ctx, {tag}); + } + } + return mlir::ArrayAttr(); +} + mlir::ArrayAttr lowerCIRTBAAAttr(mlir::Attribute tbaa, mlir::ConversionPatternRewriter &rewriter, cir::LowerModule *lowerMod) { auto *ctx = rewriter.getContext(); + auto newStructPathTBAA = + lowerMod->getContext().getCodeGenOpts().NewStructPathTBAA; + if (newStructPathTBAA) { + auto ret = lowerCIRTBAAAttr(tbaa, rewriter); + return ret; + } CIRToLLVMTBAAAttrLowering lower(ctx); if (auto tbaaTag = mlir::dyn_cast(tbaa)) { mlir::LLVM::TBAATypeDescriptorAttr accessType = diff --git a/clang/test/CIR/CodeGen/tbaa-struct.cpp b/clang/test/CIR/CodeGen/tbaa-struct.cpp index 7349ff4cc5e1..58f9ec4d8cc5 100644 --- a/clang/test/CIR/CodeGen/tbaa-struct.cpp +++ b/clang/test/CIR/CodeGen/tbaa-struct.cpp @@ -8,6 +8,8 @@ // RUN: FileCheck --check-prefix=CHECK --input-file=%t.ll %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -disable-llvm-passes // RUN: FileCheck --check-prefixes=PATH,OLD-PATH --input-file=%t.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -disable-llvm-passes -new-struct-path-tbaa +// RUN: FileCheck --check-prefixes=PATH,NEW-PATH --input-file=%t.ll %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O1 -disable-llvm-passes -relaxed-aliasing // RUN: FileCheck --check-prefix=NO-TBAA --input-file=%t.ll %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t.ll -O0 -disable-llvm-passes @@ -18,24 +20,24 @@ // CIR: #tbaa[[CHAR:.*]] = #cir.tbaa_omnipotent_char // CIR: #tbaa[[INT:.*]] = #cir.tbaa_scalar // CIR: #tbaa[[SHORT:.*]] = #cir.tbaa_scalar -// CIR: #tbaa[[STRUCT_six:.*]] = #cir.tbaa_struct, <#tbaa[[CHAR]], 4>, <#tbaa[[CHAR]], 5>}> -// CIR: #tbaa[[STRUCT_StructA:.*]] = #cir.tbaa_struct, <#tbaa[[INT]], 4>, <#tbaa[[SHORT]], 8>, <#tbaa[[INT]], 12>}> -// CIR: #tbaa[[STRUCT_StructS:.*]] = #cir.tbaa_struct, <#tbaa[[INT]], 4>}> -// CIR: #tbaa[[STRUCT_StructS2:.*]] = #cir.tbaa_struct, <#tbaa[[INT]], 4>}> -// CIR: #tbaa[[TAG_six_b:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[STRUCT_StructA:.*]] = #cir.tbaa_struct, <#tbaa[[INT]], 4>, <#tbaa[[SHORT]], 8>, <#tbaa[[INT]], 12>}> +// CIR: #tbaa[[STRUCT_StructS:.*]] = #cir.tbaa_struct, <#tbaa[[INT]], 4>}> +// CIR: #tbaa[[STRUCT_StructS2:.*]] = #cir.tbaa_struct, <#tbaa[[INT]], 4>}> +// CIR: #tbaa[[STRUCT_six:.*]] = #cir.tbaa_struct, <#tbaa[[CHAR]], 4>, <#tbaa[[CHAR]], 5>}> // CIR: #tbaa[[TAG_StructA_f32:.*]] = #cir.tbaa_tag // CIR: #tbaa[[TAG_StructA_f16:.*]] = #cir.tbaa_tag // CIR: #tbaa[[TAG_StructS_f32:.*]] = #cir.tbaa_tag // CIR: #tbaa[[TAG_StructS_f16:.*]] = #cir.tbaa_tag // CIR: #tbaa[[TAG_StructS2_f32:.*]] = #cir.tbaa_tag // CIR: #tbaa[[TAG_StructS2_f16:.*]] = #cir.tbaa_tag -// CIR: #tbaa[[STRUCT_StructB:.*]] = #cir.tbaa_struct, <#tbaa[[STRUCT_StructA]], 4>, <#tbaa[[INT]], 20>}> +// CIR: #tbaa[[TAG_six_b:.*]] = #cir.tbaa_tag +// CIR: #tbaa[[STRUCT_StructB:.*]] = #cir.tbaa_struct, <#tbaa[[STRUCT_StructA]], 4>, <#tbaa[[INT]], 20>}> // CIR: #tbaa[[TAG_StructB_a_f32:.*]] = #cir.tbaa_tag // CIR: #tbaa[[TAG_StructB_a_f16:.*]] = #cir.tbaa_tag // CIR: #tbaa[[TAG_StructB_f32:.*]] = #cir.tbaa_tag // CIR: #tbaa[[TAG_StructB_a_f32_2:.*]] = #cir.tbaa_tag -// CIR: #tbaa[[STRUCT_StructC:.*]] = #cir.tbaa_struct, <#tbaa[[STRUCT_StructB]], 4>, <#tbaa[[INT]], 28>}> -// CIR: #tbaa[[STRUCT_StructD:.*]] = #cir.tbaa_struct, <#tbaa[[STRUCT_StructB]], 4>, <#tbaa[[INT]], 28>, <#tbaa[[CHAR]], 32>}> +// CIR: #tbaa[[STRUCT_StructC:.*]] = #cir.tbaa_struct, <#tbaa[[STRUCT_StructB]], 4>, <#tbaa[[INT]], 28>}> +// CIR: #tbaa[[STRUCT_StructD:.*]] = #cir.tbaa_struct, <#tbaa[[STRUCT_StructB]], 4>, <#tbaa[[INT]], 28>, <#tbaa[[CHAR]], 32>}> // CIR: #tbaa[[TAG_StructC_b_a_f32:.*]] = #cir.tbaa_tag // CIR: #tbaa[[TAG_StructD_b_a_f32:.*]] = #cir.tbaa_tag @@ -402,3 +404,29 @@ uint32_t g15(StructS *S, StructS3 *S3, uint64_t count) { // OLD-PATH: [[TYPE_D]] = !{!"_ZTS7StructD", [[TYPE_SHORT]], i64 0, [[TYPE_B]], i64 4, [[TYPE_INT]], i64 28, [[TYPE_CHAR]], i64 32} // OLD-PATH: [[TAG_six_b]] = !{[[TYPE_six:!.*]], [[TYPE_CHAR]], i64 4} // OLD-PATH: [[TYPE_six]] = !{!"_ZTS3six", [[TYPE_CHAR]], i64 0, [[TYPE_CHAR]], i64 4, [[TYPE_CHAR]], i64 5} + +// NEW-PATH-DAG: [[ROOT:!.*]] = !{!"Simple C/C++ TBAA"} +// NEW-PATH-DAG: [[TYPE_char:!.*]] = !{[[ROOT]], i64 1, !"omnipotent char"} +// NEW-PATH-DAG: [[TYPE_short:!.*]] = !{[[TYPE_char]], i64 2, !"short"} +// NEW-PATH-DAG: [[TYPE_int:!.*]] = !{[[TYPE_char]], i64 4, !"int"} +// NEW-PATH-DAG: [[TAG_i32:!.*]] = !{[[TYPE_int]], [[TYPE_int]], i64 0, i64 4} +// NEW-PATH-DAG: [[TYPE_A:!.*]] = !{[[TYPE_char]], i64 16, !"_ZTS7StructA", [[TYPE_short]], i64 0, i64 2, [[TYPE_int]], i64 4, i64 4, [[TYPE_short]], i64 8, i64 2, [[TYPE_int]], i64 12, i64 4} +// NEW-PATH-DAG: [[TAG_A_f16]] = !{[[TYPE_A]], [[TYPE_short]], i64 0, i64 2} +// NEW-PATH-DAG: [[TAG_A_f32]] = !{[[TYPE_A]], [[TYPE_int]], i64 4, i64 4} +// NEW-PATH-DAG: [[TYPE_B:!.*]] = !{[[TYPE_char]], i64 24, !"_ZTS7StructB", [[TYPE_short]], i64 0, i64 2, [[TYPE_A]], i64 4, i64 16, [[TYPE_int]], i64 20, i64 4} +// NEW-PATH-DAG: [[TAG_B_a_f16]] = !{[[TYPE_B]], [[TYPE_short]], i64 4, i64 2} +// NEW-PATH-DAG: [[TAG_B_a_f32]] = !{[[TYPE_B]], [[TYPE_int]], i64 8, i64 4} +// NEW-PATH-DAG: [[TAG_B_f32]] = !{[[TYPE_B]], [[TYPE_int]], i64 20, i64 4} +// NEW-PATH-DAG: [[TAG_B_a_f32_2]] = !{[[TYPE_B]], [[TYPE_int]], i64 16, i64 4} +// NEW-PATH-DAG: [[TYPE_S:!.*]] = !{[[TYPE_char]], i64 8, !"_ZTS7StructS", [[TYPE_short]], i64 0, i64 2, [[TYPE_int]], i64 4, i64 4} +// NEW-PATH-DAG: [[TAG_S_f16]] = !{[[TYPE_S]], [[TYPE_short]], i64 0, i64 2} +// NEW-PATH-DAG: [[TAG_S_f32]] = !{[[TYPE_S]], [[TYPE_int]], i64 4, i64 4} +// NEW-PATH-DAG: [[TYPE_S2:!.*]] = !{[[TYPE_char]], i64 8, !"_ZTS8StructS2", [[TYPE_short]], i64 0, i64 2, [[TYPE_int]], i64 4, i64 4} +// NEW-PATH-DAG: [[TAG_S2_f16]] = !{[[TYPE_S2]], [[TYPE_short]], i64 0, i64 2} +// NEW-PATH-DAG: [[TAG_S2_f32]] = !{[[TYPE_S2]], [[TYPE_int]], i64 4, i64 4} +// NEW-PATH-DAG: [[TYPE_C:!.*]] = !{[[TYPE_char]], i64 32, !"_ZTS7StructC", [[TYPE_short]], i64 0, i64 2, [[TYPE_B]], i64 4, i64 24, [[TYPE_int]], i64 28, i64 4} +// NEW-PATH-DAG: [[TAG_C_b_a_f32]] = !{[[TYPE_C]], [[TYPE_int]], i64 12, i64 4} +// NEW-PATH-DAG: [[TYPE_D:!.*]] = !{[[TYPE_char]], i64 36, !"_ZTS7StructD", [[TYPE_short]], i64 0, i64 2, [[TYPE_B]], i64 4, i64 24, [[TYPE_int]], i64 28, i64 4, [[TYPE_char]], i64 32, i64 1} +// NEW-PATH-DAG: [[TAG_D_b_a_f32]] = !{[[TYPE_D]], [[TYPE_int]], i64 12, i64 4} +// NEW-PATH-DAG: [[TYPE_six:!.*]] = !{[[TYPE_char]], i64 6, !"_ZTS3six", [[TYPE_char]], i64 0, i64 1, [[TYPE_char]], i64 4, i64 1, [[TYPE_char]], i64 5, i64 1} +// NEW-PATH-DAG: [[TAG_six_b]] = !{[[TYPE_six]], [[TYPE_char]], i64 4, i64 1}